summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authoradamhrv <adam@ahprojects.com>2018-12-15 19:57:49 +0100
committeradamhrv <adam@ahprojects.com>2018-12-15 19:57:49 +0100
commit82b2c0b5d6d7baccbe4d574d96e18fe2078047d7 (patch)
treea8784b7ec2bc5a0451c252f66a6b786f3a2504f5
parent8e978af21c2b29f678a09701afb3ec7d65d0a6ab (diff)
parentc5b02ffab8d388e8a2925e51736b902a48a95e71 (diff)
Merge branch 'master' of github.com:adamhrv/megapixels_dev
-rw-r--r--.gitignore7
-rw-r--r--README.md33
-rw-r--r--builder/README.md21
-rw-r--r--builder/__init__.py (renamed from __init__.py)0
-rw-r--r--builder/builder.py90
-rw-r--r--builder/parser.py172
-rw-r--r--builder/paths.py6
-rw-r--r--builder/s3.py61
-rw-r--r--datasets/citations-1.csv77
-rw-r--r--datasets/citations-2.csv89
-rw-r--r--datasets/citations-3.csv51
l---------datasets/citations.csv1
-rw-r--r--faiss/requirements.txt11
-rw-r--r--faiss/run.sh1
-rw-r--r--faiss/static/css/app.css289
-rw-r--r--faiss/static/favicon.icobin0 -> 15086 bytes
-rw-r--r--faiss/static/img/play.pngbin0 -> 1231 bytes
-rw-r--r--faiss/static/index.html83
-rw-r--r--faiss/static/js/app.js491
-rw-r--r--faiss/static/js/dataUriToBlob.js58
-rw-r--r--faiss/static/js/metadata-app.js50
-rw-r--r--faiss/static/js/store2.min.js5
-rw-r--r--faiss/static/metadata.html11
-rw-r--r--faiss/static/search.html1
-rw-r--r--faiss/util.py29
-rw-r--r--faiss/wsgi.py5
-rw-r--r--ids.json1
-rw-r--r--megapixels/app/models/sql_factory.py152
-rw-r--r--megapixels/app/server/api.py54
-rw-r--r--megapixels/app/server/create.py36
l---------megapixels/app/server/static1
-rw-r--r--megapixels/app/settings/app_cfg.py14
-rw-r--r--megapixels/app/utils/file_utils.py46
-rw-r--r--megapixels/cli_faiss.py36
-rw-r--r--megapixels/cli_flask.py19
-rw-r--r--megapixels/commands/faiss/build_db.py15
-rw-r--r--megapixels/commands/faiss/build_faiss.py58
-rw-r--r--megapixels/commands/faiss/sync_metadata.py18
-rw-r--r--s2-papers.py64
-rw-r--r--scraper/README.md146
-rw-r--r--scraper/__init__.py0
-rw-r--r--scraper/check-counts.py (renamed from check-counts.py)0
-rw-r--r--scraper/client/actions.js9
-rw-r--r--scraper/client/app.js46
-rw-r--r--scraper/client/common/activeLink.component.js16
-rw-r--r--scraper/client/common/classifier.component.js99
-rw-r--r--scraper/client/common/common.css347
-rw-r--r--scraper/client/common/detectionBoxes.component.js15
-rw-r--r--scraper/client/common/detectionList.component.js16
-rw-r--r--scraper/client/common/footer.component.js10
-rw-r--r--scraper/client/common/gate.component.js21
-rw-r--r--scraper/client/common/header.component.js1
-rw-r--r--scraper/client/common/index.js36
-rw-r--r--scraper/client/common/keyframe.component.js118
-rw-r--r--scraper/client/common/keyframes.component.js95
-rw-r--r--scraper/client/common/loader.component.js10
-rw-r--r--scraper/client/common/sidebar.component.js37
-rw-r--r--scraper/client/common/table.component.js121
-rw-r--r--scraper/client/common/video.component.js47
-rw-r--r--scraper/client/index.js19
-rw-r--r--scraper/client/metadata/index.js25
-rw-r--r--scraper/client/session.js5
-rw-r--r--scraper/client/store.js38
-rw-r--r--scraper/client/types.js21
-rw-r--r--scraper/client/util.js167
-rw-r--r--scraper/content-script.crx (renamed from content-script.crx)bin26845 -> 26845 bytes
-rw-r--r--scraper/content-script.pem (renamed from content-script.pem)0
-rw-r--r--scraper/content-script/.gitignore (renamed from content-script/.gitignore)0
-rw-r--r--scraper/content-script/alone-off.png (renamed from content-script/alone-off.png)bin1259 -> 1259 bytes
-rw-r--r--scraper/content-script/alone-on.png (renamed from content-script/alone-on.png)bin1271 -> 1271 bytes
-rw-r--r--scraper/content-script/background.js (renamed from content-script/background.js)0
-rw-r--r--scraper/content-script/check.js (renamed from content-script/check.js)0
-rw-r--r--scraper/content-script/icon-128.png (renamed from content-script/icon-128.png)bin15156 -> 15156 bytes
-rw-r--r--scraper/content-script/icon-16.png (renamed from content-script/icon-16.png)bin1490 -> 1490 bytes
-rw-r--r--scraper/content-script/icon-48.png (renamed from content-script/icon-48.png)bin3973 -> 3973 bytes
-rw-r--r--scraper/content-script/index.html (renamed from content-script/index.html)0
-rw-r--r--scraper/content-script/manifest.json (renamed from content-script/manifest.json)0
-rw-r--r--scraper/content-script/options.html (renamed from content-script/options.html)0
-rw-r--r--scraper/content-script/options.js (renamed from content-script/options.js)0
-rw-r--r--scraper/datasets/citations-20181031.csv (renamed from datasets/citations-2018310.csv)0
-rw-r--r--scraper/datasets/citations-20181207.csv440
l---------scraper/datasets/citations.csv1
-rw-r--r--scraper/datasets/scholar/entries/300 Faces in-the-Wild Challenge: The first facial landmark localization Challenge.csv (renamed from datasets/scholar/entries/300 Faces in-the-Wild Challenge: The first facial landmark localization Challenge.csv)0
-rw-r--r--scraper/datasets/scholar/entries/300 faces In-the-wild challenge: Database and results.csv (renamed from datasets/scholar/entries/300 faces In-the-wild challenge: Database and results.csv)0
-rw-r--r--scraper/datasets/scholar/entries/A data-driven approach to cleaning large face datasets.csv (renamed from datasets/scholar/entries/A data-driven approach to cleaning large face datasets.csv)0
-rw-r--r--scraper/datasets/scholar/entries/A semi-automatic methodology for facial landmark annotation.csv (renamed from datasets/scholar/entries/A semi-automatic methodology for facial landmark annotation.csv)0
-rw-r--r--scraper/datasets/scholar/entries/Annotated Facial Landmarks in the Wild: A Large-scale, Real-world Database for Facial Landmark Localization.csv (renamed from datasets/scholar/entries/Annotated Facial Landmarks in the Wild: A Large-scale, Real-world Database for Facial Landmark Localization.csv)0
-rw-r--r--scraper/datasets/scholar/entries/Attribute and Simile Classifiers for Face Verification.csv (renamed from datasets/scholar/entries/Attribute and Simile Classifiers for Face Verification.csv)0
-rw-r--r--scraper/datasets/scholar/entries/Automatic Facial Makeup Detection with Application in Face Recognition.csv (renamed from datasets/scholar/entries/Automatic Facial Makeup Detection with Application in Face Recognition.csv)0
-rw-r--r--scraper/datasets/scholar/entries/Beyond Frontal Faces: Improving Person Recognition Using Multiple Cues.csv (renamed from datasets/scholar/entries/Beyond Frontal Faces: Improving Person Recognition Using Multiple Cues.csv)0
-rw-r--r--scraper/datasets/scholar/entries/Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?.csv (renamed from datasets/scholar/entries/Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?.csv)0
-rw-r--r--scraper/datasets/scholar/entries/Coding Facial Expressions with Gabor Wavelets.csv (renamed from datasets/scholar/entries/Coding Facial Expressions with Gabor Wavelets.csv)0
-rw-r--r--scraper/datasets/scholar/entries/Comprehensive Database for Facial Expression Analysis.csv (renamed from datasets/scholar/entries/Comprehensive Database for Facial Expression Analysis.csv)0
-rw-r--r--scraper/datasets/scholar/entries/DEX: Deep EXpectation of apparent age from a single image.csv (renamed from datasets/scholar/entries/DEX: Deep EXpectation of apparent age from a single image.csv)0
-rw-r--r--scraper/datasets/scholar/entries/Deep expectation of real and apparent age from a single image without facial landmarks.csv (renamed from datasets/scholar/entries/Deep expectation of real and apparent age from a single image without facial landmarks.csv)0
-rw-r--r--scraper/datasets/scholar/entries/Distance Estimation of an Unknown Person from a Portrait .csv (renamed from datasets/scholar/entries/Distance Estimation of an Unknown Person from a Portrait .csv)0
-rw-r--r--scraper/datasets/scholar/entries/Eigenfaces vs. fisherfaces: Recognition using class specific linear projection.csv (renamed from datasets/scholar/entries/Eigenfaces vs. fisherfaces: Recognition using class specific linear projection.csv)0
-rw-r--r--scraper/datasets/scholar/entries/FDDB: A Benchmark for Face Detection in Unconstrained Settings.csv (renamed from datasets/scholar/entries/FDDB: A Benchmark for Face Detection in Unconstrained Settings.csv)0
-rw-r--r--scraper/datasets/scholar/entries/Face Recognition in Unconstrained Videos with Matched Background Similarity.csv (renamed from datasets/scholar/entries/Face Recognition in Unconstrained Videos with Matched Background Similarity.csv)0
-rw-r--r--scraper/datasets/scholar/entries/Face Swapping: Automatically Replacing Faces in Photographs.csv (renamed from datasets/scholar/entries/Face Swapping: Automatically Replacing Faces in Photographs.csv)0
-rw-r--r--scraper/datasets/scholar/entries/Face detection, pose estimation and landmark localization in the wild.csv (renamed from datasets/scholar/entries/Face detection, pose estimation and landmark localization in the wild.csv)0
-rw-r--r--scraper/datasets/scholar/entries/FaceTracer: A Search Engine for Large Collections of Images with Faces.csv (renamed from datasets/scholar/entries/FaceTracer: A Search Engine for Large Collections of Images with Faces.csv)0
-rw-r--r--scraper/datasets/scholar/entries/Fine-grained Evaluation on Face Detection in the Wild..csv (renamed from datasets/scholar/entries/Fine-grained Evaluation on Face Detection in the Wild..csv)0
-rw-r--r--scraper/datasets/scholar/entries/From Facial Parts Responses to Face Detection: A Deep Learning Approach.csv (renamed from datasets/scholar/entries/From Facial Parts Responses to Face Detection: A Deep Learning Approach.csv)0
-rw-r--r--scraper/datasets/scholar/entries/Indian Movie Face Database: A Benchmark for Face Recognition Under Wide Variations.csv (renamed from datasets/scholar/entries/Indian Movie Face Database: A Benchmark for Face Recognition Under Wide Variations.csv)0
-rw-r--r--scraper/datasets/scholar/entries/Is the Eye Region More Reliable Than the Face? A Preliminary Study of Face-based Recognition on a Transgender Dataset.csv (renamed from datasets/scholar/entries/Is the Eye Region More Reliable Than the Face? A Preliminary Study of Face-based Recognition on a Transgender Dataset.csv)0
-rw-r--r--scraper/datasets/scholar/entries/Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments..csv (renamed from datasets/scholar/entries/Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments..csv)0
-rw-r--r--scraper/datasets/scholar/entries/Large Age-Gap Face Verification by Feature Injection in Deep Networks.csv (renamed from datasets/scholar/entries/Large Age-Gap Face Verification by Feature Injection in Deep Networks.csv)0
-rw-r--r--scraper/datasets/scholar/entries/Level Playing Field for Million Scale Face Recognition.csv (renamed from datasets/scholar/entries/Level Playing Field for Million Scale Face Recognition.csv)0
-rw-r--r--scraper/datasets/scholar/entries/Localizing Parts of Faces Using a Consensus of Exemplars.csv (renamed from datasets/scholar/entries/Localizing Parts of Faces Using a Consensus of Exemplars.csv)0
-rw-r--r--scraper/datasets/scholar/entries/MORPH: A Longitudinal Image Database of Normal Adult Age-Progression.csv (renamed from datasets/scholar/entries/MORPH: A Longitudinal Image Database of Normal Adult Age-Progression.csv)0
-rw-r--r--scraper/datasets/scholar/entries/MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition.csv (renamed from datasets/scholar/entries/MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition.csv)0
-rw-r--r--scraper/datasets/scholar/entries/Presentation and validation of the Radboud Faces Database.csv (renamed from datasets/scholar/entries/Presentation and validation of the Radboud Faces Database.csv)0
-rw-r--r--scraper/datasets/scholar/entries/Pruning Training Sets for Learning of Object Categories.csv (renamed from datasets/scholar/entries/Pruning Training Sets for Learning of Object Categories.csv)0
-rw-r--r--scraper/datasets/scholar/entries/Recognize Complex Events from Static Images by Fusing Deep Channels.csv (renamed from datasets/scholar/entries/Recognize Complex Events from Static Images by Fusing Deep Channels.csv)0
-rw-r--r--scraper/datasets/scholar/entries/Robust face landmark estimation under occlusion .csv (renamed from datasets/scholar/entries/Robust face landmark estimation under occlusion .csv)0
-rw-r--r--scraper/datasets/scholar/entries/SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception.csv (renamed from datasets/scholar/entries/SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception.csv)0
-rw-r--r--scraper/datasets/scholar/entries/Situation Recognition: Visual Semantic Role Labeling for Image Understanding.csv (renamed from datasets/scholar/entries/Situation Recognition: Visual Semantic Role Labeling for Image Understanding.csv)0
-rw-r--r--scraper/datasets/scholar/entries/Spoofing Faces Using Makeup: An Investigative Study.csv (renamed from datasets/scholar/entries/Spoofing Faces Using Makeup: An Investigative Study.csv)0
-rw-r--r--scraper/datasets/scholar/entries/Sports Videos in the Wild (SVW): A Video Dataset for Sports Analysis.csv (renamed from datasets/scholar/entries/Sports Videos in the Wild (SVW): A Video Dataset for Sports Analysis.csv)0
-rw-r--r--scraper/datasets/scholar/entries/The Do's and Don'ts for CNN-based Face Verification.csv (renamed from datasets/scholar/entries/The Do's and Don'ts for CNN-based Face Verification.csv)0
-rw-r--r--scraper/datasets/scholar/entries/The Extended Cohn-Kanade Dataset (CK+): A complete expression dataset for action unit and emotion-specified expression.csv (renamed from datasets/scholar/entries/The Extended Cohn-Kanade Dataset (CK+): A complete expression dataset for action unit and emotion-specified expression.csv)0
-rw-r--r--scraper/datasets/scholar/entries/The MegaFace Benchmark: 1 Million Faces for Recognition at Scale.csv (renamed from datasets/scholar/entries/The MegaFace Benchmark: 1 Million Faces for Recognition at Scale.csv)0
-rw-r--r--scraper/datasets/scholar/entries/UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild.csv (renamed from datasets/scholar/entries/UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild.csv)0
-rw-r--r--scraper/datasets/scholar/entries/UMDFaces: An Annotated Face Dataset for Training Deep Networks.csv (renamed from datasets/scholar/entries/UMDFaces: An Annotated Face Dataset for Training Deep Networks.csv)0
-rw-r--r--scraper/expand-uni-lookup.py42
-rw-r--r--scraper/ids.json1
-rw-r--r--scraper/pdf_dump_first_page.sh19
-rw-r--r--scraper/reports/all_institutions.csv1499
-rw-r--r--scraper/reports/all_institutions_sorted.csv1745
-rw-r--r--scraper/reports/doi_domains.html1
-rw-r--r--scraper/reports/doi_institutions.csv2171
-rw-r--r--scraper/reports/doi_institutions.html1
-rw-r--r--scraper/reports/doi_institutions_geocoded.csv1430
-rw-r--r--scraper/reports/doi_institutions_unattributed.csv406
-rw-r--r--scraper/reports/doi_institutions_unknown.csv856
-rw-r--r--scraper/reports/doi_institutions_unknown.html1
-rw-r--r--scraper/reports/first_pages.html48171
-rw-r--r--scraper/reports/institution_names-1.csv714
-rw-r--r--scraper/reports/institution_names-2.csv714
-rw-r--r--scraper/reports/institution_names-3.csv712
-rw-r--r--scraper/reports/institution_names-4.csv711
-rw-r--r--scraper/reports/institution_names.csv3563
-rw-r--r--scraper/reports/institutions.html1
-rw-r--r--scraper/reports/institutions_found.csv1042
-rw-r--r--scraper/reports/institutions_found/found-1.csv479
-rw-r--r--scraper/reports/institutions_found/found-2.csv510
-rw-r--r--scraper/reports/institutions_found/found-3.csv811
-rw-r--r--scraper/reports/institutions_found/found-4.csv896
-rw-r--r--scraper/reports/institutions_missing.html11693
-rw-r--r--scraper/reports/institutions_not_found.csv1773
-rw-r--r--scraper/reports/institutions_not_found/not-found-1.csv845
-rw-r--r--scraper/reports/institutions_not_found/not-found-2.csv876
-rw-r--r--scraper/reports/institutions_not_found/not-found-3.csv1373
-rw-r--r--scraper/reports/institutions_not_found/not-found-4.csv1439
-rw-r--r--scraper/reports/leaflet.arc.js2
-rw-r--r--scraper/reports/leaflet.bezier.js254
-rw-r--r--scraper/reports/map.js92
-rw-r--r--scraper/reports/misc/all_doi-1.csv749
-rw-r--r--scraper/reports/misc/all_doi-2.csv749
-rw-r--r--scraper/reports/misc/all_doi-3.csv749
-rw-r--r--scraper/reports/misc/all_doi-4.csv748
-rw-r--r--scraper/reports/misc/all_doi.csv2995
-rw-r--r--scraper/reports/misc/db_paper_doi.csv1928
-rw-r--r--scraper/reports/misc/db_paper_pdf-1.csv1639
-rw-r--r--scraper/reports/misc/db_paper_pdf-2.csv1639
-rw-r--r--scraper/reports/misc/db_paper_pdf-3.csv1639
-rw-r--r--scraper/reports/misc/db_paper_pdf.csv4917
-rw-r--r--scraper/reports/misc/db_paper_pdf_list.csv7615
-rw-r--r--scraper/reports/misc/missing-1.csv817
-rw-r--r--scraper/reports/misc/missing-2.csv817
-rw-r--r--scraper/reports/misc/missing-3.csv815
-rw-r--r--scraper/reports/misc/missing.csv2449
-rw-r--r--scraper/reports/misc/raw_paper_doi.csv1067
-rw-r--r--scraper/reports/misc/raw_paper_pdf.csv1354
-rw-r--r--scraper/reports/misc/raw_paper_pdf_list.csv2434
-rw-r--r--scraper/reports/pdf_institutions_deduped.csv1676
-rw-r--r--scraper/reports/pdf_unknown_bigrams.html1
-rw-r--r--scraper/reports/pdf_unknown_terms.html1
-rw-r--r--scraper/reports/pdf_unknown_trigram.html1
-rw-r--r--scraper/reports/reddot.pngbin0 -> 1102 bytes
-rw-r--r--scraper/reports/report_coverage.html1
-rw-r--r--scraper/reports/report_index.html1
-rw-r--r--scraper/reports/reports.css18
-rwxr-xr-xscraper/reports/snap.svg-min.js21
-rw-r--r--scraper/reports/stats/empty_papers.csv2012
-rw-r--r--scraper/reports/stats/geocoded_papers.csv12699
-rw-r--r--scraper/reports/stats/no_separator_papers.csv1264
-rw-r--r--scraper/reports/stats/unknown_papers.csv61110
-rw-r--r--scraper/requirements.txt6
-rw-r--r--scraper/s2-citation-report.py291
-rw-r--r--scraper/s2-doi-report.py249
-rw-r--r--scraper/s2-dump-db-pdf-urls.py122
-rw-r--r--scraper/s2-dump-ids.py (renamed from s2-dump-ids.py)5
-rw-r--r--scraper/s2-dump-missing-paper-ids.py40
-rw-r--r--scraper/s2-extract-papers.py (renamed from s2-extract-papers.py)30
-rw-r--r--scraper/s2-fetch-doi.py69
-rw-r--r--scraper/s2-fetch-google-sheet.py4
-rw-r--r--scraper/s2-fetch-pdf.py49
-rw-r--r--scraper/s2-geocode-spreadsheet.py83
-rw-r--r--scraper/s2-geocode.py81
-rw-r--r--scraper/s2-merge-csv.py28
-rw-r--r--scraper/s2-papers.py (renamed from s2-search.py)24
-rw-r--r--scraper/s2-pdf-first-pages.py133
-rw-r--r--scraper/s2-pdf-report.py102
-rw-r--r--scraper/s2-raw-papers.py44
-rw-r--r--scraper/s2-search.py78
-rw-r--r--scraper/s2.py (renamed from s2.py)50
-rw-r--r--scraper/samples/s2-orc-paper.json131
-rw-r--r--scraper/samples/s2-paper-detail.json3114
-rw-r--r--scraper/samples/s2-papers-api.json2855
-rw-r--r--scraper/samples/s2-search-api.json270
-rw-r--r--scraper/scholar-fetch.py (renamed from scholar-fetch.py)0
-rw-r--r--scraper/split-csv.py (renamed from split-csv.py)24
-rw-r--r--scraper/util.py298
-rwxr-xr-xscraper/vendor/scholar.py (renamed from vendor/scholar.py)0
-rw-r--r--server/app/README.md17
-rw-r--r--server/app/__init__.py39
-rw-r--r--server/app/basemodels.py5
-rw-r--r--server/app/favicon.icobin0 -> 318 bytes
-rw-r--r--server/app/index.html161
-rw-r--r--server/app/main/__init__.py5
-rw-r--r--server/app/main/errors.py32
-rw-r--r--server/app/main/forms.py60
-rw-r--r--server/app/main/img_proc_config.py20
-rw-r--r--server/app/main/paths.py19
-rw-r--r--server/app/main/tasks.py374
-rw-r--r--server/app/main/utils.py37
-rw-r--r--server/app/main/views.py300
-rw-r--r--server/app/static/css/bootstrap.min.css6
-rw-r--r--server/app/static/css/dullbrown-theme.css502
-rw-r--r--server/app/static/css/projector.css52
-rw-r--r--server/app/static/js/app.js158
-rw-r--r--server/app/static/js/upload.js319
-rw-r--r--server/app/static/js/util.js32
-rw-r--r--server/app/static/js/vendor/ExifReader.js1363
-rw-r--r--server/app/static/js/vendor/canvas-to-blob.js111
-rw-r--r--server/app/static/js/vendor/jquery-3.3.1.min.js2
-rw-r--r--server/app/static/js/vendor/nanobar.min.js3
-rw-r--r--server/app/static/js/vendor/prefixfree.js527
-rw-r--r--server/app/templates/403.html35
-rw-r--r--server/app/templates/404.html33
-rw-r--r--server/app/templates/500.html34
-rw-r--r--server/app/templates/base.html33
-rw-r--r--server/app/templates/celery.html43
-rw-r--r--server/app/templates/display.html69
-rw-r--r--server/app/templates/index.html161
-rw-r--r--server/celery_worker.py7
-rw-r--r--server/config.py78
-rwxr-xr-xserver/deploy.sh20
-rw-r--r--server/dulldream.wsgi.py13
-rwxr-xr-xserver/run-celery.sh3
-rwxr-xr-xserver/run-dev.sh1
-rwxr-xr-xserver/run-gunicorn.sh2
-rwxr-xr-xserver/run-redis.sh2
-rw-r--r--server/run.py12
-rw-r--r--site/assets/css/css.css383
-rw-r--r--site/assets/css/fonts.css41
-rw-r--r--site/assets/data/3dlm_0_10.json1
-rw-r--r--site/assets/fonts/Roboto_300.eotbin0 -> 17481 bytes
-rw-r--r--site/assets/fonts/Roboto_300.svg312
-rw-r--r--site/assets/fonts/Roboto_300.ttfbin0 -> 35468 bytes
-rw-r--r--site/assets/fonts/Roboto_300.woffbin0 -> 19916 bytes
-rw-r--r--site/assets/fonts/Roboto_300.woff2bin0 -> 15440 bytes
-rw-r--r--site/assets/fonts/Roboto_400.eotbin0 -> 17405 bytes
-rw-r--r--site/assets/fonts/Roboto_400.svg308
-rw-r--r--site/assets/fonts/Roboto_400.ttfbin0 -> 35408 bytes
-rw-r--r--site/assets/fonts/Roboto_400.woffbin0 -> 19824 bytes
-rw-r--r--site/assets/fonts/Roboto_400.woff2bin0 -> 15344 bytes
-rw-r--r--site/assets/fonts/Roboto_500.eotbin0 -> 17596 bytes
-rw-r--r--site/assets/fonts/Roboto_500.svg305
-rw-r--r--site/assets/fonts/Roboto_500.ttfbin0 -> 35588 bytes
-rw-r--r--site/assets/fonts/Roboto_500.woffbin0 -> 20012 bytes
-rw-r--r--site/assets/fonts/Roboto_500.woff2bin0 -> 15552 bytes
-rw-r--r--site/assets/fonts/Roboto_700.eotbin0 -> 17391 bytes
-rw-r--r--site/assets/fonts/Roboto_700.svg309
-rw-r--r--site/assets/fonts/Roboto_700.ttfbin0 -> 35236 bytes
-rw-r--r--site/assets/fonts/Roboto_700.woffbin0 -> 19888 bytes
-rw-r--r--site/assets/fonts/Roboto_700.woff2bin0 -> 15436 bytes
-rw-r--r--site/assets/fonts/Roboto_Mono_300.eotbin0 -> 18064 bytes
-rw-r--r--site/assets/fonts/Roboto_Mono_300.svg392
-rw-r--r--site/assets/fonts/Roboto_Mono_300.ttfbin0 -> 32160 bytes
-rw-r--r--site/assets/fonts/Roboto_Mono_300.woffbin0 -> 19884 bytes
-rw-r--r--site/assets/fonts/Roboto_Mono_300.woff2bin0 -> 16380 bytes
-rw-r--r--site/assets/fonts/Roboto_Mono_400.eotbin0 -> 17757 bytes
-rw-r--r--site/assets/fonts/Roboto_Mono_400.svg390
-rw-r--r--site/assets/fonts/Roboto_Mono_400.ttfbin0 -> 31052 bytes
-rw-r--r--site/assets/fonts/Roboto_Mono_400.woffbin0 -> 19576 bytes
-rw-r--r--site/assets/fonts/Roboto_Mono_400.woff2bin0 -> 16028 bytes
-rw-r--r--site/assets/fonts/Roboto_Mono_500.eotbin0 -> 17909 bytes
-rw-r--r--site/assets/fonts/Roboto_Mono_500.svg387
-rw-r--r--site/assets/fonts/Roboto_Mono_500.ttfbin0 -> 31168 bytes
-rw-r--r--site/assets/fonts/Roboto_Mono_500.woffbin0 -> 19656 bytes
-rw-r--r--site/assets/fonts/Roboto_Mono_500.woff2bin0 -> 16016 bytes
-rw-r--r--site/assets/img/ajax-loader.gifbin0 -> 1849 bytes
-rw-r--r--site/assets/img/icon_camera.svg2
-rw-r--r--site/assets/img/megapixels_logo.svg14
-rw-r--r--site/assets/img/megapixels_logo_white.svg14
-rw-r--r--site/assets/js/app/face.js227
-rw-r--r--site/assets/js/app/site.js30
-rw-r--r--site/assets/js/vendor/oktween.js159
-rw-r--r--site/assets/js/vendor/three.meshline.js486
-rw-r--r--site/assets/js/vendor/three.min.js963
-rw-r--r--site/assets/test/face.html227
l---------site/content1
-rw-r--r--site/public/about/credits/index.html57
-rw-r--r--site/public/about/disclaimer/index.html57
-rw-r--r--site/public/about/index.html57
-rw-r--r--site/public/about/press/index.html55
-rw-r--r--site/public/about/privacy/index.html134
-rw-r--r--site/public/about/style/index.html90
-rw-r--r--site/public/about/terms/index.html69
l---------site/public/assets1
-rw-r--r--site/public/datasets/lfw/index.html283
-rw-r--r--site/public/datasets/lfw/what/index.html142
-rw-r--r--site/public/datasets/vgg_face2/index.html84
-rw-r--r--site/public/datasets/vgg_faces2/index.html63
-rw-r--r--site/public/index.html78
-rw-r--r--site/public/research/00_introduction/index.html86
-rw-r--r--site/public/research/01_from_1_to_100_pixels/index.html95
-rw-r--r--site/public/research/from_1_to_100_pixels/index.html101
-rw-r--r--site/public/research/index.html51
-rw-r--r--site/templates/home.html33
-rw-r--r--site/templates/layout.html47
-rw-r--r--site/templates/page.html5
-rw-r--r--site/templates/paper.html4
-rw-r--r--site/templates/research.html23
-rw-r--r--util.py31
328 files changed, 221910 insertions, 387 deletions
diff --git a/.gitignore b/.gitignore
index 7821e2dd..3cd21111 100644
--- a/.gitignore
+++ b/.gitignore
@@ -123,9 +123,6 @@ venv.bak/
# Rope project settings
.ropeproject
-# mkdocs documentation
-/site
-
# mypy
.mypy_cache/
@@ -151,6 +148,10 @@ config/settings/production.py
old-package.json
+scraper/datasets/
+scraper/reports/papers/
*.tar
+.creds
+
diff --git a/README.md b/README.md
index c1db0f32..3a23d3b4 100644
--- a/README.md
+++ b/README.md
@@ -1,36 +1,3 @@
# MegaPixels
FaceQuery.me, mozilla, nytimes
-
-# megapixels dev
-
-## installation
-
-```
-pip install urllib3
-pip install bs4
-pip install http
-```
-
-## ascii
-
-```
-xxxxdddd5xxdddd5555vvvv~xxxxdddd55vvvv7777~~~~xxxxdddddd5555vvvv7777
-xxxxdddd5xxdddd55v~xxxxddddddddd55vvvv7777~~~~xxxxdddd5555v5vvvv7777
-xxxxdddd555vvvv77~~xxxxdddddddd55vvvv77777~~~xxxxddd55555v5vvvv77777
-aaaaxddd5555vvvv77xxxxddddddddd55vvvvv77777~~xxxxddd55555v5vvvvddddd
-aaaaayyMMMMMqqqqddeeeexxxxxxdddd55vvvv77777~~xxaayyyyMMMMMqMqqqvdddd
-aaaaayyMMMMMqqqqqddeeeaaaaaayyyyMMqqqqqddddeeaaaayyyyMMMMMqMqqqqdddd
-ccccaDD%%%%%##qqqddeeaaaaaayyyyMMqqqqddddeeeeaaaayyyyMMMMMqMqqqq@@@@
-ccccc%%%###qqqd#@@eeeaaaaaayyyyMMqqqqddddeeeeacccDDDD%%%%%#%###q@@@@
-cccx%%%%###i##@#@eeeeaccccccDyyyM%##qddd@@eeeeccccDDyMMMMqMqqqq@@@@@
-llcccDDDD%%%%###@eeeecccccccDDDD%%####@@@@eeeecccDDD%%%%#%###q@&&&&&
-lllllSDDD%%%%###@llllcccccccDDDD%%####@@@@eeeecclSSD%%%%#%####@&&&&&
-llllSSSSQQ%%%##&&&lllllcclllSDDD%%####@@@@eellcllSSD%%%%#%####&&&&&&
-aallSSSSQQQQQ%%&&&llllllllllSSSD%Q%%##@@&&llllllllSSQQQQ%Q%%%#&&rrrr
-aaaaiiiiVVQQQ%%drrrrrrrrllllSSSSQQ%%%%&&&&llllllllSSSQQQQ%Q%%%%&rrrr
-aaaaiiiiVVVVVdddrrrrrrrraaaaiSSSQVdd%%&&rrrrrraaaaiiSQQQQ%Q%%%%rrrrr
-xxxxdddd55VVVddv7777~~~~aaaaiiiiVVddddrrrrrrrraaaaiiiiVVVVdVdddd7777
-xxxxdddd5555vvvv7777~~~~xxxxdiiiV555vvvv7777~~~~xxxxdddd5555vvvv7777
-```
->>>>>>> 0dc3e40434c23e4d48119465f39b03bf35fb56bd
diff --git a/builder/README.md b/builder/README.md
new file mode 100644
index 00000000..1a6d3a1e
--- /dev/null
+++ b/builder/README.md
@@ -0,0 +1,21 @@
+Megapixels Static Site Generator
+================================
+
+The index, blog, and about other pages are built using this static site generator.
+
+## Metadata
+
+```
+status: published|draft|private
+title: From 1 to 100 Pixels
+desc: High resolution insights from low resolution imagery
+slug: from-1-to-100-pixels
+published: 2018-12-04
+updated: 2018-12-04
+authors: Adam Harvey, Berit Gilma, Matthew Stender
+```
+
+## S3 Assets
+
+Static assets: `v1/site/about/assets/picture.jpg`
+Dataset assets: `v1/datasets/lfw/assets/picture.jpg`
diff --git a/__init__.py b/builder/__init__.py
index e69de29b..e69de29b 100644
--- a/__init__.py
+++ b/builder/__init__.py
diff --git a/builder/builder.py b/builder/builder.py
new file mode 100644
index 00000000..620fc710
--- /dev/null
+++ b/builder/builder.py
@@ -0,0 +1,90 @@
+#!/usr/bin/python
+
+from dotenv import load_dotenv
+load_dotenv()
+
+import os
+import glob
+from jinja2 import Environment, FileSystemLoader, select_autoescape
+
+import s3
+import parser
+from paths import *
+
+env = Environment(
+ loader=FileSystemLoader(template_path),
+ autoescape=select_autoescape([])
+)
+
+def build_page(fn, research_posts):
+ metadata, sections = parser.read_metadata(fn)
+
+ if metadata is None:
+ print("{} has no metadata".format(fn))
+ return
+
+ print(metadata['url'])
+
+ dirname = os.path.dirname(fn)
+ output_path = public_path + metadata['url']
+ output_fn = os.path.join(output_path, "index.html")
+
+ skip_h1 = False
+
+ if metadata['url'] == '/':
+ template = env.get_template("home.html")
+ elif 'research/' in fn:
+ skip_h1 = True
+ template = env.get_template("research.html")
+ else:
+ template = env.get_template("page.html")
+
+ if 'datasets/' in fn:
+ s3_dir = s3_datasets_path
+ else:
+ s3_dir = s3_site_path
+
+ s3_path = s3.make_s3_path(s3_dir, metadata['path'])
+
+ if 'index.md' in fn:
+ s3.sync_directory(dirname, s3_dir, metadata)
+
+ content = parser.parse_markdown(sections, s3_path, skip_h1=skip_h1)
+
+ html = template.render(
+ metadata=metadata,
+ content=content,
+ research_posts=research_posts,
+ latest_research_post=research_posts[-1],
+ )
+
+ os.makedirs(output_path, exist_ok=True)
+ with open(output_fn, "w") as file:
+ file.write(html)
+
+ print("______")
+
+def build_research_index(research_posts):
+ metadata, sections = parser.read_metadata('../site/content/research/index.md')
+ template = env.get_template("page.html")
+ s3_path = s3.make_s3_path(s3_site_path, metadata['path'])
+ content = parser.parse_markdown(sections, s3_path, skip_h1=False)
+ content += parser.parse_research_index(research_posts)
+ html = template.render(
+ metadata=metadata,
+ content=content,
+ research_posts=research_posts,
+ latest_research_post=research_posts[-1],
+ )
+ output_fn = public_path + '/research/index.html'
+ with open(output_fn, "w") as file:
+ file.write(html)
+
+def build_site():
+ research_posts = parser.read_research_post_index()
+ for fn in glob.iglob(os.path.join(content_path, "**/*.md"), recursive=True):
+ build_page(fn, research_posts)
+ build_research_index(research_posts)
+
+if __name__ == '__main__':
+ build_site()
diff --git a/builder/parser.py b/builder/parser.py
new file mode 100644
index 00000000..dd3643bf
--- /dev/null
+++ b/builder/parser.py
@@ -0,0 +1,172 @@
+import os
+import re
+import glob
+import mistune
+
+import s3
+from paths import *
+
+renderer = mistune.Renderer(escape=False)
+markdown = mistune.Markdown(renderer=renderer)
+
+def fix_images(lines, s3_path):
+ real_lines = []
+ block = "\n\n".join(lines)
+ for line in block.split("\n"):
+ if "![" in line:
+ line = line.replace('![', '')
+ alt_text, tail = line.split('](', 1)
+ url, tail = tail.split(')', 1)
+ if ':' in alt_text:
+ tail, alt_text = alt_text.split(':', 1)
+ img_tag = "<img src='{}' alt='{}'>".format(s3_path + url, alt_text.replace("'", ""))
+ if len(alt_text):
+ line = "<div class='image'>{}<div class='caption'>{}</div></div>".format(img_tag, alt_text)
+ else:
+ line = "<div class='image'>{}</div>".format(img_tag, alt_text)
+ real_lines.append(line)
+ return "\n".join(real_lines)
+
+def format_section(lines, s3_path, type=''):
+ if len(lines):
+ lines = fix_images(lines, s3_path)
+ if type:
+ return "<section class='{}'>{}</section>".format(type, markdown(lines))
+ else:
+ return "<section>" + markdown(lines) + "</section>"
+ return ""
+
+def format_metadata(section):
+ meta = []
+ for line in section.split('\n'):
+ key, value = line[2:].split(': ', 1)
+ meta.append("<div><div class='gray'>{}</div><div>{}</div></div>".format(key, value))
+ return "<section><div class='meta'>{}</div></section>".format(''.join(meta))
+
def parse_markdown(sections, s3_path, skip_h1=False):
    """Render markdown *sections* to HTML, grouping plain text between the
    special sections (metadata lists, wide images, image blocks).

    When *skip_h1* is true, '# ' heading sections are dropped entirely.
    """
    groups = []
    pending = []

    def flush():
        # Emit the accumulated plain sections (a no-op string when empty).
        groups.append(format_section(pending, s3_path))
        del pending[:]

    for section in sections:
        if skip_h1 and section.startswith('# '):
            continue
        if section.startswith('+ '):
            flush()
            groups.append(format_metadata(section))
        elif '![wide:' in section:
            flush()
            groups.append(format_section([section], s3_path, type='wide'))
        elif '![' in section:
            flush()
            groups.append(format_section([section], s3_path, type='images'))
        else:
            pending.append(section)
    flush()
    return "".join(groups)
+
def parse_research_index(research_posts):
    """Build the research index HTML: one linked card per post.

    Posts without an 'image' entry fall back to a 1x1 transparent GIF data URI.
    """
    blank_gif = 'data:image/gif;base64,R0lGODlhAQABAAAAACH5BAEKAAEALAAAAAABAAEAAAICTAEAOw=='
    parts = ["<div class='research_index'>"]
    for post in research_posts:
        s3_path = s3.make_s3_path(s3_site_path, post['path'])
        post_image = s3_path + post['image'] if 'image' in post else blank_gif
        parts.append(
            "<a href='{}'><section class='wide'><img src='{}' alt='Research post' /><section><h1>{}</h1><h2>{}</h2></section></section></a>".format(
                post['path'],
                post_image,
                post['title'],
                post['tagline']))
    parts.append('</div>')
    return ''.join(parts)
+
def read_metadata(fn):
    """Read markdown file *fn*, normalize line endings, and parse it.

    Returns (metadata, content_sections) via parse_metadata.
    """
    with open(fn, "r") as handle:
        data = handle.read()
    data = data.replace("\n ", "\n")
    # CRLF files: drop the carriage returns.  CR-only files: turn them into
    # newlines so the section split below still works.
    if "\n" in data:
        data = data.replace("\r", "")
    else:
        data = data.replace("\r", "\n")
    return parse_metadata(fn, data.split("\n\n"))
+
# Fallback values merged into a page's metadata for any field the markdown
# front matter omits (see parse_metadata).
default_metadata = {
    'status': 'published',
    'title': 'Untitled Page',
    'desc': '',
    'slug': '',
    'published': '2018-12-31',
    'updated': '2018-12-31',
    'authors': 'Adam Harvey',
    'sync': 'true',
    'tagline': '',
}
+
def parse_metadata_section(metadata, section):
    """Parse 'Key: value' lines from *section* into *metadata*, lowercasing keys.

    Lines without a ': ' separator are ignored.
    """
    pairs = (entry.split(': ', 1) for entry in section.split("\n") if ': ' in entry)
    for key, value in pairs:
        metadata[key.lower()] = value
+
def parse_metadata(fn, sections):
    """Split markdown *sections* into a metadata dict and content sections.

    The first section containing ': ' is treated as the front matter block;
    sections containing '-----' are treated as separators and dropped; the
    remaining sections after the front matter become the page content.
    Missing metadata fields are filled from default_metadata, and 'path'/'url'
    are derived from *fn* relative to content_path.

    Returns (metadata, valid_sections).
    """
    found_meta = False
    metadata = {}
    valid_sections = []
    for section in sections:
        if not found_meta and ': ' in section:
            # First key/value-looking section is the front matter.
            found_meta = True
            parse_metadata_section(metadata, section)
            continue
        if '-----' in section:
            # Horizontal-rule separator, not content.
            continue
        if found_meta:
            valid_sections.append(section)

    if 'title' not in metadata:
        print('warning: {} has no title'.format(fn))
    # Fill in any fields the front matter omitted.
    for key in default_metadata:
        if key not in metadata:
            metadata[key] = default_metadata[key]

    # Derive the site-relative path/url from the file's location under
    # content_path.
    basedir = os.path.dirname(fn.replace(content_path, ''))
    basename = os.path.basename(fn)
    if basedir == '/':
        metadata['path'] = '/'
        metadata['url'] = '/'
    elif basename == 'index.md':
        metadata['path'] = basedir + '/'
        metadata['url'] = metadata['path']
    else:
        # Non-index pages get their own trailing-slash URL segment.
        metadata['path'] = basedir + '/'
        metadata['url'] = metadata['path'] + basename.replace('.md', '') + '/'

    # An unedited template placeholder counts as published.
    if metadata['status'] == 'published|draft|private':
        metadata['status'] = 'published'

    # Normalize the 'sync' flag to a boolean ('false' disables S3 sync).
    metadata['sync'] = metadata['sync'] != 'false'

    metadata['author_html'] = '<br>'.join(metadata['authors'].split(','))
    return metadata, valid_sections
+
def read_research_post_index():
    """Collect metadata for every published research post, sorted by path.

    Falls back to a single placeholder entry when no posts qualify, so the
    index template always has something to render.
    """
    posts = []
    for fn in sorted(glob.glob('../site/content/research/*/index.md')):
        metadata, _sections = read_metadata(fn)
        if metadata is None or metadata['status'] in ('private', 'draft'):
            continue
        posts.append(metadata)
    if not posts:
        posts.append({
            'title': 'Placeholder',
            'slug': 'placeholder',
            'date': 'Placeholder',
            'url': '/',
        })
    return posts
+
diff --git a/builder/paths.py b/builder/paths.py
new file mode 100644
index 00000000..356f2f3d
--- /dev/null
+++ b/builder/paths.py
@@ -0,0 +1,6 @@
+
# Remote (S3) key prefixes for uploaded site assets.
s3_site_path = "v1/site"
s3_datasets_path = "v1" # datasets is already in the filename
# Local build tree, relative to the builder/ working directory.
public_path = "../site/public"
content_path = "../site/content"
template_path = "../site/templates"
diff --git a/builder/s3.py b/builder/s3.py
new file mode 100644
index 00000000..41ecdf61
--- /dev/null
+++ b/builder/s3.py
@@ -0,0 +1,61 @@
+import os
+import glob
+import boto3
+from paths import *
+
# One boto3 session/client per process.  Credentials, endpoint, and region
# all come from the environment (S3_KEY, S3_SECRET, S3_ENDPOINT, S3_REGION),
# so the same code works against any S3-compatible provider.
session = boto3.session.Session()

s3_client = session.client(
    service_name='s3',
    aws_access_key_id=os.getenv('S3_KEY'),
    aws_secret_access_key=os.getenv('S3_SECRET'),
    endpoint_url=os.getenv('S3_ENDPOINT'),
    region_name=os.getenv('S3_REGION'),
)
+
def sync_directory(base_fn, s3_path, metadata):
    """Mirror <base_fn>/assets/* to S3 under s3_path + metadata['url'].

    Remote objects older than their local file are re-uploaded, remote
    objects with no local counterpart are deleted, and local files missing
    remotely are uploaded.  Does nothing when metadata['sync'] is false.
    """
    if not metadata['sync']:
        return

    bucket = os.getenv('S3_BUCKET')
    remote_path = s3_path + metadata['url']

    # Basenames of local assets not yet matched against a remote object.
    pending = {os.path.basename(fn)
               for fn in glob.glob(os.path.join(base_fn, 'assets/*'))}

    # NOTE(review): list_objects returns at most 1000 keys per call; this
    # assumes asset directories stay below that -- confirm or use a paginator.
    directory = s3_client.list_objects(Bucket=bucket, Prefix=remote_path)

    if 'Contents' in directory:
        for obj in directory['Contents']:
            s3_fn = obj['Key']
            fn = os.path.basename(s3_fn)
            local_fn = os.path.join(base_fn, 'assets', fn)
            if fn in pending:
                pending.discard(fn)
                # Re-upload only when the local file is newer than the object.
                if obj['LastModified'].timestamp() < os.path.getmtime(local_fn):
                    print("s3 update {}".format(s3_fn))
                    s3_client.upload_file(
                        local_fn,
                        bucket,
                        s3_fn,
                        ExtraArgs={ 'ACL': 'public-read' })
            else:
                # Remote object has no local counterpart: remove it.
                print("s3 delete {}".format(s3_fn))
                s3_client.delete_object(Bucket=bucket, Key=s3_fn)

    # Anything still pending exists locally but not remotely: upload it.
    for fn in pending:
        local_fn = os.path.join(base_fn, 'assets', fn)
        s3_fn = os.path.join(remote_path, 'assets', fn)
        print("s3 create {}".format(s3_fn))
        s3_client.upload_file(
            local_fn,
            bucket,
            s3_fn,
            ExtraArgs={ 'ACL': 'public-read' })
+
def make_s3_path(s3_dir, metadata_path):
    """Build the public URL prefix for assets under *s3_dir* + *metadata_path*."""
    endpoint = os.getenv('S3_ENDPOINT')
    bucket = os.getenv('S3_BUCKET')
    return "{}/{}/{}{}".format(endpoint, bucket, s3_dir, metadata_path)
diff --git a/datasets/citations-1.csv b/datasets/citations-1.csv
deleted file mode 100644
index f9400fcd..00000000
--- a/datasets/citations-1.csv
+++ /dev/null
@@ -1,77 +0,0 @@
-Database Name,Title,Journal/Pub/Conference,Year,Pages,Volume,Author1,Author2,Author3,Author4,Author5,Author 6,PDF,Priority,URL,bibtex_reference_only,notes
-PIE,"The CMU Pose, Illumination, and Expression Database",IEEE Transactions on Pattern Analysis and Machine Intelligence,Dec 2003,"25, No. 12",,T. Sim,S. Baker,M. Bsat,,,,,,http://www.cs.cmu.edu/~simonb/pie_db/pami.pdf,,
-YouTubeFaces,Face Recognition in Unconstrained Videos with Matched Background Similarity,IEEE Conf. on Computer Vision and Pattern Recognition (CVPR),2011,,,Lior Wolf,Tal Hassner,Itay Maoz,,,,,,,,
-Names and Faces,Who's in the Picture ,NIPS,2004,,,Tamara L. Berg,Alexander C. Berg,Jaety Edwards,David A. Forsyth,,,,2,http://www.cs.berkeley.edu/%7Eaberg/papers/berg_whos_in_the_picture.pdf,,
-RaFD ,Presentation and validation of the Radboud Faces Database,Cognition & Emotion,2010,1377-1388,24.8,"Langner, O.","Dotsch, R."," Bijlstra, G.","Wigboldus, D.H.J.","Hawk, S.T.","van Knippenberg, A.",,,http://dx.doi.org/10.1080/02699930903485076,DOI: 10.1080/02699930903485076,
-MIFS,Spoofing Faces Using Makeup: An Investigative Study,"Proc. of 3rd IEEE International Conference on Identity, Security and Behavior Analysis (ISBA), (New Delhi, India)",2017,,,C. Chen,A. Dantcheva,T. Swearingen,A. Ross,,,,,http://www.cse.msu.edu/~rossarun/pubs/ChenFaceMakeupSpoof_ISBA2017.pdf,,
-MegaFace 2,The MegaFace Benchmark: 1 Million Faces for Recognition at Scale,IEEE Conference on Computer Vision and Pattern Recognition (CVPR),2016,,,"Kemelmacher-Shlizerman, Ira","Seitz, Steven M","Miller, Daniel","Brossard, Evan",,,,If you're using or participating in Challenge 1 please cite:,http://megaface.cs.washington.edu/KemelmacherMegaFaceCVPR16.pdf,"@inproceedings{kemelmacher2016megaface,
-title={The megaface benchmark: 1 million faces for recognition at scale},
-author={Kemelmacher-Shlizerman, Ira and Seitz, Steven M and Miller, Daniel and Brossard, Evan},
-booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
-pages={4873--4882},
-year={2016}
-}",
-CMDP,Distance Estimation of an Unknown Person from a Portrait ,"ECCV 2014, Zurich, Switzerland",2014,,,X. P. Burgos-Artizzu,M.R. Ronchi,P. Perona,,,,,,http://www.vision.caltech.edu/~mronchi/papers/ECCV14_FaceDistancePortrait_PAPER.pdf,"@incollection{perona2014PortraitDistanceEstimation,
- title={Distance Estimation of an Unknown Person from a Portrait},
- author={Xavier P. Burgos-Artizzu, Matteo Ruggero Ronchi and Pietro Perona},
- booktitle={Computer Vision--ECCV 2014},
- pages={313--327},
- year={2014},
- publisher={Springer}
-}
-",
-MORPH non-commercial,MORPH: A Longitudinal Image Database of Normal Adult Age-Progression,"IEEE 7th International Conference on Automatic Face and Gesture Recognition, Southampton, UK",2006,341-345,,Karl Ricanek Jr,Tamirat Tesafaye,,,,,,,,,
-CK+,The Extended Cohn-Kanade Dataset (CK+): A complete expression dataset for action unit and emotion-specified expression,"Proceedings of the Third International Workshop on CVPR for Human Communicative Behavior Analysis (CVPR4HB 2010), San Francisco, USA",2010,94-101,,"Ambadar, Z.","Cohn, J.F.","Kanade, T.","Lucey, P.","Matthews, I.A.","Saragih, J.M.",,,http://ieeexplore.ieee.org/document/5543262/,"@article{Lucey2010TheEC,
- title={The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression},
- author={Patrick Lucey and Jeffrey F. Cohn and Takeo Kanade and Jason M. Saragih and Zara Ambadar and Iain A. Matthews},
- journal={2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops},
- year={2010},
- pages={94-101}
-}",
-LFWP,Localizing Parts of Faces Using a Consensus of Exemplars,Proceedings of the 24th IEEE Conference on Computer Vision and Pattern Recognition (CVPR),2011,,,Peter N. Belhumeur,"David W. Jacobs,",David J. Kriegman,Neeraj Kumar,,,,,http://neerajkumar.org/projects/face-parts/base/papers/nk_cvpr2011_faceparts.pdf,,
-Adience,Age and Gender Estimation of Unfiltered Faces,"Transactions on Information Forensics and Security (IEEE-TIFS), special issue on Facial Biometrics in the Wild",2014,2170 - 2179,9,Eran Eidinger,Roee Enbar, Tal Hassner,,,,,,http://www.openu.ac.il/home/hassner/Adience/EidingerEnbarHassner_tifs.pdf,,
-300-W,300 Faces in-the-Wild Challenge: The first facial landmark localization Challenge,"Proceedings of IEEE Int’l Conf. on Computer Vision (ICCV-W), 300 Faces in-the-Wild Challenge (300-W). Sydney, Australia",2013,,,C. Sagonas,G. Tzimiropoulos,S. Zafeiriou,M. Pantic,,,,2,,,
-AFW,"Face detection, pose estimation and landmark localization in the wild","Computer Vision and Pattern Recognition (CVPR) Providence, Rhode Island,",2012,,,X. Zhu,D. Ramanan,,,,,,,http://www.ics.uci.edu/~xzhu/paper/face-cvpr12.pdf,,
-MsCeleb,MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition,European Conference on Computer Vision,2016,,,"Guo, Yandong","Zhang, Lei","Hu, Yuxiao","He, Xiaodong","Gao, Jianfeng",,,,https://www.microsoft.com/en-us/research/wp-content/uploads/2016/08/MSCeleb-1M-a.pdf,"@INPROCEEDINGS { guo2016msceleb,
- author = {Guo, Yandong and Zhang, Lei and Hu, Yuxiao and He, Xiaodong and Gao, Jianfeng},
- title = {M{S}-{C}eleb-1{M}: A Dataset and Benchmark for Large Scale Face Recognition},
- booktitle = {European Conference on Computer Vision},
- year = {2016},
- organization={Springer}}",
-LAG,Large Age-Gap Face Verification by Feature Injection in Deep Networks,Pattern Recognition Letters,2017,36-42,90,Simone Bianco,,,,,,bianco2017large-age.pdf,,http://www.ivl.disco.unimib.it/activities/large-age-gap-face-verification/,,
-IMDB,DEX: Deep EXpectation of apparent age from a single image,IEEE International Conference on Computer Vision Workshops (ICCVW),Dec 2015,,,Rasmus Rothe,Radu Timofte,Luc Van Gool,,,,,2,,"@InProceedings{Rothe-ICCVW-2015,
- author = {Rasmus Rothe and Radu Timofte and Luc Van Gool},
- title = {DEX: Deep EXpectation of apparent age from a single image},
- booktitle = {IEEE International Conference on Computer Vision Workshops (ICCVW)},
- year = {2015},
- month = {December},
-}",
-IMDB,Deep expectation of real and apparent age from a single image without facial landmarks,International Journal of Computer Vision (IJCV),Jul 2016,,,Rasmus Rothe,Radu Timofte,Luc Van Gool,,,,,1,,"@article{Rothe-IJCV-2016,
- author = {Rasmus Rothe and Radu Timofte and Luc Van Gool},
- title = {Deep expectation of real and apparent age from a single image without facial landmarks},
- journal = {International Journal of Computer Vision (IJCV)},
- year = {2016},
- month = {July},
-}",
-UMD,UMDFaces: An Annotated Face Dataset for Training Deep Networks,Arxiv preprint,2016,,,Ankan Bansal,Anirudh Nanduri,Carlos D Castillo,Rajeev Ranjan,Rama Chellappa,,,1,https://arxiv.org/abs/1611.01484v2,"@article{bansal2016umdfaces,
- title={UMDFaces: An Annotated Face Dataset for Training Deep Networks},
- author={Bansal, Ankan and Nanduri, Anirudh and Castillo, Carlos D and Ranjan, Rajeev and Chellappa, Rama}
- journal={arXiv preprint arXiv:1611.01484v2},
- year={2016}
- }",
-LFW,Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments.,"University of Massachusetts, Amherst, Technical Report ",2007,07-49,,Gary B. Huang,Manu Ramesh,Tamara Berg,Erik Learned-Miller,,,,,http://vis-www.cs.umass.edu/lfw/lfw.pdf,,various citaton depending on various datasets provided. Citation used here was first one published in 2007
-CelebA,From Facial Parts Responses to Face Detection: A Deep Learning Approach,"in IEEE International Conference on Computer Vision (ICCV),",2015,,,S. Yang,P. Luo,C. C. Loy,X. Tang,,,,,https://arxiv.org/abs/1509.06451,"The following paper employed CelebA for face detection. (linked on the project website) @inproceedings{liu2015faceattributes,
- author = {Ziwei Liu and Ping Luo and Xiaogang Wang and Xiaoou Tang},
- title = {Deep Learning Face Attributes in the Wild},
- booktitle = {Proceedings of International Conference on Computer Vision (ICCV)},
- month = December,
- year = {2015}
-}",
-UMD,The Do's and Don'ts for CNN-based Face Verification,Arxiv preprint,2017,,,Ankan Bansal,Carlos Castillo,"Rajeev Ranjan,",Rama Chellappa,,,,2,https://arxiv.org/abs/1705.07426,"@article{bansal2017dosanddonts,
- title = {The Do's and Don'ts for CNN-based Face Verification},
- author = {Bansal, Ankan and Castillo, Carlos and Ranjan, Rajeev and Chellappa, Rama},
- journal = {arXiv preprint arXiv:1705.07426},
- year = {2017}
- }",
-Yale Face Database B,From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose,PAMI,2001,,,Athinodoros Georghiades,Peter Belhumeur,David Kriegman,,,,,,,,
-CAISA Webface,Learning Face Representation from Scratch,arXiv preprint arXiv:1411.7923.,2014,,,Dong Yi,Zhen Lei, Shengcai Liao,Stan Z. Li,,,,,https://arxiv.org/abs/1411.7923,,
diff --git a/datasets/citations-2.csv b/datasets/citations-2.csv
deleted file mode 100644
index ec11b2d0..00000000
--- a/datasets/citations-2.csv
+++ /dev/null
@@ -1,89 +0,0 @@
-Database Name,Title,Journal/Pub/Conference,Year,Pages,Volume,Author1,Author2,Author3,Author4,Author5,Author 6,PDF,Priority,URL,bibtex_reference_only,notes
-COFW,Robust face landmark estimation under occlusion ,"ICCV 2013, Sydney, Australia",2013,,,X. P. Burgos-Artizzu,P. Perona,P. Dollár,,,,,,http://www.vision.caltech.edu/%7Expburgos/papers/ICCV13%20Burgos-Artizzu.pdf,,
-Names and Faces,Names and Faces ,U.C. Berkeley Technical Report,Jan. 2007,,,Tamara L. Berg,Alexander C. Berg,Jaety Edwards,Michael Maire,Ryan White,"Yee Whye Teh, Erik Learned-Miller, David A. Forsyth",,1,http://www.cs.berkeley.edu/%7Eaberg/papers/journal_berg.pdf,,
-FaceTracer,FaceTracer: A Search Engine for Large Collections of Images with Faces,European Conference on Computer Vision (ECCV),2008,340-353,,N. Kumar,P. N. Belhumeur,S. K. Nayar,,,,,1,http://www1.cs.columbia.edu/CAVE/publications/pdfs/Kumar_ECCV08.pdf,,
-MIW,Automatic Facial Makeup Detection with Application in Face Recognition,"Proc. of 6th IAPR International Conference on Biometrics (ICB), (Madrid, Spain)",2013,,,C. Chen,A. Dantcheva,A. Ross,,,,,,https://www.cse.msu.edu/~rossarun/pubs/ChenMakeupDetection_ICB2013.pdf,,
-SVW,Sports Videos in the Wild (SVW): A Video Dataset for Sports Analysis,"Proc. International Conference on Automatic Face and Gesture Recognition (FG 2015), Ljubljana, Slovenia",2015,,,Seyed Morteza Safdarnejad, Xiaoming Liu, Lalita Udpa, Brooks Andrus,"John Wood,",Dean Craven,,,http://cvlab.cse.msu.edu/pdfs/Morteza_FG2015.pdf," @inproceedings{ sports-videos-in-the-wild-svw-a-video-dataset-for-sports-analysis,
- author = { Seyed Morteza Safdarnejad and Xiaoming Liu and Lalita Udpa and Brooks Andrus and John Wood and Dean Craven },
- title = { Sports Videos in the Wild (SVW): A Video Dataset for Sports Analysis },
- booktitle = { Proc. International Conference on Automatic Face and Gesture Recognition },
- address = { Ljubljana, Slovenia },
- month = { May },
- year = { 2015 },
-} ",
-LFW-a,,,,,,,,,,,,,,,Comply with any instructions specified for the original LFW data set,
-Helen,Interactive Facial Feature Localization,ECCV,2012,,,Vuong Le,Jonathan Brandt,Zhe Lin,Lubomir Boudev,Thomas S. Huang,,,,http://www.ifp.illinois.edu/~vuongle2/helen/eccv2012_helen_final.pdf,,
-Caltech 10K Web Faces,Pruning Training Sets for Learning of Object Categories,Proc. IEEE Conference on Computer Vision and Pattern Recognition (CVPR),2005,,,Anelia Angelova,Yaser Abu-Mostafa,Pietro Perona,,,,,,http://www.vision.caltech.edu/anelia/DataPruning/Angelova05DataPruning.pdf,This is a paper using the dataset (linked on the project website),
-FaceTracer,Face Swapping: Automatically Replacing Faces in Photographs,ACM Trans. on Graphics (also Proc. of ACM SIGGRAPH),2008,,,D. Bitouk,N. Kumar,S. Dhillon,P.N. Belhumeur,S. K. Nayar,,,2,http://www1.cs.columbia.edu/CAVE/publications/pdfs/Bitouk_SIGGRAPH08.pdf,,
-HRT Transgender Database,Is the Eye Region More Reliable Than the Face? A Preliminary Study of Face-based Recognition on a Transgender Dataset,"In Proc. of IEEE Intl. Conf. on Biometrics: Theory, Applications, and Systems",2013,,,Gayathri Mahalingam,Karl Ricanek Jr.,,,,,,,https://pdfs.semanticscholar.org/b066/733d533250f4ddafd22c12456def7fa24f4c.pdf,,
-MUCT,The MUCT Landmarked Face Database,Pattern Recognition Association of South Africa,2010,,,,S. Milborrow,J. Morkel,F. Nicolls,,,,,http://www.milbo.org/muct/The-MUCT-Landmarked-Face-Database.pdf,"@article{Milborrow10,
- author={S. Milborrow and J. Morkel and F. Nicolls},
- title={{The MUCT Landmarked Face Database}},
- journal={Pattern Recognition Association of South Africa},
- year=2010,
- note={\url{http://www.milbo.org/muct}}
-}",
-AFLW,"Annotated Facial Landmarks in the Wild: A Large-scale, Real-world Database for Facial Landmark Localization",,,,,Martin Koestinger,Paul Wohlhart,Peter M. Roth,Horst Bischof,,,,,https://files.icg.tugraz.at/seafhttp/files/d18813db-78c3-46a9-8614-bc0c8d428114/koestinger_befit_11.pdf,"@INPROCEEDINGS{koestinger11a,
- author = {Martin Koestinger, Paul Wohlhart, Peter M. Roth and Horst Bischof},
- title = {{Annotated Facial Landmarks in the Wild: A Large-scale, Real-world Database for Facial Landmark Localization}},
- booktitle = {{Proc. First IEEE International Workshop on Benchmarking Facial Image Analysis Technologies}},
- year = {2011}
-} ",
-PIPA,Beyond Frontal Faces: Improving Person Recognition Using Multiple Cues,arXiv:1501.05703 [cs.CV],2015,,,Ning Zhang, Manohar Paluri,Yaniv Taigman,Rob Fergus,Lubomir Bourdev,,,,https://arxiv.org/pdf/1501.05703.pdf,"@inproceedings{piper,
- Author = {Ning Zhang and Manohar Paluri and Yaniv Taigman and Rob Fergus and Lubomir Bourdev},
- Title = {Beyond Frontal Faces: Improving Person Recognition Using Multiple Cues},
- Eprint = {arXiv:1501.05703},
- Year = {2015}}",
-UCF101,UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild,CRCV-TR-12-01,2012,,,"Soomro, K.","Roshan Zamir, A.","Shah, M.",,,,,2,,"@inproceedings{UCF101,
- author = {Soomro, K. and Roshan Zamir, A. and Shah, M.},
- booktitle = {CRCV-TR-12-01},
- title = {{UCF101}: A Dataset of 101 Human Actions Classes From
- Videos in The Wild},
- year = {2012}}",
-YMU,Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?,"Proc. of 5th IEEE International Conference on Biometrics: Theory, Applications and Systems (BTAS), (Washington DC, USA)",2012,,,A. Dantcheva,C. Chen,A. Ross,,,,,1,https://www.cse.msu.edu/~rossarun/pubs/DantchevaChenRossFaceCosmetics_BTAS2012.pdf,,
-FDDB,FDDB: A Benchmark for Face Detection in Unconstrained Settings,"Technical Report UM-CS-2010-009, Dept. of Computer Science, University of Massachusetts",2010,,,Vidit Jain,Erik Learned-Mille,,,,,,,http://vis-www.cs.umass.edu/fddb/fddb.pdf,"@TechReport{fddbTech,
- author = {Vidit Jain and Erik Learned-Miller},
- title = {FDDB: A Benchmark for Face Detection in Unconstrained Settings},
- institution = {University of Massachusetts, Amherst},
- year = {2010},
- number = {UM-CS-2010-009}
- }",
-Hipsterwars,Hipster Wars: Discovering Elements of Fashion Styles.,In European Conference on Computer Vision,2014,,,M. Hadi Kiapour,Kota Yamaguchi,Alexander C. Berg,Tamara L. Berg,,,,,http://tamaraberg.com/papers/hipster_eccv14.pdf,"@inproceedings{
- HipsterWarsECCV14,
- title = {Hipster Wars: Discovering Elements of Fashion Styles}
- author = {M. Hadi Kiapour, Kota Yamaguchi, Alexander C. Berg, Tamara L. Berg},
- booktitle={European Conference on Computer Vision},
- year = {2014}
- }",
-LAG,Large Age-Gap Face Verification by Feature Injection in Deep Networks,In Pattern Recognition Letters,2017,36-42,90,Simone Bianco,,,,,,,,http://www.ivl.disco.unimib.it/download/bianco2017large-age.pdf,,
-CMDP,Distance Estimation of an Unknown Person from a Portrait,ECCV 2014,2014,,,X. P. Burgos-Artizzu,M.R. Ronchi,P. Perona,,,,,,,,
-Columbia Gaze Data Set,Gaze Locking: Passive Eye Contact Detection for Human–Object Interaction,ACM Symposium on User Interface Software and Technology (UIST),2013,271-280,,B.A. Smith,Q. Yin,S.K. Feiner,S.K. Nayar,,,,,http://www.cs.columbia.edu/~brian/publications/gaze_locking.html,,
-IJB-A,"Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A
-",Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition,2015,1931-1939,07-12-June-2015,"Klare, B. F.","Klein, B.","Taborsky, E.","Blanton, A.","Cheney, J.","Allen, K., ... Jain, A. K.",,,http://ieeexplore.ieee.org/document/7298803/,"DOI: 10.1109/CVPR.2015.7298803 @inbook{882e95bdca414797b4a8e2bfcb5b1fa4,
-title = ""Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A"",
-abstract = ""Rapid progress in unconstrained face recognition has resulted in a saturation in recognition accuracy for current benchmark datasets. While important for early progress, a chief limitation in most benchmark datasets is the use of a commodity face detector to select face imagery. The implication of this strategy is restricted variations in face pose and other confounding factors. This paper introduces the IARPA Janus Benchmark A (IJB-A), a publicly available media in the wild dataset containing 500 subjects with manually localized face images. Key features of the IJB-A dataset are: (i) full pose variation, (ii) joint use for face recognition and face detection benchmarking, (iii) a mix of images and videos, (iv) wider geographic variation of subjects, (v) protocols supporting both open-set identification (1:N search) and verification (1:1 comparison), (vi) an optional protocol that allows modeling of gallery subjects, and (vii) ground truth eye and nose locations. The dataset has been developed using 1,501,267 million crowd sourced annotations. Baseline accuracies for both face detection and face recognition from commercial and open source algorithms demonstrate the challenge offered by this new unconstrained benchmark."",
-author = ""Klare, {Brendan F.} and Ben Klein and Emma Taborsky and Austin Blanton and Jordan Cheney and Kristen Allen and Patrick Grother and Alan Mah and Mark Burge and Jain, {Anil K.}"",
-year = ""2015"",
-month = ""10"",
-doi = ""10.1109/CVPR.2015.7298803"",
-isbn = ""9781467369640"",
-volume = ""07-12-June-2015"",
-pages = ""1931--1939"",
-booktitle = ""Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition"",
-publisher = ""IEEE Computer Society"",
-
-}
-",
-AgeDB,"AgeDB: the first manually collected, in-the-wild age database",Proceedings of IEEE Int’l Conf. on Computer Vision and Pattern Recognition (CVPR-W 2017,2017,,,S. Moschoglou,A. Papaioannou,C. Sagonas,J. Deng,I. Kotsia, S. Zafeiriou,,,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Moschoglou_AgeDB_The_First_CVPR_2017_paper.pdf,"@inproceedings{AgeDB,
- author = {S. Moschoglou and A. Papaioannou and C. Sagonas and J. Deng and I. Kotsia and S. Zafeiriou},
- address = {Honolulu, Hawaii},
- booktitle = {Proceedings of IEEE Int’l Conf. on Computer Vision and Pattern Recognition (CVPR-W 2017)},
- month = {June},
- title = {AgeDB: the first manually collected, in-the-wild age database},
- year = {2017},
-}",
-WIDER FACE,WIDER FACE: A Face Detection Benchmark,IEEE Conference on Computer Vision and Pattern Recognition (CVPR),2016,,,"Yang, Shuo and Luo, Ping and Loy, Chen Change and Tang, Xiaoou",,,,,,,,,"@inproceedings{yang2016wider,
- Author = {Yang, Shuo and Luo, Ping and Loy, Chen Change and Tang, Xiaoou},
- Booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
- Title = {WIDER FACE: A Face Detection Benchmark},
- Year = {2016}}",
diff --git a/datasets/citations-3.csv b/datasets/citations-3.csv
deleted file mode 100644
index 57db254d..00000000
--- a/datasets/citations-3.csv
+++ /dev/null
@@ -1,51 +0,0 @@
-Database Name,Title,Journal/Pub/Conference,Year,Pages,Volume,Author1,Author2,Author3,Author4,Author5,Author 6,PDF,Priority,URL,bibtex_reference_only,notes
-imSitu,Situation Recognition: Visual Semantic Role Labeling for Image Understanding,"(1) Computer Science & Engineering, University of Washington, Seattle, WA
-(2) Allen Institute for Artificial Intelligence (AI2), Seattle, WA",,,,Mark Yatskar,Luke Zettlemoyer,Ali Farhadi,,,,,,https://homes.cs.washington.edu/~my89/publications/situations.pdf,,
-JAFFE,Coding Facial Expressions with Gabor Wavelets,3rd IEEE International Conference on Automatic Face and Gesture Recognition,1998,200-205,,Michael J. Lyons,Shigeru Akemastu,Miyuki Kamachi,Jiro Gyoba,,,,,http://www.kasrl.org/fg98-1.pdf,,
-UCF101,THUMOS Challenge: Action Recognition with a Large Number of Classes,,2015,,,"Gorban, A.","Idrees, H.","Jiang, Y.-G.","Roshan Zamir, A.","Laptev, I.","Shah, M. and Sukthankar, R.",,1,http://www.thumos.info/,"@misc{THUMOS15,
- author = ""Gorban, A. and Idrees, H. and Jiang, Y.-G. and Roshan Zamir, A. and Laptev,
- I. and Shah, M. and Sukthankar, R."",
- title = ""{THUMOS} Challenge: Action Recognition with a Large
- Number of Classes"",
- howpublished = ""\url{http://www.thumos.info/}"",
- Year = {2015}}",
-IMFDB,Indian Movie Face Database: A Benchmark for Face Recognition Under Wide Variations,"National Conference on Computer Vision, Pattern Recognition, Image Processing and Graphics (NCVPRIPG)",2013,,,Shankar Setty,et al,,,,,,,http://cvit.iiit.ac.in/projects/IMFDB/imfdb.pdf,"@InProceedings{imfdb,
-author = {Shankar Setty, Moula Husain, Parisa Beham, Jyothi Gudavalli, Menaka Kandasamy, Radhesyam Vaddi, Vidyagouri Hemadri, J C Karure, Raja Raju, Rajan, Vijay Kumar and C V Jawahar},
-title = {{I}ndian {M}ovie {F}ace {D}atabase: {A} {B}enchmark for {F}ace {R}ecognition {U}nder {W}ide {V}ariations},
-booktitle = {National Conference on Computer Vision, Pattern Recognition, Image Processing and Graphics (NCVPRIPG)},
-month = {Dec},
-year = {2013}
-} ",
-LFW-a,Effective Face Recognition by Combining Multiple Descriptors and Learned Background Statistics,"IEEE Trans. on Pattern Analysis and Machine Intelligence (TPAMI), 33(10),",2011,,,Lior Wolf,Tal Hassner,Yaniv Taigman,,,,,,http://www.openu.ac.il/home/hassner/projects/Patchlbp/WolfHassnerTaigman_TPAMI11.pdf,,
-MORPH commercial,MORPH: A Longitudinal Image Database of Normal Adult Age-Progression,"IEEE 7th International Conference on Automatic Face and Gesture Recognition, Southampton, UK",2006,341-345,,Karl Ricanek Jr,Tamirat Tesafaye,,,,,,,,,
-SCUT-FBP,SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception,arXiv:1511.02459 [cs.CV],2015,,,Duorui Xie,Lingyu Liang,Lianwen Jin,Jie Xu,Mengru Li,,,,https://arxiv.org/ftp/arxiv/papers/1511/1511.02459.pdf,,
-Names and Faces,Names and Faces in the News,"Computer Vision and Pattern Recognition (CVPR), Washington D.C.",2004,848-854,,Tamara L. Berg,Alexander C. Berg,Jaety Edwards,Michael Maire,Ryan White,"Yee Whye Teh, Erik Learned-Miller, David A. Forsyth",,3,http://www.cs.berkeley.edu/%7Eaberg/papers/berg_names_and_faces.pdf,,
-FaceScrub,A data-driven approach to cleaning large face datasets,Proc. IEEE International Conference on Image Processing (ICIP),2014,,,H.-W. Ng,S. Winkler,,,,,,,,,
-UCF Selfie,"How to Take a Good Selfie?, in Proceedings of ACM Multimedia Conference 2015 (ACMMM 2015), Brisbane, Australia",,2015,,,Mahdi M. Kalayeh,Misrak Seifu,Wesna LaLanne,Mubarak Shah,,,,,,,
-300-W,A semi-automatic methodology for facial landmark annotation,"Proceedings of IEEE Int’l Conf. Computer Vision and Pattern Recognition (CVPR-W), 5th Workshop on Analysis and Modeling of Faces and Gestures (AMFG 2013). Oregon, USA,",2013,,,C. Sagonas,G. Tzimiropoulos,S. Zafeiriou,M. Pantic,,,,3,,,
-YMU,Automatic Facial Makeup Detection with Application in Face Recognition,"Proc. of 6th IAPR International Conference on Biometrics (ICB), (Madrid, Spain)",2013,,,C. Chen,A. Dantcheva,A. Ross,,,,,2,https://www.cse.msu.edu/~rossarun/pubs/ChenMakeupDetection_ICB2013.pdf,,
-WIDER,Recognize Complex Events from Static Images by Fusing Deep Channels,2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR),2015,,,"Xiong, Yuanjun and Zhu, Kai and Lin, Dahua and Tang, Xiaoou",,,,,,,,,,
-YaleFaces,Eigenfaces vs. fisherfaces: Recognition using class specific linear projection,"IEEE Transactions on Pattern Analysis and Machine Intelligence, Special Issue on Face Recognition",1997,711--720,17(7),P. N. Bellhumer,J. Hespanha,D. Kriegman,,,,,,,,
-PubFig,Attribute and Simile Classifiers for Face Verification,International Conference on Computer Vision (ICCV),2009,,,Neeraj Kumar,Alexander C. Berg,Peter N. Belhumeur,Shree K. Nayar,,,,,http://www.cs.columbia.edu/CAVE/publications/pdfs/Kumar_ICCV09.pdf,,
-Face Research Lab London Set,Face Research Lab London Set. figshare,,2017,,,"DeBruine, Lisa","Jones, Benedict",,,,,,,https://doi.org/10.6084/m9.figshare.5047666.v3,,
-MALF,Fine-grained Evaluation on Face Detection in the Wild.,Proceedings of the 11th IEEE International Conference on Automatic Face and Gesture Recognition Conference and Workshops.,2015,,,Bin Yang*,Junjie Yan*,Zhen Lei,Stan Z. Li,,,,,http://www.cbsr.ia.ac.cn/faceevaluation/faceevaluation15.pdf,"@inproceedings{faceevaluation15,
-title={Fine-grained Evaluation on Face Detection in the Wild},
-author={Yang, Bin and Yan, Junjie and Lei, Zhen and Li, Stan Z},
-booktitle={Automatic Face and Gesture Recognition (FG), 11th IEEE International
-Conference on},
-year={2015},
-organization={IEEE}
-}",
-FaceScrub,A data-driven approach to cleaning large face datasets,"Proc. IEEE International Conference on Image Processing (ICIP), Paris, France",2014,,,H.-W. Ng,S. Winkler,,,,,,,http://vintage.winklerbros.net/Publications/icip2014a.pdf,,
-VMU,Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?,"Proc. of 5th IEEE International Conference on Biometrics: Theory, Applications and Systems (BTAS), (Washington DC, USA)",2012,,,A. Dantcheva,C. Chen,A. Ross,,,,,,https://www.cse.msu.edu/~rossarun/pubs/DantchevaChenRossFaceCosmetics_BTAS2012.pdf,,
-300-W,300 faces In-the-wild challenge: Database and results,"Image and Vision Computing (IMAVIS), Special Issue on Facial Landmark Localisation ""In-The-Wild""",2016,,,C. Sagonas,E. Antonakos,"G, Tzimiropoulos",S. Zafeiriou,M. Pantic,,,1,,,
-CK,Comprehensive Database for Facial Expression Analysis,"Proceedings of the Fourth IEEE International Conferenc
-e on Automatic Face and Gesture Recognition
-(FG'00)
-",2000,484-490,,"Kanade, T.","Cohn, J. F.","Tian, Y.",,,,,,http://www.pitt.edu/~jeffcohn/biblio/Cohn-Kanade_Database.pdf,,
-MegaFace 2,Level Playing Field for Million Scale Face Recognition,IEEE Conference on Computer Vision and Pattern Recognition (CVPR),2017,,,"Nech, Aaron","Kemelmacher-Shlizerman, Ira",,,,,,If you're participating or using data from Challenge 2 please cite:,https://homes.cs.washington.edu/~kemelmi/ms.pdf,"@inproceedings{nech2017level,
-title={Level Playing Field For Million Scale Face Recognition},
-author={Nech, Aaron and Kemelmacher-Shlizerman, Ira},
-booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
-year={2017}
-}",
diff --git a/datasets/citations.csv b/datasets/citations.csv
deleted file mode 120000
index a2ab42cc..00000000
--- a/datasets/citations.csv
+++ /dev/null
@@ -1 +0,0 @@
-citations-2018310.csv \ No newline at end of file
diff --git a/faiss/requirements.txt b/faiss/requirements.txt
new file mode 100644
index 00000000..1d60aabc
--- /dev/null
+++ b/faiss/requirements.txt
@@ -0,0 +1,11 @@
+Pillow
+h5py
+tensorflow
+Keras
+Flask
+opencv-python
+imagehash
+scikit-image
+scikit-learn
+imutils
+
diff --git a/faiss/run.sh b/faiss/run.sh
new file mode 100644
index 00000000..8f9e77e2
--- /dev/null
+++ b/faiss/run.sh
@@ -0,0 +1 @@
+uwsgi --http 127.0.0.1:5000 --file wsgi.py --callable app --processes 1
diff --git a/faiss/static/css/app.css b/faiss/static/css/app.css
new file mode 100644
index 00000000..a3b24736
--- /dev/null
+++ b/faiss/static/css/app.css
@@ -0,0 +1,289 @@
+/* css boilerplate */
+
+* { box-sizing: border-box; }
+html,body {
+ margin: 0; padding: 0;
+ width: 100%; height: 100%;
+}
+body {
+ font-family: Helvetica, sans-serif;
+ font-weight: 300;
+ padding-top: 60px;
+}
+
+/* header */
+
+header {
+ position: fixed;
+ top: 0;
+ left: 0;
+ height: 60px;
+ width: 100%;
+ background: #11f;
+ color: white;
+ align-items: stretch;
+ display: flex;
+ flex-wrap: wrap;
+ justify-content: space-between;
+ z-index: 3;
+}
+header > section {
+ justify-content: flex-start;
+ align-items: center;
+ display: flex;
+ flex: 1 0;
+ font-weight: bold;
+}
+header > section:last-of-type {
+ justify-content: flex-end;
+}
+header a {
+ color: hsla(0,0%,100%,.89);
+ text-decoration: none;
+ line-height: 18px;
+ font-size: 14px;
+ font-weight: 700;
+ padding: .35rem .4rem;
+ white-space: nowrap;
+}
+header .logged-in {
+ font-size: 12px;
+ font-weight: normal;
+ padding: 0 0.5rem;
+}
+header .logout {
+ padding: 0 6px;
+ border-left: 1px solid #99f;
+}
+header .logout a {
+ font-size: 12px;
+}
+.menuToggle {
+ width: 30px;
+ height: 30px;
+ margin: 5px;
+ cursor: pointer;
+ line-height: 1;
+}
+
+/* form at the top */
+
+#form {
+ display: flex;
+ flex-direction: row;
+ justify-content: space-between;
+ align-items: center;
+ margin: 20px;
+ padding: 20px;
+ border: 1px solid #ddd;
+}
+input[type=text] {
+ border: 1px solid #888;
+ padding: 4px;
+ font-size: 15px;
+}
+input[type=file] {
+ max-width: 200px;
+ border-radius: 2px;
+}
+input[type=file]:invalid + button { visibility: hidden!important; }
+input[type=file]:valid + button { visibility: visible!important; }
+#form > div {
+ display: flex;
+ flex-direction: row;
+ align-items: center;
+}
+#form > div * {
+ margin: 0 3px;
+}
+
+/* saving UI form */
+
+label {
+ display: block;
+ white-space: nowrap;
+ padding-bottom: 10px;
+}
+label:last-child {
+ padding-bottom: 0;
+}
+label span {
+ display: inline-block;
+ min-width: 80px;
+}
+.saving_ui {
+ display: none;
+}
+.saving .saving_ui {
+ display: flex;
+ border: 1px solid #ddd;
+ margin: 20px;
+ padding: 20px;
+ flex-direction: row;
+ justify-content: space-between;
+}
+
+/* query box, shows either searched image, directory name, etc */
+
+.loading .results,
+.prefetch .query, .prefetch .results,
+.browsing .score, .browsing .browse,
+.photo .browse,
+.saving .score {
+ display: none;
+}
+.browsing .query div { display: inline; margin-left: 5px; font-weight: bold; }
+.saving .query div { display: inline; margin-left: 5px; font-weight: bold; }
+.load_message {
+ opacity: 0;
+}
+.loading .load_message {
+ display: block;
+ margin: 20px;
+ font-weight: bold;
+}
+
+.query {
+ margin: 20px;
+}
+.query > div {
+ margin-top: 10px;
+ position: relative;
+ display: flex;
+ flex-direction: row;
+ align-items: flex-start;
+}
+.query img {
+ cursor: crosshair;
+ max-width: 400px;
+ display: block;
+}
+.query > div > .box {
+ position: absolute;
+ border: 1px solid #11f;
+ background: rgba(17,17,255,0.1);
+ pointer-events: none;
+}
+.query canvas {
+ margin-left: 20px;
+ max-width: 200px;
+}
+
+/* search results */
+
+.results {
+ display: flex;
+ flex-direction: row;
+ flex-wrap: wrap;
+}
+.results > div {
+ display: flex;
+ flex-direction: column;
+ justify-content: flex-end;
+ width: 210px;
+ margin: 15px;
+ padding: 5px;
+ border: 1px solid transparent;
+}
+.results > div.saved {
+ border-radius: 2px;
+ background: #fafaaa;
+}
+.results > div img {
+ cursor: pointer;
+ max-width: 210px;
+ margin-bottom: 10px;
+}
+.results > div > div {
+ display: flex;
+ flex-direction: row;
+ justify-content: space-between;
+ align-items: center;
+}
+.results a:visited .btn {
+ color: #99d;
+}
+.score {
+ font-size: 12px;
+ color: #444;
+}
+
+
+/* spinner */
+
+.loader {
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ position: absolute;
+ top: 0; left: 0;
+ width: 100%; height: 100%;
+ background: rgba(255,255,255,0.9);
+}
+.loader > div {
+ background: white;
+ padding: 20px;
+ box-shadow: 0 1px 2px #bbb;
+ border-radius: 2px;
+}
+.spinner {
+ position: relative;
+ width: 32px;
+ height: 32px;
+ color: #11f;
+ margin: 0 auto;
+}
+.spinner:after {
+ position: absolute;
+ margin: auto;
+ width: 100%;
+ height: 100%;
+ top: 0;
+ left: 0;
+ right: 0;
+ bottom: 0;
+ content: " ";
+ display: inline-block;
+ border-radius: 50%;
+ border-style: solid;
+ border-width: 0.15em;
+ -webkit-background-clip: padding-box;
+ border-color: currentColor currentColor currentColor transparent;
+ box-sizing: border-box;
+ -webkit-animation: ld-cycle 0.7s infinite linear;
+ animation: ld-cycle 0.7s infinite linear;
+}
+@-webkit-keyframes ld-cycle {
+ 0%, 50%, 100% {
+ animation-timing-function: cubic-bezier(0.5, 0.5, 0.5, 0.5);
+ }
+ 0% {
+ -webkit-transform: rotate(0);
+ transform: rotate(0);
+ }
+ 50% {
+ -webkit-transform: rotate(180deg);
+ transform: rotate(180deg);
+ }
+ 100% {
+ -webkit-transform: rotate(360deg);
+ transform: rotate(360deg);
+ }
+}
+@keyframes ld-cycle {
+ 0%, 50%, 100% {
+ animation-timing-function: cubic-bezier(0.5, 0.5, 0.5, 0.5);
+ }
+ 0% {
+ -webkit-transform: rotate(0);
+ transform: rotate(0);
+ }
+ 50% {
+ -webkit-transform: rotate(180deg);
+ transform: rotate(180deg);
+ }
+ 100% {
+ -webkit-transform: rotate(360deg);
+ transform: rotate(360deg);
+ }
+}
diff --git a/faiss/static/favicon.ico b/faiss/static/favicon.ico
new file mode 100644
index 00000000..d97f2f59
--- /dev/null
+++ b/faiss/static/favicon.ico
Binary files differ
diff --git a/faiss/static/img/play.png b/faiss/static/img/play.png
new file mode 100644
index 00000000..40f76045
--- /dev/null
+++ b/faiss/static/img/play.png
Binary files differ
diff --git a/faiss/static/index.html b/faiss/static/index.html
new file mode 100644
index 00000000..cf59c628
--- /dev/null
+++ b/faiss/static/index.html
@@ -0,0 +1,83 @@
+<!doctype html>
+<html>
+<head>
+<meta charset="UTF-8" />
+<base href='/search/' />
+<link rel="stylesheet" href="static/css/app.css">
+<link rel="shortcut icon" href="static/favicon.ico" />
+<title>VFrame Image Import</title>
+</head>
+<body class='prefetch'>
+
+<header>
+ <section class="navbar-section">
+ <a href="/"><img class="menuToggle" alt='logo' src="static/css/vframe-logo.png" /></a>
+ <a href="/categories/">Categories</a>
+ <a href="/groups/user/">Assignments</a>
+ <a href="/images/new/">Add Image</a>
+ <a href="/search/">Search</a>
+ </section>
+
+ <section class="navbar-section last-navbar-section">
+ <span class="menu-help"><a href="/static/explore/treemap.html">Explore Data</a></span>
+ <span class="menu-help"><a href="/help/">Help</a></span>
+ <span class="login-out logged-in"><span class="capitalize"></span></span>
+ <span class="logout login-out"><a href="/accounts/logout/">Logout</a></span>
+ </section>
+</header>
+
+<div class="container">
+
+<div id="form">
+ <div>
+ <input type="file" name="img" accept="image/*" required>
+ <button class='btn upload_again'>Upload Again</button>
+ </div>
+ <div class="general_ui">
+ <button class='btn panic'>Panic</button>
+ <button class='btn random'>Random</button>
+ <button class='btn view_saved'>View Saved</button>
+ </div>
+</div>
+
+<div class="saving_ui">
+ <div>
+ <label><span>Title</span> <input type="text" name="title" placeholder="Enter a title"></label>
+ <label><span></span><input type="checkbox" name="graphic"> Graphic content</label>
+ </label>
+ <label><span></span>
+ <button class='btn create_new_group'>Create New Group</button>
+ <button class='btn check'>Check Duplicates</button>
+ <button class='btn reset'>Clear Selection</button>
+ </label>
+ </div>
+</div>
+
+<div class="query">
+ <span class='msg'></span>
+ <div></div>
+</div>
+
+<div class="results">
+</div>
+
+
+</div>
+<script type="text/html" id="result-template">
+ <div class='{className}'>
+ <img src="{img}" crossorigin="anonymous">
+ <div>
+ <div class='score'>{score}</div>
+ <a href='{metadata}'><button class='btn metadata'>Info</button></a>
+ <a href='{browse}'><button class='btn browse'>Expand</button></a>
+ <a href='{search}'><button class='btn search'>Search</button></a>
+ </div>
+ </div>
+</script>
+</body>
+<script src="static/js/store2.min.js"></script>
+<script src="static/js/dataUriToBlob.js"></script>
+<script src="static/js/app.js"></script>
+</html>
+
+
diff --git a/faiss/static/js/app.js b/faiss/static/js/app.js
new file mode 100644
index 00000000..77164c76
--- /dev/null
+++ b/faiss/static/js/app.js
@@ -0,0 +1,491 @@
+/* eslint no-use-before-define: 0, camelcase: 0, one-var-declaration-per-line: 0, one-var: 0, quotes: 0, prefer-destructuring: 0, no-alert: 0, no-console: 0, no-multi-assign: 0 */
+
+function loadApp() {
+ const result_template = document.querySelector('#result-template').innerHTML
+ const results_el = document.querySelector('.results')
+ const query_div = document.body.querySelector('.query > div')
+ let bounds
+ let token, username
+ let x, y, mouse_x, mouse_y, dx, dy, box
+ let dragging = false
+ let cropping = false
+ let creating = false
+ let did_check = false
+
+ function init() {
+ login()
+ bind()
+ route()
+ }
+ function bind() {
+ window.onpopstate = route
+ document.querySelector('[name=img]').addEventListener('change', upload)
+ on('click', '.results a', preventDefault)
+ on('click', '.search', search)
+ on('click', '.panic', panic)
+ on('click', '.upload_again', upload_again)
+ on('click', '.browse', browse)
+ on('click', '.results img', save)
+ on('click', '.view_saved', loadSaved)
+ on('click', '.create_new_group', createNewGroup)
+ on('click', '.reset', reset)
+ on('click', '.random', random)
+ on('click', '.check', check)
+ on('mousedown', '.query img', down)
+ window.addEventListener('mousemove', move)
+ window.addEventListener('mouseup', up)
+ window.addEventListener('keydown', keydown)
+ }
+ function route() {
+ const path = window.location.pathname.split('/')
+ // remove initial slash
+ path.shift()
+ // remove dummy route
+ if (path[0] === 'search') path.shift()
+ switch (path[0]) {
+ case 'fetch':
+ search({ target: { url: window.location.search.substr(1).split('=')[1] } })
+ break
+ case 'view':
+ search(path.slice(1))
+ break
+ case 'q':
+ if (path.length === 3) {
+ search({ target: { dir: path[1], fn: path[2] } })
+ } else {
+ browse({ target: { dir: path[1], fn: null } })
+ }
+ break
+ case 'saved':
+ loadSaved()
+ break
+ default:
+ break
+ }
+ }
+ function keydown(e) {
+ switch (e.keyCode) {
+ case 27: // escape
+ panic()
+ break
+ default:
+ break
+ }
+ }
+
+ // load search results
+ function loadResults(data) {
+ console.log(data)
+ if (!data.query.url) return
+ // console.log(data)
+ document.body.className = 'searching'
+ const path = getPathFromImage(data.query.url)
+ pushState('searching', "/search/fetch/?url=" + path.url)
+ if (path.dir === 'uploaded' && path.fn.match('_filename')) {
+ loadMessage(
+ "<a href='javascript:history.go(-1)'>&lt; Back</a> | "
+ + "Searching subregion, "
+ + "found " + data.results.length + " images"
+ )
+ } else {
+ loadMessage(
+ "Found " + data.results.length + " images"
+ )
+ }
+ loadQuery(data.query.url)
+ if (!data.results.length) {
+ results_el.innerHTML = "No results"
+ return
+ }
+ const saved = window.store.get('saved', [])
+
+ results_el.innerHTML = data.results.map(res => {
+ const { distance, file, hash, frame, url } = res
+ const isSaved = saved.indexOf(url) !== -1
+ const { type } = getPathFromImage(url)
+ let className = isSaved ? 'saved' : ''
+ className += ' ' + type
+ let t = result_template
+ .replace('{score}', Math.floor(clamp(1 - distance, 0, 1) * 100) + "%")
+ .replace('{browse}', '/search/q/' + hash)
+ .replace('{search}', '/search/view/' + [file, hash, frame].join('/'))
+ .replace('{metadata}', '/metadata/' + hash)
+ .replace('{className}', className)
+ .replace('{saved_msg}', isSaved ? 'Saved' : 'Save')
+ .replace('{img}', url)
+ return t
+ }).join('')
+ }
+
+ function loadDirectory(data) {
+ console.log(data)
+ document.body.className = 'browsing'
+ pushState('searching', "/search/q/" + data.path)
+ loadMessage("Video: <b>" + data.path + "</b>")
+ loadQuery("")
+ if (!data.results.length) {
+ results_el.innerHTML = "No frames found"
+ return
+ }
+ const saved = window.store.get('saved', [])
+ results_el.innerHTML = data.results
+ .map(result => [parseInt(result.frame, 10), result])
+ .sort((a, b) => a[0] - b[0])
+ .map(pair => {
+ let { file, hash, frame, url } = pair[1]
+ const isSaved = saved.indexOf(url) !== -1
+ let className = isSaved ? 'saved' : ''
+ let t = result_template
+ .replace('{img}', url)
+ .replace('{browse}', '/search/q/' + hash)
+ .replace('{search}', '/search/view/' + [file, hash, frame].join('/'))
+ .replace('{metadata}', '/metadata/' + hash)
+ .replace('{className}', className)
+ .replace('{saved_msg}', isSaved ? 'Saved' : 'Save')
+ return t
+ }).join('')
+ }
+ function loadSaved() {
+ document.body.className = 'saving'
+ pushState('View saved', "/search/saved")
+ const saved = window.store.get('saved', [])
+ cropping = false
+ loadMessage(saved.length + " saved image" + (saved.length === 1 ? "" : "s"))
+ loadQuery('')
+ const box_el = document.querySelector('.box')
+ if (box_el) box_el.parentNode.removeChild(box_el)
+ results_el.innerHTML = saved.map(href => {
+ const { url, dir } = getPathFromImage({ src: href })
+ let className = 'saved'
+ let t = result_template
+ .replace('{img}', href)
+ .replace('{browse}', '/search/q/' + dir)
+ .replace('{search}', '/search/fetch/?url=' + url)
+ .replace('{metadata}', '/metadata/' + dir)
+ .replace('{className}', className)
+ .replace('{saved_msg}', 'Saved')
+ return t
+ }).join('')
+ }
+ function loadQuery(path) {
+ if (cropping) return
+ const qd = document.querySelector('.query div')
+ qd.innerHTML = ''
+ if (path.match(/(gif|jpe?g|png)$/)) {
+ const img = new Image()
+ img.setAttribute('crossorigin', 'anonymous')
+ img.src = path.replace('sm', 'md')
+ qd.appendChild(img)
+ } else {
+ qd.innerHTML = path || ""
+ }
+ }
+ function loadMessage(msg) {
+ document.querySelector('.query .msg').innerHTML = msg
+ }
+
+ // panic button
+ function panic() {
+ loadMessage('Query cleared')
+ loadQuery('')
+ results_el.innerHTML = ''
+ }
+
+ // adding stuff to localstorage
+ function save(e) {
+ const { url } = getPathFromImage(e.target)
+ const saved = window.store.get('saved', [])
+ let newList = saved || []
+ if (saved.indexOf(url) !== -1) {
+ newList = saved.filter(f => f !== url)
+ e.target.parentNode.classList.remove('saved')
+ } else {
+ newList.push(url)
+ e.target.parentNode.classList.add('saved')
+ }
+ window.store.set('saved', newList)
+ }
+ function reset() {
+ const shouldReset = window.confirm("This will reset the saved images. Are you sure?")
+ if (!shouldReset) return
+ window.store.set('saved', [])
+ loadSaved()
+ document.querySelector('[name=title]').value = ''
+ window.alert("Reset saved images")
+ }
+
+ // submit the new group
+ function createNewGroup() {
+ const title = document.querySelector('[name=title]').value.trim().replace(/[^-_a-zA-Z0-9 ]/g, "")
+ const saved = window.store.get('saved', [])
+ const graphic = document.querySelector('[name=graphic]').checked
+ if (!title.length) return alert("Please enter a title for this group")
+ if (!saved.length) return alert("Please pick some images to save")
+ if (!did_check) {
+ alert('Automatically checking for duplicates. Please doublecheck your selection.')
+ return check()
+ }
+ if (creating) return null
+ creating = true
+ return http_post("/api/images/import/new/", {
+ title,
+ graphic,
+ saved
+ }).then(res => {
+ console.log(res)
+ window.store.set('saved', [])
+ window.location.href = '/groups/show/' + res.image_group.id
+ }).catch(res => {
+ alert('Error creating group. The server response is logged to the console.')
+ console.log(res)
+ creating = false
+ })
+ }
+
+ // api queries
+ function login() {
+ const isLocal = (window.location.hostname === '0.0.0.0')
+ try {
+ // csrftoken = "test" // getCookie('csrftoken')
+ const auth = JSON.parse(window.store.get('persist:root').auth)
+ token = auth.token
+ username = auth.user.username
+ if (!token && !isLocal) {
+ window.location.href = '/'
+ }
+ } catch (e) {
+ if (!isLocal) {
+ window.location.href = '/'
+ }
+ }
+ document.querySelector('.logged-in .capitalize').innerHTML = username || 'user'
+ }
+
+ function upload(e) {
+ cropping = false
+ const files = e.dataTransfer ? e.dataTransfer.files : e.target.files
+ let i, f
+ for (i = 0, f; i < files.length; i++) {
+ f = files[i]
+ if (f && f.type.match('image.*')) break
+ }
+ if (!f) return
+ do_upload(f)
+ }
+
+ function do_upload(f) {
+ const fd = new FormData()
+ fd.append('query_img', f)
+ document.body.className = 'loading'
+ http_post('/search/api/upload', fd).then(loadResults)
+ }
+
+ function upload_again() {
+ const { files } = document.querySelector('input[type=file]')
+ if (!files.length) {
+ window.alert('Please upload a file.')
+ return
+ }
+ upload({
+ dataTransfer: { files }
+ })
+ }
+
+ function search(e) {
+ if (e.length) return search_by_vector(e)
+ const { url } = getPath(e.target)
+ cropping = false
+ document.body.className = 'loading'
+ loadQuery(url)
+ loadMessage('Loading results...')
+ http_get('/search/api/fetch/?url=' + url).then(loadResults)
+ }
+
+ function search_by_vector(e) {
+ cropping = false
+ document.body.className = 'loading'
+ loadQuery('')
+ loadMessage('Loading results...')
+ http_get('/search/api/search/' + e.join('/')).then(loadResults)
+ }
+
+ function browse(e) {
+ document.body.className = 'loading'
+ cropping = false
+ let dir;
+ if (e.target.dir) {
+ dir = e.target.dir
+ }
+ else {
+ const href = e.target.parentNode.href
+ dir = href.split('/')[5]
+ console.log(href, dir)
+ }
+ loadMessage('Listing video...')
+ http_get('/search/api/list/' + dir).then(loadDirectory)
+ }
+
+ function check() {
+ http_post('/api/images/import/search/', {
+ saved: window.store.get('saved') || [],
+ }).then(res => {
+ console.log(res)
+ const { good, bad } = res
+ did_check = true
+ window.store.set('saved', good)
+ if (!bad.length) {
+ return alert("No duplicates found.")
+ }
+ bad.forEach(path => {
+ const el = document.querySelector('img[src="' + path + '"]')
+ if (el) el.parentNode.classList.remove('saved')
+ })
+ return alert("Untagged " + bad.length + " duplicate" + (bad.length === 1 ? "" : "s") + ".")
+ })
+ }
+
+ function random() {
+ http_get('/search/api/random').then(loadResults)
+ }
+
+ // drawing a box
+ function down(e) {
+ e.preventDefault()
+ dragging = true
+ bounds = query_div.querySelector('img').getBoundingClientRect()
+ mouse_x = e.pageX
+ mouse_y = e.pageY
+ x = mouse_x - bounds.left
+ y = mouse_y - bounds.top
+ dx = dy = 0
+ box = document.querySelector('.box') || document.createElement('div')
+ box.className = 'box'
+ box.style.left = x + 'px'
+ box.style.top = y + 'px'
+ box.style.width = 0 + 'px'
+ box.style.height = 0 + 'px'
+ query_div.appendChild(box)
+ }
+ function move(e) {
+ if (!dragging) return
+ e.preventDefault()
+ dx = clamp(e.pageX - mouse_x, 0, bounds.width - x)
+ dy = clamp(e.pageY - mouse_y, 0, bounds.height - y)
+ box.style.width = dx + 'px'
+ box.style.height = dy + 'px'
+ }
+ function up(e) {
+ if (!dragging) return
+ dragging = false
+ e.preventDefault()
+ const img = query_div.querySelector('img')
+ const canvas = query_div.querySelector('canvas') || document.createElement('canvas')
+ const ctx = canvas.getContext('2d')
+ const ratio = img.naturalWidth / bounds.width
+ canvas.width = dx * ratio
+ canvas.height = dy * ratio
+ if (dx < 10 || dy < 10) {
+ if (canvas.parentNode) canvas.parentNode.removeChild(canvas)
+ const box_el = document.querySelector('.box')
+ if (box_el) box_el.parentNode.removeChild(box_el)
+ return
+ }
+ query_div.appendChild(canvas)
+ ctx.drawImage(
+ img,
+ x * ratio,
+ y * ratio,
+ dx * ratio,
+ dy * ratio,
+ 0, 0, canvas.width, canvas.height
+ )
+ cropping = true
+ const blob = window.dataUriToBlob(canvas.toDataURL('image/jpeg', 0.9))
+ do_upload(blob)
+ }
+
+ // utility functions
+ function http_get(url) {
+ return fetch(url).then(res => res.json())
+ }
+ function http_post(url, data) {
+ let headers
+ if (data instanceof FormData) {
+ headers = {
+ Accept: 'application/json, application/xml, text/play, text/html, *.*',
+ Authorization: 'Token ' + token,
+ }
+ } else {
+ headers = {
+ Accept: 'application/json, application/xml, text/play, text/html, *.*',
+ 'Content-Type': 'application/json; charset=utf-8',
+ Authorization: 'Token ' + token,
+ }
+ data = JSON.stringify(data)
+ }
+
+ // headers['X-CSRFToken'] = csrftoken
+ return fetch(url, {
+ method: 'POST',
+ body: data,
+ credentials: 'include',
+ headers,
+ }).then(res => res.json())
+ }
+ function on(evt, sel, handler) {
+ document.addEventListener(evt, function (event) {
+ let t = event.target
+ while (t && t !== this) {
+ if (t.matches(sel)) {
+ handler.call(t, event)
+ }
+ t = t.parentNode
+ }
+ })
+ }
+ function getPathFromImage(el) {
+ const url = el.src ? el.src : el
+ const partz = url.split('/')
+ let type, dir, fn
+ if (partz.length === 3) {
+ type = 'photo'
+ dir = ''
+ fn = ''
+ }
+ if (partz.length === 9) {
+ type = 'photo'
+ dir = partz[6]
+ fn = ''
+ } else if (partz.length === 10) {
+ type = 'video'
+ dir = partz[6]
+ fn = partz[7]
+ }
+ return { type, dir, fn, url }
+ }
+ function getPath(el) {
+ if (el.url) {
+ return getPathFromImage(el.url)
+ } if (el.dir) {
+ return el
+ }
+ el = el.parentNode.parentNode.parentNode.querySelector('img')
+ return getPathFromImage(el)
+ }
+ function pushState(txt, path) {
+ if (window.location.pathname === path) return
+ console.log('pushstate', path)
+ window.history.pushState({}, txt, path)
+ }
+ function preventDefault(e) {
+ if (e && !e.target.classList.contains('metadata')) {
+ e.preventDefault()
+ }
+ }
+ function clamp(n, a, b) { return n < a ? a : n < b ? n : b }
+
+ // initialize the app when the DOM is ready
+ document.addEventListener('DOMContentLoaded', init)
+}
+
+loadApp()
diff --git a/faiss/static/js/dataUriToBlob.js b/faiss/static/js/dataUriToBlob.js
new file mode 100644
index 00000000..80189b8d
--- /dev/null
+++ b/faiss/static/js/dataUriToBlob.js
@@ -0,0 +1,58 @@
+var dataUriToUint8Array = function(uri){
+ var data = uri.split(',')[1];
+ var bytes = atob(data);
+ var buf = new ArrayBuffer(bytes.length);
+ var u8 = new Uint8Array(buf);
+ for (var i = 0; i < bytes.length; i++) {
+ u8[i] = bytes.charCodeAt(i);
+ }
+ return u8
+}
+
+window.dataUriToBlob = (function(){
+/**
+ * Blob constructor.
+ */
+
+var Blob = window.Blob;
+
+/**
+ * ArrayBufferView support.
+ */
+
+var hasArrayBufferView = new Blob([new Uint8Array(100)]).size == 100;
+
+/**
+ * Return a `Blob` for the given data `uri`.
+ *
+ * @param {String} uri
+ * @return {Blob}
+ * @api public
+ */
+
+var dataUriToBlob = function(uri){
+ var data = uri.split(',')[1];
+ var bytes = atob(data);
+ var buf = new ArrayBuffer(bytes.length);
+ var arr = new Uint8Array(buf);
+ for (var i = 0; i < bytes.length; i++) {
+ arr[i] = bytes.charCodeAt(i);
+ }
+
+ if (!hasArrayBufferView) arr = buf;
+ var blob = new Blob([arr], { type: mime(uri) });
+ blob.slice = blob.slice || blob.webkitSlice;
+ return blob;
+};
+
+/**
+ * Return data uri mime type.
+ */
+
+function mime(uri) {
+ return uri.split(';')[0].slice(5);
+}
+
+return dataUriToBlob;
+
+})()
diff --git a/faiss/static/js/metadata-app.js b/faiss/static/js/metadata-app.js
new file mode 100644
index 00000000..fa2265fa
--- /dev/null
+++ b/faiss/static/js/metadata-app.js
@@ -0,0 +1,50 @@
+!function(e){var t={};function n(r){if(t[r])return t[r].exports;var o=t[r]={i:r,l:!1,exports:{}};return e[r].call(o.exports,o,o.exports,n),o.l=!0,o.exports}n.m=e,n.c=t,n.d=function(e,t,r){n.o(e,t)||Object.defineProperty(e,t,{configurable:!1,enumerable:!0,get:r})},n.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return n.d(t,"a",t),t},n.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},n.p="",n(n.s=212)}([function(e,t,n){var r=n(99),o=36e5,i=6e4,a=2,u=/[T ]/,s=/:/,c=/^(\d{2})$/,l=[/^([+-]\d{2})$/,/^([+-]\d{3})$/,/^([+-]\d{4})$/],f=/^(\d{4})/,d=[/^([+-]\d{4})/,/^([+-]\d{5})/,/^([+-]\d{6})/],p=/^-(\d{2})$/,h=/^-?(\d{3})$/,m=/^-?(\d{2})-?(\d{2})$/,v=/^-?W(\d{2})$/,y=/^-?W(\d{2})-?(\d{1})$/,g=/^(\d{2}([.,]\d*)?)$/,_=/^(\d{2}):?(\d{2}([.,]\d*)?)$/,b=/^(\d{2}):?(\d{2}):?(\d{2}([.,]\d*)?)$/,w=/([Z+-].*)$/,x=/^(Z)$/,E=/^([+-])(\d{2})$/,O=/^([+-])(\d{2}):?(\d{2})$/;function S(e,t,n){t=t||0,n=n||0;var r=new Date(0);r.setUTCFullYear(e,0,4);var o=7*t+n+1-(r.getUTCDay()||7);return r.setUTCDate(r.getUTCDate()+o),r}e.exports=function(e,t){if(r(e))return new Date(e.getTime());if("string"!=typeof e)return new Date(e);var n=(t||{}).additionalDigits;n=null==n?a:Number(n);var T=function(e){var t,n={},r=e.split(u);if(s.test(r[0])?(n.date=null,t=r[0]):(n.date=r[0],t=r[1]),t){var o=w.exec(t);o?(n.time=t.replace(o[1],""),n.timezone=o[1]):n.time=t}return n}(e),k=function(e,t){var n,r=l[t],o=d[t];if(n=f.exec(e)||o.exec(e)){var i=n[1];return{year:parseInt(i,10),restDateString:e.slice(i.length)}}if(n=c.exec(e)||r.exec(e)){var a=n[1];return{year:100*parseInt(a,10),restDateString:e.slice(a.length)}}return{year:null}}(T.date,n),R=k.year,j=function(e,t){if(null===t)return null;var n,r,o,i;if(0===e.length)return(r=new Date(0)).setUTCFullYear(t),r;if(n=p.exec(e))return r=new Date(0),o=parseInt(n[1],10)-1,r.setUTCFullYear(t,o),r;if(n=h.exec(e)){r=new Date(0);var a=parseInt(n[1],10);return r.setUTCFullYear(t,0,a),r}if(n=m.exec(e)){r=new 
Date(0),o=parseInt(n[1],10)-1;var u=parseInt(n[2],10);return r.setUTCFullYear(t,o,u),r}if(n=v.exec(e))return i=parseInt(n[1],10)-1,S(t,i);if(n=y.exec(e)){i=parseInt(n[1],10)-1;var s=parseInt(n[2],10)-1;return S(t,i,s)}return null}(k.restDateString,R);if(j){var P,C=j.getTime(),M=0;return T.time&&(M=function(e){var t,n,r;if(t=g.exec(e))return(n=parseFloat(t[1].replace(",",".")))%24*o;if(t=_.exec(e))return n=parseInt(t[1],10),r=parseFloat(t[2].replace(",",".")),n%24*o+r*i;if(t=b.exec(e)){n=parseInt(t[1],10),r=parseInt(t[2],10);var a=parseFloat(t[3].replace(",","."));return n%24*o+r*i+1e3*a}return null}(T.time)),T.timezone?P=function(e){var t,n;return(t=x.exec(e))?0:(t=E.exec(e))?(n=60*parseInt(t[2],10),"+"===t[1]?-n:n):(t=O.exec(e))?(n=60*parseInt(t[2],10)+parseInt(t[3],10),"+"===t[1]?-n:n):0}(T.timezone):(P=new Date(C+M).getTimezoneOffset(),P=new Date(C+M+P*i).getTimezoneOffset()),new Date(C+M+P*i)}return new Date(e)}},function(e,t,n){"use strict";e.exports=n(213)},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=n(220),o=n(117),i=n(224);n.d(t,"Provider",function(){return r.b}),n.d(t,"createProvider",function(){return r.a}),n.d(t,"connectAdvanced",function(){return o.a}),n.d(t,"connect",function(){return i.a})},function(e,t){var n;n=function(){return this}();try{n=n||Function("return this")()||(0,eval)("this")}catch(e){"object"==typeof window&&(n=window)}e.exports=n},function(e,t,n){"use strict";t.__esModule=!0;var r=function(e){return e&&e.__esModule?e:{default:e}}(n(340));t.default=r.default||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e}},function(e,t,n){e.exports={default:n(243),__esModule:!0}},function(e,t,n){"use strict";t.__esModule=!0,t.default=function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}},function(e,t,n){"use strict";t.__esModule=!0;var r=function(e){return 
e&&e.__esModule?e:{default:e}}(n(126));t.default=function(){function e(e,t){for(var n=0;n<t.length;n++){var o=t[n];o.enumerable=o.enumerable||!1,o.configurable=!0,"value"in o&&(o.writable=!0),(0,r.default)(e,o.key,o)}}return function(t,n,r){return n&&e(t.prototype,n),r&&e(t,r),t}}()},function(e,t,n){"use strict";t.__esModule=!0;var r=function(e){return e&&e.__esModule?e:{default:e}}(n(78));t.default=function(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!==(void 0===t?"undefined":(0,r.default)(t))&&"function"!=typeof t?e:t}},function(e,t,n){"use strict";t.__esModule=!0;var r=a(n(266)),o=a(n(270)),i=a(n(78));function a(e){return e&&e.__esModule?e:{default:e}}t.default=function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+(void 0===t?"undefined":(0,i.default)(t)));e.prototype=(0,o.default)(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(r.default?(0,r.default)(e,t):e.__proto__=t)}},function(e,t){var n=e.exports={version:"2.5.7"};"number"==typeof __e&&(__e=n)},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.Video=t.Keyframes=t.Keyframe=t.DetectionBoxes=t.DetectionList=t.Classifier=t.ActiveLink=t.TableCell=t.TableRow=t.TableTuples=t.TableArray=t.TableObject=t.Gate=t.Loader=t.Sidebar=t.Footer=t.Header=void 0;var r=v(n(287)),o=v(n(318)),i=v(n(330)),a=v(n(334)),u=v(n(335)),s=v(n(336)),c=v(n(337)),l=v(n(338)),f=v(n(339)),d=v(n(344)),p=v(n(357)),h=v(n(497)),m=n(498);function v(e){return 
e&&e.__esModule?e:{default:e}}n(500),t.Header=r.default,t.Footer=s.default,t.Sidebar=l.default,t.Loader=c.default,t.Gate=f.default,t.TableObject=m.TableObject,t.TableArray=m.TableArray,t.TableTuples=m.TableTuples,t.TableRow=m.TableRow,t.TableCell=m.TableCell,t.ActiveLink=o.default,t.Classifier=i.default,t.DetectionList=u.default,t.DetectionBoxes=a.default,t.Keyframe=d.default,t.Keyframes=p.default,t.Video=h.default},function(e,t,n){e.exports=n(221)()},function(e,t){var n=e.exports="undefined"!=typeof window&&window.Math==Math?window:"undefined"!=typeof self&&self.Math==Math?self:Function("return this")();"number"==typeof __g&&(__g=n)},function(e,t,n){var r=n(75)("wks"),o=n(55),i=n(13).Symbol,a="function"==typeof i;(e.exports=function(e){return r[e]||(r[e]=a&&i[e]||(a?i:o)("Symbol."+e))}).store=r},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),n.d(t,"createStore",function(){return s}),n.d(t,"combineReducers",function(){return l}),n.d(t,"bindActionCreators",function(){return d}),n.d(t,"applyMiddleware",function(){return h}),n.d(t,"compose",function(){return p}),n.d(t,"__DO_NOT_USE__ActionTypes",function(){return o});var r=n(227),o={INIT:"@@redux/INIT"+Math.random().toString(36).substring(7).split("").join("."),REPLACE:"@@redux/REPLACE"+Math.random().toString(36).substring(7).split("").join(".")},i="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},a=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e};function u(e){if("object"!==(void 0===e?"undefined":i(e))||null===e)return!1;for(var t=e;null!==Object.getPrototypeOf(t);)t=Object.getPrototypeOf(t);return Object.getPrototypeOf(e)===t}function s(e,t,n){var a;if("function"==typeof t&&void 0===n&&(n=t,t=void 0),void 
0!==n){if("function"!=typeof n)throw new Error("Expected the enhancer to be a function.");return n(s)(e,t)}if("function"!=typeof e)throw new Error("Expected the reducer to be a function.");var c=e,l=t,f=[],d=f,p=!1;function h(){d===f&&(d=f.slice())}function m(){if(p)throw new Error("You may not call store.getState() while the reducer is executing. The reducer has already received the state as an argument. Pass it down from the top reducer instead of reading it from the store.");return l}function v(e){if("function"!=typeof e)throw new Error("Expected the listener to be a function.");if(p)throw new Error("You may not call store.subscribe() while the reducer is executing. If you would like to be notified after the store has been updated, subscribe from a component and invoke store.getState() in the callback to access the latest state. See https://redux.js.org/api-reference/store#subscribe(listener) for more details.");var t=!0;return h(),d.push(e),function(){if(t){if(p)throw new Error("You may not unsubscribe from a store listener while the reducer is executing. See https://redux.js.org/api-reference/store#subscribe(listener) for more details.");t=!1,h();var n=d.indexOf(e);d.splice(n,1)}}}function y(e){if(!u(e))throw new Error("Actions must be plain objects. Use custom middleware for async actions.");if(void 0===e.type)throw new Error('Actions may not have an undefined "type" property. 
Have you misspelled a constant?');if(p)throw new Error("Reducers may not dispatch actions.");try{p=!0,l=c(l,e)}finally{p=!1}for(var t=f=d,n=0;n<t.length;n++){(0,t[n])()}return e}return y({type:o.INIT}),(a={dispatch:y,subscribe:v,getState:m,replaceReducer:function(e){if("function"!=typeof e)throw new Error("Expected the nextReducer to be a function.");c=e,y({type:o.REPLACE})}})[r.a]=function(){var e,t=v;return(e={subscribe:function(e){if("object"!==(void 0===e?"undefined":i(e))||null===e)throw new TypeError("Expected the observer to be an object.");function n(){e.next&&e.next(m())}return n(),{unsubscribe:t(n)}}})[r.a]=function(){return this},e},a}function c(e,t){var n=t&&t.type;return"Given "+(n&&'action "'+String(n)+'"'||"an action")+', reducer "'+e+'" returned undefined. To ignore an action, you must explicitly return the previous state. If you want this reducer to hold no value, you can return null instead of undefined.'}function l(e){for(var t=Object.keys(e),n={},r=0;r<t.length;r++){var i=t[r];0,"function"==typeof e[i]&&(n[i]=e[i])}var a=Object.keys(n);var u=void 0;try{!function(e){Object.keys(e).forEach(function(t){var n=e[t];if(void 0===n(void 0,{type:o.INIT}))throw new Error('Reducer "'+t+"\" returned undefined during initialization. If the state passed to the reducer is undefined, you must explicitly return the initial state. The initial state may not be undefined. If you don't want to set a value for this reducer, you can use null instead of undefined.");if(void 0===n(void 0,{type:"@@redux/PROBE_UNKNOWN_ACTION_"+Math.random().toString(36).substring(7).split("").join(".")}))throw new Error('Reducer "'+t+"\" returned undefined when probed with a random type. Don't try to handle "+o.INIT+' or other actions in "redux/*" namespace. They are considered private. Instead, you must return the current state for any unknown actions, unless it is undefined, in which case you must return the initial state, regardless of the action type. 
The initial state may not be undefined, but can be null.')})}(n)}catch(e){u=e}return function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=arguments[1];if(u)throw u;for(var r=!1,o={},i=0;i<a.length;i++){var s=a[i],l=n[s],f=e[s],d=l(f,t);if(void 0===d){var p=c(s,t);throw new Error(p)}o[s]=d,r=r||d!==f}return r?o:e}}function f(e,t){return function(){return t(e.apply(this,arguments))}}function d(e,t){if("function"==typeof e)return f(e,t);if("object"!==(void 0===e?"undefined":i(e))||null===e)throw new Error("bindActionCreators expected an object or a function, instead received "+(null===e?"null":void 0===e?"undefined":i(e))+'. Did you write "import ActionCreators from" instead of "import * as ActionCreators from"?');for(var n=Object.keys(e),r={},o=0;o<n.length;o++){var a=n[o],u=e[a];"function"==typeof u&&(r[a]=f(u,t))}return r}function p(){for(var e=arguments.length,t=Array(e),n=0;n<e;n++)t[n]=arguments[n];return 0===t.length?function(e){return e}:1===t.length?t[0]:t.reduce(function(e,t){return function(){return e(t.apply(void 0,arguments))}})}function h(){for(var e=arguments.length,t=Array(e),n=0;n<e;n++)t[n]=arguments[n];return function(e){return function(){for(var n=arguments.length,r=Array(n),o=0;o<n;o++)r[o]=arguments[o];var i=e.apply(void 0,r),u=function(){throw new Error("Dispatching while constructing your middleware is not allowed. 
Other middleware would not be applied to this dispatch.")},s={getState:i.getState,dispatch:function(){return u.apply(void 0,arguments)}},c=t.map(function(e){return e(s)});return u=p.apply(void 0,c)(i.dispatch),a({},i,{dispatch:u})}}}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=n(319);n.d(t,"BrowserRouter",function(){return r.a});var o=n(320);n.d(t,"HashRouter",function(){return o.a});var i=n(152);n.d(t,"Link",function(){return i.a});var a=n(321);n.d(t,"MemoryRouter",function(){return a.a});var u=n(322);n.d(t,"NavLink",function(){return u.a});var s=n(323);n.d(t,"Prompt",function(){return s.a});var c=n(324);n.d(t,"Redirect",function(){return c.a});var l=n(153);n.d(t,"Route",function(){return l.a});var f=n(92);n.d(t,"Router",function(){return f.a});var d=n(325);n.d(t,"StaticRouter",function(){return d.a});var p=n(326);n.d(t,"Switch",function(){return p.a});var h=n(327);n.d(t,"generatePath",function(){return h.a});var m=n(328);n.d(t,"matchPath",function(){return m.a});var v=n(329);n.d(t,"withRouter",function(){return v.a})},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.login=t.post=t.preloadImage=t.keyframeUri=t.metadataUri=t.imageUrl=t.hashPath=t.clamp=t.px=t.percent=t.timestamp=t.padSeconds=t.courtesyS=t.verify=t.isVerified=t.pad=t.formatName=t.widths=t.isDesktop=t.isMobile=t.isAndroid=t.isiPad=t.isiPhone=void 0;var r=function(e){return e&&e.__esModule?e:{default:e}}(n(93));var o=t.isiPhone=!(!navigator.userAgent.match(/iPhone/i)&&!navigator.userAgent.match(/iPod/i)),i=t.isiPad=!!navigator.userAgent.match(/iPad/i),a=t.isAndroid=!!navigator.userAgent.match(/Android/i),u=t.isMobile=o||i||a,s=t.isDesktop=!u;document.body.parentNode.classList.add(s?"desktop":"mobile");t.widths={th:160,sm:320,md:640,lg:1280};var c="id url cc sa fp md5 sha256".split(" ").map(function(e){return"_"+e}),l=c.map(function(e){return e.toUpperCase()}),f=(t.formatName=function(e){return c.forEach(function(t,n){return 
e=e.replace(t,l[n])}),e.replace(/_/g," ")},t.pad=function(e,t){for(var n=String(e||0);n.length<t;)n="0"+n;return n}),d=t.isVerified=function(e){return 1===e||"1"===e||"verified"===e},p=(t.verify=function(e){return d(e)?"verified":"unverified"},t.courtesyS=function(e,t){return e+" "+(1===e?t:t+"s")},t.padSeconds=function(e){return e<10?"0"+e:e}),h=(t.timestamp=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:0;e/=arguments.length>1&&void 0!==arguments[1]?arguments[1]:25;var t=p(Math.round(e)%60);return(e=Math.floor(e/60))>60?Math.floor(e/60)+":"+p(e%60)+":"+t:e%60+":"+t},t.percent=function(e){return(100*e).toFixed(1)+"%"},t.px=function(e,t){return Math.round(e*t)+"px"},t.clamp=function(e,t,n){return e<t?t:e<n?e:n},t.hashPath=function(e){if(!e||e.length<9)throw new Error("Invalid sha256");return[e.slice(0,3),e.slice(3,6),e.slice(6,9),e].join("/")}),m=t.imageUrl=function(e,t,n){var r=arguments.length>3&&void 0!==arguments[3]?arguments[3]:"th";return["https://sa-vframe.ams3.digitaloceanspaces.com/v1/media/keyframes",d(e)?null:"unverified",h(t),f(n,6),r,"index.jpg"].filter(function(e){return!!e}).join("/")},v=(t.metadataUri=function(e,t){return"/metadata/"+e+"/"+t+"/"},t.keyframeUri=function(e,t){return"/metadata/"+e+"/keyframe/"+f(t,6)+"/"},t.preloadImage=function(e){var t=e.verified,n=e.hash,r=e.frame,o=e.url;n&&r&&(o=m(t,n,r,"md"));var i=new Image,a=!1;i.onload=function(){a||(a=!0,i.onload=null)},i.crossOrigin="anonymous",i.src=o,i.complete&&i.onload()},null),y="",g="",_=(t.post=function(e,t,n){_();var o=void 0;t instanceof FormData?o={Accept:"application/json, application/xml, text/play, text/html, *.*"}:(o={Accept:"application/json, application/xml, text/play, text/html, *.*","Content-Type":"application/json; charset=utf-8"},t=(0,r.default)(t));var i={method:"POST",body:t,headers:o,credentials:"include"};return n&&(o.Authorization="Token "+y),fetch(e,i).then(function(e){return e.json()})},t.login=function(){if(v)return v;var 
e="0.0.0.0"===window.location.hostname||"127.0.0.1"===window.location.hostname;try{var t=JSON.parse(JSON.parse(localStorage.getItem("persist:root")).auth);return y=t.token,g=t.user.username,y&&console.log("logged in",g),v=t,y||e||(window.location.href="/"),t}catch(t){return e||(window.location.href="/"),{}}})},function(e,t,n){var r=n(13),o=n(10),i=n(34),a=n(26),u=n(25),s=function(e,t,n){var c,l,f,d=e&s.F,p=e&s.G,h=e&s.S,m=e&s.P,v=e&s.B,y=e&s.W,g=p?o:o[t]||(o[t]={}),_=g.prototype,b=p?r:h?r[t]:(r[t]||{}).prototype;for(c in p&&(n=t),n)(l=!d&&b&&void 0!==b[c])&&u(g,c)||(f=l?b[c]:n[c],g[c]=p&&"function"!=typeof b[c]?n[c]:v&&l?i(f,r):y&&b[c]==f?function(e){var t=function(t,n,r){if(this instanceof e){switch(arguments.length){case 0:return new e;case 1:return new e(t);case 2:return new e(t,n)}return new e(t,n,r)}return e.apply(this,arguments)};return t.prototype=e.prototype,t}(f):m&&"function"==typeof f?i(Function.call,f):f,m&&((g.virtual||(g.virtual={}))[c]=f,e&s.R&&_&&!_[c]&&a(_,c,f)))};s.F=1,s.G=2,s.S=4,s.P=8,s.B=16,s.W=32,s.U=64,s.R=128,e.exports=s},function(e,t,n){"use strict";e.exports=function(e,t,n,r,o,i,a,u){if(!e){var s;if(void 0===t)s=new Error("Minified exception occurred; use the non-minified dev environment for the full error message and additional helpful warnings.");else{var c=[n,r,o,i,a,u],l=0;(s=new Error(t.replace(/%s/g,function(){return c[l++]}))).name="Invariant Violation"}throw s.framesToPop=1,s}}},function(e,t,n){var r=n(23);e.exports=function(e){if(!r(e))throw TypeError(e+" is not an object!");return e}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.random=t.browse=t.search=t.searchByFrame=t.searchByVerifiedFrame=t.upload=t.updateOptions=t.panic=t.publicUrl=void 0;var r=s(n(4)),o=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}(n(39)),i=n(94),a=n(17),u=s(n(354));function s(e){return 
e&&e.__esModule?e:{default:e}}var c={upload:function(){return"https://syrianarchive.vframe.io/search/api/upload"},search:function(){return"https://syrianarchive.vframe.io/search/api/fetch"},searchByVerifiedFrame:function(e,t,n){return"https://syrianarchive.vframe.io/search/api/search/"+e+"/"+t+"/"+(0,a.pad)(n,6)},searchByFrame:function(e,t){return"https://syrianarchive.vframe.io/search/api/search/"+e+"/"+(0,a.pad)(t,6)},browse:function(e){return"https://syrianarchive.vframe.io/search/api/list/"+e},random:function(){return"https://syrianarchive.vframe.io/search/api/random"},check:function(){return"https://syrianarchive.vframe.io/api/images/import/search"}},l=t.publicUrl={browse:function(e){return"/search/browse/"+e},searchByVerifiedFrame:function(e,t,n){return"/search/keyframe/"+(0,a.verify)(e)+"/"+t+"/"+(0,a.pad)(n,6)},searchByFrame:function(e,t){return"/search/keyframe/"+e+"/"+(0,a.pad)(t,6)},review:function(){return"/search/review/"}},f=function(e,t){return{type:o.search.loading,tag:e,offset:t}},d=function(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:0;return{type:o.search.loaded,tag:e,data:t,offset:n}},p=function(e,t){return{type:o.search.error,tag:e,err:t}};t.panic=function(){return function(e){i.history.push("/search/"),e({type:o.search.panic})}},t.updateOptions=function(e){return function(t){t({type:o.search.update_options,opt:e})}},t.upload=function(e,t){return function(n){var o=i.store.getState().search.options,u=new FormData;u.append("query_img",e),u.append("limit",o.perPage),t||n(f("query")),(0,a.post)(c.upload(),u).then(function(e){if(t){var o=e.query.timing;e.query=(0,r.default)({},t,{timing:o});var a={};if(e.query.crop){var u=e.query.crop,s=u.x,c=u.y,l=u.w,f=u.h;a.crop=[s,c,l,f].map(function(e){return parseInt(e,10)}).join(",")}t.url&&!t.hash&&(a.url=t.url)}else e.query.url&&!window.location.search.match(e.query.url)&&i.history.push("/search/?url="+e.query.url);n(d("query",e))}).catch(function(e){return 
n(p("query",e))})}},t.searchByVerifiedFrame=function(e,t,n){var r=arguments.length>3&&void 0!==arguments[3]?arguments[3]:0;return function(o){var s=i.store.getState().search.options;o(f("query",r));var l=u.default.stringify({limit:s.perPage,offset:r});(0,a.preloadImage)({verified:e,hash:t,frame:n}),fetch(c.searchByVerifiedFrame(e,t,n)+"?"+l,{method:"GET",mode:"cors"}).then(function(e){return e.json()}).then(function(e){return o(d("query",e,r))}).catch(function(e){return o(p("query",e))})}},t.searchByFrame=function(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:0;return function(r){var o=i.store.getState().search.options;r(f("query",n));var s=u.default.stringify({limit:o.perPage,offset:n});(0,a.preloadImage)({verified:!1,hash:e,frame:t}),fetch(c.searchByFrame(e,t)+"?"+s,{method:"GET",mode:"cors"}).then(function(e){return e.json()}).then(function(e){return r(d("query",e,n))}).catch(function(e){return r(p("query",e))})}},t.search=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:0;return function(n){var r=i.store.getState().search.options;n(f("query",t));var o=u.default.stringify({url:e,limit:r.perPage,offset:t});0===e.indexOf("static")&&(0,a.preloadImage)({uri:e}),fetch(c.search(e)+"?"+o,{method:"GET",mode:"cors"}).then(function(e){return e.json()}).then(function(e){return n(d("query",e,t))}).catch(function(e){return n(p("query",e))})}},t.browse=function(e){return function(t){var n="browse";t(f(n)),fetch(c[n](e),{method:"GET",mode:"cors"}).then(function(e){return e.json()}).then(function(e){return t(d(n,e))}).catch(function(e){return t(p(n,e))})}},t.random=function(){return function(e){var t=i.store.getState().search.options,n=u.default.stringify({limit:t.perPage});e(f("query")),fetch(c.random()+"?"+n,{method:"GET",mode:"cors"}).then(function(e){return e.json()}).then(function(t){e(d("query",t)),i.history.push(l.searchByVerifiedFrame(t.query.verified,t.query.hash,t.query.frame))}).catch(function(t){return 
e(p("query",t))})}}},function(e,t,n){var r=n(20),o=n(125),i=n(77),a=Object.defineProperty;t.f=n(24)?Object.defineProperty:function(e,t,n){if(r(e),t=i(t,!0),r(n),o)try{return a(e,t,n)}catch(e){}if("get"in n||"set"in n)throw TypeError("Accessors not supported!");return"value"in n&&(e[t]=n.value),e}},function(e,t){e.exports=function(e){return"object"==typeof e?null!==e:"function"==typeof e}},function(e,t,n){e.exports=!n(35)(function(){return 7!=Object.defineProperty({},"a",{get:function(){return 7}}).a})},function(e,t){var n={}.hasOwnProperty;e.exports=function(e,t){return n.call(e,t)}},function(e,t,n){var r=n(22),o=n(43);e.exports=n(24)?function(e,t,n){return r.f(e,t,o(1,n))}:function(e,t,n){return e[t]=n,e}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=n(275);n.d(t,"createBrowserHistory",function(){return r.a});var o=n(278);n.d(t,"createHashHistory",function(){return o.a});var i=n(279);n.d(t,"createMemoryHistory",function(){return i.a});var a=n(62);n.d(t,"createLocation",function(){return a.a}),n.d(t,"locationsAreEqual",function(){return a.b});var u=n(47);n.d(t,"parsePath",function(){return u.d}),n.d(t,"createPath",function(){return u.b})},function(e,t,n){e.exports={default:n(331),__esModule:!0}},function(e,t,n){var r=n(0),o=n(30);e.exports=function(e){var t=r(e),n=t.getFullYear(),i=new Date(0);i.setFullYear(n+1,0,4),i.setHours(0,0,0,0);var a=o(i),u=new Date(0);u.setFullYear(n,0,4),u.setHours(0,0,0,0);var s=o(u);return t.getTime()>=a.getTime()?n+1:t.getTime()>=s.getTime()?n:n-1}},function(e,t,n){var r=n(66);e.exports=function(e){return r(e,{weekStartsOn:1})}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e);return t.setHours(0,0,0,0),t}},function(e,t){"function"==typeof Object.create?e.exports=function(e,t){e.super_=t,e.prototype=Object.create(t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}})}:e.exports=function(e,t){e.super_=t;var n=function(){};n.prototype=t.prototype,e.prototype=new 
n,e.prototype.constructor=e}},function(e,t,n){"use strict";var r=n(70),o=Object.keys||function(e){var t=[];for(var n in e)t.push(n);return t};e.exports=f;var i=n(54);i.inherits=n(32);var a=n(201),u=n(111);i.inherits(f,a);for(var s=o(u.prototype),c=0;c<s.length;c++){var l=s[c];f.prototype[l]||(f.prototype[l]=u.prototype[l])}function f(e){if(!(this instanceof f))return new f(e);a.call(this,e),u.call(this,e),e&&!1===e.readable&&(this.readable=!1),e&&!1===e.writable&&(this.writable=!1),this.allowHalfOpen=!0,e&&!1===e.allowHalfOpen&&(this.allowHalfOpen=!1),this.once("end",d)}function d(){this.allowHalfOpen||this._writableState.ended||r.nextTick(p,this)}function p(e){e.end()}Object.defineProperty(f.prototype,"writableHighWaterMark",{enumerable:!1,get:function(){return this._writableState.highWaterMark}}),Object.defineProperty(f.prototype,"destroyed",{get:function(){return void 0!==this._readableState&&void 0!==this._writableState&&(this._readableState.destroyed&&this._writableState.destroyed)},set:function(e){void 0!==this._readableState&&void 0!==this._writableState&&(this._readableState.destroyed=e,this._writableState.destroyed=e)}}),f.prototype._destroy=function(e,t){this.push(null),this.end(),r.nextTick(t,e)}},function(e,t,n){var r=n(56);e.exports=function(e,t,n){if(r(e),void 0===t)return e;switch(n){case 1:return function(n){return e.call(t,n)};case 2:return function(n,r){return e.call(t,n,r)};case 3:return function(n,r,o){return e.call(t,n,r,o)}}return function(){return e.apply(t,arguments)}}},function(e,t){e.exports=function(e){try{return!!e()}catch(e){return!0}}},function(e,t){e.exports={}},function(e,t,n){var r=n(130),o=n(73);e.exports=function(e){return r(o(e))}},function(e,t,n){"use strict";var r=function(){};e.exports=r},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=t.asType=function(e,t){return[e,t].join("_").toUpperCase()},o=t.tagAsType=function(e,t){return t.reduce(function(t,n){return 
t[n]=r(e,n),t},{})};t.metadata=o("metadata",["loading","loaded","loaded_many","error","set_hash"]),t.search=o("search",["loading","loaded","error","panic","update_options"]),t.review=o("review",["loading","loaded","error","save","unsave","refresh","clear","dedupe","create","set_count"]),t.init="@@INIT"},function(e,t){var n,r,o=e.exports={};function i(){throw new Error("setTimeout has not been defined")}function a(){throw new Error("clearTimeout has not been defined")}function u(e){if(n===setTimeout)return setTimeout(e,0);if((n===i||!n)&&setTimeout)return n=setTimeout,setTimeout(e,0);try{return n(e,0)}catch(t){try{return n.call(null,e,0)}catch(t){return n.call(this,e,0)}}}!function(){try{n="function"==typeof setTimeout?setTimeout:i}catch(e){n=i}try{r="function"==typeof clearTimeout?clearTimeout:a}catch(e){r=a}}();var s,c=[],l=!1,f=-1;function d(){l&&s&&(l=!1,s.length?c=s.concat(c):f=-1,c.length&&p())}function p(){if(!l){var e=u(d);l=!0;for(var t=c.length;t;){for(s=c,c=[];++f<t;)s&&s[f].run();f=-1,t=c.length}s=null,l=!1,function(e){if(r===clearTimeout)return clearTimeout(e);if((r===a||!r)&&clearTimeout)return r=clearTimeout,clearTimeout(e);try{r(e)}catch(t){try{return r.call(null,e)}catch(t){return r.call(this,e)}}}(e)}}function h(e,t){this.fun=e,this.array=t}function m(){}o.nextTick=function(e){var t=new Array(arguments.length-1);if(arguments.length>1)for(var n=1;n<arguments.length;n++)t[n-1]=arguments[n];c.push(new h(e,t)),1!==c.length||l||u(p)},h.prototype.run=function(){this.fun.apply(null,this.array)},o.title="browser",o.browser=!0,o.env={},o.argv=[],o.version="",o.versions={},o.on=m,o.addListener=m,o.once=m,o.off=m,o.removeListener=m,o.removeAllListeners=m,o.emit=m,o.prependListener=m,o.prependOnceListener=m,o.listeners=function(e){return[]},o.binding=function(e){throw new Error("process.binding is not supported")},o.cwd=function(){return"/"},o.chdir=function(e){throw new Error("process.chdir is not supported")},o.umask=function(){return 0}},function(e,t,n){var 
r=n(73);e.exports=function(e){return Object(r(e))}},function(e,t){e.exports=!0},function(e,t){e.exports=function(e,t){return{enumerable:!(1&e),configurable:!(2&e),writable:!(4&e),value:t}}},function(e,t,n){"use strict";var r=n(249)(!0);n(127)(String,"String",function(e){this._t=String(e),this._i=0},function(){var e,t=this._t,n=this._i;return n>=t.length?{value:void 0,done:!0}:(e=r(t,n),this._i+=e.length,{value:e,done:!1})})},function(e,t,n){var r=n(129),o=n(82);e.exports=Object.keys||function(e){return r(e,o)}},function(e,t){var n={}.toString;e.exports=function(e){return n.call(e).slice(8,-1)}},function(e,t,n){"use strict";n.d(t,"a",function(){return r}),n.d(t,"f",function(){return o}),n.d(t,"c",function(){return i}),n.d(t,"e",function(){return a}),n.d(t,"g",function(){return u}),n.d(t,"d",function(){return s}),n.d(t,"b",function(){return c});var r=function(e){return"/"===e.charAt(0)?e:"/"+e},o=function(e){return"/"===e.charAt(0)?e.substr(1):e},i=function(e,t){return new RegExp("^"+t+"(\\/|\\?|#|$)","i").test(e)},a=function(e,t){return i(e,t)?e.substr(t.length):e},u=function(e){return"/"===e.charAt(e.length-1)?e.slice(0,-1):e},s=function(e){var t=e||"/",n="",r="",o=t.indexOf("#");-1!==o&&(r=t.substr(o),t=t.substr(0,o));var i=t.indexOf("?");return-1!==i&&(n=t.substr(i),t=t.substr(0,i)),{pathname:t,search:"?"===n?"":n,hash:"#"===r?"":r}},c=function(e){var t=e.pathname,n=e.search,r=e.hash,o=t||"/";return n&&"?"!==n&&(o+="?"===n.charAt(0)?n:"?"+n),r&&"#"!==r&&(o+="#"===r.charAt(0)?r:"#"+r),o}},function(e,t){e.exports=function(e){var t=[];return t.toString=function(){return this.map(function(t){var n=function(e,t){var n=e[1]||"",r=e[3];if(!r)return n;if(t&&"function"==typeof btoa){var o=function(e){return"/*# sourceMappingURL=data:application/json;charset=utf-8;base64,"+btoa(unescape(encodeURIComponent(JSON.stringify(e))))+" */"}(r),i=r.sources.map(function(e){return"/*# sourceURL="+r.sourceRoot+e+" 
*/"});return[n].concat(i).concat([o]).join("\n")}return[n].join("\n")}(t,e);return t[2]?"@media "+t[2]+"{"+n+"}":n}).join("")},t.i=function(e,n){"string"==typeof e&&(e=[[null,e,""]]);for(var r={},o=0;o<this.length;o++){var i=this[o][0];"number"==typeof i&&(r[i]=!0)}for(o=0;o<e.length;o++){var a=e[o];"number"==typeof a[0]&&r[a[0]]||(n&&!a[2]?a[2]=n:n&&(a[2]="("+a[2]+") and ("+n+")"),t.push(a))}},t}},function(e,t,n){var r={},o=function(e){var t;return function(){return void 0===t&&(t=e.apply(this,arguments)),t}}(function(){return window&&document&&document.all&&!window.atob}),i=function(e){var t={};return function(e){if("function"==typeof e)return e();if(void 0===t[e]){var n=function(e){return document.querySelector(e)}.call(this,e);if(window.HTMLIFrameElement&&n instanceof window.HTMLIFrameElement)try{n=n.contentDocument.head}catch(e){n=null}t[e]=n}return t[e]}}(),a=null,u=0,s=[],c=n(317);function l(e,t){for(var n=0;n<e.length;n++){var o=e[n],i=r[o.id];if(i){i.refs++;for(var a=0;a<i.parts.length;a++)i.parts[a](o.parts[a]);for(;a<o.parts.length;a++)i.parts.push(v(o.parts[a],t))}else{var u=[];for(a=0;a<o.parts.length;a++)u.push(v(o.parts[a],t));r[o.id]={id:o.id,refs:1,parts:u}}}}function f(e,t){for(var n=[],r={},o=0;o<e.length;o++){var i=e[o],a=t.base?i[0]+t.base:i[0],u={css:i[1],media:i[2],sourceMap:i[3]};r[a]?r[a].parts.push(u):n.push(r[a]={id:a,parts:[u]})}return n}function d(e,t){var n=i(e.insertInto);if(!n)throw new Error("Couldn't find a style target. 
This probably means that the value for the 'insertInto' parameter is invalid.");var r=s[s.length-1];if("top"===e.insertAt)r?r.nextSibling?n.insertBefore(t,r.nextSibling):n.appendChild(t):n.insertBefore(t,n.firstChild),s.push(t);else if("bottom"===e.insertAt)n.appendChild(t);else{if("object"!=typeof e.insertAt||!e.insertAt.before)throw new Error("[Style Loader]\n\n Invalid value for parameter 'insertAt' ('options.insertAt') found.\n Must be 'top', 'bottom', or Object.\n (https://github.com/webpack-contrib/style-loader#insertat)\n");var o=i(e.insertInto+" "+e.insertAt.before);n.insertBefore(t,o)}}function p(e){if(null===e.parentNode)return!1;e.parentNode.removeChild(e);var t=s.indexOf(e);t>=0&&s.splice(t,1)}function h(e){var t=document.createElement("style");return void 0===e.attrs.type&&(e.attrs.type="text/css"),m(t,e.attrs),d(e,t),t}function m(e,t){Object.keys(t).forEach(function(n){e.setAttribute(n,t[n])})}function v(e,t){var n,r,o,i;if(t.transform&&e.css){if(!(i=t.transform(e.css)))return function(){};e.css=i}if(t.singleton){var s=u++;n=a||(a=h(t)),r=g.bind(null,n,s,!1),o=g.bind(null,n,s,!0)}else e.sourceMap&&"function"==typeof URL&&"function"==typeof URL.createObjectURL&&"function"==typeof URL.revokeObjectURL&&"function"==typeof Blob&&"function"==typeof btoa?(n=function(e){var t=document.createElement("link");return void 0===e.attrs.type&&(e.attrs.type="text/css"),e.attrs.rel="stylesheet",m(t,e.attrs),d(e,t),t}(t),r=function(e,t,n){var r=n.css,o=n.sourceMap,i=void 0===t.convertToAbsoluteUrls&&o;(t.convertToAbsoluteUrls||i)&&(r=c(r));o&&(r+="\n/*# sourceMappingURL=data:application/json;base64,"+btoa(unescape(encodeURIComponent(JSON.stringify(o))))+" */");var a=new Blob([r],{type:"text/css"}),u=e.href;e.href=URL.createObjectURL(a),u&&URL.revokeObjectURL(u)}.bind(null,n,t),o=function(){p(n),n.href&&URL.revokeObjectURL(n.href)}):(n=h(t),r=function(e,t){var 
n=t.css,r=t.media;r&&e.setAttribute("media",r);if(e.styleSheet)e.styleSheet.cssText=n;else{for(;e.firstChild;)e.removeChild(e.firstChild);e.appendChild(document.createTextNode(n))}}.bind(null,n),o=function(){p(n)});return r(e),function(t){if(t){if(t.css===e.css&&t.media===e.media&&t.sourceMap===e.sourceMap)return;r(e=t)}else o()}}e.exports=function(e,t){if("undefined"!=typeof DEBUG&&DEBUG&&"object"!=typeof document)throw new Error("The style-loader cannot be used in a non-browser environment");(t=t||{}).attrs="object"==typeof t.attrs?t.attrs:{},t.singleton||"boolean"==typeof t.singleton||(t.singleton=o()),t.insertInto||(t.insertInto="head"),t.insertAt||(t.insertAt="bottom");var n=f(e,t);return l(n,t),function(e){for(var o=[],i=0;i<n.length;i++){var a=n[i];(u=r[a.id]).refs--,o.push(u)}e&&l(f(e,t),t);for(i=0;i<o.length;i++){var u;if(0===(u=o[i]).refs){for(var s=0;s<u.parts.length;s++)u.parts[s]();delete r[u.id]}}}};var y=function(){var e=[];return function(t,n){return e[t]=n,e.filter(Boolean).join("\n")}}();function g(e,t,n,r){var o=n?"":r.css;if(e.styleSheet)e.styleSheet.cssText=y(t,o);else{var i=document.createTextNode(o),a=e.childNodes;a[t]&&e.removeChild(a[t]),a.length?e.insertBefore(i,a[t]):e.appendChild(i)}}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e),o=Number(t);return n.setDate(n.getDate()+o),n}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e).getTime(),o=Number(t);return new Date(n+o)}},function(e,t,n){var r=n(29),o=n(30);e.exports=function(e){var t=r(e),n=new Date(0);return n.setFullYear(t,0,4),n.setHours(0,0,0,0),o(n)}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e).getTime(),o=r(t).getTime();return n<o?-1:n>o?1:0}},function(e,t,n){(function(e){function n(e){return Object.prototype.toString.call(e)}t.isArray=function(e){return Array.isArray?Array.isArray(e):"[object Array]"===n(e)},t.isBoolean=function(e){return"boolean"==typeof e},t.isNull=function(e){return null===e},t.isNullOrUndefined=function(e){return 
null==e},t.isNumber=function(e){return"number"==typeof e},t.isString=function(e){return"string"==typeof e},t.isSymbol=function(e){return"symbol"==typeof e},t.isUndefined=function(e){return void 0===e},t.isRegExp=function(e){return"[object RegExp]"===n(e)},t.isObject=function(e){return"object"==typeof e&&null!==e},t.isDate=function(e){return"[object Date]"===n(e)},t.isError=function(e){return"[object Error]"===n(e)||e instanceof Error},t.isFunction=function(e){return"function"==typeof e},t.isPrimitive=function(e){return null===e||"boolean"==typeof e||"number"==typeof e||"string"==typeof e||"symbol"==typeof e||void 0===e},t.isBuffer=e.isBuffer}).call(t,n(204).Buffer)},function(e,t){var n=0,r=Math.random();e.exports=function(e){return"Symbol(".concat(void 0===e?"":e,")_",(++n+r).toString(36))}},function(e,t){e.exports=function(e){if("function"!=typeof e)throw TypeError(e+" is not a function!");return e}},function(e,t,n){var r=n(22).f,o=n(25),i=n(14)("toStringTag");e.exports=function(e,t,n){e&&!o(e=n?e:e.prototype,i)&&r(e,i,{configurable:!0,value:t})}},function(e,t,n){n(254);for(var r=n(13),o=n(26),i=n(36),a=n(14)("toStringTag"),u="CSSRuleList,CSSStyleDeclaration,CSSValueList,ClientRectList,DOMRectList,DOMStringList,DOMTokenList,DataTransferItemList,FileList,HTMLAllCollection,HTMLCollection,HTMLFormElement,HTMLSelectElement,MediaList,MimeTypeArray,NamedNodeMap,NodeList,PaintRequestList,Plugin,PluginArray,SVGLengthList,SVGNumberList,SVGPathSegList,SVGPointList,SVGStringList,SVGTransformList,SourceBufferList,StyleSheetList,TextTrackCueList,TextTrackList,TouchList".split(","),s=0;s<u.length;s++){var c=u[s],l=r[c],f=l&&l.prototype;f&&!f[a]&&o(f,a,c),i[c]=i.Array}},function(e,t){t.f={}.propertyIsEnumerable},function(e,t,n){"use strict";(function(e){Object.defineProperty(t,"__esModule",{value:!0});var n="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof 
Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},r=t.LOCATION_CHANGE="@@router/LOCATION_CHANGE",o=(t.onLocationChanged=function(e,t){return{type:g("LOCATION_CHANGE"),payload:{location:e,action:t}}},t.CALL_HISTORY_METHOD="@@router/CALL_HISTORY_METHOD"),i=function(e){return function(){for(var t=arguments.length,n=Array(t),r=0;r<t;r++)n[r]=arguments[r];return{type:g("CALL_HISTORY_METHOD"),payload:{method:e,args:n}}}},a=t.push=g("updateLocation")("push"),u=t.replace=g("updateLocation")("replace"),s=t.go=g("updateLocation")("go"),c=t.goBack=g("updateLocation")("goBack"),l=t.goForward=g("updateLocation")("goForward");t.routerActions={push:g("push"),replace:g("replace"),go:g("go"),goBack:g("goBack"),goForward:g("goForward")};function f(){try{if(e)return e}catch(e){try{if(window)return window}catch(e){return this}}}var d=null;function p(){if(null===d){var e=f();e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__||(e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__=0),d=__$$GLOBAL_REWIRE_NEXT_MODULE_ID__++}return d}function h(){var e=f();return e.__$$GLOBAL_REWIRE_REGISTRY__||(e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)),__$$GLOBAL_REWIRE_REGISTRY__}function m(){var e=p(),t=h(),n=t[e];return n||(t[e]=Object.create(null),n=t[e]),n}!function(){var e=f();e.__rewire_reset_all__||(e.__rewire_reset_all__=function(){e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)})}();var v="__INTENTIONAL_UNDEFINED__",y={};function g(e){var t=m();if(void 0===t[e])return function(e){switch(e){case"LOCATION_CHANGE":return r;case"CALL_HISTORY_METHOD":return o;case"updateLocation":return i;case"push":return a;case"replace":return u;case"go":return s;case"goBack":return c;case"goForward":return l}return}(e);var n=t[e];return n===v?void 0:n}function _(e,t){var r=m();if("object"!==(void 0===e?"undefined":n(e)))return r[e]=void 0===t?v:t,function(){b(e)};Object.keys(e).forEach(function(t){r[t]=e[t]})}function b(e){var t=m();delete t[e],0==Object.keys(t).length&&delete h()[p]}function w(e){var 
t=m(),n=Object.keys(e),r={};function o(){n.forEach(function(e){t[e]=r[e]})}return function(i){n.forEach(function(n){r[n]=t[n],t[n]=e[n]});var a=i();return a&&"function"==typeof a.then?a.then(o).catch(o):o(),a}}!function(){function e(e,t){Object.defineProperty(y,e,{value:t,enumerable:!1,configurable:!0})}e("__get__",g),e("__GetDependency__",g),e("__Rewire__",_),e("__set__",_),e("__reset__",b),e("__ResetDependency__",b),e("__with__",w)}(),t.__get__=g,t.__GetDependency__=g,t.__Rewire__=_,t.__set__=_,t.__ResetDependency__=b,t.__RewireAPI__=y,t.default=y}).call(t,n(3))},function(e,t,n){"use strict";var r=function(){};e.exports=r},function(e,t,n){"use strict";n.d(t,"a",function(){return u}),n.d(t,"b",function(){return s});var r=n(276),o=n(277),i=n(47),a=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e},u=function(e,t,n,o){var u=void 0;"string"==typeof e?(u=Object(i.d)(e)).state=t:(void 0===(u=a({},e)).pathname&&(u.pathname=""),u.search?"?"!==u.search.charAt(0)&&(u.search="?"+u.search):u.search="",u.hash?"#"!==u.hash.charAt(0)&&(u.hash="#"+u.hash):u.hash="",void 0!==t&&void 0===u.state&&(u.state=t));try{u.pathname=decodeURI(u.pathname)}catch(e){throw e instanceof URIError?new URIError('Pathname "'+u.pathname+'" could not be decoded. 
This is likely caused by an invalid percent-encoding.'):e}return n&&(u.key=n),o?u.pathname?"/"!==u.pathname.charAt(0)&&(u.pathname=Object(r.a)(u.pathname,o.pathname)):u.pathname=o.pathname:u.pathname||(u.pathname="/"),u},s=function(e,t){return e.pathname===t.pathname&&e.search===t.search&&e.hash===t.hash&&e.key===t.key&&Object(o.a)(e.state,t.state)}},function(e,t,n){"use strict";var r=n(38),o=n.n(r),i=n(19),a=n.n(i),u=n(1),s=n.n(u),c=n(12),l=n.n(c),f=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e};function d(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}var p=function(e){function t(){var n,r;!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,t);for(var o=arguments.length,i=Array(o),a=0;a<o;a++)i[a]=arguments[a];return n=r=d(this,e.call.apply(e,[this].concat(i))),r.state={match:r.computeMatch(r.props.history.location.pathname)},d(r,n)}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),t.prototype.getChildContext=function(){return{router:f({},this.context.router,{history:this.props.history,route:{location:this.props.history.location,match:this.state.match}})}},t.prototype.computeMatch=function(e){return{path:"/",url:"/",params:{},isExact:"/"===e}},t.prototype.componentWillMount=function(){var e=this,t=this.props,n=t.children,r=t.history;a()(null==n||1===s.a.Children.count(n),"A <Router> may have only one child 
element"),this.unlisten=r.listen(function(){e.setState({match:e.computeMatch(r.location.pathname)})})},t.prototype.componentWillReceiveProps=function(e){o()(this.props.history===e.history,"You cannot change <Router history>")},t.prototype.componentWillUnmount=function(){this.unlisten()},t.prototype.render=function(){var e=this.props.children;return e?s.a.Children.only(e):null},t}(s.a.Component);p.propTypes={history:l.a.object.isRequired,children:l.a.node},p.contextTypes={router:l.a.object},p.childContextTypes={router:l.a.object.isRequired},t.a=p},function(e,t,n){"use strict";var r=n(140),o=n.n(r),i={},a=0;t.a=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=arguments[2];"string"==typeof t&&(t={path:t});var r=t,u=r.path,s=r.exact,c=void 0!==s&&s,l=r.strict,f=void 0!==l&&l,d=r.sensitive,p=void 0!==d&&d;if(null==u)return n;var h=function(e,t){var n=""+t.end+t.strict+t.sensitive,r=i[n]||(i[n]={});if(r[e])return r[e];var u=[],s={re:o()(e,u,t),keys:u};return a<1e4&&(r[e]=s,a++),s}(u,{end:c,strict:f,sensitive:p}),m=h.re,v=h.keys,y=m.exec(e);if(!y)return null;var g=y[0],_=y.slice(1),b=e===g;return c&&!b?null:{path:u,url:"/"===u&&""===g?"/":g,isExact:b,params:v.reduce(function(e,t,n){return e[t.name]=_[n],e},{})}}},function(e,t,n){"use strict";t.__esModule=!0;var r=function(e){return e&&e.__esModule?e:{default:e}}(n(126));t.default=function(e,t,n){return t in e?(0,r.default)(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=t&&Number(t.weekStartsOn)||0,o=r(e),i=o.getDay(),a=(i<n?7:0)+i-n;return o.setDate(o.getDate()-a),o.setHours(0,0,0,0),o}},function(e,t,n){var r=n(31),o=6e4,i=864e5;e.exports=function(e,t){var n=r(e),a=r(t),u=n.getTime()-n.getTimezoneOffset()*o,s=a.getTime()-a.getTimezoneOffset()*o;return Math.round((u-s)/i)}},function(e,t,n){var r=n(0),o=n(100);e.exports=function(e,t){var n=r(e),i=Number(t),a=n.getMonth()+i,u=new 
Date(0);u.setFullYear(n.getFullYear(),a,1),u.setHours(0,0,0,0);var s=o(u);return n.setMonth(a,Math.min(s,n.getDate())),n}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e),o=r(t);return n.getTime()-o.getTime()}},function(e,t,n){"use strict";(function(t){!t.version||0===t.version.indexOf("v0.")||0===t.version.indexOf("v1.")&&0!==t.version.indexOf("v1.8.")?e.exports={nextTick:function(e,n,r,o){if("function"!=typeof e)throw new TypeError('"callback" argument must be a function');var i,a,u=arguments.length;switch(u){case 0:case 1:return t.nextTick(e);case 2:return t.nextTick(function(){e.call(null,n)});case 3:return t.nextTick(function(){e.call(null,n,r)});case 4:return t.nextTick(function(){e.call(null,n,r,o)});default:for(i=new Array(u-1),a=0;a<i.length;)i[a++]=arguments[a];return t.nextTick(function(){e.apply(null,i)})}}}:e.exports=t}).call(t,n(40))},function(e,t,n){var r=n(204),o=r.Buffer;function i(e,t){for(var n in e)t[n]=e[n]}function a(e,t,n){return o(e,t,n)}o.from&&o.alloc&&o.allocUnsafe&&o.allocUnsafeSlow?e.exports=r:(i(r,t),t.Buffer=a),i(o,a),a.from=function(e,t,n){if("number"==typeof e)throw new TypeError("Argument must not be a number");return o(e,t,n)},a.alloc=function(e,t,n){if("number"!=typeof e)throw new TypeError("Argument must be a number");var r=o(e);return void 0!==t?"string"==typeof n?r.fill(t,n):r.fill(t):r.fill(0),r},a.allocUnsafe=function(e){if("number"!=typeof e)throw new TypeError("Argument must be a number");return o(e)},a.allocUnsafeSlow=function(e){if("number"!=typeof e)throw new TypeError("Argument must be a number");return r.SlowBuffer(e)}},function(e,t,n){"use strict";t.a=function(e){"undefined"!=typeof console&&"function"==typeof console.error&&console.error(e);try{throw new Error(e)}catch(e){}}},function(e,t){e.exports=function(e){if(void 0==e)throw TypeError("Can't call method on "+e);return e}},function(e,t,n){var r=n(75)("keys"),o=n(55);e.exports=function(e){return r[e]||(r[e]=o(e))}},function(e,t,n){var 
r=n(10),o=n(13),i=o["__core-js_shared__"]||(o["__core-js_shared__"]={});(e.exports=function(e,t){return i[e]||(i[e]=void 0!==t?t:{})})("versions",[]).push({version:r.version,mode:n(42)?"pure":"global",copyright:"© 2018 Denis Pushkarev (zloirock.ru)"})},function(e,t,n){var r=n(23),o=n(13).document,i=r(o)&&r(o.createElement);e.exports=function(e){return i?o.createElement(e):{}}},function(e,t,n){var r=n(23);e.exports=function(e,t){if(!r(e))return e;var n,o;if(t&&"function"==typeof(n=e.toString)&&!r(o=n.call(e)))return o;if("function"==typeof(n=e.valueOf)&&!r(o=n.call(e)))return o;if(!t&&"function"==typeof(n=e.toString)&&!r(o=n.call(e)))return o;throw TypeError("Can't convert object to primitive value")}},function(e,t,n){"use strict";t.__esModule=!0;var r=a(n(247)),o=a(n(257)),i="function"==typeof o.default&&"symbol"==typeof r.default?function(e){return typeof e}:function(e){return e&&"function"==typeof o.default&&e.constructor===o.default&&e!==o.default.prototype?"symbol":typeof e};function a(e){return e&&e.__esModule?e:{default:e}}t.default="function"==typeof o.default&&"symbol"===i(r.default)?function(e){return void 0===e?"undefined":i(e)}:function(e){return e&&"function"==typeof o.default&&e.constructor===o.default&&e!==o.default.prototype?"symbol":void 0===e?"undefined":i(e)}},function(e,t){var n=Math.ceil,r=Math.floor;e.exports=function(e){return isNaN(e=+e)?0:(e>0?r:n)(e)}},function(e,t,n){var r=n(20),o=n(251),i=n(82),a=n(74)("IE_PROTO"),u=function(){},s=function(){var e,t=n(76)("iframe"),r=i.length;for(t.style.display="none",n(131).appendChild(t),t.src="javascript:",(e=t.contentWindow.document).open(),e.write("<script>document.F=Object<\/script>"),e.close(),s=e.F;r--;)delete s.prototype[i[r]];return s()};e.exports=Object.create||function(e,t){var n;return null!==e?(u.prototype=r(e),n=new u,u.prototype=null,n[a]=e):n=s(),void 0===t?n:o(n,t)}},function(e,t,n){var r=n(79),o=Math.min;e.exports=function(e){return 
e>0?o(r(e),9007199254740991):0}},function(e,t){e.exports="constructor,hasOwnProperty,isPrototypeOf,propertyIsEnumerable,toLocaleString,toString,valueOf".split(",")},function(e,t,n){t.f=n(14)},function(e,t,n){var r=n(13),o=n(10),i=n(42),a=n(83),u=n(22).f;e.exports=function(e){var t=o.Symbol||(o.Symbol=i?{}:r.Symbol||{});"_"==e.charAt(0)||e in t||u(t,e,{value:a.f(e)})}},function(e,t){t.f=Object.getOwnPropertySymbols},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=n(136);n.d(t,"MemoryRouter",function(){return r.a});var o=n(138);n.d(t,"Prompt",function(){return o.a});var i=n(139);n.d(t,"Redirect",function(){return i.a});var a=n(89);n.d(t,"Route",function(){return a.a});var u=n(63);n.d(t,"Router",function(){return u.a});var s=n(141);n.d(t,"StaticRouter",function(){return s.a});var c=n(142);n.d(t,"Switch",function(){return c.a});var l=n(88);n.d(t,"generatePath",function(){return l.a});var f=n(64);n.d(t,"matchPath",function(){return f.a});var d=n(143);n.d(t,"withRouter",function(){return d.a})},function(e,t,n){"use strict";var r=n(61),o=n.n(r);t.a=function(){var e=null,t=[];return{setPrompt:function(t){return o()(null==e,"A history supports only one prompt at a time"),e=t,function(){e===t&&(e=null)}},confirmTransitionTo:function(t,n,r,i){if(null!=e){var a="function"==typeof e?e(t,n):e;"string"==typeof a?"function"==typeof r?r(a,i):(o()(!1,"A history needs a getUserConfirmation function in order to use a prompt message"),i(!0)):i(!1!==a)}else i(!0)},appendListener:function(e){var n=!0,r=function(){n&&e.apply(void 0,arguments)};return t.push(r),function(){n=!1,t=t.filter(function(e){return e!==r})}},notifyListeners:function(){for(var e=arguments.length,n=Array(e),r=0;r<e;r++)n[r]=arguments[r];t.forEach(function(e){return e.apply(void 0,n)})}}}},function(e,t,n){"use strict";var r=n(140),o=n.n(r),i={},a=0;t.a=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"/",t=arguments.length>1&&void 
0!==arguments[1]?arguments[1]:{};return"/"===e?e:function(e){var t=e,n=i[t]||(i[t]={});if(n[e])return n[e];var r=o.a.compile(e);return a<1e4&&(n[e]=r,a++),r}(e)(t,{pretty:!0})}},function(e,t,n){"use strict";var r=n(38),o=n.n(r),i=n(19),a=n.n(i),u=n(1),s=n.n(u),c=n(12),l=n.n(c),f=n(64),d=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e};function p(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}var h=function(e){return 0===s.a.Children.count(e)},m=function(e){function t(){var n,r;!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,t);for(var o=arguments.length,i=Array(o),a=0;a<o;a++)i[a]=arguments[a];return n=r=p(this,e.call.apply(e,[this].concat(i))),r.state={match:r.computeMatch(r.props,r.context.router)},p(r,n)}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),t.prototype.getChildContext=function(){return{router:d({},this.context.router,{route:{location:this.props.location||this.context.router.route.location,match:this.state.match}})}},t.prototype.computeMatch=function(e,t){var n=e.computedMatch,r=e.location,o=e.path,i=e.strict,u=e.exact,s=e.sensitive;if(n)return n;a()(t,"You should not use <Route> or withRouter() outside a <Router>");var c=t.route,l=(r||c.location).pathname;return Object(f.a)(l,{path:o,strict:i,exact:u,sensitive:s},c.match)},t.prototype.componentWillMount=function(){o()(!(this.props.component&&this.props.render),"You should not use <Route component> and <Route render> in the same route; <Route render> will be 
ignored"),o()(!(this.props.component&&this.props.children&&!h(this.props.children)),"You should not use <Route component> and <Route children> in the same route; <Route children> will be ignored"),o()(!(this.props.render&&this.props.children&&!h(this.props.children)),"You should not use <Route render> and <Route children> in the same route; <Route children> will be ignored")},t.prototype.componentWillReceiveProps=function(e,t){o()(!(e.location&&!this.props.location),'<Route> elements should not change from uncontrolled to controlled (or vice versa). You initially used no "location" prop and then provided one on a subsequent render.'),o()(!(!e.location&&this.props.location),'<Route> elements should not change from controlled to uncontrolled (or vice versa). You provided a "location" prop initially but omitted it on a subsequent render.'),this.setState({match:this.computeMatch(e,t.router)})},t.prototype.render=function(){var e=this.state.match,t=this.props,n=t.children,r=t.component,o=t.render,i=this.context.router,a=i.history,u=i.route,c=i.staticContext,l={match:e,location:this.props.location||u.location,history:a,staticContext:c};return r?e?s.a.createElement(r,l):null:o?e?o(l):null:"function"==typeof n?n(l):n&&!h(n)?s.a.Children.only(n):null},t}(s.a.Component);m.propTypes={computedMatch:l.a.object,path:l.a.string,exact:l.a.bool,strict:l.a.bool,sensitive:l.a.bool,component:l.a.func,render:l.a.func,children:l.a.oneOfType([l.a.func,l.a.node]),location:l.a.object},m.contextTypes={router:l.a.shape({history:l.a.object.isRequired,route:l.a.object.isRequired,staticContext:l.a.object})},m.childContextTypes={router:l.a.object.isRequired},t.a=m},function(e,t,n){"use strict";e.exports=n(288)},function(e,t,n){"use strict";t.a=function(e){"undefined"!=typeof console&&"function"==typeof console.error&&console.error(e);try{throw new Error(e)}catch(e){}}},function(e,t,n){"use strict";var r=n(63);t.a=r.a},function(e,t,n){e.exports={default:n(333),__esModule:!0}},function(e,t,n){"use 
strict";Object.defineProperty(t,"__esModule",{value:!0}),t.history=t.store=void 0;var r=n(15),o=n(135),i=n(27),a=f(n(345)),u=n(17),s=f(n(346)),c=f(n(347)),l=f(n(353));function f(e){return e&&e.__esModule?e:{default:e}}var d=(0,r.combineReducers)({auth:function(){return arguments.length>0&&void 0!==arguments[0]?arguments[0]:(0,u.login)()},metadata:s.default,search:c.default,review:l.default});var p=(0,i.createBrowserHistory)(),h=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=arguments[1],n=window.__REDUX_DEVTOOLS_EXTENSION_COMPOSE__||r.compose;return(0,r.createStore)((0,o.connectRouter)(t)(d),e,n((0,r.applyMiddleware)(a.default,(0,o.routerMiddleware)(t))))}({},p);t.store=h,t.history=p},function(e,t,n){var r=n(96),o=n(14)("iterator"),i=n(36);e.exports=n(10).getIteratorMethod=function(e){if(void 0!=e)return e[o]||e["@@iterator"]||i[r(e)]}},function(e,t,n){var r=n(46),o=n(14)("toStringTag"),i="Arguments"==r(function(){return arguments}());e.exports=function(e){var t,n,a;return void 0===e?"Undefined":null===e?"Null":"string"==typeof(n=function(e,t){try{return e[t]}catch(e){}}(t=Object(e),o))?n:i?r(t):"Object"==(a=r(t))&&"function"==typeof t.callee?"Arguments":a}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.create=t.dedupe=t.exportCSV=t.clear=t.refresh=t.toggleSaved=t.unsave=t.save=void 0;var r=f(n(365)),o=f(n(28)),i=n(165),a=f(n(478)),u=f(n(496)),s=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}(n(39)),c=n(17),l=n(159);function f(e){return e&&e.__esModule?e:{default:e}}var d=function(){return"/api/images/import/new/"},p=function(e){return{type:s.metadata.loading,tag:e}},h=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return{type:s.metadata.loaded,tag:e,data:t}},m=function(e,t){return{type:s.metadata.error,tag:e,err:t}},v=t.save=function(e){return 
function(t){console.log("save",e);var n=(0,l.getSavedFromStore)(),r=n[e.hash]||{frames:{},hash:e.hash,verified:e.verified};e.frame&&(r.frames[parseInt(e.frame,10)]=!0),r.verified=e.verified,n[e.hash]=r,t({type:s.review.save,saved:n}),t({type:s.review.dedupe,payload:!1})}},y=t.unsave=function(e){return function(t){console.log("unsave",e);var n=(0,l.getSavedFromStore)(),r=n[e.hash];r&&e.frame&&r.frames[parseInt(e.frame,10)]&&(r.frames[parseInt(e.frame,10)]=!1),t({type:s.review.unsave,saved:n})}};t.toggleSaved=function(e){return function(t){var n=e.hash,r=e.frame,o=(0,l.getSavedFromStore)(),i=o[n],a=!1;console.log(o,i),i&&r&&i.frames&&i.frames[parseInt(r,10)]&&(a=i.frames[parseInt(r,10)]),console.log(a),a?y(e)(t):v(e)(t)}},t.refresh=function(){return function(e){var t=(0,l.getSavedFromStore)();(0,o.default)(t).forEach(function(e){var n=t[e],r=0,i=(0,o.default)(n.frames);i.forEach(function(e){n.frames[e]||(delete n.frames[e],r+=1)}),i.length&&i.length!==r||delete t[e]}),e({type:s.review.refresh,saved:t})}},t.clear=function(){return function(e){e({type:s.review.clear})}},t.exportCSV=function(){return function(e){console.log("export CSV");var t=(0,l.getSavedFromStore)(),n=(0,o.default)(t).sort().map(function(e){var n=t[e],r=n.verified,i=n.hash,a=n.frames;return[i,(0,o.default)(a).join(", "),(0,c.verify)(r)]});(0,a.default)(n,function(t,r){var o=new Blob([r],{type:"text/csv"});(0,u.default)(o,"vsearch_investigation_"+(0,i.format)(new Date,"YYYYMMDD_HHmm")+".csv"),e(h("csv",{count:n.length}))})}},t.dedupe=function(){return function(e){return e(p("dedupe")),new r.default(function(t,n){var r=(0,l.getSavedUrls)();(0,c.post)("/api/images/import/search/",{urls:r}).then(function(n){var r=n.good,o=n.bad,i=(0,l.getSavedFromStore)();o.forEach(function(e){var t=e.image;console.log(t);var 
n=t.sa_hash,r=t.frame,o=parseInt(r,10);i[n]&&i[n].frames[o]&&(i[n].frames[o]=!1,1)}),e({type:s.review.save,saved:i}),e({type:s.review.dedupe,payload:!0}),t(r,o)}).catch(function(t){e({type:s.review.dedupe,payload:!1}),n(t)})})}},t.create=function(e){var t=e.title,n=e.graphic;return function(e){var r=(0,l.getSavedUrls)();return t?r?(e(p("create")),(0,c.post)(d(),{title:t,graphic:n,urls:r}).then(function(t){e(h("create")),window.location.href="/groups/show/"+t.image_group.id}).catch(function(t){e(m("create")),console.log(t)})):e(m("create","No images to save")):e(m("create","No title"))}}},function(e,t,n){"use strict";var r=n(56);e.exports.f=function(e){return new function(e){var t,n;this.promise=new e(function(e,r){if(void 0!==t||void 0!==n)throw TypeError("Bad Promise constructor");t=e,n=r}),this.resolve=r(t),this.reject=r(n)}(e)}},function(e,t){e.exports=function(e){return e instanceof Date}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e),n=t.getFullYear(),o=t.getMonth(),i=new Date(0);return i.setFullYear(n,o+1,0),i.setHours(0,0,0,0),i.getDate()}},function(e,t,n){var r=n(50);e.exports=function(e,t){var n=Number(t);return r(e,7*n)}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e).getTime(),o=r(t).getTime();return n>o?-1:n<o?1:0}},function(e,t,n){var r=n(0),o=n(174),i=n(53);e.exports=function(e,t){var n=r(e),a=r(t),u=i(n,a),s=Math.abs(o(n,a));return n.setMonth(n.getMonth()-u*s),u*(s-(i(n,a)===-u))}},function(e,t,n){var r=n(69);e.exports=function(e,t){var n=r(e,t)/1e3;return n>0?Math.floor(n):Math.ceil(n)}},function(e,t,n){var r=n(389),o=n(390);e.exports={distanceInWords:r(),format:o()}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e);return t.setHours(23,59,59,999),t}},function(e,t,n){var r=n(0),o=n(30),i=n(52),a=6048e5;e.exports=function(e){var t=r(e),n=o(t).getTime()-i(t).getTime();return Math.round(n/a)+1}},function(e,t,n){var r=n(66);e.exports=function(e,t,n){var o=r(e,n),i=r(t,n);return 
o.getTime()===i.getTime()}},function(e,t){function n(){this._events=this._events||{},this._maxListeners=this._maxListeners||void 0}function r(e){return"function"==typeof e}function o(e){return"object"==typeof e&&null!==e}function i(e){return void 0===e}e.exports=n,n.EventEmitter=n,n.prototype._events=void 0,n.prototype._maxListeners=void 0,n.defaultMaxListeners=10,n.prototype.setMaxListeners=function(e){if(!function(e){return"number"==typeof e}(e)||e<0||isNaN(e))throw TypeError("n must be a positive number");return this._maxListeners=e,this},n.prototype.emit=function(e){var t,n,a,u,s,c;if(this._events||(this._events={}),"error"===e&&(!this._events.error||o(this._events.error)&&!this._events.error.length)){if((t=arguments[1])instanceof Error)throw t;var l=new Error('Uncaught, unspecified "error" event. ('+t+")");throw l.context=t,l}if(i(n=this._events[e]))return!1;if(r(n))switch(arguments.length){case 1:n.call(this);break;case 2:n.call(this,arguments[1]);break;case 3:n.call(this,arguments[1],arguments[2]);break;default:u=Array.prototype.slice.call(arguments,1),n.apply(this,u)}else if(o(n))for(u=Array.prototype.slice.call(arguments,1),a=(c=n.slice()).length,s=0;s<a;s++)c[s].apply(this,u);return!0},n.prototype.addListener=function(e,t){var a;if(!r(t))throw TypeError("listener must be a function");return this._events||(this._events={}),this._events.newListener&&this.emit("newListener",e,r(t.listener)?t.listener:t),this._events[e]?o(this._events[e])?this._events[e].push(t):this._events[e]=[this._events[e],t]:this._events[e]=t,o(this._events[e])&&!this._events[e].warned&&(a=i(this._maxListeners)?n.defaultMaxListeners:this._maxListeners)&&a>0&&this._events[e].length>a&&(this._events[e].warned=!0,console.error("(node) warning: possible EventEmitter memory leak detected. %d listeners added. 
Use emitter.setMaxListeners() to increase limit.",this._events[e].length),"function"==typeof console.trace&&console.trace()),this},n.prototype.on=n.prototype.addListener,n.prototype.once=function(e,t){if(!r(t))throw TypeError("listener must be a function");var n=!1;function o(){this.removeListener(e,o),n||(n=!0,t.apply(this,arguments))}return o.listener=t,this.on(e,o),this},n.prototype.removeListener=function(e,t){var n,i,a,u;if(!r(t))throw TypeError("listener must be a function");if(!this._events||!this._events[e])return this;if(a=(n=this._events[e]).length,i=-1,n===t||r(n.listener)&&n.listener===t)delete this._events[e],this._events.removeListener&&this.emit("removeListener",e,t);else if(o(n)){for(u=a;u-- >0;)if(n[u]===t||n[u].listener&&n[u].listener===t){i=u;break}if(i<0)return this;1===n.length?(n.length=0,delete this._events[e]):n.splice(i,1),this._events.removeListener&&this.emit("removeListener",e,t)}return this},n.prototype.removeAllListeners=function(e){var t,n;if(!this._events)return this;if(!this._events.removeListener)return 0===arguments.length?this._events={}:this._events[e]&&delete this._events[e],this;if(0===arguments.length){for(t in this._events)"removeListener"!==t&&this.removeAllListeners(t);return this.removeAllListeners("removeListener"),this._events={},this}if(r(n=this._events[e]))this.removeListener(e,n);else if(n)for(;n.length;)this.removeListener(e,n[n.length-1]);return delete this._events[e],this},n.prototype.listeners=function(e){return this._events&&this._events[e]?r(this._events[e])?[this._events[e]]:this._events[e].slice():[]},n.prototype.listenerCount=function(e){if(this._events){var t=this._events[e];if(r(t))return 1;if(t)return t.length}return 0},n.listenerCount=function(e,t){return e.listenerCount(t)}},function(e,t,n){(t=e.exports=n(201)).Stream=t,t.Readable=t,t.Writable=n(111),t.Duplex=n(33),t.Transform=n(207),t.PassThrough=n(488)},function(e,t,n){"use strict";(function(t,r,o){var i=n(70);function a(e){var 
t=this;this.next=null,this.entry=null,this.finish=function(){!function(e,t,n){var r=e.entry;e.entry=null;for(;r;){var o=r.callback;t.pendingcb--,o(n),r=r.next}t.corkedRequestsFree?t.corkedRequestsFree.next=e:t.corkedRequestsFree=e}(t,e)}}e.exports=g;var u,s=!t.browser&&["v0.10","v0.9."].indexOf(t.version.slice(0,5))>-1?r:i.nextTick;g.WritableState=y;var c=n(54);c.inherits=n(32);var l={deprecate:n(487)},f=n(203),d=n(71).Buffer,p=o.Uint8Array||function(){};var h,m=n(205);function v(){}function y(e,t){u=u||n(33),e=e||{};var r=t instanceof u;this.objectMode=!!e.objectMode,r&&(this.objectMode=this.objectMode||!!e.writableObjectMode);var o=e.highWaterMark,c=e.writableHighWaterMark,l=this.objectMode?16:16384;this.highWaterMark=o||0===o?o:r&&(c||0===c)?c:l,this.highWaterMark=Math.floor(this.highWaterMark),this.finalCalled=!1,this.needDrain=!1,this.ending=!1,this.ended=!1,this.finished=!1,this.destroyed=!1;var f=!1===e.decodeStrings;this.decodeStrings=!f,this.defaultEncoding=e.defaultEncoding||"utf8",this.length=0,this.writing=!1,this.corked=0,this.sync=!0,this.bufferProcessing=!1,this.onwrite=function(e){!function(e,t){var n=e._writableState,r=n.sync,o=n.writecb;if(function(e){e.writing=!1,e.writecb=null,e.length-=e.writelen,e.writelen=0}(n),t)!function(e,t,n,r,o){--t.pendingcb,n?(i.nextTick(o,r),i.nextTick(O,e,t),e._writableState.errorEmitted=!0,e.emit("error",r)):(o(r),e._writableState.errorEmitted=!0,e.emit("error",r),O(e,t))}(e,n,r,t,o);else{var a=x(n);a||n.corked||n.bufferProcessing||!n.bufferedRequest||w(e,n),r?s(b,e,n,a,o):b(e,n,a,o)}}(t,e)},this.writecb=null,this.writelen=0,this.bufferedRequest=null,this.lastBufferedRequest=null,this.pendingcb=0,this.prefinished=!1,this.errorEmitted=!1,this.bufferedRequestCount=0,this.corkedRequestsFree=new a(this)}function g(e){if(u=u||n(33),!(h.call(g,this)||this instanceof u))return new g(e);this._writableState=new y(e,this),this.writable=!0,e&&("function"==typeof e.write&&(this._write=e.write),"function"==typeof 
e.writev&&(this._writev=e.writev),"function"==typeof e.destroy&&(this._destroy=e.destroy),"function"==typeof e.final&&(this._final=e.final)),f.call(this)}function _(e,t,n,r,o,i,a){t.writelen=r,t.writecb=a,t.writing=!0,t.sync=!0,n?e._writev(o,t.onwrite):e._write(o,i,t.onwrite),t.sync=!1}function b(e,t,n,r){n||function(e,t){0===t.length&&t.needDrain&&(t.needDrain=!1,e.emit("drain"))}(e,t),t.pendingcb--,r(),O(e,t)}function w(e,t){t.bufferProcessing=!0;var n=t.bufferedRequest;if(e._writev&&n&&n.next){var r=t.bufferedRequestCount,o=new Array(r),i=t.corkedRequestsFree;i.entry=n;for(var u=0,s=!0;n;)o[u]=n,n.isBuf||(s=!1),n=n.next,u+=1;o.allBuffers=s,_(e,t,!0,t.length,o,"",i.finish),t.pendingcb++,t.lastBufferedRequest=null,i.next?(t.corkedRequestsFree=i.next,i.next=null):t.corkedRequestsFree=new a(t),t.bufferedRequestCount=0}else{for(;n;){var c=n.chunk,l=n.encoding,f=n.callback;if(_(e,t,!1,t.objectMode?1:c.length,c,l,f),n=n.next,t.bufferedRequestCount--,t.writing)break}null===n&&(t.lastBufferedRequest=null)}t.bufferedRequest=n,t.bufferProcessing=!1}function x(e){return e.ending&&0===e.length&&null===e.bufferedRequest&&!e.finished&&!e.writing}function E(e,t){e._final(function(n){t.pendingcb--,n&&e.emit("error",n),t.prefinished=!0,e.emit("prefinish"),O(e,t)})}function O(e,t){var n=x(t);return n&&(!function(e,t){t.prefinished||t.finalCalled||("function"==typeof e._final?(t.pendingcb++,t.finalCalled=!0,i.nextTick(E,e,t)):(t.prefinished=!0,e.emit("prefinish")))}(e,t),0===t.pendingcb&&(t.finished=!0,e.emit("finish"))),n}c.inherits(g,f),y.prototype.getBuffer=function(){for(var e=this.bufferedRequest,t=[];e;)t.push(e),e=e.next;return t},function(){try{Object.defineProperty(y.prototype,"buffer",{get:l.deprecate(function(){return this.getBuffer()},"_writableState.buffer is deprecated. 
Use _writableState.getBuffer instead.","DEP0003")})}catch(e){}}(),"function"==typeof Symbol&&Symbol.hasInstance&&"function"==typeof Function.prototype[Symbol.hasInstance]?(h=Function.prototype[Symbol.hasInstance],Object.defineProperty(g,Symbol.hasInstance,{value:function(e){return!!h.call(this,e)||this===g&&(e&&e._writableState instanceof y)}})):h=function(e){return e instanceof this},g.prototype.pipe=function(){this.emit("error",new Error("Cannot pipe, not readable"))},g.prototype.write=function(e,t,n){var r=this._writableState,o=!1,a=!r.objectMode&&function(e){return d.isBuffer(e)||e instanceof p}(e);return a&&!d.isBuffer(e)&&(e=function(e){return d.from(e)}(e)),"function"==typeof t&&(n=t,t=null),a?t="buffer":t||(t=r.defaultEncoding),"function"!=typeof n&&(n=v),r.ended?function(e,t){var n=new Error("write after end");e.emit("error",n),i.nextTick(t,n)}(this,n):(a||function(e,t,n,r){var o=!0,a=!1;return null===n?a=new TypeError("May not write null values to stream"):"string"==typeof n||void 0===n||t.objectMode||(a=new TypeError("Invalid non-string/buffer chunk")),a&&(e.emit("error",a),i.nextTick(r,a),o=!1),o}(this,r,e,n))&&(r.pendingcb++,o=function(e,t,n,r,o,i){if(!n){var a=function(e,t,n){e.objectMode||!1===e.decodeStrings||"string"!=typeof t||(t=d.from(t,n));return t}(t,r,o);r!==a&&(n=!0,o="buffer",r=a)}var u=t.objectMode?1:r.length;t.length+=u;var s=t.length<t.highWaterMark;s||(t.needDrain=!0);if(t.writing||t.corked){var c=t.lastBufferedRequest;t.lastBufferedRequest={chunk:r,encoding:o,isBuf:n,callback:i,next:null},c?c.next=t.lastBufferedRequest:t.bufferedRequest=t.lastBufferedRequest,t.bufferedRequestCount+=1}else _(e,t,!1,u,r,o,i);return s}(this,r,a,e,t,n)),o},g.prototype.cork=function(){this._writableState.corked++},g.prototype.uncork=function(){var e=this._writableState;e.corked&&(e.corked--,e.writing||e.corked||e.finished||e.bufferProcessing||!e.bufferedRequest||w(this,e))},g.prototype.setDefaultEncoding=function(e){if("string"==typeof 
e&&(e=e.toLowerCase()),!(["hex","utf8","utf-8","ascii","binary","base64","ucs2","ucs-2","utf16le","utf-16le","raw"].indexOf((e+"").toLowerCase())>-1))throw new TypeError("Unknown encoding: "+e);return this._writableState.defaultEncoding=e,this},Object.defineProperty(g.prototype,"writableHighWaterMark",{enumerable:!1,get:function(){return this._writableState.highWaterMark}}),g.prototype._write=function(e,t,n){n(new Error("_write() is not implemented"))},g.prototype._writev=null,g.prototype.end=function(e,t,n){var r=this._writableState;"function"==typeof e?(n=e,e=null,t=null):"function"==typeof t&&(n=t,t=null),null!==e&&void 0!==e&&this.write(e,t),r.corked&&(r.corked=1,this.uncork()),r.ending||r.finished||function(e,t,n){t.ending=!0,O(e,t),n&&(t.finished?i.nextTick(n):e.once("finish",n));t.ended=!0,e.writable=!1}(this,r,n)},Object.defineProperty(g.prototype,"destroyed",{get:function(){return void 0!==this._writableState&&this._writableState.destroyed},set:function(e){this._writableState&&(this._writableState.destroyed=e)}}),g.prototype.destroy=m.destroy,g.prototype._undestroy=m.undestroy,g.prototype._destroy=function(e,t){this.end(),t(e)}}).call(t,n(40),n(485).setImmediate,n(3))},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.fetchMetadata=t.fetchMediaRecord=t.dispatchFetch=t.setHash=void 0;var r=function(e){return e&&e.__esModule?e:{default:e}}(n(503)),o=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}(n(39));var i={mediaRecord:function(e){return"/search/api/mediarecord/"+e},metadata:function(e){return"/api/metadata/"+e+"/"}},a=function(e,t){return{type:o.metadata.loading,tag:e,hash:t}},u=function(e,t,n){return{type:o.metadata.error,tag:e,hash:t,err:n}},s=(t.setHash=function(e){return function(t){t({type:o.metadata.set_hash,hash:e})}},t.dispatchFetch=function(e,t){return function(n){n(a(e,t)),function(e){return 
fetch(e,{}).then(function(e){return e.json()})}(i[e](t)).then(function(r){return n(function(e,t,n){return{type:o.metadata.loaded,tag:e,hash:t,data:n}}(e,t,r))}).catch(function(r){return n(u(e,t,r))})}});t.fetchMediaRecord=function(e){return function(t){return s("mediaRecord",e)(t)}},t.fetchMetadata=function(e){return function(t){t(a("metadata",e)),(0,r.default)(i.metadata(e)).then(function(e){return e.json()}).then(function(n){return t(function(e,t,n){return{type:o.metadata.loaded_many,tag:e,hash:t,data:n}}("metadata",e,n))}).catch(function(n){return t(u("metadata",e,n))})}}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.places365=t.coco=void 0;var r=i(n(509)),o=i(n(510));function i(e){return e&&e.__esModule?e:{default:e}}t.coco=r.default,t.places365=o.default},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=y(n(4)),o=y(n(5)),i=y(n(6)),a=y(n(7)),u=y(n(8)),s=y(n(9)),c=n(1),l=y(c),f=n(16),d=n(15),p=n(2),h=n(165),m=n(17),v=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}(n(21));function y(e){return e&&e.__esModule?e:{default:e}}var g=function(e){function t(){return(0,i.default)(this,t),(0,u.default)(this,(t.__proto__||(0,o.default)(t)).apply(this,arguments))}return(0,s.default)(t,e),(0,a.default)(t,[{key:"render",value:function(){var e=this.props,t=e.query,n=e.metadata,r=e.sugarcube;if(!t||!n||!n.mediainfo||"loading"===n.metadata)return l.default.createElement("div",{className:"searchMeta"});var o=n.mediainfo.sugarcube_id,i=n.mediainfo.metadata.mediainfo.video,a=t.crop||{},u=a.x,s=a.y,c=a.w,d=a.h;return l.default.createElement("div",{className:"searchMeta"},"verified"in t&&l.default.createElement("span",{className:t.verified?"verified":"unverified"},t.verified?"verified":"unverified"),t.hash&&l.default.createElement("span",null,"sha256: 
",l.default.createElement(f.Link,{className:"sha256",to:v.publicUrl.browse(t.hash)},t.hash)),t.frame&&l.default.createElement("span",null,"Frame: ",(0,m.timestamp)(t.frame,i.frame_rate)," / ",(0,m.timestamp)(i.duration/1e3,1)),t.crop&&l.default.createElement("span",null,"Crop: ",parseInt(c,10)+"x"+parseInt(d,10)+" @ ("+parseInt(u,10)+", "+parseInt(s,10)+")"),!(!i||!i.encoded_date)&&l.default.createElement("span",null,"Date: ",(0,h.format)(new Date(i.encoded_date),"DD-MMM-YYYY")),!(!r||!o)&&l.default.createElement("span",null,"sugarcube: ",o))}}]),t}(c.Component);t.default=(0,p.connect)(function(e){return{metadata:e.metadata}},function(e){return{actions:(0,d.bindActionCreators)((0,r.default)({},v),e)}})(g)},function(e,t,n){"use strict";
+/*
+object-assign
+(c) Sindre Sorhus
+@license MIT
+*/var r=Object.getOwnPropertySymbols,o=Object.prototype.hasOwnProperty,i=Object.prototype.propertyIsEnumerable;e.exports=function(){try{if(!Object.assign)return!1;var e=new String("abc");if(e[5]="de","5"===Object.getOwnPropertyNames(e)[0])return!1;for(var t={},n=0;n<10;n++)t["_"+String.fromCharCode(n)]=n;if("0123456789"!==Object.getOwnPropertyNames(t).map(function(e){return t[e]}).join(""))return!1;var r={};return"abcdefghijklmnopqrst".split("").forEach(function(e){r[e]=e}),"abcdefghijklmnopqrst"===Object.keys(Object.assign({},r)).join("")}catch(e){return!1}}()?Object.assign:function(e,t){for(var n,a,u=function(e){if(null===e||void 0===e)throw new TypeError("Object.assign cannot be called with null or undefined");return Object(e)}(e),s=1;s<arguments.length;s++){for(var c in n=Object(arguments[s]))o.call(n,c)&&(u[c]=n[c]);if(r){a=r(n);for(var l=0;l<a.length;l++)i.call(n,a[l])&&(u[a[l]]=n[a[l]])}}return u}},function(e,t,n){"use strict";n.d(t,"b",function(){return i}),n.d(t,"a",function(){return a});var r=n(12),o=n.n(r),i=o.a.shape({trySubscribe:o.a.func.isRequired,tryUnsubscribe:o.a.func.isRequired,notifyNestedSubs:o.a.func.isRequired,isSubscribed:o.a.func.isRequired}),a=o.a.shape({subscribe:o.a.func.isRequired,dispatch:o.a.func.isRequired,getState:o.a.func.isRequired})},function(e,t,n){"use strict";t.a=function(e){var t,n,r=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},i=r.getDisplayName,h=void 0===i?function(e){return"ConnectAdvanced("+e+")"}:i,m=r.methodName,v=void 0===m?"connectAdvanced":m,y=r.renderCountProp,g=void 0===y?void 0:y,_=r.shouldHandleStateChanges,b=void 0===_||_,w=r.storeKey,x=void 0===w?"store":w,E=r.withRef,O=void 0!==E&&E,S=function(e,t){var n={};for(var r in e)t.indexOf(r)>=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}(r,["getDisplayName","methodName","renderCountProp","shouldHandleStateChanges","storeKey","withRef"]),T=x+"Subscription",k=f++,R=((t={})[x]=c.a,t[T]=c.b,t),j=((n={})[T]=c.b,n);return 
function(t){a()("function"==typeof t,"You must pass a component to the function returned by "+v+". Instead received "+JSON.stringify(t));var n=t.displayName||t.name||"Component",r=h(n),i=l({},S,{getDisplayName:h,methodName:v,renderCountProp:g,shouldHandleStateChanges:b,storeKey:x,withRef:O,displayName:r,wrappedComponentName:n,WrappedComponent:t}),c=function(n){function o(e,t){!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,o);var i=function(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}(this,n.call(this,e,t));return i.version=k,i.state={},i.renderCount=0,i.store=e[x]||t[x],i.propsMode=Boolean(e[x]),i.setWrappedInstance=i.setWrappedInstance.bind(i),a()(i.store,'Could not find "'+x+'" in either the context or props of "'+r+'". Either wrap the root component in a <Provider>, or explicitly pass "'+x+'" as a prop to "'+r+'".'),i.initSelector(),i.initSubscription(),i}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(o,n),o.prototype.getChildContext=function(){var e,t=this.propsMode?null:this.subscription;return(e={})[T]=t||this.context[T],e},o.prototype.componentDidMount=function(){b&&(this.subscription.trySubscribe(),this.selector.run(this.props),this.selector.shouldComponentUpdate&&this.forceUpdate())},o.prototype.componentWillReceiveProps=function(e){this.selector.run(e)},o.prototype.shouldComponentUpdate=function(){return 
this.selector.shouldComponentUpdate},o.prototype.componentWillUnmount=function(){this.subscription&&this.subscription.tryUnsubscribe(),this.subscription=null,this.notifyNestedSubs=p,this.store=null,this.selector.run=p,this.selector.shouldComponentUpdate=!1},o.prototype.getWrappedInstance=function(){return a()(O,"To access the wrapped instance, you need to specify { withRef: true } in the options argument of the "+v+"() call."),this.wrappedInstance},o.prototype.setWrappedInstance=function(e){this.wrappedInstance=e},o.prototype.initSelector=function(){var t=e(this.store.dispatch,i);this.selector=function(e,t){var n={run:function(r){try{var o=e(t.getState(),r);(o!==n.props||n.error)&&(n.shouldComponentUpdate=!0,n.props=o,n.error=null)}catch(e){n.shouldComponentUpdate=!0,n.error=e}}};return n}(t,this.store),this.selector.run(this.props)},o.prototype.initSubscription=function(){if(b){var e=(this.propsMode?this.props:this.context)[T];this.subscription=new s.a(this.store,e,this.onStateChange.bind(this)),this.notifyNestedSubs=this.subscription.notifyNestedSubs.bind(this.subscription)}},o.prototype.onStateChange=function(){this.selector.run(this.props),this.selector.shouldComponentUpdate?(this.componentDidUpdate=this.notifyNestedSubsOnComponentDidUpdate,this.setState(d)):this.notifyNestedSubs()},o.prototype.notifyNestedSubsOnComponentDidUpdate=function(){this.componentDidUpdate=void 0,this.notifyNestedSubs()},o.prototype.isSubscribed=function(){return Boolean(this.subscription)&&this.subscription.isSubscribed()},o.prototype.addExtraProps=function(e){if(!(O||g||this.propsMode&&this.subscription))return e;var t=l({},e);return O&&(t.ref=this.setWrappedInstance),g&&(t[g]=this.renderCount++),this.propsMode&&this.subscription&&(t[T]=this.subscription),t},o.prototype.render=function(){var e=this.selector;if(e.shouldComponentUpdate=!1,e.error)throw e.error;return Object(u.createElement)(t,this.addExtraProps(e.props))},o}(u.Component);return 
c.WrappedComponent=t,c.displayName=r,c.childContextTypes=j,c.contextTypes=R,c.propTypes=R,o()(c,t)}};var r=n(118),o=n.n(r),i=n(19),a=n.n(i),u=n(1),s=(n.n(u),n(223)),c=n(116),l=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e};var f=0,d={};function p(){}},function(e,t,n){"use strict";var r={childContextTypes:!0,contextTypes:!0,defaultProps:!0,displayName:!0,getDefaultProps:!0,getDerivedStateFromProps:!0,mixins:!0,propTypes:!0,type:!0},o={name:!0,length:!0,prototype:!0,caller:!0,callee:!0,arguments:!0,arity:!0},i=Object.defineProperty,a=Object.getOwnPropertyNames,u=Object.getOwnPropertySymbols,s=Object.getOwnPropertyDescriptor,c=Object.getPrototypeOf,l=c&&c(Object);e.exports=function e(t,n,f){if("string"!=typeof n){if(l){var d=c(n);d&&d!==l&&e(t,d,f)}var p=a(n);u&&(p=p.concat(u(n)));for(var h=0;h<p.length;++h){var m=p[h];if(!(r[m]||o[m]||f&&f[m])){var v=s(n,m);try{i(t,m,v)}catch(e){}}}return t}return t}},function(e,t){e.exports=function(e){if(!e.webpackPolyfill){var t=Object.create(e);t.children||(t.children=[]),Object.defineProperty(t,"loaded",{enumerable:!0,get:function(){return t.l}}),Object.defineProperty(t,"id",{enumerable:!0,get:function(){return t.i}}),Object.defineProperty(t,"exports",{enumerable:!0}),t.webpackPolyfill=1}return t}},function(e,t,n){"use strict";t.a=function(e){return function(t,n){var r=e(t,n);function o(){return r}return o.dependsOnOwnProps=!1,o}},t.b=function(e,t){return function(t,n){n.displayName;var o=function(e,t){return o.dependsOnOwnProps?o.mapToProps(e,t):o.mapToProps(e)};return o.dependsOnOwnProps=!0,o.mapToProps=function(t,n){o.mapToProps=e,o.dependsOnOwnProps=r(e);var i=o(t,n);return"function"==typeof i&&(o.mapToProps=i,o.dependsOnOwnProps=r(i),i=o(t,n)),i},o}};n(121);function r(e){return null!==e.dependsOnOwnProps&&void 0!==e.dependsOnOwnProps?Boolean(e.dependsOnOwnProps):1!==e.length}},function(e,t,n){"use 
strict";n(229),n(72)},function(e,t,n){"use strict";var r=n(231).a.Symbol;t.a=r},function(e,t,n){var r=n(25),o=n(41),i=n(74)("IE_PROTO"),a=Object.prototype;e.exports=Object.getPrototypeOf||function(e){return e=o(e),r(e,i)?e[i]:"function"==typeof e.constructor&&e instanceof e.constructor?e.constructor.prototype:e instanceof Object?a:null}},function(e,t,n){var r=n(18),o=n(10),i=n(35);e.exports=function(e,t){var n=(o.Object||{})[e]||Object[e],a={};a[e]=t(n),r(r.S+r.F*i(function(){n(1)}),"Object",a)}},function(e,t,n){e.exports=!n(24)&&!n(35)(function(){return 7!=Object.defineProperty(n(76)("div"),"a",{get:function(){return 7}}).a})},function(e,t,n){e.exports={default:n(245),__esModule:!0}},function(e,t,n){"use strict";var r=n(42),o=n(18),i=n(128),a=n(26),u=n(36),s=n(250),c=n(57),l=n(123),f=n(14)("iterator"),d=!([].keys&&"next"in[].keys()),p=function(){return this};e.exports=function(e,t,n,h,m,v,y){s(n,t,h);var g,_,b,w=function(e){if(!d&&e in S)return S[e];switch(e){case"keys":case"values":return function(){return new n(this,e)}}return function(){return new n(this,e)}},x=t+" Iterator",E="values"==m,O=!1,S=e.prototype,T=S[f]||S["@@iterator"]||m&&S[m],k=T||w(m),R=m?E?w("entries"):k:void 0,j="Array"==t&&S.entries||T;if(j&&(b=l(j.call(new e)))!==Object.prototype&&b.next&&(c(b,x,!0),r||"function"==typeof b[f]||a(b,f,p)),E&&T&&"values"!==T.name&&(O=!0,k=function(){return T.call(this)}),r&&!y||!d&&!O&&S[f]||a(S,f,k),u[t]=k,u[x]=p,m)if(g={values:E?k:w("values"),keys:v?k:w("keys"),entries:R},y)for(_ in g)_ in S||i(S,_,g[_]);else o(o.P+o.F*(d||O),t,g);return g}},function(e,t,n){e.exports=n(26)},function(e,t,n){var r=n(25),o=n(37),i=n(252)(!1),a=n(74)("IE_PROTO");e.exports=function(e,t){var n,u=o(e),s=0,c=[];for(n in u)n!=a&&r(u,n)&&c.push(n);for(;t.length>s;)r(u,n=t[s++])&&(~i(c,n)||c.push(n));return c}},function(e,t,n){var r=n(46);e.exports=Object("z").propertyIsEnumerable(0)?Object:function(e){return"String"==r(e)?e.split(""):Object(e)}},function(e,t,n){var 
r=n(13).document;e.exports=r&&r.documentElement},function(e,t,n){var r=n(129),o=n(82).concat("length","prototype");t.f=Object.getOwnPropertyNames||function(e){return r(e,o)}},function(e,t,n){var r=n(59),o=n(43),i=n(37),a=n(77),u=n(25),s=n(125),c=Object.getOwnPropertyDescriptor;t.f=n(24)?c:function(e,t){if(e=i(e),t=a(t,!0),s)try{return c(e,t)}catch(e){}if(u(e,t))return o(!r.f.call(e,t),e[t])}},function(e,t){},function(e,t,n){"use strict";(function(e){Object.defineProperty(t,"__esModule",{value:!0}),t.__RewireAPI__=t.__ResetDependency__=t.__set__=t.__Rewire__=t.__GetDependency__=t.__get__=t.createMatchSelector=t.getAction=t.getLocation=t.routerMiddleware=t.connectRouter=t.ConnectedRouter=t.routerActions=t.goForward=t.goBack=t.go=t.replace=t.push=t.CALL_HISTORY_METHOD=t.LOCATION_CHANGE=void 0;var r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},o=a(n(273)),i=a(n(284));function a(e){return e&&e.__esModule?e:{default:e}}var u=j("createAll")(j("plainStructure")),s=u.LOCATION_CHANGE,c=u.CALL_HISTORY_METHOD,l=u.push,f=u.replace,d=u.go,p=u.goBack,h=u.goForward,m=u.routerActions,v=u.ConnectedRouter,y=u.connectRouter,g=u.routerMiddleware,_=u.getLocation,b=u.getAction,w=u.createMatchSelector;function x(){try{if(e)return e}catch(e){try{if(window)return window}catch(e){return this}}}t.LOCATION_CHANGE=s,t.CALL_HISTORY_METHOD=c,t.push=l,t.replace=f,t.go=d,t.goBack=p,t.goForward=h,t.routerActions=m,t.ConnectedRouter=v,t.connectRouter=y,t.routerMiddleware=g,t.getLocation=_,t.getAction=b,t.createMatchSelector=w;var E=null;function O(){if(null===E){var e=x();e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__||(e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__=0),E=__$$GLOBAL_REWIRE_NEXT_MODULE_ID__++}return E}function S(){var e=x();return e.__$$GLOBAL_REWIRE_REGISTRY__||(e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)),__$$GLOBAL_REWIRE_REGISTRY__}function 
T(){var e=O(),t=S(),n=t[e];return n||(t[e]=Object.create(null),n=t[e]),n}!function(){var e=x();e.__rewire_reset_all__||(e.__rewire_reset_all__=function(){e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)})}();var k="__INTENTIONAL_UNDEFINED__",R={};function j(e){var t=T();if(void 0===t[e])return function(e){switch(e){case"createAll":return o.default;case"plainStructure":return i.default}return}(e);var n=t[e];return n===k?void 0:n}function P(e,t){var n=T();if("object"!==(void 0===e?"undefined":r(e)))return n[e]=void 0===t?k:t,function(){C(e)};Object.keys(e).forEach(function(t){n[t]=e[t]})}function C(e){var t=T();delete t[e],0==Object.keys(t).length&&delete S()[O]}function M(e){var t=T(),n=Object.keys(e),r={};function o(){n.forEach(function(e){t[e]=r[e]})}return function(i){n.forEach(function(n){r[n]=t[n],t[n]=e[n]});var a=i();return a&&"function"==typeof a.then?a.then(o).catch(o):o(),a}}!function(){function e(e,t){Object.defineProperty(R,e,{value:t,enumerable:!1,configurable:!0})}e("__get__",j),e("__GetDependency__",j),e("__Rewire__",P),e("__set__",P),e("__reset__",C),e("__ResetDependency__",C),e("__with__",M)}(),t.__get__=j,t.__GetDependency__=j,t.__Rewire__=P,t.__set__=P,t.__ResetDependency__=C,t.__RewireAPI__=R,t.default=R}).call(t,n(3))},function(e,t,n){"use strict";var r=n(38),o=n.n(r),i=n(1),a=n.n(i),u=n(12),s=n.n(u),c=n(27),l=n(63);function f(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}var d=function(e){function t(){var n,r;!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,t);for(var o=arguments.length,i=Array(o),a=0;a<o;a++)i[a]=arguments[a];return n=r=f(this,e.call.apply(e,[this].concat(i))),r.history=Object(c.createMemoryHistory)(r.props),f(r,n)}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof 
t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),t.prototype.componentWillMount=function(){o()(!this.props.history,"<MemoryRouter> ignores the history prop. To use a custom history, use `import { Router }` instead of `import { MemoryRouter as Router }`.")},t.prototype.render=function(){return a.a.createElement(l.a,{history:this.history,children:this.props.children})},t}(a.a.Component);d.propTypes={initialEntries:s.a.array,initialIndex:s.a.number,getUserConfirmation:s.a.func,keyLength:s.a.number,children:s.a.node},t.a=d},function(e,t,n){"use strict";n.d(t,"b",function(){return r}),n.d(t,"a",function(){return o}),n.d(t,"e",function(){return i}),n.d(t,"c",function(){return a}),n.d(t,"g",function(){return u}),n.d(t,"h",function(){return s}),n.d(t,"f",function(){return c}),n.d(t,"d",function(){return l});var r=!("undefined"==typeof window||!window.document||!window.document.createElement),o=function(e,t,n){return e.addEventListener?e.addEventListener(t,n,!1):e.attachEvent("on"+t,n)},i=function(e,t,n){return e.removeEventListener?e.removeEventListener(t,n,!1):e.detachEvent("on"+t,n)},a=function(e,t){return t(window.confirm(e))},u=function(){var e=window.navigator.userAgent;return(-1===e.indexOf("Android 2.")&&-1===e.indexOf("Android 4.0")||-1===e.indexOf("Mobile Safari")||-1!==e.indexOf("Chrome")||-1!==e.indexOf("Windows Phone"))&&(window.history&&"pushState"in window.history)},s=function(){return-1===window.navigator.userAgent.indexOf("Trident")},c=function(){return-1===window.navigator.userAgent.indexOf("Firefox")},l=function(e){return void 0===e.state&&-1===navigator.userAgent.indexOf("CriOS")}},function(e,t,n){"use strict";var r=n(1),o=n.n(r),i=n(12),a=n.n(i),u=n(19),s=n.n(u);var c=function(e){function t(){return function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a 
function")}(this,t),function(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}(this,e.apply(this,arguments))}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),t.prototype.enable=function(e){this.unblock&&this.unblock(),this.unblock=this.context.router.history.block(e)},t.prototype.disable=function(){this.unblock&&(this.unblock(),this.unblock=null)},t.prototype.componentWillMount=function(){s()(this.context.router,"You should not use <Prompt> outside a <Router>"),this.props.when&&this.enable(this.props.message)},t.prototype.componentWillReceiveProps=function(e){e.when?this.props.when&&this.props.message===e.message||this.enable(e.message):this.disable()},t.prototype.componentWillUnmount=function(){this.disable()},t.prototype.render=function(){return null},t}(o.a.Component);c.propTypes={when:a.a.bool,message:a.a.oneOfType([a.a.func,a.a.string]).isRequired},c.defaultProps={when:!0},c.contextTypes={router:a.a.shape({history:a.a.shape({block:a.a.func.isRequired}).isRequired}).isRequired},t.a=c},function(e,t,n){"use strict";var r=n(1),o=n.n(r),i=n(12),a=n.n(i),u=n(38),s=n.n(u),c=n(19),l=n.n(c),f=n(27),d=n(88),p=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e};var h=function(e){function t(){return function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,t),function(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof 
t?e:t}(this,e.apply(this,arguments))}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),t.prototype.isStatic=function(){return this.context.router&&this.context.router.staticContext},t.prototype.componentWillMount=function(){l()(this.context.router,"You should not use <Redirect> outside a <Router>"),this.isStatic()&&this.perform()},t.prototype.componentDidMount=function(){this.isStatic()||this.perform()},t.prototype.componentDidUpdate=function(e){var t=Object(f.createLocation)(e.to),n=Object(f.createLocation)(this.props.to);Object(f.locationsAreEqual)(t,n)?s()(!1,"You tried to redirect to the same route you're currently on: \""+n.pathname+n.search+'"'):this.perform()},t.prototype.computeTo=function(e){var t=e.computedMatch,n=e.to;return t?"string"==typeof n?Object(d.a)(n,t.params):p({},n,{pathname:Object(d.a)(n.pathname,t.params)}):n},t.prototype.perform=function(){var e=this.context.router.history,t=this.props.push,n=this.computeTo(this.props);t?e.push(n):e.replace(n)},t.prototype.render=function(){return null},t}(o.a.Component);h.propTypes={computedMatch:a.a.object,push:a.a.bool,from:a.a.string,to:a.a.oneOfType([a.a.string,a.a.object]).isRequired},h.defaultProps={push:!1},h.contextTypes={router:a.a.shape({history:a.a.shape({push:a.a.func.isRequired,replace:a.a.func.isRequired}).isRequired,staticContext:a.a.object}).isRequired},t.a=h},function(e,t,n){var r=n(280);e.exports=h,e.exports.parse=i,e.exports.compile=function(e,t){return s(i(e,t))},e.exports.tokensToFunction=s,e.exports.tokensToRegExp=p;var o=new RegExp(["(\\\\.)","([\\/.])?(?:(?:\\:(\\w+)(?:\\(((?:\\\\.|[^\\\\()])+)\\))?|\\(((?:\\\\.|[^\\\\()])+)\\))([+*?])?|(\\*))"].join("|"),"g");function i(e,t){for(var 
n,r=[],i=0,a=0,u="",s=t&&t.delimiter||"/";null!=(n=o.exec(e));){var f=n[0],d=n[1],p=n.index;if(u+=e.slice(a,p),a=p+f.length,d)u+=d[1];else{var h=e[a],m=n[2],v=n[3],y=n[4],g=n[5],_=n[6],b=n[7];u&&(r.push(u),u="");var w=null!=m&&null!=h&&h!==m,x="+"===_||"*"===_,E="?"===_||"*"===_,O=n[2]||s,S=y||g;r.push({name:v||i++,prefix:m||"",delimiter:O,optional:E,repeat:x,partial:w,asterisk:!!b,pattern:S?l(S):b?".*":"[^"+c(O)+"]+?"})}}return a<e.length&&(u+=e.substr(a)),u&&r.push(u),r}function a(e){return encodeURI(e).replace(/[\/?#]/g,function(e){return"%"+e.charCodeAt(0).toString(16).toUpperCase()})}function u(e){return encodeURI(e).replace(/[?#]/g,function(e){return"%"+e.charCodeAt(0).toString(16).toUpperCase()})}function s(e){for(var t=new Array(e.length),n=0;n<e.length;n++)"object"==typeof e[n]&&(t[n]=new RegExp("^(?:"+e[n].pattern+")$"));return function(n,o){for(var i="",s=n||{},c=(o||{}).pretty?a:encodeURIComponent,l=0;l<e.length;l++){var f=e[l];if("string"!=typeof f){var d,p=s[f.name];if(null==p){if(f.optional){f.partial&&(i+=f.prefix);continue}throw new TypeError('Expected "'+f.name+'" to be defined')}if(r(p)){if(!f.repeat)throw new TypeError('Expected "'+f.name+'" to not repeat, but received `'+JSON.stringify(p)+"`");if(0===p.length){if(f.optional)continue;throw new TypeError('Expected "'+f.name+'" to not be empty')}for(var h=0;h<p.length;h++){if(d=c(p[h]),!t[l].test(d))throw new TypeError('Expected all "'+f.name+'" to match "'+f.pattern+'", but received `'+JSON.stringify(d)+"`");i+=(0===h?f.prefix:f.delimiter)+d}}else{if(d=f.asterisk?u(p):c(p),!t[l].test(d))throw new TypeError('Expected "'+f.name+'" to match "'+f.pattern+'", but received "'+d+'"');i+=f.prefix+d}}else i+=f}return i}}function c(e){return e.replace(/([.+*?=^!:${}()[\]|\/\\])/g,"\\$1")}function l(e){return e.replace(/([=!:$\/()])/g,"\\$1")}function f(e,t){return e.keys=t,e}function d(e){return e.sensitive?"":"i"}function p(e,t,n){r(t)||(n=t||n,t=[]);for(var 
o=(n=n||{}).strict,i=!1!==n.end,a="",u=0;u<e.length;u++){var s=e[u];if("string"==typeof s)a+=c(s);else{var l=c(s.prefix),p="(?:"+s.pattern+")";t.push(s),s.repeat&&(p+="(?:"+l+p+")*"),a+=p=s.optional?s.partial?l+"("+p+")?":"(?:"+l+"("+p+"))?":l+"("+p+")"}}var h=c(n.delimiter||"/"),m=a.slice(-h.length)===h;return o||(a=(m?a.slice(0,-h.length):a)+"(?:"+h+"(?=$))?"),a+=i?"$":o&&m?"":"(?="+h+"|$)",f(new RegExp("^"+a,d(n)),t)}function h(e,t,n){return r(t)||(n=t||n,t=[]),n=n||{},e instanceof RegExp?function(e,t){var n=e.source.match(/\((?!\?)/g);if(n)for(var r=0;r<n.length;r++)t.push({name:r,prefix:null,delimiter:null,optional:!1,repeat:!1,partial:!1,asterisk:!1,pattern:null});return f(e,t)}(e,t):r(e)?function(e,t,n){for(var r=[],o=0;o<e.length;o++)r.push(h(e[o],t,n).source);return f(new RegExp("(?:"+r.join("|")+")",d(n)),t)}(e,t,n):function(e,t,n){return p(i(e,n),t,n)}(e,t,n)}},function(e,t,n){"use strict";var r=n(38),o=n.n(r),i=n(19),a=n.n(i),u=n(1),s=n.n(u),c=n(12),l=n.n(c),f=n(27),d=n(63),p=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e};function h(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}var m=function(e){return"/"===e.charAt(0)?e:"/"+e},v=function(e,t){return e?p({},t,{pathname:m(e)+t.pathname}):t},y=function(e){return"string"==typeof e?e:Object(f.createPath)(e)},g=function(e){return function(){a()(!1,"You cannot %s with <StaticRouter>",e)}},_=function(){},b=function(e){function t(){var n,r;!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,t);for(var o=arguments.length,i=Array(o),a=0;a<o;a++)i[a]=arguments[a];return n=r=h(this,e.call.apply(e,[this].concat(i))),r.createHref=function(e){return m(r.props.basename+y(e))},r.handlePush=function(e){var 
t=r.props,n=t.basename,o=t.context;o.action="PUSH",o.location=v(n,Object(f.createLocation)(e)),o.url=y(o.location)},r.handleReplace=function(e){var t=r.props,n=t.basename,o=t.context;o.action="REPLACE",o.location=v(n,Object(f.createLocation)(e)),o.url=y(o.location)},r.handleListen=function(){return _},r.handleBlock=function(){return _},h(r,n)}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),t.prototype.getChildContext=function(){return{router:{staticContext:this.props.context}}},t.prototype.componentWillMount=function(){o()(!this.props.history,"<StaticRouter> ignores the history prop. To use a custom history, use `import { Router }` instead of `import { StaticRouter as Router }`.")},t.prototype.render=function(){var e=this.props,t=e.basename,n=(e.context,e.location),r=function(e,t){var n={};for(var r in e)t.indexOf(r)>=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}(e,["basename","context","location"]),o={createHref:this.createHref,action:"POP",location:function(e,t){if(!e)return t;var n=m(e);return 0!==t.pathname.indexOf(n)?t:p({},t,{pathname:t.pathname.substr(n.length)})}(t,Object(f.createLocation)(n)),push:this.handlePush,replace:this.handleReplace,go:g("go"),goBack:g("goBack"),goForward:g("goForward"),listen:this.handleListen,block:this.handleBlock};return s.a.createElement(d.a,p({},r,{history:o}))},t}(s.a.Component);b.propTypes={basename:l.a.string,context:l.a.object.isRequired,location:l.a.oneOfType([l.a.string,l.a.object])},b.defaultProps={basename:"",location:"/"},b.childContextTypes={router:l.a.object.isRequired},t.a=b},function(e,t,n){"use strict";var r=n(1),o=n.n(r),i=n(12),a=n.n(i),u=n(38),s=n.n(u),c=n(19),l=n.n(c),f=n(64);var d=function(e){function t(){return 
function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,t),function(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}(this,e.apply(this,arguments))}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),t.prototype.componentWillMount=function(){l()(this.context.router,"You should not use <Switch> outside a <Router>")},t.prototype.componentWillReceiveProps=function(e){s()(!(e.location&&!this.props.location),'<Switch> elements should not change from uncontrolled to controlled (or vice versa). You initially used no "location" prop and then provided one on a subsequent render.'),s()(!(!e.location&&this.props.location),'<Switch> elements should not change from controlled to uncontrolled (or vice versa). 
You provided a "location" prop initially but omitted it on a subsequent render.')},t.prototype.render=function(){var e=this.context.router.route,t=this.props.children,n=this.props.location||e.location,r=void 0,i=void 0;return o.a.Children.forEach(t,function(t){if(null==r&&o.a.isValidElement(t)){var a=t.props,u=a.path,s=a.exact,c=a.strict,l=a.sensitive,d=a.from,p=u||d;i=t,r=Object(f.a)(n.pathname,{path:p,exact:s,strict:c,sensitive:l},e.match)}}),r?o.a.cloneElement(i,{location:n,computedMatch:r}):null},t}(o.a.Component);d.contextTypes={router:a.a.shape({route:a.a.object.isRequired}).isRequired},d.propTypes={children:a.a.node,location:a.a.object},t.a=d},function(e,t,n){"use strict";var r=n(1),o=n.n(r),i=n(12),a=n.n(i),u=n(118),s=n.n(u),c=n(89),l=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e};t.a=function(e){var t=function(t){var n=t.wrappedComponentRef,r=function(e,t){var n={};for(var r in e)t.indexOf(r)>=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}(t,["wrappedComponentRef"]);return o.a.createElement(c.a,{children:function(t){return o.a.createElement(e,l({},r,t,{ref:n}))}})};return t.displayName="withRouter("+(e.displayName||e.name)+")",t.WrappedComponent=e,t.propTypes={wrappedComponentRef:a.a.func},s()(t,e)}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),n.d(t,"createStore",function(){return s}),n.d(t,"combineReducers",function(){return l}),n.d(t,"bindActionCreators",function(){return d}),n.d(t,"applyMiddleware",function(){return h}),n.d(t,"compose",function(){return p}),n.d(t,"__DO_NOT_USE__ActionTypes",function(){return o});var r=n(290),o={INIT:"@@redux/INIT"+Math.random().toString(36).substring(7).split("").join("."),REPLACE:"@@redux/REPLACE"+Math.random().toString(36).substring(7).split("").join(".")},i="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof 
e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},a=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e};function u(e){if("object"!==(void 0===e?"undefined":i(e))||null===e)return!1;for(var t=e;null!==Object.getPrototypeOf(t);)t=Object.getPrototypeOf(t);return Object.getPrototypeOf(e)===t}function s(e,t,n){var a;if("function"==typeof t&&void 0===n&&(n=t,t=void 0),void 0!==n){if("function"!=typeof n)throw new Error("Expected the enhancer to be a function.");return n(s)(e,t)}if("function"!=typeof e)throw new Error("Expected the reducer to be a function.");var c=e,l=t,f=[],d=f,p=!1;function h(){d===f&&(d=f.slice())}function m(){if(p)throw new Error("You may not call store.getState() while the reducer is executing. The reducer has already received the state as an argument. Pass it down from the top reducer instead of reading it from the store.");return l}function v(e){if("function"!=typeof e)throw new Error("Expected the listener to be a function.");if(p)throw new Error("You may not call store.subscribe() while the reducer is executing. If you would like to be notified after the store has been updated, subscribe from a component and invoke store.getState() in the callback to access the latest state. See https://redux.js.org/api-reference/store#subscribe(listener) for more details.");var t=!0;return h(),d.push(e),function(){if(t){if(p)throw new Error("You may not unsubscribe from a store listener while the reducer is executing. See https://redux.js.org/api-reference/store#subscribe(listener) for more details.");t=!1,h();var n=d.indexOf(e);d.splice(n,1)}}}function y(e){if(!u(e))throw new Error("Actions must be plain objects. Use custom middleware for async actions.");if(void 0===e.type)throw new Error('Actions may not have an undefined "type" property. 
Have you misspelled a constant?');if(p)throw new Error("Reducers may not dispatch actions.");try{p=!0,l=c(l,e)}finally{p=!1}for(var t=f=d,n=0;n<t.length;n++){(0,t[n])()}return e}return y({type:o.INIT}),(a={dispatch:y,subscribe:v,getState:m,replaceReducer:function(e){if("function"!=typeof e)throw new Error("Expected the nextReducer to be a function.");c=e,y({type:o.REPLACE})}})[r.a]=function(){var e,t=v;return(e={subscribe:function(e){if("object"!==(void 0===e?"undefined":i(e))||null===e)throw new TypeError("Expected the observer to be an object.");function n(){e.next&&e.next(m())}return n(),{unsubscribe:t(n)}}})[r.a]=function(){return this},e},a}function c(e,t){var n=t&&t.type;return"Given "+(n&&'action "'+String(n)+'"'||"an action")+', reducer "'+e+'" returned undefined. To ignore an action, you must explicitly return the previous state. If you want this reducer to hold no value, you can return null instead of undefined.'}function l(e){for(var t=Object.keys(e),n={},r=0;r<t.length;r++){var i=t[r];0,"function"==typeof e[i]&&(n[i]=e[i])}var a=Object.keys(n);var u=void 0;try{!function(e){Object.keys(e).forEach(function(t){var n=e[t];if(void 0===n(void 0,{type:o.INIT}))throw new Error('Reducer "'+t+"\" returned undefined during initialization. If the state passed to the reducer is undefined, you must explicitly return the initial state. The initial state may not be undefined. If you don't want to set a value for this reducer, you can use null instead of undefined.");if(void 0===n(void 0,{type:"@@redux/PROBE_UNKNOWN_ACTION_"+Math.random().toString(36).substring(7).split("").join(".")}))throw new Error('Reducer "'+t+"\" returned undefined when probed with a random type. Don't try to handle "+o.INIT+' or other actions in "redux/*" namespace. They are considered private. Instead, you must return the current state for any unknown actions, unless it is undefined, in which case you must return the initial state, regardless of the action type. 
The initial state may not be undefined, but can be null.')})}(n)}catch(e){u=e}return function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=arguments[1];if(u)throw u;for(var r=!1,o={},i=0;i<a.length;i++){var s=a[i],l=n[s],f=e[s],d=l(f,t);if(void 0===d){var p=c(s,t);throw new Error(p)}o[s]=d,r=r||d!==f}return r?o:e}}function f(e,t){return function(){return t(e.apply(this,arguments))}}function d(e,t){if("function"==typeof e)return f(e,t);if("object"!==(void 0===e?"undefined":i(e))||null===e)throw new Error("bindActionCreators expected an object or a function, instead received "+(null===e?"null":void 0===e?"undefined":i(e))+'. Did you write "import ActionCreators from" instead of "import * as ActionCreators from"?');for(var n=Object.keys(e),r={},o=0;o<n.length;o++){var a=n[o],u=e[a];"function"==typeof u&&(r[a]=f(u,t))}return r}function p(){for(var e=arguments.length,t=Array(e),n=0;n<e;n++)t[n]=arguments[n];return 0===t.length?function(e){return e}:1===t.length?t[0]:t.reduce(function(e,t){return function(){return e(t.apply(void 0,arguments))}})}function h(){for(var e=arguments.length,t=Array(e),n=0;n<e;n++)t[n]=arguments[n];return function(e){return function(){for(var n=arguments.length,r=Array(n),o=0;o<n;o++)r[o]=arguments[o];var i=e.apply(void 0,r),u=function(){throw new Error("Dispatching while constructing your middleware is not allowed. 
Other middleware would not be applied to this dispatch.")},s={getState:i.getState,dispatch:function(){return u.apply(void 0,arguments)}},c=t.map(function(e){return e(s)});return u=p.apply(void 0,c)(i.dispatch),a({},i,{dispatch:u})}}}},function(e,t,n){e.exports=n(294)()},function(e,t,n){"use strict";n.d(t,"b",function(){return i}),n.d(t,"a",function(){return a});var r=n(145),o=n.n(r),i=o.a.shape({trySubscribe:o.a.func.isRequired,tryUnsubscribe:o.a.func.isRequired,notifyNestedSubs:o.a.func.isRequired,isSubscribed:o.a.func.isRequired}),a=o.a.shape({subscribe:o.a.func.isRequired,dispatch:o.a.func.isRequired,getState:o.a.func.isRequired})},function(e,t,n){"use strict";t.a=function(e){var t,n,r=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},i=r.getDisplayName,h=void 0===i?function(e){return"ConnectAdvanced("+e+")"}:i,m=r.methodName,v=void 0===m?"connectAdvanced":m,y=r.renderCountProp,g=void 0===y?void 0:y,_=r.shouldHandleStateChanges,b=void 0===_||_,w=r.storeKey,x=void 0===w?"store":w,E=r.withRef,O=void 0!==E&&E,S=function(e,t){var n={};for(var r in e)t.indexOf(r)>=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}(r,["getDisplayName","methodName","renderCountProp","shouldHandleStateChanges","storeKey","withRef"]),T=x+"Subscription",k=f++,R=((t={})[x]=c.a,t[T]=c.b,t),j=((n={})[T]=c.b,n);return function(t){a()("function"==typeof t,"You must pass a component to the function returned by "+v+". 
Instead received "+JSON.stringify(t));var n=t.displayName||t.name||"Component",r=h(n),i=l({},S,{getDisplayName:h,methodName:v,renderCountProp:g,shouldHandleStateChanges:b,storeKey:x,withRef:O,displayName:r,wrappedComponentName:n,WrappedComponent:t}),c=function(n){function o(e,t){!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,o);var i=function(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}(this,n.call(this,e,t));return i.version=k,i.state={},i.renderCount=0,i.store=e[x]||t[x],i.propsMode=Boolean(e[x]),i.setWrappedInstance=i.setWrappedInstance.bind(i),a()(i.store,'Could not find "'+x+'" in either the context or props of "'+r+'". Either wrap the root component in a <Provider>, or explicitly pass "'+x+'" as a prop to "'+r+'".'),i.initSelector(),i.initSubscription(),i}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(o,n),o.prototype.getChildContext=function(){var e,t=this.propsMode?null:this.subscription;return(e={})[T]=t||this.context[T],e},o.prototype.componentDidMount=function(){b&&(this.subscription.trySubscribe(),this.selector.run(this.props),this.selector.shouldComponentUpdate&&this.forceUpdate())},o.prototype.componentWillReceiveProps=function(e){this.selector.run(e)},o.prototype.shouldComponentUpdate=function(){return this.selector.shouldComponentUpdate},o.prototype.componentWillUnmount=function(){this.subscription&&this.subscription.tryUnsubscribe(),this.subscription=null,this.notifyNestedSubs=p,this.store=null,this.selector.run=p,this.selector.shouldComponentUpdate=!1},o.prototype.getWrappedInstance=function(){return a()(O,"To access the 
wrapped instance, you need to specify { withRef: true } in the options argument of the "+v+"() call."),this.wrappedInstance},o.prototype.setWrappedInstance=function(e){this.wrappedInstance=e},o.prototype.initSelector=function(){var t=e(this.store.dispatch,i);this.selector=function(e,t){var n={run:function(r){try{var o=e(t.getState(),r);(o!==n.props||n.error)&&(n.shouldComponentUpdate=!0,n.props=o,n.error=null)}catch(e){n.shouldComponentUpdate=!0,n.error=e}}};return n}(t,this.store),this.selector.run(this.props)},o.prototype.initSubscription=function(){if(b){var e=(this.propsMode?this.props:this.context)[T];this.subscription=new s.a(this.store,e,this.onStateChange.bind(this)),this.notifyNestedSubs=this.subscription.notifyNestedSubs.bind(this.subscription)}},o.prototype.onStateChange=function(){this.selector.run(this.props),this.selector.shouldComponentUpdate?(this.componentDidUpdate=this.notifyNestedSubsOnComponentDidUpdate,this.setState(d)):this.notifyNestedSubs()},o.prototype.notifyNestedSubsOnComponentDidUpdate=function(){this.componentDidUpdate=void 0,this.notifyNestedSubs()},o.prototype.isSubscribed=function(){return Boolean(this.subscription)&&this.subscription.isSubscribed()},o.prototype.addExtraProps=function(e){if(!(O||g||this.propsMode&&this.subscription))return e;var t=l({},e);return O&&(t.ref=this.setWrappedInstance),g&&(t[g]=this.renderCount++),this.propsMode&&this.subscription&&(t[T]=this.subscription),t},o.prototype.render=function(){var e=this.selector;if(e.shouldComponentUpdate=!1,e.error)throw e.error;return Object(u.createElement)(t,this.addExtraProps(e.props))},o}(u.Component);return c.WrappedComponent=t,c.displayName=r,c.childContextTypes=j,c.contextTypes=R,c.propTypes=R,o()(c,t)}};var r=n(296),o=n.n(r),i=n(297),a=n.n(i),u=n(90),s=(n.n(u),n(298)),c=n(146),l=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e};var f=0,d={};function 
p(){}},function(e,t,n){"use strict";t.a=function(e){return function(t,n){var r=e(t,n);function o(){return r}return o.dependsOnOwnProps=!1,o}},t.b=function(e,t){return function(t,n){n.displayName;var o=function(e,t){return o.dependsOnOwnProps?o.mapToProps(e,t):o.mapToProps(e)};return o.dependsOnOwnProps=!0,o.mapToProps=function(t,n){o.mapToProps=e,o.dependsOnOwnProps=r(e);var i=o(t,n);return"function"==typeof i&&(o.mapToProps=i,o.dependsOnOwnProps=r(i),i=o(t,n)),i},o}};n(149);function r(e){return null!==e.dependsOnOwnProps&&void 0!==e.dependsOnOwnProps?Boolean(e.dependsOnOwnProps):1!==e.length}},function(e,t,n){"use strict";n(302),n(91)},function(e,t,n){"use strict";var r=n(304).a.Symbol;t.a=r},function(e,t,n){"use strict";var r=function(){};e.exports=r},function(e,t,n){"use strict";var r=n(1),o=n.n(r),i=n(12),a=n.n(i),u=n(19),s=n.n(u),c=n(27),l=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e};function f(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}var d=function(e){return!!(e.metaKey||e.altKey||e.ctrlKey||e.shiftKey)},p=function(e){function t(){var n,r;!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,t);for(var o=arguments.length,i=Array(o),a=0;a<o;a++)i[a]=arguments[a];return n=r=f(this,e.call.apply(e,[this].concat(i))),r.handleClick=function(e){if(r.props.onClick&&r.props.onClick(e),!e.defaultPrevented&&0===e.button&&!r.props.target&&!d(e)){e.preventDefault();var t=r.context.router.history,n=r.props,o=n.replace,i=n.to;o?t.replace(i):t.push(i)}},f(r,n)}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof 
t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),t.prototype.render=function(){var e=this.props,t=(e.replace,e.to),n=e.innerRef,r=function(e,t){var n={};for(var r in e)t.indexOf(r)>=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}(e,["replace","to","innerRef"]);s()(this.context.router,"You should not use <Link> outside a <Router>"),s()(void 0!==t,'You must specify the "to" property');var i=this.context.router.history,a="string"==typeof t?Object(c.createLocation)(t,null,null,i.location):t,u=i.createHref(a);return o.a.createElement("a",l({},r,{onClick:this.handleClick,href:u,ref:n}))},t}(o.a.Component);p.propTypes={onClick:a.a.func,target:a.a.string,replace:a.a.bool,to:a.a.oneOfType([a.a.string,a.a.object]).isRequired,innerRef:a.a.oneOfType([a.a.string,a.a.func])},p.defaultProps={replace:!1},p.contextTypes={router:a.a.shape({history:a.a.shape({push:a.a.func.isRequired,replace:a.a.func.isRequired,createHref:a.a.func.isRequired}).isRequired}).isRequired},t.a=p},function(e,t,n){"use strict";var r=n(89);t.a=r.a},function(e,t,n){e.exports={default:n(349),__esModule:!0}},function(e,t,n){var r=n(20);e.exports=function(e,t,n,o){try{return o?t(r(n)[0],n[1]):t(n)}catch(t){var i=e.return;throw void 0!==i&&r(i.call(e)),t}}},function(e,t,n){var r=n(36),o=n(14)("iterator"),i=Array.prototype;e.exports=function(e){return void 0!==e&&(r.Array===e||i[o]===e)}},function(e,t,n){var r=n(14)("iterator"),o=!1;try{var i=[7][r]();i.return=function(){o=!0},Array.from(i,function(){throw 2})}catch(e){}e.exports=function(e,t){if(!t&&!o)return!1;var n=!1;try{var i=[7],a=i[r]();a.next=function(){return{done:n=!0}},i[r]=function(){return a},e(i)}catch(e){}return n}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=function(e){return 
e&&e.__esModule?e:{default:e}}(n(352)).default.namespace("vcat.search");t.default=r},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.setSaved=t.getSaved=t.getCountFromStore=t.getSavedFromStore=t.getSavedCount=t.getSavedUrls=void 0;var r=s(n(93)),o=s(n(28)),i=s(n(158)),a=n(94),u=n(17);function s(e){return e&&e.__esModule?e:{default:e}}t.getSavedUrls=function(){var e=c();return(0,o.default)(e).sort().map(function(t){var n=e[t],r=n.verified,i=n.hash,a=n.frames;return(0,o.default)(a).map(function(e){return a[e]&&(0,u.imageUrl)(r,i,e)}).filter(function(e){return!!e})}).reduce(function(e,t){return t&&t.length?e.concat(t):e},[])},t.getSavedCount=function(e){return e=e||c(),(0,o.default)(e).sort().map(function(t){var n=e[t].frames;return(0,o.default)(n).filter(function(e){return n[e]}).filter(function(e){return!!e}).length}).reduce(function(e,t){return e+t},0)};var c=t.getSavedFromStore=function(){return a.store.getState().review.saved};t.getCountFromStore=function(){return a.store.getState().review.count||0},t.getSaved=function(){try{return JSON.parse((0,i.default)("saved"))||{}}catch(e){return console.log("error getting saved!",e),{}}},t.setSaved=function(e){try{(0,i.default)("saved",(0,r.default)(e))}catch(e){console.log("error setting saved!",e)}}},function(e,t,n){"use strict";t.__esModule=!0;var r=i(n(359)),o=i(n(362));function i(e){return e&&e.__esModule?e:{default:e}}t.default=function(){return function(e,t){if(Array.isArray(e))return e;if((0,r.default)(Object(e)))return function(e,t){var n=[],r=!0,i=!1,a=void 0;try{for(var u,s=(0,o.default)(e);!(r=(u=s.next()).done)&&(n.push(u.value),!t||n.length!==t);r=!0);}catch(e){i=!0,a=e}finally{try{!r&&s.return&&s.return()}finally{if(i)throw a}}return n}(e,t);throw new TypeError("Invalid attempt to destructure non-iterable instance")}}()},function(e,t,n){var r=n(20),o=n(56),i=n(14)("species");e.exports=function(e,t){var n,a=r(e).constructor;return void 0===a||void 
0==(n=r(a)[i])?t:o(n)}},function(e,t,n){var r,o,i,a=n(34),u=n(370),s=n(131),c=n(76),l=n(13),f=l.process,d=l.setImmediate,p=l.clearImmediate,h=l.MessageChannel,m=l.Dispatch,v=0,y={},g=function(){var e=+this;if(y.hasOwnProperty(e)){var t=y[e];delete y[e],t()}},_=function(e){g.call(e.data)};d&&p||(d=function(e){for(var t=[],n=1;arguments.length>n;)t.push(arguments[n++]);return y[++v]=function(){u("function"==typeof e?e:Function(e),t)},r(v),v},p=function(e){delete y[e]},"process"==n(46)(f)?r=function(e){f.nextTick(a(g,e,1))}:m&&m.now?r=function(e){m.now(a(g,e,1))}:h?(i=(o=new h).port2,o.port1.onmessage=_,r=a(i.postMessage,i,1)):l.addEventListener&&"function"==typeof postMessage&&!l.importScripts?(r=function(e){l.postMessage(e+"","*")},l.addEventListener("message",_,!1)):r="onreadystatechange"in c("script")?function(e){s.appendChild(c("script")).onreadystatechange=function(){s.removeChild(this),g.call(e)}}:function(e){setTimeout(a(g,e,1),0)}),e.exports={set:d,clear:p}},function(e,t){e.exports=function(e){try{return{e:!1,v:e()}}catch(e){return{e:!0,v:e}}}},function(e,t,n){var r=n(20),o=n(23),i=n(98);e.exports=function(e,t){if(r(e),o(t)&&t.constructor===e)return t;var 
n=i.f(e);return(0,n.resolve)(t),n.promise}},function(e,t,n){e.exports={addDays:n(50),addHours:n(166),addISOYears:n(167),addMilliseconds:n(51),addMinutes:n(169),addMonths:n(68),addQuarters:n(170),addSeconds:n(171),addWeeks:n(101),addYears:n(172),areRangesOverlapping:n(377),closestIndexTo:n(378),closestTo:n(379),compareAsc:n(53),compareDesc:n(102),differenceInCalendarDays:n(67),differenceInCalendarISOWeeks:n(380),differenceInCalendarISOYears:n(173),differenceInCalendarMonths:n(174),differenceInCalendarQuarters:n(381),differenceInCalendarWeeks:n(382),differenceInCalendarYears:n(176),differenceInDays:n(177),differenceInHours:n(383),differenceInISOYears:n(384),differenceInMilliseconds:n(69),differenceInMinutes:n(385),differenceInMonths:n(103),differenceInQuarters:n(386),differenceInSeconds:n(104),differenceInWeeks:n(387),differenceInYears:n(388),distanceInWords:n(179),distanceInWordsStrict:n(392),distanceInWordsToNow:n(393),eachDay:n(394),endOfDay:n(106),endOfHour:n(395),endOfISOWeek:n(396),endOfISOYear:n(397),endOfMinute:n(398),endOfMonth:n(181),endOfQuarter:n(399),endOfSecond:n(400),endOfToday:n(401),endOfTomorrow:n(402),endOfWeek:n(180),endOfYear:n(403),endOfYesterday:n(404),format:n(405),getDate:n(406),getDay:n(407),getDayOfYear:n(182),getDaysInMonth:n(100),getDaysInYear:n(408),getHours:n(409),getISODay:n(186),getISOWeek:n(107),getISOWeeksInYear:n(410),getISOYear:n(29),getMilliseconds:n(411),getMinutes:n(412),getMonth:n(413),getOverlappingDaysInRanges:n(414),getQuarter:n(175),getSeconds:n(415),getTime:n(416),getYear:n(417),isAfter:n(418),isBefore:n(419),isDate:n(99),isEqual:n(420),isFirstDayOfMonth:n(421),isFriday:n(422),isFuture:n(423),isLastDayOfMonth:n(424),isLeapYear:n(185),isMonday:n(425),isPast:n(426),isSameDay:n(427),isSameHour:n(187),isSameISOWeek:n(189),isSameISOYear:n(190),isSameMinute:n(191),isSameMonth:n(193),isSameQuarter:n(194),isSameSecond:n(196),isSameWeek:n(108),isSameYear:n(198),isSaturday:n(428),isSunday:n(429),isThisHour:n(430),isThisISOWeek:n(431
),isThisISOYear:n(432),isThisMinute:n(433),isThisMonth:n(434),isThisQuarter:n(435),isThisSecond:n(436),isThisWeek:n(437),isThisYear:n(438),isThursday:n(439),isToday:n(440),isTomorrow:n(441),isTuesday:n(442),isValid:n(184),isWednesday:n(443),isWeekend:n(444),isWithinRange:n(445),isYesterday:n(446),lastDayOfISOWeek:n(447),lastDayOfISOYear:n(448),lastDayOfMonth:n(449),lastDayOfQuarter:n(450),lastDayOfWeek:n(199),lastDayOfYear:n(451),max:n(452),min:n(453),parse:n(0),setDate:n(454),setDay:n(455),setDayOfYear:n(456),setHours:n(457),setISODay:n(458),setISOWeek:n(459),setISOYear:n(168),setMilliseconds:n(460),setMinutes:n(461),setMonth:n(200),setQuarter:n(462),setSeconds:n(463),setYear:n(464),startOfDay:n(31),startOfHour:n(188),startOfISOWeek:n(30),startOfISOYear:n(52),startOfMinute:n(192),startOfMonth:n(465),startOfQuarter:n(195),startOfSecond:n(197),startOfToday:n(466),startOfTomorrow:n(467),startOfWeek:n(66),startOfYear:n(183),startOfYesterday:n(468),subDays:n(469),subHours:n(470),subISOYears:n(178),subMilliseconds:n(471),subMinutes:n(472),subMonths:n(473),subQuarters:n(474),subSeconds:n(475),subWeeks:n(476),subYears:n(477)}},function(e,t,n){var r=n(51),o=36e5;e.exports=function(e,t){var n=Number(t);return r(e,n*o)}},function(e,t,n){var r=n(29),o=n(168);e.exports=function(e,t){var n=Number(t);return o(e,r(e)+n)}},function(e,t,n){var r=n(0),o=n(52),i=n(67);e.exports=function(e,t){var n=r(e),a=Number(t),u=i(n,o(n)),s=new Date(0);return s.setFullYear(a,0,4),s.setHours(0,0,0,0),(n=o(s)).setDate(n.getDate()+u),n}},function(e,t,n){var r=n(51),o=6e4;e.exports=function(e,t){var n=Number(t);return r(e,n*o)}},function(e,t,n){var r=n(68);e.exports=function(e,t){var n=Number(t);return r(e,3*n)}},function(e,t,n){var r=n(51);e.exports=function(e,t){var n=Number(t);return r(e,1e3*n)}},function(e,t,n){var r=n(68);e.exports=function(e,t){var n=Number(t);return r(e,12*n)}},function(e,t,n){var r=n(29);e.exports=function(e,t){return r(e)-r(t)}},function(e,t,n){var 
r=n(0);e.exports=function(e,t){var n=r(e),o=r(t);return 12*(n.getFullYear()-o.getFullYear())+(n.getMonth()-o.getMonth())}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e);return Math.floor(t.getMonth()/3)+1}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e),o=r(t);return n.getFullYear()-o.getFullYear()}},function(e,t,n){var r=n(0),o=n(67),i=n(53);e.exports=function(e,t){var n=r(e),a=r(t),u=i(n,a),s=Math.abs(o(n,a));return n.setDate(n.getDate()-u*s),u*(s-(i(n,a)===-u))}},function(e,t,n){var r=n(167);e.exports=function(e,t){var n=Number(t);return r(e,-n)}},function(e,t,n){var r=n(102),o=n(0),i=n(104),a=n(103),u=n(105),s=1440,c=2520,l=43200,f=86400;e.exports=function(e,t,n){var d=n||{},p=r(e,t),h=d.locale,m=u.distanceInWords.localize;h&&h.distanceInWords&&h.distanceInWords.localize&&(m=h.distanceInWords.localize);var v,y,g={addSuffix:Boolean(d.addSuffix),comparison:p};p>0?(v=o(e),y=o(t)):(v=o(t),y=o(e));var _,b=i(y,v),w=y.getTimezoneOffset()-v.getTimezoneOffset(),x=Math.round(b/60)-w;if(x<2)return d.includeSeconds?b<5?m("lessThanXSeconds",5,g):b<10?m("lessThanXSeconds",10,g):b<20?m("lessThanXSeconds",20,g):b<40?m("halfAMinute",null,g):m(b<60?"lessThanXMinutes":"xMinutes",1,g):0===x?m("lessThanXMinutes",1,g):m("xMinutes",x,g);if(x<45)return m("xMinutes",x,g);if(x<90)return m("aboutXHours",1,g);if(x<s)return m("aboutXHours",Math.round(x/60),g);if(x<c)return m("xDays",1,g);if(x<l)return m("xDays",Math.round(x/s),g);if(x<f)return m("aboutXMonths",_=Math.round(x/l),g);if((_=a(y,v))<12)return m("xMonths",Math.round(x/l),g);var E=_%12,O=Math.floor(_/12);return E<3?m("aboutXYears",O,g):E<9?m("overXYears",O,g):m("almostXYears",O+1,g)}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=t&&Number(t.weekStartsOn)||0,o=r(e),i=o.getDay(),a=6+(i<n?-7:0)-(i-n);return o.setDate(o.getDate()+a),o.setHours(23,59,59,999),o}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e),n=t.getMonth();return 
t.setFullYear(t.getFullYear(),n+1,0),t.setHours(23,59,59,999),t}},function(e,t,n){var r=n(0),o=n(183),i=n(67);e.exports=function(e){var t=r(e);return i(t,o(t))+1}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e),n=new Date(0);return n.setFullYear(t.getFullYear(),0,1),n.setHours(0,0,0,0),n}},function(e,t,n){var r=n(99);e.exports=function(e){if(r(e))return!isNaN(e);throw new TypeError(toString.call(e)+" is not an instance of Date")}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e).getFullYear();return t%400==0||t%4==0&&t%100!=0}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e).getDay();return 0===t&&(t=7),t}},function(e,t,n){var r=n(188);e.exports=function(e,t){var n=r(e),o=r(t);return n.getTime()===o.getTime()}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e);return t.setMinutes(0,0,0),t}},function(e,t,n){var r=n(108);e.exports=function(e,t){return r(e,t,{weekStartsOn:1})}},function(e,t,n){var r=n(52);e.exports=function(e,t){var n=r(e),o=r(t);return n.getTime()===o.getTime()}},function(e,t,n){var r=n(192);e.exports=function(e,t){var n=r(e),o=r(t);return n.getTime()===o.getTime()}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e);return t.setSeconds(0,0),t}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e),o=r(t);return n.getFullYear()===o.getFullYear()&&n.getMonth()===o.getMonth()}},function(e,t,n){var r=n(195);e.exports=function(e,t){var n=r(e),o=r(t);return n.getTime()===o.getTime()}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e),n=t.getMonth(),o=n-n%3;return t.setMonth(o,1),t.setHours(0,0,0,0),t}},function(e,t,n){var r=n(197);e.exports=function(e,t){var n=r(e),o=r(t);return n.getTime()===o.getTime()}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e);return t.setMilliseconds(0),t}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e),o=r(t);return n.getFullYear()===o.getFullYear()}},function(e,t,n){var r=n(0);e.exports=function(e,t){var 
n=t&&Number(t.weekStartsOn)||0,o=r(e),i=o.getDay(),a=6+(i<n?-7:0)-(i-n);return o.setHours(0,0,0,0),o.setDate(o.getDate()+a),o}},function(e,t,n){var r=n(0),o=n(100);e.exports=function(e,t){var n=r(e),i=Number(t),a=n.getFullYear(),u=n.getDate(),s=new Date(0);s.setFullYear(a,i,15),s.setHours(0,0,0,0);var c=o(s);return n.setMonth(i,Math.min(u,c)),n}},function(e,t,n){"use strict";(function(t,r){var o=n(70);e.exports=_;var i,a=n(202);_.ReadableState=g;n(109).EventEmitter;var u=function(e,t){return e.listeners(t).length},s=n(203),c=n(71).Buffer,l=t.Uint8Array||function(){};var f=n(54);f.inherits=n(32);var d=n(482),p=void 0;p=d&&d.debuglog?d.debuglog("stream"):function(){};var h,m=n(483),v=n(205);f.inherits(_,s);var y=["error","close","destroy","pause","resume"];function g(e,t){i=i||n(33),e=e||{};var r=t instanceof i;this.objectMode=!!e.objectMode,r&&(this.objectMode=this.objectMode||!!e.readableObjectMode);var o=e.highWaterMark,a=e.readableHighWaterMark,u=this.objectMode?16:16384;this.highWaterMark=o||0===o?o:r&&(a||0===a)?a:u,this.highWaterMark=Math.floor(this.highWaterMark),this.buffer=new m,this.length=0,this.pipes=null,this.pipesCount=0,this.flowing=null,this.ended=!1,this.endEmitted=!1,this.reading=!1,this.sync=!0,this.needReadable=!1,this.emittedReadable=!1,this.readableListening=!1,this.resumeScheduled=!1,this.destroyed=!1,this.defaultEncoding=e.defaultEncoding||"utf8",this.awaitDrain=0,this.readingMore=!1,this.decoder=null,this.encoding=null,e.encoding&&(h||(h=n(206).StringDecoder),this.decoder=new h(e.encoding),this.encoding=e.encoding)}function _(e){if(i=i||n(33),!(this instanceof _))return new _(e);this._readableState=new g(e,this),this.readable=!0,e&&("function"==typeof e.read&&(this._read=e.read),"function"==typeof e.destroy&&(this._destroy=e.destroy)),s.call(this)}function b(e,t,n,r,o){var i,a=e._readableState;null===t?(a.reading=!1,function(e,t){if(t.ended)return;if(t.decoder){var 
n=t.decoder.end();n&&n.length&&(t.buffer.push(n),t.length+=t.objectMode?1:n.length)}t.ended=!0,O(e)}(e,a)):(o||(i=function(e,t){var n;(function(e){return c.isBuffer(e)||e instanceof l})(t)||"string"==typeof t||void 0===t||e.objectMode||(n=new TypeError("Invalid non-string/buffer chunk"));return n}(a,t)),i?e.emit("error",i):a.objectMode||t&&t.length>0?("string"==typeof t||a.objectMode||Object.getPrototypeOf(t)===c.prototype||(t=function(e){return c.from(e)}(t)),r?a.endEmitted?e.emit("error",new Error("stream.unshift() after end event")):w(e,a,t,!0):a.ended?e.emit("error",new Error("stream.push() after EOF")):(a.reading=!1,a.decoder&&!n?(t=a.decoder.write(t),a.objectMode||0!==t.length?w(e,a,t,!1):T(e,a)):w(e,a,t,!1))):r||(a.reading=!1));return function(e){return!e.ended&&(e.needReadable||e.length<e.highWaterMark||0===e.length)}(a)}function w(e,t,n,r){t.flowing&&0===t.length&&!t.sync?(e.emit("data",n),e.read(0)):(t.length+=t.objectMode?1:n.length,r?t.buffer.unshift(n):t.buffer.push(n),t.needReadable&&O(e)),T(e,t)}Object.defineProperty(_.prototype,"destroyed",{get:function(){return void 0!==this._readableState&&this._readableState.destroyed},set:function(e){this._readableState&&(this._readableState.destroyed=e)}}),_.prototype.destroy=v.destroy,_.prototype._undestroy=v.undestroy,_.prototype._destroy=function(e,t){this.push(null),t(e)},_.prototype.push=function(e,t){var n,r=this._readableState;return r.objectMode?n=!0:"string"==typeof e&&((t=t||r.defaultEncoding)!==r.encoding&&(e=c.from(e,t),t=""),n=!0),b(this,e,t,!1,n)},_.prototype.unshift=function(e){return b(this,e,null,!0,!1)},_.prototype.isPaused=function(){return!1===this._readableState.flowing},_.prototype.setEncoding=function(e){return h||(h=n(206).StringDecoder),this._readableState.decoder=new h(e),this._readableState.encoding=e,this};var x=8388608;function E(e,t){return 
e<=0||0===t.length&&t.ended?0:t.objectMode?1:e!=e?t.flowing&&t.length?t.buffer.head.data.length:t.length:(e>t.highWaterMark&&(t.highWaterMark=function(e){return e>=x?e=x:(e--,e|=e>>>1,e|=e>>>2,e|=e>>>4,e|=e>>>8,e|=e>>>16,e++),e}(e)),e<=t.length?e:t.ended?t.length:(t.needReadable=!0,0))}function O(e){var t=e._readableState;t.needReadable=!1,t.emittedReadable||(p("emitReadable",t.flowing),t.emittedReadable=!0,t.sync?o.nextTick(S,e):S(e))}function S(e){p("emit readable"),e.emit("readable"),P(e)}function T(e,t){t.readingMore||(t.readingMore=!0,o.nextTick(k,e,t))}function k(e,t){for(var n=t.length;!t.reading&&!t.flowing&&!t.ended&&t.length<t.highWaterMark&&(p("maybeReadMore read 0"),e.read(0),n!==t.length);)n=t.length;t.readingMore=!1}function R(e){p("readable nexttick read 0"),e.read(0)}function j(e,t){t.reading||(p("resume read 0"),e.read(0)),t.resumeScheduled=!1,t.awaitDrain=0,e.emit("resume"),P(e),t.flowing&&!t.reading&&e.read(0)}function P(e){var t=e._readableState;for(p("flow",t.flowing);t.flowing&&null!==e.read(););}function C(e,t){return 0===t.length?null:(t.objectMode?n=t.buffer.shift():!e||e>=t.length?(n=t.decoder?t.buffer.join(""):1===t.buffer.length?t.buffer.head.data:t.buffer.concat(t.length),t.buffer.clear()):n=function(e,t,n){var r;e<t.head.data.length?(r=t.head.data.slice(0,e),t.head.data=t.head.data.slice(e)):r=e===t.head.data.length?t.shift():n?function(e,t){var n=t.head,r=1,o=n.data;e-=o.length;for(;n=n.next;){var i=n.data,a=e>i.length?i.length:e;if(a===i.length?o+=i:o+=i.slice(0,e),0===(e-=a)){a===i.length?(++r,n.next?t.head=n.next:t.head=t.tail=null):(t.head=n,n.data=i.slice(a));break}++r}return t.length-=r,o}(e,t):function(e,t){var n=c.allocUnsafe(e),r=t.head,o=1;r.data.copy(n),e-=r.data.length;for(;r=r.next;){var i=r.data,a=e>i.length?i.length:e;if(i.copy(n,n.length-e,0,a),0===(e-=a)){a===i.length?(++o,r.next?t.head=r.next:t.head=t.tail=null):(t.head=r,r.data=i.slice(a));break}++o}return t.length-=o,n}(e,t);return r}(e,t.buffer,t.decoder),n);var 
n}function M(e){var t=e._readableState;if(t.length>0)throw new Error('"endReadable()" called on non-empty stream');t.endEmitted||(t.ended=!0,o.nextTick(I,t,e))}function I(e,t){e.endEmitted||0!==e.length||(e.endEmitted=!0,t.readable=!1,t.emit("end"))}function A(e,t){for(var n=0,r=e.length;n<r;n++)if(e[n]===t)return n;return-1}_.prototype.read=function(e){p("read",e),e=parseInt(e,10);var t=this._readableState,n=e;if(0!==e&&(t.emittedReadable=!1),0===e&&t.needReadable&&(t.length>=t.highWaterMark||t.ended))return p("read: emitReadable",t.length,t.ended),0===t.length&&t.ended?M(this):O(this),null;if(0===(e=E(e,t))&&t.ended)return 0===t.length&&M(this),null;var r,o=t.needReadable;return p("need readable",o),(0===t.length||t.length-e<t.highWaterMark)&&p("length less than watermark",o=!0),t.ended||t.reading?p("reading or ended",o=!1):o&&(p("do read"),t.reading=!0,t.sync=!0,0===t.length&&(t.needReadable=!0),this._read(t.highWaterMark),t.sync=!1,t.reading||(e=E(n,t))),null===(r=e>0?C(e,t):null)?(t.needReadable=!0,e=0):t.length-=e,0===t.length&&(t.ended||(t.needReadable=!0),n!==e&&t.ended&&M(this)),null!==r&&this.emit("data",r),r},_.prototype._read=function(e){this.emit("error",new Error("_read() is not implemented"))},_.prototype.pipe=function(e,t){var n=this,i=this._readableState;switch(i.pipesCount){case 0:i.pipes=e;break;case 1:i.pipes=[i.pipes,e];break;default:i.pipes.push(e)}i.pipesCount+=1,p("pipe count=%d opts=%j",i.pipesCount,t);var s=(!t||!1!==t.end)&&e!==r.stdout&&e!==r.stderr?l:_;function c(t,r){p("onunpipe"),t===n&&r&&!1===r.hasUnpiped&&(r.hasUnpiped=!0,p("cleanup"),e.removeListener("close",y),e.removeListener("finish",g),e.removeListener("drain",f),e.removeListener("error",v),e.removeListener("unpipe",c),n.removeListener("end",l),n.removeListener("end",_),n.removeListener("data",m),d=!0,!i.awaitDrain||e._writableState&&!e._writableState.needDrain||f())}function l(){p("onend"),e.end()}i.endEmitted?o.nextTick(s):n.once("end",s),e.on("unpipe",c);var 
f=function(e){return function(){var t=e._readableState;p("pipeOnDrain",t.awaitDrain),t.awaitDrain&&t.awaitDrain--,0===t.awaitDrain&&u(e,"data")&&(t.flowing=!0,P(e))}}(n);e.on("drain",f);var d=!1;var h=!1;function m(t){p("ondata"),h=!1,!1!==e.write(t)||h||((1===i.pipesCount&&i.pipes===e||i.pipesCount>1&&-1!==A(i.pipes,e))&&!d&&(p("false write response, pause",n._readableState.awaitDrain),n._readableState.awaitDrain++,h=!0),n.pause())}function v(t){p("onerror",t),_(),e.removeListener("error",v),0===u(e,"error")&&e.emit("error",t)}function y(){e.removeListener("finish",g),_()}function g(){p("onfinish"),e.removeListener("close",y),_()}function _(){p("unpipe"),n.unpipe(e)}return n.on("data",m),function(e,t,n){if("function"==typeof e.prependListener)return e.prependListener(t,n);e._events&&e._events[t]?a(e._events[t])?e._events[t].unshift(n):e._events[t]=[n,e._events[t]]:e.on(t,n)}(e,"error",v),e.once("close",y),e.once("finish",g),e.emit("pipe",n),i.flowing||(p("pipe resume"),n.resume()),e},_.prototype.unpipe=function(e){var t=this._readableState,n={hasUnpiped:!1};if(0===t.pipesCount)return this;if(1===t.pipesCount)return e&&e!==t.pipes?this:(e||(e=t.pipes),t.pipes=null,t.pipesCount=0,t.flowing=!1,e&&e.emit("unpipe",this,n),this);if(!e){var r=t.pipes,o=t.pipesCount;t.pipes=null,t.pipesCount=0,t.flowing=!1;for(var i=0;i<o;i++)r[i].emit("unpipe",this,n);return this}var a=A(t.pipes,e);return-1===a?this:(t.pipes.splice(a,1),t.pipesCount-=1,1===t.pipesCount&&(t.pipes=t.pipes[0]),e.emit("unpipe",this,n),this)},_.prototype.on=function(e,t){var n=s.prototype.on.call(this,e,t);if("data"===e)!1!==this._readableState.flowing&&this.resume();else if("readable"===e){var r=this._readableState;r.endEmitted||r.readableListening||(r.readableListening=r.needReadable=!0,r.emittedReadable=!1,r.reading?r.length&&O(this):o.nextTick(R,this))}return n},_.prototype.addListener=_.prototype.on,_.prototype.resume=function(){var e=this._readableState;return 
e.flowing||(p("resume"),e.flowing=!0,function(e,t){t.resumeScheduled||(t.resumeScheduled=!0,o.nextTick(j,e,t))}(this,e)),this},_.prototype.pause=function(){return p("call pause flowing=%j",this._readableState.flowing),!1!==this._readableState.flowing&&(p("pause"),this._readableState.flowing=!1,this.emit("pause")),this},_.prototype.wrap=function(e){var t=this,n=this._readableState,r=!1;for(var o in e.on("end",function(){if(p("wrapped end"),n.decoder&&!n.ended){var e=n.decoder.end();e&&e.length&&t.push(e)}t.push(null)}),e.on("data",function(o){(p("wrapped data"),n.decoder&&(o=n.decoder.write(o)),!n.objectMode||null!==o&&void 0!==o)&&((n.objectMode||o&&o.length)&&(t.push(o)||(r=!0,e.pause())))}),e)void 0===this[o]&&"function"==typeof e[o]&&(this[o]=function(t){return function(){return e[t].apply(e,arguments)}}(o));for(var i=0;i<y.length;i++)e.on(y[i],this.emit.bind(this,y[i]));return this._read=function(t){p("wrapped _read",t),r&&(r=!1,e.resume())},this},Object.defineProperty(_.prototype,"readableHighWaterMark",{enumerable:!1,get:function(){return this._readableState.highWaterMark}}),_._fromList=C}).call(t,n(3),n(40))},function(e,t){var n={}.toString;e.exports=Array.isArray||function(e){return"[object Array]"==n.call(e)}},function(e,t,n){e.exports=n(109).EventEmitter},function(e,t,n){"use strict";(function(e){
+/*!
+ * The buffer module from node.js, for the browser.
+ *
+ * @author Feross Aboukhadijeh <feross@feross.org> <http://feross.org>
+ * @license MIT
+ */
+var r=n(480),o=n(481),i=n(202);function a(){return s.TYPED_ARRAY_SUPPORT?2147483647:1073741823}function u(e,t){if(a()<t)throw new RangeError("Invalid typed array length");return s.TYPED_ARRAY_SUPPORT?(e=new Uint8Array(t)).__proto__=s.prototype:(null===e&&(e=new s(t)),e.length=t),e}function s(e,t,n){if(!(s.TYPED_ARRAY_SUPPORT||this instanceof s))return new s(e,t,n);if("number"==typeof e){if("string"==typeof t)throw new Error("If encoding is specified then the first argument must be a string");return f(this,e)}return c(this,e,t,n)}function c(e,t,n,r){if("number"==typeof t)throw new TypeError('"value" argument must not be a number');return"undefined"!=typeof ArrayBuffer&&t instanceof ArrayBuffer?function(e,t,n,r){if(t.byteLength,n<0||t.byteLength<n)throw new RangeError("'offset' is out of bounds");if(t.byteLength<n+(r||0))throw new RangeError("'length' is out of bounds");t=void 0===n&&void 0===r?new Uint8Array(t):void 0===r?new Uint8Array(t,n):new Uint8Array(t,n,r);s.TYPED_ARRAY_SUPPORT?(e=t).__proto__=s.prototype:e=d(e,t);return e}(e,t,n,r):"string"==typeof t?function(e,t,n){"string"==typeof n&&""!==n||(n="utf8");if(!s.isEncoding(n))throw new TypeError('"encoding" must be a valid string encoding');var r=0|h(t,n),o=(e=u(e,r)).write(t,n);o!==r&&(e=e.slice(0,o));return e}(e,t,n):function(e,t){if(s.isBuffer(t)){var n=0|p(t.length);return 0===(e=u(e,n)).length?e:(t.copy(e,0,0,n),e)}if(t){if("undefined"!=typeof ArrayBuffer&&t.buffer instanceof ArrayBuffer||"length"in t)return"number"!=typeof t.length||function(e){return e!=e}(t.length)?u(e,0):d(e,t);if("Buffer"===t.type&&i(t.data))return d(e,t.data)}throw new TypeError("First argument must be a string, Buffer, ArrayBuffer, Array, or array-like object.")}(e,t)}function l(e){if("number"!=typeof e)throw new TypeError('"size" argument must be a number');if(e<0)throw new RangeError('"size" argument must not be negative')}function f(e,t){if(l(t),e=u(e,t<0?0:0|p(t)),!s.TYPED_ARRAY_SUPPORT)for(var n=0;n<t;++n)e[n]=0;return 
e}function d(e,t){var n=t.length<0?0:0|p(t.length);e=u(e,n);for(var r=0;r<n;r+=1)e[r]=255&t[r];return e}function p(e){if(e>=a())throw new RangeError("Attempt to allocate Buffer larger than maximum size: 0x"+a().toString(16)+" bytes");return 0|e}function h(e,t){if(s.isBuffer(e))return e.length;if("undefined"!=typeof ArrayBuffer&&"function"==typeof ArrayBuffer.isView&&(ArrayBuffer.isView(e)||e instanceof ArrayBuffer))return e.byteLength;"string"!=typeof e&&(e=""+e);var n=e.length;if(0===n)return 0;for(var r=!1;;)switch(t){case"ascii":case"latin1":case"binary":return n;case"utf8":case"utf-8":case void 0:return B(e).length;case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return 2*n;case"hex":return n>>>1;case"base64":return W(e).length;default:if(r)return B(e).length;t=(""+t).toLowerCase(),r=!0}}function m(e,t,n){var r=e[t];e[t]=e[n],e[n]=r}function v(e,t,n,r,o){if(0===e.length)return-1;if("string"==typeof n?(r=n,n=0):n>2147483647?n=2147483647:n<-2147483648&&(n=-2147483648),n=+n,isNaN(n)&&(n=o?0:e.length-1),n<0&&(n=e.length+n),n>=e.length){if(o)return-1;n=e.length-1}else if(n<0){if(!o)return-1;n=0}if("string"==typeof t&&(t=s.from(t,r)),s.isBuffer(t))return 0===t.length?-1:y(e,t,n,r,o);if("number"==typeof t)return t&=255,s.TYPED_ARRAY_SUPPORT&&"function"==typeof Uint8Array.prototype.indexOf?o?Uint8Array.prototype.indexOf.call(e,t,n):Uint8Array.prototype.lastIndexOf.call(e,t,n):y(e,[t],n,r,o);throw new TypeError("val must be string, number or Buffer")}function y(e,t,n,r,o){var i,a=1,u=e.length,s=t.length;if(void 0!==r&&("ucs2"===(r=String(r).toLowerCase())||"ucs-2"===r||"utf16le"===r||"utf-16le"===r)){if(e.length<2||t.length<2)return-1;a=2,u/=2,s/=2,n/=2}function c(e,t){return 1===a?e[t]:e.readUInt16BE(t*a)}if(o){var l=-1;for(i=n;i<u;i++)if(c(e,i)===c(t,-1===l?0:i-l)){if(-1===l&&(l=i),i-l+1===s)return l*a}else-1!==l&&(i-=i-l),l=-1}else for(n+s>u&&(n=u-s),i=n;i>=0;i--){for(var f=!0,d=0;d<s;d++)if(c(e,i+d)!==c(t,d)){f=!1;break}if(f)return i}return-1}function 
g(e,t,n,r){n=Number(n)||0;var o=e.length-n;r?(r=Number(r))>o&&(r=o):r=o;var i=t.length;if(i%2!=0)throw new TypeError("Invalid hex string");r>i/2&&(r=i/2);for(var a=0;a<r;++a){var u=parseInt(t.substr(2*a,2),16);if(isNaN(u))return a;e[n+a]=u}return a}function _(e,t,n,r){return Y(B(t,e.length-n),e,n,r)}function b(e,t,n,r){return Y(function(e){for(var t=[],n=0;n<e.length;++n)t.push(255&e.charCodeAt(n));return t}(t),e,n,r)}function w(e,t,n,r){return b(e,t,n,r)}function x(e,t,n,r){return Y(W(t),e,n,r)}function E(e,t,n,r){return Y(function(e,t){for(var n,r,o,i=[],a=0;a<e.length&&!((t-=2)<0);++a)n=e.charCodeAt(a),r=n>>8,o=n%256,i.push(o),i.push(r);return i}(t,e.length-n),e,n,r)}function O(e,t,n){return 0===t&&n===e.length?r.fromByteArray(e):r.fromByteArray(e.slice(t,n))}function S(e,t,n){n=Math.min(e.length,n);for(var r=[],o=t;o<n;){var i,a,u,s,c=e[o],l=null,f=c>239?4:c>223?3:c>191?2:1;if(o+f<=n)switch(f){case 1:c<128&&(l=c);break;case 2:128==(192&(i=e[o+1]))&&(s=(31&c)<<6|63&i)>127&&(l=s);break;case 3:i=e[o+1],a=e[o+2],128==(192&i)&&128==(192&a)&&(s=(15&c)<<12|(63&i)<<6|63&a)>2047&&(s<55296||s>57343)&&(l=s);break;case 4:i=e[o+1],a=e[o+2],u=e[o+3],128==(192&i)&&128==(192&a)&&128==(192&u)&&(s=(15&c)<<18|(63&i)<<12|(63&a)<<6|63&u)>65535&&s<1114112&&(l=s)}null===l?(l=65533,f=1):l>65535&&(l-=65536,r.push(l>>>10&1023|55296),l=56320|1023&l),r.push(l),o+=f}return function(e){var t=e.length;if(t<=T)return String.fromCharCode.apply(String,e);var n="",r=0;for(;r<t;)n+=String.fromCharCode.apply(String,e.slice(r,r+=T));return n}(r)}t.Buffer=s,t.SlowBuffer=function(e){+e!=e&&(e=0);return s.alloc(+e)},t.INSPECT_MAX_BYTES=50,s.TYPED_ARRAY_SUPPORT=void 0!==e.TYPED_ARRAY_SUPPORT?e.TYPED_ARRAY_SUPPORT:function(){try{var e=new Uint8Array(1);return e.__proto__={__proto__:Uint8Array.prototype,foo:function(){return 42}},42===e.foo()&&"function"==typeof e.subarray&&0===e.subarray(1,1).byteLength}catch(e){return!1}}(),t.kMaxLength=a(),s.poolSize=8192,s._augment=function(e){return 
e.__proto__=s.prototype,e},s.from=function(e,t,n){return c(null,e,t,n)},s.TYPED_ARRAY_SUPPORT&&(s.prototype.__proto__=Uint8Array.prototype,s.__proto__=Uint8Array,"undefined"!=typeof Symbol&&Symbol.species&&s[Symbol.species]===s&&Object.defineProperty(s,Symbol.species,{value:null,configurable:!0})),s.alloc=function(e,t,n){return function(e,t,n,r){return l(t),t<=0?u(e,t):void 0!==n?"string"==typeof r?u(e,t).fill(n,r):u(e,t).fill(n):u(e,t)}(null,e,t,n)},s.allocUnsafe=function(e){return f(null,e)},s.allocUnsafeSlow=function(e){return f(null,e)},s.isBuffer=function(e){return!(null==e||!e._isBuffer)},s.compare=function(e,t){if(!s.isBuffer(e)||!s.isBuffer(t))throw new TypeError("Arguments must be Buffers");if(e===t)return 0;for(var n=e.length,r=t.length,o=0,i=Math.min(n,r);o<i;++o)if(e[o]!==t[o]){n=e[o],r=t[o];break}return n<r?-1:r<n?1:0},s.isEncoding=function(e){switch(String(e).toLowerCase()){case"hex":case"utf8":case"utf-8":case"ascii":case"latin1":case"binary":case"base64":case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return!0;default:return!1}},s.concat=function(e,t){if(!i(e))throw new TypeError('"list" argument must be an Array of Buffers');if(0===e.length)return s.alloc(0);var n;if(void 0===t)for(t=0,n=0;n<e.length;++n)t+=e[n].length;var r=s.allocUnsafe(t),o=0;for(n=0;n<e.length;++n){var a=e[n];if(!s.isBuffer(a))throw new TypeError('"list" argument must be an Array of Buffers');a.copy(r,o),o+=a.length}return r},s.byteLength=h,s.prototype._isBuffer=!0,s.prototype.swap16=function(){var e=this.length;if(e%2!=0)throw new RangeError("Buffer size must be a multiple of 16-bits");for(var t=0;t<e;t+=2)m(this,t,t+1);return this},s.prototype.swap32=function(){var e=this.length;if(e%4!=0)throw new RangeError("Buffer size must be a multiple of 32-bits");for(var t=0;t<e;t+=4)m(this,t,t+3),m(this,t+1,t+2);return this},s.prototype.swap64=function(){var e=this.length;if(e%8!=0)throw new RangeError("Buffer size must be a multiple of 64-bits");for(var 
t=0;t<e;t+=8)m(this,t,t+7),m(this,t+1,t+6),m(this,t+2,t+5),m(this,t+3,t+4);return this},s.prototype.toString=function(){var e=0|this.length;return 0===e?"":0===arguments.length?S(this,0,e):function(e,t,n){var r=!1;if((void 0===t||t<0)&&(t=0),t>this.length)return"";if((void 0===n||n>this.length)&&(n=this.length),n<=0)return"";if((n>>>=0)<=(t>>>=0))return"";for(e||(e="utf8");;)switch(e){case"hex":return j(this,t,n);case"utf8":case"utf-8":return S(this,t,n);case"ascii":return k(this,t,n);case"latin1":case"binary":return R(this,t,n);case"base64":return O(this,t,n);case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return P(this,t,n);default:if(r)throw new TypeError("Unknown encoding: "+e);e=(e+"").toLowerCase(),r=!0}}.apply(this,arguments)},s.prototype.equals=function(e){if(!s.isBuffer(e))throw new TypeError("Argument must be a Buffer");return this===e||0===s.compare(this,e)},s.prototype.inspect=function(){var e="",n=t.INSPECT_MAX_BYTES;return this.length>0&&(e=this.toString("hex",0,n).match(/.{2}/g).join(" "),this.length>n&&(e+=" ... 
")),"<Buffer "+e+">"},s.prototype.compare=function(e,t,n,r,o){if(!s.isBuffer(e))throw new TypeError("Argument must be a Buffer");if(void 0===t&&(t=0),void 0===n&&(n=e?e.length:0),void 0===r&&(r=0),void 0===o&&(o=this.length),t<0||n>e.length||r<0||o>this.length)throw new RangeError("out of range index");if(r>=o&&t>=n)return 0;if(r>=o)return-1;if(t>=n)return 1;if(t>>>=0,n>>>=0,r>>>=0,o>>>=0,this===e)return 0;for(var i=o-r,a=n-t,u=Math.min(i,a),c=this.slice(r,o),l=e.slice(t,n),f=0;f<u;++f)if(c[f]!==l[f]){i=c[f],a=l[f];break}return i<a?-1:a<i?1:0},s.prototype.includes=function(e,t,n){return-1!==this.indexOf(e,t,n)},s.prototype.indexOf=function(e,t,n){return v(this,e,t,n,!0)},s.prototype.lastIndexOf=function(e,t,n){return v(this,e,t,n,!1)},s.prototype.write=function(e,t,n,r){if(void 0===t)r="utf8",n=this.length,t=0;else if(void 0===n&&"string"==typeof t)r=t,n=this.length,t=0;else{if(!isFinite(t))throw new Error("Buffer.write(string, encoding, offset[, length]) is no longer supported");t|=0,isFinite(n)?(n|=0,void 0===r&&(r="utf8")):(r=n,n=void 0)}var o=this.length-t;if((void 0===n||n>o)&&(n=o),e.length>0&&(n<0||t<0)||t>this.length)throw new RangeError("Attempt to write outside buffer bounds");r||(r="utf8");for(var i=!1;;)switch(r){case"hex":return g(this,e,t,n);case"utf8":case"utf-8":return _(this,e,t,n);case"ascii":return b(this,e,t,n);case"latin1":case"binary":return w(this,e,t,n);case"base64":return x(this,e,t,n);case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return E(this,e,t,n);default:if(i)throw new TypeError("Unknown encoding: "+r);r=(""+r).toLowerCase(),i=!0}},s.prototype.toJSON=function(){return{type:"Buffer",data:Array.prototype.slice.call(this._arr||this,0)}};var T=4096;function k(e,t,n){var r="";n=Math.min(e.length,n);for(var o=t;o<n;++o)r+=String.fromCharCode(127&e[o]);return r}function R(e,t,n){var r="";n=Math.min(e.length,n);for(var o=t;o<n;++o)r+=String.fromCharCode(e[o]);return r}function j(e,t,n){var 
r=e.length;(!t||t<0)&&(t=0),(!n||n<0||n>r)&&(n=r);for(var o="",i=t;i<n;++i)o+=F(e[i]);return o}function P(e,t,n){for(var r=e.slice(t,n),o="",i=0;i<r.length;i+=2)o+=String.fromCharCode(r[i]+256*r[i+1]);return o}function C(e,t,n){if(e%1!=0||e<0)throw new RangeError("offset is not uint");if(e+t>n)throw new RangeError("Trying to access beyond buffer length")}function M(e,t,n,r,o,i){if(!s.isBuffer(e))throw new TypeError('"buffer" argument must be a Buffer instance');if(t>o||t<i)throw new RangeError('"value" argument is out of bounds');if(n+r>e.length)throw new RangeError("Index out of range")}function I(e,t,n,r){t<0&&(t=65535+t+1);for(var o=0,i=Math.min(e.length-n,2);o<i;++o)e[n+o]=(t&255<<8*(r?o:1-o))>>>8*(r?o:1-o)}function A(e,t,n,r){t<0&&(t=4294967295+t+1);for(var o=0,i=Math.min(e.length-n,4);o<i;++o)e[n+o]=t>>>8*(r?o:3-o)&255}function D(e,t,n,r,o,i){if(n+r>e.length)throw new RangeError("Index out of range");if(n<0)throw new RangeError("Index out of range")}function N(e,t,n,r,i){return i||D(e,0,n,4),o.write(e,t,n,r,23,4),n+4}function L(e,t,n,r,i){return i||D(e,0,n,8),o.write(e,t,n,r,52,8),n+8}s.prototype.slice=function(e,t){var n,r=this.length;if(e=~~e,t=void 0===t?r:~~t,e<0?(e+=r)<0&&(e=0):e>r&&(e=r),t<0?(t+=r)<0&&(t=0):t>r&&(t=r),t<e&&(t=e),s.TYPED_ARRAY_SUPPORT)(n=this.subarray(e,t)).__proto__=s.prototype;else{var o=t-e;n=new s(o,void 0);for(var i=0;i<o;++i)n[i]=this[i+e]}return n},s.prototype.readUIntLE=function(e,t,n){e|=0,t|=0,n||C(e,t,this.length);for(var r=this[e],o=1,i=0;++i<t&&(o*=256);)r+=this[e+i]*o;return r},s.prototype.readUIntBE=function(e,t,n){e|=0,t|=0,n||C(e,t,this.length);for(var r=this[e+--t],o=1;t>0&&(o*=256);)r+=this[e+--t]*o;return r},s.prototype.readUInt8=function(e,t){return t||C(e,1,this.length),this[e]},s.prototype.readUInt16LE=function(e,t){return t||C(e,2,this.length),this[e]|this[e+1]<<8},s.prototype.readUInt16BE=function(e,t){return t||C(e,2,this.length),this[e]<<8|this[e+1]},s.prototype.readUInt32LE=function(e,t){return 
t||C(e,4,this.length),(this[e]|this[e+1]<<8|this[e+2]<<16)+16777216*this[e+3]},s.prototype.readUInt32BE=function(e,t){return t||C(e,4,this.length),16777216*this[e]+(this[e+1]<<16|this[e+2]<<8|this[e+3])},s.prototype.readIntLE=function(e,t,n){e|=0,t|=0,n||C(e,t,this.length);for(var r=this[e],o=1,i=0;++i<t&&(o*=256);)r+=this[e+i]*o;return r>=(o*=128)&&(r-=Math.pow(2,8*t)),r},s.prototype.readIntBE=function(e,t,n){e|=0,t|=0,n||C(e,t,this.length);for(var r=t,o=1,i=this[e+--r];r>0&&(o*=256);)i+=this[e+--r]*o;return i>=(o*=128)&&(i-=Math.pow(2,8*t)),i},s.prototype.readInt8=function(e,t){return t||C(e,1,this.length),128&this[e]?-1*(255-this[e]+1):this[e]},s.prototype.readInt16LE=function(e,t){t||C(e,2,this.length);var n=this[e]|this[e+1]<<8;return 32768&n?4294901760|n:n},s.prototype.readInt16BE=function(e,t){t||C(e,2,this.length);var n=this[e+1]|this[e]<<8;return 32768&n?4294901760|n:n},s.prototype.readInt32LE=function(e,t){return t||C(e,4,this.length),this[e]|this[e+1]<<8|this[e+2]<<16|this[e+3]<<24},s.prototype.readInt32BE=function(e,t){return t||C(e,4,this.length),this[e]<<24|this[e+1]<<16|this[e+2]<<8|this[e+3]},s.prototype.readFloatLE=function(e,t){return t||C(e,4,this.length),o.read(this,e,!0,23,4)},s.prototype.readFloatBE=function(e,t){return t||C(e,4,this.length),o.read(this,e,!1,23,4)},s.prototype.readDoubleLE=function(e,t){return t||C(e,8,this.length),o.read(this,e,!0,52,8)},s.prototype.readDoubleBE=function(e,t){return t||C(e,8,this.length),o.read(this,e,!1,52,8)},s.prototype.writeUIntLE=function(e,t,n,r){(e=+e,t|=0,n|=0,r)||M(this,e,t,n,Math.pow(2,8*n)-1,0);var o=1,i=0;for(this[t]=255&e;++i<n&&(o*=256);)this[t+i]=e/o&255;return t+n},s.prototype.writeUIntBE=function(e,t,n,r){(e=+e,t|=0,n|=0,r)||M(this,e,t,n,Math.pow(2,8*n)-1,0);var o=n-1,i=1;for(this[t+o]=255&e;--o>=0&&(i*=256);)this[t+o]=e/i&255;return t+n},s.prototype.writeUInt8=function(e,t,n){return 
e=+e,t|=0,n||M(this,e,t,1,255,0),s.TYPED_ARRAY_SUPPORT||(e=Math.floor(e)),this[t]=255&e,t+1},s.prototype.writeUInt16LE=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,2,65535,0),s.TYPED_ARRAY_SUPPORT?(this[t]=255&e,this[t+1]=e>>>8):I(this,e,t,!0),t+2},s.prototype.writeUInt16BE=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,2,65535,0),s.TYPED_ARRAY_SUPPORT?(this[t]=e>>>8,this[t+1]=255&e):I(this,e,t,!1),t+2},s.prototype.writeUInt32LE=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,4,4294967295,0),s.TYPED_ARRAY_SUPPORT?(this[t+3]=e>>>24,this[t+2]=e>>>16,this[t+1]=e>>>8,this[t]=255&e):A(this,e,t,!0),t+4},s.prototype.writeUInt32BE=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,4,4294967295,0),s.TYPED_ARRAY_SUPPORT?(this[t]=e>>>24,this[t+1]=e>>>16,this[t+2]=e>>>8,this[t+3]=255&e):A(this,e,t,!1),t+4},s.prototype.writeIntLE=function(e,t,n,r){if(e=+e,t|=0,!r){var o=Math.pow(2,8*n-1);M(this,e,t,n,o-1,-o)}var i=0,a=1,u=0;for(this[t]=255&e;++i<n&&(a*=256);)e<0&&0===u&&0!==this[t+i-1]&&(u=1),this[t+i]=(e/a>>0)-u&255;return t+n},s.prototype.writeIntBE=function(e,t,n,r){if(e=+e,t|=0,!r){var o=Math.pow(2,8*n-1);M(this,e,t,n,o-1,-o)}var i=n-1,a=1,u=0;for(this[t+i]=255&e;--i>=0&&(a*=256);)e<0&&0===u&&0!==this[t+i+1]&&(u=1),this[t+i]=(e/a>>0)-u&255;return t+n},s.prototype.writeInt8=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,1,127,-128),s.TYPED_ARRAY_SUPPORT||(e=Math.floor(e)),e<0&&(e=255+e+1),this[t]=255&e,t+1},s.prototype.writeInt16LE=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,2,32767,-32768),s.TYPED_ARRAY_SUPPORT?(this[t]=255&e,this[t+1]=e>>>8):I(this,e,t,!0),t+2},s.prototype.writeInt16BE=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,2,32767,-32768),s.TYPED_ARRAY_SUPPORT?(this[t]=e>>>8,this[t+1]=255&e):I(this,e,t,!1),t+2},s.prototype.writeInt32LE=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,4,2147483647,-2147483648),s.TYPED_ARRAY_SUPPORT?(this[t]=255&e,this[t+1]=e>>>8,this[t+2]=e>>>16,this[t+3]=e>>>24):A(this,e,t,!0),t+4},s.prototype.writeInt32BE=function(e,t,n){return 
e=+e,t|=0,n||M(this,e,t,4,2147483647,-2147483648),e<0&&(e=4294967295+e+1),s.TYPED_ARRAY_SUPPORT?(this[t]=e>>>24,this[t+1]=e>>>16,this[t+2]=e>>>8,this[t+3]=255&e):A(this,e,t,!1),t+4},s.prototype.writeFloatLE=function(e,t,n){return N(this,e,t,!0,n)},s.prototype.writeFloatBE=function(e,t,n){return N(this,e,t,!1,n)},s.prototype.writeDoubleLE=function(e,t,n){return L(this,e,t,!0,n)},s.prototype.writeDoubleBE=function(e,t,n){return L(this,e,t,!1,n)},s.prototype.copy=function(e,t,n,r){if(n||(n=0),r||0===r||(r=this.length),t>=e.length&&(t=e.length),t||(t=0),r>0&&r<n&&(r=n),r===n)return 0;if(0===e.length||0===this.length)return 0;if(t<0)throw new RangeError("targetStart out of bounds");if(n<0||n>=this.length)throw new RangeError("sourceStart out of bounds");if(r<0)throw new RangeError("sourceEnd out of bounds");r>this.length&&(r=this.length),e.length-t<r-n&&(r=e.length-t+n);var o,i=r-n;if(this===e&&n<t&&t<r)for(o=i-1;o>=0;--o)e[o+t]=this[o+n];else if(i<1e3||!s.TYPED_ARRAY_SUPPORT)for(o=0;o<i;++o)e[o+t]=this[o+n];else Uint8Array.prototype.set.call(e,this.subarray(n,n+i),t);return i},s.prototype.fill=function(e,t,n,r){if("string"==typeof e){if("string"==typeof t?(r=t,t=0,n=this.length):"string"==typeof n&&(r=n,n=this.length),1===e.length){var o=e.charCodeAt(0);o<256&&(e=o)}if(void 0!==r&&"string"!=typeof r)throw new TypeError("encoding must be a string");if("string"==typeof r&&!s.isEncoding(r))throw new TypeError("Unknown encoding: "+r)}else"number"==typeof e&&(e&=255);if(t<0||this.length<t||this.length<n)throw new RangeError("Out of range index");if(n<=t)return this;var i;if(t>>>=0,n=void 0===n?this.length:n>>>0,e||(e=0),"number"==typeof e)for(i=t;i<n;++i)this[i]=e;else{var a=s.isBuffer(e)?e:B(new s(e,r).toString()),u=a.length;for(i=0;i<n-t;++i)this[i+t]=a[i%u]}return this};var U=/[^+\/0-9A-Za-z-_]/g;function F(e){return e<16?"0"+e.toString(16):e.toString(16)}function B(e,t){var n;t=t||1/0;for(var 
r=e.length,o=null,i=[],a=0;a<r;++a){if((n=e.charCodeAt(a))>55295&&n<57344){if(!o){if(n>56319){(t-=3)>-1&&i.push(239,191,189);continue}if(a+1===r){(t-=3)>-1&&i.push(239,191,189);continue}o=n;continue}if(n<56320){(t-=3)>-1&&i.push(239,191,189),o=n;continue}n=65536+(o-55296<<10|n-56320)}else o&&(t-=3)>-1&&i.push(239,191,189);if(o=null,n<128){if((t-=1)<0)break;i.push(n)}else if(n<2048){if((t-=2)<0)break;i.push(n>>6|192,63&n|128)}else if(n<65536){if((t-=3)<0)break;i.push(n>>12|224,n>>6&63|128,63&n|128)}else{if(!(n<1114112))throw new Error("Invalid code point");if((t-=4)<0)break;i.push(n>>18|240,n>>12&63|128,n>>6&63|128,63&n|128)}}return i}function W(e){return r.toByteArray(function(e){if((e=function(e){return e.trim?e.trim():e.replace(/^\s+|\s+$/g,"")}(e).replace(U,"")).length<2)return"";for(;e.length%4!=0;)e+="=";return e}(e))}function Y(e,t,n,r){for(var o=0;o<r&&!(o+n>=t.length||o>=e.length);++o)t[o+n]=e[o];return o}}).call(t,n(3))},function(e,t,n){"use strict";var r=n(70);function o(e,t){e.emit("error",t)}e.exports={destroy:function(e,t){var n=this,i=this._readableState&&this._readableState.destroyed,a=this._writableState&&this._writableState.destroyed;return i||a?(t?t(e):!e||this._writableState&&this._writableState.errorEmitted||r.nextTick(o,this,e),this):(this._readableState&&(this._readableState.destroyed=!0),this._writableState&&(this._writableState.destroyed=!0),this._destroy(e||null,function(e){!t&&e?(r.nextTick(o,n,e),n._writableState&&(n._writableState.errorEmitted=!0)):t&&t(e)}),this)},undestroy:function(){this._readableState&&(this._readableState.destroyed=!1,this._readableState.reading=!1,this._readableState.ended=!1,this._readableState.endEmitted=!1),this._writableState&&(this._writableState.destroyed=!1,this._writableState.ended=!1,this._writableState.ending=!1,this._writableState.finished=!1,this._writableState.errorEmitted=!1)}}},function(e,t,n){"use strict";var 
r=n(71).Buffer,o=r.isEncoding||function(e){switch((e=""+e)&&e.toLowerCase()){case"hex":case"utf8":case"utf-8":case"ascii":case"binary":case"base64":case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":case"raw":return!0;default:return!1}};function i(e){var t;switch(this.encoding=function(e){var t=function(e){if(!e)return"utf8";for(var t;;)switch(e){case"utf8":case"utf-8":return"utf8";case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return"utf16le";case"latin1":case"binary":return"latin1";case"base64":case"ascii":case"hex":return e;default:if(t)return;e=(""+e).toLowerCase(),t=!0}}(e);if("string"!=typeof t&&(r.isEncoding===o||!o(e)))throw new Error("Unknown encoding: "+e);return t||e}(e),this.encoding){case"utf16le":this.text=s,this.end=c,t=4;break;case"utf8":this.fillLast=u,t=4;break;case"base64":this.text=l,this.end=f,t=3;break;default:return this.write=d,void(this.end=p)}this.lastNeed=0,this.lastTotal=0,this.lastChar=r.allocUnsafe(t)}function a(e){return e<=127?0:e>>5==6?2:e>>4==14?3:e>>3==30?4:e>>6==2?-1:-2}function u(e){var t=this.lastTotal-this.lastNeed,n=function(e,t,n){if(128!=(192&t[0]))return e.lastNeed=0,"�";if(e.lastNeed>1&&t.length>1){if(128!=(192&t[1]))return e.lastNeed=1,"�";if(e.lastNeed>2&&t.length>2&&128!=(192&t[2]))return e.lastNeed=2,"�"}}(this,e);return void 0!==n?n:this.lastNeed<=e.length?(e.copy(this.lastChar,t,0,this.lastNeed),this.lastChar.toString(this.encoding,0,this.lastTotal)):(e.copy(this.lastChar,t,0,e.length),void(this.lastNeed-=e.length))}function s(e,t){if((e.length-t)%2==0){var n=e.toString("utf16le",t);if(n){var r=n.charCodeAt(n.length-1);if(r>=55296&&r<=56319)return this.lastNeed=2,this.lastTotal=4,this.lastChar[0]=e[e.length-2],this.lastChar[1]=e[e.length-1],n.slice(0,-1)}return n}return this.lastNeed=1,this.lastTotal=2,this.lastChar[0]=e[e.length-1],e.toString("utf16le",t,e.length-1)}function c(e){var t=e&&e.length?this.write(e):"";if(this.lastNeed){var n=this.lastTotal-this.lastNeed;return 
t+this.lastChar.toString("utf16le",0,n)}return t}function l(e,t){var n=(e.length-t)%3;return 0===n?e.toString("base64",t):(this.lastNeed=3-n,this.lastTotal=3,1===n?this.lastChar[0]=e[e.length-1]:(this.lastChar[0]=e[e.length-2],this.lastChar[1]=e[e.length-1]),e.toString("base64",t,e.length-n))}function f(e){var t=e&&e.length?this.write(e):"";return this.lastNeed?t+this.lastChar.toString("base64",0,3-this.lastNeed):t}function d(e){return e.toString(this.encoding)}function p(e){return e&&e.length?this.write(e):""}t.StringDecoder=i,i.prototype.write=function(e){if(0===e.length)return"";var t,n;if(this.lastNeed){if(void 0===(t=this.fillLast(e)))return"";n=this.lastNeed,this.lastNeed=0}else n=0;return n<e.length?t?t+this.text(e,n):this.text(e,n):t||""},i.prototype.end=function(e){var t=e&&e.length?this.write(e):"";return this.lastNeed?t+"�":t},i.prototype.text=function(e,t){var n=function(e,t,n){var r=t.length-1;if(r<n)return 0;var o=a(t[r]);if(o>=0)return o>0&&(e.lastNeed=o-1),o;if(--r<n||-2===o)return 0;if((o=a(t[r]))>=0)return o>0&&(e.lastNeed=o-2),o;if(--r<n||-2===o)return 0;if((o=a(t[r]))>=0)return o>0&&(2===o?o=0:e.lastNeed=o-3),o;return 0}(this,e,t);if(!this.lastNeed)return e.toString("utf8",t);this.lastTotal=n;var r=e.length-(n-this.lastNeed);return e.copy(this.lastChar,0,r),e.toString("utf8",t,r)},i.prototype.fillLast=function(e){if(this.lastNeed<=e.length)return e.copy(this.lastChar,this.lastTotal-this.lastNeed,0,this.lastNeed),this.lastChar.toString(this.encoding,0,this.lastTotal);e.copy(this.lastChar,this.lastTotal-this.lastNeed,0,e.length),this.lastNeed-=e.length}},function(e,t,n){"use strict";e.exports=i;var r=n(33),o=n(54);function i(e){if(!(this instanceof i))return new i(e);r.call(this,e),this._transformState={afterTransform:function(e,t){var n=this._transformState;n.transforming=!1;var r=n.writecb;if(!r)return this.emit("error",new Error("write callback called multiple times"));n.writechunk=null,n.writecb=null,null!=t&&this.push(t),r(e);var 
o=this._readableState;o.reading=!1,(o.needReadable||o.length<o.highWaterMark)&&this._read(o.highWaterMark)}.bind(this),needTransform:!1,transforming:!1,writecb:null,writechunk:null,writeencoding:null},this._readableState.needReadable=!0,this._readableState.sync=!1,e&&("function"==typeof e.transform&&(this._transform=e.transform),"function"==typeof e.flush&&(this._flush=e.flush)),this.on("prefinish",a)}function a(){var e=this;"function"==typeof this._flush?this._flush(function(t,n){u(e,t,n)}):u(this,null,null)}function u(e,t,n){if(t)return e.emit("error",t);if(null!=n&&e.push(n),e._writableState.length)throw new Error("Calling transform done when ws.length != 0");if(e._transformState.transforming)throw new Error("Calling transform done when still transforming");return e.push(null)}o.inherits=n(32),o.inherits(i,r),i.prototype.push=function(e,t){return this._transformState.needTransform=!1,r.prototype.push.call(this,e,t)},i.prototype._transform=function(e,t,n){throw new Error("_transform() is not implemented")},i.prototype._write=function(e,t,n){var r=this._transformState;if(r.writecb=n,r.writechunk=e,r.writeencoding=t,!r.transforming){var o=this._readableState;(r.needTransform||o.needReadable||o.length<o.highWaterMark)&&this._read(o.highWaterMark)}},i.prototype._read=function(e){var t=this._transformState;null!==t.writechunk&&t.writecb&&!t.transforming?(t.transforming=!0,this._transform(t.writechunk,t.writeencoding,t.afterTransform)):t.needTransform=!0},i.prototype._destroy=function(e,t){var n=this;r.prototype._destroy.call(this,e,function(e){t(e),n.emit("close")})}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.Sugarcube=t.Places365=t.Coco=t.KeyframeStatus=t.KeyframeSingle=t.KeyframeList=t.Summary=t.MediaInfo=t.MediaRecord=t.Heading=void 0;var r=p(n(502)),o=p(n(504)),i=p(n(505)),a=p(n(506)),u=p(n(507)),s=p(n(508)),c=p(n(511)),l=p(n(512)),f=p(n(513)),d=p(n(514));function p(e){return 
e&&e.__esModule?e:{default:e}}n(515),t.Heading=r.default,t.MediaRecord=i.default,t.MediaInfo=o.default,t.Summary=a.default,t.KeyframeList=u.default,t.KeyframeSingle=s.default,t.KeyframeStatus=c.default,t.Coco=l.default,t.Places365=f.default,t.Sugarcube=d.default},function(e,t,n){"use strict";t.decode=t.parse=n(521),t.encode=t.stringify=n(522)},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=g(n(4)),o=g(n(5)),i=g(n(6)),a=g(n(7)),u=g(n(8)),s=g(n(9)),c=n(1),l=g(c),f=(n(16),n(15)),d=n(2),p=g(n(523)),h=n(17),m=n(11),v=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}(n(21)),y=g(n(114));function g(e){return e&&e.__esModule?e:{default:e}}var _={dragging:!1,draggingBox:!1,bounds:null,mouseX:0,mouseY:0,box:{x:0,y:0,w:0,h:0}},b=function(e){function t(){(0,i.default)(this,t);var e=(0,u.default)(this,(t.__proto__||(0,o.default)(t)).call(this));return e.state=(0,r.default)({},_),e.handleMouseDown=e.handleMouseDown.bind(e),e.handleMouseDownOnBox=e.handleMouseDownOnBox.bind(e),e.handleMouseMove=e.handleMouseMove.bind(e),e.handleMouseUp=e.handleMouseUp.bind(e),e}return(0,s.default)(t,e),(0,a.default)(t,[{key:"componentDidMount",value:function(){document.body.addEventListener("mousemove",this.handleMouseMove),document.body.addEventListener("mouseup",this.handleMouseUp)}},{key:"componentDidUpdate",value:function(e){!this.state.bounds||this.props.query.query&&e.query.query&&this.props.query.query.url===e.query.query.url||this.setState((0,r.default)({},_))}},{key:"componentWillUnmount",value:function(){document.body.removeEventListener("mousemove",this.handleMouseMove),document.body.removeEventListener("mouseup",this.handleMouseUp)}},{key:"handleMouseDown",value:function(e){e.preventDefault();var 
t=this.imgRef.getBoundingClientRect(),n=e.pageX,r=e.pageY,o=n-t.left,i=r-t.top;this.setState({dragging:!0,bounds:t,mouseX:n,mouseY:r,box:{x:o,y:i,w:1,h:1}})}},{key:"handleMouseDownOnBox",value:function(e){var t=this.imgRef.getBoundingClientRect(),n=e.pageX,o=e.pageY;this.setState({draggingBox:!0,bounds:t,mouseX:n,mouseY:o,initialBox:(0,r.default)({},this.state.box),box:(0,r.default)({},this.state.box)})}},{key:"handleMouseMove",value:function(e){var t=this.state,n=t.dragging,r=t.draggingBox,o=t.bounds,i=t.mouseX,a=t.mouseY,u=t.initialBox,s=t.box;if(n){e.preventDefault();var c=s.x,l=s.y,f=(0,h.clamp)(e.pageX-i,0,o.width-c),d=(0,h.clamp)(e.pageY-a,0,o.height-l);this.setState({box:{x:c,y:l,w:f,h:d}})}else if(r){e.preventDefault();var p=u.x,m=u.y,v=u.w,y=u.h,g=e.pageX-i,_=e.pageY-a;this.setState({box:{x:(0,h.clamp)(p+g,0,o.width-v),y:(0,h.clamp)(m+_,0,o.height-y),w:v,h:y}})}}},{key:"handleMouseUp",value:function(e){var t=this,n=this.props.actions,o=this.state,i=o.dragging,a=o.draggingBox,u=o.bounds,s=o.box;if(i||a){e.preventDefault();var c=s.x,l=s.y,f=s.w,d=s.h,h=this.imgRef,m=document.createElement("canvas"),v=m.getContext("2d"),y=h.naturalWidth/u.width;if(m.width=f*y,m.height=d*y,f<10||d<10)this.setState({dragging:!1,draggingBox:!1,box:{x:0,y:0,w:0,h:0}});else{this.setState({dragging:!1,draggingBox:!1});var g=new Image,_=!1;g.onload=function(){if(!_){_=!0,g.onload=null,v.drawImage(g,Math.round(c*y),Math.round(l*y),Math.round(f*y),Math.round(d*y),0,0,m.width,m.height);var e=(0,p.default)(m.toDataURL("image/jpeg",.9));n.upload(e,(0,r.default)({},t.props.query.query,{crop:{x:c,y:l,w:f,h:d}}))}},g.crossOrigin="anonymous",g.src=h.src,g.complete&&g.onload()}}}},{key:"render",value:function(){var e=this,t=this.props.query.query,n=this.state.box,r=n.x,o=n.y,i=n.w,a=n.h;if(!t)return null;if(t.loading)return l.default.createElement("div",{className:"searchQuery column"},l.default.createElement("h2",null,"Loading results..."),l.default.createElement(m.Loader,null));var 
u=t.url;return u&&0===u.indexOf("static")&&(u="/search/"+u),l.default.createElement("div",{className:"searchQuery row"},l.default.createElement("div",{className:"searchBox"},l.default.createElement("img",{src:u,ref:function(t){return e.imgRef=t},onMouseDown:this.handleMouseDown,crossOrigin:"anonymous"}),!!i&&l.default.createElement("div",{className:"box",style:{left:r,top:o,width:i,height:a},onMouseDown:this.handleMouseDownOnBox})),l.default.createElement("div",null,l.default.createElement("h3",null,"Your Query"),l.default.createElement(y.default,{query:t})))}}]),t}(c.Component);t.default=(0,d.connect)(function(e){return{query:e.search.query,options:e.search.options}},function(e){return{actions:(0,f.bindActionCreators)((0,r.default)({},v),e)}})(b)},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=f(n(4)),o=f(n(1)),i=n(16),a=n(15),u=n(2),s=(l(n(209)),n(11)),c=l(n(21));function l(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}function f(e){return e&&e.__esModule?e:{default:e}}t.default=(0,i.withRouter)((0,u.connect)(function(e){return{query:e.search.query.query,results:e.search.query.results,options:e.search.options}},function(e){return{searchActions:(0,a.bindActionCreators)((0,r.default)({},c),e)}})(function(e){var t=e.query,n=e.results,r=e.options;return!t||t.reset||t.loading||!n?o.default.createElement("div",null):t.loading||n.length?o.default.createElement("div",{className:"searchResults"},o.default.createElement("div",{className:"searchResultsHeading row"},o.default.createElement("div",{className:"column"},o.default.createElement("h3",null,"Search Results"),o.default.createElement("small",{className:"subtitle"},"Searched 10,523,176 frames from 576,234 videos (took ",t.timing.toFixed(2)," 
ms)"))),o.default.createElement(s.Keyframes,{frames:n,showHash:!0,showTimestamp:r.groupByHash,showSearchButton:!0,showSaveButton:!0,groupByHash:r.groupByHash})):o.default.createElement("div",{className:"searchResults"},o.default.createElement("h3",null,"No results"))}))},function(e,t,n){"use strict";var r=c(n(1)),o=c(n(214)),i=n(218),a=n(2),u=c(n(242)),s=n(94);function c(e){return e&&e.__esModule?e:{default:e}}var l=document.createElement("div");document.body.appendChild(l),o.default.render(r.default.createElement(i.AppContainer,null,r.default.createElement(a.Provider,{store:s.store},r.default.createElement(u.default,{history:s.history}))),l)},function(e,t,n){"use strict";
+/** @license React v16.5.2
+ * react.production.min.js
+ *
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * This source code is licensed under the MIT license found in the
+ * LICENSE file in the root directory of this source tree.
+ */var r=n(115),o="function"==typeof Symbol&&Symbol.for,i=o?Symbol.for("react.element"):60103,a=o?Symbol.for("react.portal"):60106,u=o?Symbol.for("react.fragment"):60107,s=o?Symbol.for("react.strict_mode"):60108,c=o?Symbol.for("react.profiler"):60114,l=o?Symbol.for("react.provider"):60109,f=o?Symbol.for("react.context"):60110,d=o?Symbol.for("react.async_mode"):60111,p=o?Symbol.for("react.forward_ref"):60112;o&&Symbol.for("react.placeholder");var h="function"==typeof Symbol&&Symbol.iterator;function m(e){for(var t=arguments.length-1,n="https://reactjs.org/docs/error-decoder.html?invariant="+e,r=0;r<t;r++)n+="&args[]="+encodeURIComponent(arguments[r+1]);!function(e,t,n,r,o,i,a,u){if(!e){if(e=void 0,void 0===t)e=Error("Minified exception occurred; use the non-minified dev environment for the full error message and additional helpful warnings.");else{var s=[n,r,o,i,a,u],c=0;(e=Error(t.replace(/%s/g,function(){return s[c++]}))).name="Invariant Violation"}throw e.framesToPop=1,e}}(!1,"Minified React error #"+e+"; visit %s for the full message or use the non-minified dev environment for full errors and additional helpful warnings. 
",n)}var v={isMounted:function(){return!1},enqueueForceUpdate:function(){},enqueueReplaceState:function(){},enqueueSetState:function(){}},y={};function g(e,t,n){this.props=e,this.context=t,this.refs=y,this.updater=n||v}function _(){}function b(e,t,n){this.props=e,this.context=t,this.refs=y,this.updater=n||v}g.prototype.isReactComponent={},g.prototype.setState=function(e,t){"object"!=typeof e&&"function"!=typeof e&&null!=e&&m("85"),this.updater.enqueueSetState(this,e,t,"setState")},g.prototype.forceUpdate=function(e){this.updater.enqueueForceUpdate(this,e,"forceUpdate")},_.prototype=g.prototype;var w=b.prototype=new _;w.constructor=b,r(w,g.prototype),w.isPureReactComponent=!0;var x={current:null,currentDispatcher:null},E=Object.prototype.hasOwnProperty,O={key:!0,ref:!0,__self:!0,__source:!0};function S(e,t,n){var r=void 0,o={},a=null,u=null;if(null!=t)for(r in void 0!==t.ref&&(u=t.ref),void 0!==t.key&&(a=""+t.key),t)E.call(t,r)&&!O.hasOwnProperty(r)&&(o[r]=t[r]);var s=arguments.length-2;if(1===s)o.children=n;else if(1<s){for(var c=Array(s),l=0;l<s;l++)c[l]=arguments[l+2];o.children=c}if(e&&e.defaultProps)for(r in s=e.defaultProps)void 0===o[r]&&(o[r]=s[r]);return{$$typeof:i,type:e,key:a,ref:u,props:o,_owner:x.current}}function T(e){return"object"==typeof e&&null!==e&&e.$$typeof===i}var k=/\/+/g,R=[];function j(e,t,n,r){if(R.length){var o=R.pop();return o.result=e,o.keyPrefix=t,o.func=n,o.context=r,o.count=0,o}return{result:e,keyPrefix:t,func:n,context:r,count:0}}function P(e){e.result=null,e.keyPrefix=null,e.func=null,e.context=null,e.count=0,10>R.length&&R.push(e)}function C(e,t,n){return null==e?0:function e(t,n,r,o){var u=typeof t;"undefined"!==u&&"boolean"!==u||(t=null);var s=!1;if(null===t)s=!0;else switch(u){case"string":case"number":s=!0;break;case"object":switch(t.$$typeof){case i:case a:s=!0}}if(s)return r(o,t,""===n?"."+M(t,0):n),1;if(s=0,n=""===n?".":n+":",Array.isArray(t))for(var c=0;c<t.length;c++){var l=n+M(u=t[c],c);s+=e(u,l,r,o)}else 
if(l=null===t||"object"!=typeof t?null:"function"==typeof(l=h&&t[h]||t["@@iterator"])?l:null,"function"==typeof l)for(t=l.call(t),c=0;!(u=t.next()).done;)s+=e(u=u.value,l=n+M(u,c++),r,o);else"object"===u&&m("31","[object Object]"==(r=""+t)?"object with keys {"+Object.keys(t).join(", ")+"}":r,"");return s}(e,"",t,n)}function M(e,t){return"object"==typeof e&&null!==e&&null!=e.key?function(e){var t={"=":"=0",":":"=2"};return"$"+(""+e).replace(/[=:]/g,function(e){return t[e]})}(e.key):t.toString(36)}function I(e,t){e.func.call(e.context,t,e.count++)}function A(e,t,n){var r=e.result,o=e.keyPrefix;e=e.func.call(e.context,t,e.count++),Array.isArray(e)?D(e,r,n,function(e){return e}):null!=e&&(T(e)&&(e=function(e,t){return{$$typeof:i,type:e.type,key:t,ref:e.ref,props:e.props,_owner:e._owner}}(e,o+(!e.key||t&&t.key===e.key?"":(""+e.key).replace(k,"$&/")+"/")+n)),r.push(e))}function D(e,t,n,r,o){var i="";null!=n&&(i=(""+n).replace(k,"$&/")+"/"),C(e,A,t=j(t,i,r,o)),P(t)}var N={Children:{map:function(e,t,n){if(null==e)return e;var r=[];return D(e,r,null,t,n),r},forEach:function(e,t,n){if(null==e)return e;C(e,I,t=j(null,null,t,n)),P(t)},count:function(e){return C(e,function(){return null},null)},toArray:function(e){var t=[];return D(e,t,null,function(e){return e}),t},only:function(e){return T(e)||m("143"),e}},createRef:function(){return{current:null}},Component:g,PureComponent:b,createContext:function(e,t){return void 0===t&&(t=null),(e={$$typeof:f,_calculateChangedBits:t,_currentValue:e,_currentValue2:e,Provider:null,Consumer:null,unstable_read:null}).Provider={$$typeof:l,_context:e},e.Consumer=e,e.unstable_read=function(e,t){var n=x.currentDispatcher;return null===n&&m("277"),n.readContext(e,t)}.bind(null,e),e},forwardRef:function(e){return{$$typeof:p,render:e}},Fragment:u,StrictMode:s,unstable_AsyncMode:d,unstable_Profiler:c,createElement:S,cloneElement:function(e,t,n){(null===e||void 0===e)&&m("267",e);var o=void 0,a=r({},e.props),u=e.key,s=e.ref,c=e._owner;if(null!=t){void 
0!==t.ref&&(s=t.ref,c=x.current),void 0!==t.key&&(u=""+t.key);var l=void 0;for(o in e.type&&e.type.defaultProps&&(l=e.type.defaultProps),t)E.call(t,o)&&!O.hasOwnProperty(o)&&(a[o]=void 0===t[o]&&void 0!==l?l[o]:t[o])}if(1===(o=arguments.length-2))a.children=n;else if(1<o){l=Array(o);for(var f=0;f<o;f++)l[f]=arguments[f+2];a.children=l}return{$$typeof:i,type:e.type,key:u,ref:s,props:a,_owner:c}},createFactory:function(e){var t=S.bind(null,e);return t.type=e,t},isValidElement:T,version:"16.5.2",__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED:{ReactCurrentOwner:x,assign:r}},L={default:N},U=L&&N||L;e.exports=U.default||U},function(e,t,n){"use strict";!function e(){if("undefined"!=typeof __REACT_DEVTOOLS_GLOBAL_HOOK__&&"function"==typeof __REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE)try{__REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE(e)}catch(e){console.error(e)}}(),e.exports=n(215)},function(e,t,n){"use strict";
+/** @license React v16.5.2
+ * react-dom.production.min.js
+ *
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * This source code is licensed under the MIT license found in the
+ * LICENSE file in the root directory of this source tree.
+ */var r=n(1),o=n(115),i=n(216);function a(e){for(var t=arguments.length-1,n="https://reactjs.org/docs/error-decoder.html?invariant="+e,r=0;r<t;r++)n+="&args[]="+encodeURIComponent(arguments[r+1]);!function(e,t,n,r,o,i,a,u){if(!e){if(e=void 0,void 0===t)e=Error("Minified exception occurred; use the non-minified dev environment for the full error message and additional helpful warnings.");else{var s=[n,r,o,i,a,u],c=0;(e=Error(t.replace(/%s/g,function(){return s[c++]}))).name="Invariant Violation"}throw e.framesToPop=1,e}}(!1,"Minified React error #"+e+"; visit %s for the full message or use the non-minified dev environment for full errors and additional helpful warnings. ",n)}r||a("227");var u=!1,s=null,c=!1,l=null,f={onError:function(e){u=!0,s=e}};function d(e,t,n,r,o,i,a,c,l){u=!1,s=null,function(e,t,n,r,o,i,a,u,s){var c=Array.prototype.slice.call(arguments,3);try{t.apply(n,c)}catch(e){this.onError(e)}}.apply(f,arguments)}var p=null,h={};function m(){if(p)for(var e in h){var t=h[e],n=p.indexOf(e);if(-1<n||a("96",e),!y[n])for(var r in t.extractEvents||a("97",e),y[n]=t,n=t.eventTypes){var o=void 0,i=n[r],u=t,s=r;g.hasOwnProperty(s)&&a("99",s),g[s]=i;var c=i.phasedRegistrationNames;if(c){for(o in c)c.hasOwnProperty(o)&&v(c[o],u,s);o=!0}else i.registrationName?(v(i.registrationName,u,s),o=!0):o=!1;o||a("98",r,e)}}}function v(e,t,n){_[e]&&a("100",e),_[e]=t,b[e]=t.eventTypes[n].dependencies}var y=[],g={},_={},b={},w=null,x=null,E=null;function O(e,t,n,r){t=e.type||"unknown-event",e.currentTarget=E(r),function(e,t,n,r,o,i,f,p,h){if(d.apply(this,arguments),u){if(u){var m=s;u=!1,s=null}else a("198"),m=void 0;c||(c=!0,l=m)}}(t,n,void 0,e),e.currentTarget=null}function S(e,t){return null==t&&a("30"),null==e?t:Array.isArray(e)?Array.isArray(t)?(e.push.apply(e,t),e):(e.push(t),e):Array.isArray(t)?[e].concat(t):[e,t]}function T(e,t,n){Array.isArray(e)?e.forEach(t,n):e&&t.call(n,e)}var k=null;function R(e,t){if(e){var 
n=e._dispatchListeners,r=e._dispatchInstances;if(Array.isArray(n))for(var o=0;o<n.length&&!e.isPropagationStopped();o++)O(e,t,n[o],r[o]);else n&&O(e,t,n,r);e._dispatchListeners=null,e._dispatchInstances=null,e.isPersistent()||e.constructor.release(e)}}function j(e){return R(e,!0)}function P(e){return R(e,!1)}var C={injectEventPluginOrder:function(e){p&&a("101"),p=Array.prototype.slice.call(e),m()},injectEventPluginsByName:function(e){var t,n=!1;for(t in e)if(e.hasOwnProperty(t)){var r=e[t];h.hasOwnProperty(t)&&h[t]===r||(h[t]&&a("102",t),h[t]=r,n=!0)}n&&m()}};function M(e,t){var n=e.stateNode;if(!n)return null;var r=w(n);if(!r)return null;n=r[t];e:switch(t){case"onClick":case"onClickCapture":case"onDoubleClick":case"onDoubleClickCapture":case"onMouseDown":case"onMouseDownCapture":case"onMouseMove":case"onMouseMoveCapture":case"onMouseUp":case"onMouseUpCapture":(r=!r.disabled)||(r=!("button"===(e=e.type)||"input"===e||"select"===e||"textarea"===e)),e=!r;break e;default:e=!1}return e?null:(n&&"function"!=typeof n&&a("231",t,typeof n),n)}function I(e,t){if(null!==e&&(k=S(k,e)),e=k,k=null,e&&(T(e,t?j:P),k&&a("95"),c))throw t=l,c=!1,l=null,t}var A=Math.random().toString(36).slice(2),D="__reactInternalInstance$"+A,N="__reactEventHandlers$"+A;function L(e){if(e[D])return e[D];for(;!e[D];){if(!e.parentNode)return null;e=e.parentNode}return 7===(e=e[D]).tag||8===e.tag?e:null}function U(e){return!(e=e[D])||7!==e.tag&&8!==e.tag?null:e}function F(e){if(7===e.tag||8===e.tag)return e.stateNode;a("33")}function B(e){return e[N]||null}function W(e){do{e=e.return}while(e&&7!==e.tag);return e||null}function Y(e,t,n){(t=M(e,n.dispatchConfig.phasedRegistrationNames[t]))&&(n._dispatchListeners=S(n._dispatchListeners,t),n._dispatchInstances=S(n._dispatchInstances,e))}function $(e){if(e&&e.dispatchConfig.phasedRegistrationNames){for(var t=e._targetInst,n=[];t;)n.push(t),t=W(t);for(t=n.length;0<t--;)Y(n[t],"captured",e);for(t=0;t<n.length;t++)Y(n[t],"bubbled",e)}}function 
q(e,t,n){e&&n&&n.dispatchConfig.registrationName&&(t=M(e,n.dispatchConfig.registrationName))&&(n._dispatchListeners=S(n._dispatchListeners,t),n._dispatchInstances=S(n._dispatchInstances,e))}function H(e){e&&e.dispatchConfig.registrationName&&q(e._targetInst,null,e)}function z(e){T(e,$)}var G=!("undefined"==typeof window||!window.document||!window.document.createElement);function V(e,t){var n={};return n[e.toLowerCase()]=t.toLowerCase(),n["Webkit"+e]="webkit"+t,n["Moz"+e]="moz"+t,n}var X={animationend:V("Animation","AnimationEnd"),animationiteration:V("Animation","AnimationIteration"),animationstart:V("Animation","AnimationStart"),transitionend:V("Transition","TransitionEnd")},K={},Q={};function J(e){if(K[e])return K[e];if(!X[e])return e;var t,n=X[e];for(t in n)if(n.hasOwnProperty(t)&&t in Q)return K[e]=n[t];return e}G&&(Q=document.createElement("div").style,"AnimationEvent"in window||(delete X.animationend.animation,delete X.animationiteration.animation,delete X.animationstart.animation),"TransitionEvent"in window||delete X.transitionend.transition);var Z=J("animationend"),ee=J("animationiteration"),te=J("animationstart"),ne=J("transitionend"),re="abort canplay canplaythrough durationchange emptied encrypted ended error loadeddata loadedmetadata loadstart pause play playing progress ratechange seeked seeking stalled suspend timeupdate volumechange waiting".split(" "),oe=null,ie=null,ae=null;function ue(){if(ae)return ae;var e,t,n=ie,r=n.length,o="value"in oe?oe.value:oe.textContent,i=o.length;for(e=0;e<r&&n[e]===o[e];e++);var a=r-e;for(t=1;t<=a&&n[r-t]===o[i-t];t++);return ae=o.slice(e,1<t?1-t:void 0)}function se(){return!0}function ce(){return!1}function le(e,t,n,r){for(var o in this.dispatchConfig=e,this._targetInst=t,this.nativeEvent=n,e=this.constructor.Interface)e.hasOwnProperty(o)&&((t=e[o])?this[o]=t(n):"target"===o?this.target=r:this[o]=n[o]);return 
this.isDefaultPrevented=(null!=n.defaultPrevented?n.defaultPrevented:!1===n.returnValue)?se:ce,this.isPropagationStopped=ce,this}function fe(e,t,n,r){if(this.eventPool.length){var o=this.eventPool.pop();return this.call(o,e,t,n,r),o}return new this(e,t,n,r)}function de(e){e instanceof this||a("279"),e.destructor(),10>this.eventPool.length&&this.eventPool.push(e)}function pe(e){e.eventPool=[],e.getPooled=fe,e.release=de}o(le.prototype,{preventDefault:function(){this.defaultPrevented=!0;var e=this.nativeEvent;e&&(e.preventDefault?e.preventDefault():"unknown"!=typeof e.returnValue&&(e.returnValue=!1),this.isDefaultPrevented=se)},stopPropagation:function(){var e=this.nativeEvent;e&&(e.stopPropagation?e.stopPropagation():"unknown"!=typeof e.cancelBubble&&(e.cancelBubble=!0),this.isPropagationStopped=se)},persist:function(){this.isPersistent=se},isPersistent:ce,destructor:function(){var e,t=this.constructor.Interface;for(e in t)this[e]=null;this.nativeEvent=this._targetInst=this.dispatchConfig=null,this.isPropagationStopped=this.isDefaultPrevented=ce,this._dispatchInstances=this._dispatchListeners=null}}),le.Interface={type:null,target:null,currentTarget:function(){return null},eventPhase:null,bubbles:null,cancelable:null,timeStamp:function(e){return e.timeStamp||Date.now()},defaultPrevented:null,isTrusted:null},le.extend=function(e){function t(){}function n(){return r.apply(this,arguments)}var r=this;t.prototype=r.prototype;var i=new t;return o(i,n.prototype),n.prototype=i,n.prototype.constructor=n,n.Interface=o({},r.Interface,e),n.extend=r.extend,pe(n),n},pe(le);var he=le.extend({data:null}),me=le.extend({data:null}),ve=[9,13,27,32],ye=G&&"CompositionEvent"in window,ge=null;G&&"documentMode"in document&&(ge=document.documentMode);var _e=G&&"TextEvent"in 
window&&!ge,be=G&&(!ye||ge&&8<ge&&11>=ge),we=String.fromCharCode(32),xe={beforeInput:{phasedRegistrationNames:{bubbled:"onBeforeInput",captured:"onBeforeInputCapture"},dependencies:["compositionend","keypress","textInput","paste"]},compositionEnd:{phasedRegistrationNames:{bubbled:"onCompositionEnd",captured:"onCompositionEndCapture"},dependencies:"blur compositionend keydown keypress keyup mousedown".split(" ")},compositionStart:{phasedRegistrationNames:{bubbled:"onCompositionStart",captured:"onCompositionStartCapture"},dependencies:"blur compositionstart keydown keypress keyup mousedown".split(" ")},compositionUpdate:{phasedRegistrationNames:{bubbled:"onCompositionUpdate",captured:"onCompositionUpdateCapture"},dependencies:"blur compositionupdate keydown keypress keyup mousedown".split(" ")}},Ee=!1;function Oe(e,t){switch(e){case"keyup":return-1!==ve.indexOf(t.keyCode);case"keydown":return 229!==t.keyCode;case"keypress":case"mousedown":case"blur":return!0;default:return!1}}function Se(e){return"object"==typeof(e=e.detail)&&"data"in e?e.data:null}var Te=!1;var ke={eventTypes:xe,extractEvents:function(e,t,n,r){var o=void 0,i=void 0;if(ye)e:{switch(e){case"compositionstart":o=xe.compositionStart;break e;case"compositionend":o=xe.compositionEnd;break e;case"compositionupdate":o=xe.compositionUpdate;break e}o=void 0}else Te?Oe(e,n)&&(o=xe.compositionEnd):"keydown"===e&&229===n.keyCode&&(o=xe.compositionStart);return o?(be&&"ko"!==n.locale&&(Te||o!==xe.compositionStart?o===xe.compositionEnd&&Te&&(i=ue()):(ie="value"in(oe=r)?oe.value:oe.textContent,Te=!0)),o=he.getPooled(o,t,n,r),i?o.data=i:null!==(i=Se(n))&&(o.data=i),z(o),i=o):i=null,(e=_e?function(e,t){switch(e){case"compositionend":return Se(t);case"keypress":return 32!==t.which?null:(Ee=!0,we);case"textInput":return(e=t.data)===we&&Ee?null:e;default:return null}}(e,n):function(e,t){if(Te)return"compositionend"===e||!ye&&Oe(e,t)?(e=ue(),ae=ie=oe=null,Te=!1,e):null;switch(e){case"paste":return 
null;case"keypress":if(!(t.ctrlKey||t.altKey||t.metaKey)||t.ctrlKey&&t.altKey){if(t.char&&1<t.char.length)return t.char;if(t.which)return String.fromCharCode(t.which)}return null;case"compositionend":return be&&"ko"!==t.locale?null:t.data;default:return null}}(e,n))?((t=me.getPooled(xe.beforeInput,t,n,r)).data=e,z(t)):t=null,null===i?t:null===t?i:[i,t]}},Re=null,je=null,Pe=null;function Ce(e){if(e=x(e)){"function"!=typeof Re&&a("280");var t=w(e.stateNode);Re(e.stateNode,e.type,t)}}function Me(e){je?Pe?Pe.push(e):Pe=[e]:je=e}function Ie(){if(je){var e=je,t=Pe;if(Pe=je=null,Ce(e),t)for(e=0;e<t.length;e++)Ce(t[e])}}function Ae(e,t){return e(t)}function De(e,t,n){return e(t,n)}function Ne(){}var Le=!1;function Ue(e,t){if(Le)return e(t);Le=!0;try{return Ae(e,t)}finally{Le=!1,(null!==je||null!==Pe)&&(Ne(),Ie())}}var Fe={color:!0,date:!0,datetime:!0,"datetime-local":!0,email:!0,month:!0,number:!0,password:!0,range:!0,search:!0,tel:!0,text:!0,time:!0,url:!0,week:!0};function Be(e){var t=e&&e.nodeName&&e.nodeName.toLowerCase();return"input"===t?!!Fe[e.type]:"textarea"===t}function We(e){return(e=e.target||e.srcElement||window).correspondingUseElement&&(e=e.correspondingUseElement),3===e.nodeType?e.parentNode:e}function Ye(e){if(!G)return!1;var t=(e="on"+e)in document;return t||((t=document.createElement("div")).setAttribute(e,"return;"),t="function"==typeof t[e]),t}function $e(e){var t=e.type;return(e=e.nodeName)&&"input"===e.toLowerCase()&&("checkbox"===t||"radio"===t)}function qe(e){e._valueTracker||(e._valueTracker=function(e){var t=$e(e)?"checked":"value",n=Object.getOwnPropertyDescriptor(e.constructor.prototype,t),r=""+e[t];if(!e.hasOwnProperty(t)&&void 0!==n&&"function"==typeof n.get&&"function"==typeof n.set){var o=n.get,i=n.set;return Object.defineProperty(e,t,{configurable:!0,get:function(){return o.call(this)},set:function(e){r=""+e,i.call(this,e)}}),Object.defineProperty(e,t,{enumerable:n.enumerable}),{getValue:function(){return 
r},setValue:function(e){r=""+e},stopTracking:function(){e._valueTracker=null,delete e[t]}}}}(e))}function He(e){if(!e)return!1;var t=e._valueTracker;if(!t)return!0;var n=t.getValue(),r="";return e&&(r=$e(e)?e.checked?"true":"false":e.value),(e=r)!==n&&(t.setValue(e),!0)}var ze=r.__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED,Ge=/^(.*)[\\\/]/,Ve="function"==typeof Symbol&&Symbol.for,Xe=Ve?Symbol.for("react.element"):60103,Ke=Ve?Symbol.for("react.portal"):60106,Qe=Ve?Symbol.for("react.fragment"):60107,Je=Ve?Symbol.for("react.strict_mode"):60108,Ze=Ve?Symbol.for("react.profiler"):60114,et=Ve?Symbol.for("react.provider"):60109,tt=Ve?Symbol.for("react.context"):60110,nt=Ve?Symbol.for("react.async_mode"):60111,rt=Ve?Symbol.for("react.forward_ref"):60112,ot=Ve?Symbol.for("react.placeholder"):60113,it="function"==typeof Symbol&&Symbol.iterator;function at(e){return null===e||"object"!=typeof e?null:"function"==typeof(e=it&&e[it]||e["@@iterator"])?e:null}function ut(e){if(null==e)return null;if("function"==typeof e)return e.displayName||e.name||null;if("string"==typeof e)return e;switch(e){case nt:return"AsyncMode";case Qe:return"Fragment";case Ke:return"Portal";case Ze:return"Profiler";case Je:return"StrictMode";case ot:return"Placeholder"}if("object"==typeof e){switch(e.$$typeof){case tt:return"Context.Consumer";case et:return"Context.Provider";case rt:var t=e.render;return t=t.displayName||t.name||"",e.displayName||(""!==t?"ForwardRef("+t+")":"ForwardRef")}if("function"==typeof e.then&&(e=1===e._reactStatus?e._reactResult:null))return ut(e)}return null}function st(e){var t="";do{e:switch(e.tag){case 4:case 0:case 1:case 2:case 3:case 7:case 10:var n=e._debugOwner,r=e._debugSource,o=ut(e.type),i=null;n&&(i=ut(n.type)),n=o,o="",r?o=" (at "+r.fileName.replace(Ge,"")+":"+r.lineNumber+")":i&&(o=" (created by "+i+")"),i="\n in "+(n||"Unknown")+o;break e;default:i=""}t+=i,e=e.return}while(e);return t}var 
ct=/^[:A-Z_a-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD][:A-Z_a-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\-.0-9\u00B7\u0300-\u036F\u203F-\u2040]*$/,lt=Object.prototype.hasOwnProperty,ft={},dt={};function pt(e,t,n,r,o){this.acceptsBooleans=2===t||3===t||4===t,this.attributeName=r,this.attributeNamespace=o,this.mustUseProperty=n,this.propertyName=e,this.type=t}var ht={};"children dangerouslySetInnerHTML defaultValue defaultChecked innerHTML suppressContentEditableWarning suppressHydrationWarning style".split(" ").forEach(function(e){ht[e]=new pt(e,0,!1,e,null)}),[["acceptCharset","accept-charset"],["className","class"],["htmlFor","for"],["httpEquiv","http-equiv"]].forEach(function(e){var t=e[0];ht[t]=new pt(t,1,!1,e[1],null)}),["contentEditable","draggable","spellCheck","value"].forEach(function(e){ht[e]=new pt(e,2,!1,e.toLowerCase(),null)}),["autoReverse","externalResourcesRequired","focusable","preserveAlpha"].forEach(function(e){ht[e]=new pt(e,2,!1,e,null)}),"allowFullScreen async autoFocus autoPlay controls default defer disabled formNoValidate hidden loop noModule noValidate open playsInline readOnly required reversed scoped seamless itemScope".split(" ").forEach(function(e){ht[e]=new pt(e,3,!1,e.toLowerCase(),null)}),["checked","multiple","muted","selected"].forEach(function(e){ht[e]=new pt(e,3,!0,e,null)}),["capture","download"].forEach(function(e){ht[e]=new pt(e,4,!1,e,null)}),["cols","rows","size","span"].forEach(function(e){ht[e]=new pt(e,6,!1,e,null)}),["rowSpan","start"].forEach(function(e){ht[e]=new pt(e,5,!1,e.toLowerCase(),null)});var mt=/[\-:]([a-z])/g;function vt(e){return e[1].toUpperCase()}function yt(e,t,n,r){var 
o=ht.hasOwnProperty(t)?ht[t]:null;(null!==o?0===o.type:!r&&(2<t.length&&("o"===t[0]||"O"===t[0])&&("n"===t[1]||"N"===t[1])))||(function(e,t,n,r){if(null===t||void 0===t||function(e,t,n,r){if(null!==n&&0===n.type)return!1;switch(typeof t){case"function":case"symbol":return!0;case"boolean":return!r&&(null!==n?!n.acceptsBooleans:"data-"!==(e=e.toLowerCase().slice(0,5))&&"aria-"!==e);default:return!1}}(e,t,n,r))return!0;if(r)return!1;if(null!==n)switch(n.type){case 3:return!t;case 4:return!1===t;case 5:return isNaN(t);case 6:return isNaN(t)||1>t}return!1}(t,n,o,r)&&(n=null),r||null===o?function(e){return!!lt.call(dt,e)||!lt.call(ft,e)&&(ct.test(e)?dt[e]=!0:(ft[e]=!0,!1))}(t)&&(null===n?e.removeAttribute(t):e.setAttribute(t,""+n)):o.mustUseProperty?e[o.propertyName]=null===n?3!==o.type&&"":n:(t=o.attributeName,r=o.attributeNamespace,null===n?e.removeAttribute(t):(n=3===(o=o.type)||4===o&&!0===n?"":""+n,r?e.setAttributeNS(r,t,n):e.setAttribute(t,n))))}function gt(e){switch(typeof e){case"boolean":case"number":case"object":case"string":case"undefined":return e;default:return""}}function _t(e,t){var n=t.checked;return o({},t,{defaultChecked:void 0,defaultValue:void 0,value:void 0,checked:null!=n?n:e._wrapperState.initialChecked})}function bt(e,t){var n=null==t.defaultValue?"":t.defaultValue,r=null!=t.checked?t.checked:t.defaultChecked;n=gt(null!=t.value?t.value:n),e._wrapperState={initialChecked:r,initialValue:n,controlled:"checkbox"===t.type||"radio"===t.type?null!=t.checked:null!=t.value}}function wt(e,t){null!=(t=t.checked)&&yt(e,"checked",t,!1)}function xt(e,t){wt(e,t);var n=gt(t.value),r=t.type;if(null!=n)"number"===r?(0===n&&""===e.value||e.value!=n)&&(e.value=""+n):e.value!==""+n&&(e.value=""+n);else if("submit"===r||"reset"===r)return void e.removeAttribute("value");t.hasOwnProperty("value")?Ot(e,t.type,n):t.hasOwnProperty("defaultValue")&&Ot(e,t.type,gt(t.defaultValue)),null==t.checked&&null!=t.defaultChecked&&(e.defaultChecked=!!t.defaultChecked)}function 
Et(e,t,n){if(t.hasOwnProperty("value")||t.hasOwnProperty("defaultValue")){var r=t.type;if(!("submit"!==r&&"reset"!==r||void 0!==t.value&&null!==t.value))return;t=""+e._wrapperState.initialValue,n||t===e.value||(e.value=t),e.defaultValue=t}""!==(n=e.name)&&(e.name=""),e.defaultChecked=!e.defaultChecked,e.defaultChecked=!!e._wrapperState.initialChecked,""!==n&&(e.name=n)}function Ot(e,t,n){"number"===t&&e.ownerDocument.activeElement===e||(null==n?e.defaultValue=""+e._wrapperState.initialValue:e.defaultValue!==""+n&&(e.defaultValue=""+n))}"accent-height alignment-baseline arabic-form baseline-shift cap-height clip-path clip-rule color-interpolation color-interpolation-filters color-profile color-rendering dominant-baseline enable-background fill-opacity fill-rule flood-color flood-opacity font-family font-size font-size-adjust font-stretch font-style font-variant font-weight glyph-name glyph-orientation-horizontal glyph-orientation-vertical horiz-adv-x horiz-origin-x image-rendering letter-spacing lighting-color marker-end marker-mid marker-start overline-position overline-thickness paint-order panose-1 pointer-events rendering-intent shape-rendering stop-color stop-opacity strikethrough-position strikethrough-thickness stroke-dasharray stroke-dashoffset stroke-linecap stroke-linejoin stroke-miterlimit stroke-opacity stroke-width text-anchor text-decoration text-rendering underline-position underline-thickness unicode-bidi unicode-range units-per-em v-alphabetic v-hanging v-ideographic v-mathematical vector-effect vert-adv-y vert-origin-x vert-origin-y word-spacing writing-mode xmlns:xlink x-height".split(" ").forEach(function(e){var t=e.replace(mt,vt);ht[t]=new pt(t,1,!1,e,null)}),"xlink:actuate xlink:arcrole xlink:href xlink:role xlink:show xlink:title xlink:type".split(" ").forEach(function(e){var t=e.replace(mt,vt);ht[t]=new pt(t,1,!1,e,"http://www.w3.org/1999/xlink")}),["xml:base","xml:lang","xml:space"].forEach(function(e){var t=e.replace(mt,vt);ht[t]=new 
pt(t,1,!1,e,"http://www.w3.org/XML/1998/namespace")}),ht.tabIndex=new pt("tabIndex",1,!1,"tabindex",null);var St={change:{phasedRegistrationNames:{bubbled:"onChange",captured:"onChangeCapture"},dependencies:"blur change click focus input keydown keyup selectionchange".split(" ")}};function Tt(e,t,n){return(e=le.getPooled(St.change,e,t,n)).type="change",Me(n),z(e),e}var kt=null,Rt=null;function jt(e){I(e,!1)}function Pt(e){if(He(F(e)))return e}function Ct(e,t){if("change"===e)return t}var Mt=!1;function It(){kt&&(kt.detachEvent("onpropertychange",At),Rt=kt=null)}function At(e){"value"===e.propertyName&&Pt(Rt)&&Ue(jt,e=Tt(Rt,e,We(e)))}function Dt(e,t,n){"focus"===e?(It(),Rt=n,(kt=t).attachEvent("onpropertychange",At)):"blur"===e&&It()}function Nt(e){if("selectionchange"===e||"keyup"===e||"keydown"===e)return Pt(Rt)}function Lt(e,t){if("click"===e)return Pt(t)}function Ut(e,t){if("input"===e||"change"===e)return Pt(t)}G&&(Mt=Ye("input")&&(!document.documentMode||9<document.documentMode));var Ft={eventTypes:St,_isInputEventSupported:Mt,extractEvents:function(e,t,n,r){var o=t?F(t):window,i=void 0,a=void 0,u=o.nodeName&&o.nodeName.toLowerCase();if("select"===u||"input"===u&&"file"===o.type?i=Ct:Be(o)?Mt?i=Ut:(i=Nt,a=Dt):(u=o.nodeName)&&"input"===u.toLowerCase()&&("checkbox"===o.type||"radio"===o.type)&&(i=Lt),i&&(i=i(e,t)))return Tt(i,n,r);a&&a(e,o,t),"blur"===e&&(e=o._wrapperState)&&e.controlled&&"number"===o.type&&Ot(o,"number",o.value)}},Bt=le.extend({view:null,detail:null}),Wt={Alt:"altKey",Control:"ctrlKey",Meta:"metaKey",Shift:"shiftKey"};function Yt(e){var t=this.nativeEvent;return t.getModifierState?t.getModifierState(e):!!(e=Wt[e])&&!!t[e]}function $t(){return Yt}var qt=0,Ht=0,zt=!1,Gt=!1,Vt=Bt.extend({screenX:null,screenY:null,clientX:null,clientY:null,pageX:null,pageY:null,ctrlKey:null,shiftKey:null,altKey:null,metaKey:null,getModifierState:$t,button:null,buttons:null,relatedTarget:function(e){return 
e.relatedTarget||(e.fromElement===e.srcElement?e.toElement:e.fromElement)},movementX:function(e){if("movementX"in e)return e.movementX;var t=qt;return qt=e.screenX,zt?"mousemove"===e.type?e.screenX-t:0:(zt=!0,0)},movementY:function(e){if("movementY"in e)return e.movementY;var t=Ht;return Ht=e.screenY,Gt?"mousemove"===e.type?e.screenY-t:0:(Gt=!0,0)}}),Xt=Vt.extend({pointerId:null,width:null,height:null,pressure:null,tangentialPressure:null,tiltX:null,tiltY:null,twist:null,pointerType:null,isPrimary:null}),Kt={mouseEnter:{registrationName:"onMouseEnter",dependencies:["mouseout","mouseover"]},mouseLeave:{registrationName:"onMouseLeave",dependencies:["mouseout","mouseover"]},pointerEnter:{registrationName:"onPointerEnter",dependencies:["pointerout","pointerover"]},pointerLeave:{registrationName:"onPointerLeave",dependencies:["pointerout","pointerover"]}},Qt={eventTypes:Kt,extractEvents:function(e,t,n,r){var o="mouseover"===e||"pointerover"===e,i="mouseout"===e||"pointerout"===e;if(o&&(n.relatedTarget||n.fromElement)||!i&&!o)return null;if(o=r.window===r?r:(o=r.ownerDocument)?o.defaultView||o.parentWindow:window,i?(i=t,t=(t=n.relatedTarget||n.toElement)?L(t):null):i=null,i===t)return null;var a=void 0,u=void 0,s=void 0,c=void 0;"mouseout"===e||"mouseover"===e?(a=Vt,u=Kt.mouseLeave,s=Kt.mouseEnter,c="mouse"):"pointerout"!==e&&"pointerover"!==e||(a=Xt,u=Kt.pointerLeave,s=Kt.pointerEnter,c="pointer");var l=null==i?o:F(i);if(o=null==t?o:F(t),(e=a.getPooled(u,i,n,r)).type=c+"leave",e.target=l,e.relatedTarget=o,(n=a.getPooled(s,t,n,r)).type=c+"enter",n.target=o,n.relatedTarget=l,r=t,i&&r)e:{for(o=r,c=0,a=t=i;a;a=W(a))c++;for(a=0,s=o;s;s=W(s))a++;for(;0<c-a;)t=W(t),c--;for(;0<a-c;)o=W(o),a--;for(;c--;){if(t===o||t===o.alternate)break e;t=W(t),o=W(o)}t=null}else 
t=null;for(o=t,t=[];i&&i!==o&&(null===(c=i.alternate)||c!==o);)t.push(i),i=W(i);for(i=[];r&&r!==o&&(null===(c=r.alternate)||c!==o);)i.push(r),r=W(r);for(r=0;r<t.length;r++)q(t[r],"bubbled",e);for(r=i.length;0<r--;)q(i[r],"captured",n);return[e,n]}},Jt=Object.prototype.hasOwnProperty;function Zt(e,t){return e===t?0!==e||0!==t||1/e==1/t:e!=e&&t!=t}function en(e,t){if(Zt(e,t))return!0;if("object"!=typeof e||null===e||"object"!=typeof t||null===t)return!1;var n=Object.keys(e),r=Object.keys(t);if(n.length!==r.length)return!1;for(r=0;r<n.length;r++)if(!Jt.call(t,n[r])||!Zt(e[n[r]],t[n[r]]))return!1;return!0}function tn(e){var t=e;if(e.alternate)for(;t.return;)t=t.return;else{if(0!=(2&t.effectTag))return 1;for(;t.return;)if(0!=(2&(t=t.return).effectTag))return 1}return 5===t.tag?2:3}function nn(e){2!==tn(e)&&a("188")}function rn(e){if(!(e=function(e){var t=e.alternate;if(!t)return 3===(t=tn(e))&&a("188"),1===t?null:e;for(var n=e,r=t;;){var o=n.return,i=o?o.alternate:null;if(!o||!i)break;if(o.child===i.child){for(var u=o.child;u;){if(u===n)return nn(o),e;if(u===r)return nn(o),t;u=u.sibling}a("188")}if(n.return!==r.return)n=o,r=i;else{u=!1;for(var s=o.child;s;){if(s===n){u=!0,n=o,r=i;break}if(s===r){u=!0,r=o,n=i;break}s=s.sibling}if(!u){for(s=i.child;s;){if(s===n){u=!0,n=i,r=o;break}if(s===r){u=!0,r=i,n=o;break}s=s.sibling}u||a("189")}}n.alternate!==r&&a("190")}return 5!==n.tag&&a("188"),n.stateNode.current===n?e:t}(e)))return null;for(var t=e;;){if(7===t.tag||8===t.tag)return t;if(t.child)t.child.return=t,t=t.child;else{if(t===e)break;for(;!t.sibling;){if(!t.return||t.return===e)return null;t=t.return}t.sibling.return=t.return,t=t.sibling}}return null}var on=le.extend({animationName:null,elapsedTime:null,pseudoElement:null}),an=le.extend({clipboardData:function(e){return"clipboardData"in e?e.clipboardData:window.clipboardData}}),un=Bt.extend({relatedTarget:null});function sn(e){var t=e.keyCode;return"charCode"in 
e?0===(e=e.charCode)&&13===t&&(e=13):e=t,10===e&&(e=13),32<=e||13===e?e:0}var cn={Esc:"Escape",Spacebar:" ",Left:"ArrowLeft",Up:"ArrowUp",Right:"ArrowRight",Down:"ArrowDown",Del:"Delete",Win:"OS",Menu:"ContextMenu",Apps:"ContextMenu",Scroll:"ScrollLock",MozPrintableKey:"Unidentified"},ln={8:"Backspace",9:"Tab",12:"Clear",13:"Enter",16:"Shift",17:"Control",18:"Alt",19:"Pause",20:"CapsLock",27:"Escape",32:" ",33:"PageUp",34:"PageDown",35:"End",36:"Home",37:"ArrowLeft",38:"ArrowUp",39:"ArrowRight",40:"ArrowDown",45:"Insert",46:"Delete",112:"F1",113:"F2",114:"F3",115:"F4",116:"F5",117:"F6",118:"F7",119:"F8",120:"F9",121:"F10",122:"F11",123:"F12",144:"NumLock",145:"ScrollLock",224:"Meta"},fn=Bt.extend({key:function(e){if(e.key){var t=cn[e.key]||e.key;if("Unidentified"!==t)return t}return"keypress"===e.type?13===(e=sn(e))?"Enter":String.fromCharCode(e):"keydown"===e.type||"keyup"===e.type?ln[e.keyCode]||"Unidentified":""},location:null,ctrlKey:null,shiftKey:null,altKey:null,metaKey:null,repeat:null,locale:null,getModifierState:$t,charCode:function(e){return"keypress"===e.type?sn(e):0},keyCode:function(e){return"keydown"===e.type||"keyup"===e.type?e.keyCode:0},which:function(e){return"keypress"===e.type?sn(e):"keydown"===e.type||"keyup"===e.type?e.keyCode:0}}),dn=Vt.extend({dataTransfer:null}),pn=Bt.extend({touches:null,targetTouches:null,changedTouches:null,altKey:null,metaKey:null,ctrlKey:null,shiftKey:null,getModifierState:$t}),hn=le.extend({propertyName:null,elapsedTime:null,pseudoElement:null}),mn=Vt.extend({deltaX:function(e){return"deltaX"in e?e.deltaX:"wheelDeltaX"in e?-e.wheelDeltaX:0},deltaY:function(e){return"deltaY"in e?e.deltaY:"wheelDeltaY"in e?-e.wheelDeltaY:"wheelDelta"in 
e?-e.wheelDelta:0},deltaZ:null,deltaMode:null}),vn=[["abort","abort"],[Z,"animationEnd"],[ee,"animationIteration"],[te,"animationStart"],["canplay","canPlay"],["canplaythrough","canPlayThrough"],["drag","drag"],["dragenter","dragEnter"],["dragexit","dragExit"],["dragleave","dragLeave"],["dragover","dragOver"],["durationchange","durationChange"],["emptied","emptied"],["encrypted","encrypted"],["ended","ended"],["error","error"],["gotpointercapture","gotPointerCapture"],["load","load"],["loadeddata","loadedData"],["loadedmetadata","loadedMetadata"],["loadstart","loadStart"],["lostpointercapture","lostPointerCapture"],["mousemove","mouseMove"],["mouseout","mouseOut"],["mouseover","mouseOver"],["playing","playing"],["pointermove","pointerMove"],["pointerout","pointerOut"],["pointerover","pointerOver"],["progress","progress"],["scroll","scroll"],["seeking","seeking"],["stalled","stalled"],["suspend","suspend"],["timeupdate","timeUpdate"],["toggle","toggle"],["touchmove","touchMove"],[ne,"transitionEnd"],["waiting","waiting"],["wheel","wheel"]],yn={},gn={};function _n(e,t){var 
n=e[0],r="on"+((e=e[1])[0].toUpperCase()+e.slice(1));t={phasedRegistrationNames:{bubbled:r,captured:r+"Capture"},dependencies:[n],isInteractive:t},yn[e]=t,gn[n]=t}[["blur","blur"],["cancel","cancel"],["click","click"],["close","close"],["contextmenu","contextMenu"],["copy","copy"],["cut","cut"],["auxclick","auxClick"],["dblclick","doubleClick"],["dragend","dragEnd"],["dragstart","dragStart"],["drop","drop"],["focus","focus"],["input","input"],["invalid","invalid"],["keydown","keyDown"],["keypress","keyPress"],["keyup","keyUp"],["mousedown","mouseDown"],["mouseup","mouseUp"],["paste","paste"],["pause","pause"],["play","play"],["pointercancel","pointerCancel"],["pointerdown","pointerDown"],["pointerup","pointerUp"],["ratechange","rateChange"],["reset","reset"],["seeked","seeked"],["submit","submit"],["touchcancel","touchCancel"],["touchend","touchEnd"],["touchstart","touchStart"],["volumechange","volumeChange"]].forEach(function(e){_n(e,!0)}),vn.forEach(function(e){_n(e,!1)});var bn={eventTypes:yn,isInteractiveTopLevelEventType:function(e){return void 0!==(e=gn[e])&&!0===e.isInteractive},extractEvents:function(e,t,n,r){var o=gn[e];if(!o)return null;switch(e){case"keypress":if(0===sn(n))return null;case"keydown":case"keyup":e=fn;break;case"blur":case"focus":e=un;break;case"click":if(2===n.button)return null;case"auxclick":case"dblclick":case"mousedown":case"mousemove":case"mouseup":case"mouseout":case"mouseover":case"contextmenu":e=Vt;break;case"drag":case"dragend":case"dragenter":case"dragexit":case"dragleave":case"dragover":case"dragstart":case"drop":e=dn;break;case"touchcancel":case"touchend":case"touchmove":case"touchstart":e=pn;break;case Z:case ee:case te:e=on;break;case ne:e=hn;break;case"scroll":e=Bt;break;case"wheel":e=mn;break;case"copy":case"cut":case"paste":e=an;break;case"gotpointercapture":case"lostpointercapture":case"pointercancel":case"pointerdown":case"pointermove":case"pointerout":case"pointerover":case"pointerup":e=Xt;break;default:e=le}return 
z(t=e.getPooled(o,t,n,r)),t}},wn=bn.isInteractiveTopLevelEventType,xn=[];function En(e){var t=e.targetInst,n=t;do{if(!n){e.ancestors.push(n);break}var r;for(r=n;r.return;)r=r.return;if(!(r=5!==r.tag?null:r.stateNode.containerInfo))break;e.ancestors.push(n),n=L(r)}while(n);for(n=0;n<e.ancestors.length;n++){t=e.ancestors[n];var o=We(e.nativeEvent);r=e.topLevelType;for(var i=e.nativeEvent,a=null,u=0;u<y.length;u++){var s=y[u];s&&(s=s.extractEvents(r,t,i,o))&&(a=S(a,s))}I(a,!1)}}var On=!0;function Sn(e,t){if(!t)return null;var n=(wn(e)?kn:Rn).bind(null,e);t.addEventListener(e,n,!1)}function Tn(e,t){if(!t)return null;var n=(wn(e)?kn:Rn).bind(null,e);t.addEventListener(e,n,!0)}function kn(e,t){De(Rn,e,t)}function Rn(e,t){if(On){var n=We(t);if(null===(n=L(n))||"number"!=typeof n.tag||2===tn(n)||(n=null),xn.length){var r=xn.pop();r.topLevelType=e,r.nativeEvent=t,r.targetInst=n,e=r}else e={topLevelType:e,nativeEvent:t,targetInst:n,ancestors:[]};try{Ue(En,e)}finally{e.topLevelType=null,e.nativeEvent=null,e.targetInst=null,e.ancestors.length=0,10>xn.length&&xn.push(e)}}}var jn={},Pn=0,Cn="_reactListenersID"+(""+Math.random()).slice(2);function Mn(e){return Object.prototype.hasOwnProperty.call(e,Cn)||(e[Cn]=Pn++,jn[e[Cn]]={}),jn[e[Cn]]}function In(e){if(void 0===(e=e||("undefined"!=typeof document?document:void 0)))return null;try{return e.activeElement||e.body}catch(t){return e.body}}function An(e){for(;e&&e.firstChild;)e=e.firstChild;return e}function Dn(e,t){var n,r=An(e);for(e=0;r;){if(3===r.nodeType){if(n=e+r.textContent.length,e<=t&&n>=t)return{node:r,offset:t-e};e=n}e:{for(;r;){if(r.nextSibling){r=r.nextSibling;break e}r=r.parentNode}r=void 0}r=An(r)}}function Nn(){for(var e=window,t=In();t instanceof e.HTMLIFrameElement;){try{e=t.contentDocument.defaultView}catch(e){break}t=In(e.document)}return t}function Ln(e){var t=e&&e.nodeName&&e.nodeName.toLowerCase();return 
t&&("input"===t&&("text"===e.type||"search"===e.type||"tel"===e.type||"url"===e.type||"password"===e.type)||"textarea"===t||"true"===e.contentEditable)}var Un=G&&"documentMode"in document&&11>=document.documentMode,Fn={select:{phasedRegistrationNames:{bubbled:"onSelect",captured:"onSelectCapture"},dependencies:"blur contextmenu dragend focus keydown keyup mousedown mouseup selectionchange".split(" ")}},Bn=null,Wn=null,Yn=null,$n=!1;function qn(e,t){var n=t.window===t?t.document:9===t.nodeType?t:t.ownerDocument;return $n||null==Bn||Bn!==In(n)?null:("selectionStart"in(n=Bn)&&Ln(n)?n={start:n.selectionStart,end:n.selectionEnd}:n={anchorNode:(n=(n.ownerDocument&&n.ownerDocument.defaultView||window).getSelection()).anchorNode,anchorOffset:n.anchorOffset,focusNode:n.focusNode,focusOffset:n.focusOffset},Yn&&en(Yn,n)?null:(Yn=n,(e=le.getPooled(Fn.select,Wn,e,t)).type="select",e.target=Bn,z(e),e))}var Hn={eventTypes:Fn,extractEvents:function(e,t,n,r){var o,i=r.window===r?r.document:9===r.nodeType?r:r.ownerDocument;if(!(o=!i)){e:{i=Mn(i),o=b.onSelect;for(var a=0;a<o.length;a++){var u=o[a];if(!i.hasOwnProperty(u)||!i[u]){i=!1;break e}}i=!0}o=!i}if(o)return null;switch(i=t?F(t):window,e){case"focus":(Be(i)||"true"===i.contentEditable)&&(Bn=i,Wn=t,Yn=null);break;case"blur":Yn=Wn=Bn=null;break;case"mousedown":$n=!0;break;case"contextmenu":case"mouseup":case"dragend":return $n=!1,qn(n,r);case"selectionchange":if(Un)break;case"keydown":case"keyup":return qn(n,r)}return null}};function zn(e,t){return e=o({children:void 0},t),(t=function(e){var t="";return r.Children.forEach(e,function(e){null!=e&&(t+=e)}),t}(t.children))&&(e.children=t),e}function Gn(e,t,n,r){if(e=e.options,t){t={};for(var o=0;o<n.length;o++)t["$"+n[o]]=!0;for(n=0;n<e.length;n++)o=t.hasOwnProperty("$"+e[n].value),e[n].selected!==o&&(e[n].selected=o),o&&r&&(e[n].defaultSelected=!0)}else{for(n=""+gt(n),t=null,o=0;o<e.length;o++){if(e[o].value===n)return 
e[o].selected=!0,void(r&&(e[o].defaultSelected=!0));null!==t||e[o].disabled||(t=e[o])}null!==t&&(t.selected=!0)}}function Vn(e,t){return null!=t.dangerouslySetInnerHTML&&a("91"),o({},t,{value:void 0,defaultValue:void 0,children:""+e._wrapperState.initialValue})}function Xn(e,t){var n=t.value;null==n&&(n=t.defaultValue,null!=(t=t.children)&&(null!=n&&a("92"),Array.isArray(t)&&(1>=t.length||a("93"),t=t[0]),n=t),null==n&&(n="")),e._wrapperState={initialValue:gt(n)}}function Kn(e,t){var n=gt(t.value),r=gt(t.defaultValue);null!=n&&((n=""+n)!==e.value&&(e.value=n),null==t.defaultValue&&e.defaultValue!==n&&(e.defaultValue=n)),null!=r&&(e.defaultValue=""+r)}function Qn(e){var t=e.textContent;t===e._wrapperState.initialValue&&(e.value=t)}C.injectEventPluginOrder("ResponderEventPlugin SimpleEventPlugin EnterLeaveEventPlugin ChangeEventPlugin SelectEventPlugin BeforeInputEventPlugin".split(" ")),w=B,x=U,E=F,C.injectEventPluginsByName({SimpleEventPlugin:bn,EnterLeaveEventPlugin:Qt,ChangeEventPlugin:Ft,SelectEventPlugin:Hn,BeforeInputEventPlugin:ke});var Jn={html:"http://www.w3.org/1999/xhtml",mathml:"http://www.w3.org/1998/Math/MathML",svg:"http://www.w3.org/2000/svg"};function Zn(e){switch(e){case"svg":return"http://www.w3.org/2000/svg";case"math":return"http://www.w3.org/1998/Math/MathML";default:return"http://www.w3.org/1999/xhtml"}}function er(e,t){return null==e||"http://www.w3.org/1999/xhtml"===e?Zn(t):"http://www.w3.org/2000/svg"===e&&"foreignObject"===t?"http://www.w3.org/1999/xhtml":e}var tr=void 0,nr=function(e){return"undefined"!=typeof MSApp&&MSApp.execUnsafeLocalFunction?function(t,n,r,o){MSApp.execUnsafeLocalFunction(function(){return e(t,n)})}:e}(function(e,t){if(e.namespaceURI!==Jn.svg||"innerHTML"in e)e.innerHTML=t;else{for((tr=tr||document.createElement("div")).innerHTML="<svg>"+t+"</svg>",t=tr.firstChild;e.firstChild;)e.removeChild(e.firstChild);for(;t.firstChild;)e.appendChild(t.firstChild)}});function rr(e,t){if(t){var 
n=e.firstChild;if(n&&n===e.lastChild&&3===n.nodeType)return void(n.nodeValue=t)}e.textContent=t}var or={animationIterationCount:!0,borderImageOutset:!0,borderImageSlice:!0,borderImageWidth:!0,boxFlex:!0,boxFlexGroup:!0,boxOrdinalGroup:!0,columnCount:!0,columns:!0,flex:!0,flexGrow:!0,flexPositive:!0,flexShrink:!0,flexNegative:!0,flexOrder:!0,gridArea:!0,gridRow:!0,gridRowEnd:!0,gridRowSpan:!0,gridRowStart:!0,gridColumn:!0,gridColumnEnd:!0,gridColumnSpan:!0,gridColumnStart:!0,fontWeight:!0,lineClamp:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,tabSize:!0,widows:!0,zIndex:!0,zoom:!0,fillOpacity:!0,floodOpacity:!0,stopOpacity:!0,strokeDasharray:!0,strokeDashoffset:!0,strokeMiterlimit:!0,strokeOpacity:!0,strokeWidth:!0},ir=["Webkit","ms","Moz","O"];function ar(e,t){for(var n in e=e.style,t)if(t.hasOwnProperty(n)){var r=0===n.indexOf("--"),o=n,i=t[n];o=null==i||"boolean"==typeof i||""===i?"":r||"number"!=typeof i||0===i||or.hasOwnProperty(o)&&or[o]?(""+i).trim():i+"px","float"===n&&(n="cssFloat"),r?e.setProperty(n,o):e[n]=o}}Object.keys(or).forEach(function(e){ir.forEach(function(t){t=t+e.charAt(0).toUpperCase()+e.substring(1),or[t]=or[e]})});var ur=o({menuitem:!0},{area:!0,base:!0,br:!0,col:!0,embed:!0,hr:!0,img:!0,input:!0,keygen:!0,link:!0,meta:!0,param:!0,source:!0,track:!0,wbr:!0});function sr(e,t){t&&(ur[e]&&(null!=t.children||null!=t.dangerouslySetInnerHTML)&&a("137",e,""),null!=t.dangerouslySetInnerHTML&&(null!=t.children&&a("60"),"object"==typeof t.dangerouslySetInnerHTML&&"__html"in t.dangerouslySetInnerHTML||a("61")),null!=t.style&&"object"!=typeof t.style&&a("62",""))}function cr(e,t){if(-1===e.indexOf("-"))return"string"==typeof t.is;switch(e){case"annotation-xml":case"color-profile":case"font-face":case"font-face-src":case"font-face-uri":case"font-face-format":case"font-face-name":case"missing-glyph":return!1;default:return!0}}function lr(e,t){var n=Mn(e=9===e.nodeType||11===e.nodeType?e:e.ownerDocument);t=b[t];for(var r=0;r<t.length;r++){var 
o=t[r];if(!n.hasOwnProperty(o)||!n[o]){switch(o){case"scroll":Tn("scroll",e);break;case"focus":case"blur":Tn("focus",e),Tn("blur",e),n.blur=!0,n.focus=!0;break;case"cancel":case"close":Ye(o)&&Tn(o,e);break;case"invalid":case"submit":case"reset":break;default:-1===re.indexOf(o)&&Sn(o,e)}n[o]=!0}}}function fr(){}var dr=null,pr=null;function hr(e,t){switch(e){case"button":case"input":case"select":case"textarea":return!!t.autoFocus}return!1}function mr(e,t){return"textarea"===e||"option"===e||"noscript"===e||"string"==typeof t.children||"number"==typeof t.children||"object"==typeof t.dangerouslySetInnerHTML&&null!==t.dangerouslySetInnerHTML&&null!=t.dangerouslySetInnerHTML.__html}function vr(e){for(e=e.nextSibling;e&&1!==e.nodeType&&3!==e.nodeType;)e=e.nextSibling;return e}function yr(e){for(e=e.firstChild;e&&1!==e.nodeType&&3!==e.nodeType;)e=e.nextSibling;return e}new Set;var gr=[],_r=-1;function br(e){0>_r||(e.current=gr[_r],gr[_r]=null,_r--)}function wr(e,t){gr[++_r]=e.current,e.current=t}var xr={},Er={current:xr},Or={current:!1},Sr=xr;function Tr(e,t){var n=e.type.contextTypes;if(!n)return xr;var r=e.stateNode;if(r&&r.__reactInternalMemoizedUnmaskedChildContext===t)return r.__reactInternalMemoizedMaskedChildContext;var o,i={};for(o in n)i[o]=t[o];return r&&((e=e.stateNode).__reactInternalMemoizedUnmaskedChildContext=t,e.__reactInternalMemoizedMaskedChildContext=i),i}function kr(e){return null!==(e=e.childContextTypes)&&void 0!==e}function Rr(e){br(Or),br(Er)}function jr(e){br(Or),br(Er)}function Pr(e,t,n){Er.current!==xr&&a("168"),wr(Er,t),wr(Or,n)}function Cr(e,t,n){var r=e.stateNode;if(e=t.childContextTypes,"function"!=typeof r.getChildContext)return n;for(var i in r=r.getChildContext())i in e||a("108",ut(t)||"Unknown",i);return o({},n,r)}function Mr(e){var t=e.stateNode;return t=t&&t.__reactInternalMemoizedMergedChildContext||xr,Sr=Er.current,wr(Er,t),wr(Or,Or.current),!0}function Ir(e,t,n){var 
r=e.stateNode;r||a("169"),n?(t=Cr(e,t,Sr),r.__reactInternalMemoizedMergedChildContext=t,br(Or),br(Er),wr(Er,t)):br(Or),wr(Or,n)}var Ar=null,Dr=null;function Nr(e){return function(t){try{return e(t)}catch(e){}}}function Lr(e,t,n,r){this.tag=e,this.key=n,this.sibling=this.child=this.return=this.stateNode=this.type=null,this.index=0,this.ref=null,this.pendingProps=t,this.firstContextDependency=this.memoizedState=this.updateQueue=this.memoizedProps=null,this.mode=r,this.effectTag=0,this.lastEffect=this.firstEffect=this.nextEffect=null,this.childExpirationTime=this.expirationTime=0,this.alternate=null}function Ur(e){return!(!(e=e.prototype)||!e.isReactComponent)}function Fr(e,t,n){var r=e.alternate;return null===r?((r=new Lr(e.tag,t,e.key,e.mode)).type=e.type,r.stateNode=e.stateNode,r.alternate=e,e.alternate=r):(r.pendingProps=t,r.effectTag=0,r.nextEffect=null,r.firstEffect=null,r.lastEffect=null),r.childExpirationTime=e.childExpirationTime,r.expirationTime=t!==e.pendingProps?n:e.expirationTime,r.child=e.child,r.memoizedProps=e.memoizedProps,r.memoizedState=e.memoizedState,r.updateQueue=e.updateQueue,r.firstContextDependency=e.firstContextDependency,r.sibling=e.sibling,r.index=e.index,r.ref=e.ref,r}function Br(e,t,n){var r=e.type,o=e.key;e=e.props;var i=void 0;if("function"==typeof r)i=Ur(r)?2:4;else if("string"==typeof r)i=7;else e:switch(r){case Qe:return Wr(e.children,t,n,o);case nt:i=10,t|=3;break;case Je:i=10,t|=2;break;case Ze:return(r=new Lr(15,e,o,4|t)).type=Ze,r.expirationTime=n,r;case ot:i=16;break;default:if("object"==typeof r&&null!==r)switch(r.$$typeof){case et:i=12;break e;case tt:i=11;break e;case rt:i=13;break e;default:if("function"==typeof r.then){i=4;break e}}a("130",null==r?r:typeof r,"")}return(t=new Lr(i,e,o,t)).type=r,t.expirationTime=n,t}function Wr(e,t,n,r){return(e=new Lr(9,e,r,t)).expirationTime=n,e}function Yr(e,t,n){return(e=new Lr(8,e,null,t)).expirationTime=n,e}function $r(e,t,n){return(t=new 
Lr(6,null!==e.children?e.children:[],e.key,t)).expirationTime=n,t.stateNode={containerInfo:e.containerInfo,pendingChildren:null,implementation:e.implementation},t}function qr(e,t){e.didError=!1;var n=e.earliestPendingTime;0===n?e.earliestPendingTime=e.latestPendingTime=t:n>t?e.earliestPendingTime=t:e.latestPendingTime<t&&(e.latestPendingTime=t),Hr(t,e)}function Hr(e,t){var n=t.earliestSuspendedTime,r=t.latestSuspendedTime,o=t.earliestPendingTime,i=t.latestPingedTime;0===(o=0!==o?o:i)&&(0===e||r>e)&&(o=r),0!==(e=o)&&0!==n&&n<e&&(e=n),t.nextExpirationTimeToWorkOn=o,t.expirationTime=e}var zr=!1;function Gr(e){return{baseState:e,firstUpdate:null,lastUpdate:null,firstCapturedUpdate:null,lastCapturedUpdate:null,firstEffect:null,lastEffect:null,firstCapturedEffect:null,lastCapturedEffect:null}}function Vr(e){return{baseState:e.baseState,firstUpdate:e.firstUpdate,lastUpdate:e.lastUpdate,firstCapturedUpdate:null,lastCapturedUpdate:null,firstEffect:null,lastEffect:null,firstCapturedEffect:null,lastCapturedEffect:null}}function Xr(e){return{expirationTime:e,tag:0,payload:null,callback:null,next:null,nextEffect:null}}function Kr(e,t){null===e.lastUpdate?e.firstUpdate=e.lastUpdate=t:(e.lastUpdate.next=t,e.lastUpdate=t)}function Qr(e,t){var n=e.alternate;if(null===n){var r=e.updateQueue,o=null;null===r&&(r=e.updateQueue=Gr(e.memoizedState))}else r=e.updateQueue,o=n.updateQueue,null===r?null===o?(r=e.updateQueue=Gr(e.memoizedState),o=n.updateQueue=Gr(n.memoizedState)):r=e.updateQueue=Vr(o):null===o&&(o=n.updateQueue=Vr(r));null===o||r===o?Kr(r,t):null===r.lastUpdate||null===o.lastUpdate?(Kr(r,t),Kr(o,t)):(Kr(r,t),o.lastUpdate=t)}function Jr(e,t){var n=e.updateQueue;null===(n=null===n?e.updateQueue=Gr(e.memoizedState):Zr(e,n)).lastCapturedUpdate?n.firstCapturedUpdate=n.lastCapturedUpdate=t:(n.lastCapturedUpdate.next=t,n.lastCapturedUpdate=t)}function Zr(e,t){var n=e.alternate;return null!==n&&t===n.updateQueue&&(t=e.updateQueue=Vr(t)),t}function eo(e,t,n,r,i,a){switch(n.tag){case 
1:return"function"==typeof(e=n.payload)?e.call(a,r,i):e;case 3:e.effectTag=-1025&e.effectTag|64;case 0:if(null===(i="function"==typeof(e=n.payload)?e.call(a,r,i):e)||void 0===i)break;return o({},r,i);case 2:zr=!0}return r}function to(e,t,n,r,o){zr=!1;for(var i=(t=Zr(e,t)).baseState,a=null,u=0,s=t.firstUpdate,c=i;null!==s;){var l=s.expirationTime;l>o?(null===a&&(a=s,i=c),(0===u||u>l)&&(u=l)):(c=eo(e,0,s,c,n,r),null!==s.callback&&(e.effectTag|=32,s.nextEffect=null,null===t.lastEffect?t.firstEffect=t.lastEffect=s:(t.lastEffect.nextEffect=s,t.lastEffect=s))),s=s.next}for(l=null,s=t.firstCapturedUpdate;null!==s;){var f=s.expirationTime;f>o?(null===l&&(l=s,null===a&&(i=c)),(0===u||u>f)&&(u=f)):(c=eo(e,0,s,c,n,r),null!==s.callback&&(e.effectTag|=32,s.nextEffect=null,null===t.lastCapturedEffect?t.firstCapturedEffect=t.lastCapturedEffect=s:(t.lastCapturedEffect.nextEffect=s,t.lastCapturedEffect=s))),s=s.next}null===a&&(t.lastUpdate=null),null===l?t.lastCapturedUpdate=null:e.effectTag|=32,null===a&&null===l&&(i=c),t.baseState=i,t.firstUpdate=a,t.firstCapturedUpdate=l,e.expirationTime=u,e.memoizedState=c}function no(e,t,n){null!==t.firstCapturedUpdate&&(null!==t.lastUpdate&&(t.lastUpdate.next=t.firstCapturedUpdate,t.lastUpdate=t.lastCapturedUpdate),t.firstCapturedUpdate=t.lastCapturedUpdate=null),ro(t.firstEffect,n),t.firstEffect=t.lastEffect=null,ro(t.firstCapturedEffect,n),t.firstCapturedEffect=t.lastCapturedEffect=null}function ro(e,t){for(;null!==e;){var n=e.callback;if(null!==n){e.callback=null;var r=t;"function"!=typeof n&&a("191",n),n.call(r)}e=e.nextEffect}}function oo(e,t){return{value:e,source:t,stack:st(t)}}var io={current:null},ao=null,uo=null,so=null;function co(e,t){var n=e.type._context;wr(io,n._currentValue),n._currentValue=t}function lo(e){var t=io.current;br(io),e.type._context._currentValue=t}function fo(e){ao=e,so=uo=null,e.firstContextDependency=null}function po(e,t){return so!==e&&!1!==t&&0!==t&&("number"==typeof 
t&&1073741823!==t||(so=e,t=1073741823),t={context:e,observedBits:t,next:null},null===uo?(null===ao&&a("277"),ao.firstContextDependency=uo=t):uo=uo.next=t),e._currentValue}var ho={},mo={current:ho},vo={current:ho},yo={current:ho};function go(e){return e===ho&&a("174"),e}function _o(e,t){wr(yo,t),wr(vo,e),wr(mo,ho);var n=t.nodeType;switch(n){case 9:case 11:t=(t=t.documentElement)?t.namespaceURI:er(null,"");break;default:t=er(t=(n=8===n?t.parentNode:t).namespaceURI||null,n=n.tagName)}br(mo),wr(mo,t)}function bo(e){br(mo),br(vo),br(yo)}function wo(e){go(yo.current);var t=go(mo.current),n=er(t,e.type);t!==n&&(wr(vo,e),wr(mo,n))}function xo(e){vo.current===e&&(br(mo),br(vo))}var Eo=(new r.Component).refs;function Oo(e,t,n,r){n=null===(n=n(r,t=e.memoizedState))||void 0===n?t:o({},t,n),e.memoizedState=n,null!==(r=e.updateQueue)&&0===e.expirationTime&&(r.baseState=n)}var So={isMounted:function(e){return!!(e=e._reactInternalFiber)&&2===tn(e)},enqueueSetState:function(e,t,n){e=e._reactInternalFiber;var r=aa(),o=Xr(r=Mi(r,e));o.payload=t,void 0!==n&&null!==n&&(o.callback=n),Qr(e,o),Ii(e,r)},enqueueReplaceState:function(e,t,n){e=e._reactInternalFiber;var r=aa(),o=Xr(r=Mi(r,e));o.tag=1,o.payload=t,void 0!==n&&null!==n&&(o.callback=n),Qr(e,o),Ii(e,r)},enqueueForceUpdate:function(e,t){e=e._reactInternalFiber;var n=aa(),r=Xr(n=Mi(n,e));r.tag=2,void 0!==t&&null!==t&&(r.callback=t),Qr(e,r),Ii(e,n)}};function To(e,t,n,r,o,i,a){return"function"==typeof(e=e.stateNode).shouldComponentUpdate?e.shouldComponentUpdate(r,i,a):!t.prototype||!t.prototype.isPureReactComponent||(!en(n,r)||!en(o,i))}function ko(e,t,n,r){e=t.state,"function"==typeof t.componentWillReceiveProps&&t.componentWillReceiveProps(n,r),"function"==typeof t.UNSAFE_componentWillReceiveProps&&t.UNSAFE_componentWillReceiveProps(n,r),t.state!==e&&So.enqueueReplaceState(t,t.state,null)}function Ro(e,t,n,r){var 
o=e.stateNode,i=kr(t)?Sr:Er.current;o.props=n,o.state=e.memoizedState,o.refs=Eo,o.context=Tr(e,i),null!==(i=e.updateQueue)&&(to(e,i,n,o,r),o.state=e.memoizedState),"function"==typeof(i=t.getDerivedStateFromProps)&&(Oo(e,t,i,n),o.state=e.memoizedState),"function"==typeof t.getDerivedStateFromProps||"function"==typeof o.getSnapshotBeforeUpdate||"function"!=typeof o.UNSAFE_componentWillMount&&"function"!=typeof o.componentWillMount||(t=o.state,"function"==typeof o.componentWillMount&&o.componentWillMount(),"function"==typeof o.UNSAFE_componentWillMount&&o.UNSAFE_componentWillMount(),t!==o.state&&So.enqueueReplaceState(o,o.state,null),null!==(i=e.updateQueue)&&(to(e,i,n,o,r),o.state=e.memoizedState)),"function"==typeof o.componentDidMount&&(e.effectTag|=4)}var jo=Array.isArray;function Po(e,t,n){if(null!==(e=n.ref)&&"function"!=typeof e&&"object"!=typeof e){if(n._owner){var r=void 0;(n=n._owner)&&(2!==n.tag&&3!==n.tag&&a("110"),r=n.stateNode),r||a("147",e);var o=""+e;return null!==t&&null!==t.ref&&"function"==typeof t.ref&&t.ref._stringRef===o?t.ref:((t=function(e){var t=r.refs;t===Eo&&(t=r.refs={}),null===e?delete t[o]:t[o]=e})._stringRef=o,t)}"string"!=typeof e&&a("284"),n._owner||a("254",e)}return e}function Co(e,t){"textarea"!==e.type&&a("31","[object Object]"===Object.prototype.toString.call(t)?"object with keys {"+Object.keys(t).join(", ")+"}":t,"")}function Mo(e){function t(t,n){if(e){var r=t.lastEffect;null!==r?(r.nextEffect=n,t.lastEffect=n):t.firstEffect=t.lastEffect=n,n.nextEffect=null,n.effectTag=8}}function n(n,r){if(!e)return null;for(;null!==r;)t(n,r),r=r.sibling;return null}function r(e,t){for(e=new Map;null!==t;)null!==t.key?e.set(t.key,t):e.set(t.index,t),t=t.sibling;return e}function o(e,t,n){return(e=Fr(e,t,n)).index=0,e.sibling=null,e}function i(t,n,r){return t.index=r,e?null!==(r=t.alternate)?(r=r.index)<n?(t.effectTag=2,n):r:(t.effectTag=2,n):n}function u(t){return e&&null===t.alternate&&(t.effectTag=2),t}function s(e,t,n,r){return 
null===t||8!==t.tag?((t=Yr(n,e.mode,r)).return=e,t):((t=o(t,n,r)).return=e,t)}function c(e,t,n,r){return null!==t&&t.type===n.type?((r=o(t,n.props,r)).ref=Po(e,t,n),r.return=e,r):((r=Br(n,e.mode,r)).ref=Po(e,t,n),r.return=e,r)}function l(e,t,n,r){return null===t||6!==t.tag||t.stateNode.containerInfo!==n.containerInfo||t.stateNode.implementation!==n.implementation?((t=$r(n,e.mode,r)).return=e,t):((t=o(t,n.children||[],r)).return=e,t)}function f(e,t,n,r,i){return null===t||9!==t.tag?((t=Wr(n,e.mode,r,i)).return=e,t):((t=o(t,n,r)).return=e,t)}function d(e,t,n){if("string"==typeof t||"number"==typeof t)return(t=Yr(""+t,e.mode,n)).return=e,t;if("object"==typeof t&&null!==t){switch(t.$$typeof){case Xe:return(n=Br(t,e.mode,n)).ref=Po(e,null,t),n.return=e,n;case Ke:return(t=$r(t,e.mode,n)).return=e,t}if(jo(t)||at(t))return(t=Wr(t,e.mode,n,null)).return=e,t;Co(e,t)}return null}function p(e,t,n,r){var o=null!==t?t.key:null;if("string"==typeof n||"number"==typeof n)return null!==o?null:s(e,t,""+n,r);if("object"==typeof n&&null!==n){switch(n.$$typeof){case Xe:return n.key===o?n.type===Qe?f(e,t,n.props.children,r,o):c(e,t,n,r):null;case Ke:return n.key===o?l(e,t,n,r):null}if(jo(n)||at(n))return null!==o?null:f(e,t,n,r,null);Co(e,n)}return null}function h(e,t,n,r,o){if("string"==typeof r||"number"==typeof r)return s(t,e=e.get(n)||null,""+r,o);if("object"==typeof r&&null!==r){switch(r.$$typeof){case Xe:return e=e.get(null===r.key?n:r.key)||null,r.type===Qe?f(t,e,r.props.children,o,r.key):c(t,e,r,o);case Ke:return l(t,e=e.get(null===r.key?n:r.key)||null,r,o)}if(jo(r)||at(r))return f(t,e=e.get(n)||null,r,o,null);Co(t,r)}return null}function m(o,a,u,s){for(var c=null,l=null,f=a,m=a=0,v=null;null!==f&&m<u.length;m++){f.index>m?(v=f,f=null):v=f.sibling;var y=p(o,f,u[m],s);if(null===y){null===f&&(f=v);break}e&&f&&null===y.alternate&&t(o,f),a=i(y,a,m),null===l?c=y:l.sibling=y,l=y,f=v}if(m===u.length)return 
n(o,f),c;if(null===f){for(;m<u.length;m++)(f=d(o,u[m],s))&&(a=i(f,a,m),null===l?c=f:l.sibling=f,l=f);return c}for(f=r(o,f);m<u.length;m++)(v=h(f,o,m,u[m],s))&&(e&&null!==v.alternate&&f.delete(null===v.key?m:v.key),a=i(v,a,m),null===l?c=v:l.sibling=v,l=v);return e&&f.forEach(function(e){return t(o,e)}),c}function v(o,u,s,c){var l=at(s);"function"!=typeof l&&a("150"),null==(s=l.call(s))&&a("151");for(var f=l=null,m=u,v=u=0,y=null,g=s.next();null!==m&&!g.done;v++,g=s.next()){m.index>v?(y=m,m=null):y=m.sibling;var _=p(o,m,g.value,c);if(null===_){m||(m=y);break}e&&m&&null===_.alternate&&t(o,m),u=i(_,u,v),null===f?l=_:f.sibling=_,f=_,m=y}if(g.done)return n(o,m),l;if(null===m){for(;!g.done;v++,g=s.next())null!==(g=d(o,g.value,c))&&(u=i(g,u,v),null===f?l=g:f.sibling=g,f=g);return l}for(m=r(o,m);!g.done;v++,g=s.next())null!==(g=h(m,o,v,g.value,c))&&(e&&null!==g.alternate&&m.delete(null===g.key?v:g.key),u=i(g,u,v),null===f?l=g:f.sibling=g,f=g);return e&&m.forEach(function(e){return t(o,e)}),l}return function(e,r,i,s){var c="object"==typeof i&&null!==i&&i.type===Qe&&null===i.key;c&&(i=i.props.children);var l="object"==typeof i&&null!==i;if(l)switch(i.$$typeof){case Xe:e:{for(l=i.key,c=r;null!==c;){if(c.key===l){if(9===c.tag?i.type===Qe:c.type===i.type){n(e,c.sibling),(r=o(c,i.type===Qe?i.props.children:i.props,s)).ref=Po(e,c,i),r.return=e,e=r;break e}n(e,c);break}t(e,c),c=c.sibling}i.type===Qe?((r=Wr(i.props.children,e.mode,s,i.key)).return=e,e=r):((s=Br(i,e.mode,s)).ref=Po(e,r,i),s.return=e,e=s)}return u(e);case Ke:e:{for(c=i.key;null!==r;){if(r.key===c){if(6===r.tag&&r.stateNode.containerInfo===i.containerInfo&&r.stateNode.implementation===i.implementation){n(e,r.sibling),(r=o(r,i.children||[],s)).return=e,e=r;break e}n(e,r);break}t(e,r),r=r.sibling}(r=$r(i,e.mode,s)).return=e,e=r}return u(e)}if("string"==typeof i||"number"==typeof i)return i=""+i,null!==r&&8===r.tag?(n(e,r.sibling),(r=o(r,i,s)).return=e,e=r):(n(e,r),(r=Yr(i,e.mode,s)).return=e,e=r),u(e);if(jo(i))return 
m(e,r,i,s);if(at(i))return v(e,r,i,s);if(l&&Co(e,i),void 0===i&&!c)switch(e.tag){case 2:case 3:case 0:a("152",(s=e.type).displayName||s.name||"Component")}return n(e,r)}}var Io=Mo(!0),Ao=Mo(!1),Do=null,No=null,Lo=!1;function Uo(e,t){var n=new Lr(7,null,null,0);n.type="DELETED",n.stateNode=t,n.return=e,n.effectTag=8,null!==e.lastEffect?(e.lastEffect.nextEffect=n,e.lastEffect=n):e.firstEffect=e.lastEffect=n}function Fo(e,t){switch(e.tag){case 7:var n=e.type;return null!==(t=1!==t.nodeType||n.toLowerCase()!==t.nodeName.toLowerCase()?null:t)&&(e.stateNode=t,!0);case 8:return null!==(t=""===e.pendingProps||3!==t.nodeType?null:t)&&(e.stateNode=t,!0);default:return!1}}function Bo(e){if(Lo){var t=No;if(t){var n=t;if(!Fo(e,t)){if(!(t=vr(n))||!Fo(e,t))return e.effectTag|=2,Lo=!1,void(Do=e);Uo(Do,n)}Do=e,No=yr(t)}else e.effectTag|=2,Lo=!1,Do=e}}function Wo(e){for(e=e.return;null!==e&&7!==e.tag&&5!==e.tag;)e=e.return;Do=e}function Yo(e){if(e!==Do)return!1;if(!Lo)return Wo(e),Lo=!0,!1;var t=e.type;if(7!==e.tag||"head"!==t&&"body"!==t&&!mr(t,e.memoizedProps))for(t=No;t;)Uo(e,t),t=vr(t);return Wo(e),No=Do?vr(e.stateNode):null,!0}function $o(){No=Do=null,Lo=!1}var qo=ze.ReactCurrentOwner;function Ho(e,t,n,r){t.child=null===e?Ao(t,null,n,r):Io(t,e.child,n,r)}function zo(e,t,n,r,o){n=n.render;var i=t.ref;return Or.current||t.memoizedProps!==r||i!==(null!==e?e.ref:null)?(Ho(e,t,n=n(r,i),o),t.memoizedProps=r,t.child):Zo(e,t,o)}function Go(e,t){var n=t.ref;(null===e&&null!==n||null!==e&&e.ref!==n)&&(t.effectTag|=128)}function Vo(e,t,n,r,o){var i=kr(n)?Sr:Er.current;return i=Tr(t,i),fo(t),n=n(r,i),t.effectTag|=1,Ho(e,t,n,o),t.memoizedProps=r,t.child}function Xo(e,t,n,r,o){if(kr(n)){var i=!0;Mr(t)}else i=!1;if(fo(t),null===e)if(null===t.stateNode){var a=kr(n)?Sr:Er.current,u=n.contextTypes,s=null!==u&&void 0!==u,c=new n(r,u=s?Tr(t,a):xr);t.memoizedState=null!==c.state&&void 
0!==c.state?c.state:null,c.updater=So,t.stateNode=c,c._reactInternalFiber=t,s&&((s=t.stateNode).__reactInternalMemoizedUnmaskedChildContext=a,s.__reactInternalMemoizedMaskedChildContext=u),Ro(t,n,r,o),r=!0}else{a=t.stateNode,u=t.memoizedProps,a.props=u;var l=a.context;s=Tr(t,s=kr(n)?Sr:Er.current);var f=n.getDerivedStateFromProps;(c="function"==typeof f||"function"==typeof a.getSnapshotBeforeUpdate)||"function"!=typeof a.UNSAFE_componentWillReceiveProps&&"function"!=typeof a.componentWillReceiveProps||(u!==r||l!==s)&&ko(t,a,r,s),zr=!1;var d=t.memoizedState;l=a.state=d;var p=t.updateQueue;null!==p&&(to(t,p,r,a,o),l=t.memoizedState),u!==r||d!==l||Or.current||zr?("function"==typeof f&&(Oo(t,n,f,r),l=t.memoizedState),(u=zr||To(t,n,u,r,d,l,s))?(c||"function"!=typeof a.UNSAFE_componentWillMount&&"function"!=typeof a.componentWillMount||("function"==typeof a.componentWillMount&&a.componentWillMount(),"function"==typeof a.UNSAFE_componentWillMount&&a.UNSAFE_componentWillMount()),"function"==typeof a.componentDidMount&&(t.effectTag|=4)):("function"==typeof a.componentDidMount&&(t.effectTag|=4),t.memoizedProps=r,t.memoizedState=l),a.props=r,a.state=l,a.context=s,r=u):("function"==typeof a.componentDidMount&&(t.effectTag|=4),r=!1)}else a=t.stateNode,u=t.memoizedProps,a.props=u,l=a.context,s=Tr(t,s=kr(n)?Sr:Er.current),(c="function"==typeof(f=n.getDerivedStateFromProps)||"function"==typeof a.getSnapshotBeforeUpdate)||"function"!=typeof a.UNSAFE_componentWillReceiveProps&&"function"!=typeof a.componentWillReceiveProps||(u!==r||l!==s)&&ko(t,a,r,s),zr=!1,l=t.memoizedState,d=a.state=l,null!==(p=t.updateQueue)&&(to(t,p,r,a,o),d=t.memoizedState),u!==r||l!==d||Or.current||zr?("function"==typeof f&&(Oo(t,n,f,r),d=t.memoizedState),(f=zr||To(t,n,u,r,l,d,s))?(c||"function"!=typeof a.UNSAFE_componentWillUpdate&&"function"!=typeof a.componentWillUpdate||("function"==typeof a.componentWillUpdate&&a.componentWillUpdate(r,d,s),"function"==typeof 
a.UNSAFE_componentWillUpdate&&a.UNSAFE_componentWillUpdate(r,d,s)),"function"==typeof a.componentDidUpdate&&(t.effectTag|=4),"function"==typeof a.getSnapshotBeforeUpdate&&(t.effectTag|=256)):("function"!=typeof a.componentDidUpdate||u===e.memoizedProps&&l===e.memoizedState||(t.effectTag|=4),"function"!=typeof a.getSnapshotBeforeUpdate||u===e.memoizedProps&&l===e.memoizedState||(t.effectTag|=256),t.memoizedProps=r,t.memoizedState=d),a.props=r,a.state=d,a.context=s,r=f):("function"!=typeof a.componentDidUpdate||u===e.memoizedProps&&l===e.memoizedState||(t.effectTag|=4),"function"!=typeof a.getSnapshotBeforeUpdate||u===e.memoizedProps&&l===e.memoizedState||(t.effectTag|=256),r=!1);return Ko(e,t,n,r,i,o)}function Ko(e,t,n,r,o,i){Go(e,t);var a=0!=(64&t.effectTag);if(!r&&!a)return o&&Ir(t,n,!1),Zo(e,t,i);r=t.stateNode,qo.current=t;var u=a?null:r.render();return t.effectTag|=1,null!==e&&a&&(Ho(e,t,null,i),t.child=null),Ho(e,t,u,i),t.memoizedState=r.state,t.memoizedProps=r.props,o&&Ir(t,n,!0),t.child}function Qo(e){var t=e.stateNode;t.pendingContext?Pr(0,t.pendingContext,t.pendingContext!==t.context):t.context&&Pr(0,t.context,!1),_o(e,t.containerInfo)}function Jo(e,t){if(e&&e.defaultProps)for(var n in t=o({},t),e=e.defaultProps)void 0===t[n]&&(t[n]=e[n]);return t}function Zo(e,t,n){null!==e&&(t.firstContextDependency=e.firstContextDependency);var r=t.childExpirationTime;if(0===r||r>n)return null;if(null!==e&&t.child!==e.child&&a("153"),null!==t.child){for(n=Fr(e=t.child,e.pendingProps,e.expirationTime),t.child=n,n.return=t;null!==e.sibling;)e=e.sibling,(n=n.sibling=Fr(e,e.pendingProps,e.expirationTime)).return=t;n.sibling=null}return t.child}function ei(e,t,n){var r=t.expirationTime;if(!Or.current&&(0===r||r>n)){switch(t.tag){case 5:Qo(t),$o();break;case 7:wo(t);break;case 2:kr(t.type)&&Mr(t);break;case 3:kr(t.type._reactResult)&&Mr(t);break;case 6:_o(t,t.stateNode.containerInfo);break;case 12:co(t,t.memoizedProps.value)}return 
Zo(e,t,n)}switch(t.expirationTime=0,t.tag){case 4:return function(e,t,n,r){null!==e&&a("155");var o=t.pendingProps;if("object"==typeof n&&null!==n&&"function"==typeof n.then){var i=n=function(e){switch(e._reactStatus){case 1:return e._reactResult;case 2:throw e._reactResult;case 0:throw e;default:throw e._reactStatus=0,e.then(function(t){if(0===e._reactStatus){if(e._reactStatus=1,"object"==typeof t&&null!==t){var n=t.default;t=void 0!==n&&null!==n?n:t}e._reactResult=t}},function(t){0===e._reactStatus&&(e._reactStatus=2,e._reactResult=t)}),e}}(n);i="function"==typeof i?Ur(i)?3:1:void 0!==i&&null!==i&&i.$$typeof?14:4,i=t.tag=i;var u=Jo(n,o);switch(i){case 1:return Vo(e,t,n,u,r);case 3:return Xo(e,t,n,u,r);case 14:return zo(e,t,n,u,r);default:a("283",n)}}if(i=Tr(t,Er.current),fo(t),i=n(o,i),t.effectTag|=1,"object"==typeof i&&null!==i&&"function"==typeof i.render&&void 0===i.$$typeof){t.tag=2,kr(n)?(u=!0,Mr(t)):u=!1,t.memoizedState=null!==i.state&&void 0!==i.state?i.state:null;var s=n.getDerivedStateFromProps;return"function"==typeof s&&Oo(t,n,s,o),i.updater=So,t.stateNode=i,i._reactInternalFiber=t,Ro(t,n,o,r),Ko(e,t,n,!0,u,r)}return t.tag=0,Ho(e,t,i,r),t.memoizedProps=o,t.child}(e,t,t.type,n);case 0:return Vo(e,t,t.type,t.pendingProps,n);case 1:var o=t.type._reactResult;return e=Vo(e,t,o,Jo(o,r=t.pendingProps),n),t.memoizedProps=r,e;case 2:return Xo(e,t,t.type,t.pendingProps,n);case 3:return e=Xo(e,t,o=t.type._reactResult,Jo(o,r=t.pendingProps),n),t.memoizedProps=r,e;case 5:return Qo(t),null===(r=t.updateQueue)&&a("282"),o=null!==(o=t.memoizedState)?o.element:null,to(t,r,t.pendingProps,null,n),(r=t.memoizedState.element)===o?($o(),t=Zo(e,t,n)):(o=t.stateNode,(o=(null===e||null===e.child)&&o.hydrate)&&(No=yr(t.stateNode.containerInfo),Do=t,o=Lo=!0),o?(t.effectTag|=2,t.child=Ao(t,null,r,n)):(Ho(e,t,r,n),$o()),t=t.child),t;case 7:wo(t),null===e&&Bo(t),r=t.type,o=t.pendingProps;var i=null!==e?e.memoizedProps:null,u=o.children;return 
mr(r,o)?u=null:null!==i&&mr(r,i)&&(t.effectTag|=16),Go(e,t),1073741823!==n&&1&t.mode&&o.hidden?(t.expirationTime=1073741823,t.memoizedProps=o,t=null):(Ho(e,t,u,n),t.memoizedProps=o,t=t.child),t;case 8:return null===e&&Bo(t),t.memoizedProps=t.pendingProps,null;case 16:return null;case 6:return _o(t,t.stateNode.containerInfo),r=t.pendingProps,null===e?t.child=Io(t,null,r,n):Ho(e,t,r,n),t.memoizedProps=r,t.child;case 13:return zo(e,t,t.type,t.pendingProps,n);case 14:return e=zo(e,t,o=t.type._reactResult,Jo(o,r=t.pendingProps),n),t.memoizedProps=r,e;case 9:return Ho(e,t,r=t.pendingProps,n),t.memoizedProps=r,t.child;case 10:return Ho(e,t,r=t.pendingProps.children,n),t.memoizedProps=r,t.child;case 15:return Ho(e,t,(r=t.pendingProps).children,n),t.memoizedProps=r,t.child;case 12:e:{if(r=t.type._context,o=t.pendingProps,u=t.memoizedProps,i=o.value,t.memoizedProps=o,co(t,i),null!==u){var s=u.value;if(0===(i=s===i&&(0!==s||1/s==1/i)||s!=s&&i!=i?0:0|("function"==typeof r._calculateChangedBits?r._calculateChangedBits(s,i):1073741823))){if(u.children===o.children&&!Or.current){t=Zo(e,t,n);break e}}else for(null!==(u=t.child)&&(u.return=t);null!==u;){if(null!==(s=u.firstContextDependency))do{if(s.context===r&&0!=(s.observedBits&i)){if(2===u.tag||3===u.tag){var c=Xr(n);c.tag=2,Qr(u,c)}(0===u.expirationTime||u.expirationTime>n)&&(u.expirationTime=n),null!==(c=u.alternate)&&(0===c.expirationTime||c.expirationTime>n)&&(c.expirationTime=n);for(var l=u.return;null!==l;){if(c=l.alternate,0===l.childExpirationTime||l.childExpirationTime>n)l.childExpirationTime=n,null!==c&&(0===c.childExpirationTime||c.childExpirationTime>n)&&(c.childExpirationTime=n);else{if(null===c||!(0===c.childExpirationTime||c.childExpirationTime>n))break;c.childExpirationTime=n}l=l.return}}c=u.child,s=s.next}while(null!==s);else c=12===u.tag&&u.type===t.type?null:u.child;if(null!==c)c.return=u;else 
for(c=u;null!==c;){if(c===t){c=null;break}if(null!==(u=c.sibling)){u.return=c.return,c=u;break}c=c.return}u=c}}Ho(e,t,o.children,n),t=t.child}return t;case 11:return i=t.type,o=(r=t.pendingProps).children,fo(t),o=o(i=po(i,r.unstable_observedBits)),t.effectTag|=1,Ho(e,t,o,n),t.memoizedProps=r,t.child;default:a("156")}}function ti(e){e.effectTag|=4}var ni=void 0,ri=void 0,oi=void 0;function ii(e,t){var n=t.source,r=t.stack;null===r&&null!==n&&(r=st(n)),null!==n&&ut(n.type),t=t.value,null!==e&&2===e.tag&&ut(e.type);try{console.error(t)}catch(e){setTimeout(function(){throw e})}}function ai(e){var t=e.ref;if(null!==t)if("function"==typeof t)try{t(null)}catch(t){Ci(e,t)}else t.current=null}function ui(e){switch("function"==typeof Dr&&Dr(e),e.tag){case 2:case 3:ai(e);var t=e.stateNode;if("function"==typeof t.componentWillUnmount)try{t.props=e.memoizedProps,t.state=e.memoizedState,t.componentWillUnmount()}catch(t){Ci(e,t)}break;case 7:ai(e);break;case 6:li(e)}}function si(e){return 7===e.tag||5===e.tag||6===e.tag}function ci(e){e:{for(var t=e.return;null!==t;){if(si(t)){var n=t;break e}t=t.return}a("160"),n=void 0}var r=t=void 0;switch(n.tag){case 7:t=n.stateNode,r=!1;break;case 5:case 6:t=n.stateNode.containerInfo,r=!0;break;default:a("161")}16&n.effectTag&&(rr(t,""),n.effectTag&=-17);e:t:for(n=e;;){for(;null===n.sibling;){if(null===n.return||si(n.return)){n=null;break e}n=n.return}for(n.sibling.return=n.return,n=n.sibling;7!==n.tag&&8!==n.tag;){if(2&n.effectTag)continue t;if(null===n.child||6===n.tag)continue t;n.child.return=n,n=n.child}if(!(2&n.effectTag)){n=n.stateNode;break e}}for(var o=e;;){if(7===o.tag||8===o.tag)if(n)if(r){var i=t,u=o.stateNode,s=n;8===i.nodeType?i.parentNode.insertBefore(u,s):i.insertBefore(u,s)}else t.insertBefore(o.stateNode,n);else r?(i=t,u=o.stateNode,8===i.nodeType?(s=i.parentNode).insertBefore(u,i):(s=i).appendChild(u),null===s.onclick&&(s.onclick=fr)):t.appendChild(o.stateNode);else 
if(6!==o.tag&&null!==o.child){o.child.return=o,o=o.child;continue}if(o===e)break;for(;null===o.sibling;){if(null===o.return||o.return===e)return;o=o.return}o.sibling.return=o.return,o=o.sibling}}function li(e){for(var t=e,n=!1,r=void 0,o=void 0;;){if(!n){n=t.return;e:for(;;){switch(null===n&&a("160"),n.tag){case 7:r=n.stateNode,o=!1;break e;case 5:case 6:r=n.stateNode.containerInfo,o=!0;break e}n=n.return}n=!0}if(7===t.tag||8===t.tag){e:for(var i=t,u=i;;)if(ui(u),null!==u.child&&6!==u.tag)u.child.return=u,u=u.child;else{if(u===i)break;for(;null===u.sibling;){if(null===u.return||u.return===i)break e;u=u.return}u.sibling.return=u.return,u=u.sibling}o?(i=r,u=t.stateNode,8===i.nodeType?i.parentNode.removeChild(u):i.removeChild(u)):r.removeChild(t.stateNode)}else if(6===t.tag?(r=t.stateNode.containerInfo,o=!0):ui(t),null!==t.child){t.child.return=t,t=t.child;continue}if(t===e)break;for(;null===t.sibling;){if(null===t.return||t.return===e)return;6===(t=t.return).tag&&(n=!1)}t.sibling.return=t.return,t=t.sibling}}function fi(e,t){switch(t.tag){case 2:case 3:break;case 7:var n=t.stateNode;if(null!=n){var r=t.memoizedProps,o=null!==e?e.memoizedProps:r;e=t.type;var i=t.updateQueue;if(t.updateQueue=null,null!==i){for(n[N]=r,"input"===e&&"radio"===r.type&&null!=r.name&&wt(n,r),cr(e,o),t=cr(e,r),o=0;o<i.length;o+=2){var u=i[o],s=i[o+1];"style"===u?ar(n,s):"dangerouslySetInnerHTML"===u?nr(n,s):"children"===u?rr(n,s):yt(n,u,s,t)}switch(e){case"input":xt(n,r);break;case"textarea":Kn(n,r);break;case"select":e=n._wrapperState.wasMultiple,n._wrapperState.wasMultiple=!!r.multiple,null!=(i=r.value)?Gn(n,!!r.multiple,i,!1):e!==!!r.multiple&&(null!=r.defaultValue?Gn(n,!!r.multiple,r.defaultValue,!0):Gn(n,!!r.multiple,r.multiple?[]:"",!1))}}}break;case 8:null===t.stateNode&&a("162"),t.stateNode.nodeValue=t.memoizedProps;break;case 5:case 15:case 16:break;default:a("163")}}function di(e,t,n){(n=Xr(n)).tag=3,n.payload={element:null};var r=t.value;return 
n.callback=function(){pa(r),ii(e,t)},n}function pi(e,t,n){(n=Xr(n)).tag=3;var r=e.stateNode;return null!==r&&"function"==typeof r.componentDidCatch&&(n.callback=function(){null===Ti?Ti=new Set([this]):Ti.add(this);var n=t.value,r=t.stack;ii(e,t),this.componentDidCatch(n,{componentStack:null!==r?r:""})}),n}function hi(e){switch(e.tag){case 2:kr(e.type)&&Rr();var t=e.effectTag;return 1024&t?(e.effectTag=-1025&t|64,e):null;case 3:return kr(e.type._reactResult)&&Rr(),1024&(t=e.effectTag)?(e.effectTag=-1025&t|64,e):null;case 5:return bo(),jr(),0!=(64&(t=e.effectTag))&&a("285"),e.effectTag=-1025&t|64,e;case 7:return xo(e),null;case 16:return 1024&(t=e.effectTag)?(e.effectTag=-1025&t|64,e):null;case 6:return bo(),null;case 12:return lo(e),null;default:return null}}ni=function(){},ri=function(e,t,n,r,i){var a=e.memoizedProps;if(a!==r){var u=t.stateNode;switch(go(mo.current),e=null,n){case"input":a=_t(u,a),r=_t(u,r),e=[];break;case"option":a=zn(u,a),r=zn(u,r),e=[];break;case"select":a=o({},a,{value:void 0}),r=o({},r,{value:void 0}),e=[];break;case"textarea":a=Vn(u,a),r=Vn(u,r),e=[];break;default:"function"!=typeof a.onClick&&"function"==typeof r.onClick&&(u.onclick=fr)}sr(n,r),u=n=void 0;var s=null;for(n in a)if(!r.hasOwnProperty(n)&&a.hasOwnProperty(n)&&null!=a[n])if("style"===n){var c=a[n];for(u in c)c.hasOwnProperty(u)&&(s||(s={}),s[u]="")}else"dangerouslySetInnerHTML"!==n&&"children"!==n&&"suppressContentEditableWarning"!==n&&"suppressHydrationWarning"!==n&&"autoFocus"!==n&&(_.hasOwnProperty(n)?e||(e=[]):(e=e||[]).push(n,null));for(n in r){var l=r[n];if(c=null!=a?a[n]:void 0,r.hasOwnProperty(n)&&l!==c&&(null!=l||null!=c))if("style"===n)if(c){for(u in c)!c.hasOwnProperty(u)||l&&l.hasOwnProperty(u)||(s||(s={}),s[u]="");for(u in l)l.hasOwnProperty(u)&&c[u]!==l[u]&&(s||(s={}),s[u]=l[u])}else s||(e||(e=[]),e.push(n,s)),s=l;else"dangerouslySetInnerHTML"===n?(l=l?l.__html:void 0,c=c?c.__html:void 0,null!=l&&c!==l&&(e=e||[]).push(n,""+l)):"children"===n?c===l||"string"!=typeof 
l&&"number"!=typeof l||(e=e||[]).push(n,""+l):"suppressContentEditableWarning"!==n&&"suppressHydrationWarning"!==n&&(_.hasOwnProperty(n)?(null!=l&&lr(i,n),e||c===l||(e=[])):(e=e||[]).push(n,l))}s&&(e=e||[]).push("style",s),i=e,(t.updateQueue=i)&&ti(t)}},oi=function(e,t,n,r){n!==r&&ti(t)};var mi={readContext:po},vi=ze.ReactCurrentOwner,yi=0,gi=0,_i=!1,bi=null,wi=null,xi=0,Ei=!1,Oi=null,Si=!1,Ti=null;function ki(){if(null!==bi)for(var e=bi.return;null!==e;){var t=e;switch(t.tag){case 2:var n=t.type.childContextTypes;null!==n&&void 0!==n&&Rr();break;case 3:null!==(n=t.type._reactResult.childContextTypes)&&void 0!==n&&Rr();break;case 5:bo(),jr();break;case 7:xo(t);break;case 6:bo();break;case 12:lo(t)}e=e.return}wi=null,xi=0,Ei=!1,bi=null}function Ri(e){for(;;){var t=e.alternate,n=e.return,r=e.sibling;if(0==(512&e.effectTag)){var i=t,u=(t=e).pendingProps;switch(t.tag){case 0:case 1:break;case 2:kr(t.type)&&Rr();break;case 3:kr(t.type._reactResult)&&Rr();break;case 5:bo(),jr(),(u=t.stateNode).pendingContext&&(u.context=u.pendingContext,u.pendingContext=null),null!==i&&null!==i.child||(Yo(t),t.effectTag&=-3),ni(t);break;case 7:xo(t);var s=go(yo.current),c=t.type;if(null!==i&&null!=t.stateNode)ri(i,t,c,u,s),i.ref!==t.ref&&(t.effectTag|=128);else if(u){var l=go(mo.current);if(Yo(t)){i=(u=t).stateNode;var f=u.type,d=u.memoizedProps,p=s;switch(i[D]=u,i[N]=d,c=void 0,s=f){case"iframe":case"object":Sn("load",i);break;case"video":case"audio":for(f=0;f<re.length;f++)Sn(re[f],i);break;case"source":Sn("error",i);break;case"img":case"image":case"link":Sn("error",i),Sn("load",i);break;case"form":Sn("reset",i),Sn("submit",i);break;case"details":Sn("toggle",i);break;case"input":bt(i,d),Sn("invalid",i),lr(p,"onChange");break;case"select":i._wrapperState={wasMultiple:!!d.multiple},Sn("invalid",i),lr(p,"onChange");break;case"textarea":Xn(i,d),Sn("invalid",i),lr(p,"onChange")}for(c in sr(s,d),f=null,d)d.hasOwnProperty(c)&&(l=d[c],"children"===c?"string"==typeof 
l?i.textContent!==l&&(f=["children",l]):"number"==typeof l&&i.textContent!==""+l&&(f=["children",""+l]):_.hasOwnProperty(c)&&null!=l&&lr(p,c));switch(s){case"input":qe(i),Et(i,d,!0);break;case"textarea":qe(i),Qn(i);break;case"select":case"option":break;default:"function"==typeof d.onClick&&(i.onclick=fr)}c=f,u.updateQueue=c,(u=null!==c)&&ti(t)}else{d=t,i=c,p=u,f=9===s.nodeType?s:s.ownerDocument,l===Jn.html&&(l=Zn(i)),l===Jn.html?"script"===i?((i=f.createElement("div")).innerHTML="<script><\/script>",f=i.removeChild(i.firstChild)):"string"==typeof p.is?f=f.createElement(i,{is:p.is}):(f=f.createElement(i),"select"===i&&p.multiple&&(f.multiple=!0)):f=f.createElementNS(l,i),(i=f)[D]=d,i[N]=u;e:for(d=i,p=t,f=p.child;null!==f;){if(7===f.tag||8===f.tag)d.appendChild(f.stateNode);else if(6!==f.tag&&null!==f.child){f.child.return=f,f=f.child;continue}if(f===p)break;for(;null===f.sibling;){if(null===f.return||f.return===p)break e;f=f.return}f.sibling.return=f.return,f=f.sibling}p=i;var h=s,m=cr(f=c,d=u);switch(f){case"iframe":case"object":Sn("load",p),s=d;break;case"video":case"audio":for(s=0;s<re.length;s++)Sn(re[s],p);s=d;break;case"source":Sn("error",p),s=d;break;case"img":case"image":case"link":Sn("error",p),Sn("load",p),s=d;break;case"form":Sn("reset",p),Sn("submit",p),s=d;break;case"details":Sn("toggle",p),s=d;break;case"input":bt(p,d),s=_t(p,d),Sn("invalid",p),lr(h,"onChange");break;case"option":s=zn(p,d);break;case"select":p._wrapperState={wasMultiple:!!d.multiple},s=o({},d,{value:void 0}),Sn("invalid",p),lr(h,"onChange");break;case"textarea":Xn(p,d),s=Vn(p,d),Sn("invalid",p),lr(h,"onChange");break;default:s=d}sr(f,s),l=void 0;var v=f,y=p,g=s;for(l in g)if(g.hasOwnProperty(l)){var b=g[l];"style"===l?ar(y,b):"dangerouslySetInnerHTML"===l?null!=(b=b?b.__html:void 0)&&nr(y,b):"children"===l?"string"==typeof b?("textarea"!==v||""!==b)&&rr(y,b):"number"==typeof 
b&&rr(y,""+b):"suppressContentEditableWarning"!==l&&"suppressHydrationWarning"!==l&&"autoFocus"!==l&&(_.hasOwnProperty(l)?null!=b&&lr(h,l):null!=b&&yt(y,l,b,m))}switch(f){case"input":qe(p),Et(p,d,!1);break;case"textarea":qe(p),Qn(p);break;case"option":null!=d.value&&p.setAttribute("value",""+gt(d.value));break;case"select":(s=p).multiple=!!d.multiple,null!=(p=d.value)?Gn(s,!!d.multiple,p,!1):null!=d.defaultValue&&Gn(s,!!d.multiple,d.defaultValue,!0);break;default:"function"==typeof s.onClick&&(p.onclick=fr)}(u=hr(c,u))&&ti(t),t.stateNode=i}null!==t.ref&&(t.effectTag|=128)}else null===t.stateNode&&a("166");break;case 8:i&&null!=t.stateNode?oi(i,t,i.memoizedProps,u):("string"!=typeof u&&(null===t.stateNode&&a("166")),i=go(yo.current),go(mo.current),Yo(t)?(c=(u=t).stateNode,i=u.memoizedProps,c[D]=u,(u=c.nodeValue!==i)&&ti(t)):(c=t,(u=(9===i.nodeType?i:i.ownerDocument).createTextNode(u))[D]=c,t.stateNode=u));break;case 13:case 14:case 16:case 9:case 10:case 15:break;case 6:bo(),ni(t);break;case 12:lo(t);break;case 11:break;case 4:a("167");default:a("156")}if(t=bi=null,u=e,1073741823===xi||1073741823!==u.childExpirationTime){for(c=0,i=u.child;null!==i;)s=i.expirationTime,d=i.childExpirationTime,(0===c||0!==s&&s<c)&&(c=s),(0===c||0!==d&&d<c)&&(c=d),i=i.sibling;u.childExpirationTime=c}if(null!==t)return t;null!==n&&0==(512&n.effectTag)&&(null===n.firstEffect&&(n.firstEffect=e.firstEffect),null!==e.lastEffect&&(null!==n.lastEffect&&(n.lastEffect.nextEffect=e.firstEffect),n.lastEffect=e.lastEffect),1<e.effectTag&&(null!==n.lastEffect?n.lastEffect.nextEffect=e:n.firstEffect=e,n.lastEffect=e))}else{if(null!==(e=hi(e)))return e.effectTag&=511,e;null!==n&&(n.firstEffect=n.lastEffect=null,n.effectTag|=512)}if(null!==r)return r;if(null===n)break;e=n}return null}function ji(e){var t=ei(e.alternate,e,xi);return null===t&&(t=Ri(e)),vi.current=null,t}function Pi(e,t,n){_i&&a("243"),_i=!0,vi.currentDispatcher=mi;var 
r=e.nextExpirationTimeToWorkOn;r===xi&&e===wi&&null!==bi||(ki(),xi=r,bi=Fr((wi=e).current,null,xi),e.pendingCommitExpirationTime=0);for(var o=!1;;){try{if(t)for(;null!==bi&&!da();)bi=ji(bi);else for(;null!==bi;)bi=ji(bi)}catch(e){if(null===bi)o=!0,pa(e);else{null===bi&&a("271");var i=bi,u=i.return;if(null!==u){e:{var s=u,c=i,l=e;u=xi,c.effectTag|=512,c.firstEffect=c.lastEffect=null,Ei=!0,l=oo(l,c);do{switch(s.tag){case 5:s.effectTag|=1024,s.expirationTime=u,Jr(s,u=di(s,l,u));break e;case 2:case 3:c=l;var f=s.stateNode;if(0==(64&s.effectTag)&&null!==f&&"function"==typeof f.componentDidCatch&&(null===Ti||!Ti.has(f))){s.effectTag|=1024,s.expirationTime=u,Jr(s,u=pi(s,c,u));break e}}s=s.return}while(null!==s)}bi=Ri(i);continue}o=!0,pa(e)}}break}if(_i=!1,so=uo=ao=vi.currentDispatcher=null,o)wi=null,e.finishedWork=null;else if(null!==bi)e.finishedWork=null;else{if(null===(t=e.current.alternate)&&a("281"),wi=null,Ei){if(o=e.latestPendingTime,i=e.latestSuspendedTime,u=e.latestPingedTime,0!==o&&o>r||0!==i&&i>r||0!==u&&u>r)return e.didError=!1,0!==(n=e.latestPingedTime)&&n<=r&&(e.latestPingedTime=0),n=e.earliestPendingTime,t=e.latestPendingTime,n===r?e.earliestPendingTime=t===r?e.latestPendingTime=0:t:t===r&&(e.latestPendingTime=n),n=e.earliestSuspendedTime,t=e.latestSuspendedTime,0===n?e.earliestSuspendedTime=e.latestSuspendedTime=r:n>r?e.earliestSuspendedTime=r:t<r&&(e.latestSuspendedTime=r),Hr(r,e),void(e.expirationTime=e.expirationTime);if(!e.didError&&!n)return e.didError=!0,e.nextExpirationTimeToWorkOn=r,r=e.expirationTime=1,void(e.expirationTime=r)}e.pendingCommitExpirationTime=r,e.finishedWork=t}}function Ci(e,t){var n;e:{for(_i&&!Si&&a("263"),n=e.return;null!==n;){switch(n.tag){case 2:case 3:var r=n.stateNode;if("function"==typeof n.type.getDerivedStateFromCatch||"function"==typeof r.componentDidCatch&&(null===Ti||!Ti.has(r))){Qr(n,e=pi(n,e=oo(t,e),1)),Ii(n,1),n=void 0;break e}break;case 5:Qr(n,e=di(n,e=oo(t,e),1)),Ii(n,1),n=void 0;break 
e}n=n.return}5===e.tag&&(Qr(e,n=di(e,n=oo(t,e),1)),Ii(e,1)),n=void 0}return n}function Mi(e,t){return 0!==gi?e=gi:_i?e=Si?1:xi:1&t.mode?(e=Xi?2+10*(1+((e-2+15)/10|0)):2+25*(1+((e-2+500)/25|0)),null!==wi&&e===xi&&(e+=1)):e=1,Xi&&(0===Yi||e>Yi)&&(Yi=e),e}function Ii(e,t){e:{(0===e.expirationTime||e.expirationTime>t)&&(e.expirationTime=t);var n=e.alternate;null!==n&&(0===n.expirationTime||n.expirationTime>t)&&(n.expirationTime=t);var r=e.return;if(null===r&&5===e.tag)e=e.stateNode;else{for(;null!==r;){if(n=r.alternate,(0===r.childExpirationTime||r.childExpirationTime>t)&&(r.childExpirationTime=t),null!==n&&(0===n.childExpirationTime||n.childExpirationTime>t)&&(n.childExpirationTime=t),null===r.return&&5===r.tag){e=r.stateNode;break e}r=r.return}e=null}}null!==e&&(!_i&&0!==xi&&t<xi&&ki(),qr(e,t),_i&&!Si&&wi===e||(t=e,e=e.expirationTime,null===t.nextScheduledRoot?(t.expirationTime=e,null===Ni?(Di=Ni=t,t.nextScheduledRoot=t):(Ni=Ni.nextScheduledRoot=t).nextScheduledRoot=Di):(0===(n=t.expirationTime)||e<n)&&(t.expirationTime=e),Fi||(Gi?Vi&&(Bi=t,Wi=1,la(t,1,!0)):1===e?ca(1,null):ia(t,e))),ta>ea&&(ta=0,a("185")))}function Ai(e,t,n,r,o){var i=gi;gi=1;try{return e(t,n,r,o)}finally{gi=i}}var Di=null,Ni=null,Li=0,Ui=void 0,Fi=!1,Bi=null,Wi=0,Yi=0,$i=!1,qi=!1,Hi=null,zi=null,Gi=!1,Vi=!1,Xi=!1,Ki=null,Qi=i.unstable_now(),Ji=2+(Qi/10|0),Zi=Ji,ea=50,ta=0,na=null,ra=1;function oa(){Ji=2+((i.unstable_now()-Qi)/10|0)}function ia(e,t){if(0!==Li){if(t>Li)return;null!==Ui&&i.unstable_cancelScheduledWork(Ui)}Li=t,e=i.unstable_now()-Qi,Ui=i.unstable_scheduleWork(sa,{timeout:10*(t-2)-e})}function aa(){return Fi?Zi:(ua(),0!==Wi&&1073741823!==Wi||(oa(),Zi=Ji),Zi)}function ua(){var e=0,t=null;if(null!==Ni)for(var n=Ni,r=Di;null!==r;){var 
o=r.expirationTime;if(0===o){if((null===n||null===Ni)&&a("244"),r===r.nextScheduledRoot){Di=Ni=r.nextScheduledRoot=null;break}if(r===Di)Di=o=r.nextScheduledRoot,Ni.nextScheduledRoot=o,r.nextScheduledRoot=null;else{if(r===Ni){(Ni=n).nextScheduledRoot=Di,r.nextScheduledRoot=null;break}n.nextScheduledRoot=r.nextScheduledRoot,r.nextScheduledRoot=null}r=n.nextScheduledRoot}else{if((0===e||o<e)&&(e=o,t=r),r===Ni)break;if(1===e)break;n=r,r=r.nextScheduledRoot}}Bi=t,Wi=e}function sa(e){if(e.didTimeout&&null!==Di){oa();var t=Di;do{var n=t.expirationTime;0!==n&&Ji>=n&&(t.nextExpirationTimeToWorkOn=Ji),t=t.nextScheduledRoot}while(t!==Di)}ca(0,e)}function ca(e,t){if(zi=t,ua(),null!==zi)for(oa(),Zi=Ji;null!==Bi&&0!==Wi&&(0===e||e>=Wi)&&(!$i||Ji>=Wi);)la(Bi,Wi,Ji>=Wi),ua(),oa(),Zi=Ji;else for(;null!==Bi&&0!==Wi&&(0===e||e>=Wi);)la(Bi,Wi,!0),ua();if(null!==zi&&(Li=0,Ui=null),0!==Wi&&ia(Bi,Wi),zi=null,$i=!1,ta=0,na=null,null!==Ki)for(e=Ki,Ki=null,t=0;t<e.length;t++){var n=e[t];try{n._onComplete()}catch(e){qi||(qi=!0,Hi=e)}}if(qi)throw e=Hi,Hi=null,qi=!1,e}function la(e,t,n){if(Fi&&a("245"),Fi=!0,null===zi||n){var r=e.finishedWork;null!==r?fa(e,r,t):(e.finishedWork=null,Pi(e,!1,n),null!==(r=e.finishedWork)&&fa(e,r,t))}else null!==(r=e.finishedWork)?fa(e,r,t):(e.finishedWork=null,Pi(e,!0,n),null!==(r=e.finishedWork)&&(da()?e.finishedWork=r:fa(e,r,t)));Fi=!1}function fa(e,t,n){var r=e.firstBatch;if(null!==r&&r._expirationTime<=n&&(null===Ki?Ki=[r]:Ki.push(r),r._defer))return e.finishedWork=t,void(e.expirationTime=0);e.finishedWork=null,e===na?ta++:(na=e,ta=0),Si=_i=!0,e.current===t&&a("177"),0===(n=e.pendingCommitExpirationTime)&&a("261"),e.pendingCommitExpirationTime=0,r=t.expirationTime;var 
o=t.childExpirationTime;if(r=0===r||0!==o&&o<r?o:r,e.didError=!1,0===r?(e.earliestPendingTime=0,e.latestPendingTime=0,e.earliestSuspendedTime=0,e.latestSuspendedTime=0,e.latestPingedTime=0):(0!==(o=e.latestPendingTime)&&(o<r?e.earliestPendingTime=e.latestPendingTime=0:e.earliestPendingTime<r&&(e.earliestPendingTime=e.latestPendingTime)),0===(o=e.earliestSuspendedTime)?qr(e,r):r>e.latestSuspendedTime?(e.earliestSuspendedTime=0,e.latestSuspendedTime=0,e.latestPingedTime=0,qr(e,r)):r<o&&qr(e,r)),Hr(0,e),vi.current=null,1<t.effectTag?null!==t.lastEffect?(t.lastEffect.nextEffect=t,r=t.firstEffect):r=t:r=t.firstEffect,dr=On,Ln(o=Nn())){if("selectionStart"in o)var i={start:o.selectionStart,end:o.selectionEnd};else e:{var u=(i=(i=o.ownerDocument)&&i.defaultView||window).getSelection&&i.getSelection();if(u&&0!==u.rangeCount){i=u.anchorNode;var s=u.anchorOffset,c=u.focusNode;u=u.focusOffset;try{i.nodeType,c.nodeType}catch(e){i=null;break e}var l=0,f=-1,d=-1,p=0,h=0,m=o,v=null;t:for(;;){for(var y;m!==i||0!==s&&3!==m.nodeType||(f=l+s),m!==c||0!==u&&3!==m.nodeType||(d=l+u),3===m.nodeType&&(l+=m.nodeValue.length),null!==(y=m.firstChild);)v=m,m=y;for(;;){if(m===o)break t;if(v===i&&++p===s&&(f=l),v===c&&++h===u&&(d=l),null!==(y=m.nextSibling))break;v=(m=v).parentNode}m=y}i=-1===f||-1===d?null:{start:f,end:d}}else i=null}i=i||{start:0,end:0}}else i=null;for(pr={focusedElem:o,selectionRange:i},On=!1,Oi=r;null!==Oi;){o=!1,i=void 0;try{for(;null!==Oi;){if(256&Oi.effectTag){var g=Oi.alternate;e:switch(s=Oi,s.tag){case 2:case 3:if(256&s.effectTag&&null!==g){var _=g.memoizedProps,b=g.memoizedState,w=s.stateNode;w.props=s.memoizedProps,w.state=s.memoizedState;var x=w.getSnapshotBeforeUpdate(_,b);w.__reactInternalSnapshotBeforeUpdate=x}break e;case 5:case 7:case 8:case 6:break e;default:a("163")}}Oi=Oi.nextEffect}}catch(e){o=!0,i=e}o&&(null===Oi&&a("178"),Ci(Oi,i),null!==Oi&&(Oi=Oi.nextEffect))}for(Oi=r;null!==Oi;){g=!1,_=void 0;try{for(;null!==Oi;){var 
E=Oi.effectTag;if(16&E&&rr(Oi.stateNode,""),128&E){var O=Oi.alternate;if(null!==O){var S=O.ref;null!==S&&("function"==typeof S?S(null):S.current=null)}}switch(14&E){case 2:ci(Oi),Oi.effectTag&=-3;break;case 6:ci(Oi),Oi.effectTag&=-3,fi(Oi.alternate,Oi);break;case 4:fi(Oi.alternate,Oi);break;case 8:li(b=Oi),b.return=null,b.child=null,b.alternate&&(b.alternate.child=null,b.alternate.return=null)}Oi=Oi.nextEffect}}catch(e){g=!0,_=e}g&&(null===Oi&&a("178"),Ci(Oi,_),null!==Oi&&(Oi=Oi.nextEffect))}if(S=pr,O=Nn(),E=S.focusedElem,_=S.selectionRange,O!==E&&E&&E.ownerDocument&&function e(t,n){return!(!t||!n)&&(t===n||(!t||3!==t.nodeType)&&(n&&3===n.nodeType?e(t,n.parentNode):"contains"in t?t.contains(n):!!t.compareDocumentPosition&&!!(16&t.compareDocumentPosition(n))))}(E.ownerDocument.documentElement,E)){null!==_&&Ln(E)&&(O=_.start,void 0===(S=_.end)&&(S=O),"selectionStart"in E?(E.selectionStart=O,E.selectionEnd=Math.min(S,E.value.length)):(O=((g=E.ownerDocument||document)&&g.defaultView||window).getSelection(),b=E.textContent.length,S=Math.min(_.start,b),_=void 0===_.end?S:Math.min(_.end,b),!O.extend&&S>_&&(b=_,_=S,S=b),b=Dn(E,S),w=Dn(E,_),b&&w&&(1!==O.rangeCount||O.anchorNode!==b.node||O.anchorOffset!==b.offset||O.focusNode!==w.node||O.focusOffset!==w.offset)&&((g=g.createRange()).setStart(b.node,b.offset),O.removeAllRanges(),S>_?(O.addRange(g),O.extend(w.node,w.offset)):(g.setEnd(w.node,w.offset),O.addRange(g))))),O=[];for(S=E;S=S.parentNode;)1===S.nodeType&&O.push({element:S,left:S.scrollLeft,top:S.scrollTop});for("function"==typeof E.focus&&E.focus(),E=0;E<O.length;E++)(S=O[E]).element.scrollLeft=S.left,S.element.scrollTop=S.top}for(pr=null,On=!!dr,dr=null,e.current=t,Oi=r;null!==Oi;){r=!1,E=void 0;try{for(O=n;null!==Oi;){var T=Oi.effectTag;if(36&T){var k=Oi.alternate;switch(g=O,(S=Oi).tag){case 2:case 3:var R=S.stateNode;if(4&S.effectTag)if(null===k)R.props=S.memoizedProps,R.state=S.memoizedState,R.componentDidMount();else{var 
j=k.memoizedProps,P=k.memoizedState;R.props=S.memoizedProps,R.state=S.memoizedState,R.componentDidUpdate(j,P,R.__reactInternalSnapshotBeforeUpdate)}var C=S.updateQueue;null!==C&&(R.props=S.memoizedProps,R.state=S.memoizedState,no(0,C,R));break;case 5:var M=S.updateQueue;if(null!==M){if(_=null,null!==S.child)switch(S.child.tag){case 7:_=S.child.stateNode;break;case 2:case 3:_=S.child.stateNode}no(0,M,_)}break;case 7:var I=S.stateNode;null===k&&4&S.effectTag&&hr(S.type,S.memoizedProps)&&I.focus();break;case 8:case 6:case 15:case 16:break;default:a("163")}}if(128&T){var A=Oi.ref;if(null!==A){var D=Oi.stateNode;switch(Oi.tag){case 7:var N=D;break;default:N=D}"function"==typeof A?A(N):A.current=N}}var L=Oi.nextEffect;Oi.nextEffect=null,Oi=L}}catch(e){r=!0,E=e}r&&(null===Oi&&a("178"),Ci(Oi,E),null!==Oi&&(Oi=Oi.nextEffect))}_i=Si=!1,"function"==typeof Ar&&Ar(t.stateNode),T=t.expirationTime,t=t.childExpirationTime,0===(t=0===T||0!==t&&t<T?t:T)&&(Ti=null),e.expirationTime=t,e.finishedWork=null}function da(){return!!$i||!(null===zi||zi.timeRemaining()>ra)&&($i=!0)}function pa(e){null===Bi&&a("246"),Bi.expirationTime=0,qi||(qi=!0,Hi=e)}function ha(e,t){var n=Gi;Gi=!0;try{return e(t)}finally{(Gi=n)||Fi||ca(1,null)}}function ma(e,t){if(Gi&&!Vi){Vi=!0;try{return e(t)}finally{Vi=!1}}return e(t)}function va(e,t,n){if(Xi)return e(t,n);Gi||Fi||0===Yi||(ca(Yi,null),Yi=0);var r=Xi,o=Gi;Gi=Xi=!0;try{return e(t,n)}finally{Xi=r,(Gi=o)||Fi||ca(1,null)}}function ya(e,t,n,r,o){var i=t.current;return n=function(e){if(!e)return xr;e=e._reactInternalFiber;e:{(2!==tn(e)||2!==e.tag&&3!==e.tag)&&a("170");var t=e;do{switch(t.tag){case 5:t=t.stateNode.context;break e;case 2:if(kr(t.type)){t=t.stateNode.__reactInternalMemoizedMergedChildContext;break e}break;case 3:if(kr(t.type._reactResult)){t=t.stateNode.__reactInternalMemoizedMergedChildContext;break e}}t=t.return}while(null!==t);a("171"),t=void 0}if(2===e.tag){var n=e.type;if(kr(n))return Cr(e,n,t)}else 
if(3===e.tag&&kr(n=e.type._reactResult))return Cr(e,n,t);return t}(n),null===t.context?t.context=n:t.pendingContext=n,t=o,(o=Xr(r)).payload={element:e},null!==(t=void 0===t?null:t)&&(o.callback=t),Qr(i,o),Ii(i,r),r}function ga(e,t,n,r){var o=t.current;return ya(e,t,n,o=Mi(aa(),o),r)}function _a(e){if(!(e=e.current).child)return null;switch(e.child.tag){case 7:default:return e.child.stateNode}}function ba(e){var t=2+25*(1+((aa()-2+500)/25|0));t<=yi&&(t=yi+1),this._expirationTime=yi=t,this._root=e,this._callbacks=this._next=null,this._hasChildren=this._didComplete=!1,this._children=null,this._defer=!0}function wa(){this._callbacks=null,this._didCommit=!1,this._onCommit=this._onCommit.bind(this)}function xa(e,t,n){e={current:t=new Lr(5,null,null,t?3:0),containerInfo:e,pendingChildren:null,earliestPendingTime:0,latestPendingTime:0,earliestSuspendedTime:0,latestSuspendedTime:0,latestPingedTime:0,didError:!1,pendingCommitExpirationTime:0,finishedWork:null,timeoutHandle:-1,context:null,pendingContext:null,hydrate:n,nextExpirationTimeToWorkOn:0,expirationTime:0,firstBatch:null,nextScheduledRoot:null},this._internalRoot=t.stateNode=e}function Ea(e){return!(!e||1!==e.nodeType&&9!==e.nodeType&&11!==e.nodeType&&(8!==e.nodeType||" react-mount-point-unstable "!==e.nodeValue))}function Oa(e,t,n,r,o){Ea(n)||a("200");var i=n._reactRootContainer;if(i){if("function"==typeof o){var u=o;o=function(){var e=_a(i._internalRoot);u.call(e)}}null!=e?i.legacy_renderSubtreeIntoContainer(e,t,o):i.render(t,o)}else{if(i=n._reactRootContainer=function(e,t){if(t||(t=!(!(t=e?9===e.nodeType?e.documentElement:e.firstChild:null)||1!==t.nodeType||!t.hasAttribute("data-reactroot"))),!t)for(var n;n=e.lastChild;)e.removeChild(n);return new xa(e,!1,t)}(n,r),"function"==typeof o){var s=o;o=function(){var e=_a(i._internalRoot);s.call(e)}}ma(function(){null!=e?i.legacy_renderSubtreeIntoContainer(e,t,o):i.render(t,o)})}return _a(i._internalRoot)}function Sa(e,t){var n=2<arguments.length&&void 
0!==arguments[2]?arguments[2]:null;return Ea(t)||a("200"),function(e,t,n){var r=3<arguments.length&&void 0!==arguments[3]?arguments[3]:null;return{$$typeof:Ke,key:null==r?null:""+r,children:e,containerInfo:t,implementation:n}}(e,t,null,n)}Re=function(e,t,n){switch(t){case"input":if(xt(e,n),t=n.name,"radio"===n.type&&null!=t){for(n=e;n.parentNode;)n=n.parentNode;for(n=n.querySelectorAll("input[name="+JSON.stringify(""+t)+'][type="radio"]'),t=0;t<n.length;t++){var r=n[t];if(r!==e&&r.form===e.form){var o=B(r);o||a("90"),He(r),xt(r,o)}}}break;case"textarea":Kn(e,n);break;case"select":null!=(t=n.value)&&Gn(e,!!n.multiple,t,!1)}},ba.prototype.render=function(e){this._defer||a("250"),this._hasChildren=!0,this._children=e;var t=this._root._internalRoot,n=this._expirationTime,r=new wa;return ya(e,t,null,n,r._onCommit),r},ba.prototype.then=function(e){if(this._didComplete)e();else{var t=this._callbacks;null===t&&(t=this._callbacks=[]),t.push(e)}},ba.prototype.commit=function(){var e=this._root._internalRoot,t=e.firstBatch;if(this._defer&&null!==t||a("251"),this._hasChildren){var n=this._expirationTime;if(t!==this){this._hasChildren&&(n=this._expirationTime=t._expirationTime,this.render(this._children));for(var r=null,o=t;o!==this;)r=o,o=o._next;null===r&&a("251"),r._next=o._next,this._next=t,e.firstBatch=this}this._defer=!1,t=n,Fi&&a("253"),Bi=e,Wi=t,la(e,t,!0),ca(1,null),t=this._next,this._next=null,null!==(t=e.firstBatch=t)&&t._hasChildren&&t.render(t._children)}else this._next=null,this._defer=!1},ba.prototype._onComplete=function(){if(!this._didComplete){this._didComplete=!0;var e=this._callbacks;if(null!==e)for(var t=0;t<e.length;t++)(0,e[t])()}},wa.prototype.then=function(e){if(this._didCommit)e();else{var t=this._callbacks;null===t&&(t=this._callbacks=[]),t.push(e)}},wa.prototype._onCommit=function(){if(!this._didCommit){this._didCommit=!0;var e=this._callbacks;if(null!==e)for(var t=0;t<e.length;t++){var n=e[t];"function"!=typeof 
n&&a("191",n),n()}}},xa.prototype.render=function(e,t){var n=this._internalRoot,r=new wa;return null!==(t=void 0===t?null:t)&&r.then(t),ga(e,n,null,r._onCommit),r},xa.prototype.unmount=function(e){var t=this._internalRoot,n=new wa;return null!==(e=void 0===e?null:e)&&n.then(e),ga(null,t,null,n._onCommit),n},xa.prototype.legacy_renderSubtreeIntoContainer=function(e,t,n){var r=this._internalRoot,o=new wa;return null!==(n=void 0===n?null:n)&&o.then(n),ga(t,r,e,o._onCommit),o},xa.prototype.createBatch=function(){var e=new ba(this),t=e._expirationTime,n=this._internalRoot,r=n.firstBatch;if(null===r)n.firstBatch=e,e._next=null;else{for(n=null;null!==r&&r._expirationTime<=t;)n=r,r=r._next;e._next=r,null!==n&&(n._next=e)}return e},Ae=ha,De=va,Ne=function(){Fi||0===Yi||(ca(Yi,null),Yi=0)};var Ta={createPortal:Sa,findDOMNode:function(e){if(null==e)return null;if(1===e.nodeType)return e;var t=e._reactInternalFiber;return void 0===t&&("function"==typeof e.render?a("188"):a("268",Object.keys(e))),e=null===(e=rn(t))?null:e.stateNode},hydrate:function(e,t,n){return Oa(null,e,t,!0,n)},render:function(e,t,n){return Oa(null,e,t,!1,n)},unstable_renderSubtreeIntoContainer:function(e,t,n,r){return(null==e||void 0===e._reactInternalFiber)&&a("38"),Oa(e,t,n,!1,r)},unmountComponentAtNode:function(e){return Ea(e)||a("40"),!!e._reactRootContainer&&(ma(function(){Oa(null,null,e,!1,function(){e._reactRootContainer=null})}),!0)},unstable_createPortal:function(){return Sa.apply(void 0,arguments)},unstable_batchedUpdates:ha,unstable_interactiveUpdates:va,flushSync:function(e,t){Fi&&a("187");var n=Gi;Gi=!0;try{return Ai(e,t)}finally{Gi=n,ca(1,null)}},unstable_flushControlled:function(e){var t=Gi;Gi=!0;try{Ai(e)}finally{(Gi=t)||Fi||ca(1,null)}},__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED:{Events:[U,F,B,C.injectEventPluginsByName,g,z,function(e){T(e,H)},Me,Ie,Rn,I]},unstable_createRoot:function(e,t){return Ea(e)||a("278"),new xa(e,!0,null!=t&&!0===t.hydrate)}};!function(e){var 
t=e.findFiberByHostInstance;(function(e){if("undefined"==typeof __REACT_DEVTOOLS_GLOBAL_HOOK__)return!1;var t=__REACT_DEVTOOLS_GLOBAL_HOOK__;if(t.isDisabled||!t.supportsFiber)return!0;try{var n=t.inject(e);Ar=Nr(function(e){return t.onCommitFiberRoot(n,e)}),Dr=Nr(function(e){return t.onCommitFiberUnmount(n,e)})}catch(e){}})(o({},e,{findHostInstanceByFiber:function(e){return null===(e=rn(e))?null:e.stateNode},findFiberByHostInstance:function(e){return t?t(e):null}}))}({findFiberByHostInstance:L,bundleType:0,version:"16.5.2",rendererPackageName:"react-dom"});var ka={default:Ta},Ra=ka&&Ta||ka;e.exports=Ra.default||Ra},function(e,t,n){"use strict";e.exports=n(217)},function(e,t,n){"use strict";
/** @license React v16.5.2
 * schedule.production.min.js
 *
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
+ */Object.defineProperty(t,"__esModule",{value:!0});var r=null,o=!1,i=!1,a="object"==typeof performance&&"function"==typeof performance.now,u={timeRemaining:a?function(){var e=m()-performance.now();return 0<e?e:0}:function(){var e=m()-Date.now();return 0<e?e:0},didTimeout:!1};function s(){if(!o){var e=r.timesOutAt;i?h():i=!0,p(l,e)}}function c(){var e=r,t=r.next;if(r===t)r=null;else{var n=r.previous;r=n.next=t,t.previous=n}e.next=e.previous=null,(e=e.callback)(u)}function l(e){o=!0,u.didTimeout=e;try{if(e)for(;null!==r;){var n=t.unstable_now();if(!(r.timesOutAt<=n))break;do{c()}while(null!==r&&r.timesOutAt<=n)}else if(null!==r)do{c()}while(null!==r&&0<m()-t.unstable_now())}finally{o=!1,null!==r?s():i=!1}}var f,d,p,h,m,v=Date,y="function"==typeof setTimeout?setTimeout:void 0,g="function"==typeof clearTimeout?clearTimeout:void 0,_="function"==typeof requestAnimationFrame?requestAnimationFrame:void 0,b="function"==typeof cancelAnimationFrame?cancelAnimationFrame:void 0;function w(e){f=_(function(t){g(d),e(t)}),d=y(function(){b(f),e(t.unstable_now())},100)}if(a){var x=performance;t.unstable_now=function(){return x.now()}}else t.unstable_now=function(){return v.now()};if("undefined"==typeof window){var E=-1;p=function(e){E=setTimeout(e,0,!0)},h=function(){clearTimeout(E)},m=function(){return 0}}else if(window._schedMock){var O=window._schedMock;p=O[0],h=O[1],m=O[2]}else{"undefined"!=typeof console&&("function"!=typeof _&&console.error("This browser doesn't support requestAnimationFrame. Make sure that you load a polyfill in older browsers. https://fb.me/react-polyfills"),"function"!=typeof b&&console.error("This browser doesn't support cancelAnimationFrame. Make sure that you load a polyfill in older browsers. 
https://fb.me/react-polyfills"));var S=null,T=!1,k=-1,R=!1,j=!1,P=0,C=33,M=33;m=function(){return P};var I="__reactIdleCallback$"+Math.random().toString(36).slice(2);window.addEventListener("message",function(e){if(e.source===window&&e.data===I){T=!1;var n=t.unstable_now();if(e=!1,0>=P-n){if(!(-1!==k&&k<=n))return void(R||(R=!0,w(A)));e=!0}if(k=-1,n=S,S=null,null!==n){j=!0;try{n(e)}finally{j=!1}}}},!1);var A=function(e){R=!1;var t=e-P+M;t<M&&C<M?(8>t&&(t=8),M=t<C?C:t):C=t,P=e+M,T||(T=!0,window.postMessage(I,"*"))};p=function(e,t){S=e,k=t,j?window.postMessage(I,"*"):R||(R=!0,w(A))},h=function(){S=null,T=!1,k=-1}}t.unstable_scheduleWork=function(e,n){var o=t.unstable_now();if(e={callback:e,timesOutAt:n=void 0!==n&&null!==n&&null!==n.timeout&&void 0!==n.timeout?o+n.timeout:o+5e3,next:null,previous:null},null===r)r=e.next=e.previous=e,s();else{o=null;var i=r;do{if(i.timesOutAt>n){o=i;break}i=i.next}while(i!==r);null===o?o=r:o===r&&(r=e,s()),(n=o.previous).next=o.previous=e,e.next=o,e.previous=n}return e},t.unstable_cancelScheduledWork=function(e){var t=e.next;if(null!==t){if(t===e)r=null;else{e===r&&(r=t);var n=e.previous;n.next=t,t.previous=n}e.next=e.previous=null}}},function(module,exports,__webpack_require__){"use strict";var evalAllowed=!1;try{eval("evalAllowed = true")}catch(e){}var platformSupported=!!Object.setPrototypeOf&&evalAllowed;module.exports=__webpack_require__(219)},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=function(e){return e&&"object"==typeof e&&"default"in e?e.default:e}(n(1)),o=function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")},i=function(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t},a=function(e){function t(){return o(this,t),i(this,e.apply(this,arguments))}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must 
either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),t.prototype.render=function(){return r.Children.only(this.props.children)},t}(r.Component);t.AppContainer=a,t.hot=function(){return function(e){return e}},t.areComponentsEqual=function(e,t){return e===t},t.setConfig=function(){},t.cold=function(e){return e}},function(e,t,n){"use strict";t.a=u;var r=n(1),o=(n.n(r),n(12)),i=n.n(o),a=n(116);n(72);function u(){var e,t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"store",n=arguments[1]||t+"Subscription",o=function(e){function o(n,r){!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,o);var i=function(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}(this,e.call(this,n,r));return i[t]=n.store,i}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(o,e),o.prototype.getChildContext=function(){var e;return(e={})[t]=this[t],e[n]=null,e},o.prototype.render=function(){return r.Children.only(this.props.children)},o}(r.Component);return o.propTypes={store:a.a.isRequired,children:i.a.element.isRequired},o.childContextTypes=((e={})[t]=a.a.isRequired,e[n]=a.b,e),o}t.b=u()},function(e,t,n){"use strict";var r=n(222);function o(){}e.exports=function(){function e(e,t,n,o,i,a){if(a!==r){var u=new Error("Calling PropTypes validators directly is not supported by the `prop-types` package. Use PropTypes.checkPropTypes() to call them. 
Read more at http://fb.me/use-check-prop-types");throw u.name="Invariant Violation",u}}function t(){return e}e.isRequired=e;var n={array:e,bool:e,func:e,number:e,object:e,string:e,symbol:e,any:e,arrayOf:t,element:e,instanceOf:t,node:e,objectOf:t,oneOf:t,oneOfType:t,shape:t,exact:t};return n.checkPropTypes=o,n.PropTypes=n,n}},function(e,t,n){"use strict";e.exports="SECRET_DO_NOT_PASS_THIS_OR_YOU_WILL_BE_FIRED"},function(e,t,n){"use strict";n.d(t,"a",function(){return i});var r=null,o={notify:function(){}};var i=function(){function e(t,n,r){!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,e),this.store=t,this.parentSub=n,this.onStateChange=r,this.unsubscribe=null,this.listeners=o}return e.prototype.addNestedSub=function(e){return this.trySubscribe(),this.listeners.subscribe(e)},e.prototype.notifyNestedSubs=function(){this.listeners.notify()},e.prototype.isSubscribed=function(){return Boolean(this.unsubscribe)},e.prototype.trySubscribe=function(){this.unsubscribe||(this.unsubscribe=this.parentSub?this.parentSub.addNestedSub(this.onStateChange):this.store.subscribe(this.onStateChange),this.listeners=function(){var e=[],t=[];return{clear:function(){t=r,e=r},notify:function(){for(var n=e=t,r=0;r<n.length;r++)n[r]()},get:function(){return t},subscribe:function(n){var o=!0;return t===e&&(t=e.slice()),t.push(n),function(){o&&e!==r&&(o=!1,t===e&&(t=e.slice()),t.splice(t.indexOf(n),1))}}}}())},e.prototype.tryUnsubscribe=function(){this.unsubscribe&&(this.unsubscribe(),this.unsubscribe=null,this.listeners.clear(),this.listeners=o)},e}()},function(e,t,n){"use strict";var r=n(117),o=n(225),i=n(226),a=n(238),u=n(239),s=n(240),c=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e};function l(e,t,n){for(var r=t.length-1;r>=0;r--){var o=t[r](e);if(o)return o}return function(t,r){throw new Error("Invalid value of type "+typeof 
e+" for "+n+" argument when connecting component "+r.wrappedComponentName+".")}}function f(e,t){return e===t}t.a=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=e.connectHOC,n=void 0===t?r.a:t,d=e.mapStateToPropsFactories,p=void 0===d?a.a:d,h=e.mapDispatchToPropsFactories,m=void 0===h?i.a:h,v=e.mergePropsFactories,y=void 0===v?u.a:v,g=e.selectorFactory,_=void 0===g?s.a:g;return function(e,t,r){var i=arguments.length>3&&void 0!==arguments[3]?arguments[3]:{},a=i.pure,u=void 0===a||a,s=i.areStatesEqual,d=void 0===s?f:s,h=i.areOwnPropsEqual,v=void 0===h?o.a:h,g=i.areStatePropsEqual,b=void 0===g?o.a:g,w=i.areMergedPropsEqual,x=void 0===w?o.a:w,E=function(e,t){var n={};for(var r in e)t.indexOf(r)>=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}(i,["pure","areStatesEqual","areOwnPropsEqual","areStatePropsEqual","areMergedPropsEqual"]),O=l(e,p,"mapStateToProps"),S=l(t,m,"mapDispatchToProps"),T=l(r,y,"mergeProps");return n(_,c({methodName:"connect",getDisplayName:function(e){return"Connect("+e+")"},shouldHandleStateChanges:Boolean(e),initMapStateToProps:O,initMapDispatchToProps:S,initMergeProps:T,pure:u,areStatesEqual:d,areOwnPropsEqual:v,areStatePropsEqual:b,areMergedPropsEqual:x},E))}}()},function(e,t,n){"use strict";t.a=function(e,t){if(o(e,t))return!0;if("object"!=typeof e||null===e||"object"!=typeof t||null===t)return!1;var n=Object.keys(e),i=Object.keys(t);if(n.length!==i.length)return!1;for(var a=0;a<n.length;a++)if(!r.call(t,n[a])||!o(e[n[a]],t[n[a]]))return!1;return!0};var r=Object.prototype.hasOwnProperty;function o(e,t){return e===t?0!==e||0!==t||1/e==1/t:e!=e&&t!=t}},function(e,t,n){"use strict";var r=n(15),o=n(120);t.a=[function(e){return"function"==typeof e?Object(o.b)(e,"mapDispatchToProps"):void 0},function(e){return e?void 0:Object(o.a)(function(e){return{dispatch:e}})},function(e){return e&&"object"==typeof e?Object(o.a)(function(t){return Object(r.bindActionCreators)(e,t)}):void 0}]},function(e,t,n){"use 
strict";(function(e,r){var o,i=n(228);o="undefined"!=typeof self?self:"undefined"!=typeof window?window:void 0!==e?e:r;var a=Object(i.a)(o);t.a=a}).call(t,n(3),n(119)(e))},function(e,t,n){"use strict";t.a=function(e){var t,n=e.Symbol;"function"==typeof n?n.observable?t=n.observable:(t=n("observable"),n.observable=t):t="@@observable";return t}},function(e,t,n){"use strict";var r=n(230),o=n(235),i=n(237),a="[object Object]",u=Function.prototype,s=Object.prototype,c=u.toString,l=s.hasOwnProperty,f=c.call(Object);t.a=function(e){if(!Object(i.a)(e)||Object(r.a)(e)!=a)return!1;var t=Object(o.a)(e);if(null===t)return!0;var n=l.call(t,"constructor")&&t.constructor;return"function"==typeof n&&n instanceof n&&c.call(n)==f}},function(e,t,n){"use strict";var r=n(122),o=n(233),i=n(234),a="[object Null]",u="[object Undefined]",s=r.a?r.a.toStringTag:void 0;t.a=function(e){return null==e?void 0===e?u:a:s&&s in Object(e)?Object(o.a)(e):Object(i.a)(e)}},function(e,t,n){"use strict";var r=n(232),o="object"==typeof self&&self&&self.Object===Object&&self,i=r.a||o||Function("return this")();t.a=i},function(e,t,n){"use strict";(function(e){var n="object"==typeof e&&e&&e.Object===Object&&e;t.a=n}).call(t,n(3))},function(e,t,n){"use strict";var r=n(122),o=Object.prototype,i=o.hasOwnProperty,a=o.toString,u=r.a?r.a.toStringTag:void 0;t.a=function(e){var t=i.call(e,u),n=e[u];try{e[u]=void 0;var r=!0}catch(e){}var o=a.call(e);return r&&(t?e[u]=n:delete e[u]),o}},function(e,t,n){"use strict";var r=Object.prototype.toString;t.a=function(e){return r.call(e)}},function(e,t,n){"use strict";var r=n(236),o=Object(r.a)(Object.getPrototypeOf,Object);t.a=o},function(e,t,n){"use strict";t.a=function(e,t){return function(n){return e(t(n))}}},function(e,t,n){"use strict";t.a=function(e){return null!=e&&"object"==typeof e}},function(e,t,n){"use strict";var r=n(120);t.a=[function(e){return"function"==typeof e?Object(r.b)(e,"mapStateToProps"):void 0},function(e){return e?void 
0:Object(r.a)(function(){return{}})}]},function(e,t,n){"use strict";n(121);var r=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e};function o(e,t,n){return r({},n,e,t)}t.a=[function(e){return"function"==typeof e?function(e){return function(t,n){n.displayName;var r=n.pure,o=n.areMergedPropsEqual,i=!1,a=void 0;return function(t,n,u){var s=e(t,n,u);return i?r&&o(s,a)||(a=s):(i=!0,a=s),a}}}(e):void 0},function(e){return e?void 0:function(){return o}}]},function(e,t,n){"use strict";t.a=function(e,t){var n=t.initMapStateToProps,i=t.initMapDispatchToProps,a=t.initMergeProps,u=function(e,t){var n={};for(var r in e)t.indexOf(r)>=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}(t,["initMapStateToProps","initMapDispatchToProps","initMergeProps"]),s=n(e,u),c=i(e,u),l=a(e,u);0;return(u.pure?o:r)(s,c,l,e,u)};n(241);function r(e,t,n,r){return function(o,i){return n(e(o,i),t(r,i),i)}}function o(e,t,n,r,o){var i=o.areStatesEqual,a=o.areOwnPropsEqual,u=o.areStatePropsEqual,s=!1,c=void 0,l=void 0,f=void 0,d=void 0,p=void 0;function h(o,s){var h=!a(s,l),m=!i(o,c);return c=o,l=s,h&&m?(f=e(c,l),t.dependsOnOwnProps&&(d=t(r,l)),p=n(f,d,l)):h?(e.dependsOnOwnProps&&(f=e(c,l)),t.dependsOnOwnProps&&(d=t(r,l)),p=n(f,d,l)):m?function(){var t=e(c,l),r=!u(t,f);return f=t,r&&(p=n(f,d,l)),p}():p}return function(o,i){return s?h(o,i):function(o,i){return f=e(c=o,l=i),d=t(r,l),p=n(f,d,l),s=!0,p}(o,i)}}},function(e,t,n){"use strict";n(72)},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=y(n(5)),o=y(n(6)),i=y(n(7)),a=y(n(8)),u=y(n(9)),s=n(1),c=y(s),l=n(135),f=n(86),d=n(11),p=v(n(208)),h=v(n(517)),m=v(n(528));function v(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}function y(e){return e&&e.__esModule?e:{default:e}}var g=function(e){function 
t(){return(0,o.default)(this,t),(0,a.default)(this,(t.__proto__||(0,r.default)(t)).apply(this,arguments))}return(0,u.default)(t,e),(0,i.default)(t,[{key:"render",value:function(){return c.default.createElement(l.ConnectedRouter,{history:this.props.history},c.default.createElement("div",null,c.default.createElement(d.Header,null),c.default.createElement("div",{className:"app"},c.default.createElement(f.Route,{path:"/metadata/",component:d.Sidebar}),c.default.createElement("div",{className:"body"},c.default.createElement(f.Route,{path:"/search/",component:h.Menu}),c.default.createElement(f.Route,{path:"/metadata/:hash/",component:p.Heading}),c.default.createElement(f.Switch,null,c.default.createElement(f.Route,{exact:!0,path:"/metadata/:hash/summary/",component:p.Summary}),c.default.createElement(f.Route,{exact:!0,path:"/metadata/:hash/mediaRecord/",component:p.MediaRecord}),c.default.createElement(f.Route,{exact:!0,path:"/metadata/:hash/mediaInfo/",component:p.MediaInfo}),c.default.createElement(f.Route,{exact:!0,path:"/metadata/:hash/keyframe/:frame/",component:p.KeyframeSingle}),c.default.createElement(f.Route,{exact:!0,path:"/metadata/:hash/keyframe/",component:p.KeyframeList}),c.default.createElement(f.Route,{exact:!0,path:"/metadata/:hash/coco/",component:p.Coco}),c.default.createElement(f.Route,{exact:!0,path:"/metadata/:hash/places365/",component:p.Places365}),c.default.createElement(f.Route,{exact:!0,path:"/metadata/:hash/sugarcube/",component:p.Sugarcube}),c.default.createElement(f.Route,{exact:!0,path:"/metadata/:hash/",component:p.Summary}),c.default.createElement(f.Route,{exact:!0,path:"/metadata/",render:function(){return c.default.createElement("div",{className:"notFound"},c.default.createElement("h4",null,"NOT 
FOUND"))}}),c.default.createElement(f.Route,{exact:!0,path:"/search/",component:h.Container}),c.default.createElement(f.Route,{exact:!0,path:"/search/keyframe/:verified/:hash/:frame/",component:h.Container}),c.default.createElement(f.Route,{exact:!0,path:"/search/keyframe/:hash/:frame/",component:h.Container}),c.default.createElement(f.Route,{exact:!0,path:"/search/browse/:hash/",component:h.Browse}),c.default.createElement(f.Route,{exact:!0,path:"/search/random/",component:h.Random}),c.default.createElement(f.Route,{exact:!0,path:"/search/review/",component:m.Saved})))),c.default.createElement(d.Footer,null)))}}]),t}(s.Component);t.default=g},function(e,t,n){n(244),e.exports=n(10).Object.getPrototypeOf},function(e,t,n){var r=n(41),o=n(123);n(124)("getPrototypeOf",function(){return function(e){return o(r(e))}})},function(e,t,n){n(246);var r=n(10).Object;e.exports=function(e,t,n){return r.defineProperty(e,t,n)}},function(e,t,n){var r=n(18);r(r.S+r.F*!n(24),"Object",{defineProperty:n(22).f})},function(e,t,n){e.exports={default:n(248),__esModule:!0}},function(e,t,n){n(44),n(58),e.exports=n(83).f("iterator")},function(e,t,n){var r=n(79),o=n(73);e.exports=function(e){return function(t,n){var i,a,u=String(o(t)),s=r(n),c=u.length;return s<0||s>=c?e?"":void 0:(i=u.charCodeAt(s))<55296||i>56319||s+1===c||(a=u.charCodeAt(s+1))<56320||a>57343?e?u.charAt(s):i:e?u.slice(s,s+2):a-56320+(i-55296<<10)+65536}}},function(e,t,n){"use strict";var r=n(80),o=n(43),i=n(57),a={};n(26)(a,n(14)("iterator"),function(){return this}),e.exports=function(e,t,n){e.prototype=r(a,{next:o(1,n)}),i(e,t+" Iterator")}},function(e,t,n){var r=n(22),o=n(20),i=n(45);e.exports=n(24)?Object.defineProperties:function(e,t){o(e);for(var n,a=i(t),u=a.length,s=0;u>s;)r.f(e,n=a[s++],t[n]);return e}},function(e,t,n){var r=n(37),o=n(81),i=n(253);e.exports=function(e){return function(t,n,a){var u,s=r(t),c=o(s.length),l=i(a,c);if(e&&n!=n){for(;c>l;)if((u=s[l++])!=u)return!0}else for(;c>l;l++)if((e||l in 
s)&&s[l]===n)return e||l||0;return!e&&-1}}},function(e,t,n){var r=n(79),o=Math.max,i=Math.min;e.exports=function(e,t){return(e=r(e))<0?o(e+t,0):i(e,t)}},function(e,t,n){"use strict";var r=n(255),o=n(256),i=n(36),a=n(37);e.exports=n(127)(Array,"Array",function(e,t){this._t=a(e),this._i=0,this._k=t},function(){var e=this._t,t=this._k,n=this._i++;return!e||n>=e.length?(this._t=void 0,o(1)):o(0,"keys"==t?n:"values"==t?e[n]:[n,e[n]])},"values"),i.Arguments=i.Array,r("keys"),r("values"),r("entries")},function(e,t){e.exports=function(){}},function(e,t){e.exports=function(e,t){return{value:t,done:!!e}}},function(e,t,n){e.exports={default:n(258),__esModule:!0}},function(e,t,n){n(259),n(134),n(264),n(265),e.exports=n(10).Symbol},function(e,t,n){"use strict";var r=n(13),o=n(25),i=n(24),a=n(18),u=n(128),s=n(260).KEY,c=n(35),l=n(75),f=n(57),d=n(55),p=n(14),h=n(83),m=n(84),v=n(261),y=n(262),g=n(20),_=n(23),b=n(37),w=n(77),x=n(43),E=n(80),O=n(263),S=n(133),T=n(22),k=n(45),R=S.f,j=T.f,P=O.f,C=r.Symbol,M=r.JSON,I=M&&M.stringify,A=p("_hidden"),D=p("toPrimitive"),N={}.propertyIsEnumerable,L=l("symbol-registry"),U=l("symbols"),F=l("op-symbols"),B=Object.prototype,W="function"==typeof C,Y=r.QObject,$=!Y||!Y.prototype||!Y.prototype.findChild,q=i&&c(function(){return 7!=E(j({},"a",{get:function(){return j(this,"a",{value:7}).a}})).a})?function(e,t,n){var r=R(B,t);r&&delete B[t],j(e,t,n),r&&e!==B&&j(B,t,r)}:j,H=function(e){var t=U[e]=E(C.prototype);return t._k=e,t},z=W&&"symbol"==typeof C.iterator?function(e){return"symbol"==typeof e}:function(e){return e instanceof C},G=function(e,t,n){return e===B&&G(F,t,n),g(e),t=w(t,!0),g(n),o(U,t)?(n.enumerable?(o(e,A)&&e[A][t]&&(e[A][t]=!1),n=E(n,{enumerable:x(0,!1)})):(o(e,A)||j(e,A,x(1,{})),e[A][t]=!0),q(e,t,n)):j(e,t,n)},V=function(e,t){g(e);for(var n,r=v(t=b(t)),o=0,i=r.length;i>o;)G(e,n=r[o++],t[n]);return e},X=function(e){var 
t=N.call(this,e=w(e,!0));return!(this===B&&o(U,e)&&!o(F,e))&&(!(t||!o(this,e)||!o(U,e)||o(this,A)&&this[A][e])||t)},K=function(e,t){if(e=b(e),t=w(t,!0),e!==B||!o(U,t)||o(F,t)){var n=R(e,t);return!n||!o(U,t)||o(e,A)&&e[A][t]||(n.enumerable=!0),n}},Q=function(e){for(var t,n=P(b(e)),r=[],i=0;n.length>i;)o(U,t=n[i++])||t==A||t==s||r.push(t);return r},J=function(e){for(var t,n=e===B,r=P(n?F:b(e)),i=[],a=0;r.length>a;)!o(U,t=r[a++])||n&&!o(B,t)||i.push(U[t]);return i};W||(u((C=function(){if(this instanceof C)throw TypeError("Symbol is not a constructor!");var e=d(arguments.length>0?arguments[0]:void 0),t=function(n){this===B&&t.call(F,n),o(this,A)&&o(this[A],e)&&(this[A][e]=!1),q(this,e,x(1,n))};return i&&$&&q(B,e,{configurable:!0,set:t}),H(e)}).prototype,"toString",function(){return this._k}),S.f=K,T.f=G,n(132).f=O.f=Q,n(59).f=X,n(85).f=J,i&&!n(42)&&u(B,"propertyIsEnumerable",X,!0),h.f=function(e){return H(p(e))}),a(a.G+a.W+a.F*!W,{Symbol:C});for(var Z="hasInstance,isConcatSpreadable,iterator,match,replace,search,species,split,toPrimitive,toStringTag,unscopables".split(","),ee=0;Z.length>ee;)p(Z[ee++]);for(var te=k(p.store),ne=0;te.length>ne;)m(te[ne++]);a(a.S+a.F*!W,"Symbol",{for:function(e){return o(L,e+="")?L[e]:L[e]=C(e)},keyFor:function(e){if(!z(e))throw TypeError(e+" is not a symbol!");for(var t in L)if(L[t]===e)return t},useSetter:function(){$=!0},useSimple:function(){$=!1}}),a(a.S+a.F*!W,"Object",{create:function(e,t){return void 0===t?E(e):V(E(e),t)},defineProperty:G,defineProperties:V,getOwnPropertyDescriptor:K,getOwnPropertyNames:Q,getOwnPropertySymbols:J}),M&&a(a.S+a.F*(!W||c(function(){var e=C();return"[null]"!=I([e])||"{}"!=I({a:e})||"{}"!=I(Object(e))})),"JSON",{stringify:function(e){for(var t,n,r=[e],o=1;arguments.length>o;)r.push(arguments[o++]);if(n=t=r[1],(_(t)||void 0!==e)&&!z(e))return y(t)||(t=function(e,t){if("function"==typeof n&&(t=n.call(this,e,t)),!z(t))return 
t}),r[1]=t,I.apply(M,r)}}),C.prototype[D]||n(26)(C.prototype,D,C.prototype.valueOf),f(C,"Symbol"),f(Math,"Math",!0),f(r.JSON,"JSON",!0)},function(e,t,n){var r=n(55)("meta"),o=n(23),i=n(25),a=n(22).f,u=0,s=Object.isExtensible||function(){return!0},c=!n(35)(function(){return s(Object.preventExtensions({}))}),l=function(e){a(e,r,{value:{i:"O"+ ++u,w:{}}})},f=e.exports={KEY:r,NEED:!1,fastKey:function(e,t){if(!o(e))return"symbol"==typeof e?e:("string"==typeof e?"S":"P")+e;if(!i(e,r)){if(!s(e))return"F";if(!t)return"E";l(e)}return e[r].i},getWeak:function(e,t){if(!i(e,r)){if(!s(e))return!0;if(!t)return!1;l(e)}return e[r].w},onFreeze:function(e){return c&&f.NEED&&s(e)&&!i(e,r)&&l(e),e}}},function(e,t,n){var r=n(45),o=n(85),i=n(59);e.exports=function(e){var t=r(e),n=o.f;if(n)for(var a,u=n(e),s=i.f,c=0;u.length>c;)s.call(e,a=u[c++])&&t.push(a);return t}},function(e,t,n){var r=n(46);e.exports=Array.isArray||function(e){return"Array"==r(e)}},function(e,t,n){var r=n(37),o=n(132).f,i={}.toString,a="object"==typeof window&&window&&Object.getOwnPropertyNames?Object.getOwnPropertyNames(window):[];e.exports.f=function(e){return a&&"[object Window]"==i.call(e)?function(e){try{return o(e)}catch(e){return a.slice()}}(e):o(r(e))}},function(e,t,n){n(84)("asyncIterator")},function(e,t,n){n(84)("observable")},function(e,t,n){e.exports={default:n(267),__esModule:!0}},function(e,t,n){n(268),e.exports=n(10).Object.setPrototypeOf},function(e,t,n){var r=n(18);r(r.S,"Object",{setPrototypeOf:n(269).set})},function(e,t,n){var r=n(23),o=n(20),i=function(e,t){if(o(e),!r(t)&&null!==t)throw TypeError(t+": can't set as prototype!")};e.exports={set:Object.setPrototypeOf||("__proto__"in{}?function(e,t,r){try{(r=n(34)(Function.call,n(133).f(Object.prototype,"__proto__").set,2))(e,[]),t=!(e instanceof Array)}catch(e){t=!0}return function(e,n){return i(e,n),t?e.__proto__=n:r(e,n),e}}({},!1):void 0),check:i}},function(e,t,n){e.exports={default:n(271),__esModule:!0}},function(e,t,n){n(272);var 
r=n(10).Object;e.exports=function(e,t){return r.create(e,t)}},function(e,t,n){var r=n(18);r(r.S,"Object",{create:n(80)})},function(e,t,n){"use strict";(function(e){Object.defineProperty(t,"__esModule",{value:!0}),t.__RewireAPI__=t.__ResetDependency__=t.__set__=t.__Rewire__=t.__GetDependency__=t.__get__=void 0;var r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},o=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e},i=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}(n(60)),a=l(n(274)),u=l(n(281)),s=l(n(282)),c=l(n(283));function l(e){return e&&e.__esModule?e:{default:e}}var f=function(e){return o({},_("actions"),_("createSelectors")(e),{ConnectedRouter:_("createConnectedRouter")(e),connectRouter:_("createConnectRouter")(e),routerMiddleware:_("routerMiddleware")})};function d(){try{if(e)return e}catch(e){try{if(window)return window}catch(e){return this}}}t.default=_("createAll");var p=null;function h(){if(null===p){var e=d();e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__||(e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__=0),p=__$$GLOBAL_REWIRE_NEXT_MODULE_ID__++}return p}function m(){var e=d();return e.__$$GLOBAL_REWIRE_REGISTRY__||(e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)),__$$GLOBAL_REWIRE_REGISTRY__}function v(){var e=h(),t=m(),n=t[e];return n||(t[e]=Object.create(null),n=t[e]),n}!function(){var e=d();e.__rewire_reset_all__||(e.__rewire_reset_all__=function(){e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)})}();var y="__INTENTIONAL_UNDEFINED__",g={};function _(e){var t=v();if(void 0===t[e])return function(e){switch(e){case"actions":return function(){var e=arguments.length>0&&void 
0!==arguments[0]?arguments[0]:{};return Object.keys(e).filter(function(e){return"__get__"!==e&&"__set__"!==e&&"__reset__"!==e&&"__with__"!==e&&"__GetDependency__"!==e&&"__Rewire__"!==e&&"__ResetDependency__"!==e&&"__RewireAPI__"!==e}).reduce(function(t,n){return t[n]=e[n],t},{})}(i);case"createSelectors":return c.default;case"createConnectedRouter":return a.default;case"createConnectRouter":return u.default;case"routerMiddleware":return s.default;case"createAll":return f}return}(e);var n=t[e];return n===y?void 0:n}function b(e,t){var n=v();if("object"!==(void 0===e?"undefined":r(e)))return n[e]=void 0===t?y:t,function(){w(e)};Object.keys(e).forEach(function(t){n[t]=e[t]})}function w(e){var t=v();delete t[e],0==Object.keys(t).length&&delete m()[h]}function x(e){var t=v(),n=Object.keys(e),r={};function o(){n.forEach(function(e){t[e]=r[e]})}return function(i){n.forEach(function(n){r[n]=t[n],t[n]=e[n]});var a=i();return a&&"function"==typeof a.then?a.then(o).catch(o):o(),a}}!function(){function e(e,t){Object.defineProperty(g,e,{value:t,enumerable:!1,configurable:!0})}e("__get__",_),e("__GetDependency__",_),e("__Rewire__",b),e("__set__",b),e("__reset__",w),e("__ResetDependency__",w),e("__with__",x)}();var E=void 0===f?"undefined":r(f);function O(e,t){Object.defineProperty(f,e,{value:t,enumerable:!1,configurable:!0})}"object"!==E&&"function"!==E||!Object.isExtensible(f)||(O("__get__",_),O("__GetDependency__",_),O("__Rewire__",b),O("__set__",b),O("__reset__",w),O("__ResetDependency__",w),O("__with__",x),O("__RewireAPI__",g)),t.__get__=_,t.__GetDependency__=_,t.__Rewire__=b,t.__set__=b,t.__ResetDependency__=w,t.__RewireAPI__=g}).call(t,n(3))},function(e,t,n){"use strict";(function(e){Object.defineProperty(t,"__esModule",{value:!0}),t.__RewireAPI__=t.__ResetDependency__=t.__set__=t.__Rewire__=t.__GetDependency__=t.__get__=void 0;var r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof 
Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},o=function(){function e(e,t){for(var n=0;n<t.length;n++){var r=t[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(e,r.key,r)}}return function(t,n,r){return n&&e(t.prototype,n),r&&e(t,r),t}}(),i=n(1),a=f(i),u=f(n(12)),s=n(2),c=n(86),l=n(60);function f(e){return e&&e.__esModule?e:{default:e}}var d=function(e){var t=e.getIn,n=e.toJS,r=function(e){function r(e,o){!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,r);var i=function(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}(this,(r.__proto__||Object.getPrototypeOf(r)).call(this,e));i.inTimeTravelling=!1,i.unsubscribe=o.store.subscribe(function(){var r=n(t(o.store.getState(),["router","location"])),a=r.pathname,u=r.search,s=r.hash,c=e.history.location,l=c.pathname,f=c.search,d=c.hash;l===a&&f===u&&d===s||(i.inTimeTravelling=!0,e.history.push({pathname:a,search:u,hash:s}))});var a=function(t,n){i.inTimeTravelling?i.inTimeTravelling=!1:e.onLocationChanged(t,n)};return i.unlisten=e.history.listen(a),a(e.history.location,e.history.action),i}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(r,b("Component")),o(r,[{key:"componentWillUnmount",value:function(){this.unlisten(),this.unsubscribe()}},{key:"render",value:function(){var e=this.props,t=e.history,n=e.children;return 
b("React").createElement(b("Router"),{history:t},n)}}]),r}();r.contextTypes={store:b("PropTypes").shape({getState:b("PropTypes").func.isRequired,subscribe:b("PropTypes").func.isRequired}).isRequired},r.propTypes={history:b("PropTypes").shape({action:b("PropTypes").string.isRequired,listen:b("PropTypes").func.isRequired,location:b("PropTypes").object.isRequired,push:b("PropTypes").func.isRequired}).isRequired,location:b("PropTypes").oneOfType([b("PropTypes").object,b("PropTypes").string]).isRequired,action:b("PropTypes").string.isRequired,basename:b("PropTypes").string,children:b("PropTypes").oneOfType([b("PropTypes").func,b("PropTypes").node]),onLocationChanged:b("PropTypes").func.isRequired};return b("connect")(function(e){return{action:t(e,["router","action"]),location:t(e,["router","location"])}},function(e){return{onLocationChanged:function(t,n){return e(b("onLocationChanged")(t,n))}}})(r)};function p(){try{if(e)return e}catch(e){try{if(window)return window}catch(e){return this}}}t.default=b("createConnectedRouter");var h=null;function m(){if(null===h){var e=p();e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__||(e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__=0),h=__$$GLOBAL_REWIRE_NEXT_MODULE_ID__++}return h}function v(){var e=p();return e.__$$GLOBAL_REWIRE_REGISTRY__||(e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)),__$$GLOBAL_REWIRE_REGISTRY__}function y(){var e=m(),t=v(),n=t[e];return n||(t[e]=Object.create(null),n=t[e]),n}!function(){var e=p();e.__rewire_reset_all__||(e.__rewire_reset_all__=function(){e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)})}();var g="__INTENTIONAL_UNDEFINED__",_={};function b(e){var t=y();if(void 0===t[e])return function(e){switch(e){case"Component":return i.Component;case"PropTypes":return u.default;case"onLocationChanged":return l.onLocationChanged;case"connect":return s.connect;case"createConnectedRouter":return d;case"React":return a.default;case"Router":return c.Router}return}(e);var n=t[e];return n===g?void 0:n}function w(e,t){var 
n=y();if("object"!==(void 0===e?"undefined":r(e)))return n[e]=void 0===t?g:t,function(){x(e)};Object.keys(e).forEach(function(t){n[t]=e[t]})}function x(e){var t=y();delete t[e],0==Object.keys(t).length&&delete v()[m]}function E(e){var t=y(),n=Object.keys(e),r={};function o(){n.forEach(function(e){t[e]=r[e]})}return function(i){n.forEach(function(n){r[n]=t[n],t[n]=e[n]});var a=i();return a&&"function"==typeof a.then?a.then(o).catch(o):o(),a}}!function(){function e(e,t){Object.defineProperty(_,e,{value:t,enumerable:!1,configurable:!0})}e("__get__",b),e("__GetDependency__",b),e("__Rewire__",w),e("__set__",w),e("__reset__",x),e("__ResetDependency__",x),e("__with__",E)}();var O=void 0===d?"undefined":r(d);function S(e,t){Object.defineProperty(d,e,{value:t,enumerable:!1,configurable:!0})}"object"!==O&&"function"!==O||!Object.isExtensible(d)||(S("__get__",b),S("__GetDependency__",b),S("__Rewire__",w),S("__set__",w),S("__reset__",x),S("__ResetDependency__",x),S("__with__",E),S("__RewireAPI__",_)),t.__get__=b,t.__GetDependency__=b,t.__Rewire__=w,t.__set__=w,t.__ResetDependency__=x,t.__RewireAPI__=_}).call(t,n(3))},function(e,t,n){"use strict";var r=n(61),o=n.n(r),i=n(19),a=n.n(i),u=n(62),s=n(47),c=n(87),l=n(137),f="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},d=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e},p=function(){try{return window.history.state||{}}catch(e){return{}}};t.a=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};a()(l.b,"Browser history needs a DOM");var t=window.history,n=Object(l.g)(),r=!Object(l.h)(),i=e.forceRefresh,h=void 0!==i&&i,m=e.getUserConfirmation,v=void 0===m?l.c:m,y=e.keyLength,g=void 
0===y?6:y,_=e.basename?Object(s.g)(Object(s.a)(e.basename)):"",b=function(e){var t=e||{},n=t.key,r=t.state,i=window.location,a=i.pathname+i.search+i.hash;return o()(!_||Object(s.c)(a,_),'You are attempting to use a basename on a page whose URL path does not begin with the basename. Expected path "'+a+'" to begin with "'+_+'".'),_&&(a=Object(s.e)(a,_)),Object(u.a)(a,r,n)},w=function(){return Math.random().toString(36).substr(2,g)},x=Object(c.a)(),E=function(e){d(N,e),N.length=t.length,x.notifyListeners(N.location,N.action)},O=function(e){Object(l.d)(e)||k(b(e.state))},S=function(){k(b(p()))},T=!1,k=function(e){T?(T=!1,E()):x.confirmTransitionTo(e,"POP",v,function(t){t?E({action:"POP",location:e}):R(e)})},R=function(e){var t=N.location,n=P.indexOf(t.key);-1===n&&(n=0);var r=P.indexOf(e.key);-1===r&&(r=0);var o=n-r;o&&(T=!0,M(o))},j=b(p()),P=[j.key],C=function(e){return _+Object(s.b)(e)},M=function(e){t.go(e)},I=0,A=function(e){1===(I+=e)?(Object(l.a)(window,"popstate",O),r&&Object(l.a)(window,"hashchange",S)):0===I&&(Object(l.e)(window,"popstate",O),r&&Object(l.e)(window,"hashchange",S))},D=!1,N={length:t.length,action:"POP",location:j,createHref:C,push:function(e,r){o()(!("object"===(void 0===e?"undefined":f(e))&&void 0!==e.state&&void 0!==r),"You should avoid providing a 2nd state argument to push when the 1st argument is a location-like object that already has state; it is ignored");var i=Object(u.a)(e,r,w(),N.location);x.confirmTransitionTo(i,"PUSH",v,function(e){if(e){var r=C(i),a=i.key,u=i.state;if(n)if(t.pushState({key:a,state:u},null,r),h)window.location.href=r;else{var s=P.indexOf(N.location.key),c=P.slice(0,-1===s?0:s+1);c.push(i.key),P=c,E({action:"PUSH",location:i})}else o()(void 0===u,"Browser history cannot push state in browsers that do not support HTML5 history"),window.location.href=r}})},replace:function(e,r){o()(!("object"===(void 0===e?"undefined":f(e))&&void 0!==e.state&&void 0!==r),"You should avoid providing a 2nd state argument to replace when 
the 1st argument is a location-like object that already has state; it is ignored");var i=Object(u.a)(e,r,w(),N.location);x.confirmTransitionTo(i,"REPLACE",v,function(e){if(e){var r=C(i),a=i.key,u=i.state;if(n)if(t.replaceState({key:a,state:u},null,r),h)window.location.replace(r);else{var s=P.indexOf(N.location.key);-1!==s&&(P[s]=i.key),E({action:"REPLACE",location:i})}else o()(void 0===u,"Browser history cannot replace state in browsers that do not support HTML5 history"),window.location.replace(r)}})},go:M,goBack:function(){return M(-1)},goForward:function(){return M(1)},block:function(){var e=arguments.length>0&&void 0!==arguments[0]&&arguments[0],t=x.setPrompt(e);return D||(A(1),D=!0),function(){return D&&(D=!1,A(-1)),t()}},listen:function(e){var t=x.appendListener(e);return A(1),function(){A(-1),t()}}};return N}},function(e,t,n){"use strict";function r(e){return"/"===e.charAt(0)}function o(e,t){for(var n=t,r=n+1,o=e.length;r<o;n+=1,r+=1)e[n]=e[r];e.pop()}t.a=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"",n=e&&e.split("/")||[],i=t&&t.split("/")||[],a=e&&r(e),u=t&&r(t),s=a||u;if(e&&r(e)?i=n:n.length&&(i.pop(),i=i.concat(n)),!i.length)return"/";var c=void 0;if(i.length){var l=i[i.length-1];c="."===l||".."===l||""===l}else c=!1;for(var f=0,d=i.length;d>=0;d--){var p=i[d];"."===p?o(i,d):".."===p?(o(i,d),f++):f&&(o(i,d),f--)}if(!s)for(;f--;f)i.unshift("..");!s||""===i[0]||i[0]&&r(i[0])||i.unshift("");var h=i.join("/");return c&&"/"!==h.substr(-1)&&(h+="/"),h}},function(e,t,n){"use strict";var r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e};t.a=function e(t,n){if(t===n)return!0;if(null==t||null==n)return!1;if(Array.isArray(t))return Array.isArray(n)&&t.length===n.length&&t.every(function(t,r){return e(t,n[r])});var o=void 0===t?"undefined":r(t);if(o!==(void 
0===n?"undefined":r(n)))return!1;if("object"===o){var i=t.valueOf(),a=n.valueOf();if(i!==t||a!==n)return e(i,a);var u=Object.keys(t),s=Object.keys(n);return u.length===s.length&&u.every(function(r){return e(t[r],n[r])})}return!1}},function(e,t,n){"use strict";var r=n(61),o=n.n(r),i=n(19),a=n.n(i),u=n(62),s=n(47),c=n(87),l=n(137),f=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e},d={hashbang:{encodePath:function(e){return"!"===e.charAt(0)?e:"!/"+Object(s.f)(e)},decodePath:function(e){return"!"===e.charAt(0)?e.substr(1):e}},noslash:{encodePath:s.f,decodePath:s.a},slash:{encodePath:s.a,decodePath:s.a}},p=function(){var e=window.location.href,t=e.indexOf("#");return-1===t?"":e.substring(t+1)},h=function(e){var t=window.location.href.indexOf("#");window.location.replace(window.location.href.slice(0,t>=0?t:0)+"#"+e)};t.a=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};a()(l.b,"Hash history needs a DOM");var t=window.history,n=Object(l.f)(),r=e.getUserConfirmation,i=void 0===r?l.c:r,m=e.hashType,v=void 0===m?"slash":m,y=e.basename?Object(s.g)(Object(s.a)(e.basename)):"",g=d[v],_=g.encodePath,b=g.decodePath,w=function(){var e=b(p());return o()(!y||Object(s.c)(e,y),'You are attempting to use a basename on a page whose URL path does not begin with the basename. 
Expected path "'+e+'" to begin with "'+y+'".'),y&&(e=Object(s.e)(e,y)),Object(u.a)(e)},x=Object(c.a)(),E=function(e){f(L,e),L.length=t.length,x.notifyListeners(L.location,L.action)},O=!1,S=null,T=function(){var e=p(),t=_(e);if(e!==t)h(t);else{var n=w(),r=L.location;if(!O&&Object(u.b)(r,n))return;if(S===Object(s.b)(n))return;S=null,k(n)}},k=function(e){O?(O=!1,E()):x.confirmTransitionTo(e,"POP",i,function(t){t?E({action:"POP",location:e}):R(e)})},R=function(e){var t=L.location,n=M.lastIndexOf(Object(s.b)(t));-1===n&&(n=0);var r=M.lastIndexOf(Object(s.b)(e));-1===r&&(r=0);var o=n-r;o&&(O=!0,I(o))},j=p(),P=_(j);j!==P&&h(P);var C=w(),M=[Object(s.b)(C)],I=function(e){o()(n,"Hash history go(n) causes a full page reload in this browser"),t.go(e)},A=0,D=function(e){1===(A+=e)?Object(l.a)(window,"hashchange",T):0===A&&Object(l.e)(window,"hashchange",T)},N=!1,L={length:t.length,action:"POP",location:C,createHref:function(e){return"#"+_(y+Object(s.b)(e))},push:function(e,t){o()(void 0===t,"Hash history cannot push state; it is ignored");var n=Object(u.a)(e,void 0,void 0,L.location);x.confirmTransitionTo(n,"PUSH",i,function(e){if(e){var t=Object(s.b)(n),r=_(y+t);if(p()!==r){S=t,function(e){window.location.hash=e}(r);var i=M.lastIndexOf(Object(s.b)(L.location)),a=M.slice(0,-1===i?0:i+1);a.push(t),M=a,E({action:"PUSH",location:n})}else o()(!1,"Hash history cannot PUSH the same path; a new entry will not be added to the history stack"),E()}})},replace:function(e,t){o()(void 0===t,"Hash history cannot replace state; it is ignored");var n=Object(u.a)(e,void 0,void 0,L.location);x.confirmTransitionTo(n,"REPLACE",i,function(e){if(e){var t=Object(s.b)(n),r=_(y+t);p()!==r&&(S=t,h(r));var o=M.indexOf(Object(s.b)(L.location));-1!==o&&(M[o]=t),E({action:"REPLACE",location:n})}})},go:I,goBack:function(){return I(-1)},goForward:function(){return I(1)},block:function(){var e=arguments.length>0&&void 0!==arguments[0]&&arguments[0],t=x.setPrompt(e);return N||(D(1),N=!0),function(){return 
N&&(N=!1,D(-1)),t()}},listen:function(e){var t=x.appendListener(e);return D(1),function(){D(-1),t()}}};return L}},function(e,t,n){"use strict";var r=n(61),o=n.n(r),i=n(47),a=n(62),u=n(87),s="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},c=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e},l=function(e,t,n){return Math.min(Math.max(e,t),n)};t.a=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=e.getUserConfirmation,n=e.initialEntries,r=void 0===n?["/"]:n,f=e.initialIndex,d=void 0===f?0:f,p=e.keyLength,h=void 0===p?6:p,m=Object(u.a)(),v=function(e){c(x,e),x.length=x.entries.length,m.notifyListeners(x.location,x.action)},y=function(){return Math.random().toString(36).substr(2,h)},g=l(d,0,r.length-1),_=r.map(function(e){return"string"==typeof e?Object(a.a)(e,void 0,y()):Object(a.a)(e,void 0,e.key||y())}),b=i.b,w=function(e){var n=l(x.index+e,0,x.entries.length-1),r=x.entries[n];m.confirmTransitionTo(r,"POP",t,function(e){e?v({action:"POP",location:r,index:n}):v()})},x={length:_.length,action:"POP",location:_[g],index:g,entries:_,createHref:b,push:function(e,n){o()(!("object"===(void 0===e?"undefined":s(e))&&void 0!==e.state&&void 0!==n),"You should avoid providing a 2nd state argument to push when the 1st argument is a location-like object that already has state; it is ignored");var r=Object(a.a)(e,n,y(),x.location);m.confirmTransitionTo(r,"PUSH",t,function(e){if(e){var t=x.index+1,n=x.entries.slice(0);n.length>t?n.splice(t,n.length-t,r):n.push(r),v({action:"PUSH",location:r,index:t,entries:n})}})},replace:function(e,n){o()(!("object"===(void 0===e?"undefined":s(e))&&void 0!==e.state&&void 0!==n),"You should avoid providing a 2nd state argument to replace when the 1st argument is a 
location-like object that already has state; it is ignored");var r=Object(a.a)(e,n,y(),x.location);m.confirmTransitionTo(r,"REPLACE",t,function(e){e&&(x.entries[x.index]=r,v({action:"REPLACE",location:r}))})},go:w,goBack:function(){return w(-1)},goForward:function(){return w(1)},canGo:function(e){var t=x.index+e;return t>=0&&t<x.entries.length},block:function(){var e=arguments.length>0&&void 0!==arguments[0]&&arguments[0];return m.setPrompt(e)},listen:function(e){return m.appendListener(e)}};return x}},function(e,t){e.exports=Array.isArray||function(e){return"[object Array]"==Object.prototype.toString.call(e)}},function(e,t,n){"use strict";(function(e){Object.defineProperty(t,"__esModule",{value:!0}),t.__RewireAPI__=t.__ResetDependency__=t.__set__=t.__Rewire__=t.__GetDependency__=t.__get__=void 0;var r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},o=n(60),i=function(e){var t=e.filterNotRouter,n=e.fromJS,r=e.getIn,o=e.merge,i=e.setIn;return function(e){var a=n({location:e.location,action:e.action});return function(e){return function(n,u){var s=a;n&&(s=r(n,["router"])||s,n=t(n));var c=e(n,u);return i(c,["router"],function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=t.type,r=t.payload;return n===p("LOCATION_CHANGE")?o(e,r):e}(s,u))}}}};function a(){try{if(e)return e}catch(e){try{if(window)return window}catch(e){return this}}}t.default=p("createConnectRouter");var u=null;function s(){if(null===u){var e=a();e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__||(e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__=0),u=__$$GLOBAL_REWIRE_NEXT_MODULE_ID__++}return u}function c(){var e=a();return e.__$$GLOBAL_REWIRE_REGISTRY__||(e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)),__$$GLOBAL_REWIRE_REGISTRY__}function l(){var e=s(),t=c(),n=t[e];return n||(t[e]=Object.create(null),n=t[e]),n}!function(){var 
e=a();e.__rewire_reset_all__||(e.__rewire_reset_all__=function(){e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)})}();var f="__INTENTIONAL_UNDEFINED__",d={};function p(e){var t=l();if(void 0===t[e])return function(e){switch(e){case"LOCATION_CHANGE":return o.LOCATION_CHANGE;case"createConnectRouter":return i}return}(e);var n=t[e];return n===f?void 0:n}function h(e,t){var n=l();if("object"!==(void 0===e?"undefined":r(e)))return n[e]=void 0===t?f:t,function(){m(e)};Object.keys(e).forEach(function(t){n[t]=e[t]})}function m(e){var t=l();delete t[e],0==Object.keys(t).length&&delete c()[s]}function v(e){var t=l(),n=Object.keys(e),r={};function o(){n.forEach(function(e){t[e]=r[e]})}return function(i){n.forEach(function(n){r[n]=t[n],t[n]=e[n]});var a=i();return a&&"function"==typeof a.then?a.then(o).catch(o):o(),a}}!function(){function e(e,t){Object.defineProperty(d,e,{value:t,enumerable:!1,configurable:!0})}e("__get__",p),e("__GetDependency__",p),e("__Rewire__",h),e("__set__",h),e("__reset__",m),e("__ResetDependency__",m),e("__with__",v)}();var y=void 0===i?"undefined":r(i);function g(e,t){Object.defineProperty(i,e,{value:t,enumerable:!1,configurable:!0})}"object"!==y&&"function"!==y||!Object.isExtensible(i)||(g("__get__",p),g("__GetDependency__",p),g("__Rewire__",h),g("__set__",h),g("__reset__",m),g("__ResetDependency__",m),g("__with__",v),g("__RewireAPI__",d)),t.__get__=p,t.__GetDependency__=p,t.__Rewire__=h,t.__set__=h,t.__ResetDependency__=m,t.__RewireAPI__=d}).call(t,n(3))},function(e,t,n){"use strict";(function(e){Object.defineProperty(t,"__esModule",{value:!0}),t.__RewireAPI__=t.__ResetDependency__=t.__set__=t.__Rewire__=t.__GetDependency__=t.__get__=void 0;var r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},o=n(60);var i=function(e){return function(t){return function(t){return 
function(n){if(n.type!==p("CALL_HISTORY_METHOD"))return t(n);var r=n.payload,o=r.method,i=r.args;e[o].apply(e,function(e){if(Array.isArray(e)){for(var t=0,n=Array(e.length);t<e.length;t++)n[t]=e[t];return n}return Array.from(e)}(i))}}}};function a(){try{if(e)return e}catch(e){try{if(window)return window}catch(e){return this}}}t.default=p("routerMiddleware");var u=null;function s(){if(null===u){var e=a();e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__||(e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__=0),u=__$$GLOBAL_REWIRE_NEXT_MODULE_ID__++}return u}function c(){var e=a();return e.__$$GLOBAL_REWIRE_REGISTRY__||(e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)),__$$GLOBAL_REWIRE_REGISTRY__}function l(){var e=s(),t=c(),n=t[e];return n||(t[e]=Object.create(null),n=t[e]),n}!function(){var e=a();e.__rewire_reset_all__||(e.__rewire_reset_all__=function(){e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)})}();var f="__INTENTIONAL_UNDEFINED__",d={};function p(e){var t=l();if(void 0===t[e])return function(e){switch(e){case"CALL_HISTORY_METHOD":return o.CALL_HISTORY_METHOD;case"routerMiddleware":return i}return}(e);var n=t[e];return n===f?void 0:n}function h(e,t){var n=l();if("object"!==(void 0===e?"undefined":r(e)))return n[e]=void 0===t?f:t,function(){m(e)};Object.keys(e).forEach(function(t){n[t]=e[t]})}function m(e){var t=l();delete t[e],0==Object.keys(t).length&&delete c()[s]}function v(e){var t=l(),n=Object.keys(e),r={};function o(){n.forEach(function(e){t[e]=r[e]})}return function(i){n.forEach(function(n){r[n]=t[n],t[n]=e[n]});var a=i();return a&&"function"==typeof a.then?a.then(o).catch(o):o(),a}}!function(){function e(e,t){Object.defineProperty(d,e,{value:t,enumerable:!1,configurable:!0})}e("__get__",p),e("__GetDependency__",p),e("__Rewire__",h),e("__set__",h),e("__reset__",m),e("__ResetDependency__",m),e("__with__",v)}();var y=void 0===i?"undefined":r(i);function 
g(e,t){Object.defineProperty(i,e,{value:t,enumerable:!1,configurable:!0})}"object"!==y&&"function"!==y||!Object.isExtensible(i)||(g("__get__",p),g("__GetDependency__",p),g("__Rewire__",h),g("__set__",h),g("__reset__",m),g("__ResetDependency__",m),g("__with__",v),g("__RewireAPI__",d)),t.__get__=p,t.__GetDependency__=p,t.__Rewire__=h,t.__set__=h,t.__ResetDependency__=m,t.__RewireAPI__=d}).call(t,n(3))},function(e,t,n){"use strict";(function(e){Object.defineProperty(t,"__esModule",{value:!0}),t.__RewireAPI__=t.__ResetDependency__=t.__set__=t.__Rewire__=t.__GetDependency__=t.__get__=void 0;var r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},o=n(86),i=function(e){var t=e.getIn,n=e.toJS,r=function(e){return n(t(e,["router","location"]))};return{getLocation:r,getAction:function(e){return n(t(e,["router","action"]))},createMatchSelector:function(e){var t=null,n=null;return function(o){var i=(r(o)||{}).pathname;if(i===t)return n;t=i;var a=p("matchPath")(i,e);return a&&n&&a.url===n.url||(n=a),n}}}};function a(){try{if(e)return e}catch(e){try{if(window)return window}catch(e){return this}}}t.default=p("createSelectors");var u=null;function s(){if(null===u){var e=a();e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__||(e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__=0),u=__$$GLOBAL_REWIRE_NEXT_MODULE_ID__++}return u}function c(){var e=a();return e.__$$GLOBAL_REWIRE_REGISTRY__||(e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)),__$$GLOBAL_REWIRE_REGISTRY__}function l(){var e=s(),t=c(),n=t[e];return n||(t[e]=Object.create(null),n=t[e]),n}!function(){var e=a();e.__rewire_reset_all__||(e.__rewire_reset_all__=function(){e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)})}();var f="__INTENTIONAL_UNDEFINED__",d={};function p(e){var t=l();if(void 0===t[e])return function(e){switch(e){case"matchPath":return o.matchPath;case"createSelectors":return 
i}return}(e);var n=t[e];return n===f?void 0:n}function h(e,t){var n=l();if("object"!==(void 0===e?"undefined":r(e)))return n[e]=void 0===t?f:t,function(){m(e)};Object.keys(e).forEach(function(t){n[t]=e[t]})}function m(e){var t=l();delete t[e],0==Object.keys(t).length&&delete c()[s]}function v(e){var t=l(),n=Object.keys(e),r={};function o(){n.forEach(function(e){t[e]=r[e]})}return function(i){n.forEach(function(n){r[n]=t[n],t[n]=e[n]});var a=i();return a&&"function"==typeof a.then?a.then(o).catch(o):o(),a}}!function(){function e(e,t){Object.defineProperty(d,e,{value:t,enumerable:!1,configurable:!0})}e("__get__",p),e("__GetDependency__",p),e("__Rewire__",h),e("__set__",h),e("__reset__",m),e("__ResetDependency__",m),e("__with__",v)}();var y=void 0===i?"undefined":r(i);function g(e,t){Object.defineProperty(i,e,{value:t,enumerable:!1,configurable:!0})}"object"!==y&&"function"!==y||!Object.isExtensible(i)||(g("__get__",p),g("__GetDependency__",p),g("__Rewire__",h),g("__set__",h),g("__reset__",m),g("__ResetDependency__",m),g("__with__",v),g("__RewireAPI__",d)),t.__get__=p,t.__GetDependency__=p,t.__Rewire__=h,t.__set__=h,t.__ResetDependency__=m,t.__RewireAPI__=d}).call(t,n(3))},function(e,t,n){"use strict";(function(e){Object.defineProperty(t,"__esModule",{value:!0}),t.__RewireAPI__=t.__ResetDependency__=t.__set__=t.__Rewire__=t.__GetDependency__=t.__get__=void 0;var r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},o=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e},i=u(n(285)),a=u(n(286));function u(e){return e&&e.__esModule?e:{default:e}}var s={filterNotRouter:function(e){e.router;return function(e,t){var n={};for(var r in e)t.indexOf(r)>=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return 
n}(e,["router"])},fromJS:function(e){return e},getIn:v("getIn"),merge:function(e,t){return o({},e,t)},setIn:v("setIn"),toJS:function(e){return e}};function c(){try{if(e)return e}catch(e){try{if(window)return window}catch(e){return this}}}t.default=v("structure");var l=null;function f(){if(null===l){var e=c();e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__||(e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__=0),l=__$$GLOBAL_REWIRE_NEXT_MODULE_ID__++}return l}function d(){var e=c();return e.__$$GLOBAL_REWIRE_REGISTRY__||(e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)),__$$GLOBAL_REWIRE_REGISTRY__}function p(){var e=f(),t=d(),n=t[e];return n||(t[e]=Object.create(null),n=t[e]),n}!function(){var e=c();e.__rewire_reset_all__||(e.__rewire_reset_all__=function(){e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)})}();var h="__INTENTIONAL_UNDEFINED__",m={};function v(e){var t=p();if(void 0===t[e])return function(e){switch(e){case"getIn":return i.default;case"setIn":return a.default;case"structure":return s}return}(e);var n=t[e];return n===h?void 0:n}function y(e,t){var n=p();if("object"!==(void 0===e?"undefined":r(e)))return n[e]=void 0===t?h:t,function(){g(e)};Object.keys(e).forEach(function(t){n[t]=e[t]})}function g(e){var t=p();delete t[e],0==Object.keys(t).length&&delete d()[f]}function _(e){var t=p(),n=Object.keys(e),r={};function o(){n.forEach(function(e){t[e]=r[e]})}return function(i){n.forEach(function(n){r[n]=t[n],t[n]=e[n]});var a=i();return a&&"function"==typeof a.then?a.then(o).catch(o):o(),a}}!function(){function e(e,t){Object.defineProperty(m,e,{value:t,enumerable:!1,configurable:!0})}e("__get__",v),e("__GetDependency__",v),e("__Rewire__",y),e("__set__",y),e("__reset__",g),e("__ResetDependency__",g),e("__with__",_)}();var b=void 0===s?"undefined":r(s);function 
w(e,t){Object.defineProperty(s,e,{value:t,enumerable:!1,configurable:!0})}"object"!==b&&"function"!==b||!Object.isExtensible(s)||(w("__get__",v),w("__GetDependency__",v),w("__Rewire__",y),w("__set__",y),w("__reset__",g),w("__ResetDependency__",g),w("__with__",_),w("__RewireAPI__",m)),t.__get__=v,t.__GetDependency__=v,t.__Rewire__=y,t.__set__=y,t.__ResetDependency__=g,t.__RewireAPI__=m}).call(t,n(3))},function(e,t,n){"use strict";(function(e){Object.defineProperty(t,"__esModule",{value:!0});var n="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},r=function(e,t){if(!e)return e;var n=t.length;if(n){for(var r=e,o=0;o<n&&r;++o)r=r[t[o]];return r}};function o(){try{if(e)return e}catch(e){try{if(window)return window}catch(e){return this}}}t.default=f("getIn");var i=null;function a(){if(null===i){var e=o();e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__||(e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__=0),i=__$$GLOBAL_REWIRE_NEXT_MODULE_ID__++}return i}function u(){var e=o();return e.__$$GLOBAL_REWIRE_REGISTRY__||(e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)),__$$GLOBAL_REWIRE_REGISTRY__}function s(){var e=a(),t=u(),n=t[e];return n||(t[e]=Object.create(null),n=t[e]),n}!function(){var e=o();e.__rewire_reset_all__||(e.__rewire_reset_all__=function(){e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)})}();var c="__INTENTIONAL_UNDEFINED__",l={};function f(e){var t=s();if(void 0===t[e])return function(e){switch(e){case"getIn":return r}return}(e);var n=t[e];return n===c?void 0:n}function d(e,t){var r=s();if("object"!==(void 0===e?"undefined":n(e)))return r[e]=void 0===t?c:t,function(){p(e)};Object.keys(e).forEach(function(t){r[t]=e[t]})}function p(e){var t=s();delete t[e],0==Object.keys(t).length&&delete u()[a]}function h(e){var t=s(),n=Object.keys(e),r={};function o(){n.forEach(function(e){t[e]=r[e]})}return 
function(i){n.forEach(function(n){r[n]=t[n],t[n]=e[n]});var a=i();return a&&"function"==typeof a.then?a.then(o).catch(o):o(),a}}!function(){function e(e,t){Object.defineProperty(l,e,{value:t,enumerable:!1,configurable:!0})}e("__get__",f),e("__GetDependency__",f),e("__Rewire__",d),e("__set__",d),e("__reset__",p),e("__ResetDependency__",p),e("__with__",h)}();var m=void 0===r?"undefined":n(r);function v(e,t){Object.defineProperty(r,e,{value:t,enumerable:!1,configurable:!0})}"object"!==m&&"function"!==m||!Object.isExtensible(r)||(v("__get__",f),v("__GetDependency__",f),v("__Rewire__",d),v("__set__",d),v("__reset__",p),v("__ResetDependency__",p),v("__with__",h),v("__RewireAPI__",l)),t.__get__=f,t.__GetDependency__=f,t.__Rewire__=d,t.__set__=d,t.__ResetDependency__=p,t.__RewireAPI__=l}).call(t,n(3))},function(e,t,n){"use strict";(function(e){Object.defineProperty(t,"__esModule",{value:!0});var n="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},r=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e};var o=function(e,t,n,o){if(o>=n.length)return t;var i=n[o],a=p("setInWithPath")(e&&e[i],t,n,o+1);if(!e){var u=isNaN(i)?{}:[];return u[i]=a,u}if(Array.isArray(e)){var s=[].concat(e);return s[i]=a,s}return r({},e,function(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}({},i,a))},i=function(e,t,n){return p("setInWithPath")(e,n,t,0)};function a(){try{if(e)return e}catch(e){try{if(window)return window}catch(e){return this}}}t.default=p("setIn");var u=null;function s(){if(null===u){var e=a();e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__||(e.__$$GLOBAL_REWIRE_NEXT_MODULE_ID__=0),u=__$$GLOBAL_REWIRE_NEXT_MODULE_ID__++}return u}function c(){var e=a();return 
e.__$$GLOBAL_REWIRE_REGISTRY__||(e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)),__$$GLOBAL_REWIRE_REGISTRY__}function l(){var e=s(),t=c(),n=t[e];return n||(t[e]=Object.create(null),n=t[e]),n}!function(){var e=a();e.__rewire_reset_all__||(e.__rewire_reset_all__=function(){e.__$$GLOBAL_REWIRE_REGISTRY__=Object.create(null)})}();var f="__INTENTIONAL_UNDEFINED__",d={};function p(e){var t=l();if(void 0===t[e])return function(e){switch(e){case"setInWithPath":return o;case"setIn":return i}return}(e);var n=t[e];return n===f?void 0:n}function h(e,t){var r=l();if("object"!==(void 0===e?"undefined":n(e)))return r[e]=void 0===t?f:t,function(){m(e)};Object.keys(e).forEach(function(t){r[t]=e[t]})}function m(e){var t=l();delete t[e],0==Object.keys(t).length&&delete c()[s]}function v(e){var t=l(),n=Object.keys(e),r={};function o(){n.forEach(function(e){t[e]=r[e]})}return function(i){n.forEach(function(n){r[n]=t[n],t[n]=e[n]});var a=i();return a&&"function"==typeof a.then?a.then(o).catch(o):o(),a}}!function(){function e(e,t){Object.defineProperty(d,e,{value:t,enumerable:!1,configurable:!0})}e("__get__",p),e("__GetDependency__",p),e("__Rewire__",h),e("__set__",h),e("__reset__",m),e("__ResetDependency__",m),e("__with__",v)}();var y=void 0===i?"undefined":n(i);function g(e,t){Object.defineProperty(i,e,{value:t,enumerable:!1,configurable:!0})}"object"!==y&&"function"!==y||!Object.isExtensible(i)||(g("__get__",p),g("__GetDependency__",p),g("__Rewire__",h),g("__set__",h),g("__reset__",m),g("__ResetDependency__",m),g("__with__",v),g("__RewireAPI__",d)),t.__get__=p,t.__GetDependency__=p,t.__Rewire__=h,t.__set__=h,t.__ResetDependency__=m,t.__RewireAPI__=d}).call(t,n(3))},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=function(e){return e&&e.__esModule?e:{default:e}}(n(90)),o=(n(144),n(292));n(315);t.default=(0,o.connect)(function(e){return{auth:e.auth,isAuthenticated:e.auth.isAuthenticated}},function(e){return{}})(function(e){return 
e.isAuthenticated||"0.0.0.0"===window.location.hostname?function(e){return r.default.createElement("header",{className:"navbar"},r.default.createElement("section",{className:"navbar-section first-navbar-section"},r.default.createElement("a",{href:"/"},r.default.createElement("img",{className:"menuToggle",alt:"logo",src:"/static/vframe-logo.png"})),r.default.createElement("a",{href:"/",className:"vcat-btn"},r.default.createElement("b",null,"VCAT")),r.default.createElement("a",{href:"/categories/"},"Categories"),r.default.createElement("a",{href:"/images/new/"},"Upload"),r.default.createElement("a",{href:"/search/"},"Search")),r.default.createElement("section",{className:"navbar-section last-navbar-section"},r.default.createElement("a",{href:"/stats/hierarchy.html",className:""},"Stats"),r.default.createElement("a",{href:"/help/"},"Help"),r.default.createElement("span",{className:"login-btn logged-in capitalize"},e.auth.user.username,r.default.createElement("a",{href:"/accounts/logout/"},"Logout")),r.default.createElement("a",{href:"/groups/user/"},"My Assignments")))}(e):r.default.createElement("header",{className:"navbar"},r.default.createElement("section",{className:"navbar-section"},r.default.createElement("a",{href:"/"},r.default.createElement("img",{className:"menuToggle",alt:"logo",src:"/static/vframe-logo.png"})),r.default.createElement("a",{href:"/",className:"vcat-btn"},r.default.createElement("b",null,"VCAT"))),r.default.createElement("section",{className:"navbar-section last-navbar-section"},r.default.createElement("span",{className:""},r.default.createElement("a",{href:"/accounts/login",className:""},"Login"))))})},function(e,t,n){"use strict";
+/** @license React v16.5.2
+ * react.production.min.js
+ *
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * This source code is licensed under the MIT license found in the
+ * LICENSE file in the root directory of this source tree.
+ */var r=n(289),o="function"==typeof Symbol&&Symbol.for,i=o?Symbol.for("react.element"):60103,a=o?Symbol.for("react.portal"):60106,u=o?Symbol.for("react.fragment"):60107,s=o?Symbol.for("react.strict_mode"):60108,c=o?Symbol.for("react.profiler"):60114,l=o?Symbol.for("react.provider"):60109,f=o?Symbol.for("react.context"):60110,d=o?Symbol.for("react.async_mode"):60111,p=o?Symbol.for("react.forward_ref"):60112;o&&Symbol.for("react.placeholder");var h="function"==typeof Symbol&&Symbol.iterator;function m(e){for(var t=arguments.length-1,n="https://reactjs.org/docs/error-decoder.html?invariant="+e,r=0;r<t;r++)n+="&args[]="+encodeURIComponent(arguments[r+1]);!function(e,t,n,r,o,i,a,u){if(!e){if(e=void 0,void 0===t)e=Error("Minified exception occurred; use the non-minified dev environment for the full error message and additional helpful warnings.");else{var s=[n,r,o,i,a,u],c=0;(e=Error(t.replace(/%s/g,function(){return s[c++]}))).name="Invariant Violation"}throw e.framesToPop=1,e}}(!1,"Minified React error #"+e+"; visit %s for the full message or use the non-minified dev environment for full errors and additional helpful warnings. 
",n)}var v={isMounted:function(){return!1},enqueueForceUpdate:function(){},enqueueReplaceState:function(){},enqueueSetState:function(){}},y={};function g(e,t,n){this.props=e,this.context=t,this.refs=y,this.updater=n||v}function _(){}function b(e,t,n){this.props=e,this.context=t,this.refs=y,this.updater=n||v}g.prototype.isReactComponent={},g.prototype.setState=function(e,t){"object"!=typeof e&&"function"!=typeof e&&null!=e&&m("85"),this.updater.enqueueSetState(this,e,t,"setState")},g.prototype.forceUpdate=function(e){this.updater.enqueueForceUpdate(this,e,"forceUpdate")},_.prototype=g.prototype;var w=b.prototype=new _;w.constructor=b,r(w,g.prototype),w.isPureReactComponent=!0;var x={current:null,currentDispatcher:null},E=Object.prototype.hasOwnProperty,O={key:!0,ref:!0,__self:!0,__source:!0};function S(e,t,n){var r=void 0,o={},a=null,u=null;if(null!=t)for(r in void 0!==t.ref&&(u=t.ref),void 0!==t.key&&(a=""+t.key),t)E.call(t,r)&&!O.hasOwnProperty(r)&&(o[r]=t[r]);var s=arguments.length-2;if(1===s)o.children=n;else if(1<s){for(var c=Array(s),l=0;l<s;l++)c[l]=arguments[l+2];o.children=c}if(e&&e.defaultProps)for(r in s=e.defaultProps)void 0===o[r]&&(o[r]=s[r]);return{$$typeof:i,type:e,key:a,ref:u,props:o,_owner:x.current}}function T(e){return"object"==typeof e&&null!==e&&e.$$typeof===i}var k=/\/+/g,R=[];function j(e,t,n,r){if(R.length){var o=R.pop();return o.result=e,o.keyPrefix=t,o.func=n,o.context=r,o.count=0,o}return{result:e,keyPrefix:t,func:n,context:r,count:0}}function P(e){e.result=null,e.keyPrefix=null,e.func=null,e.context=null,e.count=0,10>R.length&&R.push(e)}function C(e,t,n){return null==e?0:function e(t,n,r,o){var u=typeof t;"undefined"!==u&&"boolean"!==u||(t=null);var s=!1;if(null===t)s=!0;else switch(u){case"string":case"number":s=!0;break;case"object":switch(t.$$typeof){case i:case a:s=!0}}if(s)return r(o,t,""===n?"."+M(t,0):n),1;if(s=0,n=""===n?".":n+":",Array.isArray(t))for(var c=0;c<t.length;c++){var l=n+M(u=t[c],c);s+=e(u,l,r,o)}else 
if(l=null===t||"object"!=typeof t?null:"function"==typeof(l=h&&t[h]||t["@@iterator"])?l:null,"function"==typeof l)for(t=l.call(t),c=0;!(u=t.next()).done;)s+=e(u=u.value,l=n+M(u,c++),r,o);else"object"===u&&m("31","[object Object]"==(r=""+t)?"object with keys {"+Object.keys(t).join(", ")+"}":r,"");return s}(e,"",t,n)}function M(e,t){return"object"==typeof e&&null!==e&&null!=e.key?function(e){var t={"=":"=0",":":"=2"};return"$"+(""+e).replace(/[=:]/g,function(e){return t[e]})}(e.key):t.toString(36)}function I(e,t){e.func.call(e.context,t,e.count++)}function A(e,t,n){var r=e.result,o=e.keyPrefix;e=e.func.call(e.context,t,e.count++),Array.isArray(e)?D(e,r,n,function(e){return e}):null!=e&&(T(e)&&(e=function(e,t){return{$$typeof:i,type:e.type,key:t,ref:e.ref,props:e.props,_owner:e._owner}}(e,o+(!e.key||t&&t.key===e.key?"":(""+e.key).replace(k,"$&/")+"/")+n)),r.push(e))}function D(e,t,n,r,o){var i="";null!=n&&(i=(""+n).replace(k,"$&/")+"/"),C(e,A,t=j(t,i,r,o)),P(t)}var N={Children:{map:function(e,t,n){if(null==e)return e;var r=[];return D(e,r,null,t,n),r},forEach:function(e,t,n){if(null==e)return e;C(e,I,t=j(null,null,t,n)),P(t)},count:function(e){return C(e,function(){return null},null)},toArray:function(e){var t=[];return D(e,t,null,function(e){return e}),t},only:function(e){return T(e)||m("143"),e}},createRef:function(){return{current:null}},Component:g,PureComponent:b,createContext:function(e,t){return void 0===t&&(t=null),(e={$$typeof:f,_calculateChangedBits:t,_currentValue:e,_currentValue2:e,Provider:null,Consumer:null,unstable_read:null}).Provider={$$typeof:l,_context:e},e.Consumer=e,e.unstable_read=function(e,t){var n=x.currentDispatcher;return null===n&&m("277"),n.readContext(e,t)}.bind(null,e),e},forwardRef:function(e){return{$$typeof:p,render:e}},Fragment:u,StrictMode:s,unstable_AsyncMode:d,unstable_Profiler:c,createElement:S,cloneElement:function(e,t,n){(null===e||void 0===e)&&m("267",e);var o=void 0,a=r({},e.props),u=e.key,s=e.ref,c=e._owner;if(null!=t){void 
0!==t.ref&&(s=t.ref,c=x.current),void 0!==t.key&&(u=""+t.key);var l=void 0;for(o in e.type&&e.type.defaultProps&&(l=e.type.defaultProps),t)E.call(t,o)&&!O.hasOwnProperty(o)&&(a[o]=void 0===t[o]&&void 0!==l?l[o]:t[o])}if(1===(o=arguments.length-2))a.children=n;else if(1<o){l=Array(o);for(var f=0;f<o;f++)l[f]=arguments[f+2];a.children=l}return{$$typeof:i,type:e.type,key:u,ref:s,props:a,_owner:c}},createFactory:function(e){var t=S.bind(null,e);return t.type=e,t},isValidElement:T,version:"16.5.2",__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED:{ReactCurrentOwner:x,assign:r}},L={default:N},U=L&&N||L;e.exports=U.default||U},function(e,t,n){"use strict";
+/*
+object-assign
+(c) Sindre Sorhus
+@license MIT
+*/var r=Object.getOwnPropertySymbols,o=Object.prototype.hasOwnProperty,i=Object.prototype.propertyIsEnumerable;e.exports=function(){try{if(!Object.assign)return!1;var e=new String("abc");if(e[5]="de","5"===Object.getOwnPropertyNames(e)[0])return!1;for(var t={},n=0;n<10;n++)t["_"+String.fromCharCode(n)]=n;if("0123456789"!==Object.getOwnPropertyNames(t).map(function(e){return t[e]}).join(""))return!1;var r={};return"abcdefghijklmnopqrst".split("").forEach(function(e){r[e]=e}),"abcdefghijklmnopqrst"===Object.keys(Object.assign({},r)).join("")}catch(e){return!1}}()?Object.assign:function(e,t){for(var n,a,u=function(e){if(null===e||void 0===e)throw new TypeError("Object.assign cannot be called with null or undefined");return Object(e)}(e),s=1;s<arguments.length;s++){for(var c in n=Object(arguments[s]))o.call(n,c)&&(u[c]=n[c]);if(r){a=r(n);for(var l=0;l<a.length;l++)i.call(n,a[l])&&(u[a[l]]=n[a[l]])}}return u}},function(e,t,n){"use strict";(function(e,r){var o,i=n(291);o="undefined"!=typeof self?self:"undefined"!=typeof window?window:void 0!==e?e:r;var a=Object(i.a)(o);t.a=a}).call(t,n(3),n(119)(e))},function(e,t,n){"use strict";t.a=function(e){var t,n=e.Symbol;"function"==typeof n?n.observable?t=n.observable:(t=n("observable"),n.observable=t):t="@@observable";return t}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=n(293),o=n(147),i=n(299);n.d(t,"Provider",function(){return r.b}),n.d(t,"createProvider",function(){return r.a}),n.d(t,"connectAdvanced",function(){return o.a}),n.d(t,"connect",function(){return i.a})},function(e,t,n){"use strict";t.a=u;var r=n(90),o=(n.n(r),n(145)),i=n.n(o),a=n(146);n(91);function u(){var e,t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"store",n=arguments[1]||t+"Subscription",o=function(e){function o(n,r){!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,o);var i=function(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - 
super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}(this,e.call(this,n,r));return i[t]=n.store,i}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(o,e),o.prototype.getChildContext=function(){var e;return(e={})[t]=this[t],e[n]=null,e},o.prototype.render=function(){return r.Children.only(this.props.children)},o}(r.Component);return o.propTypes={store:a.a.isRequired,children:i.a.element.isRequired},o.childContextTypes=((e={})[t]=a.a.isRequired,e[n]=a.b,e),o}t.b=u()},function(e,t,n){"use strict";var r=n(295);function o(){}e.exports=function(){function e(e,t,n,o,i,a){if(a!==r){var u=new Error("Calling PropTypes validators directly is not supported by the `prop-types` package. Use PropTypes.checkPropTypes() to call them. 
Read more at http://fb.me/use-check-prop-types");throw u.name="Invariant Violation",u}}function t(){return e}e.isRequired=e;var n={array:e,bool:e,func:e,number:e,object:e,string:e,symbol:e,any:e,arrayOf:t,element:e,instanceOf:t,node:e,objectOf:t,oneOf:t,oneOfType:t,shape:t,exact:t};return n.checkPropTypes=o,n.PropTypes=n,n}},function(e,t,n){"use strict";e.exports="SECRET_DO_NOT_PASS_THIS_OR_YOU_WILL_BE_FIRED"},function(e,t,n){"use strict";var r={childContextTypes:!0,contextTypes:!0,defaultProps:!0,displayName:!0,getDefaultProps:!0,getDerivedStateFromProps:!0,mixins:!0,propTypes:!0,type:!0},o={name:!0,length:!0,prototype:!0,caller:!0,callee:!0,arguments:!0,arity:!0},i=Object.defineProperty,a=Object.getOwnPropertyNames,u=Object.getOwnPropertySymbols,s=Object.getOwnPropertyDescriptor,c=Object.getPrototypeOf,l=c&&c(Object);e.exports=function e(t,n,f){if("string"!=typeof n){if(l){var d=c(n);d&&d!==l&&e(t,d,f)}var p=a(n);u&&(p=p.concat(u(n)));for(var h=0;h<p.length;++h){var m=p[h];if(!(r[m]||o[m]||f&&f[m])){var v=s(n,m);try{i(t,m,v)}catch(e){}}}return t}return t}},function(e,t,n){"use strict";e.exports=function(e,t,n,r,o,i,a,u){if(!e){var s;if(void 0===t)s=new Error("Minified exception occurred; use the non-minified dev environment for the full error message and additional helpful warnings.");else{var c=[n,r,o,i,a,u],l=0;(s=new Error(t.replace(/%s/g,function(){return c[l++]}))).name="Invariant Violation"}throw s.framesToPop=1,s}}},function(e,t,n){"use strict";n.d(t,"a",function(){return i});var r=null,o={notify:function(){}};var i=function(){function e(t,n,r){!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,e),this.store=t,this.parentSub=n,this.onStateChange=r,this.unsubscribe=null,this.listeners=o}return e.prototype.addNestedSub=function(e){return this.trySubscribe(),this.listeners.subscribe(e)},e.prototype.notifyNestedSubs=function(){this.listeners.notify()},e.prototype.isSubscribed=function(){return 
Boolean(this.unsubscribe)},e.prototype.trySubscribe=function(){this.unsubscribe||(this.unsubscribe=this.parentSub?this.parentSub.addNestedSub(this.onStateChange):this.store.subscribe(this.onStateChange),this.listeners=function(){var e=[],t=[];return{clear:function(){t=r,e=r},notify:function(){for(var n=e=t,r=0;r<n.length;r++)n[r]()},get:function(){return t},subscribe:function(n){var o=!0;return t===e&&(t=e.slice()),t.push(n),function(){o&&e!==r&&(o=!1,t===e&&(t=e.slice()),t.splice(t.indexOf(n),1))}}}}())},e.prototype.tryUnsubscribe=function(){this.unsubscribe&&(this.unsubscribe(),this.unsubscribe=null,this.listeners.clear(),this.listeners=o)},e}()},function(e,t,n){"use strict";var r=n(147),o=n(300),i=n(301),a=n(311),u=n(312),s=n(313),c=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e};function l(e,t,n){for(var r=t.length-1;r>=0;r--){var o=t[r](e);if(o)return o}return function(t,r){throw new Error("Invalid value of type "+typeof e+" for "+n+" argument when connecting component "+r.wrappedComponentName+".")}}function f(e,t){return e===t}t.a=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=e.connectHOC,n=void 0===t?r.a:t,d=e.mapStateToPropsFactories,p=void 0===d?a.a:d,h=e.mapDispatchToPropsFactories,m=void 0===h?i.a:h,v=e.mergePropsFactories,y=void 0===v?u.a:v,g=e.selectorFactory,_=void 0===g?s.a:g;return function(e,t,r){var i=arguments.length>3&&void 0!==arguments[3]?arguments[3]:{},a=i.pure,u=void 0===a||a,s=i.areStatesEqual,d=void 0===s?f:s,h=i.areOwnPropsEqual,v=void 0===h?o.a:h,g=i.areStatePropsEqual,b=void 0===g?o.a:g,w=i.areMergedPropsEqual,x=void 0===w?o.a:w,E=function(e,t){var n={};for(var r in e)t.indexOf(r)>=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return 
n}(i,["pure","areStatesEqual","areOwnPropsEqual","areStatePropsEqual","areMergedPropsEqual"]),O=l(e,p,"mapStateToProps"),S=l(t,m,"mapDispatchToProps"),T=l(r,y,"mergeProps");return n(_,c({methodName:"connect",getDisplayName:function(e){return"Connect("+e+")"},shouldHandleStateChanges:Boolean(e),initMapStateToProps:O,initMapDispatchToProps:S,initMergeProps:T,pure:u,areStatesEqual:d,areOwnPropsEqual:v,areStatePropsEqual:b,areMergedPropsEqual:x},E))}}()},function(e,t,n){"use strict";t.a=function(e,t){if(o(e,t))return!0;if("object"!=typeof e||null===e||"object"!=typeof t||null===t)return!1;var n=Object.keys(e),i=Object.keys(t);if(n.length!==i.length)return!1;for(var a=0;a<n.length;a++)if(!r.call(t,n[a])||!o(e[n[a]],t[n[a]]))return!1;return!0};var r=Object.prototype.hasOwnProperty;function o(e,t){return e===t?0!==e||0!==t||1/e==1/t:e!=e&&t!=t}},function(e,t,n){"use strict";var r=n(144),o=n(148);t.a=[function(e){return"function"==typeof e?Object(o.b)(e,"mapDispatchToProps"):void 0},function(e){return e?void 0:Object(o.a)(function(e){return{dispatch:e}})},function(e){return e&&"object"==typeof e?Object(o.a)(function(t){return Object(r.bindActionCreators)(e,t)}):void 0}]},function(e,t,n){"use strict";var r=n(303),o=n(308),i=n(310),a="[object Object]",u=Function.prototype,s=Object.prototype,c=u.toString,l=s.hasOwnProperty,f=c.call(Object);t.a=function(e){if(!Object(i.a)(e)||Object(r.a)(e)!=a)return!1;var t=Object(o.a)(e);if(null===t)return!0;var n=l.call(t,"constructor")&&t.constructor;return"function"==typeof n&&n instanceof n&&c.call(n)==f}},function(e,t,n){"use strict";var r=n(150),o=n(306),i=n(307),a="[object Null]",u="[object Undefined]",s=r.a?r.a.toStringTag:void 0;t.a=function(e){return null==e?void 0===e?u:a:s&&s in Object(e)?Object(o.a)(e):Object(i.a)(e)}},function(e,t,n){"use strict";var r=n(305),o="object"==typeof self&&self&&self.Object===Object&&self,i=r.a||o||Function("return this")();t.a=i},function(e,t,n){"use strict";(function(e){var n="object"==typeof 
e&&e&&e.Object===Object&&e;t.a=n}).call(t,n(3))},function(e,t,n){"use strict";var r=n(150),o=Object.prototype,i=o.hasOwnProperty,a=o.toString,u=r.a?r.a.toStringTag:void 0;t.a=function(e){var t=i.call(e,u),n=e[u];try{e[u]=void 0;var r=!0}catch(e){}var o=a.call(e);return r&&(t?e[u]=n:delete e[u]),o}},function(e,t,n){"use strict";var r=Object.prototype.toString;t.a=function(e){return r.call(e)}},function(e,t,n){"use strict";var r=n(309),o=Object(r.a)(Object.getPrototypeOf,Object);t.a=o},function(e,t,n){"use strict";t.a=function(e,t){return function(n){return e(t(n))}}},function(e,t,n){"use strict";t.a=function(e){return null!=e&&"object"==typeof e}},function(e,t,n){"use strict";var r=n(148);t.a=[function(e){return"function"==typeof e?Object(r.b)(e,"mapStateToProps"):void 0},function(e){return e?void 0:Object(r.a)(function(){return{}})}]},function(e,t,n){"use strict";n(149);var r=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e};function o(e,t,n){return r({},n,e,t)}t.a=[function(e){return"function"==typeof e?function(e){return function(t,n){n.displayName;var r=n.pure,o=n.areMergedPropsEqual,i=!1,a=void 0;return function(t,n,u){var s=e(t,n,u);return i?r&&o(s,a)||(a=s):(i=!0,a=s),a}}}(e):void 0},function(e){return e?void 0:function(){return o}}]},function(e,t,n){"use strict";t.a=function(e,t){var n=t.initMapStateToProps,i=t.initMapDispatchToProps,a=t.initMergeProps,u=function(e,t){var n={};for(var r in e)t.indexOf(r)>=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}(t,["initMapStateToProps","initMapDispatchToProps","initMergeProps"]),s=n(e,u),c=i(e,u),l=a(e,u);0;return(u.pure?o:r)(s,c,l,e,u)};n(314);function r(e,t,n,r){return function(o,i){return n(e(o,i),t(r,i),i)}}function o(e,t,n,r,o){var i=o.areStatesEqual,a=o.areOwnPropsEqual,u=o.areStatePropsEqual,s=!1,c=void 0,l=void 0,f=void 0,d=void 0,p=void 0;function h(o,s){var 
h=!a(s,l),m=!i(o,c);return c=o,l=s,h&&m?(f=e(c,l),t.dependsOnOwnProps&&(d=t(r,l)),p=n(f,d,l)):h?(e.dependsOnOwnProps&&(f=e(c,l)),t.dependsOnOwnProps&&(d=t(r,l)),p=n(f,d,l)):m?function(){var t=e(c,l),r=!u(t,f);return f=t,r&&(p=n(f,d,l)),p}():p}return function(o,i){return s?h(o,i):function(o,i){return f=e(c=o,l=i),d=t(r,l),p=n(f,d,l),s=!0,p}(o,i)}}},function(e,t,n){"use strict";n(91)},function(e,t,n){var r=n(316);"string"==typeof r&&(r=[[e.i,r,""]]);var o={hmr:!0,transform:void 0,insertInto:void 0};n(49)(r,o);r.locals&&(e.exports=r.locals)},function(e,t,n){(e.exports=n(48)(!1)).push([e.i,".navbar {\n height: 50px;\n color:#aaa;\n font-size: 14px;\n}\n.navbar b {\n font-weight: 900;\n}\n.navbar {\n background: #11F;\n color: white;\n font-family: 'Helvetica', sans-serif;\n}\n.navbar a {\n color: rgba(255,255,255,0.89);\n text-decoration: none;\n line-height: 18px;\n font-size: 14px;\n font-weight: 500;\n}\n.navbar section.first-navbar-section * {\n font-weight: bold;\n}\n.navbar section.first-navbar-section > * {\n padding: 5px 8px 5px 8px;\n}\n.navbar section > * {\n padding: 5px 5px;\n}\n.navbar section.first-navbar-section > .vcat-btn {\n font-size: 16px;\n padding-left: 0;\n}\n.navbar .btn-link:focus,\n.navbar .btn-link:hover,\n.navbar .btn-link:active,\n.navbar a:focus,\n.navbar a:hover,\n.navbar a:active {\n text-decoration: none;\n color: white;\n}\n.menubar a:focus,\n.menubar a:hover,\n.menubar a:active {\n color: white;\n}\n.menuToggle {\n width: 26px;\n height: 26px;\n cursor: pointer;\n margin: 0 0 0 5px;\n line-height: 1;\n}\n.navbar a.navbar-brand {\n font-size: .8rem;\n}\n\n.navbar .last-navbar-section {\n padding-right: 8px;\n}\n.navbar .logout {\n padding: 0 .25rem;\n}\n.navbar .logged-in {\n border-left: 1px solid #99f;\n margin-left: .25rem;\n padding: .25rem .25rem .25rem .75rem;\n user-select: none;\n text-transform: capitalize;\n color: rgba(255,255,255,0.89);\n position: relative;\n min-width: 65px;\n}\n.navbar .logged-in a {\n position: 
absolute;\n top: 0; left: 0;\n width: 100%;\n height: 100%;\n display: flex;\n justify-content: center;\n align-items: center;\n background: #11f;\n opacity: 0;\n transition: 0.1s all;\n}\n.navbar .logged-in:hover a {\n opacity: 1;\n}\n",""])},function(e,t){e.exports=function(e){var t="undefined"!=typeof window&&window.location;if(!t)throw new Error("fixUrls requires window.location");if(!e||"string"!=typeof e)return e;var n=t.protocol+"//"+t.host,r=n+t.pathname.replace(/\/[^\/]*$/,"/");return e.replace(/url\s*\(((?:[^)(]|\((?:[^)(]+|\([^)(]*\))*\))*)\)/gi,function(e,t){var o,i=t.trim().replace(/^"(.*)"$/,function(e,t){return t}).replace(/^'(.*)'$/,function(e,t){return t});return/^(#|data:|http:\/\/|https:\/\/|file:\/\/\/|\s*$)/i.test(i)?e:(o=0===i.indexOf("//")?i:0===i.indexOf("/")?n+i:r+i.replace(/^\.\//,""),"url("+JSON.stringify(o)+")")})}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=function(e){var t=e.to,n=e.className,i=void 0===n?"navlink":n,a=e.children;return r.default.createElement("span",{className:i},r.default.createElement(o.NavLink,{to:t},a))};var r=function(e){return e&&e.__esModule?e:{default:e}}(n(1)),o=n(16)},function(e,t,n){"use strict";var r=n(151),o=n.n(r),i=n(1),a=n.n(i),u=n(12),s=n.n(u),c=n(27),l=n(92);function f(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}var d=function(e){function t(){var n,r;!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,t);for(var o=arguments.length,i=Array(o),a=0;a<o;a++)i[a]=arguments[a];return n=r=f(this,e.call.apply(e,[this].concat(i))),r.history=Object(c.createBrowserHistory)(r.props),f(r,n)}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof 
t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),t.prototype.componentWillMount=function(){o()(!this.props.history,"<BrowserRouter> ignores the history prop. To use a custom history, use `import { Router }` instead of `import { BrowserRouter as Router }`.")},t.prototype.render=function(){return a.a.createElement(l.a,{history:this.history,children:this.props.children})},t}(a.a.Component);d.propTypes={basename:s.a.string,forceRefresh:s.a.bool,getUserConfirmation:s.a.func,keyLength:s.a.number,children:s.a.node},t.a=d},function(e,t,n){"use strict";var r=n(151),o=n.n(r),i=n(1),a=n.n(i),u=n(12),s=n.n(u),c=n(27),l=n(92);function f(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}var d=function(e){function t(){var n,r;!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,t);for(var o=arguments.length,i=Array(o),a=0;a<o;a++)i[a]=arguments[a];return n=r=f(this,e.call.apply(e,[this].concat(i))),r.history=Object(c.createHashHistory)(r.props),f(r,n)}return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}(t,e),t.prototype.componentWillMount=function(){o()(!this.props.history,"<HashRouter> ignores the history prop. 
To use a custom history, use `import { Router }` instead of `import { HashRouter as Router }`.")},t.prototype.render=function(){return a.a.createElement(l.a,{history:this.history,children:this.props.children})},t}(a.a.Component);d.propTypes={basename:s.a.string,getUserConfirmation:s.a.func,hashType:s.a.oneOf(["hashbang","noslash","slash"]),children:s.a.node},t.a=d},function(e,t,n){"use strict";var r=n(136);t.a=r.a},function(e,t,n){"use strict";var r=n(1),o=n.n(r),i=n(12),a=n.n(i),u=n(153),s=n(152),c=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e},l="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e};var f=function(e){var t=e.to,n=e.exact,r=e.strict,i=e.location,a=e.activeClassName,f=e.className,d=e.activeStyle,p=e.style,h=e.isActive,m=e["aria-current"],v=function(e,t){var n={};for(var r in e)t.indexOf(r)>=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}(e,["to","exact","strict","location","activeClassName","className","activeStyle","style","isActive","aria-current"]),y="object"===(void 0===t?"undefined":l(t))?t.pathname:t,g=y&&y.replace(/([.+*?=^!:${}()[\]|/\\])/g,"\\$1");return o.a.createElement(u.a,{path:g,exact:n,strict:r,location:i,children:function(e){var n=e.location,r=e.match,i=!!(h?h(r,n):r);return o.a.createElement(s.a,c({to:t,className:i?[f,a].filter(function(e){return e}).join(" 
"):f,style:i?c({},p,d):p,"aria-current":i&&m||null},v))}})};f.propTypes={to:s.a.propTypes.to,exact:a.a.bool,strict:a.a.bool,location:a.a.object,activeClassName:a.a.string,className:a.a.string,activeStyle:a.a.object,style:a.a.object,isActive:a.a.func,"aria-current":a.a.oneOf(["page","step","location","date","time","true"])},f.defaultProps={activeClassName:"active","aria-current":"page"},t.a=f},function(e,t,n){"use strict";var r=n(138);t.a=r.a},function(e,t,n){"use strict";var r=n(139);t.a=r.a},function(e,t,n){"use strict";var r=n(141);t.a=r.a},function(e,t,n){"use strict";var r=n(142);t.a=r.a},function(e,t,n){"use strict";var r=n(88);t.a=r.a},function(e,t,n){"use strict";var r=n(64);t.a=r.a},function(e,t,n){"use strict";var r=n(143);t.a=r.a},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=p(n(28)),o=p(n(5)),i=p(n(6)),a=p(n(7)),u=p(n(8)),s=p(n(9)),c=n(1),l=p(c),f=n(17),d=n(11);function p(e){return e&&e.__esModule?e:{default:e}}var h=function(e){function t(){return(0,i.default)(this,t),(0,u.default)(this,(t.__proto__||(0,o.default)(t)).apply(this,arguments))}return(0,s.default)(t,e),(0,a.default)(t,[{key:"render",value:function(){var e=this.props,t=e.tag,n=e.sha256,o=e.verified,i=e.keyframes,a=void 0===i?{}:i,u=e.labels,s=e.summary,c=e.aspectRatio,p=void 0===c?1.777:c,h=e.showAll,m=0,v=(0,r.default)(a).map(function(e){return parseInt(e,10)}),y=v.sort(function(e,t){return e-t}).map(function(e){var t=a[e];return t.length||h?(m+=t.length,{frame:e,detections:t}):null}).filter(function(e){return!!e}),g=y.reduce(function(e,t){return t.detections.reduce(function(e,t){var n=t.idx;return n in e||(e[n]=[u[n],0]),e[n][1]+=1,e},e),e},{}),_=(0,r.default)(g).map(function(e){return g[e]}).sort(function(e,t){return t[1]-e[1]});return s?l.default.createElement("div",null,l.default.createElement("h3",null,t," 
Detections"),l.default.createElement(d.TableTuples,{list:_})):l.default.createElement("div",null,l.default.createElement("h2",null,t),l.default.createElement("h3",null,"Detections"),l.default.createElement(d.TableTuples,{list:_}),l.default.createElement("h3",null,"Frames"),l.default.createElement("ul",{className:"meta"},l.default.createElement("li",null,"Displaying ",y.length," / ",(0,f.courtesyS)(v.length,"frame")),l.default.createElement("li",null,(0,f.courtesyS)(m,"detection")," found")),l.default.createElement("div",{className:"thumbnails"},y.map(function(e){var t=e.frame,r=e.detections;return l.default.createElement(d.Keyframe,{key:t,sha256:n,frame:t,verified:o,size:"th",showFrame:!0,showTimestamp:!0,aspectRatio:p,detectionList:[{labels:u,detections:r}]},l.default.createElement(d.DetectionList,{labels:u,detections:r,width:160,height:90}))})))}}]),t}(c.Component);t.default=h},function(e,t,n){n(332),e.exports=n(10).Object.keys},function(e,t,n){var r=n(41),o=n(45);n(124)("keys",function(){return function(e){return o(r(e))}})},function(e,t,n){var r=n(10),o=r.JSON||(r.JSON={stringify:JSON.stringify});e.exports=function(e){return o.stringify.apply(o,arguments)}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=function(e){var t=e.detections,n=e.width,i=e.height;return t.map(function(e,t){var a=e.rect;return a&&r.default.createElement("div",{className:"rect",key:t,style:{left:(0,o.px)(a[0],n),top:(0,o.px)(a[1],i),width:(0,o.px)(a[2]-a[0],n),height:(0,o.px)(a[3]-a[1],i)}})})};var r=function(e){return e&&e.__esModule?e:{default:e}}(n(1)),o=n(17)},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=function(e){var t=e.detections,n=e.labels,o=e.tag,i=e.showEmpty;return r.default.createElement("span",{className:"detectionList"},o&&r.default.createElement("h3",null,o),!t.length&&i&&r.default.createElement("label",null,r.default.createElement("small",null,"No detections")),t.map(function(e,t){var 
o=e.idx,i=e.score;e.rect;return r.default.createElement("label",{key:t},r.default.createElement("small",{className:"title"},(n[o]||"Unknown").replace(/_/," ")),r.default.createElement("small",null,i.toFixed(2)))}))};var r=function(e){return e&&e.__esModule?e:{default:e}}(n(1))},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=function(e){return r.default.createElement("footer",null)};var r=function(e){return e&&e.__esModule?e:{default:e}}(n(1));n(16),n(2)},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=function(){return r.default.createElement("div",{className:"loaderWrapper"},r.default.createElement("div",{className:"loader"}))};var r=function(e){return e&&e.__esModule?e:{default:e}}(n(1))},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=d(n(5)),o=d(n(6)),i=d(n(7)),a=d(n(8)),u=d(n(9)),s=n(1),c=d(s),l=n(16),f=n(2);function d(e){return e&&e.__esModule?e:{default:e}}var p=function(e){function t(){return(0,o.default)(this,t),(0,a.default)(this,(t.__proto__||(0,r.default)(t)).apply(this,arguments))}return(0,u.default)(t,e),(0,i.default)(t,[{key:"render",value:function(){var e=this.props.hash;return e?c.default.createElement("div",{className:"sidebar"},c.default.createElement("h4",null,"Media"),c.default.createElement(l.NavLink,{to:"/metadata/"+e+"/summary/"},"Summary"),c.default.createElement(l.NavLink,{to:"/metadata/"+e+"/mediaRecord/"},"Media Record"),c.default.createElement(l.NavLink,{to:"/metadata/"+e+"/mediaInfo/"},"Media Info"),c.default.createElement(l.NavLink,{to:"/metadata/"+e+"/sugarcube/"},"Sugarcube"),c.default.createElement("h4",null,"Keyframes"),c.default.createElement(l.NavLink,{to:"/metadata/"+e+"/keyframe/"},"Keyframe"),c.default.createElement("h4",null,"Detectors"),c.default.createElement(l.NavLink,{to:"/metadata/"+e+"/places365/"},"Places 
365"),c.default.createElement(l.NavLink,{to:"/metadata/"+e+"/coco/"},"Coco")):c.default.createElement("div",{className:"sidebar"})}}]),t}(s.Component);t.default=(0,f.connect)(function(e){return{hash:e.metadata.hash}})(p)},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=a(n(4)),o=a(n(1)),i=n(2);function a(e){return e&&e.__esModule?e:{default:e}}t.default=(0,i.connect)(function(e){return{app:e.metadata}})(function(e){var t=e.app,n=e.tag,i=e.View,a=t[n];return a?"loading"===a?o.default.createElement("div",{className:"tableObject loading"},n,": Loading"):a.err?o.default.createElement("div",{className:"tableObject error"},n," Error: ",a.err):o.default.createElement(i,(0,r.default)({data:a},e)):null})},function(e,t,n){e.exports={default:n(341),__esModule:!0}},function(e,t,n){n(342),e.exports=n(10).Object.assign},function(e,t,n){var r=n(18);r(r.S+r.F,"Object",{assign:n(343)})},function(e,t,n){"use strict";var r=n(45),o=n(85),i=n(59),a=n(41),u=n(130),s=Object.assign;e.exports=!s||n(35)(function(){var e={},t={},n=Symbol(),r="abcdefghijklmnopqrst";return e[n]=7,r.split("").forEach(function(e){t[e]=e}),7!=s({},e)[n]||Object.keys(s({},t)).join("")!=r})?function(e,t){for(var n=a(e),s=arguments.length,c=1,l=o.f,f=i.f;s>c;)for(var d,p=u(arguments[c++]),h=l?r(p).concat(l(p)):r(p),m=h.length,v=0;m>v;)f.call(p,d=h[v++])&&(n[d]=p[d]);return n}:s},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=function(e){var t=e.verified,n=e.sha256,c=e.frame,l=e.score,f=e.isSaved,d=e.fps,p=void 0===d?25:d,h=e.size,m=void 0===h?"th":h,v=e.className,y=e.showHash,g=e.showFrame,_=e.showTimestamp,b=e.showScore,w=e.showSearchButton,x=e.showSaveButton,E=e.to,O=e.children,S=e.detectionList,T=void 0===S?[]:S,k=e.aspectRatio,R=void 0===k?1.777:k,j=e.onClick,P=e.reviewActions;if(!n)return null;var C=i.widths[m],M=Math.round(C/R);return r.default.createElement("div",{className:(v||"keyframe")+(f?" 
isSaved":"")},r.default.createElement("div",{className:"thumbnail"},r.default.createElement(s,{to:E||(0,i.keyframeUri)(n,c),onClick:j},r.default.createElement("img",{alt:"Frame #"+c,src:(0,i.imageUrl)(t,n,c,m),width:C,height:M,onClick:j}),T.map(function(e,t){var n=e.labels,o=e.detections;return r.default.createElement(a.DetectionBoxes,{key:t,labels:n,detections:o,width:C,height:M})})),P&&(w||x)&&r.default.createElement("label",{className:"searchButtons"},w&&r.default.createElement(o.Link,{to:u.publicUrl.searchByVerifiedFrame(t,n,c),className:"btn"},"Search"),x&&(f?r.default.createElement("button",{onClick:function(){return P.unsave({hash:n,frame:c,verified:t})},className:"btn btn-primary saved"},"Saved"):r.default.createElement("button",{onClick:function(){return P.save({hash:n,frame:c,verified:t})},className:"btn btn save"},"Save")))),(y||g||_||b)&&r.default.createElement("label",null,y&&r.default.createElement("small",null,r.default.createElement(o.Link,{to:u.publicUrl.browse(n)},r.default.createElement("span",{title:n,className:"sha256 "+(0,i.verify)(t)},"▶ ",n.substr(0,6)))),g&&r.default.createElement("small",null,r.default.createElement("span",null,"Frame #",c)),_&&r.default.createElement("small",null,(0,i.timestamp)(c,p)),b&&!!l&&r.default.createElement("small",null,l)),O)};var r=function(e){return e&&e.__esModule?e:{default:e}}(n(1)),o=n(16),i=n(17),a=n(11),u=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}(n(21));var s=function(e){return e.onClick?e.children:e.to.match(/^http/)?r.default.createElement("a",{href:e.to,target:"_blank",rel:"noopener noreferrer"},e.children):r.default.createElement(o.Link,e)}},function(e,t,n){"use strict";function r(e){return function(t){var n=t.dispatch,r=t.getState;return function(t){return function(o){return"function"==typeof o?o(n,r,e):t(o)}}}}Object.defineProperty(t,"__esModule",{value:!0});var 
o=r();o.withExtraArgument=r,t.default=o},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=a(n(65)),o=a(n(4));t.default=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:u,t=arguments[1];switch(t.type){case i.metadata.set_hash:return e=(0,o.default)({},e,{hash:t.hash});case i.metadata.loading:return(0,o.default)({},e,(0,r.default)({},t.tag,"loading"));case i.metadata.loaded:return(0,o.default)({},e,(0,r.default)({},t.tag,t.data));case i.metadata.loaded_many:return t.data.reduce(function(e,t){return e[t.name]=t.data||"error",e},(0,o.default)({},e,(0,r.default)({},t.tag,"loaded")));case i.metadata.error:return(0,o.default)({},e,(0,r.default)({},t.tag,t.err));default:return e}};var i=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}(n(39));function a(e){return e&&e.__esModule?e:{default:e}}var u={}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=s(n(348)),o=s(n(65)),i=s(n(4));t.default=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:c(),t=arguments[1];switch(t.type){case a.search.loading:return"query"===t.tag&&t.offset?(0,i.default)({},e,{query:(0,i.default)({},e.query,{loadingMore:!0})}):(0,i.default)({},e,(0,o.default)({},t.tag,l[t.tag]||l.loading));case a.search.loaded:return"query"===t.tag&&t.offset?(0,i.default)({},e,{query:{query:t.data.query,results:[].concat((0,r.default)(e.query.results),(0,r.default)(t.data.results)),loadingMore:!1}}):(0,i.default)({},e,(0,o.default)({},t.tag,t.data));case a.search.error:return(0,i.default)({},e,(0,o.default)({},t.tag,{error:t.err}));case a.search.panic:return(0,i.default)({},c());case a.search.update_options:return u.default.setAll(t.opt),(0,i.default)({},e,{options:(0,i.default)({},t.opt)});default:return e}};var a=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in 
e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}(n(39)),u=s(n(158));function s(e){return e&&e.__esModule?e:{default:e}}var c=function(){return{query:{reset:!0},browse:{reset:!0},options:{thumbnailSize:(0,u.default)("thumbnailSize")||"th",perPage:parseInt((0,u.default)("perPage"),10)||50,groupByHash:(0,u.default)("groupByHash")}}},l={query:{query:{loading:!0},results:[]},loading:{loading:!0}}},function(e,t,n){"use strict";t.__esModule=!0;var r=function(e){return e&&e.__esModule?e:{default:e}}(n(154));t.default=function(e){if(Array.isArray(e)){for(var t=0,n=Array(e.length);t<e.length;t++)n[t]=e[t];return n}return(0,r.default)(e)}},function(e,t,n){n(44),n(350),e.exports=n(10).Array.from},function(e,t,n){"use strict";var r=n(34),o=n(18),i=n(41),a=n(155),u=n(156),s=n(81),c=n(351),l=n(95);o(o.S+o.F*!n(157)(function(e){Array.from(e)}),"Array",{from:function(e){var t,n,o,f,d=i(e),p="function"==typeof this?this:Array,h=arguments.length,m=h>1?arguments[1]:void 0,v=void 0!==m,y=0,g=l(d);if(v&&(m=r(m,h>2?arguments[2]:void 0,2)),void 0==g||p==Array&&u(g))for(n=new p(t=s(d.length));t>y;y++)c(n,y,v?m(d[y],y):d[y]);else for(f=g.call(d),n=new p;!(o=f.next()).done;y++)c(n,y,v?a(f,m,[o.value,y],!0):o.value);return n.length=y,n}})},function(e,t,n){"use strict";var r=n(22),o=n(43);e.exports=function(e,t,n){t in e?r.f(e,t,o(0,n)):e[t]=n}},function(e,t){!function(t,n){var r={version:"2.7.0",areas:{},apis:{},inherit:function(e,t){for(var n in e)t.hasOwnProperty(n)||(t[n]=e[n]);return t},stringify:function(e){return void 0===e||"function"==typeof e?e+"":JSON.stringify(e)},parse:function(e){try{return JSON.parse(e)}catch(t){return e}},fn:function(e,t){for(var n in r.storeAPI[e]=t,r.apis)r.apis[n][e]=t},get:function(e,t){return e.getItem(t)},set:function(e,t,n){e.setItem(t,n)},remove:function(e,t){e.removeItem(t)},key:function(e,t){return e.key(t)},length:function(e){return e.length},clear:function(e){e.clear()},Store:function(e,t,n){var 
o=r.inherit(r.storeAPI,function(e,t,n){return 0===arguments.length?o.getAll():"function"==typeof t?o.transact(e,t,n):void 0!==t?o.set(e,t,n):"string"==typeof e||"number"==typeof e?o.get(e):e?o.setAll(e,t):o.clear()});o._id=e;try{t.setItem("_safariPrivate_","sucks"),o._area=t,t.removeItem("_safariPrivate_")}catch(e){}return o._area||(o._area=r.inherit(r.storageAPI,{items:{},name:"fake"})),o._ns=n||"",r.areas[e]||(r.areas[e]=o._area),r.apis[o._ns+o._id]||(r.apis[o._ns+o._id]=o),o},storeAPI:{area:function(e,t){var n=this[e];return n&&n.area||(n=r.Store(e,t,this._ns),this[e]||(this[e]=n)),n},namespace:function(e,t){if(!e)return this._ns?this._ns.substring(0,this._ns.length-1):"";var n=e,o=this[n];return o&&o.namespace||(o=r.Store(this._id,this._area,this._ns+n+"."),this[n]||(this[n]=o),t||o.area("session",r.areas.session)),o},isFake:function(){return"fake"===this._area.name},toString:function(){return"store"+(this._ns?"."+this.namespace():"")+"["+this._id+"]"},has:function(e){return this._area.has?this._area.has(this._in(e)):!!(this._in(e)in this._area)},size:function(){return this.keys().length},each:function(e,t){for(var n=0,o=r.length(this._area);n<o;n++){var i=this._out(r.key(this._area,n));if(void 0!==i&&!1===e.call(this,i,t||this.get(i)))break;o>r.length(this._area)&&(o--,n--)}return t||this},keys:function(e){return this.each(function(e,t){t.push(e)},e||[])},get:function(e,t){var n=r.get(this._area,this._in(e));return null!==n?r.parse(n):t||n},getAll:function(e){return this.each(function(e,t){t[e]=this.get(e)},e||{})},transact:function(e,t,n){var r=this.get(e,n),o=t(r);return this.set(e,void 0===o?r:o),this},set:function(e,t,n){var o=this.get(e);return null!=o&&!1===n?t:r.set(this._area,this._in(e),r.stringify(t),n)||o},setAll:function(e,t){var n,r;for(var o in e)r=e[o],this.set(o,r,t)!==r&&(n=!0);return n},add:function(e,t){var n=this.get(e);if(n instanceof Array)t=n.concat(t);else if(null!==n){var o=typeof n;if(o===typeof t&&"object"===o){for(var i in 
t)n[i]=t[i];t=n}else t=n+t}return r.set(this._area,this._in(e),r.stringify(t)),t},remove:function(e){var t=this.get(e);return r.remove(this._area,this._in(e)),t},clear:function(){return this._ns?this.each(function(e){r.remove(this._area,this._in(e))},1):r.clear(this._area),this},clearAll:function(){var e=this._area;for(var t in r.areas)r.areas.hasOwnProperty(t)&&(this._area=r.areas[t],this.clear());return this._area=e,this},_in:function(e){return"string"!=typeof e&&(e=r.stringify(e)),this._ns?this._ns+e:e},_out:function(e){return this._ns?e&&0===e.indexOf(this._ns)?e.substring(this._ns.length):void 0:e}},storageAPI:{length:0,has:function(e){return this.items.hasOwnProperty(e)},key:function(e){var t=0;for(var n in this.items)if(this.has(n)&&e===t++)return n},setItem:function(e,t){this.has(e)||this.length++,this.items[e]=t},removeItem:function(e){this.has(e)&&(delete this.items[e],this.length--)},getItem:function(e){return this.has(e)?this.items[e]:null},clear:function(){for(var e in this.items)this.removeItem(e)},toString:function(){return this.length+" items in "+this.name+"Storage"}}},o=r.Store("local",function(){try{return localStorage}catch(e){}}());o.local=o,o._=r,o.area("session",function(){try{return sessionStorage}catch(e){}}()),"function"==typeof n&&void 0!==n.amd?n("store2",[],function(){return o}):void 0!==e&&e.exports?e.exports=o:(t.store&&(r.conflict=t.store),t.store=o)}(this,this.define)},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=s(n(65)),o=s(n(4));t.default=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:l,t=arguments[1],n=t.saved;switch(t.type){case i.review.save:case i.review.unsave:case i.review.refresh:return a.setSaved(n),(0,o.default)({},e,{count:a.getSavedCount(n),saved:(0,o.default)({},n)});case i.review.clear:return a.setSaved({}),(0,o.default)({},e,{count:0,saved:{}});case i.review.dedupe:return(0,o.default)({},e,{deduped:t.deduped});case 
i.review.loading:return(0,o.default)({},e,(0,r.default)({},t.tag,{loading:!0}));case i.review.loaded:return(0,o.default)({},e,(0,r.default)({},t.tag,t.data||{}));case i.review.error:return(0,o.default)({},e,(0,r.default)({},t.tag,{error:t.err}));default:return e}};var i=u(n(39)),a=u(n(159));function u(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}function s(e){return e&&e.__esModule?e:{default:e}}var c=a.getSaved(),l={saved:c,count:a.getSavedCount(c),deduped:!1,dedupe:{count:0},create:{}}},function(e,t,n){"use strict";const r=n(355),o=n(356);function i(e,t){return t.encode?t.strict?r(e):encodeURIComponent(e):e}function a(e,t){return t.decode?o(e):e}function u(e){const t=e.indexOf("?");return-1===t?"":e.slice(t+1)}function s(e,t){const n=function(e){let t;switch(e.arrayFormat){case"index":return(e,n,r)=>{t=/\[(\d*)\]$/.exec(e),e=e.replace(/\[\d*\]$/,""),t?(void 0===r[e]&&(r[e]={}),r[e][t[1]]=n):r[e]=n};case"bracket":return(e,n,r)=>{t=/(\[\])$/.exec(e),e=e.replace(/\[\]$/,""),t?void 0!==r[e]?r[e]=[].concat(r[e],n):r[e]=[n]:r[e]=n};default:return(e,t,n)=>{void 0!==n[e]?n[e]=[].concat(n[e],t):n[e]=t}}}(t=Object.assign({decode:!0,arrayFormat:"none"},t)),r=Object.create(null);if("string"!=typeof e)return r;if(!(e=e.trim().replace(/^[?#&]/,"")))return r;for(const o of e.split("&")){let[e,i]=o.replace(/\+/g," ").split("=");i=void 0===i?null:a(i,t),n(a(e,t),i,r)}return Object.keys(r).sort().reduce((e,t)=>{const n=r[t];return Boolean(n)&&"object"==typeof n&&!Array.isArray(n)?e[t]=function e(t){return Array.isArray(t)?t.sort():"object"==typeof t?e(Object.keys(t)).sort((e,t)=>Number(e)-Number(t)).map(e=>t[e]):t}(n):e[t]=n,e},Object.create(null))}t.extract=u,t.parse=s,t.stringify=((e,t)=>{if(!e)return"";const 
n=function(e){switch(e.arrayFormat){case"index":return(t,n,r)=>null===n?[i(t,e),"[",r,"]"].join(""):[i(t,e),"[",i(r,e),"]=",i(n,e)].join("");case"bracket":return(t,n)=>null===n?[i(t,e),"[]"].join(""):[i(t,e),"[]=",i(n,e)].join("");default:return(t,n)=>null===n?i(t,e):[i(t,e),"=",i(n,e)].join("")}}(t=Object.assign({encode:!0,strict:!0,arrayFormat:"none"},t)),r=Object.keys(e);return!1!==t.sort&&r.sort(t.sort),r.map(r=>{const o=e[r];if(void 0===o)return"";if(null===o)return i(r,t);if(Array.isArray(o)){const e=[];for(const t of o.slice())void 0!==t&&e.push(n(r,t,e.length));return e.join("&")}return i(r,t)+"="+i(o,t)}).filter(e=>e.length>0).join("&")}),t.parseUrl=((e,t)=>{const n=e.indexOf("#");return-1!==n&&(e=e.slice(0,n)),{url:e.split("?")[0]||"",query:s(u(e),t)}})},function(e,t,n){"use strict";e.exports=(e=>encodeURIComponent(e).replace(/[!'()*]/g,e=>`%${e.charCodeAt(0).toString(16).toUpperCase()}`))},function(e,t,n){"use strict";var r=new RegExp("%[a-f0-9]{2}","gi"),o=new RegExp("(%[a-f0-9]{2})+","gi");function i(e,t){try{return decodeURIComponent(e.join(""))}catch(e){}if(1===e.length)return e;t=t||1;var n=e.slice(0,t),r=e.slice(t);return Array.prototype.concat.call([],i(n),i(r))}function a(e){try{return decodeURIComponent(e)}catch(o){for(var t=e.match(r),n=1;n<t.length;n++)t=(e=i(t,n).join("")).match(r);return e}}e.exports=function(e){if("string"!=typeof e)throw new TypeError("Expected `encodedURI` to be of type `string`, got `"+typeof e+"`");try{return e=e.replace(/\+/g," "),decodeURIComponent(e)}catch(t){return function(e){for(var t={"%FE%FF":"��","%FF%FE":"��"},n=o.exec(e);n;){try{t[n[0]]=decodeURIComponent(n[0])}catch(e){var r=a(n[0]);r!==n[0]&&(t[n[0]]=r)}n=o.exec(e)}t["%C2"]="�";for(var i=Object.keys(t),u=0;u<i.length;u++){var s=i[u];e=e.replace(new RegExp(s,"g"),t[s])}return e}(e)}}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var 
r=m(n(358)),o=m(n(160)),i=m(n(28)),a=m(n(4)),u=m(n(1)),s=n(16),c=n(15),l=n(2),f=n(11),d=h(n(97)),p=h(n(21));function h(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}function m(e){return e&&e.__esModule?e:{default:e}}function v(e){var t=e.saved,n=void 0===t?{}:t,o=e.frames,i=e.options,c=e.review,l=(e.search,e.minDistance),d=e.label,h=e.count,m=(0,r.default)(e,["saved","frames","options","review","search","minDistance","label","count"]);return o?u.default.createElement("div",{className:d?"keyframes keyframeGroup":"keyframes"},d&&u.default.createElement("h4",null,u.default.createElement(s.Link,{to:p.publicUrl.browse(d)},d)," (",h,")"),o.map(function(e){var t=e.hash,r=e.frame,o=e.verified,s=e.distance;return u.default.createElement(f.Keyframe,(0,a.default)({key:t+"_"+r,sha256:t,frame:r,score:100-Math.round(s-l)+"%",verified:o,isSaved:!!n[t]&&!!n[t].frames&&!!n[t].frames[parseInt(r,10)],size:i.thumbnailSize,onClick:function(){return c.toggleSaved({verified:o,hash:t,frame:r})},reviewActions:c},m))})):null}t.default=(0,l.connect)(function(e){return{saved:e.review.saved,options:e.search.options}},function(e){return{review:(0,c.bindActionCreators)((0,a.default)({},d),e),search:(0,c.bindActionCreators)((0,a.default)({},p),e)}})(function(e){var t=e.frames,n=e.groupByHash,r=0;if(t&&t.length&&(r=t[0].distance||0),!n)return u.default.createElement(v,(0,a.default)({minDistance:r},e));var s=t.reduce(function(e,t){return e[t.hash]?e[t.hash].push(t):e[t.hash]=[t],e},{});return(0,i.default)(s).map(function(e){return[s[e].length,e]}).sort(function(e,t){return t[0]-e[0]}).map(function(t){var n=(0,o.default)(t,2),i=n[0],c=n[1];return u.default.createElement(v,(0,a.default)({},e,{count:i,key:c,minDistance:r,frames:s[c],label:c}))})})},function(e,t,n){"use strict";t.__esModule=!0,t.default=function(e,t){var n={};for(var r in 
e)t.indexOf(r)>=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}},function(e,t,n){e.exports={default:n(360),__esModule:!0}},function(e,t,n){n(58),n(44),e.exports=n(361)},function(e,t,n){var r=n(96),o=n(14)("iterator"),i=n(36);e.exports=n(10).isIterable=function(e){var t=Object(e);return void 0!==t[o]||"@@iterator"in t||i.hasOwnProperty(r(t))}},function(e,t,n){e.exports={default:n(363),__esModule:!0}},function(e,t,n){n(58),n(44),e.exports=n(364)},function(e,t,n){var r=n(20),o=n(95);e.exports=n(10).getIterator=function(e){var t=o(e);if("function"!=typeof t)throw TypeError(e+" is not iterable!");return r(t.call(e))}},function(e,t,n){e.exports={default:n(366),__esModule:!0}},function(e,t,n){n(134),n(44),n(58),n(367),n(375),n(376),e.exports=n(10).Promise},function(e,t,n){"use strict";var r,o,i,a,u=n(42),s=n(13),c=n(34),l=n(96),f=n(18),d=n(23),p=n(56),h=n(368),m=n(369),v=n(161),y=n(162).set,g=n(371)(),_=n(98),b=n(163),w=n(372),x=n(164),E=s.TypeError,O=s.process,S=O&&O.versions,T=S&&S.v8||"",k=s.Promise,R="process"==l(O),j=function(){},P=o=_.f,C=!!function(){try{var e=k.resolve(1),t=(e.constructor={})[n(14)("species")]=function(e){e(j,j)};return(R||"function"==typeof PromiseRejectionEvent)&&e.then(j)instanceof t&&0!==T.indexOf("6.6")&&-1===w.indexOf("Chrome/66")}catch(e){}}(),M=function(e){var t;return!(!d(e)||"function"!=typeof(t=e.then))&&t},I=function(e,t){if(!e._n){e._n=!0;var n=e._c;g(function(){for(var r=e._v,o=1==e._s,i=0,a=function(t){var n,i,a,u=o?t.ok:t.fail,s=t.resolve,c=t.reject,l=t.domain;try{u?(o||(2==e._h&&N(e),e._h=1),!0===u?n=r:(l&&l.enter(),n=u(r),l&&(l.exit(),a=!0)),n===t.promise?c(E("Promise-chain cycle")):(i=M(n))?i.call(n,s,c):s(n)):c(r)}catch(e){l&&!a&&l.exit(),c(e)}};n.length>i;)a(n[i++]);e._c=[],e._n=!1,t&&!e._h&&A(e)})}},A=function(e){y.call(s,function(){var t,n,r,o=e._v,i=D(e);if(i&&(t=b(function(){R?O.emit("unhandledRejection",o,e):(n=s.onunhandledrejection)?n({promise:e,reason:o}):(r=s.console)&&r.error&&r.error("Unhandled 
promise rejection",o)}),e._h=R||D(e)?2:1),e._a=void 0,i&&t.e)throw t.v})},D=function(e){return 1!==e._h&&0===(e._a||e._c).length},N=function(e){y.call(s,function(){var t;R?O.emit("rejectionHandled",e):(t=s.onrejectionhandled)&&t({promise:e,reason:e._v})})},L=function(e){var t=this;t._d||(t._d=!0,(t=t._w||t)._v=e,t._s=2,t._a||(t._a=t._c.slice()),I(t,!0))},U=function(e){var t,n=this;if(!n._d){n._d=!0,n=n._w||n;try{if(n===e)throw E("Promise can't be resolved itself");(t=M(e))?g(function(){var r={_w:n,_d:!1};try{t.call(e,c(U,r,1),c(L,r,1))}catch(e){L.call(r,e)}}):(n._v=e,n._s=1,I(n,!1))}catch(e){L.call({_w:n,_d:!1},e)}}};C||(k=function(e){h(this,k,"Promise","_h"),p(e),r.call(this);try{e(c(U,this,1),c(L,this,1))}catch(e){L.call(this,e)}},(r=function(e){this._c=[],this._a=void 0,this._s=0,this._d=!1,this._v=void 0,this._h=0,this._n=!1}).prototype=n(373)(k.prototype,{then:function(e,t){var n=P(v(this,k));return n.ok="function"!=typeof e||e,n.fail="function"==typeof t&&t,n.domain=R?O.domain:void 0,this._c.push(n),this._a&&this._a.push(n),this._s&&I(this,!1),n.promise},catch:function(e){return this.then(void 0,e)}}),i=function(){var e=new r;this.promise=e,this.resolve=c(U,e,1),this.reject=c(L,e,1)},_.f=P=function(e){return e===k||e===a?new i(e):o(e)}),f(f.G+f.W+f.F*!C,{Promise:k}),n(57)(k,"Promise"),n(374)("Promise"),a=n(10).Promise,f(f.S+f.F*!C,"Promise",{reject:function(e){var t=P(this);return(0,t.reject)(e),t.promise}}),f(f.S+f.F*(u||!C),"Promise",{resolve:function(e){return x(u&&this===a?k:this,e)}}),f(f.S+f.F*!(C&&n(157)(function(e){k.all(e).catch(j)})),"Promise",{all:function(e){var t=this,n=P(t),r=n.resolve,o=n.reject,i=b(function(){var n=[],i=0,a=1;m(e,!1,function(e){var u=i++,s=!1;n.push(void 0),a++,t.resolve(e).then(function(e){s||(s=!0,n[u]=e,--a||r(n))},o)}),--a||r(n)});return i.e&&o(i.v),n.promise},race:function(e){var t=this,n=P(t),r=n.reject,o=b(function(){m(e,!1,function(e){t.resolve(e).then(n.resolve,r)})});return 
o.e&&r(o.v),n.promise}})},function(e,t){e.exports=function(e,t,n,r){if(!(e instanceof t)||void 0!==r&&r in e)throw TypeError(n+": incorrect invocation!");return e}},function(e,t,n){var r=n(34),o=n(155),i=n(156),a=n(20),u=n(81),s=n(95),c={},l={};(t=e.exports=function(e,t,n,f,d){var p,h,m,v,y=d?function(){return e}:s(e),g=r(n,f,t?2:1),_=0;if("function"!=typeof y)throw TypeError(e+" is not iterable!");if(i(y)){for(p=u(e.length);p>_;_++)if((v=t?g(a(h=e[_])[0],h[1]):g(e[_]))===c||v===l)return v}else for(m=y.call(e);!(h=m.next()).done;)if((v=o(m,g,h.value,t))===c||v===l)return v}).BREAK=c,t.RETURN=l},function(e,t){e.exports=function(e,t,n){var r=void 0===n;switch(t.length){case 0:return r?e():e.call(n);case 1:return r?e(t[0]):e.call(n,t[0]);case 2:return r?e(t[0],t[1]):e.call(n,t[0],t[1]);case 3:return r?e(t[0],t[1],t[2]):e.call(n,t[0],t[1],t[2]);case 4:return r?e(t[0],t[1],t[2],t[3]):e.call(n,t[0],t[1],t[2],t[3])}return e.apply(n,t)}},function(e,t,n){var r=n(13),o=n(162).set,i=r.MutationObserver||r.WebKitMutationObserver,a=r.process,u=r.Promise,s="process"==n(46)(a);e.exports=function(){var e,t,n,c=function(){var r,o;for(s&&(r=a.domain)&&r.exit();e;){o=e.fn,e=e.next;try{o()}catch(r){throw e?n():t=void 0,r}}t=void 0,r&&r.enter()};if(s)n=function(){a.nextTick(c)};else if(!i||r.navigator&&r.navigator.standalone)if(u&&u.resolve){var l=u.resolve(void 0);n=function(){l.then(c)}}else n=function(){o.call(r,c)};else{var f=!0,d=document.createTextNode("");new i(c).observe(d,{characterData:!0}),n=function(){d.data=f=!f}}return function(r){var o={fn:r,next:void 0};t&&(t.next=o),e||(e=o,n()),t=o}}},function(e,t,n){var r=n(13).navigator;e.exports=r&&r.userAgent||""},function(e,t,n){var r=n(26);e.exports=function(e,t,n){for(var o in t)n&&e[o]?e[o]=t[o]:r(e,o,t[o]);return e}},function(e,t,n){"use strict";var r=n(13),o=n(10),i=n(22),a=n(24),u=n(14)("species");e.exports=function(e){var t="function"==typeof o[e]?o[e]:r[e];a&&t&&!t[u]&&i.f(t,u,{configurable:!0,get:function(){return 
this}})}},function(e,t,n){"use strict";var r=n(18),o=n(10),i=n(13),a=n(161),u=n(164);r(r.P+r.R,"Promise",{finally:function(e){var t=a(this,o.Promise||i.Promise),n="function"==typeof e;return this.then(n?function(n){return u(t,e()).then(function(){return n})}:e,n?function(n){return u(t,e()).then(function(){throw n})}:e)}})},function(e,t,n){"use strict";var r=n(18),o=n(98),i=n(163);r(r.S,"Promise",{try:function(e){var t=o.f(this),n=i(e);return(n.e?t.reject:t.resolve)(n.v),t.promise}})},function(e,t,n){var r=n(0);e.exports=function(e,t,n,o){var i=r(e).getTime(),a=r(t).getTime(),u=r(n).getTime(),s=r(o).getTime();if(i>a||u>s)throw new Error("The start of the range cannot be after the end of the range");return i<s&&u<a}},function(e,t,n){var r=n(0);e.exports=function(e,t){if(!(t instanceof Array))throw new TypeError(toString.call(t)+" is not an instance of Array");var n,o,i=r(e).getTime();return t.forEach(function(e,t){var a=r(e),u=Math.abs(i-a.getTime());(void 0===n||u<o)&&(n=t,o=u)}),n}},function(e,t,n){var r=n(0);e.exports=function(e,t){if(!(t instanceof Array))throw new TypeError(toString.call(t)+" is not an instance of Array");var n,o,i=r(e).getTime();return t.forEach(function(e){var t=r(e),a=Math.abs(i-t.getTime());(void 0===n||a<o)&&(n=t,o=a)}),n}},function(e,t,n){var r=n(30),o=6e4,i=6048e5;e.exports=function(e,t){var n=r(e),a=r(t),u=n.getTime()-n.getTimezoneOffset()*o,s=a.getTime()-a.getTimezoneOffset()*o;return Math.round((u-s)/i)}},function(e,t,n){var r=n(175),o=n(0);e.exports=function(e,t){var n=o(e),i=o(t);return 4*(n.getFullYear()-i.getFullYear())+(r(n)-r(i))}},function(e,t,n){var r=n(66),o=6e4,i=6048e5;e.exports=function(e,t,n){var a=r(e,n),u=r(t,n),s=a.getTime()-a.getTimezoneOffset()*o,c=u.getTime()-u.getTimezoneOffset()*o;return Math.round((s-c)/i)}},function(e,t,n){var r=n(69),o=36e5;e.exports=function(e,t){var n=r(e,t)/o;return n>0?Math.floor(n):Math.ceil(n)}},function(e,t,n){var r=n(0),o=n(173),i=n(53),a=n(178);e.exports=function(e,t){var 
n=r(e),u=r(t),s=i(n,u),c=Math.abs(o(n,u));return n=a(n,s*c),s*(c-(i(n,u)===-s))}},function(e,t,n){var r=n(69),o=6e4;e.exports=function(e,t){var n=r(e,t)/o;return n>0?Math.floor(n):Math.ceil(n)}},function(e,t,n){var r=n(103);e.exports=function(e,t){var n=r(e,t)/3;return n>0?Math.floor(n):Math.ceil(n)}},function(e,t,n){var r=n(177);e.exports=function(e,t){var n=r(e,t)/7;return n>0?Math.floor(n):Math.ceil(n)}},function(e,t,n){var r=n(0),o=n(176),i=n(53);e.exports=function(e,t){var n=r(e),a=r(t),u=i(n,a),s=Math.abs(o(n,a));return n.setFullYear(n.getFullYear()-u*s),u*(s-(i(n,a)===-u))}},function(e,t){e.exports=function(){var e={lessThanXSeconds:{one:"less than a second",other:"less than {{count}} seconds"},xSeconds:{one:"1 second",other:"{{count}} seconds"},halfAMinute:"half a minute",lessThanXMinutes:{one:"less than a minute",other:"less than {{count}} minutes"},xMinutes:{one:"1 minute",other:"{{count}} minutes"},aboutXHours:{one:"about 1 hour",other:"about {{count}} hours"},xHours:{one:"1 hour",other:"{{count}} hours"},xDays:{one:"1 day",other:"{{count}} days"},aboutXMonths:{one:"about 1 month",other:"about {{count}} months"},xMonths:{one:"1 month",other:"{{count}} months"},aboutXYears:{one:"about 1 year",other:"about {{count}} years"},xYears:{one:"1 year",other:"{{count}} years"},overXYears:{one:"over 1 year",other:"over {{count}} years"},almostXYears:{one:"almost 1 year",other:"almost {{count}} years"}};return{localize:function(t,n,r){var o;return r=r||{},o="string"==typeof e[t]?e[t]:1===n?e[t].one:e[t].other.replace("{{count}}",n),r.addSuffix?r.comparison>0?"in "+o:o+" ago":o}}}},function(e,t,n){var r=n(391);e.exports=function(){var 
e=["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"],t=["January","February","March","April","May","June","July","August","September","October","November","December"],n=["Su","Mo","Tu","We","Th","Fr","Sa"],o=["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],i=["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],a=["AM","PM"],u=["am","pm"],s=["a.m.","p.m."],c={MMM:function(t){return e[t.getMonth()]},MMMM:function(e){return t[e.getMonth()]},dd:function(e){return n[e.getDay()]},ddd:function(e){return o[e.getDay()]},dddd:function(e){return i[e.getDay()]},A:function(e){return e.getHours()/12>=1?a[1]:a[0]},a:function(e){return e.getHours()/12>=1?u[1]:u[0]},aa:function(e){return e.getHours()/12>=1?s[1]:s[0]}};return["M","D","DDD","d","Q","W"].forEach(function(e){c[e+"o"]=function(t,n){return function(e){var t=e%100;if(t>20||t<10)switch(t%10){case 1:return e+"st";case 2:return e+"nd";case 3:return e+"rd"}return e+"th"}(n[e](t))}}),{formatters:c,formattingTokensRegExp:r(c)}}},function(e,t){var n=["M","MM","Q","D","DD","DDD","DDDD","d","E","W","WW","YY","YYYY","GG","GGGG","H","HH","h","hh","m","mm","s","ss","S","SS","SSS","Z","ZZ","X","x"];e.exports=function(e){var t=[];for(var r in e)e.hasOwnProperty(r)&&t.push(r);var o=n.concat(t).sort().reverse();return new RegExp("(\\[[^\\[]*\\])|(\\\\)?("+o.join("|")+"|.)","g")}},function(e,t,n){var r=n(102),o=n(0),i=n(104),a=n(105),u=1440,s=43200,c=525600;e.exports=function(e,t,n){var l=n||{},f=r(e,t),d=l.locale,p=a.distanceInWords.localize;d&&d.distanceInWords&&d.distanceInWords.localize&&(p=d.distanceInWords.localize);var h,m,v,y={addSuffix:Boolean(l.addSuffix),comparison:f};f>0?(h=o(e),m=o(t)):(h=o(t),m=o(e));var g=Math[l.partialMethod?String(l.partialMethod):"floor"],_=i(m,h),b=m.getTimezoneOffset()-h.getTimezoneOffset(),w=g(_/60)-b;if("s"===(v=l.unit?String(l.unit):w<1?"s":w<60?"m":w<u?"h":w<s?"d":w<c?"M":"Y"))return p("xSeconds",_,y);if("m"===v)return p("xMinutes",w,y);if("h"===v)return 
p("xHours",g(w/60),y);if("d"===v)return p("xDays",g(w/u),y);if("M"===v)return p("xMonths",g(w/s),y);if("Y"===v)return p("xYears",g(w/c),y);throw new Error("Unknown unit: "+v)}},function(e,t,n){var r=n(179);e.exports=function(e,t){return r(Date.now(),e,t)}},function(e,t,n){var r=n(0);e.exports=function(e,t,n){var o=r(e),i=r(t),a=void 0!==n?n:1,u=i.getTime();if(o.getTime()>u)throw new Error("The first date cannot be after the second date");var s=[],c=o;for(c.setHours(0,0,0,0);c.getTime()<=u;)s.push(r(c)),c.setDate(c.getDate()+a);return s}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e);return t.setMinutes(59,59,999),t}},function(e,t,n){var r=n(180);e.exports=function(e){return r(e,{weekStartsOn:1})}},function(e,t,n){var r=n(29),o=n(30);e.exports=function(e){var t=r(e),n=new Date(0);n.setFullYear(t+1,0,4),n.setHours(0,0,0,0);var i=o(n);return i.setMilliseconds(i.getMilliseconds()-1),i}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e);return t.setSeconds(59,999),t}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e),n=t.getMonth(),o=n-n%3+3;return t.setMonth(o,0),t.setHours(23,59,59,999),t}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e);return t.setMilliseconds(999),t}},function(e,t,n){var r=n(106);e.exports=function(){return r(new Date)}},function(e,t){e.exports=function(){var e=new Date,t=e.getFullYear(),n=e.getMonth(),r=e.getDate(),o=new Date(0);return o.setFullYear(t,n,r+1),o.setHours(23,59,59,999),o}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e),n=t.getFullYear();return t.setFullYear(n+1,0,0),t.setHours(23,59,59,999),t}},function(e,t){e.exports=function(){var e=new Date,t=e.getFullYear(),n=e.getMonth(),r=e.getDate(),o=new Date(0);return o.setFullYear(t,n,r-1),o.setHours(23,59,59,999),o}},function(e,t,n){var r=n(182),o=n(107),i=n(29),a=n(0),u=n(184),s=n(105);var c={M:function(e){return e.getMonth()+1},MM:function(e){return d(e.getMonth()+1,2)},Q:function(e){return 
Math.ceil((e.getMonth()+1)/3)},D:function(e){return e.getDate()},DD:function(e){return d(e.getDate(),2)},DDD:function(e){return r(e)},DDDD:function(e){return d(r(e),3)},d:function(e){return e.getDay()},E:function(e){return e.getDay()||7},W:function(e){return o(e)},WW:function(e){return d(o(e),2)},YY:function(e){return d(e.getFullYear(),4).substr(2)},YYYY:function(e){return d(e.getFullYear(),4)},GG:function(e){return String(i(e)).substr(2)},GGGG:function(e){return i(e)},H:function(e){return e.getHours()},HH:function(e){return d(e.getHours(),2)},h:function(e){var t=e.getHours();return 0===t?12:t>12?t%12:t},hh:function(e){return d(c.h(e),2)},m:function(e){return e.getMinutes()},mm:function(e){return d(e.getMinutes(),2)},s:function(e){return e.getSeconds()},ss:function(e){return d(e.getSeconds(),2)},S:function(e){return Math.floor(e.getMilliseconds()/100)},SS:function(e){return d(Math.floor(e.getMilliseconds()/10),2)},SSS:function(e){return d(e.getMilliseconds(),3)},Z:function(e){return f(e.getTimezoneOffset(),":")},ZZ:function(e){return f(e.getTimezoneOffset())},X:function(e){return Math.floor(e.getTime()/1e3)},x:function(e){return e.getTime()}};function l(e){return e.match(/\[[\s\S]/)?e.replace(/^\[|]$/g,""):e.replace(/\\/g,"")}function f(e,t){t=t||"";var n=e>0?"-":"+",r=Math.abs(e),o=r%60;return n+d(Math.floor(r/60),2)+t+d(o,2)}function d(e,t){for(var n=Math.abs(e).toString();n.length<t;)n="0"+n;return n}e.exports=function(e,t,n){var r=t?String(t):"YYYY-MM-DDTHH:mm:ss.SSSZ",o=(n||{}).locale,i=s.format.formatters,f=s.format.formattingTokensRegExp;o&&o.format&&o.format.formatters&&(i=o.format.formatters,o.format.formattingTokensRegExp&&(f=o.format.formattingTokensRegExp));var d=a(e);return u(d)?function(e,t,n){var r,o,i=e.match(n),a=i.length;for(r=0;r<a;r++)o=t[i[r]]||c[i[r]],i[r]=o||l(i[r]);return function(e){for(var t="",n=0;n<a;n++)i[n]instanceof Function?t+=i[n](e,c):t+=i[n];return t}}(r,i,f)(d):"Invalid Date"}},function(e,t,n){var 
r=n(0);e.exports=function(e){return r(e).getDate()}},function(e,t,n){var r=n(0);e.exports=function(e){return r(e).getDay()}},function(e,t,n){var r=n(185);e.exports=function(e){return r(e)?366:365}},function(e,t,n){var r=n(0);e.exports=function(e){return r(e).getHours()}},function(e,t,n){var r=n(52),o=n(101),i=6048e5;e.exports=function(e){var t=r(e),n=r(o(t,60)).valueOf()-t.valueOf();return Math.round(n/i)}},function(e,t,n){var r=n(0);e.exports=function(e){return r(e).getMilliseconds()}},function(e,t,n){var r=n(0);e.exports=function(e){return r(e).getMinutes()}},function(e,t,n){var r=n(0);e.exports=function(e){return r(e).getMonth()}},function(e,t,n){var r=n(0),o=864e5;e.exports=function(e,t,n,i){var a=r(e).getTime(),u=r(t).getTime(),s=r(n).getTime(),c=r(i).getTime();if(a>u||s>c)throw new Error("The start of the range cannot be after the end of the range");if(!(a<c&&s<u))return 0;var l=(c>u?u:c)-(s<a?a:s);return Math.ceil(l/o)}},function(e,t,n){var r=n(0);e.exports=function(e){return r(e).getSeconds()}},function(e,t,n){var r=n(0);e.exports=function(e){return r(e).getTime()}},function(e,t,n){var r=n(0);e.exports=function(e){return r(e).getFullYear()}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e),o=r(t);return n.getTime()>o.getTime()}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e),o=r(t);return n.getTime()<o.getTime()}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e),o=r(t);return n.getTime()===o.getTime()}},function(e,t,n){var r=n(0);e.exports=function(e){return 1===r(e).getDate()}},function(e,t,n){var r=n(0);e.exports=function(e){return 5===r(e).getDay()}},function(e,t,n){var r=n(0);e.exports=function(e){return r(e).getTime()>(new Date).getTime()}},function(e,t,n){var r=n(0),o=n(106),i=n(181);e.exports=function(e){var t=r(e);return o(t).getTime()===i(t).getTime()}},function(e,t,n){var r=n(0);e.exports=function(e){return 1===r(e).getDay()}},function(e,t,n){var r=n(0);e.exports=function(e){return r(e).getTime()<(new 
Date).getTime()}},function(e,t,n){var r=n(31);e.exports=function(e,t){var n=r(e),o=r(t);return n.getTime()===o.getTime()}},function(e,t,n){var r=n(0);e.exports=function(e){return 6===r(e).getDay()}},function(e,t,n){var r=n(0);e.exports=function(e){return 0===r(e).getDay()}},function(e,t,n){var r=n(187);e.exports=function(e){return r(new Date,e)}},function(e,t,n){var r=n(189);e.exports=function(e){return r(new Date,e)}},function(e,t,n){var r=n(190);e.exports=function(e){return r(new Date,e)}},function(e,t,n){var r=n(191);e.exports=function(e){return r(new Date,e)}},function(e,t,n){var r=n(193);e.exports=function(e){return r(new Date,e)}},function(e,t,n){var r=n(194);e.exports=function(e){return r(new Date,e)}},function(e,t,n){var r=n(196);e.exports=function(e){return r(new Date,e)}},function(e,t,n){var r=n(108);e.exports=function(e,t){return r(new Date,e,t)}},function(e,t,n){var r=n(198);e.exports=function(e){return r(new Date,e)}},function(e,t,n){var r=n(0);e.exports=function(e){return 4===r(e).getDay()}},function(e,t,n){var r=n(31);e.exports=function(e){return r(e).getTime()===r(new Date).getTime()}},function(e,t,n){var r=n(31);e.exports=function(e){var t=new Date;return t.setDate(t.getDate()+1),r(e).getTime()===r(t).getTime()}},function(e,t,n){var r=n(0);e.exports=function(e){return 2===r(e).getDay()}},function(e,t,n){var r=n(0);e.exports=function(e){return 3===r(e).getDay()}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e).getDay();return 0===t||6===t}},function(e,t,n){var r=n(0);e.exports=function(e,t,n){var o=r(e).getTime(),i=r(t).getTime(),a=r(n).getTime();if(i>a)throw new Error("The start of the range cannot be after the end of the range");return o>=i&&o<=a}},function(e,t,n){var r=n(31);e.exports=function(e){var t=new Date;return t.setDate(t.getDate()-1),r(e).getTime()===r(t).getTime()}},function(e,t,n){var r=n(199);e.exports=function(e){return r(e,{weekStartsOn:1})}},function(e,t,n){var r=n(29),o=n(30);e.exports=function(e){var t=r(e),n=new 
Date(0);n.setFullYear(t+1,0,4),n.setHours(0,0,0,0);var i=o(n);return i.setDate(i.getDate()-1),i}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e),n=t.getMonth();return t.setFullYear(t.getFullYear(),n+1,0),t.setHours(0,0,0,0),t}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e),n=t.getMonth(),o=n-n%3+3;return t.setMonth(o,0),t.setHours(0,0,0,0),t}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e),n=t.getFullYear();return t.setFullYear(n+1,0,0),t.setHours(0,0,0,0),t}},function(e,t,n){var r=n(0);e.exports=function(){var e=Array.prototype.slice.call(arguments).map(function(e){return r(e)}),t=Math.max.apply(null,e);return new Date(t)}},function(e,t,n){var r=n(0);e.exports=function(){var e=Array.prototype.slice.call(arguments).map(function(e){return r(e)}),t=Math.min.apply(null,e);return new Date(t)}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e),o=Number(t);return n.setDate(o),n}},function(e,t,n){var r=n(0),o=n(50);e.exports=function(e,t,n){var i=n&&Number(n.weekStartsOn)||0,a=r(e),u=Number(t),s=a.getDay();return o(a,((u%7+7)%7<i?7:0)+u-s)}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e),o=Number(t);return n.setMonth(0),n.setDate(o),n}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e),o=Number(t);return n.setHours(o),n}},function(e,t,n){var r=n(0),o=n(50),i=n(186);e.exports=function(e,t){var n=r(e),a=Number(t),u=i(n);return o(n,a-u)}},function(e,t,n){var r=n(0),o=n(107);e.exports=function(e,t){var n=r(e),i=Number(t),a=o(n)-i;return n.setDate(n.getDate()-7*a),n}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e),o=Number(t);return n.setMilliseconds(o),n}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e),o=Number(t);return n.setMinutes(o),n}},function(e,t,n){var r=n(0),o=n(200);e.exports=function(e,t){var n=r(e),i=Number(t)-(Math.floor(n.getMonth()/3)+1);return o(n,n.getMonth()+3*i)}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e),o=Number(t);return 
n.setSeconds(o),n}},function(e,t,n){var r=n(0);e.exports=function(e,t){var n=r(e),o=Number(t);return n.setFullYear(o),n}},function(e,t,n){var r=n(0);e.exports=function(e){var t=r(e);return t.setDate(1),t.setHours(0,0,0,0),t}},function(e,t,n){var r=n(31);e.exports=function(){return r(new Date)}},function(e,t){e.exports=function(){var e=new Date,t=e.getFullYear(),n=e.getMonth(),r=e.getDate(),o=new Date(0);return o.setFullYear(t,n,r+1),o.setHours(0,0,0,0),o}},function(e,t){e.exports=function(){var e=new Date,t=e.getFullYear(),n=e.getMonth(),r=e.getDate(),o=new Date(0);return o.setFullYear(t,n,r-1),o.setHours(0,0,0,0),o}},function(e,t,n){var r=n(50);e.exports=function(e,t){var n=Number(t);return r(e,-n)}},function(e,t,n){var r=n(166);e.exports=function(e,t){var n=Number(t);return r(e,-n)}},function(e,t,n){var r=n(51);e.exports=function(e,t){var n=Number(t);return r(e,-n)}},function(e,t,n){var r=n(169);e.exports=function(e,t){var n=Number(t);return r(e,-n)}},function(e,t,n){var r=n(68);e.exports=function(e,t){var n=Number(t);return r(e,-n)}},function(e,t,n){var r=n(170);e.exports=function(e,t){var n=Number(t);return r(e,-n)}},function(e,t,n){var r=n(171);e.exports=function(e,t){var n=Number(t);return r(e,-n)}},function(e,t,n){var r=n(101);e.exports=function(e,t){var n=Number(t);return r(e,-n)}},function(e,t,n){var r=n(172);e.exports=function(e,t){var n=Number(t);return r(e,-n)}},function(e,t,n){(function(t){var r,o,i,a;i=n(479),a=n(493),o=n(495),e.exports=function(){var e,n,o,i,a;return 3===arguments.length?(o=arguments[0],i=arguments[1],e=arguments[2]):2===arguments.length?(Array.isArray(arguments[0])?o=arguments[0]:i=arguments[0],"function"==typeof arguments[1]?e=arguments[1]:i=arguments[1]):1===arguments.length&&("function"==typeof arguments[0]?e=arguments[0]:Array.isArray(arguments[0])?o=arguments[0]:i=arguments[0]),null==i&&(i={}),a=new r(i),o&&t.nextTick(function(){var e,t,n;for(t=0,n=o.length;t<n;t++)e=o[t],a.write(e);return 
a.end()}),e&&(n=[],a.on("readable",function(){var e,t;for(t=[];e=a.read();)t.push(n.push(e));return t}),a.on("error",function(t){return e(t)}),a.on("end",function(){return e(null,n.join(""))})),a},r=function(e={}){var t,n,o,a,u,s,c,l,f,d,p,h,m,v,y,g,_,b;for(g in _={},e)b=e[g],_[g]=b;switch(_.objectMode=!0,i.Transform.call(this,_),this.options=_,null==(t=this.options).delimiter&&(t.delimiter=","),null==(n=this.options).quote&&(n.quote='"'),null==(l=this.options).quoted&&(l.quoted=!1),null==(f=this.options).quotedEmpty&&(f.quotedEmpty=void 0),null==(d=this.options).quotedString&&(d.quotedString=!1),null==(p=this.options).eof&&(p.eof=!0),null==(h=this.options).escape&&(h.escape='"'),null==(m=this.options).header&&(m.header=!1),this.options.columns=r.normalize_columns(this.options.columns),null==(v=this.options).formatters&&(v.formatters={}),this.options.formatters.bool&&(this.options.formatters.boolean=this.options.formatters.bool),null==(y=this.options.formatters).string&&(y.string=function(e){return e}),null==(o=this.options.formatters).date&&(o.date=function(e){return""+e.getTime()}),null==(a=this.options.formatters).boolean&&(a.boolean=function(e){return e?"1":""}),null==(u=this.options.formatters).number&&(u.number=function(e){return""+e}),null==(s=this.options.formatters).object&&(s.object=function(e){return JSON.stringify(e)}),null==(c=this.options).rowDelimiter&&(c.rowDelimiter="\n"),null==this.countWriten&&(this.countWriten=0),this.options.rowDelimiter){case"auto":this.options.rowDelimiter=null;break;case"unix":this.options.rowDelimiter="\n";break;case"mac":this.options.rowDelimiter="\r";break;case"windows":this.options.rowDelimiter="\r\n";break;case"ascii":this.options.rowDelimiter="";break;case"unicode":this.options.rowDelimiter="\u2028"}return this},a.inherits(r,i.Transform),e.exports.Stringifier=r,r.prototype._transform=function(e,t,n){var o,i,a;if(null!=e){if(!(a="object"!=typeof 
e)){0!==this.countWriten||Array.isArray(e)||null==(o=this.options).columns&&(o.columns=r.normalize_columns(Object.keys(e)));try{this.emit("record",e,this.countWriten)}catch(e){return i=e,this.emit("error",i)}if(this.options.eof){if(null==(e=this.stringify(e)))return;e+=this.options.rowDelimiter}else{if(null==(e=this.stringify(e)))return;(this.options.header||this.countWriten)&&(e=this.options.rowDelimiter+e)}}return"number"==typeof e&&(e=`${e}`),0===this.countWriten&&this.headers(),a||this.countWriten++,this.push(e),n()}},r.prototype._flush=function(e){return 0===this.countWriten&&this.headers(),e()},r.prototype.stringify=function(e){var t,n,r,i,a,u,s,c,l,f,d,p,h,m,v,y,g,_,b,w,x,E;if("object"!=typeof e)return e;if(r=this.options.columns,c=this.options.delimiter,y=this.options.quote,f=this.options.escape,Array.isArray(e))r&&e.splice(r.length);else{if(t=[],r)for(p=h=0,g=r.length;0<=g?h<g:h>g;p=0<=g?++h:--h)E=o(e,r[p].key),t[p]=void 0===E||null===E?"":E;else for(n in e)t.push(e[n]);e=t,t=null}if(Array.isArray(e)){for(v="",p=m=0,_=e.length;0<=_?m<_:m>_;p=0<=_?++m:--m){x=typeof(d=e[p]);try{"string"===x?d=this.options.formatters.string(d):"number"===x?d=this.options.formatters.number(d):"boolean"===x?d=this.options.formatters.boolean(d):d instanceof Date?d=this.options.formatters.date(d):"object"===x&&null!==d&&(d=this.options.formatters.object(d))}catch(e){return l=e,void this.emit("error",l)}if(d){if("string"!=typeof d)return this.emit("error",Error("Formatter must return a string, null or undefined")),null;s=d.indexOf(c)>=0,a=""!==y&&d.indexOf(y)>=0,i=d.indexOf(f)>=0&&f!==y,u=d.indexOf(this.options.rowDelimiter)>=0,(w=a||s||u||this.options.quoted||this.options.quotedString&&"string"==typeof e[p])&&i&&(b="\\"===f?new RegExp(f+f,"g"):new RegExp(f,"g"),d=d.replace(b,f+f)),a&&(b=new 
RegExp(y,"g"),d=d.replace(b,f+y)),w&&(d=y+d+y),v+=d}else(this.options.quotedEmpty||null==this.options.quotedEmpty&&""===e[p]&&this.options.quotedString)&&(v+=y+y);p!==e.length-1&&(v+=c)}e=v}return e},r.prototype.headers=function(){var e;if(this.options.header&&this.options.columns)return e=this.options.columns.map(function(e){return e.header}),e=this.options.eof?this.stringify(e)+this.options.rowDelimiter:this.stringify(e),this.push(e)},r.normalize_columns=function(e){var t,n,r;if(null==e)return null;if(null!=e){if("object"!=typeof e)throw Error('Invalid option "columns": expect an array or an object');e=Array.isArray(e)?function(){var n,r,o;for(o=[],n=0,r=e.length;n<r;n++)if("string"==typeof(t=e[n]))o.push({key:t,header:t});else{if("object"!=typeof t||null==t||Array.isArray(t))throw Error("Invalid column definition: expect a string or an object");if(!t.key)throw Error('Invalid column definition: property "key" is required');null==t.header&&(t.header=t.key),o.push(t)}return o}():function(){var t;for(n in t=[],e)r=e[n],t.push({key:n,header:r});return t}()}return e}}).call(t,n(40))},function(e,t,n){e.exports=o;var r=n(109).EventEmitter;function o(){r.call(this)}n(32)(o,r),o.Readable=n(110),o.Writable=n(489),o.Duplex=n(490),o.Transform=n(491),o.PassThrough=n(492),o.Stream=o,o.prototype.pipe=function(e,t){var n=this;function o(t){e.writable&&!1===e.write(t)&&n.pause&&n.pause()}function i(){n.readable&&n.resume&&n.resume()}n.on("data",o),e.on("drain",i),e._isStdio||t&&!1===t.end||(n.on("end",u),n.on("close",s));var a=!1;function u(){a||(a=!0,e.end())}function s(){a||(a=!0,"function"==typeof e.destroy&&e.destroy())}function c(e){if(l(),0===r.listenerCount(this,"error"))throw e}function l(){n.removeListener("data",o),e.removeListener("drain",i),n.removeListener("end",u),n.removeListener("close",s),n.removeListener("error",c),e.removeListener("error",c),n.removeListener("end",l),n.removeListener("close",l),e.removeListener("close",l)}return 
n.on("error",c),e.on("error",c),n.on("end",l),n.on("close",l),e.on("close",l),e.emit("pipe",n),e}},function(e,t,n){"use strict";t.byteLength=function(e){var t=c(e),n=t[0],r=t[1];return 3*(n+r)/4-r},t.toByteArray=function(e){for(var t,n=c(e),r=n[0],a=n[1],u=new i(function(e,t,n){return 3*(t+n)/4-n}(0,r,a)),s=0,l=a>0?r-4:r,f=0;f<l;f+=4)t=o[e.charCodeAt(f)]<<18|o[e.charCodeAt(f+1)]<<12|o[e.charCodeAt(f+2)]<<6|o[e.charCodeAt(f+3)],u[s++]=t>>16&255,u[s++]=t>>8&255,u[s++]=255&t;2===a&&(t=o[e.charCodeAt(f)]<<2|o[e.charCodeAt(f+1)]>>4,u[s++]=255&t);1===a&&(t=o[e.charCodeAt(f)]<<10|o[e.charCodeAt(f+1)]<<4|o[e.charCodeAt(f+2)]>>2,u[s++]=t>>8&255,u[s++]=255&t);return u},t.fromByteArray=function(e){for(var t,n=e.length,o=n%3,i=[],a=0,u=n-o;a<u;a+=16383)i.push(f(e,a,a+16383>u?u:a+16383));1===o?(t=e[n-1],i.push(r[t>>2]+r[t<<4&63]+"==")):2===o&&(t=(e[n-2]<<8)+e[n-1],i.push(r[t>>10]+r[t>>4&63]+r[t<<2&63]+"="));return i.join("")};for(var r=[],o=[],i="undefined"!=typeof Uint8Array?Uint8Array:Array,a="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",u=0,s=a.length;u<s;++u)r[u]=a[u],o[a.charCodeAt(u)]=u;function c(e){var t=e.length;if(t%4>0)throw new Error("Invalid string. 
Length must be a multiple of 4");var n=e.indexOf("=");return-1===n&&(n=t),[n,n===t?0:4-n%4]}function l(e){return r[e>>18&63]+r[e>>12&63]+r[e>>6&63]+r[63&e]}function f(e,t,n){for(var r,o=[],i=t;i<n;i+=3)r=(e[i]<<16&16711680)+(e[i+1]<<8&65280)+(255&e[i+2]),o.push(l(r));return o.join("")}o["-".charCodeAt(0)]=62,o["_".charCodeAt(0)]=63},function(e,t){t.read=function(e,t,n,r,o){var i,a,u=8*o-r-1,s=(1<<u)-1,c=s>>1,l=-7,f=n?o-1:0,d=n?-1:1,p=e[t+f];for(f+=d,i=p&(1<<-l)-1,p>>=-l,l+=u;l>0;i=256*i+e[t+f],f+=d,l-=8);for(a=i&(1<<-l)-1,i>>=-l,l+=r;l>0;a=256*a+e[t+f],f+=d,l-=8);if(0===i)i=1-c;else{if(i===s)return a?NaN:1/0*(p?-1:1);a+=Math.pow(2,r),i-=c}return(p?-1:1)*a*Math.pow(2,i-r)},t.write=function(e,t,n,r,o,i){var a,u,s,c=8*i-o-1,l=(1<<c)-1,f=l>>1,d=23===o?Math.pow(2,-24)-Math.pow(2,-77):0,p=r?0:i-1,h=r?1:-1,m=t<0||0===t&&1/t<0?1:0;for(t=Math.abs(t),isNaN(t)||t===1/0?(u=isNaN(t)?1:0,a=l):(a=Math.floor(Math.log(t)/Math.LN2),t*(s=Math.pow(2,-a))<1&&(a--,s*=2),(t+=a+f>=1?d/s:d*Math.pow(2,1-f))*s>=2&&(a++,s/=2),a+f>=l?(u=0,a=l):a+f>=1?(u=(t*s-1)*Math.pow(2,o),a+=f):(u=t*Math.pow(2,f-1)*Math.pow(2,o),a=0));o>=8;e[n+p]=255&u,p+=h,u/=256,o-=8);for(a=a<<o|u,c+=o;c>0;e[n+p]=255&a,p+=h,a/=256,c-=8);e[n+p-h]|=128*m}},function(e,t){},function(e,t,n){"use strict";var r=n(71).Buffer,o=n(484);function i(e,t,n){e.copy(t,n)}e.exports=function(){function e(){!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,e),this.head=null,this.tail=null,this.length=0}return e.prototype.push=function(e){var t={data:e,next:null};this.length>0?this.tail.next=t:this.head=t,this.tail=t,++this.length},e.prototype.unshift=function(e){var t={data:e,next:this.head};0===this.length&&(this.tail=t),this.head=t,++this.length},e.prototype.shift=function(){if(0!==this.length){var e=this.head.data;return 
1===this.length?this.head=this.tail=null:this.head=this.head.next,--this.length,e}},e.prototype.clear=function(){this.head=this.tail=null,this.length=0},e.prototype.join=function(e){if(0===this.length)return"";for(var t=this.head,n=""+t.data;t=t.next;)n+=e+t.data;return n},e.prototype.concat=function(e){if(0===this.length)return r.alloc(0);if(1===this.length)return this.head.data;for(var t=r.allocUnsafe(e>>>0),n=this.head,o=0;n;)i(n.data,t,o),o+=n.data.length,n=n.next;return t},e}(),o&&o.inspect&&o.inspect.custom&&(e.exports.prototype[o.inspect.custom]=function(){var e=o.inspect({length:this.length});return this.constructor.name+" "+e})},function(e,t){},function(e,t,n){(function(e){var r=void 0!==e&&e||"undefined"!=typeof self&&self||window,o=Function.prototype.apply;function i(e,t){this._id=e,this._clearFn=t}t.setTimeout=function(){return new i(o.call(setTimeout,r,arguments),clearTimeout)},t.setInterval=function(){return new i(o.call(setInterval,r,arguments),clearInterval)},t.clearTimeout=t.clearInterval=function(e){e&&e.close()},i.prototype.unref=i.prototype.ref=function(){},i.prototype.close=function(){this._clearFn.call(r,this._id)},t.enroll=function(e,t){clearTimeout(e._idleTimeoutId),e._idleTimeout=t},t.unenroll=function(e){clearTimeout(e._idleTimeoutId),e._idleTimeout=-1},t._unrefActive=t.active=function(e){clearTimeout(e._idleTimeoutId);var t=e._idleTimeout;t>=0&&(e._idleTimeoutId=setTimeout(function(){e._onTimeout&&e._onTimeout()},t))},n(486),t.setImmediate="undefined"!=typeof self&&self.setImmediate||void 0!==e&&e.setImmediate||this&&this.setImmediate,t.clearImmediate="undefined"!=typeof self&&self.clearImmediate||void 0!==e&&e.clearImmediate||this&&this.clearImmediate}).call(t,n(3))},function(e,t,n){(function(e,t){!function(e,n){"use strict";if(!e.setImmediate){var r,o=1,i={},a=!1,u=e.document,s=Object.getPrototypeOf&&Object.getPrototypeOf(e);s=s&&s.setTimeout?s:e,"[object 
process]"==={}.toString.call(e.process)?r=function(e){t.nextTick(function(){l(e)})}:function(){if(e.postMessage&&!e.importScripts){var t=!0,n=e.onmessage;return e.onmessage=function(){t=!1},e.postMessage("","*"),e.onmessage=n,t}}()?function(){var t="setImmediate$"+Math.random()+"$",n=function(n){n.source===e&&"string"==typeof n.data&&0===n.data.indexOf(t)&&l(+n.data.slice(t.length))};e.addEventListener?e.addEventListener("message",n,!1):e.attachEvent("onmessage",n),r=function(n){e.postMessage(t+n,"*")}}():e.MessageChannel?function(){var e=new MessageChannel;e.port1.onmessage=function(e){l(e.data)},r=function(t){e.port2.postMessage(t)}}():u&&"onreadystatechange"in u.createElement("script")?function(){var e=u.documentElement;r=function(t){var n=u.createElement("script");n.onreadystatechange=function(){l(t),n.onreadystatechange=null,e.removeChild(n),n=null},e.appendChild(n)}}():r=function(e){setTimeout(l,0,e)},s.setImmediate=function(e){"function"!=typeof e&&(e=new Function(""+e));for(var t=new Array(arguments.length-1),n=0;n<t.length;n++)t[n]=arguments[n+1];var a={callback:e,args:t};return i[o]=a,r(o),o++},s.clearImmediate=c}function c(e){delete i[e]}function l(e){if(a)setTimeout(l,0,e);else{var t=i[e];if(t){a=!0;try{!function(e){var t=e.callback,r=e.args;switch(r.length){case 0:t();break;case 1:t(r[0]);break;case 2:t(r[0],r[1]);break;case 3:t(r[0],r[1],r[2]);break;default:t.apply(n,r)}}(t)}finally{c(e),a=!1}}}}}("undefined"==typeof self?void 0===e?this:e:self)}).call(t,n(3),n(40))},function(e,t,n){(function(t){function n(e){try{if(!t.localStorage)return!1}catch(e){return!1}var n=t.localStorage[e];return null!=n&&"true"===String(n).toLowerCase()}e.exports=function(e,t){if(n("noDeprecation"))return e;var r=!1;return function(){if(!r){if(n("throwDeprecation"))throw new Error(t);n("traceDeprecation")?console.trace(t):console.warn(t),r=!0}return e.apply(this,arguments)}}}).call(t,n(3))},function(e,t,n){"use strict";e.exports=i;var r=n(207),o=n(54);function i(e){if(!(this 
instanceof i))return new i(e);r.call(this,e)}o.inherits=n(32),o.inherits(i,r),i.prototype._transform=function(e,t,n){n(null,e)}},function(e,t,n){e.exports=n(111)},function(e,t,n){e.exports=n(33)},function(e,t,n){e.exports=n(110).Transform},function(e,t,n){e.exports=n(110).PassThrough},function(e,t,n){(function(e,r){var o=/%[sdj%]/g;t.format=function(e){if(!y(e)){for(var t=[],n=0;n<arguments.length;n++)t.push(u(arguments[n]));return t.join(" ")}n=1;for(var r=arguments,i=r.length,a=String(e).replace(o,function(e){if("%%"===e)return"%";if(n>=i)return e;switch(e){case"%s":return String(r[n++]);case"%d":return Number(r[n++]);case"%j":try{return JSON.stringify(r[n++])}catch(e){return"[Circular]"}default:return e}}),s=r[n];n<i;s=r[++n])m(s)||!b(s)?a+=" "+s:a+=" "+u(s);return a},t.deprecate=function(n,o){if(g(e.process))return function(){return t.deprecate(n,o).apply(this,arguments)};if(!0===r.noDeprecation)return n;var i=!1;return function(){if(!i){if(r.throwDeprecation)throw new Error(o);r.traceDeprecation?console.trace(o):console.error(o),i=!0}return n.apply(this,arguments)}};var i,a={};function u(e,n){var r={seen:[],stylize:c};return arguments.length>=3&&(r.depth=arguments[2]),arguments.length>=4&&(r.colors=arguments[3]),h(n)?r.showHidden=n:n&&t._extend(r,n),g(r.showHidden)&&(r.showHidden=!1),g(r.depth)&&(r.depth=2),g(r.colors)&&(r.colors=!1),g(r.customInspect)&&(r.customInspect=!0),r.colors&&(r.stylize=s),l(r,e,r.depth)}function s(e,t){var n=u.styles[t];return n?"["+u.colors[n][0]+"m"+e+"["+u.colors[n][1]+"m":e}function c(e,t){return e}function l(e,n,r){if(e.customInspect&&n&&E(n.inspect)&&n.inspect!==t.inspect&&(!n.constructor||n.constructor.prototype!==n)){var o=n.inspect(r,e);return y(o)||(o=l(e,o,r)),o}var i=function(e,t){if(g(t))return e.stylize("undefined","undefined");if(y(t)){var n="'"+JSON.stringify(t).replace(/^"|"$/g,"").replace(/'/g,"\\'").replace(/\\"/g,'"')+"'";return e.stylize(n,"string")}if(v(t))return e.stylize(""+t,"number");if(h(t))return 
e.stylize(""+t,"boolean");if(m(t))return e.stylize("null","null")}(e,n);if(i)return i;var a=Object.keys(n),u=function(e){var t={};return e.forEach(function(e,n){t[e]=!0}),t}(a);if(e.showHidden&&(a=Object.getOwnPropertyNames(n)),x(n)&&(a.indexOf("message")>=0||a.indexOf("description")>=0))return f(n);if(0===a.length){if(E(n)){var s=n.name?": "+n.name:"";return e.stylize("[Function"+s+"]","special")}if(_(n))return e.stylize(RegExp.prototype.toString.call(n),"regexp");if(w(n))return e.stylize(Date.prototype.toString.call(n),"date");if(x(n))return f(n)}var c,b="",O=!1,S=["{","}"];(p(n)&&(O=!0,S=["[","]"]),E(n))&&(b=" [Function"+(n.name?": "+n.name:"")+"]");return _(n)&&(b=" "+RegExp.prototype.toString.call(n)),w(n)&&(b=" "+Date.prototype.toUTCString.call(n)),x(n)&&(b=" "+f(n)),0!==a.length||O&&0!=n.length?r<0?_(n)?e.stylize(RegExp.prototype.toString.call(n),"regexp"):e.stylize("[Object]","special"):(e.seen.push(n),c=O?function(e,t,n,r,o){for(var i=[],a=0,u=t.length;a<u;++a)k(t,String(a))?i.push(d(e,t,n,r,String(a),!0)):i.push("");return o.forEach(function(o){o.match(/^\d+$/)||i.push(d(e,t,n,r,o,!0))}),i}(e,n,r,u,a):a.map(function(t){return d(e,n,r,u,t,O)}),e.seen.pop(),function(e,t,n){if(e.reduce(function(e,t){return 0,t.indexOf("\n")>=0&&0,e+t.replace(/\u001b\[\d\d?m/g,"").length+1},0)>60)return n[0]+(""===t?"":t+"\n ")+" "+e.join(",\n ")+" "+n[1];return n[0]+t+" "+e.join(", ")+" "+n[1]}(c,b,S)):S[0]+b+S[1]}function f(e){return"["+Error.prototype.toString.call(e)+"]"}function d(e,t,n,r,o,i){var a,u,s;if((s=Object.getOwnPropertyDescriptor(t,o)||{value:t[o]}).get?u=s.set?e.stylize("[Getter/Setter]","special"):e.stylize("[Getter]","special"):s.set&&(u=e.stylize("[Setter]","special")),k(r,o)||(a="["+o+"]"),u||(e.seen.indexOf(s.value)<0?(u=m(n)?l(e,s.value,null):l(e,s.value,n-1)).indexOf("\n")>-1&&(u=i?u.split("\n").map(function(e){return" "+e}).join("\n").substr(2):"\n"+u.split("\n").map(function(e){return" 
"+e}).join("\n")):u=e.stylize("[Circular]","special")),g(a)){if(i&&o.match(/^\d+$/))return u;(a=JSON.stringify(""+o)).match(/^"([a-zA-Z_][a-zA-Z_0-9]*)"$/)?(a=a.substr(1,a.length-2),a=e.stylize(a,"name")):(a=a.replace(/'/g,"\\'").replace(/\\"/g,'"').replace(/(^"|"$)/g,"'"),a=e.stylize(a,"string"))}return a+": "+u}function p(e){return Array.isArray(e)}function h(e){return"boolean"==typeof e}function m(e){return null===e}function v(e){return"number"==typeof e}function y(e){return"string"==typeof e}function g(e){return void 0===e}function _(e){return b(e)&&"[object RegExp]"===O(e)}function b(e){return"object"==typeof e&&null!==e}function w(e){return b(e)&&"[object Date]"===O(e)}function x(e){return b(e)&&("[object Error]"===O(e)||e instanceof Error)}function E(e){return"function"==typeof e}function O(e){return Object.prototype.toString.call(e)}function S(e){return e<10?"0"+e.toString(10):e.toString(10)}t.debuglog=function(e){if(g(i)&&(i=r.env.NODE_DEBUG||""),e=e.toUpperCase(),!a[e])if(new RegExp("\\b"+e+"\\b","i").test(i)){var n=r.pid;a[e]=function(){var r=t.format.apply(t,arguments);console.error("%s %d: %s",e,n,r)}}else a[e]=function(){};return a[e]},t.inspect=u,u.colors={bold:[1,22],italic:[3,23],underline:[4,24],inverse:[7,27],white:[37,39],grey:[90,39],black:[30,39],blue:[34,39],cyan:[36,39],green:[32,39],magenta:[35,39],red:[31,39],yellow:[33,39]},u.styles={special:"cyan",number:"yellow",boolean:"yellow",undefined:"grey",null:"bold",string:"green",date:"magenta",regexp:"red"},t.isArray=p,t.isBoolean=h,t.isNull=m,t.isNullOrUndefined=function(e){return null==e},t.isNumber=v,t.isString=y,t.isSymbol=function(e){return"symbol"==typeof e},t.isUndefined=g,t.isRegExp=_,t.isObject=b,t.isDate=w,t.isError=x,t.isFunction=E,t.isPrimitive=function(e){return null===e||"boolean"==typeof e||"number"==typeof e||"string"==typeof e||"symbol"==typeof e||void 0===e},t.isBuffer=n(494);var T=["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"];function 
k(e,t){return Object.prototype.hasOwnProperty.call(e,t)}t.log=function(){console.log("%s - %s",function(){var e=new Date,t=[S(e.getHours()),S(e.getMinutes()),S(e.getSeconds())].join(":");return[e.getDate(),T[e.getMonth()],t].join(" ")}(),t.format.apply(t,arguments))},t.inherits=n(32),t._extend=function(e,t){if(!t||!b(t))return e;for(var n=Object.keys(t),r=n.length;r--;)e[n[r]]=t[n[r]];return e}}).call(t,n(3),n(40))},function(e,t){e.exports=function(e){return e&&"object"==typeof e&&"function"==typeof e.copy&&"function"==typeof e.fill&&"function"==typeof e.readUInt8}},function(e,t,n){(function(t){var n="Expected a function",r="__lodash_hash_undefined__",o=1/0,i="[object Function]",a="[object GeneratorFunction]",u="[object Symbol]",s=/\.|\[(?:[^[\]]*|(["'])(?:(?!\1)[^\\]|\\.)*?\1)\]/,c=/^\w*$/,l=/^\./,f=/[^.[\]]+|\[(?:(-?\d+(?:\.\d+)?)|(["'])((?:(?!\2)[^\\]|\\.)*?)\2)\]|(?=(?:\.|\[\])(?:\.|\[\]|$))/g,d=/\\(\\)?/g,p=/^\[object .+?Constructor\]$/,h="object"==typeof t&&t&&t.Object===Object&&t,m="object"==typeof self&&self&&self.Object===Object&&self,v=h||m||Function("return this")();var y=Array.prototype,g=Function.prototype,_=Object.prototype,b=v["__core-js_shared__"],w=function(){var e=/[^.]+$/.exec(b&&b.keys&&b.keys.IE_PROTO||"");return e?"Symbol(src)_1."+e:""}(),x=g.toString,E=_.hasOwnProperty,O=_.toString,S=RegExp("^"+x.call(E).replace(/[\\^$.*+?()[\]{}|]/g,"\\$&").replace(/hasOwnProperty|(function).*?(?=\\\()| for .+?(?=\\\])/g,"$1.*?")+"$"),T=v.Symbol,k=y.splice,R=F(v,"Map"),j=F(Object,"create"),P=T?T.prototype:void 0,C=P?P.toString:void 0;function M(e){var t=-1,n=e?e.length:0;for(this.clear();++t<n;){var r=e[t];this.set(r[0],r[1])}}function I(e){var t=-1,n=e?e.length:0;for(this.clear();++t<n;){var r=e[t];this.set(r[0],r[1])}}function A(e){var t=-1,n=e?e.length:0;for(this.clear();++t<n;){var r=e[t];this.set(r[0],r[1])}}function D(e,t){for(var n=e.length;n--;)if($(e[n][0],t))return n;return-1}function N(e,t){for(var n=0,r=(t=function(e,t){if(q(e))return!1;var 
n=typeof e;if("number"==n||"symbol"==n||"boolean"==n||null==e||z(e))return!0;return c.test(e)||!s.test(e)||null!=t&&e in Object(t)}(t,e)?[t]:function(e){return q(e)?e:B(e)}(t)).length;null!=e&&n<r;)e=e[W(t[n++])];return n&&n==r?e:void 0}function L(e){return!(!H(e)||function(e){return!!w&&w in e}(e))&&(function(e){var t=H(e)?O.call(e):"";return t==i||t==a}(e)||function(e){var t=!1;if(null!=e&&"function"!=typeof e.toString)try{t=!!(e+"")}catch(e){}return t}(e)?S:p).test(function(e){if(null!=e){try{return x.call(e)}catch(e){}try{return e+""}catch(e){}}return""}(e))}function U(e,t){var n=e.__data__;return function(e){var t=typeof e;return"string"==t||"number"==t||"symbol"==t||"boolean"==t?"__proto__"!==e:null===e}(t)?n["string"==typeof t?"string":"hash"]:n.map}function F(e,t){var n=function(e,t){return null==e?void 0:e[t]}(e,t);return L(n)?n:void 0}M.prototype.clear=function(){this.__data__=j?j(null):{}},M.prototype.delete=function(e){return this.has(e)&&delete this.__data__[e]},M.prototype.get=function(e){var t=this.__data__;if(j){var n=t[e];return n===r?void 0:n}return E.call(t,e)?t[e]:void 0},M.prototype.has=function(e){var t=this.__data__;return j?void 0!==t[e]:E.call(t,e)},M.prototype.set=function(e,t){return this.__data__[e]=j&&void 0===t?r:t,this},I.prototype.clear=function(){this.__data__=[]},I.prototype.delete=function(e){var t=this.__data__,n=D(t,e);return!(n<0||(n==t.length-1?t.pop():k.call(t,n,1),0))},I.prototype.get=function(e){var t=this.__data__,n=D(t,e);return n<0?void 0:t[n][1]},I.prototype.has=function(e){return D(this.__data__,e)>-1},I.prototype.set=function(e,t){var n=this.__data__,r=D(n,e);return r<0?n.push([e,t]):n[r][1]=t,this},A.prototype.clear=function(){this.__data__={hash:new M,map:new(R||I),string:new M}},A.prototype.delete=function(e){return U(this,e).delete(e)},A.prototype.get=function(e){return U(this,e).get(e)},A.prototype.has=function(e){return U(this,e).has(e)},A.prototype.set=function(e,t){return U(this,e).set(e,t),this};var 
B=Y(function(e){e=function(e){return null==e?"":function(e){if("string"==typeof e)return e;if(z(e))return C?C.call(e):"";var t=e+"";return"0"==t&&1/e==-o?"-0":t}(e)}(e);var t=[];return l.test(e)&&t.push(""),e.replace(f,function(e,n,r,o){t.push(r?o.replace(d,"$1"):n||e)}),t});function W(e){if("string"==typeof e||z(e))return e;var t=e+"";return"0"==t&&1/e==-o?"-0":t}function Y(e,t){if("function"!=typeof e||t&&"function"!=typeof t)throw new TypeError(n);var r=function(){var n=arguments,o=t?t.apply(this,n):n[0],i=r.cache;if(i.has(o))return i.get(o);var a=e.apply(this,n);return r.cache=i.set(o,a),a};return r.cache=new(Y.Cache||A),r}function $(e,t){return e===t||e!=e&&t!=t}Y.Cache=A;var q=Array.isArray;function H(e){var t=typeof e;return!!e&&("object"==t||"function"==t)}function z(e){return"symbol"==typeof e||function(e){return!!e&&"object"==typeof e}(e)&&O.call(e)==u}e.exports=function(e,t,n){var r=null==e?void 0:N(e,t);return void 0===r?n:r}}).call(t,n(3))},function(e,t,n){(function(t){var n=function(){try{return Function("return this")()||(0,eval)("this")}catch(e){return"object"==typeof window&&window.window===window?window:"object"==typeof self&&self.self===self?self:"object"==typeof t&&t.global===t?t:this}}();function r(e,t,n){var r=new XMLHttpRequest;r.open("GET",e),r.responseType="blob",r.onload=function(){a(r.response,t,n)},r.onerror=function(){console.error("could not download file")},r.send()}function o(e){var t=new XMLHttpRequest;return t.open("HEAD",e,!1),t.send(),t.status>=200&&t.status<=299}function i(e){try{e.dispatchEvent(new MouseEvent("click"))}catch(n){var t=document.createEvent("MouseEvents");t.initMouseEvent("click",!0,!0,window,0,0,0,80,20,!1,!1,!1,!1,0,null),e.dispatchEvent(t)}}var a=n.saveAs||"object"!=typeof window||window!==n?function(){}:"download"in HTMLAnchorElement.prototype?function(e,t,a){var u=n.URL||n.webkitURL,s=document.createElement("a");t=t||e.name||"download",s.download=t,s.rel="noopener","string"==typeof 
e?(s.href=e,s.origin!==location.origin?o(s.href)?r(e,t,a):i(s,s.target="_blank"):i(s)):(s.href=u.createObjectURL(e),setTimeout(function(){u.revokeObjectURL(s.href)},4e4),setTimeout(function(){i(s)},0))}:"msSaveOrOpenBlob"in navigator?function(e,t,n){if(t=t||e.name||"download","string"==typeof e)if(o(e))r(e,t,n);else{var i=document.createElement("a");i.href=e,i.target="_blank",setTimeout(function(){clikc(i)})}else navigator.msSaveOrOpenBlob(function(e,t){return void 0===t?t={autoBom:!1}:"object"!=typeof t&&(console.warn("Depricated: Expected third argument to be a object"),t={autoBom:!t}),t.autoBom&&/^\s*(?:text\/\S*|application\/xml|\S*\/\S*\+xml)\s*;.*charset\s*=\s*utf-8/i.test(e.type)?new Blob([String.fromCharCode(65279),e],{type:e.type}):e}(e,n),t)}:function(e,t,o,i){if((i=i||open("","_blank"))&&(i.document.title=i.document.body.innerText="downloading..."),"string"==typeof e)return r(e,t,o);var a="application/octet-stream"===e.type,u=/constructor/i.test(n.HTMLElement)||n.safari,s=/CriOS\/[\d]+/.test(navigator.userAgent);if((s||a&&u)&&"object"==typeof FileReader){var c=new FileReader;c.onloadend=function(){var e=c.result;e=s?e:e.replace(/^data:[^;]*;/,"data:attachment/file;"),i?i.location.href=e:location=e,i=null},c.readAsDataURL(e)}else{var l=n.URL||n.webkitURL,f=l.createObjectURL(e);i?i.location=f:location.href=f,i=null,setTimeout(function(){l.revokeObjectURL(f)},4e4)}};e.exports=n.saveAs=a.saveAs=a}).call(t,n(3))},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=h(n(4)),o=h(n(5)),i=h(n(6)),a=h(n(7)),u=h(n(8)),s=h(n(9)),c=n(1),l=h(c),f=n(2),d=n(17),p=n(11);function h(e){return e&&e.__esModule?e:{default:e}}var m=function(e){function t(){var e,n,r,a;(0,i.default)(this,t);for(var s=arguments.length,c=Array(s),l=0;l<s;l++)c[l]=arguments[l];return 
n=r=(0,u.default)(this,(e=t.__proto__||(0,o.default)(t)).call.apply(e,[this].concat(c))),r.state={playing:!1},a=n,(0,u.default)(r,a)}return(0,s.default)(t,e),(0,a.default)(t,[{key:"render",value:function(){var e=this,t=this.props,n=t.app,r=t.data,o=t.size,i=this.state.playing,a=r.metadata.sugarcube.fp.replace("/var/www/files/","https://cube.syrianarchive.org/"),u=n.mediainfo,s=u.sha256,c=u.verified,f=n.mediainfo.metadata.mediainfo.video,p=n.keyframe.metadata.keyframe.basic[0];return l.default.createElement("div",{className:"video"},i?l.default.createElement("video",{src:a,autoPlay:!0,controls:!0,muted:!0}):l.default.createElement("div",{className:"bg",style:{width:d.widths[o||"sm"],height:d.widths[o||"sm"]/f.aspect_ratio,backgroundImage:"url("+(0,d.imageUrl)(c,s,p,o)+")"},onClick:function(){return e.setState({playing:!0})}},l.default.createElement("div",{className:"play"})))}}]),t}(c.Component);t.default=(0,f.connect)(function(){return{tag:"sugarcube"}})(function(e){return l.default.createElement(p.Gate,(0,r.default)({View:m},e))})},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=c(n(78)),o=c(n(499)),i=c(n(160)),a=c(n(28));t.TableObject=f,t.TableArray=d,t.TableTuples=function(e){var t=e.tag,n=e.list;return n?u.default.createElement("div",null,t&&u.default.createElement("h3",null,t),u.default.createElement("table",{className:"tableTuples "+t},u.default.createElement("tbody",null,n.map(function(e,n){var r=(0,o.default)(e),i=r[0],a=r.slice(1);return u.default.createElement("tr",{key:t+"_"+n},u.default.createElement("th",null,(0,s.formatName)(i)),a.map(function(e,t){return u.default.createElement(h,{key:n+"_"+t,value:e})}))})))):null},t.TableRow=p,t.TableCell=h;var u=c(n(1)),s=n(17);function c(e){return e&&e.__esModule?e:{default:e}}var l="__HR__";function f(e){var t=e.tag,n=e.object,r=e.order,o=e.summary;if(!n)return null;if("loading"===n)return u.default.createElement("div",{className:"tableObject loading"},t,": 
Loading");if(n.err)return u.default.createElement("div",{className:"tableObject error"},t," Error: ",n.err);var s=(0,a.default)(n);if(r){var c=s.reduce(function(e,t){var n=r.indexOf(t);return-1!==n?e.order.push([n,t]):e.alpha.push(t),e},{order:[],alpha:[]});s=c.order.sort(function(e,t){return e[0]-t[0]}).map(function(e){var t=(0,i.default)(e,2);t[0];return t[1]}),o||(s=s.concat(c.alpha.sort()))}else s=s.sort();return u.default.createElement("div",null,t&&u.default.createElement("h3",null,t),u.default.createElement("table",{className:"tableObject "+t},u.default.createElement("tbody",null,s.map(function(e,t){return u.default.createElement(p,{key:e+"_"+t,name:e,value:n[e]})}))))}function d(e){var t=e.tag,n=e.list;return n?u.default.createElement("div",null,t&&u.default.createElement("h3",null,t),u.default.createElement("table",{className:"tableArray "+t},u.default.createElement("tbody",null,n.map(function(e,n){return u.default.createElement("tr",{key:t+"_"+n},u.default.createElement(h,{value:e}))})))):null}function p(e){var t=e.name,n=e.value;return t===l?u.default.createElement("tr",null,u.default.createElement("th",{className:"tr"},u.default.createElement("hr",null))):u.default.createElement("tr",null,u.default.createElement("th",null,(0,s.formatName)(t)),u.default.createElement(h,{name:t,value:n}))}function h(e){var t=e.value;return t&&"object"===(void 0===t?"undefined":(0,r.default)(t))&&(t=t._raw?t.value:t.length?u.default.createElement(d,{nested:!0,tag:"",list:t}):u.default.createElement(f,{nested:!0,tag:"",object:t})),u.default.createElement("td",null,t)}},function(e,t,n){"use strict";t.__esModule=!0;var r=function(e){return e&&e.__esModule?e:{default:e}}(n(154));t.default=function(e){return Array.isArray(e)?e:(0,r.default)(e)}},function(e,t,n){var r=n(501);"string"==typeof r&&(r=[[e.i,r,""]]);var o={hmr:!0,transform:void 0,insertInto:void 0};n(49)(r,o);r.locals&&(e.exports=r.locals)},function(e,t,n){(e.exports=n(48)(!1)).push([e.i,'/* css boilerplate */\n\n* { 
box-sizing: border-box; }\nhtml,body {\n margin: 0; padding: 0;\n width: 100%; height: 100%;\n}\nbody {\n font-family: Helvetica, sans-serif;\n font-weight: 300;\n}\n\nh1 {\n\n}\nh2 {\n font-weight: normal;\n margin: 10px 0;\n padding: 3px;\n font-size: 24px;\n}\nh3 {\n font-weight: normal;\n margin: 10px 0 0 0;\n padding: 3px;\n font-size: 18px;\n}\nh4 {\n font-weight: 300;\n font-size: 12px;\n letter-spacing: 2px;\n color: #888;\n text-transform: uppercase;\n margin: 5px 10px;\n margin-top: 20px;\n}\nh4:first-child {\n margin-top: 10px;\n}\n\n.app {\n width: 100%;\n height: 100%;\n display: flex;\n flex-direction: row;\n align-items: flex-start;\n justify-content: flex-start;\n}\n\n/* header stuff */\n\nheader {\n width: 100%;\n background: #11f;\n color: white;\n align-items: stretch;\n display: flex;\n flex-wrap: wrap;\n justify-content: space-between;\n z-index: 3;\n}\nheader > section {\n justify-content: flex-start;\n align-items: center;\n display: flex;\n flex: 1 0;\n font-weight: bold;\n}\nheader > section:last-of-type {\n justify-content: flex-end;\n}\n\n/* sidebar / body columns */\n\n.sidebar {\n display: flex;\n flex-direction: column;\n justify-content: flex-start;\n align-items: flex-start;\n height: 100%;\n float: left;\n width: 200px;\n flex: 0 0 200px;\n padding: 10px;\n margin-right: 10px;\n}\n.sidebar a {\n display: block;\n padding: 10px 10px;\n text-decoration: none;\n color: #444;\n}\n.sidebar a.active {\n font-weight: bold;\n color: #222;\n}\n.body {\n display: flex;\n flex-direction: column;\n align-items: flex-start;\n justify-content: flex-start;\n flex-grow: 1;\n}\n.body > div {\n padding-bottom: 40px;\n}\n\n/* buttons / forms */\n\n.btn:focus, .btn:hover {\n background: #f1f1fc;\n color: #4b48d6 !important;\n text-decoration: none;\n}\n.btn {\n -webkit-appearance: none;\n -moz-appearance: none;\n appearance: none;\n background: #fff;\n border: .05rem solid;\n border-radius: 2px;\n margin-right: 5px;\n color: #11f;\n cursor: pointer;\n 
display: inline-block;\n font-size: .8rem;\n height: 1.8rem;\n line-height: 1rem;\n outline: none;\n padding: .35rem .4rem;\n text-align: center;\n text-decoration: none;\n -webkit-transition: all .2s ease;\n -o-transition: all .2s ease;\n transition: all .2s ease;\n -webkit-user-select: none;\n -moz-user-select: none;\n -ms-user-select: none;\n user-select: none;\n vertical-align: middle;\n white-space: nowrap;\n}\n.btn.reset,\n.btn.panic {\n color: #b00;\n}\n.btn.btn-primary {\n background: #11f;\n border-color: #11f;\n color: white;\n}\n.btn[disabled] {\n color: #bbb !important;\n border-color: #bbb !important;\n background: white !important;\n cursor: default;\n}\n.btn.btn-primary:focus,\n.btn.btn-primary:hover {\n background: #0808ee;\n color: white !important;\n}\n.row .btn {\n margin: 0 5px 0 0;\n}\ninput[type=text] {\n border: 1px solid #888;\n padding: 4px;\n font-size: 15px;\n}\n\n\n/* tables on metadata pages */\n\ntable {\n border: 0;\n margin: 0;\n padding: 0;\n border-spacing: 0;\n}\n.tableObject td,\n.tableObject th {\n padding: 3px;\n vertical-align: top;\n}\n.tableObject hr {\n width: 100%;\n color: transparent;\n border: 0;\n border-bottom: 1px solid #bbb;\n align: left;\n margin: 3px 0;\n padding: 0;\n}\n.tableObject th,\n.tableTuples th {\n min-width: 145px;\n text-align: left;\n text-transform: capitalize;\n padding: 3px;\n padding-right: 10px;\n font-weight: 300;\n color: #333;\n}\n.tableTuples td {\n text-align: right;\n padding: 3px;\n}\n.tableObject td {\n font-weight: normal;\n color: #000;\n}\n.tableObject .tableObject {\n border: 1px solid #ddd;\n}\n.tableArray {\n border: 1px solid #ddd;\n border-spacing: 0;\n}\n.tableArray td {\n border-bottom: 1px solid #ddd;\n}\n.gray {\n font-size: 12px;\n color: #888;\n display: block;\n}\n.sha256.heading {\n margin: 20px 0 0px;\n}\n.gray span {\n padding-right: 5px;\n}\n.gray {\n margin-bottom: 10px;\n}\n.gray a {\n color: #666;\n}\n\n.verified {\n color: #080;\n font-weight: bold;\n}\n.unverified 
{\n color: #f00;\n font-weight: 300;\n}\n\n.loading, .error {\n font-weight: normal;\n margin: 10px 0;\n padding: 3px;\n font-size: 24px;\n}\n\n.title {\n text-transform: capitalize;\n}\n.rect {\n position: absolute;\n}\n.rect { border: 1px solid rgba(0,0,255); background-color: rgba(0,0,255,0.1); }\n\n/* videos / video preloader */\n\nvideo {\n max-width: 640px;\n margin: 10px 0;\n}\n.video {\n margin: 0 0 10px 0;\n}\n.video .bg {\n cursor: pointer;\n position: relative;\n background-size: cover;\n}\n.video .play {\n position: absolute;\n top: 50%;\n left: 50%;\n transform: translate3d(-50%, -50%, 0);\n width: 20%;\n height: 20%;\n background-image: url(/search/static/img/play.png);\n background-position: center center;\n background-size: contain;\n background-repeat: no-repeat;\n}\n.desktop .video .play:hover {\n -webkit-filter: invert(60%) sepia(100%) saturate(500%) hue-rotate(160deg);\n}\n\n/* spectre.css loader */\n\n.loaderWrapper {\n display: inline-block;\n position: relative;\n width: .8rem;\n height: .8rem;\n padding: 10px;\n}\n.loader {\n color: transparent !important;\n min-height: .8rem;\n pointer-events: none;\n position: relative;\n}\n\n.loader::after {\n animation: loader 500ms infinite linear;\n border: .1rem solid #5755d9;\n border-radius: 50%;\n border-right-color: transparent;\n border-top-color: transparent;\n content: "";\n display: block;\n height: .8rem;\n left: 50%;\n margin-left: -.4rem;\n margin-top: -.4rem;\n position: absolute;\n top: 50%;\n width: .8rem;\n z-index: 1;\n}\n\n.loader.loader-lg {\n min-height: 2rem;\n}\n\n.loader.loader-lg::after {\n height: 1.6rem;\n margin-left: -.8rem;\n margin-top: -.8rem;\n width: 1.6rem;\n}\n\n@keyframes loader {\n 0% {\n transform: rotate(0deg);\n }\n 100% {\n transform: rotate(360deg);\n }\n}',""])},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var 
r=h(n(4)),o=h(n(5)),i=h(n(6)),a=h(n(7)),u=h(n(8)),s=h(n(9)),c=n(1),l=h(c),f=n(15),d=n(2),p=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}(n(112));function h(e){return e&&e.__esModule?e:{default:e}}var m=function(e){function t(){return(0,i.default)(this,t),(0,u.default)(this,(t.__proto__||(0,o.default)(t)).apply(this,arguments))}return(0,s.default)(t,e),(0,a.default)(t,[{key:"componentDidMount",value:function(){var e=this.props.match.params.hash;this.fetch(e)}},{key:"componentDidUpdate",value:function(e){var t=this.props.match.params.hash,n=e.match.params.hash;t&&t!==n&&this.fetch(t)}},{key:"fetch",value:function(e){this.props.actions.setHash(e),this.props.actions.fetchMediaRecord(e),this.props.actions.fetchMetadata(e)}},{key:"render",value:function(){return l.default.createElement("span",{className:"gray sha256 heading"},"sha256: ",this.props.hash)}}]),t}(c.Component);t.default=(0,d.connect)(function(e){return{hash:e.metadata.hash}},function(e){return{actions:(0,f.bindActionCreators)((0,r.default)({},p),e)}})(m)},function(e,t,n){var r,o,i;!function(n,a){o=[t,e],void 0===(i="function"==typeof(r=a)?r.apply(t,o):r)||(e.exports=i)}(0,function(e,t){"use strict";var n={timeout:5e3,jsonpCallback:"callback",jsonpCallbackFunction:null};function r(e){try{delete window[e]}catch(t){window[e]=void 0}}function o(e){var t=document.getElementById(e);t&&document.getElementsByTagName("head")[0].removeChild(t)}t.exports=function(e){var t=arguments.length<=1||void 0===arguments[1]?{}:arguments[1],i=e,a=t.timeout||n.timeout,u=t.jsonpCallback||n.jsonpCallback,s=void 0;return new Promise(function(n,c){var l=t.jsonpCallbackFunction||"jsonp_"+Date.now()+"_"+Math.ceil(1e5*Math.random()),f=u+"_"+l;window[l]=function(e){n({ok:!0,json:function(){return Promise.resolve(e)}}),s&&clearTimeout(s),o(f),r(l)},i+=-1===i.indexOf("?")?"?":"&";var 
d=document.createElement("script");d.setAttribute("src",""+i+u+"="+l),t.charset&&d.setAttribute("charset",t.charset),d.id=f,document.getElementsByTagName("head")[0].appendChild(d),s=setTimeout(function(){c(new Error("JSONP request to "+e+" timed out")),r(l),o(f),window[l]=function(){r(l)}},a),d.onerror=function(){c(new Error("JSONP request to "+e+" failed")),r(l),o(f),s&&clearTimeout(s)}})}})},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=p(n(4)),o=p(n(5)),i=p(n(6)),a=p(n(7)),u=p(n(8)),s=p(n(9)),c=n(1),l=p(c),f=n(2),d=n(11);function p(e){return e&&e.__esModule?e:{default:e}}var h=function(e){function t(){return(0,i.default)(this,t),(0,u.default)(this,(t.__proto__||(0,o.default)(t)).apply(this,arguments))}return(0,s.default)(t,e),(0,a.default)(t,[{key:"render",value:function(){var e=this.props,t=e.data,n=e.summary,r=t.metadata.mediainfo,o=r.audio,i=r.video,a=[];return i&&a.push(l.default.createElement(d.TableObject,{key:"video",tag:"mediaInfo: video",object:i,order:["width","height","encoded_date","tagged_date","frame_count","frame_rate","aspect_ratio","duration"],summary:n})),o&&a.push(l.default.createElement(d.TableObject,{key:"audio",tag:"mediaInfo: audio",object:o,order:["codec","encoded_date"],summary:n})),l.default.createElement("div",null,a||l.default.createElement("div",null,"No media info found"))}}]),t}(c.Component);t.default=(0,f.connect)(function(e){return{tag:"mediainfo"}})(function(e){return l.default.createElement(d.Gate,(0,r.default)({View:h},e))})},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=d(n(5)),o=d(n(6)),i=d(n(7)),a=d(n(8)),u=d(n(9)),s=n(1),c=d(s),l=n(2),f=n(11);function d(e){return e&&e.__esModule?e:{default:e}}var p=function(e){function t(){return(0,o.default)(this,t),(0,a.default)(this,(t.__proto__||(0,r.default)(t)).apply(this,arguments))}return(0,u.default)(t,e),(0,i.default)(t,[{key:"render",value:function(){return 
c.default.createElement(f.TableObject,{tag:"mediaRecord",object:this.props.mediaRecord})}}]),t}(s.Component);t.default=(0,l.connect)(function(e){return{mediaRecord:e.metadata.mediaRecord}})(p)},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=function(){return r.default.createElement("div",null,r.default.createElement(o.Sugarcube,{summary:!0}),r.default.createElement(o.MediaRecord,null),r.default.createElement(o.MediaInfo,{summary:!0}),r.default.createElement(o.Places365,{summary:!0}),r.default.createElement(o.Coco,{summary:!0}))};var r=function(e){return e&&e.__esModule?e:{default:e}}(n(1)),o=n(208)},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=h(n(4)),o=h(n(5)),i=h(n(6)),a=h(n(7)),u=h(n(8)),s=h(n(9)),c=n(1),l=h(c),f=n(2),d=n(17),p=n(11);function h(e){return e&&e.__esModule?e:{default:e}}var m=function(e){function t(){return(0,i.default)(this,t),(0,u.default)(this,(t.__proto__||(0,o.default)(t)).apply(this,arguments))}return(0,s.default)(t,e),(0,a.default)(t,[{key:"render",value:function(){var e=this.props,t=e.data,n=e.list,r=t.sha256,o=t.verified,i=t.metadata.keyframe,a=(n?[n]:["dense","basic","expanded"]).map(function(e){return l.default.createElement("div",{key:e},l.default.createElement("h3",null,e),l.default.createElement("ul",{className:"meta"},l.default.createElement("li",null,(0,d.courtesyS)(i[e].length,"frame"))),l.default.createElement("div",{className:"thumbnails"},i[e].map(function(e){return l.default.createElement(p.Keyframe,{key:e,sha256:r,verified:o,frame:e,size:"th",showFrame:!0,showTimestamp:!0})})))});return l.default.createElement("div",{className:"keyframeLists"},a)}}]),t}(c.Component);t.default=(0,f.connect)(function(){return{tag:"keyframe"}})(function(e){return l.default.createElement(p.Gate,(0,r.default)({View:m},e))})},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var 
r=_(n(4)),o=_(n(28)),i=_(n(5)),a=_(n(6)),u=_(n(7)),s=_(n(8)),c=_(n(9)),l=n(1),f=_(l),d=n(16),p=n(2),h=n(17),m=n(11),v=g(n(21)),y=g(n(113));function g(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}function _(e){return e&&e.__esModule?e:{default:e}}var b=function(e){function t(){return(0,a.default)(this,t),(0,s.default)(this,(t.__proto__||(0,i.default)(t)).apply(this,arguments))}return(0,c.default)(t,e),(0,u.default)(t,[{key:"render",value:function(){var e=this.props,t=e.app,n=e.data,r=e.match,i=parseInt(r.params.frame,10),a=n.sha256,u=n.verified,s=t.mediainfo.metadata.mediainfo.video,c=s.width,l=s.height,p=s.aspect_ratio,g=t.keyframe.metadata.keyframe,_={places365:t.places365.metadata.places365[i],coco:t.coco.metadata.coco[i]},b=(0,o.default)(t.coco.metadata.coco).map(function(e){return parseInt(e,10)}).sort(function(e,t){return e-t}),w=b.length,x=b.indexOf(i),E=(x-1+w)%w,O=(x+1)%w,S=["dense","basic","expanded"].map(function(e){return e in g&&e}).filter(function(e){return!!e}).join(", "),T=["th","sm","md","lg"].map(function(e){return f.default.createElement("span",{key:e},f.default.createElement("a",{href:(0,h.imageUrl)(u,a,i,e),target:"_blank",rel:"noopener noreferrer"},"[",e,"]")," ")});return f.default.createElement("div",{className:"keyframeSummary"},f.default.createElement("h2",null,"Frame #",i),f.default.createElement("ul",{className:"meta"},f.default.createElement("li",null,f.default.createElement(d.Link,{to:(0,h.keyframeUri)(a,b[E])},"← #",b[E])),f.default.createElement("li",null,f.default.createElement(d.Link,{to:(0,h.metadataUri)(a,"keyframe")},"Index")),f.default.createElement("li",null,f.default.createElement(d.Link,{to:(0,h.keyframeUri)(a,b[O])},"#",b[O]," 
→"))),f.default.createElement(m.Keyframe,{sha256:a,frame:i,verified:u,size:"md",to:(0,h.imageUrl)(u,a,i,"lg"),aspectRatio:p,detectionList:[{labels:y.coco,detections:_.coco}]}),f.default.createElement(d.Link,{to:v.publicUrl.searchByVerifiedFrame(u,a,i),className:"btn"},"Search"),f.default.createElement(m.TableTuples,{tag:"Metadata",list:[["Width",c],["Height",l],["Keyframe sets",S],["Sizes",{_raw:!0,value:T}]]}),f.default.createElement(m.DetectionList,{tag:"Places365",detections:_.places365,labels:y.places365,showEmpty:!0}),f.default.createElement(m.DetectionList,{tag:"Coco",detections:_.coco,labels:y.coco,showEmpty:!0}))}}]),t}(l.Component);t.default=(0,p.connect)(function(){return{tag:"keyframe"}})(function(e){return f.default.createElement(m.Gate,(0,r.default)({View:b},e))})},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default="person\nbicycle\ncar\nmotorbike\naeroplane\nbus\ntrain\ntruck\nboat\ntraffic light\nfire hydrant\nstop sign\nparking meter\nbench\nbird\ncat\ndog\nhorse\nsheep\ncow\nelephant\nbear\nzebra\ngiraffe\nbackpack\numbrella\nhandbag\ntie\nsuitcase\nfrisbee\nskis\nsnowboard\nsports ball\nkite\nbaseball bat\nbaseball glove\nskateboard\nsurfboard\ntennis racket\nbottle\nwine glass\ncup\nfork\nknife\nspoon\nbowl\nbanana\napple\nsandwich\norange\nbroccoli\ncarrot\nhot dog\npizza\ndonut\ncake\nchair\nsofa\npottedplant\nbed\ndiningtable\ntoilet\ntvmonitor\nlaptop\nmouse\nremote\nkeyboard\ncell phone\nmicrowave\noven\ntoaster\nsink\nrefrigerator\nbook\nclock\nvase\nscissors\nteddy bear\nhair drier\ntoothbrush".split("\n")},function(e,t,n){"use 
strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default="airfield\nairplane_cabin\nairport_terminal\nalcove\nalley\namphitheater\namusement_arcade\namusement_park\napartment_building/outdoor\naquarium\naqueduct\narcade\narch\narchaelogical_excavation\narchive\narena/hockey\narena/performance\narena/rodeo\narmy_base\nart_gallery\nart_school\nart_studio\nartists_loft\nassembly_line\nathletic_field/outdoor\natrium/public\nattic\nauditorium\nauto_factory\nauto_showroom\nbadlands\nbakery/shop\nbalcony/exterior\nbalcony/interior\nball_pit\nballroom\nbamboo_forest\nbank_vault\nbanquet_hall\nbar\nbarn\nbarndoor\nbaseball_field\nbasement\nbasketball_court/indoor\nbathroom\nbazaar/indoor\nbazaar/outdoor\nbeach\nbeach_house\nbeauty_salon\nbedchamber\nbedroom\nbeer_garden\nbeer_hall\nberth\nbiology_laboratory\nboardwalk\nboat_deck\nboathouse\nbookstore\nbooth/indoor\nbotanical_garden\nbow_window/indoor\nbowling_alley\nboxing_ring\nbridge\nbuilding_facade\nbullring\nburial_chamber\nbus_interior\nbus_station/indoor\nbutchers_shop\nbutte\ncabin/outdoor\ncafeteria\ncampsite\ncampus\ncanal/natural\ncanal/urban\ncandy_store\ncanyon\ncar_interior\ncarrousel\ncastle\ncatacomb\ncemetery\nchalet\nchemistry_lab\nchilds_room\nchurch/indoor\nchurch/outdoor\nclassroom\nclean_room\ncliff\ncloset\nclothing_store\ncoast\ncockpit\ncoffee_shop\ncomputer_room\nconference_center\nconference_room\nconstruction_site\ncorn_field\ncorral\ncorridor\ncottage\ncourthouse\ncourtyard\ncreek\ncrevasse\ncrosswalk\ndam\ndelicatessen\ndepartment_store\ndesert/sand\ndesert/vegetation\ndesert_road\ndiner/outdoor\ndining_hall\ndining_room\ndiscotheque\ndoorway/outdoor\ndorm_room\ndowntown\ndressing_room\ndriveway\ndrugstore\nelevator/door\nelevator_lobby\nelevator_shaft\nembassy\nengine_room\nentrance_hall\nescalator/indoor\nexcavation\nfabric_store\nfarm\nfastfood_restaurant\nfield/cultivated\nfield/wild\nfield_road\nfire_escape\nfire_station\nfishpond\nflea_market/indoor\nflorist_shop/indoor\nfood_cou
rt\nfootball_field\nforest/broadleaf\nforest_path\nforest_road\nformal_garden\nfountain\ngalley\ngarage/indoor\ngarage/outdoor\ngas_station\ngazebo/exterior\ngeneral_store/indoor\ngeneral_store/outdoor\ngift_shop\nglacier\ngolf_course\ngreenhouse/indoor\ngreenhouse/outdoor\ngrotto\ngymnasium/indoor\nhangar/indoor\nhangar/outdoor\nharbor\nhardware_store\nhayfield\nheliport\nhighway\nhome_office\nhome_theater\nhospital\nhospital_room\nhot_spring\nhotel/outdoor\nhotel_room\nhouse\nhunting_lodge/outdoor\nice_cream_parlor\nice_floe\nice_shelf\nice_skating_rink/indoor\nice_skating_rink/outdoor\niceberg\nigloo\nindustrial_area\ninn/outdoor\nislet\njacuzzi/indoor\njail_cell\njapanese_garden\njewelry_shop\njunkyard\nkasbah\nkennel/outdoor\nkindergarden_classroom\nkitchen\nlagoon\nlake/natural\nlandfill\nlanding_deck\nlaundromat\nlawn\nlecture_room\nlegislative_chamber\nlibrary/indoor\nlibrary/outdoor\nlighthouse\nliving_room\nloading_dock\nlobby\nlock_chamber\nlocker_room\nmansion\nmanufactured_home\nmarket/indoor\nmarket/outdoor\nmarsh\nmartial_arts_gym\nmausoleum\nmedina\nmezzanine\nmoat/water\nmosque/outdoor\nmotel\nmountain\nmountain_path\nmountain_snowy\nmovie_theater/indoor\nmuseum/indoor\nmuseum/outdoor\nmusic_studio\nnatural_history_museum\nnursery\nnursing_home\noast_house\nocean\noffice\noffice_building\noffice_cubicles\noilrig\noperating_room\norchard\norchestra_pit\npagoda\npalace\npantry\npark\nparking_garage/indoor\nparking_garage/outdoor\nparking_lot\npasture\npatio\npavilion\npet_shop\npharmacy\nphone_booth\nphysics_laboratory\npicnic_area\npier\npizzeria\nplayground\nplayroom\nplaza\npond\nporch\npromenade\npub/indoor\nracecourse\nraceway\nraft\nrailroad_track\nrainforest\nreception\nrecreation_room\nrepair_shop\nresidential_neighborhood\nrestaurant\nrestaurant_kitchen\nrestaurant_patio\nrice_paddy\nriver\nrock_arch\nroof_garden\nrope_bridge\nruin\nrunway\nsandbox\nsauna\nschoolhouse\nscience_museum\nserver_room\nshed\nshoe_shop\nshopfront\nshopping_mall/ind
oor\nshower\nski_resort\nski_slope\nsky\nskyscraper\nslum\nsnowfield\nsoccer_field\nstable\nstadium/baseball\nstadium/football\nstadium/soccer\nstage/indoor\nstage/outdoor\nstaircase\nstorage_room\nstreet\nsubway_station/platform\nsupermarket\nsushi_bar\nswamp\nswimming_hole\nswimming_pool/indoor\nswimming_pool/outdoor\nsynagogue/outdoor\ntelevision_room\ntelevision_studio\ntemple/asia\nthrone_room\nticket_booth\ntopiary_garden\ntower\ntoyshop\ntrain_interior\ntrain_station/platform\ntree_farm\ntree_house\ntrench\ntundra\nunderwater/ocean_deep\nutility_room\nvalley\nvegetable_garden\nveterinarians_office\nviaduct\nvillage\nvineyard\nvolcano\nvolleyball_court/outdoor\nwaiting_room\nwater_park\nwater_tower\nwaterfall\nwatering_hole\nwave\nwet_bar\nwheat_field\nwind_farm\nwindmill\nyard\nyouth_hostel\nzen_garden".split("\n")},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=p(n(4)),o=p(n(5)),i=p(n(6)),a=p(n(7)),u=p(n(8)),s=p(n(9)),c=n(1),l=p(c),f=n(2),d=n(11);function p(e){return e&&e.__esModule?e:{default:e}}var h=function(e){function t(){return(0,i.default)(this,t),(0,u.default)(this,(t.__proto__||(0,o.default)(t)).apply(this,arguments))}return(0,s.default)(t,e),(0,a.default)(t,[{key:"render",value:function(){var e=this.props.data.metadata;return l.default.createElement(d.TableObject,{tag:"Keyframe Status",object:e})}}]),t}(c.Component);t.default=(0,f.connect)(function(){return{tag:"keyframe_status"}})(function(e){return l.default.createElement(d.Gate,(0,r.default)({View:h},e))})},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=h(n(4)),o=h(n(5)),i=h(n(6)),a=h(n(7)),u=h(n(8)),s=h(n(9)),c=n(1),l=h(c),f=n(2),d=n(11),p=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}(n(113));function h(e){return e&&e.__esModule?e:{default:e}}var m=function(e){function 
t(){return(0,i.default)(this,t),(0,u.default)(this,(t.__proto__||(0,o.default)(t)).apply(this,arguments))}return(0,s.default)(t,e),(0,a.default)(t,[{key:"render",value:function(){var e=this.props,t=e.app,n=e.data,r=e.summary,o=e.showAll,i=n.metadata,a=n.sha256,u=n.verified,s=t.mediainfo.metadata.mediainfo.video.aspect_ratio;return console.log(this.props.data),l.default.createElement(d.Classifier,{tag:"Coco",sha256:a,verified:u,keyframes:i.coco,labels:p.coco,summary:r,aspectRatio:s,showAll:o})}}]),t}(c.Component);t.default=(0,f.connect)(function(){return{tag:"coco"}})(function(e){return l.default.createElement(d.Gate,(0,r.default)({View:m},e))})},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=h(n(4)),o=h(n(5)),i=h(n(6)),a=h(n(7)),u=h(n(8)),s=h(n(9)),c=n(1),l=h(c),f=n(2),d=n(11),p=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}(n(113));function h(e){return e&&e.__esModule?e:{default:e}}var m=function(e){function t(){return(0,i.default)(this,t),(0,u.default)(this,(t.__proto__||(0,o.default)(t)).apply(this,arguments))}return(0,s.default)(t,e),(0,a.default)(t,[{key:"render",value:function(){var e=this.props,t=e.data,n=e.summary,r=t.metadata,o=t.sha256,i=t.verified;return console.log(this.props.data),l.default.createElement(d.Classifier,{tag:"Places365",sha256:o,verified:i,keyframes:r.places365,labels:p.places365,summary:n})}}]),t}(c.Component);t.default=(0,f.connect)(function(){return{tag:"places365"}})(function(e){return l.default.createElement(d.Gate,(0,r.default)({View:m},e))})},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=p(n(4)),o=p(n(5)),i=p(n(6)),a=p(n(7)),u=p(n(8)),s=p(n(9)),c=n(1),l=p(c),f=n(2),d=n(11);function p(e){return e&&e.__esModule?e:{default:e}}var h=function(e){function t(){var e,n,r,a;(0,i.default)(this,t);for(var 
s=arguments.length,c=Array(s),l=0;l<s;l++)c[l]=arguments[l];return n=r=(0,u.default)(this,(e=t.__proto__||(0,o.default)(t)).call.apply(e,[this].concat(c))),r.state={playing:!1},a=n,(0,u.default)(r,a)}return(0,s.default)(t,e),(0,a.default)(t,[{key:"render",value:function(){var e=this.props,t=e.data,n=e.summary,r=(this.state.playing,t.metadata.sugarcube);r.fp.replace("/var/www/files/","https://cube.syrianarchive.org/");return l.default.createElement("div",{className:"sugarcube"},l.default.createElement(d.Video,null),!n&&l.default.createElement(d.TableObject,{tag:"Sugarcube",object:r}))}}]),t}(c.Component);t.default=(0,f.connect)(function(){return{tag:"sugarcube"}})(function(e){return l.default.createElement(d.Gate,(0,r.default)({View:h},e))})},function(e,t,n){var r=n(516);"string"==typeof r&&(r=[[e.i,r,""]]);var o={hmr:!0,transform:void 0,insertInto:void 0};n(49)(r,o);r.locals&&(e.exports=r.locals)},function(e,t,n){(e.exports=n(48)(!1)).push([e.i,".thumbnails {\n display: flex;\n flex-direction: row;\n flex-wrap: wrap;\n padding: 3px;\n}\n.keyframe {\n padding: 3px 3px 3px 3px;\n margin: 0 10px 20px 0;\n box-shadow: 0 0 0 rgba(0,0,0,0);\n transition: all 0.4ms;\n}\n.keyframe.isSaved {\n background-color: #ecebb5;\n box-shadow: 0 2px 2px rgba(0,0,0,0.2);\n}\n.keyframe img {\n background-color: #eee;\n}\n.thumbnails a {\n text-decoration: none;\n}\n.desktop .thumbnails .keyframe:hover {\n background: #eee;\n}\n.keyframe a {\n position: relative;\n display: block;\n}\n.detectionList label,\n.keyframe label {\n display: flex;\n flex-direction: row;\n justify-content: space-between;\n padding-bottom: 3px;\n}\n.keyframe label {\n color: #888;\n font-size: 14px;\n}\n.keyframe.th, .keyframe.th img { width: 160px; }\n.keyframe.sm, .keyframe.sm img { width: 320px; }\n.keyframe.md, .keyframe.md img { width: 640px; }\n.keyframe.lg, .keyframe.lg img { width: 1280px; }\n.keyframe.th img {\n min-height: 90px;\n}\n.keyframe .sha256 {\n display: inline-block;\n min-width: auto;\n 
max-width: 60px;\n margin-right: 5px;\n overflow: hidden;\n}\n.keyframe label small {\n display: flex;\n align-items: flex-start;\n}\n.keyframes {\n display: flex;\n flex-direction: row;\n flex-wrap: wrap;\n}\n\n.keyframeSummary .tableTuples td {\n text-align: left;\n}\n.keyframeSummary .detectionList {\n width: 326px;\n display: block;\n}\n.keyframeSummary .detectionList small {\n padding: 3px;\n font-size: inherit;\n}\n\nul.meta {\n list-style-type: none;\n margin: 3px 0 10px 0;\n padding: 0;\n padding-left: 3px;\n font-size: 12px;\n}\nul.meta li {\n list-style-type: none;\n display: inline-block;\n margin: 0; padding: 0;\n}\nul.meta li:first-child:before {\n content: '';\n padding: 0;\n}\nul.meta li:before {\n content: '\\B7';\n padding: 0 5px;\n}\n\n.sugarcube {\n margin-top: 10px;\n}",""])},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.Browse=t.Results=t.Query=t.Meta=t.Container=t.Menu=void 0;var r=c(n(518)),o=c(n(520)),i=c(n(114)),a=c(n(210)),u=c(n(211)),s=c(n(525));function c(e){return e&&e.__esModule?e:{default:e}}n(526),t.Menu=r.default,t.Container=o.default,t.Meta=i.default,t.Query=a.default,t.Results=u.default,t.Browse=s.default},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=v(n(4)),o=v(n(5)),i=v(n(6)),a=v(n(7)),u=v(n(8)),s=v(n(9)),c=n(1),l=v(c),f=n(16),d=n(15),p=n(2),h=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}(n(21)),m=v(n(519));function v(e){return e&&e.__esModule?e:{default:e}}var y=function(e){function t(){return(0,i.default)(this,t),(0,u.default)(this,(t.__proto__||(0,o.default)(t)).apply(this,arguments))}return(0,s.default)(t,e),(0,a.default)(t,[{key:"upload",value:function(e){var t=e.dataTransfer?e.dataTransfer.files:e.target.files,n=void 0,r=void 
0;for(n=0;n<t.length&&(!(r=t[n])||!r.type.match("image.*"));n++);r&&this.props.actions.upload(r)}},{key:"random",value:function(){this.props.actions.random()}},{key:"render",value:function(){var e=this,t=this.props,n=t.savedCount,r=t.options;return l.default.createElement("div",{className:"searchForm row"},l.default.createElement("div",{className:"row"},l.default.createElement("div",{className:"upload"},l.default.createElement("button",{className:"btn"},l.default.createElement("span",null,"⤴")," Search by Upload"),l.default.createElement("input",{type:"file",name:"img",accept:"image/*",onChange:this.upload.bind(this),required:!0})),l.default.createElement("button",{className:"btn random",onClick:this.random.bind(this)},l.default.createElement("span",null,"♘")," Random"),l.default.createElement(m.default,null),l.default.createElement(f.Link,{to:h.publicUrl.review()},l.default.createElement("button",{className:"btn btn-primary"},l.default.createElement("span",null,"⇪")," "+n+" Saved Image"+(1===n?"":"s")))),l.default.createElement("div",{className:"row searchOptions"},l.default.createElement("select",{className:"form-select",onChange:function(t){return e.props.actions.updateOptions({thumbnailSize:t.target.value})},value:r.thumbnailSize},l.default.createElement("option",{value:"th"},"Thumbnail"),l.default.createElement("option",{value:"sm"},"Small"),l.default.createElement("option",{value:"md"},"Medium"),l.default.createElement("option",{value:"lg"},"Large")),l.default.createElement("label",{className:"row"},l.default.createElement("input",{type:"checkbox",checked:r.groupByHash,onChange:function(t){return e.props.actions.updateOptions({groupByHash:t.target.checked})}})," Group by hash"),l.default.createElement("label",{className:"row"},l.default.createElement("input",{type:"number",value:r.perPage,className:"perPage",min:1,max:100,onChange:function(t){return e.props.actions.updateOptions({perPage:t.target.value})},onBlur:function(){return window.location.reload()}})," 
per page")))}}]),t}(c.Component);t.default=(0,p.connect)(function(e){return{options:e.search.options,savedCount:e.review.count}},function(e){return{actions:(0,d.bindActionCreators)((0,r.default)({},h),e)}})(y)},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=h(n(5)),o=h(n(6)),i=h(n(7)),a=h(n(8)),u=h(n(9)),s=n(1),c=h(s),l=n(16),f=n(15),d=n(2),p=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}(n(21));function h(e){return e&&e.__esModule?e:{default:e}}var m=function(e){function t(){(0,o.default)(this,t);var e=(0,a.default)(this,(t.__proto__||(0,r.default)(t)).call(this));return e.keydown=e.keydown.bind(e),e}return(0,u.default)(t,e),(0,i.default)(t,[{key:"componentDidMount",value:function(){document.addEventListener("keydown",this.keydown)}},{key:"componentWillUnmount",value:function(){document.removeEventListener("keydown",this.keydown)}},{key:"keydown",value:function(e){27===e.keyCode&&this.panic()}},{key:"panic",value:function(){this.props.actions.panic(),this.props.history.push("/search/")}},{key:"render",value:function(){var e=this;return c.default.createElement("button",{className:"btn panic",onClick:function(){return e.panic()}},c.default.createElement("span",null,"⚠")," Panic")}}]),t}(s.Component);t.default=(0,l.withRouter)((0,d.connect)(function(e){return{}},function(e){return{actions:(0,f.bindActionCreators)({panic:p.panic},e)}})(m))},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=x(n(4)),o=x(n(93)),i=x(n(5)),a=x(n(6)),u=x(n(7)),s=x(n(8)),c=x(n(9)),l=n(1),f=x(l),d=n(16),p=n(15),h=n(2),m=w(n(209)),v=w(n(21)),y=w(n(112)),g=x(n(210)),_=x(n(211)),b=x(n(524));function w(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}function x(e){return e&&e.__esModule?e:{default:e}}var E=function(e){function 
t(){return(0,a.default)(this,t),(0,s.default)(this,(t.__proto__||(0,i.default)(t)).apply(this,arguments))}return(0,c.default)(t,e),(0,u.default)(t,[{key:"componentDidMount",value:function(){var e=m.parse(this.props.location.search.substr(1));e&&e.url?this.props.searchActions.search(e.url):this.searchByHash()}},{key:"componentDidUpdate",value:function(e){e.match.params!==this.props.match.params&&(0,o.default)(this.props.match.params)!==(0,o.default)(e.match.params)&&this.searchByHash()}},{key:"searchByHash",value:function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:0,t=this.props.match.params,n=t.verified,r=t.hash,o=t.frame;n&&r&&o?this.props.searchActions.searchByVerifiedFrame(n,r,o,e):r&&o&&this.props.searchActions.searchByFrame(r,o,e),r&&!e&&this.props.metadataActions.fetchMetadata(r)}},{key:"searchByOffset",value:function(){var e=this.props.query.results.length,t=m.parse(this.props.location.search.substr(1));t&&t.url?this.props.searchActions.search(t.url,e):this.searchByHash(e)}},{key:"render",value:function(){var e=this,t=this.props.query,n=t.query,r=t.results,o=t.loadingMore,i=this.props.options,a=!0;n&&!n.reset&&!n.loading&&r&&r.length||(a=!1);var u=r&&r.length>Math.min(i.perPage,30),s=r&&r.length>i.perPage;return f.default.createElement("div",{className:"searchContainer"},f.default.createElement(g.default,null),f.default.createElement(_.default,null),a?o?f.default.createElement("div",{className:"loadingMore"},"Loading more results..."):f.default.createElement("button",{onClick:function(){return e.searchByOffset()},className:u?"btn loadMore wide":"btn loadMore"},"Load 
more"):f.default.createElement("div",null),!s&&f.default.createElement(b.default,null))}}]),t}(l.Component);t.default=(0,d.withRouter)((0,h.connect)(function(e){return{query:e.search.query,options:e.search.options,metadata:e.metadata}},function(e){return{searchActions:(0,p.bindActionCreators)((0,r.default)({},v),e),metadataActions:(0,p.bindActionCreators)((0,r.default)({},y),e)}})(E))},function(e,t,n){"use strict";function r(e,t){return Object.prototype.hasOwnProperty.call(e,t)}e.exports=function(e,t,n,i){t=t||"&",n=n||"=";var a={};if("string"!=typeof e||0===e.length)return a;var u=/\+/g;e=e.split(t);var s=1e3;i&&"number"==typeof i.maxKeys&&(s=i.maxKeys);var c=e.length;s>0&&c>s&&(c=s);for(var l=0;l<c;++l){var f,d,p,h,m=e[l].replace(u,"%20"),v=m.indexOf(n);v>=0?(f=m.substr(0,v),d=m.substr(v+1)):(f=m,d=""),p=decodeURIComponent(f),h=decodeURIComponent(d),r(a,p)?o(a[p])?a[p].push(h):a[p]=[a[p],h]:a[p]=h}return a};var o=Array.isArray||function(e){return"[object Array]"===Object.prototype.toString.call(e)}},function(e,t,n){"use strict";var r=function(e){switch(typeof e){case"string":return e;case"boolean":return e?"true":"false";case"number":return isFinite(e)?e:"";default:return""}};e.exports=function(e,t,n,u){return t=t||"&",n=n||"=",null===e&&(e=void 0),"object"==typeof e?i(a(e),function(a){var u=encodeURIComponent(r(a))+n;return o(e[a])?i(e[a],function(e){return u+encodeURIComponent(r(e))}).join(t):u+encodeURIComponent(r(e[a]))}).join(t):u?encodeURIComponent(r(u))+n+encodeURIComponent(r(e)):""};var o=Array.isArray||function(e){return"[object Array]"===Object.prototype.toString.call(e)};function i(e,t){if(e.map)return e.map(t);for(var n=[],r=0;r<e.length;r++)n.push(t(e[r],r));return n}var a=Object.keys||function(e){var t=[];for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&t.push(n);return t}},function(e,t){var n=window.Blob,r=100==new n([new Uint8Array(100)]).size;e.exports=function(e){for(var t=e.split(",")[1],o=atob(t),i=new ArrayBuffer(o.length),a=new 
Uint8Array(i),u=0;u<o.length;u++)a[u]=o.charCodeAt(u);r||(a=i);var s=new n([a],{type:function(e){return e.split(";")[0].slice(5)}(e)});return s.slice=s.slice||s.webkitSlice,s}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=function(){return r.default.createElement("div",{className:"safety"},r.default.createElement("div",null,r.default.createElement("h4",null,"Safety Tips"),r.default.createElement("ul",null,r.default.createElement("li",null," Look away if you see something traumatic "),r.default.createElement("li",null," Hit ",r.default.createElement("tt",null,"ESC")," to activate panic mode (hides all images) "),r.default.createElement("li",null," Use thumbnails to reduce details "),r.default.createElement("li",null," Take breaks and refresh yourself with positive imagery "))))};var r=function(e){return e&&e.__esModule?e:{default:e}}(n(1))},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=_(n(4)),o=_(n(5)),i=_(n(6)),a=_(n(7)),u=_(n(8)),s=_(n(9)),c=n(1),l=_(c),f=n(16),d=n(15),p=n(2),h=n(11),m=g(n(21)),v=g(n(112)),y=_(n(114));function g(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}function _(e){return e&&e.__esModule?e:{default:e}}var b=function(e){function t(){return(0,i.default)(this,t),(0,u.default)(this,(t.__proto__||(0,o.default)(t)).apply(this,arguments))}return(0,s.default)(t,e),(0,a.default)(t,[{key:"componentDidMount",value:function(){this.browse()}},{key:"componentDidUpdate",value:function(e){e.match.params!==this.props.match.params&&this.browse()}},{key:"browse",value:function(){var e=this.props.match.params.hash;e&&this.props.searchActions.browse(e),e&&this.props.metadataActions.fetchMetadata(e)}},{key:"render",value:function(){var e=this.props,t=e.browse;e.options;return console.log("browse",t),!t||t.reset||t.loading?l.default.createElement("div",{className:"browseComponent 
column"},l.default.createElement("h3",null,"Loading keyframes..."),l.default.createElement(h.Loader,null)):l.default.createElement("div",{className:"browseComponent column"},l.default.createElement("h3",null,"Video Preview"),l.default.createElement(h.Video,{size:"md"}),l.default.createElement(y.default,{query:t,sugarcube:!0}),l.default.createElement("div",{className:"row buttons"},l.default.createElement(f.Link,{to:"/metadata/"+t.hash,className:"btn"},"View Full Metadata")),l.default.createElement("h3",null,"Keyframes"),l.default.createElement(h.Keyframes,{frames:t.frames,showHash:!0,showTimestamp:!0,showSearchButton:!0,showSaveButton:!0}))}}]),t}(c.Component);t.default=(0,f.withRouter)((0,p.connect)(function(e){return{browse:e.search.browse,options:e.search.options,metadata:e.metadata}},function(e){return{searchActions:(0,d.bindActionCreators)((0,r.default)({},m),e),metadataActions:(0,d.bindActionCreators)((0,r.default)({},v),e)}})(b))},function(e,t,n){var r=n(527);"string"==typeof r&&(r=[[e.i,r,""]]);var o={hmr:!0,transform:void 0,insertInto:void 0};n(49)(r,o);r.locals&&(e.exports=r.locals)},function(e,t,n){(e.exports=n(48)(!1)).push([e.i,".btn span {\n font-size: large;\n}\n.row {\n display: flex;\n flex-direction: row;\n}\n.column {\n display: flex;\n flex-direction: column;\n}\n\n.searchContainer h3 {\n padding: 0;\n margin-top: 0;\n margin-bottom: 5px;\n margin-left: 3px;\n}\n.searchContainer h4 {\n margin-left: 0;\n width: 100%;\n}\n.searchContainer .subtitle {\n display: block;\n margin-left: 3px;\n margin-bottom: 10px;\n}\n.searchForm {\n display: flex;\n justify-content: space-between;\n align-items: center;\n width: 100%;\n padding: 20px;\n background: #eee;\n}\n.searchForm .row {\n align-items: center;\n}\n\n.searchMeta {\n display: flex;\n flex-direction: column;\n font-size: 14px;\n line-height: 18px;\n padding: 0;\n}\n.searchMeta span {\n white-space: nowrap;\n overflow: hidden;\n text-overflow: ellipsis;\n max-width: calc(100vw - 23px - 640px - 
30px);\n}\n\n.keyframe .thumbnail {\n position: relative;\n cursor: pointer;\n}\n.keyframe .searchButtons {\n position: absolute;\n bottom: 0; left: 0;\n padding: 0 5px 15px 5px;\n width: 100%;\n text-align: center;\n opacity: 0;\n transition: all 0.2s;\n}\n.desktop .keyframe .thumbnail:hover .searchButtons,\n.mobile .keyframe .searchButtons {\n opacity: 1;\n}\n.keyframe .searchButtons .btn {\n margin-right: 0;\n height: auto;\n padding: 0.15rem 0.3rem;\n}\n.keyframe a {\n text-decoration: none;\n}\n\n.body > div.searchForm {\n padding-bottom: 20px;\n}\n.upload {\n position: relative;\n cursor: pointer;\n}\n.upload .btn {\n pointer-events: none;\n cursor: pointer;\n}\n.upload input {\n position: absolute;\n top: 0; left: 0;\n width: 100%; height: 100%;\n opacity: 0;\n cursor: pointer;\n}\n\n.reviewSaved,\n.browseComponent,\n.searchQuery {\n margin: 0px 10px;\n padding: 13px;\n}\n.searchQuery img {\n cursor: crosshair;\n user-select: none;\n max-width: 640px;\n max-height: 480px;\n}\n.searchContainer .searchQuery h3 {\n margin-left: 0;\n margin-bottom: 10px;\n}\n\n.searchBox {\n min-width: 640px;\n margin: 0 10px 0 0;\n background-color: #eee;\n position: relative;\n}\n.searchBox img {\n display: block;\n}\n.searchBox .box {\n position: absolute;\n cursor: crosshair;\n border: 1px solid #11f;\n background-color: rgba(16,16,255,0.1);\n}\n\n.searchResults {\n margin: 0 20px 20px 20px;\n display: flex;\n flex-direction: row;\n flex-wrap: wrap;\n}\n.searchResultsHeading {\n width: 100%;\n}\n.searchOptions .row {\n font-size: 12px;\n margin-left: 10px;\n}\n.searchOptions input {\n font-size: 12px;\n margin-right: 5px;\n font-family: Helvetica, sans-serif;\n}\n.searchOptions input[type=text],\n.searchOptions input[type=number] {\n width: 30px;\n text-align: right;\n}\n.keyframeGroup {\n max-width: 650px;\n display: flex;\n flex-direction: row;\n flex-wrap: wrap;\n align-items: flex-start;\n align-content: flex-start;\n justify-content: flex-start;\n}\n.keyframeGroup h4 a 
{\n color: #888;\n text-decoration: none\n}\n.keyframeGroup h4 a:hover {\n text-decoration: underline\n}\n\n/* load more button that gets bigger */\n\n.loadMore {\n width: 400px;\n margin: 20px;\n height: 40px;\n transition: all;\n}\n.loadMore.wide {\n width: calc(100% - 40px);\n margin: 20px;\n height: 100px;\n}\n.loadingMore {\n margin: 20px 20px 200px 20px;\n}\n\n/* health and safety warning */\n\n.safety div {\n display: inline-block;\n margin: 20px 20px;\n padding: 10px;\n background: #fff8e8;\n color: #111;\n box-shadow: 0 1px 2px rgba(0,0,0,0.2);\n font-size: 13px;\n line-height: 1.4;\n}\n.safety ul {\n margin: 0;\n padding: 0 21px;\n}\n.safety li {\n padding: 1px 0 0 0;\n}\n.safety h4 {\n margin-top: 5px;\n}\n\n/* browser section */\n\n.browseComponent h3 {\n margin-bottom: 10px;\n}\n.browseComponent .buttons {\n margin-top: 10px;\n}\n\n/* disable twiddle button on input[type=number] */\n\ninput::-webkit-outer-spin-button,\ninput::-webkit-inner-spin-button {\n -webkit-appearance: none;\n margin: 0;\n}\ninput[type='number'] {\n -moz-appearance:textfield;\n}\n",""])},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.Saved=void 0;var r=function(e){return e&&e.__esModule?e:{default:e}}(n(529));n(531),t.Saved=r.default},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=y(n(4)),o=y(n(28)),i=y(n(5)),a=y(n(6)),u=y(n(7)),s=y(n(8)),c=y(n(9)),l=n(1),f=y(l),d=n(15),p=n(2),h=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}(n(97)),m=n(11),v=y(n(530));function y(e){return e&&e.__esModule?e:{default:e}}var g=function(e){function t(){var e,n,r,o;(0,a.default)(this,t);for(var u=arguments.length,c=Array(u),l=0;l<u;l++)c[l]=arguments[l];return 
n=r=(0,s.default)(this,(e=t.__proto__||(0,i.default)(t)).call.apply(e,[this].concat(c))),r.state={showAnnotator:!1},o=n,(0,s.default)(r,o)}return(0,c.default)(t,e),(0,u.default)(t,[{key:"render",value:function(){var e=this,t=this.props.saved,n=this.state.showAnnotator,r=(0,o.default)(t).sort().map(function(e){var n=t[e],r=n.verified,i=n.hash,a=n.frames;return(0,o.default)(a).sort().map(function(e){return{verified:r,hash:i,frame:e}})}).reduce(function(e,t){return t&&t.length?e.concat(t):e},[]),i=0===r.length;return f.default.createElement("div",{className:"reviewSaved"},f.default.createElement("h2",null,"Saved Images"),f.default.createElement("div",{className:"reviewButtons"},f.default.createElement("button",{className:"btn",disabled:i,onClick:function(){return e.setState({showAnnotator:!n})}},"Import into VCAT"),f.default.createElement("button",{className:"btn",disabled:i,onClick:function(){return e.props.actions.exportCSV()}},"Export CSV"),f.default.createElement("button",{className:"btn",disabled:i,onClick:function(){return e.props.actions.refresh()}},"Refresh"),f.default.createElement("button",{className:"btn reset",disabled:i,onClick:function(){return confirm("This will clear your saved images.")&&e.props.actions.clear()}},"Reset")),n&&f.default.createElement(v.default,null),f.default.createElement(m.Keyframes,{frames:r,showHash:!0,showTimestamp:!0,showSearchButton:!0,showSaveButton:!0}))}}]),t}(l.Component);t.default=(0,p.connect)(function(e){return{saved:e.review.saved}},function(e){return{actions:(0,d.bindActionCreators)((0,r.default)({},h),e)}})(g)},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=v(n(4)),o=v(n(65)),i=v(n(5)),a=v(n(6)),u=v(n(7)),s=v(n(8)),c=v(n(9)),l=n(1),f=v(l),d=n(15),p=n(2),h=n(11),m=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}(n(97));function v(e){return e&&e.__esModule?e:{default:e}}var 
y=function(e){function t(){(0,a.default)(this,t);var e=(0,s.default)(this,(t.__proto__||(0,i.default)(t)).call(this));return e.state={title:"",graphic:!1},e.handleInput=e.handleInput.bind(e),e}return(0,c.default)(t,e),(0,u.default)(t,[{key:"handleInput",value:function(e){var t=e.target,n=t.name,r=t.value;"title"===n&&(r=r.replace(/[^-_a-zA-Z0-9 ]/g,"")),"graphic"===n&&(r=e.target.checked),this.setState((0,o.default)({},n,r))}},{key:"render",value:function(){var e=this,t=this.props.review;return f.default.createElement("div",{className:"importMenu"},f.default.createElement("div",null,f.default.createElement("h3",null,"New VCAT Image Group"),f.default.createElement("label",null,f.default.createElement("input",{type:"text",name:"title",placeholder:"Title this group",autoComplete:"off",onChange:this.handleInput,value:this.state.title})),f.default.createElement("label",null,f.default.createElement("input",{type:"checkbox",name:"graphic",checked:this.state.graphic,onChange:this.handleInput})," ",f.default.createElement("small",null,"Graphic content")),f.default.createElement("label",null,f.default.createElement("button",{className:"btn check",onClick:this.props.actions.dedupe},t.dedupe.loading?"Deduping...":"Dedupe"),f.default.createElement("button",{className:"btn btn-primary create",onClick:function(){return e.props.actions.create(e.state)}},"Create Group"),(t.dedupe.loading||t.create.loading)&&f.default.createElement(h.Loader,null),!!t.dedupe.count&&t.dedupe.count+" images removed")))}}]),t}(l.Component);t.default=(0,p.connect)(function(e){return{review:e.review}},function(e){return{actions:(0,d.bindActionCreators)((0,r.default)({},m),e)}})(y)},function(e,t,n){var r=n(532);"string"==typeof r&&(r=[[e.i,r,""]]);var o={hmr:!0,transform:void 0,insertInto:void 0};n(49)(r,o);r.locals&&(e.exports=r.locals)},function(e,t,n){(e.exports=n(48)(!1)).push([e.i,".importMenu {\n padding: 10px;\n margin: 10px 0;\n background: #eee;\n}\n.reviewButtons {\n margin-bottom: 
10px;\n}\n.importMenu h3 {\n margin-top: 0;\n margin-bottom: 10px;\n}\n.importMenu label {\n display: block;\n margin-bottom: 5px;\n}\n.importMenu input[type=text] {\n font-size: 13px;\n width: 250px;\n}",""])}]); \ No newline at end of file
diff --git a/faiss/static/js/store2.min.js b/faiss/static/js/store2.min.js
new file mode 100644
index 00000000..75e3ca37
--- /dev/null
+++ b/faiss/static/js/store2.min.js
@@ -0,0 +1,5 @@
+/*! store2 - v2.7.0 - 2018-03-04
+* Copyright (c) 2018 Nathan Bubna; Licensed (MIT OR GPL-3.0) */
+
+!function(a,b){var c={version:"2.7.0",areas:{},apis:{},inherit:function(a,b){for(var c in a)b.hasOwnProperty(c)||(b[c]=a[c]);return b},stringify:function(a){return void 0===a||"function"==typeof a?a+"":JSON.stringify(a)},parse:function(a){try{return JSON.parse(a)}catch(b){return a}},fn:function(a,b){c.storeAPI[a]=b;for(var d in c.apis)c.apis[d][a]=b},get:function(a,b){return a.getItem(b)},set:function(a,b,c){a.setItem(b,c)},remove:function(a,b){a.removeItem(b)},key:function(a,b){return a.key(b)},length:function(a){return a.length},clear:function(a){a.clear()},Store:function(a,b,d){var e=c.inherit(c.storeAPI,function(a,b,c){return 0===arguments.length?e.getAll():"function"==typeof b?e.transact(a,b,c):void 0!==b?e.set(a,b,c):"string"==typeof a||"number"==typeof a?e.get(a):a?e.setAll(a,b):e.clear()});e._id=a;try{b.setItem("_-bad-_","wolf"),e._area=b,b.removeItem("_-bad-_")}catch(a){}return e._area||(e._area=c.inherit(c.storageAPI,{items:{},name:"fake"})),e._ns=d||"",c.areas[a]||(c.areas[a]=e._area),c.apis[e._ns+e._id]||(c.apis[e._ns+e._id]=e),e},storeAPI:{area:function(a,b){var d=this[a];return d&&d.area||(d=c.Store(a,b,this._ns),this[a]||(this[a]=d)),d},namespace:function(a,b){if(!a)return this._ns?this._ns.substring(0,this._ns.length-1):"";var d=a,e=this[d];return e&&e.namespace||(e=c.Store(this._id,this._area,this._ns+d+"."),this[d]||(this[d]=e),b||e.area("session",c.areas.session)),e},isFake:function(){return"fake"===this._area.name},toString:function(){return"store"+(this._ns?"."+this.namespace():"")+"["+this._id+"]"},has:function(a){return this._area.has?this._area.has(this._in(a)):!!(this._in(a)in this._area)},size:function(){return this.keys().length},each:function(a,b){for(var d=0,e=c.length(this._area);d<e;d++){var f=this._out(c.key(this._area,d));if(void 0!==f&&a.call(this,f,b||this.get(f))===!1)break;e>c.length(this._area)&&(e--,d--)}return b||this},keys:function(a){return this.each(function(a,b){b.push(a)},a||[])},get:function(a,b){var 
d=c.get(this._area,this._in(a));return null!==d?c.parse(d):b||d},getAll:function(a){return this.each(function(a,b){b[a]=this.get(a)},a||{})},transact:function(a,b,c){var d=this.get(a,c),e=b(d);return this.set(a,void 0===e?d:e),this},set:function(a,b,d){var e=this.get(a);return null!=e&&d===!1?b:c.set(this._area,this._in(a),c.stringify(b),d)||e},setAll:function(a,b){var c,d;for(var e in a)d=a[e],this.set(e,d,b)!==d&&(c=!0);return c},add:function(a,b){var d=this.get(a);if(d instanceof Array)b=d.concat(b);else if(null!==d){var e=typeof d;if(e===typeof b&&"object"===e){for(var f in b)d[f]=b[f];b=d}else b=d+b}return c.set(this._area,this._in(a),c.stringify(b)),b},remove:function(a){var b=this.get(a);return c.remove(this._area,this._in(a)),b},clear:function(){return this._ns?this.each(function(a){c.remove(this._area,this._in(a))},1):c.clear(this._area),this},clearAll:function(){var a=this._area;for(var b in c.areas)c.areas.hasOwnProperty(b)&&(this._area=c.areas[b],this.clear());return this._area=a,this},_in:function(a){return"string"!=typeof a&&(a=c.stringify(a)),this._ns?this._ns+a:a},_out:function(a){return this._ns?a&&0===a.indexOf(this._ns)?a.substring(this._ns.length):void 0:a}},storageAPI:{length:0,has:function(a){return this.items.hasOwnProperty(a)},key:function(a){var b=0;for(var c in this.items)if(this.has(c)&&a===b++)return c},setItem:function(a,b){this.has(a)||this.length++,this.items[a]=b},removeItem:function(a){this.has(a)&&(delete this.items[a],this.length--)},getItem:function(a){return this.has(a)?this.items[a]:null},clear:function(){for(var a in this.items)this.removeItem(a)},toString:function(){return this.length+" items in "+this.name+"Storage"}}},d=c.Store("local",function(){try{return localStorage}catch(a){}}());d.local=d,d._=c,d.area("session",function(){try{return sessionStorage}catch(a){}}()),"function"==typeof b&&void 0!==b.amd?b("store2",[],function(){return d}):"undefined"!=typeof 
module&&module.exports?module.exports=d:(a.store&&(c.conflict=a.store),a.store=d)}(this,this.define);
+//# sourceMappingURL=store2.min.js.map \ No newline at end of file
diff --git a/faiss/static/metadata.html b/faiss/static/metadata.html
new file mode 100644
index 00000000..e74e1ee1
--- /dev/null
+++ b/faiss/static/metadata.html
@@ -0,0 +1,11 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="UTF-8">
+ <title>VFrame Metadata</title>
+ <link rel="shortcut icon" href="/search/static/favicon.ico" />
+ <meta name="viewport" content="width=device-width,initial-scale=1.0">
+ </head>
+ <body>
+ <script type="text/javascript" src="/search/static/js/metadata-app.js"></script></body>
+</html>
diff --git a/faiss/static/search.html b/faiss/static/search.html
new file mode 100644
index 00000000..056d06c1
--- /dev/null
+++ b/faiss/static/search.html
@@ -0,0 +1 @@
+search.html \ No newline at end of file
diff --git a/faiss/util.py b/faiss/util.py
new file mode 100644
index 00000000..97afbc22
--- /dev/null
+++ b/faiss/util.py
@@ -0,0 +1,29 @@
+import time
+import simplejson as json
+import pickle
+from os import path
+from collections import namedtuple
+
+# Converts JSON el['key'] to Pythonic object-style el.key
+def _json_object_hook(d):
+ return namedtuple('X', d.keys())(*d.values())
+
+# Load a JSON recipe
+def load_recipe(path):
+ with open(path) as fh:
+ return json.load(fh, object_hook=_json_object_hook)
+
+# Load a pickle file
+def load_pickle(data_dir, pkl_fn):
+ load_start = time.time()
+ with open(path.join(str(data_dir), str(pkl_fn)), 'rb') as fh:
+ raw = fh.read()
+ data = pickle.loads(raw)
+ load_end = time.time()
+ load_time = load_end - load_start
+ print("Pickle load time: {:.1f}s".format(load_time))
+ return data
+
+def read_json(fn):
+ with open(fn, 'r') as json_file:
+ return json.load(json_file)
diff --git a/faiss/wsgi.py b/faiss/wsgi.py
new file mode 100644
index 00000000..371862fb
--- /dev/null
+++ b/faiss/wsgi.py
@@ -0,0 +1,5 @@
+from server import app
+
+if __name__ == "__main__":
+ app.run()
+
diff --git a/ids.json b/ids.json
deleted file mode 100644
index 492c25f5..00000000
--- a/ids.json
+++ /dev/null
@@ -1 +0,0 @@
-["0d3bb75852098b25d90f31d2f48fd0cb4944702b", "4d16337cc0431cd43043dfef839ce5f0717c3483", "1b4b3d0ce900996a6da8928e16370e21d15ed83e", "697b0b9630213ca08a1ae1d459fabc13325bdcbb", "57246142814d7010d3592e3a39a1ed819dd01f3b", "7788fa76f1488b1597ee2bebc462f628e659f61e", "cca9ae621e8228cfa787ec7954bb375536160e0d", "9d8fd639a7aeab0dd1bc6eef9d11540199fd6fe2", "685f8df14776457c1c324b0619c39b3872df617b", "282a3ee79a08486f0619caf0ada210f5c3572367", "75249ebb85b74e8932496272f38af274fbcfd696", "0fb8317a8bf5feaf297af8e9b94c50c5ed0e8277", "47190d213caef85e8b9dd0d271dbadc29ed0a953", "24c442ac3f6802296d71b1a1914b5d44e48b4f29", "809ea255d144cff780300440d0f22c96e98abd53", "8bdf6f03bde08c424c214188b35be8b2dec7cdea", "a9286519e12675302b1d7d2fe0ca3cc4dc7d17f6", "3dfb822e16328e0f98a47209d7ecd242e4211f82", "d0509afe9c2c26fe021889f8efae1d85b519452a", "3c57e28a4eb463d532ea2b0b1ba4b426ead8d9a0", "4b48e912a17c79ac95d6a60afed8238c9ab9e553", "8395cf3535a6628c3bdc9b8d0171568d551f5ff0", "088aabe3da627432fdccf5077969e3f6402f0a80", "31b05f65405534a696a847dd19c621b7b8588263", "21bd9374c211749104232db33f0f71eab4df35d5", "a8035ca71af8cc68b3e0ac9190a89fed50c92332", "084bebc5c98872e9307cd8e7f571d39ef9c1b81e", "a32c5138c6a0b3d3aff69bcab1015d8b043c91fb", "bcee40c25e8819955263b89a433c735f82755a03", "1275d6a800f8cf93c092603175fdad362b69c191", "32d8e555441c47fc27249940991f80502cb70bd5", "b4ee64022cc3ccd14c7f9d4935c59b16456067d3", "acd4280453b995cb071c33f7c9db5760432f4279", "486a82f50835ea888fbc5c6babf3cf8e8b9807bc", "1450296fb936d666f2f11454cc8f0108e2306741", "beab10d1bdb0c95b2f880a81a747f6dd17caa9c2", "6888f3402039a36028d0a7e2c3df6db94f5cb9bb", "1db45038ff49e4220a56b17a3b255df1c97b32c1", "d46b790d22cb59df87f9486da28386b0f99339d3", "8efda5708bbcf658d4f567e3866e3549fe045bbb", "d7cbedbee06293e78661335c7dd9059c70143a28", "eb027969f9310e0ae941e2adee2d42cdf07d938c", "7fb7ccc1aa093ca526f2d8b6f2c404d2c886f69a", "be4faea0971ef74096ec9800750648b7601dda65", "831b4d8b0c0173b0bac0e328e844a0fbafae6639", 
"a35dd69d63bac6f3296e0f1d148708cfa4ba80f6", "565f7c767e6b150ebda491e04e6b1de759fda2d4", "746c0205fdf191a737df7af000eaec9409ede73f", "13f065d4e6dfe2a130bd64d73eee97d10d9f7d33", "b0c1615ebcad516b5a26d45be58068673e2ff217", "96e0cfcd81cdeb8282e29ef9ec9962b125f379b0", "1e8eee51fd3bf7a9570d6ee6aa9a09454254689d", "8dce38840e6cf5ab3e0d1b26e401f8143d2a6bff", "2911e7f0fb6803851b0eddf8067a6fc06e8eadd6", "c866a2afc871910e3282fd9498dce4ab20f6a332", "9a7858eda9b40b16002c6003b6db19828f94a6c6", "5e0eb34aeb2b58000726540336771053ecd335fc", "24e82eaf3257e761d6ca0ffcc2cbca30dfca82e9", "9131c990fad219726eb38384976868b968ee9d9c", "511a8cdf2127ef8aa07cbdf9660fe9e0e2dfbde7", "e1256ff535bf4c024dd62faeb2418d48674ddfa2", "40bb090a4e303f11168dce33ed992f51afe02ff7", "8ccde9d80706a59e606f6e6d48d4260b60ccc736", "03f4c0fe190e5e451d51310bca61c704b39dcac8", "6789bddbabf234f31df992a3356b36a47451efc7", "64e216c128164f56bc91a33c18ab461647384869", "2c052a1c77a3ec2604b3deb702d77c41418c7d3e", "f442a2f2749f921849e22f37e0480ac04a3c3fec", "ef230e3df720abf2983ba6b347c9d46283e4b690", "bd8f77b7d3b9d272f7a68defc1412f73e5ac3135", "2ea78e128bec30fb1a623c55ad5d55bb99190bd2", "4f3b652c75b1d7cf4997e0baaef2067b61e3a79b", "616d3d6d82dbc2697d150e879996d878ef74faef", "54a9ed950458f4b7e348fa78a718657c8d3d0e05", "49a7949fabcdf01bbae1c2eb38946ee99f491857", "d02e27e724f9b9592901ac1f45830341d37140fe", "6993bca2b3471f26f2c8a47adfe444bfc7852484", "405526dfc79de98f5bf3c97bf4aa9a287700f15d", "00fb2836068042c19b5197d0999e8e93b920eb9c", "007fbc7a1d7eae33b2bb59b175dd1033e5e178f3", "3b9b200e76a35178da940279d566bbb7dfebb787", "c5e37630d0672e4d44f7dee83ac2c1528be41c2e", "3107316f243233d45e3c7e5972517d1ed4991f91", "31ba7f5e09a2f0fe9cf7ea95314723206dcb6059", "ff9195f99a1a28ced431362f5363c9a5da47a37b", "a15c728d008801f5ffc7898568097bbeac8270a4", "9865fe20df8fe11717d92b5ea63469f59cf1635a", "291265db88023e92bb8c8e6390438e5da148e8f5", "1badfeece64d1bf43aa55c141afe61c74d0bd25e", "751fb994b2c553dc843774a5620bfcab8bc657fd", 
"1e21b925b65303ef0299af65e018ec1e1b9b8d60", "1b55c4e804d1298cbbb9c507497177014a923d22", "23ce6f404c504592767b8bec7d844d87b462de71", "ada063ce9a1ff230791c48b6afa29c401a9007f1", "a5e5094a1e052fa44f539b0d62b54ef03c78bf6a", "7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22", "82e66c4832386cafcec16b92ac88088ffd1a1bc9", "4b4106614c1d553365bad75d7866bff0de6056ed", "59fc69b3bc4759eef1347161e1248e886702f8f7", "0750a816858b601c0dbf4cfb68066ae7e788f05d", "9a42c519f0aaa68debbe9df00b090ca446d25bc4", "0c65226edb466204189b5aec8f1033542e2c17aa", "552122432b92129d7e7059ef40dc5f6045f422b5", "368e99f669ea5fd395b3193cd75b301a76150f9d", "e3d76f1920c5bf4a60129516abb4a2d8683e48ae", "3c0fffd4cdbfef4ccd92d528d8b8a60ab0929827", "6c66ae815e7e508e852ecb122fb796abbcda16a8", "06850b60e33baa4ea9473811d58c0d5015da079e", "2c203050a6cca0a0bff80e574bda16a8c46fe9c2", "28d4e027c7e90b51b7d8908fce68128d1964668a", "2241eda10b76efd84f3c05bdd836619b4a3df97e", "be51854ef513362bc236b85dd6f0e2c2da51614b", "122f51cee489ba4da5ab65064457fbe104713526", "cbaa17be8c22e219a9c656559e028867dfb2c2ed", "fa08a4da5f2fa39632d90ce3a2e1688d147ece61", "5da827fe558fb2e1124dcc84ef08311241761726", "94f74c6314ffd02db581e8e887b5fd81ce288dbf", "30870ef75aa57e41f54310283c0057451c8c822b", "b9d0774b0321a5cfc75471b62c8c5ef6c15527f5", "e87d6c284cdd6828dfe7c092087fbd9ff5091ee4", "587c48ec417be8b0334fa39075b3bfd66cc29dbe", "0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e", "305346d01298edeb5c6dc8b55679e8f60ba97efb", "ee2ec0836ded2f3f37bf49fa0e985280a8addaca", "c2bd9322fa2d0a00fc62075cc0f1996fc75d42a8", "1f3ae376b22136a2fe2e96632d4383653a42e4d4", "95f12d27c3b4914e0668a268360948bce92f7db3", "2fda461869f84a9298a0e93ef280f79b9fb76f94", "30cd39388b5c1aae7d8153c0ab9d54b61b474ffe", "321c8ba38db118d8b02c0ba209be709e6792a2c7", "61f04606528ecf4a42b49e8ac2add2e9f92c0def", "36219a3196aac2bd149bc786f083957a6e6da125", "0f81b0fa8df5bf3fcfa10f20120540342a0c92e5", "397085122a5cade71ef6c19f657c609f0a4f7473", "4c4e49033737467e28aa2bb32f6c21000deda2ef", 
"7c6686fa4d8c990e931f1d16deabf647bf3b1986", "88e2574af83db7281c2064e5194c7d5dfa649846", "12095f9b35ee88272dd5abc2d942a4f55804b31e", "f25aa838fb44087668206bf3d556d31ffd75235d", "6de935a02f87aa31e33245c3b85ea3b7f8b1111c", "cf736f596bf881ca97ec4b29776baaa493b9d50e", "ad5a35a251e07628dd035c68e44a64c53652be6b", "eb48a58b873295d719827e746d51b110f5716d6c", "9b9ccd4954cf9dd605d49e9c3504224d06725ab7", "5f448ab700528888019542e6fea1d1e0db6c35f2", "dce5e0a1f2cdc3d4e0e7ca0507592860599b0454", "a66d89357ada66d98d242c124e1e8d96ac9b37a0", "91883dabc11245e393786d85941fb99a6248c1fb", "cf5c9b521c958b84bb63bea9d5cbb522845e4ba7", "ebedc841a2c1b3a9ab7357de833101648281ff0e", "bf2f2696fdb4077b5ab18aa583f6376acadf2438", "445e3ba7eabcc55b5d24f951b029196b47830684", "e69a765d033ef6ea55c57ca41c146b27964c5cf2", "1d0128b9f96f4c11c034d41581f23eb4b4dd7780", "3b470b76045745c0ef5321e0f1e0e6a4b1821339", "ceeb67bf53ffab1395c36f1141b516f893bada27", "b76af8fcf9a3ebc421b075b689defb6dc4282670", "54f169ad7d1f6c9ce94381e9b5ccc1a07fd49cc6", "0ea7b7fff090c707684fd4dc13e0a8f39b300a97", "a40f8881a36bc01f3ae356b3e57eac84e989eef0", "c2be82ed0db509087b08423c8cf39ab3c36549c3", "e6d6d1b0a8b414160f67142fc18e1321fe3f1c49", "45e616093a92e5f1e61a7c6037d5f637aa8964af", "9a23a0402ae68cc6ea2fe0092b6ec2d40f667adb", "22c06284a908d8ad0994ad52119773a034eed7ee", "1a8ccc23ed73db64748e31c61c69fe23c48a2bb1", "3c6cac7ecf546556d7c6050f7b693a99cc8a57b3", "ac6c3b3e92ff5fbcd8f7967696c7aae134bea209", "5b5b9c6c67855ede21a60c834aea5379df7d51b7", "c858c74d30c02be2d992f82a821b925669bfca13", "713db3874b77212492d75fb100a345949f3d3235", "ccf16bcf458e4d7a37643b8364594656287f5bfc", "1b8541ec28564db66a08185510c8b300fa4dc793", "70a69569ba61f3585cd90c70ca5832e838fa1584", "a7a3ec1128f920066c25cb86fbc33445ce613919", "d03265ea9200a993af857b473c6bf12a095ca178", "65126e0b1161fc8212643b8ff39c1d71d262fbc1", "4e3b71b1aa6b6cb7aa55843d2214441f0076fe69", "c3418f866a86dfd947c2b548cbdeac8ca5783c15", "056ba488898a1a1b32daec7a45e0d550e0c51ae4", 
"087002ab569e35432cdeb8e63b2c94f1abc53ea9", "2a4153655ad1169d482e22c468d67f3bc2c49f12", "1b794b944fd462a2742b6c2f8021fecc663004c9", "ed1886e233c8ecef7f414811a61a83e44c8bbf50", "232b6e2391c064d483546b9ee3aafe0ba48ca519", "336488746cc76e7f13b0ec68ccfe4df6d76cdc8f", "c17a332e59f03b77921942d487b4b102b1ee73b6", "9993f1a7cfb5b0078f339b9a6bfa341da76a3168", "2df4d05119fe3fbf1f8112b3ad901c33728b498a", "9ca7899338129f4ba6744f801e722d53a44e4622", "465faf9974a60da00950be977f3bc2fc3e56f5d2", "21f5f65e832c5472d6d08f6ee280d65ff0202e29", "c00f402b9cfc3f8dd2c74d6b3552acbd1f358301", "034b3f3bac663fb814336a69a9fd3514ca0082b9", "500b92578e4deff98ce20e6017124e6d2053b451", "f1b4583c576d6d8c661b4b2c82bdebf3ba3d7e53", "303a7099c01530fa0beb197eb1305b574168b653", "89002a64e96a82486220b1d5c3f060654b24ef2a", "bbf28f39e5038813afd74cf1bc78d55fcbe630f1", "5810ce61fda464d4de2769bd899e12727bee0382", "4ac3cd8b6c50f7a26f27eefc64855134932b39be", "9ef2b2db11ed117521424c275c3ce1b5c696b9b3", "1672becb287ae3eaece3e216ba37677ed045db55", "5df17c81c266cf2ebb0778e48e825905e161a8d9", "4a8480d58c30dc484bda08969e754cd13a64faa1", "bc704680b5032eadf78c4e49f548ba14040965bf", "4068574b8678a117d9a434360e9c12fe6232dae0", "8c048be9dd2b601808b893b5d3d51f00907bdee0", "1fd7a17a6c630a122c1a3d1c0668d14c0c375de0", "35f921def890210dda4b72247849ad7ba7d35250", "27a586a435efdcecb151c275947fe5b5b21cf59b", "5239001571bc64de3e61be0be8985860f08d7e7e", "d140c5add2cddd4a572f07358d666fe00e8f4fe1", "b934f730a81c071dbfc08eb4c360d6fca2daa08f", "ab7923968660d04434271559c4634790dc68c58e", "8a336e9a4c42384d4c505c53fb8628a040f2468e", "766728bac030b169fcbc2fbafe24c6e22a58ef3c", "303065c44cf847849d04da16b8b1d9a120cef73a", "701f56f0eac9f88387de1f556acef78016b05d52", "47e8db3d9adb79a87c8c02b88f432f911eb45dc5", "029b53f32079063047097fa59cfc788b2b550c4b", "ed96f2eb1771f384df2349879970065a87975ca7", "3bc376f29bc169279105d33f59642568de36f17f", "45e7ddd5248977ba8ec61be111db912a4387d62f", "07d95be4922670ef2f8b11997e0c00eb643f3fca", 
"72e10a2a7a65db7ecdc7d9bd3b95a4160fab4114", "0561bed18b6278434deae562d646e8adad72e75d", "afdf9a3464c3b015f040982750f6b41c048706f5", "47109343e502a4097cb7efee54bc5fbb14598c05", "1922ad4978ab92ce0d23acc4c7441a8812f157e5", "12d8730da5aab242795bdff17b30b6e0bac82998", "5721cd4b898f0e7df8de1e0215f630af94656be9", "ba1c0600d3bdb8ed9d439e8aa736a96214156284", "a0b1990dd2b4cd87e4fd60912cc1552c34792770", "2f0b8579829b3d4efdbc03c96821e33d7cc65e1d", "2e091b311ac48c18aaedbb5117e94213f1dbb529", "e4fa062bff299a0bcef9f6b2e593c85be116c9f1", "2cd426f10178bd95fef3dede69ae7b67e73bb70c", "293ade202109c7f23637589a637bdaed06dc37c9", "bab2f4949a38a712a78aafbc0a3c392227c65f56", "75fd9acf5e5b7ed17c658cc84090c4659e5de01d", "ebc2a3e8a510c625353637e8e8f07bd34410228f", "963d0d40de8780161b70d28d2b125b5222e75596", "ed09db68bf317cad27df6ed96a0c16eab6b2f827", "76dff7008d9b8bf44ec5348f294d5518877c6182", "71b07c537a9e188b850192131bfe31ef206a39a0", "d28d32af7ef9889ef9cb877345a90ea85e70f7f1", "a0fd85b3400c7b3e11122f44dc5870ae2de9009a", "86597fe787e0bdd05935d25158790727257a40bd", "1d3e01d5e2721dcfafe5a3b39c54ee1c980350bb", "5f5906168235613c81ad2129e2431a0e5ef2b6e4", "46b2ecef197b465abc43e0e017543b1af61921ac", "ce9e1dfa7705623bb67df3a91052062a0a0ca456", "940e5c45511b63f609568dce2ad61437c5e39683", "36e8ef2e5d52a78dddf0002e03918b101dcdb326", "898ff1bafee2a6fb3c848ad07f6f292416b5f07d", "0bc53b338c52fc635687b7a6c1e7c2b7191f42e5", "11ba01ce7d606bab5c2d7e998c6d94325521b8a0", "daa4cfde41d37b2ab497458e331556d13dd14d0b", "4b936847f39094d6cb0bde68cea654d948c4735d", "c5ea084531212284ce3f1ca86a6209f0001de9d1", "b29b42f7ab8d25d244bfc1413a8d608cbdc51855", "1f9ae272bb4151817866511bd970bffb22981a49", "7fcfd72ba6bc14bbb90b31fe14c2c77a8b220ab2", "5cfbeae360398de9e20e4165485837bd42b93217", "4cfa8755fe23a8a0b19909fa4dec54ce6c1bd2f7", "33ae696546eed070717192d393f75a1583cd8e2c", "5da98f7590c08e83889f3cec7b0304b3610abf42", "a820941eaf03077d68536732a4d5f28d94b5864a", "f095b5770f0ff13ba9670e3d480743c5e9ad1036", 
"3d6f59e0f0e16d01b9c588a53d3b6b3b984e991e", "bbc5f4052674278c96abe7ff9dc2d75071b6e3f3", "1c1f957d85b59d23163583c421755869f248ceef", "b4362cd87ad219790800127ddd366cc465606a78", "3be8f1f7501978287af8d7ebfac5963216698249", "9cb7b3b14fd01cc2ed76784ab76304132dab6ff3", "7360a2adcd6e3fe744b7d7aec5c08ee31094dfd4", "94aa8a3787385b13ee7c4fdd2b2b2a574ffcbd81", "b1fdd4ae17d82612cefd4e78b690847b071379d3", "708f4787bec9d7563f4bb8b33834de445147133b", "b730908bc1f80b711c031f3ea459e4de09a3d324", "193debca0be1c38dabc42dc772513e6653fd91d8", "04ff69aa20da4eeccdabbe127e3641b8e6502ec0", "0a6d344112b5af7d1abbd712f83c0d70105211d0", "7d7be6172fc2884e1da22d1e96d5899a29831ad2", "d5f8827fc7d66643bf018d5636e81ed41026b61a", "88e2efab01e883e037a416c63a03075d66625c26", "696236fb6f986f6d5565abb01f402d09db68e5fa", "f61829274cfe64b94361e54351f01a0376cd1253", "f423d8be5e13d9ef979debd3baf0a1b2e1d3682f", "96c6f50ce8e1b9e8215b8791dabd78b2bbd5f28d", "329d58e8fb30f1bf09acb2f556c9c2f3e768b15c", "0a34fe39e9938ae8c813a81ae6d2d3a325600e5c", "837e99301e00c2244023a8a48ff98d7b521c93ac", "fdff2da5bdca66e0ab5874ef58ac2205fb088ed7", "b5da4943c348a6b4c934c2ea7330afaf1d655e79", "090ff8f992dc71a1125636c1adffc0634155b450", "b87b0fa1ac0aad0ca563844daecaeecb2df8debf", "3d18ce183b5a5b4dcaa1216e30b774ef49eaa46f", "1ca815327e62c70f4ee619a836e05183ef629567", "2a84f7934365f05b6707ea0ac225210f78e547af", "d9deafd9d9e60657a7f34df5f494edff546c4fb8", "9207671d9e2b668c065e06d9f58f597601039e5e", "5c124b57699be19cd4eb4e1da285b4a8c84fc80d", "500fbe18afd44312738cab91b4689c12b4e0eeee", "1c1a98df3d0d5e2034ea723994bdc85af45934db", "a78025f39cf78f2fc66c4b2942fbe5bad3ea65fc", "771a6a80dd08212d83a4e976522e1ce108881401", "6d8c9a1759e7204eacb4eeb06567ad0ef4229f93", "31e57fa83ac60c03d884774d2b515813493977b9", "b07582d1a59a9c6f029d0d8328414c7bef64dca0", "2e19371a2d797ab9929b99c80d80f01a1fbf9479", "48a9241edda07252c1aadca09875fabcfee32871", "830e5b1043227fe189b3f93619ef4c58868758a7", "ee418372b0038bd3b8ae82bd1518d5c01a33a7ec", 
"4f77a37753c03886ca9c9349723ec3bbfe4ee967", "87e6cb090aecfc6f03a3b00650a5c5f475dfebe1", "3504907a2e3c81d78e9dfe71c93ac145b1318f9c", "1fb980e137b2c9f8781a0d98c026e164b497ddb1", "c41a3c31972cf0c1be6b6895f3bf97181773fcfb", "38192a0f9261d9727b119e294a65f2e25f72d7e6", "62e913431bcef5983955e9ca160b91bb19d9de42", "86c053c162c08bc3fe093cc10398b9e64367a100", "6966d9d30fa9b7c01523425726ab417fd8428790", "f20ed84abcb1223f351a576ef10dfda9f277326b", "01e27b6d1af4c9c2f50e2908b5f3b2331ff24846", "5bd3d08335bb4e444a86200c5e9f57fd9d719e14", "607aebe7568407421e8ffc7b23a5fda52650ad93", "4140498e96a5ff3ba816d13daf148fffb9a2be3f", "37c8514df89337f34421dc27b86d0eb45b660a5e", "b7043048b4ba748c9c6317b6d8206192c34f57ff", "f0ae807627f81acb63eb5837c75a1e895a92c376", "bd13f50b8997d0733169ceba39b6eb1bda3eb1aa", "a40edf6eb979d1ddfe5894fac7f2cf199519669f", "084bd02d171e36458f108f07265386f22b34a1ae", "40e1743332523b2ab5614bae5e10f7a7799161f4", "f201baf618574108bcee50e9a8b65f5174d832ee", "2e3d081c8f0e10f138314c4d2c11064a981c1327", "5fa6f72d3fe16f9160d221e28da35c1e67a5d951", "2f7aa942313b1eb12ebfab791af71d0a3830b24c", "80ed678ef28ccc1b942e197e0393229cd99d55c8", "7eb85bcb372261bad707c05e496a09609e27fdb3", "438e7999c937b94f0f6384dbeaa3febff6d283b6", "0eac652139f7ab44ff1051584b59f2dc1757f53b", "5e0b691e9e5812dd3cb120a8d77619a45aa8e4c4", "5fa6e4a23da0b39e4b35ac73a15d55cee8608736", "5f4219118556d2c627137827a617cf4e26242a6e", "5b0bf1063b694e4b1575bb428edb4f3451d9bf04", "17c0d99171efc957b88c31a465c59485ab033234", "88d63a0cc0b8a5303bdef286d6df118bb1d44d26", "21a2f67b21905ff6e0afa762937427e92dc5aa0b", "b5f79df712ad535d88ae784a617a30c02e0551ca", "6d0fe30444c6f4e4db3ad8b02fb2c87e2b33c58d", "9aade3d26996ce7ef6d657130464504b8d812534", "530243b61fa5aea19b454b7dbcac9f463ed0460e", "95e3b78eb4d5b469f66648ed4f37e45e0e01e63e", "d4a5eaf2e9f2fd3e264940039e2cbbf08880a090", "0a6a25ee84fc0bf7284f41eaa6fefaa58b5b329a", "3352426a67eabe3516812cb66a77aeb8b4df4d1b", "2724ba85ec4a66de18da33925e537f3902f21249", 
"e52272f92fa553687f1ac068605f1de929efafc2", "30f6c4bd29b9a8c94f37f3818cf6145c1507826f", "ca83053d9a790319b11a04eac5ab412e7fcab914", "64e82b42e1c41250bdf9eb952686631287cfd410", "1824b1ccace464ba275ccc86619feaa89018c0ad", "433a6d6d2a3ed8a6502982dccc992f91d665b9b3", "116d57b4e5dda41d72e497517f65159e6f12c517", "84e6669b47670f9f4f49c0085311dce0e178b685", "234c106036964131c0f2daf76c47ced802652046", "5c820e47981d21c9dddde8d2f8020146e600368f", "f0a4a3fb6997334511d7b8fc090f9ce894679faf", "83295bce2340cb87901499cff492ae6ff3365475", "e6178de1ef15a6a973aad2791ce5fbabc2cb8ae5", "fd809ee36fa6832dda57a0a2403b4b52c207549d", "5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725", "74ce7e5e677a4925489897665c152a352c49d0a2", "788a7b59ea72e23ef4f86dc9abb4450efefeca41", "266ed43dcea2e7db9f968b164ca08897539ca8dd", "63c74794aedb40dd6b1650352a2da7a968180302", "aa4af9b3811db6a30e1c7cc1ebf079078c1ee152", "7d1688ce0b48096e05a66ead80e9270260cb8082", "3c086601ce0bac61047b5b931b253bd4035e1e7a", "e4754afaa15b1b53e70743880484b8d0736990ff", "4f742c09ce12859b20deaa372c8f1575acfc99c9", "185263189a30986e31566394680d6d16b0089772", "63d865c66faaba68018defee0daf201db8ca79ed", "6a4ebd91c4d380e21da0efb2dee276897f56467a", "0209389b8369aaa2a08830ac3b2036d4901ba1f1", "5180c98815d7034e753a14ef6f54583f115da3aa", "2c62b9e64aeddf12f9d399b43baaefbca8e11148", "7cdf3bc1de6c7948763c0c2dfa4384dcbd3677a0", "6ba6045e4b404c44f9b4dfce2d946019f0e85a72", "c8ca6a2dc41516c16ea0747e9b3b7b1db788dbdd", "ed779cc4f026f6ac22f5ef0c34126138e1ebc8b2", "6b8d0569fffce5cc221560d459d6aa10c4db2f03", "56fd4c05869e11e4935d48aa1d7abb96072ac242", "47d07217c501644d63adfec740346f244abaaae8", "1fe1a78c941e03abe942498249c041b2703fd3d2", "f070d739fb812d38571ec77490ccd8777e95ce7a", "0b0958493e43ca9c131315bcfb9a171d52ecbb8a", "ec1e03ec72186224b93b2611ff873656ed4d2f74", "228558a2a38a6937e3c7b1775144fea290d65d6c", "59d8fa6fd91cdb72cd0fa74c04016d79ef5a752b", "dcf6ecd51ba135d432fcb7697fc6c52e4e7b0a43", "e97ba85a4550667b8a28f83a98808d489e0ff3bc", 
"0e8760fc198a7e7c9f4193478c0e0700950a86cd", "390f3d7cdf1ce127ecca65afa2e24c563e9db93b", "2fb8d7601fc3ad637781127620104aaab5122acd", "656a59954de3c9fcf82ffcef926af6ade2f3fdb5", "4848a48a2b8bacd2092e87961cd86818da8e7151", "c46a4db7247d26aceafed3e4f38ce52d54361817", "6e38011e38a1c893b90a48e8f8eae0e22d2008e8", "8875dcf2836315839741fd6944f249263408c27f", "044d9a8c61383312cdafbcc44b9d00d650b21c70", "86b6afc667bb14ff4d69e7a5e8bb2454a6bbd2cd", "721e5ba3383b05a78ef1dfe85bf38efa7e2d611d", "60824ee635777b4ee30fcc2485ef1e103b8e7af9", "dee406a7aaa0f4c9d64b7550e633d81bc66ff451", "51b42da0706a1260430f27badcf9ee6694768b9b", "891b10c4b3b92ca30c9b93170ec9abd71f6099c4", "f2d5bb329c09a5867045721112a7dad82ca757a3", "50ccc98d9ce06160cdf92aaf470b8f4edbd8b899", "8f772d9ce324b2ef5857d6e0b2a420bc93961196", "6f5ce5570dc2960b8b0e4a0a50eab84b7f6af5cb", "d68dbb71b34dfe98dee0680198a23d3b53056394", "995d55fdf5b6fe7fb630c93a424700d4bc566104", "927ba64123bd4a8a31163956b3d1765eb61e4426", "22e2066acfb795ac4db3f97d2ac176d6ca41836c", "d850aff9d10a01ad5f1d8a1b489fbb3998d0d80e", "9ab963e473829739475b9e47514f454ab467a5af", "131e395c94999c55c53afead65d81be61cd349a4", "624496296af19243d5f05e7505fd927db02fd0ce", "deb89950939ae9847f0a1a4bb198e6dbfed62778", "1b0a071450c419138432c033f722027ec88846ea", "df80fed59ffdf751a20af317f265848fe6bfb9c9", "4836b084a583d2e794eb6a94982ea30d7990f663", "992ebd81eb448d1eef846bfc416fc929beb7d28b", "191d30e7e7360d565b0c1e2814b5bcbd86a11d41", "e01bb53b611c679141494f3ffe6f0b91953af658", "38cbb500823057613494bacd0078aa0e57b30af8", "375435fb0da220a65ac9e82275a880e1b9f0a557", "cad2bd940e7580490da9cc739e597d029e166504", "44389d8e20cf9f1a8453f4ba033e03cff9bdfcbb", "9048732c8591a92a1f4f589b520a733f07578f80", "f7ae38a073be7c9cd1b92359131b9c8374579b13", "faead8f2eb54c7bc33bc7d0569adc7a4c2ec4c3b", "f7dea4454c2de0b96ab5cf95008ce7144292e52a", "06c2dfe1568266ad99368fc75edf79585e29095f", "c3d3d2229500c555c7a7150a8b126ef874cbee1c", "324b9369a1457213ec7a5a12fe77c0ee9aef1ad4", 
"4e7ed13e541b8ed868480375785005d33530e06d", "eee06d68497be8bf3a8aba4fde42a13aa090b301", "bbd1eb87c0686fddb838421050007e934b2d74ab", "56ae6d94fc6097ec4ca861f0daa87941d1c10b70", "370b6b83c7512419188f5373a962dd3175a56a9b", "2201f187a7483982c2e8e2585ad9907c5e66671d", "3a2cf589f5e11ca886417b72c2592975ff1d8472", "8886b21f97c114a23b24dc7025bbf42885adc3a7", "438c4b320b9a94a939af21061b4502f4a86960e3", "cb13e29fb8af6cfca568c6dc523da04d1db1fff5", "b26e8f6ad7c2d4c838660d5a17337ce241442ed9", "286812ade95e6f1543193918e14ba84e5f8e852e", "ec8ec2dfd73cf3667f33595fef84c95c42125945", "8a63a2b10068b6a917e249fdc73173f5fd918db0", "54bb25a213944b08298e4e2de54f2ddea890954a", "faeefc5da67421ecd71d400f1505cfacb990119c", "7cfbf90368553333b47731729e0e358479c25340", "0b8839945259ec764ef0fad47471f34db39f40c3", "9b2c359c36c38c289c5bacaeb5b1dd06b464f301", "7b0f1fc93fb24630eb598330e13f7b839fb46cce", "3f204a413d9c8c16f146c306c8d96b91839fed0c", "36bb93c4f381adca267191811abb8cc7812363f9", "14fdec563788af3202ce71c021dd8b300ae33051", "02e628e99f9a1b295458cb453c09863ea1641b67", "0f21a39fa4c0a19c4a5b4733579e393cb1d04f71", "83f3491249f1ec8b546267f53449686754f2f7fd", "0332ae32aeaf8fdd8cae59a608dc8ea14c6e3136", "0c75c7c54eec85e962b1720755381cdca3f57dfb", "9901f473aeea177a55e58bac8fd4f1b086e575a4", "0db8e6eb861ed9a70305c1839eaef34f2c85bbaf", "62e834114b58a58a2ea2d7b6dd7b0ce657a64317", "4b6387e608afa83ac8d855de2c9b0ae3b86f31cc", "754f7f3e9a44506b814bf9dc06e44fecde599878", "127c7f87f289b1d32e729738475b337a6b042cf7", "b599f323ee17f12bf251aba928b19a09bfbb13bb", "a72f0be803c9290923643660caf3bffec4ea3611", "30fd1363fa14965e3ab48a7d6235e4b3516c1da1", "7492c611b1df6bce895bee6ba33737e7fc7f60a6", "9627f28ea5f4c389350572b15968386d7ce3fe49", "b93bf0a7e449cfd0db91a83284d9eba25a6094d8", "b11bb6bd63ee6f246d278dd4edccfbe470263803", "2960500033eb31777ed1af1fcb133dcab1b4a857", "2c17d36bab56083293456fe14ceff5497cc97d75", "c04843867ebbba4c3cac4febf9c500ba28ae66fc", "710c3aaffef29730ffd909a63798e9185f488327", 
"1ef4815f41fa3a9217a8a8af12cc385f6ed137e1", "6a52e6fce541126ff429f3c6d573bc774f5b8d89", "5aad5e7390211267f3511ffa75c69febe3b84cc7", "03e88bf3c5ddd44ebf0e580d4bd63072566613ad", "c38b1fa00f1f370c029984c55d4d2d40b529d00c", "e4e07f5f201c6986e93ddb42dcf11a43c339ea2e", "cc96eab1e55e771e417b758119ce5d7ef1722b43", "492f41e800c52614c5519f830e72561db205e86c", "a83fc450c124b7e640adc762e95e3bb6b423b310", "ac2e44622efbbab525d4301c83cb4d5d7f6f0e55", "3727ac3d50e31a394b200029b2c350073c1b69e3", "a60db9ca8bc144a37fe233b08232d9c91641cbb5", "6932baa348943507d992aba75402cfe8545a1a9b", "59eefa01c067a33a0b9bad31c882e2710748ea24", "badb95dbdfb3f044a46d7ba0ee69dba929c511b1", "20532b1f80b509f2332b6cfc0126c0f80f438f10", "f4ba07d2ae6c9673502daf50ee751a5e9262848f", "a06b6d30e2b31dc600f622ab15afe5e2929581a7", "2ca10da4b59b406533ad1dc7740156e01782658f", "d06bcb2d46342ee011e652990edf290a0876b502", "8d646ac6e5473398d668c1e35e3daa964d9eb0f6", "152683f3ac99f829b476ea1b1b976dec6e17b911", "3d9e44d8f8bc2663192c7ce668ccbbb084e466e4", "91f0a95b8eb76e8fa24c8267e4a7a17815fc7a11", "34ccdec6c3f1edeeecae6a8f92e8bdb290ce40fd", "4342a2b63c9c344d78cf153600cd918a5fecad59", "5b2cfee6e81ef36507ebf3c305e84e9e0473575a", "24e099e77ae7bae3df2bebdc0ee4e00acca71250", "3f22a4383c55ceaafe7d3cfed1b9ef910559d639", "3fdfd6fa7a1cc9142de1f53e4ac7c2a7ac64c2e3", "9c1cdb795fd771003da4378f9a0585730d1c3784", "8e24db957be2b643db464cc566bfabc650f1ffac", "cdb1d32bc5c1a9bb0d9a5b9c9222401eab3e9ca0", "c6ea6fee4823b511eecf41f6c2574a0728055baf", "4ff11512e4fde3d1a109546d9c61a963d4391add", "7123e510dea783035b02f6c35e35a1a09677c5ab", "26949c1ba7f55f0c389000aa234238bf01a32d3b", "4dd71a097e6b3cd379d8c802460667ee0cbc8463", "ded968b97bd59465d5ccda4f1e441f24bac7ede5", "6d10beb027fd7213dd4bccf2427e223662e20b7d", "527dda77a3864d88b35e017d542cb612f275a4ec", "187f3ee3bc50a1f2471edc80d707e4fa1cac5b0b", "985cd420c00d2f53965faf63358e8c13d1951fa8", "637b31157386efbde61505365c0720545248fbae", "023be757b1769ecb0db810c95c010310d7daf00b", 
"5fb59cf5b31a80d8c70d91660092ef86494be577", "6ad107c08ac018bfc6ab31ec92c8a4b234f67d49", "6dcf418c778f528b5792104760f1fbfe90c6dd6a", "8f08b2101d43b1c0829678d6a824f0f045d57da5", "5a3da29970d0c3c75ef4cb372b336fc8b10381d7", "43e99b76ca8e31765d4571d609679a689afdc99e", "3d62b2f9cef997fc37099305dabff356d39ed477", "ab540c5be9f7ef688d3cd76765fcb794b92531fb", "ce691a37060944c136d2795e10ed7ba751cd8394", "064b797aa1da2000640e437cacb97256444dee82", "e0162dea3746d58083dd1d061fb276015d875b2e", "03d9ccce3e1b4d42d234dba1856a9e1b28977640", "46b7ee97d7dfbd61cc3745e8dfdd81a15ab5c1d4", "b6bb883dd14f2737d0d6225cf4acbf050d307634", "92e464a5a67582d5209fa75e3b29de05d82c7c86", "9939498315777b40bed9150d8940fc1ac340e8ba", "0a4f3a423a37588fde9a2db71f114b293fc09c50", "3176ee88d1bb137d0b561ee63edf10876f805cf0", "fb87045600da73b07f0757f345a937b1c8097463", "055de0519da7fdf27add848e691087e0af166637", "88a898592b4c1dfd707f04f09ca58ec769a257de", "726b8aba2095eef076922351e9d3a724bb71cb51", "902cc7dd4ecfb2b6750905ef08bceeed24e1eeeb", "b908edadad58c604a1e4b431f69ac8ded350589a", "7df4f96138a4e23492ea96cf921794fc5287ba72", "a6ce2f0795839d9c2543d64a08e043695887e0eb", "3bb670b2afdcc45da2b09a02aac07e22ea7dbdc2", "4c19690889fb3a12ec03e65bae6f5f20420b4ba4", "1a85956154c170daf7f15f32f29281269028ff69", "0de91641f37b0a81a892e4c914b46d05d33fd36e", "56359d2b4508cc267d185c1d6d310a1c4c2cc8c2", "0341405252c80ff029a0d0065ca46d0ade943b03", "0cbe059c181278a373292a6af1667c54911e7925", "013305c13cfabaea82c218b841dbe71e108d2b97", "529b1f33aed49dbe025a99ac1d211c777ad881ec", "f472cb8380a41c540cfea32ebb4575da241c0288", "3080026f2f0846d520bd5bacb0cb2acea0ffe16b", "4bbe460ab1b279a55e3c9d9f488ff79884d01608", "e74816bc0803460e20edbd30a44ab857b06e288e", "6ca2c5ff41e91c34696f84291a458d1312d15bf2", "3e40991ab1daa2a4906eb85a5d6a01a958b6e674", "85ae6fa48e07857e17ac4bd48fb804785483e268", "04661729f0ff6afe4b4d6223f18d0da1d479accf", "7071cd1ee46db4bc1824c4fd62d36f6d13cad08a", "657e702326a1cbc561e059476e9be4d417c37795", 
"93dcea2419ca95b96a47e541748c46220d289d77", "1a327c588b8f1057b40ecba451145dd885598e5d", "34fd227f4fdbc7fe028cc1f7d92cb59204333718", "42a6beed493c69d5bad99ae47ea76497c8e5fdae", "849f891973ad2b6c6f70d7d43d9ac5805f1a1a5b", "96a9ca7a8366ae0efe6b58a515d15b44776faf6e", "282503fa0285240ef42b5b4c74ae0590fe169211", "8c4ea76e67a2a99339a8c4decd877fe0aa2d8e82", "fd96432675911a702b8a4ce857b7c8619498bf9f", "8986585975c0090e9ad97bec2ba6c4b437419dae", "d3b0839324d0091e70ce34f44c979b9366547327", "badcd992266c6813063c153c41b87babc0ba36a3", "15ee80e86e75bf1413dc38f521b9142b28fe02d1", "24d376e4d580fb28fd66bc5e7681f1a8db3b6b78", "74ba4ab407b90592ffdf884a20e10006d2223015", "11d73f4f19077e6806d05dc7ecd17fbeb15bdf39", "51d1a6e15936727e8dd487ac7b7fd39bd2baf5ee", "38f1fac3ed0fd054e009515e7bbc72cdd4cf801a", "a1e07c31184d3728e009d4d1bebe21bf9fe95c8e", "c7cd490e43ee4ff81e8f86f790063695369c2830", "b6f682648418422e992e3ef78a6965773550d36b", "2d8001ffee6584b3f4d951d230dc00a06e8219f8", "35ccc836df60cd99c731412fe44156c7fd057b99", "e4d7b8eb0a8e6d2bb5b90b027c1bf32bad320ba5", "ebb3d5c70bedf2287f9b26ac0031004f8f617b97", "e1630014a5ae3d2fb7ff6618f1470a567f4d90f5", "6c58e3a8209fef0e28ca2219726c15ea5f284f4f", "9cc8cf0c7d7fa7607659921b6ff657e17e135ecc", "58bf72750a8f5100e0c01e55fd1b959b31e7dbce", "c39ffc56a41d436748b9b57bdabd8248b2d28a32", "2f61d91033a06dd904ff9d1765d57e5b4d7f57a6", "85ec86f8320ba2ed8b3da04d1c291ce88b8969c0", "68e9c837431f2ba59741b55004df60235e50994d", "a065080353d18809b2597246bb0b48316234c29a", "93420d9212dd15b3ef37f566e4d57e76bb2fab2f", "acee2201f8a15990551804dd382b86973eb7c0a8", "614a7c42aae8946c7ad4c36b53290860f6256441", "32ecbbd76fdce249f9109594eee2d52a1cafdfc7", "6341274aca0c2977c3e1575378f4f2126aa9b050", "38183fe28add21693729ddeaf3c8a90a2d5caea3", "6f7d06ced04ead3b9a5da86b37e7c27bfcedbbdd", "54058859a2ddf4ecfc0fe7ccbea7bb5f29d9201d", "7f23a4bb0c777dd72cca7665a5f370ac7980217e", "fc8990088e0f1f017540900bc3f5a4996192ff05", "fe48f0e43dbdeeaf4a03b3837e27f6705783e576", 
"ccebd3bf069f5c73ea2ccc5791976f894bc6023d", "d4f0960c6587379ad7df7928c256776e25952c60", "c1ebbdb47cb6a0ed49c4d1cf39d7565060e6a7ee", "3fb26f3abcf0d287243646426cd5ddeee33624d4", "9e105c4a176465d14434fb3f5bae67f57ff5fba2", "f9fb7979af4233c2dd14813da94ec7c38ce9232a", "3c97c32ff575989ef2869f86d89c63005fc11ba9", "94eeae23786e128c0635f305ba7eebbb89af0023", "cf54a133c89f730adc5ea12c3ac646971120781c", "b3b467961ba66264bb73ffe00b1830d7874ae8ce", "40b7e590dfd1cdfa1e0276e9ca592e02c1bd2b5b", "dfabe7ef245ca68185f4fcc96a08602ee1afb3f7", "00e3957212517a252258baef833833921dd308d4", "6dbdb07ce2991db0f64c785ad31196dfd4dae721", "9bd35145c48ce172b80da80130ba310811a44051", "67484723e0c2cbeb936b2e863710385bdc7d5368", "f3b7938de5f178e25a3cf477107c76286c0ad691", "3e207c05f438a8cef7dd30b62d9e2c997ddc0d3f", "c86e6ed734d3aa967deae00df003557b6e937d3d", "7eb895e7de883d113b75eda54389460c61d63f67", "b388bf63c79e429dafee16c62b2732bcbea0d026", "eb87151fd2796ff5b4bbcf1906d41d53ac6c5595", "6909cd34a1eceba2140e2c02a842cefcecf33645", "5c35ac04260e281141b3aaa7bbb147032c887f0c", "0f9dd79de75a3dce394846369f09c05ddf250e31", "5d9f468a2841ea2f27bbe3ef2c6fe531d444be68", "cd023d2d067365c83d8e27431e83e7e66082f718", "244293024aebbb0ff42a7cf2ba49b1164697a127", "3399f8f0dff8fcf001b711174d29c9d4fde89379", "40c8cffd5aac68f59324733416b6b2959cb668fd", "d69271c7b77bc3a06882884c21aa1b609b3f76cc", "2f16459e2e24dc91b3b4cac7c6294387d4a0eacf", "8cd0855ca967ce47b0225b58bbadd38d8b1b41a1", "22f656d0f8426c84a33a267977f511f127bfd7f3", "b084683e5bab9b2bc327788e7b9a8e049d5fff8f", "e5d53a335515107452a30b330352cad216f88fc3", "407a26fff7fac195b74de9fcb556005e8785a4e9", "c3a3f7758bccbead7c9713cb8517889ea6d04687", "52d7eb0fbc3522434c13cc247549f74bb9609c5d", "3ed46ef5344927a30d71089ae203c9a9e35e4977", "6dc1f94b852538d572e4919238ddb10e2ee449a4", "878301453e3d5cb1a1f7828002ea00f59cbeab06", "405d9a71350c9a13adea41f9d7f7f9274793824f", "d8fbd3a16d2e2e59ce0cff98b3fd586863878dc1", "2d79d338c114ece1d97cde1aa06ab4cf17d38254", 
"37f25732397864b739714aac001ea1574d813b0d", "c92bb26238f6e30196b0c4a737d8847e61cfb7d4", "96ba65bffdddef7c7737c0f42ff4299e95cd85c2", "c9c9ade2ef4dffb7582a629a47ea70c31be7a35e", "17ded725602b4329b1c494bfa41527482bf83a6f", "3f540faf85e1f8de6ce04fb37e556700b67e4ad3", "8c2b663f8be1702ed3e377b5e6e85921fe7c6389", "d115c4a66d765fef596b0b171febca334cea15b5", "e065a2cb4534492ccf46d0afc81b9ad8b420c5ec", "17501551acce05bfde4f0af77c21005f96e80553", "dcf71245addaf66a868221041aabe23c0a074312", "cc70fb1ab585378c79a2ab94776723e597afe379", "9efdb73c6833df57732b727c6aeac510cadb53fe", "0b82bf595e76898993ed4f4b2883c42720c0f277", "287de191c49a3caa38ad7594093045dfba1eb420", "a896ddeb0d253739c9aaef7fc1f170a2ba8407d3", "b2cd92d930ed9b8d3f9dfcfff733f8384aa93de8", "e8c6853135856515fc88fff7c55737a292b0a15b", "72cbbdee4f6eeee8b7dd22cea6092c532271009f", "c23153aade9be0c941390909c5d1aad8924821db", "24286ef164f0e12c3e9590ec7f636871ba253026", "d50c6d22449cc9170ab868b42f8c72f8d31f9b6c", "377f2b65e6a9300448bdccf678cde59449ecd337", "96faccdddef887673d6007fed8ff2574580cae1f", "35d272877b178aa97c678e3fcbb619ff512af4c2", "437a720c6f6fc1959ba95e48e487eb3767b4e508", "1ee3b4ba04e54bfbacba94d54bf8d05fd202931d", "61bc124537f414f6fcb4d1ff476681b5a0ee222a", "55e87050b998eb0a8f0b16163ef5a28f984b01fa", "ab2c07c9867243fad2d66fa6aeabfb780433f319", "0d760e7d762fa449737ad51431f3ff938d6803fe", "405b43f4a52f70336ac1db36d5fa654600e9e643", "69de532d93ad8099f4d4902c4cad28db958adfea", "2710e1c58476e1996466530af825de6376a92833", "4d90d7834ae25ee6176c096d5d6608555766c0b1", "00a967cb2d18e1394226ad37930524a31351f6cf", "5bb684dfe64171b77df06ba68997fd1e8daffbe1", "e00d4e4ba25fff3583b180db078ef962bf7d6824", "795aa8064b34c4bf4acdd8be3f1e5d06da5a7756", "878169be6e2c87df2d8a1266e9e37de63b524ae7", "bc607bee2002c6c6bf694a15efd0a5d049767237", "c3a53b308c7a75c66759cbfdf52359d9be4f552b", "e8b2a98f87b7b2593b4a046464c1ec63bfd13b51", "68caf5d8ef325d7ea669f3fb76eac58e0170fff0", "04c2cda00e5536f4b1508cbd80041e9552880e67", 
"53bfe2ab770e74d064303f3bd2867e5bf7b86379", "c9bbd7828437e70cc3e6863b278aa56a7d545150", "30044dd951133187cb8b57e53a22cf9306fa7612", "e23bc755f7e161d524fcc33b7d927d67dd4a5e76", "14418ae9a6a8de2b428acb2c00064da129632f3e", "7c13fa0c742123a6a927771ce67da270492b588c", "8818b12aa0ff3bf0b20f9caa250395cbea0e8769", "6f7a8b3e8f212d80f0fb18860b2495be4c363eac", "3168e52567d564f0871c3f9ed7757dae9d66c55a", "4e5760521356745548246b1cb74c8d69675d9923", "f35a493afa78a671b9d2392c69642dcc3dd2cdc2", "4db0968270f4e7b3fa73e41c50d13d48e20687be", "29921072d8628544114f68bdf84deaf20a8c8f91", "bd9c9729475ba7e3b255e24e7478a5acb393c8e9", "0a68747d001aba014acd3b6ec83ba9534946a0da", "d7312149a6b773d1d97c0c2b847609c07b5255ec", "218139e5262cb4f012cd2e119074aa59b89ebc32", "64d7e62f46813b5ad08289aed5dc4825d7ec5cff", "30fb5c24cc15eb8cde5e389bf368d65fb96513e4", "bf5940d57f97ed20c50278a81e901ae4656f0f2c", "6196f4be3b28684f6528b8687adccbdf9ac5c67c", "69a55c30c085ad1b72dd2789b3f699b2f4d3169f", "ef5531711a69ed687637c48930261769465457f0", "ae85c822c6aec8b0f67762c625a73a5d08f5060d", "17579791ead67262fcfb62ed8765e115fb5eca6f", "0697bd81844d54064d992d3229162fe8afcd82cb", "d6102a7ddb19a185019fd2112d2f29d9258f6dec", "eed1dd2a5959647896e73d129272cb7c3a2e145c", "666300af8ffb8c903223f32f1fcc5c4674e2430b", "8a8861ad6caedc3993e31d46e7de6c251a8cda22", "8c8525e626c8857a4c6c385de34ffea31e7e41d1", "23c3eb6ad8e5f18f672f187a6e9e9b0d94042970", "84f904a71bee129a1cf00dc97f6cdbe1011657e6", "7e5aa453a21f56737db5e02d540f1b70ee6634ad", "a5c8fc1ca4f06a344b53dc81ebc6d87f54896722", "ef458499c3856a6e9cd4738b3e97bef010786adb", "abeda55a7be0bbe25a25139fb9a3d823215d7536", "1586871a1ddfe031b885b94efdbff647cf03eff1", "d448d67c6371f9abf533ea0f894ef2f022b12503", "68070526920b387bfb91e4753d57d8e07fac51ee", "5bcc8ef74efbb959407adfda15a01dad8fcf1648", "27d709f7b67204e1e5e05fe2cfac629afa21699d", "3b84d074b8622fac125f85ab55b63e876fed4628", "18010284894ed0edcca74e5bf768ee2e15ef7841", "59e2037f5079794cb9128c7f0900a568ced14c2a", 
"5fb9944b18f5a4a6d20778816290ed647f5e3853", "bb2f61a057bbf176e402d171d79df2635ccda9f6", "2d411826cd7865638b65e1b5f92043c245f009f9", "35e0256b33212ddad2db548484c595334f15b4da", "782188821963304fb78791e01665590f0cd869e8", "83f80fd4eb614777285202fa99e8314e3e5b169c", "3b02aaccc9f063ae696c9d28bb06a8cd84b2abb8", "2cdd5b50a67e4615cb0892beaac12664ec53b81f", "4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7", "4e0636a1b92503469b44e2807f0bb35cc0d97652", "0ee5c4112208995bf2bb0fb8a87efba933a94579", "e85a255a970ee4c1eecc3e3d110e157f3e0a4629", "420782499f38c1d114aabde7b8a8104c9e40a974", "09fa54f1ab7aaa83124d2415bfc6eb51e4b1f081", "117f164f416ea68e8b88a3005e55a39dbdf32ce4", "67214e8d2f83eb41c14bfc86698eb6620e72e87c", "923ec0da8327847910e8dd71e9d801abcbc93b08", "9b0ead0a20a2b7c4ae40568d8d1c0c2b23a6b807", "096ffc1ea5493242ba0c113178dab0c096412f81", "572dbaee6648eefa4c9de9b42551204b985ff863", "0a9345ea6e488fb936e26a9ba70b0640d3730ba7", "ee03ed3a8a9a8b6bf35dda832c34160e62893f92", "2480f8dccd9054372d696e1e521e057d9ac9de17", "556b05ab6eff48d32ffbd04f9008b9a5c78a4ad7", "48c41ffab7ff19d24e8df3092f0b5812c1d3fb6e", "8aed6ec62cfccb4dba0c19ee000e6334ec585d70", "2bb2ba7c96d40e269fc6a2d5384c739ff9fa16eb", "2597b0dccdf3d89eaffd32e202570b1fbbedd1d6", "86f3552b822f6af56cb5079cc31616b4035ccc4e", "2f598922f81e65c1f3ffbd8c2456d2e9dcd7124a", "dc77287bb1fcf64358767dc5b5a8a79ed9abaa53", "649eb674fc963ce25e4e8ce53ac7ee20500fb0e3", "8ae642c87f0d6eeff1c6362571e7cd36dcda60ae", "e2f78d2f75a807b89a13115a206da4661361fa71", "871f5f1114949e3ddb1bca0982086cc806ce84a8", "57b052cf826b24739cd7749b632f85f4b7bcf90b", "bf776e3483419d7e0cb1dfd770be02d552e1fedf", "26c7eda262dfda1c3a3597a3bf1f2f1cc4013425", "8fa9cb5dac394e30e4089bf5f4ffecc873d1da96", "e572c42d8ef2e0fadedbaae77c8dfe05c4933fbf", "356b431d4f7a2a0a38cf971c84568207dcdbf189", "60efdb2e204b2be6701a8e168983fa666feac1be", "27f8b01e628f20ebfcb58d14ea40573d351bbaad", "28cd46a078e8fad370b1aba34762a874374513a5", "1886b6d9c303135c5fbdc33e5f401e7fc4da6da4", 
"36c473fc0bf3cee5fdd49a13cf122de8be736977", "434d6726229c0f556841fad20391c18316806f73", "5d88702cdc879396b8b2cc674e233895de99666b", "4bf85ef995c684b841d0a5a002d175fadd922ff0", "d963bdff2ce5212fa585a83ca8fad96875bc0057", "90a754f597958a2717862fbaa313f67b25083bf9", "58d47c187b38b8a2bad319c789a09781073d052d", "59d225486161b43b7bf6919b4a4b4113eb50f039", "a9be20954e9177d8b2bc39747acdea4f5496f394", "c038beaa228aeec174e5bd52460f0de75e9cccbe", "e43045a061421bd79713020bc36d2cf4653c044d", "d1edb8ba9d50817dbfec7e30f25b1846941e84d8", "624e9d9d3d941bab6aaccdd93432fc45cac28d4b", "ef761435c1af2b3e5caba5e8bbbf5aeab69d934e", "d307a766cc9c728a24422313d4c3dcfdb0d16dd5", "0c53ef79bb8e5ba4e6a8ebad6d453ecf3672926d", "590628a9584e500f3e7f349ba7e2046c8c273fcf", "ed2f4e5ecbc4b08ee0784e97760a7f9e5ea9efae", "0ccc535d12ad2142a8310d957cc468bbe4c63647", "82dad0941a7cada11d2e2f2359293fe5fabf913f", "68c5b4d9ce2a0c75ba515870923a4bd1b7d8f9b5", "1c9efb6c895917174ac6ccc3bae191152f90c625", "43261920d2615f135d6e72b333fe55d3f2659145", "48a6a1c6a0ac5f2b7912b3ccb40b0c07f62ddfdf", "02fc9e7283b79183eb3757a9b6ddeb8c91c209bb", "7ec431e36919e29524eceb1431d3e1202637cf19", "2f7e9b45255c9029d2ae97bbb004d6072e70fa79", "3b9c08381282e65649cd87dfae6a01fe6abea79b", "01dc1e03f39901e212bdf291209b7686266aeb13", "44d23df380af207f5ac5b41459c722c87283e1eb", "dc5d04d34b278b944097b8925a9147773bbb80cc", "b999364980e4c21d9c22cc5a9f14501432999ca4", "4b5eeea5dd8bd69331bd4bd4c66098b125888dea", "7c3e09e0bd992d3f4670ffacb4ec3a911141c51f", "f8f2d2910ce8b81cb4bbf84239f9229888158b34", "2b0102d77d3d3f9bc55420d862075934f5c85bec", "3c8db0d86a6aa51b64ec09c7d25a721adcdfb7a3", "b5979489e11edd76607c219a8bdc83ba4a88ab38", "560e0e58d0059259ddf86fcec1fa7975dee6a868", "e8f4ded98f5955aad114f55e7aca6b540599236b", "39f525f3a0475e6bbfbe781ae3a74aca5b401125", "af2d30fdb8c611dc5b883b90311d873e336fc534", "d3d39e419ac98db2de1a9d5a05cb0b4ca5cae8fd", "377c6563f97e76a4dc836a0bd23d7673492b1aae", "089b5e8eb549723020b908e8eb19479ba39812f5", 
"206e24f7d4b3943b35b069ae2d028143fcbd0704", "4552f4d46a2cc67ccc4dd8568e5c95aa2eedb4ec", "7ef0cc4f3f7566f96f168123bac1e07053a939b2", "a6b5ffb5b406abfda2509cae66cdcf56b4bb3837", "bd2d7c7f0145028e85c102fe52655c2b6c26aeb5", "10e7dd3bbbfbc25661213155e0de1a9f043461a2", "25960f0a2ed38a89fa8076a448ca538de2f1e183", "e95895262f66f7c5e47dd46a70110d89c3b4c203", "0077cd8f97cafd2b389783858a6e4ab7887b0b6b", "a5f70e0cd7da2b2df05fadb356a24743f3cf459a", "20a3ce81e7ddc1a121f4b13e439c4cbfb01adfba", "872dfdeccf99bbbed7c8f1ea08afb2d713ebe085", "1b5875dbebc76fec87e72cee7a5263d325a77376", "68bf7fc874c2db44d0446cdbb1e05f19c2239282", "250b73ec5a4f78b7b4ea3aba65c27fc1352154d5", "e9c008d31da38d9eef67a28d2c77cb7daec941fb", "c75e6ce54caf17b2780b4b53f8d29086b391e839", "224d0eee53c2aa5d426d2c9b7fa5d843a47cf1db", "a89cbc90bbb4477a48aec185f2a112ea7ebe9b4d", "0c1d85a197a1f5b7376652a485523e616a406273", "5435d5f8b9f4def52ac84bee109320e64e58ab8f", "6ffdbac58e15e0ff084310b0a804520ad4bd013e", "9af9a88c60d9e4b53e759823c439fc590a4b5bc5", "8f73af52d87c94d0bd43242462fd68d974eda331", "1d696a1beb42515ab16f3a9f6f72584a41492a03", "4b605e6a9362485bfe69950432fa1f896e7d19bf", "747fddd7345b60da121fc13c5440a18039b912e6", "c1f05b723e53ac4eb1133249b445c0011d42ca79", "b72eebffe697008048781ab7b768e0c96e52236a", "0133d1fe8a3138871075cd742c761a3de93a42ec", "57178b36c21fd7f4529ac6748614bb3374714e91", "361eaef45fccfffd5b7df12fba902490a7d24a8d", "f02f0f6fcd56a9b1407045de6634df15c60a85cd", "380d5138cadccc9b5b91c707ba0a9220b0f39271", "4d47261b2f52c361c09f7ab96fcb3f5c22cafb9f", "533bfb82c54f261e6a2b7ed7d31a2fd679c56d18", "ba01dbfa29dc86d1279b2e9b9eeca1c52509bbda", "3b557c4fd6775afc80c2cf7c8b16edde125b270e", "6bf88e29ac04d72297e6f8f2971c5b8579786e7f", "0fb45e704ef3ca1f9c70e7be3fb93b53714ed8b5", "38f7f3c72e582e116f6f079ec9ae738894785b96", "0f0366070b46972fcb2976775b45681e62a94a26", "1860b8f63ce501bd0dfa9e6f2debc080e88d9baa", "35d42f4e7a1d898bc8e2d052c38e1106f3e80188", "bec31269632c17206deb90cd74367d1e6586f75f", 
"ee72673c0394d0fff2b3d8372d8a9401867b8e13", "b59f441234d2d8f1765a20715e227376c7251cd7", "43dce79cf815b5c7068b1678f6200dabf8f5de31", "140c95e53c619eac594d70f6369f518adfea12ef", "571b83f7fc01163383e6ca6a9791aea79cafa7dd", "dd8a851f2a0c63bb97e33aaff1841695f601c863", "853bd61bc48a431b9b1c7cab10c603830c488e39", "b972683d702a65d3ee7a25bc931a5890d1072b6b", "ab80582807506c0f840bd1ba03a8b84f8ac72f79", "06aab105d55c88bd2baa058dc51fa54580746424", "43fe03ec1acb6ea9d05d2b22eeddb2631bd30437", "5180df9d5eb26283fb737f491623395304d57497", "9103148dd87e6ff9fba28509f3b265e1873166c9", "18d3532298fb7b8fb418453107f786178ca82e4a", "2884ff0d58a66d42371b548526d685760e514043", "885c37f94e9edbbb2177cfba8cb1ad840b2a5f20", "9fc993aeb0a007ccfaca369a9a8c0ccf7697261d", "09f58353e48780c707cf24a0074e4d353da18934", "99ced8f36d66dce20d121f3a29f52d8b27a1da6c", "4ba2f445fcbbad464f107b036c57aa807ac5c0c2", "5e7e055ef9ba6e8566a400a8b1c6d8f827099553", "c87f7ee391d6000aef2eadb49f03fc237f4d1170", "03adcf58d947a412f3904a79f2ab51cfdf0e838a", "a5625cfe16d72bd00e987857d68eb4d8fc3ce4fb", "7f533bd8f32525e2934a66a5b57d9143d7a89ee1", "fed8cc533037d7d925df572a440fd89f34d9c1fd", "3bb6570d81685b769dc9e74b6e4958894087f3f1", "861a832b87b071a5d479186bbb2822f9ddbb67e4", "10e0e6f1ec00b20bc78a5453a00c792f1334b016", "344c0917c8d9e13c6b3546da8695332f86b57bd3", "f1061b2b5b7ca32edd5aa486aecc63a0972c84f3", "1fcdc113a5df2f45a1f4b3249c041d942a3a730b", "27da432cf2b9129dce256e5bf7f2f18953eef5a5", "bb06ef67a49849c169781657be0bb717587990e0", "8813368c6c14552539137aba2b6f8c55f561b75f", "173657da03e3249f4e47457d360ab83b3cefbe63", "4a6fcf714f663618657effc341ae5961784504c7", "ccb54fc5f263a8bc2a8373839cb6855f528f10d3", "a6d47f7aa361ab9b37c7f3f868280318f355fadc", "1e07500b00fcd0f65cf30a11f9023f74fe8ce65c", "5e16f10f2d667d17c029622b9278b6b0a206d394", "08d55271589f989d90a7edce3345f78f2468a7e0", "7c8e0f3053e09da6d8f9a1812591a35bccd5c669", "1a40092b493c6b8840257ab7f96051d1a4dbfeb2", "c394a5dfe5bea5fbab4c2b6b90d2d03e01fb29c0", 
"e79bacc03152ea55343e6af97bcd17d8904cf5ef", "2945cc9e821ab87fa17afc8802f3858435d1264c", "bb0ecedde7d6e837dc9a5e115302a2aaad1035e1", "b2ae5c496fe01bb2e2dee107f75b82c6a2a23374", "1283398de84ec0178dc74d41a87febfbfbcbbb02", "9f65319b8a33c8ec11da2f034731d928bf92e29d", "e43a18384695ae0acc820171236a39811ec2cd58", "3cd5b1d71c1d6a50fcc986589f2d0026c68d9803", "cdae8e9cc9d605856cf5709b2fdf61f722d450c1", "0aeb5020003e0c89219031b51bd30ff1bceea363", "4d6ad0c7b3cf74adb0507dc886993e603c863e8c", "2770b095613d4395045942dc60e6c560e882f887", "d903292dc4e752f6a3bf2abe668d17a2575044d4", "c9832564d5dc601113b4d80e5a05ede6fee9f7dd", "82b43bc9213230af9db17322301cbdf81e2ce8cc", "110359824a0e3b6480102b108372793265a24a86", "17479e015a2dcf15d40190e06419a135b66da4e0", "03ac1c694bc84a27621da6bfe73ea9f7210c6d45", "9ce97efc1d520dadaa0d114192ca789f23442727", "dbb16032dd8f19bdfd045a1fc0fc51f29c70f70a", "aed6af12148b43e4a24ee6e2bc3604ca59bd99a5", "59a6c9333c941faf2540979dcfcb5d503a49b91e", "6479b61ea89e9d474ffdefa71f068fbcde22cc44", "14ce7635ff18318e7094417d0f92acbec6669f1c", "84574aa43a98ad8a29470977e7b091f5a5ec2366", "46e72046a9bb2d4982d60bcf5c63dbc622717f0f", "ec00ecb64fa206cea8b2e716955a738a96424084", "cd55fb30737625e86454a2861302b96833ed549d", "59a35b63cf845ebf0ba31c290423e24eb822d245", "77c5437107f8138d48cb7e10b2b286fa51473678", "ff3f128f5addc6ce6b41f19f3d679282bbdaa2ee", "4ff4c27e47b0aa80d6383427642bb8ee9d01c0ac", "eed93d2e16b55142b3260d268c9e72099c53d5bc", "d9c0310203179d5328c4f1475fa4d68c5f0c7324", "c10b0a6ba98aa95d740a0d60e150ffd77c7895ad", "a955033ca6716bf9957b362b77092592461664b4", "0486eb243d167ab4b197b682e9eff9684b273df4", "b5fdd7778503f27c9d9bf77fab193b475fab6076", "08903bf161a1e8dec29250a752ce9e2a508a711c", "b8f64a94f536b46ef34a0223272e02f9be785ef9", "316d51aaa37891d730ffded7b9d42946abea837f", "672fae3da801b2a0d2bad65afdbbbf1b2320623e", "7a65fc9e78eff3ab6062707deaadde024d2fad40", "5da740682f080a70a30dc46b0fc66616884463ec", "4688787d064e59023a304f7c9af950d192ddd33e", 
"3ac3a714042d3ebc159546c26321a1f8f4f5f80c", "f997a71f1e54d044184240b38d9dc680b3bbbbc0", "676f9eabf4cfc1fd625228c83ff72f6499c67926", "bb4be8e24d7b8ed56d81edec435b7b59bad96214", "cef73d305e5368ee269baff53ec20ea3ae7cdd82", "026e96c3c4751e1583bfe78b8c28bdfe854c4988", "b81cae2927598253da37954fb36a2549c5405cdb", "f28b7d62208fdaaa658716403106a2b0b527e763", "19d583bf8c5533d1261ccdc068fdc3ef53b9ffb9", "97865d31b5e771cf4162bc9eae7de6991ceb8bbf", "44b827df6c433ca49bcf44f9f3ebfdc0774ee952", "e378ce25579f3676ca50c8f6454e92a886b9e4d7", "4377b03bbee1f2cf99950019a8d4111f8de9c34a", "32c9ebd2685f522821eddfc19c7c91fd6b3caf22", "aa8ef6ba6587c8a771ec4f91a0dd9099e96f6d52", "9e42d44c07fbd800f830b4e83d81bdb9d106ed6b", "cbd004d4c5e3b64321dc1a8f05fa5d64500389c2", "76cd5e43df44e389483f23cb578a9015d1483d70", "341002fac5ae6c193b78018a164d3c7295a495e4", "c68ec931585847b37cde9f910f40b2091a662e83", "89d3a57f663976a9ac5e9cdad01267c1fc1a7e06", "601655a17ca199ef674079482c9b37cdf8e094a9", "bec0c33d330385d73a5b6a05ad642d6954a6d632", "9d941a99e6578b41e4e32d57ece580c10d578b22", "6f6ce988a13ac08071a0e3349f80b7c8adc7a49d", "368d59cf1733af511ed8abbcbeb4fb47afd4da1c", "26fcefb80af66391e07e6239933de943c1cddc6e", "54204e28af73c7aca073835a14afcc5d8f52a515", "88bee9733e96958444dc9e6bef191baba4fa6efa", "2c1ffb0feea5f707c890347d2c2882be0494a67a", "57fd229097e4822292d19329a17ceb013b2cb648", "e4aeaf1af68a40907fda752559e45dc7afc2de67", "621e8882c41cdaf03a2c4a986a6404f0272ba511", "7ebb153704706e457ab57b432793d2b6e5d12592", "334d6c71b6bce8dfbd376c4203004bd4464c2099", "1fbde67e87890e5d45864e66edb86136fbdbe20e", "6a2b83c4ae18651f1a3496e48a35b0cd7a2196df", "a1d86c898da3aea54deafd60864aa05dff8a4c9c", "441bf5f7fe7d1a3939d8b200eca9b4bb619449a9", "49df381ea2a1e7f4059346311f1f9f45dd997164", "6fa3857faba887ed048a9e355b3b8642c6aab1d8", "8c66378df977606d332fc3b0047989e890a6ac76", "f56c407f918cf89ffa2ec3c51c383d53510c10e1", "9458c518a6e2d40fb1d6ca1066d6a0c73e1d6b73", "a59c0cf3d2c5bf144ee0dbc1152b1b5dd7634990", 
"f3ca2c43e8773b7062a8606286529c5bc9b3ce25", "950bf95da60fd4e77d5159254fed906d5ed5fbcb", "31835472821c7e3090abb42e57c38f7043dc3636", "945ef646679b6c575d3bbef9c6fc0a9629ac1b62", "1e6ed6ca8209340573a5e907a6e2e546a3bf2d28", "0343f9401b98de36be957a30209fef45dd684270", "25d514d26ecbc147becf4117512523412e1f060b", "30180f66d5b4b7c0367e4b43e2b55367b72d6d2a", "2c0acaec54ab2585ff807e18b6b9550c44651eab", "b7ec41005ce4384e76e3be854ecccd564d2f89fb", "8882d39edae556a351b6445e7324ec2c473cadb1", "6080f26675e44f692dd722b61905af71c5260af8", "fe0c51fd41cb2d5afa1bc1900bbbadb38a0de139", "5981c309bd0ffd849c51b1d8a2ccc481a8ec2f5c", "34c1e9a6166f4732d1738db803467f7abc47ba87", "f27fd2a1bc229c773238f1912db94991b8bf389a", "07f31bef7a7035792e3791473b3c58d03928abbf", "0e21c9e5755c3dab6d8079d738d1188b03128a31", "c5c53d42e551f3c8f6ca2c13335af80a882009fa", "c675534be881e59a78a5986b8fb4e649ddd2abbe", "cc91001f9d299ad70deb6453d55b2c0b967f8c0d", "4896909796f9bd2f70a2cb24bf18daacd6a12128", "a98316980b126f90514f33214dde51813693fe0d", "4268ae436db79c4eee8bc06e9475caff3ff70d57", "0595d18e8d8c9fb7689f636341d8a55cc15b3e6a", "48906f609446afcdaacbe1d65770d7a6165a8eee", "3bd10f7603c4f5a4737c5613722124787d0dd818", "1fd2ed45fb3ba77f10c83f0eef3b66955645dfe0", "809e5884cf26b71dc7abc56ac0bad40fb29c671c", "17e563af203d469c456bb975f3f88a741e43fb71", "a52a69bf304d49fba6eac6a73c5169834c77042d", "de0df8b2b4755da9f70cf1613d7b12040d0ce8ef", "5b01d4338734aefb16ee82c4c59763d3abc008e6", "cfb8bc66502fb5f941ecdb22aec1fdbfdb73adce", "63cf5fc2ee05eb9c6613043f585dba48c5561192", "87f285782d755eb85d8922840e67ed9602cfd6b9", "988d1295ec32ce41d06e7cf928f14a3ee079a11e", "d44a93027208816b9e871101693b05adab576d89", "2ad7cef781f98fd66101fa4a78e012369d064830", "459e840ec58ef5ffcee60f49a94424eb503e8982", "9ea73660fccc4da51c7bc6eb6eedabcce7b5cead", "0d3b167b52e9f0bf509e3af003ea320e6070b665", "841855205818d3a6d6f85ec17a22515f4f062882", "1773d65c1dc566fd6128db65e907ac91b4583bed", "100105d6c97b23059f7aa70589ead2f61969fbc3", 
"29b86534d4b334b670914038c801987e18eb5532", "021e008282714eaefc0796303f521c9e4f199d7e", "03f7041515d8a6dcb9170763d4f6debd50202c2b", "ff01bc3f49130d436fca24b987b7e3beedfa404d", "f03a82fd4a039c1b94a0e8719284a777f776fb22", "1fefb2f8dd1efcdb57d5c2966d81f9ab22c1c58d", "e9a5a38e7da3f0aa5d21499149536199f2e0e1f7", "4e061a302816f5890a621eb278c6efa6e37d7e2f", "417c2fa930bb7078fdf10cb85c503bd5270b9dc2", "70341f61dfe2b92d8607814b52dfd0863a94310e", "ac9a331327cceda4e23f9873f387c9fd161fad76", "1176c886afbd8685ecf0094450a02eb96b950f71", "bcc172a1051be261afacdd5313619881cbe0f676", "d6bdc70d259b38bbeb3a78db064232b4b4acc88f", "486840f4f524e97f692a7f6b42cd19019ee71533", "53fdcc3a5a7e42590c21bbb4fe90d7f353ca21e5", "47dabb566f2bdd6b3e4fa7efc941824d8b923a13", "2042f1cacea262ec924f74994e49d5e87d9d0445", "ac8441e30833a8e2a96a57c5e6fede5df81794af", "06c956d4aac65752672ce4bd5a379f10a7fd6148", "052f994898c79529955917f3dfc5181586282cf8", "d00c335fbb542bc628642c1db36791eae24e02b7", "4cdb6144d56098b819076a8572a664a2c2d27f72", "dc6ad30c7a4bc79bb06b4725b16e202d3d7d8935", "e9b6804cd56cadb9342ec2ce412aacba7afd0723", "b971266b29fcecf1d5efe1c4dcdc2355cb188ab0", "4349f17ec319ac8b25c14c2ec8c35f374b958066", "58542eeef9317ffab9b155579256d11efb4610f2", "2983cf95743be82671a71528004036bd19172712", "22043cbd2b70cb8195d8d0500460ddc00ddb1a62", "d9e66b877b277d73f8876f537206395e71f58269", "e99718d08aca2c49cd2848eebdbb7c7855b4e484", "aa0c30bd923774add6e2f27ac74acd197b9110f2", "80be8624771104ff4838dcba9629bacfe6b3ea09", "8d3e95c31c93548b8c71dbeee2e9f7180067a888", "60643bdab1c6261576e6610ea64ea0c0b200a28d", "2e86402b354516d0a8392f75430156d629ca6281", "2a612a7037646276ff98141d3e7abbc9c91fccb8", "2e1415a814ae9abace5550e4893e13bd988c7ba1", "93eb3963bc20e28af26c53ef3bce1e76b15e3209", "44f48a4b1ef94a9104d063e53bf88a69ff0f55f3", "0aebe97a92f590bdf21cdadfddec8061c682cdb2", "6d70344ae6f6108144a15e9debc7b0be4e3335f1", "78174c2be084e67f48f3e8ea5cb6c9968615a42c", "ab734bac3994b00bf97ce22b9abc881ee8c12918", 
"df577a89830be69c1bfb196e925df3055cafc0ed", "a3d8b5622c4b9af1f753aade57e4774730787a00", "bb4f83458976755e9310b241a689c8d21b481238", "973022a1f9e30a624f5e8f7158b5bbb114f4af32", "787c1bb6d1f2341c5909a0d6d7314bced96f4681", "e00d391d7943561f5c7b772ab68e2bb6a85e64c4", "432d8cba544bf7b09b0455561fea098177a85db1", "7fa00c81f7c2d8da1551334b0e7bc3d7fd43130c", "71e56f2aebeb3c4bb3687b104815e09bb4364102", "f7c50d2be9fba0e4527fd9fbe3095e9d9a94fdd3", "d6791b98353aa113d79f6fb96335aa6c7ea3b759", "6f22628d34a486d73c6b46eb071200a00e3abae3", "73f341ff68caa9f8802e9e81bfa90d88bbdbd9d2", "5b5b568a0ba63d00e16a263051c73e09ab83e245", "fdaf65b314faee97220162980e76dbc8f32db9d6", "77362789d04db4c51be61eaffa4f43e03759e677", "d31328b12eef33e7722b8e5505d0f9d9abe2ffd9", "5d01283474b73a46d80745ad0cc0c4da14aae194", "162c33a2ec8ece0dc96e42d5a86dc3fedcf8cd5e", "b1429e4d3dd3412e92a37d2f9e0721ea719a9b9e", "f2700e3d69d3cce2e0b1aea0d7f87e74aff437cd", "76ce3d35d9370f0e2e27cfd29ea0941f1462895f", "47506951d2dc7c4bb4d2d33dd25b67a767e56680", "2ff9ffedfc59422a8c7dac418a02d1415eec92f1", "9ba4a5e0b7bf6e26563d294f1f3de44d95b7f682", "39d6f8b791995dc5989f817373391189d7ac478a", "ad6745dd793073f81abd1f3246ba4102046da022", "ce6d60b69eb95477596535227958109e07c61e1e", "24cb375a998f4af278998f8dee1d33603057e525", "bb557f4af797cae9205d5c159f1e2fdfe2d8b096", "c903af0d69edacf8d1bff3bfd85b9470f6c4c243", "3774ffc9523b8f4a148d5e93eaae317dc18af3e6", "be393cd567b338da6ed60181c8ad429627578a31", "39f03d1dfd94e6f06c1565d7d1bb14ab0eee03bc", "11a47a91471f40af5cf00449954474fd6e9f7694", "b6a01cd4572b5f2f3a82732ef07d7296ab0161d3", "0081e2188c8f34fcea3e23c49fb3e17883b33551", "b85580ff2d8d8be0a2c40863f04269df4cd766d9", "19808134b780b342e21f54b60095b181dfc7a600", "cbca355c5467f501d37b919d8b2a17dcb39d3ef9", "56dca23481de9119aa21f9044efd7db09f618704", "08c76a4cc6f402c37a050cae5390427a5b66a467", "13fd0a4d06f30a665fc0f6938cea6572f3b496f7", "a9ae55c83a8047c6cdf7c958fd3d4a6bfb0a13df", "a13a27e65c88b6cb4a414fd4f6bca780751a59db", 
"4fac09969ee80d485876e3198c7177181c600a4a", "7831ab4f8c622d91974579c1ff749dadc170c73c", "39d0de660e2116f32088ce07c3376759d0fdaff5", "ce6f459462ea9419ca5adcc549d1d10e616c0213", "174f46eccb5852c1f979d8c386e3805f7942bace", "d3d5d86afec84c0713ec868cf5ed41661fc96edc", "b8dba0504d6b4b557d51a6cf4de5507141db60cf", "a1081cb856faae25df14e25045cd682db8028141", "b32631f456397462b3530757f3a73a2ccc362342", "ec90738b6de83748957ff7c8aeb3150b4c9b68bb", "993d189548e8702b1cb0b02603ef02656802c92b", "098363b29eef1471c494382338687f2fe98f6e15", "6f22324fab61fbc5df1aac2c0c9c497e0a7db608", "e6b45d5a86092bbfdcd6c3c54cda3d6c3ac6b227", "9be653e1bc15ef487d7f93aad02f3c9552f3ee4a", "8bebb26880274bdb840ebcca530caf26c393bf45", "970c0d6c0fd2ebe7c5921a45bc70f6345c844ff3", "c7c03324833ba262eeaada0349afa1b5990c1ea7", "534159e498e9cc61ea10917347637a59af38142d", "feea73095b1be0cbae1ad7af8ba2c4fb6f316d35", "c05ae45c262b270df1e99a32efa35036aae8d950", "210b98394c3be96e7fd75d3eb11a391da1b3a6ca", "f6b4811c5e7111485e2c9cc5bf63f8ac80f3e2d7", "302c9c105d49c1348b8f1d8cc47bead70e2acf08", "6be0ab66c31023762e26d309a4a9d0096f72a7f0", "b8fc620a1563511744f1a9386bdfa09a2ea0f71b", "2050847bc7a1a0453891f03aeeb4643e360fde7d", "d3edbfe18610ce63f83db83f7fbc7634dde1eb40", "3b7f6035a113b560760c5e8000540fc46f91fed5", "2609079d682998da2bc4315b55a29bafe4df414e", "2d1f86e2c7ba81392c8914edbc079ac64d29b666", "39d6339a39151b5f88ec2d7acc38fe0618d71b5f", "406c5aeca71011fd8f8bd233744a81b53ccf635a", "3ce37af3ac0ed2eba08267a3605730b2e0433da5", "2be24e8a3f2b89bdaccd02521eff3b7bb917003e", "ed184fda0306079f2ee55a1ae60fbf675c8e11c6", "4f7b92bd678772552b3c3edfc9a7c5c4a8c60a8e", "298c2be98370de8af538c06c957ce35d00e93af8", "e96cef8732f3021080c362126518455562606f2d", "9f2984081ef88c20d43b29788fdf732ceabd5d6a", "04bb3fa0824d255b01e9db4946ead9f856cc0b59", "2a88541448be2eb1b953ac2c0c54da240b47dd8a", "436d80cc1b52365ed7b2477c0b385b6fbbb51d3b", "e56c4c41bfa5ec2d86c7c9dd631a9a69cdc05e69", "4d9c02567e7b9e065108eb83ea3f03fcff880462", 
"80d4cf7747abfae96328183dd1f84133023c2668", "2135a3d9f4b8f5771fa5fc7c7794abf8c2840c44", "13be4f13dac6c9a93f969f823c4b8c88f607a8c4", "120bcc9879d953de7b2ecfbcd301f72f3a96fb87", "c1298120e9ab0d3764512cbd38b47cd3ff69327b", "245d98726674297208e76308c3a11ce3fc43bee2", "0fdc3cbf92027cb1200f3f94927bef017d7325ae", "fab60b3db164327be8588bce6ce5e45d5b882db6", "2bbe89f61a8d6d4d6e39fdcaf8c185f110a01c78", "c6382de52636705be5898017f2f8ed7c70d7ae96", "45b9b7fe3850ef83d39d52f6edcc0c24fcc0bc73", "9fab78015e6e91ba7241a923222acd6c576c6e27", "fcf91995dc4d9b0cee84bda5b5b0ce5b757740ac", "d4c7d1a7a03adb2338704d2be7467495f2eb6c7b", "49e4f05fa98f63510de76e7abd8856ff8db0f38d", "0cb2dd5f178e3a297a0c33068961018659d0f443", "0cf7741e1fdb11a77cdf39b4dda8c65a62af4f23", "3393459600368be2c4c9878a3f65a57dcc0c2cfa", "ebeb0546efeab2be404c41a94f586c9107952bc3", "48499deeaa1e31ac22c901d115b8b9867f89f952", "2348f1fa2940b01ec90e023fac8cc96812189774", "33ef419dffef85443ec9fe89a93f928bafdc922e", "70f189798c8b9f2b31c8b5566a5cf3107050b349", "cdcfc75f54405c77478ab776eb407c598075d9f8", "834f5ab0cb374b13a6e19198d550e7a32901a4b2", "a92147bed9c17c311c6081beb0ef4c3165b6268e", "e13360cda1ebd6fa5c3f3386c0862f292e4dbee4", "1be498d4bbc30c3bfd0029114c784bc2114d67c0", "e4c2f8e4aace8cb851cb74478a63d9111ca550ae", "e8523c4ac9d7aa21f3eb4062e09f2a3bc1eedcf7", "36bb5cca0f6a75be8e66f58cba214b90982ee52f", "e7b2b0538731adaacb2255235e0a07d5ccf09189", "fd15e397629e0241642329fc8ee0b8cd6c6ac807", "9963c73b03e4649959f021ef6f4fb1eac0b617d2", "5779e3e439c90d43648db107e848aeb954d3e347", "870433ba89d8cab1656e57ac78f1c26f4998edfb", "c220f457ad0b28886f8b3ef41f012dd0236cd91a", "026b5b8062e5a8d86c541cfa976f8eee97b30ab8", "e4a1b46b5c639d433d21b34b788df8d81b518729", "2af2b74c3462ccff3a6881ff7cf4f321b3242fa9", "8b19efa16a9e73125ab973429eb769d0ad5a8208", "831fbef657cc5e1bbf298ce6aad6b62f00a5b5d9", "16b9d258547f1eccdb32111c9f45e2e4bbee79af", "628a3f027b7646f398c68a680add48c7969ab1d9", "8da32ff9e3759dc236878ac240728b344555e4e9", 
"6742c0a26315d7354ab6b1fa62a5fffaea06da14", "014e3d0fa5248e6f4634dc237e2398160294edce", "7f26c615dd187ca5e4b15759d5cb23ab3ea9d9a9", "5480aee1d01700bb98f5a0e06dd15bf36a4e45ea", "41cfc9edbf36754746991c2a1e9a47c0d129d105", "877100f430b72c5d60de199603ab5c65f611ce17", "275b5091c50509cc8861e792e084ce07aa906549", "51eba481dac6b229a7490f650dff7b17ce05df73", "00d9d88bb1bdca35663946a76d807fff3dc1c15f", "cd4941cbef1e27d7afdc41b48c1aff5338aacf06", "b3ba7ab6de023a0d58c741d6abfa3eae67227caf", "6e60536c847ac25dba4c1c071e0355e5537fe061", "58628e64e61bd2776a2a7258012eabe3c79ca90c", "747d5fe667519acea1bee3df5cf94d9d6f874f20", "3152e89963b8a4028c4abf6e1dc19e91c4c5a8f4", "1b3587363d37dd197b6adbcfa79d49b5486f27d8", "5fa1724a79a9f7090c54925f6ac52f1697d6b570", "7d2556d674ad119cf39df1f65aedbe7493970256", "02239ae5e922075a354169f75f684cad8fdfd5ab", "2d8d089d368f2982748fde93a959cf5944873673", "22648dcd3100432fe0cc71e09de5ee855c61f12b", "51faacfa4fb1e6aa252c6970e85ff35c5719f4ff", "55ea0c775b25d9d04b5886e322db852e86a556cd", "3240c9359061edf7a06bfeb7cc20c103a65904c2", "23429ef60e7a9c0e2f4d81ed1b4e47cc2616522f", "bb6bf94bffc37ef2970410e74a6b6dc44a7f4feb", "60542b1a857024c79db8b5b03db6e79f74ec8f9f", "aa3c9de34ef140ec812be85bb8844922c35eba47", "8164ebc07f51c9e0db4902980b5ac3f5a8d8d48c", "e480f8c00dfe217653c2569d0eec6e2ffa836d59", "ee463f1f72a7e007bae274d2d42cd2e5d817e751", "d5de42d37ee84c86b8f9a054f90ddb4566990ec0", "b2c60061ad32e28eb1e20aff42e062c9160786be", "4641986af5fc8836b2c883ea1a65278d58fe4577", "c4934d9f9c41dbc46f4173aad2775432fe02e0e6", "fa90b825346a51562d42f6b59a343b98ea2e501a", "daefac0610fdeff415c2a3f49b47968d84692e87", "cb84229e005645e8623a866d3d7956c197f85e11", "f08e425c2fce277aedb51d93757839900d591008", "66f02fbcad13c6ee5b421be2fc72485aaaf6fcb5", "0c2875bb47db3698dbbb3304aca47066978897a4", "b1b993a1fbcc827bcb99c4cc1ba64ae2c5dcc000", "8f3da45ff0c3e1777c3a7830f79c10f5896bcc21", "1a1118cd4339553ad0544a0a131512aee50cf7de", "7697295ee6fc817296bed816ac5cae97644c2d5b", 
"efa08283656714911acff2d5022f26904e451113", "936227f7483938097cc1cdd3032016df54dbd5b6", "69a9cf9bc8e585782824666fa3fb5ce5cf07cef2", "5a5f9e0ed220ce51b80cd7b7ede22e473a62062c", "fdfd57d4721174eba288e501c0c120ad076cdca8", "a8d52265649c16f95af71d6f548c15afc85ac905", "670637d0303a863c1548d5b19f705860a23e285c", "d46e793b945c4f391031656357625e902c4405e8", "ddf577e8b7c86b1122c1bc90cba79f641d2b33fa", "0b3a146c474166bba71e645452b3a8276ac05998", "ec12f805a48004a90e0057c7b844d8119cb21b4a", "f6e00d6430cbbaa64789d826d093f7f3e323b082", "36df81e82ea5c1e5edac40b60b374979a43668a5", "43eb03f95adc0df61af2c3b12a913c725b08d4f5", "134aad8153ab78345b2581efac2fe175a3084154", "33030c23f6e25e30b140615bb190d5e1632c3d3b", "370b5757a5379b15e30d619e4d3fb9e8e13f3256", "b5cd8151f9354ee38b73be1d1457d28e39d3c2c6", "143f7a51058b743a0d43026a523d9bbbc1ae43a8", "1b90507f02967ff143fce993a5abbfba173b1ed0", "3dfd94d3fad7e17f52a8ae815eb9cc5471172bc0", "86b69b3718b9350c9d2008880ce88cd035828432", "4f8345f31e38f65f1155569238d14bd8517606f4", "047f6afa87f48de7e32e14229844d1587185ce45", "352110778d2cc2e7110f0bf773398812fd905eb1", "31aa7c992692b74f17ddec665cd862faaeafd673", "9d42df42132c3d76e3447ea61e900d3a6271f5fe", "aa7c72f874951ff7ca3769439f2f39b7cfd4b202", "60cdcf75e97e88638ec973f468598ae7f75c59b4", "06d93a40365da90f30a624f15bf22a90d9cfe6bb", "8697ccb156982d40e88fda7fbf4297fa5171f24d", "0e7f277538142fb50ce2dd9179cffdc36b794054", "728b1b2a86a7ffda402e7ec1a97cd1988dcde868", "8384e104796488fa2667c355dd15b65d6d5ff957", "ee6b503ab512a293e3088fdd7a1c893a77902acb", "16fadde3e68bba301f9829b3f99157191106bd0f", "c81ee278d27423fd16c1a114dcae486687ee27ff", "dac2103843adc40191e48ee7f35b6d86a02ef019", "65475ce4430fb524675ebab6bcb570dfa07e0041", "c65a394118d34beda5dd01ae0df163c3db88fceb", "4cb0e0c0e9b92e457f2c546dc25b9a4ff87ff819", "4511e09ee26044cb46073a8c2f6e1e0fbabe33e8", "03b99f5abe0e977ff4c902412c5cb832977cf18e", "be86d88ecb4192eaf512f29c461e684eb6c35257", "1287bfe73e381cc8042ac0cc27868ae086e1ce3b", 
"23fdbef123bcda0f07d940c72f3b15704fd49a98", "8a0d10a7909b252d0e11bf32a7f9edd0c9a8030b", "0af33f6b5fcbc5e718f24591b030250c6eec027a", "1943c6bf8df8a64bd539a5cd6d4e68785eb590c2", "519a724426b5d9ad384d38aaf2a4632d3824f243", "539ca9db570b5e43be0576bb250e1ba7a727d640", "322488c4000c686e9bfb7514ccdeacae33e53358", "663efaa0671eace1100fdbdecacd94216a17b1db", "3d1f976db6495e2bb654115b939b863d13dd3d05", "4bb03b27bc625e53d8d444c0ba3ee235d2f17e86", "1329bcac5ebd0b08ce33ae1af384bd3e7a0deaca", "7bbaa09c9e318da4370a83b126bcdb214e7f8428", "36fc4120fc0638b97c23f97b53e2184107c52233", "103c8eaca2a2176babab2cc6e9b25d48870d6928", "178b37392b2c6f1a167ebc1a5baa5f2f5916e4c4", "50eb75dfece76ed9119ec543e04386dfc95dfd13", "7c45339253841b6f0efb28c75f2c898c79dfd038", "5fce9d893a40c4e0f2ae335b2e68bfd02f1cb2c6", "3abc833f4d689f37cc8a28f47fb42e32deaa4b17", "bc2852fa0a002e683aad3fb0db5523d1190d0ca5", "9d757c0fede931b1c6ac344f67767533043cba14", "3918b425bb9259ddff9eca33e5d47bde46bd40aa", "e39a66a6d1c5e753f8e6c33cd5d335f9bc9c07fa", "3fa628e7cff0b1dad3f15de98f99b0fdb09df834", "1921e0a97904bdf61e17a165ab159443414308ed", "4f8b4784d0fca31840307650f7052b0dde736a76", "76fd801981fd69ff1b18319c450cb80c4bc78959", "6c01b349edb2d33530e8bb07ba338f009663a9dd", "004d5491f673cd76150f43b0a0429214f5bfd823", "7ae0212d6bf8a067b468f2a78054c64ea6a577ce", "d9a5c82b710b1f4f1ffb67be2ae1d3c0ae7f6c55", "3a0a839012575ba455f2b84c2d043a35133285f9", "fb6cc23fd6bd43bd4cacf6a57cd2c7c8dfe5269d", "5b5962bdb75c72848c1fb4b34c113ff6101b5a87", "c4d439fe07a65b735d0c8604bd5fdaea13f6b072", "3d0379688518cc0e8f896e30815d0b5e8452d4cd", "bb070c019c0885232f114c7dca970d2afd9cd828", "190d8bd39c50b37b27b17ac1213e6dde105b21b8", "17738b0972571e7b4ae471d1b2dccea5ce057511", "68604e7e1b01cdbd3c23832976d66f1a86edaa8f", "df7ff512e8324894d20103fd8ab5da650e4d86db", "0e7c70321462694757511a1776f53d629a1b38f3", "7c36afc9828379de97f226e131390af719dbc18d", "3a804cbf004f6d4e0b041873290ac8e07082b61f", "cfffae38fe34e29d47e6deccfd259788176dc213", 
"0754e769eb613fd3968b6e267a301728f52358be", "413a184b584dc2b669fbe731ace1e48b22945443", "c10a15e52c85654db9c9343ae1dd892a2ac4a279", "55e18e0dde592258882134d2dceeb86122b366ab", "54756f824befa3f0c2af404db0122f5b5bbf16e0", "24f1febcdf56cd74cb19d08010b6eb5e7c81c362", "66029f1be1a5cee9a4e3e24ed8fcb65d5d293720", "28f311b16e4fe4cc0ff6560aae3bbd0cb6782966", "3d6943f1573f992d6897489b73ec46df983d776c", "8ed33184fccde677ec8413ae06f28ea9f2ca70f3", "b4f4b0d39fd10baec34d3412d53515f1a4605222", "676a136f5978783f75b5edbb38e8bb588e8efbbe", "235d5620d05bb7710f5c4fa6fceead0eb670dec5", "7e48711c627edf90e9b232f2cbc0e3576c8f2f2a", "cd6c2ae00157e3fb6ab56379843280eb4cbb01b4", "b6ac33d2c470077fa8dcbfe9b113beccfbd739f8", "a51d5c2f8db48a42446cc4f1718c75ac9303cb7a", "51e87b14f39f44a9f2866d5cc6440e7496ed1298", "2db05ef11041447dbc735362db68b04e562c1e35", "0e986f51fe45b00633de9fd0c94d082d2be51406", "6eddea1d991e81c1c3024a6cea422bc59b10a1dc", "cc05f758ccdf57d77b06b96b9d601bf2795a6cc4", "32dfd4545c87d9820cc92ca912c7d490794a81d6", "2f5e057e35a97278a9d824545d7196c301072ebf", "9776a9f3c59907f45baaeda4b8907dcdac98aef1", "d9b4b49378fcd77dcd5e755975b99ed4c7962f17", "e5737ffc4e74374b0c799b65afdbf0304ff344cb", "b7461aac36fc0b8a24ecadf6c5b5caf54f2aa2f7", "7177649ece5506b315cb73c36098baac1681b8d2", "61971f8e6fff5b35faed610d02ad14ccfc186c70", "2902f62457fdf7e8e8ee77a9155474107a2f423e", "a55efc4a6f273c5895b5e4c5009eabf8e5ed0d6a", "6cfc337069868568148f65732c52cbcef963f79d", "ddb1a392582c624c9116cb00eac01aba220fad84", "1fef53b07c6c625545fc071c7386d41f87925675", "4c6233765b5f83333f6c675d3389bbbf503805e3", "c84233f854bbed17c22ba0df6048cbb1dd4d3248", "66330846a03dcc10f36b6db9adf3b4d32e7a3127", "c88ce5ef33d5e544224ab50162d9883ff6429aa3", "f33bd953d2df0a5305fc8a93a37ff754459a906c", "e9d43231a403b4409633594fa6ccc518f035a135", "1ed6c7e02b4b3ef76f74dd04b2b6050faa6e2177", "6afccf6c6cebfaa0579a23e7cc7737837b090f0f", "b558be7e182809f5404ea0fcf8a1d1d9498dc01a", "a735c6330430c0ff0752d117c54281b1396b16bf", 
"0a11b82aa207d43d1b4c0452007e9388a786be12", "6bcfcc4a0af2bf2729b5bc38f500cfaab2e653f0", "64ec02e1056de4b400f9547ce56e69ba8393e2ca", "e3b324101157daede3b4d16bdc9c2388e849c7d4", "9888ce5cb5cae8ba4f288806d126b1114e0a7f9b", "0b835284b8f1f45f87b0ce004a4ad2aca1d9e153", "0ef20991e0ecc7dc3f6e0e5fd6ee93c4970206f3", "cc7e66f2ba9ac0c639c80c65534ce6031997acd7", "eb48170a6e1e020f002a6a0a808c1934d5c760b8", "a4898f55f12e6393b1c078803909ea715bf71730", "f345a05353f5784b64eefb7785661cc0be519521", "047d7cf4301cae3d318468fe03a1c4ce43b086ed", "e957d0673af7454dbf0a14813201b0e2570577e9", "d6639263381c929ebc579a541045a85aa21680f8", "89497854eada7e32f06aa8f3c0ceedc0e91ecfef", "b1a3b19700b8738b4510eecf78a35ff38406df22", "10d334a98c1e2a9e96c6c3713aadd42a557abb8b", "050a3346e44ca720a54afbf57d56b1ee45ffbe49", "4e27fec1703408d524d6b7ed805cdb6cba6ca132", "1255afbf86423c171349e874b3ac297de19f00cd", "1564bf0a268662df752b68bee5addc4b08868739", "193bc8b663d041bc34134a8407adc3e546daa9cc", "4c842fbd4c032dd4d931eb6ff1eaa2a13450b7af", "e577484e5c3ecc6f073faf124468c8ae2f827a0f", "36b9f46c12240898bafa10b0026a3fb5239f72f3", "4aa27c1f8118dbb39809a0f79a28c0cbc3ede276", "2d072cd43de8d17ce3198fae4469c498f97c6277", "82cd5a5fec8a27887a35f1ecec684ec55eefad73", "b239a756f22201c2780e46754d06a82f108c1d03", "8be60114634caa0eff8566f3252cb9a1b7d5ef10", "43776d1bfa531e66d5e9826ff5529345b792def7", "7384c39a2d084c93566b98bc4d81532b5ad55892", "3f9a7d690db82cf5c3940fbb06b827ced59ec01e", "6da711d07b63c9f24d143ca3991070736baeb412", "2ce84465b9759166effc7302c2f5339766cc523d", "113b06e70b7eead8ae7450bafe9c91656705024c", "cfba667644508853844c45bfe5d0b8a2ffb756d3", "261a80216dda39b127d2b7497c068ec7e0fdf183", "b51e3d59d1bcbc023f39cec233f38510819a2cf9", "83fd2d2d5ad6e4e153672c9b6d1a3785f754b60e", "27846b464369095f4909f093d11ed481277c8bba", "ff82825a04a654ca70e6d460c8d88080ee4a7fcc", "68021c333559ab95ca10e0dbbcc8a4840c31e157", "9b1c218a55ead45296bfd7ad315aaeff1ae9983e", "f531ce18befc03489f647560ad3e5639566b39dc", 
"a2136b13aa0bb4ea4e7fa99a6c657b11dffff563", "4e6c17966efae956133bf8f22edeffc24a0470c1", "a85f691c9f82a248aa2c86d4a63b9036d6cf47ab", "ed82f10e5bfe1825b9fa5379a1d0017b96fa1ebf", "58d43e32660446669ff54f29658961fe8bb6cc72", "392425be1c9d9c2ee6da45de9df7bef0d278e85f", "d8896861126b7fd5d2ceb6fed8505a6dff83414f", "1d729693a888a460ee855040f62bdde39ae273af", "a0021e3bbf942a88e13b67d83db7cf52e013abfd", "0d781b943bff6a3b62a79e2c8daf7f4d4d6431ad", "a5ae44070857aa00e54ea80394a04fda412b335c", "4f36c14d1453fc9d6481b09c5a09e91d8d9ee47a", "2aea27352406a2066ddae5fad6f3f13afdc90be9", "cbc2de9b919bc63590b6ee2dfd9dda134af45286", "42ded74d4858bea1070dadb08b037115d9d15db5", "3dfbd17bd9caf7bd1d908ff469dec2b61e8a9548", "b0c512fcfb7bd6c500429cbda963e28850f2e948", "fd4ac1da699885f71970588f84316589b7d8317b", "de162d4b8450bf2b80f672478f987f304b7e6ae4", "e03f69bad7e6537794a50a99da807c9df4ff5186", "980266ad6807531fea94252e8f2b771c20e173b3", "2c61a9e26557dd0fe824909adeadf22a6a0d86b0", "6b0a2f9ab9b134d66a325525ea5d90ad546fe2b7", "b171f9e4245b52ff96790cf4f8d23e822c260780", "72ecaff8b57023f9fbf8b5b2588f3c7019010ca7", "c222f8079c246ead285894c47bdbb2dfc7741044", "113c22eed8383c74fe6b218743395532e2897e71", "383e64d9ef1fca9de677ac82486b4df42e96e861", "46a4551a6d53a3cd10474ef3945f546f45ef76ee", "283d226e346ac3e7685dd9a4ba8ae55ee4f2fe43", "e295c1aa47422eb35123053038e62e9aa50a2e3a", "11ac88aebe0230e743c7ea2c2a76b5d4acbfecd0", "3b64b8be33887e77e6def4c385985e43e2c15eea", "dd2f6a1ba3650075245a422319d86002e1e87808", "bad2df94fa771869fa35bd11a1a7ab2e3f6d1da3", "e3a6e9ddbbfc4c5160082338d46808cea839848a", "3cc2a2eaaacbf96c6b9abc1cf91bfefabf6fcfdd", "0974677f59e78649a40f0a1d85735410d21b906a", "c98b13871a3bc767df0bdd51ff00c5254ede8b22", "aa331fe378056b6d6031bb8fe6676e035ed60d6d", "b8a829b30381106b806066d40dd372045d49178d", "2dbde64ca75e7986a0fa6181b6940263bcd70684", "b4b0bf0cbe1a2c114adde9fac64900b2f8f6fee4", "93d903d2e48d6a8ad3e3d2aff2e57622efe649cd", "dc0341e5392c853f11283e99a7dc5c51be730aca", 
"4bd3de97b256b96556d19a5db71dda519934fd53", "174930cac7174257515a189cd3ecfdd80ee7dd54", "0387b32d0ebd034dc778972367e7d4194223785d", "a6e75b4ccc793a58ef0f6dbe990633f7658c7241", "ad37d01c4787d169daff7da52e80e2018aab6358", "7a81967598c2c0b3b3771c1af943efb1defd4482", "0ae9cc6a06cfd03d95eee4eca9ed77b818b59cb7", "62f0d8446adee6a5e8102053a63a61af07ac4098", "cbe1df2213a88eafc5dcaf55264f2523fe3ec981", "31d51e48dbd9e7253eafe0719f3788adb564a971", "31ba9d0bfaa2a44bae039e5625eb580afd962892", "d06c8e3c266fbae4026d122ec9bd6c911fcdf51d", "230c4a30f439700355b268e5f57d15851bcbf41f", "4b02387c2db968a70b69d98da3c443f139099e91", "37619564574856c6184005830deda4310d3ca580", "b3c60b642a1c64699ed069e3740a0edeabf1922c", "506c2fbfa9d16037d50d650547ad3366bb1e1cde", "1a6c3c37c2e62b21ebc0f3533686dde4d0103b3f", "07a472ea4b5a28b93678a2dcf89028b086e481a2", "83fd5c23204147844a0528c21e645b757edd7af9", "85674b1b6007634f362cbe9b921912b697c0a32c", "046770df59c49c7ca9a1a4c268176ede2aa89e37", "6813208b94ffa1052760d318169307d1d1c2438e", "40f127fa4459a69a9a21884ee93d286e99b54c5f", "365f67fe670bf55dc9ccdcd6888115264b2a2c56", "3251f40ed1113d592c61d2017e67beca66e678bb", "33f7e78950455c37236b31a6318194cfb2c302a4", "d2f2b10a8f29165d815e652f8d44955a12d057e6", "e9ed17fd8bf1f3d343198e206a4a7e0561ad7e66", "4a1d640f5e25bb60bb2347d36009718249ce9230", "5cbe1445d683d605b31377881ac8540e1d17adf0", "0a6a818b634cca4eb75a37bfd23b5c5c21331b12", "1e5a1619fe5586e5ded2c7a845e73f22960bbf5a", "2f489bd9bfb61a7d7165a2f05c03377a00072477", "f1173a4c5e3501323b37c1ae9a6d7dd8a236eab8", "90b11e095c807a23f517d94523a4da6ae6b12c76", "d7d9c1fa77f3a3b3c2eedbeb02e8e7e49c955a2f", "529baf1a79cca813f8c9966ceaa9b3e42748c058", "2ea247029ac1b8ded60023a369e8d259a8637bd2", "02820c1491b10a1ff486fed32c269e4077c36551", "213a579af9e4f57f071b884aa872651372b661fd", "2cb5db4df50921d276ad9e7186119a276324e465", "7c7ab59a82b766929defd7146fd039b89d67e984", "2969f822b118637af29d8a3a0811ede2751897b5", "aa127e6b2dc0aaccfb85e93e8b557f83ebee816b", 
"632441c9324cd29489cee3da773a9064a46ae26b", "2fa057a20a2b4a4f344988fee0a49fce85b0dc33", "293d69d042fe9bc4fea256c61915978ddaf7cc92", "7f2a4cd506fe84dee26c0fb41848cb219305173f", "4ab10174a4f98f7e2da7cf6ccfeb9bc64c8e7da8", "42ea8a96eea023361721f0ea34264d3d0fc49ebd", "632b24ddd42fda4aebc5a8af3ec44f7fd3ecdc6c", "4c1ce6bced30f5114f135cacf1a37b69bb709ea1", "2f04ba0f74df046b0080ca78e56898bd4847898b", "e4abc40f79f86dbc06f5af1df314c67681dedc51", "59b83666c1031c3f509f063b9963c7ad9781ca23", "861c650f403834163a2c27467a50713ceca37a3e", "c12260540ec14910f5ec6e38d95bdb606826b32e", "5334ac0a6438483890d5eef64f6db93f44aacdf4", "13fd25a18ab3faebcd6a4ab95f4cc814fcda337a", "0f32df6ae76402b98b0823339bd115d33d3ec0a0", "9282239846d79a29392aa71fc24880651826af72", "250ebcd1a8da31f0071d07954eea4426bb80644c", "3a591a9b5c6d4c62963d7374d58c1ae79e3a4039", "ae89b7748d25878c4dc17bdaa39dd63e9d442a0d", "04f56dc5abee683b1e00cbb493d031d303c815fd", "fb0774049f2f34be194592822c74e2f2e603dea8", "a611c978e05d7feab01fb8a37737996ad6e88bd9", "d22b378fb4ef241d8d210202893518d08e0bb213", "8633732d9f787f8497c2696309c7d70176995c15", "18c6c92c39c8a5a2bb8b5673f339d3c26b8dcaae", "21bd60919e2e182a29af455353141ba4907b1b41", "7049187c5155d9652747413ce1ebc8dbb209fd69", "440b94b1624ca516b07e72ea8b3488072adc5e26", "7f415aee0137acab659c664eb1dff15f7b726bdd", "8ee62f7d59aa949b4a943453824e03f4ce19e500", "aff92784567095ee526a705e21be4f42226bbaab", "ca44a838da4187617dca9f6249d8c4b604661ec7", "bb83d5c7c17832d1eef14aa5d303d9dd65748956", "4932b929a2e09ddebedcb1abe8c62f269e7d4e33", "6489ad111fee8224b34f99d1bcfb5122786508cd", "7d73adcee255469aadc5e926066f71c93f51a1a5", "31ef5419e026ef57ff20de537d82fe3cfa9ee741", "7c95449a5712aac7e8c9a66d131f83a038bb7caa", "0ed96cc68b1b61e9eb4096f67d3dcab9169148b9", "239958d6778643101ab631ec354ea1bc4d33e7e0", "fe9d9c298d2e0c72408668fcff996e4bf58cc6c6", "cd444ee7f165032b97ee76b21b9ff58c10750570", "5e0e516226413ea1e973f1a24e2fdedde98e7ec0", "0b174d4a67805b8796bfe86cd69a967d357ba9b6", 
"e9809c0c6bf33cfe232a63b0a13f9b1263c58cb8", "0b9db62b26b811e8c24eb9edc37901a4b79a897f", "eb9bcf9e3f8856c92e7720b63b7e846df37de0c3", "22137ce9c01a8fdebf92ef35407a5a5d18730dde", "4f051022de100241e5a4ba8a7514db9167eabf6e", "e82a0976db908e6f074b926f58223ac685533c65", "f94f366ce14555cf0d5d34248f9467c18241c3ee", "90eb66e75381cce7146b3953a2ae479a7beec539", "be6bd94322dd0ecfc8ea99eb7f40a9a14dd3471f", "0d746111135c2e7f91443869003d05cde3044beb", "6e9a8a34ab5b7cdc12ea52d94e3462225af2c32c", "81e366ed1834a8d01c4457eccae4d57d169cb932", "982fcead58be419e4f34df6e806204674a4bc579", "ce5e50467e43e3178cbd86cfc3348e3f577c4489", "55c4efc082a8410b528af7325de8148b80cf41e3", "961a5d5750f18e91e28a767b3cb234a77aac8305", "53e081f5af505374c3b8491e9c4470fe77fe7934", "9a4c45e5c6e4f616771a7325629d167a38508691", "826015d9ade1637b3fcbeca071e3137d3ac1ef56", "de79437f74e8e3b266afc664decf4e6e4bdf34d7", "47eba2f95679e106e463e8296c1f61f6ddfe815b", "02431ed90700d5cfe4e3d3a20f1e97de3e131569", "1afdedba774f6689eb07e048056f7844c9083be9", "54e988bc0764073a5db2955705d4bfa8365b7fa9", "4eeccbbb98de4f2e992600482fd6b881ace014bb", "bcd162862b6d3a56b474039b2588a8f948d59fe0", "17a85799c59c13f07d4b4d7cf9d7c7986475d01c", "c94b3a05f6f41d015d524169972ae8fd52871b67", "e73f2839fc232c03e9f027c78bc419ee15810fe8", "7234468db46b37e2027ab2978c67b48b8581f796", "5aad56cfa2bac5d6635df4184047e809f8fecca2", "b53485dbdd2dc5e4f3c7cff26bd8707964bb0503", "53873fe7bbd5a2d171e2b1babc9cacaad6cabe45", "b36a80d15c3e48870ea6118b855055cc34307658", "142dcfc3c62b1f30a13f1f49c608be3e62033042", "32c20afb5c91ed7cdbafb76408c3a62b38dd9160", "a4725a5b43e7c36d9e30028dff66958f892254a0", "dad7b8be074d7ea6c3f970bd18884d496cbb0f91", "3803b91e784922a2dacd6a18f61b3100629df932", "fcceea054cb59f1409dda181198ed4070ed762c9", "7c1e1c767f7911a390d49bed4f73952df8445936", "44a3ec27f92c344a15deb8e5dc3a5b3797505c06", "2ff9618ea521df3c916abc88e7c85220d9f0ff06", "01e27c91c7cef926389f913d12410725e7dd35ab", "1565721ebdbd2518224f54388ed4f6b21ebd26f3", 
"562f7555e5cb79ce0fe834c4613264d8378dd007", "3f848d6424f3d666a1b6dd405a48a35a797dd147", "13b1b18b9cfa6c8c44addb9a81fe10b0e89db32a", "22894c7a84984bd4822dcfe7c76a74673a242c36", "2ae139b247057c02cda352f6661f46f7feb38e45", "f4f9697f2519f1fe725ee7e3788119ed217dca34", "055cd8173536031e189628c879a2acad6cf2a5d0", "8a3c5507237957d013a0fe0f082cab7f757af6ee", "6a7e464464f70afea78552c8386f4d2763ea1d9c", "0f829fee12e86f980a581480a9e0cefccb59e2c5", "1723227710869a111079be7d61ae3df48604e653", "1fe59275142844ce3ade9e2aed900378dd025880", "69a9da55bd20ce4b83e1680fbc6be2c976067631", "43fb9efa79178cb6f481387b7c6e9b0ca3761da8", "614079f1a0d0938f9c30a1585f617fa278816d53", "dbab6ac1a9516c360cdbfd5f3239a351a64adde7", "d3367c9a4825295301225a05a190c0b7ed62736e", "1025c4922491745534d5d4e8c6e74ba2dc57b138", "1a6c9ef99bf0ab9835a91fe5f1760d98a0606243", "5b59e6b980d2447b2f3042bd811906694e4b0843", "c3ae4a4c9a9528791e36b64fea8d02b2fced7955", "cbe021d840f9fc1cb191cba79d3f7e3bbcda78d3", "37f2e03c7cbec9ffc35eac51578e7e8fdfee3d4e", "0573f3d2754df3a717368a6cbcd940e105d67f0b", "68eb6e0e3660009e8a046bff15cef6fe87d46477", "642417f2bb1ff98989e0a0aa855253fed1fffe04", "132f88626f6760d769c95984212ed0915790b625", "c5d13e42071813a0a9dd809d54268712eba7883f", "5f1dcaff475ef18a2ecec0e114a9849a0a8002b9", "30c93fec078b98453a71f9f21fbc9512ab3e916f", "9abab00de61dd722b3ad1b8fa9bffd0001763f8b", "1aa766bbd49bac8484e2545c20788d0f86e73ec2", "795ea140df2c3d29753f40ccc4952ef24f46576c", "5be3cc1650c918da1c38690812f74573e66b1d32", "59bece468ed98397d54865715f40af30221aa08c", "524c25217a6f1ed17f47871e947a5581d775fa56", "a1f1120653bb1bd8bd4bc9616f85fdc97f8ce892", "00a3cfe3ce35a7ffb8214f6db15366f4e79761e3", "7c953868cd51f596300c8231192d57c9c514ae17", "57dc55edade7074f0b32db02939c00f4da8fe3a6", "c2474202d56bb80663e7bece5924245978425fc1", "2866cbeb25551257683cf28f33d829932be651fe", "6226f2ea345f5f4716ac4ddca6715a47162d5b92", "e16f73f3a63c44cf285b8c1bc630eb8377b85b6d", "a00fdf49e5e0a73eb24345cb25a0bd1383a10021", 
"7e00fb79576fe213853aeea39a6bc51df9fdca16", "0d90c992dd08bfb06df50ab5c5c77ce83061e830", "912a6a97af390d009773452814a401e258b77640", "171ca25bc2cdfc79cad63933bcdd420d35a541ab", "7aafeb9aab48fb2c34bed4b86755ac71e3f00338", "14ff9c89f00dacc8e0c13c94f9fadcd90e4e604d", "3acb6b3e3f09f528c88d5dd765fee6131de931ea", "b37f57edab685dba5c23de00e4fa032a3a6e8841", "14bca107bb25c4dce89210049bf39ecd55f18568", "5ae970294aaba5e0225122552c019eb56f20af74", "3f12701449a82a5e01845001afab3580b92da858", "6a931e7b7475635f089dd33e8d9a2899ae963804", "c30982d6d9bbe470a760c168002ed9d66e1718a2", "a255a54b8758050ea1632bf5a88a201cd72656e1", "4786638ffb3b2fb385cec80720cc6e7c3588b773", "7e0c75ce731131e613544e1a85ae0f2c28ee4c1f", "496074fcbeefd88664b7bd945012ca22615d812e", "3802c97f925cb03bac91d9db13d8b777dfd29dcc", "2059d2fecfa61ddc648be61c0cbc9bc1ad8a9f5b", "6754c98ba73651f69525c770fb0705a1fae78eb5", "f3d9e347eadcf0d21cb0e92710bc906b22f2b3e7", "5778d49c8d8d127351eee35047b8d0dc90defe85", "122f52fadd4854cf6c9287013520eced3c91e71a", "8d4f0517eae232913bf27f516101a75da3249d15", "0a23d374c6cf71a65e845569230420362fe4903a", "9d57c4036a0e5f1349cd11bc342ac515307b6720", "dcdece0d0ee382e2f388dcd7f5bd9721bb7354d6", "9296f4ac0180e29226d6c016b5a4d5d2964eaaf6", "58bb77dff5f6ee0fb5ab7f5079a5e788276184cc", "1a140d9265df8cf50a3cd69074db7e20dc060d14", "346166da1a49e531923294300a731167e1436d5b", "cc31db984282bb70946f6881bab741aa841d3a7c", "fb9ad920809669c1b1455cc26dbd900d8e719e61", "f402e088dddfaad7667bd4def26092d05f247206", "6a38e4bb35673a73f041e34d3f2db7067482a9b5", "5c91fc106cfe9d57a9b149c1af29ca84d403fc7e", "556875fb04ed6043620d7ca04dfe3d8b3a9284f5", "87bee0e68dfc86b714f0107860d600fffdaf7996", "48729e4de8aa478ee5eeeb08a72a446b0f5367d5", "24959d1a9c9faf29238163b6bcaf523e2b05a053", "3619a9b46ad4779d0a63b20f7a6a8d3d49530339", "6c26744149ae08af8bc84137633495fa948b41ad", "aff8705fb2f2ae460cb3980b47f2e85c2e6dd41a", "bd66dc891270d858de3adf97d42ed714860ae94d", "e287ff7997297ce1197359ed0fb2a0bd381638c9", 
"97e569159d5658760eb00ca9cb662e6882d2ab0e", "beae35eb5b2c7f63dfa9115f07b5ba0319709951", "285472527c5dc1c620d9644849e7519766c2d655", "fe961cbe4be0a35becd2d722f9f364ec3c26bd34", "25c108a56e4cb757b62911639a40e9caf07f1b4f", "ce56be1acffda599dec6cc2af2b35600488846c9", "92115b620c7f653c847f43b6c4ff0470c8e55dab", "774cbb45968607a027ae4729077734db000a1ec5", "79744fc71bea58d2e1918c9e254b10047472bd76", "47fdbd64edd7d348713253cf362a9c21f98e4296", "90c4f15f1203a3a8a5bf307f8641ba54172ead30", "414fdfe5f2e4f32a59bf15062b6e524cbf970637", "950171acb24bb24a871ba0d02d580c09829de372", "7643861bb492bf303b25d0306462f8fb7dc29878", "c32fb755856c21a238857b77d7548f18e05f482d", "661c78a0e2b63cbdb9c20dcf89854ba029b6bc87", "28f7d3d894705a92cac9b08d22701fadb6472676", "1a849b694f2d68c3536ed849ed78c82e979d64d5", "993374c1c9d58a3dec28160188ff6ac1227d02f5", "1862cb5728990f189fa91c67028f6d77b5ac94f6", "8fe5feeaa72eddc62e7e65665c98e5cb0acffa87", "7c7b0550ec41e97fcfc635feffe2e53624471c59", "77fbbf0c5729f97fcdbfdc507deee3d388cd4889", "15252b7af081761bb00535aac6bd1987391f9b79", "39c8b34c1b678235b60b648d0b11d241a34c8e32", "fefaa892f1f3ff78db4da55391f4a76d6536c49a", "26e570049aaedcfa420fc8c7b761bc70a195657c", "0d0b880e2b531c45ee8227166a489bf35a528cb9", "a775da3e6e6ea64bffab7f9baf665528644c7ed3", "d4c2d26523f577e2d72fc80109e2540c887255c8", "e896389891ba84af58a8c279cf8ab5de3e9320ee", "de398bd8b7b57a3362c0c677ba8bf9f1d8ade583", "5b6bed112e722c0629bcce778770d1b28e42fc96", "57ebeff9273dea933e2a75c306849baf43081a8c", "9c373438285101d47ab9332cdb0df6534e3b93d1", "a60907b7ee346b567972074e3e03c82f64d7ea30", "08f1e9e14775757298afd9039f46ec56e80677f9", "27c6cd568d0623d549439edc98f6b92528d39bfe", "55aafdef9d9798611ade1a387d1e4689f2975e51", "860588fafcc80c823e66429fadd7e816721da42a", "7dc498d45f9fcb97acee552c6f587b65d5122c35", "1fdeba9c4064b449231eac95e610f3288801fd3e", "434f1442533754b3098afd4e24abf1e3792b24db", "c872d6310f2079db0cee0e69cc96da1470055225", "74875368649f52f74bfc4355689b85a724c3db47", 
"24bf94f8090daf9bda56d54e42009067839b20df", "c7c5f0fe1fcaf3787c7f78f7dc62f3497dcfdf3c", "ea2b3efd4d317ebaffaf7dc8c62db5ff1eab0e1b", "83011670e083dd52484578f8b6b3b4ccde3237ec", "501eda2d04b1db717b7834800d74dacb7df58f91", "6bacd4347f67ec60a69e24ed7cc0ac8073004e6f", "df2c685aa9c234783ab51c1aa1bf1cb5d71a3dbb", "b85d0aef3ee2883daca2835a469f5756917e76b7", "652ec3947d3d04dda719b1f5ba7c975e567166ef", "085ceda1c65caf11762b3452f87660703f914782", "729dbe38538fbf2664bc79847601f00593474b05", "0bf0029c9bdb0ac61fda35c075deb1086c116956", "1f8e44593eb335c2253d0f22f7f9dc1025af8c0d", "281486d172cf0c78d348ce7d977a82ff763efccd", "22ccd537857aca1ee4b961f081f07c58d42a7f32", "dc2e805d0038f9d1b3d1bc79192f1d90f6091ecb", "4ae291b070ad7940b3c9d3cb10e8c05955c9e269", "aa5eeb1ab953411e915ea5e6298474dbebfa6fb6", "171d8a39b9e3d21231004f7008397d5056ff23af", "84dcf04802743d9907b5b3ae28b19cbbacd97981", "27812db1d2f68611cc284d65d11818082e572008", "d91fd82332a0db1bb4a8ac563f406098cfe9c4bb", "b104c8ef6735eba1d29f50c99bbbf99d33fc8dc2", "1cb0c11620bde2734c1a428c789158ffff0d6c7b", "038ce930a02d38fb30d15aac654ec95640fe5cb0", "74325f3d9aea3a810fe4eab8863d1a48c099de11", "04616814f1aabe3799f8ab67101fbaf9fd115ae4", "b2a0e5873c1a8f9a53a199eecae4bdf505816ecb", "26ac607a101492bc86fd81a141311066cfe9e2b5", "e50ee29ca12028cb903cd498bb9cacd41bd5ce3a", "06d7ef72fae1be206070b9119fb6b61ce4699587", "006f283a50d325840433f4cf6d15876d475bba77", "b0358af78b7c5ee7adc883ef513bbcc84a18a02b", "1389ba6c3ff34cdf452ede130c738f37dca7e8cb", "4cdae53cebaeeebc3d07cf6cd36fecb2946f3e56", "68f61154a0080c4aae9322110c8827978f01ac2e", "3f88ea8cf2eade325b0f32832561483185db5c10", "813c93c54c19fd3ef850728e6d4a31d279d26021", "ad8bd7016132a2f98ff1f41dac695285e71cc4b1", "102e374347698fe5404e1d83f441630b1abf62d9", "0931bef0a9c8c153184a1f9c286cf4883cbe99b6", "747c25bff37b96def96dc039cc13f8a7f42dbbc7", "4858d014bb5119a199448fcd36746c413e60f295", "eb9867f5efc98d3203ce1037f9a8814b0d15d0aa", "2f5ae4d6cd240ec7bc3f8ada47030e8439125df2", 
"bd26faef48080b5af294b19139c804ffec70825e", "9cd6a81a519545bf8aa9023f6e879521f85d4cd1", "59c21f5a24d0b408d528054b016915236bb85bf2", "ef26b36eb5966364c71d4fed135fe68f891127e5", "fc0f5859a111fb17e6dcf6ba63dd7b751721ca61", "1dc6c0ad19b41e5190fc9fe50e3ae27f49f18fa2", "1610d2d4947c03a89c0fda506a74ba1ae2bc54c2", "edc5a0a8b9fc6ae0e8d8091a2391767f645095d9", "03f98c175b4230960ac347b1100fbfc10c100d0c", "86c5478f21c4a9f9de71b5ffa90f2a483ba5c497", "bbc8ccd3f62615e3c0ce2c3aee5e4a223d215bbd", "b6d0e461535116a675a0354e7da65b2c1d2958d4", "f79e4ba09402adab54d2efadd1c4bfe4e20c5da5", "f5c57979ec3d8baa6f934242965350865c0121bd", "106732a010b1baf13c61d0994552aee8336f8c85", "4eb8030b31ff86bdcb063403eef24e53b9ad4329", "ae2cf545565c157813798910401e1da5dc8a6199", "559645d2447004355c83737a19c9a811b45780f1", "64ba203c8cfc631d5f3f20419880523155fbeeb2", "17cf838720f7892dbe567129dcf3f7a982e0b56e", "488a61e0a1c3768affdcd3c694706e5bb17ae548", "ef4ecb76413a05c96eac4c743d2c2a3886f2ae07", "46551095a2cc4976d6be0165c31c37b0c5638719", "f76a6b1d6029769e2dc1be4dadbee6a7ba777429", "fc1e37fb16006b62848def92a51434fc74a2431a", "b73d9e1af36aabb81353f29c40ecdcbdf731dbed", "a15d9d2ed035f21e13b688a78412cb7b5a04c469", "afca252f314b46d5c1f2cb4e75ce15d551069b05", "2cac8ab4088e2bdd32dcb276b86459427355085c", "699b8250fb93b3fa64b2fc8f59fef036e172564d", "eaf020bc8a3ed5401fc3852f7037a03b2525586a", "72e603083c8b1cfa09200eb333927e8ea848fbc8", "4b04247c7f22410681b6aab053d9655cf7f3f888", "0a87d781fe2ae2e700237ddd00314dbc10b1429c", "243e9d490fe98d139003bb8dc95683b366866c57", "a52d9e9daf2cb26b31bf2902f78774bd31c0dd88", "90e7a86a57079f17f1089c3a46ea9bfd1d49226c", "dfb8a04a80d4b0794c0679d797cb90ec101e162c", "19a9f658ea14701502d169dc086651b1d9b2a8ea", "1a65cc5b2abde1754b8c9b1d932a68519bcb1ada", "466f80b066215e85da63e6f30e276f1a9d7c843b", "5b6593a6497868a0d19312952d2b753232414c23", "08cb294a08365e36dd7ed4167b1fd04f847651a9", "cc9a61a30afdb8a5bc7088e1cef814b53dc4fc66", "ed273b5434013dcdb9029c1a9f1718da494a23a2", 
"ba017a8d16e47e57a1f3eb5a94c1ba24e6952274", "133900a0e7450979c9491951a5f1c2a403a180f0", "e328d19027297ac796aae2470e438fe0bd334449", "90ddf1aabf1c73b5fc45254a2de46e53a0bde857", "fb0f5e06048c0274c2a4056e353fa31f5790e381", "4c078c2919c7bdc26ca2238fa1a79e0331898b56", "18a013e1c72cf579d1b215f22d298521047e98a4", "bb3698df3b4f40c0b7cc523d26ffb8c5276d5a1c", "1c25a3c8ef3e2c4dbff337aa727d13f5eba40fb2", "e569f4bd41895028c4c009e5b46b935056188e91", "081189493ca339ca49b1913a12122af8bb431984", "83b7578e2d9fa60d33d9336be334f6f2cc4f218f", "0c05f60998628884a9ac60116453f1a91bcd9dda", "2edc6df161f6aadbef9c12408bdb367e72c3c967", "264f7ab36ff2e23a1514577a6404229d7fe1242b", "6144af24ce06af7d8cdd606e79cea5d6e73e2135", "bcac3a870501c5510df80c2a5631f371f2f6f74a", "8d712cef3a5a8a7b1619fb841a191bebc2a17f15", "7f703613149b190ea3bb0e3c803844895419846b", "3726d17fd7e57c75b8b9f7f57bdec9054534be5e", "214072c84378802a0a0fde0b93ffb17bc04f3759", "dda35768681f74dafd02a667dac2e6101926a279", "b8978a5251b6e341a1171e4fd9177aec1432dd3a", "1b150248d856f95da8316da868532a4286b9d58e", "e585dc6c810264d9f07e38c412379734a920714e", "3463f12ad434d256cd5f94c1c1bfd2dd6df36947", "44c278cbecd6c1123bfa5df92e0bda156895fa48", "18941b52527e6f15abfdf5b86a0086935706e83b", "5e62b2ab6fd3886e673fd5cbee160a5bee414507", "7ed5036a7c1eb2ea08fa2a12a446a9ccb6171c92", "cad52d74c1a21043f851ae14c924ac689e197d1f", "51d048b92f6680aca4a8adf07deb380c0916c808", "a0848d7b1bb43f4b4f1b4016e58c830f40944817", "8bbd40558a99e33fac18f6736b8fe99f4a97d9b1", "716d6c2eb8a0d8089baf2087ce9fcd668cd0d4c0", "cd2bf0e1d19babe51eaa94cbc24b223e9c048ad6", "3fe3d6ff7e5320f4395571131708ecaef6ef4550", "a5ade88747fa5769c9c92ffde9b7196ff085a9eb", "63367972e1ada96dd47211d86ddee83f65ca1880", "0d33b6c8b4d1a3cb6d669b4b8c11c2a54c203d1a", "3b8c830b200f1df8ef705de37cbfe83945a3d307", "1be18a701d5af2d8088db3e6aaa5b9b1d54b6fd3", "b51d11fa400d66b9f9d903a60c4ebe03fd77c8f2", "3dd4d719b2185f7c7f92cc97f3b5a65990fcd5dd", "10ab1b48b2a55ec9e2920a5397febd84906a7769", 
"b5f3b0f45cf7f462a9c463a941e34e102a029506", "dae9d0a9b77366f0cd52e38847e47691ee97bc1f", "6eba25166fe461dc388805cc2452d49f5d1cdadd", "102b27922e9bd56667303f986404f0e1243b68ab", "c59a9151cef054984607b7253ef189c12122a625", "9f1a854d574d0bd14786c41247db272be6062581", "3505c9b0a9631539e34663310aefe9b05ac02727", "8cb6daba2cb1e208e809633133adfee0183b8dd2", "50a0930cb8cc353e15a5cb4d2f41b365675b5ebf", "156cd2a0e2c378e4c3649a1d046cd080d3338bca", "5aafca76dbbbbaefd82f5f0265776afb5320dafe", "2162654cb02bcd10794ae7e7d610c011ce0fb51b", "31dd6bafd6e7c6095eb8d0591abac3b0106a75e3", "22a10d8d2a2cb9055557a3b335d6706100890afb", "20c2a5166206e7ffbb11a23387b9c5edf42b5230", "0acf23485ded5cb9cd249d1e4972119239227ddb", "21765df4c0224afcc25eb780bef654cbe6f0bc3a", "ab703224e3d6718bc28f7b9987eb6a5e5cce3b01", "73c9cbbf3f9cea1bc7dce98fce429bf0616a1a8c", "30f62b05b9a69d671be4112d47eba90028a26c71", "5226296884b3e151ce317a37f94827dbda0b9d16", "181708b09bde7f4904f8fd92b3668d76e7aff527", "d40c16285d762f7a1c862b8ac05a0fdb24af1202", "cc2eaa182f33defbb33d69e9547630aab7ed9c9c", "2042aed660796b14925db17c0a8b9fbdd7f3ebac", "2b64a8c1f584389b611198d47a750f5d74234426", "04f0292d9a062634623516edd01d92595f03bd3f", "25885e9292957feb89dcb4a30e77218ffe7b9868", "e57014b4106dd1355e69a0f60bb533615a705606", "fd126e36337999640a0b623611b5fec8de390d46", "78a4cabf0afc94da123e299df5b32550cd638939", "bac11ce0fb3e12c466f7ebfb6d036a9fe62628ea", "8c50869b745fc094a4fb1b27861934c3c14d7199", "f4f6fc473effb063b7a29aa221c65f64a791d7f4", "6e00a406edb508312108f683effe6d3c1db020fb", "bcc5cbbb540ee66dc8b9a3453b506e895d8395de", "4a5592ae1f5e9fa83d9fa17451c8ab49608421e4", "7698ba9fd1f49157ca2666a93311afbf1ff4e66c", "3cb64217ca2127445270000141cfa2959c84d9e7", "e393a038d520a073b9835df7a3ff104ad610c552", "120b9c271c3a4ea0ad12bbc71054664d4d460bc3", "33c3702b0eee6fc26fc49f79f9133f3dd7fa3f13", "d904f945c1506e7b51b19c99c632ef13f340ef4c", "587b8c147c6253878128ddacf6e5faf8272842a4", "5990c2e78394388e8a81a4b52baf35c13b22d2c9", 
"af29ad70ab148c83e1faa8b3098396bc1cd87790", "221252be5d5be3b3e53b3bbbe7a9930d9d8cad69", "256b46b12ab47283e6ada05fad6a2b501de35323", "91ead35d1d2ff2ea7cf35d15b14996471404f68d", "5ed5e534c8defd683909200c1dc31692942b7b5f", "d861c658db2fd03558f44c265c328b53e492383a", "8fbec9105d346cd23d48536eb20c80b7c2bbbe30", "680d662c30739521f5c4b76845cb341dce010735", "93dd4e512cd7647aecbfc0cd4767adf5d9289c3d", "2b2924af7ec219bd1fadcbd2c57014ed54efec86", "25695abfe51209798f3b68fb42cfad7a96356f1f", "31003ba1cf9f77ec5b7038996d2ce999fa04d0ea", "19b492d426f092d80825edba3b02e354c312295f", "054756fa720bdcf1d320ad7a353e54ca53d4d3af", "3b37d95d2855c8db64bd6b1ee5659f87fce36881", "f3ea181507db292b762aa798da30bc307be95344", "7343f0b7bcdaf909c5e37937e295bf0ac7b69499", "65f25a28629ecfe8bae42a33883a8b9ab3c7d047", "d350a9390f0818703f886138da27bf8967fe8f51", "e50ec6b6d1c189edc127eb403c41a64f34fc0a6c", "a2429cc2ccbabda891cc5ae340b24ad06fcdbed5", "8983485996d5d9d162e70d66399047c5d01ac451", "4014d74e8f5ea4d76c2c1add81d0c88d6e342478", "a76e57c1b2e385b68ffdf7609802d71244804c1d", "04c07ecaf5e962ac847059ece3ae7b6962b4e5c4", "9326d1390e8601e2efc3c4032152844483038f3f", "9ab126760f68071a78cabe006cf92995d6427025", "36486944b4feeb88c0499fecd253c5a53034a23f", "19c0069f075b5b2d8ac48ad28a7409179bd08b86", "4a0f98d7dbc31497106d4f652968c708f7da6692", "2aec012bb6dcaacd9d7a1e45bc5204fac7b63b3c", "a8a30a8c50d9c4bb8e6d2dd84bc5b8b7f2c84dd8", "be57d2aaab615ec8bc1dd2dba8bee41a4d038b85", "3157be811685c93d0cef7fa4c489efea581f9b8e", "437642cfc8c34e445ea653929e2d183aaaeeb704", "ccb2ecb30a50460c9189bb55ba594f2300882747", "76b11c281ac47fe6d95e124673a408ee9eb568e3", "0dd72887465046b0f8fc655793c6eaaac9c03a3d", "f6511d8156058737ec5354c66ef6fdcf035d714d", "fc970d7694b1d2438dd101a146d2e4f29087963e", "b1534888673e6119f324082246016d28eba249aa", "a094e52771baabe4ab37ef7853f9a4f534227457", "4f7967158b257e86d66bdabfdc556c697d917d24", "29fd98f096fc9d507cd5ee7d692600b1feaf7ed1", "4850af6b54391fc33c8028a0b7fafe05855a96ff", 
"37ce1d3a6415d6fc1760964e2a04174c24208173", "3998c5aa6be58cce8cb65a64cb168864093a9a3e", "f7824758800a7b1a386db5bd35f84c81454d017a", "b3067deb3110e3a7566c032ac0c1e1608668ef3d", "b506aa23949b6d1f0c868ad03aaaeb5e5f7f6b57", "e5fbffd3449a2bfe0acb4ec339a19f5b88fff783", "3c1b73509cc09200e96ab9cfb28ebfd9d1d6aa9a", "40217a8c60e0a7d1735d4f631171aa6ed146e719", "d42dbc995318e2936714c65c028700bfd3633049", "1606b1475e125bba1b2d87bcf1e33b06f42c5f0d", "2fe86e9c115562df2114eeedc7db1aece07a3638", "49fdafef327069516d887d8e69b5e96c983c3dd0", "39b22bcbd452d5fea02a9ee63a56c16400af2b83", "647b2e162e9c476728172f62463a8547d245cde3", "ceaa5eb51f761b5f84bd88b58c8f484fcd2a22d6", "561ae67de137e75e9642ab3512d3749b34484310", "0750c796467b6ef60b0caff5fb199337d54d431e", "166ef5d3fd96d99caeabe928eba291c082ec75a0", "3bebb79f8f49aa11dd4f6d60d903172db02bf4f3", "5c2a7518fb26a37139cebff76753d83e4da25159", "5632ba72b2652df3b648b2ee698233e76a4eee65", "397257783ccc8cace5b67cc71e0c73034d559a4f", "013909077ad843eb6df7a3e8e290cfd5575999d2", "e42998bbebddeeb4b2bedf5da23fa5c4efc976fa", "10f2b8188c745d43c1580f5ee6de71ad8d538b4d", "7ebd323ddfe3b6de8368c4682db6d0db7b70df62", "2742a61d32053761bcc14bd6c32365bfcdbefe35", "eb5c1e526fe2d17778c68f60c874c3da0129fabd", "b8378ab83bc165bc0e3692f2ce593dcc713df34a", "6b333b2c6311e36c2bde920ab5813f8cfcf2b67b", "1f9b2f70c24a567207752989c5bd4907442a9d0f", "9285f4a6a06e975bde3ae3267fccd971d4fff98a", "55cfc3c08000f9d21879582c6296f2a864b657e8", "a812368fe1d4a186322bf72a6d07e1cf60067234", "b8f3f6d8f188f65ca8ea2725b248397c7d1e662d", "60777fbca8bff210398ec8b1179bc4ecb72dfec0", "27a299b834a18e45d73e0bf784bbb5b304c197b3", "bcb99d5150d792001a7d33031a3bd1b77bea706b", "c20b2d365186f4471950fbe1ef8755de90efc000", "27961bc8173ac84fdbecacd01e5ed6f7ed92d4bd", "8ad0a88a7583af819af66cf2d9e8adb860cf9c34", "87bb183d8be0c2b4cfceb9ee158fee4bbf3e19fd", "162dfd0d2c9f3621d600e8a3790745395ab25ebc", "3fb3c7dd12561e9443ac301f5527d539b1f4574e", "8a8127a06f432982bfb0150df3212f379b36840b", 
"872ff48a3acfbf96376fd048348372f5137615e4", "635158d2da146e9de559d2742a2fa234e06b52db", "d6e08345ba293565086cb282ba08b225326022fc", "887745c282edf9af40d38425d5fdc9b3fe139c08", "d511e903a882658c9f6f930d6dd183007f508eda", "3f957142ef66f2921e7c8c7eadc8e548dccc1327", "ce6a6d35f65e584214aaf24378ab85038decddbb", "a136ccaa67f660c45d3abb8551c5ed357faf7081", "d8c9ce0bd5e4b6d1465402a760845e23af5ac259", "1989a1f9ce18d8c2a0cee3196fe6fa363aab80c2", "ab00ea1aa2f81fbe139b4632ec3682dfb7312ef0", "d69df51cff3d6b9b0625acdcbea27cd2bbf4b9c0", "f5770dd225501ff3764f9023f19a76fad28127d4", "1ddea58d04e29069b583ac95bc0ae9bebb0bed07", "b5f2846a506fc417e7da43f6a7679146d99c5e96", "4e5dc3b397484326a4348ccceb88acf309960e86", "1fa426496ed6bcd0c0b17b8b935a14c84a7ee1c2", "062d67af7677db086ef35186dc936b4511f155d7", "1ca8c09abb73a02519d8db77e4fe107acfc589b6", "7ba0bf9323c2d79300f1a433ff8b4fe0a00ad889", "daa52dd09b61ee94945655f0dde216cce0ebd505", "2cde051e04569496fb525d7f1b1e5ce6364c8b21", "ddaa8add8528857712424fd57179e5db6885df7c", "cd85f71907f1c27349947690b48bfb84e44a3db0", "a5f11c132eaab258a7cea2d681875af09cddba65", "f42dca4a4426e5873a981712102aa961be34539a", "31625522950e82ad4dffef7ed0df00fdd2401436", "63213d080a43660ac59ea12e3c35e6953f6d7ce8", "4ed2d7ecb34a13e12474f75d803547ad2ad811b2", "05bcc5235721fd6a465a63774d28720bacc60858", "35e6f6e5f4f780508e5f58e87f9efe2b07d8a864", "8878871ec2763f912102eeaff4b5a2febfc22fbe", "d4ebf0a4f48275ecd8dbc2840b2a31cc07bd676d", "a6e25cab2251a8ded43c44b28a87f4c62e3a548a", "e6540d70e5ffeed9f447602ea3455c7f0b38113e", "155199d7f10218e29ddaee36ebe611c95cae68c4", "473366f025c4a6e0783e6174ca914f9cb328fe70", "4e0e49c280acbff8ae394b2443fcff1afb9bdce6", "e6c491fb6a57c9a7c2d71522a1a066be2e681c84", "be068ce0d5284dbd2c4c8ba4a31a41da2f794193", "056294ff40584cdce81702b948f88cebd731a93e", "541f1436c8ffef1118a0121088584ddbfd3a0a8a", "e8b3a257a0a44d2859862cdec91c8841dc69144d", "3fd092b96c3339507732263c9e6379b307c26073", "5394d42fd27b7e14bd875ec71f31fdd2fcc8f923", 
"8e3c97e420e0112c043929087d6456d8ab61e95c", "cffebdf88e406c27b892857d1520cb2d7ccda573", "48186494fc7c0cc664edec16ce582b3fcb5249c0", "0728f788107122d76dfafa4fb0c45c20dcf523ca", "8879fed9f8f51a4c0734af22c5632cf6e9b07689", "fd33df02f970055d74fbe69b05d1a7a1b9b2219b", "d0d7671c816ed7f37b16be86fa792a1b29ddd79b", "b8d8501595f38974e001a66752dc7098db13dfec", "ed0d8997a4b7b80a7cd3592e98bdbe5c3aab0cee", "2fea258320c50f36408032c05c54ba455d575809", "656f05741c402ba43bb1b9a58bcc5f7ce2403d9a", "45f858f9e8d7713f60f52618e54089ba68dfcd6d", "0d467adaf936b112f570970c5210bdb3c626a717", "e0d878cc095eaae220ad1f681b33d7d61eb5e425", "1b71d3f30238cb6621021a95543cce3aab96a21b", "67ba3524e135c1375c74fe53ebb03684754aae56", "d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9", "b0de0892d2092c8c70aa22500fed31aa7eb4dd3f", "2642810e6c74d900f653f9a800c0e6a14ca2e1c7", "a6e21438695dbc3a184d33b6cf5064ddf655a9ba", "2a14b6d9f688714dc60876816c4b7cf763c029a9", "a87e37d43d4c47bef8992ace408de0f872739efc", "8b74252625c91375f55cbdd2e6415e752a281d10", "e096b11b3988441c0995c13742ad188a80f2b461", "16572c545384174f8136d761d2b0866e968120a8", "c5935b92bd23fd25cae20222c7c2abc9f4caa770", "b9081856963ceb78dcb44ac410c6fca0533676a3", "2098983dd521e78746b3b3fa35a22eb2fa630299", "b133b2d7df9b848253b9d75e2ca5c68e21eba008", "75259a613285bdb339556ae30897cb7e628209fa", "d0f54b72e3a3fe7c0e65d7d5a3b30affb275f4c5", "f22d6d59e413ee255e5e0f2104f1e03be1a6722e", "0831794eddcbac1f601dcb9be9d45531a56dbf7e", "2d4b9fe3854ccce24040074c461d0c516c46baf4", "40dd736c803720890d6bfc1e083f6050e35d8f7a", "18206e1b988389eaab86ef8c852662accf3c3663", "3fa738ab3c79eacdbfafa4c9950ef74f115a3d84", "70c9d11cad12dc1692a4507a97f50311f1689dbf", "8aa1591bf8fcb44f2e9f2f10d1029720ccbb8832", "1f59e0818e7b16c0d39dd08eb90533ea0ae0be5e", "5de5848dc3fc35e40420ffec70a407e4770e3a8d", "464b3f0824fc1c3a9eaf721ce2db1b7dfe7cb05a", "e38371b69be4f341baa95bc854584e99b67c6d3a", "0726a45eb129eed88915aa5a86df2af16a09bcc1", "12150d8b51a2158e574e006d4fbdd3f3d01edc93", 
"416b559402d0f3e2b785074fcee989d44d82b8e5", "dbaf89ca98dda2c99157c46abd136ace5bdc33b3", "947ee3452e4f3d657b16325c6b959f8b8768efad", "9f4078773c8ea3f37951bf617dbce1d4b3795839", "6e9de9c3af3258dd18142e9bef2977b7ce153bd5", "e4d53e7f4c2052940841abc08f9574655f3f7fb4", "cc6d3ccc9e3dd0a43313a714316c8783cd879572", "7c17280c9193da3e347416226b8713b99e7825b8", "7f6061c83dc36633911e4d726a497cdc1f31e58a", "d915e634aec40d7ee00cbea96d735d3e69602f1a", "264a84f4d27cd4bca94270620907cffcb889075c", "2f69e9964f3b6bdc0d18749b48bb6b44a4171c64", "61e2044184d86d0f13e50ecaa3da6a4913088c76", "16fdd6d842475e6fbe58fc809beabbed95f0642e", "98e098ba9ff98fc58f22fed6d3d8540116284b91", "4a2062ba576ca9e9a73b6aa6e8aac07f4d9344b9", "362bfeb28adac5f45b6ef46c07c59744b4ed6a52", "86ed5b9121c02bcf26900913f2b5ea58ba23508f", "b3ad7bc128b77d9254aa38c5e1ead7fa10b07d29", "6c80c834d426f0bc4acd6355b1946b71b50cbc0b", "8b30259a8ab07394d4dac971f3d3bd633beac811", "1221e25763c3be95c1b6626ca9e7feaa3b636d9a", "03c48d8376990cff9f541d542ef834728a2fcda2", "bd243d77076b3b8fe046bd3dc6e8a02aa9b38d62", "1473a233465ea664031d985e10e21de927314c94", "944faf7f14f1bead911aeec30cc80c861442b610", "aac934f2eed758d4a27562dae4e9c5415ff4cdb7", "5ac946fc6543a445dd1ee6d5d35afd3783a31353", "11691f1e7c9dbcbd6dfd256ba7ac710581552baa", "7d7b036ed01765c9473d695f029142128d442aaa", "14c0f9dc9373bea1e27b11fa0594c86c9e632c8d", "81da427270c100241c07143885ba3051ec4a2ecb", "1f05473c587e2a3b587f51eb808695a1c10bc153", "190b3caa2e1a229aa68fd6b1a360afba6f50fde4", "4cac9eda716a0addb73bd7ffea2a5fb0e6ec2367", "20eeb83a8b6fea64c746bf993f9c991bb34a4b30", "193474d008cab9fa1c1fa81ce094d415f00b075c", "673541a8cb1aa3ac63a288523ba71aec2a38280e", "197efbef17f92e5cb5076961b6cd9f59e88ffd9a", "620339aef06aed07a78f9ed1a057a25433faa58b", "a6b5ca99432c23392cec682aebb8295c0283728b", "704d88168bdfabe31b6ff484507f4a2244b8c52b", "db848c3c32464d12da33b2f4c3a29fe293fc35d1", "98127346920bdce9773aba6a2ffc8590b9558a4a", "bd21109e40c26af83c353a3271d0cd0b5c4b4ade", 
"ce933821661a0139a329e6c8243e335bfa1022b1", "0d087aaa6e2753099789cd9943495fbbd08437c0", "d1881993c446ea693bbf7f7d6e750798bf958900", "00d4c2db10f3a32d505d7b8adc7179e421443dec", "a46086e210c98dcb6cb9a211286ef906c580f4e8", "fc8fb68a7e3b79c37108588671c0e1abf374f501", "2601b679fdd637f3cd978753ae2f15e8759dd267", "045e83272db5e92aa4dc8bdfee908534c2608711", "22e189a813529a8f43ad76b318207d9a4b6de71a", "2d9e58ea582e054e9d690afca8b6a554c3687ce6", "20767ca3b932cbc7b8112db21980d7b9b3ea43a3", "e19ebad4739d59f999d192bac7d596b20b887f78", "add6d96fc018986f51a1aac47eae9ee3fc62fb66", "b56f3a7c50bfcd113d0ba84e6aa41189e262d7ae", "2bcec23ac1486f4106a3aa588b6589e9299aba70", "1902288256839539aeb5feb3e1699b963a15aa1a", "2303d07d839e8b20f33d6e2ec78d1353cac256cf", "521cfbc1949289a7ffc3ff90af7c55adeb43db2a", "7fa3d4be12e692a47b991c0b3d3eba3a31de4d05", "42c9394ca1caaa36f535721fa9a64b2c8d4e0dee", "c1173b8d8efb8c2d989ce0e51fe21f6b0b8d1478", "a36c8a4213251d3fd634e8893ad1b932205ad1ca", "2c19d3d35ef7062061b9e16d040cebd7e45f281d", "5c493c42bfd93e4d08517438983e3af65e023a87", "0562fc7eca23d47096472a1d42f5d4d086e21871", "397aeaea61ecdaa005b09198942381a7a11cd129", "101d4cfbd6f8a7a10bd33505e2b183183f1d8770", "839a2155995acc0a053a326e283be12068b35cb8", "aafb8dc8fda3b13a64ec3f1ca7911df01707c453", "6c705285c554985ecfe1117e854e1fe1323f8c21", "dd85b6fdc45bf61f2b3d3d92ce5056c47bd8d335", "013d0acff1e5410fd9f6e15520d16f4ea02f03f6", "14b69626b64106bff20e17cf8681790254d1e81c", "ddfae3a96bd341109d75cedeaebb5ed2362b903f", "f257300b2b4141aab73f93c146bf94846aef5fa1", "8820d1d3fa73cde623662d92ecf2e3faf1e3f328", "15d653972d176963ef0ad2cc582d3b35ca542673", "4534d78f8beb8aad409f7bfcd857ec7f19247715", "052880031be0a760a5b606b2ad3d22f237e8af70", "0329d9be8ab1e3a1d5e4b9e7db5af5bbcc64e36f", "22646cf884cc7093b0db2c1731bd52f43682eaa8", "3634b4dd263c0f330245c086ce646c9bb748cd6b", "6c304f3b9c3a711a0cca5c62ce221fb098dccff0", "0c60eebe10b56dbffe66bb3812793dd514865935", "ce450e4849490924488664b44769b4ca57f1bc1a", 
"8e9b92a805d1ce0bf4e0c04133d26e28db036e6a", "6fef65bd7287b57f0c3b36bf8e6bc987fd161b7d", "dfa80e52b0489bc2585339ad3351626dee1a8395", "c76f64e87f88475069f7707616ad9df1719a6099", "05e3acc8afabc86109d8da4594f3c059cf5d561f", "86374bb8d309ad4dbde65c21c6fda6586ae4147a", "1576ed0f3926c6ce65e0ca770475bca6adcfdbb4", "1c41965c5e1f97b1504c1bdde8037b5e0417da5e", "799c02a3cde2c0805ea728eb778161499017396b", "473031328c58b7461753e81251379331467f7a69", "8383faea09b4b4bef8117a1da897495ebd68691b", "28f1542c63f5949ee6f2d51a6422244192b5a900", "85c90ad5eebb637f048841ebfded05942bb786b7", "d4001826cc6171c821281e2771af3a36dd01ffc0", "6bb630dfa797168e6627d972560c3d438f71ea99", "05891725f5b27332836cf058f04f18d74053803f", "03ce2ff688f9b588b6f264ca79c6857f0d80ceae", "982ede05154c1afdcf6fc623ba45186a34f4b9f2", "d141c31e3f261d7d5214f07886c1a29ac734d6fc", "c0c8d720658374cc1ffd6116554a615e846c74b5", "3107085973617bbfc434c6cb82c87f2a952021b7", "81d232e1f432db7de67baf4f30f240c62d1a9055", "ad2339c48ad4ffdd6100310dcbb1fb78e72fac98", "bc36badb6606b8162d821a227dda09a94aac537f", "2be8e06bc3a4662d0e4f5bcfea45631b8beca4d0", "3d5a1be4c1595b4805a35414dfb55716e3bf80d8", "3947b64dcac5bcc1d3c8e9dcb50558efbb8770f1", "b5667d087aafcf6b91f3c77aa90cee1ac185f8f1", "5003754070f3a87ab94a2abb077c899fcaf936a6", "b759936982d6fb25c55c98955f6955582bdaeb27", "5e6f546a50ed97658be9310d5e0a67891fe8a102", "bd9157331104a0708aa4f8ae79b7651a5be797c6", "2a65d7d5336b377b7f5a98855767dd48fa516c0f", "6b99cd366f2ea8e1c9abadf73b05388c0e24fec3", "e3144f39f473e238374dd4005c8b83e19764ae9e", "bf2eb77e9b795a4a0a38ed4b1c8dd4b2c9a74317", "488375ae857a424febed7c0347cc9590989f01f7", "83b54b8c97dc14e302dad191327407ec0d5fb4a6", "d1f58798db460996501f224fff6cceada08f59f9", "267c6e8af71bab68547d17966adfaab3b4711e6b", "1e94cc91c5293c8fc89204d4b881552e5b2ce672", "cb08f679f2cb29c7aa972d66fe9e9996c8dfae00", "16eaa26a84468b27e559215db01c53286808ec2a", "6a2ac4f831bd0f67db45e7d3cdaeaaa075e7180a", "27c66b87e0fbb39f68ddb783d11b5b7e807c76e8", 
"631483c15641c3652377f66c8380ff684f3e365c", "b5cd9e5d81d14868f1a86ca4f3fab079f63a366d", "d1959ba4637739dcc6cc6995e10fd41fd6604713", "2b42f83a720bd4156113ba5350add2df2673daf0", "f6abecc1f48f6ec6eede4143af33cc936f14d0d0", "2bb53e66aa9417b6560e588b6235e7b8ebbc294c", "dee6609615b73b10540f32537a242baa3c9fca4d", "959bcb16afdf303c34a8bfc11e9fcc9d40d76b1c", "1442319de86d171ce9595b20866ec865003e66fc", "05a7be10fa9af8fb33ae2b5b72d108415519a698", "a8c8a96b78e7b8e0d4a4a422fcb083e53ad06531", "ac559873b288f3ac28ee8a38c0f3710ea3f986d9", "ba30cc9d8bac724dafc0aea247159cc7e7105784", "58081cb20d397ce80f638d38ed80b3384af76869", "4ee94572ae1d9c090fe81baa7236c7efbe1ca5b4", "8d384e8c45a429f5c5f6628e8ba0d73c60a51a89", "9166f46aa3e58befaefd3537e5a11b31ebeea4d0", "fd71ae9599e8a51d8a61e31e6faaaf4a23a17d81", "f8015e31d1421f6aee5e17fc3907070b8e0a5e59", "cda8fd9dd8b485e6854b1733d2294f69666c66f7", "3150e329e01be31ba08b6d76fc46b0da88a5ddeb", "66e9fb4c2860eb4a15f713096020962553696e12", "65bba9fba03e420c96ec432a2a82521ddd848c09", "825f56ff489cdd3bcc41e76426d0070754eab1a8", "195df1106f4d7aff0e9cb609358abbf80f54a716", "f1250900074689061196d876f551ba590fc0a064", "a503eb91c0bce3a83bf6f524545888524b29b166", "1b41d4ffb601d48d7a07dbbae01343f4eb8cc38c", "1bbec7190ac3ba34ca91d28f145e356a11418b67", "41f26101fed63a8d149744264dd5aa79f1928265", "06f585a3a05dd3371cd600a40dc35500e2f82f9b", "49be50efc87c5df7a42905e58b092729ea04c2f5", "d708ce7103a992634b1b4e87612815f03ba3ab24", "3bfa630a6dc6d1ca98e7b43c90dd9e8b98e361d6", "ebabd1f7bc0274fec88a3dabaf115d3e226f198f", "a000149e83b09d17e18ed9184155be140ae1266e", "bcf19b964e7d1134d00332cf1acf1ee6184aff00", "fa4f59397f964a23e3c10335c67d9a24ef532d5c", "1128a4f57148cec96c0ef4ae3b5a0fbf07efbad9", "bd9eb65d9f0df3379ef96e5491533326e9dde315", "31ec1e5c3b5e020af4a5a3c1be2724c7429a7c78", "4cc681239c8fda3fb04ba7ac6a1b9d85b68af31d", "58eb9174211d58af76023ce33ee05769de57236c", "c535d4d61aa0f1d8aadb4082bdcc19f4cbdf0eaf", "fdda5852f2cffc871fd40b0cb1aa14cea54cd7e3", 
"521b625eebea73b5deb171a350e3709a4910eebf", "dcea30602c4e0b7525a1bf4088620128d4cbb800", "f58f30932e3464fc808e539897efa4ee4e7ac59f", "e03e86ac61cfac9148b371d75ce81a55e8b332ca", "a472d59cff9d822f15f326a874e666be09b70cfd", "8724fc4d6b91eebb79057a7ce3e9dfffd3b1426f", "1d6c09019149be2dc84b0c067595f782a5d17316", "202dc3c6fda654aeb39aee3e26a89340fb06802a", "247a8040447b6577aa33648395d95d80441a0cf3", "626859fe8cafd25da13b19d44d8d9eb6f0918647", "a0c37f07710184597befaa7e6cf2f0893ff440e9", "d44e6baf3464bf56d3a29daf280b1b525ac30f7d", "4d1f77d9418a212c61a3c75c04a5b3884f6441ba", "e0793fd343aa63b5f366c8ace61b9c5489c51a4d", "10cb39e93fac194220237f15dae084136fdc6740", "7e2f7c0eeaeb47b163a7258665324643669919e8", "351c02d4775ae95e04ab1e5dd0c758d2d80c3ddd", "073eaa49ccde15b62425cda1d9feab0fea03a842", "d86fabd4498c8feaed80ec342d254fb877fb92f5", "4c0cc732314ba3ccccd9036e019b1cfc27850c17", "1252727e8096f48096ef89483d30c3a74500dd15", "1130c38e88108cf68b92ecc61a9fc5aeee8557c9", "446dc1413e1cfaee0030dc74a3cee49a47386355", "cb2917413c9b36c3bb9739bce6c03a1a6eb619b3", "3e685704b140180d48142d1727080d2fb9e52163", "1cad5d682393ffbb00fd26231532d36132582bb4", "cba090a5bfae7dd8a60a973259f0870ed68c4dd3", "3b2a2357b12cf0a5c99c8bc06ef7b46e40dd888e", "521aa8dcd66428b07728b91722cc8f2b5a73944b", "11bb2abe0ca614c15701961428eb2f260e3e2eef", "863ad2838b9b90d4461995f498a39bcd2fb87c73", "cd22e6532211f679ba6057d15a801ba448b9915c", "df9269657505fcdc1e10cf45bbb8e325678a40f5", "673d4885370b27c863e11a4ece9189a6a45931cc", "48e6c6d981efe2c2fb0ae9287376fcae59da9878", "6cb7648465ba7757ecc9c222ac1ab6402933d983", "08d40ee6e1c0060d3b706b6b627e03d4b123377a", "c90427085909029afd2af01d1967e80b78e01b88", "6dd8d8be00376ac760dc92f9c5f20520872c5355", "90298f9f80ebe03cb8b158fd724551ad711d4e71", "407de9da58871cae7a6ded2f3a6162b9dc371f38", "ac820d67b313c38b9add05abef8891426edd5afb", "1384a83e557b96883a6bffdb8433517ec52d0bea", "97b5800e144a8df48f1f7e91383b0f37bc37cf60", "9ea223c070ec9a00f4cb5ca0de35d098eb9a8e32", 
"69fb98e11df56b5d7ec7d45442af274889e4be52", "32b8c9fd4e3f44c371960eb0074b42515f318ee7", "dc5cde7e4554db012d39fc41ac8580f4f6774045", "14558a70418ec4012c5f058145eef2d22d89284a", "f19bf8b5c1860cd81b5339804d5db9e791085aa7", "097340d3ac939ce181c829afb6b6faff946cdce0", "f7911b9ff58d07d19c68f4a30f40621f63c0f385", "fb1b6138aeb081adf853316c0d83ef4c5626a7fa", "a8a61badec9b8bc01f002a06e1426a623456d121", "10df1d4b278da991848fb71b572f687bd189c10e", "1a167e10fe57f6d6eff0bb9e45c94924d9347a3e", "826c66bd182b54fea3617192a242de1e4f16d020", "ac0d3f6ed5c42b7fc6d7c9e1a9bb80392742ad5e", "b6ef158d95042f39765df04373c01546524c9ccd", "ae78469de00ea1e7602ca468dcf188cdfe2c80d4", "3e0377af0087b9b836bf6d95bc1c7085dfde4897", "abf0aa1d8869d87f4ef62e2da058ccfb4bf46d18", "e76798bddd0f12ae03de26b7c7743c008d505215", "09926ed62511c340f4540b5bc53cf2480e8063f8", "46f2611dc4a9302e0ac00a79456fa162461a8c80", "f02a6bccdaee14ab55ad94263539f4f33f1b15bb", "392d35bb359a3b61cca1360272a65690a97a2b3f", "6964af90cf8ac336a2a55800d9c510eccc7ba8e1", "bcead1a92744e76c38caaa13159de4abfb81b1d0", "02cc96ad997102b7c55e177ac876db3b91b4e72c", "4adb97b096b700af9a58d00e45a2f980136fcbb5", "23aef683f60cb8af239b0906c45d11dac352fb4e", "a6b1d79bc334c74cde199e26a7ef4c189e9acd46", "17a8d1b1b4c23a630b051f35e47663fc04dcf043", "994b52bf884c71a28b4f5be4eda6baaacad1beee", "0145dc4505041bf39efa70ea6d95cf392cfe7f19", "554b9478fd285f2317214396e0ccd81309963efd", "2729e12ecb777a553e5ed0a1ac52dd37924e813d", "080c204edff49bf85b335d3d416c5e734a861151", "d0ac9913a3b1784f94446db2f1fb4cf3afda151f", "be5276e9744c4445fe5b12b785650e8f173f56ff", "745b42050a68a294e9300228e09b5748d2d20b81", "3e3f305dac4fbb813e60ac778d6929012b4b745a", "4467a1ae8ddf0bc0e970c18a0cdd67eb83c8fd6f", "580f86f1ace1feed16b592d05c2b07f26c429b4b", "09507f1f1253101d04a975fc5600952eac868602", "b03b4d8b4190361ed2de66fcbb6fda0c9a0a7d89", "2d84e30c61281d3d7cdd11676683d6e66a68aea6", "6dd2a0f9ca8a5fee12edec1485c0699770b4cfdf", "2d4a3e9361505616fa4851674eb5c8dd18e0c3cf", 
"f2a7f9bd040aa8ea87672d38606a84c31163e171", "d700aedcb22a4be374c40d8bee50aef9f85d98ef", "d289ce63055c10937e5715e940a4bb9d0af7a8c5", "38f61e422ef75df4b96fb6081ce866556b6b854f", "274f87ad659cd90382ef38f7c6fafc4fc7f0d74d", "fbf196d83a41d57dfe577b3a54b1b7fa06666e3b", "4a9d906935c9de019c61aedc10b77ee10e3aec63", "9aad8e52aff12bd822f0011e6ef85dfc22fe8466", "645f09f4bc2e6a13663564ee9032ca16e35fc52d", "39b452453bea9ce398613d8dd627984fd3a0d53c", "20c02e98602f6adf1cebaba075d45cef50de089f", "73ed64803d6f2c49f01cffef8e6be8fc9b5273b8", "3f2a44dcf0ba3fc72b24c7f09bb08e25797398c1", "c0ca6b992cbe46ea3003f4e9b48f4ef57e5fb774", "1dc241ee162db246882f366644171c11f7aed96d", "8befcd91c24038e5c26df0238d26e2311b21719a", "d35c82588645b94ce3f629a0b98f6a531e4022a3", "4c6daffd092d02574efbf746d086e6dc0d3b1e91", "071135dfb342bff884ddb9a4d8af0e70055c22a1", "3a0425c25beea6c4c546771adaf5d2ced4954e0d", "e546572f8205570de4518bcf8d0345465e51d7a0", "10af69f11301679b6fbb23855bf10f6af1f3d2e6", "a14ed872503a2f03d2b59e049fd6b4d61ab4d6ca", "504028218290d68859f45ec686f435f473aa326c", "93e1e195f294c463f4832c4686775bf386b3de39", "a3c8c7da177cd08978b2ad613c1d5cb89e0de741", "48db8bf18e2f6f19e07e88384be855c8b7ea0ead", "164b0e2a03a5a402f66c497e6c327edf20f8827b", "4d19401e44848fe65b721971bc71a9250870ed5f", "ab0981d1da654f37620ca39c6b42de21d7eb58eb", "88fd4d1d0f4014f2b2e343c83d8c7e46d198cc79", "b09b693708f412823053508578df289b8403100a", "e2d265f606cd25f1fd72e5ee8b8f4c5127b764df", "33f2b44742cc828347ccc5ec488200c25838b664", "3df8cc0384814c3fb05c44e494ced947a7d43f36", "c9b958c2494b7ba08b5b460f19a06814dba8aee0", "9626bcb3fc7c7df2c5a423ae8d0a046b2f69180c", "24aac045f1e1a4c13a58eab4c7618dccd4c0e671", "badd371a49d2c4126df95120902a34f4bee01b00", "bc9af4c2c22a82d2c84ef7c7fcc69073c19b30ab", "1eb1fdc5c933d2483ba1acbfa8c457fae87e71e5", "568cff415e7e1bebd4769c4a628b90db293c1717", "9758f3fd94239a8d974217fe12599f88fb413f3d", "defd44b02a1532f47bdd8c8f2375e3df64ac5d79", "f571fe3f753765cf695b75b1bd8bed37524a52d2", 
"d3008b4122e50a28f6cc1fa98ac6af28b42271ea", "70c58700eb89368e66a8f0d3fc54f32f69d423e1", "503db524b9a99220d430e741c44cd9c91ce1ddf8", "56f57786516dcc8ea3c0ffe877c1363bfb9981d2", "5945464d47549e8dcaec37ad41471aa70001907f", "4b0a2937f64df66cadee459a32ad7ae6e9fd7ed2", "d833268a8ea9278e68aaf3bd9bc2c11a5bb0bab7", "558fc9a2bce3d3993a9c1f41b6c7f290cefcf92f", "c6241e6fc94192df2380d178c4c96cf071e7a3ac", "f0f854f8cfe826fd08385c0c3c8097488f468076", "35b1c1f2851e9ac4381ef41b4d980f398f1aad68", "a9756ca629f73dc8f84ee97cfa8b34b8207392dc", "7ed3b79248d92b255450c7becd32b9e5c834a31e", "30cbd41e997445745b6edd31f2ebcc7533453b61", "07dc9f3b34284cc915dea7575f40ef0c04338126", "24115d209e0733e319e39badc5411bbfd82c5133", "079edd5cf7968ac4759dfe72af2042cf6e990efc", "0a9d204db13d395f024067cf70ac19c2eeb5f942", "167736556bea7fd57cfabc692ec4ae40c445f144", "74c19438c78a136677a7cb9004c53684a4ae56ff", "d5d5cc27ca519d1300e77e3c1a535a089f52f646", "d309e414f0d6e56e7ba45736d28ee58ae2bad478", "a87ab836771164adb95d6744027e62e05f47fd96", "86e1bdbfd13b9ed137e4c4b8b459a3980eb257f6", "e75a589ca27dc4f05c2715b9d54206dee37af266", "a4c430b7d849a8f23713dc283794d8c1782198b2", "e2faaebd17d10e2919bd69492787e7565546a63f", "016800413ebd1a87730a5cf828e197f43a08f4b3", "5fac62a3de11125fc363877ba347122529b5aa50", "bd0e100a91ff179ee5c1d3383c75c85eddc81723", "9c065dfb26ce280610a492c887b7f6beccf27319", "17fa1c2a24ba8f731c8b21f1244463bc4b465681", "81a142c751bf0b23315fb6717bc467aa4fdfbc92", "ef2a5a26448636570986d5cda8376da83d96ef87", "b7426836ca364603ccab0e533891d8ac54cf2429", "d42a8c6528cdf1a63050f9a282f6b5daec6b4e73", "9f499948121abb47b31ca904030243e924585d5f", "22df6b6c87d26f51c0ccf3d4dddad07ce839deb0", "b54fe193b6faf228e5ffc4b88818d6aa234b5bb9", "8b744786137cf6be766778344d9f13abf4ec0683", "b5c749f98710c19b6c41062c60fb605e1ef4312a", "2b1327a51412646fcf96aa16329f6f74b42aba89", "062d0813815c2b9864cd9bb4f5a1dc2c580e0d90", "9077365c9486e54e251dd0b6f6edaeda30ae52b9", "c553f0334fcadf43607925733685adef81fbe406", 
"928b8eb47288a05611c140d02441660277a7ed54", "1ea74780d529a458123a08250d8fa6ef1da47a25", "5feb1341a49dd7a597f4195004fe9b59f67e6707", "fef6f1e04fa64f2f26ac9f01cd143dd19e549790", "4fd29e5f4b7186e349ba34ea30738af7860cf21f", "2d83ba2d43306e3c0587ef16f327d59bf4888dc3", "9e8382aa1de8f2012fd013d3b39838c6dad8fb4d", "df2899462e04559c024a773d91f6e06c262e136b", "47fb74785fbd8870c2e819fc91d04b9d9722386f", "371f40f6d32ece05cc879b6954db408b3d4edaf3", "44f23600671473c3ddb65a308ca97657bc92e527", "b69b239217d4e9a20fe4fe1417bf26c94ded9af9", "c574c72b5ef1759b7fd41cf19a9dcd67e5473739", "5a12e1d4d74fe1a57929eaaa14f593b80f907ea3", "faa29975169ba3bbb954e518bc9814a5819876f6", "53698b91709112e5bb71eeeae94607db2aefc57c", "691964c43bfd282f6f4d00b8b0310c554b613e3b", "bff567c58db554858c7f39870cff7c306523dfee", "ded41c9b027c8a7f4800e61b7cfb793edaeb2817", "82e3f4099503633c042a425e9217bfe47cfe9d4b", "062c41dad67bb68fefd9ff0c5c4d296e796004dc", "95d858b39227edeaf75b7fad71f3dc081e415d16", "329394480fc5e9e96de4250cc1a2b060c3677c94", "2c5d1e0719f3ad7f66e1763685ae536806f0c23b", "c4cfdcf19705f9095fb60fb2e569a9253a475f11", "d3b18ba0d9b247bfa2fb95543d172ef888dfff95", "09b0ef3248ff8f1a05b8704a1b4cf64951575be9", "b7820f3d0f43c2ce613ebb6c3d16eb893c84cf89", "bb489e4de6f9b835d70ab46217f11e32887931a2", "828d7553a45eb0c3132e406105732a254369eb4d", "95288fa7ff4683e32fe021a78cbf7d3376e6e400", "2914e8c62f0432f598251fae060447f98141e935", "15ef449ac443c494ceeea8a9c425043f4079522e", "457cf73263d80a1a1338dc750ce9a50313745d1d", "05e658fed4a1ce877199a4ce1a8f8cf6f449a890", "ba29ba8ec180690fca702ad5d516c3e43a7f0bb8", "3852968082a16db8be19b4cb04fb44820ae823d4", "63f2d1a64737afa1608588b9651b1e4207e82d1c", "169618b8dc9b348694a31c6e9e17b989735b4d39", "00f7f7b72a92939c36e2ef9be97397d8796ee07c", "51cb09ee04831b95ae02e1bee9b451f8ac4526e3", "41aa8c1c90d74f2653ef4b3a2e02ac473af61e47", "032a1c95388fb5c6e6016dd8597149be40bc9d4d", "2c258eec8e4da9e65018f116b237f7e2e0b2ad17", "151b87de997e55db892b122c211f9c749f4293de", 
"e060e32f8ad98f10277b582393df50ac17f2836c", "7fe2ab9f54242ef8609ef9bf988f008c7d42407c", "3dd906bc0947e56d2b7bf9530b11351bbdff2358", "bbe1332b4d83986542f5db359aee1fd9b9ba9967", "333aa36e80f1a7fa29cf069d81d4d2e12679bc67", "8356832f883207187437872742d6b7dc95b51fde", "6257a622ed6bd1b8759ae837b50580657e676192", "8981be3a69cd522b4e57e9914bf19f034d4b530c", "c0d5c3aab87d6e8dd3241db1d931470c15b9e39d", "7dffe7498c67e9451db2d04bb8408f376ae86992", "64c4019f1ea9b54b1848418ac53c4e2584dc62d4", "10a285260e822b49023c4324d0fbbca7df8e128b", "4fcd19b0cc386215b8bd0c466e42934e5baaa4b7", "ca37eda56b9ee53610c66951ee7ca66a35d0a846", "1b635f494eff2e5501607ebe55eda7bdfa8263b8", "77fb9e36196d7bb2b505340b6b94ba552a58b01b", "b8375ff50b8a6f1a10dd809129a18df96888ac8b", "fcbf808bdf140442cddf0710defb2766c2d25c30", "fc5bdb98ff97581d7c1e5eb2d24d3f10714aa192", "c588c89a72f89eed29d42f34bfa5d4cffa530732", "e6f20e7431172c68f7fce0d4595100445a06c117", "a92b5234b8b73e06709dd48ec5f0ec357c1aabed", "35308a3fd49d4f33bdbd35fefee39e39fe6b30b7", "78b457f8b1ba4fbd1c50c32ec1f02f4f58764ad7", "5c36d8bb0815fd4ff5daa8351df4a7e2d1b32934", "c244c3c797574048d6931b6714ebac64d820dbb3", "1efacaa0eaa7e16146c34cd20814d1411b35538e", "eacba5e8fbafb1302866c0860fc260a2bdfff232", "4f0b8f730273e9f11b2bfad2415485414b96299f", "d02b32b012ffba2baeb80dca78e7857aaeececb0", "f557df59cd088ffb8e27506d8612d062407e96f4", "6e97a99b2879634ecae962ddb8af7c1a0a653a82", "7792fbc59f3eafc709323cdb63852c5d3a4b23e9", "c2dc29e0db76122dfed075c3b9ee48503b027809", "118ca3b2e7c08094e2a50137b1548ada7935e505", "a9fc8efd1aa3d58f89c0f53f0cb112725b5bda10", "a6db73f10084ce6a4186363ea9d7475a9a658a11", "d83ae5926b05894fcda0bc89bdc621e4f21272da", "cb8a1b8d87a3fef15635eb4a32173f9c6f966055", "17045163860fc7c38a0f7d575f3e44aaa5fa40d7", "faca1c97ac2df9d972c0766a296efcf101aaf969", "ae8d5be3caea59a21221f02ef04d49a86cb80191", "468c8f09d2ad8b558b65d11ec5ad49208c4da2f2", "8bf647fed40bdc9e35560021636dfb892a46720e", "26575ad9e75efb440a7dc4ef8e548eed4e19dbd1", 
"12ccfc188de0b40c84d6a427999239c6a379cd66", "57eeaceb14a01a2560d0b90d38205e512dcca691", "94806f0967931d376d1729c29702f3d3bb70167c", "ef8de1bd92e9ee9d0d2dee73095d4d348dc54a98", "533d14e539ae5cdca0ece392487a2b19106d468a", "34c8de02a5064e27760d33b861b7e47161592e65", "24ff832171cb774087a614152c21f54589bf7523", "62dccab9ab715f33761a5315746ed02e48eed2a0", "fffa2943808509fdbd2fc817cc5366752e57664a", "506ea19145838a035e7dba535519fb40a3a0018c", "677585ccf8619ec2330b7f2d2b589a37146ffad7", "508702ed2bf7d1b0655ea7857dd8e52d6537e765", "6d4e3616d0b27957c4107ae877dc0dd4504b69ab", "f38813f1c9dac44dcb992ebe51c5ede66fd0f491", "34bb11bad04c13efd575224a5b4e58b9249370f3", "5615d6045301ecbc5be35e46cab711f676aadf3a", "9d58e8ab656772d2c8a99a9fb876d5611fe2fe20", "90b7619eabe94731722ae884d0802256462457dc", "1056347fc5e8cd86c875a2747b5f84fd570ba232", "a100595c66f84c3ddd3da8d362a53f7a82f6e3eb", "3c56acaa819f4e2263638b67cea1ec37a226691d", "90d9209d5dd679b159051a8315423a7f796d704d", "703c9c8f20860a1b1be63e6df1622b2021b003ca", "c83e26622b275fdf878135e71c23325a31d0e5fc", "6de18708218988b0558f6c2f27050bb4659155e4", "3fb98e76ffd8ba79e1c22eda4d640da0c037e98a", "1eb4ea011a3122dc7ef3447e10c1dad5b69b0642", "62fddae74c553ac9e34f511a2957b1614eb4f937", "2288696b6558b7397bdebe3aed77bedec7b9c0a9", "80135ed7e34ac1dcc7f858f880edc699a920bf53", "a7267bc781a4e3e79213bb9c4925dd551ea1f5c4", "104ee18b513b52386f871e959c1f9e5072604e93", "c4b58ceafdf4cf55586b036b9eb4d6d3d9ecd9c4", "41aa209e9d294d370357434f310d49b2b0baebeb", "fb2cc3501fc89f92f5ee130d66e69854f8a9ddd1", "30b74e60ec11c0ebc4e640637d56d85872dd17ce", "54969bcd728b0f2d3285866c86ef0b4797c2a74d", "2f7fc778e3dec2300b4081ba2a1e52f669094fcd", "769461ff717d987482b28b32b1e2a6e46570e3ff", "057d5f66a873ec80f8ae2603f937b671030035e6", "2ce1bac5ddc4cf668bbbb8879cd21dfb94b5cfe4", "881066ec43bcf7476479a4146568414e419da804", "6343bc0013343b6a5f96154f02d18dcd36a3f74c", "db1f48a7e11174d4a724a4edb3a0f1571d649670", "55c68c1237166679d2cb65f266f496d1ecd4bec6", 
"292c6b743ff50757b8230395c4a001f210283a34", "f0f4f16d5b5f9efe304369120651fa688a03d495", "baafe3253702955c6904f0b233e661b47aa067e1", "78df7d3fdd5c32f037fb5cc2a7c104ac1743d74e", "9989eda2f5392cfe1f789bb0f6213a46d92d1302", "cfc4aa456d9da1a6fabd7c6ca199332f03e35b29", "1667a77db764e03a87a3fd167d88b060ef47bb56", "8895d6ae9f095a8413f663cc83f5b7634b3dc805", "3daafe6389d877fe15d8823cdf5ac15fd919676f", "c0f67e850176bb778b6c048d81c3d7e4d8c41003", "88f7a3d6f0521803ca59fde45601e94c3a34a403", "516d0d9eb08825809e4618ca73a0697137ebabd5", "3328413ee9944de1cc7c9c1d1bf2fece79718ba1", "6261eb75066f779e75b02209fbd3d0f02d3e1e45", "24eeb748a5e431510381ec7c8253bcb70eff8526", "14fee990a372bcc4cb6dc024ab7fc4ecf09dba2b", "be4a20113bc204019ea79c6557a0bece23da1121", "6318d3842b36362bb45527b717e1a45ae46151d5", "2cdde47c27a8ecd391cbb6b2dea64b73282c7491", "6cbde27d9a287ae926979dbb18dfef61cf49860e", "a6270914cf5f60627a1332bcc3f5951c9eea3be0", "e52f73c77c7eaece6f2d8fdd0f15327f9f007261", "1a4b6ee6cd846ef5e3030a6ae59f026e5f50eda6", "217aa3aa0b3d9f6f394b5d26f03418187d775596", "05c5134125a333855e8d25500bf97a31496c9b3f", "a93781e6db8c03668f277676d901905ef44ae49f", "93f37c69dd92c4e038710cdeef302c261d3a4f92", "cec8936d97dea2fcf04f175d3facaaeb65e574bf", "0b2966101fa617b90510e145ed52226e79351072", "bc8e11b8cdf0cfbedde798a53a0318e8d6f67e17", "cdc7bd87a2c9983dab728dbc8aac74d8c9ed7e66", "86f191616423efab8c0d352d986126a964983219", "1be0ce87bb5ba35fa2b45506ad997deef6d6a0a8", "089513ca240c6d672c79a46fa94a92cde28bd567", "378418fdd28f9022b02857ef7dbab6b0b9a02dbe", "263ed62f94ea615c747c00ebbb4008385285b33b", "0be49fc1e0c9a6a50e449015945dd1cf92ccd07e", "414715421e01e8c8b5743c5330e6d2553a08c16d", "d961617db4e95382ba869a7603006edc4d66ac3b", "9652f154f4ae7807bdaff32d3222cc0c485a6762", "e793f8644c94b81b7a0f89395937a7f8ad428a89", "549c719c4429812dff4d02753d2db11dd490b2ae", "63a2e2155193dc2da9764ae7380cdbd044ff2b94", "6fea198a41d2f6f73e47f056692f365c8e6b04ce", "2e8e6b835e5a8f55f3b0bdd7a1ff765a0b7e1b87", 
"6e8c3b7d25e6530a631ea01fbbb93ac1e8b69d2f", "9931c6b050e723f5b2a189dd38c81322ac0511de", "66886f5af67b22d14177119520bd9c9f39cdd2e6", "3dce3bb30f0c19121a71e0bfe1d418f855cb13ce", "0290523cabea481e3e147b84dcaab1ef7a914612", "0cccf576050f493c8b8fec9ee0238277c0cfd69a", "374a0df2aa63b26737ee89b6c7df01e59b4d8531", "958c599a6f01678513849637bec5dc5dba592394", "060034b59275c13746413ca9c67d6304cba50da6", "7b43326477795a772c08aee750d3e433f00f20be", "d1dfdc107fa5f2c4820570e369cda10ab1661b87", "30b103d59f8460d80bb9eac0aa09aaa56c98494f", "106092fafb53e36077eba88f06feecd07b9e78e7", "782a05fbe30269ff8ab427109f5c4d0a577e5284", "9b6d0b3fbf7d07a7bb0d86290f97058aa6153179", "1aef6f7d2e3565f29125a4871cd60c4d86c48361", "a5a44a32a91474f00a3cda671a802e87c899fbb4", "19841b721bfe31899e238982a22257287b9be66a", "ebf204e0a3e137b6c24e271b0d55fa49a6c52b41", "2c3430e0cbe6c8d7be3316a88a5c13a50e90021d", "2fc43c2c3f7ad1ca7a1ce32c5a9a98432725fb9a", "01c9dc5c677aaa980f92c4680229db482d5860db", "6c1227659878e867a01888eef472dd96b679adb6", "2e5b160892b70a1e846aa9dcdf132b8011937ec6", "edd7504be47ebc28b0d608502ca78c0aea6a65a2", "cad24ba99c7b6834faf6f5be820dd65f1a755b29", "17a995680482183f3463d2e01dd4c113ebb31608", "b5160e95192340c848370f5092602cad8a4050cd", "057b80e235b10799d03876ad25465208a4c64caf", "edf60d081ffdfa80243217a50a411ab5407c961d", "e0dedb6fc4d370f4399bf7d67e234dc44deb4333", "225fb9181545f8750061c7693661b62d715dc542", "5c7adde982efb24c3786fa2d1f65f40a64e2afbf", "9e5690cdb4dfa30d98dff653be459e1c270cde7f", "90cc2f08a6c2f0c41a9dd1786bae097f9292105e", "afa84ff62c9f5b5c280de2996b69ad9fa48b7bc3", "4f298d6d0c8870acdbf94fe473ebf6814681bd1f", "c37a971f7a57f7345fdc479fa329d9b425ee02be", "c3f76a9ebe53825e14f851120cca4e1fc29807de", "ba816806adad2030e1939450226c8647105e101c", "162403e189d1b8463952fa4f18a291241275c354", "0a82860d11fcbf12628724333f1e7ada8f3cd255", "ea46951b070f37ad95ea4ed08c7c2a71be2daedc", "6d5125c9407c7762620eeea7570af1a8ee7d76f3", "46c87fded035c97f35bb991fdec45634d15f9df2", 
"33402ee078a61c7d019b1543bb11cc127c2462d2", "359e8703fd6ca8172a645c5b5a45b1d2b30b1d14", "2227f978f084ebb18cb594c0cfaf124b0df6bf95", "c36f3cabeddce0263c944e9fe4afd510b5bae816", "398e0771e64cab6ca5d21754e32dce63f9e3c223", "04b851f25d6d49e61a528606953e11cfac7df2b2", "5dd473a4a9c6337b083edf38b6ddf5a6aece8908", "695426275dee2ec56bc0c0afe1c5b4227a350840", "2063222c5ce0dd233fa3056ddc245fca26bd5cf2", "0dfa460a35f7cab4705726b6367557b9f7842c65", "bdd203bcd3c41c336c5635fb026a78279d75b4be", "7142ac9e4d5498037aeb0f459f278fd28dae8048", "5f758a29dae102511576c0a5c6beda264060a401", "098fa9b4c3f7fb41c7a178d36f5dbb50a3ffa377", "60c699b9ec71f7dcbc06fa4fd98eeb08e915eb09", "6a3a07deadcaaab42a0689fbe5879b5dfc3ede52", "aef58a54d458ab76f62c9b6de61af4f475e0f616", "69a41c98f6b71764913145dbc2bb4643c9bc4b0a", "0e36ada8cb9c91f07c9dcaf196d036564e117536", "eb100638ed73b82e1cce8475bb8e180cb22a09a2", "e0ab926cd48a47a8c7b16e27583421141f71f6df", "29f0a868644462aa7ebc21f4510d4209932a1b8c", "3c09fb7fe1886072670e0c4dd632d052102a3733", "9730b9cd998c0a549601c554221a596deda8af5b", "a301ddc419cbd900b301a95b1d9e4bb770afc6a3", "2e3b981b9f3751fc5873f77ad2aa7789c3e1d1d2", "03e1480f1de2ffbd85655d68aae63a01685c5862", "1ffb39ed4d684a80652dfa30d604b82b4c542615", "83d41f6548bb76241737dcd3fed9e182ee901ff9", "ae5f32e489c4d52e7311b66060c7381d932f4193", "a3f78cc944ac189632f25925ba807a0e0678c4d5", "a2359c0f81a7eb032cff1fe45e3b80007facaa2a", "2e68190ebda2db8fb690e378fa213319ca915cf8", "050fdbd2e1aa8b1a09ed42b2e5cc24d4fe8c7371", "5141cf2e59fb2ec9bb489b9c1832447d3cd93110", "7e467e686f9468b826133275484e0a1ec0f5bde6", "8b61fdc47b5eeae6bc0a52523f519eaeaadbc8c8", "911bef7465665d8b194b6b0370b2b2389dfda1a1", "b4d7ca26deb83cec1922a6964c1193e8dd7270e7", "645de797f936cb19c1b8dba3b862543645510544", "40dd2b9aace337467c6e1e269d0cb813442313d7", "7ee53d931668fbed1021839db4210a06e4f33190", "60d4cef56efd2f5452362d4d9ac1ae05afa970d1", "3bd50e33220af76ffc32a7e57688e248843b7f25", "fecccc79548001ecbd6cafd3067bcf14de80b11a", 
"34ec83c8ff214128e7a4a4763059eebac59268a6", "a1af7ec84472afba0451b431dfdb59be323e35b7", "d794ffece3533567d838f1bd7f442afee13148fd", "56a677c889e0e2c9f68ab8ca42a7e63acf986229", "0abf67e7bd470d9eb656ea2508beae13ca173198", "4205cb47ba4d3c0f21840633bcd49349d1dc02c1", "ebde9b9c714ed326157f41add8c781f826c1d864", "60462b981fda63c5f9d780528a37c46884fe0b54", "b6d3caccdcb3fbce45ce1a68bb5643f7e68dadb3", "c843f591658ca9dbb77944a89372a92006defe68", "6c5fbf156ef9fc782be0089309074cc52617b868", "085b5f9fd49432edab29e2c64f2a427fbce97f67", "d8f0bda19a345fac81a1d560d7db73f2b4868836", "251281d9cbd207038efbde0515f4077541967239", "2bb36c875754a2a8919f2f9b00a336c00006e453", "10e2f2ad1dedec6066e063cb2098b089b35905a8", "b191aa2c5b8ece06c221c3a4a0914e8157a16129", "3b60b047831146044d154156441c60f6edd80346", "0ff23392e1cb62a600d10bb462d7a1f171f579d0", "3b408a3ca6fb39b0fda4d77e6a9679003b2dc9ab", "ad0d4d5c61b55a3ab29764237cd97be0ebb0ddff", "3f0e0739677eb53a9d16feafc2d9a881b9677b63", "eac1b644492c10546a50f3e125a1f790ec46365f", "06f39834e870278243dda826658319be2d5d8ded", "5dd496e58cfedfc11b4b43c4ffe44ac72493bf55", "1dabb080e3e968633f4b3774f19192f8378f5b67", "0229829e9a1eed5769a2b5eccddcaa7cd9460b92", "14b66748d7c8f3752dca23991254fca81b6ee86c", "451b6409565a5ad18ea49b063561a2645fa4281b", "19994e667d908bc0aacfb663ab0a2bb5ad16b221", "551fa37e8d6d03b89d195a5c00c74cc52ff1c67a", "a78b5495a4223b9784cc53670cc10b6f0beefd32", "c15b68986ecfa1e13e3791686ae9024f66983f14", "738c187d55745aac18d5fb5f6cc9e3568cd2d217", "96b1000031c53cd4c1c154013bb722ffd87fa7da", "2588acc7a730d864f84d4e1a050070ff873b03d5", "a27735e4cbb108db4a52ef9033e3a19f4dc0e5fa", "056d5d942084428e97c374bb188efc386791e36d", "98fd92d68a143a5ced4a016fa3b7addd6b4a0122", "4fa0d73b8ba114578744c2ebaf610d2ca9694f45", "896f4d87257abd0f628c1ffbbfdac38c86a56f50", "0247998a1c045e601dc4d65c53282b5e655be62b", "06f8aa1f436a33014e9883153b93581eea8c5c70", "b3afa234996f44852317af382b98f5f557cab25a", "03b03f5a301b2ff88ab3bb4969f54fd9a35c7271", 
"9b684e2e2bb43862f69b12c6be94db0e7a756187", "12408baf69419409d228d96c6f88b6bcde303505", "7711a7404f1f1ac3a0107203936e6332f50ac30c", "e1312b0b0fd660de87fa42de39316b28f9336e70", "26a72e9dd444d2861298d9df9df9f7d147186bcd", "7fc5b6130e9d474dfb49d9612b6aa0297d481c8e", "574ad7ef015995efb7338829a021776bf9daaa08", "d8c9bad8d07ae4196027dfb8343b9d9aefb130ff", "4fc936102e2b5247473ea2dd94c514e320375abb", "1bad8a9640cdbc4fe7de12685651f44c4cff35ce", "cfc30ce53bfc204b8764ebb764a029a8d0ad01f4", "68d08ed9470d973a54ef7806318d8894d87ba610", "67ae7ee9557cb486d5e1129b9b24466ffb8c4766", "9c686b318cb7774b6da5e2c712743a5a6cafa423", "0055c7f32fa6d4b1ad586d5211a7afb030ca08cc", "e00241f00fb31c660df6c6f129ca38370e6eadb3", "7fd6bb30ad5d7eb3078efbb85f94d2d60e701115", "07c83f544d0604e6bab5d741b0bf9a3621d133da", "90c4a6c6f790dbcef9a29c9a755458be09e319b6", "279459cbbc5c6db4802e9c737cc72a612d76f7fc", "1ebdfceebad642299e573a8995bc5ed1fad173e3", "465d5bb11912005f0a4f0569c6524981df18a7de", "eee2d2ac461f46734c8e674ae14ed87bbc8d45c6", "f41d7f891a1fc4569fe2df66e67f277a1adef229", "b9f2a755940353549e55690437eb7e13ea226bbf", "14070478b8f0d84e5597c3e67c30af91b5c3a917", "9888edfb6276887eb56a6da7fe561e508e72a517", "7ad77b6e727795a12fdacd1f328f4f904471233f", "2f1485994ef2c09a7bb2874eb8252be8fe710db1", "b68f55bab12ca50b033d8b5c773ce5fe88c5923d", "780557daaa39a445b24c41f637d5fc9b216a0621", "04b4c779b43b830220bf938223f685d1057368e9", "07ea3dd22d1ecc013b6649c9846d67f2bf697008", "b03446a2de01126e6a06eb5d526df277fa36099f", "611961abc4dfc02b67edd8124abb08c449f5280a", "20a0b23741824a17c577376fdd0cf40101af5880", "4e444db884b5272f3a41e4b68dc0d453d4ec1f4c", "beabb0d9d30871d517c5d915cf852f7f5293f52f", "6f84e61f33564e5188136474f9570b1652a0606f", "45e459462a80af03e1bb51a178648c10c4250925", "18b9dc55e5221e704f90eea85a81b41dab51f7da", "675b2caee111cb6aa7404b4d6aa371314bf0e647", "94ac3008bf6be6be6b0f5140a0bea738d4c75579", "ec6a2093059fd6eada9944212f64a659881abb95", "372a8bf0ef757c08551d41e40cb7a485527b6cd7", 
"89bc311df99ad0127383a9149d1684dfd8a5aa34", "025720574ef67672c44ba9e7065a83a5d6075c36", "5f6ab4543cc38f23d0339e3037a952df7bcf696b", "09903df21a38e069273b80e94c8c29324963a832", "07fa153b8e6196ee6ef6efd8b743de8485a07453", "7e27d946d23229220bcb6672aacab88e09516d39", "f0398ee5291b153b716411c146a17d4af9cb0edc", "beb2f1a6f3f781443580ffec9161d9ce6852bf48", "57c59011614c43f51a509e10717e47505c776389", "44c9b5c55ca27a4313daf3760a3f24a440ce17ad", "17027a05c1414c9a06a1c5046899abf382a1142d", "aa581b481d400982a7e2a88830a33ec42ad0414f", "94498fae459167841e8b2f4b911493fc3c7da22f", "3af130e2fd41143d5fc49503830bbd7bafd01f8b", "07de8371ad4901356145722aa29abaeafd0986b9", "997b9ffe2f752ba84a66730cfd320d040e7ba2e2", "5c4d4fd37e8c80ae95c00973531f34a6d810ea3a", "06262d6beeccf2784e4e36a995d5ee2ff73c8d11", "9c1664f69d0d832e05759e8f2f001774fad354d6", "7f511a6a2b38a26f077a5aec4baf5dffc981d881", "ce3f3088d0c0bf236638014a299a28e492069753", "2a41388040141ef6b016c100ef833a2a73ab8b42", "43c3b6a564b284382fdf8ae33f974f4e7a89600e", "009a18d04a5e3ec23f8ffcfc940402fd8ec9488f", "70109c670471db2e0ede3842cbb58ba6be804561", "4c822785c29ceaf67a0de9c699716c94fefbd37d", "c61eaf172820fcafaabf39005bd4536f0c45f995", "4097fef623185557bb1842501cfdc97f812fc66d", "d63bd06340dd35590a22222509e455c49165ee13", "7c2c9b083817f7a779d819afee383599d2e97ed8", "170a5f5da9ac9187f1c88f21a88d35db38b4111a", "a546fd229f99d7fe3cf634234e04bae920a2ec33", "cec70cf159b51a18b39c80fac1ad34f65f3691ef", "0a7309147d777c2f20f780a696efe743520aa2db", "c317181fa1de2260e956f05cd655642607520a4f", "22f94c43dd8b203f073f782d91e701108909690b", "a3f684930c5c45fcb56a2b407d26b63879120cbf", "c1fc70e0952f6a7587b84bf3366d2e57fc572fd7", "22dada4a7ba85625824489375184ba1c3f7f0c8f", "6e46d8aa63db3285417c8ebb65340b5045ca106f", "f6fc112ff7e4746b040c13f28700a9c47992045e", "06526c52a999fdb0a9fd76e84f9795a69480cecf", "7c61d21446679776f7bdc7afd13aedc96f9acac1", "e8d1b134d48eb0928bc999923a4e092537e106f6", "afa57e50570a6599508ee2d50a7b8ca6be04834a", 
"b3b532e8ea6304446b1623e83b0b9a96968f926c", "b7c6df1ae0e8348feecd65e9ad574d1e04d212a5", "5f7094ba898a248e1e6b37e3d9fb795e59131cdc", "7577a1ddf9195513a5c976887ad806d1386bb1e9", "b3200539538eca54a85223bf0ec4f3ed132d0493", "11f17191bf74c80ad0b16b9f404df6d03f7c8814", "7f445191fa0475ff0113577d95502a96dc702ef9", "d691440030394c2e00a2ab47aba4f8b5fca5f25a", "9abd35b37a49ee1295e8197aac59bde802a934f3", "d93baa5ecf3e1196b34494a79df0a1933fd2b4ec", "ebb9d53668205c5797045ba130df18842e3eadef", "95f1790da3d0a4a5310a050512ce355b3c5aac86", "4abd49538d04ea5c7e6d31701b57ea17bc349412", "b08203fca1af7b95fda8aa3d29dcacd182375385", "4e6c9be0b646d60390fe3f72ce5aeb0136222a10", "34c062e2b8a3f6421b9f4ff22f115a36d4aba823", "b9dc8cc479cacda1f23b91df00eb03f88cc0c260", "a77e9f0bd205a7733431a6d1028f09f57f9f73b0", "4bbbee93519a4254736167b31be69ee1e537f942", "7c30ea47f5ae1c5abd6981d409740544ed16ed16", "c02847a04a99a5a6e784ab580907278ee3c12653", "8ad0d8cf4bcb5c7eccf09f23c8b7d25439c4ae2b", "de048065ea2c5b3e306e2c963533df055e7dfcaa", "ed388878151a3b841f95a62c42382e634d4ab82e", "1ed617d14dbc53b20287d3405b14c68d8dad3965", "749d605dd12a4af58de1fae6f5ef5e65eb06540e", "5c8ae37d532c7bb8d7f00dfde84df4ba63f46297", "ddd9d7cb809589b701fba9f326d7cf998a63b14f", "f27e5a13c1c424504b63a9084c50f491c1b17978", "703dc33736939f88625227e38367cfb2a65319fe", "0fc254272db096a9305c760164520ad9914f4c9e", "3cbd3124b1b4f95fcdf53abd358d7ceec7861dda", "de3285da34df0262a4548574c2383c51387a24bf", "3cc2d6ace4cf0bc3a6c4df5ca8da892275ca201f", "55b9b1c1c5487f5f62b44340104a9c4cc2ed7c96", "d444e010049944c1b3438c9a25ae09b292b17371", "8f3675e979629ca9cee9436d37763f546edb8d40", "6440d6c7081efe4538a1c75e93144f3d142feb41", "1c30bb689a40a895bd089e55e0cad746e343d1e2", "a9506c60ec48056087ee3e10d28ff7774fbbd553", "ec576efd18203bcb8273539fa277839ec92232a1", "1b211f8221162ce7ef212956b637b50e30ad48f4", "0faeec0d1c51623a511adb779dabb1e721a6309b", "0bce54bfbd8119c73eb431559fc6ffbba741e6aa", "7306d42ca158d40436cc5167e651d7ebfa6b89c1", 
"1fe1bd6b760e3059fff73d53a57ce3a6079adea1", "53a41c711b40e7fe3dc2b12e0790933d9c99a6e0", "17035089959a14fe644ab1d3b160586c67327db2", "8fe38962c24300129391f6d7ac24d7783e0fddd0", "6b17b219bd1a718b5cd63427032d93c603fcf24f", "dc974c31201b6da32f48ef81ae5a9042512705fe", "b4d209845e1c67870ef50a7c37abaf3770563f3e", "480ccd25cb2a851745f5e6e95d33edb703efb49e", "2525f336af31178b836e27f8c60056e18f1455d2", "38c901a58244be9a2644d486f9a1284dc0edbf8a", "f1ae9f5338fcff577b1ae9becdb66007fe57bd45", "a92adfdd8996ab2bd7cdc910ea1d3db03c66d34f", "3266fcd1886e8ad883714e38203e66c0c6487f7b", "861a51e66553979535df2b41971150453ab26372", "cf784156547c3be146706e2763c1a52d939d1722", "211c42a567e02987a6f89b89527de3bf4d2e9f90", "e3ce4c3e1279e3dc0c14ff3bb2920aced9e62638", "2717998d89d34f45a1cca8b663b26d8bf10608a9", "ef7b8f73e95faa7a747e0b04363fced0a38d33b0", "c829be73584966e3162f7ccae72d9284a2ebf358", "ddbd24a73ba3d74028596f393bb07a6b87a469c0", "ad4d1ecf5c5473c050e11f6876ce148de1c8920a", "eb566490cd1aa9338831de8161c6659984e923fd", "ce11b2d7905d2955c4282db5b68482edb846f29f", "107fc60a6c7d58a6e2d8572ad8c19cc321a9ef53", "a4a5ad6f1cc489427ac1021da7d7b70fa9a770f2", "781c2553c4ed2a3147bbf78ad57ef9d0aeb6c7ed", "5a07945293c6b032e465d64f2ec076b82e113fa6", "b2e5df82c55295912194ec73f0dca346f7c113f6", "3bcd72be6fbc1a11492df3d36f6d51696fd6bdad", "abc1ef570bb2d7ea92cbe69e101eefa9a53e1d72", "bd26dabab576adb6af30484183c9c9c8379bf2e0", "6622776d1696e79223f999af51e3086ba075dbd1", "48d18b5f17672af694f0f5b5ec577516dbf697f4", "df7af280771a6c8302b75ed0a14ffe7854cca679", "424745b006491ae2caef924287e50fc6706c06ee", "e6ee36444038de5885473693fb206f49c1369138", "0178929595f505ef7655272cc2c339d7ed0b9507", "942f6eb2ec56809430c2243a71d03cc975d0a673", "38d56ddcea01ce99902dd75ad162213cbe4eaab7", "52b102620fff029b80b3193bec147fe6afd6f42e", "4698a599425c3a6bae1c698456029519f8f2befe", "a0d5990eb150cdcb1c8b2967e6a4fe7a5d85063b", "4cc326fc977cf967eef5f3135bf0c48d07b79e2d", "c3d874336eb8fae92ab335393fd801fa8df98412", 
"f2c568fe945e5743635c13fe5535af157b1903d1", "fcc6fe6007c322641796cb8792718641856a22a7", "d61e794ec22a4d4882181da17316438b5b24890f", "0e05b365af662bc6744106a7cdf5e77c9900e967", "969dd8bc1179c047523d257516ade5d831d701ad", "159caaa56c2291bedbd41d12af5546a7725c58d4", "cdf0dc4e06d56259f6c621741b1ada5c88963c6d", "23e824d1dfc33f3780dd18076284f07bd99f1c43", "e10cbd049ac2f5cc8af9eb8e587b3408ad4bb111", "1a53ca294bbe5923c46a339955e8207907e9c8c6", "55bc7abcef8266d76667896bbc652d081d00f797", "03f14159718cb495ca50786f278f8518c0d8c8c9", "1750db78b7394b8fb6f6f949d68f7c24d28d934f", "72119cb98f9502ec639de317dccea57fd4b9ee55", "cb4d8cef8cec9406b1121180d47c14dfef373882", "a8bb698d1bb21b81497ef68f0f52fa6eaf14a6bf", "7862d40da0d4e33cd6f5c71bbdb47377e4c6b95a", "ac21c8aceea6b9495574f8f9d916e571e2fc497f", "a5f200d52b588030c76dcc38c504f65d772a1f5e", "559795d3f3b096ceddc03720ba62d79d50eae300", "e1179a5746b4bf12e1c8a033192326bf7f670a4d", "c0270a57ad78da6c3982a4034ffa195b9e932fda", "28a45770faf256f294ce3bbd5de25c6d5700976e", "b259f57f41f4b3b5b7ca29c5acb6f42186bbcf23", "e66b4aa85524f493dafde8c75176ac0afad5b79c", "a2fbaa0b849ecc74f34ebb36d1442d63212b29d2", "23aba7b878544004b5dfa64f649697d9f082b0cf", "fb6f5cb26395608a3cf0e9c6c618293a4278a8ad", "a75de488eaacb1dafffbe667465390f101498aaf", "e6d46d923f201da644ae8d8bd04721dd9ac0e73d", "0f64e26d6dd6f1c99fe2050887fac26cafe9ed60", "965f8bb9a467ce9538dec6bef57438964976d6d9", "45edb29fb7eed5a52040300e1fd3cd53f1bdb429", "272e487dfa32f241b622ac625f42eae783b7d9aa", "ca3e88d87e1344d076c964ea89d91a75c417f5ee", "55c40cbcf49a0225e72d911d762c27bb1c2d14aa", "af278274e4bda66f38fd296cfa5c07804fbc26ee", "0c79a39a870d9b56dc00d5252d2a1bfeb4c295f1", "c65cfc9d3568c586faf18611c4124f6b7c0c1a13", "81af86e3d343a40ce06a3927b6aa8c8853f6811a", "6e173ad91b288418c290aa8891193873933423b3", "d4885ca24189b4414031ca048a8b7eb2c9ac646c", "8a6033cbba8598945bfadd2dd04023c2a9f31681", "c26b43c2e1e2da96e7caabd46e1d7314acac0992", "289cfcd081c4393c7d6f63510747b5372202f855", 
"0b5a82f8c0ee3640503ba24ef73e672d93aeebbf", "120785f9b4952734818245cc305148676563a99b", "8b38124ff02a9cf8ad00de5521a7f8a9fa4d7259", "29156e4fe317b61cdcc87b0226e6f09e416909e0", "9b1022a01ca4ecf8c1fa99b1b39a93924de2fcfb", "dbed26cc6d818b3679e46677abc9fa8e04e8c6a6", "48734cb558b271d5809286447ff105fd2e9a6850", "7789a5d87884f8bafec8a82085292e87d4e2866f", "78f2c8671d1a79c08c80ac857e89315197418472", "5b97e997b9b654373bd129b3baf5b82c2def13d1", "758d7e1be64cc668c59ef33ba8882c8597406e53", "855184c789bca7a56bb223089516d1358823db0b", "d11d0151618987ce00a88ceda55d35f0bb89122e", "e8c051d9e7eb8891b23cde6cbfad203011318a4f", "9db4b25df549555f9ffd05962b5adf2fd9c86543", "926e97d5ce2a6e070f8ec07c5aa7f91d3df90ba0", "e3657ab4129a7570230ff25ae7fbaccb4ba9950c", "8862a573a42bbaedd392e9e634c1ccbfd177a01d", "bebea83479a8e1988a7da32584e37bfc463d32d4", "2a3e19d7c54cba3805115497c69069dd5a91da65", "254964096e523d5e48e03390ce440c9af337d200", "1885acea0d24e7b953485f78ec57b2f04e946eaf", "87ee56feefdb39938cda7f872e784d9d986713af", "c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8", "aa1607090fbc80ab1e9c0f25ffe8b75b777e5fd8", "7e18b5f5b678aebc8df6246716bf63ea5d8d714e", "67386772c289cd40db343bdc4cb8cb4f58271df2", "bba281fe9c309afe4e5cc7d61d7cff1413b29558", "203009d3608bdc31ffc3991a0310b9e98b630c4d", "28e1668d7b61ce21bf306009a62b06593f1819e3", "2eca099b90274fb28569f19ef945f43758f5b367", "5da2ae30e5ee22d00f87ebba8cd44a6d55c6855e", "07e639abf1621ceff27c9e3f548fadfa2052c912", "e0ed0e2d189ff73701ec72e167d44df4eb6e864d", "9820920d4544173e97228cb4ab8b71ecf4548475", "38345264a9ca188c4facffe6e18a7e6865fb2966", "c7e4c7be0d37013de07b6d829a3bf73e1b95ad4e", "bf0f0eb0fb31ee498da4ae2ca9b467f730ea9103", "93747de3d40376761d1ef83ffa72ec38cd385833", "f8a5bc2bd26790d474a1f6cc246b2ba0bcde9464", "fab2fc6882872746498b362825184c0fb7d810e4", "b7740dba37a3cbd5c832a8deb9a710a28966486a", "7ec7163ec1bc237c4c2f2841c386f2dbfd0cc922", "7d81b804e23ee2bd04c1def6201b91be6de0d88a", "28b26597a7237f9ea6a9255cde4e17ee18122904", 
"dd6826e9520a6e72bcd24d1bdb930e78c1083b31", "2322ec2f3571e0ddc593c4e2237a6a794c61251d", "3cd5da596060819e2b156e8b3a28331ef633036b", "c0d1d9a585ef961f1c8e6a1e922822811181615c", "2ed4973984b254be5cba3129371506275fe8a8eb", "75308067ddd3c53721430d7984295838c81d4106", "b8caf1b1bc3d7a26a91574b493c502d2128791f6", "b22317a0bbbcc79425f7c8a871b2bf211ba2e9c4", "f412d9d7bc7534e7daafa43f8f5eab811e7e4148", "169076ffe5e7a2310e98087ef7da25aceb12b62d", "e30dc2abac4ecc48aa51863858f6f60c7afdf82a", "7767059c935fb773d5e6f559b9eca6e72caa456d", "12cb3bf6abf63d190f849880b1703ccc183692fe", "0450dacc43171c6e623d0d5078600dd570de777e", "fbc53ab5697ee6f4f270153dbdee2d93cfda7b5f", "892c911ca68f5b4bad59cde7eeb6c738ec6c4586", "85fd2bda5eb3afe68a5a78c30297064aec1361f6", "77b1db2281292372c38926cc4aca32ef056011dc", "faf5583063682e70dedc4466ac0f74eeb63169e7", "3b38c06caf54f301847db0dd622a6622c3843957", "c472436764a30278337aca9681eee456bee95c34", "5bc0a89f4f73523967050374ed34d7bc89e4d9e1", "18d51a366ce2b2068e061721f43cb798177b4bb7", "732686d799d760ccca8ad47b49a8308b1ab381fb", "66f4d7c381bd1798703977de2e38b696c6641b77", "372fb32569ced35eaf3740a29890bec2be1869fa", "1dacc2f4890431d867a038fd81c111d639cf4d7e", "688754568623f62032820546ae3b9ca458ed0870", "44fbbaea6271e47ace47c27701ed05e15da8f7cf", "d83d2fb5403c823287f5889b44c1971f049a1c93", "43af016138d541c95e9d1880413e05356fa9a323", "34bc8ecec0c0b328cd8c485cb34d4d2f4b84e0c9", "d24dafe10ec43ac8fb98715b0e0bd8e479985260", "f66f3d1e6e33cb9e9b3315d3374cd5f121144213", "57a14a65e8ae15176c9afae874854e8b0f23dca7", "f4411787688ca40466ee059ec64bf56d746733c1", "b85d953de16eecaecccaa8fad4081bd6abda9b1b", "4e4d034caa72dce6fca115e77c74ace826884c66", "1159ff04fd17c59515199e0fc2d5e01e72818b59", "c8fb8994190c1aa03c5c54c0af64c2c5c99139b4", "4ecfd4273b5418fd0f3121eaefda0a4c48f6aaf0", "0da4c3d898ca2fff9e549d18f513f4898e960aca", "e0244a8356b57a5721c101ead351924bcfb2eef4", "492116d16a39eb54454c7ffb1754cea27ad3a171", "968b983fa9967ff82e0798a5967920188a3590a8", 
"49659fb64b1d47fdd569e41a8a6da6aa76612903", "081286ede247c5789081502a700b378b6223f94b", "c06b13d0ec3f5c43e2782cd22542588e233733c3", "6a184f111d26787703f05ce1507eef5705fdda83", "a9fdbe102f266cc20e600fa6b060a7bc8d1134e9", "b32cf547a764a4efa475e9c99a72a5db36eeced6", "16892074764386b74b6040fe8d6946b67a246a0b", "d7fe2a52d0ad915b78330340a8111e0b5a66513a", "e180572400b64860e190a8bc04ef839fa491e056", "e7cac91da51b78eb4a28e194d3f599f95742e2a2", "cadab913f699adceebbd0f0abacb19d5f1deda84", "14e8dbc0db89ef722c3c198ae19bde58138e88bf", "b1e4f8c15ff30cc7d35ab25ff3eddaf854e0a87c", "814d091c973ff6033a83d4e44ab3b6a88cc1cb66", "2dfe0e7e81f65716b09c590652a4dd8452c10294", "ee7e8aec3ebb37e41092e1285e4f81916ce92c18", "18c72175ddbb7d5956d180b65a96005c100f6014", "f449c85b8ba5fa67ead341c7ad4ec396f4ab2dd6", "68d4056765c27fbcac233794857b7f5b8a6a82bf", "35e87e06cf19908855a16ede8c79a0d3d7687b5c", "096eb8b4b977aaf274c271058feff14c99d46af3", "7c2ec6f4ab3eae86e0c1b4f586e9c158fb1d719d", "0742d051caebf8a5d452c03c5d55dfb02f84baab", "f88ce52c5042f9f200405f58dbe94b4e82cf0d34", "66ec085c362f698b40d6e0e7b10629462280c062", "656aeb92e4f0e280576cbac57d4abbfe6f9439ea", "f2e9494d0dca9fb6b274107032781d435a508de6", "94e259345e82fa3015a381d6e91ec6cded3971b4", "2f1b521c29ab075a0cd9bbf56ba26ee13d5e4d76", "86881ce8f80adea201304ca6bb3aa413d94e9dd0", "bbc47f421ab161f22f2699ee7bbb7fc8aec1cb7b", "bd6099429bb7bf248b1fd6a1739e744512660d55", "edbb8cce0b813d3291cae4088914ad3199736aa0", "b749ca71c60904d7dad6fc8fa142bf81f6e56a62", "e3917d6935586b90baae18d938295e5b089b5c62", "29659b6fc4dceb117cec687d8accda5f514080ed", "869583b700ecf33a9987447aee9444abfe23f343", "0394040749195937e535af4dda134206aa830258", "adf31283550ff810540bad0edd2c8878ac252b20", "239e305c24155add73f2a0ba5ccbd66b37f77e14", "0a4a8768c1ed419baebe1c420bd9051760875cbe", "dbe0e533d715f8543bcf197f3b8e5cffa969dfc0", "794a51097385648e3909a1acae7188f5ab881710", "7735f63e5790006cb3d989c8c19910e40200abfc", "50e47857b11bfd3d420f6eafb155199f4b41f6d7", 
"d671a210990f67eba9b2d3dda8c2cb91575b4a7a", "f6fa97fbfa07691bc9ff28caf93d0998a767a5c1", "9306f61c7c3bdcdcb257cd437ca59df8e599e326", "bc871497626afb469d25c4975aa657159269aefe", "0d75c7d9a00f859cffe7d0bd78dd35d0b4bc7fa6", "d41bcb0c79f46aca47b9f9b8a779ce80a2a351f9", "a9d1d00d6897ae23c9a7e9fb75a3c7417a6730a4", "f0ca31fd5cad07e84b47d50dc07db9fc53482a46", "419a6fca4c8d73a1e43003edc3f6b610174c41d2", "ffcbedb92e76fbab083bb2c57d846a2a96b5ae30", "e6dc1200a31defda100b2e5ddb27fb7ecbbd4acd", "be02c2ea2b54d8fa30e2528f91a801ecf9f2185c", "32a40c43a9bc1f1c1ed10be3b9f10609d7e0cb6b", "6da3ff4250103369f4a6a39c8fb982438a97525c", "50ee027c63dcc5ab5cd0a6cdffb1994f83916a46", "dee39ab960882e70a87501118dfb61cf7a0cd017", "01c09acf0c046296643de4c8b55a9330e9c8a419", "b88ceded6467e9b286f048bb1b17be5998a077bd", "eef432868e85b95a7d9d9c7b8c461637052318ca", "ecdd83002f69c2ccc644d07abb44dd939542d89d", "f842b13bd494be1bbc1161dc6df244340b28a47f", "ea80a050d20c0e24e0625a92e5c03e5c8db3e786", "adaf2b138094981edd615dbfc4b7787693dbc396", "803803b5c2c61046d63674f85ecf0123f9d2c4b8", "158aa18c724107587bcc4137252d0ba10debf417", "6411c72a2da7180538baf316bac54748fdf2243c", "05e96d76ed4a044d8e54ef44dac004f796572f1a", "804b4c1b553d9d7bae70d55bf8767c603c1a09e3", "1e0d92b9b4011822825d1f7dc0eba6d83504d45d", "346dbc7484a1d930e7cc44276c29d134ad76dc3f", "1ec73ee49e422b4509c016ce244822144c849089", "de8657e9eab0296ac062c60a6e10339ccf173ec1", "a5d4cc596446517dfaa4d92276a12d5e1c0a284c", "d34f546e61eccbac2450ca7490f558e751e13ec3", "8536fd81b568b2c9e567adad83be3a048664ade6", "8c81705e5e4a1e2068a5bd518adc6955d49ae434", "0952ac6ce94c98049d518d29c18d136b1f04b0c0", "960ad662c2bb454d69006492cc3f52d1550de55d", "ad08426ca57da2be0e9f8c1f673e491582edb896", "0f29bc5d8458358d74dc8c4fd6968b4182dd71d2", "5e19d7307ea67799eb830d5ce971f893e2b8a9ca", "28d55935cc36df297fe21b98b4e2b07b5720612e", "11cc0774365b0cc0d3fa1313bef3d32c345507b1", "68bf34e383092eb827dd6a61e9b362fcba36a83a", "b2749caec0094e186d3ee850151c899b8508f47a", 
"15b07dae17f184c8e6efbc9d2b58526d8e8dc9d4", "80840df0802399838fe5725cce829e1b417d7a2e", "9bac481dc4171aa2d847feac546c9f7299cc5aa0", "810f5606a4769fc3dd99611acf805596fb79223d", "2f3ec6d666d7b94b63a104f92859199428b77f78", "57f5711ca7ee5c7110b7d6d12c611d27af37875f", "e0765de5cabe7e287582532456d7f4815acd74c1", "ee46e391288dd3bc3e71cb47715a83dacb9d2907", "7c9622ad1d8971cd74cc9e838753911fe27ccac4", "3983637022992a329f1d721bed246ae76bc934f7", "a090d61bfb2c3f380c01c0774ea17929998e0c96", "2ee1ba1c3d4797fdae46d3d5f01db7ef5903dadd", "6eaeac9ae2a1697fa0aa8e394edc64f32762f578", "e4df98e4b45a598661a47a0a8900065716dafd6d", "b446cf353744a4b640af88d1848a1b958169c9f2", "bca39960ba46dc3193defe0b286ee0bea4424041", "0969e0dc05fca21ff572ada75cb4b703c8212e80", "21cbf46c6adfb3a44ed2b30ff0b21a8391c18b13", "988849863c3a45bcedacf8bd5beae3cc9210ce28", "5c526ee00ec0e80ba9678fee5134dae3f497ff08", "01379c50c392c104694ccb871a4b6a36d514f102", "02f4b900deabbe7efa474f2815dc122a4ddb5b76", "60bffecd79193d05742e5ab8550a5f89accd8488", "61329bc767152f01aa502989abc854b53047e52c", "332d773b70f2f6fb725d49f314f57b8f8349a067", "4866a5d6d7a40a26f038fc743e16345c064e9842", "b3050dc48600acf2f75edf1f580a1f9e9cb3c14a", "da7bbfa905d88834f8929cb69f41a1b683639f4b", "0d06b3a4132d8a2effed115a89617e0a702c957a", "13188a88bbf83a18dd4964e3f89d0bc0a4d3a0bd", "c007ee91452b6c99c351b149cb8673f945bf0dd4", "050e51268b0fb03033428ac777ccfef2db752ab3", "888581e88c1cbfb8e905c317c6944b6ac2d4557c", "fbc591cde7fb7beb985437a22466f9cf4b16f8b1", "19666b9eefcbf764df7c1f5b6938031bcf777191", "641f34deb3bdd123c6b6e7b917519c3e56010cb7", "9c7444c6949427994b430787a153d5cceff46d5c", "a5d76710dc15ebc7d8b4dc976604315f1e2fc3ba", "fcd945eb1cf5f87eefa444660dbdf94f5bb0092e", "0abfb5b89e9546f8a5c569ab35b39b888e7cea46", "108b2581e07c6b7ca235717c749d45a1fa15bb24", "22e121a8dea49e3042de305574356477ecacadda", "2f5b51af8053cf82ab52bbfd46b56999222ec21c", "016cbf0878db5c40566c1fbc237686fbad666a33", "e83e5960c2aabab654e1545eb419ef64c25800d5", 
"34c594abba9bb7e5813cfae830e2c4db78cf138c", "5dcf78de4d3d867d0fd4a3105f0defae2234b9cb", "bef926d63512dbffcf1af59f72295ef497f5acf9", "2ff6f7e489ae8ff054422444a5e0604e30f3e97b", "605f6817018a572797095b83bec7fae7195b2abc", "328da943e22adef5957c08b6909bda09d931a350", "262cdbc57ecf5c18756046c0d8b9aa7eb10e3b19", "841a5de1d71a0b51957d9be9d9bebed33fb5d9fa", "d9739d1b4478b0bf379fe755b3ce5abd8c668f89", "ca902aeec4fa54d32a4fed9ba89a7fb2f7131734", "bfb98423941e51e3cd067cb085ebfa3087f3bfbe", "9dcfa771a7e87d7681348dd9f6cf9803699b16ce", "4042bbb4e74e0934f4afbedbe92dd3e37336b2f4", "71bece8ec4934e3034f76d8ba19199c5b8ec52ea", "d4df31006798ee091b86e091a7bf5dce6e51ba3e", "00d0b01d6a5f12216e078001b7c49225d2495b21", "fbb6ee4f736519f7231830a8e337b263e91f06fe", "4dbfbe5fd96c9efc8c3c2fd54406b62979482678", "9f62ac43a1086c22b9a3d9f192c975d1a5a4b31f", "29d414bfde0dfb1478b2bdf67617597dd2d57fc6", "470dbd3238b857f349ebf0efab0d2d6e9779073a", "518edcd112991a1717856841c1a03dd94a250090", "29e96ec163cb12cd5bd33bdf3d32181c136abaf9", "bf961e4a57a8f7e9d792e6c2513ee1fb293658e9", "48fea82b247641c79e1994f4ac24cad6b6275972", "5167e16b53283be5587659ea8eaa3b8ef3fddd33", "c1a70d63d1667abfb1f6267f3564110d55c79c0d", "69063f7e0a60ad6ce16a877bc8f11b59e5f7348e", "352d61eb66b053ae5689bd194840fd5d33f0e9c0", "1677d29a108a1c0f27a6a630e74856e7bddcb70d", "4cf3419dbf83a76ccac11828ca57b46bbbe54e0a", "231a6d2ee1cc76f7e0c5912a530912f766e0b459", "a3775b3a0e78b890d9ca79b0aabd982551474a88", "e6e5a6090016810fb902b51d5baa2469ae28b8a1", "a8e7561ada380f2f50211c67fc45c3b3dea96bdb", "009bf86913f1c366d9391bf236867d84d12fa20c", "e5ea7295b89ef679e74919bf957f58d55ad49489", "1198572784788a6d2c44c149886d4e42858d49e4", "28f5138d63e4acafca49a94ae1dc44f7e9d84827", "13841d54c55bd74964d877b4b517fa94650d9b65", "97c1f68fb7162af326cd0f1bc546908218ec5da6", "78c1ad33772237bf138084220d1ffab800e1200d", "3fdcc1e2ebcf236e8bb4a6ce7baf2db817f30001", "5f0d4657eab4152a1785ee0a25b5b499cd1163ec", "5ebb247963d2d898d420f1f4a2486102a9d05aa9", 
"22dbdace88c8f4bda2843ed421e3708ec0744237", "69d29012d17cdf0a2e59546ccbbe46fa49afcd68", "7c25213a7fa5fe13199d3112613ea0b9045320d1", "bdf5434648356ce22bdbf81d2951e4bb00228e4d", "74b0095944c6e29837c208307a67116ebe1231c8", "8dffbb6d75877d7d9b4dcde7665888b5675deee1", "77be118034a700e5b7d9633f50f6fbb7fabec8ef", "0f22b89341d162a7a0ebaa3c622d9731e5551064", "817321d4008bf95e9be00cf6cb1554a1aed40027", "47f5f740e225281c02c8a2ae809be201458a854f", "74cec83ee694b5d0e07d5d0bacd0aa48a80776aa", "6a16b91b2db0a3164f62bfd956530a4206b23fea", "06fb92e110d077c27d401d2f9483964cd0615284", "ede16b198b83d04b52dc3f0dafc11fd82c5abac4", "1439bf9ba7ff97df9a2da6dae4784e68794da184", "ff9e042cccbed7e350a25b7d806cd17fb79dfdf9", "690d669115ad6fabd53e0562de95e35f1078dfbb", "92292fffc36336d63f4f77d6b8fc23b0c54090e9", "3d2c89676fcc9d64aaed38718146055152d22b39", "b9c9c7ef82f31614c4b9226e92ab45de4394c5f6", "4188bd3ef976ea0dec24a2512b44d7673fd4ad26", "a216f7863fc6ab15e2bb7a538dfe00924e1da0ab", "a50b4d404576695be7cd4194a064f0602806f3c4", "c138c76809b8da9e5822fb0ae38457e5d75287e0", "439ac8edfa1e7cbc65474cab544a5b8c4c65d5db", "656ef752b363a24f84cc1aeba91e4fa3d5dd66ba", "9ba358281f2946cba12fff266019193a2b059590", "7477cf04c6b086108f459f693a60272523c134db", "7ad1638f7d76c7e885bc84cd694c60f109f02159", "18855be5e7a60269c0652e9567484ce5b9617caa", "5f9dc3919fb088eb84accb1e490921a134232466", "3bdaf59665e6effe323a1b61308bcac2da4c1b73", "f86c6942a7e187c41dd0714531efd2be828e18ad", "2f8183b549ec51b67f7dad717f0db6bf342c9d02", "1a862270ad9168e3bf5471bda2793c32d4043aa4", "69a77cb816a31c65699cd11c4a3b1b82ae44e903", "6cd96f2b63c6b6f33f15c0ea366e6003f512a951", "40a1935753cf91f29ffe25f6c9dde2dc49bf2a3a", "01d2cf5398c2b3e0f4fc8e8318a4492c95a0b242", "75e5ba7621935b57b2be7bf4a10cad66a9c445b9", "c5437496932dcb9d33519a120821da755951e1a9", "003ba2001bd2614d309d6ec15e9e2cbe86db03a1", "1da1299088a6bf28167c58bbd46ca247de41eb3c", "0d7fcdb99dc0d65b510f2b0b09d3d3cfed390261", "e40df008fd0e5fd169840bf7d72a951411d13c59", 
"4ca9753ab023accbfa75a547a65344ee17b549ba", "48dcf45a1e38adbb9826594f7ffaa5e95ef78395", "1bdfb3deae6e6c0df6537efcd1d7edcb4d7a96e9", "a317083d9aac4062e77aa0854513383c87e47ece", "3f4798c7701da044bdb7feb61ebdbd1d53df5cfe", "2fd007088a75916d0bf50c493d94f950bf55c5e6", "41c56c69b20b3f0b6c8a625009fc0a4d317e047a", "8f992ed6686710164005c20ab16cef6c6ad8d0ea", "760a712f570f7a618d9385c0cee7e4d0d6a78ed2", "2b3ceb40dced78a824cf67054959e250aeaa573b", "6e94c579097922f4bc659dd5d6c6238a428c4d22", "11fdff97f4511ae3d3691cfdeec5a19fa04db6ef", "85f27ec70474fe93f32864dd03c1d0f321979100", "1d97735bb0f0434dde552a96e1844b064af08f62", "3bfb9ba4b74b2b952868f590ff2f164de0c7d402", "f9752fd07b14505d0438bc3e14b23d7f0fe7f48b", "bafb8812817db7445fe0e1362410a372578ec1fc", "4909ed22b1310f1c6f2005be5ce3349e3259ff6a", "f510071fd7fdc6926e3958ebb85518bcfea17f89", "b750b3d8c34d4e57ecdafcd5ae8a15d7fa50bc24", "503b6a490c2b24b9d2aaf642a0fdaf797a8cdb99", "3624ca25f09f3acbcf4d3a4c40b9e45a29c22b94", "50e45e9c55c9e79aaae43aff7d9e2f079a2d787b", "c7b58827b2d07ece676271ae0425e369e3bd2310", "43e268c118ac25f1f0e984b57bc54f0119ded520", "0b878d553f359b38753c6ea27d7acf500a90da15", "1fff309330f85146134e49e0022ac61ac60506a9", "519f4eb5fe15a25a46f1a49e2632b12a3b18c94d", "ad9ba7eade9d4299159512d6d5d07d7d3d26ae58", "449808b7aa9ee6b13ad1a21d9f058efaa400639a", "ae89e464576209b1082da38e0cee7aeabd03d932", "b41374f4f31906cf1a73c7adda6c50a78b4eb498", "1bd50926079e68a6e32dc4412e9d5abe331daefb", "9eb13f8e8d948146bfbae1260e505ba209c7fdc1", "e2106bb3febb4fc8fe91f0fcbc241bcda0e56b1e", "77037a22c9b8169930d74d2ce6f50f1a999c1221", "0bf3513d18ec37efb1d2c7934a837dabafe9d091", "7de8a8b437ec7a18e395be9bf7c8f2d502025cc6", "2bf646a6efd15ab830344ae9d43e10cc89e29f34", "b9cad920a00fc0e997fc24396872e03f13c0bb9c", "6aa0a47f4b986870370c622be51f00f3a1b9d364", "080ab68a898a3703feead145e2c38361ae84a0a8", "ad784332cc37720f03df1c576e442c9c828a587a", "6e8a81d452a91f5231443ac83e4c0a0db4579974", "cc44f1d99b17a049a8186ec04c6a1ecf1906c3c8", 
"05e03c48f32bd89c8a15ba82891f40f1cfdc7562", "30a4b4ef252cb509b58834e7c40862124c737b61", "6f74c3885b684e52096497b811692bd766071530", "0e02dadab802128f6155e099135d03ca6b72f42c", "447a5e1caf847952d2bb526ab2fb75898466d1bc", "7ca7255c2e0c86e4adddbbff2ce74f36b1dc522d", "f180cb7111e9a6ba7cfe0b251c0c35daaef4f517", "15cd05baa849ab058b99a966c54d2f0bf82e7885", "e51f1ee5535017e10a5f77100ff892509ec6b221", "5b9d41e2985fa815c0f38a2563cca4311ce82954", "1f24cef78d1de5aa1eefaf344244dcd1972797e8", "6769cfbd85329e4815bb1332b118b01119975a95", "3e4f84ce00027723bdfdb21156c9003168bc1c80", "092dd7cb6c9b415eb83afb104fa63d7d4290ac33", "4ea63435d7b58d41a5cbcdd34812201f302ca061", "d4331a8dd47b03433f8390da2eaa618751861c64", "d57ce0ff4acb2910c2d1afee2ebb7aa1e72a4584", "3c374cb8e730b64dacb9fbf6eb67f5987c7de3c8", "f1ba2fe3491c715ded9677862fea966b32ca81f0", "df550cb749858648209707bec5410431ea95e027", "89945b7cd614310ebae05b8deed0533a9998d212", "9eaa967d19fc66010b7ade7d94eaf7971a1957f3", "2b0d14dbd079b3d78631117b1304d6c1579e1940", "c71f36c9376d444075de15b1102b4974481be84d", "30c5d2ec584e7b8273af6915aab420fc23ff2761", "17370f848801871deeed22af152489e39b6e1454", "2c883977e4292806739041cf8409b2f6df171aee", "4223666d1b0b1a60c74b14c2980069905088edc6", "0a4fc9016aacae9cdf40663a75045b71e64a70c9", "3e3a87eb24628ab075a3d2bde3abfd185591aa4c", "034c2ed71c31cb0d984d66c7ca753ef2cb6196ca", "68cf263a17862e4dd3547f7ecc863b2dc53320d8", "fdca08416bdadda91ae977db7d503e8610dd744f", "e35b09879a7df814b2be14d9102c4508e4db458b", "856317f27248cdb20226eaae599e46de628fb696", "907bb6c2b292e6db74fad5c0b7a7f1cc2a4d4224", "51f311f724883218bcc511b0403b9a7745b9d40e", "c1a16ee838d977160821951e7264af4b2e7c8265", "327ae6742cca4a6a684a632b0d160dd84d0d8632", "70444627cb765a67a2efba17b0f4b81ce1fc20ff", "65b9c71a4e5886e3ec8ff1f26038c3c08bd96dcb", "a35849af340f80791c4a901ec2f2bbbac06660f5", "cb8c067aeabacd0eb723c5bb23eb41d8d219c57d", "ea96bc017fb56593a59149e10d5f14011a3744a0", "65b1209d38c259fe9ca17b537f3fb4d1857580ae", 
"9c781f7fd5d8168ddae1ce5bb4a77e3ca12b40b6", "d02c54192dbd0798b43231efe1159d6b4375ad36", "1048c753e9488daa2441c50577fe5fdba5aa5d7c", "5b0008ba87667085912ea474025d2323a14bfc90", "57ca530e9acb63487e8591cb6efb89473aa1e5b4", "24e6a28c133b7539a57896393a79d43dba46e0f6", "6689aee6c9599c1af4c607ea5385ac0c2cf0c4b3", "661da40b838806a7effcb42d63a9624fcd684976", "0641dbee7202d07b6c78a39eecd312c17607412e", "e7cfaff65541cde4298a04882e00608d992f6703", "dba7d8c4d2fca41269a2c96b1ea594e2d0b9bdda", "35f084ddee49072fdb6e0e2e6344ce50c02457ef", "8a09668efc95eafd6c3056ff1f0fbc43bb5774db", "17d5e5c9a9ee4cf85dfbb9d9322968a6329c3735", "612b8eda338fcde9400ea93779741282fe4132d6", "ab427f0c7d4b0eb22c045392107509451165b2ba", "1c80bc91c74d4984e6422e7b0856cf3cf28df1fb", "9028fbbd1727215010a5e09bc5758492211dec19", "7957abae15f631c5f5c50de68aa2ad08fe1f366f", "61ffedd8a70a78332c2bbdc9feba6c3d1fd4f1b8", "78cec49ca0acd3b961021bc27d5cf78cbbbafc7e", "82d79658805f6c1aedf7b0b88b47b9555584d7ae", "894f27b6ea68a1ec9b7632533eabf2353b1e9d79", "2e0addeffba4be98a6ad0460453fbab52616b139", "713594c18978b965be87651bb553c28f8501df0a", "76d9f5623d3a478677d3f519c6e061813e58e833", "dfd8602820c0e94b624d02f2e10ce6c798193a25", "bd70f832e133fb87bae82dfaa0ae9d1599e52e4b", "45215e330a4251801877070c85c81f42c2da60fb", "89c84628b6f63554eec13830851a5d03d740261a", "68d40176e878ebffbc01ffb0556e8cb2756dd9e9", "281b91c35a1af97b1405bc724a04e2be6e24971b", "fb2bd6c2959a4f811b712840e599f695dad2967e", "0ed0e48b245f2d459baa3d2779bfc18fee04145b", "7aa062c6c90dba866273f5edd413075b90077b51", "54f442c7fa4603f1814ebd8eba912a00dceb5cb2", "a20036b7fbf6c0db454c8711e72d78f145560dc8", "40fb4e8932fb6a8fef0dddfdda57a3e142c3e823", "8f6d05b8f9860c33c7b1a5d704694ed628db66c7", "982fed5c11e76dfef766ad9ff081bfa25e62415a", "454bf5b99607b4418e931092476ad1798ce5efa4", "f869601ae682e6116daebefb77d92e7c5dd2cb15", "9a6da02db99fcc0690d7ffdc15340b125726ab95", "d7b6bbb94ac20f5e75893f140ef7e207db7cd483", "fc7b34a2e43bb3d3585e1963bb64a488e2f278a0", 
"0aa74ad36064906e165ac4b79dec298911a7a4db", "2116b13eb3af418ef02502715e8f3c98664e699a", "891b31be76e2baa83745f24c2e2013851dc83cbb", "dd8ad6ce8701d4b09be460a6cf058fcd5318c700", "8274069feeff6392b6c5d45d8bfaaacd36daedad", "e82360682c4da11f136f3fccb73a31d7fd195694", "765b2cb322646c52e20417c3b44b81f89860ff71", "1ad97cce5fa8e9c2e001f53f6f3202bddcefba22", "becd5fd62f6301226b8e150e1a5ec3180f748ff8", "58cb1414095f5eb6a8c6843326a6653403a0ee17", "cab3c6069387461c3a9e5d77defe9a84fe9c9032", "0db43ed25d63d801ce745fe04ca3e8b363bf3147", "d1bd956a8523629ed4e2533b01272f22cea534c6", "734d6049fe08d0a24f6aa70bf0d81c217dfca570", "0ebc50b6e4b01eb5eba5279ce547c838890b1418", "a62997208fec1b2fbca6557198eb7bc9340b2409", "31aa20911cc7a2b556e7d273f0bdd5a2f0671e0a", "028e237cb539b01ec72c244f57fdcfb65bbe53d4", "1b5d445741473ced3d4d33732c9c9225148ed4a1", "d46b4e6871fc9974542215f001e92e3035aa08d9", "42ecfc3221c2e1377e6ff849afb705ecd056b6ff", "0c3f7272a68c8e0aa6b92d132d1bf8541c062141", "fb084b1fe52017b3898c871514cffcc2bdb40b73", "6643a7feebd0479916d94fb9186e403a4e5f7cbf", "0ba99a709cd34654ac296418a4f41a9543928149", "9026ee8a89ecfa6bd2688a4943eee027e3fc4b0f", "a56c1331750bf3ac33ee07004e083310a1e63ddc", "f7ffc2dc6801b0feee7d863f02ae2ca34c3e6a66", "177d1e7bbea4318d379f46d8d17720ecef3086ac", "3b21aaf7def52964cf1fcc5f11520a7618c8fae3", "fc5538e60952f86fff22571c334a403619c742c3", "0ea6ee0931f2dc51b0440dfa197433faacd53010", "32c5c65db2af9691f8bb749c953c978959329f8f", "28aa89b2c827e5dd65969a5930a0520fdd4a3dc7", "c1581b5175994e33549b8e6d07b4ea0baf7fe517", "ffd81d784549ee51a9b0b7b8aaf20d5581031b74", "e78394213ae07b682ce40dc600352f674aa4cb05", "fcbec158e6a4ace3d4311b26195482b8388f0ee9", "e1e6e6792e92f7110e26e27e80e0c30ec36ac9c2", "ffc5a9610df0341369aa75c0331ef021de0a02a9", "c95cd36779fcbe45e3831ffcd3314e19c85defc5", "aecb15e3e9191eb135bdba2426967bfac3f068db", "6afeb764ee97fbdedfa8f66810dfc22feae3fa1f", "63b29886577a37032c7e32d8899a6f69b11a90de", "70769def1284fe88fd57a477cde8a9c9a3dff13f", 
"3d0f9a3031bee4b89fab703ff1f1d6170493dc01", "2d925cddb4a42d235b637e4888e24ba876b09e4a", "54966a5ac5a2aa19760fb5197889fa9dcccac1d1", "f8fe1b57347cdcbea755722bf1ae85c4b26f3e5c", "e12b2c468850acb456b0097d5535fc6a0d34efe3", "acab402d706dbde4bea4b7df52812681011f435e", "7535e3995deb84a879dc13857e2bc0796a2f7ce2", "39af06d29a74ad371a1846259e01c14b5343e3d1", "0ba449e312894bca0d16348f3aef41ca01872383", "ba99c37a9220e08e1186f21cab11956d3f4fccc2", "a34d75da87525d1192bda240b7675349ee85c123", "41d9a240b711ff76c5448d4bf4df840cc5dad5fc", "4b7c110987c1d89109355b04f8597ce427a7cd72", "eaaed082762337e7c3f8a1b1dfea9c0d3ca281bf", "40854850a1ca24d9f1e62f2a0432edcbb5633f76", "2be9144a1e66de127192b01907c862381f4011d1", "8cb55413f1c5b6bda943697bba1dc0f8fc880d28", "35d90beea6b4dca8d949aae93f86cf53da72971f", "fa398c6d6bd03df839dce7b59e04f473bc0ed660", "a7ec294373ccc0598cbb0bbb6340c4e56fe5d979", "6f0900a7fe8a774a1977c5f0a500b2898bcbe149", "b018fa5cb9793e260b8844ae155bd06380988584", "275b3cb7c780c663eabbf4d6c6cbc8fe24287c70", "81b8a6cabcd6451b21d5b44e69b0a355d9229cc4", "0fdcfb4197136ced766d538b9f505729a15f0daf", "2f348a2ad3ba390ee178d400be0f09a0479ae17b", "9853136dbd7d5f6a9c57dc66060cab44a86cd662", "0ec2049a1dd7ae14c7a4c22c5bcd38472214f44d", "2cf9088e9faa81872b355a4ea0a9fae46d3c8a08", "126535430845361cd7a3a6f317797fe6e53f5a3b", "4ba38262fe20fab3e4c80215147b498f83843b93", "ab87dfccb1818bdf0b41d732da1f9335b43b74ae", "b631f3c212aab45d73ddc119f1f7d00c3c502a72", "33ba256d59aefe27735a30b51caf0554e5e3a1df", "292eba47ef77495d2613373642b8372d03f7062b", "dbc3ab8c9f564f038e7779b87900c4a0426f3dd1", "6d4c64ca6936f868d793e1b164ddaf19243c19a7", "a3dc109b1dff3846f5a2cc1fe2448230a76ad83f", "aab9a617be6e5507beb457b1e6c2e5b046f9cff0", "7a84368ebb1a20cc0882237a4947efc81c56c0c0", "ffc9d6a5f353e5aec3116a10cf685294979c63d9", "1c6be6874e150898d9db984dd546e9e85c85724e", "eefe8bd6384f565d2e42881f1f9a468d1672989d", "176d9121e4e645344de4706dfb345ad456bfb84a", "499f2b005e960a145619305814a4e9aa6a1bba6a", 
"20a88cc454a03d62c3368aa1f5bdffa73523827b", "0fabb4a40f2e3a2502cd935e54e090a304006c1c", "0c247ac797a5d4035469abc3f9a0a2ccba49f4d8", "2340d810c515dc0c9fd319f598fa8012dc0368a0", "771505abd38641454757de75fe751d41e87f89a4", "03bd58a96f635059d4bf1a3c0755213a51478f12", "9c1860de6d6e991a45325c997bf9651c8a9d716f", "a200885bf6bfa0493d85e7617e65cdabe30a2dab", "91835984eaeb538606972de47c372c5fcfe8b6aa", "cb9092fe74ea6a5b2bb56e9226f1c88f96094388", "a88640045d13fc0207ac816b0bb532e42bcccf36", "72a87f509817b3369f2accd7024b2e4b30a1f588", "ef9b8724f857daec94690d03764dd1299d0cbbcd", "ff402bd06c9c4e94aa47ad80ccc4455efa869af3", "5e6fc99d8f5ebaab0e9c29bc0969530d201e0708", "d1079444ceddb1de316983f371ecd1db7a0c2f38", "26b9d546a4e64c1d759c67cd134120f98a43c2a6", "84be18c7683417786c13d59026f30daeed8bd8c9", "4abaebe5137d40c9fcb72711cdefdf13d9fc3e62", "cb4d3d1b8fbb6df71a184dd8f00f89f84fa8373b", "7a94936ce558627afde4d5b439ec15c59dbcdaa4", "3ee7a8107a805370b296a53e355d111118e96b7c", "72591a75469321074b072daff80477d8911c3af3", "84d7af78c8dba3cad0380a33511725db4db1a54d", "6d207360148ec3991b70952315cb3f1e8899e977", "2b300985a507533db3ec9bd38ade16a32345968e", "9b928c0c7f5e47b4480cb9bfdf3d5b7a29dfd493", "655e94eccddbe1b1662432c1237e61cf13a7d57b", "59c9d416f7b3d33141cc94567925a447d0662d80", "3d948e4813a6856e5b8b54c20e50cc5050e66abe", "a4cc626da29ac48f9b4ed6ceb63081f6a4b304a2", "017ce398e1eb9f2eed82d0b22fb1c21d3bcf9637", "cea2911ccabab40e9c1e5bcc0aa1127cab0c789f", "5fa932be4d30cad13ea3f3e863572372b915bec8", "8f713e3c5b6b166c213e00a3873f750fb5939c9a", "9771e04f48d8a1d7ae262539de8924117a04c20d", "eefb8768f60c17d76fe156b55b8a00555eb40f4d", "e8fdacbd708feb60fd6e7843b048bf3c4387c6db", "5a5511dd059d732e60c62ef817532689f4e0ab46", "564d4ee76c0511bc395dfc8ef8e3b3867fc34a6d", "00ebc3fa871933265711558fa9486057937c416e", "6cd5b56f4262c7e13f61a4a6f28eaa805f4e3291", "a3017bb14a507abcf8446b56243cfddd6cdb542b", "4b6be933057d939ddfa665501568ec4704fabb39", "d5b5c63c5611d7b911bc1f7e161a0863a34d44ea", 
"dedabf9afe2ae4a1ace1279150e5f1d495e565da", "6c0048265758442d1620c2a239590d0d9060c09d", "22fdd8d65463f520f054bf4f6d2d216b54fc5677", "50ce3f8744c219871fbdcab1342d49d589f2626b", "b97c7f82c1439fa1e4525e5860cb05a39cc412ea", "fec6648b4154fc7e0892c74f98898f0b51036dfe", "621f656fedda378ceaa9c0096ebb1556a42e5e0f", "e476cbcb7c1de73a7bcaeab5d0d59b8b3c4c1cbf", "13d430257d595231bda216ef859950caa736ad1d", "eeaeca3a601d65d2d978bf3da43ab42fa5e08ed2", "41ab4939db641fa4d327071ae9bb0df4a612dc89", "2f160a6526ebf10773680dadaba44b006bcec2cb", "11df25b4e074b7610ec304a8733fa47625d9faca", "5957936195c10521dadc9b90ca9b159eb1fc4871", "d5375f51eeb0c6eff71d6c6ad73e11e9353c1f12", "6267dbeb54889be5bdb50c338a7c6ef82287084c", "fe5d6c65e51386f4d36f7434fe6fcd9494fe9361", "7405ed035d1a4b9787b78e5566340a98fe4b63a0", "2ee817981e02c4709d65870c140665ed25b005cc", "758d481bbf24d12615b751fd9ec121500a648bce", "3538d2b5f7ab393387ce138611ffa325b6400774", "01733018a79aa447a27f269a1b9a58cd5f39603e", "278e1441a77fbeebb22c45932d76c557e5663197", "09733129161ca7d65cf56a7ad63c17f493386027", "8694cd9748fb1c128f91a572119978075fede848", "df8da144a695269e159fb0120bf5355a558f4b02", "95008358a631a10ee3c24bfa2bf0c39d136a916e", "e0638e0628021712ac76e3472663ccc17bd8838c", "8147ee02ec5ff3a585dddcd000974896cb2edc53", "8ce9b7b52d05701d5ef4a573095db66ce60a7e1c", "ffaad0204f4af763e3390a2f6053c0e9875376be", "87f738d3883fc56ef0841484478b89c0f241df02", "052cec9fdbfe12ccd02688f3b7f538c0d73555b3", "3a95eea0543cf05670e9ae28092a114e3dc3ab5c", "a2002279c36255c2c78cf5ec0c42cbfe32fe011f", "c73dd452c20460f40becb1fd8146239c88347d87", "276dbb667a66c23545534caa80be483222db7769", "00dc942f23f2d52ab8c8b76b6016d9deed8c468d", "1125760c14ea6182b85a09bf3f5bad1bdad43ef5", "3f9ca2526013e358cd8caeb66a3d7161f5507cbc", "4657d87aebd652a5920ed255dca993353575f441", "dac6e9d708a9757f848409f25df99c5a561c863c", "5cb83eba8d265afd4eac49eb6b91cdae47def26d", "609c35a6fa80af8b2e4ce46b1b16ec36578fd07f", "31bb49ba7df94b88add9e3c2db72a4a98927bb05", 
"05b8673d810fadf888c62b7e6c7185355ffa4121", "0fee3b9191dc1cef21f54232a23530cd8169d3b2", "070ab604c3ced2c23cce2259043446c5ee342fd6", "8c4042191431e9eb43f00b0f14c23765ab9c6688", "90ac0f32c0c29aa4545ed3d5070af17f195d015f", "3c09d15b3e78f38618b60388ec9402e616fc6f8e", "ccca2263786429b1b3572886ce6a2bea8f0dfb26", "0d98750028ea7b84b86e6fec3e67d61e4f690d09", "1a0e1ba4408d12f8a28049da0ff8cad4f91690d5", "b704eaa339d55ef7eac56d0117a8e127fc597686", "1a9a192b700c080c7887e5862c1ec578012f9ed1", "f6dabb4d91bf7389f3af219d486d4e67cec18c17", "9863dd1e2a3d3b4910a91176ac0f2fee5eb3b5e1", "6b18628cc8829c3bf851ea3ee3bcff8543391819", "4d6462fb78db88afff44561d06dd52227190689c", "1cee993dc42626caf5dbc26c0a7790ca6571d01a", "63d8d69e90e79806a062cb8654ad78327c8957bb", "3327e21b46434f6441018922ef31bddba6cc8176", "1b2d9a1c067f692dd48991beff03cd62b9faebf2", "1d10010ea7af43d59e1909d27e4e0e987264c667", "f09d5b6433f63d7403df5650893b78cdcf7319b3", "5e09155cfb7a8bab2217e5d34cd0d6a4a0586868", "565590af15af3d02f0b592b2e201e36708e4fe50", "79cdc8c786c535366cafeced1f3bdeb18ff04e66", "f93606d362fcbe62550d0bf1b3edeb7be684b000", "9c9ef6a46fb6395702fad622f03ceeffbada06e5", "0c378c8dcf707145e1e840a9951519d4176a301f", "5101368f986aa9837fdb3a71cb4299dff6f6325d", "40a5b32e261dc5ccc1b5df5d5338b7d3fe10370d", "19af008599fb17bbd9b12288c44f310881df951c", "425ea5656c7cf57f14781bafed51182b2e6da65f", "6e0a05d87b3cc7e16b4b2870ca24cf5e806c0a94", "1af52c853ff1d0ddb8265727c1d70d81b4f9b3a9", "3d1959048eba5495e765a80c8e0bbd3d65b3d544", "9c6dfd3a38374399d998d5a130ffc2864c37f554", "7d3f6dd220bec883a44596ddec9b1f0ed4f6aca2", "053c2f592a7f153e5f3746aa5ab58b62f2cf1d21", "ce54e891e956d5b502a834ad131616786897dc91", "8027a9093f9007200e8e69e05616778a910f4a5f", "a4f37cfdde3af723336205b361aefc9eca688f5c", "487f9ab19ca6779a014278d93f3e56ff82dac2e3", "daf05febbe8406a480306683e46eb5676843c424", "745e74ae84e1b2b8690d07db523531642023d6c4", "052c5ef6b20bf3e88bc955b6b2e86571be08ba64", "0181fec8e42d82bfb03dc8b82381bb329de00631", 
"f231e9408da20498ba51d93459b3fcdb7b666efb", "3c03d95084ccbe7bf44b6d54151625c68f6e74d0", "f5149fb6b455a73734f1252a96a9ce5caa95ae02", "576d0fea5a1ae9ce22996e726787c49023fc7522", "b262a2a543971e10fcbfc7f65f46115ae895d69e", "e43cc682453cf3874785584fca813665878adaa7", "f7093b138fd31956e30d411a7043741dcb8ca4aa", "6308e9c991125ee6734baa3ec93c697211237df8", "af97e792827438ddea1d5900960571939fc0533e", "89f4bcbfeb29966ab969682eae235066a89fc151", "0f1d42e1296474c9211fb57604574ba0cae4380d", "6d2fd0a9cbea13e840f962ba7c8a9771ec437d3a", "9606b1c88b891d433927b1f841dce44b8d3af066", "5145e42dc46845f3aeb8307452765ba8dc59d2da", "e68869499471bcd6fa8b4dc02aa00633673c0917", "856cc83a3121de89d4a6d9283afbcd5d7ef7aa2b", "202d8d93b7b747cdbd6e24e5a919640f8d16298a", "73fbdd57270b9f91f2e24989178e264f2d2eb7ae", "197c64c36e8a9d624a05ee98b740d87f94b4040c", "3a04eb72aa64760dccd73e68a3b2301822e4cdc3", "63c022198cf9f084fe4a94aa6b240687f21d8b41", "0601416ade6707c689b44a5bb67dab58d5c27814", "03104f9e0586e43611f648af1132064cadc5cc07", "3294e27356c3b1063595885a6d731d625b15505a", "453bf941f77234cb5abfda4e015b2b337cea4f17", "2a2df7e790737a026434187f9605c4763ff71292", "0e50fe28229fea45527000b876eb4068abd6ed8c", "47f8b3b3f249830b6e17888df4810f3d189daac1", "535cdce8264ac0813d5bb8b19ceafa77a1674adf", "ab68837d09986c592dcab7d08ee6dfb40e02916f", "aca75c032cfb0b2eb4c0ae56f3d060d8875e43f9", "64a08beb073f62d2ce44e25c4f887de9208625a4", "c252bc84356ed69ccf53507752135b6e98de8db4", "fa052fd40e717773c6dc9cc4a2f5c10b8760339f", "8ea30ade85880b94b74b56a9bac013585cb4c34b", "439647914236431c858535a2354988dde042ef4d", "916fbe5e8bec5e7757eeb9d452385db320204ee0", "ecac3da2ff8bc2ba55981467f7fdea9de80e2092", "8796f2d54afb0e5c924101f54d469a1d54d5775d", "80aa455068018c63237c902001b58844fcc6f160", "0e37d70794d5ccfef8b4cc22b4203245f33eec6e", "54bae57ed37ce50e859cbc4d94d70cc3a84189d5", "76b9fe32d763e9abd75b427df413706c4170b95c", "42dc36550912bc40f7faa195c60ff6ffc04e7cd6", "753a277c1632dd61233c488cc55d648de3caaaa3", 
"2be9284d531b8c573a4c39503ca50606446041a3", "9d46485ca2c562d5e295251530a99dd5df99b589", "3b1aaac41fc7847dd8a6a66d29d8881f75c91ad5", "33b915476f798ca18ae80183bf40aea4aaf57d1e", "d588dd4f305cdea37add2e9bb3d769df98efe880", "64fd48fae4d859583c4a031b51ce76ecb5de614c", "d2bad850d30973a61b1a7d7dc582241a41e5c326", "6afed8dc29bc568b58778f066dc44146cad5366c", "83e96ed8a4663edaa3a5ca90b7ce75a1bb595b05", "0cf1287c8fd41dcef4ac03ebeab20482f02dce20", "7ac9aaafe4d74542832c273acf9d631cb8ea6193", "1fa3948af1c338f9ae200038c45adadd2b39a3e4", "377a1be5113f38297716c4bb951ebef7a93f949a", "3dbfd2fdbd28e4518e2ae05de8374057307e97b3", "05a312478618418a2efb0a014b45acf3663562d7", "b5690409be6c4e98bd37181d41121adfef218537", "bbe949c06dc4872c7976950b655788555fe513b8", "9d5bfaf6191484022a6731ce13ac1b866d21ad18", "03701e66eda54d5ab1dc36a3a6d165389be0ce79", "58483028445bf6b2d1ad6e4b1382939587513fe1", "c2422c975d9f9b62fbb19738e5ce5e818a6e1752", "35208eda874591eac70286441d19785726578946", "6d66c98009018ac1512047e6bdfb525c35683b16", "e7b7df786cf5960d55cbac4e696ca37b7cee8dcd", "fd10b0c771a2620c0db294cfb82b80d65f73900d", "182f3aa4b02248ff9c0f9816432a56d3c8880706", "1d1caaa2312390260f7d20ad5f1736099818d358", "4dd6d511a8bbc4d9965d22d79ae6714ba48c8e41", "3cd22b5b81a0172d608ff14be71b755d1f68c201", "3b2d5585af59480531616fe970cb265bbdf63f5b", "b3b4a7e29b9186e00d2948a1d706ee1605fe5811", "580054294ca761500ada71f7d5a78acb0e622f19", "a3bf6129d1ae136709063a5639eafd8018f50feb", "3df7401906ae315e6aef3b4f13126de64b894a54", "9d55ec73cab779403cd933e6eb557fb04892b634", "e42f3c27391821f9873539fc3da125b83bffd5a2", "076f2dca12b3e85c282fc678f0d22ad6a3e6dc14", "b13a882e6168afc4058fe14cc075c7e41434f43e", "1195f0bf8f745ba69da915203bcd79589b94aec5", "0b50e223ad4d9465bb92dbf17a7b79eccdb997fb", "0653dcdff992ad980cd5ea5bc557efb6e2a53ba1", "8855d6161d7e5b35f6c59e15b94db9fa5bbf2912", "1a9337d70a87d0e30966ecd1d7a9b0bbc7be161f", "4e1d89149fc4aa057a8becce2d730ec6afd60efa", "c5844de3fdf5e0069d08e235514863c8ef900eb7", 
"8fee7b38358815e443f8316fa18768d76dba12e3", "159e792096756b1ec02ec7a980d5ef26b434ff78", "a1132e2638a8abd08bdf7fc4884804dd6654fa63", "dd0258367fadb632b612ccd84fbc1ef892e70aeb", "c4e2d5ebfebbb9dcee6a9866c3d6290481496df5", "c3b3636080b9931ac802e2dd28b7b684d6cf4f8b", "8ed32c8fad924736ebc6d99c5c319312ba1fa80b", "87309bdb2b9d1fb8916303e3866eca6e3452c27d", "42df75080e14d32332b39ee5d91e83da8a914e34", "2d35a07c4fa03d78d5b622ab703ea44850de8d39", "8f051647bd8d23482c6c3866c0ce1959b8bd40f6", "d9eed86e53ce5f7cba379fe77bbefb42e83c0d88", "11fe6d45aa2b33c2ec10d9786a71c15ec4d3dca8", "0da3c329ae14a4032b3ba38d4ea808cf6d115c4a", "0e677f2b798f5c1f7143ba983467321a7851565a", "365866dc937529c3079a962408bffaa9b87c1f06", "e510f2412999399149d8635a83eca89c338a99a1", "0c4659b35ec2518914da924e692deb37e96d6206", "4350bb360797a4ade4faf616ed2ac8e27315968e", "cfdbcb796d028b073cdf7b91162384cd1c14e621", "31afdb6fa95ded37e5871587df38976fdb8c0d67", "f6ce34d6e4e445cc2c8a9b8ba624e971dd4144ca", "b299c292b84aeb4f080a8b39677a8e0d07d51b27", "471befc1b5167fcfbf5280aa7f908eff0489c72b", "88c6d4b73bd36e7b5a72f3c61536c8c93f8d2320", "b4d694961d3cde43ccef7d8fcf1061fe0d8f97f3", "69064c7b349bf6e7f4a802f4fd0da676c1bd1d8b", "25127c2d9f14d36f03d200a65de8446f6a0e3bd6", "1dae2f492d3ca2351349a73df6ee8a99b05ffc30", "51ed4c92cab9336a2ac41fa8e0293c2f5f9bf3b6", "238fc68b2e0ef9f5ec043d081451902573992a03", "7a91617ec959acedc5ec8b65e55b9490b76ab871", "948af4b04b4a9ae4bff2777ffbcb29d5bfeeb494", "14fa27234fa2112014eda23da16af606db7f3637", "acde297810059ca632ef3f7c002b63b40cb8796f", "6b742055a664bcbd1c6a85ae6796bd15bc945367", "072db5ba5b375d439ba6dbb6427c63cd7da6e940", "1270044a3fa1a469ec2f4f3bd364754f58a1cb56", "43cbe3522f356fbf07b1ff0def73756391dc3454", "eedfb384a5e42511013b33104f4cd3149432bd9e", "08fbe3187f31b828a38811cc8dc7ca17933b91e9", "7f203f2ff6721e73738720589ea83adddb7fdd27", "188abc5bad3a3663d042ce98c7a7327e5a1ae298", "3933416f88c36023a0cba63940eb92f5cef8001a", "daba8f0717f3f47c272f018d0a466a205eba6395", 
"2e157e8b57f679c2f1b8e16d6e934f52312f08f6", "dddd70fb2746a944e7428e2eb61ca06faff3fce9", "163ba5a998973f9ead6be0ca873aed5934d5022e", "66810438bfb52367e3f6f62c24f5bc127cf92e56", "622daa25b5e6af69f0dac3a3eaf4050aa0860396", "09138ad5ad1aeef381f825481d1b4f6b345c438c", "66d512342355fb77a4450decc89977efe7e55fa2", "170aa0f16cd655fdd4d087f5e9c99518949a1b5c", "2988f24908e912259d7a34c84b0edaf7ea50e2b3", "b2e6944bebab8e018f71f802607e6e9164ad3537", "9378ead3a09bc9f89fb711e2746facf399dd942e", "5253c94f955146ba7d3566196e49fe2edea1c8f4", "539ae0920815eb248939165dd5d1b0188ff7dca2", "844e3e6992c98e53b45e4eb88368d0d6e27fc1d6", "2d5d3905adfea7a6a8371dc2c5edc669cadacf70", "519f1486f0755ef3c1f05700ea8a05f52f83387b", "66a9935e958a779a3a2267c85ecb69fbbb75b8dc", "5d197c8cd34473eb6cde6b65ced1be82a3a1ed14", "d8b99eada922bd2ce4e20dc09c61a0e3cc640a62", "55fd4639c2126de5ad69d23b8a6e670a05911b9d", "34b42bcf84d79e30e26413f1589a9cf4b37076f9", "95f26d1c80217706c00b6b4b605a448032b93b75", "aea4128ba18689ff1af27b90c111bbd34013f8d5", "322b7a4ce006e4d14748dd064e80ffba573ebcd7", "4a14a321a9b5101b14ed5ad6aa7636e757909a7c", "4270460b8bc5299bd6eaf821d5685c6442ea179a", "0fba39bf12486c7684fd3d51322e3f0577d3e4e8", "7ce03597b703a3b6754d1adac5fbc98536994e8f", "466184b10fb7ce9857e6b5bd6b4e5003e09a0b16", "235bebe7d0db37e6727dfa1246663be34027d96b", "7bd37e6721d198c555bf41a2d633c4f0a5aeecc1", "829f390b3f8ad5856e7ba5ae8568f10cee0c7e6a", "1e058b3af90d475bf53b3f977bab6f4d9269e6e8", "d75bd05865224a1341731da66b8d812a7924d6f6", "aca232de87c4c61537c730ee59a8f7ebf5ecb14f", "4d7e1eb5d1afecb4e238ba05d4f7f487dff96c11", "1176a74fb9351ac2de81c198c4861d78e58f172d", "d05759932001aa6f1f71e7dc261c4716f57a5397", "d046030f7138e5a2dbe2b3eec1b948ad8c787538", "d18cca5e90884020e748e7fe2d13398d3cbd14fb", "4014e8c1a1b49ad2b9b2c45c328ec9f1fd56f676", "d20ea5a4fa771bc4121b5654a7483ced98b39148", "ca0804050cf9d7e3ed311f9be9c7f829e5e6a003", "6d5f876a73799cc628e4ad2d9cfcd88091272342", "2cfc28a96b57e0817cc9624a5d553b3aafba56f3", 
"a2cc3193ed56ef4cedaaf4402c844df28edb5639", "ce9a61bcba6decba72f91497085807bface02daf", "9854145f2f64d52aac23c0301f4bb6657e32e562", "5f871838710a6b408cf647aacb3b198983719c31", "eb38f20eaa1b849cabec99815883390f84daf279", "2d990b04c2bd61d3b7b922b8eed33aeeeb7b9359", "ebb7cc67df6d90f1c88817b20e7a3baad5dc29b9", "157eb982da8fe1da4c9e07b4d89f2e806ae4ceb6", "f41e80f941a45b5880f4c88e5bf721872db3400f", "ac9dfbeb58d591b5aea13d13a83b1e23e7ef1fea", "1742e6c347037d5d4ccbdf5c7a27dfbf0afedb91", "56f812661c3248ed28859d3b2b39e033b04ae6ae", "bbf1396eb826b3826c5a800975047beabde2f0de", "11f7f939b6fcce51bdd8f3e5ecbcf5b59a0108f5", "b52c0faba5e1dc578a3c32a7f5cfb6fb87be06ad", "09df62fd17d3d833ea6b5a52a232fc052d4da3f5", "816eff5e92a6326a8ab50c4c50450a6d02047b5e", "bd07d1f68486052b7e4429dccecdb8deab1924db", "97032b13f1371c8a813802ade7558e816d25c73f", "2ed7d95588200c8c738c7dd61b8338538e04ea30", "0e1983e9d0e8cb4cbffef7af06f6bc8e3f191a64", "0a5ffc55b584da7918c2650f9d8602675d256023", "410bc0b3bd82c85c98df71ec0cfe995f14621077", "85785ae222c6a9e01830d73a120cdac75d0b838a", "3d143cfab13ecd9c485f19d988242e7240660c86", "edf98a925bb24e39a6e6094b0db839e780a77b08", "1d3bd75e2fb95cc0996a1a2eeaf21dfa42ab7ca0", "bfd0dd2d13166a9c59e04c62f5463eacfc8d0d2b", "331d6ace8d59fa211e5bc84a93fdc65695238c69", "89e31777f221ddb3bc9940d7f520c8114c4148a2", "6ff0f804b8412a50ae2beea5cd020c94a5de5764", "36b19e6bf2f0abc0387052436956a25b37488134", "759a3b3821d9f0e08e0b0a62c8b693230afc3f8d", "abac0fa75281c9a0690bf67586280ed145682422", "02a92b79391ddac0acef4f665b396f7f39ca2972", "176fc31a686fb70d73f1fa354bf043ad236f7aa3", "0c5ddfa02982dcad47704888b271997c4de0674b", "60b3601d70f5cdcfef9934b24bcb3cc4dde663e7", "b313751548018e4ecd5ae2ce6b3b94fbd9cae33e", "c822bd0a005efe4ec1fea74de534900a9aa6fb93", "c0723e0e154a33faa6ff959d084aebf07770ffaf", "2b10a07c35c453144f22e8c539bf9a23695e85fc", "01e63d0a21fad7a29301749e9eafed826101b636", "14e759cb019aaf812d6ac049fde54f40c4ed1468", "aa912375eaf50439bec23de615aa8a31a3395ad3", 
"cfeb26245b57dd10de8f187506d4ed5ce1e2b7dd", "7a9ef21a7f59a47ce53b1dff2dd49a8289bb5098", "ab58a7db32683aea9281c188c756ddf969b4cdbd", "d6a5eb4377e2a67420778eab61b5a89046307bae", "304b1f14ca6a37552dbfac443f3d5b36dbe1a451", "16c1b592d85d13f1ba4eff0afb4441bb78650785", "5ca14fa73da37855bfa880b549483ee2aba26669", "4b28de1ebf6b6cb2479b9176fab50add6ed75b78", "32d555faaaa0a6f6f9dfc9263e4dba75a38c3193", "965f3a60a762712c3fc040724e507d00357f8709", "884a9ce87d4d2338cb97bf4c8df3cdb079a87d5e", "5040f7f261872a30eec88788f98326395a44db03", "d67dcaf6e44afd30c5602172c4eec1e484fc7fb7", "a59cdc49185689f3f9efdf7ee261c78f9c180789", "85a136b48c2036b16f444f93b086e2bd8539a498", "522a4ca705c06a0436bbe62f46efe24d67a82422", "3630324c2af04fd90f8668f9ee9709604fe980fd", "14d4c019c3eac3c3fa888cb8c184f31457eced02", "5b7cb9b97c425b52b2e6f41ba8028836029c4432", "9bc289a32bb5ab54b7a178b7234799f32e0568ce", "529e2ce6fb362bfce02d6d9a9e5de635bde81191", "0ef96d97365899af797628e80f8d1020c4c7e431", "841bf196ee0086c805bd5d1d0bddfadc87e424ec", "0a4b808ff800fb0041132854361f591ad01067a5", "bab88235a30e179a6804f506004468aa8c28ce4f", "60970e124aa5fb964c9a2a5d48cd6eee769c73ef", "a947c21a15fb0a02378c36271e1addf6b6e110eb", "aa94f214bb3e14842e4056fdef834a51aecef39c", "f812347d46035d786de40c165a158160bb2988f0", "1a3eee980a2252bb092666cf15dd1301fa84860e", "02e39f23e08c2cb24d188bf0ca34141f3cc72d47", "bef503cdfe38e7940141f70524ee8df4afd4f954", "0c30f6303dc1ff6d05c7cee4f8952b74b9533928", "9b474d6e81e3b94e0c7881210e249689139b3e04", "afdbbc5c84eb4e535c7c478b5227c0138b57af64", "8c85ef961826575bc2c2f4da7784bc3bfcf8b188", "1a031378cf1d2b9088a200d9715d87db8a1bf041", "004a1bb1a2c93b4f379468cca6b6cfc6d8746cc4", "3598d10d7d4f2b543afa8bcf6b2c34a3696ef155", "c5022fbeb65b70f6fe11694575b8ad1b53412a0d", "0d88ab0250748410a1bc990b67ab2efb370ade5d", "8e452379fda31744d4a4383fcb8a9eab6dbc4ae4", "cf09e2cb82961128302b99a34bff91ec7d198c7c", "86ec0e331dd494533e16dd638661463b7e03edb7", "471bef061653366ba66a7ac4f29268e8444f146e", 
"a9af0dc1e7a724464d4b9d174c9cf2441e34d487", "77a9b1856ebbc9a6170ee4c572a515d6db062cef", "4f6adc53798d9da26369bea5a0d91ed5e1314df2", "3e51d634faacf58e7903750f17111d0d172a0bf1", "11367581c308f4ba6a32aac1b4a7cdb32cd63137", "a57ee5a8fb7618004dd1def8e14ef97aadaaeef5", "6691dfa1a83a04fdc0177d8d70e3df79f606b10f", "fd60166c2619c0db5e5159a3dfe9068aa4f1b32f", "4faded442b506ad0f200a608a69c039e92eaff11", "89cabb60aa369486a1ebe586dbe09e3557615ef8", "f6ca29516cce3fa346673a2aec550d8e671929a6", "f909d04c809013b930bafca12c0f9a8192df9d92", "310da8bd81c963bd510bf9aaa4d028a643555c84", "49e85869fa2cbb31e2fd761951d0cdfa741d95f3", "1db23a0547700ca233aef9cfae2081cd8c5a04d7", "505e55d0be8e48b30067fb132f05a91650666c41", "c32383330df27625592134edd72d69bb6b5cff5c", "4fbef7ce1809d102215453c34bf22b5f9f9aab26", "cdfa7dccbc9e9d466f8a5847004973a33c7fcc89", "687e17db5043661f8921fb86f215e9ca2264d4d2", "6a527eeb0b2480109fe987ed7eb671e0d847fca8", "5599ac2cd569ed83ecab8449d2f245e13034da06", "1ad5cb4c1eec5a9666b5dbbb6fab43576d0935db", "d2fac640086ba89271ad7c1ebf36239ecd64605e", "dbf2d2ca28582031be6d16519ab887248f5e8ad8", "5a547df635a9a56ac224d556333d36ff68cbf088", "61b22b1016bf13aca8d2e57c4e5e004d423f4865", "2b2e6e073fe0876fdf96a336cbc14de0217ce070", "af654a7ec15168b16382bd604889ea07a967dac6", "918b72a47b7f378bde0ba29c908babf6dab6f833", "181045164df86c72923906aed93d7f2f987bce6c", "0db371a6bc8794557b1bffc308814f53470e885a", "78a4eb59ec98994bebcf3a5edf9e1d34970c45f6", "0319332ded894bf1afe43f174f5aa405b49305f0", "080e0efc3cf71260bfe9bdc62cd86614d1ebca46", "d9c4b1ca997583047a8721b7dfd9f0ea2efdc42c", "5397c34a5e396658fa57e3ca0065a2878c3cced7", "191b70fdd6678ef9a00fd63710c70b022d075362", "552c55c71bccfc6de7ce1343a1cd12208e9a63b3", "4fbc0189252ed4fe8f9cffd3ea0ebbb0c621e3ef", "126204b377029feb500e9b081136e7a9010e3b6b", "603231c507bb98cc8807b6cbe2c860f79e8f6645", "b944cc4241d195b1609a7a9d87fce0e9ba1498bc", "d91a5589fd870bf62b7e4979d9d47e8acf6c655d", "e9d147e657619c393ca702117602fd7d15675f69", 
"ae836e2be4bb784760e43de88a68c97f4f9e44a1", "f0de1e61ba806f3db918f9e498fcc6dfa223b13d", "86b6de59f17187f6c238853810e01596d37f63cd", "efc78a7d95b14abacdfde5c78007eabf9a21689c", "5c624382057b55e46af4dc4c055a33c90e8bf08a", "cadba72aa3e95d6dcf0acac828401ddda7ed8924", "3cc3cf57326eceb5f20a02aefae17108e8c8ab57", "ae5bb02599244d6d88c4fe466a7fdd80aeb91af4", "72f4aaf7e2e3f215cd8762ce283988220f182a5b", "f2eab39cf68de880ee7264b454044a55098e8163", "7d18e9165312cf669b799aa1b883c6bbe95bf40e", "721b109970bf5f1862767a1bec3f9a79e815f79a", "7384610776ec405dc84e47f2d353aa6d3cc03b1d", "a2bcfba155c990f64ffb44c0a1bb53f994b68a15", "956317de62bd3024d4ea5a62effe8d6623a64e53", "16671b2dc89367ce4ed2a9c241246a0cec9ec10e", "8db609d84190b905913eb2f17f4e558c6e982208", "79b669abf65c2ca323098cf3f19fa7bdd837ff31", "3d36f941d8ec613bb25e80fb8f4c160c1a2848df", "ea86b75427f845f04e96bdaadfc0d67b3f460005", "80f72b26c6571aee2ff04704bc7fd1a69bfa0b3f", "c43862db5eb7e43e3ef45b5eac4ab30e318f2002", "8e63868e552e433dc536ba732f4c2af095602869", "280d59fa99ead5929ebcde85407bba34b1fcfb59", "33695e0779e67c7722449e9a3e2e55fde64cfd99", "71e95c3a31dceabe9cde9f117615be8bf8f6d40e", "16fc82d44188eb49a151bd5836a29911b3bfabcb", "a53f988d16f5828c961553e8efd38fed15e70bcc", "23a8d02389805854cf41c9e5fa56c66ee4160ce3", "36c2db5ff76864d289781f93cbb3e6351f11984c", "391b273af237b69ebbdfacb8e33b8e873421c780", "0e6f422c3f79c552c0c3d7eda0145aed8680f0ea", "48a417cfeba06feb4c7ab30f06c57ffbc288d0b5", "2489a839d0a761ef8520393a7e412c36f5f26324", "edd6ed94207ab614c71ac0591d304a708d708e7b", "bd0265ba7f391dc3df9059da3f487f7ef17144df", "e96ce25d11296fce4e2ecc2da03bd207dc118724", "45a6333fc701d14aab19f9e2efd59fe7b0e89fec", "5160569ca88171d5fa257582d161e9063c8f898d", "042825549296ea419d95fcf0b5e71f72070a5f0d", "2b5005c2abf2d9a8c16afa50306b6959dfc72275", "75d7ba926ef1cc2adab6c5019afbb2f69a5ca27d", "4c6e1840451e1f86af3ef1cb551259cb259493ba", "d2cd9a7f19600370bce3ea29aba97d949fe0ceb9", "48af47406ec14b561a9cdfafc5b8bdfdc746eb8a", 
"59dac8b460a89e03fa616749a08e6149708dcc3a", "442d3aeca486de787de10bc41bfeb0b42c81803f", "d26b443f87df76034ff0fa9c5de9779152753f0c", "6daccf3d15c617873954bb75de26f6b6b0a42772", "2564920d6976be68bb22e299b0b8098090bbf259", "391642ec5ade3579654a14c3644af6f086af0158", "1ea4347def5868c622d7ce57cbe171fa68207e2b", "20ade100a320cc761c23971d2734388bfe79f7c5", "aac39ca161dfc52aade063901f02f56d01a1693c", "f486624efa750d718a670fba3c7f21b1c84ebaeb", "10bfa4cecd64b9584c901075d6b50f4fad898d0b", "61f1b14f04d2fa1d8a556adbdf93050b4637f44b", "ed94e7689cdae87891f08428596dec2a2dc6a002", "6aa61d28750629febe257d1cb69379e14c66c67f", "beb3fd2da7f8f3b0c3ebceaa2150a0e65736d1a2", "e39a0834122e08ba28e7b411db896d0fdbbad9ba", "7bf0a1aa1d0228a51d24c0c3a83eceb937a6ae25", "3028690d00bd95f20842d4aec84dc96de1db6e59", "0e5dcc6ae52625fd0637c6bba46a973e46d58b9c", "28d7029cfb73bcb4ad1997f3779c183972a406b4", "20be15dac7d8a5ba4688bf206ad24cab57d532d6", "05f3d1e9fb254b275354ca69018e9ed321dd8755", "0c93cb1af3bba1bd90a03e921ff2d55acf35c01f", "b84f164dbccb16da75a61323adaca730f528edde", "85c007758e409eb3a9ae83375c7427dd517f4ab9", "c8e84cdff569dd09f8d31e9f9ba3218dee65e961", "c1d2d12ade031d57f8d6a0333cbe8a772d752e01", "261c3e30bae8b8bdc83541ffa9331b52fcf015e6", "c2fa83e8a428c03c74148d91f60468089b80c328", "90c2d4d9569866a0b930e91713ad1da01c2a6846", "b216040f110d2549f61e3f5a7261cab128cab361", "00d94b35ffd6cabfb70b9a1d220b6823ae9154ee", "5d09d5257139b563bd3149cfd5e6f9eae3c34776", "a513977bcd8cecd2ed1836bf91b31a80a1ebe27b", "0b572a2b7052b15c8599dbb17d59ff4f02838ff7", "2546dc7e2c2390233de16502413fe1097ecf3fb5", "520782f07474616879f94aae0d9d1fff48910254", "f4aed1314b2d38fd8f1b9d2bc154295bbd45f523", "153e5cddb79ac31154737b3e025b4fb639b3c9e7", "83b4899d2899dd6a8d956eda3c4b89f27f1cd308", "1888bf50fd140767352158c0ad5748b501563833", "43a03cbe8b704f31046a5aba05153eb3d6de4142", "7f2a234ad5c256733a837dbf98f25ed5aad214e8", "8fed5ea3b69ea441a8b02f61473eafee25fb2374", "1e213b03e1b8a6067bf37503904491e98b9e42df", 
"5b6ecbf5f1eecfe1a9074d31fe2fb030d75d9a79", "1efaa128378f988965841eb3f49d1319a102dc36", "a0aa32bb7f406693217fba6dcd4aeb6c4d5a479b", "78598e7005f7c96d64cc47ff47e6f13ae52245b8", "6fdf2f4f7ae589af6016305a17d460617d9ef345", "a9426cb98c8aedf79ea19839643a7cf1e435aeaa", "bc910ca355277359130da841a589a36446616262", "c7c8d150ece08b12e3abdb6224000c07a6ce7d47", "3146fabd5631a7d1387327918b184103d06c2211", "3f0c6dbfd3c9cd5625ba748327d69324baa593a6", "15aa6c457678e25f6bc0e818e5fc39e42dd8e533", "fa08b52dda21ccf71ebc91bc0c4d206ac0aa3719", "69b18d62330711bfd7f01a45f97aaec71e9ea6a5", "768f6a14a7903099729872e0db231ea814eb05e9", "0141cb33c822e87e93b0c1bad0a09db49b3ad470", "5213549200bccec57232fc3ff788ddf1043af7b3", "29c340c83b3bbef9c43b0c50b4d571d5ed037cbd", "e465f596d73f3d2523dbf8334d29eb93a35f6da0", "b806a31c093b31e98cc5fca7e3ec53f2cc169db9", "11aa527c01e61ec3a7a67eef8d7ffe9d9ce63f1d", "9b4b2a575641f3a7f8a5ce28b6a06c36694a9ddf", "747dc0add50b86f5ba9e3e7315943d520e08f9eb", "8dfe43c76b76a97f8938f5f5f81059a1f1fa74ed", "a73405038fdc0d8bf986539ef755a80ebd341e97", "ec7cd3fff8bdbbe7005bc8d6b7f6b87d72aac2d9", "df71a00071d5a949f9c31371c2e5ee8b478e7dc8", "5ee0103048e1ce46e34a04c45ff2c2c31529b466", "258b3b1df82186dd76064ef86b28555e91389b73", "206fbe6ab6a83175a0ef6b44837743f8d5f9b7e8", "599adc0dcd4ebcc2a868feedd243b5c3c1bd1d0a", "862d17895fe822f7111e737cbcdd042ba04377e8", "13f6ab2f245b4a871720b95045c41a4204626814", "909c23143162d98ffb2447f0018f92ac6cf8591b", "18dfc2434a95f149a6cbb583cca69a98c9de9887", "c5adb33bd3557c94d0e54cfe2036a1859118a65e", "9e8637a5419fec97f162153569ec4fc53579c21e", "598744c8620e4ecbf449d14d7081fbf1cd05851f", "74156a11c2997517061df5629be78428e1f09cbd", "a85e9e11db5665c89b057a124547377d3e1c27ef", "d4453ec649dbde752e74da8ab0984c6f15cc6e06", "0bab5213911c19c40e936b08d2f8fba01e286b85", "88ad82e6f2264f75f7783232ba9185a2f931a5d1", "4223917177405eaa6bdedca061eb28f7b440ed8e", "c18d537037caf399c4fabfdec896c376675af58a", "159b1e3c3ed0982061dae3cc8ab7d9b149a0cdb1", 
"43ed518e466ff13118385f4e5d039ae4d1c000fb", "a26fd9df58bb76d6c7a3254820143b3da5bd584b", "355746e6e1770cfcc2e91479f8134c854a77ff96", "66490b5869822b31d32af7108eaff193fbdb37b0", "2f73203fd71b755a9601d00fc202bbbd0a595110", "195d331c958f2da3431f37a344559f9bce09c0f7", "dd715a98dab34437ad05758b20cc640c2cdc5715", "83e093a07efcf795db5e3aa3576531d61557dd0d", "fbe4f8a6af19f63e47801c6f31402f9baae5fecf", "05ea7930ae26165e7e51ff11b91c7aa8d7722002", "6a26893ed63830d00f6d011679d1b1ed2d8466a9", "b6f15bf8723b2d5390122442ab04630d2d3878d8", "2aa2b312da1554a7f3e48f71f2fce7ade6d5bf40", "15bf0e70b069cea62d87d3bf706172c4a6a7779e", "06262d14323f9e499b7c6e2a3dec76ad9877ba04", "9391618c09a51f72a1c30b2e890f4fac1f595ebd", "37278ffce3a0fe2c2bbf6232e805dd3f5267eba3", "1e8fd77d4717e9cb6079e10771dd2ed772098cb3", "72282287f25c5419dc6fd9e89ec9d86d660dc0b5", "9ce4541d21ee3511bf3dc55bc3cd01222194d95a", "f11c76efdc9651db329c8c862652820d61933308", "0ba64f4157d80720883a96a73e8d6a5f5b9f1d9b", "4da735d2ed0deeb0cae4a9d4394449275e316df2", "c2e03efd8c5217188ab685e73cc2e52c54835d1a", "5a10d74c7fc3294f76d771df413fe0b0b35f2ab5", "39ed31ced75e6151dde41944a47b4bdf324f922b", "411318684bd2d42e4b663a37dcf0532a48f0146d", "352a620f0b96a7e76b9195a7038d5eec257fd994", "0a0321785c8beac1cbaaec4d8ad0cfd4a0d6d457", "7a131fafa7058fb75fdca32d0529bc7cb50429bd", "27a00f2490284bc0705349352d36e9749dde19ab", "5121f42de7cb9e41f93646e087df82b573b23311", "c1482491f553726a8349337351692627a04d5dbe", "7f97a36a5a634c30de5a8e8b2d1c812ca9f971ae", "212608e00fc1e8912ff845ee7a4a67f88ba938fc", "96e318f8ff91ba0b10348d4de4cb7c2142eb8ba9", "e47e8fa44decf9adbcdb02f8a64b802fe33b29ef", "69adf2f122ff18848ff85e8de3ee3b2bc495838e", "9e182e0cd9d70f876f1be7652c69373bcdf37fb4", "98b2f21db344b8b9f7747feaf86f92558595990c", "fb3ff56ab12bd250caf8254eca30cd97984a949a", "3a27d164e931c422d16481916a2fa6401b74bcef", "3dc522a6576c3475e4a166377cbbf4ba389c041f", "b6f758be954d34817d4ebaa22b30c63a4b8ddb35", "1fc249ec69b3e23856b42a4e591c59ac60d77118", 
"d8526863f35b29cbf8ac2ae756eaae0d2930ffb1", "8509abbde2f4b42dc26a45cafddcccb2d370712f", "e5e5f31b81ed6526c26d277056b6ab4909a56c6c", "f74917fc0e55f4f5682909dcf6929abd19d33e2e", "2b869d5551b10f13bf6fcdb8d13f0aa4d1f59fc4", "5f64a2a9b6b3d410dd60dc2af4a58a428c5d85f9", "5a259f2f5337435f841d39dada832ab24e7b3325", "0a64f4fec592662316764283575d05913eb2135b", "c71217b2b111a51a31cf1107c71d250348d1ff68", "c11eb653746afa8148dc9153780a4584ea529d28", "b73795963dc623a634d218d29e4a5b74dfbc79f1", "bd379f8e08f88729a9214260e05967f4ca66cd65", "d80a3d1f3a438e02a6685e66ee908446766fefa9", "8e0ab1b08964393e4f9f42ca037220fe98aad7ac", "040dc119d5ca9ea3d5fc39953a91ec507ed8cc5d", "eb526174fa071345ff7b1fad1fad240cd943a6d7", "7c80d91db5977649487388588c0c823080c9f4b4", "4c29e1f31660ba33e46d7e4ffdebb9b8c6bd5adc", "0b8b8776684009e537b9e2c0d87dbd56708ddcb4", "0334cc0374d9ead3dc69db4816d08c917316c6c4", "de7f5e4ccc2f38e0c8f3f72a930ae1c43e0fdcf0", "c808c784237f167c78a87cc5a9d48152579c27a4", "39c10888a470b92b917788c57a6fd154c97b421c", "3e3227c8e9f44593d2499f4d1302575c77977b2e", "4209783b0cab1f22341f0600eed4512155b1dee6", "f61d5f2a082c65d5330f21b6f36312cc4fab8a3b", "d78734c54f29e4474b4d47334278cfde6efe963a", "cb2470aade8e5630dcad5e479ab220db94ecbf91", "dd8084b2878ca95d8f14bae73e1072922f0cc5da", "3795974e24296185d9b64454cde6f796ca235387", "e5823a9d3e5e33e119576a34cb8aed497af20eea", "d1a43737ca8be02d65684cf64ab2331f66947207", "bd74c3ca2ff03396109ac2d1131708636bd0d4d3", "ba788365d70fa6c907b71a01d846532ba3110e31", "266766818dbc5a4ca1161ae2bc14c9e269ddc490", "11ad162b3165b4353df8d7b4153fb26d6a310d11", "9329523dc0bd4e2896d5f63cf2440f21b7a16f16", "9b0489f2d5739213ef8c3e2e18739c4353c3a3b7", "1316296fae6485c1510f00b1b57fb171b9320ac2", "d29eec5e047560627c16803029d2eb8a4e61da75", "fbc2f5cf943f440b1ba2374ecf82d0176a44f1eb", "e3c011d08d04c934197b2a4804c90be55e21d572", "c9efcd8e32dced6efa2bba64789df8d0a8e4996a", "38d8ff137ff753f04689e6b76119a44588e143f3", "6f5309d8cc76d3d300b72745887addd2a2480ba8", 
"7323b594d3a8508f809e276aa2d224c4e7ec5a80", "6fed504da4e192fe4c2d452754d23d3db4a4e5e3", "a322479a6851f57a3d74d017a9cb6d71395ed806", "d949fadc9b6c5c8b067fa42265ad30945f9caa99", "b2add9fad0bcf7bf0660f99f389672cdf7cc6a70", "3baa3d5325f00c7edc1f1427fcd5bdc6a420a63f", "ffea8775fc9c32f573d1251e177cd283b4fe09c9", "8199803f476c12c7f6c0124d55d156b5d91314b6", "dec0c26855da90876c405e9fd42830c3051c2f5f", "d35534f3f59631951011539da2fe83f2844ca245", "8ff8c64288a2f7e4e8bf8fda865820b04ab3dbe8", "ff44d8938c52cfdca48c80f8e1618bbcbf91cb2a", "2e0d56794379c436b2d1be63e71a215dd67eb2ca", "b446bcd7fb78adfe346cf7a01a38e4f43760f363", "6ca6ade6c9acb833790b1b4e7ee8842a04c607f7", "23dd8d17ce09c22d367e4d62c1ccf507bcbc64da", "313d5eba97fe064bdc1f00b7587a4b3543ef712a", "3cb2841302af1fb9656f144abc79d4f3d0b27380", "6afe1f668eea8dfdd43f0780634073ed4545af23", "a1b7bb2a4970b7c479aff3324cc7773c1daf3fc1", "3933e323653ff27e68c3458d245b47e3e37f52fd", "37c5c65ae204ad3692cd30a3dc62f28a263ad468", "3827f1cab643a57e3cd22fbffbf19dd5e8a298a8", "6d91da37627c05150cb40cac323ca12a91965759", "707a542c580bcbf3a5a75cce2df80d75990853cc", "45c31cde87258414f33412b3b12fc5bec7cb3ba9", "4e1258db62e4762fd8647b250fda9c3567f86eb8", "a000e15656e84dd538f1f0b8f8639dd29f122c95", "496f3d14cf466f054d395a3c71fa2cd6a3dda61d", "f0b4f5104571020206b2d5e606c4d70f496983f9", "2271d554787fdad561fafc6e9f742eea94d35518", "5c02bd53c0a6eb361972e8a4df60cdb30c6e3930", "72a3bb0fb490355a926c5a689e12268bff9ff842", "d4fba386caca1b5b2ee35ee5310b5fce50b2b1c3", "d8bf148899f09a0aad18a196ce729384a4464e2b", "396de136485d85242583951bee4e7b19234bc964", "609d81ddf393164581b3e3bf11609a712ac47522", "4735fa28fa2a2af98f7b266efd300a00e60dddf7", "c459014131cbcd85f5bd5c0a89115b5cc1512be9", "6584c3c877400e1689a11ef70133daa86a238602", "9e99f818b37d44ec6aac345fb2c5356d83d511c7", "84a74ef8680b66e6dccbc69ae80321a52780a68e", "33b61be191e63b0c9974be708180275c9d5b3057", "911505a4242da555c6828509d1b47ba7854abb7a", "0733ec1953f6c774eb3a723618e1268586b46359", 
"c175ebe550761b18bac24d394d85bdfaf3b7718c", "d4b4020e289c095ce2c2941685c6cd37667f5cc9", "e379e73e11868abb1728c3acdc77e2c51673eb0d", "55c46ae1154ed310610bdf5f6d9e7023d14c7eb4", "e4d0e87d0bd6ead4ccd39fc5b6c62287560bac5b", "b331ca23aed90394c05f06701f90afd550131fe3", "4f0bf2508ae801aee082b37f684085adf0d06d23", "060f67c8a0de8fee9c1732b63ab40627993f93d0", "9c81d436b300494bc88d4de3ac3ec3cc9c43c161", "12bb0cb32e48269da2902c4c6d41ea2966ba8462", "99726ad232cef837f37914b63de70d8c5101f4e2", "fadafdd7dedd2bdd775b4591a998c8b5254081e1", "5bb6703bc01e4f7ab7e043964ec6579ac06a7c03", "847e07387142c1bcc65035109ccce681ef88362c", "7081958a390d3033f5f33e22bbfec7055ea8d601", "5b719410e7829c98c074bc2947697fac3b505b64", "288dbc40c027af002298b38954d648fddd4e2fd3", "36b40c75a3e53c633c4afb5a9309d10e12c292c7", "049186d674173ebb76496f9ecee55e17ed1ca41b", "0e49a23fafa4b2e2ac097292acf00298458932b4", "3a49507c46a2b8c6411809c81ac47b2b1d2282c3", "8a40b6c75dd6392ee0d3af73cdfc46f59337efa9", "161eb88031f382e6a1d630cd9a1b9c4bc6b47652", "c48b68dc780c71ab0f0f530cd160aa564ed08ade", "259ddd3c618feec51576baac7eaaf80ea924b791", "5491478ae2c58af21389ed3af21babd362511a8e", "7e456e94f3080c761f858264428ee4c91cd187b2", "f17d8f14651c123d39e13a39dc79b7eb3659fe68", "79fa57dedafddd3f3720ca26eb41c82086bfb332", "10e704c82616fb5d9c48e0e68ee86d4f83789d96", "06d028bd761ad6f29e9f1835d6686d9880706438", "660b73b0f39d4e644bf13a1745d6ee74424d4a16", "0708059e3bedbea1cbfae1c8cd6b7259d4b56b5b", "9bcfadd22b2c84a717c56a2725971b6d49d3a804", "5b6d05ce368e69485cb08dd97903075e7f517aed", "09cf3f1764ab1029f3a7d57b70ae5d5954486d69", "43a4dd79bb26e3b722ac8bea20f5916c30599851", "cda4fb9df653b5721ad4fe8b4a88468a410e55ec", "0ed4b4d6d1a0c49c4eb619aab36db559b620d99f", "2a8c9e43459c1051f5b8048a3863c7bb8121abb2", "2f95340b01cfa48b867f336185e89acfedfa4d92", "29c7dfbbba7a74e9aafb6a6919629b0a7f576530", "35a39c7da14b1d288c0f9201374b307f667d63a3", "75a74a74d6abbbb302a99de3225c8870fa149aee", "0aae88cf63090ea5b2c80cd014ef4837bcbaadd8", 
"874da338c01fb7a87d605fcde6c52835eee03d5e", "7ac4fc169fffa8e962b9df94f61e2adf6bac8f97", "180bd019eab85bbf01d9cddc837242e111825750", "fe50efe9e282c63941ec23eb9b8c7510b6283228", "35265cbd9c6ea95753f7c6b71659f7f7ef9081b6", "05c91e8a29483ced50c5f2d869617b80f7dacdd9", "19878141fbb3117d411599b1a74a44fc3daf296d", "0fd1715da386d454b3d6571cf6d06477479f54fc", "205af28b4fcd6b569d0241bb6b255edb325965a4", "4958c06da5581fd0b4904d3bf0ee09958ecdba5b", "81706277ed180a92d2eeb94ac0560f7dc591ee13", "951f21a5671a4cd14b1ef1728dfe305bda72366f", "9753ee59db115e1e84a7c045f2234a3f63f255b1", "8f8c0243816f16a21dea1c20b5c81bc223088594", "6c6bb85a08b0bdc50cf8f98408d790ccdb418798", "ae4e2c81c8a8354c93c4b21442c26773352935dd", "70d2f5e897086b8d3914f8fa1d9e479d71597e96", "b7eead8586ffe069edd190956bd338d82c69f880", "4b9c47856f8314ecbe4d0efc65278c2ededb2738", "5bfc32d9457f43d2488583167af4f3175fdcdc03", "f545b121b9612707339dfdc40eca32def5e60430", "9df86395c11565afa8683f6f0a9ca005485c5589", "cc7c63473c5bef5ae09f26b2258691d9ffdd5f93", "789b8fff223b0db0fe3babf46ea98b1d5197f0c0", "929bd1d11d4f9cbc638779fbaf958f0efb82e603", "70e79d7b64f5540d309465620b0dab19d9520df1", "e475e857b2f5574eb626e7e01be47b416deff268", "ca0185529706df92745e656639179675c717d8d5", "08e995c080a566fe59884a527b72e13844b6f176", "62f017907e19766c76887209d01d4307be0cc573", "ab1dfcd96654af0bf6e805ffa2de0f55a73c025d", "c5366f412f2e8e78280afcccc544156f63b516e3", "4aa8db1a3379f00db2403bba7dade5d6e258b9e9", "2138ccf78dcf428c22951cc066a11ba397f6fcef", "749382d19bfe9fb8d0c5e94d0c9b0a63ab531cb7", "42e155ea109eae773dadf74d713485be83fca105", "06ad99f19cf9cb4a40741a789e4acbf4433c19ae", "d8722ffbca906a685abe57f3b7b9c1b542adfa0c", "5613cb13ab381c8a8b81181ac786255705691626", "7d50df03d0c8a26eaaeaef47de68691f9ac73701", "3c7825dcf5a027bd07eb0fe4cce23910b89cf050", "b33e8db8ccabdfc49211e46d78d09b14557d4cba", "7a0fb972e524cb9115cae655e24f2ae0cfe448e0", "cd64530a910ba28cbd127c78913dd787184f8e6d", "af54dd5da722e104740f9b6f261df9d4688a9712", 
"df5fe0c195eea34ddc8d80efedb25f1b9034d07d", "faa46ef96493b04694555738100d9f983915cf9b", "913062218c7498b2617bb9d7821fe1201659c5cc", "d9218c2bbc7449dbccac351f55675efd810535db", "13940d0cc90dbf854a58f92d533ce7053aac024a", "bebb8a97b2940a4e5f6e9d3caf6d71af21585eda", "6ed738ff03fd9042965abdfaa3ed8322de15c116", "77652e55f73539df94f03489544504874f96d25e", "64102c217cba63a89cd2227dc4b3a9ed2104b73e", "179545c1fc645cb2ad9b31a30f48352d541876ff", "fae83b145e5eeda8327de9f19df286edfaf5e60c", "051a84f0e39126c1ebeeb379a405816d5d06604d", "0b80fdb5b78422efdb3cdb840c78630de0af61f3", "b747fcad32484dfbe29530a15776d0df5688a7db", "da5bfddcfe703ca60c930e79d6df302920ab9465", "d09fd7e0bb5d997963cfef45452724416b2bb052", "939f9fa056f8be445da19b43da64bd2405851a43", "5d233e6f23b1c306cf62af49ce66faac2078f967", "5042b358705e8d8e8b0655d07f751be6a1565482", "32728e1eb1da13686b69cc0bd7cce55a5c963cdd", "adc4bc7639d5f1c5ead8728882e2390339d061ed", "a729d0243b1e3b055f44248a32b3caf20b7e93be", "ea890846912f16a0f3a860fce289596a7dac575f", "403a108dec92363fd1f465340bd54dbfe65af870", "6c30b29b24dc11e37fe36c6e2c283e1c8fe5e339", "fe7e3cc1f3412bbbf37d277eeb3b17b8b21d71d5", "9774430006f1ed017156b17f3cf669071e398c58", "f8ed5f2c71e1a647a82677df24e70cc46d2f12a8", "5a4c6246758c522f68e75491eb65eafda375b701", "1e9f1bbb751fe538dde9f612f60eb946747defaa", "0b78fd881d0f402fd9b773249af65819e48ad36d", "33ec047f1084e290c8a6f516bc75345b6bcf02a0", "52af7625f7e7a0bd9f9d8eeafd631c4d431e67e7", "4c815f367213cc0fb8c61773cd04a5ca8be2c959", "d4026438ce2b92302fa635c05507cf0e888414c0", "fe14d8177cbdb7e5b4085302e6e044f7a4c19cb2", "978b32ff990d636f7e2050bb05b8df7dfcbb42a1", "eb309b11fd2b8d28cbaf7a72a49df14630ed696a", "b6c047ab10dd86b1443b088029ffe05d79bbe257", "a5bf83f99f71e3840f651fbeef9f334d8e75fd75", "64e75f53ff3991099c3fb72ceca55b76544374e5", "f4465454811acb2021a46d84d94fc88e2dda00a6", "1d846934503e2bd7b8ea63b2eafe00e29507f06a", "37105ca0bc1f11fcc7c6b7946603f3d572571d76", "10550ee13855bd7403946032354b0cd92a10d0aa", 
"763b60feaabceebbe9eddfbaa0378b8b454327aa", "bd25c4ad7471580ed9787eae041b80a3c4fe97bb", "41c8e222ebb26e72050f5d26c82f25d7618b700f", "858901405086056361f8f1839c2f3d65fc86a748", "46976097c54e86032932d559c8eb82ffea4bb6bb", "b6c00e51590c48a48fae51385b3534c4d282f76c", "96e0b67f34208b85bd90aecffdb92bc5134befc8", "eab53c9e3e8442050aa6ad97003f2356a365adaa", "c27f64eaf48e88758f650e38fa4e043c16580d26", "ae1de0359f4ed53918824271c888b7b36b8a5d41", "258a8c6710a9b0c2dc3818333ec035730062b1a5", "880be65e233d4302744e2154b2ef172291ee9779", "e9d1b3767c06c896f89690deea7a95401ae4582b", "3f4c262d836b2867a53eefb959057350bf7219c9", "5ce2cb4c76b0cdffe135cf24b9cda7ae841c8d49", "96ec76d2579a3b877019e715da58d8c47d343399", "f963967e52a5fd97fa3ebd679fd098c3cb70340e", "dff38cac0a1004037024f0ed2a72f76f4e49318b", "23ecc496eaa238ac884e6bae5763f6138a9c90a3", "66b9d954dd8204c3a970d86d91dd4ea0eb12db47", "2d05e768c64628c034db858b7154c6cbd580b2d5", "12ded6a869b4e21149452234140257019af9494d", "5f453a35d312debfc993d687fd0b7c36c1704b16", "75908b6460eb0781130ed0aa94585be25a584996", "c696c9bbe27434cb6279223a79b17535cd6e88c8", "530ce1097d0681a0f9d3ce877c5ba31617b1d709", "da54a3d6dc5827abba96edf5ec1e6791ad05760b", "fa72e39971855dff6beb8174b5fa654e0ab7d324", "ca0363d29e790f80f924cedaf93cb42308365b3d", "88535dba55b0a80975df179d31a6cc80cae1cc92", "2293413ebd24e377c1785113b695cc8a918a5fdb", "6d618657fa5a584d805b562302fe1090957194ba", "1050cd9bf281d0b7367c03d931e6e0b4fc08ccd3", "0badf61e8d3b26a0d8b60fe94ba5c606718daf0b", "78f57e5e23ca40af858e6e97ebecb694036bd8a8", "7c457c9a658327af6f6490729b4cab1239c22005", "1d51b256af68c5546d230f3e6f41da029e0f5852", "b11df79c812ff7ea63f7c93ec8eafefc3fd04f7e", "2d2fb01f761d21a459cfb34935bc47ab45a9913b", "3960882a7a1cd19dfb711e35a5fc1843ed9002e7", "4d83a25931ff8f73130a4d07e0209fcb3191db4b", "9101363521de0ec1cf50349da701996e4d1148c8", "526c79c6ce39882310b814b7918449d48662e2a9", "1181f1146db7170b09f28f7cc51c42c63547d84b", "2b43100a13811b33cc9f905fa1334bfd8b1873ba", 
"43ae4867d058453e9abce760ff0f9427789bab3a", "55eb7ec9b9740f6c69d6e62062a24bfa091bbb0c", "a2b76ab614d92f5e71312b530f0b6281d0c500f7", "beb4546ae95f79235c5f3c0e9cc301b5d6fc9374", "f1d090fcea63d9f9e835c49352a3cd576ec899c1", "cb8382f43ce073322eba82809f02d3084dad7969", "af8e22ef8c405f9cc9ad26314cb7a9e7d3d4eec2", "f5acfc4c017447ea94c9d9cb19a9f1fcd4aa51e6", "7f205b9fca7e66ac80758c4d6caabe148deb8581", "8ccbbd9da0749d96f09164e28480d54935ee171c", "7f59657c883f77dc26393c2f9ed3d19bdf51137b", "5a5f0287484f0d480fed1ce585dbf729586f0edc", "7825708552c86079d0d11f48033ced391c0754ce", "d0a21f94de312a0ff31657fd103d6b29db823caa", "6e782073a013ce3dbc5b9b56087fd0300c510f67", "f78863f4e7c4c57744715abe524ae4256be884a9", "9fb701dd40e35a6abc973b6d89a455de45dd8616", "8d3fbdb9783716c1832a0b7ab1da6390c2869c14", "a46283e90bcdc0ee35c680411942c90df130f448", "90d735cffd84e8f2ae4d0c9493590f3a7d99daf1", "f18ff597bbfca10f84d017ac5e1ef0de6d7ad66c", "e496d6be415038de1636bbe8202cac9c1cea9dbe", "8b2704a5218a6ef70e553eaf0a463bd55129b69d", "363ca0a3f908859b1b55c2ff77cc900957653748", "1b02b9413b730b96b91d16dcd61b2420aef97414", "4d530a4629671939d9ded1f294b0183b56a513ef", "bf0836e5c10add0b13005990ba019a9c4b744b06", "c5f1ae9f46dc44624591db3d5e9f90a6a8391111", "19c82eacd77b35f57ac8815b979716e08e3339ca", "dd033d4886f2e687b82d893a2c14dae02962ea70", "25e05a1ea19d5baf5e642c2a43cca19c5cbb60f8", "256ef946b4cecd8889df8d799d0c9175ae986af9", "055530f7f771bb1d5f352e2758d1242408d34e4d", "82c303cf4852ad18116a2eea31e2291325bc19c3", "0322e69172f54b95ae6a90eb3af91d3daa5e36ea", "1f02bf412a82ad99fe99dc3cfb3adec9dd41eabb", "fdbc602a749ef070a7ac11c78dc8d468c0b60154", "7918e3e15099b4b2943746e1f6c9e3992a79c5f3", "099053f2cbfa06c0141371b9f34e26970e316426", "cde7901c0945683d0c677b1bb415786e4f6081e6", "c1c253a822f984de73f02d6a29c8c7cadc8f090c", "6cb4c7f52fbe386a4ab06d5ca61a11d69abba0e4", "0ec67c69e0975cfcbd8ba787cc0889aec4cc5399", "b87db5ac17312db60e26394f9e3e1a51647cca66", "57f4e54a63ef95596dbc743f391c3fff461f278b", 
"7887824e9cc42914165dd3d96b956bff7560e4e4", "39ce2232452c0cd459e32a19c1abe2a2648d0c3f", "77db171a523fc3d08c91cea94c9562f3edce56e1", "5b8237ae83bc457e3b29e7209126f61120fba082", "0580edbd7865414c62a36da9504d1169dea78d6f", "bf54b5586cdb0b32f6eed35798ff91592b03fbc4", "339937141ffb547af8e746718fbf2365cc1570c8", "8a12934c4cb793c6f1e40129f37847414c1cc5c0", "72160aae43cd9b2c3aae5574acc0d00ea0993b9e", "d3f40b393e0e6a88ae4b4072e01ddb0b420300af", "40c9dce0a4c18829c4100bff5845eb7799b54ca1", "874713dfa7ba8b3ffcc47ed5f8b60849d77f6ea8", "066d71fcd997033dce4ca58df924397dfe0b5fd1", "3ce2ecf3d6ace8d80303daf67345be6ec33b3a93", "819c93dfe531ad6aba71cd48942c9e07b7a89b1b", "324f39fb5673ec2296d90142cf9a909e595d82cf", "141768ab49a5a9f5adcf0cf7e43a23471a7e5d82", "80a6bb337b8fdc17bffb8038f3b1467d01204375", "5217ab9b723158b3ba2235e807d165e72fd33007", "1eb48895d86404251aa21323e5a811c19f9a55f9", "628f9c1454b85ff528a60cd8e43ec7874cf17931", "dc550f361ae82ec6e1a0cf67edf6a0138163382e", "3813a77005fcc87e1a65c272c9c7a9a87c80c000", "32df63d395b5462a8a4a3c3574ae7916b0cd4d1d", "6b7f7817b2e5a7e7d409af2254a903fc0d6e02b6", "601834a4150e9af028df90535ab61d812c45082c", "5551a03353f571b552125dd4ee57301b69a10c46", "fba464cb8e3eff455fe80e8fb6d3547768efba2f", "d7bd37920a3a4a4d681151131e23a839695c8d5b", "a6f81619158d9caeaa0863738ab400b9ba2d77c2", "0614cafad1b546faa7e99c67c9bda6bae2cacb5e", "1ad780e02edf155c09ea84251289a054b671b98a", "d44ca9e7690b88e813021e67b855d871cdb5022f", "081a431107eb38812b74a8cd036ca5e97235b499", "751b26e7791b29e4e53ab915bfd263f96f531f56", "865d4ce1751ff3c0a8eb41077a9aa7bd94603c47", "64782a2bc5da11b1b18ca20cecf7bdc26a538d68", "831a64f59944fa05f023288f284325429026e4e8", "10e12d11cb98ffa5ae82343f8904cfe321ae8004", "7ef44b7c2b5533d00001ae81f9293bdb592f1146", "17d03da4db3bb89537d644b682b2a091d563af4a", "be8c517406528edc47c4ec0222e2a603950c2762", "6180bc0816b1776ca4b32ced8ea45c3c9ce56b47", "67e6ddce6fea17bb2b171c949ee224936d36c0d1", "8d1adf0ac74e901a94f05eca2f684528129a630a", 
"9e5c2d85a1caed701b68ddf6f239f3ff941bb707", "475e16577be1bfc0dd1f74f67bb651abd6d63524", "e6c8f5067ec2ad6af33745312b45fab03e7e038b", "d0f9143f6f43a39bff47daf8c596681581db72ea", "6ca7a82ec1c51417c4f0b8eebddb53a73a3874b1", "142e233adceed9171f718a214a7eba8497af4324", "a125bc55bdf4bec7484111eea9ae537be314ec62", "512befa10b9b704c9368c2fbffe0dc3efb1ba1bf", "9686dcf40e6fdc4152f38bd12b929bcd4f3bbbcc", "3e69ed088f588f6ecb30969bc6e4dbfacb35133e", "f78fe101b21be36e98cd3da010051bb9b9829a1e", "dae420b776957e6b8cf5fbbacd7bc0ec226b3e2e", "78d4d861c766af2a8da8855bece5da4e6eed2e1c", "44dd150b9020b2253107b4a4af3644f0a51718a3", "ee1465cbbc1d03cb9eddaad8618a4feea78a01ce", "8aae23847e1beb4a6d51881750ce36822ca7ed0b", "80e9c28c369a6c49f9dd10473c663a25dc9716d5", "600075a1009b8692480726c9cff5246484a22ec8", "6e93fd7400585f5df57b5343699cb7cda20cfcc2", "26c884829897b3035702800937d4d15fef7010e4", "f8ba921670c94ed94d94a98d64f38b857b0dc104", "f6742010372210d06e531e7df7df9c01a185e241", "c7685fdbee2d96ef056a89ab4fa43df5aeae7ba7", "52f23e1a386c87b0dab8bfdf9694c781cd0a3984", "68996c28bc050158f025a17908eb4bc805c3ee55", "41f195f421b548357088c2985077d6b14003ce7e", "eed7920682789a9afd0de4efd726cd9a706940c8", "6316a4b689706b0f01b40f9a3cef47b92bc52411", "6f957df9a7d3fc4eeba53086d3d154fc61ae88df", "aca728cab26b95fbe04ec230b389878656d8af5b", "0359f7357ea8191206b9da45298902de9f054c92", "bf1ebcaad91c2c0ed35544159415b3ad388cc7a9", "3db6fd6a0e9bb30f2421e84ee5e433683d17d9c1", "76640cb1a683a479ce2e0d6681d821ff39126d63", "c49aed65fcf9ded15c44f9cbb4b161f851c6fa88", "757e4cb981e807d83539d9982ad325331cb59b16", "e5eb7fa8c9a812d402facfe8e4672670541ed108", "63c109946ffd401ee1195ed28f2fb87c2159e63d", "9294739e24e1929794330067b84f7eafd286e1c8", "4414a328466db1e8ab9651bf4e0f9f1fe1a163e4", "d185f4f05c587e23c0119f2cdfac8ea335197ac0", "8b6fded4d08bf0b7c56966b60562ee096af1f0c4", "c3dc4f414f5233df96a9661609557e341b71670d", "cdd30bd77c7a4fa21176a21498f65f6b8b873965", "e8f4a4e0fe0b2f0054b44b947828d71e10ec61a7", 
"c3bcc4ee9e81ce9c5c0845f34e9992872a8defc0", "8bf57dc0dd45ed969ad9690033d44af24fd18e05", "7935f644c8044c0d3b81e2842e5ecc3672698bbb", "3f4bfa4e3655ef392eb5ad609d31c05f29826b45", "b85b754ace15f4e9bee4ee76296580ddfbc3a11e", "46f32991ebb6235509a6d297928947a8c483f29e", "52012b4ecb78f6b4b9ea496be98bcfe0944353cd", "ea3fa5e6004c0504feaa31e01b2ea19f138e9a78", "d65b82b862cf1dbba3dee6541358f69849004f30", "66533107f9abdc7d1cb8f8795025fc7e78eb1122", "a92e24c8c53e31fc444a13bd75b434b7207c58f1", "2045fe2f21c30f364d6e699ea0bf0ea21d7f460e", "72ffcc5b654b2468b9eff761279b29164f1df5d9", "f740bac1484f2f2c70777db6d2a11cf4280081d6", "b4ee1b468bf7397caa7396cfee2ab5f5ed6f2807", "eedb2c34c36017b9c5aa6ce8bff2ab152e713cee", "fad895771260048f58d12158a4d4d6d0623f4158", "0abc13166e4a098fc34d4c708f3349fdd8f6f4c6", "2e9e07b871e7703c60d6849282174d99977ccea7", "b3f3d6be11ace907c804c2d916830c85643e468d", "b3330adb131fb4b6ebbfacce56f1aec2a61e0869", "60284c37249532fe7ff6b14834a2ae4d2a7fda02", "b728e7db6e5559a77dc59381bfb8df96d482a721", "496d62741e8baf3859c24bb22eaccd3043322126", "1ef5ce743a44d8a454dbfc2657e1e2e2d025e366", "00d0f2ec2036fb26ffcf882eb0aa47da0693192e", "25d3e122fec578a14226dc7c007fb1f05ddf97f7", "26947c3ead54e571286fdea25f1fc4d121817850", "237eba4822744a9eabb121fe7b50fd2057bf744c", "455204fa201e9936b42756d362f62700597874c4", "21ef129c063bad970b309a24a6a18cbcdfb3aff5", "5db075a308350c083c3fa6722af4c9765c4b8fef", "1b5acd1736f18e4fa202d88a80f774c6deea5733", "43bb2b58f906262035ef61e41768375bc8d99ae3", "43bb20ccfda7b111850743a80a5929792cb031f0", "a76969df111f9ee9f0b898b51ad23a721d289bdc", "b7a0c70a320c1ac3e92f4bf0b50a7d8ceb757c41", "b71d1aa90dcbe3638888725314c0d56640c1fef1", "9110c589c6e78daf4affd8e318d843dc750fb71a", "38eea307445a39ee7902c1ecf8cea7e3dcb7c0e7", "8ac2d704f27a2ddf19b40c8e4695da629aa52a54", "71a9d7cf8cf1e206cb5fa18795f5ab7588c61aba", "14e428f2ff3dc5cf96e5742eedb156c1ea12ece1", "c66ecbae0f2bfa7cdbf5082fb8f0567878b4a599", "9e5acdda54481104aaf19974dca6382ed5ff21ed", 
"4c5b38ac5d60ab0272145a5a4d50872c7b89fe1b", "516f784f145390e22cb4607cb525175ff4c7109b", "2bae810500388dd595f4ebe992c36e1443b048d2", "5e99b49b4c5fb2a72392ea199edacd650bd122c5", "dc34ab49d378ddcf6c8e2dbf5472784c5bfa8006", "b89862f38fff416d2fcda389f5c59daba56241db", "c4541802086461420afb1ecb5bb8ccd5962a9f02", "681d222f91b12b00e9a4217b80beaa11d032f540", "f8ec92f6d009b588ddfbb47a518dd5e73855547d", "9aba281955117eb4a7aed36775f55f27e4dde42f", "cc713a92d8a3aff6f1586923ca9ba267d5e89251", "0eff410cd6a93d0e37048e236f62e209bc4383d1", "dee36d438d7dcb5923ab63dfe1e8676726dd4d69", "794ddb1f3b7598985d4d289b5b0664be736a50c4", "f3f77b803b375f0c63971b59d0906cb700ea24ed", "4e1836914bbcf94dc00e604b24b1b0d6d7b61e66", "b43b6551ecc556557b63edb8b0dc39901ed0343b", "2465fc22e03faf030e5a319479a95ef1dfc46e14", "83ac942d71ba908c8d76fc68de6173151f012b38", "2f837ff8b134b785ee185a9c24e1f82b4e54df04", "0629bc2b12245195af989e21573369329b7ef2b7", "984edce0b961418d81203ec477b9bfa5a8197ba3", "0d9815f62498db21f06ee0a9cc8b166acc93888e", "798e58c181f3ba3aecbe41acd1881860c5e2df3a", "fd5376fcb09001a3acccc03159e8ff5801129683", "2a7bca56e2539c8cf1ae4e9da521879b7951872d", "2564848f094f7c1cd5e599aa907947b10b5c7df2", "57d37ad025b5796457eee7392d2038910988655a", "d7b8f285b0701ba7b1a11d1c7dd3d1e7e304083f", "ecca2a2b84ea01ea425b8d2d9f376f15a295a7f5", "c82c147c4f13e79ad49ef7456473d86881428b89", "411ee9236095f8f5ca3b9ef18fd3381c1c68c4b8", "e3a70f8ee84af6372b482c0b8b6e8e553dd0e1e5", "0d3882b22da23497e5de8b7750b71f3a4b0aac6b", "11f8d0a54e55c5e6537eef431cd548fa292ef90b", "ac2e166c76c103f17fdea2b4ecb137200b8d4703", "179564f157a96787b1b3380a9f79701e3394013d", "1a7a17c4f97c68d68fbeefee1751d349b83eb14a", "677ebde61ba3936b805357e27fce06c44513a455", "3f623bb0c9c766a5ac612df248f4a59288e4d29f", "accbd6cd5dd649137a7c57ad6ef99232759f7544", "eeb6d084f9906c53ec8da8c34583105ab5ab8284", "f781e50caa43be13c5ceb13f4ccc2abc7d1507c5", "4a484d97e402ed0365d6cf162f5a60a4d8000ea0", "9a276c72acdb83660557489114a494b86a39f6ff", 
"7bce4f4e85a3bfcd6bfb3b173b2769b064fce0ed", "ee6e4324123b99d94a7a23d9bddf026f39903693", "7ed5af241061a6d88e0632a51a91d59627b00c34", "a158c1e2993ac90a90326881dd5cb0996c20d4f3", "a7da7e5a6a4b53bf8736c470ff8381a654e8c965", "763158cef9d1e4041f24fce4cf9d6a3b7a7f08ff", "27173d0b9bb5ce3a75d05e4dbd8f063375f24bb5", "f1e44e64957397d167d13f8f551cae99e5c16c75", "9696b172d66e402a2e9d0a8d2b3f204ad8b98cc4", "87552622efd0e85c2a71d4d2590e53d45f021dbf", "7c54240c23d42703ddc85089d167f4985614cc3a", "ad50f6899103eff0ee4504e539c38eb965fd1309", "fcb276874cd932c8f6204f767157420500c64bd0", "4d6c3a3f9410ca35eb3389ec7088f5e2c16ec3ea", "bf00071a7c4c559022272ca5d39e07f727ebb479", "72d110df78a7931f5f2beaa29f1eb528cf0995d3", "27b1670e1b91ab983b7b1ecfe9eb5e6ba951e0ba", "6159908dec4bc2c1102f416f8a52a31bf3e666a4", "7f268f29d2c8f58cea4946536f5e2325777fa8fa", "c180f22a9af4a2f47a917fd8f15121412f2d0901", "acff2dc5d601887741002a78f8c0c35a799e6403", "1eba6fc35a027134aa8997413647b49685f6fbd1", "3288e16c62a215254e2ed7c39675482b356c3bef", "184fc019bbec7f07bd9e34406f95f07faf7ed96f", "9abf6d56a7d336bc58f4e3328d2ee807032589f1", "1e7ae86a78a9b4860aa720fb0fd0bdc199b092c3", "184750382fe9b722e78d22a543e852a6290b3f70", "363f540dc82ba8620262a04a67cfd6d3c85b0582", "694bdadb720d4237b701a5c8c10417843ed89c6f", "cef6cffd7ad15e7fa5632269ef154d32eaf057af", "fa9610c2dc7e2a79e0096ac033b11508d8ae7ed7", "13afc4f8d08f766479577db2083f9632544c7ea6", "6ee2ea416382d659a0dddc7a88fc093accc2f8ee", "e57108607d94aa158eb22ae50540ae6080e48d4b", "2574860616d7ffa653eb002bbaca53686bc71cdd", "1467c4ab821c3b340abe05a1b13a19318ebbce98", "52885fa403efbab5ef21274282edd98b9ca70cbf", "c2c5206f6a539b02f5d5a19bdb3a90584f7e6ba4", "900207b3bc3a4e5244cae9838643a9685a84fee0", "cd3005753012409361aba17f3f766e33e3a7320d", "c23bd1917badd27093c8284bd324332b8c45bfcf", "a75edf8124f5b52690c08ff35b0c7eb8355fe950", "f3fed71cc4fc49b02067b71c2df80e83084b2a82", "b1f4423c227fa37b9680787be38857069247a307", "047ce307ad0c871bc2c9a5c1e4649cefae2ba50d", 
"443f4421e44d4f374c265e6f2551bf9830de5597", "3645d85ccd5bb7ce5df8d24e6ddb358eb1656df5", "ff946df1cea6c107b2c336419c34ea69cc3ddbc4", "cef841f27535c0865278ee9a4bc8ee113b4fb9f3", "268c4bb54902433bf00d11391178a162e5d674c9", "f6311d6b3f4d3bd192d866d2e898c30eea37d7d5", "3403cb92192dc6b2943d8dbfa8212cc65880159e", "f14403d9d5fbc4c6e8aeb7505b5d887c50bad8a4", "638e0d6f9f5d714d8a0edcf65297e8735b30db71", "7b618a699b79c1272f6c83101917ad021a58d96b", "9a84588fe7e758cfbe7062686a648fab787fc32f", "4ffd744a5f079c2d65f36e3ee0979b978f522a13", "c74aba9a096379b3dbe1ff95e7af5db45c0fd680", "9c2f20ed168743071db6268480a966d5d238a7ee", "583e0d218e1e7aaf9763a5493e7c18c2b8dd7464", "a3a97bb5131e7e67316b649bbc2432aaa1a6556e", "6b06b79ad1f1907e21380083b976b24a89a0f743", "8f99f7ccb85af6d4b9e015a9b215c529126e7844", "3409aa0ae519ee18043e347e60d85e53e452650a", "d9bad7c3c874169e3e0b66a031c8199ec0bc2c1f", "bc866c2ced533252f29cf2111dd71a6d1724bd49", "eb3c45e78acee0824c8f7d997c6104d74e7213a8", "ea6f5c8e12513dbaca6bbdff495ef2975b8001bd", "01cc8a712e67384f9ef9f30580b7415bfd71e980", "41971dfbf404abeb8cf73fea29dc37b9aae12439", "d4ec62efcc631fa720dfaa1cbc5692b39e649008", "857544746a1d1071739d98718df51936a3488737", "3d68cedd80babfbb04ab197a0b69054e3c196cd9", "86bbead2fb5b77ceff7994be9474648672f244d9", "ebb1c29145d31c4afa3c9be7f023155832776cd3", "56bcc89fb1e05d21a8b7b880c6b4df79271ceca5", "0cc96359b1edba28d33fe9e663079c5674744672", "3fac7c60136a67b320fc1c132fde45205cd2ac66", "646c38494aa960c1c120c26619473f5968e5dc34", "9990e0b05f34b586ffccdc89de2f8b0e5d427067", "62c2d21f78fb89a11b436ab6ca9acd9abca145be", "e1c59e00458b4dee3f0e683ed265735f33187f77", "2360ecf058393141ead1ca6b587efa2461e120e4", "6dcf6b028a6042a9904628a3395520995b1d0ef9", "0b58b3a5f153f653c138257426bf8d572ae35a67", "02e668f9b75f4a526c6fdf7268c8c1936d8e6f09", "176f26a6a8e04567ea71677b99e9818f8a8819d0", "8dd3f05071fd70fb1c349460b526b0e69dcc65bf", "c9e955cb9709f16faeb0c840f4dae92eb875450a", "43d7d0d0d0e2d6cf5355e60c4fe5b715f0a1101a", 
"6ee64c19efa89f955011531cde03822c2d1787b8", "86f8e6310d114bb24deb971e8bc7089df6ac3b57", "0ca36ecaf4015ca4095e07f0302d28a5d9424254", "d7c87f4ca39f79d93c954ffacac32bc6eb527e2c", "312b2566e315dd6e65bd42cfcbe4d919159de8a1", "74d3ff8324e02503c18fb2566ed29e2e22ce0d1b", "d8e5d94c3c8688f0ca0ee656c79847c7df04c77d", "df1a10668eaad727ec3fdf0d5df405bbe29392c9", "e59813940c5c83b1ce63f3f451d03d34d2f68082", "5b4bbba68053d67d12bd3789286e8a9be88f7b9d", "d893f75206b122973cdbf2532f506912ccd6fbe0", "1bcb1c6d6cebc9737f9933fcefbf3da8a612f994", "1a03dcc811131b0b702bd5a75c54ed26cd27151a", "44eb4d128b60485377e74ffb5facc0bf4ddeb022", "158e32579e38c29b26dfd33bf93e772e6211e188", "99e0c03686f7bc9d7add6cff39a941a047c3600a", "052fb35f731680d9d4e7d89c8f70f14173efb015", "02c38fa9a8ada6040ef21de17daf8d5e5cdc60c7", "5ca23ceb0636dfc34c114d4af7276a588e0e8dac", "45e043dffc57a9070f483ac4aec2c5cd2cec22cb", "37ef18d71c1ca71c0a33fc625ef439391926bfbb", "d57c8d46a869c63fb20e33bc21bc2a3c4628f5b4", "3c2b6282811c3077b7807d84068e6a879d163854", "33a1a049d15e22befc7ddefdd3ae719ced8394bf", "dab51ce14f59d552c0fc5c13b37ca64cae8d0164", "cce2f036d0c5f47c25e459b2f2c49fa992595654", "383ff2d66fecdc2fd02a31ac1fa392f48e578296", "dd8d09eab82d7ec4457317d9f9427122d2ffb649", "9bc01fa9400c231e41e6a72ec509d76ca797207c", "cbbd9880fb28bef4e33da418a3795477d3a1616e", "a1a5143a962ab3dc6f2a0d5300cde71d9f087404", "cbdcc28d36f1135d235b5067383b25dcac5d2ff3", "0aa9872daf2876db8d8e5d6197c1ce0f8efee4b7", "d569c3e62f471aa75ed53e631ec05c1a3d594595", "86904aee566716d9bef508aa9f0255dc18be3960", "47ca2df3d657d7938d7253bed673505a6a819661", "dd3181c229819679186056cdfe94a772929ca758", "6eece104e430829741677cadc1dfacd0e058d60f", "23ba9e462151a4bf9dfc3be5d8b12dbcfb7fe4c3", "86d1fbaecd02b44309383830e6d985dc09e786aa", "2983efadb1f2980ab5ef20175f488f77b6f059d7", "93cbb3b3e40321c4990c36f89a63534b506b6daf", "982f5c625d6ad0dac25d7acbce4dabfb35dd7f23", "675b1fd2aaebe9c62be6b22b9ac6d278193cc581", "7d61b70d922d20c52a4e629b09465076af71ddfd", 
"e4bf70e818e507b54f7d94856fecc42cc9e0f73d", "782eee555067b2d6d24db87775e1ded5fb047491", "2d25045ec63f9132371841c0beccd801d3733908", "c3390711f5ce6f5f0728ef88c54148bf9d8783a2", "42765c170c14bd58e7200b09b2e1e17911eed42b", "47aeb3b82f54b5ae8142b4bdda7b614433e69b9a", "f2f731feb9d376ac50b3347a93e73a0d6528cdd9", "e0cc2a9fe6b5086c55fdbf0021aca3dc1a77a1ca", "a703d51c200724517f099ee10885286ddbd8b587", "21ec41a6ee3c655cf54c6db659d56480fc76e742", "3ba8f8b6bfb36465018430ffaef10d2caf3cfa7e", "2e6776cd582c015b46faf616f29c98ce9cff51a2", "0c66d6162695ecbfc248074f58ced10d70a359ac", "1b79628af96eb3ad64dbb859dae64f31a09027d5", "aeeea6eec2f063c006c13be865cec0c350244e5b", "6592dcd17fc4df707020904cf5ff0927684f9f23", "468bb5344f74842a9a43a7e1a3333ebd394929b4", "3e4b38b0574e740dcbd8f8c5dfe05dbfb2a92c07", "fb557b79157a6dda15f3abdeb01a3308528f71f2", "b5ae8b69677fb962421fe7072f1e842e71f3bea5", "fdf533eeb1306ba418b09210387833bdf27bb756", "e1c50cf0c08d70ff90cf515894b2b360b2bc788b", "2ed3ce5cf9e262bcc48a6bd998e7fb70cf8a971c", "ceba8ca45bad226c401a509e6b8ccbf31361b0c9", "3bf690a6e2751b23bd8ae65c2ad133b249840bf9", "5d44c675addcb6e74cbc5a9c48df0d754bdbcd98", "ad9cb522cc257e3c5d7f896fe6a526f6583ce46f", "8f3e3f0f97844d3bfd9e9ec566ac7a54f6931b09", "111d0b588f3abbbea85d50a28c0506f74161e091", "18b344b5394988544c386783e7bb8e73e0466e0e", "fb3aaf18ea07b30d1836e7cf2ab9fa898627fe93", "998244a44f90b3b569f9c93226df70239818ead9", "d522c162bd03e935b1417f2e564d1357e98826d2", "516f8728ad1d4f9f2701a2b5385f8c8e71b9d356", "405cf40f3ce74210f7e9862b2b828ce002b409ed", "22717ad3ad1dfcbb0fd2f866da63abbde9af0b09", "0435a34e93b8dda459de49b499dd71dbb478dc18", "75e9a141b85d902224f849ea61ab135ae98e7bfb", "fde611bf25a89fe11e077692070f89dcdede043a", "948f35344e6e063ffc35f10c547d5dd9204dee4e", "2ac7bb3fb014d27d3928a9b4bc1bf019627e0c1a", "516a27d5dd06622f872f5ef334313350745eadc3", "08ae100805d7406bf56226e9c3c218d3f9774d19", "8323af714efe9a3cadb31b309fcc2c36c8acba8f", "5865e824e3d8560e07840dd5f75cfe9bf68f9d96", 
"1e64b2d2f0a8a608d0d9d913c4baee6973995952", "b5bda4e1374acc7414107cde529ad8b3263fae4b", "a52c72cd8538c62156aaa4d7e5c54946be53b9bb", "feb0bd4ad219dc5005da84561b97ae53f4207440", "01729cb766b1016bac217a6a6cf24bbde19f56c8", "6dd052df6b0e89d394192f7f2af4a3e3b8f89875", "7975f12187a7686d861054649845ccc634c3b00f", "a5e436bb88ff28c68f981308faefd6eee48b9c8b", "cf98565a19ec05a63dbaf650660b7c3f72de7b2b", "a3eab933e1b3db1a7377a119573ff38e780ea6a3", "5dce578c8bc819592c9ec7bfab6302bbcd9a3f3d", "0cb7e4c2f6355c73bfc8e6d5cdfad26f3fde0baf", "5ab96ace21bf54625f3d18ea11801f540519bd3a", "b8048a7661bdb73d3613fde9d710bd45a20d13e7", "7c119e6bdada2882baca232da76c35ae9b5277f8", "e0bfcf965b402f3f209f26ae20ee88bc4d0002ab", "99a1180c3d39532efecfc5fa251d6893375c91a1", "0694b05cbc3ef5d1c5069a4bfb932a5a7b4d5ff0", "b69e7e2a7705a58a0e3f1b80ae542907b89ce02e", "bff77a3b80f40cefe79550bf9e220fb82a74c084", "8c955f3827a27e92b6858497284a9559d2d0623a", "1134a6be0f469ff2c8caab266bbdacf482f32179", "c3cfbd03efca980431e17fcbc507962377821681", "af7553d833886663550ce83b087a592a04b36419", "dd0760bda44d4e222c0a54d41681f97b3270122b", "067fe74aec42cb82b92cf6742c7cfb4a65f16951", "3a6334953cd2775fab7a8e7b72ed63468c71dee7", "497bf2df484906e5430aa3045cf04a40c9225f94", "ec983394f800da971d243f4143ab7f8421aa967c", "a45e6172713a56736a2565ddea9cb8b1d94721cd", "7c9a65f18f7feb473e993077d087d4806578214e", "f9d9b2a1197cdb73e977490756c0ff8a30cafc3e", "800cbbe16be0f7cb921842d54967c9a94eaa2a65", "81146c567fa5a3c83778c1c940780d00706fa2bf", "966cf4ca224e239a7192f9e79b60cc88aa604e27", "c54f9f33382f9f656ec0e97d3004df614ec56434", "d33fcdaf2c0bd0100ec94b2c437dccdacec66476", "eefdb69ac2c461e7791603d0f8c02ff3c8600adc", "816bd8a7f91824097f098e4f3e0f4b69f481689d", "50eb2ee977f0f53ab4b39edc4be6b760a2b05f96", "b13b101b6197048710e82f044ad2eda6b93affd8", "b6c83e6706a9931a2670bc686485d76b67cb92ea", "12226bca7a891e25b7d1e1a34a089521bba75731", "00af9945a3401bdad3cffa89f7e5a15660399282", "352c53e56c52a49d33dcdbec5690c2ba604b07d0", 
"971cb1bfe3d10fcb2037e684c48bd99842f42fa4", "1eeb39d618f5fab243dd07b955a8e0e722f6dfdb", "ec28217290897a059348dcdf287540a2e2c68204", "e726acda15d41b992b5a41feabd43617fab6dc23", "e96540252f2f83e394012d653452411efb9f744f", "cf5a0115d3f4dcf95bea4d549ec2b6bdd7c69150", "6eb1b5935b0613a41b72fd9e7e53a3c0b32651e9", "34301fbf4624139a40176dbde6f5954b2df6de7b", "a44590528b18059b00d24ece4670668e86378a79", "8184a92e1ccc7fdeb4a198b226feb325c63d6870", "cc8e378fd05152a81c2810f682a78c5057c8a735", "a5b6a3234e15343d2e5417cff46c0a5f0943521e", "41b38da2f4137c957537908f9cb70cbd2fac8bc1", "6345c0062885b82ccb760c738a9ab7fdce8cd577", "640e12837241d52d04379d3649d050ee3760048c", "5c473cfda1d7c384724fbb139dfe8cb39f79f626", "978a219e07daa046244821b341631c41f91daccd", "1a71f9af98228f4d2b15cfaf415321813e29b087", "48de3ca194c3830daa7495603712496fe908375c", "bddc822cf20b31d8f714925bec192c39294184f7", "f6f2a212505a118933ef84110e487551b6591553", "924b14a9e36d0523a267293c6d149bca83e73f3b", "58538cc418bf41197fad4fc4ee2449b2daeb08b1", "022ec7d1642727b2cc3d9a9d7999ca84a280443f", "44aeda8493ad0d44ca1304756cc0126a2720f07b", "dbd958ffedc3eae8032be67599ec281310c05630", "dea409847d52bb0ad54bf586cb0482a29a584a7e", "2a9b398d358cf04dc608a298d36d305659e8f607", "429d4848d03d2243cc6a1b03695406a6de1a7abd", "90f4b20f4b7115cb84dda22e5e4eb9c50d7fddce", "053ee4a4793f54b02dfabde5436fd7ee479e79eb", "650bfe7acc3f03eb4ba91d9f93da8ef0ae8ba772", "b7128e0fe18dcb42e8a2ac5cf6794f64a8e37bd0", "62750d78e819d745b9200b0c5c35fcae6fb9f404", "bd0201b32e7eca7818468f2b5cb1fb4374de75b9", "03167776e17bde31b50f294403f97ee068515578", "08ff81f3f00f8f68b8abd910248b25a126a4dfa4", "98af221afd64a23e82c40fd28d25210c352e41b7", "14bdd23ea8f4f6d7f4c193e5cbb0622362e12ae1", "0d735e7552af0d1dcd856a8740401916e54b7eee", "59f788c69c2ce520fd6f0b80d01aca72f7f8d859", "cba45a87fc6cf12b3b0b6f57ba1a5282ef7fee7a", "b84dde74dddf6a3281a0b22c68999942d2722919", "8c37bd06e1a637c6f249dcd1d2c4bc9589ae24b3", "afef2b1d35fb807f422cfec0a370f7d08d4651d1", 
"8323529cf37f955fb3fc6674af6e708374006a28", "9ccaa13a577b20e88420d0a4b8c9545d5560261d", "8b4124bb68e5b3e6b8b77888beae7350dc594a40", "61831364ddc8db869618f1c7f0ad35ab2ab6bcf7", "aeb36fac7516753a14c3c690f352de78e70f8c6e", "32e4fc2f0d9c535b1aca95aeb5bcc0623bcd2cf2", "965c4a8087ae208c08e58aaf630ad412ac8ce6e2", "3753b9fcf95b97e2baf952993905cd6dfa8561cb", "831226405bb255527e9127b84e8eaedd7eb8e9f9", "ae7604b1840753e9c2e1ab7a97e02f91a9d81860", "fb915bcc1623cdf999c0e95992c0e0cf85e64d8e", "e26a7e343fe109e2b52d1eeea5b02dae836f3502", "cf6851c24f489dabff0238e01554edea6aa0fc7c", "a33f20773b46283ea72412f9b4473a8f8ad751ae", "bf1e0279a13903e1d43f8562aaf41444afca4fdc", "52c59f9f4993c8248dd3d2d28a4946f1068bcbbe", "b55d0c9a022874fb78653a0004998a66f8242cad", "7ab238c23c6640fe0b23d635d6b5fc38fa4a3b46", "ba83b28ac5ce92ef8437fdd499132823f487ff83", "62f60039a95692baaeaae79a013c7f545e2a6c3d", "3083bd7a442af6a1d72cdc04ae1ad7c30697a4e8", "f935225e7811858fe9ef6b5fd3fdd59aec9abd1a", "58b8588c01196070674ceabe5366b20f73c2912d", "00d931eccab929be33caea207547989ae7c1ef39", "334166a942acb15ccc4517cefde751a381512605", "a7a5d9a2dece15ddbab77b7ecc81294cfa1fafdb", "60bdff71e241f9afc411221bd20aaebb4608576b", "9f6d04ce617d24c8001a9a31f11a594bd6fe3510", "ef35c30529df914a6975af62aca1b9428f678e9f", "6f75697a86d23d12a14be5466a41e5a7ffb79fad", "561bbc758f995894f43351b4267abf9748890705", "c043f8924717a3023a869777d4c9bee33e607fb5", "b3658514a0729694d86a8b89c875a66cde20480c", "3f63f9aaec8ba1fa801d131e3680900680f14139", "96ab0367d0112b6092cc130c330c8c11c2eb8238", "41781474d834c079e8fafea154d7916b77991b15", "1f41bf5e8b8562ac7ef0013f4d0cf1c9e1a431f9", "0f8116b631c17f7adf55df3faafc6f2c316599f6", "ae62c0a4b74ce672e8103dbf6d344d82c59f216c", "9eeada49fc2cba846b4dad1012ba8a7ee78a8bb7", "d22785eae6b7503cb16402514fd5bd9571511654", "9dcc6dde8d9f132577290d92a1e76b5decc6d755", "111a9645ad0108ad472b2f3b243ed3d942e7ff16", "0cbc4dcf2aa76191bbf641358d6cecf38f644325", "ab6886252aea103b3d974462f589b4886ef2735a", 
"0b0eb562d7341231c3f82a65cf51943194add0bb", "65babb10e727382b31ca5479b452ee725917c739", "8fd9c22b00bd8c0bcdbd182e17694046f245335f", "367a786cfe930455cd3f6bd2492c304d38f6f488", "18dd3867d68187519097c84b7be1da71771d01a3", "87610276ccbc12d0912b23fd493019f06256f94e", "59e75aad529b8001afc7e194e21668425119b864", "c34e48d637705ffb52360c2afb6b03efdeb680bf", "84f86f8c559a38752ddfb417e58f98e1f8402f17", "1c6e22516ceb5c97c3caf07a9bd5df357988ceda", "4a4da3d1bbf10f15b448577e75112bac4861620a", "c914d2ba06ec3fd1baa0010dcc4d16c7c34fc225", "5642bafa7955b69f05c11230151cd59fcbe43b8e", "0eb077a3e227b19f032f980d3a3206e4ae15e429", "04f55f81bbd879773e2b8df9c6b7c1d324bc72d8", "43fca653880f4e4d238c73d864e964475e4b90c8", "91d0e8610348ef4d5d4975e6de99bb2d429af778", "334e65b31ad51b1c1f84ce12ef235096395f1ca7", "6dfe0dafb4ed4bcfce670f321e724682ab261060", "3d6ee995bc2f3e0f217c053368df659a5d14d5b5", "27cccf992f54966feb2ab4831fab628334c742d8", "8706c3d49d1136035f298041f03bb70dc074f24d", "a5b9c6aa52f91092b5a8ab04ed1f7b60c0ea5260", "06a9ed612c8da85cb0ebb17fbe87f5a137541603", "5c4ce36063dd3496a5926afd301e562899ff53ea", "e0446d14d25a178702c10752b803966a54b539e4", "f3a59d85b7458394e3c043d8277aa1ffe3cdac91", "b59cee1f647737ec3296ccb3daa25c890359c307", "66dcd855a6772d2731b45cfdd75f084327b055c2", "62007c30f148334fb4d8975f80afe76e5aef8c7f", "380dd0ddd5d69adc52defc095570d1c22952f5cc", "626913b8fcbbaee8932997d6c4a78fe1ce646127", "cfd4004054399f3a5f536df71f9b9987f060f434", "dc13229afbbc8b7a31ed5adfe265d971850c0976", "1ffe20eb32dbc4fa85ac7844178937bba97f4bf0", "10195a163ab6348eef37213a46f60a3d87f289c5", "30b15cdb72760f20f80e04157b57be9029d8a1ab", "14e9158daf17985ccbb15c9cd31cf457e5551990", "fd53be2e0a9f33080a9db4b5a5e416e24ae8e198", "50ff21e595e0ebe51ae808a2da3b7940549f4035", "3ca5d3b8f5f071148cb50f22955fd8c1c1992719", "3c4f6d24b55b1fd3c5b85c70308d544faef3f69a", "ec0104286c96707f57df26b4f0a4f49b774c486b", "b7845e0b0ce17cde7db37d5524ef2a61dee3e540", "13719bbb4bb8bbe0cbcdad009243a926d93be433", 
"d444368421f456baf8c3cb089244e017f8d32c41", "df054fa8ee6bb7d2a50909939d90ef417c73604c", "fffefc1fb840da63e17428fd5de6e79feb726894", "1d776bfe627f1a051099997114ba04678c45f0f5", "6cefb70f4668ee6c0bf0c18ea36fd49dd60e8365", "cb27b45329d61f5f95ed213798d4b2a615e76be2", "2cdc40f20b70ca44d9fd8e7716080ee05ca7924a", "c9c2de3628be7e249722b12911bebad84b567ce6", "d0144d76b8b926d22411d388e7a26506519372eb", "d5b0e73b584be507198b6665bcddeba92b62e1e5", "af6e351d58dba0962d6eb1baf4c9a776eb73533f", "22bebedc1a5f3556cb4f577bdbe032299a2865e8", "8f89aed13cb3555b56fccd715753f9ea72f27f05", "08d41d2f68a2bf0091dc373573ca379de9b16385", "a713a01971e73d0c3118d0409dc7699a24f521d6", "4f1249369127cc2e2894f6b2f1052d399794919a", "2e231f1e7e641dd3619bec59e14d02e91360ac01", "5364e58ba1f4cdfcffb247c2421e8f56a75fad8d", "632fa986bed53862d83918c2b71ab953fd70d6cc", "112780a7fe259dc7aff2170d5beda50b2bfa7bda", "93af335bf8c610f34ce0cadc15d1dd592debc706", "36a3a96ef54000a0cd63de867a5eb7e84396de09", "5f0d4a0b5f72d8700cdf8cb179263a8fa866b59b", "9e0285debd4b0ba7769b389181bd3e0fd7a02af6", "ea227e47b8a1e8f55983c34a17a81e5d3fa11cfd", "975978ee6a32383d6f4f026b944099e7739e5890", "28d99dc2d673d62118658f8375b414e5192eac6f", "c43dc4ae68a317b34a79636fadb3bcc4d1ccb61c", "5f94969b9491db552ffebc5911a45def99026afe", "e7b6887cd06d0c1aa4902335f7893d7640aef823", "28715fc79bd5ff8dd8b6fc68a4f2641e5d1b8a08", "d00e9a6339e34c613053d3b2c132fccbde547b56", "03c1fc9c3339813ed81ad0de540132f9f695a0f8", "288964068cd87d97a98b8bc927d6e0d2349458a2", "aeb6b9aba5bb08cde2aebfeda7ced6c38c84df4a", "56e079f4eb40744728fd1d7665938b06426338e5", "92b61b09d2eed4937058d0f9494d9efeddc39002", "36939e6a365e9db904d81325212177c9e9e76c54", "2957715e96a18dbb5ed5c36b92050ec375214aa6", "5b64584d6b01e66dfd0b6025b2552db1447ccdeb", "2b632f090c09435d089ff76220fd31fd314838ae", "1190cba0cae3c8bb81bf80d6a0a83ae8c41240bc", "f77c9bf5beec7c975584e8087aae8d679664a1eb", "7a7b1352d97913ba7b5d9318d4c3d0d53d6fb697", "7f21a7441c6ded38008c1fd0b91bdd54425d3f80", 
"dca2bb023b076de1ccd0c6b8d71faeb3fccb3978", "f5fae7810a33ed67852ad6a3e0144cb278b24b41", "407bb798ab153bf6156ba2956f8cf93256b6910a", "24f022d807352abf071880877c38e53a98254dcd", "d00787e215bd74d32d80a6c115c4789214da5edb", "b8e5800dfc590f82a0f7eedefce9abebf8088d12", "361d6345919c2edc5c3ce49bb4915ed2b4ee49be", "d454ad60b061c1a1450810a0f335fafbfeceeccc", "fc516a492cf09aaf1d319c8ff112c77cfb55a0e5", "2c1f8ddbfbb224271253a27fed0c2425599dfe47", "ed9d11e995baeec17c5d2847ec1a8d5449254525", "493c8591d6a1bef5d7b84164a73761cefb9f5a25", "d5444f9475253bbcfef85c351ea9dab56793b9ea", "86d0127e1fd04c3d8ea78401c838af621647dc95", "d4d1ac1cfb2ca703c4db8cc9a1c7c7531fa940f9", "2b60fe300735ea7c63f91c1121e89ba66040b833", "97540905e4a9fdf425989a794f024776f28a3fa9", "0ba402af3b8682e2aa89f76bd823ddffdf89fa0a", "4560491820e0ee49736aea9b81d57c3939a69e12", "1277b1b8b609a18b94e4907d76a117c9783a5373", "cca476114c48871d05537abb303061de5ab010d6", "c62c07de196e95eaaf614fb150a4fa4ce49588b4", "91a1945b9c40af4944a6cdcfe59a0999de4f650a", "64ec0c53dd1aa51eb15e8c2a577701e165b8517b", "8f9c37f351a91ed416baa8b6cdb4022b231b9085", "a022eff5470c3446aca683eae9c18319fd2406d5", "af6cae71f24ea8f457e581bfe1240d5fa63faaf7", "a81c86cda6f1da2aa09b6737297addd3d4a64ffa", "ec05078be14a11157ac0e1c6b430ac886124589b", "2149d49c84a83848d6051867290d9c8bfcef0edb", "633c851ebf625ad7abdda2324e9de093cf623141", "13179bb3f2867ea44647b6fe0c8fb4109207e9f5", "5366573e96a1dadfcd4fd592f83017e378a0e185", "357963a46dfc150670061dbc23da6ba7d6da786e", "7fab17ef7e25626643f1d55257a3e13348e435bd", "02567fd428a675ca91a0c6786f47f3e35881bcbd", "4562ea84ebfc8d9864e943ed9e44d35997bbdf43", "b44f03b5fa8c6275238c2d13345652e6ff7e6ea9", "523854a7d8755e944bd50217c14481fe1329a969", "51f626540860ad75b68206025a45466a6d087aa6", "6e911227e893d0eecb363015754824bf4366bdb7", "ff60d4601adabe04214c67e12253ea3359f4e082", "4c170a0dcc8de75587dae21ca508dab2f9343974", "7c1cfab6b60466c13f07fe028e5085a949ec8b30", "ff8ef43168b9c8dd467208a0b1b02e223b731254", 
"00b08d22abc85361e1c781d969a1b09b97bc7010", "4c4236b62302957052f1bbfbd34dbf71ac1650ec", "28d06fd508d6f14cd15f251518b36da17909b79e", "1ce4587e27e2cf8ba5947d3be7a37b4d1317fbee", "87a39f5002ef2de3143d1ea96ae19e002c44345b", "a2b9cee7a3866eb2db53a7d81afda72051fe9732", "7eaa97be59019f0d36aa7dac27407b004cad5e93", "6a67e6fbbd9bcd3f724fe9e6cecc9d48d1b6ad4d", "098a1ccc13b8d6409aa333c8a1079b2c9824705b", "0d4d8ce029deead6f2ce7075047aa645299ddd41", "00b29e319ff8b3a521b1320cb8ab5e39d7f42281", "2bcd9b2b78eb353ea57cf50387083900eae5384a", "adce9902dca7f4e8a9b9cf6686ec6a7c0f2a0ba6", "0fad544edfc2cd2a127436a2126bab7ad31ec333", "71e6a46b32a8163c9eda69e1badcee6348f1f56a", "aafb271684a52a0b23debb3a5793eb618940c5dd", "1828b1b0f5395b163fef087a72df0605249300c2", "459960be65dd04317dd325af5b7cbb883d822ee4", "24496e4acfb8840616b2960b0e2c80cc4c9e5a87", "8ba67f45fbb1ce47a90df38f21834db37c840079", "6ee8a94ccba10062172e5b31ee097c846821a822", "61e9e180d3d1d8b09f1cc59bdd9f98c497707eff", "70580ed8bc482cad66e059e838e4a779081d1648", "f16599e4ec666c6390c90ff9a253162178a70ef5", "14a5feadd4209d21fa308e7a942967ea7c13b7b6", "11a210835b87ccb4989e9ba31e7559bb7a9fd292", "136aae348c7ebc6fd9df970b0657241983075795", "76673de6d81bedd6b6be68953858c5f1aa467e61", "37866fea39deeff453802cde529dd9d32e0205a5", "7c42371bae54050dbbf7ded1e7a9b4109a23a482", "03264e2e2709d06059dd79582a5cc791cbef94b1", "176e5abddb87d029f85f60d1bbff67c66500e8c3", "7c57ac7c9f84fbd093f6393e2b63c18078bf0fdf", "17189cfedbdbd219849b8e7f8cf0293d49465f9c", "3fd90098551bf88c7509521adf1c0ba9b5dfeb57", "852ff0d410a25ebb7936043a05efe2469c699e4b", "17b46e2dad927836c689d6787ddb3387c6159ece", "c9527df51e63b56c61cbf16f83d1a3c5c2c82499", "4c72a51a7c7288e6e17dfefe4f87df47929608e7", "5811944e93a1f3e35ece7a70a43a3de95c69b5ab", "060820f110a72cbf02c14a6d1085bd6e1d994f6a", "1033ca56c7e88d8b3e80546848826f572c4cd63e", "d9c4586269a142faee309973e2ce8cde27bda718", "1ce3a91214c94ed05f15343490981ec7cc810016", "3d0c21d4780489bd624a74b07e28c16175df6355", 
"ddd0f1c53f76d7fc20e11b7e33bbdc0437516d2b", "06560d5721ecc487a4d70905a485e22c9542a522", "146a7ecc7e34b85276dd0275c337eff6ba6ef8c0", "03d1d0a665e358863ff4de9ee7d78f64edd7e756", "502d30c5eac92c7db587d85d080343fbd9bc469e", "9d24179aa33a94c8c61f314203bf9e906d6b64de", "df2841a1d2a21a0fc6f14fe53b6124519f3812f9", "8006219efb6ab76754616b0e8b7778dcfb46603d", "5a34a9bb264a2594c02b5f46b038aa1ec3389072", "1ed49161e58559be399ce7092569c19ddd39ca0b", "7a3d46f32f680144fd2ba261681b43b86b702b85", "0b605b40d4fef23baa5d21ead11f522d7af1df06", "e98551055bdcf8e25e07f4ffdbf39d0a4a57bffc", "8202da548a128b28dd1f3aa9f86a0523ec2ecb26", "8b7191a2b8ab3ba97423b979da6ffc39cb53f46b", "95289007f2f336e6636cf8f920225b8d47c6e94f", "5b6f0a508c1f4097dd8dced751df46230450b01a", "1d7ecdcb63b20efb68bcc6fd99b1c24aa6508de9", "d930c3d92a075d3f3dd9f5ea1a8f04e0d659b22b", "21104bcf07ef0269ab133471a3200b9bf94b2948", "2a6bba2e81d5fb3c0fd0e6b757cf50ba7bf8e924", "4328933890f5a89ad0af69990926d8484f403e4b", "db1a9b8d8ce9a5696a96f8db4206b6f72707730e", "0fd3a7ee228bbc3dd4a111dae04952a1ee58a8cd", "3ab036b680e8408ec74f78a918f3ffbf6c906d70", "50d15cb17144344bb1879c0a5de7207471b9ff74", "1f2d12531a1421bafafe71b3ad53cb080917b1a7", "2ffcd35d9b8867a42be23978079f5f24be8d3e35", "69b2a7533e38c2c8c9a0891a728abb423ad2c7e7", "366d20f8fd25b4fe4f7dc95068abc6c6cabe1194", "05ad478ca69b935c1bba755ac1a2a90be6679129", "10e4172dd4f4a633f10762fc5d4755e61d52dc36", "9213a415d798426c8d84efc6d2a69a2cbfa2af84", "79dd787b2877cf9ce08762d702589543bda373be", "2563fc1797f187e2f6f9d9f4387d4bcadd3fbd02", "4df3143922bcdf7db78eb91e6b5359d6ada004d2", "23ebbbba11c6ca785b0589543bf5675883283a57", "30cc1ddd7a9b4878cca7783a59086bdc49dc4044", "19d4855f064f0d53cb851e9342025bd8503922e2", "6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4", "eba4cfd76f99159ccc0a65cab0a02db42b548d85", "62648f91e38b0e8f69dded13b9858bd3a86bb6ed", "3b092733f428b12f1f920638f868ed1e8663fe57", "2df4d0c06f4f68060cecbbb8e2088d9c6b20d04f", "21d5c838d19fcb4d624b69fe9d98e84d88f18e79", 
"b40290a694075868e0daef77303f2c4ca1c43269", "3312eb79e025b885afe986be8189446ba356a507", "c4f1fcd0a5cdaad8b920ee8188a8557b6086c1a4", "24f9248f01df3020351347c2a3f632e01de72090", "06e7e99c1fdb1da60bc3ec0e2a5563d05b63fe32", "484bac2a9ff3a43a6f85d109bbc579a4346397f5", "4934d44aa89b6d871eb6709dd1d1eebf16f3aaf1", "97c554fbcf783d554c4f6c2f3fcc0a0f9dba0759", "d383ba7bbf8b7b49dcef9f8abab47521966546bb", "729a9d35bc291cc7117b924219bef89a864ce62c", "a1dd9038b1e1e59c9d564e252d3e14705872fdec", "2f17c0514bb71e0ca20780d71ea0d50ff0da4938", "feeb0fd0e254f38b38fe5c1022e84aa43d63f7cc", "eef725f4130ee326954e84e5f4ddf487da63c94e", "7e1ea2679a110241ed0dd38ff45cd4dfeb7a8e83", "3830047081ef4bc787f16edf5b244cb2793f75e5", "ae753fd46a744725424690d22d0d00fb05e53350", "8aff9c8a0e17be91f55328e5be5e94aea5227a35", "504d2675da7a56a36386568ee668938df6d82bbe", "c5765590c294146a8e3c9987d394c0990ab6a35b", "55a158f4e7c38fe281d06ae45eb456e05516af50", "3c1f5580a66c9624c77f27ab8e4cf0d1b3d9d171", "7862f646d640cbf9f88e5ba94a7d642e2a552ec9", "28bc378a6b76142df8762cd3f80f737ca2b79208", "6d670eb172355d46034a831d8dc569e17ab14d94", "a3f1db123ce1818971a57330d82901683d7c2b67", "3a0796161d838f9dc51c0ee5f700e668fa206db3", "329b2781007604652deb72139d14315df3bc2771", "52887969107956d59e1218abb84a1f834a314578", "65817963194702f059bae07eadbf6486f18f4a0a", "24205a60cbf1cc12d7e0a9d44ed3c2ea64ed7852", "24869258fef8f47623b5ef43bd978a525f0af60e", "7808937b46acad36e43c30ae4e9f3fd57462853d", "0a2ddf88bd1a6c093aad87a8c7f4150bfcf27112", "fcd77f3ca6b40aad6edbd1dab9681d201f85f365", "6c92d87c84fa5e5d2bb5bed3ef38168786bacc49", "b34fdab6864782ce60fd90d09f5d886bd83f84f5", "2e5d173ee0d1d7f88c335ade6a7b879b2d987ab4", "0de1450369cb57e77ef61cd334c3192226e2b4c2", "d59404354f84ad98fa809fd1295608bf3d658bdc", "312afff739d1e0fcd3410adf78be1c66b3480396", "8879083463a471898ff9ed9403b84db277be5bf6", "adfaf01773c8af859faa5a9f40fb3aa9770a8aa7", "08a98822739bb8e6b1388c266938e10eaa01d903", "4c523db33c56759255b2c58c024eb6112542014e", 
"034addac4637121e953511301ef3a3226a9e75fd", "52c91fcf996af72d191520d659af44e310f86ef9", "02e43d9ca736802d72824892c864e8cfde13718e", "78f438ed17f08bfe71dfb205ac447ce0561250c6", "9f094341bea610a10346f072bf865cb550a1f1c1", "3958db5769c927cfc2a9e4d1ee33ecfba86fe054", "16c884be18016cc07aec0ef7e914622a1a9fb59d", "008528d5e27919ee95c311266041e4fb1711c254", "6baaa8b763cc5553715766e7fbe7abb235fae33c", "8b2e3805b37c18618b74b243e7a6098018556559", "88e090ffc1f75eed720b5afb167523eb2e316f7f", "11e6cf1cbb33d67a3e3c87dcaf7031d6654bc0de", "2961e14c327341d22d5f266a6872aa174add8ac4", "b185f0a39384ceb3c4923196aeed6d68830a069f", "58b0be2db0aeda2edb641273fe52946a24a714c3", "06466276c4955257b15eff78ebc576662100f740", "076d3fc800d882445c11b9af466c3af7d2afc64f", "b13bf657ca6d34d0df90e7ae739c94a7efc30dc3", "7f4bc8883c3b9872408cc391bcd294017848d0cf", "794c0dc199f0bf778e2d40ce8e1969d4069ffa7b", "3690af0af51a067750f664c08e48b486d1cd476d", "a30869c5d4052ed1da8675128651e17f97b87918", "424259e9e917c037208125ccc1a02f8276afb667", "28b061b5c7f88f48ca5839bc8f1c1bdb1e6adc68", "f7dcadc5288653ec6764600c7c1e2b49c305dfaa", "00075519a794ea546b2ca3ca105e2f65e2f5f471", "68a3f12382003bc714c51c85fb6d0557dcb15467", "6014eeb333998c2b2929657d233ebbcb1c3412c9", "94a7c97d1e3eb5dbfb20b180780451486597a9be", "3411ef1ff5ad11e45106f7863e8c7faf563f4ee1", "1939168a275013d9bc1afaefc418684caf99ba66", "0716e1ad868f5f446b1c367721418ffadfcf0519", "346c9100b2fab35b162d7779002c974da5f069ee", "cff911786b5ac884bb71788c5bc6acf6bf569eff", "f7452a12f9bd927398e036ea6ede02da79097e6e", "0b45aeb0aede5e0c19b508ede802bdfec668aefd", "5c2e264d6ac253693469bd190f323622c457ca05", "670531f3925c1ee6921f1550a988a034db727c3b", "8b1db0894a23c4d6535b5adf28692f795559be90", "1ae642a8d756c6aa7bc049c5c89d5072d8749637", "4d423acc78273b75134e2afd1777ba6d3a398973", "ad7b6d2e8d66f720cc83323a0700c25006d49609", "f2ad9b43bac8c2bae9dea694f6a4e44c760e63da", "0faf441a1ef1e788fb9ccd20484b104a1fa95ee8", "1a41831a3d7b0e0df688fb6d4f861176cef97136", 
"6bcee7dba5ed67b3f9926d2ae49f9a54dee64643", "014143aa16604ec3f334c1407ceaa496d2ed726e", "a52581a7b48138d7124afc7ccfcf8ec3b48359d0", "b75cee96293c11fe77ab733fc1147950abbe16f9", "7d7870b7633678db2d39d4a5d69d10337ca827d9", "0e8a28511d8484ad220d3e8dde39220c74fab14b", "c4b00e86841db3fced2a5d8ac65f80d0d3bbe352", "c79cf7f61441195404472102114bcf079a72138a", "18a9f3d855bd7728ed4f988675fa9405b5478845", "d5c66a48bc0a324750db3d295803f47f6060043d", "e55f7250f3b8ee722814f8809620a851c31e5b0e", "0be418e63d111e3b94813875f75909e4dc27d13a", "a119844792fd9157dec87e3937685c8319cac62f", "439ec47725ae4a3660e509d32828599a495559bf", "865e9346b05f14f9bf85c1522c5aebe85420a517", "ccbfc004e29b3aceea091056b0ec536e8ea7c47e", "d2b3166b8a6a3e6e7bc116257e718e4fe94a0638", "c05441dd1bc418fb912a6fafa84c0659a6850bf0", "0f4cfcaca8d61b1f895aa8c508d34ad89456948e", "4686df20f0ee40cd411e4b43860ef56de5531d9e", "9e1c3b8b1653337094c1b9dba389e8533bc885b0", "d44d911c045a6df610cb4103f1ab09827fab8296", "501096cca4d0b3d1ef407844642e39cd2ff86b37", "34863ecc50722f0972e23ec117f80afcfe1411a9", "d03baf17dff5177d07d94f05f5791779adf3cd5f", "2be0ab87dc8f4005c37c523f712dd033c0685827", "0d5824e14593bcb349d636d255ba274f98bbb88f", "ec1a57e609eda72b4eb60155fac12db1da31f6c0", "7bfe085c10761f5b0cc7f907bdafe1ff577223e0", "93cd5c47e4a3425d23e3db32c6eaef53745bb32e", "2ad0ee93d029e790ebb50574f403a09854b65b7e", "b784bb1d2b2720dac8d4b92851a8d6360c35b0b2", "7f6599e674a33ed64549cd512ad75bdbd28c7f6c", "f6149fc5b39fa6b33220ccee32a8ee3f6bbcaf4a", "03dba79518434ba4a937b2980fbdc8bafc048b36", "cf7a4442a6aad0e08d4aade8ec379c44f84bca8a", "fde0180735699ea31f6c001c71eae507848b190f", "57a1466c5985fe7594a91d46588d969007210581", "0b2277a0609565c30a8ee3e7e193ce7f79ab48b0", "9513503867b29b10223f17c86e47034371b6eb4f", "1791f790b99471fc48b7e9ec361dc505955ea8b1", "4d0ef449de476631a8d107c8ec225628a67c87f9", "0052de4885916cf6949a6904d02336e59d98544c", "fd9ab411dc6258763c95b7741e3d51adf5504040", "29a5d38390857e234c111f8bb787724c08f39110", 
"171389529df11cc5a8b1fbbe659813f8c3be024d", "06ab24721d7117974a6039eb2e57d1545eee5e46", "55432723c728a2ce90d817e9e9877ae9fbad6fe5", "e69ac130e3c7267cce5e1e3d9508ff76eb0e0eef", "ecfa56b38ac2b58428d59c9b630b1437a9ff8278", "919cb6160db66a8fe0b84cb7f171aded48a13632", "1bc23c771688109bed9fd295ce82d7e702726327", "0553c6b9ee3f7d24f80e204d758c94a9d6b375d2", "e9b0a27018c7151016a9fe01c98b4c21d6ebf4be", "60a20d5023f2bcc241eb9e187b4ddece695c2b9b", "e6c834c816b5366875cf3060ccc20e16f19a9fc6", "ba931c3f90dd40a5db4301a8f0c71779a23043d6", "1b6c65442f2b572fb6c8fc9a7d5ae49a8e6d32ab", "fafe69a00565895c7d57ad09ef44ce9ddd5a6caa", "288d2704205d9ca68660b9f3a8fda17e18329c13", "3fc173805ed43602eebb7f64eea4d60c0386c612", "b3f7c772acc8bc42291e09f7a2b081024a172564", "40a34d4eea5e32dfbcef420ffe2ce7c1ee0f23cd", "f437b3884a9e5fab66740ca2a6f1f3a5724385ea", "301b0da87027d6472b98361729faecf6e1d5e5f6", "98856ab9dc0eab6dccde514ab50c823684f0855c", "20a16efb03c366fa4180659c2b2a0c5024c679da", "eccd9acba3f6a605053dbde7f0890836e52aa085", "9d357bbf014289fb5f64183c32aa64dc0bd9f454", "ea5c9d5438cde6d907431c28c2f1f35e02b64b33", "3c18fb8ff0f5003fefa8e9dc9bebaf88908d255c", "5748652924084b7b0220cddcd28f6b2222004359", "2debdb6a772312788251cc3bd1cb7cc8a6072214", "3cd7b15f5647e650db66fbe2ce1852e00c05b2e4", "8e378ef01171b33c59c17ff5798f30293fe30686", "0004f72a00096fa410b179ad12aa3a0d10fc853c", "1584edf8106e8f697f19b726e011b9717de0e4db", "333e7ad7f915d8ee3bb43a93ea167d6026aa3c22", "27aa23d7a05368a6b5e3d95627f9bab34284e5c4", "6f68c49106b66a5bd71ba118273b4c5c64b6619f", "325b048ecd5b4d14dce32f92bff093cd744aa7f8", "cb5cda13a4ccbc32ce912d51e402363c1b501b32", "19868a469dc25ee0db00947e06c804b88ea94fd0", "fd38163654a0551ed7f4e442851508106e6105d9", "6bca0d1f46b0f7546ad4846e89b6b842d538ee4e", "a0e7f8771c7d83e502d52c276748a33bae3d5f81", "2450c618cca4cbd9b8cdbdb05bb57d67e63069b1", "8dbe79830713925affc48d0afa04ed567c54724b", "1ecb56e7c06a380b3ce582af3a629f6ef0104457", "e3bb83684817c7815f5005561a85c23942b1f46b", 
"2a79bd36c56fd1634ca0f8089fe8aa9343eb92ce", "9d61b0beb3c5903fc3032655dc0fd834ec0b2af3", "9055b155cbabdce3b98e16e5ac9c0edf00f9552f", "5e7cb894307f36651bdd055a85fdf1e182b7db30", "3f57c3fc2d9d4a230ccb57eed1d4f0b56062d4d5", "55079a93b7d1eb789193d7fcdcf614e6829fad0f", "539287d8967cdeb3ef60d60157ee93e8724efcac", "ec40df721a80c62d4a768fe29b58d86b1a07f435", "27b451abfe321a696c852215bb7efb4c2e50c89f", "19dd371e1649ab55a46f4b98890d6937a411ec5d", "fe97d46c34630d14235132a95fb2d2ed7b2c4663", "783f3fccde99931bb900dce91357a6268afecc52", "5039b2081eb3c8efbf9e96fd27775731f38f6fc7", "18e54b74ed1f3c02b7569f53a7d930d72fc329f5", "8562b4f63e49847692b8cb31ef0bdec416b9a87a", "22dabd4f092e7f3bdaf352edd925ecc59821e168", "b562def2624f59f7d3824e43ecffc990ad780898", "3bcdb430b373fc0fafec93bdcd8125db338b20e4", "4d15254f6f31356963cc70319ce416d28d8924a3", "4967b0acc50995aa4b28e576c404dc85fefb0601", "6e91be2ad74cf7c5969314b2327b513532b1be09", "8990f8ea6441f97597429686542b9cdc46ed47de", "ad6cc071b2585e4bdb6233b7ad8d63e12538537d", "047d3cb2a6a9628b28cac077b97d95b04ca9044c", "4f03ba35440436cfa06a2ed2a571fea01cb36598", "4ed6c7740ba93d75345397ef043f35c0562fb0fd", "8c13f2900264b5cf65591e65f11e3f4a35408b48", "652aac54a3caf6570b1c10c993a5af7fa2ef31ff", "d7dd35a86117e46d24914ef49ccd99ea0a7bf705", "b5930275813a7e7a1510035a58dd7ba7612943bc", "de15af84b1257211a11889b6c2adf0a2bcf59b42", "31a2fb63a3fc67da9932474cda078c9ac43f85c5", "bbf01aa347982592b3e4c9e4f433e05d30e71305", "10bf35bf98cfe555dfc03b5f03f2769d330e3af9", "557115454c1b8e6eaf8dbb65122c5b00dc713d51", "846c028643e60fefc86bae13bebd27341b87c4d1", "8605e8f5d84b8325b1a81d968c296a5a5d741f31", "4309faac3248663ed56a6a841cac1855e302f090", "a03448488950ee5bf50e9e1d744129fbba066c50", "1d30f813798c55ae4fe454829be6e2948ee841da", "60a006bdfe5b8bf3243404fae8a5f4a9d58fa892", "38679355d4cfea3a791005f211aa16e76b2eaa8d", "2b8c5017633a82b15dbe0047cfc76ffdce462176", "4b71d1ff7e589b94e0f97271c052699157e6dc4a", "a26379d9993073d51611588c36f12db2b4ecb39a", 
"4cb31f16e94067ce5eaeb8eae00eb0b0d49d46b2", "5ec94adc9e0f282597f943ea9f4502a2a34ecfc2", "1f89439524e87a6514f4fbe7ed34bda4fd1ce286", "7f1f3d7b1a4e7fc895b77cb23b1119a6f13e4d3a", "9893865afdb1de55fdd21e5d86bbdb5daa5fa3d5", "d72973a72b5d891a4c2d873daeb1bc274b48cddf", "67c3c1194ee72c54bc011b5768e153a035068c43", "306957285fea4ce11a14641c3497d01b46095989", "857ad04fca2740b016f0066b152bd1fa1171483f", "c6096986b4d6c374ab2d20031e026b581e7bf7e9", "55138c2b127ebdcc508503112bf1d1eeb5395604", "54ce3ff2ab6e4465c2f94eb4d636183fa7878ab7", "c1f07ec629be1c6fe562af0e34b04c54e238dcd1", "50333790dd98c052dfafe1f9bf7bf8b4fc9530ba", "fb8eb4a7b9b9602992e5982c9e0d6d7f7b8210ef", "f95321f4348cfacc52084aae2a19127d74426047", "40f06e5c052d34190832b8c963b462ade739cbf0", "9057044c0347fb9798a9b552910a9aff150385db", "cd596a2682d74bdfa7b7160dd070b598975e89d9", "e4df83b7424842ff5864c10fa55d38eae1c45fac", "309e17e6223e13b1f76b5b0eaa123b96ef22f51b", "e48fb3ee27eef1e503d7ba07df8eb1524c47f4a6", "a63ec22e84106685c15c869aeb157aa48259e855", "91e58c39608c6eb97b314b0c581ddaf7daac075e", "8d2c0c9155a1ed49ba576ac0446ec67725468d87", "7a666a91a47da0d371a9ba288912673bcd5881e4", "07c90e85ac0f74b977babe245dea0f0abcf177e3", "0a29cee986471b495728b08756f135a2377d5a2a", "42350e28d11e33641775bef4c7b41a2c3437e4fd", "a42209dbfe6d2005295d790456ddb2138302cbe5", "7c0a6824b556696ad7bdc6623d742687655852db", "30188b836f2fa82209d7afbf0e4d0ee29c6b9a87", "016a8ed8f6ba49bc669dbd44de4ff31a79963078", "67a50752358d5d287c2b55e7a45cc39be47bf7d0", "63cff99eff0c38b633c8a3a2fec8269869f81850", "5c5e1f367e8768a9fb0f1b2f9dbfa060a22e75c0", "2c34bf897bad780e124d5539099405c28f3279ac", "2ee8900bbde5d3c81b7ed4725710ed46cc7e91cd", "02c2a29a4695eab7a8f859bf8697a5ca9f910d70", "ccdea57234d38c7831f1e9231efcb6352c801c55", "5b4b84ce3518c8a14f57f5f95a1d07fb60e58223", "e27c92255d7ccd1860b5fb71c5b1277c1648ed1e", "64d5772f44efe32eb24c9968a3085bc0786bfca7", "44f65e3304bdde4be04823fd7ca770c1c05c2cef", "602f772c69e4a1a65de00443c30d51fdd47a80aa", 
"22ad2c8c0f4d6aa4328b38d894b814ec22579761", "9d06d43e883930ddb3aa6fe57c6a865425f28d44", "923ede53b0842619831e94c7150e0fc4104e62f7", "243cd27dce38fd756a840b397c28ad21cfb78897", "8229f2735a0db0ad41f4d7252129311f06959907", "d43b6ca9257e9b24f89eb3867f2c04068a78c778", "e013c650c7c6b480a1b692bedb663947cd9d260f", "a0beb0cc6f167373f8b4b7458ff0ec42fc290a75", "c92da368a6a886211dc759fe7b1b777a64d8b682", "5bed2453a5b0c54a4a4a294f29c9658658a9881e", "b21bf45cd3aeaec3440eeca09a1c5a5ee3d24a3a", "6c8c7065d1041146a3604cbe15c6207f486021ba", "2e0e056ed5927a4dc6e5c633715beb762628aeb0", "91d2fe6fdf180e8427c65ffb3d895bf9f0ec4fa0", "3d0ef9bfd08a9252db6acfece3b83f3aa58b4cae", "99facca6fc50cc30f13b7b6dd49ace24bc94f702", "0294f992f8dfd8748703f953925f9aee14e1b2a2", "8a54f8fcaeeede72641d4b3701bab1fe3c2f730a", "4aea1213bdb5aa6c74b99fca1afc72d8a99503c6", "fac5a9a18157962cff38df6d4ae69f8a7da1cfa8", "60d765f2c0a1a674b68bee845f6c02741a49b44e", "5d0f72174e9ca1d620227b53ab1bbd8263fb4a9e", "030c82b87e3cdc5ba35c443a93ff4a9d21c2bc2f", "f75852386e563ca580a48b18420e446be45fcf8d", "b54c477885d53a27039c81f028e710ca54c83f11", "404042a1dcfde338cf24bc2742c57c0fb1f48359", "dbb7f37fb9b41d1aa862aaf2d2e721a470fd2c57", "8ed051be31309a71b75e584bc812b71a0344a019", "adad7446e371d27fdaee39475856e2058f3045e5", "007250c2dce81dd839a55f9108677b4f13f2640a", "0447bdb71490c24dd9c865e187824dee5813a676", "49068538b7eef66b4254cc11914128097302fab8", "b017963d83b3edf71e1673d7ffdec13a6d350a87", "8f9f599c05a844206b1bd4947d0524234940803d", "ff0617d750fa49416514c1363824b8f61baf8fb5", "a3a34c1b876002e0393038fcf2bcb00821737105", "4c1528bab3142ec957700ab502531e1a67e7f2f6", "2cf92ee60f719098acc3aae3981cedc47fa726b3", "85567174a61b5b526e95cd148da018fa2a041d43", "49394a5e0ca1d4bb77d8c9bfa963b8b8cb761ecf", "2a02355c1155f2d2e0cf7a8e197e0d0075437b19", "79617903c5cb56697f2e738e1463b9654e2d68ed", "9b000ccc04a2605f6aab867097ebf7001a52b459", "b73fdae232270404f96754329a1a18768974d3f6", "0241513eeb4320d7848364e9a7ef134a69cbfd55", 
"b856d8d6bff745bb1b4beb67e4b821fc20073840", "4d6d6369664a49f6992f65af4148cefef95055bc", "0efdd82a4753a8309ff0a3c22106c570d8a84c20", "27c9ddb72360f4cd0f715cd7ea82fa399af91f11", "5a87bc1eae2ec715a67db4603be3d1bb8e53ace2", "266ee26a6115f1521ce374e4ab106d997c7b1407", "51dcb36a6c247189be4420562f19feb00c9487f8", "050eda213ce29da7212db4e85f948b812a215660", "c0945953506a3d531331caf6c2b2a6d027e319f0", "4c0846bcfa64d9e810802c5b7ef0f8b43523fe54", "b3c398da38d529b907b0bac7ec586c81b851708f", "8dc9de0c7324d098b537639c8214543f55392a6b", "c92e36689ef561df726a7ae861d9c166c3934908", "e3a8f18e507d9f2b537ec3c3fcc1b874b8ccfc24", "789a43f51e0a3814327dab4299e4eda8165a5748", "98519f3f615e7900578bc064a8fb4e5f429f3689", "5304cd17f9d6391bf31276e4419100f17d4423b2", "37b6d6577541ed991435eaf899a2f82fdd72c790", "e03bda45248b4169e2a20cb9124ae60440cad2de", "0066caed1238de95a431d836d8e6e551b3cde391", "7fc76446d2b11fc0479df6e285723ceb4244d4ef", "0a451fc7d2c6b3509d213c210ae880645edf90ed", "3bc776eb1f4e2776f98189e17f0d5a78bb755ef4", "3db75962857a602cae65f60f202d311eb4627b41", "2878b06f3c416c98496aad6fc2ddf68d2de5b8f6", "f5af4e9086b0c3aee942cb93ece5820bdc9c9748", "7753e3b9e158289cbaa22203166424ca9c229f68", "141eab5f7e164e4ef40dd7bc19df9c31bd200c5e", "d6cf3cab269877c58a16be011b74e07838d957c2", "0cdb49142f742f5edb293eb9261f8243aee36e12", "5e59193a0fc22a0c37301fb05b198dd96df94266", "642c66df8d0085d97dc5179f735eed82abf110d0", "68d566ed4041a7519acb87753036610bd64dcc09", "952138ae6534fad573dca0e6b221cdf042a36412", "226a5ff790b969593596a52b55b3718dcdd7bb7f", "ee5fe44871f5e36998a2fdfb20a511374cdd3877", "e6f3707a75d760c8590292b54bc8a48582da2cd4", "c29e33fbd078d9a8ab7adbc74b03d4f830714cd0", "0a297523188b03fdf9d2155bfdcca7e1bcab3762", "7d306512b545df98243f87cb8173df83b4672b18", "1afef6b389bd727c566cd6fbcd99adefe4c0cf32", "297d3df0cf84d24f7efea44f87c090c7d9be4bed", "56e03f8fcd16332f764352ba6e72c9c5092cac0f", "2ca43325a5dbde91af90bf850b83b0984587b3cc", "7ab7befcd319d55d26c1e4b7b9560da5763906f3", 
"c6ffa09c4a6cacbbd3c41c8ae7a728b0de6e10b6", "dab795b562c7cc270c9099b925d685bea0abe82a", "90ae02da16b750a9fd43f8a38440f848309c2fe0", "38e7f3fe450b126367ec358be9b4cc04e82fa8c7", "2b435ee691718d0b55d057d9be4c3dbb8a81526e", "25b2811118ed73c64682544fe78023bb8242c709", "773ce00841a23d32727aa1f54c29865fefd4ce02", "199c2df5f2847f685796c2523221c6436f022464", "1c2724243b27a18a2302f12dea79d9a1d4460e35", "f60a85bd35fa85739d712f4c93ea80d31aa7de07", "a56b0f76919aabe8b768f5fbaeca412276365aa2", "39c8ed5213882d4dbc74332245ffe201882c5de1", "d9072e6b7999bc2d5750eb58c67a643f38d176d6", "21b5af67618fcc047b495d2d5d7c2bf145753633", "023decb4c56f2e97d345593e4f7b89b667a6763d", "b208f2fc776097e98b41a4ff71c18b393e0a0018", "e5c687c8c84f1cdb9d9fbc9b6ff7518ff4d71056", "6a5fe819d2b72b6ca6565a0de117c2b3be448b02", "7fce5769a7d9c69248178989a99d1231daa4fce9", "7b3b7769c3ccbdf7c7e2c73db13a4d32bf93d21f", "ee92d36d72075048a7c8b2af5cc1720c7bace6dd", "85041e48b51a2c498f22850ce7228df4e2263372", "c8829013bbfb19ccb731bd54c1a885c245b6c7d7", "89c51f73ec5ebd1c2a9000123deaf628acf3cdd8", "b5fc4f9ad751c3784eaf740880a1db14843a85ba", "bb69f750ccec9624f6dabd334251def2bbddf166", "73d53a7c27716ae9a6d3484e78883545e53117ae", "c40c23e4afc81c8b119ea361e5582aa3adecb157", "1c4ceae745fe812d8251fda7aad03210448ae25e", "a803453edd2b4a85b29da74dcc551b3c53ff17f9", "f52efc206432a0cb860155c6d92c7bab962757de", "09e7397fbcf4cc54ee085599a3b9bb72539ab251", "621ed006945e9438910b5aa4f6214888dea3d791", "1fe990ca6df273de10583860933d106298655ec8", "1679943d22d60639b4670eba86665371295f52c3", "4ed40e6bb66dfa38a75d864d804d175a26b6c6f6", "ae96fc36c89e5c6c3c433c1163c25db1359e13ea", "dd8d53e67668067fd290eb500d7dfab5b6f730dd", "57f8e1f461ab25614f5fe51a83601710142f8e88", "e90e12e77cab78ba8f8f657db2bf4ae3dabd5166", "864d50327a88d1ff588601bf14139299ced2356f", "1bc214c39536c940b12c3a2a6b78cafcbfddb59a", "19eb486dcfa1963c6404a9f146c378fc7ae3a1df", "8ab465c1a131ee4bee6ac0a0b19dfe68f5dcdcc4", "4e94e7412d180da5a646f6a360e75ba2128f93aa", 
"44b91268fbbf62e1d2ba1d5331ec7aedac30dbe8", "2251a88fbccb0228d6d846b60ac3eeabe468e0f1", "adb040081974369c46b943e9f75be4e405623102", "b9cedd09bdae827dacb138d6b054449d5346caf1", "4e37cd250130c6fd60e066f0c8efb3cbb778c421", "ce85d953086294d989c09ae5c41af795d098d5b2", "a9881ae58987da71b4c1ce01ba213eb4be2eef02", "9b8830655d4a5a837e3ffe835d14d6d71932a4f2", "a6496553fb9ab9ca5d69eb45af1bdf0b60ed86dc", "067126ce1f1a205f98e33db7a3b77b7aec7fb45a", "e2f91b21f3755914c193a546ba8718acf81c845b", "ee744ea13a0bbeba5de85ca3c75c9749054835e7", "69526cdf6abbfc4bcd39616acde544568326d856", "29d3ed0537e9ef62fd9ccffeeb72c1beb049e1ea", "c42a8969cd76e9f54d43f7f4dd8f9b08da566c5f", "38c7f80a1e7fa1bdec632042318dc7cdd3c9aad4", "5a029a0b0ae8ae7fc9043f0711b7c0d442bfd372", "72345fed8d068229e50f9ea694c4babfd23244a0", "3a0ea368d7606030a94eb5527a12e6789f727994", "4cec3e5776090852bef015a8bbe74fed862aa2dd", "7d53678ef6009a68009d62cd07c020706a2deac3", "9f5e22fbc22e1b0a61bcd75202d299232e68de5d", "20b405d658b7bb88d176653758384e2e3e367039", "0f1cbe4e26d584c82008ccef9fb1e4669b82de1f", "92fada7564d572b72fd3be09ea3c39373df3e27c", "a54e0f2983e0b5af6eaafd4d3467b655a3de52f4", "f9d1f12070e5267afc60828002137af949ff1544", "89896474f007c99f5967bcc05a952654a3bbb736", "5dafab3c936763294257af73baf9fb3bb1696654", "4ea4116f57c5d5033569690871ba294dc3649ea5", "a96c45ed3a44ad79a72499be238264ae38857988", "c5468665d98ce7349d38afb620adbf51757ab86f", "51a8dabe4dae157aeffa5e1790702d31368b9161", "7cf8a841aad5b7bdbea46a7bb820790e9ce12d0b", "49570b41bd9574bd9c600e24b269d945c645b7bd", "11bda1f054effb3116115b0699d74abec3e93a4b", "d8b568392970b68794a55c090c4dd2d7f90909d2", "2696d3708d6c6cccbd701f0dac14cc94d72dd76d", "b58d381f9f953bfe24915246b65da872aa94f9aa", "77e747b12d22827fa84f506eefdac4ec37948359", "303517dfc327c3004ae866a6a340f16bab2ee3e3", "57b7325b8027745b130490c8f736445c407f4c4c", "00eccc565b64f34ad53bf67dfaf44ffa3645adff", "df2494da8efa44d70c27abf23f73387318cf1ca8", "99b8a24aacaa53fa3f8a7e48734037c7b16f1c40", 
"2f78e471d2ec66057b7b718fab8bfd8e5183d8f4", "9cd4f72d33d1cedc89870b4f4421d496aa702897", "2afdda6fb85732d830cea242c1ff84497cd5f3cb", "a7e1327bd76945a315f2869bfae1ce55bb94d165", "6b3e360b80268fda4e37ff39b7f303e3684e8719", "cd33b3ca8d7f00c1738c41b2071a3164ba42ea61", "60e2b9b2e0db3089237d0208f57b22a3aac932c1", "c32c8bfadda8f44d40c6cd9058a4016ab1c27499", "df767f62a6bf3b09e6417d801726f2d5d642a202", "ea482bf1e2b5b44c520fc77eab288caf8b3f367a", "4cb8a691a15e050756640c0a35880cdd418e2b87", "40cd062438c280c76110e7a3a0b2cf5ef675052c", "ac1d97a465b7cc56204af5f2df0d54f819eef8a6", "41c42cb001f34c43d4d8dd8fb72a982854e173fb", "3ebce6710135d1f9b652815e59323858a7c60025", "c466ad258d6262c8ce7796681f564fec9c2b143d", "d930ec59b87004fd172721f6684963e00137745f", "3c47022955c3274250630b042b53d3de2df8eeda", "a65301ec723dfac73c1e884d26dedeb4de309429", "9fd1b8abbad25cb38f0c009288fb5db0fc862db6", "c207fd762728f3da4cddcfcf8bf19669809ab284", "7ed2c84fdfc7d658968221d78e745dfd1def6332", "aadfcaf601630bdc2af11c00eb34220da59b7559", "d231a81b38fde73bdbf13cfec57d6652f8546c3c", "cc38942825d3a2c9ee8583c153d2c56c607e61a7", "f79c97e7c3f9a98cf6f4a5d2431f149ffacae48f", "c833c2fb73decde1ad5b5432d16af9c7bee1c165", "e52f57a7de675d14aed28e5d0f2f3c5a01715337", "d3424761e06a8f5f3c1f042f1f1163a469872129", "4e4e8fc9bbee816e5c751d13f0d9218380d74b8f", "0b5bd3ce90bf732801642b9f55a781e7de7fdde0", "e084b0e477ee07d78c32c3696ea22c94f5fdfbec", "ab989225a55a2ddcd3b60a99672e78e4373c0df1", "4215b34597d8ce1e8985afa8043400caf0ec7230", "a07f78124f83eef1ed3a6f54ba982664ae7ca82a", "cbfcd1ec8aa30e31faf205c73d350d447704afee", "4159663f0b292fd8cc7411929be9d669bb98b386", "dc1510110c23f7b509035a1eda22879ef2506e61", "18166432309000d9a5873f989b39c72a682932f5", "05f4d907ee2102d4c63a3dc337db7244c570d067", "16b0c171fb094f677fcdf78bbb9aaef0d5404942", "7644b3a0871b8e0e7e1cdf06099e295f1e5fbdf7", "e4391993f5270bdbc621b8d01702f626fba36fc2", "2cac70f9c8140a12b6a55cef834a3d7504200b62", "4b8c736524d548472d0725c971ee29240ae683f6", 
"aebb9649bc38e878baef082b518fa68f5cda23a5", "06719154ab53d3a57041b2099167e3619f1677bc", "f4c01fc79c7ead67899f6fe7b79dd1ad249f71b0", "4f773c8e7ca98ece9894ba3a22823127a70c6e6c", "cfd8c66e71e98410f564babeb1c5fd6f77182c55", "c74b1643a108939c6ba42ae4de55cb05b2191be5", "153c8715f491272b06dc93add038fae62846f498", "b88d5e12089f6f598b8c72ebeffefc102cad1fc0", "caaa6e8e83abb97c78ff9b813b849d5ab56b5050", "d1ee9e63c8826a39d75fa32711fddbcc58d5161a", "26a89701f4d41806ce8dbc8ca00d901b68442d45", "0b20f75dbb0823766d8c7b04030670ef7147ccdd", "db67edbaeb78e1dd734784cfaaa720ba86ceb6d2", "88c21e06ed44da518a7e346fce416efedc771704", "a9adb6dcccab2d45828e11a6f152530ba8066de6", "0c7f27d23a162d4f3896325d147f412c40160b52", "3a0558ebfde592bd8bd07cb72b8ca8f700715bfb", "6c7a42b4f43b3a2f9b250f5803b697857b1444ac", "59f325e63f21b95d2b4e2700c461f0136aecc171", "396a19e29853f31736ca171a3f40c506ef418a9f", "44d93039eec244083ac7c46577b9446b3a071f3e", "45e9b5a7dba2f757567324fe35c2f2db87b015cc", "b5efe2e53aa417367314c1a907d0fe8053c71ecd", "4490b8d8ab2ac693c670751d4c2bff0a56d7393d", "e27b2cabdfdd6bf3ffb3ebce1b4c55adb1e80c8f", "c03e01717b2d93f04cce9b5fd2dcfd1143bcc180", "8f8a5be9dc16d73664285a29993af7dc6a598c83", "31ffc95167a2010ce7aab23db7d5fc7ec439f5fb", "3f5cf3771446da44d48f1d5ca2121c52975bb3d3", "71f9861df104b90399dc15e12bbb14cd03f16e0b", "27ee8482c376ef282d5eb2e673ab042f5ded99d7", "4d4736173a5e72c266e52f3a43bdcb2b58f237a2", "f68ed499e9d41f9c3d16d843db75dc12833d988d", "b16580d27bbf4e17053f2f91bc1d0be12045e00b", "8210fd10ef1de44265632589f8fc28bc439a57e6", "0559fb9f5e8627fecc026c8ee6f7ad30e54ee929", "f58d584c4ac93b4e7620ef6e5a8f20c6f6da295e", "739d400cb6fb730b894182b29171faaae79e3f01", "6c27eccf8c4b22510395baf9f0d0acc3ee547862", "b7f05d0771da64192f73bdb2535925b0e238d233", "6e1802874ead801a7e1072aa870681aa2f555f35", "d40cd10f0f3e64fd9b0c2728089e10e72bea9616", "e94168c35be1d4b4d2aaf42ef892e64a3874ed8c", "12c713166c46ac87f452e0ae383d04fb44fe4eb2", "122ee00cc25c0137cab2c510494cee98bd504e9f", 
"764882e6779fbee29c3d87e00302befc52d2ea8d", "1951dc9dd4601168ab5acf4c14043b124a8e2f67", "dc964b9c7242a985eb255b2410a9c45981c2f4d0", "35ec9b8811f2d755c7ad377bdc29741b55b09356", "8185be0689442db83813b49e215bf30870017459", "27883967d3dac734c207074eed966e83afccb8c3", "1d58d83ee4f57351b6f3624ac7e727c944c0eb8d", "b55f256bbd2e1a41ce6bfcd892dee12f5bcd7cb3", "227b18fab568472bf14f9665cedfb95ed33e5fce", "283d381c5c2ba243013b1c4f5e3b29eb906fa823", "56fb30b24e7277b47d366ca2c491749eee4d6bb1", "dbcfefa92edab8d1ffe8bc1cc66ad80fb13d2b6a", "c2b10909a0dd068b8e377a55b0a1827c8319118a", "ac6a9f80d850b544a2cbfdde7002ad5e25c05ac6", "7644d90efef157e61fe4d773d8a3b0bad5feccec", "2a0623ae989f2236f5e1fe3db25ab708f5d02955", "321bd4d5d80abb1bae675a48583f872af3919172", "0532cbcf616f27e5f6a4054f818d4992b99d201d", "b3e60bb5627312b72c99c5ef18aa41bcc1d21aea", "fac8cff9052fc5fab7d5ef114d1342daba5e4b82", "8acdc4be8274e5d189fb67b841c25debf5223840", "b2cb335ded99b10f37002d09753bd5a6ea522ef1", "10ce3a4724557d47df8f768670bfdd5cd5738f95", "40205181ed1406a6f101c5e38c5b4b9b583d06bc", "ad77056780328bdcc6b7a21bce4ddd49c49e2013", "6459f1e67e1ea701b8f96177214583b0349ed964", "dfd934ae448a1b8947d404b01303951b79b13801", "795b555abb26e62ad89a93645122da530327c447", "e4e95b8bca585a15f13ef1ab4f48a884cd6ecfcc", "5c4f9260762a450892856b189df240f25b5ed333", "c6a4b23ead2dab3d5dc02a5916d4c383f0c53007", "57b8b28f8748d998951b5a863ff1bfd7ca4ae6a5", "4f0d5cbcd30fef3978b9691c2e736daed2f841c1", "05d80c59c6fcc4652cfc38ed63d4c13e2211d944", "8309e8f27f3fb6f2ac1b4343a4ad7db09fb8f0ff", "2d23fa205acca9c21e3e1a04674f1e5a9528550e", "548233d67f859491e50c5c343d7d77a7531d4221", "0b3f354e6796ef7416bf6dde9e0779b2fcfabed2", "814369f171337ee1d8809446b7dbfc5e1ef9f4b5", "ff061f7e46a6213d15ac2eb2c49d9d3003612e49", "0874734e2af06883599ed449532a015738a1e779", "3cb0ef5aabc7eb4dd8d32a129cb12b3081ef264f", "0be764800507d2e683b3fb6576086e37e56059d1", "4df889b10a13021928007ef32dc3f38548e5ee56", "6f48e5e258da11e6ba45eeabe65a5698f17e58ef", 
"4ac4e8d17132f2d9812a0088594d262a9a0d339b", "c1ff88493721af1940df0d00bcfeefaa14f1711f", "11fdd940c9a23a34f7ab59809c26a02bce35c5f3", "4972aadcce369a8c0029e6dc2f288dfd0241e144", "49f70f707c2e030fe16059635df85c7625b5dc7e", "46072f872eee3413f9d05482be6446f6b96b6c09", "15e27f968458bf99dd34e402b900ac7b34b1d575", "4a1a5316e85528f4ff7a5f76699dfa8c70f6cc5c", "d4b88be6ce77164f5eea1ed2b16b985c0670463a", "0f533bc9fdfb75a3680d71c84f906bbd59ee48f1", "b209608a534957ec61e7a8f4b9d08286ae3d1d7f", "1e0add381031245b1d5129b482853ee738b498e1", "e95c5aaa72e72761b05f00fad6aec11c3e2f8d0f", "fb3da9b47460eedf857e386a562cc5348d78d544", "d983dda8b03ed60fa3afafe5c50f1d9a495f260b", "92c2dd6b3ac9227fce0a960093ca30678bceb364", "82953e7b3d28ccd1534eedbb6de7984c59d38cd4", "b05943b05ef45e8ea8278e8f0870f23db5c83b23", "8fb2ec3bbd862f680be05ef348b595e142463524", "ad339a5fdaab95f3c8aad83b60ceba8d76107fa2", "4d01d78544ae0de3075304ff0efa51a077c903b7", "d7d166aee5369b79ea2d71a6edd73b7599597aaa", "6ab8f2081b1420a6214a6c127e5828c14979d414", "0db1207563a66343cc7cb7b54356c767fc8b876c", "3dcebd4a1d66313dcd043f71162d677761b07a0d", "05270b68547a2cd5bda302779cfc5dda876ae538", "b0f59b71f86f18495b9f4de7c5dbbebed4ae1607", "3cc3e01ac1369a0d1aa88fedda61d3c99a98b890", "3a92de0a4a0ef4f88e1647633f1fbb13cd6a3c95", "7701952e405c3d8a0947e2a309de281aa76bd3f4", "1eec03527703114d15e98ef9e55bee5d6eeba736", "05184f01e66d7139530729b281da74db35a178d2", "3abe50d0a806a9f5a5626f60f590632a6d87f0c4", "a2646865d7c3d7fb346cf714caf146de2ea0e68f", "5173a20304ea7baa6bfe97944a5c7a69ea72530f", "476f177b026830f7b31e94bdb23b7a415578f9a4", "684f5166d8147b59d9e0938d627beff8c9d208dd", "a8583e80a455507a0f146143abeb35e769d25e4e", "131130f105661a47e0ffb85c2fe21595785f948a", "fc45e44dd50915957e498186618f7a499953c6be", "ccb95192001b07bb25fc924587f9682b0df3de8e", "0d8415a56660d3969449e77095be46ef0254a448", "06b4e41185734f70ce432fdb2b121a7eb01140af", "fd9feb21b3d1fab470ff82e3f03efce6a0e67a1f", "7f8cef6ba2f059e465b1b23057a6dbb23fba1c63", 
"e41246837c25d629ca0fad74643fb9eb8bf38009", "e71c15f5650a59755619b2a62fa93ac922151fd6", "2331df8ca9f29320dd3a33ce68a539953fa87ff5", "f2d15482e7055dd5f54cf4a8a8f60d8e75af7edf", "c648d2394be3ff0c0ee5360787ff3777a3881b02", "e5e9e7cae71b13aabb30f6fe1f97cd153400be6c", "0a79d0ba1a4876086e64fc0041ece5f0de90fbea", "f3015be0f9dbc1a55b6f3dc388d97bb566ff94fe", "097f674aa9e91135151c480734dda54af5bc4240", "6b6493551017819a3d1f12bbf922a8a8c8cc2a03", "dd0086da7c4efe61abb70dd012538f5deb9a8d16", "4b507a161af8a7dd41e909798b9230f4ac779315", "70bf1769d2d5737fc82de72c24adbb7882d2effd", "c32cd207855e301e6d1d9ddd3633c949630c793a", "91e507d2d8375bf474f6ffa87788aa3e742333ce", "9255d3b2bfee4aaae349f68e67c76a077d2d07ad", "9cfb3a68fb10a59ec2a6de1b24799bf9154a8fd1", "32b76220ed3a76310e3be72dab4e7d2db34aa490", "3e59d97d42f36fc96d33a5658951856a555e997b", "4a2d54ea1da851151d43b38652b7ea30cdb6dfb2", "4672513d0dbc398719d66bba36183f6e2b78947b", "94b9c0a6515913bad345f0940ee233cdf82fffe1", "4276eb27e2e4fc3e0ceb769eca75e3c73b7f2e99", "0b02bfa5f3a238716a83aebceb0e75d22c549975", "bc9003ad368cb79d8a8ac2ad025718da5ea36bc4", "082ad50ac59fc694ba4369d0f9b87430553b11db", "5c717afc5a9a8ccb1767d87b79851de8d3016294", "1e41a3fdaac9f306c0ef0a978ae050d884d77d2a", "3aebaaf888cba25be25097173d0b3af73d9ce7f9", "40c1de7b1b0a087c590537df55ecd089c86e8bfc", "32f62da99ec9f58dd93e3be667612abcf00df16a", "e865908ed5e5d7469b412b081ca8abd738c72121", "e0c081a007435e0c64e208e9918ca727e2c1c44e", "3ca6adc90aae5912baa376863807191ffd56b34e", "aba770a7c45e82b2f9de6ea2a12738722566a149", "dcce3d7e8d59041e84fcdf4418702fb0f8e35043", "74408cfd748ad5553cba8ab64e5f83da14875ae8", "6f8cffd9904415c8fa3a1e650ac143867a04f40a", "4d9a02d080636e9666c4d1cc438b9893391ec6c7", "4aefd3ffa712a9b7d9db0615d4ee1932de6060d6", "81f101cea3c451754506bf1c7edf80a661fa4dd1", "3ad56aed164190e1124abea4a3c4e1e868b07dee", "14ae16e9911f6504d994503989db34d2d1cb2cd4", "858ddff549ae0a3094c747fb1f26aa72821374ec", "dfecaedeaf618041a5498cd3f0942c15302e75c3", 
"a702fc36f0644a958c08de169b763b9927c175eb", "0a3863a0915256082aee613ba6dab6ede962cdcd", "349434653429733f5f49fe0e160027d994cef115", "5ba7882700718e996d576b58528f1838e5559225", "f66add890c2458466e1cb942ad3981f8651ace2d", "e1ab3b9dee2da20078464f4ad8deb523b5b1792e", "524f6dc7441a3899ea8eb5d3e0d5d70e50ba566a", "7f68a5429f150f9eb7550308bb47a363f2989cb3", "883006c0f76cf348a5f8339bfcb649a3e46e2690", "e9f1cdd9ea95810efed306a338de9e0de25990a0", "2baec98c19804bf19b480a9a0aa814078e28bb3d", "24f3dfeb95bdecdc604d630acdfcafa1dc7c9124", "58db008b204d0c3c6744f280e8367b4057173259", "1de8f38c35f14a27831130060810cf9471a62b45", "4571626d4d71c0d11928eb99a3c8b10955a74afe", "962812d28a169b3fc1d4323f8d0fca69a22dac4c", "1f5b9ac2a37431b59fd1cecf8fe57b92b6b6398e", "a3ebacd8bcbc7ddbd5753935496e22a0f74dcf7b", "9790ec6042fb2665c7d9369bf28566b0ce75a936", "1ec98785ac91808455b753d4bc00441d8572c416", "11269e98f072095ff94676d3dad34658f4876e0e", "507c9672e3673ed419075848b4b85899623ea4b0", "cc2a9f4be1e465cb4ba702539f0f088ac3383834", "a777101b56fe46c4d377941afcf34edc2b8b5f6f", "c8585c95215bc53e28edb740678b3a0460ca8aa4", "d6bfa9026a563ca109d088bdb0252ccf33b76bc6", "da23d90bacf246b75ef752a2cbb138c4fcd789b7", "b95d13d321d016077bd2906f7fbd9be7c3643475", "d31af74425719a3840b496b7932e0887b35e9e0d", "48901e44cd3e17efcfc9866982f8bd7b2c26b99d", "8b1fa60b9164b60d1ca2705611fab063505a3ef5", "7da9464dbae52c8bda13461a4f44420c333b0342", "97946f13c1cf8924b0c1ce88682290ae87d630a1", "ad5a1621190d18dd429930ab5125c849ce7e4506", "9227c1a5b26556b9c34015b3ea5f9ae5f50e9b23", "528069963f0bd0861f380f53270c96c269a3ea1c", "044ba70e6744e80c6a09fa63ed6822ae241386f2", "09686fd5eb5ec6f47d5ec24276c78d23607ec01e", "33e20449aa40488c6d4b430a48edf5c4b43afdab", "036c41d67b49e5b0a578a401eb31e5f46b3624e0", "5083c6be0f8c85815ead5368882b584e4dfab4d1", "c3e53788370341afe426f2216bed452cbbdaf117", "a5acda0e8c0937bfed013e6382da127103e41395", "df87193e15a19d5620f5a6458b05fee0cf03729f", "8b547b87fd95c8ff6a74f89a2b072b60ec0a3351", 
"eece52bd0ed4d7925c49b34e67dbb6657d2d649b", "f3df296de36b7c114451865778e211350d153727", "93978ba84c8e95ff82e8b5960eab64e54ca36296", "2e12c5ea432004de566684b29a8e148126ef5b70", "1599718bf756a0fb7157277b93f21cfcad04e383", "baaaf73ec28226d60d923bc639f3c7d507345635", "7cf579088e0456d04b531da385002825ca6314e2", "498fd231d7983433dac37f3c97fb1eafcf065268", "063a3be18cc27ba825bdfb821772f9f59038c207", "abfba1dc9a9991897acd0e0d3d4ef9d4aef4151c", "d58fce50e9028dfc12cb2e7964f83d3b28bcc2fc", "3aa9c8c65ce63eb41580ba27d47babb1100df8a3", "0be80da851a17dd33f1e6ffdd7d90a1dc7475b96", "a939e287feb3166983e36b8573cd161d12097ad8", "97137d5154a9f22a5d9ecc32e8e2b95d07a5a571", "fddca9e7d892a97073ada88eec39e03e44b8c46a", "ded8252fc6df715753e75ba7b7fee518361266ef", "938ae9597f71a21f2e47287cca318d4a2113feb2", "0334a8862634988cc684dacd4279c5c0d03704da", "e4d8ba577cabcb67b4e9e1260573aea708574886", "7b9b3794f79f87ca8a048d86954e0a72a5f97758", "4bc9a767d7e63c5b94614ebdc24a8775603b15c9", "ebc3d7f50231cdb18a8107433ae9adc7bd94b97a", "196c12571ab51273f44ea3469d16301d5b8d2828", "0c54e9ac43d2d3bab1543c43ee137fc47b77276e", "af13c355a2a14bb74847aedeafe990db3fc9cbd4", "6f16f4bd01aeefdd03d6783beacb7de118f5af8a", "ec22eaa00f41a7f8e45ed833812d1ac44ee1174e", "abba1bf1348a6f1b70a26aac237338ee66764458", "af3b803188344971aa89fee861a6a598f30c6f10", "1394ca71fc52db972366602a6643dc3e65ee8726", "6af75a8572965207c2b227ad35d5c61a5bd69f45", "9ed943f143d2deaac2efc9cf414b3092ed482610", "71b376dbfa43a62d19ae614c87dd0b5f1312c966", "3bd1d41a656c8159305ba2aa395f68f41ab84f31", "2d080662a1653f523321974a57518e7cb67ecb41", "78f79c83b50ff94d3e922bed392737b47f93aa06", "11a2ef92b6238055cf3f6dcac0ff49b7b803aee3", "e43ea078749d1f9b8254e0c3df4c51ba2f4eebd5", "2c6e65d8ef8c17387b839ab6a82fb469117ae396", "086131159999d79adf6b31c1e604b18809e70ba8", "baad4e7ab0942a6b93ee2df39685f928efdae006", "7adaad633d3002f88cdee105d9c148e013202a06", "1c4404885443b65b7cbda3c131e54f769fbd827d", "bad15b4dea2399d57ee17f33a5ba8f04b012ef63", 
"9d24812d942e69f86279a26932df53c0a68c4111", "126076774da192d4d3f4efcd1accc719ee5f9683", "9b318098f3660b453fbdb7a579778ab5e9118c4c", "1bdef21f093c41df2682a07f05f3548717c7a3d1", "064cd41d323441209ce1484a9bba02a22b625088", "b503f481120e69b62e076dcccf334ee50559451e", "adf9998214598469f7a097bc50de4c23784f2a5a", "79fd4baca5f840d6534a053b22e0029948b9075e", "0ed1c1589ed284f0314ed2aeb3a9bbc760dcdeb5", "2533c88b278e84a248200d3c5a281177d392e78f", "2c848cc514293414d916c0e5931baf1e8583eabc", "2d93a9aa8bed51d0d1b940c73ac32c046ebf1eb8", "8a866bc0d925dfd8bb10769b8b87d7d0ff01774d", "b6c53891dff24caa1f2e690552a1a5921554f994", "ea8d217231d4380071132ce37bf997164b60ec44", "7966146d72f9953330556baa04be746d18702047", "7ab8cafe454a9fd0fe5d51e718a010ef552b9271", "65fc8393610fceec665726fe4e48f00dc90f55fb", "4572fd17feb5d098e8044fe085e963036fea2a6d", "29c1f733a80c1e07acfdd228b7bcfb136c1dff98", "17768efd76a681902a33994da4d3163262bf657f", "0d1d9a603b08649264f6e3b6d5a66bf1e1ac39d2", "245f8ec4373e0a6c1cae36cd6fed5a2babed1386", "22ec8af0f0e5469e40592d29e28cfbdf1154c666", "25728e08b0ee482ee6ced79c74d4735bb5478e29", "3cd380bd0f3b164b44c49e3b01f6ac9798b6b6f9", "0bc0f9178999e5c2f23a45325fa50300961e0226", "857c64060963dd8d28e4740f190d321298ddd503", "35683a325c4fa02e9335dccbca9b67e2b55b87ec", "8edc48e7a110f176ca08c26c0085c4dbb4146c5b", "3fefc856a47726d19a9f1441168480cee6e9f5bb", "887b7676a4efde616d13f38fcbfe322a791d1413", "8a91ad8c46ca8f4310a442d99b98c80fb8f7625f", "540b39ba1b8ef06293ed793f130e0483e777e278", "941166547968081463398c9eb041f00eb04304f7", "11b89011298e193d9e6a1d99302221c1d8645bda", "b8ebda42e272d3617375118542d4675a0c0e501d", "c1c2775e19d6fd2ad6616f69bda92ac8927106a2", "2d3af3ee03793f76fb8ff15e7d7515ff1e03f34c", "4b0cb10c6c3f2d581ac9eb654412f70bc72ed661", "6e7ffd67329ca6027357a133437505bc56044e65", "ffd4a5bf55fe089ac05ca96285e4e563325f3d1b", "c0f9fae059745e50658d9605bd8875fc3a2d0b4b", "434bf475addfb580707208618f99c8be0c55cf95", "d975a535cbf3e0a502a30ff7ad037241f9b798ae", 
"18409c220a0f330c24f0e095653a787813c3c85a", "133f42368e63928dc860cce7618f30ee186d328c", "8e461978359b056d1b4770508e7a567dbed49776", "503c0b83c64878eddec6f71798b7877f2ae1967e", "97cf04eaf1fc0ac4de0f5ad4a510d57ce12544f5", "7ff42ee09c9b1a508080837a3dc2ea780a1a839b", "359edbaa9cf56857dd5c7c94aaef77003ba8b860", "cfa931e6728a825caada65624ea22b840077f023", "f87b22e7f0c66225824a99cada71f9b3e66b5742", "1b4bc7447f500af2601c5233879afc057a5876d8", "78e1798c3077f4f8a4df04ca35cd73f82e9a38f3", "4d8de4dad40faa835e8a01e3aa465e1bb3a996f4", "2c4b96f6c1a520e75eb37c6ee8b844332bc0435c", "1ef6ad9e1742d0b2588deaf506ef83b894fb9956", "98a120802aef324599e8b9014decfeb2236a78a3", "8e0ede53dc94a4bfcf1238869bf1113f2a37b667", "18f57228614b1ea0f42e1376a78b94222e81bf7a", "4805f41c4f8cfb932b011dfdd7f8907152590d1a", "6d4103762e159130b32335cbf8893ee4dca26859", "d69719b42ee53b666e56ed476629a883c59ddf66", "d916602f694ebb9cf95d85e08dd53f653b6196c3", "9b7974d9ad19bb4ba1ea147c55e629ad7927c5d7", "1d7dde30b8d0f75576f4a23b75b8350071fd4839", "8dcc95debd07ebab1721c53fa50d846fef265022", "14efb131bed66f1874dd96170f714def8db45d90", "2f0e5a4b0ef89dd2cf55a4ef65b5c78101c8bfa1", "f0dac9a55443aa39fd9832bdff202a579b835e88", "10b06d05b8b3a2c925b951a6d1d5919f536ffed4", "0aa405447a8797e509521f0570e4679a42fdac9b", "aa5a7a9900548a1f1381389fc8695ced0c34261a", "26ad6ceb07a1dc265d405e47a36570cb69b2ace6", "11408af8861fb0a977412e58c1a23d61b8df458c", "48a402593ca4896ac34fbebf1e725ab1226ecdb7", "ef23e82180508606a3ab8d9a30205b5e3c0daf67", "23d5b2dccd48a17e743d3a5a4d596111a2f16c41", "84f3c4937cd006888b82f2eb78e884f2247f0c4e", "fa5ab4b1b45bf22ce7b194c20c724946de2f2dd4", "973e3d9bc0879210c9fad145a902afca07370b86", "d99743ab1760b09b1bb88bc6e1dc5b9d0e48baac", "1dbbec4ad8429788e16e9f3a79a80549a0d7ac7b", "8bb21b1f8d6952d77cae95b4e0b8964c9e0201b0", "5760d29574d78e79e8343b74e6e30b3555e48676", "6821113166b030d2123c3cd793dd63d2c909a110", "8eb40d0a0a1339469a05711f532839e8ffd8126c", "ae71f69f1db840e0aa17f8c814316f0bd0f6fbbf", 
"0021f46bda27ea105d722d19690f5564f2b8869e", "304a306d2a55ea41c2355bd9310e332fa76b3cb0", "f7a271acccf9ec66c9b114d36eec284fbb89c7ef", "a8f1fc34089c4f2bc618a122be71c25813cae354", "a082c77e9a6c2e2313d8255e8e4c0677d325ce3e", "9af1cf562377b307580ca214ecd2c556e20df000", "c9367ed83156d4d682cefc59301b67f5460013e0", "306127c3197eb5544ab1e1bf8279a01e0df26120", "2e535b8cd02c2f767670ba47a43ad449fa1faad7", "83ca4cca9b28ae58f461b5a192e08dffdc1c76f3", "1d19c6857e798943cd0ecd110a7a0d514c671fec", "bf37a81d572bb154581845b65a766fab1e5c7dda", "0e78af9bd0f9a0ce4ceb5f09f24bc4e4823bd698", "b8a16fcb65a8cee8dd32310a03fe36b5dff9266a", "aadf4b077880ae5eee5dd298ab9e79a1b0114555", "db150d158ca696c7fb4f39b707f71d609481a250", "dfbbe8100fcd70322a431bd5d2c2d52a65fd4bbd", "0d902541c26f03ff95221e0e71d67c39e094a61d", "c997744db532767ee757197491d8ac28d10f1c0f", "ff8315c1a0587563510195356c9153729b533c5b", "96a8f115df9e2c938453282feb7d7b9fde6f4f95", "2679e4f84c5e773cae31cef158eb358af475e22f", "d36a1e4637618304c2093f72702dcdcc4dcd41d1", "4a3d96b2a53114da4be3880f652a6eef3f3cc035", "9fd8d24a9db7cbcdf607994051d89667e95d7186", "d7ecfb6108a379a0abf76bf3105b4c9baca8f84f", "ce70dd0d613b840754dce528c14c0ebadd20ffaa", "2b0ff4b82bac85c4f980c40b3dc4fde05d3cc23f", "a6ebe013b639f0f79def4c219f585b8a012be04f", "75503aff70a61ff4810e85838a214be484a674ba", "cc1b093cfb97475faabab414878fa7e4a2d97cd7", "f08cb47cd91a83ea849f2dfe2682529f3bb95aa9", "03baf00a3d00887dd7c828c333d4a29f3aacd5f5", "b2f9e0497901d22b05b9699b0ea8147861c2e2cc", "525da67fb524d46f2afa89478cd482a68be8a42b", "bc15a2fd09df7046e7e8c7c5b054d7f06c3cefe9", "b613b30a7cbe76700855479a8d25164fa7b6b9f1", "2b8dfbd7cae8f412c6c943ab48c795514d53c4a7", "dae144d7b02aab7338b15d561ea18854df563cd4", "7be60f8c34a16f30735518d240a01972f3530e00", "182470fd0c18d0c5979dff75d089f1da176ceeeb", "b5f9180666924a3215ab0b1faf712e70b353444d", "5b809871a895ea8422afc31c918056614ea94688", "98fb3890c565f1d32049a524ec425ceda1da5c24", "2b84630680e2c906f8d7ac528e2eb32c99ef203a", 
"9cadd166893f1b8aaecb27280a0915e6694441f5", "293193d24d5c4d2975e836034bbb2329b71c4fe7", "a71bd4b94f67a71bc5c3563884bb9d12134ee46a", "db82f9101f64d396a86fc2bd05b352e433d88d02", "706236308e1c8d8b8ba7749869c6b9c25fa9f957", "bd63d56bebbc5d7babc7c47cedcb11b8e3ad199c", "592370b4c7b58a2a141e507f3a2cc5bbd247a62e", "20cfb4136c1a984a330a2a9664fcdadc2228b0bc", "4d90bab42806d082e3d8729067122a35bbc15e8d", "8d91f06af4ef65193f3943005922f25dbb483ee4", "f3cdd2c3180aa2bf08320ddd3b9a56f9fe00e72b", "3980dadd27933d99b2f576c3b36fe0d22ffc4746", "3bf673a1f620015cb8b5106b85c7168431bb48ff", "60c24e44fce158c217d25c1bae9f880a8bd19fc3", "337b5f0e70e04349da17e8069936e2260390aca0", "f9784db8ff805439f0a6b6e15aeaf892dba47ca0", "2dbc57abf3ceda80827b85593ce1f457b76a870b", "8bfec7afcf5015017406fc04c43c1f43eb723631", "205f035ec90a7fa50fd04fdca390ce83c0eea958", "592f14f4b12225fc691477a180a2a3226a5ef4f0", "5721216f2163d026e90d7cd9942aeb4bebc92334", "81513764b73dae486a9d2df28269c7db75e9beb3", "7b63ed54345d8c06523f6b03c41a09b5c8f227e2", "7e3367b9b97f291835cfd0385f45c75ff84f4dc5", "a3ed0f15824802359e05d9777cacd5488dfa7dba", "1659a8b91c3f428f1ba6aeba69660f2c9d0a85c6", "d6c7092111a8619ed7a6b01b00c5f75949f137bf", "d0b67ec62086b55f00dc461ab58dc87b85388b2b", "be48b5dcd10ab834cd68d5b2a24187180e2b408f", "4dce568994fb43095067ac893bbc079058494587", "ec1bec7344d07417fb04e509a9d3198da850349f", "dc295e85e698af56cd115e5531b66e19f3b9e0ce", "a313851ed00074a4a6c0fccf372acb6a68d9bc0b", "9bd3cafa16a411815f8f87ed3eb3cafefc25e5a3", "5f676d6eca4c72d1a3f3acf5a4081c29140650fb", "a3add3268c26876eb76decdf5d7dd78a0d5cf304", "c8bc8c99acd009e4d27ddd8d9a6e0b899d48543e", "e4ad82afc563b783475ed45e9f2cd4c9e2a53e83", "a5ae7fe2bb268adf0c1cd8e3377f478fca5e4529", "2c93c8da5dfe5c50119949881f90ac5a0a4f39fe", "176e6ba56e04c98e1997ffdef964ece90fd827b4", "cb992fe67f0d4025e876161bfd2dda467eaec741", "a2d9c9ed29bbc2619d5e03320e48b45c15155195", "3f7723ab51417b85aa909e739fc4c43c64bf3e84", "f6c70635241968a6d5fd5e03cde6907022091d64", 
"c270aff2b066ee354b4fe7e958a40a37f7bfca45", "3af8d38469fb21368ee947d53746ea68cd64eeae", "488d3e32d046232680cc0ba80ce3879f92f35cac", "dd0a334b767e0065c730873a95312a89ef7d1c03", "9e2ab407ff36f3b793d78d9118ea25622f4b7434", "3e7070323bca6106f19bea4c97ef67bd6249cb5d", "ae73f771d0e429a74b04a6784b1b46dfe98f53e4", "9ce0d64125fbaf625c466d86221505ad2aced7b1", "aea977a3b5556957ed5fb3ef21685ee84921eaa3", "715d3eb3665f46cd2fab74d35578a72aafbad799", "448ed201f6fceaa6533d88b0b29da3f36235e131", "f2004fff215a17ac132310882610ddafe25ba153", "406431d2286a50205a71f04e0b311ba858fc7b6c", "398558817e05e8de184cc4c247d4ea51ab9d4d58", "df6e68db278bedf5486a80697dec6623958edba8", "b59c8b44a568587bc1b61d130f0ca2f7a2ae3b88", "7d45f1878d8048f6b3de5b3ec912c49742d5e968", "934647c80f484340adecc74ac7141ed0b1d21c2f", "cd9666858f6c211e13aa80589d75373fd06f6246", "610779e90b644cc18696d7ac7820d3e0598e24d0", "f702f1294c0cd74b31db39c698281744d3137eb4", "a4876b7493d8110d4be720942a0f98c2d116d2a0", "33aff42530c2fd134553d397bf572c048db12c28", "493bc7071e35e7428336a515d1d26020a5fb9015", "a949b8700ca6ba96ee40f75dfee1410c5bbdb3db", "d7b7253f7d8b397d9d74057e1e72ed9c58e2ba6d", "8877e0b2dc3d2e8538c0cfee86b4e8657499a7c4", "bcf2710d46941695e421226372397c9544994214", "6ed22b934e382c6f72402747d51aa50994cfd97b", "e51e94cc3c74adf0cccfac3a8035a10016ce8a3b", "3b350afd8b82487aa97097170c269a25daa0c82d", "ee815f60dc4a090fa9fcfba0135f4707af21420d", "4dd2be07b4f0393995b57196f8fc79d666b3aec5", "7ee7b0602ef517b445316ca8aa525e28ea79307e", "5b73b7b335f33cda2d0662a8e9520f357b65f3ac", "1be785355ae29e32d85d86285bb8f90ea83171df", "b234cd7788a7f7fa410653ad2bafef5de7d5ad29", "cd687ddbd89a832f51d5510c478942800a3e6854", "3661a34f302883c759b9fa2ce03de0c7173d2bb2", "d30050cfd16b29e43ed2024ae74787ac0bbcf2f7", "74dbe6e0486e417a108923295c80551b6d759dbe", "20a432a065a06f088d96965f43d0055675f0a6c1", "113e5678ed8c0af2b100245057976baf82fcb907", "b86c49c6e3117ea116ec2d8174fa957f83502e89", "5a86842ab586de9d62d5badb2ad8f4f01eada885", 
"854dbb4a0048007a49df84e3f56124d387588d99", "0323b618d3a4c24bdda4f42361e19a2a7d497da5", "3b80bf5a69a1b0089192d73fa3ace2fbb52a4ad5", "8a3bb63925ac2cdf7f9ecf43f71d65e210416e17", "81b0550c58e7409b4f1a1cd7838669cfaa512eb3", "b351575e3eab724d62d0703e24ecae55025eef00", "c87c07d44633eca2cc1d11d2d967fc66eb8de871", "1c5d7d02a26aa052ecc47d301de4929083e5d320", "6a3fa483c64e72d9c96663ff031446a2bdb6b2eb", "2965d092ed72822432c547830fa557794ae7e27b", "fea0a5ed1bc83dd1b545a5d75db2e37a69489ac9", "f3553148e322f4f64545d6667dfbc7607c82703a", "47d3b923730746bfaabaab29a35634c5f72c3f04", "52e270ca8f5b53eabfe00a21850a17b5cc10f6d5", "947cdeb52f694fb1c87fc16836f8877cd83dc652", "d3b73e06d19da6b457924269bb208878160059da", "1bc9aaa41c08bbd0c01dd5d7d7ebf3e48ae78113", "f762afd65f3b680330e390f88d4cc39485345a01", "ae4390873485c9432899977499c3bf17886fa149", "5e8de234b20f98f467581f6666f1ed90fd2a81be", "f9bce7bd7909f1c75dbeb44900d374bc89072df0", "3be8964cef223698e587b4f71fc0c72c2eeef8cf", "265a88a8805f6ba3efae3fcc93d810be1ea68866", "47d4838087a7ac2b995f3c5eba02ecdd2c28ba14", "fe6fefe5f2f8c97ed9a27f3171fc0afb62d5495e", "2f53b97f0de2194d588bc7fb920b89cd7bcf7663", "e1b656c846a360d816a9f240499ec4f306897b98", "349c909abf937ef0a5a12c28a28e98500598834b", "1831800ef8b1f262c92209f1ee16567105da35d6", "a1b1442198f29072e907ed8cb02a064493737158", "ace1e0f50fe39eb9a42586f841d53980c6f04b11", "85205914a99374fa87e004735fe67fc6aec29d36", "00f0ed04defec19b4843b5b16557d8d0ccc5bb42", "84508e846af3ac509f7e1d74b37709107ba48bde", "8a2210bedeb1468f223c08eea4ad15a48d3bc894", "a4543226f6592786e9c38752440d9659993d3cb3", "ab2b09b65fdc91a711e424524e666fc75aae7a51", "42fff5b37006009c2dbfab63c0375c7c7d7d8ee3", "6409b8879c7e61acf3ca17bcc62f49edca627d4c", "3506518d616343d3083f4fe257a5ee36b376b9e1", "808656563eea17470159e6540b05fe6f7ae58c2b", "9cbb6e42a35f26cf1d19f4875cd7f6953f10b95d", "805a0f4b99f162ac4db0ef6e0456138c8d498c3a", "318e7e6daa0a799c83a9fdf7dd6bc0b3e89ab24a", "915ff2bedfa0b73eded2e2e08b17f861c0e82a58", 
"aae31f092fadd09a843e1ca62af52dc15fc33c56", "86b51bd0c80eecd6acce9fc538f284b2ded5bcdd", "4983076c1a8b80ff5cd68b924b11df58a68b6c84", "0c435e7f49f3e1534af0829b7461deb891cf540a", "790aa543151312aef3f7102d64ea699a1d15cb29", "d46fda4b49bbc219e37ef6191053d4327e66c74b", "9ca93ad6200bfa9dd814ac64bfb1044c3a0c01ce", "00220a6783488054eb0fe7b915e882b1294f3318", "bf30477f4bd70a585588528355b7418d2f37953e", "31d60b2af2c0e172c1a6a124718e99075818c408", "a98ff1c2e3c22e3d0a41a2718e4587537b92da0a", "a6ce1a1de164f41cb8999c728bceedf65d66bb23", "84e4b7469f9c4b6c9e73733fa28788730fd30379", "a88ced67f4ed7940c76b666e1c9c0f08b59f9cf8", "f6cf2108ec9d0f59124454d88045173aa328bd2e", "b2ddea9c71cd73fa63e09e8121bc7a098fae70b4", "252f202bfb14d363a969fce19df2972b83fa7ec0", "69ad67e204fb3763d4c222a6c3d05d6725b638ed", "935924ddb5992c11f3202bf995183130ad83d07b", "1d6068631a379adbcff5860ca2311b790df3a70f", "73b90573d272887a6d835ace89bfaf717747c59b", "ca60d007af691558de377cab5e865b5373d80a44", "ebce3f5c1801511de9e2e14465482260ba5933cc", "0ec0fc9ed165c40b1ef4a99e944abd8aa4e38056", "d628aabf1a666a875e77c3d3fee857cd25891947", "d78373de773c2271a10b89466fe1858c3cab677f", "8c3f7bd8ae50337dd812b370ce4c4ea9375a9f58", "703890b7a50d6535900a5883e8d2a6813ead3a03", "cfa572cd6ba8dfc2ee8ac3cc7be19b3abff1a8a2", "ddea3c352f5041fb34433b635399711a90fde0e8", "6f288a12033fa895fb0e9ec3219f3115904f24de", "100428708e4884300e4c1ac1f84cbb16e7644ccf", "1c0acf9c2f2c43be47b34acbd4e7338de360e555", "ef940b76e40e18f329c43a3f545dc41080f68748", "0bf1f999a16461a730dd80e3a187d0675c216292", "464ef1b3dcbe84099c904b6f9e9281c5f6fd75eb", "b6ae677b26da039e0112e434d40baf7dd929a3ba", "59be98f54bb4ed7a2984dc6a3c84b52d1caf44eb", "ef559d5f02e43534168fbec86707915a70cd73a0", "4ef09fe9f7fa027427414cf1f2e9050ac7f5e34d", "58e7dbbb58416b785b4a1733bf611f8106511aca", "fba386ac63fe87ee5a0cf64bf4fb90324b657d61", "630d1728435a529d0b0bfecb0e7e335f8ea2596d", "8e8a6623b4abd2452779c43f3c2085488dfcb323", "081fb4e97d6bb357506d1b125153111b673cc128", 
"e9bb045e702ee38e566ce46cc1312ed25cb59ea7", "bf3f8726f2121f58b99b9e7287f7fbbb7ab6b5f5", "82d2af2ffa106160a183371946e466021876870d", "e45a556df61e2357a8f422bdf864b7a5ed3b8627", "bc08dfa22949fbe54e15b1a6379afade71835968", "677477e6d2ba5b99633aee3d60e77026fb0b9306", "85f6eaa1ed3ae15ec7e777b7f90a277eda38cf7f", "a3d78bc94d99fdec9f44a7aa40c175d5a106f0b9", "bef4df99e1dc6f696f9b3732ab6bac8e85d3fb3c", "4acd683b5f91589002e6f50885df51f48bc985f4", "539cb169fb65a5542c84f42efcd5d2d925e87ebb", "745d49a2ff70450113f07124c2c5263105125f58", "39ce143238ea1066edf0389d284208431b53b802", "85e78aa374d85f9a61da693e5010e40decd3f986", "de92951ea021ec56492d76381a8ae560a972dd68", "4492914df003d690e5ff3cb3e0e0509a51f7753e", "192723085945c1d44bdd47e516c716169c06b7c0", "116f9e9cda25ff3187bc777ceb3ecd28077a7eca", "ce3304119ba6391cb6bb25c4b3dff79164df9ac6", "6ae96f68187f1cdb9472104b5431ec66f4b2470f", "03c56c176ec6377dddb6a96c7b2e95408db65a7a", "594ec0a7839885169c65133cfe50164d4cc74b5c", "c631a31be2c793d398175ceef7daff1848bb6408", "1ef1f33c48bc159881c5c8536cbbd533d31b0e9a", "a1cecbb759c266133084d98747d022c1e638340d", "54483d8b537e51317a8e6c6caf4949d4440c9368", "b472f91390781611d4e197564b0016d9643a5518", "9e4b052844d154c3431120ec27e78813b637b4fc", "992e4119d885f866cb715f4fbf0250449ce0db05", "58823377757e7dc92f3b70a973be697651089756", "f85ccab7173e543f2bfd4c7a81fb14e147695740", "41e5d92b13d36da61287c7ffd77ee71de9eb2942", "0dbf4232fcbd52eb4599dc0760b18fcc1e9546e9", "d5b445c5716952be02172ca4d40c44f4f04067fa", "29fc4de6b680733e9447240b42db13d5832e408f", "f0f80055ab85254ca58c1b08017969a0c355881f", "ad08c97a511091e0f59fc6a383615c0cc704f44a", "d62d82c312c40437bc4c1c91caedac2ba5beb292", "88780bd55615c58d9bacc4d66fc2198e603a1714", "d122d66c51606a8157a461b9d7eb8b6af3d819b0", "06a799ad89a2a45aee685b9e892805e3e0251770", "0509c442550571907258f07aad9da9d00b1e468b", "4b4ecc1cb7f048235605975ab37bb694d69f63e5", "858b51a8a8aa082732e9c7fbbd1ea9df9c76b013", "75ce75c1a5c35ecdba99dd8b7ba900d073e35f78", 
"1b1173a3fb33f9dfaf8d8cc36eb0bf35e364913d", "de45bf9e5593a5549a60ca01f2988266d04d77da", "e111624fb4c5dc60b9e8223abfbf7c4196d34b21", "34c2ea3c7e794215588c58adf0eaad6dc267d082", "e16eeed2ada9166a035d238b1609462928db69db", "a7f188a7161b6605d58e48b2537c18a69bd2446f", "4da4e58072c15904d4ce31076061ebd3ab1cdcd5", "f49aebe58d30241f12c1d7d9f4e04b6e524d7a45", "dcc44853911c3df7db9c3ea5068e6c16aeec71c1", "42e0127a3fd6a96048e0bc7aab6d0ae88ba00fb0", "744fa8062d0ae1a11b79592f0cd3fef133807a03", "976c9f88c23e892c75c452b450407841e5161a32", "273b0511588ab0a81809a9e75ab3bd93d6a0f1e3", "b1a8315b4843da3d0b61c933a11d9b152cfaae70", "d264dedfdca8dc4c71c50311bcdd6ba3980eb331", "23086a13b83d1b408b98346cf44f3e11920b404d", "8e0ad1ccddc7ec73916eddd2b7bbc0019d8a7958", "e4bc529ced68fae154e125c72af5381b1185f34e", "2fdce3228d384456ea9faff108b9c6d0cf39e7c7", "aeaf5dbb3608922246c7cd8a619541ea9e4a7028", "7ebfa8f1c92ac213ff35fa27287dee94ae5735a1", "1feeab271621128fe864e4c64bab9b2e2d0ed1f1", "7373c4a23684e2613f441f2236ed02e3f9942dd4", "3e1190655cc7c1159944d88bdbe591b53f48d761", "75bf3b6109d7a685236c8589f8ead7d769ea863f", "b1df214e0f1c5065f53054195cd15012e660490a", "0ac664519b2b8abfb8966dafe60d093037275573", "5517b28795d7a68777c9f3b2b46845dcdb425b2c", "bbc21d6b7c6e807c6886d237a04b501158ca6bb8", "abf573864b8fbc0f1c491ca60b60527a3e75f0f5", "77d929b3c4bf546557815b41ed5c076a5792dc6b", "eb02daee558e483427ebcf5d1f142f6443a6de6b", "f5a95f857496db376d69f7ac844d1f56e3577b75", "9117fd5695582961a456bd72b157d4386ca6a174", "2c285dadfa6c07d392ee411d0213648a8a1cf68f", "1b300a7858ab7870d36622a51b0549b1936572d4", "a006cd95c14de399706c5709b86ac17fce93fcba", "ab8ecf98f457e29b000c44d49f5bf49ec92e571c", "c2e6daebb95c9dfc741af67464c98f1039127627", "3e03d19b950edadc74ca047dec86227282eccf71", "3769e65690e424808361e3eebfdec8ab91908aa9", "223ec77652c268b98c298327d42aacea8f3ce23f", "754626bd5fb06fee5e10962fdfeddd495513e84b", "9b42fb48d5ac70b6ca5382f50e71ed8bf3a84710", "0b0c2d9db83b4f002f23f4a20cfc5a3d10295372", 
"3e452ca67e17e4173ec8dfbd4a2b803ad2ee5a48", "0b183f5260667c16ef6f640e5da50272c36d599b", "daca9d03c1c951ed518248de7f75ff51e5c272cb", "d57c25c50e5e25fb07fc80b3c3d77b45e16e98cf", "0d7652652c742149d925c4fb5c851f7c17382ab8", "6aefe7460e1540438ffa63f7757c4750c844764d", "31c34a5b42a640b824fa4e3d6187e3675226143e", "c1fb854d9a04b842ff38bd844b50115e33113539", "459eb3cfd9b52a0d416571e4bc4e75f979f4b901", "87806c51dc8c1077953178367dcf5c75c553ce34", "24603ed946cb9385ec541c86d2e42db47361c102", "b82f89d6ef94d26bf4fec4d49437346b727c3bd4", "5e87f5076952cd442718d6b4addce905bae1a1a4", "48255c9e1d6e1d030728d33a71699757e337be08", "c37de914c6e9b743d90e2566723d0062bedc9e6a", "b2b535118c5c4dfcc96f547274cdc05dde629976", "99cd84a62edb2bda2fc2fdc362a72413941f6aa4", "94b729f9d9171e7c4489995e6e1cb134c8521f4e", "fc68c5a3ab80d2d31e6fd4865a7ff2b4ab66ca9f", "1369e9f174760ea592a94177dbcab9ed29be1649", "a6d621a5aae983a6996849db5e6bc63fe0a234af", "9ce2fd6ae16b339886d0ce237faae811230c8ce6", "dec76940896a41a8a7b6e9684df326b23737cd5d", "160259f98a6ec4ec3e3557de5e6ac5fa7f2e7f2b", "7a7f2403e3cc7207e76475e8f27a501c21320a44", "cb160c5c2a0b34aba7b0f39f5dda6aca8135f880", "da15344a4c10b91d6ee2e9356a48cb3a0eac6a97", "0701b01bc99bf3b64050690ceadb58a8800e81ed", "41de109bca9343691f1d5720df864cdbeeecd9d0", "2a4984fb48c175d1e42c6460c5f00963da9f26b6", "2661f38aaa0ceb424c70a6258f7695c28b97238a", "445461a34adc4bcdccac2e3c374f5921c93750f8", "e4c81c56966a763e021938be392718686ba9135e", "8fc36452a49cb0fd43d986da56f84b375a05b4c1", "179e566a2c1a2a48aa3d0028209c11ebe7d6740e", "99d06fe2f4d6d76acf40b6da67c5052e82055f5a", "608b01c70f0d1166c10c3829c411424d9ef550e7", "3cd8ab6bb4b038454861a36d5396f4787a21cc68", "7f4040b482d16354d5938c1d1b926b544652bf5b", "0ba1d855cd38b6a2c52860ae4d1a85198b304be4", "4e581831d24fd90b0b5228b9136e76fa3e8f8279", "a9eb6e436cfcbded5a9f4b82f6b914c7f390adbd", "2e36b63fdf1353425a57a0665b0d0274efe92963", "03fe3d031afdcddf38e5cc0d908b734884542eeb", "2d94dfa9c8f6708e071ef38d58f9f9bcb374cd84", 
"fe556c18b7ab65ceb57e1dd054a2ca21cefe153c", "7c4c442e9c04c6b98cd2aa221e9d7be15efd8663", "d9d7a4b64b13ed1bce89d3cbbabe62e78d70b3fb", "72a5e181ee8f71b0b153369963ff9bfec1c6b5b0", "c0c0b8558b17aa20debc4611275a4c69edd1e2a7", "88ed558bff3600f5354963d1abe762309f66111e", "4cfe921ac4650470b0473fd52a2b801f4494ee64", "0d3068b352c3733c9e1cc75e449bf7df1f7b10a4", "51dc127f29d1bb076d97f515dca4cc42dda3d25b", "1a40c2a2d17c52c8b9d20648647d0886e30a60fa", "64f9519f20acdf703984f02e05fd23f5e2451977", "38bb66c97b35851051e95834639c205254771adc", "489b7e12a420eff0d585f3f866e76b838c2cd275", "e9cebf627c204c6949dcc077d04c57eb66b2c038", "32f7e1d7fa62b48bedc3fcfc9d18fccc4074d347", "30cace74a7d51e9a928287e25bcefb968c49f331", "3daf1191d43e21a8302d98567630b0e2025913b0", "6fdc0bc13f2517061eaa1364dcf853f36e1ea5ae", "2921719b57544cfe5d0a1614d5ae81710ba804fa", "715b69575dadd7804b4f8ccb419a3ad8b7b7ca89", "20a0f71d2c667f3c69df18f097f2b5678ac7d214", "2ac21d663c25d11cda48381fb204a37a47d2a574", "832e1d128059dd5ed5fa5a0b0f021a025903f9d5", "1297ee7a41aa4e8499c7ddb3b1fed783eba19056", "636c786d4e4ac530ac85e3883a2f2cf469e45fe2", "721d9c387ed382988fce6fa864446fed5fb23173", "4cf68a0b1a3f49393a8c11f3a18cccc7912b8424", "afc7092987f0d05f5685e9332d83c4b27612f964", "a1cda8e30ce35445e4f51b47ab65b775f75c9f18", "0773c320713dae62848fceac5a0ac346ba224eca", "2e98329fdec27d4b3b9b894687e7d1352d828b1d", "523b2cbc48decfabffb66ecaeced4fe6a6f2ac78", "1473e6f2d250307f0421f1e2ea68b6485d3bd481", "ee2217f9d22d6a18aaf97f05768035c38305d1fa", "d116bac3b6ad77084c12bea557d42ed4c9d78433", "6fa0c206873dcc5812f7ea74a48bb4bf4b273494", "2e8a0cc071017845ee6f67bd0633b8167a47abed", "0568fc777081cbe6de95b653644fec7b766537b2", "05785cb0dcaace54801aa486d4f8fdad3245b27a", "7a97de9460d679efa5a5b4c6f0b0a5ef68b56b3b", "402f6db00251a15d1d92507887b17e1c50feebca", "5ed66fb992bfefb070b5c39dc45b6e3ff5248c10", "0b6a5200c33434cbfa9bf24ba482f6e06bf5fff7", "949fff3b0a73c81e7ff3d47caf7fbf9c664bcc70", "240d5390af19bb43761f112b0209771f19bfb696", 
"4398afa0aeb5749a12772f2d81ca688066636019", "4e6e5cb93e7e564bc426b5b27888d55101504c50", "1b9976fea3c1cf13f0a102a884f027d9d80a14b3", "01f0a4e1442a7804e1fe95798eff777d08e42014", "e14cc2715b806288fe457d88c1ad07ef55c65318", "3de5dc06f5d089dee111e048c7174a834f1363c1", "835e510fcf22b4b9097ef51b8d0bb4e7b806bdfd", "25982e2bef817ebde7be5bb80b22a9864b979fb0", "480858e55abdbc07ca47b7dc10204613fdd9783c", "308025c378aef6acf9fe3acbddbfddcaa4271e8c", "c47bd9f6eb255da525dbcdfc111609c90bc4d2ae", "10ca2e03ff995023a701e6d8d128455c6e8db030", "1f5f67d315c9dad341d39129d8f8fe7fa58e564c", "5ac80e0b94200ee3ecd58a618fe6afd077be0a00", "c0cdaeccff78f49f4604a6d263dc6eb1bb8707d5", "0a1138276c52c734b67b30de0bf3f76b0351f097", "d57982dc55dbed3d0f89589e319dc2d2bd598532", "00bfef58353564f4e4bd7e2cb68cb66953cf9103", "570308801ff9614191cfbfd7da88d41fb441b423", "3f14b504c2b37a0e8119fbda0eff52efb2eb2461", "98c548a4be0d3b62971e75259d7514feab14f884", "714d487571ca0d676bad75c8fa622d6f50df953b", "b11b71b704629357fe13ed97b216b9554b0e7463", "83bce0907937f09f5ccde26c361d52fe55fc8979", "c83a05de1b4b20f7cd7cd872863ba2e66ada4d3f", "26437fb289cd7caeb3834361f0cc933a02267766", "bdbba95e5abc543981fb557f21e3e6551a563b45", "779d3f0cf74b7d33344eea210170c7c981a7e27b", "67d7022462c98e6c5de9f2254b46f0b8d3b92089", "2d98a1cb0d1a37c79a7ebcb727066f9ccc781703", "51683eac8bbcd2944f811d9074a74d09d395c7f3", "972b1a7ef8cc9c83c2c6d8d126f94f27b567d7d0", "03333e7ec198208c13627066bc76b0367f5e270f", "939d28859c8bd2cca2d692901e174cfd599dac74", "ba69d464bc360f94303ffc9f710009d16a5673a0", "f06b015bb19bd3c39ac5b1e4320566f8d83a0c84", "3d9db1cacf9c3bb7af57b8112787b59f45927355", "eefecac463ebfc0694b9831e842b574f3954fed6", "a0dfb8aae58bd757b801e2dcb717a094013bc178", "7a6e3ed956f71b20c41fbec008b1fa8dacad31a6", "1ac2882559a4ff552a1a9956ebeadb035cb6df5b", "95e7cf27a8ee62b63ed9d1ecb02a7016e9a680a6", "c59b62864a6d86eead075c88137a87070a984550", "93e451f71245f8e5ba346a48de2d09c0bccc3c22", "a9215666b4bcdf8d510de8952cf0d55b635727dc", 
"7e507370124a2ac66fb7a228d75be032ddd083cc", "91495c689e6e614247495c3f322d400d8098de43", "cc5edaa1b0e91bc3577547fc30ea094aa2722bf0", "8bed7ff2f75d956652320270eaf331e1f73efb35", "40389b941a6901c190fb74e95dc170166fd7639d", "2a826273e856939b58be8779d2136bffa0dddb08", "53c36186bf0ffbe2f39165a1824c965c6394fe0d", "39c48309b930396a5a8903fdfe781d3e40d415d0", "071af21377cc76d5c05100a745fb13cb2e40500f", "0235b2d2ae306b7755483ac4f564044f46387648", "09750c9bbb074bbc4eb66586b20822d1812cdb20", "7d8798e7430dcc68fcdbd93053c884fc44978906", "070de852bc6eb275d7ca3a9cdde8f6be8795d1a3", "23fd653b094c7e4591a95506416a72aeb50a32b5", "a3a6a6a2eb1d32b4dead9e702824375ee76e3ce7", "c900e0ad4c95948baaf0acd8449fde26f9b4952a", "82a4a35b2bae3e5c51f4d24ea5908c52973bd5be", "a168ca2e199121258fbb2b6c821207456e5bf994", "4e43408a59852c1bbaa11596a5da3e42034d9380", "968f472477a8afbadb5d92ff1b9c7fdc89f0c009", "ea8fa68b74ffefbe79a3576d7e4ae4365a1346ff", "fdbacf2ff0fc21e021c830cdcff7d347f2fddd8e", "ce75deb5c645eeb08254e9a7962c74cab1e4c480", "9f43caad22803332400f498ca4dd0429fe7da0aa", "2a7058a720fa9da4b9b607ea00bfdb63652dff95", "c98def5f9d0c6ae519fe0aeebe5378f65b14e496", "9636c7d3643fc598dacb83d71f199f1d2cc34415", "55ee484f9cbd62111512485e3c1c3eadbf2e15c0", "2babf665198a91932a4ce557f627c28e7e8f31f2", "6f9026627fb31d4cfb08dbcc4ab852945dc42252", "48910f9b6ccc40226cd4f105ed5291571271b39e", "ea1303f6746f815b7518c82c9c4d4a00cd6328b9", "e101bab97bce2733222db9cfbb92a82779966508", "914d7527678b514e3ee9551655f55ffbd3f0eb0a", "aae742779e8b754da7973949992d258d6ca26216", "7adfc2f854e2ea45c29d22d6e2dcccdd527f46a8", "c12034ca237ee330dd25843f2d05a6e1cfde1767", "5f1cd82343f4bd6972f674d50aecb453d06f04ad", "b41d585246360646c677a8238ec35e8605b083b0", "69ba86f7aac7b7be0ac41d990f5cd38400158f96", "5d7f8eb73b6a84eb1d27d1138965eb7aef7ba5cf", "b1c5581f631dba78927aae4f86a839f43646220c", "0d6d9c4b5dd282b8f29cd3c200df02a00141f0a9", "5a7520380d9960ff3b4f5f0fe526a00f63791e99", "0278acdc8632f463232e961563e177aa8c6d6833", 
"28c9198d30447ffe9c96176805c1cd81615d98c8", "3060ac37dec4633ef69e7bc63488548ab3511f61", "e1f6e2651b7294951b5eab5d2322336af1f676dc", "d91f9e8cbf271004ef1a293401197a10a26ccd1b", "08872d801f134e41753601e85971769b28314ca2", "539f55c0e2501c1d86791c8b54b225d9b3187b9c", "2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d", "36cf96fe11a2c1ea4d999a7f86ffef6eea7b5958", "4d625677469be99e0a765a750f88cfb85c522cce", "7acbf0b060e948589b38d5501ca217463cfd5c2f", "59420fd595ae745ad62c26ae55a754b97170b01f", "31697737707d7f661cbc6785b76cf9a79fee3ccd", "9b93406f3678cf0f16451140ea18be04784faeee", "50614ff325f0c8ca20f99efc55d65a8d4cc768cd", "c44c84540db1c38ace232ef34b03bda1c81ba039", "6c690af9701f35cd3c2f6c8d160b8891ad85822a", "1251deae1b4a722a2155d932bdfb6fe4ae28dd22", "0ad4a814b30e096ad0e027e458981f812c835aa0", "5a8ca0cfad32f04449099e2e3f3e3a1c8f6541c0", "82f8652c2059187b944ce65e87bacb6b765521f6", "0fcf04fda0bea5265b73c85d2cc2f7f70416537b", "df0e280cae018cebd5b16ad701ad101265c369fa", "23ee7b7a9ca5948e81555aaf3a044cfec778f148", "ca447d6479554b27b4afbd0fd599b2ed39f2c335", "488e475eeb3bb39a145f23ede197cd3620f1d98a", "aefc7c708269b874182a5c877fb6dae06da210d4", "1a5b39a4b29afc5d2a3cd49087ae23c6838eca2b", "0273414ba7d56ab9ff894959b9d46e4b2fef7fd0", "776362314f1479f5319aaf989624ac604ba42c65", "68c5238994e3f654adea0ccd8bca29f2a24087fc", "ab133af7ec2726f712dd049213e6a27449d28c78", "2ebc35d196cd975e1ccbc8e98694f20d7f52faf3", "f834c50e249c9796eb7f03da7459b71205dc0737", "a1dd806b8f4f418d01960e22fb950fe7a56c18f1", "ff8db3810f927506f3aa594d66d5e8658f3cf4d5", "a695c2240382e362262db72017ceae0365d63f8f", "bfdafe932f93b01632a5ba590627f0d41034705d", "ee1f9637f372d2eccc447461ef834a9859011ec1", "aade6c3dbea3b0a918f87c85a36cb6b06eff4f5b", "094357c1a2ba3fda22aa6dd9e496530d784e1721", "c5c56e9c884ac4070880ac481909bb6b621d2a3f", "f26097a1a479fb6f32b27a93f8f32609cfe30fdc", "d05513c754966801f26e446db174b7f2595805ba", "3b9d94752f8488106b2c007e11c193f35d941e92", "2d71e0464a55ef2f424017ce91a6bcc6fd83f6c3", 
"9b9f6e5eb6d7fa50300d67502e8fda1006594b84", "a75ee7f4c4130ef36d21582d5758f953dba03a01", "d9ef1a80738bbdd35655c320761f95ee609b8f49", "189b1859f77ddc08027e1e0f92275341e5c0fdc6", "3a2a37ca2bdc82bba4c8e80b45d9f038fe697c7d", "8e55486aa456cae7f04fe922689b3e99a0e409fe", "18a849b1f336e3c3b7c0ee311c9ccde582d7214f", "6b8329730b2e13178a577b878631735a1cd58a71", "041b51a81a977b5c64682c55414ad8d165c1f2ce", "a967426ec9b761a989997d6a213d890fc34c5fe3", "78fdf2b98cf6380623b0e20b0005a452e736181e", "c5eba789aeb41904aa1b03fad1dc7cea5d0cd3b6", "c570d1247e337f91e555c3be0e8c8a5aba539d9f", "7859667ed6c05a467dfc8a322ecd0f5e2337db56", "26b606ac6beb2977a7853b032416c23c7b36cb8a", "4c71b0cdb6b80889b976e8eb4457942bd4dd7b66", "0c12cbb9b9740dfa2816b8e5cde69c2f5a715c58", "09dd01e19b247a33162d71f07491781bdf4bfd00", "8a1e95b82d8cf27e0034e127091396efd4c8bd9e", "133dd0f23e52c4e7bf254e8849ac6f8b17fcd22d", "b0b944b3a783c2d9f12637b471fe1efb44deb52b", "090e4713bcccff52dcd0c01169591affd2af7e76", "42f6f5454dda99d8989f9814989efd50fe807ee8", "cac8bb0e393474b9fb3b810c61efdbc2e2c25c29", "82417d8ec8ac6406f2d55774a35af2a1b3f4b66e", "061c84a4143e859a7caf6e6d283dfb30c23ee56e", "4c141534210df53e58352f30bab558a077fec3c6", "d61578468d267c2d50672077918c1cda9b91429b", "64cf1cda80a23ed6fc1c8e66065614ef7bdeadf3", "4b60e45b6803e2e155f25a2270a28be9f8bec130", "458677de7910a5455283a2be99f776a834449f61", "bc9bad25f8149318314971d8b8c170064e220ea8", "176a3e9e118712251124c1347516a92d5e315297", "401e6b9ada571603b67377b336786801f5b54eee", "053931267af79a89791479b18d1b9cde3edcb415", "b6620027b441131a18f383d544779521b119c1aa", "f63b3b8388bc4dcd4a0330402af37a59ce37e4f3", "90221884fe2643b80203991686af78a9da0f9791", "38a9ca2c49a77b540be52377784b9f734e0417e4", "e86008f6aebd0ab26bdb69d2549b2e8454b8959c", "053b263b4a4ccc6f9097ad28ebf39c2957254dfb", "06bad0cdda63e3fd054e7b334a5d8a46d8542817", "248db911e3a6a63ecd5ff6b7397a5d48ac15e77a", "48a5b6ee60475b18411a910c6084b3a32147b8cd", "53ce84598052308b86ba79d873082853022aa7e9", 
"3039627fa612c184228b0bed0a8c03c7f754748c", "3d2d439ead6e32877ce40e5568e62dee4a877836", "15728d6fd5c9fc20b40364b733228caf63558c31", "7f8d44e7fd2605d580683e47bb185de7f9ea9e28", "913961d716a4102d3428224f999295f12438399f", "be4f7679797777f2bc1fd6aad8af67cce5e5ce87", "1b6394178dbc31d0867f0b44686d224a19d61cf4", "b7774c096dc18bb0be2acef07ff5887a22c2a848", "83c1fee5ef4b7ba9d9730f3b550dd7bfbdaf591d", "56e885b9094391f7d55023a71a09822b38b26447", "4adca62f888226d3a16654ca499bf2a7d3d11b71", "7f44f8a5fd48b2d70cc2f344b4d1e7095f4f1fe5", "aa892fe17c06e2b18db2b12314499a741e755df7", "b75eecc879da38138bf3ace9195ae1613fb6e3cc", "ad6c7cc5c0f4ab273fef105ff3761d2c08609a20", "01e12be4097fa8c94cabeef0ad61498c8e7762f2", "0e93a5a7f6dbdb3802173dca05717d27d72bfec0", "7c349932a3d083466da58ab1674129600b12b81c", "8518b501425f2975ea6dcbf1e693d41e73d0b0af", "5fea26746f3140b12317fcf3bc1680f2746e172e", "08a1fc55d03e4a73cad447e5c9ec79a6630f3e2d", "71bbda43b97e8dc8b67b2bde3c873fa6aacd439f", "f604c312ff4706f1849078b2ca28409f0fcd859d", "dc5d9399b3796db7fd850990402dce221b98c8be", "24cf9fe9045f50c732fc9c602358af89ae40a9f7", "fc23a386c2189f221b25dbd0bb34fcd26ccf60fa", "54aacc196ffe49b3450059fccdf7cd3bb6f6f3c3", "c88c21eb9a8e08b66c981db35f6556f4974d27a8", "24b5ea4e262e22768813e7b6581f60e4ab9a8de7", "e3a6e5a573619a97bd6662b652ea7d088ec0b352", "16de1324459fe8fdcdca80bba04c3c30bb789bdf", "9d896605fbf93315b68d4ee03be0770077f84e40", "0faee699eccb2da6cf4307ded67ba8434368257b", "09b80d8eea809529b08a8b0ff3417950c048d474", "4863333b9e5f25423e273a0581de3edee8bb3b97", "46c82cfadd9f885f5480b2d7155f0985daf949fc", "be437b53a376085b01ebd0f4c7c6c9e40a4b1a75", "31ace8c9d0e4550a233b904a0e2aabefcc90b0e3", "32e9c9520cf6acb55dde672b73760442b2f166f5", "4b74f2d56cd0dda6f459319fec29559291c61bff", "778c1e95b6ea4ccf89067b83364036ab08797256", "0182d090478be67241392df90212d6cd0fb659e6", "bd9e0b6a90b51cc19b65f51dacd08ce1a7ccaac5", "1da83903c8d476c64c14d6851c85060411830129", "1b69b860e22278a6f482507b8ce879082dd00c44", 
"004e3292885463f97a70e1f511dc476289451ed5", "4c6886c489e93ccab5a1124555a6f3e5b0104464", "e2b3aae594035e58f72125e313e92c7c4cc9d5bb", "55a7286f014cc6b51a3f50b1e6bc8acc8166f231", "7574f999d2325803f88c4915ba8f304cccc232d1", "c352b5ccd6fa1812b108d74d268ce3f19efccf0b", "f86ddd6561f522d115614c93520faad122eb3b56", "7d41b67a641426cb8c0f659f0ba74cdb60e7159a", "42e3dac0df30d754c7c7dab9e1bb94990034a90d", "3ada7640b1c525056e6fcd37eea26cd638815cd6", "14318d2b5f2cf731134a6964d8193ad761d86942", "450c6a57f19f5aa45626bb08d7d5d6acdb863b4b", "290136947fd44879d914085ee51d8a4f433765fa", "d2598c088b0664c084413796f39697c6f821d56e", "3bd56f4cf8a36dd2d754704bcb71415dcbc0a165", "59bfeac0635d3f1f4891106ae0262b81841b06e4", "641f0989b87bf7db67a64900dcc9568767b7b50f", "1f8656e2254e353a91cceb08b33c25643a1b1fb7", "3d0b2da6169d38b56c58fe5f13342cf965992ece", "c614450c9b1d89d5fda23a54dbf6a27a4b821ac0", "a481e394f58f2d6e998aa320dad35c0d0e15d43c", "1a45ddaf43bcd49d261abb4a27977a952b5fff12", "9aab33ce8d6786b3b77900a9b25f5f4577cea461", "fa32b29e627086d4302db4d30c07a9d11dcd6b84", "5375a3344017d9502ebb4170325435de3da1fa16", "af4745a3c3c7b51dab0fd90d68b53e60225aa4a9", "2f59f28a1ca3130d413e8e8b59fb30d50ac020e2", "14fb3283d4e37760b7dc044a1e2906e3cbf4d23a", "a8638a07465fe388ae5da0e8a68e62a4ee322d68", "a325d5ea42a0b6aeb0390318e9f65f584bd67edd", "133f01aec1534604d184d56de866a4bd531dac87", "dac8fc521dfafb2d082faa4697f491eae00472c7", "5305bfdff39ae74d2958ba28d42c16495ce2ff86", "046865a5f822346c77e2865668ec014ec3282033", "7cee0311e71dca540aaf3d87bef3a6c97ca39bc3", "074af31bd9caa61fea3c4216731420bd7c08b96a", "0eed55ea9f401f25e1474cdbaf09367f44b4f490", "c3beae515f38daf4bd8053a7d72f6d2ed3b05d88", "6d8e3f3a83514381f890ab7cd2a1f1c5be597b69", "a6583c8daa7927eedb3e892a60fc88bdfe89a486", "7196b3832065aec49859c61318037b0c8c12363a", "16395b40e19cbc6d5b82543039ffff2a06363845", "5594beb2b314f5433bd7581f64bdbc58f2933dc4", "1fcb905e4505a781fb0b375eb470f5661e38ae39", "37179032085e710d1d62a1ba2e9c1f63bb4dde91", 
"100641ed8a5472536dde53c1f50fa2dd2d4e9be9", "35b3dc0e961a15a7a60b95490a989f91680acc7c", "d066575b48b552a38e63095bb1f7b56cbb1fbea4", "f4ebbeb77249d1136c355f5bae30f02961b9a359", "37007af698b990a3ea8592b11d264b14d39c843f", "706b9767a444de4fe153b2f3bff29df7674c3161", "fffe5ab3351deab81f7562d06764551422dbd9c4", "5fe3a9d54d5070308803dd8ef611594f59805400", "29f27448e8dd843e1c4d2a78e01caeaea3f46a2d", "aa577652ce4dad3ca3dde44f881972ae6e1acce7", "f7de943aa75406fe5568fdbb08133ce0f9a765d4", "7ab930146f4b5946ec59459f8473c700bcc89233", "80277fb3a8a981933533cf478245f262652a33b5", "56e25358ebfaf8a8b3c7c33ed007e24f026065d0", "1862bfca2f105fddfc79941c90baea7db45b8b16", "59319c128c8ac3c88b4ab81088efe8ae9c458e07", "def934edb7c7355757802a95218c6e4ed6122a72", "97b8249914e6b4f8757d22da51e8347995a40637", "8fa3478aaf8e1f94e849d7ffbd12146946badaba", "071ec4f3fb4bfe6ae9980477d208a7b12691710e", "d79365336115661b0e8dbbcd4b2aa1f504b91af6", "7d8c2d29deb80ceed3c8568100376195ce0914cb", "21258aa3c48437a2831191b71cd069c05fb84cf7", "e908ce44fa94bb7ecf2a8b70cb5ec0b1a00b311a", "378ae5ca649f023003021f5a63e393da3a4e47f0", "d666ce9d783a2d31550a8aa47da45128a67304a7", "2020e8c0be8fa00d773fd99b6da55029a6a83e3d", "b13e2e43672e66ba45d1b852a34737e4ce04226b", "0ce3a786aed896d128f5efdf78733cc675970854", "81a4397d5108f6582813febc9ddbeff905474120", "9e60614fd57afe381ae42c6ee0b18f32f60bb493", "e5799fd239531644ad9270f49a3961d7540ce358", "4d0b3921345ae373a4e04f068867181647d57d7d", "ab1719f573a6c121d7d7da5053fe5f12de0182e7", "46a29a5026142c91e5655454aa2c2f122561db7f", "33ad23377eaead8955ed1c2b087a5e536fecf44e", "e4c3d5d43cb62ac5b57d74d55925bdf76205e306", "79dc84a3bf76f1cb983902e2591d913cee5bdb0e", "7f5346a169c9784ca79aca5d95ae8bf2ebab58e3", "6fe2efbcb860767f6bb271edbb48640adbd806c3", "0c59071ddd33849bd431165bc2d21bbe165a81e0", "e1f790bbedcba3134277f545e56946bc6ffce48d", "d92084e376a795d3943df577d3b3f3b7d12eeae5", "25c3cdbde7054fbc647d8be0d746373e7b64d150", "7aa4c16a8e1481629f16167dea313fe9256abb42", 
"0b4c4ea4a133b9eab46b217e22bda4d9d13559e6", "391b86cf16c2702dcc4beee55a6dd6d3bd7cf27b", "a74251efa970b92925b89eeef50a5e37d9281ad0", "3b15a48ffe3c6b3f2518a7c395280a11a5f58ab0", "24de12df6953151ef5cd0379e205eb0f57ff9d1f", "2dd2c7602d7f4a0b78494ac23ee1e28ff489be88", "318a81acdd15a0ab2f706b5f53ee9d4d5d86237f", "fbc9ba70e36768efff130c7d970ce52810b044ff", "46f48211716062744ddec5824e9de9322704dea1", "784a83437b3dba49c0d7ccc10ac40497b84661a5", "db227f72bb13a5acca549fab0dc76bce1fb3b948", "3ce96f03874d42345c0727edc78b6949b20b4a11", "c41de506423e301ef2a10ea6f984e9e19ba091b4", "824d1db06e1c25f7681e46199fd02cb5fc343784", "743e582c3e70c6ec07094887ce8dae7248b970ad", "9590b09c34fffda08c8f54faffa379e478f84b04", "6256b47342f080c62acd106095cf164df2be6020", "778c9f88839eb26129427e1b8633caa4bd4d275e", "20d6a4aaf5abf2925fdce2780e38ab1771209f76", "f558af209dd4c48e4b2f551b01065a6435c3ef33", "26bbe76d1ae9e05da75b0507510b92e7e6308c73", "d9318c7259e394b3060b424eb6feca0f71219179", "e88988f4696e7e2925ed96467fde4314bfa95eff", "2a92bda6dbd5cce5894f7d370d798c07fa8783f4", "0163d847307fae508d8f40ad193ee542c1e051b4", "4563b46d42079242f06567b3f2e2f7a80cb3befe", "65b737e5cc4a565011a895c460ed8fd07b333600", "6577c76395896dd4d352f7b1ee8b705b1a45fa90", "20e505cef6d40f896e9508e623bfc01aa1ec3120", "4e343c66c5fe7426132869d552f0f205d1bc5307", "0b3786a3a0ea7ec08f01636124c183dbee8f625f", "976e0264bb57786952a987d4456850e274714fb8", "a0d6390dd28d802152f207940c7716fe5fae8760", "588bed36b3cc9e2f26c39b5d99d6687f36ae1177", "cc9057d2762e077c53e381f90884595677eceafa", "73ba33e933e834b815f62a50aa1a0e15c6547e83", "ef999ab2f7b37f46445a3457bf6c0f5fd7b5689d", "94a11b601af77f0ad46338afd0fa4ccbab909e82", "96b1f2bde46fe4f6cc637398a6a71e8454291a6e", "af97a51f56cd6b793cf96692931a8d1ddbe4e3cc", "57c270a9f468f7129643852945cf3562cbb76e07", "2f88d3189723669f957d83ad542ac5c2341c37a5", "87147418f863e3d8ff8c97db0b42695a1c28195b", "385750bcf95036c808d63db0e0b14768463ff4c6", "a9d861e270b8b1e6deea1936b258f49f1823005b", 
"cacce7f4ce74e3269f5555aa6fd83e48baaf9c96", "833fa04463d90aab4a9fe2870d480f0b40df446e", "08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7", "0a15b8c7d529c7facc2d3b4c2111801dd4adfc28", "666939690c564641b864eed0d60a410b31e49f80", "8a2bedaa38abf173823944f0de2c84f5b2549609", "f39783847499dd56ba39c1f3b567f64dfdfa8527", "ab8f9a6bd8f582501c6b41c0e7179546e21c5e91", "b5747ecfa0f3be0adaad919d78763b1133c4d662", "922838dd98d599d1d229cc73896d55e7a769aa7c", "bb7f2c5d84797742f1d819ea34d1f4b4f8d7c197", "8d0bc14589dea1f4f88914ffcb57a5c54830f2cc", "36b13627ee8a5a8cd04645213aabfa917bbd32f5", "11dc744736a30a189f88fa81be589be0b865c9fa", "633101e794d7b80f55f466fd2941ea24595e10e6", "52a9f957f776c8b3d913cfcd20452b9e31c27845", "5d33a10752af9ea30993139ac6e3a323992a5831", "aa90a466a2ff7781c36e7da7df0013aa5b117510", "96b6f8ac898c8ef6b947c50bb66fe6b1e6f2fb11", "d01303062b21cd9ff46d5e3ff78897b8499480de", "07a31bd7a0bd7118f8ac0bc735feef90e304fb08", "a14db48785d41cd57d4eac75949a6b79fc684e70", "9b246c88a0435fd9f6d10dc88f47a1944dd8f89e", "1910f5f7ac81d4fcc30284e88dee3537887acdf3", "4a4b5ae5793696b861aa009932e7a121d36ad67a", "15e12d5c4d80a2b6f4d957a3ffd130564e9bab3a", "d3409f66d35f5828affda26fc3416771eb8154b1", "1177977134f6663fff0137f11b81be9c64c1f424", "89de30a75d3258816c2d4d5a733d2bef894b66b9", "7923742e2af655dee4f9a99e39916d164bc30178", "44b1399e8569a29eed0d22d88767b1891dbcf987", "22a7f1aebdb57eecd64be2a1f03aef25f9b0e9a7", "955e2a39f51c0b6f967199942d77625009e580f9", "dc107e7322f7059430b4ef4991507cb18bcc5d95", "140438a77a771a8fb656b39a78ff488066eb6b50", "ae18ccb35a1a5d7b22f2a5760f706b1c11bf39a9", "06a6347ac14fd0c6bb3ad8190cbe9cdfa5d59efc", "f0f0e94d333b4923ae42ee195df17c0df62ea0b1", "4622b82a8aff4ac1e87b01d2708a333380b5913b", "7e8c8b1d72c67e2e241184448715a8d4bd88a727", "5666ed763698295e41564efda627767ee55cc943", "f7b4bc4ef14349a6e66829a0101d5b21129dcf55", "47a2727bd60e43f3253247b6d6f63faf2b67c54b", "0ec1673609256b1e457f41ede5f21f05de0c054f", "e10a257f1daf279e55f17f273a1b557141953ce2", 
"66e6f08873325d37e0ec20a4769ce881e04e964e", "0e73d2b0f943cf8559da7f5002414ccc26bc77cd", "07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1", "19d3b02185ad36fb0b792f2a15a027c58ac91e8e", "4b7f21b48c7e0dc7334e36108f558d54642c17c0", "7a6d9f89e0925a220fe3dfba4f0d2745f8be6c9a", "2d31ab536b3c8a05de0d24e0257ca4433d5a7c75", "8686b15802529ff8aea50995ef14079681788110", "b1444b3bf15eec84f6d9a2ade7989bb980ea7bd1", "fdfaf46910012c7cdf72bba12e802a318b5bef5a", "27218ff58c3f0e7d7779fba3bb465d746749ed7c", "2910fcd11fafee3f9339387929221f4fc1160973", "2cf3564d7421b661e84251d280d159d4b3ebb336", "556545eec370b9d300fc044a1aa63fc44fd79b0f", "fd7b6c77b46420c27725757553fcd1fb24ea29a8", "c1dfabe36a4db26bf378417985a6aacb0f769735", "4793f11fbca4a7dba898b9fff68f70d868e2497c", "0058cbe110933f73c21fa6cc9ae0cd23e974a9c7", "12692fbe915e6bb1c80733519371bbb90ae07539", "4d3c4c3fe8742821242368e87cd72da0bd7d3783", "3896c62af5b65d7ba9e52f87505841341bb3e8df", "2d38fd1df95f5025e2cee5bc439ba92b369a93df", "78fede85d6595e7a0939095821121f8bfae05da6", "55cc90968e5e6ed413dd607af2a850ac2f54e378", "100da509d4fa74afc6e86a49352751d365fceee5", "2f16baddac6af536451b3216b02d3480fc361ef4", "6581c5b17db7006f4cc3575d04bfc6546854a785", "7e8016bef2c180238f00eecc6a50eac473f3f138", "d2d9612d3d67582d0cd7c1833599b88d84288fab", "5dfebcb7bfefb1af1cfef61a151abfe98a7e7cfa", "1e58d7e5277288176456c66f6b1433c41ca77415", "9f8ebf149aed8a0eda5c3375c9947c6b26eb7873", "ac48ecbc7c3c1a7eab08820845d47d6ce197707c", "d1d6f1d64a04af9c2e1bdd74e72bd3ffac329576", "356a144d2aa5cc5e74d178dae3963003871aa8a1", "59b6ff409ae6f57525faff4b369af85c37a8dd80", "0f4eb63402a4f3bae8f396e12133684fb760def1", "f05ad40246656a977cf321c8299158435e3f3b61", "4be774af78f5bf55f7b7f654f9042b6e288b64bd", "322c063e97cd26f75191ae908f09a41c534eba90", "bc27434e376db89fe0e6ef2d2fabc100d2575ec6", "5a93f9084e59cb9730a498ff602a8c8703e5d8a5", "359b4a4c6cb58c8ab5e8eaaed0e8562c8c43a0f9", "92c4636962b719542deb984bd2bf75af405b574c", "345bea5f7d42926f857f395c371118a00382447f", 
"6043006467fb3fd1e9783928d8040ee1f1db1f3a", "3c8da376576938160cbed956ece838682fa50e9f", "1b4f6f73c70353869026e5eec1dd903f9e26d43f", "2ef51b57c4a3743ac33e47e0dc6a40b0afcdd522", "8e4808e71c9b9f852dc9558d7ef41566639137f3", "0d14261e69a4ad4140ce17c1d1cea76af6546056", "8db9188e5137e167bffb3ee974732c1fe5f7a7dc", "bf4825474673246ae855979034c8ffdb12c80a98", "0db36bf08140d53807595b6313201a7339470cfe", "5763b09ebca9a756b4adebf74d6d7de27e80e298", "051f03bc25ec633592aa2ff5db1d416b705eac6c", "7f904093e6933cab876e87532111db94c71a304f", "8ebe2df4d82af79f0f082ced70f3a73d7fb93b66", "2f841ff062053f38725030aa1b77db903dad1efb", "ea218cebea2228b360680cb85ca133e8c2972e56", "55966926e7c28b1eee1c7eb7a0b11b10605a1af0", "435642641312364e45f4989fac0901b205c49d53", "9a0c7a4652c49a177460b5d2fbbe1b2e6535e50a", "e790a2538579c8e2ef9b314962ab26197d6664c6", "3f5693584d7dab13ffc12122d6ddbf862783028b", "5287d8fef49b80b8d500583c07e935c7f9798933", "710011644006c18291ad512456b7580095d628a2", "7a1ce696e260899688cb705f243adf73c679f0d9", "71f36c8e17a5c080fab31fce1ffea9551fc49e47", "76d1c6c6b67e67ced1f19a89a5034dafc9599f25", "5b89744d2ac9021f468b3ffd32edf9c00ed7fed7", "c65d2ee433ae095652abe3860eeafe6082c636c6", "29f4ac49fbd6ddc82b1bb697820100f50fa98ab6", "c9f588d295437009994ddaabb64fd4e4c499b294", "91df860368cbcebebd83d59ae1670c0f47de171d", "d73d2c9a6cef79052f9236e825058d5d9cdc1321", "34ce703b7e79e3072eed7f92239a4c08517b0c55", "384945abd53f6a6af51faf254ba8ef0f0fb3f338", "5502dfe47ac26e60e0fb25fc0f810cae6f5173c0", "fc2bad3544c7c8dc7cd182f54888baf99ed75e53", "dbb9601a1d2febcce4c07dd2b819243d81abb2c2", "5798055e11e25c404b1b0027bc9331bcc6e00555", "bbc4b376ebd296fb9848b857527a72c82828fc52", "0830c9b9f207007d5e07f5269ffba003235e4eff", "4e8168fbaa615009d1618a9d6552bfad809309e9", "a8faeef97e2a00eddfb17a44d4892c179a7cc277", "02dd0af998c3473d85bdd1f77254ebd71e6158c6", "8b8728edc536020bc4871dc66b26a191f6658f7c", "c5fe40875358a286594b77fa23285fcfb7bda68e", "eac97959f2fcd882e8236c5dd6035870878eb36b", 
"2e0f5e72ad893b049f971bc99b67ebf254e194f7", "9b1a70d6771547cbcf6ba646f8775614c0162aca", "3e01f2fefe219bfeb112f1d82e76ebba4c0e2aac", "a6e8a8bb99e30a9e80dbf80c46495cf798066105", "4c8e5fc0877d066516bb63e6c31eb1b8b5f967eb", "96fbadc5fa1393d59ce0b8fd3d71aebc1fe35b40", "b3154d981eca98416074538e091778cbc031ca29", "21e828071249d25e2edaca0596e27dcd63237346", "9821669a989a3df9d598c1b4332d17ae8e35e294", "550351edcfd59d3666984771f5248d95548f465a", "1768909f779869c0e83d53f6c91764f41c338ab5", "634541661d976c4b82d590ef6d1f3457d2857b19", "6a38c575733b0f7118970238e8f9b480522a2dbc", "e3e2c106ccbd668fb9fca851498c662add257036", "0431e8a01bae556c0d8b2b431e334f7395dd803a", "73fd7e74457e0606704c5c3d3462549f1b2de1ad", "adf7ccb81b8515a2d05fd3b4c7ce5adf5377d9be", "1b27ca161d2e1d4dd7d22b1247acee5c53db5104", "48f211a9764f2bf6d6dda4a467008eda5680837a", "0ea38a5ba0c8739d1196da5d20efb13406bb6550", "7a061e7eab865fc8d2ef00e029b7070719ad2e9a", "177bc509dd0c7b8d388bb47403f28d6228c14b5c", "7668ce758af72df8e0a10d4b3cb0fd58092fe3e1", "4307e8f33f9e6c07c8fc2aeafc30b22836649d8c", "5bf70c1afdf4c16fd88687b4cf15580fd2f26102", "6cddc7e24c0581c50adef92d01bb3c73d8b80b41", "2b507f659b341ed0f23106446de8e4322f4a3f7e", "e506cdb250eba5e70c5147eb477fbd069714765b", "15a9f812e781cf85c283f7cf2aa2928b370329c5", "57ee3a8b0cafe211d1e9b477d210bb78b9d43bc1", "9b6d9f0923e1d42c86a1154897b1a9bd7ba6716c", "1a81c722727299e45af289d905d7dcf157174248", "ff398e7b6584d9a692e70c2170b4eecaddd78357", "5e6ba16cddd1797853d8898de52c1f1f44a73279", "0486214fb58ee9a04edfe7d6a74c6d0f661a7668", "77c3574a020757769b2ca807ff4b95a88eaa2a37", "286adff6eff2f53e84fe5b4d4eb25837b46cae23", "a0f6196d27a39cde2dbf62c08d89cbe489600bb0", "e1d726d812554f2b2b92cac3a4d2bec678969368", "4bfce41cc72be315770861a15e467aa027d91641", "2e475f1d496456831599ce86d8bbbdada8ee57ed", "078d507703fc0ac4bf8ca758be101e75ea286c80", "affa61d044daa1a7d43a6803a743eab47c89c45d", "7d94fd5b0ca25dd23b2e36a2efee93244648a27b", "79f6a8f777a11fd626185ab549079236629431ac", 
"f5aee1529b98136194ef80961ba1a6de646645fe", "291f527598c589fb0519f890f1beb2749082ddfd", "43b8b5eeb4869372ef896ca2d1e6010552cdc4d4", "aeff403079022683b233decda556a6aee3225065", "397022a4460750c762dbb0aaebcacc829dee8002", "34dd83115195676e7a8b008eb0e9abe84b330b32", "cd23dc3227ee2a3ab0f4de1817d03ca771267aeb", "d6a9ea9b40a7377c91c705f4c7f206a669a9eea2", "124538b3db791e30e1b62f81d4101be435ee12ef", "eca706b4d77708452bdad1c98a23e4e88ce941ab", "09628e9116e7890bc65ebeabaaa5f607c9847bae", "0fe96806c009e8d095205e8f954d41b2b9fd5dcf", "8557914593e8540fcdd9b11aef076f68d41d3b4b", "214db8a5872f7be48cdb8876e0233efecdcb6061", "966e36f15b05ef8436afecf57a97b73d6dcada94", "023ed32ac3ea6029f09b8c582efbe3866de7d00a", "69a68f9cf874c69e2232f47808016c2736b90c35", "a8154d043f187c6640cb6aedeaa8385a323e46cf", "0af48a45e723f99b712a8ce97d7826002fe4d5a5", "7fa2605676c589a7d1a90d759f8d7832940118b5", "06f146dfcde10915d6284981b6b84b85da75acd4", "1630e839bc23811e340bdadad3c55b6723db361d", "47638197d83a8f8174cdddc44a2c7101fa8301b7", "cf4c1099bef189838877c8785812bc9baa5441ed", "902114feaf33deac209225c210bbdecbd9ef33b1", "e3b9863e583171ac9ae7b485f88e503852c747b6", "43476cbf2a109f8381b398e7a1ddd794b29a9a16", "5c3dce55c61ee86073575ac75cc882a215cb49e6", "e6c4715476216be00ea61fc276ff39fb4620d785", "c9424d64b12a4abe0af201e7b641409e182babab", "2d84c0d96332bb4fbd8acced98e726aabbf15591", "3a4f522fa9d2c37aeaed232b39fcbe1b64495134", "f69de2b6770f0a8de6d3ec1a65cb7996b3c99317", "bbfe0527e277e0213aafe068113d719b2e62b09c", "80bd795930837330e3ced199f5b9b75398336b87", "d6ca3dc01de060871839d5536e8112b551a7f9ff", "7914c3f510e84a3d83d66717aad0d852d6a4d148", "2559b15f8d4a57694a0a33bdc4ac95c479a3c79a", "42eda7c20db9dc0f42f72bb997dd191ed8499b10", "3b64efa817fd609d525c7244a0e00f98feacc8b4", "46e0703044811c941f0b5418139f89d46b360aa3", "12b533f7c6847616393591dcfe4793cfe9c4bb17", "7fcecaef60a681c47f0476e54e08712ee05d6154", "1cfe8c1d341dbf8cc43040b37ca3552385adb10b", "9e9052256442f4e254663ea55c87303c85310df9", 
"68f89c1ee75a018c8eff86e15b1d2383c250529b", "0b6616f3ebff461e4b6c68205fcef1dae43e2a1a", "5bb4fd87fa4a27ddacd570aa81c2d66eb4721019", "218ce079b9e64288faf20a87043dc32884105102", "4f0d9200647042e41dea71c35eb59e598e6018a7", "53f5cb365806c57811319a42659c9f68b879454a", "477236563c6a6c6db922045453b74d3f9535bfa1", "db9ef28cc3531a27c273d769e1b1d6b8aeff2db4", "a6793de9a01afe47ffbb516cc32f66625f313231", "59031a35b0727925f8c47c3b2194224323489d68", "a25106a76af723ba9b09308a7dcf4f76d9283589", "785eeac2e236a85a45b4e0356c0745279c31e089", "8bbbdff11e88327816cad3c565f4ab1bb3ee20db", "5bdd9f807eec399bb42972a33b83afc8b607c05c", "91e17338a12b5e570907e816bff296b13177971e", "346752e3ab96c93483413be4feaa024ccfe9499f", "2ef328e035b2b5501ceddc0052615d4cebac6f1f", "936c7406de1dfdd22493785fc5d1e5614c6c2882", "da4170c862d8ae39861aa193667bfdbdf0ecb363", "705e086bb666d129a6969882cfa49282116a638e", "3b4fd2aec3e721742f11d1ed4fa3f0a86d988a10", "1e8394cc9fe7c2392aa36fb4878faf7e78bbf2de", "e51927b125640bfc47bbf1aa00c3c026748c75bd", "8de5dc782178114d9424d33d9adabb2f29a1ab17", "2faa09413162b0a7629db93fbb27eda5aeac54ca", "9a59abdf3460970de53e09cb397f47d86744f472", "5922e26c9eaaee92d1d70eae36275bb226ecdb2e", "81fc86e86980a32c47410f0ba7b17665048141ec", "99d7678039ad96ee29ab520ff114bb8021222a91", "5050807e90a925120cbc3a9cd13431b98965f4b9", "2c06781ba75d51f5246d65d1acf66ab182e9bde6", "6d07e176c754ac42773690d4b4919a39df85d7ec", "f0a3f12469fa55ad0d40c21212d18c02be0d1264", "4a733a0862bd5f7be73fb4040c1375a6d17c9276", "9e8d87dc5d8a6dd832716a3f358c1cdbfa97074c", "17d01f34dfe2136b404e8d7f59cebfb467b72b26", "1d4c25f9f8f08f5a756d6f472778ab54a7e6129d", "bed06e7ff0b510b4a1762283640b4233de4c18e0", "233be88c7ce1fbf1c1680643dca7869dc637b379", "185360fe1d024a3313042805ee201a75eac50131", "191674c64f89c1b5cba19732869aa48c38698c84", "9745a7f38c9bba9d2fd076813fc9ab7a128a3e19", "53509017a25ac074b5010bb1cdba293cdf399e9b", "4f591e243a8f38ee3152300bbf42899ac5aae0a5", "7f57e9939560562727344c1c987416285ef76cda", 
"587f81ae87b42c18c565694c694439c65557d6d5", "443acd268126c777bc7194e185bec0984c3d1ae7", "6bb95a0f3668cd36407c85899b71c9fe44bf9573", "a9791544baa14520379d47afd02e2e7353df87e5", "a538b05ebb01a40323997629e171c91aa28b8e2f", "189a839c708f95772ccaad72bfb4d0321d1535d6", "09137e3c267a3414314d1e7e4b0e3a4cae801f45", "51b770e6b2af994ffc8793f59b24a9f619033a3a", "2af620e17d0ed67d9ccbca624250989ce372e255", "682760f2f767fb47e1e2ca35db3becbb6153756f", "9c23859ec7313f2e756a3e85575735e0c52249f4", "d98a36081a434451184fa4becb59bf5ec55f3a1e", "4568063b7efb66801e67856b3f572069e774ad33", "131178dad3c056458e0400bed7ee1a36de1b2918", "fd615118fb290a8e3883e1f75390de8a6c68bfde", "869a2fbe42d3fdf40ed8b768edbf54137be7ac71", "6324fada2fb00bd55e7ff594cf1c41c918813030", "dc84d3f29c52e6d296b5d457962c02074aa75d0f", "4682fee7dc045aea7177d7f3bfe344aabf153bd5", "c398684270543e97e3194674d9cce20acaef3db3", "2adffdffa16475ae71bb2adcf65840f01f1e53f7", "0b51197109813d921835cb9c4153b9d1e12a9b34", "0d538084f664b4b7c0e11899d08da31aead87c32", "bc66685acc64fa3c425c0ee6c443d3fa87db7364", "70e14e216b12bed2211c4df66ef5f0bdeaffe774", "b7fa06b76f4b9263567875b2988fb7bbc753e69f", "51528cdce7a92835657c0a616c0806594de7513b", "f839ae810338e3b12c8e2f8db6ce4d725738d2d9", "8fb611aca3bd8a3a0527ac0f38561a5a9a5b8483", "2d0363a3ebda56d91d704d5ff5458a527775b609", "38f06a75eb0519ae1d4582a86ef4730cc8fb8d7f", "96f4a1dd1146064d1586ebe86293d02e8480d181", "0f1cb558b32c516e2b6919fea0f97a307aaa9091", "f3fcaae2ea3e998395a1443c87544f203890ae15", "3519241c9ac13ca43e533844e2d3644d162dde22", "9755554b13103df634f9b1ef50a147dd02eab02f", "a51882cfd0706512bf50e12c0a7dd0775285030d", "86a8b3d0f753cb49ac3250fa14d277983e30a4b7", "0e454686f83284ced2ffc5740829552a032671a3", "271df16f789bd2122f0268c3e2fa46bc0cb5f195", "e4c3587392d477b7594086c6f28a00a826abf004", "bf4f79fd31493648d80d0a4a8da5edeeaba74055", "4aa286914f17cd8cefa0320e41800a99c142a1cd", "718d3137adba9e3078fa1f698020b666449f3336", "98c5dc00bd21a39df1d4411641329bdd6928de8a", 
"27aadf6e7441bf40675874df1cf4bb7e2dffdd9e", "314c4c95694ff12b3419733db387476346969932", "1617f56c86bf8ea61de62062a97961d23fcf03d3", "026a9cfe3135b7b62279bc08e2fb97e0e9fad5c4", "919d3067bce76009ce07b070a13728f549ebba49", "f4b5a8f6462a68e79d643648c780efe588e4b6ca", "fb4545782d9df65d484009558e1824538030bbb1", "14761b89152aa1fc280a33ea4d77b723df4e3864", "37ba12271d09d219dd1a8283bc0b4659faf3a6c6", "345cc31c85e19cea9f8b8521be6a37937efd41c2", "6359fcb0b4546979c54818df8271debc0d653257", "51c3050fb509ca685de3d9ac2e965f0de1fb21cc", "da928ac611e4e14e454e0b69dfbf697f7a09fb38", "cf2e1ebb9609f46af6de0c15b4f48d03e37e54ba", "3137a3fedf23717c411483c7b4bd2ed646258401", "593234ba1d2e16a887207bf65d6b55bbc7ea2247", "3be027448ad49a79816cd21dcfcce5f4e1cec8a8", "f1da4d705571312b244ebfd2b450692fd875cd1f", "abe4c1d6b964c4f5443b0334a44f0b03dd1909f4", "1afd481036d57320bf52d784a22dcb07b1ca95e2", "061e29eae705f318eee703b9e17dc0989547ba0c", "6e2041a9b5d840b0c3e4195241cd110640b1f5f3", "ca54d0a128b96b150baef392bf7e498793a6371f", "dcc38db6c885444694f515d683bbb50521ff3990", "75da1df4ed319926c544eefe17ec8d720feef8c0", "d24d3370b2e7d254e999140024d8a7bddf701502", "447d8893a4bdc29fa1214e53499ffe67b28a6db5", "93675f86d03256f9a010033d3c4c842a732bf661", "2fda164863a06a92d3a910b96eef927269aeb730", "aece472ba64007f2e86300cc3486c84597f02ec7", "74e869bc7c99093a5ff9f8cfc3f533ccf1b135d8", "38bbca5f94d4494494860c5fe8ca8862dcf9676e", "236a4f38f79a4dcc2183e99b568f472cf45d27f4", "9d3377313759dfdc1a702b341d8d8e4b1469460c", "259706f1fd85e2e900e757d2656ca289363e74aa", "57911d7f347dde0398f964e0c7ed8fdd0a882449", "b14b672e09b5b2d984295dfafb05604492bfaec5", "720763bcb5e0507f13a8a319018676eb24270ff0", "b235b4ccd01a204b95f7408bed7a10e080623d2e", "34b7e826db49a16773e8747bc8dfa48e344e425d", "ff46c41e9ea139d499dd349e78d7cc8be19f936c", "bb451dc2420e1a090c4796c19716f93a9ef867c9", "6d4b5444c45880517213a2fdcdb6f17064b3fa91", "366595171c9f4696ec5eef7c3686114fd3f116ad", "aac101dd321e6d2199d8c0b48c543b541c181b66", 
"55c81f15c89dc8f6eedab124ba4ccab18cf38327", "10f17534dba06af1ddab96c4188a9c98a020a459", "13c250fb740cb5616aeb474869db6ab11560e2a6", "0fd1bffb171699a968c700f206665b2f8837d953", "3ec05713a1eed6fa9b57fef718f369f68bbbe09f", "35f1bcff4552632419742bbb6e1927ef5e998eb4", "1f35a65eab258f042edb8e1d4d5fff34f00a85bd", "167ea1631476e8f9332cef98cf470cb3d4847bc6", "2e3c893ac11e1a566971f64ae30ac4a1f36f5bb5", "167f07b9d2babb8920acfa320ab04ee2758b5db6", "13d9da779138af990d761ef84556e3e5c1e0eb94", "fadbb3a447d697d52771e237173b80782caaa936", "c3638b026c7f80a2199b5ae89c8fcbedfc0bd8af", "2fa1fc116731b2b5bb97f06d2ac494cb2b2fe475", "2a98351aef0eec1003bd5524933aed8d3f303927", "4542273a157bfd4740645a6129d1784d1df775d2", "18145b0b13aa477eeabef9ceec4299b60e87c563", "392c3cabe516c0108b478152902a9eee94f4c81e", "4fc7a540efb24bea338f82c8bdc64c214744a3de", "31b58ced31f22eab10bd3ee2d9174e7c14c27c01", "af0a8199328d4c806574866f419d1962def9305a", "67af3ec65f1dc535018f3671624e72c96a611c39", "4b89cf7197922ee9418ae93896586c990e0d2867", "55b4b1168c734eeb42882082bd131206dbfedd5b", "19bbecead81e34b94111a2f584cf55db9a80e60c", "d8288322f32ee4501cef5a9b667e5bb79ebd7018", "ea1eeefb676d39b5f456937f8894311587cc7c2f", "8de2dbe2b03be8a99628ffa000ac78f8b66a1028", "82a0a5d0785fb2c2282ed901a15c3ff02f8567df", "07ac2e342db42589322b28ef291c2702f4a793a8", "c6608fdd919f2bc4f8d7412bab287527dcbcf505", "f113aed343bcac1021dc3e57ba6cc0647a8f5ce1", "eb716dd3dbd0f04e6d89f1703b9975cad62ffb09", "b51b4ef97238940aaa4f43b20a861eaf66f67253", "26ec75b8ad066b36f814379a79ad57089c82c079", "ab0f9bc35b777eaefff735cb0dd0663f0c34ad31", "113cd9e5a4081ce5a0585107951a0d36456ce7a8", "9c1305383ce2c108421e9f5e75f092eaa4a5aa3c", "acb83d68345fe9a6eb9840c6e1ff0e41fa373229", "23172f9a397f13ae1ecb5793efd81b6aba9b4537", "0861f86fb65aa915fbfbe918b28aabf31ffba364", "af8fe1b602452cf7fc9ecea0fd4508ed4149834e", "c4dcf41506c23aa45c33a0a5e51b5b9f8990e8ad", "4136a4c4b24c9c386d00e5ef5dffdd31ca7aea2c", "cb669c1d1e17c2a54d78711fa6a9f556b83f1987", 
"08d2f655361335bdd6c1c901642981e650dff5ec", "03a8f53058127798bc2bc0245d21e78354f6c93b", "a6902db7972a7631d186bbf59c5ef116c205b1e8", "44855e53801d09763c1fb5f90ab73e5c3758a728", "7fd700f4a010d765c506841de9884df394c1de1c", "f2c30594d917ea915028668bc2a481371a72a14d", "334ac2a459190b41923be57744aa6989f9a54a51", "8d42a24d570ad8f1e869a665da855628fcb1378f", "02a98118ce990942432c0147ff3c0de756b4b76a", "5feee69ed183954fa76c58735daa7dd3549e434d", "1f8304f4b51033d2671147b33bb4e51b9a1e16fe", "477811ff147f99b21e3c28309abff1304106dbbe", "38682c7b19831e5d4f58e9bce9716f9c2c29c4e7", "031055c241b92d66b6984643eb9e05fd605f24e2", "19296e129c70b332a8c0a67af8990f2f4d4f44d1", "00616b487d4094805107bb766da1c234c3c75e73", "0113b302a49de15a1d41ca4750191979ad756d2f", "89d7cc9bbcd2fdc4f4434d153ecb83764242227b", "9635493998ad60764d7bbf883351af57a668d159", "0c167008408c301935bade9536084a527527ec74", "438b88fe40a6f9b5dcf08e64e27b2719940995e0", "d60e3eef429ed2a51bbd806125fa31f5bea072a4", "40b86ce698be51e36884edcc8937998979cd02ec", "0e1a18576a7d3b40fe961ef42885101f4e2630f8", "9d839dfc9b6a274e7c193039dfa7166d3c07040b", "74de03923a069ffc0fb79e492ee447299401001f", "1d21e5beef23eecff6fff7d4edc16247f0fd984a", "0f0241124d6092a0bb56259ac091467c2c6938ca", "2e20ed644e7d6e04dd7ab70084f1bf28f93f75e9", "861b12f405c464b3ffa2af7408bff0698c6c9bf0", "121503705689f46546cade78ff62963574b4750b", "442f09ddb5bb7ba4e824c0795e37cad754967208", "4113269f916117f975d5d2a0e60864735b73c64c", "6486a58f675461d1c9f42a39e942bf39f4427f7d", "029317f260b3303c20dd58e8404a665c7c5e7339", "66af2afd4c598c2841dbfd1053bf0c386579234e", "411503a304a661b0c04c2b446a6e43e4a70942dc", "0470b0ab569fac5bbe385fa5565036739d4c37f8", "121fe33daf55758219e53249cf8bcb0eb2b4db4b", "21d9d0deed16f0ad62a4865e9acf0686f4f15492", "82be2ede6b7613286b80c3e2afe3b5353f322bed", "8d6c4af9d4c01ff47fe0be48155174158a9a5e08", "e64b683e32525643a9ddb6b6af8b0472ef5b6a37", "5fc664202208aaf01c9b62da5dfdcd71fdadab29", "240eb0b34872c431ecf9df504671281f59e7da37", 
"b85c198ce09ffc4037582a544c7ffb6ebaeff198", "187d4d9ba8e10245a34f72be96dd9d0fb393b1aa", "8598d31c7ca9c8f5bb433409af5e472a75037b4d", "3be7b7eb11714e6191dd301a696c734e8d07435f", "499343a2fd9421dca608d206e25e53be84489f44", "0903bb001c263e3c9a40f430116d1e629eaa616f", "c6724c2bb7f491c92c8dd4a1f01a80b82644b793", "2be1e2f2b7208fdf7a379da37a2097cfe52bc196", "29479bb4fe8c04695e6f5ae59901d15f8da6124b", "11ddf5e47854e4e6109762835d2ce086bbdfbc5b", "4f064c2a0ef0849eed61ab816ff0c2ff6d9d7308", "46f3b113838e4680caa5fc8bda6e9ae0d35a038c", "12cd96a419b1bd14cc40942b94d9c4dffe5094d2", "45513d0f2f5c0dac5b61f9ff76c7e46cce62f402", "316e67550fbf0ba54f103b5924e6537712f06bee", "0cf7da0df64557a4774100f6fde898bc4a3c4840", "53d78c8dbac7c9be8eb148c6a9e1d672f1dd72f9", "294bd7eb5dc24052237669cdd7b4675144e22306", "3feb69531653e83d0986a0643e4a6210a088e3e5", "60f980b1f146d659f8f8f0b4755ae2d5df64ca8d", "429c3588ce54468090cc2cf56c9b328b549a86dc", "3fde656343d3fd4223e08e0bc835552bff4bda40", "09c586624ec65d7ef2d4d8d321e98f61698dcfe2", "81bfe562e42f2eab3ae117c46c2e07b3d142dade", "c6bbb56a26222bdb8ce7dd829cff38b67d4b03cd", "68a04a3ae2086986877fee2c82ae68e3631d0356", "6c2b392b32b2fd0fe364b20c496fcf869eac0a98", "75ebe1e0ae9d42732e31948e2e9c03d680235c39", "7b9961094d3e664fc76b12211f06e12c47a7e77d", "b49affdff167f5d170da18de3efa6fd6a50262a2", "db428d03e3dfd98624c23e0462817ad17ef14493", "1a2b3fa1b933042687eb3d27ea0a3fcb67b66b43", "06400a24526dd9d131dfc1459fce5e5189b7baec", "348a16b10d140861ece327886b85d96cce95711e", "426913f890f07a5d79e6c23b83cd928ffc00e494", "14b162c2581aea1c0ffe84e7e9273ab075820f52", "7c45b5824645ba6d96beec17ca8ecfb22dfcdd7f", "0ea05bbc0b0c8b7df10f16e9429ef90177bf94fa", "1a7a2221fed183b6431e29a014539e45d95f0804", "3f4711c315d156a972af37fe23642dc970a60acf", "c089c7d8d1413b54f59fc410d88e215902e51638", "5892f8367639e9c1e3cf27fdf6c09bb3247651ed", "3026722b4cbe9223eda6ff2822140172e44ed4b1", "4500888fd4db5d7c453617ee2b0047cedccf2a27", "46ae4d593d89b72e1a479a91806c39095cd96615", 
"b56530be665b0e65933adec4cc5ed05840c37fc4", "29908288392a9326d7a2996c6cd6b3e6cb137265", "7171b46d233810df57eaba44ccd8eabd0ad1f53a", "4ea53e76246afae94758c1528002808374b75cfa", "6339e9385ae3609cb22f6b87175c7e6850f2c05b", "2495ebdcb6da8d8c2e82cf57fcaab0ec003d571d", "e19fb22b35c352f57f520f593d748096b41a4a7b", "b64cfb39840969b1c769e336a05a30e7f9efcd61", "ada42b99f882ba69d70fff68c9ccbaff642d5189", "fe5df5fe0e4745d224636a9ae196649176028990", "0363e93d49d2a3dbe057cc7754825ebf30f0f816", "d5f751d31a9d2d754d0d136d5b02c24b28fb94a0", "9ab463d117219ed51f602ff0ddbd3414217e3166", "1a878e4667fe55170252e3f41d38ddf85c87fcaf", "ed28e8367fcb7df7e51963add9e2d85b46e2d5d6", "99c20eb5433ed27e70881d026d1dbe378a12b342", "1772a7614c9b7daf01ffcda499c901ab7c768c4a", "5185f2a40836a754baaa7419a1abdd1e7ffaf2ad", "2f8ef26bfecaaa102a55b752860dbb92f1a11dc6", "22d5aeb25bb034f6ae2fc50b5cdd9934a85d6505", "642a386c451e94d9c44134e03052219a7512b9de", "3991223b1dc3b87883cec7af97cf56534178f74a", "2cae619d0209c338dc94593892a787ee712d9db0", "28c0cb56e7f97046d6f3463378d084e9ea90a89a", "3042d3727b2f80453ff5378b4b3043abb2d685a1", "75b833dde2e76c5de5912db3444d62c4131d15dc", "3946b8f862ecae64582ef0912ca2aa6d3f6f84dc", "5d479f77ecccfac9f47d91544fd67df642dfab3c", "3779e0599481f11fc1acee60d5108d63e55819b3", "bcfeac1e5c31d83f1ed92a0783501244dde5a471", "c5a561c662fc2b195ff80d2655cc5a13a44ffd2d", "91e57667b6fad7a996b24367119f4b22b6892eca", "51224ed7519e71346076060092462e3d59ca3ab9", "5e80e2ffb264b89d1e2c468fbc1b9174f0e27f43", "727ecf8c839c9b5f7b6c7afffe219e8b270e7e15", "e935270db6bd778283de9767075763a538181d8e", "541bccf19086755f8b5f57fd15177dc49e77d675", "8bf945166305eb8e304a9471c591139b3b01a1e1", "00f1e5e954f9eb7ffde3ca74009a8c3c27358b58", "86b105c3619a433b6f9632adcf9b253ff98aee87", "110c55b440b7c6a1692da9d8ee52389e43f6e76e", "4439746eeb7c7328beba3f3ef47dc67fbb52bcb3", "0786a6d5ce6db8a68cef05bb5f5b84ec1b0c2cde", "4be03fd3a76b07125cd39777a6875ee59d9889bd", "3edc43e336be075dca77c7e173b555b6c14274d8", 
"11b3877df0213271676fa8aa347046fd4b1a99ad", "a6eb6ad9142130406fb4ffd4d60e8348c2442c29", "eb70c38a350d13ea6b54dc9ebae0b64171d813c9", "5b86c36e3eb59c347b81125d5dd57dd2a2c377a9", "3765c26362ad1095dfe6744c6d52494ea106a42c", "6ecd4025b7b5f4894c990614a9a65e3a1ac347b2", "4ed54d5093d240cc3644e4212f162a11ae7d1e3b", "19746957aa0d800d550da246a025ad44409cdb03", "0037bff7be6d463785d4e5b2671da664cd7ef746", "93721023dd6423ab06ff7a491d01bdfe83db7754", "2bab44d3a4c5ca79fb8f87abfef4456d326a0445", "2e1fd8d57425b727fd850d7710d38194fa6e2654", "6d7a32f594d46f4087b71e2a2bb66a4b25da5e30", "ed07856461da6c7afa4f1782b5b607b45eebe9f6", "a2e0966f303f38b58b898d388d1c83e40b605262", "6a4419ce2338ea30a570cf45624741b754fa52cb", "cc47368fe303c6cbda38caf5ac0e1d1c9d7e2a52", "c146aa6d56233ce700032f1cb179700778557601", "30aa681ab80a830c3890090b0da3f1e786bd66ff", "4db9e5f19366fe5d6a98ca43c1d113dac823a14d", "cbbd13c29d042743f0139f1e044b6bca731886d0", "4dca3d6341e1d991c902492952e726dc2a443d1c", "b18858ad6ec88d8b443dffd3e944e653178bc28b", "9b07084c074ba3710fee59ed749c001ae70aa408", "8e0becfc5fe3ecdd2ac93fabe34634827b21ef2b", "7cffcb4f24343a924a8317d560202ba9ed26cd0b", "58d0c140597aa658345230615fb34e2c750d164c", "1648cf24c042122af2f429641ba9599a2187d605", "228ea13041910c41b50d0052bdce924037c3bc6a", "3a76e9fc2e89bdd10a9818f7249fbf61d216efc4", "c98983592777952d1751103b4d397d3ace00852d", "eb8519cec0d7a781923f68fdca0891713cb81163", "22ec256400e53cee35f999244fb9ba6ba11c1d06", "dde5125baefa1141f1ed50479a3fd67c528a965f", "5fff61302adc65d554d5db3722b8a604e62a8377", "6193c833ad25ac27abbde1a31c1cabe56ce1515b", "047bb1b1bd1f19b6c8d7ee7d0324d5ecd1a3efff", "d2eb1079552fb736e3ba5e494543e67620832c52", "270e5266a1f6e76954dedbc2caf6ff61a5fbf8d0", "62fd622b3ca97eb5577fd423fb9efde9a849cbef", "93af36da08bf99e68c9b0d36e141ed8154455ac2", "582edc19f2b1ab2ac6883426f147196c8306685a", "ea079334121a0ba89452036e5d7f8e18f6851519", "b55e70df03d9b80c91446a97957bc95772dcc45b", "fe7c0bafbd9a28087e0169259816fca46db1a837", 
"37d6f0eb074d207b53885bd2eb78ccc8a04be597", "a32d4195f7752a715469ad99cb1e6ebc1a099de6", "b15a06d701f0a7f508e3355a09d0016de3d92a6d", "0647c9d56cf11215894d57d677997826b22f6a13", "6dd5dbb6735846b214be72983e323726ef77c7a9", "28e1982d20b6eff33989abbef3e9e74400dbf508", "f0f501e1e8726148d18e70c8e9f6feea9360d119", "a694180a683f7f4361042c61648aa97d222602db", "fb228b214e28af26f77cc1195d03c9d851b78ec6", "1f745215cda3a9f00a65166bd744e4ec35644b02", "69c2ac04693d53251500557316c854a625af84ee", "bc811a66855aae130ca78cd0016fd820db1603ec", "af9419f2155785961a5c16315c70b8228435d5f8", "8a0159919ee4e1a9f4cbfb652a1be212bf0554fd", "4c648fe9b7bfd25236164333beb51ed364a73253", "407806f5fe3c5ecc2dc15b75d3d2b0359b4ee7e0", "1171e8a96ffb15fdb265aaba02be014a38137ad5", "ccf43c62e4bf76b6a48ff588ef7ed51e87ddf50b", "3e687d5ace90c407186602de1a7727167461194a", "20eabf10e9591443de95b726d90cda8efa7e53bb", "a38dd439209b0913b14b1c3c71143457d8cf9b78", "10f66f6550d74b817a3fdcef7fdeba13ccdba51c", "d3a3d15a32644beffaac4322b9f165ed51cfd99b", "9d66de2a59ec20ca00a618481498a5320ad38481", "ea2ee5c53747878f30f6d9c576fd09d388ab0e2b", "539ffd51f18404e1ef83371488cf5a27cd16d064", "9eb86327c82b76d77fee3fd72e2d9eff03bbe5e0", "7813d405450013bbdb0b3a917319d5964a89484a", "24b637c98b22cd932f74acfeecdb50533abea9ae", "5dbb2d556f2e63a783a695a517f5deb11aafd7ea", "310dcf9edb491b63d09a9eb55a99ad6bb46da1d4", "5dc056fe911a3e34a932513abe637076250d96da", "9ed4ad41cbad645e7109e146ef6df73f774cd75d", "edef98d2b021464576d8d28690d29f5431fd5828", "1d5aad4f7fae6d414ffb212cec1f7ac876de48bf", "14b016c7a87d142f4b9a0e6dc470dcfc073af517", "3ff79cf6df1937949cc9bc522041a9a39d314d83", "1d0dd20b9220d5c2e697888e23a8d9163c7c814b", "e8aa1f207b4b0bb710f79ab47a671d5639696a56", "03babadaaa7e71d4b65203e27e8957db649155c6", "31c0968fb5f587918f1c49bf7fa51453b3e89cf7", "2c8743089d9c7df04883405a31b5fbe494f175b4", "e57ce6244ec696ff9aa42d6af7f09eed176153a8", "566038a3c2867894a08125efe41ef0a40824a090", "2a98b850139b911df5a336d6ebf33be7819ae122", 
"d57dca4413ad4f33c97ae06a5a7fc86dc5a75f8b", "280bc9751593897091015aaf2cab39805768b463", "3d89f9b4da3d6fb1fdb33dea7592b5992069a096", "3f7cf52fb5bf7b622dce17bb9dfe747ce4a65b96", "2a35d20b2c0a045ea84723f328321c18be6f555c", "cb522b2e16b11dde48203bef97131ddca3cdaebd", "ce8db0fe11e7c96d08de561506f9f8f399dabbb2", "4e8c608fc4b8198f13f8a68b9c1a0780f6f50105", "b98e7a8f605c21e25ac5e32bfb1851a01f30081b", "0b8c92463f8f5087696681fb62dad003c308ebe2", "fea83550a21f4b41057b031ac338170bacda8805", "ccfebdf7917cb50b5fcd56fb837f841a2246a149", "125d82fee1b9fbcc616622b0977f3d06771fc152", "0b242d5123f79defd5f775d49d8a7047ad3153bc", "5ef3e7a2c8d2876f3c77c5df2bbaea8a777051a7", "2d294c58b2afb529b26c49d3c92293431f5f98d0", "e293a31260cf20996d12d14b8f29a9d4d99c4642", "77d31d2ec25df44781d999d6ff980183093fb3de", "6f9824c5cb5ac08760b08e374031cbdabc953bae", "b417b90fa0c288bbaab1aceb8ebc7ec1d3f33172", "86afb1e38a96f2ac00e792ef353a971fd13c8474", "d50a40f2d24363809a9ac57cf7fbb630644af0e5", "dac34b590adddef2fc31f26e2aeb0059115d07a1", "845f45f8412905137bf4e46a0d434f5856cd3aec", "cc9d068cf6c4a30da82fd6350a348467cb5086d4", "78d645d5b426247e9c8f359694080186681f57db", "2836d68c86f29bb87537ea6066d508fde838ad71", "13141284f1a7e1fe255f5c2b22c09e32f0a4d465", "14e949f5754f9e5160e8bfa3f1364dd92c2bb8d6", "472ba8dd4ec72b34e85e733bccebb115811fd726", "b3cb91a08be4117d6efe57251061b62417867de9", "31f905d40a4ac3c16c91d5be8427762fa91277f1", "6b089627a4ea24bff193611e68390d1a4c3b3644", "bd8f3fef958ebed5576792078f84c43999b1b207", "b7b461f82c911f2596b310e2b18dd0da1d5d4491", "d77f18917a58e7d4598d31af4e7be2762d858370", "e9363f4368b04aeaa6d6617db0a574844fc59338", "c62c910264658709e9bf0e769e011e7944c45c90", "d458c49a5e34263c95b3393386b5d76ba770e497", "2f13dd8c82f8efb25057de1517746373e05b04c4", "9729930ab0f9cbcd07f1105bc69c540330cda50a", "b97f694c2a111b5b1724eefd63c8d64c8e19f6c9", "6fa7a1c8a858157deee3b582099e5e234798bb4a", "91b1a59b9e0e7f4db0828bf36654b84ba53b0557", "134f1cee8408cca648d8b4ca44b38b0a7023af71", 
"5c19c4c6a663fe185a739a5f50cef6a12a4635a1", "8959e0e9a24c0fe79f3fd3acca9d139edc0abcfd", "9c59bb28054eee783a40b467c82f38021c19ff3e", "c03f48e211ac81c3867c0e787bea3192fcfe323e", "14b87359f6874ff9b8ee234b18b418e57e75b762", "518a3ce2a290352afea22027b64bf3950bffc65a", "0e2d956790d3b8ab18cee8df6c949504ee78ad42", "f355e54ca94a2d8bbc598e06e414a876eb62ef99", "a0061dae94d916f60a5a5373088f665a1b54f673", "439ca6ded75dffa5ddea203dde5e621dc4a88c3e", "56a653fea5c2a7e45246613049fb16b1d204fc96", "79581c364cefe53bff6bdd224acd4f4bbc43d6d4", "5fc97d6cb5af21ed196e44f22cee31ce8c51ef13", "8913a5b7ed91c5f6dec95349fbc6919deee4fc75", "34b3b14b4b7bfd149a0bd63749f416e1f2fc0c4c", "b6259115b819424de53bb92f64cc459dcb649f31", "69eb6c91788e7c359ddd3500d01fb73433ce2e65", "9a3535cabf5d0f662bff1d897fb5b777a412d82e", "65b1760d9b1541241c6c0222cc4ee9df078b593a", "5d5cd6fa5c41eb9d3d2bab3359b3e5eb60ae194e", "a29a22878e1881d6cbf6acff2d0b209c8d3f778b", "9b4d2cd2e5edbf5c8efddbdcce1db9a02a853534", "ffe4bb47ec15f768e1744bdf530d5796ba56cfc1", "e3c8e49ffa7beceffca3f7f276c27ae6d29b35db", "474b461cd12c6d1a2fbd67184362631681defa9e", "467b602a67cfd7c347fe7ce74c02b38c4bb1f332", "40b10e330a5511a6a45f42c8b86da222504c717f", "e20e2db743e8db1ff61279f4fda32bf8cf381f8e", "23120f9b39e59bbac4438bf4a8a7889431ae8adb", "9887ab220254859ffc7354d5189083a87c9bca6e", "dc3dc18b6831c867a8d65da130a9ff147a736745", "0034e37a0faf0f71395245b266aacbf5412f190a", "fdb33141005ca1b208a725796732ab10a9c37d75", "7783095a565094ae5b3dccf082d504ddd7255a5c", "0a511058edae582e8327e8b9d469588c25152dc6", "54948ee407b5d32da4b2eee377cc44f20c3a7e0c", "1eb9c859ff7537182a25556635954bcd11830822", "62d1a31b8acd2141d3a994f2d2ec7a3baf0e6dc4", "02e133aacde6d0977bca01ffe971c79097097b7f", "a532cfc69259254192aee3fc5be614d9197e7824", "0e4fa61871755b5548a5c970c8103f7b2ada24f3", "abdd17e411a7bfe043f280abd4e560a04ab6e992", "4526992d4de4da2c5fae7a5ceaad6b65441adf9d", "a35ed55dc330d470be2f610f4822f5152fcac4e1", "f6f06be05981689b94809130e251f9e4bf932660", 
"7fc3442c8b4c96300ad3e860ee0310edb086de94", "bf3bf5400b617fef2825eb987eb496fea99804b9", "86fa086d02f424705bbea53943390f009191740a", "85ccf2c9627a988ebab7032d0ec2d76ec7832c98", "f4373f5631329f77d85182ec2df6730cbd4686a9", "982d4f1dee188f662a4b5616a045d69fc5c21b54", "2f2aa67c5d6dbfaf218c104184a8c807e8b29286", "15136c2f94fd29fc1cb6bedc8c1831b7002930a6", "3e9ab40e6e23f09d16c852b74d40264067ac6abc", "3cd9b0a61bdfa1bb8a0a1bf0369515a76ecd06e3", "3ba74755c530347f14ec8261996dd9eae896e383", "77869f274d4be4d4b4c438dbe7dff4baed521bd8", "81a80b26979b40d5ebe3f5ba70b03cb9f19dd7a5", "780c8a795baca1ba4cb4956cded877dd3d1ca313", "9989ad33b64accea8042e386ff3f1216386ba7f1", "b0502dcc6df378ee3ddeefeeb1cc51a20e04f39b", "17de5a9ce09f4834629cd76b8526071a956c9c6d", "20111924fbf616a13d37823cd8712a9c6b458cd6", "5951e9e13ff99f97f301a336f24a14d80459c659", "5134353bd01c4ea36bd007c460e8972b1541d0ad", "e6da1fcd2a8cda0c69b3d94812caa7d844903007", "1921795408345751791b44b379f51b7dd54ebfa2", "e724c9a69613bef36f67ae7ed6850b1942918804", "c4c1fb882ae8b48c461e1f7c359ea3ea15da29fa", "96e1ccfe96566e3c96d7b86e134fa698c01f2289", "b166ce267ddb705e6ed855c6b679ec699d62e9cb", "972e044f69443dfc5c987e29250b2b88a6d2f986", "edbddf8c176d6e914f0babe64ad56c051597d415", "e6d6203fa911429d76f026e2ec2de260ec520432", "0f65c91d0ed218eaa7137a0f6ad2f2d731cf8dab", "bbf20adb59b7461e0d040e665bf64ae5f478eda0", "3cfbe1f100619a932ba7e2f068cd4c41505c9f58", "86614c2d2f6ebcb9c600d4aef85fd6bf6eab6663", "e1449be4951ba7519945cd1ad50656c3516113da", "f1aa120fb720f6cfaab13aea4b8379275e6d40a2", "8451bf3dd6bcd946be14b1a75af8bbb65a42d4b2", "7df277c37ac75851684f926fd3fb4daced3e79f8", "c17c7b201cfd0bcd75441afeaa734544c6ca3416", "2cbb4a2f8fd2ddac86f8804fd7ffacd830a66b58", "d78fbd11f12cbc194e8ede761d292dc2c02d38a2", "d340a135a55ecf7506010e153d5f23155dcfa7e8", "ef032afa4bdb18b328ffcc60e2dc5229cc1939bc", "66886997988358847615375ba7d6e9eb0f1bb27f", "d878a67b2ef6a0a5dec72db15291f12419040ab1", "f92ade569cbe54344ffd3bb25efd366dcd8ad659", 
"80d42f74ee9bf03f3790c8d0f5a307deffe0b3b7", "939123cf21dc9189a03671484c734091b240183e", "3ebb0209d5e99b22c67e425a67a959f4db8d1f47", "dcb6f06631021811091ce691592b12a237c12907", "01c4cf9c7c08f0ad3f386d88725da564f3c54679", "b8bcf9c773da1c5ee76db4bf750c9ff5d159f1a0", "927ad0dceacce2bb482b96f42f2fe2ad1873f37a", "66a2c229ac82e38f1b7c77a786d8cf0d7e369598", "40ca925befa1f7e039f0cd40d57dbef6007b4416", "464de30d3310123644ab81a1f0adc51598586fd2", "ae425a2654a1064c2eda29b08a492c8d5aab27a2", "5dd57b7e0e82a33420c054da7ea3f435d49e910e", "b5857b5bd6cb72508a166304f909ddc94afe53e3", "110919f803740912e02bb7e1424373d325f558a9", "c58b7466f2855ffdcff1bebfad6b6a027b8c5ee1", "f5eb0cf9c57716618fab8e24e841f9536057a28a", "42cc9ea3da1277b1f19dff3d8007c6cbc0bb9830", "934efd61b20f5b8b151a2df7cd373f0b387c02b0", "c8adbe00b5661ab9b3726d01c6842c0d72c8d997", "9825c4dddeb2ed7eaab668b55403aa2c38bc3320", "4180978dbcd09162d166f7449136cb0b320adf1f", "4b9b30066a05bdeb0e05025402668499ebf99a6b", "7bdcd85efd1e3ce14b7934ff642b76f017419751", "1da5fc63d66fbf750b0e15c5ef6d4274ca73cca1", "fff31548617f208cd5ae5c32917afd48abc4ff6a", "6bfb0f8dd1a2c0b44347f09006dc991b8a08559c", "612075999e82596f3b42a80e6996712cc52880a3", "9806d3dc7805dd8c9c20d7222c915fc4beee7099", "e66a6ae542907d6a0ebc45da60a62d3eecf17839", "15cf7bdc36ec901596c56d04c934596cf7b43115", "93c0405b1f5432eab11cb5180229720604ffd030", "6332a99e1680db72ae1145d65fa0cccb37256828", "307a810d1bf6f747b1bd697a8a642afbd649613d", "1a46d3a9bc1e4aff0ccac6403b49a13c8a89fc1d", "d04d5692461d208dd5f079b98082eda887b62323", "4aa093d1986b4ad9b073ac9edfb903f62c00e0b0", "f6e6b4d0b7c16112dcb71ff502033a2187b1ec9b", "a961f1234e963a7945fed70197015678149b37d8", "c4d0d09115a0df856cdb389fbccb20f62b07b14e", "68c17aa1ecbff0787709be74d1d98d9efd78f410", "b375db63742f8a67c2a7d663f23774aedccc84e5", "1ce29d6b820ed4a24da27b76ffd9605d5b3b10b5", "09ce14b84af2dc2f76ae1cf227356fa0ba337d07", "9f131b4e036208f2402182a1af2a59e3c5d7dd44", "3b75681f0162752865d85befd8b15e7d954ebfe6", 
"e049d3db7c59f8173aa91dd4bd1bd0beebdaa260", "27dafedccd7b049e87efed72cabaa32ec00fdd45", "d89cfed36ce8ffdb2097c2ba2dac3e2b2501100d", "fdd19fee07f2404952e629cc7f7ffaac14febe01", "660c99ac408b535bb0468ab3708d0d1d5db30180", "2d3c17ced03e4b6c4b014490fe3d40c62d02e914", "dd600e7d6e4443ebe87ab864d62e2f4316431293", "4512b87d68458d9ba0956c0f74b60371b6c69df4", "c444c4dab97dd6d6696f56c1cacda051dde60448", "28b9d92baea72ec665c54d9d32743cf7bc0912a7", "3965d61c4f3b72044f43609c808f8760af8781a2", "370e0d9b89518a6b317a9f54f18d5398895a7046", "13a994d489c15d440c1238fc1ac37dad06dd928c", "cfbb2d32586b58f5681e459afd236380acd86e28", "ada56c9ceef50aa5159f1f8aa45ca2040d1ed15c", "96f0e7416994035c91f4e0dfa40fd45090debfc5", "f4d30896c5f808a622824a2d740b3130be50258e", "d03e4e938bcbc25aa0feb83d8a0830f9cd3eb3ea", "3a846704ef4792dd329a5c7a2cb8b330ab6b8b4e", "fe108803ee97badfa2a4abb80f27fa86afd9aad9", "1aeef2ab062c27e0dbba481047e818d4c471ca57", "d6ae7941dcec920d5726d50d1b1cdfe4dde34d35", "5de9670f72d10682bf2cb3156988346257e0489f", "3802da31c6d33d71b839e260f4022ec4fbd88e2d", "69adbfa7b0b886caac15ebe53b89adce390598a3", "a92c207031b0778572bf41803dba1a21076e128b", "f19ab817dd1ef64ee94e94689b0daae0f686e849", "610a4451423ad7f82916c736cd8adb86a5a64c59", "c18a03568d4b512a0d8380cbb1fbf6bd56d11f05", "25866eb48b94e85fa675b1d393163d27ffd62ba6", "afe9cfba90d4b1dbd7db1cf60faf91f24d12b286", "8e33183a0ed7141aa4fa9d87ef3be334727c76c0", "7fb5006b6522436ece5bedf509e79bdb7b79c9a7", "b68452e28951bf8db5f1193eca3a8fd9e2d0d7ef", "c4f3185f010027a0a97fcb9753d74eb27a9cfd3e", "eac6aee477446a67d491ef7c95abb21867cf71fc", "71ca8b6e84c17b3e68f980bfb8cddc837100f8bf", "faf19885431cb39360158982c3a1127f6090a1f6", "148eb413bede35487198ce7851997bf8721ea2d6", "70d2ab1af0edd5c0a30d576a5d4aa397c4f92d3e", "a55c0810e6c84f8e51953c0d8fd9971696d205f0", "cd74d606e76ecddee75279679d9770cdc0b49861", "67c703a864aab47eba80b94d1935e6d244e00bcb", "351158e4481e3197bd63acdafd73a5df8336143b", "d38b32d91d56b01c77ef4dd7d625ce5217c6950b", 
"3e4acf3f2d112fc6516abcdddbe9e17d839f5d9b", "5b0ebb8430a04d9259b321fc3c1cc1090b8e600e", "3046baea53360a8c5653f09f0a31581da384202e", "5f01f14ca354266106d8aa1b07c45e8c9ac3e273", "21b16df93f0fab4864816f35ccb3207778a51952", "43aa40eaa59244c233f83d81f86e12eba8d74b59", "578117ff493d691166fefc52fd61bad70d8752a9", "79db191ca1268dc88271abef3179c4fe4ee92aed", "90fb58eeb32f15f795030c112f5a9b1655ba3624", "d0d75a7116a76ccd98a3aeb6f6fff10ba91de1c1", "02467703b6e087799e04e321bea3a4c354c5487d", "c07ab025d9e3c885ad5386e6f000543efe091c4b", "1280b35e4a20036fcfd82ee09f45a3fca190276f", "17aa78bd4331ef490f24bdd4d4cd21d22a18c09c", "9ff931ca721d50e470e1a38e583c7b18b6cdc2cc", "59d45281707b85a33d6f50c6ac6b148eedd71a25", "47e14fdc6685f0b3800f709c32e005068dfc8d47", "65293ecf6a4c5ab037a2afb4a9a1def95e194e5f", "06518858bd99cddf9bc9200fac5311fc29ac33b4", "178a82e3a0541fa75c6a11350be5bded133a59fd", "8964524580ea2cff41a6b5858b623788bbefb8a4", "683ec608442617d11200cfbcd816e86ce9ec0899", "f1d6da83dcf71eda45a56a86c5ae13e7f45a8536", "48174c414cfce7f1d71c4401d2b3d49ba91c5338", "3d4d3f70352dc833e454a5756d682f27eca46e5d", "5dd3c9ac3c6d826e17c5b378d1575b68d02432d7", "1de23d7fe718d9fab0159f58f422099e44ad3f0a", "5c92355b2808621d237a89dc7b3faa5cdb990ab5", "1ba9d12f24ac04f0309e8ff9b0162c6e18d97dc3", "4bc4a7c4142e8b37389fddd1e2338298b8b56e96", "c8db8764f9d8f5d44e739bbcb663fbfc0a40fb3d", "2d3482dcff69c7417c7b933f22de606a0e8e42d4", "0a60d9d62620e4f9bb3596ab7bb37afef0a90a4f", "ee7093e91466b81d13f4d6933bcee48e4ee63a16", "3dda181be266950ba1280b61eb63ac11777029f9", "e40cb4369c6402ae53c81ce52b73df3ef89f578b", "d2baa43471d959075fc4c93485643cbd009797fd", "cc3c273bb213240515147e8be68c50f7ea22777c", "12003a7d65c4f98fb57587fd0e764b44d0d10125", "4b3f425274b0c2297d136f8833a31866db2f2aec", "d99b5ee3e2d7e3a016fbc5fd417304e15efbd1f8", "ac206a97e981df4514dcae28442beaea31845f35", "dbe255d3d2a5d960daaaba71cb0da292e0af36a7", "7a09e8f65bd85d4c79f0ae90d4e2685869a9894f", "f45d6a7bdb6741242da6192d18c97ac39e6308db", 
"f4fc77660665ae58993065c6a336367e9a6c85f7", "21959bc56a160ebd450606867dce1462a913afab", "2717b044ae9933f9ab87f16d6c611352f66b2033", "4686bdcee01520ed6a769943f112b2471e436208", "6b35b15ceba2f26cf949f23347ec95bbbf7bed64", "89272b78b651038ff4d294b9ccca0018d2c9033b", "5bb53fb36a47b355e9a6962257dd465cd7ad6827", "04317e63c08e7888cef480fe79f12d3c255c5b00", "60737db62fb5fab742371709485e4b2ddf64b7b2", "c0a8c0e6ccf9882969ba0eda0b898affa015437b", "2f17f6c460e02bd105dcbf14c9b73f34c5fb59bd", "bf8a520533f401347e2f55da17383a3e567ef6d8", "1e1d7cbbef67e9e042a3a0a9a1bcefcc4a9adacf", "90ad0daa279c3e30b360f9fe9371293d68f4cebf", "192235f5a9e4c9d6a28ec0d333e36f294b32f764", "93971a49ef6cc88a139420349a1dfd85fb5d3f5c", "4344ba6e33faaa616d01248368e66799548ca48b", "e9e40e588f8e6510fa5537e0c9e083ceed5d07ad", "27a0a7837f9114143717fc63294a6500565294c2", "af53ce0f3a039c685b754e1f704817e03e182412", "5028c0decfc8dd623c50b102424b93a8e9f2e390", "3826e47f0572ab4d0fe34f0ed6a49aa8303e0428", "166186e551b75c9b5adcc9218f0727b73f5de899", "1d3dd9aba79a53390317ec1e0b7cd742cba43132", "85188c77f3b2de3a45f7d4f709b6ea79e36bd0d9", "82ccd62f70e669ec770daf11d9611cab0a13047e", "71f07c95a2b039cc21854c602f29e5be053f2aba", "13bda03fc8984d5943ed8d02e49a779d27c84114", "0a88f5936528dcfdd27df886b07e62f2fd2072d0", "fab83bf8d7cab8fe069796b33d2a6bd70c8cefc6", "51c7c5dfda47647aef2797ac3103cf0e108fdfb4", "0857281a3b6a5faba1405e2c11f4e17191d3824d", "653d19e64bd75648cdb149f755d59e583b8367e3", "5506a1a1e1255353fde05d9188cb2adc20553af5", "7897c8a9361b427f7b07249d21eb9315db189496", "b8b9cef0938975c5b640b7ada4e3dea6c06d64e9", "4007bf090887d8a0e907ab5e17ecfcdbbdafc2e4", "8af411697e73f6cfe691fe502d4bfb42510b4835", "e4e3faa47bb567491eaeaebb2213bf0e1db989e1", "833f6ab858f26b848f0d747de502127406f06417", "8ee5b1c9fb0bded3578113c738060290403ed472", "217de4ff802d4904d3f90d2e24a29371307942fe", "f73174cfcc5c329b63f19fffdd706e1df4cc9e20", "edfce091688bc88389dd4877950bd58e00ff1253", "2b7ef95822a4d577021df16607bf7b4a4514eb4b", 
"917bea27af1846b649e2bced624e8df1d9b79d6f", "0f9bf5d8f9087fcba419379600b86ae9e9940013", "e22adcd2a6a7544f017ec875ce8f89d5c59e09c8", "50f0c495a214b8d57892d43110728e54e413d47d", "28be652db01273289499bc6e56379ca0237506c0", "8b1f697d81de1245c283b4f8f055b9b76badfa66", "d3d71a110f26872c69cf25df70043f7615edcf92", "209324c152fa8fab9f3553ccb62b693b5b10fb4d", "2af19b5ff2ca428fa42ef4b85ddbb576b5d9a5cc", "184dba921b932143d196c833310dee6884fa4a0a", "3fe4109ded039ac9d58eb9f5baa5327af30ad8b6", "3c1aef7c2d32a219bdbc89a44d158bc2695e360a", "22e678d3e915218a7c09af0d1602e73080658bb7", "2ef1b1b5ed732634e005df779fd9b21da0ffe60c", "a3a2f3803bf403262b56ce88d130af15e984fff0", "1e3068886b138304ec5a7296702879cc8788143d", "30c96cc041bafa4f480b7b1eb5c45999701fe066", "cf6c59d359466c41643017d2c212125aa0ee84b2", "a192845a7695bdb372cccf008e6590a14ed82761", "258a2dad71cb47c71f408fa0611a4864532f5eba", "919bdc161485615d5ee571b1585c1eb0539822c8", "08ee541925e4f7f376538bc289503dd80399536f", "0ca66283f4fb7dbc682f789fcf6d6732006befd5", "761304bbd259a9e419a2518193e1ff1face9fd2d", "9fc04a13eef99851136eadff52e98eb9caac919d", "566a39d753c494f57b4464d6bde61bf3593f7ceb", "54ba18952fe36c9be9f2ab11faecd43d123b389b", "a2bd81be79edfa8dcfde79173b0a895682d62329", "a95dc0c4a9d882a903ce8c70e80399f38d2dcc89", "8c7bceba769762126fd3dae78d622908bb83c3d3", "6f7ce89aa3e01045fcd7f1c1635af7a09811a1fe", "4e97b53926d997f451139f74ec1601bbef125599", "021a19e240f0ae0554eff814e838e1e396be6572", "06fe63b34fcc8ff68b72b5835c4245d3f9b8a016", "44078d0daed8b13114cffb15b368acc467f96351", "2e5cfa97f3ecc10ae8f54c1862433285281e6a7c", "38f1d8d25c0332798e0929594af2c43092d2c5c8", "7480d8739eb7ab97c12c14e75658e5444b852e9f", "aba9acb4a607071af10684f2cfbdefa0507a4e9a", "0df0d1adea39a5bef318b74faa37de7f3e00b452", "6502cf30c088c6c7c4b2a05b7777b032c9dde7cd", "2400c4994655c4dd59f919c4d6e9640f57f2009f", "c3c463a9ee464bb610423b7203300a83a166b500", "f65b47093e4d45013f54c3ba09bbcce7140af6bb", "153f5ad54dd101f7f9c2ae17e96c69fe84aa9de4", 
"34546ef7e6148d9a1fb42cfab5f0ce11c92c760a", "198b6beb53e0e61357825d57938719f614685f75", "e7436b8e68bb7139b823a7572af3decd96241e78", "814b05113ba0397d236736f94c01e85bb034c833", "5f57a1a3a1e5364792b35e8f5f259f92ad561c1f", "c3285a1d6ec6972156fea9e6dc9a8d88cd001617", "2004afb2276a169cdb1f33b2610c5218a1e47332", "b7f7a4df251ff26aca83d66d6b479f1dc6cd1085", "3a9681e2e07be7b40b59c32a49a6ff4c40c962a2", "87e5b4d95f95a0975e855cf5ad402db7a3c64ff5", "bbcb4920b312da201bf4d2359383fb4ee3b17ed9", "230527d37421c28b7387c54e203deda64564e1b7", "96ccd94151a348c9829ab1d943cb13e9e933952f", "74c8116d647612e8cd20a2528eeed38f76d09126", "e988be047b28ba3b2f1e4cdba3e8c94026139fcf", "8e94ed0d7606408a0833e69c3185d6dcbe22bbbe", "d4e669d5d35fa0ca9f8d9a193c82d4153f5ffc4e", "31a38fd2d9d4f34d2b54318021209fe5565b8f7f", "8f6263e4d3775757e804796e104631c7a2bb8679", "303828619630ca295f772be0a7b9fe8007dfaea3", "8e3d0b401dec8818cd0245c540c6bc032f169a1d", "feb6e267923868bff6e2108603d00fdfd65251ca", "2ab034e1f54c37bfc8ae93f7320160748310dc73", "1d1a7ef193b958f9074f4f236060a5f5e7642fc1", "5039834df68600a24e7e8eefb6ba44a5124e67fc", "41c97af4801ac302f09902aeec2af17b481563ab", "5157dde17a69f12c51186ffc20a0a6c6847f1a29", "53507e2de66eaba996f14fd2f54a5535056f1e59", "9788b491ddc188941dadf441fc143a4075bff764", "661ca4bbb49bb496f56311e9d4263dfac8eb96e9", "2784d9212dee2f8a660814f4b85ba564ec333720", "57de1a09db680e0b4878ceda68d626ae4e44ccfe", "6e3a181bf388dd503c83dc324561701b19d37df1", "b68150bfdec373ed8e025f448b7a3485c16e3201", "4217473596b978f13a211cdf47b7d3f6588c785f", "de878384f00b6ce1caa66ac01735fb4b63ad0279", "9be94fa0330dd493f127d51e4ef7f9fd64613cfc", "6a0368b4e132f4aa3bbdeada8d894396f201358a", "046a694bbb3669f2ff705c6c706ca3af95db798c", "5e28673a930131b1ee50d11f69573c17db8fff3e", "740e095a65524d569244947f6eea3aefa3cca526", "019e471667c72b5b3728b4a9ba9fe301a7426fb2", "543f21d81bbea89f901dfcc01f4e332a9af6682d", "46e866f58419ff4259c65e8256c1d4f14927b2c6", "f28ef0a61a45a8b9cd03aa0ca81863e1d54a31d1", 
"b910590a0eb191d03e1aedb3d55c905129e92e6b", "aaec8141d57d29aa3cedf1baec9633180ddb7a3d", "68e6cfb0d7423d3fae579919046639c8e2d04ad7", "3a3f75e0ffdc0eef07c42b470593827fcd4020b4", "951368a1a8b3c5cd286726050b8bdf75a80f7c37", "937e89cdf056358d1d5befe334a0e1f497f7d643", "a1f40bcfadbeee66f67ab0755dd3037c030a7450", "01c7a778cde86ad1b89909ea809d55230e569390", "98c2053e0c31fab5bcb9ce5386335b647160cc09", "a03cfd5c0059825c87d51f5dbf12f8a76fe9ff60", "a5eb36f1e77245dfc9e5c0c03998529331e4c89b", "0a5b2e642683ff20b6f0cee16a32a68ba0099908", "bc12715a1ddf1a540dab06bf3ac4f3a32a26b135", "00049f989067d082f7f8d0581608ad5441d09f8b", "75858dbee2c248a60741fbc64dcad4f8b63d51cb", "20e504782951e0c2979d9aec88c76334f7505393", "95d0cd902ff0fa253b6757ba3c8e09ce25b494cc", "ac86ccc16d555484a91741e4cb578b75599147b2", "ee56823f2f00c8c773e4ebc725ca57d2f9242947", "2dd6c988b279d89ab5fb5155baba65ce4ce53c1e", "d78077a7aa8a302d4a6a09fb9737ab489ae169a6", "3f5e8f884e71310d7d5571bd98e5a049b8175075", "0e652a99761d2664f28f8931fee5b1d6b78c2a82", "edcb662834aae8878a209c769ed664f8bd48b751", "916ad644614cccae728c8a12c089f01af62fb12e", "0ee661a1b6bbfadb5a482ec643573de53a9adf5e", "6342a4c54835c1e14159495373ab18b4233d2d9b", "270acff7916589a6cc9ca915b0012ffcb75d4899", "2cd7821fcf5fae53a185624f7eeda007434ae037", "09b43b59879d59493df2a93c216746f2cf50f4ac", "7f3a73babe733520112c0199ff8d26ddfc7038a0", "48853c25dc75481b0c77f408a8a76383287ebe2a", "7caa3a74313f9a7a2dd5b4c2cd7f825d895d3794", "998542e5e3882bb0ce563d390b1e1bff5460e80c", "40273657e6919455373455bd9a5355bb46a7d614", "e692870efb009da4b9316678b354ae935fdf48eb", "be28ed1be084385f5d389db25fd7f56cd2d7f7bf", "63a6c256ec2cf2e0e0c9a43a085f5bc94af84265", "38a2661b6b995a3c4d69e7d5160b7596f89ce0e6", "5bae9822d703c585a61575dced83fa2f4dea1c6d", "2c424f21607ff6c92e640bfe3da9ff105c08fac4", "52bf00df3b970e017e4e2f8079202460f1c0e1bd", "aa1129780cc496918085cd0603a774345c353c54", "51348e24d2199b06273e7b65ae5f3fc764a2efc7", "a8748a79e8d37e395354ba7a8b3038468cb37e1f", 
"f1280f76933ba8b7f4a6b8662580504f02bb4ab6", "9649a19b49607459cef32f43db4f6e6727080bdb", "fcf8bb1bf2b7e3f71fb337ca3fcf3d9cf18daa46", "765be0c44a67e41e0f8f0b5d8a3af0ff40a00c7d", "20b994a78cd1db6ba86ea5aab7211574df5940b3", "68484ae8a042904a95a8d284a7f85a4e28e37513", "831d661d657d97a07894da8639a048c430c5536d", "26727dc7347e3338d22e8cf6092e3a3c7568d763", "a4a0b5f08198f6d7ea2d1e81bd97fea21afe3fc3", "13901473a12061f080b9d54219f16db7d406e769", "241d2c517dbc0e22d7b8698e06ace67de5f26fdf", "7a85b3ab0efb6b6fcb034ce13145156ee9d10598", "c3fb2399eb4bcec22723715556e31c44d086e054", "34108098e1a378bc15a5824812bdf2229b938678", "2c92839418a64728438c351a42f6dc5ad0c6e686", "1fd6004345245daf101c98935387e6ef651cbb55", "0d3ff34d8490a9a53de1aac1dea70172cb02e013", "404776aa18031828f3d5dbceed39907f038a47fe", "d0dd1364411a130448517ba532728d5c2fe78ed9", "5bde1718253ec28a753a892b0ba82d8e553b6bf3", "620e1dbf88069408b008347cd563e16aeeebeb83", "6ba3cb67bcdb7aea8a07e144c03b8c5a79c19bc0", "9264b390aa00521f9bd01095ba0ba4b42bf84d7e", "ada73060c0813d957576be471756fa7190d1e72d", "353b6c1f431feac6edde12b2dde7e6e702455abd", "ad27d13d163757b65110f98a0e7dd7f5bc8c8030", "195b61470720c7faa523e10e68d0c8d8f27d7c7a", "0c069a870367b54dd06d0da63b1e3a900a257298", "2f28db98e8250cff29bc64b569801c739036e4ef", "0f811d717c459c897a4fbffb3ccd9ac794be0b8f", "62c435bc714f13a373926e3b1914786592ed1fef", "7143518f847b0ec57a0ff80e0304c89d7e924d9a", "7f82f8a416170e259b217186c9e38a9b05cb3eb4", "8e36100cb144685c26e46ad034c524b830b8b2f2", "8dd9c97b85e883c16e5b1ec260f9cd610df52dec", "9487cea80f23afe9bccc94deebaa3eefa6affa99", "43fce0c6b11eb50f597aa573611ac6dc47e088d3", "24f1e2b7a48c2c88c9e44de27dc3eefd563f6d39", "0c1314d98bb6b99af00817644c1803dbc0fb5ff5", "2f9c173ccd8c1e6b88d7fb95d6679838bc9ca51d", "71d786fdb563bdec6ca0bbf69eba8e3f37c48c6f", "6f2dc51d607f491dbe6338711c073620c85351ac", "7c66e7f357553fd4b362d00ff377bffb9197410e", "01125e3c68edb420b8d884ff53fb38d9fbe4f2b8", "5e9ec3b8daa95d45138e30c07321e386590f8ec7", 
"ae0765ebdffffd6e6cc33c7705df33b7e8478627", "20b437dc4fc44c17f131713ffcbb4a8bd672ef00", "27eb7a6e1fb6b42516041def6fe64bd028b7614d", "4b3eaedac75ac419c2609e131ea9377ba8c3d4b8", "4c87aafa779747828054cffee3125fcea332364d", "143bee9120bcd7df29a0f2ad6f0f0abfb23977b8", "981449cdd5b820268c0876477419cba50d5d1316", "581e920ddb6ecfc2a313a3aa6fed3d933b917ab0", "bd236913cfe07896e171ece9bda62c18b8c8197e", "aafeb3d76155ec28e8ab6b4d063105d5e04e471d", "032825000c03b8ab4c207e1af4daeb1f225eb025", "fb54d3c37dc82891ff9dc7dd8caf31de00c40d6a", "3fb4bf38d34f7f7e5b3df36de2413d34da3e174a", "beb49072f5ba79ed24750108c593e8982715498e", "95ea564bd983129ddb5535a6741e72bb1162c779", "9797de286a3101fc31fb51995c18ec7d3eab804d", "4e30107ee6a2e087f14a7725e7fc5535ec2f5a5f", "0c36c988acc9ec239953ff1b3931799af388ef70", "7c825562b3ff4683ed049a372cb6807abb09af2a", "22264e60f1dfbc7d0b52549d1de560993dd96e46", "bdfcc45cfa495939789b73eec7e6e98a4d7e3f41", "969fd48e1a668ab5d3c6a80a3d2aeab77067c6ce", "d5d7e89e6210fcbaa52dc277c1e307632cd91dab", "cf805d478aeb53520c0ab4fcdc9307d093c21e52", "b1d89015f9b16515735d4140c84b0bacbbef19ac", "c6f3399edb73cfba1248aec964630c8d54a9c534", "5fa04523ff13a82b8b6612250a39e1edb5066521", "ede5982980aa76deae8f9dc5143a724299d67742", "f5eb411217f729ad7ae84bfd4aeb3dedb850206a", "3a3e55cf5bfd689d6c922e082efa0cd71cd2ae5c", "3a1c40eced07d59a3ea7acda94fa833c493909c1", "793e7f1ba18848908da30cbad14323b0389fd2a8", "59e9934720baf3c5df3a0e1e988202856e1f83ce", "25ff865460c2b5481fa4161749d5da8501010aa0", "7f5b379b12505d60f9303aab1fea48515d36d098", "8f71c97206a03c366ddefaa6812f865ac6df87e9", "aab3561acbd19f7397cbae39dd34b3be33220309", "9ac43a98fe6fde668afb4fcc115e4ee353a6732d", "636b8ffc09b1b23ff714ac8350bb35635e49fa3c", "67b79c2336b9a2efbfc805b9a6912a0959e392a9", "78f08cc9f845dc112f892a67e279a8366663e26d", "1e5ca4183929929a4e6f09b1e1d54823b8217b8e", "19f076998ba757602c8fec04ce6a4ca674de0e25", "e200c3f2849d56e08056484f3b6183aa43c0f13a", "f4210309f29d4bbfea9642ecadfb6cf9581ccec7", 
"0b7d1386df0cf957690f0fe330160723633d2305", "33c2131cc85c0f0fef0f15ac18f28312347d9ba4", "84fa126cb19d569d2f0147bf6f9e26b54c9ad4f1", "23c66ab737367a96f1422ce5c4ff8421709ef70d", "0699475af70765d0810881d3536b44a3c1d745a2", "55804f85613b8584d5002a5b0ddfe86b0d0e3325", "c7de0c85432ad17a284b5b97c4f36c23f506d9d1", "13604bbdb6f04a71dea4bd093794e46730b0a488", "3888d7a40f3cea5e4a851c8ca97a2d7810a62867", "3c63fa505a44902f13698ec10d7f259b1d0878ee", "537d8c4c53604fd419918ec90d6ef28d045311d0", "b5d7c5aba7b1ededdf61700ca9d8591c65e84e88", "732e4016225280b485c557a119ec50cffb8fee98", "718824256b4461d62d192ab9399cfc477d3660b4", "d790093cb85fc556c0089610026e0ec3466ab845", "6f26ab7edd971148723d9b4dc8ddf71b36be9bf7", "aa6e8a2a9d3ed59d2ae72add84176e7b7f4b2912", "d4fb26f5528b9a1f04ea773cc2b920e01fc0edd4", "3773e5d195f796b0b7df1fca6e0d1466ad84b5e7", "61542874efb0b4c125389793d8131f9f99995671", "b5402c03a02b059b76be829330d38db8e921e4b5", "2c8f24f859bbbc4193d4d83645ef467bcf25adc2", "218b2c5c9d011eb4432be4728b54e39f366354c1", "dc9d62087ff93a821e6bb8a15a8ae2da3e39dcdd", "0f2461a265be997c962fa562ae48378fb964b7b4", "40a74eea514b389b480d6fe8b359cb6ad31b644a", "5456166e3bfe78a353df988897ec0bd66cee937f", "38861d0d3a0292c1f54153b303b0d791cbba1d50", "571f493c0ade12bbe960cfefc04b0e4607d8d4b2", "84ec0983adb8821f0655f83b8ce47f36896ca9ee", "5da139fc43216c86d779938d1c219b950dd82a4c", "60496b400e70acfbbf5f2f35b4a49de2a90701b5", "28e0ed749ebe7eb778cb13853c1456cb6817a166", "b7c5f885114186284c51e863b58292583047a8b4", "2d146cc0908c931d87f6e6e5d08b117c30a69b8d", "5f7c4c20ae2731bfb650a96b69fd065bf0bb950e", "44834929e56f2a8f16844fde519039d647006216", "7a595800b490ff437ab06fe7612a678d5fe2b57d", "68f69e6c6c66cfde3d02237a6918c9d1ee678e1b", "8d4f12ed7b5a0eb3aa55c10154d9f1197a0d84f3", "0a85bdff552615643dd74646ac881862a7c7072d", "a1e97c4043d5cc9896dc60ae7ca135782d89e5fc", "cd2c54705c455a4379f45eefdf32d8d10087e521", "0da75b0d341c8f945fae1da6c77b6ec345f47f2a", "2b339ece73e3787f445c5b92078e8f82c9b1c522", 
"b5968e7bb23f5f03213178c22fd2e47af3afa04c", "3e0a1884448bfd7f416c6a45dfcdfc9f2e617268", "1451e7b11e66c86104f9391b80d9fb422fb11c01", "d94d7ff6f46ad5cab5c20e6ac14c1de333711a0c", "c8b9217ee36aebb9735e525b718490dc27c8c1cb", "0aaf785d7f21d2b5ad582b456896495d30b0a4e2", "c97a5f2241cc6cd99ef0c4527ea507a50841f60b", "0b84f07af44f964817675ad961def8a51406dd2e", "eb8a3948c4be0d23eb7326d27f2271be893b3409", "ff1f45bdad41d8b35435098041e009627e60d208", "725c3605c2d26d113637097358cd4c08c19ff9e1", "2a5903bdb3fdfb4d51f70b77f16852df3b8e5f83", "ba7b12c8e2ff3c5e4e0f70b58215b41b18ff8feb", "808b685d09912cbef4a009e74e10476304b4cccf", "ae936628e78db4edb8e66853f59433b8cc83594f", "ca096e158912080493a898b0b8a4bd2902674fed", "6d8eef8f8d6cd8436c55018e6ca5c5907b31ac19", "5f771fed91c8e4b666489ba2384d0705bcf75030", "3d24b386d003bee176a942c26336dbe8f427aadd", "8e8e3f2e66494b9b6782fb9e3f52aeb8e1b0d125", "7b1ca9a74ab7fbfc32a69e8313ca2f2d78ac6c35", "dad6b36fd515bda801f3d22a462cc62348f6aad8", "f1748303cc02424704b3a35595610890229567f9", "b755505bdd5af078e06427d34b6ac2530ba69b12", "5c435c4bc9c9667f968f891e207d241c3e45757a", "31f1e711fcf82c855f27396f181bf5e565a2f58d", "81c21f4aafab39b7f5965829ec9e0f828d6a6182", "1890470d07a090e7b762091c7b9670b5c2e1c348", "97d1d561362a8b6beb0fdbee28f3862fb48f1380", "af12a79892bd030c19dfea392f7a7ccb0e7ebb72", "7bc1e7d000ab517161a83b1fedf353e619516ddf", "2d7c2c015053fff5300515a7addcd74b523f3f66", "b234d429c9ea682e54fca52f4b889b3170f65ffc", "fa24bf887d3b3f6f58f8305dcd076f0ccc30272a", "fcd3d69b418d56ae6800a421c8b89ef363418665", "e198a7b9e61dd19c620e454aaa81ae8f7377ade0", "8d8461ed57b81e05cc46be8e83260cd68a2ebb4d", "42a5dc91852c8c14ed5f4c3b451c9dc98348bc02", "7e1c419065fdb9cf2a31aa4b5d0c0e03f7afd54e", "15e0b9ba3389a7394c6a1d267b6e06f8758ab82b", "3083d2c6d4f456e01cbb72930dc2207af98a6244", "ff3859917d4121f47de0d46922a103c78514fcab", "29db16efc3b378c50511f743e5197a4c0b9e902f", "09111da0aedb231c8484601444296c50ca0b5388", "3b1260d78885e872cf2223f2c6f3d6f6ea254204", 
"cd63759842a56bd2ede3999f6e11a74ccbec318b", "893239f17dc2d17183410d8a98b0440d98fa2679", "ee65cee5151928c63d3ef36fcbb582fabb2b6d2c", "25337690fed69033ef1ce6944e5b78c4f06ffb81", "b249f10a30907a80f2a73582f696bc35ba4db9e2", "49e1aa3ecda55465641b2c2acc6583b32f3f1fc6", "17670b60dcfb5cbf8fdae0b266e18cf995f6014c", "1b29f23f3517ac5bbe9bf5e80cda741b61bb9b12", "29ce6b54a87432dc8371f3761a9568eb3c5593b0", "1c530de1a94ac70bf9086e39af1712ea8d2d2781", "e5dfd17dbfc9647ccc7323a5d62f65721b318ba9", "cfaf61bacf61901b7e1ac25b779a1f87c1e8cf7f", "87b607b8d4858a16731144d17f457a54e488f15d", "56c700693b63e3da3b985777da6d9256e2e0dc21", "cb7a743b9811d20682c13c4ee7b791ff01c62155", "6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c", "1f41a96589c5b5cee4a55fc7c2ce33e1854b09d6", "11b904c9180686574e6047bbd9868c354ca46cb4", "9e28243f047cc9f62a946bf87abedb65b0da0f0a", "be7444c891caf295d162233bdae0e1c79791d566", "931f99bc6865d3d0c80c15d5b1c05338dfe98982", "9f49013657cbce384df9b16a2a17293bc4c9d967", "23d55061f7baf2ffa1c847d356d8f76d78ebc8c1", "ffc81ced9ee8223ab0adb18817321cbee99606e6", "1e344b99583b782e3eaf152cdfa15f217b781181", "197eaa59a003a4c7cc77c1abe0f99d942f716942", "4b9ec224949c79a980a5a66664d0ac6233c3d575", "892400017e5c93611dc8361e7749135520d66f25", "c7c53d75f6e963b403057d8ba5952e4974a779ad", "29631ca6cff21c9199c70bcdbbcd5f812d331a96", "b47a3c909ee9b099854619054fd00e200b944aa9", "1e1e66783f51a206509b0a427e68b3f6e40a27c8", "4a3758f283b7c484d3f164528d73bc8667eb1591", "cc1ed45b02d7fffb42a0fd8cffe5f11792b6ea74", "bd8b7599acf53e3053aa27cfd522764e28474e57", "70569810e46f476515fce80a602a210f8d9a2b95", "14014a1bdeb5d63563b68b52593e3ac1e3ce7312", "189e5a2fa51ed471c0e7227d82dffb52736070d8", "4f00a48a60cbf750b4ccbd698d5547d83b3eaf3f", "1c5a5d58a92c161e9ba27e2dfe490e7caaee1ff5", "35e4b6c20756cd6388a3c0012b58acee14ffa604", "15f3d47b48a7bcbe877f596cb2cfa76e798c6452", "08f6ad0a3e75b715852f825d12b6f28883f5ca05", "3cc46bf79fb9225cf308815c7d41c8dd5625cc29", "8f5facdc0a2a79283864aad03edc702e2a400346", 
"812d3f6975f4cb87e9905ef18696c5c779227634", "c0b02be66a5a1907e8cfb8117de50f80b90a65a8", "1135a818b756b057104e45d976546970ba84e612", "3dce635ce4b55fb63fc6d41b38640403b152a048", "0cf2eecf20cfbcb7f153713479e3206670ea0e9c", "ba2bbef34f05551291410103e3de9e82fdf9dddd", "f2902f5956d7e2dca536d9131d4334f85f52f783", "4e8f301dbedc9063831da1306b294f2bd5b10477", "1c17450c4d616e1e1eece248c42eba4f87de9e0d", "621741b87258c745f8905d15ba81aaf2a8be60d2", "0c2370e156a4eb8d84a5fdb049c5a894c3431f1c", "362ba8317aba71c78dafca023be60fb71320381d", "33554ff9d1d3b32f67020598320d3d761d7ec81f", "ff012c56b9b1de969328dacd13e26b7138ff298b", "a784a0d1cea26f18626682ab108ce2c9221d1e53", "2336de3a81dada63eb00ea82f7570c4069342fb5", "2bbbbe1873ad2800954058c749a00f30fe61ab17", "5b9d9f5a59c48bc8dd409a1bd5abf1d642463d65", "60ce4a9602c27ad17a1366165033fe5e0cf68078", "2a7e6a1b2638550370a47f2f6f6e38e76fe9ac13", "ebbceab4e15bf641f74e335b70c6c4490a043961", "0c6a566ebdac4bd14e80cd6bf4631bc7458e1595", "d84a48f7d242d73b32a9286f9b148f5575acf227", "a01f9461bc8cf8fe40c26d223ab1abea5d8e2812", "c418a3441f992fea523926f837f4bfb742548c16", "c907104680ad53bdc673f2648d713e4d26335825", "15fbb5fc3bdd692a6b2dd737cce7f39f7c89a25c", "c035c193eed5d72c7f187f0bc880a17d217dada0", "f214bcc6ecc3309e2efefdc21062441328ff6081", "0d6b28691e1aa2a17ffaa98b9b38ac3140fb3306", "fde41dc4ec6ac6474194b99e05b43dd6a6c4f06f", "2f2406551c693d616a840719ae1e6ea448e2f5d3", "f67a73c9dd1e05bfc51219e70536dbb49158f7bc", "97f3d35d3567cd3d973c4c435cdd6832461b7c3c", "4b519e2e88ccd45718b0fc65bfd82ebe103902f7", "9d3aa3b7d392fad596b067b13b9e42443bbc377c", "31a36014354ee7c89aa6d94e656db77922b180a5", "76a52ebfc5afd547f8b73430ec81456cf25ddd69", "8c5cf18c456957c63248245791f44a685e832345", "16bce9f940bb01aa5ec961892cc021d4664eb9e4", "0f92e9121e9c0addc35eedbbd25d0a1faf3ab529", "6486b36c6f7fd7675257d26e896223a02a1881d9", "6e198f6cc4199e1c4173944e3df6f39a302cf787", "604a281100784b4d5bc1a6db993d423abc5dc8f0", "23edcd0d2011d9c0d421193af061f2eb3e155da3", 
"8355d095d3534ef511a9af68a3b2893339e3f96b", "b034cc919af30e96ee7bed769b93ea5828ae361b", "b1891010a0722117c57e98809e1f2b26cd8e9ee3", "59fe66eeb06d1a7e1496a85f7ffc7b37512cd7e5", "8000c4f278e9af4d087c0d0895fff7012c5e3d78", "217a21d60bb777d15cd9328970cab563d70b5d23", "3edb0fa2d6b0f1984e8e2c523c558cb026b2a983", "3cb488a3b71f221a8616716a1fc2b951dd0de549", "834736698f2cc5c221c22369abe95515243a9fc3", "68a2ee5c5b76b6feeb3170aaff09b1566ec2cdf5", "5058a7ec68c32984c33f357ebaee96c59e269425", "b1bb517bd87a1212174033fc786b2237844b04e6", "0a325d70cc381b136a8f4e471b406cda6d27668c", "97c59db934ff85c60c460a4591106682b5ab9caa", "d119443de1d75cad384d897c2ed5a7b9c1661d98", "a0dc68c546e0fc72eb0d9ca822cf0c9ccb4b4c4f", "1c147261f5ab1b8ee0a54021a3168fa191096df8", "b84b7b035c574727e4c30889e973423fe15560d7", "4e626b2502ee042cf4d7425a8e7a228789b23856", "dcb44fc19c1949b1eda9abe998935d567498467d", "c29fe5ed41d2240352fcb8d8196eb2f31d009522", "3337cfc3de2c16dee6f7cbeda5f263409a9ad81e", "ef36ca8abf0a23e661f3b1603057963a70e16704", "452ea180cf4d08d7500fc4bc046fd7141fd3d112", "205f3d654b7d28d00d15b034a8c5b2a8740bd8b6", "f24e379e942e134d41c4acec444ecf02b9d0d3a9", "469ee1b00f7bbfe17c698ccded6f48be398f2a44", "d492dbfaa42b4f8b8a74786d7343b3be6a3e9a1d", "19da9f3532c2e525bf92668198b8afec14f9efea", "4551194408383b12db19a22cca5db0f185cced5c", "575141e42740564f64d9be8ab88d495192f5b3bc", "7c11fa4fd91cb57e6e216117febcdd748e595760", "c4ca092972abb74ee1c20b7cae6e69c654479e2c", "3c0bbfe664fb083644301c67c04a7f1331d9515f", "0821028073981f9bd2dba2ad2557b25403fe7d7d", "0e4baf74dfccef7a99c6954bb0968a2e35315c1f", "a05b1254630257fe27ee195ef05cc50ce6e41f22", "8b10383ef569ea0029a2c4a60cc2d8c87391b4db", "a7191958e806fce2505a057196ccb01ea763b6ea", "59cdafed4eeb8ff7c9bb2d4ecd0edeb8a361ffc1", "d37ca68742b2999667faf464f78d2fbf81e0cb07", "c05a7c72e679745deab9c9d7d481f7b5b9b36bdd", "d5fa9d98c8da54a57abf353767a927d662b7f026", "a9fc23d612e848250d5b675e064dba98f05ad0d9", "eb6ee56e085ebf473da990d032a4249437a3e462", 
"d9810786fccee5f5affaef59bc58d2282718af9b", "659dc6aa517645a118b79f0f0273e46ab7b53cd9", "f374ac9307be5f25145b44931f5a53b388a77e49", "3d94f81cf4c3a7307e1a976dc6cb7bf38068a381", "141cb9ee401f223220d3468592effa90f0c255fa", "d79530e1745b33f3b771d0b38d090b40afc04191", "f4251e02f87ac3fcae70bdb313f13ed16ff6ff0a", "414d78e32ac41e6ff8b192bc095fe55f865a02f4", "63488398f397b55552f484409b86d812dacde99a", "a53d13b9110cddb2a5f38b9d7ed69d328e3c6db9", "6ab33fa51467595f18a7a22f1d356323876f8262", "d82b93f848d5442f82154a6011d26df8a9cd00e7", "362a70b6e7d55a777feb7b9fc8bc4d40a57cde8c", "1459d4d16088379c3748322ab0835f50300d9a38", "b1301c722886b6028d11e4c2084ee96466218be4", "d3c004125c71942846a9b32ae565c5216c068d1e", "48cfc5789c246c6ad88ff841701204fc9d6577ed", "cfdc4d0f8e1b4b9ced35317d12b4229f2e3311ab", "16820ccfb626dcdc893cc7735784aed9f63cbb70", "ed32df6b122b15a52238777c9993ed31107b4bed", "0c741fa0966ba3ee4fc326e919bf2f9456d0cd74", "0ad8149318912b5449085187eb3521786a37bc78", "b5f9306c3207ac12ac761e7d028c78b3009a219c", "997c7ebf467c579b55859315c5a7f15c1df43432", "a591639bfcabc4091ff556364074c58521159ff9", "b9d68dbeb8e5fdc5984b49a317ea6798b378e5ae", "02d650d8a3a9daaba523433fbe93705df0a7f4b1", "8411fe1142935a86b819f065cd1f879f16e77401", "999289b0ef76c4c6daa16a4f42df056bf3d68377", "2e27667421a7eeab278e0b761db4d2c725683c3f", "e475deadd1e284428b5e6efd8fe0e6a5b83b9dcd", "51bb86dc8748088a198b216f7e97616634147388", "435dc062d565ce87c6c20a5f49430eb9a4b573c4", "6a5d7d20a8c4993d56bcf702c772aa3f95f99450", "2a6783ae51d7ee781d584ef9a3eb8ab1997d0489", "cb004e9706f12d1de83b88c209ac948b137caae0", "f231046d5f5d87e2ca5fae88f41e8d74964e8f4f", "a6e4f924cf9a12625e85c974f0ed136b43c2f3b5", "4aeb87c11fb3a8ad603311c4650040fd3c088832", "d6c8f5674030cf3f5a2f7cc929bad37a422b26a0", "4aabd6db4594212019c9af89b3e66f39f3108aac", "7c8909da44e89a78fe88e815c83a4ced34f99149", "574751dbb53777101502419127ba8209562c4758", "8cffe360a05085d4bcba111a3a3cd113d96c0369", "abbc6dcbd032ff80e0535850f1bc27c4610b0d45", 
"1ca1b4f787712ede215030d22a0eea41534a601e", "b7894c1f805ffd90ab4ab06002c70de68d6982ab", "989332c5f1b22604d6bb1f78e606cb6b1f694e1a", "070c8ee3876c06f9a65693e536d61097ace40417", "72c0c8deb9ea6f59fde4f5043bff67366b86bd66", "05a0d04693b2a51a8131d195c68ad9f5818b2ce1", "6c6f0e806e4e286f3b18b934f42c72b67030ce17", "37eb666b7eb225ffdafc6f318639bea7f0ba9a24", "935a7793cbb8f102924fa34fce1049727de865c2", "387b54cf6c186c12d83f95df6bd458c5eb1254ee", "b6a23f72007cb40223d7e1e1cc47e466716de945", "1fd3dbb6e910708fa85c8a86e17ba0b6fef5617c", "70c2c2d2b7e34ff533a8477eff9763be196cd03a", "0e5557a0cc58194ad53fab5dd6f4d4195d19ce4e", "f2896dd2701fbb3564492a12c64f11a5ad456a67", "ed70d1a9435c0b32c0c75c1a062f4f07556f7016", "1c93b48abdd3ef1021599095a1a5ab5e0e020dd5", "fe866887d3c26ee72590c440ed86ffc80e980293", "2facf3e85240042a02f289a0d40fee376c478d0f", "df674dc0fc813c2a6d539e892bfc74f9a761fbc8", "0f112e49240f67a2bd5aaf46f74a924129f03912", "daa120032d8f141bc6aae20e23b1b754a0dd7d5f", "ac26166857e55fd5c64ae7194a169ff4e473eb8b", "13aef395f426ca8bd93640c9c3f848398b189874", "9cda3e56cec21bd8f91f7acfcefc04ac10973966", "2b5cb5466eecb131f06a8100dcaf0c7a0e30d391", "7195cb08ba2248f3214f5dc5d7881533dd1f46d9", "cfa40560fa74b2fb5c26bdd6ea7c610ba5130e2f", "fc798314994bf94d1cde8d615ba4d5e61b6268b6", "1f5725a4a2eb6cdaefccbc20dccadf893936df12", "963a004e208ce4bd26fa79a570af61d31651b3c3", "a6e43b73f9f87588783988333997a81b4487e2d5", "4919663c62174a9bc0cc7f60da8f96974b397ad2", "05318a267226f6d855d83e9338eaa9e718b2a8dd", "00301c250d667700276b1e573640ff2fd7be574d", "f47404424270f6a20ba1ba8c2211adfba032f405", "4b5ff8c67f3496a414f94e35cb35a601ec98e5cf", "70db3a0d2ca8a797153cc68506b8650908cb0ada", "876583a059154def7a4bc503b21542f80859affd", "7553fba5c7f73098524fbb58ca534a65f08e91e7", "a608c5f8fd42af6e9bd332ab516c8c2af7063c61", "c49075ead6eb07ede5ada4fe372899bd0cfb83ac", "44fb4dcf88eb482e2ab79fd4540caf941613b970", "e9fcd15bcb0f65565138dda292e0c71ef25ea8bb", "47a003e6bbfc5bf04a099ca53c67ddfdbea71315", 
"4ab84f203b0e752be83f7f213d7495b04b1c4c79", "1f3f7df159c338884ddfd38ee2d3ba2e1e3ada69", "744d23991a2c48d146781405e299e9b3cc14b731", "23675cb2180aac466944df0edda4677a77c455cd", "0fc5c6f06e40014a56f492172f44c073d269e95c", "c5421a18583f629b49ca20577022f201692c4f5d", "6e12ba518816cbc2d987200c461dc907fd19f533", "2bf03e8fb775718ac9730524a176ddd189c0e457", "fcb97ede372c5bddde7a61924ac2fd29788c82ce", "64ca0dbe60bf8f8243fad73a2494c3fa7a2770e2", "bc6de183cd8b2baeebafeefcf40be88468b04b74", "68c4a1d438ea1c6dfba92e3aee08d48f8e7f7090", "d4288daef6519f6852f59ac6b85e21b8910f2207", "6adecb82edbf84a0097ff623428f4f1936e31de0", "efb24d35d8f6a46e1ff3800a2481bc7e681e255e", "c58ece1a3fa23608f022e424ec5a93cddda31308", "e0423788eb91772de9d708a17799179cf3230d63", "aee3427d0814d8a398fd31f4f46941e9e5488d83", "aca273a9350b10b6e2ef84f0e3a327255207d0f5", "85f7f03b79d03da5fae3a7f79d9aac228a635166", "d1b5b3e4b803dc4e50c5b80c1bc69c6d98751698", "d7a84db2a1bf7b97657b0250f354f249394dd700", "e16efd2ae73a325b7571a456618bfa682b51aef8", "3c6542295cf7fe362d7d629ac10670bf30cdabce", "0e192ca16ce1c967e21d62f9810591eed3d6904b", "4f37f71517420c93c6841beb33ca0926354fa11d", "25bcd5aa3bbe56c992547fba683418655b46fc4a", "cce332405ce9cd9dccc45efac26d1d614eaa982d", "635d2696aa597a278dd6563f079be06aa76a33c0", "7ad7897740e701eae455457ea74ac10f8b307bed", "69ff40fd5ce7c3e6db95a2b63d763edd8db3a102", "a5f35880477ae82902c620245e258cf854c09be9", "a422f2d0212f54807ff678f209293a27c7791ec5", "9944c451b4a487940d3fd8819080fe16d627892d", "136f92989e982ecf795cb27d65b48464eaec9323", "19705579b8e7d955092ef54a22f95f557a455338", "8cc07ae9510854ec6e79190cc150f9f1fe98a238", "c1e76c6b643b287f621135ee0c27a9c481a99054", "7117ed0be436c0291bc6fb6ea6db18de74e2464a", "5c8672c0d2f28fd5d2d2c4b9818fcff43fb01a48", "62b3598b401c807288a113796f424612cc5833ca", "3cb057a24a8adba6fe964b5d461ba4e4af68af14", "446a99fdedd5bb32d4970842b3ce0fc4f5e5fa03", "68c1090f912b69b76437644dd16922909dd40d60", "204f1cf56794bb23f9516b5f225a6ae00d3d30b8", 
"fcd3d557863e71dd5ce8bcf918adbe22ec59e62f", "c4fb2de4a5dc28710d9880aece321acf68338fde", "c00df53bd46f78ae925c5768d46080159d4ef87d", "02bd665196bd50c4ecf05d6852a4b9ba027cd9d0", "f074e86e003d5b7a3b6e1780d9c323598d93f3bc", "68d2afd8c5c1c3a9bbda3dd209184e368e4376b9", "fdf8e293a7618f560e76bd83e3c40a0788104547", "cd4c047f4d4df7937aff8fc76f4bae7718004f40", "244b57cc4a00076efd5f913cc2833138087e1258", "759cf57215fcfdd8f59c97d14e7f3f62fafa2b30", "5789f8420d8f15e7772580ec373112f864627c4b", "622c84d79a9420ed6f3a78f29233d56b1e99cc21", "043efe5f465704ced8d71a067d2b9d5aa5b59c29", "2a171f8d14b6b8735001a11c217af9587d095848", "14ee4948be56caeb30aa3b94968ce663e7496ce4", "3b73f8a2b39751efb7d7b396bf825af2aaadee24", "0c20fd90d867fe1be2459223a3cb1a69fa3d44bf", "c13211a15abd3ca187ef36b9f816891f901ba788", "8f60c343f76913c509ce623467bf086935bcadac", "be632b206f1cd38eab0c01c5f2004d1e8fc72880", "04dcdb7cb0d3c462bdefdd05508edfcff5a6d315", "8fda2f6b85c7e34d3e23927e501a4b4f7fc15b2a", "ca8f23d9b9a40016eaf0467a3df46720ac718e1d", "01bef320b83ac4405b3fc5b1cff788c124109fb9", "dbc8ffd6457147ff06cd3f56834e3ec6dccb2057", "df51dfe55912d30fc2f792561e9e0c2b43179089", "12055b8f82d5411f9ad196b60698d76fbd07ac1e", "4cd0da974af9356027a31b8485a34a24b57b8b90", "c34532fe6bfbd1e6df477c9ffdbb043b77e7804d", "71fd29c2ae9cc9e4f959268674b6b563c06d9480", "2b286ed9f36240e1d11b585d65133db84b52122c", "31182c5ffc8c5d8772b6db01ec98144cd6e4e897", "5efdf48ca56b78e34dc2f2f0ce107a25793d3fc2", "d7593148e4319df7a288180d920f2822eeecea0b", "98142103c311b67eeca12127aad9229d56b4a9ff", "16d9b983796ffcd151bdb8e75fc7eb2e31230809", "ed0cf5f577f5030ac68ab62fee1cf065349484cc", "e853484dc585bed4b0ed0c5eb4bc6d9d93a16211", "2cf5f2091f9c2d9ab97086756c47cd11522a6ef3", "87e592ee1a7e2d34e6b115da08700a1ae02e9355", "26d407b911d1234e8e3601e586b49316f0818c95", "2e6cfeba49d327de21ae3186532e56cadeb57c02", "0a85afebaa19c80fddb660110a4352fd22eb2801", "c7f0c0636d27a1d45b8fcef37e545b902195d937", "4ccf64fc1c9ca71d6aefdf912caf8fea048fb211", 
"fd892e912149e3f5ddd82499e16f9ea0f0063fa3", "06c2086f7f72536bf970ca629151b16927104df3", "56f231fc40424ed9a7c93cbc9f5a99d022e1d242", "b20cfbb2348984b4e25b6b9174f3c7b65b6aed9e", "6dddf1440617bf7acda40d4d75c7fb4bf9517dbb", "3a2c90e0963bfb07fc7cd1b5061383e9a99c39d2", "d0296efc3c532269aaa7e8f856f5d1807af847fb", "8d5998cd984e7cce307da7d46f155f9db99c6590", "366e650a578a3732ebe10267f04bcf9d3129f076", "6f3054f182c34ace890a32fdf1656b583fbc7445", "f1ea8bdb3bd39d8269628bc7b99b2d918ea23ef7", "803c92a3f0815dbf97e30c4ee9450fd005586e1a", "0e3840ea3227851aaf4633133dd3cbf9bbe89e5b", "a7664247a37a89c74d0e1a1606a99119cffc41d4", "f7b422df567ce9813926461251517761e3e6cda0", "71c4b8e1bb25ee80f4317411ea8180dae6499524", "ca37933b6297cdca211aa7250cbe6b59f8be40e5", "4b61d8490bf034a2ee8aa26601d13c83ad7f843a", "fdd80b2139ff1b9becb17badd053b9a4a6a243f2", "7cee802e083c5e1731ee50e731f23c9b12da7d36", "26a44feb7a64db7986473ca801c251aa88748477", "2c2f03edc9b76e5ac132b54b2e3313237e22b5e7", "854b1f0581f5d3340f15eb79452363cbf38c04c8", "f519723238701849f1160d5a9cedebd31017da89", "0deea943ac4dc1be822c02f97d0c6c97e201ba8d", "47cd161546c59ab1e05f8841b82e985f72e5ddcb", "b40c001b3e304dccb28c745bd54aa281c8ff1f29", "4c81c76f799c48c33bb63b9369d013f51eaf5ada", "a16fb74ea66025d1f346045fda00bd287c20af0e", "0951f42abbf649bb564a21d4ff5dddf9a5ea54d9", "c19222d138eb45903a3aa7e46030979d50769771", "6feafc5c1d8b0e9d65ebe4c1512b7860c538fbdc", "ec5c63609cf56496715b0eba0e906de3231ad6d1", "ac12ba5bf81de83991210b4cd95b4ad048317681", "341ed69a6e5d7a89ff897c72c1456f50cfb23c96", "6f6b4e2885ea1d9bea1bb2ed388b099a5a6d9b81", "16d6737b50f969247339a6860da2109a8664198a", "31ea88f29e7f01a9801648d808f90862e066f9ea", "0dccc881cb9b474186a01fd60eb3a3e061fa6546", "b07f9dfc904d317fa71c1efa9b466460abc0bee5", "56f86bef26209c85f2ef66ec23b6803d12ca6cd6", "841c99e887eb262e397fdf5b0490a2ae6c82d6b5", "9d4692e243e25eb465a0480376beb60a5d2f0f13", "7587a09d924cab41822a07cd1a988068b74baabb", "be0a0e563445119b82d664d370e646e53e69a4c5", 
"30457461333c8797457c18636732327e6dde1d04", "29f298dd5f806c99951cb434834bc8dcc765df18", "dc2f16f967eac710cb9b7553093e9c977e5b761d", "cd7a7be3804fd217e9f10682e0c0bfd9583a08db", "7aa32e0639e0750e9eee3ce16e51e9f94241ae88", "e8b56ed34ece9b1739fff0df6af3b65390c468d3", "f0cee87e9ecedeb927664b8da44b8649050e1c86", "bd572e9cbec095bcf5700cb7cd73d1cdc2fe02f4", "56c2fb2438f32529aec604e6fc3b06a595ddbfcc", "772a30f1a7a3071e5ce6ad4b0dbddc67889f5873", "58df849378fbcfb6b1a8ebddfbe4caa450226b9d", "e8951cc76af80da43e3528fe6d984071f17f57e7", "43836d69f00275ba2f3d135f0ca9cf88d1209a87", "ecfb93de88394a244896bfe6ee7bf39fb250b820", "5a4ec5c79f3699ba037a5f06d8ad309fb4ee682c", "1fe121925668743762ce9f6e157081e087171f4c", "361c9ba853c7d69058ddc0f32cdbe94fbc2166d5", "d0471d5907d6557cf081edf4c7c2296c3c221a38", "5fb5d9389e2a2a4302c81bcfc068a4c8d4efe70c", "f0ba5c89094b15469f95fd2a05a46b68b8faf1ca", "7361b900018f22e37499443643be1ff9d20edfd6", "25bf288b2d896f3c9dab7e7c3e9f9302e7d6806b", "2e9c780ee8145f29bd1a000585dd99b14d1f5894", "d278e020be85a1ccd90aa366b70c43884dd3f798", "3a05415356bd574cad1a9f1be21214e428bbc81b", "017e94ad51c9be864b98c9b75582753ce6ee134f", "9215d36c501d6ee57d74c1eeb1475efd800d92d3", "0cfca73806f443188632266513bac6aaf6923fa8", "732e8d8f5717f8802426e1b9debc18a8361c1782", "c5fff7adc5084d69390918daf09e832ec191144b", "9f3c9e41f46df9c94d714b1f080dafad6b4de1de", "00a38ebce124879738b04ffc1536018e75399193", "026e4ee480475e63ae68570d73388f8dfd4b4cde", "b161d261fabb507803a9e5834571d56a3b87d147", "63a4105adbe182e67d8fd324de5c84a6df444294", "321db1059032b828b223ca30f3304257f0c41e4c", "b839bc95794dc65340b6e5fea098fa6e6ea5e430", "f4003cbbff3b3d008aa64c76fed163c10d9c68bd", "bc6a7390135bf127b93b90a21b1fdebbfb56ad30", "cfdc632adcb799dba14af6a8339ca761725abf0a", "6c0ad77af4c0850bd01bb118e175ecc313476f27", "c254b4c0f6d5a5a45680eb3742907ec93c3a222b", "1ea8085fe1c79d12adffb02bd157b54d799568e4", "b5f4e617ac3fc4700ec8129fcd0dcf5f71722923", "88399c7fa890f1252178cd5e4979971509bd904f", 
"28312c3a47c1be3a67365700744d3d6665b86f22", "80193dd633513c2d756c3f568ffa0ebc1bb5213e", "9d36c81b27e67c515df661913a54a797cd1260bb", "53cfe4817ac2eecbe4e286709a9140a5fe729b35", "009cd18ff06ff91c8c9a08a91d2516b264eee48e", "1b70bbf7cdfc692873ce98dd3c0e191580a1b041", "0aa8a0203e5f406feb1815f9b3dd49907f5fd05b", "26433d86b9c215b5a6871c70197ff4081d63054a", "36018404263b9bb44d1fddaddd9ee9af9d46e560", "80c8d143e7f61761f39baec5b6dfb8faeb814be9", "9fa1be81d31fba07a1bde0275b9d35c528f4d0b8", "eb4d2ec77fae67141f6cf74b3ed773997c2c0cf6", "516a556aa1019052f6a162ca9c1a345f553f7f25", "e73b9b16adcf4339ff4d6723e61502489c50c2d9", "d6687d30a264974de234c48ac25616a112736f61", "b073313325b6482e22032e259d7311fb9615356c", "1de690714f143a8eb0d6be35d98390257a3f4a47", "3d1af6c531ebcb4321607bcef8d9dc6aa9f0dc5a", "0517d08da7550241fb2afb283fc05d37fce5d7b7", "b736bf09e1f94a8722c121c19f7a22d340c13e0b", "97f9c3bdb4668f3e140ded2da33fe704fc81f3ea", "1541d5cb8af55930968c02f9185c1a3b5da6b7ea", "f96bdd1e2a940030fb0a89abbe6c69b8d7f6f0c1", "81e11e33fc5785090e2d459da3ac3d3db5e43f65", "8845c03bee88fdd2f400ed2bddba038366c82abe", "36ea75e14b69bed454fde6076ea6b85ed87fbb14", "1d79ec93a9feba817c75c31604c3f8df346eabe8", "779ad364cae60ca57af593c83851360c0f52c7bf", "8adb2fcab20dab5232099becbd640e9c4b6a905a", "52e2dab86eb1444750b5dc45885288216741220b", "47bf7a8779c68009ea56a7c20e455ccdf0e3a8fa", "ac37285f2f5ccf99e9054735a36465ee35a6afdd", "961939e96eed6620b1752721ab520745ac5329c6", "b0d7013577219f34dc8208d31b2af3ee4c358157", "1455591d81c4ddabfe31de9f57f53e9b91e71fa2", "39b5f6d6f8d8127b2b97ea1a4987732c0db6f9df", "2179afa1cb4bd6d6ff0ca8df580ae511f59d99a3", "41000c3a3344676513ef4bfcd392d14c7a9a7599", "7e9df45ece7843fe050033c81014cc30b3a8903a", "40ee38d7ff2871761663d8634c3a4970ed1dc058", "46ded0e6e0042e43b94cf179b902d7932fbbdae1", "46538b0d841654a0934e4c75ccd659f6c5309b72", "0296ca8ffceef73d774dfd171447ff3ce2e764aa", "7dda2eb0054eb1aeda576ed2b27a84ddf09b07d4", "5fa0e6da81acece7026ac1bc6dcdbd8b204a5f0a", 
"750c19d5bb23ac6956b6cfff15129f226a61dfe9", "7c6dbaebfe14878f3aee400d1378d90d61373921", "9ac15845defcd0d6b611ecd609c740d41f0c341d", "ced7811f2b694e54e3d96ec5398e4b6afca67fc0", "f702a6cf6bc5e4cf53ea72baa4fc9d80cdbbae93", "b084ad222c1fc9409d355d8e54ac3d1e86f2ca18", "a0f94e9400938cbd05c4b60b06d9ed58c3458303", "a752ed42171c49c4616c9a367d2ff4b1eac09cbe", "600025c9a13ff09c6d8b606a286a79c823d89db8", "659db2ceb304984a23f883ee5414168131c3567d", "75cd81d2513b7e41ac971be08bbb25c63c37029a", "31bf8d7f5d373a2dece747448306e2228be51016", "6601a0906e503a6221d2e0f2ca8c3f544a4adab7", "01c948d2b73abe8be1ac128a6439c1081ebca95a", "9e7646b7e9e89be525cda1385cc1351cc28a896e", "ed0d8ca1701247b22516ffb1b47f28554b167608", "68f9cb5ee129e2b9477faf01181cd7e3099d1824", "a458b319f5a2763ff9c6dc959eefa77673c56671", "897aa4aaa474fed41233faec9b70b802aea5fdea", "7a8c2743db1749c2d9f16f62ee633574c1176e34", "1bd9dbe78918ed17b0a3ac40623f044cb3d3552c", "9961f1e5cf8fda29912344773bc75c47f18333a0", "6c9266aa77ea01b9d26a98a483b56e9e8b80eeba", "4ef0a6817a7736c5641dc52cbc62737e2e063420", "1ff79eba66d838d8c1cc90c22fab251bb7babc42", "2eb37a3f362cffdcf5882a94a20a1212dfed25d9", "04522dc16114c88dfb0ebd3b95050fdbd4193b90", "0172867f4c712b33168d9da79c6d3859b198ed4c", "2f8ef56c1007a02cdc016219553479d6b7e097fb", "aa52910c8f95e91e9fc96a1aefd406ffa66d797d", "c0ff7dc0d575658bf402719c12b676a34271dfcd", "9b78ce9fdac30864d1694a56328b3c8a96cccef5", "ec44510ca9c0093c5eb860128d17506614168bcf", "26a5136ee4502500fb50cd5ade814aad45422771", "ff42ec628b0980909bbb84225d0c4f8d9ac51e03", "4015e8195db6edb0ef8520709ca9cb2c46f29be7", "abb396490ba8b112f10fbb20a0a8ce69737cd492", "6ae75eaa7e9f1379338eae94fbb43664bb3c898a", "550858b7f5efaca2ebed8f3969cb89017bdb739f", "63eefc775bcd8ccad343433fc7a1dd8e1e5ee796", "8de6deefb90fb9b3f7d451b9d8a1a3264b768482", "32a440720ee988b7b41de204b2910775171ee12c", "02fda07735bdf84554c193811ba4267c24fe2e4a", "9441253b638373a0027a5b4324b4ee5f0dffd670", "62374b9e0e814e672db75c2c00f0023f58ef442c", 
"badcfb7d4e2ef0d3e332a19a3f93d59b4f85668e", "2182ca35e1a5b3cff9c5ce5308f5d0d12e4f911a", "aa0be8029ea4c657ac8440958364add54ce8c29c", "21f3c5b173503185c1e02a3eb4e76e13d7e9c5bc", "be40014beffaa9faacee12bb3412969f98b6a43d", "2b773fe8f0246536c9c40671dfa307e98bf365ad", "546b4a865af7e9493270ee2c8f644070b534019d", "b62571691a23836b35719fc457e093b0db187956", "0831a511435fd7d21e0cceddb4a532c35700a622", "4a64758786e3f49fc13781304197591ffbd69a6e", "62e61f9f7445e8dec336415ac0c7e677f9f5f7c1", "0115f260069e2e501850a14845feb400142e2443", "1bba358c9323883ddd54224ad24d2ac4d8218fec", "5b693cb3bedaa2f1e84161a4261df9b3f8e77353", "2b7b55a4143ad23aa31f00b11efebdd8246231a8", "29f0414c5d566716a229ab4c5794eaf9304d78b6", "5a0ae814be58d319dfc9fd98b058a2476801201c", "b19e83eda4a602abc5a8ef57467c5f47f493848d", "bf1e0545785b05b47caa3ffe7d16982769986f38", "27169761aeab311a428a9dd964c7e34950a62a6b", "1ab4fdcd431286a2fe9538cb9a9e3c67016fa98a", "18bfda16116e76c2b21eb2b54494506cbb25e243", "cfd933f71f4a69625390819b7645598867900eab", "6ec004e4c1171c4c4858eec7c927f567684b80bc", "7f44e5929b11ce2192c3ae81fbe602081a7ab5c4", "1226a230b0be43d03b6e0ff5a22f5752f30834bb", "a956ff50ca958a3619b476d16525c6c3d17ca264", "56e4dead93a63490e6c8402a3c7adc493c230da5", "dfb6aa168177d4685420fcb184def0aa7db7cddb", "14811696e75ce09fd84b75fdd0569c241ae02f12", "915d4a0fb523249ecbc88eb62cb150a60cf60fa0", "3b410ae97e4564bc19d6c37bc44ada2dcd608552", "91067f298e1ece33c47df65236853704f6700a0b", "d687fa99586a9ad229284229f20a157ba2d41aea", "81dd68de9d88c49db1ae509dbc66c7a82809c026", "91167aceafbc9c1560381b33c8adbc32a417231b", "3fbd68d1268922ee50c92b28bd23ca6669ff87e5", "c73199c180e5c01a5d53c19b8e079b0f6d07d618", "74f643579949ccd566f2638b85374e7a6857a9fc", "c64502696438b4c9f9e12e64daaf7605f62ce3f0", "9c4cc11d0df2de42d6593f5284cfdf3f05da402a", "37c5e3b6175db9eaadee425dc51bc7ce05b69a4e", "adf5caca605e07ee40a3b3408f7c7c92a09b0f70", "2c6ab32a03c4862ee3e2bc02e7e74745cd523ad2", "ffb1cb0f9fd65247f02c92cfcb152590a5d68741", 
"340d1a9852747b03061e5358a8d12055136599b0", "26af867977f90342c9648ccf7e30f94470d40a73", "315a90543d60a5b6c5d1716fe9076736f0e90d24", "9e10ea753b9767aa2f91dafe8545cd6f44befd7f", "3ec860cfbd5d953f29c43c4e926d3647e532c8b0", "47b508abdaa5661fe14c13e8eb21935b8940126b", "8160b3b5f07deaa104769a2abb7017e9c031f1c1", "476755252e53799b490c5a88fde81eef9a64fb7e", "142e5b4492bc83b36191be4445ef0b8b770bf4b0", "32575ffa69d85bbc6aef5b21d73e809b37bf376d", "286a5c19a43382a21c8d96d847b52bba6b715a71", "4a7e5a0f6a0df8f5ed25ef356cd67745cd854bea", "ea85378a6549bb9eb9bcc13e31aa6a61b655a9af", "dff838ba0567ef0a6c8fbfff9837ea484314efc6", "58778fafdc43f5d5b973c57843b13c6d2f05cf68", "94b60008e5f576f46bd3c385398cf2ecbb16f499", "e171fba00d88710e78e181c3e807c2fdffc6798a", "2d244d70ed1a2ba03d152189f1f90ff2b4f16a79", "65869cc5ef00d581c637ae8ea6ca02ae4bb2b996", "ad247138e751cefa3bb891c2fe69805da9c293d7", "02c993d361dddba9737d79e7251feca026288c9c", "afba76d0fe40e1be381182aec822431e20de8153", "18636347b8741d321980e8f91a44ee054b051574", "956e9b69b3366ed3e1670609b53ba4a7088b8b7e", "e5342233141a1d3858ed99ccd8ca0fead519f58b", "b7cf7bb574b2369f4d7ebc3866b461634147041a", "1ab19e516b318ed6ab64822efe9b2328836107a4", "ce5eac297174c17311ee28bda534faaa1d559bae", "7dd578878e84337d6d0f5eb593f22cabeacbb94c", "00214fe1319113e6649435cae386019235474789", "56e6f472090030a6f172a3e2f46ef9daf6cad757", "29322b9a3744afaa5fc986b805d9edb6ff5ea9fe", "9949ac42f39aeb7534b3478a21a31bc37fe2ffe3", "10e70a34d56258d10f468f8252a7762950830d2b", "d3b550e587379c481392fb07f2cbbe11728cf7a6", "24cce97c3fe3c3fc21f1225e4a9f6c1e736e6bb9", "57034dc2d16ff1cbef24a61c0a415580820f9a15", "a378fc39128107815a9a68b0b07cffaa1ed32d1f", "24936849676b25a36eb6216e458286dcaee314e5", "7f1078a2ebfa23a58adb050084d9034bd48a8a99", "ece3407b15d7d2dcf37cfe9b8fc87542a2c1162d", "cc8bf03b3f5800ac23e1a833447c421440d92197", "157647b0968d95f9288b27d6d9179a8e1ef5c970", "ec89f2307e29cc4222b887eb0619e0b697cf110d", "63340c00896d76f4b728dbef85674d7ea8d5ab26", 
"926c67a611824bc5ba67db11db9c05626e79de96", "a98a69739527f46c0a73c983789210d098c1eb09", "a35d3ba191137224576f312353e1e0267e6699a1", "4de757faa69c1632066391158648f8611889d862", "75b51140d08acdc7f0af11b0ffa1edb40ebbd059", "33aa980544a9d627f305540059828597354b076c", "09f9409430bba2afb84aa8214dbbb43bfd4cf056", "16e95a907b016951da7c9327927bb039534151da", "27e0684fa5b57715162ac6c58a6ea283c7db1719", "511b06c26b0628175c66ab70dd4c1a4c0c19aee9", "c178a86f4c120eca3850a4915134fff44cbccb48", "5859774103306113707db02fe2dd3ac9f91f1b9e", "6b6ff9d55e1df06f8b3e6f257e23557a73b2df96", "2aaa6969c03f435b3ea8431574a91a0843bd320b", "4118b4fc7d61068b9b448fd499876d139baeec81", "26c8ed504f852eda4a2e63dbbbc3480e57f43c70", "466a5add15bb5f91e0cfd29a55f5fb159a7980e5", "52258ec5ec73ce30ca8bc215539c017d279517cf", "956c634343e49319a5e3cba4f2bd2360bdcbc075", "b9b5624045c6f9d77fd1a029f4ff27aab26fa9fe", "cdf2c8752f1070b0385a94c7bf22e8b54cac521b", "972ef9ddd9059079bdec17abc8b33039ed25c99c", "1aa61dd85d3a5a2fe819cba21192ec4471c08628", "969626c52d30ea803064ddef8fb4613fa73ba11d", "66837add89caffd9c91430820f49adb5d3f40930", "3ffbc912de7bad720c995385e1fdc439b1046148", "5aa57a12444dbde0f5645bd9bcec8cb2f573c6a0", "3986161c20c08fb4b9b791b57198b012519ea58b", "07a328999666ef2dc28ce57bc1881d10e6f0b370", "b484141b99d3478a12b8a6854864c4b875d289b8", "4f028efe6708fc252851eee4a14292b7ce79d378", "f888c165f45febf3d17b8604a99a2f684d689cbc", "6af65e2a1eba6bd62843e7bf717b4ccc91bce2b8", "f856532a729bd337fae1eb7dbe55129ae7788f45", "1a47f12a2490f6775c0ad863ac856de27f5b3e03", "ca606186715e84d270fc9052af8500fe23befbda", "493ec9e567c5587c4cbeb5f08ca47408ca2d6571", "dd05cbfa0045759088d610173a78c792a4f17e4c", "1e7c73602e6a17986b2e66ef411748056acf2545", "568ced900cbf7437c9e87b60a17e16f0c1e0c442", "80345fbb6bb6bcc5ab1a7adcc7979a0262b8a923", "adf62dfa00748381ac21634ae97710bb80fc2922", "a8c62833f5e57d4cd060d6b5f0f9cfe486ee6825", "b6145d3268032da70edc9cfececa1f9ffa4e3f11", "3270b2672077cc345f188500902eaf7809799466", 
"10f4bbf87a44bab3d79e330e486c897e95f5f33f", "a6e2ee89cbe6fabad88713ef1f8e9da5dd7cf167", "6c36ed5391cb3fda6c55a4f71e991f9138e226d0", "0b9ce839b3c77762fff947e60a0eb7ebbf261e84", "80677676b127b67938c8db06a15d87f5dd4bd7f1", "19c0c7835dba1a319b59359adaa738f0410263e8", "1a96d54c326d19e32bed00642a177ea439341fa2", "ee18e29a2b998eddb7f6663bb07891bfc7262248", "7a9c317734acaf4b9bd8e07dd99221c457b94171", "7118162a994c564004d167018c0048386f408dd6", "e48e94959c4ce799fc61f3f4aa8a209c00be8d7f", "487df616e981557c8e1201829a1d0ec1ecb7d275", "1979e270093b343d62e97816eeed956062e155a0", "09718bf335b926907ded5cb4c94784fd20e5ccd8", "849a1d1accafe9e41b7015bf8cf85efe7e742df3", "a308077e98a611a977e1e85b5a6073f1a9bae6f0", "751970d4fb6f61d1b94ca82682984fd03c74f127", "e8686663aec64f4414eba6a0f821ab9eb9f93e38", "2ccedc961d4d9cd9a88297c0061d67f81773f8b8", "d3e04963ff42284c721f2bc6a90b7a9e20f0242f", "f19777e37321f79e34462fc4c416bd56772031bf", "45efd6c2dd4ca19eed38ceeb7c2c5568231451e1", "41f8477a6be9cd992a674d84062108c68b7a9520", "01d23cbac762b0e46251f5dbde08f49f2d13b9f8", "b89d4c474b42f9a241e347915391b4aba391c307", "514a74aefb0b6a71933013155bcde7308cad2b46", "016435db03820374d6af65b68f001f0918914e4f", "f9e0209dc9e72d64b290d0622c1c1662aa2cc771", "c7745f941532b7d6fa70db09e81eb1167f70f8a7", "895081d6a5545ad6385bfc6fcf460fc0b13bac86", "7b47dd9302b3085cd6705614b88d7bdbc8ae5c13", "1e19ea6e7f1c04a18c952ce29386252485e4031e", "194f5d3c240d06575403c9a422a0ebc86d43b91e", "d9e34af95c21c0e114b61abccbc653480b370c3b", "07da958db2e561cc7c24e334b543d49084dd1809", "505e5fe9e897ddbddcf4edab8c8a97d5e56e9d8d", "6a6269e591e11f41d59c2ca1e707aaa1f0d57de6", "b656abc4d1e9c8dc699906b70d6fcd609fae8182", "287795991fad3c61d6058352879c7d7ae1fdd2b6", "4033ac52dba394e390a86cd149b9838f1d7834b5", "3bcb93aa2a5e5eda039679516292af2f7c0ff9ac", "18d5b0d421332c9321920b07e0e8ac4a240e5f1f", "3266fbaaa317a796d0934b9a3f3bb7c64992ac7d", "58217ae5423828ed5e1569bee93d491569d79970", "4613b3a9344622b2997039afe3d47df1fd4de72f", 
"7ca337735ec4c99284e7c98f8d61fb901dbc9015", "fc20149dfdff5fdf020647b57e8a09c06e11434b", "cebfafea92ed51b74a8d27c730efdacd65572c40", "cbcf5da9f09b12f53d656446fd43bc6df4b2fa48", "1b7ae509c8637f3c123cf6151a3089e6b8a0d5b2", "1e8eec6fc0e4538e21909ab6037c228547a678ba", "271e2856e332634eccc5e80ba6fa9bbccf61f1be", "4300fa1221beb9dc81a496cd2f645c990a7ede53", "7de386bf2a1b2436c836c0cc1f1f23fccb24aad6", "73c5bab5c664afa96b1c147ff21439135c7d968b", "0b79356e58a0df1d0efcf428d0c7c4651afa140d", "eb6f2b5529f2a7bc8b5b03b1171f75a4c753a0b2", "db93049981abca0a281918b8d0655572922553de", "f16a605abb5857c39a10709bd9f9d14cdaa7918f", "c362116a358320e71fb6bc8baa559142677622d2", "62a30f1b149843860938de6dd6d1874954de24b7", "ec54000c6c0e660dd99051bdbd7aed2988e27ab8", "ee461d060da58d6053d2f4988b54eff8655ecede", "979fd81d135078886808839391adf1249c354cca", "17cf6195fd2dfa42670dc7ada476e67b381b8f69", "c69a66a8b9c71d6c3c19980969550090af854b89", "27e5b7ae3506a0f7472ee9089cd2472442e71c14", "71d68af11df855f886b511e4fc1635c1e9e789b0", "cbf3e848c5d2130dd640d9bd546403b8d78ce0f9", "ff5dd6f96e108d8233220cc262bc282229c1a582", "a78ef252d7e7cd86e4a72c2a7be628e73824fb92", "3419af6331e4099504255a38de6f6b7b3b1e5c14", "a6634ff2f9c480e94ed8c01d64c9eb70e0d98487", "6b14d2554d653b0c2fd0537535e3411864979a37", "9fbcf40b0649c03ba0f38f940c34e7e6c9e04c03", "d9a1dd762383213741de4c1c1fd9fccf44e6480d", "23b37c2f803a2d4b701e2f39c5f623b2f3e14d8e", "20ebbcb6157efaacf7a1ceb99f2f3e2fdf1384e6", "eb240521d008d582af37f0497f12c51f4bab16c8", "9958942a0b7832e0774708a832d8b7d1a5d287ae", "646ef290bc69ab38547632cb12ef1dd74a7c97ee", "843e6f1e226480e8a6872d8fd7b7b2cd74b637a4", "838dad9d1d68d29be280d92e69410eaac40084bc", "58684a925693a0e3e4bb1dd2ebe604885be034d2", "134cea33099cafc6615e57437e29d7c3906a2b48", "11c04c4f0c234a72f94222efede9b38ba6b2306c", "ddbb6e0913ac127004be73e2d4097513a8f02d37", "a94d2bc6854ee329ee02910e6cdb9d9228f85944", "3d42e17266475e5d34a32103d879b13de2366561", "b9cedd1960d5c025be55ade0a0aa81b75a6efa61", 
"c2c3ff1778ed9c33c6e613417832505d33513c55", "a7a6eb53bee5e2224f2ecd56a14e3a5a717e55b9", "d33b26794ea6d744bba7110d2d4365b752d7246f", "58ec93d804ceec167963d7ca1f6955a652b331aa", "e7e8c0bbee09b5af6f7df1de8f0f26da992737c4", "dbd5e9691cab2c515b50dda3d0832bea6eef79f2", "d963e640d0bf74120f147329228c3c272764932b", "b959055bae89f279015f0f6b1eca3e37ecbdd339", "132527383890565d18f1b7ad50d76dfad2f14972", "ef3a0b454370991a9c18ac7bfd228cf15ad53da0", "48463a119f67ff2c43b7c38f0a722a32f590dfeb", "7f9260c00a86a0d53df14469f1fa10e318ee2a3c", "a2d04db895dd17f2a8291b300a63604842c06d09", "04644c97784700c449f2c885cb4cab86447f0bd4", "b74a3ede83e10544640e5f58707f567e00281f54", "0fae5d9d2764a8d6ea691b9835d497dd680bbccd", "b65b51c796ed667c4c7914bf12b1926fd6bbaa0c", "5860cf0f24f2ec3f8cbc39292976eed52ba2eafd", "10fcbf30723033a5046db791fec2d3d286e34daa", "1938d85feafdaa8a65cb9c379c9a81a0b0dcd3c4", "12ebeb2176a5043ad57bc5f3218e48a96254e3e9", "7755bac678027f23fe59e13119182a9c7c18f9f7", "e0dc6f1b740479098c1d397a7bc0962991b5e294", "0ac2e8bd5a77d83bae9b49daab2c6f321e9b7a4e", "7085d21f483743007cc6a8e3fa01d8bdf592ad33", "396b2963f0403109d92a4d4f26205f279ea79d2c", "56fa0872ed73f7acfbfe83677fecb2dbc6eaa2fe", "f8162276f3b21a3873dde7a507fd68b4ab858bcc", "d0a8889f694422614bf3ecccd69aa1d4f7822606", "4d49c6cff198cccb21f4fa35fd75cbe99cfcbf27", "04250e037dce3a438d8f49a4400566457190f4e2", "d5e1173dcb2a51b483f86694889b015d55094634", "99001ac9fdaf7649c0d0bd8d2078719bafd216d9", "bd78a853df61d03b7133aea58e45cd27d464c3cf", "e1d1540a718bb7a933e21339f1a2d90660af7353", "f64574ee0e6247b84d573ddb5c6e2c4ba798ffff", "6859b891a079a30ef16f01ba8b85dc45bd22c352", "18cd79f3c93b74d856bff6da92bfc87be1109f80", "f652cb159a2cf2745aabcbf6a7beed4415e79e34", "d84e3254e3c4f4c17484643b8c3abdf5b0dbb761", "e309715b7865b9aa3027b7eb6fef9fb75a0cba28", "3b3482e735698819a6a28dcac84912ec01a9eb8a", "86ab027a1930276bb2c4695d65668e6704538b01", "867e709a298024a3c9777145e037e239385c0129", "25de28e6470b742539f124b93181166a3812e3af", 
"f1e13c1e8426243320014c45cf2c9382d9cbfac2", "ffea2b26e422c1009afa7e200a43b31a1fae86a9", "39acf4bb06b889686ca17fd8c89887a3cec26554", "4fb0954ef02a178fd64f1c8cd0408866982bac2c", "1462bc73834e070201acd6e3eaddd23ce3c1a114", "1742ffea0e1051b37f22773613f10f69d2e4ed2c", "9d8ff782f68547cf72b7f3f3beda9dc3e8ecfce6", "48319e611f0daaa758ed5dcf5a6496b4c6ef45f2", "5aadd85e2a77e482d44ac2a215c1f21e4a30d91b", "3d2c932f4f2693a87a0b855048e60f142214f475", "8754b7dba08911fca67db5bf13a6e6abd546d2e2", "62c2f898fe70c2c7ee2435cbe837be18184431d4", "433bb1eaa3751519c2e5f17f47f8532322abbe6d", "49dd4b359f8014e85ed7c106e7848049f852a304", "9d60ad72bde7b62be3be0c30c09b7d03f9710c5f", "29a013b2faace976f2c532533bd6ab4178ccd348", "ed08ac6da6f8ead590b390b1d14e8a9b97370794", "5c8ab6a48bf7c5302b800c1077884f4898ad0beb", "6b44543571fe69f088be577d0c383ffc65eceb2a", "ecc4be938f0e61a9c6b5111e0a99013f2edc54b9", "e726174d516605f80ff359e71f68b6e8e6ec6d5d", "163d0e6ea8c8b88b4383a4eaa740870e2458b9b0", "d9327b9621a97244d351b5b93e057f159f24a21e", "889bc64c7da8e2a85ae6af320ae10e05c4cd6ce7", "de8381903c579a4fed609dff3e52a1dc51154951", "21626caa46cbf2ae9e43dbc0c8e789b3dbb420f1", "d2a6f77ce311e51bb36a5301c1a4a2d220a2947b", "67a99c92166d77db02f6cc059f1aeddc32580d4b", "32bab8fe6db08c9d1e906be8a9c7e8cf7a0f0b99", "7e600faee0ba11467d3f7aed57258b0db0448a72", "08c18b2f57c8e6a3bfe462e599a6e1ce03005876", "0ce8a45a77e797e9d52604c29f4c1e227f604080", "1bddad4dc0dfa8efa402aa5d18c29304a5760f12", "f2cc459ada3abd9d8aa82e92710676973aeff275", "c363c5d44214bf518a085fb13896909f821f39e8", "ea026456729f0ec54c697198e1fd089310de4ae2", "c9be1001706bcdd8b35fa9cae733c592e90c7ec3", "82f4e8f053d20be64d9318529af9fadd2e3547ef", "760ba44792a383acd9ca8bef45765d11c55b48d4", "73dcb4c452badb3ee39a2f222298b234d08c21eb", "68003e92a41d12647806d477dd7d20e4dcde1354", "fa80344137c4d158bf59be4ac5591d074483157a", "5ea165d2bbd305dc125415487ef061bce75dac7d", "a02f0aad91c2d88b49c443e1e39c3acfc067a705", "8320dbdd3e4712cca813451cd94a909527652d63", 
"08c1f8f0e69c0e2692a2d51040ef6364fb263a40", "4362368dae29cc66a47114d5ffeaf0534bf0159c", "4e93a8a47473bf57e24aec048cb870ab366a43d6", "427bec487c330e7e34cc2c8fc2d6558690421ea0", "0f7e9199dad3237159e985e430dd2bf619ef2db5", "6e379f2d34e14efd85ae51875a4fa7d7ae63a662", "8816f93e46a2c47e02d82294f94aa83f95ac379b", "aaf2436bc63a58d18192b71cc8100768e2f8a6cb", "72bf9c5787d7ff56a1697a3389f11d14654b4fcf", "d1dae2993bdbb2667d1439ff538ac928c0a593dc", "7ed6ff077422f156932fde320e6b3bd66f8ffbcb", "237fa91c8e8098a0d44f32ce259ff0487aec02cf", "56c0b225fd57cfe173e5206a4bb0ce153bfecc29", "bc955487a0b8d2fae3f2f44320389a12ae28f0f5", "2afde207bd6f2e5fa20f3cf81940b18cc14e7dbb", "6582f4ec2815d2106957215ca2fa298396dde274", "33bba39be70f21e13769a10dbf96689aa4d3ecc6", "d2cb8814068c5a64a54ac8e5d0d3df6986370295", "ec90d333588421764dff55658a73bbd3ea3016d2", "73f467b4358ac1cafb57f58e902c1cab5b15c590", "46e86cdb674440f61b6658ef3e84fea95ea51fb4", "ceb763d6657a07b47e48e8a2956bcfdf2cf10818", "daa02cf195818cbf651ef81941a233727f71591f", "081cb09791e7ff33c5d86fd39db00b2f29653fa8", "06402979cb55ec7c4488204aab5bc23d5f432f50", "2a0efb1c17fbe78470acf01e4601a75735a805cc", "24e42e6889314099549583c7e19b1cb4cc995226", "77816b9567d5fed1f6085f33e1ddbcc73af2010e", "a0f193c86e3dd7e0020c0de3ec1e24eaff343ce4", "75859ac30f5444f0d9acfeff618444ae280d661d", "7e56d9ebd47490bb06a8ff0bd5bcd8672ec52364", "8e21399bb102e993edd82b003c306a068a2474da", "574705812f7c0e776ad5006ae5e61d9b071eebdb", "705a24f4e1766a44bbba7cf335f74229ed443c7b", "c291f0e29871c8b9509d1a2876c3e305839ad4ac", "13db9466d2ddf3c30b0fd66db8bfe6289e880802", "2cc4ae2e864321cdab13c90144d4810464b24275", "bed8feb11e8077df158e16bce064853cf217ba62", "433d2d5528d1401a402f2c1db40b933c494f11ba", "c051ea35a0d490c00e2b3b0a42eb6b7682d8e947", "9dbd098975069d01efe7f5ddfb3dae6b6695be0d", "19e62a56b6772bbd37dfc6b8f948e260dbb474f5", "96d34c1a749e74af0050004162d9dc5132098a79", "7ffc5c58e5b61ac7c45d8e6ed076248051ebea34", "46196735a201185db3a6d8f6e473baf05ba7b68f", 
"11c2d40fc63ecd88febadd8a9cac9521a6b7de66", "131bfa2ae6a04fd3b921ccb82b1c3f18a400a9c1", "0ad90118b4c91637ee165f53d557da7141c3fde0", "08f69a82fae49a4a1f13d06cae32d77bb8e5be1a", "3ff418ac82df0b5c2f09f3571557e8a4b500a62c", "50d961508ec192197f78b898ff5d44dc004ef26d", "5cb1dd76c672b99d9103db3842721289bacf6e1b", "cde373b159361705580498d8712b9b7063c0d58c", "7af38f6dcfbe1cd89f2307776bcaa09c54c30a8b", "b3add9bc9e70b6b28ba31e843e9155e7c37f3958", "a8fd23934e5039bb818b8d1c47ccb540ce2c253c", "0c8a0a81481ceb304bd7796e12f5d5fa869ee448", "14fdce01c958043140e3af0a7f274517b235adf3", "38198502b6579354931bfa35e88dba6df806721c", "28bcf31f794dc27f73eb248e5a1b2c3294b3ec9d", "421955c6d2f7a5ffafaf154a329a525e21bbd6d3", "ca458f189c1167e42d3a5aaf81efc92a4c008976", "e315959d6e806c8fbfc91f072c322fb26ce0862b", "4b94f531c203743a9f7f1e9dd009cdbee22ea197", "ed9de242a23ad546902e1d5ec022dbb029cc2282", "076c97826df63f70d55ea11f0b7ae47a7ad81ad3", "f772af1dbed4ae31d75ff257e6ba42a70039b417", "f28d549feffd414f38147d5e0460883fb487e2d3", "79c3a7131c6c176b02b97d368cd0cd0bc713ff7e", "26f03693c50eb50a42c9117f107af488865f3dc1", "bc98027b331c090448492eb9e0b9721e812fac84", "e9d77a85bc2fa672cc1bd10258c896c8d89b41e8", "77c53ec6ea448db4dad586e002a395c4a47ecf66", "8d71872d5877c575a52f71ad445c7e5124a4b174", "33792bb27ef392973e951ca5a5a3be4a22a0d0c6", "53dd25350d3b3aaf19beb2104f1e389e3442df61", "d28d697b578867500632b35b1b19d3d76698f4a9", "3328674d71a18ed649e828963a0edb54348ee598", "b598f7761b153ecb26e9d08d3c5817aac5b34b52", "655d9ba828eeff47c600240e0327c3102b9aba7c", "679b72d23a9cfca8a7fe14f1d488363f2139265f", "fa54ab106c7f6dbd3c004cea4ef74ea580cf50bf", "6448d23f317babb8d5a327f92e199aaa45f0efdc", "ac75c662568cbb7308400cc002469a14ff25edfd", "c91da328fe50821182e1ae4e7bcbe2b62496f8b9", "5bff2ffe533eb53c2e0e13ce020cc76199c12c74", "4e5c1284c3ca475d1b5715b1e7f6ca4c9902d28d", "a9cecfbc47a39fa0158a5f6fd883e0e5ac2aa134", "d5dc78eae7a3cb5c953c89376e06531d39b34836", "63d8110ac76f57b3ba8a5947bc6bdbb86f25a342", 
"4e4fa167d772f34dfffc374e021ab3044566afc3", "c50d73557be96907f88b59cfbd1ab1b2fd696d41", "fb7bf10cbc583db5d5eee945aa633fcb968e01ad", "901b0a76fde57c262fabd3a35d3d5ec8366a8480", "eed05da2c0ab7d2b0a3c665a5368efa81b185099", "9825aa96f204c335ec23c2b872855ce0c98f9046", "81e2a458b894705cc21a9719f743bfa61f1e6436", "1c65f3b3c70e1ea89114f955624d7adab620a013", "6ad5ac867c5ca56e0edaece153269d989b383b59", "a180dc9766490416246e7fbafadca14a3c500a46", "dced05d28f353be971ea2c14517e85bc457405f3", "1e799047e294267087ec1e2c385fac67074ee5c8", "0ced7b814ec3bb9aebe0fcf0cac3d78f36361eae", "a758b744a6d6962f1ddce6f0d04292a0b5cf8e07", "768c332650a44dee02f3d1d2be1debfa90a3946c", "876bae52a5edd6c9deb8bb8ad90dc5b74b640615", "15affdcef4bb9d78b2d3de23c9459ee5b7a43fcb", "2ae2e29c3e9cc2d94a26da5730df7845de0d631b", "0ba5369c5e1e87ea172089d84a5610435c73de00", "cb1b5e8b35609e470ce519303915236b907b13b6", "23fc83c8cfff14a16df7ca497661264fc54ed746", "556b9aaf1bc15c928718bc46322d70c691111158", "d0b7d3f9a59034d44e7cd1b434cfd27136a7c029", "207798603e3089a1c807c93e5f36f7767055ec06", "ad8540379884ec03327076b562b63bc47e64a2c7", "80a5afeb6968c7e736adc48bd4d5ec5b45b13f71", "49358915ae259271238c7690694e6a887b16f7ed", "1e917fe7462445996837934a7e46eeec14ebc65f", "6d4236a7a693555f701c0d149d1db89325035e23", "b2c25af8a8e191c000f6a55d5f85cf60794c2709", "e0b71d3c7d551684bd334af5b3671df7053a529d", "25f7f03acf62b2cf3672bb506c8827d00b048608", "f3cf10c84c4665a0b28734f5233d423a65ef1f23", "29756b6b16d7b06ea211f21cdaeacad94533e8b4", "c590c6c171392e9f66aab1bce337470c43b48f39", "72167c9e4e03e78152f6df44c782571c3058050e", "721119b5f15ccccfd711571fb5a676d622d231bf", "d394bd9fbaad1f421df8a49347d4b3fca307db83", "fc00d634797c5378ca9a441c2d4ce88761d3c7eb", "1b589016fbabe607a1fb7ce0c265442be9caf3a9", "9d01eca806e0f98c5b3c9a865cec1bd8c78e0f0c", "93108f1548e8766621565bdb780455023349d2b2", "a941434fce5d3fddcd78e2b82d46ccab0411fca9", "bc9ae4b87888202bfa174ec4e8caee1a087ab994", "1a41e5d93f1ef5b23b95b7163f5f9aedbe661394", 
"1149c6ac37ae2310fe6be1feb6e7e18336552d95", "45dbf1b6fbc7fdae09e2a1928b18fbfff331a979", "3c4106f2c670362f620b33ad7715ab6fd3eb2458", "8a4893d825db22f398b81d6a82ad2560832cd890", "afdc303b3325fbc1baa9f18a66bcad59d5aa675b", "70d8bda4aafb0272ac4b93cd43e2448446b8e94d", "b013cce42dd769db754a57351d49b7410b8e82ad", "5e806d8fa48216041fe719309534e3fa903f7b5b", "3c11a1f2bd4b9ce70f699fb6ad6398171a8ad3bd", "4026dc62475d2ff2876557fc2b0445be898cd380", "ca9adaf5702a7eb9b69be98128e0cae7d6252f8b", "4d8ce7669d0346f63b20393ffaa438493e7adfec", "6b1b43d58faed7b457b1d4e8c16f5f7e7d819239", "b69bcb5f73999ea12ff4ac1ac853b72cd5096b2d", "aaa4c625f5f9b65c7f3df5c7bfe8a6595d0195a5", "5f344a4ef7edfd87c5c4bc531833774c3ed23542", "7782627fa2e545276996ff9e9a1686ac496df081", "f4c32b8bcf753033835c14a66e9c04b06bf086a3", "4f9e00aaf2736b79e415f5e7c8dfebda3043a97d", "4f9958946ad9fc71c2299847e9ff16741401c591", "58fa85ed57e661df93ca4cdb27d210afe5d2cdcd", "3167f415a861f19747ab5e749e78000179d685bc", "8d2c43759e221f39ab1b4bf70d6891ffd19fb8da", "a0fb5b079dd1ee5ac6ac575fe29f4418fdb0e670", "126214ef0dcef2b456cb413905fa13160c73ec8e", "5d185d82832acd430981ffed3de055db34e3c653", "cc589c499dcf323fe4a143bbef0074c3e31f9b60", "3e04feb0b6392f94554f6d18e24fadba1a28b65f", "a7d23c699a5ae4ad9b8a5cbb8c38e5c3b5f5fb51", "0ee83ed9bedc0cec5c3368144df0b6f4ee76ddff", "1ee27c66fabde8ffe90bd2f4ccee5835f8dedbb9", "a6d7cf29f333ea3d2aeac67cde39a73898e270b7", "64153df77fe137b7c6f820a58f0bdb4b3b1a879b", "ed04e161c953d345bcf5b910991d7566f7c486f7", "ddf55fc9cf57dabf4eccbf9daab52108df5b69aa", "ae9257f3be9f815db8d72819332372ac59c1316b", "db0379c9b02e514f10f778cccff0d6a6acf40519", "898a66979c7e8b53a10fd58ac51fbfdb6e6e6e7c", "28fe6e785b32afdcd2c366c9240a661091b850cf", "201802c83b4f161de764bb1480735e0b090b5c3b", "360d66e210f7011423364327b7eccdf758b5fdd2", "5c3eb40b06543f00b2345f3291619a870672c450", "8c6b9c9c26ead75ce549a57c4fd0a12b46142848", "aad6fc5bd7631d2e68b7a5a01ac5d578899c43e5", "9da63f089b8ee23120bfa8b4d9d9c8f605f421fc", 
"4d356f347ab6647fb3e8ed8c2154dbd359e479ed", "88bef50410cea3c749c61ed68808fcff84840c37", "1286641b8896ae737e140cfd3da2d081d4cd548e", "449b1b91029e84dab14b80852e35387a9275870e", "a3f689fa5d71bdc7e19a959ac5d0f995e8e56493", "291ce7be8daa99848bf13c32b237ad823d5738e9", "492afe8f07de6225f70b72c922df83effd909334", "6a1beb34a2dfcdf36ae3c16811f1aef6e64abff2", "8ef465ff12ee1d2be2a99d1c628117a4ce890a6b", "176bd61cc843d0ed6aa5af83c22e3feb13b89fe1", "db3545a983ffd24c97c18bf7f068783102548ad7", "0f940d2cdfefc78c92ec6e533a6098985f47a377", "143571c2fc9b1b69d3172f8a35b8fad50bc8202a", "fb5280b80edcf088f9dd1da769463d48e7b08390", "d59f18fcb07648381aa5232842eabba1db52383e", "68d70d49ae5476181f3ceb4bc1caf493127b08b1", "778bff335ae1b77fd7ec67404f71a1446624331b", "c28461e266fe0f03c0f9a9525a266aa3050229f0", "baa0fe4d0ac0c7b664d4c4dd00b318b6d4e09143", "5af06815baa4b8f53adc9dc22f6eb3f6f1ad8ff8", "9547a7bce2b85ef159b2d7c1b73dea82827a449f", "d4353952a408e1eae8c27a45cc358976d38dde00", "ac03849956ac470c41585d2ee34d8bb58bb3c764", "3ea8a6dc79d79319f7ad90d663558c664cf298d4", "8f3e120b030e6c1d035cb7bd9c22f6cc75782025", "6889d649c6bbd9c0042fadec6c813f8e894ac6cc", "9ac82909d76b4c902e5dde5838130de6ce838c16", "0b85b50b6ff03a7886c702ceabad9ab8c8748fdc", "0748b29b046d0659765649f7831a319ec23967e2", "36ce0b68a01b4c96af6ad8c26e55e5a30446f360", "62415bbd69270e6577136ba7120f4a682251cdbb", "d0e895a272d684a91c1b1b1af29747f92919d823", "84b4eb66ad75a74f77299f1ecb6aa6305362e8cd", "642b5173644caa5c5189982a3d1e41163fa9d595", "9026eb610916ec4ce77f0d7d543b7c2482ba4173", "c660500b49f097e3af67bb14667de30d67db88e3", "5e53f530871b5167be0f224993be8a38e85796e8", "5ddfd3d372f7679518db8fd763d5f8bc5899ed67", "c32f04ccde4f11f8717189f056209eb091075254", "73b05a7faf1b9363ffff125db101dbe2b0b3964f", "04c5268d7a4e3819344825e72167332240a69717", "d4ccc4f18a824af08649657660e60b67c6868d9c", "a57b92ed2d8aa5b41fe513c3e98cbf83b7141741", "b91f54e1581fbbf60392364323d00a0cd43e493c", "e74a2159f0f7afb35c7318a6e035bc31b8e69634", 
"49820ae612b3c0590a8a78a725f4f378cb605cd1", "9fdfe1695adac2380f99d3d5cb6879f0ac7f2bfd", "ccfcbf0eda6df876f0170bdb4d7b4ab4e7676f18", "553a605243b77a76c1ed4c1ad4f9a43ff45e391b", "9b164cef4b4ad93e89f7c1aada81ae7af802f3a4", "abce06a96a7c3095bfc36eed8779d89263769b85", "c84de67ec2a5d687869d0c3ca8ac974aaa5ee765", "47e3029a3d4cf0a9b0e96252c3dc1f646e750b14", "102b968d836177f9c436141e382915a4f8549276", "a8affc2819f7a722a41bb913dea9149ee0e23a1f", "b1ed708d090dd155ffa9ac9699a876292f31aaff", "95b9df34bcf4ae04beea55c11cf0cc4095aa38dc", "994f7c469219ccce59c89badf93c0661aae34264", "93b7ee9842114bc15202ff97941892aa848c0716", "06959f9cf3226179fa1b05efade843b7844fb2bc", "907475a4febf3f1d4089a3e775ea018fbec895fe", "993934822a42e70dd35fb366693d847164ca15ff", "6aa43f673cc42ed2fa351cbc188408b724cb8d50", "d280bcbb387b1d548173917ae82cb6944e3ceca6", "38787338ba659f0bfbeba11ec5b7748ffdbb1c3d", "a38045ed82d6800cbc7a4feb498e694740568258", "2bf08d4cb8d1201a9866ee7c4852bfcbf8f8e7f1", "51410d6bd9a41eacb105f15dbdaee520e050d646", "6fda12c43b53c679629473806c2510d84358478f", "4b321065f6a45e55cb7f9d7b1055e8ac04713b41", "82d781b7b6b7c8c992e0cb13f7ec3989c8eafb3d", "25c19d8c85462b3b0926820ee5a92fc55b81c35a", "74eae724ef197f2822fb7f3029c63014625ce1ca", "6ec275755f8776b620d0a4550be0e65caf2bc87a", "45c340c8e79077a5340387cfff8ed7615efa20fd", "82bef8481207de9970c4dc8b1d0e17dced706352", "f6ebfa0cb3865c316f9072ded26725fd9881e73e", "490a217a4e9a30563f3a4442a7d04f0ea34442c8", "f68f20868a6c46c2150ca70f412dc4b53e6a03c2", "6f0caff7c6de636486ff4ae913953f2a6078a0ab", "e0939b4518a5ad649ba04194f74f3413c793f28e", "564035f1b8f06e9bb061255f40e3139fa57ea879", "48f0055295be7b175a06df5bc6fa5c6b69725785", "6a657995b02bc9dee130701138ea45183c18f4ae", "00e9011f58a561500a2910a4013e6334627dee60", "60821d447e5b8a96dd9294a0514911e1141ff620", "eef0be751e9aca7776d83f25c8ffdc1a18201fd8", "946b4d840b026d91608758d04f2763e9b981234e", "1966bddc083886a9b547e1817fe6abc352a00ec3", "05a116cb6e220f96837e4418de4aa8e39839c996", 
"eafda8a94e410f1ad53b3e193ec124e80d57d095", "629a973ca5f3c7d2f4a9befab97d0044dfd3167a", "1821510693f5bed360c81706c97330d2fa7d1290", "65d7f95fcbabcc3cdafc0ad38e81d1f473bb6220", "4353d0dcaf450743e9eddd2aeedee4d01a1be78b", "f43eeb578e0ca48abfd43397bbd15825f94302e4", "81b2a541d6c42679e946a5281b4b9dc603bc171c", "8f92cccacf2c84f5d69db3597a7c2670d93be781", "6d67a7fd9a4fa99624721f37b077c71dad675805", "7003d903d5e88351d649b90d378f3fc5f211282b", "57101b29680208cfedf041d13198299e2d396314", "f925879459848a3eeb0035fe206c4645e3f20d42", "937ffb1c303e0595317873eda5ce85b1a17f9943", "d855791bc23b4aa8e751d6a4e2ae7f5566a991e8", "f2b13946d42a50fa36a2c6d20d28de2234aba3b4", "57bf9888f0dfcc41c5ed5d4b1c2787afab72145a", "7d98dcd15e28bcc57c9c59b7401fa4a5fdaa632b", "834b15762f97b4da11a2d851840123dbeee51d33", "e6865b000cf4d4e84c3fe895b7ddfc65a9c4aaec", "8c6c0783d90e4591a407a239bf6684960b72f34e", "16f940b4b5da79072d64a77692a876627092d39c", "88850b73449973a34fefe491f8836293fc208580", "03fc466fdbc8a2efb6e3046fcc80e7cb7e86dc20", "93115b81d1efc1f6d2788972bdb89908764890b6", "cf185d0d8fcad2c7f0a28b7906353d4eca5a098b", "be07f2950771d318a78d2b64de340394f7d6b717", "2d87f4bf0606ce9939033b8f1fbc64b539eb18a6", "d1082eff91e8009bf2ce933ac87649c686205195", "42441f1fee81c8fd42a74504df21b3226a648739", "3a9fbd05aaab081189a8eea6f23ed730fa6db03c", "ad624331dc5f8dc3a72b1d5baf69634b2f345656", "7dcd3f58aa75f7ae96fdac9b1c2332a4f0b2dbd3", "43f6953804964037ff91a4f45d5b5d2f8edfe4d5", "395bf182983e0917f33b9701e385290b64e22f9a", "9d1cebed7672210f9c411c5ba422a931980da833", "7588388b3f68c1a1a6b3b336d8387fee5c57c985", "91811203c2511e919b047ebc86edad87d985a4fa", "d89a754d7c59e025d2bfcdb872d2d061e2e371ba", "31146bd416626d2bf912e0a0d12ca619fb49011b", "6f1a784ebb8df0689361afe26a2e5f7a1f4c66ca", "a2b54f4d73bdb80854aa78f0c5aca3d8b56b571d", "0cd8895b4a8f16618686f622522726991ca2a324", "82a610a59c210ff77cfdde7fd10c98067bd142da", "8d0243b8b663ca0ab7cbe613e3b886a5d1c8c152", "5d9bed6974fb81efeaeeff605b075e73b119a2b5", 
"39ecdbad173e45964ffe589b9ced9f1ebfe2d44e", "38215c283ce4bf2c8edd597ab21410f99dc9b094", "0b0e679e6d3abe3adc8525d4fee49b388ccfdf9a", "2f67d5448b5372f639633d8d29aac9c0295b4d72", "b689d344502419f656d482bd186a5ee6b0140891", "ab8fb278db4405f7db08fa59404d9dd22d38bc83", "dc7df544d7c186723d754e2e7b7217d38a12fcf7", "fc7f140fcedfe54dd63769268a36ff3f175662b5", "c5c379a807e02cab2e57de45699ababe8d13fb6d", "17fad2cc826d2223e882c9fda0715fcd5475acf3", "b803cdb3377fa3b6194932607f51f2d1fafbf964", "030ef31b51bd4c8d0d8f4a9a32b80b9192fe4c3f", "2d164f88a579ba53e06b601d39959aaaae9016b7", "39150acac6ce7fba56d54248f9c0badbfaeef0ea", "0be43cf4299ce2067a0435798ef4ca2fbd255901", "5a4881bfcb4ae49229f39320197c2d01b2fbf1f5", "ba17782ca5fc0d932317389c2adf94b5dbd3ebfe", "4ca1fcfd7650eeb0ac8d51cff31b70717cdddfdd", "1addc5c1fa80086d1ed58f71a9315ad13bd87ca2", "738a985fba44f9f5acd516e07d0d9578f2ffaa4e", "2238dddb76499b19035641d97711cf30d899dadb", "2e7e1ee7e3ee1445939480efd615e8828b9838f8", "0c6e29d82a5a080dc1db9eeabbd7d1529e78a3dc", "367f2668b215e32aff9d5122ce1f1207c20336c8", "a6ffe238eaf8632b4a8a6f718c8917e7f3261546", "d2a415365f997c8fe2dbdd4e06ceab2e654172f6", "08e24f9df3d55364290d626b23f3d42b4772efb6", "32adde2e33f4344900829c557c8533f8f0979f10", "90dd2a53236b058c79763459b9d8a7ba5e58c4f1", "2c2786ea6386f2d611fc9dbf209362699b104f83", "c65e4ffa2c07a37b0bb7781ca4ec2ed7542f18e3", "454283ee7ea757dd25780807e4017cf43b4fc593", "776835eb176ed4655d6e6c308ab203126194c41e", "7212e033b37efa9c96ee51cb810c303249ab21e4", "9ef06cc958af2274afd193a1dca705c08234bcd3", "2c7185bcf31a4950b014b67ca7c63735ee00d56f", "d10cfcf206b0991e3bc20ac28df1f61c63516f30", "0daf696253a1b42d2c9d23f1008b32c65a9e4c1e", "310fe4e6cb6d090f7817de4c1034e35567b56e34", "0dbacb4fd069462841ebb26e1454b4d147cd8e98", "3fe1cfd2dc69a23c0b0cdf9456c057e6ea1ee1b9", "bd8d579715d58405dfd5a77f32920aafe018fce4", "6b9aa288ce7740ec5ce9826c66d059ddcfd8dba9", "8e272978dd1500ce6e4c2ef5e91d4332078ff757", "0be2245b2b016de1dcce75ffb3371a5e4b1e731b", 
"3674f3597bbca3ce05e4423611d871d09882043b", "b63b6ed78b39166d87d4c56f8890873aa65976a2", "c91103e6612fa7e664ccbc3ed1b0b5deac865b02", "3e2b9ffeb708b4362ebfad95fa7bb0101db1579d", "bb750b4c485bc90a47d4b2f723be4e4b74229f7a", "24b37016fee57057cf403fe2fc3dda78476a8262", "85639cefb8f8deab7017ce92717674d6178d43cc", "a70e36daf934092f40a338d61e0fe27be633f577", "57893403f543db75d1f4e7355283bdca11f3ab1b", "b6685941588febbf66f9bf6a074cd548bc8a567f", "c83d142a47babe84e8c4addafa9e2bb9e9b757a5", "1cfe3533759bf95be1fce8ce1d1aa2aeb5bfb4cc", "c5be0feacec2860982fbbb4404cf98c654142489", "19e7bdf8310f9038e1a9cf412b8dd2c77ff64c54", "ab6776f500ed1ab23b7789599f3a6153cdac84f7", "11ff2f54ecfda6c7f90ed84baf1cc5b4f07e726b", "4d21a2866cfd1f0fb2a223aab9eecfdec963059a", "51cc78bc719d7ff2956b645e2fb61bab59843d2b", "66aad5b42b7dda077a492e5b2c7837a2a808c2fa", "044fdb693a8d96a61a9b2622dd1737ce8e5ff4fa", "c8fb8872203ee694d95da47a1f9929ac27186d87", "e14b046a564604508ea8e3369e7e9f612e148511", "7eb8476024413269bfb2abd54e88d3e131d0aa0e", "bffbd04ee5c837cd919b946fecf01897b2d2d432", "01c8d7a3460422412fba04e7ee14c4f6cdff9ad7", "6bb0425baac448297fbd29a00e9c9b9926ce8870", "95aef5184b89daebd0c820c8102f331ea7cae1ad", "f8ddb2cac276812c25021b5b79bf720e97063b1e", "2654ef92491cebeef0997fd4b599ac903e48d07a", "72450d7e5cbe79b05839c30a4f0284af5aa80053", "a9f0e940cfba3663dc8304dd5dc77509f024a3cc", "5aed0f26549c6e64c5199048c4fd5fdb3c5e69d6", "2eb9f1dbea71bdc57821dedbb587ff04f3a25f07", "39f7878f447df7703f2c4ddeeffd7eb0e21f6cd4", "cf86616b5a35d5ee777585196736dfafbb9853b5", "1d6d6399fd98472012edb211981d5eb8370a07b0", "aae0e417bbfba701a1183d3d92cc7ad550ee59c3", "b871d1b8495025ff8a6255514ed39f7765415935", "5ea9cba00f74d2e113a10c484ebe4b5780493964", "4bd088ba3f42aa1e43ae33b1988264465a643a1f", "7002d6fc3e0453320da5c863a70dbb598415e7aa", "59690814e916d1c0e7aa9190678ba847cbd0046f", "9ac2960f646a46b701963230e6949abd9ac0a9b3", "32925200665a1bbb4fc8131cd192cb34c2d7d9e3", "0744af11a025e9c072ef6ad102af208e79cc6f44", 
"24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd", "480ab25eba799b59e0a1a51021c5126c88a58a0c", "40dab43abef32deaf875c2652133ea1e2c089223", "d778c46657a974e6e87df82b7ee2ced8e5c6f151", "0ba0f000baf877bc00a9e144b88fa6d373db2708", "a4e75766ef93b43608c463c233b8646439ce2415", "3bf579baf0903ee4d4180a29739bf05cbe8f4a74", "25f1f195c0efd84c221b62d1256a8625cb4b450c", "9277f1c5161bb41d4ed808c83d53509c8a1a2bdd", "6eb1e006b7758b636a569ca9e15aafd038d2c1b1", "41b997f6cec7a6a773cd09f174cb6d2f036b36cd", "f8f872044be2918de442ba26a30336d80d200c42", "cbe859d151466315a050a6925d54a8d3dbad591f", "c0ee89dc2dad76147780f96294de9e421348c1f4", "2c7c3a74da960cc76c00965bd3e343958464da45", "7636f94ddce79f3dea375c56fbdaaa0f4d9854aa", "98d1b5515b079492c8e7f0f9688df7d42d96da8e", "dcb50e1f439d1f9b14ae85866f4542e51b830a07", "bb22104d2128e323051fb58a6fe1b3d24a9e9a46", "cefd9936e91885ba7af9364d50470f6cb54315a4", "70516aede32cf0dbc539abd9416c44faafc868bd", "512b4c8f0f3fb23445c0c2dab768bcd848fa8392", "9b43897c551b134852bda113355f340e605ad4e7", "d3dea0cd65ab3da14cb7b3bd0ec59531d98508aa", "dbfe62c02b544b48354fac741d90eb4edf815db5", "a484243027b19b57b5063ad2e4b414e1d383d3e8", "133da0d8c7719a219537f4a11c915bf74c320da7", "8fba84af61ac9b5e2bcb69b6730a597d7521ad73", "ee897a827bfc03e4682fb77018c27ec29a063d2c", "045275adac94cced8a898a815293700401e9955f", "531fd9be964d18ba7970bd1ca6c3b9dc91b8d2ab", "57f7d8c6ec690bd436e70d7761bc5f46e993be4c", "919d0e681c4ef687bf0b89fe7c0615221e9a1d30", "c4a2cd5ec81cdfd894c9a20d4ffb8cda637aab1f", "5e821cb036010bef259046a96fe26e681f20266e", "4e490cf3cf26fe46507bb55a548c403b9c685ba0", "6d97e69bbba5d1f5c353f9a514d62aff63bc0fb1", "d142e74c6a7457e77237cf2a3ded4e20f8894e1a", "f2abeb1a8dd32afb9a78856db38e115046afeb34", "96578785836d7416bf2e9c154f687eed8f93b1e4", "8e29884d4a0a1a53412e115e43f1b1cefe3bbc34", "8c9c8111e18f8798a612e7386e88536dfe26455e", "ad75330953d9aacc05b5ca1a50c4fed3e7ca1e21", "2e1b1969ded4d63b69a5ec854350c0f74dc4de36", "734cdda4a4de2a635404e4c6b61f1b2edb3f501d", 
"a660390654498dff2470667b64ea656668c98ecc", "949699d0b865ef35b36f11564f9a4396f5c9cddb", "35490b021dcdec12882870a31dce9a687205ab5c", "c61a8940d66eed9850b35dd3768f18b59471ca34", "d1dd80d77655876fb45b9420fe72444c303b219e", "e0e4910d575c4a8309f2069b38b99c972dbedc57", "389334e9a0d84bc54bcd5b94b4ce4c5d9d6a2f26", "326613b5528b7806d6a06f43211800b54f34965e", "2cdd9e445e7259117b995516025fcfc02fa7eebb", "e9331ae2a887c02e0a908ebae2810a681aedee29", "467747f86df4537d6deff03dee8e552f760d7c16", "f7bebb2d5ef7c9bd38808b8e615756efafc2a1e7", "0f53ab8b6c428127753281dd77cf94bdb889b624", "247a6b0e97b9447850780fe8dbc4f94252251133", "87dd3fd36bccbe1d5f1484ac05f1848b51c6eab5", "235a347cb96ef22bf35b4cf37e2b4ee5cde9df77", "486f5e85944404a1b57333443070b0b8c588c262", "d85813b58e10a35703df3a8acf41aafe4b6e1dd2", "d6e3bd948aae43f7654ea1d9e89d88f20d8cf25f", "c78fdd080df01fff400a32fb4cc932621926021f", "50c0de2cccf7084a81debad5fdb34a9139496da0", "d1775eb9d8898a9f66c28bb92b648c3174caec18", "265af79627a3d7ccf64e9fe51c10e5268fee2aae", "81d81a2060366f29fd100f793c11acf000bd2a7f", "071099a4c3eed464388c8d1bff7b0538c7322422", "5f2c210644c1e567435d78522258e0ae036deedb", "5da3bb198b087c15509f933215b141de9e8f43ed", "15cf1f17aeba62cd834116b770f173b0aa614bf4", "e8c6c3fc9b52dffb15fe115702c6f159d955d308", "7754b708d6258fb8279aa5667ce805e9f925dfd0", "52d4952426f40394af1db43f429e0b2a2e326197", "e52be9a083e621d9ed29c8e9914451a6a327ff59", "e860db656f39d738050b5f3e0bf72724e6a4ad5c", "ea79a2ad4ac307cb8c586b52bf06d7bf783003a8", "95b5296f7ec70455b0cf1748cddeaa099284bfed", "62694828c716af44c300f9ec0c3236e98770d7cf", "8576d0031f2b0fe1a0f93dd454e73d48d98a4c63", "855882a5943fc12fa9c0e8439c482e055b4b46f3", "51d6a8a61ea9588a795b20353c97efccec73f5db", "ee6f9a0f6eb5b615a36acc1444f4df1359cc2a63", "8fcf7dfa30fa0c4194aef41c508a95d59be38f23", "097104fc731a15fad07479f4f2c4be2e071054a2", "741485741734a99e933dd0302f457158c6842adf", "ac8e09128e1e48a2eae5fa90f252ada689f6eae7", "90cb074a19c5e7d92a1c0d328a1ade1295f4f311", 
"e6d689054e87ad3b8fbbb70714d48712ad84dc1c", "855bfc17e90ec1b240efba9100fb760c068a8efa", "021469757d626a39639e260492eea7d3e8563820", "4f4f920eb43399d8d05b42808e45b56bdd36a929", "1c3073b57000f9b6dbf1c5681c52d17c55d60fd7", "fcf393a90190e376b617cc02e4a473106684d066", "95023e3505263fac60b1759975f33090275768f3", "ac98e7c570eb4a9db23f85164010f94afba1251e", "6a8a3c604591e7dd4346611c14dbef0c8ce9ba54", "ade1034d5daec9e3eba1d39ae3f33ebbe3e8e9a7", "399a2c23bd2592ebe20aa35a8ea37d07c14199da", "5550a6df1b118a80c00a2459bae216a7e8e3966c", "77223849321d57a03e0571a08e71eba06e38834a", "11a6593e6e35f95ebeb5233897d1d8bcad6f9c87", "33548531f9ed2ce6f87b3a1caad122c97f1fd2e9", "748e72af01ba4ee742df65e9c030cacec88ce506", "eee8a37a12506ff5df72c402ccc3d59216321346", "47541d04ec24662c0be438531527323d983e958e", "defa8774d3c6ad46d4db4959d8510b44751361d8", "1513949773e3a47e11ab87d9a429864716aba42d", "d56fe69cbfd08525f20679ffc50707b738b88031", "ad1679295a5e5ebe7ad05ea1502bce961ec68057", "3af1a375c7c1decbcf5c3a29774e165cafce390c", "d912b8d88d63a2f0cb5d58164e7414bfa6b41dfa", "af62621816fbbe7582a7d237ebae1a4d68fcf97d", "8cedb92694845854f3ad0daf6c9adb6b81c293de", "d647099e571f9af3a1762f895fd8c99760a3916e", "f913bb65b62b0a6391ffa8f59b1d5527b7eba948", "d5ab6aa15dad26a6ace5ab83ce62b7467a18a88e", "f6532bf13a4649b7599eb40f826aa5281e392c61", "473cbc5ec2609175041e1410bc6602b187d03b23", "74618fb4ce8ce0209db85cc6069fe64b1f268ff4", "4a03f07397c5d32463750facf010c532f45233a5", "292e1c88d43a77dbe5c610f4f611cfdb6d3212b6", "f0a9d69028edd1a39147848ad1116ca308d7491e", "dba493caf6647214c8c58967a8251641c2bda4c2", "7fb6bc6c920ca574677f0d3a40c5c377a095885b", "270733d986a1eb72efda847b4b55bc6ba9686df4", "5d2e5833ca713f95adcf4267148ac2ccf2318539", "134db6ca13f808a848321d3998e4fe4cdc52fbc2", "8b2c090d9007e147b8c660f9282f357336358061", "0b3144cdc9d6d5a1498d6178db20d1c49fb64de9", "df90850f1c153bfab691b985bfe536a5544e438b", "a79704c1ce7bf10c8753a8f51437ccbc61947d03", "f83dd9ff002a40228bbe3427419b272ab9d5c9e4", 
"842d82081f4b27ca2d4bc05c6c7e389378f0c7b8", "ae2c71080b0e17dee4e5a019d87585f2987f0508", "53c8cbc4a3a3752a74f79b74370ed8aeed97db85", "8127b7654d6e5c46caaf2404270b74c6b0967e19", "6f0d3610c4ee7b67e9d435d48bc98167761251e8", "6f5151c7446552fd6a611bf6263f14e729805ec7", "5bfad0355cdb62b22970777d140ea388a7057d4c", "14d7bce17265738f10f48987bb7bffb3eafc676e", "b42a97fb47bcd6bfa72e130c08960a77ee96f9ab", "07d986b1005593eda1aeb3b1d24078db864f8f6a", "6688b2b1c1162bc00047075005ec5c7fca7219fd", "b44ca5bb74b27d196f281b6741c645f425ff65c1", "82e1692467969940a6d6ac40eae606b8b4981f7e", "681399aa0ea4cbffd9ab22bf17661d6df4047349", "0c5afb209b647456e99ce42a6d9d177764f9a0dd", "350da18d8f7455b0e2920bc4ac228764f8fac292", "42afe6d016e52c99e2c0d876052ade9c192d91e7", "bccb35704cdd3f2765b1a3f0296d1bff3be019c1", "9a1a9dd3c471bba17e5ce80a53e52fcaaad4373e", "3dabf7d853769cfc4986aec443cc8b6699136ed0", "1a8d40bcfb087591cc221086440d9891749d47b8", "03af8cf40283ff30f1da3637b024319d0c79bdf0", "400e6c777d5894db2f6538c8ebd1124352b1c064", "1cbd3f96524ca2258fd2d5c504c7ea8da7fb1d16", "90c4deaa538da42b9b044d7b68c3692cced66036", "0515e43c92e4e52254a14660718a9e498bd61cf5", "4d2975445007405f8cdcd74b7fd1dd547066f9b8", "2e832d5657bf9e5678fd45b118fc74db07dac9da", "45f3bf505f1ce9cc600c867b1fb2aa5edd5feed8", "4e7ebf3c4c0c4ecc48348a769dd6ae1ebac3bf1b", "70c25293e33f5c37143ae20e3b0198a68083a5ed", "4ae59d2a28abd76e6d9fb53c9e7ece833dce7733", "d074b33afd95074d90360095b6ecd8bc4e5bb6a2", "75879ab7a77318bbe506cb9df309d99205862f6c", "03b98b4a2c0b7cc7dae7724b5fe623a43eaf877b", "eb7b387a3a006609b89ca5ed0e6b3a1d5ecb5e5a", "9436170c648c40b6f4cc3751fca3674aa82ffe9a", "b42b535fcd0d9bd41a6594a910ea4623e907ceb9", "60040e4eae81ab6974ce12f1c789e0c05be00303", "6d2ca1ddacccc8c865112bd1fbf8b931c2ee8e75", "ffec78f270dba4bdaf6bca7aedc16798bb9347ef", "e8f0f9b74db6794830baa2cab48d99d8724e8cb6", "8cb3f421b55c78e56c8a1c1d96f23335ebd4a5bf", "208a2c50edb5271a050fa9f29d3870f891daa4dc", "a7c39a4e9977a85673892b714fc9441c959bf078", 
"44d2ab6b7166274cc13b52d8f73a36839ca0d4a8", "2dd5f1d69e0e8a95a10f3f07f2c0c7fa172994b3", "6f35b6e2fa54a3e7aaff8eaf37019244a2d39ed3", "ce30ddb5ceaddc0e7d308880a45c135287573d0e", "d5afd7b76f1391321a1340a19ba63eec9e0f9833", "c586463b8dbedce2bfce3ee90517085a9d9e2e13", "f7be8956639e66e534ed6195d929aed4e0b90cad", "16286fb0f14f6a7a1acc10fcd28b3ac43f12f3eb", "2b1129efcbafa61da1d660de3b5c84b646540311", "5b01c4eef1e83f98751bb3ef1e4fca34abb8f530", "910524c0d0fe062bf806bb545627bf2c9a236a03", "75d2ecbbcc934563dff6b39821605dc6f2d5ffcc", "205e4d6e0de81c7dd6c83b737ffdd4519f4f7ffa", "7f8d2d7eaa03132caefe0f3b126b5b369a712c9d", "aaeb8b634bb96a372b972f63ec1dc4db62e7b62a", "f0681fc08f4d7198dcde803d69ca62f09f3db6c5", "05c974b9fde42f87e28458fb7febf7a05f2dfd18", "db3984b143c59584a32d762d712d21c0e8cf38b8", "d567f2bbc6ce6d6acf0114e6514f31eff4da68f6", "d785fcf71cb22f9c33473cba35f075c1f0f06ffc", "36fe39ed69a5c7ff9650fd5f4fe950b5880760b0", "294d1fa4e1315e1cf7cc50be2370d24cc6363a41", "a6771936ffeba6e7fffad1d2c60e42519c615e24", "eb86c6642040944abc997848a32e631d1f25a2f5", "40a9f3d73c622cceee5e3d6ca8faa56ed6ebef60", "6515fe829d0b31a5e1f4dc2970a78684237f6edb", "0b87d91fbda61cdea79a4b4dcdcb6d579f063884", "38cc2f1c13420170c7adac30f9dfac69b297fb76", "2dced31a14401d465cd115902bf8f508d79de076", "fbb2f81fc00ee0f257d4aa79bbef8cad5000ac59", "f5603ceaebe3caf6a812edef9c4b38def78cbf34", "ed023651e31cdbcaa5ef2ee1d71ddbc2906c2f76", "d7b4d741b1dd4fb3f278efa5fdf2a5d8523caa0e", "4c8581246ed4d90c942a23ed7c0e007221fa684d", "2549ac0d3f40c1f6d72f641c2f05a17aef4bf42a", "ff7bc7a6d493e01ec8fa2b889bcaf6349101676e", "c30e4e4994b76605dcb2071954eaaea471307d80", "b261439b5cde39ec52d932a222450df085eb5a91", "e73b1137099368dd7909d203b80c3d5164885e44", "719a5286611c2a89890f713af54f4a00d10967e6", "ab1900b5d7cf3317d17193e9327d57b97e24d2fc", "1962e4c9f60864b96c49d85eb897141486e9f6d1", "580e48d3e7fe1ae0ceed2137976139852b1755df", "5df376748fe5ccd87a724ef31d4fdb579dab693f", "2f184c6e2c31d23ef083c881de36b9b9b6997ce9", 
"8b3c867e67b263d7a0577a112173a64009a3b4ba", "eff87ecafed67cc6fc4f661cb077fed5440994bb", "c259db2675f3bfb157f37e6c93b03d1d14dab4c7", "46c1af268d4b3c61a0a12be091ca008a3a60e4cd", "39dc2ce4cce737e78010642048b6ed1b71e8ac2f", "cf875336d5a196ce0981e2e2ae9602580f3f6243", "151481703aa8352dc78e2577f0601782b8c41b34", "606dff86a34c67c79d93f1e536487847a5bb7002", "cfa92e17809e8d20ebc73b4e531a1b106d02b38c", "28a900a07c7cbce6b6297e4030be3229e094a950", "9c4521dd25628b517dac3656410242b83b91e1e0", "335435a94f8fa9c128b9f278d929c9d0e45e2510", "bcc346f4a287d96d124e1163e4447bfc47073cd8", "2b4d092d70efc13790d0c737c916b89952d4d8c7", "86b985b285c0982046650e8d9cf09565a939e4f9", "0b9d3a0c61ee498f8ed54aaa22d3c4e72aa56f40", "0be015e2f9a1d2acebc3afb6e0f6948dd2f9d23d", "5d9971c6a9d5c56463ea186850b16f8969a58e67", "53e34ff4639806b7599c846f219c02b025da9d13", "4b3dd18882ff2738aa867b60febd2b35ab34dffc", "985bbe1d47b843fa0b974b4db91be23f218d1ce7", "5d485501f9c2030ab33f97972aa7585d3a0d59a7", "d41c11ebcb06c82b7055e2964914b9af417abfb2", "58ca110261680a70480eb0fd5d6f609c6689323f", "1b3b01513f99d13973e631c87ffa43904cd8a821", "66ebb070ea8de63afa11cc856fe2754ea39a93ff", "b961e512242ddad7712855ab00b4d37723376e5d", "dd031dbf634103ff3c58ce87aa74ec6921b2e21d", "539bbf8e4916481bd089d5641175085edf4cf049", "49e975a4c60d99bcc42c921d73f8d89ec7130916", "0f395a49ff6cbc7e796656040dbf446a40e300aa", "76dc11b2f141314343d1601635f721fdeef86fdb", "f3e005e567f16fa55c54b4c1b17f4538d799c7de", "b340f275518aa5dd2c3663eed951045a5b8b0ab1", "7f36dd9ead29649ed389306790faf3b390dc0aa2", "6a6406906470be10f6d6d94a32741ba370a1db68", "b1665e1ddf9253dcaebecb48ac09a7ab4095a83e", "265e76285e18587065a1e28246971f003c5267f3", "5f27ed82c52339124aa368507d66b71d96862cb7", "374c7a2898180723f3f3980cbcb31c8e8eb5d7af", "5db4fe0ce9e9227042144758cf6c4c2de2042435", "e8c9dcbf56714db53063b9c367e3e44300141ff6", "78216cd51e6e1cc014b83e27e7e78631ad44b899", "1071dde48a77f81c35ad5f0ca90a9daedb54e893", "e57e1dce81e888eb07054923602e35bfb5ef3eb8", 
"016f49a54b79ec787e701cc8c7d0280273f9b1ef", "98218fa05a171a641435c154afa17bc99cf3375e", "5ea9063b44b56d9c1942b8484572790dff82731e", "ba8a99d35aee2c4e5e8a40abfdd37813bfdd0906", "dbb0a527612c828d43bcb9a9c41f1bf7110b1dc8", "8812aef6bdac056b00525f0642702ecf8d57790b", "f1af714b92372c8e606485a3982eab2f16772ad8", "e8410c4cd1689829c15bd1f34995eb3bd4321069", "34d484b47af705e303fc6987413dc0180f5f04a9", "679b7fa9e74b2aa7892eaea580def6ed4332a228", "7ae8acf20f9415f99bfb95aa000d698b8499f1ee", "3d1a6a5fd5915e0efb953ede5af0b23debd1fc7f", "35c973dba6e1225196566200cfafa150dd231fa8", "c60601bdb5465d8270fdf444e5d8aeccab744e29", "d83db03f8eae6dba91ce044c640c6b35ccf541f3", "6856a11b98ffffeff6e2f991d3d1a1232c029ea1", "3c78b642289d6a15b0fb8a7010a1fb829beceee2", "9f4f890f74ac91bdc4323e061502331945474b90", "b14e3fe0d320c0d7c09154840250d70bc88bb6c0", "416364cfdbc131d6544582e552daf25f585c557d", "287900f41dd880802aa57f602e4094a8a9e5ae56", "48cf1105eca8049e8625c5b30a69620b2381589c", "af3e6e20de06b03c33f8e85eced74c2d096730ea", "70d0bffa288e317bc62376f4f577c5bd7712e521", "a5c04f2ad6a1f7c50b6aa5b1b71c36af76af06be", "4541c9b4b7e6f7a232bdd62ae653ba5ec0f8bbf6", "dec5b11b01f35f72adb41d2be26b9b95870c5c00", "f9ccfe000092121a2016639732cdb368378256d5", "8f5ce25e6e1047e1bf5b782d045e1dac29ca747e", "2912c3ea67678a1052d7d5cbe734a6ad90fc360e", "31f1c92dbfa5aa338a21a0cb15d071cb9dc6e362", "89c73b1e7c9b5e126a26ed5b7caccd7cd30ab199", "016194dbcd538ab5a129ef1bcff3c6e073db63f9", "6a806978ca5cd593d0ccd8b3711b6ef2a163d810", "eacf974e235add458efb815ada1e5b82a05878fa", "1e2770ce52d581d9a39642b40bfa827e3abf7ea2", "3a60678ad2b862fa7c27b11f04c93c010cc6c430", "1dff919e51c262c22630955972968f38ba385d8a", "fe464b2b54154d231671750053861f5fd14454f5", "acc548285f362e6b08c2b876b628efceceeb813e", "744db9bd550bf5e109d44c2edabffec28c867b91", "e0fe68c92fefa80992f4861b0c45a3fbec7cf1c9", "23860d947cf221b6ddb6d6cf3a7ac4b08c7cb8d3", "a57b37549edba625f5955759e259e52eb0af8773", "bf03f0fe8f3ba5b118bdcbb935bacb62989ecb11", 
"25e2d3122d4926edaab56a576925ae7a88d68a77", "4fee2f524ef12741d2b0fa96f45a5ef9d20ada83", "cacd51221c592012bf2d9e4894178c1c1fa307ca", "5850aab97e1709b45ac26bb7d205e2accc798a87", "08f4832507259ded9700de81f5fd462caf0d5be8", "c87d5036d3a374c66ec4f5870df47df7176ce8b9", "29a9e9b5926e65512c25c845cceba42fc1be2958", "45877ff4694576f59c2a9ca45aa65f935378492a", "59efb1ac77c59abc8613830787d767100387c680", "fe9c460d5ca625402aa4d6dd308d15a40e1010fa", "72a00953f3f60a792de019a948174bf680cd6c9f", "28de411a5b3eb8411e7bcb0003c426aa91f33e97", "9f428db0d3cf26b9b929dd333a0445bcc7514cdf", "aa8341cb5d8f0b95f619d9949131ed5c896d6470", "76d939f73a327bf1087d91daa6a7824681d76ea1", "19fb5e5207b4a964e5ab50d421e2549ce472baa8", "75fcbb01bc7e53e9de89cb1857a527f97ea532ce", "a1ee0176a9c71863d812fe012b5c6b9c15f9aa8a", "c980443ca996402de4b5e5424f872acda0368831", "52c71d20dced998a607c466241dfc2eb88183de8", "193ec7bb21321fcf43bbe42233aed06dbdecbc5c", "a285b6edd47f9b8966935878ad4539d270b406d1", "e9b731f00d16a10a31ceea446b2baa38719a31f1", "49ed46d45d7a9cbb1077d6f7cf151a63c2f02cab", "eb9312458f84a366e98bd0a2265747aaed40b1a6", "23e75f5ce7e73714b63f036d6247fa0172d97cb6", "dce3dff9216d63c4a77a2fcb0ec1adf6d2489394", "7f9be0e08784835de0f8bc3a82fcca02b3721dc1", "78f244dc2a171944836a89874b8f60e9fe80865d", "c72e6992f44ce75a40f44be4365dc4f264735cfb", "33bbf01413910bca26ed287112d32fe88c1cc0df", "33403e9b4bbd913ae9adafc6751b52debbd45b0e", "920a92900fbff22fdaaef4b128ca3ca8e8d54c3e", "6cb8c52bb421ce04898fa42cb997c04097ddd328", "d95e6185f82e3ef3880a98122522eca8c8c3f34e", "7d9fe410f24142d2057695ee1d6015fb1d347d4a", "0729628db4bb99f1f70dd6cb2353d7b76a9fce47", "1bcbf2a4500d27d036e0f9d36d7af71c72f8ab61", "2f882ceaaf110046e63123b495212d7d4e99f33d", "0339459a5b5439d38acd9c40a0c5fea178ba52fb", "2ec7d6a04c8c72cc194d7eab7456f73dfa501c8c", "a6ab23f67d85da26592055c0eac4c34f05c26519", "0c377fcbc3bbd35386b6ed4768beda7b5111eec6", "d6fb606e538763282e3942a5fb45c696ba38aee6", "08fbbfe87563595508a77629e47613d6bd1119eb", 
"8eb9aa6349db3dd1b724266fcd5fc39a83da022a", "36b23007420b98f368d092bab196a8f3cbcf6f93", "b6530ea4c42f0133468d1ff0a44738b505152a8e", "ce2945e369603fcec1fcdc6e19aac5996325cba9", "f9c86f8b0d312ceec871c8a3b6bc79bbe76c1069", "e5fbaeddbf98c667ec7c5575bda2158a36b55409", "a7c066e636b8953481b4a8d8ff25a43a96dd348f", "c068263bb09968fe69c053906279b16532b778f4", "2c811b647a6aac924920c06e607e9e8d4b8d872d", "ebfdb4842c69177b65022f00d3d038d645f3260b", "43010792bf5cdb536a95fba16b8841c534ded316", "bcb79e3ac69508060c8cba105f6a8622eb929ab1", "c7f752eea91bf5495a4f6e6a67f14800ec246d08", "451c42da244edcb1088e3c09d0f14c064ed9077e", "22143664860c6356d3de3556ddebe3652f9c912a", "711bb5f63139ee7a9b9aef21533f959671a7d80e", "8ff1f263d91f192269f6f3b324bdb1d30761ae41", "4db99a2268a120c7af636387241188064ea42338", "a2eb90e334575d9b435c01de4f4bf42d2464effc", "591a737c158be7b131121d87d9d81b471c400dba", "2744e6d526b8f2c1b297ac2d2458aaa08b0cda11", "8699268ee81a7472a0807c1d3b1db0d0ab05f40d", "db5a00984fa54b9d2a1caad0067a9ff0d0489517", "6c40fc9df6588f7cb721537883167eede1b8d369", "c53352a4239568cc915ad968aff51c49924a3072", "ae5e92abd5929ee7f0a5aa1622aa094bac4fae29", "9e297343da13cf9ba0ad8b5b75c07723136f4885", "edff76149ec44f6849d73f019ef9bded534a38c2", "1f94734847c15fa1da68d4222973950d6b683c9e", "1d7df3df839a6aa8f5392310d46b2a89080a3c25", "07377c375ac76a34331c660fe87ebd7f9b3d74c4", "52472ec859131844f38fc7d57944778f01d109ac", "7b455cbb320684f78cd8f2443f14ecf5f50426db", "a2b4a6c6b32900a066d0257ae6d4526db872afe2", "9ca542d744149f0efc8b8aac8289f5e38e6d200c", "011e6146995d5d63c852bd776f782cc6f6e11b7b", "91d513af1f667f64c9afc55ea1f45b0be7ba08d4", "96e731e82b817c95d4ce48b9e6b08d2394937cf8", "5495e224ac7b45b9edc5cfeabbb754d8a40a879b", "84fe5b4ac805af63206012d29523a1e033bc827e", "92de9a54515f4ac8cc8e4e6b0dfab20e5e6bb09d", "de0ee491d2747a6f3d171f813fe6f5cdb3a27fd6", "2d748f8ee023a5b1fbd50294d176981ded4ad4ee", "4317856a1458baa427dc00e8ea505d2fc5f118ab", "efd308393b573e5410455960fe551160e1525f49", 
"eb3066de677f9f6131aab542d9d426aaf50ed2ce", "fb1627ed224bf7b1e3d80c097316ed7703951df2", "574b62c845809fd54cc168492424c5fac145bc83", "585260468d023ffc95f0e539c3fa87254c28510b", "84c5b45328dee855c4855a104ac9c0558cc8a328", "b50edfea790f86373407a964b4255bf8e436d377", "0c0db39cac8cb76b52cfdbe10bde1c53d68d202f", "7e2cfbfd43045fbd6aabd9a45090a5716fc4e179", "fb85867c989b9ee6b7899134136f81d6372526a9", "946017d5f11aa582854ac4c0e0f1b18b06127ef1", "76669f166ddd3fb830dbaacb3daa875cfedc24d9", "3f0c51989c516a7c5dee7dec4d7fb474ae6c28d9", "15f70a0ad8903017250927595ae2096d8b263090", "050a149051a5d268fcc5539e8b654c2240070c82", "93d11da02205bbc5ae68e521e421f70a4b74a7f7", "5bb87c7462c6c1ec5d60bde169c3a785ba5ea48f", "57ba4b6de23a6fc9d45ff052ed2563e5de00b968", "0019925779bff96448f0c75492717e4473f88377", "cb9921d5fc4ffa50be537332e111f03d74622442", "e7144f5c19848e037bb96e225d1cfd961f82bd9f", "8334da483f1986aea87b62028672836cb3dc6205", "d0eb3fd1b1750242f3bb39ce9ac27fc8cc7c5af0", "1ef4aac0ebc34e76123f848c256840d89ff728d0", "b8084d5e193633462e56f897f3d81b2832b72dff", "b2470969e4fba92f7909eac26b77d08cc5575533", "c678920facffd35853c9d185904f4aebcd2d8b49", "26ebe98753acec806b7281d085110c06d9cd1e16", "cdef0eaff4a3c168290d238999fc066ebc3a93e8", "8bfada57140aa1aa22a575e960c2a71140083293", "ec39e9c21d6e2576f21936b1ecc1574dadaf291e", "48121f5937accc8050b0c9bf2be6d1c58b07a8a0", "fa641327dc5873276f0af453a2caa1634c16f143", "0b642f6d48a51df64502462372a38c50df2051b1", "c758b9c82b603904ba8806e6193c5fefa57e9613", "b5ca8d4f259f35c1f3edfd9f108ce29881e478b0", "61efeb64e8431cfbafa4b02eb76bf0c58e61a0fa", "ddf099f0e0631da4a6396a17829160301796151c", "8ec82da82416bb8da8cdf2140c740e1574eaf84f", "1e62ca5845a6f0492574a5da049e9b43dbeadb1b", "01e14d8ffd6767336d50c2b817a7b7744903e567", "5865b6d83ba6dbbf9167f1481e9339c2ef1d1f6b", "7df268a3f4da7d747b792882dfb0cbdb7cc431bc", "78a11b7d2d7e1b19d92d2afd51bd3624eca86c3c", "ea03a569272d329090fe60d6bff8d119e18057d7", "2594a77a3f0dd5073f79ba620e2f287804cec630", 
"7fcd03407c084023606c901e8933746b80d2ad57", "cbb27980eb04f68d9f10067d3d3c114efa9d0054", "55fdff2881d43050a8c51c7fdc094dbfbbe6fa46", "90498b95fe8b299ce65d5cafaef942aa58bd68b7", "92be73dffd3320fe7734258961fe5a5f2a43390e", "8bf243817112ac0aa1348b40a065bb0b735cdb9c", "ac855f0de9086e9e170072cb37400637f0c9b735", "64b9ad39d115f3e375bde4f70fb8fdef5d681df8", "6412d8bbcc01f595a2982d6141e4b93e7e982d0f", "ef2bb8bd93fa8b44414565b32735334fa6823b56", "f20e0eefd007bc310d2a753ba526d33a8aba812c", "6fbb179a4ad39790f4558dd32316b9f2818cd106", "55266ddbe9d5366e8cd1b0b645971cad6d12157a", "61f93ed515b3bfac822deed348d9e21d5dffe373", "054738ce39920975b8dcc97e01b3b6cc0d0bdf32", "26973cf1552250f402c82e9a4445f03fe6757b58", "80097a879fceff2a9a955bf7613b0d3bfa68dc23", "1275852f2e78ed9afd189e8b845fdb5393413614", "8cd9475a3a1b2bcccf2034ce8f4fe691c57a4889", "14d72dc9f78d65534c68c3ed57305f14bd4b5753", "0d8cec1b3f9b6e25d9d31eeb54d8894a1f2ef84f", "82eff71af91df2ca18aebb7f1153a7aed16ae7cc", "0ee737085af468f264f57f052ea9b9b1f58d7222", "6eaf446dec00536858548fe7cc66025b70ce20eb", "19e0cc41b9f89492b6b8c2a8a58d01b8242ce00b", "566563a02dbaebec07429046122426acd7039166", "177d03c5851f7082cb023a20fa8a2cd1dfb59467", "72a55554b816b66a865a1ec1b4a5b17b5d3ba784", "a3201e955d6607d383332f3a12a7befa08c5a18c", "35f03f5cbcc21a9c36c84e858eeb15c5d6722309", "cae41c3d5508f57421faf672ee1bea0da4be66e0", "15ef65fd68d61f3d47326e358c446b0f054f093a", "a75dfb5a839f0eb4b613d150f54a418b7812aa90", "5babbad3daac5c26503088782fd5b62067b94fa5", "0e2ea7af369dbcaeb5e334b02dd9ba5271b10265", "81695fbbbea2972d7ab1bfb1f3a6a0dbd3475c0f", "de0eb358b890d92e8f67592c6e23f0e3b2ba3f66", "a4bb791b135bdc721c8fcc5bdef612ca654d7377", "3c563542db664321aa77a9567c1601f425500f94", "772474b5b0c90629f4d9c223fd9c1ef45e1b1e66", "7224d58a7e1f02b84994b60dc3b84d9fe6941ff5", "cd2f8d661ea2c6d6818a278eb4f0548751c3b1ae", "ba6769c165967c8dcb11fe5e0be2153ddbe99c7e", "6ce23cf4f440021b7b05aa3c1c2700cc7560b557", "7d40e7e5c01bd551edf65902386401e1b8b8014b", 
"a8117a4733cce9148c35fb6888962f665ae65b1e", "dbced84d839165d9b494982449aa2eb9109b8467", "4c8ef4f98c6c8d340b011cfa0bb65a9377107970", "035c8632c1ffbeb75efe16a4ec50c91e20e6e189", "137aa2f891d474fce1e7a1d1e9b3aefe21e22b34", "6462ef39ca88f538405616239471a8ea17d76259", "918fc4c77a436b8a588f63b2b37420b7868fbbf8", "2f43b614607163abf41dfe5d17ef6749a1b61304", "ff76ff05aa1ab17e5ca9864df2252e6bb44c8a17", "fd649233d62bf43d589818fbb41295e2d0669aeb", "61a3c45c9f802f9d5fa8d94fee811e203bac6487", "db36e682501582d1c7b903422993cf8d70bb0b42", "a14ae81609d09fed217aa12a4df9466553db4859", "0e5dad0fe99aed6978c6c6c95dc49c6dca601e6a", "3d4b76fe73ea16400d62d0d776b3f43cc5ecf72b", "9b9a1f18749e969c8f246894e59c62ae86b079be", "0394e684bd0a94fc2ff09d2baef8059c2652ffb0", "be4f18e25b06f430e2de0cc8fddcac8585b00beb", "ecd08edab496801fd4fde45362dde462d00ee91c", "6cce5ccc5d366996f5a32de17a403341db5fddc6", "1063be2ad265751fb958b396ee26167fa0e844d2", "101569eeef2cecc576578bd6500f1c2dcc0274e2", "d12bea587989fc78b47584470fd8f689b6ab81d2", "7726a6ab26a1654d34ec04c0b7b3dd80c5f84e0d", "5b721f86f4a394f05350641e639a9d6cb2046c45", "e4b825bf9d5df47e01e8d7829371d05208fc272d", "0c6a18b0cee01038eb1f9373c369835b236373ae", "34a41ec648d082270697b9ee264f0baf4ffb5c8d", "04470861408d14cc860f24e73d93b3bb476492d0", "53de11d144cd2eda7cf1bb644ae27f8ef2489289", "d082f35534932dfa1b034499fc603f299645862d", "a8e75978a5335fd3deb04572bb6ca43dbfad4738", "84c0f814951b80c3b2e39caf3925b56a9b2e1733", "62e0380a86e92709fe2c64e6a71ed94d152c6643", "e5b301ee349ba8e96ea6c71782295c4f06be6c31", "77cea27494499dd162221d1476bf70a87391790a", "481fb0a74528fa7706669a5cce6a212ac46eaea3", "0059b3dfc7056f26de1eabaafd1ad542e34c2c2e", "cd3b713722ccb1e2ae3b050837ca296b2a2dd82a", "98a660c15c821ea6d49a61c5061cd88e26c18c65", "83d50257eb4c0aa8d16d27bf2ee8d0614fd63bf6", "3bf8e4d89b9e6d004de6ea52e3e9d68f6015f94b", "0756efe121e37479157010e18723e0c8da02a34b", "cceec87bad847b9b87178bde8ce5cce6bf1a8e99", "651cafb2620ab60a0e4f550c080231f20ae6d26e", 
"b712f08f819b925ff7587b6c09a8855bc295d795", "33ac7fd3a622da23308f21b0c4986ae8a86ecd2b", "05287cbad6093deffe9a0fdb9115605595dfeaf0", "584909d2220b52c0d037e8761d80cb22f516773f", "b1efefcc9a5d30be90776571a6cc0071f3679753", "29e793271370c1f9f5ac03d7b1e70d1efa10577c", "c1dd69df9dfbd7b526cc89a5749f7f7fabc1e290", "942b89d8d17e89e58c82453de2bfcbbeb09adc81", "b7b8e7813fbc12849f2daba5cab604abd8cbaab6", "146bbf00298ee1caecde3d74e59a2b8773d2c0fc", "384f972c81c52fe36849600728865ea50a0c4670", "5161e38e4ea716dcfb554ccb88901b3d97778f64", "214ac8196d8061981bef271b37a279526aab5024", "20da3ec27d221973c681ed8713f3e00ff10fef6b", "cd436f05fb4aeeda5d1085f2fe0384526571a46e", "81831ed8e5b304e9d28d2d8524d952b12b4cbf55", "205b34b6035aa7b23d89f1aed2850b1d3780de35", "a11ce3c9b78bf3f868b1467b620219ff651fe125", "b8d4754813b88ef1a583da2fcd164398824d04db", "2d88e7922d9f046ace0234f9f96f570ee848a5b5", "a324d61c79fe2e240e080f0dab358aa72dd002b3", "964a3196d44f0fefa7de3403849d22bbafa73886", "c84991fe3bf0635e326a05e34b11ccaf74d233dc", "9a98dd6d6aaba05c9e46411ea263f74df908203d", "28b5b5f20ad584e560cd9fb4d81b0a22279b2e7b", "aad7b12936e0ced60bc0be95e8670b60b5d5ce20", "93d74b1315a09f568027b6d8b3068ef048d17889", "89e7d23e0c6a1d636f2da68aaef58efee36b718b", "395a91d49e9283e1bf2d61a75c3dc846b347ea74", "9b1bcef8bfef0fb5eb5ea9af0b699aa0534fceca", "f2d605985821597773bc6b956036bdbc5d307386", "ae8240095c9cca2c395f173fece2f46277b94929", "ce032dae834f383125cdd852e7c1bc793d4c3ba3", "68f19f06f49aa98b676fc6e315b25e23a1efb1f0", "532f7ec8e0c8f7331417dd4a45dc2e8930874066", "14ba910c46d659871843b31d5be6cba59843a8b8", "5c6de2d9f93b90034f07860ae485a2accf529285", "02601d184d79742c7cd0c0ed80e846d95def052e", "430c4d7ad76e51d83bbd7ec9d3f856043f054915", "741950ae2e503a614f257cdac653d1bb30cb8e79", "12c4ba96eaa37586f07be0d82b2e99964048dcb5", "61084a25ebe736e8f6d7a6e53b2c20d9723c4608", "499f1d647d938235e9186d968b7bb2ab20f2726d", "0a0007cfd40ae9694c84f109aea11ec4f2b6cf39", "49fe4f387ac7e5852a78b327ec42cc7300c5f8e0", 
"3a2fc58222870d8bed62442c00341e8c0a39ec87", "656531036cee6b2c2c71954bb6540ef6b2e016d0", "ffea4184a0b24807b5f4ed87f9a985c2a27027d9", "251e386a90f21db6d02806395b012b297cbf06ff", "13907865a97afde053d7bb7134d58a7bbc12043c", "5278b7a6f1178bf5f90cd3388908925edff5ad46", "78436256ff8f2e448b28e854ebec5e8d8306cf21", "cefaad8241bceb24827a71bf7c2556e458e57faa", "3e0035b447d0d4e11ceda45936c898256f321382", "3fbe4a46b94cdacbf076a66da7ea7e6546e96025", "40b0fced8bc45f548ca7f79922e62478d2043220", "63fd7a159e58add133b9c71c4b1b37b899dd646f", "a2af07176a38fe844b0e2fdf4abae65472628b38", "247cab87b133bd0f4f9e8ce5e7fc682be6340eac", "d723ebf3288126fa8cbb10ba7e2a6308aede857c", "3646b42511a6a0df5470408bc9a7a69bb3c5d742", "5e97a1095f2811e0bc188f52380ea7c9c460c896", "82eb267b8e86be0b444e841b4b4ed4814b6f1942", "da1477b4a65ae5a013e646b57e004f0cd60619a2", "01beab8f8293a30cf48f52caea6ca0fb721c8489", "76e2d7621019bd45a5851740bd2742afdcf62837", "41a6196f88beced105d8bc48dd54d5494cc156fb", "8cb403c733a5f23aefa6f583a17cf9b972e35c90", "b6052dc718c72f2506cfd9d29422642ecf3992ef", "2e8eb9dc07deb5142a99bc861e0b6295574d1fbd", "373c4d6af0ee233f0d669c3955c3a3ef2a009638", "501076313de90aca7848e0249e7f0e7283d669a1", "d3faed04712b4634b47e1de0340070653546deb2", "6974449ce544dc208b8cc88b606b03d95c8fd368", "84bc3ca61fc63b47ec3a1a6566ab8dcefb3d0015", "ebd5df2b4105ba04cef4ca334fcb9bfd6ea0430c", "6ef1996563835b4dfb7fda1d14abe01c8bd24a05", "521482c2089c62a59996425603d8264832998403", "7c47da191f935811f269f9ba3c59556c48282e80", "ac51d9ddbd462d023ec60818bac6cdae83b66992", "b03d6e268cde7380e090ddaea889c75f64560891", "8de06a584955f04f399c10f09f2eed77722f6b1c", "227b1a09b942eaf130d1d84cdcabf98921780a22", "9c25e89c80b10919865b9c8c80aed98d223ca0c6", "8c643e1a61f3f563ec382c1e450f4b2b28122614", "efeeb000107745e3fba04ee4676c0435eaf4257b", "cead57f2f7f7b733f4524c4b5a7ba7f271749b5f", "8a1ed5e23231e86216c9bdd62419c3b05f1e0b4d", "efd28eabebb9815e34031316624e7f095c7dfcfe", "6f08885b980049be95a991f6213ee49bbf05c48d", 
"0717b47ab84b848de37dbefd81cf8bf512b544ac", "64cf86ba3b23d3074961b485c16ecb99584401de", "47382cb7f501188a81bb2e10cfd7aed20285f376", "2d79dece7890121469f515a6e773ba0251fc2d98", "1b60b8e70859d5c85ac90510b370b501c5728620", "cf54e9776d799aa183d7466094525251d66389a4", "1329206dbdb0a2b9e23102e1340c17bd2b2adcf5", "aad4c94fd55d33a3f3a5377bbe441c9474cdbd1e", "2ad29b2921aba7738c51d9025b342a0ec770c6ea", "0a0b9a9ff827065e4ff11022b0e417ddf1d3734e", "31cdaaa7a47efe2ce0e78ebec29df4d2d81df265", "b1451721864e836069fa299a64595d1655793757", "cccd0edb5dafb3a160179a60f75fd8c835c0be82", "e7697c7b626ba3a426106d83f4c3a052fcde02a4", "66d087f3dd2e19ffe340c26ef17efe0062a59290", "6156eaad00aad74c90cbcfd822fa0c9bd4eb14c2", "def569db592ed1715ae509644444c3feda06a536", "a575009c1c25e27cdba8cc2c6930759a5416f37d", "086655743dc5f16563c012ad43b2f9d06771d9c0", "9e5809122c0880183c7e42c7edd997f92de6d81e", "5fba1b179ac80fee80548a0795d3f72b1b6e49cd", "1bd8ab47177997acb3b0cca4b6a801e6e6ec3eac", "6554ca3187b3cbe5d1221592eb546dfc11aac14b", "8149c30a86e1a7db4b11965fe209fe0b75446a8c", "63ce37da6c0c789099307337bb913e1104473854", "09f853ce12f7361c4b50c494df7ce3b9fad1d221", "c81b27932069e6c7016bfcaa5e861b99ac617934", "bd8e2d27987be9e13af2aef378754f89ab20ce10", "353a89c277cca3e3e4e8c6a199ae3442cdad59b5", "c8a4b4fe5ff2ace9ab9171a9a24064b5a91207a3", "d8f72f50cbe6e0fa4025bc990b7e8a52cc6bbad9", "862f2d84b4230d64ddb3e48967ad417089f2c291", "add50a7d882eb38e35fe70d11cb40b1f0059c96f", "b52886610eda6265a2c1aaf04ce209c047432b6d", "f8c94afd478821681a1565d463fc305337b02779", "88f2952535df5859c8f60026f08b71976f8e19ec", "0ac442bb570b086d04c4d51a8410fcbfd0b1779d", "1246534c3104da030fdb9e041819257e0d57dcbf", "72a03f06fcbf6af92fb3002e2fd9d43e75fd113e", "01b4b32c5ef945426b0396d32d2a12c69c282e29", "6c28b3550f57262889fe101e5d027912eb39564e", "45fbeed124a8956477dbfc862c758a2ee2681278", "e1dd586842419f3c40c0d7b70c120cdea72f5b5c", "a6590c49e44aa4975b2b0152ee21ac8af3097d80", "d79f9ada35e4410cd255db39d7cc557017f8111a", 
"621ff353960d5d9320242f39f85921f72be69dc8", "861802ac19653a7831b314cd751fd8e89494ab12", "d50751da2997e7ebc89244c88a4d0d18405e8507", "c847de9faa1f1a06d5647949a23f523f84aba7f3", "569988e19ab36582d4bd0ec98e344cbacf177f45", "4157e45f616233a0874f54a59c3df001b9646cd7", "c8292aa152a962763185e12fd7391a1d6df60d07", "d1184939e06dbc3b495c883c53b684c6d6aa9e48", "edde81b2bdd61bd757b71a7b3839b6fef81f4be4", "f3b84a03985de3890b400b68e2a92c0a00afd9d0", "a29566375836f37173ccaffa47dea25eb1240187", "5fea59ccdab484873081eaa37af88e26e3db2aed", "94325522c9be8224970f810554611d6a73877c13", "4e32fbb58154e878dd2fd4b06398f85636fd0cf4", "61262450d4d814865a4f9a84299c24daa493f66e", "b503793943a17d2f569685cd17e86b5b4fffe3fd", "c1cc2a2a1ab66f6c9c6fabe28be45d1440a57c3d", "7f6cd03e3b7b63fca7170e317b3bb072ec9889e0", "14aed1b7c08c941b1d2ba6c1c2ffb6255c306c74", "6ed27a41214716259676b6949999cdf4b12d0bdd", "da2b2be4c33e221c7f417875a6c5c74043b1b227", "8c7f4c11b0c9e8edf62a0f5e6cf0dd9d2da431fa", "aed321909bb87c81121c841b21d31509d6c78f69", "f7ba77d23a0eea5a3034a1833b2d2552cb42fb7a", "a3f69a073dcfb6da8038607a9f14eb28b5dab2db", "4efd58102ff46b7435c9ec6d4fc3dd21d93b15b4", "84ae55603bffda40c225fe93029d39f04793e01f", "6097ea6fd21a5f86a10a52e6e4dd5b78a436d5bf"] \ No newline at end of file
diff --git a/megapixels/app/models/sql_factory.py b/megapixels/app/models/sql_factory.py
new file mode 100644
index 00000000..e35c3e15
--- /dev/null
+++ b/megapixels/app/models/sql_factory.py
@@ -0,0 +1,152 @@
+import os
+import glob
+import time
+import pandas as pd
+
+from sqlalchemy import create_engine, Table, Column, String, Integer, DateTime, Float
+from sqlalchemy.orm import sessionmaker
+from sqlalchemy.ext.declarative import declarative_base
+
+from app.utils.file_utils import load_recipe, load_csv_safe
+from app.settings import app_cfg as cfg
+
+connection_url = "mysql+mysqldb://{}:{}@{}/{}".format(
+ os.getenv("DB_USER"),
+ os.getenv("DB_PASS"),
+ os.getenv("DB_HOST"),
+ os.getenv("DB_NAME")
+)
+
+datasets = {}
+loaded = False
+
+def list_datasets():
+ return [dataset.describe() for dataset in datasets.values()]
+
+def get_dataset(name):
+ return datasets[name] if name in datasets else None
+
+def get_table(name, table_name):
+ dataset = get_dataset(name)
+ return dataset.get_table(table_name) if dataset else None
+
+def load_sql_datasets(replace=False, base_model=None):
+ global datasets, loaded
+ if loaded:
+ return datasets
+ engine = create_engine(connection_url) if replace else None
+ for path in glob.iglob(os.path.join(cfg.DIR_FAISS_METADATA, "*")):
+ dataset = load_sql_dataset(path, replace, engine, base_model)
+ datasets[dataset.name] = dataset
+ loaded = True
+ return datasets
+
+def load_sql_dataset(path, replace=False, engine=None, base_model=None):
+ name = os.path.basename(path)
+ dataset = SqlDataset(name, base_model=base_model)
+
+ for fn in glob.iglob(os.path.join(path, "*.csv")):
+ key = os.path.basename(fn).replace(".csv", "")
+ table = dataset.get_table(key)
+ if table is None:
+ continue
+ if replace:
+ print('loading dataset {}'.format(fn))
+ df = pd.read_csv(fn)
+ # fix columns that are named "index", a sql reserved word
+ df.columns = table.__table__.columns.keys()
+ df.to_sql(name=table.__tablename__, con=engine, if_exists='replace', index=False)
+ return dataset
+
+class SqlDataset:
+ """
+ Bridge between each dataset's facial-information CSV files and MySQL
+ - each dataset should have files that can be loaded into these database models
+ - names will be fixed to work in SQL (index -> id)
+ - we can then have more generic models for fetching this info after doing a FAISS query
+ """
+ def __init__(self, name, engine=None, base_model=None):
+ self.name = name
+ self.tables = {}
+ if base_model is None:
+ self.engine = create_engine(connection_url)
+ base_model = declarative_base(engine)
+ self.base_model = base_model
+
+ def describe(self):
+ return {
+ 'name': self.name,
+ 'tables': list(self.tables.keys()),
+ }
+
+ def get_table(self, type):
+ if type in self.tables:
+ return self.tables[type]
+ elif type == 'uuids':
+ self.tables[type] = self.uuid_table()
+ elif type == 'roi':
+ self.tables[type] = self.roi_table()
+ elif type == 'identity_meta':
+ self.tables[type] = self.identity_table()
+ elif type == 'pose':
+ self.tables[type] = self.pose_table()
+ else:
+ return None
+ return self.tables[type]
+
+ # ==> uuids.csv <==
+ # index,uuid
+ # 0,f03fd921-2d56-4e83-8115-f658d6a72287
+ def uuid_table(self):
+ class UUID(self.base_model):
+ __tablename__ = self.name + "_uuid"
+ id = Column(Integer, primary_key=True)
+ uuid = Column(String(36), nullable=False)
+ return UUID
+
+ # ==> roi.csv <==
+ # index,h,image_height,image_index,image_width,w,x,y
+ # 0,0.33000000000000007,250,0,250,0.32999999999999996,0.33666666666666667,0.35
+ def roi_table(self):
+ class ROI(self.base_model):
+ __tablename__ = self.name + "_roi"
+ id = Column(Integer, primary_key=True)
+ h = Column(Float, nullable=False)
+ image_height = Column(Integer, nullable=False)
+ image_index = Column(Integer, nullable=False)
+ image_width = Column(Integer, nullable=False)
+ w = Column(Float, nullable=False)
+ x = Column(Float, nullable=False)
+ y = Column(Float, nullable=False)
+ return ROI
+
+ # ==> identity.csv <==
+ # index,fullname,description,gender,images,image_index
+ # 0,A. J. Cook,Canadian actress,f,1,0
+ def identity_table(self):
+ class Identity(self.base_model):
+ __tablename__ = self.name + "_identity"
+ id = Column(Integer, primary_key=True)
+ fullname = Column(String(36), nullable=False)
+ description = Column(String(36), nullable=False)
+ gender = Column(String(1), nullable=False)
+ images = Column(Integer, nullable=False)
+ image_id = Column(Integer, nullable=False)
+ return Identity
+
+ # ==> pose.csv <==
+ # index,image_index,pitch,roll,yaw
+ # 0,0,11.16264458441435,10.415885631337728,22.99719032415318
+ def pose_table(self):
+ class Pose(self.base_model):
+ __tablename__ = self.name + "_pose"
+ id = Column(Integer, primary_key=True)
+ image_id = Column(Integer, primary_key=True)
+ pitch = Column(Float, nullable=False)
+ roll = Column(Float, nullable=False)
+ yaw = Column(Float, nullable=False)
+ return Pose
+
+
+# Session = sessionmaker(bind=engine)
+# session = Session()
diff --git a/megapixels/app/server/api.py b/megapixels/app/server/api.py
new file mode 100644
index 00000000..c5e27dd2
--- /dev/null
+++ b/megapixels/app/server/api.py
@@ -0,0 +1,54 @@
+import os
+import re
+import time
+from flask import Blueprint, request, jsonify
+from PIL import Image # todo: try to remove PIL dependency
+
+from app.models.sql_factory import list_datasets, get_dataset, get_table
+
+sanitize_re = re.compile('[\W]+')
+valid_exts = ['.gif', '.jpg', '.jpeg', '.png']
+
+api = Blueprint('api', __name__)
+
+@api.route('/')
+def index():
+ return jsonify({ 'datasets': list_datasets() })
+
+@api.route('/dataset/<name>')
+def show(name):
+ dataset = get_dataset(name)
+ if dataset:
+ return jsonify(dataset.describe())
+ else:
+ return jsonify({ 'status': 404 })
+
+@api.route('/dataset/<dataset>/face', methods=['POST'])
+def upload(name):
+ file = request.files['query_img']
+ fn = file.filename
+ if fn.endswith('blob'):
+ fn = 'filename.jpg'
+
+ basename, ext = os.path.splitext(fn)
+ print("got {}, type {}".format(basename, ext))
+ if ext.lower() not in valid_exts:
+ return jsonify({ 'error': 'not an image' })
+
+ img = Image.open(file.stream).convert('RGB')
+
+ # vec = db.load_feature_vector_from_file(uploaded_img_path)
+ # vec = fe.extract(img)
+ # print(vec.shape)
+ # results = db.search(vec, limit=limit)
+
+ query = {
+ 'timing': time.time() - start,
+ }
+ results = []
+
+ print(results)
+ return jsonify({
+ 'query': query,
+ 'results': results,
+ })
diff --git a/megapixels/app/server/create.py b/megapixels/app/server/create.py
new file mode 100644
index 00000000..9efed669
--- /dev/null
+++ b/megapixels/app/server/create.py
@@ -0,0 +1,36 @@
+from flask import Flask, Blueprint, jsonify
+from flask_sqlalchemy import SQLAlchemy
+from app.models.sql_factory import connection_url, load_sql_datasets
+
+from app.server.api import api
+
+db = SQLAlchemy()
+
+def create_app(script_info=None):
+ app = Flask(__name__, static_url_path='')
+ app.config['SQLALCHEMY_DATABASE_URI'] = connection_url
+ app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
+
+ db.init_app(app)
+ datasets = load_sql_datasets(replace=False, base_model=db.Model)
+
+ app.register_blueprint(api, url_prefix='/api')
+
+ @app.route('/', methods=['GET'])
+ def index():
+ return app.send_static_file('index.html')
+
+ @app.shell_context_processor
+ def shell_context():
+ return { 'app': app, 'db': db }
+
+ @app.route("/site-map")
+ def site_map():
+ links = []
+ for rule in app.url_map.iter_rules():
+ # url = url_for(rule.endpoint, **(rule.defaults or {}))
+ # print(url)
+ links.append((rule.endpoint))
+ return(jsonify(links))
+
+ return app
diff --git a/megapixels/app/server/static b/megapixels/app/server/static
new file mode 120000
index 00000000..1dc7a639
--- /dev/null
+++ b/megapixels/app/server/static
@@ -0,0 +1 @@
+../../../site/public \ No newline at end of file
diff --git a/megapixels/app/settings/app_cfg.py b/megapixels/app/settings/app_cfg.py
index e915b5d4..9ea4b72b 100644
--- a/megapixels/app/settings/app_cfg.py
+++ b/megapixels/app/settings/app_cfg.py
@@ -2,6 +2,7 @@ import os
from os.path import join
import logging
import collections
+from dotenv import load_dotenv
import cv2 as cv
@@ -49,6 +50,11 @@ DIR_MODELS_DLIB_5PT = join(DIR_MODELS_DLIB, 'shape_predictor_5_face_landmarks.da
DIR_MODELS_DLIB_68PT = join(DIR_MODELS_DLIB, 'shape_predictor_68_face_landmarks.dat')
DIR_MODELS_DLIB_FACEREC_RESNET = join(DIR_MODELS_DLIB, 'dlib_face_recognition_resnet_model_v1.dat')
+DIR_FAISS = join(DIR_APP, 'faiss')
+DIR_FAISS_INDEXES = join(DIR_FAISS, 'indexes')
+DIR_FAISS_METADATA = join(DIR_FAISS, 'metadata')
+DIR_FAISS_RECIPES = join(DIR_FAISS, 'recipes')
+
# Test images
DIR_TEST_IMAGES = join(DIR_APP, 'test', 'images')
@@ -65,6 +71,7 @@ FP_FONT = join(DIR_ASSETS, 'font')
DIR_COMMANDS_CV = 'commands/cv'
DIR_COMMANDS_ADMIN = 'commands/admin'
DIR_COMMANDS_DATASETS = 'commands/datasets'
+DIR_COMMANDS_FAISS = 'commands/faiss'
DIR_COMMANDS_MISC = 'commands/misc'
# -----------------------------------------------------------------------------
@@ -112,3 +119,10 @@ LOGFILE_FORMAT = "%(log_color)s%(levelname)-8s%(reset)s %(cyan)s%(filename)s:%(l
# -----------------------------------------------------------------------------
S3_MEDIA_ROOT = 's3://megapixels/v1/media/'
S3_METADATA_ROOT = 's3://megapixels/v1/metadata/'
+
+# -----------------------------------------------------------------------------
+# .env config for keys
+# -----------------------------------------------------------------------------
+
+DIR_DOTENV = join(DIR_APP, '.env')
+load_dotenv(dotenv_path=DIR_DOTENV)
diff --git a/megapixels/app/utils/file_utils.py b/megapixels/app/utils/file_utils.py
index 99282bd0..80239fe2 100644
--- a/megapixels/app/utils/file_utils.py
+++ b/megapixels/app/utils/file_utils.py
@@ -77,7 +77,7 @@ def load_csv(fp_in, as_list=True):
:returns: list of all CSV data
"""
if not Path(fp_in).exists():
- log.info('loading {}'.format(fp_in))
+ log.info('not found: {}'.format(fp_in))
log.info('loading: {}'.format(fp_in))
with open(fp_in, 'r') as fp:
items = csv.DictReader(fp)
@@ -86,6 +86,50 @@ def load_csv(fp_in, as_list=True):
log.info('returning {:,} items'.format(len(items)))
return items
+def unfussy_csv_reader(reader):
+ """Loads a CSV while ignoring possible data errors
+ :param reader: a csv.reader to consume; rows that raise csv.Error are skipped
+ """
+ while True:
+ try:
+ yield next(reader)
+ except StopIteration:
+ return
+ except csv.Error:
+ print(csv.Error)
+ # log the problem or whatever
+ continue
+
+def load_csv_safe(fp_in, keys=True, create=False):
+ """Loads a CSV while ignoring possible data errors
+ :param fp_in: string filepath to JSON file
+ :param keys: boolean set to false if the first line is not headers (for some reason)
+ :param create: boolean set to true to return an empty keys/values if the CSV does not exist
+ """
+ try:
+ with open(fp_in, 'r', newline='', encoding='utf-8') as f:
+ # reader = csv.reader( (line.replace('\0','') for line in f) )
+ reader = csv.reader(f)
+ lines = list(unfussy_csv_reader(reader))
+ if keys:
+ keys = lines[0]
+ lines = lines[1:]
+ return keys, lines
+ return lines
+ except:
+ if create:
+ if keys:
+ return {}, []
+ return []
+ raise
+
+def load_recipe(fp_in):
+ """Loads a JSON file as an object with properties accessible with dot syntax
+ :param fp_in: string filepath to JSON file
+ """
+ with open(path) as fh:
+ return json.load(fh, object_hook=lambda d: collections.namedtuple('X', d.keys())(*d.values()))
+
def lazywrite(data, fp_out, sort_keys=True):
"""Writes JSON or Pickle data"""
diff --git a/megapixels/cli_faiss.py b/megapixels/cli_faiss.py
new file mode 100644
index 00000000..9953d9b7
--- /dev/null
+++ b/megapixels/cli_faiss.py
@@ -0,0 +1,36 @@
+# --------------------------------------------------------
+# add/edit commands in commands/faiss directory
+# --------------------------------------------------------
+
+import click
+
+from app.settings import app_cfg as cfg
+from app.utils import logger_utils
+from app.models.click_factory import ClickSimple
+
+# click cli factory
+cc = ClickSimple.create(cfg.DIR_COMMANDS_FAISS)
+
+# --------------------------------------------------------
+# CLI
+# --------------------------------------------------------
+@click.group(cls=cc, chain=False)
+@click.option('-v', '--verbose', 'verbosity', count=True, default=4,
+ show_default=True,
+ help='Verbosity: -v DEBUG, -vv INFO, -vvv WARN, -vvvv ERROR, -vvvvv CRITICAL')
+@click.pass_context
+def cli(ctx, **kwargs):
+ """\033[1m\033[94mMegaPixels: FAISS Data Scripts\033[0m
+ """
+ ctx.opts = {}
+ # init logger
+ logger_utils.Logger.create(verbosity=kwargs['verbosity'])
+
+
+
+# --------------------------------------------------------
+# Entrypoint
+# --------------------------------------------------------
+if __name__ == '__main__':
+ cli()
+
diff --git a/megapixels/cli_flask.py b/megapixels/cli_flask.py
new file mode 100644
index 00000000..369bec01
--- /dev/null
+++ b/megapixels/cli_flask.py
@@ -0,0 +1,19 @@
+# --------------------------------------------------------
+# wrapper for flask CLI API
+# --------------------------------------------------------
+
+import click
+
+from flask.cli import FlaskGroup
+from app.server.create import create_app
+
+# from app.settings import app_cfg as cfg
+# from app.utils import logger_utils
+
+cli = FlaskGroup(create_app=create_app)
+
+# --------------------------------------------------------
+# Entrypoint
+# --------------------------------------------------------
+if __name__ == '__main__':
+ cli()
diff --git a/megapixels/commands/faiss/build_db.py b/megapixels/commands/faiss/build_db.py
new file mode 100644
index 00000000..0f979e41
--- /dev/null
+++ b/megapixels/commands/faiss/build_db.py
@@ -0,0 +1,15 @@
+"""
+Load all the CSV files into MySQL
+"""
+
+import click
+
+from app.models.sql_factory import load_sql_datasets
+
+@click.command()
+@click.pass_context
+def cli(ctx):
+ """import the various CSVs into MySQL
+ """
+ print('Loading CSV datasets into SQL...')
+ load_sql_datasets(replace=True)
diff --git a/megapixels/commands/faiss/build_faiss.py b/megapixels/commands/faiss/build_faiss.py
new file mode 100644
index 00000000..96d3f99e
--- /dev/null
+++ b/megapixels/commands/faiss/build_faiss.py
@@ -0,0 +1,58 @@
+"""
+Index all of the FAISS datasets
+"""
+
+import os
+import glob
+import click
+import faiss
+import time
+import numpy as np
+
+from app.utils.file_utils import load_recipe, load_csv_safe
+from app.settings import app_cfg as cfg
+
+engine = create_engine('sqlite:///:memory:')
+
+class DefaultRecipe:
+ def __init__(self):
+ self.dim = 128
+ self.factory_type = 'Flat'
+
+@click.command()
+@click.pass_context
+def cli(ctx):
+ """build the FAISS index.
+ - looks for all datasets in faiss/metadata/
+ - uses the recipe above by default
+ - however you can override this by adding a new recipe in faiss/recipes/{name}.json
+ """
+ datasets = []
+ for fn in glob.iglob(os.path.join(cfg.DIR_FAISS_METADATA, "*")):
+ name = os.path.basename(fn)
+ recipe_fn = os.path.join(cfg.DIR_FAISS_RECIPES, name + ".json")
+ if os.path.exists(recipe_fn):
+ build_faiss(name, load_recipe(recipe_fn))
+ else:
+ build_faiss(name, DefaultRecipe())
+
+def build_faiss(name, recipe):
+ vec_fn = os.path.join(cfg.DIR_FAISS_METADATA, name, "vecs.csv")
+ index_fn = os.path.join(cfg.DIR_FAISS_INDEXES, name + ".index")
+
+ index = faiss.index_factory(recipe.dim, recipe.factory_type)
+
+ keys, rows = load_csv_safe(vec_fn)
+ feats = np.array([ list(map(float, row[3].split(","))) for row in rows ]).astype('float32')
+ n, d = feats.shape
+
+ print("{}: training {} x {} dim vectors".format(name, n, d))
+ print(recipe.factory_type)
+
+ add_start = time.time()
+ index.add(feats)
+ add_end = time.time()
+ add_time = add_end - add_start
+ print("{}: add time: {:.1f}s".format(name, add_time))
+
+ faiss.write_index(index, index_fn)
diff --git a/megapixels/commands/faiss/sync_metadata.py b/megapixels/commands/faiss/sync_metadata.py
new file mode 100644
index 00000000..b01211b4
--- /dev/null
+++ b/megapixels/commands/faiss/sync_metadata.py
@@ -0,0 +1,18 @@
+"""
+Sync the FAISS metadata
+"""
+
+import subprocess
+import click
+
+from app.settings import app_cfg as cfg
+
+@click.command()
+@click.pass_context
+def cli(ctx):
+ """synchronize metadata files from s3"""
+ sts = subprocess.call([
+ "s3cmd", "sync",
+ "s3://megapixels/v1/metadata/",
+ cfg.DIR_FAISS_METADATA + '/',
+ ])
diff --git a/s2-papers.py b/s2-papers.py
deleted file mode 100644
index d5e8bcd4..00000000
--- a/s2-papers.py
+++ /dev/null
@@ -1,64 +0,0 @@
-import os
-import sys
-import csv
-import subprocess
-import time
-import random
-import re
-import json
-import click
-from s2 import SemanticScholarAPI
-from util import *
-
-'''
-s2 search API format:
-results
-matchedAuthors
-matchedPresentations
-query
-querySuggestions
-results
-stats
-totalPages
-totalResults
-'''
-
-s2 = SemanticScholarAPI()
-
-@click.command()
-@click.option('--index', '-n', default=0, help='Index of CSV.')
-@click.option('--depth', '-d', default=1, help='Depth to recurse.')
-def fetch_papers(index, depth):
- keys, lines = read_citation_list(index)
- for line in lines:
- label = line[0]
- title = re.sub(r'[^-0-9a-zA-Z ]+', '', line[1])
- entry_fn = './datasets/s2/entries/{}.json'.format(title)
- if not os.path.exists(entry_fn):
- print('not found: {}'.format(entry_fn))
- continue
- result = read_json(entry_fn)
- paper_id = result['id']
- paper = fetch_paper(paper_id)
- # get all of the paper's citations
-
-def fetch_paper(paper_id):
- os.makedirs('./datasets/s2/papers/{}/{}'.format(paper_id[0:2], paper_id), exist_ok=True)
- paper_fn = './datasets/s2/papers/{}/{}/paper.json'.format(paper_id[0:2], paper_id)
- if os.path.exists(paper_fn):
- return read_json(paper_fn)
- print(paper_id)
- paper = s2.paper(paper_id)
- if paper is None:
- print("Got none paper??")
- time.sleep(random.randint(20, 30))
- paper = s2.paper(paper_id)
- if paper is None:
- print("Paper not found")
- return None
- write_json(paper_fn, paper)
- time.sleep(random.randint(5, 10))
- return paper
-
-if __name__ == '__main__':
- fetch_papers()
diff --git a/scraper/README.md b/scraper/README.md
new file mode 100644
index 00000000..318bba9a
--- /dev/null
+++ b/scraper/README.md
@@ -0,0 +1,146 @@
+# megapixels dev
+
+## installation
+
+```
+conda create -n megapixels python=3.6
+pip install urllib3
+pip install requests
+pip install simplejson
+pip install click
+pip install pdfminer.six
+pip install csvtool
+npm install
+```
+
+## workflow
+
+```
+Paper in spreadsheet -> paper_name
+ -> S2 Search API -> paper_id
+ -> S2 Paper API -> citations
+ -> S2 Dataset -> full records with PDF URLs, authors, more citations
+ -> wget -> .pdf files
+ -> pdfminer.six -> pdf text
+ -> text mining -> named entities (organizations)
+ -> Geocoding service -> lat/lngs
+```
+
+To begin, export `datasets/citations.csv` from the Google doc.
+
+---
+
+## Extracting data from S2 / ORC
+
+The Open Research Corpus (ORC) is produced by the Allen Institute / Semantic Scholar (S2) / arXiv people. It may be downloaded here:
+
+http://labs.semanticscholar.org/corpus/
+
+We do a two-stage fetch process as only about 66% of their papers are in this dataset.
+
+### s2-search.py
+
+Loads titles from citations file and queries the S2 search API to get paper IDs, then uses the paper IDs from the search entries to query the S2 papers API to get first-degree citations, authors, etc.
+
+### s2-dump-ids.py
+
+Dump all the paper IDs and citation IDs from the queried papers.
+
+### s2-extract-papers.py
+
+Extracts papers from the ORC dataset which have been queried from the API.
+
+### s2-dump-missing-paper-ids.py
+
+Dump the citation IDs that were not found in the ORC dataset.
+
+### s2-raw-papers.py
+
+Some papers are not in the ORC dataset and must be scraped from S2 directly.
+
+---
+
+## Extracting data from Google Scholar
+
+Included in the content-script folder is a Chrome extension which scrapes Google Scholar through the browser, clicking the links and extracting PDF links, number of citations, etc, then saving a JSON file when it's done. Still requires work to process the output (crossreference with S2 and dump the PDFs).
+
+---
+
+## Scraping Institutions
+
+Once you have the data from S2, you can scrape all the PDFs (and other URLs) you find, and then extract institutions from those and geocode them.
+
+### s2-dump-pdf-urls.py
+
+Dump PDF urls (and also IEEE urls etc) to CSV files.
+
+### s2-fetch-pdf.py
+
+Fetch the PDFs.
+
+### s2-fetch-doi.py
+
+Fetch the files listed in ieee.json and process them.
+
+### pdf_dump_first_page.sh
+
+Use pdfminer.six to extract the first page from the PDFs.
+
+### s2-pdf-first-pages.py
+
+Perform initial extraction of university-like terms, to be geocoded.
+
+### s2-doi-report.py
+
+Extract named entities from the scraped DOI links (IEEE, ACM, etc).
+
+### s2-geocode.py
+
+Geocode lists of entities using Nominatim.
+
+### s2-citation-report.py
+
+For each paper in the citations CSV, find the corresponding paper in the database, and get all the citations.
+For each of the citations, try to find an address for each one. Embed the appropriate entries from institutions list and then render them on a leaflet map.
+
+---
+
+## Cleaning the Data
+
+After scraping these universities, we got up to 47% match rate on papers from the dataset. However there is still more to solve:
+
+- Fix the geocoding - this must be done manually - we will dedupe the entries in the entities table, then extract specific entities from the dataset.
+- Unknown addresses - we have addresses for some places but we need to a) geocode them again b) geocode just the city or something
+- Match across multiple lines
+- Empty addresses - some papers need to be gone through by hand? Maybe we can do digram/trigram analysis on the headings. Just finding common words would help.
+- Make a list of bogus papers - ones where PDFminer returned empty results, or which did not contain the word ABSTRACT, or were too long.
+
+### expand-uni-lookup.py
+
+By now I had a list of institutions in `reports/all_institutions.csv` (done by merging the results of the geocoding, as I had done this on 4 computers and thus had 4 files of institutions). This file must be gone through manually. This technique geocoded around 47% of papers.
+
+At this point I moved `reports/all_institutions.csv` into the Google Sheets. All further results use the CSV on Google Sheets.
+
+### s2-pdf-report.py
+
+Generates reports of things from the PDFs that were not found.
+
+### s2-geocode-spreadsheet.py
+
+To add new institutions, simply list them in the spreadsheet with the lat/lng fields empty. Then run this script and anything missing a lat/lng will get one.
+
+### s2-citation-report.py
+
+Generate the main report with maps and citation lists.
+
+---
+
+## Useful scripts for batch processing
+
+### split-csv.py
+
+Shuffle and split a CSV into multiple files.
+
+### merge-csv.py
+
+Merge a folder of CSVs into a single file, deduping based on the first column.
diff --git a/scraper/__init__.py b/scraper/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/scraper/__init__.py
diff --git a/check-counts.py b/scraper/check-counts.py
index 4fed4494..4fed4494 100644
--- a/check-counts.py
+++ b/scraper/check-counts.py
diff --git a/scraper/client/actions.js b/scraper/client/actions.js
new file mode 100644
index 00000000..ba899f06
--- /dev/null
+++ b/scraper/client/actions.js
@@ -0,0 +1,9 @@
+import * as search from './search/search.actions'
+import * as review from './review/review.actions'
+import * as metadata from './metadata/metadata.actions'
+
+export {
+ search,
+ review,
+ metadata,
+}
diff --git a/scraper/client/app.js b/scraper/client/app.js
new file mode 100644
index 00000000..6c008ec6
--- /dev/null
+++ b/scraper/client/app.js
@@ -0,0 +1,46 @@
+import React, { Component } from 'react'
+import { ConnectedRouter } from 'connected-react-router'
+import { Route, Switch } from 'react-router'
+
+import { Header, Sidebar, Footer } from './common'
+import * as Metadata from './metadata'
+import * as Search from './search'
+import * as Review from './review'
+
+export default class App extends Component {
+ render() {
+ return (
+ <ConnectedRouter history={this.props.history}>
+ <div>
+ <Header />
+ <div className='app'>
+ <Route path="/metadata/" component={Sidebar} />
+ <div className='body'>
+ <Route path="/search/" component={Search.Menu} />
+ <Route path="/metadata/:hash/" component={Metadata.Heading} />
+ <Switch>
+ <Route exact path="/metadata/:hash/summary/" component={Metadata.Summary} />
+ <Route exact path="/metadata/:hash/mediaRecord/" component={Metadata.MediaRecord} />
+ <Route exact path="/metadata/:hash/mediaInfo/" component={Metadata.MediaInfo} />
+ <Route exact path="/metadata/:hash/keyframe/:frame/" component={Metadata.KeyframeSingle} />
+ <Route exact path="/metadata/:hash/keyframe/" component={Metadata.KeyframeList} />
+ <Route exact path="/metadata/:hash/coco/" component={Metadata.Coco} />
+ <Route exact path="/metadata/:hash/places365/" component={Metadata.Places365} />
+ <Route exact path="/metadata/:hash/sugarcube/" component={Metadata.Sugarcube} />
+ <Route exact path="/metadata/:hash/" component={Metadata.Summary} />
+ <Route exact path="/metadata/" render={() => <div className='notFound'><h4>NOT FOUND</h4></div>} />
+ <Route exact path="/search/" component={Search.Container} />
+ <Route exact path="/search/keyframe/:verified/:hash/:frame/" component={Search.Container} />
+ <Route exact path="/search/keyframe/:hash/:frame/" component={Search.Container} />
+ <Route exact path="/search/browse/:hash/" component={Search.Browse} />
+ <Route exact path="/search/random/" component={Search.Random} />
+ <Route exact path="/search/review/" component={Review.Saved} />
+ </Switch>
+ </div>
+ </div>
+ <Footer />
+ </div>
+ </ConnectedRouter>
+ )
+ }
+}
diff --git a/scraper/client/common/activeLink.component.js b/scraper/client/common/activeLink.component.js
new file mode 100644
index 00000000..59f63881
--- /dev/null
+++ b/scraper/client/common/activeLink.component.js
@@ -0,0 +1,16 @@
+import React from 'react'
+import { NavLink } from 'react-router-dom'
+
+export default function ActiveLink({
+ to,
+ className = 'navlink',
+ children
+}) {
+ return (
+ <span className={className}>
+ <NavLink to={to}>
+ {children}
+ </NavLink>
+ </span>
+ )
+}
diff --git a/scraper/client/common/classifier.component.js b/scraper/client/common/classifier.component.js
new file mode 100644
index 00000000..af6a4934
--- /dev/null
+++ b/scraper/client/common/classifier.component.js
@@ -0,0 +1,99 @@
+import React, { Component } from 'react'
+import { courtesyS } from '../util'
+
+import { TableTuples, DetectionList, Keyframe } from '.'
+
+export default class Classifier extends Component {
+ render() {
+ const {
+ tag,
+ sha256,
+ verified,
+ keyframes = {},
+ labels,
+ summary,
+ aspectRatio = 1.777,
+ showAll,
+ } = this.props
+ let totalDetections = 0
+ const keys = Object
+ .keys(keyframes)
+ .map(s => parseInt(s, 10))
+ const validKeyframes = keys
+ .sort((a, b) => a - b)
+ .map(frame => {
+ const detections = keyframes[frame]
+ if (detections.length || showAll) {
+ totalDetections += detections.length
+ return { frame, detections }
+ }
+ return null
+ })
+ .filter(f => !!f)
+ const detectionLookup = validKeyframes
+ .reduce((a, b) => {
+ b.detections.reduce((aa, { idx }) => {
+ if (!(idx in aa)) aa[idx] = [labels[idx], 0]
+ aa[idx][1] += 1
+ return aa
+ }, a)
+ return a
+ }, {})
+ const detectionCounts = Object.keys(detectionLookup)
+ .map(idx => detectionLookup[idx])
+ .sort((a, b) => b[1] - a[1])
+
+ if (summary) {
+ return (
+ <div>
+ <h3>{tag}{' Detections'}</h3>
+ <TableTuples
+ list={detectionCounts}
+ />
+ </div>
+ )
+ }
+ return (
+ <div>
+ <h2>{tag}</h2>
+ <h3>Detections</h3>
+ <TableTuples
+ list={detectionCounts}
+ />
+ <h3>Frames</h3>
+ <ul className='meta'>
+ <li>
+ {'Displaying '}{validKeyframes.length}{' / '}{courtesyS(keys.length, 'frame')}
+ </li>
+ <li>
+ {courtesyS(totalDetections, 'detection')}{' found'}
+ </li>
+ </ul>
+ <div className='thumbnails'>
+ {validKeyframes.map(({ frame, detections }) => (
+ <Keyframe
+ key={frame}
+ sha256={sha256}
+ frame={frame}
+ verified={verified}
+ size='th'
+ showFrame
+ showTimestamp
+ aspectRatio={aspectRatio}
+ detectionList={[
+ { labels, detections }
+ ]}
+ >
+ <DetectionList
+ labels={labels}
+ detections={detections}
+ width={160}
+ height={90}
+ />
+ </Keyframe>
+ ))}
+ </div>
+ </div>
+ )
+ }
+}
diff --git a/scraper/client/common/common.css b/scraper/client/common/common.css
new file mode 100644
index 00000000..4b939df0
--- /dev/null
+++ b/scraper/client/common/common.css
@@ -0,0 +1,347 @@
+/* css boilerplate */
+
+* { box-sizing: border-box; }
+html,body {
+ margin: 0; padding: 0;
+ width: 100%; height: 100%;
+}
+body {
+ font-family: Helvetica, sans-serif;
+ font-weight: 300;
+}
+
+h1 {
+
+}
+h2 {
+ font-weight: normal;
+ margin: 10px 0;
+ padding: 3px;
+ font-size: 24px;
+}
+h3 {
+ font-weight: normal;
+ margin: 10px 0 0 0;
+ padding: 3px;
+ font-size: 18px;
+}
+h4 {
+ font-weight: 300;
+ font-size: 12px;
+ letter-spacing: 2px;
+ color: #888;
+ text-transform: uppercase;
+ margin: 5px 10px;
+ margin-top: 20px;
+}
+h4:first-child {
+ margin-top: 10px;
+}
+
+.app {
+ width: 100%;
+ height: 100%;
+ display: flex;
+ flex-direction: row;
+ align-items: flex-start;
+ justify-content: flex-start;
+}
+
+/* header stuff */
+
+header {
+ width: 100%;
+ background: #11f;
+ color: white;
+ align-items: stretch;
+ display: flex;
+ flex-wrap: wrap;
+ justify-content: space-between;
+ z-index: 3;
+}
+header > section {
+ justify-content: flex-start;
+ align-items: center;
+ display: flex;
+ flex: 1 0;
+ font-weight: bold;
+}
+header > section:last-of-type {
+ justify-content: flex-end;
+}
+
+/* sidebar / body columns */
+
+.sidebar {
+ display: flex;
+ flex-direction: column;
+ justify-content: flex-start;
+ align-items: flex-start;
+ height: 100%;
+ float: left;
+ width: 200px;
+ flex: 0 0 200px;
+ padding: 10px;
+ margin-right: 10px;
+}
+.sidebar a {
+ display: block;
+ padding: 10px 10px;
+ text-decoration: none;
+ color: #444;
+}
+.sidebar a.active {
+ font-weight: bold;
+ color: #222;
+}
+.body {
+ display: flex;
+ flex-direction: column;
+ align-items: flex-start;
+ justify-content: flex-start;
+ flex-grow: 1;
+}
+.body > div {
+ padding-bottom: 40px;
+}
+
+/* buttons / forms */
+
+.btn:focus, .btn:hover {
+ background: #f1f1fc;
+ color: #4b48d6 !important;
+ text-decoration: none;
+}
+.btn {
+ -webkit-appearance: none;
+ -moz-appearance: none;
+ appearance: none;
+ background: #fff;
+ border: .05rem solid;
+ border-radius: 2px;
+ margin-right: 5px;
+ color: #11f;
+ cursor: pointer;
+ display: inline-block;
+ font-size: .8rem;
+ height: 1.8rem;
+ line-height: 1rem;
+ outline: none;
+ padding: .35rem .4rem;
+ text-align: center;
+ text-decoration: none;
+ -webkit-transition: all .2s ease;
+ -o-transition: all .2s ease;
+ transition: all .2s ease;
+ -webkit-user-select: none;
+ -moz-user-select: none;
+ -ms-user-select: none;
+ user-select: none;
+ vertical-align: middle;
+ white-space: nowrap;
+}
+.btn.reset,
+.btn.panic {
+ color: #b00;
+}
+.btn.btn-primary {
+ background: #11f;
+ border-color: #11f;
+ color: white;
+}
+.btn[disabled] {
+ color: #bbb !important;
+ border-color: #bbb !important;
+ background: white !important;
+ cursor: default;
+}
+.btn.btn-primary:focus,
+.btn.btn-primary:hover {
+ background: #0808ee;
+ color: white !important;
+}
+.row .btn {
+ margin: 0 5px 0 0;
+}
+input[type=text] {
+ border: 1px solid #888;
+ padding: 4px;
+ font-size: 15px;
+}
+
+
+/* tables on metadata pages */
+
+table {
+ border: 0;
+ margin: 0;
+ padding: 0;
+ border-spacing: 0;
+}
+.tableObject td,
+.tableObject th {
+ padding: 3px;
+ vertical-align: top;
+}
+.tableObject hr {
+ width: 100%;
+ color: transparent;
+ border: 0;
+ border-bottom: 1px solid #bbb;
+ align: left;
+ margin: 3px 0;
+ padding: 0;
+}
+.tableObject th,
+.tableTuples th {
+ min-width: 145px;
+ text-align: left;
+ text-transform: capitalize;
+ padding: 3px;
+ padding-right: 10px;
+ font-weight: 300;
+ color: #333;
+}
+.tableTuples td {
+ text-align: right;
+ padding: 3px;
+}
+.tableObject td {
+ font-weight: normal;
+ color: #000;
+}
+.tableObject .tableObject {
+ border: 1px solid #ddd;
+}
+.tableArray {
+ border: 1px solid #ddd;
+ border-spacing: 0;
+}
+.tableArray td {
+ border-bottom: 1px solid #ddd;
+}
+.gray {
+ font-size: 12px;
+ color: #888;
+ display: block;
+}
+.sha256.heading {
+ margin: 20px 0 0px;
+}
+.gray span {
+ padding-right: 5px;
+}
+.gray {
+ margin-bottom: 10px;
+}
+.gray a {
+ color: #666;
+}
+
+.verified {
+ color: #080;
+ font-weight: bold;
+}
+.unverified {
+ color: #f00;
+ font-weight: 300;
+}
+
+.loading, .error {
+ font-weight: normal;
+ margin: 10px 0;
+ padding: 3px;
+ font-size: 24px;
+}
+
+.title {
+ text-transform: capitalize;
+}
+.rect {
+ position: absolute;
+}
+.rect { border: 1px solid rgba(0,0,255); background-color: rgba(0,0,255,0.1); }
+
+/* videos / video preloader */
+
+video {
+ max-width: 640px;
+ margin: 10px 0;
+}
+.video {
+ margin: 0 0 10px 0;
+}
+.video .bg {
+ cursor: pointer;
+ position: relative;
+ background-size: cover;
+}
+.video .play {
+ position: absolute;
+ top: 50%;
+ left: 50%;
+ transform: translate3d(-50%, -50%, 0);
+ width: 20%;
+ height: 20%;
+ background-image: url(/search/static/img/play.png);
+ background-position: center center;
+ background-size: contain;
+ background-repeat: no-repeat;
+}
+.desktop .video .play:hover {
+ -webkit-filter: invert(60%) sepia(100%) saturate(500%) hue-rotate(160deg);
+}
+
+/* spectre.css loader */
+
+.loaderWrapper {
+ display: inline-block;
+ position: relative;
+ width: .8rem;
+ height: .8rem;
+ padding: 10px;
+}
+.loader {
+ color: transparent !important;
+ min-height: .8rem;
+ pointer-events: none;
+ position: relative;
+}
+
+.loader::after {
+ animation: loader 500ms infinite linear;
+ border: .1rem solid #5755d9;
+ border-radius: 50%;
+ border-right-color: transparent;
+ border-top-color: transparent;
+ content: "";
+ display: block;
+ height: .8rem;
+ left: 50%;
+ margin-left: -.4rem;
+ margin-top: -.4rem;
+ position: absolute;
+ top: 50%;
+ width: .8rem;
+ z-index: 1;
+}
+
+.loader.loader-lg {
+ min-height: 2rem;
+}
+
+.loader.loader-lg::after {
+ height: 1.6rem;
+ margin-left: -.8rem;
+ margin-top: -.8rem;
+ width: 1.6rem;
+}
+
+@keyframes loader {
+ 0% {
+ transform: rotate(0deg);
+ }
+ 100% {
+ transform: rotate(360deg);
+ }
+} \ No newline at end of file
diff --git a/scraper/client/common/detectionBoxes.component.js b/scraper/client/common/detectionBoxes.component.js
new file mode 100644
index 00000000..c4872ea8
--- /dev/null
+++ b/scraper/client/common/detectionBoxes.component.js
@@ -0,0 +1,15 @@
+import React from 'react'
+
+import { px } from '../util'
+
+export default function DetectionBoxes({ detections, width, height }) {
+ return detections.map(({ rect }, i) => (
+ rect &&
+ <div className='rect' key={i} style={{
+ left: px(rect[0], width),
+ top: px(rect[1], height),
+ width: px(rect[2] - rect[0], width),
+ height: px(rect[3] - rect[1], height),
+ }} />
+ ))
+}
diff --git a/scraper/client/common/detectionList.component.js b/scraper/client/common/detectionList.component.js
new file mode 100644
index 00000000..416e66d8
--- /dev/null
+++ b/scraper/client/common/detectionList.component.js
@@ -0,0 +1,16 @@
+import React from 'react'
+
+export default function DetectionList({ detections, labels, tag, showEmpty }) {
+ return (
+ <span className='detectionList'>
+ {tag && <h3>{tag}</h3>}
+ {!detections.length && showEmpty && <label><small>No detections</small></label>}
+ {detections.map(({ idx, score, rect }, i) => (
+ <label key={i}>
+ <small className='title'>{(labels[idx] || 'Unknown').replace(/_/, ' ')}</small>
+ <small>{score.toFixed(2)}</small>
+ </label>
+ ))}
+ </span>
+ )
+}
diff --git a/scraper/client/common/footer.component.js b/scraper/client/common/footer.component.js
new file mode 100644
index 00000000..7c82b44b
--- /dev/null
+++ b/scraper/client/common/footer.component.js
@@ -0,0 +1,10 @@
+import React from 'react'
+import { Link } from 'react-router-dom'
+import { connect } from 'react-redux'
+
+export default function Footer(props) {
+ return (
+ <footer>
+ </footer>
+ );
+}
diff --git a/scraper/client/common/gate.component.js b/scraper/client/common/gate.component.js
new file mode 100644
index 00000000..9bf9287b
--- /dev/null
+++ b/scraper/client/common/gate.component.js
@@ -0,0 +1,21 @@
+import React from 'react'
+import { connect } from 'react-redux'
+
+function Gate(props) {
+ const { app, tag, View } = props
+ const data = app[tag]
+ if (!data) return null
+ if (data === 'loading') {
+ return <div className='tableObject loading'>{tag}{': Loading'}</div>
+ }
+ if (data.err) {
+ return <div className='tableObject error'>{tag}{' Error: '}{data.err}</div>
+ }
+ return <View data={data} {...props} />
+}
+
+const mapStateToProps = state => ({
+ app: state.metadata
+})
+
+export default connect(mapStateToProps)(Gate)
diff --git a/scraper/client/common/header.component.js b/scraper/client/common/header.component.js
new file mode 100644
index 00000000..84fe306f
--- /dev/null
+++ b/scraper/client/common/header.component.js
@@ -0,0 +1 @@
+/* imported from main vcat application */
diff --git a/scraper/client/common/index.js b/scraper/client/common/index.js
new file mode 100644
index 00000000..ad9fe5e1
--- /dev/null
+++ b/scraper/client/common/index.js
@@ -0,0 +1,36 @@
+import Header from 'vcat-header'
+
+import ActiveLink from './activeLink.component'
+import Classifier from './classifier.component'
+import DetectionBoxes from './detectionBoxes.component'
+import DetectionList from './detectionList.component'
+// import Header from './header.component'
+import Footer from './footer.component'
+import Loader from './loader.component'
+import Sidebar from './sidebar.component'
+import Gate from './gate.component'
+import Keyframe from './keyframe.component'
+import Keyframes from './keyframes.component'
+import Video from './video.component'
+import { TableObject, TableArray, TableTuples, TableRow, TableCell } from './table.component'
+import './common.css'
+
+export {
+ Header,
+ Footer,
+ Sidebar,
+ Loader,
+ Gate,
+ TableObject,
+ TableArray,
+ TableTuples,
+ TableRow,
+ TableCell,
+ ActiveLink,
+ Classifier,
+ DetectionList,
+ DetectionBoxes,
+ Keyframe,
+ Keyframes,
+ Video,
+}
diff --git a/scraper/client/common/keyframe.component.js b/scraper/client/common/keyframe.component.js
new file mode 100644
index 00000000..c77db3ac
--- /dev/null
+++ b/scraper/client/common/keyframe.component.js
@@ -0,0 +1,118 @@
+import React from 'react'
+import { Link } from 'react-router-dom'
+import { imageUrl, timestamp, keyframeUri, widths, verify } from '../util'
+import { DetectionBoxes } from '.'
+
+import * as searchActions from '../search/search.actions'
+
+export default function Keyframe({
+ verified,
+ sha256,
+ frame,
+ score,
+ isSaved,
+ fps = 25,
+ size = 'th',
+ className,
+ showHash,
+ showFrame,
+ showTimestamp,
+ showScore,
+ showSearchButton,
+ showSaveButton,
+ to,
+ children,
+ detectionList = [],
+ aspectRatio = 1.777,
+ onClick,
+ reviewActions,
+}) {
+ if (!sha256) return null
+ const width = widths[size]
+ const height = Math.round(width / aspectRatio)
+ return (
+ <div className={(className || 'keyframe') + (isSaved ? ' isSaved' : '')}>
+ <div className="thumbnail">
+ <PossiblyExternalLink to={to || keyframeUri(sha256, frame)} onClick={onClick}>
+ <img
+ alt={'Frame #' + frame}
+ src={imageUrl(verified, sha256, frame, size)}
+ width={width}
+ height={height}
+ onClick={onClick}
+ />
+ {detectionList.map(({ labels, detections }, i) => (
+ <DetectionBoxes
+ key={i}
+ labels={labels}
+ detections={detections}
+ width={width}
+ height={height}
+ />
+ ))}
+ </PossiblyExternalLink>
+ {(reviewActions && (showSearchButton || showSaveButton)) &&
+ <label className='searchButtons'>
+ {showSearchButton &&
+ <Link
+ to={searchActions.publicUrl.searchByVerifiedFrame(verified, sha256, frame)}
+ className='btn'
+ >
+ Search
+ </Link>
+ }
+ {showSaveButton && (isSaved
+ ? <button
+ onClick={() => reviewActions.unsave({ hash: sha256, frame, verified })}
+ className={'btn btn-primary saved'}
+ >
+ {'Saved'}
+ </button>
+ : <button
+ onClick={() => reviewActions.save({ hash: sha256, frame, verified })}
+ className={'btn btn save'}
+ >
+ {'Save'}
+ </button>
+ )}
+ </label>
+ }
+ </div>
+ {(showHash || showFrame || showTimestamp || showScore) &&
+ <label>
+ {showHash &&
+ <small>
+ <Link to={searchActions.publicUrl.browse(sha256)}>
+ <span
+ title={sha256}
+ className={'sha256 ' + verify(verified)}
+ >
+ {'▶ '}
+ {sha256.substr(0, 6)}
+ </span>
+ </Link>
+ </small>
+ }
+ {showFrame &&
+ <small>
+ <span>{'Frame #'}{frame}</span>
+ </small>
+ }
+ {showTimestamp && <small>{timestamp(frame, fps)}</small>}
+ {showScore && !!score && <small>{score}</small>}
+ </label>
+ }
+ {children}
+ </div>
+ )
+}
+
+const PossiblyExternalLink = props => {
+ if (props.onClick) {
+ return props.children
+ }
+ if (props.to.match(/^http/)) {
+ return <a href={props.to} target='_blank' rel='noopener noreferrer'>{props.children}</a>
+ }
+ return <Link {...props} />
+}
diff --git a/scraper/client/common/keyframes.component.js b/scraper/client/common/keyframes.component.js
new file mode 100644
index 00000000..62eda45e
--- /dev/null
+++ b/scraper/client/common/keyframes.component.js
@@ -0,0 +1,95 @@
+import React from 'react'
+import { Link } from 'react-router-dom'
+import { bindActionCreators } from 'redux'
+import { connect } from 'react-redux'
+
+import { Keyframe } from '.'
+import * as reviewActions from '../review/review.actions'
+import * as searchActions from '../search/search.actions'
+
+function Keyframes(props) {
+ // console.log(props)
+ let {
+ frames,
+ groupByHash,
+ } = props
+ let minDistance = 0
+ if (frames && frames.length) {
+ minDistance = frames[0].distance || 0
+ }
+ if (!groupByHash) {
+ return (
+ <KeyframeList
+ minDistance={minDistance}
+ {...props}
+ />
+ )
+ }
+ const frameGroups = frames.reduce((a, b) => {
+ if (a[b.hash]) {
+ a[b.hash].push(b)
+ } else {
+ a[b.hash] = [b]
+ }
+ return a
+ }, {})
+ return Object.keys(frameGroups)
+ .map(hash => [frameGroups[hash].length, hash])
+ .sort((a, b) => b[0] - a[0])
+ .map(([count, hash]) => (
+ <KeyframeList
+ {...props}
+ count={count}
+ key={hash}
+ minDistance={minDistance}
+ frames={frameGroups[hash]}
+ label={hash}
+ />
+ ))
+}
+
+function KeyframeList(props) {
+ let {
+ saved = {},
+ frames,
+ options,
+ review,
+ search,
+ minDistance,
+ label,
+ count,
+ ...frameProps
+ } = props
+ if (!frames) return null
+ return (
+ <div className={label ? 'keyframes keyframeGroup' : 'keyframes'}>
+ {label && <h4><Link to={searchActions.publicUrl.browse(label)}>{label}</Link> ({count})</h4>}
+ {frames.map(({ hash, frame, verified, distance }) => (
+ <Keyframe
+ key={hash + '_' + frame}
+ sha256={hash}
+ frame={frame}
+ score={100 - Math.round(distance - minDistance) + '%'}
+ verified={verified}
+ isSaved={!!saved[hash] && !!saved[hash].frames && !!saved[hash].frames[parseInt(frame, 10)]}
+ size={options.thumbnailSize}
+ onClick={() => review.toggleSaved({ verified, hash, frame })}
+ reviewActions={review}
+ {...frameProps}
+ />
+ ))}
+ </div>
+ )
+}
+
+const mapStateToProps = state => ({
+ saved: state.review.saved,
+ options: state.search.options,
+})
+
+const mapDispatchToProps = dispatch => ({
+ review: bindActionCreators({ ...reviewActions }, dispatch),
+ search: bindActionCreators({ ...searchActions }, dispatch),
+})
+
+export default connect(mapStateToProps, mapDispatchToProps)(Keyframes)
diff --git a/scraper/client/common/loader.component.js b/scraper/client/common/loader.component.js
new file mode 100644
index 00000000..6795424b
--- /dev/null
+++ b/scraper/client/common/loader.component.js
@@ -0,0 +1,10 @@
+import React, { Component } from 'react'
+
+export default function Loader() {
+ return (
+ <div className='loaderWrapper'>
+ <div className='loader'>
+ </div>
+ </div>
+ )
+} \ No newline at end of file
diff --git a/scraper/client/common/sidebar.component.js b/scraper/client/common/sidebar.component.js
new file mode 100644
index 00000000..487f3289
--- /dev/null
+++ b/scraper/client/common/sidebar.component.js
@@ -0,0 +1,37 @@
+import React, { Component } from 'react'
+import { NavLink } from 'react-router-dom'
+import { connect } from 'react-redux'
+
+class Sidebar extends Component {
+ render() {
+ const { hash } = this.props
+ if (!hash) {
+ return (
+ <div className="sidebar">
+ </div>
+ )
+ }
+ return (
+ <div className="sidebar">
+ <h4>Media</h4>
+ <NavLink to={'/metadata/' + hash + '/summary/'}>Summary</NavLink>
+ <NavLink to={'/metadata/' + hash + '/mediaRecord/'}>Media Record</NavLink>
+ <NavLink to={'/metadata/' + hash + '/mediaInfo/'}>Media Info</NavLink>
+ <NavLink to={'/metadata/' + hash + '/sugarcube/'}>Sugarcube</NavLink>
+
+ <h4>Keyframes</h4>
+ <NavLink to={'/metadata/' + hash + '/keyframe/'}>Keyframe</NavLink>
+
+ <h4>Detectors</h4>
+ <NavLink to={'/metadata/' + hash + '/places365/'}>Places 365</NavLink>
+ <NavLink to={'/metadata/' + hash + '/coco/'}>Coco</NavLink>
+ </div>
+ )
+ }
+}
+
+const mapStateToProps = state => ({
+ hash: state.metadata.hash,
+})
+
+export default connect(mapStateToProps)(Sidebar)
diff --git a/scraper/client/common/table.component.js b/scraper/client/common/table.component.js
new file mode 100644
index 00000000..76a1d57c
--- /dev/null
+++ b/scraper/client/common/table.component.js
@@ -0,0 +1,121 @@
+import React from 'react'
+
+import { formatName } from '../util'
+
+const __HR__ = '__HR__'
+
+export function TableObject({ tag, object, order, summary }) {
+ if (!object) return null
+ if (object === 'loading') {
+ return <div className='tableObject loading'>{tag}{': Loading'}</div>
+ }
+ if (object.err) {
+ return <div className='tableObject error'>{tag}{' Error: '}{object.err}</div>
+ }
+ let objects = Object.keys(object)
+ if (order) {
+ const grouped = objects.reduce((a, b) => {
+ const index = order.indexOf(b)
+ if (index !== -1) {
+ a.order.push([index, b])
+ } else {
+ a.alpha.push(b)
+ }
+ return a
+ }, { order: [], alpha: [] })
+ objects = grouped.order
+ .sort((a, b) => a[0] - b[0])
+ .map(([i, s]) => s)
+ if (!summary) {
+ objects = objects
+ // .concat([__HR__])
+ .concat(grouped.alpha.sort())
+ }
+ } else {
+ objects = objects.sort()
+ }
+ return (
+ <div>
+ {tag && <h3>{tag}</h3>}
+ <table className={'tableObject ' + tag}>
+ <tbody>
+ {objects.map((key, i) => (
+ <TableRow key={key + '_' + i} name={key} value={object[key]} />
+ ))}
+ </tbody>
+ </table>
+ </div>
+ )
+}
+
+export function TableArray({ tag, list }) {
+ if (!list) return null
+ return (
+ <div>
+ {tag && <h3>{tag}</h3>}
+ <table className={'tableArray ' + tag}>
+ <tbody>
+ {list.map((value, i) => (
+ <tr key={tag + '_' + i}>
+ <TableCell value={value} />
+ </tr>
+ ))}
+ </tbody>
+ </table>
+ </div>
+ )
+}
+
+export function TableTuples({ tag, list }) {
+ if (!list) return null
+ return (
+ <div>
+ {tag && <h3>{tag}</h3>}
+ <table className={'tableTuples ' + tag}>
+ <tbody>
+ {list.map(([key, ...values], i) => (
+ <tr key={tag + '_' + i}>
+ <th>{formatName(key)}</th>
+ {values.map((value, j) => (
+ <TableCell key={i + '_' + j} value={value} />
+ ))}
+ </tr>
+ ))}
+ </tbody>
+ </table>
+ </div>
+ )
+}
+
+export function TableRow({ name, value }) {
+ if (name === __HR__) {
+ return (
+ <tr>
+ <th className='tr'>
+ <hr />
+ </th>
+ </tr>
+ )
+ }
+ return (
+ <tr>
+ <th>{formatName(name)}</th>
+ <TableCell name={name} value={value} />
+ </tr>
+ )
+}
+
+export function TableCell({ value }) {
+ if (value && typeof value === 'object') {
+ if (value._raw) {
+ value = value.value
+ } else if (value.length) {
+ value = <TableArray nested tag={''} list={value} />
+ } else {
+ value = <TableObject nested tag={''} object={value} />
+ }
+ }
+ return (
+ <td>{value}</td>
+ )
+}
diff --git a/scraper/client/common/video.component.js b/scraper/client/common/video.component.js
new file mode 100644
index 00000000..e5525bf6
--- /dev/null
+++ b/scraper/client/common/video.component.js
@@ -0,0 +1,47 @@
+import React, { Component } from 'react'
+import { connect } from 'react-redux'
+import { imageUrl, widths } from '../util'
+
+import { Gate } from '.'
+
+class Video extends Component {
+ state = {
+ playing: false,
+ }
+
+ render() {
+ const { app, data, size } = this.props
+ const { playing } = this.state
+ const { sugarcube } = data.metadata
+ const url = sugarcube.fp.replace('/var/www/files/', 'https://cube.syrianarchive.org/')
+ const { sha256, verified } = app.mediainfo
+ const { video } = app.mediainfo.metadata.mediainfo
+ const keyframe = app.keyframe.metadata.keyframe.basic[0]
+ return (
+ <div className='video'>
+ {playing
+ ? <video src={url} autoPlay controls muted />
+ : <div
+ className='bg'
+ style={{
+ width: widths[size || 'sm'],
+ height: widths[size || 'sm'] / video.aspect_ratio,
+ backgroundImage: 'url(' + imageUrl(verified, sha256, keyframe, size) + ')',
+ }}
+ onClick={() => this.setState({ playing: true })}
+ >
+ <div className='play'></div>
+ </div>
+ }
+ </div>
+ )
+ }
+}
+
+const mapStateToProps = () => ({
+ tag: 'sugarcube',
+})
+
+export default connect(mapStateToProps)(props => (
+ <Gate View={Video} {...props} />
+))
diff --git a/scraper/client/index.js b/scraper/client/index.js
new file mode 100644
index 00000000..eddc5fb2
--- /dev/null
+++ b/scraper/client/index.js
@@ -0,0 +1,19 @@
+import React from 'react'
+import ReactDOM from 'react-dom'
+import { AppContainer } from 'react-hot-loader'
+import { Provider } from 'react-redux'
+
+import App from './app'
+
+import { store, history } from './store'
+
+const container = document.createElement('div')
+document.body.appendChild(container)
+
+ReactDOM.render(
+ <AppContainer>
+ <Provider store={store}>
+ <App history={history} />
+ </Provider>
+ </AppContainer>, container
+)
diff --git a/scraper/client/metadata/index.js b/scraper/client/metadata/index.js
new file mode 100644
index 00000000..0eef814e
--- /dev/null
+++ b/scraper/client/metadata/index.js
@@ -0,0 +1,25 @@
+import Heading from './heading.component'
+import MediaInfo from './mediaInfo.component'
+import MediaRecord from './mediaRecord.component'
+import Summary from './summary.component'
+import KeyframeList from './keyframeList.component'
+import KeyframeSingle from './keyframeSingle.component'
+import KeyframeStatus from './keyframeStatus.component'
+import Coco from './coco.component'
+import Places365 from './places365.component'
+import Sugarcube from './sugarcube.component'
+
+import './metadata.css'
+
+export {
+ Heading,
+ MediaRecord,
+ MediaInfo,
+ Summary,
+ KeyframeList,
+ KeyframeSingle,
+ KeyframeStatus,
+ Coco,
+ Places365,
+ Sugarcube,
+}
diff --git a/scraper/client/session.js b/scraper/client/session.js
new file mode 100644
index 00000000..5bfae7eb
--- /dev/null
+++ b/scraper/client/session.js
@@ -0,0 +1,5 @@
+import Storage from 'store2'
+
+const session = Storage.namespace('vcat.search')
+
+export default session
diff --git a/scraper/client/store.js b/scraper/client/store.js
new file mode 100644
index 00000000..043af351
--- /dev/null
+++ b/scraper/client/store.js
@@ -0,0 +1,38 @@
+import { applyMiddleware, compose, combineReducers, createStore } from 'redux'
+import { connectRouter, routerMiddleware } from 'connected-react-router'
+import { createBrowserHistory } from 'history'
+import thunk from 'redux-thunk'
+import { login } from './util'
+
+import metadataReducer from './metadata/metadata.reducer'
+import searchReducer from './search/search.reducer'
+import reviewReducer from './review/review.reducer'
+
+const rootReducer = combineReducers({
+ auth: (state = login()) => state,
+ metadata: metadataReducer,
+ search: searchReducer,
+ review: reviewReducer,
+})
+
+function configureStore(initialState = {}, history) {
+ const composeEnhancers = window.__REDUX_DEVTOOLS_EXTENSION_COMPOSE__ || compose
+
+ const store = createStore(
+ connectRouter(history)(rootReducer), // new root reducer with router state
+ initialState,
+ composeEnhancers(
+ applyMiddleware(
+ thunk,
+ routerMiddleware(history)
+ ),
+ ),
+ )
+
+ return store
+}
+
+const history = createBrowserHistory()
+const store = configureStore({}, history)
+
+export { store, history }
diff --git a/scraper/client/types.js b/scraper/client/types.js
new file mode 100644
index 00000000..e3c64691
--- /dev/null
+++ b/scraper/client/types.js
@@ -0,0 +1,21 @@
+export const asType = (type, name) => [type, name].join('_').toUpperCase()
+export const tagAsType = (type, names) => (
+ names.reduce((tags, name) => {
+ tags[name] = asType(type, name)
+ return tags
+ }, {})
+)
+
+export const metadata = tagAsType('metadata', [
+ 'loading', 'loaded', 'loaded_many', 'error', 'set_hash'
+])
+
+export const search = tagAsType('search', [
+ 'loading', 'loaded', 'error', 'panic', 'update_options',
+])
+
+export const review = tagAsType('review', [
+ 'loading', 'loaded', 'error', 'save', 'unsave', 'refresh', 'clear', 'dedupe', 'create', 'set_count'
+])
+
+export const init = '@@INIT'
diff --git a/scraper/client/util.js b/scraper/client/util.js
new file mode 100644
index 00000000..ad303c64
--- /dev/null
+++ b/scraper/client/util.js
@@ -0,0 +1,167 @@
+/* Mobile check */
+
+export const isiPhone = !!((navigator.userAgent.match(/iPhone/i)) || (navigator.userAgent.match(/iPod/i)))
+export const isiPad = !!(navigator.userAgent.match(/iPad/i))
+export const isAndroid = !!(navigator.userAgent.match(/Android/i))
+export const isMobile = isiPhone || isiPad || isAndroid
+export const isDesktop = !isMobile
+
+const htmlClassList = document.body.parentNode.classList
+htmlClassList.add(isDesktop ? 'desktop' : 'mobile')
+
+/* Default image dimensions */
+
+export const widths = {
+ th: 160,
+ sm: 320,
+ md: 640,
+ lg: 1280,
+}
+
+/* Formatting functions */
+
+const acronyms = 'id url cc sa fp md5 sha256'.split(' ').map(s => '_' + s)
+const acronymsUpperCase = acronyms.map(s => s.toUpperCase())
+
+export const formatName = s => {
+ acronyms.forEach((acronym, i) => s = s.replace(acronym, acronymsUpperCase[i]))
+ return s.replace(/_/g, ' ')
+}
+
+// Use to pad frame numbers with zeroes
+export const pad = (n, m) => {
+ let s = String(n || 0)
+ while (s.length < m) {
+ s = '0' + s
+ }
+ return s
+}
+
+// Verified is 0/1 when retrieved from SQL, but 'verified' or 'unverified' when retrieved elsewhere
+export const isVerified = verified => verified === 1 || verified === '1' || verified === 'verified'
+export const verify = verified => isVerified(verified) ? 'verified' : 'unverified'
+
+export const courtesyS = (n, s) => n + ' ' + (n === 1 ? s : s + 's')
+
+export const padSeconds = n => n < 10 ? '0' + n : n
+
+export const timestamp = (n = 0, fps = 25) => {
+ n /= fps
+ let s = padSeconds(Math.round(n) % 60)
+ n = Math.floor(n / 60)
+ if (n > 60) {
+ return Math.floor(n / 60) + ':' + padSeconds(n % 60) + ':' + s
+ }
+ return (n % 60) + ':' + s
+}
+
+export const percent = n => (n * 100).toFixed(1) + '%'
+
+export const px = (n, w) => Math.round(n * w) + 'px'
+
+export const clamp = (n, a, b) => n < a ? a : n < b ? n : b
+
+/* URLs */
+
+export const hashPath = sha256 => {
+ if (!sha256 || sha256.length < 9) {
+ throw new Error('Invalid sha256')
+ }
+ return [
+ sha256.slice(0, 3),
+ sha256.slice(3, 6),
+ sha256.slice(6, 9),
+ sha256,
+ ].join('/')
+}
+
+export const imageUrl = (verified, sha256, frame, size = 'th') => [
+ 'https://' + process.env.S3_HOST + '/v1/media/keyframes',
+ isVerified(verified) ? null : 'unverified',
+ hashPath(sha256),
+ pad(frame, 6),
+ size,
+ 'index.jpg'
+].filter(s => !!s).join('/')
+
+export const metadataUri = (sha256, tag) => '/metadata/' + sha256 + '/' + tag + '/'
+export const keyframeUri = (sha256, frame) => '/metadata/' + sha256 + '/keyframe/' + pad(frame, 6) + '/'
+
+export const preloadImage = opt => {
+ let { verified, hash, frame, url } = opt
+ if (hash && frame) {
+ url = imageUrl(verified, hash, frame, 'md')
+ }
+ const image = new Image()
+ let loaded = false
+ image.onload = () => {
+ if (loaded) return
+ loaded = true
+ image.onload = null
+ }
+ // console.log(img.src)
+ image.crossOrigin = 'anonymous'
+ image.src = url
+ if (image.complete) {
+ image.onload()
+ }
+}
+
+/* AJAX */
+
+let cachedAuth = null
+let token = ''
+let username = ''
+
+export const post = (uri, data, credentials) => {
+ login()
+ let headers
+ if (data instanceof FormData) {
+ headers = {
+ Accept: 'application/json, application/xml, text/play, text/html, *.*',
+ }
+ } else {
+ headers = {
+ Accept: 'application/json, application/xml, text/play, text/html, *.*',
+ 'Content-Type': 'application/json; charset=utf-8',
+ }
+ data = JSON.stringify(data)
+ }
+ let opt = {
+ method: 'POST',
+ body: data,
+ headers,
+ credentials: 'include',
+ }
+ if (credentials) {
+ headers.Authorization = 'Token ' + token
+ }
+ // console.log(headers)
+ // headers['X-CSRFToken'] = csrftoken
+ return fetch(uri, opt).then(res => res.json())
+}
+
+// api queries
+export const login = () => {
+ if (cachedAuth) return cachedAuth
+ const isLocal = (window.location.hostname === '0.0.0.0' || window.location.hostname === '127.0.0.1')
+ try {
+ const auth = JSON.parse(JSON.parse(localStorage.getItem('persist:root')).auth)
+ // console.log('auth', auth)
+ token = auth.token
+ username = auth.user.username
+ if (token) {
+ console.log('logged in', username)
+ }
+ cachedAuth = auth
+ if (!token && !isLocal) {
+ window.location.href = '/'
+ }
+ return auth
+ } catch (e) {
+ if (!isLocal) {
+ window.location.href = '/'
+ }
+ return {}
+ }
+}
diff --git a/content-script.crx b/scraper/content-script.crx
index f6619fc9..f6619fc9 100644
--- a/content-script.crx
+++ b/scraper/content-script.crx
Binary files differ
diff --git a/content-script.pem b/scraper/content-script.pem
index d6575905..d6575905 100644
--- a/content-script.pem
+++ b/scraper/content-script.pem
diff --git a/content-script/.gitignore b/scraper/content-script/.gitignore
index 5ca0973f..5ca0973f 100644
--- a/content-script/.gitignore
+++ b/scraper/content-script/.gitignore
diff --git a/content-script/alone-off.png b/scraper/content-script/alone-off.png
index 677b2273..677b2273 100644
--- a/content-script/alone-off.png
+++ b/scraper/content-script/alone-off.png
Binary files differ
diff --git a/content-script/alone-on.png b/scraper/content-script/alone-on.png
index 7c0f441e..7c0f441e 100644
--- a/content-script/alone-on.png
+++ b/scraper/content-script/alone-on.png
Binary files differ
diff --git a/content-script/background.js b/scraper/content-script/background.js
index 78244b12..78244b12 100644
--- a/content-script/background.js
+++ b/scraper/content-script/background.js
diff --git a/content-script/check.js b/scraper/content-script/check.js
index 0644084d..0644084d 100644
--- a/content-script/check.js
+++ b/scraper/content-script/check.js
diff --git a/content-script/icon-128.png b/scraper/content-script/icon-128.png
index 75ccfc80..75ccfc80 100644
--- a/content-script/icon-128.png
+++ b/scraper/content-script/icon-128.png
Binary files differ
diff --git a/content-script/icon-16.png b/scraper/content-script/icon-16.png
index b2bfd098..b2bfd098 100644
--- a/content-script/icon-16.png
+++ b/scraper/content-script/icon-16.png
Binary files differ
diff --git a/content-script/icon-48.png b/scraper/content-script/icon-48.png
index b4b752a7..b4b752a7 100644
--- a/content-script/icon-48.png
+++ b/scraper/content-script/icon-48.png
Binary files differ
diff --git a/content-script/index.html b/scraper/content-script/index.html
index 577ab5b0..577ab5b0 100644
--- a/content-script/index.html
+++ b/scraper/content-script/index.html
diff --git a/content-script/manifest.json b/scraper/content-script/manifest.json
index dc7e9c9b..dc7e9c9b 100644
--- a/content-script/manifest.json
+++ b/scraper/content-script/manifest.json
diff --git a/content-script/options.html b/scraper/content-script/options.html
index 8b5ba90c..8b5ba90c 100644
--- a/content-script/options.html
+++ b/scraper/content-script/options.html
diff --git a/content-script/options.js b/scraper/content-script/options.js
index ef9396fa..ef9396fa 100644
--- a/content-script/options.js
+++ b/scraper/content-script/options.js
diff --git a/datasets/citations-2018310.csv b/scraper/datasets/citations-20181031.csv
index 68a3ae3e..68a3ae3e 100644
--- a/datasets/citations-2018310.csv
+++ b/scraper/datasets/citations-20181031.csv
diff --git a/scraper/datasets/citations-20181207.csv b/scraper/datasets/citations-20181207.csv
new file mode 100644
index 00000000..48a9ce2f
--- /dev/null
+++ b/scraper/datasets/citations-20181207.csv
@@ -0,0 +1,440 @@
+key,name,title,,,,Comments,,,,publication,day,month,year,pages,vol,author1,author2,author3,author4,author5,author6,funding1,funding2,funding3,funding4,priority,notes,pdf_filename,url,bibtex_copy
+10k_US_adult_faces,10K US Adult Faces,The intrinsic memorability of face images,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+3d_rma,3D-RMA,Automatic 3D Face Authentication,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+3dddb_unconstrained,3D Dynamic,A 3D Dynamic Database for Unconstrained Face Recognition,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+3dpes,3DPeS,3DPeS: 3D People Dataset for Surveillance and Forensics,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+4dfab,4DFAB,4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+50_people_one_question,50 People One Question,Merging Pose Estimates Across Space and Time,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+a_pascal_yahoo,aPascal,Describing Objects by their Attributes,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+aberdeen ,Aberdeen,,,,,OK no paper,,,,,,,,,,,,,,,,,,,,,,,,
+adience,Adience,Age and Gender Estimation of Unfiltered Faces,,,,,,,,"Transactions on Information Forensics and Security (IEEE-TIFS), special issue on Facial Biometrics in the Wild",,,2014,2170 - 2179,9,Eran Eidinger,Roee Enbar, Tal Hassner,,,,,,,,,,,http://www.openu.ac.il/home/hassner/Adience/EidingerEnbarHassner_tifs.pdf,
+afad,AFAD,Ordinal Regression with a Multiple Output CNN for Age Estimation,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+afew_va,AFEW-VA,"AFEW-VA database for valence and arousal estimation in-the-wild
+",,,,"both paper refer to database. ""Collecting..."" describes how the database was created but the statistics we use are in ""afew-va..."". ",,,,IEEE MultiMedia,,,2012,pp. 34-41,"vol. 19, no. 3",,,,,,,,,,,,,"afew-va.pdf
+Dhall_Goecke_Lucey_Gedeon_M_2012.pdf",,
+afew_va,AFEW-VA,"Collecting Large, Richly Annotated Facial-Expression Databases from Movies",,,,,,,,,,,,,,,,,,,,,,,,,,,,
+affectnet,AffectNet,"AffectNet: A New Database for Facial Expression, Valence, and Arousal Computation in the Wild",,,,,,,,,,,,,,,,,,,,,,,,,,,,
+aflw,AFLW,"Annotated Facial Landmarks in the Wild: A Large-scale, Real-world Database for Facial Landmark Localization",,,,,,,,,,,,,,Martin Koestinger,Paul Wohlhart,Peter M. Roth,Horst Bischof,,,,,,,,,koestinger_befit_11.pdf,https://files.icg.tugraz.at/seafhttp/files/d18813db-78c3-46a9-8614-bc0c8d428114/koestinger_befit_11.pdf,"@INPROCEEDINGS{koestinger11a,
+ author = {Martin Koestinger, Paul Wohlhart, Peter M. Roth and Horst Bischof},
+ title = {{Annotated Facial Landmarks in the Wild: A Large-scale, Real-world Database for Facial Landmark Localization}},
+ booktitle = {{Proc. First IEEE International Workshop on Benchmarking Facial Image Analysis Technologies}},
+ year = {2011}
+} "
+afw,AFW,"Face detection, pose estimation and landmark localization in the wild",,,,,,,,"Computer Vision and Pattern Recognition (CVPR) Providence, Rhode Island,",,,2012,,,X. Zhu,D. Ramanan,,,,,,,,,,,,http://www.ics.uci.edu/~xzhu/paper/face-cvpr12.pdf,
+agedb,AgeDB,"AgeDB: the first manually collected, in-the-wild age database",,,,,,,,Proceedings of IEEE Int’l Conf. on Computer Vision and Pattern Recognition (CVPR-W 2017,,,2017,,,S. Moschoglou,A. Papaioannou,C. Sagonas,J. Deng,I. Kotsia, S. Zafeiriou,,,,,,,agedb.pdf,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Moschoglou_AgeDB_The_First_CVPR_2017_paper.pdf,"@inproceedings{AgeDB,
+ author = {S. Moschoglou and A. Papaioannou and C. Sagonas and J. Deng and I. Kotsia and S. Zafeiriou},
+ address = {Honolulu, Hawaii},
+ booktitle = {Proceedings of IEEE Int’l Conf. on Computer Vision and Pattern Recognition (CVPR-W 2017)},
+ month = {June},
+ title = {AgeDB: the first manually collected, in-the-wild age database},
+ year = {2017},
+}"
+alert_airport,ALERT Airport,"A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets",,,,,,,,,,,,,,,,,,,,,,,,,,,,
+am_fed,AM-FED,Affectiva MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected “In the Wild”,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+apis,APiS1.0,Pedestrian Attribute Classification in Surveillance: Database and Evaluation,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+ar_facedb,AR Face,The AR Face Database,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+awe_ears,AWE Ears,Ear Recognition: More Than a Survey,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+b3d_ac,B3D(AC),A 3-D Audio-Visual Corpus of Affective Communication,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+bbc_pose,BBC Pose,Automatic and Efficient Human Pose Estimation for Sign Language Videos ,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+berkeley_pose,BPAD,Describing People: A Poselet-Based Approach to Attribute Classification,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+bfm,BFM,A 3D Face Model for Pose and Illumination Invariant Face Recognition,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+bio_id,BioID Face,Robust Face Detection Using the Hausdorff Distance,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+bjut_3d,BJUT-3D,The BJUT-3D Large-Scale Chinese Face Database,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+bosphorus,The Bosphorus,Bosphorus Database for 3D Face Analysis,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+bp4d_plus,BP4D+,Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+bp4d_spontanous,BP4D-Spontanous,A high resolution spontaneous 3D dynamic facial expression database,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+brainwash,Brainwash,Brainwash dataset,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+bu_3dfe,BU-3DFE,A 3D Facial Expression Database For Facial Behavior Research,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+buhmap_db,BUHMAP-DB ,Facial Feature Tracking and Expression Recognition for Sign Language,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+cafe,CAFE,The Child Affective Facial Expression (CAFE) Set: Validity and reliability from untrained adults,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+caltech_10k_web_faces,Caltech 10K Web Faces, Pruning Training Sets for Learning of Object Categories,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+caltech_faces,Caltech Faces,,,,,OK no paper,,,,,,,,,,,,,,,,,,,,,,,,
+caltech_pedestrians,Caltech Pedestrians,Pedestrian Detection: A Benchmark,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+caltech_pedestrians,Caltech Pedestrians,Pedestrian Detection: An Evaluation of the State of the Art,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+camel,CAMEL,CAMEL Dataset for Visual and Thermal Infrared Multiple Object Detection and Tracking,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+cas_peal,CAS-PEAL,The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations ,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+casablanca,Casablanca,Context-aware CNNs for person head detection,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+casia_webface,CASIA Webface,Learning Face Representation from Scratch,,,,,,,,arXiv preprint arXiv:1411.7923.,,,2014,,,Dong Yi,Zhen Lei, Shengcai Liao,Stan Z. Li,,,,,,,,,1411.7923.pdf,https://arxiv.org/abs/1411.7923,
+caviar4reid,CAVIAR4REID,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+celeba,CelebA,Deep Learning Face Attributes in the Wild,,,,,,,,"in IEEE International Conference on Computer Vision (ICCV),",,,2015,,,S. Yang,P. Luo,C. C. Loy,X. Tang,,,,,,,,,Liu_Deep_Learning_Face_ICCV_2015_paper.pdf,https://arxiv.org/abs/1509.06451,"@inproceedings{liu2015faceattributes, author = {Ziwei Liu and Ping Luo and Xiaogang Wang and Xiaoou Tang}, title = {Deep Learning Face Attributes in the Wild}, booktitle = {Proceedings of International Conference on Computer Vision (ICCV)}, month = December, year = {2015} }"
+celeba_plus,CelebFaces+,"Deep Learning Face Representation from Predicting 10,000 Classes",,,,,,,,,,,,,,,,,,,,,,,,,,,,
+cfd,CFD,The Chicago face database: A free stimulus set of faces and norming data,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+chalearn,ChaLearn,ChaLearn Looking at People: A Review of Events and Resources,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+chokepoint,ChokePoint,Patch-based Probabilistic Image Quality Assessment for Face Selection and Improved Video-based Face Recognition ,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+cityscapes,Cityscapes,The Cityscapes Dataset for Semantic Urban Scene Understanding,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+cityscapes,Cityscapes,The Cityscapes Dataset,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+clothing_co_parsing,CCP,Clothing Co-Parsing by Joint Image Segmentation and Labeling,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+cmdp,CMDP,Distance Estimation of an Unknown Person from a Portrait,,,,,,,,ECCV 2014,,,2014,,,X. P. Burgos-Artizzu,M.R. Ronchi,P. Perona,,,,,,,,,,ECCV14_FaceDistancePortrait_PAPER.pdf,http://www.vision.caltech.edu/~mronchi/papers/ECCV14_FaceDistancePortrait_PAPER.pdf,"@incollection{perona2014PortraitDistanceEstimation,
+ title={Distance Estimation of an Unknown Person from a Portrait},
+ author={Xavier P. Burgos-Artizzu, Matteo Ruggero Ronchi and Pietro Perona},
+ booktitle={Computer Vision--ECCV 2014},
+ pages={313--327},
+ year={2014},
+ publisher={Springer}
+}"
+cmu_pie,CMU PIE,"The CMU Pose, Illumination, and Expression Database",,,,,,,,IEEE Transactions on Pattern Analysis and Machine Intelligence,,12,2003,"25, No. 12",,T. Sim,S. Baker,M. Bsat,,,,,,,,,,,http://www.cs.cmu.edu/~simonb/pie_db/pami.pdf,
+coco,COCO,Microsoft COCO: Common Objects in Context,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+coco_action,COCO-a,Describing Common Human Visual Actions in Images,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+coco_qa,COCO QA,Exploring Models and Data for Image Question Answering,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+cofw,COFW,Robust face landmark estimation under occlusion,,,,Paper for RCPR method includes creation of COFW dataset,,,,"ICCV 2013, Sydney, Australia",,,2013,,,X. P. Burgos-Artizzu,P. Perona,P. Dollár,,,,,,,,,,ICCV13 Burgos-Artizzu.pdf,http://www.vision.caltech.edu/%7Expburgos/papers/ICCV13%20Burgos-Artizzu.pdf,
+cohn_kanade,CK,Comprehensive Database for Facial Expression Analysis,,,,,,,,"Proceedings of the Fourth IEEE International Conference
+on Automatic Face and Gesture Recognition
+(FG'00)
+",,,2000,484-490,,"Kanade, T.","Cohn, J. F.","Tian, Y.",,,,,,,,,,download.pdf,http://www.pitt.edu/~jeffcohn/biblio/Cohn-Kanade_Database.pdf,
+cohn_kanade_plus,CK+,The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression,,,,,,,,"Proceedings of the Third International Workshop on CVPR for Human Communicative Behavior Analysis (CVPR4HB 2010), San Francisco, USA",,,2010,94-101,,"Ambadar, Z.","Cohn, J.F.","Kanade, T.","Lucey, P.","Matthews, I.A.","Saragih, J.M.",,,,,,,paper.pdf,https://ieeexplore.ieee.org/document/5543262,"@article{Lucey2010TheEC,
+ title={The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression},
+ author={Patrick Lucey and Jeffrey F. Cohn and Takeo Kanade and Jason M. Saragih and Zara Ambadar and Iain A. Matthews},
+ journal={2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops},
+ year={2010},
+ pages={94-101}
+}"
+columbia_gaze,Columbia Gaze,Gaze Locking: Passive Eye Contact Detection for Human–Object Interaction,,,,,,,,ACM Symposium on User Interface Software and Technology (UIST),,,2013,271-280,,B.A. Smith,Q. Yin,S.K. Feiner,S.K. Nayar,,,,,,,,,p271-smith.pdf,http://www.cs.columbia.edu/~brian/publications/gaze_locking.html,
+complex_activities,Ongoing Complex Activities,Recognition of Ongoing Complex Activities by Sequence Prediction over a Hierarchical Label Space,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+cuhk01,CUHK01,Human Reidentification with Transferred Metric Learning,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+cuhk02,CUHK02,Locally Aligned Feature Transforms across Views,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+cuhk03,CUHK03,DeepReID: Deep Filter Pairing Neural Network for Person Re-identification,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+cvc_01_barcelona,CVC-01,Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+czech_news_agency,UFI,Unconstrained Facial Images: Database for Face Recognition under Real-world Conditions,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+d3dfacs,D3DFACS,A FACS Valid 3D Dynamic Action Unit database with Applications to 3D Dynamic Morphable Facial Modelling,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+dartmouth_children,Dartmouth Children,The Dartmouth Database of Children's Faces: Acquisition and validation of a new face stimulus set,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+data_61,Data61 Pedestrian,A Multi-Modal Graphical Model for Scene Analysis,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+deep_fashion,DeepFashion,DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+deep_fashion,DeepFashion,Fashion Landmark Detection in the Wild,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+disfa,DISFA,DISFA: A Spontaneous Facial Action Intensity Database,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+distance_nighttime,Long Distance Heterogeneous Face,Nighttime Face Recognition at Long Distance: Cross-distance and Cross-spectral Matching,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+duke_mtmc,Duke MTMC,"Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking",,,,,,,,,,,,,,,,,,,,,,,,,,,,
+emotio_net,EmotioNet Database,"EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild",,,,,,,,,,,,,,,,,,,,,,,,,,,,
+eth_andreas_ess,ETHZ Pedestrian,Depth and Appearance for Mobile Scene Analysis,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+europersons,EuroCity Persons,The EuroCity Persons Dataset: A Novel Benchmark for Object Detection,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+expw,ExpW,Learning Social Relation Traits from Face Images,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+expw,ExpW,From Facial Expression Recognition to Interpersonal Relation Prediction,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+face_research_lab,Face Research Lab London,Face Research Lab London Set. figshare,,,,OK No paper (not even on internet?),,,,,,,2017,,,"DeBruine, Lisa","Jones, Benedict",,,,,,,,,,,,https://doi.org/10.6084/m9.figshare.5047666.v3,
+face_scrub,FaceScrub,A data-driven approach to cleaning large face datasets,,,,,,,,Proc. IEEE International Conference on Image Processing (ICIP),,,2014,,,H.-W. Ng,S. Winkler,,,,,,,,,,,icip2014a.pdf,http://vintage.winklerbros.net/Publications/icip2014a.pdf,
+face_tracer,FaceTracer,FaceTracer: A Search Engine for Large Collections of Images with Faces,,,,,,,,European Conference on Computer Vision (ECCV),,,2008,340-353,,N. Kumar,P. N. Belhumeur,S. K. Nayar,,,,,,,,1,,Kumar_ECCV08.pdf,http://www1.cs.columbia.edu/CAVE/publications/pdfs/Kumar_ECCV08.pdf,
+face_tracer,FaceTracer,Face Swapping: Automatically Replacing Faces in Photographs,,,,,,,,ACM Trans. on Graphics (also Proc. of ACM SIGGRAPH),,,2008,,,D. Bitouk,N. Kumar,S. Dhillon,P.N. Belhumeur,S. K. Nayar,,,,,,2,,Bitouk_SIGGRAPH08.pdf,http://www1.cs.columbia.edu/CAVE/publications/pdfs/Bitouk_SIGGRAPH08.pdf,
+facebook,SFC,,,,,"OK no paper, private",,,,,,,,,,,,,,,,,,,,,,,,
+facebook_100,Facebook100,Scaling Up Biologically-Inspired Computer Vision: A Case Study in Unconstrained Face Recognition on Facebook,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+faceplace,Face Place,Recognizing disguised faces,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+faces94,Faces94,,,,,OK no paper,,,,,,,,,,,,,,,,,,,,,,,,
+faces95,Faces95,,,,,OK no paper,,,,,,,,,,,,,,,,,,,,,,,,
+faces96,Faces96,,,,,OK no paper,,,,,,,,,,,,,,,,,,,,,,,,
+families_in_the_wild,FIW,Visual Kinship Recognition of Families in the Wild,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+fddb,FDDB,FDDB: A Benchmark for Face Detection in Unconstrained Settings,,,,,,,,"Technical Report UM-CS-2010-009, Dept. of Computer Science, University of Massachusetts",,,2010,,,Vidit Jain,Erik Learned-Miller,,,,,,,,,,,fddb.pdf,http://vis-www.cs.umass.edu/fddb/fddb.pdf,"@TechReport{fddbTech,
+ author = {Vidit Jain and Erik Learned-Miller},
+ title = {FDDB: A Benchmark for Face Detection in Unconstrained Settings},
+ institution = {University of Massachusetts, Amherst},
+ year = {2010},
+ number = {UM-CS-2010-009}
+ }"
+fei,FEI,Captura e Alinhamento de Imagens: Um Banco de Faces Brasileiro,,,,"in Portuguese, but original paper",,,,,,,,,,,,,,,,,,,,,,,,
+feret,FERET,The FERET Verification Testing Protocol for Face Recognition Algorithms,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+feret,FERET,The FERET database and evaluation procedure for face-recognition algorithms,,,,paper not in nextcloud,,,,,,,,,,,,,,,,,,,,,,,,
+feret,FERET,FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+feret,FERET,The FERET Evaluation Methodology for Face-Recognition Algorithms,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+ferplus,FER+,Training Deep Networks for Facial Expression Recognition with Crowd-Sourced Label Distribution,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+fia,CMU FiA,The CMU Face In Action (FIA) Database,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+fiw_300,300-W,300 faces In-the-wild challenge: Database and results,,,,,,,,"Image and Vision Computing (IMAVIS), Special Issue on Facial Landmark Localisation ""In-The-Wild""",,,2016,,,C. Sagonas,E. Antonakos,"G, Tzimiropoulos",S. Zafeiriou,M. Pantic,,,,,,1,,,,
+fiw_300,300-W,300 Faces in-the-Wild Challenge: The first facial landmark localization Challenge,,,,,,,,"Proceedings of IEEE Int’l Conf. on Computer Vision (ICCV-W), 300 Faces in-the-Wild Challenge (300-W). Sydney, Australia",,,2013,,,C. Sagonas,G. Tzimiropoulos,S. Zafeiriou,M. Pantic,,,,,,,2,,,,
+fiw_300,300-W,A semi-automatic methodology for facial landmark annotation,,,,,,,,"Proceedings of IEEE Int’l Conf. Computer Vision and Pattern Recognition (CVPR-W), 5th Workshop on Analysis and Modeling of Faces and Gestures (AMFG 2013). Oregon, USA,",,,2013,,,C. Sagonas,G. Tzimiropoulos,S. Zafeiriou,M. Pantic,,,,,,,3,,,,
+florida_inmates,Florida Inmate,,,,,"OK no paper, not official database",,,,,,,,,,,,,,,,,,,,,,,,
+frav2d,FRAV2D,,,,,OK no paper,,,,,,,,,,,,,,,,,,,,,,,,
+frav3d,FRAV3D,"MULTIMODAL 2D, 2.5D & 3D FACE VERIFICATION",,,,,,,,,,,,,,,,,,,,,,,,,,,,
+frgc,FRGC,Overview of the Face Recognition Grand Challenge,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+gallagher,Gallagher,Clothing Cosegmentation for Recognizing People,,,,,,,,IEEE Conference on Computer Vision and Pattern Recognition,,,2008,,,Andrew Gallagher,Tsuhan Chen,,,,,,,,,,,141.pdf,,
+gavab_db,Gavab,GavabDB: a 3D face database,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+geofaces,GeoFaces,GeoFaceExplorer: Exploring the Geo-Dependence of Facial Attributes,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+georgia_tech_face_database,Georgia Tech Face,Maximum likelihood training of the embedded HMM for face detection and recognition,,,,"I think this is the correct paper – database was collected 1999, this is 2000",,,,,,,,,,,,,,,,,,,,,,,,
+gmu,Google Makeup,Parallel Optimized Pearson Correlation Condition (PO-PCC) for Robust Cosmetic Makeup Facial Recognition,,,,watermarked online publication,,,,,,,,,,,,,,,,,,,,,,,,
+google,Google (private),,,,,"OK no paper, private",,,,,,,,,,,,,,,,,,,,,,,,
+graz,Graz Pedestrian,Generic Object Recognition with Boosting,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+graz,Graz Pedestrian,Weak Hypotheses and Boosting for Generic Object Detection and Recognition,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+graz,Graz Pedestrian,Object Recognition Using Segmentation for Feature Detection,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+grimace,GRIMACE,,,,,OK no paper,,,,,,,,,,,,,,,,,,,,,,,,
+h3d,H3D,Poselets: Body Part Detectors Trained Using 3D Human Pose Annotations,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+hda_plus,HDA+,The HDA+ data set for research on fully automated re-identification systems,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+hda_plus,HDA+,A Multi-camera video data set for research on High-Definition surveillance,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+helen,Helen,Interactive Facial Feature Localization,,,,,,,,ECCV,,,2012,,,Vuong Le,Jonathan Brandt,Zhe Lin,Lubomir Boudev,Thomas S. Huang,,,,,,,,eccv2012_helen_final.pdf,http://www.ifp.illinois.edu/~vuongle2/helen/eccv2012_helen_final.pdf,
+hi4d_adsip,Hi4D-ADSIP,Hi4D-ADSIP 3-D dynamic facial articulation database,,,,paper?,,,,,,,,,,,,,,,,,,,,,,,,
+hid_equinox_infrared,HID,,,,,no paper,,,,,,,,,,,,,,,,,,,,,,,,
+hipsterwars,Hipsterwars,Hipster Wars: Discovering Elements of Fashion Styles,,,,,,,,In European Conference on Computer Vision,,,2014,,,M. Hadi Kiapour,Kota Yamaguchi,Alexander C. Berg,Tamara L. Berg,,,,,,,,,hipster_eccv14.pdf,http://tamaraberg.com/papers/hipster_eccv14.pdf,"@inproceedings{
+ HipsterWarsECCV14,
+ title = {Hipster Wars: Discovering Elements of Fashion Styles}
+ author = {M. Hadi Kiapour, Kota Yamaguchi, Alexander C. Berg, Tamara L. Berg},
+ booktitle={European Conference on Computer Vision},
+ year = {2014}
+ }"
+hollywood_headset,HollywoodHeads,Context-aware CNNs for person head detection,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+hrt_transgender,HRT Transgender,Is the Eye Region More Reliable Than the Face? A Preliminary Study of Face-based Recognition on a Transgender Dataset,,,,,,,,"In Proc. of IEEE Intl. Conf. on Biometrics: Theory, Applications, and Systems",,,2013,,,Gayathri Mahalingam,Karl Ricanek Jr.,,,,,,,,,,,,https://pdfs.semanticscholar.org/b066/733d533250f4ddafd22c12456def7fa24f4c.pdf,
+hrt_transgender,HRT Transgender,Investigating the Periocular-Based Face Recognition Across Gender Transformation,,,,,,,,IEEE Trans. On Information Forensics and Security,,,2014,pp. 2180 – 2192,"vol. 9, no. 12",Gayathri Mahalingam,Karl Ricanek Jr.,Midori M. Albert,,,,,,,,,,,https://ieeexplore.ieee.org/document/6915725,
+hrt_transgender,HRT Transgender,Face recognition across gender transformation using SVM Classifier,,,,"Paper used for statistics, not mentioned in citations",,,,,,,,,,,,,,,,,,,,,,Face_Recognition_Across_Gender_Transformation_Usin.pdf,,
+ifad,IFAD,Indian Face Age Database: A Database for Face Recognition with Age Variation,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+ifdb,IFDB,"Iranian Face Database with age, pose and expression",,,,,,,,,,,,,,,,,,,,,,,,,,,,
+ifdb,IFDB,Iranian Face Database and Evaluation with a New Detection Algorithm,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+iit_dehli_ear,IIT Dehli Ear,Automated human identification using ear imaging,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+ijb_a,IJB-A,Pushing the Frontiers of Unconstrained Face Detection and Recognition: IARPA Janus Benchmark A ,,,,,,,,Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition,,,2015,1931-1939,07-12-June-2015,"Klare, B. F.","Klein, B.","Taborsky, E.","Blanton, A.","Cheney, J.","Allen, K., ... Jain, A. K.",,,,,,,Klare_Pushing_the_Frontiers_2015_CVPR_paper.pdf,http://ieeexplore.ieee.org/document/7298803/,"DOI: 10.1109/CVPR.2015.7298803 @inbook{882e95bdca414797b4a8e2bfcb5b1fa4,
+title = ""Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A"",
+abstract = ""Rapid progress in unconstrained face recognition has resulted in a saturation in recognition accuracy for current benchmark datasets. While important for early progress, a chief limitation in most benchmark datasets is the use of a commodity face detector to select face imagery. The implication of this strategy is restricted variations in face pose and other confounding factors. This paper introduces the IARPA Janus Benchmark A (IJB-A), a publicly available media in the wild dataset containing 500 subjects with manually localized face images. Key features of the IJB-A dataset are: (i) full pose variation, (ii) joint use for face recognition and face detection benchmarking, (iii) a mix of images and videos, (iv) wider geographic variation of subjects, (v) protocols supporting both open-set identification (1:N search) and verification (1:1 comparison), (vi) an optional protocol that allows modeling of gallery subjects, and (vii) ground truth eye and nose locations. The dataset has been developed using 1,501,267 million crowd sourced annotations. Baseline accuracies for both face detection and face recognition from commercial and open source algorithms demonstrate the challenge offered by this new unconstrained benchmark."",
+author = ""Klare, {Brendan F.} and Ben Klein and Emma Taborsky and Austin Blanton and Jordan Cheney and Kristen Allen and Patrick Grother and Alan Mah and Mark Burge and Jain, {Anil K.}"",
+year = ""2015"",
+month = ""10"",
+doi = ""10.1109/CVPR.2015.7298803"",
+isbn = ""9781467369640"",
+volume = ""07-12-June-2015"",
+pages = ""1931--1939"",
+booktitle = ""Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition"",
+publisher = ""IEEE Computer Society"",
+
+}
+"
+ijb_b,IJB-B,IARPA Janus Benchmark-B Face Dataset,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+ijb_c,IJB-C,IARPA Janus Benchmark C,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+ilids_mcts,,"Imagery Library for Intelligent Detection Systems: The i-LIDS User Guide",,,,,,,,,,,,,,,,,,,,,,,,,,,,
+ilids_vid_reid,iLIDS-VID,Person Re-Identification by Video Ranking,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+images_of_groups,Images of Groups,Understanding Groups of Images of People,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+imdb_wiki,IMDB,Deep expectation of real and apparent age from a single image without facial landmarks,,,,,,,,International Journal of Computer Vision (IJCV),,6,2016,,,Rasmus Rothe,Radu Timofte,Luc Van Gool,,,,,,,,1,,eth_biwi_01299.pdf,,"@article{Rothe-IJCV-2016,
+ author = {Rasmus Rothe and Radu Timofte and Luc Van Gool},
+ title = {Deep expectation of real and apparent age from a single image without facial landmarks},
+ journal = {International Journal of Computer Vision (IJCV)},
+ year = {2016},
+ month = {July},
+}"
+imdb_wiki,IMDB,DEX: Deep EXpectation of apparent age from a single image,,,,,,,,IEEE International Conference on Computer Vision Workshops (ICCVW),,12,2015,,,Rasmus Rothe,Radu Timofte,Luc Van Gool,,,,,,,,2,,eth_biwi_01229.pdf,,"@InProceedings{Rothe-ICCVW-2015,
+ author = {Rasmus Rothe and Radu Timofte and Luc Van Gool},
+ title = {DEX: Deep EXpectation of apparent age from a single image},
+ booktitle = {IEEE International Conference on Computer Vision Workshops (ICCVW)},
+ year = {2015},
+ month = {December},
+}"
+imfdb,IMFDB,Indian Movie Face Database: A Benchmark for Face Recognition Under Wide Variations,,,,,,,,"National Conference on Computer Vision, Pattern Recognition, Image Processing and Graphics (NCVPRIPG)",,,2013,,,Shankar Setty,et al,,,,,,,,,,,imfdb.pdf,http://cvit.iiit.ac.in/projects/IMFDB/imfdb.pdf,"@InProceedings{imfdb,
+author = {Shankar Setty, Moula Husain, Parisa Beham, Jyothi Gudavalli, Menaka Kandasamy, Radhesyam Vaddi, Vidyagouri Hemadri, J C Karure, Raja Raju, Rajan, Vijay Kumar and C V Jawahar},
+title = {{I}ndian {M}ovie {F}ace {D}atabase: {A} {B}enchmark for {F}ace {R}ecognition {U}nder {W}ide {V}ariations},
+booktitle = {National Conference on Computer Vision, Pattern Recognition, Image Processing and Graphics (NCVPRIPG)},
+month = {Dec},
+year = {2013}
+} "
+imm_face,IMM Face Dataset,The IMM Face Database - An Annotated Dataset of 240 Face Images,,,,,,,,"Informatics and Mathematical Modelling, Technical University of Denmark, DTU",,5,2004,,,Michael M. Nordstrøm,Mads Larsen,Janusz Sierakowski,Mikkel B. Stegmann,,,,,,,,,imm3160.pdf,,
+immediacy,Immediacy,Multi-task Recurrent Neural Network for Immediacy Prediction,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+imsitu,imSitu,Situation Recognition: Visual Semantic Role Labeling for Image Understanding,,,,,,,,"(1) Computer Science & Engineering, University of Washington, Seattle, WA
+(2) Allen Institute for Artificial Intelligence (AI2), Seattle, WA",,,,,,Mark Yatskar,Luke Zettlemoyer,Ali Farhadi,,,,,,,,,,situations.pdf,https://homes.cs.washington.edu/~my89/publications/situations.pdf,
+inria_person,INRIA Pedestrian,Histograms of Oriented Gradients for Human Detection,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+iqiyi,iQIYI-VID dataset ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+jaffe,JAFFE,Coding Facial Expressions with Gabor Wavelets,,,,,,,,3rd IEEE International Conference on Automatic Face and Gesture Recognition,,,1998,200-205,,Michael J. Lyons,Shigeru Akamatsu,Miyuki Kamachi,Jiro Gyoba,,,,,,,,,fg98-1.pdf,http://www.kasrl.org/fg98-1.pdf,
+jiku_mobile,Jiku Mobile Video Dataset,The Jiku Mobile Video Dataset,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+jpl_pose,JPL-Interaction dataset,First-Person Activity Recognition: What Are They Doing to Me?,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+karpathy_instagram,Karpathy Instagram,,,,,OK no paper,,,,,,,,,,,,,,,,,,,,,,,,
+kdef,KDEF,The Karolinska Directed Emotional Faces – KDEF,,,,"this is the original paper from 1998 with this title, couldn't find it though, so not in nextcloud folder",,,,,,,,,,,,,,,,,,,,,,,,
+kin_face,UB KinFace,Genealogical Face Recognition based on UB KinFace Database,,,,"this is the original paper title, couldn't find it though, so not in nextcloud folder",,,,,,,,,,,,,,,,,,,,,,,,
+kin_face,UB KinFace,Kinship Verification through Transfer Learning,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+kin_face,UB KinFace,Understanding Kin Relationships in a Photo,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+kinectface,KinectFaceDB,KinectFaceDB: A Kinect Database for Face Recognition,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+kitti,KITTI,Vision meets Robotics: The KITTI Dataset,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+lag,LAG,Large Age-Gap Face Verification by Feature Injection in Deep Networks,,,,,,,,Pattern Recognition Letters,,,2017,36-42,90,Simone Bianco,,,,,,,,,,,,bianco2017large-age.pdf,http://www.ivl.disco.unimib.it/activities/large-age-gap-face-verification/,"@article{bianco2017large-age, author = {Bianco, Simone}, year = {2017}, pages = {36-42}, title = {Large Age-Gap Face Verification by Feature Injection in Deep Networks}, volume = {90}, journal = {Pattern Recognition Letters}, doi = {10.1016/j.patrec.2017.03.006}}"
+large_scale_person_search,Large Scale Person Search,End-to-End Deep Learning for Person Search,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+leeds_sports_pose,Leeds Sports Pose,Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+leeds_sports_pose_extended,Leeds Sports Pose Extended,Learning Effective Human Pose Estimation from Inaccurate Annotation,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+lfw,LFW,Labeled Faces in the Wild: A Survey,,,,,,,, ,,,,,,,,,,,,,,,,,,,,
+lfw,LFW,Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments,,,,,,,,"University of Massachusetts, Amherst, Technical Report ",,,2007,07-49,,Gary B. Huang,Manu Ramesh,Tamara Berg,Erik Learned-Miller,,,,,,,,various citaton depending on various datasets provided. Citation used here was first one published in 2007,lfw.pdf,http://vis-www.cs.umass.edu/lfw/lfw.pdf,
+lfw,LFW,Labeled Faces in the Wild: Updates and New Reporting Procedures,,,,,,,, ,,,,,,,,,,,,,,,,,,,,
+lfw_a,LFW-a,"Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics",,,,,,,,"IEEE Trans. on Pattern Analysis and Machine Intelligence (TPAMI), 33(10),",,,2011,,,Lior Wolf,Tal Hassner,Yaniv Taigman,,,,,,,,,,jpatchlbp.pdf,http://www.openu.ac.il/home/hassner/projects/Patchlbp/WolfHassnerTaigman_TPAMI11.pdf,Comply with any instructions specified for the original LFW data set
+lfw_p,LFWP,Localizing Parts of Faces Using a Consensus of Exemplars,,,,,,,,Proceedings of the 24th IEEE Conference on Computer Vision and Pattern Recognition (CVPR),,,2011,,,Peter N. Belhumeur,"David W. Jacobs,",David J. Kriegman,Neeraj Kumar,,,,,,,,,nk_cvpr2011_faceparts.pdf,http://neerajkumar.org/projects/face-parts/base/papers/nk_cvpr2011_faceparts.pdf,
+m2vts,m2vts,The M2VTS Multimodal Face Database (Release 1.00),,,,,,,,,,,,,,,,,,,,,,,,,,,,
+m2vtsdb_extended,xm2vtsdb,XM2VTSDB: The Extended M2VTS Database,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+mafl,MAFL,Facial Landmark Detection by Deep Multi-task Learning,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+mafl,MAFL,Learning Deep Representation for Face Alignment with Auxiliary Attributes,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+malf,MALF,Fine-grained Evaluation on Face Detection in the Wild.,,,,,,,,Proceedings of the 11th IEEE International Conference on Automatic Face and Gesture Recognition Conference and Workshops.,,,2015,,,Bin Yang*,Junjie Yan*,Zhen Lei,Stan Z. Li,,,,,,,,,faceevaluation15.pdf,http://www.cbsr.ia.ac.cn/faceevaluation/faceevaluation15.pdf,"@inproceedings{faceevaluation15,
+title={Fine-grained Evaluation on Face Detection in the Wild},
+author={Yang, Bin and Yan, Junjie and Lei, Zhen and Li, Stan Z},
+booktitle={Automatic Face and Gesture Recognition (FG), 11th IEEE International
+Conference on},
+year={2015},
+organization={IEEE}
+}"
+mapillary,Mapillary,The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+market_1501,Market 1501,Scalable Person Re-identification: A Benchmark,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+market1203,Market 1203,Orientation Driven Bag of Appearances for Person Re-identification,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+mars,MARS,MARS: A Video Benchmark for Large-Scale Person Re-identification,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+mcgill,McGill Real World,Hierarchical Temporal Graphical Model for Head Pose Estimation and Subsequent Attribute Classification in Real-World Videos,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+mcgill,McGill Real World,Robust Semi-automatic Head Pose Labeling for Real-World Face Video Sequences,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+meds,Multiple Encounter Dataset,,,,,OK no paper,,,,,,,,,,,,,,,,,,,,,,,,
+megaage,MegaAge,Quantifying Facial Age by Posterior of Age Comparisons,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+megaface,MegaFace,The MegaFace Benchmark: 1 Million Faces for Recognition at Scale ,,,,The 2 papers refer to respectively: MF and MF2 ,,,,IEEE Conference on Computer Vision and Pattern Recognition (CVPR),,,2017,,,"Nech, Aaron","Kemelmacher-Shlizerman, Ira",,,,,,,,,If you're participating or using data from Challenge 2 please cite:,,1705.00393.pdf,https://homes.cs.washington.edu/~kemelmi/ms.pdf,"@inproceedings{nech2017level,
+title={Level Playing Field For Million Scale Face Recognition},
+author={Nech, Aaron and Kemelmacher-Shlizerman, Ira},
+booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
+year={2017}
+}"
+megaface,MegaFace,Level Playing Field for Million Scale Face Recognition ,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+mifs,MIFS,Spoofing Faces Using Makeup: An Investigative Study,,,,,,,,"Proc. of 3rd IEEE International Conference on Identity, Security and Behavior Analysis (ISBA), (New Delhi, India)",,,2017,,,C. Chen,A. Dantcheva,T. Swearingen,A. Ross,,,,,,,,,,http://www.cse.msu.edu/~rossarun/pubs/ChenFaceMakeupSpoof_ISBA2017.pdf,
+mikki,MIKKI dataset,,,,,OK no paper,,,,,,,,,,,,,,,,,,,,,,,,
+mit_cbcl,MIT CBCL,Component-based Face Recognition with 3D Morphable Models,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+mit_cbcl_ped,CBCL,,,,,OK no paper,,,,,,,,,,,,,,,,,,,,,,,,
+mit_cbclss,CBCLSS,,,,,OK no paper,,,,,,,,,,,,,,,,,,,,,,,,
+miw,MIW,Automatic Facial Makeup Detection with Application in Face Recognition,,,,,,,,"Proc. of 6th IAPR International Conference on Biometrics (ICB), (Madrid, Spain)",,,2013,,,C. Chen,A. Dantcheva,A. Ross,,,,,,,,,,,https://www.cse.msu.edu/~rossarun/pubs/ChenMakeupDetection_ICB2013.pdf,
+mmi_facial_expression,MMI Facial Expression Dataset,WEB-BASED DATABASE FOR FACIAL EXPRESSION ANALYSIS,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+moments_in_time,Moments in Time,Moments in Time Dataset: one million videos for event understanding,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+morph,MORPH Commercial,MORPH: A Longitudinal Image Database of Normal Adult Age-Progression,,,,same pdf as morph non commercial,,,,"IEEE 7th International Conference on Automatic Face and Gesture Recognition, Southampton, UK",,,2006,341-345,,Karl Ricanek Jr,Tamirat Tesafaye,,,,,,,,,,,,,
+morph_nc,MORPH Non-Commercial,MORPH: A Longitudinal Image Database of Normal Adult Age-Progression,,,,same pdf as morph commercial,,,,"IEEE 7th International Conference on Automatic Face and Gesture Recognition, Southampton, UK",,,2006,341-345,,Karl Ricanek Jr,Tamirat Tesafaye,,,,,,,,,,,,,
+mot,MOT,Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics,,,,these 3 citations are from the MOT17,,,,,,,,,,,,,,,,,,,,,,,,
+mot,MOT,"Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking",,,,,,,,,,,,,,,,,,,,,,,,,,,,
+mot,MOT,Learning to associate: HybridBoosted multi-target tracker for crowded scene,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+mpi_large,Large MPI Facial Expression,The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+mpi_small,Small MPI Facial Expression,The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+mpii_gaze,MPIIGaze,Appearance-based Gaze Estimation in the Wild,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+mpii_human_pose,MPII Human Pose,2D Human Pose Estimation: New Benchmark and State of the Art Analysis,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+mr2,MR2,The MR2: A multi-racial mega-resolution database of facial stimuli,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+mrp_drone,MRP Drone,Investigating Open-World Person Re-identification Using a Drone,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+msceleb,MsCeleb,MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition,,,,,,,,European Conference on Computer Vision,,,2016,,,"Guo, Yandong","Zhang, Lei","Hu, Yuxiao","He, Xiaodong","Gao, Jianfeng",,,,,,,,,https://www.microsoft.com/en-us/research/wp-content/uploads/2016/08/MSCeleb-1M-a.pdf,"@INPROCEEDINGS { guo2016msceleb,
+ author = {Guo, Yandong and Zhang, Lei and Hu, Yuxiao and He, Xiaodong and Gao, Jianfeng},
+ title = {M{S}-{C}eleb-1{M}: A Dataset and Benchmark for Large Scale Face Recognition},
+ booktitle = {European Conference on Computer Vision},
+ year = {2016},
+ organization={Springer}}"
+msmt_17,MSMT17,Person Transfer GAN to Bridge Domain Gap for Person Re-Identification,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+mtfl,MTFL,Facial Landmark Detection by Deep Multi-task Learning,,,,same paper as in MAFL,,,,,,,,,,,,,,,,,,,,,,,,
+mtfl,MTFL,Learning Deep Representation for Face Alignment with Auxiliary Attributes,,,,same papers as in MAFL,,,,,,,,,,,,,,,,,,,,,,,,
+muct,MUCT,The MUCT Landmarked Face Database,,,,,,,,Pattern Recognition Association of South Africa,,,2010,,,,S. Milborrow,J. Morkel,F. Nicolls,,,,,,,,,,http://www.milbo.org/muct/The-MUCT-Landmarked-Face-Database.pdf,"@article{Milborrow10,
+ author={S. Milborrow and J. Morkel and F. Nicolls},
+ title={{The MUCT Landmarked Face Database}},
+ journal={Pattern Recognition Association of South Africa},
+ year=2010,
+ note={\url{http://www.milbo.org/muct}}
+}"
+mug_faces,MUG Faces,The MUG Facial Expression Database,,,,,,,,Procedings of 11th International Workshop on Image Analysis for Multimedia Interactive Services,12,4,2010,,,N. Aifanti,C. Papachristou,A. Delopoulos,,,,,,,,,,,,
+multi_pie,MULTIPIE,Multi-PIE,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+names_and_faces_news,News Dataset,Names and Faces,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+nd_2006,ND-2006,Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+nist_mid_mugshot,MID,,,,,OK no paper,,,,,,,,,,,,,,,,,,,,,,,,
+nova_emotions,Novaemötions Dataset,Crowdsourcing facial expressions for affective-interaction,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+nova_emotions,Novaemötions Dataset,Competitive affective gamming: Winning with a smile,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+nudedetection,Nude Detection,A Bag-of-Features Approach based on Hue-SIFT Descriptor for Nude Detection,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+orl,ORL,Parameterisation of a Stochastic Model for Human Face Identification,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+penn_fudan,Penn Fudan,Object Detection Combining Recognition and Segmentation,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+peta,PETA,Pedestrian Attribute Recognition At Far Distance,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+pets,PETS 2017,PETS 2017: Dataset and Challenge,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+pilot_parliament,PPB,Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+pipa,PIPA,Beyond Frontal Faces: Improving Person Recognition Using Multiple Cues,,,,,,,,arXiv:1501.05703 [cs.CV],,,2015,,,Ning Zhang, Manohar Paluri,Yaniv Taigman,Rob Fergus,Lubomir Bourdev,,,,,,,,,https://arxiv.org/pdf/1501.05703.pdf,"@inproceedings{piper,
+ Author = {Ning Zhang and Manohar Paluri and Yaniv Taigman and Rob Fergus and Lubomir Bourdev},
+ Title = {Beyond Frontal Faces: Improving Person Recognition Using Multiple Cues},
+ Eprint = {arXiv:1501.05703},
+ Year = {2015}}"
+pku,PKU,Swiss-System Based Cascade Ranking for Gait-based Person Re-identification,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+pku_reid,PKU-Reid,Orientation driven bag of appearances for person re-identification,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+pornodb,Pornography DB,Pooling in Image Representation: the Visual Codeword Point of View,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+precarious,Precarious,Expecting the Unexpected: Training Detectors for Unusual Pedestrians With Adversarial Imposters,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+prid,PRID,Person Re-Identification by Descriptive and Discriminative Classification,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+prw,PRW,Person Re-identification in the Wild,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+psu,PSU,Vision-based Analysis of Small Groups in Pedestrian Crowds,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+pubfig,PubFig,Attribute and Simile Classifiers for Face Verification,,,,,,,,International Conference on Computer Vision (ICCV),,,2009,,,Neeraj Kumar,Alexander C. Berg,Peter N. Belhumeur,Shree K. Nayar,,,,,,,,,,http://www.cs.columbia.edu/CAVE/publications/pdfs/Kumar_ICCV09.pdf,
+pubfig_83,pubfig83,Scaling Up Biologically-Inspired Computer Vision: A Case Study in Unconstrained Face Recognition on Facebook,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+put_face,Put Face,The PUT face database,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+qmul_grid,GRID,Multi-Camera Activity Correlation Analysis,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+qmul_grid,GRID,Time-delayed correlation analysis for multi-camera activity understanding,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+qmul_ilids,QMUL-iLIDS,,,,,OK no paper,,,,,,,,,,,,,,,,,,,,,,,,
+qmul_surv_face,QMUL-SurvFace,Surveillance Face Recognition Challenge,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+rafd,RaFD,Presentation and validation of the Radboud Faces Database,,,,,,,,Cognition & Emotion,,,2010,1377-1388,24.8,"Langner, O.","Dotsch, R."," Bijlstra, G.","Wigboldus, D.H.J.","Hawk, S.T.","van Knippenberg, A.",,,,,,,,http://dx.doi.org/10.1080/02699930903485076,DOI: 10.1080/02699930903485076
+raid,RAiD,Consistent Re-identification in a Camera Network,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+rap_pedestrian,RAP,A Richly Annotated Dataset for Pedestrian Attribute Recognition,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+reseed,ReSEED,ReSEED: Social Event dEtection Dataset,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+saivt,SAIVT SoftBio,A Database for Person Re-Identification in Multi-Camera Surveillance Networks,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+sarc3d,Sarc3D,SARC3D: a new 3D body model for People Tracking and Re-identification,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+scface,SCface,SCface – surveillance cameras face database,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+scut_fbp,SCUT-FBP,SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception,,,,,,,,arXiv:1511.02459 [cs.CV],,,2015,,,Duorui Xie,Lingyu Liang,Lianwen Jin,Jie Xu,Mengru Li,,,,,,,,,https://arxiv.org/ftp/arxiv/papers/1511/1511.02459.pdf,
+scut_head,SCUT HEAD,Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+sdu_vid,SDU-VID,A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+sdu_vid,SDU-VID,Local descriptors encoded by Fisher vectors for person re-identification,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+sdu_vid,SDU-VID,Person reidentification by video ranking,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+sheffield,Sheffield Face,Face Recognition: From Theory to Applications ,,,,OK no paper,,,,,,,,,,,,,,,,,,,,,,,,
+shinpuhkan_2014,Shinpuhkan 2014,Shinpuhkan2014: A Multi-Camera Pedestrian Dataset for Tracking People across Multiple Cameras,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+social_relation,Social Relation,From Facial Expression Recognition to Interpersonal Relation Prediction,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+social_relation,Social Relation,Learning Social Relation Traits from Face Images,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+soton,SOTON HiD,On a Large Sequence-Based Human Gait Database,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+sports_videos_in_the_wild,SVW,Sports Videos in the Wild (SVW): A Video Dataset for Sports Analysis,,,,,,,,"Proc. International Conference on Automatic Face and Gesture Recognition (FG 2015), Ljubljana, Slovenia",,,2015,,,Seyed Morteza Safdarnejad, Xiaoming Liu, Lalita Udpa, Brooks Andrus,"John Wood,",Dean Craven,,,,,,,,http://cvlab.cse.msu.edu/pdfs/Morteza_FG2015.pdf," @inproceedings{ sports-videos-in-the-wild-svw-a-video-dataset-for-sports-analysis,
+ author = { Seyed Morteza Safdarnejad and Xiaoming Liu and Lalita Udpa and Brooks Andrus and John Wood and Dean Craven },
+ title = { Sports Videos in the Wild (SVW): A Video Dataset for Sports Analysis },
+ booktitle = { Proc. International Conference on Automatic Face and Gesture Recognition },
+ address = { Ljubljana, Slovenia },
+ month = { May },
+ year = { 2015 },
+} "
+stair_actions,STAIR Action,STAIR Actions: A Video Dataset of Everyday Home Actions,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+stanford_drone,Stanford Drone,Learning Social Etiquette: Human Trajectory Prediction In Crowded Scenes,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+stickmen_buffy,Buffy Stickmen,Learning to Parse Images of Articulated Objects,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+stickmen_buffy,Buffy Stickmen,Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+stickmen_family,We Are Family Stickmen,We Are Family: Joint Pose Estimation of Multiple Persons,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+stickmen_pascal,Stickmen PASCAL,Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+stickmen_pascal,Stickmen PASCAL,Learning to Parse Images of Articulated Objects,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+stirling_esrc_3s,Stirling/ESRC 3D Face,,,,,no paper published yet (they say to cite the URL),,,,,,,,,,,,,,,,,,,,,,,,
+sun_attributes,SUN,The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+sun_attributes,SUN,"SUN Attribute Database: Discovering, Annotating, and Recognizing Scene Attributes",,,,,,,,,,,,,,,,,,,,,,,,,,,,
+svs,SVS,Pedestrian Attribute Classification in Surveillance: Database and Evaluation,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+texas_3dfrd,Texas 3DFRD,Texas 3D Face Recognition Database,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+texas_3dfrd,Texas 3DFRD,Anthropometric 3D Face Recognition,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+tiny_faces,TinyFace,Low-Resolution Face Recognition,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+tiny_images,Tiny Images,80 million tiny images: a large dataset for non-parametric object and scene recognition,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+towncenter,TownCenter,Stable Multi-Target Tracking in Real-Time Surveillance Video,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+tud_brussels,TUD-Brussels,Multi-Cue Onboard Pedestrian Detection,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+tud_campus,TUD-Campus, People-Tracking-by-Detection and People-Detection-by-Tracking,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+tud_crossing,TUD-Crossing, People-Tracking-by-Detection and People-Detection-by-Tracking,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+tud_motionpairs,TUD-Motionpairs,Multi-Cue Onboard Pedestrian Detection,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+tud_multiview,TUD-Multiview,Monocular 3D Pose Estimation and Tracking by Detection,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+tud_pedestrian,TUD-Pedestrian,People-Tracking-by-Detection and People-Detection-by-Tracking,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+tud_stadtmitte,TUD-Stadtmitte,Monocular 3D Pose Estimation and Tracking by Detection,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+tvhi,TVHI,High Five: Recognising human interactions in TV shows,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+twinsburg_twins,ND-TWINS-2009-2010,,,,,OK No Paper,,,,,,,,,,,,,,,,,,,,,,,,
+uccs,UCCS,Large scale unconstrained open set face database,,,,need research access to the paper,,,,,,,,,,,,,,,,,,,,,,,,
+ucf_101,UCF101,UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild,,,,,,,,CRCV-TR-12-01,,,2012,,,"Soomro, K.","Roshan Zamir, A.","Shah, M.",,,,,,,,2,,,,"@inproceedings{UCF101,
+ author = {Soomro, K. and Roshan Zamir, A. and Shah, M.},
+ booktitle = {CRCV-TR-12-01},
+ title = {{UCF101}: A Dataset of 101 Human Actions Classes From
+ Videos in The Wild},
+ year = {2012}}"
+ucf_crowd,UCF-CC-50,Multi-Source Multi-Scale Counting in Extremely Dense Crowd Images,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+ucf_selfie,UCF Selfie,How to Take a Good Selfie?,,,,,,,," in Proceedings of ACM Multimedia Conference 2015 (ACMMM 2015), Brisbane, Australia",,,2015,,,Mahdi M. Kalayeh,Misrak Seifu,Wesna LaLanne,Mubarak Shah,,,,,,,,,,,
+ufdd,UFDD,Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+umb,UMB,UMB-DB: A Database of Partially Occluded 3D Faces,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+umd_faces,UMD,UMDFaces: An Annotated Face Dataset for Training Deep Networks,,,,,,,,Arxiv preprint,,,2016,,,Ankan Bansal,Anirudh Nanduri,Carlos D Castillo,Rajeev Ranjan,Rama Chellappa,,,,,,1,,,https://arxiv.org/abs/1611.01484v2,"@article{bansal2016umdfaces,
+ title={UMDFaces: An Annotated Face Dataset for Training Deep Networks},
+ author={Bansal, Ankan and Nanduri, Anirudh and Castillo, Carlos D and Ranjan, Rajeev and Chellappa, Rama},
+ journal={arXiv preprint arXiv:1611.01484v2},
+ year={2016}
+ }"
+umd_faces,UMD,The Do's and Don'ts for CNN-based Face Verification,,,,,,,,Arxiv preprint,,,2017,,,Ankan Bansal,Carlos Castillo,"Rajeev Ranjan,",Rama Chellappa,,,,,,,2,,,https://arxiv.org/abs/1705.07426,"@article{bansal2017dosanddonts,
+ title = {The Do's and Don'ts for CNN-based Face Verification},
+ author = {Bansal, Ankan and Castillo, Carlos and Ranjan, Rajeev and Chellappa, Rama},
+ journal = {arXiv preprint arXiv:1705.07426},
+ year = {2017}
+ }"
+unbc_shoulder_pain,UNBC-McMaster Pain,PAINFUL DATA: The UNBC-McMaster Shoulder Pain Expression Archive Database,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+urban_tribes,Urban Tribes,From Bikers to Surfers: Visual Recognition of Urban Tribes,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+used,USED Social Event Dataset,USED: A Large-scale Social Event Detection Dataset,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+v47,V47,Re-identification of Pedestrians with Variable Occlusion and Scale,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+vadana,VADANA,VADANA: A dense dataset for facial image analysis,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+vgg_celebs_in_places,CIP,Faces in Places: Compound Query Retrieval ,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+vgg_faces,VGG Face,Deep Face Recognition,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+vgg_faces2,VGG Face2,VGGFace2: A dataset for recognising faces across pose and age,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+violent_flows,Violent Flows,Violent Flows: Real-Time Detection of Violent Crowd Behavior,,,,,,,,,,,2012,,,T. Hassner,,,,,,,,,,,,,,"T. Hassner, Y. Itcher, and O. Kliper-Gross, Violent Flows: Real-Time Detection of Violent Crowd Behavior, 3rd IEEE International Workshop on Socially Intelligent Surveillance and Monitoring (SISM) at the IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), Rhode Island, June 2012."
+viper,VIPeR,"Evaluating Appearance Models for Recognition, Reacquisition, and Tracking",,,,,,,,,,,,,,,,,,,,,,,,,,,,
+visual_phrases,Phrasal Recognition,Recognition using Visual Phrases,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+vmu,VMU,Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?,,,,,,,,"Proc. of 5th IEEE International Conference on Biometrics: Theory, Applications and Systems (BTAS), (Washington DC, USA)",,,2012,,,A. Dantcheva,C. Chen,A. Ross,,,,,,,,,,,https://www.cse.msu.edu/~rossarun/pubs/DantchevaChenRossFaceCosmetics_BTAS2012.pdf,
+voc,VOC,The PASCAL Visual Object Classes (VOC) Challenge,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+vqa,VQA,VQA: Visual Question Answering,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+ward,WARD,Re-identify people in wide area camera network,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+who_goes_there,WGT,Who Goes There? Approaches to Mapping Facial Appearance Diversity,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+wider,WIDER,Recognize Complex Events from Static Images by Fusing Deep Channels,,,,,,,,2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR),,,2015,,,"Xiong, Yuanjun and Zhu, Kai and Lin, Dahua and Tang, Xiaoou",,,,,,,,,,,,,,
+wider_attribute,WIDER Attribute,Human Attribute Recognition by Deep Hierarchical Contexts,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+wider_face,WIDER FACE,WIDER FACE: A Face Detection Benchmark,,,,,,,,IEEE Conference on Computer Vision and Pattern Recognition (CVPR),,,2016,,,"Yang, Shuo and Luo, Ping and Loy, Chen Change and Tang, Xiaoou",,,,,,,,,,,,,,"@inproceedings{yang2016wider,
+ Author = {Yang, Shuo and Luo, Ping and Loy, Chen Change and Tang, Xiaoou},
+ Booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
+ Title = {WIDER FACE: A Face Detection Benchmark},
+ Year = {2016}}"
+wildtrack,WildTrack,WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+wlfdb,,WLFDB: Weakly Labeled Face Databases,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+yale_faces,YaleFaces,Acquiring Linear Subspaces for Face Recognition under Variable Lighting,,,,"combined yale_faces, yale_faces_b, yale_faces_b_ext",,,,PAMI,,,2001,,,Athinodoros Georghiades,Peter Belhumeur,David Kriegman,,,,,,,,,,,,
+yale_faces,YaleFaces,From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose,,,,"combined yale_faces, yale_faces_b, yale_faces_b_ext",,,,,,,,,,,,,,,,,,,,,,,,
+yawdd,YawDD,YawDD: A Yawning Detection Dataset,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+yfcc_100m,YFCC100M,YFCC100M: The New Data in Multimedia Research,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+york_3d,UOY 3D Face Database,Three-Dimensional Face Recognition: An Eigensurface Approach,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+youtube_faces,YouTubeFaces,Face Recognition in Unconstrained Videos with Matched Background Similarity,,,,,,,,IEEE Conf. on Computer Vision and Pattern Recognition (CVPR),,,2011,,,Lior Wolf,Tal Hassner,Itay Maoz,,,,,,,,,,,,
+youtube_makeup,YMU,Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?,,,,,,,,"Proc. of 5th IEEE International Conference on Biometrics: Theory, Applications and Systems (BTAS), (Washington DC, USA)",,,2012,,,A. Dantcheva,C. Chen,A. Ross,,,,,,,,1,,,https://www.cse.msu.edu/~rossarun/pubs/DantchevaChenRossFaceCosmetics_BTAS2012.pdf,
+youtube_makeup,YMU,Automatic Facial Makeup Detection with Application in Face Recognition,,,,,,,,"Proc. of 6th IAPR International Conference on Biometrics (ICB), (Madrid, Spain)",,,2013,,,C. Chen,A. Dantcheva,A. Ross,,,,,,,,2,,,https://www.cse.msu.edu/~rossarun/pubs/ChenMakeupDetection_ICB2013.pdf,
+youtube_poses,YouTube Pose,Personalizing Human Video Pose Estimation,,,,The paper doesn't specifically introduce the dataset but it's the only one talking about it,,,,,,,,,,,,,,,,,,,,,,,, \ No newline at end of file
diff --git a/scraper/datasets/citations.csv b/scraper/datasets/citations.csv
new file mode 120000
index 00000000..c8019514
--- /dev/null
+++ b/scraper/datasets/citations.csv
@@ -0,0 +1 @@
+citations-20181207.csv \ No newline at end of file
diff --git a/datasets/scholar/entries/300 Faces in-the-Wild Challenge: The first facial landmark localization Challenge.csv b/scraper/datasets/scholar/entries/300 Faces in-the-Wild Challenge: The first facial landmark localization Challenge.csv
index 38f502f9..38f502f9 100644
--- a/datasets/scholar/entries/300 Faces in-the-Wild Challenge: The first facial landmark localization Challenge.csv
+++ b/scraper/datasets/scholar/entries/300 Faces in-the-Wild Challenge: The first facial landmark localization Challenge.csv
diff --git a/datasets/scholar/entries/300 faces In-the-wild challenge: Database and results.csv b/scraper/datasets/scholar/entries/300 faces In-the-wild challenge: Database and results.csv
index eaaf1a93..eaaf1a93 100644
--- a/datasets/scholar/entries/300 faces In-the-wild challenge: Database and results.csv
+++ b/scraper/datasets/scholar/entries/300 faces In-the-wild challenge: Database and results.csv
diff --git a/datasets/scholar/entries/A data-driven approach to cleaning large face datasets.csv b/scraper/datasets/scholar/entries/A data-driven approach to cleaning large face datasets.csv
index c1bf1f38..c1bf1f38 100644
--- a/datasets/scholar/entries/A data-driven approach to cleaning large face datasets.csv
+++ b/scraper/datasets/scholar/entries/A data-driven approach to cleaning large face datasets.csv
diff --git a/datasets/scholar/entries/A semi-automatic methodology for facial landmark annotation.csv b/scraper/datasets/scholar/entries/A semi-automatic methodology for facial landmark annotation.csv
index 31bf7b39..31bf7b39 100644
--- a/datasets/scholar/entries/A semi-automatic methodology for facial landmark annotation.csv
+++ b/scraper/datasets/scholar/entries/A semi-automatic methodology for facial landmark annotation.csv
diff --git a/datasets/scholar/entries/Annotated Facial Landmarks in the Wild: A Large-scale, Real-world Database for Facial Landmark Localization.csv b/scraper/datasets/scholar/entries/Annotated Facial Landmarks in the Wild: A Large-scale, Real-world Database for Facial Landmark Localization.csv
index 035e5e0f..035e5e0f 100644
--- a/datasets/scholar/entries/Annotated Facial Landmarks in the Wild: A Large-scale, Real-world Database for Facial Landmark Localization.csv
+++ b/scraper/datasets/scholar/entries/Annotated Facial Landmarks in the Wild: A Large-scale, Real-world Database for Facial Landmark Localization.csv
diff --git a/datasets/scholar/entries/Attribute and Simile Classifiers for Face Verification.csv b/scraper/datasets/scholar/entries/Attribute and Simile Classifiers for Face Verification.csv
index 1d6e856b..1d6e856b 100644
--- a/datasets/scholar/entries/Attribute and Simile Classifiers for Face Verification.csv
+++ b/scraper/datasets/scholar/entries/Attribute and Simile Classifiers for Face Verification.csv
diff --git a/datasets/scholar/entries/Automatic Facial Makeup Detection with Application in Face Recognition.csv b/scraper/datasets/scholar/entries/Automatic Facial Makeup Detection with Application in Face Recognition.csv
index 074471b7..074471b7 100644
--- a/datasets/scholar/entries/Automatic Facial Makeup Detection with Application in Face Recognition.csv
+++ b/scraper/datasets/scholar/entries/Automatic Facial Makeup Detection with Application in Face Recognition.csv
diff --git a/datasets/scholar/entries/Beyond Frontal Faces: Improving Person Recognition Using Multiple Cues.csv b/scraper/datasets/scholar/entries/Beyond Frontal Faces: Improving Person Recognition Using Multiple Cues.csv
index 0b36206c..0b36206c 100644
--- a/datasets/scholar/entries/Beyond Frontal Faces: Improving Person Recognition Using Multiple Cues.csv
+++ b/scraper/datasets/scholar/entries/Beyond Frontal Faces: Improving Person Recognition Using Multiple Cues.csv
diff --git a/datasets/scholar/entries/Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?.csv b/scraper/datasets/scholar/entries/Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?.csv
index 86c81060..86c81060 100644
--- a/datasets/scholar/entries/Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?.csv
+++ b/scraper/datasets/scholar/entries/Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?.csv
diff --git a/datasets/scholar/entries/Coding Facial Expressions with Gabor Wavelets.csv b/scraper/datasets/scholar/entries/Coding Facial Expressions with Gabor Wavelets.csv
index 36b9e0cf..36b9e0cf 100644
--- a/datasets/scholar/entries/Coding Facial Expressions with Gabor Wavelets.csv
+++ b/scraper/datasets/scholar/entries/Coding Facial Expressions with Gabor Wavelets.csv
diff --git a/datasets/scholar/entries/Comprehensive Database for Facial Expression Analysis.csv b/scraper/datasets/scholar/entries/Comprehensive Database for Facial Expression Analysis.csv
index e97d2c56..e97d2c56 100644
--- a/datasets/scholar/entries/Comprehensive Database for Facial Expression Analysis.csv
+++ b/scraper/datasets/scholar/entries/Comprehensive Database for Facial Expression Analysis.csv
diff --git a/datasets/scholar/entries/DEX: Deep EXpectation of apparent age from a single image.csv b/scraper/datasets/scholar/entries/DEX: Deep EXpectation of apparent age from a single image.csv
index b3728548..b3728548 100644
--- a/datasets/scholar/entries/DEX: Deep EXpectation of apparent age from a single image.csv
+++ b/scraper/datasets/scholar/entries/DEX: Deep EXpectation of apparent age from a single image.csv
diff --git a/datasets/scholar/entries/Deep expectation of real and apparent age from a single image without facial landmarks.csv b/scraper/datasets/scholar/entries/Deep expectation of real and apparent age from a single image without facial landmarks.csv
index ed47fdef..ed47fdef 100644
--- a/datasets/scholar/entries/Deep expectation of real and apparent age from a single image without facial landmarks.csv
+++ b/scraper/datasets/scholar/entries/Deep expectation of real and apparent age from a single image without facial landmarks.csv
diff --git a/datasets/scholar/entries/Distance Estimation of an Unknown Person from a Portrait .csv b/scraper/datasets/scholar/entries/Distance Estimation of an Unknown Person from a Portrait .csv
index 5cd26552..5cd26552 100644
--- a/datasets/scholar/entries/Distance Estimation of an Unknown Person from a Portrait .csv
+++ b/scraper/datasets/scholar/entries/Distance Estimation of an Unknown Person from a Portrait .csv
diff --git a/datasets/scholar/entries/Eigenfaces vs. fisherfaces: Recognition using class specific linear projection.csv b/scraper/datasets/scholar/entries/Eigenfaces vs. fisherfaces: Recognition using class specific linear projection.csv
index 252d269c..252d269c 100644
--- a/datasets/scholar/entries/Eigenfaces vs. fisherfaces: Recognition using class specific linear projection.csv
+++ b/scraper/datasets/scholar/entries/Eigenfaces vs. fisherfaces: Recognition using class specific linear projection.csv
diff --git a/datasets/scholar/entries/FDDB: A Benchmark for Face Detection in Unconstrained Settings.csv b/scraper/datasets/scholar/entries/FDDB: A Benchmark for Face Detection in Unconstrained Settings.csv
index eeb0adb1..eeb0adb1 100644
--- a/datasets/scholar/entries/FDDB: A Benchmark for Face Detection in Unconstrained Settings.csv
+++ b/scraper/datasets/scholar/entries/FDDB: A Benchmark for Face Detection in Unconstrained Settings.csv
diff --git a/datasets/scholar/entries/Face Recognition in Unconstrained Videos with Matched Background Similarity.csv b/scraper/datasets/scholar/entries/Face Recognition in Unconstrained Videos with Matched Background Similarity.csv
index 2f1e41af..2f1e41af 100644
--- a/datasets/scholar/entries/Face Recognition in Unconstrained Videos with Matched Background Similarity.csv
+++ b/scraper/datasets/scholar/entries/Face Recognition in Unconstrained Videos with Matched Background Similarity.csv
diff --git a/datasets/scholar/entries/Face Swapping: Automatically Replacing Faces in Photographs.csv b/scraper/datasets/scholar/entries/Face Swapping: Automatically Replacing Faces in Photographs.csv
index de202138..de202138 100644
--- a/datasets/scholar/entries/Face Swapping: Automatically Replacing Faces in Photographs.csv
+++ b/scraper/datasets/scholar/entries/Face Swapping: Automatically Replacing Faces in Photographs.csv
diff --git a/datasets/scholar/entries/Face detection, pose estimation and landmark localization in the wild.csv b/scraper/datasets/scholar/entries/Face detection, pose estimation and landmark localization in the wild.csv
index 43da8a92..43da8a92 100644
--- a/datasets/scholar/entries/Face detection, pose estimation and landmark localization in the wild.csv
+++ b/scraper/datasets/scholar/entries/Face detection, pose estimation and landmark localization in the wild.csv
diff --git a/datasets/scholar/entries/FaceTracer: A Search Engine for Large Collections of Images with Faces.csv b/scraper/datasets/scholar/entries/FaceTracer: A Search Engine for Large Collections of Images with Faces.csv
index a03e78e4..a03e78e4 100644
--- a/datasets/scholar/entries/FaceTracer: A Search Engine for Large Collections of Images with Faces.csv
+++ b/scraper/datasets/scholar/entries/FaceTracer: A Search Engine for Large Collections of Images with Faces.csv
diff --git a/datasets/scholar/entries/Fine-grained Evaluation on Face Detection in the Wild..csv b/scraper/datasets/scholar/entries/Fine-grained Evaluation on Face Detection in the Wild..csv
index 249cea3a..249cea3a 100644
--- a/datasets/scholar/entries/Fine-grained Evaluation on Face Detection in the Wild..csv
+++ b/scraper/datasets/scholar/entries/Fine-grained Evaluation on Face Detection in the Wild..csv
diff --git a/datasets/scholar/entries/From Facial Parts Responses to Face Detection: A Deep Learning Approach.csv b/scraper/datasets/scholar/entries/From Facial Parts Responses to Face Detection: A Deep Learning Approach.csv
index e22f032b..e22f032b 100644
--- a/datasets/scholar/entries/From Facial Parts Responses to Face Detection: A Deep Learning Approach.csv
+++ b/scraper/datasets/scholar/entries/From Facial Parts Responses to Face Detection: A Deep Learning Approach.csv
diff --git a/datasets/scholar/entries/Indian Movie Face Database: A Benchmark for Face Recognition Under Wide Variations.csv b/scraper/datasets/scholar/entries/Indian Movie Face Database: A Benchmark for Face Recognition Under Wide Variations.csv
index b9feb021..b9feb021 100644
--- a/datasets/scholar/entries/Indian Movie Face Database: A Benchmark for Face Recognition Under Wide Variations.csv
+++ b/scraper/datasets/scholar/entries/Indian Movie Face Database: A Benchmark for Face Recognition Under Wide Variations.csv
diff --git a/datasets/scholar/entries/Is the Eye Region More Reliable Than the Face? A Preliminary Study of Face-based Recognition on a Transgender Dataset.csv b/scraper/datasets/scholar/entries/Is the Eye Region More Reliable Than the Face? A Preliminary Study of Face-based Recognition on a Transgender Dataset.csv
index a47bb51d..a47bb51d 100644
--- a/datasets/scholar/entries/Is the Eye Region More Reliable Than the Face? A Preliminary Study of Face-based Recognition on a Transgender Dataset.csv
+++ b/scraper/datasets/scholar/entries/Is the Eye Region More Reliable Than the Face? A Preliminary Study of Face-based Recognition on a Transgender Dataset.csv
diff --git a/datasets/scholar/entries/Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments..csv b/scraper/datasets/scholar/entries/Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments..csv
index 23c90284..23c90284 100644
--- a/datasets/scholar/entries/Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments..csv
+++ b/scraper/datasets/scholar/entries/Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments..csv
diff --git a/datasets/scholar/entries/Large Age-Gap Face Verification by Feature Injection in Deep Networks.csv b/scraper/datasets/scholar/entries/Large Age-Gap Face Verification by Feature Injection in Deep Networks.csv
index 9cd388eb..9cd388eb 100644
--- a/datasets/scholar/entries/Large Age-Gap Face Verification by Feature Injection in Deep Networks.csv
+++ b/scraper/datasets/scholar/entries/Large Age-Gap Face Verification by Feature Injection in Deep Networks.csv
diff --git a/datasets/scholar/entries/Level Playing Field for Million Scale Face Recognition.csv b/scraper/datasets/scholar/entries/Level Playing Field for Million Scale Face Recognition.csv
index f7130a67..f7130a67 100644
--- a/datasets/scholar/entries/Level Playing Field for Million Scale Face Recognition.csv
+++ b/scraper/datasets/scholar/entries/Level Playing Field for Million Scale Face Recognition.csv
diff --git a/datasets/scholar/entries/Localizing Parts of Faces Using a Consensus of Exemplars.csv b/scraper/datasets/scholar/entries/Localizing Parts of Faces Using a Consensus of Exemplars.csv
index 0fa7a800..0fa7a800 100644
--- a/datasets/scholar/entries/Localizing Parts of Faces Using a Consensus of Exemplars.csv
+++ b/scraper/datasets/scholar/entries/Localizing Parts of Faces Using a Consensus of Exemplars.csv
diff --git a/datasets/scholar/entries/MORPH: A Longitudinal Image Database of Normal Adult Age-Progression.csv b/scraper/datasets/scholar/entries/MORPH: A Longitudinal Image Database of Normal Adult Age-Progression.csv
index a41ffc41..a41ffc41 100644
--- a/datasets/scholar/entries/MORPH: A Longitudinal Image Database of Normal Adult Age-Progression.csv
+++ b/scraper/datasets/scholar/entries/MORPH: A Longitudinal Image Database of Normal Adult Age-Progression.csv
diff --git a/datasets/scholar/entries/MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition.csv b/scraper/datasets/scholar/entries/MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition.csv
index 3af655d0..3af655d0 100644
--- a/datasets/scholar/entries/MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition.csv
+++ b/scraper/datasets/scholar/entries/MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition.csv
diff --git a/datasets/scholar/entries/Presentation and validation of the Radboud Faces Database.csv b/scraper/datasets/scholar/entries/Presentation and validation of the Radboud Faces Database.csv
index 89746fe9..89746fe9 100644
--- a/datasets/scholar/entries/Presentation and validation of the Radboud Faces Database.csv
+++ b/scraper/datasets/scholar/entries/Presentation and validation of the Radboud Faces Database.csv
diff --git a/datasets/scholar/entries/Pruning Training Sets for Learning of Object Categories.csv b/scraper/datasets/scholar/entries/Pruning Training Sets for Learning of Object Categories.csv
index c5540c9a..c5540c9a 100644
--- a/datasets/scholar/entries/Pruning Training Sets for Learning of Object Categories.csv
+++ b/scraper/datasets/scholar/entries/Pruning Training Sets for Learning of Object Categories.csv
diff --git a/datasets/scholar/entries/Recognize Complex Events from Static Images by Fusing Deep Channels.csv b/scraper/datasets/scholar/entries/Recognize Complex Events from Static Images by Fusing Deep Channels.csv
index d632534a..d632534a 100644
--- a/datasets/scholar/entries/Recognize Complex Events from Static Images by Fusing Deep Channels.csv
+++ b/scraper/datasets/scholar/entries/Recognize Complex Events from Static Images by Fusing Deep Channels.csv
diff --git a/datasets/scholar/entries/Robust face landmark estimation under occlusion .csv b/scraper/datasets/scholar/entries/Robust face landmark estimation under occlusion .csv
index c1245878..c1245878 100644
--- a/datasets/scholar/entries/Robust face landmark estimation under occlusion .csv
+++ b/scraper/datasets/scholar/entries/Robust face landmark estimation under occlusion .csv
diff --git a/datasets/scholar/entries/SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception.csv b/scraper/datasets/scholar/entries/SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception.csv
index 9215c287..9215c287 100644
--- a/datasets/scholar/entries/SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception.csv
+++ b/scraper/datasets/scholar/entries/SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception.csv
diff --git a/datasets/scholar/entries/Situation Recognition: Visual Semantic Role Labeling for Image Understanding.csv b/scraper/datasets/scholar/entries/Situation Recognition: Visual Semantic Role Labeling for Image Understanding.csv
index 503356df..503356df 100644
--- a/datasets/scholar/entries/Situation Recognition: Visual Semantic Role Labeling for Image Understanding.csv
+++ b/scraper/datasets/scholar/entries/Situation Recognition: Visual Semantic Role Labeling for Image Understanding.csv
diff --git a/datasets/scholar/entries/Spoofing Faces Using Makeup: An Investigative Study.csv b/scraper/datasets/scholar/entries/Spoofing Faces Using Makeup: An Investigative Study.csv
index 6fa46797..6fa46797 100644
--- a/datasets/scholar/entries/Spoofing Faces Using Makeup: An Investigative Study.csv
+++ b/scraper/datasets/scholar/entries/Spoofing Faces Using Makeup: An Investigative Study.csv
diff --git a/datasets/scholar/entries/Sports Videos in the Wild (SVW): A Video Dataset for Sports Analysis.csv b/scraper/datasets/scholar/entries/Sports Videos in the Wild (SVW): A Video Dataset for Sports Analysis.csv
index a9d02e41..a9d02e41 100644
--- a/datasets/scholar/entries/Sports Videos in the Wild (SVW): A Video Dataset for Sports Analysis.csv
+++ b/scraper/datasets/scholar/entries/Sports Videos in the Wild (SVW): A Video Dataset for Sports Analysis.csv
diff --git a/datasets/scholar/entries/The Do's and Don'ts for CNN-based Face Verification.csv b/scraper/datasets/scholar/entries/The Do's and Don'ts for CNN-based Face Verification.csv
index d494c943..d494c943 100644
--- a/datasets/scholar/entries/The Do's and Don'ts for CNN-based Face Verification.csv
+++ b/scraper/datasets/scholar/entries/The Do's and Don'ts for CNN-based Face Verification.csv
diff --git a/datasets/scholar/entries/The Extended Cohn-Kanade Dataset (CK+): A complete expression dataset for action unit and emotion-specified expression.csv b/scraper/datasets/scholar/entries/The Extended Cohn-Kanade Dataset (CK+): A complete expression dataset for action unit and emotion-specified expression.csv
index 399b667f..399b667f 100644
--- a/datasets/scholar/entries/The Extended Cohn-Kanade Dataset (CK+): A complete expression dataset for action unit and emotion-specified expression.csv
+++ b/scraper/datasets/scholar/entries/The Extended Cohn-Kanade Dataset (CK+): A complete expression dataset for action unit and emotion-specified expression.csv
diff --git a/datasets/scholar/entries/The MegaFace Benchmark: 1 Million Faces for Recognition at Scale.csv b/scraper/datasets/scholar/entries/The MegaFace Benchmark: 1 Million Faces for Recognition at Scale.csv
index 68dc8389..68dc8389 100644
--- a/datasets/scholar/entries/The MegaFace Benchmark: 1 Million Faces for Recognition at Scale.csv
+++ b/scraper/datasets/scholar/entries/The MegaFace Benchmark: 1 Million Faces for Recognition at Scale.csv
diff --git a/datasets/scholar/entries/UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild.csv b/scraper/datasets/scholar/entries/UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild.csv
index 9cbf069c..9cbf069c 100644
--- a/datasets/scholar/entries/UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild.csv
+++ b/scraper/datasets/scholar/entries/UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild.csv
diff --git a/datasets/scholar/entries/UMDFaces: An Annotated Face Dataset for Training Deep Networks.csv b/scraper/datasets/scholar/entries/UMDFaces: An Annotated Face Dataset for Training Deep Networks.csv
index adf44ba7..adf44ba7 100644
--- a/datasets/scholar/entries/UMDFaces: An Annotated Face Dataset for Training Deep Networks.csv
+++ b/scraper/datasets/scholar/entries/UMDFaces: An Annotated Face Dataset for Training Deep Networks.csv
diff --git a/scraper/expand-uni-lookup.py b/scraper/expand-uni-lookup.py
new file mode 100644
index 00000000..4ba531fd
--- /dev/null
+++ b/scraper/expand-uni-lookup.py
@@ -0,0 +1,42 @@
+import os
+import gzip
+import glob
+import json
+import math
+import operator
+import click
+from util import *
+
+@click.command()
+def expand_uni_lookup():
+ addresses = load_unexpanded_addresses()
+ write_csv('reports/all_institutions_sorted.csv', keys=None, rows=sorted(addresses.values(), key=lambda x: x[0]))
+
+def load_unexpanded_addresses():
+ data = read_csv('reports/all_institutions.csv', keys=None)
+ lookup = {}
+ for row in data:
+ name = row[0]
+ if len(name.strip()) > 10:
+ uni_name = name
+ for part in name.split(', '):
+ if 'universit' in part.lower():
+ uni_name = part
+ new_row = convert_row(row)
+ if uni_name != name:
+ print(uni_name)
+ new_row[0] = uni_name
+ uni_row = new_row.copy()
+ uni_row[1] = uni_name
+ if uni_name not in lookup:
+ lookup[uni_name] = uni_row
+ lookup[name] = new_row
+ return lookup
+
+def convert_row(row):
+ return [
+ row[0], row[0], row[3], row[1], row[2],
+ ]
+
+if __name__ == '__main__':
+ expand_uni_lookup()
diff --git a/scraper/ids.json b/scraper/ids.json
new file mode 100644
index 00000000..655d6ab6
--- /dev/null
+++ b/scraper/ids.json
@@ -0,0 +1 @@
+["b6b1b0632eb9d4ab1427278f5e5c46f97753c73d", "53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4", "0bd7c720e64a3c6e82f5fbe2a0183a2055e0d8bb", "fb4d1028bceba7ea676e6d7a6e767ecddd841985", "264f7ab36ff2e23a1514577a6404229d7fe1242b", "475e16577be1bfc0dd1f74f67bb651abd6d63524", "49e4f05fa98f63510de76e7abd8856ff8db0f38d", "3a1c40eced07d59a3ea7acda94fa833c493909c1", "c9876861cc0e33fffe8c3ce7484ae27d3b2eeb75", "7478c2351c75183527f258aecce6931be9c9d624", "c52cebeb90539ef424c35ea37ab004ca4bf09a9f", "faabc70615649169b559403d7f15d45fca537cbd", "f9a1132c777b24e9361b1bcbccb9fcfc737f3194", "9696ad8b164f5e10fcfe23aacf74bd6168aebb15", "1ef1f33c48bc159881c5c8536cbbd533d31b0e9a", "1e0bbd854f201c0cb965da4356d70dde232b5bc9", "a4543226f6592786e9c38752440d9659993d3cb3", "b4843913e5ba0f1bfc12f179587d3789676c3310", "7f03d628f3371c44a1248962dcb3740fb8573fd0", "51670395402625f1b1ac42df471e6e3fe27865ce", "e541dca453879907b22b766e8751e1a39c316756", "d9c0310203179d5328c4f1475fa4d68c5f0c7324", "92be73dffd3320fe7734258961fe5a5f2a43390e", "c0264118ec190cbe17503c7b4099905b1508d396", "322a4a915a77f05936a66b3a010e569243c9bb3f", "ca60d007af691558de377cab5e865b5373d80a44", "2cbb2c7c0f3f78574b5e8cf197774d5b556b1202", "8af9f7c920a87acb3ae127756f498a51b535790a", "916ca7000c022fbd97ea15cc0094f0e53c408b56", "49ecf784afddf7d5cf31c90340eef9380c261f04", "498fd231d7983433dac37f3c97fb1eafcf065268", "2f29b13fcf7a92a3cc438014068f11f9e45d62be", "4f618cbf19917ce5b8703adbc14e15b0bf0d35cc", "aed124c053b9c510487d68e0faf32aff2a84c3b5", "d660abfbe5f84c1c49f1e7174eb166b8b23e53c4", "90f6f13e402454a964f6c5707b91dd2e13f3a9c9", "b87abdb1b11b6b2dce1749bdd0b396796da7e7a6", "2354bb91a72b7667a03c68ea74ede7a5c90b3afc", "4ae67bec2fa4f5774215634c66bb67619a54e677", "ea8fa68b74ffefbe79a3576d7e4ae4365a1346ff", "573b687ad970e1931debbf366004c0983de28718", "734083b72b707dd2293ef2791f01506dec9f8a99", "140c95e53c619eac594d70f6369f518adfea12ef", "872dfdeccf99bbbed7c8f1ea08afb2d713ebe085", "146a7ecc7e34b85276dd0275c337eff6ba6ef8c0", 
"fbc2f5cf943f440b1ba2374ecf82d0176a44f1eb", "313d5eba97fe064bdc1f00b7587a4b3543ef712a", "4e32fbb58154e878dd2fd4b06398f85636fd0cf4", "5865b6d83ba6dbbf9167f1481e9339c2ef1d1f6b", "9ba4a5e0b7bf6e26563d294f1f3de44d95b7f682", "48a9241edda07252c1aadca09875fabcfee32871", "f3b84a03985de3890b400b68e2a92c0a00afd9d0", "86204fc037936754813b91898377e8831396551a", "06bd34951305d9f36eb29cf4532b25272da0e677", "b2cb335ded99b10f37002d09753bd5a6ea522ef1", "486840f4f524e97f692a7f6b42cd19019ee71533", "011e6146995d5d63c852bd776f782cc6f6e11b7b", "5fea59ccdab484873081eaa37af88e26e3db2aed", "2d748f8ee023a5b1fbd50294d176981ded4ad4ee", "f7824758800a7b1a386db5bd35f84c81454d017a", "02467703b6e087799e04e321bea3a4c354c5487d", "9cc8cf0c7d7fa7607659921b6ff657e17e135ecc", "377f2b65e6a9300448bdccf678cde59449ecd337", "18858cc936947fc96b5c06bbe3c6c2faa5614540", "1ffe20eb32dbc4fa85ac7844178937bba97f4bf0", "a357bc79b1ac6f2474ff6b9f001419745a8bc21c", "cd55fb30737625e86454a2861302b96833ed549d", "1e8eee51fd3bf7a9570d6ee6aa9a09454254689d", "31aa20911cc7a2b556e7d273f0bdd5a2f0671e0a", "59fc69b3bc4759eef1347161e1248e886702f8f7", "6909cd34a1eceba2140e2c02a842cefcecf33645", "d1a43737ca8be02d65684cf64ab2331f66947207", "5226296884b3e151ce317a37f94827dbda0b9d16", "80be8624771104ff4838dcba9629bacfe6b3ea09", "50b58becaf67e92a6d9633e0eea7d352157377c3", "cd6aaa37fffd0b5c2320f386be322b8adaa1cc68", "77362789d04db4c51be61eaffa4f43e03759e677", "ac2881bdf7b57dc1672a17b221d68a438d79fce8", "56fd4c05869e11e4935d48aa1d7abb96072ac242", "7e8c8b1d72c67e2e241184448715a8d4bd88a727", "99ced8f36d66dce20d121f3a29f52d8b27a1da6c", "72a7eb68f0955564e1ceafa75aeeb6b5bbb14e7e", "368e99f669ea5fd395b3193cd75b301a76150f9d", "47e14fdc6685f0b3800f709c32e005068dfc8d47", "0077cd8f97cafd2b389783858a6e4ab7887b0b6b", "1e6ed6ca8209340573a5e907a6e2e546a3bf2d28", "cb30c1370885033bc833bc7ef90a25ee0900c461", "052f994898c79529955917f3dfc5181586282cf8", "b503793943a17d2f569685cd17e86b5b4fffe3fd", "4e8168fbaa615009d1618a9d6552bfad809309e9", 
"a8117a4733cce9148c35fb6888962f665ae65b1e", "3cb2841302af1fb9656f144abc79d4f3d0b27380", "0c1d85a197a1f5b7376652a485523e616a406273", "c75e6ce54caf17b2780b4b53f8d29086b391e839", "aed321909bb87c81121c841b21d31509d6c78f69", "450c6a57f19f5aa45626bb08d7d5d6acdb863b4b", "30180f66d5b4b7c0367e4b43e2b55367b72d6d2a", "3504907a2e3c81d78e9dfe71c93ac145b1318f9c", "8334da483f1986aea87b62028672836cb3dc6205", "af4759f5e636b5d9049010d5f0e2b0df2a69cd72", "2c052a1c77a3ec2604b3deb702d77c41418c7d3e", "3b64efa817fd609d525c7244a0e00f98feacc8b4", "84c5b45328dee855c4855a104ac9c0558cc8a328", "6fbb179a4ad39790f4558dd32316b9f2818cd106", "d4f1eb008eb80595bcfdac368e23ae9754e1e745", "d44a93027208816b9e871101693b05adab576d89", "ebb3d5c70bedf2287f9b26ac0031004f8f617b97", "d28d32af7ef9889ef9cb877345a90ea85e70f7f1", "a29566375836f37173ccaffa47dea25eb1240187", "29f298dd5f806c99951cb434834bc8dcc765df18", "ddf099f0e0631da4a6396a17829160301796151c", "44078d0daed8b13114cffb15b368acc467f96351", "77037a22c9b8169930d74d2ce6f50f1a999c1221", "54bb25a213944b08298e4e2de54f2ddea890954a", "5981c309bd0ffd849c51b1d8a2ccc481a8ec2f5c", "a2b4a6c6b32900a066d0257ae6d4526db872afe2", "e465f596d73f3d2523dbf8334d29eb93a35f6da0", "3dfb822e16328e0f98a47209d7ecd242e4211f82", "291265db88023e92bb8c8e6390438e5da148e8f5", "04bb3fa0824d255b01e9db4946ead9f856cc0b59", "1ef4aac0ebc34e76123f848c256840d89ff728d0", "7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22", "bb4f83458976755e9310b241a689c8d21b481238", "d29eec5e047560627c16803029d2eb8a4e61da75", "2e3d081c8f0e10f138314c4d2c11064a981c1327", "173657da03e3249f4e47457d360ab83b3cefbe63", "5180df9d5eb26283fb737f491623395304d57497", "6dcf418c778f528b5792104760f1fbfe90c6dd6a", "abdd17e411a7bfe043f280abd4e560a04ab6e992", "b7bde76f52b3a8a20a05d2a01dec1d1c2a16e609", "3cf1f89d73ca4b25399c237ed3e664a55cd273a2", "e4232e8fd566a7289ccb33f732c9093c9beb84a6", "2d1f86e2c7ba81392c8914edbc079ac64d29b666", "03f7041515d8a6dcb9170763d4f6debd50202c2b", "6afe1f668eea8dfdd43f0780634073ed4545af23", 
"be72b20247fb4dc4072d962ced77ed89aa40372f", "a2e0966f303f38b58b898d388d1c83e40b605262", "14aed1b7c08c941b1d2ba6c1c2ffb6255c306c74", "3933e323653ff27e68c3458d245b47e3e37f52fd", "f27fd2a1bc229c773238f1912db94991b8bf389a", "9865fe20df8fe11717d92b5ea63469f59cf1635a", "7f6cd03e3b7b63fca7170e317b3bb072ec9889e0", "c1cc2a2a1ab66f6c9c6fabe28be45d1440a57c3d", "f5eb411217f729ad7ae84bfd4aeb3dedb850206a", "28cd46a078e8fad370b1aba34762a874374513a5", "15ebec3796a2e23d31c8c8ddf6d21555be6eadc6", "da2b2be4c33e221c7f417875a6c5c74043b1b227", "57178b36c21fd7f4529ac6748614bb3374714e91", "96e731e82b817c95d4ce48b9e6b08d2394937cf8", "8c7f4c11b0c9e8edf62a0f5e6cf0dd9d2da431fa", "870433ba89d8cab1656e57ac78f1c26f4998edfb", "3dfd94d3fad7e17f52a8ae815eb9cc5471172bc0", "6341274aca0c2977c3e1575378f4f2126aa9b050", "17479e015a2dcf15d40190e06419a135b66da4e0", "a0b1990dd2b4cd87e4fd60912cc1552c34792770", "772474b5b0c90629f4d9c223fd9c1ef45e1b1e66", "2c92839418a64728438c351a42f6dc5ad0c6e686", "ac5ab8f71edde6d1a2129da12d051ed03a8446a1", "4b3f425274b0c2297d136f8833a31866db2f2aec", "17ded725602b4329b1c494bfa41527482bf83a6f", "b2ae5c496fe01bb2e2dee107f75b82c6a2a23374", "14b016c7a87d142f4b9a0e6dc470dcfc073af517", "b7ec41005ce4384e76e3be854ecccd564d2f89fb", "93420d9212dd15b3ef37f566e4d57e76bb2fab2f", "def2983576001bac7d6461d78451159800938112", "4b605e6a9362485bfe69950432fa1f896e7d19bf", "8d3e95c31c93548b8c71dbeee2e9f7180067a888", "337dd4aaca2c5f9b5d2de8e0e2401b5a8feb9958", "7ce03597b703a3b6754d1adac5fbc98536994e8f", "6f22628d34a486d73c6b46eb071200a00e3abae3", "0aeb5020003e0c89219031b51bd30ff1bceea363", "48499deeaa1e31ac22c901d115b8b9867f89f952", "31b05f65405534a696a847dd19c621b7b8588263", "6ed27a41214716259676b6949999cdf4b12d0bdd", "99daa2839213f904e279aec7cef26c1dfb768c43", "71ca8b6e84c17b3e68f980bfb8cddc837100f8bf", "b908487b30002d5ae1ebd819880a713494a45a40", "8da32ff9e3759dc236878ac240728b344555e4e9", "c43ed9b34cad1a3976bac7979808eb038d88af84", "628a3f027b7646f398c68a680add48c7969ab1d9", 
"832a9584e85af1675d49ee35fd13283b21ce3a3f", "b971266b29fcecf1d5efe1c4dcdc2355cb188ab0", "d92084e376a795d3943df577d3b3f3b7d12eeae5", "61262450d4d814865a4f9a84299c24daa493f66e", "069bb452e015ef53f0ef30e9690e460ccc73cf03", "dc13229afbbc8b7a31ed5adfe265d971850c0976", "626913b8fcbbaee8932997d6c4a78fe1ce646127", "4efd58102ff46b7435c9ec6d4fc3dd21d93b15b4", "df51dfe55912d30fc2f792561e9e0c2b43179089", "b6f758be954d34817d4ebaa22b30c63a4b8ddb35", "dfaa547451aae219cd2ca7a761e6c16c1e1d0add", "0a34fe39e9938ae8c813a81ae6d2d3a325600e5c", "c220f457ad0b28886f8b3ef41f012dd0236cd91a", "2f7e9b45255c9029d2ae97bbb004d6072e70fa79", "ec39e9c21d6e2576f21936b1ecc1574dadaf291e", "84ae55603bffda40c225fe93029d39f04793e01f", "73ea06787925157df519a15ee01cc3dc1982a7e0", "c6382de52636705be5898017f2f8ed7c70d7ae96", "0db8e6eb861ed9a70305c1839eaef34f2c85bbaf", "eee06d68497be8bf3a8aba4fde42a13aa090b301", "e79bacc03152ea55343e6af97bcd17d8904cf5ef", "b5ca8d4f259f35c1f3edfd9f108ce29881e478b0", "a3201e955d6607d383332f3a12a7befa08c5a18c", "94f74c6314ffd02db581e8e887b5fd81ce288dbf", "4ac3cd8b6c50f7a26f27eefc64855134932b39be", "52d7eb0fbc3522434c13cc247549f74bb9609c5d", "a3f69a073dcfb6da8038607a9f14eb28b5dab2db", "19458454308a9f56b7de76bf7d8ff8eaa52b0173", "f3d9e347eadcf0d21cb0e92710bc906b22f2b3e7", "1fc249ec69b3e23856b42a4e591c59ac60d77118", "746c0205fdf191a737df7af000eaec9409ede73f", "75249ebb85b74e8932496272f38af274fbcfd696", "e6b45d5a86092bbfdcd6c3c54cda3d6c3ac6b227", "0081e2188c8f34fcea3e23c49fb3e17883b33551", "4d90d7834ae25ee6176c096d5d6608555766c0b1", "2b869d5551b10f13bf6fcdb8d13f0aa4d1f59fc4", "e4c3d5d43cb62ac5b57d74d55925bdf76205e306", "ef230e3df720abf2983ba6b347c9d46283e4b690", "5f771fed91c8e4b666489ba2384d0705bcf75030", "2241eda10b76efd84f3c05bdd836619b4a3df97e", "5e39deb4bff7b887c8f3a44dfe1352fbcde8a0bd", "58d76380d194248b3bb291b8c7c5137a0a376897", "7fb5006b6522436ece5bedf509e79bdb7b79c9a7", "28646c6220848db46c6944967298d89a6559c700", "cdae8e9cc9d605856cf5709b2fdf61f722d450c1", 
"5812d8239d691e99d4108396f8c26ec0619767a6", "2ad7cef781f98fd66101fa4a78e012369d064830", "cfd4004054399f3a5f536df71f9b9987f060f434", "e4c3587392d477b7594086c6f28a00a826abf004", "47190d213caef85e8b9dd0d271dbadc29ed0a953", "ce6d60b69eb95477596535227958109e07c61e1e", "727d03100d4a8e12620acd7b1d1972bbee54f0e6", "39ed31ced75e6151dde41944a47b4bdf324f922b", "f7ba77d23a0eea5a3034a1833b2d2552cb42fb7a", "38d8ff137ff753f04689e6b76119a44588e143f3", "282a3ee79a08486f0619caf0ada210f5c3572367", "49258cc3979103681848284470056956b77caf80", "0cb2dd5f178e3a297a0c33068961018659d0f443", "5905b4610389cd3b11a3a1ce06c05fee36a97f86", "3d78c144672c4ee76d92d21dad012bdf3c3aa1a0", "9627f28ea5f4c389350572b15968386d7ce3fe49", "4e7ed13e541b8ed868480375785005d33530e06d", "054738ce39920975b8dcc97e01b3b6cc0d0bdf32", "28d4e027c7e90b51b7d8908fce68128d1964668a", "582edc19f2b1ab2ac6883426f147196c8306685a", "0b82bf595e76898993ed4f4b2883c42720c0f277", "098363b29eef1471c494382338687f2fe98f6e15", "c866a2afc871910e3282fd9498dce4ab20f6a332", "87e6cb090aecfc6f03a3b00650a5c5f475dfebe1", "3b9b200e76a35178da940279d566bbb7dfebb787", "e11bc0f7c73c04d38b7fb80bd1ca886495a4d43c", "de79437f74e8e3b266afc664decf4e6e4bdf34d7", "6005a30bf103164fe3410185976b6b8b36537aca", "e988be047b28ba3b2f1e4cdba3e8c94026139fcf", "368d59cf1733af511ed8abbcbeb4fb47afd4da1c", "d6bdc70d259b38bbeb3a78db064232b4b4acc88f", "6097ea6fd21a5f86a10a52e6e4dd5b78a436d5bf", "62e913431bcef5983955e9ca160b91bb19d9de42", "acd4280453b995cb071c33f7c9db5760432f4279", "ceeb67bf53ffab1395c36f1141b516f893bada27", "fab60b3db164327be8588bce6ce5e45d5b882db6", "37619564574856c6184005830deda4310d3ca580", "91d513af1f667f64c9afc55ea1f45b0be7ba08d4", "eb027969f9310e0ae941e2adee2d42cdf07d938c", "3c97c32ff575989ef2869f86d89c63005fc11ba9", "140438a77a771a8fb656b39a78ff488066eb6b50", "a2af07176a38fe844b0e2fdf4abae65472628b38", "d03265ea9200a993af857b473c6bf12a095ca178", "1fb980e137b2c9f8781a0d98c026e164b497ddb1", "5df17c81c266cf2ebb0778e48e825905e161a8d9", 
"bbfe0527e277e0213aafe068113d719b2e62b09c", "e7906370eae8655fb69844ae1a3d986c9f37c902", "607aebe7568407421e8ffc7b23a5fda52650ad93", "3f204a413d9c8c16f146c306c8d96b91839fed0c", "5e0b691e9e5812dd3cb120a8d77619a45aa8e4c4", "73c9cbbf3f9cea1bc7dce98fce429bf0616a1a8c", "0d538084f664b4b7c0e11899d08da31aead87c32", "247cab87b133bd0f4f9e8ce5e7fc682be6340eac", "9901f473aeea177a55e58bac8fd4f1b086e575a4", "9207671d9e2b668c065e06d9f58f597601039e5e", "c8ca6a2dc41516c16ea0747e9b3b7b1db788dbdd", "33ae696546eed070717192d393f75a1583cd8e2c", "12d8730da5aab242795bdff17b30b6e0bac82998", "2ea247029ac1b8ded60023a369e8d259a8637bd2", "3b470b76045745c0ef5321e0f1e0e6a4b1821339", "79eb06c8acce1feef4a8654287d9cf5081e19600", "a60db9ca8bc144a37fe233b08232d9c91641cbb5", "084bd02d171e36458f108f07265386f22b34a1ae", "f7ae38a073be7c9cd1b92359131b9c8374579b13", "d2f2b10a8f29165d815e652f8d44955a12d057e6", "411dc8874fd7b3a9a4c1fd86bb5b583788027776", "e52272f92fa553687f1ac068605f1de929efafc2", "3646b42511a6a0df5470408bc9a7a69bb3c5d742", "06d7ef72fae1be206070b9119fb6b61ce4699587", "5e97a1095f2811e0bc188f52380ea7c9c460c896", "2cf3564d7421b661e84251d280d159d4b3ebb336", "fb87045600da73b07f0757f345a937b1c8097463", "5b0bf1063b694e4b1575bb428edb4f3451d9bf04", "df80fed59ffdf751a20af317f265848fe6bfb9c9", "5f448ab700528888019542e6fea1d1e0db6c35f2", "d723ebf3288126fa8cbb10ba7e2a6308aede857c", "8f772d9ce324b2ef5857d6e0b2a420bc93961196", "f095b5770f0ff13ba9670e3d480743c5e9ad1036", "55cfc3c08000f9d21879582c6296f2a864b657e8", "76dff7008d9b8bf44ec5348f294d5518877c6182", "69737cf25979bf8d8886f63c44ef5b7c95a066e7", "131e395c94999c55c53afead65d81be61cd349a4", "3e0035b447d0d4e11ceda45936c898256f321382", "7c6686fa4d8c990e931f1d16deabf647bf3b1986", "9b9ccd4954cf9dd605d49e9c3504224d06725ab7", "3fbe4a46b94cdacbf076a66da7ea7e6546e96025", "40b0fced8bc45f548ca7f79922e62478d2043220", "329d58e8fb30f1bf09acb2f556c9c2f3e768b15c", "0c20fd90d867fe1be2459223a3cb1a69fa3d44bf", "4a4f0a47de1567f3f913e2632921797df36b2525", 
"63fd7a159e58add133b9c71c4b1b37b899dd646f", "0f81b0fa8df5bf3fcfa10f20120540342a0c92e5", "90e56a8515c8c2ff16f5c79c69811e283be852c7", "2c17d36bab56083293456fe14ceff5497cc97d75", "da1477b4a65ae5a013e646b57e004f0cd60619a2", "837e99301e00c2244023a8a48ff98d7b521c93ac", "29c5a44e01d1126505471b2ab46163d598c871c7", "5f4219118556d2c627137827a617cf4e26242a6e", "91883dabc11245e393786d85941fb99a6248c1fb", "7234468db46b37e2027ab2978c67b48b8581f796", "82eb267b8e86be0b444e841b4b4ed4814b6f1942", "993374c1c9d58a3dec28160188ff6ac1227d02f5", "266ed43dcea2e7db9f968b164ca08897539ca8dd", "185263189a30986e31566394680d6d16b0089772", "76e2d7621019bd45a5851740bd2742afdcf62837", "01e27b6d1af4c9c2f50e2908b5f3b2331ff24846", "01beab8f8293a30cf48f52caea6ca0fb721c8489", "7360a2adcd6e3fe744b7d7aec5c08ee31094dfd4", "b4362cd87ad219790800127ddd366cc465606a78", "86b6afc667bb14ff4d69e7a5e8bb2454a6bbd2cd", "bf2f2696fdb4077b5ab18aa583f6376acadf2438", "86c053c162c08bc3fe093cc10398b9e64367a100", "303a7099c01530fa0beb197eb1305b574168b653", "891b10c4b3b92ca30c9b93170ec9abd71f6099c4", "d850aff9d10a01ad5f1d8a1b489fbb3998d0d80e", "3352426a67eabe3516812cb66a77aeb8b4df4d1b", "5b6bed112e722c0629bcce778770d1b28e42fc96", "6974449ce544dc208b8cc88b606b03d95c8fd368", "78a4cabf0afc94da123e299df5b32550cd638939", "4bbe460ab1b279a55e3c9d9f488ff79884d01608", "96c6f50ce8e1b9e8215b8791dabd78b2bbd5f28d", "fd809ee36fa6832dda57a0a2403b4b52c207549d", "b934f730a81c071dbfc08eb4c360d6fca2daa08f", "3be8f1f7501978287af8d7ebfac5963216698249", "963d0d40de8780161b70d28d2b125b5222e75596", "8cb403c733a5f23aefa6f583a17cf9b972e35c90", "5b5b9c6c67855ede21a60c834aea5379df7d51b7", "c3d3d2229500c555c7a7150a8b126ef874cbee1c", "b6052dc718c72f2506cfd9d29422642ecf3992ef", "0209389b8369aaa2a08830ac3b2036d4901ba1f1", "95e3b78eb4d5b469f66648ed4f37e45e0e01e63e", "9048732c8591a92a1f4f589b520a733f07578f80", "4c87aafa779747828054cffee3125fcea332364d", "b5f79df712ad535d88ae784a617a30c02e0551ca", "4a8480d58c30dc484bda08969e754cd13a64faa1", 
"41a6196f88beced105d8bc48dd54d5494cc156fb", "5befd78017c51dcc6cb1394e1144d50bc936c7c8", "bcc5cbbb540ee66dc8b9a3453b506e895d8395de", "be632b206f1cd38eab0c01c5f2004d1e8fc72880", "70a69569ba61f3585cd90c70ca5832e838fa1584", "0ec1673609256b1e457f41ede5f21f05de0c054f", "9d57c4036a0e5f1349cd11bc342ac515307b6720", "3d6f59e0f0e16d01b9c588a53d3b6b3b984e991e", "09f853ce12f7361c4b50c494df7ce3b9fad1d221", "bd8e2d27987be9e13af2aef378754f89ab20ce10", "fd615118fb290a8e3883e1f75390de8a6c68bfde", "4b936847f39094d6cb0bde68cea654d948c4735d", "65fc8393610fceec665726fe4e48f00dc90f55fb", "f27b8b8f2059248f77258cf8595e9434cf0b0228", "0a6a25ee84fc0bf7284f41eaa6fefaa58b5b329a", "710c3aaffef29730ffd909a63798e9185f488327", "2f489bd9bfb61a7d7165a2f05c03377a00072477", "36e8ef2e5d52a78dddf0002e03918b101dcdb326", "214072c84378802a0a0fde0b93ffb17bc04f3759", "1b8541ec28564db66a08185510c8b300fa4dc793", "bd13f50b8997d0733169ceba39b6eb1bda3eb1aa", "0b6a5200c33434cbfa9bf24ba482f6e06bf5fff7", "e4e07f5f201c6986e93ddb42dcf11a43c339ea2e", "c81b27932069e6c7016bfcaa5e861b99ac617934", "321c8ba38db118d8b02c0ba209be709e6792a2c7", "7d73adcee255469aadc5e926066f71c93f51a1a5", "afdf9a3464c3b015f040982750f6b41c048706f5", "6ef1996563835b4dfb7fda1d14abe01c8bd24a05", "84a20d0a47c0d826b77f73075530d618ba7573d2", "0e986f51fe45b00633de9fd0c94d082d2be51406", "aa4af9b3811db6a30e1c7cc1ebf079078c1ee152", "f070d739fb812d38571ec77490ccd8777e95ce7a", "84bc3ca61fc63b47ec3a1a6566ab8dcefb3d0015", "7d7be6172fc2884e1da22d1e96d5899a29831ad2", "ebd5df2b4105ba04cef4ca334fcb9bfd6ea0430c", "ac51d9ddbd462d023ec60818bac6cdae83b66992", "dd8ad6ce8701d4b09be460a6cf058fcd5318c700", "f0ae807627f81acb63eb5837c75a1e895a92c376", "1aa766bbd49bac8484e2545c20788d0f86e73ec2", "090ff8f992dc71a1125636c1adffc0634155b450", "12055b8f82d5411f9ad196b60698d76fbd07ac1e", "b3f18013079e0535dcda045ac5145c201287aec3", "7c47da191f935811f269f9ba3c59556c48282e80", "521482c2089c62a59996425603d8264832998403", "9ef2b2db11ed117521424c275c3ce1b5c696b9b3", 
"06c2dfe1568266ad99368fc75edf79585e29095f", "303065c44cf847849d04da16b8b1d9a120cef73a", "e97d824b8e80670d49d53c402f99e0fbeaafacdb", "27c6cd568d0623d549439edc98f6b92528d39bfe", "1d3e01d5e2721dcfafe5a3b39c54ee1c980350bb", "89497854eada7e32f06aa8f3c0ceedc0e91ecfef", "7aafeb9aab48fb2c34bed4b86755ac71e3f00338", "527dda77a3864d88b35e017d542cb612f275a4ec", "7cfbf90368553333b47731729e0e358479c25340", "2969f822b118637af29d8a3a0811ede2751897b5", "1a140d9265df8cf50a3cd69074db7e20dc060d14", "373c4d6af0ee233f0d669c3955c3a3ef2a009638", "501076313de90aca7848e0249e7f0e7283d669a1", "9a45abde5e2ad08dcb6c267fba30a02fcd2e516e", "2e8eb9dc07deb5142a99bc861e0b6295574d1fbd", "5fa6e4a23da0b39e4b35ac73a15d55cee8608736", "dd0262d63ab7e2a9ab90478394b9fb56d17ed71c", "1648cf24c042122af2f429641ba9599a2187d605", "1f8e44593eb335c2253d0f22f7f9dc1025af8c0d", "cc96eab1e55e771e417b758119ce5d7ef1722b43", "2fda461869f84a9298a0e93ef280f79b9fb76f94", "2d79dece7890121469f515a6e773ba0251fc2d98", "64cf86ba3b23d3074961b485c16ecb99584401de", "2a4153655ad1169d482e22c468d67f3bc2c49f12", "0717b47ab84b848de37dbefd81cf8bf512b544ac", "b6bbaa26f19ced1ce357d5bce903d772d5a49102", "7d1688ce0b48096e05a66ead80e9270260cb8082", "e9809c0c6bf33cfe232a63b0a13f9b1263c58cb8", "529b1f33aed49dbe025a99ac1d211c777ad881ec", "6a7e464464f70afea78552c8386f4d2763ea1d9c", "d42dbc995318e2936714c65c028700bfd3633049", "6fdf2f4f7ae589af6016305a17d460617d9ef345", "83295bce2340cb87901499cff492ae6ff3365475", "25982e2bef817ebde7be5bb80b22a9864b979fb0", "023be757b1769ecb0db810c95c010310d7daf00b", "57ebeff9273dea933e2a75c306849baf43081a8c", "38cbb500823057613494bacd0078aa0e57b30af8", "47382cb7f501188a81bb2e10cfd7aed20285f376", "ba1c0600d3bdb8ed9d439e8aa736a96214156284", "9aade3d26996ce7ef6d657130464504b8d812534", "390f3d7cdf1ce127ecca65afa2e24c563e9db93b", "336488746cc76e7f13b0ec68ccfe4df6d76cdc8f", "0f829fee12e86f980a581480a9e0cefccb59e2c5", "5c124b57699be19cd4eb4e1da285b4a8c84fc80d", "3c6cac7ecf546556d7c6050f7b693a99cc8a57b3", 
"e8523c4ac9d7aa21f3eb4062e09f2a3bc1eedcf7", "b1451721864e836069fa299a64595d1655793757", "788a7b59ea72e23ef4f86dc9abb4450efefeca41", "9a4c45e5c6e4f616771a7325629d167a38508691", "656a59954de3c9fcf82ffcef926af6ade2f3fdb5", "f633d6dc02b2e55eb24b89f2b8c6df94a2de86dd", "65126e0b1161fc8212643b8ff39c1d71d262fbc1", "40217a8c60e0a7d1735d4f631171aa6ed146e719", "31cdaaa7a47efe2ce0e78ebec29df4d2d81df265", "076d3fc800d882445c11b9af466c3af7d2afc64f", "2ad29b2921aba7738c51d9025b342a0ec770c6ea", "5f5906168235613c81ad2129e2431a0e5ef2b6e4", "3d62b2f9cef997fc37099305dabff356d39ed477", "0a0b9a9ff827065e4ff11022b0e417ddf1d3734e", "9ca7899338129f4ba6744f801e722d53a44e4622", "6342a4c54835c1e14159495373ab18b4233d2d9b", "ccf16bcf458e4d7a37643b8364594656287f5bfc", "deb89950939ae9847f0a1a4bb198e6dbfed62778", "1329206dbdb0a2b9e23102e1340c17bd2b2adcf5", "cf54e9776d799aa183d7466094525251d66389a4", "1b60b8e70859d5c85ac90510b370b501c5728620", "054756fa720bdcf1d320ad7a353e54ca53d4d3af", "7cdf3bc1de6c7948763c0c2dfa4384dcbd3677a0", "1b0a071450c419138432c033f722027ec88846ea", "c00f402b9cfc3f8dd2c74d6b3552acbd1f358301", "aad4c94fd55d33a3f3a5377bbe441c9474cdbd1e", "433a6d6d2a3ed8a6502982dccc992f91d665b9b3", "62f0d8446adee6a5e8102053a63a61af07ac4098", "2724ba85ec4a66de18da33925e537f3902f21249", "cc44f1d99b17a049a8186ec04c6a1ecf1906c3c8", "232b6e2391c064d483546b9ee3aafe0ba48ca519", "f7dea4454c2de0b96ab5cf95008ce7144292e52a", "940e5c45511b63f609568dce2ad61437c5e39683", "191d30e7e7360d565b0c1e2814b5bcbd86a11d41", "d4a5eaf2e9f2fd3e264940039e2cbbf08880a090", "d40c16285d762f7a1c862b8ac05a0fdb24af1202", "413160257096b9efcd26d8de0d1fa53133b57a3d", "68eb6e0e3660009e8a046bff15cef6fe87d46477", "159b1e3c3ed0982061dae3cc8ab7d9b149a0cdb1", "efeeb000107745e3fba04ee4676c0435eaf4257b", "cead57f2f7f7b733f4524c4b5a7ba7f271749b5f", "3802c97f925cb03bac91d9db13d8b777dfd29dcc", "5b2cfee6e81ef36507ebf3c305e84e9e0473575a", "1824b1ccace464ba275ccc86619feaa89018c0ad", "4f36c14d1453fc9d6481b09c5a09e91d8d9ee47a", 
"f5aee1529b98136194ef80961ba1a6de646645fe", "f33bd953d2df0a5305fc8a93a37ff754459a906c", "8a1ed5e23231e86216c9bdd62419c3b05f1e0b4d", "88e2efab01e883e037a416c63a03075d66625c26", "82cd5a5fec8a27887a35f1ecec684ec55eefad73", "70569810e46f476515fce80a602a210f8d9a2b95", "898ff1bafee2a6fb3c848ad07f6f292416b5f07d", "902cc7dd4ecfb2b6750905ef08bceeed24e1eeeb", "a022eff5470c3446aca683eae9c18319fd2406d5", "0c75c7c54eec85e962b1720755381cdca3f57dfb", "3fb3c7dd12561e9443ac301f5527d539b1f4574e", "136f92989e982ecf795cb27d65b48464eaec9323", "22a10d8d2a2cb9055557a3b335d6706100890afb", "0750c796467b6ef60b0caff5fb199337d54d431e", "31e57fa83ac60c03d884774d2b515813493977b9", "6f08885b980049be95a991f6213ee49bbf05c48d", "efd28eabebb9815e34031316624e7f095c7dfcfe", "84e6669b47670f9f4f49c0085311dce0e178b685", "aff92784567095ee526a705e21be4f42226bbaab", "e957d0673af7454dbf0a14813201b0e2570577e9", "37278ffce3a0fe2c2bbf6232e805dd3f5267eba3", "0b0958493e43ca9c131315bcfb9a171d52ecbb8a", "4d3805141f21e88b1d32cf39e65e42ccf4287504", "4a1d640f5e25bb60bb2347d36009718249ce9230", "9326d1390e8601e2efc3c4032152844483038f3f", "a26fd9df58bb76d6c7a3254820143b3da5bd584b", "113b06e70b7eead8ae7450bafe9c91656705024c", "0de91641f37b0a81a892e4c914b46d05d33fd36e", "5dc056fe911a3e34a932513abe637076250d96da", "771a6a80dd08212d83a4e976522e1ce108881401", "72e10a2a7a65db7ecdc7d9bd3b95a4160fab4114", "f423d8be5e13d9ef979debd3baf0a1b2e1d3682f", "1a85956154c170daf7f15f32f29281269028ff69", "e7697c7b626ba3a426106d83f4c3a052fcde02a4", "a7a3ec1128f920066c25cb86fbc33445ce613919", "66d087f3dd2e19ffe340c26ef17efe0062a59290", "cc4fc9a309f300e711e09712701b1509045a8e04", "2866cbeb25551257683cf28f33d829932be651fe", "47fdbd64edd7d348713253cf362a9c21f98e4296", "61f04606528ecf4a42b49e8ac2add2e9f92c0def", "0a6a818b634cca4eb75a37bfd23b5c5c21331b12", "c46a4db7247d26aceafed3e4f38ce52d54361817", "def569db592ed1715ae509644444c3feda06a536", "4140498e96a5ff3ba816d13daf148fffb9a2be3f", "54f169ad7d1f6c9ce94381e9b5ccc1a07fd49cc6", 
"cccd0edb5dafb3a160179a60f75fd8c835c0be82", "6156eaad00aad74c90cbcfd822fa0c9bd4eb14c2", "07d95be4922670ef2f8b11997e0c00eb643f3fca", "6d0fe30444c6f4e4db3ad8b02fb2c87e2b33c58d", "f6c70635241968a6d5fd5e03cde6907022091d64", "7384610776ec405dc84e47f2d353aa6d3cc03b1d", "293ade202109c7f23637589a637bdaed06dc37c9", "78598e7005f7c96d64cc47ff47e6f13ae52245b8", "a78025f39cf78f2fc66c4b2942fbe5bad3ea65fc", "ebc2a3e8a510c625353637e8e8f07bd34410228f", "a52d9e9daf2cb26b31bf2902f78774bd31c0dd88", "56e25056153a15eae2a6b10c109f812d2b753cee", "5c820e47981d21c9dddde8d2f8020146e600368f", "47109343e502a4097cb7efee54bc5fbb14598c05", "156cd2a0e2c378e4c3649a1d046cd080d3338bca", "414fdfe5f2e4f32a59bf15062b6e524cbf970637", "086655743dc5f16563c012ad43b2f9d06771d9c0", "500b92578e4deff98ce20e6017124e6d2053b451", "68021c333559ab95ca10e0dbbcc8a4840c31e157", "d6a9ea9b40a7377c91c705f4c7f206a669a9eea2", "0a6d344112b5af7d1abbd712f83c0d70105211d0", "a575009c1c25e27cdba8cc2c6930759a5416f37d", "e42998bbebddeeb4b2bedf5da23fa5c4efc976fa", "64e82b42e1c41250bdf9eb952686631287cfd410", "9e5809122c0880183c7e42c7edd997f92de6d81e", "3c086601ce0bac61047b5b931b253bd4035e1e7a", "5fba1b179ac80fee80548a0795d3f72b1b6e49cd", "5a10d74c7fc3294f76d771df413fe0b0b35f2ab5", "1bd8ab47177997acb3b0cca4b6a801e6e6ec3eac", "08a1fc55d03e4a73cad447e5c9ec79a6630f3e2d", "778c9f88839eb26129427e1b8633caa4bd4d275e", "1922ad4978ab92ce0d23acc4c7441a8812f157e5", "63ce37da6c0c789099307337bb913e1104473854", "fdf8e293a7618f560e76bd83e3c40a0788104547", "1f9ae272bb4151817866511bd970bffb22981a49", "0bc53b338c52fc635687b7a6c1e7c2b7191f42e5", "e50ee29ca12028cb903cd498bb9cacd41bd5ce3a", "b53485dbdd2dc5e4f3c7cff26bd8707964bb0503", "ad37d01c4787d169daff7da52e80e2018aab6358", "b1fdd4ae17d82612cefd4e78b690847b071379d3", "8a3c5507237957d013a0fe0f082cab7f757af6ee", "63c74794aedb40dd6b1650352a2da7a968180302", "6554ca3187b3cbe5d1221592eb546dfc11aac14b", "266cb58a82ce44ce8f0cffbfa89e44227096f424", "e4754afaa15b1b53e70743880484b8d0736990ff", 
"53bfe2ab770e74d064303f3bd2867e5bf7b86379", "fd4ac1da699885f71970588f84316589b7d8317b", "5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725", "0561bed18b6278434deae562d646e8adad72e75d", "8149c30a86e1a7db4b11965fe209fe0b75446a8c", "1c1a98df3d0d5e2034ea723994bdc85af45934db", "0332ae32aeaf8fdd8cae59a608dc8ea14c6e3136", "21f5f65e832c5472d6d08f6ee280d65ff0202e29", "055de0519da7fdf27add848e691087e0af166637", "af11769a427eb8daa8435b1ea3252531b4275db8", "5bd3d08335bb4e444a86200c5e9f57fd9d719e14", "6604fd47f92ce66dd0c669dd66b347b80e17ebc9", "b03d6e268cde7380e090ddaea889c75f64560891", "980266ad6807531fea94252e8f2b771c20e173b3", "445e3ba7eabcc55b5d24f951b029196b47830684", "8de06a584955f04f399c10f09f2eed77722f6b1c", "cad2bd940e7580490da9cc739e597d029e166504", "227b1a09b942eaf130d1d84cdcabf98921780a22", "9c25e89c80b10919865b9c8c80aed98d223ca0c6", "056ba488898a1a1b32daec7a45e0d550e0c51ae4", "14ff9c89f00dacc8e0c13c94f9fadcd90e4e604d", "a81da7746f4f58e7211e65f11e6520144f8c003d", "4c842fbd4c032dd4d931eb6ff1eaa2a13450b7af", "37c8514df89337f34421dc27b86d0eb45b660a5e", "79fd4baca5f840d6534a053b22e0029948b9075e", "8c643e1a61f3f563ec382c1e450f4b2b28122614", "68c1090f912b69b76437644dd16922909dd40d60", "71b07c537a9e188b850192131bfe31ef206a39a0", "375435fb0da220a65ac9e82275a880e1b9f0a557", "5da98f7590c08e83889f3cec7b0304b3610abf42", "5fa6f72d3fe16f9160d221e28da35c1e67a5d951", "9c1cdb795fd771003da4378f9a0585730d1c3784", "24e099e77ae7bae3df2bebdc0ee4e00acca71250", "5239001571bc64de3e61be0be8985860f08d7e7e", "c17a332e59f03b77921942d487b4b102b1ee73b6", "2d072cd43de8d17ce3198fae4469c498f97c6277", "c8a4b4fe5ff2ace9ab9171a9a24064b5a91207a3", "0eac652139f7ab44ff1051584b59f2dc1757f53b", "2cd426f10178bd95fef3dede69ae7b67e73bb70c", "044d9a8c61383312cdafbcc44b9d00d650b21c70", "daa4cfde41d37b2ab497458e331556d13dd14d0b", "1672becb287ae3eaece3e216ba37677ed045db55", "30cd39388b5c1aae7d8153c0ab9d54b61b474ffe", "862f2d84b4230d64ddb3e48967ad417089f2c291", "4b3eaedac75ac419c2609e131ea9377ba8c3d4b8", 
"faead8f2eb54c7bc33bc7d0569adc7a4c2ec4c3b", "353a89c277cca3e3e4e8c6a199ae3442cdad59b5", "2f7aa942313b1eb12ebfab791af71d0a3830b24c", "d8f72f50cbe6e0fa4025bc990b7e8a52cc6bbad9", "4b04247c7f22410681b6aab053d9655cf7f3f888", "b5da4943c348a6b4c934c2ea7330afaf1d655e79", "1a8ccc23ed73db64748e31c61c69fe23c48a2bb1", "88f2952535df5859c8f60026f08b71976f8e19ec", "f8c94afd478821681a1565d463fc305337b02779", "b52886610eda6265a2c1aaf04ce209c047432b6d", "4dd71a097e6b3cd379d8c802460667ee0cbc8463", "add50a7d882eb38e35fe70d11cb40b1f0059c96f", "cf736f596bf881ca97ec4b29776baaa493b9d50e", "4068574b8678a117d9a434360e9c12fe6232dae0", "6a4ebd91c4d380e21da0efb2dee276897f56467a", "91f0a95b8eb76e8fa24c8267e4a7a17815fc7a11", "44389d8e20cf9f1a8453f4ba033e03cff9bdfcbb", "0ac442bb570b086d04c4d51a8410fcbfd0b1779d", "75fd9acf5e5b7ed17c658cc84090c4659e5de01d", "35f921def890210dda4b72247849ad7ba7d35250", "8e24db957be2b643db464cc566bfabc650f1ffac", "f2d5bb329c09a5867045721112a7dad82ca757a3", "b1a3b19700b8738b4510eecf78a35ff38406df22", "03f98c175b4230960ac347b1100fbfc10c100d0c", "ed96f2eb1771f384df2349879970065a87975ca7", "c3ae4a4c9a9528791e36b64fea8d02b2fced7955", "0bab5213911c19c40e936b08d2f8fba01e286b85", "0eb45876359473156c0d4309f548da63470d30ee", "1246534c3104da030fdb9e041819257e0d57dcbf", "1fd7a17a6c630a122c1a3d1c0668d14c0c375de0", "bbf28f39e5038813afd74cf1bc78d55fcbe630f1", "6966d9d30fa9b7c01523425726ab417fd8428790", "22e2066acfb795ac4db3f97d2ac176d6ca41836c", "2c62b9e64aeddf12f9d399b43baaefbca8e11148", "bbc5f4052674278c96abe7ff9dc2d75071b6e3f3", "861802ac19653a7831b314cd751fd8e89494ab12", "f1b4583c576d6d8c661b4b2c82bdebf3ba3d7e53", "217de4ff802d4904d3f90d2e24a29371307942fe", "a820941eaf03077d68536732a4d5f28d94b5864a", "8875dcf2836315839741fd6944f249263408c27f", "63d865c66faaba68018defee0daf201db8ca79ed", "234c106036964131c0f2daf76c47ced802652046", "ef4b5bcaad4c36d7baa7bc166bd1712634c7ad71", "624496296af19243d5f05e7505fd927db02fd0ce", "d79f9ada35e4410cd255db39d7cc557017f8111a", 
"569988e19ab36582d4bd0ec98e344cbacf177f45", "cf5c9b521c958b84bb63bea9d5cbb522845e4ba7", "d50751da2997e7ebc89244c88a4d0d18405e8507", "038ce930a02d38fb30d15aac654ec95640fe5cb0", "2df4d05119fe3fbf1f8112b3ad901c33728b498a", "c847de9faa1f1a06d5647949a23f523f84aba7f3", "24c442ac3f6802296d71b1a1914b5d44e48b4f29", "f20ed84abcb1223f351a576ef10dfda9f277326b", "2a84f7934365f05b6707ea0ac225210f78e547af", "6144af24ce06af7d8cdd606e79cea5d6e73e2135", "ec1e03ec72186224b93b2611ff873656ed4d2f74", "621ff353960d5d9320242f39f85921f72be69dc8", "4157e45f616233a0874f54a59c3df001b9646cd7", "51b42da0706a1260430f27badcf9ee6694768b9b", "59d8fa6fd91cdb72cd0fa74c04016d79ef5a752b", "95f12d27c3b4914e0668a268360948bce92f7db3", "3d18ce183b5a5b4dcaa1216e30b774ef49eaa46f", "4c19690889fb3a12ec03e65bae6f5f20420b4ba4", "94aa8a3787385b13ee7c4fdd2b2b2a574ffcbd81", "034b3f3bac663fb814336a69a9fd3514ca0082b9", "7dc498d45f9fcb97acee552c6f587b65d5122c35", "60824ee635777b4ee30fcc2485ef1e103b8e7af9", "9cb7b3b14fd01cc2ed76784ab76304132dab6ff3", "edde81b2bdd61bd757b71a7b3839b6fef81f4be4", "b730908bc1f80b711c031f3ea459e4de09a3d324", "193debca0be1c38dabc42dc772513e6653fd91d8", "93d903d2e48d6a8ad3e3d2aff2e57622efe649cd", "fdb33141005ca1b208a725796732ab10a9c37d75", "d511e903a882658c9f6f930d6dd183007f508eda", "04ff69aa20da4eeccdabbe127e3641b8e6502ec0", "4f77a37753c03886ca9c9349723ec3bbfe4ee967", "564babec16b895d385d06d38545febd66ef02f35", "6d8c9a1759e7204eacb4eeb06567ad0ef4229f93", "c8292aa152a962763185e12fd7391a1d6df60d07", "89002a64e96a82486220b1d5c3f060654b24ef2a", "d1184939e06dbc3b495c883c53b684c6d6aa9e48", "45e7ddd5248977ba8ec61be111db912a4387d62f", "397257783ccc8cace5b67cc71e0c73034d559a4f", "dcf6ecd51ba135d432fcb7697fc6c52e4e7b0a43", "4836b084a583d2e794eb6a94982ea30d7990f663", "46b2ecef197b465abc43e0e017543b1af61921ac", "4c078c2919c7bdc26ca2238fa1a79e0331898b56", "5cfbeae360398de9e20e4165485837bd42b93217", "5e9ec3b8daa95d45138e30c07321e386590f8ec7", "f4ba07d2ae6c9673502daf50ee751a5e9262848f", 
"830e5b1043227fe189b3f93619ef4c58868758a7", "500fbe18afd44312738cab91b4689c12b4e0eeee", "a812368fe1d4a186322bf72a6d07e1cf60067234", "766728bac030b169fcbc2fbafe24c6e22a58ef3c", "363e5a0e4cd857e98de72a726ad6f80cea9c50ab", "72a1852c78b5e95a57efa21c92bdc54219975d8f", "47e8db3d9adb79a87c8c02b88f432f911eb45dc5", "72a03f06fcbf6af92fb3002e2fd9d43e75fd113e", "9ab963e473829739475b9e47514f454ab467a5af", "7fcfd72ba6bc14bbb90b31fe14c2c77a8b220ab2", "1c1f957d85b59d23163583c421755869f248ceef", "0341405252c80ff029a0d0065ca46d0ade943b03", "087002ab569e35432cdeb8e63b2c94f1abc53ea9", "3bc376f29bc169279105d33f59642568de36f17f", "e4fa062bff299a0bcef9f6b2e593c85be116c9f1", "6a931e7b7475635f089dd33e8d9a2899ae963804", "01b4b32c5ef945426b0396d32d2a12c69c282e29", "438e7999c937b94f0f6384dbeaa3febff6d283b6", "2c14c3bb46275da5706c466f9f51f4424ffda914", "26ac607a101492bc86fd81a141311066cfe9e2b5", "6c28b3550f57262889fe101e5d027912eb39564e", "45fbeed124a8956477dbfc862c758a2ee2681278", "62e834114b58a58a2ea2d7b6dd7b0ce657a64317", "2fb8d7601fc3ad637781127620104aaab5122acd", "2e091b311ac48c18aaedbb5117e94213f1dbb529", "22137ce9c01a8fdebf92ef35407a5a5d18730dde", "029b53f32079063047097fa59cfc788b2b550c4b", "50ccc98d9ce06160cdf92aaf470b8f4edbd8b899", "1ca815327e62c70f4ee619a836e05183ef629567", "7343f0b7bcdaf909c5e37937e295bf0ac7b69499", "f201baf618574108bcee50e9a8b65f5174d832ee", "a0fd85b3400c7b3e11122f44dc5870ae2de9009a", "ded968b97bd59465d5ccda4f1e441f24bac7ede5", "e1dd586842419f3c40c0d7b70c120cdea72f5b5c", "ebedc841a2c1b3a9ab7357de833101648281ff0e", "7c953868cd51f596300c8231192d57c9c514ae17", "ca537e1726a8d8c371a71bbd6d9098774ab51955", "21a2f67b21905ff6e0afa762937427e92dc5aa0b", "d22dd4a6752a5ffa40aebd260ff63d2c2a9e1da1", "4b6387e608afa83ac8d855de2c9b0ae3b86f31cc", "fe961cbe4be0a35becd2d722f9f364ec3c26bd34", "337b5f0e70e04349da17e8069936e2260390aca0", "5180c98815d7034e753a14ef6f54583f115da3aa", "ad5a35a251e07628dd035c68e44a64c53652be6b", "eb48a58b873295d719827e746d51b110f5716d6c", 
"6e38011e38a1c893b90a48e8f8eae0e22d2008e8", "8c7bceba769762126fd3dae78d622908bb83c3d3", "721e5ba3383b05a78ef1dfe85bf38efa7e2d611d", "36219a3196aac2bd149bc786f083957a6e6da125", "ed779cc4f026f6ac22f5ef0c34126138e1ebc8b2", "a6590c49e44aa4975b2b0152ee21ac8af3097d80", "64ba203c8cfc631d5f3f20419880523155fbeeb2", "109df0e8e5969ddf01e073143e83599228a1163f", "c01133cdc8dd24affdafc03086e419e02f8c64af", "295c44af9e9dc7d6bb7247f567f36001b8f6ad7b", "1e43d706d38cbacac563de9d0659230de00d73f2", "42e07485caa2211707c312afc6bc5f5f1ecb7f14", "845419c6183a9baead5fbaf4ad79694d247c4deb", "758572c5779a47e898caff7232af76eda253163b", "e61a7a02cd2b68043012231f8da1d7077e665040", "3da3fca7cb2e6c071b4b963446d8622b34a96ee7", "11ca91ee2c9974fbc8577207893381a46d79fd3d", "dae0a4ef50b347f145ed6de8f6c7fb94d350f937", "ff5f43762a4bc3286fcde0f45e23714fc23641b3", "0689b01927eb99d9a2520fdfd4b238ca4de1ca7e", "d33c9fe66bad7a90e34e8bc1332b73147a30d202", "7ce25a0852e2345be1a1bd02b8eb4cefb9d47073", "97cdf5887f47ee49c6fb0ec5dc1530ac5b19a36a", "691c034580aa9a7865c6eff95a819999f039976d", "41690be86b39c55a26ea056261513ddd726d6601", "829739639a3a3e31136582e9f97625599a020ff3", "7c0bd7ce51c62671d5ffc1506786b0b7861ce00a", "2e045673404592cb4c0ea136d7521bc6475b3b51", "509be79bd94d56ef7cd1af54e2be88983805bbe9", "0628ffefb911d1446914098d7c38a094c92c8a70", "d5c4a7e9270a68137b5d6611bf1a1478d60093f5", "f89aeea0b41bbb89275174eb8f84aa1832183f4b", "4ee380e444063f9b948a2fd82e5c11b97a570ad1", "a07e7443a51cf64a338500a834f5d80cee69d5d1", "e1e6a4146c082e5465cde38e9511de3d150b4ede", "2ee5d3c5197ee6f23bce8af515471c415e8c65d4", "76725cdaf2b5d78a5b95f2aff134593b78c42cab", "befd21f74248ca5f22f608043d64cdea67829737", "c961a96173f7c13883aff1f8b1628d3299e78857", "cb3ba84146d1324e1cdbde3764ca3b354ee09a2a", "d75d074c11a62780b836376249391da39660cad6", "283550fce0fdc0876db5df533625dffdfcd8d099", "1d1f83023686d43fd4e8805c8e517dffb02d118c", "7f6653e70c3860b537354875677722565e707efd", "22a5c5b366e56339b34a66ce2a4a106592656e40", 
"21392a527c80839067c0c814763f6b320ee6665e", "73e271ba44a183fb3d678ac2dbb025727da3725d", "63b0697ea7561029d648f5fc3cf940c29f2701b1", "90d8bf2199e7fd972dab3bd3dc6fb67536fa509b", "d9849dbe923b525f3039479178f43a5ebad27ea9", "47788b7a4700d1bbc972178f3680a028874afdb5", "9b1ec8b20a9ac4c9797c27b75cbee9302c3fd4f9", "4e510ca3e5811026a2853390b926633779446228", "26cd9c812c279347ae96db31cee1cbee0f646fa4", "f6eb6f13cb0af1c1d99bf2c5293f7fdfd2b002ec", "9eb111f6990d1494a3904f22be9836c202efd7d1", "0c5b03a6083950aacd9aee2d276a232e6ce3213c", "0a8cfe6bf63530d9ee402a6a6e1a7666008e43b7", "eef35aa64e16b3558c341599ca44aadcc22990a0", "41b24c890ae0ef99ff031c9c8549375af6025fb6", "3d1c9151929aece2c0cec96aa77f7d6ad30afbc9", "22baee247a44e3585f1dba2d01727b6be07d1944", "11373e3b60b0bb98da5075f85938004253dde3ee", "ac8269a3033ede3c1ce4381b1fef61375a54cfb9", "380d5a881f6f79e5253316bb1d6f56a117e12e0d", "309dd5555dad9dfc3f3889cf11b5dec8ab797da6", "a00a59d79ff863c5ac8f1e0fa4a015de4ed734c0", "a01ed75aad9b842c9bdc4bfbff6d90fa8b747466", "4fa85ff0c4f2c33a6f03f84f988cefe1b829a726", "1a229f1d21abe442520cba31a6e08663b3d31777", "10d0405b6c2072c0258430ab0b4f193f67db317c", "2911782a45e76936ade94216c1213d3d119cf4c1", "42c8d3a33a73ace976cdc6e0fc2a17c011d54021", "b438dc16bf97c1324ac66771efa67bdb9b853346", "b58672881dd8112cd3e6dedebcf8367ce2c9d78b", "38e509fc0d94e954a512128760f7a1f0d6fbc384", "a3916c952fb3ad2ef834fda08c23020f59f1e54e", "e73a14bbf3d00fb72b710b6c62639d65bf4ee415", "84af45a22535589053d0b00c9d6050c1150f9eaf", "ab69f49fedb6936ce04b2e9d1f161772b2f24b7d", "c3f5cf5594e66dbbeb1af72ddfe7d5e24a4f56c0", "ca581cd5bd0cecf346f2bc47f4b67bfee31b9da1", "9ac625867c50ce839d56a52ade92d3b971caff43", "c3305114a831c6507c819be70b23c068b07aefe9", "9f9b6224b103e9694444bbfb9efc01f67002014d", "7998babd6df1772532e594625d8c07eab2eefce1", "3d74d4177f5c1444b73221c12f359e858625a691", "048af40cbd0516eb935641065b2224f390731e2f", "81ed539ccd14f99ed4b2d126e4b6a0ccb4082031", "0740f71446e99273b89d89fa05ab439dc58c12e1", 
"853d5a725ba83397d1b75d6d6eef3eccf448d754", "c507eb104fbd4397da056b040b060d81d25d71dd", "89a9f4faa9a24690e140542be3c5db4a0db5648b", "c3599c91d0e3473178c1578b731b03e4be5d3ff1", "27c6b0883e51ec901e587963070eb2ad96871a33", "58f63d82c4f4c97e9f25b77a83ac04d084c3d56e", "8a85de5d0424cf7424887a7f1f0b36f37e65a10d", "751e352fe52946ca3d0f51956706313ce521b658", "75073faadb967823db48794e9cd54b681bb0729b", "381416c19b636c9bbab6ec5ebb1c1fa1be6faeca", "b8898949a2be260dfe9540b2a39816fc82595244", "711801297f23df9ac8ca1c2d3c9d7dfa2ed12043", "362d884ff43d8c7cd6bce184944cfc04cdd57c18", "424ca6d3746ad6975d0fbc7bd6152e4bcca4b281", "17dea513763c57dcd0e62085045fb5be6770c600", "3b23c39f21156f9ea86ad8bb2ca53b2cf56b4181", "ef75e38d567436a22171de2fb038b2f7c9ae2863", "bcaff57992ba8e66f4bdf49566b4f3d740a172d8", "06325345f9ffef958d9d7c704b28e6cbb3021b8c", "ebad62ebe00fe0f0c19ce04c3f7250506137fc71", "156db35e45030aa590625a3188c8784306d3453a", "45518c2350b9e727adf59f1626610917f71aea1e", "33e1398b73c9789debed1168536c93632c6f3f10", "93e4aafecd11ded7d691204bb576716da4e13cc7", "316486bada6023816c785c0d4eb401658737be3f", "f27c685b8f006cd9594266d98efc17bbf64d2d7f", "0828a2d546796192d84d0f622b9ee0be90e073b9", "147046f4e1b4d1272b482cda87364584db5c1526", "c36f933a46e1d1c51785295bb97154df9ceada36", "00796052277d41e2bb3a1284d445c1747aed295f", "34e5cd31cb05beed14a6e14b7c3158c0f67dd8cc", "606c5f3ed9befa7113bc28436a8a91f176934874", "6f7b74b131d83c5f97ca353cc0267da5910c5619", "36e21168155720d0210b8cc4ae031091d96701c8", "ac68dacd66ebe1c7eab56aaee9a8bef478be9a23", "c188bcd2e90273fe8078cb28dd7d4b131a2a46c9", "4ffd50725b9cdff4ab0f13c9182cf3fdb671e76c", "00434a4491b6710308c653c430784872849d1f36", "930663a0812a7a53963563b647c5957807d3d97d", "56917004d3cc3aca5668305fb33ddda4855dd519", "7650f44d9558928f2feca09467e87a75cbf85735", "b6bbd9a66d573e4b22fd0603acc707dbc5379648", "3f9c09e2fbefc9aeba6505f49317f9a2fc03a615", "3b1f325aef22021a75cf790fbf8cce74ea9786d7", "f1c76d97caa6f882764c1382c622a2dfb6aade43", 
"04daeb22aaab37f24345238fa23edeb7d40fbc55", "cba130014e6cc590a09aaeca0590623b496f126b", "bb0dc5b3da817a13795c11bcb4b7c80f2414f077", "c7713829e23ff40e27421cc34fbe0ef71b13684e", "d138270d3c06e85fa2c3da6f953818da4b72313a", "00ab6bb0df7fd605038d64eb5798b31481a39dd0", "01f42436042ddaa48998c87109cbe46cad6e7e52", "56a1cdad6b78c053085f8676bd7f5b8eb5e05435", "0a71b71421d8a33c41625963d19d5df85685dffc", "0f907be717c75a5630a4a86080118a3d21581ece", "ecd0a2e55f456b69243d1278fee15d8dbfc98c28", "a826646a8e4e8a746111d3a6915c8f0fcfcc3a00", "6ab6c1334c70db6e7705455a2db359e8d83042f9", "c8ee4812c32b0ad4e26d53b99e1514514bbcaf14", "ae6eb5fdef7fa235379cd741a79cdcc2f7102453", "2734b3a6345396499b2b7c6cc1b43fc7e9b375ee", "2b514d32318bb01ab04f75ef19ad1af63bce7943", "30052dfa6397cf9732a7385dc55f207a0ad24ca4", "6289d2c4c47d7101861153bfe78c92d16cf4581b", "1914d07f940dcae3d82642513718858925a26fc3", "b233634f8944080bce276b6d8962810699494c93", "0a84a63acef89a0f632ef08cb0b00af77ed8e7f5", "5b4ca29fd32b2d11b0e5ea7efbc34a34023915e2", "ec836d3239287400f17c06a230c2757def28fe3b", "9fd88e00acfc0d4b40644eeed28c0780b7025f66", "d80b26548469f5aeda2185138bca0816b1c4b10b", "beb63713d17df19fd7e7e337b2c87841725fdce4", "286b5b80bc76dbb63094a85951bb8e8895ee9f14", "fe5e1e869510d18d4c771b1fe924fca0a01f7222", "8e9ac116393057eca401c35aed19b10f04c265fd", "1a9e0bf9f7a9495bcdf1aeb214ccc9df9f2a9030", "30bb582c2c09abc7eb9dda7d9f80804eeb89f9d7", "ab0a1ad85ffd00df6640f0f5c3e7166a53563eac", "14a829e8a32ac6f0c7be194750128412644de67a", "007394c2bae389cf43e46db4567dafe206355c25", "656d0593bde5a9aecf0a791ec21cdb1fe05cc30a", "e5dc8be9b4678ae1f91764494acc96299cf44009", "cd331931b4657b39ee1ac57471cdc5a484661e50", "0c940ccba1bd9380a0ac723d791777fc1746a060", "a3dd5a595039dcc13d1a98930b7a68ee4307c991", "a565990d6b176bf9c82eec9354b0936fb141e631", "538a38193de5df19a095fc7187f5972de0461195", "9137ecfef874e36da780b6a14fe873dad5da8044", "6574eaab393aa8d674cd785fab16cae06a53151a", "0949f46d5db3169813ae23acafa345c6b8a37f08", 
"318d7a4bc9c7b1e3a01056815479564ed8ad78a4", "1542b8a1805d73a755d4b2eb402c5c861e6acd02", "3aadcd1e809d07965db79dbd4f026e89500c2a1f", "4add5fda38b0a651295ca2886a9a39ace48dcb3e", "9c31daf957e98eeabf58b22b7800e93a49a92d93", "4c05dc45b82b79e87f7b337ccf9f48d537c0e6e2", "e30bbeb927ccb1798e8a5372ad2c2a5f33d132b6", "eb871655e36fb2b837bbdacc4d98d903931f5caf", "cf64cdc889a4edaf641a307aa2b11d89d4d10a09", "a4acd75470152933faf9957f04579aa662a912a0", "a098721a7173b7079b9158c57818cbcca962b97e", "4861ee51f31fa27e7d1766de06591a27dc992085", "4d9e89d724145e6ab86df356764022210c1c0042", "069c9b3c7cf82310d3e06831208aea15f6fdfc32", "da90c9ff02e76a7e686ffe13bcdedbf949c86dfa", "92175241bd9b55b53403b9f6ffd3a6c956733490", "080a910dcb2541e0ca9384d27de3db7a0248f02a", "9bb1f695e399d030ba188b29b546cd9b26039715", "683260bf133c282439b91ac4427d42d73a5988b5", "e3b40ffd57a676aef377ef463849fd6b9a3d3b5d", "10195a163ab6348eef37213a46f60a3d87f289c5", "14e9158daf17985ccbb15c9cd31cf457e5551990", "3c4f6d24b55b1fd3c5b85c70308d544faef3f69a", "f46a526c423dd09a3f14f2c9a3838fb4f56fa730", "380dd0ddd5d69adc52defc095570d1c22952f5cc", "13719bbb4bb8bbe0cbcdad009243a926d93be433", "ec0104286c96707f57df26b4f0a4f49b774c486b", "b7845e0b0ce17cde7db37d5524ef2a61dee3e540", "fca9ebaa30d69ccec8bb577c31693c936c869e72", "d444368421f456baf8c3cb089244e017f8d32c41", "fffefc1fb840da63e17428fd5de6e79feb726894", "4522a7268facecf05769e90cae6555ac70c05cc8", "2cdc40f20b70ca44d9fd8e7716080ee05ca7924a", "d5b0e73b584be507198b6665bcddeba92b62e1e5", "c9c2de3628be7e249722b12911bebad84b567ce6", "6feafc5c1d8b0e9d65ebe4c1512b7860c538fbdc", "fd53be2e0a9f33080a9db4b5a5e416e24ae8e198", "305346d01298edeb5c6dc8b55679e8f60ba97efb", "9939498315777b40bed9150d8940fc1ac340e8ba", "4f1249369127cc2e2894f6b2f1052d399794919a", "a713a01971e73d0c3118d0409dc7699a24f521d6", "df7af280771a6c8302b75ed0a14ffe7854cca679", "cca9ae621e8228cfa787ec7954bb375536160e0d", "ab3fcd9d5fbd2d0ad48fba4005899cf13e08d07e", "8397956c7ad3bd24c6c6c0b38866e165367327c0", 
"30b15cdb72760f20f80e04157b57be9029d8a1ab", "8a991beca5bc864bbc1e26df953fd1fbd4dcb4bd", "50ff21e595e0ebe51ae808a2da3b7940549f4035", "8e0becfc5fe3ecdd2ac93fabe34634827b21ef2b", "0dd74bbda5dd3d9305636d4b6f0dad85d6e19572", "3ca5d3b8f5f071148cb50f22955fd8c1c1992719", "92b61b09d2eed4937058d0f9494d9efeddc39002", "2b632f090c09435d089ff76220fd31fd314838ae", "5b64584d6b01e66dfd0b6025b2552db1447ccdeb", "36939e6a365e9db904d81325212177c9e9e76c54", "56e079f4eb40744728fd1d7665938b06426338e5", "d00787e215bd74d32d80a6c115c4789214da5edb", "407bb798ab153bf6156ba2956f8cf93256b6910a", "b8e5800dfc590f82a0f7eedefce9abebf8088d12", "39c10888a470b92b917788c57a6fd154c97b421c", "775c15a5dfca426d53c634668e58dd5d3314ea89", "8f89aed13cb3555b56fccd715753f9ea72f27f05", "65d705bbcc10f42683503b3599327c816265d951", "fc516a492cf09aaf1d319c8ff112c77cfb55a0e5", "ed9d11e995baeec17c5d2847ec1a8d5449254525", "fc1e37fb16006b62848def92a51434fc74a2431a", "d454ad60b061c1a1450810a0f335fafbfeceeccc", "a2344004f0e1409c0c9473d071a5cfd74bff0a5d", "288964068cd87d97a98b8bc927d6e0d2349458a2", "d00e9a6339e34c613053d3b2c132fccbde547b56", "ae88996aad98bfa49a49d653fd9476e5982e982c", "28715fc79bd5ff8dd8b6fc68a4f2641e5d1b8a08", "c43dc4ae68a317b34a79636fadb3bcc4d1ccb61c", "5f94969b9491db552ffebc5911a45def99026afe", "28d99dc2d673d62118658f8375b414e5192eac6f", "e7b6887cd06d0c1aa4902335f7893d7640aef823", "975978ee6a32383d6f4f026b944099e7739e5890", "9e0285debd4b0ba7769b389181bd3e0fd7a02af6", "ea227e47b8a1e8f55983c34a17a81e5d3fa11cfd", "d1a8a46e1eb3769ed12f44075d63b49ccfe8c137", "36a3a96ef54000a0cd63de867a5eb7e84396de09", "5f0d4a0b5f72d8700cdf8cb179263a8fa866b59b", "6cefb70f4668ee6c0bf0c18ea36fd49dd60e8365", "dfbf941adeea19f5dff4a70a466ddd1b77f3b727", "1d776bfe627f1a051099997114ba04678c45f0f5", "cb27b45329d61f5f95ed213798d4b2a615e76be2", "b972683d702a65d3ee7a25bc931a5890d1072b6b", "fcc6fd9b243474cd96d5a7f4a974f0ef85e7ddf7", "00a967cb2d18e1394226ad37930524a31351f6cf", "26c7eda262dfda1c3a3597a3bf1f2f1cc4013425", 
"91a1945b9c40af4944a6cdcfe59a0999de4f650a", "a4cd3fc63ddc8468d3f684f32cb0578e41fed226", "2e231f1e7e641dd3619bec59e14d02e91360ac01", "00823e6c0b6f1cf22897b8d0b2596743723ec51c", "632fa986bed53862d83918c2b71ab953fd70d6cc", "d11d0151618987ce00a88ceda55d35f0bb89122e", "5364e58ba1f4cdfcffb247c2421e8f56a75fad8d", "dca2bb023b076de1ccd0c6b8d71faeb3fccb3978", "bb2944569a2b3d3b8340b36d4903c8cddf20047f", "7f21a7441c6ded38008c1fd0b91bdd54425d3f80", "f5fae7810a33ed67852ad6a3e0144cb278b24b41", "f77c9bf5beec7c975584e8087aae8d679664a1eb", "a9ad8f6c6bf110485921b17f9790241b1548487c", "1190cba0cae3c8bb81bf80d6a0a83ae8c41240bc", "86d0127e1fd04c3d8ea78401c838af621647dc95", "ad01c5761c89fdf523565cc0dec77b9a6ec8e694", "d80a3d1f3a438e02a6685e66ee908446766fefa9", "d4d1ac1cfb2ca703c4db8cc9a1c7c7531fa940f9", "2b60fe300735ea7c63f91c1121e89ba66040b833", "493c8591d6a1bef5d7b84164a73761cefb9f5a25", "d5444f9475253bbcfef85c351ea9dab56793b9ea", "b8c08c1330779283b3fbf06d133faf8bd55ea941", "79c959833ff49f860e20b6654dbf4d6acdee0230", "af6e351d58dba0962d6eb1baf4c9a776eb73533f", "1cfca6b71b0ead87bbb79a8614ddec3a10100faa", "a06b6d30e2b31dc600f622ab15afe5e2929581a7", "8ff8c64288a2f7e4e8bf8fda865820b04ab3dbe8", "1b248ed8e7c9514648cd598960fadf9ab17e7fe8", "22bebedc1a5f3556cb4f577bdbe032299a2865e8", "7fab17ef7e25626643f1d55257a3e13348e435bd", "af6cae71f24ea8f457e581bfe1240d5fa63faaf7", "633c851ebf625ad7abdda2324e9de093cf623141", "ec05078be14a11157ac0e1c6b430ac886124589b", "2149d49c84a83848d6051867290d9c8bfcef0edb", "5366573e96a1dadfcd4fd592f83017e378a0e185", "81e628a23e434762b1208045919af48dceb6c4d2", "8c2233d763deb01761abe72b9b3dbb0b115916d3", "357963a46dfc150670061dbc23da6ba7d6da786e", "4560491820e0ee49736aea9b81d57c3939a69e12", "0ba402af3b8682e2aa89f76bd823ddffdf89fa0a", "c62c07de196e95eaaf614fb150a4fa4ce49588b4", "97540905e4a9fdf425989a794f024776f28a3fa9", "cca476114c48871d05537abb303061de5ab010d6", "df31e9c882dfb3ea5a3abe3b139ceacb1d90a302", "6601a96220005883572fad5aa6b4632e413c8e5e", 
"377c6563f97e76a4dc836a0bd23d7673492b1aae", "1277b1b8b609a18b94e4907d76a117c9783a5373", "523854a7d8755e944bd50217c14481fe1329a969", "9b0489f2d5739213ef8c3e2e18739c4353c3a3b7", "6e911227e893d0eecb363015754824bf4366bdb7", "51f626540860ad75b68206025a45466a6d087aa6", "4562ea84ebfc8d9864e943ed9e44d35997bbdf43", "02567fd428a675ca91a0c6786f47f3e35881bcbd", "b44f03b5fa8c6275238c2d13345652e6ff7e6ea9", "ff60d4601adabe04214c67e12253ea3359f4e082", "10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5", "b96ae619d7097c2492006c28267ba010578c1c6b", "30c8a2b6a505645b9f93dcc4d365eee6f46c4c37", "e941ee2d584938e6509c0676466023f8b43b9486", "4eb8030b31ff86bdcb063403eef24e53b9ad4329", "ab368594b9bd569e8d0fcf5c6010f1c31e3aa39e", "ccba0c4fafc82288a80cd38bb6ef3ac223198156", "9ee86caa5f52fc0ad013922e6320765013464db9", "c7eb127e9cd67d645b9a7f59c03bc73183faefeb", "71f969fdc6990b21536c5662c52110d7fdb29028", "1c80bc91c74d4984e6422e7b0856cf3cf28df1fb", "88274f2351f3c6006809891ed405119205a790af", "01d707c23f86971fd2929bd1b18c57892ff502bd", "103590b36d026928a90eae7ade9d7da318202168", "5cfe70ccacd302938620662190c573cb6f19bdfb", "3cb52304ec2aa2fd4437ce0e170a0b16409c0cdb", "6a4ac9ac5ddfeeb8adcff1795eccd39de25a00c4", "52e270ca8f5b53eabfe00a21850a17b5cc10f6d5", "db36e682501582d1c7b903422993cf8d70bb0b42", "30329e87c9a3e2510bad8e36104f1a2df5a9541e", "2f02328dc09396e37e159141c5e21bef3e6ff06e", "1b2568de7363a9f46094b9cac82f4fe2ec1a4f56", "3a4ecdf7d73b0fb392763048aa834a537a495537", "d1b5b3e4b803dc4e50c5b80c1bc69c6d98751698", "8562ca7f86e7cc144aa2d34a9cce41431b9e13e9", "d9a39c6d5bd46bebe3cea4252d2ee19eb86c6bec", "b36faeba2383cef082f9f3f509dd2098a926e2f5", "dab134705fc67f560b4276a39a8219ed0b3d572e", "ccd2152c77ae65e4d3d0988990f6e243133a5efc", "e999ba2a263de4993a7de4541f45c1c544a83a04", "14d96bbd718f20ef2115025148283584382286ea", "1523ca87c74e967870e2aab738d9b25c15c03e8a", "ceb02a8f874c84ece88fcc7be1530a581b1cd1b0", "ef17e8f2f5367b528fb850797808f26d9cbcbb9e", "2776d11afa421ec7403606f902dc757de95583b2", 
"472500bb0fc49354445b25f851905dda621a42d0", "7e0c82b3225a12752dd1062292297b6201ca8d6e", "20523cbb076af203ae2a293074a0445fe95309e9", "f040e4fcedca0c07788ecb6e92ad246b9c1697a9", "502a1a3f24539a63cb253f6c66ccc08495be1500", "4d7b66a123135d37689005816aa15ab31167b6d3", "67f8af0085eb6b68a41bb2044f4ddca38b78b109", "556f9e77bdd170f18c1a4ffd492d1f0e6b7ee289", "694d831156293642e63103cd1921eed37e77a68f", "e3809bca0d39f8ab06594e2ecbb987fea148cb77", "1c9b9f3b3bf89f1c051c07411c226719aa468923", "98143f005c6d18ecb9e5b21a8ac6fb9f0b6b5005", "cf216fcd4cf537e53b9ed4f46e59c445e845cfc5", "35af45f799c65d21bbb3cd24f666de861bad33b0", "2a93ce4284c7f8605e1d9bc0a8b86036073ebf61", "d7f19812ee77e508b314d0ac6ab49d05ac81e0d1", "0534304bc09e92b2cfa0a8da59cfcf0be84d70a4", "f4df920748facebeb10c8037a9c9ef18a9d88564", "5f946db224f4d968b93b575eec65e18350aae893", "34f2aef5aa519d20379037259645d4c84526662c", "1a7181252e411ccd63dbc16a9b39bc660d302a66", "ce56be1acffda599dec6cc2af2b35600488846c9", "b8612b5c1aa0970b5d99340ad19d7fcede1b0854", "d4ebf0a4f48275ecd8dbc2840b2a31cc07bd676d", "e93a67a091e6d51a762a5e9754d3a12654508f31", "c38096dea0ee2d29f19537ee7e1ebe0805fbe0c6", "b7b88502be046874a559b070b98304dec76b9dab", "0c06d6e0336f6cb6c80beb445ec5fec51b5c735d", "265ffb2d850ce9ed4cfb2646b253611d07d0b19f", "9a15661ab810a742095f33180c481bc7a1d37308", "f5d1f955977fcd9ba6d07d5c5a8c0b2ced6ffdc2", "53590987ec7f83e2c027249d7b0bc116b9ae934d", "f0f501e1e8726148d18e70c8e9f6feea9360d119", "ecf54b9bece3239a63c788191ece3a2f678131de", "5828783a3b810b4941664a05449a94274f3ac63c", "8b454f38e35e1ee51bd1aa1b16d3d8db5d68fc05", "eb689fbc6ce71cdc05761f7d0053f4332e97015f", "123fe3aa5f2ffa937f2a0840bb59133e25a63bf8", "db480f100004e3ef075f9404041fe4f89fcf4e0c", "b58f3fb5bd3e36990a7c6a2f8cbb67edeb2ae764", "07c3c015cbe635ede679a87a9725a65902aa4a17", "60fb007eef153fdf9c3d6620c419bef1c657c555", "7106c438f60ee135dae483680d9112f9f844d520", "7292510a78ef9dfda8aa54dab318a7780b2e8faf", "88334a8ebab1cddf764a6813afad407388a851be", 
"5f769ba95ffea0ce76ac9d8e7cd47e2d1c91e1bf", "5b7fb82e160a76d977f78c32982c567a9820c013", "de4f113b82dbeb8cf3f6530777e1b543ce3286f4", "1a7101f44cf46ab7ec0a7a64d16e2024d9e721ef", "8050f9b0f9ee0953e6125cd9b8211bb792953642", "636c786d4e4ac530ac85e3883a2f2cf469e45fe2", "bfabdebd2185365e7b51f21138bb4e4b0d957a97", "cdde9cfb726e177b781dffbbb41d15cf58d7f888", "8170d124e78a3f127c2da291aa1116e85c13c02e", "71f2175dc19f1c61aecd97c5f7ac70bbd477b961", "466212a84d5b60f4517e8ab3e4473c3c9e081897", "ea939d72d55c095e57fedaaf2aa49f596002c196", "e3d174bc5632dce568bd947303d776b5ae98ec84", "2ca78bafe32dbca5f9f64ed4de5a893aa5ca03f7", "e688a6535dbdd6ce6928bc4eb2978f39628e5302", "2eebf7012a6179264c3c7ebcbedfc8f288cbdd87", "f75d8a1cafda3332aa4bdae60d2cb86e5fd6994c", "ce83369da319607fe2832485913b0f30c00920aa", "9eb52cca3ff73def20a7d51cb1779c32ed193289", "bd35ba85e58fb2a8ae92e87ff9ad28e5e4b35214", "4700200a108778d6557bcc11bdb6094a40038426", "fdfd57d4721174eba288e501c0c120ad076cdca8", "59b21f61ac46e1f982cbd9f49cb855ba5fcd3c45", "36f7db726f4a6eed1ecdfbc78c7eeb16bc2f213d", "cc1241ed81692c62e461e745460693a04f7237b4", "ab00ea1aa2f81fbe139b4632ec3682dfb7312ef0", "3137eede6bbada4442e0193dc5918788b7e88aa1", "2f1b1cbdc1ea04be6f8c3ff08628b5eba9f01771", "16cddd80945d3ac84e4200e272a4bbcbc6381896", "21d7c74ed8a1c777debe96937c08b162ee01eac6", "36ab143da8b6f6d49811afaaa7bcbf81c22a210e", "06cb0939ed5fb2b3398d54a7fcdb865fe53f414a", "486e5c2996726ec0f7c37077a2752dc4bd8c1413", "3935dd4eefe87a86efbafde3be80e1a5fd4db491", "0b5c3cf7c8c643cb09d55a08b15de22e134081be", "d121714f0862cf923883008b570dcdd70e9128e5", "3434ba5677e5c98e82ee17a1f2d0ddef66d0b009", "03ce07616628ac7c7dac92ea714313b674217811", "ff39e33a2ac0838e8d82aaf8cdc18f9b65ca2ea2", "104451aacac7120f1aca9fc2e9ab33bca1a62e2b", "35535b2b03a9fcbcd8c09f753e1a69c977add020", "c46b13736b11c504957a4b2fc030e168439bcbca", "df724040bf460858b3e325fab0a4dd3374a647a7", "3b5787604b619c273bf98232b0bd3bce5d4a34ee", "29ed326a7da1678880db02e5d0e7cb7376dffb98", 
"d591b8a7acf83fab5f7a949d89f347ed061313d9", "bc6c73d873272984e10969145e2d7ecc2738e41c", "fc74cf2b661f7209f76deee94c7adddae352fea2", "ef6f2e0ab72d1882265964718b026394e25c7cca", "93567fb7d33bf588463156e6f7fd734420022e86", "f174dbcc50436437e6bbd4139d0947138a362a7f", "74ba4ab407b90592ffdf884a20e10006d2223015", "4b1e80211f34b731667a31f0f27937376866993a", "d60e386a44a7b841fc9f4369d5f71bd517a49ceb", "b8f6ab05e147811da7038711aebc931d061ccffc", "0b0535fbdc468d1fd6ff32545a717a8af14f634f", "013af5a24ec62b000d00d86c1a504573c0f35a3e", "c3d58b10bdf1df35946f015cf49d47f5dcc45c14", "7cb241d57f0f35f941207f5b302a8681d92f445e", "660c8a9fa166c1d81e65192e011eacfec208ec00", "ffcbedb92e76fbab083bb2c57d846a2a96b5ae30", "83a4b9c9ae3f75bf7e4a3222c46d99be7b7998ab", "facf25e1880d23eb993d4ad507256ebbc7e0d82d", "73a1c6815c0ef4d62cbdb7a73e84d7dcd370b50b", "ebea45aeb5e79797912407ac8c8534df8a8afec9", "9b59047031116e0d4b5bb5e9173b37f34512eefb", "3de3ad0f6ae59ff579dc32856740f9f9b801eadd", "09771c0f9eeba235555337034f4aa7a42c8833fd", "898ef892b4cb9c206afc2daae04eacb1a7c7f956", "1ef368e79d1a33d700905221696e552745e1ec7f", "fb6ae777034477d58890f811a11c533b6118a05e", "3e8de2f904dea8368477daebab0c0dc97e0229f4", "d089f8f6a2ab7c9535753cde8f6ba9b407518f3e", "434627a03d4433b0df03058724524c3ac1c07478", "bc2953c2d177b18f0870ff9e7439e00a904a0b33", "acd7f70bd1b96a81b35e641ec1936ce925073d08", "6084cac63fe6fcc1436610f1db4a3764ec2e3692", "e82e551757a4915d9cb9c989fea7226ef3b72fe3", "d6adb54f5d25dda71d157b5d574c70c732fdd722", "69990cd6478d7ba8a84b0d01b475e7525d421137", "462a07657f883486f29fafe3781af9ddf8a5d2b0", "ab450a7968555532d9ea79f81189c0d52f9c5f11", "cb2e10d1a6792354bc0ce24ee99ecf2142d16f9b", "b06b0c4ae4bbec4beb551ecf442965a0902fe516", "7ac4fc169fffa8e962b9df94f61e2adf6bac8f97", "796d5d1f6052cd600e183471a2354751883d8d5d", "c726ea46544968335f1e51be633f15d0cc0f0311", "9eb1127548e0cf842c3c31cb64894ec2a2887e90", "822bc017e4dccbbc453fc142145bd853dfb062dd", "1e7bfb5824f9a1a6b37026a642b37c460bc9bd48", 
"155033f2f096934042d659d10912ef29aa1cdbd1", "c2864a3551a3a5d41474d06639815939f8439add", "49ac9738b551d0f8d9c64d5b6e8b08c69e3b0421", "0bda07e2151ad616f008c88271553340461f0f30", "dd5822e9287508db32162ad9852e3549c247417a", "b754f9884d1a6ee3d45c87cb1cff5d84f4e7e35a", "bd94570249919e89284aadf00986e453d3d55b76", "4e61f3dc6aa7994613a3708e823aadd478c73f5f", "4bc67489bbe634271f8fde73a851d7a59946ed36", "2942a3bac393e0d8fd707f5d4fdc83d91ee63360", "412b3ef02c85087e5f1721176114672c722b17a4", "3f4ba94a2964e62c52e7f283bea764ac19cffd40", "0d4c620aa869585e31ca7018c813569f3ec1a028", "b800f5d6e502cbb8431d23de06015e599c8bb906", "a25ba58eb02ca6d19dfcea11da98ef01cc53ea9e", "3d53493916cdf49ba29aebef955ab6b32a05762f", "578ae2368d477b3ac24fec7273804fe62224e51d", "07fd87460b3f454c2e7c971aca55df85a374bf8d", "dccae0dfd5b10ee25e534e70acc52804fc030d0b", "11c8e6b236b7b8276121d922858b932995163a0d", "4b90f2e4f421dd9198d4c52cd3371643acddf1f9", "1788ff95fbb86dd330b1d85d89884f0032e33e44", "76b2732a8684babdfd95c655b2e1a1b79c3aeb9b", "150f4d8a46dd90048acada63c42c12392c5706f5", "9f91fd3e9621b88769ecc330f362a591876f948f", "42d9ddd942ec89a3fc6a7beed174fd75c3dabff7", "c14398df2b26429cfae0bf984d7bebbb8d5c8bd0", "f2eb772eca9b391659376c1262231297386202be", "39e43f391e6b6ce0055e80e53aa5218b77098439", "0c53ef79bb8e5ba4e6a8ebad6d453ecf3672926d", "26fef351a3a671a64b32b3c673a332e912cfed24", "4c4bcb934388af44ad993112aa8718cf99d0786a", "5b0552a8e0ffdf1b6e7f2573640f888815391dec", "9bec10a2dfa925259470843058aa9ea5fe7004fd", "bed8b4177974b8d7c3cbdb2011e8db5a3dd3b4cd", "33f73cf297065ace7f27e8d449765f1c51ef163c", "39c118c4f3c02daf7edcf207dfc690814967e8e8", "20ac210d0dae94effbcd1d20afd7ea559e7598d1", "3d0c21d4780489bd624a74b07e28c16175df6355", "c7c61d35025943031a0cefeece9a9215fd4019e5", "2a1b247b31d0b4baa5438ac502615152426165f7", "3b6680a28c87dec9f369263b8428e41a3844ac5f", "ebe44c125f6d5c893df73d20b602e479a38e5b23", "f614b449ee2fd45974214014c109d993aab73343", "04f6a747cba48be1cabbf5efe6ce3eb85e061395", 
"1b5a19828a1dd486ccab1e9c107dfe7bae20cfb7", "ce60c28cce422e1b8fec262eb2eb1b72b78c3e47", "c8ad5e167b5964c16811fc6427b50453ca3dff9f", "cade16378ca4815ea006be13e2d4a2fbd7baa3c5", "31c5d8109f3110fc8b7eeb6265e832e809cdaa39", "68c3e61cefcfe4812df54be12625dabe66fb06a4", "3579b0ddd571c1662b8c73abe7f0c268a9de0612", "29633712a36c3efc77ce3a9844a2e9a029daf310", "d39f311f1ae08efb6cd50bc5c0efe06532caad65", "4a5c314a8911c6326ae386a55360c47110b08025", "06bdbcfc590359a8f5d10c482d1f010c61f829a4", "1216f1c8f89a0c8e81fc873f67c4690a41251f0a", "c76f64e87f88475069f7707616ad9df1719a6099", "84efa16406c8838550cbbed48f0355b936bbe845", "708355d319a88485fdbbea3524104982b8cf37c2", "986be05b286d99d840583578c102af31c56428fd", "002c6674ece9be5ef783942de8f88026994d832d", "b5a778e8ce38d1131b9304652c09b2645b41e0c1", "01d785bb989850019001a418a16202fd7502ac14", "b2abaffc4d68ebf910dd85c0f7a367895ab90e2a", "89aa0607a9f97abb76f128dfa2c3c8ef93297018", "e66f4b9b60d032e842254e7a04a537dc55d2c555", "51e2167cae2e257aeeab0ba6600227576e6cc436", "2aaaf36271ad9b5bab69a9d0cd8b28080b8b74d0", "73dbe02e590fed82640c46129f64651fd1b33c24", "b63d331c081e78aa86fce353097f76fddd9496f0", "0d71d6dfb3ccd6adb1d618662fd02f49f0afba29", "08f46d6a91e513edd57a0ef15d5367b5d0545c1b", "6efb18dd98469ff3c4c7d82932ed459f5a0a2e1b", "46be3e9613953a8f58b92153d95526558dd4c2a2", "16b269d8185b6584f978d1170ef3540d24992243", "285472527c5dc1c620d9644849e7519766c2d655", "472df520353b3715ee8e5234db01d0f551e6d9a2", "e2d37596f1ad4823fe042f37137ff54048231de2", "c99301d734ee0973d3869db5f595a40ab5f20270", "c8c77e415de7edf033451a1d926921f559f18163", "ca960c0f6fa981070b423eb3390c29882117f47d", "02cc96ad997102b7c55e177ac876db3b91b4e72c", "5251cb5349e37495b3ca29b06e6ed7422f12d126", "c8359e5e274a8e5dbb1498f94040d88623865898", "80c8f02c945c1dbbec31983164c1e4e0b742c44a", "e07415e73b079ef270ddc7401c7d3e2be9835e73", "c88be8de97c569d2876eab9feac16d0d3f6b337f", "277cadfadc4550fc781be7df8cb4ec89e54b793e", "66860100a3355f26ffcb9dcbf27e27e4757d641d", 
"05d7812269e8a1c229e45665efdf88eeae605dfc", "5063e43e1c03ed0a282fce84ed356cabc43b3b0f", "f33a7e98930f9623615b6ab3133431a298586f85", "02c3432b5d97b4ed7b8522c1fc4388bd4eda8e67", "a608c5f8fd42af6e9bd332ab516c8c2af7063c61", "5a11ba25cd048f384a83882a5a4dc25db9493b80", "c8be6a59b1c29a1a44a0792985baf365298123e2", "5265be9c7b8b22f4e06a01736bbedf171caee74e", "f857141254bcb7b091ff4caeaee315daa71c028a", "b7b23814948afc5525975ed44f3dd247100e6722", "1874db58583e6e5aeb98f2329d9e1ba44b8f6022", "3d86d8ad67e4ff130444f8c269318bf81688fc21", "98fd92d68a143a5ced4a016fa3b7addd6b4a0122", "87555d5d5ad64071efd492c0c8faa7f66f020b0b", "d14e3100f4c761e98369bcb6e801e4fc500e44c7", "0cd79b2193ef5086fe17f621a449ef3d67f5b3c4", "8c5d1a334e7a88dc5e54383df1eef13188c2b6b5", "58bb77dff5f6ee0fb5ab7f5079a5e788276184cc", "e65da9b728493c4619beca5728f622f6e91c9dd7", "0d1d7471e8b08a4577b60a63b35fbd88dbf38ec0", "ba7636bdc1f090d0d252dfc99115b6f86c2b4ce9", "8c3c699f568ee825eefc4dc44b71c8b0bc592cca", "3f27a0455ef173dd91412844fe3c80d911ec55ca", "676733fb6d457401962305204155d6f4b7df5059", "00514ba3949302705b3b88af5eeef2d05cf8497d", "d9a5640b66ddbb4f88a8ee4248116ff9a8719129", "8df1766638f4d57935de811a137cbfec3d48cb59", "8a3f85c80c698f15639ced90b4e9d4baa23b572e", "c5613e627bf0c34a15c702fb42e8c3b1e0a63381", "602072750ce931fd592ee0ea6d968ba9746fe34c", "c33522fc5d2cf92c5a10f32ba9416365944cdb85", "260928b80e6bb414f70aa8ed678d8808d214036b", "8871428baa66c36fb0fa8c51f60bd9b80e118ec0", "ae64248d1744aa403d2e6e179d129d581b952475", "f05a3f7abeb8fc987defb3850cbe8e4545d81f2f", "29be6e76d9ed777ca032c40a6ab374a44bde38bd", "ab43c43d5eb2c5bee6de1b25c8bcb8068ab8bcd2", "1d1da7b8efbc937c2a6be5b1ebdecce92a4344d2", "876583a059154def7a4bc503b21542f80859affd", "4ab10174a4f98f7e2da7cf6ccfeb9bc64c8e7da8", "c840d85f6dce0fb69fb6113923f17e1e314c6134", "45aefa11101129862e323958b62505700bc281ae", "1436d72a51feefda3278068a164d263f6d845236", "c3442654a40ad3bc1eab5543924f5e8723de2e84", "6880f27e9fef716b0a67d0da37104e5f767bf5dc", 
"9ab471f5bb6a9d8232afad2918a1ff4180ac02aa", "07fc8b4ba4a0f61cf1ea7c0bfefc556d44fb334d", "30eed14dfdee78279536e680871bed4f128d5f46", "f4be6aacd94fb5a5f5ba1be70ecfd93a5c0a42ef", "ec53d383fc61393c5e95299a5010296c63a67be7", "180cf5ab4e021e64b9bf08f2ffc4a4712acd9a30", "a99cf14afb556187233f772fa9bf561d7cf0c088", "00a77f11d34bc72947c80778428c93aef0255fbb", "23e3d00d580dc61ecb9ac4163e3acebbf622d507", "e1d9f97416986524f65733742021d9f02c8f7d0d", "2af091bdc1235e58f13d8021fd0ad9b96f89c0e9", "df55a31d2192d86bdda5eb7a0f4cea07e7e5df6e", "f25295b1ed1e8dbcf6bfe17db294d0ab32802633", "de801c4bb4b420417d9514631a6cc4099fc64df2", "c933c4bef57be3585abb13bacb74aca29588a6ac", "6f813ccf106360cc9c3d6df849cc04d881d0a6e8", "8607209d9596d2b026ff75ac051f6e7ef296b7a2", "7a5326fbc8695bd54fff92e21f171f1569672b8d", "29fa7b334543b6b6a4927ea2c7ae4c6fa8f6a7c4", "46c3e8c2b2042b193659c0a613adc72100a2f301", "e8a083eb52e09f12b5cb0c7ffac03d368d447b20", "a11d284767e31bd7af07a0aa4ca1a4bc216eb6f2", "2013b485da672605b3e7a7a1336f094aaeccbe6d", "e5e40c7163fce41c3d42116b486f24e99e7ebeba", "31625522950e82ad4dffef7ed0df00fdd2401436", "b84787a4e89e93f7b003014202a3f5473ddd5938", "b8df2c2fa02a37d09b73277ca4edde654ac80953", "e3e98557ece5247661d849dc2d168f7498209e59", "768dc78ee5640cf49775b573c629fa569bf04a4e", "ce37e11f4046a4b766b0e3228870ae4f26dddd67", "b5c5a57f5ecd8e11cd47814d584daba53aa14d3c", "0ca2304166acc90c3ffb5934f9a6343aeb80bd03", "28f1f6cbe07b117387e2b07c11e7ac9c4ef8cf95", "cd147db3a3f1af79d97c71f708b3f02c1d3d0724", "05812833afba3b2a5a4b54853b0a1ed1cc8932d1", "8338a6a0c1918562d821fa00f13eb30a42aa5b07", "836a4ee4bffafba259e2d824fc89020de86daab0", "8c2c2aa08d54a2c4e96dc8032e517930255e0a02", "cb0c9a9882d8c3ef86cc8747b6ff8d68579dec61", "d7144bc7d91841963b037f210f9356d28f76e70e", "e5c4b75cb79aa5155ffd9498b3fcc790eb794e72", "f9fdc63934841a0c4d8d29fdea80e1972ffcfe1e", "f7ab8e56fc68575a0a5a94d315841f25630cf8a1", "1f7e831ed1ba3413d3d4f90a00191d406215d446", "8ac9faa5595729c5b799f5371068c60a20b01f16", 
"3f5741b49573122d278d1bff416ec34e1067a75a", "dd0aef0d44e740580212d6efb5286446494729ba", "c95c30fb990576704f2ccb3dc3335aaf43208856", "f53301b9b88f63f45e898fb70a4e812201e98c0c", "ab969cfae95f62d68c61830128b35786eb6c84a9", "8f74743ab435469ce35c17b07a97a19da1b77955", "857fface5ccd0fd4f30d6b1b3d2cd25a2b471501", "3f0f3c2bc151ef91959b06442b9ad80d405387a5", "cacbf4356935ace4af4534b44116a88be0faa137", "1d692f37c2594ddb30518da27bfc0f5044690d09", "02cce8b08e4839d16f2142c5723fc009ccb4e3e1", "bd433d471af50b571d7284afb5ee435654ace99f", "4da6b69f461ac5d441722285e1b92acc24301298", "4c55ea9c04d46d60ec5789f4e4c3224c41360768", "426c6a23c0a11c1dbebaa1ee91c67250e97188f9", "f8f14c0248a4974ce9a6226db81f9745a6b1ea97", "34bae042a882c0c4db79bee95420792f9f25eccd", "8ec40e98a6c50de446c8bcceb84210885b3c5b07", "05bcc5235721fd6a465a63774d28720bacc60858", "760efa4ed4ca4c838afd8b9e2820367f33a14988", "95ed2269c4a13771cc8dfe0ff2d4d6a7f4d73033", "2f6e6ffffeb9538362b4dfcf90ed242c57317a1f", "7594466248480647c38fbddc59d30abc34f4e2fb", "900bdd3fc700ebf9417c58df15a05eed8c52a90d", "2bbb772332a90b2aba893f7467daa76b373be240", "aef99e4bd20c33edc29aa85806412b386286632f", "68dd150767f947a596d347afdba5ef76c350f9c7", "28c367cf8d45a3ca8f3e2b59c4098ff5b4ed1f7e", "cb30b67a913f0ef4094687a85e7fb09807a763eb", "3facbc62550097ca50498d09ffe5418e9d5e3cc4", "b5c90dc06b63099c3d35c86c97fa24ebf9d41fb6", "d7612e01c10f351a3e2ff1a57465c3d17ddbb193", "84c5c870c456b53467922c7c7d36649c40b6d791", "543678282ab9e3a5d0cb2ca0293b8577c4ab48b0", "9241b103b76a3bdae157038b71783d7d372c71e9", "4d36d147297767cbe698436d77c0b93b1b47535c", "b16c86a0a234096bfedb590e557f6f298b882a9d", "4d1bdfb6979fc6ca58bc5bbb1f17df074905c220", "7c132e0a2b7e13c78784287af38ad74378da31e5", "28445005727d68aa2706a27205332f92e2239470", "417911ca5363d74b0eec6b5328fb4f4cd3ec043c", "4a6049e1926cc8e574301cfb229599cdc0a64e62", "f6fea582f2a08ad577ced55ea0e1c2a063f938e2", "11467733103a3e58ae88cb238f620cf6cafd4420", "23a84a4a77b6662d553c9252331e6b7920053125", 
"59b202ccc01bae85a88ad0699da7a8ae6aa50fef", "cec8936d97dea2fcf04f175d3facaaeb65e574bf", "c60746fb99a02536c2aa3dc866a260ca97ae59f8", "2070296caed1f915d05f7977181698104e2f14f9", "598f330fc061852162f2aaaf59ea9a3a55d3f6f7", "a155fcf9063a7a33368488123578180a0d1a5a78", "ab4c2e8071d99bdc8c1bff9bc0d6817300ee371a", "5ec4e91940455b8583d8cdcbe12a3b54ef0a0971", "29094526e1179208b43e6223b03a7a5340f45689", "034c2ed71c31cb0d984d66c7ca753ef2cb6196ca", "459668bca393e3c9aaadb01eb14108aaa4dc5ce8", "80a0dd8608d8dc10b21bf23dd9a97ca0c17f19e9", "f4318784c154de76c1f7069fee4bf1ecbb973cb7", "e205faa8febcb7e33c482b00f84939b153575292", "74b98307bb7795ea91908498dab34a4cf2916ed0", "5e8e3d2a79537a6cd0c138545bce63ddafaa853c", "8d2c0c9155a1ed49ba576ac0446ec67725468d87", "3c9c948eaa2f5c9c2a8ab13e03b6a462dd90c946", "a69f85258816ed1386a9f64870121f74ef517064", "dcc17dda96d1b332f22ca7f396d60c8eea372547", "a8682d432865a9417b30a482b462a9e07c66c0d7", "ad15e64c93676b57ebcef4c2c4183d41b246f8ae", "f2cf24b7107f8b8061c9b0f28a716e246f3ea5ea", "16bd481fb66259df9c4c22b54797d8e8adc910fc", "1f35a65eab258f042edb8e1d4d5fff34f00a85bd", "bf63599a05692ba4c18476f696edf98bc28a4f3d", "1a03dcc811131b0b702bd5a75c54ed26cd27151a", "7800f968f1ee3b12e7f1f415f8b042023a3a7926", "495548204b3923d3b1ddf71c805bf6f7f7350ec3", "0a8007f69954ac8bd05bede33341dd37dd7364fb", "2e86402b354516d0a8392f75430156d629ca6281", "ada6f115396d56841608a293f9b1711257f780bb", "2f85b98c954bb7d8997f9a9c72be6ebf8f9cad0d", "163e07487115641046022d57fcbc6dc9fd2669f2", "5c4c2e8181d50c74e26d2ad793d5aec668f61e23", "efbf1f37390dcabac9c4a799f79c97a280defb71", "c7fa2ac9d03d3bc3877c06ea66710c99c88d56f7", "6a14b0794d5f9de6d2d72517fdda211b3349bff5", "e9835bb131287d711e5e5435a5df8ce5302acb31", "8d99ecb26e321c54196a1a79c79ce9c3607bd9ef", "b5d02aaaa1834ef0bf0ffd017a4e1f27483bd2d9", "89f44f756c230e104cdf2ec0152d5f015586399c", "15696370ff33b6e5a81bf5131d80065d6e59804f", "32fb96472d1acf352db1e41691d307194148d0fe", "0586f44c8b6316f85628642a15b3b1cbb2956ced", 
"98b98a8413f21a48ee6effd52da8c31ece6a910d", "99a72e8a73ad1a80fcea8c1007909df64ddda8bd", "8f81eb82cd046891c88163bc7b472dcc779f5f08", "ece80165040e9d8304c5dd808a6cdb29c8ecbf5b", "dcd85851e488a18a3eefee3a2828f15f107f174c", "335486cb9bb326e2b33fb03a74d0f9d671490ae7", "ee5bd0ab876cf319bdedd09c2da5843f6ae714cd", "d9b4b49378fcd77dcd5e755975b99ed4c7962f17", "c317181fa1de2260e956f05cd655642607520a4f", "3e6fa6cf1fe2e23fdf7716f89b160333c7a93b26", "2f8df9fb4d51ac5f5a81dc19ea713b94bee20c18", "a8286a564d7cc99300958c724eae6b783274e9c5", "b5b620774304e6245a660b14c1207386d3abad17", "c93996cb126589b30c04bf1256c97a4431c0e8b6", "b501361ad3ad4f78a3966830a40d2b4f68466c80", "0d0467736b6479bb4f1f5ac8875fd964c5d44bf2", "ddc22765a8fa82d5e76cd7e1d6adf390fcc5b044", "cfe4759ab0f730a73dfe6f07a4ba327c3e1a85cb", "539923c8f2f4641f71056b71e5628d1b9b633835", "872d4a892b17f642072da657195a7aad3fcee965", "eba0510f6d34320857b0554627b5f2925553f820", "de0aaf8c6b5dea97327e8ef8060d9a708bf564af", "188d823521b7d00abd6876f87509938dddfa64cf", "46bd4df6176345097b0d239b3c8937f67130a69b", "6577a11fc1e022670a0867ca2622b72ef225616e", "96040876d7c74fef89733e509a34f9c244a88c35", "2d64839bcf82e0a89d7e4874909c6114083c8a4f", "b6601a8e30751770baaa9ac429d2592451f9d4ac", "c30fee3bc48240aebabc1617e00226ba5ca6aec7", "a8202c74e9df5e7557c90c6e27996aa53b71fe48", "7eee9c288bfa94bd955cafc3d12a02aeef7b4075", "c437d0485217685f9ea42c33e492090b58de1db6", "971b3c1396970420bd723a51f7550938a50c2c24", "6e74a055a70c69c287a34d86ce8b159456cf4420", "67b8a47b21cc5bb4b162d85b2e86bbc5884f6d6e", "4a53062c8e0a1ce54adff22d79f409876fdfeea7", "a9d6d62f4f3f12ed565e5d75f8c4b7a202a3d809", "4dc8b1c193c421f8f570c0a7eac2fc73da06cb51", "44508e337a90223e935485d87d6fda15aaddd77a", "06c8fcb0429afd3aee153ba42e1fd8aa93f7214f", "01a152e7ca6accce4fa52e29b27feb76418583fb", "da55917aa3a8a95179bae92c5b01e4c8f2f61b75", "3bdcfa660fa278a9528bea6617f86055f6ef03a8", "0770f0f8f168c284a63e46b394150a8c429549da", "d64c362b631f0c94b22952e2d0860054f0854358", 
"84c8b29103480cf6f2b93e2fd4225b0d9d535ed6", "665b653b37b9760fe8633b513c12508dc4b45872", "3fd90098551bf88c7509521adf1c0ba9b5dfeb57", "61ba43517aacb8fdf68152322e5a5f91efa37d64", "a77e462997e903fec8d831af11b7f61b209c27a6", "dfbc3a6a629433f24f4e06fdfe8389f83afa7094", "f7ea5ef9b277ff45d8aac1b1eefeaaa601c2a726", "a51cdc57f35f536468325a40a7777954c864935b", "450e9f80a273df2cdaafd9ae3a9ff149950cc834", "d4fccc736869d038b705cfdecece647090734957", "fb404f93940fc4b7d38895c1e5d157943111454d", "ed2ba6448db8cf945ca24d4df11916c2c5c3edd1", "3a5857ddc847f6d1c5a6e9e0732999a254a71bf3", "d33beb4f1477374fbcffd8e9df74ca2547eb77ee", "281949d1041dcbd7b9a9c2fac34859260027411a", "22fe619996b59c09cb73be40103a123d2e328111", "b8d5d2e4ad3c20657b7769391e0cd0bb47ba568d", "ac12ba5bf81de83991210b4cd95b4ad048317681", "5c0272a7aa3287a84e490ba6dffda68f3faabe85", "711b5758434dfa03ab9da1f4add4077766c1cf27", "7fa3d4be12e692a47b991c0b3d3eba3a31de4d05", "fed618637ac9d2fbdb0711f64ea752370dfaca61", "fc18642d17785ef1853749b5323bf87adb329537", "ea8abe31f3cac058cf757f16e1eefa11295322bc", "7be7699221c9afd582dab35bd7196c544972ad1d", "85e55a7d2574389c9e9032d9c332dda75d05fb93", "9b2c813a94cee031325b3e76e20db7072063549f", "2f48f1cb1cfef964fa70d7868b87d81455e7be2e", "bac5906adc227e390f2f70705e990a3e1ec369df", "7d5a83495c4eff62c98c3fd27d0992850611b2bd", "e72c4139e3d2d85915b065f2608f65ebe879b6f5", "d18e2ccc4f1413870a760a11e7a4fac19aed5553", "73f082696716e9ed0c04a7bac5e893a139a02a9c", "fbde058357f36b233e722ba2d3ee665ca34d57c9", "1471c0b72e4a88b39e59362bf169bb35915966a9", "9c641dd3ec81b1976ee6ad4c76eb1f9a6d5eb882", "ed08f7b720852e4430bb5ca1f859d53ff63be5b6", "13cc3eabae05e570bae1c752bb1ed9f84a557b07", "a48dc22579807434e33fabc22333608782156147", "c5f946db8ae431b1894724ceb2f98c9c9c2b2173", "de53ec6f806982a4940452428f9f6ea94a85ae68", "8b5af4bf220847d1237d97649c86dd76d43166fa", "a023d6c315d154e4fb74de6ddff5d4cdcb583326", "917611cfc0fee3e834d1a6cc13ad5bc18ae428f3", "da1544808fc200260d8329e8f9f3782e644c0ed4", 
"303828619630ca295f772be0a7b9fe8007dfaea3", "86cf00b2c22200745276239d32451ff14ee65296", "85103499138d2f160373ecfce400cb000a7e477d", "219aa7fe458e36e7b6402ad0d31cd5197884ac87", "a94310ae2eea247f288bd3a65c6d496b61bce1c5", "5ac7090b85dfd037a3530c5db8ee131aa9e01920", "263a5592cd872b9eeda2f2f01a3e782a02bad670", "6da06fc70f32454f7841b153c582e65aed7047e9", "f131a654bbf4c8de0679d3c6054c10bba4a919d4", "1dcd975b657b444a64fa44986ad5757e5377c0b4", "1964131464ebf42f378c64277924c8451c651b5c", "83c00537e0c3e226d999a5abf02464e138867e96", "90918dfd9d754e1cd07ed6acafec9001a4685ce5", "fb353b6017748ae0aba3d3ebed6d2362c3a985f4", "91db1fd5e086799c280221ee93c1f275b3d909d3", "ad72a3c74a2eb7a02288e2ec86057a37d80227b2", "d78fb5e8da040b7f20546122093e50812ede8b37", "b13499d60e7be1d593ec91fc952b9c32ce62bd57", "1adb6341dd9bfe88d631009992fe8a4ef80e2f2b", "34542357971c4809f2de54dc14ae29913acdcba3", "0fb5159b976e39530a9b4d4a1480efd3ab3cd458", "32bdafb45d38f7743dd5cd3ca4173beda7bdacc1", "c49113c602ae75277c07f47cf166083515574681", "4d4b1aa87af8bfd65ac7bc250bba5951aed40986", "be6facdbc38e38bfa059b0b11df481c400b6441e", "06118523e79fc6a0f75f05c75dfd95e283aa5eab", "64d46b5e05553a06ef9a73d3e2e06b1c611baef2", "f736b7cf8388f20bfe9619d63d9c4ce070091863", "2a3e19d7c54cba3805115497c69069dd5a91da65", "5e4ccf90bccd29453a591422505c628172b1a4d6", "4cd6f20db45cdd3a21c08e1a56754cdf98d070a1", "68694f7a022a9d6dfb3b7b176cec816fbd66c206", "c9b7449e6073135508796a4cbc9bbaaa94be5b4b", "fe09c8822a859d291eb2e452f1676823fd7182e8", "2c29f5245e20b49acad4c63220a17f3b1fb8cd00", "90c0a60aa511e74c2f20be1724692c27b5a6ab94", "006415b0ae3ac6ff9a2b482bc3d23ad15e8f09f2", "b0b07732a9ab9b2b9d9dba41e1b9811629fa43dc", "1b4af2d16e2578f0c00735c78aa262200625293e", "09ac8added26307b358b83884b55af29de8b5bf9", "1b5baa2ff3b6f88865fd244d87d39d58282d8597", "c40174aeb1be3998a2f8faae28d6689611bb7aad", "2a7007555f71e559c6aa33efbd3bd8b401638d82", "8d3b9a07483a9a80e7e8d67d9042ab6557c578d2", "f22a8c28a6de723e5451ce577a3ef8dfb26f5e2a", 
"eac847b4f1299196e5ffb6e829670830193cfe1d", "e5781730c9f1c81b08cf4b4a924f1058efe77908", "2848cde23fe32c30980183f33b6a2c2ce7526726", "b8968033438c1a53219dd75150b711a6aae6bee7", "4a869781d074f6be7a5001c59e41b25145bdd830", "eddb1a126eafecad2cead01c6c3bb4b88120d78a", "2550df6b33260cbe6fd60331ca6c7a8c0b48e80d", "03b149f6ae3e366fb45ec09e0350b55cf5ac0459", "34f53b176b1fffe9ab8c1102dfbbe93a625850f0", "eaa42e78fd94447534a2e1114fe5bf64d4604169", "d3f17c2b4e44f008a547fb8c9154bc8465493afc", "440b94b1624ca516b07e72ea8b3488072adc5e26", "f538a9d7ac2d9f4f71a18c8495ce2b31850547c0", "4bb1f497ec631d815733000f36157c5907d269ae", "065ae5d34ad86a79a480e3a0ec5a97f219353495", "a78818aa505beb2c4d0902f6d9dc04481aca3c0d", "33133bf1625a469b7c6ac6a2c05c6849584d87bf", "f806e043f352aea95276fd9dc5ddb361291faf05", "0b3437b872be122d35c52aabd077e45a5c4a1ffb", "b008d973ee93fd3b13d1148fb7533dbdbc8374d6", "0f1cd34e77c141e75b02ad4f5b4c2dfcbbffc05a", "d54b1bdf0c42ea3d906cb93bf276307b77c1502f", "7143ef99ee9434fa4829a69fc877451497f0b4bc", "b1d8d693e34c8cd80cf2e361f0910d5454393c11", "63f2c3e312d07c6452bdad0a8adef1b879950500", "2a06b31e778bed978055cec7596bdf2690d13b49", "522fab628aab972f39835521e31564b4b6c64fe5", "4c28ca360096d1ebf9a0c1d5ac10bedfa35819b4", "a66057955282d22a23c76e904daa2969adc94cb2", "b917e8b974dd96efcfd84e765d81a69c96d01107", "6c514a85b840c461cf6959927e6a34414e1e0f5e", "2a18e025c0c89c2dd94ee92b1e496767de8a3406", "4a961dda9fc9a07d6a0bfbe59cc38b2605e61d2f", "1560fc1817f2bdf1b54dfb9672a9f509b5d1f6d3", "7a515a8d994f4605789c96bc7c389986797d1e34", "b0e7c177084be76fb73df3c4bcf1846676a2d615", "514391e5da6521d46fe1d219d3d4872925ac93c4", "a5a00debc37bc250f35b5990f5edc658518916cd", "3b8a5be5508f809a2d68a78d21cbf1690db57d5c", "264ad2a00b65af52acf500cc6432828843530df7", "0c8fc57295ca78ac5f4f40d1209a1028eacc6de6", "80ce72984af1739a4f6d4c972dbaee62fb8c5a74", "e8039e1531dd86da960be26d59718d2452f9943b", "ea344c132c7ae5b5fc56061ef0008715436ec261", "059dc8bbf912caed67f287ad8811d3fb41fa2eba", 
"cdba015be9db1e047a51b7e06403528b3551587e", "05b6da9e4b07c55115eca805b4d05f97b8a6f6c7", "10b3afc6a10149cd88bc6f4007b41895d661d5fe", "bf4825474673246ae855979034c8ffdb12c80a98", "b6bee0dfbdfd2ce16567a36cf495001e6e7e2b24", "096e389c28cdd15b8765baa29ae55d98f8c3c4b4", "25885e9292957feb89dcb4a30e77218ffe7b9868", "29107badb19e7c5c89f57f81f50df08422e53304", "9ae74af82cade9b5dde8c89d9bdde7b08795331c", "7fb7ccc1aa093ca526f2d8b6f2c404d2c886f69a", "cefdfc7f7348a500772a4d5347ccf26706302d8b", "482442da6d4ffb6bc93c24847da3e361a3a3c2a7", "280cfa2b9420086497fc908aa551ebca4741fc1a", "919327d4f264775bd4ab2923d7786d5b2c859409", "372688ad240474724683703e65a02f30e8d293ff", "e917bb1f7efdfc448b8b63c52e8f643e68630a11", "914fd65d29094e434346806bdddeb17d9468610d", "7683e11deaf14c2b94c709c5ef23318986643aea", "3cfc8c00d390abe5f94ba7a1251e085a794b35bb", "1a2a0d757f8afd0b241b160030ea0b9f3e2395c4", "e6725576c706d2b195664ef838accd1d07ef7dad", "5dbac0b5e88c4af9a1adb4d482e74d0ddfe26844", "f8c1d0123b95c538077d54f6c4ee2c355edd75ac", "7a5155d7fe984fdb75768cfa4c9f5e65accd775f", "90915cc93248174c4729be65159fb946d2ad5f72", "09a05ecae987d9ababf5fe52323f69fa3e889d83", "23e19cc9d2318b07eeaf8a9d34245131eb1a58be", "555488f1da920bb1a06b4d19ff687805993eb7fb", "8032a89ba67e2b35e2789983426842f688c49a93", "666676bd70062061c25080ee9b2b3161176a95d6", "f8599ad5332cdf2c9919988ba300bb4b438b5834", "2cb5db4df50921d276ad9e7186119a276324e465", "5da53a17165fcc64e8fb6e9ca532bfb6d95ff622", "744d23991a2c48d146781405e299e9b3cc14b731", "ad836360812f87e45795f8345de3bdc6b13add81", "8488610866b29f279461f67ae948a3cfc72f6961", "98bc9301c352d5d72398a99bc64b8f3371c3ac0d", "ea47705d5d3ce77d05c7b1bef551a18408671aa0", "09e1072c509e1d24a34dfbbaba1c3700e1eb1338", "fb59657a80f578233cb7846bddc96221289521dd", "26029a63b2377ef81e3898f55bb204fd853c3e31", "e2baf990bc60ef0d24b7556d238e40566ad23d2f", "4b1fc77a54e9daece9f11ec881a2ec40919337b7", "8b6c284eb4299a70820ec91322a2937c489fbd89", "6a904a27f5afc9261162f556b52a5847a62d7a66", 
"4d9c64750ef4565dc47cec0c513458b53dd5c9a7", "5c48d6ea9b022c077b1873ec48ea4f37a91ac77a", "b651814360e3899cd9206bfd23621aca6551e69c", "5eb25ec961c6a86c93001a44d38b3eb894e7e5fb", "9b76691bdbd01d8526f7e4ef32b9c705d28312a5", "77be85f6c3c465ef8e17d3ec6251794cf4ff5940", "48a42303559ea518ba06f54a8cfce4226bb0e77e", "ff269353b4e49274ff85dfb98b531888c98da365", "1f7cd3343f4b6b0f936c94e3a45c477c014e2b5c", "5f7354634e13c9fad64163d53beb0a8eb5df30e1", "150855fdaca2ff3ae5a51da4f82f120a92cac104", "26a03fe498b2c065087d17b1af930d69a89a0a8f", "269c1f9df4a36b361d32bfdc81457b0a32b60966", "9d7b2d1f3d6705bc8a4656fa27fb6dde20033f25", "f00e51ec0e3894bdb2977a01824f37b15bb82c6e", "46c00c4c4dfaee99976705209fc2ac1972081ab9", "189c0e5df2611dea909e51256b30c3ce3d25b5a4", "3046baea53360a8c5653f09f0a31581da384202e", "d1dd80d77655876fb45b9420fe72444c303b219e", "30835d3253e54256b5b77918bb1690824b3f24a8", "0f89c1000f1efd79d8c6b2d0a59bcc76e9272b1e", "5b336b0f258ad449c4fab155c80e98a18bf13cbc", "d9f0640716ec25278e6f1a4fdda5596660504c54", "a815bdb576d8b3b85c2671c58c6263fe07f25fa6", "e6098aa34918a8d01bd265690e3677fd474ba17d", "6d9f13fee6668ae2844635fc5d47a1bb298268df", "cd468236213273b96d985dcc859f24c0a19e3077", "adb6fbff14b660aa6547edbef27c01ff427eba33", "89742f28108330f97df94df98f73b459b02ca33d", "99e9ae76ee720314a90968be5f889d233c67054c", "effa69fab7c4fdb30265a4bb404f869d327ae326", "b5b50d3d043637d4984987c5e67b8b3daa3019e4", "5acf8478d39c3e521436c66cfeec6187c0526e55", "d1580e818932ba23b38784b48d3e472ec91cd746", "41e6160f79940d797c420e0e6d5aba90091553b1", "1bcc4f0f58848190ae0b2098eadf06002d5f70b4", "210fd81446006bf542b595fa0743b808cb86acbf", "26a32691321574ac1c90c58f47ec73fdfbc8507a", "f9592652d962f470d539d42e21989e71df943041", "2520c3d5d114974167561591a57f80e89650f862", "9257c88484247ac19e25c34de2261d34e7a06b41", "1ffedba43c470ee93a3cf9db547ec0b55f23b31f", "3a65dfbeb848ccf4221acc820a987d4e9d46c6db", "2cdc1b728c90d4da31f924879a39d00008d52daa", "5af5802cc6128bafbde1ae12e0ab41612aee9e3b", 
"51a81a17328ad36f1bbc15e240076b68d3271c0c", "22bd68b654df468fc06cd9fa4575a33eb353ed00", "171d7762137725839fe5292901fe90d91b74811d", "70cbbf1ac971a89e18240e70d86fde2ac5190bad", "e6cb632a172e0c11de6dc57e6842becf5224389c", "888d98dfc312400861a89bd433492094dbe787de", "409507208528c295f5f69759ce67c5e155e1113d", "28ff4c98b7a922f4502c69003f686fe0f94083a6", "853feff8674f4a856e6568c9ddce5eace014de8c", "f029e3fc47cab0b23da307dd2ec6d2a064091f83", "c19e896775ce69b9fabae2c09625e8f49fbd5d76", "0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a", "65a8c76a25c1eb95436bab22e99f78f6b390c87a", "6a9e9ef07aaff89b4492b0e7d7be51ba9a4d4346", "d903292dc4e752f6a3bf2abe668d17a2575044d4", "565a3ec8b9fbf62d3341ba0047e14afc8f40cfdb", "76b6577f47d6782bf75aca04e361a7b7381b4a84", "eac1b644492c10546a50f3e125a1f790ec46365f", "bf27d02b422e571e945da6fdec842e6c5fd509f0", "d5607567305b690f914fe8b043f3ca48aed57fc9", "e3f717aac41694595ae9105eb5678f82214e1a0a", "1e2d965df330a72b3426279f9327f77330c2ee64", "3c63fa505a44902f13698ec10d7f259b1d0878ee", "63c65e8584d2c3fb8833af772eb713f438cbdfe0", "4643c7440babac8b7790494f16979dcd3ffdd2d7", "0703843838b9cf1240a9459fe5e0fbe3580c63c2", "957cbe90130c85ffa610a31df072680134fbdd46", "cdc535719aa041b7bbd529eab4582619a04b706e", "3b996a2e641be7bd395620d30364a27d1558cbad", "1633c30909f4f3d91ea4256c76c71abf62a52bd8", "3efb04a9847284680b48214855eb0a962efa5c7b", "df112f7435fff4b8f073be9d2315cb2c1e211176", "a46f285b928aa547df8d8d8d63d2f9256a73aae7", "42c1111c9cbb74b2755f58c6e9e84e7d1d11cc6e", "94e6d270f96778856991ec66ffcbef731957ef12", "4b8d80f91d271f61b26db5ad627e24e59955c56a", "b69752efd9c76ba1b902ce49d0ce741d50ae78cf", "ea572991a75acfc8a8791955f670d2c48db49023", "8102311b200c68e7928eb28563fd99cd5e8fbfc1", "6e92d77c934c4c86ce824e67ef63d625735aa187", "225fb9181545f8750061c7693661b62d715dc542", "ea5dd7125c73756d7d81e49fa9826198f533cff7", "d96585b5d9eb2bf503d0c34c3a3decd287228ca7", "793e896c2f66fb66bfc6c834f2678cf349af4e20", "185360fe1d024a3313042805ee201a75eac50131", 
"ac5943ceac434ad342cb2f7eb179d0948f12610c", "554b8ddce8bd1d06bb09129cf654bc793406d4ff", "3ac6e3721a5295df4987b2915f07a3e853390e39", "dc6b40252885249f1e71831f57d9ddbaae8df635", "74295da4cb95115c84e6453f769e62dd607f21c6", "6d79999f8dc0cb9f86a87eaa2eb313a4eaeb2e5a", "504bf308431785153402b7058102f6920a0d8ab3", "d3761354b7df1228eabf46032fd01a4109229d43", "a7a1d3036c542824f2c681c3bf08f5b85f05d9e9", "f48665764089d42bb0123914e4ed0a3770f5d706", "aaf4d938f2e66d158d5e635a9c1d279cdc7639c0", "c44e2fa02f0b578a2cc92795fe6a4c578f65dc97", "8a722c17e6bda2df13f03ca522119f4c8b5bfff8", "0ae8377a984125802a69a93df7c9fe640b55aeac", "5b3950d447f596f996cdac5ab3bd37bfdb9014db", "a77e0db38ed7ad95a3bca95fea72048985c54508", "6a747f26f68d113e6df7915177788e844da77496", "f5fba67fa306d8692525c6f9d034ea6e99ad17f7", "674302298dc8f2dee16d85fd775e24a8537e8179", "2aa531b4aaf005db13ff93cc1bea7602d7fe2efb", "ddc17537f7895464523b5aff77d41d756a12bf18", "812725dc3968aaff6429ec7c3f44ba1ca2116013", "26acc572c644d57445170a309daf7765aca6ab45", "1ff89bd94d8a21b7ca4bf844e2d366f854822918", "4907a834b176dd9053de35f531e1d87f202cbd31", "eea77e2a891e49e65d4bed54c1b24411f33203a3", "395dadff1eab9c8177f843326ec864567342eba5", "2f0b8579829b3d4efdbc03c96821e33d7cc65e1d", "3c307cddf2bf84b4abcc96a362fe385c2d891c9b", "95f4b88d4b0a725d786b34558b60af47f5442230", "f402e088dddfaad7667bd4def26092d05f247206", "3a345ab83d1bfe4a63d3d44bc4ed243e10255a59", "96c21795747e50f6bce9a116428762d5a796f37c", "124fddbb5cbe4e5e6ea69be1467437aad01eb5d9", "1c1346bfa6b1e4deb34ed2933df1ef0783492770", "eed87e0cab48938ad5a3809a04c0f54a7dd39cb8", "c719a718073128a985c957cdfa3f298706a180e6", "06cb7c6601b7ee0d89cccd5311dcda9e5316e02d", "80433f3c41f383abf495ff2b368616af6d545694", "c266fc51bf6d769057de606a980988c50fc8b99e", "1eb27702acf0ec3e36d695f03385fab96b1e3c1e", "1a51bc5f9f12f6794297a426739350ae57c87731", "4abaf7d4b9577131cb2f93e913f8bd83f924da4c", "2fa057a20a2b4a4f344988fee0a49fce85b0dc33", "7ae0212d6bf8a067b468f2a78054c64ea6a577ce", 
"b980c72a148c69320621163012bdaa3ead77635e", "cee3bafca4b8474ebff82c9661617ab6cacda6c7", "9f88bc8ac1f69362e4ca2bfd651c5f5d6aea5853", "bde3c1298d4136369c8607dd5dc3f0800a27a8df", "3320fb3aebea4050fa1ce3c1d20882d84eb55ef1", "b285e50220fb6c09cf3c724c7e48093373df3c58", "eef5ad2dfb15c9bd866b03242d4be868068e45a8", "33801601485eaa27b838a17e073d81796d8b78d9", "635804db97b8251c54674ade12f3de494d9aa6d1", "da4f41beeb80e57b79855274d4375f4bc514d11c", "de608b8696b151405105abef1de66a2199cf1f3c", "23ea8a34570342855611a78a4ff00ddd902e6123", "3f764cbe8959a4df0ce844030b8c6c5935a71270", "66ef0364f2e865c35ce5003e129ba6fc57a2afa4", "1a7e385d2aa041ca8931784fb7664e9905194565", "71ac84476210b9bb40e8f3f069dcee94df214c8e", "8bba26895022749e2273729f96051571eabc7b99", "13bb3fe26a825aeefa0a45134c1d727d251955ca", "035886f58b550be140b1d4dbba0ea0479030589f", "56d5c8bee7d28d2fc6a2b1d00d80285f84618797", "8916cbd3eb66475182a177ade018ed8a3eed26b7", "3213390558a08e35222eec6fb028c8cfaa0c80c2", "32c45df9e11e6751bcea1b928f398f6c134d22c6", "9be5129fec3b6f1efc22e19dae3ae684961f5efb", "ad3d4b8498748b7724c38c5d2e85f9d4e5913ffa", "27b87bdee46964757b83b5afb4184e438cad6b1b", "a638c6db2c0629bab6dc2f75f297fe0ae4024de7", "ecd08edab496801fd4fde45362dde462d00ee91c", "3445cc781ebdcf65840bd6314bc0c8c634f1ef5e", "6ad32b70ee21b6fc16ff4caf7b4ada2aaf13cabc", "01d83dcb526a8b751df80ec493caf1937ba99155", "377cf44d0ae544d6758b2789ca6c39c1b33716fc", "cd0a04c0af9b6c523884415ba54bff370fd02fab", "0aaa66501298c3df27293eca7906e93d8013b729", "43fb9efa79178cb6f481387b7c6e9b0ca3761da8", "ddd4287e62bf3bd422e0f372476c8d90ce115803", "e7975261539329e83a4b404c0da177f78c00dbd4", "7605cb2d5db991d70783f5fb91397ab03d3f0c92", "59b095bdbd4b3f4a8240ad011b1d0b318b526d78", "c43474a5f910329b79d61a853d4a26b926103d87", "03ca829e8680ab4cdabd491b3b42639c58f4cdce", "9b0ead0a20a2b7c4ae40568d8d1c0c2b23a6b807", "00eb50f2c9792168bbd387fa7871bb7af5e7c8d7", "461052af565d7195c2c84a413d1118c412dc6935", "3164189f84710de9f8150385a41a7079a57186df", 
"5b36c2877e36e0d5b1b1d9ee98727c8c65239a2b", "16dd06c0d712f681d7d04447cfd9ee7c424b4814", "158a8037ce1c577620550da385d2275a31b9ccaa", "c4b6bff9c8d00944ed2cbbfec8c37810a9f47b76", "bb4ca5831eb8d913eb3583c9907137a9cdac0599", "28d7b900aa8c48bef81db589b6ef76c639dd8c26", "fd8b1715ad34858bf8650ac549c4249d86edbb7c", "0917de8a3be50f2a813e7b77fc53b81125a58acb", "d226eae6fe4b0198a96e9bc5106dbda8554ebd2e", "4cac18612a3e38c84c190d3c8571722b35ad546f", "089095d2da66c95b6e672bf3cfa33a36125b8232", "114b34fd2a2a2acd4a968cbaeb5e0d2251fb2835", "03270297b85b4a5a0d3468767ece17446c97df1d", "2e0bd693d12c43c2e86c7a4d8809445f380c5556", "1babf4bc962593593c83ac70f3b7ee64b3e5a680", "51972609a7c0070bf517c29f108f3e7240b94e59", "f34a6c1bc9a7872c8dc4c35b678f87bb966ab0ab", "3e72a8895ddef2c13257599b87de12b5f4ee5d87", "f86b5b6df1bfdf003570c114dc578f0a07b49264", "73704242a548e8725926762faf7333e5598d0228", "2f7452476910a7dbf6231b6b27aed67d9ed455d3", "e1febe25c3f678878c97bec28036c797bc5e6d0d", "f6f12e0fbfce067d02445abde76be0522e4db329", "934a77d099a38374ef1babe02d95952c089cce5f", "0af65df112db18248ed24a1c0fb5fe8524015336", "0adffd02029363c204a561092e1e0cc05cacfee7", "76ebe6d24ee69e3f853740fb75085a2118d40d51", "fd2504f2621b5c5cce4a3d609ac612242bb45552", "245130ac792531ca9981f9c5907190eac19ebb50", "e010304979c869d6084fb3ae902e48bb44f5fb73", "0ba6f4fb548d8289fb42d68ac64d55f9e3a274ca", "c91cf824cc7954483284a6e0019c83baeac236ef", "d58e9c94206b72de545f7720c38f87f1c8b81839", "83d1617092b34804c3825fdf4292120c382fe043", "0ad119275960fd1b68004feeb84d41b91bc273c8", "09ef5cd40bc9dcdbdd2d3587b0efe25551d25618", "588a21c28ea77a71efab5b2ed4f307eda49b6d1b", "bed87f5eb70fd6dba86fce27562a7e4cb804cb66", "ba3f4d05971d3d9ded23725846b864f5a8e8a7a1", "e907bc6f6fab7e21a80d2223f02d0f62e0b7aa54", "8765f22fbcdcf610a08b01db01edc4b8cc67d082", "e5cedb6822be94933f7d7d0b6870756da1be7f7b", "643abe6001946ebb7e262465edcf78d600c38f4f", "bdbaf77951b845859a7203a33d91b6a595f5f9f3", "1d05bb320ca36cfa062ef8ff76590b9ae3acca58", 
"e52c20df9b8813ca771ac09f59782cbce6fa92b5", "f32ba87d83419a67ff462374dc26fd7976ef0778", "75b1790ffcf51489fcfbf14b11f1b90a076345cc", "9ffd5de6f4ab1076885d112810e2da65e081ac7b", "75873df8a65cf8fead79ac7ebca7f910d4fbf2a3", "a8b94780259125046220a5908ca6f032963a7572", "a228ba020bd321d29ab24485cb2988a62707fd64", "a3be57fc74460463f03c2a14e81e7e62c05c692e", "7a71a750aa2ae7c65b661252d17e5a15199e5a54", "68437099d962ad09844f8d553db83c29292e8723", "5d8c94a69414b119b245df4d959be99c7c3b9b61", "9358123bab4c98f75ac9b0c59b574ea2d7ff6b5a", "534166ab1f6525cc05839a658705aa7443e548b0", "696d114f57aa6798a5d16aaf847a78942ab9949f", "f9ee16257c8c749c884ad8146fe57d85787b6430", "032ede597491cfbdf7424d221bd74742b6707397", "d9d799bd6cc5289dd28fa9c0087c963f7bdb3485", "8ac2736683dac9a467602ee19f5a290096259148", "5c6215a32fa943d07cd2d0401d646f93faaf34e1", "29beac55634bde386498a00b4274ac947d89062f", "40010e1918e1f342b14c8ec74e570101f07471b2", "36b322095bd0953d6076096111e4a020f427793b", "6bf58047438f54720e03252d50984d1a340a116a", "b640c36acc0e748553f78280fce7a840965c5cec", "04f7eab5d03ac6ad678f2fc8adf29bc1a84a2084", "27e9f54586475d495e68b4218fdbd6e926c0accf", "292c4bd6fa516393e9c8c5f1dae5afe0bb0ece35", "036a8cb922a30d766b0fc0ba5954098a1d2a09f5", "9e504d225a566fc57ff203f82cb1cb56b902a7f5", "a67d54cf585c9491ab8a3e2d58d9c4b223359602", "e34b0815f02b4dd5c8ba95f48b5833f9cdd4215b", "49609ea8946d5c4d8fad96553b10e2b07f4e2485", "b8f3f6d8f188f65ca8ea2725b248397c7d1e662d", "1e8a265ec741584e851b83b5efc00351048bbe3f", "7ea84857f29df907e6e605b19ff6c45cefb396d7", "3109c36a10f2ef3f2368018a4125f5e2981b1918", "53ee7e9839e1ac76e1168480a7e3227d568f4062", "65e19cd4f848451758f23779fd86ba7f12f18ea0", "c50c034d264083757eadeee5d0b94d933fe78544", "9fab78015e6e91ba7241a923222acd6c576c6e27", "959a35247782161474a0fcca4e8f2f1369c4601f", "02b72a5a4389cb32a7dd784b1c9084e8412e2e78", "1cf01968594ae59d28b12c9a35fc43d944563071", "c7da4495dbefe97addf3d67a11cc3089f54bf799", "02e4025fd63f168810724156fb6b20b0b14dccdc", 
"6cbb3c47010e406de656d13fe289522bb3071bc0", "541d69fdf97e5ded611ad0dd46f62bb9d2e19a51", "7b8ace072475a9a42d6ceb293c8b4a8c9b573284", "b3082004118a9a9afef26f9c35509f1e88e51f0b", "9879c158fee5b77b4f91b8f0fc5f2e236bf2de19", "adaf2f6e627870eec71ee416db1a37c023937231", "3812d8864b87b3c715fb59b501eaee0539be269e", "7d92d82eae23fe872e8d29116ae22cbd0b15abce", "e7d80ba5f259f52f77cd38d7fb83f5b751a87022", "779db93204bee4a9540db1e79ceb0b45e5af77e9", "629ee690787badcd05abcff7c9cc80e149f0fa79", "616b7093cfe6ec679f25d63f62c16e937227258f", "1fef45786e707e6b9b8517b0403e596ecbdea6a5", "fe5c43aa19da5cbbf5a42e4697659875f7389b91", "d73d2c9a6cef79052f9236e825058d5d9cdc1321", "6b6943a138938c31b285c1bb11213b87404feddf", "a06a7b1236c16d3628b39e3c37d566499c3446f0", "393b428f8f6a3f94cb9303ba9ee5441a1ab9e48e", "5ef3e7a2c8d2876f3c77c5df2bbaea8a777051a7", "0c30850067c296a01b72cf4803c9712926ae5a95", "99bc5b7f555485b79726de3f1ec40e5d4d978965", "2ffd1e152e4d322f03d09be3edfc162508b9938a", "db557f519ab9c7a002d5b1e84b9b3c4ab614c952", "71ceeb34631718e3492fe7c103ceb9cc2de3c260", "0862940a255d980d46ef041ab20f153276f96214", "3a691c376e51ac23e8c19f48207a10abdac3688e", "814a3acea78a7a79a499c52ff2efc57254f8d02c", "01085ce83c6e8781f3d59bf8fb6a2f14c7fda9d6", "e1fb8ab53996f06e9a35de6b553333bd6279bcbd", "cd27e8dcd5e4b495d5a0d0094f8fab72bb3b1295", "95cfb4cf747268a065a27f876cdd10d4e59b6180", "8021235b11ef4d9495287a9d55047df08ae5a5e0", "ccdbc6dbef24e9846bd8c845a97fa70c97cf5866", "0cba3c4ec4c1dd85b637a078b9c05244196009e9", "1b6afc2cdf931a02df46d5052b4409c770ef8660", "f0fa7d7764e4934f21c348341d79b93bbeb6e6cb", "917411f5a2c799cf16f03114147cc726fee950dd", "4ac4b0a2d06ff5df1cc4941f8ae47843b4593bba", "87c7df30fe0cd89a37487e09bb82041e1a2c884e", "6534a1e93a18220fc2c6a8920fba5d583314d8ff", "1dc4b5e93233fc632b070c8ff282ef0fe9141f64", "bb9b45f4b97935a95272c409d212589bc2a9a0cc", "bbc4bbf7aa80a8108d62644fea24e6f70a805df9", "728ad450605c7667526fce3fff75177a3c5e4188", "a20210d875221088d6428330787606e12605c68f", 
"4a83d9d07cbac4a8a279073e3873d01f3215f2f8", "79cc0f893af976fe1052240518f47f3bee56c6f6", "149e5e5eeea5a9015ab5ae755f62c45ef70fa79b", "3ed186b4337f48e263ef60acffb49f16d5a85511", "363ebbebb0d54ff78deb3946806aaa68d5232517", "51da79450a9cc567dd8bd55d0d1ea24a1809619a", "a06761b3181a003c2297d8e86c7afc20e17fd2c6", "1be498d4bbc30c3bfd0029114c784bc2114d67c0", "12e4545d07e1793df87520f384b37a015815d2f7", "8879083463a471898ff9ed9403b84db277be5bf6", "f726738954e7055bb3615fa7e8f59f136d3e0bdc", "16d6737b50f969247339a6860da2109a8664198a", "31ea88f29e7f01a9801648d808f90862e066f9ea", "d492dbfaa42b4f8b8a74786d7343b3be6a3e9a1d", "984edce0b961418d81203ec477b9bfa5a8197ba3", "7cee802e083c5e1731ee50e731f23c9b12da7d36", "b8b9cef0938975c5b640b7ada4e3dea6c06d64e9", "30457461333c8797457c18636732327e6dde1d04", "0dccc881cb9b474186a01fd60eb3a3e061fa6546", "c95d8b9bddd76b8c83c8745747e8a33feedf3941", "cb522b2e16b11dde48203bef97131ddca3cdaebd", "b07f9dfc904d317fa71c1efa9b466460abc0bee5", "07a1e6d26028b28185b7a3eee86752c240a24261", "10126b467391e153d36f1a496ef5618097775ad1", "c8adbe00b5661ab9b3726d01c6842c0d72c8d997", "635158d2da146e9de559d2742a2fa234e06b52db", "841c99e887eb262e397fdf5b0490a2ae6c82d6b5", "56f86bef26209c85f2ef66ec23b6803d12ca6cd6", "e5563a0d6a2312c614834dc784b5cc7594362bff", "81fc86e86980a32c47410f0ba7b17665048141ec", "dc2f16f967eac710cb9b7553093e9c977e5b761d", "7a65fc9e78eff3ab6062707deaadde024d2fad40", "0951f42abbf649bb564a21d4ff5dddf9a5ea54d9", "eb6ee56e085ebf473da990d032a4249437a3e462", "4f37f71517420c93c6841beb33ca0926354fa11d", "7587a09d924cab41822a07cd1a988068b74baabb", "be0a0e563445119b82d664d370e646e53e69a4c5", "9d4692e243e25eb465a0480376beb60a5d2f0f13", "e16831b6818a3ffec0785bac21911062ab04370e", "03f3bde03f83c3ff4f346d761fde4ce031dd4c69", "58df849378fbcfb6b1a8ebddfbe4caa450226b9d", "2e58ec57d71b2b2a3e71086234dd7037559cc17e", "bd572e9cbec095bcf5700cb7cd73d1cdc2fe02f4", "56c2fb2438f32529aec604e6fc3b06a595ddbfcc", "772a30f1a7a3071e5ce6ad4b0dbddc67889f5873", 
"e8b56ed34ece9b1739fff0df6af3b65390c468d3", "1fc88451a83f088ce028a0f715b9f9b600f4bd1c", "e295c1aa47422eb35123053038e62e9aa50a2e3a", "7aa32e0639e0750e9eee3ce16e51e9f94241ae88", "47cd161546c59ab1e05f8841b82e985f72e5ddcb", "1862f2df2e278505c9ca970f9c5a25ea3aeb9686", "e8951cc76af80da43e3528fe6d984071f17f57e7", "6f6b4e2885ea1d9bea1bb2ed388b099a5a6d9b81", "29db16efc3b378c50511f743e5197a4c0b9e902f", "16820ccfb626dcdc893cc7735784aed9f63cbb70", "bc749f0e81eafe9e32d56336750782f45d82609d", "ecfb93de88394a244896bfe6ee7bf39fb250b820", "282503fa0285240ef42b5b4c74ae0590fe169211", "43836d69f00275ba2f3d135f0ca9cf88d1209a87", "0435a34e93b8dda459de49b499dd71dbb478dc18", "7173871866fc7e555e9123d1d7133d20577054e8", "341ed69a6e5d7a89ff897c72c1456f50cfb23c96", "8355d095d3534ef511a9af68a3b2893339e3f96b", "854b1f0581f5d3340f15eb79452363cbf38c04c8", "1135a818b756b057104e45d976546970ba84e612", "5a4ec5c79f3699ba037a5f06d8ad309fb4ee682c", "361c9ba853c7d69058ddc0f32cdbe94fbc2166d5", "7aa4c16a8e1481629f16167dea313fe9256abb42", "ac9a331327cceda4e23f9873f387c9fd161fad76", "0deea943ac4dc1be822c02f97d0c6c97e201ba8d", "d38b32d91d56b01c77ef4dd7d625ce5217c6950b", "4ff4c27e47b0aa80d6383427642bb8ee9d01c0ac", "6193c833ad25ac27abbde1a31c1cabe56ce1515b", "b18858ad6ec88d8b443dffd3e944e653178bc28b", "25bf288b2d896f3c9dab7e7c3e9f9302e7d6806b", "7361b900018f22e37499443643be1ff9d20edfd6", "166186e551b75c9b5adcc9218f0727b73f5de899", "9755554b13103df634f9b1ef50a147dd02eab02f", "f4373f5631329f77d85182ec2df6730cbd4686a9", "017e94ad51c9be864b98c9b75582753ce6ee134f", "1b29f23f3517ac5bbe9bf5e80cda741b61bb9b12", "3a05415356bd574cad1a9f1be21214e428bbc81b", "d278e020be85a1ccd90aa366b70c43884dd3f798", "0a325d70cc381b136a8f4e471b406cda6d27668c", "24286ef164f0e12c3e9590ec7f636871ba253026", "96e0cfcd81cdeb8282e29ef9ec9962b125f379b0", "1fe121925668743762ce9f6e157081e087171f4c", "c5eba789aeb41904aa1b03fad1dc7cea5d0cd3b6", "2cbb4a2f8fd2ddac86f8804fd7ffacd830a66b58", "f0ba5c89094b15469f95fd2a05a46b68b8faf1ca", 
"5fb5d9389e2a2a4302c81bcfc068a4c8d4efe70c", "d0471d5907d6557cf081edf4c7c2296c3c221a38", "5c09d905f6d4f861624821bf9dfe2aae29137e9c", "31f1e711fcf82c855f27396f181bf5e565a2f58d", "9f3c9e41f46df9c94d714b1f080dafad6b4de1de", "732e8d8f5717f8802426e1b9debc18a8361c1782", "0cfca73806f443188632266513bac6aaf6923fa8", "9215d36c501d6ee57d74c1eeb1475efd800d92d3", "fde41dc4ec6ac6474194b99e05b43dd6a6c4f06f", "c5fff7adc5084d69390918daf09e832ec191144b", "d57dca4413ad4f33c97ae06a5a7fc86dc5a75f8b", "00a38ebce124879738b04ffc1536018e75399193", "dc6ad30c7a4bc79bb06b4725b16e202d3d7d8935", "f4003cbbff3b3d008aa64c76fed163c10d9c68bd", "c254b4c0f6d5a5a45680eb3742907ec93c3a222b", "17de5a9ce09f4834629cd76b8526071a956c9c6d", "bc6a7390135bf127b93b90a21b1fdebbfb56ad30", "321db1059032b828b223ca30f3304257f0c41e4c", "6c0ad77af4c0850bd01bb118e175ecc313476f27", "cfdc632adcb799dba14af6a8339ca761725abf0a", "b839bc95794dc65340b6e5fea098fa6e6ea5e430", "1aeef2ab062c27e0dbba481047e818d4c471ca57", "2911e7f0fb6803851b0eddf8067a6fc06e8eadd6", "63a4105adbe182e67d8fd324de5c84a6df444294", "42a5dc91852c8c14ed5f4c3b451c9dc98348bc02", "026e4ee480475e63ae68570d73388f8dfd4b4cde", "b161d261fabb507803a9e5834571d56a3b87d147", "b910590a0eb191d03e1aedb3d55c905129e92e6b", "ffe4bb47ec15f768e1744bdf530d5796ba56cfc1", "8a6033cbba8598945bfadd2dd04023c2a9f31681", "c26b43c2e1e2da96e7caabd46e1d7314acac0992", "289cfcd081c4393c7d6f63510747b5372202f855", "7492c611b1df6bce895bee6ba33737e7fc7f60a6", "96e9bc6b54d1c79406cf37ae45fd35ef04d647c6", "120785f9b4952734818245cc305148676563a99b", "d9dc41d3bc92e194c5a881ee9d741f898310ce9e", "8b38124ff02a9cf8ad00de5521a7f8a9fa4d7259", "48734cb558b271d5809286447ff105fd2e9a6850", "7789a5d87884f8bafec8a82085292e87d4e2866f", "78f2c8671d1a79c08c80ac857e89315197418472", "5b97e997b9b654373bd129b3baf5b82c2def13d1", "a59ff55217eb07a0343b4c51d658a59d5f6be113", "466f80b066215e85da63e6f30e276f1a9d7c843b", "9b8f7a6850d991586b7186f0bb7e424924a9fd74", "bc704680b5032eadf78c4e49f548ba14040965bf", 
"7b6f0c4b22aee0cb4987cba9df121d4076fac5a5", "6932baa348943507d992aba75402cfe8545a1a9b", "6ad107c08ac018bfc6ab31ec92c8a4b234f67d49", "9b1022a01ca4ecf8c1fa99b1b39a93924de2fcfb", "ffc7de9e2519f54b0c843879013e24cb7ee2a2ac", "12095f9b35ee88272dd5abc2d942a4f55804b31e", "e8c051d9e7eb8891b23cde6cbfad203011318a4f", "32a8c1bf38f3057fdb808d27d5c1a82a168fc100", "7123e510dea783035b02f6c35e35a1a09677c5ab", "6742c0a26315d7354ab6b1fa62a5fffaea06da14", "84a0f6db2b7155a83728101728794713898a859a", "926e97d5ce2a6e070f8ec07c5aa7f91d3df90ba0", "5fb59cf5b31a80d8c70d91660092ef86494be577", "0f21a39fa4c0a19c4a5b4733579e393cb1d04f71", "0e8760fc198a7e7c9f4193478c0e0700950a86cd", "8862a573a42bbaedd392e9e634c1ccbfd177a01d", "bebea83479a8e1988a7da32584e37bfc463d32d4", "85678d8aef7188bd59f18829de5b3980af7404b6", "786e57ed6877dc8491b1bb9253f8b82c02732977", "cfb1b2006d24a81bc3f489ca0eb391e7f03788d6", "4905323aaf61952e07f62c18fa662c7da895e40e", "3727ac3d50e31a394b200029b2c350073c1b69e3", "a856449c724f958dbb2f0629228d26a322153ba3", "758d7e1be64cc668c59ef33ba8882c8597406e53", "855184c789bca7a56bb223089516d1358823db0b", "889ff5c4001f2bf88bbd54941b15474b8273eaa1", "4ca0f009594150a760da17c5dbb71107e206834f", "465faf9974a60da00950be977f3bc2fc3e56f5d2", "b45a9f95980c434582c920bf15a8099ec267c1f7", "0b8839945259ec764ef0fad47471f34db39f40c3", "1885acea0d24e7b953485f78ec57b2f04e946eaf", "4848a48a2b8bacd2092e87961cd86818da8e7151", "87ee56feefdb39938cda7f872e784d9d986713af", "014e3d0fa5248e6f4634dc237e2398160294edce", "d9deafd9d9e60657a7f34df5f494edff546c4fb8", "254964096e523d5e48e03390ce440c9af337d200", "44d23df380af207f5ac5b41459c722c87283e1eb", "d561f8bb5d09e47348c86b40b5f6e4fe524fed36", "67ab22dff1c21e8680f94948d80b77314b325d66", "aecb2c5b26c4a66fb241a966237fa6f50679a5ca", "e78fdd62f67c38fcc6ac1421f045c9437f352b86", "66652367a369d18e1845dd14220dc94a9748c9fd", "c68c391be18920ea1c065b714692dd968bf5a15d", "845299d67c87dc7f5f610b0c4380feb4daa4d0cc", "d01067340615131f9109f71590ff66f418ce8f97", 
"a81769a36c9ed7b6146a408eb253eb8e0d3ad41e", "f611f46455ed6ad9af85eeb22e294082dced9bed", "0e950d7ad2282d49e8cada91d5d6b50b42a23979", "8031dd2c6583d8681fdd85bdae4371c7c745713f", "81d327ec41c67728b15438bca86d10b72de1d88f", "b42a8325d5cabefd11cee59f4b2b5901eb7f18c6", "b7407b2ea67b8c82246f013f4966c4cac1507e60", "f775be87ca71180d1cf97d81678f4fd713343e01", "106092fafb53e36077eba88f06feecd07b9e78e7", "60189e2b592056d43a28b6ffa491867f793ebe1e", "44484d2866f222bbb9b6b0870890f9eea1ffb2d0", "99d3bc6d62675297693e5e57ff0770e7017f9637", "c7cda3ae7c301080d0f062b11452e12c0272f8cf", "1527526b657e91fc259d8f74747db9292ec8cd4b", "82485c89a6b48077b03b65a774fd5768ea768d4d", "70ab6dacf948624998d61e3476d0604c9ea2d349", "b8e9a328d3952755941380f9eb88072c3c1e4f74", "700bafb57112febd5d5712f474b0ecc39379320c", "2c98300103c86401c979ec158a659fb07fd2a36e", "50bf4f77d8b66ec838ad59a869630eace7e0e4a7", "2311d2488707655b79cf2b115e3c720bd4791918", "9cf6d66a0b4e5a3347466a60caea411d67c4b5b7", "16e5d7b733086e0a7dbebc7218f80da53bd71646", "712590793270e2f115fbd8891314a413047fccc3", "10952c8fed04a5847192decdecfeee03c9ea997f", "35bddbae441ae8d5f381b3aa343d12eae3196efa", "7da961cb039b1a01cad9b78d93bdfe2a69ed3ccf", "bff1e1ecf00c37ec91edc7c5c85c1390726c3687", "2170636d5d31eb461618b5da10f4473c67e74e73", "b866bafa296ae836726a8a1df78639ad9533623b", "b3ccd57f7c15a1dd61d18007622d6332fe9ab041", "611849b55ef6b164f21e52cefd05300041e72152", "2f3f87d8cacf32232c3a34517fa991289ed03884", "3441edf456e25b222d7b237b96faa7e3e00daaf7", "71529e3e51f2967e338124652e93a3d34eb6c5e1", "d1ba33106567c880bf99daba2bd31fe88df4ecba", "acaa89fb6263aef7ad58a37d9cac79c8fcaa29ca", "e23a75430f777e982b0715b6f8a048d4bbfea438", "2011d4da646f794456bebb617d1500ddf71989ed", "2b853328b37db50be2e5ddf7e6629121f607140c", "7969cc315bbafcd38a637eb8cd5d45ba897be319", "3a8023d206613c930cee8e9166fcbbfd743e6634", "1fbc81959c49d04e292ef4cfc68dd431105cdd83", "64e4d1152cd43193f2f9a33c0fa2800b1aab1d55", "421046882e42a0572c8654ae1df06bc789088c2f", 
"81eecb00eeadb5fe36cd840b687439bfdca7ff30", "230527d37421c28b7387c54e203deda64564e1b7", "f9824a85f30c687f7b3a3fbf25f842d3d87e77ea", "6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3", "02e43d9ca736802d72824892c864e8cfde13718e", "0994b8a23880be0b28c0c4b214b6d4ae91d1182f", "f62d71e701c9fd021610e2076b5e0f5b2c7c86ca", "67538df7950dbba0ab7885a23b7abf6f56f39537", "8e346f6cb75fa50e0c4e8fee4f198788b91569e1", "b33d610d62e5255c77ca4abcc9de8aa3c57b60aa", "56423685e039d82d3cc88f797fc2b73f2d93e200", "1b93698c1784db4abf72b500e51d4806e6430522", "9ff961417d02f94d3e8a2accc591edd6f79cc21c", "f10f6c294130c76981f0e584af5811c44636eda5", "2161f6b7ee3c0acc81603b01dc0df689683577b9", "02f038ed453de0551813159284746126168f5e15", "c0153b92a59fe0cb6405c888dd536d44d34a92fb", "0ecce20af2ec42e39d4294322df48a42a58e42d4", "fc8d33f351111ed43f56ba6809558d5227d4dcbe", "0195bf2b913dbc527fb081674f480ecd8a799c34", "37a4eb74f9c9d6333864dbe1e0803d30c2e4db7c", "00bf7bcf31ee71f5f325ca5307883157ba3d580f", "1f5c409e9b6aec60003b5d4534373f9b07ff8443", "5c07464391cda9440cf05c67ab5f3b2b777459d6", "5f02e49aa0fe467bbeb9de950e4abb6c99133feb", "18ab9be9af94f2bf4d3828161ffb232d1462526a", "7ac25c5391251611696d16e677bd71040d80d583", "b07cf8eabdbc66df9cbc2bb93bedec1c8e5c41f0", "6701efa7f715b3c296843b8ff5414a6610711cf9", "063f78c20405158d87114a8aef1bb7557230bd89", "e6f918f5cbe3c167c9e4e09074a474bb3b30121d", "5293960de53b0118ef3c8b410d27b23b9cec9bf7", "6a2ecd18bba2667cefe1f9c3689c7cbe0380a276", "aa8447c65f93670820124e0a99a2b5e3536700be", "090dd649d9873282cfb7918fc12d11cb673dc15e", "76977a8e7afd645e83d3e5aaca47a44922188d47", "8e7493bdabddc2ec99cfa2b9b862343f70c1701a", "78fde57462fb68530a49f913c89343da5727580d", "3147bb14bf4228735ecf4bc2a421590b3de86c0f", "fef7fb6f3e239c346cd144960e27875889fdd650", "5696e95cb9538fe5f5cc27cd47543b62761021b7", "f727b12c905ac585de60811048c9f9dd4188b498", "1091ee239b2344a526a5617233914345389b04fe", "9ab57be9cd26652fcc02ce994ee53ede05cfa79c", "160ab0e879f4451fa4df88cd567508150894ba9d", 
"367008b91eb57c5ea64ef7520dfcabc0c5c85532", "b39caa8b3f8d2da981285a0115a61a66b5f50b93", "8edcd935362c899e630349784e4ff8adb3a69cdc", "3f9f5a8966c035dc179a60c042b160aee2bf8f53", "46638b810bf69023bca41db664b49bc935bcba3c", "d0d148c5124d53152e93d25ed91523dc7d83a248", "160da454cc64c1117c3a164b9bf375d73fb81720", "6a167375410115d83be377de1f9714929f301b00", "c58a68628745ad189e47dcd956c7e23dc831da1f", "3c975c6af75d62336b6d90d2c920f82021726f68", "5e1514de6d20d3b1d148d6925edc89a6c891ce47", "12bce6e2db10faa4f370f9e40a6084296080b5cb", "76ad6daa899a8657c9c17480e5fc440fda53acec", "6bb8a5f9e2ddf1bdcd42aa7212eb0499992c1e9e", "808ed09fc6d56aa221e95e82d3ed37a9cce1da88", "60d75d32d345c519fa5c0d8d6b6eb62e633a8d13", "7557e81c1189f0ef9643519e0664d60baed51721", "cf682939be6828d1a70161618024e02af660d1bb", "b3c375f2113eb891682b94d49a8a557f18d3ad1e", "ae299fad29ba650fbf1e14c7c95ba8ae32e095f0", "51144f331aa9310e4fd9b4dab00e4baf6fba7ef3", "6ce99d07c3673a8fc43bc659bb659df753bc3fdd", "76e48cd3b4b25cdb6c094ff660ed8e43be1e2f34", "592bbab1e073908c75584879bc00911e7246aebf", "6448acca3bf4d14ad44756974a6486a317095a54", "ff8c21c13c9f442e6cebe405be4a4ce09a7fb98a", "4469d0d0ac2f6f0221dc865b132958df33faa95e", "ac5c5be6234c459fedea5d66fd34e916e4730949", "64a6c30ca95e85427c56acb4c1c20f62c6ec0709", "eca9b9dd665556423278b85f79e1d589009a7ea7", "2ab79698e74812142600f7e02315b9ac235a93ee", "1898cc980300b5646116ee68d54957da439a7236", "ae936628e78db4edb8e66853f59433b8cc83594f", "3c3c628aabdd32617ab7a602c9ad4b9c98a46406", "ea00489323104d70dd43bac5e15390ec4d6dfe8f", "63ac85ec1bff6009bb36f0b24ef189438836bc91", "bccfda60d53fd1ca114355f606fcfcc2bc9da529", "0f18522f7cc5068a2dc89e28dd41054384fab289", "b5790f1bc586a77ff2cbea002b7ad2646e32af6b", "09b43b59879d59493df2a93c216746f2cf50f4ac", "5a603ab4b6353fc244361930c28723b3bc091f4b", "05a6a40c840c069631a825509f3095697592e1c4", "2b50f8e4568ecd84e2f9d6357254272d8db4bbd4", "f2843da00dc202eb8748b1b690f7b5dd0849af20", "754ee07789f6ff28fc121bb9f771895e971ac28c", 
"1459d4d16088379c3748322ab0835f50300d9a38", "3f45d73a7b8d10a59a68688c11950e003f4852fc", "0a808a17f5c86413bd552a324ee6ba180a12f46d", "7f45292369a5214984b4a15c65330ada474b0150", "37ba12271d09d219dd1a8283bc0b4659faf3a6c6", "21e880907053301b621d318a4b81dbe1b51c3aad", "83d58ac8221c3ccf33cb41ec9a8b3556e5ebb397", "79931447522097044cbc89e4bc2f69dd6e8f0e58", "c8c714c100a754baf7d86d240ec35207fcf84b06", "ec792ad2433b6579f2566c932ee414111e194537", "40248cd4a742cb33c14e835fe6b847ad3f8d5b96", "febff0f6faa8dde77848845e4b3e6f6c91180d33", "e5a9b24056782a343ddc903aebc21e6c277a6b2d", "1f6dd0ff2e8493b81e3699b520193198d4eed4e6", "ff27935acd65177a7a1ba5f228171601e8987127", "a6840267e169382d3c5ab2645f6da60dd842d6ac", "3005a4afddab849d9070788ac0e4e95e0fff2216", "fcb64ef4421cebb80eb33f62c7726f339eb2bb62", "6c518aabdbba2c073eab6a3bb4120023851e524c", "498af0c92b41f02ae328857020bc5e01df59c252", "e6eda2bfec3036cf431a45fa021070ab21bb3488", "0a85bdff552615643dd74646ac881862a7c7072d", "00a6d711f2bf7974384d2f4b5e61d0bbc493a6b7", "4953dc81247efe5a1c28c79fd1d4ab69bbb9f21c", "85cbeac58cb0a6010836ddd8b2d093eabd31767c", "9785429538389146c8061ec856e74e957a246f2d", "454f403857e487d6a885180e0e0f7216a342fb0e", "2ac31bc7a4dd0256166208dcc8d5dfa99347117e", "d8db46f1775641051d8596dad3d37d1d731558f7", "8d8afef13a8f6195d3b874231e5e767cf62f3c50", "aa2deffe0b56f7b9b8cdd69f5b3ee6bb79801797", "0c69b1e916476bbdd8b1b38bbb1aa877709a0f28", "93fa3d850e1aa84ce6fc0f2fdd0bda028fb6ca8f", "56c5d08103c5bf4b263a81da73135455136bbe6d", "dc6d518585c18504b2e69223c062cdd691c79bbd", "2dbff0b15221234e00bec4a00b4897c631904fcf", "487668cc36443a67378f253afe05a550eda2c4f1", "4770f73bc3ad0097fc8cdff3a3f8feaed1060595", "1a3bd11734742234ed8b12bf6221f2569b2a59db", "3fd7bfd90f0dfc3369bfe718e27aff30cf268c23", "0f911ed7c457979200848db3974f13ed3d7457a4", "1ce69b2ab1821b7434c099c2c2d65a2cef878645", "e3aca5a0afd13085bcc39dde7691524a07d34206", "ccf934a335793fe416b0115183783d2c355b64ed", "e8dac6b899e2be56b4d8b4b5bfb422eb1fe2cb68", 
"0a2aca07c9e15de3d5924e156af9a8e1a67b4cab", "ad01687649d95cd5b56d7399a9603c4b8e2217d7", "3da3630061a1b70ceb843711629eb320085cc9ee", "f27ac0f3a0c69571cb7d707d39bac2c5bd9c43a4", "a855b07ab632ef67b0538325acc34e38f000245d", "bcd59b43aaef7f466eda609e3f887a3db4ae3b41", "3e159084e12ece3664a17bf4dd0eed8c5f06a33f", "57c5501d9937fa43291dbd3f95288aded8b2dca1", "4d6e98fb5fcb7b5983f615a45ac1d81d1b570ca0", "3387805b752dadfa34cb8eb63d9dc86aff49934a", "6403117f9c005ae81f1e8e6d1302f4a045e3d99d", "afaec3f0da9f2b8721b7979c8e77341bae59f98a", "16a61b9b213cf5d9020a3cfdf3c1575b1fac8c07", "a7fe834a0af614ce6b50dc093132b031dd9a856b", "6123e52c1a560c88817d8720e05fbff8565271fb", "c37c3853ab428725f13906bb0ff4936ffe15d6af", "cde66b2f8c7f2e8b808073cc82f49fecfa0da04d", "9963c73b03e4649959f021ef6f4fb1eac0b617d2", "5da0e870d1f9161989ee9110c932477b3030d501", "205c1452f62a3130ad3b570ce11271aecec715a5", "1b3505018e39a794eab032e7e313784b21be42e9", "4acbfe48fb279dbbc62f5be868dde5a4ce799904", "1e21078efc0aa7a3881d0e87cb5dd5918523f525", "17db741725b9f8406f69b27a117e99bee1a9a323", "a46edfb0d5374fae7633b56ab767e26cc8ea25ac", "a23f55dacd3b2f3e61d6a30da49c307ed0102807", "de02043fd479a2bcf23d30ab4496cc4e0d84f699", "9125903bf7aa68920fdca8296f703b9013a877ed", "25bb4212af72d64ec20cac533f58f7af1472e057", "8ac074829b55bb6b4c67f062ca9ec62bb79f865f", "274e8c0c513ff82713f2f332694cf2b29b7c3bb1", "135fe2a0a0e6b726e5d81299edad4b3ce39d6614", "5438e7c8151311d76ed7335051069c7c07113a12", "38b55d95189c5e69cf4ab45098a48fba407609b4", "0a103350842cca3794a784fb04ed70ca78e7f29d", "bffefd41cea42f4db45e8c98b0651f74a8aa90d6", "37d7cb06c0a1e632dedcc1f23db22cbdc130e6aa", "39b0bce87eec467adfe5bebcfe628ff5bd397fc7", "19bcd3bd41825a67f48db701a68030c5e6763152", "813cc31130ceab1668da1ac6646eea4b6a4be6b6", "37b207d2c4a82a57f80e96353f79ecd71320a854", "2e59865aa2ddcecaf9275abcad9b134558c686c2", "2d4c5768a65f05f96ae71a269422d0c3d371b26a", "2cd03c6e78d09bb98872bb34bb70e08c32dc5f7e", "fea0895326b663bf72be89151a751362db8ae881", 
"0d754504848d071936a5d603c8b3db0c48a3da6d", "3ca1e06dfbaeed0f8dc49bf345369fb8e43da53d", "b3ed0c6d71a0b7b4a056dd88919c49f05ab69a9f", "f3084f7f60fe087c66606f21a1df6de890feeea8", "f34fdcaba3ab1c2533a00b10d83263af5d488e8d", "61245ce4b3d8faf4721a27cd92337226bff82f74", "6313ecbdfee7d04cbf22755c4b53193f42467a92", "b33bbdac96c3540ebb07c3655643e6badc368c59", "bf976d746b1223a60e93f6fcb1321e433eda94f6", "6782b148bd2de66cd73f3d69f481d40cce866fff", "2c3b31284e8ed00a473338be011570967cc15958", "03babadaaa7e71d4b65203e27e8957db649155c6", "1cf1505e4917d647f9416823e4574f7e3a67c55c", "0843ec2b76ef9401e60654fbfe71bac44ed19fae", "faffbf70dbd094346bad9b8a4ddeb973c51a70a9", "59a76bab968ac4cd740eb376ce9a26f6c1b103e4", "09d78009687bec46e70efcf39d4612822e61cb8c", "628aea69160e273ae3b3ae982deb11e9089d0336", "4679f58139f1be2aab2598378555f277bc299d2a", "974b9f0af3af675c092b96e7ac68e391cffdcf49", "359c2483195a3217095dcf8738abe47181a0fc94", "a27740f8a3834d6bc605a6b383c4d802ced373c9", "a52d6daf72281521ee99dabd82cd80093e8d6f4a", "0f1539368f90918fc3c4d5431e384986ad768506", "1bcf142e3b2f5a7ade185f220ee031657203c293", "34d523624f78ff114c589a1222f1e3ee1c854c35", "01a31f75c9c3296cf3cb45b7bad97acb300b7459", "7bb929b8ff2f83de91318865232819be8b13fbe5", "4c822705edd305d04f2c02ac9b1b73421e857961", "d8c2a08673bcda9fca35972112bff8764d207764", "b534163cf101520e0868c46a754748fd0e4e0ef9", "f2eb8b38e5366dd98350af304c678c42d858017c", "2b52292f09451cf4c803b52c2995b51d17e96153", "61ab9bf00e444d12816886da0d68b833c47c152e", "e9b72a152928f683aad5f81347777534b6715e67", "a784fd9391cc7013c618186017bbf43b806fa004", "eee4cc389ca85d23700cba9627fa11e5ee65d740", "887502ea2d8a335d8e72deb23fec2784df713b8d", "35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62", "1ca8c09abb73a02519d8db77e4fe107acfc589b6", "63856e83b69ac15e1252c1c3d89114dcf806fbcc", "de8303e9206096dd9f4ba9d876057345ff1f164a", "3d0660e18c17db305b9764bb86b21a429241309e", "1131088237aacddcc078547b4455e8572c61766b", "e959a426d02dd014c1346131ac38ed50114c17b7", 
"20405902028e631e239cbc0ff6148f5f1d8050a0", "79519f181ca9378e72bdb41ca647ba6d2e65b106", "d182c6d9ac4777b5ad73afdd64b7b68d76037212", "8cbee37fdc2353e93494a687ceddfdce2ad71e85", "8027f50bbcee3938196c6d5519464df16c275f8d", "925e907458e7621ed4390db20d170e98d155d693", "aa90a466a2ff7781c36e7da7df0013aa5b117510", "87d1283ccc9bfb0c550ebed8ec0b025dc14b160f", "0ad1fefa54f69d9efa0112f2e60c19841d5e9346", "014844a9e6ae39a101fb79f103aa047699f88246", "1fa9c5af78b3ca04476f4ee6910684dc19008f5e", "788eceb4d1b7556d1c9033224da2348b4402d6ca", "df9a102288582d8edadeddcb8d55068a06cc471c", "95052cd12cfca8b0f8162dc53fe5615fc9c06b22", "1cf6bc0866226c1f8e282463adc8b75d92fba9bb", "fbe3ba628fa8435bf1ad4ed153c93109624b8dff", "7ed9913de03dd2990b68751842306c2636852647", "3cc0d9c1f690addd2c82e60f2a460e3c557ff242", "62aaa33c46a7c4c2d8a80c81954101576200799d", "e8691980eeb827b10cdfb4cc402b3f43f020bc6a", "1883116d33a3e0321d2fe96e0a8a62546aca4ee9", "3965d73c9d7c97cdb391bfd86a15bfd3534cbd32", "4ae33d64f8515a023f10e20af20f62a2a5a76f13", "d88eb94d7054d2668b1a8dfa311721f37ae1f059", "fd9286f0e465deffad59123f46fa4f66cb15c3e4", "56143653c9bb0f01fb8a58da02b7ef7241170eec", "1eda03469d860ac725122bd27faaae6b2cb47d0d", "15f8bf1eb9bb3c86ad75454651a518267028c917", "706600aa77ffb165097e4aeccb2b214dabdb8092", "617c4e23fc7ca51d98dacb28779214b3e79e9720", "a1030e6e0e6995768dbcafedc712a59db090d2b4", "428e42f8d5cbffc068e2e5fe8f697c9c9ee113a9", "940c42892bc5b012be2b2ac7421ccb15005781e6", "38b18585e4bdb78347d44caa561e69a0045ade8d", "a4a90a2db209db2d5c49adfd2091ede2d4130f60", "6f42cb23262066b4034aba99bf674783ed6cac8b", "aa5fbe092f8a4dcb43c31ab93af0290900b4f0e2", "5582bebed97947a41e3ddd9bd1f284b73f1648c2", "21c99706bb26e9012bfb4d8d48009a3d45af59b2", "34c9753893fe4713568542e7d96dc9a9e6545ec8", "3d1382fa43c31e594ed2d84dda9984b1db047b0e", "a079309d28b6f8753ca26a789bd0bc43de9bd9f8", "380d50f3ccc07fa4f41282395a78c51e33985c39", "7730d2a16d6b85edfacdfa37124abda79a667702", "1cee733ee31e245dac4655a870fd9226163a52b5", 
"80c11a3ad362b294d5faa0d8e5c384db1d585795", "12441a74e709ddab53f9039cf507491df7b3840a", "5ba1db56bccc090ce5eceb13f46f2cd15ba3aa55", "b5af4b9d68f1b9b2c2999a726f6d2fbb2a49a3bf", "7ff18900bf1d8acbcb81e2f6d8e77fe95e1ddbd0", "a86ea8041bcc91097a8bbb450cb94a616ee85ae6", "1d251acc459931d927f5befdfb5b9cdf643cd8bc", "84be05dd82a7208a6e7b3d238df27b123cc917ce", "cb8f1f77a8b19d99dfe0c7b50dae3978cf646aa9", "fc450e42aa2a491ff0afda144718d4f73d4d89f2", "10a36dea0167511b66deca65fdca978aa9afdb11", "300fb25626bebfc84cf2f6458784b5cdf5c3ffc2", "8a4119c2898f611a6ffa0b4b72acf322d1b455b1", "1cc084aaf9ffb015f76eb2406e11745ab847ef3e", "03f98bfb129028b80ce98686c573830671ee1e3d", "8e112ad656ff90720ae609841bd0fcb2caa90d65", "2385007824daaf9eac9476fccb1501b7ac166ceb", "a0c81783ec60bd64aefc49285eb082a8185d49c1", "a7bfb6426359140a0bc0c84741ad9a3ac83eff04", "28633f80f1eae857d670cb245fbeb5d4e6e47a58", "2d83dbf4c8eabc6bdef3326c4a30d5f33ffc944e", "05fa7085663bbbd1057c0d240158091930c59c6a", "f5a22d8f49c0a3038cd5cacb017819a7bf781ef3", "4e614e344ecbb36770d45fc14d3b5152b653aa97", "517e5e6d8e17511fd74fc58ef53bdd57bb7b4651", "828ac57f755db989e2886042a85278ae4823297c", "c86ce9fc2bd5aea98869cf1f31d03e05e7ec672c", "cb3e81b912f24e66d91509e8ab41d09b522a397a", "610e0bee525a6573932e077f091505f54a5c4ede", "2ab8956fef9526741c1e68c94d9a9da74a87960c", "3aad63c3c049eedb1c6da4871faa90e797b933e8", "1c1a24169be56e01b0e36e260f49025260a5c7e7", "d7221695df4de3f34d5e4a877b71c14bc88760d2", "fb193923274c9b028254075c3b6decdae70b2ec0", "03c820f35afdc38dd05e4c663d2877e2602bcde0", "a3f24a03b9b55327704fba3aed182323137113c2", "ad2339c48ad4ffdd6100310dcbb1fb78e72fac98", "e6beb5d95fa262b8717cc264d79a879285db15d4", "b44999bb2e23cf8ca0a413a2d006cc9800794650", "fe466e84fa2e838adc3c37ee327cd68004ae08fe", "29cf7937a1c1848c24b294569d50a2f7122de51b", "c3cdf580a667a7b91191bbe149cd27b2054cbc43", "044e0d86e2db70d4c0b767bf0994913e90e105e3", "3468740e4a9fc72a269f4f0ca8470ccd60925f92", "5f9c3b25eaca97af3c86460d365a3dd485ecbf96", 
"173a38768848cfe57a6b20b5ae019ce613e58781", "dfb342327c5e883d21a1f91cd283b36dbc2a3661", "cceab479d37060b0952439d9bd6fbbba5de1d550", "1042683cf5733244238198ff486d3a65e70c9621", "01959ef569f74c286956024866c1d107099199f7", "48f45accce6a4a22e4ead41fe292a915f3531f5b", "72cebd7d046080899703ed3cd96e3019a9f60f13", "8b2d224c8b69191c02dce750257c39d46b1c4a7b", "1da2431a799f68888b7e035fe49fe47a4735b71b", "9a6268d2bc1221ea154097feadea0c58f234d02f", "0ec03a13063e5811ec9461cf7af04f4f3110ccaa", "49004f22a420e0897f7b811239c1e098b0c655bf", "62b26c5fd8279bc0e7ec58ad18d4b65d6203da0d", "111ff5420111751454a2f4f55b7bb75d837ed5f4", "228db5326a10cd67605ce103a7948207a65feeb1", "16bfd904f5a76bb52d5cd8a25721277047a02e89", "e59e1c43ee86e3e68b83d8a9916ebe6375606bb3", "00f5bfc2fb760249ba4e9c72b72eea4574068339", "747e9b36c5a1b0b8a9572da0ab416ddd1e1d2d33", "fa50b5a54aa340d6fe7f46feb02229f1ab0f12c0", "9fb31d0375552500bd494af20ab0c3109c9be3d2", "175e9bb50cc062c6c1742a5d90c8dfe31d2e4e22", "a6eb6ad9142130406fb4ffd4d60e8348c2442c29", "783e48629dfbb44697b15a3bc0cb2aa3eea490eb", "10c4b2489d7e1ee43a1d19724d3c1e9c33ca3f29", "17d4fee6b21c9277375d6cf0c9087828595009b6", "998b7c8608fb9f80177ce54230761d8c3d82b2da", "a47e51dd3f73817679ff0e987a0064d43db25060", "e83aa5155451fdca2ae4ed669a37a6de98296006", "115808104b2a9c3ab6e2e60582ab7e33b937b754", "50f3587a6316ae59493f4c408eadefe3bbf891fe", "266b61c5696c83c069e67d242ad5b7d0f5f1dee9", "7e4b638e028498e900747b600f46cd723f1f231e", "1aef6f7d2e3565f29125a4871cd60c4d86c48361", "5287d8fef49b80b8d500583c07e935c7f9798933", "7a8e54033d166bb5bcb2acfc89c2659b45baa6e6", "9563456bfdd8b18df7f764400c04976771eb8728", "0197f278e2dedd67ec5067f47037b8cdd3ae8509", "ad5950257e053b08657ea298f7b89ba358b8bfcf", "f8809a55945c283d249f4c4adb5d74e452cdfaa0", "0f827a43026854e4f7e4ea5edad2c70c7d72e9a1", "35fe83665c61adb513781c7208b92706ae2a1578", "2231f44be9a8472a46d8e8a628b4e52b9a8f44e0", "091d0c7b3576fd6f3bb2bec344deb8f81fc1f7c6", "b08c35f8e529f47a3fdc4f0713ffe77d94c57d87", 
"0612745dbd292fc0a548a16d39cd73e127faedde", "211fe99400bde5116efea3b42719d00a34931dcd", "defbca385b48173d3dbd7bb8b8fbd35ba06239c3", "6eeff23d6e0127cfbbd0374a83341173a418ba7f", "a3d071d2a5c11329aa324b2cae6b7b6ca7800213", "5bc7a664f7c7155bd1c2da0939d04231f230c4c6", "4066f186ff58d300090c652925ed0aed3355efec", "06e7648e945b39b8ccaa9120c796adc170dc81e4", "20e476887f9ad432ea35a5f712485e4e77363d64", "1ac6a33f04f6c5a8084c15c85295f987cc8e3d72", "329c06c00c627c0b041d330f3c0142a88b7cb1e5", "4f69ad0e52e37ba06db1c2b89c180f3ba331cc4a", "19d1855e021561d6da9d0200bb18e47f51cddda6", "15b0e598d9692d77aa33370dd3a1a47ba5f99aa6", "1b94c49c119c7490d2df6a2dd093e5ddd8bfba14", "7f201b4226d62bf449a68ebcc159acf8b95289be", "0910a4c470a410fac446f4026f7c8ef512ae7427", "0b57eb772ad9129ea4011c7fcb16c57967409018", "98025d3d44e9379736adb1228919272ded9298ae", "969e616e56b4ff4be4ae5730cb36d9d454f288a7", "974cadd15684c96618d04f845794cec5568a86a6", "4cefd47f3327b6d30bf99e61651b18319c4ee829", "ca1c710c14f95c3b0cf027fb068d53d595809a5c", "216c6d29a6f57c37ef8f26f88b6ec9be5b855a66", "90dd771829094dad1230e32b8bc4385bfe86c4e5", "6115445ca062b8f865f0b447c059813088b9dd49", "f22058a3003cee6b17c6c25c8a635a653e78614c", "03eb382e04cca8cca743f7799070869954f1402a", "637648198f9e91654ce27eaaa40512f2dc870fc1", "20b5cfa2d35ff437bcc81d4c7f82f8b1f69dcec3", "ec3621e900cc50afd067584bb1246a8b4e338fa8", "1c6690ab404b23d5026dd3ad0c7a49ce2875c1b3", "30b32f4a6341b5809428df1271bdb707f2418362", "1e7995220c6f17dc649b0caeab34c617248aa167", "a3f4f163e87b28901389e189bb7f0f655995793f", "287c5be2610e1c61798851feb32b88c424acfbf9", "6861552bf6730529d3fac5d6f2bb7e0f491edea2", "c67d62592ff24a25764e489a8a68672d40f50da7", "b41c90bce7fecdcf5980a9990f8693ff07997b65", "ab84d00079d0a29e44bdc4c83037dc76b0fbef05", "ce386ab4511f38a7671576a9cd32e5557853180e", "6e60536c847ac25dba4c1c071e0355e5537fe061", "355c8c0dbd80de9d23affb37ac102179b6b2a908", "55b9b1c1c5487f5f62b44340104a9c4cc2ed7c96", "b9953824b3d4cd2be77ecbc5db3f7dec3dfa031e", 
"2d3d4883350a48708cdc0c260479110e5eed965a", "20e24a40dc855fa69aa3d85b4bfdcfb8c9dadb74", "d740d0a960368633ed32fc84877b8391993acdca", "4d510bca00b625f86606cb0096299b993090534a", "356b431d4f7a2a0a38cf971c84568207dcdbf189", "27f8b01e628f20ebfcb58d14ea40573d351bbaad", "60efdb2e204b2be6701a8e168983fa666feac1be", "7071cd1ee46db4bc1824c4fd62d36f6d13cad08a", "36c473fc0bf3cee5fdd49a13cf122de8be736977", "434d6726229c0f556841fad20391c18316806f73", "90a754f597958a2717862fbaa313f67b25083bf9", "d963bdff2ce5212fa585a83ca8fad96875bc0057", "25ddb883545658f5fea5c18d6024d54dfdee48fc", "1886b6d9c303135c5fbdc33e5f401e7fc4da6da4", "59d225486161b43b7bf6919b4a4b4113eb50f039", "a9be20954e9177d8b2bc39747acdea4f5496f394", "c038beaa228aeec174e5bd52460f0de75e9cccbe", "ac6c3b3e92ff5fbcd8f7967696c7aae134bea209", "d1edb8ba9d50817dbfec7e30f25b1846941e84d8", "e43045a061421bd79713020bc36d2cf4653c044d", "5d88702cdc879396b8b2cc674e233895de99666b", "acfe5b5c99be70fa3120d410e7be55b9fe299f40", "4bf85ef995c684b841d0a5a002d175fadd922ff0", "d307a766cc9c728a24422313d4c3dcfdb0d16dd5", "590628a9584e500f3e7f349ba7e2046c8c273fcf", "68c5b4d9ce2a0c75ba515870923a4bd1b7d8f9b5", "1c9efb6c895917174ac6ccc3bae191152f90c625", "43261920d2615f135d6e72b333fe55d3f2659145", "0ccc535d12ad2142a8310d957cc468bbe4c63647", "82dad0941a7cada11d2e2f2359293fe5fabf913f", "ed2f4e5ecbc4b08ee0784e97760a7f9e5ea9efae", "48a6a1c6a0ac5f2b7912b3ccb40b0c07f62ddfdf", "02fc9e7283b79183eb3757a9b6ddeb8c91c209bb", "7ec431e36919e29524eceb1431d3e1202637cf19", "3b9c08381282e65649cd87dfae6a01fe6abea79b", "01dc1e03f39901e212bdf291209b7686266aeb13", "dc5d04d34b278b944097b8925a9147773bbb80cc", "b999364980e4c21d9c22cc5a9f14501432999ca4", "624e9d9d3d941bab6aaccdd93432fc45cac28d4b", "ef761435c1af2b3e5caba5e8bbbf5aeab69d934e", "4707c5e306e69330c5757e74a6313544484840c8", "f8f2d2910ce8b81cb4bbf84239f9229888158b34", "7c3e09e0bd992d3f4670ffacb4ec3a911141c51f", "4b5eeea5dd8bd69331bd4bd4c66098b125888dea", "2b0102d77d3d3f9bc55420d862075934f5c85bec", 
"a74251efa970b92925b89eeef50a5e37d9281ad0", "a422f2d0212f54807ff678f209293a27c7791ec5", "860588fafcc80c823e66429fadd7e816721da42a", "38183fe28add21693729ddeaf3c8a90a2d5caea3", "d1777d3ea950e6aac92dd359075701bc28ba1cb2", "32ecbbd76fdce249f9109594eee2d52a1cafdfc7", "4e6c17966efae956133bf8f22edeffc24a0470c1", "9d5db7427b44d83bf036ff4cff382c23c6c7b6d8", "441bf5f7fe7d1a3939d8b200eca9b4bb619449a9", "38192a0f9261d9727b119e294a65f2e25f72d7e6", "6495d989fe33b19d2b7755f9077d8b5bf3190151", "ccebd3bf069f5c73ea2ccc5791976f894bc6023d", "c146aa6d56233ce700032f1cb179700778557601", "a4de780b8b333f073667c1f2dd7b3ae54a97f380", "a5f35880477ae82902c620245e258cf854c09be9", "0ba64f4157d80720883a96a73e8d6a5f5b9f1d9b", "d3b0839324d0091e70ce34f44c979b9366547327", "2f16459e2e24dc91b3b4cac7c6294387d4a0eacf", "7caa3a74313f9a7a2dd5b4c2cd7f825d895d3794", "7117ed0be436c0291bc6fb6ea6db18de74e2464a", "c1e76c6b643b287f621135ee0c27a9c481a99054", "174930cac7174257515a189cd3ecfdd80ee7dd54", "2aea27352406a2066ddae5fad6f3f13afdc90be9", "5c8672c0d2f28fd5d2d2c4b9818fcff43fb01a48", "5cbe1445d683d605b31377881ac8540e1d17adf0", "287de191c49a3caa38ad7594093045dfba1eb420", "2f04ba0f74df046b0080ca78e56898bd4847898b", "ba397fe5d4f0beaa7370b88e9875dbba19aa7bfc", "fce1c3f4948cf300694c18c3fcc5486cd060af13", "5b90bf3ebad1583beebcae5f892db2add248bcad", "8cc07ae9510854ec6e79190cc150f9f1fe98a238", "1606b1475e125bba1b2d87bcf1e33b06f42c5f0d", "a1e07c31184d3728e009d4d1bebe21bf9fe95c8e", "04661729f0ff6afe4b4d6223f18d0da1d479accf", "22f656d0f8426c84a33a267977f511f127bfd7f3", "9bd35145c48ce172b80da80130ba310811a44051", "95ea564bd983129ddb5535a6741e72bb1162c779", "45e616093a92e5f1e61a7c6037d5f637aa8964af", "9944c451b4a487940d3fd8819080fe16d627892d", "a1f40bcfadbeee66f67ab0755dd3037c030a7450", "b3e521baceadee36ac22b6a06266e8abd6a701f7", "3cb057a24a8adba6fe964b5d461ba4e4af68af14", "8ee5b1c9fb0bded3578113c738060290403ed472", "bb070c019c0885232f114c7dca970d2afd9cd828", "43e99b76ca8e31765d4571d609679a689afdc99e", 
"6eba25166fe461dc388805cc2452d49f5d1cdadd", "f074e86e003d5b7a3b6e1780d9c323598d93f3bc", "1389ba6c3ff34cdf452ede130c738f37dca7e8cb", "02bd665196bd50c4ecf05d6852a4b9ba027cd9d0", "85674b1b6007634f362cbe9b921912b697c0a32c", "8d9ffe9f7bf1ff3ecc320afe50a92a867a12aeb7", "a66d89357ada66d98d242c124e1e8d96ac9b37a0", "726b8aba2095eef076922351e9d3a724bb71cb51", "419fec1a76d9233dcaa8d2c98ea622d19f663261", "446a99fdedd5bb32d4970842b3ce0fc4f5e5fa03", "a065080353d18809b2597246bb0b48316234c29a", "fd96432675911a702b8a4ce857b7c8619498bf9f", "c3a3f7758bccbead7c9713cb8517889ea6d04687", "204f1cf56794bb23f9516b5f225a6ae00d3d30b8", "1ed6c7e02b4b3ef76f74dd04b2b6050faa6e2177", "37ce1d3a6415d6fc1760964e2a04174c24208173", "e8c6853135856515fc88fff7c55737a292b0a15b", "ec8ec2dfd73cf3667f33595fef84c95c42125945", "cc31db984282bb70946f6881bab741aa841d3a7c", "614a7c42aae8946c7ad4c36b53290860f6256441", "085ceda1c65caf11762b3452f87660703f914782", "fcd3d557863e71dd5ce8bcf918adbe22ec59e62f", "c00df53bd46f78ae925c5768d46080159d4ef87d", "1efaa128378f988965841eb3f49d1319a102dc36", "c4fb2de4a5dc28710d9880aece321acf68338fde", "c94b3a05f6f41d015d524169972ae8fd52871b67", "2a171f8d14b6b8735001a11c217af9587d095848", "c7cd490e43ee4ff81e8f86f790063695369c2830", "d689063294e217f1ec8b83fe4b60e706f1934787", "96a9ca7a8366ae0efe6b58a515d15b44776faf6e", "3b73f8a2b39751efb7d7b396bf825af2aaadee24", "043efe5f465704ced8d71a067d2b9d5aa5b59c29", "969dd8bc1179c047523d257516ade5d831d701ad", "c13211a15abd3ca187ef36b9f816891f901ba788", "ede5982980aa76deae8f9dc5143a724299d67742", "a3d0ebb50d49116289fb176d28ea98a92badada6", "ed07856461da6c7afa4f1782b5b607b45eebe9f6", "0974677f59e78649a40f0a1d85735410d21b906a", "293d69d042fe9bc4fea256c61915978ddaf7cc92", "c5ea084531212284ce3f1ca86a6209f0001de9d1", "28bac5bc6e8a15f704563c5cb723b7c71f5413fa", "b2cd92d930ed9b8d3f9dfcfff733f8384aa93de8", "5789f8420d8f15e7772580ec373112f864627c4b", "01125e3c68edb420b8d884ff53fb38d9fbe4f2b8", "17c0d99171efc957b88c31a465c59485ab033234", 
"622c84d79a9420ed6f3a78f29233d56b1e99cc21", "b0c512fcfb7bd6c500429cbda963e28850f2e948", "b8378ab83bc165bc0e3692f2ce593dcc713df34a", "b8978a5251b6e341a1171e4fd9177aec1432dd3a", "d115c4a66d765fef596b0b171febca334cea15b5", "31af1f2614823504d1d643d1b019c6f9d2150b15", "19705579b8e7d955092ef54a22f95f557a455338", "3fb26f3abcf0d287243646426cd5ddeee33624d4", "055cd8173536031e189628c879a2acad6cf2a5d0", "488676e61fcf7b79d83c25fb103c8d8a854d8987", "d4c2d26523f577e2d72fc80109e2540c887255c8", "3251f40ed1113d592c61d2017e67beca66e678bb", "6f7d06ced04ead3b9a5da86b37e7c27bfcedbbdd", "d06bcb2d46342ee011e652990edf290a0876b502", "e5737ffc4e74374b0c799b65afdbf0304ff344cb", "42ea8a96eea023361721f0ea34264d3d0fc49ebd", "19a9f658ea14701502d169dc086651b1d9b2a8ea", "0ea7b7fff090c707684fd4dc13e0a8f39b300a97", "66490b5869822b31d32af7108eaff193fbdb37b0", "04dcdb7cb0d3c462bdefdd05508edfcff5a6d315", "eb87151fd2796ff5b4bbcf1906d41d53ac6c5595", "35d272877b178aa97c678e3fcbb619ff512af4c2", "9e8f95503bebdfb623d4e5b51347f72677d89d99", "492f41e800c52614c5519f830e72561db205e86c", "15aa6c457678e25f6bc0e818e5fc39e42dd8e533", "b11bb6bd63ee6f246d278dd4edccfbe470263803", "3bb670b2afdcc45da2b09a02aac07e22ea7dbdc2", "bdfcc45cfa495939789b73eec7e6e98a4d7e3f41", "66dcd855a6772d2731b45cfdd75f084327b055c2", "c3a53b308c7a75c66759cbfdf52359d9be4f552b", "795ea140df2c3d29753f40ccc4952ef24f46576c", "244b57cc4a00076efd5f913cc2833138087e1258", "cd4c047f4d4df7937aff8fc76f4bae7718004f40", "853fc1794892175e2318f55785ca8e2ce6fd7537", "0f9dd79de75a3dce394846369f09c05ddf250e31", "9ea992f009492888c482d5f4006281eaa8b758e7", "31d51e48dbd9e7253eafe0719f3788adb564a971", "b558be7e182809f5404ea0fcf8a1d1d9498dc01a", "5c0d105cfcc78d689d948a2aa8d654cab4e545f2", "407a26fff7fac195b74de9fcb556005e8785a4e9", "7384c39a2d084c93566b98bc4d81532b5ad55892", "6dbdb07ce2991db0f64c785ad31196dfd4dae721", "1a12eec3ceb1c81cde4ae6e8f27aac08b36317d4", "cc70fb1ab585378c79a2ab94776723e597afe379", "2f61d91033a06dd904ff9d1765d57e5b4d7f57a6", 
"878301453e3d5cb1a1f7828002ea00f59cbeab06", "e6178de1ef15a6a973aad2791ce5fbabc2cb8ae5", "4c6233765b5f83333f6c675d3389bbbf503805e3", "02820c1491b10a1ff486fed32c269e4077c36551", "e659221538d256b2c3e0724deff749eda903fc7d", "b55e70df03d9b80c91446a97957bc95772dcc45b", "3176ee88d1bb137d0b561ee63edf10876f805cf0", "961a5d5750f18e91e28a767b3cb234a77aac8305", "0d746111135c2e7f91443869003d05cde3044beb", "ca8f23d9b9a40016eaf0467a3df46720ac718e1d", "8fda2f6b85c7e34d3e23927e501a4b4f7fc15b2a", "ada56c9ceef50aa5159f1f8aa45ca2040d1ed15c", "01bef320b83ac4405b3fc5b1cff788c124109fb9", "22c06284a908d8ad0994ad52119773a034eed7ee", "1b794b944fd462a2742b6c2f8021fecc663004c9", "2f5ae4d6cd240ec7bc3f8ada47030e8439125df2", "239958d6778643101ab631ec354ea1bc4d33e7e0", "40c8cffd5aac68f59324733416b6b2959cb668fd", "a5e5094a1e052fa44f539b0d62b54ef03c78bf6a", "6f9026627fb31d4cfb08dbcc4ab852945dc42252", "5aafca76dbbbbaefd82f5f0265776afb5320dafe", "4cd0da974af9356027a31b8485a34a24b57b8b90", "3dfbd17bd9caf7bd1d908ff469dec2b61e8a9548", "5a3da29970d0c3c75ef4cb372b336fc8b10381d7", "44fe6787f864812e666fbed8c18e18ad0eff190f", "37eff004718e0d66a249310ab5108206d158d945", "a71185a29a989d9ef1bb6f2a3e225f1746640a89", "1c8d585fb7e82abf43f45014494018a843774d2b", "15a0546ee32ac391f342a6188446dd6699a1d7b8", "2dcbd8f2533dd08d09987bd069eeacbd6024731d", "699b6cbd72ee0274699b939863813499c377ea00", "6ac1dc59e823d924e797afaf5c4a960ed7106f2a", "1dcccd889562daec06fee30829c61a4b5a386cc2", "b40881a905cf6c4963658df4f64b860f9b1755fe", "16dd9ea784a862c45d1d2af6d2fb83198f567719", "12831caca9674e0ab3fe2fc02a447ddb5a372994", "f355e54ca94a2d8bbc598e06e414a876eb62ef99", "bb35ef89addbbc28d960bc0cab70d8a29fdf6eee", "927ac98da38db528b780f14996bb02b05009c9cc", "f926020afaf2fcaf15fd6423dc3d76f88551701c", "68e6cfb0d7423d3fae579919046639c8e2d04ad7", "c6752af29ae0d5f3af8c823be02cfec38a8b65f9", "9a6b80f8ea7e5f24e3da05a5151ba8b42494962f", "ae8ed3b0b8043c5af76390751938edfd100fa9cd", "8935ffe454758e2e5def0b5190de6e28c350b3b8", 
"1ac4c3ede7e6a630c7dcbf204a14a2215ce9f1cc", "d437a69d631b48583acc19c946b48e7d601d7853", "1768909f779869c0e83d53f6c91764f41c338ab5", "a92b5234b8b73e06709dd48ec5f0ec357c1aabed", "6b0b10836197d7934f53080a39787b7d8d2b81f2", "db49a5e6d73de616c66904138a8a19ce0a329c4d", "e57ce6244ec696ff9aa42d6af7f09eed176153a8", "29921072d8628544114f68bdf84deaf20a8c8f91", "c36ae7c5e9f9f992a5939e07283183707ee0a787", "cbe021d840f9fc1cb191cba79d3f7e3bbcda78d3", "7b8b1571639f901275da22ee8f1de852350bf38e", "20b038c50cc7148dfb364e2de51cde120c907c9f", "b907537c602b95948da809f7d4aff4bc959d8ba1", "16597862a1df1a983c439e82e0462424f538bb48", "e0162dea3746d58083dd1d061fb276015d875b2e", "4afded694bb067c45b591c98e0951e8988d7d2d6", "9f829eb41c2ecb850fe20329e7da06eb369151f9", "c733e4a14b51623120da9b4571b4409bc99ab0cd", "3965d61c4f3b72044f43609c808f8760af8781a2", "bd0a6bea1985ece3388b1dae47fa76aab3562d6d", "9d138bc60593c2770d968ba56172332773e02fa5", "d50c6d22449cc9170ab868b42f8c72f8d31f9b6c", "2f73203fd71b755a9601d00fc202bbbd0a595110", "7df4f96138a4e23492ea96cf921794fc5287ba72", "6ef78987104b7e66c1a71f87b94c4b0ebf34330e", "6d62819a874ce889dde472cca0991ef90a43cb47", "fce94bd316dad2e38091549e15aa2d84f2e93943", "19bfe748ec8957ec82a7fef0f2585bb14ab8bdd4", "70af8e4ff3c029aea788bc28b45c56932b50c056", "c4f3375dab1886f37f542d998e61d8c30a927682", "9fd2859eb2e277bcf42a455741f623480238e381", "90ddf1aabf1c73b5fc45254a2de46e53a0bde857", "97f3d35d3567cd3d973c4c435cdd6832461b7c3c", "2a31b4bf2a294b6e67956a6cd5ed6d875af548e0", "b8658fc3b17e75afce025bcbb161dd02e7004b1f", "53a8cb1e6a6c29b75eb683c99551288a5d28c1a5", "3df44e3a547c7ccbc1222bdeaeef6c899c59dc30", "cc2bb4318191a04e3fc82c008c649f5b90151e4d", "bf107f242abea2e52d82dcd834e58b774205ec84", "96d3969dba2d1000f97de0b7cca50ea721d1f79d", "e1630014a5ae3d2fb7ff6618f1470a567f4d90f5", "625075ac5a48c9ac5e90cb6a7fece1f7a8ba2b2c", "04743c503620baffd75f93f8e4583fcba369ac9d", "66719918aa6562d14ea53286bf248d6f1a7d6b14", "0ad8149318912b5449085187eb3521786a37bc78", 
"5335e98334e37114c5bcf076189d1cba887a813b", "09701199771e344b12dbfc799e0f82456764757f", "d7d9c1fa77f3a3b3c2eedbeb02e8e7e49c955a2f", "1287639dca7a7510af82f0840012db9d7873930e", "8d646ac6e5473398d668c1e35e3daa964d9eb0f6", "4a75d59c9c57da420441190071ba545eb4a75e1e", "856b8576999517c0cb7d95aef0159432604a8447", "1bc9aaa41c08bbd0c01dd5d7d7ebf3e48ae78113", "277c72916f3834cdba6c4cea1c8c651d2b46b78b", "826f1ac8ef16abd893062fdf5058a09881aed516", "e1e2b6a8944a4e6f195b6f7371ee9e6b0684ae6b", "7dd654ac5e775fa1fa585e257565455ae8832caf", "16243557482241171beccbbd694976103cc941ef", "1eab1ffed59092d6bf19900b7fb283e6dd0d01a2", "9103148dd87e6ff9fba28509f3b265e1873166c9", "0c049cc7320f9b92f91210ab6961aa6644c867cd", "14f6309369cc8d4826df11522de318b266df3bd5", "6eaf446dec00536858548fe7cc66025b70ce20eb", "a11f5e74b13a6353d14e024d06a902b9afa728b3", "6e12226cf0da453dc4b9879d7af6b43af3c31d2b", "296afa5f7e99fc16df47f961c9539347732f7b13", "2dbc57abf3ceda80827b85593ce1f457b76a870b", "7f23a4bb0c777dd72cca7665a5f370ac7980217e", "06560d5721ecc487a4d70905a485e22c9542a522", "ab703224e3d6718bc28f7b9987eb6a5e5cce3b01", "337e67c8c5247695bb384c35272beaf47d464c75", "3505c9b0a9631539e34663310aefe9b05ac02727", "4543052aeaf52fdb01fced9b3ccf97827582cef5", "03d9ccce3e1b4d42d234dba1856a9e1b28977640", "c05ae45c262b270df1e99a32efa35036aae8d950", "d89cfed36ce8ffdb2097c2ba2dac3e2b2501100d", "021c3e8c3c64c25126315911f31cab1edca82ab3", "acddbb4625b121b93903343380651b3641bfa7d5", "a251171bb335608b3019f7b05b167b7e49a8dc23", "8f32df80a5f3b3b60599614990fcd6a7d5ae9619", "e1aa78cc4df90564da1dcdfd63e04c3cfd0e84e0", "0c251ee4c1f4cac3929cb889c525785335237ed3", "c5c7c5241023edba35d864820e5c900c78605c1c", "1423037dd56f85453cd4257861821aeeb7478bc1", "5061f591aa8ff224cd20cdcb3b62d156fb187bed", "6f491fcff042991767a8d5c3a919ce169e0e65f0", "ff8ef43168b9c8dd467208a0b1b02e223b731254", "6bf57ae6c63873253d1b95782f8c6b7bbc91b9ac", "52ed55387cd6e168b91391110fb4319160e82f50", "add9dc1725f8b96d2f7c5e242de29205096e7c98", 
"10c49dc22d5c7d885cba238634390013aeda6e0e", "b29b42f7ab8d25d244bfc1413a8d608cbdc51855", "8a63a2b10068b6a917e249fdc73173f5fd918db0", "582a6fdedcde646cdc4558106832f247dd4b27de", "cc8c5f23038663abf98874612a8b7a1bb000b990", "caf36ebfe947b911b247905f3995b46a8c635892", "c5e37630d0672e4d44f7dee83ac2c1528be41c2e", "bcf2710d46941695e421226372397c9544994214", "d46509935f7d485295587d4fc201c42108760379", "deea683731f468c7234e1089f48c4546e7003b18", "6baaa8b763cc5553715766e7fbe7abb235fae33c", "28a45770faf256f294ce3bbd5de25c6d5700976e", "7127f9e9a51236f213c5b7805be8714a3bcbfc28", "12a155be0f3d5e4c8bf08cc79fdb964bce1f4b55", "053931267af79a89791479b18d1b9cde3edcb415", "9fec253eb41438a9ab13bd5156a18c2c08ff610a", "227cef669b362a7756564519be22c7d060348f66", "d56a262763c84d2bb21e9ea40f2517610f61d05f", "134f1cee8408cca648d8b4ca44b38b0a7023af71", "3a2cf589f5e11ca886417b72c2592975ff1d8472", "a1e1bd4dacddc703a236681e987a09601ee1016d", "862d17895fe822f7111e737cbcdd042ba04377e8", "3abb51739b90c8bfd665e045b0eeadc87e065b63", "9151f229e7b4e318b0b12afe99993da0ee5e0e34", "55e66f369983474eba525f0f718c37794027075e", "3661a34f302883c759b9fa2ce03de0c7173d2bb2", "27f1fd71538ba420c63aa4c74704718a0633b22a", "9391618c09a51f72a1c30b2e890f4fac1f595ebd", "7a670b80c7e95b85efc6ef677cae539c4df20e4f", "24e6e8d725f08c44dac42a588f41092e392a3514", "ef559d5f02e43534168fbec86707915a70cd73a0", "7afc3db34d9e16a70064ee4937b0425045cdb4e8", "d1fac29bac01b7412770902914f67ed7d6b31516", "8c3754f43d604dfe6d149a051eeaff8c2237f217", "231b769f2e13724754fa09e7e5ab7d4b843075a0", "6cb68c1f7558e01966ad1e1fa81feeeae3dee666", "66449f6eb492864c86ab80e81417b5003a76a36b", "50decd1955df6c7c6edb2d74d1663edc7a0f53e2", "91fe49e7d80aca437f1d951ce58e87f7353c81ff", "36c2715522c3df4237d8e034dfe49d67eafd6382", "6f9c14c9d4cd38e70598e4135419741c4f7eacf0", "a83fc450c124b7e640adc762e95e3bb6b423b310", "3813d74ddf2540c06aa48fc42468bd0d97f51708", "98cfbe37a68406ef194354de7e5ea453c4ea9adf", "3ef1db04e9f4d3c986d3eef5216562c844e9faba", 
"755416b8d2080f5d9e894130e5115a471e9d8793", "12811f1dc14c9377903d4c814e112071118071a5", "2eb1dc9d1cf571462f7bc616b0dc52c8e402e331", "2783efc96a0d59473e4236ccf1db6ed7e958839e", "1d6c0e7b9591ea332671080df2e53119a34c90bc", "c872d6310f2079db0cee0e69cc96da1470055225", "76b11c281ac47fe6d95e124673a408ee9eb568e3", "41199678ad9370ff8ca7e9e3c2617b62a297fac3", "8c3a7bb72c59628e9b7982d90525ff9654854946", "8ad0a88a7583af819af66cf2d9e8adb860cf9c34", "7a97de9460d679efa5a5b4c6f0b0a5ef68b56b3b", "a7a0099caf89bedbf4de1c61499f999ea4fc7d98", "e151c99b5e55bfc03047a2c6c2118cd9e4ad829b", "187f3ee3bc50a1f2471edc80d707e4fa1cac5b0b", "3a0425c25beea6c4c546771adaf5d2ced4954e0d", "85955fe6cdf4f9f35fc9eab6cc4fccbb819e68a1", "441d532b4207ca57b37eb210a79373c556d90846", "969fd48e1a668ab5d3c6a80a3d2aeab77067c6ce", "09e63de98c7551079486f66bddb62a253fc596b7", "1188c925d90e93a205c5fc15d11fb2ae02660f2e", "7e9b6aa4909042d5f1faf074c50383e465e4508c", "79584ed8638392e253ffae2d5ec936239d088285", "dbb9601a1d2febcce4c07dd2b819243d81abb2c2", "60e2b9b2e0db3089237d0208f57b22a3aac932c1", "2976605dc3b73377696537291d45f09f1ab1fbf5", "5793b25e2492d47f5faf9b93b8c0fe36802de8b6", "488601935d69906f925381b92c7ecff069a79dbf", "1fe59275142844ce3ade9e2aed900378dd025880", "4b02387c2db968a70b69d98da3c443f139099e91", "96f77524d0a26c27775162b1474915c1452f346f", "aca8c4a62ed6e590889f1e859d7bc79311fa6f4d", "87147418f863e3d8ff8c97db0b42695a1c28195b", "85dc159dd1eec52147b24f32f8ddab135abeb8ad", "a0798a0a422520241cc02282946882dd1ef853cd", "9a0b2816f6f1ee04b54c7bf8269c5f2f31049f0e", "1b7f9cc57ab8f3f551bdb0d5f153191ec403895e", "5da43ff9c246ae37d9006bba3406009cb4fb1dcf", "5ff63170ae8eb78a055ab34f52b33756ce2b738e", "332339c32d41cc8176d360082b4d9faa90dadffa", "259bd09bc382763f864986498e46ab0178714f58", "bc93a5d09aad16356808843338bdd34df6b5b01b", "1b27ca161d2e1d4dd7d22b1247acee5c53db5104", "58303432a25cc86bfe9c77cf4c04f91695a24304", "665f0763ad7f320cb59fcb6a745906d3d6799d99", "70e14e216b12bed2211c4df66ef5f0bdeaffe774", 
"6c97af4c5d9908c288626d833818d7095f635765", "6dc784e98680f417d8dd1a78a417b8ce803ec143", "e5320955580401d5a5b2ae8b507e8f0b47e08118", "00e3957212517a252258baef833833921dd308d4", "3312eb79e025b885afe986be8189446ba356a507", "34786071f672b55fcdb24213a95f2ee52623ff23", "7aac397df8109d8d9ad55cc2c1f9a07589d58d3b", "f8e64dd25c3174dff87385db56abc48101b69009", "24205a60cbf1cc12d7e0a9d44ed3c2ea64ed7852", "188d26a005b6aac1448b9c52529b93a186c33685", "e8096f4f625441ddb4914b17d1b9da3f80bae92e", "3947fe473d8cfa443ea4cf6571d2aebe7b2066b6", "88e2574af83db7281c2064e5194c7d5dfa649846", "6623d8efb11bdca7348249c357902a5527a71e84", "e58f08ad6e0edd567f217ef08de1701a8c29fcc8", "0d8cec1b3f9b6e25d9d31eeb54d8894a1f2ef84f", "988d1295ec32ce41d06e7cf928f14a3ee079a11e", "e883cf759c3abecf59bf9f13053b1eb59bde01a6", "b908edadad58c604a1e4b431f69ac8ded350589a", "ad8bd7016132a2f98ff1f41dac695285e71cc4b1", "a6270914cf5f60627a1332bcc3f5951c9eea3be0", "a47f834281c39b1b851757b807c92f43dc975206", "284be8be0c6bedc36dfe43229bc84345ab0aedc2", "ef3eb3b3e2285e964b385c015caa0f8a24057f0e", "a70fa8af52e4cc32dae09e6e753f1dd3ec198327", "68aea17b80e7e98245a8717cbce01bc229b0f175", "57fd8bafa4526b9a56fe43fac22dd62b2ab94563", "46e46dffe4f8b724ec51179b3be1ae321fdb2d39", "e28915617dcad57c84f5feb2b93763548a44defd", "5e2b918f2dee17cb79d692e10aa2103ca9129e2c", "8d8d333eb194bce847a4bbfc85fe332643622a34", "09fc8a48b2a9746005b33743fca9bc8b17c724f5", "2b507f659b341ed0f23106446de8e4322f4a3f7e", "ab540c5be9f7ef688d3cd76765fcb794b92531fb", "7f8d44e7fd2605d580683e47bb185de7f9ea9e28", "6c0e0c3e66622023c64c664c3411a6fe1c87d5c5", "a2a200fb7cce52a3394f15def028ebfad3dcbe1d", "6ad5a38df8dd4cdddd74f31996ce096d41219f72", "12169ff906633e486599660ebf77dd73060640b9", "65146d19b4a16b16101fac739cbbf1608179c81d", "509b6ce763a1a594e2370a3c4f4a14b2975cc6fd", "4183d1b79d54f5638063e6c59a2a873ee2cd1bed", "397c395aed9d96aef064b9ceb9f0eae9421eb00a", "1bd65302bca0c1a593490088a0ce85988f3cc90a", "ece02507e17c7e6a5ce4d58f990f3e01c6555aa4", 
"43d4927f5113c5e376ab05d41e33063a6d06d727", "8ac70b37a44674063198f562657887347d885da6", "41ed93fd97aa76b4abfda7a09168ad1799f34664", "87cab840df202609bfcfb5a9ee3293e61c7c85db", "b1d89015f9b16515735d4140c84b0bacbbef19ac", "5311ab5db9e221f123c983efe58f18b6989b4230", "dcc4ee602d3b0ea9baee81dd7c7886fb19ac1e20", "e7a8549865978b478699647bd259f71c516c4479", "257eb6d5ca49eb4ea90658a8668d1853d9c38af7", "9e1c209923b320691eadff40f96fc83b65521b29", "220ce8b5151900e994b79ed515648892ff6c5368", "925811b9fdd6c0d901bdd63245ead6a781f38bcb", "4dea287ad9271d4ac73c58c03b8e6e714dd2db6c", "6c896ca9bafd7479c8291d0448e2910117ee059f", "645f06028ef15c6ba8005d3508805bd2d61f25bb", "af3de028f6f2369a6a7bd42147d99dd683c8bfa0", "0344631fa49ba51a2ba25d96113618bff08cabdf", "27123e0e9743f27fb25e4f1a325a01c89240d0bc", "a18c8f76f2599d6d61f26cb1d4025ea386919dfe", "322cba6843f05ef7d4aa1a91166c4babcfab18cb", "a290019f7125f6ebdc0dcec3b03b771de6905dd0", "a65e953df1dbc007862f8eaa8c12ceb225d15837", "09aaabb99e0cfa932342b72cb4fcdbb2ce3aada8", "1868aeb7f13e64ebc78869b371ef321572d6167f", "0d076edd62e258316bc310fafcec88db3ab85434", "af2c7b9adbf898b251d3d5d0659fd21fcd0197ba", "9e5378e7b336c89735d3bb15cf67eff96f86d39a", "6d3d8c986eb9e9d47743e6bbcc4c34c536feef39", "f83e563288e5d7a54444bbcf28a28a37b72a0644", "4f77672c57a4fe2d925aea61cc54ba48220816a6", "aaa6fe8045e1a071e1762cffe4f59e0bd508daf9", "6e61641c9a9cddb38948b6600c0ebc3d2057c697", "1bd1645a629f1b612960ab9bba276afd4cf7c666", "1978297fa32ca39f57f450608a48a19048b09270", "ec26d7b1cb028749d0d6972279cf4090930989d8", "390cc673792dbf47939f621aef5bb774ca01dc46", "18fe745e0840b7b086fb7d14850a95ebbd5ae57b", "46f04ef2bd5bcba7a14f32ca712efc420152c825", "e0a5db6594b592d46dbbbeca28a69248f0bfe421", "28707ee894ec3d9aec8c98887dabc6aba35cff82", "7fdab95ff454a900c710f464c1129cd173059912", "82a2a523c4488c34b486c920046f4ebbf8ea828e", "77c81c13a110a341c140995bedb98101b9e84f7f", "a308ad39f3cc25096f493280319621a25c2c7f46", "2315371408e02cdff6f54359f159f192009d1600", 
"f0864a4e2f7dc4b3bacc36a0617a1860bcb6aba1", "73764fa9bed84ad2c932dc8089ace7fa8fa7c1d3", "9c09b9410da8b1c5f0e3f6b65502160734214782", "dd6a34ee3408bc8844f6fa388e0e7287dfc03c5f", "572785b5d6f6fa4b174d79725f82c056b0fb4565", "318f7b59fc22d6326f77b24939860b0137bf8e77", "659fc18b1ec79a7437e6e7b1dce145d423e82199", "0e55a3a498cf7df0b980de1980c1e8c36fac1f27", "54d7e756a8f14d01a557fe87f62cbf90e905b04a", "9043df1de4f6e181875011c1379d1a7f68a28d6c", "a5ec50c3f268ec68d49acd31e850cb69b65c5a5a", "17e2c45b833bd06edcb8c50be2360b713ef8a8ea", "25d48ab3b05bf299fe61ed6580674e893f08380b", "05e6ef04116fb096e590d73d6938e4fed6426263", "fd23502287ae4ca8db63e4e5080c359610398be5", "363cb83220451baa9f785a1fa738e41178e015c7", "3b304585d5af0afe98a85d6e0559315fbf3a7807", "1d630cc482f7a261738eb8b3b2021cf27c38370e", "20ca3dc873d7c986d7b1b233fdcf85e78b92914e", "03ae36b2ed0215b15c5bc7d42fbe20b1491e551a", "0b2d49cb2d2de06b022e2c636e337d294171dc22", "e56b80cf274933e3441a32909b949a4f56e18721", "53822d61e829ef02a95a6c89fea082114fd3e16b", "b3004f4ddb8948a081035dec9c4f7f40205926ab", "860cc25e1cee40d70d001180ff665809c6e36594", "14975c7487dd4ddc85568e4b114ff162a334f66a", "585dd98414d548ef7c19b250ee12c1bbef9f7060", "e55f2e7f72ab8f5ad0c0372651f35702c9d54351", "5f6d110e5e098a3ed5a68c688a89db733ef965bf", "f7040d2109cb42b373b1785ccb7a03faea824873", "d09a3ec68311af4ebd3cbf7ed906aadffa94a464", "e0739088d578b2abf583e30953ffa000620cca98", "e9ac109c395ededb23dfc78fe85d76eeb772ee7e", "8dea22172bd3008ec3c8008bc6edfdfe1e33e439", "09edf114f8764c82713f8dd35b1b32ad83ecaa17", "003021d47a752fc2d8bb8555643cbcd6361e74b4", "fd206ad103fcc58dfbb84779d09cc94546d21f42", "02ccd5f0eb9a48a6af088197b950fb30a8e3abcc", "135de89f2fcc94b7e7db7cb21097855b66c90fa8", "b5f7b17b0feb3a1f3af60dce61fd9a9c6b067368", "18200e8db6fc63f16d5ed098b5abc17bf0939333", "b4ee2a6b5fdf66f57e94a998cff2acef4af7d256", "ce0cc5f078c5224b9599caf518d74ae3023be0a6", "b2a6518b47903f5e4318f31c099bbbe8f2425ab9", "09e5f2f819a21162d833f356670a140cd555a740", 
"bd88bb2e4f351352d88ee7375af834360e223498", "e9d89ff7c1bcd610fa8eb44d86598ab5a14bb353", "f63b1aecb09cf4943a7994e98ba88ca99647d4e4", "9851429995e617d5726b80531525ed80a0fa1e1a", "11e6cf1cbb33d67a3e3c87dcaf7031d6654bc0de", "4308bd8c28e37e2ed9a3fcfe74d5436cce34b410", "811c4c1647bbe6c048b61ab3611bd10c2d1f5ebc", "59e9934720baf3c5df3a0e1e988202856e1f83ce", "8609035f1b9fa5bddfbbffd287a98ba47a1ecba0", "191753aa338f24bb41f7bacb4326e0c0a1b90459", "97528579d713b7d656b25bd0cfd1a84b6d004c90", "3eb5f1d466228c1345d92f906ab31ab93c160837", "79e1b9e425621dd5a683026b7158479c10f6780a", "8455d208f43ec69971eabfcb28fddf64c3c9896b", "be9cab9e9040b667e7902a4d9fbf1a358b350d60", "dab8b00e5619ceec615b179265cd6d315a97911d", "782ac29bd3aa298a61884154d2e342d08f66bd69", "4b152bd834840fd0c01b9af8c68c6518d0e18e4b", "6155d504d59c52dc3a6b8ad6aeae8bf249afd5ac", "330bcf952a5a20aac0e334aad1de4cd6ba6ed6eb", "fb0aa3debfd29cf867370e83ca780b8428d204f8", "8b4a10cfa107f3c6546caa32e5012d342d02212a", "1152b88194214d4ea0f85b727f4b120915ad8056", "0eea197144b631b33857821559886b6ea063b68c", "b9e43abd1f798f222dd7bf9caf8b597a0eed6316", "bbc76f0e50ab96e7318816e24c65fd3459d0497c", "30aeffe0d77e85fcf2488eb9947c55b7cfacfedc", "7147154adab91d815ea42eb0694d139e3fea2056", "26cfc1cbcb58e3fd33dd3f04a4ee3e25a12d358c", "ba6aec7f1b17833e7983809926a90951469f5520", "ccdf24d85fc14b4710dcee268355548f166ba870", "29fade91a56b540f33c61da32a114adc8c08c447", "78c9a63be8e07dc6acb90f4fe3f06821719eaa34", "8061564db4c15aed794f9e9c4f624757101cdd56", "a9d54e40407be7d8e496830e75a5b6020461ce4b", "12fd9a2349446a47d50251db6f981a12330f5ac3", "77851ca35105ebe007d99e5d78ceb3473491071c", "d39cb62e58646e953f5878aec8b77e66e85ecedd", "e3c420b29b8590442decd330ef70494c2209f149", "acf0db156406ddad1ace2ff2696cb60d0a04cf7c", "4562272025a5bcdb321408116c699798a7997847", "741e2682b45a3dccab341cf272312a3c75c4b49a", "650f4ccbe7d4aa49ae80e246df394ca6c60894ec", "1aeea66eae8fc452631132785edd1a607ee021cb", "12bbd57ce427a9f847fdf4456eab3bd5caeb5891", 
"722bc99b2e6a48c997dbb13a819f6da58539e543", "5b6bdf478860b1e3f797858e71abd14f98684b61", "daaa90ba7248d22d655479431dbd3edf349469cc", "30a059872d0fff3442504c24880c93738036e6aa", "4be63e7891180e28085d03bb992abbc5104ac446", "662a88ad79e390c2436ee124f03c5b5763074498", "caacb99b5cd40992296f914a3bece80059047400", "790099d124b798ff055a8ab3ec49bfb5d2d5350e", "1a54a8b0c7b3fc5a21c6d33656690585c46ca08b", "b41d14ca81ab76f8528277e65bd2f57078796779", "f72f6a45ee240cc99296a287ff725aaa7e7ebb35", "4dd72cdafead8a98dbc77a1a74bd66ffb90d3e01", "fb3af250a2ff85145519fea9ece7187452d02a50", "9bd973e64750a94dcf528da402b39e3a53118312", "ab9d368b2ebcc34e38046de49437b7bb224c5b56", "7442f8ccba3a5810ddb4b4d10f51fb93af7895c9", "dcfcc635b299184bb96a020ac6501e4b14fa0674", "2a86bc520586f611771c2052b50ac52239414dd2", "d7c27a0b9d21480d34d43b58ec1edf40f1181747", "75a66e636021bcfde447135ba9a9ed893d3bc436", "0d49fcaf53ac1f3b5fe12bef92c9035a36865802", "6358b95b1c97df4f10f57a90913f672e44d2094b", "82d9296eb2edc12f6cb830fba78d5bf9469a94b9", "9bc9524aa6f3742bc811f294591a60d90ee62712", "8b06d12795c1714960cc0db17687777aad28ab42", "0949e3a8b5e70d47f07dffe3aebb1a709f283199", "a6e039f0b4f586c2014e42c36ea173e249636f28", "a27c7afac5a34141ec5415defed6d4d85325230a", "432be99dde7d93001044048501c72c70e4ea2927", "993acefc2e350f9661125bb74df136e2b614ea23", "c1aa52ad21d0ec20102eb5402c60ac91c49612bb", "470167a92227411d95b1c8687b12bd20e890c146", "611f9faa6f3aeff3ccd674d779d52c4f9245376c", "f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4", "89a245eae1e7eda7aa8e360c0cdb4bf6a72da225", "2dc1d97001da2fb71524adcffc27492726ace37a", "54509dbe70cd3015007bbd5fa1fd8793b388319e", "c8f035510b72b84c21430a887ed03c8836eeddc2", "5d1f177964e767ac5ce7512cc33fec728096bd07", "183381620f30e8d03ad5eeea091dafd8e0892cac", "9a52f7d3d4952cabfba7d88da980371b8c3e2e37", "95cfe8da0d8225c8f6304713719846a7716894cf", "3e940fe776e387a1c9c9fa65ab5cd7fc22fb4549", "715216a92c338a3c35319026d38ed0da0c57d013", "8a37b56c92fa18c1129ca029935db0e837b73675", 
"ac7031769c08423774ae4346f7492f6814176268", "0393723dff4c00262c1daf34c26d27fa6fc52ab6", "1e94ae83bd49cc9c6366a7f486ed5956e5fa0e14", "1f5e47ad5490a63c7bea79000999b711055fbf2a", "0a20e2fbe52efdb794b7566ce5233c41f4c5efc9", "4a691bd830cd0fdbb4a13ba91160e973386250dd", "e3c5c5623af4b1a1f719cac24850dcaa6a304bd5", "342067ae1d5b52b62c2f31b1426bad933ef90e38", "fd2c82efc9a0c84d306fc47b4cfb7e75e4174d46", "4a8b21da621a33b805906140b08c699a09ef06a9", "13f07d51c073964d11f9af6463fe3ffe5475c393", "24cf50f79329c29e1fff1f83a9b9daa6130b9cc4", "19d1b811df60f86cbd5e04a094b07f32fff7a32a", "c9bbf31afbec278ca735e91cf5e9c70dd3aa41a4", "b92f276ecf9077f7c09ce410336f8b9a819df4fc", "e579ae31f4c2a20490a1e774b1b959203be0135d", "95a835cdb5dc46e4de071865f9dccdaf9ec944ad", "e7f268bfe0016412db538b57300d46d7bd728867", "8d3114a3236ec9adabcf0c40613a23f00c272a1c", "2f5d44dc3e1b5955942133ff872ebd31716ec604", "1ab7d8da096c418c0bf93de14d128eb008a92db4", "2a0623ae989f2236f5e1fe3db25ab708f5d02955", "2a6b48e5c972355c9fb91988b324c2faf6bb1c5a", "f854faafdd6c4f4144012f1de26573eddabd040c", "33bd8406c5630e3651ea514d833da8396d7a5c03", "0a7a7b3f05918fb4fc33f04cb7e31232fa197f76", "4fc96ad5c9c0155961ace769f3a73b728854fa98", "65da43d70b16fdb32b182e4ede59fdb9dc3f3350", "8bf7212a3ccaa93fcf202654c8e123a62bde71d8", "6643a7feebd0479916d94fb9186e403a4e5f7cbf", "1d679b371c9dfd833cee0925de483562d2bc7d88", "c09032896722aa35a905d8905c1cfe67cead6e01", "696bfa059fcc459c30af21c84d116ad77fb11197", "46df854f57b6553b4b3238779e46bf2a3a3fffcf", "8a7bd4202e49fcdb947d71c9f2da0e7a953c7021", "3e0a12352fe3e9fb9246ee0f81ff7fbf0600f818", "a94aac3caccebd82413dd05707ef8bf525dc46b9", "1da1903050c8af71c7bdb3cd608e1182826ef72e", "d925540a8cdedb92c7f20ebfd9b8baf36fe6caa4", "40905b69c5d9fe95a25de37877f5045061c61a20", "40ee38d7ff2871761663d8634c3a4970ed1dc058", "d9434e64e880640243f5257dacfdcc0355a577dd", "143ac3b7338e240b106863d35177c4567ef9c1aa", "286a5c19a43382a21c8d96d847b52bba6b715a71", "d3b5a52062e5f5415df527705cb24af9b0846617", 
"8544cce3a7f85e4952acf51c464f1fce15250760", "0b0b0d9b15613a6e3c4f9a4dd1c17c0313ca4303", "fef89593599b78db7d133fc6893519b3ee8ff8d2", "a1e198454bd0868b4da9bca7a35218dd235cfdda", "f25aa838fb44087668206bf3d556d31ffd75235d", "e6d6d1b0a8b414160f67142fc18e1321fe3f1c49", "a40f8881a36bc01f3ae356b3e57eac84e989eef0", "c2be82ed0db509087b08423c8cf39ab3c36549c3", "e69a765d033ef6ea55c57ca41c146b27964c5cf2", "1d0128b9f96f4c11c034d41581f23eb4b4dd7780", "9a23a0402ae68cc6ea2fe0092b6ec2d40f667adb", "713db3874b77212492d75fb100a345949f3d3235", "c858c74d30c02be2d992f82a821b925669bfca13", "b48d3694a8342b6efc18c9c9124c62406e6bf3b3", "dce5e0a1f2cdc3d4e0e7ca0507592860599b0454", "397085122a5cade71ef6c19f657c609f0a4f7473", "1966055f13a7475100d18843f85717f312511805", "ba21fd28003994480f713b0a1276160fea2e89b5", "9993f1a7cfb5b0078f339b9a6bfa341da76a3168", "64cac22210861d4e9afb00b781da90cf99f9d19c", "4e3b71b1aa6b6cb7aa55843d2214441f0076fe69", "8c048be9dd2b601808b893b5d3d51f00907bdee0", "ab7923968660d04434271559c4634790dc68c58e", "37381718559f767fc496cc34ceb98ff18bc7d3e1", "d140c5add2cddd4a572f07358d666fe00e8f4fe1", "5810ce61fda464d4de2769bd899e12727bee0382", "f1a58bb78149f408471ce166a13cd9176e5edc5b", "5721cd4b898f0e7df8de1e0215f630af94656be9", "bab2f4949a38a712a78aafbc0a3c392227c65f56", "ce9e1dfa7705623bb67df3a91052062a0a0ca456", "11ba01ce7d606bab5c2d7e998c6d94325521b8a0", "b87b0fa1ac0aad0ca563844daecaeecb2df8debf", "c41a3c31972cf0c1be6b6895f3bf97181773fcfb", "b07582d1a59a9c6f029d0d8328414c7bef64dca0", "ee418372b0038bd3b8ae82bd1518d5c01a33a7ec", "8a336e9a4c42384d4c505c53fb8628a040f2468e", "696236fb6f986f6d5565abb01f402d09db68e5fa", "f61829274cfe64b94361e54351f01a0376cd1253", "708f4787bec9d7563f4bb8b33834de445147133b", "a40edf6eb979d1ddfe5894fac7f2cf199519669f", "b7043048b4ba748c9c6317b6d8206192c34f57ff", "ef52f1e2b52fd84a7e22226ed67132c6ce47b829", "4cfa8755fe23a8a0b19909fa4dec54ce6c1bd2f7", "88d63a0cc0b8a5303bdef286d6df118bb1d44d26", "f354cd137fdc40a3ff6a4004f2a052966c275627", 
"7eb85bcb372261bad707c05e496a09609e27fdb3", "80ed678ef28ccc1b942e197e0393229cd99d55c8", "74ce7e5e677a4925489897665c152a352c49d0a2", "4f742c09ce12859b20deaa372c8f1575acfc99c9", "530243b61fa5aea19b454b7dbcac9f463ed0460e", "ca83053d9a790319b11a04eac5ab412e7fcab914", "f0a4a3fb6997334511d7b8fc090f9ce894679faf", "30f6c4bd29b9a8c94f37f3818cf6145c1507826f", "e97ba85a4550667b8a28f83a98808d489e0ff3bc", "228558a2a38a6937e3c7b1775144fea290d65d6c", "6ba6045e4b404c44f9b4dfce2d946019f0e85a72", "1fe1a78c941e03abe942498249c041b2703fd3d2", "6b8d0569fffce5cc221560d459d6aa10c4db2f03", "9ca0626366e136dac6bfd628cec158e26ed959c7", "dee406a7aaa0f4c9d64b7550e633d81bc66ff451", "6f5ce5570dc2960b8b0e4a0a50eab84b7f6af5cb", "d68dbb71b34dfe98dee0680198a23d3b53056394", "995d55fdf5b6fe7fb630c93a424700d4bc566104", "68f0d2b41b1eb4ea6b5e841c64f48b58b21253b0", "992ebd81eb448d1eef846bfc416fc929beb7d28b", "e01bb53b611c679141494f3ffe6f0b91953af658", "93884e46c49f7ae1c7c34046fbc28882f2bd6341", "456c8c8ba65fb933166cce1699a2d12a37f60233", "6a13e4a294115c439063617ec31d26f156e1142a", "352fa54953cfe0da7f1547bc6fdc43e0e53595cd", "e44b644dba198a4f8de553c9795aee77c0d23f37", "3bc9d71a7d1d3641a3ee1df46cc45ec62ffd2020", "4cfa2fe87c250534fd2f285c2300e7ca2cd9e325", "3be8a8ddb40399f1b0c02156440167152f8b0cba", "39bbe9885ad1e12e79bc620d83f7768d2fc04994", "c4be56287fd666f9cfff257018a42e00dc56499d", "c3500a6300ac14256fc836a53eb9c56f007cc61d", "02d6df5060281cf13fbef68a8f1ddc29983fe8b3", "ca22c95ccea3e5ceaf95956811cb507af1bdd672", "c694b397a3a0950cd20699a687fe6c8a3173b107", "00e8968c5922b71bf3be2e9733fce82f3c40cf44", "77addbb49abb80ccd3ebfb5b6f2d3b0687ce90f7", "c7dd846c0abc896e5fd0940ac07927553cc55734", "d5bef023a7d1032a5c717109a9c1b600ee1e8a71", "8732d702aeb08e9c604b36dcaa5933aea91a228d", "fbb6e707c8a5f189d8ad416597e23671b884448b", "de766084610ee5db289e94bf2865369d6bb89444", "4cc0bcc342647693c21a0ca2cd1e4064faf2fb47", "bc99f98b5f1fd158cc31d693061c402a36222dbb", "19e4c7d3f3b60235848fdf1e2d23f6fa6f5b6586", 
"470b89e2c5248eb58e09129aa9b4d8bc77497e7e", "6414453e462f1a022302bce98cadd8a817629521", "4417258e4fe9e60d044a72197cb67471272991a5", "e810ddd9642db98492bd6a28b08a8655396c1555", "16f403ab388c27768155b466fcd828b4ad2a41b9", "59d10820e0a04d2d1acc43bb18a76c52e9946721", "b33e062f36dec4e49558133426b50c2536cb0a1b", "d3fa2f9b41e43a9e7ff3f940eabecd98cdd0082e", "01e5c95aa20a44eed21b5037697043e57f606f3a", "e1c0e8f58fa035048f7762e1ec9780c37be2af62", "886fc74b943011ce5ce192ff98d6ea9dcac7ef11", "2845cc51262f3af6aafbad62690a23e9bc847b07", "5c48f97a8a8217025abafeababaef6288fd7ded6", "c7fd727cc1b68a611bfda0e273d2524007651233", "94e008564e4f091a887fdda379e7d26d90920c54", "44bbb7cd8b3cba9c00ba55746867fb29df59102f", "928d711b592aca0635e3b4717f7cae31f5aaa9ce", "41915a85714bbfea53af9e65abc01a7b54e5cd13", "38cc2896058131e4656443aedfb1b9dae61b99cd", "27276945ce8b103b2341729e99da4e76acee19c6", "40ce2567ccc2552287f8a1c25e9f6086efa6bf8f", "1412f4024babbc01b671f7ee4a22d86db1545268", "426b47af132293e9ffe6071a3ede59cfdc1aa3fb", "fe4609fdf8fc8ea18204ffe673c2b06acbe8d0fd", "d2a8779b4f533e1dce709ced55196ef28e9a4c90", "5432392d916e730c53962be202c115133e6d7777", "4ed4b1fbba79a1f02c290ff36ee8bc4d78247bae", "a8e5d204549fcf93c5bea88b0f99a2e4da9648e7", "f0d18a5d205c23d1309387dfbd4ecfbcf3b1687e", "4cd85c27016362a23936d670b8b0cc0abcfe5a8f", "c2fafcbbf334447e8e3a18a2339eaff63ed2b4e3", "9899eb0ae24aa8c992244afe5f4455e9f96c1f18", "cd4cce724c8a33f72b068a267cd6152c31851013", "304fa45de90874e89b7a5511c88551994ea8c89d", "64753fe167a46208e28237fa98db8daedbef83e4", "bf42000d04efceab3f0f799a9b3f2058f91cf3a4", "3af0400c011700f3958062edfdfed001e592391c", "8fa290b5d92c1f427edb62d29988056383e02047", "45db5e4f5da9d2020b44fe2c0f0040d1142fbd3d", "167f0115862b79a7558fbdbb899458ac29ab63f8", "107dbd2ffa3bb26786ebb7bb57a308c7d1f4dbc4", "ca283c6fdf43b7ad59949207834a6a573381a9c9", "9e594ae4f549e0d838f497de31a5b597a6826d55", "f3e4d5b5d761387313a002a06aa66de625362987", "231e545fdb1a516e29604fbd740e207b6f25c7dc", 
"555b332252522fce0f31b0c0b7630cf4f36ba0a5", "6316f0adb6e2ebe503b7fb623ab28d7b47fef5de", "0136bf1d3747770a7fb4fcdeaf0b4b195815ed67", "4c16fe03bb96328b715acfe40491a90034858800", "f0fc82cabfbb7d7a8505ef1f78becaf179b9d72c", "87e05a26fb4c45dbe2b0b10c8ab20e7662d46912", "a10a3efbd1e2b6f997bc0a64ca0539e80f81ec7c", "710ce8cf25f31df8547b888519b414187e989257", "07eeb8f39f7d397a2ab236ce830c3b5c19adf9d7", "132781c1b2495ff0e792b46b94fdf33867394e4a", "5cd58501fd184a0fe5c05026ba1965ad12e68205", "a2e7f4e97013239c8d25c9f4eba71ce313ebb1bd", "7bc8d81a38899b60704681125ec4fc584a3e7ba4", "61e97d8440627bdc9772b3b2083c65f44a51107d", "c72ac3dec0d0b2d5ca4945b07bd6b72c365bdc13", "9941a52ef4db2eb338eec061a950af6a95f82510", "1750399579e616cc3274b82f0c3c10562573e2f7", "24486f70e0fa7a44844adefe352b18aaeb04fdb0", "161eb9ecc119952c137959e87a796da0f3c62cd1", "bd5c222323d6b46ea71f329cafe11d38533f6f3a", "9c571732af31360b79cee46b1809d98a42423dc1", "76c018c6dfc81f61c3912c5ed442d9a72f64e467", "00cb08dcef72bfaa1aab0664d34168615ac6a5cc", "0b8ef6f5ec5dfc3eded5241fd3d636a596b94d26", "9a7b7d61481e3a5bca1ef809358d46ac87405f67", "19a3374ac2f917b408b4bcdca33fc9e9fd7ff260", "30f49d6595359a4a18c728ec83f99346d1e16348", "50984f8345a3120d0e6c0a75adc2ac1a13e37961", "908fa5bd0cdeda8b321b8b86a17ae44c1b0983eb", "38d9edb98ca01b0b8deee33b4705d8c9a927375b", "2a8d0125c8d27699ccd75c76bda774e065060709", "324c91551c3cde44bbcb9d97bc14db7ca6d31850", "9b2607d4a8f7252bf13628afa1b5e5cb55ca65a6", "0b8abd8f45d161d2e9a2b2bbdedd4e103f2d0efd", "303be881f6cd4907c5e357bc1bb5547d8ea1da5a", "15db3bb041ee06a369f0cd478369c75618a35387", "9af4d310415afb925e157e7120b7aa596298888c", "9016d7e5461aa3328efcfb74a7624487c4db2ffa", "8c30b154811453b6a1017bb27e3becefde44f689", "06f2df0ec9ab6968411e34f581dd8f5d40500d7f", "f2da70f632db70eb42cf5bc5e2428f4bc53909ad", "c61a5961a344748272fe51ddf4584b22d9c10cde", "8b98aa8be775eb4618779a682ba994b36adba24e", "80c80df96ba7e4df75802bb8710318746cc5d9d0", "080ffa4c89bf85f9610f413350199b52898a554d", 
"14151dcee2b86469eef5d28036ff068eeb428064", "e0d04853f1d1323130d165a69f7cc939986598d2", "cdbb1f8c13000441c5dacd7dc26774315818a09e", "9be3bd737c1ecae5fd895583dfc0613926abeb6f", "cfbffa4d143a72476d962906e413c5ed6306b09c", "ed732b3a1f8fe733686a35688b090f426d018f9b", "a951f9b3aa95fe53cd9b19e15ebfdbde3fd5af62", "7e2a443cb069f1e3b0d7c41fecf55774ac584895", "d4e99d6f9e91fcd58c9fd00932d1197a9e03d08d", "f780c064fc7a3ded0412a6eac184ada57338f650", "3c917f071bfc1244c75fca3ceed0a8c46bb975cc", "06a33dfb6365762c2f83f3ab975d982a27b8dbaa", "69d9b79757d76b73ed940754f4d05288b76eb8c3", "5ccccb8df66e5b2b88211ffc7335d356260b801b", "c34911e9fefd987470edf8f620d9ce8f0030339d", "21bdcd9be2e9e75ec1d060d8d748a372d9ced230", "04a1f6d15815957562932f030ce7590521a27763", "8f48b2da711417d1f1f39069501577c84abb8d37", "93e16f85ce6468f25d53b4bf03e7251012c464ff", "e8d2d991dcfb12b287ab06d282a86802e565780c", "4af997701ce14ba689f7f964a72bcae0a2432435", "d4e4369babdba158bfdce1b605f92d6b1b665be4", "e37f0b9dadc0bc6dc56ab0fb2c348dcca436bcc0", "301662c2a6ed86e48f21c1d24bfc67b403201b0c", "c563a1a197e8e9b5119063a8fd57fa5a7ca0da03", "2fcd3007f197fa2c799978162d49598c4180ae69", "c64ab3ab8b8276ad6ea9dd117ca42a2fa0efc8db", "db20827ca6343eb46dfe9a207248c4d50f1d72fe", "3e50ee66e4bae05600f38170847b1d945e2f99d1", "7105585fd49ba914e980c45cd72dd2cfcabea7c9", "5836c6e12eeec6bb14c71189f30a24422847a694", "344682f69dd9bec68d89a79b0b7f28a3891ab857", "bb1a6080072bd54eaa5afa1d29cc02525946d7bb", "cc78b2d4d0f756941356074bb17e8e94f7108eae", "bd0d93e67c0b439caf372b704a377670f0c89be8", "f650aa86c7ebd31fa079f4db62a53b12f1b5d61f", "68becbe61cf30ef93b2679866d3a511e919ffb2f", "e9877c4fe5df59bd4e705216ebc7d5ceb16eff60", "d31e827d7570de3088f7ce582a4be2dbd38dc1b0", "f412d9d7bc7534e7daafa43f8f5eab811e7e4148", "9c62f4b09ca590f74c75115184fc1a9833625edc", "f2a90a4b597e440182f722e5e1e154d9986b1e90", "872d1392408358b88490047651052c87ca754040", "b55489547790f7fb2c8b4689530b5660fbc8ee64", "9078307c58d74ed6aab70363a5addc054db7fd1d", 
"acfacced2dce0fb4d39907b3c3cc22b4eb63e7dd", "b0b628bda8a6c4267eeaf91420b8610400ff398f", "d37013e4ce0f5dd6b61a4ffadecc401274966602", "06ab50dccff619c58bb699ee182824b5dca65000", "7fe4ff1e2861830c16a44526cceba39e3677b22e", "8f5566fa00f8c79f4720e14084489e784688ab0b", "aa01add66c95fba6f71d1399d0d8667356d3de34", "c1bbcdf3b5901e3378a89808b07e53a502c295f0", "980fd3fb067215017af8d13381e1d95fe3a34727", "122af84cdd28be10b0a8d7a82b9946738e086024", "4a9086cf2637b7ea54855187b978af7a89bfceff", "ab567ca60fc3f72f27746b4d9e505042ab282ca3", "e00526ff149bd61f6811ba2f2145ed22d9306319", "94bf6b804dfcedb0bf6b0d5c711bb7fe305f3704", "22f7b7e6d4997a489bad794d44c6e02af7a1c506", "e4d2cc8fe567e8e1f2e0c5eb751ff9e9361346c0", "083ac08287af7df220d88dca2fbf5b1812e35ee8", "2e55fd3f5138e55250aed84a7dc17adfc34970d3", "72007faf3bc77e1d98d3552f36c0b6b74aa9e379", "71912976a7a4a5321b7e7ea20163fe3928cc5b71", "df518051c5d10da2c772cef2654d6612eae0198a", "b1ec55cbf2e9a6785e1f1f2fc060e4171ec88b4b", "4168fd6fd9e672223fefc9706596121d653e39ff", "945cd58065f923e3cdc46a28c2b3f0c22ebfca9e", "f2490341a21e54a800e5ac7a1c0a25b60f0e0c36", "1be9ee50f4d4f59b9761a366bba9127213dc4f33", "6872615b0298aa01affa3b8d71e4d5547244278f", "5eee9c417157916ee66689718af65965c423b2b7", "e75255911aa88fda7c0ce8b42b0ca2d2a43bf33e", "6b3ef0e49fac54dc76a8706b8be1f724ca6ae32e", "672f32b140360cf6051c1ea22851d1f9e2f9a153", "0a814669f4a0198e46a3a0d91a1bbb81bb089216", "07311ac342cbc736030bf698fe376ee64a11a3eb", "2e53a5dbadfd30b834feea80c365ffff3925eb76", "969ed0575736943c2db62793583f99365d10fbac", "272c6b6ccf144954a154b83bf5789341ee3f9ed2", "3bd8f6577bd4dab492f9a0836bee1d99e461f028", "040a3fce405aa203585f8037caf5631cd63d8cd9", "bf5064c9143fe2ec32abeff7351db82201372b7b", "0f5e10cfca126682e1bad1a07848919489df6a65", "18fc6bac478f069dbf35f1ebdf6f5d7d711872a0", "badef8089c6b1b4cd479ea406c6b7358b68d2c26", "0f42c64a74bc6e3e83821aa8ab5dd8e3a4b797cd", "a94c3091be2090df6144bd121e41e7dfa96ec0e9", "630d88e479046ef18e1b801bc37e2e1b3df85cc8", 
"8a7726e58c2e24b0a738b48ae35185aaaacb8fe9", "3412d9f3c620155bf3eb203f5817a310000f0c63", "8145ff6adab3397a5ac52cc62a7c53dae59763db", "033998b0ac8dd5b86693bd0d27cd3daa00459c17", "1d187e1d0e9eb874f85e3ecdb75ca0a7bd98d8bc", "f7a91f74b0f8ac03459770bf4ba20af58a72a559", "43de246e9cc197623e27ab41a69530a8d121c77e", "6d84d92d9ed6c226f0cc6401bc425a23432c9f96", "a4b80c31fb4ab8d5c648e809bef01823f13a8b08", "122cc9d15937d3d24a51a69e45440544d8aaaac4", "1947791685597368400ca0429695658d1f68541c", "8d97e0102b5d89c62e5c6697eeaaefc82b36c809", "40f6c9355dbf01a240b4c26b0fd00b5cfbd5f67d", "e06f6d6b473c8966309571931143ccf646ced981", "50894e607cd5eb616913b520c4e238a73f432b86", "f558a3812106764fb1af854a02da080cc42c197f", "e2ff4d1bbd6333763292d9f605855b14c27b550b", "fb748a6953e72ad6d508109f8d809c25570ff07b", "a9b249b42a64cedf166a49858cc94fe930f8a1b2", "fd212b37ebf5a287478261f31b214e69ccd37cc4", "f9028b47a4755a7349108b1dc281f13add5c6c12", "d83403dba5dd0f822f1acaced47b4ddc93e386b3", "2ed18139791ad8287b085c1539895d587800a373", "196258fd1c722574680a72ae8fb4cb5132ff7a37", "8d96fbc52ffd784dee573d44e0c47a3577fd0266", "743c7e1aef6461d6582cf8deeb5d518e45215f89", "06e15d0d6f92a11bb5b46b5a3e0250cccc452c92", "0e610db9b0626db3ab64cd7a8d2c4e49bfc48ed2", "50bf19a06915778a0bcbdef700f91b56258a4e1f", "60bd1d33d74619f08baf0d7477b3f8cb8fc711e5", "6d7ba173121edd5defadfde04f7c1e7bc72859c2", "25560ab44f2ac093c2ef22daae33b0dc9b828901", "381d15951b5beb2456ac016ac7f15fd27aa07d1c", "139ee1b1d98e7ac9d659a5d1bbe8c75588539b29", "b5872d6952a0a073491e845c2071c5b06d92ba29", "ca400e0c7a739ce5555b2e3eccccbcea65e71b11", "a45ec771ca2db81088c52c173eed9ec2022a8a70", "3e42e336d67dad79ab6355c02f1f045f8a71a18f", "7e37ac2a1e70ba2acdb9a563267431cf4cbd5eb8", "0a60e76e6983e1647469172a50907023913b0c9f", "1c8b08599961bc19083f4a501b7ab60a03ab972b", "b1e27fade89e973f4087ed9a243981b0e713b22c", "79f12f28b060221f3b80ea1b7b16779ef9362ca8", "88dc2b2f6d033b290ed56b844c98c3ee6efde80b", "8e7749f635b161558efa3e98a324e88c73e2b18f", 
"1875b2325b3efcb49dec51c6416f40862db4fe74", "ea251fc90da36fdbaf7be76f449a9e0dac1d42ef", "4fde52cd3af5c698f0807bc3b821ebb3a270a986", "9a3ea7c922f5df1af6b14df0f71f546243f2bffb", "21f5652d4f88ac039c58aa530328e65a39eb7b38", "cb3d38cd18c99aca9c2a228aeb4998f394c7b1b3", "c6d78245ab09c5690e483962dd51e0408fbf5cc7", "1df1aa9179506554744bf16b238d05ebd1e2d4d5", "fdd94d77377df6e55d14e41a28141dc241d8b5d6", "55c6cd3b3a0c0335de050468f55a5cc4bdc30681", "1a515f0b852c2e93272677dbf6ecb05c7be0ea2e", "85401b669a989da15bb3d2b37d4598c21d9d061b", "cbadf6b89571d387eb5f1d56ae5671ad16ed1155", "2eae02d59a3f455f3714ce674d85d3f073c9d7a2", "dbbaa5d4a5d04267e5be454624f8d3be8265fe7c", "bc3a7dcc237041aa4b0d70e07c9bc441dbbc9c97", "bc8e1c2284008319ee325ff7ea19916726235f55", "92373095869f1b9e93823f0bd16bb8527c1665dc", "9090d175a155c6d50dc534b404cea7927e94a198", "019a95631c49011330773e953194a0c73c61f3f0", "bffe37791ee7aa277ba6d7c5ff2cb9bddddea09f", "37fdb70003ab93267ee6c75a333cb62d9e4d0798", "bf3f8726f2121f58b99b9e7287f7fbbb7ab6b5f5", "aa8cec9cec1f15f95bbe0ef4d7809e199de0f30b", "431e80aeee80a74f41d8af1336016340cd8e4848", "89f73328e509e3ab2df01481cf55cb53050f6343", "527dd9647c474490ac33ac5b0a19aa76b226610d", "77fe6c859a59ac4438794d38d018d1e3c02d36dd", "f06f3e1cef2d04af915a932e83b22e46a45f3b73", "2eb96e784d2b34ba56654ebd0f357f0b121f73cb", "1450b9f2e69e2a4d0400bffaa535712b5fbab562", "c495fdc195f30ca853fbdea45bf4e8b59e1632bf", "984d5ed1fa80124117fdd0aa9a5be69f269da268", "182496e9533ad3a5eef6a06b815a276c18eaea2e", "fbc5de479d74d4adc95e111ffec6a417a90bcb4f", "76bfa74a6311db5d84bad2a7a941f30dd750d01c", "60c06e5884a672e0ba3bf1d3488307489583b7e5", "23ca7c4367f7317c61ebb0574e3d04cfd9bc3893", "88132a786442ab8a5038d81164384c1c1f7231c8", "7a04b5db3f589ac857b51effa1be3eae7fa8dd4e", "c3fd3b9e0de036241d6e0f94fdc5364551e10b6b", "c52d7271abc7578ac47b6550dfda2540486f8320", "10cefc1ec65f42a25670067683c30d92873e7023", "32ffc4f2665f0061b556f60c4db0f3f5999ef004", "0449b56b6b19a3c42766962782bfb88576b5bd62", 
"baa1e6894024223a928aa00be698247ae253e7cb", "25d58e8c9a0c46d44dede888c4548479e8fee994", "a1e1e7e976c22af9de26d9b74c2ece282e20218c", "7d513b7c22d8e771ce657489bb8e515dab897650", "de46cbf18c7da9efc9368241463919e22230b0b0", "c1bf99570889a43ba2b16e6141b365d74608973d", "8aff946f5d678f689cc9476e48d8b122671205ae", "58c84f968b7b7d61f6073e1a8421ffad1597a46f", "ea5eaaadb8bc928fb7543d6fa24f9f4a229ff979", "30256c10cb7ec139b4245855850998c39b297975", "4bfe7037b2d92215aeb5e116988ade7e6733a6b9", "ac752e998ea646411438fd517c36e1e8c6507d15", "12c5cd899d5ed85741197baed191f3b8b7fac495", "45dee419077eb71abb6687195d97b0a7867c867b", "6e3d95b039a013666cb0544926e6cd2d4d2b8798", "d7ebd31d4616c297292a36785727f1bc5b470290", "178f9df87c6a5ebfd850695b0df026c8fbf2d287", "4780cece6d4adeb0b070fbefbd587b89f4acf3f7", "0e4dd115461e3dd608bcf809f4a8689b994fef29", "c0ead9bada2fb7cdebf7dadbc8548d08387966ae", "e59587d5a5e8dd6d66912bd06a12a2665fe8fd07", "0ab06a5d8c13470d2808ce5b12839c0659ce1d3a", "2744d19a3026a516431ad92f1b60a9237aa2ef6d", "7a00365f9c7bced9ce47246794932f60564cb662", "34ce6a2b0f4404ef4f2a7d3eb68718454840fb10", "d487d2f13590747f2a405e53bd137bd0f50ae4c7", "328bc4d5495723f9a1037660b5d9c1176713bf24", "b0e95789aadf2631576e4aeba44e96ad9d26d1ff", "9f61362052e7675b3053a9d1b682ad917ce0e3d1", "a2f69a94380ccfd463886d26f07c4dba791f84d4", "2a87f95e36938ca823b33c72a633d8d902d5cb86", "7180cb0c2773be3c15cc2737fed0fe19b08e1538", "d8f74df3b39204fe06c648796a933506d9980f5c", "f26b3a916aaa50fe6ef554fff744559815ccf954", "84a69f6357b137028e3aa51376ce2dffad5e0179", "619d215c2e80eedcc5a65c00fdcf5852f9cdedf8", "3caf02979d7cd83d2f3894574c86babf3e201bf3", "29a606ba5b9ae9bc16d05a832d4e54d769c63dae", "2fa04fc0bcbc92886902a62dbf538c490084efa4", "25e62096a44e3fe2f641b492379e7c4babce7ee6", "85489639f395608174f686d634d6e27ef44c9d77", "ff825a46f0a4e9f6ad748aeefd18f34f6b4addfb", "2666050601ce1a7281a9154fb781ebcb40695863", "ae60fccb686272d12e909c9de99efb652e0934ec", "91d94e1b92208b3cb5b0f4a1e48b79fb5851c381", 
"3064424c1abe01dd2f4d2c9022f5ee1312e3cec9", "852bdbcd091f48e07e9b989cb326e631e2932d7f", "db06ec1ac87394fbd66c16040c10ffeee4877799", "580130144d9a123c499d6e23fe0b934dffb00c73", "70428cd53b628b120128ad4e8f9201231e2713c6", "d20e7d7ab8e767dc1c170ca2141d8ba64a4d092b", "c391029d67e5a0c352f9f328b838cb19528336fe", "480810001ed845ec04a20b00461a8a82fcffbb52", "5727ac51ad6fb67d81cc3ef2c04440c179bd53ab", "2e02597b9a8239700703920d5b74f765576d6f43", "7e51a42049193726e9ac547b76e929d803e441f3", "0a3a33b872c84dac88bcd6f5bd460ef03584e0f7", "3f688723fb984bce9b60329f8f1ec3346be7f7e3", "c28745625f048d86f2ad0f38a41ddc0683d36a96", "feba048e15c1931086f909d4be04ade134942947", "e5402d593fc55c337594a4e0d31c259e891e7596", "49b3f6d8712c01f315686b6b8541eda8c5ee428a", "c6dab0aba7045f078313a4186cd507ff8eb8ce32", "656268296532913eb34929e82ee19808429de06a", "23fc6c6e1cd52a77215a285a462840cbb96aec39", "3b6310052026fc641d3fa639647342c45d8f5bd5", "c1dc0e5226eb8d92d141fd8bddc1ccdc797698da", "97bcf007516cb70d8cb17b7de6452aa06c4b9c76", "715b79490e57a2e057d4868e9c4fb0e1e14582fb", "a0fde5b5f8aa197380da30210c10c8c3ff2a25df", "3825b2ccbf2b305fa051bd7b62306108d61a753e", "54f892f62580c11a5742f60ff87f8231406e6502", "df28cd627afe6d20eb198b8406ff25ece340653d", "4f922f6602f39baae94f63954005776e1da05671", "421a7b0b6cf45ccba3df41a99fbb272d324489d9", "2ea8029283e6bbb03c023070d042cb19647f06af", "4e6b3c687ad5a78e97c1fe3c1a221bcb32e36bb4", "64b14354afc0e33b1786c0c5ab1af46e76b4631c", "dcc8c248ab26da04760b50dec4623ee6ea099ef1", "8b9c53e7d65ba7a7be3d588d00481f2ff49b5ef4", "3b6602e64e62e5703151d17475d4728bd2095256", "203956dec006b8c313bfd166be58d1e70b3dffd9", "399ab5652908d99a5be1a664425f6463f67df2aa", "621e2686f2113c2181653b2fe524a09eb91d5baa", "3315329b633116fb5e1e8400b6932bab41df58c6", "0856622ce2fcc4e39fd396427abae90cddf78fd0", "d1c103c63d930d3ae7397618f486117a48e35f16", "6eb5db8e6a79ad59bf4f4a5fccdd5b10237408d7", "20b8b3bad07b31e8ee83b2d865266ec58667992e", "ec40a234a74b62a9725b0747caf08d679e757429", 
"0af68a284808a8da294e0ec18b9879e3f7a5b21b", "e7f4951c1106bff0460665ef67d11fb9c2d07c41", "6c0b662be91c0e09c0caf039cb7c80bdb01d7198", "27eb092a9adbfcb3aea1b13bde580f1fd5c7b8f0", "ce282197f226fc757af4d27cbdc3416ca62ac5bb", "7d057676c9ba7b313adf0b191f64eb26ac2f9dd6", "863f2a473e9e60dbfffe9f7eb576b9bbe3d3a6b4", "93a66d470c1840d11eaa96ead3b600450b3cc9f8", "1ed8c43191d240a71dd2758e47e31be13660addf", "786f2e480cb81c9df8d213ac156a5333946a2b8f", "693e6afdd6a104181da54471122169c68d7ddd7a", "b13b6e3dfdf6d708a923c547113d99047f1a0374", "b47dae9d6499c6a777847a26297a647f0de49214", "36d487129fd0b828255e417e0d10cf13d7f525cf", "f38ad869023c43b59431a3bb55f2fe8fb6ff0f05", "be7bb84581b09f47668966d0cb70df0876c84a21", "bc4537bc5834b41a631d9a807500d199b438fb27", "e8e8f40ceff8b71d5dafa6b680d40690dfae940c", "67bf0b6bc7d09b0fe7a97469f786e26f359910ef", "a0d1e2934e6fbf42175fe6f04c281a976dc33975", "05fcbe4009543ec8943bdc418ee81e9594b899a4", "9e71ebc94e558cd7bd343959bbe59ff9b817171e", "e80635b9b48df5ad263c51ecec62d7d4bd7327fd", "f32a06e6a9b00c3db88e6f4bac235b7d660f5d8c", "4f8bd3519a6e8a05db9e35b027c0c65c91d2ff62", "8f0b6845689a0b6adda2feb52b9345f9d9a2a8b3", "887cd2271ca5a58501786d49afa53139f48c66f3", "df3d2f514d41c0c37293d88d4a594e5cfc6c3bea", "8990d4dae35481774239f3eea4662887b43c779c", "2c7aeb73447bf6d32630aaeb7a18256b62ae1674", "5fc371760fd4c8abe94b91ae2ca03d428ac05faa", "c3dc704790e1a170919087baab0ad10d7df6c24e", "f0865d11131a84ef1d91e1c8b5718692f153267d", "33430277086192476fa6c32eae88688b0cb21228", "c0efa1a3cea5b1f450283b81eee9942defaad4d2", "045fbe21ea8e501d443fa2d297c1292264712c62", "607bfdbf583c4dfa29491eedc3934f2293e1fa96", "3dd5a70191613e0867d32f368fad6ec25c63cfb4", "de0157390682eebc838e271f4fe8f704251ddef1", "ef29e5515b9ae3af358e511a7faa8cdc69bd073b", "ac56b4d6f9775211dfc966e9151862fd508d3142", "bb534e04c39f807cdc38ca9acc29e26fe7f374df", "42b4e00e55d3b61da5a9054ba038f13600e70f9a", "34fdfd174a8cc7d11bb28494ecbf4a85c88558f9", "aeabf3d23d09967d182d1a95837b79d46689592e", 
"b327fe29e568e7bc224f2f05da1662abbac6a8e2", "8e7a6e9da1d57809fc2c12f11d3a26b07f5ca9b4", "5adfb8a5b37bd2784ac3e16f556841c2befb3931", "b903e77d8f4377b96b02e6e5fae5745e87f1c522", "1ac20a7a76f7b83ccd8ea0aab64e2b24ecd23915", "e7f00f6e5994c5177ec114ee353cc7064d40a78f", "0f90a76b8cfcaea8ed0d24ab958295848f7c98fc", "0a55e4191c90ec1edb8d872237a2dacd5f6eda90", "253f98d7e8005fb221a1fdfacf576401591a5db5", "4a4a3effdfffb51a0f82d3b0904c017086996ac6", "899333d57c2b2457f7240964311ada8df20c9ad9", "08cc9a589cbeb5333751ef030a3f04e9be531900", "ae87896c38f1871457d811a0588487db0155a833", "c6e99ff40ccae0d7ce8e32666ed7f75e3a381d9b", "e096db52fc8316e66273b456c58b073f9b689074", "d6ceebb0cde7fb0fbe916472d7b613a2d7d2e1e6", "d409d8978034de5e5e8f9ee341d4a00441e3d05f", "053a4e5a89716f3f9e71bd09718bd9021a5114e0", "77646ba359a87a5df2605ccd2a580f2dadd017b5", "fa7689cedcf2f0ddb6fa4a3c0dbefc6fa63e1a14", "7b9ebcc8b9c05ef661182fe73438b7725584817d", "77dc158a979731d2ed01145b1d3ead34a6c33487", "7db5404feaa08e3e53bbf4fea7d89bcf509cfdbd", "b778c0e5ec6cebbabc77fc56f9b7438f2974a4ea", "34f2bf9e98fc234d2c29f751f59407deef4f4404", "c6657c1263bac59b006d1da1174ec4bcea0dff3d", "56c701467da819088c3f734f3ba36a793d645992", "99b41df501f25f4aee9c1f94a75510b2fbcc6bed", "14f67bc2c25ffb81c37d39da9f1f932c4a16efba", "c372356800ae51aebf8054a0d16ccad77bd06e41", "57ce2a7078dbd8e98266270e1c3c78e71c7c9bd3", "05b9a81aadd8827437dd857bdcca0a39b13bb1b3", "3f600008dd9745e8357f5b7b3c1a69b8be6b7767", "58a11053cb0d1322900273a450e4adf371252cd5", "addbddc42462975a02f4933d36f430b874b3d52b", "a361e820a85fa91f23091068f8177c58489304b1", "8b4d8c26e4f76ae55474df2a3753bbbd0d75b8be", "5a6c021f80d82f3fae283865b259e398f9ed0f32", "03d10c88aebd7aabe603d455c7bafa9231c7cf51", "4db64fbc3dd2486a74dba3350d44c51e561f515f", "f2bc0ab0cdf34a1df441ed9678489cb810474c84", "b239b39c08a08d9c3b1da68a7bce162b580a746e", "fab35a4ecc333dd9cea0ea839f18454e749d3509", "07eb30d6bcb96d7d66192f0cf43038eabd6fdd13", "c737e65d7e8696f5a2878ac623c61aeff434f92d", 
"403e7fed4fa1785af8309b1c4c736d98fa75be5b", "7a2cee9a210e7b418fa6169f8cf027f7993a3ee5", "d0462aa7754ffdf39962e2003344937258a0e42e", "6b5850c5a288fd26480ebcbbfc43172597e0d442", "281114ccee453fffa82a8b1bbff47b38e930f9a7", "6e1b85aabb132ed741381fdf00909475d16cd3ba", "57179347f63dbabc298c40dbc0fad92b4ac12b83", "c2d065bc8067384c40b3e8146cadc9a0c4c1d633", "5cc9fdd3a588f6e62e46d7884c1dbeef92a782f2", "257e008c01a32b9b642553f3f1e59e61efcac4a6", "e534582cfc1b98001fa1ad17cc1df47aeab1257f", "a1f9ef1236ddb57efc1ebbd87a1a69db9bc38c4b", "38b0a67727dea3fe563e8662517bd0fda2fd5e06", "794f76c111ba1a4ca718e84ae74ee8d2a67c4173", "4be117c52b1caa9d7555130652ee8a1492b1e7ed", "68b44eb4c7440046783146064ae9e715a72766dc", "3737961f634876f59d4ffd5bbd198bf38b2cfdda", "b8471908880c916ebc70ac900e9446705ed258f4", "02b25bec70f500269e547014635b42f556d8e173", "718bb5362dc0b3bbc89216f73ee46ece36c3e775", "eb33adf3f8eb5c07b58a1433734ab1fee5d77c93", "b63957152a0f37ddc99904a5bddb60b3f056b8cf", "07cb6efa6734b5cc22a38b0855189d12791a0551", "5741255d30f4848273c921ad177b32ff1cfe0671", "f5050ffebf973d4d848049dcf661891acd950b82", "1067ef2c4d8c73bb710add5c7bfe35dd74bcb98a", "431f013143de3159c0c0033fee2fb4840d213b6f", "531a40720f2809c560840e6d3afb11a31ad0b9a0", "c8ebe4c7d884c468d572a1ccf8583ac912215088", "c1087c588960dd7c00a2b5feed57fbdb70d066f1", "1629148029595a5c728edb89aee90dc4797e9881", "d46a5bba21f897f1c4b3366dcb663820ef1c282d", "fbff92bd8ad39aeda14b7547ecd26ee602e065dd", "812a6ced985317b3b9429ef0455645a9744af6d1", "c3c73bb626efec988aadbac519c61810710282fe", "e79a34f9942172ad97c5fadca3701db3e29d32e2", "f8beb25e944004d283e1b347e3473089da244335", "0a7ae4e41ea12426f0692fab66fad3dadfca8148", "db63c15fb5f086667091752e6515647b733b9451", "1d1fe1bb2cecd94b1f905cf1d0675d214f6ebc50", "8954d46e1d7a11b20b2c688e5fb8bce4901650d6", "804d856f09602f2b8e9184db155bf1b9ab7f31e7", "d930d20ba42a5d868dd78dd73bac0f72110e0bc5", "19d414907377e60e8db604a8c32852119533c8f7", "78a2a964b61308f683fae6f3a62e3a8aece51bae", 
"bdf64dd341925ea7b9b3abbb49cab3cf978f8e21", "b5f5781cba3c3da807359a6f600aa19c666a3f81", "b8b46df1b013c30d791972ee109425a94e3adc06", "8e34de64c9cbd5d62b0ce53ab3d99092605d6c94", "70990e1b13cec2b3e4831a00c6ac901dae76b27a", "f820bca64665ac90fbed5881599a049198d71118", "6dc41784c56de1925e6a7be8ddfdc68f18a21e55", "a2ee76eef0ee147f96e40a6846d178128fec68a3", "30e18a16d4c7092694d55743ff92965e5dec2692", "c793a38c3d16b093c12ba8a9d12dfa88159ecd38", "6d5b0f6e5258d370f9af8a2cebf035fe61905db1", "ff948365684d3aa1a834deb49f326e264b56677a", "031c721ce468a136b9bac87da7274229e7b967b0", "f7514435495cd76552a4de01652a08ff8c2863c7", "4189862b2ce9c71e1b451deb58dd42f50c7d04a1", "5e74d92d841d1bc1c9c2d80219f98bf892f239c4", "635e3ce6fb0b28f38fb77f25770911bf08f0ff03", "a0453a7b2fe97280f1e33b22619991d3e5aeb989", "4bfd8b4f673c04ab9111e7d162e84e8926685c5a", "7538ad235caf4dbc64a8b94a6146e1212d4de1ff", "72944b4266523effe97708bff89e1d57d6aebf50", "02a2fa826a348cc3bc46a1a31a49dce8d06ca366", "e20abf7143f4a224824c3db7213049dee2573b4e", "b0d6e204c36f029300787f6334cb727325f8983a", "040173a26e1dd6c7ab2e636e2ec3dbe0b1349986", "27421586a04584d38dd961b37d0ca85408acfe59", "40f2b3af6b55efae7992996bd0c474a9c1574008", "eb66b3321d53ca68161c2caa2fe4f228c40c003f", "954af3d46d023d73c7ee97f2264451080f542084", "9b762be133142412ffc08da9ed5d13cfcdb803d1", "e6868f172df3736e052fec4c00b63780b3d739fe", "22fb836a593267d9ff09a4d12aa5b4a6fd52c81e", "a943f3987999ffe1c50d944e79568a53ab18a5fa", "5a383940c769660e53558d8f4bfcca7f5c730e75", "3b4619b1e138841800bae12ecf5263a0290782a5", "cb310356d1c5f567b2a8796b708f6e1e10fa1917", "722c33bfb4443f4f0a98ab709d40e379e7787c38", "fb03d1500deb642c53a6523d8011a9e8f8aa983b", "8711d150bce8691c61b2a29cf9efbc2747585569", "e2af85dc41269bc7c50fcf2fb35bfeb75e3d6ee4", "54760ceffc46a7a5425260834840fcfe910e0f3b", "0adace244eb8d075cdd3fa67d653edfff2fa200f", "09798b13739edabd55830fc5589d8ed263d62c82", "c2c058afe227f2099aae4f204688b22239d6837a", "d1295a93346411bb833305acc0e092c9e3b2eff1", 
"eedf9480de99e3373d2321f61ee5b71ea3ebf493", "204c73cd39d3befc4f669661b0451a7f858570ff", "c2adfc55e0ab9be6e8f5e4ebeb20770dca307cef", "3c895e65b811239604796c1e2d1bdca3494c50c5", "b18efa91e9893ae5fdfcaf880bae5c569fab4d18", "8387c58a5a3fd847f9b03760842dd49fec7cbb0e", "fc857cebd4150e3fe3aee212f128241b178f0d0a", "f89edc5a4d938bf6df0a780163b872b9edeef5d8", "19a30ad283f2ab2d84f1c666d17492da14056d75", "7a72ac1c77110d03dc0482f2556e9bdb36582fcb", "21679eb7e953bd132803703c27dcd56484d497e6", "73c13ba142588f45aaa92805fe75ca2691ac981b", "3fee5c6343c969f33a7db4c7f7da1e152effd911", "614f4f8fe47e7c0bcf64aa0ad39dc371e4b4ab7b", "44ce0051d9482d96169ff5564085fe9867eb3193", "44054c64ae7ee16a8a8348bb57345aae95a8ddae", "16892074764386b74b6040fe8d6946b67a246a0b", "43016e51bc6e7939521ec3c2fcff78f35bfc5e92", "e1e5d64318ec0a493995fb83ef4f433ddde82e77", "261fa69e3260296652e02d8757f9af16f7dd6ac9", "5ae332be64c8eecc92a61deb2c3c3587cdcc59ef", "95d8523b7782b9f162a73e60796d355e1d4de301", "94d5ebe936c101699e678f6f0cddd8a732986814", "1d0a6759de0d55d15439b0367f0aa49c1e248c5c", "00cb5ee9c7f016a8ece5dd3b34e74ee65ee19e2d", "1dea4f56c04d12abbc9e1ed7c48c7ccc09e7f5bb", "6135c12b13700eaa93016f65bb0021df00f26f5f", "929218b75858e244c1ca99a6bec07ed9465737c7", "10cdb31a23c3233527ad2f8beebe7803b7a51a8c", "27c15e9932bc039220f6937ae8e2cf9f1391f7f8", "79a36b19ea363c14af27a1f4112a9eccdd582837", "cb5f65bb52ef54b46368ba33db2ddf56cee5b8c5", "731840289e35c61c6e21ae18f2da2751bd8e2f20", "e577847c36251dc31282ad57ea969ea8297369be", "ecdd4731e197f4afda804602f533565c19ffc271", "3885cfd634c025c6e27c4db8211d72f54f864f90", "33236cd0b9454ab88ec9deddfb8ce8e492056770", "d8cbe136dd95d287786d0ed5f0d0e53f143bca7f", "f1a772608cae0d3189ad1293d5b7631435f02e44", "5e51f001c1349f3dffaba37f7e4bd08b370d813e", "992eca71ee8314ede9bf680b6966730f6bb77bc5", "9d6c5dc5b212d8a8e94e7c52b0a2e4550aa2e117", "10aeed13a7c0317eac9608bf0459e1e44eda3625", "7066ca7d19a714012dd899f3ac0a84e4c0dc92e7", "2525483e2d899c435437bd874208071183223b46", 
"9361b784e73e9238d5cefbea5ac40d35d1e3103f", "3c3477903b5138fe5e5e007556981387dd391f77", "89e324b9c64a800e57ad82eddecc03f2cc0b7cc5", "1110f86433302fd6c4cc92e2ac1ddf80b917b25c", "43fe6e4c3163435fd829d3c1c41ba17de4e13559", "290eda31bc13cbd5933acec8b6a25b3e3761c788", "9070045c1a9564a5f25b42f3facc7edf4c302483", "b6d0e461535116a675a0354e7da65b2c1d2958d4", "8cc56d82787069ca91ac6bb8133e15da6dfe564c", "6986658717d6430be30acea59a1296125062d5a4", "9cd7487e0eed11dabc94dd867178204c53eb2270", "5d58d4164493924906231a28153e50342fdf1198", "6ff6f356244a647ff2a6d9dc27343d3e22840571", "626c71140a93afbb2234642f2634ebf635949263", "04bb0a1ccca86a4c1084fc7472ea07189c110aa7", "d0c75482c5d2d0c1c860fe6f46bf72767bd02848", "4ab31fc841890b90d66c30ed2f694a0597f0a081", "3c3eb65a936296d6ae5058b564f6d0e0c07772cf", "084352b63e98d3b3310521fb3bda8cb4a77a0254", "5369b021f2abf5daa77fa5602569bb3b8bb18546", "2a2fd2538e19652721bc664f92056fbd08c604fd", "94490145def938ca1f8bb265d10b66924937a367", "58eba9930b63cc14715368acf40017293b8dc94f", "b06b0086e84038abbe5088f3429603778f2b8fdf", "7996a71a86693a721e45ff9482d286a310ada3ad", "61a768171bcaf5d0b5359af551d9732015bc4036", "7e53ab07d0ce28484830329036a1fc018b9644dd", "de0c4459c46c5efbad02cd9a1f4687a12883c5d7", "66c4f727e9d6caccc4d3154de174258ce63b9a35", "6e604946a0a51911db0e887378ba1ae103dcfb9e", "508ce149508610edd93a17561a460f2199c8609e", "3e0db33884ca8c756b26dc0df85c498c18d5f2ec", "2da0f99ae90ea3e6ccbd3f43e52dbf5aa1553363", "9458642e7645bfd865911140ee8413e2f5f9fcd6", "30f7609d111bb3bc006e3dd38678291528aa14d3", "38b5a83f7941fea5fd82466f8ce1ce4ed7749f59", "e1678add566a002e1fe5536d8607888549e36ff8", "9355b789dac162ad50c31cdf5d9a196cdc09828e", "5a2578a7acea823d3c8ab30b1658a44cc1f63078", "50137d663802224e683951c48970496b38b02141", "8cf679ef0ea28557acb86546e4b1b1a617d1c698", "78045e2b93745b16a174137074e430ccd5ff53ff", "1a6abbe2e052c07ae2524ffdf91a8b340cdb2718", "f3d0d2f66ab04d9b8d6a590b339cd692b22174df", "50d1021392b6b226cb6a022b69b55396dfec99fa", 
"fa62bb5355ee5447ec651f56b9e06645eb2f56f0", "78d7fe93404686eeaefce0aa3d9200272c0f4413", "0d8a2034bbdefa214d8debecc704cadc5b9ec6e8", "14d5bd23667db4413a7f362565be21d462d3fc93", "ad467baa4f59d18ed998757bcba3df3c2a753df8", "218982f0878a3de667fac2bb18b9f50949aefc1c", "c31f7fa9d050c8a7181fc3407313f1fddd06de8c", "cc76729795069f24d66e8c29f1e08b50f8980a53", "4f182750ff26f68d284a416b2fcfc5c53c8e9c24", "586c989e12d19a48eb7084a3e07e9149c9fa7092", "0f8b4a64eea40c1f0aa655d4e77e46543ff558b7", "91836a9d42edec708826d9bef97e4eafe2f9afdd", "6106028c73d22570a01212814e1e4f4edb4abed6", "3da12b99cd8040bb374eed160f8016b3fe492967", "422d352a7d26fef692a3cd24466bfb5b4526efea", "a7e20614f707cd8fb02cec5ba60ffcf48ea66d92", "224547337e1ace6411a69c2e06ce538bc67923f7", "c04236e02381fb8e084069559e92900d4ec1af7a", "1180b1da7221c8c614ac7f6960772b78342f233e", "bb7c093c41fcec269b6a7a950902cc95429bb289", "6f089f9959cc711e16f1ebe0c6251aaf8a65959a", "90fe4ea1d3c7a6c5efdec3e4429d314d21e6dd52", "356ec17af375b63a015d590562381a62f352f7d5", "aa420d32c48a3fd526a91285673cd55ca9fe2447", "a9a4e19337f04d9ad14fa3d231a9ed13735139c9", "bd75a21797c8fbed8e9a8f293163e4d4c94149fc", "64e0690dd176a93de9d4328f6e31fc4afe1e7536", "32cd02519928aa91dca18074778a59b2cba19765", "6527cf0b9dbddbd0c6429a35a3cbded3ca336583", "bbb44958216281607c282427f3001f7bc36e2385", "8e0b92b7ccf6a2d110011b79637d257805d8c96e", "fc30d7dbf4c3cdd377d8cd4e7eeabd5d73814b8f", "4e82908e6482d973c280deb79c254631a60f1631", "256c91400aa7e92160c889654614f70213947f06", "1883387726897d94b663cc4de4df88e5c31df285", "4d38a43865e0bcc87dc6ad83331539429e00543c", "823db29d4c2a79e309ad2b394a4aaa83d9e15284", "1f527b5406356018e6dc401a4be8098a5a451891", "880e232f260b0f9d649a4e6408b1cf82f270bd6d", "57e9b0d3ab6295e914d5a30cfaa3b2c81189abc1", "6aaa77e241fe55ae0c4ad281e27886ea778f9e23", "dfc14e8bd07aeb2b97b3b5129ca511f0d157a6b5", "64e0bd1210f180e0610b2a1faa188051a1de29bf", "b4288f34528fbda2d2781454aadccae0d578d59a", "ed432ecd59021a96d8995269a34678c4c2774507", 
"2d81cf3214281af85eb1d9d270a897d62302e88e", "9b93626cc87411964712d05905c2dd0a1247d16d", "4643da1a650d34aca300f3222d07fc9c1e622891", "367b5b814aa991329c2ae7f8793909ad8c0a56f1", "ceb9a051f1273c7159aca80891f21ca3357b68ab", "d29895fca6ae688514850856208c2b88f01ea3e0", "498c8abcef3b709be97167ea5870e85d0c586945", "b17b20c3a3804482a1af3be897758d4f3be26677", "b28eb219db9370cf20063288225cc2f3e6e5f984", "a518ca2df1b74e3080c93cea11e4b769c82d33eb", "bf5214de5a1460e01eccffb74c60a57b43cf099e", "6749db5ae9547d5fe0fb4c743c631eb5bac69d37", "b281f6cf99eeb8dbb9bb0c31a57827c8c0493e7f", "30c93fec078b98453a71f9f21fbc9512ab3e916f", "1627a0c46c29e020268f9225c9042bd6cedd83b5", "057517452369751bd63d83902ea91558d58161da", "fc685ef21c4b2db0304b830f6a20e31eb0ec0737", "71d4924367ba5d31d87bf1ac724db3f285b345c1", "14effecdf4e99a6ef3bb590582ca07e642d49632", "5172baf893b1afe2650e015606b8537e16db88e5", "325c9f6f848407a22b86e3253cb7f29fac19e40c", "f8b26b2ec62cf76f58f95938233bc22ae1902144", "043831b3164152b4d67ce55db90eb3623d715cbf", "70b42bbd76e6312d39ea06b8a0c24beb4a93e022", "70be5432677c0fbe000ac0c28dda351a950e0536", "32b9be86de4f82c5a43da2a1a0a892515da8910d", "743ddad3a1d084dc3cd1ad87d6ad21653c96b540", "e7617e3beaa1a1a8a3e24ec517735c5cf5c8a8a8", "98220d35ae6a3ba745f7dea1434f000ca60c62c0", "cc5f4d5aa9c3ffa75a335f3305a1caf9cbdeb71f", "5e0df06d92176f362d52962de866e2d825185afb", "80b41fb824f3751b03017bf7ec8c5f71b7e214b2", "0348ff27225e45578c39f000e86bd96aef060c97", "3e03435792619833d4e2aa14344761b003c10c67", "dba7757ea7a390ec0509834a5f457c6f28ae6b76", "8e64f7f38db57ddc197cc7a9c51b914920ee99cc", "184c3e66a746376716d5e816d95e1a7cb8e04390", "24f367315bdb96ac001d568b8e2c0ff022845ca5", "0abd6c8cd2dafd05eaa6ba7e676581e92f3bbbae", "a44b91f46ba66c8279b93caab6842444de0c9343", "4b6ea0c9b4d4d7f86e5c61a84bf855f4b5deed10", "4452c36dc4c5e9f11d041489c8ff2e7006d33c80", "be75a0ff3999754f20e63fde90f4c68b4af22d60", "83d956ed39127058e02395924f96b68e2f8289e0", "4d3318ffb97733c5ded63ea82fffb60ccbf6b2b7", 
"eb67693ae74a3e5fba42b96b41953f85c7414a8e", "784705fdf2c412fcf764841b980cfb85ef3944c1", "a8ed00afc46064b18a6bcc7aa282e554891eacf2", "dc53c4bb04e787a0d45dd761ba2101cc51c17b82", "2dfba157e0b5db5becb99b3c412ac729cf3bb32d", "f0cc615b14c97482faa9c47eb855303c71ff03a7", "af8cd04bbe4902123d7042985159a6a5da9d9fb9", "2323cb559c9e18673db836ffc283c27e4a002ed9", "d7df298c94b33bc500f97406313fad1d5ed7060f", "e7e070a929d7911ef2f8c6f6d4e3c5d62c5a9890", "e6d48d23308a9e0a215f7b5ba6ae30ee5d2f0ef5", "45e459462a80af03e1bb51a178648c10c4250925", "1d14182d23547723a32fc3e51e5c59a0402d31e2", "32d072ce790b62750fbe343d9dd8620939d84975", "074fea1fbfedeb80936c7cc1df7efee8f0b680b9", "5bae9822d703c585a61575dced83fa2f4dea1c6d", "12a6d0617af44b46b70cd5c4ce055400b134aa8f", "0a5718f6a60ca18e6b6de5660c49040ac0045d7a", "6947ecf688956184bd7371b3689fb9494139320c", "97ee35db6b389a7bcc4b7975d12dbcd165226aad", "468da6836ece8e1941684a00c5d054ef632d5d96", "f34bc8a28f71be31371b1af0ddc3d1227e33281f", "29113ed00421953e0ddc4fa6784eaba60f05e801", "448284cba619e3d0cdf4c0023db91ec3fdcaa349", "8f3786e2064a103472ec227a03759bb5056aaffc", "1f4fed0183048d9014e22a72fd50e1e5fbe0777c", "25252e21edb700772ae7bee300c070024c4e87ae", "37f2e03c7cbec9ffc35eac51578e7e8fdfee3d4e", "53df8b8f6fc1b2e4631a08c2dfce2e8978b0da54", "b4f67b4286ea99e7f0a57536282445e801b97847", "02af5e40653b5a545b62aa6aebfaca6557f4173d", "adcdebf739d1f911a3ab26ca6f496b7789bcf5ac", "e23b3779547682b3e1f5a9108ed1afddc97af679", "10c077bf2dd1bed928926feb37837862ab786808", "2a7935706d43c01789d43a81a1d391418f220a0a", "a0b489eeb4f7fd2249da756d829e179a6718d9d1", "ac611487ad19e0ff182890ed567c449bfaabd0f4", "ba004e5789bdc565734727ce22820dca77d6407c", "283d226e346ac3e7685dd9a4ba8ae55ee4f2fe43", "3dffacda086689c1bcb01a8dad4557a4e92b8205", "6512f42fd70b42300ea3f318e860d270cd6d3b0a", "29acb73f1aec78996d77eede7a7b72e892be9a84", "715d3eb3665f46cd2fab74d35578a72aafbad799", "12336e7d5d2ca4e1fdd2a52d50b2a5c987c08b0b", "30492d91c8ac2e476a25bf7ddf445be00b3d2fe3", 
"7a776f080b270c8759b2b4fe601682276d1b2eb4", "a5f094bd197126025cabc50b30e0f03d56d8c594", "244377600b1474e1da3b86a08683e629990d1417", "26d25c21a4da2429d5b923b86410ad73320d61ba", "3ed9730e5ec8716e8cdf55f207ef973a9c854574", "a09c071f8468897176ce4351de36e7c8b7c2dea6", "1167136efcb52cf49e89b90949149312bab19cc3", "20100323ec5c32ae91add8e866d891a78f1a2bbe", "9a40afeb829b7f7f7696c9c0767a5da8cc5eed5a", "fa5aca45965e312362d2d75a69312a0678fdf5d7", "869df5e8221129850e81e77d4dc36e6c0f854fe6", "b18f94c5296a9cebe9e779d50d193fd180f78ed9", "99e386cf71727ee66d5ac77693e107c78508f899", "b53289f3f3b17dad91fa4fd25d09fdbc14f8c8cc", "0749deedd6dce98b48475f335e2c322b16fcb202", "b506aa23949b6d1f0c868ad03aaaeb5e5f7f6b57", "eae17f9364f3838b23cf5cb53ce6be0f1d3d931c", "fc5561d95b63f05a2f88e66291dbd269e4141a00", "0382a1301094c6ba60b18ffa8d12da6ca0863339", "ffabb5dc6f3a3831d1d88028594c06d22d39ebfc", "d8bc2e2537cecbe6e751d4791837251a249cd06d", "a936a60766cbccb27217c3ccfedaa16d4c7b517b", "b787113305ddef424def20920b8b098c7f18bd98", "918ab79e963d5e339a2696ee4aed123599f291e6", "6ec8bb9c7aaec908b76dad89529c5ded7cab8e83", "4a44381f7c639451a797b2d3016b1d4cb54736dc", "19c5dded4a2d1b7e62e29c71a4a7bd0911e2f5ae", "608305c25aae19dd346153dadedac851f0b7f9ff", "5b36d1d7279d0db75f52f4934b123a3757bf8092", "da9d71087025ca20fe0259b9aabf803e9d556160", "06abf34ed52cefe1181451700346f870c2adb0b5", "d9bda208addf00a55df23821f6d4abdb85e73599", "347573e0b27a01748f8a6781dd84bb312aea5c53", "449a170266998d364b960df58506981920c732bc", "3fb7c5824fe8d3cb1ced8737b81f9e6fe5581612", "1ae3dd081b93c46cda4d72100d8b1d59eb585157", "6a9c460952a96a04e12caa7bae07ae2f7df1238e", "2b9410889dc6870cc6e0476dbc681049b28ccacb", "2329b177c71c7087013ab4bfdc3154a6ba87ff8c", "823f4300ddf64a95324db89035946638ecb02aa0", "08030f9d34cc96384f672d9f9f296914d594335b", "df3b560a5d6c8cc5fa1477d3a89245a0d3b60715", "ec87dc43a549dc35dec244e514ea2c3a4109ffe2", "82fb85517ee1bb3746348221de3d5347c6b3ed0e", "23515e0f5fbcba94dc78e719c127ea42d087c81d", 
"d67d9abc9c9f9e84ab3647d21db70ca98117f709", "e3b92cc14f2c33bfdc07b794292a30384f8d0ad1", "b8af24279c58a718091817236f878c805a7843e1", "920246280e7e70900762ddfa7c41a79ec4517350", "72dc0c1e033be838582ef8da6ae3d51b6375cebb", "c0262e24324a6a4e6af5bd99fc79e2eb802519b3", "828b73e8a4d539eeae82601b5f5a4392818c6430", "2814d558b4d7425b5dae6b3dbbf5f4a08650fcb1", "d7f11ebb73bfe74a57c33f7e75f7981ad9385580", "64c78c8bf779a27e819fd9d5dba91247ab5a902b", "82a922e775ec3a83d2d5637030860f587697ae42", "b8a829b30381106b806066d40dd372045d49178d", "2914a20df10f3bb55c5d4764ece85101c1a3e5a8", "d8bb184afa8e68c569592471d83fdcd9120fe5c6", "13a3952eea0445fc4a1825be10acbd3a189355e3", "1120e88663a38ed05120af378f57ecf557660160", "609cadcb4d7fe32d616f8e778b7fac9dd578bd81", "0726152a1c1a5723ac34d54abec0dc8d4659598e", "e1f815c50a6c0c6d790c60a1348393264f829e60", "2c9179fec33f69a5c1a453034dc7d3d3302839d3", "c1c119361952120507d010b96d53ee8bee13c4b7", "0824768412cf2c3e9f550025eee06bb34e5f3afd", "1db11bd3e2d0794cbb0fab25508b494e0f0a46ea", "37115eaa2789baf9edca92b48305e434a5ac5c02", "03d364f8545dbcf0d7240c5bb8dc39636c698ddb", "e12d5019baca91c3662aef317d406ae884f9c742", "495fa8f7d9d0e4d472c49de34a9d17343668f4a4", "6624a89894990a9f5edb06feb9e4d5ce6147459f", "679136c2844eeddca34e98e483aca1ff6ef5e902", "86e040f940f4b6399aaf41eb3f86b3cb948ee7dc", "0e181a1b4c63143466c9ac858d46491f1ad11092", "b58417561ea400b60bd976104e43b1361e1314ba", "ccde43d13203bf29ccf351e8c9a79ee4b0b36142", "6ee5205408fc6db03460c05765ae0f21a6eb9552", "13caf4d2e0a4b6fcfcd4b9e8e2341b8ebd38258d", "fa83597bf71dbeb606bca6593bcef8ecd51e8661", "c4cd19cf41a2f5cd543d81b94afe6cc42785920a", "076fd6fd85b93858155a1c775f1897f83d52b4c2", "e2efecac66fec3890b7ffeaa54ee1c6400f9029e", "909f91c1957ce2bf9d76ee2109a865e87bf17057", "d7eae9f76dcfa978b99eef430feb9420eac702eb", "0b7f5d25cd1142b5761194f1ebeeba6780aaabb2", "2bb968e8f9df0fa72dd72e5d705ea7b75af8dcd7", "73052a2bf7b41b7be2447fadc13c29be1d994708", "4a0f152a07a9becb986b516a1281a4482b38db81", 
"47119c99f5aa1e47bbeb86de0f955e7c500e6a93", "6bda2c2ab87ce2fda3b2f7e87643835f921551a0", "7202235dd82965b4bf18216040e40433f96274cf", "5b7aa540a01da5ea530a9d2f8362ebc8f2f7e00a", "137239cd29634465f35ce261718efece57cfc617", "b183914d0b16647a41f0bfd4af64bf94a83a2b14", "c9c8ceadefa12adfb867cffcd2e5a37e4db81868", "6e32c368a6157fb911c9363dc3e967a7fb2ad9f7", "356a0c92b61a56699211d5c5d9e4d78c9373e819", "1f82eebadc3ffa41820ad1a0f53770247fc96dcd", "85136f57350f29ff299dd13b28885666ae305e57", "9363bf52a5bb2ac94bf247ca56e7cf55fb29ee4e", "b1bccc71e48ec4a114cef69a492b2e5b3d1f123b", "fe3208c3fd29876fc17a782a2f4f1bf3891bae0e", "7d3698c0e828d05f147682b0f5bfcd3b681ff205", "c11f05de35310725d83a4ac9a393f06168acc967", "b45bb3876a7f980490e0b78ed70064d276b13761", "60b6d71256ab6f77ad1f43b9e11b4a5ea2dc3aad", "a48a8e337a155d01a9652f3ea36675710e600222", "84ba9ff28d9d4fdb38d843d2a6cd70d38644cedb", "4ec4392246a7760d189cd6ea48a81664cd2fe4bf", "7854876ab5d87248ace94615731ed3e3e56af769", "273b973092a4491974d173cc5258c74aede692cc", "9d89f1bc88fd65e90b31a2129719384796bed17a", "22909dd19a0ec3b6065334cb5be5392cb24d839d", "be24e5fd1ec27d444c66183e89b5033db9155de9", "9fd5ecc538a9344814dc00b92beb45c54d5dff3e", "4c975a0615112fd9eeeed157ab4842509b0e15ba", "e94c05eecc78008c12090e24e13526dd26d0a644", "90c4c8bafe50500312350e73d93eda6e63196832", "29fc2a8c6283543fd18d758b5079530f7316bd68", "2258e01865367018ed6f4262c880df85b94959f8", "a8420e7fa53b81b8069ced8d9c743c141e2fc432", "1d8a522bbfddfe86951ea7605c7df058b2c2e1b0", "9c576520ed9c960270715f790a62b9337ce88bd2", "a9fcc7552ab942c0b54d05488da2310127fda764", "4b0893bf71e4e13529cefb286c78b166a9491552", "1d81293bc17a135cfd35912146c538cd81830381", "4e4e3ddb55607e127a4abdef45d92adf1ff78de2", "296cacad71b4181eca4a571cf080d2baee229dcc", "f09bcd575c8bea73dcfb413840fb14d6a4caa4a8", "50797bf1a167ba294f640d3ae237cee962427cf0", "2b88c583cd62130f1e2c6921db9703a0c5746a90", "3d1b0c7e9ef0e31dd635041539e795dc07ebee86", "1442f7813f2a14c14f977b2fff833f5c502db59a", 
"42b58427ed3265ad3c46b3e378ab09c26e91117c", "ef92368c21c1310a6134de0cf5befac4443e3e85", "5da14895692fad7d1f403279faa743a657fa7ac4", "b3f0a87043f7843b79744ec19dc0b93324d055d5", "02e97e65fd0ec9a6d98a255d0396eb796a5e444a", "fbdc18d9e6bfcbac30b8bb330c71b6f2be48395e", "76c327cce83905bda72944407fce7746ed0cb6fc", "39df8bc28c6a5ba494bf2d31f68bba06acf934a5", "8b928332d60e995bce78ff543a2d7a37f1103ac6", "f4cf8e1ec82d186303fa8c9e22ed0ff68b55496d", "d7f7eb0fbe3339d13f5a6a23df0fd27fdb357d48", "95296302a7fc82edf782cece082d7319cfa584b7", "a9624e3c04272fe06fcc440a04b373533439c111", "c433ef13220c2ed4d2558283f8515b0e6e09bcad", "c0d21722d83c126af4175add38ffc893a33ee01e", "632102b986e07d4a38ca3097d360519b32ba96ea", "4eb2f74766378c218e01cca0f23db47df00bd1fc", "9e644b1e33dd9367be167eb9d832174004840400", "d15662886a615521d4205fa1ededb3d794b9f944", "5f0e9cc18374a670dfea4698424c9d48494f3093", "b0a1f562a55aae189d6a5cb826582b2e7fb06d3c", "14bfa18bc0a9eaf28732948813c764390c80acc4", "11a1e99fc65fb8567d7f52dce941231ea949db0a", "b674bacb45c1c96e9c93792019478a7760fbe7a6", "3d3f9c4e14f5a1cbac756045dd22d2fcd3056169", "001973a77bf8fa82314de667af5b041d856b0069", "3b2f78a4edf5da876e52513d0e3960da7d3a253f", "57165586f65f25edd9d14f0173c4c35dab8c2e66", "90465149a7cb3f581697922f3c1b87de5be246cf", "a07a894108b5ddc19d18e66e969f47a3b2a6e006", "fca56ec29f1d567f829be5f8477a81f1ce732f86", "72a79f351d4ae03ff940ff920898e41ce960f58e", "3b8f605c86007f7f22131e7c31ca5c327304589f", "c74a42afeae520ff6ab280d17bccf0d082ba8de5", "2965d092ed72822432c547830fa557794ae7e27b", "5348367971754f6b63bd0c595fc3e331eed8950d", "2cc17e1ccb5f1f67f8ce882e683d8c66475330be", "5db25e8c1e45bcdb64b743f81dbdc69f32c70004", "62f5a4424ee0f383f3477cbd9d9e7b2e9c6adb14", "5bf6e7599c38ade8ef3b97a3411a7f84418b6062", "78f4e29fafad7a0156cff6d14e9b92c8b8533d4c", "47d967496693a4842749df307280197fdb8b9c7a", "a4bab165158b9627280fb3052b1c731210f2a901", "f59ac278349083a50871822ea08172258030265a", "b40622567165b372a6e15928ab5ed41413d1d50d", 
"1849b903e19b3abfeff7b0c86cc875f6b42b0818", "3be9286e5d6a9d9167c64b05be6fb0712ffbba35", "37c4541037b67e8f4c538b285efe80aa251a49b9", "40d4fab85e2e1557e61d03b92429d64c6efba101", "07f4ba45b771ed123b08261d88acda19406a7987", "8ae88dc7ba177128f89223f48921e3a74d8bf1ff", "eb0cf5727275f89323e2cbb4c0f0515b8ece75f8", "00433afe3fbf9aa4ac8a32778f7ab6878f142593", "241d2c517dbc0e22d7b8698e06ace67de5f26fdf", "9f889c81bdb1d791e22c5f455baf32829b5b788b", "4f44e4c6470225566d7c94105e06eadb239e20bb", "a6c40d0fb4c0420d1d974f9fbfae83da514ebfbe", "45a44e61236f7c144d9ec11561e236b2960c7cf6", "ffc02195ad8eeafa955680b128b99ebccf4b0a91", "b467f07202aacb8c5f7e053a7fd4e72d0da16b22", "4469ff0b698d4752504b4b900b0cbef38ded59e4", "866dd3a16e035a7c724f8ec02d26e45655f7a002", "3e08d000ba3dd382c16e4295435ef8264235ccbc", "18219d85bb14f851fc4714df19cc7f38dff8ddc3", "8aa5f1b2639da73c2579ea9037a4ebf4579fdc4f", "aa01a6124ca44e88b1e6819ec434d7a26efd956a", "b3a45118534144f50a56653dac8109c73fc2c0e8", "cad7845e9668884caf4842b14983ec0e45bbbc75", "267bd60e442d87c44eaae3290610138e63d663ab", "ef61e43a1cce95afdc0696879085e834b981d5de", "44880df54e6caa3e7263db7a4d5cb77838f4698f", "d5aa657e0e34dac4fcff484496b0e17d5ae0f52e", "f5adb841e30eb635b91e95c03575f3b8767c9ed5", "7a329ed2735f5401d6f978036fb859b10a4fb96a", "59e266adc3525b4325156f0cc0052c1d76b1c9ae", "5d28a54b1b27280482463df85bb66bc2914ff893", "45ca696076e9c073e6cf699766f808899589bc88", "cd422f5ac7781a4f313e07f1268589ddba845a79", "53079196041fedeb5f1e236b1c76c7108fd8346e", "6d0767ce00979afc26940456994970bb488d4b46", "286f443fa85bc9d892ab54878c0ace0264d0dcff", "d0f709ab39e280467d854064132570c1d5316de5", "1f55079a33023518748d66869e7c539994c61c86", "ef8da5d714c269f9573dabb2da5ef5b33c13a0b0", "d7f22666206258c206c4634ad9b22f95e44504f5", "2ec56b437ad9391ce5ed85b68561a4e58f21d976", "76056730a78d286d1d636f7d6239ec510a1a346f", "128ea529f079431220ea63229c934f36287910f9", "40fc2803d3df9759a3853b040588a7d9be9542d2", "8ec3325c12340d1b8b746b7e9b40616ace1f4d0b", 
"1bb73d8f1224a846473d0a2ddc4289ae3e21b61c", "719e58a8b256cdcc88f7980e4798fe8e6aa1a808", "14261ffecd904d9aff97d98c70e49ec1d870112f", "f25ad1aa804dd6f7cfb9b02afa371fe1b9bf4378", "0480b71244b59ed13cfe844c8bac8883a0c40573", "c1f2887cf7ceb5c34138cec1ee6a7fa4869e58aa", "9a57f0233a616bab84fc35101637765510b1ed83", "a110602d15b1c98459071501483366a56cfb59d5", "235f4fad10a5d9e043759354a7cb94122a8f10fc", "5417ec4aa67a2b00e4dc4b86a5ef6ddf0b9cdb3d", "e90816e1a0e14ea1e7039e0b2782260999aef786", "2781de1aa6f4c9621ad3af38fc58b894696f1791", "7c23cfa319ea8e4fea8dfa28c8778edd9049d3e8", "2bdc0c79b26fed51bc2af1af16117879ee3f571e", "0f94f4934d0a26dfd243852036468ecc9bf8d22c", "54f0fa07dee7bd270d3bd8da9011ca90df78af59", "f0d29be1a93158d320bef285442f63bb090f6c31", "c9168495c99b37ce601bc778419c2667f34cb29b", "25ae83767c926898047bbc50971b5b11de34e12a", "7960336aed2aa701c147ccfe36d153046f1500bc", "18ab703c9959fbea7ad253a4062eb705b245552c", "8dc2b137b2a1a3713f6ce5e78f621a9f0f036bf8", "e0659abe7b377b146bcd8ac5040e620bd7f4ede4", "1a10782ce899b0f884aaa8ef4659976adb5d0699", "94bc017ea8795c774c9dd572ee44673a51dc631f", "7711330fb88e2522a5779a09c1622b75557f9254", "c238f871c029d8c33949f8410f8cf3bf79ffc102", "e5f39fee9b241b83f27ff9fce0fd027268c6d030", "8db60e5196657c4f6188ffee3d2a48d188014f6d", "fd5d77fe7c56a4f29c0a7a3bb66c0c2ff6286eec", "48c494a8f1fdda835417ccc395a42fe210efec2c", "5ffaa04d15fd159c420cc10fddf15ae39a6db865", "e3bbdd6efc906f6ae17e5b1d62497420991b977d", "46a93d3bb2a2abea38e432a9a125d225d729f0e4", "632c114e12a6b88bd488ddfb1960d669f101ca3f", "11ce6be0fa8972659bd288a5f51c152fe38a1881", "2103bb6772bf01e43a8a4e8e34f16baac7d7c331", "203abfcc3df8de6606cf34fa32cf225627f52d00", "fbece7c5584662d1da447b12e9a2be6fe626216e", "092a4a0f16287c26dcc833958b87b32346546c8b", "d4d3e579a2a8e3ca44d42e3bcc31c3f36b3e46a5", "29a46aed79df53a1984ee755bed4c8ba2ae94040", "5014f5dd9d3586dae0572325085647ddc187fa3f", "047fef806e6afb3430bfc6d8a6ab09b48afb9822", "1d08754a95715d1058772b48ecfb082bddfb16d8", 
"f6d35f4884d0834f2a2b29fd6341f3b4afefb119", "486a0044b9c86c6f648f153f3d3f2e534342b754", "664ccdcc614a8ecfbfedadc7b42b9537fe43d3f1", "2972997fad77516bbae27fdafc0bb44f0708e806", "bf4e6ec60e5603324f6a40d2a060420322dbdd62", "bb5855056f9a7e4c8f945bd965526302fb5db254", "9a5473662819063cb60c1b29e6544b9314b9b29f", "fcceea054cb59f1409dda181198ed4070ed762c9", "0db41739f514c4c911c54a4c90ab5f07db3862dc", "b5db3a52a8d22d3a6cc3ea66f7babef7b8503f53", "54d78ad2ed30557474fabd1d3a9e5db1c76fbeaa", "a91c74e3bdbc560653e25fdb02d337a8d20186f4", "e7c2670a0785d0fa1fd768f5c119f0e43a376b97", "344f647463ef160956143ebc8ce370cca144961a", "7730fd15ff14dd84d71f965bfeab8e4d790d91d8", "70b23c6d8c713f5663603343b19ea5873b6696cc", "dc771cd7780538953811a5b6ae0e901ca68cce3d", "ce12bbb8ce974df4b64f18e478d7fa99b722de03", "2e713b922c760b7cc8d3e7d12088e9806f2e9a8d", "f9296decd223b13fca96836caf42aa037cd5055e", "413f4e770cd05fb382d8ceb4f33cd2726f34b9b3", "92980965514210b4f6dd074d122078d54684f724", "b498990a90a2b60802f9532fdf88f23caed7e864", "5e56c8776b5aa6edce068255134ea31670755b0c", "343192938edf3e650169f1650349206f2b02e00c", "1ed39db202e606f25aff93f3e4fe135283a50cc2", "59778de271938df6de938deac17fd614f4640ac5", "0aaf9868487c1d2ca4c73aaa5f3530df6ee431d3", "8e74244e220a1c9e89417caa1ad22f649884d311", "5f2a3fe58974a4e60dde5cbee46a64718ac5c179", "df01f6be9573a7864e86d960db7cf3cef3a8199d", "8e679b8f165b46521c76a766ddfb538fa90d3f94", "22d5719a95be420e0d1817799498e987eaf6676b", "7da261ca2cff495bd7744504797c4c3d5b8b9b4c", "b13254c2c9ca90f57e385d34abc7fe78d74e5222", "557890cef6e9285909904fa141ccddddc0da90dd", "5d15b9a200f1eea522846cfeacbec77a7b59a8b7", "a6e7513371a49cd7b8b30bb444e8fc448c5326cb", "d01e65591745fc46a3f69a6c9387be17caf55c16", "55e28e4c174bb7ad2fd80be3c13a033bbd91ac7a", "1d4f56a9bb093c52569917537a93c7671db28e6f", "18babfe4c7230522527a068654eeea10b1a827fd", "21a82c06f71a99ce40f5ffb72fe2908abac8433c", "a9721f9680bb21a0849a912ed24eec9ba50def9e", "159afb7fb0740f0b48b812ed5183c2229089044d", 
"2112edee4a60602e9e5dc5e4f9e352f983f0c8c1", "8877c5afa16b025452e444e0798292fe7ee4dca6", "53f981cb6f1cf19b08255c571d62cc1073fd792b", "bd8ff0bf1c6ae27789cefebe2f55dac25ce6f1c8", "46f698dacdb5f76d6b4dae67cb1ae4da2b789398", "c2f2c89d7615df07b540748d6c53485c4cbfa9c0", "4a64b020c72db15a729939a2c041ef4f5830f0f7", "5e8677eb21c3a5d24c52bcb93404416f7eeebc31", "7e90f9bf61451307eedf50d35774b8398ee5e15c", "9f182f2f1ed71e57cf56ef161e05ca3e8816fecc", "3e7b5b07da3465103929b4347852d456c0f0ed58", "f668ada16a00836498445bd29037e4af21f75d7a", "b5c945e04cdf204358e7964290867b38435ef458", "f7dbb15ed72d1282445178dab3368d7676763aa6", "5df0fed3b37ffac6d0ae7c0a3ccce41c7044e8e8", "61ed130bd12c5ff8170a9bcbb4e108657ec5942a", "aa5ed6ee0b2fd53df5cab952aa368f8c4908ffeb", "f3b56b873c48929361c1cada7b18177e3f4d2727", "b6cff4ba590eab964bc49e079ed1a72a49d9e3e3", "391ad25a3c5c7c504d46bbd21cb13464dfe052e6", "9cabbb686883635d8755706ee4f1349d812d7ccb", "35e730f7967155b9394f9e5d3cadf2b955ce9a7b", "14f0283c703e450e5f17cbe94878896de865ce30", "998b6a419e5ad9ed5b9e83ed43ee9688878505f1", "7e02b0bce72a88f2f70b199c5dc87a01fe217832", "8d7451f5c5849cb0397c3ec8cd7e2081b5d9421d", "9fd7d9e982fc2c0710a25f4df568d35262deda8c", "1a20c1d04b93d91cf2fd0b4e3c7bf1153a93942c", "d04d53038d4267cf25badc5d6acccd2fc910a8a7", "8172af5b5ebc0da8c0b1b0e7fa967f9a1bebe646", "9bf0db2da5fcf245d353350f69c8eed8a63eaaeb", "a8123a4e68642b602b5094f2f670ed7aefdd2f58", "500eeaffef9b28d9253d5ac3fd0bb3a9317e482a", "797230c284a24ae7b3c9781f8188a59345b90838", "22ee43dbd2bdefbc8945d453c6cd453f49ab5eb7", "517cc1084952133b6d2ecd0a535cdc3ddf8955d7", "f496c321dc171ddd1c0f280436fe40722e1b03ce", "9eb891b89443bdfc8434e4c9e08dda0253fd242c", "b35464a8609a257b0b43b1a0faee0402a054f1be", "2f04c7aaac3a884088be550d1be51b4a0b585a2e", "817502a2b3d7db07f89158ce2996a9e4bc433a8b", "5b74a57508069c719cff3c5410984be76f6b7785", "d5c14e768f91e97c16d6aeac8b108458d3eb8004", "075d9baf2ac827327a5fe63bb1f873c4f54f95df", "ec0585295e05bc8d4eb57c597eff7b274d274704", 
"db76e57cf0b58416a85a2169b19ab441ed82bd28", "016860404c0926dda53b9bf4745f3eb9708fa1d2", "4c39000bbd6761dd9e5609fe310af51facb835a9", "71599901b889e24372012ac405d5cfaef79f94b8", "0c8769bf0501fdd7fbc94ca81601de4a40679295", "e3255ab54d4ae6cfbdb7bf103108663bf4c6e0fc", "6475c1e95da0a3bd36786a32d00a893d85460e9e", "65bccb76384bc95c8fe53f2d2a8e3f048fd880bf", "39b741be40e093f92519cd15cd2deb6e114d6200", "0e56ba97ea6eac96c57ad4773af88a22d44f41b6", "42cc8637a5e7b8203722ba0dca995814f6dfd525", "ba1cf2d0493f25da61bd816f92712291999c0ef6", "dacf0682d0a58af9b5dcfe15243f30aaa2d94a34", "2dd3dff173686d66af70e7180fabd8755dd1307d", "c610db0ee2d111452f70ce4854e48ab9d5c2b1ab", "20f9a09defe5b02b98c464ca6df36b3b6358f60b", "92a93693f43a49a7b320d5771c6afaff98b27864", "5cdb9ef5ad646a6bdcc7c1fc33eab24e0a4a6bc1", "65237b5e96c7492a0e5d01ddea5b1d381da408cd", "c3717efdfd3d19018153d52c21b8b582c9629fbf", "220f6ef6f4bf4729871822e08080142359012e10", "a67a84115304d3e55e00607d0dfbd3b20c8b8672", "5ca2e14f91dffb4784c443fe5cfe7838c3f3713c", "a9e0e667537c9059b3050a64d22b8fe86787d913", "2c5bfdf6b4f9c06a42280e99d101e628a1dc597f", "c8633d0eff70feb0a378337237d4a03fec582290", "82319857563e7b578bcb66ec4df1c85decd6a624", "4fec93d62355224981ff6323cca962e6989c0b1e", "1a660d8576ed749610e0e040076d27973aee44ee", "8c8a61fc2c0e426aa64e50756b777475f3beb49b", "f9f34a0558a58720e1aa55421ff7583f049208a4", "2524d9d45471fa8908b17f04060b68e7cfeff841", "fd9b86fb36a0b48e4eca0d73f8c3c683fdaf6231", "2731da4a48291640b2698cd5909c283484db10c3", "f2b627f11a36c359de9aab6c44796e1d30d30b64", "0f7fdd7f98ee5fbfa7d293e0f1fa399b7a4ec13a", "14e9eaa6ac23996e9a62060c8da90bdb7116ee37", "28b59fcd3d642f8d92a7c868c0076b00bd7f55cf", "bbdc66937d1ebd5cbde36f3fe8051e168bd69603", "5c24758a6deec8fd726b25aaf1df69fdbc47072d", "fd68662e0fe3412d47ca07d33246212d4ecfe590", "b15e703ce4f01f4f0d52e835e2c907d5e8361bba", "58c17688fc08eb69ff3b20803a227693289d63c5", "60978f66eac568ae65d3acdc6559273fc30bc8c4", "0fcda01765c5a0b4cff99b5ed5139a6e1eddb689", 
"8db43d306a70e23e2a0e6eb2fda60f14b73f65d0", "b40856b30023d469810aacfb48e1c502804e8b7b", "166e7fd811d104254155c90506f2f7e77947534c", "baf3b3abb3b449a8c1c7850c3ba0ae813aa6d2ad", "6008ac8efbea0f9cfddfabbfa02f62f742685028", "e5a67c425ef3947f2294964692c9c3efc8d0ccab", "9d6c04aa6eb0c3d7dce7e5717a16b56f86ffea48", "67c25e73b89166563cf5b70ffc043bbff23a321c", "7c9f884137a22c3bb5cefcd7dfc55e3a83979771", "27b630b8d189cc9d6315cdb031573184e7c875b3", "812e06a4cee26629e198a0a6d991616933ab14d8", "e48dea5a5c4d74ec12b6817997bbef7975bde526", "d5bbfef5511d215bd3d3cfee3926d313d48c3001", "f3a57b32a53db39b188879c4ce2c22d6929f43e0", "056d5d942084428e97c374bb188efc386791e36d", "98a8ce914de8846105ee1ee56c0b201d0b0357ac", "072457191c91a50bd16df6d211380c9fcc21a057", "b88b57d52f6f16986381fa0ba9d5a1387886c157", "d8fcfb25195b71b85cec72edf160f00f160cbe70", "6c10226a1f68997d85aa0970521a9ff9507e0caf", "2a8812f286669c4bc0eca462c6cbd90a4a9fb5a8", "c8e32484bbbc63908080284790edafc4b66008d2", "d5f1e21f71874874fd4122899a83aeae58367c5e", "6b55153f8d87bfd0dfb2f24eb2aa61d40e314cae", "b7b5fd3e2cfc39967e389b974c1cb418b2bf1b8f", "6dfd05ae49dd3b7ed372b2a02937670cb69dbc5d", "272b364f0ed647dbdbc4ae80f10ddaf8ada3a07d", "4b7302e805eb079ccf2b9b6bf99c09479d4190e2", "2bfd2add86071064bcf3795dbb57bdb1643ce2c4", "2701bd6850dc1b811ef7697cc1cd19405b99f990", "42af47b99e972f2f5f1897f00082f37496aeb7e8", "3481a544e28cfc14108b0785eef7c12747e622ee", "2b8c1daa238f042612260f0c3f8ff36e27c434db", "e87b1ae1d9af4182e7c5b62c9bd01b15979dd59d", "e37c8e2823cc3429caca4420f19adf329c62d313", "fba5b5a8675d5f9e57243fd7a1320193db321150", "d84f9b4dd297075aecec1135a280f92c8ce7f1af", "aaaefba1bd0a9a9ec6c66a822d11fb907a05625c", "eb8ee9e0672241ab3acf7ded2d22d97861da65fe", "9ec2fc7df2db247a55a1af4fcb85710b25402ec1", "0f29710e54f714eeea5233628afc68c680d881bb", "4767a0c9f7261a4265db650d3908c6dd1d10a076", "4a0cec395dfe1ab634f53cf295409281c40ed5c0", "0b8c9acb478c856eb157c648b25b3d60117392fd", "f9c580a24bd3f73190ccdd255208478e999d632d", 
"c887a86430f0444f71ea4ce6da807455d0435dc7", "940002939d519227427619ec437e04676235d558", "e604e8a2a639df10704e3f97fc93332589b8c160", "4a0707131dab1c64c03bfa0809b050d34fafeeb5", "5e024e0fd57ab03e4fc767ffa5fa4c71328d5ad7", "2fa16dc0ee50550c1bf58c410912d48cddbc3554", "baa6be848d52268b71c2486e43940dc64412543c", "dc23beb1e5c7402b1a9d5a7c854e62a253d0815e", "b3182d09761014b4efa2787b6a66700b52c979a6", "751923405938a9f6b9dff74cf34dd34c7427b30f", "159b87e6e68b18f4daa3505bfc415be9b21a7db6", "8628cb21f0ba3c270d731646b350a059204d26a4", "5fb635801e65a12b5c52085da78152d2df6c2ff2", "7e6f7ce8ec6f62c4bf68f84207973914fc8e79b9", "0c25a4636ebde18e229f7e459f1adaab1e9a2db9", "0393a25756c6e5d298ae880f6b7b5c9285092def", "7372c1e9cb87dad88bc160536263e461bb7ab04c", "9b477dde9aabadba1ad2f2f180e755f31af904aa", "6388c3f3559b61632942856bbede67b724542c9e", "d8671247f6188620c6e382ffcd15d3e909647c63", "529b933b0dc9c657cf829fd9bb7ff7c47d5e6d19", "3227b933d917c73902746ac2c36a72927064a3c0", "d120b3c3ea620cbc6400f0e31d584fd2f2e8c8b7", "82ff25b6e7749e0210b2f8d5a0666f3499745154", "6eeb16c858a53d48b7d669d1feada739c80c8563", "0afc4ce1ec16ef0a9ebb255004348720e0fe8c64", "2c39e50b2000fd73f648b25d70321815e410162d", "e3cc912b9ca074e3d419c1dd289fd1b067fb61d5", "d7c094f5be41a13a579d8922ec4d50c70be1c276", "13b2e01030ae41983003e3ae53b5bb3ed3e764f0", "280eea0fa86c811b5422012788853514876854aa", "1e82a8965f08e8d38b16f39412e6e3c456f6f22e", "9edd7c738171b0f36b65ae771711c38ed1dc38ad", "00b543d51bf6d16b4027ded325387518cb7fcfe1", "efe208a03e2f75ddcebf8bb0f10b1c0bea4824be", "bab47c7bf80c9310f947cbdaf71b3c983c497b68", "507bc8ab4a598fb2f4108713e8c1e55c180c007e", "4774b9853968b12156287bd42bb425d79f99e313", "3c7d2ac29c867600d3bda5b356db76e3a5014a71", "71d3ed17c0642234a921bb45fcadd86520794941", "25a5f7179b794ab2bb7283c8337480fccee51944", "349f305fff405c0f38b9df2e1648450eb841fcea", "e3cc5d86b2032d01c1b40de0da3b7f4458c9c0ee", "1bea531e8271202462c7907f60a8458fa5aec00d", "e95cdc2997760c90aaaac6561c84e68d5f94aa36", 
"1ea2a53a6cb9c08312276a2f0646935d5fab5ed3", "c2d35b387518496d8100f70e82597b002eba600e", "fcf0963095ff472132fe46d0cc543aff2735a0bb", "4b2bafe419e5b40f667601f112c6f6cf8a7c86e3", "3a92179c7d9f938dd313ea2620f5b3329f167d56", "ad8e7c9bf20a0507acb90b17574da631b3d8b7cd", "0f07dcf92588945eb0d70893cdf0fe4a48552763", "081f0ff96de80b4a29d9f52e6e058bb9482efc2a", "574476d73737fc8cdf2fde04a9750d1c3d83934c", "361164861d8e4676079219f6d099358a31fc4025", "e10a257f1daf279e55f17f273a1b557141953ce2", "cc7e66f2ba9ac0c639c80c65534ce6031997acd7", "f08b25b904aac24b84e35ee7acbe208a7775f27b", "0fd2956ef990443f584112fa093f85a90a43c4af", "e9336cd4c63139c481c2ed9624b04f5b9a477d80", "5646fb8250f8c900e5f281665089363abcfbac8a", "6fbf3df212eddaca8d3600065b341ad5174ae5c1", "819cc41c4b5370ef9ad6eaced1fe4005aa5fd2e6", "becb704450c6b2f7f57f03955036a5b66380b816", "a8b63bede77e752ead39453838a8ab66aed7b970", "3d21b7b4f48e614bc2f2b87eb110aa329b7d66d8", "1419956b08f9ab398cd2100ddec74271ef5fa72c", "78a77643f8040b3b6630985fe4236a302d73e562", "1922082de84afb69c966a7628e86012342b475fd", "21262e01039e5994114b4c102fc80e9afa3f1bde", "eebe513912302604fe0601b7e4e85203f69f3b1c", "afa24eb881c7011e851f8a12dea32a7293cffefa", "321d1f8d13075275b207dd048e9b655aa8846d57", "7046611b1b7a8054801e513cdd89926b63f1027f", "4938651efabea4c55acb9485bdb0858a82e9013f", "bfe69413c439327680cba369c155f1d0fffb401d", "77a53d4141a8081657ce08b13dc3328ac4a4e689", "572791e2f290dc0ecb05e56bfa714c4b7af79b08", "d75a9e646500d543094f7c0ab80c9f5c30808304", "c79eb3b4d8324eb824493e53bfcb4d3980398523", "b8b202fa955801da840afc9f523d439d14d87cc1", "da523ee3b7e8077713ebb7d903c3dc3bcb78921a", "ed0b765d4a477240a5c221c8b05dcc7e3abefd4f", "9829848932525928e5b06a8505dba2edda22c569", "5d9822e581ea793bc70998ee8b40c6ae37e5a994", "d80f1af929b507593c06442c6ad93e381b40283d", "2efc4eee3953f6b52e23989bbcc2598a91e18ba0", "3d2758e39308a64d2163cb638e684f58f868dbbc", "5fe89653d22d35cf98a6fe6e6793da82a55f5c9f", "7af8fa8897c6f1ec1e7f9eadb01f74b48c185588", 
"38079a7f127ee9279ac99c4e18f87e80777cc895", "e9c000b765ba5519050a61726e007c430cd5bfcb", "6e17c16fda71e8cd7abcf5b52b7f6438a69d9958", "4443ee5eaa56e41acddb62cacbc2f6d8c84ccd59", "5ce40105e002f9cb428a029e8dec6efe8fad380e", "b7e4f5531485b21c9ff6b0a3f93c15fb7a25ae81", "1be32039596ff52fa09772f4606b65845d1c5853", "291be6e3027575287c24f4363e4bf7a8b415d4c1", "c223b2b7d38dc4e0ad418c404b2d3c43c62213bc", "9dc263210770e7e836040c8e9d0edff40814254b", "813dc79f5f439050d68c923a410940d0d139ceb2", "4adb3fb481d5ec33d6161d780162b616ab6c44e2", "8ec7194952ee9e7cf383b1a1b0aeccaed5b7daaa", "cdf246d2eb993e80786443a984729b874f958657", "35035f79256a3f19a111fff34df6d14876d83fab", "ac0d88ca5f75a4a80da90365c28fa26f1a26d4c4", "458e44d20f7a85a0ce378b48a41febb16383c075", "444c99362e81925c1e710eb901e8f3b4659e6a42", "1e335a6d3cdfe8f53540766b1495c45f72d8fb2f", "89f680a78d118f14753056496dd26059ca08a6c7", "8fe0a35dc47698b45f3812bb502b0921b349ae56", "0816cbac9ea8f4425d9b57fd46174cb35cd5d7cc", "328ffd128c86fe0b2d78fc59f3b83cf9c267ade9", "e38c0c03f272270aed6a39fbc6275b132dfb498d", "2a547bf34d185f80a0d476148721b6f05c276256", "21ad377aeb6ff099c5a59fa82cec95b0bba177bb", "8e0cc47c194ef7daf15aaef14d61e493879ae137", "db640eddc51258cf6b11e442745d9a4bd5d6995b", "5be6226f1fa0c3a6f5be829b00f484582d531b6f", "7d6a8c2a857a4eb3d9f6c3c470906f2182c9473a", "4dbdf48318339a7ce06317137e7aa664ead783ab", "739e67fe178d1f96419846b34d6b2a90e6f7d3c1", "afeac9270149b927b592e2299d11095fbdf8d308", "8432cae0d463b46980dc3ba585affaaca90d1072", "4c6ffe1cc479d86adf5126174900e2f23afb22a9", "cef092bf9beed65e379ab48ef2b43498d4aaea92", "46bdfee362a4de978d24d53fd704d64d82273718", "d04631e40b237ae29cb8d2bd187b04033580e63b", "7e703d57e9eea387f4d01e47f4990fbe403e4a8e", "1da09ba7340c77b3f943c15f80ff40f6f9d14eeb", "cf6b966a2d45fd44ef8f86cf0ba805cdc4d4ad57", "e281a0f632f5dee3697578d8ddf69da92c9410ab", "c5ba4e0a8abadb68b3de135e3da522059a99b2cd", "0b79f0999eab1e2ac586a97dfc9a71809e7ab262", "b98aec5bbe7116fa3ae5f9b4d77cb1f1141eaabd", 
"bd4f2e7a196c0d6033a49390ee8836f4f551b7c8", "1454b646866052ef61a838737bd74ae191210cf0", "cbae3eaf926aede9bec7ce2e28c35c1c50b1b43f", "4820e34baf57e7b3d8d70df915c1710e6e93d631", "b8a4e7c21c3163b7595dac0cb00cf518e2dd82b5", "79e39f3d0577b9c5a47b93eb6d75bec04d14c07a", "b12c9773c6d948d36698666ec351a78c449c0d06", "27a2fad58dd8727e280f97036e0d2bc55ef5424c", "896e2776174dcb86d311789ab83a266151d0595b", "d914c53cdf26acc64259d381fbd45c4e150633ee", "b3e6e4bed1b5f73aa114d19dcab214661a1d0cd6", "300b819bbbe857f5fe89d0895f907073fc288719", "54c5fad54492650f6eccb90bafcab8c2b779ee2f", "6b78f2ece211c2d1eb6699e1e057b7beb3e0b4a7", "10b36c003542545f1e2d73e8897e022c0c260c32", "25dbf9b12475454585d5050c5cf446e1c4f6dd27", "b3b97439c33063c9c45c305b67dd8f6066052096", "6f79c4b82f9ccdee918659a8f7091b8ab99fe889", "ac7f898ff5789914d423526c392ee61b979fdd8e", "d0fbebf278b7e4f10a25e583743080a57b2b3f3a", "86374bb8d309ad4dbde65c21c6fda6586ae4147a", "01018a509f32601e1bbf7f0159aad1a513e23f92", "9db841848aa96f60e765299de4cce7abe5ccb47d", "98fe0a775064202fc07676e099b6d853f2826894", "8566231abd7e5bc71ee0bc0da84b8d76ce07a501", "4d5c179ffdf09255edadb1bb7233e0c23e55b8b4", "ae872749c88331a93f8078aebf3a8d7f6d9c48fa", "4f7b5130d0356ada11f34bf469b2a9c6a00a5e6b", "b64c2baf82c51a7538136c32f5193bdfef946297", "ad88fcfd12b62d607259db8d98e2a1a0a9642ca0", "c9df1be91e16b3fb566e54fb0bc4ae58cce9935e", "de92ac27693598254554531d8cadfd4728c423a1", "c81430d4a9171d52920e36cc85bbfdea8457487b", "1bf01e83fba634bab085ec5f0ab86a1a67da8577", "11a6af9b32a93c4053dc12f70afac64a4138b2d1", "29875d51da4063707a5e16a90493c69be6fcc006", "88d84fb1aab19f756c9d839e2a473834ae4b68b5", "c9baea734a14e4302829769ac39fe8c48fbae5a1", "546cef6f86fb5a9fd59d40d9df63301c8a9d7d15", "8dcc1e0f0215dd5fcb6d698c35180d40dadc8dac", "63dd47f7ccf4bb22c1e308b263ec65ae382c86ce", "b5968e7bb23f5f03213178c22fd2e47af3afa04c", "4801256b4ee39e71d5a9a1046c57e3ad4af6735a", "8dcc95debd07ebab1721c53fa50d846fef265022", "b50edfea790f86373407a964b4255bf8e436d377", 
"8d6d0fdf4811bc9572326d12a7edbbba59d2a4cc", "232f21aa133b75156665175158c65b89bded4032", "22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b", "74fcbd059e6749ee5073b7323d121132799f97a1", "9d1e32f6af50354b64ca8f004746073473559056", "c0387e788a52f10bf35d4d50659cfa515d89fbec", "f622efd8f7ecdad1b5352ce6d8677f44638425da", "6c690af9701f35cd3c2f6c8d160b8891ad85822a", "75a92d92ee59555c847973a7422d7356514cde2d", "5bb24d1250df62a56cab1445f1d8c5c61269b785", "451d777ee33833a3b5eb6ba5292fae162c6d265f", "a53ffc7a9ca049cf265dc37c818b6283f56b6dbc", "97527b3c58591926800e3586e13d99a8cf5b17ff", "6f0417ed89e3750a3b39d0bd7de2b4d12879c2a4", "2e43e602dd3800e08d7660e20a128fea49393d52", "6a90a2bf7f275b81596e0dacc78ad2991eac0311", "d348197e47a8e081bd3f12a22bc52b055ecd8302", "cd7c007f5831b294160eaf1cc6270af4a0ca9bf4", "e7397f7f0e83494825d63b75bdd40c3879f369cd", "a6f79e6b63ff8f6a2e472625d9311e8c60ac36f2", "0063b44da282eec78045ab59d2debbf61959a4a4", "ddf25fe84789821d204fd09026bb02d891d50399", "7c349932a3d083466da58ab1674129600b12b81c", "d1dd0c714950cbd89f76ec6b039201eadf74cade", "fd1003eeca71ea2e92747767bfc6c862c6036f37", "dbb7b563e84903dad4953a8e9f23e3c54c6d7e78", "101c5b39f4fc4dda1f39bf0c00e196f0a4720af2", "1ceb1c0fc4a9673da6c394ef729e02c9fb96a83a", "dbd566003baf517c2682be50bf50cc2b046ab511", "824364e018b4445bcc61ae86e112e68a4a067108", "a94b832facb57ea37b18927b13d2dd4c5fa3a9ea", "2f2aa67c5d6dbfaf218c104184a8c807e8b29286", "2f529605ed776d4fbeac2d73054247b495504ac7", "af4745a3c3c7b51dab0fd90d68b53e60225aa4a9", "5f5164cf998a10d2bef37741adb562ab07fac413", "dbec415ba09ab66ea5855aaa1267796b75ef7e7b", "2efc6f98720b804345c030e22aef6c9f4a53023e", "610c341985633b2d31368f8642519953c39ff7e8", "6cf9322009fb8f36c01fc54d213e9cd745e62468", "6345eef2ffe46da6d77d07446c1329da7ea00f45", "afc560bde09cb47eb2811423b14d14bb6a5d35f6", "2f43233c1c165f225bb002874dac967736525d85", "0deca8c53adcc13d8da72050d9a4b638da52264b", "360a590703542f2ba345b432416398b6dad9e3fb", "900175d24928921600d09985211b6b9bfea44ce0", 
"77f98d1dc114e4a7a99bbdb2363d74f549dfa417", "4a9afcc6ba45c0ff05ea93d306ff73ede32f7ed4", "fae76d0e710545972807f18e45936ec5c6f1fe5d", "cecd1bae0495610fc6f1ee05f6bd65d701b75259", "9e154dbb24f5701921870f5dbd5609182e2e9bc2", "221c18238b829c12b911706947ab38fd017acef7", "a6582fda1ddd10c210e367119e01dfbec4a65b16", "e5a1864f6073f35920a8f7a0a368ff66b9dc6284", "7bfc5bbad852f9e6bea3b86c25179d81e2e7fff6", "4e12080616da4b540c8f79db2dd1b654cd8345ce", "fc11de4aa7a2cbc3440732bd66ca6a246cde79f4", "96ad3c4455a9b05fb6db749495b4ae26a6fb2fab", "78569509e61269f5d2276b80f4fd41c22617ccc4", "bfd3d184c3a9f5ee59cb2d1ab92cc1a7124319fb", "98c5b88db35d7ab2d3cc0a63c7ff1414160d2aa6", "185aaed9d48f42463791726f1ddf4e1be64a47d9", "2312bc2d48a0f68bd5ab1b024d5726786455da3a", "443acd268126c777bc7194e185bec0984c3d1ae7", "7323b594d3a8508f809e276aa2d224c4e7ec5a80", "56d3df5ce2ffb695728c091252087979be31f0c7", "3597ca03bded3717f5c88273e4b7dbf24545ff83", "a9426cb98c8aedf79ea19839643a7cf1e435aeaa", "925b634cc26a8e74c2ee8889472a77e7af37874d", "21bc819e9d6c5d5c545b50addaab6f54f719a05d", "60593c176ba39e8cb63ba6a7bf936553984bb67c", "80c8f118f37f990905205eee4f3b3811e0488bf9", "b4ee6b62f6a89feede06da5fb7e5ad6ec0265175", "02ddda27cef81c363ffffc4edfdfbd1dea14149e", "e5ab1ab5cae0bf5103b6a1bbb2e8f078d0b01df3", "306ae56a4fc8f090e58a237749950e1607382ed7", "0ba544ff0d837ba5279b03eb91246d00f2c78817", "3ebbacf0bfe95781e70ee37085bb2addf30a40a7", "57fe081950f21ca03b5b375ae3e84b399c015861", "aa261599d70a9e649501cae5cf46fbc56229fad8", "1dc35905a1deff8bc74688f2d7e2f48fd2273275", "b36e945243d56fe25ba83821cb56aabb6bf92979", "4b970ce5297312ce8869745d3aabdd3001dc47ef", "7673d5fa77770629d040fae54c214c60ba69574c", "2e469c6e86d8f9827d8e665db3b343fefb0451d5", "17e0d8f9cc53f2c711d73eb7af33f59302a1ce85", "5bc5cfc2622f6b0a0003d7b115726d075205a2cc", "1d351a4a61d55c982061195f46bc93fbc390fdc4", "a0a41d30cd7eed54c714d9088319c86220e0f30a", "7c61692a8cd885a36e8c93114a81417182e0a111", "efc7a620e21abbc882d5a26f0e7a78ae6960be20", 
"1f913bcfcdce870bbaa9ffedf179478d1fd44522", "3274a13562029f36e2f0fad3270e3ecb9ca013bd", "0ffee18b495830d373dbc65f67a452d94938900b", "910da5e0afef96c8acca3c6a4314a9ab5121b1e4", "8a323bfe03d72f288b0fea8ebc419b4214792058", "4d3335465f818e065ffc70db58db43d07b9d265c", "2a12c72b0328a23b0d7ea63db1f93abf3054beec", "79c7a3fd16c575f68de835efdc5e25e9fcc7d438", "d8d1fb804d1f4760393c6fd70c9072fa1b39f02c", "fe7849b48c0b70118030ba0da9e39b1b190f9077", "4814c849a1c441d05f33c7089364086de88d3f52", "d9dafc343727db2b6060c868d748f97eff6bac7d", "d3e9c5a63215a9c46bc61ec04df5285ac355e42c", "012e281061126caf2e2c94ca6ba0116c8a8930fb", "1f4fff64adef5ec6ae21e8647d5a042bf71d64d9", "b1bd2e80fac0b3629f46a3e7fd403e82688fc733", "62d1f420eb14c11a6df613994b3cf15f9db05ca0", "776b77306bdb852c89a22ba142fb57c8e8bb7bb5", "16d38a5af0f60603c5edbdc32885146f6df5e0ba", "a317cd9435900b9d47217f2896d55fc2c49c8b6b", "922e0a51a3b8c67c4c6ac09a577ff674cbd28b34", "4ba1cf65eb86aba729192d2f0fe2cd064ac346cf", "e028bef776114add5e323a6353c57c8c5a8fdfad", "e1924a925cfcc3db29f8006b8eed83aad9721a24", "d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae", "98fad0d5c3ebdab54732b49cad3d8b3a3a55d9e6", "9b6d61491120bdd579f53e8c5f7cbe1e05cbc91e", "b91f54e1581fbbf60392364323d00a0cd43e493c", "3f008ef45af7edd6473d164e3da2a8baa6f37ff0", "1d6faea849d6d05bb74f84622302976a2305a17f", "982db27f0a092d5c8db88e959a77fae5b4f9cdf6", "09749e7b0ae6bd9ab37671fcc4f0e7a7bcf9ff2e", "26f5b8a79fac681ffb132c4863c51a55bc2b20e2", "6097c33a382c62a44379926ee96b23b51dba49c4", "58ecbe5e7d10b4176ceaecc36ae05e15908289c2", "c8f216dbd43dda14783677f44bb336c92211cd46", "8f3e3f0f97844d3bfd9e9ec566ac7a54f6931b09", "5a6b2f3a542322be153fc9104f3064f2a1bc76eb", "cefd107b19201cd9f403e2f9332c690e81f770b5", "d82681348489f4f04690e65b9ffe21b68c89b5ff", "37acfccfa44747f956e4c0c56aa7ed5cd5ecdb85", "0629bc2b12245195af989e21573369329b7ef2b7", "016194dbcd538ab5a129ef1bcff3c6e073db63f9", "35d7b5738350a1bbfd8d7a591433d1664f909009", "147951fa2e0df54c5ddda0ff82dec057dcc61f66", 
"432d5eca44ff558642491f3bb7f44f500993fd38", "c32fb755856c21a238857b77d7548f18e05f482d", "4aa27c1f8118dbb39809a0f79a28c0cbc3ede276", "220b66f1d7d33c3d6439133c211810d7cd642354", "f7c9bafc66dc8d8002cbb2ea926378bce2b3b251", "a3add3268c26876eb76decdf5d7dd78a0d5cf304", "261a7be6c650de797c7490aeeefba98662acaa20", "b36a80d15c3e48870ea6118b855055cc34307658", "24078db5422dfddf14b00fa79c38efa553845a10", "7ab41d2fb37079d20db5e25fd6e71755673f82f0", "abe9f3b91fd26fa1b50cd685c0d20debfb372f73", "9aa8daa04347f904e277b633dd70feae12cd7c42", "5ef98034e3b662c6bc0dc8f4e863b0ff72b6e9d4", "ed2f711cf9bcd9d7ab039d746af109ed9573421a", "a6ac6463b5c89ac9eb013c978f213b309cc6a5c7", "06444375d9a0c44c9eb2d697c6b8a7d2a8aef430", "174b6d661b96840e27cd9435c2dbb8e538b2c8a6", "91f67f69597a52b905c748a15db427c61f352073", "5ec89f73a8d1e817ebf654f91318d28c9cfebead", "74dbcc09a3456ddacf5cece640b84045ebdf6be1", "e84e49c9530897fad7927a06ac4a48ddaf0adf0f", "dedc7b080b8e13d72f8dc33e248e7637d191fdbf", "1f3370e2e6381408efe11e69ab12586bd6f74dc8", "18c57ddc9c0164ee792661f43a5578f7a00d0330", "ae9ab89c51d264fb7b6b57d37399a7c629836e35", "05b435174d24b14b17df4ce5af79dc6086a2b16f", "a825680aeb853fc34c65b5844c4c4391148f18c3", "3907d83f14ba9e2b8a93c3f02b04ca0b81901c4b", "8f712a92c63856d700a1573d8b911d8cec700451", "f249c266321d661ae398c26ddb8c7409f6455ba1", "73f56d17e91e54c9e4bc86477d01399f07792f79", "f53ac4a7f9dc5b641fed6a48187e12fc9cac7a87", "7fa5ede4a34dbe604ce317d529eed78db6642bc0", "7e6aace92f8caaee4fbce488fc35f03baf30ff1b", "64e471bc1bf422bd760a39203647dd6b3e83c1da", "05fdd29536d55fe3ad00689b6f60ada8bc761e91", "13fad0ab765c8f92c2c7ff05eecbb164a3e0e56d", "394bf41cd8578ec10cd34452c688c3e3de1c16a7", "1fd8476926a2b583657661e3a9deeb93033c2c85", "a20ba0057067a3a4cf662c0c5e1119a173d090ae", "600ef88c6cf44b05e3812d468e34f51cc62529d3", "ea9cecb5b619cfa4afef6c70e193c2303696a4f9", "b7c4b22d44be82b2e1074c5c40b76461db4b0292", "8d74fbd46f9d5d615e350c4593cbc5b5ca63fa8b", "b4b1b39f8902208bbd37febfb68e08809098036d", 
"e3498ea2ece32753fe714bcd5186d457a6bbd1f9", "2453dd38cde21f3248b55d281405f11d58168fa9", "ccb9ffa26b28dffc4f7d613821d1a9f0d60ea3f4", "462e4d0b35bf571bfc35dcd8e9bd589dca07a464", "3df5e17e87144b1e84b5ab9467bc2c2f233b66c7", "d38af10096aa90dfccd7e4cec9757900bf6958bd", "e22979cdf147a63be74f3816ef59ef11f3508919", "5a14209a5241877f92743d04282598f41fd3e50f", "93e3aaa3bf274cb6769002fda32f61acc46f3e8b", "c5318c79bc1b880e8356211b837b684f1ee6e5c4", "8c1e828a4826a1fb3eb47ee432f5333b974fa141", "a8eeace37181dd87d5125c213add6e15fdd9d9f7", "36b2aa7248152fdad7bc7f670d0b577c9728d466", "bf4ec5068e6ff0b008a09f0c94bfaac290ae7d3b", "0790c400bfe6fbefe88ef7791476e1abf1952089", "442cf9b24661c9ea5c2a1dcabd4a5b8af1cd89da", "56d9e2856e4edac2677672053c4ab767a2265465", "4bfce93a0d431e9f9c908bdf467f8b58fbb719f1", "04eda7eee3e0282de50e54554f50870dd17defa1", "622078fc9a773f37e61c8e5523b99ead27bb46d0", "00319cd17cebae5e1095a248260bd7be15781362", "622949b1aacd316c60a7034c44121c698a3fb6a4", "90a4125974564a5ab6c2ce2ff685fc36e9cf0680", "c3dd6c1ddbb9cfcc1bed6383ffaa0b1ce4d13625", "b786a16ca5d84257bb98024751429c9f42005e62", "f0a0f341fa1f91ee58a5020297bea02f8863cb26", "30aac3becead355545b5ab7f0c3158040360021e", "6fbe1d6815d7ac427f9ca528960f07bc8bf34a37", "f1a05136c8b8f9334a4b3d9de2a4b192d2c762c2", "81d0815a09b6c73ddb68c96efbdaf3f92968eb1f", "a80fdcb8c837fe7a516de7397373f4c4d6de2884", "25e9a2ec45c34d4610359196dc505a72c3833336", "935e639bebf905af2e35e8b1e7aa0538d7122185", "7f26671b68b5491e553ab191e85abc1bef43e90b", "211435a4e14d00f4aaed191acfb548185ee800b9", "087a6baef9d81178f2f8f001aa9be4c0d4ed7733", "4c477ba5513ec9c629ca3442c1fee15612259905", "229e105fd4d34815e476702dd5ca4362943c475d", "330dc3d87dc80dfc9fd5292cbe3c644d24c000fe", "9fae24003bbedecdb617f9779215d79d06b90dd8", "dbf6a25d10a312b38b862fd88f6cec9ed71400dc", "7820021d8a3eace969cea5f4865c5aeef4673596", "0c1d746b8e0218feb8ecac1b0b4bde16c67dffaa", "c45681fa9d9c36a6a196017ef283ac38904f91bb", "45f858f9e8d7713f60f52618e54089ba68dfcd6d", 
"52a86eccad4ae8b15b4585d1691845bf2b8707d0", "157ee7498320119f6f5da2d9c592448986edea7e", "15e024d8f5625ec03c8ac592fbc093687cfb5f02", "56662bb8a29e7d0064a35fb38cbabaef4578f3e0", "57bd01c042a5f64659b3a9f91c048b8594f762f6", "5cff58d081a4732b11e6da498196ed6fbb54d15b", "9716416a15e79a36e3481bcdad79cdc905603e6d", "97265d64859e06900c11ae5bb5f03f3bd265f858", "264a344e4d65cbc14d52f0c875f4534217c37bdd", "a19904e76b5ded44e6aeb9af85997d160de6bb22", "473f05ce910e7f20266880eb580d3a3aa877a98e", "0969aa7d4557699b7460e4159658828efafed8bd", "300ce2d1ff744fea95fda05d2f3d48766c283042", "baf2607f15aab164289e1d4b4097813a65cd118e", "d0e20aa3d61b77d17f005a1d24d7cf47600836ef", "f0dd265dfbe9ffe86ca56ba053335626720059a3", "513b8dc73a9fbc467e1ac130fe8c842b5839ca51", "7882c67f555b761e10ecc70216db25382890d9d7", "0ee3aa2a78f9680bb65a823bd9195c879572ec1c", "cac3bf3ceba79e6a6c8e51eb44c6862b81661f85", "a776acc53591c3eb0b53501d9758d984e2e52a97", "10773e5c1bc8a9a901a8baf4d0b891397975ea9d", "47488b5e84c60a32f59a253750d06bcb8f6f7f63", "a6a42e06c9501052ffbaa1fda4ad5552da6c76b6", "bf7cdcda49853bca3d91255de1a82a1fb066ac12", "5b94093939ac42aba54ab41eb1725aeba1bd5c34", "f0b77702c8f2249ee1f48e51ff9b86faffe177c9", "c0a0adb7f02d5509969e6107c914f7cc6e9ec881", "00f8387fdeb6f009ffdc9bef6a94b37294d77633", "41a5e043d499967f405e823b959e2ac4fdf9ff71", "423b941641728a21e37f41359a691815cdd84ceb", "833bdee366f1e6250dea59bdebdcad271c7cfddd", "9cf69de9e06e39f7f7ce643b3327bf69be8b9678", "b300c37a6132446590cd88ce4dc24e47945e9a23", "1fbb66a9407470e1da332c4ef69cdc34e169a3d7", "666939690c564641b864eed0d60a410b31e49f80", "54fbdd6f2f916db9a518f1a2e0b5d235931f47a8", "96ece788e0db984a27a11a9a10b6430f6f7299c5", "0306a275e80d11d65c4261b8f3d45317a49c1bf7", "2bd94a9140be6fe58e5431b03a45502b31fe6cda", "0089a590154694e0de340f357a022f6a38d60946", "51e8e8c4cac8260ef21c25f9f2a0a68aedbc6d58", "3b01a839d174dad6f2635cff7ebe7e1aaad701a4", "cc246025ec8e1d32ecfbeefaba0727fdf73cd9cb", "d467035d83fb4e86c4a47b2ca87894388deb8c44", 
"264a2b946fae4af23c646cc08fc56947b5be82cf", "04bd29ec1ae0b64367ec37ddde51a0d8f8b7f670", "480888bad59b314236f2d947ebf308ae146c98e4", "802f6a704b93291aa049e80c16af7d2bce7dd92c", "a1ee55d529e04a80f4eae3b30d0961a985a64fa4", "3e9219fdcbc17772041456cf8dbfd361a82cbdd4", "867f1a262a704ef4cabe84899310370182dd598f", "0cd736baf31dceea1cc39ac72e00b65587f5fb9e", "f727837e03a039d9bcec6d02cd87256f5a5854a4", "df90c50792971b4debfe8d1db5604337bf9181e3", "4fb569af589d89f11d84d4b828459231345cc301", "9fd864442f18a8b739efa0534998b2afdd6f08e8", "b8452417df1047ebace6fb1368b4b91be79b8cc6", "a2db611b6179f3bc4cfe0e891df7b9d4ab58d642", "a8c6954ae821f846cedc50a86e2095a20ef454c8", "8e0091f7360b7c1cf07dbd88ca13bc83a5b6a6d7", "6634318fa67bb8866d2579760c7b5763d0ee46a3", "0d5c01814f64f3be401efa4f6495d2677c16a8d8", "87ab2e74e2ab93de0316f09d76e7573052628989", "f852d7ff0b1ade73fdb2dc43578cf414a6c57cab", "6424574cb92b316928c37232869bfadcb5b4c20f", "51eba481dac6b229a7490f650dff7b17ce05df73", "0c05f60998628884a9ac60116453f1a91bcd9dda", "d12c343e60f9cc1a0c6c94c138f38e6bffe22001", "c3d60c8b1dff411982ccd8875496f1e74d2cefc4", "f825f784733c764128ddd1c620b0b0884941e2b6", "efa2aacb0fbee857015fad1dba72767f56be6f39", "fdc39dbf35b1cac8d99434140b8e5341d3f1e38b", "f1dd778af01de993ba47858cdb6305c64f91288a", "17113b0f647ce05b2e50d1d40c856370f94da7de", "8031b81338c05d5fe4e2e5f8820d185b32734fb6", "e2cfd82831bde290f6016a3e9a576e8a79a2cac5", "1193317829bfcc9b9dffa5ae85a2e2114254b37e", "c1e714a9ec329629798a88ebff8657c349fec739", "62d1b32d67e4a4b58a66cba91629aae5f7968962", "549d55a06c5402696e063ce36b411f341a64f8a9", "5663d02a09dc2f53b826849ac96f235c82586af2", "558719ec858120908ef40b27a5d32904a68f6dd9", "44bfa5311f0921664e9036f63cadd71049a35f35", "133f1f2679892d408420d8092283539010723359", "98d04187f091f402a90a6a9a2108393ca5f91563", "d03ae9148a2573bcf959c7fa343cef0be416cb55", "b0d52bb1c9cff9416fe766e9cba94ceeab12d51f", "0fbf59328d32e1a9950dfa08c3ec87eb94398651", "af740db182b541eef80bb0a2dfebd1f07bb0e316", 
"7128f1239cbd1007ef19d8fd8cdab083d33a6984", "9b4e90866c1f096a57383fb7320ac9d516a2f88d", "a3bb0ec5c4380141828b9619ad92c6ecd42a09be", "6feb0d42232c31eecee5d90290287afe803e88a5", "ea638559b6dd6b5520f9abe2674b92c07873a157", "e92855e7f939c0b43bb4e14e932b6ed4e9312259", "300b8caf79783a7eba5608b5819b6fed14273d2d", "f8015e31d1421f6aee5e17fc3907070b8e0a5e59", "ab26528c5c414156c7668592cbb04268d2aefc52", "6b9e8acef979c13fa9ecc8fe9b635b312fedbcbe", "27d9d09126c1f2138f6aa719c4937da0bf8a8b87", "747b15ecd9a9e28bbd733527c59e5dd0aa5de7a1", "6e4a1850bcb2b76d8e08ecf1d80d15722aaf6fe8", "f7091323c825553e1c5bae6969516fd3bf9c0617", "9abab00de61dd722b3ad1b8fa9bffd0001763f8b", "fef5ae23381369852a59578afa5d80414de68b83", "72fd97d21d6465d4bb407b6f8f3accd4419a2fb4", "899ff288be86ac85133c73cdca3ea20639934e6d", "62b83bf64f200ebb9fa16dfb7108b85e390b2207", "384b21d51dd466517f5ced3a59b997ddea10aa42", "d4700db5a8290ad8e501c2d6d93432dcb833092e", "2577211aeaaa1f2245ddc379564813bee3d46c06", "d805574910996579177625fb1dacacab17f70cd7", "15ea40bb1025212c231fd12a6d7edebb260b805f", "d73221adda13a99e8dd8dab101abcfeae6b7b706", "035c606bc6a05e2018e57859737877043673b7b9", "3900fb44902396f94fb070be41199b4beecc9081", "06a23ffbd9752ce204197df59812b2ebd1a097ff", "e7b5c98310a04923c2c75b38d0b6ab97d2a15178", "56e65fae871aca5f7977ed0c33f153efe67707d8", "a62d352db6efa30b66bff378b0c27792ed37d8fa", "1abf6491d1b0f6e8af137869a01843931996a562", "2bcd59835528c583bb5b310522a5ba6e99c58b15", "be313072e9706df300d86bfac54079acfb9c1ef0", "3920a205990abc7883c70cc96a0410a2d056c2a8", "2725a68be6bc677bd435c19664569ecd45c52d7a", "b6810adcfd507b2e019ebc8afe4f44f953faf946", "1f98daf89f9a3dba655f0a4eb4164118ea6226ef", "683874b070da69ce358ed5dd673ebe3e42fc2137", "0e08cf0b19f0600dadce0f6694420d643ea9828b", "81bf7a4b8b3c21d42cb82f946f762c94031e11b8", "8096d92af2b3fdf6c43769d5aca9082a7958a07d", "2da694d7f494265a8193f17dfc492c577ad4db1e", "f18c34458460b9b62b51213b9165b37c057c5837", "991673d4f9dd08893723549ff3ea866b2dc18047", 
"30d8fbb9345cdf1096635af7d39a9b04af9b72f9", "71d8fae870ea78a89e231247afb3259267e09799", "8a83c15c7e4bdbb7924ac521d7f260d24ded442a", "a6574d111bfb12d6a9988bdbbf24639d3c4534ec", "ffc8f9fe66a14aa0657e59e219364b5a852ecb8f", "f56edb6f2bf4f5bc9d54284289212b8d4a437c1b", "87204e4e1a96b8f59cb91828199dacd192292231", "07e8420c5528e8d0152424a124ddb92380e32329", "e903fc4e9636d5e5635b6970b2520b920e919a68", "30a4637cbc461838c151073b265fb08e00492ff4", "606cfdcc43203351dbb944a3bb3719695e557e37", "0b1c1c32b7ab78106590bf76fd4261aa6ce25599", "996dc200dd686c39e235aec78f7c6acd903a970f", "3de3c479164312ab3a1795ee84f20c16632c04c4", "b14b672e09b5b2d984295dfafb05604492bfaec5", "30ccfd2b4b6d5b30581356ccefcf96fd77c1766a", "e108d0bfe686438e4c5ae4cb9b8ddc476deba9e3", "47b6cd69c0746688f6e17b37d73fa12422826dbc", "d6c936f270cffd8d63b57cd7c6c31b0d9c6e32d3", "bd36544bfecd5b9ea58d0eab186968b3c9d181aa", "67490b6f34c827f107b046adeef0f5476132d4f8", "be668bb10ab47b7b09590ad4c310e13ad70e84f9", "319955a10dcddd6e97d160132141feee54de358b", "14421119527aa5882e1552a651fbd2d73bc94637", "3410a1489d04ec6fcfbb3d76d39055117931ccf0", "69b647afe6526256a93033eac14ce470204e7bae", "81825711c2aaa1b9d3ead1a300e71c4353a41382", "2788dd3b087351ceb891f7497d64b438e8e5f808", "2ce073da76e6ed87eda2da08da0e00f4f060f1a6", "e286f4fb60fd819dd36db44d0f56dc76932aaee4", "a0107238471816ac3b8fec32bb8499c082338593", "2313c827d3cb9a291b6a00d015c29580862bbdcc", "839a2155995acc0a053a326e283be12068b35cb8", "f10727c4827118a147f1301d2fd5e206aedfeae7", "a65c76169bdb8479353806556f61bf94fdec7e10", "4599b9d9a379385a3d31681696d2523beeb0e9c1", "106732a010b1baf13c61d0994552aee8336f8c85", "5056186a5001921d0a24587e26167a7ee9d88cf9", "634e02d6107529d672cbbdf5b97990966e289829", "28e9ae07540e3709e7a3a6242f636f893ba557e6", "2084902f20c3c3dd58ca063a2ec0d63e715e660c", "96e5e6ebebc0afab75983d444402893d1a467a16", "d0137881f6c791997337b9cc7f1efbd61977270d", "bb8218a0fa421320421549ec13969834636e4d47", "ed173a39f4cd980eef319116b6ba39cec1b37c42", 
"84cf838be40e2ab05732fbefbb93ccb2afb0cb48", "b082f440ee91e2751701401919584203b37e1e1a", "801593e5de666e4d93d0a40e8b563907191c7ff8", "6008213e4270e88cb414459de759c961469b92dd", "0fd7e70003c366cb93be06b5a3f250f798b939f3", "f2b95f135b95c3df4f6ebe6015098a2e1667711d", "250c0c37947d6dc32e1923c8df99bf64dc5d7e10", "28f9cf85ebbff86207e1f6067880bb23daff0878", "7f15f56d7c0a17d9c81ca21029e7fd133b2b9347", "0c17d8370ad804635591a258e6ea1ae802fb5902", "90b4470032f2796a347a0080bcd833c2db0e8bf0", "a6d2e304b2270b679855b87cd467ad572c9d2974", "0adaaeaa04e8d1901c886bced6ee4a0fbbf9fb4c", "beecaf2d6e9d102b6b2459ea38e15179a4b55ffd", "7cda4fc187f446a52cc5c9ac0e6a0752c1f0d5e9", "0fb8317a8bf5feaf297af8e9b94c50c5ed0e8277", "75777920d6b05794639c0029052f20f932aec9f3", "623092852405f452994b7067b963cd918b909566", "17d22b7289d1bd22f6ccbe33ff50ed1ca270a690", "896de2bc83ef4a2ffee582a91898ea29617222a5", "3de71ddc07619c0dd6bbaa3f7b412a9262a0e761", "d4901683e2c2552fc2d62d4eb3b1f5d5fa60a5ff", "943c372336ced4b28e15e02fe8db1f4b23bf6835", "0e0179eb4b43016691f0f1473a08089dda21f8f0", "135c957f6a80f250507c7707479e584c288f430f", "d4ca67160781e5c74b0385c3d45f35dcc0f79b8a", "a7e8ce268c16ea8c10e4c5ccd8d6e53702423faa", "c588c89a72f89eed29d42f34bfa5d4cffa530732", "c72b063e23b8b45b57a42ebc2f9714297c539a6f", "21ff1d20dd7b3e6b1ea02036c0176d200ec5626d", "339b6e6b358b40db5807ae9701556fed9b7961c4", "3c68763caa67dee55bca76f0f71dd4530f3fd57c", "78a144d5dce1a61c92420e77c11116f541a7617f", "20c59a55795eaa4f2629cc83fb556dc8c5bcfc1f", "774f67303ea4a3a94874f08cf9a9dacc69b40782", "061ffd3967540424ac4e4066f4a605d8318bab90", "a49b309ba14ad00a3bcb7b99a45d5bcf9bbc0ab1", "86e1bdbfd13b9ed137e4c4b8b459a3980eb257f6", "43b7bdfcbf061a51a5c4cf55172ffd1191ea9665", "93c14fa9778fc98fe1e078131179d3402c7d1d0c", "1a2e9a56e5f71bf95a2f68b6e67e2aaa1c6bf91e", "8fd1910454feb9a28741992b87a271381edd1af8", "c6f58adf4a5ee8499cbc9b9bc1e6f1c39f1f8eae", "016473c5b809ff55304a2923c36eaf58f02f02e4", "fff12919cf912347776b70aa76af7635280dc401", 
"3c8db2ca155ce4e15ec8a2c4c4b979de654fb296", "8ccd6aaf1ee4b66c13fffbf560e3920f9bdf5f10", "559b95b135a842f8946f62067de7e0c523870d2a", "802ecaabffbece0dc2c31d44b693967c683fc5ff", "b5c94166b3ceb7166e89abe1e951556690b87310", "3f06d445371c252d5a6ba977181987094148d6de", "b4f5cf797a1c857f32e5740d53d9990bc925af2b", "159b52158512481df7684c341401efbdbc5d8f02", "333aa36e80f1a7fa29cf069d81d4d2e12679bc67", "3bad18554678ab46bbbf9de41d36423bc8083c83", "db58b4a053c2bb1f7681fd479e8dcdbff42cc6cd", "07191c2047b5b643dd72a0583c1d537ba59f977a", "ae6e8851dfd9c97e37e1cbd61b21cc54d5e2b9c7", "9d2ad0b408bddc9c5a713e250b52aa48f1786a46", "df33c32673a050ad9fd84d55efb48214e1a9358e", "5375a3344017d9502ebb4170325435de3da1fa16", "769f0c9975236e46151ffec1724c23508c38f6c3", "6d973fb5f682c491be94aa40a184a1707a8dc24a", "cd3b9597c7f182e31b9943bc67a1a918ff0012aa", "ea5a020e64107c75f2f723967af5ad3b97fb2ec3", "63b89e654124eb2b8edeeb82c6373bdcf228744e", "79828e6e9f137a583082b8b5a9dfce0c301989b8", "41308edf82ae645923efea2d6979d076b975ee25", "2771e262e54948ad2c35a80caabc7af181521d39", "4f7e4b1b74955b54c434bdf76c47fb1e96db74e0", "ec83c63e28ae2a658bc76a6750e078c3a54b9760", "b1177aad0db8bd6b605ffe0d68addaf97b1f9a6b", "5ac18d505ed6d10e8692cbb7d33f6852e6782692", "668d2afb63f27feba389ab9d16e682ea13934337", "0b92f5734ffd7d9ff1dd0f3639e92ed18fda7824", "676600ed722d4739d669715c16a1ed2fc117b3d4", "a5ae7d662ed086bc5b0c9a2c1dc54fcb23635000", "5990c2e78394388e8a81a4b52baf35c13b22d2c9", "b66a93884f80a243f50da97e33211693a317dc45", "9528e2e8c20517ab916f803c0371abb4f0ed488b", "0a59337568cbf74e7371fb543f7ca34bbc2153ac", "06774cc8b0ab364866beaf3efda1b2d012a7bcf9", "e2272f50ffa33b8e41509e4b795ad5a4eb27bb46", "e5604c3f61eb7e8b80bf423f7828d8c1fa0f1d32", "b8d61dc56a4112e0317c6a7323417ee649476148", "006a9f68bcf6edca62d8750af55168971cf0890c", "5740a5f9cbfe790afc0ba9a425cfb71197927470", "28af8e1a3cb3a158f8a642c8493fcfb207743d0a", "795aa8064b34c4bf4acdd8be3f1e5d06da5a7756", "6c803cd67f2ceeb11882b322abd386ac8a61ce25", 
"f1f3ecbcd46fb5ac2731ca118b61a4e3c9a429ea", "cf2a313b039b8adfee2a14ca5e81f2f5da52b0f2", "efe88ca3b72085ed90f8ebcd6c30f9397316ead6", "e4ada3966980ac76b0173ff9a0c407759647f2fa", "b47204251cdf97e1e78d0de99732e3d78288ea2d", "db0a4af734dab1854c2e8dfe499fe0e353226e45", "003afe78ec7989371f648fd8957a6ce79083cf11", "ffe0f43206169deef3a2bf64cec90fe35bb1a8e5", "8f9fa03690428cde478f1a27d4773f78d857b88f", "93ed1c9274906f1916d58cd618a9a82858448a3f", "05a22ebec697cfa5e8e2883d68e6f4762bbdebd7", "66660f5e8b2a4a695abe0f9e1df32d230126f773", "ba7892fc2f7bb5851cff243257da0da0f2d5bc05", "479eb6579194d4d944671dfe5e90b122ca4b58fd", "2a86bcdfb1d817ddb76ba202319f8267a36c0f62", "1ef46f7bb7463ead4369a796435106da63578733", "0bc82ec532228427a497ac47391d524e3b4537ae", "cb45511e4089fb8aff17b0adb6a3f386046a0a6e", "d289ce63055c10937e5715e940a4bb9d0af7a8c5", "fce5bc434a909d6d480fda7cb04cac3d278f8eeb", "cba3fda21e073df8e97920ebefa63712b9796c89", "b4f517f2f229a5f9d8b64bf7572c515c6ed414bd", "2892ca4979521e2d2aab24e33ac571e16ff030c4", "2b4b0795358d0264f846e8b3c19ec3180da301cc", "65edab091e437d3b9d093dcb8be7c5dc4ce0fe0f", "eb71c9267cbf93823b06b379f0041fd5d63703fe", "21a1654b856cf0c64e60e58258669b374cb05539", "276d35fef150f61adf53270eb6e50625022d4e7f", "16d658ed2ff10ff4c729479962c5989116adece1", "061bba574c7c2ef0ba9de91afc4fcab70feddd4f", "8262facae51097b3004f492d6de456286a2a58a2", "c4d3033356066ef8133f03f4060bb8cad842918f", "c798f01248311387efb1d9dc67c737d61950145b", "227a312324edd41892eb2c1dbc4bf8d94984a326", "aeee98c90799cd44dde4046754cff27c8ed28d44", "53f0d493c64c908c479f28b4b0cc38aa2124697d", "ee2217f9d22d6a18aaf97f05768035c38305d1fa", "ff3354d042fc3f9f7cfec5e6bc0dc91a8b55818f", "dae70eb5e393c0f05fa7b75d5ad48070743f622f", "da44881db32c132eb9cdef524618e3c8ed340b47", "76808ece9a138a8df2a28962bf1563208dbde6a3", "050a149051a5d268fcc5539e8b654c2240070c82", "09222c50d8ffcc74bbb7462400bd021772850bba", "add020816e4eea7ec547d0c3233b15abf3413fd2", "d6d6edce271935feec96484d0e1f16dcc24973fd", 
"cc94b423c298003f0f164e63e63177d443291a77", "035ef7b25991b0f7ea841a2270ed053198aab09e", "be213d0e9e7c41c0783f3efa2fddb07d0efebcb4", "83a811fd947415df2413d15386dbc558f07595cb", "675b2caee111cb6aa7404b4d6aa371314bf0e647", "b336f946d34cb427452517f503ada4bbe0181d3c", "765b2cb322646c52e20417c3b44b81f89860ff71", "a51d5c2f8db48a42446cc4f1718c75ac9303cb7a", "3a5f5aca6138abcf22ede1af5572e01eb0f761d1", "ce300b006f42c1b64ca0e53d1cf28d11a98ece8f", "065b4890957866a831ccf35694056dcec6f48acc", "2e32b8a7f86a0e9b3d185411c166cfe0c067a0eb", "71b038958df0b7855fc7b8b8e7dcde8537a7c1ad", "3aec4bf3cee81e9574879bb04e5da7f8b310671b", "062d173e11c19595909f598ccda04998b2bcfa27", "eb716dd3dbd0f04e6d89f1703b9975cad62ffb09", "469e0e79c936130b3727d598fac46913c75489f6", "c5f1a1e7e9d7dc2f795e86fd4fb780da09ff0e45", "460845e06ca99f292fa2265beb4e535d20ba16f8", "84c08d24012f5af14589c371d34b146202850c96", "af7cab9b4a2a2a565a3efe0a226c517f47289077", "a89ad89748ed86ae916f0b0143ab61f01e637205", "3a6ebdfb6375093885e846153a48139ef1ecfae6", "a7e9d230bc44dfbe56757f3025d5b4caa49032f3", "ca548fe8e5db29b876115d31544b68add98ae6a8", "787303db8e707feee2fa2b93dfc46e3d3cc244cd", "b13ca5fdd9de920915d602047305c1ccc6cab4cb", "06de3eab314437cc3ed08c3db5171a79c1f684c6", "7d3dd33950f4a1be56eb88c0791263b3e3a6deee", "fbf20dc3367864462d7630aad81c436e50d1cd60", "07de8371ad4901356145722aa29abaeafd0986b9", "cd61c4253eafb8ae5d6131f4ab55bdcbf1586e40", "cbdaf66888797ccbbbae7e47b1880246a643ec70", "af386bb1b5e8c9f65b3ae836198a93aa860d6331", "d6b1b0e60e1764982ef95d4ade8fcaa10bfb156a", "37b3637dab65b91a5c91bb6a583e69c448823cc1", "83d16fb8f53156c9e2b28d75abb6532af515440f", "25d7067e0120c6eb863f03a420e644ebbcf2ceed", "05e45f61dc7577c50114a382abc6e952ae24cdac", "6845cc4e9ceb13e8204cfe8ec818c91b0cc21f52", "192235f5a9e4c9d6a28ec0d333e36f294b32f764", "95029b1041a169e5b4e1ad79f60bfedb7a6844d0", "aa23d33983b1abd2d8a677040eb875e93c478a7f", "0728f788107122d76dfafa4fb0c45c20dcf523ca", "f809f9e5a03817d238718723a7b4ac04abcd3f12", 
"e443cb55dcc54de848e9f0c11a6194568a875011", "6775c818b26263c885b0ce85c224dfd942c9652e", "7b1be02cbbef951875813ad55d3016ec2aee17f6", "7335de73c5607015779085dd990022efd67faf2c", "42f512d36722b09d1c83d328051badd374769fed", "3cdb1364c3e66443e1c2182474d44b2fb01cd584", "6900bb437679dd0b0c5cea0acdaa9429d0127d38", "be48780eb72d9624a16dd211d6309227c79efd43", "3d5575e9ba02128d94c20330f4525fc816411ec2", "78f62042bfb3bb49ba10e142d118a9bb058b2a19", "0690ba31424310a90028533218d0afd25a829c8d", "0c7aac75ccd17d696cff2e1ce95db0493f5c18a2", "6c78add400f749c897dc3eb93996eda1c796e91c", "b61c0b11b1c25958d202b4f7ca772e1d95ee1037", "970e571305ed9dde9308e559694044e204d6e2ad", "79894ddf290d3c7a768d634eceb7888564b5cf19", "0deca454f2bc1be9421653f4f3f6ed9e38846628", "fec2a5a06a3aab5efe923a78d208ec747d5e4894", "155ce5d596c7b525110ca24db11e47d521b487ce", "5ac63895a7d3371a739d066bb1631fc178d8276a", "4e559f23bcf502c752f2938ad7f0182047b8d1e4", "7536b6a9f3cb4ae810e2ef6d0219134b4e546dd0", "11b89011298e193d9e6a1d99302221c1d8645bda", "de3245c795bc50ebdb5d929c8da664341238264a", "cc2eaa182f33defbb33d69e9547630aab7ed9c9c", "78598c69201cccfc060d47fc0415f2f9365035fc", "9c71e6f4e27b3a6f0f872ec683b0f6dfe0966c05", "14725e03c93088c071f51c68137b5b8fcfe2129e", "656a5d4d84c450792402b3c69eecbdbca4cad4cb", "aaa82dfc7942ae16c1d7155a109582505ccee4ec", "b1abe5b8eb9a153ab1013243e2180777ec2fdb56", "1e4474f86bbf4bc78336c2f0040f6cf995d07edd", "0cfcc1cd8bae5f5899cef0995debd7b38c46e817", "b88b83d2ffd30bf3bc3be3fb7492fd88f633b2fe", "189b73b11634e4ce5268006ba74646cb0b4fa754", "c636cd6eba286357fe807c0ca4b02c3b9b7b5619", "b6a3802075d460093977f8566c451f950edf7a47", "2e7874ec37df91db1934d61d9e1181de5e4efb36", "122f8f315feaf90ae78c4061846a42f0ac514c1c", "ca1b0b87d1a21332bb8e9458e5acca4e596f7c83", "618c13f1e13cc5346ed5c069a77acaa720b6a1a8", "4d507e9e9e2f8834cf56eb675f5c5e5477cd15b2", "cd6cab9357f333ad9966abc76f830c190a1b7911", "29b1a44d1e1ffa05c2bf7f4be931c5045f427718", "0fe8b5503681128da84a8454a4cc94470adc09ea", 
"8d01119f663d667edb8cbc360340a4ee1a20043b", "3c6b46b7867a387ef46cfa7eeb3f0cfda47af2d8", "9bbc952adb3e3c6091d45d800e806d3373a52bac", "6e209d7d33c0be8afae863f4e4e9c3e86826711f", "bba22e04fbe124bf58330e5d911d873a80afa0eb", "912f6a6ac8703e095d21e2049da4871cc6d4d23b", "70671018d4597b6d2d0c99b38b1f1a3f1271eaec", "0079d56c8e183ef36f876b84327b97ee9454825b", "7e915bb8e4ada4f8d261bc855a4f587ea97764ca", "46d85e1dc7057bef62647bd9241601e9896a1b02", "f840e727e55e9ab1d4f069ecc38da78cd2ebd23b", "67e3fac91c699c085d47774990572d8ccdc36f15", "3d21d0923d9f7dc3836f2790dd8e1db98e0c2013", "5710ddc7254500901111394e968cfc41b4fadbe1", "5ad99fe589826adf9f2a95c5b3fb6e86f3d276fc", "a4f29217d2120ed1490aea7e1c5b78c3b76e972f", "c85aa12331bdeaba06d4c3e44b969e6060c3310c", "f2d07a77711a8d74bbfa48a0436dae18a698b05a", "d76f68c2d0a45ab224065d57836bf3da360c82f2", "09fbcd901db726caec1f3bcbda5266ca72c7deb6", "56e95fa26fb417776824e5adf6d6d511e5b30110", "5b705232a9ab0a91030abf0aef2464a74c9eeb0f", "a6e695ddd07aad719001c0fc1129328452385949", "ff11cb09e409996020a2dc3a8afc3b535e6b2482", "7b8bc2ff998c8ad5fd0464da8da3b42c32d51750", "770e5117d02ece2ae62d734089fc6147f9a90be8", "7c98c27f4be40a7675ba9c85179ce72d12593a7a", "fb1732a1476798c42a0123aaf127036bf8daef09", "e103fa24d7fa297cd206b22b3bf670bfda6c65c4", "9a781a01b5a9c210dd2d27db8b73b7d62bc64837", "b68875e99baa9118a6df314e947d6fbfa78e29d6", "ac559888f996923c06b1cf90db6b57b12e582289", "c05733052172753c8736e07fa8004dbaacfb623d", "2a4fc35acaf09517e9c63821cadd428a84832416", "f60437dc3d8687930d82988713fe16184117ef27", "cd4850de71e4e858be5f5e6ef7f48d5bf7decea6", "2ff945f495ac137b4e0cc7e9e1d516c555607822", "c5decf0a3906c85b6540e96c9c7003957c6d395b", "1c0e8c3fb143eb5eb5af3026eae7257255fcf814", "790bce6cbe30ef9bc4431c988d0d747da1c6bb1d", "32cde90437ab5a70cf003ea36f66f2de0e24b3ab", "1a11c2e4e74c0876c6259b8148575a0665e1a7cc", "34b925a111ba29f73f5c0d1b363f357958d563c1", "c76b611a986a2e09df22603d93b2d9125aaff369", "4b7ce0fc27cd84c2b44f16c16b1c5c6b612c6881", 
"9d0ac8e084fd6ac32528aa480f71fab4b4f5d3a1", "d7f3836f2d28adf15fc809bd4f90afb1f61ba8e0", "70b0538af40672e3be4b72f97cec486693d5204f", "7cb4ab1bfff61bf0d1ebec6c4402b7e45e62c609", "738947f9b0bc50d1b7d5cbe84856bc18fcaee386", "0f366de3ea595932dad06389f6e61fe0dd8cbe74", "1927d01b6b9acf865401b544e25b62a7ddbac5fa", "4cc98d819de01db8e0eedfac1022e6bd0b1bed03", "1ecd20f7fc34344e396825d27bc5a9871ab0d0c2", "6a60c3994e4701b2d6ddd40609f6f0bb03fc9605", "038b5a38510f5b2064a0b24c94e59ee2b7c49815", "5ef49174ca2b54c1bb54df828acc52075cf1634b", "26aa0aff1ea1baf848a521363cc455044690e090", "260d8f563e581e4151501375fbf58425f401b1a1", "3548cb9ee54bd4c8b3421f1edd393da9038da293", "25ee08db14dca641d085584909b551042618b8bf", "d0f81c31e11af1783644704321903a3d2bd83fd6", "88fb26bfcd7a52b3f6a5e9e6cf0ea04628d1049a", "155219f3e6dcf5d5f44815a7493b6b7cc8e02263", "a996f22a2d0c685f7e4972df9f45e99efc3cbb76", "4da5f0c1d07725a06c6b4a2646e31ea3a5f14435", "40dd2b9aace337467c6e1e269d0cb813442313d7", "0f1cd6eda6c6d09af59f0590a711f299b39fe948", "26c58e24687ccbe9737e41837aab74e4a499d259", "9a7784eea6bfa62bf2834ee0b87a3cdda46006f2", "299b65d5d3914dad9aae2f936165dcebcf78db88", "2a218c17944d72bfdc7f078f0337cab67536e501", "9c49e4ba8ad0ba4634fe9306fb612695ed2b8cae", "cb5dcd048b0eaa78a887a014be26a8a7b1325d36", "17444ea2d3399870939fb9afea78157d5135ec4a", "c5f02a3aadcaaf0dfb11788cdfba037fbcab5929", "63660c50e2669a5115c2379e622549d8ed79be00", "5b10fa6b4c0921af7b36a58f4fd2d8fca6e3c9b1", "f6e58f54b82e19ccf7c397cf8200ebf5160eb73c", "3b6bbb187c09f6e64fd5bf2d528ca8bd3363d115", "3f70938c1ea282a2f2efd4609223d04a8f1e1288", "46994b489f7c673d031f6ef644e84ebe5d843d93", "472541ccd941b9b4c52e1f088cc1152de9b3430f", "31b2b1dedaa171bae9a10dbf291e445f1c3e38c7", "049ad2deb4ce7d1f98057694406879816c4ac049", "383d64b27fb3cdf2beff43f3beb8caac8c21a886", "9184b0c04013bfdfd82f4f271b5f017396c2f085", "1a5151b4205ab27b1c76f98964debbfc11b124d5", "8d07e8bb63d489e6b60b187b679670cef24f9bee", "a05dff6fa9b4829306b1cd311163e587c8cb1ca2", 
"fdb6599cb273a88ff38a987b19ec2ec4b7efb2ff", "766a3939b9c1f785cb2c1dc60719c9290e606042", "b9b7b37d7edf4482a6f440e282c3418ab1913afa", "57488aa24092fa7118aa5374c90b282a32473cf9", "1cb68fa98a0d9871a394cd0035488df167b9c2cf", "7771807cd05f78a4591f2d0b094ddd3e0bd5339a", "8a9a3cfe8dff0d0792db3173a4aa0e354a17cddb", "7f4292745f5169655f831ffa017aab9acd7a994c", "4558338873556d01fd290de6ddc55721c633a1ad", "8339ac6062a7a17c604b1517c16d18e2cd365132", "85957b49896246bb416c0a182e52b355a8fa40b4", "a5c63f38e2e6ca7fff48fc5cd1dbdb8f6362c99f", "7fb8d9c36c23f274f2dd84945dd32ec2cc143de1", "67c30688bd46d305c610a83a0b28e86e10ef5cc4", "3eff18934f5870b27f80c8b1d7104967460e3035", "9547a02bdfe44c39ab3537142cba44fc52b8e834", "de10f93b0a3656822aa7c0b5d62074ff5eac60b2", "136954b17d0931cc5aa93aa80ed91b5e7b402fcb", "5d425633681755fb9235023aefc78e712fb7efc8", "b5e3beb791cc17cdaf131d5cca6ceb796226d832", "2a40917ef436000b22bc7c6f35400440ef673d36", "532c089b43983935e1001c5e35aa35440263beaf", "35fc0b28d0d674b28dd625d170bc641a36b17318", "be9dde86ebd10ecb05808e034e3cadd210fe0bfb", "e4cb27d2a3e1153cb517d97d61de48ff0483c988", "8d203fba8a43e58703618cac3b5147c92f5919d7", "132fb6cfb40a82001bef31b030d0e740ebb69c26", "a67da2dd79c01e8cc4029ecc5a05b97967403862", "4ab69672e1116427d685bf7c1edb5b1fd0573b5e", "989c7cdafa9b90ab2ea0a9d8fa60634cc698f174", "85af6c005df806b57b306a732dcb98e096d15bfb", "58a6eb3584b2f5df2f25d39a218904d510cae516", "cdb293381ff396d6e9c0f5e9578d411e759347fd", "7b73276eca7d2413a219961685f6fe6870c42ff8", "6fe149e588a5bf15bf89edfedb1a29cc31384ddc", "0e67717484684d90ae9d4e1bb9cdceb74b194910", "5b4b84ce3518c8a14f57f5f95a1d07fb60e58223", "07c80339af2dc54c94c03c01db71a3d7d2bb9ea8", "a2bfab80a4b48717aa647cb38069632c5962c6a6", "e7dd1ad267f5b9483400bf03e101de7f9cca1e39", "4dc6659b5022ecc2c4e1459e9dff16ddece4147e", "7ee53d931668fbed1021839db4210a06e4f33190", "51424fddff93b26381e5bf63b3003d610283955f", "47203943c86e4d9355ffd99cd3d75f37211fd805", "45ff38add61df32a027048624f58952a67a7c5f5", 
"9ee4d3c173c41ffb6f5aa3c40951aefe3da11d5b", "0a789733ccb300d0dd9df6174faaa7e8c64e0409", "ad6313f284b081076c006d301124d12ba24272a3", "9595a267de2b0ecf7e4e2962a606c8854551e203", "9d3a6e459e0cecda20a8afd69d182877ff0224cf", "f5083b4e28e42a2da7bafd2a742ab8e21c12559f", "42e0d7fe2039b075ac2372d883fa994eb0a68b48", "5df11c59e3b47189486445f5833675bf08359bfe", "e8d57223a3b88c58131a6642579e66f4739edfb7", "c908bd540a02af5e1cf2d556ed2a24fa7bdf2a67", "943a1e218b917172199e524944006aa349f58968", "72b6e589217e54879982e1f559197a9408e03b34", "35c3dead77e705132762006e588984ef36ee3604", "2f886f948724595e1940aab6b9869b6144d42982", "725bc4fd72070b58fb7e6819a1087e6cce16550c", "5f68e2131d9275d56092e9fca05bcfc65abea0d8", "f989a20fbcc2d576c0c4514a0e5085c741580778", "cf94200a476dc15d6da95db809349db4cfd8e92c", "3e0415f0e8c36f20042d6a1f8b7c216fb5543c3a", "9a0331bac634f67c2a993c36da95481fe53709bf", "fdee0cf79e9a2695857afeee6526352918c9f315", "0b70facac4d10c7c73e7fdf3a85848ce429d98ab", "53197621c4f41ab4e35f3bb27224c6fde0938a60", "25dba68e4db0ce361032126b91f734f9252cae7c", "883767948f535ea2bf8a0c03047ca9064e1b078f", "229bce6384ae16a388881e766bfa5a672b61dc9b", "568727a76dc1242e3d48392f9c19678a27c63482", "6768b558cc58e113096540c123ef3b2c2d2469a1", "357df3ee0f0c30d5c8abc5a1bdf70122322d6fbd", "559295770dc2e2e3a1348df31ac5c3f3e66f1764", "18095a530b532a70f3b615fef2f59e6fdacb2d84", "adfaf01773c8af859faa5a9f40fb3aa9770a8aa7", "9397e7acd062245d37350f5c05faf56e9cfae0d6", "03a24d15533dae78de78fd9d5f6c9050fb97f186", "17d4fd92352baf6f0039ec64d43ca572c8252384", "86d827a84e9f9bbd27e87c5b2199c79e34427369", "30a29f6c407749e97bc7c2db5674a62773af9d27", "280d632ef3234c5ab06018c6eaccead75bc173b3", "cf98c333c8d7d5870c1ce5538bb0c3de3de16657", "3eaa860f2735fce8b839237397455c13dfad1ed1", "0f945f796a9343b51a3dc69941c0fa1a98c0f448", "0db6a58927a671c01089c53248b0e1c36bdc3231", "63111778d25b1105fec5e09bedf9122eafe34fd1", "5979acf247d8af73b41da0eab6d8a7eda1036c9a", "08bdb84d5c66265b3b6d33e8f95c4cc27caf33ad", 
"14d0afea52c4e9b7a488f6398e4a92bd4f4b93c7", "16b5890dd6ec399a235d4ba13484fbdd4e4f5cd4", "8da1b0834688edb311a803532e33939e9ecf8292", "f42d3225afd9e463ddb7a355f64b54af8bd14227", "3e63af98d0da4e267411de8743e68f65b00e85a1", "dc6c47d15ffc0fd59e51ed03556c3566afe5710b", "f13552e2e2843716e7a1c7c2492cfcc6e86aa03c", "da9080d5b433f73444078ac79c3a8a4515ad958e", "e963ac953e084f6223a5d8379a52631508426ed7", "a1dd88f44d045b360569a9a8721f728afbd951c3", "322c063e97cd26f75191ae908f09a41c534eba90", "28b039cb4b00cc4deaf791b5e923d58637714f49", "0534b58649818607a8ad31e6a63e4b1d37be16a5", "0c36c988acc9ec239953ff1b3931799af388ef70", "f6f4d887fb62d33a9a18cbb7bc58bd6247384a35", "08f00e5adaba03628144dbc97daefa8ceb6e5322", "fc027fccb19512a439fc17181c34ee1c3aad51b5", "426840ccf74bbd8b087cf357efdb80ecc85ea2ab", "24b31c4d044fc8a625a229fd8296b71836d4a422", "165c27a4bfb56562c807279bef9d15f1bced5ca0", "ba051292ca6e8c689542831479e436be7035c147", "a3a7b48f707b87198d93f3fc7cd6dd1bd753fcc3", "7a82e4a2787b404b4c9c9238fa626319e85a528d", "c0f17f99c44807762f2a386ac6579c364330e082", "8c00fe016755483f63c96b5530d259bf815c9d29", "c48bde5b9ff17b708ab3e4f7c62a31a46c77f2f1", "19441b8be551e8134dd9eb33238309bc2de0a42f", "451eed7fd8ae281d1cc76ca8cdecbaf47816e55a", "992b93ab9d016640551a8cebcaf4757288154f32", "7489990ea3d6ab4c1c86c9ed9f049399961dfaef", "886caeee31c23134adafd0b191e911650de85a24", "228b899a16400c4e23ac1a7eb6de3e69797bbcf7", "424e918134ed7c70fa73450bd6af1bd982071a27", "c5e039f2565def55ad7587c443868e815bc3ff7d", "e7713751e08a6e837a4b9bcd766b021ee5c15502", "923e9b437a55853120f1778f55fcd956d81260f8", "7fb74f5abab4830e3cdaf477230e5571d9e3ca57", "10793d1475607929fedc6d9a677911ad16843e58", "c94fd258a8f1e8f4033a7fe491f1372dcf7d3cd6", "add80957faa3cc64f293f239c01752dcf022ebe7", "c27c2fe9642fb82a3dfc314ce6003fe7a88eb1ec", "2581a12189eb1a0b5b27a7fd1c2cbe44c88fcc20", "96416b1b44fb05302c6e9a8ab1b74d9204995e73", "aa2ddae22760249729ac2c2c4e24c8b665bcd40e", "60542b1a857024c79db8b5b03db6e79f74ec8f9f", 
"bd8a85acaa45d4068fca584e8d9e3bd3bb4eea4d", "456abee9c8d31f004b2f0a3b47222043e20f5042", "7c2f6424b0bb2c28f282fbc0b4e98bf85d5584eb", "6bbcec054017a6fd64af8bf325cb6e3e7244ba55", "b99588bd0393a60a0c627970ab6cab7338d08ca6", "57558bfe896ca98777f6345a07e65752585496cb", "d78b190f98f9630cab261eabc399733af052f05c", "5080655990fe0e0446bcb038b3e0adad0218bd29", "7142e659d6466717cdb8a242d8e34fce176b3f4a", "d58c44bd9b464d9ac1db1344445c31364925f75a", "0391dca8171f52015eba4fb0e4be3be071950fc9", "fbb9cdd699baf86e9d616b259ada02449c2322ca", "3347d3e9f8a2da66e1c00f6a1e56bb37d27145ae", "e849b9b3e65130712e23afb872ac925e1e9a6b73", "3f775e3be9e1a00ebf4fd281e524932e88cec0ae", "9588b3416aa68104b58546a4f82e234a3670d266", "81ba5202424906f64b77f68afca063658139fbb2", "3261a6ca620845566a61ebd0205dfb75d1c0d0f8", "5087d9bdde0ba5440eb8658be7183bf5074a2a94", "1f18708439ba1dadd81568e102216731d44340d5", "fcabf1c0f4a26431d4df95ddeec2b1dff9b3e928", "7f29c53c6483919c8467fb596af9596ba546f11b", "7b83867b7f79cbfbfc71996bcf07fe7ee7a7600c", "1699acad478e85bac4632612b66307dee4cfc60b", "0b6f64c78c44dc043e2972fa7bfe2a5753768609", "c1b971cd7263e788e114cf8c4aa076a2e170990f", "016eb7b32d1fdec0899151fb03799378bf59bbe5", "2e3f24d9cdba1d0343248a81c13bca96db123c21", "fce4680e09b2521dfe75819dbf20d973fec4b864", "503c16d9cb1560f13a7d6baedf8c9f889b22459d", "cd9d654c6a4250e0cf8bcfddc2afab9e70ee6cae", "e7ef466eac953e2fc1ca59b08027c4a9feb3cb9e", "28737575297a20d431dd2b777a79a8be2c9c2bbd", "7e736f25911c91cda343c000aabc773ed9a94fdf", "65eff143b099e53dcf39692c2fb542b0ee1fdfb6", "80ce8b6abd00c4148f03011944ce03d38fb82c39", "1d6905e88f64ac826344d89c51ad8daea3b95e0e", "66a9c35828b1a4c264008a35fdb975a07c128fa2", "583e2dab221ad5d54c1b1cc0a9df4f1254bf3942", "b797f3fa4e732d52092f9eb863350440d5de8bb1", "46702e0127e16a4d6a1feda3ffc5f0f123957e87", "d2b2cb1d5cc1aa30cf5be7bcb0494198934caabb", "7fbff9fa2ba7a7ff57a433e8bb19cfd99d52132d", "50c5a552c191bff34ca74e0f8dbac159e3814533", "860952e6d159003648290fac0362883b4e7adda6", 
"386a5c06d334d20227e8b2daf5433a2bef385648", "446fbff6a2a7c9989b0a0465f960e236d9a5e886", "0ecaabbf846bbc78c91bf7ff71b998b61c0082d8", "291e5377df2eec4835b5c6889896941831a11c69", "b69fbf046faf685655b5fa52fef07fb77e75eff4", "4438d4dd2dd096906c321b166b092ee51474804e", "b4c02e071432a9a986501b7317b524f216e87ec8", "13bda03fc8984d5943ed8d02e49a779d27c84114", "7bc74eec0a0aa43ae2b9448e16dd9d1fa4234fb8", "87a66ccc68374ffb704ee6fb9fa7df369718095c", "4960ab1cef23e5ccd60173725ea280f462164a0e", "8856fbf333b2aba7b9f1f746e16a2b7f083ee5b8", "2dd14a73911446f90c9dae9bd46eb172e2c7fba2", "f9f01af981f8d25f0c96ea06d88be62dabb79256", "6cda4d23983298ef2c9bd719805e66f4fda7e6fc", "ca90c12af32c057c9bfd46a7a054d799a27f0e78", "09066d7d0bb6273bf996c8538d7b34c38ea6a500", "415a4e43bb1cd0570a51eab1bc09796cbb09b2be", "574f05ab2f135fad33ccbde85debdd12bb41bc87", "81bae1505b1d404cecebb9e1b17162ac124cf4e8", "4a06c130c57b373718d15aa25ebb958d78bf2840", "4aeebd1c9b4b936ed2e4d988d8d28e27f129e6f1", "232ff2dab49cb5a1dae1012fd7ba53382909ec18", "1b32284d732e0aec506411b71e6150df53d167f7", "809e25da311366bfd684228e16184737d948eef6", "465c34c3334f29de28f973b7702a235509649429", "caa2ded6d8d5de97c824d29b0c7a18d220c596c8", "72642c9f4dc2ec2f4cc28dc056f8e542ff20b178", "289d833a35c2156b7e332e67d1cb099fd0683025", "0fbdd4b8eb9e4c4cfbe5b76ab29ab8b0219fbdc0", "f94f79168c1cfaebb8eab5151e01d56478ab0b73", "4b37efd3987c1e625b063a6998bd6b282c844915", "6bb51f431f348b2b3e1db859827e80f97a576c30", "1b0548e52a1ffc7ebffe5200e2111525c9f7fd4a", "b78e611c32dc0daf762cfa93044558cdb545d857", "c67eee5e0ecc020264fae089a08a1abf8c0b6240", "e605242319ba495bc5f47abe9f1c08d508d83627", "3a8f16d8f7adae8bd0cdc5cc5114dac0b388a9f6", "688cb9fd33769b152806c04ef6fc276629a9f300", "6a1b76f1ef876061ec479ab9bc13fcd517eb4188", "bc12715a1ddf1a540dab06bf3ac4f3a32a26b135", "19317c6aa7f88c79abe03b146aa5d340a11fde3f", "143c8b8a45d7176240b1bd7a6e7aab705866ccb2", "28b72ad9229f38ec61f950e1d794d6af070d1800", "1c9333bcf523388d75f852e0689b0e7f5a04faa4", 
"050e7e32fdc48150f66cb5edf166790c69652b8b", "17a9db524ddbeb5577a94924c2a7cca048dd19f9", "a81d396c9210282d461f9f08b7b9794b096ecdfe", "4d1757aacbc49c74a5d4e53259c92ab0e47544da", "d488dad9fa81817c85a284b09ebf198bf6b640f9", "d392098688a999c70589c995bd4427c212eff69d", "1c1f21bf136fe2eec412e5f70fd918c27c5ccb0a", "72e9acdd64e71fc2084acaf177aafaa2e075bd8c", "1b79c2e3816eead394145f2f1d12d3314431a035", "f2d95a5b29986a6a28746b30adfa43497b27ff02", "9da9ee38d5845d39497b10b0ab442580e75ee4d3", "266b5b038750e1ab1311e38554e4c2c8ba6564fd", "220f8088f2fc1ddd9df1a0b583d3d01cb929ee8d", "07b8a9a225b738c4074a50cf80ee5fe516878421", "8380b8f4e36c993eef23af42ccb382ae60aceabf", "6a1e5f4dbabf451122bf35228c8b25c79c7d235f", "d146d7726369be92c6d67ff201a8a9300540a03f", "2d0dfa8779aefa1a9a89a1b400188fa9114b4c0a", "1bbe0371ca22c2fdb6e0d098049bbf6430324bdb", "9954f7ee5288724184f9420e39cca9165efa6822", "7e6c2d3142e03af2ca925418c34239ef8c262f8a", "ccdb0c44cb6f30924801877ece4f797b66670985", "cc835394ac6c47263c57aa815f1c5b9ea9cd0261", "0a773ed20a5920897788dd6f0d63c20defca8ab0", "a91caf771905ddff8cb271f04e7ede1a8b6d529b", "d3f349c401dd1b3eec10e6c2d8bb51b9a0f3580e", "be6f29e129a99529f7ed854384d1f4da04c4ca1f", "e212b2bc41645fe467a73d004067fcf1ca77d87f", "7ff636c82898a35d3239573f8e3a29da89c73ed4", "51c4ecf4539f56c4b1035b890f743b3a91dd758b", "007e86cb55f0ba0415a7764a1e9f9566c1e8784b", "d6dfe23018172d29c36746d24f73bf86e1aaa0a6", "9bc05a3505ed3210729aa59e405a974259925268", "54d97ea9a5f92761dddd148fb0e602c2293e7c16", "6140b789d5767dc124f9c3d8db7062d0e637082e", "0e923b74fd41f73f57e22f66397feeea67e834f0", "93cba94ff0ff96f865ce24ea01e9c006369d75ff", "19766585a701749fc297a5ca6b8cdc0c62d4ba1b", "30e6cf0c3cb38997acb05a2f5ed86269643ae3ed", "b13c28b35571627162cf46765821c739a7dc2d62", "24fc311970e097efc317c0f98d2df37b828bfbad", "45fa259ad3c2453226093ae72d8a88e2e5ed2252", "5c4d4fd37e8c80ae95c00973531f34a6d810ea3a", "183ad3409a53914247affc599b33af38d94937be", "0cc2dd2900339836e6d42f2cb0e542bbe5627454", 
"2fd5b8ef1c62e3fcef69bb7deebbb7d77d5998bd", "2ce76250731cb19ccc5ffff43e4c6abec8f5af79", "0f08d62e882026ac83ebf26c0bd288c553873814", "71b973c87965e4086e75fd2379dd1bd8e3f8231e", "20c02e98602f6adf1cebaba075d45cef50de089f", "c17ed26650a67e80151f5312fa15b5c423acc797", "1e2b8778cfe44de4bbe4a099ee7cdff5c2ca5f38", "5b3725c8b5e058ec3a383b621aa9316b90738b2e", "6838a27d34b0a47e44dc94dde63e3f83986c984d", "d7da0f595d135474cc2193d382b22458b313cdbf", "0ce08f1cc6684495d12c2da157a056c7b88ffcd9", "d4d5a35371d35640d01f78abf9f51debb098cb4b", "1b12c10ccf3a814d7465c67542ae7b0f76521a81", "289b9161cf1b474837c1b5a0fdf444ca72b95aee", "728a8c4ed6b5565a250bd1e0587293a6a97f515b", "f2a158e5bd95e5dc7ab9169b613fed64d3aea663", "40c3b350008ada8f3f53a758e69992b6db8a8f95", "78bf95cfef847473ac9c83e625f2a52da82b0f08", "a28f831b4014fa75a69f3c56e39d9c40fc0af48f", "635bea02dae6d4402b53eb3b31930b53ef00adc0", "29aa9c557a46ad214a4236143d93072ab018841e", "a96c45ed3a44ad79a72499be238264ae38857988", "567078a51ea63b70396dca5dabb50a10a736d991", "6e4e5ef25f657de8fb383c8dfeb8e229eea28bb9", "cf528f9fe6588b71efa94c219979ce111fc9c1c9", "3b67645cd512898806aaf1df1811035f2d957f6b", "03484414f657a6cce8e08dc8a560ac9db65088f4", "7a7a53b05e22305b2963c05ac89830e099146767", "5d165ff5b0b389e32809c17838a2afc218a91d62", "ef2e36daf429899bb48d80ce6804731c3f99bb85", "cd444ee7f165032b97ee76b21b9ff58c10750570", "333be4858994e6d9364341aeb520f7800a0f6a07", "0632a9ace74f540e8793f89a84bb7555ba9deece", "75410eb80800f8b51b555da7d61b03b3fe58cc47", "051d8bbf12877c46ae9a598a386c5b72d1b103ac", "15728d6fd5c9fc20b40364b733228caf63558c31", "1c521ac6e68436f6c6aad3c0eb7ffa557fe25b0d", "ae6e8ad5bce62d2f93ae3a039368f4e90c83922b", "576ffe2304aba0b799b4d3b8880f4b5a244ece5f", "7791ce332eb0e9b5e1f839c62a9b8cf5bfd51110", "79a3a07661b8c6a36070fd767344e15c847a30ef", "5aa7f33cdc00787284b609aa63f5eb5c0a3212f6", "8599560c50a55e75928dba6bbcbb98ef180a0798", "cf77d2e7411814b30aca203376709b12a0eb3e08", "edcf668846a3aaf55120aef0c806854936208b3d", 
"c303dba374c094eebb7005724b76d33547ffd830", "3159c9423862b22c6326801ad4353ae2cfe30d32", "38f88655debf4bf32978a7b39fbd56aea6ee5752", "0069406e49ba09432e9723d4a2eef27b9b8994b8", "7b3b2912c1d7a70839bc71a150e33f8634d0fff3", "056892b7e573608e64c3c9130e8ce33353a94de2", "7c33367c65094b25e30527176d85f4e74fb07c72", "7e5414277148c8fdf9903068b001887225b69868", "79e7f1e13e8aafee6558729804cf1284134815b3", "acdc333f7b32d987e65ce15f21db64e850ca9471", "da4137396f26bf3e76d04eeed0c94e11b7824aa6", "5240941af3b263609acaa168f96e1decdb0b3fe4", "126250d6077a6a68ae06277352eb42c4fa4c8b10", "447b57ff91e3572da718c18b2bc016839ce418e8", "0cbbbfac2fe925479c6b34712e056f840a10fa4d", "28df3f11894ce0c48dd8aee65a6ec76d9009cbbd", "4baf3b165489122a1f8b574240c2a7fa9b6a7a14", "325000c2ebe4fcfd08946aef91aee8bec22026a5", "aa8c3eb6e821cb44ed5a15a2f09fba332e5561c6", "cb84229e005645e8623a866d3d7956c197f85e11", "636027f52ab111b2b22332ab2ec5346d03aac305", "a79a109a1dcf103cbb929cc3807572683bd3c35a", "3dba6c86541aad3ec8f54c55d57eca9aa98f4ed2", "535ed3850e79ccd51922601546ef0fc48c5fb468", "c679fd4e29597c64e5921fad796183ae30db8396", "247ca98c5a46616044cf6ae32b0d5b4140a7a161", "ed90a9d379f6412a1580e7eda5cb91640000dc42", "bff8bb99347ff866e1fb77476fe3a660f4dc2af5", "b8fb23610ce59509e926fe068281476deeaf5687", "deff1653be22ef7ea2a4befe0eccdf660111c504", "8306e384e7ca48445843bc025b08236cd181d7c6", "e20ab84ac7fa0a5d36d4cf2266b7065c60e1c804", "1b1d9b528c69e082dc5685089090bd2d849d887d", "a1fdf45e6649b0020eb533c70d6062b9183561ff", "775c51b965e8ff37646a265aab64136b4a620526", "0688c0568f3ab418719260d443cc0d86c3af2914", "3988ed2b900af26c07432d0f9f3c2679f3c532ac", "3bfa75238e15e869b902ceb62b31ffddbe8ccb0d", "6d34ccaa0429255264c747f4ad86cef299a36f3b", "5d92531e74c4c2cdce91fdcd3c7ff090c8c29504", "beec0138d21271379bdfa89317a0a1d648733bad", "c919a9f61656cdcd3a26076057ee006c48e8f609", "c6ce8eb37dafed09e1c55735fd1f1e9dc9c6bfe2", "c0006a2268d299644e9f1b455601bcbe89ddc2b5", "273b9b7c63ac9196fb12734b49b74d0523ca4df4", 
"e771661fa441f008c111ea786eb275153919da6e", "4ceb9f530549f3edb3369fd0bf7406d55354f9c4", "8f05c4c1b3c1ad31ec95ccb87bca24a884b5ad4c", "e1537df2f7ef11c4a7609d4fa094d8c5e72c9a2b", "5feacd9dd73827fb438a6bf6c8b406f4f11aa2fa", "042daa253452d0e7e4b5920f5d56b3c7d8d7507b", "245922e5251c103c2021577cc0f99791d748ac64", "6a75ef6b36489cb59c61f21f3cd09c50ad5b2995", "461f6b955f6129aa1977eadef028dab9d8897f1c", "0f2d6a2c37203af0a3b10a02773b659a71468d32", "066000d44d6691d27202896691f08b27117918b9", "42f4653f0693f16e087e4b913407d9b0278154c9", "8c244417db2082f4d5897548e72ef304ae886e52", "9bd02152ff9fb93daef3f4dc7ef032479a6c03f9", "18e1863e70cc93759a041b8aa745d0c0da51ad31", "2b339ece73e3787f445c5b92078e8f82c9b1c522", "cfec839f66000e45f6bf29af76e4641a30975ef5", "0ae07f24251946b2086fb992031c298ada2805de", "88e1580e975ec0edab7327783f59665dc711ee7c", "08e6d7d062dca6ed869a21d7e5634ccbfd0dba32", "e532dc46f7a28114fbe03d256e0e28ed30929692", "a22bc85367a6474a91fecea9dd20681451c6fd0d", "4c4622ae838ce86313c2979c0ebeec077713161e", "4f7821d28033be3757ad567e0c4e79a43e1b4a72", "1adb472cf79b9adc4f1223686528c524e5d790be", "10e2f2ad1dedec6066e063cb2098b089b35905a8", "0e2c746b2e9d7337b63afcfb9b36998ebbb9a829", "4b004f3c524778d524bfb0cda923bc6e895f9ea9", "07625af8d73142e239b5cdccb1dd226648e4b0d4", "13ae3c8afef5a0d6f4c9e684da9fc1fa96caaeb6", "5499003468a81f2c6dc616be6416e766b6bd3a13", "65637a5fd52509d33476e45aa882ed66a0584bac", "2637a5d0b677eb3145e5bc484337f99b8486014f", "62e2c431d375bbafd988d53c4d39f240c8b7977b", "e076f818b090e42036821c69727cfa3b7da49373", "3b466bb66ee79c8e9bcdb6cf9acb54b864dda735", "dc22de0ed56958013234cf7128952390fb47345a", "08c6943a17f267ef27316cff9248b3036a7059f3", "c4a56e121cb183e2595d0c9761344d124dafd9cc", "92fdb0c40f05fd02857f70efca469eac1c3a0e5f", "adca02d4b34a9851d1c9c0a7c1bb8d5178b59b85", "133900a0e7450979c9491951a5f1c2a403a180f0", "3e01f0bac3d5df0744caf8f42ae189e113d0758d", "f71f3dd37793d7db809501e2eda47ca28052c946", "0a68747d001aba014acd3b6ec83ba9534946a0da", 
"4140ccbca5a585162420b82e9e4eeb9afbd1e7c2", "1e00b758e50fc1896511fab1cc00abddf5bfdde5", "3b1ba9818e2ee6a54e7ec033c5b2ec8bdbe2935f", "74f21f2edfa985280be63f8a01aa00541f3a5625", "603a282933dd7cc2bf012df66d1a24f6d7d0c0ae", "35f1bde669b94c8a3af93c3c5220ca3e3d2f76f1", "66dd296261268061140f60b0cebec9580148c346", "31bbb2901d6ff0f53886e44e69c818f3604b2223", "806bf27919283dd4db3aef8a64e053ca5052840d", "429bf689fe333e1a2dcd8f1d50cd0a905972029e", "2c9861da30143bcad1281e858f2b441f6eb85e70", "23a59bfb96c4f543673e05b3cf6dc01b4173745b", "4fc6bd10f05731b4b4948b426b164900a04cf1ec", "550a7764e30f9805484e10d5b9936c539c5cd2df", "98c190845e1483edf35ddc1c5a2c321f21fafb58", "a9978df0b4df4d7b04bc4e9464c67f9ff7c31d3d", "5f4a873118e033e5e168ee99d64474b4cc4d94a3", "34c456590968581b5bc0baacfe2dc1e8ab0f2689", "0ad6dc4554fd5c0212993677c160af31fd27e243", "b2b28eeeaa2b613bf30b5bfee5ec4272ce184bf3", "41f6368bc4ec5e334c81a9d16185205b3acecee3", "7710f1fc67f11a91afaa951f1b26e07e280391c5", "815e71b83bbfde33756fd5aea4eef74008342ce1", "2ea33c152a6ec4b33c379eb4cea81b34edd84813", "4a111ca4ba39386b489f9c0a9c7949e932563ddb", "9807b1e0c9a89aa9b9244c3e8a720428a33e710f", "a303fca91c181f1084d94948169ab73c45e2073e", "0a851d13f1460eb66450473ef2c8e1bec9c7e75e", "224ffad672f7e6c7995780eb9bd3c8a141cb25cd", "bca6e77e7e0db8f632af7395e99028025854ea0d", "e890ed30b4204b7294cdc291de7cb37a0c860f44", "ccc57280e2c50381a692a67ed53124ad1b735686", "a1e97c4043d5cc9896dc60ae7ca135782d89e5fc", "94686d5df14875ed800a9f710bfa43ba4eb19b75", "0269312a7d49209fd9f1875e24df6a1d178fb15c", "572dbaee6648eefa4c9de9b42551204b985ff863", "16d1e29b588fd26f5f0ac8038110f7b8500a1ec9", "e44d8409bb5233bd1822555bf85095a80e27fd49", "1a0e73f5319fe08dc92b049e0e3a82b8d052c741", "f5cfcefeb52bfed6c8f84b035824ce8a8112837d", "f4c029044afa6cb3b08d5e47701d532b3aed9a40", "eb161064ea62eaf6369a28ff04493b47cf29cc0c", "b2597f8d137b86920156b73fc9d6e3e64d597438", "64c1d9a031ec0e6785dc92edc0d00cc0802e32b0", "0313924b600ebb8f608705d96c06b133b3b9627a", 
"ef8f6c58926db1c7433ef899dc6a0eb554a37565", "0c24ccc6d6c386a8d555a81166eaf6e8d4dfccc3", "ecd473cfbce5f058a3c9388b220b21de1ece8eb8", "67aa8c2e7fd5b079d8940ab4c5a8ab4013e45205", "0c3c83b7f030fe661548d362ddf33f37bb44043d", "4adc7003141377fb4a854f948634599a823f898e", "f9e9e352862938ce6374381fa4a233ad01ddd3bb", "27f322483a2f5f98995d4f780084000ceebc6cce", "cab1c7561d49fdc3a9d519af9b9cdff7bf4c54b4", "182274b15e760537ae868d984155f5aeac88da23", "8ad407142de84b66144029845587c77ae94fd240", "cd64bfee5c008c1d96b3d0e440ef94270f50fe5f", "8b86e6297fc9b4804fb2b78622ca1e7c8dc952c3", "49664965ba3d988c327a05470fa7519f77dd639b", "7ebc96b4b7886b263808c2cd62b21158ebf6297c", "123bc74a006a75fefcdd9995cbdc1c6c64c8bed6", "8bf53b6a162ce625d0fd5840dc41e1c845b929c4", "7c0b4ace044848f8e156e5ec898847a075905495", "aab45f8c7a4338e80069e6a8611c09160964bea6", "898acd25d8ba5c073166fc38105fbb74128c9823", "dc9678f4563a865df780760dbf0e6ff4541fc0ce", "ef66ed8d8db41f67048d077fd4b772c8ba748090", "df353e3a46cca8c1ef274994f5a6dcb580231726", "7bd2f332a96fd64e015157d9564ada73cff0cf3b", "75cf72819b8741777a961157f43d994238219f5e", "99a1c58b075b22c6b219f543e1566b215ad08396", "a96b6e645a8d3eb8efc7358a852cbfbaa32ae245", "1116e7ce6565b3c2f3bdae80b11bec166353a7cd", "474c8e5bcbc744ff7045bfbedfdb336ad0ad12e3", "5af1e8a38b64c6694b9a34cd0b1596f2c905d3ff", "69efe30bbfa798e2b65e3d42d92427a8ffe9b0d2", "8b6605aa3c52696992f39aeb5ef584cc1765e8e2", "0a4ba4d5bd6e07a31fa4586322fd5e07d9f9975e", "a50099f5364d3d4e82991418647c727f0f9c297c", "a60146c458adfe9207f015d7a77cb7dfb54f744f", "69a55c30c085ad1b72dd2789b3f699b2f4d3169f", "79f2e8a93993d51574a14ee08ba81a73abd8065d", "7ade8aade0d464ea9a677c7c22a51d1f81edb6e9", "0779875eff440365184dd8bf44e9f85f78267c5f", "4f36755fd732684b977a041ee3b0acc3492e5b6e", "aed274a7e72bdd9250e22d22614b87c8b5bfe8ce", "d24855c3d8106b2809640579caff9fc5733aecd1", "f48b89fa0aa7435cfb7fcd801a51b2504b9c4515", "4332314ac4ab56153f68a9e55e92b3659e93a5b4", "93de2f92e71fca56254ebabd7ee64ecfb5d10692", 
"0900b6288b61bf627649b67aa82be017cc6ea646", "5520acfa1f4e678f1abbaab67ec76e903c3d3bdc", "269e098a1297ddb1e3fcd85524dc8272ef2295e9", "e414195d6e816a45e2b29c3694f22a707e29120c", "7e3b5d30b83a20c7cffdacf53b3ffbaf81002b54", "1623be99eb8266c9dab2300ac61bf279b65fc176", "cc622a0ac114821be935ca9c66cc177b93e18876", "f65896855e5df3db5422b57ab360287efa213066", "6b8c949cd7a42f1b9f3b614ca40676b1a7a4a336", "26bfabc72c937fd4feddf585b4989407dea90c59", "bd26dabab576adb6af30484183c9c9c8379bf2e0", "4af21922090f8160c528d9aa77a169503555716e", "6622776d1696e79223f999af51e3086ba075dbd1", "424745b006491ae2caef924287e50fc6706c06ee", "e6ee36444038de5885473693fb206f49c1369138", "0178929595f505ef7655272cc2c339d7ed0b9507", "942f6eb2ec56809430c2243a71d03cc975d0a673", "38d56ddcea01ce99902dd75ad162213cbe4eaab7", "52b102620fff029b80b3193bec147fe6afd6f42e", "a0d5990eb150cdcb1c8b2967e6a4fe7a5d85063b", "4698a599425c3a6bae1c698456029519f8f2befe", "4cc326fc977cf967eef5f3135bf0c48d07b79e2d", "f2c568fe945e5743635c13fe5535af157b1903d1", "c3d874336eb8fae92ab335393fd801fa8df98412", "0fa956029110bd82b34208cd18a77ca34d2c5eed", "dae315b084b9164ac68da26aaa73de877f73f75c", "0b888196dda951287dddb60bd44798aab16d6fca", "7e435d78693aec1b87b6f690a8716a60a5e5ff8c", "127b17fdd8860605680cfd053398fa95d12ccc03", "4ad702b784d0a2fef099a4f0336c92c92a412009", "cfbfdcf64b6b8b91460c5567efbffa9f68592bea", "7d94fd5b0ca25dd23b2e36a2efee93244648a27b", "22648dcd3100432fe0cc71e09de5ee855c61f12b", "2d7d8c468bdf123b50ea473fe78a178bfc50724c", "6425b6fb2465fbac50d084b66d93d5cc4fc81ae2", "b01ed5c62abdc37c7318c155e12e366238bdc2f5", "f44af3b10a67fe62fd26eb82dd228a3cdeb980e1", "878169be6e2c87df2d8a1266e9e37de63b524ae7", "82e32c8af359a8cac8ed4db9f26149ea3e3e7e6a", "fa33e20a3265d9a506c11a392cde9c367c30284e", "44444f7e0e76e5ccac18c98600ddd09722dfa405", "135fc59c8adb8d97a0a8dacf615f1b18a2102372", "ccf413e4a730ee228769c82a8af1fddc2857fbe8", "a838a1184cb9ca86ae910509bb318266101ae656", "f2889f3ab8e330e1ba6b23d493f8d727f49a9bc8", 
"0878d67f1bca06d3ea8a9354901fba9bf0135cd4", "177c48590469c62d430cf74fee7b5bd28bfbbc1d", "88c307c51594c6d802080a0780d0d654e2e2891f", "821864bf264f924ac7d63c02ad3fdfff3cefd990", "2c72096bbecd70000f919b1cec3f31a649c94fd5", "04d9913c3988b24522741ad9917108bf9c56ee74", "19242af1c54b2c876b3a930f2406b9553f294fba", "3836b6c5e29a7d0ff58c73e5d5c03dc7e8603819", "d00f6ec074bbe777ba2e419b39729283a28101c5", "3ff4784d3f28c87f41c82ed9778c8c919b486cd4", "4634bf44a0c994e2bed89686225f8cef601a0224", "1e8c87181ac8db93431a0c7470c71561e1ee565f", "b38e5da11281be44c82d184079d762c9d526ba2e", "56a0ead811a1bf15e42be8a9a007b0299636f213", "281486d172cf0c78d348ce7d977a82ff763efccd", "56762e5e7022562394179697c1dfa4fed65a7816", "b1b993a1fbcc827bcb99c4cc1ba64ae2c5dcc000", "06ba3492e3a9a2e98df2c81b91ec94787e3f97fb", "55a7286f014cc6b51a3f50b1e6bc8acc8166f231", "0e0d6a582ef1e422cbff5dcd16286d316b0eb972", "cfea594615bfa443cb517fab14180354da8a3897", "975af82c9ce82a1fad760d58ba0a661217689aa9", "6edb41364802b0fdd1e3e98d644fe78b1ecbbe45", "3e2588aaa719c63e48fe599a7f0dbea10a41b4eb", "5db46dda9f0f08220d49a5db1204f149bd4f6a4a", "3cec488a0910b69f50811cebe8c655dca22078d5", "f11d070cdc9ee12b201757ca4a50a3682967ba0c", "3bdcc99f45b58e4ddf4ffde5f58bea1ddada2744", "013ae78fc6bd26a13799fe2e07a6ad363aca9ba7", "f523c55a0a8057c5b08add761353ca79946feb07", "20fa38fca576d983b1658127d5cf058962b23179", "d55cce6ecbad2c6ecccbaa1cb0d14ae3a46b1454", "c34787b4708b34742774ba3abba8ace39c6b9052", "4d42d42de4445545b5e3045be296f917acd33ab5", "2803a7e8e6057d4e9462b37b258e670df61a742d", "cb6be69c67b0b15ebbda89a126f4dd62a4d32958", "2e105974d58cdefcc866c5f6ca73ea033881ddd7", "18ab6fa14ccab61f266b17e1055ee108cd2bb0f5", "60115d62f7d0e918af4d3040624df57353f76053", "71797806fb9685a9a743c84c9e859948f7c6a77b", "dc81be32ca84d43f99a4c94d4a686c84956d30fd", "8d203e9f7aa88ee167f5eb620a63dcf2dc64fb2e", "2068f66a10254d457cdb5fab74b0128b24bfdb65", "d45e856d22714d6ea7bd80a8c73d2be3b1f16f27", "83c95ca60cfec38be7c6b20c123fba2ea7eafd03", 
"e26b87ff2e1553f4ee0d8b657295187abc6f312c", "841471bf6f4c980bdb77e712f608ec64f8ad5833", "60eea6b85ba791ab85b198cfe7473adec29bcfd2", "ad0be3afaeb9308f3d9065111fa016911b771a35", "8abfda3c1e1599bed454661f15ee0bbe7f6b8c12", "fc3ee00a751ca4871e3ba40b81120b1bc3a57fc0", "4c130b216126434c8cd857431c9c4a7a7c10aca8", "0c07b1faeb4c63c603bcd124640c6ffe07df801c", "b19e8bce7a3180456f8748caabade89dd802ea84", "c50630e485d3c7785ea9e1f3bff35ea00e926a56", "5cc4c125e4fac9377c3dc6218bcb6b6b37d9af4e", "1aa52a25c2967b8bc228268c9ab5a96a32d2189b", "3dc0a3803c6e1c3c32192a5378100faa2a57ee3e", "285356448b8d6e4bd84c67758502a76336f30b0e", "50a8dc4c1d40967a95b684eb421edd03415fb7ab", "1b7a7d291235e4b6e5f97722124070feb26f3cc1", "a9d2c96cead937e53e614abb9fd051574a55c77a", "fe7f5c7da203c48aa1a9a2468aae55c6e0053df9", "c5b803c2fee9bbf6d9132f633de70332b5e80a4d", "b34e7a2218abd5894525a60ed4f106cb9c3dc1e8", "abb3df5b61dc7550db96fc112f98fb99a9db8c93", "8c3cf0c579a28890e21428fcad7f09175e65e43d", "209324c152fa8fab9f3553ccb62b693b5b10fb4d", "d646c276af63f2e56541ee7acbd9c9188d7a30f1", "acf13c52c86a3b38642ba0c6cbcd1b771778965c", "a2c61a7c5292af2e2b4e42f257ac65113decd42d", "6f3a8528841ea323d965d558195710fd8f916ffd", "b408b939c0f3be9cce0f84871a78a71d1684cd77", "3ef997bf6306d157c062f0744ea0d8ce8f390e2a", "ec21434615f72267d26d8e2d8cb7671561d26fc6", "46c65ee1a3e49bb77c7c73dcbfeb5d86db7fc3ee", "128335bef19faa51f127e6a07a434b8949f59b0b", "bc7a3573a464bca2cdca71f6f32e798464b85ee6", "a79b694bd4ef51207787da1948ed473903b751ef", "39836fbbcd2a664edb31119e88870c38b83df352", "5c8ad080ccb3f5e3c999c2948029f0bd005d5635", "02239ae5e922075a354169f75f684cad8fdfd5ab", "1d5fe82303712a70c1d231ead2ee03f042d8ad70", "5ce84883ab78e7e61a4e84a80cce8c86265f6ae9", "cfc14272b915828a232e29dfc2099f842b144974", "5a96f2bfa2deae2bc35b250251d5fbe82ef4932b", "5c107e9d3e09bc33c416cd214d3760ba8c32a470", "483e19f50ff47b0bf5e57b0cea65a7f084779b92", "9c8da385750db215dc0728dc310251b320d319af", "5e16cc5dc7ef8b4fc1320abbfeb838b4fe041905", 
"0b2c543e0c47454c4512569175094e6cb6ae02a9", "aed5b3b976077ecdcf3f88ffc511f63d9f9e8697", "3c4c5dbee4b4243487cee6c042e607bfad6eebaa", "f6c63f514d4f261a5665c01b6ba4ccc0c69b788a", "cb0047121fdb983e8189dd69006ddce89fe3bc3a", "3533a7714b19396bba8297e0ca22f85ac68ca18a", "592f14f4b12225fc691477a180a2a3226a5ef4f0", "127759fc41d62b516298fff2706dfcc754ff1ee8", "f51d9d34635ad9f6310d767869710e78fc4174bf", "37e1fc37a3ee90f24d85ad6fd3e5c51d3f5ab4fd", "c13291eaf9ca1b91ef3feb9d58a9a894130631e3", "c0d5fa2e57646f2cc7dbb9633261af7d20f8a51e", "6dc3b8a5fdceaea4b32df8552cbb5a22ef83c197", "542289d1acfebb9d79ea7a10c8e1516924e09973", "73200504c7381c48c900894455995b9188676cd5", "1f1b4e91c6e6699a2191d1d62a0304870163e48e", "2aa08ab3d6c227e3b071dc470a2f36dc5d4a2403", "3676c29babe1563ee64a1149d2ae2f9f1369fe25", "4e880a6f35844ffb16bbb1baedcc75ffac8dafaa", "8384387a3739280b15d38f39429aadb7c9bd620f", "f03b9b0895f5fb3351bbf3db4b1139af85650543", "0cd8d70a2476d91c4fd6699de0e106c94aa2d9ef", "a522ca53d9ed5e80314f998cf6719e55e0f36b6e", "c54e8c7a4f9c2ebd8787aecafa4cfdb35bfd49e0", "353480b21d5745590db5f70b016a27e25f5b9aec", "497243ed80033921c3c82c278780381a7d9d783e", "f08e425c2fce277aedb51d93757839900d591008", "8aaa97c686c60f611fe5a979d9afbc29dde3d33f", "0725b950792ddbe4edf812a7ee8cef14447236ed", "7ca08c7a1b61258a8f36435be7a96abde64be081", "919e827c449ca77bcff4ce5f2ccbccdab8399ac6", "ceca60c4bf1a5c4e5893ae6685e7a9f80ca47f27", "5bfec86bb67a1c49359e8a171917311d48688068", "977beecdf0b5c3487d03738cff501c79770f0858", "254a93cff4d0e5770e3dc519053c7ff08d72787b", "3573dd5b2982e1406f2ef6a1680149d4f9bd95d1", "2c2261212051ae0d2586b90715cc411344570916", "5060e2e7d94e002a5376f4edfd2e48ac01d6221f", "446f572df97f0b852a1a5f91015faf17944c1234", "aa3c9de34ef140ec812be85bb8844922c35eba47", "646fda224def3651e3d31c419f49aaa6a90686ac", "2953fa360c79f2c77bbc53c8154f49136333bfa6", "d92c9295a050b09db921b8ef986264dc5d7eba22", "3dc6f72bda1707e6a96174ff943991bb2b7ff319", "d45dc3546702db7fcef8d4863db319ca84cc8d3d", 
"4e2873a2ea525507f5cd08e54ba363b06bc10e0a", "4764257e844f11e57ff72159bdcfb3dbfe17816a", "30193451e552286645baa00db7dcd05780d9e1da", "4efc523df04fe19b600e372b9cfc9acf2e0b21d8", "ecbaa92c289f4f5ff9a57b19a2725036a92311f5", "41b2068f134adf9afb3dae2d8811e2d21f471e3d", "b1e8476673ee55f3e33bfb7c5f309032522c4c1f", "fff854b3d8f8e916162dc5451cf6f46caf50002b", "7d8c2d29deb80ceed3c8568100376195ce0914cb", "24be26a04906987e7958c1544834bf9f18a92571", "0801d539bacc3e3bc42e2e96bdb8f5d9bec67b41", "454dd76eb0a82286c054a6dd9d9413e09ad66801", "f632790471b2bed7ba7c28b12cda9360ec586a63", "0ca96dc1557032ff9259562a5b8fc026334997a6", "fc50c9392fd23b6c88915177c6ae904a498aacea", "5e07d6951b7bc0c4113313a9586ce8178eacdf57", "171042ba12818238e3c0994ff08d71f8c28d4134", "ddbfea5302fcb5cbc2ca4c498a592ddb063b9eff", "bc494a3442ec7adff4527e60947214c0015f3b3a", "f20f93a5b2291283c0e40bd0418927efb06acb6a", "da5075fa79da6cd7b81e5d3dc24161217ef86368", "40a0e080a01094cdb2174e9154540c217d3f9440", "e04428ce77d6d459b7063d6bda7a8f72a539f284", "dda1822872942f658b89e7e1c1ffe08c35e7b290", "20d27c336bef081adf7faffd42721b77a3c92508", "1cdf8790a675037579bbe2ee4f39f731f7672fae", "687ef116d7115498f12dff1b3338d959f164ef6b", "131125a5aadb48ec3eceb404cedbff713c401feb", "3085671f6232aac4492ad861d09334b8f3a7e2a7", "56fcb57a328caf184c1634d934271b18b86b53e8", "75bf811670e6aa344f1cb360d43cd6ba10919ad4", "1b644cdfceb4dae53f82b4eaedc39f912895fbe8", "1ad88221f308bf9f36775650f880f32d91ce929a", "20dbdf02497aa84510970d0f5e8b599073bca1bc", "10fb32ef34f815e9056ba71bc4b67a9951b4475b", "2a92b610d2eed67b934ef2075264e243e6e1ea91", "057b80e235b10799d03876ad25465208a4c64caf", "5a0bacb6246e40a3595a90a6c55ccf9573322312", "a043d23d07ffd2590308fcab3a137c44927093b9", "50bc8a4e7e6ab9837c6244b29ff800f523494d65", "11f515ead5b4a7259668f2620e808fa8ba5ea65c", "2a0cec7f0f8b63f182ea0c52cb935580acabafcc", "b50eb94ab9c9a6ecb76a40a0043a74fc48d5f554", "1432654a204391b6e2ec197138be0f7c8cb83ae5", "715c7187b27b452424379254f5dc55909913b339", 
"3851ed2e3c00083f68c2811694736ebdaa9ed8b5", "f496235629c02c98ad83b37d3d054ccfd0de0131", "ecf589ab160976d283d751a8dc407b6cdaff67b0", "f302185b1416d8b47620c67b3942a8675bbb4679", "9ed7d774684a1770445c1c53e276011a8364b9e2", "69870df2c7a6d2e2bfef201968aecd24eb18794d", "434a0aebf3522638d75614b0de1f0c2dcc1b19f1", "91df860368cbcebebd83d59ae1670c0f47de171d", "32c7e4f6d7848676922705484a00c94dac803af9", "980195d60ceb13b08c89c496ce5efd6f34d500ed", "f95616b1593467f5b11689582d934da34e6ad1ee", "7781ce5bb1b53533d2060aefaf8ddb95a6c77316", "fdfceb0fd9561723e604bed586bca9a8450c207e", "3efb04937f6d87ab9540700e04d8133102c67bc0", "77685c77a1fa39890006fe13f43738aac49a2c51", "d1d4c49e764a200bc90113b0ba9c34664d0f9462", "cb34481714bc7194ac108a1568d34e120f256405", "8dce6fa7a13cc94954cbc6be9a709a4ce696ead3", "f18fe123e9017f3a9ac7cd140560a30b5a9b82c2", "2fb71cb0f08102fe8c9ba5929c1dc96d87737039", "4946ba10a4d5a7d0a38372f23e6622bd347ae273", "0e23229289b1fbea14bc425718bc0a227d100b8e", "40e7536f43c8a2623ce27e182a0e66028b58de89", "08dc94471605308669c8d3d8284ba94fcc93e345", "aeaca298275536a93cc43123c487837c1ceeb1dc", "8628edf89482aef7fba204f3f0a9e9f5b12ec477", "9bac3639b2671dcdbdbbd36e8e9022d7334a3796", "11d9bee72759e23f19117fc8cbb60b487e8ac79e", "c8dc902b82831e1f1b587c590cdc34b5d12bdc5c", "f09432b7f470268c28d3d4ebd17a44773b678900", "0921548f06db5d4959126c823cda0bbeae542937", "ad6b23435649d3d88a6b33154b9e6e3e5648a33d", "01efec88d36070dc3bc49f341a77476f74d373bc", "cfc30ce53bfc204b8764ebb764a029a8d0ad01f4", "efa65394d0ec5a16ecd57075951016502c541c0d", "0bdff80ffb4015fa12951f14c9d7673dd915fc81", "16d3ec32a4de0a75589c667a6b90fa79eb3a38c3", "6a8d382d34143143e98b040e006f473bd450502d", "06599d41a3256245aa0cb2e9e56b29459c2e2c69", "b6ecc8d34ebc8895378abe2b8f35e3a0691f5d26", "74b9d1e80d3df707963fad57c50d7c25936da535", "facdb71e8175c33ec54c2248fa6cfc319e27cfa5", "a1eb455fa852fb3ee14eb0907a7db9081a42b3a7", "0c8d675bcd4489e886f35bee2a347c948ffee270", "5fa1724a79a9f7090c54925f6ac52f1697d6b570", 
"58da24cf5db383781a9803a4dbe97e443c8a3b29", "dd900526f95079e6532a26d0423357bf8ad43afc", "25c56f52c528112da99d0ae7e559500ef7532d3a", "bed7834ae7d371171977a590872f60d137c2f951", "161876ad06d5a349ccde4b4db3d3759ef43268a8", "0c2875bb47db3698dbbb3304aca47066978897a4", "35cbf049074382e757bbfc8cc45ccbe467833a7a", "4a9831e5fec549edee454709048a51997ef60fb7", "8c6427cc1f4e1bbe5d6da34a4511842361f4fbb6", "7f65bbc93cf414d4889773b697b1833e85f0a15f", "510d2879c03a2a0fa01ac6d6b95eb1067f2d1bf9", "a9e28863c7fb963b40a379c5a4e0da00eb031933", "813e9f76fb9e3f007f0bc819eab66b0b5fbd8204", "ff311fbb5600234fd639c96522d1b450b6190cdd", "48b38d157272f03f6b44c0df61130534d11d8569", "178b55ded04d351c5a7df2e94a81aa3051d7fd8b", "2f52b6cd87e6d72a11168fef0865743dde9ea0ae", "76295bf84f26477457bd78250d0d9f6f9bb3de12", "0ae74fabc585cfd1cf60ea3f9e218c59a4539091", "5aeaee0e3a324970c02ae8463e1b358597457d03", "cebf72d51bca4aa26c73fc76b5ac9a126dbf6c3b", "96094b030013ca2d9b6d5a14b6f1fbbc57eb8a89", "39e7ac344b17d97267ec80681aeded17e3e6d786", "ea4098d86802dff863fe9f91cbc75b195d452d34", "7605857f551d128e7c3babfc019950250f81bca9", "9b17b9c40ea8bb8904b782e91627c1f022a5574f", "f0e26f749fb67182a5d3864e62a3460ac333e5e4", "2b2ba4857991c40fb854080dc5f9e48e60c35e68", "5e821a4d0e26db7ee41a7f0f25036ba4ec094ac8", "c5cad01443d4de135250d2784f0d070defd6120a", "1d9b6745c0fd793db6dda8975b498ca517961d25", "b569f22ce779d221ec008c0baa354796d71e3d80", "0bc9f1749e23b37ea5b5588c5bfe23879174d343", "968ab65077c4be1c1071120052b2e4b4f3d3c59a", "3e1aa21ab4a5c242f54f23fbbeb5da29f9a965a6", "58ed094f1359394fa216e957bb48a726862165ce", "9af9fa7727df11b86301a252db8a916c3a516a8d", "fd18475cf9165b33de1587a303fc68c5e77ed630", "2081f94fb82ab8d05ca92742fe949fc97147f926", "a05e84f77e1dacaa1c59ba0d92919bdcfe4debbb", "ddfde5d6f4e720aeb770a20e4197db3a0c279958", "61f0cb2e3fdc6a5d0719184e51d2dc483a945ac1", "9a9570bfebd3c970879f8d99804e74093d9bb6e9", "40f7ea135907d2f4abeae0475d9a88477239d504", "720e06688e1038026070253891037652f5d0d9f5", 
"ce6dbde2ad8b5b9aee2ccf4a7e33e63ccfc3689a", "bc2856e70ad3c8fe439dec6cc6a2e03d6e090fb7", "d972b4da29aebf5da7c02e77a9118b0f60895985", "391af839051826ec317a6ea61010734baf536551", "f3b3d2c0d1d84a7f7bbaaaecb58457c15a947544", "adf38763bf75fd7adc60ccd4990f604e0863bc8e", "2c69688a2fc686cad14bfa15f8a0335b26b54054", "d7c6e4348542fd2b5e64a73d9c1fd0172e2b1774", "7917a7549f00306db8775d2d559460fc93dbde5a", "fbedfe317e60e5ec83c8fd0554bc345404ca90f5", "4265269bc894caa97efbfcfe5b83da7413f86a30", "02218fcd3aece5a7bd19255d74b12f63dfa5c1a7", "4efb9e426e349968523e1b1cdbbdbfd3e1912f84", "2c3c72fffcbbf66cbb649b64aa51199722140ad1", "29ce15f6520d7427cf1c0cce62e49fca0f40c19d", "224d4cf75e8baf32a795f38ee8ccfdf82e4c5a70", "14934f05299ee02675317cf65de7661970f80421", "d814981606fe5954148e45c737f1debe7b5b36c4", "28c4103d1e27b4312115d3a6baacf3afbba01a55", "eabbf37742b79147c3bcf42d376dbceaae869a01", "afe3a0d463e2f099305c745ddbf943844583795d", "96f0908cc138aceb2d5e0180c440e5adc711d855", "86e5f81bde496549e9df2b1abdef0879a3135adb", "4a53ac7f99a42da17a7f1ba04f5c6d6831e31151", "b7216846c743d94fcd43e1b543c9d16ae11d3c48", "38525bca4b1c5f9b8108743f57fd468492713bca", "08eaa845a72a2b78e08e58592d8785942fced649", "e1371af87f6d5e22ef6d8c5f9977f5e924f176f6", "cf393385803f4a8501d0690250c848c7149338ac", "14abfe2c7a94bd882efb78da387d8973ace54c0b", "81e31899aa9f0f54db069f0f4c2a29ed9587fe89", "36091ff6b5d5a53d9641f5c3388b8c31b9ad4b49", "0410659b6a311b281d10e0e44abce9b1c06be462", "6e09a291d61f0e26ce3522a1b0fce952fb811090", "47ce78c9f49248a7d1bd395befb43e45d89555ee", "6f8de996c9659459d4dc6a10cb3d8a43cb846422", "55202f10bb1d7640b0b279a4cdc8e9925cd9ef81", "b73d82be8270db40577b002789a26e4a226df1ef", "8a8224266b8ab1483f6548307ab96227147f34da", "b711d50a6c467f3db266f2199a9031f7391b184f", "afb1bc830febdb9893fd938fbdb20856b4ff3922", "91c184e7fb0c7cce5319b8db85c1488b3861976f", "02607f5d3c7638d0207279d96f39d435f102bf4d", "fc72b2bb34f6a8216767df80ae13e09d1ef0ebda", "007ab5528b3bd310a80d553cccad4b78dc496b02", 
"57417c4a523d93801c8901d6f3c3740eaa65c9ae", "77882930692d41db107430a5a524ff5e4bb2ee5c", "45954ed44b99edc5f0d1100a1ea33d856602d78a", "a1f1a06b840558c4433f0e06a4e9172539469e21", "0914eb61b743300828c84f9e235ce6165a171be5", "ddcb77d09e4e9e2a948f9ffe7eaa5554dceb8ce3", "56754b3d841b31dc5fe2cddff5a1242786411e63", "e4e599fa3ca042d30321aa5502a135c1de87e688", "247b14570940601f5c7a2da1db532ecf1c302288", "2e17cf6a339fd071ad222062f868e882ef4120a4", "18193194b7000f442c9df5ab16735a1f3ccbb630", "1235dd37312cb20aced0e97d953f6379d8a0c7d4", "2d36f8444581d806ce6e36ec1d9bdede193db005", "9976b88d15f89b6c82b16564735d489a7524821d", "f565ac8e175e4659fadd3b5b6507ebac2d90a2b7", "442b6114ae8316c95f59acabe6de26f2b569cc02", "de2e8127105a37ff1f59be13a010ab0d3f4fa650", "8fc21217ee89c505930b540b716b11bab89d3bcd", "b502931168ad9cf1dd08721fa6c0980c24b34c03", "7f836c047bc86d52e3a28098b53311cb2186acaa", "20d320529adf99aff7ca7bd562123caeaa8e7af7", "e2113e6c136c87802a35e75122db7e4e57c9774d", "dac07680925b6c56b7ddf184dbdaf143a5d4816d", "b9bed097cb806ba48cd0245ab50d1a123022eafc", "fe7c0bafbd9a28087e0169259816fca46db1a837", "6a69b790a7ec5a396607eb717da2b271a750faaa", "ca494a2f20c267210a677ed9c509c4570f420fdf", "8e416d760feb5f23bc1a6dab98eb1f6e75ab8907", "0e031312cb6e1634e3115e428505e2be9ef46b75", "e6aadde93aedc06525523415e574507cf5c8cc44", "9ca2dfe8a6265c4f6ea12bae0e7ff6ffc9128226", "a036c45eea6e1985ebeca669179cc5e8e9e70019", "97add9744ae63c5e7af9d9861ecc18a2734d3f0c", "d84dccf9afffaf4e0cbb73f1ade34362a9fbe770", "cc2df3a03ee731478ed48838c284ad4548563308", "9c7d3d2a524aedb8bf687441f26dac5ed8c490c5", "ada13fd37da7a28e74aaed4a413533fa4f4b3b37", "a8d52265649c16f95af71d6f548c15afc85ac905", "4b8f805e18c205916285c4a8ca5f233cb8952cc8", "66c84abd01fdd84d9cd241dea8e487580f4f8922", "983534325c649e391fefe87025337187021b9830", "36918b2ef6b20ffb8cffe458c0067742500c6149", "212165422ce25ccabb4d354fae2d2352b60f2b7d", "7b66dababebd800e95d23a1fde299d44a52e98ed", "71766bf224d5c74a0be6996b38d8885c2eed5a2c", 
"af6c3c4826137ef638ded6ea1664e14a53d23798", "07ea3dd22d1ecc013b6649c9846d67f2bf697008", "5ffa8cfea2f5bea0ec7cecfdf76f9478ca87df89", "aadc142d4e216432899326c7162540955f8b5590", "b46e7d361a030f96d54a9717127f17d0cc833e32", "b2d4ed138816c671c3f698290557d26600377025", "828a7b3122ebd5b8b0c617902bc04ac5a6c60240", "bbfe095e11ecfdb9d9e8577e119bbd67170d6925", "d5c6c0fb51947a2df1389f1aab7a635bf687ac1d", "30a3eee5e9302108416f6234d739373dde68d373", "107c5030f2c55e0a7cf4c6159cbbd4f719b0d9fb", "0574dc64c8275b09ed587dc3977f4d3c990bd4df", "02aa54dbb461f6bde6fe8ba0591c3c5cabed7e59", "ce57cc478421adf85a9058a0cc8fad8ebfd81c52", "931a70ec0bfc1d86894ff37a6f702a033e0129e3", "05ce73c39368aca1d10ab48dbe0dee80ee084bdb", "03c6a002268c066fd6947452533e6b316f8576a6", "e1e2e32f29cf7d23881e98dfe018d9049bdb070d", "aa959e6402dce927b46988e50f1662d8762bcd1c", "ffd73d1956163a4160ec2c96b3ab256f79fc92e8", "0b4d3e59a0107f0dad22e74054bab1cf1ad9c32e", "8db9f32b0de29cfb7fd8e3d225be47b801cc9848", "fe1077f6b79e14457db77d7477a477f40f87e7e6", "d888895cd56d336aa1367fac8072da782bdbc0fb", "86c158ef6caaf247d5d14e07c5edded0147df8b7", "5171157c2c09a85ad6558c5c03da6b75b0cf5fe6", "b755e80ce1985fc300e1983adefc8f14830702c4", "281e961f0d8dd6251e3124b43944820faba8a53f", "00fe3d95d0fd5f1433d81405bee772c4fe9af9c6", "f9f08511f77c29ff948e146434dfb23608d3deb5", "4ca8ff09f24f0838022f1d0b94af4331f6e538cd", "1a86eb42952412ee02e3f6da06f874f1946eff6b", "d1aa4a707be30f68f237eae0208cfc0622ea299e", "8bf647fed40bdc9e35560021636dfb892a46720e", "c127ac138a22c155a79f362562a52c070e2b4022", "10be82098017fc2d60b0572cea8032afabad5d1a", "3d5187a957cc90f4143e6302786d65dbedf7d9bb", "2411270f111a160c9289d56132651c896a5738f6", "9709d362a15414b062efa9cf4a212469af803a7a", "d13bb317e87f3f6da10da11059ebf4350b754814", "43bb4b073f7b2b9b626c7f3263cc61932271ab74", "68c6df1249e1ee56835f79e1877506a16d8418f4", "1504eae5487e1e062fef96e1e424de5d3a5a3858", "2562d6ec0044eee9d604fe3a351f80d4d10d4a3d", "caf912b716905ccbf46d6d00d6a0b622834a7cd9", 
"4af36d3ce93f7ed82a7dc321fca926d540691b33", "0c6602439185ad8268ebcd99d1ac4afd66fb4c7b", "bfe9560daea296350c9fb4a9b2b9bf9d10fc1a3e", "1f69fa423b076e19dc2ccf6bc9013f09ae39133c", "7ba0bf9323c2d79300f1a433ff8b4fe0a00ad889", "6b6afc9557dc0670bf2792bde4c4389ac52c707f", "20289282fedfd60d9d4a7153f460f5c8e0a502b8", "e64e5449d0d10cfcf63edc8a0b28fc96d09d3535", "5f6116b6e5f21da66a304e9f59f3e224e188caef", "0389a3b0fcdb4c244628e603ffaff620f6575bfc", "b341a33f098ce9dc6dbf5c50e8a1f7fe43fb21f2", "594cd8ed19aad3ce29d11c74d2c5fbf1a864be0c", "810eafc9e854ea9b1d7a9e9f755f8102310d5db6", "b2624c3cb508bf053e620a090332abce904099a1", "03a721080a69be37db3a2d56c006c60f472b419d", "ccc073d9894c0678e995086e1ca4d281de84f0ff", "8befcd91c24038e5c26df0238d26e2311b21719a", "141c9f6817331a6f3cccf82ebda5c8fd66c88b98", "2fcd5cff2b4743ea640c4af68bf4143f4a2cccb1", "14401d4aae737a3ed118eca071f27f11dac7eda6", "6e025c0415c9ff0705d4e4439a48e8fffe7d44c1", "23ed7f18100717ba814b2859196e10c5d4fed216", "d48bd355d091e7ae75ade4e878fe346741e7da1a", "d1f9047dd3a5a086131582b4e5811c07220b2361", "9cd3ea5cbbe0716fe19ff750940222cdedb22fc8", "16f341786f7fa8b117e8812a58742771c089e68f", "72a2b7e46c51cd65810c9b511996a1776405eec8", "500993a8852f766d4bac7b5039b9072b587e4d09", "f55deed4fa5d6d806790610dad9cf7505c1adde8", "b22e36568321a27ca8a2a09e9a6a3fa6cb89507e", "8b9db19d0d3e2a7d740be811810a043a04d6226a", "bb021f58f8822d12f5747d583a46005ade4a0b10", "7dacb063f783df07f89934c962c3e170acb166cc", "2e9d33cba9f547a2e3febe088bae443f1d74d594", "265644f1b6740ca34bfbe9762b90b33021adde62", "cb94ea16f12bde2de91d3cf3fac03a20b02611b1", "2cba9d9157da4e976827959ca65626d9c5cd87fc", "2a73b610bd8d670f3b57debcbad7930db80f40e1", "3d22f972448a2336677ae6ff2877fae010c7dfa2", "2fa3ad0329386bf9f55eb2c011e031ca71a11299", "e8221cac88d0f892d33e23a1f2bfc6ae7f67647f", "8fdfd4c5039cf7d70470a2a3ac52bfd229bcd4e2", "55f2626b7250b3b24dd0d2bab3ef3c3bbd9b3758", "880760777e3671593ba50b7a17b0d30b655fc86d", "31ca0d6488a27a140263291c51ec924b8a49967b", 
"ceac30061d8f7985987448f4712c49eeb98efad2", "7e60b357a668b685203d50742d25927b71c43c2a", "31a22514efe2b25088a91d8d4db9bb31ae1e9575", "3ea3bbdc9aedd24fe0b5122e04b1d59e7e14135c", "3b1b94441010615195a5c404409ce2416860508c", "5146832515ba8b4ad48372967d9fb7dcdea61869", "a941a3e8299fb7897fbba7467a52d14e13e7a706", "8616ff1d0fd7bcfc5fd81d1e8a9b189c21f3b93d", "1a645bcd029cc5ce21b973146f21a9655047cc96", "d03f1257066ce5dd843c6977858a1daef0671f3d", "d6a4a34829b3b55497210ddbe88ad63ff801faae", "bc9f3c466c6f6b386f4ef1195853d498cf3c182e", "5e8a7a2eef68f568c023f37e41576fa811e5c628", "4f09328793b907074adc8d4e10d2d763d7a4b513", "004dc8de3a6832c8d4764144570dc122b5265ec5", "42fdc21ffa22ef868575cd1ca56179e503baa103", "3d88b669e7a412f765f1dfa54724937b8f563611", "b4223cc72543656c28b55af1ffdabb1e47a0f2dd", "7f33a5fcc5db4625c66972f0e6f06540b64d4f1e", "63199f9d0034e82a0a7c9519d1a5bd31cc9de39f", "102e7bd7660357e1814c821c7f697f2eccececa4", "f6ef7200c08170aa1bf68a2fafed10bb4296c595", "3aef744dad3982a7ae1ad97b4f126b6772fc3d07", "934350482f3f19d431f35960a14dc249bd069303", "25bdcfdcdd9a944ce5adb8d2663856f242c580a1", "414315d44a489d09c6e1933033ffba6396974ee1", "f196a79c5e4b570013e4aa031cdd0fc0c98fc07d", "2609626519d8fa0ccc53bce49a3a21b928deeca6", "0ae247153afd87f98829359a8b5df0f68d788d75", "cd32d7383b1e987329d2412f2907b7db6dd8d396", "3cb8128b41b419a1fdc7a95bf8e65a37aff79676", "0dc49271dc30794c8d4e7f9da025880fcdc8498b", "91b8f1e4299b0f7ad716ece76565c6689d5d1b98", "ae6193531d42fc20c9c991143ce323034d7aaa8d", "bbf56398dba5593a2aed1c3857fa011442b3aed6", "a9453721f35f364e176a5aaa7bdb622f72fbcaec", "fd069af1ede370625703f7984e52f282fcd6342e", "589951bd421e2b701225fe6626fe980d94ad2770", "1d82e7736268917cc3d87a2ee0896b03e02a5ff6", "15f57134b42638cbd57d0d8c4437e8b6b6a8bac4", "cf280435c471ee099148c4eb9eb2e106ccb2b218", "89d02ceae9e972eca633ae6ff9da9ee8a85fb171", "26eb2c900814707ae962184ad4173e754247a80a", "f88a0f44ff7ec5fe0facf0facac0a094c7bd6cb8", "ac0d3f6ed5c42b7fc6d7c9e1a9bb80392742ad5e", 
"5aaa84090c50da903ea1d61495c0fe96a5470909", "9405a9180139f23f4dd9d90aa4e86944b35b8c88", "77a0e3e366e061b0ceb4a7a901ee18e420185447", "8f3da45ff0c3e1777c3a7830f79c10f5896bcc21", "289fb3709475f5c87df8d97f129af54029d27fee", "3f55d26dd638c849745b95e912c28d88445ba5e1", "f827b596b4099b0490ab46a9dd2922db2b708963", "a0e9064d59cb3b23b425bb954dd8c77fdc8637c8", "532837c431617d37c03361ba5a7d5fdb082c55f4", "d353b30b9ca3124fad08e3bdc8167dfe994efb34", "53a41c711b40e7fe3dc2b12e0790933d9c99a6e0", "7358fe63042e186c03df0fb2d5f933eda94cb36a", "d372629db7d6516c4729c847eb3f6484ee86de94", "37f065b8ddcfde6d7fae8fee81f98d5657360808", "799537fa855caf53a6a3a7cf20301a81e90da127", "c99a23a5bb5d5b10098395f59e9f8f79c79a75bd", "3402b5e354eebcf443789f3c8d3c97eccd3ae55e", "f54d9dbad1f60de83485232707c945f209af867e", "6d8eef8f8d6cd8436c55018e6ca5c5907b31ac19", "878634c30842b5812c56fe772719424bab69e7ad", "e2b2bb648cfb60ee18bd66bc6e8a6f9daf7c9d74", "81363f85b2827c5d972b7b0691498464e922fdea", "ebaae38a09c5a4909049e16af759c71db9cc87dc", "08e69a871487b52510699c07859b4aaec122d3df", "55ca9fe4ae98904bfe026d22dcf1420ff9c0dd86", "fc5a100c117cd7291d626f1ec3402bec235f2635", "0074ccd17382bf077bf08d649a97541ad64478fd", "3f10b9d98a276fb9e21e5742ce88bc7f48629715", "c1655b32a30165f7863ece52c54270662a28be0e", "d1f3fb7e8be9d8db50f29403ffbbf6ec58623e61", "ca8c296c5c74d351d866ac317d9680626b0bc6a7", "a212be7ec1ff75ecfee52c7c49c73d7244a87eb7", "05f3f8f6f97db00bafa2efd2ac9aac570603c0c6", "0e96646eb97bade66848b1fe50a9fc6ab946ed42", "2f43bfedb8cffc9e44de9f95db80b26395a29cc8", "014b8df0180f33b9fea98f34ae611c6447d761d2", "978b916b76e2b8e48e95ac6044445529c61c2735", "dd887e7c94e0eb7748f3aea3c3a85cc73f94c11d", "5da3bb198b087c15509f933215b141de9e8f43ed", "dfe7700ed053d4788ecea4a18431806581e03291", "e1b029a76c54196d63c7522a73a3709512687eb6", "c9d3b06d71f69dad7a9d3f312cf9dd008e2634ef", "416c647cd9f8c1d77db8676195dff7ae5dfc1fd8", "687fd3caae9e71c7d2f0e5fd922dc1b6091c159e", "c8e029d94dd0766a5356cc00f033c8604ad94327", 
"5825623a0768232da3ce121efe223638510420b4", "0bc3e6618786c5133b7f8b0033f8917e61b42a91", "f4a0f08880014331858ecea7ea9f8d27a14f0b2a", "04134359b72232feac6d3b9f88a8e99d4e60c608", "c7b2547f41a51bf300490ef9d1e5988082466bc9", "cb160c5c2a0b34aba7b0f39f5dda6aca8135f880", "cb8ad4f9a50ccd46802289fc9199f5016b15fdc3", "3b5afa25af267061f3a9075c1029b0838bc4d94d", "1eb596303ce1f90e8070090be02c768e91fd75ed", "ae818858a88299090748446b8662e68628612c65", "3596c23a0f13c36d2c71c4cba4351363954dd02a", "29619496c688f8400a90fef79b4fa756967ed0f7", "2230848e506553159e0edfc20472b8cd6084be17", "1c2802c2199b6d15ecefe7ba0c39bfe44363de38", "e96a3d4df7f6956ba185107747c3d7c16d1ed845", "0a3651647cb44b87ec8373ab4a1b53e2ac352bc2", "19c53302bda8a82ec40d314a85b1713f43058a1a", "022d74ae2f8680e780b18e0cbb041d5c5a57c7a5", "cd87fea30b68ad1c9ebcb71a224c53cde3516adb", "484c2617471fd742c4806f9281e5add45c6831a7", "37aa876f5202d1db6919f0a0dd5a0f76508c02fb", "2bc5a49e6d8351e276cf5b37fcc0a6eecebd5797", "0ca2f48fad7f69fb415ecbb99945250cbf8f011c", "815e77b8f2e8f17205e46162b3addd02b2ea8ff0", "bbd9b5e4d4761d923d21a060513e826bf5bfc620", "10d255fb0bb651b6e9cc69855a970c44f121f2c9", "3e682d368422ff31632760611039372a07eeabc6", "4f58c42856f1c23f15833d86721adae76215a023", "e772c6ea8a30ec21c3ede6c8abfd540621174fc2", "ce2fd44a8c43642b76f219fe32291c1b2644cb73", "5ade87a54c8baec555c37d59071c6fb4a9a55cf7", "e94804b7f2515740671a678239eccdb79a050272", "c20b2ec72ebf798e9567a145465e37a755fc34d8", "b7ac537d97efcb968ca8e353ff5b0563e26b9dbe", "343d21ae54b45ef219ac4ba024265eeabf4d6edd", "07d6238d8f8edbfe0fd2887fa0a7939735f21e13", "7d7ee56f28688283ab9958bcc94fe88413fd89b6", "4065d038ecbda579a0791aaf46fc62bbcba5b1f3", "7a0cd36d02ad962f628d9d504d02a850e27d5bfb", "a287643d3eddca3dcc09b3532f2b070a28d4a022", "2bfe6128731674488249316cd2db83fe9045278d", "e27ef52c641c2b5100a1b34fd0b819e84a31b4df", "c91406fea53e869e431dc9a799b989321a66c9da", "be0bd420b78be8dfc0aad65dddae10ff1ec30a94", "03ed6f09a29fe5d0dbf6d59798f88a5311c966d3", 
"0e46943b2b12a8df6a62202651555a1d464cebec", "186e933fb752b5644b4ea6fa3007ff76b6bf125d", "27043999be114bacb4d53fc6dd04316727986de8", "06101c79b9eb53ac1b85f8d8b556f02a80603465", "7b2e0c87aece7ff1404ef2034d4c5674770301b2", "e3e36ccd836458d51676789fb133b092d42dac16", "4d6043a25bf48c6fd6aff6a46597fe1902a9c6a7", "add34d4cbb4ea6c7aba4d4149a8ddaa5c2a00f40", "2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e", "aeee02b8c8bb749a1203fa634407319dd6874667", "e1dcc3946fa750da4bc05b1154b6321db163ad62", "95aa80cf672771730393e1d7d263ab6f6d6e535d", "25b83cffddff334d78c55db4d67c65b1d8999b2f", "acffa26dd7e988ee5c0b339eed0b2b4e14567790", "978b0c301383267a8ea8ce4f9a4c58bae236bdc2", "1a0a06e659eb075d414286d61bd36931770db799", "123286df95d93600f4281c60a60c69121c6440c7", "86e87d276b5b01a6b4b09b5487781fab740aca2e", "95c1a75417ec4aef764a93c3d98589d3a5f93c8e", "c570f70459af243af9ca73709646239d82d07655", "48c0059feb14ca3deedfa7e3b53fbc34bd6d8efb", "6afccf6c6cebfaa0579a23e7cc7737837b090f0f", "674fcadf1b895e3a79380d3ac5afb43d406fd31a", "833b6e61468fe655b5067ca91608fc37246c767b", "d69719b42ee53b666e56ed476629a883c59ddf66", "3b14bdb0b1a7353d94973ef4c1578e1bd4a4e35e", "23afec5c3edf6c65fc28d360a82820d34bbdc8a8", "eef0be751e9aca7776d83f25c8ffdc1a18201fd8", "b613b30a7cbe76700855479a8d25164fa7b6b9f1", "39c48309b930396a5a8903fdfe781d3e40d415d0", "834b15762f97b4da11a2d851840123dbeee51d33", "3146fabd5631a7d1387327918b184103d06c2211", "74408cfd748ad5553cba8ab64e5f83da14875ae8", "6dfe0dafb4ed4bcfce670f321e724682ab261060", "a07c3e6fb9036aa0a04c37d065c49d2e4b46dbd9", "b6f15bf8723b2d5390122442ab04630d2d3878d8", "ab041f1ce47627ef8a0165169bbb226e7c8a6fee", "0021f46bda27ea105d722d19690f5564f2b8869e", "4f3fe60e2c806505ce1122973a53da721a543d28", "f24e13dd7b70daaeca7a7395cd83c2ece4587b20", "43325f1459ddb03317adc2a2f83b44403028eafc", "28af188e26836934c9beea8b2bc8cd53447197fa", "2a4984fb48c175d1e42c6460c5f00963da9f26b6", "102caab9bdf31c1bb4838529be45608ef29efbbd", "7b618a699b79c1272f6c83101917ad021a58d96b", 
"82a7bdc2ca2ba706446fb1b1c8696e0d0d7cc8d0", "76fd59062e563353097694d38855e94efbd53143", "d57982dc55dbed3d0f89589e319dc2d2bd598532", "77d929b3c4bf546557815b41ed5c076a5792dc6b", "a5acda0e8c0937bfed013e6382da127103e41395", "cb83430c8427c4c57cf417f131b410401d64f232", "1ac2882559a4ff552a1a9956ebeadb035cb6df5b", "58823377757e7dc92f3b70a973be697651089756", "9f44dc7897e91a539ad97a4c6cab609213114259", "ebf0259c7b95a9ecabe67507345742d45339428f", "938ae9597f71a21f2e47287cca318d4a2113feb2", "69291d44eb4fdf848a06defe99a74cb75026c70b", "5083c6be0f8c85815ead5368882b584e4dfab4d1", "35c0954acde9c86df8bbcb6edccbcd702796f5eb", "7e507370124a2ac66fb7a228d75be032ddd083cc", "bbe4d0db1fdb0da1a5550c889505020789e3520d", "9b318098f3660b453fbdb7a579778ab5e9118c4c", "1dd56ce2f980283a9f167b9a84c3d42a47481524", "f79ab9baccd466d86460214c5cee9f3be0af4064", "e572c42d8ef2e0fadedbaae77c8dfe05c4933fbf", "1e7ae86a78a9b4860aa720fb0fd0bdc199b092c3", "0f5275b472344dbfc4a26a9ba73dff23844b7e84", "f575acc7e71aa7c02b5d747b8ee8293fb4bdaf98", "a54fcdcb02da0844d28b3191145bbc99675714df", "0f4b902a2e12378e0ac0cb6fff7dd4c5f81e2c0a", "89d38db614621e925756db6e9b068fefd812d4a1", "832e1d128059dd5ed5fa5a0b0f021a025903f9d5", "0ddf59fddd97a40f77ba169298bde80cd38d0d82", "548a0523c9e66b793f2145dbd05dcb4d32fccfec", "abba1bf1348a6f1b70a26aac237338ee66764458", "ed416bf3e3a5d8e1259415eafa9ae20caf25c870", "f9bce7bd7909f1c75dbeb44900d374bc89072df0", "0d3d290e93ac76d5ef2d6c8bbced79fb3101ad36", "bf87e84403589f33b7dd076c6e34b0c7eb39a7a7", "8359f65fd0e0ada2a3de8aead37a6680b53de2a6", "d8bcd7e5764c0b749e0aeee5569db1a9dc3d54a1", "02e628e99f9a1b295458cb453c09863ea1641b67", "c6badb2cc1191f9dd5e5bea7df75a76349176d01", "ffe8a4cef9dec30ddd2c956c2f63b128a4568f84", "8ca29760334b7bdeaa7ad7ae4ff54c3b24420dd2", "037e17ac0272b4db0d4761067dbf0ee56d91e6dd", "b29fa452d737e2b6aa16d6f82a9a8daaea655287", "51683eac8bbcd2944f811d9074a74d09d395c7f3", "39098c6c2b365605b16ca9d5f67fc0df2c4fe8c4", "1d59ffad091a5bffa5fe935b79f5bfc08d2e802d", 
"fed90d84dc199d7bd996caddd1cab26ded89de98", "598744c8620e4ecbf449d14d7081fbf1cd05851f", "c8585c95215bc53e28edb740678b3a0460ca8aa4", "528069963f0bd0861f380f53270c96c269a3ea1c", "f898ac4c468d46fde9f6393541759cb8d22a0ce2", "24f3dfeb95bdecdc604d630acdfcafa1dc7c9124", "69a26a8d376f84ea965f858d181e878b82a47e6a", "b1b4b030b405b35689f6405e400697a2701e8119", "324cf94743359df3ada2f86ee8cd3bb6dccae695", "91bda4b5ea44084f932523a12ed784ead126950f", "8b4e353fe825725b53fb42e4ece889b7716d5b41", "d62356b0b6490aad238adf0a29016140245735b3", "ba597c956e5bba455f60276a1de92a12707927a1", "7e0c75ce731131e613544e1a85ae0f2c28ee4c1f", "a9215666b4bcdf8d510de8952cf0d55b635727dc", "b2b535118c5c4dfcc96f547274cdc05dde629976", "858ddff549ae0a3094c747fb1f26aa72821374ec", "439ec47725ae4a3660e509d32828599a495559bf", "c9b36584dcfd69ef8a765719d799090fcf0a96aa", "f472cb8380a41c540cfea32ebb4575da241c0288", "1586871a1ddfe031b885b94efdbff647cf03eff1", "7ada60106605bebb66812f85eed16d64d1acb972", "00f0ed04defec19b4843b5b16557d8d0ccc5bb42", "6ae9f4dc7433ba3433b39ee932b22fd57922c2ee", "5d5533b8b95f25f63e07786cf3e063c8db356f1f", "20260d36506911e04ad1efed1e60b06bfc178d52", "21ef129c063bad970b309a24a6a18cbcdfb3aff5", "2ae40bda27db2da1c05f3e71ce8d1c809a909bbb", "4398afa0aeb5749a12772f2d81ca688066636019", "3b80e625b3073be59ec0ca3bb846cb3e4c1fd426", "428d1777846efa8e86b694791b8dbf114e188f30", "05487784c1c94e17c26862e342c1b81acfe11258", "63dbacac269c29b46b2b0bddbef828db025689dd", "48d784e556646cf1a42eff051cb2083a2d8e3234", "459960be65dd04317dd325af5b7cbb883d822ee4", "9790ec6042fb2665c7d9369bf28566b0ce75a936", "8da11cd99ed1eddca4d245ed16f4dd5d24d3cbec", "71354f47df241ad2e8b6c065f89f1c5afe077530", "91ae3f2775bc53321480df63fac07998c3f5a0c9", "04f0292d9a062634623516edd01d92595f03bd3f", "aa5a7a9900548a1f1381389fc8695ced0c34261a", "99cd84a62edb2bda2fc2fdc362a72413941f6aa4", "1a398504e8822e4d079167be9684096fe862c0d5", "47d4838087a7ac2b995f3c5eba02ecdd2c28ba14", "b82f89d6ef94d26bf4fec4d49437346b727c3bd4", 
"036c1d7bddcf704b213d89f66d9649a9c8e4f3be", "b92a1ed9622b8268ae3ac9090e25789fc41cc9b8", "ab41364a58b34844b281046c3d8678f7d537a97e", "91dda4183c6118de8195e07a623962dbd22cc34e", "76b5ce50ab603a6d175fd21f4b1404dff3c897c2", "a105d1ff8d42ac3ef4a59718a3fd214700c9e3b2", "0b61cad6ae6e7ab99f2e3c187bd8530da71f10ae", "7b931eb8c539a75e7517ad57eebe1780bc5960e4", "0ad344673a12c718637c851758484d7889125347", "439f3a865dfb7b42c600a095a6fcee1c1f4768ad", "67dca0d4b87ab2a4f18b5a1ef76f6ba17b599245", "82a1c84c9a6add84ab1b7df48917fd7616844e47", "80e0f776604be5bd72381479b8dcedf98f82d470", "032c1e19a59cdbeb3fb741a812980f52c1461ce1", "98af84d3a97c946369670a813f4399e36b20be62", "e2b8ba13586bb9a96e4813472d1f763d37ead47d", "b91a7b79ba6216c2f18ae7ddaf6f97f46f015ddc", "9c3b9dee9da817134325357afbebbd1a0d67cab2", "88c564ea1148cecf903282eb0976187075fe2144", "795cef320de562836caccfc6c6d2b99b3f38fc8f", "21913787b7ed62773926a287b60308d1960e6966", "59b71e19819c1c6aee98020b34bf92e605f33819", "7804dc14a7d57cfbbab584c3fd7a111410451b64", "097dc32f712550f655facf74212a70ce3828d98c", "9729930ab0f9cbcd07f1105bc69c540330cda50a", "a0e5afb1237d47f7a8ac66e7b5ada24cec5222cb", "20eaa3ebe2b6e1aff7c4585733c9fb0cfc941919", "a9acda127ca506e1b2a4641eab1bc14292b304c8", "a22f27857692fc4d78c7f0b0126676a4c0ec3985", "027bdb0f502cc61b73be32427a8dd56e213cc2b8", "f08266cea120e8aa091983da5269ee5e35febe75", "5383473d1a669beb0089f72a9a5075e943f0270f", "004e3292885463f97a70e1f511dc476289451ed5", "0155c2921f060a95c0eca8c64bf62a1eaac591e4", "416f9e0f460bdf57b5889472049324b5dc26ed89", "94a76e349e43f09c863a9c77e47722c5ade3740e", "6ceacd889559cfcf0009e914d47f915167231846", "6b32c89af605d42d166d3674fd7f56fbc5bb7b47", "f3fc96377dc3456948fd3431ce940258926ce04e", "39db2ff704cc30a7e94989de33ff4290ea4a6df1", "49f22f29e57f5867b47348555136844ffa6c6603", "2bdba44420e400eceab79f02a8552ee97e940225", "46f48211716062744ddec5824e9de9322704dea1", "95e4f000204a3a6bef97af7328a63992b1b3d7c6", "9e263d429c3b87aae2653b6fb925b32b63c172cd", 
"1099d475ee0807fc0e4aec55b636db4abc01dcb6", "3719960f974173f23b88a207a42d67d7a393a89a", "41a21f4b272a33e71b73e2d533e0d86875aecac9", "fd56132a1fc505d39513994cd97ef4429aebe9cb", "0986130d760cc80895397f277a3254ae53266243", "e9b8f2ee742b32ae272c950cc6fa2d5a2d05f028", "c48c452f26e54f37faaf025ca3c76b33ce3e40f6", "810cb76794fe11990cfe30f48faf222f7469ce3d", "a3fe284b029269ad5f071dd37bb137593c67dfc2", "00b370765678c44acd5313f3946b2431890721a9", "e043d79f4dc41c9decaf637d8ffdd11f8ed59f2b", "51c3050fb509ca685de3d9ac2e965f0de1fb21cc", "f19f875c535b940cc642f5366925e92ac3caac97", "ad60b6d777ab5dda8a67dc4bdf974abec69d258a", "e6540d70e5ffeed9f447602ea3455c7f0b38113e", "6c73ca2b877d333157c136cdd3f5e96cd8909cc7", "9170443fb0e6a343bdcb68b3f49ae8e3abadfb48", "ad629f981e834925db35fc3c6592cdddfc4362b6", "e9a76f501ae3b2dca197f4b273aba19826dd4113", "8e31c59c973cecce1a7ef114344513d1b6ec9001", "22cf367d14e646914cc959bbcd402df0c20cd0dc", "ed702537d487de0737582f7ef7e937f4fe9b28fd", "fe86ef81dcd6cdfe3a67437b53ab480557b04a71", "eb0e5db282f88d47b65f98df70c2e7c78b8647a6", "eb3e0a14851b681ee254ff0c2be5c6b29025c823", "b810ee6da48d3778f5b4dba9088706af1169edde", "3810b6299140bf2c7d6d0cced765c0777d603923", "32a336e2a99eb113eeba7cbf622b463cd46d3138", "cf74dceae075bde213d2aafad115d2afc893c21b", "064b797aa1da2000640e437cacb97256444dee82", "0b835284b8f1f45f87b0ce004a4ad2aca1d9e153", "0ef20991e0ecc7dc3f6e0e5fd6ee93c4970206f3", "f345a05353f5784b64eefb7785661cc0be519521", "d6639263381c929ebc579a541045a85aa21680f8", "047d7cf4301cae3d318468fe03a1c4ce43b086ed", "e3b324101157daede3b4d16bdc9c2388e849c7d4", "6bcfcc4a0af2bf2729b5bc38f500cfaab2e653f0", "050a3346e44ca720a54afbf57d56b1ee45ffbe49", "1255afbf86423c171349e874b3ac297de19f00cd", "a735c6330430c0ff0752d117c54281b1396b16bf", "3ed2ebfd783298a9a2e412529ffabdeb98bd552d", "0a11b82aa207d43d1b4c0452007e9388a786be12", "64ec02e1056de4b400f9547ce56e69ba8393e2ca", "10d334a98c1e2a9e96c6c3713aadd42a557abb8b", "9888ce5cb5cae8ba4f288806d126b1114e0a7f9b", 
"eb48170a6e1e020f002a6a0a808c1934d5c760b8", "a4898f55f12e6393b1c078803909ea715bf71730", "b171f9e4245b52ff96790cf4f8d23e822c260780", "6b0a2f9ab9b134d66a325525ea5d90ad546fe2b7", "1ef4815f41fa3a9217a8a8af12cc385f6ed137e1", "72ecaff8b57023f9fbf8b5b2588f3c7019010ca7", "c222f8079c246ead285894c47bdbb2dfc7741044", "3b64b8be33887e77e6def4c385985e43e2c15eea", "cbaa17be8c22e219a9c656559e028867dfb2c2ed", "383e64d9ef1fca9de677ac82486b4df42e96e861", "11ac88aebe0230e743c7ea2c2a76b5d4acbfecd0", "c98b13871a3bc767df0bdd51ff00c5254ede8b22", "aa331fe378056b6d6031bb8fe6676e035ed60d6d", "3cc2a2eaaacbf96c6b9abc1cf91bfefabf6fcfdd", "e3a6e9ddbbfc4c5160082338d46808cea839848a", "113c22eed8383c74fe6b218743395532e2897e71", "46a4551a6d53a3cd10474ef3945f546f45ef76ee", "2dbde64ca75e7986a0fa6181b6940263bcd70684", "bad2df94fa771869fa35bd11a1a7ab2e3f6d1da3", "b4b0bf0cbe1a2c114adde9fac64900b2f8f6fee4", "a2136b13aa0bb4ea4e7fa99a6c657b11dffff563", "ed82f10e5bfe1825b9fa5379a1d0017b96fa1ebf", "1d729693a888a460ee855040f62bdde39ae273af", "392425be1c9d9c2ee6da45de9df7bef0d278e85f", "9b1c218a55ead45296bfd7ad315aaeff1ae9983e", "a5ae44070857aa00e54ea80394a04fda412b335c", "cbc2de9b919bc63590b6ee2dfd9dda134af45286", "e03f69bad7e6537794a50a99da807c9df4ff5186", "de162d4b8450bf2b80f672478f987f304b7e6ae4", "a0021e3bbf942a88e13b67d83db7cf52e013abfd", "0d781b943bff6a3b62a79e2c8daf7f4d4d6431ad", "d8896861126b7fd5d2ceb6fed8505a6dff83414f", "42ded74d4858bea1070dadb08b037115d9d15db5", "2c61a9e26557dd0fe824909adeadf22a6a0d86b0", "66425ac80e8c5995f9a680ae8b0f077893d6713b", "58d43e32660446669ff54f29658961fe8bb6cc72", "f531ce18befc03489f647560ad3e5639566b39dc", "6cfc337069868568148f65732c52cbcef963f79d", "1fef53b07c6c625545fc071c7386d41f87925675", "2f5e057e35a97278a9d824545d7196c301072ebf", "9776a9f3c59907f45baaeda4b8907dcdac98aef1", "7177649ece5506b315cb73c36098baac1681b8d2", "b7461aac36fc0b8a24ecadf6c5b5caf54f2aa2f7", "61971f8e6fff5b35faed610d02ad14ccfc186c70", "c175f1666f3444e407660c5935a05b2a53f346f0", 
"5445e5fe5e42698413cfaf90f8c52009f19539ff", "e9d43231a403b4409633594fa6ccc518f035a135", "c92bb26238f6e30196b0c4a737d8847e61cfb7d4", "c88ce5ef33d5e544224ab50162d9883ff6429aa3", "c84233f854bbed17c22ba0df6048cbb1dd4d3248", "66330846a03dcc10f36b6db9adf3b4d32e7a3127", "ddb1a392582c624c9116cb00eac01aba220fad84", "a55efc4a6f273c5895b5e4c5009eabf8e5ed0d6a", "6eddea1d991e81c1c3024a6cea422bc59b10a1dc", "32dfd4545c87d9820cc92ca912c7d490794a81d6", "cc05f758ccdf57d77b06b96b9d601bf2795a6cc4", "690f5d35489c63ec7309b9e4d77c929815065257", "ff82825a04a654ca70e6d460c8d88080ee4a7fcc", "7714a5aa27ab5ad4d06a81fbb3e973d3b1002ac1", "b51e3d59d1bcbc023f39cec233f38510819a2cf9", "261a80216dda39b127d2b7497c068ec7e0fdf183", "83fd2d2d5ad6e4e153672c9b6d1a3785f754b60e", "cfba667644508853844c45bfe5d0b8a2ffb756d3", "27846b464369095f4909f093d11ed481277c8bba", "2ce84465b9759166effc7302c2f5339766cc523d", "5da740682f080a70a30dc46b0fc66616884463ec", "e577484e5c3ecc6f073faf124468c8ae2f827a0f", "1564bf0a268662df752b68bee5addc4b08868739", "193bc8b663d041bc34134a8407adc3e546daa9cc", "6da711d07b63c9f24d143ca3991070736baeb412", "8be60114634caa0eff8566f3252cb9a1b7d5ef10", "43776d1bfa531e66d5e9826ff5529345b792def7", "b239a756f22201c2780e46754d06a82f108c1d03", "46eaeee1d2887c27b614852c728411cfedf52144", "3f9a7d690db82cf5c3940fbb06b827ced59ec01e", "36b9f46c12240898bafa10b0026a3fb5239f72f3", "b1546f723d11f95ee50b1db1bdce8de8de85a54f", "bcd162862b6d3a56b474039b2588a8f948d59fe0", "17a85799c59c13f07d4b4d7cf9d7c7986475d01c", "4eeccbbb98de4f2e992600482fd6b881ace014bb", "22894c7a84984bd4822dcfe7c76a74673a242c36", "3f848d6424f3d666a1b6dd405a48a35a797dd147", "1afdedba774f6689eb07e048056f7844c9083be9", "562f7555e5cb79ce0fe834c4613264d8378dd007", "6c58e3a8209fef0e28ca2219726c15ea5f284f4f", "54e988bc0764073a5db2955705d4bfa8365b7fa9", "13b1b18b9cfa6c8c44addb9a81fe10b0e89db32a", "53873fe7bbd5a2d171e2b1babc9cacaad6cabe45", "5aad56cfa2bac5d6635df4184047e809f8fecca2", "a4725a5b43e7c36d9e30028dff66958f892254a0", 
"01e27c91c7cef926389f913d12410725e7dd35ab", "32c20afb5c91ed7cdbafb76408c3a62b38dd9160", "142dcfc3c62b1f30a13f1f49c608be3e62033042", "1565721ebdbd2518224f54388ed4f6b21ebd26f3", "2ff9618ea521df3c916abc88e7c85220d9f0ff06", "e73f2839fc232c03e9f027c78bc419ee15810fe8", "7c1e1c767f7911a390d49bed4f73952df8445936", "3803b91e784922a2dacd6a18f61b3100629df932", "44a3ec27f92c344a15deb8e5dc3a5b3797505c06", "dad7b8be074d7ea6c3f970bd18884d496cbb0f91", "f1173a4c5e3501323b37c1ae9a6d7dd8a236eab8", "7f2a4cd506fe84dee26c0fb41848cb219305173f", "aa127e6b2dc0aaccfb85e93e8b557f83ebee816b", "632441c9324cd29489cee3da773a9064a46ae26b", "1e5a1619fe5586e5ded2c7a845e73f22960bbf5a", "365f67fe670bf55dc9ccdcd6888115264b2a2c56", "e9ed17fd8bf1f3d343198e206a4a7e0561ad7e66", "7c7ab59a82b766929defd7146fd039b89d67e984", "90b11e095c807a23f517d94523a4da6ae6b12c76", "213a579af9e4f57f071b884aa872651372b661fd", "529baf1a79cca813f8c9966ceaa9b3e42748c058", "96faccdddef887673d6007fed8ff2574580cae1f", "4aa18f3a1c85f7a09d3b0d6b28c0339199892d60", "33f7e78950455c37236b31a6318194cfb2c302a4", "8aed6ec62cfccb4dba0c19ee000e6334ec585d70", "b6aa94b81b2165e492cc2900e05dd997619bfe7a", "8f08b2101d43b1c0829678d6a824f0f045d57da5", "0f63499e22a1d77ef898f6b3db550231b09af59e", "0387b32d0ebd034dc778972367e7d4194223785d", "dc0341e5392c853f11283e99a7dc5c51be730aca", "a6e75b4ccc793a58ef0f6dbe990633f7658c7241", "4bd3de97b256b96556d19a5db71dda519934fd53", "cbe1df2213a88eafc5dcaf55264f2523fe3ec981", "83fd5c23204147844a0528c21e645b757edd7af9", "046770df59c49c7ca9a1a4c268176ede2aa89e37", "40f127fa4459a69a9a21884ee93d286e99b54c5f", "6813208b94ffa1052760d318169307d1d1c2438e", "506c2fbfa9d16037d50d650547ad3366bb1e1cde", "64ab36d095764c48140ef5aaaf6932cc93bceec9", "7a81967598c2c0b3b3771c1af943efb1defd4482", "b3c60b642a1c64699ed069e3740a0edeabf1922c", "07a472ea4b5a28b93678a2dcf89028b086e481a2", "14418ae9a6a8de2b428acb2c00064da129632f3e", "37c42f0a0e2e97a74113e1a1e1a79b04e0c64244", "0ae9cc6a06cfd03d95eee4eca9ed77b818b59cb7", 
"230c4a30f439700355b268e5f57d15851bcbf41f", "d06c8e3c266fbae4026d122ec9bd6c911fcdf51d", "31ba9d0bfaa2a44bae039e5625eb580afd962892", "1a6c3c37c2e62b21ebc0f3533686dde4d0103b3f", "24d376e4d580fb28fd66bc5e7681f1a8db3b6b78", "0573f3d2754df3a717368a6cbcd940e105d67f0b", "73f341ff68caa9f8802e9e81bfa90d88bbdbd9d2", "5b59e6b980d2447b2f3042bd811906694e4b0843", "2ae139b247057c02cda352f6661f46f7feb38e45", "f4f9697f2519f1fe725ee7e3788119ed217dca34", "5f1dcaff475ef18a2ecec0e114a9849a0a8002b9", "383bed81e8c96e263ba6519a69587fb7ffd64216", "614079f1a0d0938f9c30a1585f617fa278816d53", "642417f2bb1ff98989e0a0aa855253fed1fffe04", "c5d13e42071813a0a9dd809d54268712eba7883f", "132f88626f6760d769c95984212ed0915790b625", "1a6c9ef99bf0ab9835a91fe5f1760d98a0606243", "dcf71245addaf66a868221041aabe23c0a074312", "69a9da55bd20ce4b83e1680fbc6be2c976067631", "d3367c9a4825295301225a05a190c0b7ed62736e", "1723227710869a111079be7d61ae3df48604e653", "fe48f0e43dbdeeaf4a03b3837e27f6705783e576", "4932b929a2e09ddebedcb1abe8c62f269e7d4e33", "982fcead58be419e4f34df6e806204674a4bc579", "152683f3ac99f829b476ea1b1b976dec6e17b911", "6e9a8a34ab5b7cdc12ea52d94e3462225af2c32c", "ce5e50467e43e3178cbd86cfc3348e3f577c4489", "eb9bcf9e3f8856c92e7720b63b7e846df37de0c3", "6489ad111fee8224b34f99d1bcfb5122786508cd", "0b9db62b26b811e8c24eb9edc37901a4b79a897f", "4f051022de100241e5a4ba8a7514db9167eabf6e", "0ed96cc68b1b61e9eb4096f67d3dcab9169148b9", "7c95449a5712aac7e8c9a66d131f83a038bb7caa", "fe9d9c298d2e0c72408668fcff996e4bf58cc6c6", "826015d9ade1637b3fcbeca071e3137d3ac1ef56", "81e366ed1834a8d01c4457eccae4d57d169cb932", "5e0e516226413ea1e973f1a24e2fdedde98e7ec0", "0b174d4a67805b8796bfe86cd69a967d357ba9b6", "be6bd94322dd0ecfc8ea99eb7f40a9a14dd3471f", "90eb66e75381cce7146b3953a2ae479a7beec539", "3214ce1c8c86c0c4670e3f8b8f4351d8fa44434d", "f94f366ce14555cf0d5d34248f9467c18241c3ee", "47eba2f95679e106e463e8296c1f61f6ddfe815b", "02431ed90700d5cfe4e3d3a20f1e97de3e131569", "e82a0976db908e6f074b926f58223ac685533c65", 
"31ef5419e026ef57ff20de537d82fe3cfa9ee741", "55c4efc082a8410b528af7325de8148b80cf41e3", "f3495bf7f7d827c72cc4e7a4850eaf54a998db11", "53e081f5af505374c3b8491e9c4470fe77fe7934", "04f56dc5abee683b1e00cbb493d031d303c815fd", "ae89b7748d25878c4dc17bdaa39dd63e9d442a0d", "fb0774049f2f34be194592822c74e2f2e603dea8", "3a591a9b5c6d4c62963d7374d58c1ae79e3a4039", "9282239846d79a29392aa71fc24880651826af72", "59b83666c1031c3f509f063b9963c7ad9781ca23", "8633732d9f787f8497c2696309c7d70176995c15", "d22b378fb4ef241d8d210202893518d08e0bb213", "861c650f403834163a2c27467a50713ceca37a3e", "e4abc40f79f86dbc06f5af1df314c67681dedc51", "18c6c92c39c8a5a2bb8b5673f339d3c26b8dcaae", "a611c978e05d7feab01fb8a37737996ad6e88bd9", "250ebcd1a8da31f0071d07954eea4426bb80644c", "11be33019f591214c8f79dbcb24a50d8f7fa5c95", "d6791b98353aa113d79f6fb96335aa6c7ea3b759", "ea0785c2d4ac8f8d6415cffdb83547bfc4e7adba", "5334ac0a6438483890d5eef64f6db93f44aacdf4", "13fd25a18ab3faebcd6a4ab95f4cc814fcda337a", "0f32df6ae76402b98b0823339bd115d33d3ec0a0", "ca44a838da4187617dca9f6249d8c4b604661ec7", "4c1ce6bced30f5114f135cacf1a37b69bb709ea1", "3b84d074b8622fac125f85ab55b63e876fed4628", "bb83d5c7c17832d1eef14aa5d303d9dd65748956", "632b24ddd42fda4aebc5a8af3ec44f7fd3ecdc6c", "7f415aee0137acab659c664eb1dff15f7b726bdd", "8ee62f7d59aa949b4a943453824e03f4ce19e500", "21bd60919e2e182a29af455353141ba4907b1b41", "46b7ee97d7dfbd61cc3745e8dfdd81a15ab5c1d4", "96ba65bffdddef7c7737c0f42ff4299e95cd85c2", "1a849b694f2d68c3536ed849ed78c82e979d64d5", "c1ebbdb47cb6a0ed49c4d1cf39d7565060e6a7ee", "92115b620c7f653c847f43b6c4ff0470c8e55dab", "25c108a56e4cb757b62911639a40e9caf07f1b4f", "e287ff7997297ce1197359ed0fb2a0bd381638c9", "bd66dc891270d858de3adf97d42ed714860ae94d", "1287bfe73e381cc8042ac0cc27868ae086e1ce3b", "beae35eb5b2c7f63dfa9115f07b5ba0319709951", "774cbb45968607a027ae4729077734db000a1ec5", "b12431e61172443c534ea523a4d7407e847b5c5b", "97e569159d5658760eb00ca9cb662e6882d2ab0e", "40b7e590dfd1cdfa1e0276e9ca592e02c1bd2b5b", 
"67484723e0c2cbeb936b2e863710385bdc7d5368", "950171acb24bb24a871ba0d02d580c09829de372", "90c4f15f1203a3a8a5bf307f8641ba54172ead30", "b599f323ee17f12bf251aba928b19a09bfbb13bb", "7643861bb492bf303b25d0306462f8fb7dc29878", "28f7d3d894705a92cac9b08d22701fadb6472676", "661c78a0e2b63cbdb9c20dcf89854ba029b6bc87", "2e942d19333651bf6012374ea9e78d6937fd33ac", "7ed5036a7c1eb2ea08fa2a12a446a9ccb6171c92", "081189493ca339ca49b1913a12122af8bb431984", "1c25a3c8ef3e2c4dbff337aa727d13f5eba40fb2", "dda35768681f74dafd02a667dac2e6101926a279", "5e62b2ab6fd3886e673fd5cbee160a5bee414507", "7f703613149b190ea3bb0e3c803844895419846b", "bcac3a870501c5510df80c2a5631f371f2f6f74a", "1b150248d856f95da8316da868532a4286b9d58e", "a5ade88747fa5769c9c92ffde9b7196ff085a9eb", "cad52d74c1a21043f851ae14c924ac689e197d1f", "e8875b317c2e0ed6fba0c908d599b3772a400bdd", "83b7578e2d9fa60d33d9336be334f6f2cc4f218f", "51d048b92f6680aca4a8adf07deb380c0916c808", "a0848d7b1bb43f4b4f1b4016e58c830f40944817", "849f891973ad2b6c6f70d7d43d9ac5805f1a1a5b", "8bbd40558a99e33fac18f6736b8fe99f4a97d9b1", "8d712cef3a5a8a7b1619fb841a191bebc2a17f15", "3726d17fd7e57c75b8b9f7f57bdec9054534be5e", "2201f187a7483982c2e8e2585ad9907c5e66671d", "e569f4bd41895028c4c009e5b46b935056188e91", "cd2bf0e1d19babe51eaa94cbc24b223e9c048ad6", "2edc6df161f6aadbef9c12408bdb367e72c3c967", "3463f12ad434d256cd5f94c1c1bfd2dd6df36947", "716d6c2eb8a0d8089baf2087ce9fcd668cd0d4c0", "3fe3d6ff7e5320f4395571131708ecaef6ef4550", "44c278cbecd6c1123bfa5df92e0bda156895fa48", "e585dc6c810264d9f07e38c412379734a920714e", "171d8a39b9e3d21231004f7008397d5056ff23af", "84dcf04802743d9907b5b3ae28b19cbbacd97981", "dc2e805d0038f9d1b3d1bc79192f1d90f6091ecb", "27812db1d2f68611cc284d65d11818082e572008", "22ccd537857aca1ee4b961f081f07c58d42a7f32", "aa5eeb1ab953411e915ea5e6298474dbebfa6fb6", "b0358af78b7c5ee7adc883ef513bbcc84a18a02b", "4cdae53cebaeeebc3d07cf6cd36fecb2946f3e56", "c9bbd7828437e70cc3e6863b278aa56a7d545150", "006f283a50d325840433f4cf6d15876d475bba77", 
"d91fd82332a0db1bb4a8ac563f406098cfe9c4bb", "b104c8ef6735eba1d29f50c99bbbf99d33fc8dc2", "1cb0c11620bde2734c1a428c789158ffff0d6c7b", "652ec3947d3d04dda719b1f5ba7c975e567166ef", "74325f3d9aea3a810fe4eab8863d1a48c099de11", "04616814f1aabe3799f8ab67101fbaf9fd115ae4", "b2a0e5873c1a8f9a53a199eecae4bdf505816ecb", "b85d0aef3ee2883daca2835a469f5756917e76b7", "68f61154a0080c4aae9322110c8827978f01ac2e", "3f88ea8cf2eade325b0f32832561483185db5c10", "813c93c54c19fd3ef850728e6d4a31d279d26021", "4ae291b070ad7940b3c9d3cb10e8c05955c9e269", "c9c9ade2ef4dffb7582a629a47ea70c31be7a35e", "26e570049aaedcfa420fc8c7b761bc70a195657c", "434f1442533754b3098afd4e24abf1e3792b24db", "c7c5f0fe1fcaf3787c7f78f7dc62f3497dcfdf3c", "24bf94f8090daf9bda56d54e42009067839b20df", "74875368649f52f74bfc4355689b85a724c3db47", "ea2b3efd4d317ebaffaf7dc8c62db5ff1eab0e1b", "df2c685aa9c234783ab51c1aa1bf1cb5d71a3dbb", "a60907b7ee346b567972074e3e03c82f64d7ea30", "55aafdef9d9798611ade1a387d1e4689f2975e51", "501eda2d04b1db717b7834800d74dacb7df58f91", "6bacd4347f67ec60a69e24ed7cc0ac8073004e6f", "08f1e9e14775757298afd9039f46ec56e80677f9", "e896389891ba84af58a8c279cf8ab5de3e9320ee", "9c373438285101d47ab9332cdb0df6534e3b93d1", "de398bd8b7b57a3362c0c677ba8bf9f1d8ade583", "83011670e083dd52484578f8b6b3b4ccde3237ec", "1862cb5728990f189fa91c67028f6d77b5ac94f6", "0d0b880e2b531c45ee8227166a489bf35a528cb9", "77fbbf0c5729f97fcdbfdc507deee3d388cd4889", "97b86d486c836abbe8fb2cfd8810181593e2ab53", "93cfc6fd29d50fe6589f9506b503f32f6d0372f4", "7c7b0550ec41e97fcfc635feffe2e53624471c59", "fefaa892f1f3ff78db4da55391f4a76d6536c49a", "15252b7af081761bb00535aac6bd1987391f9b79", "a775da3e6e6ea64bffab7f9baf665528644c7ed3", "39c8b34c1b678235b60b648d0b11d241a34c8e32", "8fe5feeaa72eddc62e7e65665c98e5cb0acffa87", "dfb8a04a80d4b0794c0679d797cb90ec101e162c", "0a87d781fe2ae2e700237ddd00314dbc10b1429c", "cc9a61a30afdb8a5bc7088e1cef814b53dc4fc66", "afca252f314b46d5c1f2cb4e75ce15d551069b05", "90e7a86a57079f17f1089c3a46ea9bfd1d49226c", 
"18a013e1c72cf579d1b215f22d298521047e98a4", "e328d19027297ac796aae2470e438fe0bd334449", "a15d9d2ed035f21e13b688a78412cb7b5a04c469", "bb3698df3b4f40c0b7cc523d26ffb8c5276d5a1c", "46551095a2cc4976d6be0165c31c37b0c5638719", "eaf020bc8a3ed5401fc3852f7037a03b2525586a", "ba017a8d16e47e57a1f3eb5a94c1ba24e6952274", "d8fbd3a16d2e2e59ce0cff98b3fd586863878dc1", "ed273b5434013dcdb9029c1a9f1718da494a23a2", "1a65cc5b2abde1754b8c9b1d932a68519bcb1ada", "08cb294a08365e36dd7ed4167b1fd04f847651a9", "5b6593a6497868a0d19312952d2b753232414c23", "841855205818d3a6d6f85ec17a22515f4f062882", "72e603083c8b1cfa09200eb333927e8ea848fbc8", "fb0f5e06048c0274c2a4056e353fa31f5790e381", "b73d9e1af36aabb81353f29c40ecdcbdf731dbed", "699b8250fb93b3fa64b2fc8f59fef036e172564d", "243e9d490fe98d139003bb8dc95683b366866c57", "a8035ca71af8cc68b3e0ac9190a89fed50c92332", "10f2b8188c745d43c1580f5ee6de71ad8d538b4d", "013909077ad843eb6df7a3e8e290cfd5575999d2", "2742a61d32053761bcc14bd6c32365bfcdbefe35", "49fdafef327069516d887d8e69b5e96c983c3dd0", "39b22bcbd452d5fea02a9ee63a56c16400af2b83", "2fe86e9c115562df2114eeedc7db1aece07a3638", "647b2e162e9c476728172f62463a8547d245cde3", "3bebb79f8f49aa11dd4f6d60d903172db02bf4f3", "5632ba72b2652df3b648b2ee698233e76a4eee65", "72cbbdee4f6eeee8b7dd22cea6092c532271009f", "ceaa5eb51f761b5f84bd88b58c8f484fcd2a22d6", "561ae67de137e75e9642ab3512d3749b34484310", "b3067deb3110e3a7566c032ac0c1e1608668ef3d", "7ebd323ddfe3b6de8368c4682db6d0db7b70df62", "3998c5aa6be58cce8cb65a64cb168864093a9a3e", "100105d6c97b23059f7aa70589ead2f61969fbc3", "3c1b73509cc09200e96ab9cfb28ebfd9d1d6aa9a", "4f7967158b257e86d66bdabfdc556c697d917d24", "4850af6b54391fc33c8028a0b7fafe05855a96ff", "eb5c1e526fe2d17778c68f60c874c3da0129fabd", "29fd98f096fc9d507cd5ee7d692600b1feaf7ed1", "fd126e36337999640a0b623611b5fec8de390d46", "5ed5e534c8defd683909200c1dc31692942b7b5f", "91ead35d1d2ff2ea7cf35d15b14996471404f68d", "e57014b4106dd1355e69a0f60bb533615a705606", "104863192ffb35e285f7546d1ac842e001ac9084", 
"33c3702b0eee6fc26fc49f79f9133f3dd7fa3f13", "23b466abb866e3f160f4573a69666f861aef59cc", "2042aed660796b14925db17c0a8b9fbdd7f3ebac", "7698ba9fd1f49157ca2666a93311afbf1ff4e66c", "d861c658db2fd03558f44c265c328b53e492383a", "2b64a8c1f584389b611198d47a750f5d74234426", "f4f6fc473effb063b7a29aa221c65f64a791d7f4", "3cb64217ca2127445270000141cfa2959c84d9e7", "120b9c271c3a4ea0ad12bbc71054664d4d460bc3", "8c50869b745fc094a4fb1b27861934c3c14d7199", "93dcea2419ca95b96a47e541748c46220d289d77", "d904f945c1506e7b51b19c99c632ef13f340ef4c", "fc8990088e0f1f017540900bc3f5a4996192ff05", "b93bf0a7e449cfd0db91a83284d9eba25a6094d8", "221252be5d5be3b3e53b3bbbe7a9930d9d8cad69", "af29ad70ab148c83e1faa8b3098396bc1cd87790", "587b8c147c6253878128ddacf6e5faf8272842a4", "bac11ce0fb3e12c466f7ebfb6d036a9fe62628ea", "256b46b12ab47283e6ada05fad6a2b501de35323", "3ed46ef5344927a30d71089ae203c9a9e35e4977", "ce6a6d35f65e584214aaf24378ab85038decddbb", "56ae6d94fc6097ec4ca861f0daa87941d1c10b70", "3f957142ef66f2921e7c8c7eadc8e548dccc1327", "1989a1f9ce18d8c2a0cee3196fe6fa363aab80c2", "60777fbca8bff210398ec8b1179bc4ecb72dfec0", "1f9b2f70c24a567207752989c5bd4907442a9d0f", "d8c9ce0bd5e4b6d1465402a760845e23af5ac259", "a136ccaa67f660c45d3abb8551c5ed357faf7081", "872ff48a3acfbf96376fd048348372f5137615e4", "d6e08345ba293565086cb282ba08b225326022fc", "162dfd0d2c9f3621d600e8a3790745395ab25ebc", "887745c282edf9af40d38425d5fdc9b3fe139c08", "87bb183d8be0c2b4cfceb9ee158fee4bbf3e19fd", "27a299b834a18e45d73e0bf784bbb5b304c197b3", "c20b2d365186f4471950fbe1ef8755de90efc000", "bcb99d5150d792001a7d33031a3bd1b77bea706b", "27961bc8173ac84fdbecacd01e5ed6f7ed92d4bd", "1ddea58d04e29069b583ac95bc0ae9bebb0bed07", "2c285dadfa6c07d392ee411d0213648a8a1cf68f", "06b6606e47e071bbe070093c78120207578126fd", "47dabb566f2bdd6b3e4fa7efc941824d8b923a13", "d69df51cff3d6b9b0625acdcbea27cd2bbf4b9c0", "f5770dd225501ff3764f9023f19a76fad28127d4", "9285f4a6a06e975bde3ae3267fccd971d4fff98a", "637b31157386efbde61505365c0720545248fbae", 
"8a8127a06f432982bfb0150df3212f379b36840b", "0d90c992dd08bfb06df50ab5c5c77ce83061e830", "912a6a97af390d009773452814a401e258b77640", "c30982d6d9bbe470a760c168002ed9d66e1718a2", "c2474202d56bb80663e7bece5924245978425fc1", "a00fdf49e5e0a73eb24345cb25a0bd1383a10021", "6226f2ea345f5f4716ac4ddca6715a47162d5b92", "3f12701449a82a5e01845001afab3580b92da858", "b37f57edab685dba5c23de00e4fa032a3a6e8841", "57dc55edade7074f0b32db02939c00f4da8fe3a6", "5be3cc1650c918da1c38690812f74573e66b1d32", "4289f5d6255955a7a97e85b3d16e29accb52d96d", "171ca25bc2cdfc79cad63933bcdd420d35a541ab", "59bece468ed98397d54865715f40af30221aa08c", "524c25217a6f1ed17f47871e947a5581d775fa56", "020d97ca2bf617b7ffed5a31aa8a27ffa5efadbb", "7240aad3fa4adf65e401345c877ee58a01b76fb1", "3acb6b3e3f09f528c88d5dd765fee6131de931ea", "a255a54b8758050ea1632bf5a88a201cd72656e1", "503a1ce4cc56514f7bdb7539da2a33a6dae3d0e8", "4786638ffb3b2fb385cec80720cc6e7c3588b773", "7e00fb79576fe213853aeea39a6bc51df9fdca16", "56dca23481de9119aa21f9044efd7db09f618704", "00a3cfe3ce35a7ffb8214f6db15366f4e79761e3", "d170adb2c508edaedb731ada8cb995172a839a1f", "14bca107bb25c4dce89210049bf39ecd55f18568", "5ae970294aaba5e0225122552c019eb56f20af74", "a1f1120653bb1bd8bd4bc9616f85fdc97f8ce892", "940a675de8a48b54bac6b420f551529d2bc53b99", "122f52fadd4854cf6c9287013520eced3c91e71a", "6754c98ba73651f69525c770fb0705a1fae78eb5", "0a23d374c6cf71a65e845569230420362fe4903a", "3619a9b46ad4779d0a63b20f7a6a8d3d49530339", "dcdece0d0ee382e2f388dcd7f5bd9721bb7354d6", "e8ef22b6da1dd3a4e014b96e6073a7b610fd97ea", "8d4f0517eae232913bf27f516101a75da3249d15", "9296f4ac0180e29226d6c016b5a4d5d2964eaaf6", "496074fcbeefd88664b7bd945012ca22615d812e", "a6d47f7aa361ab9b37c7f3f868280318f355fadc", "87bee0e68dfc86b714f0107860d600fffdaf7996", "556875fb04ed6043620d7ca04dfe3d8b3a9284f5", "5c91fc106cfe9d57a9b149c1af29ca84d403fc7e", "2059d2fecfa61ddc648be61c0cbc9bc1ad8a9f5b", "24959d1a9c9faf29238163b6bcaf523e2b05a053", "15ee80e86e75bf1413dc38f521b9142b28fe02d1", 
"48729e4de8aa478ee5eeeb08a72a446b0f5367d5", "346166da1a49e531923294300a731167e1436d5b", "fb9ad920809669c1b1455cc26dbd900d8e719e61", "6a38e4bb35673a73f041e34d3f2db7067482a9b5", "6c26744149ae08af8bc84137633495fa948b41ad", "e8b2a98f87b7b2593b4a046464c1ec63bfd13b51", "5778d49c8d8d127351eee35047b8d0dc90defe85", "aff8705fb2f2ae460cb3980b47f2e85c2e6dd41a", "c23153aade9be0c941390909c5d1aad8924821db", "747c25bff37b96def96dc039cc13f8a7f42dbbc7", "102e374347698fe5404e1d83f441630b1abf62d9", "4858d014bb5119a199448fcd36746c413e60f295", "26949c1ba7f55f0c389000aa234238bf01a32d3b", "bd26faef48080b5af294b19139c804ffec70825e", "eb9867f5efc98d3203ce1037f9a8814b0d15d0aa", "9cd6a81a519545bf8aa9023f6e879521f85d4cd1", "bbc8ccd3f62615e3c0ce2c3aee5e4a223d215bbd", "f79e4ba09402adab54d2efadd1c4bfe4e20c5da5", "0931bef0a9c8c153184a1f9c286cf4883cbe99b6", "edc5a0a8b9fc6ae0e8d8091a2391767f645095d9", "17cf838720f7892dbe567129dcf3f7a982e0b56e", "559645d2447004355c83737a19c9a811b45780f1", "50236de1791a009dc77d7266c412469a22437b4b", "86c5478f21c4a9f9de71b5ffa90f2a483ba5c497", "59c21f5a24d0b408d528054b016915236bb85bf2", "71f1ccf6c09ba6307398a2c5a317a744ae60bfbf", "ef26b36eb5966364c71d4fed135fe68f891127e5", "1610d2d4947c03a89c0fda506a74ba1ae2bc54c2", "1dc6c0ad19b41e5190fc9fe50e3ae27f49f18fa2", "ef4ecb76413a05c96eac4c743d2c2a3886f2ae07", "fc0f5859a111fb17e6dcf6ba63dd7b751721ca61", "405d9a71350c9a13adea41f9d7f7f9274793824f", "488a61e0a1c3768affdcd3c694706e5bb17ae548", "a896ddeb0d253739c9aaef7fc1f170a2ba8407d3", "ad0863aa16301d7c6617f8b965a64cf58f38594f", "798de108d7055752d0d2d97fbff6579c0c750e82", "1dca6a54d201dd56b41a5475aaf498a207083b0e", "58bf72750a8f5100e0c01e55fd1b959b31e7dbce", "b06f2949eb748331c40a8b2381517fa09757ad17", "239df42479c69cf95e7194cc0ec3d8cf7d4a98e8", "45f5ebf76cb908e805662d2d3be7a57d4324a391", "565590af15af3d02f0b592b2e201e36708e4fe50", "1ea21515ddad82f1e85b4c5883b93ea3909019b5", "ae233a6f07d61e2c032bd09d92bdf20c27305c1f", "b3cb117f2424209d5997d5745772dfadd02dc80d", 
"bb34bcf28021a658ce89d65d229df76d4dc620c6", "8e92168860d8c6591a0c088573629e4d167f5947", "6f81178c6f042f5e0b7dc226c1509596dbff63f0", "23f18899037c77efea58cbde7b029225af7e0db9", "86824ba374daff17ff2235484055b3bb9c464555", "96a8f115df9e2c938453282feb7d7b9fde6f4f95", "3b7a6291712f9796a3add49a3f65f0acaa3e1335", "123a9768700433c405bd7266f4c57ca8222e7fe1", "5c0dc4dff1dfb5e27b19bef0713bccd9f85ce3b2", "bfab7043e206f428f98be47ecc02d506be1148d7", "405ff96909968c6de716c9a650c8f27aa5b336b0", "653b957d4c70d6cbf8de443df497e47edcab77b4", "7811fd02bb77e2f6644f34c0f445d096199c3c2f", "50a0930cb8cc353e15a5cb4d2f41b365675b5ebf", "b51d11fa400d66b9f9d903a60c4ebe03fd77c8f2", "1be18a701d5af2d8088db3e6aaa5b9b1d54b6fd3", "0d33b6c8b4d1a3cb6d669b4b8c11c2a54c203d1a", "3b8c830b200f1df8ef705de37cbfe83945a3d307", "181708b09bde7f4904f8fd92b3668d76e7aff527", "20c2a5166206e7ffbb11a23387b9c5edf42b5230", "5480aee1d01700bb98f5a0e06dd15bf36a4e45ea", "3325860c0c82a93b2eac654f5324dd6a776f609e", "4209f140d64ce6fb891eb6ada26eaeb40af123e2", "bc7e6175aed6538eca08962e133aede11fc75bcf", "2e9739056c9d1fe7b37046328f00cae603f59441", "3e58fbb8cb96880e018ca18a60e2d86e3cb0c10a", "f730990ad4f10e7ce09e7680b7864751787445dd", "779eb6a059990b9cee55e0add7bc34aed87b3733", "188e012533977266355bfabc62d6adbf0f92d6b1", "3f85020032ae335baf57aaf65c4831b67e4030c9", "64bd5878170bfab423bc3fc38d693202ef4ba6b6", "b266be4d9fab8bf307ee2e6fdd6180ac7f6ef893", "318985dc2b8d5a1882b709eedeaac4a2e7de1d81", "3837f81524286ed5f9142d245743733766aa4017", "3884b78a06ccfde3249c16ac450b5254d033126a", "4500bc692662ab8e14db8e3144db9876f24e940b", "3f3a483402a3a2b800cf2c86506a37f6ef1a5332", "8dc81389a61d4d80644f44e1fcfd35ccfb332082", "008baae7037a47f69804c2eb8438d366a6e67486", "506f744801c97f005fa04a09e4a4ae5fdabe94d7", "877c5a3ad915c2bbb5595252d08163f34ce58957", "10f3ead41bf8de97aee9c25b345b8b7495a99aab", "cba90ec61155a233fee33b529401e65d9481213a", "4fec8a97d6d87713c5c00f369fc1373fba4377e3", "32cda87d7e0c3ecd9cc6280c797b5c456cd79abd", 
"9d7199db4897724c06fff907727458ff2660e7a5", "faacebeb542792fae28745f51a943892be8d36a6", "08d40ee6e1c0060d3b706b6b627e03d4b123377a", "2e55a287328b234db16fb538eddbbc185d51582a", "4903156f9f0edae78c24f9b9b3348a3a4d20384f", "673d4885370b27c863e11a4ece9189a6a45931cc", "b3ad5304072264ef0419b810d6ed0c69a109d5a5", "5fcbdbc0ffd5ce2c5eb3b4c18d7ad2edb00d85d1", "f52db83298a918d6ad7bda8263f91904807c4ced", "a3fa023d7355662d066882df8dead0cac6a8321e", "61efa60e16c06e2820d863bc55f3c60e86f3f6e7", "1038aa6c1f63c1de9045f10e47ed573810cb4a52", "47096e7103a2fbb6f6ede05e996209497d41db6a", "1f4aa88107d7c4b91b1436b721b7630b93ce7d06", "8f941592d46b98885781060e41cc199dae0690a3", "d2f717d1799b5cec5f1f426511527bd7e6e05d9d", "f1bb2c95dc270ffa9c2f88e29ae5d2178b4459cb", "6483ebbb9c28024431c8ada03354217453ca1b3b", "309acdd149f5f0ea12acb103b36bb59e6e631671", "0672976bb2c3b4bde4381f28bf4bbdeeabd3a22e", "6a3a07deadcaaab42a0689fbe5879b5dfc3ede52", "99227909e5733d76b0d50fc3fab975ab7a43fce3", "b7a09eaadcb21bf9ab234d87c954e329518580c5", "99911a8effd2ab3af4b4ba802920f3e1720a83e6", "9f4078773c8ea3f37951bf617dbce1d4b3795839", "1caac27548cc7f98380e4e95ccbc8e6e164489c8", "dcb0afca54aa2bde50319ad5720d613a6eca36c3", "f0c21345a13c0e1da2b74aef4e8b987feb266bb5", "3615bbdd4fe81acd9e5d166af731b5556b19a2cd", "b6f682648418422e992e3ef78a6965773550d36b", "979f63114a30d60c5c06d4c9c18c8249c3a63099", "1a86620ea59816564db30fe0ae94cc422c5266e3", "f3b7938de5f178e25a3cf477107c76286c0ad691", "0eae752f8949d97e41831e509da721ad673dfc2b", "08b70ab782141a2d7003226a0f438a6aea0a0d46", "2d1f710ba593833cdb0b63880f60146504cf1dc5", "53ac22fff7ae3ed08565439ac30656846cac2465", "fb210da5526e967a6aaaa1a4cc1134fa0976ad11", "5c44807fb7a38d4c9c3ef3bdfb950b44c4a02a3f", "2645a1c4ee285ebf4081ef1674bcf2e546908c18", "e68b1fdc4e515f947c96f65ec7ac2521edbc06b2", "393a62cab9e2a1cc82c1663fdbbf1aefb781c36b", "24e98b70dc6982af2dd3a5bb4e501cc1b61f7d2b", "d847d2b75bf301007a9e67889bdae5b147559ed3", "92e5708ed3b622ca1f0f6ac28ffd6e789c528cdf", 
"571a29d324cbacd036d8a30ffd0f586ba128a10e", "00f17fca3cf3ab4262edde3626e6230a89ff1a1f", "f90b97008d921004487d1232ad20dcd9d678435f", "8e4355225f0db7945952fbdf29e234e71313d30b", "1eb4ea011a3122dc7ef3447e10c1dad5b69b0642", "131e9edbe4b0322a467b7e8c35f6b0c0ca750e21", "d6c4069044b976c48c384c4562338942a842cf55", "45e8ef229fae18b0a2ab328037d8e520866c3c81", "f7e16e57b93b9dac11280427c7575a0a0ae4e0a8", "0b4189d874ee67f259a1a366ac93740d500064a5", "ab6ed75e1b1952e4461fc603bcfd042bb462635f", "8189e4f5fc09ae691c77bbd0d4e09b8853b02edf", "ad7dfaadf9d99eadbb001ff0e0974f53704012b1", "44b5430d98aa581ebae4295f9f6441f4acb891ff", "f26a8dcfbaf9f46c021c41a3545fcfa845660c47", "a738bd92c2be3b61b7a4b55c028550559b7d9d96", "3f0f97d8256c6fe22a346bc54f8df67f6f674f22", "cf80b4f78e639504cbf056f29bc1efecf31b1bb2", "93a28e5131a762aeb888b76bcc6689e8696ab8d2", "0f2f4edb7599de34c97f680cf356943e57088345", "20cdaf21acd50fd2cfbdd0eb697a8906cfb012e2", "c03c16668426d8b069e75cb440686e12a9adbcd7", "d708ce7103a992634b1b4e87612815f03ba3ab24", "c0ef854f4119a74b37211aa4cc36b8c1addd9057", "a016fbe8d09402316c7b38946ccd502d76aa8c74", "1c7a050394371bcb064868dfe681ff4c29ce2101", "0c61d6a33b9d3c190b4adc15658cfe969dedfbdf", "6f71862aa00d61fc8fd7f205de35ee8af458ec0c", "246218fd60d47975990908c48274341b47255292", "45f884c4c3bcdabdca46ee0e3794ce1631b9c558", "118e87ee5a8e0faa71b6ca5af6ff38f875132464", "97295e92dfe49f37de65c5130097ccab84cfe2f7", "9a27d5efc7b74ba23c07d3a45f20285998bf1577", "51e2f8f402c3d972368483169503221fd3088383", "1e3df3ca8feab0b36fd293fe689f93bb2aaac591", "0f6d6a67d4439c021dcbaaeab61b6b29e88d45d9", "3e7f54801c886ea2061650fd24fc481e39be152f", "94ac7f52e2e94ecf1fd3bac53028967b7dd62f36", "ef8de1bd92e9ee9d0d2dee73095d4d348dc54a98", "2c13e817232e8693ecbd7a139cfb1825a720ff96", "6d4e3616d0b27957c4107ae877dc0dd4504b69ab", "11cf7aa5d940d0680e287b6e7f13490a619fdf47", "1a0912bb76777469295bb2c059faee907e7f3258", "41e085d52e85a224a66e6b0884f053c05f285877", "6111832ed676ad0789d030577c87d4a539242bd3", 
"7bd6d0bca27ff68621acd10d6d1709f084f97602", "b6049a15e204327e44b6d1ee3a1148bd7e21c635", "e4896772d51a66b743e0d072d53cf26f6b61fc75", "69f097ceab54d580e7cc8eb52ee79ad2182c2686", "312620fb93a30b0448ec8ffd728b8ee2858ef74c", "e5533c70706109ee8d0b2a4360fbe73fd3b0f35d", "c306c207ac7299872280b47c88f28db4811a319f", "14fee990a372bcc4cb6dc024ab7fc4ecf09dba2b", "5136344faad16d313ddb47927ca8fd080acd6b70", "283b3160f02db64759259b4eb39dd54c4969d6f8", "8bdbb685174d6023e63c55fdf9ad9b2ac78e79bd", "905ba09d4db4f5e150457599553610fc2cb7e105", "dd609e4bd83cfcdbf64fc794da73a36398076890", "3aea679168c72c6df7ead45d4f7f1fd7f3680a11", "54ed052738ca0f4570c74931857b3275fca9993b", "714a420173f328999c3b81fb70ce85be925b725f", "b7676adde75c6d1bcabf56c7e2f7fa484155e8a8", "0c60eebe10b56dbffe66bb3812793dd514865935", "289fe7756c5e0796edcdadb6d1a38890010c7cf2", "85d8c16ccd76e2eec303f98f2d1ab239dc3947a2", "6f44303f9664a4ceabd0f4bc74cb3886aad5012f", "7fbee617687fb0ad9e4562a33bd2007c1c438992", "1c9a61c8ec255d033201fb9b394b283a6b6acacc", "e719e1ed86bf2214512d5631e31716effe2e23d2", "4e1ade72128a6e530577dbbe69bd0afa0ef0e140", "1ebf201b34d9687fa17e336a608ab43e466ca13f", "1dabb080e3e968633f4b3774f19192f8378f5b67", "f9717a0056ad863c5f9dc00916ab87bdf1cdf5f7", "05ffc37ed1289c9dbd01f1cd96d5a5ae908b12cb", "790fb309a30e3d96efdeb5b16a29a79b7a67aa24", "7750d24f5861551ea8a9b371f8f9e5c404a6e81f", "7e1a8659fa224e2734450cf33cabd194e2c8f2aa", "1dca96fdcab180133644442df4ad78eeec1aa00b", "6fc1e886659838b2ca08dbaca291420785fd51bd", "2f3125bf303bca19d9cdc9ffe1de2aacf7a23023", "a73a16203b644353a287a4759bc951450e67d700", "ea3801a15f4568856581357cfc4e5bb2de185a2e", "0c5f9f5083b9fca4dcdbc4b122099ac1f630728b", "c96bd1a584d0e5d86148cfcab0f573825bc3fb5b", "ba9cc1577393afcf8b6b34423535ce71e4eab2fd", "87a4711136040f5d6929d7e31d8dae881afa5d3f", "a9a6cd6d40b563a02ed899114559a6e14f2f39a1", "42e9bfa84eecbafe32e1d2f5d52acfd617b57d18", "2d96178c760b08a6892647fb53b0d46b113db163", "0c8f09f66f590690e391a3fe25b3167b174c0f82", 
"606e920681b6bd2910a1cccda2403ba7e361a3a9", "061d303381266e1ee751f5b7551d25324c043bed", "ba82f4ebd5e62c049387dcb6a1bffbc5d23aea2b", "54abfe3acd987b5041878c29ec74204a11e73ad1", "68147c43ad2ddebf223bd14a7928cbe26c7f270e", "30b74c53bd7a9b364920e5074b52b3f737a71c89", "ee62cff7f027a31c3bd7f313bfdc04e7d79eb04a", "1178beb48d666d7fc41b2d476f6a92450c0726c0", "4b87d095bcc944b868f8a33679914d54828db3ce", "d23bf3200adece389d6e7c866ca9105d999b23fa", "46a70d4020609c175bfc9f19e99aebd1c8edb20b", "88bf14cd272fda73e5bc8fb48102a93149792e37", "28daa489dace2d2f040dcdbbd2d4ab919b046254", "591f04d62f44c22d1d82c9e074b066c21b420394", "26b906ad166ed81e59d999ed9bb577f30de81e97", "7803281f4b94cb25ed17786fd63807d223cf7af4", "8d1b3fff760c2574a78a849f9b710f8880c94dd2", "5830e0816667e08bb0efca538d892ea329307daa", "01d3a0b5557dfaf9eb6656d52768c2e9c51e5443", "5cb343e447c7fd933ff8f57fc9c99c5673cad97d", "5afd6c5eb5cc1e8496bb78b8f7b3a00b2900deb3", "a345fc597b15c26f3f2823ccd5aac0d4c976279e", "1b73bd672c6abe6918f91812f4334db23189d1d6", "aad7f9eeb10d4f655c3e3d18d3542603ad3071b4", "0a81810af97e8ab5b8c483209b4d0ff7210436f9", "5b4fddc0b86deea2fc139c43ee07892ad211a2dd", "6668ca5ab57d68070f90671a4f92a6bc25f80470", "16ee8df3c0202a2756117ab0686cffda65362a4c", "36d18202745ec9abb70d8f7e6a4f28a55871e657", "0ab1734693b15bd1aeae06c5736fc7ad12f90aa0", "a84934a2db769b7523399c8eaf6d2d7582415c5c", "6bb2cb6f3cfcfe6fe4f46aea2311c85a0ad29f6c", "50e5aac9037380108099c09ac53f8cc3f1b31bf3", "d8b3aafb25c235be5c62da07881807872ac3e831", "98324ad5027c6b163d7a670570ffe2f8df70717c", "c02c914de25034ecd2c3287c2e731ab1130e7bee", "d8b58c5b403dc28437af8244ec812efdfbc6b2e0", "4f3f08bcc36778d45dfd5c6f6b8aff070bcfe9a4", "795cea6b95af22238600aa129b1975e83c531858", "16be1bcce9a9cd94c571313da26590c80fcec5b2", "ff5a3b2fae2ee1cf4f1c32ff7e5fdccf72815578", "e22e848d3bbe8e75bba09203b70bdb1324dad471", "0cb5a80d9ddc9876c73b6013fa927a8202736349", "8c357c8716e7a0606587cc67b209276b08483f3e", "3ed60f021fe469f2423d04917e69864251d23e08", 
"de1b83d1c7df1dcb39f9efcac443e87d17752f7c", "b573a57b3da678631bd78f25ecdeac7cd36fa617", "2dd46b83a1cf5c7c811a462728d9797c270c2cb4", "40d3b108399253862a151f242e4906f280c88418", "71de9b3b8f482863d544da0f26ac2876b4fc210a", "44f18ef0800e276617e458bc21502947f35a7f94", "95deb62b82ede5c6732c5c498d3f9452866eaba7", "25f5df29342a04936ba0d308b4d1b8245a7e8f5c", "0fbc9584cc276ba54d133730624199a631a2c6db", "d813ec3a3442f2885b76ac0133c4c5d76f9f8065", "c8c5944ec503744304e026284182fce26d74cd92", "d05825a394f11a391c8815f6b0d394cdb4cfaa95", "a6404e91af8d1644aa7eea307ffceefa715dd7ea", "227de3327012e8141cc58068fe9bc197773254b8", "23e2b9d1ac20e114f48850ab32b3d9136bec6826", "1cd0bc067e66bc1f66a73b401a4a470e43e4bb9e", "557e5e38a4c5b95e2bc86f491b03e5c8c7add857", "72282287f25c5419dc6fd9e89ec9d86d660dc0b5", "695cee9a0a48affc413eddb1d735995cad36083d", "9937a4d3fa66c0eea48b2090b5a9b6c51a1cce66", "5ad0e283c4c2aa7b9985012979835d0131fe73d8", "52572058f015761f2113aa25a341c607a286fca4", "f568eff0b3d8b9ae527e6b4483e2bc2ce5fd01bb", "50d871b436317bd387320b9fc796294e5d4d6216", "59444832eb559c0060020b57cddbb899efc4567b", "65b51d3e0b46e80236d496b25b424d22c6de4348", "a48c71153265d6da7fbc4b16327320a5cbfa6cba", "41da7c52a09072fd9c5275f03f4fa6f6d41e1aed", "72edc24c67c34b5f2c98086a689bf0f3591e393d", "35498b80ee457e409c0962e03a6e170a917c83af", "6f8fa219ea82ded79757de59250b7213f9f5a104", "691964c43bfd282f6f4d00b8b0310c554b613e3b", "321dc2958e7874a3896e7df96213cd808d3b2b27", "989282f579fdca0ebdc890cf05cac88c29f9eb49", "bd17484e0a6773a74c51c41e773e202080682b3b", "303e7b842a5f4210f83e5fea9f764767d546c9a3", "24cf9fe9045f50c732fc9c602358af89ae40a9f7", "9d3ac3d29164c2665c371a3c71de75bea753eb47", "2bb2abecb4fa7071bc2760784c6f7661e7e725da", "09750ce4a8fa0a0fc596bdda8bf58db74fa9a0e1", "4a1eacd06dbeed8acef3e4ad68b28af3bcebda56", "995495e36f4a2af999875ea4f197ca98c5e5c8de", "64a5709d41f4c2ef0383cee9932e89bb58085588", "148316962e1ebb7086837e25cbee9ecbd71e5940", "3daafe6389d877fe15d8823cdf5ac15fd919676f", 
"1a4b6ee6cd846ef5e3030a6ae59f026e5f50eda6", "0f4724cc069609a9544ca7d9a429b52cfe89c182", "2d1b8f60f2724efd6c9344870fb60e8525157d70", "cc5a1cf7ad9d644f21a5df799ffbcb8d1e24abe1", "1d0dcb458aa4d30b51f7c74b159be687f39120a0", "d4f9b24fbf6cc5b7c599f9847e4cf76f358797e4", "212ffbe247d3cc3cb32a12c43a74a1146e3fe18c", "09fd76c02abdd1bca7b98ab9fa66450cec33b9dc", "96bf907ec08df2d3176be66f369e3cc3d6cdc7f7", "b29c83b6ee10857e09ac3503916ae1b129642cae", "981847c0a3d667aae385276221834edbb8ebd11c", "f4b40b3dc27897fdc40f419a42d64fd1ff80cc9d", "f10f7f315bf48b3f545015fd5dad967aafb79abb", "4a88237199595feaa3f0e3289cbdd201a3ce28ff", "dbbf2fd0d59641bb7735f93c70e4673b80d031a6", "a102edaa9fd458316637ce51a0b7aba2ee651637", "3adbf4ed5e4e3f59afb7509119667c8701c7cf37", "756eed9fe591cf53c7ebbaba05ceeb39b212f802", "e01ac06aa1f0b193a620bf70c5dad91128a1bc90", "ffdaa12d37c720561f74d23fc3b5d47afa268000", "f8eedcca6263062b6bab11ead255f719452f1c81", "6c38ab65df4a1bf546f1426e8a7f2f5cb5f765d3", "2677a79b6381f3e7787c5dca884fa53d0b28dfe2", "ebd28f04c2ab1e61430d309ecbf7c832173d65a5", "e4df9b008050bdda42670c020d6ebc88b086e36d", "d1a0425f764ce8847d20d278e4a4267c8258c4dc", "55c0113534c62b7f3f238210cf501b42d91cc33a", "6d16337d7329ac12dc1d0f072c820d291bcc8259", "437a720c6f6fc1959ba95e48e487eb3767b4e508", "77b7572bf1426b0d2fa8d34fab41ca7f0086f28d", "348035720dba98ff54f2ff8c375ace09287c89f6", "2031b062f4c41f43a32835430b1d55a422baa564", "70ab0ec5358e40fdcf7247f31e6e927cb21442f1", "06ca83cc7def5b0d582f4d933057c4370a6345d7", "c31fe741266d60177975754d23241879ade0279c", "744499b779a751bcc3a43a45eab6f7704140a701", "e48fa574960b23ba65b7ff1a732cc521213b5120", "b8f1d51fb48655ee38fc4634d016cffa817619ae", "5a97dbd14958386aa0d969b5a926bb64cfd01b4a", "81eb804756f27d08f2d193d1074e58e1c5d263ca", "3316521a5527c7700af8ae6aef32a79a8b83672c", "ba97bee7254831a2a9dfb86c170a111c4c248413", "1426045c4188f09fec46d0d2f246357a230a1748", "f1c2ba8c7797c4844fa61068b3ce9d319e6ced3f", "263ed42f7f5864a8933e4c0a361ddc198d44695a", 
"818dcb3bac6342c02eebd896cd0a46bcf2192b64", "489a91bbe01a5bad1d333dc1d0d15afa7e712ff0", "62ad7d1b10be9c99b80b5d73ef8183e6dc4673f0", "eb22aecb7b59ab01acdf498b33f5ba9ef1b64f64", "5e6ccd81669f7da05effc109bf349ca7ce4de2e2", "3bc6ddbac68eaa89487d29abb0d25d85161d3ae8", "ac5c93b789bdd557b90ce77221f1c01ead63041f", "5684d284310582ae0f69c5b7a4d6b791a13fcf49", "074c658fac7d7ebd88be8a24b46f2b301a9aeeeb", "61b288d120a44a0d92bae6e940eade40b1f26484", "b30f9116df47ba70a71e04ba8722a13f87719599", "5fd6863a59e88b45f62e82bb72dff3fb52c49be1", "1685ac0f9fedd83a178a2f64f25155fb37998d8f", "01424c2510fbca67c3cb016ac919f6a58e37541f", "88b40b93bc00b91a42ba9392d20a6d01a5ae7592", "9679d15c6699b521740408b2e899c03af89390ac", "2784ebc97a064bad57d8339e736754ac15f10bdd", "6ed559a0d04e7d4185eeea43f77e372483982e4b", "52049fb96156729ce0ad88f86fa617ecf7d237e1", "26fcfc86bb0a20a0a9b5919ecc0df5883c220d1b", "563523dc73375693314c20e1fe2a65e34915cd8f", "66b955311ab6841c4644414d8ce2faf6ca721602", "7e59d2d3416537dd958ff71b7a0bff87e639dad9", "00c3ccc8d7e799a39ca15415775e89e2b41a3972", "2c0a71b5e111d2c7d99c3f23989d317a0d845adc", "100b416ac4c0867dd683e75249941a4a6e178071", "0a53e6eb8b48890f7431da7d0f51d6624d4430cb", "0dc11a37cadda92886c56a6fb5191ded62099c28", "0239d0af254c5304414db1672ea25ad2e1cdf2ee", "665e6aa652b99350a08090faaf9d4bcc7800186e", "507660f778fe913f6e1957fe39a87cbf50a52b2e", "11b00a4be68e9622d7b4698aca84da85aca3e288", "422e46aa845435822b7d93c2fb9103cd94128a21", "150b61a610baa6b00557acfd1464af53f5fa27ac", "413c98ff2d95b5b945825268fd8ffdc65880f715", "7fbbc20eb5fc2f023f60d6b907765fa0d2633ce9", "3f2270762ff68d6771d93d800683ae6bc76855e7", "680402e42c874c14a32146865d985588985744a4", "a25f1b02c63857482dcaa621f3a52e2b34d8b022", "99582ce8439dce17d9d6f74eb54fc5c89dbe06d9", "934824fcd19fcff4baa2053ca669bef4c614143c", "c3831d2c4bd6acfe996d0e272b3c5f4fa03e8425", "efd308393b573e5410455960fe551160e1525f49", "5d5d8c802513c2889654a880345e5a4bec51f762", "1d4b44a770aad450744d6b5483314976c80974b8", 
"6a3cbe2bb27b2a7d32c358e0be4ed268f7d4455c", "8558ea46c8f7e56c57073b27408c6638e81293f0", "95704293fcaa01473e5c0b389d8afdcc0178d8c7", "f3d6b854e605abfb3270ad309eefdad7300716d1", "a72fca41dad5bbd99c237c06311faa52a4a36bce", "07ed099e7d9c88d8e272d7191a4c7c5a68e3a6bd", "c25a8347a108678cabfd80575c74defd838edd9d", "dda1be806ab56ca58187621a0c2e4d2b8ad429ac", "a62dbc13006f6a647e89f9187fa0f3d6ea782b82", "90eb9f6a1b7e3dae24e438b201e6b1f671a87eb5", "8d177b422cbdbc793d4fb6b82fc2745a508b6cd0", "16fdc3829dc8322a26eac46e93703000005f3d6d", "3af9e70e81ea67729953c9c0e5269881b35e3cc7", "ea3353efbe7b856ced106718d04ea7d83e2a2310", "7b3231245a3d518085c8e747e2c2232963f49bc5", "0818ed460c7c0d7bbed760145fcf83b3b426d344", "816617fa6801fb2abd3d4475c459bf6e3221954d", "6bb55ed3761eb1556acbd1a0d15c2c9099bab0b7", "8ff967ed7130f81abc896d7b84f7c629aed5cf49", "c896946612069f162864edfbecf5c1a8a077ed79", "6481ca04cdfd7d37ee397783120d12ba27569cc3", "1c51aeece7a3c30302ebd83bdcaa65df0bfc48fe", "49e6636cadff564e47f6fee1063648459d92d847", "4106c49eb96b506ea1125c27e2b2f32ad79f8c48", "77ca49df9b6616d3234073f1bf647e0e3d5795d8", "72572392ea68b82f5efe5623ff948b9032cbeab5", "27f5cfd010f5414b7e0426d61df4f204432623a4", "4335d53e763b2caf20f06928cd420ae09e5041ad", "a73bc57fb0aa429ba5f7f12b6d02e2c6274cabdd", "a576d19473a12e16262266989376ad1e77e8e817", "72d067a6e1fd447ef512262248ad5f73823a3842", "548329f292f62fdaed27298e657cea465c3dd229", "bacd919cc95351f3766e8dfe0988e71fe2d0b8dc", "e7b6b6c7311813b69316eafa38f92bef2dcea13f", "4c95912db440086efb2b7acca4e8a1693cf4b3b2", "1f281b87ef58c0dd244ec4743fb1f899b4948308", "91eb155bba11c8f0e7e164c30c4e097d00a9df51", "bf3a09f7598afe4e3ab925636f167e55f2b70a9e", "436f798d1a4e54e5947c1e7d7375c31b2bdb4064", "085ca7f8935808986ae1c6afbbb62f6804049f26", "47a1aece15408b93ecdb4721a21e2841624319e9", "9555785f2a69c6f77e08c9ce85af35026d1d65f5", "29778f86a936c5a5fbedcdffdc11d0ddfd3984f1", "203ea8ab1d9c48977be97e6caf3fdbcc84101354", "234112fd23604cae638b133e6581a3a91b8b6107", 
"863b15316453bad8ee8da5ce7c7bc460d74a4bba", "3f4607f71888df5b69719cc926e8d07988f82dd9", "877d0d47700b82da0563161f57cfe2dcc2be0fbb", "dbf2ffd4ceb7714dacda92fb7c9b0f962235587e", "891ecd15c285aeb7286762b8a02e9897cd9df5a1", "10b1794dae6128480e5c56ee83f0113930c101cf", "7d7ab791ae3cfa72b4feacf1e09a4493c1a5a87c", "ef59e9bc97205237d776d553e304f4f72e418ead", "134f214cba50b25e82359fdd182fa0fa106d4792", "17614bcb0f96d576dee34e1349f8be3d56786dd2", "06890d068a7fb82fa78443038ad26ca7623f7a98", "457d3ca924afc21719d19175caf285aa575d1c90", "017a5a3f7d0ca925ca0b72bab059526a03ed6476", "b196d2b8a79446b9dff3170b38b8480744194ddb", "3f5158ea65bb483c6797462faffa16fea9f0b004", "242c07160d7de751dd918a1a331cb157776d7a22", "6dec0491f7f2ed1b1357d93d47ce5c082683401b", "3e211a93388dcf29dda4cd6d3d515042f2cffee7", "e61608681a2c2081b2b48ddcfd979d2266ff10be", "22859493a419d791ec737817df53f5225e3c48fc", "71286a2b3d564daf171cdef54ff8972159152729", "26cdb9b6d94c1d6c6a01792fee3c176585f594ac", "405a70c184e00eefcf797a0e842578ea0b51f6cd", "fc9766fdbe687b3c8b93b6d164357d4918687cb6", "3c68834951564fdc2ace1dcd5bf7d1317a22a176", "407e4b401395682b15c431347ec9b0f88ceec04b", "ccfaddc870dec71195b2d733f0901195d03aa251", "422f22dc13e581e09c4ed705091c73e1a676dec0", "74cefa1d796c84dc4343fdf383f15ca1e8ebb6ba", "04c6810b810f0e06f68954efb937a28de506aa43", "f07956d0031ff046c5c719296f7916d7897fdd21", "9da04235f7677d1e22754125465656c93f2dfbea", "fe464b2b54154d231671750053861f5fd14454f5", "1b9a63f103e15400371a52d48cf4746cb048a037", "1b4424e06ac29b72535727b92f261f39d065e858", "267bb08aa4eeefa1ef653716ca0ab572748a3a4e", "24d630946023cb421b9d960dd9983b4b5dcb800d", "1e02dfeb93e8fd8753d2e69baf705baf8996cb81", "179a37cb5416cea7d24b5820e75327ecf105e488", "873b28aa7cb9e4a923e46338be472d910aa87a3e", "83d0b7100ddce32e37af72585f9aa4181e6447e3", "1be15c1b5755c6176b2894abf05aafd15e43d6d9", "2f41c7ba65fa3d2819469fba450754266c98740e", "ba227bb94ea9414bad8846673c904a10d813e443", "c4f628e04b5dae3d0c949d08b884497bce2925fe", 
"1183db5f409e8498d1a0f542703f908275a6dc34", "6fac6fb9b2fff94e2babc4906646cf6427c591a0", "1040a32d5bd5e6f4c8bc1932345ef93671e2c019", "3db824136c1a1d447a1fd20fa70e478097d23fe4", "0149741d647c6044d830f5dcac0dadbe3a72b43c", "fae4185a5fc540b057ea9e0402223e653327d0f9", "28a16718b633dbc7f612de637fdb0d49c0e09219", "c0bc727b6b46a875cb6c0cb7e6eae1885270e281", "0032567b2d936c463a6ee4f0fa9cfa9f87c91be9", "4fe91feab83d947a0d3bd85adcf18ab1b3d9e05f", "904c53ea063d7d1e13b99d55257801d69d073775", "38a3611138388490c2cd60dfbf795932d5e55a79", "a4b09fe27dc38a7646877440d76947cdcc895d4c", "5f534bacc658f620a15b5647adecb0ea813286c8", "50c4ece0f07f2fbc3cf2fef98df24aeea0145899", "36972f34d35acab784359ddac4789e19118ac6d4", "d88aa79ba54c70c521881eff72933a9284570091", "24cbfcd31227d232d37d4e257e8300afffec9182", "8966af6a8049192556e9c9356886a135595c19b8", "f9d171019bfeb71733fe36f7fae14f342ca9e51c", "d4c657ce3b7e47237201393aa6bba0e19442bfd2", "d2a9abfe8d7b1163d055470c5b60394c3f0094b9", "84f6f20496fadb975922b47528fd94c71e872950", "ba87bcf4bf799001641b7afd7d1025600f57c4a1", "3b788fd0817336b3db3e111fa2ff50b665070e95", "5665d98136cc39322d47cb782b8e49d141c5a29e", "3c0420a0dd90d0900613ac1f1a1174b626df26d9", "2edc56745377adfcf60bdefe0f61064ad49c282e", "34df09a9445089c8f23eff5b2a43a822c9713f6e", "573c11e7e00389a033787984223ced536e15c904", "75522dfc1610c8765185c4344d97db33e1af5047", "a6e901131bbec0b754647cbc1d7a7e3580d351f1", "2f1c5d6cc5fba4a1d6af1adb6b4e7804f0020c2d", "6332a99e1680db72ae1145d65fa0cccb37256828", "fc0630823bc02d8ad7a2b6dfd1e457392ec525cb", "90c01ab7f0516990e722c366155a796577bd31d8", "b7da09c74bb94dabfc484275dd5c3bfdf07e34f4", "036839afdfe7ae59bfcddd22d2c688b03bef3bee", "1ca40e1d0ae377296ac6804c81c1e5bcbc5475c8", "09ca15b1c1d65012e5bc07e5a44bad7b72609a02", "31786e6d5187d7bc41678cbd2d1bf8edf1ddfed9", "22267d537cbaed08c2005c42f251bb6097aa1505", "732d0d3f57e93c96ee85c33b39012111a90624c2", "591a4dddbebd3d3ce3d86f9910be40aafcb73a90", "cab31574a018a28590b92d60cccd9cde906d9b88", 
"ce391bcdb64f7659ddc5a0c2e5c73854c1e8031c", "d82a39da1e524f6ac8b5622002bccb974bc9b69e", "253325f09f07c2f7a05191f76e4977f473f4bac5", "2349eab05cd0c6f94ba5314c037d198aa12c2f0f", "3295ec2e52cd83cec75fc7c7064a843756b4d1ee", "ade18cf978e4b00fb74352a7eba90b4f4509d645", "df4e60c93b6d3079f633a078b37662130f54a5fc", "fde6f06101aeaadd47e8752ba35583ffddb73667", "9b69cf7c6b3148fda4bce898b5f63ad8650b282f", "278eead9d16d003e5470bed36f4e3d680e37957b", "6e0288b874320b1b6461016fde8b215c3ba46b90", "f6780d4ccf4c734baab5b1fcb301021a86446850", "f938a5e5bc9a309b00da6b372a175e9a27aa16b1", "f50b6aba0254809ba83c55d2b144508007c23c58", "e45374e8d9491fe396497fc9fd91bd2f2f036315", "1f614a97e16671c091b1bcd1a33e1280822b53db", "63f367d50b248680138cb4b3aec3143fad3a7112", "cd490432e35ed5c5b7d80e1525e2780d7467ffb6", "e6cfbfeb178e7fb35baf3215626705c790b7605b", "3d91ba69bfbb2ba018419342d279f2d7571530f6", "598ccf73ba504a31d65b50c7ede8982c3b1d9192", "d77fbaab4f3883d8c08862b2d47362884ed98624", "3e3111f5f9285a2ff481a6668f734b4e53bf2673", "851f3dcfde59313dc2c8b87314f5a191d82194f4", "37a3e7a01655b4806df2b95aad193a2965e48a5c", "7b607888d3a9952e0553874ea77fa1ad4f6ba2ad", "242094969481f2600aaf66d0e4e6567569d8cc5a", "d2336dbae3916135bc26dd064514441ea94a8a2b", "f4b15fdcaaa3ad604b82df05f5d7f59dbcfe861d", "1c6e067098fa86ee3f96365f28669b06f9ce0c7a", "c757f6ee46208c1c26572265803068f8d837c384", "5020a75c45416073d0b07b1deb7382bc80de1779", "24475686f64825c6eb503e57636fc1fcda724407", "3974c8a9894a6ee33969fe556e6d18dbaac771ba", "de11bb7e73d96ad36dee8b0f8fa827f18cd3ddb3", "8a091254ba45ab9fe7d72c8104409bee5aa8f199", "005c996a9059af96454c3d6f83338068d3608585", "3d2e872997a0f5f07786121807cffb357fa91152", "4ee87ed965e78adb1035a5322350afac9ca901f5", "52b6df1fe810d36fd615eb7c47aa1fd29376e769", "8d39eda4a3d8bca380bf3293fcb9c034a6abdb7f", "4f4de2071e649043fe220888fc374b67d06f2867", "afe5066281a94061d5f76a08335b62d75a1d76fa", "c6481bdef3a75f74b7c28bb957755f75003d869d", "ef3697668eb643de27995827c630cfd029b10c37", 
"e8391fd7ef979a63c389ab0fa7c00fe67e4498f8", "1936a73920c5a7eb97e8b73cb9a6096aa509e402", "3ca983d40b9de7dc12b989fce213b4abee652c9e", "f74dbf3481fc3228ea821da232128b98ad5f7a60", "9e179295ff4d894b92a98e5323c544ec6e269891", "3d8c8acb8c59e9f23f048f44a23f36ffd791cdf5", "06cfc431b70ec6a6783284953a668984600e77e2", "4e9eb9e4faf5bddae87e311a744cc04406a87fa2", "5bd91c5aa3468d3435ff33d03b3d8348724f96da", "7e8aae934643c566eb90463424992a73cb75bb3b", "65086cbda9714c538417f7b25f9cf661e6d72833", "5d7a207f36f22dc470a007abf1095d30e8020650", "2603efdc673e9c7cfa0c1e1dfda512b6ef54ea2c", "67620ee24ddefbbbdfcb35e385795afc9cc30df9", "6db59b031406546682a773baed2caed529aaf37c", "f6e00d6430cbbaa64789d826d093f7f3e323b082", "2803c3247c11a30a8075dbc2db6ff96f58c2ae97", "5578ce163920cc5ccfd709e6ada927d508272b51", "02a4fabb41ce20d10e4055fff49ac98b86723000", "a32dadf343f811e6837b8ac5bab873674fa626b3", "25e06d516f3b26fe84d2715e8d9b347c0ade2fda", "5d09fefbcee2410eb448dc75f5957bed8587624b", "69df1b392fc782478f8ff63040855d50514ca782", "60cc2e8abc20c145727e7089c55bdba5722436d0", "83acde484baf81ee3a09c30ec250c11c111d2c0a", "330b66b43201057ca41622256737eceed1c5908b", "331429a6994b73c25ca0c4d0e2794e9119ac870c", "14f936e4eca8382ad835bf18b4a11d2e6682fd71", "72969929a95227fd7f9a1abaa832097c0c93dd71", "15d1326f054f4fadea463f217ce54bad6908705a", "4286b193cf1c381ac18d1fd994d6acd2a284ddad", "05605b7c951267ef23408ca5b80dfd3b5c40f3e8", "2ecb3e485b4935d3f7d25ebe8179724b9228bbec", "070199a5087590f96c4422b82e4803911bb0652e", "eb0593aee32f29a300e25628bfae7f2e260c6e9f", "ede0c040db8e24ffb1cb24cf435e032276c7f8b7", "b96017e1023e0431e326b78bef3cca2a083212dd", "2897a94242d205cba5c7254930a3b4c732d590af", "dbd3b57b942cb860207c377ac41d777f51ceabfa", "c79178a47403f317f837e4a8aa9fd03bfed1dfc7", "0d82013cbe9f65ddb34e5d99eab730fce4f0effe", "fdaba5860f753b1e5714d582db9851cd0aa29139", "20044724665208227ad54d9ea98b08dfb1420689", "4166aa3fa97e9e835056133d2140c2f405e8a9ab", "100f57d2eb737d6cb467bfac6e4bbfa9b39e774f", 
"532f070082eb565704a2f6481ed64bdbc7e6aa24", "bae912f94bccb41e5492d8f1efb1603c86acd1fc", "48d4b174736902b6c773c139cbe83588074b03ec", "b375db63742f8a67c2a7d663f23774aedccc84e5", "7ea07b7b27d59300840df17e5881dbe3a4769872", "747fdbdb1fecfefd3eb194da4816a85a61cbafcc", "72cb689ace0277d64f45243af8f66a8a95e88c44", "e15eedb39dac379e217110002a6576f7bb213aaa", "d26891a7769397bce150a2619ddae1636eae8263", "885e6f1ef99d04a057d2543cbf2ffc9e7bcfb309", "35d94887e4eb075f2603b2c69b19d31471351ff7", "9dc70aa3d51a9403e1894a7fa535ace99b527861", "8b4b0734555e446888f05c25ea330c6c53ddf307", "4684c487758df6b6bf4b69f3fe22e1aad874378a", "0d5f898d59ce592ce5cc62643753aee72c4153ce", "82594dc43b73453c958feea6ede8472cc0af869f", "15c7fe9c9154113f9824f68ca1870564600b66d6", "9576c9640f174abf2c6d064d2a109eb4a0b1af16", "741637b8b6e4fdd30cb40b3881f29c3377eb4749", "c2484fd7e368a17d25bb76a0644c6978d18d16fc", "4822c1bf765cf99193a231c000c19ae5d0c10a00", "07a65c9368d41a07e9ff1065e5a76d69178008f7", "c39e647e7a76ac60916f1f8b9ad382b5d68bc5fb", "730c523e478725676ecb0fa389af644baf80627e", "a53c907114b909709780d116acd00dffde3aecab", "15e27e189fe9549d674ebd0f55a7bf1fa026cb85", "71ab53b0b3635411d5985f71cc56bb1784023834", "b85645681e976ad7a4800cabd316bfd3e0bee362", "d13f176178f90efa6f91e9f45f710e72e5675c9a", "0ad84d9e32bf7f3b612bff5be3492f2e545d3d14", "5c138b421a8e7d14c33a1dd99ef57d24833e3e84", "434fe2cca3321c08ef30a0076864298cf608e0d5", "517918025bdaa0dae2c657b3cc265d67dd38a029", "907189aacae7bff389d6c6592d6e2586dab5168d", "5d31ac0626f1388e3b22e9dce8837b37189e6269", "3c5338479915115edff232b2a871c2e73c971c14", "07b9a622f4a167c653d8ae5d73071608208f94d6", "24dbe0a133908500d25753542bbb720d71678c42", "b525a863eab597055e02351acfeab64754d22690", "8f34d05a3353329079dd327be0c013669c8083b1", "2b0a27ef0b49a6d067128d831046064c12bee37a", "d0d186779ae4a4e53101a26dc741254e822e07ab", "33594e1bfe93fc6c74c2bcfc1bc39c524fa9e2ca", "e80ac8c09bd1161c8709124d799a25b8af109224", "756ecdb4a87ee1395a8daa26d26de490ef47eea4", 
"935ce31268232b25c9f685128ae0ae9e5c3a0e8e", "0ff1da972f03085a2bbe87b88940da6201e1aaf9", "275260b6118b56ee2a3d6bbdf250d0e424b4223c", "400aa5cb2fec558f7827c3638993bae34752ff31", "73818ae13a89c2377c30cdb029fdb7e490fe5367", "7be6c9234351536f3ca8c1f61cd12dc9e94f1a45", "d7431a266a151fb92abb7ff93fd458f21c6c3c41", "4f5e1e70f51b30e4606f991ed0e912c84af90251", "96416ad7328b57794e17c2f83ecfedea75c84828", "5677e80cd3f3924ff4bbade111d012c313b15d86", "56af5fae5142a7777001d80a2df0aa644186c4e8", "26ad124271c118e207113ae42f0fd3d30f204ea1", "aae7648c8b3acad61463cc1bdb4f17da40f819a4", "2f4623b24ee7204d12fbfd95d63366ab659f3e71", "aa782f4af587ee68936f0f5361fc1448ef61bdd9", "8326b11dd0b81dcc169ce21fc12e0c9d632db6bd", "778eefd9f0f6189456fc25b7cdd2c3f4403a37a8", "2b1358efbceda12de2f36398cdbdb3c7bccc70d4", "78358320d0319c9239c0379efc0e7827b9c948a2", "36c9731f24e5daa42c1e2c6c68258567dfa78a0a", "3903cbd56446436a4a3b8443c26c90fc1b69f5e0", "f7bed3080668246b517a0c787698b53f67140a7d", "60f592326a1537d63e13a2c3ffa3c03f0c1be6e2", "f7af6fe6fb6393f7780163ae37c5931ce566daac", "0fd53d7e1ab8f42c710cb77b5ec4cc2b22158a4c", "f193ca76a878af87603ae8ac823a3e6d1c2e3c7e", "7b331c80a91acf3616afd88e78801ac55c874f43", "9a339fe893deb250c63fb1fd31cb818b24ef16f8", "2822a883d149956934a20614d6934c6ddaac6857", "2703b2e07bd0d11655f4f17a405b3f43ea13dd31", "0b57e48dcb065c4675c62d535f9eaef876cb3d69", "2b8f87a454b80da5800337ccb5579cc7afbef45e", "08bc0dd59187eaf919dfedf1d5849d1a875835df", "69ad645516fa5cd4cd45fc217edcbf83bc0f65be", "3f42db34a79cf600b416a246ad3fd146a4afbdf4", "2472d6e4459dd65cd77b5fce99220d3b30854408", "d48647a035eb108a0df1daec63177841d81d3ea5", "dd6a938aeec20fe17eb9884387b98116a6fa7f5c", "7c4864065f4e107cb5be49a8dba8cf7d94b8340f", "712f0a7a3a026faf42d017a95fc609644cc9f715", "1c2db743b37306e50c4234da53510c113f50f9ff", "c1980e5d5c998ddec31cda9da148c354406a5eca", "ebabf19e66ef1253fda8d39a0569787c65e60a9e", "8acab2740c5b43f1d72009f849c1a8802d98aef2", "465e8625141ba0a641359cea9f478767a26cd7d3", 
"78e6b410d56d65a1a6af0e65e07fe8afeea04af3", "2dbb4b45b6a392268ce45d16fb944a652d434bd2", "3b15a48ffe3c6b3f2518a7c395280a11a5f58ab0", "881802e3df5abb317d8956e79848a8c142d283d4", "1828b1b0f5395b163fef087a72df0605249300c2", "a312599f9a842cf686c6cf80b770e05840d32a5a", "d1e66107eb084ea0ef5a97f3363f8787b8df91ed", "38765f67cb480de8d719524cb20d32116e2dff28", "c1100efda7c00d3181a6a065ab1474c2f864e267", "3394168ff0719b03ff65bcea35336a76b21fe5e4", "3be772572a3cdd81f53298336c929571e0e096fa", "61c4969c78cff37357ac794af5ac8e439751b39f", "0fd877cb088e38b00b44f52f5483be8f356788c2", "26db78cc0cac058ea6e46101ad2e7e91f510cd2e", "e9a8e80bbb4cef2167dfc0505392518a087b7e0a", "285b4e0493aa1789801275869cb3e0df1fb73cfd", "f4a9b4a3b680ff01e8e2ee7f532161905db91c25", "6937fe93e6238ee21904c172809bea0086da4570", "93a8dbd0823cc1924bfe37d88af36d4f0545bb12", "ad560c4c6b78da4736962df380a42736909cdb3b", "ce1cbcf0f671423eada02e6699d637afbd9ef570", "bd98a68ef57d60aa6c939504d06d95fe08e2aceb", "04e2f0de3aff901d94b0840f7d2fc303639ece4a", "9e48808f283598edb5a78ec2590a35ff163cc8ed", "a34ad53bd23ba33eb2644acaae821f08e23d1c42", "aba742ca4edc7dd37ff481d12e4b94c153baae77", "df358507940f83318a812a2d7ef73f8d985333ad", "300073a2f9720388e35a27e11ebe53c619969616", "7340b7cb7620904624bad5694a32cf66bd32f7b7", "11d04269aa147450f37215beb3ae44207daf3511", "079aacc817bcd84dd8a2e20666e8a62d60ddf724", "642486fd468e818fddb8a2ec156535a9d74fa4dc", "87779d07a76b1070a1c144ea680bd7c24e4acffa", "a3dd6a08c4132358877e3b3c3eb87c3f3f4adda1", "663cca096b98c8f0444608b188e464028ee34368", "92b2386e11164738d9285117ae647b4788da2c31", "63344dee49a1ab7e27ac34eefc30fb948a0bf9bb", "5b818c73ce5681e523d6fe9ed8603c7afc0a9089", "a047ea8f6dcafae5ce077952ee2b554580ed072a", "ef11f2cd92ae23832260184db767b8474bd84273", "aa2ad3df24d8d8c4a4d2fe85f0d4e635d595f0a2", "0cb7a27177a782a091916bca3d8edb02f88577b5", "21e2a03825b260a7dbccafc0c113af3f9fd73581", "8161b85a9d4593bc58f225a3aa29eb9e3f59f652", "d291569f332a216e2a12238a117d747b0f4ba880", 
"4fcdfd50ddc5690bb9d4cfb986598f3796baf585", "28b281cc0d527d3966d753f5d96b3bd0ede10e25", "c11c89e303a6b46de324efa01a0f749b4246c516", "4ffc1e005990ce708f7e8191bcf14971b2d26e2e", "191a3f67bfeec06383d083c9d826676bcd9205b2", "980230e7d3367420c35851bf47b40dfbe5b7649b", "0ef40a21edf2b48c73fd51c21d213ee69ca30a4b", "dceb0c8093171d5d044fb23b7a9b212634babbc2", "647c6ac5e0bfee0241d583650f18c6314f28aaee", "52bf00df3b970e017e4e2f8079202460f1c0e1bd", "0fbe3aa3f288949c90956155bf706a9f33d173bb", "955dc25def91eff6bfa5698249bb189ccfa83367", "3642ccf1133a965ea943c212b3314c792730be18", "a7438874b2c008e614c46151fe244e5cd8455a29", "e9a1b1cfbfcf60698fa7b5b8738f5990182abc3b", "09edba9db405dde61630c70bba00ae9c5dd7ed37", "8ab52e11760b929a83dae617afd3d55fb528d110", "0acc526e1fbef5bed4c63623e370a4710206e997", "9c82f58ddea417b1044f64ea3a7c08899be6f29a", "82fae97673a353271b1d4c001afda1af6ef6dc23", "4f6ab8af1b059e6130d5fd8c4e4adee4079ae2e6", "cabe652bb3b150f35db9db1434cec69f081c4a60", "cc392ab1cfaee298e05488a4a1d84ece12220880", "49ae4afe91239a8259dc0c390179d47bc395beda", "02b21488531c0edc44f7622a035d1b980b5364ca", "323fabb6cb4e74518fd4c7ad6ea5a1b2674e63d3", "6d2b633743178bd5aac1073b60d81ceb41933a4a", "23fa51635c646aa621bb18ff76f31d5e48ac969b", "4c6decd726d04b916d9a2cdd468c64a8a0fc2fdb", "3752dc15fada54abc0af866273d03a28f4dc8975", "d76f9f0e8f3f39553ecafd311d55bb4964fe13d8", "164f3b9740d9ceb14658237fddede0f86b5e0c47", "06856633ce8dab74f6631c3f132d985acf2be355", "dd7ed20a65d811dcf863f796d6dcbe873f57e7c4", "0ff4b53d140c2af0771a8a3dfeb17c149659bf07", "3db349ea211ed3e71baf7c7f6739ea0b086c5db7", "d95213a0ef820c93bf0a41e1ce24aea1dc9f137d", "91ecd2e705cf27abaf38e5c1c25a19ca01c16124", "5a226afa04f03086e402b22ee2c43089b68fa3ba", "f1f7ee70b2fa29a93ed801a1da9853a4c51f3f05", "b7c2027a8ab6623a33d103ab4919ad3033badbc3", "2ccca849b37c26f403f04de2822771412f4197d5", "af2b0d3a6ffac71b2be2110e493255b72ed93b47", "48857fa69a4c4e3dced3f6c5b60ec29b6e621feb", "277c41ce2a485f09a842d793e599553ad751d34a", 
"2d95cf1df9701de410792997205c71208bde98d9", "548318d42e251b3ed7d98748a07cfcfcd0594575", "6ab1b6dec1b3f547af977706ef0a51324fa0640c", "b704f8360c369e65f0826ca23dac2d4e221d8997", "21fa37258834c2e3f075a8465d8de1c178cdaaf5", "1e0ba1a61ed0c6d4a76697de1e185ed5def60fb4", "0b440695c822a8e35184fb2f60dcdaa8a6de84ae", "9209095ac450f14c603582ac01692b0f11c9c33b", "e2e19b7259e43f7388bff1f10c5bce0a7c16dc38", "77516bc906d7d924cbfd8a2bf43dc06eea516163", "cde7901c0945683d0c677b1bb415786e4f6081e6", "eefc06f6e6af1c2b9b33fb12bdaff73e19a31d6c", "275b3cb7c780c663eabbf4d6c6cbc8fe24287c70", "95b5296f7ec70455b0cf1748cddeaa099284bfed", "849f56689d6d947770ca3e1f582521754ff72c4e", "bff4465ba5ad9af448f38d8aca6ac0a8e22a577b", "3ce25ff9a72bdbec5d39f6d8c43abe4d932b4fab", "55266ddbe9d5366e8cd1b0b645971cad6d12157a", "1d911007a6f2832e006773f247fad1f729d1c6ae", "065ffca373469c95db28891889289d79e873e2a2", "550e5d7f607951d7e33e8f23eb6b6bc97780915a", "c2251f1427a7e0c8e94a70950d8f08c205aa3e66", "064aaad2a9ac5044b333714e61955631faee87fd", "62eb53fb0fbde20945a5bf04fd63249157f48002", "ae93008a90842c113a426dae46a821a9a42ed731", "3b9fa107e8d68590bfb7ac03056c02ed0aea1c29", "1299d40434a894b4f443cc0550e2bdc30248dbc6", "56a74f0f8f18895b114839ee53c2f6e36950a5c5", "6c6566261ce22d11254ea45863e8fd6e13316be9", "63cdf4aa1492c5c8fb109a1bf03af4844982e265", "06625b0c5747ccb8524fec9f44e4a8aa1ecc2151", "478261574ddc6cf297611000735aa9808f8f0030", "3d4b76fe73ea16400d62d0d776b3f43cc5ecf72b", "6e9de9c3af3258dd18142e9bef2977b7ce153bd5", "f49f1028052baa1588376a78a9dc64812748555e", "2ed3ce5cf9e262bcc48a6bd998e7fb70cf8a971c", "d4a398aaf49807f1616bdccfe0dd56e6311681fc", "48e34a0e630953944a9f7450316629f538e20e0e", "06ef2ba33ec911aa0102fb938b53bd3cc36a475f", "ea586a20cffd4b3c825ccd34d8a917ee058c4aca", "ed283166e0238c4aa801bc76969776aaa73c5a77", "04b08a2735eff524f17d3f1a63eb7fc6484d4f83", "520e8b71c39dea06b296018a60bcca2dae53f456", "99b8a24aacaa53fa3f8a7e48734037c7b16f1c40", "ae5e92abd5929ee7f0a5aa1622aa094bac4fae29", 
"c200e8f4b171777673cd543f017368629e4ff69c", "40b886e29d3e553d4ecbaa7945a75e709e0d5699", "0748b29b046d0659765649f7831a319ec23967e2", "6910dd8525776b2f7aa048405542abd8d61bac5a", "bd8d3bce895143e58579e6ec7260dd4a41794cf8", "490a0174ec7ea3e838754ec883c2aa5ff52f8221", "b74e94e426c50b1bfdfce063b6521a464c6b71b9", "6fbc58272384ede0efa72753d78f1ef6db381ad7", "219a40b7c85ccdd377feef710aa9ea226c2932c1", "6497eb53fd7d3ff09190566be8099016fb49f801", "75d69d183a1a9e8312e21e88e40fddda0affb96a", "4a48a8a6b3efdfb353454a762c042fbde343632f", "d019bfb44fa65f7bc7b6f50f7077d609cd541427", "29c362ad0d13f9e60a25442c2298a1933c163431", "ab85c4082470e8053fcb418d4ee33659dac57e08", "6419037ed7396bcfa45ec995ce63058865889e72", "7b95bd44db15f7cf20bfc051c353841f3fcea383", "1142a9d15d536d2534d6e757911efc0716cbad9d", "1b71e4b59358ed7ecf6117e19fc944307e58a7af", "fad856784bb87c72d7f45eaba7e0457ad57a258e", "76438df197d4cd0a05d0b2c87290c1cc2da2c85b", "03eac9f1a4373d3570a024eb3983bfda235e3cbf", "37e4503e5b46dedf6ec337264cee26c0664f720a", "02c2a29a4695eab7a8f859bf8697a5ca9f910d70", "40bd5d4b01c89e84fe2b0f6b1cc22657bf4e8d80", "0b84f07af44f964817675ad961def8a51406dd2e", "8ba606d7667c50054d74083867230abbed755574", "8961677300a9ee30ca51e1a3cf9815b4a162265b", "3d0a787aac818909a01e039dd1878fbee52e8765", "b774d7c951b9c444572085e15f6a81a063abf123", "9ad27106b8e0cf14e8e2814dc318142138d5527b", "35b9af6057801fb2f28881840c8427c9cf648757", "d1d90bbc6bb4fdb0d928ff74bfd8671aaafa070e", "e69152334ac7bbb29d368862cf5ade75ed9bbadb", "e12a9dab5b153d43df8513ee5032161c338e10c1", "4cb36aea73a328da8ffcdc616407bae3c908aa07", "7229bf0f7111c6a2856eea30c367ede3b3cc799f", "a8d665fa7357f696dcfd188b91fda88da47b964e", "30666d175714e29f03a2a649955e714b0cfd88cc", "8569fc88a3d1ac8b873872becb2ee8bc01dc73bc", "3e0a1884448bfd7f416c6a45dfcdfc9f2e617268", "684c8acd49148020e9bf9c4f4aefc03708a6dac0", "c397acf6a2876afe25bb07824f2d6030816cb009", "1a07d9213a6082d69f40bb5373da60ba0d19f2d6", "989d7ee9b43a528f99f33a9f92d16ced735019d1", 
"5a88b2b9c77ae830be0b014d4788420f6a748723", "13ea9a2ed134a9e238d33024fba34d3dd6a010e0", "944efd74c6fd812c6c495a11e7b045c9b778702e", "be21529c47b79b688b420c5e296086698ba11350", "33ec945e6a4ae7fe0268ffe142098ea9caf6588f", "1d8c720c95096981edcdfe57941123dca515eb34", "e2d1e72fdb7e0b7a3ebb9ddc4cc161566ab74de2", "292286c0024d6625fe606fb5b8a0df54ea3ffe91", "456f00e213e03058a056069fa75c34929cf7d4e9", "0c769c19d894e0dbd6eb314781dc1db3c626df57", "dcd88a249b480d2e25326cdd11c5879fa31865cc", "e2374b76bdc2010b1c90fe5b2bf6c13d5a3b20ed", "1e2f07f7231eef629c78cba4ada0c9be29d77254", "a80d8506fa28334c947989ca153b70aafc63ac7f", "e1725b71f3f127d6a49d24f14bee05aada1e2f96", "e4819f0eb03bbd2164636c466763e458058135eb", "e6d8f332ae26e9983d5b42af4466ff95b55f2341", "29933de38d72a0941d763b7ac5a480e733ef74a2", "69f49bae5b1c15adc644b47e6c3b6c3f7aa84171", "7b8aa3ebeae17e5266dac23e87f603a5d5f7b1e3", "9a81f46fcf8c6c0efbe34649552b5056ce419a3d", "f8f92624c8794d54e08b3a8f94910952ae03cade", "0680ec4651e8f4d7ee5a2ea742a859fa2a9d11bd", "0d7810ba414b746b0d4f73aa94042bb0ea8f324d", "0df90f86da6e92c7a351be6d5f7cf9c1452124d0", "193089d56758ab88391d846edd08d359b1f9a863", "beb0239feac388e4ee04492159a45f7e2c71e1e3", "15e1af79939dbf90790b03d8aa02477783fb1d0f", "9dea76d985e50f2b34391f7277488a2dc31ebaf3", "f96dd74d4dc828cedea8b82503effc7a012a79fb", "a905cd521e6abf75f847c8a1bdaab9f24c992d81", "7251c5e33a47be2023a1fb3f9bb74fe8e413379f", "69c5fbc040f3ad70f396ec468bf1d725bb13531d", "4ffe93dfb895c86ebad874c70113c4870c9bd5e3", "2c2bf22e2f0a1817475aefb37e0c4e0404e8d479", "86c17a68aead58c7e8e26d9d58a857128da7753a", "f97342323ec16d67fcdd8969e5312e43d4a6edf8", "33c485b59249af2d763d6951cd11e4080f3bbb3d", "3db123d094c7ba33bbd3c4ccbea77e2093ad6174", "0441e485349cda6aee6fa64f11a39e8245eeac74", "76bce0419c3cb063032b1a4a53cfc618c093629e", "5ab3cbdaf3b14352f47c3d2a91c9f2c247fe94a7", "ee7034a5ef168f6bcb1b5892177870fc2563a646", "033fde43e6ff235fd560435bc060d5ffd14fb827", "e0e4910d575c4a8309f2069b38b99c972dbedc57", 
"b3376f115f8b13695f1b8c1a7f00f4cfea4cae53", "e7669c62339ccb4b644729b2060ca41eb7829f8e", "31b9251dedce1e10467a0a33f56ac4eb05ed0451", "490fa9ee39614e1ef1d74162e698e4a1f0e5f916", "fcb3f7498ae8185ed09ac8445fd9409a4fcc49a4", "fe055de0e633d637ef80cf569f0b36d5ec8bc543", "34f557dc85066384ecb5e76000f22f01100dec9c", "b4e6a967666cd8f943b9e16ab4ce61c108803493", "0419726a00e16ea89868792ca94f5b1b262c5597", "331cc0b310251cb180541023c4e2d45ccc2ce08a", "1257488885ab81ce41578bb244c258772681791a", "c24d1b707d2a8bf019e11dbf35c457384e044548", "aad8d2e32f1cc21eedbdd5e8ebff9f367daa6d92", "f2bccfb12c1546bdf73b11904ac44b1cfa130072", "11f8eb971b3ef63ffc1805e1508ff5e52c943cc4", "7017a4c7a972d546ef2d59d29bf7c0ba6888e2ba", "45fb9c434738bfef9fac631d2023680092a0c7b2", "253b9b36565b83d0196c3bd9bf05089d9aafa242", "19cfe13e8196872b81d6f31d2849dc540d146f7c", "89d590d7013433304aae1c97debd257b8dd801fa", "7d19cd007bffabefa59de061b303da464711af22", "712c13fbbabf086df5a6d795377d2b435e132a57", "08e6f526afc00c06ca9862029472e56a7c3de51a", "0d555309828e4c31b79bbdea55066ac175720f86", "e78572eeef8b967dec420013c65a6684487c13b2", "633e147b7d21ea38da4882233422a3e1eec747c9", "31a1d959e62dcc078fa2670fb9cb75b6bb070ef7", "59ee327192c270fc727c5f6d2ef90058ed072b14", "413a184b584dc2b669fbe731ace1e48b22945443", "0cfab1c2839ddacc19bc9af2e821d5c5fd4f28c1", "d7a0f9ab321e728b981e12775b4906f55d3aab15", "760acfa418e26c5fc03471899040404773e38799", "76cd878c37bcdb8b3ae678e96c9b7700184ddb46", "f6f0585ee1e03044c5e346d66549a956886e42e0", "01ae6eac1b235dc2057773d5e0bb7b08d7dda7aa", "3b8ad690f8d43d189ea2f2559c41b6eebac8dcc8", "62a7baef5a3381eef8e142a481a5d5c291447c7d", "70e3c02575e4041519434e0dacb291bbb8791380", "2f33884d0612fcc3f7eed66e1a4acc229860d6b5", "308647f22e3f1c80b7416b3c53fd56f9abfa904f", "508d5e0ef6cbce1997d968c5d4534a7baba84948", "8bfb238673869c428e7723e4b8ea5ec6bdfb9824", "37ff9f3c7254919f46550c1e5fe2d16fd5604816", "3e98719cc0b570c7a0c7c903efb010075dd267e7", "4c5041f8b93fd71a851445e84bfca0d7d0c3bb9b", 
"4696031ddcdfab8b768817fd974b601b6b68c7f1", "323d6d93b059372bbe26a86bad1b9d94b076f50e", "c4072181e35b989b812da9e346e45234967e356f", "c6638c7c1ec7b8fd5cdba039536fb44d12cff5c2", "2df5e2adf01a803405341af1943651f6d8658bce", "af10467547324b6eb1393a6148d8291539eeea1c", "e188aa199d4307fdbbf60e9e6612bcb001e1cab6", "a271f83cb1f72e0f9ca077499f51adb086fb449d", "5d75c4db2e09b19de26b7c77f946e89d449c4ca4", "d47d72afc590f5b96117a5227d45157135da21ad", "913352e569d3e5eeb90de2a7979533355e02acc9", "e72e852dca333d66559dbcfb050140fac5affe4f", "7950d67f7104e9bd82d957f0ed80f11982802397", "2aa06417fd361832df384cf7c003ed1d3c5ee8df", "8ee50fd3e19729a487f7196b682ccaa2d17aa0df", "6f5a3c34360caad4644aea897b8fe7dd72076d0f", "6b3c9c0e4d47bd960c0adc4d13ae524a5d9b94d1", "84eac516ed3b75233c5110468d3fddaec83a2895", "1169f3386a49daccbe199cccb518238a0130a537", "6424add0f4f99cb582ecc50c4a33ae18d9236021", "03f6d738f9b916f80ce22c3ba605a0fa4d7830c1", "0b7d052036b92f8c8cd4ed4717ee3b528b1e3c99", "42d8a6b1ef5acaaf4640a8974c6f99d60b56090c", "a8c00babbec804b4e236400459e26cab1e035d42", "cea08e768eff490c51ac1d884163cbc5ec862015", "3f09e7dfd30acb888b6512638439522c9bbfa955", "45b27c1cd68625b5f20a5286e09bceaaaa4c55f1", "c19ed5102ecd953d5c78d5a0b87eaa51658e07d8", "10d3c60f08df6b6e6eb68ef5709f9ac2b6313f0c", "faeb50fd92c9b4946cd2789ce5edfed6b93a058c", "df58cbf63a57ab1e1fe2cc22fed19b2f8cb89d16", "bcbda7b0fb9897bb7c20702cbbd5df9d0a6576fb", "f9bee6e61833c0323c9175402b73442d27ab9eb8", "18cf63b20521964f2115f6c939f70e582999bff5", "9d422e2c318ab63e6b49c83053757b4636f8308b", "63c71e317168d5b55dccaf5515ad96c9e87f7d9e", "d9c536addc6f84e8b6f48a555b0320992f950b1b", "01d2b2bffd1c6d77398cfe7011d4cbd3a0bc7fd1", "4cf17bca0e19070fbe9bb25644787f65fa6ebe1a", "abeda55a7be0bbe25a25139fb9a3d823215d7536", "280b0a4078232f13a7d4234a9ae176f01b762b12", "0c53b45321131e61d1266cb960fc47c401f856f1", "397f572e759aed28ffd4deb2d3acf18c991e8cf9", "ffa4d1ce1751f449f2d9829a5712c3641cc9c5e9", "82161d704bccae999e80a6fd54658aa35e143995", 
"f423e2072441925a16d95e7092005abf602b7145", "7ee637bee61a7a6d4b2d2d7aea921566bdf5922a", "b1bbe8b1afb97d590269bae95c34c87a8aa388cb", "68415682aa3e25178c9504866f64cf4b2a32273e", "43c1bf9bd7b18c9603324c328f0f2696278c5327", "504002dbd2de78f8d55c860a76a6ee322eb816a8", "2a3991ae72740f3661f98d2ad58a0595bbcd07ad", "a22c372911680793c7f94e3fd0b3843a2019f085", "c3e9086c01cce306620f1968958bb16fd9c084a8", "e20e6c134369e3d63ba0cbbc5f2088db24bf3d2e", "192c7672216dcfb60da0e7953c1b044d1c209d3d", "1c686359a30e68183d1b23e069c56a7c0b1fdae3", "e75575717530f475947c3ac2727621a67927901a", "fcbfa580dafafcaccca5819b232e1287406ed43e", "1e8cef4a24376bd12df5bbb4a1fd7a1af95cc2d8", "3c70360a4ba30b860d337308633842acbb908ee4", "b3014317cea72345a711d82d27f2c03c53932a31", "ca19bbcb4d8ec8fa22d6a2536f349dfcd329a390", "092b64ce89a7ec652da935758f5c6d59499cde6e", "0291b43490e02303c9414f03980e606950ec7261", "882c1e78bd8e89200fc639076eab19843d118432", "da7fc2231134fef949882bc193bc1802b318c6ff", "fdbd6d15f5be4d60592036eb21f4cbf68493d082", "042510b39c6cdb463610fdda2081b36ff469a353", "195aec5513d3fbd1892079fc0fb3ffea0e108ac5", "983f03659e42407b1779e407388ea86fa58043c6", "e24294adfcdb0334c310823c591f15e8829dc224", "1e254f77ccbf3bd796ac3f60001384f59eba4ec2", "872e93c59ff55ccf402337f066bd04d37ebb1edd", "74f4c444cf2b7db77ad0d2138a61b96be549fefe", "c76d143b3fa0d25e21580c583d39ab07fc937e71", "a8d3639b2548263201b84ff865fbb0626d8cbf3a", "7dd77046d1268e41e82fa452fefe91a779d3e7c4", "46eee6d5c5b1de8f19048910a2f862cf10f2b874", "291e6f2a365913100de8bd1071810b8155095f08", "95c5908d856010aa9836a4f1a6cebf3828bcb9f6", "4b387e36ced03088ba24c7ecc01ba5f30ae14e30", "4aa03bb7c10e280acf5d1f5653f336c5d93ecf88", "3686c59fac958de0b3911d5b08213994836ee96e", "b030f6b619e0f5fac51f8cb4933a4baf281f0798", "a2c1c83e2dbeb2dd42b06e97807b287e48c2f257", "f8dfa96a9d146b23bf266d6d880e485e636b0f60", "2933da06df9e47da8e855266f5ff50e03c0ccd27", "3c4d0c604d892572996c1ee0504a1f1e0887a2ce", "8d19cfe643582fae03ce024efaf117d1efef5e58", 
"27caf667432ad7dbb01921696857303641b34f83", "452bd41a9f9a0dcdc9d0ca3b9f0534db7990826e", "914c859b7d991a7e9c733011e1cbbb4277b6bf64", "d53cd5869a3d718ebfe8a383f76e7ede2458dd69", "211f413e811c7850446a1d24b52697b72c81176e", "7e481a07f829b9f9bb5d3f28eb7bbaff0558014a", "901670d2c74a0630d991e1789ec0406988e809cb", "09c4f72b8d0bcd2956bda94bd0776c14ec051819", "e71da35611f4ca7fa19180839a0680f65de78c4f", "cf75d967bb47e1085fd120d8373e32db835d515b", "db9f80948fced4926b09556aea4acb726951819b", "2a7bab2b201b93aca291b260c114f5b2a1f47954", "3e637cb31828efdfec8334c637f09a6fa04545ef", "0ab5a9e10ef99b71ade05f8d9bab3073232a2ef6", "cb13559e23fd88363d7eba62a98a269e6e41087e", "851e78906e1307773b664953bf2830f32b28511f", "b8a70bfba1cb51b92a3f168458f8b0af7f90df14", "15c99ba792bfb0496694884af5075c81a266ee46", "07892741feb277639b8a7d4c1dcc0054077cb7ce", "4e8206dd2e163c6a139bfd0ec3adf410e7b78c4a", "413c960e57ec3fe713e7b3e070cb6072726874bd", "26ffbd8414db0d21cbf194dd9b0f6f9400e8c0fa", "4d51b155e7ef7fd60a3f275d7885d07058f5aa5a", "e8304700fd89461ec9ecf471179ad87f08f3c2f7", "12d62f1360587fdecee728e6c509acc378f38dc9", "805b42d42a52e1e5e20de8950dc18ec9323575a7", "b2cbbaf194abaeea3a4e1c6993dcc597f119eb57", "ed27bc6e3c0e099d02150fcaef140f96f17f1e39", "474c8f4e31a51e2cb3c1e9fed83202b4483efb35", "b986a535e45751cef684a30631a74476e911a749", "0b198c9aececb8f4172ace5c25d468141c5df6bf", "147f31b603931c688687c6d64d330c9be2ab2f2f", "31c0968fb5f587918f1c49bf7fa51453b3e89cf7", "91c014ff243ea747ea3a84a9efd4a3e38a7217ee", "b84ccf1c07c6d2061c8aadaca3dfc4e7d41cc1c9", "91cc3981c304227e13ae151a43fbb124419bc0ce", "df527756b33d1c2722bd005b246f7df75ea0520a", "fb0bbbc159dad0113f5d01ac736945feeced1247", "bee609ea6e71aba9b449731242efdb136d556222", "f11d3728c1e3ca14c697ea2c95f428d38f340554", "98424c79970a80f30db837db84880a4c02e76f1a", "5418242dafa134e6021a30ecc8c566ac83823b56", "8d228b4c0787d9e29b0c1fff05f15198bda911c9", "e9f82ce15b332767c0d9e6326e46bdd6a15fc689", "f02feec9f8d15f929018e0f0aa14446f47112d22", 
"1f65cbc7894323a85f2964d05ae937070e70e43b", "fedb656c45aa332cfc373b413f3000b6228eee08", "14d0de03d21faf6e26bc81f9aa8c5d768489223c", "6d902439b736a7546dd8872b307fb760087ca629", "1f877687022f7b222c7ae1ec4ec21655a290220d", "e23004116d85976ad0a2a4da69914a2c9d1ea5ab", "7c9d8593cdf2f8ba9f27906b2b5827b145631a0b", "97711a255ead64265fe3736ce8a2392ef5c75ff0", "1c1e4415f0acf5d536c9579117d326471f0b678b", "32dc3e04dea2306ec34ca3f39db27a2b0a49e0a1", "df45ca54171804193c0b499e8f3d282cc8b06998", "58888b30e9123c1b1709be1efa92898e090d7bd2", "6b7e552ca66fd3235e99115d6f072061b9ada181", "4d8347a69e77cc02c1e1aba3a8b6646eac1a0b3d", "fd0e1fecf7e72318a4c53463fd5650720df40281", "4e6830f272eb7dabd42cf14133c57070dbdf212a", "5386c181d0b294d54123f001678d1125ca7b76dd", "07c6744e25ed01967e448a397f5d7e9d540345c3", "8cd61bb3469aa253d4411ef2295b50683a031d17", "19be4580df2e76b70a39af6e749bf189e1ca3975", "75b987f86af2bc7f68edc45be240dd30e1ef2699", "b7bc6a2dfe99668457b67007400f5ab760386d0c", "e9c7d47fb82de9b71bdde1ad9b81eb2b2970b8fa", "fde900199b2aa6d8a5364342885bdb2f5f7073c2", "e6ac47a768188971d0b478182db9026221a0807d", "f3196b1bcd1af733347edc923d43018724fc73c9", "de3eadd9da762010066eab138cbea5fa3089a290", "d3516392214e7c0dde80a2ea8ba45e70e462fea6", "a20f132a30e99541aa7ba6dddac86e6a393778e8", "97585f0fc4e55fa849bae0d40a7a31a8f4dc76c6", "b76ed28a9b8a8b9061e443c85e39de8ef0d63605", "c9b98c98357a154bceb2287c427c5fa9c17b4a07", "cb8567f074573a0d66d50e75b5a91df283ccd503", "61f064c24cb9776a408b8bf4cfbcb5a5105ac31a", "d2b8459b41172dc332cf00dc18a309c442347a7d", "936a60174ccc8f9448d38b269a53bc212125370e", "2e0b4fd1c7f49b88ea8ec40ace80f64f54a9215e", "2084e54505cfe4fd81005167b1b11d10b5f837d1", "d9912256502b9578cea7d149142832e0998d97ff", "8a77025bde5479a1366bb93c6f2366b5a6293720", "2102915d0c51cfda4d85133bd593ecb9508fa4bb", "eeec69e910430bebe3808773f5a6a155d77059a0", "949d20c44387918cde21f800d8d1cdf53f016bb4", "a3d11e98794896849ab2304a42bf83e2979e5fb5", "24ab0116bf4f56290aa8f8dd98524bb43fab6d85", 
"99b8e5b8544ed6aa45726311afb0679363c875ed", "6c7a36efbe07ab295ddebc60c834cf74ec30ba50", "f96525599a5abc069d2415950f6acd0ed37ab7d8", "974fea3530307da6d22ef91c6765f5404514b3c5", "84984c7201a7e5bc8ef4c01f0a7cfbe08c2c523b", "2fa4b37f91667970150481c37a1f4e294a49b7f0", "5cac869f7e47c290ba14d27a5d6b5aadaddfaa69", "463a1ca5f819af35e71ae47ea0e57293691507d3", "7481b7d5272326f4e9efcd49d31c7f42adb8ec4b", "d8949f4f4085b15978e20ed7c5c34a080dd637f2", "e6a8951063c3e87c7e8458faf9e5c4584c32cafd", "2f72cee2b9ae3d4271bda9f9bda1f11ad84ef616", "de0cfd94d16468cdaaa0fe725e214930587ed8ce", "02f94930219d2bb632d067ca2d31db61161ed5fb", "fe68d6fe52df8c28f7cf81b338c491e5bac6e33c", "d95ce873ed42b7c7facaa4c1e9c72b57b4e279f6", "8acbf69f5877dac506bf04dc1802f327247cc27e", "f8796b8e8246ce41efb2904c053fe0ea2868e373", "a65f9ed8f5d6b86af819d257482a1039bf0db7e4", "8e54329a35b11e48d398dd3df3b27c72f48f5b2b", "0e36bf238d2db6c970ade0b5f68811ed6debc4e8", "998bb8720fe25c6b38fbc70b1142fafde46e39e5", "b94e57ee9278f06c65a96ce1b586cb7a5b2b7fbb", "b54ed594267ead88b99bed3230e0453f54d00873", "ec9c20ed6cce15e9b63ac96bb5a6d55e69661e0b", "c635564fe2f7d91b578bd6959904982aaa61234d", "6a03f9b4354cbe6bdb4c00eab3521a6463e563f7", "592e555ebe4bd2d821230e7074d7e9626af716b0", "53492cb14b33a26b10c91102daa2d5a2a3ed069d", "4129e1075c7856d8bebbf0655ae00a4843109429", "7c25ed788da1f5f61d8d1da23dd319dfb4e5ac2d", "83c19722450e8f7dcb89dabb38265f19efafba27", "6c70cad229cf3f02d3d490b42c7bd92c6eade1d1", "2b9082b6b5266f6f7d7a95892f30cc84138697e5", "f81f69570113e5171203ac121d1ec1d8b91df4a4", "a54d63c1a8c4db3c5034b1fdb08526459bb3c0b1", "384908bfad5b9e81d605344abcb9e99d8b0f4027", "5193328862366e114781cb6b196ae958c1553357", "d3797366259182070c598e95cef8fff1ddb21f65", "2fad06ed34169a5b1f736112364c58140577a6b4", "441e7df66fe6052a6b770c3aeca4acd8dea98643", "1e83e2abcb258cd62b160e3f31a490a6bc042e83", "c97a5f2241cc6cd99ef0c4527ea507a50841f60b", "9812542cae5a470ea601e7c3a871331694105093", "70ce1a17f257320fc718d61964b21e7aeabd8cd5", 
"a0c670e76594bb72992a92fd8d51b42cee868a50", "c8c60abb7cef001213d951d041e5f03042de0df5", "93817c245aaeb78dc8ebf1b24450942a3e24ffe7", "5e47ada58f5b7817acc46240f1090ba51dd0c637", "63e1ce7de0fdbce6e03d25b5001c670c30139aa8", "ea80a050d20c0e24e0625a92e5c03e5c8db3e786", "7abac083402d44a96769f9e68c8f6ad84ba80472", "3cbf60c4a73fadd05b59c3abd19df032303e8577", "3857ffcf39ec6183f0cbbe8c5f565b1ccd0dce5d", "f5a00153cb49c2f66b072832adf053ebabf50850", "1727601f148b937a49df10194edcee4800852a97", "4e5c09ace0cbb3a442bb4e32a22fba23c12ac063", "218603147709344d4ff66625d83603deee2854bf", "6843531814f19799c86af271b8643411360549e8", "9b7c6ef333c6e64f2dfa97a1a3614d0775d81a8a", "34cd99528d873e842083abec429457233fdb3226", "e902bad28f1370d5252e44fe4b7d0563aa9a2383", "5d2b396447fae5a64cbe6b5ef5e99ca2b88c2914", "67289bd3b7c9406429c6012eb7292305e50dff0b", "b0724a7b4b63d58e249379b889656a899455e0c2", "8ffc49aead99fdacb0b180468a36984759f2fc1e", "63d64d07966d4d128581246a891ff1d3dd3a2a96", "69a7c8bca699ee4100fbe6a83b72459c132a6f10", "6ce6da7a6b2d55fac604d986595ba6979580393b", "ffbe733a352c1d995f6f5c99ac0c7f01567165dc", "81aa7b082f97f5fb7d6ca6cec783a3d205adcc2d", "a77619c8d36c444cae62e767b33b2a8c4e30d4ee", "d950af49c44bc5d9f4a5cc1634e606004790b1e5", "c753521ba6fb06c12369d6fff814bb704c682ef5", "0353fe24ecd237f4d9ae4dbc277a6a67a69ce8ed", "e746447afc4898713a0bcf2bb560286eb4d20019", "61dfbdfe718aca026cafa06adc63055bd0fc562e", "110919f803740912e02bb7e1424373d325f558a9", "7237b27dac6dfe5c07a2c6c36ad848e6bcc7ac77", "a089b7627f4661e68fbb2acc4a741b98e54f1f09", "07ca211bde38009697c964702a29d0fe3260bf97", "124d60fae338b1f87455d1fc4ede5fcfd806da1a", "aec7ba707a37446fbf39cec960951ded2d575267", "9a9830c6edca29ca91f7bbc2683eb9ce43136469", "a65c3af3752866732fb2d1ccb3305ddf0fea91df", "47dc53371cfeec1b78a35e0e5f066d10b1826c43", "2056ba48e687d619c0ce69d0be323d48c5b90701", "1565bf91f8fdfe5f5168a5050b1418debc662151", "71cf2badae09d206b94e1a07cb73018e4334d638", "5ee96d5c4d467d00909472e3bc0d2c2d82ccb961", 
"d1dc5a8b4d13d2c51eec7bcb29d08f471d3b65dc", "3e46079c418d2d0ed2fd9d32ec1d721c2c6bd904", "8cc1e76ffcc34b7afa8b7e3c5a2339cf88699868", "fa3fb32fe0cd392960549b0adb7a535eb3656abd", "f2b5177d7c4f568295f6c2b9e02078e36d9ed286", "1bdb09190fc0c66f7e1a6deb7a0ebbaba6b2a42c", "c0e9d06383442d89426808d723ca04586db91747", "ba32bcea914685f2dacc689b3d9dc070eceae684", "c8bcd8e0b2ab6cc00a565efbcf904235c33ac2dc", "7965b2ce7d64991218515e20fc1fc0459fd20a38", "abc8638968909ab0fdfbf1049009082df554a49e", "d2b86b6dc93631990e21a12278e77f002fb4b116", "b5184bd428b9a89255900dce50b4320741706744", "95f858658c2955924c00e8abc2018c68c3837e83", "5645f567d83bacf318c9b8c4adb5e508b713e466", "61470aaccabd195a361112d38d80d3779498f784", "766209460c41d18ec357c6e1c8e79480b0c3c0f0", "11fa30ccbf62a64f650844b9cc39797e5faa82d5", "7ec16949adf6ea78054f6c6512f332a86960b91f", "801b0ae343a11a15fd7abc5720831afea6f0a61d", "e7711b98384cf72a3ef26a51653083e41781a6bd", "036fac2b87cf04c3d93e8a59da618d56a483a97d", "634123907dabf9dbb7e7bfc4755714e37ec91356", "5a672a0e5c95dd70041989d60672b7b2017f7018", "c2206c3b3cafd1cf1694b0a231c80ab806f0f59c", "0a058caa89d195930224148d3d2897c0c08fc668", "017e64d98005d7acc117ef916fdb1bd0f4089ebd", "4d799f6e09f442bde583a50a0a9f81131ef707bb", "c7fff0d0a6312965b269c6180b2112babd40564c", "08b28a8f2699501d46d87956cbaa37255000daa3", "1e006cb837d4d01efcc92167443ccf3282329f89", "7e186b41f5d2cfdf1940009e61d4e34a47b33c7c", "952a8ef56f35376a52e4540d3df9e48f4077b09e", "216c61796c6ead27b1042046e1d95a2038624d26", "b82a4a0457170258aaf622b81e6f739a220398eb", "6e12b8cb01abd5d6af6023e284009d417c53d160", "5084cf08beb04ad91ef3c5649077255097d0c5d9", "fc068f7f8a3b2921ec4f3246e9b6c6015165df9a", "76616a2709c03ade176db31fa99c7c61970eba28", "85faaad8eddebc960865e351c0e3ea81e25d42eb", "4e67d44495ab109133b3d94d56e1d82b8d75e9a1", "5eec4db50ad8237d881562d036c275d87dd14683", "34ca83537d5d5128dd80cd3c4cae3c45fa5f2263", "4ea6954b47baec061fa3f3e1228833eba7be07f9", "864fe713fd082585b67198ad7942a1568e536bd2", 
"2788f382e4396290acfc8b21df45cc811586e66e", "a6bc69831dea3efc5804b8ab65cf5a06688ddae0", "e1cb110c45c4416f7aff490db2674abe1460259e", "3c793fa4d7f673f1e9f6799729ec266ce573ec60", "61b0cfd75f5bce59cf79abb7b602e404fa5584e7", "317f5a56519df95884cce81cfba180ee3adaf5a5", "a6bbd477851c5642a67817e43302d22bc4a95aaf", "3b3686f87f9a22db2bda99cb279c817e7f8a001b", "631c4ca00eaa65b801c63d32c0f564e974009ddd", "014e249422b6bd6ff32b3f7d385b5a0e8c4c9fcf", "d3312da8c703ed7842285289c3d9478f333dbd48", "2788a2461ed0067e2f7aaa63c449a24a237ec341", "5be74c6fa7f890ea530e427685dadf0d0a371fc1", "b856c493c2e5cbb71791f56763886e5e0d40295c", "850d2697fec9c2bb434907db1c0a11e200f32dbe", "ece31d41b4da5457d570c04d22f19fcd026776b6", "064a79968f593d17934c1cd14def70aac56aecb9", "d6126460b1a7034a39e5fdf3cc189dca3001c84a", "6f70e85442959079bfb67b925c660fe86cb4ba24", "d4448f8aa320f04066cc43201d55ddd023eb712e", "c3d23ecf2132f61cd7a5dc0ebf11e3d6640e0e1a", "08d2a558ea2deb117dd8066e864612bf2899905b", "ca1db9dc493a045e3fadf8d8209eaa4311bbdc70", "3b311a1ce30f9c0f3dc1d9c0cf25f13127a5e48c", "608dede56161fd5f76bcf9228b4dd8c639d65b02", "1ee896784275d0517963815b7c7ae1c788940409", "207e0ac5301a3c79af862951b70632ed650f74f7", "787fe79e880ecb78ec6df797add20a8f93878b68", "5a08b451b0397782d81edb5b614bb2a523c6be98", "e1af55ad7bb26e5e1acde3ec6c5c43cffe884b04", "927ec8dde9eb0e3bc5bf0b1a0ae57f9cf745fd9c", "2624d84503bc2f8e190e061c5480b6aa4d89277a", "48fb35946641351f7480a5b88567aae59e526d82", "6a699b51c37b532460483cf2eaae25e4d868c73c", "99bc96eea249e28b3e741fbe15757a38d52631bc", "82d2af2ffa106160a183371946e466021876870d", "d916602f694ebb9cf95d85e08dd53f653b6196c3", "93978ba84c8e95ff82e8b5960eab64e54ca36296", "ee661eb1d6ebfdef0d0b0784529221c951cd1188", "116f9e9cda25ff3187bc777ceb3ecd28077a7eca", "97cf04eaf1fc0ac4de0f5ad4a510d57ce12544f5", "1a40092b493c6b8840257ab7f96051d1a4dbfeb2", "8e3c97e420e0112c043929087d6456d8ab61e95c", "4ed2d7ecb34a13e12474f75d803547ad2ad811b2", "06a9ed612c8da85cb0ebb17fbe87f5a137541603", 
"5c4ce36063dd3496a5926afd301e562899ff53ea", "e0446d14d25a178702c10752b803966a54b539e4", "1aad2da473888cb7ebc1bfaa15bfa0f1502ce005", "a8c58660bf2ee1fddc3ef05ce52c42775eb0b2b7", "a671cb0d366ab895249349ca457673150ecc8ee2", "3c6be0034477b07222f41f6fc558a64f0222a192", "9507e5c59c45e68b964fdaf40e39569dcc754be3", "74cce11cfd25618b0fee0bcceb2f23376121a1f6", "36cd55cdb1b032c8f29e011ed0637923afc46d3f", "78cd4291d123c4835b0298045aa89ba6674581b5", "c29f21f0b098123e523df17b23fdc4a82940cabe", "1a30e44d6b70d11f7b270c87eac099b75b2263f1", "d20efdf05444a9d7509b85f6d5cd59359b1062f2", "30bb55d3ef6905cfeedc12aa0dc70ccbf85c8293", "d1e388269ea8ce7074f804f79e158038f629a0df", "6446089a2a383ad9e4315aea0199084dc61490f9", "3564ee7ead6263a6a83107ec9610f72498163f0a", "10ad82949b65bae59410aaab5aac88d2caa6a3d7", "0ee7990e2ae054aab5f1fc08670fe5eddb96fb19", "73298c5610004a8337baeb79f33c1519c0ba59e4", "107231a511fa981ac8d13723d7aea52847580930", "dcf17cc3b4f8519a6789c1ea086689bcbc1d6f11", "bec3c3e6bb9c738dad942f00fc69848018c3b1cc", "692bc33f7466278900dd73f7f40c563f72cb6754", "f616a39587de5a965df965cabe541dfdcd604466", "3837f3faa722c91aa21d6f17ea1ac1cb5187bda1", "0229829e9a1eed5769a2b5eccddcaa7cd9460b92", "95df7770a5036c87104df23f333aa05e67723cdc", "7de79a149568048db336d92dba9ea5ca54145628", "85780c474c20f8d824e57081521e7420a4055f65", "30b7d284c29e49a55797041dffd26d8cfe3ad9ca", "5a021bb28e8c62a8c21fffa1ff35929ef2edce8d", "39d900da87fa2f8987567d22a924fb7674f9be67", "748260579dc2fb789335a88ae3f63c114795d047", "51a128eb0d72f318e9d3617ceff64539dfeaa608", "46c73ac90c8107e9580950eef9df6834edccd183", "929b889750747818f75a7974b45a8d309761e5b1", "4289f9f727af39414537a97e5eef90b06115a5db", "fd36e838ffc2f56afdbd87a98f1dc4e05d20ed33", "7cf8440b1c02c021f6ba8543ad490b4788bbe280", "fff755ee8522d5ab0931babeaded2f9113c44b95", "c42adbb77919328fad1fdbcc1ae7cdf12c118134", "c160bcbc8f0517a97e46042c84343bf3f0477478", "3837cd26a92e6c20b4351b3fd7e83a422e56cb89", "0cd8fabfc8e22be8275c317e7ccd37e640711c62", 
"05f3cc64e640a9aca2d0e6086aa6efaf103a3fe2", "99029377dac51a3f60063f61cdc5471866c348be", "9445d51fd7977fb11a34a0e522efdcdee0d5cd95", "3bac7069b9d3051f40ef4eecacc517d02107ba4a", "2842cebee2793c9b4f503895a32b328b7781b60e", "1d4c2dd3996cb3d87da6c35d72572637d3175ea5", "4deb349f0883cd1ad8c7b710dd4ac4e91e69f7b4", "4839f861709e6ae6d4d032228473ce1764acbdcc", "40b87d3b1e3dbbc82fb7d786004fe202e131c045", "67f88f37e4853b870debef2bd29b257b5b19f255", "057d879fe2d6c40ef79fe901cc62625a3b2ea8ba", "3d42aedd347f927a6bce28d0fa509c6d2132c11f", "0888b6904ef12bc7a3c59fa59c4051d5002de80f", "18101bfc08381ddea8fd944a3300dc8cffe34e63", "792e656d2297d3b00da73c7a606eb6f539311c25", "50f1427d24b13ac374859ef851bfe1e05355b958", "62cf98ce9c28dad430810a59a527df50cebb65fd", "08efc3cd63b5657ac65e68aaaa9cbacfd2596fea", "8546885e83f7901340c7893fdfc017cef86d910a", "aa299218f9b7cda78c440117f12f193c3c4a86cb", "f33ef5b2707078528f23e067565f992f4b03f4a7", "080e81f425dc129c4e4022568e2a23674ca41307", "133b0d480a8fac7c7e0c7511b5bdb0dc7d387d42", "5342b6e9ecdc993da2cfd992b4939fcc6fba231b", "14a7e7290f81e313804a000b125bcd1c341bf9b4", "6dbe18855b85bc6f218c53993cf289e2607518b1", "2917808d9018386af42e249ba4fb94bafcda54e5", "4fc67275bd9d68895933c3baddec266402cc2412", "1b715b4cef51be6bd5dd73c0d30257d853411a52", "3a192e0391c357124cd2ec2287b1706f523ecdfd", "71f9bed14188d861f248fb426a26a3a0b400843a", "b1103fabd55cc5022be550a0fb2ae25aae3f2d23", "64a62496e20d8d6a0a7cd0525304b67b848ccf41", "359fda4ff19dbd3634b867fbb3ef3cb6812691c5", "9d85f3cf2cc464b67ff0ead1a82197878da7494d", "dedbbb6e588e77969ab87571917d4f84a3b1722d", "9bdc406ad9e9fc0ce356e6d0e53780534f418849", "2deed841cfde51ce3b4e90880894efbbfdc18f18", "50d80e2020698b4cf49e6b820df0aea497d8fdd3", "114d6a2503847a72afeb38e79243ad10abc7e123", "b3ad7bc128b77d9254aa38c5e1ead7fa10b07d29", "05f0b43fe16282656cf1fdce919ac0f9d433f4a5", "37a4199e63312f7901af853998951883e52ab062", "d49d1d6239e3a14da48074ab7c2f219d5bbf8861", "4dd2744a37bd1e666346a41dcd2a271945c74e2f", 
"158452a25143013e4c406ee2d41a7399c34df3db", "603dee8661aa9bf0d7af6c61fe5fa2e85227f166", "015a5fde8c9c89ae9ae8349183018acb8f0e741f", "5e84fd5c73dfeea9d51e1cf59ea6f8ecf2097603", "daa52dd09b61ee94945655f0dde216cce0ebd505", "31486c9342901ccdcef5a37c501ca9c56a1e3bc0", "1362b43a76412ed9ac67fd182a72b9457cae5aed", "1f4b741a9da2fb7623ff68c8e1df3f3cce5e2542", "22f21a5b230d6bcc2c4ab4e4d5ae57a20f09f348", "38a9dfdf72d67cea75298cf29d3ea563e9ce3137", "edff76149ec44f6849d73f019ef9bded534a38c2", "e2d8363021947b6ecee6f8f8da6f6571cd58d88a", "1f7b2087dd0784a04ba4d2a68c2db9588f36c33a", "00083b3b6c356aca3eccfa26988ce52a6682bd6b", "50997081b89dcee1c304a16f714385142d512fa5", "2c2371629ad7bcde46e62859b2e812f6e5fc64cf", "3c5b3c90f951ca0f4b4e3a0ac3dd6aacae4adf72", "b59df3832c3914c9aefc7f11017360a24bd11480", "3597dce344b088f913689abde927a59a0bedde48", "990bf0171deae7f788f4867c155a276fca5c891a", "40bb3ef2b4e556a3646a8cd77364a89b8773e4a4", "697e6552326bc04a80b510f91e3a83c23159fa4b", "3de03bdea67b43d5004ba5be14706975dfba1994", "6b99cd366f2ea8e1c9abadf73b05388c0e24fec3", "b85308870c2b6b8b46ec78908bfd3140ed1398ad", "2862615e5767a8a81257138f04de6c5bd33e2984", "3dd906bc0947e56d2b7bf9530b11351bbdff2358", "3154d7479881c7efd8a50909af921cfa8cff8e2e", "5b300b85e10aa312dfe31d8064d86f764d0aaa68", "3dc2bbbd0c17643a8cd08ddf2ba385af2fc4b405", "48d299fe3303c80f840816fc76971a42b4a8b624", "e4a6d0ce979c2067c6d0aec9e7a22113b8d3b7d7", "2e9cc64376ce44c6ef848a13aaab26e3878b9ec7", "2a5dc32ba784ff30c73011414bfb679fbc203069", "2d4a3e9361505616fa4851674eb5c8dd18e0c3cf", "5090e374a0d505040ca6fe957936a12026f5347a", "20bda5e991a7c0774f2767b80de0ff54db1adf1b", "ff59ad8cea158ef1c2ef15e43517e8fa33df97b1", "0816b525b03e47d995b3d97f1f9132a4f7a2cf9d", "b7c8452ac9791563d9a739bd079b05e518b20aea", "89dcf3d6f42f1a2fcdb0c81982ac1ea9e4ce2339", "c268c0d62eac349468f786ac50342213ef7865e0", "1e8fd77d4717e9cb6079e10771dd2ed772098cb3", "efa2b259407b5b9171dd085061d05b72b6309eb0", "78560fc9c224c1b605b3ed30cc3345863c5988e2", 
"6c26743e131a67b25738beffcee05da6af5d87d9", "6275aa21331a2712222b7ab2116e9589e21ae82c", "0dceca6bb3ac648c611f7097cf52a9b7f59be6f9", "071777bc168e9940bb04b207d3b061bbd5a0c01a", "5bce272adc5bd6934fe31ae3c648b4b62191353d", "4dcff552a198b58311b04935ea2250385f54c585", "7de1d463fef3c63cb228f5b4a6a72e62f66630e6", "3ca4ce8ab704b44701bf7ef8dda01c8dbb226fac", "5a48999cf31b26191e2db60d80794163d5f8c43d", "1ae19084d2cd53c70d7e44d419df32560e417fb9", "56f5a94047966eac4b2f97ded4b50513f9a09951", "4213502d0f226b9845b00c2882851ba4c57742ab", "e8de844fefd54541b71c9823416daa238be65546", "d0fe63de22729bcecf12a84554cdfbccdb44c391", "05ea7930ae26165e7e51ff11b91c7aa8d7722002", "9cdb83ed96f5aa74bc4e2e9edacfbb5263e8fc37", "8d30d06c0591e08a0b37ffc565dab376885499ab", "b3d592bfbdeddd4074cf7aa8a832f13cd9d3be0d", "f35acbb0b2870e5735561196d246463aec8ae7aa", "b86a3953682331302eeae1d977b1306038d4df53", "0be8b12f194fb604be69c139a195799e8ab53fd3", "8e4ff1aa78f8997b683f873c46999f384db4de18", "b0fafe26b03243a22e12b021266872afdb96572c", "995dd15671993b2165860c54bf5acbbe421c5f45", "275ad26b7e4d7847f7ad4eedda65f327007a9452", "22f2f77120cd28e9b2516179239380adef46b1be", "6c06ad0b4b7c981089b5a8037d5b9f9e5b928196", "214f552070a7eb5ef5efe0d6ffeaaa594a3c3535", "67b38b88f3b3acb4ebba3c1941cbab7290bf59fa", "0ef0db6b7bf2244459497a3bf24e56c7850cf369", "7cec368016b146254817d5c0138876d2772894c2", "30654fd93360a339e271d4b194b7f7463b2c5dac", "0760e5ba407eb29d7a1d1baa7903478f6c492dcd", "68a3f12382003bc714c51c85fb6d0557dcb15467", "0ae3182836b1b962902d664ddd524e8554b742cf", "f28fb0dd4187325b03ea3c82a21e6a34e788847f", "4a70c6e14bcd7a44838fdabdcdb33bc026c907b4", "92dd389bf0ca3d651ed3be85b14e6a31866b1ed2", "495015d21c26eac9a6bd64c836ee3370283641ec", "013e0fe2d203eaa33a4b42d057688815116cc6bb", "0f6d068ca799e99100fa5ff7503163fd1c9ae581", "2fa23deef68f0944fed54183c79f835df2d1747c", "0ba87571341beaf6a5c9a30e049be7b1fc9a4c60", "14f457bcb5c3e294919512b132bb171bdcaf5ec2", "c4cfdcf19705f9095fb60fb2e569a9253a475f11", 
"fcd248fe4da9d719ccb3babe4a82bbd7d7b6935d", "cad24ba99c7b6834faf6f5be820dd65f1a755b29", "abac0fa75281c9a0690bf67586280ed145682422", "6e6923a8b39cd22d714ae9364d18bec8178e5632", "0baee7f68c08f1a6b5190755adebc57145d18ccf", "4641986af5fc8836b2c883ea1a65278d58fe4577", "b2e4c58e5b7cd5db446a76551e9c37b650d5c2d7", "191f8b564c4f90d2ba7423fcce4efd7e902f4f77", "9772ccb519268f067da7707fc199ad942ac63c42", "d86fabd4498c8feaed80ec342d254fb877fb92f5", "29c721f628803b54bbc0eacd27e60d94772cc2e4", "72a4390a6c3b2bc2c3e7d83fc1f99e65e6137573", "de02a762f64fc11ad2fc2aefe62361758d0e0786", "759a7e443f725be44ded970745c5422fd3196127", "5cead7ba087ebe7314f96d875f3d3dbb8dbed1c7", "2e480b3ef788512d647129509ea2e7d20464bf45", "293ca770a66313c9427dc71cf86bef7e1b94f2d9", "26c8ea087dc0744240ee955db0daa9f4355e4f60", "3436b30d5c09a089252cea893fced7b3a5cbc675", "14761b89152aa1fc280a33ea4d77b723df4e3864", "249c9034959448e4ca96e9e753570c20ccbd90c9", "a2ab16c6eff749d2081d11ddc0b9e310eda62061", "fdc0754852b9c8366341972f1b5b4320b48d64a9", "5a0209515ab62e008efeca31f80fa0a97031cd9d", "3463aeda3a8d33bc88d8383cc4ad451a6775c75a", "5b249cf39370503f22fc7d4b257d735555d647ce", "c54e00aadcdc8c4dcf556dbe4d30ff3952df94f5", "22bc12fb82db4c5a5f52bd1ba70e25ffac94f428", "1f7127b9ae86d7fac305c6b824801e455e82f511", "2af06b88444cfabf92f54bcb3a31e9afd149ae69", "052651838d27835f39270101e140055e60a59d68", "c5632e2117d268159225d5c307b7efbb6428ccba", "36c91b1342c1357877e89b4c43f8eadb39755c0b", "16c6e567091d6eea117f50173f7c0e03c4e54ea0", "b6dc1cd3cabdfea7363d41773a315a0d241dc836", "d096bdd5743cbb33f0cd0ae984d188b2c302f054", "0e01db4197f71450118f81ae5a69ce4916b46421", "f94feceb5b725c6b303b758a0e5e90215b0174d3", "5ee0103048e1ce46e34a04c45ff2c2c31529b466", "143fde0dab5b736e6c2739da740e461707ad3391", "83bd99dfa4b9e55adc229765449528ca8b1b2fdb", "78e9abfcee29491ffa53e7a988401ea06fbbe719", "2f3a67394deb32f265bcff9daf2c829d4be36336", "d5525d00bc2099700711751e33f0fae9a58577ca", "f8403bf4e3060487cbc8acceb1fb256a4f1cfc76", 
"2da2ae720ea37fffe7247b9cf977fa86b04f97f4", "b984c9815fd556cc845adae1f9a206d2a0fa2d33", "d970117b1f976733d033dfd9ed374e77d0bd970d", "f7d658da66748ade53850e6b5002041a0dec1c63", "af34388e69800a168876f7446a621f68ca2215c0", "3229a96ffe8d305800b311cbdc2a5710e8f442f4", "151b87de997e55db892b122c211f9c749f4293de", "33f3b212d665d769a209b7a278dd9907ae2be952", "08ff22f76a567fcbc1afec6bfbf957a560cfadc7", "3805cd9f0db2a71bd33cb72ad6ca7bd23fe95e35", "08aedeb74dda306a14c699ffcef4f434a60f34e8", "19150b001031cc6d964e83cd28553004f653cc24", "590739cab80ad1219143401be0d929bc2885901b", "4745baf6c4ae7a088f03340fcc05ad7d18a0aca2", "e75cd1379b07d77358e5a2f4a042f624066603b6", "166b5bdea1f4f850af5b045a953d6de74bc18d1e", "74e869bc7c99093a5ff9f8cfc3f533ccf1b135d8", "01a903739564f575b81c87f7a9e2cb7b609f7ada", "12ebb51d50f704b5d0a8d821e90dd336175ec8aa", "15e28e884fb6c7eba2610e3dfcd5b40dadb14155", "38c33ba18b909f5e40ba02c60237c64cc8011ca8", "2d69b3965685066081e533b29fde3364a6cc21e7", "eda20a2f33d0f6db44a2e7d060efad3caa6621e0", "ab1719f573a6c121d7d7da5053fe5f12de0182e7", "8d44aa6745ec0b30f1402531b3419f3310587dc7", "734e850ed1187a2abf91187c00a46c3ec172060c", "1ff616ae8b61f8167f2d626b7c1a36e018b23e94", "08263a9ab40ad37d0a4368f2a089c3d44c770d16", "3dedff7d7813af193589e44c204b08f34c297764", "936c7406de1dfdd22493785fc5d1e5614c6c2882", "2c55ac6330ce91a24131a81807237807134ec371", "54a2fdd4402c45abd7247e4c393ee58528baf8ee", "89588a697c8b81e38d3793db5055a65c4abc4845", "3240c9359061edf7a06bfeb7cc20c103a65904c2", "0000fcfd467a19cf0e59169c2f07d730a0f3a8b9", "3d4e6fb9c238c490f57aed72bcf9a81ea5f28972", "051830b0ea58d1568f19ec3297e301d9789c9a76", "53288f4c3bcb993f8561b4af1776ec3145d7a051", "4be79ee47771c670aa63bcdaff870f9dd8575a0d", "be2ce56434c8cf50c08f8be6f4f9b9f7c716eabd", "00d9d88bb1bdca35663946a76d807fff3dc1c15f", "be49dfb17f3743493ce77fd8e3d440ded451a0fd", "6e97a99b2879634ecae962ddb8af7c1a0a653a82", "0d0cee830772c3b2b274bfb5c3ad0ee42d8a0a57", "0324a22f71927bee2a448f800287cde562dc2726", 
"688f5cb02dc6c779fa9fd18f44b792f9626bdcd0", "59307b897ef4cc4bde2d2393f97310e7d89998ef", "719b741280607f258707d102feeb53dacf00ff8b", "1d524c57214384ad6a003c54b1918130744b69d2", "4d925db7c9e3cca2e8fed644f750d218a48cd081", "03bf59f6db62b5da617e42913e9cbb1e58b79f28", "26534206831483d9f5434fe2fe0839afe83cfca3", "04afb510e11e963fb18e3271ac966164db806120", "01aad32349489cabfcb619024b297d8f854e9d1f", "8e0a86634b286567433736a667e3a0bb7902470e", "f62d29f9e1d426297640c7f0e43961b181449554", "cd000f4a7a64db5e00b200b93cc3f13c9e313c01", "3726b82007512a15a530fd1adad57af58a9abb62", "2beb9777bf452d02f9bec5275c100f4a736def10", "033e3fe75da26d8d3dd3cb0f99640181655e6746", "3d275a4e4f44d452f21e0e0ff6145a5e18e6cf87", "208e903211ddc62b997afb5a1bd3c2c43e0e69ee", "8164ebc07f51c9e0db4902980b5ac3f5a8d8d48c", "cb08f679f2cb29c7aa972d66fe9e9996c8dfae00", "13d6dde8767ac7176dcd6d4367974292bc627863", "d1c204da4e0ab653c32ae8fc325d5b69641b6ed7", "05a3f10b6cef0178f5d149ece2b0f08e8b6af7a8", "cb9dbaf955cfb90a89a7b608b7390fa444a2c3a2", "72c248c8d3bd76e2a31963aad7286b8d06ab7f8e", "05d0c5f579314e12c35fd35ed0858255d8c48887", "081307db6d8d709af26f49d24041086bb09abfd1", "3cc16be882dff500b8fce3d7cba3d79fdeec4db7", "a97a070e509377a66d0ba3b896c2db096b72971c", "7697295ee6fc817296bed816ac5cae97644c2d5b", "ab98abfbdfd700c27bee31ca1f8850db72120c5d", "1a6d748365dbf3b17f2db371a30469478ee7b142", "3a3a4408432408b62e2dc22de7820a5a2f7bbe9e", "42832bcb36ee3f69327c38d0d17e6e2a73aaa2a6", "ac4c19e52a58aea27593b99f0ebe5316339b9646", "5291304833a3565f8a2b6c13c1f12e6841925a87", "cac571e2e2d37b712055196e87d57a8da742e7cb", "3e78402eab72d87eda1f0b44ca7ff54ba0b6b914", "6eb7ae81554ad4db92ee6b578f47be659c8b9cbd", "f1af714b92372c8e606485a3982eab2f16772ad8", "4354a944535df5e62a2691e2f5c0567c2fc3f322", "b2749caec0094e186d3ee850151c899b8508f47a", "3288e16c62a215254e2ed7c39675482b356c3bef", "6b5438161cfe55d1bd44829db81f396819e9e6b9", "3a37f57a9b94fff82ffea4e77803ebe5ebf6401b", "897aa4aaa474fed41233faec9b70b802aea5fdea", 
"c82c147c4f13e79ad49ef7456473d86881428b89", "bdf46e52b9cc967628f423b1a69555a1114cc3e3", "28ced5aba250a35d38bfee1c8ea220c479f58ca3", "2b0ff4b82bac85c4f980c40b3dc4fde05d3cc23f", "f17d8f14651c123d39e13a39dc79b7eb3659fe68", "1ae15cde8586199f0359d4cceac3b1e14f9243ac", "a729d0243b1e3b055f44248a32b3caf20b7e93be", "19237fd84387378635ae135cc53ae934267addc5", "c54fd2ea5fe45c1377a45c396fd68d3bc00a699e", "c2c7066b485c2cea70320af5a9fcee975e52d872", "935924ddb5992c11f3202bf995183130ad83d07b", "08872d801f134e41753601e85971769b28314ca2", "2b8dfbd7cae8f412c6c943ab48c795514d53c4a7", "857c64060963dd8d28e4740f190d321298ddd503", "0bf1f999a16461a730dd80e3a187d0675c216292", "ab8ecf98f457e29b000c44d49f5bf49ec92e571c", "ebb1c29145d31c4afa3c9be7f023155832776cd3", "8e8c511ebc12a093d3f73a4717ec71c32e4dbd49", "ce1466c5656a1af9078299aae69b46a2cae7146c", "a8f1fc34089c4f2bc618a122be71c25813cae354", "2c4def184f940e2dd4302bdc130999c27054de3e", "1bcb1c6d6cebc9737f9933fcefbf3da8a612f994", "66e2c3d23af8ed76b116121827b9bc5e99cf4acc", "d74e14de664be4b784813d93e260abe379e2602d", "a5c1775ad8ae8795f83052676eb0ce097806560d", "398dbeafe5c96b90a243d408b1280524be5bbab2", "1d5901662dc4fa5be2375f35be07b4116fd450ea", "468bb5344f74842a9a43a7e1a3333ebd394929b4", "26c591cbb35d4d031d13e27a59adccb74bc89bc6", "c1c253a822f984de73f02d6a29c8c7cadc8f090c", "c2148f81ffffeaff3fed49448fa5485f65917865", "1ec98785ac91808455b753d4bc00441d8572c416", "11155ee686bfb675816a2acdf5a8ddf06e67b65f", "70998e14f795d82e367d890aae5476a91472f268", "bc0bfa9cae364816dac01151d96d0335eb8fb177", "a39d318b68a2c262b6351a05f447dfcb0555da88", "3cd380bd0f3b164b44c49e3b01f6ac9798b6b6f9", "af278274e4bda66f38fd296cfa5c07804fbc26ee", "d57c8d46a869c63fb20e33bc21bc2a3c4628f5b4", "5799d5c6ee7bca0235a528117f8c5c0ac78da735", "1599718bf756a0fb7157277b93f21cfcad04e383", "f2004fff215a17ac132310882610ddafe25ba153", "352d4905e3a10afcb307be8534e4f4ee0be12551", "c74b188e7527a41eab514d4a5f72660184de3ced", "c767c06bf0374b894c42a9b086c51fbef69de1ec", 
"0418cdad1a10b58cf1a526b5a65a465f9324d67f", "29fc4de6b680733e9447240b42db13d5832e408f", "4a03f07397c5d32463750facf010c532f45233a5", "438f1841a0b09c96759dc870d663d837d07388e3", "2c848cc514293414d916c0e5931baf1e8583eabc", "e8410c4cd1689829c15bd1f34995eb3bd4321069", "6a5cb60fda310ee2528e9877bc5d2a07cd72505f", "196c12571ab51273f44ea3469d16301d5b8d2828", "600075a1009b8692480726c9cff5246484a22ec8", "3180192694594f345f6fc5bed5a473762dfec522", "bc9af4c2c22a82d2c84ef7c7fcc69073c19b30ab", "f152b6ee251cca940dd853c54e6a7b78fbc6b235", "4053e3423fb70ad9140ca89351df49675197196a", "250c8de6e884b5447d0b63d28855eb5078562197", "c4a5932f33e6f4ccbfc7218fac58350a530d0ad6", "0db787317ba0d63ec8f9918905e7db181a489026", "dd8bc239c1c5a142ded241cebf6a0b7a0efbdee2", "bf015fef90f6bbeb918830e45e5a5f7646c8d4c8", "8a2bd5dbcf0ab0130dfb97e2a035e5722aa9319e", "006350ae14784bb929b6a749d4e5c265a10168b7", "a0b2df8f72ff672cb0760c5221657a5f48f0ec5d", "174c8c8a392f4c37df712fe890e5c81cb8e60604", "00bd394ad1373372ea21297789bcbfe1693885f5", "0e25527a7df08c8cda5e86c7a255806289b0ff64", "e39a66a6d1c5e753f8e6c33cd5d335f9bc9c07fa", "4ae3cdba121dec886a84eff146e438a55513002c", "19f7654f22416e6fdf430c1c873ad3e8c15e64f8", "8e34eb3f9f9bd7ba44f46b7e622ed083497768d0", "928ccc8c4ae415202d187a229009dd48e57871ba", "da50fc942b1edc535ca69f1b922e856620aacebc", "b8a53daa97fb917a89c351c47f0b197573e20023", "3dbfd2fdbd28e4518e2ae05de8374057307e97b3", "9bdb40f15d56975436d9223da4e9ef5416591a52", "f04c83d778ff4cc7c5770487f08de6b3c2271fbd", "68c279d4fcc02710056e73a3b0d0d564a7615cad", "ede1f00c2ac27ac90dbeb0df1840ac757447af34", "9c6d92f3d796242332ebf419a4f9b584864cfa15", "1ed6a05a226cb0d09afd76ff9b7560c404d8eb49", "23a8d02389805854cf41c9e5fa56c66ee4160ce3", "22ffcf96be0e252397962f51401e6cc70ed27fbc", "b259c82853a563c1f7725d37aa74cc4a6b6d5e3c", "288bddfabe739b32721df62d821632e3dafed06a", "ce588a19ac2390e22839b26de12f76fe215a7035", "6c66ae815e7e508e852ecb122fb796abbcda16a8", "30722904751c2e1cf287f268befdec2e4223b086", 
"66af0fb424e4bc07cc28e08c7bf3a8b70c094d60", "e6d9d3a2f1560e507a24b8cfe3d2f4369c79e0f6", "3acdccd33e518f22dcfe36ee29c332a644afdb25", "c5c4503a331b6fc09e01e66280a531bb9db0290d", "0875af310ab8c850b3232b3f6b84535ffff84e5d", "0d7ddcf97b1341d8d4bbc4718f4ca3094e994a1f", "976e6f7612df76306d920381e4d9cf2927dcd430", "af1a6c35f5d75122756d37faed062d5b5cd6bc71", "98960be5ae51d30118f091f7091299a49f2f34bb", "4e71e03d4122aad182ad51ab187d4b55b41fc957", "74671fd8dd510db4abdcb93864fb5d5f77c878a0", "93a93ee535980ee30e3a5e473a37d89ecb20c4a7", "e8baf6ddd2e651350b843fedfe58f761848d3524", "fc43ee52274f25725535bf7f6184618779125987", "a02f070080d4bd0fcef8b3234ca6b8ee7c97fb50", "37fe5b28d4531c93668d4a56d2e3411c2c5978b0", "26d04923efccaec3a2c4435ab760c84a46f458cc", "4eb0b82b294f601510cd965adcf0e8c386cbaf22", "aa6854612062edff9978b33e0a410f2717bc3027", "5725c06b406b5291915a6bef8b5c3d20b2873aa0", "7eb145b301a78d6006cb2c560f9368553101659c", "9e384187941e939453fc0c7585c1a8e76d535c02", "1b8508c6e341dcc803e52ed02968ae944c744f68", "24b6d839662e5d56f17fc26eab4d2901f6835ddf", "92b748f2629b3227a9c56bc9e580f45eb5bdfba5", "34669c5ebc30be367f231cb2df705e347c34f07a", "4272734bf16a87f22ea4ccc404d955ddfa745fd9", "bdf7298687fc1eab1589d32b0639426abf06da21", "0d14261e69a4ad4140ce17c1d1cea76af6546056", "5f19b98e5cd22198d25660d609cbd3f4a69c94e7", "fbbccf0454c84bea1fd5c5a1dcd9fd7bba301a44", "d802ed7d8b7aea71a10bd0d700fd11fde5729993", "a4e47b6cbadfe5085c0a83f39513bda0ed3e9a92", "3377c068d661cdc35ebc8ef54dd69a41ce3cd55b", "de04e7b854a27d514635e51221a233a3c27feaa2", "1dae68c188c78080e06f4633647d6d77f30a0b3a", "2ee04d30e68e327ab84a9444be36758e8fd86b82", "079a0b0db150b6bed24e06e3ad00a73c9fbe00a8", "de7a148970881cbd4e6a12b6a014e3dfeee98cc9", "2b2f98518464a27c400b52f8c0ae4456dcc314a6", "3868c75855df640a73b1fcdfa5df1bb92b878099", "3953b89a9001c8816c3c56b778e28b246524786c", "b7d425ea6b476c4af208a6b6a9e84ab17921dab4", "1903276bb462d3ccd4f1fac3a8e34a53045ef8a1", "78fdf2b98cf6380623b0e20b0005a452e736181e", 
"60ea05df719973ac4d9d70d3141e671131a55db5", "81f3306d1115004a4a54a7e411715014e0631821", "70c54f9bb5ba03821dba15225eefeb14a75e913f", "e813691808adcd7a0151fea51313dce9954de8a6", "446f114979c3026541c25883216925d9c12f9c1e", "aca232de87c4c61537c730ee59a8f7ebf5ecb14f", "c65b71ebb22745504f4374c4caa4939855544e7d", "6b47b1c5a628ddb939d0088b36753ca29b3f9b76", "c813413fc84be33d7c4ccdd4a1f025ccc73a77bd", "4fdeb5d59b218ecba0f72dc3c42f38a086417c0f", "b8aef59bac4035013bcdaa9b56d665fc8b4e187d", "cf09e2cb82961128302b99a34bff91ec7d198c7c", "d6b1e14d211145bbc083b230d1724826de430fb7", "f5be2fae5ba5e7d3550565867e6b79abef8e3f0a", "25cb142df18df30c416f5f41170d6b8c7ab2b957", "e2f7a7b3cf0681aea61a99e5289109c2d7f445b2", "9a9019972dece591f502a2f794e81648b9e064fe", "54bac87151febb2e9eecf237d6498f8ed8ac3b1e", "0c5b2a03df2ccd6f236c631f073b6448381a429e", "8b82af24bf580cbd22467ab69c588da87956a7f3", "8459692ecc49cc87311ad97de85576e383e36490", "53dbc015e9fb7bd0ecd54c4b6f238467edcff5e3", "7031d7fde9f184b72416759f8a9be4155616f456", "ce8efafd57a849e84d3f694bf12a21ce1a28029d", "047cd38ebf2ce7eeb885f654ed64d405a0421fab", "247df1d4fca00bc68e64af338b84baaecc34690b", "90ae02da16b750a9fd43f8a38440f848309c2fe0", "4c3a0d87f7aefcf3f420c86e8dc377edbbcc25bf", "5ad4e9f947c1653c247d418f05dad758a3f9277b", "ea533fac61db537fe1e1f351c98ae28db7272705", "992655a7eaa846cdf755bb1be93693d7b6fe9094", "b7cff43f653279a65e23a7a85c48b12a484148ef", "423e8cc1a7501066b7e0e5bb1beb5b9592337023", "1414d4880e368414cbbbbd215e8b0471f185aa03", "fbd5c9bbfb43aa4734cde7863897600fd42eb8ff", "6c3c845fe484bdb2b3549054644c7a06bd9b87b8", "a643302a89805bb8d3d204660a3a60420fee36e2", "fd4c46bfd3bb00ed93b0bb5b28ef0336f59f0c15", "729d23a3a439927f1917cb93c9c20b5c426065b3", "ee87aa52d9642607d86f011c0d7326c4bdc63121", "94106ca511a60fb4fa8402fef4bf22b9ebef83e9", "2179afa1cb4bd6d6ff0ca8df580ae511f59d99a3", "2b4d40ef1610500c207f166e9a5b55dbfe234045", "13e348264fe1077caa44e1b59c71e67a8e4b5ad9", "764193fc925e817a3afe5930e1399268c9eaa136", 
"92a044df6c37571aac25756252dda27676492bb5", "c8d15a58794e4b383424d2d057a518689a278b8d", "163ba5a998973f9ead6be0ca873aed5934d5022e", "9cc91de928faac3fe397b2a93e4dc2cc7ed025bf", "4233746214b73039bd80d9650d53651ca24440e9", "0bc6c3ee31d35eecf505bc8eabb98d553b351ba2", "b5e4b4cf5178b06e0bb5fd016b8ff5f609eddc8e", "bc9003ad368cb79d8a8ac2ad025718da5ea36bc4", "06dee5ff4b41eadf5db5c6841d3441d388f08117", "35883d90bbb84a598c85ef8a324f653da7b5a0fc", "dc6263270cd23a51d8fffdfd7e408250442b40f3", "184750382fe9b722e78d22a543e852a6290b3f70", "9727c74a09aad74abd67ff1d2dff083cc73d4a2e", "7182bb21af9ed0ac8f34f2ab305fca563301f433", "2452dfb2c5a4578ac9497cc4dc3c6d5d03997210", "ae33dc04adcb83a486517c48078cdd4af7dcc7c7", "1015ac3d9d9b93227e59328160de09c8bc2c6d2c", "007e269f105b44042491537d5d7c35cf5b553feb", "83a65b371a2095e9fb7c3908dd5fef558cf99ce3", "116cc9e15e17dba817ac020732fbec30026e5f6b", "8fba84af61ac9b5e2bcb69b6730a597d7521ad73", "ce4853f2214ee1f4c47a97ff45d4e53f6ffd5087", "36bc31322b95425a8ae925b458a8bc6c6f34bcf5", "74c76a7ddf2a6c76744662dab88e20d329646a60", "299c1e335fb6c6b0cfa4a733f66655dd35bbdaa8", "107a293f2d8b104d70d7530e27d5e19566be968a", "cc353489ceaba1f58bd44f54316bc8319eba5fb9", "5633425e52156c8d5824d033103b237b1e579539", "9ef73533507b46278d0d27c41e16af2b8ecf23ef", "7a8ba1a6c90b56ae0a98fe43d015ab0f2a73912e", "01c9f0be6a300f385274b72a5463a650e51e300a", "3a0673199699cd51abe0f104ebe080f63d1b6d37", "64602e95d49e8acdbca0a7445cade7a4b3476875", "ced73382d686dee6232c313f014bc21ca7536db0", "419279b5d21234737b10715fd785eeb51b317767", "68eb648a6e18ac15ee31bcabbce64c45db619639", "bcb79e3ac69508060c8cba105f6a8622eb929ab1", "7ac6e6a4a7be438bc6aa4626d4beac780b875999", "caac13b7814e65161a3e9240e536722af0edac06", "41a174c27f0b431d62d0f50051bce7f5b3b4ce64", "590065c40574dc797e5aeb380d6e6dab79fad6e5", "de8381903c579a4fed609dff3e52a1dc51154951", "0c990e779067c563a79ae17c9d36094a745d7ed8", "c69de80ac804ce2bf7e47974ee984fd8a745a5d7", "28c24f16e20c83c747f2aca8232f2cb6614905f5", 
"4335805938a35a47cf86c985e993f73060405679", "6e44ddb54edbb80d5bb8f2ca3b36e40c486b9daf", "6856a11b98ffffeff6e2f991d3d1a1232c029ea1", "fc369a73eea045497f82634e6ea0c13477728f2e", "69da91e45d74db80e8eb436db31d384f5322c1b6", "df6aee6739132a91db5b66fa4956c356130a0d16", "5c3428d5c47e613fb394f2ffbd9668aa82e9e035", "591094c5428c6010f87ecb93c89976a54cd63bbd", "9fb964ecd41f50b40a3d347136df97b4b7f56b74", "0bcd89b356dc78aaf3573086f13e94b8e7b5bee6", "263fd526b86c037be6b17fc192a8cbc0768f9c37", "1ecf4055831ca23c9f6026ef866dac95c8b8f9de", "916318d7fb755b13de566a8a21625d47541d0d3d", "1d1e78bb93590a86ecfd2516f4e5789cc05d76f5", "f688a3eb3a8fb068ed916cfdd9fc71ee2ec453f3", "383a9bf5fc7ce9ca9e454a7e044ff0af4495b8da", "8d09e7c3c6b714574b2a4a7993ac94beb9d4f50d", "d628aabf1a666a875e77c3d3fee857cd25891947", "3002b5180c4b4fbf9c07145b5b435846c729c724", "94347c0f73c31a9fdf04e9d581cfc47ee94e9ae3", "2804e97b5c9dbaf4cb057c14478600cb2f9984de", "995b2868326837cde96e01390f87b2dee6239bdb", "34d8287c2c84b30ef056c0a07f13404ca5ec9471", "8af0ed43fb2f69730b8af570ea3852972174f1d4", "2a2b99fc9583419931681acfd83ac953a3df3270", "1b2e50412ec151486912f0bfd01703c8ec46b5a7", "59448d96cff9825d86e50d931fbb576e008efbe7", "c136e338606acb0e3a0752a75cf1cef7db5de0a6", "dbc3ab8c9f564f038e7779b87900c4a0426f3dd1", "734cdda4a4de2a635404e4c6b61f1b2edb3f501d", "71f07c95a2b039cc21854c602f29e5be053f2aba", "18c515d42666c95079f9a98eab59ac1cdfb10859", "1c26e415c7eae2f3b0f49e0519f0d985ec661c63", "0396dd7b16612bd39a9c8058bb7a91d9fcde6130", "c8db8764f9d8f5d44e739bbcb663fbfc0a40fb3d", "8f3d5f9ad240c186971edc652f8385dc2a53d2eb", "51cf3fa26b7c31c10427317fb5d72a6712023279", "1207ed9a9d8784427417c3efe788d2d76a7dc216", "543c601f8ebc0995040f4b8de4a339fd4c860cbb", "ed717bd09e8344c2cfa81ceedfb8baf2105708f5", "0b1cf351a4a6758606bea32d29c7d529e79ab7ce", "537fb9d35e56be9436b42a9e5e3405523c2f1e0e", "f3262127af7729ff295b863a217e2395b1c005a2", "695f45aad44683979d14a8be88a0f6ae1fa3974c", "06850b60e33baa4ea9473811d58c0d5015da079e", 
"ac6b280f2a43516fbaf92073304002f9f4da0188", "940865fc3f7ee5b386c4188c231eb6590db874e9", "df7e3f5cb90230f6bed1473c4984f336b56615c8", "a3334cdf92d1157dd691d88189e793d18164cd7b", "f96dad96192fd1c81e35fc3bf7bdb75a6f6f96d0", "19997d39447e570c7030a214eb4d81e3669ffd1f", "cc5a62bd7c45a9ca479506acb572566331354fa3", "130bf256f4cc3dded4fb701f74f6a34992be639b", "8e69534ae2f00025226c3a46dc6efb4faa3d396a", "dce54bd12826fd86de660e8bb1ac7ba19198aa98", "95616c511e1eada5c4fba090fe739a4554711e22", "2d331bdeaa0962eb210270ddb951525d47df9d6e", "dbb065aa2a6e6804e0ab8aee27314a6f68c4cde1", "804b4d57dace42dc8ac4d594db9ad93de33c8d6a", "91ee88754cc7a193d51656a3b53e16389bf4aadb", "233a21100a2728ed7fb9109495cd640ed76011ba", "c7774fd600630684cc1d6be8313e2935bb198880", "a62bcfa204fca20acc7b90aaac01b55d315fc971", "72cc1aeb77bee20bc2b98e6d2a7d754a512a7a94", "da11564599083db6e423e99d6b3ac1fd34771b9a", "2ede15b9454ea7b5593aed9624e14367d59f176f", "794a51097385648e3909a1acae7188f5ab881710", "b514e89249042899eafc890bf9815d2507b13d7e", "026ca771bd3995748b477e100ed4283a9bf8215a", "3f0549b74ede40707e36fbd67f4a32a38569ad9c", "7a334cd0be8600f92290c082113eb7f65d6b2f27", "4c8c89670a55e65ad9b92327d3386b5701dddabb", "aa9738c45df4c71ab2cc98384b4afb0fac9499de", "c54f9f33382f9f656ec0e97d3004df614ec56434", "232d99697a18c3065f2ba7c5f2d93d87731690f5", "6f1735c4222cb5d73ef70506aaae56e2ed496593", "fdf5ef8d3274ec3bf3d0bbb210a13c419d9ce9ff", "d7d472adfc0c36b964081b4fa8add4c86de695ff", "23a8e37363ca40515c7d66ec33cfce3c8c1c7909", "7c622df16f06d9f1c1af7262e91c54906e1b7e0e", "9a88d23234ee41965ac17fc5774348563448a94d", "61de0a35431a26a2f6a38e6f27f4ed3499e4ee95", "6c8c7065d1041146a3604cbe15c6207f486021ba", "df9c3dae5b14e739b1507e2e9f9158f933a3d246", "6f37ba7b7a5e13760e1b5ca6de085118b3fcb670", "ab368172c8acc87ec1dc87d1ad607546b2ea8f6a", "71406b7358812400d0626e8d62e7eb38cea99bbe", "f0aac566e3d2c06759b8f4f45a270d5af93b9705", "245d98726674297208e76308c3a11ce3fc43bee2", "ded2eaddaf214e63aae6be34f4f319df0a10c13e", 
"f66e2d403774d3fe49e8c37f8acecb4e43d1675d", "9ea337ffdf652803c805074d61b2d6a8d7040e95", "e0c081a007435e0c64e208e9918ca727e2c1c44e", "9b5db4e1613cd86e6ade687bed5b01cc9f66811d", "01ababc0985143ad57320b0599fb2f581d79d3c2", "afa004a8daaa7fc093a798bf97babdb00273e1a0", "afe9cfba90d4b1dbd7db1cf60faf91f24d12b286", "7688187b1ce5cbb1413d075f435ff294ba09cadc", "e121bf6f18e1cb114216a521df63c55030d10fbe", "1a2431e3b35a4a4794dc38ef16e9eec2996114a1", "5c271b5f96cfce1b4fdacc728ae8f8ebcbc738f9", "513d37674c661a72f2eac8a79c2f03139d6c020a", "48a7c9f9f810b5b5befe7675e8c7ffe40cf473ff", "0e22687fe92f765df06495df1462fc632ea240e4", "40536b0cc73fda29a335c6ecf9ce891dcb6d04cd", "e8686663aec64f4414eba6a0f821ab9eb9f93e38", "43301d1b4a66471512abd08fa2299bb99a7d24f8", "b0c601a43384524d85ff4aa0bce65b2091133b47", "96723b42451c42ec396381596490143aac8f85cd", "ed9967868fcca2ec38402d2bb3e6946b8e554472", "b6bf9d357f280ba8bb8338b2448f0f90773f5c57", "a120cac99c85548d0749dd83b0450520949e6474", "4a06ae9d41b384d6b1954b42a63385310b5d43fc", "adf423f2a76301e34aed59d4e6d6f5378dcdadb4", "2ec6518420c6f5f4e0a9329efa55acd896a92a0c", "161eb88031f382e6a1d630cd9a1b9c4bc6b47652", "354ddc8976a762ee03fb78b73adc3b5312e5f2a5", "6a211543f138d899c1fd98e0ac491ec9689ac167", "372c054ed554182b5f2a142467ee14aae5a462fd", "603ecf880ad770b566c4ffa49ffeb06340375194", "bc27d28a7f694025deb9f46432e751ce10ae194d", "b70e48161138ee8dc60354b3e76d923170002cb8", "ea2ee5c53747878f30f6d9c576fd09d388ab0e2b", "17f10af27762b443e4a857b9ffd11aab68524682", "0efb7d1413ada560ab1aee1ea4cc94d80737e662", "81d5c4b49fe17aaa3af837745cafdedb066a067d", "8233c1d79ddad9d969b995d4ef2c6f8ea9acc646", "bf3d0e41e4d0a2ef6dbdd3018e3c7f728b5efceb", "dbb7f37fb9b41d1aa862aaf2d2e721a470fd2c57", "4abaf4673d3745d312ed7e62770e6d264acee27a", "2fd9ecb40df6c7cd4f27c047223a1e45aae1bb95", "24ec4cd704d07865ce31fe539d00cd2597b5dfc9", "c28461e266fe0f03c0f9a9525a266aa3050229f0", "8edb2219370a86c4277549813d36a6c139503fb4", "1ebc39c63a722710038c2701cdf681c0c10169a8", 
"f218df397afb1f070ee093bb9a19616f61b562c4", "1339188247e3b8fd102b37501eb93cbeab71b870", "57d33c0f8d6998d665a7ec6672a56cf8e7729c14", "83968f81f23a34e18e850fe2cf68bab51e22e35c", "a11600deb182677f4fe586fcea59f10d032a6c6f", "9377376b366dc6b7c1ec03db7ffe611056263202", "3c8938fc1866121546f9cff0c3aa03a26fa7db6c", "bfd8bfce7c998a7bf209b7bf2e6c2e1f03c4334e", "552c55c71bccfc6de7ce1343a1cd12208e9a63b3", "c9311a0c5045d86a617bd05a5cc269f44e81508d", "4a227881f5763d2bda2e545eac346389b2b2017a", "22086b3c772ba638e7d50b10bcf544abd93c9305", "f2977284cc3c6653df957d886101cc485de1a9f9", "948317b46a24ba11bb0220875fe7a9949f3d939c", "c0c80aeccb1628926738ea8f09d238061a8daa29", "0133e8ba2d7e51c363c954f455ea7c4bae2be410", "10bbdbf86b3dd9a60f9be01401e0585250c97477", "2c12b2bd93f9ac8efc5c94e46bfa7a3cd0461052", "50da9965104d944a8ae648c9aaec43be8ea1c501", "86056abdd2f2dabfd733227196d633166793f7dd", "a149dd3fc6c7087c414f9655d92693ce9d3df6ec", "bf4073f334902cd76608655b2d54a624ccdbface", "9a601fd18aea081d28408d133140ffb1f6dfcda6", "05d4abd35ecdb5863e04ebb0011f525ba3ef0092", "5b693cb3bedaa2f1e84161a4261df9b3f8e77353", "4d9a6c4b1f7797962bb2554cf4bb869c7ea57a0a", "590a52702bdf7f9522cff02f477de1fa98fc2ff3", "17d212e81f1e9ab707a90a010afedc1d85678a26", "5047cae1b6f47ac1715479abfa3daf1c1a063977", "66849ba55446a5a7c5d057091a82a71d38c1ca50", "d4453ec649dbde752e74da8ab0984c6f15cc6e06", "afef2b1d35fb807f422cfec0a370f7d08d4651d1", "998e829cc72080c88a780f322d6bf7ab78dbd743", "6ee8a94ccba10062172e5b31ee097c846821a822", "5eae1a3e0dfd0834be6a003b979bf5b3dc923453", "6e318d89e8e3ea0ac40be9d06e6acfc624b30f76", "7fc5ab3743e6e9a2f4fe70152440e13a673e239b", "524890eef6beaeb2e206c7b1bf51b58298eb55ec", "af9a830f62478c3638880d9a870f0b10535b3f92", "bbf5575f0d20b79b61c8c0d8b7c2a57224c359de", "abb74644e2bb1d1e8610e9782a6050192c3ceddf", "7b0e81249159686337ca2cfe81662123906b6b26", "41e1084e74564ced3e1fa845250162d6d0f2b9c3", "474cefe3f67293f23cbec7b4b31e1c40d7c36352", "237ec7e6d20025c32069e41f8007bb97931a7fc6", 
"80f281f4ac7e06d1741b91279de5b12a7a167e30", "b5877c53c4bbb8b4090393c7c3f1b5ca34655cfa", "be9f60ec249bc57ee9d337060fb20bdb8a0729e7", "d827c72d6c9e35066b40bd205bbd71ce487a1c39", "65b1760d9b1541241c6c0222cc4ee9df078b593a", "2f3884bc41c15fc9771a298c25ce45b0b5596edd", "5f593354fec6d6ab770a3e000684b9280cef5bbc", "482769e4c4cf832128b52f1bdff873af1eee8ba8", "baf0af0ac2f2fbbf0c04141e12886ff850d77413", "a3dc109b1dff3846f5a2cc1fe2448230a76ad83f", "a9f7ab254a8c73a51f0eba5a8e13b48924b542c0", "fe005c5036ad646051cc779aafb63534bda14f06", "157d2c6dd8c9999b251099ef4211cff8030ae486", "03161081b47eba967fd3e663c57ec2f99f66eebd", "cbf69bc3e4c9b7d8cd33be81686d45f6a5f2d544", "b8ae7c6ff8d34cc639aba63b0f4b094720482280", "77cc3e55ff5e18eecc29f2fad1ced236ce9b0689", "e5d13afe956d8581a69e9dc2d1f43a43f1e2f311", "1c38a00dcb7e5e801f321eb5704eb06aaf031028", "0665853ee87112bc27a9aaec70672f521b91d38e", "456ccc8bbb538037ff00fabf25afb2aceb39149e", "9fb1d7cbf1baf5f347d159410d22912fcee1fdb1", "af54dd5da722e104740f9b6f261df9d4688a9712", "043e7a08398b1d634fa2bf3ddb81942686effb30", "3136cab00cfb223ceb9aff78af2c165b6e71a878", "3b38dc6d4f676ace52672f6788b66c9abb10d702", "2db41f32a41a090fa4ab1a230a27749758060136", "d3612bcc772761b611365fe21c42eafb181338ef", "9fea3ff012d5ee474a7f41956bc4a4c5b67f09e0", "3b44f90cb302541793407994378698d5509a9c15", "743679e51b879b01e9cd30172e63b0680977430b", "d088be037869cc814bdb8ecfbd9b7bbd51a5b056", "4689e75bca5a6eb1e3e1d6bcbd78d67ee39bb378", "e3f63d12be07c743e7590957f4ed38b06cd98aba", "f5af4e9086b0c3aee942cb93ece5820bdc9c9748", "125d82fee1b9fbcc616622b0977f3d06771fc152", "101c7d54194c02600865d7b0c638e6bfd428788c", "5fdd81fd5e4caa852b6be3e6bf7891578248d662", "d5b73e7b5f2a72404a6c80c9158ee1d109cda7e3", "00ae6ce99eb9ccefd8409e4ef5e3bbb5248821d6", "87c2806f1fd20287f00b43dab07822ab13035169", "734d6049fe08d0a24f6aa70bf0d81c217dfca570", "e686e9a642880662e56558b13d3d32f051d549b3", "81dbc36c38b820dff88bcca177bb644f55a4926f", "c018f62420913102fec2126f140a09dfb90893f4", 
"ac206a97e981df4514dcae28442beaea31845f35", "c0014e048a5d15ddfeffa075a1b819bcb93dd351", "6a6280189ead63b2eec733b8e8ac507e830928fd", "73da66ea59da581c31ff9dd5f7d8243356360eb9", "52144c6d20ddea70e59514c2aa9ec7dc801e5c5e", "4a3b28e5ad2ae2c2f17d681f6177da212e51ca32", "1fcd7978c6956fd9a0d752ecc9f5ac1a1b2896e9", "72dd9ecc3a1f32d53b4aeb03ea3db14236fbcb27", "4ecb93ffa3c3b195664ee6d627fd6e46f6798e73", "49d7fd8975413fb2912e111093749733712210dd", "9c16a974756ca43b2bf628358ddb08210857f464", "abc4d51d510cd8222484f7f4f11a739e8bce42ff", "7e1ea2679a110241ed0dd38ff45cd4dfeb7a8e83", "77cb6ea4feff6f44e9977cc7572185d24e48ce40", "33d631ee947ef0cb184ab6b172213abea9f540db", "81b6de17391f44c07b2efe75a529aa200604ee48", "091a4637735c1a86dfb94432d01db0f11519ba5f", "0679d05c11c8cd54a597fea870a23b3556c07e1a", "6bda5819d9bc2e174902d839a12127a57fdb43f7", "f28ef0a61a45a8b9cd03aa0ca81863e1d54a31d1", "c84b2cda2d645475f25d8b8f34b8f21ad3aa059c", "3c9f2444b1de1bf960664d8c3109f8b8d5dee44b", "d9739d1b4478b0bf379fe755b3ce5abd8c668f89", "80fcc4e66906d04e14f5ebc68b0a17d4e5ff0194", "bbb73e73d79a6fd994c55bef96eb0d293c90c13e", "771b7d76df1ed476dea859034a276f14ad1e49f1", "27c978bdb9de3a5135349976fdbc514ff547dcab", "695381eca0493f00842d6ae91ac7e41f53af4a64", "b7ae23d8309eb217e2d83012c614140bf7e9a512", "fec6648b4154fc7e0892c74f98898f0b51036dfe", "f121154f0a7625fbb1613bd4cc2e705f9de8fd0c", "9a9702372e243c5330d6e61d345d9e6f14e0148f", "6dbe8e5121c534339d6e41f8683e85f87e6abf81", "139bb2a4034a0498934185e8c6d515d8f9330e2a", "135fcdab631ab30ae837a743040f1c8751268e41", "dac8fc521dfafb2d082faa4697f491eae00472c7", "c6c4371bdc0214d66703deaa5e1e464d18cd641d", "8fa9cb5dac394e30e4089bf5f4ffecc873d1da96", "bf5940d57f97ed20c50278a81e901ae4656f0f2c", "6dd0597f8513dc100cd0bc1b493768cde45098a9", "0a7d081df9da6562a0c5459cb561f5bea0399f58", "d055f36e7975fa5b7785575dd64b5f95b9088465", "8a65a86ca07dba867b6435819239f96a6d825bf7", "fc2be7dbdd8f5cbcb6fdf5d18241ddd630d03864", "0dfb47e206c762d2f4caeb99fd9019ade78c2c98", 
"1792f5304fc9999a2981e45874f0d6ae3588b4bb", "575134b3d99ffde52f238b82b74d6ca3fc5ba08d", "e1d1ed79174cd8442409bcb3f296101852ddcb95", "045b45adbcb83a34d087c917b79274858a878937", "26c9e57116061594ef843141a6a8bc49759f766c", "782fb036d3bedcaa068610d17a365bdf6c5b1378", "d3cc9c370be5f28ce2eb06312df64a8a5a40feb5", "2fe2ea6e0bd939b3c2877d1fa6444b81d9940c35", "95debf4c4f88d48a71bae9bfea4032355805aa2f", "2251a1efad0cef802fd64fc79cc1b7007b64f425", "f1278b44acc73b41c2993574392047f8d10e997f", "397fffa6f785762acb3cd3c96c4c6b65058b816f", "abc5ee7fc8129c82fde8d151408042e4673762f6", "32510e7f88bc0767fbbc811397ba068dbc4cf549", "2954deae38c40a244f6a9c0714987d786c69db7c", "afb3bc6854003c7cc9e94cb16d62ef353b5a6569", "4e9dd2f7982dc71db5505dba7d7264d263dd93d6", "eea931e63c523599ba75524938a0be9ea36e9c2b", "cc3ef62b4a7eb6c4e45302deb89df2e547b6efcc", "eb0e0a40372db32d30ceaefad046b213fac977f4", "b3f96035651100b361e8c6d13e1f2022b406e196", "3d0eaa64bd321cfbee9d0426ead5e41324707ecf", "e312e7657cb98cf03d3b2bf8b21b0ff75fbd4613", "d2d5d61dfdae1c6492d15eae5f0f37f460ba4030", "45a6add58dcb5587f607b8eedd92078560c313c5", "1a5340212809bbbce6e0d61720209179dcaa8a26", "c660f261615f4a0185fda548b0ffb0e997a918ea", "7b1af8cc9c2c43fa9d528bcfb05142d714df3700", "0141c695e4cf87cc58e0d552004bcb53258c4915", "2ac986ec18c3572ee4f922ba9a90ae374563491c", "672982134f8a2c6143ad3179354f39ecc3ff798d", "7797e267972c98ba1fe38c019b689f8b77d5dc88", "c2672bba03b6a745ddaf5bf28536f7359d8fe2f0", "eca0d102a0e5d48ac19f1c2e99c7169b29b0497d", "77d51bea8f2511d2b039a6da3f5319857d6c7ee9", "5edfa28559c054b23acc43ce0f975a04ae27b331", "a3ab3385a634dc6022dca37e65494081b908f41f", "781e54b5ac35e21f30f5d1915103d7682b8d18fb", "1712178708eb74770af9fcce854dfe21af9d56fc", "f4557028562003c13eeca41b175dd4f4a03659bd", "d984b580e02da76cd4d991953e6d430fadf3d578", "267417efe1bf98ae904ad3970cb5742ec705082a", "58d821874f48c3ce748b523f7defdfb72a608392", "0496d10bcdd29395846d05c2de711db62be10630", "4f606761ce65399ef4ff24cd503ec09cf53562e9", 
"8d4f12ed7b5a0eb3aa55c10154d9f1197a0d84f3", "272ac22c670fd0c7c3f1b4ca02e925ff22dd4b27", "233913367b5006277b04a8f7651f51425f13697e", "cf54d15a176ac0d8e30eb0af2fdbb3a9908064f8", "c3a20a5b67e478529a020dd1749fc52a2812f45b", "91ab27348b02d1a241f9f4778695d1bc26f55abe", "669b9fd79eb39f712527ee616e35e50eea7fd2fa", "1e2cfa23aa2a9981bdc7f8f007121de541c387a7", "a6b2c5c527557cc86ae2ce4332b18a7850ee4e1e", "53eeb83d2c8085d5457b364354525730805b4332", "5806ff24d62e868b73312f704e7ad8d74eecfbc0", "bf6a7250119f5c09c1a34f4d56f50399b061f3c8", "b7291fe7fb5480059bf56bf71c5bd54600080752", "7fc2979d8efa6cf5af0c66ca2556a83d434690d0", "16aec3ee9a97162b85b1d51c3c5ce73a472e74b8", "4bd09fc152d680dbd3027ca14deea290f7bdec36", "01e5eb25e262afa4289d39b964c837a22a32f5a2", "e48432872be1e0449f50c6807b274d57c87a641f", "3bb724ee496100e12087ced6564198d63d843259", "35b9ded80ce2b30ee115b8198d146890b9028d51", "c7e7f6dd94576b7072b805d6b2db0529c31a78fb", "86131422d485a09b12f40de30457683296f1d1c0", "ad5d2146a629b786712eb21d4dbfa31394ca07b4", "9b9f3ec91e8ba185b0c7fd7545b0721e0cba9ba7", "31e1d021bd06054bbfcd915794e84448ae681000", "235f8e797bc10561ecd684023d2c980d990ea217", "f37655c1a367e68fee926feb66b29af5be58a45a", "16283efecc7332e363c9419d7129bbd5d95cbf4d", "498b7c42b8f9add7c83920c49a4e9d2fef3a514a", "31f1c4cf34ce0bb35382c35b2f468cf72bffae0b", "997fea9df7057cd342299e90c1c6e6e9f1cc5a88", "ea481ceaf3ad8bef871a9efdddb27c345e0c3b4e", "36d76954bcb4f381f3590598d5f00bb842ffddf7", "6ebfec00388b6975c8c38aed1ebe006eae79bcfe", "15ec1faddbd61a9d50925c7b9b0c76642abe94e7", "d95acbbb63eedefd16f6c76c33ddf598348d039c", "ae85c822c6aec8b0f67762c625a73a5d08f5060d", "61847a342471d9482129bc2d6e0c79089f331040", "5864688d1af1c139c567f323b5056b2ded994b3e", "148a5fa66480afa7744409cde659f79c7c9b3fdc", "251281d9cbd207038efbde0515f4077541967239", "6e69de19576ea2dfa4cb84a450ce18eccd183a95", "2534997443c7e183c9f8e370ea1e82989ecc940d", "aea6ee66bb58bed475a7b4f8266210e3f49be3c5", "4949241e61243b8998835a48548a45473f41c27d", 
"a71e3cf566de457336aab9dd6a5f5d6282b4a6af", "8476f4121e495a99316d634c32e4c82aa3f8dbb8", "d675a54dd5e353f99a1bec3b2ddab925a6563653", "4b1d23d17476fcf78f4cbadf69fb130b1aa627c0", "2cef1515ac1d98f1dcfdc9b84c1a3b6f3758b9d1", "413e3b68cb2cf3f7ed51b95552c0b5b4bb762451", "e41e1e4d9e578c29bf648e7098c466935b50f1a9", "f08f909eb07b3fe986510901e1234436c584fbb4", "5f7fd05f09dd6433cb273a1d33bdf75873509983", "b0158b26f01d5fa18aac51ece055cad9a12f6d87", "90e55d06f0c0234712bb133df05a24ccfe7fc87c", "a3f1db123ce1818971a57330d82901683d7c2b67", "954e5ecd158a38c03a38232dbdbae37dff6dda66", "6f37b6da734c2a13d2860bc5a23e809148b39bd7", "0edafa576c8c89035db8ad24a8a1af5d457b746b", "199aabb19ea78576a74d573739a7f35cf04fac6e", "6049a381a0b79a90344306a885f118b47936ac9f", "6fa39c0221c8bcae9146d31646cd9f70aba7190c", "c0d10e4424a1b5e27a84874d140e922a829ea9a7", "1e944bd5a3907546d633691b8c83fec77d880657", "6c0f9acd62ca9f156ca632dad6d666209eae461e", "b75d4b08a5e4fd33e6a29c2e8fa64b6954a95165", "1e768d4f8114969070dc2b8864c44ccbf7ca042f", "b017f588d281bce0a92043ea47ed4cec5db3ff33", "b33fb5158af1671d65bd927595c56c52e24e4f11", "a63104ad235f98bc5ee0b44fefbcdb49e32c205a", "0ca35af582b95fbab3829f98308d104359c3b632", "6d741691b7164b636678340dbb5823e437e1c5a9", "360f8874e42894af71ede97cd153853e09238350", "4803b306597de49bbc8bf1a1d191063c2d2c9ebe", "07d49098ada2d8e1ca0608c70e559dd517ca3432", "5cebc83001ea0737cc46360850fd294327c82013", "d5c8e530685c6496ca9513790416b2ed61e5f6b6", "a9ae68734f2a8116917f75a02dc9c1f432b6c8eb", "943dabe57f65d9b7af589a4e4db6354db89ed414", "1a7243913d9b8c6855b1eb3bb6566f2f1041d50a", "9d7fa2e1a7c0a821477ceb4d4eff75eeb6574625", "8455f7e992596fbb39212897a9421c9005949f76", "f0ae665f5b4a9314c77dc9ec285a335ee6ecc15b", "0c4d99f49654fe04a8e229a20a6e0e0f0d81337b", "99273db59af7927c5f4bd0df2e424ed7ab655857", "a8fa12c662447903fbb751eaa967f861ea33abff", "2340a8fa6d90741c53e659cd1e7ca86ff900aa55", "ef14393a9b6da680390461901f8be949e101a88c", "a90193d1dc557abe386c0836fb3f16642702df7a", 
"1b612877c4fb6fb7faf395357cd8092e5ec5dae7", "cb522961fae4cfeedcce8d0e3cf4cdfc8fe2b701", "86f17e74b905c8251223caf9b4e99784264c6252", "ad500c69ab1b228868d47315340c3d7fac5dc175", "3b3e980a5eb1b838fd8f56c0276d311635537a8e", "10559503926fe22dbb382c31541b0eee5c2cebd8", "d1e1c489ffe0331313e9c626a971bbaacc633774", "2af9ee8ee3ab4a89ae0098a1f9caa1aa9dad4e8a", "f17bc1c74a381f06d607f2ec6a06b10dd692d01f", "816c1925de9e8557fa70ec67d0ff71a5059eb931", "810cb5228315ac027bb8fbca94f6f8faa6ff8016", "4631f58f5a1ecdfb1b0ab833c3c22f2c05c576ff", "299af7d4fe6da8ac0b390e3ce45c48f7a8b5bb37", "2805daf3795e4e153d79dbecfe88b830ddc068d3", "d88ac8d78f4030de81e4be1ef5048072e6dac2cd", "6a0279c043eadaa09b5b486593c0f2f4f68adeb0", "ef48f1d8ec88dabbf7253cb1c8a224cb95f604af", "0f7bf963a06682d69387c54632cec9e835423617", "4e4746094bf60ee83e40d8597a6191e463b57f76", "06e959c88dcce05847a395dc404725dd0488003d", "d8f0bda19a345fac81a1d560d7db73f2b4868836", "1d35391869824040a081de1f35ba56afa35698b9", "39a76fdc4b2d4b9e8ef8f69a87d17ae930520acc", "4c6d6bb5bafba9e04d8f2ce128be71fba1d1e0e8", "534f41985a7350261a03b8c0dc54e218115dc4a5", "d040a7fb0a0c904fc4a759c39e9705e8604ddd98", "e083d6f5084d8a8582af797999185c4e0d2c841a", "aa7c72f874951ff7ca3769439f2f39b7cfd4b202", "42bb241681c4bec1fa36211a204fa0dc8158e5ff", "0c1d5801f2b86afa969524dc74708a78450300d9", "e2edc7e7a2832e2f6014945afce4f76643cab02c", "ce0dbe6b1abecb54dcc98dbe652aa63d190dbc94", "0ee91554aedcb2cc4e2d2a15eb07eed1bbbac2c2", "481fde422a31e21ac12644e0df95cf66528f52c2", "6feea436272077e5c5da2bfc55b4ea5d9d1a5dfc", "1b31d4a584818ce0f140026d172601116c6bc714", "38227805c37b00ee9fc5e165bd4ad926f3d94ca7", "209df2d7724bc6defe87618b502e1d7c800a819f", "71ab6a9bdc852b75e12f6ce6ce68c1083475e627", "d79de82207d251ce411acb620a0c44a7ae71f5a5", "47dfddafed43bc5afef93ac90ea3376a02046151", "fa1f4f34cf4502ede6257305ed8e6c808b6b3643", "1609cab807f391fe744f089aefb647e482dc33a0", "821702d8c32176ab03fb39b2a60a6293a06d620f", "4ae234a7eda3fc4e28fadbc75ee2603a0e078fcb", 
"b4270de7380d305b4417f662686093c40d842da4", "bc4627e1bc3bbe21c46c4011ec4f9bd377ec83a4", "08847df8ea5b22c6a2d6d75352ef6270f53611de", "deae19c928571d3c1101660b0d643d7a7ee893b2", "bc1fa3efa43dfb79f6f8243d29327c8ee06e8a97", "18f7fe72fcefee11082534f4bd254d67e433a2bd", "6e7a9779dee831658e973ee26ac8bfed2d6da033", "d02b32b012ffba2baeb80dca78e7857aaeececb0", "6daf590f25f0a0e02c46b4d0d89df0eafbe23d06", "87c0f3408ac4411b615756f6c854177c440f0b2f", "e33b5e91eb12ee3d7a5d134669994cbde6673df9", "6dc17e91c0b02ff3b9e5c9283924279c28641db7", "7dba0e39bb059103e10fb81bce2fe831f520fb38", "3fe4109ded039ac9d58eb9f5baa5327af30ad8b6", "f66bc143d85d2b1d9aafec20f598a21d2b90b0c0", "4669b079c3ca15aba08130c36ead597014f7341a", "7cdd8c0f3c672000506696a6f8b96b9a99e778ae", "2e222383bd75d3c3961ac073e8aabd3557946601", "076f4d9713acd7188a13186ac857124ef44f466d", "6801c8ea1fcb2f76799234a9a81c6199dd61b24c", "02a99a43670ab83e77de9d935eb8d3d164e1972c", "39d6d71aec74a9a810239f04db2f8854d2f966eb", "795bd86fc22ec544e7cd9b3d3c2ccabe72de54ec", "86a84b3a61f67f59c5bc5545bc88296e46681ca5", "4b39b981133a91052956cc42d2967f349a95cd89", "9b555d8c8f518d907fa273d8691b008d55aedd92", "39971d444d54fd9569692f1a03f94d847161dbf7", "3f95a5146d97480388b2f8d4ae189e4f7374e715", "e1846cf49132cae0edf24ddbe281d470b67e8a14", "118c8bcbf52fbe15b38b67ef3f30839aa7faf2f4", "02a88a2f2765b17c9ea76fe13148b4b8a9050b95", "5012a2408b4df939dff96f77c456747eb15e967a", "35570297681daa3973498eabead361d0be961672", "1fd7e1f5dd4c514bfb3d77fceb454bc01de83ec8", "dba552e2c43d0de1c7d33b4c2c84dedbab71aada", "6d1cfdb82122cefbc0f27ee7a02d6a22483d6a05", "a107250d3568e5ac7a7504f04b1154da8e9892ba", "16fadde3e68bba301f9829b3f99157191106bd0f", "6c21d11d4e1e1666ef009a8d62d1c64dc75f79c9", "0827fa5586227437df34d63a8c49f5bc8ba077dc", "ebde14a5d0137e970bfea350eb0ccc7d4eec2394", "43734a8321c4b279a6e5bed9f87c849ddd38068b", "0108504305468275985da608b77dbbbe4aee34c7", "32ea1ed0155cd7d68eac5719693328620fd308f2", "15ec39729aa70d75699886150fc0e927278c8ade", 
"3fbff11afa17f4c6f13f52a944ff9588be52ae41", "26919156cec1cc5bec03f63f566c934b55b682cd", "1209fa3c61fb5cdb18d1afa55d64f155398827f5", "21cfe8372be299be84818b4bcbe07fa6736540b6", "0c6eff59e210c3af9865207302199412f3f91914", "33f885a7e1369128534aa5f3b867bd42de9ec683", "828dfdb73b6fc92952ca88edb8162d430e6cd995", "7d1d282db00de79c6bfcc409c483cbc6a4582626", "060820f110a72cbf02c14a6d1085bd6e1d994f6a", "3d82f99a994a9702520aeac6bcfbe1aabb2dcf78", "c61cfd298fcd38cad92b6962309e5fd66f576209", "2830fb5282de23d7784b4b4bc37065d27839a412", "2e1ff08fb5790e3b5ba7864408628467795a9df4", "ec3472acc24fe5ef9eb07a31697f2cd446c8facc", "6911686f00c99c51c21f057c45d561c88027f676", "60093318820f49b5a105352a6b8512d1601af153", "171585599fcc0cb2c2c190a3ff395c2f5bd331dc", "2236294e803316c5934fa387f27d128fa7819a03", "af58701bdd28a49d234ba87d8f1b90d1f001184e", "02bee2cef6b04e6b57cfa3fd54cabc756f0c2e8d", "b5b63c7b9ded97da404c404be0a47c3fd91fc477", "66986f4359c3507d671bad021d6fb2d6fa6aa2c0", "93798ead90afe86636ca582a92cadd846905a95d", "ab36110a9c443d32306631ee2fd37abfa64bc1cd", "3a92a00b41dc6217f7685148c8a378524fa1a542", "09348656bbbe88881d1257650a170af5e22f1008", "83af9d8242c5779c97d02a0a0351991e9bec89b2", "657bc02d1522eaa89c05e60ec8c5614b0574843f", "beb9cecc5ee89dfe6307cd3964743b1088c65082", "011d461718c39c9d196cb84b2e881c1660ef8f55", "b5817dc821447546c47bc549cf6676f5da78f570", "0206ebe8a7d587548ce8f4507ab919c43a369014", "7ae8bca039d0d3de01001c3cd587f1961c4bbe22", "29dbb9492292b574f7bfd8629d6801d3136887b7", "098a0bd7c948e9c94704ac5e8c768c8d430e1842", "0a2ddf88bd1a6c093aad87a8c7f4150bfcf27112", "29693ce8b14c552e4e46d05d55cbff3942f95c30", "26dacf88181021939c09ffb3529ffd2854fc7ee6", "4ea759e13b0991772c61a4ede058d59d5e33a71b", "9c54c038664ec0c167211e9855a2275a97101708", "737649d6ce286b69df14c90635cda19a5a014139", "d73a0c3db0b347bc6f3796eb89d1342bf9ccee9b", "286ea63b1b5df1b8b67718f25b47357ec3168e97", "40bb090a4e303f11168dce33ed992f51afe02ff7", "7143518f847b0ec57a0ff80e0304c89d7e924d9a", 
"809ea255d144cff780300440d0f22c96e98abd53", "7f82f8a416170e259b217186c9e38a9b05cb3eb4", "d7cbedbee06293e78661335c7dd9059c70143a28", "a2d1818eb461564a5153c74028e53856cf0b40fd", "be5b455abd379240460d022a0e246615b0b86c14", "4d2022e3db712237b95fe381a75dbeb827551924", "2d8eff4b085b57788e2f4485c81eb80910f94da0", "0b4901e6724e533f6d5d2510e1c0199eea898c81", "e260847323b48a79bd88dd95a1499cd3053d3645", "8a56adc9605a894c513537f1a2c8d9459573c0a8", "725597072c76dad5caa92b7baa6e1c761addc300", "3423f3dcb0edee1c5c6a5505b9e8c0bbdcffbd51", "b5f2846a506fc417e7da43f6a7679146d99c5e96", "1221e25763c3be95c1b6626ca9e7feaa3b636d9a", "190b3caa2e1a229aa68fd6b1a360afba6f50fde4", "16fdd6d842475e6fbe58fc809beabbed95f0642e", "61e2044184d86d0f13e50ecaa3da6a4913088c76", "98e098ba9ff98fc58f22fed6d3d8540116284b91", "197efbef17f92e5cb5076961b6cd9f59e88ffd9a", "4a2062ba576ca9e9a73b6aa6e8aac07f4d9344b9", "d915e634aec40d7ee00cbea96d735d3e69602f1a", "2f69e9964f3b6bdc0d18749b48bb6b44a4171c64", "86ed5b9121c02bcf26900913f2b5ea58ba23508f", "193474d008cab9fa1c1fa81ce094d415f00b075c", "4cac9eda716a0addb73bd7ffea2a5fb0e6ec2367", "944faf7f14f1bead911aeec30cc80c861442b610", "11691f1e7c9dbcbd6dfd256ba7ac710581552baa", "aac934f2eed758d4a27562dae4e9c5415ff4cdb7", "14c0f9dc9373bea1e27b11fa0594c86c9e632c8d", "1f05473c587e2a3b587f51eb808695a1c10bc153", "81da427270c100241c07143885ba3051ec4a2ecb", "7d7b036ed01765c9473d695f029142128d442aaa", "bd243d77076b3b8fe046bd3dc6e8a02aa9b38d62", "03c48d8376990cff9f541d542ef834728a2fcda2", "673541a8cb1aa3ac63a288523ba71aec2a38280e", "1473a233465ea664031d985e10e21de927314c94", "8b30259a8ab07394d4dac971f3d3bd633beac811", "5ac946fc6543a445dd1ee6d5d35afd3783a31353", "264a84f4d27cd4bca94270620907cffcb889075c", "6c80c834d426f0bc4acd6355b1946b71b50cbc0b", "056294ff40584cdce81702b948f88cebd731a93e", "63213d080a43660ac59ea12e3c35e6953f6d7ce8", "8878871ec2763f912102eeaff4b5a2febfc22fbe", "115b16e1d73e0bed2cd5d22b1a83c030db885393", "e8b3a257a0a44d2859862cdec91c8841dc69144d", 
"2cde051e04569496fb525d7f1b1e5ce6364c8b21", "473366f025c4a6e0783e6174ca914f9cb328fe70", "4e0e49c280acbff8ae394b2443fcff1afb9bdce6", "155199d7f10218e29ddaee36ebe611c95cae68c4", "e6c491fb6a57c9a7c2d71522a1a066be2e681c84", "062d67af7677db086ef35186dc936b4511f155d7", "f42dca4a4426e5873a981712102aa961be34539a", "a6c96fceabd0e0efabc89679927ee1877f3cf4ac", "1fa426496ed6bcd0c0b17b8b935a14c84a7ee1c2", "a5f11c132eaab258a7cea2d681875af09cddba65", "4e5dc3b397484326a4348ccceb88acf309960e86", "35e6f6e5f4f780508e5f58e87f9efe2b07d8a864", "541f1436c8ffef1118a0121088584ddbfd3a0a8a", "be068ce0d5284dbd2c4c8ba4a31a41da2f794193", "cd85f71907f1c27349947690b48bfb84e44a3db0", "a6e25cab2251a8ded43c44b28a87f4c62e3a548a", "819d1dcea397e6e671acf74adccdef5750550873", "f2877cdbffb0c9a4de1f562099d2f0597bcfec0b", "e546572f8205570de4518bcf8d0345465e51d7a0", "563143c5f4fed0184c1f3e661917da94cfed1d46", "d700aedcb22a4be374c40d8bee50aef9f85d98ef", "f2a7f9bd040aa8ea87672d38606a84c31163e171", "6dd2a0f9ca8a5fee12edec1485c0699770b4cfdf", "93e1e195f294c463f4832c4686775bf386b3de39", "10af69f11301679b6fbb23855bf10f6af1f3d2e6", "ecb4bab5296224bdedd389cf18748c2ff0050100", "4a9d906935c9de019c61aedc10b77ee10e3aec63", "1dc241ee162db246882f366644171c11f7aed96d", "c0ca6b992cbe46ea3003f4e9b48f4ef57e5fb774", "4c6daffd092d02574efbf746d086e6dc0d3b1e91", "071135dfb342bff884ddb9a4d8af0e70055c22a1", "73ed64803d6f2c49f01cffef8e6be8fc9b5273b8", "d35c82588645b94ce3f629a0b98f6a531e4022a3", "3f2a44dcf0ba3fc72b24c7f09bb08e25797398c1", "fbf196d83a41d57dfe577b3a54b1b7fa06666e3b", "38f61e422ef75df4b96fb6081ce866556b6b854f", "274f87ad659cd90382ef38f7c6fafc4fc7f0d74d", "39b452453bea9ce398613d8dd627984fd3a0d53c", "48db8bf18e2f6f19e07e88384be855c8b7ea0ead", "a3c8c7da177cd08978b2ad613c1d5cb89e0de741", "645f09f4bc2e6a13663564ee9032ca16e35fc52d", "164b0e2a03a5a402f66c497e6c327edf20f8827b", "2d84e30c61281d3d7cdd11676683d6e66a68aea6", "b03b4d8b4190361ed2de66fcbb6fda0c9a0a7d89", "e19ebad4739d59f999d192bac7d596b20b887f78", 
"4597b7c4f13e1dfc456d156c6c05502fc5d38eec", "a6b5ca99432c23392cec682aebb8295c0283728b", "22e189a813529a8f43ad76b318207d9a4b6de71a", "fc8fb68a7e3b79c37108588671c0e1abf374f501", "704d88168bdfabe31b6ff484507f4a2244b8c52b", "2d9e58ea582e054e9d690afca8b6a554c3687ce6", "40ff3276e62f03fe216d8592d2fc994d8eead010", "20767ca3b932cbc7b8112db21980d7b9b3ea43a3", "98127346920bdce9773aba6a2ffc8590b9558a4a", "ea8bf8924e15607d959ae822f428815a9d435f3b", "a46086e210c98dcb6cb9a211286ef906c580f4e8", "d94969ec95d4c8cd7d0d4da3e83131b6f76cd7c4", "620339aef06aed07a78f9ed1a057a25433faa58b", "db848c3c32464d12da33b2f4c3a29fe293fc35d1", "d1881993c446ea693bbf7f7d6e750798bf958900", "2bcec23ac1486f4106a3aa588b6589e9299aba70", "1902288256839539aeb5feb3e1699b963a15aa1a", "11c67d6fedc3dd95b752ade4e46ee143ac494259", "add6d96fc018986f51a1aac47eae9ee3fc62fb66", "00d4c2db10f3a32d505d7b8adc7179e421443dec", "a4d4f52f922b3f28251ae03abcca8c0a369694fd", "c1173b8d8efb8c2d989ce0e51fe21f6b0b8d1478", "521cfbc1949289a7ffc3ff90af7c55adeb43db2a", "2303d07d839e8b20f33d6e2ec78d1353cac256cf", "2601b679fdd637f3cd978753ae2f15e8759dd267", "b56f3a7c50bfcd113d0ba84e6aa41189e262d7ae", "041115cb5509466f7449451709387268a008aba2", "045e83272db5e92aa4dc8bdfee908534c2608711", "ce933821661a0139a329e6c8243e335bfa1022b1", "2c19d3d35ef7062061b9e16d040cebd7e45f281d", "5c493c42bfd93e4d08517438983e3af65e023a87", "4b1682da96af72ce0ddaa9384ce294611807a8b3", "6c304f3b9c3a711a0cca5c62ce221fb098dccff0", "052880031be0a760a5b606b2ad3d22f237e8af70", "101d4cfbd6f8a7a10bd33505e2b183183f1d8770", "397aeaea61ecdaa005b09198942381a7a11cd129", "81ed28ea6cfe71bfc4cfc35c6695fa07dd7cc42e", "799c02a3cde2c0805ea728eb778161499017396b", "dfa80e52b0489bc2585339ad3351626dee1a8395", "6fef65bd7287b57f0c3b36bf8e6bc987fd161b7d", "14b69626b64106bff20e17cf8681790254d1e81c", "8e9b92a805d1ce0bf4e0c04133d26e28db036e6a", "3634b4dd263c0f330245c086ce646c9bb748cd6b", "0329d9be8ab1e3a1d5e4b9e7db5af5bbcc64e36f", "15d653972d176963ef0ad2cc582d3b35ca542673", 
"8820d1d3fa73cde623662d92ecf2e3faf1e3f328", "4534d78f8beb8aad409f7bfcd857ec7f19247715", "7b74b65983ae0abb09a540b6413a5a36b2df027a", "ddfae3a96bd341109d75cedeaebb5ed2362b903f", "f257300b2b4141aab73f93c146bf94846aef5fa1", "1ca155a4b65ae19ccb73df48516e4775770a382c", "aafb8dc8fda3b13a64ec3f1ca7911df01707c453", "013d0acff1e5410fd9f6e15520d16f4ea02f03f6", "4df34e0194faa27078832cb5078a2af6c9d0ea9b", "ce450e4849490924488664b44769b4ca57f1bc1a", "1c41965c5e1f97b1504c1bdde8037b5e0417da5e", "3174fceef3cf09ac35e8d1eb4e1b8b73a3b2c713", "2d8ffa4a27b3e3b792b2d2516bbcb1a47c114846", "05e3acc8afabc86109d8da4594f3c059cf5d561f", "1576ed0f3926c6ce65e0ca770475bca6adcfdbb4", "b759936982d6fb25c55c98955f6955582bdaeb27", "71167cf519940a7373adc221401c396198763ab0", "3947b64dcac5bcc1d3c8e9dcb50558efbb8770f1", "03ce2ff688f9b588b6f264ca79c6857f0d80ceae", "f92d3abfb16e491a52b2d89549396d023562285a", "8383faea09b4b4bef8117a1da897495ebd68691b", "473031328c58b7461753e81251379331467f7a69", "28f1542c63f5949ee6f2d51a6422244192b5a900", "982ede05154c1afdcf6fc623ba45186a34f4b9f2", "5003754070f3a87ab94a2abb077c899fcaf936a6", "b5667d087aafcf6b91f3c77aa90cee1ac185f8f1", "16c2eb10de8b2bf0f2f8304c8a365a5c7f8b7a2b", "e35517f891736a413b9d8f18cd5109e215115c26", "bd9157331104a0708aa4f8ae79b7651a5be797c6", "e3144f39f473e238374dd4005c8b83e19764ae9e", "1e94cc91c5293c8fc89204d4b881552e5b2ce672", "81ede08b36f3abd423424804da8ff240606b3a5d", "488375ae857a424febed7c0347cc9590989f01f7", "2a65d7d5336b377b7f5a98855767dd48fa516c0f", "2be8e06bc3a4662d0e4f5bcfea45631b8beca4d0", "bc36badb6606b8162d821a227dda09a94aac537f", "81d232e1f432db7de67baf4f30f240c62d1a9055", "d141c31e3f261d7d5214f07886c1a29ac734d6fc", "05891725f5b27332836cf058f04f18d74053803f", "6bb630dfa797168e6627d972560c3d438f71ea99", "bf2eb77e9b795a4a0a38ed4b1c8dd4b2c9a74317", "83b54b8c97dc14e302dad191327407ec0d5fb4a6", "85c90ad5eebb637f048841ebfded05942bb786b7", "267c6e8af71bab68547d17966adfaab3b4711e6b", "5e6f546a50ed97658be9310d5e0a67891fe8a102", 
"3d5a1be4c1595b4805a35414dfb55716e3bf80d8", "d1f58798db460996501f224fff6cceada08f59f9", "df9269657505fcdc1e10cf45bbb8e325678a40f5", "1384a83e557b96883a6bffdb8433517ec52d0bea", "6cb7648465ba7757ecc9c222ac1ab6402933d983", "521aa8dcd66428b07728b91722cc8f2b5a73944b", "3b2a2357b12cf0a5c99c8bc06ef7b46e40dd888e", "cba090a5bfae7dd8a60a973259f0870ed68c4dd3", "11bb2abe0ca614c15701961428eb2f260e3e2eef", "3e685704b140180d48142d1727080d2fb9e52163", "6dd8d8be00376ac760dc92f9c5f20520872c5355", "ac820d67b313c38b9add05abef8891426edd5afb", "9ea223c070ec9a00f4cb5ca0de35d098eb9a8e32", "90298f9f80ebe03cb8b158fd724551ad711d4e71", "dc5cde7e4554db012d39fc41ac8580f4f6774045", "32b8c9fd4e3f44c371960eb0074b42515f318ee7", "446dc1413e1cfaee0030dc74a3cee49a47386355", "69fb98e11df56b5d7ec7d45442af274889e4be52", "1cad5d682393ffbb00fd26231532d36132582bb4", "5e4390e0c07f0f7c9fbd3681df53d09b16ed908b", "cb2917413c9b36c3bb9739bce6c03a1a6eb619b3", "4c0cc732314ba3ccccd9036e019b1cfc27850c17", "1252727e8096f48096ef89483d30c3a74500dd15", "6d69e5b03389da90fcc89b832a564f62ecbc54ff", "407de9da58871cae7a6ded2f3a6162b9dc371f38", "cd22e6532211f679ba6057d15a801ba448b9915c", "863ad2838b9b90d4461995f498a39bcd2fb87c73", "97b5800e144a8df48f1f7e91383b0f37bc37cf60", "c90427085909029afd2af01d1967e80b78e01b88", "1130c38e88108cf68b92ecc61a9fc5aeee8557c9", "92679c8cff92442f39de3405c21c8028162fe56a", "097340d3ac939ce181c829afb6b6faff946cdce0", "f7911b9ff58d07d19c68f4a30f40621f63c0f385", "f19bf8b5c1860cd81b5339804d5db9e791085aa7", "416b559402d0f3e2b785074fcee989d44d82b8e5", "0726a45eb129eed88915aa5a86df2af16a09bcc1", "b61a3f8b80bbd44f24544dc915f52fd30bbdf485", "cc6d3ccc9e3dd0a43313a714316c8783cd879572", "7c17280c9193da3e347416226b8713b99e7825b8", "e4d53e7f4c2052940841abc08f9574655f3f7fb4", "d0f54b72e3a3fe7c0e65d7d5a3b30affb275f4c5", "7f6061c83dc36633911e4d726a497cdc1f31e58a", "34626bed8996e105e562119e1b4aa290114c89bf", "4698ed97f4a78e724c903ec1dd6e5538203237c8", "b133b2d7df9b848253b9d75e2ca5c68e21eba008", 
"947ee3452e4f3d657b16325c6b959f8b8768efad", "dbaf89ca98dda2c99157c46abd136ace5bdc33b3", "40dd736c803720890d6bfc1e083f6050e35d8f7a", "3fa738ab3c79eacdbfafa4c9950ef74f115a3d84", "f22d6d59e413ee255e5e0f2104f1e03be1a6722e", "0831794eddcbac1f601dcb9be9d45531a56dbf7e", "2d4b9fe3854ccce24040074c461d0c516c46baf4", "18206e1b988389eaab86ef8c852662accf3c3663", "2098983dd521e78746b3b3fa35a22eb2fa630299", "e49c59d19c1d652040f1bbd749c1e69a69f4b66c", "75259a613285bdb339556ae30897cb7e628209fa", "8aa1591bf8fcb44f2e9f2f10d1029720ccbb8832", "70c9d11cad12dc1692a4507a97f50311f1689dbf", "1f59e0818e7b16c0d39dd08eb90533ea0ae0be5e", "5de5848dc3fc35e40420ffec70a407e4770e3a8d", "e2059946b69e0854f21919c1cf13c3f618f48d12", "12150d8b51a2158e574e006d4fbdd3f3d01edc93", "e4d7b8eb0a8e6d2bb5b90b027c1bf32bad320ba5", "fe82d072a8d13cfefcd575db893f3374251f04a8", "a25d12d3eaaba6ec8ef4a2690068e9fbd74b977a", "b53259a81dcfa9913495bb47f62627c51e20f086", "d6a596c71828b7488d62627fefbe7fff123b62a3", "10cb39e93fac194220237f15dae084136fdc6740", "a0c37f07710184597befaa7e6cf2f0893ff440e9", "fa4f59397f964a23e3c10335c67d9a24ef532d5c", "1128a4f57148cec96c0ef4ae3b5a0fbf07efbad9", "073eaa49ccde15b62425cda1d9feab0fea03a842", "4d1f77d9418a212c61a3c75c04a5b3884f6441ba", "49be50efc87c5df7a42905e58b092729ea04c2f5", "ebabd1f7bc0274fec88a3dabaf115d3e226f198f", "06f585a3a05dd3371cd600a40dc35500e2f82f9b", "e50682179979e32c8d916c6c289d12d35cc0d0b2", "8ad12d3ee186403b856639b58d7797aa4b89a6c7", "202dc3c6fda654aeb39aee3e26a89340fb06802a", "3bfa630a6dc6d1ca98e7b43c90dd9e8b98e361d6", "dcea30602c4e0b7525a1bf4088620128d4cbb800", "b5f9d5be7561bb6eacee9012275b17c75696c388", "c535d4d61aa0f1d8aadb4082bdcc19f4cbdf0eaf", "521b625eebea73b5deb171a350e3709a4910eebf", "8724fc4d6b91eebb79057a7ce3e9dfffd3b1426f", "f58f30932e3464fc808e539897efa4ee4e7ac59f", "a472d59cff9d822f15f326a874e666be09b70cfd", "04741341e26bdcd9ed1de18e5a95c31d7b64fa36", "1d6c09019149be2dc84b0c067595f782a5d17316", "4cc681239c8fda3fb04ba7ac6a1b9d85b68af31d", 
"58eb9174211d58af76023ce33ee05769de57236c", "31ec1e5c3b5e020af4a5a3c1be2724c7429a7c78", "e03e86ac61cfac9148b371d75ce81a55e8b332ca", "fdda5852f2cffc871fd40b0cb1aa14cea54cd7e3", "a000149e83b09d17e18ed9184155be140ae1266e", "247a8040447b6577aa33648395d95d80441a0cf3", "e0793fd343aa63b5f366c8ace61b9c5489c51a4d", "3dcc51a37f2e5e91d77ff00f18178484c4e938cb", "bcf19b964e7d1134d00332cf1acf1ee6184aff00", "7e2f7c0eeaeb47b163a7258665324643669919e8", "351c02d4775ae95e04ab1e5dd0c758d2d80c3ddd", "95d858b39227edeaf75b7fad71f3dc081e415d16", "35e808424317cf03b51516df7d083f45791311ae", "2c5d1e0719f3ad7f66e1763685ae536806f0c23b", "df2899462e04559c024a773d91f6e06c262e136b", "53698b91709112e5bb71eeeae94607db2aefc57c", "faa29975169ba3bbb954e518bc9814a5819876f6", "371f40f6d32ece05cc879b6954db408b3d4edaf3", "00f7f7b72a92939c36e2ef9be97397d8796ee07c", "169618b8dc9b348694a31c6e9e17b989735b4d39", "63f2d1a64737afa1608588b9651b1e4207e82d1c", "3852968082a16db8be19b4cb04fb44820ae823d4", "5a12e1d4d74fe1a57929eaaa14f593b80f907ea3", "09b0ef3248ff8f1a05b8704a1b4cf64951575be9", "ded41c9b027c8a7f4800e61b7cfb793edaeb2817", "e34840e4b952444d291619c784cb1f02dfae1e1d", "f636c087091847bd4ccd6d196ada6c0894b52d88", "95288fa7ff4683e32fe021a78cbf7d3376e6e400", "ba29ba8ec180690fca702ad5d516c3e43a7f0bb8", "329394480fc5e9e96de4250cc1a2b060c3677c94", "bff567c58db554858c7f39870cff7c306523dfee", "44f23600671473c3ddb65a308ca97657bc92e527", "82e3f4099503633c042a425e9217bfe47cfe9d4b", "b5979489e11edd76607c219a8bdc83ba4a88ab38", "501bfe67683ddfecf3710f5946c3b77f1ffe9adf", "062c41dad67bb68fefd9ff0c5c4d296e796004dc", "828d7553a45eb0c3132e406105732a254369eb4d", "bb489e4de6f9b835d70ab46217f11e32887931a2", "d3b18ba0d9b247bfa2fb95543d172ef888dfff95", "b7820f3d0f43c2ce613ebb6c3d16eb893c84cf89", "2914e8c62f0432f598251fae060447f98141e935", "05e658fed4a1ce877199a4ce1a8f8cf6f449a890", "15ef449ac443c494ceeea8a9c425043f4079522e", "457cf73263d80a1a1338dc750ce9a50313745d1d", "3150e329e01be31ba08b6d76fc46b0da88a5ddeb", 
"66e9fb4c2860eb4a15f713096020962553696e12", "ee15b67b606ff49cd17467b062c11441e3b2dd70", "f6abecc1f48f6ec6eede4143af33cc936f14d0d0", "f1250900074689061196d876f551ba590fc0a064", "8d384e8c45a429f5c5f6628e8ba0d73c60a51a89", "a503eb91c0bce3a83bf6f524545888524b29b166", "b5cd9e5d81d14868f1a86ca4f3fab079f63a366d", "9166f46aa3e58befaefd3537e5a11b31ebeea4d0", "27c66b87e0fbb39f68ddb783d11b5b7e807c76e8", "fd71ae9599e8a51d8a61e31e6faaaf4a23a17d81", "91f9f6623abc51086183cf1d2ea9954f503061fe", "2b42f83a720bd4156113ba5350add2df2673daf0", "ac559873b288f3ac28ee8a38c0f3710ea3f986d9", "4ee94572ae1d9c090fe81baa7236c7efbe1ca5b4", "ea35d98dd074cee5042942804bd44a636f393d52", "959bcb16afdf303c34a8bfc11e9fcc9d40d76b1c", "2bb53e66aa9417b6560e588b6235e7b8ebbc294c", "dee6609615b73b10540f32537a242baa3c9fca4d", "8f27df2d4fb7dd7ed5587640dcbe4dc1eb37acfb", "825f56ff489cdd3bcc41e76426d0070754eab1a8", "182b627d73de02764498c500aa7fb56cbeb1a424", "05a7be10fa9af8fb33ae2b5b72d108415519a698", "1b41d4ffb601d48d7a07dbbae01343f4eb8cc38c", "195df1106f4d7aff0e9cb609358abbf80f54a716", "a8c8a96b78e7b8e0d4a4a422fcb083e53ad06531", "65bba9fba03e420c96ec432a2a82521ddd848c09", "58081cb20d397ce80f638d38ed80b3384af76869", "ba30cc9d8bac724dafc0aea247159cc7e7105784", "1bbec7190ac3ba34ca91d28f145e356a11418b67", "cda8fd9dd8b485e6854b1733d2294f69666c66f7", "41f26101fed63a8d149744264dd5aa79f1928265", "1442319de86d171ce9595b20866ec865003e66fc", "fe8a8c4133698e4b68018d99c6a2bcec870c5464", "631483c15641c3652377f66c8380ff684f3e365c", "d1959ba4637739dcc6cc6995e10fd41fd6604713", "16eaa26a84468b27e559215db01c53286808ec2a", "6a2ac4f831bd0f67db45e7d3cdaeaaa075e7180a", "33f2b44742cc828347ccc5ec488200c25838b664", "ab0981d1da654f37620ca39c6b42de21d7eb58eb", "b09b693708f412823053508578df289b8403100a", "e2d265f606cd25f1fd72e5ee8b8f4c5127b764df", "07dc9f3b34284cc915dea7575f40ef0c04338126", "30cbd41e997445745b6edd31f2ebcc7533453b61", "503db524b9a99220d430e741c44cd9c91ce1ddf8", "558fc9a2bce3d3993a9c1f41b6c7f290cefcf92f", 
"5945464d47549e8dcaec37ad41471aa70001907f", "b1d2001e877bb36c8ccc97bee62d9824a3b8874d", "ce6d23894f88349443e7c9fe512ca81291bb2e00", "7ed3b79248d92b255450c7becd32b9e5c834a31e", "9626bcb3fc7c7df2c5a423ae8d0a046b2f69180c", "9758f3fd94239a8d974217fe12599f88fb413f3d", "568cff415e7e1bebd4769c4a628b90db293c1717", "1eb1fdc5c933d2483ba1acbfa8c457fae87e71e5", "24aac045f1e1a4c13a58eab4c7618dccd4c0e671", "f571fe3f753765cf695b75b1bd8bed37524a52d2", "61933c42ed53f4fff5653489fb376ee967934701", "40041b80cef6dc23946ffa9628b6ac3b8dcc971a", "d3008b4122e50a28f6cc1fa98ac6af28b42271ea", "f0f854f8cfe826fd08385c0c3c8097488f468076", "c6241e6fc94192df2380d178c4c96cf071e7a3ac", "35b1c1f2851e9ac4381ef41b4d980f398f1aad68", "a9756ca629f73dc8f84ee97cfa8b34b8207392dc", "9bb6dbd75dab9d36897660419ca48ee385ef82da", "c9b958c2494b7ba08b5b460f19a06814dba8aee0", "3df8cc0384814c3fb05c44e494ced947a7d43f36", "56f57786516dcc8ea3c0ffe877c1363bfb9981d2", "079edd5cf7968ac4759dfe72af2042cf6e990efc", "70c58700eb89368e66a8f0d3fc54f32f69d423e1", "d833268a8ea9278e68aaf3bd9bc2c11a5bb0bab7", "88fd4d1d0f4014f2b2e343c83d8c7e46d198cc79", "24115d209e0733e319e39badc5411bbfd82c5133", "66837b29270f3e03df64941a081d70c687c7955c", "0a9d204db13d395f024067cf70ac19c2eeb5f942", "392d35bb359a3b61cca1360272a65690a97a2b3f", "e8a150b743d487e9dbf6e023b0928f03b2c39aef", "c6c086748474dcda06d773891848aa1472de3560", "1a167e10fe57f6d6eff0bb9e45c94924d9347a3e", "5dd146912c2b2a313cea50acdcca3b4b54479142", "ae78469de00ea1e7602ca468dcf188cdfe2c80d4", "10df1d4b278da991848fb71b572f687bd189c10e", "826c66bd182b54fea3617192a242de1e4f16d020", "d0ac9913a3b1784f94446db2f1fb4cf3afda151f", "994b52bf884c71a28b4f5be4eda6baaacad1beee", "17a8d1b1b4c23a630b051f35e47663fc04dcf043", "bcead1a92744e76c38caaa13159de4abfb81b1d0", "580f86f1ace1feed16b592d05c2b07f26c429b4b", "a6b1d79bc334c74cde199e26a7ef4c189e9acd46", "4adb97b096b700af9a58d00e45a2f980136fcbb5", "4467a1ae8ddf0bc0e970c18a0cdd67eb83c8fd6f", "f02a6bccdaee14ab55ad94263539f4f33f1b15bb", 
"23aef683f60cb8af239b0906c45d11dac352fb4e", "fb1b6138aeb081adf853316c0d83ef4c5626a7fa", "46f2611dc4a9302e0ac00a79456fa162461a8c80", "a8a61badec9b8bc01f002a06e1426a623456d121", "2729e12ecb777a553e5ed0a1ac52dd37924e813d", "0145dc4505041bf39efa70ea6d95cf392cfe7f19", "080c204edff49bf85b335d3d416c5e734a861151", "745b42050a68a294e9300228e09b5748d2d20b81", "3e3f305dac4fbb813e60ac778d6929012b4b745a", "b6ef158d95042f39765df04373c01546524c9ccd", "554b9478fd285f2317214396e0ccd81309963efd", "be5276e9744c4445fe5b12b785650e8f173f56ff", "d5de42d37ee84c86b8f9a054f90ddb4566990ec0", "b449a9362a8dc7b8b1bdaa032a73577e2720ccb0", "3e0377af0087b9b836bf6d95bc1c7085dfde4897", "abf0aa1d8869d87f4ef62e2da058ccfb4bf46d18", "09926ed62511c340f4540b5bc53cf2480e8063f8", "e76798bddd0f12ae03de26b7c7743c008d505215", "79fc892abaf44a84a758268efd4d1b9e6b64ecf5", "bd0e100a91ff179ee5c1d3383c75c85eddc81723", "016800413ebd1a87730a5cf828e197f43a08f4b3", "5fac62a3de11125fc363877ba347122529b5aa50", "e2faaebd17d10e2919bd69492787e7565546a63f", "5e387b29ba253ddfe402509b2608d3f964721a8a", "928b8eb47288a05611c140d02441660277a7ed54", "062d0813815c2b9864cd9bb4f5a1dc2c580e0d90", "d42a8c6528cdf1a63050f9a282f6b5daec6b4e73", "663eb30b98e84d67a7468b6a6a996fcca600bf0a", "5feb1341a49dd7a597f4195004fe9b59f67e6707", "1ea74780d529a458123a08250d8fa6ef1da47a25", "9c065dfb26ce280610a492c887b7f6beccf27319", "74c19438c78a136677a7cb9004c53684a4ae56ff", "e75a589ca27dc4f05c2715b9d54206dee37af266", "17fa1c2a24ba8f731c8b21f1244463bc4b465681", "167736556bea7fd57cfabc692ec4ae40c445f144", "b5c749f98710c19b6c41062c60fb605e1ef4312a", "9e8382aa1de8f2012fd013d3b39838c6dad8fb4d", "81a142c751bf0b23315fb6717bc467aa4fdfbc92", "b7426836ca364603ccab0e533891d8ac54cf2429", "ef2a5a26448636570986d5cda8376da83d96ef87", "d309e414f0d6e56e7ba45736d28ee58ae2bad478", "b54fe193b6faf228e5ffc4b88818d6aa234b5bb9", "8b744786137cf6be766778344d9f13abf4ec0683", "22df6b6c87d26f51c0ccf3d4dddad07ce839deb0", "9f499948121abb47b31ca904030243e924585d5f", 
"d5d5cc27ca519d1300e77e3c1a535a089f52f646", "a87ab836771164adb95d6744027e62e05f47fd96", "a4c430b7d849a8f23713dc283794d8c1782198b2", "c553f0334fcadf43607925733685adef81fbe406", "2b1327a51412646fcf96aa16329f6f74b42aba89", "9077365c9486e54e251dd0b6f6edaeda30ae52b9", "2d83ba2d43306e3c0587ef16f327d59bf4888dc3", "fef6f1e04fa64f2f26ac9f01cd143dd19e549790", "4fd29e5f4b7186e349ba34ea30738af7860cf21f", "49a038852b9e51af658405231045559d728e0970", "b9081856963ceb78dcb44ac410c6fca0533676a3", "16572c545384174f8136d761d2b0866e968120a8", "c5935b92bd23fd25cae20222c7c2abc9f4caa770", "1b71d3f30238cb6621021a95543cce3aab96a21b", "67ba3524e135c1375c74fe53ebb03684754aae56", "a6e21438695dbc3a184d33b6cf5064ddf655a9ba", "b8d8501595f38974e001a66752dc7098db13dfec", "7fad07a6cf4c0985c7146e12d8e6639234e447fd", "e0d878cc095eaae220ad1f681b33d7d61eb5e425", "d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9", "8b74252625c91375f55cbdd2e6415e752a281d10", "dfc784c860795f4f9aa704b7655f6d1321018980", "355cea44e2d40409a7a5be72b12511e43d259cb9", "2642810e6c74d900f653f9a800c0e6a14ca2e1c7", "e096b11b3988441c0995c13742ad188a80f2b461", "912f1f57a010194047b6438cc1ea6bec95c6c2b8", "b0de0892d2092c8c70aa22500fed31aa7eb4dd3f", "ed0d8997a4b7b80a7cd3592e98bdbe5c3aab0cee", "8613fef2738325a5697e253276b160099100528d", "48186494fc7c0cc664edec16ce582b3fcb5249c0", "3fd092b96c3339507732263c9e6379b307c26073", "feaedb6766f42e867aab7f1a33ba4d7ddacfc7aa", "d0d7671c816ed7f37b16be86fa792a1b29ddd79b", "fd33df02f970055d74fbe69b05d1a7a1b9b2219b", "c6e2641d99c72bbffef8a97ec019dd9379dd8b3a", "5394d42fd27b7e14bd875ec71f31fdd2fcc8f923", "cffebdf88e406c27b892857d1520cb2d7ccda573", "2a14b6d9f688714dc60876816c4b7cf763c029a9", "b49aa569ff63d045b7c0ce66d77e1345d4f9745c", "a87e37d43d4c47bef8992ace408de0f872739efc", "0d467adaf936b112f570970c5210bdb3c626a717", "2fea258320c50f36408032c05c54ba455d575809", "9f5a73e6282c8c1c569622ce9eb505be237c2971", "2f57ad0f2558e2a25443bddc5a28bd598cdbecad", "656f05741c402ba43bb1b9a58bcc5f7ce2403d9a", 
"80135ed7e34ac1dcc7f858f880edc699a920bf53", "62fddae74c553ac9e34f511a2957b1614eb4f937", "2288696b6558b7397bdebe3aed77bedec7b9c0a9", "104ee18b513b52386f871e959c1f9e5072604e93", "c4b58ceafdf4cf55586b036b9eb4d6d3d9ecd9c4", "9989eda2f5392cfe1f789bb0f6213a46d92d1302", "516d0d9eb08825809e4618ca73a0697137ebabd5", "88f7a3d6f0521803ca59fde45601e94c3a34a403", "6261eb75066f779e75b02209fbd3d0f02d3e1e45", "db1f48a7e11174d4a724a4edb3a0f1571d649670", "c0f67e850176bb778b6c048d81c3d7e4d8c41003", "6318d3842b36362bb45527b717e1a45ae46151d5", "292c6b743ff50757b8230395c4a001f210283a34", "24eeb748a5e431510381ec7c8253bcb70eff8526", "55c68c1237166679d2cb65f266f496d1ecd4bec6", "41aa209e9d294d370357434f310d49b2b0baebeb", "f1ef5156eccee6c845bb9f0a99a3943218e145a3", "cfc4aa456d9da1a6fabd7c6ca199332f03e35b29", "2cdde47c27a8ecd391cbb6b2dea64b73282c7491", "78df7d3fdd5c32f037fb5cc2a7c104ac1743d74e", "1667a77db764e03a87a3fd167d88b060ef47bb56", "54969bcd728b0f2d3285866c86ef0b4797c2a74d", "2f7fc778e3dec2300b4081ba2a1e52f669094fcd", "30b74e60ec11c0ebc4e640637d56d85872dd17ce", "057d5f66a873ec80f8ae2603f937b671030035e6", "769461ff717d987482b28b32b1e2a6e46570e3ff", "b6d977251b551471f5dddfb0a2e8f9c542e684d2", "fb2cc3501fc89f92f5ee130d66e69854f8a9ddd1", "f0f4f16d5b5f9efe304369120651fa688a03d495", "881066ec43bcf7476479a4146568414e419da804", "2ce1bac5ddc4cf668bbbb8879cd21dfb94b5cfe4", "20da5315cfe5eab69d99bbda270e73ab488a49ba", "6343bc0013343b6a5f96154f02d18dcd36a3f74c", "baafe3253702955c6904f0b233e661b47aa067e1", "a7267bc781a4e3e79213bb9c4925dd551ea1f5c4", "e060e32f8ad98f10277b582393df50ac17f2836c", "b8375ff50b8a6f1a10dd809129a18df96888ac8b", "2c258eec8e4da9e65018f116b237f7e2e0b2ad17", "4fcd19b0cc386215b8bd0c466e42934e5baaa4b7", "f557df59cd088ffb8e27506d8612d062407e96f4", "10a285260e822b49023c4324d0fbbca7df8e128b", "1b635f494eff2e5501607ebe55eda7bdfa8263b8", "5c36d8bb0815fd4ff5daa8351df4a7e2d1b32934", "ca37eda56b9ee53610c66951ee7ca66a35d0a846", "fc5bdb98ff97581d7c1e5eb2d24d3f10714aa192", 
"35308a3fd49d4f33bdbd35fefee39e39fe6b30b7", "78b457f8b1ba4fbd1c50c32ec1f02f4f58764ad7", "032a1c95388fb5c6e6016dd8597149be40bc9d4d", "bbe1332b4d83986542f5db359aee1fd9b9ba9967", "c0d5c3aab87d6e8dd3241db1d931470c15b9e39d", "6257a622ed6bd1b8759ae837b50580657e676192", "9d530c341b3eda84c0b0a2c3149232daf16056f4", "866f70b2b3d91d15e235712378952d942c9c7478", "7fe2ab9f54242ef8609ef9bf988f008c7d42407c", "4f0b8f730273e9f11b2bfad2415485414b96299f", "c244c3c797574048d6931b6714ebac64d820dbb3", "eacba5e8fbafb1302866c0860fc260a2bdfff232", "51cb09ee04831b95ae02e1bee9b451f8ac4526e3", "9a03b7b71a82fc2c86b3b4cbec802dfc16978486", "e6f20e7431172c68f7fce0d4595100445a06c117", "41aa8c1c90d74f2653ef4b3a2e02ac473af61e47", "77fb9e36196d7bb2b505340b6b94ba552a58b01b", "64c4019f1ea9b54b1848418ac53c4e2584dc62d4", "8981be3a69cd522b4e57e9914bf19f034d4b530c", "7dffe7498c67e9451db2d04bb8408f376ae86992", "fcbf808bdf140442cddf0710defb2766c2d25c30", "cd323dc4b67965a4f16b5b0a55fcc1ff0396b375", "6fea198a41d2f6f73e47f056692f365c8e6b04ce", "1be0ce87bb5ba35fa2b45506ad997deef6d6a0a8", "263ed62f94ea615c747c00ebbb4008385285b33b", "9652f154f4ae7807bdaff32d3222cc0c485a6762", "e793f8644c94b81b7a0f89395937a7f8ad428a89", "089513ca240c6d672c79a46fa94a92cde28bd567", "378418fdd28f9022b02857ef7dbab6b0b9a02dbe", "549c719c4429812dff4d02753d2db11dd490b2ae", "9b6d0b3fbf7d07a7bb0d86290f97058aa6153179", "93f37c69dd92c4e038710cdeef302c261d3a4f92", "217aa3aa0b3d9f6f394b5d26f03418187d775596", "a93781e6db8c03668f277676d901905ef44ae49f", "05c5134125a333855e8d25500bf97a31496c9b3f", "0b2966101fa617b90510e145ed52226e79351072", "63a2e2155193dc2da9764ae7380cdbd044ff2b94", "3dce3bb30f0c19121a71e0bfe1d418f855cb13ce", "0290523cabea481e3e147b84dcaab1ef7a914612", "0cccf576050f493c8b8fec9ee0238277c0cfd69a", "7b43326477795a772c08aee750d3e433f00f20be", "060034b59275c13746413ca9c67d6304cba50da6", "414715421e01e8c8b5743c5330e6d2553a08c16d", "0be49fc1e0c9a6a50e449015945dd1cf92ccd07e", "d961617db4e95382ba869a7603006edc4d66ac3b", 
"cdc7bd87a2c9983dab728dbc8aac74d8c9ed7e66", "274959f26d04848f71a355c09500fd7ebc271d69", "7b8e9c50f74ce6ca66a8ab61fb18ca31d26cf13f", "374a0df2aa63b26737ee89b6c7df01e59b4d8531", "0f9fe80fff218573a4805437ba7010fa823ca0e6", "a5be204b71d1daaf6897270f2373d1a5e37c3010", "96858cea5e9c72a93d438b6ba8d9e027db5416a7", "9931c6b050e723f5b2a189dd38c81322ac0511de", "66886f5af67b22d14177119520bd9c9f39cdd2e6", "3aa66f2829ef440842c71a52cdaff30398a90ccb", "d1dfdc107fa5f2c4820570e369cda10ab1661b87", "30b103d59f8460d80bb9eac0aa09aaa56c98494f", "782a05fbe30269ff8ab427109f5c4d0a577e5284", "e52f73c77c7eaece6f2d8fdd0f15327f9f007261", "bc8e11b8cdf0cfbedde798a53a0318e8d6f67e17", "3c56acaa819f4e2263638b67cea1ec37a226691d", "677585ccf8619ec2330b7f2d2b589a37146ffad7", "506ea19145838a035e7dba535519fb40a3a0018c", "96f0da034d090a3ecadd0fb92333bb681f23ab14", "508702ed2bf7d1b0655ea7857dd8e52d6537e765", "30df8c70c11f6945b2b496b87c1923d2673092c6", "fffa2943808509fdbd2fc817cc5366752e57664a", "6de18708218988b0558f6c2f27050bb4659155e4", "34bb11bad04c13efd575224a5b4e58b9249370f3", "c83e26622b275fdf878135e71c23325a31d0e5fc", "57eeaceb14a01a2560d0b90d38205e512dcca691", "9d58e8ab656772d2c8a99a9fb876d5611fe2fe20", "79d3cb01f4907e895a7afced8b090427c39b9b84", "90b7619eabe94731722ae884d0802256462457dc", "2c9f486fcec3c80a41fcecf33b6f4653bac1aaf5", "e667250b0407b262e9d15929c86b6da347f9cdc9", "5615d6045301ecbc5be35e46cab711f676aadf3a", "1056347fc5e8cd86c875a2747b5f84fd570ba232", "f38813f1c9dac44dcb992ebe51c5ede66fd0f491", "26575ad9e75efb440a7dc4ef8e548eed4e19dbd1", "12ccfc188de0b40c84d6a427999239c6a379cd66", "468c8f09d2ad8b558b65d11ec5ad49208c4da2f2", "ae8d5be3caea59a21221f02ef04d49a86cb80191", "aa3e3f4f1159e3af45a32eb0b3e206204d201721", "a9fc8efd1aa3d58f89c0f53f0cb112725b5bda10", "7792fbc59f3eafc709323cdb63852c5d3a4b23e9", "17045163860fc7c38a0f7d575f3e44aaa5fa40d7", "d83ae5926b05894fcda0bc89bdc621e4f21272da", "73598ca47948fa18e051ce5173d7556e0f485489", "cb8a1b8d87a3fef15635eb4a32173f9c6f966055", 
"a6db73f10084ce6a4186363ea9d7475a9a658a11", "118ca3b2e7c08094e2a50137b1548ada7935e505", "703c9c8f20860a1b1be63e6df1622b2021b003ca", "62dccab9ab715f33761a5315746ed02e48eed2a0", "533d14e539ae5cdca0ece392487a2b19106d468a", "24ff832171cb774087a614152c21f54589bf7523", "94806f0967931d376d1729c29702f3d3bb70167c", "d09d663055b3b6d588bf4de2f386bb144d09aea8", "34c8de02a5064e27760d33b861b7e47161592e65", "faca1c97ac2df9d972c0766a296efcf101aaf969", "c2dc29e0db76122dfed075c3b9ee48503b027809", "f14872986435c015c562a92c6c0d142bbdf1b1fb", "c15b68986ecfa1e13e3791686ae9024f66983f14", "738c187d55745aac18d5fb5f6cc9e3568cd2d217", "ad0d4d5c61b55a3ab29764237cd97be0ebb0ddff", "451b6409565a5ad18ea49b063561a2645fa4281b", "19994e667d908bc0aacfb663ab0a2bb5ad16b221", "551fa37e8d6d03b89d195a5c00c74cc52ff1c67a", "896f4d87257abd0f628c1ffbbfdac38c86a56f50", "4fc936102e2b5247473ea2dd94c514e320375abb", "e1312b0b0fd660de87fa42de39316b28f9336e70", "5da8e841871e4a97534d981ee20002b183b45508", "574ad7ef015995efb7338829a021776bf9daaa08", "14de80b1b86ea342ba44c584e9e39b9089472658", "14b66748d7c8f3752dca23991254fca81b6ee86c", "03b03f5a301b2ff88ab3bb4969f54fd9a35c7271", "06f8aa1f436a33014e9883153b93581eea8c5c70", "12408baf69419409d228d96c6f88b6bcde303505", "b3afa234996f44852317af382b98f5f557cab25a", "a27735e4cbb108db4a52ef9033e3a19f4dc0e5fa", "2588acc7a730d864f84d4e1a050070ff873b03d5", "0ff23392e1cb62a600d10bb462d7a1f171f579d0", "2bb36c875754a2a8919f2f9b00a336c00006e453", "b191aa2c5b8ece06c221c3a4a0914e8157a16129", "06f39834e870278243dda826658319be2d5d8ded", "f2abaa1476fe1f00358f3eaa77dde2f348f58982", "5dd496e58cfedfc11b4b43c4ffe44ac72493bf55", "7711a7404f1f1ac3a0107203936e6332f50ac30c", "d8c9bad8d07ae4196027dfb8343b9d9aefb130ff", "3f0e0739677eb53a9d16feafc2d9a881b9677b63", "68d08ed9470d973a54ef7806318d8894d87ba610", "67ae7ee9557cb486d5e1129b9b24466ffb8c4766", "26a72e9dd444d2861298d9df9df9f7d147186bcd", "7fc5b6130e9d474dfb49d9612b6aa0297d481c8e", "1bad8a9640cdbc4fe7de12685651f44c4cff35ce", 
"0247998a1c045e601dc4d65c53282b5e655be62b", "3b408a3ca6fb39b0fda4d77e6a9679003b2dc9ab", "4fa0d73b8ba114578744c2ebaf610d2ca9694f45", "162403e189d1b8463952fa4f18a291241275c354", "0dfa460a35f7cab4705726b6367557b9f7842c65", "7142ac9e4d5498037aeb0f459f278fd28dae8048", "5f758a29dae102511576c0a5c6beda264060a401", "bdd203bcd3c41c336c5635fb026a78279d75b4be", "098fa9b4c3f7fb41c7a178d36f5dbb50a3ffa377", "ebf204e0a3e137b6c24e271b0d55fa49a6c52b41", "ba816806adad2030e1939450226c8647105e101c", "a5a44a32a91474f00a3cda671a802e87c899fbb4", "6c1227659878e867a01888eef472dd96b679adb6", "9e5690cdb4dfa30d98dff653be459e1c270cde7f", "60c699b9ec71f7dcbc06fa4fd98eeb08e915eb09", "01c9dc5c677aaa980f92c4680229db482d5860db", "2c3430e0cbe6c8d7be3316a88a5c13a50e90021d", "2fc43c2c3f7ad1ca7a1ce32c5a9a98432725fb9a", "695426275dee2ec56bc0c0afe1c5b4227a350840", "5dd473a4a9c6337b083edf38b6ddf5a6aece8908", "04b851f25d6d49e61a528606953e11cfac7df2b2", "fe41550ed350df4cd731a5df3dca5b0ea13511db", "2227f978f084ebb18cb594c0cfaf124b0df6bf95", "c36f3cabeddce0263c944e9fe4afd510b5bae816", "69eb86b4501a516b983b6269b7f154e4a8a4d588", "c97774191be232678a45d343a25fcc0c96c065e7", "33402ee078a61c7d019b1543bb11cc127c2462d2", "359e8703fd6ca8172a645c5b5a45b1d2b30b1d14", "3c8db0d86a6aa51b64ec09c7d25a721adcdfb7a3", "19841b721bfe31899e238982a22257287b9be66a", "6d5125c9407c7762620eeea7570af1a8ee7d76f3", "0a82860d11fcbf12628724333f1e7ada8f3cd255", "2e5b160892b70a1e846aa9dcdf132b8011937ec6", "e2888084b375163f7c956adff102fdbc9fe7fb40", "6e297f10a02580dfc74595ff8d7db34020002ec4", "c3f76a9ebe53825e14f851120cca4e1fc29807de", "c88e1ed0bd67a25c5c81d70de62e53e99876dd93", "d833c48334e906537f21757b6f9fa44da66f6c76", "5c7adde982efb24c3786fa2d1f65f40a64e2afbf", "edf60d081ffdfa80243217a50a411ab5407c961d", "4f298d6d0c8870acdbf94fe473ebf6814681bd1f", "2063222c5ce0dd233fa3056ddc245fca26bd5cf2", "c37a971f7a57f7345fdc479fa329d9b425ee02be", "b5160e95192340c848370f5092602cad8a4050cd", "398e0771e64cab6ca5d21754e32dce63f9e3c223", 
"17a995680482183f3463d2e01dd4c113ebb31608", "70af9756f10bf6128a47fef4509df7e8bb9a290e", "e0dedb6fc4d370f4399bf7d67e234dc44deb4333", "edd7504be47ebc28b0d608502ca78c0aea6a65a2", "050fdbd2e1aa8b1a09ed42b2e5cc24d4fe8c7371", "8b61fdc47b5eeae6bc0a52523f519eaeaadbc8c8", "a9f652787e5669168c7b8f632c3a343dfbaa6f4b", "7e467e686f9468b826133275484e0a1ec0f5bde6", "88493c1952000cde64c3f2294d36fc76fd23bf3f", "e624c73e3057a1de75e9d6d7e813771154ff1375", "a3f78cc944ac189632f25925ba807a0e0678c4d5", "2e68190ebda2db8fb690e378fa213319ca915cf8", "a2359c0f81a7eb032cff1fe45e3b80007facaa2a", "9d4c467adc09fb50c5e799fc124f3e82da8c3c22", "29f0a868644462aa7ebc21f4510d4209932a1b8c", "ae5f32e489c4d52e7311b66060c7381d932f4193", "eb100638ed73b82e1cce8475bb8e180cb22a09a2", "e0ab926cd48a47a8c7b16e27583421141f71f6df", "69a41c98f6b71764913145dbc2bb4643c9bc4b0a", "b613ea6c4fb5efdf17af090d64e9bdce41e28711", "aef58a54d458ab76f62c9b6de61af4f475e0f616", "0e36ada8cb9c91f07c9dcaf196d036564e117536", "fecccc79548001ecbd6cafd3067bcf14de80b11a", "b4d7ca26deb83cec1922a6964c1193e8dd7270e7", "34ec83c8ff214128e7a4a4763059eebac59268a6", "4205cb47ba4d3c0f21840633bcd49349d1dc02c1", "d794ffece3533567d838f1bd7f442afee13148fd", "83d41f6548bb76241737dcd3fed9e182ee901ff9", "a1af7ec84472afba0451b431dfdb59be323e35b7", "ebde9b9c714ed326157f41add8c781f826c1d864", "60462b981fda63c5f9d780528a37c46884fe0b54", "5141cf2e59fb2ec9bb489b9c1832447d3cd93110", "b6d3caccdcb3fbce45ce1a68bb5643f7e68dadb3", "f3dc67bb4cd3601ae9bdb7df4ed5036f525ff21d", "bb06ef67a49849c169781657be0bb717587990e0", "60d4cef56efd2f5452362d4d9ac1ae05afa970d1", "0abf67e7bd470d9eb656ea2508beae13ca173198", "9f6ca02ade848368a5e762cc3cf55a881c082faa", "03e1480f1de2ffbd85655d68aae63a01685c5862", "2e3b981b9f3751fc5873f77ad2aa7789c3e1d1d2", "3bd50e33220af76ffc32a7e57688e248843b7f25", "a301ddc419cbd900b301a95b1d9e4bb770afc6a3", "709d7f6b86c01fe90f10ae9216a91f95b1dcd2fb", "6c5fbf156ef9fc782be0089309074cc52617b868", "c843f591658ca9dbb77944a89372a92006defe68", 
"645de797f936cb19c1b8dba3b862543645510544", "085b5f9fd49432edab29e2c64f2a427fbce97f67", "3c09fb7fe1886072670e0c4dd632d052102a3733", "081d6ac51bbb7df142e3db6649fb5d663e90d569", "0055c7f32fa6d4b1ad586d5211a7afb030ca08cc", "04b4c779b43b830220bf938223f685d1057368e9", "8dd9eafab9498d495f0f6bf487d6a9c3aa7f3c57", "e00241f00fb31c660df6c6f129ca38370e6eadb3", "7fd6bb30ad5d7eb3078efbb85f94d2d60e701115", "6fdf9b9a80a2fa9c04a3966587c29d1dde978097", "279459cbbc5c6db4802e9c737cc72a612d76f7fc", "07c83f544d0604e6bab5d741b0bf9a3621d133da", "90c4a6c6f790dbcef9a29c9a755458be09e319b6", "b68f55bab12ca50b033d8b5c773ce5fe88c5923d", "9c686b318cb7774b6da5e2c712743a5a6cafa423", "780557daaa39a445b24c41f637d5fc9b216a0621", "e39f9565903a9701657ce3ade94c37d8a12f702e", "870eba6ab6eba89682be11100b744fd4864e437c", "b03446a2de01126e6a06eb5d526df277fa36099f", "4e444db884b5272f3a41e4b68dc0d453d4ec1f4c", "6f84e61f33564e5188136474f9570b1652a0606f", "20a0b23741824a17c577376fdd0cf40101af5880", "611961abc4dfc02b67edd8124abb08c449f5280a", "beabb0d9d30871d517c5d915cf852f7f5293f52f", "3af130e2fd41143d5fc49503830bbd7bafd01f8b", "07fa153b8e6196ee6ef6efd8b743de8485a07453", "7e27d946d23229220bcb6672aacab88e09516d39", "fab04dfcb35a29a46504d2ad3acbc642c602c7e8", "f0398ee5291b153b716411c146a17d4af9cb0edc", "17027a05c1414c9a06a1c5046899abf382a1142d", "32799cee51933ac4e1999358bad64817985826d7", "aa581b481d400982a7e2a88830a33ec42ad0414f", "7ad77b6e727795a12fdacd1f328f4f904471233f", "44c9b5c55ca27a4313daf3760a3f24a440ce17ad", "e34cb2d2f9f6ccf63de5a838ce5218e0b817469b", "1ebdfceebad642299e573a8995bc5ed1fad173e3", "beb2f1a6f3f781443580ffec9161d9ce6852bf48", "94498fae459167841e8b2f4b911493fc3c7da22f", "eee2d2ac461f46734c8e674ae14ed87bbc8d45c6", "b9f2a755940353549e55690437eb7e13ea226bbf", "f41d7f891a1fc4569fe2df66e67f277a1adef229", "14070478b8f0d84e5597c3e67c30af91b5c3a917", "465d5bb11912005f0a4f0569c6524981df18a7de", "9888edfb6276887eb56a6da7fe561e508e72a517", "2f1485994ef2c09a7bb2874eb8252be8fe710db1", 
"57c59011614c43f51a509e10717e47505c776389", "94ac3008bf6be6be6b0f5140a0bea738d4c75579", "025720574ef67672c44ba9e7065a83a5d6075c36", "5f6ab4543cc38f23d0339e3037a952df7bcf696b", "09903df21a38e069273b80e94c8c29324963a832", "89bc311df99ad0127383a9149d1684dfd8a5aa34", "372a8bf0ef757c08551d41e40cb7a485527b6cd7", "ec6a2093059fd6eada9944212f64a659881abb95", "18b9dc55e5221e704f90eea85a81b41dab51f7da", "2e927d0a2dc4b69fc03124ad876329b22a61f1b0", "06526c52a999fdb0a9fd76e84f9795a69480cecf", "009a18d04a5e3ec23f8ffcfc940402fd8ec9488f", "6044b30751c19b3231782fb0475c9ca438940690", "5f7094ba898a248e1e6b37e3d9fb795e59131cdc", "7f511a6a2b38a26f077a5aec4baf5dffc981d881", "b3200539538eca54a85223bf0ec4f3ed132d0493", "ebb9d53668205c5797045ba130df18842e3eadef", "4097fef623185557bb1842501cfdc97f812fc66d", "d93baa5ecf3e1196b34494a79df0a1933fd2b4ec", "d691440030394c2e00a2ab47aba4f8b5fca5f25a", "9abd35b37a49ee1295e8197aac59bde802a934f3", "b9dc8cc479cacda1f23b91df00eb03f88cc0c260", "7c2c9b083817f7a779d819afee383599d2e97ed8", "95f1790da3d0a4a5310a050512ce355b3c5aac86", "34c062e2b8a3f6421b9f4ff22f115a36d4aba823", "4bbbee93519a4254736167b31be69ee1e537f942", "a77e9f0bd205a7733431a6d1028f09f57f9f73b0", "22dada4a7ba85625824489375184ba1c3f7f0c8f", "cec70cf159b51a18b39c80fac1ad34f65f3691ef", "b08203fca1af7b95fda8aa3d29dcacd182375385", "4e6c9be0b646d60390fe3f72ce5aeb0136222a10", "c02847a04a99a5a6e784ab580907278ee3c12653", "a546fd229f99d7fe3cf634234e04bae920a2ec33", "ccf119021cf246fd75d37863646ccb85accee6a8", "4abd49538d04ea5c7e6d31701b57ea17bc349412", "a3f684930c5c45fcb56a2b407d26b63879120cbf", "d63bd06340dd35590a22222509e455c49165ee13", "4c822785c29ceaf67a0de9c699716c94fefbd37d", "c61eaf172820fcafaabf39005bd4536f0c45f995", "d56c5f0a23ecef2eeaad1b882829d709fa172632", "70109c670471db2e0ede3842cbb58ba6be804561", "22f94c43dd8b203f073f782d91e701108909690b", "b7c6df1ae0e8348feecd65e9ad574d1e04d212a5", "b3b532e8ea6304446b1623e83b0b9a96968f926c", "7577a1ddf9195513a5c976887ad806d1386bb1e9", 
"6e46d8aa63db3285417c8ebb65340b5045ca106f", "11f17191bf74c80ad0b16b9f404df6d03f7c8814", "e8d1b134d48eb0928bc999923a4e092537e106f6", "170a5f5da9ac9187f1c88f21a88d35db38b4111a", "43c3b6a564b284382fdf8ae33f974f4e7a89600e", "2a41388040141ef6b016c100ef833a2a73ab8b42", "283181a2173b485726664edc6fe73f0465387629", "06262d6beeccf2784e4e36a995d5ee2ff73c8d11", "997b9ffe2f752ba84a66730cfd320d040e7ba2e2", "ce3f3088d0c0bf236638014a299a28e492069753", "cee66bd89d1e25355e78573220adcd017a2d97d8", "8ad0d8cf4bcb5c7eccf09f23c8b7d25439c4ae2b", "f6fc112ff7e4746b040c13f28700a9c47992045e", "c1fc70e0952f6a7587b84bf3366d2e57fc572fd7", "de048065ea2c5b3e306e2c963533df055e7dfcaa", "eb566490cd1aa9338831de8161c6659984e923fd", "abc1ef570bb2d7ea92cbe69e101eefa9a53e1d72", "b2e5df82c55295912194ec73f0dca346f7c113f6", "107fc60a6c7d58a6e2d8572ad8c19cc321a9ef53", "781c2553c4ed2a3147bbf78ad57ef9d0aeb6c7ed", "ce11b2d7905d2955c4282db5b68482edb846f29f", "5a07945293c6b032e465d64f2ec076b82e113fa6", "6b17b219bd1a718b5cd63427032d93c603fcf24f", "2525f336af31178b836e27f8c60056e18f1455d2", "c829be73584966e3162f7ccae72d9284a2ebf358", "4e23bada7a8a13f698e8fe2df870bf8677efe2e6", "2717998d89d34f45a1cca8b663b26d8bf10608a9", "c074dcc5000320ebf13e7a974befced1ab70a08f", "ddbd24a73ba3d74028596f393bb07a6b87a469c0", "1fe1bd6b760e3059fff73d53a57ce3a6079adea1", "ad4d1ecf5c5473c050e11f6876ce148de1c8920a", "7306d42ca158d40436cc5167e651d7ebfa6b89c1", "0bce54bfbd8119c73eb431559fc6ffbba741e6aa", "ec576efd18203bcb8273539fa277839ec92232a1", "7552a6dbee4a915b578453ed9f35a4c6cc114aa1", "a92adfdd8996ab2bd7cdc910ea1d3db03c66d34f", "861a51e66553979535df2b41971150453ab26372", "3266fcd1886e8ad883714e38203e66c0c6487f7b", "de3285da34df0262a4548574c2383c51387a24bf", "af13aae576f4d9ecc3de73a9ef8ff4396d057b8c", "3cc2d6ace4cf0bc3a6c4df5ca8da892275ca201f", "ddd9d7cb809589b701fba9f326d7cf998a63b14f", "749d605dd12a4af58de1fae6f5ef5e65eb06540e", "0fc254272db096a9305c760164520ad9914f4c9e", "3cbd3124b1b4f95fcdf53abd358d7ceec7861dda", 
"1e5c6c9fa9ba089931cfb2bc81e4368a4db5dd2d", "1ed617d14dbc53b20287d3405b14c68d8dad3965", "5c8ae37d532c7bb8d7f00dfde84df4ba63f46297", "211c42a567e02987a6f89b89527de3bf4d2e9f90", "e47e8fa44decf9adbcdb02f8a64b802fe33b29ef", "96e318f8ff91ba0b10348d4de4cb7c2142eb8ba9", "69adf2f122ff18848ff85e8de3ee3b2bc495838e", "0a0321785c8beac1cbaaec4d8ad0cfd4a0d6d457", "7a131fafa7058fb75fdca32d0529bc7cb50429bd", "9f65319b8a33c8ec11da2f034731d928bf92e29d", "352a620f0b96a7e76b9195a7038d5eec257fd994", "571b83f7fc01163383e6ca6a9791aea79cafa7dd", "fe0cf8eaa5a5f59225197ef1bb8613e603cd96d4", "e1256ff535bf4c024dd62faeb2418d48674ddfa2", "0b8b8776684009e537b9e2c0d87dbd56708ddcb4", "212608e00fc1e8912ff845ee7a4a67f88ba938fc", "7f97a36a5a634c30de5a8e8b2d1c812ca9f971ae", "fed8cc533037d7d925df572a440fd89f34d9c1fd", "b73795963dc623a634d218d29e4a5b74dfbc79f1", "102280e80470ace006e14d6ec9adda082603dea1", "bd379f8e08f88729a9214260e05967f4ca66cd65", "a89cbc90bbb4477a48aec185f2a112ea7ebe9b4d", "f74917fc0e55f4f5682909dcf6929abd19d33e2e", "cf4e5206722ba16061982b885f8c7c86beacd27c", "8509abbde2f4b42dc26a45cafddcccb2d370712f", "3dc522a6576c3475e4a166377cbbf4ba389c041f", "cb38b4a5e517b4bcb00efbb361f4bdcbcf1dca2c", "d8526863f35b29cbf8ac2ae756eaae0d2930ffb1", "44f48a4b1ef94a9104d063e53bf88a69ff0f55f3", "fb3ff56ab12bd250caf8254eca30cd97984a949a", "76cd5e43df44e389483f23cb578a9015d1483d70", "9e182e0cd9d70f876f1be7652c69373bcdf37fb4", "98b2f21db344b8b9f7747feaf86f92558595990c", "459e840ec58ef5ffcee60f49a94424eb503e8982", "eb526174fa071345ff7b1fad1fad240cd943a6d7", "4cdb6144d56098b819076a8572a664a2c2d27f72", "8e0ab1b08964393e4f9f42ca037220fe98aad7ac", "040dc119d5ca9ea3d5fc39953a91ec507ed8cc5d", "361eaef45fccfffd5b7df12fba902490a7d24a8d", "45b9b7fe3850ef83d39d52f6edcc0c24fcc0bc73", "e3d76f1920c5bf4a60129516abb4a2d8683e48ae", "c808c784237f167c78a87cc5a9d48152579c27a4", "4b48e912a17c79ac95d6a60afed8238c9ab9e553", "c71b0ed402437470f229b3fdabb88ad044c092ea", "c1482491f553726a8349337351692627a04d5dbe", 
"5121f42de7cb9e41f93646e087df82b573b23311", "2770b095613d4395045942dc60e6c560e882f887", "5a259f2f5337435f841d39dada832ab24e7b3325", "332548fd2e52b27e062bd6dcc1db0953ced6ed48", "5f64a2a9b6b3d410dd60dc2af4a58a428c5d85f9", "0a64f4fec592662316764283575d05913eb2135b", "c71217b2b111a51a31cf1107c71d250348d1ff68", "de7f5e4ccc2f38e0c8f3f72a930ae1c43e0fdcf0", "0c65226edb466204189b5aec8f1033542e2c17aa", "a98316980b126f90514f33214dde51813693fe0d", "c6eb026d3a0081f4cb5cde16d3170f8ecf8ce706", "6f5309d8cc76d3d300b72745887addd2a2480ba8", "c9efcd8e32dced6efa2bba64789df8d0a8e4996a", "43fe03ec1acb6ea9d05d2b22eeddb2631bd30437", "a322479a6851f57a3d74d017a9cb6d71395ed806", "d949fadc9b6c5c8b067fa42265ad30945f9caa99", "e13360cda1ebd6fa5c3f3386c0862f292e4dbee4", "6fed504da4e192fe4c2d452754d23d3db4a4e5e3", "3baa3d5325f00c7edc1f1427fcd5bdc6a420a63f", "f1245d318eb3d775e101355f5f085a9bc4a0339b", "beab10d1bdb0c95b2f880a81a747f6dd17caa9c2", "3b4da93fbdf7ae520fa00d39ffa694e850b85162", "3a27d164e931c422d16481916a2fa6401b74bcef", "ad2cb5c255e555d9767d526721a4c7053fa2ac58", "406c5aeca71011fd8f8bd233744a81b53ccf635a", "f02f0f6fcd56a9b1407045de6634df15c60a85cd", "17423fe480b109e1d924314c1dddb11b084e8a42", "9329523dc0bd4e2896d5f63cf2440f21b7a16f16", "b4ee64022cc3ccd14c7f9d4935c59b16456067d3", "44e6ce12b857aeade03a6e5d1b7fb81202c39489", "f553f8022b1417bc7420523220924b04e3f27b8e", "4f10a7697fb2a2c626d1190db2afba83c4ffe856", "44b827df6c433ca49bcf44f9f3ebfdc0774ee952", "85860d38c66a5cf2e6ffd6475a3a2ba096ea2920", "cb2470aade8e5630dcad5e479ab220db94ecbf91", "5bf85a60cf7506b0c14d484a2a50f553ae9a45a9", "dd8084b2878ca95d8f14bae73e1072922f0cc5da", "e7b2b0538731adaacb2255235e0a07d5ccf09189", "3ac09c2589178dac0b6a2ea2edf04b7629672d81", "266766818dbc5a4ca1161ae2bc14c9e269ddc490", "ba788365d70fa6c907b71a01d846532ba3110e31", "11ad162b3165b4353df8d7b4153fb26d6a310d11", "feea73095b1be0cbae1ad7af8ba2c4fb6f316d35", "6d91da37627c05150cb40cac323ca12a91965759", "3827f1cab643a57e3cd22fbffbf19dd5e8a298a8", 
"2296d79753118cfcd0fecefece301557f4cb66e2", "cfb8bc66502fb5f941ecdb22aec1fdbfdb73adce", "12ba7c6f559a69fbfaacf61bfb2f8431505b09a0", "511a8cdf2127ef8aa07cbdf9660fe9e0e2dfbde7", "9a42c519f0aaa68debbe9df00b090ca446d25bc4", "3e3227c8e9f44593d2499f4d1302575c77977b2e", "987a649cb33302c41412419f8eeb77048aa5513e", "c7c8d150ece08b12e3abdb6224000c07a6ce7d47", "4209783b0cab1f22341f0600eed4512155b1dee6", "4f7b92bd678772552b3c3edfc9a7c5c4a8c60a8e", "7fa4e972da46735971aad52413d17c4014c49e6e", "f61d5f2a082c65d5330f21b6f36312cc4fab8a3b", "8199803f476c12c7f6c0124d55d156b5d91314b6", "dec0c26855da90876c405e9fd42830c3051c2f5f", "768f6a14a7903099729872e0db231ea814eb05e9", "01dfd60c0851c4e5a99176e99aa369e1b5f606b7", "65984ea40c3b17bb8965c215b61972cd660f61a7", "ff44d8938c52cfdca48c80f8e1618bbcbf91cb2a", "d35534f3f59631951011539da2fe83f2844ca245", "b446bcd7fb78adfe346cf7a01a38e4f43760f363", "2e0d56794379c436b2d1be63e71a215dd67eb2ca", "831b4d8b0c0173b0bac0e328e844a0fbafae6639", "38f1fac3ed0fd054e009515e7bbc72cdd4cf801a", "0ed91520390ebdee13a0ac13d028f65d959bdc10", "436d80cc1b52365ed7b2477c0b385b6fbbb51d3b", "23dd8d17ce09c22d367e4d62c1ccf507bcbc64da", "6ca6ade6c9acb833790b1b4e7ee8842a04c607f7", "298cbc3dfbbb3a20af4eed97906650a4ea1c29e0", "6e7302a08e04e2120c50440f280fb77dcd5aeb35", "540831094fd9b80469c8dacb9320b7e342b50e03", "986224bad9684c359db7fac2192b7134b855fbe3", "893292315f5ecc73e84c5585900c53072de38550", "deda217e685a07f5fe6fec5f61bf12cf033e91f9", "5b8237ae83bc457e3b29e7209126f61120fba082", "2679e4f84c5e773cae31cef158eb358af475e22f", "3e421fd1775413bf89abd8e39a35e5e29d1a4dab", "e0e511a5d58a8d090ad169be4fcfdbeaef097a70", "f1ff567d7400ed7663d662c0b1e0ec6a9b900e70", "a7252d74d69a8dee603080eab8371cbca67cc9b6", "1c15e172032404df48ef685d6b1a5536843a7bd4", "0abd596e15ed9a814037089bc53e124e262f7464", "520796fed11df39bba7ea03844f4f465a6bf0655", "c8e3941b7638fa2533693998ccc1ee8765e8e1d7", "537061f3601965b5aab9f402763d9dcf451e1cef", "88909ec19d2c6750f836e8b9c15ee3e1236b37e7", 
"c904fb8be3e9948ccbf4f3c2549f0390a1f4903d", "890f9b0e5547c133b9294bd4045eb9b4e1b60880", "dbcab35c43c78411da8ceba4bdebe69f79308568", "4dff129a6f988d78c457ece463b774c3d81ac5c7", "44736c0c7cfced2c0f06c5ae8dd0111d9ea0dc20", "981449cdd5b820268c0876477419cba50d5d1316", "2d8001ffee6584b3f4d951d230dc00a06e8219f8", "9797de286a3101fc31fb51995c18ec7d3eab804d", "4e30107ee6a2e087f14a7725e7fc5535ec2f5a5f", "bd236913cfe07896e171ece9bda62c18b8c8197e", "27da432cf2b9129dce256e5bf7f2f18953eef5a5", "aafeb3d76155ec28e8ab6b4d063105d5e04e471d", "acee2201f8a15990551804dd382b86973eb7c0a8", "032825000c03b8ab4c207e1af4daeb1f225eb025", "aa799d721f75510be236ac73f26ec92b0a89ab10", "581e920ddb6ecfc2a313a3aa6fed3d933b917ab0", "24d45df91ebcfac7a49cdfb7116e971e12880612", "7788fa76f1488b1597ee2bebc462f628e659f61e", "e065a2cb4534492ccf46d0afc81b9ad8b420c5ec", "11824658170994e4d4655e8f688bace16a0d3e48", "beb49072f5ba79ed24750108c593e8982715498e", "cd023d2d067365c83d8e27431e83e7e66082f718", "35ccc836df60cd99c731412fe44156c7fd057b99", "4203f10b41e7931a63598989aa14478c04b725c9", "a7663528eb6c9b79a68b94800e30da952c0b6bb2", "19d4b3679294563247c126148912d44cbf03e40e", "5fa04523ff13a82b8b6612250a39e1edb5066521", "f9fb7979af4233c2dd14813da94ec7c38ce9232a", "7c825562b3ff4683ed049a372cb6807abb09af2a", "22264e60f1dfbc7d0b52549d1de560993dd96e46", "69de532d93ad8099f4d4902c4cad28db958adfea", "ff01bc3f49130d436fca24b987b7e3beedfa404d", "cf805d478aeb53520c0ab4fcdc9307d093c21e52", "3399f8f0dff8fcf001b711174d29c9d4fde89379", "fb54d3c37dc82891ff9dc7dd8caf31de00c40d6a", "3fb4bf38d34f7f7e5b3df36de2413d34da3e174a", "fb4c3b2f893baa1fbf8d16da2e09aa9868c61a7a", "cde8186c38c04dacac2e1fac1c3c68cf46516b9f", "3a3e55cf5bfd689d6c922e082efa0cd71cd2ae5c", "1e9f1bbb751fe538dde9f612f60eb946747defaa", "d69271c7b77bc3a06882884c21aa1b609b3f76cc", "6be0ab66c31023762e26d309a4a9d0096f72a7f0", "657e702326a1cbc561e059476e9be4d417c37795", "ad75879082132a73fe173a890a0f414f2c279739", "0aa0e5f96d512fcd2357129ad4363d6ae961327e", 
"d5d7e89e6210fcbaa52dc277c1e307632cd91dab", "26c89f890da91119ffa16d5a23fba963257ef3fc", "b3b467961ba66264bb73ffe00b1830d7874ae8ce", "7f5b379b12505d60f9303aab1fea48515d36d098", "5d9f468a2841ea2f27bbe3ef2c6fe531d444be68", "5c3fd194ba96c5eea41c0772ad0b2292dedcd197", "fdcca639b96aa982ae67544f58c7e60be9d0748e", "793e7f1ba18848908da30cbad14323b0389fd2a8", "25ff865460c2b5481fa4161749d5da8501010aa0", "c6ea6fee4823b511eecf41f6c2574a0728055baf", "015df3b57e44b8ddc51c87e5255fa4940bd91963", "101d1cff1aa5590a1f79bc485cbfec094a995f74", "0d760e7d762fa449737ad51431f3ff938d6803fe", "3f5b20c35f55417823f0201862d85af1f31e9348", "aab3561acbd19f7397cbae39dd34b3be33220309", "8f71c97206a03c366ddefaa6812f865ac6df87e9", "9ac43a98fe6fde668afb4fcc115e4ee353a6732d", "834ff8e06ed3f01c10958a276f1526fce7ffd387", "d5795049ff374404231e4d0aaa7725c2afcc73c3", "1b21acf8daaf86c4f2228fa3f5e9aa38ab8ad30d", "64b90c3220c43f58cb38da9af1a1b77da3dde63e", "3b4ec8af470948a72a6ed37a9fd226719a874ebc", "ac32cdb3e0a996c75d5df2973c1a2444a81c0a5e", "068a7c7849cb6480def2e124ac5a45564e094b2a", "2594bf77a1fef68d86be74a2cb79c55499cb2bec", "838ebcd2e57c319066d021eb51b5f0f18546d430", "3788a901df4c47d173a992a9622537301765e9cd", "9199c2b8cdad80735651a6a8bb9115c83acae650", "84f047ff1665f7fdb7653047caf4d43c2c9576c2", "8552f6e3f73db564a2e625cceb1d1348d70b598c", "915c5f261f15de60c934d64b4b15fdfaea7b4a91", "926e02d8d850cb2d08d49b9191ffbe7fa018b5f0", "e989a827fa3b00941410364cd3507e4779bffb69", "61f56afc7713ebfacfe382d1824966cb9b967d34", "1977d67a8db78d483a25c9fc6ad2ffad8d80c41c", "140a587914d1e60f636c1a479c4673631ad78d04", "89198580f18cad7f62393732868a977ffae57917", "e3466e5ba37f33474eff37555ac274cf098621cc", "411278b73afedca6976f02a8d3a38cdec3337f87", "f31c9328b5b4678388c19a39064a8056313f7cf4", "6ee1f57cbf7daa37576efca7e7d24040a5c94ee2", "2eef20a11324686099ee6f9b1a7613444b0d2112", "4b6eb9117c1b7833c8c6b95ecad427f8f994f023", "956466b5c3036ada2e18f8f7c1b7bf0650779d08", "5025a2f2a2d53db48f9504be93358c3ec5261646", 
"3a2a4edc0d3573b2d408ccc573bee4aa219c1e88", "14d72dc9f78d65534c68c3ed57305f14bd4b5753", "16f1a35d0149482d6b2b67df58b21b68622e6b9c", "c1660197f94786364e624efed4ba03c0d8ab6161", "abef85ca959f0e32168eb599d7aa3c769746f672", "ebe8408052d9bf05dc2007d01559dda6129840eb", "ff601ac3be7d90d64b57b83ad59ede15cd38a929", "9d6a35541f388db3f4ccf4c457e8ba23eb463654", "6581c5b17db7006f4cc3575d04bfc6546854a785", "399a5f7500648462fd8cf1704dfaeaea9d560e7e", "9c2978428e35f27a851432dd44f1d2721e0e10e3", "b55914fe572bd2449234eb05c300b32ad94e97d8", "21123fde70a1b2614fb4485d9ce786d2cc85a1ea", "ca97068efdacfde4a6408793cdfdf5d40f825039", "5136f69da8a61447a300a50c67d80d84a31b1257", "d75640108db01f7e0706780e6356a0c82c7eaf29", "1adbcca753c7b4f22cf3d6bc3a9579573d4d5846", "1608572473465387e4ed7b95936ecf7fbd10513f", "3c8f916264e8d15ba1bc618c6adf395e86dd7b40", "c1d70ce31ad6a8df5e55f256f545fafe0e08b644", "589d06db45e2319b29fc96582ea6c8be369f57ed", "c5482c2080ed780094091b9535865c4c4e5a2984", "5bcff482bd9652420f8f6b0e6e58ab59a562046e", "f39b94e1ab8beeaf05f28c7bbc08664b7c37ed8c", "7b83674072904f783881585a45017c1969e2d692", "4cc675422395ed7dc7e4772280f7c57cac6fbaee", "2d6130f043e69849fc0443bb489c5d21f933eddd", "95be490aef44da67ca1cef76b16df14b6e40c421", "bc27434e376db89fe0e6ef2d2fabc100d2575ec6", "7276a3ffa0941524083ac0fa9f0129746bca65d7", "44c019a5e72707a1f4e06c224b530b05684fbb0a", "149d0f12530a874093a478551e7f04c6641099d8", "1bd2ac7fa16af540f2c7ef753a5ab06ce54d3d02", "058d50af5456665dccda2b41b17bdfead72bdec8", "a89c12bae45ec11742c50261d6e2505272369d41", "26d3f57dd09efff6315ae0064cdad4877f5297d7", "de46fbad976ec6f02e53c70c1f92b4beb4d84f35", "7d66daaaf6a52c3c0f26972c10e08423a23f842c", "72f6bc80258293bddcca325fe95ce8df36ae6f06", "a3d8b5622c4b9af1f753aade57e4774730787a00", "bc8cbd105f45cdc447b6c7cf2093ec6a24b92728", "95300bce6b4554be7963620e87024449531987c5", "35e15c8aa2a3f017462a64b5ef940baf5993480f", "d4b34e327b62b7f3fbddfc403e4642b17245a3b7", "475de283dad61a8a9ed231dce0d8d62a54f4d062", 
"8e50abb8460805b9e5cb7a73b306b6be3a86baf1", "0c59071ddd33849bd431165bc2d21bbe165a81e0", "4e9f46a80ffaffa2dabde4fb48d6ac72398ef829", "9501db000474dbd182579d311dfb1b1ab8fa871f", "b42a8f2703867639f7ce68229b0916015878c5eb", "c87f7ee391d6000aef2eadb49f03fc237f4d1170", "3e90a70d768415e28fbf0dd56e53f8933784c416", "93afaa8c5fb5e0913e1f2a2de99636ddd30f7718", "36973330ae638571484e1f68aaf455e3e6f18ae9", "97bf8dcc43bd515cb602b5d263aa53029ec5f45e", "f8ebba8188c9f6b1688ba7ba76f297215e6cc7c7", "d94b37958657aa703d8a3d02a66ee251b4c3f597", "fccd0406749aecf76741460de7499689ebf4c676", "8de7da01bf4c2643f36dae4a8d2fa04906daff02", "50cbe0eb6ff6fac20629e86056d8d2e7d991ce52", "8298aa4a508a9586e5d54777b897eae64165a071", "7af4ee644a0b4269a080fc491f443b32d6930bff", "31ab7bb7a1f84553f3a9bc975131f4ec95caaf61", "a14260cd8c607afc6a9bd0c4df2ee22162e6d8c0", "16701e3cbd43b52e32d567649a194245dcd31829", "8824638e8077f62283d292804006ce94c92764bf", "bd46c9a7b51e8d8a7726b6fbacec80aa6f2097d4", "33526226231cce669317ece44e6af262b8395dd9", "b85e71d4e68588211c877fff8cda267b3a6bb6c9", "3149e6206484104959bcd112abb4e448fc35e311", "08d55271589f989d90a7edce3345f78f2468a7e0", "edc67e3e95c0ae693c4df3193e5957dfd3e9e5b3", "d406ec45ab1d1453cc207fff265077101154d613", "20b7d94df4011cd27e420976216b419890e254ff", "33c31cba98bda51f923caff75022d13bf62a5d60", "ee8e39c8d3d2b004fe51b40cf7f54549c2f635f3", "a6a5ee9cf956a08510269a674db6a4ef3aece36f", "23e881c9b791fd17e248b1fb4fc980710dd005d7", "82430878b6ee7a427ac389b51407599909576084", "734516f9eec8ec70ef924806fbc6f8093e94a0e8", "25bfe2d9fc573c2250ec3d7b084a0d50db06004f", "9d5a6d12b39c6574fe284d779d2c23c2800bb283", "5cd2b28b9e88b2be0b8c9d9ddce8440656477d1c", "b37538f9364252eec4182bdbb80ef1e4614c3acd", "6bdef989bd80d72c620aa9ac4c62c021cf4bd9d6", "12919f98aecdd74c1e0db56cba13d107553e421b", "92166eb883b0505040c2d61c758985e5ec051f83", "c09388ebb3180226d66902ea233f9485b0159851", "e0027a30bef2d84d33eb3d0d62d9ca4806a53230", "6355f7fd956466e8e9f09b297e6cdd155d66740e", 
"ab1b95677e8ea0f9881520b34a5f8192d141ef0e", "98f13ab2845cfe8513a0c05427a8b90d9c0c1b69", "dcca36085752eec824d489ed556378159464a0c8", "a577eefb31ba63baa087f321537b0be2784ec013", "0e5640677feb2e1d78639b516f7977e80d9d394f", "092597b8e0f31be1671025cea1b9fd28a48e04bc", "bf4780c028ba42bceb222bde11450672138602f5", "ac36284cfa95e613f9e726cd017a5dca207fe633", "8120e2d8233f5335b09e673e63395a76ae0e6bae", "137aa2f891d474fce1e7a1d1e9b3aefe21e22b34", "6462ef39ca88f538405616239471a8ea17d76259", "6dd5dbb6735846b214be72983e323726ef77c7a9", "e6d46d923f201da644ae8d8bd04721dd9ac0e73d", "6f6ce988a13ac08071a0e3349f80b7c8adc7a49d", "45f4b0087fdcc17f122cc4f7a9aa19dd51b40669", "2f43b614607163abf41dfe5d17ef6749a1b61304", "ff76ff05aa1ab17e5ca9864df2252e6bb44c8a17", "133f01aec1534604d184d56de866a4bd531dac87", "fd649233d62bf43d589818fbb41295e2d0669aeb", "5435d5f8b9f4def52ac84bee109320e64e58ab8f", "ff398e7b6584d9a692e70c2170b4eecaddd78357", "a14ae81609d09fed217aa12a4df9466553db4859", "0e5dad0fe99aed6978c6c6c95dc49c6dca601e6a", "84574aa43a98ad8a29470977e7b091f5a5ec2366", "61a3c45c9f802f9d5fa8d94fee811e203bac6487", "d9358de0d80f4f4d89c91b2b16fd52279b4834e4", "634541661d976c4b82d590ef6d1f3457d2857b19", "1063be2ad265751fb958b396ee26167fa0e844d2", "2d294c58b2afb529b26c49d3c92293431f5f98d0", "6cce5ccc5d366996f5a32de17a403341db5fddc6", "d12bea587989fc78b47584470fd8f689b6ab81d2", "101569eeef2cecc576578bd6500f1c2dcc0274e2", "07a31bd7a0bd7118f8ac0bc735feef90e304fb08", "4cb31f16e94067ce5eaeb8eae00eb0b0d49d46b2", "481fb0a74528fa7706669a5cce6a212ac46eaea3", "120bcc9879d953de7b2ecfbcd301f72f3a96fb87", "c5ed62f2b57e03d0a2e0cf13772b216ffad30c19", "9b9a1f18749e969c8f246894e59c62ae86b079be", "7f44e5929b11ce2192c3ae81fbe602081a7ab5c4", "0394e684bd0a94fc2ff09d2baef8059c2652ffb0", "7726a6ab26a1654d34ec04c0b7b3dd80c5f84e0d", "e4b825bf9d5df47e01e8d7829371d05208fc272d", "d8288322f32ee4501cef5a9b667e5bb79ebd7018", "5b721f86f4a394f05350641e639a9d6cb2046c45", "84c0f814951b80c3b2e39caf3925b56a9b2e1733", 
"e5b301ee349ba8e96ea6c71782295c4f06be6c31", "353b6c1f431feac6edde12b2dde7e6e702455abd", "62e0380a86e92709fe2c64e6a71ed94d152c6643", "a8e75978a5335fd3deb04572bb6ca43dbfad4738", "77cea27494499dd162221d1476bf70a87391790a", "34a41ec648d082270697b9ee264f0baf4ffb5c8d", "0c6a18b0cee01038eb1f9373c369835b236373ae", "b749ca71c60904d7dad6fc8fa142bf81f6e56a62", "7918698ffa86cdd6123bc2f1f613be1ab38c0d2f", "87f285782d755eb85d8922840e67ed9602cfd6b9", "5c5e1f367e8768a9fb0f1b2f9dbfa060a22e75c0", "83d50257eb4c0aa8d16d27bf2ee8d0614fd63bf6", "bbcb4920b312da201bf4d2359383fb4ee3b17ed9", "3bf8e4d89b9e6d004de6ea52e3e9d68f6015f94b", "0ee661a1b6bbfadb5a482ec643573de53a9adf5e", "176d9121e4e645344de4706dfb345ad456bfb84a", "d082f35534932dfa1b034499fc603f299645862d", "46976097c54e86032932d559c8eb82ffea4bb6bb", "c9832564d5dc601113b4d80e5a05ede6fee9f7dd", "dce15becf620afd938818ce5ebd793c798782b70", "04470861408d14cc860f24e73d93b3bb476492d0", "8e64d872a419f122f870026179ccbc5daa1645fd", "8813368c6c14552539137aba2b6f8c55f561b75f", "53de11d144cd2eda7cf1bb644ae27f8ef2489289", "584909d2220b52c0d037e8761d80cb22f516773f", "60a006bdfe5b8bf3243404fae8a5f4a9d58fa892", "05287cbad6093deffe9a0fdb9115605595dfeaf0", "b712f08f819b925ff7587b6c09a8855bc295d795", "841a5de1d71a0b51957d9be9d9bebed33fb5d9fa", "b1efefcc9a5d30be90776571a6cc0071f3679753", "33ac7fd3a622da23308f21b0c4986ae8a86ecd2b", "b747fcad32484dfbe29530a15776d0df5688a7db", "cd3b713722ccb1e2ae3b050837ca296b2a2dd82a", "33ad23377eaead8955ed1c2b087a5e536fecf44e", "59031a35b0727925f8c47c3b2194224323489d68", "5dc52c64991c655a12936867594326cf6352eb8e", "0059b3dfc7056f26de1eabaafd1ad542e34c2c2e", "7b21db9efc3403fa054739921e29aedcc81b1fb1", "98a660c15c821ea6d49a61c5061cd88e26c18c65", "d04d5692461d208dd5f079b98082eda887b62323", "cceec87bad847b9b87178bde8ce5cce6bf1a8e99", "4686bdcee01520ed6a769943f112b2471e436208", "0756efe121e37479157010e18723e0c8da02a34b", "0fc4d0c328036cc197a48f278f7c15cb12860f3a", "651cafb2620ab60a0e4f550c080231f20ae6d26e", 
"d78077a7aa8a302d4a6a09fb9737ab489ae169a6", "00ebc3fa871933265711558fa9486057937c416e", "a324d61c79fe2e240e080f0dab358aa72dd002b3", "205b34b6035aa7b23d89f1aed2850b1d3780de35", "a11ce3c9b78bf3f868b1467b620219ff651fe125", "502d30c5eac92c7db587d85d080343fbd9bc469e", "b8d4754813b88ef1a583da2fcd164398824d04db", "964a3196d44f0fefa7de3403849d22bbafa73886", "2d88e7922d9f046ace0234f9f96f570ee848a5b5", "60b3601d70f5cdcfef9934b24bcb3cc4dde663e7", "20da3ec27d221973c681ed8713f3e00ff10fef6b", "5161e38e4ea716dcfb554ccb88901b3d97778f64", "81831ed8e5b304e9d28d2d8524d952b12b4cbf55", "146bbf00298ee1caecde3d74e59a2b8773d2c0fc", "a532cfc69259254192aee3fc5be614d9197e7824", "cd436f05fb4aeeda5d1085f2fe0384526571a46e", "384f972c81c52fe36849600728865ea50a0c4670", "214ac8196d8061981bef271b37a279526aab5024", "c84991fe3bf0635e326a05e34b11ccaf74d233dc", "0f65c91d0ed218eaa7137a0f6ad2f2d731cf8dab", "aad7b12936e0ced60bc0be95e8670b60b5d5ce20", "28b5b5f20ad584e560cd9fb4d81b0a22279b2e7b", "0aebe97a92f590bdf21cdadfddec8061c682cdb2", "ad5a1621190d18dd429930ab5125c849ce7e4506", "9a98dd6d6aaba05c9e46411ea263f74df908203d", "3f7cf52fb5bf7b622dce17bb9dfe747ce4a65b96", "f2d605985821597773bc6b956036bdbc5d307386", "ae8240095c9cca2c395f173fece2f46277b94929", "93d74b1315a09f568027b6d8b3068ef048d17889", "89e7d23e0c6a1d636f2da68aaef58efee36b718b", "9b1bcef8bfef0fb5eb5ea9af0b699aa0534fceca", "4d3c4c3fe8742821242368e87cd72da0bd7d3783", "395a91d49e9283e1bf2d61a75c3dc846b347ea74", "b1444b3bf15eec84f6d9a2ade7989bb980ea7bd1", "ce032dae834f383125cdd852e7c1bc793d4c3ba3", "65f25a28629ecfe8bae42a33883a8b9ab3c7d047", "02a92b79391ddac0acef4f665b396f7f39ca2972", "14ba910c46d659871843b31d5be6cba59843a8b8", "2348f1fa2940b01ec90e023fac8cc96812189774", "f56c407f918cf89ffa2ec3c51c383d53510c10e1", "532f7ec8e0c8f7331417dd4a45dc2e8930874066", "bcc172a1051be261afacdd5313619881cbe0f676", "68f19f06f49aa98b676fc6e315b25e23a1efb1f0", "29e793271370c1f9f5ac03d7b1e70d1efa10577c", "bfa763e7cec812f855c712895fa48eae89a34a00", 
"942b89d8d17e89e58c82453de2bfcbbeb09adc81", "b7b8e7813fbc12849f2daba5cab604abd8cbaab6", "8f6263e4d3775757e804796e104631c7a2bb8679", "0fabb4a40f2e3a2502cd935e54e090a304006c1c", "c1dd69df9dfbd7b526cc89a5749f7f7fabc1e290", "80d42f74ee9bf03f3790c8d0f5a307deffe0b3b7", "499f1d647d938235e9186d968b7bb2ab20f2726d", "49fe4f387ac7e5852a78b327ec42cc7300c5f8e0", "14e949f5754f9e5160e8bfa3f1364dd92c2bb8d6", "57246142814d7010d3592e3a39a1ed819dd01f3b", "656531036cee6b2c2c71954bb6540ef6b2e016d0", "0a0007cfd40ae9694c84f109aea11ec4f2b6cf39", "2910fcd11fafee3f9339387929221f4fc1160973", "224d0eee53c2aa5d426d2c9b7fa5d843a47cf1db", "3a2fc58222870d8bed62442c00341e8c0a39ec87", "61084a25ebe736e8f6d7a6e53b2c20d9723c4608", "251e386a90f21db6d02806395b012b297cbf06ff", "cefaad8241bceb24827a71bf7c2556e458e57faa", "ffea4184a0b24807b5f4ed87f9a985c2a27027d9", "1a9337d70a87d0e30966ecd1d7a9b0bbc7be161f", "e6c4715476216be00ea61fc276ff39fb4620d785", "93eb3963bc20e28af26c53ef3bce1e76b15e3209", "78436256ff8f2e448b28e854ebec5e8d8306cf21", "13907865a97afde053d7bb7134d58a7bbc12043c", "5278b7a6f1178bf5f90cd3388908925edff5ad46", "78fede85d6595e7a0939095821121f8bfae05da6", "75e5ba7621935b57b2be7bf4a10cad66a9c445b9", "3e18b439a6fff09a0e4c245eb1298531cc766a72", "741950ae2e503a614f257cdac653d1bb30cb8e79", "5c6de2d9f93b90034f07860ae485a2accf529285", "5d2a9519b5d421a58dbb02b0b836f04ab396525d", "12c4ba96eaa37586f07be0d82b2e99964048dcb5", "430c4d7ad76e51d83bbd7ec9d3f856043f054915", "02601d184d79742c7cd0c0ed80e846d95def052e", "6479b61ea89e9d474ffdefa71f068fbcde22cc44", "13f06b08f371ba8b5d31c3e288b4deb61335b462", "7e6b11674e989d6a86afda241a51f7fa3790b93e", "94d0c0ed5bb9c13c2c8231adfdd9d96cf837514a", "ab6428083fb7f0e7eec46e2c4965c69bf55e5f87", "d7e8672caecc7e4b17e8d9d3cbd673d402c7e7af", "1da19761c5b36e3169e7c7e9d2b2c519b5276691", "2ba534b20211586708c7f6300e8c2d6ff2245adb", "049b8821cc1545c3d80428277aaefccf800e7fa3", "64fb6c31033e38eaaa10c0f7c2b7995f8fa84de3", "765ec537eebe7ff30d0c298fe15b1c3475cbdb98", 
"e64fa00da02cc774559db5be88bc2862afbfd432", "69ee78388e0f40941496ab92efe3e0fa065ad22e", "641f9c87356c0829e690272b010848242058b8bc", "21fbcd455db2ed78d47d10fd16ed88b667632906", "4823dcfb0bdc1af20e4da85035b8fc2c71a6add1", "c6bceb0eb8aded28edbe2607ecbe2f5ee2b57bdc", "28bf3aee9eecc2f7a7b4ac71bfe89534d3fe5f19", "fbb3e9903fbcf0b8778712c29459255731342cbe", "68e4ed4daa2ae94c789443ed222601a4a47f9a45", "214959c01b73e2d6eb4a39607de6fdc062526047", "5ec7e6b9cf06ab90cca7bda8e7a4b54ecb6859ac", "d0e684f9614ab97a8f4ec47775124242ce493f26", "f10ddad356ac9376e8c982f96cead7f6bdee3251", "081093b0b3195e3f6bfa283b49fee26b606d4f67", "20928315086a49e0cdea0ec66f2e78e9c564f794", "2799d53ca80d67f104bef207a667fa12b4c59d62", "4953da81a1a93ab3a30152d4403c5e8fa79edc09", "1da28f17e4df1f45056d6b8e76c08252ee909333", "a0ff4d516c81189fe0719f03f5caebe31d70e0fb", "df9a016950ffaaa8526e7332f0a6568ad43d054f", "da7ab36f1d14bedf0a3280257342bd1dba9833e1", "1059729bcca57731c81d8a9c866ceb8ed3547d8d", "6e75fcf384b31ea2108a81d868fbb886f39cd188", "02a5216a20423bfeda6bc49636e7a6ebfcf8576d", "20a2890c92b7ad628aedd25db3384276f0fdaf90", "1f61c1ba961c6328923f4c6219c6889ccb538506", "51e9630e2d3f353d43834d06ef5b75fbccf0243a", "0b2a3e66aadf351fc3da369c15d7d8a94e0e4468", "dda95e28395324aa87027d9692423b3a6f42dd4a", "2557e2ed0a19cbe2d78e3d4daa5d39e62be5d009", "8a9db3cb799922a7b1642a5b5d7361142de5d656", "f0d6c3a5dc85b84863987edc5e62ea8e5549d412", "f87ae55502267f82e031a8101b0efa626f3e6c7a", "ccb83c86a3b27d1663292c34742ce1b5fda43a67", "260081528f19f6f7e8e5ae16a776b62ad8c2ed0d", "60161c712a491764b6f227d72e9d01e956caa873", "1e5c8fded283dd4c305a1e4c9c1fc8e0988f9c01", "29401a5ad840cfa2c525003815149d62399e91d1", "b848eec33ad46861b3148bce7a7e53c7f8dd0db7", "fdc6715efab69e057d53d782e930fd3e700cc7f9", "0f51f1586edb3437812c8ccc339ce62f98d2dcc3", "15686988df40f5ec8475471ca14a6406aec4c85b", "7c8e64f20b58ddd1fc0e9c972c3eb0fe35b40a6b", "14eeb1ed623c7c2be0414c75f696523d46ae45d5", "ab8af4cb5243544e38852bb670aafe5a2fd9b3ec", 
"4b70374555c32c6a1e0db43674a7183170083450", "bad7254ae08f8bf1305e70c7de28374f67f151fd", "79d6d077a543a7b789d42dd10c5e102bbe2cbf2c", "0f9bf5d8f9087fcba419379600b86ae9e9940013", "6a4f694b028b3d8392cbb185a34e49a657245265", "1b510618969a298225764eaee54ee700fefb2d23", "e23707a6978e08f7a2b83d82330b360759a9c1a0", "21c478661268d62f32060472b34e7b9f31b76b54", "20124806b1bbd5617b01fefe2ad06f1af5717c78", "9c2039d036c01e421176d33c1436633d03be4678", "78c823c2b3e6b198eb01dcc553f2e2642d23af15", "ee7a2bf42afba263925b125b3c7de3a2cb7698c0", "0c922f8be9f0368c1abd53b8d9554f06b73a56cf", "c21db705a33212768c63be11747d075371c7307f", "ee0f601226819c01fc6b6add2e551887473bbc23", "f8bebca34cc787dd2652deb182cf66d346d06094", "ab036048cf90296171ad2bb7265c5a5b7f3252f7", "38dcf70922020b81b641ba44ad2424ec50e5c8fd", "b691463de5e30e7efd18b9d02cbf83c805834fe7", "38998d58a0c1048ad4c08d0022066e22ba6d1201", "868d2b757c5bf7583562abc58f5eb756d3bdbf26", "54389f3952811d03992d0d4b9f7b1c40b37ca613", "f89e5b1f61b221c7b00db55b64239a28f8ba9fe0", "d913c573a5ad25098bbca39a0d86e3b65ed4b6a0", "55bc43bc2b34acf3ab0cf0a4ef901ef5b786baf1", "ce4df7862bbf7e70d0052470e4bced479bf83703", "bc445bebf386b769263eed043e982bc1e4755080", "9217dd87274bb6b5ddf9bb403a065822eaf9efdf", "6f24906ec9888fd2a2c158794f22fd428c833c6e", "0464b56c5beee717b074ed950abcc959372256a6", "771d7744a78de4a3a7cff09fe05ff7f7e4d15142", "20e783a2df0486cd1c8b6b59fc76220f5718b304", "4a2ba5d7b41ae1d8334c5b8bb1e76ce29e4367ee", "226ecde126cbcefcab78d551be1bbd814738397f", "1cc952244563c61596d58d8f6c339a235fa975f7", "dff612c198dc50a7bef5a9cd48da5da1f893fa72", "7c5e33e18ef4fe92907bd1b9d752bcd85a4ec90c", "310dbc59aa3309f2a3813728783d81a9f7f1c939", "1100ad221e6ab2e898beb0e49af107b34c02090b", "7e9914314af3b9998b22ec8906544fe7786ce9e8", "061fb1b627554f52ff8f3ebb531e326767d845ec", "23095c6fc92f41a86f93276d446cfc72c7ce7b23", "46d7f41189c5e262df9ad1165d5a40d2b685bb0f", "d1a760d034200c0a34aa1dbdaa0620756c2aa5e8", "f98a975642972ce24e42e6957f63be556c11dd31", 
"f9cf3bbca1598a0309c1395c5a46f17f774f4094", "d29279725abfae6bffb81e59296443f3d5f7a689", "3877e8c9ccb2d9186d619b7827f1184138d1a837", "ce073cb70eec80d87c9e07a4ec2d4162d91e23a6", "544519fa0794d41a04307973156016b6c679ffa5", "ce06015fc0eb2add064ef93c9b97ad063c03aef4", "65057b96e3c81302caa1d2cf779a3adc8fb5af74", "13f8c13cfbf2a504f02745bd44da4ac40fd8f8df", "02fbf86b975c3f45b04de8288d1565cce8b53f62", "00dfd58bbaff871603e4a8aa81e67915b0675aeb", "d97094aafbcec8e4cb63098b42a7553ffd2cb854", "1a382d4e436e3e4f3d735f6e34ba2bc61e30838e", "da7151c9e15598971921a2fac4900b2503982535", "4bc98553a4c324d6f501b6403c1956fad700e559", "5c669416ecb41245b2525afcf4fb962d8b871ac1", "08779b7be598b6f4e50dfeaba6255631d9476ff6", "14a3194bb454f1f2e3fc1452045ac18c69959368", "2f5aa539fb27962aa4ba5b264ee503e6921bf531", "6bb19408458dbae075be7f1612b969b565b4767a", "43b7f3d356ae89b3772f3e64d4456ff0f442d4d3", "07a6468d70dd62ce63a90b1b67651729f9c3037a", "a8a50323fee41ca89940a812a90e817fea8a23a1", "2889eba77ec480a175df2a2e8728d73d12b9b790", "1ece9e67cdc9141f3d27102d820ed9490ea57039", "293c4f086152ae9d635ab1eb84b72f44cb57d129", "2cd9a7eefc126469b566fc429657bb889d13b4fa", "05603a8742c8fe8545cade27b65bcf8de3e5f43c", "1f9102f425f28552e477cf71af0846550f3f9ed9", "8fd52d2ef3d2bc8929f988c8c00dedc26d7a3e7d", "084145b7b828d93a5bb5f5dd04f3ccd003dcd5c1", "5c81048593a6729b2d0b948a1129a97bdbf82f11", "11138173fa5e72a6bba314881d8d5dd74c1ac83f", "22493d8d4d7b4604cae23638dce4981b36e30147", "afd492a598476b5a9b13e2b6d28a76b0707c0a35", "ee36ec51c08844c63c3faad94869de618cd31cfc", "07db2bec3691535ad9d8525b4cd0f230ddc6b384", "a7817a19ee8123026491b6389795c040e6040763", "12fa3c73a7764cb65bb76fed0601fc5d79893bcd", "52df9da94f8b76ec130d5128222ef7fd76304438", "19dfea55ce8c6999415cb6216de0e5bd108a4f79", "1bb652545b316701faf582d673a98060ee426f37", "32bd968e6cf31e69ee5fca14d3eadeec7f4187c6", "4d4a8bb9c47350746fc41b183db422175fea8d55", "601bed4ec12d11e4230dc715bf871de580f3b917", "12b2ae1ebbaed2e664a028b3d845456061722a6a", 
"d6b2d4f1bc08dc3d3922fb43b1b8e3614349f539", "acc550d31b50c8d95794dc35dd1e271f979a0854", "66cd8e7338b20999786343651658520ca9544006", "b6fd905efd5da32bd32047896074a821477cb564", "89dbfb9b75d3902748d73bfb5965e7d11e83c10e", "1fe73457d92f6158847e5e8dd18f040ef7cb3987", "b7239d619c5ad3d80a170bb33ca427bb4278f4a1", "ecde1c993f58a4ebd34aa7033e49f3f964b05fc6", "f691d38efe4cb8ba6fbd812d54de4098ac992c3c", "d3dcdd5bd1592ff8555629068e046ce0741d6062", "7e2fbda3cd115024ad0125806db7dc28fcc22489", "6d08526a7a02ffa51ac50fab2eb9f794af38539b", "3a430e576b134df617ff0d148e2d0bc5472b95d2", "abc618b5c4f69a34c655bbb93c6003cc671b0f72", "fbd5678f6f29ab9217c09b54bede92887e38329d", "b5fabc72ecdebd832fb02f1ea2e85672f2ef125e", "c4baa3d2fe702d3e96c500274f7fd9e63f8b3d6d", "0f708ace6f4829e466a8a549bd23f6fcf719ab9d", "f06b30bf5874ad6168615b4443d011dd44e1ceda", "0437028464aae0237a292bb5317838b2bc6e1ce9", "1dfa8cce7b8dfd4b954d3fd90bef7bf569c87fb8", "b0cce83fedd64f46af401e04d15f50f294bc8259", "f827c2d22bf74ba6047e265680a48f7279ea24e4", "543119d346faf7c4272811a6338b3ee957148589", "dd829179231c2d7a51bdfaaed25afb7b82f9c9d2", "c132a6e869cd171e403784c172961471733dce31", "14f55f333c29871867b48e1a9084132542d88083", "2c49e626d297e6ee26671459a77776b97b5f2c88", "3d23fcaec13373c514965ebf50f16939bde19aac", "8faa4f2e287ff1bcaba2e0cd84d82a66bb2982f5", "5078c4494a8ba13f568b6cea454cca571c7735a3", "513674c8bcaf3cedfc934f4e15a07a882c6bbf07", "9fede7e3fac47a4206a643c4647834e5680f2a8f", "b58e71a3336193bed5785b2818a4fec85dd5f5ff", "fb04a8cb4b573d6b565a5b0c369d775e6bfb04f1", "74410df341f44f5c915d97725ce396a862d44a7b", "143e3ec5a5a11547da2d77a17d0ca7b1940280b5", "15cf11ddfc046b2ed2766c375e8ad067baaf8347", "8f02ec0be21461fbcedf51d864f944cfc42c875f", "3137870bf1314e25c2246d4a9d77d941aadd5398", "069e61934f8104b76b31c1f18d2e9a4f5ec50de4", "2ade545f25f5ba66295aeab3a89583e7cf6101b3", "326c5970064825a95d87f5a096201699d8ccf51c", "df6216785f64c2c191df4d07012cc1a9f339cc26", "c9bda86e23cab9e4f15ea0c4cbb6cc02b9dfb709", 
"e35f4238bbc6c4acf4fce9591fa5cebf64fd0c2e", "9922a2ec8dfb307bb1fcb334098fd912e23b3bab", "ed5519a03f52e47047079da2e0c480eb8c4a9805", "eae83dac25f323f24b0f2f9df1ad6dc47456231e", "7ba6ac1b769ad7098037c07a5b7399fe9d97fcc8", "c900e0ad4c95948baaf0acd8449fde26f9b4952a", "c72e6992f44ce75a40f44be4365dc4f264735cfb", "30fe65e38a5822184a463bf0d26a374b5e08d2b9", "faef5bedb0b1e92730febce4e6af33b803bd463a", "e31f24b92a19aeb9a7611a9ca09223c8f5238ae1", "f147057dfe4bbb4f9499de432cb2393547f2f339", "2594a77a3f0dd5073f79ba620e2f287804cec630", "66b9e9d488ef2bad9bf0d2fb98f73f38fec2bff8", "aaee760cd3e5669dd597f0daf8c50b4da995e7e5", "754626bd5fb06fee5e10962fdfeddd495513e84b", "4317856a1458baa427dc00e8ea505d2fc5f118ab", "1e3a9b0cfdeca614c5689a3419016c89bf9fbdfa", "555dba0fcb3c7bafb881a9a17a795c16475ce0da", "d082f64b8a0a07d105207eb822be58ffb61b353a", "82d5656c74362d6c5c5fd889fc48f7816bbb033a", "382e9f1ed5e3e0e7dd5b1f27cdd14e4202044f9b", "9b07084c074ba3710fee59ed749c001ae70aa408", "f5f5db54551556418c45ef009cec4647e7d0bc58", "603bfd8e1230816526e213855c5de172443f9ee1", "9e297343da13cf9ba0ad8b5b75c07723136f4885", "beeeade98988e55afe81faaedf06dc00848ec751", "710011644006c18291ad512456b7580095d628a2", "cb7bbede1c2eae831dd73440f439955c4310837f", "5991f26b871c8fd8f675c11e44c445e3cebfbe7d", "7bce03583d85b307d5b84872e2ff147661a70158", "91a7816609f991c1ac45b791c1cd3c6117194bb0", "f9034d80a0c318a8c564ce3aa9d8545d871b9663", "39ade96e6e680d58a8aaa8e2a72616b706707dce", "ee815f60dc4a090fa9fcfba0135f4707af21420d", "24f1e2b7a48c2c88c9e44de27dc3eefd563f6d39", "2297ead8a0000dab33ebc73b7d5781b3258322b6", "88ad82e6f2264f75f7783232ba9185a2f931a5d1", "248e2d3079f4f59789770a7f57244a434e8467d0", "290a96c9aa653eb6dd64d5b0fa5bae7bf208ae14", "b36b1485cc07df374cf2b01e4797a98da887d641", "bd6158bed42b038863c8ace0c96700e87c1c0231", "270e5266a1f6e76954dedbc2caf6ff61a5fbf8d0", "0b6314e9e741d19346d936eaaa7d6fcf46dd3ed7", "1dd3faf5488751c9de10977528ab96be24616138", "f616c433671302eff23923d38ea87223202818f6", 
"5c315aae464602115674716a7f976c4992fcb98e", "692aecba13add2b8c1d82db303f5b2ec743ceb44", "9ebe5d78163a91239f10c453d76082dfa329851d", "bc15e0ebe7ff84e090aa2d74d753d87906d497f7", "9fb93b7c2bae866608f26c4254e5bd69cc5031d6", "e4a1b46b5c639d433d21b34b788df8d81b518729", "61efeb64e8431cfbafa4b02eb76bf0c58e61a0fa", "4e8b61165c8908284619acc62c46c7afac85d8a0", "05455f5e3c3989be4991cb74b73cdfd0d6522622", "486c9a0e5eb1e0bf107c31c2bf9689b25e18383b", "29c340c83b3bbef9c43b0c50b4d571d5ed037cbd", "2edf55ebc88e89c4caff0c49c6b8e79f46407d19", "42505464808dfb446f521fc6ff2cfeffd4d68ff1", "bd104ea0561755c6c0b963ef9e1126b21f44abb2", "04d08f834da05dec5494fc599d48ceccc992179e", "da59f4fa6dc73b2b8b041e7d4e0e7f121297658a", "40229a034d2fcddc3df32f906ec4ef6a3b3e017e", "751223e9636f4624551b37d8891f0e06eeb64a5d", "64f6c8c333bc043d41b83b6e62fbe3a521882ec3", "4562904c57a9184d9fdfab8a534a82484bba8e4e", "4a3d96b2a53114da4be3880f652a6eef3f3cc035", "19089ecd35606445c62ff4abaa26252f44dcda89", "17efdcdb40094119f1144252b3670356aac9ae4f", "8ee11bf054c0fccf2949357363987768c8893d34", "3b60520684aaf944935b39e20a8b31063b27a25a", "400ad65e42cc438eb7a587cff750648eca91d513", "09ba6b87736fa29aae88c5b4cf30f25188e4c6ef", "548d25088fd4acf71764f9c5d0c01901a13e6eed", "92799a9754a9481e3e90aa59dfe56e9b21b70cfc", "2ed9a69ee6509c0b3fe5a51d1116dccc877653ba", "ac86ccc16d555484a91741e4cb578b75599147b2", "dd320b262e7891c6e51b34e213aa0ccb14af3ecb", "7b9b7c42151c11c7cd170a528412a84b5ab626ca", "70755fffb5e42b10bc2aba2446ef2063af92da96", "fb3da9b47460eedf857e386a562cc5348d78d544", "bec26ea7335ed723a1c4360d6365f2dd846161e7", "3a415f3fc013bf3d045d9a45c7ed5d83996f4556", "179ae598004d76c56dcc95c5aab3419ec8996af1", "d7c279661dcb2c83ee63f149db3be7f0771237f5", "4da3b3888a74365fead3057f34d986c0a439fb84", "db1a9b8d8ce9a5696a96f8db4206b6f72707730e", "032707da24e5486d0bc319259ae8a8409a19a2f0", "cb124b7019f6c1d2b25d8a18a51d78cd12b511be", "24ddb628035b32fe0b412a11b09512b4612d8fe0", "a4bb791b135bdc721c8fcc5bdef612ca654d7377", 
"7a7b84b8d8c1edb07f16180ef2c243ef30d85e1d", "b4a09b6a7c78c3d54a0ce59ae3ebb6d4ebfd7d06", "22f44121a6de3ff942c5fbf4ab1d6734315baf66", "af9bca1ac3900182afae03f500618fc40da9ff87", "047f8d5d5134dd12c67038623417f05ab9885056", "da1ba46027b7236c937d276fb54e99906036c4ef", "03650399cbf53d916d10a507852c9e94a02ee13f", "6236962ce0d627fc23774f0680e77069b9667803", "b8556e7ace156cee0199c057c5bf6eacaae45e7c", "199d4e49f959a6d77da6054ddba7ea328d5acbb4", "8bfec7afcf5015017406fc04c43c1f43eb723631", "a760ce8baddf2da7946d2ed6f02ac3927f39a9da", "59b2edf39e0490892d8865b8252bd7f11e2b2228", "6372262685162f3f11ef7ac1882c327e98564875", "04b29b6f1210f4309f3d5ab9e6bd2c8a026ce244", "5865c3aae2125e1078d3d1b230e7206dcada16b3", "bcfd771ee51f2813e910b339d08d10057af1e294", "97a611717ddcb13a6fb5f19ebbc96dd005ed4887", "5c9e708d60db41727ebc2174bcd99fe838c21a29", "0c79485f64733bd128ef8c395034b6bc77abf94d", "639937b3a1b8bded3f7e9a40e85bd3770016cf3c", "0eb7a91ff9ce61ff22a7e410934402f0b70fd8a2", "74dd7f66c26219901e8937910996f0003fc718c6", "d0500d15ee605ae404d4a8d54dc38ed5b39f3832", "e224d8fd66e3594cf27bcd06cc2ed25fc4419b7f", "827f6ddae388c9ee727cb7d91fb276f774ee4cc9", "ac2e44622efbbab525d4301c83cb4d5d7f6f0e55", "33867c617e9f264c9e857d73358e0fd5b60a149a", "4115652b7fad7a474b5af1f4c063b1f9717b1bf8", "1328c0a8a357b303f6e853581360370ef2975612", "2aec012bb6dcaacd9d7a1e45bc5204fac7b63b3c", "02977d45f759bb913aa9fd32f415e826b788ffd5", "16e95a907b016951da7c9327927bb039534151da", "f3f65a8113d6a2dcbc690fd47dfee2dff0f41097", "2c7c3a74da960cc76c00965bd3e343958464da45", "646eb6e19d2d0c858d71d3d810f4642cfaa130c2", "fd4fab290541b621ed27773e191472b0dcb225cd", "630d2c5b60e28ff8710f415a8adb7a73f8162d9c", "34ed02e82e9816e7491b1af9f6f65d7fff87ff84", "3f9210830e31f42103c6550f75cb37fde18e5af1", "fd8168f1c50de85bac58a8d328df0a50248b16ae", "a1b6aed0b998f0e6e049fcc209287c8b2801d054", "6efa147457e1ce984f6418d219e14d94ef6cfbce", "2b8a61184b6423e3d5285803eb1908ff955db1a8", "bbe91ef3ec4303d77a3847aa18fe5d9ef2739566", 
"4c6cc339d1ef9cedcee85ad4bab0ca687ef39bda", "e934a2ea5d56284b41f4c9d791dd5f77541ae1ef", "c5b2d166f77f072dfbbbd538729bf7ac11f4094d", "88af2da57863b60ddd3776d61113b552e827d3b8", "0e677f2b798f5c1f7143ba983467321a7851565a", "b63246b95d5711486ddec96c214316f885a52b18", "587c47130af6b65c30861a18fbecab66aa0f9ba6", "2acf7e58f0a526b957be2099c10aab693f795973", "7697c8a0eea8b4f7e9b5c3378879cf34ba6d79b3", "e11247abf2c359428d414a97ea21e0744e2ef9ac", "037aeb661eb8018ae8dd92cc0b4029231dffc60b", "31bb49ba7df94b88add9e3c2db72a4a98927bb05", "44241248f16c172a1c2fb90e48fd728ba26220fc", "0f03f1bb36fc1b3ce49d777203961689456b311b", "16e8b0a1e8451d5f697b94c0c2b32a00abee1d52", "1dc0756f1a58d0ae8beff58d7ddf8fd5dc6d174e", "95601da73c7090ff23c101739426d4ad51ff4c90", "47aeb3b82f54b5ae8142b4bdda7b614433e69b9a", "ff8315c1a0587563510195356c9153729b533c5b", "033e517804afb183a32a5b916e79733968a468d7", "eb02daee558e483427ebcf5d1f142f6443a6de6b", "e054919d68b963bf484b5c9f5c73aa834018736b", "b88771387d5c0f09ea9a2ccc743b11471fb257b4", "449b87347fe7f9c3f17e969fab1617fbfd9ccb1b", "a6075b621cf11b967f1cdc64de854ff96dc04f66", "792928e5e539dfbba334c36bee337449c4918d6a", "ef57b2b689cb91e0c7f9f9a2691c340349d9b2d4", "ce3304119ba6391cb6bb25c4b3dff79164df9ac6", "e9a3399f3e8c28747db0fd418c5dfa1cef5c9243", "4104eccb9288a35cb3d00e29a9bf0d6282608d4d", "18aaeaba26d95482fc40d560c49f0a7f22ea0870", "099053f2cbfa06c0141371b9f34e26970e316426", "4e5be940b2b58247fd90552b4a2a72b841ed03e1", "ae2ee60219d63475c56fcb6c3f2b3664b3c4dbd9", "4189b1e61a4f244acb05b34c46628b7e78a8b8cb", "79b684396347d916c4a488f2024212ec9f4eff0b", "ba5460719b5f3ef11b93e1254647d8ad6ce55383", "ce094eb72aa42dea87ed9ab531ea31aa0a9561fb", "117c7cf24b9310ed785ef6fb84e95c73186f61e6", "22b1b97a0722621ae72fb3b33f76226b73549ea1", "75d7356d26995263dd035959709991e597c94b5f", "6d60c427036e63957f1ce72930146964c5743749", "4ed4143034fc6303737c7ad5118a72d9a5d12cf2", "6295f5a59c528953f282df71e4d0646ee2b4b50e", "da23d90bacf246b75ef752a2cbb138c4fcd789b7", 
"4014d74e8f5ea4d76c2c1add81d0c88d6e342478", "20cfb4136c1a984a330a2a9664fcdadc2228b0bc", "5f676d6eca4c72d1a3f3acf5a4081c29140650fb", "ad1679295a5e5ebe7ad05ea1502bce961ec68057", "55eb7ec9b9740f6c69d6e62062a24bfa091bbb0c", "e351beaa000aa5875d00fef12eb14e1cb91530bf", "004ec53d1f12cc4c0a7c809bf3b7acaee2180fd9", "820b0a63599208bca29c6d2ceed4bfb38e8860d4", "4ef8462d844f92569b94d92d5a6f5c267a3a7304", "bf1ebcaad91c2c0ed35544159415b3ad388cc7a9", "325d5606c874bfc3b652d7924e2d7bedcca3d96e", "eed7920682789a9afd0de4efd726cd9a706940c8", "a5ae7fe2bb268adf0c1cd8e3377f478fca5e4529", "4e6e5cb93e7e564bc426b5b27888d55101504c50", "47662d1a368daf70ba70ef2d59eb6209f98b675d", "4fac09969ee80d485876e3198c7177181c600a4a", "f76a04bdc43f1e440b274b299b07ce2e423431e6", "77eba8289e257df835e16ce8e0919acebd02f7e4", "740f744c93dfcc4788408c9054875872b518c11b", "483f85e1ebef9d10a951b3c01751892aca92a2c2", "45bedfcb562e48a64436ea3131bc91098eb93dab", "08f6ad0a3e75b715852f825d12b6f28883f5ca05", "a34582457b10b3627c853002c85afb434ac8e9ec", "54fc3551b3b08767d5d731092f10ba4573a2c822", "4688787d064e59023a304f7c9af950d192ddd33e", "fc798314994bf94d1cde8d615ba4d5e61b6268b6", "d781a60f7090222631c555b87da02ced570caabe", "1330847470ccad3d47a09c70c76de2913f414695", "9458c518a6e2d40fb1d6ca1066d6a0c73e1d6b73", "6ffdbac58e15e0ff084310b0a804520ad4bd013e", "527ed756eba3bc77eb58d22d4cfe27da04d3bbbb", "ccb54fc5f263a8bc2a8373839cb6855f528f10d3", "54f07b2e74f20f118cc9a37674fdcd5d74d985b2", "026b5b8062e5a8d86c541cfa976f8eee97b30ab8", "0d2a9f3357717e0a44eb82d5eabfc047cc4d46f1", "97865d31b5e771cf4162bc9eae7de6991ceb8bbf", "9f1319162974cb4d6125e8c6c52878ebc48eb8a7", "4d4be112c180d5a4484fe6e17e506ad6e1853f08", "7a7b386385ec5a458c6d45f58c399941c2f054d6", "a0067d23456c74d4bef5a8bef5bbe3c92e29c314", "63c7c0511e82172b6b60af21e56df68e2c6ab228", "f294278e03868257bfce132b8cf189359ada915a", "4b5dd0a1b866f928734bc36afd597adca20a7ec1", "9ff1a4754391a5cf91c998eeaf75b93a4f5f2451", "7b9a5d9d7386d47c51cb473f6338988bd6e9f2b1", 
"9151f8f0af49d8c94ba6f6ca13e9399beab7ecd7", "e64b683e32525643a9ddb6b6af8b0472ef5b6a37", "7831ab4f8c622d91974579c1ff749dadc170c73c", "8d2d27753d316494574c4e8ac51190921e0765bb", "3acfbc2aee9b2ed246a640930ebc2e350621f990", "28bd795c580ca24f40dc82cd01d9d277749d2661", "fbbb73b476639b89b5a0f400251fe33c4fec4f6d", "67af3aed0deb70eb0fcc089c47f15adfb8f637ee", "25d514d26ecbc147becf4117512523412e1f060b", "7f6a527a3dc2e526aa59a57cadb20ff727124973", "de0df8b2b4755da9f70cf1613d7b12040d0ce8ef", "344a5802999dddd0a6d1c4d511910af2eb922231", "34b4f264578fc674dd2bf8d478ec1314739a5629", "5431845cc36d8d0fbd489e6921a341fe2e654246", "eba31ad9871c6dd5c2e7c62a121bbb417dcb1223", "787c1bb6d1f2341c5909a0d6d7314bced96f4681", "4563b46d42079242f06567b3f2e2f7a80cb3befe", "6e173ad91b288418c290aa8891193873933423b3", "242ae7b1b1c3e1aafcbe9cef3cb23918c6f94f2c", "3ce37af3ac0ed2eba08267a3605730b2e0433da5", "1b4b3d0ce900996a6da8928e16370e21d15ed83e", "891dcefdcc89a6e5784393d5ec550ecd75ce1cda", "ff946df1cea6c107b2c336419c34ea69cc3ddbc4", "28e1982d20b6eff33989abbef3e9e74400dbf508", "75e4efae6de6d1ac787a6ca381fb49381fcb062b", "1f3ae376b22136a2fe2e96632d4383653a42e4d4", "45c31cde87258414f33412b3b12fc5bec7cb3ba9", "d8bf148899f09a0aad18a196ce729384a4464e2b", "396de136485d85242583951bee4e7b19234bc964", "a35f0515a98a4f37a14a4954240ba09a91017730", "d4fba386caca1b5b2ee35ee5310b5fce50b2b1c3", "911505a4242da555c6828509d1b47ba7854abb7a", "2271d554787fdad561fafc6e9f742eea94d35518", "84a74ef8680b66e6dccbc69ae80321a52780a68e", "c459014131cbcd85f5bd5c0a89115b5cc1512be9", "a000e15656e84dd538f1f0b8f8639dd29f122c95", "f0b4f5104571020206b2d5e606c4d70f496983f9", "609d81ddf393164581b3e3bf11609a712ac47522", "9e99f818b37d44ec6aac345fb2c5356d83d511c7", "496f3d14cf466f054d395a3c71fa2cd6a3dda61d", "6584c3c877400e1689a11ef70133daa86a238602", "33b61be191e63b0c9974be708180275c9d5b3057", "4735fa28fa2a2af98f7b266efd300a00e60dddf7", "72a3bb0fb490355a926c5a689e12268bff9ff842", "5c02bd53c0a6eb361972e8a4df60cdb30c6e3930", 
"4e1258db62e4762fd8647b250fda9c3567f86eb8", "19359fb238888c0eb012a4ab5c6f0fa0e9be493b", "e379e73e11868abb1728c3acdc77e2c51673eb0d", "d4b4020e289c095ce2c2941685c6cd37667f5cc9", "c175ebe550761b18bac24d394d85bdfaf3b7718c", "0733ec1953f6c774eb3a723618e1268586b46359", "61dfebbb02dad16b56cd9e6c54b5da3ab41caf1c", "62dd66f9f4995cfdaafb479de50363ce0255b1bd", "5b719410e7829c98c074bc2947697fac3b505b64", "4f0bf2508ae801aee082b37f684085adf0d06d23", "060f67c8a0de8fee9c1732b63ab40627993f93d0", "b331ca23aed90394c05f06701f90afd550131fe3", "12bb0cb32e48269da2902c4c6d41ea2966ba8462", "288dbc40c027af002298b38954d648fddd4e2fd3", "36b40c75a3e53c633c4afb5a9309d10e12c292c7", "5491478ae2c58af21389ed3af21babd362511a8e", "e4d0e87d0bd6ead4ccd39fc5b6c62287560bac5b", "5bb6703bc01e4f7ab7e043964ec6579ac06a7c03", "99726ad232cef837f37914b63de70d8c5101f4e2", "55c46ae1154ed310610bdf5f6d9e7023d14c7eb4", "8a40b6c75dd6392ee0d3af73cdfc46f59337efa9", "0e49a23fafa4b2e2ac097292acf00298458932b4", "9c81d436b300494bc88d4de3ac3ec3cc9c43c161", "259ddd3c618feec51576baac7eaaf80ea924b791", "7e456e94f3080c761f858264428ee4c91cd187b2", "3a49507c46a2b8c6411809c81ac47b2b1d2282c3", "049186d674173ebb76496f9ecee55e17ed1ca41b", "824f20de33febe3f0b19276cb7cdd25cb1e789b7", "c48b68dc780c71ab0f0f530cd160aa564ed08ade", "7081958a390d3033f5f33e22bbfec7055ea8d601", "847e07387142c1bcc65035109ccce681ef88362c", "fadafdd7dedd2bdd775b4591a998c8b5254081e1", "05c91e8a29483ced50c5f2d869617b80f7dacdd9", "08e995c080a566fe59884a527b72e13844b6f176", "70e79d7b64f5540d309465620b0dab19d9520df1", "205af28b4fcd6b569d0241bb6b255edb325965a4", "ca0185529706df92745e656639179675c717d8d5", "0fd1715da386d454b3d6571cf6d06477479f54fc", "b313751548018e4ecd5ae2ce6b3b94fbd9cae33e", "9753ee59db115e1e84a7c045f2234a3f63f255b1", "4b9c47856f8314ecbe4d0efc65278c2ededb2738", "70d2f5e897086b8d3914f8fa1d9e479d71597e96", "b7eead8586ffe069edd190956bd338d82c69f880", "9df86395c11565afa8683f6f0a9ca005485c5589", "ae4e2c81c8a8354c93c4b21442c26773352935dd", 
"cc7c63473c5bef5ae09f26b2258691d9ffdd5f93", "6c6bb85a08b0bdc50cf8f98408d790ccdb418798", "19878141fbb3117d411599b1a74a44fc3daf296d", "8f8c0243816f16a21dea1c20b5c81bc223088594", "e475e857b2f5574eb626e7e01be47b416deff268", "81706277ed180a92d2eeb94ac0560f7dc591ee13", "4958c06da5581fd0b4904d3bf0ee09958ecdba5b", "789b8fff223b0db0fe3babf46ea98b1d5197f0c0", "951f21a5671a4cd14b1ef1728dfe305bda72366f", "5bfc32d9457f43d2488583167af4f3175fdcdc03", "f545b121b9612707339dfdc40eca32def5e60430", "929bd1d11d4f9cbc638779fbaf958f0efb82e603", "c0be23ae7f327f9415e583aee1936b9932c9b58b", "09cf3f1764ab1029f3a7d57b70ae5d5954486d69", "fe50efe9e282c63941ec23eb9b8c7510b6283228", "0708059e3bedbea1cbfae1c8cd6b7259d4b56b5b", "180bd019eab85bbf01d9cddc837242e111825750", "29c7dfbbba7a74e9aafb6a6919629b0a7f576530", "35a39c7da14b1d288c0f9201374b307f667d63a3", "874da338c01fb7a87d605fcde6c52835eee03d5e", "75a74a74d6abbbb302a99de3225c8870fa149aee", "0aae88cf63090ea5b2c80cd014ef4837bcbaadd8", "cda4fb9df653b5721ad4fe8b4a88468a410e55ec", "0ed4b4d6d1a0c49c4eb619aab36db559b620d99f", "6d8a7399a195023f8546846231cc032342d0c622", "06d028bd761ad6f29e9f1835d6686d9880706438", "9bcfadd22b2c84a717c56a2725971b6d49d3a804", "10e704c82616fb5d9c48e0e68ee86d4f83789d96", "5b6d05ce368e69485cb08dd97903075e7f517aed", "79fa57dedafddd3f3720ca26eb41c82086bfb332", "35265cbd9c6ea95753f7c6b71659f7f7ef9081b6", "43a4dd79bb26e3b722ac8bea20f5916c30599851", "2f95340b01cfa48b867f336185e89acfedfa4d92", "6ed738ff03fd9042965abdfaa3ed8322de15c116", "13940d0cc90dbf854a58f92d533ce7053aac024a", "d8722ffbca906a685abe57f3b7b9c1b542adfa0c", "4d9c02567e7b9e065108eb83ea3f03fcff880462", "0d7fcdb99dc0d65b510f2b0b09d3d3cfed390261", "7d50df03d0c8a26eaaeaef47de68691f9ac73701", "5613cb13ab381c8a8b81181ac786255705691626", "faa46ef96493b04694555738100d9f983915cf9b", "838a4bcfeb36dc7bdb4a38f776fc0a70ce8ae9f0", "aac900e572423cd0f10ae22cb87f610cdee607a7", "cd64530a910ba28cbd127c78913dd787184f8e6d", "2138ccf78dcf428c22951cc066a11ba397f6fcef", 
"62f017907e19766c76887209d01d4307be0cc573", "42e155ea109eae773dadf74d713485be83fca105", "4aa8db1a3379f00db2403bba7dade5d6e258b9e9", "ab1dfcd96654af0bf6e805ffa2de0f55a73c025d", "7a0fb972e524cb9115cae655e24f2ae0cfe448e0", "bebb8a97b2940a4e5f6e9d3caf6d71af21585eda", "d9218c2bbc7449dbccac351f55675efd810535db", "06ad99f19cf9cb4a40741a789e4acbf4433c19ae", "913062218c7498b2617bb9d7821fe1201659c5cc", "df5fe0c195eea34ddc8d80efedb25f1b9034d07d", "b33e8db8ccabdfc49211e46d78d09b14557d4cba", "749382d19bfe9fb8d0c5e94d0c9b0a63ab531cb7", "c5366f412f2e8e78280afcccc544156f63b516e3", "205f39189a7f696be8b4ee0018d071cb473b5006", "3c7825dcf5a027bd07eb0fe4cce23910b89cf050", "e496d6be415038de1636bbe8202cac9c1cea9dbe", "9fb701dd40e35a6abc973b6d89a455de45dd8616", "fcf393a90190e376b617cc02e4a473106684d066", "90d735cffd84e8f2ae4d0c9493590f3a7d99daf1", "f78863f4e7c4c57744715abe524ae4256be884a9", "f18ff597bbfca10f84d017ac5e1ef0de6d7ad66c", "6cba90e81d460bb32188201a2c10622baa55015e", "5a5f0287484f0d480fed1ce585dbf729586f0edc", "97463b5a0fef72a576367f55d46aa3eb7576ae01", "d0a21f94de312a0ff31657fd103d6b29db823caa", "6e782073a013ce3dbc5b9b56087fd0300c510f67", "8d3fbdb9783716c1832a0b7ab1da6390c2869c14", "d05759932001aa6f1f71e7dc261c4716f57a5397", "f5acfc4c017447ea94c9d9cb19a9f1fcd4aa51e6", "cb8382f43ce073322eba82809f02d3084dad7969", "1181f1146db7170b09f28f7cc51c42c63547d84b", "7825708552c86079d0d11f48033ced391c0754ce", "7f205b9fca7e66ac80758c4d6caabe148deb8581", "a46283e90bcdc0ee35c680411942c90df130f448", "8ccbbd9da0749d96f09164e28480d54935ee171c", "7f59657c883f77dc26393c2f9ed3d19bdf51137b", "43ae4867d058453e9abce760ff0f9427789bab3a", "beb4546ae95f79235c5f3c0e9cc301b5d6fc9374", "2b43100a13811b33cc9f905fa1334bfd8b1873ba", "f1d090fcea63d9f9e835c49352a3cd576ec899c1", "8b2704a5218a6ef70e553eaf0a463bd55129b69d", "363ca0a3f908859b1b55c2ff77cc900957653748", "af8e22ef8c405f9cc9ad26314cb7a9e7d3d4eec2", "051a84f0e39126c1ebeeb379a405816d5d06604d", "0b80fdb5b78422efdb3cdb840c78630de0af61f3", 
"adc4bc7639d5f1c5ead8728882e2390339d061ed", "fae83b145e5eeda8327de9f19df286edfaf5e60c", "526ce5c72af5e1f93b8029a26e2eed7d1ac009f5", "e198e10fc6b01e37b435f91559be67ba0e9bf40b", "ea890846912f16a0f3a860fce289596a7dac575f", "d09fd7e0bb5d997963cfef45452724416b2bb052", "403a108dec92363fd1f465340bd54dbfe65af870", "52af7625f7e7a0bd9f9d8eeafd631c4d431e67e7", "33ec047f1084e290c8a6f516bc75345b6bcf02a0", "0b78fd881d0f402fd9b773249af65819e48ad36d", "5042b358705e8d8e8b0655d07f751be6a1565482", "9774430006f1ed017156b17f3cf669071e398c58", "f8ed5f2c71e1a647a82677df24e70cc46d2f12a8", "fe7e3cc1f3412bbbf37d277eeb3b17b8b21d71d5", "5d233e6f23b1c306cf62af49ce66faac2078f967", "939f9fa056f8be445da19b43da64bd2405851a43", "6c30b29b24dc11e37fe36c6e2c283e1c8fe5e339", "32728e1eb1da13686b69cc0bd7cce55a5c963cdd", "5a4c6246758c522f68e75491eb65eafda375b701", "179545c1fc645cb2ad9b31a30f48352d541876ff", "64974a76d4bfd9b327fa378ddc1fbb7c566d9d53", "da5bfddcfe703ca60c930e79d6df302920ab9465", "77652e55f73539df94f03489544504874f96d25e", "64102c217cba63a89cd2227dc4b3a9ed2104b73e", "3fa3591399755c8a50c60877652ef083ca37d3b3", "4c815f367213cc0fb8c61773cd04a5ca8be2c959", "dae420b776957e6b8cf5fbbacd7bc0ec226b3e2e", "e6c8f5067ec2ad6af33745312b45fab03e7e038b", "5e09155cfb7a8bab2217e5d34cd0d6a4a0586868", "2df731a01db3caf45105c40ac266f76fe1871470", "44dd150b9020b2253107b4a4af3644f0a51718a3", "6ca7a82ec1c51417c4f0b8eebddb53a73a3874b1", "be8c517406528edc47c4ec0222e2a603950c2762", "17d03da4db3bb89537d644b682b2a091d563af4a", "7ef44b7c2b5533d00001ae81f9293bdb592f1146", "6733adb12458678c606759233f6f55782bace372", "8d1adf0ac74e901a94f05eca2f684528129a630a", "8aae23847e1beb4a6d51881750ce36822ca7ed0b", "9e5c2d85a1caed701b68ddf6f239f3ff941bb707", "142e233adceed9171f718a214a7eba8497af4324", "67e6ddce6fea17bb2b171c949ee224936d36c0d1", "512befa10b9b704c9368c2fbffe0dc3efb1ba1bf", "ee1465cbbc1d03cb9eddaad8618a4feea78a01ce", "3e69ed088f588f6ecb30969bc6e4dbfacb35133e", "f78fe101b21be36e98cd3da010051bb9b9829a1e", 
"78d4d861c766af2a8da8855bece5da4e6eed2e1c", "370b5757a5379b15e30d619e4d3fb9e8e13f3256", "f6742010372210d06e531e7df7df9c01a185e241", "9686dcf40e6fdc4152f38bd12b929bcd4f3bbbcc", "80e9c28c369a6c49f9dd10473c663a25dc9716d5", "26c884829897b3035702800937d4d15fef7010e4", "f8ba921670c94ed94d94a98d64f38b857b0dc104", "6e93fd7400585f5df57b5343699cb7cda20cfcc2", "c7685fdbee2d96ef056a89ab4fa43df5aeae7ba7", "a125bc55bdf4bec7484111eea9ae537be314ec62", "6316a4b689706b0f01b40f9a3cef47b92bc52411", "8bf57dc0dd45ed969ad9690033d44af24fd18e05", "41f195f421b548357088c2985077d6b14003ce7e", "e5eb7fa8c9a812d402facfe8e4672670541ed108", "4414a328466db1e8ab9651bf4e0f9f1fe1a163e4", "8b6fded4d08bf0b7c56966b60562ee096af1f0c4", "9294739e24e1929794330067b84f7eafd286e1c8", "d185f4f05c587e23c0119f2cdfac8ea335197ac0", "0359f7357ea8191206b9da45298902de9f054c92", "cdd30bd77c7a4fa21176a21498f65f6b8b873965", "e8f4a4e0fe0b2f0054b44b947828d71e10ec61a7", "52012b4ecb78f6b4b9ea496be98bcfe0944353cd", "ea3fa5e6004c0504feaa31e01b2ea19f138e9a78", "3db6fd6a0e9bb30f2421e84ee5e433683d17d9c1", "76640cb1a683a479ce2e0d6681d821ff39126d63", "757e4cb981e807d83539d9982ad325331cb59b16", "68996c28bc050158f025a17908eb4bc805c3ee55", "63c109946ffd401ee1195ed28f2fb87c2159e63d", "3f4bfa4e3655ef392eb5ad609d31c05f29826b45", "46f32991ebb6235509a6d297928947a8c483f29e", "b85b754ace15f4e9bee4ee76296580ddfbc3a11e", "c3bcc4ee9e81ce9c5c0845f34e9992872a8defc0", "52f23e1a386c87b0dab8bfdf9694c781cd0a3984", "c49aed65fcf9ded15c44f9cbb4b161f851c6fa88", "c3dc4f414f5233df96a9661609557e341b71670d", "7935f644c8044c0d3b81e2842e5ecc3672698bbb", "0cf7bb49fc6dd9ab0d60e7cff4bb3e53b2676a1b", "aca728cab26b95fbe04ec230b389878656d8af5b", "1d846934503e2bd7b8ea63b2eafe00e29507f06a", "37105ca0bc1f11fcc7c6b7946603f3d572571d76", "c27f64eaf48e88758f650e38fa4e043c16580d26", "ae1de0359f4ed53918824271c888b7b36b8a5d41", "d4026438ce2b92302fa635c05507cf0e888414c0", "eab53c9e3e8442050aa6ad97003f2356a365adaa", "15df73918e084a146cd215b839a3eec1cc813a78", 
"96e0b67f34208b85bd90aecffdb92bc5134befc8", "b6c00e51590c48a48fae51385b3534c4d282f76c", "978b32ff990d636f7e2050bb05b8df7dfcbb42a1", "f4465454811acb2021a46d84d94fc88e2dda00a6", "858901405086056361f8f1839c2f3d65fc86a748", "eb309b11fd2b8d28cbaf7a72a49df14630ed696a", "64e75f53ff3991099c3fb72ceca55b76544374e5", "a5bf83f99f71e3840f651fbeef9f334d8e75fd75", "41c8e222ebb26e72050f5d26c82f25d7618b700f", "258a8c6710a9b0c2dc3818333ec035730062b1a5", "880be65e233d4302744e2154b2ef172291ee9779", "10550ee13855bd7403946032354b0cd92a10d0aa", "763b60feaabceebbe9eddfbaa0378b8b454327aa", "96ec76d2579a3b877019e715da58d8c47d343399", "e9d1b3767c06c896f89690deea7a95401ae4582b", "bd25c4ad7471580ed9787eae041b80a3c4fe97bb", "b6c047ab10dd86b1443b088029ffe05d79bbe257", "ef473c96dde98e2015b2d135a17a2d734319649a", "3f4c262d836b2867a53eefb959057350bf7219c9", "c3a1a3d13bf1cb2b9c054857b857c3fb9d7176f6", "5ce2cb4c76b0cdffe135cf24b9cda7ae841c8d49", "f963967e52a5fd97fa3ebd679fd098c3cb70340e", "4263f2c81a8b79f637c3fdf432a9b1e58e1c25e5", "fe14d8177cbdb7e5b4085302e6e044f7a4c19cb2", "5217ab9b723158b3ba2235e807d165e72fd33007", "1eb48895d86404251aa21323e5a811c19f9a55f9", "80a6bb337b8fdc17bffb8038f3b1467d01204375", "dc550f361ae82ec6e1a0cf67edf6a0138163382e", "72160aae43cd9b2c3aae5574acc0d00ea0993b9e", "d3f40b393e0e6a88ae4b4072e01ddb0b420300af", "64782a2bc5da11b1b18ca20cecf7bdc26a538d68", "874713dfa7ba8b3ffcc47ed5f8b60849d77f6ea8", "819c93dfe531ad6aba71cd48942c9e07b7a89b1b", "bf4fcd80083f3145176b64d15bab78456a7e5e43", "081a431107eb38812b74a8cd036ca5e97235b499", "751b26e7791b29e4e53ab915bfd263f96f531f56", "865d4ce1751ff3c0a8eb41077a9aa7bd94603c47", "601834a4150e9af028df90535ab61d812c45082c", "6b7f7817b2e5a7e7d409af2254a903fc0d6e02b6", "32df63d395b5462a8a4a3c3574ae7916b0cd4d1d", "a6f81619158d9caeaa0863738ab400b9ba2d77c2", "d44ca9e7690b88e813021e67b855d871cdb5022f", "1ad780e02edf155c09ea84251289a054b671b98a", "d7bd37920a3a4a4d681151131e23a839695c8d5b", "0614cafad1b546faa7e99c67c9bda6bae2cacb5e", 
"628f9c1454b85ff528a60cd8e43ec7874cf17931", "831a64f59944fa05f023288f284325429026e4e8", "3813a77005fcc87e1a65c272c9c7a9a87c80c000", "10e12d11cb98ffa5ae82343f8904cfe321ae8004", "fba464cb8e3eff455fe80e8fb6d3547768efba2f", "40c9dce0a4c18829c4100bff5845eb7799b54ca1", "d19df82c5ea644937bf182fabdc0e36e78ea6867", "5551a03353f571b552125dd4ee57301b69a10c46", "324f39fb5673ec2296d90142cf9a909e595d82cf", "141768ab49a5a9f5adcf0cf7e43a23471a7e5d82", "066d71fcd997033dce4ca58df924397dfe0b5fd1", "3ce2ecf3d6ace8d80303daf67345be6ec33b3a93", "75908b6460eb0781130ed0aa94585be25a584996", "dae1726852228b9c3c2b45f440f38f904747e40f", "5f453a35d312debfc993d687fd0b7c36c1704b16", "526c79c6ce39882310b814b7918449d48662e2a9", "530ce1097d0681a0f9d3ce877c5ba31617b1d709", "da54a3d6dc5827abba96edf5ec1e6791ad05760b", "2d05e768c64628c034db858b7154c6cbd580b2d5", "c696c9bbe27434cb6279223a79b17535cd6e88c8", "1050cd9bf281d0b7367c03d931e6e0b4fc08ccd3", "6d618657fa5a584d805b562302fe1090957194ba", "23ecc496eaa238ac884e6bae5763f6138a9c90a3", "dff38cac0a1004037024f0ed2a72f76f4e49318b", "12ded6a869b4e21149452234140257019af9494d", "66b9d954dd8204c3a970d86d91dd4ea0eb12db47", "1d51b256af68c5546d230f3e6f41da029e0f5852", "7c457c9a658327af6f6490729b4cab1239c22005", "2d2fb01f761d21a459cfb34935bc47ab45a9913b", "78f57e5e23ca40af858e6e97ebecb694036bd8a8", "4e4a47e2d285e55f3d0b6d449d6b9893615db5cd", "5911dcef05ffec02cc1dd88ec6feb1f1e0e8bdcb", "4188bd3ef976ea0dec24a2512b44d7673fd4ad26", "4d83a25931ff8f73130a4d07e0209fcb3191db4b", "b11df79c812ff7ea63f7c93ec8eafefc3fd04f7e", "3960882a7a1cd19dfb711e35a5fc1843ed9002e7", "2293413ebd24e377c1785113b695cc8a918a5fdb", "ca0363d29e790f80f924cedaf93cb42308365b3d", "0badf61e8d3b26a0d8b60fe94ba5c606718daf0b", "88535dba55b0a80975df179d31a6cc80cae1cc92", "9101363521de0ec1cf50349da701996e4d1148c8", "aa3e1824af497dc16ae27e6818a0e89c78a18371", "fa72e39971855dff6beb8174b5fa654e0ab7d324", "4d530a4629671939d9ded1f294b0183b56a513ef", "c5f1ae9f46dc44624591db3d5e9f90a6a8391111", 
"bf0836e5c10add0b13005990ba019a9c4b744b06", "b87db5ac17312db60e26394f9e3e1a51647cca66", "82c303cf4852ad18116a2eea31e2291325bc19c3", "256ef946b4cecd8889df8d799d0c9175ae986af9", "25e05a1ea19d5baf5e642c2a43cca19c5cbb60f8", "19c82eacd77b35f57ac8815b979716e08e3339ca", "dd033d4886f2e687b82d893a2c14dae02962ea70", "0ec67c69e0975cfcbd8ba787cc0889aec4cc5399", "51b70582fb0d536d4a235f91bf6ad382f29e2601", "6cb4c7f52fbe386a4ab06d5ca61a11d69abba0e4", "0f29bc5d8458358d74dc8c4fd6968b4182dd71d2", "055530f7f771bb1d5f352e2758d1242408d34e4d", "0322e69172f54b95ae6a90eb3af91d3daa5e36ea", "39ce2232452c0cd459e32a19c1abe2a2648d0c3f", "8a12934c4cb793c6f1e40129f37847414c1cc5c0", "fdbc602a749ef070a7ac11c78dc8d468c0b60154", "3652e841fcd9828f3be9b1e6ba48fa7f1714804d", "4ac7de66841babac6b482cb54722074e2eb42079", "a4f37cfdde3af723336205b361aefc9eca688f5c", "1f02bf412a82ad99fe99dc3cfb3adec9dd41eabb", "339937141ffb547af8e746718fbf2365cc1570c8", "7887824e9cc42914165dd3d96b956bff7560e4e4", "57f4e54a63ef95596dbc743f391c3fff461f278b", "0580edbd7865414c62a36da9504d1169dea78d6f", "7918e3e15099b4b2943746e1f6c9e3992a79c5f3", "77db171a523fc3d08c91cea94c9562f3edce56e1", "794ddb1f3b7598985d4d289b5b0664be736a50c4", "4e1836914bbcf94dc00e604b24b1b0d6d7b61e66", "798e58c181f3ba3aecbe41acd1881860c5e2df3a", "f3f77b803b375f0c63971b59d0906cb700ea24ed", "bcefb15246b1c9cea74a49a4ba1c990b6b97a19c", "3e76496aa3840bca2974d6d087bfa4267a390768", "2f837ff8b134b785ee185a9c24e1f82b4e54df04", "dee36d438d7dcb5923ab63dfe1e8676726dd4d69", "0eff410cd6a93d0e37048e236f62e209bc4383d1", "0d9815f62498db21f06ee0a9cc8b166acc93888e", "5f380850c50bf6f0963e38885776d9d7db81d7a0", "83ac942d71ba908c8d76fc68de6173151f012b38", "2a7bca56e2539c8cf1ae4e9da521879b7951872d", "2465fc22e03faf030e5a319479a95ef1dfc46e14", "b43b6551ecc556557b63edb8b0dc39901ed0343b", "b89862f38fff416d2fcda389f5c59daba56241db", "c4541802086461420afb1ecb5bb8ccd5962a9f02", "5e99b49b4c5fb2a72392ea199edacd650bd122c5", "9e5acdda54481104aaf19974dca6382ed5ff21ed", 
"f8ec92f6d009b588ddfbb47a518dd5e73855547d", "14e428f2ff3dc5cf96e5742eedb156c1ea12ece1", "4c5b38ac5d60ab0272145a5a4d50872c7b89fe1b", "71a9d7cf8cf1e206cb5fa18795f5ab7588c61aba", "cc713a92d8a3aff6f1586923ca9ba267d5e89251", "8ac2d704f27a2ddf19b40c8e4695da629aa52a54", "2bae810500388dd595f4ebe992c36e1443b048d2", "9aba281955117eb4a7aed36775f55f27e4dde42f", "516f784f145390e22cb4607cb525175ff4c7109b", "c66ecbae0f2bfa7cdbf5082fb8f0567878b4a599", "2564848f094f7c1cd5e599aa907947b10b5c7df2", "57d37ad025b5796457eee7392d2038910988655a", "681d222f91b12b00e9a4217b80beaa11d032f540", "f781e50caa43be13c5ceb13f4ccc2abc7d1507c5", "7bce4f4e85a3bfcd6bfb3b173b2769b064fce0ed", "763158cef9d1e4041f24fce4cf9d6a3b7a7f08ff", "e51f1ee5535017e10a5f77100ff892509ec6b221", "d7b8f285b0701ba7b1a11d1c7dd3d1e7e304083f", "179564f157a96787b1b3380a9f79701e3394013d", "1a7a17c4f97c68d68fbeefee1751d349b83eb14a", "7ed5af241061a6d88e0632a51a91d59627b00c34", "a7da7e5a6a4b53bf8736c470ff8381a654e8c965", "7c54240c23d42703ddc85089d167f4985614cc3a", "9696b172d66e402a2e9d0a8d2b3f204ad8b98cc4", "ad50f6899103eff0ee4504e539c38eb965fd1309", "ee6e4324123b99d94a7a23d9bddf026f39903693", "87552622efd0e85c2a71d4d2590e53d45f021dbf", "fcb276874cd932c8f6204f767157420500c64bd0", "bf00071a7c4c559022272ca5d39e07f727ebb479", "3f623bb0c9c766a5ac612df248f4a59288e4d29f", "accbd6cd5dd649137a7c57ad6ef99232759f7544", "4a484d97e402ed0365d6cf162f5a60a4d8000ea0", "9a276c72acdb83660557489114a494b86a39f6ff", "f1e44e64957397d167d13f8f551cae99e5c16c75", "677ebde61ba3936b805357e27fce06c44513a455", "eeb6d084f9906c53ec8da8c34583105ab5ab8284", "4d6c3a3f9410ca35eb3389ec7088f5e2c16ec3ea", "9faeb2f8cd7e1deac432ab8e4294cd504976a293", "27173d0b9bb5ce3a75d05e4dbd8f063375f24bb5", "a158c1e2993ac90a90326881dd5cb0996c20d4f3", "ecca2a2b84ea01ea425b8d2d9f376f15a295a7f5", "411ee9236095f8f5ca3b9ef18fd3381c1c68c4b8", "ac2e166c76c103f17fdea2b4ecb137200b8d4703", "e3a70f8ee84af6372b482c0b8b6e8e553dd0e1e5", "11f8d0a54e55c5e6537eef431cd548fa292ef90b", 
"0d3882b22da23497e5de8b7750b71f3a4b0aac6b", "312b2566e315dd6e65bd42cfcbe4d919159de8a1", "052fb35f731680d9d4e7d89c8f70f14173efb015", "3410136b86b813b075a258842450835906d58600", "6ee64c19efa89f955011531cde03822c2d1787b8", "86f8e6310d114bb24deb971e8bc7089df6ac3b57", "5ca23ceb0636dfc34c114d4af7276a588e0e8dac", "33a1a049d15e22befc7ddefdd3ae719ced8394bf", "158e32579e38c29b26dfd33bf93e772e6211e188", "45e043dffc57a9070f483ac4aec2c5cd2cec22cb", "3c2b6282811c3077b7807d84068e6a879d163854", "99e0c03686f7bc9d7add6cff39a941a047c3600a", "02e668f9b75f4a526c6fdf7268c8c1936d8e6f09", "6dcf6b028a6042a9904628a3395520995b1d0ef9", "0b58b3a5f153f653c138257426bf8d572ae35a67", "e1c59e00458b4dee3f0e683ed265735f33187f77", "8dd3f05071fd70fb1c349460b526b0e69dcc65bf", "dab51ce14f59d552c0fc5c13b37ca64cae8d0164", "5b4bbba68053d67d12bd3789286e8a9be88f7b9d", "44eb4d128b60485377e74ffb5facc0bf4ddeb022", "e59813940c5c83b1ce63f3f451d03d34d2f68082", "176f26a6a8e04567ea71677b99e9818f8a8819d0", "02c38fa9a8ada6040ef21de17daf8d5e5cdc60c7", "d893f75206b122973cdbf2532f506912ccd6fbe0", "df71a00071d5a949f9c31371c2e5ee8b478e7dc8", "df1a10668eaad727ec3fdf0d5df405bbe29392c9", "d8e5d94c3c8688f0ca0ee656c79847c7df04c77d", "37ef18d71c1ca71c0a33fc625ef439391926bfbb", "d7c87f4ca39f79d93c954ffacac32bc6eb527e2c", "74d3ff8324e02503c18fb2566ed29e2e22ce0d1b", "c9e955cb9709f16faeb0c840f4dae92eb875450a", "6ee2ea416382d659a0dddc7a88fc093accc2f8ee", "52885fa403efbab5ef21274282edd98b9ca70cbf", "cef6cffd7ad15e7fa5632269ef154d32eaf057af", "e57108607d94aa158eb22ae50540ae6080e48d4b", "1467c4ab821c3b340abe05a1b13a19318ebbce98", "2574860616d7ffa653eb002bbaca53686bc71cdd", "fa9610c2dc7e2a79e0096ac033b11508d8ae7ed7", "c2c5206f6a539b02f5d5a19bdb3a90584f7e6ba4", "27b1670e1b91ab983b7b1ecfe9eb5e6ba951e0ba", "acff2dc5d601887741002a78f8c0c35a799e6403", "f3fed71cc4fc49b02067b71c2df80e83084b2a82", "a75edf8124f5b52690c08ff35b0c7eb8355fe950", "6159908dec4bc2c1102f416f8a52a31bf3e666a4", "c180f22a9af4a2f47a917fd8f15121412f2d0901", 
"c23bd1917badd27093c8284bd324332b8c45bfcf", "184fc019bbec7f07bd9e34406f95f07faf7ed96f", "7f268f29d2c8f58cea4946536f5e2325777fa8fa", "13afc4f8d08f766479577db2083f9632544c7ea6", "363f540dc82ba8620262a04a67cfd6d3c85b0582", "cbba7d753caf82fd5325a1b380146fcfe0c72e32", "9abf6d56a7d336bc58f4e3328d2ee807032589f1", "1eba6fc35a027134aa8997413647b49685f6fbd1", "694bdadb720d4237b701a5c8c10417843ed89c6f", "d9eed86e53ce5f7cba379fe77bbefb42e83c0d88", "900207b3bc3a4e5244cae9838643a9685a84fee0", "cd3005753012409361aba17f3f766e33e3a7320d", "3645d85ccd5bb7ce5df8d24e6ddb358eb1656df5", "72d110df78a7931f5f2beaa29f1eb528cf0995d3", "b1f4423c227fa37b9680787be38857069247a307", "443f4421e44d4f374c265e6f2551bf9830de5597", "047ce307ad0c871bc2c9a5c1e4649cefae2ba50d", "25d3e122fec578a14226dc7c007fb1f05ddf97f7", "5db075a308350c083c3fa6722af4c9765c4b8fef", "f59c58b446e3f2eca4147241a64c051311a7c57a", "00d0f2ec2036fb26ffcf882eb0aa47da0693192e", "bb6ac4e26499dea5bdedb05b269f40f56247b4c6", "b4ee1b468bf7397caa7396cfee2ab5f5ed6f2807", "f740bac1484f2f2c70777db6d2a11cf4280081d6", "2e9e07b871e7703c60d6849282174d99977ccea7", "1ef5ce743a44d8a454dbfc2657e1e2e2d025e366", "b3f3d6be11ace907c804c2d916830c85643e468d", "eedb2c34c36017b9c5aa6ce8bff2ab152e713cee", "b3330adb131fb4b6ebbfacce56f1aec2a61e0869", "97ea4b846be6598e2ee8e09134d1b3f966c4d0df", "60284c37249532fe7ff6b14834a2ae4d2a7fda02", "b728e7db6e5559a77dc59381bfb8df96d482a721", "2045fe2f21c30f364d6e699ea0bf0ea21d7f460e", "72ffcc5b654b2468b9eff761279b29164f1df5d9", "26947c3ead54e571286fdea25f1fc4d121817850", "237eba4822744a9eabb121fe7b50fd2057bf744c", "3749eb18758e0f8e97b086e6b36a98fda6e6f945", "455204fa201e9936b42756d362f62700597874c4", "d65b82b862cf1dbba3dee6541358f69849004f30", "66533107f9abdc7d1cb8f8795025fc7e78eb1122", "a92e24c8c53e31fc444a13bd75b434b7207c58f1", "9110c589c6e78daf4affd8e318d843dc750fb71a", "b7a0c70a320c1ac3e92f4bf0b50a7d8ceb757c41", "a76969df111f9ee9f0b898b51ad23a721d289bdc", "38eea307445a39ee7902c1ecf8cea7e3dcb7c0e7", 
"b71d1aa90dcbe3638888725314c0d56640c1fef1", "fad895771260048f58d12158a4d4d6d0623f4158", "0abc13166e4a098fc34d4c708f3349fdd8f6f4c6", "1b5acd1736f18e4fa202d88a80f774c6deea5733", "496d62741e8baf3859c24bb22eaccd3043322126", "43bb2b58f906262035ef61e41768375bc8d99ae3", "43bb20ccfda7b111850743a80a5929792cb031f0", "86d1fbaecd02b44309383830e6d985dc09e786aa", "0aa9872daf2876db8d8e5d6197c1ce0f8efee4b7", "86904aee566716d9bef508aa9f0255dc18be3960", "47ca2df3d657d7938d7253bed673505a6a819661", "d569c3e62f471aa75ed53e631ec05c1a3d594595", "23ba9e462151a4bf9dfc3be5d8b12dbcfb7fe4c3", "93cbb3b3e40321c4990c36f89a63534b506b6daf", "0c66d6162695ecbfc248074f58ced10d70a359ac", "1aeb7bffb66e278b4b859538a70790bbe2fc9cdc", "cce2f036d0c5f47c25e459b2f2c49fa992595654", "a1a5143a962ab3dc6f2a0d5300cde71d9f087404", "cbbd9880fb28bef4e33da418a3795477d3a1616e", "dd3181c229819679186056cdfe94a772929ca758", "2983efadb1f2980ab5ef20175f488f77b6f059d7", "383ff2d66fecdc2fd02a31ac1fa392f48e578296", "3ba8f8b6bfb36465018430ffaef10d2caf3cfa7e", "1b79628af96eb3ad64dbb859dae64f31a09027d5", "aeeea6eec2f063c006c13be865cec0c350244e5b", "f2f731feb9d376ac50b3347a93e73a0d6528cdd9", "2e6776cd582c015b46faf616f29c98ce9cff51a2", "782eee555067b2d6d24db87775e1ded5fb047491", "261a839307d5f6facf7dcb1cbf7beb064c3bc044", "6eece104e430829741677cadc1dfacd0e058d60f", "cbdcc28d36f1135d235b5067383b25dcac5d2ff3", "e4bf70e818e507b54f7d94856fecc42cc9e0f73d", "c3390711f5ce6f5f0728ef88c54148bf9d8783a2", "7d61b70d922d20c52a4e629b09465076af71ddfd", "675b1fd2aaebe9c62be6b22b9ac6d278193cc581", "982f5c625d6ad0dac25d7acbce4dabfb35dd7f23", "dd8d09eab82d7ec4457317d9f9427122d2ffb649", "2d25045ec63f9132371841c0beccd801d3733908", "21ec41a6ee3c655cf54c6db659d56480fc76e742", "a703d51c200724517f099ee10885286ddbd8b587", "e0cc2a9fe6b5086c55fdbf0021aca3dc1a77a1ca", "6da446b59944db9b3d7412ad0efc6c189812d56a", "cef841f27535c0865278ee9a4bc8ee113b4fb9f3", "857544746a1d1071739d98718df51936a3488737", "8f99f7ccb85af6d4b9e015a9b215c529126e7844", 
"3409aa0ae519ee18043e347e60d85e53e452650a", "3d68cedd80babfbb04ab197a0b69054e3c196cd9", "86bbead2fb5b77ceff7994be9474648672f244d9", "a3a97bb5131e7e67316b649bbc2432aaa1a6556e", "6b06b79ad1f1907e21380083b976b24a89a0f743", "4ffd744a5f079c2d65f36e3ee0979b978f522a13", "f6311d6b3f4d3bd192d866d2e898c30eea37d7d5", "268c4bb54902433bf00d11391178a162e5d674c9", "3403cb92192dc6b2943d8dbfa8212cc65880159e", "9a84588fe7e758cfbe7062686a648fab787fc32f", "f14403d9d5fbc4c6e8aeb7505b5d887c50bad8a4", "638e0d6f9f5d714d8a0edcf65297e8735b30db71", "ea6f5c8e12513dbaca6bbdff495ef2975b8001bd", "d4ec62efcc631fa720dfaa1cbc5692b39e649008", "41971dfbf404abeb8cf73fea29dc37b9aae12439", "01cc8a712e67384f9ef9f30580b7415bfd71e980", "d9bad7c3c874169e3e0b66a031c8199ec0bc2c1f", "bc866c2ced533252f29cf2111dd71a6d1724bd49", "56bcc89fb1e05d21a8b7b880c6b4df79271ceca5", "62c2d21f78fb89a11b436ab6ca9acd9abca145be", "646c38494aa960c1c120c26619473f5968e5dc34", "6cd762e7cb1301abd0ddbb265dd9c7661ffc0458", "eb3c45e78acee0824c8f7d997c6104d74e7213a8", "9990e0b05f34b586ffccdc89de2f8b0e5d427067", "0cc96359b1edba28d33fe9e663079c5674744672", "9c2f20ed168743071db6268480a966d5d238a7ee", "583e0d218e1e7aaf9763a5493e7c18c2b8dd7464", "3fac7c60136a67b320fc1c132fde45205cd2ac66", "c74aba9a096379b3dbe1ff95e7af5db45c0fd680", "08ff81f3f00f8f68b8abd910248b25a126a4dfa4", "0dd86db7970caf6614965770e0143864b1d733fc", "e96540252f2f83e394012d653452411efb9f744f", "1eeb39d618f5fab243dd07b955a8e0e722f6dfdb", "03167776e17bde31b50f294403f97ee068515578", "352c53e56c52a49d33dcdbec5690c2ba604b07d0", "5c473cfda1d7c384724fbb139dfe8cb39f79f626", "bddc822cf20b31d8f714925bec192c39294184f7", "978a219e07daa046244821b341631c41f91daccd", "44aeda8493ad0d44ca1304756cc0126a2720f07b", "cc8e378fd05152a81c2810f682a78c5057c8a735", "98af221afd64a23e82c40fd28d25210c352e41b7", "e726acda15d41b992b5a41feabd43617fab6dc23", "ec28217290897a059348dcdf287540a2e2c68204", "197eafb6abb6b7d2813eec0891b143e27fc57386", "62750d78e819d745b9200b0c5c35fcae6fb9f404", 
"90f4b20f4b7115cb84dda22e5e4eb9c50d7fddce", "bd0201b32e7eca7818468f2b5cb1fb4374de75b9", "650bfe7acc3f03eb4ba91d9f93da8ef0ae8ba772", "053ee4a4793f54b02dfabde5436fd7ee479e79eb", "1a71f9af98228f4d2b15cfaf415321813e29b087", "429d4848d03d2243cc6a1b03695406a6de1a7abd", "dbd958ffedc3eae8032be67599ec281310c05630", "dea409847d52bb0ad54bf586cb0482a29a584a7e", "2a9b398d358cf04dc608a298d36d305659e8f607", "00af9945a3401bdad3cffa89f7e5a15660399282", "a5b6a3234e15343d2e5417cff46c0a5f0943521e", "f6f2a212505a118933ef84110e487551b6591553", "6eb1b5935b0613a41b72fd9e7e53a3c0b32651e9", "8184a92e1ccc7fdeb4a198b226feb325c63d6870", "924b14a9e36d0523a267293c6d149bca83e73f3b", "34301fbf4624139a40176dbde6f5954b2df6de7b", "41b38da2f4137c957537908f9cb70cbd2fac8bc1", "022ec7d1642727b2cc3d9a9d7999ca84a280443f", "58538cc418bf41197fad4fc4ee2449b2daeb08b1", "b7128e0fe18dcb42e8a2ac5cf6794f64a8e37bd0", "48de3ca194c3830daa7495603712496fe908375c", "640e12837241d52d04379d3649d050ee3760048c", "6345c0062885b82ccb760c738a9ab7fdce8cd577", "948f35344e6e063ffc35f10c547d5dd9204dee4e", "998244a44f90b3b569f9c93226df70239818ead9", "111d0b588f3abbbea85d50a28c0506f74161e091", "fb3aaf18ea07b30d1836e7cf2ab9fa898627fe93", "516f8728ad1d4f9f2701a2b5385f8c8e71b9d356", "fb557b79157a6dda15f3abdeb01a3308528f71f2", "18b344b5394988544c386783e7bb8e73e0466e0e", "ad9cb522cc257e3c5d7f896fe6a526f6583ce46f", "6dd052df6b0e89d394192f7f2af4a3e3b8f89875", "a52c72cd8538c62156aaa4d7e5c54946be53b9bb", "08ae100805d7406bf56226e9c3c218d3f9774d19", "5865e824e3d8560e07840dd5f75cfe9bf68f9d96", "3bf690a6e2751b23bd8ae65c2ad133b249840bf9", "8323af714efe9a3cadb31b309fcc2c36c8acba8f", "ceba8ca45bad226c401a509e6b8ccbf31361b0c9", "2ac7bb3fb014d27d3928a9b4bc1bf019627e0c1a", "5d44c675addcb6e74cbc5a9c48df0d754bdbcd98", "405cf40f3ce74210f7e9862b2b828ce002b409ed", "d522c162bd03e935b1417f2e564d1357e98826d2", "22717ad3ad1dfcbb0fd2f866da63abbde9af0b09", "3e4b38b0574e740dcbd8f8c5dfe05dbfb2a92c07", "cfdae1410575937f8557cbd379a6232974128d63", 
"75e9a141b85d902224f849ea61ab135ae98e7bfb", "b5ae8b69677fb962421fe7072f1e842e71f3bea5", "516a27d5dd06622f872f5ef334313350745eadc3", "fdf533eeb1306ba418b09210387833bdf27bb756", "e1c50cf0c08d70ff90cf515894b2b360b2bc788b", "feb0bd4ad219dc5005da84561b97ae53f4207440", "01729cb766b1016bac217a6a6cf24bbde19f56c8", "1e64b2d2f0a8a608d0d9d913c4baee6973995952", "fde611bf25a89fe11e077692070f89dcdede043a", "6592dcd17fc4df707020904cf5ff0927684f9f23", "b5bda4e1374acc7414107cde529ad8b3263fae4b", "d33fcdaf2c0bd0100ec94b2c437dccdacec66476", "eefdb69ac2c461e7791603d0f8c02ff3c8600adc", "6def8656831687bc9790015931bccf884e38358a", "5ab96ace21bf54625f3d18ea11801f540519bd3a", "50eb2ee977f0f53ab4b39edc4be6b760a2b05f96", "7dce05b7765541b3fb49a144fb39db331c14fdd1", "7c119e6bdada2882baca232da76c35ae9b5277f8", "b8048a7661bdb73d3613fde9d710bd45a20d13e7", "a5e436bb88ff28c68f981308faefd6eee48b9c8b", "cf98565a19ec05a63dbaf650660b7c3f72de7b2b", "81146c567fa5a3c83778c1c940780d00706fa2bf", "816bd8a7f91824097f098e4f3e0f4b69f481689d", "a3eab933e1b3db1a7377a119573ff38e780ea6a3", "7975f12187a7686d861054649845ccc634c3b00f", "0cb7e4c2f6355c73bfc8e6d5cdfad26f3fde0baf", "bff77a3b80f40cefe79550bf9e220fb82a74c084", "b69e7e2a7705a58a0e3f1b80ae542907b89ce02e", "ec983394f800da971d243f4143ab7f8421aa967c", "1134a6be0f469ff2c8caab266bbdacf482f32179", "8c955f3827a27e92b6858497284a9559d2d0623a", "af7553d833886663550ce83b087a592a04b36419", "c3cfbd03efca980431e17fcbc507962377821681", "067fe74aec42cb82b92cf6742c7cfb4a65f16951", "12226bca7a891e25b7d1e1a34a089521bba75731", "497bf2df484906e5430aa3045cf04a40c9225f94", "3a6334953cd2775fab7a8e7b72ed63468c71dee7", "e0bfcf965b402f3f209f26ae20ee88bc4d0002ab", "99a1180c3d39532efecfc5fa251d6893375c91a1", "dd0760bda44d4e222c0a54d41681f97b3270122b", "6acc92f30c7a141384b9b1bbec8dffe16b08a438", "b13b101b6197048710e82f044ad2eda6b93affd8", "b6c83e6706a9931a2670bc686485d76b67cb92ea", "800cbbe16be0f7cb921842d54967c9a94eaa2a65", "a45e6172713a56736a2565ddea9cb8b1d94721cd", 
"7c9a65f18f7feb473e993077d087d4806578214e", "5dce578c8bc819592c9ec7bfab6302bbcd9a3f3d", "966cf4ca224e239a7192f9e79b60cc88aa604e27", "c34e48d637705ffb52360c2afb6b03efdeb680bf", "334e65b31ad51b1c1f84ce12ef235096395f1ca7", "0b0eb562d7341231c3f82a65cf51943194add0bb", "ab6886252aea103b3d974462f589b4886ef2735a", "b3658514a0729694d86a8b89c875a66cde20480c", "96ab0367d0112b6092cc130c330c8c11c2eb8238", "65babb10e727382b31ca5479b452ee725917c739", "0f8116b631c17f7adf55df3faafc6f2c316599f6", "5e6fc99d8f5ebaab0e9c29bc0969530d201e0708", "1f41bf5e8b8562ac7ef0013f4d0cf1c9e1a431f9", "5642bafa7955b69f05c11230151cd59fcbe43b8e", "c914d2ba06ec3fd1baa0010dcc4d16c7c34fc225", "43fca653880f4e4d238c73d864e964475e4b90c8", "0cbc4dcf2aa76191bbf641358d6cecf38f644325", "84f86f8c559a38752ddfb417e58f98e1f8402f17", "91d0e8610348ef4d5d4975e6de99bb2d429af778", "04f55f81bbd879773e2b8df9c6b7c1d324bc72d8", "0eb077a3e227b19f032f980d3a3206e4ae15e429", "c043f8924717a3023a869777d4c9bee33e607fb5", "111a9645ad0108ad472b2f3b243ed3d942e7ff16", "4a4da3d1bbf10f15b448577e75112bac4861620a", "419a6fca4c8d73a1e43003edc3f6b610174c41d2", "f96b3122f66c01cb78643d7e1b412e1bae16f2c4", "ae62c0a4b74ce672e8103dbf6d344d82c59f216c", "8fd9c22b00bd8c0bcdbd182e17694046f245335f", "367a786cfe930455cd3f6bd2492c304d38f6f488", "18dd3867d68187519097c84b7be1da71771d01a3", "3f63f9aaec8ba1fa801d131e3680900680f14139", "a5b9c6aa52f91092b5a8ab04ed1f7b60c0ea5260", "6f75697a86d23d12a14be5466a41e5a7ffb79fad", "561bbc758f995894f43351b4267abf9748890705", "27cccf992f54966feb2ab4831fab628334c742d8", "8706c3d49d1136035f298041f03bb70dc074f24d", "87610276ccbc12d0912b23fd493019f06256f94e", "59e75aad529b8001afc7e194e21668425119b864", "9eeada49fc2cba846b4dad1012ba8a7ee78a8bb7", "3d6ee995bc2f3e0f217c053368df659a5d14d5b5", "9dcc6dde8d9f132577290d92a1e76b5decc6d755", "3074da78949dc2d710892c66904b61d9ff684e50", "41781474d834c079e8fafea154d7916b77991b15", "d22785eae6b7503cb16402514fd5bd9571511654", "a3017bb14a507abcf8446b56243cfddd6cdb542b", 
"890103cb8d3d869298421da817d0a181487ec79a", "0d735e7552af0d1dcd856a8740401916e54b7eee", "b84dde74dddf6a3281a0b22c68999942d2722919", "14bdd23ea8f4f6d7f4c193e5cbb0622362e12ae1", "8c37bd06e1a637c6f249dcd1d2c4bc9589ae24b3", "52c59f9f4993c8248dd3d2d28a4946f1068bcbbe", "8323529cf37f955fb3fc6674af6e708374006a28", "aeb36fac7516753a14c3c690f352de78e70f8c6e", "61831364ddc8db869618f1c7f0ad35ab2ab6bcf7", "9ccaa13a577b20e88420d0a4b8c9545d5560261d", "8b4124bb68e5b3e6b8b77888beae7350dc594a40", "965c4a8087ae208c08e58aaf630ad412ac8ce6e2", "55c40cbcf49a0225e72d911d762c27bb1c2d14aa", "fb915bcc1623cdf999c0e95992c0e0cf85e64d8e", "e26a7e343fe109e2b52d1eeea5b02dae836f3502", "b55d0c9a022874fb78653a0004998a66f8242cad", "a33f20773b46283ea72412f9b4473a8f8ad751ae", "831226405bb255527e9127b84e8eaedd7eb8e9f9", "a7a5d9a2dece15ddbab77b7ecc81294cfa1fafdb", "334166a942acb15ccc4517cefde751a381512605", "00d931eccab929be33caea207547989ae7c1ef39", "58b8588c01196070674ceabe5366b20f73c2912d", "cf6851c24f489dabff0238e01554edea6aa0fc7c", "3753b9fcf95b97e2baf952993905cd6dfa8561cb", "32e4fc2f0d9c535b1aca95aeb5bcc0623bcd2cf2", "bf1e0279a13903e1d43f8562aaf41444afca4fdc", "f935225e7811858fe9ef6b5fd3fdd59aec9abd1a", "9f6d04ce617d24c8001a9a31f11a594bd6fe3510", "3083bd7a442af6a1d72cdc04ae1ad7c30697a4e8", "62f60039a95692baaeaae79a013c7f545e2a6c3d", "7ab238c23c6640fe0b23d635d6b5fc38fa4a3b46", "eb2fe6dae7d4f3866948569f94e6ee23efaec49e", "8dffbb6d75877d7d9b4dcde7665888b5675deee1", "ae7604b1840753e9c2e1ab7a97e02f91a9d81860", "ba83b28ac5ce92ef8437fdd499132823f487ff83", "cba45a87fc6cf12b3b0b6f57ba1a5282ef7fee7a", "60bdff71e241f9afc411221bd20aaebb4608576b", "59f788c69c2ce520fd6f0b80d01aca72f7f8d859", "84fe5b4ac805af63206012d29523a1e033bc827e", "7cffcb4f24343a924a8317d560202ba9ed26cd0b", "3c4290e6adce10cb809f5adec9559220eac79241", "80774ae06a562555cc1336da6004618cb9ea170c", "f249b2de6eacb5598abf3046fbb2efb9f15f3fcc", "d2df37ecfbf914d5b81e2e5e342e3907c6f55a14", "21732ecbba1a75d54e8b90c1804f0b77953466e6", 
"bcbebb62125751b9957fb5ab765702a445fb8146", "5119ca96eeca49a0b1a95cecc798abdb042beb38", "334ca0a146d466cb1ebc38d13a2dc64e371e979c", "45a4030b30bb8fe18611476acfe2f1c8b4eff322", "313e508202a6f4f2fc40a78b6237e52c2c0d22a2", "518c529fea2a362a93d2ad0d919b8d820c4cc0ea", "b136b5f3fb84867ba89ad5e2ef3266e09d54e232", "6a7da1e542123a5999408224356a7ac61400d868", "f988aa059bc3589f3cbddd0b87cea2588f623cc8", "372fefe66aa693e271ec6298fac1695208f36aee", "de005cf88856b4b56e8eea242fdb16dabec6b4a0", "e996da9beadff6f6694540c6b1794312f814dbae", "2e56209ed179be641e6df5efd11be8b3d54a62e9", "d4dabea2a8e3db01846bd14c03ab1be4884ca75a", "91e58c39608c6eb97b314b0c581ddaf7daac075e", "4c170a0dcc8de75587dae21ca508dab2f9343974", "7c1cfab6b60466c13f07fe028e5085a949ec8b30", "4c4236b62302957052f1bbfbd34dbf71ac1650ec", "be86d88ecb4192eaf512f29c461e684eb6c35257", "00b08d22abc85361e1c781d969a1b09b97bc7010", "14a5feadd4209d21fa308e7a942967ea7c13b7b6", "11a210835b87ccb4989e9ba31e7559bb7a9fd292", "70580ed8bc482cad66e059e838e4a779081d1648", "136aae348c7ebc6fd9df970b0657241983075795", "f16599e4ec666c6390c90ff9a253162178a70ef5", "1ce4587e27e2cf8ba5947d3be7a37b4d1317fbee", "87a39f5002ef2de3143d1ea96ae19e002c44345b", "28d06fd508d6f14cd15f251518b36da17909b79e", "a2b9cee7a3866eb2db53a7d81afda72051fe9732", "1033ca56c7e88d8b3e80546848826f572c4cd63e", "5811944e93a1f3e35ece7a70a43a3de95c69b5ab", "1ce3a91214c94ed05f15343490981ec7cc810016", "d9c4586269a142faee309973e2ce8cde27bda718", "adce9902dca7f4e8a9b9cf6686ec6a7c0f2a0ba6", "2bcd9b2b78eb353ea57cf50387083900eae5384a", "71e6a46b32a8163c9eda69e1badcee6348f1f56a", "aafb271684a52a0b23debb3a5793eb618940c5dd", "0fad544edfc2cd2a127436a2126bab7ad31ec333", "61e9e180d3d1d8b09f1cc59bdd9f98c497707eff", "24496e4acfb8840616b2960b0e2c80cc4c9e5a87", "8ba67f45fbb1ce47a90df38f21834db37c840079", "37866fea39deeff453802cde529dd9d32e0205a5", "76673de6d81bedd6b6be68953858c5f1aa467e61", "7c42371bae54050dbbf7ded1e7a9b4109a23a482", "176e5abddb87d029f85f60d1bbff67c66500e8c3", 
"5da827fe558fb2e1124dcc84ef08311241761726", "03264e2e2709d06059dd79582a5cc791cbef94b1", "17189cfedbdbd219849b8e7f8cf0293d49465f9c", "852ff0d410a25ebb7936043a05efe2469c699e4b", "7c57ac7c9f84fbd093f6393e2b63c18078bf0fdf", "17b46e2dad927836c689d6787ddb3387c6159ece", "c9527df51e63b56c61cbf16f83d1a3c5c2c82499", "4c72a51a7c7288e6e17dfefe4f87df47929608e7", "df2841a1d2a21a0fc6f14fe53b6124519f3812f9", "7a3d46f32f680144fd2ba261681b43b86b702b85", "1ed49161e58559be399ce7092569c19ddd39ca0b", "5a34a9bb264a2594c02b5f46b038aa1ec3389072", "0b605b40d4fef23baa5d21ead11f522d7af1df06", "e98551055bdcf8e25e07f4ffdbf39d0a4a57bffc", "97c554fbcf783d554c4f6c2f3fcc0a0f9dba0759", "729a9d35bc291cc7117b924219bef89a864ce62c", "2f17c0514bb71e0ca20780d71ea0d50ff0da4938", "feeb0fd0e254f38b38fe5c1022e84aa43d63f7cc", "4934d44aa89b6d871eb6709dd1d1eebf16f3aaf1", "d383ba7bbf8b7b49dcef9f8abab47521966546bb", "03d1d0a665e358863ff4de9ee7d78f64edd7e756", "9d24179aa33a94c8c61f314203bf9e906d6b64de", "ddd0f1c53f76d7fc20e11b7e33bbdc0437516d2b", "06e7e99c1fdb1da60bc3ec0e2a5563d05b63fe32", "2df4d0c06f4f68060cecbbb8e2088d9c6b20d04f", "9ce4541d21ee3511bf3dc55bc3cd01222194d95a", "b40290a694075868e0daef77303f2c4ca1c43269", "c4f1fcd0a5cdaad8b920ee8188a8557b6086c1a4", "484bac2a9ff3a43a6f85d109bbc579a4346397f5", "3b092733f428b12f1f920638f868ed1e8663fe57", "21d5c838d19fcb4d624b69fe9d98e84d88f18e79", "24f9248f01df3020351347c2a3f632e01de72090", "00b29e319ff8b3a521b1320cb8ab5e39d7f42281", "098a1ccc13b8d6409aa333c8a1079b2c9824705b", "0d4d8ce029deead6f2ce7075047aa645299ddd41", "6a67e6fbbd9bcd3f724fe9e6cecc9d48d1b6ad4d", "7eaa97be59019f0d36aa7dac27407b004cad5e93", "95289007f2f336e6636cf8f920225b8d47c6e94f", "5b6f0a508c1f4097dd8dced751df46230450b01a", "21104bcf07ef0269ab133471a3200b9bf94b2948", "8202da548a128b28dd1f3aa9f86a0523ec2ecb26", "8b7191a2b8ab3ba97423b979da6ffc39cb53f46b", "1d7ecdcb63b20efb68bcc6fd99b1c24aa6508de9", "d930c3d92a075d3f3dd9f5ea1a8f04e0d659b22b", "0fd3a7ee228bbc3dd4a111dae04952a1ee58a8cd", 
"2ffcd35d9b8867a42be23978079f5f24be8d3e35", "1f2d12531a1421bafafe71b3ad53cb080917b1a7", "69b2a7533e38c2c8c9a0891a728abb423ad2c7e7", "50d15cb17144344bb1879c0a5de7207471b9ff74", "3ab036b680e8408ec74f78a918f3ffbf6c906d70", "4328933890f5a89ad0af69990926d8484f403e4b", "2a6bba2e81d5fb3c0fd0e6b757cf50ba7bf8e924", "366d20f8fd25b4fe4f7dc95068abc6c6cabe1194", "10e4172dd4f4a633f10762fc5d4755e61d52dc36", "fdbe7c520568d9a32048270d2c87113c635dc7e6", "05ad478ca69b935c1bba755ac1a2a90be6679129", "1a53ca294bbe5923c46a339955e8207907e9c8c6", "9213a415d798426c8d84efc6d2a69a2cbfa2af84", "2563fc1797f187e2f6f9d9f4387d4bcadd3fbd02", "79dd787b2877cf9ce08762d702589543bda373be", "eef725f4130ee326954e84e5f4ddf487da63c94e", "ae753fd46a744725424690d22d0d00fb05e53350", "3830047081ef4bc787f16edf5b244cb2793f75e5", "8aff9c8a0e17be91f55328e5be5e94aea5227a35", "504d2675da7a56a36386568ee668938df6d82bbe", "55a158f4e7c38fe281d06ae45eb456e05516af50", "29e749b14ca6e46dc7b235e879f83b7b132dda8e", "3c1f5580a66c9624c77f27ab8e4cf0d1b3d9d171", "7862f646d640cbf9f88e5ba94a7d642e2a552ec9", "c5765590c294146a8e3c9987d394c0990ab6a35b", "b34fdab6864782ce60fd90d09f5d886bd83f84f5", "fcd77f3ca6b40aad6edbd1dab9681d201f85f365", "6c92d87c84fa5e5d2bb5bed3ef38168786bacc49", "7808937b46acad36e43c30ae4e9f3fd57462853d", "312afff739d1e0fcd3410adf78be1c66b3480396", "2e5d173ee0d1d7f88c335ade6a7b879b2d987ab4", "d59404354f84ad98fa809fd1295608bf3d658bdc", "0de1450369cb57e77ef61cd334c3192226e2b4c2", "2163c401f6345350e38b2ce6f39a42871fb22c84", "06466276c4955257b15eff78ebc576662100f740", "2961e14c327341d22d5f266a6872aa174add8ac4", "3958db5769c927cfc2a9e4d1ee33ecfba86fe054", "88e090ffc1f75eed720b5afb167523eb2e316f7f", "008528d5e27919ee95c311266041e4fb1711c254", "16c884be18016cc07aec0ef7e914622a1a9fb59d", "58b0be2db0aeda2edb641273fe52946a24a714c3", "b185f0a39384ceb3c4923196aeed6d68830a069f", "8b2e3805b37c18618b74b243e7a6098018556559", "3a0796161d838f9dc51c0ee5f700e668fa206db3", "28bc378a6b76142df8762cd3f80f737ca2b79208", 
"b7b421be7c1dcbb8d41edb11180ba6ec87511976", "329b2781007604652deb72139d14315df3bc2771", "65817963194702f059bae07eadbf6486f18f4a0a", "52887969107956d59e1218abb84a1f834a314578", "6d670eb172355d46034a831d8dc569e17ab14d94", "3690af0af51a067750f664c08e48b486d1cd476d", "424259e9e917c037208125ccc1a02f8276afb667", "a30869c5d4052ed1da8675128651e17f97b87918", "794c0dc199f0bf778e2d40ce8e1969d4069ffa7b", "7f4bc8883c3b9872408cc391bcd294017848d0cf", "b81cae2927598253da37954fb36a2549c5405cdb", "b13bf657ca6d34d0df90e7ae739c94a7efc30dc3", "28b061b5c7f88f48ca5839bc8f1c1bdb1e6adc68", "00075519a794ea546b2ca3ca105e2f65e2f5f471", "f7dcadc5288653ec6764600c7c1e2b49c305dfaa", "62648f91e38b0e8f69dded13b9858bd3a86bb6ed", "eba4cfd76f99159ccc0a65cab0a02db42b548d85", "23ebbbba11c6ca785b0589543bf5675883283a57", "bd2d7c7f0145028e85c102fe52655c2b6c26aeb5", "ab03a1656d9e45c80379512161f6c90dfbb0b6b3", "a764cba765648c6e36782b02393ea2eed5cd69c7", "30cc1ddd7a9b4878cca7783a59086bdc49dc4044", "19d4855f064f0d53cb851e9342025bd8503922e2", "4df3143922bcdf7db78eb91e6b5359d6ada004d2", "6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4", "034addac4637121e953511301ef3a3226a9e75fd", "9f094341bea610a10346f072bf865cb550a1f1c1", "78f438ed17f08bfe71dfb205ac447ce0561250c6", "759a3b3821d9f0e08e0b0a62c8b693230afc3f8d", "52c91fcf996af72d191520d659af44e310f86ef9", "4c523db33c56759255b2c58c024eb6112542014e", "08a98822739bb8e6b1388c266938e10eaa01d903", "3d97f739ae76c8db1146da4aaeb0dc1ef3d31c33", "346c9100b2fab35b162d7779002c974da5f069ee", "1939168a275013d9bc1afaefc418684caf99ba66", "6014eeb333998c2b2929657d233ebbcb1c3412c9", "0716e1ad868f5f446b1c367721418ffadfcf0519", "cff911786b5ac884bb71788c5bc6acf6bf569eff", "58a3f4d9e1611e29e6378bc2d7cbad7600fe806e", "670531f3925c1ee6921f1550a988a034db727c3b", "1ae642a8d756c6aa7bc049c5c89d5072d8749637", "5c2e264d6ac253693469bd190f323622c457ca05", "8b1db0894a23c4d6535b5adf28692f795559be90", "0b45aeb0aede5e0c19b508ede802bdfec668aefd", "3411ef1ff5ad11e45106f7863e8c7faf563f4ee1", 
"94a7c97d1e3eb5dbfb20b180780451486597a9be", "65630863c572a54e1390d4b7f4c29ed5e152a93b", "e38c7d4f8a4399f402ab6bb364ec662fe897bed1", "00732bed67ca05a601afe8376b5121545d5c7450", "d9b69093a82ee7cb9ad499c76c9b0d30aa377454", "237734e3fd3abab005b0b97d61416ee16105f902", "832ed998ff123d4e0f86e6e3fd0d9f5428864600", "2c91faf6ac4c9fcdd4686e67b867a2b8c20fc3a0", "ce13682b1771c221f0e0ed36da1cc3aaddc52188", "97d3708dfcae89cbcbd260029601f2c1de4d7017", "be5c16a3db2efd50ce361bda76837c019eaf40ab", "5e59284a1497d877c500ddef7f33cb91fc445817", "4875bed500321dec353959a556541715da5c9d18", "e5bcbfd346121769b674a7ad35e594758de5553f", "9d67af2158807aa815b5a4485b076f7a18ce6ab4", "dc7a4d5ba20ca07d29c360b26e1e72afae9a77be", "d8d08bdcafdf892e3fc6ff3c38c2503ff9d41996", "71c21458a78b1f1497eb3dfbb78b61b854668217", "c3ec7ac15f409127b2ecf61e8edc12d7500dbd6f", "17555c227941654bc19d613742e2508f209c6d86", "e9f4624cc9c2d7e1b9fa2545982e7678b9a5aaae", "724a493411b7c5a904445406d3037df4a22b6c89", "eace134548f9be17c243b06f133bfac76a797676", "28b8d20162f007eab1acd9d7cdb8baac914de820", "0cf333cab1a9ccf671cebf31b78180f863c1caa7", "6a1da83440c7685f5a03e7bda17be9025e0892e3", "905794cd12e9553cdfdb9f81f2cf80f7dc5c1030", "512f7507034e35d7259845bc5e4e174ef2f652cf", "34510d3b68b23cc829c5435ac12a5041a8adc50a", "1b7a0fffb5ee96adece2f6079f5e9ab79c3bc50e", "c1b2668186fcd01b3c0e93a9a0a68e3eb88a09ab", "bf179c196b321bbcd58291e52b8259c3f4c1190c", "825bfa844e4493f205f66782c6ca68aa69018d9c", "07b358a22cbfba084189d287ba1ba50055c3cd09", "c72914e2e999c99753d1d0058c459af69af6662a", "780772a69b1556d5f725630dff8e79ec3ccb46bb", "220377caca34bed8a0081d48d153aecc11c211e1", "833fbf0e4be3ba82e7a1efdbc16813ee849d9942", "fe01e1099dc2ce02158de607be993f9fc8aade57", "25474c21613607f6bb7687a281d5f9d4ffa1f9f3", "bbbe31818a21f719ca0cc80329f2bf4cd2b0f39e", "940288e422a06956698b681b3fdf335a73c8b947", "3d88180732d63a4babf3a4b1a82dd7fdf27a7520", "09f4e1064afffd8464e9fd558fc8ef7be5e33170", "0c5a2bb5d1a1e9bb332207be61e13d0afb8f278c", 
"5f2a8e79d02ac5bf91109f29f999aa13be0983bb", "f6bc6924c91a749b5d9943065e93958fcfb5b870", "32d8194269faf6ae505a8d7937a3423e4830187e", "ebb5eedb6ce41317971885ff33da17ae2c9e8f7a", "fba95853ca3135cc52a4b2bc67089041c2a9408c", "49f01ad8e60882d0f3c450345251b6c6b499c3a2", "b05633a18a48d9c18735fd0a186a2654297ae543", "31abc53cfaeb5181765917dba03c85a9e8de3c26", "e8f753208fc354fa9aeb3fa9c6acb3d45e7eac7b", "5fcde9236d654a0f92a76c1a3f07c0cad954985c", "96fc93175169b788acd98f0a676dffab00651cbc", "f7f7c3cb8b1abaab289eb87feb56d71f2d37fdd5", "27a5f4d813d355a33b4b1e4fc625e60ad072b64e", "d56407072eb9847fa44d49969129b5a4d1ef9ceb", "1866d61f6dfe2e7863f50efd5dedd06549ed4b87", "334e559e8decadcedbe8e495b3f5430536cff32c", "2d120c8c74bc029a14fb0726ef103c873a5090eb", "55206f0b5f57ce17358999145506cd01e570358c", "47ec1950808910a541c9405ca6e9d8caf4b15891", "aa94f214bb3e14842e4056fdef834a51aecef39c", "470dbd3238b857f349ebf0efab0d2d6e9779073a", "bcc0a12f8dbc3efcd3ef353b0173c49a8889e763", "47c03504648672a75561abaaf9e5e4187138dbd7", "38f1d8d25c0332798e0929594af2c43092d2c5c8", "559c527d03880269bdf7d51d7edf376abc12c7c8", "2c6556931ba314455970d9a2a2887d38b1bad73f", "4e1d89149fc4aa057a8becce2d730ec6afd60efa", "668e93e89835ec662d21cf695b7347339ce74c78", "22deaa0f2e8e054caa47e2a0969d875d2c8fece9", "03e83659f0fc98dd03c354a2cc7a90d585ff9cf5", "861afe3d2ddaedffb107b7040c67dac392731929", "9055e7415beb421a861df974147394d09e442bed", "a3c7698c57a13fe914b1680d2762d4fad6be8bf3", "af2531b3834b92275a3353e4b2426217ddc4a839", "cfbfcf538c1c9bbf170a524995098fe4aacde374", "207e91b23253545c32dfedf71773f5af1dc88057", "1bbeb8139f9b5f0bd1f125f3cb02321d7f08a250", "6bed9d0aec57a121b7950149f294e35ddf8902a2", "0d90d046db16d3d5ce70590e6dab32cdd58928f6", "8bc814c9653ef7fe248986788dd2a53375317a3a", "70b234658083b9eec48f2e0e38fc47133ea6e35a", "2a7b1257ec819688b46272024855c1858e031db6", "78c8f69b02badf1e295c78069a2272c539d373a9", "de72d2563277eed2a9cee2b91b72dc3c462cf5fe", "4f854d04ee27c61c33b7437e21200ff0d811e064", 
"225fbfd99465033e993460a1bc838a87fbf42346", "3efe2e245fd1a80e837f9c4eb57c4971c644ee6e", "0fee3b9191dc1cef21f54232a23530cd8169d3b2", "4ac61814d0f624ebda190b240ede72f0b156ff22", "3c03d95084ccbe7bf44b6d54151625c68f6e74d0", "e059650472dd7bfd6907b02de491e312a0cb6d4e", "705a61f5e2d31a2382f3f07d1669755284d8407d", "88682b2258312103ce9f1a440e3a890612547e65", "66c0fcf637bede76a6ea61b58655c5fc7e890630", "5ece99e52efbd43ac7fed8a7d0d604218cba0337", "87ce943906579910572db0d0edda0813503b8015", "54ce3ff2ab6e4465c2f94eb4d636183fa7878ab7", "71403805e67eeb6ec336e0cb83646fdb7c819757", "0b6bd0a6f396e1479dc30318102bf49c12959783", "61955df5dd9f62b839b61ad6d4e5bf2ab887da86", "092dd7cb6c9b415eb83afb104fa63d7d4290ac33", "bf7df7f49bc6b75568b008032822285cc09607cc", "68df1f746a3434ee8bcc8918d46809ddaad38b12", "bd866bbbaebc6bfc9707319312b44514e679f670", "e82360682c4da11f136f3fccb73a31d7fd195694", "31531ff4f106d1e196e619b859d0dc510e01c5a8", "2bafbe216e77fa6331ce808406cb14501e4c8f84", "42dc36550912bc40f7faa195c60ff6ffc04e7cd6", "42ae348a6526c31e8ed88f79f6f748508c532472", "1956757cabf13a09ec8a469ca9afcf4ff1110527", "00dfd28b91ef1e1bfa2e205dadaf23325b207751", "2757ff9bba677e7bceaa4802d85cc6f872618583", "c73199c180e5c01a5d53c19b8e079b0f6d07d618", "f3ba1a131d92975c2355f76dd3c97e1a9e043be8", "031641e5ce5c444bc197ac3f7083910b077dd5fe", "0ea2fa4039b6fc733dbc6942642287f141c1121c", "3b9d48a09510ebd8bd5045ba455279abb0a9baf8", "fbb22fe488b1eb973071ef117a0dcad9ede1651a", "b209608a534957ec61e7a8f4b9d08286ae3d1d7f", "124d43c5f76e70ec1f9eac62ef48f1dc2b547c04", "d3a793759027f2ca2f2df750a0495ba61e67cdad", "d154df56ac4382a0a81eb24b190bdba240546d87", "d9a5c82b710b1f4f1ffb67be2ae1d3c0ae7f6c55", "34d207eb19a0f61194511951f2071aae36431d76", "4407cde7ab8fc38ccb22f2799ab6f0ff7ab65283", "d4ef263f1483a1c5f86d4ece106e7729e3d9fef6", "8765f312e35bba0650aa769b59da7e8fac9e98aa", "a5bec55a9668b103265bcf84ecca94128a6769cc", "b3453c618cc0f1a6feaf05b88bcc57df3585475c", "294eef6848403520016bb2c93bfb71b3c75c73fa", 
"38c61c11554135e09a2353afa536d010c7a53cbb", "b21b860cf21edb2e57f62b665203e5d8b64829d5", "573655c3023f938abcd60b4e188e108ee3ea9614", "a9877bb6c56e32e3c3552e379fca67e5031ccce5", "7606a74de57f67257c77a8bb0295ff4593566040", "eb53175fd51b46780a41c4b797013d80a98c2686", "d9327b9621a97244d351b5b93e057f159f24a21e", "c86bdec7c4aa6aa1b5872badb5e48193ff5920e8", "f9a9c8b36ccf107e65cc1d8da41d572e43da0b82", "a4d5ff6f1fb8b304c3e6fd5f1a7abd9b5c52955c", "50db43b7eaeb427e6e3e1d945f77d86f21a03b01", "555cb7b89386d0849014b9af7e3eebb377488a2e", "c7742e63579cfea8655606ec6bd9047140efe96a", "0c87f5a6deba422c0db261c4497b9b013b4ef5b8", "1ab4fdcd431286a2fe9538cb9a9e3c67016fa98a", "89cabb60aa369486a1ebe586dbe09e3557615ef8", "9194c206269a09c251cb3d1c878f9f11639b053a", "45379046c6c1311dfa6d8e1941b3e2c7971ca2bc", "aeb83c009f37e74257209204d463c14982a2c64f", "7cd5d849212c294c452be009ff465ca7d3d923c8", "10723c39f9dcfcbd45d4ed7460006dba78c6b67f", "29a8492f5aaa212ad81c2e903c73937e7ced73ee", "38b110bc0280b56010efa0f772bee4a929c02aa8", "5cd2425bfbfbc1413c5c853d27c35f8ce5d8f144", "0b609b048b75d45fb17bf1e2763d83735db7d7da", "4226c9b155ef3c5c78bd122d870fec42ae695ad7", "0b64351566cc0145ef9c963edcafe8229fcb1fd5", "24d3e695af619e88613aba7dc0e7492c12fa4d0e", "602f772c69e4a1a65de00443c30d51fdd47a80aa", "3036e85864daa85935d1cebd28738dceaeb2194b", "dbd98092268bf3ebf8c63b2b40bdd01872358fa2", "1b784f1f9b93cbc894aedbb4223928dfc74bda10", "511dda02d39dc8107ac385ea8a572970e2eb9b7b", "2edae6fc9234fcdcce05c7879df17f568702d7fd", "d543ff291a96584586ad36d1209a8b1b0cafd1f1", "ef9dd272eae0f66c3d62785a6b29c2cc24e6dc19", "addf341760e7e97512756f041e7d5d01f9cb1445", "3528125426646dfb7b9ec2bfb937e1e70028bcbd", "00e4f90555b98e2286d4d07c87220a6766c441f0", "f969de7d5bf0c59ba73482b3a6232584adafbf82", "a147cec1434753777b3651101bdbda1489b09fd4", "4622d4027f807083e10d1ac6c3f5539d2167ee45", "397400dd7c31e47f8dec20a742695abed297a150", "d4ba8d84358778e1ad4c5a7c55dbce12753a1716", "6ecd8ee110381e073fe6b4e79029fbb59d2b0e02", 
"7c7b76ebe368afb38f256e30b4e874748274da85", "0fa9bb6df99691957f9dc16926f5981ec76eede5", "c7c405b6fc95ff2ccf2cb5b59942db4343558fc4", "95f7dc555d6ee4deaf3e30d4ed4c8a806bccb424", "7be9596f584a44b63bb584b8d213ac2a9b0ee0bc", "84d27f9027c208c3c09f1c8c9baae9d553c57d54", "030c43389bafdfefb4d6c7db0d121d0335d71342", "1133e636a1e49f1b1635b281c95c15d6f1483fb0", "6a0f62bbc27e5e194cec81e7c24a9b57d698817b", "d0326153e297c18e739ae847d2bfaa26c1aa0a57", "85c1926ea23ff4f472774fec8c6a993bb499e4f4", "c70ea40140e2f73e78d9d54335fbacce46d4d0b1", "2e9c9c0d01e6012d530024b2399ab5e77ad31a0e", "403b4deb13545144a34bee855042877f8b0ed809", "10689c0a253c858c898275b819609e3dbb6fae25", "0781a36b81d40595e8b38696677c836509ec6dc6", "e74a2159f0f7afb35c7318a6e035bc31b8e69634", "661c16658db873efeee3621603fe6bd53eaffac1", "e32a592aa7d441f834b6bf339e2e2805fcebef9c", "ac6a9f80d850b544a2cbfdde7002ad5e25c05ac6", "0ccd410b6ae977a945a84bad1c2785cef4c73214", "714947e4d7f79f753c5c44eac701185e37086276", "6926676e0b710717e373926e1302bfb441c5c503", "0a66b92198b874ab007fb25da8a5a48b7c1c08d8", "2a9283b65c8f04cecc8fb6a2cca5610b18a6f677", "4df922499ed5f3bb4c40b2ab2a9418de05135592", "2cd0da17f7d20f01a58a22880f24d34cfddbca7c", "fd7c9d90ade72ad96990b284004f672a482029d6", "b7850320d321b0e26bfbdf276b2c42ea14b10cc7", "f006161327d3ea3484064c1a86e4c87c729fd7b8", "518ad8b4e86cfa9a7a7c14a95c524276569c9c43", "34103d6e466b47ae820612e527db8cb46077cb13", "0c5ddfa02982dcad47704888b271997c4de0674b", "0314e3dc7064370a5dce74c20d39d98f55308abc", "4a64758786e3f49fc13781304197591ffbd69a6e", "014892cc24798d04a066c50e33630bb3f1b882fe", "0ccdaeffd5820e98cd534f380e8331560140b62f", "3f44352b857f2fc18c18c5ebb2cbf994ee22f44c", "03563dfaf4d2cfa397d3c12d742e9669f4e95bab", "8f08ad65cae433ed0604d0f85dbac6a2c7a5ff28", "daf9c461bc515736749e14da67045d8a542c24a1", "9e36963aba45f76b9ee5056a92f1cc10894f7a77", "5d9890831e0628b4f1bf46084d52baa6405c7abd", "6c2b392b32b2fd0fe364b20c496fcf869eac0a98", "146879bd04a1ab25dce3484bc587e5f2ff1b1d91", 
"0a0ce9a865cbfaab0b0121e6db95f95ca13640ed", "043c682265532e1dad18e68f269bb75d162380b6", "c618d4d60ce16925d4fdb219a7227d0e3575a518", "3b6d7df0cc0aebb0736f3664da4ea8a03e559db9", "3ea7d777adc8d90a07f18d180ed7cf432ebb8154", "9ba3e2b8b678910c4fdf379c278dbc007c19aa38", "8aac66d15e0903257ec3abe6f126bf6316779011", "9771621830b7068e1e25ad378b40e334382ebfa3", "c2b8b49526e3dd537b641a6495e49a3d1a0ebbf2", "d455673be81868de822f3ca9a7f3fa13d4499ad9", "63a3e425c634d0280198ae1b70ef3aec27fc95cc", "08d0612dea89d7fc231a5bcc3e62998d804412b3", "ec2bf43338959e263d7fd5e3b2ef8665fa023ed9", "8a9c08bcfce9aa91fe587feeeef07dcc53774a92", "0b574f70d0965d66986bb9e89df693126652a4a6", "f0e321fb86e6ecd773e7557577015f59b18c713b", "45a3ba54fc2210cf8a4fba0cbdce9dad3cefc826", "30af3e6e0165ebc9a641420d14ca285105550205", "f157daaffa1754aae5963d9c49247142b07c8d4a", "b38c43e133bb608a09310ff26cca0b29d35b86d8", "eb5769a873e4f06c6813fc4bc21cd2c46a269f3a", "c4c41bf777a1dc0ced43ee64bee683be092e297f", "84187adc5e6412123405102bb3c2f0428713593c", "e3d9b54f74b7c8cc8d1cc96fac83cd88b7c3e09e", "eac6aee477446a67d491ef7c95abb21867cf71fc", "f37ee76a283db27fe65faf17d7b2900c5c0613b9", "a8eebadc262594d1ca86d5520f312c1779d00b33", "eb03f0ddf6e1dbed41bdc075df8a5c6c136fe276", "edd6ed94207ab614c71ac0591d304a708d708e7b", "22b1c7cc6ba65c5b5274ce4e8f017a6c1599c088", "d1466e141707a9ab363eac6f0767cfce89c7741e", "38ebbd3b9ab655507cb072e313a48ddec78798b7", "2485c98aa44131d1a2f7d1355b1e372f2bb148ad", "efbe52289f71eca9a0aaa8a5362f73334fa6b23c", "7ff1c4e0ad0dae92d4f25b93783fadde8f07276d", "3e6b70e5be3dbe688866d8dd4382ce05b201fd28", "ac37285f2f5ccf99e9054735a36465ee35a6afdd", "8740611875c4351ab9ce3729c2ff30e0863948ba", "476c216c1a9c74c665568f98203e8eff061d98c8", "4e1d7bad6cde28e65b12c5824b1016859e1ae704", "2006f6385d6902f7d50f9fdb56b9e699f1b87475", "fabbebafe1f7b1680f66edc8b4fff345658a58c3", "8f93fa329e4e0daa0303f13c7ab6717c5615304f", "e8db9f5b3ea405436dd5dcd247c85cacb5dd3969", "22043cbd2b70cb8195d8d0500460ddc00ddb1a62", 
"98218fa05a171a641435c154afa17bc99cf3375e", "66e5c4f051c49f0ce0d521c53864b211c6ae143b", "1bbf4275c1dbe3203b0e2261114850fbe8ca7e0e", "6da3ff4250103369f4a6a39c8fb982438a97525c", "4df889b10a13021928007ef32dc3f38548e5ee56", "137f8195eaa8e68f133395a9b9a232bffa7b2fc3", "07d7889bf4b7cb19d4a8d09996311cbec2a3da33", "e7e8c0bbee09b5af6f7df1de8f0f26da992737c4", "21697dda852b69885d71caa309482f14552d273b", "82d781b7b6b7c8c992e0cb13f7ec3989c8eafb3d", "cae25b66b485b5b76fb6f3d383b294f3456519a3", "5809d5eedbbc5d9ec7e64dbe1c4a9ed4f126ffb6", "41de109bca9343691f1d5720df864cdbeeecd9d0", "4347880f4ce93504e6f586ccec873cbe081cf9af", "ffc9d6a5f353e5aec3116a10cf685294979c63d9", "6f74c3885b684e52096497b811692bd766071530", "dfe823d9851d222f299ad26283c7de4b4a3941e8", "067126ce1f1a205f98e33db7a3b77b7aec7fb45a", "2e74cda095fdb1a7100745ff3b19190a1b2d0b0f", "69d5a48077ca6f61d24d7c787a5ddfd2d4ca5b36", "e3faabdc800d2400f072eb5b48e9ad6dc94d7625", "bfef76d0e287fc6401d69a9f65ff174e4fbf0970", "b5f9306c3207ac12ac761e7d028c78b3009a219c", "4678a2ae263e7952887df31f76ab404df74a4649", "42ba8fed261e754b86d51e4f69e81f142f39fdf8", "338f0018a3580fb4444d9af59e14957084d47996", "2ea846a2def214b0bac54b671d7690e0d24f1496", "536b37fe90a2f0bd8b40b7eb7ecf89b25a1c8ede", "4ab4e283c6c635bee029b4857be670504fa9d1b9", "739d400cb6fb730b894182b29171faaae79e3f01", "09b5b34d06fff4c76866d92516108ac68ac25ccf", "a2fe4f7bdfbdc32393ab6102c8e1063542229758", "4d3faf14bef2c835bed774c0c9cd5cee8827c6f0", "76e7484e7d9880a56f01ba28e9262397bd10eb2a", "afba76d0fe40e1be381182aec822431e20de8153", "0997f69e081bc460923a34e55b525a2aa3c4548a", "10ada578e2c7b33754091bf14c0d98b9e532e6fa", "ab954da6c71fd5d98cd92fbac763c6aa304ee4f2", "4bc411ee896755862158885843900c8bdde597c0", "50978b4e9bd0357dba7f16440ef8bebc43e82f9b", "d9fe0b257ec50a12ba1af749fad56a6f705d16a4", "1991936613c29f8d09f437a5120ab1fca2257ba3", "9d35d4fba9217404a7aab84a7d09e53c324710be", "457abee61182a320b301d73ecceff00d055f596e", "a32f28156b47fd262e04426806037d138bb3ed0b", 
"6eaeac9ae2a1697fa0aa8e394edc64f32762f578", "aa49556ee4f1ee3fcc9f0f713c755da30b0f505c", "51ab4022a10fda7324dcae2729ddd117d58d2b87", "24e79933d8d71dd9e72e289d9d89a061ccbb01c3", "a2646865d7c3d7fb346cf714caf146de2ea0e68f", "c85173942e190390bfe67d9513480904d56f3f56", "0365ea467c169134e858bb668a8e19bd251019e7", "e7721f40fed05aae4d49d84e9ebc94ced7015aac", "3aa98c08043558fec09bbf731cd7a8f09cf4eacf", "2a83a51c9596ed796da52bdac49ca30e4eb04345", "b03d5ed5b3f253703fa37d6445fab0e7cdf38ba1", "58be1f5b9437d2da2240c71ef56cbc06b34acff3", "0ffd230f2f1ad98a10fbc8a5cc39099922e833b6", "ca4580c5c5d8475801de42e493c5f97096677927", "7660adafc27d31f582a6b77665dc24144cbb4b6f", "91ddac7d1d63c52cbe30fe27674b9c1e54bc584c", "b42741dfc3a7f7d1d110978323e18fc71e2d67fe", "90496b7dd9f151f97681d3289ed4f6f18a57b918", "27187d4c36f71d08898a53dfda0e81df11b25f21", "a6e43499f0884b4ec4d69460b798021b6e2ae73e", "4ba3f9792954ee3ba894e1e330cd77da4668fa22", "2786b6571656b8a40776dad6899532d7863ff4f9", "25aa935217a52d83bc1637687a78017984fcb731", "9bf5d0de19321474eb946ac38d03b3db48390dff", "f4ca1d5132ce75f9ae9eadcfb055410024e6fa36", "e9e39e31419d9a22790b327bc1d6107fa832bdab", "99facca6fc50cc30f13b7b6dd49ace24bc94f702", "1b5da7547014010632c699edf7d1a459bea65fa5", "ab12db11007c33fa71cf228caee35da8277277a1", "ebd36259defde84deb0d4c09695b54befe538ac8", "d826c91f5ab4281742cf4f603e20192368dab9c6", "3a8245748a5b682845784dab131f6d8240b09f7a", "e6202ceac0c7b08da2f51ec1d9c9f1efe53d0402", "2c0c5c40f98d9b645549f235a680be5b729ebe48", "3255f6cbc14fdf8f499dfdb543f899b240708c07", "aa528ffb1c392d519f1183489eccbafbc85c10b0", "20af7f10485fca89c2c282e74016fe69765e4962", "ca50b25eaad0c9146fc5a4a2cd4c472c77b970ba", "5d5cd6fa5c41eb9d3d2bab3359b3e5eb60ae194e", "9df680a0fe9142012ac49140aaba717388ba9a1a", "8234aa5414a53e705f0be9abcdcf1d3fec90c091", "adefabe194863b4f764ec982e3120554165c841c", "713345804a00c6c0083e4155b904956bb95949da", "8eeab0aeb3170b1ef6497745d2a9bf78c001331d", "ead2701e883174028a1b1b25472bc83bedc330aa", 
"3cc3e01ac1369a0d1aa88fedda61d3c99a98b890", "03ea1c3f867703f840c0e65df86e09055ad6f774", "7d3f6dd220bec883a44596ddec9b1f0ed4f6aca2", "11342399df26a059848764cc550e0f1c83992e61", "3ab13f3ee6d66186c33766ac115d57f8b381468f", "3dbae414346398645001197a1d1ce37f5953aeae", "5ce63df36f893d2ea13b58a20e58ebc0854ef4f8", "2829288498cf03d87301f12a5bebf7f9faca0884", "cef2b5ab841568755233994b12cf046c408f881e", "4e25cd4e40494aa5073fcfbef7506336b84152f4", "c89d2396952e9ffe309b6e8dca8f3494ad276af2", "a0c70766f1238f975db0ba687220ee78271d8b22", "18260aeca5ade8454043f8a2286779d5853b5b32", "ac83b9ad20ecf63c7818ff1e43a99b4c626fac12", "c6a6e517660eb640adadf6e8fe262fe6752939be", "5d56587ee5652fc9bd7e3bdf5a533b4f627b6487", "0d185e6de595bd3844909d3606e9218a498a9bd8", "cc098a0f390573237f2f7211ccf0643cd1ef3c66", "ba0d84d97eeec7774534b91da78b10c5d924fdc8", "0eceb033643fbededefef1815ca7cea3b47f4a1c", "49812218d3b84ab65ddc52fd2e7e17c688d2dfe9", "ffc5a9610df0341369aa75c0331ef021de0a02a9", "c644a4fb7f8d30b7c7c0358e2b66a53553fb534c", "95d0cd902ff0fa253b6757ba3c8e09ce25b494cc", "112f76b909701ab8e64f5909981ec91fe8df60ae", "15860bc14c38c89256a4263b0d31eb67fd8ed923", "d90e292c4bc2fdbeec5e494c92194e4d3420d760", "ea482bf1e2b5b44c520fc77eab288caf8b3f367a", "1e5fab737794d18f4fb385a53d5ec0fc5c74f32b", "857c1ba9b7bea644966475bb9d6d4f29d7b13f2d", "c822bd0a005efe4ec1fea74de534900a9aa6fb93", "8caf100798c9a20db95acd7e9c73a6f9c3fe6c30", "c873b9a7c61d4157a96a98025967cd09472e567c", "249e7a25d52f45594f5e340a0bc9212d5e8aa0d2", "7b47ca13af16bdc1f4b88e9b68dd3ea52d959199", "6b8a5a2d018356b396301b27156fd69dd18b1d82", "aae29ef5ab6f9f24e8ab7985386c23294d2343cc", "cf06e540e1cea9a915d8081cc75962f982a564a4", "1abdf07ce2fca11a26222dedd581b68b141af3f2", "2663fa2f1777dc779a73d678c7919cce37b5fb61", "9910a0666886b17f0aa8cb0b58effcdd0fa70e68", "91d2c018cfc7dd5a16693fba7c15a22780df9763", "a60d1c8d2a5f28ee2ffe95e7efebe4e45a9aa939", "58ca5ac14af2765ce1d25c3a82d6f9312437ded0", "a495476cfae277b91402b1bf6c5a2207d3ae00da", 
"90ea3a35e946af97372c3f32a170b179fe8352aa", "6d6a106caef228b3eee1f5765740938a534db828", "3aa9d370378bce52238f2a8290926949ab38f0ae", "bef503cdfe38e7940141f70524ee8df4afd4f954", "c1bd99083098cf8dbfed8d25514755bc5356bc06", "42fc202713cb5205bba8be8a3b85a8be1e65d63f", "9dc15949c40c5f90087eee9da2600144126ac2e5", "94ead515ed260ed54b1b9df3b63d548e368be801", "1d8a0d9fe19921b4fab27e200aafe44cc76b1586", "ef9b8724f857daec94690d03764dd1299d0cbbcd", "7ddea8eae04ec694c69bab3ab28a4ece00b3a07d", "e7cfaff65541cde4298a04882e00608d992f6703", "cc37f5c056d78cecd7a44295661b5534fbbf5fa9", "1f989aa51b4fea5f109f8ae202c1df91910139ea", "b0c651f23516055583060e2197756e1390455de5", "12c548d99fdc59bd702910af2c3daa17ed43e5d7", "1182323392b11a3de72b403c1ba3fba34f547faf", "6267dbeb54889be5bdb50c338a7c6ef82287084c", "bb85ce38e114489fca36a4c5ab09a500f04edee4", "0136d9114d62aaedcfbb50ed9594d18e10424179", "215a3616f4a6b5b692282a0a7351f13071e4beda", "023decb4c56f2e97d345593e4f7b89b667a6763d", "e381edad6f9040712e6a50caf9c82465722aa04c", "0c642068ff8e4a437f8c16656b08d1ce3c47d59b", "2b249bc1370339eaf221b1c2a3fcaab37a629459", "4042bbb4e74e0934f4afbedbe92dd3e37336b2f4", "1bc8cc908cd722cf560b36e14a3333bf7b6114f4", "09016bb7db067f4cc80d243892de4149a86ab42b", "e267c813d8804019fbd8e018171dd05255b10fee", "8f713e3c5b6b166c213e00a3873f750fb5939c9a", "c6ac51ba7e269d4b6ad6a1772fba2039e1610d78", "f56e997d239c9176f45ae2352064700f31d5047c", "73bfcee974aa6a5d589da07cf374a037bb0e3b75", "3da4fa2365c01f53180050c7d332107089d913c0", "0a04d8b0099708fbceb63b58faa61ae0c772c8c4", "d5b335d78e16993fa018bc5261e1fe9bc3692ea2", "b3e51092fa8b127bef0e46c2e54f24bdaedf30c3", "4cec3e5776090852bef015a8bbe74fed862aa2dd", "cd8b6a831c0120a0f22befa500bdaf11eabd3439", "9e3d697dfd0364314aac51522ce3778bc542b17a", "e695b8ae49e905bdd240b8a6202ba97c97b12a7b", "7778068b0ea08bf85824d49045a8facbf90c4803", "074acb048b09fc95a2201ff00f67fd743b73e1fd", "2023f1f73947add416fe3e7dbe50e3ff0eb4f531", "632029daf2a667cb87cd3078a853d68412ea6896", 
"22e482d9136b5eab65b7e3eb7ee1743c1ad8167a", "e362aef53fd4e716adacfff443f7639339c0b88e", "7f7f57fb6548fbce752ac4ab0302d0b7ef29da06", "6403707d95046655a7bb405cf7869bd3464074e3", "4522a5ccae6f656e347211d86c2e6ef01654e4e2", "702edb659ac74c2fd00b276d1ba36141922b5054", "4dab3522bbf33f199996069106b514badb4f900a", "2546dc7e2c2390233de16502413fe1097ecf3fb5", "305dccd4004560572af2e849a36faf5626990517", "098f1939afa5a071e133c767ca49703b16443b9a", "b14c311050cb1aaf9dcfdafd87b55a2b5ee6b928", "7378f7f4165d28cd1ead2a8c0950098694163337", "3fdbfa10f6a640aace2c29593f1bf55daf70d6eb", "5ce4968e87c23e73ddea484e8a2d28e245ea48f6", "63cbfc7bfabd1e234c779f8445ea775b74d8fbe8", "3123e97a6b86913d994e44f8d9d5c639e0e2dc96", "fd93275f240d17b7adc22d5e1139379265fcf011", "6163381244823241373f6741a282f2c4a868b59c", "c7fc1a9dd3c0b2653b0c9ff668cafaff7670da92", "d822a13d173db2c5244b7f7d31babb513143f5a9", "5556234869c36195ffdcd29349e5dcdf695023e9", "4508f20e426f5873a38624311059ddc060ecae7a", "75329ae46925aceb6b9c10bb1fd118db434e1f9b", "ab84b913c338026c43e9a4a5c05e4d17c8147fd6", "c52f2a00fdbfb7fb10252796dbede6403e780da6", "7be6fe8c58ca12974c563689b7230b933dfca432", "c540439a8e9280264f314a89f8de2672ebe06bdf", "8966eebfc31dad7902ff5cf867142bf6af69a0e4", "609c35a6fa80af8b2e4ce46b1b16ec36578fd07f", "a36aa784e00d479bb0e6cb8aa6b6cd2dfeadfe1b", "a2002279c36255c2c78cf5ec0c42cbfe32fe011f", "8818ceb7bf41b07037a7396058b69a6da6dc06a6", "5a8d20ecd92d22bf077208a5e7b1bb008a9b7dbc", "b2de9cd4b124c36688cf2393341ed6b805120388", "9fc37eccb3d12329f208cb7d3a509024e182a100", "592805ef819c0adebea32a57f16ca7c08e0b5877", "05fd17673f1500d46196b0e38857eb3eaf09296e", "3f8e481ea845aa20704d8c93f6a3a72025219f64", "0f64e26d6dd6f1c99fe2050887fac26cafe9ed60", "318ee553c61888f2418280cb1d342c698d3444c9", "417df443367334351111a064a601355450b2531f", "36688a79cc8926f489ccb6e6dadba15afbb4b6a4", "1c7c477d8b7c4fec273c9edacb8135747a31bd59", "70444627cb765a67a2efba17b0f4b81ce1fc20ff", "50e45e9c55c9e79aaae43aff7d9e2f079a2d787b", 
"d6255a0db6f8f157c5c901d758c7a5f36416ab51", "af61bb846d4bd23f1cb24cc5fff6431b1b4a7f6b", "067a40d9fe0942abfc8a31342a95f165a88ca5d6", "e67e757a3d94b71b94e16c5a6a90d77bf61e9aab", "a1c1970f7c728cc96aea798d65d38df7c9ea61dc", "04072a097a2ac6a0ee9132bb61bc95bd68bb0621", "57cf990bb3d64668614787708efa7cb06d548d06", "2d71b4c8689f295ace4c49f54984cebbe00ff4cf", "a2e3c367995a238155f0b180743d5487ecdf8df5", "42b95f6f726c81150b602def7d22c6665d0941de", "851136b1d3f345d0d00c4ea36c66114444d04305", "45483f17551d9c6b550474dc7168ec31302e5d7b", "c8e20a4981e907c77ccbfe6ae39673aa43249f41", "ac2a8d9011a0eed8df15416f4771f9b6126bbefd", "5fa932be4d30cad13ea3f3e863572372b915bec8", "d07e9b04c1480d65e37e44bec3be95fc3206c17b", "fccc78bbf95f866687ad10c3749222d884fac79e", "65c7058623c19980c75be8ea5790f4a641b6846e", "6a915d1f466b9e13ab79d4b59ec4897647dc75e0", "0b20f75dbb0823766d8c7b04030670ef7147ccdd", "24e347ea064b80ef927483f40d661bb94e756e26", "0fd99990c9613434e7089310ec0214653d56932c", "192bf10c12bd5872da6989302d7cfb47491c7a6a", "4c293a98e929edaff6ed70c22a844c04e604e9fc", "6966a0835b6e4627108429c5f0f0c7f3de20905b", "a65301ec723dfac73c1e884d26dedeb4de309429", "9648a3790c62cee4253299f21368ce8028e3c8a6", "902114feaf33deac209225c210bbdecbd9ef33b1", "7bdab6e725ab1bbf8fcd6d7c451f6c4cc215ada9", "06b7713df895507720ee9cce4eee199531c018b9", "0d881a1b53f843f67608cf10416092a627178de7", "0971a5e835f365b6008177a867cfe4bae76841a5", "ee570863a3c2d5a6197caa763dd740dd700630ee", "2a69c47441b823ff589ff7cba0df623af31c77eb", "f42ef7968d66a93047215ecc8bd0a769deae7db2", "78f7304ba4c853c568dc4e38fef35aa2c003e3f3", "ea96bc017fb56593a59149e10d5f14011a3744a0", "c1a18684feeb2b966e2f03c2622f9a702e14204c", "0b50e223ad4d9465bb92dbf17a7b79eccdb997fb", "362cfe79a6822f9e317555c5e3469dd038b9053f", "1a45ddaf43bcd49d261abb4a27977a952b5fff12", "e7beddae52b1bfe952b0af2f4dd1b29dba9061e6", "8a38cd388847780dd6757d77b3eca8cff51bb182", "5039b2081eb3c8efbf9e96fd27775731f38f6fc7", "263ce02126d9e5f861eff30b3170eddc158018bf", 
"732a7b1b43f3a235fb83f9961a74e463e5ad773d", "71dcf25a3ea3801f09d6cc446dbf78e22481d609", "727c8c696c6acc04e57b6c3541613702c22c6f0f", "7efc581023c58eb96357cd72fe4a9f71365f6a97", "165abb6fdbadae997135feec447fc825edb31c6c", "1c400dcd6c3e54498d9a7bd5aa4c456079a9d236", "b944cc4241d195b1609a7a9d87fce0e9ba1498bc", "ab8778793b0f2f06d9e97b6277f3b1125f31432c", "8e52267bda3033cf58f7d1d0390f7d26e9ee7115", "867596b7c4a2e108dc5a024f85cdfd77a574f5a7", "8112972b8a6e0c7f9443dbcdfb4ed65c7484f8c2", "8a6b52fd31ebaf00e7abe57c4c50dee4683aee4b", "b270a993d03e639fffbe72c3f5c4b8f3564290a7", "67c703a864aab47eba80b94d1935e6d244e00bcb", "05dc38472fd4ccab196543b8f48e3117b199922a", "09fbfb566a8f2af9df4d3a1bf5df00d0693a22eb", "86b87fa14321f2ca8a4e606cd4de17763dc48ace", "6db710ffdc52ef785fc74635532e8e39ee64dda2", "3b9eaf8d913f99adeb9192f68808efb7d2c0fac5", "1d086defc586f914eb88acc380714478e0ad595c", "343c76120d0b213655ac8e49f1d87e883d519813", "1d4e0427dffec6ac75b96a564986046ea2b00980", "3ddfa1e5e57c8f439796d092b3059075600198b1", "ab05988c3af93e7753de79996cc409be0a8d2bd1", "937729cea19a955147e059a6f0ef0571cc6785c4", "21a1e0cd24e4a1d383556fe566bb2326da18f26c", "080e0efc3cf71260bfe9bdc62cd86614d1ebca46", "c44177896137e5010a2b336b943c23df1f3f92d3", "56fb30b24e7277b47d366ca2c491749eee4d6bb1", "b2e7b1a8bd7375a043ad4eb1c88dbc7d436d9634", "e24edb77083e9cba6fc50eadb3ec64ddbb43c120", "5e6d63dce8c41f1a4414f7124eaf8be9360fe485", "5960888c4de94c7caae1527e9f0cd1fb13dff808", "bf3b4f503c6dbbe15a6d22b215490f4b6176dd24", "df4efcff93c044fdfabd0dbcaa084e8aee5ae8a4", "2a628cc59bf229979ba4cb2530a7bea320aa3dc3", "f1471a408369689e2fc956b417dce24e47557a38", "c2fa83e8a428c03c74148d91f60468089b80c328", "f1e2dcaf6ca82ab7cf00b21c9f4b78f173069ed6", "07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1", "f3b46ed02db04594204b54c17e88341d7dc22db9", "0a52919e4473eb7bc20982094e8497570d797b13", "2cf7383e238fe37516e2607c4741f79a230834bf", "05f4d907ee2102d4c63a3dc337db7244c570d067", "309e5ae1554d2afc3b94eaea66b8f31ba85c434a", 
"347e62b6a241462b192622f78413a2c724deb0aa", "285faa4cc54ef9b1834128705e0f96ad17b61e0b", "7ef41e2be5116912fe8a4906b4fb89ac9dcf819d", "ba3d99ecf95445734c4f98e6d2b61fc98d31f21f", "a77f477cf52d3dd83b08ab7cf87acb2517e90587", "e3fae8109ff2f91ebfa1bced01452a3998c40ade", "853d6cfe9c08c971979d1dd138bb21c25ff750bf", "22c530788e4f1a665e77621152b2c4267482d9bc", "9432e1157f252ee626511b2270126436b0e80b73", "e648fda506496d7ac771d47926afcffbc8504c95", "745e74ae84e1b2b8690d07db523531642023d6c4", "f1052df3e311b7caa563685e741e0a1bb6b288df", "2921719b57544cfe5d0a1614d5ae81710ba804fa", "1a0b09e7e9182a68fc457bb888536b9023f6c9fd", "21f7980a22300983e1cb0fa02a9c300045a08740", "143b54525bdda1f83965002616a4e7b5b9f523a3", "0f41f1a4bd5141184ee3ed3cf8874eeb396d7862", "a86479f15fb728b29f98c9adfbc6b06a3d3c83fc", "58bac838068df358b536850a84ff806a23f061fc", "c285789796b61b49b851c6658f5bb57c1d445d2d", "6c22b549d854845c5d2f17d75417e4469e6d3f83", "5a79bc5e947c2cdd684fbbf0c8c02e3aa2f2fcc3", "8bd82d362c39c3f2160f5685149ed41125b0681c", "c7b58827b2d07ece676271ae0425e369e3bd2310", "2878b06f3c416c98496aad6fc2ddf68d2de5b8f6", "163738c0f74ec82ab670a868a051edb732543b6e", "8d7becc3667a0fb070e27956a6cf6d66be38f19e", "728bda577b98b0f4150c0a74eee2f6d0a3bc4f15", "016b1080c108718fc59e58e47b4867baebd57d8e", "81b4c5c0d4a804e5e6981b1f39386fb115592ba5", "28c14a6c64518c21888afb2d73fe8dff633ca4da", "9b48372c7adb3780873df7c6d4134f93c2b0aebb", "e1e6e6792e92f7110e26e27e80e0c30ec36ac9c2", "70560383cbf7c0dc5e9be1f2fd9efba905377095", "9b4532181847d2a28c059e3c07a45c4ee8452cc6", "9b74de11c62ce16d0b4509554556e6b6b0d4f5c0", "be0f38b9b43f60db509bbb17063007933ef3b767", "ccebecf0e24f76262d85f55712010632ea04c0af", "6720edcea05b31a9b9a6db98ee71e8ed31efdc38", "0947f2689c71e0298411614aebf1c223168a0c9c", "092f955f701b31f3e58adb57c57e39a4dcab9fcd", "7644d90efef157e61fe4d773d8a3b0bad5feccec", "27405836469652ca9bfaf948c0c9dadd6465a566", "47ce856747e4eeb88388a1037fdb56d6c6089d72", "797858ca2245c46c42d802a8ac8c10679557ea37", 
"7e988b6f688f248d803be9846a4cbd4126afc785", "6b43dcc17e7219f6b8b76c65dc1a62271b11b2dc", "04e6afa6fad6a3c7182776231529e8d1ac98b558", "5f790739bb4e11bdf4fef85c293edc04aae903a3", "122ee00cc25c0137cab2c510494cee98bd504e9f", "d7d6d2dd2e9c205eccdb59f61ec11746996863ea", "c79fe054f971a454406f46b62b5a397b95240046", "ee9385efb66ee0b1bee31c1632141729bb7fb6f5", "433d2d5528d1401a402f2c1db40b933c494f11ba", "fa2603efaf717974c77162c93d800defae61a129", "f0da88d34a56a847039b35b6fbc971671e13cd53", "64da1bfef7db423f31ff92713fbbe1994ad4124d", "18c4a0e82fdddda2530b7281ad567abc0373a89f", "221c9fff1c25368a6b72ca679c67a3d6b35e2c00", "c7f16174007010dd0e681aa98b835b53012aec15", "a6c99f51f65c0cec6829db5b3be4942b78ce7e4c", "564555b7fdc45938d813650de7a7b1cd40005aa8", "77cfe37cd98910de3601795131305bea639a435a", "f4ce7c36586c27783a1b0e737c2834f39f9d029d", "266667e73d15a4b0ecd401b4d20af497d879679e", "ea923da826b9e6f89159cc960db7aac91b5ecbd6", "8582d5307793643e5b6a5e4354ee1ba32eff3809", "5bb78acceaa89675ca777f6c8071bf6ac9987e1d", "8f7ae27df3df63f0f9a0a8d595bd95f4dd6d2589", "aca57cd3f1f4edea9918814aabd0460c682cd56e", "ed38d22cd5558d1abb40b477027d52ff7b6d09db", "0b6f810f287561ff694a9406c7b319fd8549ca68", "3fef3bcc94728f5061fc98cde610f6a602b06148", "4e4fa167d772f34dfffc374e021ab3044566afc3", "b9e82ee9bb4cf016b5ed44b7acd2b42e1a5a6be2", "280d59fa99ead5929ebcde85407bba34b1fcfb59", "d5f289142cb4c8aa3dafa6da9f3d0212881af491", "1819d9a9099dafc987dd236c2174945e7922be13", "c901524f01c7a0db3bb01afa1d5828913c84628a", "418154a73f02b11c6759b8101084aab9d4536b6d", "ccb01a51ed204da01a12728bf168376bd2415b38", "28e77337bcb88e37d36f5660709a53e71377a2a8", "31cd61f05ea86a3eb08e06f1d0c2aa810805282f", "5700291077b509b11fb227f84ee9fc2de8f2df99", "355b26b7e9ca064ac474f4bb05f738ab11139b3c", "ad4c7b17471740af1b76d005acc561896ce73559", "da67293fe8ab15539dd045675fa2395435f239b6", "43123e77108e059098194deacae1d1a6044703a2", "e473456063cf96539b2347fd37dfe4122c40ce91", "4795965713f1ceab6d689a08f2eb7056c0e745a9", 
"419f05c5888804e0a9d9f2dc60839f2d8d65a7a6", "19b0ebf88a4672342429526a683d34ae5f65e4b6", "927ad0dceacce2bb482b96f42f2fe2ad1873f37a", "161c9ef7114bda7c5a60a29ee4a3161b0a76e676", "4cdfef0fec0918dcf5c40b9b53c9e3f48be0462b", "6ee5dbbc167167105162abd888ca4824a048fae0", "5134353bd01c4ea36bd007c460e8972b1541d0ad", "fdc259afcfe4cb9af551f75d86c438459153a808", "b3e856729f89b082b4108561479ff09394bb6553", "c315a050478781da65556a745e01286ee4a8676e", "40a82046d7d4e8148d22eb86b462440524da8f52", "6472df86bed51909f7b8aa0631f910db5a627c84", "60c12b3a1bfd547f5a165c95774a1a17d18a5941", "3ab7f06cf8e7e7ca34427f81b766b823647ac117", "0f1d42e1296474c9211fb57604574ba0cae4380d", "9e89c9e40ffd1848da10a2944edf4f7c72ef15fb", "b3effb96c09eabada94f9105241fe66658fe77b1", "14151238780bedb19c585ab3374b3240d61899b9", "13451899558d7217206b275ca0bb1f48fa4afdd9", "5e5e11e143140cc376db466d5b096a54b900c2ba", "b873246d9c474bf7799d6f45deb1155144dbd6b5", "d22064099b862eec0f430fb62a587dd888d471f3", "b3e50a64709a62628105546e392cf796f95ea0fb", "619f9c1552f8f4f7c5927a7369c79e34d6294083", "4ffa207907910091647c73fe5b1c448f2e6c4e76", "126204b377029feb500e9b081136e7a9010e3b6b", "7db00be42ded44f87f23661c49913f9d64107983", "e57dccb1d6ceaff28f3372483600c857bae20a90", "7ea35b35392c6ef5738635cec7d17b24fe3e4f04", "9398c7ca42671dbfb1684a9fa5213029917ebd0a", "c89e9028dd85a35aaa65be7830beec4448c2ba67", "3a24c276368fa63473078723ce4bc99c9ea36019", "5dcfb84ab3f5d5f1dd02f59e45154c9710de97b2", "e27c92255d7ccd1860b5fb71c5b1277c1648ed1e", "8e378ef01171b33c59c17ff5798f30293fe30686", "0a3fa8e6f158e7faec024d83964751a5d59fe836", "38679355d4cfea3a791005f211aa16e76b2eaa8d", "945cde41f10215678f6d94ef196aa7fd5ec3cdac", "f8ddeb23343cde8e2a9fdd87e877f0ce5461b42b", "17d519e0400fcd973387af8482aae949c1ccc521", "7dfedb083fadb6822c07be82233588c31f37317c", "b2c25af8a8e191c000f6a55d5f85cf60794c2709", "74d79a75656dabacbcd8a268ad49b3e5defa3fcc", "0f0146855de3cc6e0fd1e3c6a7bd0d3df19653bf", "938566dc8ee83a12d07e4d26bbb75e65ca7963cd", 
"23a2b75c92123b3e7bbaf1d98e434845167fe259", "31f2ddd9af75c7f9bec515945bc6c3055fbe9801", "691eb8eb9f5d5fbf5d76349098b78e5d6fc25ccc", "8ec193302458bd9805d04042e05b59662d7b411c", "7113b51f20c01ea5cbe0be04c19588d20f432f9f", "e29d51146bd7bd479d7a638054f1d6dcba71fa1b", "b6c0397fc911e12519655ec1eb1074574ae6f290", "e78042d77765c0fd3c09651b679e15ffd6b7e8a1", "41c42cb001f34c43d4d8dd8fb72a982854e173fb", "1283398de84ec0178dc74d41a87febfbfbcbbb02", "d4305c30ffe986fa53872b1b6cb61856b3d7346a", "576d8fb95655b67f50a7aafda2bb4e220eba19f4", "1bed38bc216f80a50617afa5c6d9cc4b2db72519", "08e0073b649a3fb6fac530267e3f32eb8e67c5fb", "5735c1b752f08a8263ba0d7c52ef5607f177f343", "8a12ee3c98b76d99531d5965f15bb77a10ec2569", "6b9aa288ce7740ec5ce9826c66d059ddcfd8dba9", "dd30d7e32046c333de78a9380ac6b76f4ce307b0", "cfd933f71f4a69625390819b7645598867900eab", "daf4dfc87cac71f1293e5269ade8e7bc7c9c42b9", "f2ba98d57341a11a7c193cf7b4e82d22947644b2", "c61f0003e2626f456e56b2356a87d090cded520a", "88e6c221273af85e8736b4b865ad90f9a1464480", "8b8b3375bc51ae357528a1f015c4d094418c9f71", "8366ccfcceca3ae232ecc5efeb073e480f386308", "468253c79c14439d238f1fc6c642e1c12145dfc5", "0cdf238fd44684b49302c22b062772e7c66ea182", "34032cf0f94cc6645b7fb5df821c72039151c0fa", "53a43a5e650d0c0857f856df60d8a90772e542dd", "78342d17c6c6fff00cf1b20602f3213a3f61ba56", "6cd5b56f4262c7e13f61a4a6f28eaa805f4e3291", "8f98e1e041e7d3e27397c268e85e815065329d2d", "e592f6dc3bf1d53044cd59ce4a75fdacd0ecc80d", "f6328f02ab64c992d76967dbfd1a66d325173723", "c8c83ab64d99b16ef3248cbeccc95f7049e324d5", "08f69a82fae49a4a1f13d06cae32d77bb8e5be1a", "003f161768db4b70358971d40eb8e1ad535f001f", "65b9c71a4e5886e3ec8ff1f26038c3c08bd96dcb", "acaa781f353c769ae5f6101aab140f51b2d33cd2", "10e5b4d4be3ccbedbc0c67a9c30200e6d79e0283", "ea5efa35de61f82088fa72b575863a59c9ad7269", "60ec284f67c1012419e5dea508d1bae4bc144bb2", "27ae7c8c650ffef74c465640f423d9008014e1ca", "b419e0e1192d307d536421d811d10657f65eb72b", "2e491c8e3d1d3314ea5e50943c0bdf2aa57b99b7", 
"01f5689a4010ae14ca444c36bec81f12ce528912", "6f7ce89aa3e01045fcd7f1c1635af7a09811a1fe", "287627a80bd51b1da4041273e584a188a8992de1", "203a1ecdf7e488d81e5661a6735b767c4fe2b37d", "51f311f724883218bcc511b0403b9a7745b9d40e", "f66a93565b73299cb726b58fe2866d30e34f94c2", "098388c08ef7d23ab583819b793b0057c0396dc8", "55e8cfd4a96bdc77d10459c0aa73991ff098c60e", "0b19177107a102ee81e5ef1bb9fb2f2881441503", "4bd088ba3f42aa1e43ae33b1988264465a643a1f", "50eb757c375d3b4f53f3ae70632839f5c4eb1cff", "1a3f7b9fc451b54110aaebae56c65413c620f6e2", "430ff8b02caf541377749673dbf71c4d95213f5e", "181045164df86c72923906aed93d7f2f987bce6c", "13f9922632ff5311046229b849615fcd2f5d0c06", "db6d00f9237cce392c08b422662b48baa2ed1b80", "9b7a41215af8950ac8cc791aba4a90e5eb908836", "38ea19546355e41ee1d57febc07613e7d3122607", "988849863c3a45bcedacf8bd5beae3cc9210ce28", "8a5cb84819e8e03dd99d664cf3127eced5fe11c9", "00785be241b28161c320693ca3611e1787a25978", "15affdcef4bb9d78b2d3de23c9459ee5b7a43fcb", "8432320153aa3a348138e27ef80ae3e8631bb6f8", "8b0af7d056e7e8a5ef2bf1278fa0740771e23401", "05caf67982ce3416a28550f291211bd1459f9aeb", "1cb95f013ec3e78acdda6ac6cfdb362ae6a5ceac", "33595d1135d9eecbda62bc568d2545aa3161276d", "004a1bb1a2c93b4f379468cca6b6cfc6d8746cc4", "bb667cbbf050040fa39cd9e756cd5bf485fccf32", "42ab6c438bf5a6e0e74cc2dd9192a12f2406ca33", "fcd945eb1cf5f87eefa444660dbdf94f5bb0092e", "0e6f422c3f79c552c0c3d7eda0145aed8680f0ea", "cea973a31280003a434705dfb9edd9d99b601d1e", "287d0f1c15d40e9f3a851e4bed0991ed5d642c06", "9b69ea8034a24db2bb1a1eef73ec11b6367d2f2e", "6020b69bc6834dc905d26c91be165c29de8347be", "0af7632427f70f2327cdf5188b814fa55d7551df", "3107486fe666a3004b720125bd2b05ff9382fdb8", "134e7b130283d64731b5698638fc96ed8f0743e9", "56e6f472090030a6f172a3e2f46ef9daf6cad757", "34a1d075e6f4a9cea93b916ebac2f3bbdc2c5ab8", "2fdc469096f72533726964260c80b4c14ae62fab", "d7731565ec4cb1b910290ccb580405cb55224286", "77008ab857beb64a5d35e2ef3575db2ed1e087fc", "87f0a779ce4e060e3e076df3cc651e0f3f01b2ae", 
"e17783170ecc48253fa16123a041ae298184f4ff", "f7db1a670a99fd68dc3c6478eb9aeadc2838a897", "adef82b510dd72999bb04e13660c9a77b5abeb4c", "dc2b9fa93b4fffe066e21e6d20f254acf27e0988", "91c522b163a345f0e1b91e1564793cbea1b2de27", "64be271fd50fce1cf8434020145a1b6e16f75c1a", "73111b3403ae8b59ca2955dfdee6c5d5eb9fe04d", "38a9b0963a279e7aab52cd50b095ecc9f8e6da49", "302c2293e36e0704ccfe9af759a8505df588eb07", "ae3fc0955509a9a8ca2686eab5b445af8e126524", "e7dc0d5545e6e028b03a82d2f5bb3bccc995a0d7", "e984017c5849ea78e3f50e374a5539770989536d", "c5a72b85b7a8a6413ab59d44e074dadd12b2dbad", "12c708a709480722aae9324648d0404ec55f151e", "62713c466be23af553fb7aa59fca0fda39e94f38", "c1ee2e1d53f9ffc9fca5e3e8da7c89dc2a2133d9", "2129304075990cd2f3317ea67a2acf52b7d7a3e2", "274cd31032db89a98b275af408ef5141a4d7b59e", "4f385f862e43424e4ebee369e6cab8a93f740507", "e072acd8a041b90cd86c39f90f235eea7feb37cf", "b6aaaf6290ba0ca13be61d122907617f1ea86315", "67d7ba941dc91eb016551102fd9642263ae47de8", "53b8288f71340a98c2531cd3e152c94613a38cae", "6c032a08fba885960e531a02641d121b81cb7c32", "fcd360f7eb8c1e9e61bee5d63f6fb500b49989e3", "b3b920c797259d1340fcd2cee619203821dabe23", "45c4514ca2b7903b4c8f43e396bce73f014b72be", "0de793e8892725adce2ab86ab710b4e536e6977b", "1096445f1185265c56edb1be3bde6ac4e8d91386", "5a5ae31263517355d15b7b09d74cb03e40093046", "b800b625b7cef0b6971b5d46d8a6f37c3c4f4057", "8e59851a9b59d818f2c0beaf23760e9326439a86", "9c8a2d66b8fd6973751b8ee2fe6738327968cfcb", "770b3855cdd15b49c89e4053b6cedafe53cecd6f", "ec5b793ef2f8afbf2bf914204358fa7319ceab10", "ceface94e4c73ff1458be056caf2c8312fd1a515", "8f60e94729366f46cd4602df5f38113020874745", "0e87a1dd0a0a639b1bf45ad47008c02e05170729", "433fd3234c13dfa73c93b68104f568f479481eef", "b131f10070538e55bb9f6026cc5c518736a0780d", "de724211683bb92931a5d80193e5dee31ca2e045", "7af6d86139aa86cb5897904563a9f67c016a176d", "bfba87606ff05b4065548e0e90be5b94404bb847", "e7bd805c001e04b3c015b7ec11497cd5247a1a77", "86d4e1fda741cff189ab0edf6e62152c46740526", 
"dd0086da7c4efe61abb70dd012538f5deb9a8d16", "8042b633b35aee9402bc2369b5c25413d2abc271", "5b5306f5b32aece520b76269d59bf2bb5d06fe46", "987b49719617981c0f1a6d12134f660675144632", "bbcf6f54d3e991f85a949544abf20b781d5ba2ed", "9ccca698224219cf48068a0d1df111ce209a0ebe", "de69410bf819bcc6d487de3548293a542c381e3d", "08f48d8dd64328ec6c91cf0de8d19e80e65ad52c", "884e1de068accb6cbdc8d3e1bd590a033628baea", "573179ee797ee20f41f8146be5c2664c349c2229", "6b45999ffe9444614d286c3597ddab389402a337", "0c41e4e699bd4f64d744ad0bc820ab20da367499", "6f41b528abc34c249038f612a6c1033790ace628", "e7893712592c592eeaaf1c7112aee41f6d2d3998", "55432723c728a2ce90d817e9e9877ae9fbad6fe5", "97de882a75b09efa53c8d39a6921cced2173414f", "e5e9e7cae71b13aabb30f6fe1f97cd153400be6c", "35cdd4df9f039f475247bf03fdcc605e40683dce", "7e19f7a82528fa79349f1fc61c7f0d35a9ad3a5e", "67a50752358d5d287c2b55e7a45cc39be47bf7d0", "79ade61f677dcadfc2b46444d2e0275d25ca1f06", "6a951df76a56fc89e5df3fbba2e5699ccad4f199", "94ac0bdc3fc106006a69f294647a3dc2ecaff532", "9984f16ef4dd45d087b3689ef8b8bd42ea74e1d7", "aa2f9b948382b3aef373dfd2952ef1474bd4712a", "c1e02305a0fc236fb737afb02d81ff7d59f14787", "ff2e25cb67209de8ae922abdfc31f922b130276e", "c3b5ec36a29b320a576f6b9e58188b505becb4aa", "45b3e64312f64cec0a88bc8e2af06ac628392899", "e63a0ea338dfc7293ddd68074baf250e99d0c6d5", "4948c1791412cf13b770d922399c625527b51a6f", "0b2277a0609565c30a8ee3e7e193ce7f79ab48b0", "a9d3547ab16a9cc936bf5991bf8fb475eadce931", "ed575e997ebb5df75e1bea804dda4b7bc96f9c9f", "4c302936f43c30430b0b07debd6ed6ef260b5225", "e92548bba37998b0a28bb7a697dd41f4db006ab5", "49214332796abe6bc4dc9ef18ce4dc7d692bb404", "5e09a85527a2c471ce35b21a3b22ae1620c80176", "0626501c230d2b74a8975e8b71c3ff30f0aeea2c", "0e02dadab802128f6155e099135d03ca6b72f42c", "66f8115136a11684e3b95c5aaa1476a871d58a66", "0139eb62a87649bf7d259542b5afc6be121b094b", "04a8090e6e1780683bbb044a4ae7deea192c7616", "18bca470bf51f5cc42148cd7e34fa58280be8eb2", "327ae6742cca4a6a684a632b0d160dd84d0d8632", 
"6d6522a6b9345db90caa771449b4230cd402f797", "5e4ad1f19e88b6dc87000f64b984d8f09abe7baf", "bc27c0d99e6f21b8a4fac6a0cf1079f6755554cc", "997ffa2cd7f3c7ba3730fb348c9804f3f575f32a", "127d16add62d6747481d23064a302fd878e33b56", "13fa2c37bcd9686232765e663e00b09159b9a846", "687e17db5043661f8921fb86f215e9ca2264d4d2", "d2e51df571802666e3f224be25d2b7715f018c9f", "8111eb725133da1f0128967bf8cf488dbd94ce2b", "d2518b01092160cecec2e986935b0129b0bbff45", "9981201c3ab04cb3ff18b689d01815255bde89c0", "590c277e8ca10f2c2d7e32eb4a9dc61078a67b96", "f77563386ac293620ce2b90b5d7250ab5d8f9f50", "e7c96fd067bc622abda2015cdaeed0f462d21df6", "5cd34abb1e96e0c11f427364e40b1e87d6fc62c2", "a77342abe136fdbef8da9b43055356e3596c570c", "3d58204f9f89b66db916278dc2d269e1f79ffc43", "e76dd43bfc3555c005aa5d725a226c9bf4b6bf6a", "b1c257c4800ad1f427a08da0813a8d9d34dbdea0", "6c06452671a501edd6fb66c2c05ded614045a9ec", "5b14abbea83270282ef94fcf3f3a73e7d8fee023", "0bdadea798eaf39995a2c3ee4e772f579f4dff43", "71c549df77b0fc2ebe0dc20d39d0a629a563bd7a", "f8e51a8124f4a393eb29cd0b1c52f347fb82aa20", "4bdcf36206c9310eea59c7a7d8db5179eb146947", "ebca525383c4c451e97e801f2e2532d65e88dfeb", "8a00f5cf6ae47ae8bc6d29729ee84f9e71cad853", "3b964d6a527f24b1a1f8499b0f4dbb0ed982d5e2", "3d6229044f6605604818f39f08c5270a5a132a03", "bbb121177ce1c2a7ba2a85aa5cf8e2b9606196de", "54fc77b3b3f8ed657437c4048fa6fffea6495b30", "95225bab187483e37823daab5c503f6b327fb008", "0052de4885916cf6949a6904d02336e59d98544c", "94c2f28d51193077cd1dd391962a04f254033215", "669727b3258bb3edc38709147f348dc67e3fcac4", "be5975d5ac261f6224ad43e78d0f14bf17c3c941", "10791a00b28ace9729895f3d287a7147acdc8944", "253d2fd2891a97d4caa49d87094dac1ec18c7752", "1dede3e0f2e0ed2984aca8cd98631b43c3f887b9", "77e747b12d22827fa84f506eefdac4ec37948359", "e113a840e088d07e14349f979d21a18d246a6217", "a216f7863fc6ab15e2bb7a538dfe00924e1da0ab", "2b8fa6187db53c53a01174838e7ff8b77205bedf", "080ab68a898a3703feead145e2c38361ae84a0a8", "7302d800c962b5cb705a269cda8525634cfe64a7", 
"087a1f9c2846fb1f9d7c29a0f042bbce929bbf5f", "06687e82ecc94f716d86d3e9f6bfbd30655c6631", "e00bdb0b046c4d21517ca808a4233a6fd5f3faee", "1e4c717a8a5eed5c3385b77641ebe3d8c4ceb3ac", "2633ee01b41edf9df7bf399e55e14d0c7412523a", "fa9f2312fd0a4eb5bbfadaba9dea47ccd34453d2", "5567d6ac4f88b48bd1fb364e546236131a64fecb", "e0d15d47c00e35d8bdc7ee4a1dd7751a4783a501", "56c448434df9f01de306815122f6517a33235e22", "3a85fd73b2063fe9c9c9a124e36f25bf60e83eea", "5d7f9e1463b596eb5d77865a8b1a0e149215303b", "eab4046cb0a4f7debecfa55388256fed7f3ee677", "39803a9c075d543e19384d79fb4c36b207892179", "1fc21daaf3274af4cb4f20a8ddc97489eadbdd32", "10261848b16292a5c8c700de6c6c9f692867c9c8", "072c5665b700c7b5da541bbe7dace1ecc870ab8a", "82f6cc54ddb4df9fae811467bdf25f25985c7e2f", "d16c8ac2d194a6e862be0d1c4edf1ca2cdf5dc18", "9794d69194ac772c3e92ee1f322a36feb3c16239", "9ef17e297937a918b79e5ab72d1a1e3868d2d65f", "eaeababea109602e6bd4fd0aceaf94b3f6c91070", "5e1e618275155d8d2bc1a2a21eb7c139992d58f5", "7b3fe45f887a37f78bb356874702adae91dda105", "91d057823291857ec6259c2deb9b7c95d4198154", "db8012add5bf84808b72df63b7733c940e28f2b0", "2cff3c291e03dda9ed6cf9747eeffc5642762e52", "f1a62862bf3ab26588f880ec8d6f04d14b6cc2e7", "c1a16ee838d977160821951e7264af4b2e7c8265", "36513f869e5ba2928369014244dff998ab93728c", "c031d1792f088c4feca14ed8ee05423a7f77fe8d", "2dfc48168c0de9e6c7135293c95b7d794fcfbbbf", "5be1f85e1f1b9e3f6807c69df1cadf44c3d9a2db", "9c430bc3aeaba0a037b8a5e5804ea2f4327ddd65", "ec91c6d6235f31c751b03489d7b1d472dfc9da26", "720ef31b8fb5076c861fa55f55456ccbc9174132", "3056a5686a404a862807f7aa7c70312bc0225e0e", "80f53cece53b82915e096f3ad1730f9ce7ee5808", "324d82129642f84838be71bd7401f38c80fb87d7", "d9bbaa38d7997f334ef8d662fd2ce380d495545a", "0ef11c7bda735015d7ea76b8d760aa7bb989a4f2", "5543224d6f8e22e7eaabfcbc4bed9e8a9451e3f8", "33d8edb9b721e1132109bc37171b6eee26333ec8", "15dc50e3bd1063c3760e1c17177a0e898175c61e", "da8eb0d7666d481ba0d50a03067dbc1913131495", "e6af98d1567dad534262ec0863264bb26157533f", 
"9af012d1df7ec582ad4ff3181e15a536db310693", "72eab1e61706519e8c05cc042f0597b439874413", "d69ef8b5658fabd0ac092fb2bfd0c9c109574dcc", "235b4729951e036297bab2191a69c507a882d609", "65355cbb581a219bd7461d48b3afd115263ea760", "029fa43a49a2f5df4bee8aa6a9574f8da5098f98", "25403c52a7c3092866773b0e765ab55841d3cb67", "81884e1de00e59f24bc20254584d73a1a1806933", "6a4419ce2338ea30a570cf45624741b754fa52cb", "cc47368fe303c6cbda38caf5ac0e1d1c9d7e2a52", "31b58ced31f22eab10bd3ee2d9174e7c14c27c01", "0c2de1b4fe7c5da8adf6351533a9c39503ad7a4c", "4f77c682f133d5010762556ebf512533524da071", "b26f6e3cad2b3d129c0e70e9307ce9197cad2123", "0cc22d1dab50d9bab9501008e9b359cd9e51872a", "ee0f87a93fee7a7dc8d13760464dbd6ce1526626", "7c8d57ca9cbefd1c2b3f4d45ab6791adba2d6bb4", "0371b7cba37970f22040a10bd29219778dcc3947", "bea5780d621e669e8069f05d0f2fc0db9df4b50f", "aa2a4f7cf8866d513053873a410879ab5b34b53a", "05b0383b4cfe007bbad92e72ee361f95e7e9a458", "23e6517b8ed4dd2ff5dd452f25fd5dafdf905906", "6dba85c931592dc54ec1b8db08b2a723686bac54", "9c2f3e9c223153b70f37ee84224d67b5a577bd58", "3e734cc79496091e8b08df8d781d005651885c38", "6dd68c403d3edfa77c034c46925da058a539921f", "c80e9854c5ff43b0958119d04e884418d7c05c38", "ca6b2b75db9ff8444744df9149601a4ef2beefd4", "186d6d47855cb00c5bc99497932422b8963510cd", "e0aa9ab8f00b2bf0dd1b6ffd5c00e5a15b6a67e1", "2fdd09747f491249e706fb0df51dc6b59f0b7b23", "582c87ef9e98c24694c83eb03853eb96a4d84809", "4699f98cfdb19e57c2c14c046d0a658ed2267aa7", "936d8ba4bf32ba96fd40a7a3e7f6e0d1b15040e5", "4a13206bc77708debbcbfb01928a3d23ae632d8a", "2d31ab536b3c8a05de0d24e0257ca4433d5a7c75", "31a521f2e1abee3b6be6e98b33aa915e3c4dd8f9", "87e4d8e0fc4019405001683678cd199fc9936369", "fd752a384a46e99e71694d8fc82b50d0b506e2a1", "e97a0cbd5f6a239a351855f691c917d9338d7116", "68ce1572b18c95fe9c60bc11d9d33f8310902154", "61366c2eed49519e3adef44e8b7146db1fcc2113", "0cec42a1593a02ce3f4a44d375e3b95f5797aa21", "4c7232d5c3caf09e9e5c53d697bb470246d3f4c7", "3dafecf541e7aba8b6431f6deb50d37e7ea8a8ff", 
"40a883133e4a21f2b67f2ae689f619cb29fcf2ad", "c9cd6be6bf5f09b245b707aed6f6cb51dd688da3", "edef734a0bc729162ff7a805363eda88e1633c8d", "057c8f04bc5e9f528589eeb3806734e38b1ecc83", "011c5bb510c9a4c24e2fc07e7464fa8493237058", "e597aca96ea1c928f13d15b7c4b46e3d41861afe", "152ca42d6701db43dbd8a37901d56a52e4a9e6f9", "3a30a54d8df929933e5e20bb7a4484f0a5a325f1", "77bb7759e09b47b35d5447d1d6fe07957f939f68", "bc307736ba15033c33cbff0ed15fc0fb96b7aef4", "8d10cb114d58ff3688b06445ab1f006567a3ac82", "36c19ab9e0e358b2ba485a32a57eafebe0753c20", "17ff59bb388b155f613f7566ba7cd71ec780cdec", "b9dbc8ddb68fd4bc5bc958fc0a8e63152fc87c88", "856b1e24fcf33dbf0e8bbf07e75c2cf712654b5b", "3b109df57fa91067d88523d1caf5af40ed9c1989", "d002e8e3c803e416c894811af86371c79272ec41", "1921e0a97904bdf61e17a165ab159443414308ed", "e5aac48bd54d0954e351123c0ee8128890dc782f", "33e0687306fc700b3e34633e1219da0d52dd9fd7", "84124eba5ccd5a25d2275c3dd6d2f15e30225ef7", "ae5195c44ef7bff090bb5a17a9fe5f86a8c3b316", "9d146936d7d06622e271764c8a050a92bc168f3c", "3ba3ef6d8394055d43bf4fe62227fbae8ab9b195", "771e78ccea7a03dd94bca10a7215dfe3b0f4623b", "27183d23f50884a0e06b978acf9ad77dbcbfb112", "3521904cced380b849325d6fda2a4d855edbe405", "50131e57e14eafd385d94fb31e63f86a5bab9b9f", "0443c1dcf47b196752c89f726cc840a2437382ed", "5456166e3bfe78a353df988897ec0bd66cee937f", "9fa5d5b2cd6d625973d735e70d44824eb0118a33", "4354ed06582b37e52bc23d0b1e86993d88c00e92", "47785095076b48393bdcffea3267c686a5214900", "566a39d753c494f57b4464d6bde61bf3593f7ceb", "c2e9300b0e72dca0b95ccd4181fc2a7a5178dea7", "eef408a49b943ff664142a980b08d76c5baad512", "2cd7821fcf5fae53a185624f7eeda007434ae037", "2e079604c7a00c43f06e214280cea18a89dcecef", "57d54fe3f48857863ad9d19bb196d13600ff1878", "6184ddbe780cb934f036b04dd1d28226b6bcbcce", "0068204e6f250c7e8a26e5dcccc37b36808bca32", "700af3eb255ecfc9cb93d33fee763047875252ef", "065ec79836040f89df3c850a3b065de9222a8871", "0c2c53d71942ad3171b693f565812f1db43215e0", "e99e1ae39e8d0f2b5c607903dadb4a630d06599e", 
"96fda2ce5803979ba0295413b2750e9733619dd5", "cae208e906f5c21d7c8f00c5e237cced25883da0", "43e43a94a580a9cfd4e1f5bc4d9d6964003af446", "8b20737b454fa8c2848979b5c76be9915a65a75f", "7ff0ad5c34f02b9c394ed0d8a3db9c270dc70e44", "df8341b479434721d3738cc672cf976c080ab7e2", "d52fa3620648792337809cb664efefec9dc0628f", "70b420850e16ec2afe42d5c0006742d9045b3e7f", "62e878445851c9d5e89a0ef8d49f11acd77e78ec", "22c169fa05a0d5710bc111e451161e9d9141c29d", "4610b1e9b18f913fbbdb5bee6502f55a47610ff5", "213777d00adf001486e6fbaff879eeba7ff3070d", "260a975bb1562127634e3447890447d593e4d6dc", "e91783cd31ffe99b050a3d2ee06f632385079423", "5c97b70b4e2430dfb9e98bcfeaac77c87e45359e", "8758775ff9fa05b05f98a43cf5effe6b08cc1241", "005f4fb2256c6fa293e738bb53ebf437a5b98d73", "78bdba66b1a5fb19824be37c4f5c2d20e0e3b34f", "0c54851799df1cec365e4c8c8e48d8b44f3a277e", "2311cdd241c118395a510776ec226aff7725ebc8", "d264a1b519c1e68d6f07cd05b7422061647e1dfe", "63ccfa415c7933f5a9f067b43501c3d6896a5c7b", "06cea45f1b965b9820d80ca1107661b54cdb7e8e", "1693e615c3a7a843880eb5bbf4e3f1beb0580f5c", "965bf5726891428a2af6922ead4779c0a1331392", "1dc45403839d6aefe65c6e7f2179d5ea697dfeac", "8f84ff8921dcaf1a2682a759f1e8e515ab0eae4a", "08d625158727bd97ba6fc58992158ee55a53011c", "409220cf5137d6dc6c85f440d618e44d244f402e", "2bb2ba7c96d40e269fc6a2d5384c739ff9fa16eb", "cebb9e30f8046109923ff5768a2ba9bcdb25fee5", "12d0c11d546d91e776a170898ebf3a38c010695c", "164251f012186767d9f00a3baf8735dd2180fee7", "95ed9e883b6321658b95a8db28d0704e90973a9d", "870debd8a8a69c948b8fab4237234ecd6573e897", "0be936107834d08f381018f374979e0949e6b932", "944ab8c7d73bf2ca439205543c906b7797c269f5", "497ab61499d85ab9ea2f0959ad3e5547cc4ac8e9", "bef6daed8cd1ac90ee1c0a42e5c019bbf523491c", "33d045b39bc4645ff2a8bffd83a49697631ff968", "0e9f55c0ff758a91c6764f833b14b09ca788db20", "48e681ee58b2615500cad3c69046bfa0f0400a33", "155959429a6f44e7b980ff00f2d5c0343d71c4dd", "d517b13f2b152c913b81ce534a149493517dbdad", "1cdff2cd2e3cf8dbeb8f0a42df0cdc77c953dc81", 
"e1bf18d2933e5f24d598fcaa5318c45cea373c39", "b3c8752cada163af9f72d37d2781ecd49b4c8c52", "3ceef6572b00bef961c0246a220edcc48553ed2d", "166d8f840c502c5095c8651540dd393743d63ce9", "9c6a16a241c82ae695896572edd2f31febcc822f", "320ea4748b1f7e808eabedbedb75cce660122d26", "0a2c9e1dab6e32bff5fa3fa4a1de10547f818d2d", "7f4c9a659aa32482a646b7a7e1e6e68cead381e9", "0d6f874b5a0772d1ea88e85a010a01e381d02982", "3a39cb039fb0f569ab88dfb058d98650a17c9f5c", "015d24c1a8621bcb6b6beac3c4d5a34af5589ec6", "5fc8dd97ee6dcf60b474f2f169331a05b0242805", "209a1f024ade2a22d483a7e2b10307a9d000826a", "170d15b04ff9211378169037b5b28cdca6548541", "401a8272c60216d1ce8be58edc13b42b1bfdf912", "2afde5e414aa94e20e2b30a5aa277ac36ca41d6a", "f9129b3858c14b5f6cca1fcbf31c4816d94a5038", "f2e70cc1603100548df96eef6cd9e28c547801b8", "af17312546ad1016ae7f2ceca1a4fd2a95f0946c", "177cbeb83c3a0868b9a5c75cd74edf4b972cba80", "b103cbe3fd070aca73da3acfb3e176e58fa090ea", "0535101387033bf0b5ddf662a8c4d98caa1adc52", "3a0cceb1a10697e3e17738579d27708c9c3303a8", "46d0a519da10160a20a3070cc53e5b9401066526", "dfd18b71f5c53ec2a95fcbe327cf7710da3b4851", "0bfabcf5c74cc17fe8b5777093699789411868b9", "3ec5afaee732157a1039d25b953aec38bc151638", "2ad9a338d81340b7b02510e7f9e390f9202ca72d", "0fb3b63090f95af97723efe565893eb25ea9188c", "2f349ec19443523bc6c1e4b15fb677b1c188e253", "75e9401e70c05c4d080e2d17f83ed2b61b44b3af", "b695052732594b3fb50d4dd8cf3b6bcafc65da7c", "357b39c9667ec894410042a8338a6c63f1a97bac", "72a1ecfcd5f0b022fef49cab72bb476e41dea40e", "15d2703ac86652aaa8182ff60da19fc1bccb22ce", "c600e985ae3af9143b41271abd040a1c1e89177e", "5c60f84f041432f27b6578eb586aa3a754f7a25f", "3e3f19cfb5a7f93d71b2a4578a9cb9ec968960ba", "3805d47da61527137b6f44b92af3017a2dfe7bd5", "2850aa5324998b6d656d9d9c20f0eaf9d8946e2f", "18f348d56a2ff1c0904685ce8b6818b84867b7a4", "eb3f204bba4662bcb46a64144f15c0fcd51a02b8", "1469b18d0bb62b01ef4012eeedc478536897dbc3", "1748867e04ba16673ec5231f6a2ca0ae03835658", "7e7e4af2a79288fd2e391020edff8552ea1ece9a", 
"72635f4e479e234a9ceb9c836153830621b308c7", "2891ceceaf586e4ae013d932978074ff0a06801f", "01e77cd46ab75bab8f4b176455f0daa592e5f979", "61f93ed515b3bfac822deed348d9e21d5dffe373", "8e6e1861ac3e130f3e5ba97d9c6ab21355b3a55f", "4bb603d515ef338ee69b54ff7ad73b5d8117f4e5", "a87bc818f7409ac97c8719aa8fae2c40d214ebbc", "134fe1c4f45cea3339c094fee817e7a024d73d88", "625d68fdb0db5c3ad27c8defd608c3841086392d", "23000287004800912e3469772f3a2a48704dd303", "24065d385bae5579be07607a1f63eb79cebf8773", "2108b67bd717d349a3474a5ec3a34113b5943a6b", "9603b3a4649fd217752972909d627bde8e0a5023", "1cf4abbd052c94e63557b7922f7a5fc7e22c6e3f", "709198f1a7d42fb87d46a8f5dc48e23e6564df1c", "e41876930495ee879b0d1cda9a85f82102884b17", "2813b7e9da327baca5ced6da94e1754a6d80390f", "a21b8aadb27cd10d8a228fe1aad27c0c88d67f15", "1e9758d282568763b209252bc3aeb7b47d269881", "7cd99c15e5a1791f1fa5c9c9969312c7dd646574", "4af4098deffc22cf901f38b4634d316df68975ab", "a1497db913ea4031315e24a1027177ad0c4b680a", "56bb321e0e180f72be9c4e9eb791b251073750e2", "2ed6fadc9777d67ca62a163d7d456522f4875ad0", "b6a01cd4572b5f2f3a82732ef07d7296ab0161d3", "b89f8c7c104d4acdfcc03eb04908d6359a6eda1f", "12149fc431d2b3ec4d1f194e92e74c765e51ee67", "f970cc735d87ad8484a29a5bad69f529dd557471", "4cff5b5099b0227730efa9e9fd724a63dc0c0c2f", "6c3b2fd0cb23ddb6ed707d6c9986a78d6b76bf43", "805c77bd351fc98d6acbee68b73af915c5cb6776", "0eeca9b515768d11cd5f9c37dfd997b808213738", "c5b05718963f4edff80456c441796e4199ad8d41", "7dbd91389960498ee38ca7588025ec61a08ec942", "a5ee556c355392db1750df92ae2dc8867073e771", "8328ced86dffd1bfe300dca9e960ee328ae9ab0d", "ca754b826476b3e4083a0a6fbac3ac39b494fd43", "423aacfe7467961e32f012bc6de10d636ebc0236", "66313d48a6352e731e40450f80a66c64aabae817", "a4b0f11334d33de4f41bfeebfc520fb5b034a31e", "0709192e10ead7498a1deb9f15bcb8504282d3ee", "20c70df0e353a4d125fb945c7965e8f8ae817bcc", "78bb037c4ac31b2846766086fde83558dc3c17d2", "8bf05e179d50def46b008147fd3cce6c582a542f", "3ce8a74b47f81ec66046f2486afa1a89e3165dfd", 
"00dec7b4e082e9345e1b34e36d42669f12c129f2", "67fdcbc07358605a8fd8eadf1200329af3c25749", "30e547dfab832ea0428b137d9e4824a22d8efd0b", "09dd01e19b247a33162d71f07491781bdf4bfd00", "817f38e6e9844d4513047916fe88561a758846e7", "558540d73fec6fd3856fe0695ad8d9c0b5fe1773", "253cedd3022e25a79bcaffe74e3405db65c6d2ce", "288d2704205d9ca68660b9f3a8fda17e18329c13", "da86500475912ef286d384b4d556df748cedad0e", "02809c81a2948a5080b03b787c73565828a27675", "c6260f83e86dd4d1ece92e528422ecc6e36c13ef", "8458e49fb08d2cca3a8d7355465e182c30785220", "513d9d0fdc9efa0f042ed1a3c8eab1fbb564f67b", "026050f71175d235f3f91ca0e99e994c00f9b5a6", "771a9e7dc747fa2282815a4863502183f4e887c8", "d91d1fc3f54fafaa66df12b9db2f83b477992e37", "2ba9d72e7f87a458a06bcafdba68ef7abc1c68b5", "4b042eb64ddb8991c0e63fff02b1c51c378a8f58", "956d6e48598cac9aa6129a87a7f8cdb634917aa1", "0707316cac06f02a18019595a43b98ff8d56e63f", "584525fa2e6b3bbd4ca5e971aa7c372b6ce07e8c", "2409557812a3d26258949ba73a05031591f42bdc", "93d3f2e546314305e8102538c4714e30e9146858", "98142e84a3cee08661b31371a2c610183df82c8f", "da288fca6b3bcaee87a034529da5621bb90123d1", "46773c8a2fa5012f7b3e16b44214de0da3f68859", "1da7d851c8d6761b4e1ab3e037596969a295ae50", "f7e91701ca7db1d9de3e8a1d082e2cf0cbbda7a5", "80936791605589168f05e0d56d1a934a876dee02", "747ca08cbf258da8d2b89ba31f24bdb17d7132bb", "0bdfc21178347ed4f137d4c7d0ba14c996c66b6e", "756db84f76d745211464b5686a67bfdc23e18c19", "09ae4b2c851a06e0bde3f4e00b9b7c6e5ac3ddac", "485eb41be3ce1600e9934167808b0319a6c3ec2f", "437edf4b1e8939a3833d8eb814447d9132d7d758", "0fe5d8acc77f54d60edc56c012f35517d9c861da", "ff9af51b07a7e80706361cd064a25d99cde64236", "6ab94ed33779d21d233c274cdc65c308955668a9", "719969807953d7ea8bda0397b1aadbaa6e205718", "1839830486082578d2612e46a89e0e727ea1773a", "a00ee78381f0bd5926851a68d6ee68368b44a5e1", "c790d53077e17534f071a3b87cdf8bb362c7e839", "9da2abae3072fd9fcff0e13b8f00fc21f22d0085", "06aec820d7d4b15f8c49ac4b8246377015693abd", "f57891b2e5860f42c9cbe3c58e926b891270277e", 
"a165619977bc69a910a771e1096551073122775b", "6e7cfcefe82471a6aca78b59be0285467ce37b8b", "67a3cc056a539d17f00b0be550a2fc7cb2118dc5", "d22a8bac307e1550a9542c3d4e316496b968bf4f", "e09249f111051df1dabd0fdc90194e9f513a5a7f", "dda820650eed893fd88d4bc9fe11fd238f3f4bbf", "1eccf152d034b9ef575a29e6030661627ed0f9fb", "176e8eeefce2039189bfd65a8d0346e480591db7", "91eae81dbba3013261292296bb929a18d73b447f", "539ca9db570b5e43be0576bb250e1ba7a727d640", "d806790866ab9bad77f60436fe77232db8e0c1ba", "a9f63dcae167630b0c6ba4131897539151217e2b", "89b4111f14cdf342188f96d3962581fd0afa042f", "88c6d4b73bd36e7b5a72f3c61536c8c93f8d2320", "17daa9ddaf524de914e7440157fc0314db171884", "6d88fb85fe5c61bd65e0a373cd39fac81a19596a", "0c320e30b37e0ac25e97a7d5201c8918603f0fcb", "856448a97a34f8ff60b22cbee936897c74f7a26f", "4d267098356dc4cfcd3f5aefcc26588ffb23b8dc", "7ae33229eb26ba8567c29a44289dfeb803ab2426", "02227c94dd41fe0b439e050d377b0beb5d427cda", "7b8baf809d9b643145e089b7a1650923487cf451", "7671234c3726fda01b2842f85327624f0dda8ead", "34072c31c2c778df471c9f0c43ba6198dfd0db32", "97f4a74eec00c5d2a42c6575f99d95567b78fcc6", "304baa0481562d468fb7cfa1f89e726f82701a39", "cb658e9e0823dc7afe66b593307b230cc2747790", "68b697c207761c2acf077072ef0fcfef102f7d72", "61a2b79e27df799f941e9c7d06797ae4d4533a2e", "22f3158eccb3ec449f23db87e270a46202cc9571", "efb181d56c385cf20efb9c0d1543fc0e7464d3f0", "488493dc29c844b36660395266d8d347c7cfa9ce", "1f736b39ef43e3dcc82e6c62a38b94a630b7d830", "6f68ca4cc05ef8db344f0bf1ee394e93d519e77e", "1c727208d1d9bb1f712a27ec626dae862efc3a6c", "d5e12c9286038afaf9ae764b044929cd9a458c95", "b41d2d4750ba7fdfe072d253f408e5b60c75eb1f", "431ab9a99eb7bd9bd9ced2d32afdda71bbdcddd0", "435bc494d3606d1137fb8b70d481bd6497f15090", "49c5f7b313eb4d7b4442074b9807200ac6aadda5", "48832468be331e0257afd88ea71b807503551ca0", "50a4a7725ee35124cca4e72a52bdf71f5088faf2", "34cc32571dc9567cdf094765ebf8399288d833da", "83b20fdd3eafd21a6971dacc73d85c484a093bfc", "9e1a21c9af589fc2148ce96aa93c9df4a9e5ae02", 
"72da95453c41e13e945735fee83ac19bc732693e", "4a3ba4a8f6945382b50d053b58aa0fc7c5199b4d", "6d7dabc58f53c0233d6d593a8fee76d1c7f44033", "c69da63f19a75a8cca99a2ab26203d89f18e8786", "004db007ac193d8df3630c453b5ceed773ccc4d9", "6ef0b43cf897f527540c29cae0618aabb7329072", "9bd36eff633c52c6f6e8ead009367f6b6c43f16f", "a49acd70550c209965a6d39d7ff92d11f0a5b1b6", "02d423de6ee65037ea6c3f4a8f2f269c210fb163", "41399dac2075aa82421c8603edb29427da3a6166", "13c4a4359e9d7f5b2abe1b9542c0950946b0565a", "6e3f5d761df91f4ff598cf1bf8814c2f5597b553", "97f5fb31317f75c7ba412caa1adb46de4c51c52d", "3cd1c3c7ce30e3adc26df36ed550174311a9aa80", "55ef8c3c28e2afda486d8471205204927127c605", "365b72a225a18a930b96e7c0b215b9fede8a0968", "05818eddd8a35fed7f3041d591ef966f8e79bd9a", "55eb5691479268718627a39237fadbe649b34ecc", "3dc78b41ed926b88c9cc4d40c6c5250bfafad74a", "1fbde67e87890e5d45864e66edb86136fbdbe20e", "0cd032a93890d61b9bd187119abee0d6aeb899f7", "395e007cf11dd3082d059b8c96dcffae628ffb4f", "b569166b4c67927c7e139350416b60e127a202ea", "60aa6b163fd8bc16965807fdd47634bedb04989d", "9f2d7b7f5d983cfc02dc3b06dadddc4902afdd83", "3a4abb2b2a8a499bc10b29e0eb293d3e24503053", "04b2cddc8e04a02d685d6476f00d0d25d4dd5e72", "02c8de83c3bd2226a918c925400628902b6f175a", "9f33f248d326a06b57c6ecdb651d3ec620d96dea", "00869507c550e2d840d730b04ca1e4346f205da0", "5b042a76c6e61d411f68b8193ec67ad8dd1abc5e", "67a496908ff624d0e8d8ac2412231c53f1424d59", "9a3454677eb0f356354fb5cfbfb38357e257eb23", "f678f31e7bb5eda34098b0fed608cfad5e372509", "a2afaa782be91f5baf9e9f1794d57dd29143cbf4", "a62ca056821a3179b116662b28338433ba5b5e7d", "a12d9c1af6078f923066df9ab8eddb3cec85df1a", "866ae642b5b54abf78b4657cb1de24f7d4cca7f2", "1701ee9e9518a055e82e79f6425645c4797c19de", "349db48589d9c2177b2067b112b8411513242e95", "57db5b35f2473fc3608fe3519d6763c1d4984eed", "88a0ff6b180703a2d90bc86b40520e35a08fe02c", "55e4cf29055d1556baf72cd17d2bdb692c8554c0", "f79c4bf83371627ba139b61eb427463b93cd687b", "d9b0dca3a9bf7cbf60ae3eea7bca26cf987d1445", 
"fcd7407d0df030d03e3a8879f184d4b3ceac4fb2", "f5a52b69dde106cb69cb7c35dd8ca23071966876", "2d79d338c114ece1d97cde1aa06ab4cf17d38254", "3af0a26ef9a4084703b310eb997ca630d0bae237", "86564bcb628d4ba6728babcd7c5a38d5fee39241", "9564b36faecb356a56ce4a3545722a94daccd4cd", "c48ee576130473efe6dc3ee47f552bc581aa68b2", "659fc2a483a97dafb8fb110d08369652bbb759f9", "a2ad9ae7c5adbbce9ded16ac3ebdfa96505c0f46", "0ad0a1293f80c838c843726eeddf5a97f33f0c89", "136b9952f29632ab3fa2bbf43fed277204e13cb5", "735c38361d77e707ac48f0d040493c65ca559d3c", "3239f9fb3c11cc29c65664254133beb339f13f40", "84d6714ae7bc73acadcca41b94148d61c8599ea8", "1fed6a571d9f688e18960e560d9441f5c5e3e2bd", "30e28beb92239447aff0718119195c0539aa58d8", "06f969d3858b6d14425fcbe7ff12b72e213ee240", "5d7f52a7d9814688c13b84ab35526fc9bf57d1bf", "33030c23f6e25e30b140615bb190d5e1632c3d3b", "5582aafd943f2b67805cdb4aba9e2f288dfe0ca8", "313387fc6c5b5561f23fdc63a546b18f54f6bebc", "68667c5d7144c4ad5c8d447c9891356ebcc83e3f", "fbb304770d33f44006d134906481208ad087ce63", "c2d39d5a0f476f8fe06a1d8023301e3b3b45236f", "afe2b41cd5af65feda66fc71bd76db602954c6eb", "3068dad264ece487e21fbb689d8f47d498c5aaa4", "904a8241ef400bd85b1ad10267a1177bbde1c048", "7a92007531fb19c46b1d69f93b52ce6650c1ca99", "383f874ba7975c83b55c694ec0a70f51dc3a0ee5", "b0c379f740292ad2cad2c990a445f69167e18894", "141487cd6d32f6916bdcb029ac8159eba44e23de", "0e2af97f07625cb3cf5e30f1c9d807124cbbc850", "faff106a31c0eaa60511c7d9a8e4cd4c42374b34", "d5b66763cf15d4eefc8a5f15a498262bd9e58526", "412a82b94129477d3cce2f737365219103715db2", "0e8a6bfa372d64b9630d2f6182ac07799a9c2161", "89cdedb35b487bcf07d6f53aa91463ea2de8da66", "882190b77ac33c5c988b1b818afcf83e5ccf9c4f", "7a1bf891e09cc7e12a0400c93aa0d1d598a3aaf7", "987dd3dd6079e5fa8a10a1c53b2580fd71e27ede", "ce5747733e8d796ca98a8d2b465f9bc3cbf24931", "3eda28fba2917dac593bf5d760ec79fc9732da00", "d8abf01fce0d44665949e7a73716fff7731fa6da", "b2180fc4f5cb46b5b5394487842399c501381d67", "2328b0b5d4e9d4b78b1b9002407a533c21ff66f1", 
"adf1b20cffb0ab12d20f878d07373efc4c1bc6c4", "27d90cdd54bcc8f8ecfa60d886143288977a5c63", "0903b956a68073eee3760572059abd5b24b026da", "06d656c53b17ad7c4ca6345d19cbca271d93ef02", "d2f16e84f5df5f00f2d5b7326ab28bf0fa233007", "759b28cb6527f8820f1cffc3581884c5caa19091", "6729895ecfd8eed9e73e898b54d6c7f18c095a91", "19746957aa0d800d550da246a025ad44409cdb03", "e5dcec59afdab7c15e3a874e9b602b8fc42b9019", "81ff6d7f934f7134d93b2039d788b72f8593693c", "9507b1a7af5442f8c247451a63400893de34d9f9", "861f4aac1178bf1c4dd1373dbf2794be54c195d4", "5bf7a81e48bbae954d7a19615292d9d88698cd75", "e09ee005c07fdb5a370c73909a447e5303a74129", "346f03322807177d6a7011734d988cb8628821d9", "5ff8f8d8904deb0df670856d63905b1eaadffbb4", "cbd20c2199062724eee841016f1575cb7d5309b4", "b9fd6c8ae5c3dce4a7a40989d6dbf62f0093dc6e", "b8dc8b405ab941b9ca8313f08a6cdecae1124316", "1343f43e231d793a0bb45eb13ae2560e99aff6e1", "85b32c201ef787e9e28538f1bcbefe30ad785535", "5c77901df1e0f52a9774b39e730c31afbc1214a7", "9a99c23aaac3598180c115e3843d06faa4211fe4", "205814d838b67f4baf97a187a6a781dfa5271df9", "bd2043d46ebd3b788a1ada7ee32627e88b79e75e", "7b3022707facb5a2f0e8e042ea4f70fee61ae58d", "9b95153e4d3972d59fabef0fddce9b7207836b1b", "214db8a5872f7be48cdb8876e0233efecdcb6061", "364c79d2d98819b27641c651cf6553142ef747bf", "d522d63e0e8bdbee314b45085baf40caa08fd6b1", "f5c99652c4c89e56156faf2bed361a15de6162d5", "cee700093d6672df48d169ef194861026fe31e8e", "bec2c65a8419b9ecaf04e8c854b5ad391894a6f1", "4879d56e5edc07ba5a34bc08700f0eed72131131", "2f20cf49eb6a0818313c29d64eb6d30ddfb8d747", "83ce2c969ea323784b9098b9b170e015d559a1df", "77927f4f812513d6ecfef26e0ddbc53c85796ccb", "7eb4c9ebd70092b3d93a79cdf68bc4e2f35b4b65", "0cc5804c5f113c60ee5894f25ab7078364eef986", "4bb1a4ebdfa98e30e12fffec3fe5a75c6c113bab", "b1f42e2b1b560c2451a1d704430633aed71f2bb9", "595d0fe1c259c02069075d8c687210211908c3ed", "90d8dbaa799430d7384425061317e0fa55bf5cbb", "6bc96ba9680947097cdcef4a235de394c7597187", "d61165b4551c90a522c34c233187dd0735fba50e", 
"7f45650e4c9dd8cbc2bf2dd411fc24ba5631de60", "e97dbd90d97ae8f499a399d5fa2780d6ca08a393", "9f44ac286c4d9b9005f639f844dd2e74b371262f", "2006451682f84ce94a77ea85e954e26b5cf48f6e", "8cb4349f7d4b04a2e98b727524d3699bad50de1c", "3d00fad9ebc9c4cd13bef710de91f4c9d1870887", "ec9f036195ccfdac51b6daf241c45ce7010d0d78", "614a547cb976fae955e276feb2ccc9a33f1c7806", "4c971c934a3c56d08af92117cc8b505e03754262", "0a100cb8a1accb69905374ef35c13f36c073873e", "0781498a38ac67722bb690cd04f69a80e07a55ae", "d9fdc9c63bb4838031eac017ba9b8e9bda3cb845", "a8156fbf31a2f8df36384a2710399ffb89778fb1", "477811ff147f99b21e3c28309abff1304106dbbe", "3bd63bea64c770df5049879f4398e65f958ebd23", "e97f551cf836b51e44464eeb84a86fda00914a8a", "919717628a5ea1bb258489a0f5bae11d24da5787", "fcc1ae9761926e9e7dbd23c2cb95ca39b0a71073", "228eaf76e75de3ae12c28e5134b7b6095c16c49d", "fb76adeff0309ff4c8de4d0b413a8e3a637774d0", "25e979e3c2d4fde4f297bf845796664424ab4c29", "7d941dbab0bb645af81781bd3867ebde11c3641d", "785337b262534e43a4e14d21283e180bbce23621", "6f911cb4b7c83baf67d2c262558535d360d37a3d", "f759880a3314850d3a712bcd96494b62f60d5ece", "4f7d9c5fc3e0fd1b1a4860003bf2b482a215f721", "2a4117849c88d4728c33b1becaa9fb6ed7030725", "dadfa5bebaec79a378e3c260d6c5c415f82bb5d1", "823e57c126124254cf96c723fe1bace505271220", "19e7ba0098ea0b3fbb7b6333ece0e9740f726db3", "e1e490b5d0a179b8eea022b64e83bbd611114d4e", "1856e71437886af2366b620bcfe4caf891425f7b", "bfb8d036810fba95c73c6fa6e8f1a40dcc15d745", "10d8a48deae967b627839cc95c98b6c080ba9966", "4a8b8746def96caa3efd65548040c5c597c4312a", "a649bc66524e5e61e4d34cc00159099b6b58db2f", "cb5ea214f4a3ddd50e821efea003340a8036408c", "f2b79ae191fc03a93ed50eea773279f67c8351e1", "1d730a452a5c03cc23f90d4fde71c08864f31c35", "43f0e2207d628deba1f91c810c38f33a1978cd58", "be62019734554152c4feef62ba3092894b402efb", "e55f8123c5077dbb0d3531534cc4bbf2bd025f5a", "59bdd317abe8d87fb525eb4e3197a9311e2766e7", "3bebff841ce7d40f0309bbc0e8cc454694061e82", "8949563597276246f9f480d4b38b3b7851fd5495", 
"ae7ebf1c6111af9d00dfeceec4b48b528b437956", "160bd6ca22957e528d2d81b59e71836b9644c9f4", "92f2c4f5583f0b58799f4834bc2808ee785e27f1", "81840ed3a0af562d428a9c8f9d8dcd5605c4c1f8", "acfa01182d18d8f3fbbd7df6be0998269116ba6d", "a15663e0c0a2427ac4da5161e4ed75d331a5a2be", "066077e9d76c779fd9c399b8fd51cb6a6da3e3e7", "51348e24d2199b06273e7b65ae5f3fc764a2efc7", "0c7608a158207052e0d615cd86d886a50d1f33da", "05531fda7a82b18c42c7ae9d053349fdb8281b33", "f0cc9d73b1e5d7f91f43071950bfda619b1443bf", "651125ca22947e95e5be6206c3056988b850266a", "3aa9d042e977f99d1124d6ce6cd83d3feb975ec2", "2cc8371c483f76fff65a5fb1c9cc89e974ce83ea", "6198c7d579726fcc0d4c62ac156b503fc9e39251", "0b07f20c2037a6ca5fcc1dd022092fd5c57dd647", "d9c143d75fee89d24cd5566ec05af40f54dc8208", "01239e3c4dd6b7b271df08c17398ceb260979ced", "e3c433ab9608d7329f944552ba1721e277a42d74", "949079cc466e875df1ee6bd6590052ba382a35cf", "9ecd0f6bf02bbaf43a9b186c633d04263049ab6f", "32ef19e90e7834ec09ef19fcef7cd2aa6eff85a9", "5ad42766d7ba40ceccd8047f6810b9cf6ac79748", "ba8e0bda11af08b6037666b67cf54ae1f780822d", "072fa3b84eb93db06134a2795c62cc0982475e9d", "e24a5e843d2ea999393b9f278f4b5c80f8a651d1", "9949c9f75cd1bd8330ce113c72cdae628659e8f3", "ad7e2dd9ce31c2093d5b611372c44654d8d594de", "ab559473a01836e72b9fb9393d6e07c5745528f3", "fe95b902eb362ad39f91e2325300d3f7a9119c48", "283a2bb8aece06b975a1109aaa8daecdf4d3df42", "887b7d34ebac80bbe3fb3792ed579dd82ff7e373", "003927ec8deedf8cb515ad3b145ef2a5a556cbf4", "81fe36a1a49eabe38c7d98602447eec518af1aa2", "5e286a45a4780a142e1420728ab99cb92993ab50", "6c9ed3378dd53a5ad9e30613ba2e1ef363bd1f9d", "42d9484b8258b84869589fd60ebd3ff0acbe0b3e", "9d743bbef448e7c145aeb11e55cc05fdbafe9d6d", "8520da50e5e234c14272921868ff36d55e6c7837", "5f592ed440b5fd4f454429981ee76dec73f934b3", "e3c4c44e33e1ae41137db586d89fd09d22712252", "877d083b2a3a75cc1bb25f770a9c5684bf5f6f44", "58ee038c6e4f62c01474f3d08f13589ea4ecef34", "12c68afcd77584f3db55b42f38c3ac0e19389b60", "0630b3677323c8c987f16f37545ac6073293de8d", 
"4a8f521b929f72da2e9ee4af9f43e941f02bd114", "45e79ff0b11cf28a707ae225361ea4c455480689", "a32f693e98ae35da5508c8eee245a876b6e130a1", "afb51f0e173cd9ab1d41075862945ae6bc593cde", "e2279676b01e477b5e7333bab276678f4ad34753", "13b8d657f0f9a0178339570bdc153bfd10a81300", "1ddacefa549de21f734f43016115ce7d54ab3d94", "b411850a3614fbb06bc77e6f776b2f23af563a90", "38211dc39e41273c0007889202c69f841e02248a", "75003c069da53911f714d8d28b121ed9b29e0911", "6a2b83c4ae18651f1a3496e48a35b0cd7a2196df", "ea3f9321d4609ac3a659b66aae204f0b0e2a8ba1", "4d87784afdb704d9eca14010212afd5cd74c60ec", "de10c2806f7c5dce2381435546d95435ea362e37", "7a4f3d17672ecd89e4ad0d4f3a9257352a055d9b", "92e7e867daa56d531646e75c4e1b72e6983e59d6", "a6a6cfae45e8633c01793debf43592b7d515f65d", "1272d526614e40ce859e73de7e39a54baffd28cc", "da79aa36f748e3783cd11c3b0e02ae4f2e79e89c", "2a869bc7c1488023c6e791e9c9071badfbad749d", "2d4215a73e4cabc12a8ea5f49a3661d741add0c4", "65539436abf0eedabeb915a52f787b962722c99a", "5397dc42e7660d3dcd96abc95359e6dbeaed31d2", "4bc1c2cea06e5f42905f5ee99a6e2c1693c098f6", "ad26e62e83f706fec34d9393c8e62b22e2c75e45", "b46538db32a8954a087214370586551ac12632d5", "f23d4ed760a35fbfaeab47efde3d876c1818d3d1", "d5484f516e5e1253a90529a2018a75d697fa9e05", "8a382337ada0665ac5edf9e76c4caed0817a35cc", "2d12efd5aef4c180ecfaf65184eb7b56e5a40329", "aad03480c30c0a3d917d171d8d6b914026fe5105", "67fd4f209aa6e8359fc86bdc12c62bbdb0529077", "0a66015112da542b9b6687e4b3c9ff73565d0844", "25642be46de0f2e74e0da81a14646f8bfcc9000a", "9ed3e04586f311b1e2b5ded9c9c4bfeeecf27f0c", "318eb316c0117059dd47978854cfa92baeaac1d2", "cf8f5cad6aa87a6364f6b5dd985116b902050acf", "0b4b6932d5df74b366d9235b40334bc40d719c72", "e5b01b4439ee779b425fdd99732101d0d0a47e7d", "7bf8ba8c7fff5e8aa23b5bc68aa1756f6a55bfc0", "454cb7c8f4e9c777b1062e71cf3bef717b941657", "3d25eb8241345f86101fda145d95d89c27844fd1", "8d0dffcf36e76ebbb5ff9389750264d9fb77265f", "ed7745cd990064ba1029b15ba866b59cc7cf1cad", "86dac8e830702e720880bc62d5c1d835fbaf477b", 
"1225619985309d5b7ea7cd55985707a2e07dec90", "3794632e53a463a3b9bf83791ba6f61f1d654a79", "e819d8ec94ff9b07f81bcfcf6eb66301aa271805", "b717d84d551de252300b9f161a5551162a936119", "a2e86c23cde8899ac39d0df43d6c5e4dcf0ae2e6", "6a26016754371bfacc6fa2d116fe9d2efdafab5e", "092d5bc60a21933abf98aa85ace8a9c85df16958", "e9c2b7677660820019ac5fe0fff9ac3409555b63", "1afe9919ddb2b245e21b610fa96037724bcdf648", "4a31ca27b987606ae353b300488068b5240633ee", "41fafb5392ad5e33e5169d870812ab5edca301a1", "15b44a1c3602385b6cf3eeb049cb2d6c12bb7d74", "e19b60e5b8083828285a2baa781ceaad27f6353c", "2fdb79eac9398a05bc67304f381842a05979c943", "19d3b02185ad36fb0b792f2a15a027c58ac91e8e", "1d72a3afd6d8dd1d9a5e7c4aa5838e48ab18c41d", "5b7cf29f164ec59a15ddb55b4af84ca07231f35d", "4e93d17fddba420dd8de02ce979fd0ed02d19f7d", "6c24fed42d9a1ec283d2aa39a2dd768256a1a066", "6f07560cb2ad1d15746df6da0f992601c7bb8815", "87f64a3f33f464a2602d5fb0d717d553c91fc39c", "02086be014c4a276663e66ffde4d14f9c4cebe7e", "1e8ead5045c4b4de598c4eb570bfd9da14970129", "b5747ecfa0f3be0adaad919d78763b1133c4d662", "57c34c5fdd602df84f187f8903efc2972da806a7", "0edd128d362be5d2d107970bcb05579cbaafd948", "624706748e2e62a4e07ae761543da6d41e3f8fcd", "759f7f9e4a363223dc06903ef88fed27a3a64826", "ad4ee8e1ea26edbd2b1f921d899197d56cb2f57b", "cb474ea7c5f6d3c25a75a653baaad19cd12c8536", "4297deda7ea77fb90de2509c763738584b2353de", "2467767aaba7c380e742ff3e3d1a84a649163f38", "ad3caae50feee550b047e17699cfe7bb9e243cf5", "8bfbd28e6684e5507a150b7cc26392648b394e49", "9f775ea5be1ae5f4182d07fb7c9399df82872205", "e88b1040dc8a546f181fcd973227ae6121f15b70", "ee24d974dd3805e5d30c44c48b53a0ce23cc05f9", "66e6f08873325d37e0ec20a4769ce881e04e964e", "081e540e0f3b4741c1f27092f52fef01bb81f06d", "7a061e7eab865fc8d2ef00e029b7070719ad2e9a", "39689bd427f5668a8fbb3113019c2b8393f0b1a7", "958055a314841c2d963db16075d9f66f6a803d59", "646fa86edc22ccc452a44ac7a5953ba62fc0929b", "b01de5e9554109a006a0cface1f11d45922abc0b", "e985e7ec130ce4552222d7fb4b2d2f923fd2a501", 
"678166999912492688251a1ce98dfb79d3c60ddd", "29c23c7d5d70aef54168ba20dccdd14f570901a3", "05384ac77be3211fb7d221802bc79eb3c9fa2873", "1da87be8bf6b337076f1cf49629a5974a2f71439", "c2b9d6742e504491800cee44adb05d2d706fc209", "7f44f8a5fd48b2d70cc2f344b4d1e7095f4f1fe5", "da115fc803e692d18802400940855eb6c78691e4", "84fa126cb19d569d2f0147bf6f9e26b54c9ad4f1", "1d03698a46ff12fdfaf4811528b3e7961dfd2fe6", "34b6466e3e69547f6d464ad6b5660b1e629a5c35", "031497b0061f4536eb431545af69161d3a2b2d42", "abb1289cfdc4c23d72d0680c3ec100eae74d4fdb", "70161d387927c542860669e929374f3b032d1b11", "5d90f06bb70a0a3dced62413346235c02b1aa086", "02fa3f9517bfede1c2b61570f792f6ed8de364f3", "23b22f10d3e0a5726f58ae10c494a28103979c6f", "ecc2ea05877d720b725fb89bc3b0586a51cabdc7", "d44b198e7ab75f2c06257f4129031b5ef5b3d575", "ed6801362ab442097e7f753f163b9e9c0584b257", "5d056f03efb6470a8e13843da58a95cc985c5946", "5e0f8c355a37a5a89351c02f174e7a5ddcb98683", "556b05ab6eff48d32ffbd04f9008b9a5c78a4ad7", "13564c316b729cfb516f0c8a319990ea8e69349a", "3eda9f9c29cec4f44e210d40b54810de525d75fb", "ad2afeb4c1975c637291bc3f7087d665c3f501c8", "5ebd9457a3a09889fad8cc86a91b274da5986636", "5535ecf25b0ab5b2ebf26af15fc2caf62e25dad3", "3554ed270f27f8c3d6f8d847f6c6b2c17a9668dd", "5d38cc38f3a92e86cf2b6167810eec5893721d03", "86ee4b6b357c7f30009c34163ee7b5eccf6dafa8", "fcf91995dc4d9b0cee84bda5b5b0ce5b757740ac", "32c6086b1605698c8b775b6920741981e85b217d", "046f1c194a09fc84f535c27a3373622223a80c67", "09f47fb5a14bb607b889c5da669e670fe02efb40", "8afe84f915d3dbc45c57011e62f5dbf9003dfb4c", "aacd2fefca976b963701669a77808fde973c1d02", "0f1392c1180582a45b42e621e1526f03cc6e9ca6", "280d45fb813e75622b7c584ee7fba70066245871", "3dd1338a5d0aa47fa2aef31654ee1392b8089991", "c3c435e495cfca37828622262f997975ad6c7e4c", "3981d46c0b453f484aede9215bf9d85974d86d63", "b06b960293b1c7744580e03539713c9fd83c0b63", "75cfaaab672ab005539bdf1a9c79eed18be6d7f8", "6c5682f8ea2aa384878f91a561689a544dd0fec1", "1eec874715f47c5dfed4986dd87fe5fe93f619bb", 
"51d438a7d0841fa25367323f7b12d76c76d44caa", "4a8085987032e85ac8017d9977a4b76b0d8fa4ac", "4f46dba09e075b2e7dfae1ba2a71e8e21b46e88d", "9a5a1763e0342d41cb1d1eef18a007be6e8dba89", "9264b390aa00521f9bd01095ba0ba4b42bf84d7e", "da9b7005385544f063c0789438e79aa6da9adcf1", "60ffc8db53b02e95d852f5a06f97686486f72195", "0a8c6b40d6ca75bc1995083825e362137b130624", "927f86a6498e2a528f025e86782ce4ede317ae0b", "1901aed12ca845cf22bc20db6427635a145cbf22", "a3fd234763844663f72a8fa22a076eeadce7245c", "4a1a7e5d52e097d1defb523575fb8de1a5b24171", "e76119b7812aabbeefd322ea70c95f2805e27f22", "22b5bcd590f6d4c04b8de28217b001da9667ec33", "ea370d80391c3f4b05e04c2248d8656351b99773", "ab34ed858412b08441259374a83f4b3adb615789", "61acd4e07657094c2720bb60299dba0014ec89a6", "8f310ac62e2e3685cf836bec5ad2dd5945f8c018", "51e43578ad761c7c4d58cb159eee0f8e6cf0f7a4", "954909051c1d7d5a8ba885f1c09afe04c8aab0fb", "89358e65aec4d6665098c7dbbe3975296cc7a2fc", "515de436f65bddcf9e24cc0d3b82689bfaae3f14", "701d2c119733809f65311bc96733330b3ab59dce", "d474666b880ad7b8a7dfd910d4f0adb7302072df", "833fa04463d90aab4a9fe2870d480f0b40df446e", "30cc8b99a4d86493f111a2a4a0e4707a45302ff6", "9bd0c4082a13de0be6c7daba999b55061011f3a5", "02f1d5c896ced7f6f002eb7514ba49eca940b75c", "58622d45472f454ea64fd456d9b52ed9f7dad7f4", "0f0499989f3331396af94f92c29f2eda9b58d4dc", "08aaaf56277f7f9897353a6b09a63ea90b4cc554", "0630b153af22b949e3160612dfd913d5a5f25f98", "0756e1de70c4e3a58c78f2e9cdb2646555386724", "165d966940dcccf9c9976ebffcabe72d66996b05", "3bdef2961f9572d2d0f35148a7fa8a3a81f50dea", "cef9e528ff7fc8b3c7ed6fc0147c4db207bbe6df", "1c90ad1e264c29a8d180de47373257a5f1b5aa57", "41cbf0750fa0d08880068f9a89be92232795f357", "086a91d8db2780a14a21335260e97a9b7b27f546", "2270c94d3f9d9451b3d337aa5ba2d5681cb98497", "1de800d988f32380c54e430636ebf8913eadcc98", "7e463877264e70d53c844cf4b1bf3b15baec8cfb", "d689cdb4e535be040316722229e6362de6617f9e", "1e511a36cd6c793189c544a6f935958a2d98a737", "79f6a8f777a11fd626185ab549079236629431ac", 
"16eb964a5a0f5fa3692440f07dd60b23354f5f58", "d4bf98518791e0f9177516d4566ca5146fb09593", "1ed5d99fe46c0b5083f97e65841cd8535a9451c1", "9db7bc834ad534d48b22a87ab5f706833cc18d79", "1697a4188b9f75ff5324eb9957b8317f459bbf59", "3be7b7eb11714e6191dd301a696c734e8d07435f", "00d63b30e7e8383ea3dd2993499df70a51295d13", "a52d6c456122007f10c90989a1e81dc8e1c599da", "b4fe4bdd0e42aadf3f7046e9c681d3585ba8a205", "77d31d2ec25df44781d999d6ff980183093fb3de", "c64464957177bc9e98f6ca2251bde1b49144af25", "4e47a5eee68b2828bf7d36e7ef70e1d0f6920678", "87cd95dbe885762ec0f733bc9d232eb4d63cc995", "b401854f6ad4a0a01aa273342f6c40e6caaa8552", "9e1712ac91c7a882070a8e2740ed476d59d6d5d4", "6d2c81825312cde3914eb9f06d7965612df0d06f", "bd65efb5d1fbff19dcd3cd24452f359013eac188", "5b25b9053ceafe1cf8258d8daa818a2da80c800f", "9f47852dc4b17b706b2fbf942afef193b1723042", "9a9998098e2b28c55e0b1c9032c6092572096386", "c04d4dea28947da14cb2c62a41feec51763ceaac", "44ee63d38e80492fa9875cf3fc28efcbe44ad8ea", "10b987b076fe56e08c89693cdb7207c13b870540", "383a58de852715c8544abe60fa64d29fb7ea5688", "61e8584f5f37e6f47bdd2be2f93251ed5934cf48", "753988cd61d17a677511df50fc3f6c40ba29ae1c", "0523e14247d74c4505cd5e32e1f0495f291ec432", "5c1e0e94d6cb74448c7b3c1e0db42121be4e9bd6", "29b3f9f0fb821883a3c3bccbf0337c242c3b8a64", "0c8ac71e174a941ea7e14e7b503a12ae7eeca9db", "ea4a93a97bf0bff059034c707fa75a2ca13d8048", "110e44112bb0b742ca2c8ee607fc359698ee1198", "e6e665028740bb3f667a55bac7949ca8999fbbf4", "0a70401d161c6c180d84e8139ee8bfbaadb2baad", "2682f197ab1437b3c79027320a983de8fa7a400c", "e5f803e055a3916d3915c2c19a1773d0dab08844", "f724cbf5035e2df0dbe9a4992a0100465f5c6db5", "5853875ecc400b3b365f73cbf44e8680da2bc5ca", "be22647956f1bc8cf6f936ae3c85f5637492b6b8", "96b1f2bde46fe4f6cc637398a6a71e8454291a6e", "3b2697d76f035304bfeb57f6a682224c87645065", "5ac707ab88c565b1ed34fac89939f0cd2451eb22", "2d54dc50bbc1a0a63b6f1000bc255f88d57a7a63", "0c8ab9e08e6ba0de2cb9987cb0cc542d280b04c4", "0159b548d04a21938f066adc44bd7ca95bcb226b", 
"5b9dc0f10704b5663c06c7dde2732d4a6076de55", "82f8652c2059187b944ce65e87bacb6b765521f6", "0cebf440622050f8149d14b803a969917348844b", "0462aa8b7120a34f111e81f77acd1cc7d81680a6", "faefb598a66284e31154251b94cdb3e1bda53122", "0fca9a022f4910dda7f8bdc92bbbe8a9c6e35303", "55c16592502db5c2cc30711f4d04e4d3aa04d278", "3c7204e7f1f7023a569a3e347f172f511ba1c752", "b7a5b5d284f517397dc2a8310ad8aee2e7897f54", "763b9ab0218760aaee314fc92c62efc9a2095b46", "811cab87c37cafde75d5b289639fa409d8c4af86", "11a7c4aadb47753c8d30cbda4ab347c361e4c66a", "f6c3a40ad2dc387bb6a1887497707146280dd695", "abb68f5f393f60695ab16dcee08f2638ab3c7809", "520901f189d7943ff060239d4152b34edc0524ae", "52b7f1323229c132ed0a79194d7cedc3b289ff37", "45a2d0b9d5361742a567bd1978036c0c14c5bd1f", "23a432a388552ab52437d428e5af2b6c195be635", "0029418d56d8fe71d1d45bdaad88e5cc75dc58e7", "ad84f49b2cd1b85a6d7df2304144a093f5b610a8", "2cfee31ff4aa21e4e21cd5f018e1a9ed9ef33f5c", "4a79923948c6ccda965077287dc6fd1d3728d680", "a1557a512ff254a27c11810d362609c237ff6e30", "d3015812feb640c79ca8a098e7e27c35f4355ede", "1f833d80883c112ca854aca12de671fce2162efe", "2704959c75a2e6741867ae18f11fa822fa544c74", "3c4c15a6597223887e0a5384237fd2a89b176e4a", "f180db1f0216c097ed9d669ea69e9d3eddd8eb8a", "05b6c32304dd1673c14f1e1efce4e4d5c4402275", "427c24c75128412166326e2afda1e3cd5a35a16a", "29895e2b4d15c22a627d23be2e25bb5f6317fba9", "5d5c3aaab050ebea3f7610109617c5d30c17bf55", "11a089c83fc91bb78c7c5104b5763887edc13875", "f986968735459e789890f24b6b277b0920a9725d", "e447ef5eaec9869570951065d1635e7717598ea0", "b3b51d80bae381e7a143c6cb532873a273b38e51", "252e48be0fd63d3a786021efa8733f8891101a82", "124a31521fb0b1a9cd1b1618baf07bc69684cd0c", "37d18c386548277a6c94d221f0de7f24da7bb8ae", "456d3118c93d30fbafb076f0a9d0d614d2c847a2", "779e5beb515ed26c47dbfc08304fe49233063c1b", "0e8f56d7e0b639e182d1d9693b79653cfd98aaa3", "e0b868e7dbf9e43d01082c5f24fcb9e3c01db81a", "165b7b9ed474805c35cb60204671c9bb2053c976", "b75c93c70e8534553006c084ddc72de39517ded4", 
"64d83ccbcb1d87bfafee57f0c2d49043ee3f565b", "8b302c0edefa8f6a2ce6a41c32fab0f1ef36e523", "a511463a423f842bdb524009f6ce6c6b0ffa0f77", "1e1334f76177ddf3ddc35f7359a1e04b65438dc4", "d817729f2fc8509d138f8db73ef0ca519615c8fe", "e0526b013176e2cbb1d13b4bbbdc156d09a0276b", "7df67fa3a69f580767cfe52b74329c26a2fb6352", "8eeac4ca19ddc919423c42447c28ce546a25c4f8", "2a9114c24c9ab0d3beb598a33df54966522dec88", "dc9f29118e38602c03bb2866f8b12ce478aad52c", "5d9d1b95d5afd58f6e53512b7ddd04b78d62864c", "48b4f49ec708677fc9f70edc74fd0f92ef986406", "efef00465e1b2f4003e838e50f9c8fa1c8ffaf3e", "e265b020323937234d4ef85b4773e95549baf580", "bcee0fcc3af1f05d567d25012a64a08857d31bd5", "de4bcded33b562a8af7f601d17c3202965eb4034", "b4720674dcd92d28978e24727d5b40edb363dfe9", "cfd700cb28529a9119824389451ddde9c041275e", "bade9b38c45afd4f988e246974427685f3ff599f", "a2e29b757f4021ed5b9eb7eebf78a0bddb460790", "0ffbf9c316c90f9a1fa3fa7eca5cdcdd751c8a52", "5f57a1a3a1e5364792b35e8f5f259f92ad561c1f", "10ca3d8802ab0cc6ce000682a42fd9f6575a2006", "0947c7c46943ebbb6a4b5c795c9b54552c8e0b5a", "464cd3c5f0e9a05dd685a7b71fe88b913da520b4", "0bf26d2fd1b375f50c0a6bef086f09f7698c3156", "6e428db07a54a824f77a4c1a8fe9e70d6049e79c", "ece390deb6576dbc1fdf132f182a1cc75eb67832", "a4c12fb22e733e855459f92a2bc563645ee25f34", "6ce9b17283a3fc4f2a3df7fa1eaed80bac69f69e", "4c69da79843016d5d934464d3777030741978180", "6eebd8762996501b28d3d94a7c166c79d37e7a57", "db299ad09f629a0fcd45b74fa567da476d83a4f3", "8773d5dcc15ad7f3de8a3deb1fcd2b5e522bf869", "a9ab913cd7d2330b93e0cdab3d5fe6cc47d74513", "c71db5d3546e22227662ee0f0ce586495ef18899", "1f03f21ba6c1bf66b025029b10d4bc9bd7f65a81", "c30b9fb837e912ccf3919fdb64e9543fca57799e", "1baeaa776c4f1cf17e690a73f8b38b8064c0e794", "0bc94d0c1e75d90be373c09941899a9810080924", "032ab4966465facd284531865529b124ef173a0e", "f445493badf53febbaeab340a4fca98d9e4ab7f7", "695f6dc7165aa3fca15d1b1deb4c496fc093ac19", "04d316dd48c44f90d137ff11b9b802020423778c", "49c88aa6a22a41eef4058578ce1470964439b35f", 
"413388515ec220144c91dd0cfb7dae0c36b35d73", "426f20cf5f836f410b6ed31a990ed1bbaaf6733b", "0e790522e68e44a5c99515e009049831b15cf29f", "75c30403bad798381afa70f225e402ee7d84cd34", "4fa2b00f78b2a73b63ad014f3951ec902b8b24ae", "61668aeeb60bd2ede1f9b0873f0e19f6f845a029", "c450b8d43ed1b53a1d4c870caeff72ea13dcc866", "3413af6c689eedb4fe3e7d6c5dc626647976307a", "17501551acce05bfde4f0af77c21005f96e80553", "c158009b33989c6677f1daa3f5926887c9471c5e", "9d896605fbf93315b68d4ee03be0770077f84e40", "cf0ff92591563ab59e18449b79f058d1f9db9e35", "cb4f0656ce177161667759b46e20aec5488550fa", "54087bd8624755b9366651d5f38fdd3bbbaaeb52", "cade92eccd80dad65ac7bf93410177c885a4c2a5", "545dc167a4879ce2d61836cb300479c305f8e096", "0701f2ee5a06e9ab760ab9326a33b1d4b8d83414", "6834a469562cb563bc91ae08f4e2aa6b03e27b1a", "5b5d8b55e3365f74f4dbdbfdf7b72452a688692f", "2c7934a2f1671286370cd9adebc2872c6dd318f5", "9f59d0a003558066d2ff4fc1c77f461b4d233663", "50f88b364e818f6051330582854f80ea5ee3df5e", "969b889dc0de12a2fa10611f1a5fe57ab3de1e49", "f1d3e4aaa37dbd7996a0bbc2ac6015e4d0b2c687", "dbc077fd5412d11b33e18cbc2c5d8b92f6691c8e", "8959e091e26ff040e3e495984e1704401b7f500b", "6bd6460ec06adc1bd69d9517d116fd1545c04ac7", "5bec2b3851a540233258ae3e35b833ee5a7d22c8", "f9dd2ca5f50642378bd1ac03db4bf74a65b83481", "378b6d94bacffb0fcc1063476a7b9694e877ba12", "832b37fab195a8ed71614c87666b9f6e71e367c6", "39f525f3a0475e6bbfbe781ae3a74aca5b401125", "07421497a80b7611f50c343edcf8222fb79f0bfe", "114b12559cbc2916b90728b09f158030d332e6a1", "6d43831c4501ff44ed0ea70ef696e1c496b68a1d", "82d446206a3e9afba7e5b8c112227df681ef422a", "1119b4b038fd7d1d337d4aee232dea6c56f20cf1", "4987ac5638e1fdb116cc76626465f166998d7536", "d0a9bbd3bd9dcb62f9874fc1378a7f1a17f44563", "062864148cc9e0fdc7cd6dff35611fd30da719d5", "21bebef8ced5d1e77667c667b54287782556eebc", "5a4fe479d3b737dd29c4cd8b1241a050ea7caccf", "dbfce5a560a0808c2108ef30bd384a92da6977eb", "050836151004b1997972c3fcbff0b85de8308e38", "4f21a805c8f6c0341d78dcfebd4e1841f9fa0f86", 
"85ac4459daedecf04c46c0fd90adf57238a5993a", "63859f5b6d28aadbf9a41cb161a47fafc56b63dc", "b340c9efbf477b8842bd27168fe1d81879f6417e", "f9e388544ae371cdd1d73b2e444cb46d9532f530", "a5683661e80668e05170bf17bcbd754d1b07af74", "70d64970a44c093e9756936fbbf1d88acb04dec5", "0162e84bbe995ec06e8e59dd9023c67d8f0e8880", "4913477a16c8354f032546b1444728c592823586", "78c91d969c55a4a61184f81001c376810cdbd541", "c6bbb56a26222bdb8ce7dd829cff38b67d4b03cd", "480492ca998b3393b370d176d1f990a3db1c8e12", "eaa4c662f5f6408dd860757f7ab669a13b7f0a24", "74aa887cc3d52b7086d435ba5799ca8b1826e51f", "01b22a3a096df88a2c123e04c43c60e5888ab555", "0bdd8f824fa4d4e770e34268a78dca12fb6a135b", "af0fb9f0848a87e4782af88d56199ea03eaf2aea", "7525e1fed92a780b6cb78190da360a3a7b611885", "e18cc09c3d3d79df6cd40ea5cf13ad40eacb8a73", "01c6a4ec199f92c11b5e46ae3a94e4228bac3990", "3cd27ba9984367f0024767f439a5089255737036", "219b7b157f2a559ecdffe21c2a0edf5285931298", "95ad4f39a2368b26c429cf8118364d63c3617ccc", "e54f6f8220d958450602a3df56153c6b82cc60fa", "52d9477a8293d44b0f8be5c07d56d468d035b0b0", "6c52c12644321d4256306feaf784ccae6ebc4fea", "b79412cee14e583a5c6816c1124913f560303a95", "3765c26362ad1095dfe6744c6d52494ea106a42c", "4cc5fb6cf48b2c58b283460b19f3beeb7e5b6a22", "f5db05adb6e89986d9ae2da0b81e1ce7c8efd9ba", "afd1e4157245d56711d4f16a5b7c9fd1f39a5139", "0691b9cd1b9b44bff297a62277be514ede9df01d", "94c4c325c8f7faa85a12b70fe7218e87049ed203", "970b4d2ed1249af97cdf2fffdc7b4beae458db89", "62e34fff5a29d2e27bb3c7ccca43daad2d164ac0", "2c953b06c1c312e36f1fdb9919567b42c9322384", "29d591806cdc6ef0d580e4a21f32e5ad9d09d148", "7fa5ba03236425c2b3023391ca3437df4e69a255", "19cd6053bbb9b9c67da0c0881e31019f9ce28154", "56f942a738022cb9af243f3336ba1f035783f73c", "575d6a05bb27316ad677f19e79473e314e6c6f94", "5120fb7db8eadb26118847d0553fca1c22ed6f07", "4bb83b00e7b8eb27ad04d4bb80499e91fc471a07", "076d909aa2674b29fcbf09b5cd5c200926710c3d", "b6bf15f123a814538fff5db757a474be6fc0c72f", "f6fa97fbfa07691bc9ff28caf93d0998a767a5c1", 
"fb15a8189d02e3b989d553a779670f409f53fb17", "2a88541448be2eb1b953ac2c0c54da240b47dd8a", "d900842af0542c283fbeadeec9905922c566e7c2", "81006fe4c4947d225b9fa17e6b98b8acb36a7692", "b73ba189d0d1a3e2502716fee60c6865a7964d6e", "2b726b240fb75e459eacc8a6916a26a93373088f", "d0b77a2cb1621d0b22005c61656349ed3f7b747a", "5c6841db352d54ba6e18f362b8cc6509a15a8fd3", "3b5b6d19d4733ab606c39c69a889f9e67967f151", "74e68366a075b1d61952cbf7d51e23c2cb831279", "de1aafc4f737d17b211fb84b80bace40f82c148b", "a4cf949a3f6b7b094983acbd6a042766e09e8a2f", "9933357ce4ebb5647925ca8ce39ec68984b4e5a6", "c338045f80ab3465bdc381f2b1791744b060fbb3", "86eab1845deb3614233360b6bc33ce1ff074458e", "7fcecaef60a681c47f0476e54e08712ee05d6154", "d23d747e9936299b177b0358ff0502a884276aa2", "bd77b5e4edd665a0f42203760b8fb7933101a448", "9328c702d646928690b5725203b79d7dcc217802", "ab243a29fc832df9776efecd37a3a9ae5ee48293", "037e7e2941239cc35782b8023f421799bc71bddc", "2049ca79ce94ddfe0cc3d39bf770f580a740f3ac", "2c4dd7d5ec500f180cc8301d96389a81d3a99c65", "0359935f5fe14eb4cb8af9486291f760495de7dd", "07eaf19eecf4ccdd5f8e3367c1675d9f4addd2df", "b9f65f3ea5aef51e7955e1972aefc1dddb400b59", "12138be732a2aa10e4eef460979bec64eb8e4f4c", "f8a3135aa603d53bbc5c13a7b16984761694475d", "d9bc16dcbc13502389704e4a0bdd8ee7af618069", "059eaf1206d0f28c19b06972d9946364e0e52546", "6434b95401aea9ece22b2b29950118afc163c2db", "34aa3dca30dc5cbf86c92d5035e35d264540a829", "f9edda8351a65d23967132a4f9f5d24ff3bf4f11", "3a772ed83fdc90e10def9d38f59153aee49cd47b", "e38c93bb8f7ee103eba4b78443d94f55a63bdf08", "ffae2fe85d3c93610ac6270db2ddf1f2f6779ea8", "bf065e114e50120a6f2718d656ff56823cec639e", "99b7ff97ad54308b816e47d9bbf6704b787b8f52", "7cd097f3866d56114c1778c0d9ac1c4a9a35cff9", "06b502150115324c8890b6d01184d74dc6b8ca72", "527087539d468976684031414d244c75b35fe1b1", "5b3f94200bbe38e1115e9ad11a5f5707a7904732", "76f73c884e4437a22afcba60193bbd7f35e64aaf", "1337acf12805a24968c0518e695ca94f103e630f", "64b22e5af5dc07309c85a742728ff6f476bd71d1", 
"f31040b6d7e135753eccd6ca573d784e9ac9c8c6", "5f2989f2c323a4fb5de8720cdfbbae1887d8e6bb", "31f5eebfebac54cf5817deea7da32994637a5b28", "765263556ce90c5c0d86d3e6d8a21e04a307b60d", "aeb0f4ffb57e40c93606458707622c0b37ea3790", "b76ddd8e9098c4b361ef72ddaef42bf3c85f5825", "0353903b504d6246edcdc6b2c7d32e59b5c0a863", "6d77eef66324951d70d98d6dc99c0e95e5b2fdf6", "38eb71578f82477f4b032481bd401f19f14eaf25", "4991dcef497ddd7ea115663985a9e0635494a95d", "8973910c8acfd296922d9691a533b3c5061ec815", "cf203d22a4a292f24cb6eb8bd50d4b0991fdab7d", "0c0dc5c307483642e15283d0d52a4159483d3df6", "966f2106b12d2425185add5666816abf8c834e64", "7e45b27ec7339dc557866b31e74c71a52e99fd32", "2edb87494278ad11641b6cf7a3f8996de12b8e14", "8e290eb707a39603ef2862ead218a3ebc4e9d687", "75c9dc4cbf7224b244a5c9046f998543ee8926e7", "1951f201578b89943a625e16efc1254ed7bbe191", "75cb21fa931e957941c0237a1030aa36209bae36", "1ed4791ea3bb9a9503a1cb61b525757e0e4700dc", "2fa1629d75a03b950c56bf9b3430b2983abd7881", "53e8781bb152e8e05ffe03737082448ac3378e37", "d315396cf26613a552a41630a9698b71b6fb5f9a", "4f3484a1b08b332479f0cc0197528e9007292a90", "ecb097261781ece1688919fc638517460e18ab4c", "6ce111fdfb7ebc8f1fe23ceaf859f7be799d5c91", "930a33327a030a5a30e3912feafbba0ef0d57e13", "d74da7687d4c5d384039d5f551d8cb1a1adb3194", "708ccbc563c9da72137800ed86949c4c6e1a0fdd", "df3c2ac15c71b6cdc07f4268ee83d4fc1984545f", "c4f632a1b6faa43c217e63c58a4764511104c303", "a1e14192035fd64425840d613450d272c43dd39f", "a9351c53346be32b7d6e0e80a93f2e383fe85eae", "ccf4049dbb61de4e2f8ef918b8f639b148109e65", "639f0d9e9dacc3d6f91aa2b1a9b9152220c24789", "205f3d654b7d28d00d15b034a8c5b2a8740bd8b6", "c2b1007824fa7ce3a7a94209f0be0902a3454bae", "3ab0b3c02f4fa7f1d14315599f4f91563ae565f8", "87e4178f71990818a3c125a41db91749bba17cc2", "e58b63832dd254bc0cbee190db00de50004b46b3", "012cdba79f672e45240791d6c52f213640347b54", "0d5bb6e4d2394e78c25ef9312ccbb3085d294d66", "1afdc0b42d25df25a7cd4b304493e9b521c84f0f", "84908a9162b7243e70bff6861d084813ed011f0f", 
"700ae4e8a6e64e08a5d551bf174f31ca20b61e18", "e0c6748799529ee035b21f040187d1e6df232624", "0bf7d4f5d05dacfa539e418a2fcfdce3a9624ed7", "19ae769aa067b5bd781882cd18551f7abe01bcf1", "92dbaaabb91d14ec71843970c604303680c26695", "7bcd98ee2df3d14eae7bbed713208cb7da7b5db0", "4d48f40d4599dcebd3b31667cc113e140f95607d", "8899b274fdb4ff77de40a15a958ab5ea4b4a6477", "5f1019f26479545a6f2f9c364b995579e283d2ce", "e6ffa1c305512e0d4efb9a88cfec8294ef476e06", "2171a1414844b161c9f08f08f76d33926d1db321", "8c618c038c60a385d220193f87b8b0759aab0fd7", "2fdb3576715829aa9bbaf74825236bbb71d06f1a", "2e355890915492ddd46063828f8534b734b8f58f", "9f1b39e8d157b74181c666e85e5d55550d762409", "774b649c75078e10759b3b6c8ea581e68fc45a40", "99eb4cea0d9bc9fe777a5c5172f8638a37a7f262", "1ecbcdf1ff0b78170b39985a26a7ab034ab73c60", "f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f", "c06b13d0ec3f5c43e2782cd22542588e233733c3", "d59a9d80e7d8c875d2b73241a8b02078ea6ad0a7", "320fa825e86f3b74ba3b3ebaef14e1186784f1ec", "6aeadade814902ddb9d7fd0e1d2d64a70f0e3ff9", "dda0b381c162695f21b8d1149aab22188b3c2bc0", "43edcd7968be3abd25f8ec44ab6d6182b7277afd", "d9533bede70753bf1fba1e4cc7ad492b88ccf373", "5223f3485b96bffe7dd4b3aa71e63fd2b049fcf0", "5b1d78b160560db5f581e65289ce5e2f99eb9b1f", "8e8e23be83ac0900e082bc795337887c48467701", "52a152b985b298be4b382d0b6045e31f43850c6f", "309a62afbe1ca7ac181452c5b3c50e2f860e7e65", "4a303369828d9334022a0f5e8ad2b1a715d1c0c9", "5fb23b55de1c613517f55a1b878bc68bd4b543e3", "8a14dfe0e11e03505db9c0d84bce96f165223cae", "2bc16bf87ceec85822912ef612385e519a6f98b5", "e91c7dbd33a3047c70d550e201ebdf4353cbe929", "39df6ca15f41e5a674ed8cd1654e699dbc8b8c11", "e42e7735f94a8f498ef0bf790ab43a668f904848", "fcec633bbdeaab2d61fcc6d86f74383ccc3621f9", "1ba61a4fedc217f7bd052d1b2904567c9985dc44", "8abe89ab85250fd7a8117da32bc339a71c67dc21", "41ddd29d9e56bb87b9f988afc75cd597657b2600", "96330777eb14dd6bc31d61b0d7f276083a04e34f", "5d8ab5c473eb9e083ceb35ebeb00a062114ee6ac", "0141cb33c822e87e93b0c1bad0a09db49b3ad470", 
"bc910ca355277359130da841a589a36446616262", "5957936195c10521dadc9b90ca9b159eb1fc4871", "a0aa32bb7f406693217fba6dcd4aeb6c4d5a479b", "5213549200bccec57232fc3ff788ddf1043af7b3", "69b18d62330711bfd7f01a45f97aaec71e9ea6a5", "fa08b52dda21ccf71ebc91bc0c4d206ac0aa3719", "25695abfe51209798f3b68fb42cfad7a96356f1f", "9b4b2a575641f3a7f8a5ce28b6a06c36694a9ddf", "747dc0add50b86f5ba9e3e7315943d520e08f9eb", "8dfe43c76b76a97f8938f5f5f81059a1f1fa74ed", "ec7cd3fff8bdbbe7005bc8d6b7f6b87d72aac2d9", "a73405038fdc0d8bf986539ef755a80ebd341e97", "11aa527c01e61ec3a7a67eef8d7ffe9d9ce63f1d", "b806a31c093b31e98cc5fca7e3ec53f2cc169db9", "c38b1fa00f1f370c029984c55d4d2d40b529d00c", "427cf6528901d7fd5e5acfe4175ef809ed8ee7fa", "258b3b1df82186dd76064ef86b28555e91389b73", "102b27922e9bd56667303f986404f0e1243b68ab", "74156a11c2997517061df5629be78428e1f09cbd", "9e8637a5419fec97f162153569ec4fc53579c21e", "c5adb33bd3557c94d0e54cfe2036a1859118a65e", "909c23143162d98ffb2447f0018f92ac6cf8591b", "18dfc2434a95f149a6cbb583cca69a98c9de9887", "13f6ab2f245b4a871720b95045c41a4204626814", "a85e9e11db5665c89b057a124547377d3e1c27ef", "4223917177405eaa6bdedca061eb28f7b440ed8e", "3d9e44d8f8bc2663192c7ce668ccbbb084e466e4", "355746e6e1770cfcc2e91479f8134c854a77ff96", "c18d537037caf399c4fabfdec896c376675af58a", "43ed518e466ff13118385f4e5d039ae4d1c000fb", "370b6b83c7512419188f5373a962dd3175a56a9b", "fbe4f8a6af19f63e47801c6f31402f9baae5fecf", "195d331c958f2da3431f37a344559f9bce09c0f7", "83e093a07efcf795db5e3aa3576531d61557dd0d", "dd715a98dab34437ad05758b20cc640c2cdc5715", "2aa2b312da1554a7f3e48f71f2fce7ade6d5bf40", "6a26893ed63830d00f6d011679d1b1ed2d8466a9", "df3d85ecf8e326774cab59aab75b572fcf9767cc", "06262d14323f9e499b7c6e2a3dec76ad9877ba04", "15bf0e70b069cea62d87d3bf706172c4a6a7779e", "141ee531d03fb6626043e33dd8f269a6f1f63a4b", "c2e03efd8c5217188ab685e73cc2e52c54835d1a", "4da735d2ed0deeb0cae4a9d4394449275e316df2", "3080026f2f0846d520bd5bacb0cb2acea0ffe16b", "3fdfd6fa7a1cc9142de1f53e4ac7c2a7ac64c2e3", 
"be57d2aaab615ec8bc1dd2dba8bee41a4d038b85", "f11c76efdc9651db329c8c862652820d61933308", "dd65f71dac86e36eecbd3ed225d016c3336b4a13", "562989741c0627b2f966d3abd5f87047503d0fb8", "305887fe0fce91470c6cb042616cb36486dc0e3b", "ede1ba873ac2fa186a8d2f58be6cde2284c65d1d", "21d9d0deed16f0ad62a4865e9acf0686f4f15492", "49e2c1bae80e6b75233348102dc44671ee52b548", "e92c934c047d0ec23e7ed3a749e14a0150dc1bc8", "0235b2d2ae306b7755483ac4f564044f46387648", "3bb5dc57a828cebd484750a51d9cdcbaa51d8f96", "d84230a2fc9950fccfd37f0291d65e634b5ffc32", "8df57cc014ef7079e32bd7928dfde7e78430789a", "20b8a76e988e796f0f225876a69842f6839e4c98", "046865a5f822346c77e2865668ec014ec3282033", "0aa303109a3402aa5a203877847d549c4a24d933", "c6096986b4d6c374ab2d20031e026b581e7bf7e9", "705375260ffd0ed261fb6a85d57126f7537bef1d", "05d082daa3e6f86adeb78b26e8cf07b94b418377", "930a6ea926d1f39dc6a0d90799d18d7995110862", "f1280f76933ba8b7f4a6b8662580504f02bb4ab6", "0965a62c9c354d2c7175e313ade9e38120f1bd4e", "49e541e0bbc7a082e5c952fc70716e66e5713080", "13d3b696cdbec99b2fdbb34e9d031bf8c683f112", "37b6d6577541ed991435eaf899a2f82fdd72c790", "c030de7fc30b0fbfbfc7719f6b22a66589435dfc", "df310591dfba9672252d693bc87da73c246749c9", "8bed7ff2f75d956652320270eaf331e1f73efb35", "0d3068b352c3733c9e1cc75e449bf7df1f7b10a4", "0d3ff34d8490a9a53de1aac1dea70172cb02e013", "ff81f2b4ff19f043b61b5f720643448711ebdb6d", "fcc82154067dfe778423c2df4ed69f0bec6e1534", "d84568d42a02b6d365889451f208f423edb1f0f3", "9636c7d3643fc598dacb83d71f199f1d2cc34415", "19e32a17c7091dc88cab9e858110deb7769b3f5c", "45513d0f2f5c0dac5b61f9ff76c7e46cce62f402", "1ab881ec87167af9071b2ad8ff6d4ce3eee38477", "20aa8348cf4847b9f72fe8ddbca8a2594ea23856", "dbf6d2619bd41ce4c36488e15d114a2da31b51c9", "1e1a67a78badc619b2f9938e4a03922dcbee0fb6", "0d0041aefb16c5f7b1e593b440bb3df7b05b411c", "b593f13f974cf444a5781bbd487e1c69e056a1f7", "da24f3e196c5345ce08dfcc835574035da197f48", "9892fca93b59750c89ca123a21805b928dd56047", "aca273a9350b10b6e2ef84f0e3a327255207d0f5", 
"f1748303cc02424704b3a35595610890229567f9", "3edc43e336be075dca77c7e173b555b6c14274d8", "974b32c4b74cc6a15f44cf8b5874d6f20273b21f", "8b8728edc536020bc4871dc66b26a191f6658f7c", "1937d32117b6ce27188408ec984f0bcf2375e6a7", "3991223b1dc3b87883cec7af97cf56534178f74a", "3c8da376576938160cbed956ece838682fa50e9f", "1e516273554d87bbe1902fa0298179c493299035", "14c988aa9086207b337dcc5611aad08422129b42", "fd67d0efbd94c9d8f9d2f0a972edd7320bc7604f", "79f4bd6b3f0ebc4b81c13e7720c42285464a858b", "9857eeded6b7608ff862174742b38946102f5008", "79581c364cefe53bff6bdd224acd4f4bbc43d6d4", "f2c30594d917ea915028668bc2a481371a72a14d", "035fb6317dd793a51c57ccce6f79c4aa1437ea93", "58f94993d32c01b617adbb7a782cddb5b2dc461d", "a44ba9b6fad29cd3cf3a72b868f42e32297221fc", "37eb666b7eb225ffdafc6f318639bea7f0ba9a24", "151c342b33d9b8332c92185307f8d9dc9baa8047", "0dfe04d3fd96dee4952bac36bafd3778c106dc1d", "ea69a469cf2828f7ec2a1df5d0fd4f669ce6ced1", "34698ba3bf6b093781fc1bb89f1d0ba7c93ede3b", "66af2afd4c598c2841dbfd1053bf0c386579234e", "9d1cebed7672210f9c411c5ba422a931980da833", "4f0e82a326e9ff158a5092ba6206877cf961e19c", "d1eab889815d3686e3c3e0f80246b058823f10e1", "0d57d3d2d04fc96d731cac99a7a8ef79050dac75", "3b9d94752f8488106b2c007e11c193f35d941e92", "fbc9ba70e36768efff130c7d970ce52810b044ff", "b22f5f0929704752a16d0f65f00a5161a059d8e3", "1f8304f4b51033d2671147b33bb4e51b9a1e16fe", "ffd152065390103497e29f00acc040567e1481b6", "8a84fd21ffe7acfa207439d2e30ed15e491b1991", "4793f11fbca4a7dba898b9fff68f70d868e2497c", "eddc4989cdb20c8cdfb22e989bdb2cb9031d0439", "090e4713bcccff52dcd0c01169591affd2af7e76", "291f527598c589fb0519f890f1beb2749082ddfd", "a01f9461bc8cf8fe40c26d223ab1abea5d8e2812", "35e4b6c20756cd6388a3c0012b58acee14ffa604", "bff80048f4efd4fc2082a37b7c8f645cdd9cc0c8", "2c8743089d9c7df04883405a31b5fbe494f175b4", "34022637860443c052375c45c4f700afcb438cd0", "28126d165f73c2a18600a9b0440f5e80191d52d9", "e3e2c106ccbd668fb9fca851498c662add257036", "cf5384353e9285d92ecc59395cb1855168a22fcf", 
"be65ca1665e041a8d801759fec9a918ef7ff5c39", "a5219fff98dfe3ec81dee95c4ead69a8e24cc802", "1e1d7cbbef67e9e042a3a0a9a1bcefcc4a9adacf", "8e88a651e7bc9d200fcd7af0ca93690e0929f469", "3d24b386d003bee176a942c26336dbe8f427aadd", "890a4a3f7006a39b614d8836d67cc46a6a2a36c9", "111ae23b60284927f2545dfc59b0147bb3423792", "c1c34a3ab7815af1b9bcaf2822e4b9da8505f915", "767723f2e8ea0a39ca2b309c83a4c1d27c1e2c54", "5b2bc289b607ca1a0634555158464f28fe68a6d3", "14c37ea85ba8d74d053a34aedd7e484659fd54d4", "15f3d47b48a7bcbe877f596cb2cfa76e798c6452", "fd30eb1d0cb417202c7cb499aca1d81eb932932b", "fd51665efe2520a55aa58b2f1863a3bd9870529f", "97d1d561362a8b6beb0fdbee28f3862fb48f1380", "01d8c844848d3aae4da671f0131d050149e15b82", "d252e10024a22c8274ae67dbf37aa854d75a85f2", "6aaf91619dc41d1e442c5439fbf006965758b915", "e29ba8930f379245035ff7d33b5693592ee491aa", "c55a6c98887b3079647d0edb4778d81bab6708f6", "8d95317d0e366cecae1dd3f7c1ba69fe3fc4a8e0", "65293ecf6a4c5ab037a2afb4a9a1def95e194e5f", "2c5b5a5e4b8cd001e535118c2fa90bff95d51648", "2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d", "c9adf1ff0bad0e0397ae98d4e0857192bfc3d59e", "18de899c853120a1a2cd502ebc3e970b92e1882f", "aea50d3414ecb20dc2ba77b0277d0df59bde2c2c", "0a0d5283439f088c158fcec732e2593bb3cd57ad", "422ca72f0ce9d63d2fab1ecc7f3c77e5c0fbfbff", "08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7", "b9eb55c2c573e2fffd686b00a39185f0142ef816", "b81e765ee5acc1aaedb205e09318577e4f9c18c2", "95ace502ba23a8a5543b882937de23b892112cca", "683f5c838ea2c9c50f3f5c5fa064c00868751733", "d14badb33740b42833ccd620b344fd75c35df2b7", "81b4f1f6e8b993a04fb7e91842ad39691cb9e4c7", "83272537fc29345d32f68a8ab37e4170df10d3bb", "c9f588d295437009994ddaabb64fd4e4c499b294", "5bd2ab5f5eff88d0489d949972e50875891bc0fd", "2160788824c4c29ffe213b2cbeb3f52972d73f37", "2299d971cb38dd3335fa94cfd5b9ecc2f7901dd9", "a30073beb1350032cbbbff79b71c2c3f1322b614", "9e9c600919332dcabbd32bbe81a00d1e47449193", "51f9c3017455ca7440f34c01bf23764d3cc07aee", "f7ac8523770b5965aadc27cb5364d77853113be4", 
"2eefaa9c278346b9e0eb51085cff490b0a43688f", "5b9ae36868ad04f8eec4b51c425ea987ef74caaf", "3039381ced50a910234ceca5133a69aceb324faf", "82ec2ff0bef7db7e5ea48c42336200fb0e44dbf9", "4d0e64da142299039841660ea03f24575174afa8", "ddcb62a2398cbf499e1852ee07956d4fd58e771d", "e4d90019c312ed87a236a11374caeea9cc4e6940", "29f21bf4aa648f0996b41b03fc11b07a0e550f46", "6ab30c67fa966b6bcee61de6294245e2dd8604d8", "4813d9332a1f3ef2bf5846e81005895322310bed", "b8e35566129299c3591af0fd4f127e5e0d0b5774", "18ced9c7bab4d7fa69ccf2d3c8783317ba94e59f", "4afc85117d687628df7d88836e2346bc4aa77e16", "4af25075729aa4d0fa4ecf6c948f59ec15bf9565", "3f04365f59ee8bced434ef82a04ab0aa0ff43cd6", "c5de09398c13f440c03bb4429d320e0e65c63b52", "219f54d3b9e2d674d9866ae80fc6caab2a9a6ed2", "92d6131082286750232fb6546470adf3f8f9c7cf", "a02c87cb623f5e20cd986ffcf046d63a5add0b85", "7b8ae23573fb33e6a762e914128c425a7f381fb7", "cebfafea92ed51b74a8d27c730efdacd65572c40", "d535965ba320a20bb4df7fc671c83795ef48aeaa", "6bca057c25b48fa7d1607e5701c46392ec906822", "50e5dd45a94a56cb973e51dc3347e621266db7e4", "2d05fd37ef8f148711819d777757bdcacfaaf175", "516c59a82888f9b401db596ff067a0e4f9bf3db3", "4cf0c6d3da8e20d6f184a4eaa6865d61680982b8", "623da0faea1f98f238936e34f361518829edfdf4", "4348c8706b92a9bd90dbbd735f824ec79e96dd71", "83df0ec6071dfda29da831860fdb2a1f19a6b3bc", "3e91300a76e5c88fa9850067546a6e2fa1c8914d", "0670849811a6ba4fbfcbe11126b811dd94e06e66", "e2928b6bf05943782ff19c040804833c95435666", "d1eeb413be2a5abe0e20fb57060a57649244409a", "3d919e63307610258720cd0d39b37904acd5c77e", "45ede580b1e402aae6832256586211a47c53afe3", "bc5be2dacfb9b9a524bbce627268eaba70066236", "4e5e6d405331aa4aafc88e3ab31c7f45720c00b2", "e596a4aedb5cda6f0df35d38549564a0dd5546a7", "8e8c141e06d52cee1917b7268abca315bf3af714", "decc4de8b6964ba473744741c3a46ac37f2d6e3e", "bca09d92a25e5cc96df5c8d2eb87e2854cdc02b1", "dced1e101af923fd1a70c09df972f3b81a998a6b", "07050b9fcf949487e32aa30d0534e46d7eea58b0", "8fb849fe51fbf4b56393cfef26397caef2a22fb0", 
"78a7b042dfdc8c062a1ae9b4b93195b434e91aca", "8722ab37a03336f832e4098224cb63cd02cdfe0a", "c5fcea39a6d3e0abdfcf15ff62cec0950813ed0a", "4061f7c60deb3f22e10fab6ae3fccb69e06c34b0", "eb724fed2a6457a2ee41a205892004116180bbfd", "2ce38dbb4d4228ae4a7016b0422155a274b88659", "35ee8aaa4a2ce4d36a1c608268a430eddcbf5c1a", "bb562d026664e53cada5e78ff99d9b8f223117e9", "7bee43956fd72d86ce7d1f8f6667aefd2de75f98", "58f7b9ebdb9b380cdfbef12b8abefceee0160a58", "dc041f307d467918ba684d3c425fb23016f3b28e", "3633b26502ef1697a59722be794947bc510dd0aa", "a654fdf590a063d15344caccecff76d971635993", "351de1f7862bd13a82fcfcaa698b4efd53bc2c35", "b1f177f70086ee19cda2738aaa124d534f09b3a4", "202a923504ea81e94c06a81581539b893b461ee5", "2864c8df356b1b915e16bb285bda64bfd7396f74", "3dcf02ecdd6bb1d487c554ea987777bd5db08950", "8f20c3d2389bd6729bdb1cb5831d3ff0d7ac2c65", "67e7c14ecae22868e8159c52941194cdf6f003a2", "c2b03ad0572786fd37c4a4a908356cbf85d79f75", "a94563ed497381c349307499e30ccc75054230b8", "85f14bb2ed4b9d680ae4062cbd571752a1ff1dfa", "7ed6ff077422f156932fde320e6b3bd66f8ffbcb", "534ac5cd8e2503b333efcc94d92e5359b54190b1", "2f7a5dfc04b037d1841e1fdd08d1462f34c6a1da", "fa72b7140f9fa4fb975344109e597e9566c65f4a", "a5c27bf9b27af011b779ea1aff0543ae5334e563", "da8d102e9e5ed5d79200e019b8d47a256a1f8452", "06f7e0aee7fc5807ab862432a4e5ade2cda73c4b", "f73b15d33b9dcf329cf605815be7493b162b1fab", "9114f5247562c0a71ea9aef23d474e06dd96d8cb", "3d855f0665a912ff2c7736cecf9b8eae3effc281", "740d4a7322eec687d1e6f0d2b71fe2d804e1ae70", "1958da636ce92d36c0985a6cb00696d90b2475f3", "5b7856b7669a5746b7f14d2ae5452aa2dc89d454", "0fc52c1f7a9601067cce260f8dcb7a7e796f4281", "ed1da141e83ecbf3998fe88f222c3d50136c548e", "a80355dcee5156b064e31b39c6b72037044ed87c", "34994e291f2ddfbb2938599730a5f7a79498dfe1", "113ced4a8c5ecb6da1b2eb63c1300cd8df982917", "4dcc09fc3718721b41460dda559c1c6f507287b7", "214c966d1f9c2a4b66f4535d9a0d4078e63a5867", "7ee17d2001c9fcef63e3a56610cacc743861d944", "47de0569259e6a420c3eda69cdebf01bf85a1acd", 
"f312fce73aabd97bf4fc02fe2829f6959e251b1e", "7033b916a7f2510ca9766b7a8ed15920a9f9e2f3", "fb22404e46dd46b2c2cb9a85227a1ab6a8ae4f52", "533d91cbb5e306c96b71b6f776382f3956e5dc7d", "cc00df59309c13f72fcbd44c88509210c157ce47", "45619a2b7b41fea02345badf880530519d3d4c8f", "f4065d13bcad78b563108075f650c29a2f3f1917", "58f445fa45d4bdafac43893a55b21348f9e1e6c2", "40b2652cf3bdee159dacb6e18c761003c31f4205", "cf009a6b02fbef514a4bac9695a928080ceac764", "a9f3aed7013b1a09b7fd7464b45e3e0835600718", "02039d41e9b61af510bab0b3252396a0e350afa5", "1b3d5d95e1fcded017f193f5cf9772bf8a1ed108", "3493b2232449635aff50fc17e03163cb4b66f1b5", "7c0cf2fa4ed7cfb1cf41c986fdc3b82c53177854", "f39b88ac61264e9a33dcdf47722f0d048a8e490f", "4c8a4f659e827a3189e14f0efd00987dc4c7785f", "382f1ebe6009e580949d5513bc298cb253a1eeda", "cb1e34d7fcb7fae914fcb65cb9cf25199d49cec9", "0babc4af06d210cf38bdf8324c339b6cf3f424fa", "8b162c2a15bc7aa56cdc1be9773611bc21536782", "22a12ec4258f223b43761e5c4729787d1aaa623b", "3027727790598d913a8ff9a1bab4538176ad9fc8", "49d4cb2e1788552a04c7f8fec33fbfabb3882995", "c595863b90b904a7b3197667b62efa16b0fd5ff6", "36d858eb19bba43244b92f7faabfce47b13f2403", "3cf24e5e15deef58cfc9ef43470a7db9a2bb97d2", "9d4ebcd84c4ba2241cca3242e22888558b62a0e0", "cd855c776240150f4dba7a5975c7011a9c6737ac", "5e4a451faf2e47486a5dbeca8a5109b53e22d95a", "75f302f1372136c5e43e523bacc0a2ddf04c3237", "11b45236b2798091ddab35c572a35f447bb8d717", "ee2bb1a7f58b4c3735a52e7692e6293413d72dec", "6555977f0d5f993e0bee7c2ec3d1fc819057da15", "75a54f49fccee29faca8931fa8ba700030dcaa75", "c5ee2621e5a0692677890df9a10963293ab14fc2", "48b9f9ddf17bd29b957b09f9000576e53acf8719", "d6600601e1a1101d991c0bffbd737975db9ca262", "e4444820fb3f6d1f41c6ea51c6b2ab8ceb04a3a5", "0a850a9fc853c358aea1167e1f965cda8980b7fd", "0c6fa98b7b99d807df7c027e8e97751f1bbb9140", "0a267d927cfae039cf0a9c995a59ded563344eb6", "d7ed878c08c90186e3bf607c20ff943834ad0d68", "67296e6cd0084c301339889c4ef1f71a04406b3d", "cc9f473584c1a7f224b42d4a3a3ea2864173cc28", 
"105fdf31d14ec55fda91c05059ec83162ba7ce3a", "47f84928dd6e40797255fa1e1bbb3c12b2659a7c", "5b600cfabfb3c99085ca949fc432684e7ac86471", "8d7d02bdd3a6dfc01982468ed3eb4e66d99a302f", "12dcb25f10d42ad2b4352ba9fe7a6a32ee2635a6", "323cd51bc18c700fa88044dd24ae663a7eabaa68", "d0a188debff9baca296787dfb207f151cb78300a", "7de028e5c878b56057559bfbd57f1ce6482ec282", "577c1d59e43f04a4bfda95b0b9e3b41d893bc0a2", "cada850299f0aa71ecd9b37a2496802ad8d48455", "4b4106614c1d553365bad75d7866bff0de6056ed", "8a29378973987bdb040f35349d1c5a86a538c0fc", "fe6cf3776cc88a09b5f3013601c35b9b2b06d2d2", "562449f9e8e7f91521e64b656b4adb472437f757", "d6dab84451254d7fbb5b9e1d40a7d2a92dec13b3", "ede16b198b83d04b52dc3f0dafc11fd82c5abac4", "a9d1d00d6897ae23c9a7e9fb75a3c7417a6730a4", "d50abaf36212d31c758643c70f487ee272c0b627", "b4b6a0129bf6a716fca80a4cfc322687a72fa927", "19f8f011516fe6ffa4ed74776a0149c9dbdc5ccf", "f3062992cb10107b9d1e3699c8a61d5281886c4b", "33919313bb3cf09b00f9fa2253b30af33a52bc51", "81fc46dd71121cfafbb11455745ae62f6eca0b25", "95f641ddebc34740b7d44b0b8e8bff0163534891", "230aa8fe68068f2e64876bdf1c41d20b4923288f", "212211d642aa75f66f8ad3ec04da3a4cc089a5b3", "30c43c0eb26839541861819f420f1ac2dad0711c", "4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7", "8a6ee59cda77eeb7e126e3bc3d82e742ae1b3e58", "4233b07033a1ef8af188383f30602a5fd0aa2181", "fa747db22e9e6cd7a64019eec6e0dd53e94be4b3", "545388d4a8e79cab605cea9b3e1ff1da0f848f8e", "0f8a0c308bb63aaf2741a4817ea0464a85854e4d", "fd3131b370a81e25aa962d266b2b32922ea4e613", "93ece2d76dc69c7340c6e2c499b58c27dc9e3de7", "550c8162757b9fb649efab8529d86daa99700fb1", "2bb4d44b3d9ea2406f9ab1bb7540b8c0b5a94549", "af62dfa3e70a33fcf14a38744e0111ab8e5dfa67", "11e05870a90cfcf5b1d2e0d0d3df9d1afb9267ff", "f5f3faa71ed2b61fa3a99bde25d6e84bfbefbb0b", "b137480d2ccf3b53433de208815ce891d95af912", "537f8c50369490cc90303522dd4ab80100fa1e59", "9a08459b0cb133f0f4352c58225446f9dc95ecc4", "da3708836813fece4170585f2f9248a34c6ad183", "a2dd13729206a7434ef1f0cd016275c0d6f3bb6d", 
"a6f1e230ed26eb93474ade60e8f31b3aa389162d", "09b0040ad09d61f3403c57c437c03271f8614add", "f266c3bb846ef13a8bf0d09a66461e1e77464475", "72fe1d86581e4672a534852d1e4f4680811db074", "21033d95e673cc4b28d81b15c80ad46cc0bdc22b", "453febab65d4047e04b320daf8877bf5a018b6a8", "2d02cf53bc0f2d919b89bec8f9160b50916bb625", "1a39ce3f624844a1288b6deff545f6c4d79c4fae", "22db07c472b2d7bc7704b8c2bbd8f620e2e68ca9", "307f0da392588b172a68512969d1fb61f85fbc0f", "c81b303005459285a5864ea4de71f77025cd5be5", "54378aef6ec3423ad44d61de61b301be9f5c686d", "228408c76823355010bb13e5b3f32823b35a176c", "89ee022431da849b4991e41c0894d7535008ae99", "31c52ecc98ebf046df914fd113abcae334e1cf0e", "270f029b03ee1bdfeae4ff4c5167b450d185a981", "603e10c9dbadd51ad0938e32b730221c020d677d", "bf2b886510c9a932c41348a60d1b9c843bdcd544", "ed3c4d2d28faaccbaef876a7daaecc3cccadb48f", "0c56f414251d6c9f43623ee683dc6cae3be1045a", "1cf53b650c4a3e212bd6f25e3c9fe8c757862a7d", "6b913b4b4d071b2c91a6fbc317282bf5f610a274", "c0de99c5f15898e2d28f9946436fec2b831d4eae", "ce3edf04c9f0c9da462832cbf8c5a1982e3e6bf8", "1987f56cb6bcba142f9a0a580c4351fb3e407b8c", "4a56d5e483ddea93f14bfbe350a3063b2b9126cb", "09e15bb266da86d0a9525d2a94ac0b38f0b53b88", "7280000cba3a3279454890372668362d00cf3ab0", "f3a34525fa7021322f132c80c9517f240cf1e742", "99ae92bae7c873432a6a60238b33d494bbae13eb", "3dc3f0b64ef80f573e3a5f96e456e52ee980b877", "565428129e99dbfd6f3cba5f09f086a35a1cf4ec", "22c89775cb5309eae5ac1f9ce9d1c2d569439492", "0a811063cfd674275f91006d28cb8620c781e817", "23e824d1dfc33f3780dd18076284f07bd99f1c43", "1e9700ab5cdb35d358379fef53260991d63f621f", "2880f343892683a6dbc6f655f1185d44bf4d1629", "4c648fe9b7bfd25236164333beb51ed364a73253", "0ba5369c5e1e87ea172089d84a5610435c73de00", "cb1b5e8b35609e470ce519303915236b907b13b6", "23fc83c8cfff14a16df7ca497661264fc54ed746", "fc00d634797c5378ca9a441c2d4ce88761d3c7eb", "0b183f5260667c16ef6f640e5da50272c36d599b", "29756b6b16d7b06ea211f21cdaeacad94533e8b4", "4572fd17feb5d098e8044fe085e963036fea2a6d", 
"721119b5f15ccccfd711571fb5a676d622d231bf", "f3cf10c84c4665a0b28734f5233d423a65ef1f23", "72167c9e4e03e78152f6df44c782571c3058050e", "976c9f88c23e892c75c452b450407841e5161a32", "8b547b87fd95c8ff6a74f89a2b072b60ec0a3351", "9d01eca806e0f98c5b3c9a865cec1bd8c78e0f0c", "1e917fe7462445996837934a7e46eeec14ebc65f", "1b589016fbabe607a1fb7ce0c265442be9caf3a9", "3c4106f2c670362f620b33ad7715ab6fd3eb2458", "d394bd9fbaad1f421df8a49347d4b3fca307db83", "556b9aaf1bc15c928718bc46322d70c691111158", "d0b7d3f9a59034d44e7cd1b434cfd27136a7c029", "ad8540379884ec03327076b562b63bc47e64a2c7", "bc9ae4b87888202bfa174ec4e8caee1a087ab994", "1a41e5d93f1ef5b23b95b7163f5f9aedbe661394", "1149c6ac37ae2310fe6be1feb6e7e18336552d95", "93108f1548e8766621565bdb780455023349d2b2", "80a5afeb6968c7e736adc48bd4d5ec5b45b13f71", "c590c6c171392e9f66aab1bce337470c43b48f39", "45dbf1b6fbc7fdae09e2a1928b18fbfff331a979", "8a4893d825db22f398b81d6a82ad2560832cd890", "3cd8ab6bb4b038454861a36d5396f4787a21cc68", "a941434fce5d3fddcd78e2b82d46ccab0411fca9", "49358915ae259271238c7690694e6a887b16f7ed", "207798603e3089a1c807c93e5f36f7767055ec06", "e0b71d3c7d551684bd334af5b3671df7053a529d", "6d4236a7a693555f701c0d149d1db89325035e23", "25f7f03acf62b2cf3672bb506c8827d00b048608", "4d8ce7669d0346f63b20393ffaa438493e7adfec", "3c11a1f2bd4b9ce70f699fb6ad6398171a8ad3bd", "aaa4c625f5f9b65c7f3df5c7bfe8a6595d0195a5", "a0fb5b079dd1ee5ac6ac575fe29f4418fdb0e670", "58fa85ed57e661df93ca4cdb27d210afe5d2cdcd", "6b1b43d58faed7b457b1d4e8c16f5f7e7d819239", "8d2c43759e221f39ab1b4bf70d6891ffd19fb8da", "126214ef0dcef2b456cb413905fa13160c73ec8e", "4026dc62475d2ff2876557fc2b0445be898cd380", "68333b73613c59914bfe1264a440b3cf854dc15c", "5d185d82832acd430981ffed3de055db34e3c653", "cc589c499dcf323fe4a143bbef0074c3e31f9b60", "47d3b923730746bfaabaab29a35634c5f72c3f04", "a88ced67f4ed7940c76b666e1c9c0f08b59f9cf8", "64153df77fe137b7c6f820a58f0bdb4b3b1a879b", "a6d7cf29f333ea3d2aeac67cde39a73898e270b7", "a7d23c699a5ae4ad9b8a5cbb8c38e5c3b5f5fb51", 
"dd0a334b767e0065c730873a95312a89ef7d1c03", "ed04e161c953d345bcf5b910991d7566f7c486f7", "7782627fa2e545276996ff9e9a1686ac496df081", "f4c32b8bcf753033835c14a66e9c04b06bf086a3", "133f42368e63928dc860cce7618f30ee186d328c", "1ee27c66fabde8ffe90bd2f4ccee5835f8dedbb9", "0ee83ed9bedc0cec5c3368144df0b6f4ee76ddff", "cc1b093cfb97475faabab414878fa7e4a2d97cd7", "5f344a4ef7edfd87c5c4bc531833774c3ed23542", "4f9958946ad9fc71c2299847e9ff16741401c591", "ca9adaf5702a7eb9b69be98128e0cae7d6252f8b", "0369baf2366fca2f2afdf86efec4874dc8fad194", "4f9e00aaf2736b79e415f5e7c8dfebda3043a97d", "74dbe6e0486e417a108923295c80551b6d759dbe", "b69bcb5f73999ea12ff4ac1ac853b72cd5096b2d", "70d8bda4aafb0272ac4b93cd43e2448446b8e94d", "afdc303b3325fbc1baa9f18a66bcad59d5aa675b", "1297ee7a41aa4e8499c7ddb3b1fed783eba19056", "b013cce42dd769db754a57351d49b7410b8e82ad", "3167f415a861f19747ab5e749e78000179d685bc", "3e04feb0b6392f94554f6d18e24fadba1a28b65f", "5e806d8fa48216041fe719309534e3fa903f7b5b", "ddf55fc9cf57dabf4eccbf9daab52108df5b69aa", "946b4d840b026d91608758d04f2763e9b981234e", "f68f20868a6c46c2150ca70f412dc4b53e6a03c2", "564035f1b8f06e9bb061255f40e3139fa57ea879", "2bf08d4cb8d1201a9866ee7c4852bfcbf8f8e7f1", "2d3c17ced03e4b6c4b014490fe3d40c62d02e914", "4b321065f6a45e55cb7f9d7b1055e8ac04713b41", "490a217a4e9a30563f3a4442a7d04f0ea34442c8", "60821d447e5b8a96dd9294a0514911e1141ff620", "f6ebfa0cb3865c316f9072ded26725fd9881e73e", "82bef8481207de9970c4dc8b1d0e17dced706352", "e0939b4518a5ad649ba04194f74f3413c793f28e", "6f0caff7c6de636486ff4ae913953f2a6078a0ab", "6a657995b02bc9dee130701138ea45183c18f4ae", "715b69575dadd7804b4f8ccb419a3ad8b7b7ca89", "d280bcbb387b1d548173917ae82cb6944e3ceca6", "993934822a42e70dd35fb366693d847164ca15ff", "38787338ba659f0bfbeba11ec5b7748ffdbb1c3d", "06959f9cf3226179fa1b05efade843b7844fb2bc", "05a116cb6e220f96837e4418de4aa8e39839c996", "994f7c469219ccce59c89badf93c0661aae34264", "93b7ee9842114bc15202ff97941892aa848c0716", "48f0055295be7b175a06df5bc6fa5c6b69725785", 
"6fda12c43b53c679629473806c2510d84358478f", "8bb21b1f8d6952d77cae95b4e0b8964c9e0201b0", "51410d6bd9a41eacb105f15dbdaee520e050d646", "25c19d8c85462b3b0926820ee5a92fc55b81c35a", "a38045ed82d6800cbc7a4feb498e694740568258", "907475a4febf3f1d4089a3e775ea018fbec895fe", "3e03d19b950edadc74ca047dec86227282eccf71", "45c340c8e79077a5340387cfff8ed7615efa20fd", "6ec275755f8776b620d0a4550be0e65caf2bc87a", "74eae724ef197f2822fb7f3029c63014625ce1ca", "00e9011f58a561500a2910a4013e6334627dee60", "629a973ca5f3c7d2f4a9befab97d0044dfd3167a", "eafda8a94e410f1ad53b3e193ec124e80d57d095", "1966bddc083886a9b547e1817fe6abc352a00ec3", "65d7f95fcbabcc3cdafc0ad38e81d1f473bb6220", "1821510693f5bed360c81706c97330d2fa7d1290", "201802c83b4f161de764bb1480735e0b090b5c3b", "5c3eb40b06543f00b2345f3291619a870672c450", "898a66979c7e8b53a10fd58ac51fbfdb6e6e6e7c", "e45a556df61e2357a8f422bdf864b7a5ed3b8627", "5af06815baa4b8f53adc9dc22f6eb3f6f1ad8ff8", "7ea7c073d13e80ec5015f41f1d57f0674502cc5e", "4d356f347ab6647fb3e8ed8c2154dbd359e479ed", "91495c689e6e614247495c3f322d400d8098de43", "aad6fc5bd7631d2e68b7a5a01ac5d578899c43e5", "db0379c9b02e514f10f778cccff0d6a6acf40519", "fb5280b80edcf088f9dd1da769463d48e7b08390", "9547a7bce2b85ef159b2d7c1b73dea82827a449f", "d59f18fcb07648381aa5232842eabba1db52383e", "d4353952a408e1eae8c27a45cc358976d38dde00", "9da63f089b8ee23120bfa8b4d9d9c8f605f421fc", "58db008b204d0c3c6744f280e8367b4057173259", "8c6b9c9c26ead75ce549a57c4fd0a12b46142848", "28fe6e785b32afdcd2c366c9240a661091b850cf", "db3545a983ffd24c97c18bf7f068783102548ad7", "0f940d2cdfefc78c92ec6e533a6098985f47a377", "baa0fe4d0ac0c7b664d4c4dd00b318b6d4e09143", "46b031a3e368f25dd1e42f70f21165fef7b16de2", "8ef465ff12ee1d2be2a99d1c628117a4ce890a6b", "68d70d49ae5476181f3ceb4bc1caf493127b08b1", "360d66e210f7011423364327b7eccdf758b5fdd2", "449b1b91029e84dab14b80852e35387a9275870e", "492afe8f07de6225f70b72c922df83effd909334", "1286641b8896ae737e140cfd3da2d081d4cd548e", "291ce7be8daa99848bf13c32b237ad823d5738e9", 
"a3f689fa5d71bdc7e19a959ac5d0f995e8e56493", "6a1beb34a2dfcdf36ae3c16811f1aef6e64abff2", "ae9257f3be9f815db8d72819332372ac59c1316b", "88bef50410cea3c749c61ed68808fcff84840c37", "fac0151ed0494caf10c7d778059f176ba374e29c", "176bd61cc843d0ed6aa5af83c22e3feb13b89fe1", "143571c2fc9b1b69d3172f8a35b8fad50bc8202a", "778bff335ae1b77fd7ec67404f71a1446624331b", "491cf4d86ed895000a35ba96f46261984c0bdf7c", "b1ed708d090dd155ffa9ac9699a876292f31aaff", "a8affc2819f7a722a41bb913dea9149ee0e23a1f", "0004f72a00096fa410b179ad12aa3a0d10fc853c", "95b9df34bcf4ae04beea55c11cf0cc4095aa38dc", "47e3029a3d4cf0a9b0e96252c3dc1f646e750b14", "6f16f4bd01aeefdd03d6783beacb7de118f5af8a", "c84de67ec2a5d687869d0c3ca8ac974aaa5ee765", "0ec0fc9ed165c40b1ef4a99e944abd8aa4e38056", "14efb131bed66f1874dd96170f714def8db45d90", "abce06a96a7c3095bfc36eed8779d89263769b85", "d4ccc4f18a824af08649657660e60b67c6868d9c", "04c5268d7a4e3819344825e72167332240a69717", "d0e895a272d684a91c1b1b1af29747f92919d823", "9b164cef4b4ad93e89f7c1aada81ae7af802f3a4", "8fc36452a49cb0fd43d986da56f84b375a05b4c1", "553a605243b77a76c1ed4c1ad4f9a43ff45e391b", "3c113a1e8b61431c4a73f72462329eddf9449a48", "ac03849956ac470c41585d2ee34d8bb58bb3c764", "2cf92ee60f719098acc3aae3981cedc47fa726b3", "3ea8a6dc79d79319f7ad90d663558c664cf298d4", "49820ae612b3c0590a8a78a725f4f378cb605cd1", "102b968d836177f9c436141e382915a4f8549276", "0b85b50b6ff03a7886c702ceabad9ab8c8748fdc", "a57b92ed2d8aa5b41fe513c3e98cbf83b7141741", "9fdfe1695adac2380f99d3d5cb6879f0ac7f2bfd", "8f3e120b030e6c1d035cb7bd9c22f6cc75782025", "969127a099a12bd5bb88809c75cafe0a62f6ca05", "ccfcbf0eda6df876f0170bdb4d7b4ab4e7676f18", "359edbaa9cf56857dd5c7c94aaef77003ba8b860", "6889d649c6bbd9c0042fadec6c813f8e894ac6cc", "9ac82909d76b4c902e5dde5838130de6ce838c16", "84b4eb66ad75a74f77299f1ecb6aa6305362e8cd", "c660500b49f097e3af67bb14667de30d67db88e3", "5ddfd3d372f7679518db8fd763d5f8bc5899ed67", "e4bc529ced68fae154e125c72af5381b1185f34e", "642b5173644caa5c5189982a3d1e41163fa9d595", 
"73b05a7faf1b9363ffff125db101dbe2b0b3964f", "62415bbd69270e6577136ba7120f4a682251cdbb", "9026eb610916ec4ce77f0d7d543b7c2482ba4173", "73b90573d272887a6d835ace89bfaf717747c59b", "5e53f530871b5167be0f224993be8a38e85796e8", "36ce0b68a01b4c96af6ad8c26e55e5a30446f360", "c32f04ccde4f11f8717189f056209eb091075254", "95aef5184b89daebd0c820c8102f331ea7cae1ad", "2eb9f1dbea71bdc57821dedbb587ff04f3a25f07", "2654ef92491cebeef0997fd4b599ac903e48d07a", "84508e846af3ac509f7e1d74b37709107ba48bde", "72450d7e5cbe79b05839c30a4f0284af5aa80053", "a9f0e940cfba3663dc8304dd5dc77509f024a3cc", "f8ddb2cac276812c25021b5b79bf720e97063b1e", "5aed0f26549c6e64c5199048c4fd5fdb3c5e69d6", "7002d6fc3e0453320da5c863a70dbb598415e7aa", "5ea9cba00f74d2e113a10c484ebe4b5780493964", "aae0e417bbfba701a1183d3d92cc7ad550ee59c3", "7eb8476024413269bfb2abd54e88d3e131d0aa0e", "c8fb8872203ee694d95da47a1f9929ac27186d87", "3060ac37dec4633ef69e7bc63488548ab3511f61", "6bb0425baac448297fbd29a00e9c9b9926ce8870", "e14b046a564604508ea8e3369e7e9f612e148511", "39f7878f447df7703f2c4ddeeffd7eb0e21f6cd4", "ebc3d7f50231cdb18a8107433ae9adc7bd94b97a", "bffbd04ee5c837cd919b946fecf01897b2d2d432", "01c8d7a3460422412fba04e7ee14c4f6cdff9ad7", "59690814e916d1c0e7aa9190678ba847cbd0046f", "9ac2960f646a46b701963230e6949abd9ac0a9b3", "0744af11a025e9c072ef6ad102af208e79cc6f44", "32925200665a1bbb4fc8131cd192cb34c2d7d9e3", "24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd", "480ab25eba799b59e0a1a51021c5126c88a58a0c", "40dab43abef32deaf875c2652133ea1e2c089223", "10b06d05b8b3a2c925b951a6d1d5919f536ffed4", "9277f1c5161bb41d4ed808c83d53509c8a1a2bdd", "25f1f195c0efd84c221b62d1256a8625cb4b450c", "3bf579baf0903ee4d4180a29739bf05cbe8f4a74", "d778c46657a974e6e87df82b7ee2ced8e5c6f151", "41b997f6cec7a6a773cd09f174cb6d2f036b36cd", "0ba0f000baf877bc00a9e144b88fa6d373db2708", "6eb1e006b7758b636a569ca9e15aafd038d2c1b1", "ae71f69f1db840e0aa17f8c814316f0bd0f6fbbf", "b871d1b8495025ff8a6255514ed39f7765415935", "cf86616b5a35d5ee777585196736dfafbb9853b5", 
"9af1cf562377b307580ca214ecd2c556e20df000", "1d6d6399fd98472012edb211981d5eb8370a07b0", "1b9976fea3c1cf13f0a102a884f027d9d80a14b3", "a4e75766ef93b43608c463c233b8646439ce2415", "87dd3fd36bccbe1d5f1484ac05f1848b51c6eab5", "247a6b0e97b9447850780fe8dbc4f94252251133", "0f53ab8b6c428127753281dd77cf94bdb889b624", "265af79627a3d7ccf64e9fe51c10e5268fee2aae", "0278acdc8632f463232e961563e177aa8c6d6833", "eefecac463ebfc0694b9831e842b574f3954fed6", "d1775eb9d8898a9f66c28bb92b648c3174caec18", "389334e9a0d84bc54bcd5b94b4ce4c5d9d6a2f26", "03fe3d031afdcddf38e5cc0d908b734884542eeb", "326613b5528b7806d6a06f43211800b54f34965e", "c2e6daebb95c9dfc741af67464c98f1039127627", "673a7fdf36bb2ab2beca5678bd29eebf6eba0582", "81d81a2060366f29fd100f793c11acf000bd2a7f", "15cf1f17aeba62cd834116b770f173b0aa614bf4", "c78fdd080df01fff400a32fb4cc932621926021f", "2cdd9e445e7259117b995516025fcfc02fa7eebb", "7754b708d6258fb8279aa5667ce805e9f925dfd0", "52d4952426f40394af1db43f429e0b2a2e326197", "e860db656f39d738050b5f3e0bf72724e6a4ad5c", "071099a4c3eed464388c8d1bff7b0538c7322422", "5f2c210644c1e567435d78522258e0ae036deedb", "ea79a2ad4ac307cb8c586b52bf06d7bf783003a8", "126076774da192d4d3f4efcd1accc719ee5f9683", "50c0de2cccf7084a81debad5fdb34a9139496da0", "d6e3bd948aae43f7654ea1d9e89d88f20d8cf25f", "d85813b58e10a35703df3a8acf41aafe4b6e1dd2", "4fbc0189252ed4fe8f9cffd3ea0ebbb0c621e3ef", "486f5e85944404a1b57333443070b0b8c588c262", "09686fd5eb5ec6f47d5ec24276c78d23607ec01e", "235a347cb96ef22bf35b4cf37e2b4ee5cde9df77", "d57c25c50e5e25fb07fc80b3c3d77b45e16e98cf", "62694828c716af44c300f9ec0c3236e98770d7cf", "8576d0031f2b0fe1a0f93dd454e73d48d98a4c63", "e9331ae2a887c02e0a908ebae2810a681aedee29", "f7bebb2d5ef7c9bd38808b8e615756efafc2a1e7", "467747f86df4537d6deff03dee8e552f760d7c16", "f925879459848a3eeb0035fe206c4645e3f20d42", "3de5dc06f5d089dee111e048c7174a834f1363c1", "6d67a7fd9a4fa99624721f37b077c71dad675805", "4da4e58072c15904d4ce31076061ebd3ab1cdcd5", "ad624331dc5f8dc3a72b1d5baf69634b2f345656", 
"81b2a541d6c42679e946a5281b4b9dc603bc171c", "42441f1fee81c8fd42a74504df21b3226a648739", "03fc466fdbc8a2efb6e3046fcc80e7cb7e86dc20", "18409c220a0f330c24f0e095653a787813c3c85a", "d89a754d7c59e025d2bfcdb872d2d061e2e371ba", "dfecaedeaf618041a5498cd3f0942c15302e75c3", "f2b13946d42a50fa36a2c6d20d28de2234aba3b4", "6409b8879c7e61acf3ca17bcc62f49edca627d4c", "4353d0dcaf450743e9eddd2aeedee4d01a1be78b", "7dcd3f58aa75f7ae96fdac9b1c2332a4f0b2dbd3", "43f6953804964037ff91a4f45d5b5d2f8edfe4d5", "937ffb1c303e0595317873eda5ce85b1a17f9943", "91811203c2511e919b047ebc86edad87d985a4fa", "d855791bc23b4aa8e751d6a4e2ae7f5566a991e8", "395bf182983e0917f33b9701e385290b64e22f9a", "7588388b3f68c1a1a6b3b336d8387fee5c57c985", "7d98dcd15e28bcc57c9c59b7401fa4a5fdaa632b", "488d3e32d046232680cc0ba80ce3879f92f35cac", "cf185d0d8fcad2c7f0a28b7906353d4eca5a098b", "8f92cccacf2c84f5d69db3597a7c2670d93be781", "be07f2950771d318a78d2b64de340394f7d6b717", "2d87f4bf0606ce9939033b8f1fbc64b539eb18a6", "f43eeb578e0ca48abfd43397bbd15825f94302e4", "93115b81d1efc1f6d2788972bdb89908764890b6", "e6865b000cf4d4e84c3fe895b7ddfc65a9c4aaec", "57bf9888f0dfcc41c5ed5d4b1c2787afab72145a", "8c6c0783d90e4591a407a239bf6684960b72f34e", "88850b73449973a34fefe491f8836293fc208580", "57101b29680208cfedf041d13198299e2d396314", "7003d903d5e88351d649b90d378f3fc5f211282b", "16f940b4b5da79072d64a77692a876627092d39c", "b9000808ac1516e5bf134d02d362a46e1793b092", "3a9fbd05aaab081189a8eea6f23ed730fa6db03c", "d1082eff91e8009bf2ce933ac87649c686205195", "b689d344502419f656d482bd186a5ee6b0140891", "2f67d5448b5372f639633d8d29aac9c0295b4d72", "0b0e679e6d3abe3adc8525d4fee49b388ccfdf9a", "39ecdbad173e45964ffe589b9ced9f1ebfe2d44e", "1dbbec4ad8429788e16e9f3a79a80549a0d7ac7b", "1addc5c1fa80086d1ed58f71a9315ad13bd87ca2", "738a985fba44f9f5acd516e07d0d9578f2ffaa4e", "68249064f7d5046abef785ada541244fa67b4346", "38215c283ce4bf2c8edd597ab21410f99dc9b094", "1d7dde30b8d0f75576f4a23b75b8350071fd4839", "0be43cf4299ce2067a0435798ef4ca2fbd255901", 
"fc7f140fcedfe54dd63769268a36ff3f175662b5", "17fad2cc826d2223e882c9fda0715fcd5475acf3", "c5c379a807e02cab2e57de45699ababe8d13fb6d", "ae4390873485c9432899977499c3bf17886fa149", "39150acac6ce7fba56d54248f9c0badbfaeef0ea", "2d164f88a579ba53e06b601d39959aaaae9016b7", "6f1a784ebb8df0689361afe26a2e5f7a1f4c66ca", "0cd8895b4a8f16618686f622522726991ca2a324", "030ef31b51bd4c8d0d8f4a9a32b80b9192fe4c3f", "b803cdb3377fa3b6194932607f51f2d1fafbf964", "82a610a59c210ff77cfdde7fd10c98067bd142da", "5a4881bfcb4ae49229f39320197c2d01b2fbf1f5", "32adde2e33f4344900829c557c8533f8f0979f10", "4ca1fcfd7650eeb0ac8d51cff31b70717cdddfdd", "ba17782ca5fc0d932317389c2adf94b5dbd3ebfe", "8d0243b8b663ca0ab7cbe613e3b886a5d1c8c152", "b86c49c6e3117ea116ec2d8174fa957f83502e89", "5d9bed6974fb81efeaeeff605b075e73b119a2b5", "78f79c83b50ff94d3e922bed392737b47f93aa06", "dc7df544d7c186723d754e2e7b7217d38a12fcf7", "c8bc8c99acd009e4d27ddd8d9a6e0b899d48543e", "a949b8700ca6ba96ee40f75dfee1410c5bbdb3db", "31146bd416626d2bf912e0a0d12ca619fb49011b", "a2b54f4d73bdb80854aa78f0c5aca3d8b56b571d", "b7b732df067a146b6254dfda404b124c1bff43b9", "b472f91390781611d4e197564b0016d9643a5518", "367f2668b215e32aff9d5122ce1f1207c20336c8", "08e24f9df3d55364290d626b23f3d42b4772efb6", "d2a415365f997c8fe2dbdd4e06ceab2e654172f6", "2238dddb76499b19035641d97711cf30d899dadb", "ab8fb278db4405f7db08fa59404d9dd22d38bc83", "2e7e1ee7e3ee1445939480efd615e8828b9838f8", "a6ffe238eaf8632b4a8a6f718c8917e7f3261546", "f08cb47cd91a83ea849f2dfe2682529f3bb95aa9", "0c6e29d82a5a080dc1db9eeabbd7d1529e78a3dc", "4e7ebf3c4c0c4ecc48348a769dd6ae1ebac3bf1b", "2e832d5657bf9e5678fd45b118fc74db07dac9da", "70c25293e33f5c37143ae20e3b0198a68083a5ed", "4ae59d2a28abd76e6d9fb53c9e7ece833dce7733", "e8f0f9b74db6794830baa2cab48d99d8724e8cb6", "8cb3f421b55c78e56c8a1c1d96f23335ebd4a5bf", "9a1a9dd3c471bba17e5ce80a53e52fcaaad4373e", "45f3bf505f1ce9cc600c867b1fb2aa5edd5feed8", "3dabf7d853769cfc4986aec443cc8b6699136ed0", "03af8cf40283ff30f1da3637b024319d0c79bdf0", 
"90c4deaa538da42b9b044d7b68c3692cced66036", "400e6c777d5894db2f6538c8ebd1124352b1c064", "1cbd3f96524ca2258fd2d5c504c7ea8da7fb1d16", "3bf673a1f620015cb8b5106b85c7168431bb48ff", "2e708431df3e7a9585a338e1571f078ddbe93a71", "bccb35704cdd3f2765b1a3f0296d1bff3be019c1", "0c435e7f49f3e1534af0829b7461deb891cf540a", "8c3f7bd8ae50337dd812b370ce4c4ea9375a9f58", "33e20449aa40488c6d4b430a48edf5c4b43afdab", "9436170c648c40b6f4cc3751fca3674aa82ffe9a", "8b9e94fb3bb64389e9765ffde365862231b5972c", "1a8d40bcfb087591cc221086440d9891749d47b8", "2d3af3ee03793f76fb8ff15e7d7515ff1e03f34c", "6d2ca1ddacccc8c865112bd1fbf8b931c2ee8e75", "c0c0b8558b17aa20debc4611275a4c69edd1e2a7", "0515e43c92e4e52254a14660718a9e498bd61cf5", "6d70344ae6f6108144a15e9debc7b0be4e3335f1", "93e451f71245f8e5ba346a48de2d09c0bccc3c22", "4d2975445007405f8cdcd74b7fd1dd547066f9b8", "ffec78f270dba4bdaf6bca7aedc16798bb9347ef", "60040e4eae81ab6974ce12f1c789e0c05be00303", "036c41d67b49e5b0a578a401eb31e5f46b3624e0", "03b98b4a2c0b7cc7dae7724b5fe623a43eaf877b", "eb7b387a3a006609b89ca5ed0e6b3a1d5ecb5e5a", "d074b33afd95074d90360095b6ecd8bc4e5bb6a2", "75879ab7a77318bbe506cb9df309d99205862f6c", "b42b535fcd0d9bd41a6594a910ea4623e907ceb9", "d5ab6aa15dad26a6ace5ab83ce62b7467a18a88e", "f913bb65b62b0a6391ffa8f59b1d5527b7eba948", "3d13ab40e9b9d471c77907f4cb846f12c306e9db", "0b3144cdc9d6d5a1498d6178db20d1c49fb64de9", "6f5151c7446552fd6a611bf6263f14e729805ec7", "6f0d3610c4ee7b67e9d435d48bc98167761251e8", "681399aa0ea4cbffd9ab22bf17661d6df4047349", "8127b7654d6e5c46caaf2404270b74c6b0967e19", "f6cf2108ec9d0f59124454d88045173aa328bd2e", "8cedb92694845854f3ad0daf6c9adb6b81c293de", "9963af1199679e176f0836e6d63572b3a69fa7da", "c7e4c7be0d37013de07b6d829a3bf73e1b95ad4e", "f0a9d69028edd1a39147848ad1116ca308d7491e", "292e1c88d43a77dbe5c610f4f611cfdb6d3212b6", "dba493caf6647214c8c58967a8251641c2bda4c2", "74618fb4ce8ce0209db85cc6069fe64b1f268ff4", "f6532bf13a4649b7599eb40f826aa5281e392c61", "a79704c1ce7bf10c8753a8f51437ccbc61947d03", 
"930cbd5a6c63f0de7984a9b1db437a35ffacce3b", "42afe6d016e52c99e2c0d876052ade9c192d91e7", "842d82081f4b27ca2d4bc05c6c7e389378f0c7b8", "5d2e5833ca713f95adcf4267148ac2ccf2318539", "473cbc5ec2609175041e1410bc6602b187d03b23", "7fb6bc6c920ca574677f0d3a40c5c377a095885b", "8b2c090d9007e147b8c660f9282f357336358061", "134db6ca13f808a848321d3998e4fe4cdc52fbc2", "7a7f2403e3cc7207e76475e8f27a501c21320a44", "07d986b1005593eda1aeb3b1d24078db864f8f6a", "d647099e571f9af3a1762f895fd8c99760a3916e", "ae2c71080b0e17dee4e5a019d87585f2987f0508", "53c8cbc4a3a3752a74f79b74370ed8aeed97db85", "72d9bee3a71b005677dfe1c0a416bbda6eb5fa95", "f83dd9ff002a40228bbe3427419b272ab9d5c9e4", "df90850f1c153bfab691b985bfe536a5544e438b", "b44ca5bb74b27d196f281b6741c645f425ff65c1", "82e1692467969940a6d6ac40eae606b8b4981f7e", "350da18d8f7455b0e2920bc4ac228764f8fac292", "0c5afb209b647456e99ce42a6d9d177764f9a0dd", "5bfad0355cdb62b22970777d140ea388a7057d4c", "e4ad82afc563b783475ed45e9f2cd4c9e2a53e83", "b42a97fb47bcd6bfa72e130c08960a77ee96f9ab", "14d7bce17265738f10f48987bb7bffb3eafc676e", "6688b2b1c1162bc00047075005ec5c7fca7219fd", "6a8a3c604591e7dd4346611c14dbef0c8ce9ba54", "6506f9a8a2e73eeaea185273df909feccb68f944", "ee6f9a0f6eb5b615a36acc1444f4df1359cc2a63", "8fcf7dfa30fa0c4194aef41c508a95d59be38f23", "e1ab3b9dee2da20078464f4ad8deb523b5b1792e", "097104fc731a15fad07479f4f2c4be2e071054a2", "ac8e09128e1e48a2eae5fa90f252ada689f6eae7", "741485741734a99e933dd0302f457158c6842adf", "021469757d626a39639e260492eea7d3e8563820", "c59b62864a6d86eead075c88137a87070a984550", "e6d689054e87ad3b8fbbb70714d48712ad84dc1c", "063a3be18cc27ba825bdfb821772f9f59038c207", "4f4f920eb43399d8d05b42808e45b56bdd36a929", "90cb074a19c5e7d92a1c0d328a1ade1295f4f311", "ba69d464bc360f94303ffc9f710009d16a5673a0", "51d6a8a61ea9588a795b20353c97efccec73f5db", "ade1034d5daec9e3eba1d39ae3f33ebbe3e8e9a7", "855bfc17e90ec1b240efba9100fb760c068a8efa", "399a2c23bd2592ebe20aa35a8ea37d07c14199da", "11a6593e6e35f95ebeb5233897d1d8bcad6f9c87", 
"33548531f9ed2ce6f87b3a1caad122c97f1fd2e9", "5550a6df1b118a80c00a2459bae216a7e8e3966c", "05b8673d810fadf888c62b7e6c7185355ffa4121", "77223849321d57a03e0571a08e71eba06e38834a", "748e72af01ba4ee742df65e9c030cacec88ce506", "eee8a37a12506ff5df72c402ccc3d59216321346", "47541d04ec24662c0be438531527323d983e958e", "af62621816fbbe7582a7d237ebae1a4d68fcf97d", "ac98e7c570eb4a9db23f85164010f94afba1251e", "d912b8d88d63a2f0cb5d58164e7414bfa6b41dfa", "3af1a375c7c1decbcf5c3a29774e165cafce390c", "59d45281707b85a33d6f50c6ac6b148eedd71a25", "113e5678ed8c0af2b100245057976baf82fcb907", "d4dd4600e8f4ecfd11fa4a4a702b1f08bc9ec6f7", "855882a5943fc12fa9c0e8439c482e055b4b46f3", "95023e3505263fac60b1759975f33090275768f3", "1c3073b57000f9b6dbf1c5681c52d17c55d60fd7", "defa8774d3c6ad46d4db4959d8510b44751361d8", "1513949773e3a47e11ab87d9a429864716aba42d", "d56fe69cbfd08525f20679ffc50707b738b88031", "96578785836d7416bf2e9c154f687eed8f93b1e4", "8c9c8111e18f8798a612e7386e88536dfe26455e", "28c9198d30447ffe9c96176805c1cd81615d98c8", "a660390654498dff2470667b64ea656668c98ecc", "ad75330953d9aacc05b5ca1a50c4fed3e7ca1e21", "2e1b1969ded4d63b69a5ec854350c0f74dc4de36", "f8f872044be2918de442ba26a30336d80d200c42", "6af75a8572965207c2b227ad35d5c61a5bd69f45", "cbe859d151466315a050a6925d54a8d3dbad591f", "cefd9936e91885ba7af9364d50470f6cb54315a4", "bb22104d2128e323051fb58a6fe1b3d24a9e9a46", "c0ee89dc2dad76147780f96294de9e421348c1f4", "7636f94ddce79f3dea375c56fbdaaa0f4d9854aa", "f2abeb1a8dd32afb9a78856db38e115046afeb34", "808656563eea17470159e6540b05fe6f7ae58c2b", "d142e74c6a7457e77237cf2a3ded4e20f8894e1a", "9b43897c551b134852bda113355f340e605ad4e7", "4e490cf3cf26fe46507bb55a548c403b9c685ba0", "abfba1dc9a9991897acd0e0d3d4ef9d4aef4151c", "c61a8940d66eed9850b35dd3768f18b59471ca34", "35490b021dcdec12882870a31dce9a687205ab5c", "949699d0b865ef35b36f11564f9a4396f5c9cddb", "5e821cb036010bef259046a96fe26e681f20266e", "8e29884d4a0a1a53412e115e43f1b1cefe3bbc34", "ef2084979a3191403c1b8b48f503d06f346afb8f", 
"41c56c69b20b3f0b6c8a625009fc0a4d317e047a", "a168ca2e199121258fbb2b6c821207456e5bf994", "dcb50e1f439d1f9b14ae85866f4542e51b830a07", "98d1b5515b079492c8e7f0f9688df7d42d96da8e", "70516aede32cf0dbc539abd9416c44faafc868bd", "919d0e681c4ef687bf0b89fe7c0615221e9a1d30", "133da0d8c7719a219537f4a11c915bf74c320da7", "a484243027b19b57b5063ad2e4b414e1d383d3e8", "57efdcf4d56f15846c9c5104ce2cd414532ced7d", "57f7d8c6ec690bd436e70d7761bc5f46e993be4c", "045275adac94cced8a898a815293700401e9955f", "ee897a827bfc03e4682fb77018c27ec29a063d2c", "8656f48aa77f25462b3ad2edf2b1aa965b2b7b38", "436b2f5bf23bc0bd80680ee2ed279cbd55939b86", "531fd9be964d18ba7970bd1ca6c3b9dc91b8d2ab", "6d97e69bbba5d1f5c353f9a514d62aff63bc0fb1", "a2d9c9ed29bbc2619d5e03320e48b45c15155195", "512b4c8f0f3fb23445c0c2dab768bcd848fa8392", "d3dea0cd65ab3da14cb7b3bd0ec59531d98508aa", "dbfe62c02b544b48354fac741d90eb4edf815db5", "c4a2cd5ec81cdfd894c9a20d4ffb8cda637aab1f", "2c2786ea6386f2d611fc9dbf209362699b104f83", "90dd2a53236b058c79763459b9d8a7ba5e58c4f1", "454283ee7ea757dd25780807e4017cf43b4fc593", "b6685941588febbf66f9bf6a074cd548bc8a567f", "c65e4ffa2c07a37b0bb7781ca4ec2ed7542f18e3", "c5be0feacec2860982fbbb4404cf98c654142489", "bd8d579715d58405dfd5a77f32920aafe018fce4", "2c7185bcf31a4950b014b67ca7c63735ee00d56f", "9ef06cc958af2274afd193a1dca705c08234bcd3", "0323b618d3a4c24bdda4f42361e19a2a7d497da5", "7212e033b37efa9c96ee51cb810c303249ab21e4", "310fe4e6cb6d090f7817de4c1034e35567b56e34", "d10cfcf206b0991e3bc20ac28df1f61c63516f30", "0daf696253a1b42d2c9d23f1008b32c65a9e4c1e", "c91103e6612fa7e664ccbc3ed1b0b5deac865b02", "a70e36daf934092f40a338d61e0fe27be633f577", "0be2245b2b016de1dcce75ffb3371a5e4b1e731b", "3e2b9ffeb708b4362ebfad95fa7bb0101db1579d", "8e272978dd1500ce6e4c2ef5e91d4332078ff757", "57893403f543db75d1f4e7355283bdca11f3ab1b", "f702f1294c0cd74b31db39c698281744d3137eb4", "11ff2f54ecfda6c7f90ed84baf1cc5b4f07e726b", "ab6776f500ed1ab23b7789599f3a6153cdac84f7", "4d21a2866cfd1f0fb2a223aab9eecfdec963059a", 
"19e7bdf8310f9038e1a9cf412b8dd2c77ff64c54", "b63b6ed78b39166d87d4c56f8890873aa65976a2", "0dbacb4fd069462841ebb26e1454b4d147cd8e98", "c997744db532767ee757197491d8ac28d10f1c0f", "9ce0d64125fbaf625c466d86221505ad2aced7b1", "85639cefb8f8deab7017ce92717674d6178d43cc", "3674f3597bbca3ce05e4423611d871d09882043b", "776835eb176ed4655d6e6c308ab203126194c41e", "bb750b4c485bc90a47d4b2f723be4e4b74229f7a", "24b37016fee57057cf403fe2fc3dda78476a8262", "3fe1cfd2dc69a23c0b0cdf9456c057e6ea1ee1b9", "c83d142a47babe84e8c4addafa9e2bb9e9b757a5", "66aad5b42b7dda077a492e5b2c7837a2a808c2fa", "51cc78bc719d7ff2956b645e2fb61bab59843d2b", "044fdb693a8d96a61a9b2622dd1737ce8e5ff4fa", "406caefc7f51e8a16833402e4757704d5d84a1f8", "1cfe3533759bf95be1fce8ce1d1aa2aeb5bfb4cc", "c37b5c43b58f2810bba78fcf2251d5b631428b48", "f858c5d0052d19fe780affccb1f38b1e826ed091", "2a6f554a356521199112bae333537de62e13db73", "76dc11b2f141314343d1601635f721fdeef86fdb", "49e975a4c60d99bcc42c921d73f8d89ec7130916", "dd031dbf634103ff3c58ce87aa74ec6921b2e21d", "ecdf8e5393eead0b63c5bc4fbe426db5a70574eb", "0f395a49ff6cbc7e796656040dbf446a40e300aa", "947cdeb52f694fb1c87fc16836f8877cd83dc652", "39dc2ce4cce737e78010642048b6ed1b71e8ac2f", "eff87ecafed67cc6fc4f661cb077fed5440994bb", "edef98d2b021464576d8d28690d29f5431fd5828", "8b3c867e67b263d7a0577a112173a64009a3b4ba", "c259db2675f3bfb157f37e6c93b03d1d14dab4c7", "66ebb070ea8de63afa11cc856fe2754ea39a93ff", "23d5b2dccd48a17e743d3a5a4d596111a2f16c41", "8eb40d0a0a1339469a05711f532839e8ffd8126c", "081fb4e97d6bb357506d1b125153111b673cc128", "1b3b01513f99d13973e631c87ffa43904cd8a821", "580e48d3e7fe1ae0ceed2137976139852b1755df", "b961e512242ddad7712855ab00b4d37723376e5d", "4b3dd18882ff2738aa867b60febd2b35ab34dffc", "d41c11ebcb06c82b7055e2964914b9af417abfb2", "5d485501f9c2030ab33f97972aa7585d3a0d59a7", "58ca110261680a70480eb0fd5d6f609c6689323f", "719a5286611c2a89890f713af54f4a00d10967e6", "cf875336d5a196ce0981e2e2ae9602580f3f6243", "e73b1137099368dd7909d203b80c3d5164885e44", 
"ab1900b5d7cf3317d17193e9327d57b97e24d2fc", "0b9d3a0c61ee498f8ed54aaa22d3c4e72aa56f40", "28a900a07c7cbce6b6297e4030be3229e094a950", "606dff86a34c67c79d93f1e536487847a5bb7002", "86b985b285c0982046650e8d9cf09565a939e4f9", "bcc346f4a287d96d124e1163e4447bfc47073cd8", "2b4d092d70efc13790d0c737c916b89952d4d8c7", "cfa92e17809e8d20ebc73b4e531a1b106d02b38c", "b261439b5cde39ec52d932a222450df085eb5a91", "f3e005e567f16fa55c54b4c1b17f4538d799c7de", "539bbf8e4916481bd089d5641175085edf4cf049", "53e34ff4639806b7599c846f219c02b025da9d13", "5d9971c6a9d5c56463ea186850b16f8969a58e67", "5df376748fe5ccd87a724ef31d4fdb579dab693f", "2f184c6e2c31d23ef083c881de36b9b9b6997ce9", "985bbe1d47b843fa0b974b4db91be23f218d1ce7", "9c4521dd25628b517dac3656410242b83b91e1e0", "335435a94f8fa9c128b9f278d929c9d0e45e2510", "1962e4c9f60864b96c49d85eb897141486e9f6d1", "151481703aa8352dc78e2577f0601782b8c41b34", "0be015e2f9a1d2acebc3afb6e0f6948dd2f9d23d", "416364cfdbc131d6544582e552daf25f585c557d", "ded8252fc6df715753e75ba7b7fee518361266ef", "48cf1105eca8049e8625c5b30a69620b2381589c", "4d9a02d080636e9666c4d1cc438b9893391ec6c7", "179e566a2c1a2a48aa3d0028209c11ebe7d6740e", "d36a1e4637618304c2093f72702dcdcc4dcd41d1", "287900f41dd880802aa57f602e4094a8a9e5ae56", "e57e1dce81e888eb07054923602e35bfb5ef3eb8", "016f49a54b79ec787e701cc8c7d0280273f9b1ef", "5ea9063b44b56d9c1942b8484572790dff82731e", "1071dde48a77f81c35ad5f0ca90a9daedb54e893", "78216cd51e6e1cc014b83e27e7e78631ad44b899", "d83db03f8eae6dba91ce044c640c6b35ccf541f3", "b340f275518aa5dd2c3663eed951045a5b8b0ab1", "dd600e7d6e4443ebe87ab864d62e2f4316431293", "5f27ed82c52339124aa368507d66b71d96862cb7", "265e76285e18587065a1e28246971f003c5267f3", "b1665e1ddf9253dcaebecb48ac09a7ab4095a83e", "e8c9dcbf56714db53063b9c367e3e44300141ff6", "72a5e181ee8f71b0b153369963ff9bfec1c6b5b0", "4d15254f6f31356963cc70319ce416d28d8924a3", "9f4f890f74ac91bdc4323e061502331945474b90", "b14e3fe0d320c0d7c09154840250d70bc88bb6c0", "8812aef6bdac056b00525f0642702ecf8d57790b", 
"dbb0a527612c828d43bcb9a9c41f1bf7110b1dc8", "679b7fa9e74b2aa7892eaea580def6ed4332a228", "7ae8acf20f9415f99bfb95aa000d698b8499f1ee", "5db4fe0ce9e9227042144758cf6c4c2de2042435", "34d484b47af705e303fc6987413dc0180f5f04a9", "374c7a2898180723f3f3980cbcb31c8e8eb5d7af", "6a6406906470be10f6d6d94a32741ba370a1db68", "c60601bdb5465d8270fdf444e5d8aeccab744e29", "35c973dba6e1225196566200cfafa150dd231fa8", "3d1a6a5fd5915e0efb953ede5af0b23debd1fc7f", "3fefc856a47726d19a9f1441168480cee6e9f5bb", "1159ff04fd17c59515199e0fc2d5e01e72818b59", "ba8a99d35aee2c4e5e8a40abfdd37813bfdd0906", "3c78b642289d6a15b0fb8a7010a1fb829beceee2", "7f36dd9ead29649ed389306790faf3b390dc0aa2", "70d0bffa288e317bc62376f4f577c5bd7712e521", "af3e6e20de06b03c33f8e85eced74c2d096730ea", "4acd683b5f91589002e6f50885df51f48bc985f4", "f7be8956639e66e534ed6195d929aed4e0b90cad", "ed023651e31cdbcaa5ef2ee1d71ddbc2906c2f76", "398558817e05e8de184cc4c247d4ea51ab9d4d58", "c586463b8dbedce2bfce3ee90517085a9d9e2e13", "f5603ceaebe3caf6a812edef9c4b38def78cbf34", "ff7bc7a6d493e01ec8fa2b889bcaf6349101676e", "c30e4e4994b76605dcb2071954eaaea471307d80", "d7b4d741b1dd4fb3f278efa5fdf2a5d8523caa0e", "f0681fc08f4d7198dcde803d69ca62f09f3db6c5", "d567f2bbc6ce6d6acf0114e6514f31eff4da68f6", "5b01c4eef1e83f98751bb3ef1e4fca34abb8f530", "a6771936ffeba6e7fffad1d2c60e42519c615e24", "a7c39a4e9977a85673892b714fc9441c959bf078", "7f8d2d7eaa03132caefe0f3b126b5b369a712c9d", "d5afd7b76f1391321a1340a19ba63eec9e0f9833", "6f35b6e2fa54a3e7aaff8eaf37019244a2d39ed3", "2dd5f1d69e0e8a95a10f3f07f2c0c7fa172994b3", "910524c0d0fe062bf806bb545627bf2c9a236a03", "2549ac0d3f40c1f6d72f641c2f05a17aef4bf42a", "d785fcf71cb22f9c33473cba35f075c1f0f06ffc", "40389b941a6901c190fb74e95dc170166fd7639d", "44d2ab6b7166274cc13b52d8f73a36839ca0d4a8", "208a2c50edb5271a050fa9f29d3870f891daa4dc", "294d1fa4e1315e1cf7cc50be2370d24cc6363a41", "4c8581246ed4d90c942a23ed7c0e007221fa684d", "951368a1a8b3c5cd286726050b8bdf75a80f7c37", "eb86c6642040944abc997848a32e631d1f25a2f5", 
"4679f4a7da1cf45323c1c458b30d95dbed9c8896", "db3984b143c59584a32d762d712d21c0e8cf38b8", "6515fe829d0b31a5e1f4dc2970a78684237f6edb", "16286fb0f14f6a7a1acc10fcd28b3ac43f12f3eb", "2b1129efcbafa61da1d660de3b5c84b646540311", "205e4d6e0de81c7dd6c83b737ffdd4519f4f7ffa", "75d2ecbbcc934563dff6b39821605dc6f2d5ffcc", "ce30ddb5ceaddc0e7d308880a45c135287573d0e", "aaeb8b634bb96a372b972f63ec1dc4db62e7b62a", "6ed22b934e382c6f72402747d51aa50994cfd97b", "2dced31a14401d465cd115902bf8f508d79de076", "36fe39ed69a5c7ff9650fd5f4fe950b5880760b0", "38cc2f1c13420170c7adac30f9dfac69b297fb76", "fbb2f81fc00ee0f257d4aa79bbef8cad5000ac59", "0b87d91fbda61cdea79a4b4dcdcb6d579f063884", "40a9f3d73c622cceee5e3d6ca8faa56ed6ebef60", "05c974b9fde42f87e28458fb7febf7a05f2dfd18", "bf03f0fe8f3ba5b118bdcbb935bacb62989ecb11", "289c6413c9b1d37c0608ee0027d28466ef3a552f", "25e2d3122d4926edaab56a576925ae7a88d68a77", "fe9c460d5ca625402aa4d6dd308d15a40e1010fa", "e0fe68c92fefa80992f4861b0c45a3fbec7cf1c9", "493bc7071e35e7428336a515d1d26020a5fb9015", "0ac664519b2b8abfb8966dafe60d093037275573", "59efb1ac77c59abc8613830787d767100387c680", "c98def5f9d0c6ae519fe0aeebe5378f65b14e496", "4d8de4dad40faa835e8a01e3aa465e1bb3a996f4", "45877ff4694576f59c2a9ca45aa65f935378492a", "8f5ce25e6e1047e1bf5b782d045e1dac29ca747e", "4fee2f524ef12741d2b0fa96f45a5ef9d20ada83", "cacd51221c592012bf2d9e4894178c1c1fa307ca", "6a806978ca5cd593d0ccd8b3711b6ef2a163d810", "7767059c935fb773d5e6f559b9eca6e72caa456d", "2912c3ea67678a1052d7d5cbe734a6ad90fc360e", "f9ccfe000092121a2016639732cdb368378256d5", "4541c9b4b7e6f7a232bdd62ae653ba5ec0f8bbf6", "a5c04f2ad6a1f7c50b6aa5b1b71c36af76af06be", "eacf974e235add458efb815ada1e5b82a05878fa", "0d6d9c4b5dd282b8f29cd3c200df02a00141f0a9", "08f4832507259ded9700de81f5fd462caf0d5be8", "dec5b11b01f35f72adb41d2be26b9b95870c5c00", "acc548285f362e6b08c2b876b628efceceeb813e", "5850aab97e1709b45ac26bb7d205e2accc798a87", "c87d5036d3a374c66ec4f5870df47df7176ce8b9", "a57b37549edba625f5955759e259e52eb0af8773", 
"744db9bd550bf5e109d44c2edabffec28c867b91", "9f428db0d3cf26b9b929dd333a0445bcc7514cdf", "75fcbb01bc7e53e9de89cb1857a527f97ea532ce", "19fb5e5207b4a964e5ab50d421e2549ce472baa8", "28de411a5b3eb8411e7bcb0003c426aa91f33e97", "76d939f73a327bf1087d91daa6a7824681d76ea1", "aa8341cb5d8f0b95f619d9949131ed5c896d6470", "28ce99940265407517faf7c45755675054ef78c4", "72a00953f3f60a792de019a948174bf680cd6c9f", "c42a8969cd76e9f54d43f7f4dd8f9b08da566c5f", "1dff919e51c262c22630955972968f38ba385d8a", "23860d947cf221b6ddb6d6cf3a7ac4b08c7cb8d3", "29a9e9b5926e65512c25c845cceba42fc1be2958", "31f1c92dbfa5aa338a21a0cb15d071cb9dc6e362", "3a60678ad2b862fa7c27b11f04c93c010cc6c430", "89c73b1e7c9b5e126a26ed5b7caccd7cd30ab199", "1e2770ce52d581d9a39642b40bfa827e3abf7ea2", "a1ee0176a9c71863d812fe012b5c6b9c15f9aa8a", "c980443ca996402de4b5e5424f872acda0368831", "920a92900fbff22fdaaef4b128ca3ca8e8d54c3e", "8eb9aa6349db3dd1b724266fcd5fc39a83da022a", "33bbf01413910bca26ed287112d32fe88c1cc0df", "a63638b26d36bab8db10bd95fb287c727bab33ec", "d6fb606e538763282e3942a5fb45c696ba38aee6", "972b1a7ef8cc9c83c2c6d8d126f94f27b567d7d0", "7d9fe410f24142d2057695ee1d6015fb1d347d4a", "193ec7bb21321fcf43bbe42233aed06dbdecbc5c", "22143664860c6356d3de3556ddebe3652f9c912a", "08fbbfe87563595508a77629e47613d6bd1119eb", "4d423acc78273b75134e2afd1777ba6d3a398973", "36b23007420b98f368d092bab196a8f3cbcf6f93", "52c71d20dced998a607c466241dfc2eb88183de8", "ebfdb4842c69177b65022f00d3d038d645f3260b", "2c811b647a6aac924920c06e607e9e8d4b8d872d", "a6ab23f67d85da26592055c0eac4c34f05c26519", "0c377fcbc3bbd35386b6ed4768beda7b5111eec6", "33403e9b4bbd913ae9adafc6751b52debbd45b0e", "78f244dc2a171944836a89874b8f60e9fe80865d", "0729628db4bb99f1f70dd6cb2353d7b76a9fce47", "93a4c7ac0b09671db8cd3adbe62851d7befc4658", "7f9be0e08784835de0f8bc3a82fcca02b3721dc1", "6cb8c52bb421ce04898fa42cb997c04097ddd328", "54483d8b537e51317a8e6c6caf4949d4440c9368", "d95e6185f82e3ef3880a98122522eca8c8c3f34e", "0559fb9f5e8627fecc026c8ee6f7ad30e54ee929", 
"9f5e22fbc22e1b0a61bcd75202d299232e68de5d", "2661f38aaa0ceb424c70a6258f7695c28b97238a", "53b35519e09772fb7ec470fdec51c6edb43c4f13", "2fe0555f2b92a81992247519cb8fdc047069e2b0", "b9fbf264d61c44c9737513d2cfd331b34d24fd64", "23103aec4de7687bdc18df4c00f3c55a8f760935", "2313914c626383e87077762c03208e119c04b479", "2f23f7d08c7b8670289cfedd1e571f44a3bace8b", "bb774b8ae76ec0818f6eebe10eee17ed30d7079b", "91f1442d3a3e608562af6d1aefe2a7119a930614", "c915bb79e1fed83938dfc8a781dee6d24742c61e", "5478a70badcf4d6da383d86163f0acc2c28b6bd3", "3b5128bfe35875d0cead04b7d19024d841b605f9", "6ae63a0097c41b63c2d5df352de985f1058220e9", "f1ecb4a14f1f30012ec8b70ec39882888f29b07e", "bf9b34433bdf14e595a1ed89a23c416990639215", "3cba534f71e2201ea84487335acb685e349717c0", "62070fbd22b2a4bba830668c2e9720ec4bff4171", "697c2a3a87c434134ddef19f74d3220355e508e2", "2bed84ee6f8e592dde39a93a77fe06fddf55b817", "4ddd0b6644457a2027fe206cee57121e9493e700", "236a99cc6364661e855cc1585ed166bb06cd30b1", "ef7f02a53ff27f79fd73d57607f47edab9ecd901", "7e467f75d9c331ccc3794cef0f4ae8dafe80cee3", "22ab35c9a382a7133ef05631f7ad11715e7b49b6", "fc7cd432db404e7724df7671d6e010109fe0c944", "d02469c43d0e5531b41984e869eb2af30bef4019", "89878feef7141b42b2f20d66c6c3572a1b761a5e", "fa37f12a3a7cffdf7ba5dc140d2b25f0d435c663", "7a617426bc02c10dd781dbbe44d3fbc7a2ab5793", "4f3f116cc0f19bc70242d1ec9eac942c35c168ba", "e293469719cea119bf9688922137f90451f7a67a", "57f8b25fd16218d321a4ce9b3de72f129b9f8e66", "0ff14ec76e5fe7f17dce102e781ffce2738c8d4b", "150326137da214210b46e0b7f22e30f7e6529006", "f75229ac6e06bba98b926574661735f7bacdc396", "bb893fac40eb901229567abb507a8cb82553d198", "631d21e51ca9100f1eca3c80dcf42db81cfc7e2b", "9584052327664425337b163695ee55c2b866d674", "0fcdcf8c0474b3f2ce5616880540c1a757f1cc7f", "81c03eda1d175fbe351980ac4cffe42c5dec47b0", "5367610430dc0380dfbe8344e08537267875968c", "3896fba68bac1d13478e4b8c48c353a44c958ee1", "97692960a11d4316880fb229cca699293e133945", "1f53ca209f982500069fed73efe2345358eff79e", 
"8d40150c7ec59daba7d1a34eba291ff2eac6388c", "baff74e4a9880d7477799822d8e68224466f3e76", "9067f14f5708b3ca1c6a8194b2d550fdffb3c1bd", "da0a22969a3f72eefe994bd9fec446cdf7566491", "4895bc6e7ebb894e73c08b9dea50eea293c8dcbc", "c7cc53622fc3f3b0970bc02dffd08d004d03916c", "28863c0f3a1aa186bf1c281c73784cf36e2bf361", "9ca82f5936723a773fb44336cd66c315f2024d34", "28f5f8dc2f2f9f2a4e49024fe6aa7e9a63b23ab0", "4cce5a690decc1d81b41975bf88eaefef2a71222", "d5a6a31fcebf5971f20bd25ed7faaa59c6c98772", "3e578969cad015f6e01cd4221985bf7ea4cb634d", "08276cb2bfa9da2d0cfccba881b109346749159e", "d6efd1b7b39d91b067488e0c4bf800ce3e3704d8", "4939355a627ec5d3be0ea51bcf8bf6137a965230", "5bf2132de8be99547af4aee6013fec8226c763b5", "9f7c1b794805be34bc2091e02c382c5461e0bcb4", "4d0d5101bb563061e4155ddb7df943a629c49e4b", "d8586c794456f88400231db046b0d33be7781185", "b6d5b71f78226c8696d7502e4514feb575088979", "066476a38f8751696f5f7b47c0fb7f1d8ecdac1a", "4f12aa6c5c9bc44c859fa1e584beefa1445d3277", "1db9e85f9bcbf14ff6174889985eb3db4cb0c132", "1df314a1e4dce42fd9fab094b79a0f2a10ad0b03", "617a7e6418b5a330fb98c38c61b4c8859a8118cb", "413a1a00f0eab2fcc3dcc0d821fb2f34e85f5d7a", "9588a42bff63fb36015e10fac9f3121154c3ab1d", "c17d6339cbc00f929141719db30bdaa22c8d2ec2", "8ab83d6da06a1c0a9f55f32c4d52bb5943b13e2c", "7c18965f5573020f32b151a08178ee4906b5bf4c", "19941761feadceb20897974483ede809c4793aa0", "cb0a6de9341e0cd74ddf499fb18d6336332a9604", "3605647befd040a819f00b1539a6e3cc5ffb53b8", "9a4d73973f2914a1cd7fc666263dd9b990d5d405", "df3668d32dbd32ddce651a535fa46a7b060d8d6d", "a21b0a67b4bcc4c86482294eade38f6e868497de", "f6f80fde9d9d5da840e5aef37d459baf9906daec", "38b9f2faaffbc7c6ad7fb3fb01c387f3155de68f", "bbc4bdab563b8b4cea55dfd6a7ea32680e082933", "1e93ec0f5c29069beedbe7d617f5167b82b70730", "db056590e54d7be16fb1c96deb9e94914ea9f838", "61c4b35443b152679c923d5db6c26daaec304172", "6a13aeb21bacde6ab3f2882ad11cfff888f092a4", "1cf29a0131211079fc73908ecf211ee78f090ad9", "27448716366bed56515c1b32579daf224165861e", 
"56fcc0ef7c10ff322626fec29f532af1860ff2f7", "2d27e2d8188743c4e3ca30fda5c25e70775f03e8", "34785299c5e8604f5b7c09c8190915da7429adbb", "c5af99522e324b72c8a563a5d6b7c9a0101efb65", "067e74dacc2cfcaa946983d97d86eda3c2776560", "538a9230ddc14b8a5d3f5f195aac4ec43e37d16f", "330dda431e0343a96f9d630a0b4ee526bd93ad11", "3a42872d476891d9cbff3a512aad38f5a14c2cdc", "04275ff79937ae8c37a7c69fa6bf818d2eae6b8b", "30044dd951133187cb8b57e53a22cf9306fa7612", "322eff0dbf5d7dc18688be29ad5fd7eb8c8d6d54", "eb3a5714b5768755a046cfa944433394534e96fd", "9ea8091ef0bf002263a3ba6f0962f3698e2a20c9", "3f0bbffd94a885c13103d98b0075a746e125551f", "f8e27d3d390e37b0a82e9ba5d9486178b25ae135", "d680cfe583fe61e49656cc7b9dbd480c6159cf0b", "7c1db13ae2c62d1f860fd2664885c9c93a28cab8", "04964e2697778dc843671c7764f0f912e46991ca", "bd2752acf6821282655933d1946f43bb4ac5e901", "05e3167206bc440d5aacf2256fd2e2e421b0808c", "7809a42a833b49725f3a4bb8f70f63f4d2cee11c", "a2f2996145d3d670608af1cbbda59c1ac28d4f7c", "200f1a55c5974c4cac243bed3131ac5a9338840d", "3365a33cf433ce6f8e6dff6f53f9b1c9cb43ac5b", "17ecd1a68ee571c47a571c9236b8ac299046ec02", "36d8cc038db71a473d0c94c21f2b68a840dff21c", "0a572c16e635312f118d1a53f0ff6446402d3c32", "27cc7ca12603c5f569c1746f9b1b85a95850d6ea", "7edb08d07fec2d4eaadedfa2b4e07afaedea4807", "6e6129b78fcb84ec7a6f861ac658cbbee4aea962", "10f641aabdd8bc1eb87fae74c63b814d8ef274a5", "398310036d4da84b0ccc8682e3ec1a6da36f123b", "6ee3fbc4768f578601d42b1596aaf2b0cfa1d40a", "c7f63fc2ff20513c6dc233ec3419417b43b39209", "1df49237f269b6809bcc90232776407359558d55", "06992ca951456bb88523f702f904dfd23eb27c53", "87ad56e06d48fa9b30e2915473c488c1b4b7e6ae", "18ffd83b80c577b3bb2a761e71b0355c4601c1fe", "62dc594c1eac220d2116506d187d9fdd5ff8e795", "97ede92a6a3579f9fc8ad7c179eaaf37b3966e5a", "463bca4464c5a92cedeb3c1ec3aa80e973c7ec69", "d5856f47fe117c114e8bcfbf2abc4e80691a512c", "0422a9bc1bde71d3b4fc4f52b4a62b15f2fb101f", "02bee6bf61566cfc3963fe42b320a740a9458920", "aa32f5b0a866b04a89f75cda32e0975a541864ff", 
"98bda8768fd4a384695ecc736876a87f51c4ca0e", "ef6a6fb5a4c7e7d34bc314bd7a93757420e18a25", "f84e392a19f227bbedd124c7841db3861d600cdc", "479b0502716e6f0f15b52a3619554b4484aab276", "42ea94af578b796aa96b68cd1c8ab6f55f22b050", "bc3e01016c4b2b75e9163c91fa65b64dcfb1acc9", "76205da3dd8ef47e20daf0ff2eb64cef952fbc6c", "cb472b1bd1fc60717adba88faf5c93c3d820784a", "78a802b2c520cd32cc96f22238e1c05d88dd0068", "5ba8bb7d204e7a5a29a043792546577500e2e5c1", "fbc93b13b8a6a5e4ed11310ce4da3be0b7541da8", "6c388fc4503636245fd464a05a9f843b303ad79a", "c16cc7006ad3ba5f2c5ce022bfc97a6fbfff847b", "c92f26b4a7116ab923e84e351662d1c8a6048b47", "72e8010136460340683a52c2aee4edaee0b48559", "ec52f9e75ff172e290d552b639e1369ed75e2718", "efe26cfe3b9440cd6984414e0e8c8400b7c630f2", "453f9c46cbc0e769b6cee60d65a99b23631f0ba9", "b5ef9846d98a020bebddd05cbe5dfc4800d7e6aa", "cebf5c315255f2d2a67f77603975e996742e8ef0", "ca4a8f9e15a5d24e3b65413587164723a4421b6f", "ab0715642330502d5efca948e4753651cb004d84", "39340257d9a478b3c3b736ad31df1c0a6a78c851", "91027fd707aed714c9095551e3d63b3e18ee138b", "e4421c1fdeca4917853804cf9218fd23084ad1dc", "a9c0b2f9f6405bbf9be163246867175cfee8a616", "aea72c9d3ff2ec944d60b08b80f0caf2c994ca5f", "3ec653164169c1a1b5c12ece2130326606a24e6c", "2df1f7f9281f5b82774155dc443820641e69bc8f", "04241ba56d4499a00beb6991d2460d571a218d85", "885f634029576fac22a3ae88c3d5232012b23175", "d3d37a44a7a0453445e6e433a527b0164ec99b88", "a3c93737a4497350768b0dda08dbc0826670dc5b", "19bc52323383732c3c7d73e11726f6232515d2f9", "b0751b7361e691b0df7dc70f6c39d41d6cfc04ed", "06a6976d9a3deac30b0a571d31f85c11ae4eb8ad", "8641593c67d87d81e528448a527e45fc9a5aa145", "52ebc9e9f3129f9a4f5fd79221249941479be376", "eb902c51df0aa2140ca5196f6f7e847fbe5562c1", "10ef80d27640c2da1012b5a40d672832ecb5f333", "955aa3e7317e236e41f05ec2853b64236c252af0", "489bbd771728df3a5533cac8a98e51c950ebb1f9", "7b358ed87f39a12d737070dc22b4c547ce378648", "ba153c968a142d90f6286e69a5f8bdf7eb9331ef", "b4d117e109b3a6762d1b675defd9f2b228613ac1", 
"e91542348c857db04fbddd15c12e0b2ed846f68a", "46386d4aa6a2b96106ab1d18658103622b24f9d8", "44993de87bbbce71f14d7917944d055700217696", "0d5cf8b424a4a850b7b682581a89aeddbce8016e", "baac597e78beadc5d38a8ee5783faa3d658b6649", "a01ba008252d2ce32f326f50c208c9ad9d5c78a6", "735418826055951ba8660bb008d92bfe6910330e", "bd477abdb4146aa4501b87d0aafaaec5d50e0fe3", "df33835a29b71264f165dcbad5e3bea3b9b6d20f", "7c66066cf8710f35f1bd076c8567fef12e1b84d5", "ca4681b7768c2b1d30adeb32c6f0b3bb9db1653d", "e467f7e2434ca74bdd4b19808a6b3d78b8c5ba1a", "2d1b69e2d6b5674d1623ddd9e42c8f700893d6ed", "5784c3a6049899654e0871fdd4d51799e22ccbf6", "dcce157aa2e5db081b36fd16544a038becb408ab", "eb3436a52fac7dd498efeaad0861c39d5361f7f2", "f1ddea510a838300c107194910b8f99d98c07fbb", "3e687d5ace90c407186602de1a7727167461194a", "f25e34095ca39f43159f13dfd95c45108681773f", "610a01b2997c1705213d42b796607a31a63a8e6c", "cc19f6eddaa96f6404e9831d65d6cf103d6066c9", "10345e247b82ef83be7ceae907d602485eb4fe4e", "7e984bbad042b145d1ff8351c4a7c5fb6a81e0b1", "29d83a055041b148f7e888b0c6f51abda6b06ad1", "f9fafd8ea1190ffbc2757eed0f0a8bbff610c43e", "16404d38775e2e8ddcd28e2ca6e79cf8a82d0d9f", "1a6b2972506d7d85100552bee99ce2b267e30d41", "281d699210e0316a57735c8710e7fc05a936b85c", "ce1ee9a23e935a16edbd0c2c56de7c99f4c0b34a", "d69b542b3714b5e90c384d39b5ab0c4bf9dd5375", "1559eaca4ee5c4f1a7816992975e0249f79c32d2", "0a4dfef84834d6e2bb0d2a9d1a5f5be40e7b8530", "3a55188f8ee3abe6d179d16984885be6e3b6daf5", "8473ccaa87f506f3d27e52d04ec4078668d7fc2e", "1b7c12c65d29827713359ab998d01c2083070ad4", "1d5d68bee741d81771e9224fe53806e85ed469aa", "d4fd1e1febfa9e93066d23ebd1d3fcef83bdc54b", "b64a94c42df080196acb74cd06847297e361ff62", "96ff19932d81bfafbd5bf1ca18f0b439e345445d", "04bb97226cbd73792440aab4734220d440fe48c7", "c0c306b4b0b289176980396c5efd6735e71d5f13", "6ac7fe3a292dc5e0f7d27e11b85ed8277905e9ba", "20a6de85d7d5f445dfaba90ab2e33879142023fc", "f4c42c58c6649538c85b85b5357b29793798109b", "6e80ad43c5f383c1d87b1ced2a336fe5cd44e044", 
"3edf3a996790fef8957e21c68ddf48b52238e662", "aba91a6dc0794242cd033269ab0c626de9e65ae2", "e76704e56ba5f485499ddfdfbed7088609e45766", "7278f4c361f960b2e54275c5efd98535f9ccaded", "5df93d7da8ab46f1d0e9deadd4e5e5568acd7651", "0c95ff762bdf6a20609f49f1eb5248de3f748866", "042f4315f757676c7a19281239d616523b0621f0", "bc67ab4dd66d9c6455283d90bdf4096de25e5faa", "2e082232eb37c98052e62eec76e674a491082544", "ca627984743536d9403cbc25c00d033bcc1cb839", "76d7cec5b32a2624b7a7a0cce4eaf4d43a3fefb7", "cc09cf5831fcae802ed2905a61ab502956655bbe", "923d914e2a2f04490df530891f287768b1f0d602", "08ff3e9f5ad47e59592ad993348b817003b9c0e4", "1ce1bb9d63f189f3af88e3f874cb901694f790cc", "51898d1039979363ee7eb97cbe3c02dc7d52f024", "8dbb08fdd8827383ce74dde937b74cf21b687cbb", "1222705b626a33974e85985ddabfcea135e9ddce", "04bf170753cee3d1da1b9ab41a5b0874685142fa", "865dd23274e95d947286bf34f6f06198cde3457e", "78571045118e275005beaff1a261b2d33460f0a0", "de725093e13cdc90209d981bea69730c7f6ee03d", "4f3ed8752966639eeb423f00ddb5480f6f27ad2e", "6f5cc6efcb3f983cf8518525b941d807cafb59ae", "497d46649af7dab664cdb9d47242df6dc06b1a48", "53f4576394c1381f03c46dc95f56d22eda51c527", "9a7fcd09afd8c3ae227e621795168c94ffbac71d", "2b6c031c61b78a9f9ee958d291d29c8ab359404e", "f918983c4dceb6b655bb7b68b3506c85f377cc8b", "62aeecbe5db3e4ed6b783f4b580157f4f1c8ba45", "11e53fbf0a0a487c1190e65d9daef3f11a769fdf", "471635c61fffa75cd09121b14e4da155c667c5bf", "fc74e14a3195fdf91157d5ea86d35c576fcf01d6", "57e8e226e605fe6491111c5dc9461527c5fce56c", "25b215169540e9109107a048c9e68159af82b771", "0c3c469e46668ea2c38a6de610d675975f337522", "d5fe9c84710b71a754676b2ee67cec63e8cd184b", "d12a53e797f799108415ee925c6cb6e626ce6d89", "dd87e53456d6cc6adbd04ac0a84a1314ce20835d", "6d500b0c342c1cf23efff049ef121bcf5e606ea1", "1dfe35869c4cdb41cc1bd2c622d38d57ef8e310f", "7a4ea124a971bdda4acea4b539092d4d22c0e169", "197c406b95340dfcdef542db532e0f7a967b9cda", "1d4d0aa949242d3f90fd3bf2e759ff5bb29ae3e7", "671697cf84dfbe53a1cb0bed29b9f649c653bbc5", 
"026509ad687f9cdaba8f2dac0fe5720e0553a8bd", "23347ef3e4d7be7d5b02e37de829bddcbfcd232f", "f287fe74e92dd701eee876629988a8e51ca9cc0f", "094f5e36dae2602e179f2c1d95a616df3dbe967f", "50744b8598d9a95eff73f433c9492c884b81a840", "be25d7bff3b5928adf6c0a7f5495d47113f80997", "193c9bd069e9457ac8650a8dfd4319bb3f4afd56", "017229c2df23c542b30c59f4a5eeb747e3d34729", "3c8b65999388baad4342baf363e2b872bdabb564", "06f15e73f962cd3642f941de716891e5a252b605", "bb6fb618824e29fe8cf16f5956e8265052377ad5", "0f266f62b441a0826608eb722af06be176b82699", "8d66f4204e625d078d72760752505dc7f34f6b99", "69a605b2ef38c59e0c8da284d6f27d33e3573620", "10064de078ea535d9f7f3c63f4bb8de351aa4070", "f34c85c24661ba9990146737fd557f7508677263", "8239a0b4cdb480c9fb913c7476f12825418b0909", "87b9d7d4f5fcef5680b9e74ce50c76be504c70a5", "217de4cf88e116f505fdf802eba55572a5d7f227", "c165003060eeb01e05800a5ee4cd327f1e0bf5e3", "987f73c1e17540716f47e2b4bd434a09ceab5074", "0de0c329e07ffb91d100424259a4a18973d731a9", "06d30fda7559ae1a6ac49ff7a9fb9280aaad2be8", "1e1a3ee9626c740be78f9c5f75f9c4d7edc45666", "2da845c75bf9ff02bd27b6e2ceb4732e89b05fad", "79dccc435d82c3e720f713030b39e013382cf057", "2333cf918f50ac2ae201a837166d310adf3a00b0", "13d53896f0ee30121c8dc75dcbfd5ff6c722199b", "2e786f3353667b537636fc1912118961e512be88", "d78dde04ac4215ed0ed6f2bd5d85094b389d7f5e", "6bd5f00d56ba2c6eb10cc8b533d9e9afaaadb658", "abc2cae2e03c4e94523c92418b4500fe5bddbda5", "c5543534fa4596d98e564ab9f792e8d97bfedb7a", "da633d1d43a7c5ea2c505e6902a7cf3d9da2a0f8", "97ec813ef2809557a6b9dcb58897756d634dde16", "c56857cf866603e44ca7a10c122a79c10c5ad4cb", "8f91464e6d7a38940360dc8f3efcfed58a02d9a4", "090b341def78df92d562e7d8e7f9d131a68ca769", "86844b7b10996bcd32db53f2b551e552f9ac1ce8", "f367fca7949e8ea925d211b3d2af3542f3624c16", "3a28fe49e7a856ddd60d134696a891ed7bca5962", "94ab01c46798b55cf02f21e87201d48f4fe746fd", "5d2a01e3a445a92ecdce5f20656fd87e65982708", "ed2a4a38b9bd78b530b946628fd25fcc38f148a1", "ad0cf51af6320cf4ced419f9dfab208fda85a10b", 
"370277791a0708b7c93deb21da172e025b558643", "df7d35c1876a22f563fb358724e9d02825ba7476", "4fff3f15b48e8e46280798df387d3dacd193fa1d", "5b8b9ab0feabc023f3ebcbd99d86bcbdb79245ba", "7d2e8b82be208ea2271260f9bd3ef0194c2385af", "003b141fb02078a4b5d02f4f803001ce22d73ba7", "1546b65e5e95543cf2dc0ead92b758fb31a5f4d6", "8bc480d32fb4b04c64a1de32fcdb1d281317c9f9", "0593bd23851b9f545ff7218887c09f4c62b7aaad", "4a3a9d02999fcf0895db31d644f40c98254ac4b1", "23339e409363a89cb5fe64e18e78a36286724de0", "b20840700d490364aa161536d014c6a13604502f", "c3a9767797c58f3bbdcbf0cc950ec3e55de5827a", "9487b0a0baa8483a27afed98cd2b688c5a1cde52", "a98312681f224874961f9cab444ba04666e7a0bf", "a4f38e32c23fd1f5a1e1157a4e62b38731f2e5d8", "e4d33362b4f99ab77fd6ceaafa183c087c79faea", "b3d8705d46a1d63b40a76bbcf8822b2e90b3b9ad", "8d5998cd984e7cce307da7d46f155f9db99c6590", "ab1f98b59fa98216f052ae19adce6fd94ebb800d", "69a4db0468263635ec810c49eb42f1edca728e84", "ed74afbd3e36f0fdf54da1e4fcb773c21b5de9b9", "cff0e53006c6145d96322e6401e840f405b6ed02", "a6adefcb723a3e52f24987115cc3ae7f6d08283c", "522ef585f181ecf7ce926fa159edf44fbef55ade", "2690264001ccd4b682b7b4c0334c80af6f5e9c9c", "35f3c4012e802332faf0a1426e9acf8365601551", "ac5a1b5a90dfeb4d22c37d806385cb9046e5edcb", "2af2aa21538783e46911fb857a23dbb88ed90c2b", "c50318651f70913c2e7924d6c9d8a7b326c07ad8", "f8ec2079838520fcb9394574bdd956ac9d3d5832", "4f93cd09785c6e77bf4bc5a788e079df524c8d21", "837635f647c42d03812a7f4ab5f87c5a49372a0b", "aeec61ef41d55b5c1becfdc00c2e4dbca0e379c0", "132f4a27b3d20e3783d854343b06d7fa3a6fd142", "0cd7ff53729dafe9175009d7f04570dbbf41a608", "c3a101f8fb6dd2fddfee94774ea3dbc8df8f45de", "5c1a418120208c27dc724bc24853ecfcabb22e84", "80688e72b00013eabe57ce88be0c204d0b5aea2c", "ec7951e69c7f31dde16072d369491250378f875b", "4f8b8a1f454eef624f1aed78bc4b60ee39b7b0ae", "d6daaec16ac90de8f99640f687ad7e9e92a46840", "5122a5d4bdf58b4f413d4de1fb250d4ab5e0608a", "389363432ee9fcf0e0cfe67b7b4f62618e1f4b59", "b3655bcc6f491ae995c652c7f51e1b9b3a36d39c", 
"2abd13f249d6483f1a433a4deb8c088d40c24586", "4dc056cfe5d06cb9e4cbf60ef5044f956ab92b91", "58eeae4de083747a2c018b6ffef4fdaf7aad3c7a", "16fda65f258ca22d856bb0252891deecc59efc3d", "c048513689fbba0a12a1ab9cb08ab3a533918519", "191eb9c416a991819fce7822b72407f265f19608", "0268d1744377ffbff48b014f513ea3e5e4a4dab1", "72fd97a0f595fc108d9cddf1949928143cbcf22e", "99ad5ac59f2139c021115b2e63c81fef19f01080", "c398684270543e97e3194674d9cce20acaef3db3", "dbe64cf1c505ca5ccd0f8ad914bc0eca97e44e8b", "3fd970da1fd9ebcf1b97f4d16f5274b25666471b", "5e0832848fab012b7e59580264257e0a3d05c596", "c5637543e80f97c9ddab8b54a635cf71941e2786", "7920936d6468b027cf52793f6ac6a05b550d87d7", "a1a49e2c1a424ef2dc6a5cf787d5eadf8421aaa1", "da833d8ec9c91d55256effccd370b2e62a896ccb", "c4b3a1cf8842da8c64f7abf4a352583d5fd9762c", "5257c447f9c50ee8bb2011fb72f8bd40bc0291d8", "6757254d27b761ada5dbd88642bd0112fcb962cf", "b66418ecc37ea0c79da5425e9ceac939ca9075ae", "9006647d190af09e773bfd7ca9d5632f30ac0d72", "f8f373d50d1d38e182a8c1b96c821eccac39f07b", "5d5af53310c56d7e7678b92b69b88ca8fd95dd1f", "884e63b5371883a1502f5c39a08e5100c89a5427", "286e4e6b0360c06f659d351ac885aafb62a6b73d", "c2ff1af8ae5768aaaf52cd4809bde3cfe9bef962", "85c7aab0f58f17816064699865cd0836bfbf2e82", "8985f8d9aa656ad5b85d0d06ebe027f8c4ddf46b", "35058a8166a8fa4479167ba33b3010cc8c839f44", "ebecc2516c998ec6acd1ecc5484702c4ce4c980f", "b6862bb11e7e72f7c2e71de9d8e5aa731f8a0df7", "c2d102743e265d1b7c6073b087d030425786deb9", "fbd781143a3f4c9d03c227cfbd1f528d658195ce", "1b1d31dcd365c48ca39b4eadcdabf1c70104e490", "ed7d4400d5e07efb7b7cc3624dbe5963b77fda33", "7cb85b17a35511b99bbb3975a333c714c1aad4b9", "6aeee62bd32ebc3c5349689f9e4283afe8d162b4", "ff70cfaf3e085a6c32bfa7ebedb98adfb7658210", "2b10b0f309546878ec418ae6e6f0a993fd7f3293", "9d1858a2ac2a963384dd6809dbdb2b4bdeffe040", "bce887343456e4344b8174b99cea641a97a7bfa6", "43f37a725dd58015bdca53937518042d81ca1078", "22344ddcae83e732ba0c2116d7ee9016aebb12be", "0edd3517579a110da989405309e4235e47dd8937", 
"f08e3f5fa4dc2a07e7024911fce7d01d87962752", "25b1a031a0559a0bc4079e9011bdf527e1a39d19", "c51fb195bd9fe3b7d001179a3a39bb8252304f1b", "5dd2f26a303272b2e1bfb7b6d994eed9e67d30c9", "72e14386d0ef1aa09c52e07086fc310c440db16f", "5c70bca2b3dd0a47b6259d384a709be55a60369e", "d1d9e6027288cdd64509ea62f88a3cbd9320c180", "0549dc0290fe988ede74c4e030ae485c13eaa54a", "03a83517298203605b502648ded886fee5a7436e", "6193ddcb2500e696ad572e6b483eab65c66f8c44", "92891d260e46adeff84ec5ea0817c0b6a70c253d", "c391478faa3a8903678a7bbc4ab17c8f9601e273", "6b359aefefe6b6c511c41afb873820462f5f42cc", "83a2385034b98af16f4a1a6cad2c7a481ec0a9cb", "e75601d37c2368ced9f5fc579608db7bceeae46a", "b03abe450365433450120b3eba460ee4e2cfc763", "6fe2efbcb860767f6bb271edbb48640adbd806c3", "e135f8118145b6a2e2a6a2088c04c26ca6d38642", "310a88a60ffa2d8a0fa7ef9fc77fa842d16eed57", "678f2f9b3597cb0304f5c0c937fa2f5b25daa6aa", "4914ba227a02f1e2aa95cf49b6afd2a8427d5546", "9e8f9786ea868f042f7d984cddbd9a6dc23969ee", "a88ca6e81e04b737ee6a416263d305db06de72c6", "0ea53c86d24b1b80389eefaf0a84fb9b2108b795", "4b8762d7637868b6ba0c97c95b2d4949d103ecdc", "b3adc7617dff08d7427142837a326b95d2e83969", "063146e2b400cad120d41371a024de319eb67c05", "81f52fef843df38fb22fdbdf26da18f9123712b3", "420fd3f64781cc21e188f8325a8df8911c286b8c", "5fbad7c39509a3edb4f8a946e2676562e88264bc", "2b4bed0fadee29a84a272d7c52adc4a70e1a2b52", "bc21bf4c733e117d2d969fd5605bba4251467243", "319aeaba5dfb4f7de44668bbedbbfdcb7ebc50fa", "989ca38616b52f23c2720ba5c6df2493dc025d0a", "2b86919eb8073d9b0e137b23cc9a14fab8bc601b", "a21685146f68de1f87e206c0a22dbc0188d55b2d", "a75095fcfa78972dd222810fb3e39d77ff6493aa", "f7a2424eb5af9613544a945772addcf2e19b5f92", "d4712c75a1a51ecbc74e362747926a16a2cd36ed", "d335c623f30a290f3291554ec500a8ded69d03db", "a71d0bf3b8fa6cf0069fe12f3fe6d695fac7dd44", "4aeb5520a941fb59f20093cbeaf4b84b35df78fc", "f3c4e3c50f1213cf74f30b1dae9717cfdf465a36", "64a39028f5d70ff2f7c3861847adb1f995319c2f", "32f37cbc7806c37e8b618d935800bdcd6e7108cc", 
"0183eff3a60f44bc6e4bcade37518f6470af3437", "c55b0bcd8081999f265468f87f281959bfc786f7", "71392858b6af5b50b1cd7c740560697101f60e46", "c01d65f262814e15daee73538a0a20d2a88e05e7", "8b653159e55dbac7130403cafcc9d3e86eb47ab9", "7ab233422c7505ad58e052acc74ce5c557a220b8", "1d58593a61f887e2ea367a5f59dade2493ae0b44", "d458c49a5e34263c95b3393386b5d76ba770e497", "bb491d3bd43d8fb018cb7f14ca4a17738225bafb", "299d260138692a0affe8ec0b8cc0ef4a6474e6a2", "c3980cbaf613cad1fbd0ab6da472c789cda583a9", "fb19c7cec103193ea4f4265a2d9534a20893b2a8", "f563ef1a0fd024edb91a889b17b64aca84624be6", "d399a5dc23866e4590d7a76174154a582b93a18d", "15e0b9ba3389a7394c6a1d267b6e06f8758ab82b", "6f9824c5cb5ac08760b08e374031cbdabc953bae", "e953eff0c214438f7f32f3b17994ef7d4ecc1e4f", "63f38f60022ab78aa5e47bd84070547409ab3cc8", "77f064553b780471a2812ed2cb667d7332433bdb", "a4b9161ef85519206b7b2b9b0e1598b2beb4e584", "85c2de95080c1e8d955ac57f64a6b51ac186af32", "960870523484a7f66cf8afbe833afd7d343b68f5", "5b9693d2f6b7b731f9abdbfa5c35d641b881daff", "42efcb8cac3889ac25368770058e000249f68d13", "6862945b8abc805eec2dcff0eb2445f7c8ea9a80", "7634fd094c435e2084abe3f5553bd814c9d0ec40", "e7cf3e01b9d74531cbf9f7158e099829e233e261", "284e96db3ab52fa2e2d16cd611ec6d2639de5d6e", "46e866f58419ff4259c65e8256c1d4f14927b2c6", "5ccb73fa509b4c56c765cf5ef850060ca8686bfa", "b28e142376a2dd639f58935f2f63a9dc7651131e", "ea672c7b77228ca570aab7888b96de7952b2b21b", "4263630a35c5ee34ccf9dbd81c0541d92d0c7d5b", "5a62f0b5d5afaec50318a6d9063920a6aca6e3f2", "551a11f9db279cb1485b1598adb8552d345c265e", "bcf73131c2be397fa2105ac45df3ce1a55c07c2f", "8fb611aca3bd8a3a0527ac0f38561a5a9a5b8483", "1e9461b2e48e11638b85c2f2dc7bca043f9d60a8", "76cb86ab21796d81790b1c98c10e4090ea187c7a", "5194cbd51f9769ab25260446b4fa17204752e799", "54171243dfae9d7343c78026c9b94004df3853bb", "dc60ce6eb9ace1dd9ec9daa26a0bb456532ce0b3", "940ab36a8b2cdf6cb6a08093bd382ad375717942", "9ad0e5b0342a3878b18ec245939725a5a904a52f", "6a203565275610eac73461438f4cff1a35d5075c", 
"a3d8887625040d3c07f779ac5353452fd48058e4", "af9d41c598fc5ae57b20948cf664273da4664931", "caed930d54a3f24b86163dd31eee9890ec5b5c5f", "655b1f83ef218ee6a030b5541d2865bc6599e6d9", "9bd3b15944ac28aa7021db126e4344a478bdabf4", "79a166bf02402f7ed9947b826afc72f24f65b311", "538f735450463f40c78f60797899fcee47df72bc", "e86e2106dbedbb6d8b1195b77540971b9d58a198", "6e0121e3e8caa27d6283e75fd13a323aa37d0df4", "27784c7d525b338f236de37a824a010d636102cf", "ec949cb716fb33cb9273fc90f36b0351056ef0e0", "9ce97efc1d520dadaa0d114192ca789f23442727", "1ac4e83971cb2d0114eb3101651239c27d1b5b2b", "b5c4056a8b07a050f70660720cf606a2e00dd7d7", "af9685d885b88aaee4ce9c985669032a9ac9029f", "20828d021b5987bebc3ce495e44eb9e48eb9be3e", "29954bf080407f23c8ac140202bd2ae5a48fdede", "a3f67dbb0d72b236ff7c11b9d3611478d04b902e", "38b61bb76131659916c50f4f7926f4d225076f1d", "39d091bd1908229ba0b2490f762b23a8c7050693", "902794cdcc6d908a2955f4c7361a881fa76afc98", "5ad88a16e2efe9bb67c20cdbd9b003ffb79c12ef", "593dc59fc8b89d5040be7fa5239755ab47b0e01d", "5d76a25936366c6619d2b5e6b74106cdb53a4978", "238015b2bb185f7154d9461066b7a54c96064565", "1253223b9efba52a9d0ab2efac79d2c0d010e7a3", "56bc524d7cc1ff2fad8f27c0414cac437fc2b4f0", "45f85371e3480b2631bbd03a5c2f57e0b00f00a2", "f77b3e6b6eb4bc6d6bfeed290a1bc533bb97968a", "73599349402bf8f0d97f51862d11d128cdba44ef", "22dc91889312e796ad36b363bc5ed959714e4694", "04b194d6358957e5a48b3e33a0738de59cf7cccf", "986a1f8d07e2ae3df98fe91f6fa5303c808df763", "4cca640761c980c77a696a64ad3c1e95b82109be", "7a88d33b3e23a2cdf1e8a2b848c73a12a34ba88c", "9838ba7a31a096503def7b69bf48e5d327f95caa", "690a1b410cda35bdfb5bbc8d89af1b7a97c703d9", "1a1955920ee36d58265fe17100ca451d899e8372", "6293d33e176ba7ccd59e94f8a137876c1d581e1f", "f1cec5f837638efd8fd592cf5493f33ed1fb6995", "70920447b8300fd65745c0a884523e4d52d000ef", "9e889034999221e37da9f72bbb60383712fe7806", "7385c53466fe95d847d21a04a805f532ab3e0101", "444a3d811380318d6c01d952e751e0aa55588595", "299ca90452aa8a7dd517de3ff3c9bf224d5100c7", 
"747fdee12e633addeae3b74c12643cbac2c925ec", "1df8172763d403244e77be8ad0461d3f989a9370", "c74562aba74472794736728fc5ba95891e624f4a", "dc63e89e014beabe084c1dc72838c473d8c7ccfe", "10d68ad6411aecf321c1a590a481901623495181", "1212af29cc596e8d058c1dc450b2040c51be6d6c", "3db739406a5cc3326a35ce59cb261a41a0ca8f30", "db412dcee9aba0615d7972fdee55655061ed0178", "003846e4559fa32699f08ecd09de13ed5a4e92d2", "75d8f2da0e9d80eef141c765254d7752445afb53", "84af83ff6412a756df58b6436f0d2e3c049e1f12", "83b700f0777a408eb36eef4b1660beb3f6dc1982", "4cace31e5bb5ff11318b66ff1050e84ab5f334cf", "8b2c9743de156189bf927a158f21e08412ff0de0", "ceb4040acf7f27b4ca55da61651a14e3a1ef26a8", "c626a9d75dfd73e26cf30793d5ef71527cd9fa95", "9fa59e7bf64875da23547ebb49d34fba5233c61b", "b3a82f7df6d19898da0d0a01285b8331e099cea4", "0adb5923fb1955f7ca0a85454afe17e5d25425df", "5c733b5994b8d457f8496dcf8f6864a95e53b9a0", "83963d1454e66d9cc82e28ff4efc562f5fe6b7d3", "cd4941cbef1e27d7afdc41b48c1aff5338aacf06", "b3ba7ab6de023a0d58c741d6abfa3eae67227caf", "9b7878eb0681d107a3892c2a166beeb6c0e2d36f", "58628e64e61bd2776a2a7258012eabe3c79ca90c", "55ea0c775b25d9d04b5886e322db852e86a556cd", "1b3587363d37dd197b6adbcfa79d49b5486f27d8", "a15f4e3adb56dbbdd6f922489efef48fc5efa003", "23429ef60e7a9c0e2f4d81ed1b4e47cc2616522f", "3152e89963b8a4028c4abf6e1dc19e91c4c5a8f4", "747d5fe667519acea1bee3df5cf94d9d6f874f20", "e480f8c00dfe217653c2569d0eec6e2ffa836d59", "ee463f1f72a7e007bae274d2d42cd2e5d817e751", "bb6bf94bffc37ef2970410e74a6b6dc44a7f4feb", "c4934d9f9c41dbc46f4173aad2775432fe02e0e6", "daefac0610fdeff415c2a3f49b47968d84692e87", "b2c60061ad32e28eb1e20aff42e062c9160786be", "66f02fbcad13c6ee5b421be2fc72485aaaf6fcb5", "c8fc65c83473c633e2bf1c13031ccd10617cc8a2", "2d8d089d368f2982748fde93a959cf5944873673", "1a1118cd4339553ad0544a0a131512aee50cf7de", "efa08283656714911acff2d5022f26904e451113", "638e04272c312d64337b14f001529084f2c40bef", "936227f7483938097cc1cdd3032016df54dbd5b6", "69a9cf9bc8e585782824666fa3fb5ce5cf07cef2", 
"e0b5815b0d3d6c02a114ee27dc6ea2d2c40a4458", "71f4a824767eed13fb02fda38b1f615710f8128f", "c112f761bf0f7135f82331b22c330130a7f71cfc", "7dd6feca2c444323ec8e7017ddd8d1253b29c6a1", "ad5079e4446d7c4bbf1895d8af56f6d299eb1f0c", "26a6b2051fe7970f94584e9efbfcf7bdcfd1d6d6", "660cb4c5ba9a6bc0d1bcf6b0dff66c47512cb2b5", "c8f428eabaee6423270882a5d79505637ec9724b", "336cc68bc266fb4172eb7367ce1ad7b69fcbb763", "022edc074693c52d4e689947bd2def8b2117fa8b", "f8fe1b57347cdcbea755722bf1ae85c4b26f3e5c", "dd1a85e4c2ab2ed241af0e8d6e588498597abc8d", "178a82e3a0541fa75c6a11350be5bded133a59fd", "a7152589980ec27375023d719eec6acc04b7d4fd", "70eb48e06d9d5edf84246b772673b6d44af4b3c6", "182268d943926a048ea3f128ae8a3f3679d3dce2", "c92be69c7b174c1fe6eeae6d7e6f6535420f48ac", "0a5d5f359614a5cb9f42f5b9e2ee6409975703e2", "15d1582c8b65dbab5ca027467718a2c286ddce7a", "5cfa8d0384bcdf5dfd7501561c748e69f3a2a747", "39fc0fe46ddf43f13073cbab077d981547889dc1", "e52f57a7de675d14aed28e5d0f2f3c5a01715337", "58ca00c22a7a365f71ddbd8bd56c33ceecc325be", "1afef6b389bd727c566cd6fbcd99adefe4c0cf32", "0277d5e54daf955fe670505bb1c34ad67396448d", "4e4311a5fd99b17bed31b7006a572d29a58cdcf3", "7fbd2cf032ca4f390f1699ec6743f55b60e2bb4a", "23b3b07cb484bd3aaeaa3728f8977c44f50443f6", "53282ff3b130e9503c5d9e8ae77a2d63ac68e07c", "53c3f71f5ea3f93c33e5d68813f24135cb5ac6af", "5b14d9264ea1020f05d4e2fc6144e5021986d917", "0e05b365af662bc6744106a7cdf5e77c9900e967", "92facdae51b96e256649ff3dd112ec5cd3feb466", "cc91001f9d299ad70deb6453d55b2c0b967f8c0d", "7060bf98867cffc08e8aad59f572d3939f19a774", "65c2464aeec7b6ea84a05529576d421e19944e15", "fdceab2fd385f1d546f2a19b8186b325d5499477", "14d78b7e0d0813da722e8447ce4ad57a3435b6d2", "57f3a57fd5569f15dfbb2cbc69e7f6af1e88b1da", "eb98feac659ff5c7e27bc8eb4f425bb158e3fc5e", "635ae8939739bbaeb4d35afd1c1491bcb3f82737", "2c786b32a621a52fc7d00499e4b056f149a4fba7", "3f8537c2141ba19a03876c7bb5c1e71a01b56838", "899924764816c2c68d15d964a24f4fddba8d3b29", "c026c4eeb0d29d2842eb0b34754715e497cb1af3", 
"0f24db0c7a8db28595c9419354ec1407c2b7d89b", "ddfc978b3a149015c9e517fbfe1b8563df5e7356", "b74aacefedefa3f4338e1c5999dbe756ea2ff837", "485e0d178bafa959ac956aa8de6556a2439c6663", "5926e1e4ae5dace2c723a9c539c4a17433bda8c6", "13a994d489c15d440c1238fc1ac37dad06dd928c", "6a4508df37c95e4c9e6d26ad70a2e718134b8425", "0831a511435fd7d21e0cceddb4a532c35700a622", "f9b2d4a38d647f2819951f8876ec5d067696e5bc", "2ad448227b759de8a603fa57d3a038a9bee12785", "116f0b1f54365eccffbb3310146a43c6935cdb04", "47e4673730082456836e127f6833b20cf102f260", "c2fffbc7c82ebfde82e6dcb36231a3969e91ee1b", "da53d733ac5c59397d925ff40c24932791b98ca1", "c553e4a2aaafca0736064c07944eace986b4d6a9", "a7eee3222623778294461102d0dc770d4e09a7c5", "dedabf9afe2ae4a1ace1279150e5f1d495e565da", "5d197c8cd34473eb6cde6b65ced1be82a3a1ed14", "88f8519f442826f9b7b2649c1cfcbc5c82160428", "907311b9631d9dbc9550fe2425569735f495cc8e", "37b7f55bdbf0a9a61e137199e521c29873f30ced", "a094e52771baabe4ab37ef7853f9a4f534227457", "aea36332b83ac569dabc88618eb6c4ec8a7e68c9", "901bd72b9caaf78e096c3d3e015fbbfa5e90c8c1", "a12bc1b9bcd81cc0a8f7209f0538c5f356e5f4d3", "7b7df29da8ccf4a74908f70845c7b1e523a174c2", "ec0177cfdee435c6522ca4ee8a5f97ac0412472e", "dfdc683a113c6543de36c5bec9325bbf4a2ad25c", "1048c753e9488daa2441c50577fe5fdba5aa5d7c", "e97ab76f97a09f585bc21d1fcda4ee78b77d0cb9", "9729ff547b6882b49898c1f5abb69646edf77e71", "d035c8bef0e0619fd68ab2cc1c9e71dfd730aefe", "e3c741a093847839d37817c16506cd7137a5579b", "2aad85de05e8b9137558926678c94442371d37ec", "23c7465c16ea9343f74a400f92b970e84878b65a", "b372432ccd4c9cf169b1eee2adadae074eb3a3fd", "37992120053b50b2f92eaa1949273bf828a54b50", "2c06c14b68f6621b4d1b9e1cf8fe81069a8d42ce", "4996867811a2b66ff662ac8fc9eec119cf7f0ae7", "9e10ea753b9767aa2f91dafe8545cd6f44befd7f", "e43cc682453cf3874785584fca813665878adaa7", "f74dc0b52d46fd575cbb8bdf753c821a752525e0", "66080593dc4ea2347d4ff8c10e4b4dedf0d16ad2", "c570d1247e337f91e555c3be0e8c8a5aba539d9f", "c28e9523565104d233351665f69c63e6cf0d1424", 
"0d595eda148555402d59750cb08f79bd52a859d5", "1d6068631a379adbcff5860ca2311b790df3a70f", "81f1d80dc9775995d9c080043ce128fad0298f48", "326df1b94624b7958cff0f7e3d16e612ea9d7e4d", "5791a0c98d63fb8378dfaa0f36a0a229c04df6a0", "318c96a87a7f2e573a3c21f4b2076399556a8e0d", "c92e36689ef561df726a7ae861d9c166c3934908", "0a2d2b79ba39e2140c93543b8ce873f106c08e3d", "46a29a5026142c91e5655454aa2c2f122561db7f", "e4b1048e37c7f862392864dc980cdd35202f197f", "2fc15f80080b4317cad60ad645300b49afddb19e", "7ee610cd384cf1950d6254562e00490ad05eec57", "0a0028d18578c3ee0c01edd4157d7c1809db30cb", "17196a4876f65123d83289e6e52d73f1f92ceffd", "06e9149b7ef8bff3a4b5a18fe01da9a522f91891", "c6b73d64390db1aa492b904ca577913f217950a4", "bbf49e0dc67663b2d116eebdae93abb0f276ac8a", "c4b937a68c068e9d70548b7dc2cf5527aff16924", "0b37f9fc4fee278375c44d03d23bbea5d026dd2f", "b19bebb229d50451602e752435582d340294a896", "f7059afb640bf21787542f2163eb7a5c7fd0b687", "08e56681533a23c79309718f76cc0081a8baefe4", "3f4b8fe5edfac918c1c74317242b2d91346d5fb6", "6a3ec8a8904436020fac8ce7873c0f47abfac17c", "9446d1f95d5621ad1e5f3e4d4e9379f8f6ded21f", "25d474ff23515eeccbc071897c144957edfbd7a5", "002d1619748a99aa683b5c30b7eafebdfe6adfc4", "35842f3b8f11918588687333920b2cf64599d76b", "afbaf5e3ccaef3e53ec85e42b97392a0e95d3991", "e55f7250f3b8ee722814f8809620a851c31e5b0e", "df570f400e2dc597a8aec9b31786f682b822e21d", "8a287a95b93b2f42279c321a828a6fc362a3db21", "50832083df91f83333364544b24f8af4798eca32", "052cec9fdbfe12ccd02688f3b7f538c0d73555b3", "233ad668dba2bda721348254d41265ceb24ba2f1", "892db59add66fc581ae1a7338ff8bd6b7aa0f2b4", "c03e01717b2d93f04cce9b5fd2dcfd1143bcc180", "9ae25adfda5e9788e740886080856d12a9f0d95f", "07b682ecd645712fd1d1d1ce31c02ad548e3b05e", "36bb93c4f381adca267191811abb8cc7812363f9", "4c797506d610525591288f813621b271ce879452", "5d14cc415a93e6f3a625ed7794e1fdcf99ea5713", "4fb9f05dc03eb4983d8f9a815745bb47970f1b93", "46e5d8f4cd7a49117876258fe70ca902ebba801b", "032eda9419ba8cc57b093d4d025555f664349f85", 
"29230bbb447b39b7fc3de7cb34b313cc3afe0504", "ff249ee2d69f14fc130fe8433732e9733ef066c6", "09f5033e1e91dae1f7f31cba2b65bbff1d5f8ca3", "d56b65d0f65afdfdc217c880e9c8fdcafb23bfbe", "25742cfeedc8c6c4016b0d06f10893d469f62896", "02fd953af0391d395bb7495d4450bbaaeb1ed70d", "1a5a79b4937b89420049bc279a7b7f765d143881", "3dac3d47ed220f010549d78819b27035d1ec6844", "1b4add23ab3e3ce40d8ec48120bd710d8b53d2c2", "bbdd99189aee252adcb0ae80d094e2858c645a78", "da2d7ca77376b90a79287a517f596af628c8f488", "3991c704ef1030c5bfead2b58463d39842b52985", "c4f4122d16e1fdb77cb94152d0d1222b69ddc32b", "8a3eaaef13bdaee26142fd2784de07e1d24926ca", "1b1eb9b758676f9b249ef17bb473cfd38c11e62d", "3527651a701bb7b16577e40d928825b3e3823f8c", "f5c5f5fb2bfd11b65265a7a088b50185bdc7bccd", "fff32fd598e41ec6dd6903082d77f43f16908cfd", "5388638c7801b11958d937c89ece764bc769e298", "c8a22550297a25dadd283089f009015bc0df5eed", "5ab8d83870a6fa71f787f3fbfdd03786801a3496", "4a12d2c80ae7a4622bf500400ad6fcff83dfb5de", "66cf52b29ef32d5a13c646eceaca98cb08fee6ba", "6f8ea33c29de7ef94f674c4c847185a127c6ea2f", "0580bfaf154e5e2286a115b421b17ea1ccb9a7f7", "b0b9fca75b56a9f0010805c7055e8e677e7b82f3", "d7d777071fb256706c90193ba4a8046d5fea15bb", "b5d7b34821c09e158db7ae61b8aa628af357c9e0", "f2d2e07bc55792531291a8b91dccf103148371b9", "482e8a9323fca1e27fccf03d2a58a36873d0ae10", "d4881df7dafb7beed668cf6360066de4960c89b3", "2193cb7a612e25fd4fe773312ec03d6c8a2dfa03", "ebc0fe539f8562bc10181e15f42c4e21816a922c", "e38709a2ec162a6f2a2fa3b4b6463e752267b154", "18eb8e376ea07f8ff68f7eaee16e34fcb0c4b23f", "993cf62bb73e18550cea82e9774ab59e43f5449e", "dc452f3e531c4057c930f0538d5652ad9034d1aa", "9d4c05c7c9284c8e303641b95e997f11df2dd1a7", "4f10b81f822091ce2142e33f0578940da1e25ad3", "189619de93e83cdc26e275bc7652463328ab3f5c", "6fa3857faba887ed048a9e355b3b8642c6aab1d8", "3ffd6531263bc1d0b7acd4a234a578c285f77917", "50f7d3faeeaca41748df4b8fd1187712add72bb4", "fc9e48e6688792a3edfe373da51e2b4e7182c6f2", "ee46e391288dd3bc3e71cb47715a83dacb9d2907", 
"fba88d9030853870d83949bb1baa8f47f1c4c2c2", "6075c07ecb29d551ffa474c3eca45f2da5fd5007", "e905d36e04aeb356c07182b840ebcabb417e7c9b", "4cbd8adfdc4d622d9a7dd3676869042f16d073db", "c3be8f94eefd5bf984e9bc2e8918630de1729238", "c397408e784004240e866d0f31cea7b9e44fdd0c", "d0dcef424ab6b32d00bdc66e8d4a61ebe911fff8", "a5f10a8c7a64c3339f7cbad9b518ba29f5f74b7b", "db9bdf10452e99a6dcbc5c01e8c934f85d61b101", "0b4c4ea4a133b9eab46b217e22bda4d9d13559e6", "dc964b9c7242a985eb255b2410a9c45981c2f4d0", "129a6daa54a7334930b6413875b6154acef3922a", "39c09940e03fe5c0e6c9da28253cddfeb25a4aca", "d3af3935eac968372b42e5bd6cf32a95420b0ac1", "a03f51c5a56401986e451e9d50ace2bc4686e12c", "964e43f4983a42ef3790c265bdce42c1fce56d79", "fc1f1374e45883aac33092401bed89dc0566de7e", "c593c6080c75133191a27381a58cd07c97aa935b", "5484ad04ac0a256b51fd1a3eae48483480862ab1", "7a76c45fdaaa2756233d00b4b1f2e3a580df9870", "bec31269632c17206deb90cd74367d1e6586f75f", "7acc05ae92823c12b28d6ad73cb2a7707ccb6c7b", "5955e31c413a4a08d149de8af843355ac45525bc", "e3a3a6c1f4802ea1cd0c34d0b34e4c83689895ac", "06c7b96d46cc8d78f32a11f649707d22c585ba95", "79b669abf65c2ca323098cf3f19fa7bdd837ff31", "2d690c63b00e68782666ebf86ac0756fad100a18", "09ad57265a1f0dfd60b3360daa499f5e93059c22", "2b2e6e073fe0876fdf96a336cbc14de0217ce070", "278e1441a77fbeebb22c45932d76c557e5663197", "03c970e744cdfaa170fe8f5a188548c68f2bd4f0", "b0502dcc6df378ee3ddeefeeb1cc51a20e04f39b", "32ac4e5a8dee203c0b99e15484893fd9d62de43a", "4b450ead769a48d22c57741c0dc0631df759bfc6", "d5194642b3c68920e9fbf9839f5704db5a39ae9e", "494f4eb035faac6ac647203d36d0319ed44db244", "57b55a7a1adc8ec06285ebaf93995d67cf80c719", "35067d880e9cad43811c6669a6b39e337cc4acc4", "4ecd459aa4b4590bdc552e07b6d0bbe132fb1fcf", "629722342f719ee413e9bb07072a2fc2b4f09a26", "6502cf30c088c6c7c4b2a05b7777b032c9dde7cd", "b527c241e2fbd88b8ba1617f3b04e21b9c8ea832", "f39ab00b78607ac3685ef284cb3c78d0ceb2686c", "b8711c88f5e7064e4626dea33e5dd1faedf78ef3", "7e8edc45fa80cb0f7bc2c20e8eb893dcadde2c8c", 
"7c0d3882c02c2adac5f587c18b4dd1179c41753c", "18855be5e7a60269c0652e9567484ce5b9617caa", "589b30ebdb76659ce5d3a19cd9fa0e7a3466d85d", "02b9e1e24a3d392186673b2f7ebfd2602534e52a", "ac61f5e442e653e2503aea85425f0b9dba9f768a", "025ef3fde2ee852ffabfa726eb6ea95a7d8de802", "013b72b0941eec78c6a23bb8e94b9447793b7833", "1a00927d3719a0b6c2699f0ad1e1f4cb8402d4ea", "08bf83aeb7ec36815afd1183d8268ab4e10a2961", "2d9e88db5887b7ee3501bd745f671d3acfc268ba", "4c1e47ba68b81d210718f837b197253164decaf0", "175ecbc1e6204601bc4afb5b5f058244f183b6ff", "c94ae3d1c029a70cabdab906fe1460d84fd42acd", "b8053da77bf1a5b4c87fddf6140be0a612cfc164", "75e8c765a51ade8c9a19ac8b7bae6eedc0542d66", "0b5899af61f410cc76bfc81e7cbce4499bda2570", "42a6cbc729904392808195d8d4c91636496c92a7", "f84a8a7d9bb4a07e5f07cceaf9c80163315b12fe", "7d0cb85f9f63afc23ce42b92337b12ef91fc091e", "e0969c22be0c93a2894ab2451eb5331e190fe98e", "8ad4742e656c409e5a813c1a6d5f21fd2e3a9225", "fd9feb21b3d1fab470ff82e3f03efce6a0e67a1f", "de86a9f484addcfee57a6f5a9224aa77bd23345b", "db157b6a9733fc50086bd8bd197c7d995f5d13ce", "ce5ee0c014b5559c8f294c34076de7e978f84844", "2a3768ac4f6b3bfbcce4001c0c2fd35cfcc7679d", "4963aeb5e5e71671ed7403a576eef82d2d95a267", "fcf8bb1bf2b7e3f71fb337ca3fcf3d9cf18daa46", "3bbdfa097a4c39012cb322b23051e360c2f7f023", "578e755e669caee147964f9412c23943cd0f0789", "68cf263a17862e4dd3547f7ecc863b2dc53320d8", "eb24c37a43cd3e4a957c47898b7bbfe7c8695e3e", "d97288e3cb6ac11a7445e5a896e89866cc47718e", "765094fa3cd745bed29c20dad92dbed8c4cfebea", "8229f2735a0db0ad41f4d7252129311f06959907", "d0de92865a53576af3dd118f4d1fa73be12aee9b", "510257c8cedc079aae88ccbf3dd10a84051c8fdc", "e042c4d038373a68cca109336598c0323e7a9b60", "aa2b82cec652b8f9808a98ac708058a481822210", "6665aea27cbe927ba67c1a3b512362aa07a544d0", "a06ef8ef4838c048b814563f7cca479c7d4513f2", "36cf36021540ad81d86d6148b133751d45caeed5", "f7ffc2dc6801b0feee7d863f02ae2ca34c3e6a66", "af6a0bab11a95dfe99b835c724e7901b9bb8aaf6", "56f759838cb154f540be809d5fad6540457facc8", 
"aa6b683c891c15389b90eced73c56f34065ee3be", "68ae4db6acf5361486f153ee0c0d540e0823682a", "5eb2ae4f7c07015b99dba5ab6ce9d9386d493eaa", "05a6fdc344fa9255316e0998e0f831d7e892a25d", "59423a5286d25134a916dc9439ebebe28893aaec", "279acfde0286bb76dd7717abebc3c8acf12d2c5f", "1319275904ef21118a83f04c5ba73787314ec784", "4ef4553de88191fd46ec8000589c3fc1988f06b5", "afeed4b79eb88138fa342b33d6ab33e26945e19c", "06eafdc0b281edb8ab4d65012da5d0c94b55970b", "362ba8317aba71c78dafca023be60fb71320381d", "c34030a215f0731ead15b358d947f03c33e828bb", "a7664247a37a89c74d0e1a1606a99119cffc41d4", "16f0195a60ed55b5e1a955c420f94a99e5815258", "dda0f68fe949dc594ac3c42438d780adfe0d742e", "b5f773d41be527ad5af5ab26e5986c88543d73b9", "0344f29da9641edc36bc4952e1f7a4bfd8dd9bb3", "c7657e8c546ecf2987fea4d27df17992a4822aed", "7bdcd85efd1e3ce14b7934ff642b76f017419751", "a0e3775fd5d5df951ac7f65d3a9165bf4b96fbd8", "3949967b873dca8c8adf0761777e2702415c67d4", "355a0e14992896f88d4043183f7d5787df8e1ce7", "2809ead5d1c8f436f6c761f76d1fc2dfe1977035", "2bbe89f61a8d6d4d6e39fdcaf8c185f110a01c78", "14373c9fd08dee8f7195a88430121c69bbebbe1b", "af1fa9d29512fc8f4c07efdf75d3f640567a5262", "27e97b67a8401def58eb41b4b00d3dfb0e4ad1a8", "0a5ffc55b584da7918c2650f9d8602675d256023", "1b224ad99c42e696b6d98c05a87f1738e28c6c5e", "87a63c4f8ad387b6555b37b99e003e4054cadfb3", "7f44f2d7b4a84b6d87dd6f7089ce3ee1e6359272", "68f8997b5c6c59028eac099621cbbd583549d35c", "51173e0f31f362f3ea59ae3e98c5cdf31b2a2ec5", "77816b9567d5fed1f6085f33e1ddbcc73af2010e", "54f96a4b675eff2d75d1fcb517fc4a9cd64f6966", "36e4578e29adacc5b44edd3bf9f2a77561b0f2e0", "c8c0e8523481996a77423eac861dbcec7d5a7716", "87cfd0aa3baa43efdf40fe36452516b89c05bb5e", "39316490f69f9469d0142ba42b3deb5d29d70ec5", "34d14e8752f7e368b78b4e72beb851087cc336b1", "336b2f1e477a1fea7651d5c1b6eab7fd17a039aa", "40c357a9479bc17c170260091aa968f6f6fbaa78", "5b5962bdb75c72848c1fb4b34c113ff6101b5a87", "bc6a0a107068b5a1715510e815c0103eaf80672a", "1d58d83ee4f57351b6f3624ac7e727c944c0eb8d", 
"28e1c113b1b57e0731c189d28e404cea3bddf260", "d00c335fbb542bc628642c1db36791eae24e02b7", "aa925691f99a48355bf1b6b2c22a0260af35605d", "a3f68fe7c296f6fa6ad508d1cf19d0f01f50e63f", "9492e93d4ceba8b4d95efbe38f844efd34a95222", "e781f2b75fa9b2bb0f41dfa8dd248bcf874e2080", "47f2088afb616bde5468818e23d79e1ae5a562cd", "f0ca04fe6de04a46f44dabd8744b4163e8e0b4d3", "6eee5d9c5343b7953028a49d202caa683ed9b448", "b47386e10125462d60d66f8d6d239a69c5966853", "26c8ed504f852eda4a2e63dbbbc3480e57f43c70", "d0c916d9045dd3d028774cf06255bbd041c72af0", "ea908e1ccadfc1b8c2a93edc060112e0a498d354", "c6f9d500f55201ac56812829fc2d258e4d662a1a", "ea72e537ac714d015c802280345957b40a262da4", "e9ccd438d6d55ba0d11a63eb95c773d63b3ea4e5", "721b890875660e87e7e3d9dd6917709b5fc5e34d", "dcb6f06631021811091ce691592b12a237c12907", "9c50b2562518ef985fe547f79857a3952e723f7d", "f515aea60852c9c302fcd8645106966eeb11469f", "01c948d2b73abe8be1ac128a6439c1081ebca95a", "e9368e9fbdd60813f2763490cd706c82c28a8a02", "4da00571d3bdca847033a27f0a34000b1b3945e3", "f74b62933362cce595ac247fc6f54ede68697d75", "baa4b2d31c1ce7b1c8ccf5cbd3e12ae3e89e1d7e", "9a2af8c3be3ff4677efaedc3141846c64c85dddb", "ba9e967208976f24a09730af94086e7ae0417067", "8c5852530abaefcdce805d1e339677351c6ec7fe", "7c5c4ed47bccd0016d53b7bbc27a41dd74bebf1e", "4b4d8169664dcfc87cf7ab68d4a49ecd160d89f3", "2854f8c3dc28654067e721246b611eee872c0a57", "5782d17ad87262739d69dcbe76cadfa881179a91", "38192f06ac19172299ab543483d2e0eca2f889c0", "8bf4f45460536852c5f4189de009d0d1bff3ccde", "07d249512522ae946089460c086b98205bcd17f3", "18010284894ed0edcca74e5bf768ee2e15ef7841", "200ab001770a39d5465c661d0078f4d9410f343c", "ec25f39fa6b4ef4529981a1ae051086e93642d27", "44482010dbd63fba4f7457cbdb7cf61e44c78617", "0b9c5bfb4d8349bb3f6ddd6fb612b7f9657c93f8", "59deea5077fb537f8d7258c740c4b9a5cf1f4b8e", "828cfe547a2c9719dea68698dfa168b0bdd22aed", "5b70beeadc31ac8421bd9fe54fbe696b90eba1cf", "935758cbc31ef82bd248b6d64be4e0050f191036", "f7a37cf724aef23d0e714a35d54352243e5b52ee", 
"35c0220ab8a8281129a00ac32ef2f488fb562eb7", "a10f734e30d8dcb8506c9ea5b1074e6c668904e2", "5c9cfeb77f7e5040a4ca3775e524247a0bcb73db", "ec7d418ddf95d231b2afc70ed8c94d0764abec61", "3af2b34ada1355e8d96e0014eb89aea49c301293", "514fdf2152dda3a39fc05eb6e1c80314837d96a2", "06046fcc31cd0b8062f3ea2d568e561dc258921b", "766039c203f76009c5efabe7b24914cc66fe117f", "9612fd66fcd3902bc267a62c146398eb8d30830e", "79d13b74952449667c769be76dac9065db1acc22", "87747b6a1bff0944fc3e4891de9c3ba8868aef66", "2ad2af8e3bdeb0302de07defc3fec9b387414a27", "a764a07699f59ef5277730c20af881585371be4f", "db35faccd8d9fcf25c363b4781cb50dbd76649b4", "f6ba16aee3c40b69dc88c947ae59811104b1bd49", "9b678aa28facf4f90081d41c2c484c6addddb86d", "29c1f733a80c1e07acfdd228b7bcfb136c1dff98", "53ee5798cc8224db919a957594ebd72fdac60e2f", "20e5b077f05784a1d3aac5b281934069475cccfb", "258dda85eadcd2081d1e0131826aceac7f1e2415", "943262361be04747aba71d45fb4854cf72019851", "197f945b66995e4d006497808586f828f8a88a86", "50fbd28cde388656afbd48812830d6bc7e71be2f", "0608313884bea3c286d6cf95ccf9bbff4c77c9f5", "37a892395061206b58127f04fee0e4d2db33803a", "eda8796530fd9ba23b39d50cf349fee01ccee144", "5f6ac935f13aa4913abcca424e226b6903d50da6", "2d34187db078c55bbd93e896d8dbfa860c87bae4", "9fe55487c40983b1da71c073104cdc2d6f5cc7bf", "76615d7bc69ef0e50338a8c3e59c75d361ef0db4", "221debbd7878ed303eaa4666f8df04a48e4c5070", "366595171c9f4696ec5eef7c3686114fd3f116ad", "273dc39c3e7a18aac3cbd5f2db93969e9cc7613f", "afd9802722bde66eda6c04ff608fffaa4e210435", "37347e4c1b35196761fc1620e451738f880f0392", "3eb298bfcc33f6e40bfd2e8788b13b256d2c0391", "ad30152944a42975f16a53cf0e0666e9937e9d73", "faa750becfb1fb65511130d73b14cd692da07444", "2a6c7d5aa087233ff8a09bdaa34d5f76f3330a4f", "0e62b741d4421b284cc6a27cea0b1e95b799882e", "53df7d12472ee0c466a2bb59c4a17274858345de", "8dc697e79cd23c2eda6a5d483f2e5aaa4eec4426", "04aca8c96971acce8ac4303bf514e83c87e692ce", "651616afc04ccf20ca2c015b2f78be98ea5ee24f", "308f56c37c930c0b0e72b40c84a93b8486e40ea3", 
"e1d726d812554f2b2b92cac3a4d2bec678969368", "f32db58cbb8319eb8f2cfa2720c810f8410eb569", "7c9fc1a61359a07cba839945360b1fb617a5af0e", "1f1f74e3a9b9d4c73fae912e22eea492b9a96ed0", "071680ca97de050a372ea79f2b99f102bb3ca6ef", "026fd26cf0fc1d1bd80d7e265058c21c15e60f79", "59420fd595ae745ad62c26ae55a754b97170b01f", "fafe1b7a5084e7294fa29a69abf4c0440bd523fe", "de6aacc6fc9a706e7a821651951a94634dc3b4f9", "14819d286c9b46c8e57c7e809db879f9e1451226", "550edcdc27aff4e7ea8807356a265a0031434a49", "05b603d7c6004de3c028e40b4434804f752290b9", "71cbe1b52e2fdb8fa8a8278eb590f8065d3e7fcb", "360d606cfe31daa9955fc098afa2ab4230127d7e", "2fa4f66a7c3846a189ea1f962592d7c20d9683b1", "dcea75410fefbe70a4736fabbf178a951b6743ed", "a754989741afb89e588b52de375054dffbeda39d", "16e2e9e4741795c004d15e95532b07943d3a3242", "4e613c9342d6e90f7af5fd3f246c6d82a33fe98d", "1b2297ba37fece76568c8b53369e6fd34d63175a", "c0b372d1a6515ba6ddd2f1661bcd0c3bce3b5fa5", "e4501da190012623d5048d57b7e650de27643b8d", "0eb668a7542057bdda8296f856a91352dcea28e7", "656e7c7739e3f334d4f275c71499485501aabc44", "a1b4c98907c0c08671e69ebaaca6315000d5099c", "723f879b93097c22ffa4fe6b587d3a070a67136b", "9f61c20ab97351ee7de898f04af5d8e278564952", "1b55a0ad1d4738a7d46ed787542991d4a05ae27e", "81542ef1b2e441e3b39f71ca12d49cee715db21e", "2fbe4ffef775bb9c2cea535a07ecd48ef30adcaf", "50953b9a15aca6ef3351e613e7215abdcae1435e", "ef999ab2f7b37f46445a3457bf6c0f5fd7b5689d", "a9a75b2ff6c68aeb2837a4d9ab3a6e8e8275c81b", "0e8defaafbbde9031fb2942eccaf980b2f20f04e", "5ac02cd4572b93e5245d7876e446d70df4e01bb5", "f7a0ba33e13de1095258bf58390bcaeb60516877", "91f820e2cb6fb5a8adc83e6065cbdf071aca84bd", "3445e917f0712be391591442bfa1bca82b7ebd1a", "ea086c5b91972d14aa63328237e1c94ee19b8c5e", "2cc3bd2ee340ee096726c9c4c9e7d20031723c6d", "6c62330cbd60f2cb6cb80b920104d0df3116cb3f", "c52aa6b9c7b89782f2316ce8ef2156fa06a3696d", "4bcbb221693f45d12f6ea623d6ca068f0bf7ba06", "1f8d539885f78e1a9d1314e952f3099e71676a5b", "6066d0a5f1123b9e158185113c1e18c4687610c4", 
"08fe9658c086b842980e86c66bde3cef95bb6bec", "30962cf6f47396df88bf1c8827ebda8f0a6ff516", "31697737707d7f661cbc6785b76cf9a79fee3ccd", "2c564f5241b0905baafc3677e7ca15c27fd2c6e7", "69f27ca2f1280587004c8fae6b3b0021305e52eb", "4af50ecb45709829a840a75ddc84f56f288c5a64", "29b3be93a60bbc5fe842826030853f99753b08bd", "8150f267cd2852f27639d4d85c3a311360346c88", "6b9be95acc206c63c23c931cb0e102f1060a0bcd", "1d2dab7790303bbe7894d0ff08ecf87d57b1fbca", "bf4f76c3da8a46783dfd2b72651e2300901ced25", "99e1ab1fb08af137cad6efbc0454c6e1e68dca51", "f015cb3f5ecf61e5f6e597bdc4d39351f9c392e1", "a3ebacd8bcbc7ddbd5753935496e22a0f74dcf7b", "0d232056ee26b5da9b6b0658be12053a76484d2b", "271207db1eeca3f9c528abf529d62fb6d41156b8", "a89eae439dfa7cb727bd5193a5130ae6afcd42e8", "65ef33636f07d4d1aa1b22a5b67f1f402d6a5900", "71f36c8e17a5c080fab31fce1ffea9551fc49e47", "2ba5e4c421b1413139e4bc5d935d6d48cc753757", "3a818a5fe2c36ff29212b4da9f4fba3280dfd497", "5081b0dbd23de994fe03fc12aec3b7dec7c4f74a", "12692fbe915e6bb1c80733519371bbb90ae07539", "85192977775e1f1001334a13de5d32736fbfd24c", "88a9b3043a951c622667dcd5f70acd2c850b3950", "55436c98bc60a18472bcab364386d43c0841b651", "38a169b6e67ef7768f91fa208c9b5544f6f57f16", "0e7d8ae484d8a0ecf65855dad9e7514730b4e07f", "25ed9bd6c5febac832f3d68b96123e6ba013df83", "d28c12e270a06e977b59194cc6564787c87caa7e", "3b9ee03255eb5a0040676eead1767db431e83562", "484708cc3bd4aaff0ccf166f6ead108f0842a04e", "12c6f3ae8f20a1473a89b9cbb82d0f02275ea62b", "28bdaf9b7fc5af73482e324d45acf91722f07340", "b13e77dd6b6891347d977f011e03ef2c51008fd8", "0416f5d1564d1f2a597acac04e81b02b2eff67d2", "2431eeb2df8877d78901fa37a091a23dc207c2b2", "669ae4a3a21b5800829ac9ee7e780fa42f9bc5ad", "04e1c7618e4ef3e16159b27c522ddf94068c4f99", "1acfe213bde3f69a8eff515d92ae88220cec4bad", "8a0538eb80b5d41c0e5991aceeef47db01603033", "452c9b3a94897ac40b2dedccf2c6b9b358e5631a", "184e4a62fc9c3c8ea8948aceebb1debe0b5fc54a", "4e4f2c4d2ea47636ca2ab795770d6b3214640f37", "69d9e8f0170cc1148accbb9d4c6902296ea9516b", 
"16727cd69372019267589a27574147e8cf3b25f8", "682e889fdfa43278d616d3fe18cec3126a4382a5", "7249b263d0a84d2d9d03f2f7b378778d129f9af9", "0ae69840d9dadcffdf13b0b712f89050d65559d3", "c693c578d783323d130d642bd04d391aac7e8f81", "2774b83247331d10a87195290f8e9aa096640dde", "0f82a869a80b6114bd16437dbf703bcae84da7b9", "97e7810f21a145caddc7e5168b59f0ab8894f669", "3901489fd02e47c1f4b16b55f6d27192be5dc652", "536b5739c2162301bff19730a65bfbe8b86179b6", "02e774dd585c35a00a2e2d30c1b9c9079f03bf3d", "23861361973990f5d5106d3099a61480d052c9d8", "391e52ac04408d3e6496614ffafd6ac89c1b6c45", "0f18ce082b7dba524759cf3fbc21bfd1e586dea3", "471b72ca18e3ca0c3d5edb8f28ab8eca596f78e0", "856c09ab10efbc8c61a84a951746654d947370f3", "c49a71015d869c1c438ca7308ec7c04d633fc065", "8a609abea6d8d71a4547426d8ba858c955a2ef5d", "0da2a7ee04092645867614db3574cb261f33b6e2", "c383d55c717caaef3655be23fb051a2168e14ca2", "d78d2718047b8a5f9167d697d9acad99cfb6daf1", "99c37dba394b100ba8f3d895c0ee0e57d5852347", "375993fd5f94c7b02169ff0d71a74d1b84262dfc", "32a54bec72f62407084289dfc1b6df4c9a7ce1cf", "9a6d6049d4a4a1b87f6b78a644d3828a84a27526", "fecce467b42856eadb8dd0c08674d9381f52efab", "0485e96bb0c1276fe2a27271b939b6e67997acfc", "5a0b50fcc4f444e6b89aeb312eca37c9d6e2bb75", "ac6c9623c063bfbdb87097e1f2e437ad7c5e1b35", "fb4545782d9df65d484009558e1824538030bbb1", "7c62f5e4a62758f44bd98f087f92b6b6b1f2043b", "50399793a5334654dedcea635cad291dda77de96", "1486f2e32deac2b61d37b52e48d07fcd5208a164", "5414d792f8b9dafcf58d582545d3e7605b087ee0", "2c375f93c0d0db944ea3ee5e5b4428c5b647f3fa", "04c2cda00e5536f4b1508cbd80041e9552880e67", "c324986c8599fee2f6da7b59751e89ed9624afa3", "621cff3d984455cc75e8ec94574af107ebfdb982", "1be8aa30f905577c1d60150fb6ba84ddaabb2f6e", "538c6000369594084b122c37b3219ad15b58cb37", "03389a530f52c2397b7507376e25a109c24a3f45", "0014a057ebdeca672b1cdee8104cca4dc928ef3e", "4459593cf12181988b8cec7e43f834f6831826cc", "5250a8bbbaa9dcd25f6b2a062b7d1f3ee7406c74", "485c0aa6a58ba7f1c14e06f5b49679a441ac07a2", 
"1fb54581120c38528984a657b1da6c24679dbea6", "8e6526b46a52a18028336a8d026e9d466aa12edf", "c9aab37b323d9cb2504ad0f20ecc8cfe210c0469", "42e3dac0df30d754c7c7dab9e1bb94990034a90d", "ab44a7585b45e72affe1746fc302baccd6412969", "b4a4e93343e778d0b86c56132a63aceaa70911f7", "4178b559c2381d88bd81a0e73874f1e3261e17e9", "f367feef8f486966916bd0769de8c7b5a59250b1", "40b193d7fe51bd2ca5240d280a0fd5fe1dd6eb0e", "ae9efe3990ddb6169286f50ee4325ffb0b3dcfa7", "1e46af829a955dc5ca9c53f94eb416bcd9e2a2ce", "7809e6d99147bc4a5a8f1379b39354b3e65ccebb", "6009f5c357a8b972c5eaafd104f03fde185568eb", "472b22afde0446d85f4ea096510a9d2f342ab7c7", "544fd5065c0f4f6b0a9ba1805785b5ef3cd68231", "8d007d8d75cb84e3350889ad5e1cc6520688e65e", "0569d7d3d8f96140adc8ec5a6016fdc97e7ef8aa", "a6f4d114dae7664a5161a21fc2bdc76c86a2d69b", "9e453713e7750995587c7fe5233f70cb63da8047", "98126d18be648640fc3cfeb7ffc640a2ec1d5f6f", "11a3084768f035c824662a85a348f02466693d2a", "7411761e789ccb1da80984472f5df5cb084e8ba3", "d8ce4c55d04b93bdb94c1d0427cfe40431bef941", "1ab56eb6128da34027242b1314e51b9b18b960db", "42495ae78d48209891874e90a4436a3e1b74ef0c", "bc843c35530e38396e8ba55b8891dbe8324054a8", "27aadf6e7441bf40675874df1cf4bb7e2dffdd9e", "1113b4fcf644616e2587eacead2bca4b794ac47d", "4602bbec65b0c718d5887fdf2381fb7cee77a64d", "13ccce0e6fdf7831d07d588042d12e04e772dcff", "c9974b7d35d9425faf8d31ede581c605f4f0abc1", "25366ca0d124ca6222c7edf72681943969055024", "0834dff6e1d37ecb36137e019f8e2c933d5e74f6", "8627248c6e3c3e316e3964d12e0a44e23aa969f3", "47d8d8ead70e26eb791c4dea5fe1a4d666ee2462", "941a2710ed3cbdccb869bd6327bafb8adf730818", "11f5dd9f1cb14d14a48499d05907ac05a20828e9", "1507b1b81665b81f799b6cef530b9fbac2ee3685", "04f4679765d2f71576dd77c1b00a2fd92e5c6da4", "215f43a46ad30cf0574a2a10cd81fe7741768746", "9f949f6e40e604ef05ed690ad732a2f6625997b1", "5d33a10752af9ea30993139ac6e3a323992a5831", "895d366c5d9c544661cb1de060c5c6b8a2990b87", "c666aea88c48b287080de410d4830f64f0b5ca2a", "cef2c6670884729cd339e51e736c98d61ee61c43", 
"1f35f0400d6d112e3b27231d0d9241258efd782d", "314e3cd001a3aa8e25378b915cc3d9a8125e31f8", "c2dfdc01a39d193e017531bdc2abdbdefbb2ab33", "8333f252d35a17752587734d4d777fa6bea79f0b", "8c99f35d6c3851513adb2c2d5c385c989879e05b", "50d2fa4e3a4e961cf35cef6d11ea745f9d1b3839", "03af78f067ed1f6ea0108a4d2ab7120e7ef852ac", "0cf4105ec11fb5846e5ea1b9dea11f8ba16e391f", "29a23b9bf42a862679ccf5fcbf7607f13b15ad9b", "29b96e41948e35a5bc4a9e7ae978808bc5b0c841", "6b063f5119e7eee6dde3b2e6c98e28974f0b0f7f", "96d6e0bf752c42ede0170e9b332ca390ac75cd1f", "08d6aecf1ee531f8c62c22a256b2c2e58081df9d", "2492bb313093cbfe885d1f3f9da2feba4923baf5", "260aed27abfe751b3d90aad9c0805d35c359ebd5", "0cdac46ec42be2d81f64ec4ee53d88be43290d52", "9ba138e1a2f477e8256b38964a699a7ab182f221", "284b4ae707d6f6fdee850d3960f54178be6109f0", "fa64163eb54c595d1d7041b2394e9029202f4da4", "4e6c23835189943b1c830eafb8be1c09889b23f6", "7d81257d709c0fceebd38ed76fbaa137ff2e32ed", "176a3e9e118712251124c1347516a92d5e315297", "079b6800e3130ca2ef1815a35632ab6998848ef3", "5e1b42d07eb84cddc1ebae607f3041aa2ef8fce8", "684f87f29176db22743b327edd070ce366a8cf76", "5f0b1f37fc9c65c56106438b9aa4c6e0909d6fc0", "16c3f99f8f48d4ccb71b782f79601d5efeab8461", "627412bf4cf2706f6dc9530313ecf06bbc532cca", "e837fe3e1d97200717e22fb26ec2c545119a5e42", "a1bbe8b9eab55cdf58746fbf790eeaf626878615", "96fb791de077106501397151d5cb4f245330ddba", "f95004e9a61777a0c4fe6014acb9bc5d63024af0", "6cb02f55b43041a2b43bc13db9ce01e8e6181f29", "01ece1dd9a0a2a7289d791625c6c7446d38584e7", "0431e8a01bae556c0d8b2b431e334f7395dd803a", "b00796447d670f9413e831ffb4ed548a380816a2", "441132dd4ec14991644723c9642ac3a63181753e", "d737048ceaa81eb59ef2e4cebe2faffe1f19da8f", "acdba6bd93cbfff78c5cb03cff85b70390f77f85", "a1c6f88330762cc97f26585c124c6b3ac791eb89", "161207d6909b673b8471ef455688e163773e66ad", "acdaaad999ee792c9cd13320bb687d9a94197a05", "4238ce9152b02b5ae07e8e42074d1ab7462ac652", "36b9faf0d6c4c6296193b8d5d7833624a181624c", "18ccd8bd64b50c1b6a83a71792fd808da7076bc9", 
"2939169aed69aa2626c5774d9b20e62c905e479b", "1f4aa1d14bb99e152dd1c7ac3cfd5afa8f6a012f", "6555ef4e6f9582b5cb06199a70d4f54df04314ff", "0ef399b8bad6b3d4a908e2a9318f2ba51699b4f1", "15605634feb1a5770182a8f2c3515daf102ed463", "96c298354bee7c6c8dcc58f8fa749cfa75f5452e", "a7d804aca2243843df383a7f9e312c74980d6d9d", "81ebda23819ed215c83435bd861e26483b56f5c5", "8fa3478aaf8e1f94e849d7ffbd12146946badaba", "22dff8fa7cc57f7b4f2903c6fbf6ffa7f1bea0d7", "0d0199e48d22ff4b80c983e3b28532f908467da7", "5d7395085f2636dd2b6262bc7f3fef14058f4765", "de7be25d3e87a20fc9ce5bf90a60f175e64f75fd", "0683be899f3e04b8b55a501e8ffafc0484b44056", "32202387d048e40fe692c892692f3c93f29f6d29", "29963f39e4bfd074d30a94b5150b2bd0afb4ee16", "29f4ac49fbd6ddc82b1bb697820100f50fa98ab6", "28dac6b73df69f35b11f8f10ef023674a2f39af5", "2ad9e4596f38d58019a6f8073f238803f52a2773", "1862bfca2f105fddfc79941c90baea7db45b8b16", "2a259fd1b4442a71cd127afac417a650ffc379d9", "09a6261c3334471bb0bc1a173aff672afe963ae3", "8ce5425f20f2c9e27d954a7d86503b9a0a33c34c", "1d7df7000a3e8fafa21679db4efe2ffedcfe0335", "13c0c418df650ad94ac368c81e2133ec9e166381", "48f211a9764f2bf6d6dda4a467008eda5680837a", "5a74e0b78e6ac5ac32d6c3b7a20c807d57c4c4d1", "a30d5b636086d80791578cbd0e0b02d87ab42d27", "dd6b6beba7202deb1ceeb241438fdfd48e88b394", "d2f0b7392be6ec4d753c9414d57bfc18dcc407a0", "6c79992e7dedd67380b1f107e9ba012b77eb8071", "516a014f4654c90a22ae3d363b6e80bda68a084d", "0c663a7224a60488502a937ed3bc2b869260b6c0", "c4136792e590ebbd7c490d9c0e0feec0204c00f8", "544829d3b2e878c8f28fae5aa0c226e65ba6242a", "b62ffb6a17d75363c8873a236f1d8c49d07c8a0e", "9f97bec3ba3071b41609f8f4590b8635116d7e9f", "09ff849e7e22f0ca2f46347ddfd8596f34d8712b", "9f1be5e5b1b87792a5a5c34f3a755cca23e3e517", "0d21472dbf20d4c1bd48a15267b4a59eff80e309", "1dbcb8998f2527df4f8cf7750e61124fa5f6a6f7", "6c50dffd2c9c96bd868c79ba20260770ee5538eb", "488e475eeb3bb39a145f23ede197cd3620f1d98a", "385e45a0b9e88929ffe8a341c886a6de41d372f3", "1601ac8e682622f489b4a18792025c0843d47b86", 
"86e5a0806329909f6bebf4af73155be2ff185cac", "4eb2d6699c17cebad1ccf58014cab271a56032a0", "47e388dcd33feda1dadac82cd60fd7f7fb599594", "3859d584d3fb794c2b74b42f0f195d16ce8e3820", "a9e99d6efadcf5d8f67949c5fd4e1f1c024868de", "1362d0680de25838c19d49baec3077ade76108df", "0a26477bd1e302219a065eea16495566ead2cede", "8accffd31d4969ef29cc8496b1628b4b3164d53f", "23bdd2d82068419bf4923e6a0198fc0fa4468807", "ca54d0a128b96b150baef392bf7e498793a6371f", "1d55edf77c5f2f3607d5f30419b4c871fd6b07a6", "f70be41cac83bb02cee9cbdecb1cffeba7250e89", "12fe91ab616b797e22543ae6c2afa7866dbc9a49", "a470a81f989d5354239f1044c90e07b78c6beed7", "73acee80dbcf4ed119d863e4ad6c7bf1bcc542ca", "7e526c32a9ba12d3aeb69c70ee38b178ba203d6e", "a2b9c998264ab1920ea8f2e07c3590ebb3dc6f35", "e0fa145e17fc1e530e011a9f724b4b4aa337bb4a", "4bfce41cc72be315770861a15e467aa027d91641", "8f1ac3c8fe6bcb1da2cdef60bc218ba1e264074f", "37f075b425b5cabc9de37400792b87520f8ef226", "c5595be9cc9dcd14dc4369d3f6089a83cfcdc968", "e02d8652209fb722b73a37339817cf9c396be95c", "39d406df1823aad167a429f60ae8f1d3dc4250fa", "200f68f899f0bf72dd2c49ba2b4a5027e0291531", "34bb062432bb5a95f0cefae4dcf9936b850e9c39", "c4943c2c7334f3a1e7ee380de65d6903803685ff", "1f00863b64688414e688397e4bfaa69c82d36881", "13ab059e6b592ca7bcb14337316ec1ac14aa5c5a", "236171d2c673194045b4c2e2837ddcc4a2041b8a", "fce3d53829b739803a1ef8e80a4d4607ee319581", "968c62bb2927ca300ef953644e652ba7d2c2e5e6", "085fce160b0fa279597bf23b518c56c735d9e7ff", "97486b088729a87865d08daf56072add84db3ec6", "560b3895591d7b1631122b995a5c20889b1abd3d", "1654fadee3e70d744a4eb231932b87c41c1e3ae5", "e79bfb8dc4ebbdeb971545bd31ffc1392ea0ad4c", "100da509d4fa74afc6e86a49352751d365fceee5", "76f3450e50c20fca00dd6319df38503c5d7ebad0", "1395f0561db13cad21a519e18be111cbe1e6d818", "8b2dd5c61b23ead5ae5508bb8ce808b5ea266730", "1a1ed320882c00c94d9f738b7b14eadd941376ed", "b7f0d1d65763fb57ee9a3624116a42a2fe763707", "df969647a0ee9ea25b23589f44be5240b5097236", "a5c8fc1ca4f06a344b53dc81ebc6d87f54896722", 
"b1429e4d3dd3412e92a37d2f9e0721ea719a9b9e", "ac79059866be081a8492c642291be159220979a5", "2dc7d439e99f15a499cd2dcbdfbc1c0c7648964d", "19c0069f075b5b2d8ac48ad28a7409179bd08b86", "cefb019babc9836a164c068dad8ec957856a6a62", "f91388f87e10674226f4def4cda411adc01da496", "75d571d53eb250e222d66461fa2400956b40eaa9", "f04cffcd0cc68e28cf05827ab998cf84b1ab0f3d", "9cd8e1ccc5a410c7f31c7e404588597c0bb1952b", "0fe8d8e90889917acca22b9078a1a5607e603d8c", "aed5aecd3f0a07036e570c84c06cd37ab8904acc", "9be0de78bb69e7b243e92ab7530f9fd5a08c62cc", "850d84e4c73a8f0762c8c798b2b7fd6f2787263a", "3c089b8175ebca4a3e2caa5b1291f1c39341414c", "8239e4a37825979f66ff0419ccd50a08aebfbadf", "05a26e093a101a9e6d9cac4e39a29afd6f1ca77e", "dbda7c3a09ada41ad45f6dfa1aa803e2a87ddbcd", "1ef79451c83cd625a9d1c8941bcab850956f30f0", "7566032327a19f9ba770022677de34d7e7aeaac8", "ea864dadea36d20adc24f47232d97676638723ab", "b33b155e9ceb953626d690e169cad4ac1117edad", "08bbb59036c4b85a2418f9702ccd37929c5dd154", "33648b41b4c9a776dc059bee96d6df994009828c", "e607b91f69ea2bff3194b07c5d22b4625bbe306e", "76e76fb584f381a2dca627a276462c52ed680b90", "b7216839f399a8ad47636f952dfa28ab34aab5b8", "f942739b7f9bc3c0b84f760bb2fd4895e1363ec0", "2c7946d5d2f1572c20e9843eb2033b8eb9771bf3", "0c7844b63a05ec086fba231ad9eb3114ffb4139e", "831a016b31d8ecb2c017acae3d8fe2c31d5c7873", "53993c7fabf631cbd8a44ab3e42c6bdf784db456", "a58817ac72b96d06e062b5e13e674ff1871df17e", "9b4f651dafbf0a59099ef43deda0a621c8b74465", "1fd54172f7388cd83ed78ff9165519296de5cf20", "f095d5549de0b35ac18b2b3e178f39a5667e89f2", "9f5ce56dd0900368ff6f0bc4a4055e6f4ceb0bc7", "2d21e6f8bd9e9f647f3517f51347ad89b4381a7f", "f106ff6b2dd497650e7e2096b24a23d620a2306b", "f3ec43a7b22f6e5414fec473acda8ffd843e7baf", "3cd40bfa1ff193a96bde0207e5140a399476466c", "1ca9ab2c1b5e8521cba20f78dcf1895b3e1c36ac", "07a29f43713833da42b24e3915b63601c39d7627", "826aa28f44628f1740b75da3ecaf76251d2e36e7", "77518d7e11f9d106dd989a628bcea823384321bf", "e55dfdb438c0ca607dc970b382ab3d820574586b", 
"cdfd6930edf117c80ba9db0961e61a9576e85f3b", "2befea9b289f22547f8911aa56672d6373c1ac64", "d6992b0a3a736293b5a9084aa9615cc825d1c3ab", "1cbc189a4484cd2b1371798bae2ff50c0442ce60", "b956d741740186c99c64bb789c87da000ed3c3a5", "645736f2027c5cc64e8ca98ef46f28ae9b1b0110", "1d0ee1069bd433b5f754d70517d2e0fcc519515c", "51f2e751beeb14c37412a65c8e8f0746edf8382b", "93610676003ef1dcda3864b236bca3852cb05388", "eb65bea4586db7aff66895d13c1a6b94c5364497", "89f9225a7223133fa687e1c44bb758c3567f4f26", "43d073d3fdc22f0d74793fdac47ff56b66c95990", "1da57510321fb8b25dc4d21844fb9afa4e40571e", "a91a90192094963d0227f69c90069e99e4acf153", "eb9c20e96f22b9c890f7978878c5479d9e64bb47", "480858e55abdbc07ca47b7dc10204613fdd9783c", "3d99cf9ab3b7aeea322c3c6b10fab3fa3ffa0489", "3318ea550981a714d2c84522aee411460004347a", "05b9c1ad0cfe1bdb68f470492c9a593bf78d5192", "2b64a72d53f13417c6352d3e89fd27df91b5d697", "a72821008c41032e82f377b53bd96b5f7f8be025", "39c54d22a0f29605f96ab57720cde8c6aa743c10", "685fcf13c5e261bf4851ddd1273e048869124ac2", "ca17025fe9519b0eec7738995902be2992040a87", "f22b157fb9f9963b21a82860cb47585556bd79d5", "0ed1c1589ed284f0314ed2aeb3a9bbc760dcdeb5", "0e41bb49b2b3b1fc4fadce856f164af51549bcb4", "3b92916dd9d772cf1d167461a548115013a954a8", "a03a2154f9aaff21c190634c888ed6d4f8d52150", "482321a30da9edc4da8efb73f8e7d763c56811f2", "014e1186209e4f942f3b5ba29b6b039c8e99ad88", "14df5bbb6fc34819f4ef43bb2b8cb1ada35613fe", "301bef4826d50d56ed0f53dbc6cc124bce986f67", "4e7951a083bdac5c5d62c221de93181398240234", "49435aab7cdf259335725acc96691f755e436f55", "ae19008898ea1347cf0f7ecb81b71aa18137085a", "e22022de2db3432b3d77a49180b58d29058750d2", "136112d29f8abfd8804f9b9c0e15d00f7c013c6c", "6301830b8b6ff01537ec5b3293ff229cc1d5a53b", "211744a5dfe8b03828bd6ffce4c5f8cc58ca9109", "54b309443f53ed960f588f64d6aefe53f87504b6", "9bf6fbccfdf013cfd076f9357a05fb00b50735ee", "8ea27ec3c92818c1ce7e95ad8db8ec2fd53e4de1", "6a6a393e8c8924a379110dd6d48a0237242579c1", "10d85459ab6a9350931fcb4709bba171cd31bbde", 
"39bce1d5e4b31a555f12f0a44e92abcad73aab4f", "0a00b08c51d922d1b30753e3e756867121c7aa02", "2753c410a4072d40a8eea5de392414999b7f4b6a", "01853c864e7eaf0c61cdb2315681224d6a14bde4", "51673c4e2f92c04245c94b2b77065239b6a4922b", "6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c", "2a5823a387e248c4e7312d49cfbb02a25519251a", "0b07e4341303272b26be11262feb66c0a4412c43", "ca50e441e275a3c04299bb6b59f6c098abecec1d", "ac9bdf668852dea5fa8ec4262f10562eda9bedd2", "6f3054f182c34ace890a32fdf1656b583fbc7445", "60ce5bcbb4604e536e653230d1b2e8b9b784a53e", "b1301c722886b6028d11e4c2084ee96466218be4", "6f98c18e834ba2cea0ad0157b53c34dbcd0c318e", "d6c8f5674030cf3f5a2f7cc929bad37a422b26a0", "387b54cf6c186c12d83f95df6bd458c5eb1254ee", "6a951a47aa545e08508b0b2c6a2bef45e154a3a9", "7942d0c6e5d1a2440061f2ea4bc27e32badb9c3d", "448c24fbe400ac164f3b97bce3cefc1577f91cca", "acc37d228f6cb2205497df81532c582ed71dd9fe", "3f4684674d2f62e24b46140e2c5df29d061ffea1", "b034cc919af30e96ee7bed769b93ea5828ae361b", "806c07757431ab3fd91f4276d350186cf6f9b7e4", "6dddf1440617bf7acda40d4d75c7fb4bf9517dbb", "7ed27f10ff2961611bb8604096a64adfa38c9022", "df7312cbabb7d75d915ba0d91dea77100ded5c56", "555222f2ad6dae447eef04f96fa40c1b8a397150", "db24a2c27656db88486479b26f99d8754a44f4b8", "330126c9dd71b3b0319d6429737186f1f20057a7", "dfabe7ef245ca68185f4fcc96a08602ee1afb3f7", "7d20c8afb1815205bd696c0dd2e4dbcc66ab4d31", "677251fae7ccc62bb776374daee146cc2b7f0f4b", "bd833fc0989c4eb3ffc88713bdfc8d7480132fc4", "3c8cf97f00cd8b4303eccc4134fa79b15cc3d564", "c62c4e5d8243da6bc1fde64097b2ab8971e6e51f", "96921b313f4c8dd6cb2299de1a24d4e9803ffdc1", "4c462a76517f70588a8406ad2a9fa290b7d77e5a", "1e95a366f9212654f79027894dbedf1ef44ca4c3", "3b319645bfdc67da7d02db766e17a3e0a37be47b", "687fc9ffea3ca36d87817faf37492941ec6eb0b9", "70111f6868ffab46cf32534d8b2175693c1bbc26", "698812f7d37e148c0a99e768f0a7d24e7b9605ab", "4a07fe50742c31daffd77cdcac15eaca72070b2a", "f1d3fee0a4dbd4cd30195d1218423bf22e23286d", "09d03b792923695deb0492d8fc3582a50e5f1a1e", 
"9d1e20c3d2d0d67d3c2f06dbfc336170c772f2fd", "1cc0183d8fbef098d29b6b5f621745ff099f6c6c", "604b79c12304af6826db4ea844dec6b2a2ca4e50", "7c8231cc89f628cad270f0c2d2228ad749a97d01", "82405ab7fcca6c008371a74a23562091912260f3", "38b3cae6ba1b98d6bc6f88d903916dac888cb951", "ac768ff426a3a04a835cdc627481afc898a138f9", "bacc83feb1146bb3d4cb3fa6304090c2ceb6e0d0", "dc947dc7a948aa8cb8b82f18c0de8707f6064a7d", "10d39dedfaf34d862e3ca7216521c6290044ff87", "d99ec70dac11292c63b7726c58c24dfacddb2889", "d92e04cc1fc0bf45deb4fba12f5aff9f2c1815ba", "37a95a78bee34bb26a64c7ec30f7bd0496e072f1", "d11c6beaf4d08fdecf89ca405a7e5e9117c085c7", "0d96c9d14f079b7b8b6b56b4fa86f611a4ff237f", "295bc82821cfde872a8c1baeec8594f3bdb94ae4", "9563d6fafb6ba09c082a57e8d9b31494029a45ac", "af64854f653f2c1724d04c9657adfcdabe0f8440", "5284e9d84ef74683c306314e7a79786438514c90", "9c1fa04553e96ccc59b9c0026e6e25fb2c7dae77", "69e4bcb9d7103165176112c675120f88b2ee7a13", "7858410077f9ba94ca60d0f6b4d29509e46a4ef9", "1abadb7a70c9faa69b618926aa4c61a2520659a1", "918b0901d1b00d63def88b1b27dbb20df72a0887", "15c8443f8d9f1f6537fa8ff470ac407bf2185b0e", "9dd227edfeb472076346cbe2c49811d1778a43a8", "2838373034071d045c360943ba6fcc7e551fb808", "2d48557e4107d93126e7f7b74fb04517697f6a52", "cb2367f93e3bd7b228f271904bd61a5723094a96", "01e812ad00b7743e9b24aa070a24023f05710b8b", "43e5817c18ec570d614669e3940d82791d285a10", "a1aac8e95cd262f974b26374ec8fe35c0f000185", "74ac172076ac9550b32cce7b8e8989f1eb113515", "edbddf8c176d6e914f0babe64ad56c051597d415", "fabbc7f921d77b5aa9157310df29ad81367fe92d", "47be79c0ecb598e1af44e57f386f79adf491f82b", "bafd1978d6a68db89b4b75008e1bb53aea81f632", "b94ec9abc3009cfcd1e45647926b4e5084d95136", "d4b61c59188b830b9bbeb346d86f950585204af1", "0959ef8fefe9e7041f508c2448fc026bc9e08393", "004aa2cb4b68850ee006af8a6807b3c1a6a198f0", "4143292a7544bce8c275b5f9a118268dfd3db950", "17257fc03b611315ae49bd53d229188b889002e6", "1fc73cbc0b91a5eec313afcd251edf069b8bddd1", "b67fade804ad0ab12e484582190899fea14bc799", 
"1b8290ff2fe1b04df14f2504b38beb749e2e75ca", "5fd80e47d53c64512a0b85a4c7a0beb24bc35766", "1696f6861c208b6a7cac95fbeba524867ad3e8d6", "189355bff03076cc5bddaa11239626051931144d", "dbb202b5dc073a2284044b4903a6057ac54c034f", "000cd8d20d91ded078949dfcde76817221ea96c8", "4bc176200ff718ea0a0650145d3d263b0b7dfb39", "846a3d3cab941d0a23223198c9001965d9dd7c50", "3e56a9b6c6aced2cb14f9cd7f89d145851c44113", "48f0f6d7547f43d068f4d83d7cc682b082cfaa4e", "fd38fd64398502c0f0fc010939e7f61d44a7b5a6", "4650ac406a79fa59ff147ffabc32e80c5edc1cbe", "9e4cd22ab92adcd74014709167b6cbb97baa3d1c", "876efbccff569e064e0adfd7f212ae69d727db40", "eb2d1d406405537773e70f7e949df656ee8779aa", "11f4f72f99883cc225fb0cdedd457b612eb76359", "056e2c82db905b93f7762a2ee7778d3aacc5a1f0", "25d75339720787e7003f2f103cf38cee8175972a", "b4a2e8d7c6c0b98f2cc4b0f9cf16a55b4e80eaea", "86a200647f89ed81db8031ccfbcb5368a32bed6c", "40cf7b70c1b4e04c236a9997139c924d2292f505", "55077b3293fe5f699d09df0675c2318109b4b90d", "fdabbcb3b49201a942fd36836563ef4ead86bc28", "146f989d2cea0e6825543d45c073f90dd8ae9939", "d55d6ccefe797317996805ebf58a74587b158950", "30c5000bd841388d426de26df678f567b75210c7", "6b57526152a6093171a05499cb62840ba28da660", "469d249a40639d4ffb62abfb2c25f5aab0812fa4", "84346cb26e983981e21faab2623045d6adb2da2b", "3535ba0cba9bf03443d52cbfc9a87090ca2e5d49", "57588971cfef4be8e0706f30cfafbf6c293fed3b", "cbe1a5b67c1d19aa1fca10473c6e88b4a444f77b", "b3ca58539e1407e0fb6b308194234279f78eb1d7", "9c43b59177cb5539ea649c188387fe374663bbb1", "f6ecf84620d314cc5ae51d485eceef3270bf41d7", "ef2db5a716f80888e387a18be8550cd7081de184", "7ebb153704706e457ab57b432793d2b6e5d12592", "2d1e009a7b7a6304903ba183e39395c358f652e8", "87defac1045bfa9af0162cd248d193e9be6eb25b", "c26f8f2ca512733b14ae2dc98e21c446466ddd6a", "2a08147bf88041c6e0354e26762b4e4d65d5163f", "cd2605b31feb84fb53a5a56b21499f4ebff20385", "7e49a6f11a8843b2ff5bdbf7cf95617c6219f757", "72f4c415b5f3ecf63380b6985c95c5af2ba72632", "093da3310d98b3c09e2770c2a6aa49eeca58cebe", 
"488370572904a8fd97f5bc68fbdf0b3b3984cc76", "81f30bc57b84a6e5b71983b50bdea32f32bee285", "1cea72fb523432d80b77224433d57828da44828c", "5eefe98aafffe665b19de515e3ba90c9c0b7219c", "878f70f6abb83f5158ca0bacfc2bacd49b1886b1", "c66eb0e17076bff559d8f94a8f967d52db2bab01", "5f724a84647c5a70865509910070077962433dca", "38283e35371f2a426305dee60e80cd28abb4f349", "9abd6bac662e8fdf4f71ccc26a89f7e360b7b879", "b0bf1be8731c60b2caf3a27f1e95b73875c4220b", "f6785ffe6fe2c30887637a61061a64f4d6725979", "fb42787f7f4a5e4cede43bf97eca216f12e354fa", "5a4ef8b9db11833e01ca8e715c6eec928bc80df0", "aecce5d8e06da797c087fb361732e84e62c04c4f", "12d813f14166578dea8aa6aacc945102dddfd05d", "52ed30920f2f96970c4f79d6768436ed855dad42", "0e815b773e480ef20a680dd35cd72ab26a141d2f", "d934eec76a2588934094098987de72bdf1214d48", "30def55b6277f1e636dfebe12799b12a1b3f48a2", "c380aa240ebcdb8bf2cad4f30bcef2390fada091", "29bd7de310438c2b9d8b6e7eb7df662079934747", "6d8a42dce4d79435c42bf8eefddbea0e38951f4e", "bca7d615cc143a255c0dc65235ba1acbac86ba32", "4b421db0f57608470ac1e26077ecb8a6cdccade5", "639a60373d3592bde75cd5b12ce07cbd6e6639d3", "db186bd2a276a574b2246e3e4d136f8a07c53ff2", "0b83bfd13552b57fe551b6fcf088fdd3d6055854", "7d6132a884d2b154059c461e107c7a8c41603ef7", "041ac91c85276f61bec3f0f3c42782e4f9a31f88", "eda6da71c261df17b4b9da5e72aad7893a871a84", "4b0966b65a4854cea31c2078a3ceb21637dfb1fe", "9ec74521d03d41f4157a458513c79017dd066a38", "dfbf49ed66a9e48671964872c84f75d7f916c131", "62195ef7e2f56063f8a7e9fa27ffbdd1a5696b4e", "1cbf3b90065e8a410668ed914e9d03a94a4d94aa", "d67440791eb55e36ae1470cb37da7ebe8ba44214", "0b6c912b0c6beef4aea8cd7d0a265483caedb7c9", "7910d3a86e03f4c41fbbe8029fab115547be151b", "82729f984c514bd0a5157c28b75ff0236d609384", "81a51cd6ecd467abb1ef38c8e35bdf1885f96fe3", "35f84e0020c26715691825594e2cf5553467a0e4", "e0d2a28bdcb1996f9659ce2d5fcdace3d369cff6", "e0e8c7145c9b389dad2f4e1982f2b9c31b766503", "21462ab847ee919abbd862c1f86b7c13ad34d519", "cd4bab5d6845c2141c9b3b635d99dce1db446028", 
"2bac4161a928eb33e6be700ed8ea4d823494b22c", "64afa85b79c7ad60d8f3f9265259c654c03a01e8", "d623428f02e80a689eb58d022237daeae2ae7b9c", "51fa0924576680490fab445bea7b8f4a649d8161", "351c7e6c2e2fd894626be20a480fa5749e016dc7", "87bba3f4292727091027b7888b5d8f364425344d", "eb4edbec8cb122de07951e3cf54c33fc30dd1c19", "102c02bd78c2a4d9a028b779933ff2f164e1e927", "f3ccdf54a3384e601fa36969e7b3f657e2516a3b", "bcb12dd13426ec7aa4d480ef829fef798f0e374d", "1c42f8ab39e22225ffd3222baeba4863435220a0", "0744143542ffcb45b1ad83078c23efa9d3ec2be4", "601e0569028924cca9b5f1afbca6f52aa7212c39", "486f08c875e88b3f1f157e7de1ae2cf5176f5431", "2396ff03c41c498ff20e3a0e5419afa45e4a9d41", "6def88cbf839bdcb2a61b3fbc82ad393ae3e23a9", "f61b4aa14b052e143db302402cf976ee93cb4eee", "c92e701c908908bda407f12edf6984b283e8c258", "5cd11d6b6cb7a2b8c00fcb535879edbd6b008a01", "4fdbe95edb967bfc0b44f0fa291cd86b178fca2e", "84c8eb2db35f7fd38c906ced741e2c5470ba7544", "a2aa272b32c356ec9933b32ca5809c09f2d21b9f", "1373195c26eab581138579f7389cdf8b7a94a4bb", "9c59304a619b7d503be95bd560f90be976a5309a", "8ec76d7d4a9abd09f088fb3f7a3351a7fda1fde0", "d9fda0030ca349da7b1dafca015bea95a6aabea0", "26f25c552f0d82d2d774e66e57d337bebd197d24", "b648d73edd1a533decd22eec2e7722b96746ceae", "0612832338287cd6569cad32f147bed6df134223", "92d5fd4ef31cf86a650c7b01c26f0ac93304f98a", "7f1d7688c4923cab09bf0a548e6f1fc1dd916b51", "9d1940f843c448cc378214ff6bad3c1279b1911a", "91bd017c1b19c36e430a22929d8de3af0795dfa4", "dc16b42a64741df2881604f28788f421e422d297", "44bb6ccb3526bb38364550263bc608116910da32", "dc83f97a2dd241bf1a9f53ad11d8f10eeb4f5dd6", "15e6c983e74dcf70d8a557b75bdc172e36692191", "d8c04365ed0627a5043996cdd26c1a56b5a630b8", "96fdc0131dc80ffa6d7b9c526e07f080414c54ec", "251f0d28d62b2651b49eef4d044c0153f51030a0", "a97f3d2313affd35c889c57f2ebe21e7ba2b5bbb", "15292f380f5996f539f4d5e93dba3082d53338fb", "29f46586c95af2fa6326724c867aa88b55b5400e", "7793c7431f3ddce74fe2d444df614d8d8fd9af4a", "43b3dc931cd43a490de3206fd041e118e3651d8a", 
"b8969d6e5658b360111f33d3f85eac63afcd7252", "0874a262c2ec7082658cbfc55892ec6e5ca6a374", "ead1db02b36146ef5c3ef29a1cc411a8f01bc56b", "c64df6c368b09d61c8319718d7d538a58392b01f", "5c76da86238e35ba0c01d5c0419a7806b8e20789", "9717bd66ad50aedabaea0f3af784c7ba9643b686", "ced4853617ba6af27f5447f9c4de07c3e05e8c3b", "8a790c808c293cf5d8ca089e8963b133d1300712", "9c2e5e2ba7c5b3a555c6c72f518e3631aab23c19", "1bb737b69878e8e9ce6a5b6a3c87856521c7e9a0", "b11872621d9550ec2f1d09f2f02237182744e2ee", "11943efec248fcac57ff6913424e230d0a02e977", "e63f4867c73eff9ff7cdf31246585a6915acef57", "282578039c767f3d393529565cae6be56fda6242", "55ea407e4e5aa3f8b8ba7663abdf8926dcdafa19", "4b3c1af4369c9ed6714451643ef9c06969849e73", "3e7ffb5658cf99968633ede18785c5cfdd6aa9eb", "7fbdb1d05a34d28b7f93544248edf7a2e0b8cd15", "420c46d7cafcb841309f02ad04cf51cb1f190a48", "c1130d5c7bb1311e04cffbaf2bf6cbe734adc2ac", "6f4a671537c9e60f042808451ff0fc06032d1221", "24c6240c511f4daa7cf51e28b0a9fb15e365d4cc", "2bf41bf420c8d86dd1bffbacd28c70fa8b12b6dd", "a1d6c9a03075848014e9bd9baa6edda25e512963", "14d037b6c84df142a3f60e21a4afab834d947885", "76d8b370d0a8fc63ead6ba657dd438d7155d659f", "3f6a6050609ba205ec94b8af186a9dca60a8f65e", "92de2ed3805968d6d95da4fa9c44423ef50a6a37", "661be86559295d3b2cbabcd31cc90848f601f55c", "516668a41d6106232a7cd56d20d3b3da343e5f36", "1c557a18fd8f4f1052ff3f831a694bd50ceb6664", "b02342a423eef6e19f473eba26b067405b525f16", "63c7c8dff73ec6798e38ed7466a4f8ff8a87f879", "59d88030c99de99d18d16dd5ffab7c0bcf6ac58e", "b018f4ea4b46701103046c472468631cc28ab311", "97b54703c267deef8c86ab6240c24d76a59864e7", "c33289788ca69a55c7eefe6e672c82a0cac5a299", "cfc22c35ad191cf9d70f4a3655840748b0e1322c", "147c33df99dd52502d65fe390ee45c585349b3b3", "7df103807902f45824329ab9b2a558b8baf950b2", "ea2e15492a0080b90b1fede934bf3eb7b50e472d", "5921d9a8e143b6d82a2722d9ee27bafa363475f0", "e564268a03b21fa092390db0c11ba1c33d2323f9", "fe43388bb2d939f2f9671b50dc0f47d8bc5ae6ed", "4d9d25e67ebabbfc0acd63798f1a260cb2c8a9bd", 
"d94395882da6da17cee0a6ea6f1058314f091f05", "c610888cadcf2aa45e7367f43e42eaa7a586652e", "f2b547b0bbda1478cbecbd5c184c3c42c3db7e3c", "87b763bdb23ed72e849f25a19879722dc2255ab1", "7f9cacb5fc126f87dbf53dd547a9fb9f58ded557", "120339e6ffb6748907d19bc918a743221e59372f", "d44647349c2a4e3705ff347532a159c3f92ab531", "84e3629b1c1c169125f777870e2009d8bcfdc2d7", "5ee220b6fb70a3d4d99be9d81d2c0e5de06ab3b9", "844568d9e49ec34536502bb8c66d5578c962abd6", "26d08a90848c8dba9e4e7c2adee6c38d713699d3", "8d94e72ebcbc8f93dc60eb42ac7058d6a94e8683", "a6a1a47c6cb2336c78f8424943eaf9f01bbf980f", "6ea6be2c270f7d366e9e93ced7ea5a17d3a24c1a", "5ff708d399962a07f77c9bbc0d5efda52aa6915e", "e21b1c10bee6a984971dcba414c22078dcfd21c2", "8f0c11a3332c434af11c01ee11ff7c492c7968da", "3f0126f467802562505d5f551dfb6bd138180268", "cb3010ae04bb144b49eb0c1061b695998d3a7441", "291dba3baa7d42f1e30b26a714e525cb73c05af1", "12f2325789febc95c9b453d12194bf4a778e60bd", "dc3cd4e110b526cb59bd7527d540120c5fae77ce", "ab134c9244c762f1429ccb7d737610d17d95f019", "7fbf1885f27fb72d5e553c4a2147375f928465ee", "cab372bc3824780cce20d9dd1c22d4df39ed081a", "3a13c964cc7adc5f010164ccb91d150457685a78", "6ef190ad2c9c6e11d12bc1b51a4c8a11a4692fb8", "8d7a55d184659ac97d02061a660ae4e30604185b", "e524f222a117890126bd9597934d0504adce85ec", "f79267b0f4c0110051c93f9faabe436215e4fc28", "0dab1ab19a44b73ce0fdd15014b635eb7362af3c", "ec29a0abcd80752674bc0404fedc2c1ec2c7341f", "90dcaeeed3cc5c6001a06e9fa674845a8fd471bd", "9d34280d545ac91031804931f4df7c31b6ab6011", "99c69fe118efbc47efc91ceaa3b2e711405eef20", "578e002828e5e106dd660c7273ebcb014e8068a6", "b18dad1d2d725ab6db64dbe678a033464915f074", "fac36fa1b809b71756c259f2c5db20add0cb0da0", "6973c343a4a59476d57251651551286d4bd55d58", "c5ab6895710b5eb7bb783456421dab70684c017c", "ccd3dcbccae7d903608530bddf6381db8e723a7d", "c2353b9cb7cea1081ca203f8c623b12f7007ba9d", "6729fec79b6cb472b4326745a67c6dde5772ed95", "850c5d1f97eee47a1fdaefc0894b52e51a3145fc", "384887c893c72f1bb19075f03e20490ab003c791", 
"927b52d5caa2a38abadea5be6b531c1aa03d99e0", "fde6d64175c459a26037a249e31c34cc0c9e3f7a", "d6b514a68abff3ab14af9fc0152cd5b28bd0192c", "edf074a5eb3a1f71cc710ccc42849dceb27e3531", "ffb2d596c22be7b0ed8f809fdfbeaa95bd4db835", "57680f0d53392178bb3c431e03bcd8626c12f620", "d1c091bf9402f1caf13892a3fae39326507401be", "479fa087bdffcd574470801f76eef9faafe589f5", "6e6724b247d452f3756d2ed5fcb76980f50066d1", "5f50b2c187718e3ecba68a1eee492f6f1a0c3355", "5955bb0325ec4dd3b56759aeb96cc9c18b09bf3e", "3947371c9376c8f8d18470f8ad8b91c1925397e2", "8dfdfcc3f34263779871d023fad973f4a1966ec0", "d84e075d571193bc616218a84951375e63ab20c8", "1ba4d5d3b0cb46d61f23279f70ae42735601a60c", "fe3f495aed57ccf889d2a42de528da5bcb94e36f", "703f8f9ed65ab87e67716cbfbee0e323aed5b9f5", "633f4e4d1e29d336a5472a9cf43163fdceafecfa", "4b4763303a15a4c6313bfb386756437f394a0129", "b2e2260b8d811948e71898d3adfa8aa6b64fe125", "8ca0a7f2e5a7b1676f9a409c3ed5749c8a569b83", "ad9937ff6c5bff4dae72ca90eddc4dd77751b3fa", "f408ee71b9db38ec1b1b785057d50d6e0d9b30ba", "9015fd773526e21e352037663de3f586ccf4e907", "f6149fc5b39fa6b33220ccee32a8ee3f6bbcaf4a", "45481d31c19986eb5cbaf6bda54362efb2fb7203", "5f88bd922959590e0dd66d71519bdae75485e994", "2cc6e67be5c4acd451fe3e1aeddc4d1a0ad051d6", "32f0c95cee39eba143452d6a0fe93283575257e6", "e20b53bce9198baf7cef711efe9c1060b9ac07e0", "0b6406bb39bd18814ba5445d815b5e49757cfa03", "0f0a5d8a7a087204026a6b67000887dbf5b6a20f", "de87a5d5fbae0733806ba965b2d70fd04596f6e9", "950cfcbaafad1e2aaae43728fe499d8a4c90f6ec", "dddfc10d9649a936cc440c1f3590b14e51a81daa", "55c22f9c8f76b40793a8473248873f726abd8ce9", "e4450b61f1ccbe5bbec1e777baad5dd69fd6edbe", "023da8828f9c039c20ac9267a6b37813b74d4824", "1ad823bf77c691f1d2b572799f8a8c572d941118", "5578be51e09379061f526e8d0fee65e3613eee8a", "ae587a4a8842fbe01b9a043b66f762a89dca5074", "05929f5a20147fe4349b6fe76819c023e53ad8f6", "8948e9dce2dfaeb1d93ce146fab5364b6cd342c9", "12cecce5e5bd53212830cb0b8f5cc593a58c215e", "af053b8cf39612cec0148e14a9c4b7a789d7db11", 
"ca6b78e3d12134e12305fa4bcdf050ac102781df", "8f0b2d24dab016764eebeaa2070d31801948f6f5", "eaaec63bb86ee87d56f5844951143485ce84a4ea", "90943f17cb224c287d1bf117441781d43d2f9b49", "6a9e240e5d84b33c0835cd85c96c70ad5ffdc49c", "4910c4d7eea372034339f21141550f6d7cb28665", "20380acea8e7fb3819f994b3e6dcfcc3839d0b96", "8c5fa29c9bcab3d518fdf355e9da62fb0b58905e", "d8af6a45eaea68adda8597ae65f91ece152f7b21", "6a553f7ef42000001f407e95f4955e7ddde46a83", "916e217426eb1c56be2a74cb2a3b50cf3904a5cd", "8de026cf8a9a82d55743aaa4ec18c86029fda096", "92574a72c660a86a7ded738a1350851f416bec03", "b1edff56936e5d306e51479142b98cc2414c1a56", "6cd7a47bbba11a994cd8e68ee5eae2fcb0033054", "e1e1b3683ac278386cf1569e97f9aced0923f4a0", "6ce1a8240b0eba18c40136370e143209dec4a5a7", "576372383bfd6ce6944d885e60b19151efdffc99", "66b37797286952e7735901e152b4cdea171e8567", "463bfb0b55c085cda77c2c6e1583abb64baa5d0a", "43cb50f669a0d492256d11c6cc4128ba0ce79a3e", "e8def76dd702df44b0e1af7dbc0360bb7ef14562", "d2c9f842ed2e36b14b9ea2bb2253159cf5c495ed", "c64da9bbdc9942decc4566f89e13d991a6303683", "4da29057449df4de5b4646ae6a46d622b4d7d0ad", "a89e1fc2681a9a399cc5008ea34b5ec3fe7ca845", "564065591bbefa36830f99cca6110818f252e5f6", "05f988ce9a92436f194a8e06ba21b6a62a3aeef8", "02c44e0a7bb179be03601a7abedb006a28ad4e23", "277bfd1bc89044b4a523ef23f48bd053d5560657", "cb2bbc19ba323ac8a7d0530fb605462c8e608e1d", "aeabcbdff7ab810b961a9f7e4399b6c0421d00cd", "0f22005f8f2bc134f02c4a76cde30349e3389b8d", "1957956856dc04ebee5815bd62874687e2af7260", "22115fc69c612832a39187f3f1b3fc6fd9c9084c", "b61fd19071b887e7f7b91c1473af9fc653a560d7", "79d50641eec7fbb6588909f96aeaef4a7b42c9e9", "2c797d2daefba6cfceb8510219163dc7dcfa0a66", "fd070be9327291e229d3149ec60388dd4dbf74b1", "40932ccdd7cda22e90c1e16b4a4dc4930b122a9c", "9c03db9ad53be4862625256a24f56cc7b0a79c23", "264dcfb5be3f89dc0950472a2a274ef7b641b1af", "bb980dd94463b03c6584513bcccf780e43f089b2", "99e1fd6a378209d48c12a70229e4f6d4d83f4417", "877aff9bd05de7e9d82587b0e6f1cda28fd33171", 
"02e9f1bb203a5ade98308eaff4f6a5c96a2c11e0", "429961112a9b4f08f6b68acce8868b3468d72c6e", "030ff7012b92b805a60976f8dbd6a08c1cecebe6", "3070a1bd503c3767def898bbd50c7eea2bbf29c9", "de8a01d36df1c3a881523c3748fcfa988710fa15", "9c07704226e536834c4a8c01e1eb428584bacec6", "def3b2254caea169c5cbc4b771c44f1773c004fd", "c34ec5dd51880acf72336e85e4e45da5fcfc75f4", "daf74c34f7da0695b154f645c8b78a7397a98f16", "9c2aea0bd67c7fe232cca54ee2440b9d666479ea", "9534a04de5e99a44df76ea30140f66a62e83fdaa", "0a40415bdfe4bc9ef7e019e4f1442a9fb61f58b2", "eadf6cb8f16c507e4a73db33da201cde3d9b2f5a", "cb4fc4d49783f2049c48a062169f04eb744443ec", "aee79e0ba67506f1908e88463f1904a8c056cbc2", "84c01c9760cd294718bd7c4b4c93596db1e5e068", "35c473bae9d146072625cc3d452c8f6b84c8cc47", "367231b80e8201fc9c461fbb42047b20e89ea961", "c01876292b5d1ce6e746fd2e2053453847905bb2", "93dce341666b6a57f8888dddb25a3fd37df69b02", "6e40b120bf46807ef28ebdd8860e3109708bb888", "90ce797fd1218768737f1611cf830a70266e60f4", "0d130b5536bb1b909ff9a62737d768d4b4fab2f6", "699a7c88a6d226f59c7a5619b3cfad714415c31a", "7cee2a2bee27657e6599b13f9ed6536d5f46fd0a", "16022bcba6849cdbcf5866a075daa8e24c218c13", "658c802890c7133e2ade778b5d88b68bcd0dca9c", "2ca4934e250810f20cd6beb5c131301ac4b54cf3", "072fd0b8d471f183da0ca9880379b3bb29031b6a", "7954a1bd6e693da8f2ae69ad01233e937d600e9b", "a343bc9239a209af45c43f94b86651fd0074a364", "d6eda0c16d226976506396653d14044c185eaf3e", "4b69bbb6dc2959ea3d2e911ed45c6298dc531490", "90e994a802a0038f24c8e3735d7619ebb40e6e93", "242dc739b7cc49a7f54967a7d75d8a82f92bef59", "d3c1612ae08241dadf6abd650663f4f9351abaf9", "81eb9fca9093f58eabb8850512f8f46fe2bb07a2", "cba8b9949e71ff485a4ecba33128e2f206651cac", "74f17647637fe068e237d8d5a8cc37e081ec03d0", "d0d1a18469bd41b9464b393b56d209d53869bd77", "ac88405d34b7b6fa701e25d9fbdb56126cc9a8c3", "3d69d634f79dfcc717e18f73c886b854a157a3ef", "86cdc6ae46f53ac86b9e0ace2763c5fe15633055", "8cb3e0c4ad37dd7e0abd2eedd704d4d27edb0a17", "9b2553d375c27d3b5c5a91ea4f0fc0e083ca8d77", 
"322a7dad274f440a92548faa8f2b2be666b2d01f", "cee8ad964ff20dd5ff6c10c783ee271a7222987e", "77543f169cf1c9ef8707e54258c35568c7ad8250", "aa7e7637f3443a823ee799a560ab84103b0e9a7f", "eebe66c4d1a41b3c7830846306044c8f3fe0d350", "e8af37ac6e0a5b7f04b6824bb1f74e4f363b99b5", "060797f33c242b568189be251f9735afdc4c9f22", "0b90cd2e8abd6b23e7f8133f02e3e6d121cf4903", "6f640d448f00321b9b3bddb3a787cacd2f45cd1a", "06a2a3c6d44ab5572df55ce34d9b1216bc685385", "b288a369c6f05443cb794048065b7a86139733d3", "1cd9dba357e05c9be0407dc5d477fd528cfeb79b", "7363cc7e0c5b43ec12ba47bca587a325f719398a", "4c4454aa7a2a244c678f507a982fe8827ba419bb", "18d19cf4d09ac340428d091b24af561a3d5de3ea", "3bf1a5164e4bf751f0101e177074d2cf2fd54c95", "83ef7de2669bb2827208fd3a64ac910e276fbdb4", "267595dd40cd109c93e67874a1cf49ce79871f3a", "5ef2be1aadd2f666756b2ab66bc05d146ba0681b", "f4f0eeb4321eacc723e750ad27bf4a9401f1bde0", "d7173d9dc87234211050f2d2c38ddc78306ac1ef", "f9f86d087e84eaf0e6a09575982aa7b41fa62451", "33e7bc26047de3c1b607f04a644c2c03920201fd", "c7756864268459069a59c2276cf482377d5f997a", "f73b7efa3bec07c582ec4e42fbc43a4f4993c6bb", "3a2d3514b5cdccdb4c13aadb3929f3a78c03f020", "5f0dffd4f36394c92b26a1ee7b185ea4b3be58fd", "c95cd791ad0cb0a08cb39e987f725eabe3a08648", "c9e4e1bb544a892fe07c99cc9a999f0762237cc3", "11f73583ba373487967225ae4797d723ff367c1c", "b705ca751a947e3b761e2305b41891051525d9df", "9825c4dddeb2ed7eaab668b55403aa2c38bc3320", "0c1531a2fa4d6a270b9a09cde86bb0669f5975ff", "8a7882f765822ecc1f72610277037228c24e7bf7", "d951ff5f378b2a5f878423029123ad6b3491b444", "483351de2bdf58e21bf8a68a5d75e79a025956d6", "948853c269cf97251ba5082db0481ce6f96cf886", "b56e4f61bfd6da098941d2aee8a3ab1221ce834a", "eb65354fe51a177b2366f8d10b8140912e883d62", "3ad2b6c283b1c4687c4f782efb64a209d3cf4cfe", "17bc977c47562210d54966ce412804b237b1d763", "47f8ba44fde1f8a3a621b20cabb7e84515fb8313", "551fedfeaf55e3f7a7cf19d2b21f1a56f8cbe9f6", "fcd9df8238605f70a54492fb0c6bdc9f29afda98", "a49b661e42aea6f205e543a80106fc9c6ff0f9d4", 
"c867caf3f29abb2f3fd5c4c7e98e5f551a70be25", "79443a311d75fc0187314d21f8b065b33e5b41cd", "455943924a98593655ae7197ee3835b9f6a3b778", "23231becd8ca7bd3f1f10660e1709554a21c64bf", "05a2547d976420f7d1de19907e16280d15199008", "3303f6694fa1c48afbd6e104b72e98b7f52b1651", "7b35e6b9bb6575eddd5d4598481007c154f2bf9a", "0beaf17d42b1171dd245131825d2de67000f45ac", "c08b098c1c2b44bb1080f50cf605f451972dc52c", "20e903faf8e2e656a89d983541b15f2e0d614eeb", "ff1a958cabb2aa10cc947a90960da4a971720db1", "42854a0175d866f190378a3034406e11cd160568", "4753a125469da7649e9f58fb0db781622dff41f8", "f439f9a0bd535eab00cbb93c1fa7083615a08d1a", "2f0c30d6970da9ee9cf957350d9fa1025a1becb4", "3ba5e820f160dfd02544120ab6c1678421fb2c3b", "396aacab076a3607429f58ce442d5d57b5aaa794", "d014011b24c62d5b689c782c09b89c52970f46e7", "2b79da19774861621b6a9d0c769f95d33e5b6eb6", "5e525d319af3739ccb205a890d0eb8bbed811d20", "f174b24860b4cacbe047d3a5650cf8866d2244d9", "fe2f404a7345ca414b07cb1301656ce18e629207", "0d5fa5be4bfe085de8f88dbee1c3b2a6e5ab9ee2", "031716b430e4256c09d5b3559ca9f0be51cb30b6", "37bb9b45c6385789b819573b3716fe56a9e627db", "682f735ef796370f510218eb7afb4d2a36cd1256", "e2658c545d0b8033ea1d7a775a4ae18f56b3bcc7", "66c792b7e9946f8cb92fac185267d03371437451", "64b06918add69292c088455b62c4b0f06c727b1b", "3910b1cc849f999dc8a2c02a0313be32dd5d2b43", "079e20d0d870a5bade46cc9b4338a3d637399654", "95593fb20df8ce1273cebe0690cf2cdab054b9b5", "75827a2021ac2ad2256144b2a2fe301948d39b51", "22ef6475b220356f8547e9980c437f55ac2ad45c", "47c0c7f1a27d467e00a6fa7ea2ca0af2e3328b9e", "95bc6f4ff5033a091b6ddc6a4290a58c7e6ddc66", "62b72fcf996f6351b67ae57653c5d0f3a03d3599", "d67277eb00d58d20eaa18c346761fe4eeaab9c49", "1b7b95ee13d91e9c768de6417a8919f2a3384599", "649d0aa1be51cc545de52fc584640501efdcf68b", "830b48f210f3905117b335e305166df4ec092b8b", "98a6f2145a358cb2e54eddc99dd29911764bce0e", "6f41e2ba877ec690bd1c9e5e8742c4088f95c346", "0c03bb741972c99b71d8d733b92e5fa9430cbede", "cb4418b5bddaaceb92caea9e72c8cc528ce4e3cc", 
"f65c03004e3b2ef4b4224396f7a31ee75a252d85", "6e396401b3950eccdaf8265aeae8a4f0da8965a0", "af3d8552d31843136acc8eae994842c0cd5262b5", "4d653b19ce1c7cba79fc2f11271fb90f7744c95c", "4189aa74550c1761dd5927442d0a98ff3d3d1134", "d8a489e5cd8a7ca5faf646533263b46ea462a105", "998f2cfb4a3bac6b38d8a4a96a3827e06a0eaadb", "a7a66d713776e78ae60617eee2715443a8565a23", "ea3b13e512c846e2bb29d99f5f97fcf8c7f52836", "09d08e543a9b2fc350cb37e47eb087935c12be16", "e10662a59b5f8e1f5684409023f11ca727647320", "5849635e61ed7d6358f65f5a228a5148e4fea3b8", "dd54255065cf93895661c40073cdd031af7dd7e8", "feb4bcd20de6ce4f9503ef01c87390e662538c15", "27bcdd21f1be3d0990f86a231d29d46a5537e5cd", "833c1c0180ca36ea07ecfe44caf2b739c94f511e", "d1503151b39038a87acbd9ecce073ddc211a597d", "78c4061ac15acfcbd932a159e9c00116bed7f362", "5e3c8cd50301a13ad53a3fc9e3567ede63a76215", "a070eaf54e5f1f0980a3820477fe97fb8b1d80c7", "92d1e292c00cb307fd5ad074b5c172aabf052ae2", "1f8f0abfe4689aa93f2f6cc7ec4fd4c6adc2c2d6", "7fd97bc23c85213b8b2e4d28264f04ce6dc84e74", "30f113d985d876a3974838b2ead49a069b474e57", "2869d03482eca973a0d2640545e657428d05185f", "f3dab33e5f00e80ed4ce97d3443e96eb0ee96301", "48cfb97dbbc324d9f98effa5a7e07665f62080c3", "0755ee472e2bc2968b16f851a16c39b3f3e2d0d0", "d809c0ab068861c139a544e5d8eeaa73cc8a3f6b", "77b77d9f42b51c481d7de37fa2faf7baad6c7fa7", "9c6ff9a0977284ce08ebccc96b400620be3ba486", "0fb39e6d74e3166598e190b67a5ddb6f530ac49d", "243778aefb3c23d6774309c70217cb83f7204915", "951092e30c5188e6dc925264f580316639eba699", "3ee096aff93ab9a2374cdde06973db1996331d86", "da1049ae56eaca2e7d65946cf87b1e504d9fcb70", "5ff64afd70434b12e043ff39a91271eab6391124", "4ee9efbeb26f684557fd8d39afc8e90e9958a495", "33d682c52eb24875c556ec007bc38068d3e682c0", "8529c0b98ab4f6eb21715a54395420988dd69633", "3f60b1f800178841f4e0ecb79b64fe60b48ed03b", "7d52c9da079a4929faa0b39d8acb92240eb3a1f4", "0cbefba0f41982bdff091d0e5f0d5ef93185a55c", "8dc10cb0c8f6e449c22bb11399aa886d850fc701", "7e1bb3a908f6bcd3ba09b2d48f559536e3034d88", 
"6dd3a95bd46e3ab9c3f649a2034bf5ddba19c710", "85feb069aa605caf378d030754c478478729c0a2", "f375bc91a5f7b1f2d36e41841ccc22f202be2dcf", "67598e0b447294ad7414b8c73819c7ff395eb63e", "c72e13581cc69ab9cf59fb3d41015a67de68306e", "8670fea0d92c6a0e767d089083a39d5896db8534", "f43327075c17e71ee713ad727aa473230a432a90", "adc2a323af5f8be790b7fe5ded8b5b276f0a1b31", "2b3fe9a0356eaf50f1340dda3f3d14f6904905ec", "53819049f41998a5a1587dfccccc2db8612b45af", "484c4eec34e985d8ca0c20bf83efc56881180709", "ecb08081539711766acec0d252da13be716ecae3", "51cb2116c5a32d076f54b1a192cf4e850390f665", "a62cbd84251d325ea9e91642a9b37f3026cd3e20", "cb985b696085fdfdc664c74114b841d58382397c", "43694e7d5861a8bc8aa5884dba3efe2d387511c6", "174cd8e98f17b3f5bda1c8e16cb39e3dec800f74", "a3fdba7975494c34552b33cf839f21d62734e6f0", "c7391b43bd0216daf697fb77906b76c71f5c50e2", "ede3af38e30ca332af0c1ce3bd5144070f7fb7f3", "53cf087dbcbe0c4b145297fb0a32732ab2b18b66", "8bd59b6111c21ca9f133b2ac9f0a8b102e344076", "ac392dba43796a25c2eee5f08671537634a77029", "494e736c05ddf500830e9c51b5fb42be9b9bff1a", "21b58c8aba44c173493e418a797a1f36c6dae8a9", "bfebba8356c5d20dc6a9b2f72ff66adaf63321b7", "1e1dc91c2ac3ad0ae44941e711aed193231c3335", "6faab65a009b36ee3f79d3e4afdf3cc84d57cd67", "5b79ae0d1fd60d61d1b7e37ffe499f50088554c0", "6b9f23d75f7f85af831e15769a33b8f8d90367d0", "d40bd8d44fe78952769a9bb04fe74ce38ef07534", "8ec6a93973daffd3ab8ac70bdb902b3136bc8e87", "144d19b3f820c96b7816842356dadebf16678a8e", "e1140b86c64549cbcd138f868c82ee8aad77d103", "93b623ffb25fc32898f3c876c9aba0f5ec22d3ac", "8d879f4aa3284aca8d671d8360c6b6f2f0f07a23", "d0096742f59b9361eb0454a69c9afb0ad6574d96", "1b74479f6e597a33703a63161527d55cc5d3096f", "920ac5a925193d82a9aad4d976de295b75d2df23", "000f90380d768a85e2316225854fc377c079b5c4", "63fd747875052931aed46a37c6da7d7ebb7768ec", "d5579b2708a1c713e1b2feb8646533ce26085a3a", "c7752341b28a0ff96e8b63986afc669fada6cd50", "abb7c66487009ea20967b9c6708f660fd4197bbb", "0c493914e4a961c859b0a4435cadde02cfd56fef", 
"6d3c8c5869b512090b1283fba28f01c2748b0ebc", "ccb27d726c0b799e800a63eac18a33bf262852aa", "4f19d33e808a6675f11fb624499d303368deafa1", "5bf9493564d1ed173aee4dc701d4e62d5f926fe3", "9594cc518db7890c3f20073525e9a335b2049e02", "28f53ec7732299fa946ed3fc27bf691a6ab5c60c", "040eb316cec08b36ae0b57fede86043ee0526686", "fd80b1b1a3509e616af97a181d82c6cdf7ba4721", "2aa362740ac9a2b304a74122da820e3829689842", "acfb90f474209f56455c4d1ae60d524d8c4c9df8", "12feba45b219f129b5f12c16b5ffb5c1687b66e0", "5da9a5367bd70c004ad9b7e8cee95059490e33fc", "a33f155fa0e886eb4746fbe6aa8ad8f334929648", "2f21c68ff9fbd82a3241f79f985ec7e1dcdac41a", "de99971e61613f174c9e5aa41a2c600399f59953", "da1e0b9e445493d3e6dc0e3c23be194228c5d796", "3a888bf996bcac3feb7e130543df9ec8287db515", "b9e6f9e22134d4b5ac66fbf2ec7b7b702c6f4eb7", "987c9a137d638f3d561c52b6dd0f987734ad5460", "6a7462ac7e82dae50251ff97ffea3777f2ea81d8", "5f94e354faeba1d330088b926d1f7886067bc93f", "dc093a8975f97c5c1f18550cf7298982b0c5cdfd", "3ab5d67310aa3592b68e9ef55df4603507d0d486", "b81a5b676e5e8eee7dc99d5319ecb963f22d05c5", "b701f11ecf5d465c7d5c427914db2ad8c97bb8a9", "a6161e53d77d7cbd6e69d1b84e6d03d7041cb93e", "e8b616e244c6f51d925751a6fe2f11e488f986b7", "f4331705d8e7180455fb865436bfb34c08ed6d50", "965f7202890cc64594ebeaaeb7dcd5420709c55f", "c02dbf756b9e9e2bed37cb7d295529397cad616a", "027beed800f7d5e20194caf6d689345045e8d0d4", "cb422f464e849272d92b8f2fc3c5605a71c98e54", "874082164d9ab9fced08b9890c009b91a2e846f1", "e45bcda905b897513f4cff9e5c0a5bf475674a02", "7b79a754a8583f887857c539895a9dda6331ca2e", "56cf859363f1b5231418b40b957a9132a78ea546", "2504b7bddd1892bc905fc5df6b5afc0b109ef40e", "28446fa9d9ac0468cc715594a6dcc0ac5d9288a5", "239d2cc391e51bd6c65944bb3f3b48f1f3681877", "da013b84a93cc89d78f2d9a346fc275e3c159565", "06d4f3b135ffb69c2e3ba10544ed1e7d684761bb", "8a1294d2093b7f339e3d33da46e008aca3528893", "d979dbc55f73304a5d839079c070062e0b3ddbc5", "fb6bcf5763fe7ec7366f5991400090fa503613a7", "30caeca74168cd841759cef951c947f44ef0f547", 
"0baed6e8b8d5456980f2c9f64b6f566872c778be", "d80a794320836120bab46b467118c65fd6693f57", "2a2232f2972191a0606d588aa4f13c9f27d1972d", "0f12c93d685ec82d23f2c43d555e7687f80e5b7c", "3ef85a078a688615c61f04d8605ae27e17f554dd", "a5006c29b0609296b5c1368ff1113eeb12b119ad", "7b45aa509184b05064eafb362f80ba5778566a4e", "7ff83f10e49e81ce6f66270e8f3f42dd2c6eb3ed", "d444e010049944c1b3438c9a25ae09b292b17371", "56b9c6efe0322f0087d2f82b52129cc6b41ab356", "7a39a3ca168dfebb2e2d55b3fca0f750b32896da", "3aee6a6285869e6db48ad269eb110b542ad23c93", "0b5e4ac8a04c0ffd0f9045901525201db03c789d", "0e7fdc0b03a1481b2fa1b5d592125f41b6cb7ad7", "e60a7fc13b41c76f45107e03ceb850e413c7b4b0", "531b211d4cbe766e0b86c4bb6f24e924494360c5", "b05faf0ae510cbd7510a6242aafdda7de3088282", "1574abc94d22b03f8c9630f0eb7ad1f8ed67880e", "7d520f474f2fc59422d910b980f8485716ce0a3e", "97104def2b92b430c02f595d7802f9ba23b74cc7", "dc3d7128e15ed8d97f9b29021216fc1d4053fbaa", "c348118690d2e6544ec1e68f904dbf9e5b6397bd", "6957baa0db5576997aef9de43b93fe8fd4d07632", "1ec59aece51a698bce34f393cf6474b926fd89ad", "bed8a74e75ed96868ce81ed1080433ef5be66a52", "a3ed080262f130051d2a02e846f5d227a440b294", "e2b615e3b78aa18c293e7f03eb96591ccb721b55", "b55853483873d3947e8c962f1152128059369d93", "28eceb438da0b841bbd3d02684dbfa263838ed60", "f7580def2dd84a6a083188aadd9c66c99925860b", "4b3f47e3b8f62b8b5460c85c623e1d7e0b189706", "f6ff469fc4b3043530f64e8642ed822d119b42c9", "d6d7a248b1f59981277121b9c0626ee8d5495757", "295d978cf47c873936ad774169cac651ea5f3c96", "a36f8947c7e33f73157d3ffb0660776403fc197c", "b72c72e2c3d140c3064eae3aff17e0c0c177c963", "1172ce24f6e9242b9c26c84c6aa89a72ed8203d0", "6e647a430d603f4d82e44b4a87de580a0fcaec88", "13631379de6487fd0571e5919f4efb65d16c1633", "84968d6488e87c99b8560ab33110a5bf85aa5761", "e669c2fe2051648aeafa806bc10b380d5b99dbe3", "8ab450081c04e5587002e9c965d144958cd404be", "2a06341b40b3fd27483b2a8d8cbf86fddf45e423", "7c38572093b0d0ef72d828f59f95b3a6a067fe27", "79ef25ed4863311000975b955651c0515fe38f45", 
"8c04688425fa3e03c24d08b09faad49e33f2cc30", "a1653e88be986aee2f37792c3fb05f0ee7fbef94", "d4ea53af07a40dbb624ceabfb35de30638e3ac13", "e939fb6b762de242b22e295940e0d9d7d259e442", "1d455f918062f66e86ed53cf258284abd6abd8fc", "1d21ec277d2a72e506a9665f19ec5478dbedc20f", "c4d4498189f147ea1559b5cf097fdbccebc4dceb", "4cfdd0c8313ac4f92845dcd658115beb115b97ce", "72ef0ac03d3043bf664ca7c21abafc4191b24557", "638afad8ab360d3afdfeec4acad02b4406714e38", "fe6409e8e09d47758d4e71981ad951423bdce212", "ad7a7f70e460d4067d7170bcc0f1ea62eedd7234", "39675124e4fe1be08f42bdd2e1e237e5a87839ba", "46149723fef89d3b04019b4f62e4c0ceff7c76a0", "a3bf7248e38ed6f9456f0f309b36470c5c0dabd0", "0949548b95e225dcb0ab88ba21f385ac6b5d81ae", "44ae568a8cafbd4f4d495bf612bd6bb5c5116425", "548bc4203770450c21133bfb72c58f5fae0fbdf2", "f91e069b2e67801f54ec05a1265fc707a65feb37", "2cf6d4a3481b7ec40b704472017493ec17565e6f", "af8181d0394118638c1e140051d9c590617b7724", "7caca02d3c61271d22c43580677acb6d52b23503", "51a09f0305a0cdd48ccaac070363c71bc2c49938", "5f642f76d3503547d517790fdb5ef8d34f0e23ac", "be993d793e393127e3fb34d27fda255894edaedc", "cabc9a1fef57fb2cad91bdb0a84e18934ee5bdbe", "5d0e11844f1a210f16025e990de938f6732672ab", "d7c659ce0442bf1047e7d2e942837b18105f6f47", "368132f8dfcbd6e857dfc1b7dce2ab91bd9648ad", "7ab90189c9c66298c900fde3de4c8d77fd035d80", "9be696618cfcea90879747a8512f21b10cceac48", "0f9bd0d528603654de2687d3ae2472a522607ee3", "d8d13c540c326d45824073c32499590d7cfb291e", "d291befa3249b49d57e6bb5abc643ce6d728f7dd", "48c5eef4ef5ea4035bc0429e773c38ced7151157", "15623fe8875a36cac5283ff2f08cd50998599725", "ee82b4ecfc16070177fb78b8befc95121ff35419", "c20148703a706d5883d323c3386978714fe1508d", "9d9166e1d9e80bbe772423384af53a3d5da898ae", "b242124e3bf1ebfc57b5279d4d75ade924a5d1e3", "fb2379346def4846ac24bc41349e7cac7c1e7243", "e01058388d139e027482a7d89a2997606f7ef4fd", "18a4399b8afb460cbd4de2225f39ed23a95336d6", "7158179c1cc56edb32a2da3a139a168592bbd260", "32c801cb7fbeb742edfd94cccfca4934baec71da", 
"5882e62866fe1fcf7f8458e0bd0bcb39057afce3", "6965de4410921cff014a48b071f2c4c52c1da0fd", "ae47e73c25755427c9f5904425a35d7db737829b", "21c0bb96f989d2e5a61b48f1b88627763647b79d", "a3dab83995f27ec8d09cbc06fe815ade88232d10", "663981cfc5fc843ec2682b77ae427ac351bc2180", "834e738ade61dbf19bf8a7059921da345e9d5529", "5a716a15b94a84ef3a76edce1e9dadc0f633e498", "f8e55d0bb896d93385b590c13c5702b63b41d489", "caf0dff8d4008ad634821e7a3061d59cd1fd7fde", "4bdf749e43a8e0294ba345d2d3e524e21a144ef2", "133d9795a89a681c9f6db6a0244e8975992d968a", "4543845adcb6135f263cbf6c2813d10464199d52", "0c3d1f306fed393b92fe62f03496c0405ed1ab4d", "d9c5b08d51d1a08aa96e3577c5895fc224470d40", "f8537b3b02327e087a9bcd631711d7d02865f621", "aef3ecc926ed79478f9d1f38c0fec2a29bae9c3b", "e46732f0c818b059420f68162363c9d1a9dc5395", "dd056ecfdcc8ce89a550e56887f1df1a66f5cbff", "c3ea346826467f04779e55679679c7c7e549c8a2", "0e8a7994f9b8c7c7686a4b4659920970aafa1b77", "737f3cf354f40a6a7fd8a2058fe2803b8dd6c56b", "c4ee54f69de0b5b87e00c270c2ab4229da0720eb", "c57579b067c1a5f5b764344d3c7df227eeba9155", "bb65479d3f38b73826596e5da9dbf5ee4199d42c", "9a2ed8abaa17834cb8f227a9353c8cfed3a367cd", "0d08ab78ce86e053ff3003aef951a5174d56beb8", "03799d5eb1de8141db27856758a2f72436fcc926", "36db84d941f1a3ece8aacbfcdec79234f03aa1d2", "a22a56b95b2544e6d6d19f8907e89b2c6f07d067", "e9bfde4d9898a886c340e5ce4337c2969207990e", "4bfaf3d14625cf020e65fbbc5be262b83342facc", "a1ffab629c19b9c04fb047dae0471d3de73f3738", "9185db7ba8375311879d506562a7c8e89f34ff82", "0012d023347d01b791e7753f4f58567a5d480f7f", "d2df6e68a686ecc95a1e44505379db005f83ab18", "c05f9e4979cb33090db984226ff3cff6e2dc1950", "b76d6bb7aef87d03b6de039f01d3dba9224834b6", "6c182ec9bd85cf61b01c90955c81d71926b0198a", "439b6a5b91f1c5a751846bed7dd27c698a7ee2c4", "1e99c95ea015a0639448fdf60f9694fed5464500", "d57279d90f7f5278ac56974485810865cdb1446d", "4cf9a2af6ab02ba026832c057cf9fde7489d42ba", "7d1ac241fb603a4237cb681dbcf163a9f89e906a", "2e6c3557cb90f472e6798fcaa8ecc9dff3557f11", 
"9713e87734046cb7a2d040d71fa35cf93472ea80", "542a2ddc53d80d58a8791ab1a72dad660035e114", "b75df22c7c52b8d85dd7f155f7b495907ff3561f", "f4dc1ca2051dc191751eb92a753f028228134e62", "1e0fc2e5537db53080ec9a875df614dd8018c873", "dd173dc349312810ec45ed4b346190ff2250ddd8", "b9305c065b3c95fd0844d16a09fb9cc7c321cf58", "16d3954f0418bd9a2ac20a2be6db93d49213c680", "4e6305b9c9ec58db62548c666357521fb4b3f6f9", "18ddef5e311b62adf5d037a240324f569bbfd248", "99783c792947f17e41c94ddaac31766277809049", "aa892a303aac532ccec34ba0e741092df97fdb9b", "90aa891d181d918aafdf1a29d8620bc7859294b5", "5f0f8c9acc3e8eb50ca6e7d9c33cf3d9a8a54985", "0d3018c0630fe3f44f96c7cb4c6cabc1517b100a", "d18ee185ab659f218c97db53e22a2b98f3c642a3", "8aea75940c90fac8c1e5d7ece7d04a61555c3bf6", "a9bd2133928a9d3fe708c9ee6e8d64ae1fc9a43a", "49a690f5dacf8b6cae7f2a1c4ef2052042d6c729", "3bf66814817f582510e0f0a717112b78aca075a0", "14f0bce6645f39a44f5b0e695b5f28ea55fd9625", "ae5b2b449f59ae0f46f6a31ed4826d98241c394c", "806d7b97c3535a3c62ce243fe7008149062d14c1", "9fb7a23910f6464902f1b653025f3aeaa20b90dd", "427d6d9bc05b07c85fc6b2e52f12132f79a28f6c", "bba153ebdf11e6fb8716e35749c671ac96c14176", "1a219e7bcd8f30f886a1f24a8c05bc26bef83ff9", "15801fafe9fe893c50e4221a486e35f3d599fe26", "0e8c603fbf8ca02435e1a44ed41db077acd5c8a8", "4c02d6874a761182f3776a5a04142e713cd939fa", "6fc8c988dd841c6c4f5e96b1b1458b6aa564b2de", "bfdc7cde3bbfcba738a5eefe9143417ebf7d8f5c", "80471bb250eca1be53a455489e187c0152ac78b9", "c936b9a958a67cdd5665b923569d9d786c934029", "e27680dea38b7a0272042a82577eb58eb552785c", "2faccc93b7bf66ec8764ed3293849bed1bdbf115", "a65b93c01518755291e19a0545c1a3d20e401c0a", "fa634d2194974f52bff30e2ffab81c3f97c49b57", "46f5bb35ea99c62320199b1f0924a4e7c0b001d3", "1f2497491ee465f299eaa8a769640cf4f084ee09", "73f4be4b6e56f5bde875a8987f90ba799dde35b2", "7174e77f8e26aef3105996512b787b336320d46f", "d9877f4744993c6680d61bdeaef2dc4f5aae971f", "83c695de8b42e592b3f23948f90b699b82c0b068", "7e225e3e61527f35b7bf44d47e12cbadfc9441f8", 
"05fb0bb667e01eb118c5d91ef47c2e69f9be2f9f", "fa99cb089559566a4cc04fd5c1423fc1ee3791b4", "f9e21549678feea9129112feba7ab5b702abbded", "2060d46b178719080d51bccb150a3f8f227807ce", "ba1072f692840c6e3c6c506f164b741c22a27a68", "7cab6a3247f56e0e2fc38133ea0fb89c48dadda7", "5987fe1cac83ab8b2413dc53c7fea9df15a096e8", "ee89b903af1d8f26a8894a3773915c74f038883e", "9c889616034adce2af05d74eac44cf43a8106468", "fd645edffb18e76d89b9efe02bc0b76163de2261", "ec24412cdad5a912f1174ca9775442b5036e2298", "0334a8862634988cc684dacd4279c5c0d03704da", "11a47a91471f40af5cf00449954474fd6e9f7694", "58d0c140597aa658345230615fb34e2c750d164c", "0750a816858b601c0dbf4cfb68066ae7e788f05d", "b59cee1f647737ec3296ccb3daa25c890359c307", "a55dea7981ea0f90d1110005b5f5ca68a3175910", "8395cf3535a6628c3bdc9b8d0171568d551f5ff0", "eb8519cec0d7a781923f68fdca0891713cb81163", "a52a69bf304d49fba6eac6a73c5169834c77042d", "c607572fd2594ca83f732c9790fd590da9e69eb1", "cbbd13c29d042743f0139f1e044b6bca731886d0", "6d07e176c754ac42773690d4b4919a39df85d7ec", "818ecc8c8d4dc398b01a852df90cb8d972530fa5", "323f9ae6bdd2a4e4dce4168f7f7e19c70585c9b5", "672fae3da801b2a0d2bad65afdbbbf1b2320623e", "3c0fffd4cdbfef4ccd92d528d8b8a60ab0929827", "5fff61302adc65d554d5db3722b8a604e62a8377", "dde5125baefa1141f1ed50479a3fd67c528a965f", "d31328b12eef33e7722b8e5505d0f9d9abe2ffd9", "228ea13041910c41b50d0052bdce924037c3bc6a", "d2eb1079552fb736e3ba5e494543e67620832c52", "6bb95a0f3668cd36407c85899b71c9fe44bf9573", "bd8f77b7d3b9d272f7a68defc1412f73e5ac3135", "c98983592777952d1751103b4d397d3ace00852d", "c924137ca87e8b4e1557465405744f8b639b16fc", "3ac3a714042d3ebc159546c26321a1f8f4f5f80c", "380d5138cadccc9b5b91c707ba0a9220b0f39271", "b498640d8f0ac5a628563ff84dbef8d35d12a7ec", "91e17338a12b5e570907e816bff296b13177971e", "8efda5708bbcf658d4f567e3866e3549fe045bbb", "93af36da08bf99e68c9b0d36e141ed8154455ac2", "2359c3f763e96e0ee62b1119c897a32ce9715a77", "ea079334121a0ba89452036e5d7f8e18f6851519", "fb82681ac5d3487bd8e52dbb3d1fa220eeac855e", 
"097f1f58f1cb8dc9e0622e001d7f6cbb624d542c", "abddbb57258d85b1f3d9789128fd284d30a91e23", "8b879863237d315997857a5585afb2bbbf78c622", "4b57456642e1d21f2bda05aea586b7f419d309ce", "a6151d07560267818389909c9f1dd268a87f42ca", "0d8e7cda7d8a2ff737c0ad72f31dfd4d80d3a09a", "550c369cc3080c03b89d738d82f1ed50145c5aa7", "dca246cd06666a331b0203cb09a6ef51727bfdcc", "4d6e7d73f5226142ffc42b4e8380882d5071e187", "9b9b6d34deebb534de66017381be7578e13b761d", "9c341221e19fac7a5e38b9fe5c62361f780a7f08", "e3e44385a71a52fd483c58eb3cdf8d03960c0b70", "097d3892f5f2ba7be43a81908279f42a618839ec", "4bce0b90c3c7c1f4b5118b6432841cc5d99c2c72", "83ba6317d13482593264a4738dd9da6283d11832", "1a6a5a33aa27e44b80b51d0bbc26734a7fc2cccf", "4f00a48a60cbf750b4ccbd698d5547d83b3eaf3f", "ac1bbc44a0b342bb52d8aa92170b6473a148d130", "0b4c8e23800ca4c2fc58c4c7771c229faf0631be", "69522bd70f1c64e9073753ccf335382be5aa1cd9", "59319c128c8ac3c88b4ab81088efe8ae9c458e07", "988d5ad8d114f5f21a73b2ae464dca4277f5725f", "8886b21f97c114a23b24dc7025bbf42885adc3a7", "bfc04ce7752fac884cf5a78b30ededfd5a0ad109", "10ab1b48b2a55ec9e2920a5397febd84906a7769", "ba7b12c8e2ff3c5e4e0f70b58215b41b18ff8feb", "808b685d09912cbef4a009e74e10476304b4cccf", "0da75b0d341c8f945fae1da6c77b6ec345f47f2a", "0aaf785d7f21d2b5ad582b456896495d30b0a4e2", "be393cd567b338da6ed60181c8ad429627578a31", "eb8a3948c4be0d23eb7326d27f2271be893b3409", "1451e7b11e66c86104f9391b80d9fb422fb11c01", "725c3605c2d26d113637097358cd4c08c19ff9e1", "ff1f45bdad41d8b35435098041e009627e60d208", "d94d7ff6f46ad5cab5c20e6ac14c1de333711a0c", "269248eb8a44da5248cef840f7079b1294dbf237", "c8b9217ee36aebb9735e525b718490dc27c8c1cb", "7bfc7e45967897223b5d9278d0ef29cb2b5789be", "537328af75f50d49696972a6c34bca97c14bc762", "b68150bfdec373ed8e025f448b7a3485c16e3201", "3c57e28a4eb463d532ea2b0b1ba4b426ead8d9a0", "8bdf6f03bde08c424c214188b35be8b2dec7cdea", "041b51a81a977b5c64682c55414ad8d165c1f2ce", "a75ee7f4c4130ef36d21582d5758f953dba03a01", "d9ef1a80738bbdd35655c320761f95ee609b8f49", 
"3a2a37ca2bdc82bba4c8e80b45d9f038fe697c7d", "8e55486aa456cae7f04fe922689b3e99a0e409fe", "6b8329730b2e13178a577b878631735a1cd58a71", "0f0366070b46972fcb2976775b45681e62a94a26", "189b1859f77ddc08027e1e0f92275341e5c0fdc6", "a967426ec9b761a989997d6a213d890fc34c5fe3", "18a849b1f336e3c3b7c0ee311c9ccde582d7214f", "aa892fe17c06e2b18db2b12314499a741e755df7", "71bbda43b97e8dc8b67b2bde3c873fa6aacd439f", "ad6c7cc5c0f4ab273fef105ff3761d2c08609a20", "01e12be4097fa8c94cabeef0ad61498c8e7762f2", "83c1fee5ef4b7ba9d9730f3b550dd7bfbdaf591d", "1b6394178dbc31d0867f0b44686d224a19d61cf4", "dc5d9399b3796db7fd850990402dce221b98c8be", "be4f7679797777f2bc1fd6aad8af67cce5e5ce87", "b75eecc879da38138bf3ace9195ae1613fb6e3cc", "4adca62f888226d3a16654ca499bf2a7d3d11b71", "3eec9e8d5051e84624ea7e009a8947403dee99d1", "24b5ea4e262e22768813e7b6581f60e4ab9a8de7", "8518b501425f2975ea6dcbf1e693d41e73d0b0af", "54aacc196ffe49b3450059fccdf7cd3bb6f6f3c3", "c88c21eb9a8e08b66c981db35f6556f4974d27a8", "5fea26746f3140b12317fcf3bc1680f2746e172e", "f604c312ff4706f1849078b2ca28409f0fcd859d", "fc23a386c2189f221b25dbd0bb34fcd26ccf60fa", "913961d716a4102d3428224f999295f12438399f", "5a8ca0cfad32f04449099e2e3f3e3a1c8f6541c0", "3291aff20c171927eed7896eba659ce599ccb666", "36cf96fe11a2c1ea4d999a7f86ffef6eea7b5958", "9b93406f3678cf0f16451140ea18be04784faeee", "7acbf0b060e948589b38d5501ca217463cfd5c2f", "dc1105c7171a0922bfde7612aa069231720ee694", "59b11427853b7892a9f0d8ab6683d96ce59c2ff2", "539f55c0e2501c1d86791c8b54b225d9b3187b9c", "0fcf04fda0bea5265b73c85d2cc2f7f70416537b", "ca447d6479554b27b4afbd0fd599b2ed39f2c335", "1251deae1b4a722a2155d932bdfb6fe4ae28dd22", "0ad4a814b30e096ad0e027e458981f812c835aa0", "50614ff325f0c8ca20f99efc55d65a8d4cc768cd", "c44c84540db1c38ace232ef34b03bda1c81ba039", "df0e280cae018cebd5b16ad701ad101265c369fa", "23ee7b7a9ca5948e81555aaf3a044cfec778f148", "4d625677469be99e0a765a750f88cfb85c522cce", "0c12cbb9b9740dfa2816b8e5cde69c2f5a715c58", "54756f824befa3f0c2af404db0122f5b5bbf16e0", 
"0abfb5b89e9546f8a5c569ab35b39b888e7cea46", "cbd004d4c5e3b64321dc1a8f05fa5d64500389c2", "133dd0f23e52c4e7bf254e8849ac6f8b17fcd22d", "ff25c6602305ac46e9c35ffa4e30b14d679a5413", "b0b944b3a783c2d9f12637b471fe1efb44deb52b", "7859667ed6c05a467dfc8a322ecd0f5e2337db56", "b5bd67ada6de799d96f65ef0f1b6ba1cb85e3dd8", "26b606ac6beb2977a7853b032416c23c7b36cb8a", "82417d8ec8ac6406f2d55774a35af2a1b3f4b66e", "2d990b04c2bd61d3b7b922b8eed33aeeeb7b9359", "853bd61bc48a431b9b1c7cab10c603830c488e39", "4c71b0cdb6b80889b976e8eb4457942bd4dd7b66", "159e792096756b1ec02ec7a980d5ef26b434ff78", "42f6f5454dda99d8989f9814989efd50fe807ee8", "cac8bb0e393474b9fb3b810c61efdbc2e2c25c29", "d61578468d267c2d50672077918c1cda9b91429b", "4c141534210df53e58352f30bab558a077fec3c6", "8a1e95b82d8cf27e0034e127091396efd4c8bd9e", "061c84a4143e859a7caf6e6d283dfb30c23ee56e", "1da83903c8d476c64c14d6851c85060411830129", "7d41b67a641426cb8c0f659f0ba74cdb60e7159a", "4c6886c489e93ccab5a1124555a6f3e5b0104464", "0faee699eccb2da6cf4307ded67ba8434368257b", "4863333b9e5f25423e273a0581de3edee8bb3b97", "31ace8c9d0e4550a233b904a0e2aabefcc90b0e3", "1b69b860e22278a6f482507b8ce879082dd00c44", "09b80d8eea809529b08a8b0ff3417950c048d474", "e476cbcb7c1de73a7bcaeab5d0d59b8b3c4c1cbf", "e3a6e5a573619a97bd6662b652ea7d088ec0b352", "0182d090478be67241392df90212d6cd0fb659e6", "be437b53a376085b01ebd0f4c7c6c9e40a4b1a75", "f328137ba1924c8b451be32c7bd8d1d9a5c392d6", "bd9e0b6a90b51cc19b65f51dacd08ce1a7ccaac5", "4b74f2d56cd0dda6f459319fec29559291c61bff", "778c1e95b6ea4ccf89067b83364036ab08797256", "e2b3aae594035e58f72125e313e92c7c4cc9d5bb", "ca3e88d87e1344d076c964ea89d91a75c417f5ee", "46c82cfadd9f885f5480b2d7155f0985daf949fc", "7574f999d2325803f88c4915ba8f304cccc232d1", "f86ddd6561f522d115614c93520faad122eb3b56", "c352b5ccd6fa1812b108d74d268ce3f19efccf0b", "2c0acaec54ab2585ff807e18b6b9550c44651eab", "88691c3b74753a8bd67459896c8660deece9a2b0", "2ebc35d196cd975e1ccbc8e98694f20d7f52faf3", "094357c1a2ba3fda22aa6dd9e496530d784e1721", 
"bfdafe932f93b01632a5ba590627f0d41034705d", "f26097a1a479fb6f32b27a93f8f32609cfe30fdc", "cbc562b3d3441b82ac756836027e0a085d142336", "a695c2240382e362262db72017ceae0365d63f8f", "424e52158b43e40f356af7eafb35c91a9e13db30", "a1dd806b8f4f418d01960e22fb950fe7a56c18f1", "390e212d4a874d8d2256e55fe0dee9193e4c376a", "ff8db3810f927506f3aa594d66d5e8658f3cf4d5", "d05513c754966801f26e446db174b7f2595805ba", "f834c50e249c9796eb7f03da7459b71205dc0737", "2d71e0464a55ef2f424017ce91a6bcc6fd83f6c3", "c5c56e9c884ac4070880ac481909bb6b621d2a3f", "ab133af7ec2726f712dd049213e6a27449d28c78", "c63b614865bd9e5b4944894083e5e9d4aba82d86", "aade6c3dbea3b0a918f87c85a36cb6b06eff4f5b", "ee1f9637f372d2eccc447461ef834a9859011ec1", "1a5b39a4b29afc5d2a3cd49087ae23c6838eca2b", "0273414ba7d56ab9ff894959b9d46e4b2fef7fd0", "776362314f1479f5319aaf989624ac604ba42c65", "68c5238994e3f654adea0ccd8bca29f2a24087fc", "4fc3c9aa51cd7922820bfd5547cf544ff99b415b", "aefc7c708269b874182a5c877fb6dae06da210d4", "14318d2b5f2cf731134a6964d8193ad761d86942", "930b3472592ced6665cd630be7ae57d4abb8b4b1", "3ada7640b1c525056e6fcd37eea26cd638815cd6", "fa32b29e627086d4302db4d30c07a9d11dcd6b84", "5d01283474b73a46d80745ad0cc0c4da14aae194", "290136947fd44879d914085ee51d8a4f433765fa", "a325d5ea42a0b6aeb0390318e9f65f584bd67edd", "641f0989b87bf7db67a64900dcc9568767b7b50f", "59bfeac0635d3f1f4891106ae0262b81841b06e4", "2f59f28a1ca3130d413e8e8b59fb30d50ac020e2", "d2598c088b0664c084413796f39697c6f821d56e", "3bd56f4cf8a36dd2d754704bcb71415dcbc0a165", "14fb3283d4e37760b7dc044a1e2906e3cbf4d23a", "004d5491f673cd76150f43b0a0429214f5bfd823", "a8638a07465fe388ae5da0e8a68e62a4ee322d68", "a481e394f58f2d6e998aa320dad35c0d0e15d43c", "177d1e7bbea4318d379f46d8d17720ecef3086ac", "9aab33ce8d6786b3b77900a9b25f5f4577cea461", "c614450c9b1d89d5fda23a54dbf6a27a4b821ac0", "1f8656e2254e353a91cceb08b33c25643a1b1fb7", "3d0b2da6169d38b56c58fe5f13342cf965992ece", "9e60614fd57afe381ae42c6ee0b18f32f60bb493", "81a4397d5108f6582813febc9ddbeff905474120", 
"e5799fd239531644ad9270f49a3961d7540ce358", "4d0b3921345ae373a4e04f068867181647d57d7d", "56e25358ebfaf8a8b3c7c33ed007e24f026065d0", "7ab930146f4b5946ec59459f8473c700bcc89233", "80277fb3a8a981933533cf478245f262652a33b5", "405526dfc79de98f5bf3c97bf4aa9a287700f15d", "2020e8c0be8fa00d773fd99b6da55029a6a83e3d", "0ce3a786aed896d128f5efdf78733cc675970854", "e908ce44fa94bb7ecf2a8b70cb5ec0b1a00b311a", "d666ce9d783a2d31550a8aa47da45128a67304a7", "378ae5ca649f023003021f5a63e393da3a4e47f0", "b13e2e43672e66ba45d1b852a34737e4ce04226b", "d79365336115661b0e8dbbcd4b2aa1f504b91af6", "21258aa3c48437a2831191b71cd069c05fb84cf7", "97b8249914e6b4f8757d22da51e8347995a40637", "071ec4f3fb4bfe6ae9980477d208a7b12691710e", "c253694c153cc016d745df089bae0220e7f297ee", "4b60e45b6803e2e155f25a2270a28be9f8bec130", "b6620027b441131a18f383d544779521b119c1aa", "458677de7910a5455283a2be99f776a834449f61", "90221884fe2643b80203991686af78a9da0f9791", "75a9d9ea6c1a5ee55fc0ccb347b263785b15ac0a", "401e6b9ada571603b67377b336786801f5b54eee", "06bad0cdda63e3fd054e7b334a5d8a46d8542817", "38a9ca2c49a77b540be52377784b9f734e0417e4", "64cf1cda80a23ed6fc1c8e66065614ef7bdeadf3", "053b263b4a4ccc6f9097ad28ebf39c2957254dfb", "3039627fa612c184228b0bed0a8c03c7f754748c", "53ce84598052308b86ba79d873082853022aa7e9", "3bd10f7603c4f5a4737c5613722124787d0dd818", "f63b3b8388bc4dcd4a0330402af37a59ce37e4f3", "533bfb82c54f261e6a2b7ed7d31a2fd679c56d18", "248db911e3a6a63ecd5ff6b7397a5d48ac15e77a", "3d2d439ead6e32877ce40e5568e62dee4a877836", "e86008f6aebd0ab26bdb69d2549b2e8454b8959c", "96b6f8ac898c8ef6b947c50bb66fe6b1e6f2fb11", "a14db48785d41cd57d4eac75949a6b79fc684e70", "d3409f66d35f5828affda26fc3416771eb8154b1", "2f065b51547c671ad9267d08f9f428950849ab9f", "15e12d5c4d80a2b6f4d957a3ffd130564e9bab3a", "1177977134f6663fff0137f11b81be9c64c1f424", "d01303062b21cd9ff46d5e3ff78897b8499480de", "1910f5f7ac81d4fcc30284e88dee3537887acdf3", "89de30a75d3258816c2d4d5a733d2bef894b66b9", "dc107e7322f7059430b4ef4991507cb18bcc5d95", 
"5666ed763698295e41564efda627767ee55cc943", "f0f0e94d333b4923ae42ee195df17c0df62ea0b1", "f7b4bc4ef14349a6e66829a0101d5b21129dcf55", "4a4b5ae5793696b861aa009932e7a121d36ad67a", "9b246c88a0435fd9f6d10dc88f47a1944dd8f89e", "f1061b2b5b7ca32edd5aa486aecc63a0972c84f3", "4622b82a8aff4ac1e87b01d2708a333380b5913b", "ae18ccb35a1a5d7b22f2a5760f706b1c11bf39a9", "22a7f1aebdb57eecd64be2a1f03aef25f9b0e9a7", "7923742e2af655dee4f9a99e39916d164bc30178", "955e2a39f51c0b6f967199942d77625009e580f9", "cacce7f4ce74e3269f5555aa6fd83e48baaf9c96", "57c270a9f468f7129643852945cf3562cbb76e07", "af97a51f56cd6b793cf96692931a8d1ddbe4e3cc", "1677d29a108a1c0f27a6a630e74856e7bddcb70d", "2f88d3189723669f957d83ad542ac5c2341c37a5", "ab8f9a6bd8f582501c6b41c0e7179546e21c5e91", "0a15b8c7d529c7facc2d3b4c2111801dd4adfc28", "a9d861e270b8b1e6deea1936b258f49f1823005b", "23c3eb6ad8e5f18f672f187a6e9e9b0d94042970", "385750bcf95036c808d63db0e0b14768463ff4c6", "11dc744736a30a189f88fa81be589be0b865c9fa", "8d0bc14589dea1f4f88914ffcb57a5c54830f2cc", "bb7f2c5d84797742f1d819ea34d1f4b4f8d7c197", "633101e794d7b80f55f466fd2941ea24595e10e6", "36b13627ee8a5a8cd04645213aabfa917bbd32f5", "52a9f957f776c8b3d913cfcd20452b9e31c27845", "922838dd98d599d1d229cc73896d55e7a769aa7c", "8a2bedaa38abf173823944f0de2c84f5b2549609", "f39783847499dd56ba39c1f3b567f64dfdfa8527", "20d6a4aaf5abf2925fdce2780e38ab1771209f76", "e88988f4696e7e2925ed96467fde4314bfa95eff", "a0d6390dd28d802152f207940c7716fe5fae8760", "976e0264bb57786952a987d4456850e274714fb8", "f558af209dd4c48e4b2f551b01065a6435c3ef33", "65b737e5cc4a565011a895c460ed8fd07b333600", "9590b09c34fffda08c8f54faffa379e478f84b04", "588bed36b3cc9e2f26c39b5d99d6687f36ae1177", "94a11b601af77f0ad46338afd0fa4ccbab909e82", "992e4119d885f866cb715f4fbf0250449ce0db05", "2a92bda6dbd5cce5894f7d370d798c07fa8783f4", "d9318c7259e394b3060b424eb6feca0f71219179", "26bbe76d1ae9e05da75b0507510b92e7e6308c73", "0163d847307fae508d8f40ad193ee542c1e051b4", "26fcefb80af66391e07e6239933de943c1cddc6e", 
"73ba33e933e834b815f62a50aa1a0e15c6547e83", "4e343c66c5fe7426132869d552f0f205d1bc5307", "cc9057d2762e077c53e381f90884595677eceafa", "20e505cef6d40f896e9508e623bfc01aa1ec3120", "17738b0972571e7b4ae471d1b2dccea5ce057511", "6577c76395896dd4d352f7b1ee8b705b1a45fa90", "0b3786a3a0ea7ec08f01636124c183dbee8f625f", "6256b47342f080c62acd106095cf164df2be6020", "2dd2c7602d7f4a0b78494ac23ee1e28ff489be88", "17fd37ef5678ce967e47cb670ebf81cf26b70a5e", "db227f72bb13a5acca549fab0dc76bce1fb3b948", "3ce96f03874d42345c0727edc78b6949b20b4a11", "784a83437b3dba49c0d7ccc10ac40497b84661a5", "080936799b4ada10785f0e227a2d10b054473127", "25c3cdbde7054fbc647d8be0d746373e7b64d150", "824d1db06e1c25f7681e46199fd02cb5fc343784", "391b86cf16c2702dcc4beee55a6dd6d3bd7cf27b", "c41de506423e301ef2a10ea6f984e9e19ba091b4", "c0270a57ad78da6c3982a4034ffa195b9e932fda", "e1f790bbedcba3134277f545e56946bc6ffce48d", "14ce7635ff18318e7094417d0f92acbec6669f1c", "318a81acdd15a0ab2f706b5f53ee9d4d5d86237f", "24de12df6953151ef5cd0379e205eb0f57ff9d1f", "79dc84a3bf76f1cb983902e2591d913cee5bdb0e", "44b30a1048465cd56904cdcbec8e79dffab693bd", "7f5346a169c9784ca79aca5d95ae8bf2ebab58e3", "743e582c3e70c6ec07094887ce8dae7248b970ad", "5594beb2b314f5433bd7581f64bdbc58f2933dc4", "16395b40e19cbc6d5b82543039ffff2a06363845", "7cee0311e71dca540aaf3d87bef3a6c97ca39bc3", "d066575b48b552a38e63095bb1f7b56cbb1fbea4", "7196b3832065aec49859c61318037b0c8c12363a", "a6583c8daa7927eedb3e892a60fc88bdfe89a486", "35b3dc0e961a15a7a60b95490a989f91680acc7c", "0eed55ea9f401f25e1474cdbaf09367f44b4f490", "4d442ea40635a10fd3e642a7161dfc8f2b15a71e", "706b9767a444de4fe153b2f3bff29df7674c3161", "aa577652ce4dad3ca3dde44f881972ae6e1acce7", "f95ba7673789d1b4118d30e360a5a37fd75d3961", "fffe5ab3351deab81f7562d06764551422dbd9c4", "5fe3a9d54d5070308803dd8ef611594f59805400", "6d8e3f3a83514381f890ab7cd2a1f1c5be597b69", "c3beae515f38daf4bd8053a7d72f6d2ed3b05d88", "f4ebbeb77249d1136c355f5bae30f02961b9a359", "37007af698b990a3ea8592b11d264b14d39c843f", 
"074af31bd9caa61fea3c4216731420bd7c08b96a", "37179032085e710d1d62a1ba2e9c1f63bb4dde91", "1fcb905e4505a781fb0b375eb470f5661e38ae39", "5305bfdff39ae74d2958ba28d42c16495ce2ff86", "f7de943aa75406fe5568fdbb08133ce0f9a765d4", "29f27448e8dd843e1c4d2a78e01caeaea3f46a2d", "65f0b05052c3145a58c2653821e5429ca62555ce", "486a82f50835ea888fbc5c6babf3cf8e8b9807bc", "100641ed8a5472536dde53c1f50fa2dd2d4e9be9", "c1dfabe36a4db26bf378417985a6aacb0f769735", "27218ff58c3f0e7d7779fba3bb465d746749ed7c", "55cc90968e5e6ed413dd607af2a850ac2f54e378", "fd7b6c77b46420c27725757553fcd1fb24ea29a8", "3abc833f4d689f37cc8a28f47fb42e32deaa4b17", "556545eec370b9d300fc044a1aa63fc44fd79b0f", "96fc15d01a202446179546a5bea8106a414232a7", "47a2727bd60e43f3253247b6d6f63faf2b67c54b", "4b7f21b48c7e0dc7334e36108f558d54642c17c0", "7a6d9f89e0925a220fe3dfba4f0d2745f8be6c9a", "2d38fd1df95f5025e2cee5bc439ba92b369a93df", "0e73d2b0f943cf8559da7f5002414ccc26bc77cd", "fdfaf46910012c7cdf72bba12e802a318b5bef5a", "3896c62af5b65d7ba9e52f87505841341bb3e8df", "0058cbe110933f73c21fa6cc9ae0cd23e974a9c7", "8686b15802529ff8aea50995ef14079681788110", "5763b09ebca9a756b4adebf74d6d7de27e80e298", "0be418e63d111e3b94813875f75909e4dc27d13a", "8ebe2df4d82af79f0f082ced70f3a73d7fb93b66", "7f904093e6933cab876e87532111db94c71a304f", "051f03bc25ec633592aa2ff5db1d416b705eac6c", "0db36bf08140d53807595b6313201a7339470cfe", "8db9188e5137e167bffb3ee974732c1fe5f7a7dc", "ea218cebea2228b360680cb85ca133e8c2972e56", "55966926e7c28b1eee1c7eb7a0b11b10605a1af0", "76d1c6c6b67e67ced1f19a89a5034dafc9599f25", "7a1ce696e260899688cb705f243adf73c679f0d9", "435642641312364e45f4989fac0901b205c49d53", "2f841ff062053f38725030aa1b77db903dad1efb", "3f5693584d7dab13ffc12122d6ddbf862783028b", "e790a2538579c8e2ef9b314962ab26197d6664c6", "9a0c7a4652c49a177460b5d2fbbe1b2e6535e50a", "5b89744d2ac9021f468b3ffd32edf9c00ed7fed7", "5dfebcb7bfefb1af1cfef61a151abfe98a7e7cfa", "15396d785fbbc977957565d3d7478a18672cb83b", "1b4f6f73c70353869026e5eec1dd903f9e26d43f", 
"2ef51b57c4a3743ac33e47e0dc6a40b0afcdd522", "1e58d7e5277288176456c66f6b1433c41ca77415", "3d6943f1573f992d6897489b73ec46df983d776c", "4cb0e0c0e9b92e457f2c546dc25b9a4ff87ff819", "6043006467fb3fd1e9783928d8040ee1f1db1f3a", "0cf7741e1fdb11a77cdf39b4dda8c65a62af4f23", "72119cb98f9502ec639de317dccea57fd4b9ee55", "7e8016bef2c180238f00eecc6a50eac473f3f138", "ac48ecbc7c3c1a7eab08820845d47d6ce197707c", "d2d9612d3d67582d0cd7c1833599b88d84288fab", "35d42f4e7a1d898bc8e2d052c38e1106f3e80188", "2f16baddac6af536451b3216b02d3480fc361ef4", "359b4a4c6cb58c8ab5e8eaaed0e8562c8c43a0f9", "5a93f9084e59cb9730a498ff602a8c8703e5d8a5", "92c4636962b719542deb984bd2bf75af405b574c", "345bea5f7d42926f857f395c371118a00382447f", "8e4808e71c9b9f852dc9558d7ef41566639137f3", "0f4eb63402a4f3bae8f396e12133684fb760def1", "d1d6f1d64a04af9c2e1bdd74e72bd3ffac329576", "9f8ebf149aed8a0eda5c3375c9947c6b26eb7873", "f05ad40246656a977cf321c8299158435e3f3b61", "356a144d2aa5cc5e74d178dae3963003871aa8a1", "59b6ff409ae6f57525faff4b369af85c37a8dd80", "4be774af78f5bf55f7b7f654f9042b6e288b64bd", "bbc4b376ebd296fb9848b857527a72c82828fc52", "5798055e11e25c404b1b0027bc9331bcc6e00555", "fc2bad3544c7c8dc7cd182f54888baf99ed75e53", "159caaa56c2291bedbd41d12af5546a7725c58d4", "9854145f2f64d52aac23c0301f4bb6657e32e562", "2e0f5e72ad893b049f971bc99b67ebf254e194f7", "53fdcc3a5a7e42590c21bbb4fe90d7f353ca21e5", "c5fe40875358a286594b77fa23285fcfb7bda68e", "a8faeef97e2a00eddfb17a44d4892c179a7cc277", "34ce703b7e79e3072eed7f92239a4c08517b0c55", "b3154d981eca98416074538e091778cbc031ca29", "02dd0af998c3473d85bdd1f77254ebd71e6158c6", "eac97959f2fcd882e8236c5dd6035870878eb36b", "5502dfe47ac26e60e0fb25fc0f810cae6f5173c0", "384945abd53f6a6af51faf254ba8ef0f0fb3f338", "9b1a70d6771547cbcf6ba646f8775614c0162aca", "3e01f2fefe219bfeb112f1d82e76ebba4c0e2aac", "4c8e5fc0877d066516bb63e6c31eb1b8b5f967eb", "96fbadc5fa1393d59ce0b8fd3d71aebc1fe35b40", "0830c9b9f207007d5e07f5269ffba003235e4eff", "782188821963304fb78791e01665590f0cd869e8", 
"a6e8a8bb99e30a9e80dbf80c46495cf798066105", "80bd795930837330e3ced199f5b9b75398336b87", "5c3dce55c61ee86073575ac75cc882a215cb49e6", "7fa2605676c589a7d1a90d759f8d7832940118b5", "06f146dfcde10915d6284981b6b84b85da75acd4", "0af48a45e723f99b712a8ce97d7826002fe4d5a5", "d6ca3dc01de060871839d5536e8112b551a7f9ff", "4ed6c7740ba93d75345397ef043f35c0562fb0fd", "3a4f522fa9d2c37aeaed232b39fcbe1b64495134", "47638197d83a8f8174cdddc44a2c7101fa8301b7", "966e36f15b05ef8436afecf57a97b73d6dcada94", "8557914593e8540fcdd9b11aef076f68d41d3b4b", "69a68f9cf874c69e2232f47808016c2736b90c35", "7fa00c81f7c2d8da1551334b0e7bc3d7fd43130c", "0fe96806c009e8d095205e8f954d41b2b9fd5dcf", "1630e839bc23811e340bdadad3c55b6723db361d", "cf4c1099bef189838877c8785812bc9baa5441ed", "f663ad5467721159263c1cde261231312893f45d", "43476cbf2a109f8381b398e7a1ddd794b29a9a16", "c9424d64b12a4abe0af201e7b641409e182babab", "2d84c0d96332bb4fbd8acced98e726aabbf15591", "f69de2b6770f0a8de6d3ec1a65cb7996b3c99317", "023ed32ac3ea6029f09b8c582efbe3866de7d00a", "078d507703fc0ac4bf8ca758be101e75ea286c80", "0486214fb58ee9a04edfe7d6a74c6d0f661a7668", "eca706b4d77708452bdad1c98a23e4e88ce941ab", "34dd83115195676e7a8b008eb0e9abe84b330b32", "cd23dc3227ee2a3ab0f4de1817d03ca771267aeb", "c1f05b723e53ac4eb1133249b445c0011d42ca79", "1d696a1beb42515ab16f3a9f6f72584a41492a03", "77c3574a020757769b2ca807ff4b95a88eaa2a37", "affa61d044daa1a7d43a6803a743eab47c89c45d", "0c79a39a870d9b56dc00d5252d2a1bfeb4c295f1", "aeff403079022683b233decda556a6aee3225065", "397022a4460750c762dbb0aaebcacc829dee8002", "2e475f1d496456831599ce86d8bbbdada8ee57ed", "43b8b5eeb4869372ef896ca2d1e6010552cdc4d4", "1df84bf495d15569258513f229325d922b91e045", "5b73b7b335f33cda2d0662a8e9520f357b65f3ac", "09628e9116e7890bc65ebeabaaa5f607c9847bae", "124538b3db791e30e1b62f81d4101be435ee12ef", "1d53aebe67d0e088e2da587fd6b08c8e8ed7f45c", "286adff6eff2f53e84fe5b4d4eb25837b46cae23", "adf7ccb81b8515a2d05fd3b4c7ce5adf5377d9be", "9b6d9f0923e1d42c86a1154897b1a9bd7ba6716c", 
"9821669a989a3df9d598c1b4332d17ae8e35e294", "21e828071249d25e2edaca0596e27dcd63237346", "15a9f812e781cf85c283f7cf2aa2928b370329c5", "57ee3a8b0cafe211d1e9b477d210bb78b9d43bc1", "f6b4811c5e7111485e2c9cc5bf63f8ac80f3e2d7", "177bc509dd0c7b8d388bb47403f28d6228c14b5c", "1a81c722727299e45af289d905d7dcf157174248", "0ea38a5ba0c8739d1196da5d20efb13406bb6550", "e506cdb250eba5e70c5147eb477fbd069714765b", "4268ae436db79c4eee8bc06e9475caff3ff70d57", "6a38c575733b0f7118970238e8f9b480522a2dbc", "73fd7e74457e0606704c5c3d3462549f1b2de1ad", "6cddc7e24c0581c50adef92d01bb3c73d8b80b41", "5e6ba16cddd1797853d8898de52c1f1f44a73279", "2785c5769489825671a6138fdf0537fcd444038a", "550351edcfd59d3666984771f5248d95548f465a", "4307e8f33f9e6c07c8fc2aeafc30b22836649d8c", "7668ce758af72df8e0a10d4b3cb0fd58092fe3e1", "71dcbca34d71bda0bc41c33c04d2c1a740274feb", "785eeac2e236a85a45b4e0356c0745279c31e089", "a6793de9a01afe47ffbb516cc32f66625f313231", "68f89c1ee75a018c8eff86e15b1d2383c250529b", "0b6616f3ebff461e4b6c68205fcef1dae43e2a1a", "9e9052256442f4e254663ea55c87303c85310df9", "a25106a76af723ba9b09308a7dcf4f76d9283589", "db9ef28cc3531a27c273d769e1b1d6b8aeff2db4", "346752e3ab96c93483413be4feaa024ccfe9499f", "5bdd9f807eec399bb42972a33b83afc8b607c05c", "8bbbdff11e88327816cad3c565f4ab1bb3ee20db", "82e66c4832386cafcec16b92ac88088ffd1a1bc9", "42eda7c20db9dc0f42f72bb997dd191ed8499b10", "46e0703044811c941f0b5418139f89d46b360aa3", "2ff9ffedfc59422a8c7dac418a02d1415eec92f1", "2559b15f8d4a57694a0a33bdc4ac95c479a3c79a", "7914c3f510e84a3d83d66717aad0d852d6a4d148", "1cfe8c1d341dbf8cc43040b37ca3552385adb10b", "223ec77652c268b98c298327d42aacea8f3ce23f", "477236563c6a6c6db922045453b74d3f9535bfa1", "53f5cb365806c57811319a42659c9f68b879454a", "4f0d9200647042e41dea71c35eb59e598e6018a7", "218ce079b9e64288faf20a87043dc32884105102", "5bb4fd87fa4a27ddacd570aa81c2d66eb4721019", "2ef328e035b2b5501ceddc0052615d4cebac6f1f", "0d3bb75852098b25d90f31d2f48fd0cb4944702b", "6324fada2fb00bd55e7ff594cf1c41c918813030", 
"316d51aaa37891d730ffded7b9d42946abea837f", "869a2fbe42d3fdf40ed8b768edbf54137be7ac71", "0b51197109813d921835cb9c4153b9d1e12a9b34", "2adffdffa16475ae71bb2adcf65840f01f1e53f7", "51528cdce7a92835657c0a616c0806594de7513b", "f839ae810338e3b12c8e2f8db6ce4d725738d2d9", "0653dcdff992ad980cd5ea5bc557efb6e2a53ba1", "bc66685acc64fa3c425c0ee6c443d3fa87db7364", "13f065d4e6dfe2a130bd64d73eee97d10d9f7d33", "b7fa06b76f4b9263567875b2988fb7bbc753e69f", "96f4a1dd1146064d1586ebe86293d02e8480d181", "4682fee7dc045aea7177d7f3bfe344aabf153bd5", "dc84d3f29c52e6d296b5d457962c02074aa75d0f", "2d0363a3ebda56d91d704d5ff5458a527775b609", "565f7c767e6b150ebda491e04e6b1de759fda2d4", "38f06a75eb0519ae1d4582a86ef4730cc8fb8d7f", "4568063b7efb66801e67856b3f572069e774ad33", "9c23859ec7313f2e756a3e85575735e0c52249f4", "09137e3c267a3414314d1e7e4b0e3a4cae801f45", "51b770e6b2af994ffc8793f59b24a9f619033a3a", "2af620e17d0ed67d9ccbca624250989ce372e255", "682760f2f767fb47e1e2ca35db3becbb6153756f", "f3fcaae2ea3e998395a1443c87544f203890ae15", "189a839c708f95772ccaad72bfb4d0321d1535d6", "d98a36081a434451184fa4becb59bf5ec55f3a1e", "676f9eabf4cfc1fd625228c83ff72f6499c67926", "3519241c9ac13ca43e533844e2d3644d162dde22", "131178dad3c056458e0400bed7ee1a36de1b2918", "9e8d87dc5d8a6dd832716a3f358c1cdbfa97074c", "7f57e9939560562727344c1c987416285ef76cda", "9745a7f38c9bba9d2fd076813fc9ab7a128a3e19", "705e086bb666d129a6969882cfa49282116a638e", "da4170c862d8ae39861aa193667bfdbdf0ecb363", "5922e26c9eaaee92d1d70eae36275bb226ecdb2e", "17d01f34dfe2136b404e8d7f59cebfb467b72b26", "4a733a0862bd5f7be73fb4040c1375a6d17c9276", "2c06781ba75d51f5246d65d1acf66ab182e9bde6", "b59c8b44a568587bc1b61d130f0ca2f7a2ae3b88", "5050807e90a925120cbc3a9cd13431b98965f4b9", "587f81ae87b42c18c565694c694439c65557d6d5", "233be88c7ce1fbf1c1680643dca7869dc637b379", "99d7678039ad96ee29ab520ff114bb8021222a91", "f0a3f12469fa55ad0d40c21212d18c02be0d1264", "1e8394cc9fe7c2392aa36fb4878faf7e78bbf2de", "2faa09413162b0a7629db93fbb27eda5aeac54ca", 
"9a59abdf3460970de53e09cb397f47d86744f472", "3b4fd2aec3e721742f11d1ed4fa3f0a86d988a10", "53509017a25ac074b5010bb1cdba293cdf399e9b", "191674c64f89c1b5cba19732869aa48c38698c84", "4f591e243a8f38ee3152300bbf42899ac5aae0a5", "8de5dc782178114d9424d33d9adabb2f29a1ab17", "e51927b125640bfc47bbf1aa00c3c026748c75bd", "bed06e7ff0b510b4a1762283640b4233de4c18e0", "a538b05ebb01a40323997629e171c91aa28b8e2f", "a9791544baa14520379d47afd02e2e7353df87e5", "32d8e555441c47fc27249940991f80502cb70bd5", "6359fcb0b4546979c54818df8271debc0d653257", "345cc31c85e19cea9f8b8521be6a37937efd41c2", "dcc38db6c885444694f515d683bbb50521ff3990", "593234ba1d2e16a887207bf65d6b55bbc7ea2247", "1617f56c86bf8ea61de62062a97961d23fcf03d3", "026a9cfe3135b7b62279bc08e2fb97e0e9fad5c4", "042825549296ea419d95fcf0b5e71f72070a5f0d", "f4b5a8f6462a68e79d643648c780efe588e4b6ca", "919d3067bce76009ce07b070a13728f549ebba49", "3be027448ad49a79816cd21dcfcce5f4e1cec8a8", "552122432b92129d7e7059ef40dc5f6045f422b5", "abe4c1d6b964c4f5443b0334a44f0b03dd1909f4", "f1da4d705571312b244ebfd2b450692fd875cd1f", "271df16f789bd2122f0268c3e2fa46bc0cb5f195", "4aa286914f17cd8cefa0320e41800a99c142a1cd", "0e454686f83284ced2ffc5740829552a032671a3", "bf4f79fd31493648d80d0a4a8da5edeeaba74055", "86a8b3d0f753cb49ac3250fa14d277983e30a4b7", "1afd481036d57320bf52d784a22dcb07b1ca95e2", "061e29eae705f318eee703b9e17dc0989547ba0c", "08c76a4cc6f402c37a050cae5390427a5b66a467", "6e2041a9b5d840b0c3e4195241cd110640b1f5f3", "da928ac611e4e14e454e0b69dfbf697f7a09fb38", "cf2e1ebb9609f46af6de0c15b4f48d03e37e54ba", "3137a3fedf23717c411483c7b4bd2ed646258401", "0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e", "314c4c95694ff12b3419733db387476346969932", "a51882cfd0706512bf50e12c0a7dd0775285030d", "98c5dc00bd21a39df1d4411641329bdd6928de8a", "718d3137adba9e3078fa1f698020b666449f3336", "75da1df4ed319926c544eefe17ec8d720feef8c0", "d24d3370b2e7d254e999140024d8a7bddf701502", "7ace44190729927e5cb0dd5d363fcae966fe13f7", "f9c563ec6c8238aaf420327bd7f9d8fbf8de3bce", 
"1d1ffd3a372e9470c43ff785436165940276212b", "07faa38d4d0e9d14d72bd049362efa83fae78ee3", "3fffeda70f3d1a798ff5f1dbfcf5ecb30eaa5c4e", "fa3ae082e585e131e83b5ce934c6549d6a056205", "09396d72f2f8f82794fd6a2a36d173c86a56904c", "84676c330e4f8962703ca531db761c96bfda8067", "b380d7cd4bd8087a2b106233ddaa86a341f0d0bf", "755d1f600b493136b71e8b7fec9b7d4c6af7bf21", "3490683560ca18d19884949dccca0ad7c98d4749", "2f0d5cd2d25ea2f3add0139cf4b61f358435bab8", "cc4a2cab31ed06d0d8723df0bdf8cd0ece71bbe9", "dee460eab759c02b03e5cb6495ca73112d857e55", "e58a1ec612091abf014bb48481def84f1b397939", "0376e3273d2c7bf2af53fbf5904c70bdb56383d1", "f3ff9544d117dcdb001c07dfe37c4ad46d4a23d9", "06830abc752757837883b4950160bba167a90cdf", "f6dd24709765025f6b2de1106995843624e149a8", "287afb29b5aef6255a5882418b87e6b41cc9b29d", "7c70697c3db90adca733a4e36cd5e1d603551da4", "ae88ee0fe6064617422ee00cd452c0c8d8aa1ba9", "55d9b72b2110e3a0a6fd7b002eee17945280ba15", "11a2f0c7bdc78bcf149bae0a07ded2aa0ce33ab5", "12c2f7cee1f6abff0d4de9b4b90caa3b5c6084a0", "138625d22b594a9301fd5d4e94b1357e0cd24a50", "6e4f07882d417a76fd7f25d89e848f944c3b3c3e", "62e698f4d2c9c70f51012bb3a4940783d9e01cc6", "c3231c32839b8317530f4624f8b2b028cb02ff5a", "9a49c7d584359bb444a81bcca55b03eb6b944c4c", "2ca761938bd789b82d1a4ca85e7b8d5661093660", "8af0af4545078d5b92a40cf7c2e95113496babce", "c2021ac068c23ba6a5360312fbfa0c0d2cfb47fd", "af1eab707e690e73a5b9073ed07a0436fd4e0b66", "286c1e0b34ee6d40706ca6a02604420a192204e7", "6ddc0558a2e0f97b36e6af2e3f3858ba184cd257", "ff7de2ea4d21e7d32d7f07e07fd278bebf6b5d66", "07a17771ca169bc01deb8f7dac1ff0c574ddc512", "233053b5962d4001adc7616c07616d1b43048a7f", "a92b0dfcf17686a8048fb286c6ecf2417243a3ad", "6fb3c2dc0facf6240618f33cd851053d192b6f8d", "976103822fbb72e71aa2b528ec67915a7b151688", "f049ea3ca734c217c380a1802d15a6d85378f55d", "9b9bf5e623cb8af7407d2d2d857bc3f1b531c182", "68eb46d2920d2e7568d543de9fa2fc42cb8f5cbb", "26e118084d09771bfb996628a7569200100bff1a", "09fa54f1ab7aaa83124d2415bfc6eb51e4b1f081", 
"0cb5079c39933bd8897fde7edecf156ff57830d7", "4db0968270f4e7b3fa73e41c50d13d48e20687be", "8c8525e626c8857a4c6c385de34ffea31e7e41d1", "f6e6b4d0b7c16112dcb71ff502033a2187b1ec9b", "64d7e62f46813b5ad08289aed5dc4825d7ec5cff", "e2df2b16d63dddc7a571ed3b993e5ffccbf2337f", "eed1dd2a5959647896e73d129272cb7c3a2e145c", "d7312149a6b773d1d97c0c2b847609c07b5255ec", "e5961e65169098351571e0828b46e3f8558a9faa", "fa9f1b236d0a252d4a56e26e8a9a41d496803413", "485f42b9bf3c5b63b52c34f7f3069a685b355a5d", "e8ff0d5d75221feba7f1aba066b2d596b7e7685b", "e3660a13fcd75cf876a6ce355c2c1a578cfb57cb", "2b1a7d4aee670b1df3ef9acc2c77940679666601", "c7efe52121f526791f5b414d60aa3e97bc058aef", "5428c21579fe82c822e5b0af86ab2dc4b826e6b7", "6821113166b030d2123c3cd793dd63d2c909a110", "d259d3652f03c7b80e29c986e9540ab00b1f1133", "80510c47d7fad872b18d865f3957568dc512780c", "1f27f9c0da385080f05f8cfaf0771e5aee6d9ab2", "7b2e083302f7ef8e93a3f83a2ffc0c366a743cba", "6bdca1b4ae169686da39d6f0e3ba70b3d39e0cbc", "9fd77ac4fae2df8468f22549985cfb53ed517cc9", "416c3781d02efa590e76dbf4ccb88ca1fd7b4de1", "51628cc779c6ccf8061aa77a18eed5c495515bcd", "c7873037228a009917085556c514a90f9817cb08", "578117ff493d691166fefc52fd61bad70d8752a9", "0ae6b023b599a17cf32b83606a9565c3ad539ecf", "5f9e6f2f84a6a9a64b1d5868e2782b4bae82b567", "87b5d74ae97a991bf5b45f0f947525234c37d370", "16c7c31a7553d99f1837fc6e88e77b5ccbb346b8", "314142838289cac416098cbc2bbe796dcbd2357b", "66284b8894bab0165c4210cd2df749f0b015c88e", "51b1d708bdb758de8ce217c51992f794c70abe95", "24e6a28c133b7539a57896393a79d43dba46e0f6", "2f910e4a72a2fea67ea6c83f36c7017fe26ca9a8", "0318c74b1cdec63de4c9577e9e600a48241adfc2", "5ec650d4506ac2bb066760420ee8185c93dbb2c1", "3c71475ac56fe2875de2a61254ea4f8636a1d971", "44347819fd94b20676862181ac4de55a5ca47a0c", "2de8508b33fe3ee290f94339a2f78f373e83b21d", "1c0f3a0fe2369e5357728ff81b6812e952f6337d", "51319bb12c67fb5b11cbf2012a7e2059718b52eb", "821fb8b05108b7ce5312bc5cac3b0e5d56e279ab", "a051854861453ae8a5e32ead7deed628c52f7fc5", 
"a19a5a604e07c525f9a8969c05f1c9ed5b497053", "02b33c654aa475f400552bf0743d8d2819e2c2bb", "9035dc898c10b203589699063908073b2f1d2e2a", "2eb14814511d93fcd01e81f4f838647eb10af3be", "0f57f91ab0bffdb2750cf52a9b2e4d8daf0abaa4", "107010b7f2abe3c0c9df62bcef35eb77f6fc76df", "c9018677ec5bf65d92fd9fefdbd32c4e292dc2e4", "60e065dbb795cc0d76ec187116eb87d1f42b5485", "9a9af8a5b6939a1da9936608fbf071f852eca7e1", "9f1ce2cb2c0e7e7ddf620cc59a7342bc04990efa", "1cbad37f54bb3d55c621c31e741472e15f832809", "586bfd960cbdba91eecbb06de994dacd38b9ab0f", "30fdf33bdc29c8bb3a2c5b1ff95ffd351c2f8e21", "464de30d3310123644ab81a1f0adc51598586fd2", "ae8febda8de382422962c4ff73f87d92c5e029df", "f49734bc34cb7cc50938a18e72d5fe338fa8a8fd", "9b976f7bfa636d89510fe5ad7fb7a8057b86a57f", "7f02f790f320194d5eccf7665066308e46f5d0e9", "98bb029afe2a1239c3fdab517323066f0957b81b", "16a2f42edb98495bb9b766c56a05edcd2ca4ef03", "616d3adb73ac0d8ba83d9ae4ed74c48e38e0f4e2", "097f482c548075305b5866d7d0fde7b67c30c52d", "8e861a7809905a003fffa821574e68ae0c0788e7", "2c2d2c82fda9f01f37cb86d80f5c0f5e963d68d0", "e56f2c9c0a2cf1db7ec49527cb70b345ec71cfbd", "c8654556a010a86b8eec681b290a1d06597c6202", "b1ffa7a926e129f8dccdd6f258fea034cbee9160", "50f3209d022d1ac185237425a3ad682d2234c1ca", "afd29ac2de84c8a6d48232477be018ec57d6f564", "75150b16cee83641f01074568fe4b2d2eb3d0a31", "ffe3d6fcfee0378e7afa0eb334a454144b5bd272", "22f21d58c6aecdb4f57c50fa9eb4952643eec0e9", "fccd8c1b41f7f7a91b7bbe896b31fa936b5aacbf", "e0cac58f3855cd84b9d28f508b2f7711e0d7e44a", "a04caf5898d01972e28a38faed9a2eeae78cf21e", "88b02b654a0206e47979618ff95697f4cab7f6a1", "2603d8578a6c95a9b9d4cb8a73bc66f18d523f37", "64f6f1cd23bbac1983ad4115475e4ef26ab86ba4", "b4a3f480e2004bdc8106de2f772283101bb290d0", "5bb14bba7510c590164007d7e3aa1bf88cb3faec", "45269e9746a5bd45b7a329ba1116622dba2e1ac1", "95a7ce2163b3fbd49bb8d0813a9f67e651bea3a5", "862f19f8317971fabc46cf0f994f4a8616f17b78", "15180235b45506f87057f53d688729fbd9f2e291", "c458db5d616058fbd9de19acc6c82827396cf195", 
"36cbcd70af6f2fd3e700e0a710acd5f1f6abebcf", "a0541d4a28d90a17cd3eaa9d1797882eacc8ccf0", "7346838bcd4c372fe79cb85ac519e0b4f06fd7b5", "7b6dc0a4feb3c940618f6c011ee1b75089e3b07a", "589b1677d6de28c47693c5816c32698860c32d10", "0611dae4ae932e0c5f28f08676d234dd9233732f", "2d919473cf43e2522b2366271b778ce6ce7dc75c", "a6f477f3c1cb2ab230fe8d89c31ae6af0b9c2346", "c25a71d01255b96c08dc36e79dc7200347574a04", "6752b59da83c03e64c73f9248a67304713b6efa9", "93498110032a458fddebfae80d7a93991e11673d", "54c5e9cded7da1f9dc695f5397d9d1a5ac5350af", "830fb48d04609bef50ae7f8f3513ce1b160894c9", "36358eff7c34de64c0ce8aa42cf7c4da24bf8e93", "d8f837265fe76e26c99052229c4997fbec20573a", "25f1a5121cb7fb67749a6f6dbc27fd48f177d5fb", "3ba5aa0995f129d2854d9690adb6d982bba4e675", "16e577820999e584c787ec611f55746cf9147518", "3c84e2ed018dd1d971b526f87e9d7c1f08e6230f", "10114df7ddbb221337cc1e99e1de0eab8e47c95d", "c7ec456099ae2d0eee073ca1d546d7ca57dd4a85", "df4525d7d99f7237c864adbcb2dab30d8f7447e0", "e255ca660def135eb6ac99127d08df032876d8ab", "bfec95482dc40936cbb33d386890aad48483adc4", "069cadd9d8e52ad2715a3551012a06e506191626", "b0623c1d8493d273d704ba1d0413db0de579ae77", "7cd5d7f8295b219b029a4231ae5cffb261e00ebe", "167ef72a08832f02e0bb4bb60b61b44adca4f804", "162ea969d1929ed180cc6de9f0bf116993ff6e06", "10f3d78cf8ae69a5889bea88d3ebe2c6507e5720", "4589d6bbb3186fc001ea2a42ae1ea2718edba915", "392c8e575f8520bb880959d494be0911d091b525", "f37dcdd86bea138706edb14459ca29de7b8d5ea9", "c675534be881e59a78a5986b8fb4e649ddd2abbe", "7480d8739eb7ab97c12c14e75658e5444b852e9f", "2a051c1f2787690fa9fa916fd548b62ce571f778", "40883844c1ceab95cb92498a92bfdf45beaa288e", "23e6e8ab8f62d8f67525313c823e3cb4424ac578", "ac968bf321f1dfa2d216dccc22fa5315de63d7bd", "f565ca9590820c341f1d29084e2d54ae490ffd41", "ca8ba835d8ebcc2d95d166ce34e051821367928f", "b859d1fc1a7ad756815490527319d458fa9af3d2", "4d1e46b1dcec1c9cbc4e7ff80dbf73e5e7ebcd67", "ceac97de889ed2f65af62f61a007651d03b36b6c", "2cf91c09d648d37c9833d9763721fe07213ae985", 
"d3b832f3c4e8b6d81eac24d6e070f756b9e8a7a1", "dbced84d839165d9b494982449aa2eb9109b8467", "6880013eb0b91a2b334e0be0dced0a1a79943469", "2dafea864f74a477414c3b71b742f7997e216102", "9f131b4e036208f2402182a1af2a59e3c5d7dd44", "e22adcd2a6a7544f017ec875ce8f89d5c59e09c8", "33554ff9d1d3b32f67020598320d3d761d7ec81f", "cfa205874bd192ab949132631a7eda995ecc57af", "c2508af974b8b1fd4ef097ef625e8bfd07474be0", "04dca7c7f85d607cba64ca56de3364a4085effa1", "cc4f29647db7b7d4fc30a66d63a341c20adc8840", "a58ba304ca0c37d94c7227feeeffb600a492d902", "864be4c09001478ec5a916401430f5099c022e06", "4747d169a5d6b48febfa111a8b28680159eb3bb2", "17ad76ef00d4cb584389682ca6b138a8bdc9a2da", "3faebe9d5c47fc90998811c4ac768706283d605c", "f0acbc128407606413df764cfea7350e8842c704", "4e09aa735d3f0514a5dd43bafa6a1515dfe76e9e", "8694cd9748fb1c128f91a572119978075fede848", "1280b35e4a20036fcfd82ee09f45a3fca190276f", "c286b2539ce1cbc11338409062f0c28a37dbc4c0", "dbc04694ef17c83bb12b3ad34da6092eab68ae68", "13be4f13dac6c9a93f969f823c4b8c88f607a8c4", "6fdc0bc13f2517061eaa1364dcf853f36e1ea5ae", "8e33183a0ed7141aa4fa9d87ef3be334727c76c0", "aed6af12148b43e4a24ee6e2bc3604ca59bd99a5", "7205b67fb64aba0043b43d167243aed3af72aa6b", "524634e1055637b7c22b29e7e36437f4ba80df04", "cd22375bf1d917b928aee006b65cd92c7bfe0927", "f2da0ed6dcb86a8c0ddc8c13245272a726cec480", "7c8adb2fa156b119a1f576652c39fb06e4e19675", "c9a91d1200ac7547e49a665e3b2b51426eeae27d", "f879556115284946637992191563849e840789d1", "6aef8eeff5f532dcdad95043ba464720be664ab8", "59ef1efb9239a101c2782fab8adc09b7af07d336", "2a48dc596c7a2f496169360e819b56a6c8d38e67", "5b24ef13fc9a51a9892f164bc142ffefc0b7a8ee", "7f3a73babe733520112c0199ff8d26ddfc7038a0", "3e1d799e5b7d5bd7e0d3b3bffb292878d27c5b7e", "c9da8cf341ce0dc02cdc765fc350e181d7fb3f53", "cdef0eaff4a3c168290d238999fc066ebc3a93e8", "0ca295be89efd110327411d4aa52660bc0eb48c4", "82d82272b365028294662ede914caf64e73495fb", "ec00ecb64fa206cea8b2e716955a738a96424084", "0cf2eecf20cfbcb7f153713479e3206670ea0e9c", 
"3f540faf85e1f8de6ce04fb37e556700b67e4ad3", "5b4ecdb48673a29bdff063131002285b3bb0400b", "68e239671615101ce3db879888a27be64b8ee0e3", "60c3b9a6622e359a90c384bf81fc0d46caacf469", "973022a1f9e30a624f5e8f7158b5bbb114f4af32", "13831e47759e11f8cc6c77fa64ad34272b409b34", "bb783436fdf6cb50164e60ce0594b317d2c4f4b5", "49484ace2786445c2181d2f2975ec3ce34f9ed6d", "1f94734847c15fa1da68d4222973950d6b683c9e", "5ce80b41443518a14d800f6b93b4057bbb007432", "31d30089d00d89715167ca4a130a5d262e1d79d3", "265c53ce3fbdb3f2623c4b20f38b94d3ed1d878c", "f79a1ab802be2ca879ddb7b813028da61134f30f", "4fb11a58d5a3ffc0bb6d4ade334a366b4a431b02", "70ec156f7e6de0275c7e4e95e35f1bc1e92e29b3", "c2d054f0d7f455d94f1d92959e0e549443977c55", "7a66e302652f50699304dedf46384d33edc9f4c1", "e99718d08aca2c49cd2848eebdbb7c7855b4e484", "d7d6f1b1e832bc7f52ed34131e3f200badb601e3", "ab1728e84ac682ca0c53435f712a512ac139e9c8", "d2c24fd4b54fdb88e6574da7c42653a94078d58c", "e6ca412a05002b51d358c2e3061913c3dab6b810", "67ad329894b92714e2ccfcd519758cde054e65ab", "7fcd03407c084023606c901e8933746b80d2ad57", "16b9d258547f1eccdb32111c9f45e2e4bbee79af", "a8748a79e8d37e395354ba7a8b3038468cb37e1f", "831fbef657cc5e1bbf298ce6aad6b62f00a5b5d9", "65783e66cb6d9c057edf7b37f6d0be24a78d3013", "23a955b33156abc9879d4d310c8f5a361a5e4ff8", "d0296efc3c532269aaa7e8f856f5d1807af847fb", "0192c20b9f4fab5a4d76d3e78239f0ddf5deca09", "954eca6c3daf17a081379d51980beb299c803d3a", "32f62da99ec9f58dd93e3be667612abcf00df16a", "5fd262d1962262225c59ae33b07e69f6635c3f67", "c5c53d42e551f3c8f6ca2c13335af80a882009fa", "e8d9f431ac20f0ef88291cf1b370fbbca028315a", "2625314d30a8dfaf918e93a8e7b243b2e078d191", "1a8ab75050d1c5a6722d7fa05bebf8fd4edd698d", "a75de488eaacb1dafffbe667465390f101498aaf", "c81326a1ecb7e71ae38a665779b8d959d3938d1a", "22f71559c88fe32b405a6fedf7ee099c32d9377e", "9ef9046cc26946acedda3f515d9149a76e19cd6e", "8f1f11c578e1d66be0ec81a6dc4eefd9a0704e56", "d4e669d5d35fa0ca9f8d9a193c82d4153f5ffc4e", "0991634b6b7f5b5d7045fe859c24ccb46152251e", 
"ef2bb8bd93fa8b44414565b32735334fa6823b56", "e1449be4951ba7519945cd1ad50656c3516113da", "852c48452295e48945779491e9f6b076c66d7eee", "694b72a84306112bd012073d24716e406ffcd4cf", "dbe101c7c4b5ea5986be38e4d6de70bfc4324683", "ec32c8476b5dbf6564bdde991f53cabaf5d6363c", "f8daab1e4f63051b78eb43e98ab723f6c425a6b5", "8fc730d22f33d08be927e5449f359dc15b5c3503", "bad206bfdbca3657c08868cd990cc95eb61a0c5a", "51c7236feaa2ae23cef78c7bca75c69d7081e24a", "181d0534f2c0233804a6f90c75c919d868fd58e1", "3802da31c6d33d71b839e260f4022ec4fbd88e2d", "c6542d17b212d808cba48cd2b1536446b14e38b3", "b64cc1f0772e9620ecf916019de85b7adb357b7a", "b49425f78907fcc447d181eb713abffc74dd85e4", "89be4d7e7fd6b12f1bbcc490bdf45c723330f716", "77e69753fc7cf007a136b12f102e1e11a93f87f5", "bf45eec73c22f446458091656bafdb037a79a5a6", "ea2d43aa2490331cd1406e1432ce706c53139323", "a9ae55c83a8047c6cdf7c958fd3d4a6bfb0a13df", "3a3c47b6da1ea1b8d57ce41d9ddb54a774e1914d", "e5a1b8c7f496900451014619387b429659faaf9c", "f2b2d50d6ca72666bab34e0f101ae1b18b434925", "5418b64b3ea9bc04ed518a6a8c0f19d6d3339295", "e0181f7596b475f7c7d31fd1eccad8e9b7379180", "ea03a569272d329090fe60d6bff8d119e18057d7", "49a5d855f91c6ec6d1724a200d33e92c41f73480", "3e67058c6ddd0afae692b7665f82124945ea2c5a", "1a31192ac79d0eccef596f93d6170117a0111396", "42cc9ea3da1277b1f19dff3d8007c6cbc0bb9830", "c95cd36779fcbe45e3831ffcd3314e19c85defc5", "b85580ff2d8d8be0a2c40863f04269df4cd766d9", "e7d777dd96777e9381d792c377db5d7347c9baf5", "81d0ab3201fbaef5aff57e9df2c12c7b4f228987", "616e69647b02e69cffa7eeb83cf3e72b8c532653", "fe60d81f726c8e20948b927b456a94a96d78fa26", "6187f91f1e53cf6f62afe30e01c7b1ed43505c9e", "97b930a4fa4670a609b6ee8811409090fe55b313", "20e210bb6b1d3e637e2b2674aeead3fad8c2c70e", "e0b6defafb69fa3bbaec279b3cba92edf50c760b", "92de9a54515f4ac8cc8e4e6b0dfab20e5e6bb09d", "a60540a8407fd117fd8e6857d4728e661f53dcc8", "ea5fdb7f6c2e94d1a67bff622c566ddc66be19ab", "7e1e189fff0b0cef3c231e6b2d01b65bef6027e7", "d15655bed4257ab86fcef8fd7cfdeb3d920e1bbd", 
"f7943ecda36b38725efda73d68b7ea70272451b8", "69c2b7565e080740e2bdb664e6b00fd760609889", "9e86e9853c261f2e4affe61674ca26a537ba65de", "8036bc816101ac5c000dab700d2108b907d832d8", "71039dd024174cb6b956e82a45ff7241fe615c66", "0c0f353dbac84311ea4f1485d4a8ac0b0459be8c", "7a923514c02872e9118b49f81d52f750a2c209a6", "5c9e0f15222424022201422c7de534e06575d3d6", "854890f35fc7955d94777395f6a66da433426d98", "e8d8a42d0ee8849bbaf99c52cadeb2f1ebe564b0", "437642cfc8c34e445ea653929e2d183aaaeeb704", "61c07d7387dcbfb8fa697f15316e3b265d78a2fa", "fcac68ed896610947dd80ed2d96fa3f5e482d903", "c89a230ac0faee883a42b88c3349dec59630e39a", "e513e8789e90251fc957351028e02eacb3a508ec", "021e008282714eaefc0796303f521c9e4f199d7e", "30723ada764c6ec186927522d666eaa8eeae35b1", "dbb30d2ded42d9ec660369b11f15752b5f3d5a44", "854dbb4a0048007a49df84e3f56124d387588d99", "c5f71486c16add42c3394edb41b8c064b0123824", "85387549277d6131dc8596ffacc7a21aeee0c6d1", "33737f966cca541d5dbfb72906da2794c692b65b", "8e5804b8dddff600ee7646b8c33bebad4b550b31", "2544249e92b324a7f79da6eb556c387a4fa5226e", "3103a8a83e60aa1ef93b55cb0b2e7f12b9e2913e", "98ef8b8e4df2c19bcd56827a5a9cdd7eb88364ac", "c8b4df94686ae4d308e859eddc0e00921a17fe75", "271fbc4c09b3f2eb9f56dc2bbac89262b3bc083d", "473185fec9673b5b4da74156d2505da6585cf7a3", "795b76ebf17c559d82ea6976f1749096036d6817", "88e3aefe454e72388bbbe7dfa0b74fcfc52032f0", "e2b4a1747e66f72baae9929f908ab064a4263f9e", "baeb207ea6f4b52eea129b9d8597d4b7a0891ad6", "8fddb940c70261a335831cb7aa11b0102f3d901e", "5d80149e005894ab57f47e667f3e060e247d8e43", "894f1e924dfb8dfb843c42835fa79e386ac07383", "cc70f4af018de5e5bdc8075dbdf1bbe49a6f0b4a", "1999248e52154aa8e2aa4c1e60899e26794eb1f9", "202aaa03da5c5c2707ac8fb42aeed7f582ce2848", "49bcb014810ff50edefb20eb1411e8cb3c132b99", "e817bb5ede09cb7feb4acff3cbcda9366598d253", "04221205249bdffd0f155ac68ac477613654aa42", "1b55c4e804d1298cbbb9c507497177014a923d22", "d46b790d22cb59df87f9486da28386b0f99339d3", "b0c1615ebcad516b5a26d45be58068673e2ff217", 
"80488ff21f7b69c1c9d20d88514a42bdad2602f4", "64e216c128164f56bc91a33c18ab461647384869", "4edc7f27d4512b69be54abfc6b9876e5b00725ab", "4b6ea82fa73d2137c884ad43f7865d88b24ff01d", "08307351e8f2a40d7ff9bbee8deaa54dc1bad055", "4db6456b6933d0ae60bd1d7bb7ae01cea2ca9a9d", "e387db84cd31f14e468bb329ac008a80e645400e", "77c5437107f8138d48cb7e10b2b286fa51473678", "ecc09ab9c61dc3a3a15f55332f63bccbf443f291", "1c147261f5ab1b8ee0a54021a3168fa191096df8", "885c37f94e9edbbb2177cfba8cb1ad840b2a5f20", "f5af3c28b290dc797c499283e2d0662570f9ed02", "5495e224ac7b45b9edc5cfeabbb754d8a40a879b", "6fa9bae381274518d3972294d81e460f0c63900b", "a23e7e71fb92a56c2e7717f6356e8b69fc2f4bfc", "3555bdf6acee09ec7eb08b891a0f30b82c1b6482", "e9ae8bbfec913300eedede3ec48acb56c15ebdea", "ceaed903eaeaa1a541831e2c70b4b3251de67719", "a74abb9ae03bcf212ab97c4412c1be140b3142f7", "a04273851ae262e884b175c22decd56cbd24e14e", "81695fbbbea2972d7ab1bfb1f3a6a0dbd3475c0f", "eb3066de677f9f6131aab542d9d426aaf50ed2ce", "7ffef9f26c39377ee937d29b8990580266a7a8a5", "bb6801a2e7e3f319d6dc72a4ab5b4d16919d074d", "55089f9bc858ae7e9addf30502ac11be4347c05a", "1b51e3e98c8b2a2977cdfabc8483dd55c9dcad3f", "5e0eb34aeb2b58000726540336771053ecd335fc", "3d1b59fa0252e72afd8b107a1dac47cc2d5f8bb2", "01350214f850f43d72268df4f98b05901fbbe06c", "c784d4918ad33f4dd2991155ea583b4789ba3c11", "856bd32ee16cc531bcb1814fbae2f66582e21cc7", "3163d481923cc75d53c2ca940e23a07e7c85069c", "c2cb38fc68b877a96be99b814e8ee437e585f5b2", "43dce79cf815b5c7068b1678f6200dabf8f5de31", "9a0766fe421dcb3985caf99be46f8b9f5502913a", "e2f994059b831d4b752a5e8131ccee250ca56ffb", "0c0db39cac8cb76b52cfdbe10bde1c53d68d202f", "6ae47c7793e2f0f684ae07357335c7cf338d66ef", "a5bc960e186391ca0ba0718aec70069abb5134e5", "1bc5c938b79f23afb9931c99377d6ce7a99bf8fb", "5998a015a5f1b72761256759808f9529a7717058", "5cbc66ace06eb35a22a3196cc13f75ddb0b7cefa", "0cc3de80412455f6095d6ea293fa6ff7fb1eafb8", "293cefbe481a5a472d830a88ff140dfcc1869c31", "de95fa1dd69a2d0d2b76539357062062f8b1e7b8", 
"012ace850dfe366ceda69e560863fbefec2bc755", "5ba7ccde201b9199b2fcbb37b604a99d8d760008", "83e7c51c4d6f04049f5a3dbf4ac9e129ed96caee", "cd44668fd6b7e8d2606f8c634a5b571d172693ff", "6d4790026b63a175a635351b355cb7db7504cfc1", "51273a7abfe2018ccf2789a8e25d0c2ae565bc77", "555f5ef266335af8189714297ccbcd6ab77d83f2", "8855755a72c148dfde84bb08ae65d58c260e70d4", "f0225fd9e968a022ea6f993c1e44f01716908769", "1e7032e91fa01b90896c3cbfe5edf4f35ffd9628", "ffcb92719dcd993dda292ca82d4585950ea22ac9", "c5973aca863d73673e78d1310e8b21b4ea5aca00", "fb1627ed224bf7b1e3d80c097316ed7703951df2", "b3e5cc26fb8ae57b5e9e6dc161d5ea3c67bcdeab", "dd688b5e438f0b6bcd7121de03e518559516a306", "d3d5d86afec84c0713ec868cf5ed41661fc96edc", "1badfeece64d1bf43aa55c141afe61c74d0bd25e", "89c7f6a765aec6e7c754063bd723b1313f058948", "8e8a6623b4abd2452779c43f3c2085488dfcb323", "92527ace7f75188b5ec209ff7d59f431343075e4", "612075999e82596f3b42a80e6996712cc52880a3", "68caf5d8ef325d7ea669f3fb76eac58e0170fff0", "b30bdbad88c72938c476f1ea6827d8b10c300da4", "3d045d8d8b1af5eb6ba0b8365dc87d57bebafb4e", "4c81c76f799c48c33bb63b9369d013f51eaf5ada", "79f02a006c77f2d7fece8302bf54d851269a515a", "222d86787abed673600f1054796367f439c2eec1", "3929ff77805bc02993b187a6019dbed4f662d59c", "d345687bfeabd73b4ca86a1eadadadce8b0297a8", "3c563542db664321aa77a9567c1601f425500f94", "6aa201b23b93dc51939e68bcf12e386d3b459f2d", "989575349b4d2f8f8c8881bf4fb3a501ef906210", "4e71ac257b104bbc161331ab2a66e86515427146", "a70650358cc226e7f613b49f93d7eca044ca608e", "fea83550a21f4b41057b031ac338170bacda8805", "089b5e8eb549723020b908e8eb19479ba39812f5", "b8a5839f6b1e051f430f2b89d5a1a7e49a10655a", "9fc993aeb0a007ccfaca369a9a8c0ccf7697261d", "9fa224a709fba2df1cafdf1fda02bfdbd4015258", "688ae87c5e40583ecf9ec6d06d4d15a3e62f5556", "110359824a0e3b6480102b108372793265a24a86", "1c2b9e1e6359a12b0163d422a37d46e5feeb9f9d", "ada063ce9a1ff230791c48b6afa29c401a9007f1", "64a0323adf55db3d3de20cc2a8176961548379f4", "04c07ecaf5e962ac847059ece3ae7b6962b4e5c4", 
"096ef3f0b14e0589e1921d897b492c14fe0fcaf4", "f36647e63a11486ef9cf7a5a1c86a40fda5d408a", "df577a89830be69c1bfb196e925df3055cafc0ed", "b7cff2a6fb3861f36bc779984b312ebae9f1f365", "96f0e7416994035c91f4e0dfa40fd45090debfc5", "c3285a1d6ec6972156fea9e6dc9a8d88cd001617", "91dec705d119cb3cc40da18f51aafac3c5c191ce", "f81f5da2a1e4eb80b465b8dffca4c9e583a8a8a6", "4ec3c7fa51d823a43b3808c7c6baa2e153104bdf", "60f7de07de4d090990120483bd5407369b29a120", "dd888fd4f6d04623a34fc2286ff1e26280c2f0f0", "ea2d3a58982bfcf5b22986da513eadf2e74cbf4e", "2d1e729f09d7ba195de4cc43f203ff35017fb55d", "430cc88c6c73ab178c0e656b246f8f74d51c3a7f", "98b54eb04e531c34a20320e19b55f6721bd0d651", "8c93907f376fe93b9ec847f5034afd623b9774e6", "82b43bc9213230af9db17322301cbdf81e2ce8cc", "d94c7a89adf6f568bbe1510910850d5083a58b4f", "2a50dfe8d2157e1f75f587601fad8f3d81bbf10c", "c10b0a6ba98aa95d740a0d60e150ffd77c7895ad", "b68e1fe527cd37e29d87a9540a5d62c5ab067b8a", "174ef76c12e6426056ff2980bf220964030ed48e", "1e472cf9a290e8f59573628dba426cd6d74411f4", "f03a82fd4a039c1b94a0e8719284a777f776fb22", "39082dd549ee32805f48cdac3ede7d8537696afe", "0c13bf7fcb238cc844dea24c0e2ad694a5f2cbde", "f861ef9287f8771cf833f2626e7ff8487e26ebb3", "db458242dd526d84579aeee563355ca1a7dea5ea", "585260468d023ffc95f0e539c3fa87254c28510b", "46ee0288c382c7af7fa4f3a5e3c74d60a12c519a", "55fdff2881d43050a8c51c7fdc094dbfbbe6fa46", "d2044b92486248f87bafe937779cd2167efe170c", "9e42d44c07fbd800f830b4e83d81bdb9d106ed6b", "bb0ecedde7d6e837dc9a5e115302a2aaad1035e1", "1aa61dd85d3a5a2fe819cba21192ec4471c08628", "10e7dd3bbbfbc25661213155e0de1a9f043461a2", "cf18432bb77bf41377c477b5aaab9abd0f1f306c", "30861d747c87e2e838c1c30eed334b17cc93cdb6", "6f4ec006b6b9da4982169adea2914aa3d14ee753", "9da9b85e93a880aa5f026ca02bb188a445d909f2", "62e8010e2ac1523d3a3e7e1c13cb34e63e85ce04", "d0d161c1121ed89acc5fdc2cf689a91a83f39710", "da5932a4f3af37030f0460496d4e0b6f93294bfd", "c1c8ea4b2118095bea55cf6b51c36dbf95cc7f2c", "6e3660e8efb976c916e61fe17ea90aea7d13aac4", 
"d203333f94c45cc6b885772b6e5453fc74231eca", "d7d9fa9a5a57f9f3da7ab2c87ca58127665774cc", "c18bf07d5161d6ca7afb6eb5a4f385b6bd2fc381", "bbfd178e0c92ac547f86aedfcc439741a1a089bb", "438c4b320b9a94a939af21061b4502f4a86960e3", "ccc65463198ee0a0db9b303a3dc903c762dbccaa", "30fcfc6b7fe1809d79ea6ce08f50e2e53c203800", "7150323712cb700a68e7365a9c627b55c2c262dc", "0ebc58bb5d517db0111f3565c4eb378d93dad908", "6e7d799497b94954dc4232d840628c3a00263e42", "1e92c074ab9082863a48fecdbf212f1897687a74", "b632d47eb7421a3d622b0f1ceb009e4415ccc84d", "89475b4d09e541e09becb9aa134c8de117725205", "43cdec7e1a0f3dab0e70a80aacb4a89085fab3f4", "dca12da787c023c97058cdb7d56e18ef287084f7", "203fcd66c043e44fefd783b8f54105f0a577fc25", "8d1253f315c821bd2b354550ae9ea6d3d7be1d31", "f4ac6d29b6f544370d53aaff88c007c14a1fddfb", "3f6da4965b214ea947dd3b6cd33b41ece89e7e79", "3cc5f82147bd203e6d0a2cf9d2b2ac0ce31e58d6", "26fa4c87a2c9e21e9207cc4aee2b9890b1ad5a0d", "1e21b925b65303ef0299af65e018ec1e1b9b8d60", "03046f4dd5c1535a9d97662f75d1e3fc9bda407d", "db326e6b49121e7f80c802fd2e5b80a482c955df", "28475d32e92593b426be8ffdc4e852ed2c63affc", "36486944b4feeb88c0499fecd253c5a53034a23f", "ffbecbc581d98648dc670f9b5757c25348b25561", "daa120032d8f141bc6aae20e23b1b754a0dd7d5f", "784cc0363d44bf09f3f636abd1a532ddac95ca13", "1e62ca5845a6f0492574a5da049e9b43dbeadb1b", "5c35ac04260e281141b3aaa7bbb147032c887f0c", "4f863543407143a62e1bb053d435a947886ba619", "17e563af203d469c456bb975f3f88a741e43fb71", "8b266e68cc71f98ee42b04dc8f3e336c47f199cb", "9a845ce802227a13de16f3e29eebeb3c8817f83b", "c758b9c82b603904ba8806e6193c5fefa57e9613", "4cb852b0893edcf895aeccfb4e03055073777f5e", "33f2761d08da1c5b1b6a8f65ee6930075cf9927e", "1d7df3df839a6aa8f5392310d46b2a89080a3c25", "a9286519e12675302b1d7d2fe0ca3cc4dc7d17f6", "8ccde9d80706a59e606f6e6d48d4260b60ccc736", "8c4bcbaee18aaae417e2f2da7a7b95bd8edaf063", "83f80fd4eb614777285202fa99e8314e3e5b169c", "67751b7ce7f934ffadcf095f4189b31f890e9fdc", "e8e662e45e39249756d2b0090782434a5cf1f4aa", 
"b5ba0c50cfe2559f4197bb35cf50441118b768c8", "71edcfe5e3a4e1678698a0659a7e51555291d242", "36646992d460e12615a7f6cd249d4aa8626e0987", "b6259115b819424de53bb92f64cc459dcb649f31", "f33c427dc152c20537d2857bee1dda2287e85860", "abfb95a0cdcb1425e942cc90a2d4d9f35d2ccf5e", "cb96c819f20f05ad0d85bba91f86795162f63445", "4cfd770ccecae1c0b4248bc800d7fd35c817bbbd", "3a78f0c4b07ff756aec20a2f17ef0f25470c3cd6", "20d397c8d8865133ca7bbbd824e217e9fbf5a51a", "1dbcf2181cb9890397c88c7cba20941af9019a20", "50a893a51a93c1308b506d98927fb30bb763145a", "0cd87a66028f9d3c519a9459a213905b42b4c3b0", "422a75a0fc129c48f5ca03ea77e773bb5a0bdaeb", "8035e8796ed5bdd44477c523cd6b03f9adfa2d8e", "6b02d73f097d745e58bb99a880e559b78c4594a1", "4e4a4359c7dd25af7e2ef0910928cd9faa5d0cfb", "afb6d1e72d5b5506867a74beeb1e661599b8fff3", "2ba7c88a7e96d412c116d6bea4ba27be2ed4dd48", "e2bf47d2e3339f366de8947cbb5a894608b91bf9", "860196a306c9303ddaf323d702dacba68db658d2", "9f0c6797560de7f23bd3b016c9c328787c4cebf9", "fd0a1a2ecf69a6c1a6efcb18b8f23e4d5402f601", "2fa1037496dbcc04b705fcc4e9ed58cdc85df46e", "dd16038195c0f3ce6b1f3afc6a0482a12bb9ab6d", "10e0e6f1ec00b20bc78a5453a00c792f1334b016", "e4c2f8e4aace8cb851cb74478a63d9111ca550ae", "f4421adced24d729d5ed22559308c2b4719b44c2", "5cd47df260e65b2650a1123a2136ee5bc918d4c6", "de8e1a96fc02ddab239b23f222dbfe34249eca2c", "5b1780b24d82b9b93ed2c67d41885a2e7b6363f5", "0b37ff3bb5e73bc4fd70877b2a39b27debdb83e2", "0d35ab4d59c3731986965dcc935d11074832bc1d", "c9987af05f7df6539c5742072c027dfcf0394354", "55cad1f4943018459b761f89afd9292d347610f2", "621227db6a9b0ad374cf737fea3760b49c4de42c", "5288e1e7e914f73bf65c745f328844907226cd3e", "96eacc464c0177efc4f802f220888c7f675f24af", "16371cf22f9de60dd1edd7178669e5ba69143686", "035c8632c1ffbeb75efe16a4ec50c91e20e6e189", "23334c7b7860b8e4db873fdeed127a1034173d5c", "e9a5a38e7da3f0aa5d21499149536199f2e0e1f7", "103a7c3eba36792886ae8005f6492332e6b05bad", "49df381ea2a1e7f4059346311f1f9f45dd997164", "79dd5743438963d9fe4793f7c27530f2e7c4b20d", 
"aa591e478728f383337916ce4fabcc3f90cb58f0", "3f0c51989c516a7c5dee7dec4d7fb474ae6c28d9", "765be0c44a67e41e0f8f0b5d8a3af0ff40a00c7d", "fa8353a89cb822d530d2da6bdf071b7315fa2b44", "389266c1cb73a7918d599be659d91e7fb7f431df", "76669f166ddd3fb830dbaacb3daa875cfedc24d9", "71c4b8e1bb25ee80f4317411ea8180dae6499524", "9f60f03abec7dd4ef614ae0bf91ae10e6aa7f2dd", "fde3f34a1accadb73269e4beef487611f682b781", "4f53f80cc0d11beb3a086b98ba8ebaed6ec688de", "8f1c1f1467cc829d273618f63da65d01dbf1f972", "026e96c3c4751e1583bfe78b8c28bdfe854c4988", "f2700e3d69d3cce2e0b1aea0d7f87e74aff437cd", "9c513b0f304b1bb29de478a1227ddb201ed50217", "71fd29c2ae9cc9e4f959268674b6b563c06d9480", "d74c6e6fbd8952cbad96013e227374c903797162", "0daaa56d724c11e64338996e99a257fa69900236", "d59e60c87309556c73c2885d133b459f20c90d9f", "4572725e98f3e1b6f258c03643d74b69982aa39a", "3fd203807fab28243f84d2360572796869ccde90", "fdaf65b314faee97220162980e76dbc8f32db9d6", "f793970c7b57c3470561e9830cebbdd590a38bf4", "710b7a0c45ca6ba84768025ed1a02804a9530ea9", "b725f428e811b8b44795ac96be016efca4fca4d6", "1c71e653f86b06eb7d5b1d92694f34e6f57173de", "e44e8b0e8b041afe4501f199fa8af7610cfd4259", "624077c8c8c9306c12671870cacc0fb13ff20324", "4adbf8df68b1e1cb9da45054a12469720361c826", "754fa133a250d824c50b4c3b9c73975059954f41", "eb91eb5912de3d15f052a94cd0a188f553df90e7", "f2779f470ff7be49b64289ba52c692e5eed67fc2", "1dc94886ca1d4893208d38b18cb7ad1541a74b82", "1117073bba23f66717ca631e7077ad2b42f5c08b", "a693aa8f2bde89158bbe9aa8abec8428518c8d45", "129d0e22d6b847c8002fd2c70bb508cdf3286fb8", "fa72f77519d4e48d9dbb1f47eafdbc9bc4c824bd", "e43a18384695ae0acc820171236a39811ec2cd58", "2d7c2c015053fff5300515a7addcd74b523f3f66", "9badcba793a54dd90383a55d7dfee1281c510f75", "24fe0a4a2304da39b8ff5630ba9a64d505326d0e", "b2e308649c7a502456a8e3c95ac7fbe6f8216e51", "6ce23cf4f440021b7b05aa3c1c2700cc7560b557", "0a24a16cb9f6d95453d4cd6d0bd5bdad4199e3cc", "e483f644eb20d79402bab1f5d96025598e101f82", "b72eebffe697008048781ab7b768e0c96e52236a", 
"2e0481def73dbd3e6dfb447c1c3c8afdfaf9b7ec", "b465e173b81337dde5a0800753e34a40e4352d38", "e3c8e49ffa7beceffca3f7f276c27ae6d29b35db", "1dddfa634589e347648e79ae4e261af23553981e", "5850d734b264999c50735f42c5faafefd4e6adbf", "668d39ceb83d06c61ab58cb689a1b744ff520669", "9b8d73e83c111268745311e03f0c0f7f6c92c9f0", "046600fc88843159d5c53d089504b22f9876904d", "5b4abeb466a2c97a99b9621e0c83c95f4326e99b", "6789bddbabf234f31df992a3356b36a47451efc7", "b4e889af57295dff9498ba476893a359a91b8a3e", "f6684367e7925cd90fb8974640d41823191c7cff", "26d86dba4455e8322bd9ea53f490f3bbf95784d5", "946017d5f11aa582854ac4c0e0f1b18b06127ef1", "78bdaca41440f03b4d18a4caf9f0dace9afa08b0", "88f5f9d92c4fa696457a824c3eec204da05ba6a4", "a1cda8e30ce35445e4f51b47ab65b775f75c9f18", "1d3d05e294bb522b653bc6d11cb92d5c4140e41b", "da578542356d83fbe21a1ea89855bb0a02bdeb26", "cf6527d8d42a9958eea7d8d1f90ea4c86d591408", "aaa021feeec2f84c4a5f3c56b4c0fecb5a85a352", "ed7b46df8ee985d9dafcded81a2295a21e9f2293", "a9a414604cff39f1a03c5547385dc421e6c8452e", "c9d9cb2c647c6489814098438a9fbd916a8a1918", "92e464a5a67582d5209fa75e3b29de05d82c7c86", "f94867160e0885bc397892e4db56fa7ce9ebc83a", "8a26431833b0ea8659ef1d24bff3ac9e56dcfcd0", "ca92c54ec18e72d3ba86f3b80d82974707882abb", "acc9821b61ea804bd1e0b0e23a45f08fbf760a37", "2ea78e128bec30fb1a623c55ad5d55bb99190bd2", "8f96a6bf39d91c5cb37316d90b476cd98f447709", "bcab55f8bf0623df71623e673c767eed2159f05a", "fab7f1af3d67c7b7cf76ec1d8dfcb265da61a572", "d8afe2ac486762e37dc6616606d38b5967e9618d", "75d59ae0ed3ce51e37b383985cfff310251f591a", "8c269412a8c9e646641750dce2a1b2ee7b9c6b2e", "c94c2cf52fef0503c09268c7d1faee60465ee08e", "68484ae8a042904a95a8d284a7f85a4e28e37513", "d6e785d3466eeaaae1f6c792f679d1111ab30302", "c6fdbdbbbc7642daae22df0b7812e78d0647afb3", "1fc867b43092fe83c4e0bfa38a9a45ffaea86deb", "e9c008d31da38d9eef67a28d2c77cb7daec941fb", "2accfb97407e74b6f55d228cf7048a402dd8cb6c", "e4236c286787cc608ec42abba2e51eb36f108b14", "8e461978359b056d1b4770508e7a567dbed49776", 
"1b92973843c3a791bb5ca5a68405c3ecb3473ded", "ea6207e553a5c8a3e171a8f6b6297688ab43f92d", "da8d0855e7760e86fbec47a3cfcf5acd8c700ca8", "2945cc9e821ab87fa17afc8802f3858435d1264c", "911ee14fbd3f0b9ccbd91090fbe4aa65d73f46f5", "1daf148a6d5d86e8cbe76a13311514f1338bdb0d", "f881d2a04de838c8950a279e1ed8c0f9886452af", "f89e5a8800b318fa03289b5cc67df54b956875b4", "133477ccff666305d183cf1c35dcee40d0f2955a", "48143b1270a2df096577e6681b1f1ceadacf73e8", "1c99e412666d63e46e1c6606841837d3c18f48e6", "e2afea1a84a5bdbcb64d5ceadaa2249195e1fd82", "7ef0cc4f3f7566f96f168123bac1e07053a939b2", "96e1ccfe96566e3c96d7b86e134fa698c01f2289", "264175a074c56667f90db9780580368925944577", "b69badabc3fddc9710faa44c530473397303b0b9", "297c27c74e5cc731b5bd1ad95726b4192e3b902d", "7c0c9ab92d49941089979c1e344fe66efc873bdd", "773ecf8cfa7e544ac48cf146b71df19146e1400e", "7dab6fbf42f82f0f5730fc902f72c3fb628ef2f0", "40c3f90f0abf842ee6f6009c414fde4f86b82005", "91bf682708317b1c84365ce9589c4c1d9fc014e8", "36132cf4fea1717f7d39150d1a0cc79ad78b069e", "fa60521dabd2b64137392b4885e4d989f4b86430", "e72c5fb54c3d14404ebd1bf993e51d0056f6c429", "49ead21ec51e7df53583ef5ade06606c8a75dfb8", "a90226c41b79f8b06007609f39f82757073641e2", "4889d2927a9120931978ec487f55114d99eeb65d", "833cd4265bd8162d3cfb483ce8f31eaef28e7a2e", "584f9ccba8576ecab61fd4575da7484c8f9a7bf2", "775be2fe9e6d7ca97209692ee3f85fb0f1b125af", "8b5122ea59d8d7f70e344ffb2553537b5ad07dd5", "e088a2537492ed5a22885e871a51102a95c97cb6", "e2fb33d0ba0fe5e0c33b576e090b10fa4741d12d", "c86afba9c77a9b1085ccc6c44c36fa3a1fdb51c5", "b50f2ad8d7f08f99d4ba198120120f599f98095e", "284b5dafe6d8d7552794ccd2efb4eabb12dc3512", "8ae92b73020dac2c98c72cbaf823cff1567bd91b", "40a63746a710baf4a694fd5a4dd8b5a3d9fc2846", "02aff7faf2f6b775844809805424417eed30f440", "20d17ffeb8adcbbe7cfe7b73cc998a1d20a91553", "96390f95a73a6bd495728b6cd2a97554ef187f76", "744fe47157477235032f7bb3777800f9f2f45e52", "bf5c5c346e5d378731030edb53fd0c8a49781468", "46471a285b1d13530f1885622d4551b48c19fc67", 
"dd096d3cac4a9f26d38e135f803621d932c84f83", "0d1a87dad1e4538cc7bd3c923767c8bf1a9b779f", "3d5a4b31e6e25cd0cfefa0b5925674377cdaea7d", "fd4537b92ab9fa7c653e9e5b9c4f815914a498c0", "ea7fbfd02bf17b310e1e7f4be12d106b4990c33d", "47493ad6e6d5591086c8a2b812bfae85aae50193", "290c8196341bbac80efc8c89af5fc60e1b8c80e6", "5a4a53339068eebd1544b9f430098f2f132f641b", "b10319193be303038a9f58e7552632791e3f1ada", "c86e6ed734d3aa967deae00df003557b6e937d3d", "81d67fa2f5eb76c9b0afb2d887e95ba78b6e46c9", "04cdc847f3b10d894582969feee0f37fbd3745e5", "7bcb505d93175d0b89ff7aca76caf579ddf12339", "92104ae97d3b57489751528a315966c0242a6efb", "637de43801f26fab8f567787485c57ab92273ce5", "41dd2ca8929bfdae49a4bf85de74df4723ef9c3b", "a59e338fec32adee012e31cdb0513ec20d6c8232", "91ea1d8f11c4e3a20234888f6ea5309678975563", "4156746cdc99a509549c4028c7122eb6dc90b1a1", "048ff69503ea4937f10f69b1f29f655594253246", "e909b9e0bbfc37d0b99acad5014e977daac7e2bd", "4f0eab1ee02f015313ebbbfada22407d1badd5d4", "542015e2c78c51203963b76632b7ea2a6c46aa74", "9e1c3c7f1dce662a877727a821bdf41c5cd906bb", "12417ed7ae81fb4e6c07f501ace9ea463349481b", "eb4d7688cd03f3863a175149f5fa293140f9df30", "834f5ab0cb374b13a6e19198d550e7a32901a4b2", "250449a9827e125d6354f019fc7bc6205c5fd549", "4131aa28d640d17e1d63ca82e55cc0b280db0737", "e4fb693d8b2755b8e989e0c59b28db3c75591503", "dfe5849fc844bd7b747b3ecbe0f28ffb7e6ee917", "4ed0be0b5d67cff63461ba79f2a7928d652cf310", "af6af58ba12920762638e1d0b8310a0d9961b7be", "cb8b2db657cd6b6ccac13b56e2ca62b7d88eda68", "fd7173634abac857405c78564e366c311a1cf4b3", "18ec3b37a33db39ac0633677e944cc81be58f7ba", "1b5866c5b3715b410bfb4ccca6d42b32162d4ef1", "5fd147f57fc087b35650f7f3891d457e4c745d48", "9635d5e2b33b2fec49b31cb80928c28763a90d85", "7e3147a01108607fa65ace289094e5b5b525929c", "ada1a5f2d2a3fb471de4a561ed13c52d0904b578", "d87ccfc42cf6a72821d357aab0990e946918350b", "f6c814f6efff8031b9ebc62cdf0f3b343441e7d3", "e5ee85c412942bdfd9df8cc519d4af31d6d08a67", "551b75dd57829b584de5f51b63426efac81018db", 
"79fc3c10ce0d0f48b25c8cf460048087c97e2e90", "c58b7466f2855ffdcff1bebfad6b6a027b8c5ee1", "40559bd43d5480000e34e4fef3e8fe3782d1a688", "b0103d264f756144f8acc1994f2327699e280652", "257e61e6b38ae23b7ddce9907c05b0e78be4d79d", "641fd2edcf93fa29181952356e93a83a26012aa2", "2da1a80955df1612766ffdf63916a6a374780161", "c3955d74f2a084a8ddcbd7e73952c326e81804b2", "cfcf66e4b22dc7671a5941e94e9d4afae75ba2f8", "e90e23a757c346170df4f403d0c18bcea2874ed7", "3f6474bc611ec790444ffa6e644a258f3d2aed37", "7da7678882d06a1f93636f58fe89635da5b1dd0c", "4a855d86574c9bd0a8cfc522bc1c77164819c0bc", "e75a2c37940a0f4e8b9f8d40d059ae4da2c443dd", "e87d6c284cdd6828dfe7c092087fbd9ff5091ee4", "97ffadd639eb27d73b86fd5520d9d6b81772b891", "4941f92222d660f9b60791ba95796e51a7157077", "34a4c733bc2b53253dbebe67f1af83b969c2e657", "b0c3bc3e3ca143444f5193735f2aad89d1776276", "3632ac78294d39f8d51bb8f2ec270cf9c115d0f6", "0e312aee6b03a697d112a1bd8d25d84d1a122d8e", "0c882588ed7436f7122af2b324c598adbede49c1", "3555d849b85e9416e9496c9976084b0e692b63cd", "9019286143f89561509506c3164f36f0e7e3a364", "0b783e750da34c61ea404be8bc40788fd66c867d", "ff83aade985b981fbf2233efbbd749600e97454c", "91e435fe71861a8da569a089b4841522ac9aa369", "2f587ab6694fdcfe6bd2977120ebeb758e28d77f", "76ec5c774bb3fd04f9e68864a411286536a544c5", "b47add32c0b26e72f5670644618076dfd8bc1404", "c3e1ab13bb3c64ed129e286cec17465fc6bff0e1", "b348d5c7ac93d1148265284d71234e200c9c5f02", "1d81fe4a386a7d96b256eac41b99604cd132e019", "26690f2548c6dbf630de202b40dec417b20c9b6c", "b2504b0b2a7e06eab02a3584dd46d94a3f05ffdf", "e4b88898d8ac1086e82ecc2fba82fb174bf9adaa", "222db9e290b34ae30c39486697d8e8dac3175770", "6d83c33e917e63a2c1a9f23fdd58f01a95f0c87a", "472c8606e68b34f4cc796a11963155fe3c6bfaec", "d8b2e37eb9d2ee0e84bceafce84812cfa0b88211", "7f217ff1f3c21c84ed116d32e3b8d1509a306fbd", "0677dd5377895b3c61cea0e6a143f38b84f1ebd7", "886a50f269ace4b140ddee9d4c7277743b27e250", "174ddb6379b91a0e799e9988d0e522a5af18f91d", "bd17d6ba5525dec8762dbaacf6cc3e0cc3f5ff90", 
"0005a23c0db792ac9d0f5d408c39240ffe4c1d57", "907fbe706ec14101978a63c6252e0d75e657e8dd", "485f8b28dcb7a5ffc98beb49fcbb50cf0a0b6331", "c2b3bf311a9182b1452f5ade82fb6db6263e2ddc", "9340efcb976f6c28c7242480502e16f795895f28", "68b01afed57ed7130d993dffc03dcbfa36d4e038", "4539582f25f4316c1655fbb308ee8a5b11649e38", "d0fdf0f3f47d9f9d11e84961573b324c51518f34", "8699268ee81a7472a0807c1d3b1db0d0ab05f40d", "626c12d6ccb1405c97beca496a3456edbf351643", "5a1c2ca8b81f924bc7584c2ea873c024cc979a1d", "1450296fb936d666f2f11454cc8f0108e2306741", "df999184b1bb5691cd260b2b77df7ef00c0fe7b1", "da11b7bba74c4abbfb181bd7d07c4e6480d6c3e2", "f984a5ad2d379b4e4b51005a73cdbd978ce3d810", "fed9e971e042b40cc659aca6e338d79dc1d4b59c", "21d1315761131ea6b3e2afe7a745b606341616fd", "bf15ba4db09fd805763738ec2cb48c09481785dd", "4a45b8f8decc178305af06d758ac7428a9070fad", "11f732fe8f127c393cc8404ee8db2b3e85dd3d59", "9d7edd114f788763bb16280249fae97c4aa2c102", "8326d3e57796dad294ab1c14a0688221550098b6", "59f77456b4e2ffe84f99ac33796ee409143dbdac", "9e8b20ac34f560ae12bb51f3e3713ea755d36c85", "ba38c4f083ccde8a932952ba865e57d930476b1b", "6b95a3dbec92071c8552576930e69455c70e529c", "fd6d2e4f939b8d804a6b5908bded8f1ad2563e38", "0471eb1882bb7f538b40a0f76c7073992e7bf213", "a5531b5626c1ee3b6f9aed281a98338439d06d12", "f8a2a6b821a092ac43acd4e7366fe7c1e9285317", "1ac85387b1d5a05f752ddf671763f02e923a2a03", "8134b052a9aedd573dd16649a611f68b48e30cb2", "ecf2ba5ea183a6be63b57543a19dd41e8017daaf", "e4183e539b90ac02f55ccf16eb154bc269576290", "404c7839afe2fec48a06f83d2a532c05ad8ba0d3", "b4fe9594e1de682e7270645ba95ab64727b6632e", "79815f31f42708fd59da345f8fa79f635a070730", "ca42b7f881437976a6c60de0229ebbf31b58c3bd", "04d09bed8b05ed10d25c1cc2ab47381b0ee34c2f", "c35724d227eb1e3d680333469fb9b94c677e871f", "c8855bebdaa985dfc4c1a07e5f74a0e29787e47e", "5b16f0870546cd57a934f2ee039136a09abb96b9", "8bff7353fa4f75629ea418ca8db60477a751db93", "ab3b196c5386f7ec2d05870eeb8872c8b8e33d77", "ba26bf9ffd328d23faca2deea9ebb3292bddcd93", 
"8e723e8a3a5a9ea258591d384232e0251f842a1c", "056be8a896f71be4a1dee67b01f4d59e3e982304", "4320b0b7e65607e96326990675ac15880dc08b89", "4d3cbc5799d7963477da279dae9a08ac4d459157", "55d0eaf7393bb6bd0483c98894f16269d275c2bd", "cda6c8904c324e4eb32e83cada17cd1a7d47a348", "a8db91308f59bc9452e87fc553eecea67632c443", "9db7af606e6eb6238ca900145c8270245e9d2959", "f3356570afde9002601a46395d565031945c7a5a", "9fb372fd2fb79571de1cc388154d4a3f0547d440", "7fa62c091a14830ae256dc00b512f7d4b4cf5b94", "9d9106d48e30c07d45ab07c21f8c35d11ae4d35d", "6f9873e2a7bc279c4f0a45c1a6e831ef3ba78ae7", "318c4c25d86511690cc5df7b041a6392e8cc4ea8", "77ee75f96e6498ccc7bb7ebcca2acd7cc4e33229", "74a1e28dd2c03076124282482074e10bb02bc643", "39c254cd706b9fb89b369b41b1c4d3949cb554f8", "053f5a00d58541c417693a4e08a76005e135486e", "bea185a15d5df7bbfce83bc684c316412703efbb", "f7fdf862b7edeb5fd9d8fad7062c1f029b419769", "95a0c9f41d0cc6f45853d616d5476b8aee54ff0a", "8880af06d8497e9deda01e0a0eabacf9e1cf0490", "60bc358296ae11ac8f11286bba0a49ac7e797d26", "9063b17ccfdf73fc789d01d3c44c451244638528", "fc3e097ea7dd5daa7d314ecebe7faad9af5e62fb", "4fec382efed4e08a36fafa3710b97f0b20de1ebe", "9b19be86280c8dbb3fdccc24297449290bd2b6aa", "85cad2b23e2ed7098841285bae74aafbff921659", "e667ba14fd3ea15491ad7c7f2f7e3622d231eeae", "341de07abfb89bf78f3a72513c8bce40d654e0a3", "b63041d05b78a66724fbcb2803508999bf885d6b", "88726ee727d38c7a101f79412ff1cfc9b0e35f04", "7c2494c8b59a76404996e4b34889da36d140dd4a", "e22cf1ca10c11991c2a43007e37ca652d8f0d814", "ee0d7a1dd5f0821b6f48113a283b9196a38d1c6c", "16f79b8917f53eff7da88c6cde9300cff1572eb8", "d5d472266aae563010e12ae90fe5fe6f3c484cba", "947399fef66bd8c536c6f784a0501b34e4e094bf", "bb97664df153ac563e46ec2233346129cafe601b", "a765b506d29cb46420e125c86ab6ff442905e9d6", "8d9067da4ba5c57643ee7a84cd5c5d5674384937", "325723a7fa69f9976feeab5ba9abd3c11e3f7c80", "a47ac8569ab1970740cff9f1643f77e9143a62d4", "d80564cea654d11b52c0008891a0fd2988112049", "372bf2716c53e353be6c3f027493f1a40edb6640", 
"65ec52a3e0a0f6a46fd140ff83bb82d7d02a2d45", "66f55dc04aaf4eefdecef202211ad7563f7a703b", "3bdf9c8ba5f5cf1845fe69b3874f0036ea8c245a", "3ce4a61ada2720713535d7262e8229b33c5df79f", "cd1758d3b86c4f1caf01ec222b45daf15888d1a8", "d9ee64038aea3a60120e9f7de16eb4130940a103", "cbdca5e0f1fd3fd745430497d372a2a30b7bb0c5", "c87035f4b5cdb8597db20e9dc319c2a06d752197", "d21ebaab3f715dc7178966ff146711882e6a6fee", "019c5cb085dbbc8a0fc78645e385aa4e0b468fb8", "d1697250de6f91d3a3266c1ff0fdce0bf96acfe3", "84e9de36dd7915f9334db5cc1fe567e17d717495", "b22b4817757778bdca5b792277128a7db8206d08", "8a5099b2ae6912b4df22534a1b3065e147c38b9c", "ec2027c2dd93e4ee8316cc0b3069e8abfdcc2ecf", "cd5ef3aeebc231e2c833ef55cf0571aa990c5ff8", "296502c6370cabd2b7e38e71cfc757d2e5fa2199", "582519e667fe1520dedaa04ffacbb2161b6a5b84", "4cb48924acdcc0b20ef05ea5f5e856b081d9b40f", "6903496ee5d4c24ca5f3f18211f406e0ba8442d6", "68b6ec13d06facacf5637f90828ab5b6e352be60", "9baf0509f63a3322d127ae4374aa5b0f9d5439b8", "a6051a9ae4e09faa02dcc45c0d34ce3b1c50382b", "6de1299a192fdb852846e3cfa4a428b8fe81523f", "d81dbc2960e527e91c066102aabdaf9eb8b15f85", "2d6d4899c892346a9bc8902481212d7553f1bda4", "6b7f27cff688d5305c65fbd90ae18f3c6190f762", "6a7efb6f3471a2aff702d5e8080e066636335de4", "84c35fc21db3bcd407a4ffb009912b6ac5a47e3c", "c1586ee25e660f31cba0ca9ba5bf39ffcc020aab", "c8fc81a54ccef6d8111e7253283fc55e7e0f8ebd", "b44d8ecac21867c540d9122a150c8d8c0875cbe6", "9cc4abd2ec10e5fa94ff846c5ee27377caf17cf0", "b099dfc5c823be79f9ca96168263c40d0020b92e", "0217fb2a54a4f324ddf82babc6ec6692a3f6194f", "7e654380bd0d1f4c00e85da71a3081d3ada432ef", "0fa88943665de1176b0fc6de4ed7469b40cdb08c", "eef29a4fef85c7ed8acde9ca42f8f09d944f361d", "b19f24ec92388513d1516d71292559417c776006", "09879f7956dddc2a9328f5c1472feeb8402bcbcf", "f8d68084931f296abfb5a1c4cd971f0b0294eaa4", "70a3bea7e9a4f7af6e80832d467a457c18d2389a", "82821e227683d66543a303f4faddc1376a91a463", "7cca4d680152ed43e6dd8cc55d9ea55e2ed64eae", "72a6044a0108e0f8f1e68cd70ada46c81a416324", 
"c9d7219d54eccb9e49b72044d805e103fe17ba80", "0c2d0734a2c9d3e4d8a585b3f2ad4f642bf06dea", "999cdddf1ca23e4a72028d2a88537cf4a7aa9396", "9cc3172efb42d2f9fa1b9ae7b7eef9cc349cdef9", "0ee737085af468f264f57f052ea9b9b1f58d7222", "57235f22abcd6bb928007287b17e235dbef83347", "69c8b0ec77d3164df2069a5133780a36ec8e91ad", "42a712dbfe07262ba2b479e800008f08ad1c1388", "0e31e5e899e2c22d5871054f954f6dd01a33b9d0", "6d6834a094767356474d34b099a2f042ddb44e69", "7fe7fe517119e39eab2ab7cc5f03103d9d1c03ee", "9da2b79c6942852e8076cdaa4d4c93eb1ae363f1", "3c051c8721b65fca8c506de68068dc8fca6adcc5", "bc17c2075d7f7bc414acc00a88ff5a464eedaebe", "189eedfc81ee47b2b44caf8bfe816726697ba421", "2727927c7493cef9785b3a06a38f5c1ce126fc23", "a8ef9a39e68bbc7f6f25a8155cab52aab6708886", "102a2096ba2e2947dc252445f764e7583b557680", "e90a925fea8456718527a73a3621fba9b848de28", "3e2ec9cea926bd02072aa41bd81eb4c593e205e9", "6211ba456908d605e85d102d63b106f1acb52186", "9bfda2f5144867d5712a8fcbea9dd5fa69d3312b", "f1aa120fb720f6cfaab13aea4b8379275e6d40a2", "403530d1c418de29cbc595775ec45e16183950e5", "c6c3cee8adacff8a63ab84dc847141315e874400", "40fa315150ddcaa1e0996046d140b8882f375f7d", "f672d6352a5864caab5a5a286fbc1ce042b55c16", "04fd269c96f11235fbbb985bb16dacedaa3098fd", "3cafea5212ff4217beb293e2de8ca0f160ad623a", "fe030b87e3c985c9dedab130949e2868e3e5e7d5", "7b0c88bc555e3ced093e5cecb5dc1996f42eeeec", "688550a6c72f14cb8f2d9d86802c7cfc3d3d800e", "51236676c3bba877d82c31b393db1af4846527ac", "b8b2acc7e5bd94651c8bb025b6311c108c7a7d37", "76cb2ecc96f02b1d8a7a0d1681fbb55367a4b765", "42f8ef9d5ebf969a7e2b4d1eef4b332db562e5d4", "8ea9093542075bd8cc4928a4c671a95f363c61ef", "2d2102d3fe127444e203a2ab11c2b3d5f56874cc", "b603bcd53a045c7c991106423c79d5a2975b3da4", "c9b90cf9cdd901bd3072d6dfd8ddc523c55944b1", "1b72222651c5b0295981e26d1333fadfcfb6a480", "45c182f8d003a2d505e4d1d491b5d03159a70b81", "c9367ed83156d4d682cefc59301b67f5460013e0", "16f48e8b7f1f6c03c888e3f4664ce3fa1261296b", "fbbb0c1ac9b26047348fa1acfcc1e4b47fcd94c5", 
"5375149a74361b51d734613be5d2ccba0c6b6955", "d6ae7941dcec920d5726d50d1b1cdfe4dde34d35", "fdcc1e66697a724bd2d0d2da368de04a7eaf9209", "6196fa926ae752f927cd550b74259069e18abc71", "4563cbfbdba1779fc598081071ae40be021cb81d", "08809165154c9c557d368cddfa3ae66ccaceaed9", "a4ee9f089ab9a48a6517a6967281247339a51747", "fdd33dd6c6463564c4756fdecbfc81be82834f73", "35800a537017803dd08274710388734db66b54f0", "ea94d834f912f092618d030f080de8395fe39b3f", "f53b8e719dbbdacf7365e4a0e5ecae875d00c3a9", "7933a312c4a4ba431eb0357fd05e8609ca66eaa7", "9a989c7032051566d3ade03e5650ea6a41a5a9ed", "2b852a4e5026ab962050a0ef23a6892e06abb152", "0ae192e146431a52d7bb51923e9bdd7292ab12ef", "bb1bc9df5e9cec3e8a03a027b8016b8fc25be73a", "d7fe2a52d0ad915b78330340a8111e0b5a66513a", "b7c4fe5c89df51ebd1f89a34c66b94cc6019d8e6", "f29a24ee71940aa46b2c3438d4ddb89b33acdbc4", "4e97b53926d997f451139f74ec1601bbef125599", "21011f38e721e74c3979ec6f3426aec811423640", "1e1ab3d08fd71ab7368464d9adf78be1170fa728", "17f3358d219c05f3cb8d68bdfaf6424567d66984", "b362b812ececef21100d7a702447fcf5ab6d4715", "a562180056cc4906d6d5ef9d2b4ed098d8512317", "d84263e22c7535cb1a2a72c88780d5a407bd9673", "0dcdef6b8d97483f4d4dab461e1cb5b3c4d1fe1a", "e225a7cbfce4f7c9c29507c04190e6d6b6b46f7f", "07a8a4b8f207b2db2a19e519027f70cd1c276294", "751e11880b54536a89bfcc4fd904b0989345a601", "a58bef564df2bebbcb24c58c4a69bc6c51ab2d39", "9817e0d11701e9ce0e31a32338ff3ff0969621ed", "a7e5a46e47dd21cc9347b913dd3dde2f0ad832ed", "621ea1f1e364262348135c803557e7b3454a804e", "f792f75f6d2bf265569d4e63dd139c4d04ec7fdb", "56f5005c4be6f816f6f43795cc4825d798cd53ef", "5417bd72d1b787ade0c485f1188189474c199f4d", "e459158c2217904d5fe9a409896bd49622f17ebe", "c8dcb7b3c5ed43e61b90b50fedc76568d8e30675", "a157ebc849d57ccff00a52a68b24e4ac8eba9536", "82088af865626e2340db12b2e42f3a258053d593", "2a8aedea2031128868f1c6dd44329c5bb7afc419", "2c94682176f320f406f78c484f9135f085d1c0f0", "8818dafda0cf230731ac2f962d8591c89a9fac09", "c14fe5e69383fa9dfbd256965df06a99fae5558d", 
"ec3eb92b9a56b1fa84b127b8acc980555cd1f2e0", "f5eb0cf9c57716618fab8e24e841f9536057a28a", "3075baf2abc1849d2dc2f1448c272ca2f8b7694d", "42f422a9a67ba71a9ac699205940d8cc2dca8317", "d2860bb05f747e4628e95e4d84018263831bab0d", "4e29533438d5c612ab24b80c840446eafcb5995f", "f9570079f33ab11394175d57db0aa94251c48c61", "00d0ad219577c70a3d6295e8839841b2f1898e29", "ec488139105565477bb8a3c6cb3c874c35fcb2b6", "0ad318510969560e2fca3d7b257e6b6f7a541b3e", "57522ff758642e054d7c50753ec1c3fe598533f0", "bfffcd2818a1679ac7494af63f864652d87ef8fa", "f67afec4226aba674e786698b39b85b124945ddd", "7b07a87ff71b85f3493d1944034a960917b8482f", "02b0bf28f34c3c403abecd2fb4fb7d4969c0e0db", "63db76fc3ab23beb921be682d70eb021cb6c4f16", "442ee5a3f51ca93a642c20fa69326f3b17367565", "c5b311152a4e611288a77fbb1460eb0fbb049de3", "273785b386eaf01be96e217a2a8aa1c2ee694c2e", "4eaaefc53fd61d27b9ce310c188fe76003a341bd", "ffd0ba45cc6b0c8f72a09617144786ffb26be771", "edcb662834aae8878a209c769ed664f8bd48b751", "d5c4e3c101041556e00b25c0dcb09716827ed5b3", "330b3db69f70f01afd674a2b7bce4bb5000bf164", "666aa18ed45a0a92959d91d0f9a4c928aceb1450", "d9584adbbb214465e4f2d4dfae1b12d33de7630b", "6066e13aea80f64b6ad1415cfc3839c1f8590c04", "40638a7a9e0a0499af46053c6efc05ce0b088a28", "b1c80444ecf42c303dbf65e47bea999af7a172bf", "e0515dc0157a89de48e1120662afdd7fe606b544", "db3ce18f318ee732dab2e2f574062c94f7398943", "e23ed8642a719ff1ab08799257d9566ed3bba403", "2e10560579f2bdeae0143141f26bd9f0a195b4b7", "95896eef75a5fc6c8a7ac2531e76c423d678d2e7", "b959d5655a3b2f92c2c1a8a7896fecafafea979d", "b7ccfc78cb54525f9cba996b73c780068a05527e", "471908e99d6965f0f6d249c9cd013485dc2b21df", "006af49a030aa5b17046cfaf40de8f9246b96adf", "27c4369463ff28f4ab16e9d9eba6f48102c8793e", "bc995457cf5f4b2b5ef62106856571588d7d70f2", "5d7070067a75f57c841d0d30b23e21101da606b2", "3e4bd583795875c6550026fc02fb111daee763b4", "62b90583723174220b26c92bd67f6c422ad75570", "b1ffd13e8f68401a603eea9806bc37e396a3c77d", "3d38022d7ba71e865ca406d28acd3fe547024319", 
"e68ef9597613cd2b6cf76e81c13eb061ee468485", "cfcb4d0d9ba7eb86f068c4fe0f9e6676a37481bc", "edd6b6cd62d4c3b5d288721510e579be62c941d6", "9941a408ae031d1254bbc0fe7a63fac5f85fe347", "de60ee528db713d264ffea870b33f8be054fb8c7", "a91fd02ed2231ead51078e3e1f055d8be7828d02", "cfa931e6728a825caada65624ea22b840077f023", "57932806423204b445bac4abeaede97edb90fa03", "c231d8638e8b5292c479d20f7dd387c53e581a1a", "a59978ac12815cada0936dce760a6ff6aef376d9", "c8a5c5c8e1293b7e877a848b7a9e5426c5400651", "00f1b6927785b6f4305cc35c1b0bfbbe2010c31f", "fb6f5cb26395608a3cf0e9c6c618293a4278a8ad", "2baea24cc71793ba40cf738b7ad1914f0e549863", "6c61e496afd6577aa330b1f48ad0cec1d35b32d0", "e1740c8a562901ac1b94c78b33c4416500cedebc", "a8760dc83e8cb88f241cc206855fbbad680889a0", "3cea3aba77649d718991d0cb30135887267c11e8", "4efb08fcd652c60764b6fd278cee132b71c612a1", "245f8b05bdd1ac65a09a476440dc4b05ac05d4a0", "877397982198554e9294f0ddddd8d971cc87cefe", "d4a925cb0ca66b1cacec325751f4a85e5b74790d", "db0d33590dc15de2d30cf0407b7a26ae79cd51b5", "eb72dcf0ba423d0e12d63cd7881f2ac5dfda7984", "53c14feecdf23c40c594c25a0075c7150fa2f9e2", "008936436a5dada1366ccf239786f913a47c340d", "938acfc9001174fdf9007e5dea2cfc993a0b9a09", "e21c45b14d75545d40ed07896f26ec6f766f6a4b", "06be17bcc4136476855fc594759dddc6f8b6150f", "6b327af674145a34597986ec60f2a49cff7ed155", "6f900e683ea1fc85825a403d1ba2df7875f35bb9", "8929e704b6af7f09ad027714b75972cb9df57483", "5a9103153c7f36d8a28bfd66e89ff05c93129415", "33658ee91ae67f3c92542dd0f0838b48c994ae4d", "4e91defcc0b5ddf18fa70c34d91ce94a0be0f4d7", "c03ef6e94808185c1080ac9b155ac3b159b4f1ec", "14a022a3eb8cc9681b1ab075650d462788de1fa0", "00049f989067d082f7f8d0581608ad5441d09f8b", "f0483ebab9da2ba4ae6549b681cf31aef2bb6562", "69b5dd48d0f6f95f4dba5ad8b35b51de446b632f", "0a13581afdae66bcf52755bfb53410e6e54c1840", "91edca64a666c46b0cbca18c3e4938e557eeb21a", "23fd82c04852b74d655015ff0876e6c5defc6e61", "b04d4b1e8b510180726f49a66dbaaf23c9ef64a0", "e0082ae9e466f7c855fb2c2300215ced08f61432", 
"95a3af61b398976c13d96baa32481e1bf4a31984", "ae2b2493f35cecf1673eb3913fdce37e037b53a2", "29db046dd1f8100b279c3f5f5c5ef19bdbf5af9a", "d060a7a715f2e233dd09777bf651be10fa19f3d3", "630af2eb466fac956f9a43bf877be0eae6d80027", "ddefb92908e6174cf48136ae139efbb4bd198896", "d10eff69699591d26dbb69ed17d8afe06bc581db", "e3a7fca5f94d85814b600e870b90259eefedaf6e", "69556424ec4daaa2b932790dba7bc8b826abc574", "c3293ef751d3fb041bd3016fbc3fa5cc16f962fa", "7c2174a02f355a00f1fd5aac6dd62c84a919a2d1", "9d8978ee319d671283a90761aaed150c7cc9154b", "801a80f7a18fccb2e8068996a73aee2cf04ae460", "77acef6d0146465b9e9ad5817ad3e2c20ae64566", "b768cb6fc2616f3dbe9ef4e25dedd7d95781ba66", "e530b5dbced106b72ecd0d1ef542d2c9eaf00856", "665265289471d08a4b472329eb42965b51ac485a", "3ab93fe26a46f8bc0999e68af71a0907a63a5e65", "8bddd0afd064e2d45ab6cf9510f2631f7438c17b", "e309632d479b8f59e615d0f3c4bc69938361d187", "d5cf6a02f8308e948e3bcd1fd1ca660ea8ea8921", "e9afb44fa1bf048e90d68f755945bc2b81642239", "6b2db002cbc5312e4796de4d4b14573df2c01648", "5f69d59ad195a69618231ad83c4ad6342a569074", "389b2390fd310c9070e72563181547cf23dceea3", "b47602296ccda89bec7dfa592965dacf17ca1483", "2179e34ef3cca174101f57e3cef8e2360fc64303", "4eba5f6824f29533e0cd2660e49f2699c7e6501f", "788a3faa14ca191d7f187b812047190a70798428", "2ba23d9b46027e47b4483243871760e315213ffe", "59e91bd46492391beadce041806297e856af6ee6", "1667b7c68e733f95f81c12e6cac73e5f659abca1", "d3d887aebeeae44cefd5c2bdbb388d9ce109e335", "28121cd9150250fe51de62521065c7e2246a73e9", "3a40059e9dc4b19ae7f49b8746d8dda22456767f", "147b7998526ebbdf64b1662503b378d9f6456ccd", "372bc106c61e7eb004835e85bbfee997409f176a", "1cce875bf085602a2b0e486eb37dadc47e4efbb4", "e3582dffe5f3466cc5bc9d736934306c551ab33c", "346578304ff943b97b3efb1171ecd902cb4f6081", "231af7dc01a166cac3b5b01ca05778238f796e41", "73d57e2c855c39b4ff06f2d7394ab4ea35f597d4", "654ad3b6f7c6de7184a9e8eec724e56274f27e3f", "3355aff37b5e4ba40fc689119fb48d403be288be", "45824905119ec09447d60e1809434062d5f4c1e4", 
"279edb192f630f057516d8e56eae61713b6a1895", "62007c30f148334fb4d8975f80afe76e5aef8c7f", "be4c2b6fdde83179dd601541f57ee5d14fe1e98a", "a45450824c6e8e6b42fd9bbf52871104b6c6ce8b", "d5909f8d82bff4b86cc36ecd000f251c1a76293b", "dcda558e15e309d8e3158bf2cf8e921cdb59cf5f", "169565b280eb25a9cbcc1528420551371ffed314", "0bb574ad77f55f395450b4a9f863ecfdd4880bcd", "0e986ac9484e0587b6ccf01a5db735b9bf185157", "36e25994cfeab3dc487f9a82139c08f26cebf92f", "70f3d3d9a7402a0f62a5646a16583c6c58e3b07a", "99f565df31ef710a2d8a1b606e3b7f5f92ab657c", "7bc1318403cdb4895a4437993d288068a8e85f5f", "39d08fa8b028217384daeb3e622848451809a422", "2d42b5915ca18fdc5fa3542bad48981c65f0452b", "48810b60f1fe6fcb344538d5de8c54e5d64c20bb", "71bf455415f283dc70a2f0343fa8387acbf00fb2", "bcbbb240450a511841ee8510f8dd274e6c788f2b", "dfcb4773543ee6fbc7d5319b646e0d6168ffa116", "f580b0e1020ad67bdbb11e8d99a59c21a8df1e7d", "a46d4f5bf9c5baca38b52874e74d1e3f9b3b12cd", "daba4ff9ad8015f6c9626dbdfee950fda401424f", "43a2c871450ba4d8888e8692aa98cb10e861ea71", "bee512a8117ef26e5c9fbcc36da8d0d0fabcc5d5", "7958893d88c007d6569c1f2f9771d1c63b99422f", "99eddbd03e39c86260e282c7a0993617710d5cb1", "1a8a2539cffba25ed9a7f2b869ebb737276ccee1", "8175f126198c126f9708fa8a04f57af830fba6aa", "4771af2eeb920bde146c74ee0f56bd421793cd33", "f8cfabecbe587c611de2696a37f96e3f77ac8555", "7689d23a22682c92bdf9a1df975fa2cdd24f1b87", "03889b0e8063532ae56d36dd9c54c3784a69e4d4", "a0a950f513b4fd58cee54bccc49b852943ffd02c", "2780f8fc25320f4fb258442ceb790ffe301730fe", "c34532fe6bfbd1e6df477c9ffdbb043b77e7804d", "2b286ed9f36240e1d11b585d65133db84b52122c", "31182c5ffc8c5d8772b6db01ec98144cd6e4e897", "5efdf48ca56b78e34dc2f2f0ce107a25793d3fc2", "829ddf932d7164ebc915095a4a94471049825410", "98142103c311b67eeca12127aad9229d56b4a9ff", "16d9b983796ffcd151bdb8e75fc7eb2e31230809", "9b2b7e1153638286305c12dee2aba8d3bbd22a1d", "ed0cf5f577f5030ac68ab62fee1cf065349484cc", "2cf5f2091f9c2d9ab97086756c47cd11522a6ef3", "e853484dc585bed4b0ed0c5eb4bc6d9d93a16211", 
"7b0f1fc93fb24630eb598330e13f7b839fb46cce", "0a85afebaa19c80fddb660110a4352fd22eb2801", "35ab5978376ea8113ff476076f18a677b4136d92", "4df37f8d253efc3fbb6bbc9fe7273a39eceac76d", "26d407b911d1234e8e3601e586b49316f0818c95", "1be10b1f05fe7a5dd28cbb63d61a992c5d9b611a", "0310d31020ae59bf3d6ac61b6206dfc0e79b4efe", "fd892e912149e3f5ddd82499e16f9ea0f0063fa3", "2a4bbee0b4cf52d5aadbbc662164f7efba89566c", "ea9857a5e5c72d435054a5a73e50dafb755a2597", "969c4d89d7b22b36d8fc569156ca6e040b31565d", "17f8f5fe7a6730ee8d735d055ccc12231aff4435", "cfe7ee6027bf1edcb7c516eb713b2d68af6205e9", "04be04189fe77a3bbd108b8c0ef78d63b0bd5118", "99f9a3437fbdc3ca7a54d4c9eb6973334f50f51f", "4ebf84c6389e842e90c39850f0152671ba7fa0dc", "1cc902dc999103c8ed27559affa5cdaed6fc2c38", "239323312b22934a31f946bfa496ad9a144c2483", "fa7377104d6246a38e432c95e2179e53c2678fd0", "bb31312a7f07486676cae4f7a2ad7da43b0700e2", "9c87036509db08781d442567895f24ea38933912", "50c89813eab745dc170bf440cba869d3999a2827", "172cd5d213cefd99e93039eaf3d8824b3ba203e4", "5a677694e23fc36c08e3752e9428fb2b90ab0397", "a05a770bb2b7778e195a578006482926dfc1af82", "2ad0ee93d029e790ebb50574f403a09854b65b7e", "389db56845978baef0141b876774ea06cfb13e04", "3e6613d86fd9c814afe056838a75e89cc025224d", "ce316d2366ec1b95ee91a98b4f426e6c00cdcdc4", "fa4b5f663d5d600e5ae3cb85ba1d080ab1721add", "2e956e178fd50ab140f30f9255a83d853c8be210", "b3e2bd3f89e49833d45c30af7d5c923489b4d5fc", "6411c72a2da7180538baf316bac54748fdf2243c", "aa197b6391e9d683d87126a3a98126f89c5b13f5", "32eeba2ff1ef4259de7802c8ee8cecb6d6c581a3", "0e57f5098993d15cb04335f9eaeb7203e7ce7b75", "44c4b799010f3136f0e793db1a3cc972947b937d", "a93ecf7b9780989c709714dde0f93f4d81eea640", "2d68cac1bd2f18631051cfbd4a46b67be1a939fe", "60bfe2903ddbd90827b51791c3a004c60ec9997f", "4914f51bc2f5a35c0d15924e39a51975c53f9753", "68d1c9d5cf3c4530171d1e88fee777592e00e1f2", "6f1278204ee89978cebdd9aced3d9c31c1ad40c0", "18233c55982050292ba7f6a5462c0e7576c3398d", "f3a8e2eb5190cadd037b722e7c1190c2d1148f10", 
"58d080c29764eec7c58682d7c7aa74fd3a2c6456", "68c586e81f593904221598f7ababb97570dbfe63", "fe3672deffb7b761228e288d798031c33fec2930", "7d8141fd805da71af51205b9218e0768e9371188", "0ebc50b6e4b01eb5eba5279ce547c838890b1418", "deaa2c335481da0af65a88fbacac548fae2f5705", "131130f105661a47e0ffb85c2fe21595785f948a", "42ecf293be16049b307fcf6a4ba589f9630caf8b", "30c5d2ec584e7b8273af6915aab420fc23ff2761", "03104f9e0586e43611f648af1132064cadc5cc07", "9b14225c6ff8ef043866775165eeb6473f9fbcb8", "b3208346d45215e63d854a28ab61d39a0eb1ee21", "fce7c61dac242ddc992055a08a01e8b2d55f8a8f", "6fe4704e7648200aea1a109634f183d0207c2443", "29d414bfde0dfb1478b2bdf67617597dd2d57fc6", "fd1b917476b114919de0ae1b6a4b96a52a410c20", "1b5875dbebc76fec87e72cee7a5263d325a77376", "37a23e76674e606ce779131d2c93496e8a53bb2f", "422fc05b3ef72e96c87b9aa4190efa7c7fb8c170", "ec75a1c846194cfb0fd202694390c55e24a27728", "f93750b355f4112abde2cdeb0efca6dcf76bd1ef", "aeedc6b7f2ceaaf9d9cd8e327ca979128c1947e9", "4f00f5fe9d762009f524fb97555088769b96328c", "1e058b3af90d475bf53b3f977bab6f4d9269e6e8", "0ec2049a1dd7ae14c7a4c22c5bcd38472214f44d", "bd9834c57903db8d0095a92e9569317e5478cbbd", "f9d2e8f2038fb2c7e088185c0f1e05b841d8b8ca", "2b0d14dbd079b3d78631117b1304d6c1579e1940", "e724c9a69613bef36f67ae7ed6850b1942918804", "0c2772727b7b482f059cb624b4d4411d263e3092", "28226aedf1972af2008509cf3d1e7c6646c77f7b", "49b2545b8b9ed81cc547ec974e0b61d01b7bc759", "9454399b92939a650f6cc967d129a71c9ffbb59c", "31afdb6fa95ded37e5871587df38976fdb8c0d67", "2792e5d569b94406ca28f86c9999f569a3d60c6d", "11df25b4e074b7610ec304a8733fa47625d9faca", "42df75080e14d32332b39ee5d91e83da8a914e34", "48dcf45a1e38adbb9826594f7ffaa5e95ef78395", "89ceaf90a5cd5b4ec15e8f15b2a3befd81dd5f2a", "98aa4bc56aa6aa15735727a91bb3711bc90e73b2", "1e4c8ff205e6182b5c788f6b66d4d5b472470a55", "035d72b687247d8675e05538d4d0bc9ea4abda47", "1cb7936b82112de51c9e3b885735d6c81e86e1a0", "366ad07343e304ca3d04269d12bd49cd95f3f5c9", "2cbe913aaf7657dc53c64f03fc0e20b62aa369d9", 
"c39ef5554b9964f578572d403522380e95802650", "59f83e94a7f52cbb728d434426f6fe85f756259c", "9e759860762d40505f25d6fc5c4f4c1f6500d68b", "c6ccc6e1323bbed1479cb5cb0b75e5675b7cbe5b", "3b5d3e38690b4d76f77bbf90a45f17403d28857c", "ad0dbdd4b0eecf16cd93f8f1f261874f5830f64c", "dc9abb9857c54f6b178e2e08b6de3d4ea49a2749", "281b91c35a1af97b1405bc724a04e2be6e24971b", "286eb053f55e45ad5d0490c1c18f6d80381dfb4b", "7de8a8b437ec7a18e395be9bf7c8f2d502025cc6", "0ca66283f4fb7dbc682f789fcf6d6732006befd5", "2ad9a264cd44f42a5eb0f928ac78ba15e0d1cc18", "51c02f135d6c960b1141bde539059a279f9beb78", "adaff7ff015b4be77e8c0bdb9d002b614d6e2851", "8121824f4598d600e4cdb745cd2715e4655c9e88", "84e48da837978a0660184a0df7647e4b22b078e7", "85e8c103614cd330596c5d3eb69eb18dcf941435", "781d3550f54f3b4bfbd99ca9957aba6d6dec990e", "5409d9ff06ad715ee6996e44c88f930b9dd074fa", "11fb122efe711980ec4fb55e49bfbc03f538c462", "8a0d5ee127bdfa97e11f5becdb834505f8b5ad39", "5e4fb9b216657cbed1125b3be359ee482168c3e3", "06518858bd99cddf9bc9200fac5311fc29ac33b4", "eadbad2a715bd1b0822ec3790c65765c61924549", "70b742e0c65388c176d21e9f48aec00f4a5cb9ff", "da0b41c22e0918c99ba89a04eed4f8ed58cc1d66", "16bd796687ca17ac7ca28d28d856b324186628ba", "054b8f998b46660846e1b5001b5bfe70c2787397", "a3fe3e6e86eacbbc456152022af2c1a0c0c82f51", "a42f7038aed61c45b12f1b2c505c89757bb461a1", "4e6fe1ba927fe81a6f4529ea27985cc9431ed357", "8d152057cdc4eb0da992483e120fccce13075651", "86ec0e331dd494533e16dd638661463b7e03edb7", "ac11be5e2656f93c1b02b0222414bb3db8baddd5", "2a4b693127a28a2c56914bd2e5b99ea0f0883176", "e2b2812f1f43206ec4059b459745e724f22bb878", "8b29ee0a47efc11071ab8baec8369fd54970bfbb", "31e0303d98fd1bb6a1074d4fe0b14228e91b388b", "f7be808d5616325daf3097999ea6a9c4d84c268f", "08b25aa0cca422d3a896aa1fdd865a7e970666db", "5a8a4b0ec264e0959f0c1effcb9de4a74cf6b148", "e1773400a72bd7305a2c4f617b976c6f9b2d6662", "e73e78f95bb50770ea0bbd49b282ae762c296b58", "5194a8acc87dd05a92a21f94fea966a2815f9b38", "0af3c97068638ec2b79b93ff8b3fde9bd999f153", 
"fcbfb2744874fa709d72f6dfe37a1202789b1649", "de8657e9eab0296ac062c60a6e10339ccf173ec1", "810baa46ed829553bdb478dad2782cef2278ca60", "78cec49ca0acd3b961021bc27d5cf78cbbbafc7e", "b72bcf94a483c48558fc68a0d36336e4453ceff5", "f812347d46035d786de40c165a158160bb2988f0", "3d9d1f8075ebdd03f86b4e40b9a5d08447ade8d3", "a915feed51493490157e8211360d3bf53743e3c9", "1bd50926079e68a6e32dc4412e9d5abe331daefb", "5843b0d9346ea1fac1575f4d1d7795063eecaa28", "ba0ac513d656eef49666ea2231b516bab286661b", "916fbe5e8bec5e7757eeb9d452385db320204ee0", "7314a2d0251ab9d65c0a468c39bbeacc3d565408", "cef58d5b3e76a2ee1c157a6b1ea3329dc7624e6c", "fb3d3fae626ed54c0c5899c97a82215befb3df58", "294163a4126b3a886bf62ab896865ce3fc1147a8", "e96cef8732f3021080c362126518455562606f2d", "bac45e4797737f2c31fbb9e838be6841dabcd4ef", "442cc39db208a66acf3acc22589b13981bb303fd", "1ad4981e51cb55e1ff3ffeedabc7e6ac22e77343", "dbf2d2ca28582031be6d16519ab887248f5e8ad8", "b084ad222c1fc9409d355d8e54ac3d1e86f2ca18", "7f1078a2ebfa23a58adb050084d9034bd48a8a99", "449808b7aa9ee6b13ad1a21d9f058efaa400639a", "56f812661c3248ed28859d3b2b39e033b04ae6ae", "fbbbbc8fe255191b14608c6d877d9330d35273ca", "965faca4b89047ca0c90df0f12c06bc4cb9ec2dc", "b27b507fa7b68897adab421d942395e98519cb21", "d0bb66773b387ff4d75fdf61e0c52006057f553a", "18b09b3260aa110cf5b358cafa95b7440f26be7d", "24977d59a5de4eb597347bef00f0c097a641a8dd", "42cdfa8feefa47abbe849f13df17822494ea331b", "458713d5c1dd8ff95865302e51f0f8df22204d91", "865e9346b05f14f9bf85c1522c5aebe85420a517", "661da40b838806a7effcb42d63a9624fcd684976", "519f1486f0755ef3c1f05700ea8a05f52f83387b", "a3ccf7fa5c130c8bcd20cbcd356ad7a47cdd4296", "5dc003a75a302761778cb1c15d796e3d90dd9322", "5834555d239c27369e7a4167bb0c0fed725d761e", "c09cd44a4413de704bd74d825ca435b742b73ded", "fb7bf10cbc583db5d5eee945aa633fcb968e01ad", "3389fa2f292b72320f4554261eae34d57e2db7b6", "c78dfcc119e5a88ca5f4ce6787b2e99cd1e350b8", "3342018e8defb402896d2133cda0417e49f1e9aa", "d97e7799142e2c66b63fe63bc52632fdf305f313", 
"23d55061f7baf2ffa1c847d356d8f76d78ebc8c1", "d692a9be9983c4445ec3cf599d6ccaf379fc0cee", "2a98b850139b911df5a336d6ebf33be7819ae122", "914382fdda66162bb8621703bb83d8d69b261e86", "5304cd17f9d6391bf31276e4419100f17d4423b2", "8a23861d6d700ffa845dc85f2884355fa686ec37", "6a7ec333ccabd41b9d20f05c145b3377f6045f43", "e2a9b3e9001d57483acbb63dc2cfb91a90d3c12d", "f755d9b2b7ef66ffdf7504b34167b95d0685c18d", "79bd7fd2b40aadea84bced07f813ffc28c88bc85", "3ce8b89c34a3d22659e65305a36cfa4685643d93", "49e85869fa2cbb31e2fd761951d0cdfa741d95f3", "2b300985a507533db3ec9bd38ade16a32345968e", "9904026bcc39ff435b2ce8f5b1e80d735339ac5f", "bbe949c06dc4872c7976950b655788555fe513b8", "17ee08e549aef2a2c29b63a63444d6aabb37eb8c", "2491203e3b268235ea0269f41dbebd113d2a1b0a", "10156890bc53cb6be97bd144a68fde693bf13612", "9d5ec256ee5a6ee2d9602f651e88132f2669f690", "2254930afe019a0d10c9c0d8e11af8897ec77fa1", "35457de70ea13415b8abd3898a4a83021946501f", "08df9656391012c79e426ab4552faf2d8bef1e7c", "1e15c5cba95cbb475ddb67157fdd480f5253502e", "db6344e4f8a41c619573c8579595612a7cdfb080", "d7f5f0066cecaf8760433e7dfb0eaaaf61aa6ef6", "ec440ffc63baa8d150a49c678d99321ad2455c14", "a9e53a7533c9c743b57b6668c11be0c73525f188", "d0d56aa432da8fb92b2d5b3b5d83c3f05e349c6c", "1ec73ee49e422b4509c016ce244822144c849089", "d1864c668527825229828578cc857c9f5c1c684f", "887a1f28c52ec9a1cea0d8a68a82d0a04b3a0e25", "20daf06cea3dbc0b92f7ba4adb8fe7d95d27455e", "ad7f51b24f4325086b38b468e2057dd0a9725ccc", "8ab183883acba0501c3315a914aee755b5e517d8", "199fdc3c0b73d9469d2e732c97e889bfc8bf8bff", "3210666306517c4ef9a4c1a4463c728b0e3aeb72", "57632f82553f34bce21cc8419bc5381d50096592", "3b47e618c5ceb1c16db7f709dd1cfe53d7417b54", "ac9dfbeb58d591b5aea13d13a83b1e23e7ef1fea", "b631f3c212aab45d73ddc119f1f7d00c3c502a72", "a84ff30b8110561dc1c584e55ba56a991941b3bf", "3f17078de8d62246d2d28428d0d89a98a6652a29", "3c09d15b3e78f38618b60388ec9402e616fc6f8e", "b124116a533e58f342fe2547ee858c5e8f816fd1", "7a693500b5ac96f2f439989baf250e3305f69fa5", 
"1dae2f492d3ca2351349a73df6ee8a99b05ffc30", "17dcd3df2bc6e7c654ba412b2ce14c79f337f47d", "c1091cc8049d18d8fb012309eb87b9531be469b6", "8c2b663f8be1702ed3e377b5e6e85921fe7c6389", "e0638e0628021712ac76e3472663ccc17bd8838c", "7655bcbacf05a19698f2bcdd9cb5235b370e3618", "4dbfbe5fd96c9efc8c3c2fd54406b62979482678", "e4aaaf7034201fa94a9b9dc9bc8915cbe01c2c84", "1623b36a5e516dc9fff9c9dbd8101845eba5951a", "c6d71294962a74492ef190f6d4d33c068f324953", "0e4c87100aa7f585ccd969aa71dd5dfdf26e732d", "9ed4fb4f25ceadcc489305f766f26812077ca151", "f16e931dbe640660562e903c71f2676144a327b7", "8f2e83f6d70b9e161ad714fee79ed6d23ae2a93f", "c4a024d73902462275879fa6133bff22134fcc7e", "000115f877c3a3fc77736afe620a22500188f3a5", "501096cca4d0b3d1ef407844642e39cd2ff86b37", "b56882e8be1529717df8a5229edbad1d14f6a61a", "6c5c48066d59b429d57633b8716f9d69b8abb812", "3af6487da9a054f59beda393ae85718a1c5b200d", "5fcaa87dbde0c4ac437a6b674843927c70f76a78", "75d7ba926ef1cc2adab6c5019afbb2f69a5ca27d", "31c16cc4c5b9da82ade50de69ef834c7712df8a5", "6c7a42b4f43b3a2f9b250f5803b697857b1444ac", "f789bdf00efb74b376fa9050f7ae0ef7ee29594a", "9ea25c97f3dee29f7861ab4110ca90b4ec0af01b", "227094e85ae30794d03f3cee426f40877ac2b11b", "03c53fb96a9acd2ec6ba52a2497410f980793bfa", "22dabd4f092e7f3bdaf352edd925ecc59821e168", "3a92de0a4a0ef4f88e1647633f1fbb13cd6a3c95", "367c571480ac46d48be050dee4e6103a0ebb5db5", "1b90507f02967ff143fce993a5abbfba173b1ed0", "5f4c09cce4971e2d1c066cd344cb3cd807cf7331", "471bef061653366ba66a7ac4f29268e8444f146e", "4d101c064323e4e72ca8d95d1b345c4328ed13ea", "b04d06b737bc8e9543d5ac6a1afa33aaeb3619c0", "2fde9f3cb021bb5c958a65d5a29ba998e1cc8c17", "fe8b2b2a2ace6d6af28dc0f1d63400554c8c675d", "25127c2d9f14d36f03d200a65de8446f6a0e3bd6", "5ebb247963d2d898d420f1f4a2486102a9d05aa9", "37838a832838ff3211b358bc51ba5105b9d82e89", "89e23ad356db842dbc23446c062af2a0d735b096", "982fed5c11e76dfef766ad9ff081bfa25e62415a", "1a1a60fd4dc88a14c016b95789385801c6b80574", "bd14bfd765b15425c76f6fd4e83968f9f1097202", 
"a53f988d16f5828c961553e8efd38fed15e70bcc", "73f965d9b521117d35ff3ea7e71250ac5d8388a9", "ac21c8aceea6b9495574f8f9d916e571e2fc497f", "f510071fd7fdc6926e3958ebb85518bcfea17f89", "7a7d9cf8a6e28da11b71057948975fd179ef34be", "315a90543d60a5b6c5d1716fe9076736f0e90d24", "4ede16d1aef065a975ae6f9761e623313aac7295", "92292fffc36336d63f4f77d6b8fc23b0c54090e9", "5ea5d25b31cd59036859c9b9d54b76052fd6490a", "24e42e6889314099549583c7e19b1cb4cc995226", "8dbe79830713925affc48d0afa04ed567c54724b", "59c9d416f7b3d33141cc94567925a447d0662d80", "74cec83ee694b5d0e07d5d0bacd0aa48a80776aa", "88e093d90d0734b471ce2c1f58fa0ace6dec1101", "88e453bd1f05fca156697a9dbab86d0c37fe3940", "19b03e7f37e4bc2f049ba57056f6e687e52e1f5c", "19fed85436eff43e60b9476e3d8742dfedba6384", "892d47a6e46fb95def22bf4c21a79548457e045e", "170aa0f16cd655fdd4d087f5e9c99518949a1b5c", "985d032bd45c3b1a6434d19526f9209ade72691a", "db5d1b4c295adb24c8cb58ec995ce11b569cbb77", "ae3c78acaa1b1422d5d01e073d850ed3ca13c664", "1fff309330f85146134e49e0022ac61ac60506a9", "37e35bfa93089930788bbb3eff3d7386906d05cd", "ec2ee72168368537ddb0eaac50f9e8c1b1d52a8c", "a81d3be2fecbfdff8ed1d04dc6659fd7143d9cd2", "f3880d1067915bbbaa0c47a736f46f488185250e", "b246d41b53db2243b4d7c3b811a92000fda1a69b", "341633ccce0f8c055dfc633765d905c269e28f82", "82cf87b569d3382edde1872c66b1992dbaeed71c", "bafb8812817db7445fe0e1362410a372578ec1fc", "c11de1b8b46729f0c055d957b55f6801dbf03048", "468c97bdfb67614d458ba63eee04756add5631b3", "ccdea57234d38c7831f1e9231efcb6352c801c55", "2d45911a4f3cad91ef5374720b5ff90efd8bc3a9", "332d773b70f2f6fb725d49f314f57b8f8349a067", "24c7554823bb8c1c0729c4ece5f3e50965aea74e", "9ff31462ae749d830fc8832f5808b2a82d747587", "40012a8e480a1724cce1a71e2b8584332225492b", "de309a1d10f819d69a4ef2c26d968d3b287c3dd5", "16ce8a3c8928430c12db8261a660118bfead4912", "832f1639970f1842f0457434987ebcbedfa778dd", "88bbedf7f6f0dcc830640c521acece28e67be356", "5173a20304ea7baa6bfe97944a5c7a69ea72530f", "8fe7354a92b4c74c22dc0a253dfe7320487d22ab", 
"66759e18b1a1d53178fc79d8275e301e4d2f4ee8", "c9c1255057652584603945508b7151206e9e9069", "58dbdfdc229237e0ba23eb55653f670cdce459a1", "119bca48c83538c06e4989cbbd9e7d079033ba2c", "cd2221518485f829d3fad81e33ef4033ffa66f75", "3514140d9c2e692abed0aebe0531f78c250f5806", "9057044c0347fb9798a9b552910a9aff150385db", "6a8743973e24c39b33ff50e023447daecbd9f25c", "166aa92160f73201b3656a57f192a10685e9e759", "31ea778b6f5c9c2653eb2bed307ac7b02bcc6894", "31dc87637fc68db9f7a2196c9a95331736a5bf61", "5eff7ac2ca34fadc967f07245caebdfdb812633d", "b2ff5762f7b68ca9d947a374cf3d87e85fc7e752", "68b01f77c7c4c09c725ff8a6081aa20ac5fbbb39", "2d22a60e69ebdb3fde056adcf4f6a08ccdb6106f", "3a2f235fa82b41aee2a45194c1b159f777abffe0", "6f0dd01fd7cac346a82618982dd81516387079de", "052c5ef6b20bf3e88bc955b6b2e86571be08ba64", "4b1abc5b52db2ba854101b137d1fe3aed9e21274", "9541e80b15ee3e1793f2caafc3502a6fd6947b24", "86c1bf121851aa901e3e7eb11a3b8cc5a08a921b", "8e36255da222c01a880c9b88d61f139f7bdba62f", "4bb9c5f1ef1240486374f4f80d8f65921f74c1ad", "8d53a331b722bc9add4add1571c8ff9f66ce2123", "c4fed8f23bc9ff1ffc27edb12970963ecf2dead9", "fa052fd40e717773c6dc9cc4a2f5c10b8760339f", "d9c4b1ca997583047a8721b7dfd9f0ea2efdc42c", "a993500f229ea4a4a3162fea2cbbec9ba2a5e404", "9d1351af85dea756289a8c87f6d8513cdcac0735", "bdf5434648356ce22bdbf81d2951e4bb00228e4d", "7838fe67e3aa4a474abf3daff65ac20524d2958d", "55d8052477e599125442de86cf4b05bc6ea0fbf8", "f2eab39cf68de880ee7264b454044a55098e8163", "8106f23890cfd7c7e3d023a3f9acabbfca36c7ef", "23b48110cead14510ebb22dc388324466fd56c95", "2be0ab87dc8f4005c37c523f712dd033c0685827", "266bf8847801ff302c6f91f899f36269807317ee", "d94e77ee8da75691c19e4987369c546bec46354f", "8453d03942c2a96a9158ea8e7b23e023fb8b4704", "fa24a04f1e8095d47e2d2ce0076bf47bdd6f997a", "519f4eb5fe15a25a46f1a49e2632b12a3b18c94d", "9606b1c88b891d433927b1f841dce44b8d3af066", "e4cbe39daed8700a1d6f4a25a3a98645c4f231d0", "40492c5e4e7b790554c9a990549e01808127f625", "453bf941f77234cb5abfda4e015b2b337cea4f17", 
"34c594abba9bb7e5813cfae830e2c4db78cf138c", "5e9e72a4e856e38e4ae4f7625db7415a6d843912", "ebb7cc67df6d90f1c88817b20e7a3baad5dc29b9", "96e7142ab905c54c033696ac3692e85692c43bf3", "871e5c0f69fab2bf331420a038f2beb0e000276d", "5ca50e0cad9b066dab61644e3cb6be773c9d6136", "78250b9481690aeb2f558e69c9e782dad2bf90e3", "66810438bfb52367e3f6f62c24f5bc127cf92e56", "44b91268fbbf62e1d2ba1d5331ec7aedac30dbe8", "5abaaf9c222398caea40d62e45b81a847436b784", "0e64ae81817eb259c7802da39018757bc98116ac", "0296fc4d042ca8657a7d9dd02df7eb7c0a0017ad", "e5256d2798f997e666a71a20fcef35e2bf597349", "a8be27b214a75642d43c726b8f1e92aa8e4c3768", "05ef5efd9e42f49dbb9e50ec3fe367f275a94931", "01b5d63b60bcc35aa8bead42ea52a517f879bfc9", "c5f22a01fc1abce01393d2e3ed4b8ed91b0e3b4c", "8e88a97e09a853cf768ca1c732ba5f008fff77ca", "13aac86217231a7d118ecdff444ee07234fcff50", "3167c8a1d0415eb7dc241e395f55d559c43a99f9", "9b474d6e81e3b94e0c7881210e249689139b3e04", "2c34bf897bad780e124d5539099405c28f3279ac", "255971561c250d1ccee1402397586d2c7d0cd545", "92a25b281f1637d125cefefcbfc382f48f456f4c", "1b5d445741473ced3d4d33732c9c9225148ed4a1", "a8415394b782e45ac92a7d36f4af3cad47602b25", "cea50611ba73b5775cc2fe1e9c27990a0bb20cf8", "b11e97d5a12046ded77bc4dc0f762ac3c34e65cb", "f9b6242f8e9d5f7f9e31214444290f812889f946", "120c85cf69ea656b02262b4bc5761117fe35674e", "580054294ca761500ada71f7d5a78acb0e622f19", "6500d612b1391e5b02372cd45c7ea6143528c15c", "7c0d8e61d1c2fcd0c760f3df7e94aa38e52ef028", "73be334ecc48751269443b0db2629086125e69f5", "235bebe7d0db37e6727dfa1246663be34027d96b", "ee7093e91466b81d13f4d6933bcee48e4ee63a16", "73134cc8ab3cda6eeb7ac870ddf8d32430c48710", "dcb38b469b47ca409fda54a5f0fa3b8fdbb9e3fb", "97db1cf65b4583937b0899635b9e0f7d36bcebfa", "6f9c7a5d99a33c25ee384f760108e07f5923ccee", "3f7c4fb00be2124fe8e2e9d48caf86265b6471b7", "1a0e1ba4408d12f8a28049da0ff8cad4f91690d5", "d58f1d2fc5ea941253ff71aac7683fd3909cc71f", "ed968437157f6b2d55114e90059cdce746d7b090", "fb7c85b71a4bc838a1698d72542cc4833fc83784", 
"a012b41fc54060e11744db20ef6d191b290f1879", "897a5407c110dd4582fcdfd63696192dfd912e20", "d12d2f87517e3d63d62599a72752445819320c03", "7bf04c79f2659a404c9b9b91e0375e1450c3adbe", "76d955e83b1d64de95f37336322cbbca0019e3b2", "edbb8cce0b813d3291cae4088914ad3199736aa0", "1fe990ca6df273de10583860933d106298655ec8", "178c508fa49a488d9d0ef32048420bae58b4922e", "5996e84f482b7335cdb08ca218d450d37501e182", "0601416ade6707c689b44a5bb67dab58d5c27814", "4934fd234db9a095e51d36e738e706886d1dfa0a", "838dad9d1d68d29be280d92e69410eaac40084bc", "fca20ba6e116d8703c8c68bb855ec19e00fed2c6", "0f22b89341d162a7a0ebaa3c622d9731e5551064", "64281b49a34786912085396bafd67429725f1bcf", "1b55e82aa07b9e0009627e1d44a52b37ee26e24d", "dca13f5fba4a93039958abc7ef94ced7e16e5361", "bfb98423941e51e3cd067cb085ebfa3087f3bfbe", "a566780ffbaf2e1ee88a821be4d0ffade934c518", "30a4b4ef252cb509b58834e7c40862124c737b61", "4e729427f5cd4be22dad7bef0eb241e93497dbf4", "29d3ed0537e9ef62fd9ccffeeb72c1beb049e1ea", "eef432868e85b95a7d9d9c7b8c461637052318ca", "38b8f80b05db035f1ba9eb2e76629ce937fc956c", "8871ba2354e93b6d9b04b5d2f7582a5ff0888a7d", "90bc5091918e4b4b54dd10ef77c8628da75bb5f0", "f39fc7c420277616eea29754d0d367297c6f02c1", "46ecd7bbbb682abe153e469a5c8d4b0ef14b97d3", "c390fb954a07ecee473e0704ac065875121f6137", "2488e73f347e56a165369c5d7999f73a21283e20", "a409d6853571de9bee5e22c6c0cc088e3bd2d2ac", "23aba7b878544004b5dfa64f649697d9f082b0cf", "52274e5d9dc1d3501ee43cf741b835012aae574f", "c082afd5928165ccaf6d419aff5d0456d8ef78f3", "03e88bf3c5ddd44ebf0e580d4bd63072566613ad", "106b54ed74f0fffaf6408a9b847d4ac0aa0ffef9", "2502cdfb39474bf14407c2f6a1a4663d83d4bc0d", "d9c0489d0c2baa8be219b2bde51364461b53ccab", "7a7db5a1325844b62d2ecf8489872c8f515f1c37", "af657fade7bd78d9916bbf4305bee7170121d8ac", "aa1c73388a28f1d2c5b12134577644c9d577a54f", "854f9fb21853d1e50302dddcc1fd5c2e933ed8f4", "65a7aeff98fce04157f76a5fe6ad5ec10fc88e7c", "a93c3dc4efaa80382210f5f8395ac9b04a485f45", "61e192fae31e003c1d647a025cdf2b9529200835", 
"5dcf78de4d3d867d0fd4a3105f0defae2234b9cb", "e4a05b1a478a2aeb6c0b1a4a42f8bdb4f97122f6", "e486b9142b64bd18220b9b730a4eb6fae1e5e669", "4843c1873bd36b4702ee22b25e09b244849564a0", "28aa89b2c827e5dd65969a5930a0520fdd4a3dc7", "becd5fd62f6301226b8e150e1a5ec3180f748ff8", "19d00c90674de88c093c367425bf6820f3a7ea35", "363dfeb29263c6399365d86fcee97ed18f507768", "2564920d6976be68bb22e299b0b8098090bbf259", "5c11029cdf7876ca4507d27ea968a641ac64c031", "1ff057f2fb8258bd5359cded950a3627bd8ee1f4", "ebb139368e425d720d47a13dcd269014027b40ae", "01379c50c392c104694ccb871a4b6a36d514f102", "86b6de59f17187f6c238853810e01596d37f63cd", "cff145873a9fc6a5a836b6f348fe2721ad659605", "047f6afa87f48de7e32e14229844d1587185ce45", "df9281265ca87f83e5938b4b31efe0867c0ca3e6", "f909d04c809013b930bafca12c0f9a8192df9d92", "04a38470fe72ec113639749a2af5ff9024e62266", "75113a9e6a065e25cff08de01b7893825eb58641", "8f992ed6686710164005c20ab16cef6c6ad8d0ea", "312a261d15bf3f3ae5187069aed5bd2821f881bf", "92d6e4067acade472773ef94ec76e8586c4e256f", "3965ff8cee7e1542740d9ba5096ace33fc2f4980", "c32cd207855e301e6d1d9ddd3633c949630c793a", "5f4379b83102d1147876007e328e1b209e4b59af", "4f0aedbd0b5cb5939449da41579c93b98048fcdc", "d8e5362a16914e779a135a5285775be49d60dccb", "344449bc9491e7e8e2e45193e71c9a7219c094d4", "1535c185a01c952237ee50788b8123d503a0dd25", "3fb689c0f1db224d53d9fdaee578d3ff8522f807", "ae13485e75f5e7fc9a9659ce960c8b299c7b889b", "b45549a95120a744e6b882216f8a86481fedd255", "d88e3d5ca820cb240de4b662f0a6fd1172a678c7", "0e82489bd928af59b0e05eeea3e1ef952d9cb4db", "80840df0802399838fe5725cce829e1b417d7a2e", "564d4ee76c0511bc395dfc8ef8e3b3867fc34a6d", "4f42346ad17a4e05bac7a4b3f96480900bd00999", "01576b5fe525d8dee025fd3776337d74dacdf224", "520782f07474616879f94aae0d9d1fff48910254", "246b8daf3942f0747cd7b99710757bdd59f2472e", "0fdcfb4197136ced766d538b9f505729a15f0daf", "8820c003644f9073c6354bf3c82256f0ba7f97f1", "77a9b1856ebbc9a6170ee4c572a515d6db062cef", "4ce68170f85560942ee51465e593b16560f9c580", 
"a9af0dc1e7a724464d4b9d174c9cf2441e34d487", "3f9ca2526013e358cd8caeb66a3d7161f5507cbc", "0135747b4d3c9a2d983f7d0d9f4c39e094825149", "209ac2bac0f4235bea01216a987d87d9380260c7", "e8fdacbd708feb60fd6e7843b048bf3c4387c6db", "e8aa1f207b4b0bb710f79ab47a671d5639696a56", "4096bcf05731a107a41dfc38abf20c93d161f27e", "38b8f638490af5f2e3c2cc6dbfd855509ac969fb", "1602475fdbcb700f10d17c3c3e80ea92c9ba2c44", "a5db464f4f8e7b688de5dc53733845f1db536004", "fe8f3472982048290f8af9ada5d920a172d6c362", "13fd0a4d06f30a665fc0f6938cea6572f3b496f7", "50c919fe195775af1982d2fa8fd4f9d8c2f5cfe8", "e029d87365f706193e02c2fbdbe6dee97eb796a9", "c8279a389738f3011edc6e9ddfefb0410df380ef", "6c4e173fdafa89ac7b40e1dddf953dcc833db92d", "aef8456577768f2ff029107149c9c6713e8707f6", "ae8cc8db9e05c79adad03da64a4a9ba0b00f4eb5", "ec12f805a48004a90e0057c7b844d8119cb21b4a", "856cc83a3121de89d4a6d9283afbcd5d7ef7aa2b", "a9ebeca46445b8af728118b05e56d95d4985000c", "b2bbd23cda05b17c0311325227cf7dfa1ef995aa", "bd0265ba7f391dc3df9059da3f487f7ef17144df", "c32383330df27625592134edd72d69bb6b5cff5c", "c0e0b878ec8c56679faccb3c3f5e2ae968182da5", "5f0d4657eab4152a1785ee0a25b5b499cd1163ec", "a25ce4629f5adfc7daef2c531f56502e9a0e0b7a", "2582ba3b7ca215f1ab98c6dbcc0190f754c54059", "80265d7c9fe6a948dd8c975bd4d696fb7ba099c9", "ab39b26d623aee22cd43f78d9cb1f5e0e55808fd", "23a450a075d752f1ec2b1e5e225de13d3bc37636", "3d7a5d1fbec861542631fcb10f58e38f4f51a04c", "c6df59e1d77d84f418666235979cbce6d400d3ca", "c4b8da56721ff2e6cde29e8250cd4f71b37422fd", "a62997208fec1b2fbca6557198eb7bc9340b2409", "8d4f2339fcadc2d1ef2126a11dce08ce7cb75bdd", "8235f2f55ecfc80bd80cc1c2a1e8fe1e50bbec27", "aea7391a521dae31a4af4cc7ddfbd3c63c9e3902", "de26c1560db47f63ef2dc8171d7c2c52369ffede", "1826aa1a84ca8f78d319d0121e2650658c6bea47", "26e1e0d35fd00c5350025ef78c9d1a987e74f396", "b0d607d5e9e79540c9f2673f2224b2d51be3393c", "55249d73df3c38aca08f45a60ff54d9ac8b678a0", "0ff94e25a8ff3bd5c98899684d0885423fbe4f91", "9cc43e3a756485b78b991605f44eec9be3530350", 
"b5aa77cde8ac09bd821cfd34d0a0d1c12688779b", "e405c59d9e13c4d72050535f00cd3696ac004740", "e21702df3d0faf5615b358714b7b7392984d6883", "9babe1957e56fadebb32a64338d54fce794c7094", "48381007b85e8a3b74e5401b2dfc1a5dfc897622", "275b5091c50509cc8861e792e084ce07aa906549", "57ec50237ee588d3b40640c4f98410cbd996ee84", "38308a4fc038611797a5193c6d3abb593a6a3a37", "1d9497450f60b874eb6ecbf82e3d0808a6fe236c", "5f333a12dbf3671605bc3c715dcf08e37849e6e1", "294cc3d492a38f7a6886d55009286efdfb04395d", "17370f848801871deeed22af152489e39b6e1454", "95dfbcac082aa68a8fa97099c4fd46834c43153a", "4511e09ee26044cb46073a8c2f6e1e0fbabe33e8", "8e452379fda31744d4a4383fcb8a9eab6dbc4ae4", "599b7e1b4460c8ad77def2330ec76a2e0dfedb84", "907bb6c2b292e6db74fad5c0b7a7f1cc2a4d4224", "87cf55164a7cc676b68e84b7f39fcdbf7610ece4", "9c7032664c6902c5a936697b9bbc01f6446c58fa", "358ad3d1a9cda69d35a841edb8f819c69056f588", "10916d4eeacbf63a178c229868160189c6ce8850", "5d494e5517a25365fe204eaae3c3247f7e57260e", "202d8d93b7b747cdbd6e24e5a919640f8d16298a", "ec7a545ba99542b2b74340d2e863590e4f450bb7", "391642ec5ade3579654a14c3644af6f086af0158", "d54f508c943b8415bfdd30d9210869ec93ff3f03", "3127738c7a634d7b651405cb31fbc52ec7d5806a", "4e3983c2ebf5692c302d427e41bc1dd2ba162a69", "22e121a8dea49e3042de305574356477ecacadda", "34f9b561885198d3eaf8de2b6441d0a8aaeb9efa", "3283477ebd49488e1f3c78e6e828678ea2bb815b", "43d36a22629114e14a0952675e15c9c76f1f024c", "40f06e5c052d34190832b8c963b462ade739cbf0", "bb131650627cf2d1da570589f6c540041df1ae92", "39df4f8ad7add3863208a5f7b71e22ed1970ca58", "0f22251fa9c4bb120f00767053430fbab141fac3", "f6dabb4d91bf7389f3af219d486d4e67cec18c17", "51082f012c050dd8fa872962f93c5407a94f6daa", "90a70b38c5a1b40ac16e18628a7772923cdc5cb5", "816c8c8d0f02200f988625d4989a1b4b34d779c6", "72e417b8cb4912c0aa991de75153c3f732ab447d", "951af0494e8812fdb7d578b68c342ab876acb27e", "903727c8d2973c98aa215a1143f851847a3d5e66", "1d924f61190b0495e2031f2bb365f9a7ab2ea9f6", "eb92705f6920a5eca702066a0914cf74688e928d", 
"7c7551e47cda1822de672d7ed2e05b77572bc6cc", "2fe089ed85f3afebf3716c75925c8458baaed7f7", "0390e80ffde8a6e6cd544c6b91b19ec747c73637", "01a8c7335f0d9321c95d6a57f2dd9f128735f1d7", "0d52f1ae438a395fadebf04990d0d1750cdd0218", "c2b10909a0dd068b8e377a55b0a1827c8319118a", "66a4a03ea58792e4be90b20c60ddc65de736537e", "cd1a26f151cf6bb7aad90a7ca939df5bcbceca00", "d045f43bd9cbd5ad4833be011e3a33db55d1f7ce", "40a5b32e261dc5ccc1b5df5d5338b7d3fe10370d", "783f3fccde99931bb900dce91357a6268afecc52", "5dbae8c58341d1f6e475ebd03aa6b8abbc9d149d", "a7ec294373ccc0598cbb0bbb6340c4e56fe5d979", "657bea04a19eebecc4519030a26d6337d3e31840", "5cbc9bb8b7bf21b07e1eb868d718d132311bce90", "1c1aa29b709370f78cc485b14c18b89a53229b62", "ad9ecacca5c28b098096ad0cbd81fe84405924e3", "8f88bcf3b2e0fb9cb09240541d4b65bcdcd89826", "69064c7b349bf6e7f4a802f4fd0da676c1bd1d8b", "f1d7f33d0f769c2bf7ee91fcb266579d3677bbad", "746433b148d25bfbae9ecbc94e8db72580208546", "a513977bcd8cecd2ed1836bf91b31a80a1ebe27b", "05fa2ff1a6020a34cb4e0515a24f5eddc9ea0dd9", "3fb054dbdff35b7ac3940c167e7292c7646e1ad9", "815069f591122aa7b388615f944c17c7fa1eff14", "38558bd53b5bab485ca4abca35a0401c0c387883", "6153187bd47ece61f0d79a787e14130ec3e803fc", "d4b68168186cc48a59b491a4150c6b5370a07abd", "f6ce34d6e4e445cc2c8a9b8ba624e971dd4144ca", "b9c9c7ef82f31614c4b9226e92ab45de4394c5f6", "87f960948d6241700809105090aafaa680e5b6a4", "7a91617ec959acedc5ec8b65e55b9490b76ab871", "0a4fc9016aacae9cdf40663a75045b71e64a70c9", "cca228b47a603a9b9e2a1e3a1b278b35612d078d", "7afd34046a8a0fd6bd2f2c5cc25e3793151b28c4", "340d1a9852747b03061e5358a8d12055136599b0", "1fb2082d3f772933b586cca65af2099512b9c68b", "2a3227f54286d8a36736663781f194167f2b6582", "392245913bf63c8a9f44881628f8f3f587e08189", "028eb4f79eef3f136ad5e36930ae571df04cbe59", "fc64f43cdcf4898b15ddce8b441d2ab9daa324f0", "08de380a16259134da084ba859346d18a225dd00", "8ea56e4697430d1dbc728bad5a6e8ebafcced835", "051aa14e0b7dd4231636db39398c0c15b2687682", "09138ad5ad1aeef381f825481d1b4f6b345c438c", 
"0afde5f49ea236fe5e79f3ec9a7e7e2f9a306b06", "7e79c3a92f60c55a6970f89acfa152bcf74823e0", "684ffa16d86d82688edf52cc2e80b4021607b360", "059bf35362d896dddd5ebcd5b1b93682efa9f46f", "12727bb8a4a1462553a13a253a97c2569cbcba0a", "4aa1f51595a80d4c2db03e6a4ad407f71b35e6a0", "588041c603e5ce1cc8d3cfeae702a3439768ae0c", "83c332971c4534907afc4865179c2de30f2792c4", "4a6fcf714f663618657effc341ae5961784504c7", "b4007947e078d5ed204c79ecbeadff25e412b73a", "c0e5a471179d2d8c7025febe77a90c3a99c7c9fa", "6847a733a907eb215abd466e5bdf0b57bdec4cf2", "28e9cb6fd45dd5d6c0fc86a99c9bd4186a9b760f", "7eb8b748154167e8c8ef1e61b24e5369e057b03e", "0e8a28511d8484ad220d3e8dde39220c74fab14b", "f68e2e2e420312c1371c816f7c0471d43073ecb1", "12e80b3a89bc021a6352840fb4552df842a6fe7d", "72e51f8d989b4418974ca4f38c42ec10206eae0d", "14fa27234fa2112014eda23da16af606db7f3637", "d9208c964bed4cc0055e313353c73fd00a60c412", "442d3aeca486de787de10bc41bfeb0b42c81803f", "4d20fbd6dcdb4408dd6268951d86b92e8d96f332", "1176a74fb9351ac2de81c198c4861d78e58f172d", "bfdcd4d5cc10c8c64743fc7be7e7ad6709d93b53", "04adf2e51df06a03b6decf520b0952a54a538a18", "014043cd53e4faf203e8938f1f32cc494bb414af", "24b637c98b22cd932f74acfeecdb50533abea9ae", "e33bdda2a9eb3d9f99b334d5f75138aa1d880f31", "157eb982da8fe1da4c9e07b4d89f2e806ae4ceb6", "1332421c4c2ccd7460ffd72d696a41bb2bd2db52", "7713517d7b38daba56ca5da62e2a9c35ec39008c", "dbf100379671730692abb489d4aaa25f866ffaba", "e5462299c4783d16d88d0143731902e835fcc615", "853e86939d05b59b78487ff2a3c1d024a9ceb77f", "6daccf3d15c617873954bb75de26f6b6b0a42772", "712609494dd049b44ebfd82698b9305ef07f027b", "1e57ace361b941d9d210e59a9bbac7697b6bcff5", "423be52973dab29c31a845ea54c9050aba0d650a", "4865174cb83bcad5011155c27ed6e09c50e46765", "5bed2453a5b0c54a4a4a294f29c9658658a9881e", "c71f36c9376d444075de15b1102b4974481be84d", "1b92fdffa3f87c1081e88c41b5fb0d7d31b3873e", "0454a9acd5f9215459ff86b070b382065bc3f162", "0ad4a9fad873e9c4914fd2464404b211f295d7b6", "2d709edbe61a8616bef2a1f45c4d777b4ac1e96c", 
"4afdb1c53d0173030868a9fecee4c0216dc45c9e", "9be5e52fe3977852ff719998e6dbc02a76478fcd", "260fca0b9eb144fc54f1872b8cc418ae3fdce756", "46a553e670027e838716e5a1a39577d7cd7a4893", "88590857138505ee524f3adf6da9c57352d917f2", "4990824c5ff6c993e0697e272026438c4a05c3d5", "f179f7888934b11dc5a2d8ff9205d1ca8b8a1599", "794d344d6aa97e3cb67a44739207aa9c1360db8d", "74e6110466306f41f703d84bb3d136ba414b1998", "560447750f45ea18cb21f202e30344c4fe12c52e", "24585f90bdf30583733841f70430d36948f16ae2", "56e470c1a13fca16fc83937701f03fb74ecf2303", "3bcdb430b373fc0fafec93bdcd8125db338b20e4", "bd1d466299f585f2f67500a6ceef19008c4cb637", "ee04c6c9c672fedf39f601a466f64a98541cbe19", "60a33bcfe4b40cf46772e6aa1ead10489e924847", "b9d81aee3d1a3d8cc64c70d2185e6c27b94f8f66", "b196f95a4274533b7f931a509eaf5507358945f9", "d1bd956a8523629ed4e2533b01272f22cea534c6", "03cb9b73fec5af0c1142eb1356e842b70c854bb9", "96094d6ad0b88639e7563b6f43296a6eefa673fb", "3748a828dabc6b5292b53cec6080cef33d78d3e3", "76b9fe32d763e9abd75b427df413706c4170b95c", "8d8a7bddf8fa85e6a8995346eeb5c09107167632", "1cc3c5f242d885738e9349a91d4beba82ae106a6", "bdaed05eefddd2829c937978852fcf3cedc84620", "4461a1b70e461ec298d7066ba103deda48d4ba22", "e2c09f29ac989a40a7cf8a24b44db7281a188758", "0d06b3a4132d8a2effed115a89617e0a702c957a", "961e411c99d8ca6807076c4fb18e2d63a383aa0f", "31ffc95167a2010ce7aab23db7d5fc7ec439f5fb", "4883baacb405dad3635a5e564b68f3190140c79c", "375e478acf62eede1cc69693c54d81aa718df9e7", "e69ac130e3c7267cce5e1e3d9508ff76eb0e0eef", "ac86220efba082f5e9d55938ba7059dbdce87ca8", "6bdfd62ae9eb026dbc37f6f2db897fbee5cf8a5d", "a77008329c785c0d5d4dcb3d9c79073df85a9b4e", "faff8ca21ffbffcbc871f3a2f2f3938c7cff407b", "5150d0dc0f48ebd67e81f97786e96f766580a8f0", "2a47fefbe459e468efdda27a68dda9704270cfb8", "f8148fd099c4f6ee2af2575a572f6a26ebc628b7", "00533c79ba906cee9441654ada8071150e85f5ad", "63c022198cf9f084fe4a94aa6b240687f21d8b41", "ab032a95f579508609858abe96d97f4272f3db50", "9a0fd1341c567c23b8d5cfad7d6d2141b6313273", 
"753a277c1632dd61233c488cc55d648de3caaaa3", "f0dce694bd71e46e5b84512f06de046f83a8096e", "25fce91ce1b974865506c14d2e4714d8db2672d1", "3f67083cde69d50a47a5d6407389e7681872f861", "77967f001fbadab624318e79013df3abc95fa52c", "5bdb6ad866f52a3fa439e81a88b11d7a78904b07", "1bc23c771688109bed9fd295ce82d7e702726327", "f07449375fba8e786966733b2f092ac25e103ae8", "fa0363db4e4cb96c00a0ad200b1c6922b3cd87d5", "28d65e4d72638983fbc723b102d78b10587c06aa", "3c5aaf4053b12e802a6e17184aaf7d38d34dc3f0", "2ea47779a591d13e67bb6ff77af7b9f7194ba06a", "2a1deffc67ccb5f8ca5897ac3f31dac09af70f05", "cc03d60b73bc484da847b658ce8bf3abf3d47753", "642f19468ecf21cf97227b0d50fc1098afb37140", "5040f7f261872a30eec88788f98326395a44db03", "fb084b1fe52017b3898c871514cffcc2bdb40b73", "dca6ce475f6c2496a5f87e853d18e4b37fb27fb2", "520d586b50ecaa9753f714c6e76e6b819663d1a4", "2ae6bcd37f5aecb84a9222331b80c84a3c65e05f", "b19e83eda4a602abc5a8ef57467c5f47f493848d", "ad9ba7eade9d4299159512d6d5d07d7d3d26ae58", "f4815fd002f8b81f9f842a05e888be96e50306d7", "2d61bbf518a9a649405124c67084046ce132ef23", "59f325e63f21b95d2b4e2700c461f0136aecc171", "72fc4625f42e0b20962a26d203961bb116809de0", "b9fb66f09b358a4ce167b54eed8c596772a392d9", "27f9b43737e234cefb3c5cd72324a36cbe61ee3c", "06ce9ba74589ca179296318a76e882fe610b729b", "50076e679fcbb69cddd1e127165d87512d9b0616", "8b8dd053aae04f000e8b9d38f7397d7f1a3b5e99", "14f639a4c49280a4886f9590402d7651b1b042a4", "749ce4f00b0425376a884666d2f79e8c5191bea4", "0dc2fdf1b97c76de1e7380e8126f8acc7d87e23a", "13ac93950986fc023d45e9647197d80b86fa4867", "d040475eecdbfe361efdfc900d1cfd8671694fae", "fd60166c2619c0db5e5159a3dfe9068aa4f1b32f", "7e48711c627edf90e9b232f2cbc0e3576c8f2f2a", "85567174a61b5b526e95cd148da018fa2a041d43", "ffb15a1aa50aee4b80d062a59b3a5e1245dcd83f", "7a666a91a47da0d371a9ba288912673bcd5881e4", "2d7ce5b7e86845ddc2ecc05c2095445df264bb48", "3c6e28b3183e2786ac2004616e1190d2853cf40b", "5ecf564bc9eab26c96c17304744ff1029215a109", "913a6b40250ad229c7b5a7484fe2a92c0714042c", 
"209e1d36f36b8e7db3147b0e424874e54df9012e", "8ffc2cf0403d0c760708aef41a5868c2c464cea2", "19fcb95815e4c225b250f7deed9be3e90963933d", "38a75d92684122da464a7fb1f9adc8f6acec74da", "261c3e30bae8b8bdc83541ffa9331b52fcf015e6", "1512b9570669a9dfe2a252900ae94b276e3aff9e", "8682dd2caff6b658fbbc02dff9f37e95bc122a7d", "9ca14b251824fdfee090748e0a5c7052424ba06f", "cd1349619415202e82475353e2b2a60da2e5bd65", "adaf2b138094981edd615dbfc4b7787693dbc396", "7e5316031b08b8855e0d3e089b7b412ef3ba425f", "3d72f837e52c4c69ba996e6b8313a7ab4212a077", "a88640045d13fc0207ac816b0bb532e42bcccf36", "b0abf048d97a7beffc75fec1480d9bfe04a838a7", "69d29012d17cdf0a2e59546ccbbe46fa49afcd68", "1c05dc0f73f424561c488a282c711827047459c4", "9773cb8fff5e3735b34018212e83023cba227345", "12679cdcb4bc5e9c60a795c2418b40b5e1681652", "24cce97c3fe3c3fc21f1225e4a9f6c1e736e6bb9", "0fdb6823669959cb709fdb3070e7e5efeebb046a", "56a653fea5c2a7e45246613049fb16b1d204fc96", "334f267ed3bfd29febf3739caca3539374de9055", "226a5ff790b969593596a52b55b3718dcdd7bb7f", "a33edec6f5e544cde888409fd028b468a2e0bfba", "0095564f9e0afe920324fc75cf0b76d3f4825206", "5e832ea5328cdcc9b4346458672ad8288a56c0a7", "a44c8484151179f903499294e8a866b13f43792b", "b84f164dbccb16da75a61323adaca730f528edde", "3ea0bc7cd58d4214d4ed20e8acfa76054f73654d", "f15b8efe8b9511207bb1261e218a54bcfa20349b", "d8b99eada922bd2ce4e20dc09c61a0e3cc640a62", "0aa56fa873d27d9c83c001a6d0f7d1d3a7c901c8", "56d4eeb7fcdfd4f3156b9bdd20a9f35c995ebcac", "b9cedd09bdae827dacb138d6b054449d5346caf1", "43bf6489abd63992b82f2008b4417a1638955f0c", "108d44bee37d4190883a268274ff78a8fd20de54", "25110f89ffe3d3c49945b06771d0e27d4a2761e6", "609ff585468ad0faba704dde1a69edb9f847c201", "acd5b6e9bb6a87a028aeb33c805e352ad98ade02", "8536fd81b568b2c9e567adad83be3a048664ade6", "eeaeca3a601d65d2d978bf3da43ab42fa5e08ed2", "9995be96f645dba68e392028cc2c221643241cd9", "6f3e7cc380afb0a0e49c9c8d36368d31f77dfb48", "64a44e1d5cbefbb403811360a88f4d93e569ffbd", "efcedd5750f57f4c7f748783e91918e0f42da61f", 
"09fb440dd2daf2b93e36dd5df93950f0f3bda685", "72aa01cc6dbadc631407b4d2d0addec172dc5037", "d0ae349512127ae384e45d877edde04523a74eeb", "2cc2c2421963a2d303dfe1f7b87e70973a587a32", "00b202871ec41b8049e8393e463660525ecb61b5", "c3c7caee16dc77e5caf07890a2973367670b3511", "a78cf9ee6571b99c74e330dfe8d357e51fd8cd28", "c10870f23c9d464b40f8eb022987bab3ffb0b844", "3d2c89676fcc9d64aaed38718146055152d22b39", "6253515390d484e4a13b69f4f5d495f5d5e9c0ba", "0ef999f06955193aae575b2d00f00d136f546028", "2243752f527b502b848fb1385ab3bfbe8f57e135", "2bdadae8c95f365bb1477c91a38857d2fb413ef5", "072db5ba5b375d439ba6dbb6427c63cd7da6e940", "a5fdfd7e04f62ad04269c5fa08024c9143dc2473", "aaa914b97bc2e0f84847a982699f2021e31a51ac", "0760b9375db1505e9b9c182e98bb9579dd9197af", "0f533bc9fdfb75a3680d71c84f906bbd59ee48f1", "ced7811f2b694e54e3d96ec5398e4b6afca67fc0", "b73fdae232270404f96754329a1a18768974d3f6", "8e6957334ab60111fd7e2ae59b008a745223aabe", "c394a5dfe5bea5fbab4c2b6b90d2d03e01fb29c0", "3cc288308ef3f24cf3524c59f4746ecb244a8cc0", "b446cf353744a4b640af88d1848a1b958169c9f2", "f86e659495578b8c16e53e920262560428aef463", "a2bcfba155c990f64ffb44c0a1bb53f994b68a15", "fb62fae47f2ccef2e11eefb112765cdbbe4f0400", "519db7bb7d1778bddfbe3725220756627373d69a", "9033ca38cc650a6dc8f7dca967712d1e98aeceef", "f718ab6b96688e24bec4be15d66041ef24a94134", "ab302d79e419348499acbda4a627b67dec89936f", "a07e8372d2d2d0ad3fd7dfb7683235a9fe6aadc3", "67865e222e679806c9fb24472633eb96adca92f5", "09fded4954d2df2ccabf5812a0cf5040e627a312", "237316762470d72a02795a7f57de9279e9cda16a", "d15e93d223ab255a5d3ed6918004784a4a636fd7", "1462fea6c71be9c442f443488fc7c45e1840e9ed", "f6480efe44e9a59737c1223ff7413bb2727cedd6", "f81cdc95c14819b9b234cc1c4cd79d02292cd572", "1e8e3a954762f58501b970928071ed1b58b4fe40", "41ea92251c668a99d2b9a31935fc71e6b6d82b6d", "ed6003db58b67f1dfac654868b437efcef6e2ccb", "d411065a097587a9374dc5178e9f6f6c45b965a4", "5dc168fce0712bc5dee40aafc4404a1167aa78bc", "b732393cd3877f7e6d3cf3ca033a42415bd6db56", 
"b831a08a7098b64485587541485859c9213e6dc2", "421b3a33ec70af2d733310f6c83ad713a314951d", "19d36b11b3e99615691a2c8deb964d803906ffa2", "a969efee78149357ec109c1de2238a0cc670858a", "1ce29d6b820ed4a24da27b76ffd9605d5b3b10b5", "af4e324e4579567fdfb8d3216d5a76e9f94cc915", "0e64a202a673ebb9265d600d97c2ccff8acf64c9", "5ef3305303e86fcb1a0f8b7aaaacdc5974f3bd94", "9f5383ec6ee5e810679e4a7e0a3f153f0ed3bb73", "1b66b3b8ae9d1f2f673bdd12ccb0c326393ef302", "63216e4bbb8736c5587b41ebbd92043656b374c6", "68bf34e383092eb827dd6a61e9b362fcba36a83a", "4a24ed07acea711ca822a77f4b38311d4ad969ff", "d14538b0e5fc144c8f600591f75cfa977cc02daa", "336b2ae3e4db996538f930b754f7d233af56a628", "6c66d92fc116a787d39ca53134d23009c96b318d", "51a9de3b6ffd4b87f024cbde2157d03412b1384f", "6215c5713adeacbb33b9d1c4c739f2b0b50dd17f", "9d518344d5c7d889f9c90c6193be4757fa584770", "d8500323df235903919cd4270b02e2304a60f72f", "568988c7c3468b371928688a1414433809142079", "5872a8ae1879c3f20d94e7cc5a4fcef47b654c7e", "3bbf722c8d35bf3f7efb6361c5e2e58961f3d886", "fc8cc6c3f065daa92f45e6379f407c53ca31f375", "5712cfc11c561c453da6a31d515f4340dacc91a4", "07e6d293498c4f9048ee5a67ad32ca42d6af9b51", "056d1637fac0510146431a03d81de1cbf1147d65", "e85fa1b3225ed2ab0f3c11879efc9d55aa996d2f", "d797df0f60073583b76853de01aaf3ff41b3fae2", "baf420613e00a42c1f5215085f74c4e69782e7ae", "d7f76415d517e9105437372fd92eefde29e08af4", "2789920c91b54434da5aa9e68c5bfe943a917f80", "e40007540c4813c81bc8b54dda4dd6f6c21deaa8", "8ae02cef563120be51f8655e199a54af856059b7", "88ed558bff3600f5354963d1abe762309f66111e", "add9b7cce37c88df0c800f5c89fa94c3f7f879ed", "cdf0dc4e06d56259f6c621741b1ada5c88963c6d", "8660575ad46621b25daf7ca356b2d7c4f461e982", "333e7ad7f915d8ee3bb43a93ea167d6026aa3c22", "c2f83e4880b44579b7791516a5d9dfc175cb3620", "7aa4ed5cf9b33e0c3d90616d52c0f264b416db54", "2f25cea519db27f4c3b137e5d20d14f6e0381ccf", "0642189222e584e67979a27a4e178ac7175d73d3", "88f51f4e7fc2c963a4e0815862258fd7e1e05d58", "233ee51b2a874381c28e2c54515e5c92aec33316", 
"58d5fe8573aa6156d0b02a253ff820e0fc77f8c4", "03ce549e60833eea1a518081e93d76fad19fb9ed", "9cc495bccf03ef14580be951f8ceb36f712cbc7a", "4de83b6025526ef7a340ffca30626dac53d7f8cb", "4fc609df4e17b5854e3b7f4371e5f4192608eda5", "80f2284adfeead3d288664cd5a508cf4e5af13fb", "459b4d0ed3031e2fe5b3b3f176a5204dfb28157a", "7d2df9b943a666caa9154dbc1a0ba3dda8cf423b", "416087bc2df1e5150231d85b8103d816fc32a2a3", "ed2420d0fc7087d61633bd9a5b2907d1c2de1810", "acb4a2957e7131b442f87ac6e4fa2d3c1f31357e", "b2a33ad2ad1de7f7bb03d10ec4f958e5109f5744", "941166547968081463398c9eb041f00eb04304f7", "6121dafcacdba9aa3cafb0c2d4eb40544be88c3d", "722221f6c696b4a7cc094748aaad8158990ec41e", "e66a6ae542907d6a0ebc45da60a62d3eecf17839", "206fc605691939ae4e6398cb567352c2d1446bfc", "79bcc554fbdeb36c1d7d8966522012acf803c999", "2632bdc0add112b4ced6cc5aeb7ee37eb3d10db2", "264837a7c36ac409119cf71b22415d5c227a1870", "2e239dc61cf34f4de9ff2e055b96a012c9dc02ec", "1ba55051d3957895d77257cc9a5885068fb2e43a", "a125bc46fee1bd170a0654b8856d3b78d62e9d29", "de587631aa1357b08754b65bfee7b3176a0c0a47", "70480ee0e636a77f6289be98ae39300a584808f6", "c82b3e1cefdecc1fe60f2aa9db46479462629918", "33aff42530c2fd134553d397bf572c048db12c28", "bc77cfa7ee1d16c9b97052130c90f824160129a7", "e2acf0d058777458fcab6c7043429ff6e229a3bd", "2710e1c58476e1996466530af825de6376a92833", "1bd80812c58de8cb0127aea915a45ebbff42dc3b", "7d841607ce29ff4a75734ffbf569431425d8342f", "e474ddf96196f464dde83b2f018db9e1bcfeb87f", "df4a6ed5428c26b8d094f43efc255a1b4009c355", "83a715e33a789104d4c21773555e66b913c32921", "50765d4b5c9246c19f0839f887a7ab05c4757d39", "49cfb17f3bedb0167a62b6441c6b490b18e5dab0", "fc6c5ef8d97d0c4d7f2a8576f8f5fe8e0ed83fbb", "3622877ffa6179d876acd531feb14743c8760810", "3c3915c5b550dc514d308122beb9298afd32995a", "c003283920a77a42f2654a8f51a142e2ff2ab80e", "03fd8ab33126baefd0f1b84f4198a44be68dabae", "43bce3ce3c2edf327a1dbcb2ee34b743b8ea2fa1", "790aa543151312aef3f7102d64ea699a1d15cb29", "0d7652652c742149d925c4fb5c851f7c17382ab8", 
"9e79b3deb36a14483609fb775277c1c9f3fc5944", "13a94d8f5eafbbf411c9a70e1b30937a532664ef", "3f514fde39c78b98d4df0afd36bc9260db5f1225", "489b7e12a420eff0d585f3f866e76b838c2cd275", "5cbef6da9c2cc630722f1e48a59c3aa84a00c44a", "3c2819dae899559f1c61b3b34aeb5d41a6398440", "9630109529870d142fde01341da05967484e906c", "db82f9101f64d396a86fc2bd05b352e433d88d02", "c3203e18a11be454d3124748a48350f5b741722d", "cb4d8cef8cec9406b1121180d47c14dfef373882", "94ed45f469390a58bcf95fb6725b6715743eb89c", "18f8aa212165426c3862ca65bda1d69c655ec273", "3c77e4ce48d1bbcdb682cdc790806e2d5f2d2e1a", "1668963ea05061a70aa941e60500880de9e0c163", "88e6002375b534d88d070f45f466a0fe4db80360", "4264342722c48bb334d19b993400c5a133819e51", "805a0f4b99f162ac4db0ef6e0456138c8d498c3a", "fba539b41786f837acc5e5d876aaa7c6f3fc376c", "04e7a4ff6635552183e6a07b4ba7b415986b758b", "120f269cbbaeaef9dfe1de0d28ba8b39cd05b618", "07909972d01df8edbb4b9c9c28c46a1c71556549", "a4cc82d09d5c8e03f2a279a4eb44a9a9ba745cf0", "eb4515c58ba7446797b016be9db64e800c2fadd2", "366c14f477bf2ed16b1498d1c56a7e1f2af08e69", "a8c8b7360c45e25ed175828767424bf9690af322", "bbb274a7f79c94eb2862ca99dcb23de43b9ff8ae", "a9c7d8a704b5a30980ee400de0e8a2235221e0fd", "4b0111182ace7443f060a64754ca23b2fc7e1d77", "87a837ebe3b4770165dacf0738602b42902cb7a5", "00d8f67ac0ea0bb2c9827b60e1f47c300346cd7a", "0b4453df81091bcdafedc07b64bea946bf3441b2", "78345557ff69e20874dfc9e36a2defe92b6abcdb", "41af2f897d6d9f418d752898ff767f1e2a521b07", "8cd4234fbbc1a7a113723f70c592ee0351f974f8", "49396502143e920f7208bfd27202d6fead39992f", "95e7cf27a8ee62b63ed9d1ecb02a7016e9a680a6", "dd93611ab7fb697d92e97749ec09a2fdbd86b7f1", "5ef25b78dc947f1f4674da44945b050e3f4b9e17", "2528022c14428ad5912c323f6a356009457c985b", "48a71b7fbc317134624eb7019aaf59853d288755", "3aa9c8c65ce63eb41580ba27d47babb1100df8a3", "577f009d44839e279732ff901ebbb6a09dfb9fb7", "51245ec3a3d21f4b560e624064087b4625d61bd7", "65639b79576f22b705a601f062bb6905f0a396af", "13a3b4f65a722c537912f698453028fec8197e13", 
"36665db54fa91a07211bd3904924427bed7f897b", "2986a47f45f9975691ce81e332f4c8acfad4ca3e", "4bc31b1cc8e4f0204a02fd900ed0ef36747e5b77", "d665213b59f2460faf171d3b03ecd9c96d606883", "33891ca0f8fab0eab503f4b4bcee009a1cf3b880", "899e9e5fb53653b76a906709524945c56aba61ad", "4704c8e6330352f370107a3e9ae297a0e77f5543", "7803206f024ba6887d93e8aec91dd0097ffc5165", "77d4843a177031b2b5721824280033e2e601334c", "43f413567f533497242028bdbc369ab0e7f6577e", "40c3ded33e3737d7f8b75eb19c91b3d90c18f24b", "1550518a37d58e708023b9a1d457940a9c465717", "b1c6182f7886b04393a557f368360fca2410ca4c", "364584f8313e7601b1f5134d371e98aeb61110e8", "9cd5d74e3db99ae329aaa30e6716653e9580cd76", "1bc075fd7cc4bd847c9dbf4e990f295954b89fe6", "2ce2560cf59db59ce313bbeb004e8ce55c5ce928", "7870b08a65c815b49e554530c93936380833a325", "bd5593b47b40d7f516c3807a13e5c7327a934742", "2d51b52b3eeae8877d1a76ca564a35b8e5051c9d", "9168b36568b8abffab5b9de029be5941f673dca2", "ed91762544810590cc7eb3b84aec0bcdbc206465", "69dfec15a45786bd37474c8e059bc1ab0a7c8c25", "17d84ca10607442a405f3c4c8b4572bdd79801c2", "1521c815e67572f3a90e44b67675b599bacdb687", "9e6c15150179ce848402e89bd245831d9935f4f9", "16da7c95c218e9e97eea7734d6c243e8b825196d", "2c5ff99e7e9769677df3eeab9f198e3ead016c35", "8b9729519615bb8576d532c9bed799cb5ee6b06d", "845d7e0f5b477e62b484a7fd628e80651167d500", "dc1510110c23f7b509035a1eda22879ef2506e61", "7fa41631cdef8f7fba7e1289dd4c5f3723b172ab", "c82af405f945b99f59b324a1d07863d4998b0786", "520ddfa86afcdd99a7c6819473cf5fd80c2bcc62", "c7ee0eca1a9af0a2ce31d5a745e49a638bd5d59b", "fd451222670d2f185ae3211b5450fd6951e6af51", "fc8b04762e4c0acace1599f0bf0e73f530f0db43", "4eef479499b6c3baa546b9070fa890e11ec432bc", "03baf00a3d00887dd7c828c333d4a29f3aacd5f5", "b5940250c0a136b85a4706b1bb13f52be0037837", "5afe59aa89d2faf093ca83256ec86bd50b8cb5c6", "1540076e192ad448771f446c908db76249809d8e", "6983b8498c634cfa2d58d811fec4e06a6aeb573e", "2fa2a186dfae16958bb3bc8752c57a749ccb4f41", "cedc0927edeba2f157e6ad1c356e8f6487fbb6f4", 
"70bb5c2570673eae86a3f9ced55c7ef00e0be8b5", "5807932dbb95e9def833e98799554b62b66eed2a", "6caa275cc502513550bde0a32416a3b32470161b", "39b080aea9b342947058884ca25fb5bb1b8f6d66", "7de3a280481d5389deef8c4d49a3920147889835", "51edd6f87c3b8bcc228849e3456522b308596116", "a9c120de41679fe336e2779f3e6fe4b04945cb3a", "abba22ed4713a5ee5fa91fcf7b8dde58a9b621db", "2a152dae1ba70d0cc605b0f7418392ed1a294a4a", "24da9c1eb30ed5ef0052f760d5d847bf5cd1d2ba", "a964fbcfe85cdccb86e8c44d027b7f9e96fb22b6", "75ce75c1a5c35ecdba99dd8b7ba900d073e35f78", "cbd4713a000be4c0b47cf15036420b2d32b8d64b", "f367ed1edfc0f22316de7c942f0046315624d285", "abf5562757150545dfc1d6a552d84dcf766374ff", "75a3f622f273450d020af5bc5562a69a9dc02b77", "37faa075574e4abd323ff5ec85934464a5b93a83", "03a6cc09984669e3e85c779363a93ae7c7b5f124", "5a9126f4478384f6615bf57b6da7299dc17b9a6b", "401f056e1017151018e83d2b13b5eaec573b4dbc", "97572483020c6a6daab9acba4845b87a3f47fb6e", "5f708ce777ca58f6b1a9900f21256279defd5d03", "fec9fb202906e6f136ae92c3a3540b2a84257c4e", "d724c43d8f17e76a1fa73c3b685dec9308d106fd", "c7251a6455f405153df0290c25c25c085fda66c9", "5c2a4e1db32a6afeb294920de1999210bf1bcc6d", "5517b28795d7a68777c9f3b2b46845dcdb425b2c", "370ed90971eca7ad84c67d8804f97e02ff6fd5b4", "2a75f34663a60ab1b04a0049ed1d14335129e908", "d541986a647e7ab10cc8f882e1a1f5e6d725d8a2", "097451188ee94fb706b9f39c0f2a9dcc97bec1cb", "eda501bb1e610098648667eb25273adc4a4dc98d", "6960bfc668aad1b537fbf3f1b48328e7d440b80b", "2d98a1cb0d1a37c79a7ebcb727066f9ccc781703", "6e8d9202480e3c52ca5a207a9ab7841deb8d101c", "78e076efc67a1d02339c6c42d5da570af374734b", "b0c820c6996b9cf9798d778a46860d28f1beae64", "54b63296ff114133ebfd8e01992a69099deacc19", "a08f09c5923dd2a114da1504379e57e8eb87ced6", "968f472477a8afbadb5d92ff1b9c7fdc89f0c009", "176a507ebbfdc0fad141da14d30d89caa35bfaf9", "b11b71b704629357fe13ed97b216b9554b0e7463", "e97f4151b67e0569df7e54063d7c198c911edbdc", "4f4c067e684252cf5549f60036829a89b2f35fc8", "eece52bd0ed4d7925c49b34e67dbb6657d2d649b", 
"071305535de1c6f6ac820fc0b3a5f1e67261869c", "af8f59ceed0392159c3475c58af5b7ca8e4f6412", "1c006db94629ef30ec75377925e4b49b4aa8aed7", "d604af9dbaf6d8e5e94dc65bda076fdb609c692b", "434bf475addfb580707208618f99c8be0c55cf95", "487a097700f06c10c990e9150830de59008712ee", "d3ff28a5660f709ac7b5aa0780e77676b62de4dc", "99c46753dee61203dbdcbe65d5a19d4cba6e9b7c", "4747f4e167fd7b30090fffa650be3a49b564364c", "a4876b7493d8110d4be720942a0f98c2d116d2a0", "a9eb6e436cfcbded5a9f4b82f6b914c7f390adbd", "d428aa4c1c84da422f8c99eb0147a49439d16f0d", "349c909abf937ef0a5a12c28a28e98500598834b", "3be8964cef223698e587b4f71fc0c72c2eeef8cf", "b02fe7db60c2973ab2f9cd42746a34d9ce34da1a", "0ee59e5baed4271ab85c85332550ca1539733a19", "45836122e1a8ed3de05ce0446a93a5adc5ef84ae", "81513764b73dae486a9d2df28269c7db75e9beb3", "4510faad196b34f70e5d27f012840d4987627b37", "45d4b8a1bdf306623ade653faff0c2be4d84960a", "24539a503e4f1686aa0daa775e9b50cc97953888", "d950a5293f68cab6602510261e858643fd7f1966", "9cbb6e42a35f26cf1d19f4875cd7f6953f10b95d", "9b30771968b577ea1b71c0cfaee31f3824bfa027", "395978c1dee9fd75bbcb249e74ad6fb4d3c2b9fc", "053ff27aba868c64823dbbe2167a762dd3f33b53", "6625349c4705d25844ab6eb019e5962b012e9256", "aef59def2a65901de9d520d0442b42bb4a448f06", "7b4e0a98dcb4ba34afcc5901f51384ba727473a0", "45e81d04d01ef1db78a04ef7a9472fd4cd6de84c", "d65f11b44180d9997ad5ba6e6970fe4874891f4f", "5ec1f7dcce316b66bd107c45a50c03f106bc7fb4", "b8ebda42e272d3617375118542d4675a0c0e501d", "7d45f1878d8048f6b3de5b3ec912c49742d5e968", "1d4e1b4f37caf40dc70d211c6b2745195dfa6c3f", "0c48138a513ca458030cfcf74095071eb99082d9", "482a31cd4705f3d56e468cc33486847fc100f568", "ea8cb4a79b211fb288f747bdd64b3fc36e11c0fc", "1f8eefd6dd2f20fd78a67dfdfe33022c6f9981d6", "10af9d8f5895e9ff26fcfce779f9a1199ceba529", "2acf8de76c3015914131938253fedfce110720ec", "ec9976aae8042a6ebb8a823510c757a93b11737d", "ea7fd75c904430d6c062c770d9a740fd0aa8751c", "1e2087908e6ce34032c821c7fb6629f2d0733086", "5e9a6357fd7de7271dac77756c3992dce260eb49", 
"1061140c5177193585900e3a8a271366c0e48a43", "19fd089807f8925b9384bae6e66cbfe7e6d318aa", "2c93c8da5dfe5c50119949881f90ac5a0a4f39fe", "0d30a662061a495e4b5aeb92a2edfac868b225ea", "ecbe33456daa76b27fe83eee8dbf7a0b827540ed", "3aa53e8bb0a1a7e6d5fe4de146af92cd816755f4", "c9fe5df03f4de042cad42e11560304b886161a3c", "ba502e59fb58310a3e8f8889f02ed9e782e27c75", "73891f78310a6846da559ef36cdbac905961e4a3", "0fb17e7f2bb70ca6ad66bb13599fc6a33be9916b", "e1e116c20a943876d25e36987fc25885920b641b", "50bcbdcd9a21b88c2c3e640894081d1e225a5b80", "8d03e982bac627dfea7a785a597d5946c6b2c4bb", "819a321975c736e006870e76446d581e195cad2e", "a243f1c24e759e3f22dc0c1b2762bf0a9a2bef13", "de92951ea021ec56492d76381a8ae560a972dd68", "fc68c5a3ab80d2d31e6fd4865a7ff2b4ab66ca9f", "2ac21d663c25d11cda48381fb204a37a47d2a574", "5ea5a9f6fbc7209355cb98fc70b177326e0fe09b", "4b0cb10c6c3f2d581ac9eb654412f70bc72ed661", "dfe2d36ca249876e5ab5500f155e3a5094dbc170", "35b6a0d001de8d58d2bcf5dfe8922d59576a87e2", "d537da9292589a64ef62f0ce32b28613d1fb3e51", "525da67fb524d46f2afa89478cd482a68be8a42b", "60d9189464cba1751c96b509e4a877b7625bcd35", "bbcbe1312063aece3585e41f63a1491eb3637ce5", "52ffe694e8d6847deb4579e2e8d7ca544643b9c4", "40c97adda90fd7d678fb3e614f2ac3acbfab6d9d", "5d7f8eb73b6a84eb1d27d1138965eb7aef7ba5cf", "384112e458d887c036fb313953a217173eea5f93", "205f035ec90a7fa50fd04fdca390ce83c0eea958", "eb5b1dfe580722c0fc5ca202a1259b0bd62354c9", "a702fc36f0644a958c08de169b763b9927c175eb", "f5a95f857496db376d69f7ac844d1f56e3577b75", "2e535b8cd02c2f767670ba47a43ad449fa1faad7", "5b27999b3f066137de537e78113faf4bd942b7c7", "923412acb90ed2acbb29290147a567f39d2dfc95", "1b300a7858ab7870d36622a51b0549b1936572d4", "ee4fd1a1df6a01e7dabe82090b1024e2eb6d78a1", "9a36bbabea698a9fe0e11e2cf77a013dd7769f42", "807913b776bc5039cd3f195841419e55979ec7c7", "18269fcaba9feba85552b039a9052cd67e6d9c8b", "98f7081ed27e250d1f974d89377d1fbd3f78a347", "3e4bd67a10e291ad6d5614a6e97efb69b2dd051a", "1fc952fef09d63c61b9b8828f872b7a018eefac1", 
"02d2c2b0ee77fec8d85c114c20fdae318e95a1bd", "74d4224989b5937ee6c97eec1955e64ab0699f57", "0e4ad0e373eecb81ec3e171c42860589589ab1c5", "8edc48e7a110f176ca08c26c0085c4dbb4146c5b", "10ca2e03ff995023a701e6d8d128455c6e8db030", "1f6576ef2f8b986b44f06bb83b4238d1ffb6c990", "44062a8f6da687b40fb393dea7712f6fd677c8d6", "c3270b25c030790fc3100861fd06bfe3a7972542", "bd96c3af9c433b4eaf95c8a28f072e1b0fc2de1a", "826fd96d3f612fe1f6b3d8d8c77329645f29a318", "2ef0748522a98f3bd06d7819410fd084e34031eb", "db150d158ca696c7fb4f39b707f71d609481a250", "2a94265530b940b5516f431675f882f83581a745", "237a8db67c7cbf7a33a5d6b442ac89f2608e4346", "4b4ecc1cb7f048235605975ab37bb694d69f63e5", "f3df296de36b7c114451865778e211350d153727", "3c2bdfc703a77ecd0a991b03e620e8a911d5f8f4", "a9fdbe102f266cc20e600fa6b060a7bc8d1134e9", "d453e0d308919867b5814beb1394cd0cc1cb2378", "aeaf5dbb3608922246c7cd8a619541ea9e4a7028", "1b781faee797beff41ef67703dd80bd6da3c8b23", "f397b8c835425e4b18cc7d9088b7f810c6cf2563", "1db51b91aa2b98e91fd5e440f829a084e9932b8b", "9f43caad22803332400f498ca4dd0429fe7da0aa", "18001ed8ce46cf9df5574b1e360550ed9401cd76", "224868cc607dc38b7eca8536018580c577f9fedf", "3a27bdb9925d5b247868950a9575823b3194ac8b", "65bf352a3bafb9320265d8a3cc019a6f20156acb", "69447482c6d7d0fde4001231ca84c31f866a2d5d", "75bf3b6109d7a685236c8589f8ead7d769ea863f", "27eb7a6e1fb6b42516041def6fe64bd028b7614d", "69ad67e204fb3763d4c222a6c3d05d6725b638ed", "741facf45a9fd4886b5a7affd07406098253c327", "7adfc2f854e2ea45c29d22d6e2dcccdd527f46a8", "5531e728850185b80835a78db2e4fd23e288f359", "160259f98a6ec4ec3e3557de5e6ac5fa7f2e7f2b", "f991ffc087de855ea879a10044807f79949bb0fe", "cc2a9f4be1e465cb4ba702539f0f088ac3383834", "78fd41d5138b2b920d5b0e151da89ea4eff7560f", "3abfab8740ffc66c0c191ce32ce1240062620bea", "83e8bbbccb8613de490b1a362dd3fedc411cbfe0", "240d5390af19bb43761f112b0209771f19bfb696", "040601d28b683c3c8b48b29e93b6aa3c26dbdf5f", "25147c7010a008f8210deceba6470b9ce30e51c9", "338d88d38795c154caa87460b99fb3d016257054", 
"84d665608c7c005d38290df392b0ba0157ba32ee", "09251a324dc4865732e2ead50334bfb906f8ffb4", "a4874a54a2afd74d2cbed50f2276c91c49f12ccb", "e101bab97bce2733222db9cfbb92a82779966508", "dae144d7b02aab7338b15d561ea18854df563cd4", "e9ba629fd9533131735e2305929faf0c2c46538b", "944671287862d741a96a4026600b68f4a7976ee4", "6b59716a193d3f91f88277e4c8a0f4cd0b6873c4", "031532cc5c4e64e02e796360a16f89580a0ba552", "81aad254f9795040111f7319c862a04cfec16472", "1e4822f64f105a3f27888cc463e7e49e95c1e0f0", "4ed40e6bb66dfa38a75d864d804d175a26b6c6f6", "bf37a81d572bb154581845b65a766fab1e5c7dda", "b2d9877443ec7da2490027ccc932468f05c7bf85", "0f91e3e67ec5a71a6c29b9ea0fc1916b46a09b0a", "1cd3250280a0703ba57bbc357287a7213f901b7e", "a1b1442198f29072e907ed8cb02a064493737158", "58a5c2f9f60bdc6ab640767cb21fd6ba04eef5d7", "51dc127f29d1bb076d97f515dca4cc42dda3d25b", "b255474d62f082fa97f50ea1174bf339522f6c99", "3f114f494306cc17b6c3639c28424516ef4ed242", "5480bfe964e85770615a73837e5451888bfaf689", "273b0511588ab0a81809a9e75ab3bd93d6a0f1e3", "a24709b9b2ce01a6cf3e4b8d603533d66f411916", "706236308e1c8d8b8ba7749869c6b9c25fa9f957", "a313851ed00074a4a6c0fccf372acb6a68d9bc0b", "03333e7ec198208c13627066bc76b0367f5e270f", "7b304d7771ab7eb7734fb7428087d80c83accef7", "3ad56aed164190e1124abea4a3c4e1e868b07dee", "e260ce226de2c945967a7c8d8363f22af02dd2bb", "379aaada226a3629408dbb223c7a7252dcc425b8", "3e3ba138edbcf594cd0479ac2cddd5a8e3ee6a18", "39f48090df19dd0122590ef839226f8b2bcbe609", "47fc921add1421ff8adb730df7aa9e7f865bfdeb", "1e516f45f87a94ceca466c9a101a01720a535117", "703890b7a50d6535900a5883e8d2a6813ead3a03", "61a7aae4f90ce5214fe899647e58e803b70ba5eb", "c7abf02d3cce31d44641580b217ca39721f0302c", "cd687ddbd89a832f51d5510c478942800a3e6854", "952283f21ab30a1026b26911c160433ca147bf8c", "38ede8e62e82d5012b3a165e55c9bd84442967db", "0f2a910f98e9955d2fbd4841d31b4943b91ab382", "6f7a8b3e8f212d80f0fb18860b2495be4c363eac", "17579791ead67262fcfb62ed8765e115fb5eca6f", "1a8341f9e85a4ca2c8f349d050176c505f204b72", 
"4e5760521356745548246b1cb74c8d69675d9923", "3168e52567d564f0871c3f9ed7757dae9d66c55a", "7c13fa0c742123a6a927771ce67da270492b588c", "bd9c9729475ba7e3b255e24e7478a5acb393c8e9", "e23bc755f7e161d524fcc33b7d927d67dd4a5e76", "8e956117b2e22470814778fed6f4641e475efb44", "30fb5c24cc15eb8cde5e389bf368d65fb96513e4", "4d45612c41d3e27a30a5ec64e0d8e2362dcb6b73", "2d411826cd7865638b65e1b5f92043c245f009f9", "d6102a7ddb19a185019fd2112d2f29d9258f6dec", "0697bd81844d54064d992d3229162fe8afcd82cb", "8a8861ad6caedc3993e31d46e7de6c251a8cda22", "666300af8ffb8c903223f32f1fcc5c4674e2430b", "68070526920b387bfb91e4753d57d8e07fac51ee", "27d709f7b67204e1e5e05fe2cfac629afa21699d", "5bcc8ef74efbb959407adfda15a01dad8fcf1648", "bb2f61a057bbf176e402d171d79df2635ccda9f6", "5fb9944b18f5a4a6d20778816290ed647f5e3853", "59e2037f5079794cb9128c7f0900a568ced14c2a", "2cdd5b50a67e4615cb0892beaac12664ec53b81f", "3b02aaccc9f063ae696c9d28bb06a8cd84b2abb8", "4e0636a1b92503469b44e2807f0bb35cc0d97652", "420782499f38c1d114aabde7b8a8104c9e40a974", "0ee5c4112208995bf2bb0fb8a87efba933a94579", "e85a255a970ee4c1eecc3e3d110e157f3e0a4629", "117f164f416ea68e8b88a3005e55a39dbdf32ce4", "dc396e33d350970aa27e072338ca8e95c20d815a", "923ec0da8327847910e8dd71e9d801abcbc93b08", "0a9345ea6e488fb936e26a9ba70b0640d3730ba7", "ee03ed3a8a9a8b6bf35dda832c34160e62893f92", "096ffc1ea5493242ba0c113178dab0c096412f81", "894a3ab0a3ef82352b2c294dd2bde2bd3403da8c", "a493a731dadababb6f2ae0b4b6233d861206345b", "f35a493afa78a671b9d2392c69642dcc3dd2cdc2", "84f904a71bee129a1cf00dc97f6cdbe1011657e6", "7e5aa453a21f56737db5e02d540f1b70ee6634ad", "5bea1762f43987f03ddbd8d9c46757cba281a05f", "86f3552b822f6af56cb5079cc31616b4035ccc4e", "649eb674fc963ce25e4e8ce53ac7ee20500fb0e3", "2f598922f81e65c1f3ffbd8c2456d2e9dcd7124a", "dc77287bb1fcf64358767dc5b5a8a79ed9abaa53", "2597b0dccdf3d89eaffd32e202570b1fbbedd1d6", "a1e01feb2629f8a9a55b8f8c1c5d3ac6143d8b19", "ef458499c3856a6e9cd4738b3e97bef010786adb", "d448d67c6371f9abf533ea0f894ef2f022b12503", 
"b5bc03b29235a1fdfe316b88628eafd303b3ddf3", "41decbe12a8aa7996163636e09d1ce1372c271cd", "2480f8dccd9054372d696e1e521e057d9ac9de17", "57b052cf826b24739cd7749b632f85f4b7bcf90b", "871f5f1114949e3ddb1bca0982086cc806ce84a8", "e2f78d2f75a807b89a13115a206da4661361fa71", "bf776e3483419d7e0cb1dfd770be02d552e1fedf", "36340f697d6c9b64adf4047a38804c0dd5b016dc", "3ed81d552fb33fe64c766407570f3d8b062fb292", "4fd74b807b47a5975e9b0ab354bfd780e0d921d2", "30a327e5960c16f973ba791743d491a5a6d52dbd", "dc72dd4690f4373a7dd14223a53ea4cc16bd5210", "cfe73aa0d6bdc3d52b48854d1c057bad8c575331", "8a2ed61448d9e41295753f5bd0a662ac28373e6f", "b17197921cfd6e06da85881a03abb2da2608b0c7", "538faff98ddec5d9391a521c9a27aa0117af7d15", "756275128fae4ffe8389261e498f9bb49a8381b2", "310dcf9edb491b63d09a9eb55a99ad6bb46da1d4", "0cff6fe25622c3da377d073c256203b98a2169a8", "69abd57a49c6b430a83d9a1e09dce5a347c9c63e", "fb662d021f05236c8ff2bea3be901cd25251487c", "26973cf1552250f402c82e9a4445f03fe6757b58", "ff336983f0308bcb616301a6381b3296ae27c4ba", "5e03074c747c2428da51adf772bddb5510691568", "a92147bed9c17c311c6081beb0ef4c3165b6268e", "750e567370fd8c37bab657207195517405727a71", "7d6539d637f919fa20a9261e03aedcf59f92598e", "171f51ed4d084f0df52604d80cd8fb304c163ba1", "171a4ef673e40d09d7091082c7fd23b3758fc3c2", "34d53d2a418051c56cad9e0c90ea793af6cbb729", "8b665946429ab0715064d6f120cdea6f426ff29b", "2c2f03edc9b76e5ac132b54b2e3313237e22b5e7", "084cf3858b07d64fc29cb7f0f4dc0653c6246d3d", "2db0d42192618d0c7419321fac06b887d96dea53", "e26c2f7471b19bc881d0804c37f58ffafd68fcfc", "5ed062553280d48a42b688bc63ed3f81f3507dbc", "c5773cd1b7650af3096a807a14a3199de9341aa7", "c19845c84abc9e3afe17003fdcd545ed020d0624", "34dece36ee166864cf1b766d3243ba36fdf3f6e7", "cbca355c5467f501d37b919d8b2a17dcb39d3ef9", "6ac29ffbd9184e5df9a14e748e6a80d7ee912fea", "7acbc7edfeee7c3a19b6f204e1c290172150db5c", "72903a6b9894f13facf46a81bd7b659740b488e5", "3aecb2e2d4f9c3858b0a009fc07682ecd30c95ef", "9e3436fb5be28cfcb0995617f3867711c3013b7c", 
"e61449a2a919a2e9a9b4a685f6651dac16673d5d", "90ad0daa279c3e30b360f9fe9371293d68f4cebf", "621e8882c41cdaf03a2c4a986a6404f0272ba511", "a5766dd5f2efe0b44879799dd5499edfb6b44839", "8e71a8352a373697bca904e69dd728317d55917c", "0a69aec6826546283d1277b0cf933c398fc41e86", "70cb286bd9e5ca57b4b3f56b0c801a209804b552", "8d5ea0c79eecc9e6c857eac5d494d57960e0f587", "ab60ec2e8fc1f53cbc9c1277e758bfbcfecb4e00", "cf52545ed18be7288376063a0c9ab339d0e8ff78", "9952d6630a2fcadf34e356de07ebd2254651c95e", "86b1751b265b289b09de79956e77a01d82e12086", "e8d898a6adcd526874e0a41840b69760506a98a1", "2a2df7e790737a026434187f9605c4763ff71292", "1eafc8e7316d7257955ef09f903d318d55fac1fc", "10678172baa93d8318dd1945d09f38721a0c1ffa", "81a527f5679ed91ff6f5cd18e030ee5074a76a24", "634f698c05d640ab355e94a9a0cf9191891b3dcb", "3b7f6035a113b560760c5e8000540fc46f91fed5", "57ff1222a78a230c46fc81f22daa57981b0fa306", "1cf34b6d6aa2e5e18e1e2097bde30ce456bc373f", "3bd05ce100075dbc29fc87c95d2fe9cbc1476ea0", "99df887213407f612c1f5df502b637709a29cd6b", "f36c3ddd43ea7c2e803694aad89e5fd903715c81", "969a06bc35f3d8539238901e17fc05c66d278317", "ccf5852bfb55e1fa6760f76139ab44dab89f2a17", "a95c9d51b7fb53cf22cb13a806a780aa1f9d47e1", "1a86568fdba2b85a9f0b69d563dd22aa5a8d3562", "bf91c7f62afbc838cf6af5158fa994abe4369514", "3b152bdeedb97d68dd69bbb806c60c205e6fa696", "0c166b1e5ae46c157301da4965a453295ec85658", "12cae6e74ddb4a16d66ac410cf2e1b3bfc3618a8", "7ad204758df6c921010d9967a5b7449dd406ea56", "1cceae92d399b2ed30aa54ab616e8c39e6ffbe11", "2106de484c3f1e3a21f2708effc181f51ca7d709", "7eb8a3b55bb02867b959461e3577a2d6f22fda49", "dc256909b4c63191f92bdc4794347d40eb99c7f6", "ff54ff0dcea8a08bd987848387b2884e7fef9dd0", "4671f13ac3661f430e50b169108c67e11ebfdf1d", "8e457222d6f38847489d63557ec2e0de7356e2a5", "1d80eed476270c04ef9c1499dc4cd76030aed595", "d8e061960423a17748dedbcfe4b6a6918f79c262", "38c7f80a1e7fa1bdec632042318dc7cdd3c9aad4", "bec0c33d330385d73a5b6a05ad642d6954a6d632", "041d3eedf5e45ce5c5229f0181c5c576ed1fafd6", 
"9b7b07cb338846b0a0e3b2fbfe8d45184b92152c", "6953911c6756ca70de1555df14a06f13305e1926", "903a8db7bac7a2826cd98ae5425d839852274f7d", "95bf7e3960d88f2492fcf10298b0719b1c7da248", "090b3189391f3e1917649b3a62696febbf0429e1", "a624f18087e663dbdbf176de45b863cc59bb2bb8", "56e242cf22cc34467f6053fed2eea407d36fb80d", "de2faaee4f1b2ecf23149995d0146347a13b9257", "d5c86768c067df0732187040c21f23fe2183e597", "6a27ffd788a0db64fef74e673786763c82902a26", "0a6a173a1d1d36285bae97f98f4b901067d40097", "71644fab2275cfd6a8f770a26aba4e6228e85dec", "bab060d284d0e2477de805cabbca93f8a71b277a", "c2776c5ae1aa8d640ad96f06a2c82707fe70a850", "16d2ead2c3e98aa1ee9c948855a027e1da2b8eea", "78b046f79754d61afb985f100f98b83399cb2b63", "8e069b2447751a2ac44056203a47fa97ac803e4b", "83335a287533fa24efdfad928db8fd8888ec4ef4", "0b0c4641ed1b6d18a1f0789485484d97c1c8cccd", "246fa412f26d5bf5b151a7c3f5287141bd08ae0b", "15e6e1551ce9a4094c57db70985e420e57c6997a", "edd4b6d99367a69865377109f5f8bcf872f0ba45", "5db08701e6a8173ada652309c09d1262f9df7f9b", "08b0664fd37cd434201a1b37c20c0919833a6ff1", "a473ad49f4570c2008d6738e9e83a6a393237f57", "c105bd7f3fcdcf531fecd29206f7a1a6ade8e6c6", "ac44314e877e10ecf4b2123553ed0bdc3fc963ef", "aadfcaf601630bdc2af11c00eb34220da59b7559", "8cfdd9047f8ad1eaf7d2b71385576f759c792173", "514273063cebe911f0b628d012e9553129181996", "2ec9a7729d6bddeadd1ba073dd27696aa1a20174", "f68ed499e9d41f9c3d16d843db75dc12833d988d", "17f9b24a4871d29ca1a83fae12e4b96bce0fba63", "57ef0d30fac07e1fef3346838a1dcc8f4eb9f315", "8fb0ae4fa49de45bbd51f97571732c1827516617", "3a387304d18f2786ee83804bd38efecc2a5fd323", "47165ad181b3da3a615e6e8bc8509fcfc53f49c4", "c75ba6ef724c0c3a9c9510a70da4cc8729b59a35", "451dfcdf9553b3af0dd911d02ff39399a12ebad7", "373f0b02f61b1256d6df39979f1317d747195a25", "070de852bc6eb275d7ca3a9cdde8f6be8795d1a3", "00c4325c669c52db182390b2ab4a2b9c20f06b8d", "886dfe069bd0f6bbb0a885e0bf2788007bfb737c", "1b807b6abaeef68edfbdc4200e198bf4e9613198", "a444fd7229e2b635f6d179043541d9d90d661978", 
"a67e7ca0c7e1e3020169b5c59dc492e9f62f0022", "a0172fc5e0bc49c3e12a0ae6769eeae40d22d28b", "105ee3d197af63c761730069981ae3bb1c4f90bd", "cc06565c14fb383d22de5478d67c81e1a553e036", "5191781bbfe562cfee0c57675a9fbe79a85473b9", "2ce3dbf18c10e62f1ffdeba5f3b16cf6c4c53c6c", "33c3ef4dab7548364c7fbf5369105b8ea377e909", "a9359ce3a7d0a434739fbb60af48a4c3122906af", "40e30ba448a079152ccd13f9ba670aa272df66b3", "e429c9e38b98a1f0cc99eb890d6789ec311fa42d", "e9bbbdc38971a375756434406f574704819ef14d", "a6ea57828252de759995289e409d83add6e8fcb9", "9b47947c293f045f7d980c9ffe55668e93483b75", "721fbc63a647239158bf817311d1c084455398e9", "3db588f1e58c1207685771d8015fa9427d731a53", "aee5047d0d1378ada2b0c48ced7dd4b908f26931", "3412b4738b97f57d5cd321ce61bf9be453f89bf0", "d9df2ed64494f54c0e2529f2c05a16423a57235c", "c7ecb2ca791fe23c182a06e7700c4e41f5ffa79d", "71253c2e93d9431f3fa6d150299121b8ba27ddaf", "88ab3efdec83670cd036a9048842416baf50cf34", "28093e46fbf89c3d9c59d0a011738e3c5709164c", "8bad9d970664d4a9874ea71de9cc7b4360ba04ab", "ab89587477fcd48b374d4958778da16113965ff1", "b96b054df390efc6d620abb277b43c2d3af85c94", "f4c768a8f9ae211168acafed36d371d793e768b3", "57412e2966a04c106657c926bcfdcb5c3842444d", "a3f2c15f87a9cae87b449052b05892eed3282ff3", "18727c3f4ada0cec9e5914340cc672d0554d7784", "e465b7d0780b27da4b351a281a2711c63b42c7c1", "d47ce2b93d81d9d5c617bc885aec3c7c29b97933", "0c0a3a00c97634fe39a4bc39bcbbb9f929b220c4", "b264d8ac4e9f1477ebb8367598b9eafe87ed19f1", "79c6dc2bfbe3c4d8e21f4bfd36163abd2b3074ab", "0b0eb6363a0c5b80c544aff091d547122986131b", "1a03716411e72722f853b904a83d9c15a0d737a3", "775d5676dfda56c80c87dca30bd611f8fabd9ff6", "c552f30ec6e8f17d2986829f85639b887095bbdb", "6aa21d78af359853ee07288cfc8d047e914ce458", "301474a50a39b24917ad79bd2493f1168c4c1227", "cba31f4773e2e3ec849f7ebbe8633b5b7be5506e", "453e311c6de1285cd5ea6d93fd78a636eac0ba82", "1d843d285992549b7e87933f403b23f456a73e30", "cdb1d32bc5c1a9bb0d9a5b9c9222401eab3e9ca0", "db84c6fd771a073023f2b42e48a68eb2d9d31e4a", 
"a5a01adb6e50ad22e650824e0def422e03e3e577", "97f665219a42faa8e625625257cc35f5dcbaf0ba", "e004b3fca78a6730acdca0dfcb73a4fe769872de", "85badd058bdba2f6b6dd513e9925dfac361d68d9", "ea099ee1183145131e29009f2af0e4b13ac583f0", "2299010c37e1c6101d42b1dc8379798c8ce091d9", "e163118b4a5b8016754134215433eee1f2c0065a", "ed700fef77b92ba942ad09fd6da00398e879db5b", "3eca8ed3164324698d0171e62dec24e8abda9e26", "5e8e1925d496c00c2e7d885fd4b0717cf82c1031", "6aaa45f48323ce50d0e4698dc98634fd66a728c2", "46e11b3059c1110c318091d136f36ec67159bc9d", "73c72161969a070b3caa40d4f075ba501a1b994b", "c784ba7120e807e244f508c71e96cd7677fe1109", "21b0b2f5df87318912d58d3b843da363a4fb91c3", "386696660b8e8a3f63ec269693c2c8801eda4b26", "64e053ab54c44968a1e6fa146a72f59f101bc951", "1834de2fdc73c52779cc2159a07d88c247d3f60b", "f7e6656442f075902bf9886dd24693ebb4ee1ffc", "b3345c179be86c3fa7f3fece7d1f0db93e2cf8dc", "95c63bddb6d6801c1d6cefa1765eec6a5797c416", "09c019141b209401b76a35184c86bab6cd1fe6b9", "928d8e39ed97a6deed59b3d0eece44a9fb7b3116", "df293bc88f5e4f4fccf7dcc0efd8dc8ee7ca3157", "fec268cf9566309d7429b879a4dc9df4baf2f56b", "8598e603438360884073fcf7b843ac489fad43b2", "a3b70bf7e676f92ebb6dec3e2889c9131634f8b9", "7c7a1f5586ecc46d4efda1018a5807e3e08f3b65", "10e155151d3a364b1249e765edf178f1c519c3ab", "dc08ae1a0ded26d13cfb98b47a6153d7d1e071ab", "d9e99cdc42bafd9f57485a1cd74bafea9f9f074c", "4cb9f0965bcb756f40848abde28f5d9903fd6db4", "2de9e64be2ac5f5f923478ac7bd14f847db6a765", "2a725b002dfacc566a83c8096aa28e0af0eca8b1", "adee5054f386c6eb8ca83417c9b9ce4571aa2633", "887377ea20d144c74ade6f73573dd5cf411f5527", "14629c6989721e452fd9a49b5c20b8e849bce82a", "f4d6361e20be2e472ec064936966a02baf473412", "c1fb854d9a04b842ff38bd844b50115e33113539", "a289f5c7c1273c2402bbed8b3f9ed9b4b6e29777", "03e6b8f173012cc2e1410404f9c3bb97e0881c00", "28209a6ef1de7c10ec13717eba8bad7c2f4feba7", "1f80f6cc8aeb3d37a43a8e782298adfbb1ffb5c5", "0568497f77d21122a6991e3d7147d5205451873a", "138778d75fc4e2fd490897ac064b9ac84b6b9f04", 
"11386a382a556a253406102f774c4056035dc8b0", "b3e84ed8092640e24b7c2bb3b0410ed41f0fbf86", "bd88ad1356a85bd70889ed1deeb27dcb2722fde1", "66d490c95ac100cedd2a0886a0a5510267e042f7", "8aa6c3601924c99ca420c7c37ffcffe00db1eb78", "160b6b8950a155b66ae772c90cccf642a99b1126", "263977d8867a68ac52ca4f7e19048ba2a51cda21", "352fbe52e9bcbcf8625a408dfca36b30460c8251", "38d6fac9afc1f10f2da6421defba484f32f1db38", "2e1add06cc82d139348056fe43282f1ca1832e5b", "44bf7e376bf6d0a64134de99ac92df11546c055d", "0e43417efcc2bf5cb11cae87502685c5e0455bac", "95342efc83fac4cf61046c99c23e99706358fe70", "870820d4c830f299c7db17ded13e7b6410fe3b54", "dff7c9a7e66eb996f29ff9768d07a61d119aa484", "527d2a15596cd2ee6d0a66ec47c7fd621bcef404", "5c6ccca19179fd217a74ccb954a4c4370e4203e2", "0d028a924d8fce70d9fc42daecf77eb7caea67d8", "c4a2da0fdd20bea8301c7ff872444e00672e2de0", "0d2e29f07275fe05a44b04f16cd3edd0c3f448f0", "2cad358676854505517307314728e8920fe53d77", "0a01d9b6b468f3e25867a028244ce4376b5e8d82", "2cd9e41de401ca9238e740d9daabd903f82040a6", "4d3a6c2cee0cf06ff6471fad3d65a5835d0552f8", "a7afa62f053dde1c33e904b980752a4f36629775", "cd4eea276db9e02abfed96e4fe6ff623c708cb5d", "c21c557a8afc8568d55a8e65b35ac2c5c516433c", "abd555f397abb6f46aad81c683b279cbd6d22637", "99a3a4151abbc2e5d33d4beec88dc55a057df299", "ccf044bf7ed04758b5926038e3dbc129765f66cc", "b4f6962068c27d10df9016090a0ca14f65f26b70", "134d4eb44ea1fd0a2719dbc8ee2e41003dfa3e25", "503166f937690c23dae7db65811f9b1b9b3cded2", "418d12dfcf5a2a5de898ac9728043448a85b9e71", "193a93d1291d9c12a0c4a159729bf9797267ca3f", "1eec7a6effd09be491ac983fb088e4aab02567c5", "4db99a2268a120c7af636387241188064ea42338", "89ad4323ee776782170d6f840fdd8a26c6b15d9b", "435514bc2103deb604d762095d8faf77be544b9a", "b14fd1ba72d1187a64a9cd4b024af17e8f80c14a", "f47e7253f0763579c6c045cd3fa5b34b0697f254", "20b4a81c0aeafb891f9888797eac78e242db9aeb", "d743a37f6a2af0203cd19cf746d4d532a553c12b", "fe5c36734551e837e5cc41673668050f44cf905e", "57e5930c790ab68af3201e211483d88819e7563d", 
"e24d52b2feb48a5112246da04048a846a33affb1", "7608953ef5c7a882bd2e7e7053a600e543748233", "e2fd91726073975f6e6eb7a88c9993da30f1621e", "524d119aa75dc9865db584cd4e0f17c957b8f56a", "828f098aacc01e52802b0f1dd25008cf73205d58", "eab8a51dfa9cf2af5dea1decdf57299b8a421d36", "5e9d00bd30f8e6ccacd5f57728aa9413a9bff2a0", "65150ea455cf30ff75a73c1d25df84687d4930e4", "dfeddb420dd7b37dde0322762d3b221c1cb0499b", "2d91ca2a115e62dadddcd0694e2d34ed36f1136a", "0765bda9fdd0bccad74d096be337ca083b3a4aef", "27a4bbd7bc90ad118f15c61bb30079d6e6bff78e", "2a955477f778b6c27f914bd36684f019a9eb5976", "b20a5427d79c660fe55282da2533071629bfc533", "9095f633a153c0e3a5503c0373c9c1dfeeefb0cc", "16a1984d55966840132e9845e5d450b39d41b72d", "d3bc28d99301082653f73288c68bbdddda1585dc", "ae2d194e8095fcf8512f9b1d2fb23304b59ffa3f", "cf814b618fcbc9a556cdce225e74a8806867ba84", "4dade6faf6d5d6db53d5bcb2e107311da1ad48ac", "c29487c5eb0cdb67d92af1bc0ecbcf825e2abec3", "a082c77e9a6c2e2313d8255e8e4c0677d325ce3e", "63db312ec494988e1af0c1db5f9d9ca40ef89237", "5b207dd63dd1a3a8fd085c8b4577945bf5fb4946", "503ca6ce6351e476c063c474394a324fe9ba6ea8", "2884ff0d58a66d42371b548526d685760e514043", "95fe27cb6af4bb754a59a690e842a2d5792e4f4a", "50d967ba885059fdbb6a14ed7784397d951efccc", "04379f40d2a26dd769c53488b7b08a5123f89347", "ed6a47f0e2e621d8420082ba1d0078189d76352f", "35b95ccd1350c5207dca8fd16d3d9906aea1d7bb", "9de6b3f7a60cea9749ae38ad9b700a7350212350", "03fddc552004800a5c5b575d3639f9a8eb5c0ebb", "74b45758fac8229b2f1549b1e855f4462a4218b7", "ce114dd0b87b68bfb30b73e2b0957e3dd4856a02", "1d9306ea0f0239c88aecbcf0a48a11c964a0fcd4", "6b3c5ad668d793893dd5169c771c23bc9ffeff31", "53facd4da5f1d1f98f876211421957f5fbe8a29a", "c84ca95638893700d8f806e844984a5b2c50b5e3", "ac9227ff4262405b8eec8ebe4802b763bd6f55e1", "a31262a25c6ff6cd1e2e4162ac13e657eb169d02", "bc703a84b98866aed444d8c3e72c58e5886ca44f", "2d080662a1653f523321974a57518e7cb67ecb41", "5d3b6c9a0a8b71b875a565f5cd133d83817fdc38", "1068f6eca07c35426ca67961f00c3cac4866f155", 
"692a8ed54c4336d181c6c15046407b7ee3183e77", "f0365c525bba4561d43e16f3fc79a898fa8294b2", "05dc1d27ade984a1d85e104b11aa3380fcd0d8ad", "0385b65a4941899340ef59f605fb3e943d62330c", "cb5d7fe8416810cfdc5b3b9d2b60f44ff9c5e78d", "e851a51b7e8bb43e49c9edfcd0b44da64b693646", "3379938454ad1ea9522f164d0330a4e9b1a233ab", "17f29dba3809527c3b9533247045a488417ec21c", "122c674f264c53d762af841669209e131b49b3f2", "6c40a215574f95d9dd2f5ac3523f2ffc03d1a84b", "6d96bf377c96e1dd9b43e9f12e0ee2a66543edbe", "ce75deb5c645eeb08254e9a7962c74cab1e4c480", "832aae00e16c647716f1be38de233c9c15af9a28", "871e6c1de2e0ba86bad8975b8411ad76a6a9aef9", "795c8a097e74618e862f77659cb212333ef79a48", "5853c2df979b14baaee7053d4dc6e2b305c87e61", "95d7fae081259f29066e568689b007a44d6c26ab", "7010c55055aae36edc121befce125ea04f0fc52c", "00bc6570d7bec88593cdeafc0feafa32c81aeea9", "a0ddf6e9697631f771d73b721a3d871db6a04f6c", "19c5a8e8015a950cebe0deb94c2154e0da44ade8", "03e2aa8a191c7b1a579a7ca4c9ba6b6b6d6e09bd", "2de1703cb70bfbbd8ec2aa7830b42f2a17856e05", "c4458cc521d8da6faeedc8c4f09505dace844a05", "8291491723d24fd242a3a93248f6475cb084999c", "1154e9a62825e6dd0a253b4cf9b98cd6452bcaf1", "8c98e81895fb00a0eb91dae680b2384c9d3526f8", "1bca1a09e2ef62b1960c23ff6653ae2d5aef5718", "40f5ae73e598114edab3ddaefc38fbdbf5c114b9", "50dc0ec4d30e67ed0065c707ddac407a021fee34", "180953265b3ad550682c8f0dc693eda87b82ec91", "ad5679c50e249610c687fb9a4bb859e7e07d2775", "4c41b774a6bdf43d980f640880cc49b82ae19b34", "57e562cf99b3dfbb6baa5bbf665aa6fd97ffe8ca", "d1318b84c082befd4eb9de651fd524de5378807d", "fe4986bbb10f3417372a02fed1218acb5162ddec", "384156c658b312946eebab736235f03f726c787a", "ffd3806a1a8305f5fa7fcc75e614922f1e1d393d", "53c5f995e76ead002f1b0a78bfd50de3b1faf593", "22f8148e43c50341bad686d7fccb425b0682e667", "dda7bb490171a1d3364928fb8143bbe021146c5f", "5e004f0255e8e34b4f542fabd36d839d74cc38db", "0041ea67f32bef4949fedcef97562ad16fe5a7f9", "02e1edb2f78c9b24e76e547ca8e31501d0be95f6", "900dc8bb7274701d4eacddb71643b6a8a51a3b75", 
"6f7f9c366d16fc386940d2ee6e9938af6e158cf5", "0170158c227ee1ccf0a6a2d642699ff184c84bab", "069ebb57ccca31ab68983e07044e65ce1a04174f", "479cd0af9f345bd44cd180a5e26f3e799391e31d", "5c31c8b75469336cc89373067022af810f4eb19e", "69aef3ce50967a00c568849fed630c573f6cd1eb", "ca0ac8cf1ed07d96e7951e8546980ad8a7dddfce", "3506ef7168e07840187ec978b47f3a05a753101d", "e45e473481f5160b0f823af5f6d8711efb26c8d1", "f96970f75b0f37787a47073bf7d02111f45abe83", "49c603c2910bbaaf877d2a0946f5f1448e90bbbb", "9900be092f81547ad71e4124cd850048e1969063", "d1c0592f4f9f0ff2e14e0591d87539e5141b7361", "c259602c6c5ef3e3ee7397849a1a20c8561ba432", "16f2dca5fa64ce02da0d0c8daaeb2b2cbe405fd7", "7df3b0c7cc025e97ff83aef9ee2b526e32770a20", "252f912df6f301eb35923f97dda5db731fed2446", "2b4512097cb0056886f2d4d2ca7f5b034a647237", "2c56eedd28be5f0d246a728c5e78f0d8c57f1da9", "50b6d2db19fb71ff5cfde8e2bfa484b10fbb39fe", "0dd72a3522b99aedea83b47c5d7b33a1df058fd0", "5b8164fc9c65cd96fb529c3c8db551027009d4d1", "9579bed02269e3f1f81c1d3b4dd396426e06c459", "62e1641bdcf3d5c4c4a23f6fd1690785c797add1", "90a7e37935891e2e7ede39d0c2da04d2aff54df9", "a1a4059c05fef739b5ede0e77ad4a90acf085e38", "507c9672e3673ed419075848b4b85899623ea4b0", "daee91e5f88efcdf154dbf6f123a97ed8c5bb643", "8f976aea4eb95019d2d50ae989bb9231b091315c", "5cccd151d7c36c53bd582a71d780632a79384917", "8d955b025495522e67e8cb6e29436001ebbd0abb", "fd89d46397d48aa38a317c4e2fa3c9a52a187998", "4e5698894946680e4d6e766346355b2dc1959819", "c76eee7a6656664bc37890f3754ae202255ffff3", "f9e07fbe2d2181e7cbcce673aa2c3d40b85b8a3d", "465b75fa4b84948e19d8bf2ebf4fe4459c3c87ae", "8b4061697db9d3dd6536b812d395553f4cfb534e", "094c3e8733ba08a879522a5501b291c9e134ab75", "a219e7a1fa717d4575284ccc80e850088dbe9597", "7c598d8b947b7cad18ae101ccb9b82c43726c51b", "699ad5c2bfe38df4c7e7af0b8d7c54a73a21b07f", "0acb29c97b60d7d24ccd6a23f89175cd3b9c2cf0", "6b5f32d129f73bd1e2aa8323bf78cec3ed12c539", "4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06", "b69d14265a221538332747fcc486d0f9338c2822", 
"db43367d687cab574e791f1442cc5a088859a744", "319a27b8559e2301c9dbf0462bcf0ca6ad3449ad", "fef3efeffade0e39f2c279653b4785b372be410e", "b488897ff5a357ad31c3b15ee9440de17df2200e", "4a1fdf137e720beb6dd3c8a2623b21676b142f2d", "172f303e61c0dc9556dbd080b80f365080783360", "ae42dc9ef4a03caf69c23c117621108211977405", "5fb16fc243c2bcb37149d5bdc3aefa05198ae840", "468aaa87ccdba65f3115bd0864f7772b6706c00e", "1d93a1af770040cb8a64e96215884ee363a8f53a", "49fb5306f72a0618093753cebb81c82e031e9f9c", "36119c10f75094e0568cae8256400c94546d973b", "7a9890cdbb62a60ba88a515655535151b568bc44", "ae620eff5ce7aed89036dc1834e175b92be089bf", "54983972aafc8e149259d913524581357b0f91c3", "92cc2cecbf065c4b55a3bceb0d9e475fcd70f8c7", "e3e2914ecc57f00f31b2769d501bf16dd99547c9", "6ede0d34104d1f5821654039f0ac8532f39dacb0", "2717b37097b8f413275731539ad3344011b1d4aa", "6e0b1ea97421e080402a8db23358b01c04c905e5", "faf40ce28857aedf183e193486f5b4b0a8c478a2", "f6136ee2e56e5f3abc42049425794073aca306a8", "d5b37090422967018477439c25f672eed2f5110a", "84881da6fd1da4fe24d8b39f960f7a2fb71a4197", "120b22e7a47923e42a123b9b68a93ccac5aaea6d", "135b95d67996d88e6208b6061a50262035bf3349", "9bcfa6d23ea628ccfabf6900ef05437e7cecb1c6", "fed3ed2d361411d324c664daa127be5edc6a1c8b", "6b35bc911a868ef3f66c0ae44a7ebebdaa61ccd2", "f68255269d509ff617c2532bd2da71edf9576efc", "24b476b0851d6d5d276814f3034b8a1ce3ac6905", "2042f1cacea262ec924f74994e49d5e87d9d0445", "1dcf08c37fe2e8e78d3f1857547a965a0ac29526", "f1cc060b6adcca3f8130e8670230899160d26848", "61fe4426929c3b066b93a582667e3f0cc3b41959", "e819a577c57c83a133a0a0e81180d14dc13b82e9", "210de355b1e20a57c98608b98fd2bb8a62fb40ce", "52969cdd2c5eaccb534fe1296a61517b7ec42a54", "c67844afb694bae09deb472fc1f97051d43e63d8", "b0c66e95b5eb36471bcdcdaad7d9368556110109", "72a672cd20a3b7b2a123772ac0f9a27cfe96effe", "676c76c4e3ac2f91a2209ecdae8d20be4de7c9c0", "dcad0ff8ca4c60d69feeb1f642019798b52ca981", "7aed6480ec8882075c7f8fab20b7d257acd65ae1", "778952cc94d5baa5132ffbe2cf342f80032f5f73", 
"cad8aa5fc13325a83649dd78ebaef27159c0d5bc", "80d6594a2649893bf67f67e84c1cda802f2f296b", "f8d434471c2850c5f1d0757d42142b655fb46ddb", "ce54dd2b0c6c75208ac77420233419066dd0117f", "87f1b49dee91ff0065ab4ed1f0ddb74fd0af6b5c", "035d6dbaf2053d065464e065bb824695b1ab5a95", "7c3c4c64cd5c0d962f8f0c875dd86006cafe30b8", "4bde15a51413fafa04193e72c15e132e7716d8a6", "47abff9dcb7cd8628bf3bda89069896ee2f356fb", "cf103f2fe5595a55f918ecbd9119800f4747fc8e", "b89fc897a5c538b2de462fb68bab4e588eab68f9", "fc836ff649af617c986f273ac3374261ec811124", "c06ca26b33ab2e9ce118ca02018be5834e8164a6", "1665835aa8146a71dcb83e12dede78e3f3f06e7e", "7e5fc58d742ac5fbc16c3e33284c08fe9d9556ec", "10aff52a74c75742e6b7379e581a6760f80ae8f9", "232384b7a252003af706ece11f5db4461cb46e44", "e8767c07279240ec65b01c5c835e7b125d7baf3e", "75a59bc6938fb2071ed01a5fe8e88781e43a5c3b", "c7b8783c5db016ea819749ef611e5d70571972c5", "b3c21608a4f78ad3d436bfe005d5c82c941273c5", "2ede43350eef0f579d5abcd26dfe24afcc3ad05c", "e58e8163ab8d74aceea898d0955e71936ff336ef", "6864b089c8586b0e3f6bd6736cabea96b1c4a28a", "3a7f9b4badc7407273325650763e887ad7b5cc9e", "2ad868dbac32c0d6dc100b2dd02e52d79e2e035e", "39634443c52f43c815735926a7113402c9b737d5", "06e5d9ad3363b8834229bf7e055a94092994e097", "c1a63923ab7dff0fb8ee264866e10b374bbc939b", "98d4dc2e5b47a26a367eb074f5f3dc6e86befd98", "2c07d9a383e0bb7e1c8ba07084ba8bcf71af2aad", "b8345f687588867bc800a0c149d59db4b873160d", "8c71e28a4ffb283a9cf3c5549e2fc64e9b0ecd5c", "636b8ffc09b1b23ff714ac8350bb35635e49fa3c", "96ccd94151a348c9829ab1d943cb13e9e933952f", "67b79c2336b9a2efbfc805b9a6912a0959e392a9", "78f08cc9f845dc112f892a67e279a8366663e26d", "1e5ca4183929929a4e6f09b1e1d54823b8217b8e", "19f076998ba757602c8fec04ce6a4ca674de0e25", "e200c3f2849d56e08056484f3b6183aa43c0f13a", "f4210309f29d4bbfea9642ecadfb6cf9581ccec7", "0b7d1386df0cf957690f0fe330160723633d2305", "33c2131cc85c0f0fef0f15ac18f28312347d9ba4", "0699475af70765d0810881d3536b44a3c1d745a2", "23c66ab737367a96f1422ce5c4ff8421709ef70d", 
"3888d7a40f3cea5e4a851c8ca97a2d7810a62867", "13604bbdb6f04a71dea4bd093794e46730b0a488", "537d8c4c53604fd419918ec90d6ef28d045311d0", "b5d7c5aba7b1ededdf61700ca9d8591c65e84e88", "732e4016225280b485c557a119ec50cffb8fee98", "718824256b4461d62d192ab9399cfc477d3660b4", "55804f85613b8584d5002a5b0ddfe86b0d0e3325", "c7de0c85432ad17a284b5b97c4f36c23f506d9d1", "3773e5d195f796b0b7df1fca6e0d1466ad84b5e7", "d4fb26f5528b9a1f04ea773cc2b920e01fc0edd4", "d790093cb85fc556c0089610026e0ec3466ab845", "6f26ab7edd971148723d9b4dc8ddf71b36be9bf7", "aa6e8a2a9d3ed59d2ae72add84176e7b7f4b2912", "2c8f24f859bbbc4193d4d83645ef467bcf25adc2", "b5402c03a02b059b76be829330d38db8e921e4b5", "61542874efb0b4c125389793d8131f9f99995671", "dc9d62087ff93a821e6bb8a15a8ae2da3e39dcdd", "0f2461a265be997c962fa562ae48378fb964b7b4", "218b2c5c9d011eb4432be4728b54e39f366354c1", "2d146cc0908c931d87f6e6e5d08b117c30a69b8d", "44834929e56f2a8f16844fde519039d647006216", "5f7c4c20ae2731bfb650a96b69fd065bf0bb950e", "40a74eea514b389b480d6fe8b359cb6ad31b644a", "84ec0983adb8821f0655f83b8ce47f36896ca9ee", "571f493c0ade12bbe960cfefc04b0e4607d8d4b2", "38861d0d3a0292c1f54153b303b0d791cbba1d50", "b7c5f885114186284c51e863b58292583047a8b4", "5da139fc43216c86d779938d1c219b950dd82a4c", "28e0ed749ebe7eb778cb13853c1456cb6817a166", "60496b400e70acfbbf5f2f35b4a49de2a90701b5", "7a595800b490ff437ab06fe7612a678d5fe2b57d", "68f69e6c6c66cfde3d02237a6918c9d1ee678e1b", "cbb141925e919aef18f9168b79b4c4aeb871ccff", "35f2541ef1b5dc2df8283143b1b98c6309ed47dd", "b657702aed7aff8f1a86fa32d30a07197f8348c5", "43fbe350681185ec9a18991dbcb19d694ce4f245", "0493b82694d8754582bf54802c4dbf64586ab9c4", "96d98463ba845651907b0b03f613b91c97d3465a", "9bdd3ce1879f8fd32d2a3f2c4cedcadcf292a1a5", "1303a83494da62032feee866225b731f6317f605", "ee5fa8ac1c33fcf9a10a185ae23f0ea0534e770f", "c528e6285ed170c9a838446c062c8dfbe31c546e", "ce9a9f178018fc266fbf554bbde63155a48eaedb", "15037913b5d3f299da509218f0b914227d10b929", "376b73334bd9aebed1fbb69c4ed3848ec0826b6c", 
"9ad65c5c5a2b22ef0343831fe0dabc2055d72497", "d6143ddbee74c10996d291c666fa17bd87f9d4e2", "7a0b78879a13bd42c63cd947f583129137b16830", "30801beeb4436ce1f15e641b74a3daae836b0a0d", "c5c0cda46a77a7ea8c1f6d4d762b189ef424ffa4", "7a3f17a8a4c49afed6cb575e82a46fcf1e3a83de", "10061bcb0fa8306b5479914c3e9fe889c5ed5c9b", "4f5ceebe7d166b2b96ef080e179e8f58f7787e5d", "22e4e64c1172c90ba23f634d850931ee5f9a972f", "a8a30a8c50d9c4bb8e6d2dd84bc5b8b7f2c84dd8", "887270a39b2b3a0ef9c098acd5cd20cc4ad9c99d", "aeef1fcaeb3e5f3eac93ee275426a7f5eb586e0b", "a24f84b156bbb1edeb1d0761f5940de318b7ed9d", "df8717e1153c48e457ea5ace1aa97c30ee7374bb", "0bc695e580d41ad163d5ec601bdcf384a0bf91cd", "9507d84995b374c35af5abd5f8de08d1d80a3f19", "4640dfc0bfe7923c08d0c762a9c33b52b9029409", "2868b2b13512261d07f33ff05dbe0c10eacfe0bb", "a30efa3271161dc7409530fe0ea76bad62a6f191", "3beb94f61b5909fca8917b0475983ea2c66f1df2", "06d89147794d0889b2e031b0c6811423806f5ea0", "bb0071e21e1f00568ea80dd22c5bcef06bdebe2c", "10cb7f4f86c6437b496a1c98955ba413c7540cd4", "9ee3a5a9f1df0e243d495a7faa011b75d4337031", "3e2e9ac490726c37a0797792dd2aa9d20404b9b0", "c4cdfef8b24437718511ca7be223b158b5416d0a", "6844a700aee36bd809d1188f6f9e81707c513f19", "ee4bd9419405e051b709d90b63d9d264bd2fd796", "71559cae0bc89398e75a2f24674d61cb51909390", "166d069ea056fbb42b10ca660956fee881e6c875", "2daa7d027772a64c94369fac39cc184b0b62d747", "6225e9c2a9ee47b4d3d58313a839f6e170b48525", "61d20ada590e7627351d19565c93c6bed012b5ec", "1a790c7669943af5868e49d15cf282cbbd506f02", "1548cea1fa9be7a23d4d1e38086336913d501e44", "29936a4dcc91adf9708b938f0d3fc0f38409eaff", "1dd3a58ab363cb396bf36223fadc8d2341bfdb83", "034f7fcf5a393ac3307ac3609c2b971df6efaff6", "047d3cb2a6a9628b28cac077b97d95b04ca9044c", "8d9bcb6e557576bd4d3142602c40eadc87407620", "525eb080b158a492bfd02b421891c7383303dac5", "170862138b7be1b8d92c3abf7cf2466bc435f1ec", "07ad6bb9b21c065cd92ab2f24a22c1d4a8f205a7", "8263834bbe6e986a703370810f9b963e2d25a7f7", "845c03910c7cfd02de7df9622a9973e8b085c0d8", 
"ca11dc3a8064583aaf79061866bbcf04caece162", "a8b674f17d8d8c7d5e4ff701c3525db2ffa1803d", "22c01d758a4941c01239fa8facdb3407559132ed", "7238ad190988f86ad1ca5bbb8e5d0021802276c8", "451bf4124ec8a55b9112cf9cc167d304fa004924", "4497c58a1c7dda2027b3de6385ca628c62d965cb", "71869b405b39c812d783cfddc56c4e20bccc6a28", "037c0f087050284f828f71a8c81a4972134baf10", "dba3ec4420a0bcca3264f75f4c975cabdbb1af74", "4c2f3c6384888ee81453b01bb81f35871f618c3f", "aec84e5aec1b6d83baeb4d447cde399864e25467", "f95d53ff893305741d60e234772003ec8579828b", "7357f37a193992f06eba68ee71faef8c093e8aba", "ac99cce29398a6885598925a03196ab9b298ffd5", "916498961a51f56a592c3551b0acc25978571fa7", "639b8da4c981de46c5975d8c8f74e82567e7351c", "6cabae60cb9081346a409018a3f99f197b192e4b", "b85a2d00bd69572757acff140bd32c24085e6c94", "38e083047f1a8273a4a166aa4c39ea85b7b6cec2", "f13530aebae0a1a03a5dbe7f5375506e4f92d0c3", "5c5dbca68946434afb201f0df90011104c85e4c4", "79e43278ccd7b71eebf16a0ba0627efd023a0b8e", "c04fec95a448f9b01dd4399b3a5a365f67448bdf", "08c2fdbe89fda66ec26453c4ea3f190e3e3d794f", "b62486261104d5136aea782ee8596425b5f228da", "c7d7cf88d2e9f3194aec2121eb19dbfed170dba8", "ae85ff7fb5a7e7a232793c743ad11baf849a61bb", "b5476afccf97fc498f51170e65ac9cd9665fd2ce", "49be1b36f1a9b948e029d81fdca23989c3e4b268", "411eb3c02c676b80aabb9c1455cb581320834e38", "27fda2c61f3fe1f74e18bd11555df7751d178bca", "7638cb16631fbcdf621aaf392fec5108e6fa9f47", "6dd007b6e518a3aa96111028c4664f2647e5e81a", "74e1efe5e3564c4c6a9aebcb18103e941e31e335", "ad1f223e83338b0b08779b3736d5a3b7ccfec592", "8b64dbeac77fe8d6bf440311337451f9f61b9ea0", "5b6ecbf5f1eecfe1a9074d31fe2fb030d75d9a79", "c14781a995933e2b89d40a95ca1247845ddfe3ab", "5d7de2eb2ee99798bfb2e50ed5169e3b8a35469a", "baabaaf0a3d06b9b5ac378bae4f69dcd69f78643", "635e5b6219a655b73f47ae74751ae43577d22da6", "70bfe8dfd9c9b05c8854a5d4aca9c3ee3a3b7eff", "1b2183c2b9608b7f815551c9ba602f22205126b1", "6890af11d4c0a3189e974ffe7cf03088cf532ab7", "67d7022462c98e6c5de9f2254b46f0b8d3b92089", 
"09669da2fe4764196eb0e2eff240291d54607882", "b0d61c3e9851bb83cda8bc079e92d73a43e479bc", "04fcc44647e867917c22375e0ade3276d873fd14", "286ea509dad9f46fe04db96d09a503f5cd94a6a6", "92aa964d5f8b4a99a17e40095c4e3480723dd74a", "d37a88bb3e3840464bbc177569b76cf3aa1ec081", "1fd8c71a8859da611a8fde1cbb2bba1c7cf00b4c", "1505e0aea7f82488dad1448e79b22c3b0ebc65cf", "789cff184f607384b4a45f361143e5c348ef97e5", "2279cae83716e2a00181593a7b10966020dd11d1", "c8673edad31a9f991f6c446c057e03e130611a7b", "84e21140422935d0a18ef0a616ed1ce1541112b0", "97304c55262bbd9354aa78d2f52eb73d0a13c9ff", "b95023870b21653a3777c640aeb32c342a3d906e", "18aae0f20fdc6aab093c72c81005247d2cbc8512", "e7a922049a9bf54a0b13cd1d475a58e36c7c9b3e", "ec946da86686310da231e2bc1919651c8777e394", "b40f176684ada07faac259aa3a8d46121543dc75", "c0ca42fa8788aa8cb6cd7d76337560615f9e8358", "175feee910c542f9af9a7e8ca75610ba6d133b24", "2475d216fd52994ac69ef922f4daf73e47f9535d", "e9bb5cf8eca585fb1b5b7e3ade05937cbb3ee040", "30923ae494447f60b9e7edbfcfa56c6f0be358dc", "dd9716a7d00fc6656537bb035e35acdeb21fa497", "e471973e006ebc0fc801cd354d314e16a872c02e", "64aeab4a2678efa0a60a4d57bf81e3ab640cd476", "4d6d6369664a49f6992f65af4148cefef95055bc", "5e59193a0fc22a0c37301fb05b198dd96df94266", "611c8dcb8cc4c328f0b3be7961adb47689b371c1", "e58cc174962bba8a27d2acab99cfee30ee4fbce5", "303adb08a8499d8a7e61313df67ecc276c2d04f5", "c42b28d722dcf2b276fe41da1a811e6bf9e68010", "57b8b28f8748d998951b5a863ff1bfd7ca4ae6a5", "693905c29feb7f9be3517308c8a9c2dc68aa8682", "4d178cc41201681fe9b3e5484fb88ef6dc674ddd", "14e38bafe584fa0f3cf5899027c61247ff14204c", "6d43bac8348a76ca5e3b765ad5b4d8c302c186f1", "80e6c8a17b178786d0db50dc9c68ad68df7f1fe0", "3bb9cb67390f134ced24f64e997ac825e0a16c34", "f9378db092580854602e37d2285d0be10671eb3b", "a84d6c761fbbc66a3d6e0a5ab8a1dcfc944f6753", "48bf7357723abf7770400d68f914d6a7ca5a1a5f", "44f4b1b90f8d5515f2486e07e4cb4b9589c27518", "4bfdbe2ffc6311c8a297355422d914cb666b358a", "70c012367fd77d6b6dbd97620724fcdf72bb15ea", 
"2fb1ecd1451dc0c016cfe4cc43cb9620a766f1b2", "7a540e0e2049a8f0118be2eab9a2ec5f57e022c9", "06a5e1b1161a84e082f9b4be6327c5b16579b04b", "d670583c4065132282dbcb4387ee6a83e85f8af1", "119ccee35247b83565d385150baf1181fa5c3e74", "dc22d96593e552700f98dd4bf76ee838f9f11145", "7da818df4207bdfd5ebfc609e48da360b4b678d1", "2067ab35379381f05acaa7406a30d0ee02c0b8cc", "c5420ef59d7508d82e53671b0d623027eb58e6ed", "90282cd8e122e102124b765ecbb22025a238f249", "6a8a30d0039a266a6ae62978359bef3e0256e205", "f19527b2ceabf50831e78ac04161107c936efb2b", "6f53466b17a2f9da4dbd1d870e822a1f8e837044", "21e82350472bf6a12af0f761b8dea91cb16bf42f", "30998485c920f62c307c29c4832b70bbce748eaf", "14860877a790d99296a990281b22e6b6a430b64f", "811dff89b6d4657e5a0b8534e208baefd2204cee", "5017d635ba8dd630fc0375bfa71cf2a3397fae8d", "f6b0f62871526f8c4680cc54aa6add755f1477b4", "8b26744e11e5f226f187bf903b88933c5b0fcdc0", "96a3dc5169c095c154fc873d6655903618336448", "2088d93e7f4fa27b8498428d2ed64f144ab8cf3e", "9728c3e32f57b54dea94fa9737c8f300de5cc468", "35071331c379a9c830c96117c4f69e4bc0dc0bd4", "93f9607034c9b7b7693c60e9d2631adc15a2a524", "696ca58d93f6404fea0fc75c62d1d7b378f47628", "d02bf4082850a667bf0b7b6205df1cf9c1899233", "06096a9927d45ff82eed34e6b3d6c8fbdc397756", "a247c12bc54f7792e381c6e71d98348f8059ca15", "7d7f60e41dd9cb84ac5754d59e5a8b418fc7a685", "6709e3b3b860ddda5f79b30a0bb6080c6b747816", "f95f5e43f34e1bfb425b6491fc09558c44d2973d", "34fd4689d406d28100709b3be71958721d6ef11a", "d6ad7334d6e2575d61f86f91b8edac8053af8c35", "001dc49f7f3348841b4086f966bfe4e9dfadf03e", "9d27197ecc73ac0c3a664499c62975068251bf9c", "c29ca739fa740b3155c755655d590582305ef9a8", "67bee729d046662c6ebd9d3d695823c9d820343a", "3a35154f765dcba4e3789a38346bf54bce69e336", "e282bf5a679ca4e8b7d9a2ed56d3b40dc440ab53", "657ae9ecb59cb2a27e57784577a9efb60de81126", "ef3b777cbe362a5e97c5ef27eb3289ebfdb70b53", "115724ce1ce9422dad095b301c7d096498ad50d3", "a8bed0a96d9ad3e5c7ecbaaf2a8967e034e72cb3", "5dc7c33475b545271d1de726fd88bb68dfb7e11b", 
"96c866f07ff999ee11459519aa361fa4fdfc2139", "ac207f5e368e285b0dd54387e3a898c550249b20", "03d89c8eac079df1ff9acbded0336352cdb04624", "bf05e710dae791f82cc639a09dbe5ec66fed2008", "4078c37c39dc5c7c65a5494651ba6dd443cf9269", "0d0a348510cb2fbefbb3225ee18fafc1479eaeef", "7729b0d1d3e26ce0eec1f019f3a98d6c7d926e10", "7523ead2a91191f0ecfb88fba5c0f2deeddaa256", "31ea3186aa7072a9e25218efe229f5ee3cca3316", "f1368b0001e454381eafc35324740c928cb2ad1e", "9d3472849dc2cadf194ae29adbf46bdda861d8b7", "3832a6d6b1f78cdadee6968d51c1c7c2922ab3cd", "311fcda76dc7b7cf50b17c705a2aaaaab5ed6a04", "d1b0d2ec2f01c3aab06119192cf9ba23146cc662", "bf6913250ed359fdf130d6465b90b2a0b6fae04e", "ff172624dd0a3bd31ca925b73cd7295d596173e2", "428818a9edfb547431be6d7ec165c6af576c83d5", "7fac20f3908c69bd336ea252e28c79f5abaa6dbe", "18cc17c06e34baaa3e196db07e20facdbb17026d", "37eaf94fa6619ee857019937677cb055a2a51bf3", "bb79bb04e569f9319fbc9d8e1f275bbb2cf8d32e", "24d967ac44319cd053544c1b77e2e294b90efe81", "89f9fcc6b6bbc3c8c13f37d602d42a5c7196bcdd", "74cbb3acfc401a397c9a4e151ff8e3ecf5ea76d0", "778ce81457383bd5e3fdb11b145ded202ebb4970", "f5350ef1d45574e33f5b0f1c013a5bb00e1b1c55", "fb3bce3a6221eb65451584efa898ecbe211bdab6", "a19de85fa1533a1a1929b98b5fc3b1fb618dc668", "a8d41c63462da7dbddf4094eddaa0bb6d72d0fdc", "4fa6a688f350831503d158f8f618c58d1e06bc5d", "36f1110aed28165483f2dc7250fd187412467f61", "72f9116a04e584081635500e9f0789fa26e4d15f", "09beb1aadfcfee89ffa8b97cfa983b1c497e3dba", "627107c02c2df1366965f11678dd3c4fb14ac9b3", "397f343180dc4c9f40c1c706217956126a09d157", "456983805a8781d6429bed1ed66dc9f3902767af", "29ca8ddf79d4cd1dc20cc8160a6d3326933e943f", "2ba64762acdd21046999a2a31e031d339df5e8cf", "6d0b4f5b3391463376c013a6c00d76daf38da578", "49512d11c468dc2fe3fe832d8c4dc8e0a01b0a4b", "5d2d797ee4053dada784639d7462abbfb2220031", "d5813a4a0cca115b05e03d8d8c1ac8bf07176e96", "697c0c583cb62bdc847106f9ec79384ce66d8679", "b10427999fbde2d90e3541c477e2f6ba4c8f08cc", "844bfcee3bc559960ae7a2b1fd68fcf7a926dc5a", 
"3ac78d0fd4f0c01650277bb25eab6957d4eeb655", "243b82ad6f062c77cb1ad8e5fc56f5fa6f34fc15", "43e3cd896d4dada4114a8961b98ae9f6d6ff9401", "768cb0e32de3f1b5aebe04448aaec4c25586680c", "2ceaa8d6ee74105a6b5561661db299c885f9135b", "3f04caa9d17e6b26e4446578c020bf3b35df9de3", "a453863082a7fb42c9b402023294390eb4167fbe", "f17d6db4844f26a023f92b8771a1c33cea91b9e4", "6128190a8c18cde6b94e0fae934d6fcc406ea0bb", "b5bfe824fc49fe78b538ac15f21c4cd6a9d44347", "091433bc8791bb66797b519811834a8a53af622d", "579b2962ac567a39742601cafe3fc43cf7a7109c", "f19c24761fe51e6711ce444c2671a551dc6aafcc", "ad5dc94b28bee087a34f52114c52bd09d2acd8cb", "da8f9211ea60755bb40811bb92de76be389566c6", "117aae1dc5b3aee679a690f7dab84e9a23add930", "7a736b7347fc5ea93c196ddfe0630ecddc17d324", "350af77e01e78e8e3534f42b80b5dd35a602e73c", "d2cda0dbb8b2e83ce3e70d818f78d2add803c661", "a34fb5c4b8b58ca19c376b1312e4d9955fe1d857", "082d339e29b1b1a9a800a1d72b401f69b6a157c5", "25b9ef5c78dbf17c71e6fd94054dd55d66c39264", "4715303bdb871e57cbb597b7e936a6ef4aa2f71a", "75064b7675553c22112b76b5687e0aed4089b0ea", "159d16cdc48135632c2d5790e5baaf8d0631f510", "29b737cdb317e47e7cc219b438ea38e8fdceb45c", "39a20428734b1b38b8e93c1c23283f4c85ff27f4", "ddeececa11517bea0d21804e3f724612dac1a5c5", "904949e9bf204c275ce366237ec1d3ebcf864a1a", "df9a08016fa553a169d893ce2d3fca375bab4781", "5da0224590d91defe8c75db0ab5e12d50b6ab6f3", "5d8eb0ccd00d66b649c6a4c06edb0e34093a2357", "d42142285c46207a16bd4294e437d504e419a9b7", "b9d73e86a98e93d558366fc3dd002393677808a3", "e6020915b9530fa585453f60a8934aed30558be4", "de09cb5eeff5e7a752567c009f7621dfa6ebb2da", "f0243850576e364368c3f743012e78165d8bf249", "dadb7ddfde3478238d23a8bacf5eddecc59e84c9", "a759570e6ef674cd93068020c2e6bd036961f7c6", "7ab02556d18d116228a964e38b7f454cf9f2b189", "a0e03c5b647438299c79c71458e6b1776082a37b", "c2cf74ba6f107aa9508e7ef1bad93916d944cb4c", "0e0900b88c33b671be5dd2ded9885b6526d6b429", "7985ac55e170273dd0ffa6bd756e588bab301d57", "0486c77b8f251e8ae52f5ab6304288c1a8bcc48a", 
"554b53f6e5e37d0f8c8eade1a962b39ce591f6ae", "54969341ec539ddaaf7537b7353e3cea84790eac", "caab1c1d53718315f54bc4df42eb9a727fa18483", "1491c73713ff0b931e5bc1e990b9e762bfe7b60b", "2e3c8c8a413a317295bd44d86d089ed70a0b0c29", "a910f0468ffaf85aad72c96a7214565945cd2819", "53bed2d3d75c4320ad5af4a85e31bf92e3c704ef", "19158dfe2815e7f9eebc5822687e83d0a89ae147", "58d16e23e1192be4acaf6a29c1f5995817146554", "39e55283e6eb3f0f9db07cf1b20e0de8d5aac10e", "5e053cd164b02433c4efc0fc675f6273a8a1c46a", "c70ad19c90491e2de8de686b6a49f9bbe44692c0", "4c33746fc5688da61059daa93978ac887f04cce8", "3b0b706fc94b35a1eddd830685e07870315b9565", "78990bd69e12f7d123b6a0ce6b1674ea801f2319", "deb78e302c2efdac51b742f4d3e8041b5838e533", "bfc9a449e6364817a5a3e19b73b1527a85c32d02", "56ffece2817a0363f551210733a611830ba1155d", "4cea60c30d404abfd4044a6367d436fa6f67bb89", "8f48c9ca2ea3101083be19344633372fe1a2efcd", "d7c9bd2587204071b87feaad01d631e7ea591c6b", "fd3d94fac6a282414406716040b10c1746634ecd", "c06447df3e50ec451240205cefa0708caee8ab8c", "3adf719a5f451a61e98823783b5f2e049bbffa2d", "89887e95169efb35726cbeecf6a252de6fbcac3b", "33264f4cfc7fa52ff2a6e9f739070e8501ce07bc", "b5793958cd1654b4817ebb57f5484dfd8861f916", "1a9997d8421d577a728f6ac119d4b14a3f46402c", "561ed7e47524fb3218e6a38f41cd877a9c33d3b9", "1e54025a6b399bfc210a52a8c3314e8f570c2204", "8cc23e554d98522b377d227dc78e9382a0ed35e5", "d81b0a79558cabaaf3db22caf89454f4e012f21b", "0d07db3510c7f9c2ceab65444cb8fc8ec49197b2", "eb044760b6502431da6b6f3d5ad11aaab851a1ff", "928857c96ef837f43ec87135de69780f6667cc70", "afb353801ce723951f0d8f9ed4b5ff9b41615601", "4d22c000c12aaedadcf075736dfc998dea932f06", "3417673c59544fcd33820a0a583a7543c70ac595", "27ccf0cdf0c7a74640697dfb5d1cf85969a5da2e", "8ff0c2020bf42b97d82a275c98a923a0df3bce33", "1eadafc27372b33a73eca062438a58d4280fd3a1", "abc2e6431a7092fb11418b79ca1c41a76b811ea0", "558c587373e2ea44898f70de7858da71aa217b8d", "a13a4e4cc8f4744b40668fe7cca660ae0e88537d", "5c3d8cf726f17bbb326551253c810429d332d3f3", 
"16161051ee13dd3d836a39a280df822bf6442c84", "205e895e03969c96f3c482b0bd26308b16a12bd0", "4ed613b6f0427d3ec4cad6c51dcc451786812959", "d350e3753756b1c6946d5d9150626b2de4f7a8e4", "daf489ba1a9012ec2e5236fddfcc46f38cf848a9", "a360526696a2698ad31dfca4c529e098d2dbdbd1", "e74bddccc40e65b31081a1599cbe7385d5d3e1c0", "1e0a5ce5204f3f7503c39df6d200627cc331efe2", "9263ca6211ec39469f0daa8790ccaecbd5898423", "80eab89ff1c5c2cfc1ea62e2088cfc9b62de8d35", "de6ab8cd9d402c976082b707b1207c3ad49ae204", "d0631ba22add59684fff926d80d2e6948dfb7d7e", "789c76749a15614d97ac8f4ec18b3ce7d80a2d28", "0d75052f1d7350fa035a35566555ce7b65d1cd2f", "e42200a4f407f3a58119a2abdb3b9b218ec8ac7d", "45fccb72a1bc078ecb260c3e9871dda4edf37087", "673952e036b92617d56deac4166aea3064da7fed", "0041afaf2b17f1a33bd514db27b17ce34670fdb8", "108961c7366e36825ffed94ac9eab603e05b6bc6", "6c76bea8342ebba9957e5ddb5ffab9a953398a90", "e2b093c6ebe4352ba9a1b281c621b798aae8d71c", "e5abe63d687f927a0ac61e9ad62f88b355d89caf", "4706d61276b953eadeac572bd449cfa70d2e0b82", "3d6b71b359d5db96a69ca322a5336110d89fb10d", "4061524d5867325aab871ecf25ba58acd7872192", "9bd7f95a4c752a44e96d2205ceb6fcefe9232c8b", "96788880589a514c3ae9de29695c0127d6e76b8f", "bea0bb77c0d75c3d70fefc274bfbff93a3eff015", "4170882122b559fc39ab3eafd66babe2429ba858", "90fbeb4c871d3916c2b428645a1e1482f05826e1", "f43b60a33c585827bfa354d3d49fb148a1c26c3f", "15f85fe1d930bde435c2e04372e27e8c8c4ba19a", "f164313e63d5c6d0a5f4b55fd0ffcc25c436bc14", "bf92d7784a687b1aab20a0de2679498af641ab18", "aefb110f14dd8d59c5465c7d91bd8b34a7c69597", "05014b04223562c7c7485a1277552564d0ddc6de", "3c9ad25e91cace6ac93069480745d4578b7f29f5", "ae0514be12d200bd9fecf0d834bdcb30288c7a1e", "4dea9cce0825c0cdb1a4a28c4ab8416d6e3e047c", "5458ccf22bdea7197e28b433ef06d5225fb030a7", "31ffe7b6447221ade78c71c36e8e86279a8478b6", "2c7932c2096669113328a75d1ad1d1bfb8f86ad0", "e79847c3bf3ffefe9304e212d8dda7aaa29eaada", "0bcc4ccbe7b12166bb6e8669ab6b5c7edfe6294e", "cd36768795c696c990ff5c89be8d8b3b205858bd", 
"b2e0e08e4d4c722d0f54f5a124ca28a67d74ce3e", "8fde6363ee7e0a849f323a40fb2029da74056370", "c65cfc9d3568c586faf18611c4124f6b7c0c1a13", "81af86e3d343a40ce06a3927b6aa8c8853f6811a", "2050847bc7a1a0453891f03aeeb4643e360fde7d", "a427ee25ef515ddd9cf50b4cc3a7376f57d58926", "b7a9fa746f22aa543c1e682554a834329b17d1c2", "d6daef592b96866a61bbba0fe9c2d3015c3ebedf", "ad62c6e17bc39b4dec20d32f6ac667ae42d2c118", "c086fccde785d803ee2fab1979f70fd6f313d621", "56ffa7d906b08d02d6d5a12c7377a57e24ef3391", "086131159999d79adf6b31c1e604b18809e70ba8", "c5392bdc97e525403a38563ba19caef342879116", "b9c7b2b419b44def1b0b4da9f93fcc339fc20a41", "cca198ae698e7956992f2fb326c04965b2964a18", "bbab98fe736523919d17661055756a0fe6fe4e8d", "a59800f16ad02f550c600fff4179167bad0b8654", "7880138c9ec1f0f78b7c896a93179e9b38f44a47", "8e9288e4619ee50631b68cf248eb56f4fc97a5e8", "f10484fdcf67cfed25121d4162412e523fbdec95", "d78373de773c2271a10b89466fe1858c3cab677f", "57723da316c019628460daeaba143537d4dd1327", "b38c7a58b4c5298705b8f63dcb6a1c21ee297af8", "b2d13185695bdef08679d83d1c195f7014d5f4cf", "0f085f389a52e13586fe50f2dae49e105225303f", "d36bf3b5616e12e402dea79b8a645e445f67b675", "29732d196d199acdb9d5122207b4613bb3aedf8e", "9abc9e3cadbec9139b39dfddb0de6c08b7aaf2d0", "dc128d77a641312f358071edfd12f05988306465", "fc970d7694b1d2438dd101a146d2e4f29087963e", "1f284b24d4b937e326316e2b5bfde227ea2fcdc3", "410eb51d63a3d6196ca9cab812f3233f00ab5906", "5dc14823862ff1f07dec483d5b4860727055ea79", "29c6b06ac98dbdaf25e4cc9a05b4ab314923cccd", "20a2b3a0d6a8a5334425e7190cbbfcf5d02ce8e9", "a5da6a6d4243a89e974a6467cb5c6df6d914a946", "78749b58299ecebf100e2512872029f89878449b", "3ec77809aaa7bd30858a4274e3c28a2a0259b30c", "331a3487bc4a876ab1761bf0f67ead5170e1dc5c", "3f14b504c2b37a0e8119fbda0eff52efb2eb2461", "98ff7597197bfc7b4fe42575cf17d0fc42f501bd", "883006c0f76cf348a5f8339bfcb649a3e46e2690", "485c869aa24c3241dbaaa5d4f0b5f696ee4ef77c", "15912abb1fe1457bb358d2d2b0e586c1987b6e25", "e36ca54c2195fa4291b24ecc6ec0ba2d286992e6", 
"d5de20cca347d6c5e6f662292e4d52e765ff5cee", "3cfc097ba6badb4f89a9591b950e65b60782589b", "896366051fac2c6c97ad44a2ee9eebcf3ffd59cf", "3b2df7d70ecbe3d0d65d27801d159ddaa150bf42", "20133e8f14e5d3c4543a97ee99308b9d1532a52f", "214eb90d0386379972cded05e9f57b884edb1675", "919b1f80f818c2c1710a674536d4957890bbfd82", "9e8dd40aea9204ad670b312a46ba807bfc0c61ce", "f51771c6cd9061acc9c468e7b44d5d3b6c552b32", "aadf4b077880ae5eee5dd298ab9e79a1b0114555", "a6d621a5aae983a6996849db5e6bc63fe0a234af", "600c8fcef0480b7061574532861369c1c631de75", "e60ee0573a6b6f6bd1800d353993c74ce6121765", "568067d7232c753e182dbc1d7075364560ffc363", "922ea66a77fbe53f781c83d3553f8b3e406965e7", "25728e08b0ee482ee6ced79c74d4735bb5478e29", "141baee5b83ecbe7242b6f3b8e8b55a8f2e484a9", "09e3967a34cca8dc0f00c9ee7a476a96812a55e0", "ecb4b28c5ef9be94e2c34a55f42b40a54ff500fb", "ae03cfed10cb1dbd0328d70dc84789dd141ef1a3", "5ed66fb992bfefb070b5c39dc45b6e3ff5248c10", "53bb52eb910c3a0ac5dc7f379b1f3f7c29af529d", "67545a21b41ec6dd60376aff84bc0945cdb79590", "89d12f74c4195ec04c8bd6e16919e339b2fdcc63", "1a1f63cbd8465d1bbee9bca24124b52ea4ec2762", "f3f66fced81d87d0b83119e8561037de3c7dbdfe", "434ad689f9f8bc034fa8489f80f851686b8b449e", "9fbe2611b1e2a49199fdee96c2083da625ba57df", "75503aff70a61ff4810e85838a214be484a674ba", "31d60b2af2c0e172c1a6a124718e99075818c408", "3dd4d719b2185f7c7f92cc97f3b5a65990fcd5dd", "f442e57e13d1da68723d68cb68d7c78e1788cc7f", "403b3d0594989629c95e5bc5230d4ccb1691f255", "76229d9e68bca10f3876f351856d6911857be827", "f9329c2ab6d27864e498daed3709665522c1809a", "1c408790a7bc5cc8b0c2e23668ad326d0ccbebd4", "7cd3062284a9f93df05cb11161d16114be945a5b", "5c879f9e2e79d6c6af8d4c821575e73876240a83", "0c75db5e3c27bcb0d07311a950d0d25cb57c731e", "fc950b230a0189cc63b2e2295b2dc761d5b2270c", "326b9c8391e89f5bd032aebd1b65e925083c269b", "c5c6ec48ae98d86171360b19e3ec03738c712f53", "f06b015bb19bd3c39ac5b1e4320566f8d83a0c84", "35be5bea87c465c97127c64919d115e235d62e82", "604a4f7c0958c5cac017b853a7d0f5f5b4a4c509", 
"c1c2775e19d6fd2ad6616f69bda92ac8927106a2", "41df6b9618bd930d6bf2c4704ff123b5fa5dcb2a", "29fc5339e299b47c3d4f871974069a2971b4b8b6", "bab65e5a5e0768fbddfaa0fa85f9fe9a51d38b6c", "2baec98c19804bf19b480a9a0aa814078e28bb3d", "181979ca0f9db45da0d0e456b79165248d611486", "df7606059011e0f13f7e0d4cf02574343b21676c", "b2046c78d4e2f00a72ee9a76875746d2d3f47e1c", "07f07f9c324666a7d139735e4a06cae96b9d36ec", "85754413313949f2fce98bfbcb49a18e5897849a", "058237d4d9405f372bf6e1105def6cdf3f85a718", "5147c249aadf9dd20d24a025995e79f5d6e4e5f4", "560e0e58d0059259ddf86fcec1fa7975dee6a868", "97129acc201d6e86e21514ca9ed3fcc0877f1367", "e8f4ded98f5955aad114f55e7aca6b540599236b", "4f623e3821d14553b3b286e20910db9225fb723f", "af2d30fdb8c611dc5b883b90311d873e336fc534", "d3d39e419ac98db2de1a9d5a05cb0b4ca5cae8fd", "a6b5ffb5b406abfda2509cae66cdcf56b4bb3837", "1b3e7caf4b456e3762a827aa623c3fb88ca0b1a0", "20a3ce81e7ddc1a121f4b13e439c4cbfb01adfba", "a5f70e0cd7da2b2df05fadb356a24743f3cf459a", "ba01dbfa29dc86d1279b2e9b9eeca1c52509bbda", "6bf88e29ac04d72297e6f8f2971c5b8579786e7f", "0133d1fe8a3138871075cd742c761a3de93a42ec", "3b557c4fd6775afc80c2cf7c8b16edde125b270e", "250b73ec5a4f78b7b4ea3aba65c27fc1352154d5", "747fddd7345b60da121fc13c5440a18039b912e6", "8f73af52d87c94d0bd43242462fd68d974eda331", "9af9a88c60d9e4b53e759823c439fc590a4b5bc5", "b59f441234d2d8f1765a20715e227376c7251cd7", "0fb45e704ef3ca1f9c70e7be3fb93b53714ed8b5", "38f7f3c72e582e116f6f079ec9ae738894785b96", "dd8a851f2a0c63bb97e33aaff1841695f601c863", "ee72673c0394d0fff2b3d8372d8a9401867b8e13", "1860b8f63ce501bd0dfa9e6f2debc080e88d9baa", "bb4be8e24d7b8ed56d81edec435b7b59bad96214", "19d583bf8c5533d1261ccdc068fdc3ef53b9ffb9", "06aab105d55c88bd2baa058dc51fa54580746424", "2c203050a6cca0a0bff80e574bda16a8c46fe9c2", "4ba2f445fcbbad464f107b036c57aa807ac5c0c2", "09f58353e48780c707cf24a0074e4d353da18934", "5e7e055ef9ba6e8566a400a8b1c6d8f827099553", "c68ec931585847b37cde9f910f40b2091a662e83", "30fd7b1f8502b1c1d7a855946d99d2d5323ec973", 
"4377b03bbee1f2cf99950019a8d4111f8de9c34a", "e378ce25579f3676ca50c8f6454e92a886b9e4d7", "601655a17ca199ef674079482c9b37cdf8e094a9", "aa8ef6ba6587c8a771ec4f91a0dd9099e96f6d52", "32c9ebd2685f522821eddfc19c7c91fd6b3caf22", "6dac0abef00a9f22b04973e396053476a12fba64", "9d941a99e6578b41e4e32d57ece580c10d578b22", "5c812e8968b88c25d18a066f8a28c0421555d2c9", "88bee9733e96958444dc9e6bef191baba4fa6efa", "334d6c71b6bce8dfbd376c4203004bd4464c2099", "57fd229097e4822292d19329a17ceb013b2cb648", "54204e28af73c7aca073835a14afcc5d8f52a515", "e4aeaf1af68a40907fda752559e45dc7afc2de67", "1e07500b00fcd0f65cf30a11f9023f74fe8ce65c", "5e16f10f2d667d17c029622b9278b6b0a206d394", "7c8e0f3053e09da6d8f9a1812591a35bccd5c669", "3cd5b1d71c1d6a50fcc986589f2d0026c68d9803", "03ac1c694bc84a27621da6bfe73ea9f7210c6d45", "007abf56a59d186c0787abbb4f78a90bd48103c9", "59a6c9333c941faf2540979dcfcb5d503a49b91e", "4d6ad0c7b3cf74adb0507dc886993e603c863e8c", "1176c886afbd8685ecf0094450a02eb96b950f71", "1fefb2f8dd1efcdb57d5c2966d81f9ab22c1c58d", "70341f61dfe2b92d8607814b52dfd0863a94310e", "29b86534d4b334b670914038c801987e18eb5532", "417c2fa930bb7078fdf10cb85c503bd5270b9dc2", "4e061a302816f5890a621eb278c6efa6e37d7e2f", "8c66378df977606d332fc3b0047989e890a6ac76", "945ef646679b6c575d3bbef9c6fc0a9629ac1b62", "f3ca2c43e8773b7062a8606286529c5bc9b3ce25", "950bf95da60fd4e77d5159254fed906d5ed5fbcb", "0343f9401b98de36be957a30209fef45dd684270", "8882d39edae556a351b6445e7324ec2c473cadb1", "24a20ebfe86859e0d91c2b44188f115b58ba8d9c", "31835472821c7e3090abb42e57c38f7043dc3636", "a59c0cf3d2c5bf144ee0dbc1152b1b5dd7634990", "2a612a7037646276ff98141d3e7abbc9c91fccb8", "ab734bac3994b00bf97ce22b9abc881ee8c12918", "60643bdab1c6261576e6610ea64ea0c0b200a28d", "432d8cba544bf7b09b0455561fea098177a85db1", "2e1415a814ae9abace5550e4893e13bd988c7ba1", "e00d391d7943561f5c7b772ab68e2bb6a85e64c4", "5a1255d65e8309131638b3eb94aad5c52ab3629a", "861a832b87b071a5d479186bbb2822f9ddbb67e4", "03adcf58d947a412f3904a79f2ab51cfdf0e838a", 
"7f533bd8f32525e2934a66a5b57d9143d7a89ee1", "1fcdc113a5df2f45a1f4b3249c041d942a3a730b", "344c0917c8d9e13c6b3546da8695332f86b57bd3", "a5625cfe16d72bd00e987857d68eb4d8fc3ce4fb", "3bb6570d81685b769dc9e74b6e4958894087f3f1", "e9b6804cd56cadb9342ec2ce412aacba7afd0723", "06c956d4aac65752672ce4bd5a379f10a7fd6148", "58542eeef9317ffab9b155579256d11efb4610f2", "2983cf95743be82671a71528004036bd19172712", "4349f17ec319ac8b25c14c2ec8c35f374b958066", "aa0c30bd923774add6e2f27ac74acd197b9110f2", "d9e66b877b277d73f8876f537206395e71f58269", "2ed27364d8bb58990b814dd0d746d8ec419b8d94", "63cf5fc2ee05eb9c6613043f585dba48c5561192", "0d3b167b52e9f0bf509e3af003ea320e6070b665", "73e0ff21029d337af53e030ec269c1b7aaffc3b7", "1773d65c1dc566fd6128db65e907ac91b4583bed", "9ea73660fccc4da51c7bc6eb6eedabcce7b5cead", "5b01d4338734aefb16ee82c4c59763d3abc008e6", "07f31bef7a7035792e3791473b3c58d03928abbf", "0e21c9e5755c3dab6d8079d738d1188b03128a31", "34c1e9a6166f4732d1738db803467f7abc47ba87", "d0a6a700779ac8cb70d7bb95f9a5afdda60152d9", "0595d18e8d8c9fb7689f636341d8a55cc15b3e6a", "fe0c51fd41cb2d5afa1bc1900bbbadb38a0de139", "1fd2ed45fb3ba77f10c83f0eef3b66955645dfe0", "48906f609446afcdaacbe1d65770d7a6165a8eee", "809e5884cf26b71dc7abc56ac0bad40fb29c671c", "6080f26675e44f692dd722b61905af71c5260af8", "eed93d2e16b55142b3260d268c9e72099c53d5bc", "a955033ca6716bf9957b362b77092592461664b4", "f61c13c3f842ecd88defbb53e4fab52cb6cb7967", "ff3f128f5addc6ce6b41f19f3d679282bbdaa2ee", "08903bf161a1e8dec29250a752ce9e2a508a711c", "59a35b63cf845ebf0ba31c290423e24eb822d245", "0486eb243d167ab4b197b682e9eff9684b273df4", "46e72046a9bb2d4982d60bcf5c63dbc622717f0f", "48320c6c156e7e25bfc04171b5ee6003de356a11", "95f990600abb9c8879e4f5f7cd03f3d696fcdec4", "b8f64a94f536b46ef34a0223272e02f9be785ef9", "a13a27e65c88b6cb4a414fd4f6bca780751a59db", "39d0de660e2116f32088ce07c3376759d0fdaff5", "8bebb26880274bdb840ebcca530caf26c393bf45", "c7c03324833ba262eeaada0349afa1b5990c1ea7", "6f22324fab61fbc5df1aac2c0c9c497e0a7db608", 
"ec90738b6de83748957ff7c8aeb3150b4c9b68bb", "970c0d6c0fd2ebe7c5921a45bc70f6345c844ff3", "534159e498e9cc61ea10917347637a59af38142d", "b32631f456397462b3530757f3a73a2ccc362342", "e5954958314b2184d7c7017ef2b8e1be47da23e5", "ce6f459462ea9419ca5adcc549d1d10e616c0213", "9be653e1bc15ef487d7f93aad02f3c9552f3ee4a", "924fdb38c7186f216fd8844905fdad7db910544c", "b8dba0504d6b4b557d51a6cf4de5507141db60cf", "174f46eccb5852c1f979d8c386e3805f7942bace", "c903af0d69edacf8d1bff3bfd85b9470f6c4c243", "24cb375a998f4af278998f8dee1d33603057e525", "19808134b780b342e21f54b60095b181dfc7a600", "3774ffc9523b8f4a148d5e93eaae317dc18af3e6", "39f03d1dfd94e6f06c1565d7d1bb14ab0eee03bc", "9dd47158cd7ee3725be3aa7a2ce9b25a7d4aed74", "39d6f8b791995dc5989f817373391189d7ac478a", "ad6745dd793073f81abd1f3246ba4102046da022", "bb557f4af797cae9205d5c159f1e2fdfe2d8b096", "0252256fa23eceb54d9eea50c9fb5c775338d9ea", "76ce3d35d9370f0e2e27cfd29ea0941f1462895f", "47506951d2dc7c4bb4d2d33dd25b67a767e56680", "539085fb25edc3b9efee6577d709115ff8415015", "f7c50d2be9fba0e4527fd9fbe3095e9d9a94fdd3", "500c096c3be8c6dc084cdbf1b24288926b2dfefc", "71e56f2aebeb3c4bb3687b104815e09bb4364102", "2be24e8a3f2b89bdaccd02521eff3b7bb917003e", "298c2be98370de8af538c06c957ce35d00e93af8", "9f2984081ef88c20d43b29788fdf732ceabd5d6a", "ed184fda0306079f2ee55a1ae60fbf675c8e11c6", "210b98394c3be96e7fd75d3eb11a391da1b3a6ca", "d3edbfe18610ce63f83db83f7fbc7634dde1eb40", "2609079d682998da2bc4315b55a29bafe4df414e", "39d6339a39151b5f88ec2d7acc38fe0618d71b5f", "e56c4c41bfa5ec2d86c7c9dd631a9a69cdc05e69", "2135a3d9f4b8f5771fa5fc7c7794abf8c2840c44", "3393459600368be2c4c9878a3f65a57dcc0c2cfa", "08b76e6923eea74ab0ed149811b3144fa21c7c73", "d4c7d1a7a03adb2338704d2be7467495f2eb6c7b", "0fdc3cbf92027cb1200f3f94927bef017d7325ae", "80d4cf7747abfae96328183dd1f84133023c2668", "ebeb0546efeab2be404c41a94f586c9107952bc3", "fd15e397629e0241642329fc8ee0b8cd6c6ac807", "8b19efa16a9e73125ab973429eb769d0ad5a8208", "36bb5cca0f6a75be8e66f58cba214b90982ee52f", 
"2af2b74c3462ccff3a6881ff7cf4f321b3242fa9", "5779e3e439c90d43648db107e848aeb954d3e347", "70f189798c8b9f2b31c8b5566a5cf3107050b349", "cdcfc75f54405c77478ab776eb407c598075d9f8", "563c940054e4b456661762c1ab858e6f730c3159", "cd01a0018f2b8f1211e8dfe311c28e32773c58dc", "9889596a98824bdf7e7c59b62e732c0b2d356c69", "12dfc8d4062b83a0b824b1676533482f14e4978c", "fa40bf7f7d16a72cb6e203e0ea438fc4c575396d", "4749503fe584b4747c1e371b7bc101dc083ed0c3", "c196cf87e475fa305f534f6a25fe676e1737294c", "3964caa0a1d788eb30365972880f83b71df1ab21", "41cfc9edbf36754746991c2a1e9a47c0d129d105", "877100f430b72c5d60de199603ab5c65f611ce17", "7f26c615dd187ca5e4b15759d5cb23ab3ea9d9a9", "8627f019882b024aef92e4eb9355c499c733e5b7", "d1bca67dd26d719b3e7a51acecd7c54c7b78b34a", "9d5ee493ee1b7955bbc71b13edc7f97efe53d743", "821382c28f4a2f047adcbe7e7b55e947de8f6e97", "77ad2727065cb3dc5c91975604af01c82ec5c9f6", "4e6ee936eb50dd032f7138702fa39b7c18ee8907", "15f795d436aaa9e77ccccb00b9df49bf0127f8b1", "04d3299b91413aef9b412deace3da92409cd6639", "359a4142f6a55a58a3e18628e3ee52c76744fcb0", "41c87d3342a85712a3591b6d49d99be8fc8d35d9", "eae625274767cb695fa2121ccdcb30828ffc9b66", "619eaaa60f0194d456591983a6f26b04cd9e9a52", "1fc5dad90c459d25a53d73125ea50094e28d6525", "c322706351370b598612dc1e73b8bee78e0e8a5e", "1394ca71fc52db972366602a6643dc3e65ee8726", "8df05de407b829abb357e230bead5407cabe7305", "48cdb6a640b4259c61c476fb529d7c176e8345a9", "12c7ecbfd714c160d2a6bb9cf03fa8b88e8da62b", "af370cbe392b7fb2b9f26476a7e063e0f4c46815", "d16968e5baac6d26b9cef5034f9d84bcc3ec627c", "d8b251cbcda6289bbeaff56692da963aa5a80cd2", "832377d50d133da3514ae3c51c0e6043ab856eea", "96b4124cf7626301ed3bb2d2b2233a490804e35e", "085c61ad31208717a6bc28059708ebdebbb1ca43", "90a2c7db91c3a2ad1249a4c9e6d7d872529cae6a", "9a9a888bcce37e582b8a5b5f12f662e487443e5c", "2fa241edb56734539c3b3487eda159e0b3e0f31c", "b769007cb6931464168f63ebb4571e46d8c804b7", "016dd886d5cb01c55a0204e2988274cf9417b564", "134dd3bb637b51c61fa9d2332f11e39efc0b359a", 
"b58852921f43412f5ed1616135270572b5862107", "28795f32b324eb3601e9a8c1ce93335691e120f3", "181eb2a54f70d9d5355e7b91c2000bb236e35c40", "0f322826b609c03f0d4eea735999fd37a4cabfbf", "490bd6dc220b42a706a919166dec7620a1c663f7", "0f5bf2a208d262aa0469bd3185f6e2e56acada81", "b8494e587377ee718fa205438e4a7e9dae64024c", "82a7ee86e3a8a0cf5a0447cabe94150e30b01f25", "8241008f9d3d5e866f648eb454db2054202121ef", "78dd8db25230f7001e86c94a6796b5e74f9d6e00", "1550c3835822843a02b2144cef8abc534441f5d4", "9a37ce9d206a1ea9241aa8405b50c8078fc8dd34", "5522073ebd53a6502cec9d716a77bb2c18aca593", "7b8ed16ca5c03d21b0b2e56d9ee5cc24d668625e", "384af919a685fbcb8dce37475a45cbf8dfe5c8f5", "0f3b3688af4e87b27ad38bf70aeffb64288bfe27", "8f690751e82bd3aa3a6b142309d3010ed6d577ca", "3765df816dc5a061bc261e190acc8bdd9d47bec0", "5cb8fe6b51813600d5b43a63ca4b8c1cb1237793", "6d8612f7856f569f5635ff07a6b94480a9c7c284", "82da8e52232df3124b7130a9dab90af9f165de90", "1a9a7913db0f726996d28e15ca6de59d36e54c4f", "2155739f578e33449546f45a0b4cf64dbd614025", "9f22e0749405dfc3e3211474b933aa7514722e4b", "f6c7f1cbfa412fb6244992b7fb2eda0a9e0d933e", "abfcafaa765433b8f5b8be7eae392a8daec54b8e", "4efaa2a1a14ba6e8bea779eae49d6220fc771f2a", "820e4727827646c79a9a5d862c510d26be5356f1", "c594231d4eefe6c46ade599c477a435fab64b1d7", "b29e60ddcabff5002c3ddec135ec94dd991d8d5a", "cb489395a7a89bc6299e78e75ac7c0207bcd39bb", "17635e22a73da3ff60a72715b7dd8837de6fee89", "30f84c48bdf2f6152075dd9651a761a84b2f2166", "2adfedf71aa84a7417d95e8c4fa2288dc995b41b", "a9fb2ec954dbb8e1ee6b3a33e0e5c06db2d89d3c", "f153cbec29d86a58b5f15231fd14e7037a210682", "b6ae677b26da039e0112e434d40baf7dd929a3ba", "bbab2c3d0ebc0957c5e962298ffd8c6d4bc25c5a", "6fee701352f0f5c4abea3e918ddcf078243253cc", "dc295e85e698af56cd115e5531b66e19f3b9e0ce", "de058103b26ea4f15074392a2eee62a3aa935b80", "f202c78e58d33a65c19183414ad0ee91be440d61", "75b20672a6290a8e2769ba0226d9187c0ccd5843", "e4433daf01a4e55ffca764c1e161f83552db081f", "fb3844521f9719e4904e5d3d7e1e549e5881b1f4", 
"4c863a15c4da0d0ccd20c5897a4e33fb771fe3eb", "dcc064b8bf7744801ae7dfe4cbfd11b7e5a5b673", "6cad008ad80081dc42752e813ee6924e3c174dc7", "4f5e5fea12c44a5be7107748320e6d66192b7acb", "c747c45c2fb3d678954bf1a16a3d9cc4dd4b8f01", "72b34e9536665f776b0f282ddb63120afa21c84e", "35410a58514cd5fd66d9c43d42e8222526170c1b", "596e414872debe1441b5e40216febe8788df9b35", "61d2057142a1e995ed4c8d337d0f557188eb8162", "21a763b9038dd75ed1c59bb2c9bc92cff805c34e", "cd0f7b3f545cc4bfa5e2d7185789e8ead7e3cee2", "2b8667df1a0332386d8d799fbac0327496ce02c9", "b05ac3b2286c30fcab385f682b3519a823857112", "e6d50d65a87425e7f0b4ec08c53d200f12f75590", "a23d2ce6ad9640f24c63dc7a42c45faf559f4039", "0f9683ca40a2257ae602eb781a57a4e5d5c53b03", "147e699946e8c54d2176b4d868db03dd1c7bdb8f", "dafe65b9ca65c67b3a655755ca26e0ffef049df1", "f69a289a3bc6b61c612ba6ff4033f122100daccb", "78a9bebc5a9a3f10017cac4475fbc970f3a3ed35", "3369692338841f14ce032fc5d0b5b4fe7cc79f1a", "c3de7c38493cfe67654411d77f47069cfa7b077b", "48ffb705e94dde426b7241108ca915a5ecab6414", "420346d7231608ad845fd7f06c8134cc1e150821", "55b74bc00871b43d69f8c80adb5af6e0d54d24e0", "5357bdaf7c54619016bdb7ebfa991a65a6cc8353", "df8a4d17bd48cd9c9f1e74396fa95cdf3381012b", "841ab6951767498cc49f97557499b54b3aee4992", "b6ef46621d8660eb53836202fa58f04fa20adfd7", "658b5690828c6d3f745d47828867a6cc33370b18", "d787f691af05a56eb0e91437fc6b1dfe5fbccbb9", "f8a5bc2bd26790d474a1f6cc246b2ba0bcde9464", "7c79d3a40c1a1f5b9692ed23396b0f13453c225c", "36436beaff15390381b04cbe543c73dcef9872ac", "8c5dcd5a0b3c9940e544993327eab6425ce645d5", "46aba81002e24209ca1962e305e47ef7a1821b32", "524f6dc7441a3899ea8eb5d3e0d5d70e50ba566a", "c631a31be2c793d398175ceef7daff1848bb6408", "ecf4690ddd3ad26f9cd1749d16ef1aa06d391f92", "9730a140831f51a6640236e42059b948c5466d0c", "00858aa4fe471f0412ca869382a2f6c5ac907f50", "38fb67d26d27653f7aa538a1f0237e281d5a4e6d", "b97ed8715ef7b93c3540c24245e91a2685708529", "7f3c6bf191a8633d10fad32e23fa06a3c925ffee", "05904c87cb1d0b1f17fcb018fa0344c020694f36", 
"faf5583063682e70dedc4466ac0f74eeb63169e7", "8a63b2fb15c1ed01bb1c9c42561219e5162f3851", "75308067ddd3c53721430d7984295838c81d4106", "ec89c5f2f5acce23b0d05736cd9f32d4ca6dc382", "a84f80ca4e29b49cab1035ed8c7877caf2dbe914", "5ad65c6474c135a6c15e7127d8bb91de8c8a55a1", "0c9f715835bee028a358701cf5a73ecbc3a7e242", "db85195e171f7b75e4e6f99ed3029d31ee557e13", "54568bdce3405ffbe2a6f5820711f966e2d2faf3", "90f0646c0801f1dad43d2374d1145be8e005bdbf", "bf8bcda2e4d04b6bd6f5e70622e972baf525a1c7", "0f2ffd582674bd856247bc5482d85e6db3b49b8f", "fc38d7e2dc7006383e5b86b922c815251ac55631", "7934c91f09e5bf819519d4348aafdda7c99267bb", "09a106feed520651d785fd8a2df26910f5928f2e", "1582c29d0f752f95a12f5a8ce08d5e5c752f6822", "9d0bf3b351fb4d80cee5168af8367c5f6c8b2f3a", "ef363588ceb966d862b4347051d20b8e1b8834f1", "725a45ad75caf0112d649253f8a69793b1f00e80", "a7a7d4be51d0918cbec78d84dd1f7363535fd60c", "645766177de2ef61619572bc09ce239c232d7d5c", "b0b4ce1962ad6732965aa7f4b3dd1bfd32f0ae5c", "eb1208a7f535de6c6180e4dbeb6eef2a27500c52", "15eafc361edebe3da9ff8debaae16f487fbf17f2", "0ec17d929f62660fb3d1bcdd791f9639034f5344", "e1f794bacd01eecb623bead652bdc9f86e17944e", "b317d03d82c22f52dbd79a3a19b1384aa53a3925", "8304b011f09655927010d2893710d17dd6ea4f39", "09eaa332ddcd036b0f0950bbdb3624072f105a3b", "8ff3c7b46ab36f1d01e96681baf512859cc80a4d", "e39af9fb267c9deb81f9c73bbd71f5674b4358c0", "49659fb64b1d47fdd569e41a8a6da6aa76612903", "20388099cc415c772926e47bcbbe554e133343d1", "2ba64deeb3e170e4776e2d2704771019cf9c8639", "284bf12324805f23b920bec0174be003c248cc9b", "0b74a2dead1ff07447ef49cd2a861a77d5b5de64", "7056a051e0589ab6aa299c7d2a31588800b8c93e", "8575adafc04a7915bd71c3733e379577da0c4406", "0956a3c628959afcf870f5d7ec581160a4aa5221", "3df8ca618c05514be39a4cde3e68988e73ae6f50", "6af98f9843ba629ae1b0347e8b8d81a263f8d7f2", "f152fcfbca25a19e9984fcf264cd7f0e34a1e236", "814d091c973ff6033a83d4e44ab3b6a88cc1cb66", "0a391c4d7aafa73324549f212cf28640ed471a81", "4c5b56f0da26c745c7375308607beab08d13460d", 
"73bbbfac7b144f835840fe7f7b5139283bf4f3f1", "519b69f50689cf0c702c8432282d98054095cec4", "048eb50c398fa01bd15329945113341102d96454", "a43c7b7f702c52022549a3533b5a487a1bfa3812", "83ca4cca9b28ae58f461b5a192e08dffdc1c76f3", "f16921c1c6e8bce89bce7679cbd824d65b494e4d", "f1ec3752535e0aa6aafe3930974a22250e652ca1", "a3a6e3cadfed3c0a520e4417fc27da561324fbc6", "c0d1d9a585ef961f1c8e6a1e922822811181615c", "7ffe0284db4581e814cdcdba48236e63c50a7d54", "30c96cc041bafa4f480b7b1eb5c45999701fe066", "b73f43a34af3f5ebac0a88066d8bd2eb39873be3", "47412bd10b4d40edf77d8976ae37081493ecb601", "8f5a2750f7ed015efa85887db3f6c6d2c0cb7b11", "00d14af37bc75b6477b4846f6ab561cdc89c96a2", "1df1391795000c8085f81316043f0a0adca87379", "8f2e594f55ca1b1675d8bfef25922c97109cb599", "0f349677a24dce888851dcf44f5c886d9f4681ec", "dcc1512561b342c003b489f9235c0fca527ac0b0", "f07e54347b9c2c9ecc0878f1cc1d7fd8ac7e2c58", "32610513986d27c9e3c3045e4f5e04f8b47c9082", "42b0e7f924dc96df281207d5b6560436c61fbaeb", "adc71e6925f1879f902173e659f7f63caf19c97d", "ac5d9753a53b0d69308596908032f85b416c0056", "6898b0934d2bc34acc61a3c63fbb20337d7b9a95", "4b3db55f583cdf3fc396d83fe9bc8439d8f5d294", "1f5cea8440403bb06ce4868a0d7123b12e73dbd9", "e030697c19dd1919dbdd889b69df7ab002a8af19", "fd4f9955ec28b63443039cb9d4e15bae796defe4", "188951263d3140b3b5f5579e7a745317356e75ce", "1001f69c8f5f265e141a471ebf74096eac289878", "753119d52d611a0d08a2d43601719325f89fbbc2", "0da4c3d898ca2fff9e549d18f513f4898e960aca", "0bf2765d431c16de7b8f9c644684e69fa52598eb", "44fbbd3def64d52c956277628a89aba77b24686b", "08e23683a4e876d41490f46acb8adb886e3a4d70", "27fb07897db51ce23db4ef93e2621717ee1db64c", "11c7e7010da7a93287e09b71f920237d0e5aac02", "16647dc1bc87ba1e7b8bcd7e1ea8ccebcfe20fa5", "0bf675632ef3f5ea2cbb0d9a7895e8982c988270", "2be9c1d61092ec315f068bb30f4651cf4f3c7658", "54b16b233e6130354e7d3f0d001cc5491f85e998", "8c7284a0958c31f57b0558d3951d4486379ffacc", "40757d94d6ef33555fc940d556ebfb0d32410fbb", "8ffd5f01c592cacc2cfde8486f934f6b2c9f95b0", 
"e7cac91da51b78eb4a28e194d3f599f95742e2a2", "042e83c87d9cd16eb2309d08b71ad955fc8a65d1", "d5eab901316fec455161994f15da6513198e089b", "04b22b008669fa981602c7723b44cb4a5cb2d480", "9b68fc7b3f395ba1df39d1304c36365424c73819", "c010c66620a57b14f0f1b0f7cbc0173d2a844c0e", "887a035bf946b44d218c1c13f0046197bc35b83e", "8c8284461633083764e01fe720a186267504990f", "67ad519dee36c4368163c1617f5018fee4aa30e8", "21967faefa55857c6a09f9fe52a10a394757d59c", "760ae5b075a59d6b448115b0de549f29a58d7f74", "1aa0b335906e91cc026741e3523b088677755762", "926ca7ce14332f9f848c28565d0f2f9a2d1e35a8", "295266d09fde8f85e6e577b5181cbc73a1594b6b", "c7c0feedf979e7cc9139926e89c44e4189c585f5", "6ed82fcb566cf73e01b58d95218204e276f28272", "7b4d985d03ebf8465757877f0eeaea00fa77676b", "e896b084a247213f48b703c98f2ec6f55a02a2f5", "ec4af4a6e89d61c05dcdf89f7f5d0a404bed4027", "fa11590fea86049fff1eb412642753422738c584", "340716ba8c6ab315a4253cb3750c74aca54dc3aa", "68ba19afe924699b4a0c84af91c05deb5b03e3bd", "7da9464dbae52c8bda13461a4f44420c333b0342", "ec5f89e822d9fcbc7b7422dc401478fc29f9c02d", "3c90f2603ef99222697b76d7ab123f513a1f4baa", "6a536aa4ecd6359d54a34aca7eff828e4df02730", "00ba9f2005b750efddd9f883acb03201342b9288", "ee267e831aba3a2ead7ce6109b48afd41a30323f", "7869d8b9899226132d410ad6d409746bafe58f77", "29a6cbf089a8d916b563e02480a1844909754bcf", "11bfc54a64ca69786323551bbf88b85b216ae486", "7d4a04c03b73d34c86f5d06cbb88cca4287d8b37", "790ad3255083ac475185d9de8159ae3cf2e0068b", "318b52b1f37669c24415f4aab6266c72a3b255fe", "a66373beaad40fb5a8e2e1b42c5a2213b166a55c", "2fb9363fc33568c21e696e82c6869170016335ac", "95bfeca1a19479dfa4d76b3b7211be5462174ec1", "a6bd679c8a9346a39a003f536f36b7f77c0e09df", "eea0640261e2d9bb6b851a519ef1a036093ec04f", "8e46ad44e47d323c8cbede0fb9bad5614369a43d", "e1eca56ced4fd2a6a3048ba7240f0fe1991ba45e", "defcfed9c43bdf8a4388daade4899ef9d3345458", "f8af9978e0f722d38191e5780fc769dda8a60158", "52f71cc9c312aa845867ad1695c25a6d1d94ba0e", "d5440779ca69a2f010e57250f53a9be0116305e3", 
"ec6855acd0871d3e000872a5dd89db97c1554e18", "6e08cc6e871eb7b19f4a540714c4f21f0fdbb86c", "75e0d77d50dc0fe492f2c58c1ca86c5e21b2511c", "247232ab9eabb4f2480dd70557a1ee89afed4f20", "72edfb91e4b3d42547591be9e8c6eb07e7190499", "131059ea24073d08de0bd153f9caddc123911e51", "7c90dc1b0eb50900e38e796b5e18d50fb83571ba", "c02cc6af3cc93e86e86fb66412212babda8fb858", "adc0b5d9f010f8b7d9900fcb1703c3882e340d65", "e4485930357db8248543eb78ce3bc9f32050694e", "28858a6e956d712331986b31d1646d6b497ff1a9", "963cd7dbac6fdf518b1c082a5b0a6c151c7b97fa", "116888b8f08419f027f5047f0ff1557b16f69d5a", "12e5ff3d6771d725f09bb0b2f14d17a64d4c1c25", "d93a1f71adb7c2e641af966bcac5e594e0b3723e", "5ce035891b920e4728a50af7e4afb54e088f5183", "e681120f3eac3ea412a306097d1818c2e0d7e6bd", "063f0e6afe13df9913617dbc2230ad4263a595bc", "d0e1ad4f3f608124cd3efc2d5bd01b421ffc3274", "ddabb7494b5b6e87c279912071840f105c7a4d15", "39742f9b3a9f7adefbe936de68249148576b90da", "57a903864f1a64160457cb9ff172aa4c865b6a9a", "cffc94574c8796cbd8234422a979e57e67eca7b5", "f86c65bc2753ae71826a0dafbf46a75d22fb5b5b", "6f5486c415a6aae48fbbc546358993d551ac1cd4", "6746292c46975ba575a48c2b05b09ab056c26967", "c43490eb0a3ce18fb2326ef1d0828664b60e73e2", "42b0124c382ee7a3b56c2063e769aae94d7eda7b", "3584b132d652e60056c2cc63f11705131eaf3120", "5ab2c97ada652ff8f641e1b30cc27050c0ffa7e0", "8d6344658fa9673b1f4ac0d0bad53617ee127aaa", "69dc87575b56ba7f60fa24bdd4fceabeeaf39a80", "ac5b3e24a7dd2970c323ca7679625a7d29602480", "2b0e1a62d7168df5f29e2e9c7fc72ae43c39fdb2", "4c0ce0ed9cc92115874be4397f6240769d3ed84f", "42e793b1dd6669b74ad106071c432aa5015b8631", "f110f7be74261469fe9b0cc5a3b4ef35e2092d5b", "4b3d08514df70aa04961343d355b0a242fa81519", "1e9c3d0d87e09ea359ce1e31114b677d627bf9e7", "18d8c54c1977f41b7ed71c1eeebf162298323c6f", "58cbd5a31e92cff29e29e8b25ee79f30ff4e6d4b", "22ded1d36fdf941584a2dccb8ce981085e991116", "a01e3cd6072535838493005107530f12f4d7bae6", "2c107cd3e9ee1efdabcff8972624c0456ab419b3", "7484911e00afec5c08e7b83f3a1259d60035d77f", 
"dca423a36ecf6bccb13208d78a18fb5bc78ab339", "b2f4871cf9f61c44b16c733369d8730e90d9cc0d", "096e68f8d632f4363056d54a7de9c59d66b806d8", "b370eb9839be558e7db8390ce342312bd4835be9", "3da9a9091cfa8f4bf625829faf7a4c35a8fe91e0", "f4f616115bcc049b52c4e6e8aca9e08837730951", "f26d34d8a8d082ce2c81937f61c28f3769c38372", "79335495e54446541a3655d145911beba7c29d7d", "dd0be14c30714c77421dfe6cba31ed0b523434ae", "b47ea4d5b0040d85181925bda74da4ab5303768f", "e5d27e52fafde2b09ae6568fc6bde28468f5517e", "000b27b8725432580ef9d5b9c5402fc7b76fd68b", "97ba2eac9f7e6567b228ebb2c18163933ae3f05c", "293d371d585d13159e53df703f724165704c9329", "3a53bad58f8467092477857ff9c2ae904d7108d2", "6c54261f601c8a569149b77d32efe6c58f2e4a2e", "044da4715e439b4f91cee8eec55299e30a615c56", "8b9f529700a93a2ff6e227c76a1333883a1f6213", "22d5ec0e9756761b686f85473890a6f6675b524a", "081456e22734a2cdef442345f80182e84d1c6124", "8e42fc867827a10aad2f425fdd06d83d6462a3a6", "08e3a0f80f10fc40cc1c043cbc4c873a76a6f6e8", "68245f308f8049dc40f146e296d6e6a6bdba1ff4", "794cf037dac115755cd15295d8c5fc1c00242548", "dbaa466fa6b9c107307fdfb60a9a0c74db0dab38", "137457bbf46009b25d7f6d853083b6da02bfd6b9", "a68c07cb446f63fa6b48eda04c93392219c09700", "768ea76f9690b74bff51b6c7bada3994681f79bc", "feb4367aafc60159c8dedcaba2d5a66fdd64066c", "fbc53ab5697ee6f4f270153dbdee2d93cfda7b5f", "376ea595a6ff5b876367654833de1e1778bacd1e", "776c5e37eecd26049ae31f56b3249c390e25e4e9", "7e3693fffef8d83ac109309a77f2545d32c10fc3", "31470cf8fda53c4460de4373e5ac4544236c44af", "56852a56dd830a6ee3882773c453025ddec652e2", "4e39951c2f8b4600239dec7e10b7ee1ba3a000dd", "09232b786e009655c5e03d2b3fcd7b40d75382bf", "0b2cd813a6e4eede24a8e603f361fddf82e67757", "b22317a0bbbcc79425f7c8a871b2bf211ba2e9c4", "d81253d750f2c204899e71fd68ad60680f9c8d57", "6b6946ce943da5ba4bf6471609d3355cadec172e", "430482d92007a3eec7009a2603aa5c1f2e63f661", "0c98defb5a83ea5dc5d90538d1cc8c4b6267a1cb", "fed7ee7152b9477c75251a133bb7e26679cb3dba", "1f1339196b0c65126145e0fdfdd88a4e197d5896", 
"248291285074203eb9ee8e0b8b517ac4ce7dc4aa", "acfecef9e56ff36455aed13f8e6be1a79b42f20f", "368e7f24c43f5d628d938dd453127917b6cbefda", "74113bb67eef4cfa28ebfa8bd38a614c82bdfdea", "127c229a3306bfc8170b84b12316f4a8024cc7ab", "bbf534b8ee9455b8e492a252bef26f9293d4f91a", "2e9dc528c023a6634a51d5a74e95e5f432da9aaa", "2dd409b2eff010631c1e6f3d92b873292064afa7", "941fe72a1aa8ee31ab5d15fc035ef856ce032b74", "823a47273e0e6101be67858f5c5f08e235f2d58a", "9fbc0135e76b0fd972517e06e833593ecf6ac49a", "6eb8e193687c16f0edc3742d3549ad175ef648d1", "9ab4c53832c521276c766afd35e943aec3b2a4cc", "74032e526edb45bc6c79cb5576e69486e72a316d", "52db20e91a46e68d7feda4b9d2755312f423b5b2", "3caebf3075e52483c7a7179b3491882af0aaaa37", "2d532fd0636fd49dd893c9dff7fe615f974ec826", "cdbb354904bc05129fa1ded42a4f0c793dbb3de1", "fe35639349a87808481e64f9cbea065339063154", "a6afb698b19faa376fe9e04e63b35668bb608f3f", "ea5645229264b0925708a09994555756498316a1", "c69ea9367e1244bfa5d3fc290b8a33be3abd8c24", "9cf07922cf91c4aea66c8d72606ca444f4607cc6", "343b07fc60e2bf9c2a2c8da7d46cee1c3bb85cf6", "6e80caed3f2ac86db775bd5e7d64925b00f1a0ca", "9bfe2732a905cb0aab370d1146a29b9d4129321d", "0ad9448aa6cd539fa55a0c6054558576cea3a000", "8747cc07e564e567ed0e8de038a222782206d986", "9fb1bd7d98a2fa79e1b9cb21b865ec7af0c1283f", "32cde4e3d7e225cd0f98f53e54295140df9600bf", "d11cd0c7484094a5c778d29ad9f73783613a2f6b", "73a4fe5072a30c132e8a0a18384caae4c112f198", "69188668dd6fe2075212a085bb63b5651f06704d", "57a14a65e8ae15176c9afae874854e8b0f23dca7", "cb9057a47f6d3367a6756507ceb1b1f9b596eb7a", "a2fce1c551a3c3b1cac16a96f86a59cd7fbd4c80", "fb35a3dadbe6d9a1823eb12e33fccf9a3db3c2a2", "f524b1aac4f2a29dab45d7e8726517798dbc9782", "c291d95ced5f5039f62d29db25ab094ef61e3df5", "2b73e3d541b0208ae54b3920fef4bfd9fd0c84a7", "dbebd79aa29a078bfb7a8044d7a56acb24f8354c", "bfb7619312e0d961b979fdcd7a327776a1fb2e26", "79d3e7321e50be745bef92ba1405b486bd1f133d", "20eb57978ec863e031e0960c6799d756a041d60a", "1f7cf2df2fa7719c9db3fe57a0f01d65f08a9a8f", 
"082a8642455b9a5cfb27c07cf9969106f8a7bf3c", "0cff123a31dcc115377ecca6ba137bebca909ff8", "06c333fc146d0a87f591c82a1f22925ccef378b1", "66cc90ea586c914e6a3b50fe703f4379d530fad7", "86bb3f1189095547defc95d0f3cebd21ea47e36d", "60e7831264573be83d9e34a0cf4fc9780a04495e", "806f466034e0c3e609e672559e23d5d8bea6fe3d", "b99957df60ee58e38ad71ed70b9973c22d5c26cc", "f614f9ba33554cfd1a474be03520319b51651a35", "34351420203bae3f7be1c2866e63f34410f68099", "523b2cbc48decfabffb66ecaeced4fe6a6f2ac78", "95e83661648ba6bf2f0fbbf436bc8304c3cf016f", "4eb22856671b9340e5ae532a021be62b9d31c9bc", "a2505774d5654685c6d899760759520b339e6c1e", "c2fe8856caa3dfa282b2c4c11c5c5dcb3b57e14e", "02e05ad42dbe99257eee1bff3e28feaa005e5924", "68150d92e2ca3141ff3f4ab3d770e07f6ca13961", "1802aebb98424af6fa8f3d4dc024da2f1d3ea1e5", "6e7c2f13bc2cf5547f4d8a845dc115108e52b27a", "f8ca23fdb2c9f62a0492147f1f015579049ad3cc", "42e640fc7d37c51b157e7007117eacb78d7789a9", "f27bdc4f7ec2006425f999055df071d64640836e", "a625ea5942e68364ceaecaccd7e5cb85b7aedfbb", "2670c4b556264605c32326f49ab4a8b4e83ab57f", "f883739c699e33937c88bd510be37cb16a23b79d", "6a55d6db1b31f44c9bb37b070fbf7c8f64a31f13", "87bdafbcf3569c06eef4a397beffc451f5101f94", "dd6826e9520a6e72bcd24d1bdb930e78c1083b31", "b8caf1b1bc3d7a26a91574b493c502d2128791f6", "3c1d87a9d7cc6599ba235884e25e5030941a00a0", "d377e648734f429ae50c889c43b7b2e9c5ca2d66", "b97a155bdd86491c8d32f02d6dfe5b73aaef4549", "421387011b5cdd2cb4a1fdf04728d350741a0ac1", "338d4ea0813c668d6e43eb025ea580fbd76bec8a", "8eb2e7c9017b4a110978a1bb504accbc7b9ba211", "05bba1f1626f02ef4ca497090b4a04d47f36ebb6", "183cf20917780d9f9c6fbeae3fd15afddda7eff0", "48d27876c99241eb23305c90bee91ee1e7316352", "3373ca46fa2c19112aebd772983ce70183ac1690", "5d04bd7104f08f7fb91967613ffc519c27641e99", "ee3a905ec8cd2e62dc642fad33d6f5f8516968a8", "eeb16417c905887bba26b680a68d05bbc758bcf0", "767936728b07238bbf38661fc3c2000d0c17b598", "10f66f6550d74b817a3fdcef7fdeba13ccdba51c", "d3a3d15a32644beffaac4322b9f165ed51cfd99b", 
"8ea8cdee6f62751d87339f821d2b2a094ab4b260", "a38dd439209b0913b14b1c3c71143457d8cf9b78", "280bc9751593897091015aaf2cab39805768b463", "539ffd51f18404e1ef83371488cf5a27cd16d064", "8e2bd1192b60cdb75c99234ccbd50ca920a47d00", "1d5aad4f7fae6d414ffb212cec1f7ac876de48bf", "b941d4a85be783a6883b7d41c1afa7a9db451831", "20eabf10e9591443de95b726d90cda8efa7e53bb", "0cdb49142f742f5edb293eb9261f8243aee36e12", "9d66de2a59ec20ca00a618481498a5320ad38481", "566038a3c2867894a08125efe41ef0a40824a090", "7813d405450013bbdb0b3a917319d5964a89484a", "3ff79cf6df1937949cc9bc522041a9a39d314d83", "9ed4ad41cbad645e7109e146ef6df73f774cd75d", "b98e7a8f605c21e25ac5e32bfb1851a01f30081b", "3d89f9b4da3d6fb1fdb33dea7592b5992069a096", "4e8c608fc4b8198f13f8a68b9c1a0780f6f50105", "ce8db0fe11e7c96d08de561506f9f8f399dabbb2", "2836d68c86f29bb87537ea6066d508fde838ad71", "845f45f8412905137bf4e46a0d434f5856cd3aec", "dac34b590adddef2fc31f26e2aeb0059115d07a1", "d50a40f2d24363809a9ac57cf7fbb630644af0e5", "13141284f1a7e1fe255f5c2b22c09e32f0a4d465", "0b8c92463f8f5087696681fb62dad003c308ebe2", "ccfebdf7917cb50b5fcd56fb837f841a2246a149", "518439ba2895c84ba686db5b83674c440e637c0b", "472ba8dd4ec72b34e85e733bccebb115811fd726", "86afb1e38a96f2ac00e792ef353a971fd13c8474", "0b242d5123f79defd5f775d49d8a7047ad3153bc", "e293a31260cf20996d12d14b8f29a9d4d99c4642", "78d645d5b426247e9c8f359694080186681f57db", "b3cb91a08be4117d6efe57251061b62417867de9", "2a35d20b2c0a045ea84723f328321c18be6f555c", "19dd371e1649ab55a46f4b98890d6937a411ec5d", "ac12a36330248eaddadd3e6e75b909e023c7674a", "cc9d068cf6c4a30da82fd6350a348467cb5086d4", "31f905d40a4ac3c16c91d5be8427762fa91277f1", "5c19c4c6a663fe185a739a5f50cef6a12a4635a1", "91b1a59b9e0e7f4db0828bf36654b84ba53b0557", "d77f18917a58e7d4598d31af4e7be2762d858370", "bd8f3fef958ebed5576792078f84c43999b1b207", "c51fbd2574e488e486483e39702a3d7754cc769b", "b7b461f82c911f2596b310e2b18dd0da1d5d4491", "6b089627a4ea24bff193611e68390d1a4c3b3644", "75e7fa7290b9b740559725b9c59df0d457523ee3", 
"518a3ce2a290352afea22027b64bf3950bffc65a", "14b87359f6874ff9b8ee234b18b418e57e75b762", "0e2d956790d3b8ab18cee8df6c949504ee78ad42", "34b3b14b4b7bfd149a0bd63749f416e1f2fc0c4c", "69eb6c91788e7c359ddd3500d01fb73433ce2e65", "fff0a848b57361e1e99548c95fbc2ec9ae00ce32", "ec40df721a80c62d4a768fe29b58d86b1a07f435", "c03f48e211ac81c3867c0e787bea3192fcfe323e", "8959e0e9a24c0fe79f3fd3acca9d139edc0abcfd", "a29a22878e1881d6cbf6acff2d0b209c8d3f778b", "8913a5b7ed91c5f6dec95349fbc6919deee4fc75", "a0061dae94d916f60a5a5373088f665a1b54f673", "439ca6ded75dffa5ddea203dde5e621dc4a88c3e", "9c59bb28054eee783a40b467c82f38021c19ff3e", "5fc97d6cb5af21ed196e44f22cee31ce8c51ef13", "1584edf8106e8f697f19b726e011b9717de0e4db", "2f13dd8c82f8efb25057de1517746373e05b04c4", "b97f694c2a111b5b1724eefd63c8d64c8e19f6c9", "6fa7a1c8a858157deee3b582099e5e234798bb4a", "72da7e3cf1136dd0c916f9e966937da0e26c64b6", "54948ee407b5d32da4b2eee377cc44f20c3a7e0c", "77869f274d4be4d4b4c438dbe7dff4baed521bd8", "108b2581e07c6b7ca235717c749d45a1fa15bb24", "1eb9c859ff7537182a25556635954bcd11830822", "62d1a31b8acd2141d3a994f2d2ec7a3baf0e6dc4", "3cd9b0a61bdfa1bb8a0a1bf0369515a76ecd06e3", "3e9ab40e6e23f09d16c852b74d40264067ac6abc", "4526992d4de4da2c5fae7a5ceaad6b65441adf9d", "0e4fa61871755b5548a5c970c8103f7b2ada24f3", "02e133aacde6d0977bca01ffe971c79097097b7f", "85ccf2c9627a988ebab7032d0ec2d76ec7832c98", "780c8a795baca1ba4cb4956cded877dd3d1ca313", "0a511058edae582e8327e8b9d469588c25152dc6", "982d4f1dee188f662a4b5616a045d69fc5c21b54", "81a80b26979b40d5ebe3f5ba70b03cb9f19dd7a5", "86fa086d02f424705bbea53943390f009191740a", "a35ed55dc330d470be2f610f4822f5152fcac4e1", "3ba74755c530347f14ec8261996dd9eae896e383", "7fc3442c8b4c96300ad3e860ee0310edb086de94", "f6f06be05981689b94809130e251f9e4bf932660", "15136c2f94fd29fc1cb6bedc8c1831b7002930a6", "b306bd9b485c6a6c1e4550beb1910ed9b6585359", "dc3dc18b6831c867a8d65da130a9ff147a736745", "467b602a67cfd7c347fe7ce74c02b38c4bb1f332", "9887ab220254859ffc7354d5189083a87c9bca6e", 
"40b10e330a5511a6a45f42c8b86da222504c717f", "0034e37a0faf0f71395245b266aacbf5412f190a", "23120f9b39e59bbac4438bf4a8a7889431ae8adb", "e20e2db743e8db1ff61279f4fda32bf8cf381f8e", "9b4d2cd2e5edbf5c8efddbdcce1db9a02a853534", "474b461cd12c6d1a2fbd67184362631681defa9e", "7783095a565094ae5b3dccf082d504ddd7255a5c", "fff31548617f208cd5ae5c32917afd48abc4ff6a", "9806d3dc7805dd8c9c20d7222c915fc4beee7099", "15cf7bdc36ec901596c56d04c934596cf7b43115", "b5857b5bd6cb72508a166304f909ddc94afe53e3", "5dd57b7e0e82a33420c054da7ea3f435d49e910e", "4180978dbcd09162d166f7449136cb0b320adf1f", "934efd61b20f5b8b151a2df7cd373f0b387c02b0", "ae425a2654a1064c2eda29b08a492c8d5aab27a2", "01c4cf9c7c08f0ad3f386d88725da564f3c54679", "0faf441a1ef1e788fb9ccd20484b104a1fa95ee8", "b8bcf9c773da1c5ee76db4bf750c9ff5d159f1a0", "24e82eaf3257e761d6ca0ffcc2cbca30dfca82e9", "4b9b30066a05bdeb0e05025402668499ebf99a6b", "6bfb0f8dd1a2c0b44347f09006dc991b8a08559c", "259706f1fd85e2e900e757d2656ca289363e74aa", "66a2c229ac82e38f1b7c77a786d8cf0d7e369598", "1da5fc63d66fbf750b0e15c5ef6d4274ca73cca1", "307a810d1bf6f747b1bd697a8a642afbd649613d", "edb5813a32ce1167feb263ca2803d0ae934d902c", "7ca7255c2e0c86e4adddbbff2ce74f36b1dc522d", "8798d2243e852be5285948a93abdef65751ccc47", "1a46d3a9bc1e4aff0ccac6403b49a13c8a89fc1d", "27dafedccd7b049e87efed72cabaa32ec00fdd45", "68c17aa1ecbff0787709be74d1d98d9efd78f410", "610a4451423ad7f82916c736cd8adb86a5a64c59", "88cd4209db62a34d9cba0b9cbe9d45d1e57d21e5", "3b75681f0162752865d85befd8b15e7d954ebfe6", "cfbb2d32586b58f5681e459afd236380acd86e28", "78f08685d44b6c6f82983d9b0f9c6ac2f7203a5e", "c4d0d09115a0df856cdb389fbccb20f62b07b14e", "fdd19fee07f2404952e629cc7f7ffaac14febe01", "370e0d9b89518a6b317a9f54f18d5398895a7046", "a961f1234e963a7945fed70197015678149b37d8", "660c99ac408b535bb0468ab3708d0d1d5db30180", "e69261094b118eb52ab370ab4d0c7158f51846e4", "c444c4dab97dd6d6696f56c1cacda051dde60448", "fe108803ee97badfa2a4abb80f27fa86afd9aad9", "3a846704ef4792dd329a5c7a2cb8b330ab6b8b4e", 
"be4faea0971ef74096ec9800750648b7601dda65", "d03e4e938bcbc25aa0feb83d8a0830f9cd3eb3ea", "a92c207031b0778572bf41803dba1a21076e128b", "f19ab817dd1ef64ee94e94689b0daae0f686e849", "553ec63f804e578edf221ab642a1f05612657c22", "5de9670f72d10682bf2cb3156988346257e0489f", "4512b87d68458d9ba0956c0f74b60371b6c69df4", "3ede3ed28329bf48fbd06438a69c4f855bef003f", "28b9d92baea72ec665c54d9d32743cf7bc0912a7", "69adbfa7b0b886caac15ebe53b89adce390598a3", "09ce14b84af2dc2f76ae1cf227356fa0ba337d07", "3a9681e2e07be7b40b59c32a49a6ff4c40c962a2", "74c8116d647612e8cd20a2528eeed38f76d09126", "87e5b4d95f95a0975e855cf5ad402db7a3c64ff5", "feb6e267923868bff6e2108603d00fdfd65251ca", "8e3d0b401dec8818cd0245c540c6bc032f169a1d", "2ab034e1f54c37bfc8ae93f7320160748310dc73", "1d1a7ef193b958f9074f4f236060a5f5e7642fc1", "6d10beb027fd7213dd4bccf2427e223662e20b7d", "8e94ed0d7606408a0833e69c3185d6dcbe22bbbe", "9660594e91ca3b37e573a0408f3a10f5107e443f", "e7436b8e68bb7139b823a7572af3decd96241e78", "814b05113ba0397d236736f94c01e85bb034c833", "b7f7a4df251ff26aca83d66d6b479f1dc6cd1085", "2004afb2276a169cdb1f33b2610c5218a1e47332", "198b6beb53e0e61357825d57938719f614685f75", "34546ef7e6148d9a1fb42cfab5f0ce11c92c760a", "31a38fd2d9d4f34d2b54318021209fe5565b8f7f", "153f5ad54dd101f7f9c2ae17e96c69fe84aa9de4", "5039834df68600a24e7e8eefb6ba44a5124e67fc", "1742e6c347037d5d4ccbdf5c7a27dfbf0afedb91", "41c97af4801ac302f09902aeec2af17b481563ab", "5157dde17a69f12c51186ffc20a0a6c6847f1a29", "c0a8c0e6ccf9882969ba0eda0b898affa015437b", "5028c0decfc8dd623c50b102424b93a8e9f2e390", "e66b4aa85524f493dafde8c75176ac0afad5b79c", "8af411697e73f6cfe691fe502d4bfb42510b4835", "1d3dd9aba79a53390317ec1e0b7cd742cba43132", "82ccd62f70e669ec770daf11d9611cab0a13047e", "d23ec100432d860b12308941f8539af82a28843f", "653d19e64bd75648cdb149f755d59e583b8367e3", "7897c8a9361b427f7b07249d21eb9315db189496", "93971a49ef6cc88a139420349a1dfd85fb5d3f5c", "bf8a520533f401347e2f55da17383a3e567ef6d8", "2f17f6c460e02bd105dcbf14c9b73f34c5fb59bd", 
"af53ce0f3a039c685b754e1f704817e03e182412", "b49affdff167f5d170da18de3efa6fd6a50262a2", "e9e40e588f8e6510fa5537e0c9e083ceed5d07ad", "f73174cfcc5c329b63f19fffdd706e1df4cc9e20", "e4e3faa47bb567491eaeaebb2213bf0e1db989e1", "4344ba6e33faaa616d01248368e66799548ca48b", "833f6ab858f26b848f0d747de502127406f06417", "0857281a3b6a5faba1405e2c11f4e17191d3824d", "0a88f5936528dcfdd27df886b07e62f2fd2072d0", "fab83bf8d7cab8fe069796b33d2a6bd70c8cefc6", "27a0a7837f9114143717fc63294a6500565294c2", "51c7c5dfda47647aef2797ac3103cf0e108fdfb4", "3826e47f0572ab4d0fe34f0ed6a49aa8303e0428", "5506a1a1e1255353fde05d9188cb2adc20553af5", "85188c77f3b2de3a45f7d4f709b6ea79e36bd0d9", "1921795408345751791b44b379f51b7dd54ebfa2", "b166ce267ddb705e6ed855c6b679ec699d62e9cb", "bbf20adb59b7461e0d040e665bf64ae5f478eda0", "9472338240929e1ed38e52e029dbfa85a42ae095", "b69ff748b1cc7da3843acdd7f1c33f0c0debf3f5", "f92ade569cbe54344ffd3bb25efd366dcd8ad659", "9989ad33b64accea8042e386ff3f1216386ba7f1", "3ebb0209d5e99b22c67e425a67a959f4db8d1f47", "942fd0b406fe1d24b50d745cd31fd31220c78f0c", "86614c2d2f6ebcb9c600d4aef85fd6bf6eab6663", "66886997988358847615375ba7d6e9eb0f1bb27f", "e6d6203fa911429d76f026e2ec2de260ec520432", "3cfbe1f100619a932ba7e2f068cd4c41505c9f58", "20111924fbf616a13d37823cd8712a9c6b458cd6", "5951e9e13ff99f97f301a336f24a14d80459c659", "939123cf21dc9189a03671484c734091b240183e", "ef032afa4bdb18b328ffcc60e2dc5229cc1939bc", "972e044f69443dfc5c987e29250b2b88a6d2f986", "f3a59d85b7458394e3c043d8277aa1ffe3cdac91", "c4c1fb882ae8b48c461e1f7c359ea3ea15da29fa", "d340a135a55ecf7506010e153d5f23155dcfa7e8", "d78fbd11f12cbc194e8ede761d292dc2c02d38a2", "e1b656c846a360d816a9f240499ec4f306897b98", "c17c7b201cfd0bcd75441afeaa734544c6ca3416", "d878a67b2ef6a0a5dec72db15291f12419040ab1", "7df277c37ac75851684f926fd3fb4daced3e79f8", "e6da1fcd2a8cda0c69b3d94812caa7d844903007", "cd74d606e76ecddee75279679d9770cdc0b49861", "43aa40eaa59244c233f83d81f86e12eba8d74b59", "6a16b91b2db0a3164f62bfd956530a4206b23fea", 
"9ff931ca721d50e470e1a38e583c7b18b6cdc2cc", "90fb58eeb32f15f795030c112f5a9b1655ba3624", "2c6e65d8ef8c17387b839ab6a82fb469117ae396", "9ca93ad6200bfa9dd814ac64bfb1044c3a0c01ce", "25866eb48b94e85fa675b1d393163d27ffd62ba6", "5b0ebb8430a04d9259b321fc3c1cc1090b8e600e", "5f01f14ca354266106d8aa1b07c45e8c9ac3e273", "492f3def325296164cd32b80d19a591b72b480cd", "d0d75a7116a76ccd98a3aeb6f6fff10ba91de1c1", "17aa78bd4331ef490f24bdd4d4cd21d22a18c09c", "79db191ca1268dc88271abef3179c4fe4ee92aed", "3983370efe7a7521bde255017171724d845b3383", "faf19885431cb39360158982c3a1127f6090a1f6", "21b16df93f0fab4864816f35ccb3207778a51952", "c4f3185f010027a0a97fcb9753d74eb27a9cfd3e", "b68452e28951bf8db5f1193eca3a8fd9e2d0d7ef", "351158e4481e3197bd63acdafd73a5df8336143b", "70d2ab1af0edd5c0a30d576a5d4aa397c4f92d3e", "3e4acf3f2d112fc6516abcdddbe9e17d839f5d9b", "a55c0810e6c84f8e51953c0d8fd9971696d205f0", "9c781f7fd5d8168ddae1ce5bb4a77e3ca12b40b6", "148eb413bede35487198ce7851997bf8721ea2d6", "c18a03568d4b512a0d8380cbb1fbf6bd56d11f05", "c07ab025d9e3c885ad5386e6f000543efe091c4b", "f45d6a7bdb6741242da6192d18c97ac39e6308db", "f4fc77660665ae58993065c6a336367e9a6c85f7", "48174c414cfce7f1d71c4401d2b3d49ba91c5338", "21959bc56a160ebd450606867dce1462a913afab", "f1d6da83dcf71eda45a56a86c5ae13e7f45a8536", "684f5166d8147b59d9e0938d627beff8c9d208dd", "4d16337cc0431cd43043dfef839ce5f0717c3483", "683ec608442617d11200cfbcd816e86ce9ec0899", "8964524580ea2cff41a6b5858b623788bbefb8a4", "e855856d4b61b6a732005418f543c49195cb1542", "e40cb4369c6402ae53c81ce52b73df3ef89f578b", "5c92355b2808621d237a89dc7b3faa5cdb990ab5", "2d3482dcff69c7417c7b933f22de606a0e8e42d4", "4ea4116f57c5d5033569690871ba294dc3649ea5", "3d4d3f70352dc833e454a5756d682f27eca46e5d", "cc3c273bb213240515147e8be68c50f7ea22777c", "4bc4a7c4142e8b37389fddd1e2338298b8b56e96", "9d757c0fede931b1c6ac344f67767533043cba14", "dbe255d3d2a5d960daaaba71cb0da292e0af36a7", "0a60d9d62620e4f9bb3596ab7bb37afef0a90a4f", "1de23d7fe718d9fab0159f58f422099e44ad3f0a", 
"5bb53fb36a47b355e9a6962257dd465cd7ad6827", "46b960d3d871b2ee19d1b8e8838e7036c2ee56ed", "3dda181be266950ba1280b61eb63ac11777029f9", "d2baa43471d959075fc4c93485643cbd009797fd", "5dd3c9ac3c6d826e17c5b378d1575b68d02432d7", "1ba9d12f24ac04f0309e8ff9b0162c6e18d97dc3", "60737db62fb5fab742371709485e4b2ddf64b7b2", "89272b78b651038ff4d294b9ccca0018d2c9033b", "6b35b15ceba2f26cf949f23347ec95bbbf7bed64", "d99b5ee3e2d7e3a016fbc5fd417304e15efbd1f8", "12003a7d65c4f98fb57587fd0e764b44d0d10125", "7a09e8f65bd85d4c79f0ae90d4e2685869a9894f", "57ca530e9acb63487e8591cb6efb89473aa1e5b4", "e692870efb009da4b9316678b354ae935fdf48eb", "be28ed1be084385f5d389db25fd7f56cd2d7f7bf", "0e652a99761d2664f28f8931fee5b1d6b78c2a82", "2dd6c988b279d89ab5fb5155baba65ce4ce53c1e", "a4a0b5f08198f6d7ea2d1e81bd97fea21afe3fc3", "c3fb2399eb4bcec22723715556e31c44d086e054", "831d661d657d97a07894da8639a048c430c5536d", "7a85b3ab0efb6b6fcb034ce13145156ee9d10598", "63a6c256ec2cf2e0e0c9a43a085f5bc94af84265", "38a2661b6b995a3c4d69e7d5160b7596f89ce0e6", "9649a19b49607459cef32f43db4f6e6727080bdb", "aa1129780cc496918085cd0603a774345c353c54", "48853c25dc75481b0c77f408a8a76383287ebe2a", "270acff7916589a6cc9ca915b0012ffcb75d4899", "34108098e1a378bc15a5824812bdf2229b938678", "1fd6004345245daf101c98935387e6ef651cbb55", "2c424f21607ff6c92e640bfe3da9ff105c08fac4", "13901473a12061f080b9d54219f16db7d406e769", "3f5e8f884e71310d7d5571bd98e5a049b8175075", "26727dc7347e3338d22e8cf6092e3a3c7568d763", "5a8f96f6906af8fbf73810b88c68b84a31555f60", "adaed4e92c93eb005198e41f87cf079e46050b5a", "404776aa18031828f3d5dbceed39907f038a47fe", "d0dd1364411a130448517ba532728d5c2fe78ed9", "c81ee278d27423fd16c1a114dcae486687ee27ff", "32743e72cdb481b7a30a3d81a96569dcbea4e409", "ada73060c0813d957576be471756fa7190d1e72d", "e00d4e4ba25fff3583b180db078ef962bf7d6824", "5bde1718253ec28a753a892b0ba82d8e553b6bf3", "2fce767ad830e0203d62ce30bbe75213b959d19c", "fbd7d591e6eecb9a947e377d5b1a865a9f86a11f", "62c435bc714f13a373926e3b1914786592ed1fef", 
"771505abd38641454757de75fe751d41e87f89a4", "ad27d13d163757b65110f98a0e7dd7f5bc8c8030", "b28346f6a962c6bbe309c891cfe04c90b97c1fc4", "97d811ae99bcbcf9f63c2f447041ab6d74a20b1e", "0106a2f6251dc9ffc90709c6f0d9b54c1e82326b", "4ddd55a9f103001da8dc24d123d9223dbb67f884", "5c4f9260762a450892856b189df240f25b5ed333", "5302df3216856b0c54267455078c206948c8d545", "eb32aa2988fdfdc8656f9f31b35ed4d52110b039", "9ec9a80b1c9ee6450f4419f01e457bb87d91bd5e", "6b6493551017819a3d1f12bbf922a8a8c8cc2a03", "11862b8d6e308127acd3ca0685eda6f0e88dd0a4", "195b61470720c7faa523e10e68d0c8d8f27d7c7a", "0c069a870367b54dd06d0da63b1e3a900a257298", "3c18fb8ff0f5003fefa8e9dc9bebaf88908d255c", "2f28db98e8250cff29bc64b569801c739036e4ef", "17be95132dc3dc121822703e3c8476edd199a10f", "36c5421d477697a8692fe6a51ce62473e690c62f", "77c7d8012fe4179a814c1241a37a2256361bc1a4", "ab0d227b63b702ba80f70fd053175cd1b2fd28cc", "2acd0c90d14bf5003975c5b2414400fb6e53cb44", "edfce091688bc88389dd4877950bd58e00ff1253", "1e3068886b138304ec5a7296702879cc8788143d", "d3d71a110f26872c69cf25df70043f7615edcf92", "06fe63b34fcc8ff68b72b5835c4245d3f9b8a016", "258a2dad71cb47c71f408fa0611a4864532f5eba", "22e678d3e915218a7c09af0d1602e73080658bb7", "c3c463a9ee464bb610423b7203300a83a166b500", "cf6c59d359466c41643017d2c212125aa0ee84b2", "2af19b5ff2ca428fa42ef4b85ddbb576b5d9a5cc", "919bdc161485615d5ee571b1585c1eb0539822c8", "a3a2f3803bf403262b56ce88d130af15e984fff0", "a192845a7695bdb372cccf008e6590a14ed82761", "aba9acb4a607071af10684f2cfbdefa0507a4e9a", "21e158bcda4e10da88ee8da3799a6144b60d791f", "2b7ef95822a4d577021df16607bf7b4a4514eb4b", "8b1f697d81de1245c283b4f8f055b9b76badfa66", "28be652db01273289499bc6e56379ca0237506c0", "50f0c495a214b8d57892d43110728e54e413d47d", "1a40c2a2d17c52c8b9d20648647d0886e30a60fa", "9fc04a13eef99851136eadff52e98eb9caac919d", "761304bbd259a9e419a2518193e1ff1face9fd2d", "54ba18952fe36c9be9f2ab11faecd43d123b389b", "a2bd81be79edfa8dcfde79173b0a895682d62329", "2e5cfa97f3ecc10ae8f54c1862433285281e6a7c", 
"0df0d1adea39a5bef318b74faa37de7f3e00b452", "021a19e240f0ae0554eff814e838e1e396be6572", "09e7578833f13a1f91d7a95b71a159af4e38a305", "f65b47093e4d45013f54c3ba09bbcce7140af6bb", "37d6f0eb074d207b53885bd2eb78ccc8a04be597", "a32d4195f7752a715469ad99cb1e6ebc1a099de6", "b15a06d701f0a7f508e3355a09d0016de3d92a6d", "0647c9d56cf11215894d57d677997826b22f6a13", "20747cf1685ba615f19b0db7b17b66bc58ce3453", "559795d3f3b096ceddc03720ba62d79d50eae300", "a5f200d52b588030c76dcc38c504f65d772a1f5e", "a694180a683f7f4361042c61648aa97d222602db", "272e487dfa32f241b622ac625f42eae783b7d9aa", "21bd9374c211749104232db33f0f71eab4df35d5", "fb228b214e28af26f77cc1195d03c9d851b78ec6", "a8bb698d1bb21b81497ef68f0f52fa6eaf14a6bf", "a2fbaa0b849ecc74f34ebb36d1442d63212b29d2", "af9419f2155785961a5c16315c70b8228435d5f8", "55bc7abcef8266d76667896bbc652d081d00f797", "45edb29fb7eed5a52040300e1fd3cd53f1bdb429", "1f745215cda3a9f00a65166bd744e4ec35644b02", "69c2ac04693d53251500557316c854a625af84ee", "1750db78b7394b8fb6f6f949d68f7c24d28d934f", "e1179a5746b4bf12e1c8a033192326bf7f670a4d", "bef926d63512dbffcf1af59f72295ef497f5acf9", "03f14159718cb495ca50786f278f8518c0d8c8c9", "8a0159919ee4e1a9f4cbfb652a1be212bf0554fd", "fcc6fe6007c322641796cb8792718641856a22a7", "1171e8a96ffb15fdb265aaba02be014a38137ad5", "ccf43c62e4bf76b6a48ff588ef7ed51e87ddf50b", "407806f5fe3c5ecc2dc15b75d3d2b0359b4ee7e0", "cd010fc089c580c87c5cff4aa6a9b1d6d41e2470", "e10cbd049ac2f5cc8af9eb8e587b3408ad4bb111", "965f8bb9a467ce9538dec6bef57438964976d6d9", "fa2d02343be1de448ac51c3a668c29f231b362f8", "241cb0c59f63fab66f4772b47a17a41cf6e5f23d", "a894c3f61b79efed2389eaa3b9744b0427087f56", "116261c74ad54646f7d1d6be38cb9930f1bf44f6", "10f74097f65d3e80a79a271dcdcd9b6159a1f58d", "42750fbdc5bff1a2955d2ef4e5d197e101843bed", "3c531cd922bdf3f03fee12c82a77a7eaedc6734c", "f774f80fa4b5a8760084921f093730da519c6681", "026168fd2bcfbcd02012e379f35b7cfdc4c95ee1", "8770d091d91626e80c7fdacdea6bbd464986fb13", "56370ddb77e57c212e562b600d9299ef0b28184b", 
"6b6866fbb4354e30ab34db9d6a8a07da4bf25777", "a19f08d7b1ce8b451df67ec125dd9254b5a05d95", "28ef59700a57bb4d9416e09608cc1a03749d2103", "3ad1e34dad185501ebae44dd9dbf0f5c91aff183", "ff8d115b73a301cb31d58c8621ea22e1749e570e", "1e394f669c4f63c593677d2850c3d022a6fc1ac8", "31fc3b044ec908f7f61386422727ef23784178c0", "0fcca61391e7ee7718f5d2c05adc658f2978a2e8", "490a0b6ff5b982e884622bb9c81250f05c069f32", "98a18702cd9be53341f12c0e711df9d985120ad7", "d57ce0ff4acb2910c2d1afee2ebb7aa1e72a4584", "ae5743712462f99bc3618eaed9814ef305048ff4", "f7aa587f5bcad000c07f41ac91c4fcd9ed3c2a8d", "7c0f7d47da05a41e8671b059ade70dd2df7070db", "770bf54c87e55c986c5bc383f3ba240284ef8908", "ef13cefa7700e1286c52700cee5b36c4e186b27b", "3eebe8a5adaa49e54ea909b4e2aeb436025c84d5", "b4a2ab88955b9b23db59bcd1b7b8488b2d4ecb8a", "76a313ca56efaaf03c854b2c3fc94b514fbc5e75", "044264a61868bc4e0efb8b501f326cb93f9fade0", "7c26559e7269679ef52a85d02c6ff7000c2387d2", "ab68837d09986c592dcab7d08ee6dfb40e02916f", "8370912226ee7783c459368593bc3f88310b1414", "15a559e988cf565879bdb5941b93c35686c0ff6f", "73d8fafee6be9d4fa789ece2192f259199f00e60", "14635cd7a9c91ecc0133d02113bc8f6332492f31", "e5d4e5d68ab05a02dcab035a0d8ac8123a1bdcb0", "f8aa5d47c0de3eb423d545f3318e3c3715524a27", "59f8d0e79eb02c30a5f872038129c4b5dd9bc73a", "5454c5900b6b6a0cf36df65d667129fcbd5262dc", "90fbcea84f621ee5d73482c5cb02479778aecccd", "bf735bb7557e73bc6f68853cba828b55bd163726", "96defc23602f84af2ee6b7e7c75afaa7ee560eb6", "0c93cb1af3bba1bd90a03e921ff2d55acf35c01f", "2bbfe26aac63693556a7eacb45b7271532d0128b", "f5541330741315b98e590e405c96c72bac49d51c", "a70cfa3d84d0ea64a09faf00ee1b0069d1ebf73f", "8064d7a28c763ec37a840450d729f23428ad8f8b", "d095bafdecbae3a234d92ee96005b45cb5b1f55f", "3759b4fa10eabe047ff417b3076458b44132dc8b", "0c1f066a2246fd8d817318e3081f6fe3589f42ea", "5b9c849c2acbdea6e3cfc730def4f083f169521c", "d7f5f4ae54e8020e8c01f5ea5de22a370d3e4b21", "74ba4ba7a2c97826690b9d45edcc82532d1039bc", "fef0b51c865cc72b64bfafd6a1bf3539c3c1d290", 
"d5cf8636e5435cb2d31165fe1edea5a5e3ae798d", "273fadb4247020b830f48be556b4b44fc900b94f", "17c63dddd8d37b3b8c70d509cfee81fe71d84e2e", "1fe0fd340996ac94d6aed72a22ace49a34ee728e", "0cc2fc148eef46c1141edd276d903853052fc19d", "f3f06ab2acce6aacb0cbf709784ee7d90df27969", "d7b6bbb94ac20f5e75893f140ef7e207db7cd483", "efc6e9bc366ef4b0de3fde4c81dff91f3f03063f", "124476c2815bbfb523c77943c74356f94f79b580", "4d231311cdfe3aba13766bd0b358d4db0a9af3d3", "cd6aace4f2e7b95e3069e708ebca1e9082ae0559", "36ca720185b62e92a7f3cce75418356a5a125d24", "4845a5041529d0c010020cf08ceb376c096abc9f", "197a3c1863c780507798c9550dd6faadeb65caaa", "bace9d834e3582333b9460e33f0d6712eddab94e", "103b33d0006cae8fdbc9998b3136264771cfe025", "bea56c0e615e6cea496f52331432bbc344d55192", "49f70f707c2e030fe16059635df85c7625b5dc7e", "01c0e86de95062b64bc56216d8bb769cc896b223", "cd6978bf6b98794552bd52d166b5e04626fb6d6d", "cd4252d1f0a124dcc91af28f527ad1fa7be3a195", "b1bd58bb76ae9e4504622a941e1da21a24b5cfdd", "df0090524461ac8e16987a6e30d4287f7c8e0c8c", "547679aed8f1043628600a40f261aff2e951d316", "cce8da044492b41f692aa56a03ad5f11102ce973", "0cf0f28d4d9e571cab6c5cf9dcf91657fd869b2b", "9039b8097a78f460db9718bc961fdc7d89784092", "3bfb9ba4b74b2b952868f590ff2f164de0c7d402", "e4d08ef1b4350c7e03bdfb716200370c2ea87a6a", "cb11a150fc245958799e763069a6ae3080814d40", "902d1b14b076120cb21029b51ed8e63529fe686d", "dd7875abad93418e275825116e029766ada9b9c6", "b96583c64a3e098adff6114a5dd42208a6ca67fa", "7b47dd9302b3085cd6705614b88d7bdbc8ae5c13", "2eb37a3f362cffdcf5882a94a20a1212dfed25d9", "ea9085c359fe8d01298c60ed8b2a2737b67a9f63", "111f2f1255fa9e5a82753bf5b3f2f0974e87f86d", "b21bf45cd3aeaec3440eeca09a1c5a5ee3d24a3a", "0939dcc2006eb7d196b63106ef1fcd54a514e4a8", "bca52740ba679b67a508894e68a0e52f6bf62079", "147fe6bfc76f30ccacc3620662511e452bc395f6", "085ba9f82e15603f1fe2a29dfa0182d46465a591", "edf8b7b4275560b2f779f7cc6a698fa43338442b", "2c885d046ea482774d0c657613a061ad21668557", "a6e670f5dee62e041d1da9bb4f3e1097055f1535", 
"fddf524012a4e1e7be94827d2ee5506d2678e540", "04644c97784700c449f2c885cb4cab86447f0bd4", "d7e8c6da1a95f41d8097b7b713890ccde13ef1d8", "76a6fa5db2e117b5ad926503d24863deb26f949f", "90e8610ffd1550648712d78074236daba6caa58f", "98cb6011e3713239c0e758e0befd94e060fd9064", "6f5d57460e0e156497c4667a875cc5fa83154e3a", "b759ce285c6ed41dc8efbae1fbe951e83f4ae292", "3eb174af28a80be4e3ce5d55750d7d38c2ff4da6", "7b905905b616be6ddacb1808ca9849ab19863967", "4e246e8b505b67dda0fee6b344806e2faa504b7f", "3399021c2cd7bc4f46494c5aa38bac82ef290e7b", "3515f79514fda3d82bd8117f51986769d6f13b5c", "dabf269f516adc6bf87a7ceb455cceda4466917a", "a06f0b2d569cbef0822ae5e8625b4cb2a7f1d78c", "13f762bbbfd0921cf8904265eb027ac99e70ba6d", "7c327903219c4da8bd127b467b47ac0867529973", "d2ed9996d42d255f05c6a56aea28e385d75929d8", "791eb376d4db96376eba3ef804657c5f0ba7229a", "fd13c64f629d57b85ade538e15821fd00f4e5f1c", "be02c2ea2b54d8fa30e2528f91a801ecf9f2185c", "6818c60209f0c4f979d1ff4211aba49afe9ae78d", "e5e453771109f60be145c2b09121f7e85fa5b1dc", "ead587db6b2b76726e98b17cb1fbf973a34ddf31", "61f4e08b938986ea80f711c73cadbc84e1811181", "541b13515480c0371bb8bb79cf17120645edccc7", "c9b139b78e5337580047138d7fc2dff3b8fcf31f", "a8948941f7a24c09cd7c26f3635d8571c7998570", "745b77016f972288925602d0d67bbba7d2c0fee9", "d7702b849b136f614bd751bdd416c747805bbcab", "8ab16c26678245ef009cbbf87d750cfd18e21572", "0b3be3656a90edf9d8e7c88c89927eb42e674aa6", "f7f1c57d38748d718309d7d55ce79e41d60f0940", "9f5098dea38669135c8552a83f4730330a15ebba", "cf7e6d057e6ef01904770be3dfc9da29f9c1e197", "1d5f22e73aa0d8115af0be61fc8832de501f4a1b", "c8b1850cdba18e3de513a315810fa4bbde2da867", "2c53cb4222cd9ccc868a07d494b8a4ce102658fa", "b67e2ccd0f05df5358464b9b38da3bcb9feda1ab", "27c6f50eafc11957df59b8a6c1894600f3011802", "69d1b055807ef35a8f9490775348cce899421841", "5aadd85e2a77e482d44ac2a215c1f21e4a30d91b", "b3fb2255fa1535bf8a8609d4a47ab09d5ba3e417", "05ff2975863bc9f7b16f2c0b85a1d17f8b76cdb3", "cea6a4b34e3bf3837dbac1058770f1009a4a580e", 
"98512028c62db408b413fec756e99f9085c46bad", "410017a1810308564dc54cb986b12f079428f966", "9c93512df188d7dbab63ebe47586a930559e6279", "07b75e2fd8e2eab17d096bbc7fd0d408d5973d9c", "88146053e69432e1162884a4717c250dd1c9a7a5", "c1c3e32ecf6da8e1372fab7d504cb8cd2c86fd93", "3e6c43835e0579fbca60db2cb124364549dfd2b0", "5fc15baee1383d502775fab8ee91d56f4875429c", "2c963e79a88a3f8ba71cd8d5c9f9f92c925f534c", "6c984bb3243f3b8d0afd8d90cd4ce85eb8f1dd3c", "37c3eb6c0f6427a5f401bc2b2939f25d22493808", "52d158c08e269b62cdc7c0cbe339206d20de370d", "4fe0c6c83d998a0660bc5280c8ab6e61df9df887", "8bbafa3efb7b96adb95128ea2a30a363bfe06812", "6149229bb6c705b8e4d9fea772e7e375437947d7", "cc34b0ab84e82a6d8ebce08eff1b7556026b5352", "4541f3ee510b593243ff9a66d3586ef9125c2931", "108c973b51514f54cf2a078ca243ff0cde091f4b", "62d9750adb300cd53fb107b174cb6a07fb8b96b5", "228c28bd18a2d58cd771a75e8718b14dc32051e0", "fe7cb074f2bed5d3e0dd4e19255beceef6ef7233", "a57b87baca7f3512372e7d9cfb5a712c80b53289", "14aad0d391a9491eb122d5b6af6c325a0e090dc7", "7ed81b41ce0a0ef6d6704b1629162266042de45c", "c45183ec95f89aff793a2629a0520006b4153d6a", "cdf2510d1fa51e911ef8f2618d41707b0c037d3f", "919d08e9645404c60b88f5f5e8511e363ccdf922", "d76e0235c3e5f826f99392d3165541a1944308bc", "60acfe1311d3e624287321256eca4adef5f77ce8", "10f16af880a8c7127c1b343a25fdc936c2bd5b11", "71bece8ec4934e3034f76d8ba19199c5b8ec52ea", "673ebf0d41100f67d9b6feb0dba7ef96d23d81e6", "e5ae390bca95fd014918717472a5507eab7fb069", "7085d21f483743007cc6a8e3fa01d8bdf592ad33", "fbd047862ea869973ecf8fc35ae090ca00ff06d8", "5c97cef9cebf101b74699f583f3e324aebccde32", "9108682a3bce19d67d7b48d94bebfbf6ed29fd8e", "21241d07840e3cc30feda59642571a9b459c817b", "9c7444c6949427994b430787a153d5cceff46d5c", "b7b6fa6aa0155a9512836857a0140a356a8190af", "69e52ce4df3fc14d2321637ac4e9843dc2e68b0b", "5efa4cbe10eca74f0806c13257c6403ea5732214", "99428c49e54b72fa11d131dffa6b37c6c633b019", "7cb0f54a9a02b32e8fecee6d32fa3d392ad29379", "cf7b4fa0a8b58473b94496f353f3c8d0f9531b71", 
"c7abfb920bbd82e117411817b32b5dc31d04eecb", "f4808e78bc648f9e1829c83a68a3e8ed4e7cf325", "3575d74eb548c3187ec5b0d27383ac966b9d7110", "9d839dfc9b6a274e7c193039dfa7166d3c07040b", "1f7e6d9ae543c67fa9cac2476d7b5396f881ee69", "0a4f3a423a37588fde9a2db71f114b293fc09c50", "8d09c8c6b636ef70633a3f1bb8ff6b4d4136b5cf", "7ed88b4444de20cda76cd73f7374ccb76e753002", "4f892475be26333ddf1b72c21f0c9c4ca129bd80", "9214e71ca44d87a9f43ba719f411d5307d78fc4a", "397349476582198639abc7a8b933e350cbc24c37", "5e2266d4ca1377bdf38ad2c07d0d9e0200813522", "527d596a56aa238dfc450c3ebfdae31e82c6c175", "c16479cfa79fe9996ca16fc30add9099815abb04", "d5b5c63c5611d7b911bc1f7e161a0863a34d44ea", "718cd7e570600401c64e31bc08ab404be97d19c3", "5bdb621ae5eab5d64ae4984ab0e6e74a1b4b7a57", "6fc129d384431d17eb7aa22afd6ab68f1084f038", "149c21e5f1c52429fb1585d30b50bc850a16edcd", "179253152fba4626e02a57067c3eb5302431e537", "59945763707557baace208253c029265b4b6e0a9", "2a77e3221d0512aa5674cf6f9041c1ce81fc07f0", "8c0f38c7c07c631d0b5414a84dda2992bdc4514f", "d8f96bb4130604bb5e160611d499451882f4166c", "58b80f0e484d32c9fe5b57648848e048270d435b", "3d67e97227846f579d1825e00d395d30e17f5d0e", "43ba38f4f79f7ea47f472ad70f00f4c96c9dcdfa", "2cb31b1a334de6763732a0e147ce30ac1eb849d4", "40099b528b170c180022e98577544435321e9d79", "8ed7924cd4aae54392e89f29d8a6f8a8f2a15de0", "a4b5c5a1a29d5d609d44bc9c29df0d96a30cb68c", "d7daf70491eb407db4b951861062edeceb8ccd6e", "57f06c0bab366438ebea9ea047678971653923ff", "9b5b2fd938a9337475cb90a143cf7568f8f63709", "702ac86ca51e18a3a50ab0ba7c379673c077d97a", "8044b9683d3fb609365fd4b053712ea7fa2b4e22", "7197e4236fe7c24f6b088b16a404128c1bb28a0f", "ce06644d1d71e258363407288638ce76b561fadc", "fab6e12a913223b69e1b9f0672df6c89275b1ed0", "892b845bbd1b2eba8d014c852ab5b8a8dced8efe", "fba6b4b66aa4dc04f96336ca94887a669adf8e49", "ad46f1de2001474cce1047d88703f61580c8a5de", "3fee310b47f361e4d7c298679db411999847e375", "99cb716cd7687db8ef3d0403c85b1ab90869800f", "607850dc8e640c25f027f2eee202dee5605cf27c", 
"1fbb93701bc417f7b7e3273f05dccb385826a984", "3bf0f03e37929eb40de452a1e2773fb8b77ebcc1", "13ec6666b8b722ad9eb68a21a302e3f2f1ab4df7", "23e1746c449e675a4ffa3833b0ac5c5a7b743f7f", "243cd27dce38fd756a840b397c28ad21cfb78897", "596e101d08b1972ca53e6ba8a5696eb8db5a3d55", "1d56d3b129e2671d42f656114a628e31fb181965", "b610e52b0a8fa11af3d01944c0383f015cade9c0", "439ac8edfa1e7cbc65474cab544a5b8c4c65d5db", "67dca5503eb4068c6ed5be34b7488a4aad6686a2", "156b194d0cee545337524bd993ae640ed227b79e", "15dc99e91da7c6cf808d9481ba4a9dfc5914bc63", "f98fdd7f7d95a56af8c8c11298af99c9a6142576", "c253e241ae1a7a3fde79bdd62ce5095a0d18ed71", "9ee5218a2a74fafbc4227f6c7c587b72e141bd33", "29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d", "2af266cdba82b168bc61408e5d5e9141540ba028", "c4abbd51777785234d5b77734a281d50d2606441", "024b7d7b1961bc4886c7a5c8fbe45dd27cd14384", "4e608c77043f56b0abfb2760fb2fd2516b5412b0", "654d5579fa954887f69333de752eec78b11a73d8", "763d9eef06b454d722c88ffab8dfb9538a57c06b", "b8d15b12418569089c786e3754e169bc43001644", "df3a01b4a962e3657cf9126cbceaf14899852499", "476c00f8da4ef04477ca7398111841e2eccb6110", "932b157ea4e554af580124b5575097d47fb6a707", "e8d0cb340284ff7d97fb2d65556659a3e4bb0fa6", "3dbb2ca6942eb49538d92823fe22c7475e866ca1", "01266af6b6cc067ba8f6c507b3a194a3ce7e87e4", "e994ae6b25c082a541cd68354ef4e4952420f705", "3ed22ecc6b3a4c89d07d59c21b7050a277e4e950", "bacaaa8e59baf853bdd0b3d5f8e3c352c9be8e98", "b717c61f6a1d7c65f4505b4aa1d199e851676c60", "89f810798ede05087512be80f2e52ebe81b7b6cd", "ba84a1f07df13a46c2c3af12b9d9dc33f09dc107", "8d44ac33d768fdc436c1b8ce995e2a6dbc4ad74b", "0533987210ba1e35037d6e5295e288ae2421b36f", "842e42d30dc31de1833047c268f0a5cdff16f2ce", "ada4901e0022b4fdeb9ec3ae26b986199f7ae3be", "4002fc29854f79ce19bc1694194dd526ebacd17b", "d8029237cde893218d21ba551fd127d045ae3422", "c3b85c36a94d25c8b3a46acb9a990c3b98a775bf", "9ca4581dc2b2be1781f46f94d6e597c934e55f87", "92bc5cfe5d0492c532e7d118a467104fc53bf487", "d0933550b75237c285c8bb2393185475014dbc2d", 
"363f973cbc1e46c6607b945f9ac96cb4a6262c28", "3faedba96bd6b72c6669bdcb82ae0788cdcb3a43", "f984a9bb5c6e7b8a055b810bff468d7f8d80a7ff", "2ec393b4fa5739c54ac9f61e583f5e41cfb2687c", "606dcfcdcca767db34230c4798273565a1235320", "58abb5001087f51dd2e9ab17b9fb8fb3567988e8", "e29a71bbe4e999e7375a3e847cd2b86e5db4b7de", "6603e7de5b155c86407edc43099b46b974b7f0bb", "7d337edc28b85ecc07adfcfc08e14eb0508dfe74", "0da611ca979327840161df87564fd07299c268b5", "2baf54199b4b0047f3610ba691fb0a718dbce97e", "2cdb8df791cb15eef805443293319ec8690ff88f", "3130eb9bfab5e5a095ab989ba3cc6a2ec62c156d", "def897862020dd63498a5ade81141c65582695df", "2fda164863a06a92d3a910b96eef927269aeb730", "38bbca5f94d4494494860c5fe8ca8862dcf9676e", "aece472ba64007f2e86300cc3486c84597f02ec7", "236a4f38f79a4dcc2183e99b568f472cf45d27f4", "9d3377313759dfdc1a702b341d8d8e4b1469460c", "57911d7f347dde0398f964e0c7ed8fdd0a882449", "720763bcb5e0507f13a8a319018676eb24270ff0", "34b7e826db49a16773e8747bc8dfa48e344e425d", "ff46c41e9ea139d499dd349e78d7cc8be19f936c", "b235b4ccd01a204b95f7408bed7a10e080623d2e", "190d8bd39c50b37b27b17ac1213e6dde105b21b8", "10f17534dba06af1ddab96c4188a9c98a020a459", "13c250fb740cb5616aeb474869db6ab11560e2a6", "167ea1631476e8f9332cef98cf470cb3d4847bc6", "35f1bcff4552632419742bbb6e1927ef5e998eb4", "3ec05713a1eed6fa9b57fef718f369f68bbbe09f", "0fd1bffb171699a968c700f206665b2f8837d953", "6d4b5444c45880517213a2fdcdb6f17064b3fa91", "aac101dd321e6d2199d8c0b48c543b541c181b66", "55c81f15c89dc8f6eedab124ba4ccab18cf38327", "24041477d6e412e4afc441992f4b170831f725c7", "bb451dc2420e1a090c4796c19716f93a9ef867c9", "febb6454a3bfbc76f4c7934854d377ac15666215", "2a98351aef0eec1003bd5524933aed8d3f303927", "13d9da779138af990d761ef84556e3e5c1e0eb94", "fadbb3a447d697d52771e237173b80782caaa936", "c3638b026c7f80a2199b5ae89c8fcbedfc0bd8af", "3cd7b15f5647e650db66fbe2ce1852e00c05b2e4", "167f07b9d2babb8920acfa320ab04ee2758b5db6", "2e3c893ac11e1a566971f64ae30ac4a1f36f5bb5", "2fa1fc116731b2b5bb97f06d2ac494cb2b2fe475", 
"4542273a157bfd4740645a6129d1784d1df775d2", "18145b0b13aa477eeabef9ceec4299b60e87c563", "392c3cabe516c0108b478152902a9eee94f4c81e", "af0a8199328d4c806574866f419d1962def9305a", "4fc7a540efb24bea338f82c8bdc64c214744a3de", "67af3ec65f1dc535018f3671624e72c96a611c39", "55b4b1168c734eeb42882082bd131206dbfedd5b", "22ad2c8c0f4d6aa4328b38d894b814ec22579761", "8de2dbe2b03be8a99628ffa000ac78f8b66a1028", "3d0379688518cc0e8f896e30815d0b5e8452d4cd", "4b89cf7197922ee9418ae93896586c990e0d2867", "19bbecead81e34b94111a2f584cf55db9a80e60c", "ea1eeefb676d39b5f456937f8894311587cc7c2f", "26ec75b8ad066b36f814379a79ad57089c82c079", "ee6b503ab512a293e3088fdd7a1c893a77902acb", "ab0f9bc35b777eaefff735cb0dd0663f0c34ad31", "bc2852fa0a002e683aad3fb0db5523d1190d0ca5", "07ac2e342db42589322b28ef291c2702f4a793a8", "82a0a5d0785fb2c2282ed901a15c3ff02f8567df", "b51b4ef97238940aaa4f43b20a861eaf66f67253", "f113aed343bcac1021dc3e57ba6cc0647a8f5ce1", "c6608fdd919f2bc4f8d7412bab287527dcbcf505", "af8fe1b602452cf7fc9ecea0fd4508ed4149834e", "23172f9a397f13ae1ecb5793efd81b6aba9b4537", "113cd9e5a4081ce5a0585107951a0d36456ce7a8", "acb83d68345fe9a6eb9840c6e1ff0e41fa373229", "0861f86fb65aa915fbfbe918b28aabf31ffba364", "9c1305383ce2c108421e9f5e75f092eaa4a5aa3c", "c4dcf41506c23aa45c33a0a5e51b5b9f8990e8ad", "5feee69ed183954fa76c58735daa7dd3549e434d", "38682c7b19831e5d4f58e9bce9716f9c2c29c4e7", "3d1f976db6495e2bb654115b939b863d13dd3d05", "334ac2a459190b41923be57744aa6989f9a54a51", "44855e53801d09763c1fb5f90ab73e5c3758a728", "02a98118ce990942432c0147ff3c0de756b4b76a", "7fd700f4a010d765c506841de9884df394c1de1c", "a6902db7972a7631d186bbf59c5ef116c205b1e8", "4136a4c4b24c9c386d00e5ef5dffdd31ca7aea2c", "43eb03f95adc0df61af2c3b12a913c725b08d4f5", "08d2f655361335bdd6c1c901642981e650dff5ec", "cb669c1d1e17c2a54d78711fa6a9f556b83f1987", "03a8f53058127798bc2bc0245d21e78354f6c93b", "89d7cc9bbcd2fdc4f4434d153ecb83764242227b", "031055c241b92d66b6984643eb9e05fd605f24e2", "00616b487d4094805107bb766da1c234c3c75e73", 
"0113b302a49de15a1d41ca4750191979ad756d2f", "19296e129c70b332a8c0a67af8990f2f4d4f44d1", "9635493998ad60764d7bbf883351af57a668d159", "438b88fe40a6f9b5dcf08e64e27b2719940995e0", "0c167008408c301935bade9536084a527527ec74", "d60e3eef429ed2a51bbd806125fa31f5bea072a4", "40b86ce698be51e36884edcc8937998979cd02ec", "0e1a18576a7d3b40fe961ef42885101f4e2630f8", "235d5620d05bb7710f5c4fa6fceead0eb670dec5", "74de03923a069ffc0fb79e492ee447299401001f", "2e20ed644e7d6e04dd7ab70084f1bf28f93f75e9", "0f0241124d6092a0bb56259ac091467c2c6938ca", "1d21e5beef23eecff6fff7d4edc16247f0fd984a", "861b12f405c464b3ffa2af7408bff0698c6c9bf0", "0b3a146c474166bba71e645452b3a8276ac05998", "7c36afc9828379de97f226e131390af719dbc18d", "46f3b113838e4680caa5fc8bda6e9ae0d35a038c", "2be1e2f2b7208fdf7a379da37a2097cfe52bc196", "c6724c2bb7f491c92c8dd4a1f01a80b82644b793", "29479bb4fe8c04695e6f5ae59901d15f8da6124b", "4f064c2a0ef0849eed61ab816ff0c2ff6d9d7308", "31aa7c992692b74f17ddec665cd862faaeafd673", "11ddf5e47854e4e6109762835d2ce086bbdfbc5b", "b3ee1a0ff6cb36621c65c4a7b05a5179db280d35", "6486a58f675461d1c9f42a39e942bf39f4427f7d", "029317f260b3303c20dd58e8404a665c7c5e7339", "0470b0ab569fac5bbe385fa5565036739d4c37f8", "7c45339253841b6f0efb28c75f2c898c79dfd038", "4113269f916117f975d5d2a0e60864735b73c64c", "121503705689f46546cade78ff62963574b4750b", "442f09ddb5bb7ba4e824c0795e37cad754967208", "411503a304a661b0c04c2b446a6e43e4a70942dc", "121fe33daf55758219e53249cf8bcb0eb2b4db4b", "82be2ede6b7613286b80c3e2afe3b5353f322bed", "8d6c4af9d4c01ff47fe0be48155174158a9a5e08", "df7ff512e8324894d20103fd8ab5da650e4d86db", "240eb0b34872c431ecf9df504671281f59e7da37", "187d4d9ba8e10245a34f72be96dd9d0fb393b1aa", "5fc664202208aaf01c9b62da5dfdcd71fdadab29", "499343a2fd9421dca608d206e25e53be84489f44", "8598d31c7ca9c8f5bb433409af5e472a75037b4d", "b85c198ce09ffc4037582a544c7ffb6ebaeff198", "68a04a3ae2086986877fee2c82ae68e3631d0356", "75ebe1e0ae9d42732e31948e2e9c03d680235c39", "09c586624ec65d7ef2d4d8d321e98f61698dcfe2", 
"81bfe562e42f2eab3ae117c46c2e07b3d142dade", "1a2b3fa1b933042687eb3d27ea0a3fcb67b66b43", "db428d03e3dfd98624c23e0462817ad17ef14493", "7b9961094d3e664fc76b12211f06e12c47a7e77d", "3fde656343d3fd4223e08e0bc835552bff4bda40", "06400a24526dd9d131dfc1459fce5e5189b7baec", "178b37392b2c6f1a167ebc1a5baa5f2f5916e4c4", "3026722b4cbe9223eda6ff2822140172e44ed4b1", "4500888fd4db5d7c453617ee2b0047cedccf2a27", "4f8b4784d0fca31840307650f7052b0dde736a76", "325b048ecd5b4d14dce32f92bff093cd744aa7f8", "7c45b5824645ba6d96beec17ca8ecfb22dfcdd7f", "14b162c2581aea1c0ffe84e7e9273ab075820f52", "0ea05bbc0b0c8b7df10f16e9429ef90177bf94fa", "348a16b10d140861ece327886b85d96cce95711e", "426913f890f07a5d79e6c23b83cd928ffc00e494", "5892f8367639e9c1e3cf27fdf6c09bb3247651ed", "3f4711c315d156a972af37fe23642dc970a60acf", "1a7a2221fed183b6431e29a014539e45d95f0804", "c089c7d8d1413b54f59fc410d88e215902e51638", "46ae4d593d89b72e1a479a91806c39095cd96615", "6339e9385ae3609cb22f6b87175c7e6850f2c05b", "2495ebdcb6da8d8c2e82cf57fcaab0ec003d571d", "29908288392a9326d7a2996c6cd6b3e6cb137265", "4ea53e76246afae94758c1528002808374b75cfa", "7171b46d233810df57eaba44ccd8eabd0ad1f53a", "40205181ed1406a6f101c5e38c5b4b9b583d06bc", "b64cfb39840969b1c769e336a05a30e7f9efcd61", "e19fb22b35c352f57f520f593d748096b41a4a7b", "b56530be665b0e65933adec4cc5ed05840c37fc4", "0363e93d49d2a3dbe057cc7754825ebf30f0f816", "9ab463d117219ed51f602ff0ddbd3414217e3166", "d5f751d31a9d2d754d0d136d5b02c24b28fb94a0", "2f8ef26bfecaaa102a55b752860dbb92f1a11dc6", "fe5df5fe0e4745d224636a9ae196649176028990", "1772a7614c9b7daf01ffcda499c901ab7c768c4a", "5185f2a40836a754baaa7419a1abdd1e7ffaf2ad", "1a878e4667fe55170252e3f41d38ddf85c87fcaf", "99c20eb5433ed27e70881d026d1dbe378a12b342", "ed28e8367fcb7df7e51963add9e2d85b46e2d5d6", "ada42b99f882ba69d70fff68c9ccbaff642d5189", "8697ccb156982d40e88fda7fbf4297fa5171f24d", "294bd7eb5dc24052237669cdd7b4675144e22306", "429c3588ce54468090cc2cf56c9b328b549a86dc", "53d78c8dbac7c9be8eb148c6a9e1d672f1dd72f9", 
"316e67550fbf0ba54f103b5924e6537712f06bee", "3feb69531653e83d0986a0643e4a6210a088e3e5", "03b99f5abe0e977ff4c902412c5cb832977cf18e", "c175381a6b84ebd0a920ff44ccdccabd98bdfb94", "60f980b1f146d659f8f8f0b4755ae2d5df64ca8d", "0cf7da0df64557a4774100f6fde898bc4a3c4840", "12cd96a419b1bd14cc40942b94d9c4dffe5094d2", "5b86c36e3eb59c347b81125d5dd57dd2a2c377a9", "4ed54d5093d240cc3644e4212f162a11ae7d1e3b", "0037bff7be6d463785d4e5b2671da664cd7ef746", "2bab44d3a4c5ca79fb8f87abfef4456d326a0445", "2e1fd8d57425b727fd850d7710d38194fa6e2654", "6ecd4025b7b5f4894c990614a9a65e3a1ac347b2", "11b3877df0213271676fa8aa347046fd4b1a99ad", "93721023dd6423ab06ff7a491d01bdfe83db7754", "6d7a32f594d46f4087b71e2a2bb66a4b25da5e30", "28c0cb56e7f97046d6f3463378d084e9ea90a89a", "642a386c451e94d9c44134e03052219a7512b9de", "3946b8f862ecae64582ef0912ca2aa6d3f6f84dc", "3042d3727b2f80453ff5378b4b3043abb2d685a1", "75b833dde2e76c5de5912db3444d62c4131d15dc", "bcfeac1e5c31d83f1ed92a0783501244dde5a471", "22d5aeb25bb034f6ae2fc50b5cdd9934a85d6505", "676a136f5978783f75b5edbb38e8bb588e8efbbe", "3779e0599481f11fc1acee60d5108d63e55819b3", "91e57667b6fad7a996b24367119f4b22b6892eca", "5d479f77ecccfac9f47d91544fd67df642dfab3c", "c5a561c662fc2b195ff80d2655cc5a13a44ffd2d", "0b278c9dc9b16b46ed602eab884ad7a37a988031", "2cae619d0209c338dc94593892a787ee712d9db0", "86b105c3619a433b6f9632adcf9b253ff98aee87", "68604e7e1b01cdbd3c23832976d66f1a86edaa8f", "0786a6d5ce6db8a68cef05bb5f5b84ec1b0c2cde", "00f1e5e954f9eb7ffde3ca74009a8c3c27358b58", "4439746eeb7c7328beba3f3ef47dc67fbb52bcb3", "6c01b349edb2d33530e8bb07ba338f009663a9dd", "51224ed7519e71346076060092462e3d59ca3ab9", "110c55b440b7c6a1692da9d8ee52389e43f6e76e", "2d7aa6af536a703471c56cc94bfd99471963b305", "541bccf19086755f8b5f57fd15177dc49e77d675", "727ecf8c839c9b5f7b6c7afffe219e8b270e7e15", "5e80e2ffb264b89d1e2c468fbc1b9174f0e27f43", "4be03fd3a76b07125cd39777a6875ee59d9889bd", "e935270db6bd778283de9767075763a538181d8e", "8bf945166305eb8e304a9471c591139b3b01a1e1", 
"d61e794ec22a4d4882181da17316438b5b24890f", "8e63715d458ff79170a010c283c79427ce81ff0c", "b259f57f41f4b3b5b7ca29c5acb6f42186bbcf23", "0297448f3ed948e136bb06ceff10eccb34e5bb77", "354dd9f05094d4f45a330e81d79c57941e968936", "0dd151d003ac9b7f3d6936ccdd5ff38fce76c29f", "853416c2a96ad46bdf3ef044f7a11e19d86fe073", "b1045a2de35d0adf784353f90972118bc1162f8d", "5fc87fc2207dd5a54ee9592c06bb7a95eff1895c", "d5d3c1b299e81b4ab96d052f8a37013305b731d9", "df14ef329c575eac56e6429251a439fac8d67418", "b9436b3db0f0eb9e5c98e036c7b503a9f63ac2b1", "dd49d67fbd69ec9bcd0bf3e8ec46c67404dc4591", "479f44f9b4c401327a721550334b8d491f6b3f16", "a33262933df8534de571027d78ccd936bb9ec263", "ac9a0997267d7c46a396ec243a4903276d986b7b", "0c302ac0101545e1942aa6c3e053c0185d948ff3", "837792b672a3a4a06a22b2c26a8ecd3812fe8330", "02b1a5d4b113211198e9c66d51153eb63ca680e2", "46836605c2ef5f78796644da3d385f66825518ba", "8bfb2803f624815c7454752da4136f0f3b9ec431", "252f01e74cc32435da52a42229c8102a88698e3a", "e59a68c328c69c294991f87b741a5d4e952defba", "54eb104b9b59315a24038e366283c1034c30ff69", "d951552faf169e62bfb362f44f471020faa2e6cc", "82475afbd13452349777c73f68c771b23e15d830", "46544182751970a7053bde08f34511667b28e346", "fde5e4538967f325916c1f944242304466edb41d", "28b6adbc5ef790413431cdb2f512432862778b3b", "78102141a1b78101515f93385e7b71a4aa1955c5", "641dfce99363f8f75b0e03f8578f6c413eb65591", "61764c068ad7d2ec988e6ec315d6ed2ed7489c2e", "5e55d9dabe06ee6b4d4b31dfd3723f6016a6c937", "9055b155cbabdce3b98e16e5ac9c0edf00f9552f", "8e8e3f2e66494b9b6782fb9e3f52aeb8e1b0d125", "dad6b36fd515bda801f3d22a462cc62348f6aad8", "7b1ca9a74ab7fbfc32a69e8313ca2f2d78ac6c35", "8d8461ed57b81e05cc46be8e83260cd68a2ebb4d", "e198a7b9e61dd19c620e454aaa81ae8f7377ade0", "3083d2c6d4f456e01cbb72930dc2207af98a6244", "7e1c419065fdb9cf2a31aa4b5d0c0e03f7afd54e", "1890470d07a090e7b762091c7b9670b5c2e1c348", "81c21f4aafab39b7f5965829ec9e0f828d6a6182", "b755505bdd5af078e06427d34b6ac2530ba69b12", "af12a79892bd030c19dfea392f7a7ccb0e7ebb72", 
"5c435c4bc9c9667f968f891e207d241c3e45757a", "fcd3d69b418d56ae6800a421c8b89ef363418665", "fa24bf887d3b3f6f58f8305dcd076f0ccc30272a", "b234d429c9ea682e54fca52f4b889b3170f65ffc", "7bc1e7d000ab517161a83b1fedf353e619516ddf", "13f03aab62fc29748114a0219426613cf3ba76ae", "ee65cee5151928c63d3ef36fcbb582fabb2b6d2c", "083a2bc86e0984968b06593ba06654277b252f00", "3af28e9e9e883c235b6418a68bda519b08f9ae26", "3b1260d78885e872cf2223f2c6f3d6f6ea254204", "25337690fed69033ef1ce6944e5b78c4f06ffb81", "09111da0aedb231c8484601444296c50ca0b5388", "893239f17dc2d17183410d8a98b0440d98fa2679", "cd63759842a56bd2ede3999f6e11a74ccbec318b", "9f49013657cbce384df9b16a2a17293bc4c9d967", "ffc81ced9ee8223ab0adb18817321cbee99606e6", "11b904c9180686574e6047bbd9868c354ca46cb4", "87b607b8d4858a16731144d17f457a54e488f15d", "be7444c891caf295d162233bdae0e1c79791d566", "9e28243f047cc9f62a946bf87abedb65b0da0f0a", "931f99bc6865d3d0c80c15d5b1c05338dfe98982", "93e962f8886eae13b02ad2aa98bdedfbd7e68709", "1f41a96589c5b5cee4a55fc7c2ce33e1854b09d6", "56c700693b63e3da3b985777da6d9256e2e0dc21", "1e344b99583b782e3eaf152cdfa15f217b781181", "197eaa59a003a4c7cc77c1abe0f99d942f716942", "892400017e5c93611dc8361e7749135520d66f25", "4b9ec224949c79a980a5a66664d0ac6233c3d575", "bd8b7599acf53e3053aa27cfd522764e28474e57", "cc1ed45b02d7fffb42a0fd8cffe5f11792b6ea74", "29631ca6cff21c9199c70bcdbbcd5f812d331a96", "c7c53d75f6e963b403057d8ba5952e4974a779ad", "b47a3c909ee9b099854619054fd00e200b944aa9", "1e1e66783f51a206509b0a427e68b3f6e40a27c8", "0c2370e156a4eb8d84a5fdb049c5a894c3431f1c", "4e8f301dbedc9063831da1306b294f2bd5b10477", "1c17450c4d616e1e1eece248c42eba4f87de9e0d", "3dce635ce4b55fb63fc6d41b38640403b152a048", "f2902f5956d7e2dca536d9131d4334f85f52f783", "ba2bbef34f05551291410103e3de9e82fdf9dddd", "c0b02be66a5a1907e8cfb8117de50f80b90a65a8", "ff012c56b9b1de969328dacd13e26b7138ff298b", "621741b87258c745f8905d15ba81aaf2a8be60d2", "1ae3a26a985fe525b23f080a9e1041ecff0509ad", "0d6b28691e1aa2a17ffaa98b9b38ac3140fb3306", 
"f67a73c9dd1e05bfc51219e70536dbb49158f7bc", "2f2406551c693d616a840719ae1e6ea448e2f5d3", "15fbb5fc3bdd692a6b2dd737cce7f39f7c89a25c", "f214bcc6ecc3309e2efefdc21062441328ff6081", "c035c193eed5d72c7f187f0bc880a17d217dada0", "c907104680ad53bdc673f2648d713e4d26335825", "8c5cf18c456957c63248245791f44a685e832345", "31a36014354ee7c89aa6d94e656db77922b180a5", "4b519e2e88ccd45718b0fc65bfd82ebe103902f7", "23edcd0d2011d9c0d421193af061f2eb3e155da3", "604a281100784b4d5bc1a6db993d423abc5dc8f0", "6486b36c6f7fd7675257d26e896223a02a1881d9", "6e198f6cc4199e1c4173944e3df6f39a302cf787", "16bce9f940bb01aa5ec961892cc021d4664eb9e4", "0f92e9121e9c0addc35eedbbd25d0a1faf3ab529", "9d3aa3b7d392fad596b067b13b9e42443bbc377c", "76a52ebfc5afd547f8b73430ec81456cf25ddd69", "217a21d60bb777d15cd9328970cab563d70b5d23", "b1bb517bd87a1212174033fc786b2237844b04e6", "a0dc68c546e0fc72eb0d9ca822cf0c9ccb4b4c4f", "68a2ee5c5b76b6feeb3170aaff09b1566ec2cdf5", "d119443de1d75cad384d897c2ed5a7b9c1661d98", "5058a7ec68c32984c33f357ebaee96c59e269425", "97c59db934ff85c60c460a4591106682b5ab9caa", "834736698f2cc5c221c22369abe95515243a9fc3", "3edb0fa2d6b0f1984e8e2c523c558cb026b2a983", "3cb488a3b71f221a8616716a1fc2b951dd0de549", "b1891010a0722117c57e98809e1f2b26cd8e9ee3", "8000c4f278e9af4d087c0d0895fff7012c5e3d78", "59fe66eeb06d1a7e1496a85f7ffc7b37512cd7e5", "1c530de1a94ac70bf9086e39af1712ea8d2d2781", "cfaf61bacf61901b7e1ac25b779a1f87c1e8cf7f", "e5dfd17dbfc9647ccc7323a5d62f65721b318ba9", "17670b60dcfb5cbf8fdae0b266e18cf995f6014c", "29ce6b54a87432dc8371f3761a9568eb3c5593b0", "49e1aa3ecda55465641b2c2acc6583b32f3f1fc6", "2fd96238a7e372146cdf6c2338edc932031dd1f0", "b249f10a30907a80f2a73582f696bc35ba4db9e2", "5b9d9f5a59c48bc8dd409a1bd5abf1d642463d65", "2a7e6a1b2638550370a47f2f6f6e38e76fe9ac13", "42ee339802ec9195b2439074e5b7120e74ad79a4", "2bbbbe1873ad2800954058c749a00f30fe61ab17", "60ce4a9602c27ad17a1366165033fe5e0cf68078", "2336de3a81dada63eb00ea82f7570c4069342fb5", "019e471667c72b5b3728b4a9ba9fe301a7426fb2", 
"c418a3441f992fea523926f837f4bfb742548c16", "ebbceab4e15bf641f74e335b70c6c4490a043961", "d84a48f7d242d73b32a9286f9b148f5575acf227", "0c6a566ebdac4bd14e80cd6bf4631bc7458e1595", "8f5facdc0a2a79283864aad03edc702e2a400346", "812d3f6975f4cb87e9905ef18696c5c779227634", "3cc46bf79fb9225cf308815c7d41c8dd5625cc29", "189e5a2fa51ed471c0e7227d82dffb52736070d8", "1c5a5d58a92c161e9ba27e2dfe490e7caaee1ff5", "14014a1bdeb5d63563b68b52593e3ac1e3ce7312", "659dc6aa517645a118b79f0f0273e46ab7b53cd9", "a9fc23d612e848250d5b675e064dba98f05ad0d9", "d5fa9d98c8da54a57abf353767a927d662b7f026", "c05a7c72e679745deab9c9d7d481f7b5b9b36bdd", "d9810786fccee5f5affaef59bc58d2282718af9b", "f374ac9307be5f25145b44931f5a53b388a77e49", "7c11fa4fd91cb57e6e216117febcdd748e595760", "c4ca092972abb74ee1c20b7cae6e69c654479e2c", "3c0bbfe664fb083644301c67c04a7f1331d9515f", "575141e42740564f64d9be8ab88d495192f5b3bc", "0e4baf74dfccef7a99c6954bb0968a2e35315c1f", "555f75077a02f33a05841f9b63a1388ec5fbcba5", "0821028073981f9bd2dba2ad2557b25403fe7d7d", "59cdafed4eeb8ff7c9bb2d4ecd0edeb8a361ffc1", "4551194408383b12db19a22cca5db0f185cced5c", "a05b1254630257fe27ee195ef05cc50ce6e41f22", "8b10383ef569ea0029a2c4a60cc2d8c87391b4db", "d37ca68742b2999667faf464f78d2fbf81e0cb07", "a7191958e806fce2505a057196ccb01ea763b6ea", "b7894c1f805ffd90ab4ab06002c70de68d6982ab", "abbc6dcbd032ff80e0535850f1bc27c4610b0d45", "989332c5f1b22604d6bb1f78e606cb6b1f694e1a", "70c2c2d2b7e34ff533a8477eff9763be196cd03a", "070c8ee3876c06f9a65693e536d61097ace40417", "1fd3dbb6e910708fa85c8a86e17ba0b6fef5617c", "6c6f0e806e4e286f3b18b934f42c72b67030ce17", "05a0d04693b2a51a8131d195c68ad9f5818b2ce1", "1ca1b4f787712ede215030d22a0eea41534a601e", "72c0c8deb9ea6f59fde4f5043bff67366b86bd66", "b6a23f72007cb40223d7e1e1cc47e466716de945", "935a7793cbb8f102924fa34fce1049727de865c2", "574751dbb53777101502419127ba8209562c4758", "4aabd6db4594212019c9af89b3e66f39f3108aac", "73d15a975b0595e0cc2e0981a9396a89c474dc7e", "51bb86dc8748088a198b216f7e97616634147388", 
"8cffe360a05085d4bcba111a3a3cd113d96c0369", "cb004e9706f12d1de83b88c209ac948b137caae0", "435dc062d565ce87c6c20a5f49430eb9a4b573c4", "75650bfc20036d99314f7ddae8f2baecde3d57e2", "6a5d7d20a8c4993d56bcf702c772aa3f95f99450", "2a6783ae51d7ee781d584ef9a3eb8ab1997d0489", "c2bd9322fa2d0a00fc62075cc0f1996fc75d42a8", "4aeb87c11fb3a8ad603311c4650040fd3c088832", "a6e4f924cf9a12625e85c974f0ed136b43c2f3b5", "7c8909da44e89a78fe88e815c83a4ced34f99149", "a53d13b9110cddb2a5f38b9d7ed69d328e3c6db9", "f4251e02f87ac3fcae70bdb313f13ed16ff6ff0a", "141cb9ee401f223220d3468592effa90f0c255fa", "362a70b6e7d55a777feb7b9fc8bc4d40a57cde8c", "d79530e1745b33f3b771d0b38d090b40afc04191", "6ab33fa51467595f18a7a22f1d356323876f8262", "414d78e32ac41e6ff8b192bc095fe55f865a02f4", "3d94f81cf4c3a7307e1a976dc6cb7bf38068a381", "63488398f397b55552f484409b86d812dacde99a", "6c5fdec4dfddd51babf0fbd1275f2f2fa6bbbff0", "4ef09fe9f7fa027427414cf1f2e9050ac7f5e34d", "d82b93f848d5442f82154a6011d26df8a9cd00e7", "6adecb82edbf84a0097ff623428f4f1936e31de0", "fcb97ede372c5bddde7a61924ac2fd29788c82ce", "2bf03e8fb775718ac9730524a176ddd189c0e457", "1f3f7df159c338884ddfd38ee2d3ba2e1e3ada69", "0fc5c6f06e40014a56f492172f44c073d269e95c", "23675cb2180aac466944df0edda4677a77c455cd", "64ca0dbe60bf8f8243fad73a2494c3fa7a2770e2", "bc6de183cd8b2baeebafeefcf40be88468b04b74", "d4288daef6519f6852f59ac6b85e21b8910f2207", "6e12ba518816cbc2d987200c461dc907fd19f533", "c5421a18583f629b49ca20577022f201692c4f5d", "68c4a1d438ea1c6dfba92e3aee08d48f8e7f7090", "e0423788eb91772de9d708a17799179cf3230d63", "c58ece1a3fa23608f022e424ec5a93cddda31308", "3337cfc3de2c16dee6f7cbeda5f263409a9ad81e", "dcb44fc19c1949b1eda9abe998935d567498467d", "469ee1b00f7bbfe17c698ccded6f48be398f2a44", "c29fe5ed41d2240352fcb8d8196eb2f31d009522", "fa518a033b1f6299d1826389bd1520cf52291b56", "4e626b2502ee042cf4d7425a8e7a228789b23856", "b84b7b035c574727e4c30889e973423fe15560d7", "19da9f3532c2e525bf92668198b8afec14f9efea", "f24e379e942e134d41c4acec444ecf02b9d0d3a9", 
"fc04a50379e08ddde501816eb1f9560c36d01a39", "ef36ca8abf0a23e661f3b1603057963a70e16704", "452ea180cf4d08d7500fc4bc046fd7141fd3d112", "df674dc0fc813c2a6d539e892bfc74f9a761fbc8", "2facf3e85240042a02f289a0d40fee376c478d0f", "7195cb08ba2248f3214f5dc5d7881533dd1f46d9", "0f112e49240f67a2bd5aaf46f74a924129f03912", "ed70d1a9435c0b32c0c75c1a062f4f07556f7016", "ac26166857e55fd5c64ae7194a169ff4e473eb8b", "fe866887d3c26ee72590c440ed86ffc80e980293", "0e5557a0cc58194ad53fab5dd6f4d4195d19ce4e", "f2896dd2701fbb3564492a12c64f11a5ad456a67", "1c93b48abdd3ef1021599095a1a5ab5e0e020dd5", "9cda3e56cec21bd8f91f7acfcefc04ac10973966", "13aef395f426ca8bd93640c9c3f848398b189874", "cfa40560fa74b2fb5c26bdd6ea7c610ba5130e2f", "2b5cb5466eecb131f06a8100dcaf0c7a0e30d391", "cce332405ce9cd9dccc45efac26d1d614eaa982d", "635d2696aa597a278dd6563f079be06aa76a33c0", "25bcd5aa3bbe56c992547fba683418655b46fc4a", "85f7f03b79d03da5fae3a7f79d9aac228a635166", "aee3427d0814d8a398fd31f4f46941e9e5488d83", "e16efd2ae73a325b7571a456618bfa682b51aef8", "3c6542295cf7fe362d7d629ac10670bf30cdabce", "7ad7897740e701eae455457ea74ac10f8b307bed", "69ff40fd5ce7c3e6db95a2b63d763edd8db3a102", "d7a84db2a1bf7b97657b0250f354f249394dd700", "d3c004125c71942846a9b32ae565c5216c068d1e", "999289b0ef76c4c6daa16a4f42df056bf3d68377", "b9d68dbeb8e5fdc5984b49a317ea6798b378e5ae", "22532c6e38ded690dc1420f05c18e23f6f24804d", "a591639bfcabc4091ff556364074c58521159ff9", "2e27667421a7eeab278e0b761db4d2c725683c3f", "84fd7c00243dc4f0df8ab1a8c497313ca4f8bd7b", "997c7ebf467c579b55859315c5a7f15c1df43432", "0c741fa0966ba3ee4fc326e919bf2f9456d0cd74", "ed32df6b122b15a52238777c9993ed31107b4bed", "cfdc4d0f8e1b4b9ced35317d12b4229f2e3311ab", "02d650d8a3a9daaba523433fbe93705df0a7f4b1", "8411fe1142935a86b819f065cd1f879f16e77401", "48cfc5789c246c6ad88ff841701204fc9d6577ed", "70db3a0d2ca8a797153cc68506b8650908cb0ada", "05318a267226f6d855d83e9338eaa9e718b2a8dd", "fa23122db319440fb5a7253e19709f992b4571b9", "4919663c62174a9bc0cc7f60da8f96974b397ad2", 
"e9fcd15bcb0f65565138dda292e0c71ef25ea8bb", "47a003e6bbfc5bf04a099ca53c67ddfdbea71315", "7553fba5c7f73098524fbb58ca534a65f08e91e7", "1f5725a4a2eb6cdaefccbc20dccadf893936df12", "44fb4dcf88eb482e2ab79fd4540caf941613b970", "56359d2b4508cc267d185c1d6d310a1c4c2cc8c2", "a6e43b73f9f87588783988333997a81b4487e2d5", "f47404424270f6a20ba1ba8c2211adfba032f405", "963a004e208ce4bd26fa79a570af61d31651b3c3", "4b5ff8c67f3496a414f94e35cb35a601ec98e5cf", "c49075ead6eb07ede5ada4fe372899bd0cfb83ac", "00301c250d667700276b1e573640ff2fd7be574d", "2bf8541199728262f78d4dced6fb91479b39b738", "15f51d51c05c22e1dca3a40fb1af46941d91f598", "148721b162dd355812fae94c8aaf365e5e2c3a79", "8cadbd63ecf964626d76a68b855781a6928859cf", "e42d055d59f6b5b0bf677975d21544aad26a5417", "81c7d56f1a77097c8fa14b76cb359d7f436741a0", "7ddb2e298acbe29ccaea131e8a6475d451eb90ad", "ccb0353fd1aa19b50fca8d69f9b9c9f1752dd55b", "71f98c3f7a5b02ab193110d5ae9f9d48a1c5ec38", "25764e6e767c49feb9a92e4a0cb2210796843c88", "99e2c2e7748173f16d51f524cf6add081a478a2c", "edbfbcebb14234b438d90d6dcd9b667e9071952d", "6f99209b7b274e564e95622990ffc4becb3232ca", "1b1e6ed3627fe1eb24d0dfe19f5a1a14be48f8dd", "ec096d96a9cb18203a14c06e150e67febc4f4265", "a9f5acdcf1fbc9563aaad943cbe1c195b796aa62", "fd8bb112b197e23183feeb6d1f4506d180caa4fc", "7894e3888bf0bb4d37569457491517554872eab9", "b95c8bf1fa882d3c2ff466bae1eabdc533781806", "72ae9384187c147dd17d53d0d23d2b604cd09efc", "8e610860a0a273d5a2676e9d53328820f2f59a85", "90621c2f4390d5fe75d16ec0ca1fa4eb190904b3", "324608bf8fecc064bc491da21291465ab42fa6b6", "8d34ab23d6770a6caebc2be79881481dbf7c64d3", "76a0016ce19363ef8f7ba5c3964c4a0c29b608ca", "09c19512612df09b1e69c62d813f6050321bdee3", "5906297bd4108376a032cb4c610d3e2926750d47", "b9262301b11a4d41c8346626a86b603cd2e63992", "a84b4e391afed6bc4ff5ea8814093a57034d664a", "2cbed30dcccb78ad18e3fedb30db187bf2ee09e7", "67134d7bf637f7ac4e354bcb374d7c28c7740ab8", "e05444e51d292bda871388c22b97400ed4cf73a8", "a3b1bb72c1966c4b59d2428fb293c26f386c4f71", 
"4f41f7a2f1f5eb5f26d47aeb168dbeb0f9ed453f", "6c4f34eee17d66677fd332772761f41213cdc59b", "11a34bda2daecad5f7c1caa309897cc9cc334480", "69511f2cf0ff1892758d360e1416617987a2ed2c", "3093a57be04309e2380ac98b568dd8fcb8077ada", "2bdb6162beb1f66baefb83465421526b934d43ab", "54bb3a17d536c7b88e56d294464f3d54de2ea9b3", "7214d9356398aa39923c69650bcf761d4ab6307f", "0185dc40af02d6fed5a0786b9ba445d646f5c98f", "ef9677e5c7dbcc87940b11c3627db56cca9bbf52", "592173bf31727dc67ae1c7927f1f82ae940b2b31", "a11a63e00c0e587adf4efc1425c0651c242263b7", "091b4ad74ac5bec206604673506b19838d6a0c52", "4085e77b15bd340d9e18be651b37e7790f676cd0", "2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9", "02a2c5b332d883d726929474060a7e62411c010a", "20b6b28bca735f8185822dc92b8579b70ab555eb", "59948ee0f334ccdc4e94e5bb6a7a019c764e1815", "7f310839e62c2623f6267b533047b323f61d2b27", "7c8242e68445118798e2e1eedc287e88a3d8e76a", "4d334cfafd11a93394917adcffef6c1d27aa178b", "c08ef9ebf46e5a88c4ee1aa64dac104ddc07bee2", "4bab23e4ce9b6c65a067953fe202c20c387f00c8", "617b719e6c31cdfe7c5c485a755435b95f0c4991", "4ea9bfcb7791cc07882f78b4747b8c8064ec6f7d", "8fcdeda0c2f4e265e2180eb5ed39f6548ae3ba99", "62f7de0e26f5716beb32b9d14e646e76b3a2e2af", "b5354bcad6c11983f9614546371262c454c994ed", "14e336258fc0e0bf7311ba73d6f3abed339fec85", "300eb15b819ecc9668be26735e5038efc4e05281", "154683bb947c48321ecec162062204b1242b51b8", "340798e6b7a9863005863f38c1bbfda5cf85d201", "b4ab2555d5690e8e6fb1cf23c995a120181698a6", "06cf3f55b2bd959d9228d29e1aa3e71ba7cece94", "83e7254431486d24715d4170680c6cbc8bdb2328", "00433d2ad90b40bc5ad22a591aac0da68037003e", "4950ae0e837657b611113e219bf848f0c657dcf9", "a74bd6c9c4631117a036ce0e1c8e3d2a0b1f1f5e", "6bccfe8068da78fe3caa43bba686919513fe451f", "b75cee96293c11fe77ab733fc1147950abbe16f9", "6b78f258cbe1f34a938547eb1585e43f8fdbd200", "3265c7799f9d14e29de37b1e37aec4330cd1d747", "b9bd9cab426f4d4a0b0d0077f6d9dca2ec01ce3c", "cf192e801761d1d9351f5f5163f9f24970f88812", "0d4fce2853a867e055a0062c2ef2f8accfc623f3", 
"120dc243f034d517a2181d1788d921510ef30cbf", "d4efe6e5b4229b03fc7c62ee34c878626d368c09", "7d30939e2d6f8b980910f4eeca5338d072f5ecb6", "e5bdd9cdb0845cbc2d1301198cf18f3c0f056161", "a975f1aea5dbb748955da0e17eef8d2270a49f25", "9e2120e48d497b373c53563275c3786c11749883", "d497b9e50dc2aacfb1693ca4de6ebf904404d98d", "80f5443b0204f28c44ee2dd94e72f8dbfa22910d", "a58d942c5dd42c9257bd295a13cb49474014a8cb", "47ae41d513630b8e4a03a124d7c20eae97ddbf55", "4d5c34fb36cf8c74880a62814750760bce0aef16", "eaf8397f8effd1f595f14fb888ff7c9e54f35833", "0e41758075d91e58412f012c2d03531c5baf7cdc", "b95acfe00686cc6f6526fcd1f30b6f38061d3a29", "c9c3ba7bebee553490a9ddbc6840292ed5aed90b", "59a0b3537bf4f764c192812c4b48049f5c8fccc3", "3f3d0852249ff7924e152efe948d0aee87d4238f", "4d89a8228bcf17f444d82ea271a548cb16fd0786", "0f96adc5a1b50cd587b510cf7c0bccd8186ccf82", "86e005b54819ca54d35daa2ae7ead498f41d84ce", "f7970f1686d322953501163b14ee4368cb5f0498", "a9a2ef9417dfe480a047a11a15814d3886ed0729", "11ed823555aabf7e32df5b09a04111a686f8ebb6", "b749d6459a343de2b708c44ceb801cc201bb9c42", "b61b4eb2e28b9cf35578498e1bbcc35ec0a07651", "8399c71abc9a820bacd9c4e21c85c461c0b830b3", "936a4af585f9a7d3b95c078ad31e8e41e22cb406", "aa435de0833fb6cabb1bd97f85a80d4a593807a8", "21f990f3bb8c7dfe57f31d912fb555819f1a64bd", "41f7c03519a2b108c064a2126daf627edde14c1e", "e8632e5bf43f7c59f4e1978833db8aa405c76c58", "e27acf161f569aa876e46ffae2058bb275f12a60", "f3a9baff7b059c528a7f72dd458db569892ee29c", "b0a376888a33defd6fcfe396a11e6ea6d4f99f0e", "b33b88a5fa5d4f20c24dd0e5f3b3529b7545c9e6", "c6fe64e24184056c16ad87f89f6d8ffed1423611", "b69f7660985be23abda72990cb1f367778960275", "f735188dbcb276cd1da248110712fde0d1b2aec7", "af858b8f788c81d40c3c21568d3cd0d058dcc9f7", "7c202efbaefdd283be4a4579c6d579ce03e48cd0", "03ea398fcefc53a1bd041346c895aadcffed0261", "1456f147381bf7c385225d854c2fb48c19eca285", "afde6c95ca696de65599a27590b31112a3eb6f6d", "224e78cc643e38c2cdcdaaa5123ecd7cf7a08674", "b2fde24de782d2979b946c49986cabdb12e84eba", 
"9561c7ef4f89019eb7fb779a7b18ef810964b491", "3b9c3218439c7a6af9bf8ad97f9ac59874ef700e", "0144b29bde2579e0a1b8ab3a38306c5621a5c30b", "09c4732280c3b2586e390d818ef0056a8de73e2c", "1e41a3fdaac9f306c0ef0a978ae050d884d77d2a", "19c5dad301b1bc0cdb719f0d80e42e73d3c7a421", "b8349ba39a034d7bc693b6613f2bc173f0ac27b8", "ffb34b2b63e80e3541ecbdfd75ebe88304258210", "e3f2e337d4470545398cc6753a54c21debf9c37b", "c1059a702f53c44bb26d3313964e811adf01d9b4", "91816b4b5fb74710144b3294dec61aab4de12fd3", "8af0854c652c90d4004e1868bc5fafec3e4ce724", "bc25f5e10c839d08ac8827fbe7724cd713008803", "eb0a94e92dec2fb48943078eaafac33502fa3162", "2c98165dd72bac574ed463b00f1dd4c276808cb4", "cab160d3ee7873f7905c0334de1a066e20a25679", "89dd979043cbda9f3964c31acc69873c63040552", "f902592858a68efb8f8ff333d0ff6864ce81832a", "7cd9b79cfe05e772afaaff2a0aecf3bd5eb53f79", "6ef28af882e408ff63f83ca670392a008d203fbc", "e93a65ff1c7c29736cef5701f079f75ecfb76f5f", "08dc6e67f71e6c666cc7eadbf31b494e4bee362e", "8645fe95f3f503f854b08096c2874a3f7ea6b79b", "796e333796024acf662fe76c4761607eaaa98a5d", "f4b9b5bdb28641fd1a2db0b3730eef02c7bac489", "4dcd40005726e66a0e4ed33635b38bb8107a671a", "4eb600aa4071b9a73da49e5374d6e22ca46eaba6", "f4d68024b096c8e203f8b8ccf40d0efcd06f2c4c", "d897e942dd9d07c3d1787b44151b691ecef7ec1c", "c140eb6fddbfa4eaf8fcabfaa4c205215bca6be2", "e2e8db754b1ab4cd8aa07f5c5940f6921a1b7187", "b3416a5f7339c7e83d68ba1d00d00576880a8f04", "bf23de0c2b478114cc5c4733e4e701a1d4662cc0", "f76808d6811cb3790e7fc3ddb08c733febbdefba", "aeb2d4d6418e34e4cd91925906d6b648e32c9cc2", "19b727567c0d63dd5594f96e70f5ccd8ff1b4e33", "ccd5bd5ce40640ebc6665b97a86ba3d28e457d11", "d6683c74c17d4fcc48ce3d9df9df6aea38fd4923", "b1e6078cfd44ea816cf2fbc05d10c69543834150", "4b990e033578424dc7cca28df9b638098cbdd1ce", "64372501affd8571db20dc606b0146a76c266303", "b4f90b09bb99405885bc9413288468f5892a62f7", "cba3061a883cdfb45c6d26fdee7dd53e6614d388", "4931562044a691fe41b638550b54a0a689674e83", "f1354246b647074dde9109eec4307f2d0b4e7fa2", 
"1670729d1edc9bc6103ee823f1137d302be41397", "0ceb23d9f7cd7bf095642d40dc650be9c1cedca5", "bfad1067e24d9fc86487d4533559721838bdb710", "43d2deb66ad01e1a6b77e47ae31bf8dc12f81c22", "3979fc22340bb275e5d1d97f00ba0dc2cf1e881d", "bb06c12e83255b2c3afca1e3e115e721c53b46b3", "ad9d98f34f6b7d3f0d3b742e49bd42fb6b4040a4", "89e4f5a1eb6a97459bb748f4f7bc5c2696354aad", "587f58539ad6fdfd277853ca611730a81e773232", "03dd7ca6fdf2f4785089e286969f7ee5ccea0a02", "6b68e3388ddecfcb0671dee6fba9a895aaf3d4e3", "362250566948f17693b737122fc1434173982da8", "04ff060369c86ccb07414935bd3e3b85e4896261", "22029beb936c9871757813758c5ae3e5820260c9", "d097056d7cf1648c2e2de8e3a15f2a8d526cacd5", "66ef3bc9136f062f31ad20d1f5e0eb57074b9493", "6dbe76f51091ca6a626a62846a946ce687c3dbe8", "34128e93f4af820cea65477526645cdc82e0e59b", "3c1c8e171450a9b279df939d4c9209d8dbf6b2fe", "fded7773f14e4680c5a818bab5de4b3d29bfc732", "be707bf7c7096df0fcf5bb07ef0fa53494d6a781", "a91d0ebc1255d6de1c4588767b3b5e1fc630e56f", "3678dac7e9998567b92f526046a16e2910ced55d", "ef75007cd6e5b990d09e7f3c4ba119be6c2546fb", "dd72ed9a30e4d04703487df29a8762940bd79967", "8eda955303623b68ab207abb233fac17b92c6632", "56cbfa23b9a60320fc0f8365991544f015a610b3", "70f0636b14b9e3916a780d70a5c712e8fea739da", "5f6f246175d4fb759888e2e42a04360724dd4c74", "069c40a8ca5305c9a0734c1f6134eb19a678f4ab", "5acfef824dccbb56d9095ec2ca54290e8ba6eaeb", "b0894e102cc3784c408573fce3a1ee0130d7ded9", "b5cf931cf0bd606575bc793c0c8ec6d913d08bc6", "49957368eceaa751c0b9c49251512ca6a8800cff", "7a27c7d5ec6728b6b81a476d38db6efe1d938215", "42c3adafbf8fce4b9d0986be184f2b1955958162", "a25fb4cf0a360bb4962da16d0f1c2e3bf570a183", "22259622612a839d97133d4809f80447dfeb5d56", "3151b110ecdcf2105def494bfb0775f21259d7e8", "09a8ae8fc95bd3c9fb8022da2c32c519d5fc06bc", "111ac5d013ac59aa8da919a470cdf83b437f9721", "75b4a3434023c5068c510b91bf4c3679e22ff3c7", "34c7254d2f420df6309260b2bb461a9c107dfd5a", "4e33798e364826af1241d28d57977bec9a579709", "16c855aea9789e2b7a77f35dc4181efc93dec69c", 
"763e0d4cd22eb1339fe346e8c62bf45738734dda", "e3e0f24539706023d88883a848b96b3be0366892", "0037d05fe2fc9553e58206f40ca39760396b5911", "3b4177556f1c9f5a8f8e1b2e8d824dee20e388e4", "3a165f7e22f0667b401cba1b2615048193781b4c", "0033e0ce8720f913761f9edb9a6c378eed8366a8", "41d9fbf55ea7142b13b68d8ddfe764896569cd32", "5298e3ddb8ca2f40b67aad2507a76cda71bea6ba", "78f0cf1cac51c51121080a3785e763e4397fc330", "37d6cde8be756b70d22262f1acc3442a0c6aa7ea", "669ddd0b5f742876fe84cfb3dd7ff30bcaab52be", "0da8f43b7705116eebdf6f828c82046c8f63db2a", "f8a38c8aa9d2f9f2db64fcc98f5b4770551d70f2", "35be6ce15347870395931af308f3e84f1e79db1c", "faa111d749eb228c686643e4667dd1bc21c724f2", "13636bbe75556f069eeccd6a84329d9fc4a057fd", "1d2f16a6bc39e2cccb0bac09b8b958684901d9ea", "a1abafcb88d668495b6ac0599382bfb26031b8a1", "46106d9f9d9b90401b7984794536e2f45fff1dbe", "ecaa37790566c82c892d4764bed2b6133a3a8e32", "a3ab0490b94868c52ff614bb0c6e47019da84313", "97b24c34114730ef556737800912758c5232c47b", "0fa24e602d65af82fc429edb4e0980dc534d4b16", "0534576e8937c209fe03bcd5e64ded9bd09d897b", "9700f1cf75c3908aca56d154b2a4424ce557a7b8", "496a3c906a31ebc2876bee410530489d4b64945d", "4a95dacb1d38a07e73007082b8ed7651a4b5277c", "2f951dcba9539270ca3feb9becc4539feb89e80a", "2475ad865b2102cef83a87adfe0d2e71d4791e53", "01c687f0cb8c8e1002376f834c9b43b4b653a52f", "fec5c0100c72d7c1c823a91dc146ecd5e98e77ff", "31b0f482908d16d82826f2fc5fba67128cb07e4e", "984ecfbda7249e67eca8d9b1697e81f80e2e483d", "398ad0036b899aec04502c243dd129c1f3e4c21e", "0ae910ef0cb2f193a43d3a592b7b62ef8bd13058", "013ae3813686580980924a8fc97caf7776b8a63a", "3f4b67309e6a2a9a1e303fbc0606225df0d3c2ab", "63da9079437f6090b44eec60ec3986c25c13be73", "53d39f9260c6ee29dbf118382be91ea09475e628", "454ec30d0a491800458a52a5aa655eb76a28f4f5", "302fee58f8c9498e8a5e543312e7c11baf7e0827", "478cab795c8bc62cb68d3ffa9b0dfc290201416c", "d476b357c5bbc7bfae06a3876a5c0852d31d1b6e", "405b43f4a52f70336ac1db36d5fa654600e9e643", "41350b58df5de5ff081bdbd4c308407f2657d658", 
"394276284bfdb5049aedbe94b215e81b1bf4fb03", "93d80d544b5e5e5f84605b29f3fdb9b502f2e99b", "fb44fc4c92ffbca1a41053e5cb673e76212f1df1", "41562c2982e1e32acd8283571c7b1f3d6bc36fd3", "aee90db1f66b77113b0a62701deb01ca96b6d9e6", "204db062f4952ce446cbb28fbc40d4a7f4424b03", "ebb527dad52f28610f9153952c10a95d8f01f5f9", "2c3cac0f568ae9261ff9c80eeda55a13e83ae7fb", "9857c4aed140d6829d763004c028e7b61bbac00c", "a8f032b300b99dedb9c0f8362557302696d5ee9a", "17b6eb93b41baeb5e1b0a16ecb0673a72368a34b", "6821a3fa67d9d58655c26e24b568fda1229ac5be", "de7b815a83de8496a3bd384b3caa54352a85cbaa", "5fc9d4b713fa26a1b50759fca8fb2ab24fe96ebe", "506e2850a564b6085d8f0af4834a97ddd301d423", "5f58bf2c25826cb6ee927a1461aa72bd623157ff", "d69e644016042d1032995bc9f51e2d72a1c1cd93", "9b401be9fe35b759ae48c9dd5e9b7e4382511a55", "005503ccf270890ea2582370feed4506f3785004", "e65452aa90850570f7b8d52cbd7082ed669c19f3", "60b66ec51ddadd132453f700d1781e8e7a8f78c8", "abaa114931d71f80e82fddf076e2a62666126f9d", "a2a17b7421bd46224127e35e3451b1af36528a6a", "bb1b1002210de5f46102036502ee2732a8050174", "0b39f2e02c0e8102092f980615449a5c6c3087e9", "4502b2691d6c92f3306b66bce70caecf550ae55f", "06680961e99aadb366968e5f515da58864ecd784", "02be981d45a8ca14e30e1cf9dfffd977f85e6ee4", "8236dfad541d0caa066ccfb2bb04731e3c74db37", "6de25382753e1b20246520c5dc8b591d9371c816", "47f31f9126a386dce81e3ef935579fd8c9a134f6", "1740a0732e8e308f5dd395313313cc3289666f13", "14d6ddb48d1b8a593665576d7e25f17be1447b2e", "0020207f7e004a5f3faeee9b7c3ee86ceae88a2d", "50fb5e2f0c2fe8679c218ff88d4906e5a0812d34", "a25cb1b45b0415bae92d7ee5a384010968b8a657", "0c678d593cec6ff51c18bde3847fffbf58a66282", "d6380f8294d62757fbc07419adf76ea87e0acbcd", "8b46df5e851e819473a726503a543a95e130e33d", "c88173aac29baa13d615c5be858290a14f0493c9", "2299409035a585e89938b0179037c8d05976d174", "dcba9cd587be2ed5437370e12e3591bdde86dc3c", "cf4e94d0337744280da87ff351412bbe702af2b7", "1b1027b3b03ed19fe579a32319b3e251efebb3c7", "096bd380a2f653dc5e43069b97e1505186c47d5b", 
"6976165d1ed210363be5b45933144c16183b64c4", "87dd1c52a3805c59eeab527b8c8c1214415026a6", "073c9ec4ff069218f358b9dd8451a040cf1a4a82", "31c174f2190889d5792358713e078336926d7ee4", "c4827fe8002ea61a2748b78369afe3a0747d1a0c", "119ac3d4a8c9bc5c36087140fbdddab788d10e5c", "b849bfe51138d88f6cae2d602b5e2a42565fb1c7", "98a60b218ff8addaf213e97e2f4b54d39e45f5b9", "12169222eeee058578629e5097f250c3992530b1", "6f83a82f7944fc679241b7e1cf9f3e17a6d17550", "9e84fdc47fb3c79bc0dbab96ff8e9ee15692a3aa", "60ab5c64375c4f5f8949a184fd9bfb68778ae6ea", "5b7b9dacba6aa8a1967eed630145e77695c75590", "f38d09b11a8f6853ef27822df317646b70af5949", "6d33cfc76f9febf45a8ce9ca4c78959340beaf6c", "a76e57c1b2e385b68ffdf7609802d71244804c1d", "b5050d74dd8f0384506bcd365b31044c80d476c0", "ccfae6f7458391fdb42170af6abf1b6840cfe88d", "08c66211b17a0ac7cad53995b15b0098cad8135a", "e7ad31d5dfbf2d726fe53f1367b45b7174aa4b62", "d5488ccb99a3e7adab37d5c7ea05b163bc717d07", "b039e95a998d3f0c29ba06aacb0bbef3a02b551e", "af057d399c41910aef723cede855ff15216df303", "e0590d0705bb0ee7182eb8c45671b142fd37460b", "984e6ae7d2d018f7947c84b440a4cb9047d8e502", "531b50e7b829547db9ad143d232bc07b1a64e3e3", "2e384f057211426ac5922f1b33d2aa8df5d51f57", "349c50a22c9f5b46f4ed0f03912706b2c9d484d5", "000aac6ba1c67150d2d6fcc9acbe484b24de4c06", "2dba6a057b7e7d14fc6fee076ccf1d22506be54a", "4c60b754ead9755121d7cf7ef31f86ff282cc8fb", "e1660c10ae661cf951602232b36047b19198f599", "6bc459c548bba7a04e2e255845b28060ec390407", "2c71d189e131d8a0b1f832202392b83b31ee2818", "2c28c95066b1df918f956f3cc072e29fd452dcad", "87363751b8e3d51a002dea6d32df553ee5315cb7", "ec443db55db1a6721387b2054b94f6df020994ae", "06554235c2c9361a14c0569206b58a355a63f01b", "3502544f66da8fdeda0daf8f6671a16c52e8e353", "aaaeca92457a72ec4e7e538cf6393c4c1dc8e670", "e8327930af0c719e3084d0ffb284704888976515", "dda403e6d9b61e3fa84fafb3aa2f70884d03a944", "5e706ba3d5c7237a580716aacda350b867c85e5f", "e0da17d5a8460ab74d4e8db338779feb2bb9fbbe", "f30860a38945ebcef57484a977937dc0294d0ce6", 
"28e287d5aae3eb2c2ddbe3791e76a4cafdca7ef1", "44ec89df8d9f42e323ea90599f23ae58e3a8925a", "069f2092c5d22e6d4c1e27c30e18dc63848fa3c3", "4c9f704c699f106ebedc258d00d951cb74f34940", "76ff6a68d7a8dcc12b6ba68e914294f6720a466d", "ee82faba334ede5f46b19d93639679b318bfa869", "e3f0e0dbc8e14e3dfb8fe9f9ecf6dab2c4713823", "a04429934abecd22a136fc2e1a659ff557f7cd84", "1f436aa4e68274037fff44e6cfbcd0a1ee3f60df", "8ff67b8bb53883f2e1c30990bdf44ec2d8b502b9", "0ad90ad5d2050ebaba5b5cddeb474c7d889bec3e", "850da881833eb87c608f4e3c08be3f367103e839", "beb7a0329c3042c2ce63b5789e2581bb8e2dbbea", "20e64f44ce2977a4dc5099fce6f73842613f0865", "bb030eaf7c25953369ee111dc1555f4f85409bb4", "8656567dcd6917d144572d5a0730726db2b2c472", "55dcaee65936583846e8c4fa36589df066ebadfa", "3f3ce530fe7e75c648b6959980008b0b1f99727a", "000a83a533f9c945addce83e466e308df1ae79c5", "d6b1c0c2107abb01ee4241963eab26e261510f12", "105bc5bde56723abdd3979c7b9adaa0a1616520d", "4091b6a3ab33e2aa923ee23c8db7e33d167ff67a", "b4ad2bdbf82c8bd1454f6d743b956bcfbad54101", "205cbac63de77af22e003c0c98c1a4a351747708", "d881a59d00971c754e02bfaaf4c48ec6dfbc1343", "bb21a57edd10c042bd137b713fcbf743021ab232", "576bfffd7d58a9c70ff73e39033f31739e6f09b2", "0a3051c8dde80975640d42dca21fac17ed60f987", "01c840aa27a6c234c0e55e9a5874719bb4d8fbe3", "72ed3be320e435a1dc093c84071a22d3d64fd997", "5fa08db02ff78d8f5d2a50ce92d30af811eb7d64", "5e9e3afeea446a2ae19e3a8e0678f08b73b0b36b", "7573ff84d71de19fe7d387bb4a6de73cb28402f4", "65fbd8c6b6a8814c3d8b28b4e14b2e262e60c58c", "58ef0c54f01073e43ae5e9662f450002540355e9", "a35338cb4686cff66710b7f8102e5eabfc38adb8", "68cff7e6aad5407f74aa4e7a11ce4ff0102eb7ed", "63b20102be65bbb3453152e504e79c2af2eb9059", "8646f22a46b65c2018bc39ad3cbdb939e788a1fc", "a01d22166ed62f5ad485ae32827c70d583a88564", "efaa24efd9b1adaf4b0ad708147fad3fc12f227f", "86be567bab1293ed847979d2c56a662fcbcbc1d5", "67e488d4d31d65a31d4bc2a3337c587720af2a12", "7c428ce264662bceae0b78f915d4d4797a2492f2", "3a95bfa1d4a989b162e07fa69b85cb6d31a674ab", 
"98582edd6029c94844f5a40d246eaa86f74d8512", "0566bf06a0368b518b8b474166f7b1dfef3f9283", "73351b313df89572afe1332625044f7e5dd0ce06", "6ed483dbd967c035976830e65d0cea4fde4ac14a", "480040b64a972bf51f8debabc4f9421fd2c7b829", "ae7480d4fee5b5fd47bcb8ce37395fa043ed3356", "a353d425a602d04f1dfde2142650fe0fb5193159", "040033d73d1efe316c8f0a8ed702b833a0550d83", "996ca49f17757a146c0ed4eb4e122ee4af200e06", "67a6bd37e91f2c334b1092fd9e9b16be93f82377", "18804d8e981fa66135c0ffa6fdb2b8b3fec6d753", "6f4a596c80b5ccaf44a076760761c4f132920b11", "a432ee5977443b5c29001f4bd10c6303cc364d4d", "900d5fadd4daf867dcd90929d0c2c31d5976d13a", "68a677a326a290a82bc08686465019414ebe1d98", "82f6dad08432a5f1b737ba91dd002ff1f89170f7", "d6837519e7f028e83140ac2585fe84ebe4aca18a", "7f3c226f2ee3563461a4c5a2c323fb34beab80f5", "4dba7e19e2958d8ab75261219747aebc675c6f8a", "e0e19769ad446c2a74c0616fcfb551059c899ce6", "82a4562d9ef19aec3aeaf9bd9f0ac4e09bdf5c86", "ebc2643567b1c614727cd7ecf1d0604972572568", "131e00d1296a952ed236bc264dc16f7e486c6e79", "3f1ca715733c617c96a5d6deeb76c93c710d082d", "183c8da12a07e2002fd71edbabeca5b3bfb45d66", "f90efe7d3d6eef4fe653343442163bf20495b5aa", "20a052963f2c46aff817f34a09c396c44b3e46da", "36597e65169d576d0a68dca7023c57efcfee5c4f", "2f000034f040f6a23c756671477f5f573514af8a", "ba244239a2700b13307a112d09c895cda2fc9af5", "2118b1ce0c2551e75d30fb6ba24482e50b319a90", "18fe5b96b620454baa5342ee6b8fb2908ed22988", "779f67f2fe406828bbe7a19e8736cb5fd309e321", "47b34a8ad5100582aa7cbfd85df3ca7659adc392", "08ca2a2a543ee74e2bd6585e0a059b30aae65d30", "91b0081a348d182d616f74a0c9fb80d56acf4198", "0c8a6fd107320d5d40a3c5bdca1efcb4b8f774fb", "b6d33a07d55688b5a0478a0a44f8ec905376f37b", "4d803109f3d9cca7c514db21a0494972d5681faa", "45d6323b4256a1cd11189141dd056525890f76c1", "2cb7b6c6456735e5cf778ef9864bf590f7813ccf", "05bac1f503ad77b095730f3b55214f7785b3f65d", "c2eed73654b544a705b194ade58cd82488c6c5b9", "8ffea367dbab7ec446a202f39120f5b3769c7f91", "17e769ef3d86e74c21f2616c7f7a6f20a4e2fbaa", 
"94d48f61cea7ce848af500f4a02f3ea4459bce27", "b051715249e47fa7e987e1a5504830af0521c38c", "00f2ff5aca7737f2ddf67d4621fa4df6a951b0bc", "36a95f1a9fbe518427bbf33293488c71161313a9", "d246b50b99eb2d2f1e67b855e059002b96a64f86", "e427c8d3c1b616d319c8b5f233e725d4ebfd9768", "ed02b45d05e58803596891d660837c21be70a0af", "bb30cc05068067fc33d34930283bfcd9ad9b411f", "40614596ea5ae9ac78a88bed0419366831f9715e", "dceaef5e7cbfc4d0150c2d765cc3df4349b8b2bd", "663efaa0671eace1100fdbdecacd94216a17b1db", "7f7c3a99923549601c81cd5e9659ca01e8a42f47", "858adc1499e556dd4d2c65705dc62d2e3592b3bf", "c5e4467b5830d7dad4e940f0766ae728f22e38fc", "dea749f087a8c9a9baa9167b4eaff50bd3eb9d16", "e06f94ebf10b511d121725c318cd289c55349c2d", "4ff7f5928f96ddc877b4b8675cc41cc08f4bd561", "7256160e8884091f6024a9495291bd0b10e8af6d", "255bb1a38169c7b78fb4da747cde18a961755d7a", "50af3b6f7192951b42c2531ee931c8244e505a5c", "ed07fa6df6a8fc27015d25717c9f730dc9eede84", "e33b1833b2d0cd7b0450b22b96a567a59c9e4685", "4c4f7ef4cc9efef2465e9fe6b7a6fc8d92fd8d1f", "3e56cbce67d312af2b3a7d0981e9cb33d2236bea", "d865c5e85191cfc0da714290d8583a2fb1179fd4", "1eb249b515f7c09ae2663c1b5c49243906aabf22", "e762f25f13d6dbb95dc59af5e6fbb2160fcf4d55", "a8788ce65d01018a0e1b4cdaf6466f495e68f7e3", "52b0104a43f55c5652001c06dfabfc4c327018bf", "4425df6cc10917644c44a7f4177a5d7cc1c8b7bc", "d89f17c77369b7a7148c77ecdb2e1fabf90e650f", "a7fc39214fe447f650441d033401ca73b45c6633", "361367838ee5d9d5c9a77c69c1c56b1c309ab236", "18a7edd0bfe5a3d6ceb4d2053081e479cfa1e920", "49a2f3262958465c8cfd5a59bc0f9f4effd1936b", "73ec2d5a6b4bee0f268b793ff646330507497e38", "0a561e6f0aecd182ddaf526220acc75f6583816e", "9bd9050c53d90dfa86cb22501812afe6fc897406", "22029de24dbf6867658145264f36b161c40a09d8", "9bda68ea52bddf5365e3230761c95424ff1ddec5", "f3e2fd2388c33b09df32c29f381e71b48dc227ab", "6ada03f390f92704f3df1556846697c54c00f7da", "218595e1979007ccd6b1bc5a30a3484841c0eafa", "f084867370c76854cc7f3474c9a1f3e9bccbdb7d", "e00e8dc033b7009eb70e38891ee7e0eea80402b0", 
"1d99282d00f7cf3e4d912428313848add8de8220", "8d559aeefb291d5b017c263a49f38e8a28439344", "00cc6b9ad1aa0bb6c170eca1f2b8df2a7e249338", "684114d3f1405efd50ed9a59063936a1a7fdc970", "54dae5187de3898d8034719bcaa3e0100ae72d76", "140dbcb0be3ce7961ed551f129698e9ad4c9aa8c", "6a14652508138fcf0aa8c518109165f65c88fd3f", "ff3ec3607b77a1dbb685cf90dd23a273d622dda5", "e09c7bbf1bef602018928acb395f09448a0366b8", "664e3e84dee394701241ce31222b985268ea005d", "a6a5c91ac583b75c796e3c0763f1ea0a58615a8f", "6b1f12995c88412607d8c36b3d5b0aa6a5cba7a3", "a7e274db8f1389b95469588995f18c1c42b62534", "93173b854598140ce16f5d3263c5e13d73dd3e37", "3cd9411181cd4f12798c64f0442c199cc24a56a7", "e5687f9584deca1fafb68b50fa79b9fcfbd1d379", "5f943f9bfe3154fbd368034903ea11620d2946eb", "36c31db023db39d8ba3ddaa42e23acfdff5c7530", "4444f498f328c901eefb3b908f6d226d1d2f2588", "2057837e059a1dde8c6c4c0587e652b79c04780a", "12fa75f90b0dcf254c33145fe08e7ce0f099066a", "7268747251a61d1ce3285baade15c372d4c566d6", "0bcde128b115af74d0986306184502ae7c8822f6", "7249520f2b70b1daa935adb559fc1fbaf26e3dc9", "146e6504d473b92e56108b7276d96aebaa58ccfc", "3a4779f3f73d2ebfaccbc0dad0bdbf7ac0570c0d", "3b1aaac41fc7847dd8a6a66d29d8881f75c91ad5", "5f0facd360d54bc0d532c90ec2ced4c54043d15b", "57df27685196fad070bd2da14ed865fda87d93a9", "3b5c8c04c9c640faf7bd7b1df465a8f6153b711b", "b67e0ae9d64ec06b3e1c25c7f7e8b86020612d33", "54ffc4c83974d5915025f80e54e350cd30ef96d7", "71899cbc5b4b25b2e834919f58d7620484d7e848", "8def62fd86b5ea0a41fd9f892bd95b01bf072e88", "6bee77418af305d632b21eb03872a0d268eeebac", "5a3800bee147ad58ab7d6c55d8a2be484c17a511", "dbbdb23bb2512ef0922c5396cb95f713257b6ac8", "922a8f36112c3f484df7161b334bbfc78600afc8", "f929915cefe8f581e3425c9d94639e25771e759d", "2163d8cfafbac93ef36b955cf7f50c8ff589d3e5", "11deee1b9e199d1cc0d07d4c4d9f888447e19845", "df9491cc46eacf66bb963a699b73ec1a82aec4eb", "b79f3d9f8de4d1cc6679676146a40d2a8596f32d", "0e245275cf1ce11d47291d0930d9b289cea9cc14", "87da8bd9eb2fff2d77809c8bee3bed8c93cb5b4b", 
"73713880d4d1ec4c8f4608a94f67ea9e9f9a97a5", "f867cd63fa18da1c52061ba22954ee9d138906dd", "018e730f8947173e1140210d4d1760d05c9d3854", "c2fb2cb5487ad404b8e66daf74198496c40bef32", "2e230fba690287b9cf891adc0a0a089df3a383b1", "316bed02e22aa6742dffcd50c29a7365c5a5a437", "38abaa549c4f398079dc5b1e5957315f66918e23", "a7e78f80e0e37d0c17bc09058c27996e32e4454e", "70e90b9df5b8617ef6636c5492db727f9d48d0ec", "0c9d9ebecfce885f3b1e7bd82ec1b74e9f17b9de", "34a256eb89fde78d61c2184787f5c3183dae49cc", "8b724754d7c7d7e5f98c1982b144fddb66add843", "00b0ea36d426b35994b8a586a18651abf1dd1f93", "0b48e1bd69c5b87f197397f933ce7f5261fa00bb", "8f0bb07bba345af8e22bfadf392660ac26e821e2", "2547607a98eff30654994902f518e30caf2f8271", "155448563c354b01d12610b5864b511644cfeb27", "77fb0266b354d33f3725629c2ddce3d2342b318a", "58c3e04e8105cf8f2721c4a4a6487db752bb8852", "ada53a115e1551f3fbad3dc5930c1187473a78a4", "c2be88c6d99605abca7a7377935c8809eb8d328e", "214c001098b4b2601e35346866480180a60f53b7", "f98cbf32989387733529fa4fc943f0a7e97b5c07", "760a8a46089ca9fc7d06ea44b207b948569237ba", "0b937abb3b356a2932d804f9fc4b463485f63d0e", "93017577a8070458bc2c0a91e653fba0773466d5", "4dd00e37f4129b0c62e111906fd8b239520c08e9", "419550e7b918c64785f087b17f7fde6c94bc6d4e", "a96c3b0e4ba2949053a9e1e00751b76ef5b05816", "35f6e4a61bb3541348300be3347ab56d0be75744", "7c02a578734af8008177bb3f27549198b2503178", "31abe8a99741bd3d2a3b8b70bec1d1a45ede94b4", "b73a6c7083f3dbc8b355f934aaf84438c10a7963", "18fe8a42cb3b7ce14e29a11faa3d7cc45f1be22f", "0e1f6abdd24a4e929511740168e2f67351751302", "75c3ba0c7e5b0d4a11e9d2e073ccd02ee688c0c9", "20f8057b602ae9e24ef4ee436250f35dd9757327", "52884a0c7913be319c1a2395f009cea47b03f128", "59138911e0526dd1d8c5466b2793b6bb02c35ca9", "f187b0ed2224b2861442a73ad2966c1789afc09a", "e39d1345a5aef8a5ee32c0a774de877b903de50c", "0bd949f948f8f7afc0578d23d065b36c5c03c509", "6737a429dd615a0d9ac78d836c6b65bfd9ec36e8", "8df3bef321cd1b259cf6fb1ef264a2e885610044", "52162b19e058f11b5d010f6b9f1f4944ce8db3a6", 
"18dd8e04ecb5421b13aac39c288cd8dc3a541178", "a32ebfa79097fdf5c9d44d2f74e33b7c8343425c", "65a858ca95dcfa032e812a7f1fc7ee5bdac88f5b", "696e4f16723db3d1cb7888acb9ab6924a40cebfb", "2c3138782317a97526a83a7ce264c0c772ddf7e3", "5a5f7d39433d68059e513b947a9fde62b5d4d3fe", "2e5ebb2ed819b97c6c54570d684576387dc55e93", "7bd837a934c6cd6ee858bdfd4ee0f8fa3663fed7", "19c0e8f6fbe49b0065039ed7b23da3ef0fb9852d", "e10ca043fae02972f19292efacddd8e0f216b70c", "00b03ee4a7e31a999715d7a0c31d283d646106fa", "569dbb5c8a84d3b378cb2e38bb86ad7d826c8d10", "7df7d5040694691936a92cdd51898e8bcf8d28a9", "6b011aa54aeabae8ac172a0cf0dd4333d1bfd327", "cb1214e42fa81977bc21f4b3c8e194a9b68278f5", "de1216664eb86122f31b5e072301b2fb7f187251", "321fbbe7da848b602f376219ed9aed6a7f4b7f57", "4b7d5b17c0daa35f682417c32e80022c6645dc7f", "6c510b56b468303adf9ea2fe94bd93d98347f07c", "19a3e5495b420c1f5da283bf39708a6e833a6cc5", "127316fbe268c78c519ceb23d41100e86639418a", "16f034d56a15f3b69e49b2b216bfb650582ef0a0", "fdf31db5aa8cf8a7f9ac84fcc7b0949e8e000a41", "19676dd4422301a11aa5fe5e5316e2c412987302", "fecad388186269e3d8d71a75c42f56e661861c3e", "1b6d2f8f9cbbf5e20e445a60cb7840a30975f297", "2ec55c3fb5fa493ebfacc58115cf28f283a50a02", "540502d954529b37bfb2a0944126c7c4268ee39d", "87d0c3359a9a99fddfbc5a388e211762a79ed5d7", "d647c13b55a8c8413dd470b376fb92da77955dcb", "f7ed39dec6e9060dc3dc58656ddf823916a2a643", "b352bd38298608afab5df341857313c146c1418c", "a43c3ebeee65d44bbedac7548483485a14eacf52", "3d67aa108e65e636158abc0f31b703af3d31baa6", "30a14aabde46aa236a7b437a4942a92d417f3653", "6c4d5ac0eed17513e3ceacd396526b8ad6c8fc09", "215f26774779e260087c66eda49e22429619db94", "9adc420f1f9c97fc1fb7975447fdf244b061bbbc", "09c5fe448694eea3cf3166ccccb2c81048fe0601", "5fc621cdef59c38ef898a2adc2b4472a8396119a", "3d740c4f2246ce8e63d0eacc2cc1a5c31259e9ee", "1151a81118368e7596843b8db2508e4974fd7435", "6971bdac5119c4cc1b6d92adac605e13f1bcd80f", "9434524669777d281a8a7358f20181c9e157942e", "1e5f33a4843fe609aedd060c50278d0d62e0adc8", 
"7710232a3d8bb1ef4ab0b5b6042bed19380bf0de", "b8f09ff53e5a1700492100b8cd1b9e9783485376", "81ec47297e44a623029013a92e4510d504e4a86b", "c3941b5074a3c23ced4b3ff598653724ec7007f1", "f31b271bd0d17fd4a2ed302c3bac6ae4f548b180", "18d4210a5bb56e92045ef0637208685abaaca6a5", "62beb92e4de7b682619eba0aa39c14a39c95718f", "db625c4c26c7df67c9099e78961d479532628ec7", "d492b06c4eb8eeff00e167b2365d60d4370bac0a", "f39d3ed10131f986be5fb8a10b77d44bc9feada8", "2b285e5eaeb7a2aa7e37c5ae6762b838d3742b4e", "a448aeed546a13cad7977dd0e8139f0ee43baed0", "45bc489553c41b489e29c4f538fa554d9009151a", "dbbfb8ab9355d00ec3b2a9be12747e2e20458bb5", "ff643f726e01ad2955d6df752fadc95a1140be78", "03de6b2a3c81b26eecbec2705173da3dba25ecbb", "a81cc726d1f73521de45a59c9f439a31bb341423", "070943ccdaeb3ed97c89d729d93fca6f4e7631da", "76f7664511917bb575081ad3555e383de54562f1", "3e5321136cf8007c98b77c1d5a17823deba20b7e", "9e3fe7f7c125ad60fc1018bd515c6eb4a9e76f2e", "049584922a6bb15ceb25fa1f771f834b9befbcae", "74e537ee4061aa64278dd59a4d135f83f1b8bb20", "a00a757b26d5c4f53b628a9c565990cdd0e51876", "3162505b76ad387de62ec3696ce037f73c60f118", "3f8b082c10561edd3ffc5d67a3d675cfdff6d94c", "07adc7429fb22352946b675023df7db11c905701", "5b35bde1c144cbc96f25c5359ff44e898191dbe1", "0fa42d4478b514b0f961e26bccbaf2b75d42e912", "fc5100e0760955e26115caa26cad4426947fc691", "5fb50e750f700f920f06b3982bd16ea920d11f68", "2a7e02ebdb7622815dbce8cf227189e2c92d026c", "60ed2c1acfddd02a0c0361366fc1a913e68946f1", "77c7f5c5852c189b59c34ebbbbec03e5e4060428", "39db629b96eda72a23a49d54f32689e0651ca4ae", "5dc452b75efb033501e9baaaab468b0a8014b86d", "f19f7bfccefec7d0e2f8adad1e718d4a4b7d31e3", "8a382f000f98cdab7f7b79e543c75c6b8f93b6f9", "0d6765cc4458d7d25577776aa88876240dfc640f", "29d2b60bdd998479df7f088859905379e30967a5", "6f1be86c77492af422e936028858c9180b52b698", "11f476a5da2366cfa6e4b4e2654a0833fa7d4fa4", "79f9a15b4e838d6db91249a85d72fadb07aee927", "f8106b414d81df11ef2e9c26dd83f812711eec35", "b84a0cdb313554f43282ad10fc6305d758a5c51e", 
"409ff083816d8357fe839e3ea0e62d648a5532aa", "6e1c597fdad6c43ce6e404f14f336576d8373acd", "1ebcf5dbb37fcd369530b0ee4df5d4a60f756f3e", "01915181692c821cc5a0a703047bd5b07c1f9af5", "a45ec977363157d40f4c57b8c98d7aa15ec6fd7a", "519a724426b5d9ad384d38aaf2a4632d3824f243", "d4f8168242f688af29bcbbe1cc5aec7cd12a601c", "76e8fd009eb7e126af8de59953b1fb9d3d841800", "8ff80024836dfee9a6a9a38586aed3e3df1d7a43", "edceeaa885f3eb29761580095059f8a34be8408b", "6d4559883ffb8cc611644dce9f1422a98139a7eb", "3ee522805e16bf7816ec4abfaf0c7648b5cb5c95", "6047e9af00dcffbd2effbfa600735eb111f7de65", "a5ee4693668d976dbd79a753c62e0614af2f5060", "97d9c57576a573955c1b21b63f5b5ae44438e973", "34f29a2485b003614f7e6a4c42dac656475b397d", "308dd9a212f0528f5bad80c0954db4b229576cd5", "f4f5a68c8e7a90865c4e1a653db4ae788e387bb1", "448e1e7a993837fb3867ed15dba31d5ca1d526b5", "ad08c6b0b42db6d6ba30387d558f5e427e39b7dc", "b56ffd4b244b2c3094cdb930ee569fb4e3bd95f0", "76fe2c44c1b803425e609b8d847733fbbddabc0b", "aabe235a028a4d533053d78034f85bea39690d4f", "5f54f38043ecba3871fe18529c89309267132127", "51bfc693d170b4171f5bd9f9aed51f1fe8b5304d", "3afae8025cf543b1acfd3b8955b48bdc0979f303", "6cd557019b7775d8647ca31260734c786fdb69ec", "903f8670d13237328e0a0f9f655b8f534a9b6651", "9f9a5cf4e84e99edebd55b2c160cce821c2439a7", "ac0e595afa57db8c9310d72b2a2eb0758dc1e48f", "56d162799d5e004723341492f776399693d76433", "81c3d1be0c69e9d3e13054969e4b67ee69a4e6f0", "353020020597c01f47e59f9862dbf42289e138df", "529341eb910ca5125b4aa6aa83bfc5fc8bf44fe3", "558c4917dc9a1d34f62c0ab713b1b9a37ad04853", "eaf356e0ddf7701fa3d52d5159a78202a4866296", "8c6c743e21592304ee28ec073657bf128376ff8c", "1bf0b5186af083117af136dfcb08ed28828664d0", "0582d338a5e5b325c282e2ff13bfd62cf4d08108", "3e309126c78261f242d21826bfac37412f5437cd", "e06d0d7513a42755ad8b33c21ec4c1660f5e0cc5", "2e68f29f26f91985e0ad12b3229e46edefe1e871", "cd6c2ae00157e3fb6ab56379843280eb4cbb01b4", "0700d9c983b9c52341a4e17b70bdaff59cb539e5", "1d93f7de9f6d2daa77d844dd928aaa1e699ed312", 
"10cb43143c3370e54a4e365aecc29505ea968bec", "1774b5a76d139a5532284f797ea7a36318bbcefd", "1a6b88105b2e5c5f92bcded1acb0769818858838", "4552dfc16a89d1066051b0dc7ee888d0873d2793", "0687e472b5accce40299a6dd109c38e4167fea94", "312ce75c7b86333a03afc2cbf8f53047f7eab3d6", "03184ac97ebf0724c45a29ab49f2a8ce59ac2de3", "5b6c603fba0a66fb3c037632079bdca82ec3bf91", "3b99c007994340381b6a2ba15346ce03c323b523", "7e01ce7a1c14971088afa3ee73f92db451e2c536", "4b18303edf701e41a288da36f8f1ba129da67eb7", "05bb9a8877a82a474db3a0ee65772028a715e8cd", "2d1a072e89f97324cd5ec8a8235ef20d1bfdd70f", "bd4e3d9303fca21cf51c88ae1ade73f852beb9f3", "1e0dd12f2bff234a4df71641bc95068733506858", "20e4818919db0176c6c2ac30a468249ffeaff151", "29d10748dfb3bc6883dae224bc4c6ddf774bf363", "fd67b9812fa4aef6c5dfb633df4406105cdb4e8f", "f31beb62d18d8ac7544b4658cae1d2c7730520b2", "bb6f922cc6f94beacc93aead7af53e9bcb9fe3b4", "a8d3dc5c68032c60ebbe3b547ac948d7cf8dd1d8", "0f96eee0407b9ce9ea01629ed01bcf6802f97272", "c3b037fd6fb4542f7ed18c194a03ae328bcca423", "a53fe4347da39dcf61ac37cee66c945e79a5052e", "1ccf5670461638542b32fc7bd86cd47bf2f9d050", "2607f0093fecd4fee5244d56fcf3f53ff22e949e", "c90c51357ccd50bc1f2976cb63a5b7077feef4ad", "586f7cafee0456c25e850dcf42b38195a8a80055", "2ab9c36e19090ed9ac5295b3704708bdce80462d", "30e2b97b06590b7e39e6e53976c5b8265ed7392c", "a0dc8911c47c3d0e1643ecfaa7032cee6fb5eb64", "0d82ac80275283c3dd26aca9e629ee6a9ca8a07a", "03016dd2ed29f1d8c090fff88f2cacbdb03da5f3", "05fcbd15764437c8d185c79eaf5cc1683f62dbf4", "98c7a6210ca7bc81d2f7092ab28451f47039e920", "a08004ac0fac652d92a6c8bc6d7629f344db4d50", "33c050241a203601b1e64ad45415e24c455ba7d0", "b7c2173668a4c23b79450111887d8b1e4199f89c", "0f6911bc1e6abee8bbf9dd3f8d54d40466429da7", "26a471a491c2fb162ad403ed932b481d386306c7", "48daba0289e036c5aba6e360377b72ed6dde674b", "b2dc7244fbfac36d5df5c2c74de9c170aa7c8f1e", "820b1349751d7e932b74c3de94b96557fa2534cf", "7938d26e7681048690c917b67e206c1b8ea3dcc9", "3e4ec7bdd279573d328a26b720854894e68230ed", 
"91fe43fd76571513c8caf3aca20a405f5d99f3fd", "20717f1cb12ab208458c0f2505b237d8f061f97a", "af267b44c3ae6c2a0587310021a6180962e835d6", "8052bc5f9beb389b3144d423e7b5d6fcf5d0cc4f", "499842b3df387b81dbb2436c764d22b1a3f42cae", "355de7460120ddc1150d9ce3756f9848983f7ff4", "a84c039818d2abeba21f792c0522e9f75582518e", "4212a93f011aa47c6344c0cdc3e991740d8c7c04", "eecd9a070ed333077a066bfdcf776c51c2c74406", "7a6bacdbc04d06842dd68d27b9f065f472b5aa1f", "6e99832e265999194aa88958d892db62afbd7ac9", "4701112bfe9946a97a60c2bbb2d47dc784942c3f", "21ac5d1c34675bf6056d2670f9fa3dde530b1716", "7249e94317ff7cb5dc39441f3473a2d4f1c1d30b", "483ca50670c5f7d33f7c722dd71105327a30ea60", "10d003ba5062c048f0e324c897f849b0c9bc2aab", "e778e618862ea1c9a97e89e942228c4de98c9a86", "0cbe9732bc80761d7770e952275d5757b1acaea2", "f824b40ae80534bfd7363a630100504bc8ae1316", "488fff23542ff397cdb1ced64db2c96320afc560", "45556fc103f8255cd60ce756c6f9ecdaac3fda79", "7c0773c7578433a2277e919ac824f142d5de351c", "256cca1ad1ef681121bc617f32b5f4dd96aa8ac7", "113a31b4c58c613d5847b0d4b9890795d33e9189", "970e723404885e94e77780766b39ee951dd7abb3", "738d5a6491ae0fef5d2debc17f951534061cf6f8", "8322ed1a3db7c63af40280a782e39fb01bfe96dd", "431fc5903ab4853820eac6614073c5b7aec0ac31", "3ce92cac0f3694be2f2918bf122679c6664a1e16", "da995212c9c8a933307cd893d862f5bf7d99f3ec", "8627e6ccb42c909b5c1f94304af986472effb6f1", "506e76681d02dc3a3748e326fb57c4e4ab66778e", "e1ca483bf02befe4a15f6a4b7d8a3d7f39cd1d05", "3e59a900cecbe5dddaddea51f97fb7d12d99b44b", "7e25544be9ba701c8cf02c841e0bbadb36fa0e29", "996ce2e3c1b7d0b37a1d18c24f48b0aa3d4ced6b", "5bf4f97b631937b2176db9c80dee965e2e2286be", "913e85383c8bcdb53a55be8e4ef50814b3193417", "6eeeb96350c676bbb9bf765851362e590e32eaed", "c1cf510bdb4a097426bd95c0c2e970081d3e9ab3", "1dea8b1bba7f12575049d92631706b5999d6899d", "558613d96d7c125c00eae0c58c56ee6983208fd5", "5fa35645a0fe8f5e081302238a70a1963a1276d7", "a7e5c01e3dca9284f8acffad750cdbb29689d3fb", "99a898c0e45ebe96403c6d57afe678ef2517e88c", 
"f76dee0d19c9ee8d59466ad1e3bb91cae5a17ac5", "a7b4291b1feebaed4a36808df2a17a3e452b9efa", "e344cfff1ec2a46e230983157ef34efba5d65340", "debe5f241a678d40b0457d448710d9a3483787ac", "e55d5dd7d8dfa8f979fdc0769a209441ca04c96d", "4a19f6545473363b16d4a10ed13fef29b38856d3", "073bcb3b1aed5cdf7bff4e9fe46a21175f42c877", "541c68e2c65f6dce6179801c9f92dc7803dc71b5", "553ffb04c193eedde286c944f4816d46248d9822", "f63f754ff445a1651780e58577bdd5e823847146", "5326fb54202d535b210aefdf396eeefcf7d0ee7c", "0657da204bbd5f1e92882b2ccdf4f883659ccd37", "33ea400ca2105b9a3cd0e3c7c147e06c2d3c6d79", "4b8ce1bfedb285d8d609d1059dd0183420d63671", "a3825a14676c5f88478af2cd254ba4c531d5e92c", "1a4bc1821e32acc3a2a8dbd9774b8d201e9f11b9", "3c1e1961db0f0a351d5a4e21cd30bcbd9f88be57", "95a9e256c8f8bbce0d86199cacea92b15004dd45", "f9e92f5768998fbb876fb41facb1bba17b10c7af", "afe987d36438efaa2b5116c444b5fc47462f11d9", "0c60805cd71725cad770db897801d149a97788ce", "022c485c5617bbf0b7f40475f9758cddd11a91af", "2def47989c6f9143184b5eaaf3aca3f2833f3e05", "429b8d5bb05e1a580fad0222b9e9496985465e40", "2e1822bf06d80f5ad07a79a4bfff98c1c18fb573", "88e30a988d4a496d61eb241d4cafe5cc88688ae6", "529073b49fb28e8919d6862f2ae445477c7337bd", "bb4362bd6f0bc5bb467fc8f169723243caa97d1d", "59cca46a0442fc6bd0525e5f13cef5b5a9747d34", "32b7b671c786aa74f7d8f9817b12b3a59c0b84c2", "7f2061fd27be3afac4d020a87ba40fded935a97f", "37d969028935706c3f1a3200a6d7ec25ea3b30c7", "ed7f167c84372512dcbf9dd38d39879edde6819e", "adf9998214598469f7a097bc50de4c23784f2a5a", "423cfa55a14cd92ada32245b416b587ef9c29308", "eba3fd6a446cb043c0347c9b4ce40567f1ce9110", "7c7a23e8e846c1e1a6c63925d73d0d0806a040ef", "6ae4f3ff909ae6171ad54e8c5d942d1c83706e45", "8e67f2e8b15a1b0b6b0fa1bf7ca6a72b5d0a51e9", "b0a96954377390e80de59f0063e5703a21391eb3", "c1c8fec3f30a3708dd146ea056d54a2fcfdb5880", "9004a833c65b89c88d2f50835dc47f2319a2c3d5", "57aa1aad8883bab59054f95d6d2771fa9d271df5", "0d48c282737793b234c56382053cc69cdddeccb0", "25d35a9153463faaef9413c26db598f36755735d", 
"bc7f431c4c5cecfc7bf95b2f0704d81469f23580", "70d71c2f8c865438c0158bed9f7d64e57e245535", "833a2c168849697aae3589bbeef0cbca22808fe8", "aaa29212a3f9e6a35f78600231a690b4a3c83fd5", "9d6a2180a5f452356526edd8b4833180fa09cb3f", "0da5384dbd1646ed722bd9dc7f7387cbcadcb41f", "c9811aa6dd338408e3f4eee7459376a90c2def4f", "6f3c76b7c0bd8e1d122c6ea808a271fd4749c951", "f2efc85f9e20840c591b4590fd9ed202f727546a", "688680d9902f688b9ac2d47c399ceebd1014d785", "c3f48742f951084c085ac070690ec2b3c489217b", "cc3e1a6376928138dff5582b7a56d40cfb3b7367", "c259693737ce52e2e37972e15334cbe78b653e69", "745d289e559dcbbfb938790735aa221fbb585a05", "7dbe77557878f9a2ac65c253256d498100711a10", "0ca475433d74abb3c0f38fbe9d212058dc771570", "b3d8438c23306909c04dabb4c2d12d15d82959fb", "6e885d831568520aa95f523f625623e46578efd0", "dfbd1f1392720fb3a2ae149a3de66ee53a5bd073", "c00935052d80cddedb53da1ed042055faaf3aa1b", "6d432962055a8c521e6b388d5a0a2140a0019a5e", "50b548facfb728af123befabd6e3292c44f1a8c5", "44442a26062c20dab7db4a9862349b598efca119", "95c3749c8b7a900ece82b3d5fbef16eb0c853112", "5509acb71d28a18915939b6b68a2620c07f6cdc8", "2dc62458979dfc00ec195258ea8809077c5de442", "b92f984f328851a5572e38ee816ebdcc515f2a0a", "63ada149373f27d1ded6df24acaa5871d0c3e379", "ee2ec0836ded2f3f37bf49fa0e985280a8addaca", "b88e0c3a6a95e5193085a258cd281802852e5a4a", "d4ea0438b6c0479a7d7611130a0dc242a22f93eb", "4a374a6fe2ecd5f4889d7141a0521dea087ee667", "65d588e2ff7b4f2903efbeded978885f7da5d0e0", "b14d06fa5dae7428b946db9ea48baa81a4f5a4cd", "528c03761682f73eed7d736c19551856fe92b1e1", "359742c64ed3700f021cc72685aca41c7ec166e4", "4bf455b329fe6b860e8936098d83fff246f207a1", "4387998d52fedcab7d3d7144662787152e212893", "36d01ab112e0904ed59bb5cbf3a05352547c5a1a", "d79121a03584123fad02c4f2607f0e63d08ff7c2", "745ec003b7fbeb52aecd00c41ac889fcd4d88bcd", "a175f20189f028a1420b76ae42f6dfe99d8d6847", "b97cdc4bd0b021caefe7921c8c637b76f8a8114b", "794fd0fb684f90704e108677edb40d3ff6a85f8c", "3f0b5ada4ad2090e788c40a8f6f33ed4fe539eee", 
"7d7b564aba3161231c789169cafec38342a18ea7", "33e0b83b705835c1d0ea878225e82f4a0f25826c", "9a3fe4631e8507e3409631d506de3cbe793f0b42", "fcba7fa09f8bcbb203cf09b149558dabbfc47d13", "20be0f8992db53d3fe88fd3b03d9768da6e256cd", "6583e5e9c01da5d70a9ccba799fd53bc4ec015d6", "6cfb2876161b21e9de46ade1dadb7779fd1ea7c7", "c55dcc587a53ff82cf3f79d84e7df67f4c8f77ed", "22894791cb1e139177cef3fbb1ebda417a4b549f", "ca8b529e389381c8b51ddf83788b7a3eafb8f859", "5cb1277bc7257e7b4cfc1699199c6d8e13ff0b1a", "9a00aa841e631589fa5c4b3ac4107e4dc9b25f3b", "6ade1e0d4744d2eb5bf7bab97289ffd7eeb5a661", "5481464c7058475da126796df43c6942ee12c11b", "4f567a5f0a8ab016e17838990cee66e637fbcdbe", "bdd85b0db321ed22a3b2dc77154dac8b3ec3ad4d", "241b86d3c71d14b8cc6044a425b047a0724cfdc9", "456ae882c62434974448c37086b01fe707e04f5c", "a8896fa513ff0587e2e8dea0f3ef585d4d04feff", "093b6af0e5f00f9578088a49822d8d500283cab0", "bd5802008156ed1ee6919ccaf21ba6c06bad2a4c", "6245c9eac2de0a8e63ea658c8fa1486ece855d8b", "582ec12e629b2efbe1aae96fd331404e7a9a855e", "e4511fd8a4cafcb270df76e594017dd70806f29d", "b285337ba61c2bb54181dbbb4f4863efe1aa6ec2", "c458e611c0606771b508c1a8bee4435e02ef0eec", "1a5d52e026f877f682bec19d0edb81aedc6e14a1", "fadd240a54d0598c0cfe679d3cacfff0300f13fe", "8e3e61d1b14360bdf7a5c51b3be77bfc9a4823e1", "842ad1d5b6ea8a982be544b562ec91d907f879bd", "3f4377109a92cf4e422b7e2ae95ef3144323ea72", "c12b241a8b6fcd28c6af8ed11c3746fd2a6fca0e", "56c0f83b7f6a3a9edfa9dec3e2ff65baef089d74", "6b4da897dce4d6636670a83b64612f16b7487637", "fc27c2c8a2486f5918451fbef198f46b5bf45d2c", "821ba3eba1e36a29cc482f5378f4a0d0f6893159", "277096c5e536784da9856ac083a972715ce9f9c3", "07faf42fe021a0965a07ef7273d89a452aec6b90", "20a0f71d2c667f3c69df18f097f2b5678ac7d214", "e0d9d6bc35f1ec90d5e41d9bf176b2a00dce3a97", "0dcff2dbf287a6e2937f495e1cd887297863296d", "040ec1bab630b4609cb55c3e0e2dbd4c3064d8c4", "523303e477d3b5f27373047c576b9b6dbe478f8d", "2e0eb98d045565978f048d1eebc0f0f2fdf020b5", "3d32f5e055d6e441d059b1f55b3cb3c1f5f85c26", 
"678b367b2d5250f278c994238bbf816098252d9d", "b2d96d82d9504fa3e0880b12c4b1c048a7c3afd9", "b9054aadbbb91f74d373cc82d70b7c513e47139c", "2ad7100498f3e4d00ec4424099b90fddb659e972", "b2ea00005c5c8a900ef64fb625b7e7b9f766719e", "186370ecc1f05ef8d3f611873a039fcde3af68b5", "06a632adff4f89e8ccb001bfffa1b8a558015938", "a2cdd215586701c883dc3959c80f53ee5c091fe7", "4a0f98d7dbc31497106d4f652968c708f7da6692", "ce2e1bb891ffc0b114855a92f78e8aed289073af", "5c7db2907c586f4f2d6ae5937b0dc0f4d1bc834a", "2a56a51490f6ccfaf6fcbdf546a5515bef5203a1", "67610fad509a008cf8e9e47b47cf8468b4f0a5f1", "ae0a0ee1c6e2adcddffebf9b0e429a25b7d9c0e1", "8a91cb96dd520ba3e1f883aa6d57d4d716c5d1c8", "315ff3853dd408b765fbf83344974eda9ac37705", "8748c232a93cfe595de6938f209a170fca51c1d5", "771431afa9b5c936dc970db8d02ae06f49d68638", "947bd44270618f5a1b046b68f1ada3c11d97b440", "2bce7f8a53fb8ec93dd218dbdf55b48ac54ae8b3", "6ca21247f5963f6d459e09278812d60c35d10335", "4a77c8ab5d538541ac5f37ae6200d34360ff36b6", "77a4096c59f51711469d4a2d8936fc5ef62ffdf8", "46d29ee2b97362299ef83c06ffc4461906f1ccda", "eefeb9dea84320b1d9ae5997a2368516f42c2ce0", "3f50b80eca9d15b5626ec05062c48149e98ab3f5", "2ea46531f7d837c1e4b9e6a8d8fc084c6e526545", "0f5700e8aa4cba32828ca12cd4e3732a33148951", "25afe234435ede5fd95e47c3b58ed2c1da318f46", "bea2c35ef78eb40df52e27cf4098f28a79bcbad5", "799629203a7055764380b12b0406b94653de325e", "50ed931266a22bc166afef38f4b217fe9b4d5d74", "a33c5b508c64d1b01f3d4567835de6a4242b6911", "0af15b37d9a24c05df92d9004d11f78fa69b00c8", "e8ff87c9072d67dcbcd5491b1e5a0cecc2ee309d", "fab0d19c58815eccb0db7215fe45d6a32066ca1c", "009678c2034cf4a9924a78d533d2ec81303a946e", "36fa002f36e14ab7d24ebcdd99b6589ed726b383", "dde15326b39e5a048c27ec22a2dd3d47959afc56", "4eb2903ecfc5dee98c5671c9459bcea71c59c79d", "697b0b9630213ca08a1ae1d459fabc13325bdcbb", "685f8df14776457c1c324b0619c39b3872df617b", "9d8fd639a7aeab0dd1bc6eef9d11540199fd6fe2", "9a7858eda9b40b16002c6003b6db19828f94a6c6", "088aabe3da627432fdccf5077969e3f6402f0a80", 
"07dbf04089b015db773fe95e664fa73aef874b36", "bcee40c25e8819955263b89a433c735f82755a03", "6888f3402039a36028d0a7e2c3df6db94f5cb9bb", "1db45038ff49e4220a56b17a3b255df1c97b32c1", "d53c5a974f9fccf18f3c8f7d73522d6ca7162115", "a35dd69d63bac6f3296e0f1d148708cfa4ba80f6", "54a9ed950458f4b7e348fa78a718657c8d3d0e05", "d02e27e724f9b9592901ac1f45830341d37140fe", "616d3d6d82dbc2697d150e879996d878ef74faef", "49a7949fabcdf01bbae1c2eb38946ee99f491857", "00fb2836068042c19b5197d0999e8e93b920eb9c", "3107316f243233d45e3c7e5972517d1ed4991f91", "ff9195f99a1a28ced431362f5363c9a5da47a37b", "03f4c0fe190e5e451d51310bca61c704b39dcac8", "f442a2f2749f921849e22f37e0480ac04a3c3fec", "4f3b652c75b1d7cf4997e0baaef2067b61e3a79b", "751fb994b2c553dc843774a5620bfcab8bc657fd", "122f51cee489ba4da5ab65064457fbe104713526", "fa08a4da5f2fa39632d90ce3a2e1688d147ece61", "587c48ec417be8b0334fa39075b3bfd66cc29dbe", "3607afdb204de9a5a9300ae98aa4635d9effcda2", "10464196584476a7f3d887fda42444d08f5f8ad4", "0b6c10ea6bf8a6c254e00fcc2163c4b6fc0f1c3a", "763b5765f7c22adfe2baadea8e891562063a68ba", "333123d85fb8142e5942459d8000fe4db32829f5", "6b5cf028b9fa3191119067f087b189d97017d31f", "798d042a70b2c824998b3fc39a6e21799b588832", "034f7d5b3878f8b2db92a7cb7f12edcd5681eca7", "25b367dd1cc584a89e8fd7b34a7d98d212a9f168", "54df4b428f1f07331e9711e8c029375224abac18", "b1cdbb1bcba07082c7172ef9c99f5a755eab04f1", "7684b221e5875dc4f1526036375898f3d2407d8a", "3f2dbb34932edcb69295e57d4b8d6a8f68e28df4", "c0afa514524a4cf4b1772c1738ceb6989bff1b71", "63470476067035a47bc14c217a5a458d2c299f04", "85c4bd014c082dd2b0e53adf8b128a5edf76d455", "b0771b7ca52022b37a563464f823af67c0b36c03", "72c69a7a791ff86f84f082b73e09733bb90edfd7", "c478faecfa337bb2d37ac7c63aca47d4148ffc6a", "a746bf64f2950d290b96033c095514f3195eac29", "6b6791c0a3f06c356035747f7e5f87d54bc5a657", "22c392ef712fe5ac990b4e0322e0b0ae245829cd", "c5e089ea32790a16a30b986f6d5c9583a346e143", "51c2c974558fe4e194ee7c81925580b7922add33", "429439611b809f364fe4a9d9a2239bfbb815529d", 
"361aa2cfbc51ece34be511986205095363db94c5", "707f3ccb8b1889445c54ac2cb6452aa646346a78", "2fb092e2f36f26e8cb9f8f3443046be0ff24f4ac", "602e1a579a0996142e1ad24345f5c1d5b57327b2", "4eca3e3c4876fc7ec81224d4ec2f159c9e7c72c3", "ec0239b3547639195c95d322632b6a83b648e8df", "f5f87a0bd3afc8372759f0995bdef7106381ab7f", "87bbc0c2284794d88d0fef44b8ceb6c2e1e2c67c", "9fbcce73bc2d28f12764cabf22fb4ebf7b7c628e", "307dae1bfa57c0c5dcf2abd22f2e16f7e894fa29", "4df54d4758b1a883902c036b2a10ef6d0f2d4af9", "7da866310e9738a2d37007aa528463188d943ef2", "1a24785a6cb7f245728962cc46a5a491c7afe61b", "d8ddaddfd4843d483505718f3487e312310ba23e", "5f0b7245bedfc984b327b8e144c3cba9d9b2a807", "bd6333229199e7b4ac4b9ae7a7cae50ff9b9f5b2", "edf254f131ee6bb42555bdafdc4775a3edf3c28e", "b6aa10c634aeaf1af841ffcdb2fe7d3825f1870b", "530914de437419c103f7aa2cc687eaf53972d785", "0557776160a80a612541f0b3eb79b30e884e1b96", "7d2154d95380864f30ccd5842c5f46bbdf0e374a", "23b93f3b237481bd1d36941ca3312bb16f4beb58", "d0b876118eb1c9d0b397d29c47de904354d95648", "b10e2efc35ceae28e1aa463f44bc76f3b2d3556c", "36f039e39efde3558531b99d85cd9e3ab7d396b3", "6ae02dac51860d9124db22a09a160a6478123b33", "92020e6540fe9feb38616334645a0ba28dcac69d", "89a1e42f645c657510eef13c151851f945eb3681", "1b7cc9137b493791f4d1e38efbc2074f8fb9f3e9", "ccb2f6a7b662fb1d6b8d70e01c615b4964b594e9", "04df36ea27f14f96bb1b33d76103d1dee7c6e0ca", "057d9240489746ad35bb73c2d1c86f5f27a733a2", "c1cf5dda56c72b65e86f3a678f76644f22212748", "53b88d7b05b9986b3d8f10c0fdc647738727c730", "5150f982e12bedb4f3e4041bff579233a8164f64", "6a59d68bced702ffe02d46285982dd31dc53fc76", "fc1f2d9fe13c26f20d715aa738bf4302d4093016", "58386e36dd873d4e2c49bd522ed7f32d62990f99", "fb66546a16751810754430286fe4c636e4411ca4", "b349714e9eb089c3a756c03533525cb3d5a84ff8", "4f1a74cfa7c8383a5dea97cb48c197da5b4f5ee0", "425d8fedbbd64ccd630851cd2c2f618243977cfb", "8cc48c7e146a1cc8c6839b32b7692b0bf5dc6fb4", "6d0177bb1cd292a2ad4a14e7b9173fcc8b72569c", "5be6340c55d4a45e96e811bdeac3972328ca9247", 
"67f78b4b4669bae388f4f2c9660f265a987e8f22", "158974dc6503cb4939b87a1fffe17871e8a48c91", "340866188aa3865642d374a48f56ed2741b3b8de", "140d5459e2ab0499a3a9db41bc75bdfae81d5266", "75b60cdbaa2ab77b169ed0d1e478ebff07468ef8", "9e7f7e273be2ebfccbf43e20d2683368ebd7e5fa", "67eeaf667b5f948d763a8d2a7284b7a394145d12", "244293024aebbb0ff42a7cf2ba49b1164697a127", "f4c2ecc328a1e9ccd13ce785f97fc2febc0106ee", "a9ff867711fae973fe63078c8eaf759af44c0f26", "377c328e833b690f4788810e9877c80badc9b199", "65d9839ac30215b5c38c5fba22782985f6991d74", "60f1f1c56ff4e607bb34cf0e1407ebeadc864817", "ce3ee08f4d937a6dcb2d6dd0a1ca100920f312e6", "312b807a24b8c30876c1750530b08e4d9627e231", "3514f66f155c271981a734f1523572edcd8fd10e", "fa66007e6c159c374de78f2d3e47317df7fc8214", "47ef3ae5832b3985e9f461b26428867258d4b261", "a1132e2638a8abd08bdf7fc4884804dd6654fa63", "230e45294a95a7b773a27dcc3fc52ad5e116f9a7", "60d4403ed3d5bebbf89e8627acb1768ceb29e8e5", "8ff73c1d0f4f0a190f889ad7f1ecccf709c25929", "d1bfb6a9182e5712d8aef46b2fe93ef4ad4fe705", "10f4d6b419365b75d63f24c8e71a3440ed5b5dc3", "54966a5ac5a2aa19760fb5197889fa9dcccac1d1", "a86f388811dcd726239b01b317389743e5896e50", "4244d3340304b114e5c00e7b5797d2338a5c2b82", "99f1a82f6439ae8280327f7a378512ec1488cd21", "04ef0b28534cdac18a2059f73ecfe940d6bed277", "cc38942825d3a2c9ee8583c153d2c56c607e61a7", "826f9e286eb0c9165c04bc5811aa7793050c7666", "b5a03ac5125ea25a9aab05577a382c740420ee47", "11fa5abb5d5d09efbf9dacae6a6ceb9b2647f877", "6a1fd51107770edbdd832a1934ff5461e891f2e1", "66aaa719c84c5ab833eb9cd8a8566d9a110ffc48", "a8c138225b8f768fda9c796f65edfaf53f3bc867", "3d59a9d07295173bf8b578d0c8efe7322df5701b", "e7e9a228d1562ea991a31ecdd2c10163a95fe7d1", "7aa83aee1e8b2da7ec90c67e63161c24e85f4ba1", "01a078cd25b7ce1049efc07bd754980771150775", "239c06cd437832faa55a8e7292c50e45229a3d7c", "080d9658e40581c7ba8c0cc1d86d1157eda92a3e", "2f6baf1fc904f92834880264d6e9569284ad6487", "27b9e75bcaf9e12127f7181bcb7f1fcb105462c4", "c64502696438b4c9f9e12e64daaf7605f62ce3f0", 
"97946f13c1cf8924b0c1ce88682290ae87d630a1", "d8ce90d4905578ffdc6bd8665fa24d8fe90ef3f8", "7d7cfc8dc71967f93c2b5ec611747e63c06e1aa1", "6ca45b402a204351691c6f12a84cba3be1c5fd56", "7fd4e67938d02452e256c69822285778f95eb045", "027f769aed0cfcb3169ef60f182ce1decc0e99eb", "fea7c360842121780ec29f4cf0f65eeecd707336", "83984fecbf4dac1a6399c57e147015935d7c60a3", "6ae13c7dcd1d10d2dfe58546a49da09b0b471d68", "52bc0f02e34ed1e2ce1f77d8f07aea2b87813e89", "adb2d1e241933ef363bcf03d865a9219d2911780", "d116a2a8e59e337fc132978d4ae99549c3fd8e15", "5eb2cf87852d84575eafa38677eb50b9f79005d4", "597ed9b810d84abaa30cf0ce4ea8893cb44e33f1", "7663fc96495d699e041f462642a07ba994fb9352", "c165a5e05ececa9a4b4efd55c6fbfede8a06a8f7", "e360a722400cabfe2d5959503ed500125f529433", "26172460c2c47886f8b0e141c15de29c9766bfbe", "d596540ba9402bf8cf2ffd168446c15cec12d6fe", "00f1cccba86736cb6b6f39759ca6749f819252f0", "de1a5ce1abd2e82825a967b199e98480cfb8a909", "a6eb8cb1c35d0f53f8d2c9a404e374c01275544b", "c6338a96e14521930b8844afa64afc03bc9f6c54", "42fc724ef54deb40f05c521db69e2f3becf9fa3e", "75b1d8339085ab03f45c0316b976755b6c5da9e9", "e4ecf4c117cbb4dd1fbcfe31d5076af94a51eda1", "592b108b241d2d062c3035b6a5ba827180885bb7", "f9b8539d48d6350435ab5550fd47451e779d2466", "6a8e4947a72c9c3b9267994a5b8f657ea63deb7e", "bdb74f1b633b2c48d5e9d101e09bad2db8d68be6", "bf96a0f037e7472e4b6cb1dae192a5fedbbbd88a", "f46097e264c7b0e47c4b1d1b476e5e6c1db9cc30", "1f56d8442452c527140909d9f5b857b7eb7c997d", "8f828dc79d32a4a7b3d3791b32718564e104357a", "484c380c322b2b5cb5756c9e94608949fb5d5e4d", "ac57b04359818c17d416ee53ae05a5f126eca4db", "2199cb39adbf22b2161cd4f65662e4a152885bae", "706b1123217febf934ee5c33b4af27507a85771a", "34d44256d910861815373476371429a5f6f31c92", "4d1fc3245b05731a313e61165c1109f42f5b4a0c", "42afe5fd3f7b1d286a20e9306c6bc8624265f658", "dbf86aca1b92ffda84110864a7ce9fcfde9f5f52", "236942bb64f1711b4763424b2f795fb518c9d8d4", "1c65f3b3c70e1ea89114f955624d7adab620a013", "9fe2fed0bbcb9e41e058031b98e7247fea854bac", 
"66c989bd4cc3c9bb8d006d239664a82006ab0615", "b29f348e8675f75ff160ec65ebeeb3f3979b65d8", "10ecc6ca956a3268d94016e6ec040ba0b6304730", "75753c1b07cf3e368762a401d591dcec6a8e8a53", "8dd839f6bd403f5d155267e2740db293eca8b5f6", "73fa81d2b01c81c6ede71d046f9101440884e604", "16a431d87d0f01c6d70d2b7476dfb3948064b740", "511662e02373433c8c9e27d1425707069e3695b7", "2a2deb5dd30a71ab6366dc737359604a9c8f76d5", "867e709a298024a3c9777145e037e239385c0129", "15667845de2531b59736d866531728a771500d34", "4bc9a767d7e63c5b94614ebdc24a8775603b15c9", "dde24967490f58c8d10b2a00f12bf9103bd9b4a6", "90ddf36ded1624251acac0b900c42eed3fe37505", "9bd8fe15e5f6ac46e2815d1105f64aba1d8d76cb", "f28e1ef4a0126207a4ccc16b359a41ccfefd2bef", "f58ee95c2c4bdb1432e15d981dcbdb2038a55184", "0f2f1e6e23e4bb9f16ba969d50582e0064ac471c", "22f1b026bd78fdc2e945bcf88a6d69d44b484ec6", "fbc591cde7fb7beb985437a22466f9cf4b16f8b1", "b4c60fe73d1e788ebe0e24b0c8989e4fda110ac5", "30aff559ad25dd3490712749793547bc89b0f103", "aee92d9db9c313e321336e53a6097c57cc0853ab", "6c36965f46bb21a36f56a1ed2741b7bc4df55f8d", "79d899a672d4ff83f018276fb7fe6b1f4342e8b3", "0431b78553b0ac6c91bea31ed866255697c8308b", "cebda64dda05fd13e54221986182e9d08a31909d", "ed1ca78784cb4752be46e0fd867bddb92dd02ec8", "5145e42dc46845f3aeb8307452765ba8dc59d2da", "75aef130afb8c862575d457db6e168e8d77ae4f0", "0cf9f33546917e2d0edec03037f1a084d75f917b", "13ef55829b636d248dca450bf4dbd743ef269131", "173a1110e3f5fe6a5518d7ceb025730b073bad62", "0315c68902edca77d2c15cfc1f1335d55343c715", "70eeacf9f86ba08fceb3dd703cf015016dac1930", "536d1f74c6543afcf2bc711befd82ac7886d1c33", "7f0fadae16cc74b6176ba940aa2f8b5a0a67e09e", "48319e611f0daaa758ed5dcf5a6496b4c6ef45f2", "78f415961debaacdcbe7e4958cae28f7a922886f", "d92581c452e780710938cfbfa0f1ca2ffccc5d5e", "ec6b3d7ae26d241709df49734b4c1df9d2a27c48", "eae97767244ddb8bfdaa3b370ee0c00a3aecc8a5", "b36833aacc1ae72f9ccb9eee9d9623df19802fe0", "28f5db383ed771c40e9131229de707af844cf197", "013e9e0f712d8caa89dd0881ab8dcf90d687ba50", 
"500ddabbfa3bb1064b6250cdd3d5fe207f7aed67", "9945e46412ccf8a8d2f1d42bc3a5270ac71751f6", "51f9c71debee2ace4c9c29b78a01909f61875acb", "5d02f269e3b9764a3bf5d254a385fd61759a84a7", "c2af954c89972a716968f97a67cc3841290937d3", "e9dc096762f503cfe0d56066c02d27082665b3cf", "b61cf5daf24c5d66110cf0ec6c32ca39f252c0ae", "510ad7d606c928fba52425dc804fba33dd8ff265", "fdfc73fabb7fb570c4d4ac1e29f3cce889da7f91", "c32b5f8d400cdfd4459b0dfdeccf011744df0b4b", "f4e6b45d5b41b3b85728531d9bf0a7976a3b9238", "6997039127d9b262d4a9aa9467c4f4fa3d596085", "5327241cdbfcfa39abadb4753c7f3706bc24f94a", "fc3bc272dbd1ba62cb183db35133b0833b3330b2", "838ed2aae603dec5851ebf5e4bc64b54db7f34be", "0aa5e9d2ff47585dbad5711482a388028899454e", "c9f3a5fe33782dd486cb32d9667fba0514711f04", "377a1be5113f38297716c4bb951ebef7a93f949a", "1ddd4a651389b411c2bd847146143577cfca02c9", "3bd6621394ec33f5d48fc09278b497add44cf923", "0e95f68171b27621a39e393afb7c74ef1506fe85", "d0d58e1885db56bdaa3890a1cd32c6d6a42f5f49", "faed896f28281a77d6847534ece9c97a3036e75d", "1c754b6cd6f672a0adaa1fba07a551c4cd79a444", "8e74ec37f1fa24eb9f112c0ed7b6d2c6f20383c3", "17dd242e6d7afb5d7fafcf9f8e8b201573ce4b89", "ea3503e9dc74b30b4c98a89843fe2ea0dc9221ab", "683fbd7593cf5c22ef54004bb89c469eab2f656e", "3f3acbe919d9324b379b13a0ba54d0035f2b5035", "4871300f1e5a58ce920e6b5be14e89c5da4aa4c4", "d6cf3cab269877c58a16be011b74e07838d957c2", "6ef97af2cf7d3654e294565c5447f573ff9e3cb5", "6ca9f4dd586c0d038bd82d22a1e298d8ac00af8e", "8c522c293ffbb4d8f451789e3f05f5815bf40b92", "803803b5c2c61046d63674f85ecf0123f9d2c4b8", "7e3ea628b91cc32bda7da4c64195d510b9a7a617", "8c995bb3483feb3ef11a236f80f8a493387b5829", "f3015be0f9dbc1a55b6f3dc388d97bb566ff94fe", "ac8bd8663c26d8cc5d73fc91dd269c38d437f47b", "9cd8a2d07f07d888fcf50aa0735d0831edcf5e46", "93ae6b140d7dab7fea958da3fe537b0c80334c30", "71524b02d3df6e017c92f7ccff73cc9b65ee2287", "18a9f3d855bd7728ed4f988675fa9405b5478845", "0d29ff45f000d2fde0a16db54ce7ce014408e4d8", "fbd338f653bbf348dd33eadac1f439be072c27e8", 
"53ebaaca2d09fa3d20b4aee6720781b1ff6fc6a3", "6ad5ac867c5ca56e0edaece153269d989b383b59", "00b1451fcfc14fc8109e8108a61819e821fc4ac9", "432326edbc598774315a0def91d1fc224d732922", "bcebb4ce6e4410a97e03cb2fc4d38aaf29e636e9", "5f26ab1b415e3cfa9d9f20cc93154939f3c28ebc", "bcf7fb98ab0137d8a8b8a952819f5e13ec4648aa", "b357576afb70465e47144aef96955b1e4b9cc1f7", "1af52c853ff1d0ddb8265727c1d70d81b4f9b3a9", "bc14c01ac17f7b3527435a650503ca26f3c84aca", "8ad9c753276254d4a3cf177d5c911b21892561bb", "92f0e02c9f4e95098452d0fd78ba46cd6e7b1f6d", "d4ced2086ccd9259ade8fabdba14e0e4d9fc0c40", "505ea4493e4b733352c921401a96d92b4e6d4448", "5454a2d207adccd04e4c3b4f38520ccac8a21d7b", "04d9abdae728f09e1d1f78e36a5de551c3a690f5", "5746dcc8164ab95dcf8569bd4f37dec58e1112a3", "1575e8413e86570b142b6e268aa945c2514a2420", "162c0826c27517309b7e27b0d1970e34c094596d", "422e01be2612b69d5b567abc3b218a01c1cca754", "0e5e7e5e04aee1a7582268d4d28b7aa91490584a", "62d5c16760018b08e301a940434c3fc2e862c385", "a69d98f4b308406bbeda6ba9871ed19f4b220d1e", "12044354032fdee40405cb12e8bbebb6d073a768", "0cbc3221a07db517c30b9d6605cbe9d103e19955", "dc6b2c758c8fda9aa40138112fafbfa0ac50a193", "84dcd7d08f61fb1f8996bf92801589e07fb18965", "95a5cb872321addb28d5dc22ffad9586f113738a", "f8292dec6d60debe9d4da5004d5195a79255d5d5", "3d61557da178b6210cb67c3d8934afe1fa434830", "009bf86913f1c366d9391bf236867d84d12fa20c", "038277dbfcd767b0a0899de42d3277b5b253cc8e", "5ee42f0f79aa1394bf6d7f51ac9fcdd69ca66fb3", "d6e7217ecf6d163b087efc0dbb8b43de700bff93", "36c948efd76f58ff1a5e42a2b69fbdc04913f7c4", "c3b3636080b9931ac802e2dd28b7b684d6cf4f8b", "5c717afc5a9a8ccb1767d87b79851de8d3016294", "cdcdaf29cca23483ee187fb0c480f3a3e45d9d30", "8c62ce3db6d4147bcc45cba0e225f87a30878810", "c18ce1d706b54010f8e8cd3f87c3e5f0f5950901", "9b305e918be3401e79c6dc019cf1e1563fde9a31", "6268ad4bc516a41a30db566e2207079fc483212e", "47440f514318b438ebf04d9932f5dafdb488a536", "1b544dc335311da2314444e9c11c4940dcd4edec", "df61bc3b712493351c9ab065d67fe2276990e79b", 
"b7deb56d090747314d91d6d5197a0162b0c0e1d5", "185e88da3d3c0aa104ba14a6bb4f5254aff8e8eb", "585efe3c8efd1a4fa2ed8221c278997521668bc1", "7125b81253ce09c46cdccff465b6066d8550c80b", "10a98632ed618c23c58af93e17d90ef654b1845f", "4ce18536eec7917da848be6b5f783d3ee3d49677", "f884a67187929e7dda66091c13867ed0a8a36d01", "84eec311063320352b771b71156292148f25b0f3", "9040e6ed20a5e64885e59ea585373663c5551273", "2f1c3257ced690a0e1c38ddf7792e36b24fcc8fb", "1d3ad3f3a4336b51d3858cf8e05b3bb99730a16e", "b395e6a9f28c5acbf81a58599283753c033b9540", "26be80fcd89edd7d4a0c2aeec463d1aa74f5ff47", "11feb48d2c4c8f8a5ed9054d49e7a13b0f75f2af", "bce61a182c7a1028eed0c0f67e779753a86503c2", "6bc4642c2b8108067914abccea3f48e18aec5200", "b7fb19677b8f6af81278f2e005667c5c6b44a2a4", "234318640a05a7e30b84f1743f2b29d63ed109b7", "f822342fa7ff901149cc18878bd79ab78e01fa85", "c39e7d03648c398e73e84bec1939660459b7c7e6", "175d6e5f9d12a531966f8a2e2eb13e849ae1b38f", "5fd9c7f6efa9902b926eaa8d49b8e46abf3af33a", "599a84997acba7d31fdc139867097858b4a08b1b", "653f3d16a192f23769d277097e981f49b14289b8", "9cf1a5797c9e31734ae881384325cbfb8608bd7d", "bac17d63893e8025d2410828f7411092192a3abc", "cea85314294f9731661a419f627cb99331ad9c50", "9be94fa0330dd493f127d51e4ef7f9fd64613cfc", "5cbb641f3e4a9543912b5a484c38cb89ba1212db", "47ecc0924c2a17a6664d9ff6c31e2b9b6e490294", "06834da1a4cd8df213db142501a72000912b8b0f", "ca3b5c52481b0a7fbe4f8a753376ec073ebedea8", "054953d915f65b66485b653cd2ffbf61568b2849", "439da29cf857151f386e6af488b2d60c098c4fd8", "f04de7248f26ac959c31aedc7bcc475253585eb5", "d8c0f6ae4c23ecba187abdd217e0c10689bbf63d", "ff5c698e1f451c7e6fc4f036fb79ba6ff899285f", "f64d17d32ba3ab1277ff4edf025503fc3ff87c14", "9f259c58034279e73af473f1f47d3ae3dadaf599", "afaa607aa9ad0e9dad0ce2fe5b031eb4e525cbd8", "f42234d1efc179079e6cc6a26667fc942ed61911", "640aa9d6b87d893d1a75e3c49067b9ca1a2babe6", "14d00c5949bad75f888a4b8eb0f9385d71fe0be8", "034050422f90938a43e9cfd292187aef124fef61", "b79873ccb1c03375f58f577c98d5bd7a371da4e9", 
"ed9de242a23ad546902e1d5ec022dbb029cc2282", "e0e71b59a34c97d15e5ff148fb9a43b892d45bd5", "9ea0785c52b24a9a18a8d415d4f0fc3af9bc622a", "549afb73666202ec3c02a59de611387f723c1cf9", "00edd45d8f4fd75fc329d6a6fcc7d87108baa3a9", "d3a1322c988b50049986365c27dcfce42828d2ca", "b16ff1331f961b2067c9464c491b7cbe90694758", "f403f1e407ec2dfbe8481188c17ca39455996c99", "034516f37171e7e6cffb8afa84c1f5d6d12d887f", "01d71a82d29912e68cf5d86720da8d7187e209a3", "e7120dde20d8518413180e8e635cc1dabc5e8072", "2f3f4e0c8a9c63e714a10a6711c67f5e84e4c7c1", "ae6c9610297186e0e1d4347a2d203fe5f86dd42b", "67faf6163bc7a9b29148add1950afe928719b361", "9841df3cc4dc89379039092816ef19af949257a8", "0edc70f3b5550f997d9011c6d4860feec136cea9", "34b124ecdc3471167cea1675a74a0232a881bc69", "03df507b31691baeb7343d3eb70d048943e2d4f4", "0dc34e186e8680336e88c3b5e73cde911a8774b8", "0f92f0cf1fb1d37f7f723892976ca61419768995", "5aa8c64b00ec1d8f64da8deb4d3293874ef9a863", "7189d5584416ef2a39d6ab16929dfecdddc10081", "3fa9bf4649ff5e0d63ee20a546e8814f3a93ca4d", "afdbbc5c84eb4e535c7c478b5227c0138b57af64", "8173c811950c1c357253b3ed7c9afbb21ce6874a", "380b8df0f340e5bbc3a953c62f9bc573ce073b92", "4cf68a0b1a3f49393a8c11f3a18cccc7912b8424", "04af8a7b2a6cac581246622c1d89ac1ba0d769ce", "15acfd430b06f32d17c27be13e7a74bd0c09c0f7", "545b95c768fd4c7b0f2a957359fa00acf97066fe", "bff354d05823c83215183c8824faefbc093de011", "301897fb2020b8c414eb1d93a0ec8fda4123476c", "412ab6f96f0dd928e158d07aefb391926b3d1a82", "7a3676dcf55e22c7249eac7615174309617c8246", "29445acb03961fb27ac9221875c0a25171502144", "98fb18a3079087cc1370bc3c63aa73281c632df3", "0e790be2a341556d80ee34eb8abec966a2018c5f", "5b3d5d1e23298bc58d8cdf9f30b998cede5611c5", "70502590316b61180fdee7ac21f740d1c836c929", "e1e60501677ae67c6a682bac2c17e4fc904ee380", "76e834df333586fa9906afbdabb9a33bef98a56b", "fbb6ee4f736519f7231830a8e337b263e91f06fe", "c56bf47e288e2e0b47bb0d49c0f1c4dd76511614", "d90026a9ca2489707aff2807617f3782f78097be", "a9f03e4bb90addab234423994bfd8c25854484ea", 
"0f5830844658e0745e6703cd08d65c033a97e96c", "282cee05661a690aa525f21b47c6ee39fb26a7c2", "6a41ba9db0affa701ea125e09a2fe7eb583e3ac9", "dc7203d64a985b86f2f44bf064220801ef279382", "693e0da15094071de5eebd2f36f8b4023f91f161", "62692164113ccd9badd188a225318855c4f01898", "a5173a67c7f32582739849cfa5e07278ee6746ab", "c59aa0580d4d81e38463b87e0cb99358e99bde99", "a7790555c65be0fc5b5de9bcb1dc550f4919ce3f", "35b9f09ed66955765dc7703e9cada605948c71d0", "70fa17afd5a2f5953032ed1ecb66ca0c3e11b150", "403d6a09c17268fb4bb0ae953107bf5f78ca9d05", "541a82917883657a9b9c069559e2d0b890904dfc", "17c0094c68d6efd19b80287c51d228fa50750f46", "563d507c86cf72c6c9745aca7393eb84845ae125", "9a15bb1a3b2f92ae0f6f5a6556f52077909fd736", "101c7bfc56091b627886636afcf1103c1cecccf6", "9c85d54a9f6c6a522a267bfdf375251947caef5e", "c599e49afcfc0aa4910cec58b5fc1198153514bc", "2debdb6a772312788251cc3bd1cb7cc8a6072214", "1fe74d637bc5e7d95abcd18b6967e51461fd8cdd", "67e00f7e928e6eab0faf1917252778b36bf64e39", "626a38a32e2255e5bef98880ebbddf6994840e9e", "383faf929ca0cd928ace9e11970604bd03883310", "38e7f3fe450b126367ec358be9b4cc04e82fa8c7", "258616ecf878701824548dec916a8b5094e0744a", "91bdc706ad1d7b246e457870a7eb8caff87ec05a", "a6f82935311e5db41f262ce010ab71632ae886ac", "5b27cb504e61687fc2f59d975b3593dc741050e4", "4cfae149d6acd8cffc12c06ed796f1f84dce0e73", "a9bd9298839bf239bb7ef891d2ffdb2a81902574", "c592b16d04b27ad9a7d676f28a6a2b0649ceeb94", "64c1f5a833efbf6d520a529384014e2ea6b6d113", "822c7bfebcc456e3598304f69eb8f4a2aee46f02", "529e2ce6fb362bfce02d6d9a9e5de635bde81191", "c4d7c6f4b294e26e619b5ea5c90b4f9136a466e5", "4cf74211e635c73ca5816199ef33d10c3462beae", "c258c23d30af319d363ce6d229c664ae08bc86d7", "0c5b32acd045996716f66e327a2d39423446a220", "a2afcabf3b3f4bbbb84c972bdf10f061350334db", "73a7ccf0facccd8943f7e54d19478f2bef9b7dab", "a84d0b681bbb6c42fccb26036af689d682a76b4f", "2431e8f4c01581e51a480f822971b81470979c45", "a30e987e9909a4e307c35809275cf80431211f22", "77052654a37b88719c014c5afd3db89cb2288aeb", 
"6eaf739f30ebbbe827e5fed3bdb7aa73d1241b19", "f03160078980321466e57fe4c5adba1f2c87ac61", "14811696e75ce09fd84b75fdd0569c241ae02f12", "23c9fe37fa0474967be4cc6c7a310dcc87b86b72", "6af65e2a1eba6bd62843e7bf717b4ccc91bce2b8", "41b1069c06735a20f9b4281001285ee2167da309", "774c8945ccf0f5315482abb8cf84ac5d37c60aa0", "b7207c142b0b9f4def3ae7cd07ce50ca31d930e8", "9e7646b7e9e89be525cda1385cc1351cc28a896e", "e24d6b40e956e11e5f0a972dca6e8a3a337d502e", "b7a827bb393361c309fbba652967dee11d16857c", "bb7c5a521607a02e7a291dca7fc33b595c3b7bff", "e00c26e3d16a44baf7be389e94ed0025a0ea3867", "47bd6c1d7da596d3cf79f06ec0de816d10f11beb", "44ea7adc1182c1a95d7a7d91f9845065bd7f99ee", "47fdd1579f732dd6389f9342027560e385853180", "d487142a7b8c84cb47129d91d5837345ccaa88eb", "f99a0d944c3b8041332f3d0ff67f984e29255c30", "369634f497852e05d5e72b12874e2a3db2d3945f", "b6868c44202cd57b2ca7e2b9d654f0d9fdb3b61f", "4a7c76098adcabecd66bb5059a8878d1b0da68de", "ec6cef66d5548514c2df6dc5d365793deb8958d5", "c8da81fb5551941295ad815051d39dc461008751", "8d156f3b4f1ad5d041ae9f50a0b879e25c80749e", "ccd7a6b9f23e983a3fc6a70cc3b9c9673d70bf2c", "8bbdafe007c9e153bac5f3a2e2d898e0a23b95e2", "8139b28693afabfbbd1c5862048888d8e8d77406", "e24ab3b2a7b5938a48ea3c8c4bc29be2b02299fb", "83dc0675cf5ff9f9a4e07e4e20a6bd9642985949", "df58cf3fe7502a91a7d319be11680ee5b1c78e6d", "9947687ffe0bd2d6cd4fe717e534cfcb59302a4e", "88edb5a15fdb10fca4e8275016726210f1953421", "5f92de3683b4fee28ad3f431c889e7c8bff604f8", "3ea1057e765431733532e7b70e6929c75b977aa1", "39674d06b0df1df8d29f7a68d6588d5a94f1414c", "2c86351363c59d0f39f9f0296b149aee50047ee8", "446f003afd16aa932aa87c73543348f62eba0e67", "ce4a7531113d193f14fdea87d59cfd965ff8a845", "1efd06c3ad14dd94c3f7c10e8b54fcf4ce5201a8", "95f26d1c80217706c00b6b4b605a448032b93b75", "1a7db5776181155f90bba4df97cf3206ce0271a9", "80c8d143e7f61761f39baec5b6dfb8faeb814be9", "14a01628169a3a060b6af5d5dcdeeb584b648abf", "3e5917055297a6e9cbb5986fdbc0c345602b02e6", "829a9912d0cfaba106d1d594caf8aa97f9ffb318", 
"9f42dd958afc027d42843945623d8e0ddf4185d1", "1c7e1248ce254b3a9a0b6fef9e37d37620fc8aa3", "9ac7bb9be33f41d02754bc33a39974496ead0b27", "b12deb63a8964b3d5cd6a95f0b4860bbb377f630", "00091891790ee77816ebd785d25900254e6986bd", "67a56dd94906a5460c263e1a1b87fa3a52c4b453", "f67f33f0d739310bcf21ce79098041ffa5ffd6ac", "354650dd4057808bb8e6f080186ab9b85702b263", "ba1d0d2cde169bfef82dc271b580ed6688982e7c", "2596b967f0768b64a12e53b24661ffe8f2ac4d1b", "66a153048f20a04ea46c0bf442be3eb4d56a2d30", "9c75db3d0abcccef5113db7bafe2fe5ab0559dab", "13f74a94d492919a1ff13af16e2df2ab1bedf04a", "26c8ae944746d91256a8b9c283030b3b34a2c929", "220d62414053519f7b9a6aecb4aa9f775014c98c", "240c11a9699b95e69ea6cfa15f5c8de6dba4f09e", "7ed5dca8725d59714d61ef8e1a14cc4b71c56d3f", "c526f3e27c7d8ed5e07cf57ab378f17e1c548ebe", "d462d514c0a177eb82aec8175bf431189218e393", "034b8adb06a8ea330581ed8b733166c18034c54c", "89ee33b78797c0d6219d31200424f88ba8fbecfa", "75085220b42ea4a0ce5f269fd64c5414cb423e40", "1063708776b817751c2054e34a33efc9ca39c41f", "612215d2a81b254e92e5632e144c647e52568efc", "c8eadd001e5c0474ed02428e79e8d2071151bd1c", "b4b01120b61a00ef13f8666b0cd93d38a57d2b58", "4f81f0c0019862046710d70b6ea880f989949e9a", "c46bcb02f92612cf525fd84c6cc79b0638c2eac9", "1a257ff73b1dd95f905dbbce9bb233033d09e959", "eedd405b9c44da778ed3246ec3df2d5b26ca0f7f", "3073eff17368262d7c605bbcaf3b2fb015754d39", "8039c28bb693b872c732bad66eab34663df88f35", "6bcd9143215bb4f1bde742fdd0d3969671231831", "8ed32c8fad924736ebc6d99c5c319312ba1fa80b", "087a507075819e5b7ad886fad3097b23470f35f2", "d8c9c234cba6771f7d8f794f3202ead9a7786647", "841c226a543d810561ca2d3193c44dadc154f45b", "656b6133fd671f129fce0091a8dab39c97e604f2", "1c9a86d0b354e412da4b7c76fcd9cce716c8b09b", "52d349d2dbd86ae697cbfabb058b069b5d57ff84", "07c47160ed02e28164b90fb9659bf2881ffa79f2", "3ec98dabbcf440db746d4baf6550e44e12c5ee76", "bb80f32bebfa25c5999a43af19eae7cea8c183a6", "c9d272cc836ea5bbe946718d2a4f8b3d57a6c811", "06e768d74f076b251d53b0c86fc9910d7243bdc6", 
"0c17c42d71eacd2244e43fa55a8ed96607337cca", "e3e49e5807436d08c01cb953bfd845b274c4a85e", "72458e19f8561e74471449fb4cfd97c8b9b527e8", "4798e98fd4533648833c491b1ba289e370757371", "ffa4bbfc1981fb5c44b09fe22a38b91573814e11", "92f4d324150c8da468829d51d1df8f659752f5c4", "e000dd1aec1c7b1e9e781ec7ea66f2bde72faa5e", "a1be53dead395b2d83a4009bec76729fce95af83", "e30b70282854264cb8560da0352d8edbd2792439", "cb53c8a85d58ccb2635be5b7ff978ea6e8b78cde", "d92c08d8d10aef4cff8342f9251d0112e1c2fb45", "de48bb3a9974f6f1ed2aa36d066150015f9f8647", "0ced7b814ec3bb9aebe0fcf0cac3d78f36361eae", "0480b458439069687ec41c90178ba7e9a056bcca", "26a5136ee4502500fb50cd5ade814aad45422771", "d8f7b26d25a026fe43487b6f77993e11b8b333e0", "d65bcbcddec932480c434f0ffa778e429cdd4ee7", "8de7c496c1dac3be5fa55de72867325153b119bd", "e845e4da77080e61ccb785c21b2bfeb6fafc3806", "bda61e9bcf02d02f61882790dbbdad8e4fed0986", "4b9b39bbdac95e24773789f1bb543149116cdc37", "978d9a5251028da5a23fd0aed8234ed22b4918c5", "3ad9f6c1d10a2d1e86c93a4182ee3b260a6f3edd", "fc69b65c7d03da6811689a2ca825c1bcf62cc7d9", "b3fa62a7028578be8d1f8eb0877c762a4d6639c1", "4a1d0719d360d2d4ac020ec75a63129bc9e5114e", "9c5bd5e27216f22b6dad96b04673e4ca63d0604c", "1dc07322715e093c560b30fdf1e168e58e9a9409", "3b448e114097439f02bed44bceb6c8f823ba5f51", "82766851c790a5225f3b932239e831e1b60f5ee7", "a19f927f79d94331914cf4bf77b4617d9cdb345e", "89efe00633f660b75ef32aa246b36efd85619da1", "af97b793a61ba6e2b02d0d29503b73b5bdc2150d", "ead6645a3b3794edda3c92926208a683008924ca", "9ded7bd7ee896de568cbb0281ec553c21de93131", "810ce6d1a3fbf6fd850b0ab75f4a86db02926dae", "c093506abe0d4a9fe2abc4f7bb0fe03cecaca4bd", "98aabc3bffe179cc5b6d8137493029338f470647", "3548415e23b536b9e41aa3d92c18880f38a1d80c", "f6cf220b8ef17e0a4bef0ff5aadc40eec9653159", "e1cc833f301c42579392f21335b70d0216b03ab4", "ddccc58c84de4ed2051f72f9870f3f958080307a", "568d65d0d8ff878ada581885e94eebb9ca96fa59", "61be827dcbc2f9e6e7828042e77bb13b11b321b4", "1665fe64f8439a1854595e2e73394517d44c35b4", 
"192723085945c1d44bdd47e516c716169c06b7c0", "677172a408cd8378261a96d888a5cebf68193842", "891938a103b72bbc9f277a8106acff72e7f9b6d5", "a010835842ac0e49eade395f056e1e33d45b6ea5", "8ee80acd51f30ec64e8f356764ed245478151ab4", "68003e92a41d12647806d477dd7d20e4dcde1354", "f07fc87ebee3d6644412cd14fe6dcdea7c7a9f26", "58192249cc6aa7b21ef32685f610585d5782544b", "dbee5b3340573d478244fb6ff4ddaa989dd11d50", "dfd94ec697ca1f16b5171e884626f014d2bcd754", "3d5b8127ce57279f9fd77d3a24d8034b485163a4", "1d9bd24e65345258259ee24332141e371c6e4868", "a61a3f7870f7cb0ff73fc9470b28ed94692efe74", "bef6449f2c508324774316e2989d58b1612fa49a", "ead955a336df22f4d5f9e467dab1fe87542d7f9d", "2eba2865c0ea8d228f5289b342dba4fa163fae17", "36b19e6bf2f0abc0387052436956a25b37488134", "fddca9e7d892a97073ada88eec39e03e44b8c46a", "c8af2aec7729fc64d01773a9f603ddadf8d88525", "a7beee421dc8f074338f32d7d8bd2c40a39176a2", "f5c83679b73ab59c2ada2b72610acdd63669b226", "13b8800b6ac35e80881e369961541cb6c110aba9", "12b3116aee4c8119f6d822bb32637ecc57704f3c", "aeb67c789430584e6ff34cf22af0d72b32c83ab4", "fdc60fe4654b5efe0752acabef0ec6258062be0f", "6f25be3ed3b3f872c373b151713b0a0992053c90", "aaec8141d57d29aa3cedf1baec9633180ddb7a3d", "7289a44c2714c75eed6bff46d9bdd399bc6f8ac0", "2cc0e431d7cc0bcb926b9a19e7be8a3592d670d4", "c9fbebe22206c7501948ed6646154a377f24b6d9", "705a24f4e1766a44bbba7cf335f74229ed443c7b", "2a4676ca1f805cd90aaf2b3193399f5ac2770987", "f7f6eb91b6ad95a3b47c0ec4ac02f323b98cfcfc", "16d7ca53474bf74cf242472a5cf7f427796fdd04", "fb1919aeaa26f2c559268c52deb68463ee58abee", "6fd3bafa25bf6d376bc9d1cc1311eb260d10d024", "2f7d3406a96a5f409872e13643463a4896d9a009", "3316312af29a7c2c22c9653e63d51564e264b6ee", "6626e883c82914d9fffc0761e2eddfdcd3ffd96d", "612b8eda338fcde9400ea93779741282fe4132d6", "0957189f98fef283b05a672177465867600dc5a6", "0d1b8977d9390470577fb595fffccf7c1aba06f5", "146b852de824ac9e105175ec0d82346e9f9cf083", "390ab26ec93606a835ed0a0ab501cf2375c8cbac", "9b7c922d00a6bebc60607168ebbda2ebdc703db7", 
"2b5ec4532fbd88f730e542d4bd5ad6dbe163e868", "ff4e8a8333e4ef506318160248c068250963806d", "66fb1e7a65abbfa171a3fd92dc67006490df7450", "846a25af091b5a7dc0b643d826455a80cabdfbf6", "cd5dbf0a6bbd6be291b53f17ae77656ae9c4bb7c", "719eb145ba64d701b6de2f69608be2e70e300204", "5236d6d60cdc0fbdb3caf78bccecca5d92b3d67f", "a1af05502eac70296ee22e5ab7e066420f5fe447", "aaba2a04c025f12f839ac71fb248da0dd6985d58", "2608a2499819053468f4e6f77a715c2dbfefdfb0", "1d8b85d7fadc62099eb15c4462379b2167538a7a", "3cf4bf6068da630fb4e4433f355b776f4aa4b8d3", "4e4ba3783e7fe7dcf4a3b4de1fe1d5b603029f3a", "021b38f989a7140e17babc194967dcc7ab4809ab", "c6d6193c8f611331c8178c3857f9ef92607a4507", "3ac173183f968a7f042d0b779009ded2af76d0fd", "18783a40a99f36b3bc618be6ebf06923fd63efab", "e7b81c3d22fd9735d807368dad28bc2411d8371e", "45e9b5a7dba2f757567324fe35c2f2db87b015cc", "90a46cf5ca0f13154864aeefe3e8e30e9fde754c", "d690b0fc5a9f0b033963afd92e70402ddf167839", "6637404f65c0896d5b52265bcd4fa5433fea1db1", "084bd219dd239dc4c9a02621a5333d3bc1446566", "f52a5e4e21daf4156bb0eafaadd77865e56a9aa5", "810f5606a4769fc3dd99611acf805596fb79223d", "82224858677af47b8c836df701eeea8fffaec924", "63d595ca536bb42b4d79421557c927c3289e1f76", "6eafe3fd654a18ad8fc15b2351c4921839587374", "9c6dfd3a38374399d998d5a130ffc2864c37f554", "c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8", "aa1607090fbc80ab1e9c0f25ffe8b75b777e5fd8", "7e18b5f5b678aebc8df6246716bf63ea5d8d714e", "67386772c289cd40db343bdc4cb8cb4f58271df2", "bba281fe9c309afe4e5cc7d61d7cff1413b29558", "4d58f886f5150b2d5e48fd1b5a49e09799bf895d", "4196e0b77f88ea01cd868c535befb52c2722454f", "e295f31df11ec700851c2413b9bba644a91b0629", "4f6e1fe403b13279cd4674615d6d07ce002c9dec", "20ac41b5bc3b0697e6de097797bc95478751eb25", "eb6243b1c9506f9450dab2a09db9c17fc2c2d364", "41c1b8f319e27be0c77c3b33cf877c29b1676501", "ec0758185c9d565de715fa7cc03e93809284d9ac", "c7822b90a04f25e06d2fbed6b72a5ff89ff22c41", "61be9e8b1f2d642eb0b91a6097fe1c50c37a285c", "cb522158aa3c91fda3089d152b0005605056852b", 
"c5b623805ad8a2ef49ccb6e9b08d394c83061946", "db861322aa584dcbd71c3d70660edfdc4464e0d9", "052bd24b03bd2acddf0aebae8111d4d2021c56b9", "8408f4b1193e8db25fec818a989d9fe3194d5ea6", "ef9081d153f96b96183666a5086c63cecf2f33e6", "1e2844dfdaabaf555fe6fa3162cfe454a4eeb3f6", "846f3857976ba437e0592a848e47f6a3370880a3", "b1a6a0fdb4c12d7e083bd4d828e0c469dec53205", "851ff9fe367ae63e9d46e4f0560d283da81b361a", "9c731b820c495904a6f7d255d7e6a3bf9e5fc365", "86f8dd2a96e1ef9574af1c4cf93f70227f7bfd59", "d2a5b9b8f02f39f7d9ef48d234ec61f4ddc6c291", "244a6d4f5f745f8c2a58a6a70d7ba2b91300c118", "93a819b07ec0e76cc2de9f05f41f148cbaa410f8", "96a7f2faf4baa09184deb458a03146805d62beed", "af44049d138b4f0a3cd4664dc6e7ecde04a754aa", "bff9d100e99dd6a99ec26ca867694075b1dcac92", "da7ffe21508ad8d6dd9de7da378e184cb43a56c8", "c05441dd1bc418fb912a6fafa84c0659a6850bf0", "61f1b14f04d2fa1d8a556adbdf93050b4637f44b", "d2b3166b8a6a3e6e7bc116257e718e4fe94a0638", "ec1a57e609eda72b4eb60155fac12db1da31f6c0", "d03baf17dff5177d07d94f05f5791779adf3cd5f", "e99665790f3d2f19a25cdad4d07226f339f2522b", "d5c66a48bc0a324750db3d295803f47f6060043d", "a119844792fd9157dec87e3937685c8319cac62f", "0d5824e14593bcb349d636d255ba274f98bbb88f", "ccbfc004e29b3aceea091056b0ec536e8ea7c47e", "9e1c3b8b1653337094c1b9dba389e8533bc885b0", "d44d911c045a6df610cb4103f1ab09827fab8296", "34863ecc50722f0972e23ec117f80afcfe1411a9", "4686df20f0ee40cd411e4b43860ef56de5531d9e", "0f4cfcaca8d61b1f895aa8c508d34ad89456948e", "eccd9acba3f6a605053dbde7f0890836e52aa085", "20a16efb03c366fa4180659c2b2a0c5024c679da", "98856ab9dc0eab6dccde514ab50c823684f0855c", "f437b3884a9e5fab66740ca2a6f1f3a5724385ea", "40a34d4eea5e32dfbcef420ffe2ce7c1ee0f23cd", "3cd22b5b81a0172d608ff14be71b755d1f68c201", "b3f7c772acc8bc42291e09f7a2b081024a172564", "8fed5ea3b69ea441a8b02f61473eafee25fb2374", "5748652924084b7b0220cddcd28f6b2222004359", "9d357bbf014289fb5f64183c32aa64dc0bd9f454", "ea5c9d5438cde6d907431c28c2f1f35e02b64b33", "27aa23d7a05368a6b5e3d95627f9bab34284e5c4", 
"301b0da87027d6472b98361729faecf6e1d5e5f6", "60a20d5023f2bcc241eb9e187b4ddece695c2b9b", "fafe69a00565895c7d57ad09ef44ce9ddd5a6caa", "3fc173805ed43602eebb7f64eea4d60c0386c612", "ce9a61bcba6decba72f91497085807bface02daf", "e6c834c816b5366875cf3060ccc20e16f19a9fc6", "ba931c3f90dd40a5db4301a8f0c71779a23043d6", "ecfa56b38ac2b58428d59c9b630b1437a9ff8278", "171389529df11cc5a8b1fbbe659813f8c3be024d", "e9b0a27018c7151016a9fe01c98b4c21d6ebf4be", "06ab24721d7117974a6039eb2e57d1545eee5e46", "0553c6b9ee3f7d24f80e204d758c94a9d6b375d2", "919cb6160db66a8fe0b84cb7f171aded48a13632", "1b6c65442f2b572fb6c8fc9a7d5ae49a8e6d32ab", "cdfa7dccbc9e9d466f8a5847004973a33c7fcc89", "fde0180735699ea31f6c001c71eae507848b190f", "93cd5c47e4a3425d23e3db32c6eaef53745bb32e", "050e51268b0fb03033428ac777ccfef2db752ab3", "7f6599e674a33ed64549cd512ad75bdbd28c7f6c", "7bfe085c10761f5b0cc7f907bdafe1ff577223e0", "29a5d38390857e234c111f8bb787724c08f39110", "57a1466c5985fe7594a91d46588d969007210581", "1791f790b99471fc48b7e9ec361dc505955ea8b1", "fd9ab411dc6258763c95b7741e3d51adf5504040", "9513503867b29b10223f17c86e47034371b6eb4f", "d0ad7324fab174609f26c617869fa328960617e2", "cf7a4442a6aad0e08d4aade8ec379c44f84bca8a", "4d0ef449de476631a8d107c8ec225628a67c87f9", "b784bb1d2b2720dac8d4b92851a8d6360c35b0b2", "03dba79518434ba4a937b2980fbdc8bafc048b36", "1a41831a3d7b0e0df688fb6d4f861176cef97136", "014143aa16604ec3f334c1407ceaa496d2ed726e", "a52581a7b48138d7124afc7ccfcf8ec3b48359d0", "5f871838710a6b408cf647aacb3b198983719c31", "6bcee7dba5ed67b3f9926d2ae49f9a54dee64643", "f2ad9b43bac8c2bae9dea694f6a4e44c760e63da", "c79cf7f61441195404472102114bcf079a72138a", "c4b00e86841db3fced2a5d8ac65f80d0d3bbe352", "ad7b6d2e8d66f720cc83323a0700c25006d49609", "0d7f770c3b6857d5ef5dfe5f1b23e69f4a575fd3", "4d7bbaa2c7e89d5ba6940ee5804cf10a6b24d6ec", "7d7870b7633678db2d39d4a5d69d10337ca827d9", "a8e7561ada380f2f50211c67fc45c3b3dea96bdb", "391b273af237b69ebbdfacb8e33b8e873421c780", "cb5cda13a4ccbc32ce912d51e402363c1b501b32", 
"6f68c49106b66a5bd71ba118273b4c5c64b6619f", "1ecb56e7c06a380b3ce582af3a629f6ef0104457", "539287d8967cdeb3ef60d60157ee93e8724efcac", "2450c618cca4cbd9b8cdbdb05bb57d67e63069b1", "e3bb83684817c7815f5005561a85c23942b1f46b", "55079a93b7d1eb789193d7fcdcf614e6829fad0f", "42ecfc3221c2e1377e6ff849afb705ecd056b6ff", "6bca0d1f46b0f7546ad4846e89b6b842d538ee4e", "a0e7f8771c7d83e502d52c276748a33bae3d5f81", "5397c34a5e396658fa57e3ca0065a2878c3cced7", "25c3068e7964d3b894916a82b1fa93c9d6792886", "3f57c3fc2d9d4a230ccb57eed1d4f0b56062d4d5", "5e7cb894307f36651bdd055a85fdf1e182b7db30", "19868a469dc25ee0db00947e06c804b88ea94fd0", "27b451abfe321a696c852215bb7efb4c2e50c89f", "fd38163654a0551ed7f4e442851508106e6105d9", "2a79bd36c56fd1634ca0f8089fe8aa9343eb92ce", "9d61b0beb3c5903fc3032655dc0fd834ec0b2af3", "e013c650c7c6b480a1b692bedb663947cd9d260f", "404042a1dcfde338cf24bc2742c57c0fb1f48359", "f75852386e563ca580a48b18420e446be45fcf8d", "0294f992f8dfd8748703f953925f9aee14e1b2a2", "4aea1213bdb5aa6c74b99fca1afc72d8a99503c6", "a0beb0cc6f167373f8b4b7458ff0ec42fc290a75", "c92da368a6a886211dc759fe7b1b777a64d8b682", "8ed051be31309a71b75e584bc812b71a0344a019", "91d2fe6fdf180e8427c65ffb3d895bf9f0ec4fa0", "5d0f72174e9ca1d620227b53ab1bbd8263fb4a9e", "b54c477885d53a27039c81f028e710ca54c83f11", "d43b6ca9257e9b24f89eb3867f2c04068a78c778", "adad7446e371d27fdaee39475856e2058f3045e5", "3d0ef9bfd08a9252db6acfece3b83f3aa58b4cae", "fac5a9a18157962cff38df6d4ae69f8a7da1cfa8", "2e0e056ed5927a4dc6e5c633715beb762628aeb0", "60d765f2c0a1a674b68bee845f6c02741a49b44e", "030c82b87e3cdc5ba35c443a93ff4a9d21c2bc2f", "652aac54a3caf6570b1c10c993a5af7fa2ef31ff", "69063f7e0a60ad6ce16a877bc8f11b59e5f7348e", "b5930275813a7e7a1510035a58dd7ba7612943bc", "fe97d46c34630d14235132a95fb2d2ed7b2c4663", "de15af84b1257211a11889b6c2adf0a2bcf59b42", "18e54b74ed1f3c02b7569f53a7d930d72fc329f5", "6e91be2ad74cf7c5969314b2327b513532b1be09", "4967b0acc50995aa4b28e576c404dc85fefb0601", "8990f8ea6441f97597429686542b9cdc46ed47de", 
"75e5a1a64d9d27dbb054fc8b8d47f0e23cbbbfa4", "8562b4f63e49847692b8cb31ef0bdec416b9a87a", "ad6cc071b2585e4bdb6233b7ad8d63e12538537d", "d7dd35a86117e46d24914ef49ccd99ea0a7bf705", "4f03ba35440436cfa06a2ed2a571fea01cb36598", "6af35225cfd744b79577c126e553f549e5b5cdcc", "8c13f2900264b5cf65591e65f11e3f4a35408b48", "ff0617d750fa49416514c1363824b8f61baf8fb5", "9b000ccc04a2605f6aab867097ebf7001a52b459", "266ee26a6115f1521ce374e4ab106d997c7b1407", "27c9ddb72360f4cd0f715cd7ea82fa399af91f11", "2a02355c1155f2d2e0cf7a8e197e0d0075437b19", "0efdd82a4753a8309ff0a3c22106c570d8a84c20", "49394a5e0ca1d4bb77d8c9bfa963b8b8cb761ecf", "79617903c5cb56697f2e738e1463b9654e2d68ed", "49068538b7eef66b4254cc11914128097302fab8", "b856d8d6bff745bb1b4beb67e4b821fc20073840", "16fc82d44188eb49a151bd5836a29911b3bfabcb", "a3a34c1b876002e0393038fcf2bcb00821737105", "a72f0be803c9290923643660caf3bffec4ea3611", "0447bdb71490c24dd9c865e187824dee5813a676", "439647914236431c858535a2354988dde042ef4d", "4c1528bab3142ec957700ab502531e1a67e7f2f6", "0241513eeb4320d7848364e9a7ef134a69cbfd55", "5a87bc1eae2ec715a67db4603be3d1bb8e53ace2", "8f9f599c05a844206b1bd4947d0524234940803d", "007250c2dce81dd839a55f9108677b4f13f2640a", "b017963d83b3edf71e1673d7ffdec13a6d350a87", "0a29cee986471b495728b08756f135a2377d5a2a", "42350e28d11e33641775bef4c7b41a2c3437e4fd", "016a8ed8f6ba49bc669dbd44de4ff31a79963078", "63cff99eff0c38b633c8a3a2fec8269869f81850", "30188b836f2fa82209d7afbf0e4d0ee29c6b9a87", "a42209dbfe6d2005295d790456ddb2138302cbe5", "7c0a6824b556696ad7bdc6623d742687655852db", "44f65e3304bdde4be04823fd7ca770c1c05c2cef", "6d66c98009018ac1512047e6bdfb525c35683b16", "64d5772f44efe32eb24c9968a3085bc0786bfca7", "2ee8900bbde5d3c81b7ed4725710ed46cc7e91cd", "3d0f9a3031bee4b89fab703ff1f1d6170493dc01", "923ede53b0842619831e94c7150e0fc4104e62f7", "9d06d43e883930ddb3aa6fe57c6a865425f28d44", "e48fb3ee27eef1e503d7ba07df8eb1524c47f4a6", "309e17e6223e13b1f76b5b0eaa123b96ef22f51b", "e4df83b7424842ff5864c10fa55d38eae1c45fac", 
"cd596a2682d74bdfa7b7160dd070b598975e89d9", "d6a5eb4377e2a67420778eab61b5a89046307bae", "d72973a72b5d891a4c2d873daeb1bc274b48cddf", "55138c2b127ebdcc508503112bf1d1eeb5395604", "857ad04fca2740b016f0066b152bd1fa1171483f", "67c3c1194ee72c54bc011b5768e153a035068c43", "306957285fea4ce11a14641c3497d01b46095989", "39a19a687b3182054b30f36f627bc6875b09dbd3", "a63ec22e84106685c15c869aeb157aa48259e855", "f95321f4348cfacc52084aae2a19127d74426047", "fb8eb4a7b9b9602992e5982c9e0d6d7f7b8210ef", "c1f07ec629be1c6fe562af0e34b04c54e238dcd1", "50333790dd98c052dfafe1f9bf7bf8b4fc9530ba", "bd70f832e133fb87bae82dfaa0ae9d1599e52e4b", "07c90e85ac0f74b977babe245dea0f0abcf177e3", "773ce00841a23d32727aa1f54c29865fefd4ce02", "952138ae6534fad573dca0e6b221cdf042a36412", "1c2724243b27a18a2302f12dea79d9a1d4460e35", "642c66df8d0085d97dc5179f735eed82abf110d0", "199c2df5f2847f685796c2523221c6436f022464", "e6f3707a75d760c8590292b54bc8a48582da2cd4", "c29e33fbd078d9a8ab7adbc74b03d4f830714cd0", "68d566ed4041a7519acb87753036610bd64dcc09", "7d306512b545df98243f87cb8173df83b4672b18", "0a297523188b03fdf9d2155bfdcca7e1bcab3762", "7ab7befcd319d55d26c1e4b7b9560da5763906f3", "dab795b562c7cc270c9099b925d685bea0abe82a", "297d3df0cf84d24f7efea44f87c090c7d9be4bed", "2ca43325a5dbde91af90bf850b83b0984587b3cc", "25b2811118ed73c64682544fe78023bb8242c709", "56e03f8fcd16332f764352ba6e72c9c5092cac0f", "ee5fe44871f5e36998a2fdfb20a511374cdd3877", "2b435ee691718d0b55d057d9be4c3dbb8a81526e", "89c51f73ec5ebd1c2a9000123deaf628acf3cdd8", "73d53a7c27716ae9a6d3484e78883545e53117ae", "c40c23e4afc81c8b119ea361e5582aa3adecb157", "a56b0f76919aabe8b768f5fbaeca412276365aa2", "48fea82b247641c79e1994f4ac24cad6b6275972", "b5fc4f9ad751c3784eaf740880a1db14843a85ba", "e5ea7295b89ef679e74919bf957f58d55ad49489", "39c8ed5213882d4dbc74332245ffe201882c5de1", "7fce5769a7d9c69248178989a99d1231daa4fce9", "6a5fe819d2b72b6ca6565a0de117c2b3be448b02", "b208f2fc776097e98b41a4ff71c18b393e0a0018", "e5c687c8c84f1cdb9d9fbc9b6ff7518ff4d71056", 
"85041e48b51a2c498f22850ce7228df4e2263372", "7b3b7769c3ccbdf7c7e2c73db13a4d32bf93d21f", "ee92d36d72075048a7c8b2af5cc1720c7bace6dd", "a3d747a72803de5200c39f88a2b7fd300474f89b", "d9072e6b7999bc2d5750eb58c67a643f38d176d6", "21b5af67618fcc047b495d2d5d7c2bf145753633", "bb69f750ccec9624f6dabd334251def2bbddf166", "c8829013bbfb19ccb731bd54c1a885c245b6c7d7", "1c4ceae745fe812d8251fda7aad03210448ae25e", "ddf577e8b7c86b1122c1bc90cba79f641d2b33fa", "a26379d9993073d51611588c36f12db2b4ecb39a", "1bb14ddc0326a8e5b44eafd915738c2b1342f392", "4b71d1ff7e589b94e0f97271c052699157e6dc4a", "8605e8f5d84b8325b1a81d968c296a5a5d741f31", "557115454c1b8e6eaf8dbb65122c5b00dc713d51", "846c028643e60fefc86bae13bebd27341b87c4d1", "2b8c5017633a82b15dbe0047cfc76ffdce462176", "7f1f3d7b1a4e7fc895b77cb23b1119a6f13e4d3a", "31a2fb63a3fc67da9932474cda078c9ac43f85c5", "1d30f813798c55ae4fe454829be6e2948ee841da", "41272037f6a7a4058f9c52843b9025b7b3cd13a9", "10bf35bf98cfe555dfc03b5f03f2769d330e3af9", "bbf01aa347982592b3e4c9e4f433e05d30e71305", "4309faac3248663ed56a6a841cac1855e302f090", "a03448488950ee5bf50e9e1d744129fbba066c50", "1c6be6874e150898d9db984dd546e9e85c85724e", "5ec94adc9e0f282597f943ea9f4502a2a34ecfc2", "1f89439524e87a6514f4fbe7ed34bda4fd1ce286", "9893865afdb1de55fdd21e5d86bbdb5daa5fa3d5", "4e37cd250130c6fd60e066f0c8efb3cbb778c421", "ae96fc36c89e5c6c3c433c1163c25db1359e13ea", "1679943d22d60639b4670eba86665371295f52c3", "2251a88fbccb0228d6d846b60ac3eeabe468e0f1", "bfcdfc11f9a02a47bda6e571f1e5aa8a5264b4a8", "adb040081974369c46b943e9f75be4e405623102", "4e94e7412d180da5a646f6a360e75ba2128f93aa", "57f8e1f461ab25614f5fe51a83601710142f8e88", "09e7397fbcf4cc54ee085599a3b9bb72539ab251", "a803453edd2b4a85b29da74dcc551b3c53ff17f9", "f52efc206432a0cb860155c6d92c7bab962757de", "a2a42aa37641490213b2de9eb8e83f3dab75f5ed", "8ab465c1a131ee4bee6ac0a0b19dfe68f5dcdcc4", "1bc214c39536c940b12c3a2a6b78cafcbfddb59a", "864d50327a88d1ff588601bf14139299ced2356f", "19eb486dcfa1963c6404a9f146c378fc7ae3a1df", 
"292eba47ef77495d2613373642b8372d03f7062b", "dd8d53e67668067fd290eb500d7dfab5b6f730dd", "621ed006945e9438910b5aa4f6214888dea3d791", "df767f62a6bf3b09e6417d801726f2d5d642a202", "c5468665d98ce7349d38afb620adbf51757ab86f", "51a8dabe4dae157aeffa5e1790702d31368b9161", "7cf8a841aad5b7bdbea46a7bb820790e9ce12d0b", "303517dfc327c3004ae866a6a340f16bab2ee3e3", "b58d381f9f953bfe24915246b65da872aa94f9aa", "00eccc565b64f34ad53bf67dfaf44ffa3645adff", "cd33b3ca8d7f00c1738c41b2071a3164ba42ea61", "2f78e471d2ec66057b7b718fab8bfd8e5183d8f4", "40cd062438c280c76110e7a3a0b2cf5ef675052c", "4cb8a691a15e050756640c0a35880cdd418e2b87", "9cd4f72d33d1cedc89870b4f4421d496aa702897", "df2494da8efa44d70c27abf23f73387318cf1ca8", "57b7325b8027745b130490c8f736445c407f4c4c", "f9d1f12070e5267afc60828002137af949ff1544", "89896474f007c99f5967bcc05a952654a3bbb736", "5dafab3c936763294257af73baf9fb3bb1696654", "a7e1327bd76945a315f2869bfae1ce55bb94d165", "11bda1f054effb3116115b0699d74abec3e93a4b", "2afdda6fb85732d830cea242c1ff84497cd5f3cb", "49570b41bd9574bd9c600e24b269d945c645b7bd", "c32c8bfadda8f44d40c6cd9058a4016ab1c27499", "d8b568392970b68794a55c090c4dd2d7f90909d2", "5f6fafa788bd1b25c3c462c4013fd8fc0049be74", "98519f3f615e7900578bc064a8fb4e5f429f3689", "789a43f51e0a3814327dab4299e4eda8165a5748", "e3a8f18e507d9f2b537ec3c3fcc1b874b8ccfc24", "7753e3b9e158289cbaa22203166424ca9c229f68", "141eab5f7e164e4ef40dd7bc19df9c31bd200c5e", "2f348a2ad3ba390ee178d400be0f09a0479ae17b", "7fc76446d2b11fc0479df6e285723ceb4244d4ef", "3db75962857a602cae65f60f202d311eb4627b41", "050eda213ce29da7212db4e85f948b812a215660", "0a451fc7d2c6b3509d213c210ae880645edf90ed", "4c0846bcfa64d9e810802c5b7ef0f8b43523fe54", "0066caed1238de95a431d836d8e6e551b3cde391", "e03bda45248b4169e2a20cb9124ae60440cad2de", "3bc776eb1f4e2776f98189e17f0d5a78bb755ef4", "b3c398da38d529b907b0bac7ec586c81b851708f", "8dc9de0c7324d098b537639c8214543f55392a6b", "51dcb36a6c247189be4420562f19feb00c9487f8", "c0945953506a3d531331caf6c2b2a6d027e319f0", 
"35208eda874591eac70286441d19785726578946", "ce85d953086294d989c09ae5c41af795d098d5b2", "5a029a0b0ae8ae7fc9043f0711b7c0d442bfd372", "72345fed8d068229e50f9ea694c4babfd23244a0", "7d53678ef6009a68009d62cd07c020706a2deac3", "a6496553fb9ab9ca5d69eb45af1bdf0b60ed86dc", "59dac8b460a89e03fa616749a08e6149708dcc3a", "a9881ae58987da71b4c1ce01ba213eb4be2eef02", "9b8830655d4a5a837e3ffe835d14d6d71932a4f2", "45215e330a4251801877070c85c81f42c2da60fb", "69526cdf6abbfc4bcd39616acde544568326d856", "e2f91b21f3755914c193a546ba8718acf81c845b", "20b405d658b7bb88d176653758384e2e3e367039", "fcbec158e6a4ace3d4311b26195482b8388f0ee9", "3a0ea368d7606030a94eb5527a12e6789f727994", "92fada7564d572b72fd3be09ea3c39373df3e27c", "0f1cbe4e26d584c82008ccef9fb1e4669b82de1f", "a54e0f2983e0b5af6eaafd4d3467b655a3de52f4", "ee744ea13a0bbeba5de85ca3c75c9749054835e7", "18166432309000d9a5873f989b39c72a682932f5", "ab989225a55a2ddcd3b60a99672e78e4373c0df1", "c833c2fb73decde1ad5b5432d16af9c7bee1c165", "ac1d97a465b7cc56204af5f2df0d54f819eef8a6", "d231a81b38fde73bdbf13cfec57d6652f8546c3c", "c207fd762728f3da4cddcfcf8bf19669809ab284", "7ed2c84fdfc7d658968221d78e745dfd1def6332", "d930ec59b87004fd172721f6684963e00137745f", "621f656fedda378ceaa9c0096ebb1556a42e5e0f", "3c47022955c3274250630b042b53d3de2df8eeda", "d3424761e06a8f5f3c1f042f1f1163a469872129", "0b5bd3ce90bf732801642b9f55a781e7de7fdde0", "c466ad258d6262c8ce7796681f564fec9c2b143d", "9fd1b8abbad25cb38f0c009288fb5db0fc862db6", "a07f78124f83eef1ed3a6f54ba982664ae7ca82a", "4e4e8fc9bbee816e5c751d13f0d9218380d74b8f", "e084b0e477ee07d78c32c3696ea22c94f5fdfbec", "3ebce6710135d1f9b652815e59323858a7c60025", "cbfcd1ec8aa30e31faf205c73d350d447704afee", "4159663f0b292fd8cc7411929be9d669bb98b386", "4215b34597d8ce1e8985afa8043400caf0ec7230", "a200885bf6bfa0493d85e7617e65cdabe30a2dab", "3f5cf3771446da44d48f1d5ca2121c52975bb3d3", "d40cd10f0f3e64fd9b0c2728089e10e72bea9616", "e94168c35be1d4b4d2aaf42ef892e64a3874ed8c", "12c713166c46ac87f452e0ae383d04fb44fe4eb2", 
"f58d584c4ac93b4e7620ef6e5a8f20c6f6da295e", "4d4736173a5e72c266e52f3a43bdcb2b58f237a2", "6c27eccf8c4b22510395baf9f0d0acc3ee547862", "7c2ec6f4ab3eae86e0c1b4f586e9c158fb1d719d", "71f9861df104b90399dc15e12bbb14cd03f16e0b", "27ee8482c376ef282d5eb2e673ab042f5ded99d7", "8210fd10ef1de44265632589f8fc28bc439a57e6", "daba8f0717f3f47c272f018d0a466a205eba6395", "b16580d27bbf4e17053f2f91bc1d0be12045e00b", "b4c3743b10fffd2d44465f29a78c136e623295ff", "6e1802874ead801a7e1072aa870681aa2f555f35", "35f084ddee49072fdb6e0e2e6344ce50c02457ef", "ecac3da2ff8bc2ba55981467f7fdea9de80e2092", "b7f05d0771da64192f73bdb2535925b0e238d233", "8f8a5be9dc16d73664285a29993af7dc6a598c83", "fe5d6c65e51386f4d36f7434fe6fcd9494fe9361", "cfd8c66e71e98410f564babeb1c5fd6f77182c55", "c74b1643a108939c6ba42ae4de55cb05b2191be5", "153c8715f491272b06dc93add038fae62846f498", "f4c01fc79c7ead67899f6fe7b79dd1ad249f71b0", "e4391993f5270bdbc621b8d01702f626fba36fc2", "e27b2cabdfdd6bf3ffb3ebce1b4c55adb1e80c8f", "4b8c736524d548472d0725c971ee29240ae683f6", "396a19e29853f31736ca171a3f40c506ef418a9f", "44d93039eec244083ac7c46577b9446b3a071f3e", "aebb9649bc38e878baef082b518fa68f5cda23a5", "b5efe2e53aa417367314c1a907d0fe8053c71ecd", "88c21e06ed44da518a7e346fce416efedc771704", "caaa6e8e83abb97c78ff9b813b849d5ab56b5050", "0c7f27d23a162d4f3896325d147f412c40160b52", "a9adb6dcccab2d45828e11a6f152530ba8066de6", "b88d5e12089f6f598b8c72ebeffefc102cad1fc0", "db67edbaeb78e1dd734784cfaaa720ba86ceb6d2", "4f773c8e7ca98ece9894ba3a22823127a70c6e6c", "7644b3a0871b8e0e7e1cdf06099e295f1e5fbdf7", "d1ee9e63c8826a39d75fa32711fddbcc58d5161a", "16b0c171fb094f677fcdf78bbb9aaef0d5404942", "2cac70f9c8140a12b6a55cef834a3d7504200b62", "26a89701f4d41806ce8dbc8ca00d901b68442d45", "d91a5589fd870bf62b7e4979d9d47e8acf6c655d", "06719154ab53d3a57041b2099167e3619f1677bc", "8acdc4be8274e5d189fb67b841c25debf5223840", "dbcfefa92edab8d1ffe8bc1cc66ad80fb13d2b6a", "27883967d3dac734c207074eed966e83afccb8c3", "21cbf46c6adfb3a44ed2b30ff0b21a8391c18b13", 
"fac8cff9052fc5fab7d5ef114d1342daba5e4b82", "10ce3a4724557d47df8f768670bfdd5cd5738f95", "e4e95b8bca585a15f13ef1ab4f48a884cd6ecfcc", "795b555abb26e62ad89a93645122da530327c447", "c6a4b23ead2dab3d5dc02a5916d4c383f0c53007", "0532cbcf616f27e5f6a4054f818d4992b99d201d", "321bd4d5d80abb1bae675a48583f872af3919172", "b3e60bb5627312b72c99c5ef18aa41bcc1d21aea", "6459f1e67e1ea701b8f96177214583b0349ed964", "ad77056780328bdcc6b7a21bce4ddd49c49e2013", "b13a882e6168afc4058fe14cc075c7e41434f43e", "8d4bc5abfc0a89452ea21d78066e4e049fddfc79", "283d381c5c2ba243013b1c4f5e3b29eb906fa823", "35ec9b8811f2d755c7ad377bdc29741b55b09356", "8185be0689442db83813b49e215bf30870017459", "b55f256bbd2e1a41ce6bfcd892dee12f5bcd7cb3", "227b18fab568472bf14f9665cedfb95ed33e5fce", "548233d67f859491e50c5c343d7d77a7531d4221", "15e27f968458bf99dd34e402b900ac7b34b1d575", "82953e7b3d28ccd1534eedbb6de7984c59d38cd4", "b05943b05ef45e8ea8278e8f0870f23db5c83b23", "d4b88be6ce77164f5eea1ed2b16b985c0670463a", "05d80c59c6fcc4652cfc38ed63d4c13e2211d944", "3cb0ef5aabc7eb4dd8d32a129cb12b3081ef264f", "2d23fa205acca9c21e3e1a04674f1e5a9528550e", "8309e8f27f3fb6f2ac1b4343a4ad7db09fb8f0ff", "4f0d5cbcd30fef3978b9691c2e736daed2f841c1", "ff061f7e46a6213d15ac2eb2c49d9d3003612e49", "e8dda897372e6b4cf903234c7a9c40117711d8d8", "92c2dd6b3ac9227fce0a960093ca30678bceb364", "d983dda8b03ed60fa3afafe5c50f1d9a495f260b", "0b3f354e6796ef7416bf6dde9e0779b2fcfabed2", "8fb2ec3bbd862f680be05ef348b595e142463524", "4a1a5316e85528f4ff7a5f76699dfa8c70f6cc5c", "c1ff88493721af1940df0d00bcfeefaa14f1711f", "0874734e2af06883599ed449532a015738a1e779", "4ac4e8d17132f2d9812a0088594d262a9a0d339b", "11fdd940c9a23a34f7ab59809c26a02bce35c5f3", "814369f171337ee1d8809446b7dbfc5e1ef9f4b5", "46072f872eee3413f9d05482be6446f6b96b6c09", "e95c5aaa72e72761b05f00fad6aec11c3e2f8d0f", "1e0add381031245b1d5129b482853ee738b498e1", "6f48e5e258da11e6ba45eeabe65a5698f17e58ef", "0be764800507d2e683b3fb6576086e37e56059d1", "6ab8f2081b1420a6214a6c127e5828c14979d414", 
"d7d166aee5369b79ea2d71a6edd73b7599597aaa", "4d01d78544ae0de3075304ff0efa51a077c903b7", "7f8cef6ba2f059e465b1b23057a6dbb23fba1c63", "f2d15482e7055dd5f54cf4a8a8f60d8e75af7edf", "a8583e80a455507a0f146143abeb35e769d25e4e", "7701952e405c3d8a0947e2a309de281aa76bd3f4", "3dcebd4a1d66313dcd043f71162d677761b07a0d", "05184f01e66d7139530729b281da74db35a178d2", "05270b68547a2cd5bda302779cfc5dda876ae538", "b0f59b71f86f18495b9f4de7c5dbbebed4ae1607", "0db1207563a66343cc7cb7b54356c767fc8b876c", "f2e9494d0dca9fb6b274107032781d435a508de6", "06b4e41185734f70ce432fdb2b121a7eb01140af", "ccb95192001b07bb25fc924587f9682b0df3de8e", "0d8415a56660d3969449e77095be46ef0254a448", "fc45e44dd50915957e498186618f7a499953c6be", "c648d2394be3ff0c0ee5360787ff3777a3881b02", "8010636454316faf1a09202542af040ffd04fefa", "1eec03527703114d15e98ef9e55bee5d6eeba736", "3abe50d0a806a9f5a5626f60f590632a6d87f0c4", "e71c15f5650a59755619b2a62fa93ac922151fd6", "476f177b026830f7b31e94bdb23b7a415578f9a4", "e41246837c25d629ca0fad74643fb9eb8bf38009", "2331df8ca9f29320dd3a33ce68a539953fa87ff5", "ad339a5fdaab95f3c8aad83b60ceba8d76107fa2", "3ca6adc90aae5912baa376863807191ffd56b34e", "dcce3d7e8d59041e84fcdf4418702fb0f8e35043", "aba770a7c45e82b2f9de6ea2a12738722566a149", "0b02bfa5f3a238716a83aebceb0e75d22c549975", "4276eb27e2e4fc3e0ceb769eca75e3c73b7f2e99", "6f8cffd9904415c8fa3a1e650ac143867a04f40a", "e865908ed5e5d7469b412b081ca8abd738c72121", "40c1de7b1b0a087c590537df55ecd089c86e8bfc", "91e507d2d8375bf474f6ffa87788aa3e742333ce", "3aebaaf888cba25be25097173d0b3af73d9ce7f9", "70bf1769d2d5737fc82de72c24adbb7882d2effd", "4b507a161af8a7dd41e909798b9230f4ac779315", "097f674aa9e91135151c480734dda54af5bc4240", "0a79d0ba1a4876086e64fc0041ece5f0de90fbea", "32b76220ed3a76310e3be72dab4e7d2db34aa490", "3e59d97d42f36fc96d33a5658951856a555e997b", "9cfb3a68fb10a59ec2a6de1b24799bf9154a8fd1", "94b9c0a6515913bad345f0940ee233cdf82fffe1", "6691dfa1a83a04fdc0177d8d70e3df79f606b10f", "4672513d0dbc398719d66bba36183f6e2b78947b", 
"4a2d54ea1da851151d43b38652b7ea30cdb6dfb2", "9255d3b2bfee4aaae349f68e67c76a077d2d07ad", "5ba7882700718e996d576b58528f1838e5559225", "9175b123837ecf55a9aae6c40ba245ddacbc37d5", "81f101cea3c451754506bf1c7edf80a661fa4dd1", "f66add890c2458466e1cb942ad3981f8651ace2d", "4aefd3ffa712a9b7d9db0615d4ee1932de6060d6", "7f68a5429f150f9eb7550308bb47a363f2989cb3", "14ae16e9911f6504d994503989db34d2d1cb2cd4", "e9f1cdd9ea95810efed306a338de9e0de25990a0", "349434653429733f5f49fe0e160027d994cef115", "0a3863a0915256082aee613ba6dab6ede962cdcd", "11269e98f072095ff94676d3dad34658f4876e0e", "1f5b9ac2a37431b59fd1cecf8fe57b92b6b6398e", "48901e44cd3e17efcfc9866982f8bd7b2c26b99d", "1de8f38c35f14a27831130060810cf9471a62b45", "a777101b56fe46c4d377941afcf34edc2b8b5f6f", "962812d28a169b3fc1d4323f8d0fca69a22dac4c", "d6bfa9026a563ca109d088bdb0252ccf33b76bc6", "b95d13d321d016077bd2906f7fbd9be7c3643475", "1439bf9ba7ff97df9a2da6dae4784e68794da184", "d31af74425719a3840b496b7932e0887b35e9e0d", "8b1fa60b9164b60d1ca2705611fab063505a3ef5", "67ef372d35382570d7a16488d5e2c6b8ef88677f", "2e12c5ea432004de566684b29a8e148126ef5b70", "f8ea0f76f2044168040fcd0a9e81072c88cde4a4", "c3e53788370341afe426f2216bed452cbbdaf117", "df87193e15a19d5620f5a6458b05fee0cf03729f", "9227c1a5b26556b9c34015b3ea5f9ae5f50e9b23", "c8e84cdff569dd09f8d31e9f9ba3218dee65e961", "b6c53891dff24caa1f2e690552a1a5921554f994", "8a866bc0d925dfd8bb10769b8b87d7d0ff01774d", "2d93a9aa8bed51d0d1b940c73ac32c046ebf1eb8", "2533c88b278e84a248200d3c5a281177d392e78f", "7ab8cafe454a9fd0fe5d51e718a010ef552b9271", "b503f481120e69b62e076dcccf334ee50559451e", "064cd41d323441209ce1484a9bba02a22b625088", "ab87ab1cf522995510561cd9f494223704f1de91", "ea8d217231d4380071132ce37bf997164b60ec44", "245f8ec4373e0a6c1cae36cd6fed5a2babed1386", "22ec8af0f0e5469e40592d29e28cfbdf1154c666", "7966146d72f9953330556baa04be746d18702047", "1bdef21f093c41df2682a07f05f3548717c7a3d1", "17768efd76a681902a33994da4d3163262bf657f", "0bc0f9178999e5c2f23a45325fa50300961e0226", 
"c59104e304297bf732cdb062efcde52fb52a0d53", "e43ea078749d1f9b8254e0c3df4c51ba2f4eebd5", "3bd1d41a656c8159305ba2aa395f68f41ab84f31", "1c4404885443b65b7cbda3c131e54f769fbd827d", "11a2ef92b6238055cf3f6dcac0ff49b7b803aee3", "9d24812d942e69f86279a26932df53c0a68c4111", "baad4e7ab0942a6b93ee2df39685f928efdae006", "7adaad633d3002f88cdee105d9c148e013202a06", "ec22eaa00f41a7f8e45ed833812d1ac44ee1174e", "af13c355a2a14bb74847aedeafe990db3fc9cbd4", "bad15b4dea2399d57ee17f33a5ba8f04b012ef63", "9ed943f143d2deaac2efc9cf414b3092ed482610", "71b376dbfa43a62d19ae614c87dd0b5f1312c966", "af3b803188344971aa89fee861a6a598f30c6f10", "7b9b3794f79f87ca8a048d86954e0a72a5f97758", "baaaf73ec28226d60d923bc639f3c7d507345635", "0c54e9ac43d2d3bab1543c43ee137fc47b77276e", "7cf579088e0456d04b531da385002825ca6314e2", "e4d8ba577cabcb67b4e9e1260573aea708574886", "60c7711bf9a00f697fff61474433da01f8550bf4", "0be80da851a17dd33f1e6ffdd7d90a1dc7475b96", "a939e287feb3166983e36b8573cd161d12097ad8", "d58fce50e9028dfc12cb2e7964f83d3b28bcc2fc", "97137d5154a9f22a5d9ecc32e8e2b95d07a5a571", "6e7ffd67329ca6027357a133437505bc56044e65", "35683a325c4fa02e9335dccbca9b67e2b55b87ec", "7ff42ee09c9b1a508080837a3dc2ea780a1a839b", "d975a535cbf3e0a502a30ff7ad037241f9b798ae", "887b7676a4efde616d13f38fcbfe322a791d1413", "c0f9fae059745e50658d9605bd8875fc3a2d0b4b", "543d2992bd50aee2019b5553ceb64f84afcd83db", "8a91ad8c46ca8f4310a442d99b98c80fb8f7625f", "540b39ba1b8ef06293ed793f130e0483e777e278", "503c0b83c64878eddec6f71798b7877f2ae1967e", "78e1798c3077f4f8a4df04ca35cd73f82e9a38f3", "18f57228614b1ea0f42e1376a78b94222e81bf7a", "1b4bc7447f500af2601c5233879afc057a5876d8", "26ad6ceb07a1dc265d405e47a36570cb69b2ace6", "2f0e5a4b0ef89dd2cf55a4ef65b5c78101c8bfa1", "11408af8861fb0a977412e58c1a23d61b8df458c", "013305c13cfabaea82c218b841dbe71e108d2b97", "2c4b96f6c1a520e75eb37c6ee8b844332bc0435c", "4805f41c4f8cfb932b011dfdd7f8907152590d1a", "98a120802aef324599e8b9014decfeb2236a78a3", "6d4103762e159130b32335cbf8893ee4dca26859", 
"1ef6ad9e1742d0b2588deaf506ef83b894fb9956", "9b7974d9ad19bb4ba1ea147c55e629ad7927c5d7", "f0dac9a55443aa39fd9832bdff202a579b835e88", "0aa405447a8797e509521f0570e4679a42fdac9b", "2ca10da4b59b406533ad1dc7740156e01782658f", "8877e0b2dc3d2e8538c0cfee86b4e8657499a7c4", "406431d2286a50205a71f04e0b311ba858fc7b6c", "cd9666858f6c211e13aa80589d75373fd06f6246", "934647c80f484340adecc74ac7141ed0b1d21c2f", "3e7070323bca6106f19bea4c97ef67bd6249cb5d", "ae73f771d0e429a74b04a6784b1b46dfe98f53e4", "9e2ab407ff36f3b793d78d9118ea25622f4b7434", "448ed201f6fceaa6533d88b0b29da3f36235e131", "df6e68db278bedf5486a80697dec6623958edba8", "d7b7253f7d8b397d9d74057e1e72ed9c58e2ba6d", "610779e90b644cc18696d7ac7820d3e0598e24d0", "b2f9e0497901d22b05b9699b0ea8147861c2e2cc", "dfbbe8100fcd70322a431bd5d2c2d52a65fd4bbd", "0d902541c26f03ff95221e0e71d67c39e094a61d", "b8a16fcb65a8cee8dd32310a03fe36b5dff9266a", "9fd8d24a9db7cbcdf607994051d89667e95d7186", "d7ecfb6108a379a0abf76bf3105b4c9baca8f84f", "ce70dd0d613b840754dce528c14c0ebadd20ffaa", "bc15a2fd09df7046e7e8c7c5b054d7f06c3cefe9", "edc37f68cc6a36ba51db77574788cce4b2719719", "76b227facbcd75cda35cb5bb8063d8d5cfcec4d0", "a6ebe013b639f0f79def4c219f585b8a012be04f", "f3553148e322f4f64545d6667dfbc7607c82703a", "f762afd65f3b680330e390f88d4cc39485345a01", "ab2b09b65fdc91a711e424524e666fc75aae7a51", "1831800ef8b1f262c92209f1ee16567105da35d6", "5e8de234b20f98f467581f6666f1ed90fd2a81be", "8a2210bedeb1468f223c08eea4ad15a48d3bc894", "85205914a99374fa87e004735fe67fc6aec29d36", "ace1e0f50fe39eb9a42586f841d53980c6f04b11", "fe6fefe5f2f8c97ed9a27f3171fc0afb62d5495e", "2f53b97f0de2194d588bc7fb920b89cd7bcf7663", "d3b73e06d19da6b457924269bb208878160059da", "3c5f390f99272c59fcf822ab78c90ee6bfa7926a", "42fff5b37006009c2dbfab63c0375c7c7d7d8ee3", "265a88a8805f6ba3efae3fcc93d810be1ea68866", "0e78af9bd0f9a0ce4ceb5f09f24bc4e4823bd698", "48a402593ca4896ac34fbebf1e725ab1226ecdb7", "304a306d2a55ea41c2355bd9310e332fa76b3cb0", "9628af8b8496a2e69c8eb8a33e2db2db62551e9c", 
"bc78f0086ba2ceaadd0094003a5ab02df21a7bad", "74879f53e3e5fa580bdb3e2693861bdf40c0a8c1", "f7a271acccf9ec66c9b114d36eec284fbb89c7ef", "1d19c6857e798943cd0ecd110a7a0d514c671fec", "fa5ab4b1b45bf22ce7b194c20c724946de2f2dd4", "973e3d9bc0879210c9fad145a902afca07370b86", "84f3c4937cd006888b82f2eb78e884f2247f0c4e", "5760d29574d78e79e8343b74e6e30b3555e48676", "dea8bb5933e5463569fcfc505b97f6de5899d73e", "d99743ab1760b09b1bb88bc6e1dc5b9d0e48baac", "306127c3197eb5544ab1e1bf8279a01e0df26120", "0dbf4232fcbd52eb4599dc0760b18fcc1e9546e9", "ad08c97a511091e0f59fc6a383615c0cc704f44a", "a1cecbb759c266133084d98747d022c1e638340d", "f85ccab7173e543f2bfd4c7a81fb14e147695740", "9e4b052844d154c3431120ec27e78813b637b4fc", "d5b445c5716952be02172ca4d40c44f4f04067fa", "85e78aa374d85f9a61da693e5010e40decd3f986", "4492914df003d690e5ff3cb3e0e0509a51f7753e", "bef4df99e1dc6f696f9b3732ab6bac8e85d3fb3c", "1f2f712253a68cd9f8172de19297e35cec7919dd", "a3d78bc94d99fdec9f44a7aa40c175d5a106f0b9", "6ae96f68187f1cdb9472104b5431ec66f4b2470f", "03c56c176ec6377dddb6a96c7b2e95408db65a7a", "f0f80055ab85254ca58c1b08017969a0c355881f", "539cb169fb65a5542c84f42efcd5d2d925e87ebb", "745d49a2ff70450113f07124c2c5263105125f58", "594ec0a7839885169c65133cfe50164d4cc74b5c", "39ce143238ea1066edf0389d284208431b53b802", "58e7dbbb58416b785b4a1733bf611f8106511aca", "fba386ac63fe87ee5a0cf64bf4fb90324b657d61", "cfa572cd6ba8dfc2ee8ac3cc7be19b3abff1a8a2", "ef940b76e40e18f329c43a3f545dc41080f68748", "100428708e4884300e4c1ac1f84cbb16e7644ccf", "6f288a12033fa895fb0e9ec3219f3115904f24de", "85f6eaa1ed3ae15ec7e777b7f90a277eda38cf7f", "ddea3c352f5041fb34433b635399711a90fde0e8", "59be98f54bb4ed7a2984dc6a3c84b52d1caf44eb", "464ef1b3dcbe84099c904b6f9e9281c5f6fd75eb", "bc08dfa22949fbe54e15b1a6379afade71835968", "630d1728435a529d0b0bfecb0e7e335f8ea2596d", "3b42b5174a35c87194b42af583a618f21be8ddf8", "e9bb045e702ee38e566ce46cc1312ed25cb59ea7", "14fdec563788af3202ce71c021dd8b300ae33051", "2fdce3228d384456ea9faff108b9c6d0cf39e7c7", 
"0319332ded894bf1afe43f174f5aa405b49305f0", "23086a13b83d1b408b98346cf44f3e11920b404d", "1b1173a3fb33f9dfaf8d8cc36eb0bf35e364913d", "858b51a8a8aa082732e9c7fbbd1ea9df9c76b013", "88780bd55615c58d9bacc4d66fc2198e603a1714", "06a799ad89a2a45aee685b9e892805e3e0251770", "42e0127a3fd6a96048e0bc7aab6d0ae88ba00fb0", "d122d66c51606a8157a461b9d7eb8b6af3d819b0", "dcc44853911c3df7db9c3ea5068e6c16aeec71c1", "b1a8315b4843da3d0b61c933a11d9b152cfaae70", "8e0ad1ccddc7ec73916eddd2b7bbc0019d8a7958", "d264dedfdca8dc4c71c50311bcdd6ba3980eb331", "0509c442550571907258f07aad9da9d00b1e468b", "34c2ea3c7e794215588c58adf0eaad6dc267d082", "e16eeed2ada9166a035d238b1609462928db69db", "a7f188a7161b6605d58e48b2537c18a69bd2446f", "f49aebe58d30241f12c1d7d9f4e04b6e524d7a45", "e111624fb4c5dc60b9e8223abfbf7c4196d34b21", "de45bf9e5593a5549a60ca01f2988266d04d77da", "8d91f06af4ef65193f3943005922f25dbb483ee4", "2e091ec49b3d7a1f5647d9cdb844d72d33c05204", "7be60f8c34a16f30735518d240a01972f3530e00", "f3cdd2c3180aa2bf08320ddd3b9a56f9fe00e72b", "3980dadd27933d99b2f576c3b36fe0d22ffc4746", "98fb3890c565f1d32049a524ec425ceda1da5c24", "a71bd4b94f67a71bc5c3563884bb9d12134ee46a", "9cadd166893f1b8aaecb27280a0915e6694441f5", "4d90bab42806d082e3d8729067122a35bbc15e8d", "2b84630680e2c906f8d7ac528e2eb32c99ef203a", "182470fd0c18d0c5979dff75d089f1da176ceeeb", "293193d24d5c4d2975e836034bbb2329b71c4fe7", "f9784db8ff805439f0a6b6e15aeaf892dba47ca0", "60c24e44fce158c217d25c1bae9f880a8bd19fc3", "b5f9180666924a3215ab0b1faf712e70b353444d", "7b63ed54345d8c06523f6b03c41a09b5c8f227e2", "5b809871a895ea8422afc31c918056614ea94688", "bd63d56bebbc5d7babc7c47cedcb11b8e3ad199c", "592370b4c7b58a2a141e507f3a2cc5bbd247a62e", "5721216f2163d026e90d7cd9942aeb4bebc92334", "8b2f99b0106143fd0193fcbf2b07eba80dc7f8dd", "c270aff2b066ee354b4fe7e958a40a37f7bfca45", "cb992fe67f0d4025e876161bfd2dda467eaec741", "a3ed0f15824802359e05d9777cacd5488dfa7dba", "7e3367b9b97f291835cfd0385f45c75ff84f4dc5", "4dce568994fb43095067ac893bbc079058494587", 
"3af8d38469fb21368ee947d53746ea68cd64eeae", "2e585adbe1f434396ca6a669dd91914d4d4bf42a", "9bd3cafa16a411815f8f87ed3eb3cafefc25e5a3", "1659a8b91c3f428f1ba6aeba69660f2c9d0a85c6", "d6c7092111a8619ed7a6b01b00c5f75949f137bf", "d0b67ec62086b55f00dc461ab58dc87b85388b2b", "ec1bec7344d07417fb04e509a9d3198da850349f", "be48b5dcd10ab834cd68d5b2a24187180e2b408f", "176e6ba56e04c98e1997ffdef964ece90fd827b4", "3f7723ab51417b85aa909e739fc4c43c64bf3e84", "3209e3af49b7b9c253100b7a39fcf8d013fe36a4", "3506518d616343d3083f4fe257a5ee36b376b9e1", "d46fda4b49bbc219e37ef6191053d4327e66c74b", "a55ec6bade29f23f8cb1337edf417b2da2f48695", "00220a6783488054eb0fe7b915e882b1294f3318", "915ff2bedfa0b73eded2e2e08b17f861c0e82a58", "86b51bd0c80eecd6acce9fc538f284b2ded5bcdd", "aae31f092fadd09a843e1ca62af52dc15fc33c56", "b2ddea9c71cd73fa63e09e8121bc7a098fae70b4", "c3341286ece958e6b05df56d788456b61313380b", "84e4b7469f9c4b6c9e73733fa28788730fd30379", "252f202bfb14d363a969fce19df2972b83fa7ec0", "318e7e6daa0a799c83a9fdf7dd6bc0b3e89ab24a", "a6ce1a1de164f41cb8999c728bceedf65d66bb23", "ebce3f5c1801511de9e2e14465482260ba5933cc", "bf30477f4bd70a585588528355b7418d2f37953e", "a98ff1c2e3c22e3d0a41a2718e4587537b92da0a", "3b80bf5a69a1b0089192d73fa3ace2fbb52a4ad5", "c87c07d44633eca2cc1d11d2d967fc66eb8de871", "20a432a065a06f088d96965f43d0055675f0a6c1", "5a86842ab586de9d62d5badb2ad8f4f01eada885", "3b350afd8b82487aa97097170c269a25daa0c82d", "b234cd7788a7f7fa410653ad2bafef5de7d5ad29", "1be785355ae29e32d85d86285bb8f90ea83171df", "e51e94cc3c74adf0cccfac3a8035a10016ce8a3b", "6a3fa483c64e72d9c96663ff031446a2bdb6b2eb", "1c5d7d02a26aa052ecc47d301de4929083e5d320", "b351575e3eab724d62d0703e24ecae55025eef00", "d30050cfd16b29e43ed2024ae74787ac0bbcf2f7", "8a3bb63925ac2cdf7f9ecf43f71d65e210416e17", "81b0550c58e7409b4f1a1cd7838669cfaa512eb3", "7ee7b0602ef517b445316ca8aa525e28ea79307e", "4dd2be07b4f0393995b57196f8fc79d666b3aec5", "38bb66c97b35851051e95834639c205254771adc", "2d94dfa9c8f6708e071ef38d58f9f9bcb374cd84", 
"fe556c18b7ab65ceb57e1dd054a2ca21cefe153c", "30cace74a7d51e9a928287e25bcefb968c49f331", "4cfe921ac4650470b0473fd52a2b801f4494ee64", "afc7092987f0d05f5685e9332d83c4b27612f964", "7c4c442e9c04c6b98cd2aa221e9d7be15efd8663", "d9d7a4b64b13ed1bce89d3cbbabe62e78d70b3fb", "721d9c387ed382988fce6fa864446fed5fb23173", "588526811ab87b6e920b32ed9f78f91066d89101", "e9cebf627c204c6949dcc077d04c57eb66b2c038", "32f7e1d7fa62b48bedc3fcfc9d18fccc4074d347", "1473e6f2d250307f0421f1e2ea68b6485d3bd481", "dae9d0a9b77366f0cd52e38847e47691ee97bc1f", "2e98329fdec27d4b3b9b894687e7d1352d828b1d", "64f9519f20acdf703984f02e05fd23f5e2451977", "0773c320713dae62848fceac5a0ac346ba224eca", "3e1190655cc7c1159944d88bdbe591b53f48d761", "863bf7728f8506b021c2582885905a83d507a0b8", "3e452ca67e17e4173ec8dfbd4a2b803ad2ee5a48", "9b42fb48d5ac70b6ca5382f50e71ed8bf3a84710", "0b0c2d9db83b4f002f23f4a20cfc5a3d10295372", "6aefe7460e1540438ffa63f7757c4750c844764d", "31c34a5b42a640b824fa4e3d6187e3675226143e", "a006cd95c14de399706c5709b86ac17fce93fcba", "abf573864b8fbc0f1c491ca60b60527a3e75f0f5", "367951ba687e4e52ca4ee1327627b332afc45fae", "3769e65690e424808361e3eebfdec8ab91908aa9", "daca9d03c1c951ed518248de7f75ff51e5c272cb", "9117fd5695582961a456bd72b157d4386ca6a174", "b1df214e0f1c5065f53054195cd15012e660490a", "7ebfa8f1c92ac213ff35fa27287dee94ae5735a1", "7373c4a23684e2613f441f2236ed02e3f9942dd4", "1feeab271621128fe864e4c64bab9b2e2d0ed1f1", "714d487571ca0d676bad75c8fa622d6f50df953b", "98c548a4be0d3b62971e75259d7514feab14f884", "570308801ff9614191cfbfd7da88d41fb441b423", "00bfef58353564f4e4bd7e2cb68cb66953cf9103", "c47bd9f6eb255da525dbcdfc111609c90bc4d2ae", "e14cc2715b806288fe457d88c1ad07ef55c65318", "308025c378aef6acf9fe3acbddbfddcaa4271e8c", "c0cdaeccff78f49f4604a6d263dc6eb1bb8707d5", "0a1138276c52c734b67b30de0bf3f76b0351f097", "1f5f67d315c9dad341d39129d8f8fe7fa58e564c", "01f0a4e1442a7804e1fe95798eff777d08e42014", "83bce0907937f09f5ccde26c361d52fe55fc8979", "6fa0c206873dcc5812f7ea74a48bb4bf4b273494", 
"402f6db00251a15d1d92507887b17e1c50feebca", "2e8a0cc071017845ee6f67bd0633b8167a47abed", "d116bac3b6ad77084c12bea557d42ed4c9d78433", "0568fc777081cbe6de95b653644fec7b766537b2", "05785cb0dcaace54801aa486d4f8fdad3245b27a", "835e510fcf22b4b9097ef51b8d0bb4e7b806bdfd", "303225eaedd489f61ac36e1f39cd04db7fd8bd41", "949fff3b0a73c81e7ff3d47caf7fbf9c664bcc70", "5ac80e0b94200ee3ecd58a618fe6afd077be0a00", "779d3f0cf74b7d33344eea210170c7c981a7e27b", "3d9db1cacf9c3bb7af57b8112787b59f45927355", "cc5edaa1b0e91bc3577547fc30ea094aa2722bf0", "2960500033eb31777ed1af1fcb133dcab1b4a857", "2a826273e856939b58be8779d2136bffa0dddb08", "bdbba95e5abc543981fb557f21e3e6551a563b45", "939d28859c8bd2cca2d692901e174cfd599dac74", "26437fb289cd7caeb3834361f0cc933a02267766", "09750c9bbb074bbc4eb66586b20822d1812cdb20", "23fd653b094c7e4591a95506416a72aeb50a32b5", "071af21377cc76d5c05100a745fb13cb2e40500f", "7d8798e7430dcc68fcdbd93053c884fc44978906", "a3a6a6a2eb1d32b4dead9e702824375ee76e3ce7", "291b227b9ced8468a53870fc382ed13f5cb807bb", "7a6e3ed956f71b20c41fbec008b1fa8dacad31a6", "a0dfb8aae58bd757b801e2dcb717a094013bc178", "48255c9e1d6e1d030728d33a71699757e337be08", "459eb3cfd9b52a0d416571e4bc4e75f979f4b901", "c37de914c6e9b743d90e2566723d0062bedc9e6a", "5e87f5076952cd442718d6b4addce905bae1a1a4", "94b729f9d9171e7c4489995e6e1cb134c8521f4e", "87806c51dc8c1077953178367dcf5c75c553ce34", "0701b01bc99bf3b64050690ceadb58a8800e81ed", "da15344a4c10b91d6ee2e9356a48cb3a0eac6a97", "dec76940896a41a8a7b6e9684df326b23737cd5d", "99d06fe2f4d6d76acf40b6da67c5052e82055f5a", "445461a34adc4bcdccac2e3c374f5921c93750f8", "e4c81c56966a763e021938be392718686ba9135e", "608b01c70f0d1166c10c3829c411424d9ef550e7", "4e581831d24fd90b0b5228b9136e76fa3e8f8279", "9ce2fd6ae16b339886d0ce237faae811230c8ce6", "1369e9f174760ea592a94177dbcab9ed29be1649", "0ba1d855cd38b6a2c52860ae4d1a85198b304be4", "7f4040b482d16354d5938c1d1b926b544652bf5b", "4cb538b0b30dc6b425d2b5a5305d8a629cb1a847", "fdbacf2ff0fc21e021c830cdcff7d347f2fddd8e", 
"4e43408a59852c1bbaa11596a5da3e42034d9380", "b41d585246360646c677a8238ec35e8605b083b0", "82a4a35b2bae3e5c51f4d24ea5908c52973bd5be", "00cb35cadee5c1ea11bd4c37cdfd234859c69617", "55ee484f9cbd62111512485e3c1c3eadbf2e15c0", "b1c5581f631dba78927aae4f86a839f43646220c", "be89d0288944090b86b761a1912fb5248f366c21", "3dec830b2514e82c714162622b3077966660112f", "5f1cd82343f4bd6972f674d50aecb453d06f04ad", "aae742779e8b754da7973949992d258d6ca26216", "914d7527678b514e3ee9551655f55ffbd3f0eb0a", "c12034ca237ee330dd25843f2d05a6e1cfde1767", "69ba86f7aac7b7be0ac41d990f5cd38400158f96", "48910f9b6ccc40226cd4f105ed5291571271b39e", "2babf665198a91932a4ce557f627c28e7e8f31f2", "e1f6e2651b7294951b5eab5d2322336af1f676dc", "d91f9e8cbf271004ef1a293401197a10a26ccd1b", "5a7520380d9960ff3b4f5f0fe526a00f63791e99", "2a7058a720fa9da4b9b607ea00bfdb63652dff95", "d53994f28deb2800120fab8a42852813b3b8c081", "244601b6f558843f946010f34229b0184b016c65", "9fd61592cefddd89718a7b22836b518b3be349c4", "28589357a7631581e55ec6db3cde2e24e4789482", "3d204dbc13f59f1a1678c773b30a1d85e305f548", "98f1613889657963b102460e4e970fe421c6ed3c", "784731961819abc5a5a199be1573abd828bd9af1", "4479efe6c81bcdb07b18d1cb66a06c66c6137fed", "7e7b4b4a84c2aa0ee69b5cea3a4da7f62a0a37d5", "234e3f821c31d0b5b7c59c3c013ad258fa6f5912", "6e795100d9689da21cdd5de16c3323afa28cccbc", "64b1de5ebd431354816ea2ebe04dd21b1953bd4f", "41612c66beaad320af9b7d34407c7d0f4ca7bfea", "0c553e57cb6fe7bdf3212fbf86bcc869958db27f", "033c3114f4951d338e34af67e1699ef779ab258d", "0e13f7fc698cbe78ddbf3412b13ca27a4d878fa8", "e6d4c0ac2352f108a078a4fd3f908a03b8571f2b", "6d6bb981bc8470de23e30890bd96a76ffd2b7ced", "2a6327a8bdbd31e2c08863b96c4f09245db8cab7", "f2bc0ccda62bda25adb3e8862a48ff373e8b4da9", "5aa63f9c0310c4dd64801b379266b778f4778445", "2056fb4cfe4aaa8a5d833f7494589499c2c5e8f5", "32d6ee09bd8f1a7c42708d6dd8a5fb85ac4e08bc", "b7a0e7dab11781c252e1145f3526aee388b4136d", "903210406f14a12b481524d543b14f16114797e2", "e2e920dfcaab27528c6fa65b6613d9af24793cb0", 
"f8ae3654c41b6ef5c5035a6db65b80137ad9a267", "7da6695d4a205d1404d6e9bf503527da8b13813e", "5f39d07dd39e5d7cfba535ada3a0ab9d5d0efb5b", "df50e6e2ad60825167c6b3e641eb5cda0f3dc505", "507af6591900a7165c529eca9fd370008c1ac87c", "246ec873db261257833231d657ec8995d686cc3e", "02f8c803fbf02bae0cd4ba8943fe3acccdf37402", "8e9f973e9d01fdd275af6c1460e5307d2ff3d2bc", "65683bd97720bc18a022b23755b32c8c988e8d5c", "46282f10271875647219b641dac2cc01c7dc8ab2", "0fb680b5136d80c13e8d15078ef18ca4aac269f6", "d64b24e9b01f4681d92fc29f36e46d94db7b8bb0", "8b0a4d41ee469547163ea154ad2b522d6d335671", "eabdefeb685dd71a39417bf40247d206af4f9b9e", "63ebe80e020d902bc1fdc865c23a9ad7d1eac17a", "44703dea094eb9558965db9439a07b9a74fd36b5", "c591cb28d12b7ee53af4e5c2050b74071527c248", "784ee59ea98a0878f1ba709f4385bffcdb4911d7", "a71106ef95103276fac010c10291f6dd6fd9d9f5", "938d9dd3e35cb8af5fb6b8b3f7c7ff9d6ba8b253", "714e8209cb0e2dc5dd36107a8d9d71e55ac887a3", "fc7627e57269e7035e4d56105358211076fe4f04", "e3b0caa1ff9067665e349a2480b057e2afdbc41f", "6dc82921719319deb1a193febb0388b2f32cecb0", "4ad51a99e489939755f1d4f5d1f5bc509c49e96d", "f01da5c6f99b8be70609158f0334d4a1ba987a65", "44206d5f810385e66aef5f29083a610d91815343", "e94dfdc5581f6bc0338e21ad555b5f1734f8697e", "2744e6d526b8f2c1b297ac2d2458aaa08b0cda11", "c53352a4239568cc915ad968aff51c49924a3072", "6c40fc9df6588f7cb721537883167eede1b8d369", "02c7740af5540f23a2da23d1769e64a8042ec62e", "9b2a272d4526b3eeeda0beb0d399074d5380a2b3", "9ca542d744149f0efc8b8aac8289f5e38e6d200c", "75858dbee2c248a60741fbc64dcad4f8b63d51cb", "7b455cbb320684f78cd8f2443f14ecf5f50426db", "014b4335d055679bc680a6ceb6f1a264d8ce8a4a", "b8084d5e193633462e56f897f3d81b2832b72dff", "b2470969e4fba92f7909eac26b77d08cc5575533", "d0eb3fd1b1750242f3bb39ce9ac27fc8cc7c5af0", "cbb27980eb04f68d9f10067d3d3c114efa9d0054", "8bf243817112ac0aa1348b40a065bb0b735cdb9c", "f20e0eefd007bc310d2a753ba526d33a8aba812c", "6412d8bbcc01f595a2982d6141e4b93e7e982d0f", "64b9ad39d115f3e375bde4f70fb8fdef5d681df8", 
"78a11b7d2d7e1b19d92d2afd51bd3624eca86c3c", "a3ab2a7d596626a25f680b7dc9710ea2d34a8cbb", "91a4ebf1ca0314a74c436729700ef09bddaa6222", "15f70a0ad8903017250927595ae2096d8b263090", "57ba4b6de23a6fc9d45ff052ed2563e5de00b968", "5bb87c7462c6c1ec5d60bde169c3a785ba5ea48f", "93d11da02205bbc5ae68e521e421f70a4b74a7f7", "cb9921d5fc4ffa50be537332e111f03d74622442", "291de30ceecb5dcf0644c35e2b5935d341ea148b", "0019925779bff96448f0c75492717e4473f88377", "e7144f5c19848e037bb96e225d1cfd961f82bd9f", "48121f5937accc8050b0c9bf2be6d1c58b07a8a0", "fa641327dc5873276f0af453a2caa1634c16f143", "7e2cfbfd43045fbd6aabd9a45090a5716fc4e179", "d40c4e370d35264e324e4e3d5df59e51518c9979", "8bfada57140aa1aa22a575e960c2a71140083293", "26ebe98753acec806b7281d085110c06d9cd1e16", "f3850d8ec9779e8e15da9831ba23d4cdca1dd4ee", "f29aae30c2cb4c73a3c814408ee5692e22176329", "01e14d8ffd6767336d50c2b817a7b7744903e567", "8ec82da82416bb8da8cdf2140c740e1574eaf84f", "de0ee491d2747a6f3d171f813fe6f5cdb3a27fd6", "8cd9475a3a1b2bcccf2034ce8f4fe691c57a4889", "20e504782951e0c2979d9aec88c76334f7505393", "80097a879fceff2a9a955bf7613b0d3bfa68dc23", "dd471f321ead8b405da6194057b2778ef3db7ea7", "a75dfb5a839f0eb4b613d150f54a418b7812aa90", "7224d58a7e1f02b84994b60dc3b84d9fe6941ff5", "0e2ea7af369dbcaeb5e334b02dd9ba5271b10265", "de0eb358b890d92e8f67592c6e23f0e3b2ba3f66", "82eff71af91df2ca18aebb7f1153a7aed16ae7cc", "f0b30a9bb9740c2886d96fc44d6f35b8eacab4f3", "15ef65fd68d61f3d47326e358c446b0f054f093a", "177d03c5851f7082cb023a20fa8a2cd1dfb59467", "35f03f5cbcc21a9c36c84e858eeb15c5d6722309", "72a55554b816b66a865a1ec1b4a5b17b5d3ba784", "cae41c3d5508f57421faf672ee1bea0da4be66e0", "91b7270b7f2a8a52df6a689f73d14986b2d48ba1", "ba6769c165967c8dcb11fe5e0be2153ddbe99c7e", "7d40e7e5c01bd551edf65902386401e1b8b8014b", "cd2f8d661ea2c6d6818a278eb4f0548751c3b1ae", "4c8ef4f98c6c8d340b011cfa0bb65a9377107970", "059582bee125512b127296364e7700ebd9f80436", "20adfee9f931b48ad6ae236dc50b8106573d03f7", "2a067874fc1ec318b6d23f34bdb13ea4e95d5ca6", 
"919cf6064f6a9cd02f377752ce259cdf46562ac0", "b245580bd7bbdd8f96fd079fcfa23e05e731a0df", "f153f5099c69b644f2222a80de798ae82dc36d67", "423e0f595365640b653c1195749e01394cbcd937", "494daf40d90a07b58690985088e5bd109ddb315c", "e912481d2d885244b1c72e5d74932429394a5789", "8377ac1b2dffb11cf48f456be2531c95d14aa6e5", "ea8707f8d527018c063a688bbd5a88f74506b288", "03593afd7976bae2c105277f61f335b64fc3cd19", "75370d108f053f04dfbe8d42835f63fa165adcb6", "ac479607e6b44c69022a56b5847a055535ae63ed", "b4a60cfe62d78e315ed4206d455022ead27ecbf0", "39b765988e8bb6f650218c839640cac56e62adc8", "1d1603a1ec73a9a0ff972f3898c94eed2c741e51", "9b75cc65a03e5d817c89d71b24404e791f79eb6a", "336067ade694c79b5838b2e8158acf18546bc5a5", "19cfec264e863793dd96a5f308a3b603c6b9912e", "a1669fa7d3d8f0c0cafe770c79007949cd32b245", "5c2ee7136e7672543f529548b57884f644cc3570", "e608ccf3ac353cf7204ccf5659983d69bd09f515", "a31b862f1addbed64a2dac64d7d416e129cad6ad", "fc73090889036a0e42ea40827ac835cd5e135b16", "759f4f7601292c37e2f1c4a5a9f53075e9e355ec", "202cbc83c22a9c7b3d878cc1bed1c5cf152eb6fb", "23e85d879c37fad1e6c07fa13b896cb99535c297", "daef6fa60c7d79930ad0a341aab69f1f4fa80442", "d589e218cc3f1b10e77d272cca5df3525e06fc95", "81eae59b7462824040198b7be94df4f916024a4b", "f3ca251ac3b05397ea6d72f2a9a6f0cf619a2a32", "d2f3ba37ef34d5d39f799f8dd3557f1eb795aedd", "61bee369916333bac86103cfb07d6d2ae5e091b6", "7b8c709abb34fba9f5404293ac5209a78258acbc", "c419b3c1ff156a0e8262f8f43a4118fde57ae37f", "2d105eea4f594519bd337298c55b9af3da178293", "d78be582827941679ec2dfc1f56c254f61302476", "d10f8d58bf50f5b097b4344dc8cccbbe0c330bd9", "3daa086acd367dc971a2dc1382caba2031294233", "8e7548911c41b6f3a6ccbda6d3ab913eaa41e721", "7596c7ed735970813a1b47dcb5b998058d68f1d9", "bdbf414a2059d542f501ad9b1d21eacc9831082b", "a947448d1db19d99abe6de2f6b6d67804786a8b1", "005227ea30edc2907ca2c01d0729e247e2d9a350", "a663e729cb44cd02eda2d2a08d9117839dc67ca1", "fdb956c7705b7f57f56f944a0f3f4ede1d6f77fa", "ab1f057bfe02b80a14f4c011abb9ceb2a9c98b6c", 
"f6ad7fc8ab40c2c892dbaf3922b30fe0c0311c65", "ddc8f480898a846c2a6ba0dddd7d733ce35f0e19", "cea354a20227e80e1b37a9677fc8f2ba00f908b3", "bbb71cbca731295758563acdc67273b99618e1c0", "34ae449ae64cd2c6bfc2f102eac82bd606cd12f7", "8bd8b5ee7a38ae0a148d8afa1827f41aba5323cf", "d45fbd818f032566e9e8f8bdc0f658cdd6873e8f", "c5b6b81a75f7ec3211473eb1ca58897a6537a085", "64d1fcc26c2af47c8ed7436fe91546ba5bfc7a1f", "f832fdf1fac092b4140bf81d38e6bc6af5c1ea65", "fe710adb0e9e647d7ede0583b40d2aeb36c1fc7f", "90d07df2d165b034e38ec04b3f6343d483f6cb38", "0123d5b91e56d4c6e92eeb25b461afe8941f48b7", "9e1b3cf334aead8d2c29747f6ee7d1291dd83708", "d9374c2b5adb77203a37f9bfa8e1ec06f99ffa4a", "cd2c1e542ae8c08cfb8baea3dff788d143232de8", "fd9d7efd0ecff49249844a0096e77b2f864fae0d", "f499e84b489b0b4afe86e303803871700e561063", "074a12f9187beafe40386f19aa2544df30fa5703", "15fbe139cbfd19513763db06b8ffa2e21168ca4e", "45804e509e6d6e772d1c40e804e90c6659988abe", "428cfbd3c237d04edb06690a7e9e9a40c62fc8da", "a726858df7c9503116504206577a938df1a67815", "eb43002e771de05db5e3e7e8eb6fcc75de0e30c4", "485eb9e9392ebe6297e049f646b94a4ea8814cac", "6f8fc12004fa068c424369793fd39426e772b07d", "1560095c887f29ad2aa38c4fe098120b98f5f946", "8fe99c3d5ad9af54641dcd6b55e2b083a363d515", "e3b3ab8ccb2c2998e4a6f326a4d4ac5f9b99dc7b", "f06f4bc74dc0be0a628e99a5c86ab3e00ed00276", "7eff2b26a16e0898ebdd141e930d011a3d3e4e8b", "18ae7c9a4bbc832b8b14bc4122070d7939f5e00e", "7b67c38a6f49e02c03e1cea98146a506f607b0d7", "a2a8f6bf8f8cbc170fc8b4ac0fd4e7ec792a9b8d", "a2183537ccf24eb95e8e7520b33f9aa8f190e80e", "aa54d0ac723c1a45e31df69433a72f6dc711706a", "402dc9ca10b92e848741991256d8b53d220d6be5", "e3906b524a18cfa329c20cc422de78ed66d05f01", "71d68af11df855f886b511e4fc1635c1e9e789b0", "e1f03d7a1081d7f0a91535888909f8eb9709df1e", "110d474178b0bb5e2050537d89d08a76106ab736", "d2cd9a7f19600370bce3ea29aba97d949fe0ceb9", "8676167c1b45dff6fd9a35bfb1d3d326eafb6480", "eb14b933a374df4e909f6f61500d7a1ad34a8d8b", "a57995ecac8275b4c7d614f17538cf771b0b1657", 
"fedfc6915e2abcad08aeb9eed33dd3d1d20bf971", "a509f0528c5fbb36993324922b13a9a303ce82ee", "c192af9d3c689c3e9ab60bf8e704f900587c3bf2", "286812ade95e6f1543193918e14ba84e5f8e852e", "add85ee833e2a1c5cdbcd206d5423d63f20cda24", "9e1a96e0fc5d8867e82e6262a8d9499b3ae806e5", "e8f9a45fdd76fa33855d9a7a6e70ff1821d0e2e1", "06dfc1c6f62bffd5f8b8619d8c51db1ec4d25f3f", "6aa0a47f4b986870370c622be51f00f3a1b9d364", "af36ce6d1f2cbc61dff80526c530b714f797cc50", "4c56f119ebf7c71f2a83e4d79e8d88314b8e6044", "9cb152758ee57f2abcc0b59348752e528a2ed2f7", "c0aeae062aa27b71be728f2b9bb22b5ba0de27c1", "bb72fa16dc5e13ae37ba2c942043643d269b5cac", "b1e14a9292c071d63f45cd3c1f645e83027be4c2", "351550aa56b81de2eef4b8379dc85722366635dd", "6cadbc0122376be3c249ecfec7de8247ffbc4fb3", "1057137d8ebbbfc4e816d74edd7ab04f61a893f8", "e3f0c5a51d6c5085fbcb64d872d7db438da27474", "55ba5e4c07f6ecf827bfee04e96de35a170f7485", "a64eb5aa719bdb2f58023c1a97094daeeb8e1ff9", "4583d7d1d76dfe18e86e91f7438ce1a03cdcf68f", "5f34c96ddcf992e1b8660b5cb01e3c311b05023c", "683c77c94b53db26ab3175bea0cad5b5f38bc672", "383fae9c0b9b13af0ce5c5e88fa8ad40c7a3e7aa", "e352288274f62be6abc37c944120bdd4979dc250", "be427965a79aa1191a1ea9dd79717d89bcb74ad1", "6ab5acb5f32ef2d28f91109d40e5e859a9c101bf", "c7c0da2219c4324c5e8d5bc062c499286489c437", "1244b6abacfd74ccc6ac41e163d5a727c2b2dbf7", "9ef15afd2a6f58e96d8719d8d92fa10e16c8a243", "36fd702e5686f91b7e45434f8e2f6ef51feb2d54", "2479eb0d2eca1ec24825a68134fa67763c8f08b4", "228594425c26d4fa97e8bc2e22329ebaec5d4b63", "fa1b849697115ceede0a08ac552ea25ce2bf33a1", "1392d2f7310fcc1d07c3748b5416feaaf52c0b7e", "bb1f4c8e4f310047e50b7dc41d87292025d42eb7", "332740a7ababbfa0dbc974433bd5a213197c0dd1", "6462a1def50d89eb485a59f99b4094c33f705de7", "19bd2572ef52b8758312cd40e237e21f6f2a0feb", "a3c210e73d827e8a7ea78c91d620af249164b4ae", "74d745ece0f5d751bc96be2e0f3fd4b22cac25ec", "92b9c19da2c144257617e39bc8ace7293e710914", "3fb87254e76bba7ca98bb55d453b3b6f380cf713", "21ab1e521820824b41606554e94dd0584734d100", 
"b55dbcee433917d4fdf3976fa49cdd73a2bdbbb4", "3566f27d6857ce82270bccad45759da955060a35", "f57364601b020dccca729c967b11c4a5da43f3f6", "d1d15291dadc44f0cd192dc88bab3b10e2b07ccd", "d7d6200e41d574e2f3ddd9ded299613683519c7c", "2af680736f32ae37d579a8b5656eec1c6b158dec", "3d84ced34be1e8e61cb66f9270efc801fb4f4f2a", "72b4b8f4a9f25cac5686231b44a2220945fd2ff6", "84ba2ff2a4d75e46bfe39c18ad075f4972c3ed73", "d69ea1032b0fa7ed62cfb77c1569e66f2fa4703b", "b65bbf24479d4df65dcde2ac343f5c7cf96be3ef", "99c2adc3868050afdaeacbab976e8c48a3ce214b", "f40f8ef004fa18cd79d0a6a49f74b058dcfe49e5", "1037664753b281543ce300fed0852a64d24334ba", "9853136dbd7d5f6a9c57dc66060cab44a86cd662", "3714a415e63204e9c331b919cff6a14f7121c902", "43c76cf17767a43a345cd1a8d7c08d18578b53ec", "118546dead3bbde31ff4052b7dbf5a147d1e69f2", "5720784b7e45693109b867992e3f93e4c747e536", "126b98473cc25e604abd58eb6bcf720354ac7e7a", "4aab784972af579879758e894fc52f50ac37f4f3", "ddb49e36570af09d96059b3b6f08f9124aafe24f", "084f1a6c62a3464b1a9b745fee40af2895920301", "0318c3a969d714581fee93324f5fbe6f5ce685d6", "694dda2a9f6d86c4bf3f57d85dfd376e2067ec62", "dab6921a578c9ded6904a5a18bdd054aee62d2ad", "983c6d9d8aaeca30e6b84e2b2c6e2a514d6df254", "863ffd74d39c33b6351dea90c6f7f1e2bdf2d97c", "e72d35ae7c1f477ce4341a5fb3a15bcfe0481a0e", "2eb610d67ac07136fce4d9633edc28548aab76c8", "3f0e00188d751829c4548f9aacb939b982425ebd", "aa4928142b99224e96536d402ef8869b8391cf79", "26d721a3ef7b694fd358b8ed42cdc0abea7f2e9e", "c542fa8c4cfaff6a8d8efa9678e42e1b9ead8aa9", "1203b4fe233bb7514d7ba257089392c16a83a17b", "b5f9c5af707f55d96b1d3d65d970270d35a60987", "1899a89642e73bf32f283c71caec60765906a61b", "09af91e913324255bd8358e62cd3b8a25f7141ec", "b1e218046a28d10ec0be3272809608dea378eddc", "ddbed5c2a4c6de4304e2b7aa1d074f5374970b1b", "b6555e6d52c3c9a7e04bf6debe6a6f476c1c79d5", "2acf319c5eac89cc9e0ed24633e4408dbd4a8a5b", "b2e67e67e5bbb19a02524afcc217929b0a76a9a7", "f22a7a7a8cdd323270d1f8173c0289d61981dc73", "494c1630c93e74aca3169ae33734f2f733c95e05", 
"9f483933bcc872771707dcf0acb1382411ffee94", "2d2b1f9446e9b4cdb46327cda32a8d9621944e29", "bf47f87ccf1b2f9ad18cabf29a715114185648a0", "d23ca7d5f846223b793a5694dcfd9cc931aac1aa", "18f70d8e1697bc0b85753db2d4d64aeb696b052a", "560e888d8ddaf61c56a35fc9d20997422579900a", "b83155a18b95dcb551a0787b135d61d99eb82ac5", "f50bb87a850ad0f44eef9c2acae5b1dcccb945da", "7f04b65f2c6f96c7ce000f537fb691a93f61db52", "7ba1358f50d52004e445c5fd6def2fbba845c0d2", "f96bdd1e2a940030fb0a89abbe6c69b8d7f6f0c1", "ba49fa7c313666a6e6e9b19c37c78d1a75a95d34", "c28f57d0a22e54fdd3c4a57ecb1785dda49f0e5e", "9b3ed8190d99b107837de142324e4aa2be8b7eb2", "66c92c9145c2b6a304eb1b3a58e2a717884fe064", "a3239de6f4c300b135d5c417890ab68be8e90801", "a7ee9c43058efbaa4caa7a93fd833d3c6e90f4b2", "4d8327e574409dcd30075a3879cd51175b701c61", "75d5e67e31cefa09ae46044fa1f9f7696e058c99", "8cc86387a3185c2c357f2ae748a17a8173ccb8e7", "271e2856e332634eccc5e80ba6fa9bbccf61f1be", "72b3202cf98c28249bdcab6099c9d3f61cdc4552", "249d4ea043ca1917025b00341b4339c3d76fcad5", "8318f563f915031c677decc3d133c2aee803591d", "de3ef07b202839a458c3ae9f16bc59e60dc857b1", "1b2dd300a43d0553f1deb578d9aea45d99472136", "5cd2a7ec2b47086b1e9ff6ebc096eae5e03d2f67", "dcace6f0611b77177f4aff4bb650afab0a819575", "560b46547720b3a892f90a337835875f74f4f4ec", "72771b33c6756426ae3228f6a3fde4ddf880986a", "e8e8d8a619eea66c41a1a2bdc0a921a3b6d74836", "65ee4de888e5b934429dcb126ee0ae544156c9bd", "c18d80d00f2a7107bfe780eeec21b51a634ea925", "00f5c0ad23a9d7c312a316d2a8c6539aadfc95aa", "bce36092b1910ff3d492f86aa3a39ed8faaf72d2", "73acf6406301440db5cd20d59843b145dba47556", "ba25c219b52d675b579941364ce6ee6700cea8e8", "98d39a1af025a29212e054ef36b4e0004bff4256", "8eda0af45fe1fe32a22661aa1d03e7267a8181c8", "55b55426fcba3e298a20a4b95753a906956fc2ac", "c08420b1bfa093e89e35e3b8d3a9e3e881f4f563", "abb396490ba8b112f10fbb20a0a8ce69737cd492", "55e0eb73c5243fb69695e16f3d7e47bde8e7c22d", "b1e7da535171de484992a0f1a4fb915e67fb873c", "7b522c5d6d2d0699c4183a543b8e65b1a66d9e74", 
"8e579a8a43f6af1d66e927a48b89e8296eba63f7", "714794c74941e45798d9c405a4fec1138cff2df3", "3ea79a2158151c666bbf696eba7a5edb5b284974", "34f8086eb67eb2cd332cd2d6bca0dd8f1e8f1062", "2def1c373a2f7842d0367cb48da87d777ab478b7", "72962038460e32b0dd01d083f7f4049be36a34b9", "d1097fc0fa5410888c7d791a0594d361114caac1", "f1ac9370bdf4c408fdc242719cceb3eae19b9a16", "6dff2bcbfad53238d24f4467a9504ce33ecdfa4d", "3ac3386a9d1a5651ca2062d5c1b23766bea2568e", "4cb2f596c796fda6d12248824f68460db84100e4", "505942c5f9b5779bda2859e22e9ed0b1c0c7b54a", "bd3d69121f38db767bf820e5f786c7e55b7628a7", "4c1ef2a628627798939dccc072d33f9e12b48640", "3c49dafc82ee24e70e338b896868cd9f82f0edd7", "1b3e66bef13f114943d460b4f942e941b4761ba2", "f16695987ee6f18aa7ce1bffe2080367407fd043", "6bfc87bf7449e3ed8f4603b955613a5cfa56a6c1", "5fc541e1431aea41261569af2f9473c8fbd0e5ec", "6fb33358bc7e1a73e88b4a87fb0962366ab959c9", "26ef54b182fe25e87622a2e846024c8d637c8b33", "11c25e20b74416a97bb93f68990ed12c25c8baae", "68eb5404a22fcca595cc6360e9a77a4b09156eb2", "028dc6a134f1204bd9ae28213e2e6665e82ddcb0", "063792ff9a139a5b8375afcd35e4ae6c8d83c352", "7446cc18f173f2885dfea6dd27bcb725989f2788", "a3b183d041f8f3e90a2cf904eaab544070216367", "5e6944abfed38fd30d8be45ee0c24dc1c0525ba1", "f4b729d218139f1e93cc9d4df05fbf699d2e9d07", "48059da276a8a93e0bb3faaa8421589f09377559", "1cf8dce62efb5183ad5e8512ddb9a03a2b0c2edb", "6af7f8817d19192b157d9612139a7dcc9ef8930a", "f329eebc11b8387dc25ecd2271d1264529d735f7", "2f4be1b5655df160c31cb132172922e0f440857c", "fffd4222961016e526631fa863de79df01c2497b", "a3571721ce0119866088622755d678be7ee4b374", "22ea4705091ddff95924cc092373aa8e87699765", "9cb96a3f7895bb36d459acbeb65ccea3970e0634", "bd245c4ee85a3685b0a8bd678d27bdf901cf5706", "5d11a2e6a6fab65d21a2a7082b6a37f86d5bb1ce", "fa4e709a7008248869584feca81250a8da8291e4", "13a82da2bfa24583caf78ab1d14b5cfa4798b3b3", "1d6a99e11ed5576bec973a35bc50d196c9a38e26", "3affe6f9c2244f4b32c1c0f7d7f1d24770d40efe", "4a9145d52184b20f7241f52509034819c79ee162", 
"2e38ff75a80ec92111261bf368781c7eef89eb14", "962e4fee051d00737bf4a87fd05d90ef86252ec7", "4c7c1c2d6eebd227b1e768eaafa5a61e27552567", "19dc5a1156819230e6ae425e9c9d56e898d6bcb9", "9caacc6001c25ebef373f3cd1746d20450c9480c", "51144365fac99623cc7ab9038b7cd24b9082d5ae", "aab9a617be6e5507beb457b1e6c2e5b046f9cff0", "2337b5b0212ad9b7fafd89d91016446450db64eb", "f3906c390e378ece7f785fb553e0b89c2cbfeeb2", "512ef8e228329e02b651e2963260f569a72b4dde", "64c9cc92ea496b9053fa5326567487b5f08bb13f", "a99b8c2165fea3e90bc8815a965b7df961367419", "55f669d7def736aa2f9454a8645cadc55df9ad87", "d5ddbb8cbd770f154af33d38f757ce5197757c34", "2c13e0f614712c6a478adea3ce011750d5e77587", "6c825dab1405d193628298adc552a4e4d17c7e69", "04fcc898fd376338f7d279a06fe78b42a5a4d74d", "894f540ed8e603a51c22c7040a5485dff856ae25", "be3fb00c4190e26b6a50a8c10c7a9ea5b8d45a2b", "7eae1076f95bd2a16d2901003fbc2e7441c80a56", "6577d30abd8bf5b21901572504bd82101a7eed75", "0c68a5c871b92639631ae65eea8da430054bb729", "0ce4110d4c3d8b19ca0f7f75bc680aa9ba8d239a", "cf65c5cfa2a2b0370407810479f179f5fbe88fb1", "6e82ce9897093ce4f5fa795887273992489c380d", "044ca9f2194aca3cef7fbc6b94eb9857819a17be", "58b49db3664cd623699eb4a69d1a9e3993bca731", "5cf549ca5680491f12a5ac5d42b171a64088da22", "1172b5f3edafb5eea176ad64ef0cec762ee22437", "bcaa5fab589d95890d539a3119657fa253176f0d", "0021e292c9d8fd19f5edd1cde5bc99c112f1992d", "82d3dc1dd35e7d2d13bc43614b575dce61b0aba3", "c20ac2441e6ec29ae926d3c5605b71ce10ef6dff", "9a7973f85c7db836efc5d00eab80019d7a94fade", "1edc51a447b9e26fdabd25b6daa1729df5bc5834", "961939e96eed6620b1752721ab520745ac5329c6", "695423ede04c7ccf05997c123fd8ab9b94c4a088", "c1a70d63d1667abfb1f6267f3564110d55c79c0d", "373813010983b274401b9b65157df57ce50f7011", "a80d991c0265e6637d8acd3bf75f2b4ccb3c759c", "760c5d7392f4f193312af0f5c4f178b73f3f77d2", "dbc749490275db26337c7e3201027e8cef8e371c", "d929534024614e3153c986e55d758ea7471d3fff", "e21691680d3c5bbccd89b0850504b604a53a940a", "906b93f519218d4ba2d1640a43fa53cc24531d4b", 
"0b875d54bd3ebb87ff63f0cb46bf53e1fa3299c6", "cd0856cd440cfc19b6989361baa084f61be1e5bc", "526ce11a6c80716fca69bdc111f32dfbe045e400", "19b9e5127155730c618c0e1b41e1c723f143651d", "1f00f197f222be4a3b081515eb8d3f014860cec5", "3f089131d71321461f84a06064689b6daad951f6", "5479da1038a530beb760a38dbb5b08947dfaefbd", "a73014cafb3e0b89ef6dae3372606c3d028ba2e3", "f8d66c5844b5ffd50ca25a1e99cc154d353d3ee9", "1e747986c9efd481d380b28896115812eed54f8f", "0e9f7d8554e065a586163845dd2bfba26e55cefb", "ad7a3b600c67a604d51cfdc721069af31469c397", "99cc7338102f4d979dd793f304ada404e11cb623", "a40b4bf3a921f07f4d07838f9092416189e077b5", "3898a9dcb22f87413f08bb44c656f4129e1c42df", "03318abb2db923cb15dc28bec1209a02be308ff6", "1f72bead6784e66e0e0e8ea434a342eedc7f9e64", "ed68ef03a97b8441fd6a676e095b01dd3b5bc54d", "aa5c2ac60a288132efeeb85c5af1fd0b39294eed", "8dc37d4993d2e3ad145ff0959b71ffcfb507e571", "b5710e847e1428e24713d6c7710738fd54f5382c", "039b29a58861f78fd4cac0f51c70eb90398091d6", "7d9dbef9bacf1257e942121f82c3f411f2a78fff", "b85901174fa83c76ae994603228ba5b4f299a1af", "50fc40ec6166dc33c6e59ef5dd75230651076f44", "504c1cc2ddea7db0f684269be3df05e9e95b6e2c", "87169aaa3e44fd1568a2fdf1e4ba2af385c34f4e", "2546f055c217055cdd140a55d812036b46b63e38", "50bada01c37daf2ed11350b4b0d2be28d9bafd0a", "02cf88e5ef78eb89c4b22ff301bc1211f5beaae3", "19911c7e66b05d5aa28673608fdfc50ef00591dd", "5392e33c69dfeaaf5b7b5743da7e3b79555ea371", "793651f4cf210bd81922d173346b037d66f2b4a4", "e032f5bbee040b3898170b3f9091384658caf0d2", "1fe8b8dc1271b0cb5ce37f21be5809546597cfdf", "baabad0fbd0a980f8ad6d4b870ab20dd17b9b1cc", "1e3739716e163fce6fded71eda078a18334aa83b", "617253f275f14490c61dc9d8cb23ceb9c9d4ba35", "b9696bdba6e16959258bad17ce26e6a643be5faf", "16344102d21291483d1fa7484be28b563df434ce", "559f611d95138c6d4255e493f59de33f9f786538", "0126f1566a5a9ba051137afb6c1fe28f93584def", "5ac8edd62fe23911e19d639287135f91e22421cc", "ca23c473becf0048ef294f7c25e579d7095cb27d", "9b7884c7522fbd8ea52234a2c1bc1454a81f7426", 
"25aa2410b759d98c7102f071a17c3b8934395048", "0a3a4d3ea026ec16a2340728acdc33764fcadcb4", "06b4522433beca98aea99f924fbaeb8f861df8cd", "2f77c0908716b0febfda19ff6a0e2970c23af440", "0aa74a922604e200fb92194301d4a4786cc1a74c", "f011882fbfbc0a2436e01c98c1cf517355aeb20a", "a618cc9c513762d4eb5db2f7f7b686e7e2b758ca", "a3fcf3d32a5a4fcc83027e3d367ecc0df3ec4f64", "c90b109301244e59771fec431a8d50a78e395956", "eedfb384a5e42511013b33104f4cd3149432bd9e", "b4ef46a5d5f52a9a55e78aad9b34c5977ea8b6de", "0eada24899ad4b177e91c4b9303898feba56ab92", "fe7ab34e71d110d836a30ffb287306a02af5f189", "ae6af6bfde5383f74f40092a5cd48ed6bfd7ef9a", "d13e81c7a3d6f62948a68663acfecc3a480d9b1c", "4af133c49d39c8b7aa9d82c17f1fd2c70e36233f", "0c1d40de56698e672d3906b96f47ae1361fc3912", "c5697c28272dc7e81c451a63495f229c740d597b", "2b11f435eaa58f141b050f3ab63279cc716507d9", "268e91262c85ff1ce99dfc5751e2b6e44c808325", "59cb6df1efe0d63b20f3e976c308e99334b9c363", "169731093e6b1a5ca51805a876011a9c250f11cb", "3b995415a12903a45587827651c38e722f98a8c0", "8a4df86ad831b107c49e431fc2e58c0a0ea3685a", "921aaac9b33ec6a417bfc8bb0e21e11e743342c2", "617294408da21693521adb2686209b04311703c6", "08a78e0c57d0b6474c09ef8c6d118b3e95da1e18", "ed3bb078cab7f423dacf6e946538c6fd22e96e1a", "c93ac931336befe341dc3eacfbf9695cca54f8f4", "36ea75e14b69bed454fde6076ea6b85ed87fbb14", "808d7e7c411ba8e4b31c63f34cce5a195db3dd38", "c89ddb0e978b78c062fbf9ea992da83e4b38778e", "415ce470a3c5f78bac68d945af5805a1c8ab86bd", "2d9a49666bd72e7ba06579d9411ceb2df5205466", "8fe43144c0ff36ffefca869eec0a63e71ca02049", "13836bf986d654b17bdf0dd92ab0968507484515", "0ef906e6c67d8a4a2558ee8978178da4402024e5", "d3f945e0f14cf069d8a3f97497e94044f5d3b21a", "5a6012176189639de6517d6515fc1aca58e05c11", "65821b839b8c6ecf6cba7be0ca132da59075e1b4", "eae04b2343fb2852b67c69b43dcae87c6cf313f3", "6946acb595095407871992da62298254658f8d84", "82f293d1a33cb92c1cf3e9f9386509e8b3cbcc81", "ce0aa94c79f60c35073f434a7fd6987180f81527", "4e99eaf58aa5fb4665dffec0009e2464feb0f66c", 
"cb6b9cc908a2862a17a1b4560ead1a49989275bc", "7b3b6b874b89f22c46cc481448171a28ec919a55", "9304de56c284748273f7eb7d867eedaee971cb55", "88ca99490c129cc2b825c23d43c67972ebb04dcf", "9cf2f38da9650b6a34a4fe00661bd195d7ec9fdc", "938d363a87fa4020fe1e526c439f6f52e66c33c9", "060ea3f72ee63d909600caad168cb26b4777b19e", "50d337e06079496c3efde863c2ba18f6118b6c69", "f921e6f5085f1ebbd8289081e499240a89bf6c43", "78688698a5137e42062d08da586aa9dea86ebf90", "3a4a908350d856577ac48caec10c0809e8396acf", "48019c177ec1e650d0d67feaaf38ae12a74fa644", "1f0088d0f731ca563f01fa72678ebbd541aa86da", "91ab486c5d654f60f7c0bbf62ce7532647ae6a30", "b68e8550eb4be5f36b30b15487a15226729ae379", "8bca3e860a08727511148e691835c9c516894b88", "3bb95239433149eb6e56c3feae13b1b85d6869de", "0f6c6f03fece9b0acd3d0866c09201ff42493ca2", "ff60f453888ac00a0e0b6f1649c1a699515a955f", "a9dd61f4dc1b033683d0152f5753e1cfaadc6500", "dffb64ac066bbcfe6aea6b11408b5ea62a40e9fb", "e8d1d2a61c5a259440ef9fcd301093b43e87efa1", "3ea27ba44a3e8a13148236807e569b909517ed89", "7e63dc2831c93437cc3a6f1b80722669fdfef051", "73e4076a532ec6a0633aed5cf6009414cdaf1f6a", "58c3aaf6157ac326e81f31ab5712072a506207fa", "652d3f33fd0a99808dd646aed228b45eacdaf34f", "79f69d04a9614fbeab554048802c25118033cbd6", "b67e6f9c38d9cd2c41fa906e177b449094159039", "d37bac011f93e8eee3b71efd2ea727d35bd3348f", "0b55b31765f101535eac0d50b9da377f82136d2f", "5b1b90a0a6d491b26f427824985d69d5d0693220", "c2145bd76d944b7f2d7097c6370f1d7fd9d79ac9", "8bda09b2fb85c317c6361aee1935bcbcf87c1c70", "a9fd443590628890844412d274e396bb0e383eed", "0ed78b9562661c550e382ed30de252d877a04cdc", "1938d85feafdaa8a65cb9c379c9a81a0b0dcd3c4", "2b3556d1344cb5128b89785ed1b3793b54527fa5", "b9a65dd30e417b45eb50add150e0b155e69ad14d", "47ffffb26cbc0cf0800cbf7efd370e0afd4aaa92", "0d30066576c029cd888d7c759349379bdb0e88c2", "053f9b10532a87e346fad281e0be81337cb525a5", "5edf937723d48b03443b0cb27d32cb7c3cff3c17", "57c82df62b8212df8efe8cfab33f666cc7a2fc8d", "9d896263d1200e49e463e00a19502e694ea4ad7d", 
"8602b2ef26a0f851f1f6f2f2ae0ce142eb64300a", "695e4c975740d2aedcfc42d7ec445b4b2b56cbeb", "fcb9597f75e9e1770df629d784cf03ed87d61d0e", "cd9f1f429b41c4c125df231bab8872e012ff5316", "d7b534178ef47def4aea4850782b13ef21bf8e96", "ef247c194162f76eb8d44b1f83c25a4002ab69a6", "7fdcb6638a9e01986cd8fb4133b4448700087faf", "5357e6e5d5fe06934bfe693d18b9f44bbd98f73b", "dd7faa9ebacb64bcf4210c3be76202c592e3d637", "a082b1ee9a5bafe678539e694197c0910d4a09b2", "18d7684c6b96caf51adb519738720eceb1b13050", "2375edf7a4014eb9ed1d0634726063f41528d6cd", "9bd36d36f7394272a9890aaa20f856d03110fcae", "de7daa206f1dc3d5f83c5342fc08e3e92ddfa126", "a80f062fce64f9935c89fe0d8caeb798e72e451b", "aedc9a615b97442ddc4fd07427ff8595965d46c6", "33e5d1c93e4195a1bfd303a94f0fc3f1c5e233bd", "e2fb8ff1cc25a285288e7b6eda4c32fccdcaa10b", "29322b9a3744afaa5fc986b805d9edb6ff5ea9fe", "9802885e39e0847374a2efae801b8b719c09c64c", "8a05ac5e375ebf80b5fb88f207a9d33d5765c27b", "ee56823f2f00c8c773e4ebc725ca57d2f9242947", "09d9d9d153119558e83643f0097ffb87e1037649", "2f8df9aeb5732a8831c4761e09c1997fcd5e1bbc", "d4d2014f05e17869b72f180fd0065358c722ac65", "d2fa8990bf4fb1eecd3019f3cc718c536118109c", "2d638ee3e358732f3c052b854dc16949fdd4a2c3", "87e207fa31099d20450e60f056a0b1304dbc1bfa", "0eec56cb77e52c8070dfd9088a55270f22536224", "ac952dcf1448b7afa8c17cbd8062e3fb1382da0f", "8ce9949b88726e117552ce3aa6901a5178db3bb2", "3209c4c71b22c7cd91e5be583664ce953e560a83", "db6cc1ccbc517f86d39e446792ce7d84b935d899", "9e97dc079139f009c5c98617c28825dca0d70ae3", "bea1ffedb87d7b0eb9cd4397b334b204fa450a84", "71ed20748c919cd261024b146992ced4c9c2157b", "16ab146b88cde533861ac5d409923579613fdf0f", "81a8b2e55bcea9d9b26e67fcbb5a30ca8a8defc3", "41eedd1e627a060f6955d8e04a3e9307a7b8ae6c", "fb2e805e61a5939f46352b69a355b8f6b54052cf", "3dfecf073b2ea403def1fdba11835b8a95cf01a0", "a84032e66db042a57722b4a3bc7301ebe567fb8b", "9e6ecc12794f1d3215f93376a32b350a0492ceb0", "4959489ce1f092d0d66afdf0c835f1b4e9162d3c", "20510478cfd6385d7d080ec435e8491d6b87c3ba", 
"4f9001753ceb18ee06f825687abe0e3d292e71e0", "fb9673f0373ca4c72fe9059648ae618d45fd8c90", "1381c0adc5b9693394b0ac8f9e7ce90fb8162f08", "3da97d97b12fcf22208c36f471119f33a08d9b6f", "b5900b981020b00a48f544b942ae172a52ebccd1", "fea8ccdb0ea1a8d20acba1f359e2345164602576", "08c7deb50c98f5a5095cbb60d3a85b1cac149883", "4a1b67d1f30abeeecb270666605025d9d78971ff", "9f33fe98e70c049ddf932247a44b9c9af85cf9cb", "13af83892724343cfdf88debbf00ea1343a10db1", "559bdd52d77c1d5c60d9d837168f4b8affec877d", "9746186205ed5e559d17e87d7ede9e3dd3922e54", "2abacfea9b33de419caeb01d5f5ce342667d695e", "36a0961cc64c4d3033aec820073d50c6470caa41", "0f7a5e5cd62066d2207a3b51f2cf26dbec1f134f", "789389dce27ad72adad251c81734bdb6c274c30f", "ce7b4fcdd81d4a6461f94abefb48c4b524865cd7", "1ed93b32367351089815aeeedf06cde9e999a346", "7667484b76a893287f3728e5b7604034ff868edf", "ad5965e00d9511528c91adea0b356ad1e7081f0e", "6933b9dd92156ba716006cbe5047edea15661765", "0f556558853268d86cd05bf8ea42da6d7862a024", "ddbc590554fe1c1052fa965a2b707b3b46dbe7e9", "3f31863c0e094a80d4fda3f23a06b50f6e2ad192", "ad82805e1fa43c211a802cf6f673f07303de4a90", "3dde006a6bf19a2008116542ba3d39f9a280648c", "10fe1c3d32398ee5492b72e230e74615ed31bca3", "3528980fbad49fae95ec278d089bdf01b33445ad", "e6ca3361169c2465eea56b4d3eb270f11427ef30", "63d8d69e90e79806a062cb8654ad78327c8957bb", "7fd73c91462153e16d207faa8ec0e3f507c72ae5", "07e2461bccbd4a17609d022518a82bdba691c21d", "fb9a1f5692ecc5fee9f7673c9934c61538ff0774", "3ec860cfbd5d953f29c43c4e926d3647e532c8b0", "1a0501050159c4ea6993c01d55f31d2e94688e8d", "9d18904e73c71f6b54dc13aa622ca0f7844d9503", "c232d4989ad1bd9ee19d8309cf0fdec2a5c3895f", "b68881f3528fc39226ffa44220ddb41a467910b5", "84784b388550de5ecb5b7740ad8a724c0a01410e", "5a362e8f6eee03095fb3001b417fcddd80ea3d73", "9df7ea3eed6b0c9c067521119698cfa79cc1f91d", "47d32615dc396dece539b664969d2e102f5b0f63", "4f15b1e750007465024181dd002dfc6d1baa48c9", "eca4aa7238d232b108847edb62a49e38cdf8115f", "52bfa4a8b3e3b8e0c0031ae53caddb4c067c04e3", 
"4e370791915e4b56603451b4fd1bd0105f1bcefb", "e9dd49970d259ec267d7c23a62215db37b986e49", "9019d11217cedd413d65052c72f07e320bc3f120", "012eb31e668ea9f87e39a0bc3ad467d31843568b", "cfa00c64740429d101b9b741845b501719f08910", "39905f28acee51506352c34736577d49ceb2f318", "8b632db02220806cd62e35fdebb3ede58243dee0", "4300c7773a130b4995f60ba5ed920dd1782a3527", "132045bbf158060cbbb20f86f212ce89c4358eda", "c362698bcaccd6c78a8073c394781389a8ea2a86", "8797c870c0881cd30fda186affee4bdec54aeecd", "dee39ab960882e70a87501118dfb61cf7a0cd017", "543a005dd1c6118c73e099e65119ae10c790969e", "05d3657ef0a94f1d0ad8291d8f2fb3b22aac782a", "b92a057606a47eb7de6ecc180e4dbf53c4a8d4b7", "b23bad5159c41b32fa07e901b1cf52fdb41450df", "b0698da6bc08772d686713945b561bc952dc7949", "17f472a7cb25bf1e76ff29181b1d40585e2ae5c1", "52f8eb239997d9a324d4794529c60522db8d08bf", "c97c4d390b8e53c594483cf9533d80aaeb8c9df1", "b13e819c48bcb2362614c18cdcd7a53d90944fea", "d24a30ed78b749f3730e25dcef89472dd5fb439c", "08a03f966216e4c5bf13405556f16220a14ddb3d", "1df554e992baf60f2d0b7c1b563250ba19b8f8ff", "55c3b8f21b063b605a01a720af92c0a5de59333a", "5b7870359b8b9934453f8e772ab7c3f9df3a5035", "2168ec12eff5c3d1ff09d0f3c13d6df5b5061164", "536b1db1b1db8d4cfef813575304421ebe8332f7", "751e9126b99c8c2552447c0bb1897ce5add6cf4a", "26779e1152bcb0072eda2abf4262c72fcfbcda2e", "4fcf1bfc2a8989412adb67c97ce1bee72a996fff", "bc4e86b6d2d386805466b822a04ea0c015debfff", "44787df6079918c6c4bf3dc871e2cad5a62c0e58", "36ddf5d08ce753fe00efc844be3769f09dda9f91", "2599445b0990979483db54c707c9a33b18231910", "3ce0cecc16b49385d8d45044bef44a66e08b08bc", "307ed58717ecc925bdf3f56da4560b44ea6bc312", "22dbdace88c8f4bda2843ed421e3708ec0744237", "45f4b06b7c9fa4cf548d33e40b2295b2d6ff806e", "392d4fc94db11fa40a96a41f975532f1a36630c6", "10f4bbf87a44bab3d79e330e486c897e95f5f33f", "ee4c659ad75c302b223a3815a65aa2e304cccc30", "3d0e9dbb81c4fcdfb179342f57f9815a5ca2b659", "08ba1a7d91ce9b4ac26869bfe4bb7c955b0d1a24", "6681ec516067747a4576f737f10f8d9bbca2d8d1", 
"ca56ac26cd7e1fdc35033228b4936bf70a090825", "10afbfad8c01bf44e6fed021b53000b77f9ce564", "302bf028487b50bed33bc6d36971b8ecf06393ab", "f68263a6f541429a8645ca2f4b0658cdbbd66638", "3feecfc73e4b215664218852ca77ef26ff7af94c", "694b23e806db505c0d5de29a7670e898043d3ccd", "9a5c896a527fb6b72508d7a6309c5c375cb2967c", "5583a131ab89aef81cee3e60d32160685c24d694", "0fb75f5cb12d1e1a909b9f698b7617bb9603002f", "eb6b8359909749e52efff9b78b037a88468101fd", "6f3391fda6b25796b5e051f822f91243f69276cb", "75776ce56e649dda68d9a8f13a9df911662e5b79", "1ab19e516b318ed6ab64822efe9b2328836107a4", "031d22b08d9e8235f46679b89e273ab8723d3e67", "24662ce3f3499ec8c5ecc546dac69dbffad578c6", "fa4ff855ca125b986bcb2bc6b71bef2ae8fde1cf", "3138e91d8920a3d73eab28c5b1655e8a245a6cf1", "23316b1bb8e17bc1f9dd2c971093fca3407c32d0", "3294e27356c3b1063595885a6d731d625b15505a", "ed74363c54ef59e99a983e85f02be9d263f66ffb", "2fa941ed0f9546796499782e285a14cabf0186de", "d968dcc086683ed597ad06313710e6717a157eda", "dfe98039db4aab11338900ae393fe02eda4f2374", "7ab9035ec3871bbeadf1095afbe1ff9d9cb25480", "e21cdb56c23e2a834a611d51abce545d2e8d01a2", "1b727cf3b3516afc599ee70fb9f374012649a459", "114907f89466987b3c41c8d530e50b2ac67179cf", "7a7569246855b8620d65d016013ccd0144a4f3ab", "ec001d5550cf950626f6416cfcccde82806df2ee", "7f8fe6b9e3f1fc4cc6232eedc0f5e328469a30c3", "fcd9221f8ef306155f59817a3b0bdae05e9e0ae2", "8b793aba8ef62e41dabf0d2bbe3e84b8ead59fe7", "f47108e83398953e5aa56d5c5dd75b950d242f9c", "80fd7c92f6dc92c2a61c354c73dc483794e9bcab", "dc090aea412cef17c7a68ec84c34797806feab24", "c5c1575565e04cd0afc57d7ac7f7a154c573b38f", "c562637140da95e37ea228d35f1046589d31b3b2", "48fd03f97ea949b6b7f44d8db86ecfe3d62c49dd", "591bd78a06814e75cae7cdef50ad91cf22e66c23", "4ed727bfef7d61023d391fdcb95cfa1df901be5e", "250252b9693778e0af653efebfe17f68d649c8a7", "600f164c81dbaa0327e7bd659fd9eb7f511f9e9a", "f0619ba7eacdfcc25d0487fb9c3c00d143633500", "580f3ef6e77753ce0b157ebc02656f346080d9a8", "8cce8fe25effa5c9451e2496b4659ec0f8eccf1c", 
"4156f9fc5983b09eb97ad3d9abc248b15440b955", "2eaa773c455fda8e04f3c0570e9ff128662f31e6", "ce7a385b791686f318313e94a0b573c456c1297f", "fa9aabaa364732ddfb1d228cb8e93fa12c3bf52c", "65747ccbc558ba2ffb6c1726ce5808f0c5f81774", "5ad07ae06ba8ae012367fd06205e948ff13cc7ab", "c35aff2baba9b62fbddb23b2d4346a987823ad7d", "2e243d59184f781755339f6b415fff87f63c5ca2", "9bbadfc8c30f17790454e5842a1151ad6cc914cc", "37b0357d2db89bc4560d4201c3c2478988c87640", "ff27963c5bce8f46ca4adc5bee209f35dfd69633", "2dd91115091f1691ea37c4b14788ca4199354012", "cfc9056155bf32648448b588a752f694b4e8249c", "ff4dec12d0ba0bb1d2c6bbc194545819bc9c1e5a", "47b38c14df17f60151b0f92a6be3e110d758c522", "068f8b19a3847a2eaf0c65f6d85ec60060750d3c", "349668b75c4398c075fc681f563a80ad7cf6b4f2", "20f272f4bdf562aa8b4dae84b67cfafa34a00738", "38fb6eada1e62e0c25c45023107ca8ab3426c162", "fe9a6a93af9c32f6b0454a7cf6897409124514bd", "2563b2adba98788a217565ba5a648f83cb75eeeb", "0c124734ce6015bd4c506b101038aebc1412da49", "05ce0e4e9ae2c7b2320decb3bb29e066f1dd96d3", "3150fd3b0065372f898b42a3628318210fcd566b", "8e01ce9c92a657077b2bbbe119523bd7429306f9", "46e82bdcd623e693ec086f81fa78f55c92a9ce44", "de6ba16ee8ad07e2f02d685b1e98b8be5045cb1b", "0b72d7bc2639be9fcb54cb10d06e17e5dcfbe750", "18c72175ddbb7d5956d180b65a96005c100f6014", "50ee027c63dcc5ab5cd0a6cdffb1994f83916a46", "960ad662c2bb454d69006492cc3f52d1550de55d", "d671a210990f67eba9b2d3dda8c2cb91575b4a7a", "e3917d6935586b90baae18d938295e5b089b5c62", "11cc0774365b0cc0d3fa1313bef3d32c345507b1", "5e19d7307ea67799eb830d5ce971f893e2b8a9ca", "ad08426ca57da2be0e9f8c1f673e491582edb896", "28d55935cc36df297fe21b98b4e2b07b5720612e", "8c81705e5e4a1e2068a5bd518adc6955d49ae434", "0952ac6ce94c98049d518d29c18d136b1f04b0c0", "9306f61c7c3bdcdcb257cd437ca59df8e599e326", "86881ce8f80adea201304ca6bb3aa413d94e9dd0", "bbc47f421ab161f22f2699ee7bbb7fc8aec1cb7b", "346dbc7484a1d930e7cc44276c29d134ad76dc3f", "158aa18c724107587bcc4137252d0ba10debf417", "804b4c1b553d9d7bae70d55bf8767c603c1a09e3", 
"35140ebfa0b6d75fd096aed72d40b16ea6a3828b", "f842b13bd494be1bbc1161dc6df244340b28a47f", "bc871497626afb469d25c4975aa657159269aefe", "d41bcb0c79f46aca47b9f9b8a779ce80a2a351f9", "05e96d76ed4a044d8e54ef44dac004f796572f1a", "1e0d92b9b4011822825d1f7dc0eba6d83504d45d", "a5d4cc596446517dfaa4d92276a12d5e1c0a284c", "29659b6fc4dceb117cec687d8accda5f514080ed", "f449c85b8ba5fa67ead341c7ad4ec396f4ab2dd6", "0742d051caebf8a5d452c03c5d55dfb02f84baab", "096eb8b4b977aaf274c271058feff14c99d46af3", "b88ceded6467e9b286f048bb1b17be5998a077bd", "ecdd83002f69c2ccc644d07abb44dd939542d89d", "01c09acf0c046296643de4c8b55a9330e9c8a419", "f88ce52c5042f9f200405f58dbe94b4e82cf0d34", "66ec085c362f698b40d6e0e7b10629462280c062", "2f1b521c29ab075a0cd9bbf56ba26ee13d5e4d76", "e6dc1200a31defda100b2e5ddb27fb7ecbbd4acd", "0394040749195937e535af4dda134206aa830258", "f0ca31fd5cad07e84b47d50dc07db9fc53482a46", "0d75c7d9a00f859cffe7d0bd78dd35d0b4bc7fa6", "656aeb92e4f0e280576cbac57d4abbfe6f9439ea", "35e87e06cf19908855a16ede8c79a0d3d7687b5c", "68d4056765c27fbcac233794857b7f5b8a6a82bf", "32a40c43a9bc1f1c1ed10be3b9f10609d7e0cb6b", "94e259345e82fa3015a381d6e91ec6cded3971b4", "50e47857b11bfd3d420f6eafb155199f4b41f6d7", "bd6099429bb7bf248b1fd6a1739e744512660d55", "adf31283550ff810540bad0edd2c8878ac252b20", "dbe0e533d715f8543bcf197f3b8e5cffa969dfc0", "869583b700ecf33a9987447aee9444abfe23f343", "ffef029c360db36c72d356103f7b119c5e54f96b", "7735f63e5790006cb3d989c8c19910e40200abfc", "0a4a8768c1ed419baebe1c420bd9051760875cbe", "4ea63435d7b58d41a5cbcdd34812201f302ca061", "1f24cef78d1de5aa1eefaf344244dcd1972797e8", "6769cfbd85329e4815bb1332b118b01119975a95", "3e4f84ce00027723bdfdb21156c9003168bc1c80", "e2106bb3febb4fc8fe91f0fcbc241bcda0e56b1e", "1ff896f707a5ee2868282d3272c6baa4a61b406b", "b41374f4f31906cf1a73c7adda6c50a78b4eb498", "ae89e464576209b1082da38e0cee7aeabd03d932", "3624ca25f09f3acbcf4d3a4c40b9e45a29c22b94", "78bfa428adb237c5ba85eda35e6a304b679c5c8c", "89945b7cd614310ebae05b8deed0533a9998d212", 
"3c374cb8e730b64dacb9fbf6eb67f5987c7de3c8", "3e3a87eb24628ab075a3d2bde3abfd185591aa4c", "d16471d4a5c300a122c1b1a4c802ce4748835e1a", "fdca08416bdadda91ae977db7d503e8610dd744f", "df550cb749858648209707bec5410431ea95e027", "f1ba2fe3491c715ded9677862fea966b32ca81f0", "d4331a8dd47b03433f8390da2eaa618751861c64", "447a5e1caf847952d2bb526ab2fb75898466d1bc", "f180cb7111e9a6ba7cfe0b251c0c35daaef4f517", "15cd05baa849ab058b99a966c54d2f0bf82e7885", "05e03c48f32bd89c8a15ba82891f40f1cfdc7562", "b9cad920a00fc0e997fc24396872e03f13c0bb9c", "0bf3513d18ec37efb1d2c7934a837dabafe9d091", "5b9d41e2985fa815c0f38a2563cca4311ce82954", "2bf646a6efd15ab830344ae9d43e10cc89e29f34", "ad784332cc37720f03df1c576e442c9c828a587a", "4223666d1b0b1a60c74b14c2980069905088edc6", "9eaa967d19fc66010b7ade7d94eaf7971a1957f3", "2c883977e4292806739041cf8409b2f6df171aee", "cb8c067aeabacd0eb723c5bb23eb41d8d219c57d", "a35849af340f80791c4a901ec2f2bbbac06660f5", "5b0008ba87667085912ea474025d2323a14bfc90", "b750b3d8c34d4e57ecdafcd5ae8a15d7fa50bc24", "503b6a490c2b24b9d2aaf642a0fdaf797a8cdb99", "d02c54192dbd0798b43231efe1159d6b4375ad36", "43e268c118ac25f1f0e984b57bc54f0119ded520", "0b878d553f359b38753c6ea27d7acf500a90da15", "e35b09879a7df814b2be14d9102c4508e4db458b", "9eb13f8e8d948146bfbae1260e505ba209c7fdc1", "c138c76809b8da9e5822fb0ae38457e5d75287e0", "69a77cb816a31c65699cd11c4a3b1b82ae44e903", "5f9dc3919fb088eb84accb1e490921a134232466", "acee1e7700e9f084ff64805a2c67d16fe69e63a8", "40a1935753cf91f29ffe25f6c9dde2dc49bf2a3a", "3bdaf59665e6effe323a1b61308bcac2da4c1b73", "97c1f68fb7162af326cd0f1bc546908218ec5da6", "1d97735bb0f0434dde552a96e1844b064af08f62", "78c1ad33772237bf138084220d1ffab800e1200d", "3fdcc1e2ebcf236e8bb4a6ce7baf2db817f30001", "94f093ce723a7112d5698a1e88f437503d2d40af", "74b0095944c6e29837c208307a67116ebe1231c8", "7c25213a7fa5fe13199d3112613ea0b9045320d1", "231a6d2ee1cc76f7e0c5912a530912f766e0b459", "e6e5a6090016810fb902b51d5baa2469ae28b8a1", "4909ed22b1310f1c6f2005be5ce3349e3259ff6a", 
"1198572784788a6d2c44c149886d4e42858d49e4", "cee02d8b9a63bd7f9b3cc394b84012eb44fffbaf", "6cd96f2b63c6b6f33f15c0ea366e6003f512a951", "f86c6942a7e187c41dd0714531efd2be828e18ad", "2fd007088a75916d0bf50c493d94f950bf55c5e6", "a3775b3a0e78b890d9ca79b0aabd982551474a88", "2b3ceb40dced78a824cf67054959e250aeaa573b", "a317083d9aac4062e77aa0854513383c87e47ece", "760a712f570f7a618d9385c0cee7e4d0d6a78ed2", "85f27ec70474fe93f32864dd03c1d0f321979100", "3f4798c7701da044bdb7feb61ebdbd1d53df5cfe", "690d669115ad6fabd53e0562de95e35f1078dfbb", "a50b4d404576695be7cd4194a064f0602806f3c4", "06fb92e110d077c27d401d2f9483964cd0615284", "817321d4008bf95e9be00cf6cb1554a1aed40027", "ff9e042cccbed7e350a25b7d806cd17fb79dfdf9", "9dfd1e9daea4c54a05b06df905bf8ee1faccaa72", "47f5f740e225281c02c8a2ae809be201458a854f", "c5437496932dcb9d33519a120821da755951e1a9", "77be118034a700e5b7d9633f50f6fbb7fabec8ef", "6e94c579097922f4bc659dd5d6c6238a428c4d22", "11fdff97f4511ae3d3691cfdeec5a19fa04db6ef", "f9752fd07b14505d0438bc3e14b23d7f0fe7f48b", "7ad1638f7d76c7e885bc84cd694c60f109f02159", "1a862270ad9168e3bf5471bda2793c32d4043aa4", "2f8183b549ec51b67f7dad717f0db6bf342c9d02", "7477cf04c6b086108f459f693a60272523c134db", "656ef752b363a24f84cc1aeba91e4fa3d5dd66ba", "9ba358281f2946cba12fff266019193a2b059590", "003ba2001bd2614d309d6ec15e9e2cbe86db03a1", "01d2cf5398c2b3e0f4fc8e8318a4492c95a0b242", "4ca9753ab023accbfa75a547a65344ee17b549ba", "e40df008fd0e5fd169840bf7d72a951411d13c59", "1da1299088a6bf28167c58bbd46ca247de41eb3c", "1bdfb3deae6e6c0df6537efcd1d7edcb4d7a96e9", "28f5138d63e4acafca49a94ae1dc44f7e9d84827", "13841d54c55bd74964d877b4b517fa94650d9b65", "888581e88c1cbfb8e905c317c6944b6ac2d4557c", "2ff6f7e489ae8ff054422444a5e0604e30f3e97b", "4866a5d6d7a40a26f038fc743e16345c064e9842", "61329bc767152f01aa502989abc854b53047e52c", "2ee1ba1c3d4797fdae46d3d5f01db7ef5903dadd", "a090d61bfb2c3f380c01c0774ea17929998e0c96", "60bffecd79193d05742e5ab8550a5f89accd8488", "02f4b900deabbe7efa474f2815dc122a4ddb5b76", 
"db3acf0653d6e69887d184c7ebb1958f74a4d0b1", "5c526ee00ec0e80ba9678fee5134dae3f497ff08", "016cbf0878db5c40566c1fbc237686fbad666a33", "e0765de5cabe7e287582532456d7f4815acd74c1", "9dcfa771a7e87d7681348dd9f6cf9803699b16ce", "d4df31006798ee091b86e091a7bf5dce6e51ba3e", "9f62ac43a1086c22b9a3d9f192c975d1a5a4b31f", "e4df98e4b45a598661a47a0a8900065716dafd6d", "bca39960ba46dc3193defe0b286ee0bea4424041", "00d0b01d6a5f12216e078001b7c49225d2495b21", "57f5711ca7ee5c7110b7d6d12c611d27af37875f", "518edcd112991a1717856841c1a03dd94a250090", "4cf3419dbf83a76ccac11828ca57b46bbbe54e0a", "7c9622ad1d8971cd74cc9e838753911fe27ccac4", "e83e5960c2aabab654e1545eb419ef64c25800d5", "262cdbc57ecf5c18756046c0d8b9aa7eb10e3b19", "0b24cca96ca61248a3fa3973525a967f94292835", "605f6817018a572797095b83bec7fae7195b2abc", "2f5b51af8053cf82ab52bbfd46b56999222ec21c", "641f34deb3bdd123c6b6e7b917519c3e56010cb7", "da7bbfa905d88834f8929cb69f41a1b683639f4b", "b3050dc48600acf2f75edf1f580a1f9e9cb3c14a", "2f3ec6d666d7b94b63a104f92859199428b77f78", "15b07dae17f184c8e6efbc9d2b58526d8e8dc9d4", "19666b9eefcbf764df7c1f5b6938031bcf777191", "fb95fb1e0bf99347a69f76c9fd65e039024e73b7", "328da943e22adef5957c08b6909bda09d931a350", "3983637022992a329f1d721bed246ae76bc934f7", "29e96ec163cb12cd5bd33bdf3d32181c136abaf9", "4c88e41424022c7c5f111d34d931fae15f52a551", "ca902aeec4fa54d32a4fed9ba89a7fb2f7131734", "0969e0dc05fca21ff572ada75cb4b703c8212e80", "352d61eb66b053ae5689bd194840fd5d33f0e9c0", "bf961e4a57a8f7e9d792e6c2513ee1fb293658e9", "5167e16b53283be5587659ea8eaa3b8ef3fddd33", "a5d76710dc15ebc7d8b4dc976604315f1e2fc3ba", "9bac481dc4171aa2d847feac546c9f7299cc5aa0", "2fc2250d843326f3eefab1941e5a6e54eef239b3", "54f442c7fa4603f1814ebd8eba912a00dceb5cb2", "0ed0e48b245f2d459baa3d2779bfc18fee04145b", "8f6d05b8f9860c33c7b1a5d704694ed628db66c7", "2e0addeffba4be98a6ad0460453fbab52616b139", "fb2bd6c2959a4f811b712840e599f695dad2967e", "7aa062c6c90dba866273f5edd413075b90077b51", "dfd8602820c0e94b624d02f2e10ce6c798193a25", 
"a20036b7fbf6c0db454c8711e72d78f145560dc8", "d3e51c0cfd6ae3d3082c2aa27fa1c73fa9662fdf", "f869601ae682e6116daebefb77d92e7c5dd2cb15", "3b21aaf7def52964cf1fcc5f11520a7618c8fae3", "2edab301935d1e1faa0859897b617862d4fede63", "fc5538e60952f86fff22571c334a403619c742c3", "0ea6ee0931f2dc51b0440dfa197433faacd53010", "891b31be76e2baa83745f24c2e2013851dc83cbb", "6f73807e309b262c5761c7a73c6a5609679f9f02", "d46b4e6871fc9974542215f001e92e3035aa08d9", "8274069feeff6392b6c5d45d8bfaaacd36daedad", "2116b13eb3af418ef02502715e8f3c98664e699a", "1ad97cce5fa8e9c2e001f53f6f3202bddcefba22", "fc7b34a2e43bb3d3585e1963bb64a488e2f278a0", "8a09668efc95eafd6c3056ff1f0fbc43bb5774db", "dba7d8c4d2fca41269a2c96b1ea594e2d0b9bdda", "f9b90d3c1e2c3d0f3d9a94e6a0aea5e3047bca78", "6afeb764ee97fbdedfa8f66810dfc22feae3fa1f", "9028fbbd1727215010a5e09bc5758492211dec19", "61ffedd8a70a78332c2bbdc9feba6c3d1fd4f1b8", "0aa74ad36064906e165ac4b79dec298911a7a4db", "cab3c6069387461c3a9e5d77defe9a84fe9c9032", "82d79658805f6c1aedf7b0b88b47b9555584d7ae", "0db43ed25d63d801ce745fe04ca3e8b363bf3147", "028e237cb539b01ec72c244f57fdcfb65bbe53d4", "aecb15e3e9191eb135bdba2426967bfac3f068db", "32c5c65db2af9691f8bb749c953c978959329f8f", "ab427f0c7d4b0eb22c045392107509451165b2ba", "0c3f7272a68c8e0aa6b92d132d1bf8541c062141", "68d40176e878ebffbc01ffb0556e8cb2756dd9e9", "76d9f5623d3a478677d3f519c6e061813e58e833", "58cb1414095f5eb6a8c6843326a6653403a0ee17", "713594c18978b965be87651bb553c28f8501df0a", "17d5e5c9a9ee4cf85dfbb9d9322968a6329c3735", "0ba99a709cd34654ac296418a4f41a9543928149", "6a092b8bb90234aa48a25ca12b206929fbeaa4a4", "a56c1331750bf3ac33ee07004e083310a1e63ddc", "9026ee8a89ecfa6bd2688a4943eee027e3fc4b0f", "7957abae15f631c5f5c50de68aa2ad08fe1f366f", "6689aee6c9599c1af4c607ea5385ac0c2cf0c4b3", "0641dbee7202d07b6c78a39eecd312c17607412e", "40fb4e8932fb6a8fef0dddfdda57a3e142c3e823", "c1581b5175994e33549b8e6d07b4ea0baf7fe517", "9a6da02db99fcc0690d7ffdc15340b125726ab95", "e78394213ae07b682ce40dc600352f674aa4cb05", 
"ffd81d784549ee51a9b0b7b8aaf20d5581031b74", "83e96ed8a4663edaa3a5ca90b7ce75a1bb595b05", "33b915476f798ca18ae80183bf40aea4aaf57d1e", "d588dd4f305cdea37add2e9bb3d769df98efe880", "0cf1287c8fd41dcef4ac03ebeab20482f02dce20", "9d46485ca2c562d5e295251530a99dd5df99b589", "1fa3948af1c338f9ae200038c45adadd2b39a3e4", "f231e9408da20498ba51d93459b3fcdb7b666efb", "0181fec8e42d82bfb03dc8b82381bb329de00631", "b262a2a543971e10fcbfc7f65f46115ae895d69e", "f5149fb6b455a73734f1252a96a9ce5caa95ae02", "f7093b138fd31956e30d411a7043741dcb8ca4aa", "576d0fea5a1ae9ce22996e726787c49023fc7522", "6afed8dc29bc568b58778f066dc44146cad5366c", "487f9ab19ca6779a014278d93f3e56ff82dac2e3", "d2bad850d30973a61b1a7d7dc582241a41e5c326", "053c2f592a7f153e5f3746aa5ab58b62f2cf1d21", "6e0a05d87b3cc7e16b4b2870ca24cf5e806c0a94", "af97e792827438ddea1d5900960571939fc0533e", "c5d9ac2f52c9fc229890798b9d6e4d899b72c525", "daf05febbe8406a480306683e46eb5676843c424", "64fd48fae4d859583c4a031b51ce76ecb5de614c", "ce54e891e956d5b502a834ad131616786897dc91", "3d1959048eba5495e765a80c8e0bbd3d65b3d544", "7e90e316819f220aa728dc1ddf73cf1d90e0af33", "c252bc84356ed69ccf53507752135b6e98de8db4", "8ea30ade85880b94b74b56a9bac013585cb4c34b", "73fbdd57270b9f91f2e24989178e264f2d2eb7ae", "197c64c36e8a9d624a05ee98b740d87f94b4040c", "3a04eb72aa64760dccd73e68a3b2301822e4cdc3", "54bae57ed37ce50e859cbc4d94d70cc3a84189d5", "05a312478618418a2efb0a014b45acf3663562d7", "535cdce8264ac0813d5bb8b19ceafa77a1674adf", "47f8b3b3f249830b6e17888df4810f3d189daac1", "0e50fe28229fea45527000b876eb4068abd6ed8c", "9d5bfaf6191484022a6731ce13ac1b866d21ad18", "b5690409be6c4e98bd37181d41121adfef218537", "89f4bcbfeb29966ab969682eae235066a89fc151", "64a08beb073f62d2ce44e25c4f887de9208625a4", "e68869499471bcd6fa8b4dc02aa00633673c0917", "6d2fd0a9cbea13e840f962ba7c8a9771ec437d3a", "aca75c032cfb0b2eb4c0ae56f3d060d8875e43f9", "6308e9c991125ee6734baa3ec93c697211237df8", "c2422c975d9f9b62fbb19738e5ce5e818a6e1752", "58483028445bf6b2d1ad6e4b1382939587513fe1", 
"e7b7df786cf5960d55cbac4e696ca37b7cee8dcd", "03701e66eda54d5ab1dc36a3a6d165389be0ce79", "8027a9093f9007200e8e69e05616778a910f4a5f", "0e37d70794d5ccfef8b4cc22b4203245f33eec6e", "80aa455068018c63237c902001b58844fcc6f160", "8796f2d54afb0e5c924101f54d469a1d54d5775d", "2be9284d531b8c573a4c39503ca50606446041a3", "188abc5bad3a3663d042ce98c7a7327e5a1ae298", "9d55ec73cab779403cd933e6eb557fb04892b634", "076f2dca12b3e85c282fc678f0d22ad6a3e6dc14", "3b2d5585af59480531616fe970cb265bbdf63f5b", "c5844de3fdf5e0069d08e235514863c8ef900eb7", "e42f3c27391821f9873539fc3da125b83bffd5a2", "948af4b04b4a9ae4bff2777ffbcb29d5bfeeb494", "2e157e8b57f679c2f1b8e16d6e934f52312f08f6", "238fc68b2e0ef9f5ec043d081451902573992a03", "43cbe3522f356fbf07b1ff0def73756391dc3454", "3933416f88c36023a0cba63940eb92f5cef8001a", "228d187a24b1b602105e91dd06ee35a35dbbfc38", "2d35a07c4fa03d78d5b622ab703ea44850de8d39", "1270044a3fa1a469ec2f4f3bd364754f58a1cb56", "2988f24908e912259d7a34c84b0edaf7ea50e2b3", "a237e3d89c460e1b2e3f12c5d4275bd0c6eb47a8", "844e3e6992c98e53b45e4eb88368d0d6e27fc1d6", "622daa25b5e6af69f0dac3a3eaf4050aa0860396", "5253c94f955146ba7d3566196e49fe2edea1c8f4", "b299c292b84aeb4f080a8b39677a8e0d07d51b27", "dddd70fb2746a944e7428e2eb61ca06faff3fce9", "66d512342355fb77a4450decc89977efe7e55fa2", "11fe6d45aa2b33c2ec10d9786a71c15ec4d3dca8", "9378ead3a09bc9f89fb711e2746facf399dd942e", "e510f2412999399149d8635a83eca89c338a99a1", "b2e6944bebab8e018f71f802607e6e9164ad3537", "87309bdb2b9d1fb8916303e3866eca6e3452c27d", "0c4659b35ec2518914da924e692deb37e96d6206", "a3bf6129d1ae136709063a5639eafd8018f50feb", "539ae0920815eb248939165dd5d1b0188ff7dca2", "471befc1b5167fcfbf5280aa7f908eff0489c72b", "8f051647bd8d23482c6c3866c0ce1959b8bd40f6", "c4e2d5ebfebbb9dcee6a9866c3d6290481496df5", "dd0258367fadb632b612ccd84fbc1ef892e70aeb", "0da3c329ae14a4032b3ba38d4ea808cf6d115c4a", "2d5d3905adfea7a6a8371dc2c5edc669cadacf70", "7f203f2ff6721e73738720589ea83adddb7fdd27", "6b742055a664bcbd1c6a85ae6796bd15bc945367", 
"08fbe3187f31b828a38811cc8dc7ca17933b91e9", "982e6bd58e61c1d37bfca5b25e52e1325f2f3334", "8fee7b38358815e443f8316fa18768d76dba12e3", "acde297810059ca632ef3f7c002b63b40cb8796f", "51ed4c92cab9336a2ac41fa8e0293c2f5f9bf3b6", "3df7401906ae315e6aef3b4f13126de64b894a54", "b4d694961d3cde43ccef7d8fcf1061fe0d8f97f3", "365866dc937529c3079a962408bffaa9b87c1f06", "b3b4a7e29b9186e00d2948a1d706ee1605fe5811", "1195f0bf8f745ba69da915203bcd79589b94aec5", "1d1caaa2312390260f7d20ad5f1736099818d358", "42c645df49106b68a71abe757ac13245db4be394", "4dd6d511a8bbc4d9965d22d79ae6714ba48c8e41", "66a9935e958a779a3a2267c85ecb69fbbb75b8dc", "8855d6161d7e5b35f6c59e15b94db9fa5bbf2912", "182f3aa4b02248ff9c0f9816432a56d3c8880706", "4350bb360797a4ade4faf616ed2ac8e27315968e", "cfdbcb796d028b073cdf7b91162384cd1c14e621", "2be9144a1e66de127192b01907c862381f4011d1", "41d9a240b711ff76c5448d4bf4df840cc5dad5fc", "a34d75da87525d1192bda240b7675349ee85c123", "dbede5113e4e91a3a26058e8b7253438a1df04c9", "35d90beea6b4dca8d949aae93f86cf53da72971f", "8cb55413f1c5b6bda943697bba1dc0f8fc880d28", "eaaed082762337e7c3f8a1b1dfea9c0d3ca281bf", "4b7c110987c1d89109355b04f8597ce427a7cd72", "6f0900a7fe8a774a1977c5f0a500b2898bcbe149", "b018fa5cb9793e260b8844ae155bd06380988584", "40854850a1ca24d9f1e62f2a0432edcbb5633f76", "ab87dfccb1818bdf0b41d732da1f9335b43b74ae", "2cf9088e9faa81872b355a4ea0a9fae46d3c8a08", "33ba256d59aefe27735a30b51caf0554e5e3a1df", "6d207360148ec3991b70952315cb3f1e8899e977", "84d7af78c8dba3cad0380a33511725db4db1a54d", "72591a75469321074b072daff80477d8911c3af3", "cb4d3d1b8fbb6df71a184dd8f00f89f84fa8373b", "63b29886577a37032c7e32d8899a6f69b11a90de", "eefe8bd6384f565d2e42881f1f9a468d1672989d", "2340d810c515dc0c9fd319f598fa8012dc0368a0", "0c247ac797a5d4035469abc3f9a0a2ccba49f4d8", "499f2b005e960a145619305814a4e9aa6a1bba6a", "7a84368ebb1a20cc0882237a4947efc81c56c0c0", "2d925cddb4a42d235b637e4888e24ba876b09e4a", "20a88cc454a03d62c3368aa1f5bdffa73523827b", "7535e3995deb84a879dc13857e2bc0796a2f7ce2", 
"0ba449e312894bca0d16348f3aef41ca01872383", "8752fb22fc34b76ba9f44967517b59359a029156", "39af06d29a74ad371a1846259e01c14b5343e3d1", "9b928c0c7f5e47b4480cb9bfdf3d5b7a29dfd493", "a4cc626da29ac48f9b4ed6ceb63081f6a4b304a2", "9771e04f48d8a1d7ae262539de8924117a04c20d", "cea2911ccabab40e9c1e5bcc0aa1127cab0c789f", "d1079444ceddb1de316983f371ecd1db7a0c2f38", "3d948e4813a6856e5b8b54c20e50cc5050e66abe", "017ce398e1eb9f2eed82d0b22fb1c21d3bcf9637", "3ee7a8107a805370b296a53e355d111118e96b7c", "4ba38262fe20fab3e4c80215147b498f83843b93", "126535430845361cd7a3a6f317797fe6e53f5a3b", "7a94936ce558627afde4d5b439ec15c59dbcdaa4", "6d4c64ca6936f868d793e1b164ddaf19243c19a7", "ff402bd06c9c4e94aa47ad80ccc4455efa869af3", "84be18c7683417786c13d59026f30daeed8bd8c9", "91835984eaeb538606972de47c372c5fcfe8b6aa", "9c1860de6d6e991a45325c997bf9651c8a9d716f", "03bd58a96f635059d4bf1a3c0755213a51478f12", "72a87f509817b3369f2accd7024b2e4b30a1f588", "acab402d706dbde4bea4b7df52812681011f435e", "e12b2c468850acb456b0097d5535fc6a0d34efe3", "4abaebe5137d40c9fcb72711cdefdf13d9fc3e62", "70769def1284fe88fd57a477cde8a9c9a3dff13f", "ba99c37a9220e08e1186f21cab11956d3f4fccc2", "655e94eccddbe1b1662432c1237e61cf13a7d57b", "26b9d546a4e64c1d759c67cd134120f98a43c2a6", "cb9092fe74ea6a5b2bb56e9226f1c88f96094388", "5592574c82eec9367e9173b7820ff329a27b6c21", "81b8a6cabcd6451b21d5b44e69b0a355d9229cc4", "fa398c6d6bd03df839dce7b59e04f473bc0ed660", "0c378c8dcf707145e1e840a9951519d4176a301f", "f86d8385a6170b98e434a121fb7d12facb2c8426", "6b18628cc8829c3bf851ea3ee3bcff8543391819", "5101368f986aa9837fdb3a71cb4299dff6f6325d", "8c4042191431e9eb43f00b0f14c23765ab9c6688", "070ab604c3ced2c23cce2259043446c5ee342fd6", "5a5511dd059d732e60c62ef817532689f4e0ab46", "ffaad0204f4af763e3390a2f6053c0e9875376be", "425ea5656c7cf57f14781bafed51182b2e6da65f", "1f2c99bf032868ce520b9c5586a0c20051367b60", "418b468b804379e8a600bca0395e01bffb7e08de", "79cdc8c786c535366cafeced1f3bdeb18ff04e66", "9c9ef6a46fb6395702fad622f03ceeffbada06e5", 
"13d430257d595231bda216ef859950caa736ad1d", "eefb8768f60c17d76fe156b55b8a00555eb40f4d", "4d6462fb78db88afff44561d06dd52227190689c", "1b2d9a1c067f692dd48991beff03cd62b9faebf2", "3327e21b46434f6441018922ef31bddba6cc8176", "f93606d362fcbe62550d0bf1b3edeb7be684b000", "1a9a192b700c080c7887e5862c1ec578012f9ed1", "9863dd1e2a3d3b4910a91176ac0f2fee5eb3b5e1", "0d98750028ea7b84b86e6fec3e67d61e4f690d09", "ccca2263786429b1b3572886ce6a2bea8f0dfb26", "5a1669abdc4f958c589843cff2f4d83a11fe8007", "b704eaa339d55ef7eac56d0117a8e127fc597686", "90ac0f32c0c29aa4545ed3d5070af17f195d015f", "19af008599fb17bbd9b12288c44f310881df951c", "8b2064a6a535cd2b49e348560c4f9e2c3a8f4748", "41ab4939db641fa4d327071ae9bb0df4a612dc89", "3538d2b5f7ab393387ce138611ffa325b6400774", "df8da144a695269e159fb0120bf5355a558f4b02", "50ce3f8744c219871fbdcab1342d49d589f2626b", "22fdd8d65463f520f054bf4f6d2d216b54fc5677", "b97c7f82c1439fa1e4525e5860cb05a39cc412ea", "2f160a6526ebf10773680dadaba44b006bcec2cb", "6c0048265758442d1620c2a239590d0d9060c09d", "5cb83eba8d265afd4eac49eb6b91cdae47def26d", "01733018a79aa447a27f269a1b9a58cd5f39603e", "dac6e9d708a9757f848409f25df99c5a561c863c", "4657d87aebd652a5920ed255dca993353575f441", "00dc942f23f2d52ab8c8b76b6016d9deed8c468d", "95008358a631a10ee3c24bfa2bf0c39d136a916e", "2ee817981e02c4709d65870c140665ed25b005cc", "8147ee02ec5ff3a585dddcd000974896cb2edc53", "276dbb667a66c23545534caa80be483222db7769", "1125760c14ea6182b85a09bf3f5bad1bdad43ef5", "8ce9b7b52d05701d5ef4a573095db66ce60a7e1c", "87f738d3883fc56ef0841484478b89c0f241df02", "3a95eea0543cf05670e9ae28092a114e3dc3ab5c", "1d10010ea7af43d59e1909d27e4e0e987264c667", "09733129161ca7d65cf56a7ad63c17f493386027", "f09d5b6433f63d7403df5650893b78cdcf7319b3", "1cee993dc42626caf5dbc26c0a7790ca6571d01a", "d5375f51eeb0c6eff71d6c6ad73e11e9353c1f12", "4b6be933057d939ddfa665501568ec4704fabb39", "7405ed035d1a4b9787b78e5566340a98fe4b63a0", "758d481bbf24d12615b751fd9ec121500a648bce", "0e1983e9d0e8cb4cbffef7af06f6bc8e3f191a64", 
"d046030f7138e5a2dbe2b3eec1b948ad8c787538", "d20ea5a4fa771bc4121b5654a7483ced98b39148", "ca0804050cf9d7e3ed311f9be9c7f829e5e6a003", "d37fa0caee9b598149f73ccc593f54eb2e0ffb58", "a59cdc49185689f3f9efdf7ee261c78f9c180789", "6ff0f804b8412a50ae2beea5cd020c94a5de5764", "322b7a4ce006e4d14748dd064e80ffba573ebcd7", "466184b10fb7ce9857e6b5bd6b4e5003e09a0b16", "7bd37e6721d198c555bf41a2d633c4f0a5aeecc1", "4014e8c1a1b49ad2b9b2c45c328ec9f1fd56f676", "829f390b3f8ad5856e7ba5ae8568f10cee0c7e6a", "d75bd05865224a1341731da66b8d812a7924d6f6", "4d7e1eb5d1afecb4e238ba05d4f7f487dff96c11", "d67dcaf6e44afd30c5602172c4eec1e484fc7fb7", "85a136b48c2036b16f444f93b086e2bd8539a498", "d18cca5e90884020e748e7fe2d13398d3cbd14fb", "cae87d5a724507e06f6d8178cfbec043db854fe3", "2cfc28a96b57e0817cc9624a5d553b3aafba56f3", "55fd4639c2126de5ad69d23b8a6e670a05911b9d", "34b42bcf84d79e30e26413f1589a9cf4b37076f9", "6d5f876a73799cc628e4ad2d9cfcd88091272342", "2ed7d95588200c8c738c7dd61b8338538e04ea30", "11f7f939b6fcce51bdd8f3e5ecbcf5b59a0108f5", "eb38f20eaa1b849cabec99815883390f84daf279", "b52c0faba5e1dc578a3c32a7f5cfb6fb87be06ad", "cfeb26245b57dd10de8f187506d4ed5ce1e2b7dd", "4270460b8bc5299bd6eaf821d5685c6442ea179a", "85785ae222c6a9e01830d73a120cdac75d0b838a", "3d143cfab13ecd9c485f19d988242e7240660c86", "bfd0dd2d13166a9c59e04c62f5463eacfc8d0d2b", "4a14a321a9b5101b14ed5ad6aa7636e757909a7c", "0fba39bf12486c7684fd3d51322e3f0577d3e4e8", "edf98a925bb24e39a6e6094b0db839e780a77b08", "aea4128ba18689ff1af27b90c111bbd34013f8d5", "4b28de1ebf6b6cb2479b9176fab50add6ed75b78", "410bc0b3bd82c85c98df71ec0cfe995f14621077", "1d3bd75e2fb95cc0996a1a2eeaf21dfa42ab7ca0", "816eff5e92a6326a8ab50c4c50450a6d02047b5e", "884a9ce87d4d2338cb97bf4c8df3cdb079a87d5e", "965f3a60a762712c3fc040724e507d00357f8709", "f41e80f941a45b5880f4c88e5bf721872db3400f", "16c1b592d85d13f1ba4eff0afb4441bb78650785", "32d555faaaa0a6f6f9dfc9263e4dba75a38c3193", "5ca14fa73da37855bfa880b549483ee2aba26669", "bd07d1f68486052b7e4429dccecdb8deab1924db", 
"97032b13f1371c8a813802ade7558e816d25c73f", "09df62fd17d3d833ea6b5a52a232fc052d4da3f5", "89e31777f221ddb3bc9940d7f520c8114c4148a2", "304b1f14ca6a37552dbfac443f3d5b36dbe1a451", "bbf1396eb826b3826c5a800975047beabde2f0de", "a2cc3193ed56ef4cedaaf4402c844df28edb5639", "176fc31a686fb70d73f1fa354bf043ad236f7aa3", "c0723e0e154a33faa6ff959d084aebf07770ffaf", "7a9ef21a7f59a47ce53b1dff2dd49a8289bb5098", "aa912375eaf50439bec23de615aa8a31a3395ad3", "ab58a7db32683aea9281c188c756ddf969b4cdbd", "2b10a07c35c453144f22e8c539bf9a23695e85fc", "01e63d0a21fad7a29301749e9eafed826101b636", "14e759cb019aaf812d6ac049fde54f40c4ed1468", "3917bf2cc075ef075d9c879fc9ec3349ea116735", "afa7899c5fc74e32c1fbf661a53f5e128bf480fe", "a9771fafd73b650235e6b5970e494187df9b3922", "4aac8f4b8e459d06ca22cd1b7606af99615bd5c7", "3d741315108b95cdb56d312648f5ad1c002c9718", "18b0cd5f044472803d3436475d3cd3ca648d9f72", "69026120a20aafa64bed9fd3beccf546758642f8", "66ee33bf0064eee159f3563e32b15c5bbd4140a0", "9c958322235a3ea1f239e3dde9bb865931cf34ed", "1d1184c92e7651d09d3231b4e650f3611a8e2c8b", "59cee3d54dca04207f57b19c3d1a31402a75c3c3", "7b46135815a3ef42e83607c24a5512ce811ef894", "658371a7f791d7ddeb266e939381d278ae809da5", "70bf51aada8c2d2fabef7cff29f9cb6da423ef50", "aa5efcc4331da6b1902f2c900b79120226fdcf20", "42d0193edad27f4a4505f1bf7a9122f0ac1a0e9e", "d42bdbcadd54ac14ad98c78a2359727223e72f02", "3eadf02a7ac57a2a0cc794180bf0b46b45a9e0a2", "414722ddd809b460d5b397eaf454fbb697cfb881", "daa8a47e0b40bdbfa2584e193f76798c79955fdd", "ff98041e54682c6d1af7b86b5fe125b8252a3466", "1e1e35284591b6a69569c48b3677b6f4409c5edc", "364f357d4894aa004e442a5a92896a9b14a46862", "52c89ca39a9fcad716e1e43c0bd4e40101c15d64", "b411448a978e48352e1959addb9ca8dc762262a3", "2819ac49d1967a3e51e7e65730a666a76ba9a687", "0149b14428de816bd62f80bbfd89238b765edaf7", "90e37567a5eb90624caabe2d74cca5a8f45b3144", "335af998fd86806422a4500ee6defc26df8a5388", "166f42f66c5e6dd959548acfb97dc77a36013639", "23b8a55785318ce90957a392607e24f620c4fccc", 
"611b1301b3bd13c518d0ec93d695e08b794766f7", "d8214e68bc7af0e24558fd9e79b2d777e46f2edc", "7f01e80129c78f92c4c4c2dc270c5ad9ccde6c1b", "d86c285cd89cad9c91bee28a44d5e463b3c8bba4", "12dce2bd9876dc5ae8abfa041ff3f494a4c69fd9", "da71ab76b67c292951cb93a9fa853677139fafac", "3e7328f082648afba63b08af462559b7bb562250", "259e35fa5a57cf16010621639957777ebad72367", "403ecc19291f21db6084db5c12f428e2af91ed3d", "695b040a9550a46b5ffe31e4a6abbadfac02c1ad", "8c7811c029905f4f3e9f31e925634a42e413f6d8", "11de4195ecf7b24568b0c893125cd6b8b469f0a6", "5d1608e03ab9c529d0b05631f9d2a3afcbf1c3e3", "a233cb665a6b44d233e8fb2913e5b57b20e1cd7b", "8bb4d90d5b97e8d08d2aaa99e9c075a506b3108a", "4e3c07283334a9b90dac011033fa2403bcf3c473", "ceedb191328ac4d968853b948a32b5689c2ac2a2", "e6ab1cc11f0abca010d6aac9548934c435fa548c", "fccd2c78a29ba3fbb12377665c65d065599a0ae2", "7901a33fe442fca87be7f8bb295091feb25f69bc", "521120c3907677e17708c17c5b6bab9087e61c5b", "fd023b69a8490459f62eae15514341203bc65e2c", "9703e31a7f873eb9fc41c81c303d83a7416fffc8", "81485efc5ac4e9b78c344762dca4aef8cf383e5c", "568ced900cbf7437c9e87b60a17e16f0c1e0c442", "85c007758e409eb3a9ae83375c7427dd517f4ab9", "0b572a2b7052b15c8599dbb17d59ff4f02838ff7", "b216040f110d2549f61e3f5a7261cab128cab361", "5d09d5257139b563bd3149cfd5e6f9eae3c34776", "5160569ca88171d5fa257582d161e9063c8f898d", "1ba20398e3b0154730590217a0988fbbab19e927", "69b3f0298ea8d0c98857e6aeefb35635bd64e490", "e96ce25d11296fce4e2ecc2da03bd207dc118724", "43a03cbe8b704f31046a5aba05153eb3d6de4142", "153e5cddb79ac31154737b3e025b4fb639b3c9e7", "7f2a234ad5c256733a837dbf98f25ed5aad214e8", "45a6333fc701d14aab19f9e2efd59fe7b0e89fec", "2489a839d0a761ef8520393a7e412c36f5f26324", "36c2db5ff76864d289781f93cbb3e6351f11984c", "48a417cfeba06feb4c7ab30f06c57ffbc288d0b5", "6aa61d28750629febe257d1cb69379e14c66c67f", "1ea4347def5868c622d7ce57cbe171fa68207e2b", "4c6e1840451e1f86af3ef1cb551259cb259493ba", "6a0b70abb9a81a96d4baa9b396deb9da4cc20f8f", "9ff3d2581fc86859b56aaef4ef2e1009e8bbc272", 
"ea86b75427f845f04e96bdaadfc0d67b3f460005", "8e63868e552e433dc536ba732f4c2af095602869", "33695e0779e67c7722449e9a3e2e55fde64cfd99", "71e95c3a31dceabe9cde9f117615be8bf8f6d40e", "d71a2baecdf28fd946458a34bfbc034681a82694", "d26b443f87df76034ff0fa9c5de9779152753f0c", "4e80d124692d00a88a32559c920579ce80c50a3c", "3f8fa9d4772d0b37deb98cbc9903da6cb754799a", "448aac9a3b9c490949602b091391bea16f013818", "bcc4749456781335b0f94e54a76c29e303b3665f", "f2e9616577a0eb866e78e6fd68c67809e4fce11c", "554f10d7b8933e9590551a4f891d034b9b8e8642", "40629398c85c12432979379800c267d2a8c62bf8", "a3177f82ea8391d9d733be47e4a0656a7b56e64c", "581baa27b0404940da1d817bb44bcd9085039930", "1edc9eefb555e044f12d8c8cd56e8cc950abf8bb", "2e6e9d117b626e34ce0167f9d69cec6698b0eb05", "8eae6ed5fa66b5eb63bdb6cc23d3b385a7fee37c", "956419756558d7a1f2505cde8133bf3021721102", "912f107002506ab8c7ae411c8d34c200ba567b02", "4949924ea5a5e68e180f71dec743b7b3fe3fb9cf", "d7d01406bf8bec7e48b70e886d93e935b8885815", "079a0a3bf5200994e1f972b1b9197bf2f90e87d4", "71f1e72670e676b6902cce0d6fc0b4f63b46ca28", "523abe29cc278f9daf03fe74d1e09d9e2711b73e", "40377a1bc15a9ec28ea54cc53d5cf0699365634f", "c14b4f44051c7ad34536f5dc87f1a1a7c656e9a6", "5c5304b79ebc2afd28ade6bb88daa80144ae3587", "ba7c01e1432bffc2fcde824d0b0ebd25ad7238c3", "0c56ba57e7b3f808e9ff85f61fbec38615d7f999", "4468afcc523b54d909b2b7e6e747c5c0f3f61c89", "56d831143008ec10f8122e5086f5a55aec770ea1", "7bdf4b717371ec9d59f8dff39fb7c1e03b8863e0", "ec9e8d69b67bcb2814b538091fa288b6bdbb990f", "58cb6677b77d5a79fc5b8058829693ca30b36ac5", "48d66e07041b8aa042d7a3d263fddc624bbc1e32", "14fed18d838bf6b89d98837837ff314e61ab7c60", "2bd49bdfc61788c8ac5621fe7f08a06dd2152fb9", "befa14324bb71e5d0f30808e54abc970d52f758c", "8b68db1af010f36f7e9d174d6ca0fcb24c1049ee", "ad8f0b5dd8d89c0b0805a77dc27a9ce22caf6c59", "738fadaf40249146f33da5b9efbb72a1fdf8767d", "30d21b5baf9514d26da749c6683c49b4fa55f2b5", "cc2cf8f69cd2d16c9bee2bb6c598548e7ff7cb05", "cc1b7592f29fc3a945a38c742b356167731ef59f", 
"ea050801199f98a1c7c1df6769f23f658299a3ae", "7a1828e181e3c8bd014c7e5fc1bcc417f122c18c", "562f35a662545d839876deeb605ca2c864507a82", "1b2ad281ef74e366ec58221b13edc6eefdb170f8", "9adbbd9dadaf7b15bb585555e7a2e2223e711296", "116170983869d56780343823621f2f30f62aa38e", "040ab6a70bef710b61e3b6a183c2d81947ac8f88", "b767abb865fba93f35312127b61ba351a2a91a44", "5a15eedcd836337b50a2bfab82ded7a9b939aca5", "49d1afd2d1fc1e11b46cd430d4a5809ac766bee8", "040806bc41c0dd50273921d8d839fda58d20b01e", "c5ddfc020a3d1a4cb5d83c725a683f54a7bf7f1d", "28e1668d7b61ce21bf306009a62b06593f1819e3", "9691055b1fcbe626b5bce9d8d43903094a5c0339", "6273b3491e94ea4dd1ce42b791d77bdc96ee73a8", "e2fc290a245d9f5c545e2e92ee8fcaff4908b97f", "b5b7f9cfa8f0ddabc850634912cdcd0ae6a79f62", "b7f3d16ac463065e99a8cae937592f8134b67bb6", "18df58d5055a01092084b22bf76bef9b1a6d360b", "033f0a769c60ea47928a805ea0b000278748c271", "8525a653d2115ca8f2fa061cd81bd268486ae459", "a341505614f5693c8b6c15d9801fea9f8d956804", "0a5b2e642683ff20b6f0cee16a32a68ba0099908", "5c9c153f705a02e157adcf49dccf4f1eeb70cf93", "3d7fce66c1880f4b29171e415cfad57d8b96ced2", "5a41f881700dc4af6e7c0790485345a4cade7307", "42a2b1e7ba92a8b5678ff2bee489a89009a2e913", "708a55d65568faf8158417ddfb79e728b2b28f86", "53b84c6887bb5c0a02e15b581132eb38f4a69bec", "bbaebcc0a2d65dea32fe1cf2a2aa12c65bb1b3da", "35472424eb5662d05928017942c32f4537cb5d5c", "71dc03d6c837ca9ec1334a63bea24d836de076a0", "751960b7e046b2d53315844f6e65b5ca945d7d36", "4372ddbf84b8d754ed83229db4ecb3d53ccceb73", "28103f6c09fd64c90a738076b0681400d4d31c9f", "37a1937691f69fc4b07fc5229c4d2756d2cf41f1", "49dcfbcb88139e4432cc0d3cfdd91af30f4d53dc", "9715aba0688195b2019d510ae3fd8da2e40f6e20", "0f25aa473e808de72c6975fdb1e3e65180a38c05", "62cf8c07ca6c4c7817f6a5682eb2d7cde76198ae", "601c9ac5859021c5c1321adeb38b177ebad346f0", "b2444e837095706998b03fa5fed223411b9d4d55", "27ae95d9ad6492511296360ba0618f5d0565cf9e", "491d42f3df46dd4cb9e2bf3f71f39f5313520d82", "53881bb35cb98c788f75fbc8c76198ccbc50edbf", 
"8aef5b3cfc80fafdcefc24c72a4796ca40f4bc8b", "229890f8ccd486ac377eecc689010458035b9a8a", "7ed24153b125bcce3fee92785ec8f7af5b011cff", "5a6dcba2b966b318bc17b24798294d9a510936d0", "5334e8f42130fb632afafa629a46ab1f0955a86f", "d745cf8c51032996b5fee6b19e1b5321c14797eb", "e5b4700a615cde23b91be3eadf1c99642cd33e42", "141fb4af72c7c33f57687f0233f53effc732c3db", "d740a9d099a86e234084234af09c060d736f764c", "7faa11c9f104977d49563b52387174b87c3ee8b0", "35692e80fa2fc17a1d37a40b3d4ffca28a1bcc7b", "3454e7bc68713f03000e6eeaab67d6401ba24531", "ec6340adb88d8443343e0f66f846a8c8c79ebb6f", "88c5baffa5522ea62ff5d5c41036b92e30d7e3c9", "0bc7d8e269a8c8018a7cb120ff25adf02d45c7ed", "55bb6235eaec0459183b5442f46501d29b824a9b", "ec19f55500104f68bc62d3c07e7108e1139ca108", "b7bc85b4f6a186e01365dd42993029ea06909c8f", "9555e76d705da6a3eaccb9731369b5e925cdc834", "6f90c0330a54d836f63d3b8e608193d26335dc71", "2aacfcdc5d06c86901852f7b666d17e97822ba23", "00e39fad9846084eb435b6cddd675ee11f2dfb90", "095251493ac774df0a737bb8023cffd036edccd0", "9caa7f125d3e861450bc3685699fceeaebea04d8", "4b7dc1e99b0b34022aec2bde1a13481f28f62030", "1c028833faf11dd565c749741eb97ce811b490de", "22bc3624a1e6d46f5b7c9208751d4f14fc87e946", "9bfdd57154164b6f9fd9dab52fb2679b9c7573a2", "2a31169e3a0f87987537220b743bbf6e79c440e5", "9c9ba660ff8fdf74a81ceaae5ee2e590c7659cf8", "4996e64c24cef33d0f7e5a2b1c3baf00e51493e6", "d7cbd030b282a7b0fe397df04a6a3c860608982f", "ad34d0da2a9f15bd45f8193e10838ae7deb9ead6", "212f6bd7163e7175eec7335be8d62249eaaccacc", "9cd0d36af668c354b0ff17f2e21cdde2c16b0d4e", "d168c2bd29fcad2083586430dd76f54da69bc8a6", "14f964d152337e963e4a4fd3619f6030aa75deb1", "3cb1f4c1650f7e55b78abba5a00b56a90b8e0567", "ce51d6533f4ef7f2c7dc417342b7e72211c4c516", "202d1b08ebfdcaf807bcf5ce7a72466b5ec60e5d", "eddafbd7ad94ebdf554a2810f1f122e87e0998dd", "e9ce1ab4a1b6204114446cb255c1d7639adc9a80", "e8ceaddf63c2b91997e36f062f4cff1e380db029", "23752b8fff9e07da1fd7b1b8dd9e2bb692ed2750", "09e66de10f59f2853843aeec6b0d5684fa4290c6", 
"47022785c35735a242dbacd4f1f1bb73628493ea", "9e3db0bd1dfa9e033a2a055a9ac03728cd28e930", "579fabe2a068228d4020050da3d68e9314148db5", "7c25a4b2eaa7bf0bc4e0bd239f05d6c0d4cb3431", "d1d14401d5c2ffcbba10a867905df47a99445b97", "418f96729f68fdd5a8e206248a2fe180672f9de2", "7ed4d134e1910ded71697aa7420f2fb720596d4b", "b06254f76e13d5f6ac0230fd8bdac35b901f9480", "49b67c48cebb33720d0722192678723141454a57", "0b62ece314846fa257d76e84dd9d002d1fcd21ae", "7c0ffae3acb0fd0a14ff66b6d474229aa16c53ab", "fc8030f2e34af8a99b25332683ba62ea18abde17", "21fc82d819d2a1a70465471be67260a1d6c0982e", "1afe5d933b58b4dd982a559cc6ec1d17959239de", "4441f529631a9278f5893f8302c2448039456fd9", "08d25f86d9ba5d2443bd3852aab01334a3a96dce", "251da2569036cebc2ea109972f412c5b1a9db20f", "35e2fb4a72656cbeb2e9afa140fb01af03815202", "4da2a88355cddba0371040ab3f87104797e7a1fa", "d5d6b3959958adb1333fa1a72227378ad3f7c16d", "4ec428bdc1f42d6269fb8d554cc128d28d12fc51", "99e149facb869122770b91842fbccaca3d7e764c", "189573971c1ef0893d761be1874de2c33a640fc6", "255290a1c4ff72ee7c6d78474e4fc4e62cdb1f31", "46a01565e6afe7c074affb752e7069ee3bf2e4ef", "e4fbd2f5b2068016aba3f3ef6f9bffd695b3a729", "7add83ab4ec0e856d88f6e76ea4f585e80def1fa", "89174737423d87258d3b9d5a660236a0bb66a470", "69e054acbf09a4bebac1c4b14c3f6a1ac6d199b0", "51e50d6e695e402d95099baee82846748bdfb1e0", "4e15b6ee0db3ca03348559abc104b734a1e0b482", "d6d7b7e882a65663fafe470f0582afb4279879b7", "984b721cc29b4d66c03b86114554c3bf3403d434", "df924c5a556f8ad0282982693aff0ff8ccd3d7df", "cf40951840bfa9b8721d722e9422c73e3a6fbf59", "3faff93758fe7fc58b3832055cb15c6ca3f306a7", "ac9feef881ed00a5a5e53bddb88f135a9cffe048", "b2c0b0a2bbce14201d33687f587a8e51cb800351", "c038186138b76a625500ff84c9dadb18aae29f1c", "cdeb16d4d2825da9df9982ed0bcf822d598d75d1", "0443b7a4372fb7bdcd69a0b55945f937c8b7d35b", "4137c8f2adfd3129d02e82a760ea4fcb2011b31d", "4c438728464f3d81d975c78aa0d7ed01088d2435", "0c5f74c8e323861d18d6090d8cce05dde22660d0", "b9b5bcd2fe579766074eef40ffbea0b4f4cc2283", 
"b0b43cc7c42cf4a44ea9040455ec40744b3e0d21", "3fc24d9d0ea6c52d316b4740a52b4bf28ba29cb5", "a4bb4474e38140d183241f0c8cee13167a6d2c60", "eed25d9b5b5b28e8454a359d54c9de5a05cc4682", "b64bf3dab761d27a19f2ff4049691dc47369595d", "d71cefc30269feaa1de3e330b472a7dc66ec95d3", "0f431ddce270d277fb5322dfdde1524a4717455f", "08aab46667dbcd875751f1e8ce2daed0df643b12", "360bb1bafa00dd1fa90a89766f5ef75061cfde75", "983bfb10fa228ecd1047ab4ac1d78c96448de059", "ea32e570ddf5661cdb030132e15e68e30ba6b24a", "a176e44cdee30d69732c331de8aba436194a2dd6", "8dccec5593788fd114ef4a4ebd953af80a26c3a9", "eaaf411826d129c2a31d997dc3f5f708a8186656", "b63411ed70ba315b87a716e1809faea48e70a982", "a94cae786d515d3450d48267e12ca954aab791c4", "071f824aeb7dd87955727f612e4dc5e8536db3b5", "87879be7d2d65a2b6e257cf10b6f51fe9ab8608e", "810f93d0f46a730b04e670da3dad6afa176bd9fd", "c01a499e52edd49bb5fbe4d0e7309f79a5d9d3fa", "4791df90a067b8084315765d9b2dc3896b0b47e5", "b38e915dc7a8531ca6f11a9e0c133a5cafe1d532", "a1d6c15f638b847b7dfbfdc901d1f9126cbdf289", "dfbdb4787091f71d27ba6d1e2bbc19b8e160954d", "39aac8a92315edfdde7f4f2ced20e998a1a3d70e", "7d0ff6d0621b3846e8543bc162fd0215d8adfaf0", "8c192cd39f90eb8ff2969f8916ef8967607c5298", "231951807081451293189fbf5ea9269b59724518", "6b6e2c2ff6fcc5837523940c69cf2e9e94bc0503", "7060f6062ba1cbe9502eeaaf13779aa1664224bb", "ef20808eaa02c1149b75fe38b7d5ffaff02dbd6f", "23de28db3c90eadcd98554a68e0425cb6aac9844", "80d109b71001d5184b34d48835a9bc00afdfa9ca", "1d478d088bd07fbd867bf21650846782164c4631", "82752700f496d4575163b2c59a547d24eb916baf", "c6d5d47513d6a7a1b0b92b33efda3f2a866d34ad", "75595c73bdce2e07dee0a4bfd911b36b6945b949", "34e23b934794a5abff251698df09cbac5ad2dd56", "e4834d942338d7a0b9a57fe307784caa8ad4c5cf", "f0f876b5bf3d442ef9eb017a6fa873bc5d5830c8", "f97e9818a8055668f9db7967b076dd036d25c417", "008dafebbb27eb64a1af8ded8bfe2e7a04c1d703", "7c3ab4a8947a0d1d1014d3e09dcba7485db5ea22", "d655dd296ef2c346f56b7b9f08fe7270592dcd29", "28934adf9690c6571de2b0c047d20b7cde7e159a", 
"18fe63c013983bea53be7d559ef36a1f385ca6ea", "629b1bdf4d96bb41f7d3fce5c7d5617515303b71", "6eb5f375d67dd690ec3b134de7caecde461e8c72", "e9b97cff4bf7ca4029cbe6b3919f796c6668e1a1", "18078e72bddefffc24a6e882790aca8531773bed", "9677d2f6a994f598c1d631038d49401c5f707ee0", "1b6f3139b1e59b90ab1aaf978359229b75985b49", "ea985e35b36f05156f82ac2025ad3fe8037be0cd", "40000b058cf80b7983a2c0f96562368a40a04580", "518f3cb2c9f2481cdce7741c5a821c26378b75e9", "1004d43b8a22b3692fd9018802ace33061fce70b", "5996001b797ab2a0f55d5355cb168f25bfe56bbd", "cccc169185d5a88a9fcdc4ab1477a8500cc9861b", "61b17f719bab899dd50bcc3be9d55673255fe102", "65c978a97f54cf255f01c6846d6c51b37c61f836", "ccb08ad272cae4705a45c7a642a133b4bbbe49bc", "301486e8dad7a41a1a99fd6fba28ce153fe1e56e", "c0a7ffb06bf23cffc49e67d6359b1fb5db336edc", "79b50cd468fcdba8f3c841c9d28d84ff66fd97fd", "cfb9713dca90b6f975ed392953ad099245d2ef6a", "b3d936c0d82f9b2032949af685a10708c6856d2c", "e3137bb7d73b4f846ad00800b46cc4118d2e0a34", "22954dd92a795d7f381465d1b353bcc41901430d", "dfdb4641fd3c5e559e8a850f7059a2ca646a4bc1", "4eb8376dcf9b251fffa238db8d78003c9a7aed3f", "2c761495cf3dd320e229586f80f868be12360d4e", "fcb0e0b0b5ba4daae62323d7b26abdc83f80952a", "41bde18c16d586865f8f60fbd3773422d6eb5890", "72fd5068ad5a2fe54570e6e758686daa6ca09a77", "2ede3ba3149bdb180387669acd7cc47aea2bfaba", "ff3fa31882bb9c7573a38c7d0883503a464522a6", "256f09fe3163564958381d7f3727b5c27c19144c", "891433740bf6d318782c468638722aebf8bef2f5", "85304f24f5a1800e66de20ad05e20c8c032b7d03", "35ec869dd0637c933d35ab823202c13b9b5d9aad", "7c5dde400571fd357d1093e1829a8bd7917d8fcd", "ee335fb785c332b1ac43565b007461002616f1e0", "9ded64e83d3ba51513ea00de27c0c770a02b0cf4", "5cdc02ed9f456219369fe3115321564c9955b9ae", "49f200f4651a8832d9005ed9b5cec4200f0a411b", "7d621ec871a03a01f5aa65253e9ae6c8aadaf798", "b4b9ec699c873cfbb16647966b230b522bc0113e", "988aa2583c63ada43ca260dd8b5a4a543725a483", "82ab819815c86e85128a2a055a0c0fcd1146b696", "6816cd8f4fcfda892d8f993cc0ae4d9e10db4178", 
"ac9516a589901f1421e8ce905dd8bc5b689317ca", "9844c10635927811e0e5c40f5ba1a074c867635c", "77b11260154e13e33c84599feba4cdc4f781bf71", "327f3d65a380f70bc39fe99c7ad55d76a5f7fff4", "17e7a53456539dac2c9cf8631174c6388f64e24b", "57126589b3fe62c35a36a2646dac3045d095ecf5", "5d1ffb7ba3c53ecc5a90d40380ae235043c16344", "87b93dc5834d945cf6a9e5b53864aee2005d71c9", "81f63e7344cc242416e37d791f7eb83ec2c07681", "d4993a960acf2495654fa7b2b177396b9e705f3a", "3ad6bd5c34b0866019b54f5976d644326069cb3d", "02b852e698dfe85df39c24e7dd39dedf484893dd", "49455079e54d7d06fd2d5617507947a31d4f9198", "7fe7beb207f24056a106b9be31908f807cb625de", "187480101af3fb195993da1e2c17d917df24eb23", "8a61396ab3eae09d5a6b6bd020bb4433dec52473", "2f2e1d2eee8a5a0c389d9dfb11b81964a0754335", "dcd319efcd00d73b21e0830ce9ca085aa05b129e", "6e35585eb37ee8a1de60a10a56a3183af480e214", "2606e6a5759c030e259ebf3f4261b9c04a36a609", "beeadf57a976f23f4fd6fa8a330eac6c81d3e3cd", "b214e3daacc79ebfcfe242c4962c9f8f525a860d", "238136fe55dbebfc6e473f6d5f52448767082c69", "ae76de17551ae635b2d6862d89b254b697250175", "b80757994fc0f784b88e60b37c055302274e2e07", "c53a512b4d7dee0d8d0f3e5bf2c6ace7a00cbbae", "26861e41e5b44774a2801e1cd76fd56126bbe257", "f28bbee52ccd41da02f12cb2c93301056d51278a", "53c8f841cbf2c8f09c6ece9d7f164504fe39409b", "43e11904ca961006be79f650025b5d8fbac9913f", "0ae80aa149764e91544bbe45b80bb50434e7bda9", "723b87aca073b7b82970a4580f64ba8c6c64691b", "20c016843430bb65c60ea4011a9e8bbb5785c490", "661bf7aa2de455f966f114d900f92a43f973ae49", "5ffd74d2873b7cba2cbc5fd295cc7fbdedca22a2", "126bdedeb55f2ca1a40e65c87dcb18bd038148b2", "16815ef660ef9e4091a81044d430591348df72ee", "7f3b7acee7851f933402f2a2cf4deb157e996851", "4576b59a44f75120f6a2d17a4e9c52e894297661", "295e9b2ddb3c9c7d029a99d5b877bf3824949f1b", "f0b0dc397cf31df4fda45c0187190c42de5adf40", "af24595c0c8f1b317b6fe2f2b49417cc40094b5c", "7f05df12dff3defee495507abd4870a0a30c3590", "ca839a67ab0e1434e5107eb4db30e63c6607e75b", "fd0a462934359b52051ae95b970c9e01f4f32539", 
"7fda1edac608bc67e55ac3d7c9dc5a542d8f8aee", "a99361200552e723fc9ccc82a6f98be222e51c12", "3918dcfddf2da218a615dd8f008f6fce436e06f7", "811f42b16e917a53cbcc605112781d8ab76bd31b", "3efea06ad6398f9db07acf34479c81a99479e80b", "47de7339f70075f6ac1a77d3dba23dbe3e435b73", "46c52f92e10fd2f2dddda162ad7995a1658e1245", "0ceda9dae8b9f322df65ca2ef02caca9758aec6f", "c24c9a9ca6edf52e533951b2700ec23234423c61", "17c62bff70eb0919864f111df4930062aded729a", "76a2846d17521149a118fd54083f8a51646e2804", "97b3185d948c45a00a190ce0a26abd23e77c9edf", "3e207c05f438a8cef7dd30b62d9e2c997ddc0d3f", "0a8ab703839ae585c2f27099616c40974cbeeda2", "b02d31c640b0a31fb18c4f170d841d8e21ffb66c", "15638d4611867b8f105fb541dbb61669fde6ab2a", "a7097289a40e740378f5973bee7e068151ef7597", "a8068de468ae9e1d6ebf021433467a449703acae", "62035628c85e13c10db4dfe2acedc5741874fc2e", "e103a8992a9eab5c2a5e8072a0cff7f5b68959a2", "c7ea9611446817f7b668882061ab11c7e998296c", "73866bdb723841da93b6ad93afe3d72817e2b377", "527cc8cd2af06a9ac2e5cded806bab5c3faad9cf", "0c4a139bb87c6743c7905b29a3cfec27a5130652", "5f107c92dd1c3f294b53627a5de1c7c46d996994", "ff3d4f2406ca2d78b20ed94a33983bca3583d520", "e018c7f468a9b61cd6e7dcbc40b332a8a25808ae", "3adae788b345c778d93d51cb9a1a5c38e718b1a6", "c51039a4cbfcdb0175f15824e186998500f5b85a", "a00bcd5db27ab270a11e83d3286f0c18a847d674", "4273a9d1605a69ac66440352b92ebeb230fd34f6", "5ff6aa027b2772b3cfb45bdde22ef808ba59acf5", "99a17a7f9a263ad357cab88b5607ae626139cec7", "8286c8eb0df7c9f1b2000b67066331f4e541f5bd", "5ce0309a124ec78a6b55de0b564ae13bff8c1dbc", "2a7e2cda27807d24b845f5b5080fb1296c302bfe", "57c929b6f30eec954dc5f17a52fbce290d8e3ca9", "bccef9817e38e8e9b450edc062f3938f1e25281c", "0994916f67fd15687dd5d7e414becb1cd77129ac", "8fbe68810cbc53521395829620060cf9558231cc", "a1b7b23bd8f2b2ef37a9113e6b8499f0069aac85", "9ba8dadb888433f6e4b47ea2b7ec7b0c9f8eeb0c", "6e7b2afb4daf1fe50a62faf75018ff81c24ee526", "f43687ad1aea44b841c7700a1a30d08806d2add2", "670e2c9b18ad853e86fe402bb8161bc81e38887d", 
"53f8f1ddd83a9e0e0821aaa883fbf7c1f7f5426e", "658eb1fd14808d10e0f4fee99c5506a1bb0e351a", "ae419d28ab936cbbc420dcfd1decb16a45afc8a9", "90ce227ec08053ea6acf9f9f9f53d8b7169574f2", "3f54660f555c4ef356375ec8c589891478d59513", "ddbb69b0f60960a29c3f861010ec55a22ab86d61", "bfecffd8fc8c309154b51e8d762fdc03918a2db0", "617a6935643615f09ef2b479609baa0d5f87cd67", "dddafd9fc479b06cb1601b1280c19181bc6172a7", "6d5e12ee5d75d5f8c04a196dd94173f96dc8603f", "0e1403f2182609fb64ed72913f7294fea7d02bd6", "c4c4e5ff454584ae6a68d25b36bfc860e9a893a0", "92a3d5ab3eb540a11eddf1b836c1db28640b2746", "aeb4cb00f0ab02b5a082daebac959d29e254dd8b", "90dcf6c0b414900c606112c0feff7ff2d68bb94e", "6f0b8920d39ac44eea320a4df2763137a71d851c", "cf01707bafd2850af6a46933a3029bc2a9aeecbf", "aec46facf3131a5be4fc23db4ebfb5514e904ae3", "544c06584c95bfdcafbd62e04fb796e575981476", "f20fe5c662fc90fb0032cdfe39812e83456ca46a", "b3cc2554449fb10002250bbc178e1009fc2fdb70", "fbfb0de017d57c5f282050dadb77797d97785ba5", "0a602b85c80cef7d38209226188aaab94d5349e8", "e68083909381a8fbd0e4468aa06204ac00a0e6fc", "98d7508f449fe44769c3f343778663779497ccbd", "0922e7d583d02f6078e59974a3de4452382ca9dd", "0a1e3d271fefd506b3a601bd1c812a9842385829", "425833b5fe892b00dcbeb6e3975008e9a73a5a72", "e8aa9aac39456a422bd7a1a87f943af5856e9ad2", "1cd584f519d9cd730aeef1b1d87f7e2e82b4de59", "6e261b9e539ecd03d76063f893d59c6eafb6ed43", "f5edbb47a9a200c17f0c34ec8cb6fc4a2f9cfc20", "29c9af31eb125b696ce34d0a8c64382f7e97bf23", "0f0fcf041559703998abf310e56f8a2f90ee6f21", "6e968f74fd6b4b3b172c787f298b3d4746ec5cc9", "dbd5e9691cab2c515b50dda3d0832bea6eef79f2", "e8867f819f39c1838bba7d446934258035d4101c", "3a1c3307f57ef09577ac0dc8cd8b090a4fe8091f", "2fa4096260ae64fa704f9ff8d7a8d1552f903f43", "b8b0f0ca35cb02334aaa3192559fb35f0c90f8fa", "c16bae6b2e578df2cba8e436e02bdeda281c2743", "b277bde51641d6b08693c171aea761beb14af800", "281be1be2f0ecce173e3678a7e87419f0815e016", "87c6ba55b0f817de4504e39dbb201842ae102c9f", "8a12edaf81fd38f81057cf9577c822eb09ff6fc1", 
"2b2aee745c608c67f931ef064f1d672c0f549262", "4b86e711658003a600666d3ccfa4a9905463df1c", "26679e1885b1ce186e80551befdf82e57b3f7455", "9499b8367a84fccb3651a95e4391d6e17fd92ec5", "7af15295224c3ad69d56f17ff635763dd008a8a4", "d2b2b56dd8c1daa61152595caf759a62596a85c9", "a90e6751ae32cb2983891ef2216293311cd6a8e9", "72ef87fb1a49f0e386f123a6b4f5566f51a3a47d", "e49372992c31412f55579397e615610748b6e6c9", "9c3a25f7f8ac74cc3e5335bc06ce85f16edc13ae", "e0eb1d66f244456063409264ed795d9893565011", "89c57aab49ceb3c2b939e61f44ecc93257b3ce1b", "05db8e3a342f8f239203c24d496e809a65ca7f73", "1e8711d2fc4b05eac0699c82f4698154c2b057d3", "85934ee572897ab2da4f294bced88a6531c2fdcc", "acc5318592303852feba755a1202fb3c683b3b53", "97a0aba4e9a95db17c3d4367f59aad1f02e04b55", "f16d6152e23b032f59eb794fac2d272daa859a79", "68dbd8c2d61cfe7e3b896ab47850b95c3ce349a8", "5585f55bbfae39b032a24521ce65f905afd69a3e", "94826cb68980e3b89118569c93cfd36f3945fa99", "35700f9a635bd3c128ab41718b040a0c28d6361a", "20c6c93cef3d0417b750a9c56a9587acb93500a4", "727067392502bb44cadcd55680156e9517a3fd65", "bdab45dfbd4e21b707dbf2503147d1eb99d183da", "8b607928c7af70259a9f8af9e08e28e6037411c8", "a3b87364aa68b371ca9831d333b934402fbc3713", "feb5b8bf315a6b6222f62dd9533b1e0f891a27bd", "ec1223c8fc16751dd577d3418f61d44a139c7dc3", "61bab86023de164bca3e35fc22944a7262970e1d", "6025f0761024006e0ea5782a7cea29ed69231fbf", "16f76f040f08448cf0a3984168d69197ea4af039", "f16591cc791eb3ed55246e17115c7ebb3349187b", "3d3fdeb8792859543d791e34af4005a80f348eed", "10cc976f8bdc0ce269a1239cf7cc6f3a5df7cc8a", "3d33f16ffb3f56e63b8b5c51147b1a07840d734a", "892c911ca68f5b4bad59cde7eeb6c738ec6c4586", "3148c4ca284d6521769dfde54e3e7693228bda06", "4032597bf9727adc3f4e3191ec17b87d9ce0980b", "e9bbe558c73de60e40ce2bd8c7cb7a47dacfe594", "32a6f6aa50ce2a631bf4de7432f830b29b6b05f2", "85476331edc9a9e3393f736f14aa80ad95f3c105", "6ff9b66aec16d84b1133850e7e8ce188a5a9a7f4", "704fe3839742e8d022fdc110f3a502e42a0ef89e", "5801690199c1917fa58c35c3dead177c0b8f9f2d", 
"b34487edb8d47c0101d514b8cb63148d80deee54", "e3b20cf421812dc96477a2074d0bb1ee83e6c98b", "a7dab944b42c87c52df2abe016158eafb110b2af", "24e64e9fd79f138cf4d90f65da06eacf031ec635", "52417b0406886154f0b4e2343ad6ac18c0484ec4", "bdb1fca40fd98a966b627ba9b0f4a0ac801dffdc", "6dfa82f00ec6faee1db319c1e306ae779cfc1c36", "aac19081782914dc359c1a6b9cbc626478e0a957", "a416513aaf97060287bf3e64ccdc1ccf85106c07", "602ff4fd0f5bd10c9fb971ecd2317e542f070883", "710c30c6c05ad1c9c0858f42364e9ca3f8e70bb4", "0e14ff81568e3c5aedf6764f6be1ece325f67c02", "02c04a8b7c1232646ebc882caf3793327a510ba6", "89c45ace90d377502dc84825e5039290927ae9e2", "2148e8484c8f27d853872c59f483f0d281fc664c", "65874dd7220664762b5b25f47460b623a7eb0175", "a1f0188029436169002d75af8f23f7ebdad969dd", "548f94f82bf28efa299a64c2527aad36d76b81af", "038b8b2b629a8ba1e2ad6f9319e16b68e83e518a", "c0825a62bbf6a906ec812d0f668478f001c24279", "8d226c762f60a43c92d66db65952eedf19777ab2", "febdce13fdc9d3fd9381602916fbbc395d4ad2ab", "3a3087c03f0403c3e180f47f9001509e852b82b3", "532f91d59d96d28379e09043592903d143218f4b", "99127b39f7671607b1fc9d862a9af0ce9beedaa4", "51d97f4e4385a3da78bf9277a5426216198698c3", "527249f0b1432cec3e2cadc14cf285008d7f55c7", "9466232ccee3ea887330d90243d8e164426971a0", "e6dd073f9b2cea53baca6378c3b56fd84f3818bb", "2727b5eb800a89570c4c54b2b3bc726be29ed170", "ce93f83d69ee6ee981124ed1f20102335caf7b09", "2484a34597a40d846c084e827fda299fd0927008", "6e53acae2e9909affde44e339d8db5fcda42aac7", "d102f18d319d9545588075010f5d10b1ff77f967", "b5fffbc0e590ce67d485f1602c8158befcef9fa8", "e10f4d5c0e0e294e00ce3a92b2057c4b2a5acf09", "8b9934758378a78604ce13914e23c6994caa3427", "69a78c5edc0af189df07b1f5eae24105604a78e6", "fa8c73899c22b461cc062a10b6df20fccb18800c", "53d0227c40b354cc438c035951da801c9dcd87b7", "1c09c81b63c84d1001ce7896f45a4ee50e2f8f49", "2e67d919815a073d1dbc6db3153697578257a28d", "79eb37c207ae3a7ea51871fabfc307485846cdb5", "35168a83fa1020c2f7b6501d19e07127d923fc8a", "f128e7eeb1c4c9aaf5f610b538d480b6ad23a1dd", 
"a35d85c2efd1fb090267980ebb3fd7b6381e3b74", "2b0d16e74abf3c512fc70def14163b119f1988f2", "b7a3ff5153b340e639dd14eb6933e307199afe08", "7a05584a0e8ff57a81a172c1c86fb524cfd9c84a", "1d5b030747bd836aebf7a00ed061a2f7bdf0a84c", "619442b4e5503e80941ce7de87eb12da0cce2b23", "33be61144db9b7d2878f3ccb1251ee75b4f5ecfe", "83e71455ee2070617ea35c02f03b7451187985d1", "e5f4583dab0c7bcd9a416771b65721895f110134", "e4ae6597929a5da38ffaf230015aa76587984036", "e78ac6617fee67cfb981423cb6d42526b51bb9db", "7b8cfd54bf9392c91af4c4621d793ede638fc81d", "ac01cb10c356d26bcfb7f674fe91e6c190ba64ab", "537a00082b413b40fbdd02b5584791614f5071d2", "ffc06713436afc4e08bf4afa401ac52db674c5da", "8f4c8a80e94a883356ee4c4425324dac5457661a", "92911c43f91cc93a3357b48bc685ed676ef9ec11", "b878518814fee31ce8cb61040301e7a921892156", "a55970013b984f344dfbbbba677d89dce0ba5f81", "afb3a84b7daa92d6e1894f5fefe9b38904976d7d", "7bdac8773dffc917e678db342ccbefdae238fb57", "a1b89c488a723cc496cf931d97e2538ecf9b2991", "795662b9ae10bdb20ddf40ee124ec3c06ae6b2af", "0d9b83d33d7d01b38094dff4145b22348e926c7b", "289c51fdc3fdd57f303d0e270758bdb8da7eca4f", "855cf31504da69daf03766b1357030dd07e485f7", "a9c8fa7e890878fe209f28d358cf5eca5e958fdb", "47af13d56bf6882b2c173447ef5ae8adaa18058f", "5f87c992e5572cbab6896c9e86d8ea480e6be899", "3fbe9d51ce96e3d8abcbad37611f02d97c1986e9", "c2d7bc19196dd4a7ed1a08d60081d16e0c14f463", "810d60ff5c0106de53a48fa2731eacf5ca2377b6", "e417e88c13e0f3d5bbd02e6682823b0514f4bc78", "0ce0d440126c43700765e5d12fc95eb962680f4e", "551a62f43a9da5ceb9564358ad25523736fd48dc", "edc5c359ed0fc24a3e85628f57fde59cd9b26dd4", "c4ac98154efdd73fd3ec9954dfb5ed32b95f7ca5", "abf659847660763c94b44c0baaf9198046a11845", "8da421994430cab9f586b84e6573b6b705a6b63d", "2c14b1dc3ee73715691c90b139b0b881e4e6bc7d", "320e2c950d5b31cb371208a6b752a94585ac6665", "c43d8a3d36973e3b830684e80a035bbb6856bcf7", "3323a905a3960a663a9884540e8c3586cf362ba9", "e90124684b0cd67feb14cda51d9afe9fe73ba05e", "d7361c34e180392e62552c463cf8abad9813719b", 
"87283935f0eec5ddc0e5ad3062568df8eb89e7e0", "8e7c647d8e8ba726b03f7e7c5cc395f86b9de9be", "94456106e1d894ca3f41fa5a0a5fecffffa3f7a4", "6b5d7223239f02a091db8b9d3624b59994402419", "1ed1a49534ad8dd00f81939449f6389cfbc25321", "77351eaeb65e374a4d1e54acc28fea426670e364", "ca2cd1781eb06c42f7b9a0f4955a05416ccddfd9", "45e2aa7706fcedcbb2d93304a9824fe762b8b3b0", "16e8d439fbcf8311efea7b0baeb1a5340272b396", "8a2a77062770bbdbfdbfe06ad7c3ab1728a4c59a", "1d2af64416882b2ae8fe4de51b85fdd7d561cfee", "ce4ad1ad4134d9131af21d4213e598f03475cfd3", "e217382ceed42605eb2a9b570c55f9622635e111", "ddef0c93cd9f604e5905b81dc56818a477f171e2", "c0c311c985fcfb309066d0d475ca862b2a19363e", "d198b5bc5eae22f7a788729c0ea15b6b60b62f36", "e483482e19b022a1cd7081dc2757bb8a85774ed7", "e58434a01c45505995b000f5e631843a2f2ea582", "41be021880a916305c82199ddc2298eb271f6590", "b07546f26a99b61c5045e313bc024b0fe7de590a", "a8e2b2b1b76491336036005d81be57d256acdd0c", "7004e0808b0905761b583d74524b932ba66c20dd", "a6c49f98df934c45cfd948e7f68544b00fbbfa02", "6c8b770ceda34be2ef294e8e2271f94e8a58672e", "55af4918d6b20d13c58c482d7e31e17db53c6ab5", "42b56c77e4b154364763d4024baa8129da75151f", "604d7533bdcfb06f4ae217a2cd9fd2e1467192f8", "0b78c77c73ecbc07e8deb361e79d97660ce231a5", "3a307b7e2e742dd71b6d1ca7fde7454f9ebd2811", "878dce67a005a4d70f14a04230556b785e77532f", "cd0503a31a9f9040736ccfb24086dc934508cfc7", "5a05e438b60e728b862af1a4022131549b7390a3", "7118f2607052e9a8647c1ac60c0a00cb5bc79b31", "477ca04e9c6b9fd8326af7e11c6d60b6ada2f42a", "97a9aaf99400de88504ef40cee3057f7c718261b", "942bb63e78d9edfe3b8d0a4bf9a3511c736a6930", "fbd17af24e86fe487e28f99ba3e402dd6cfcd16a", "4da2c15f320ee2f719785c44ca2427fef04a0948", "1d39c1e39d712d2449de59beae76521f4e04b2bd", "7334b3ade39e3bd826bdd9bdae73d176e4a02caf", "510d5f8e610ae962ef35501e9c5f310d83dc7b70", "448efcae3b97aa7c01b15c6bc913d4fbb275f644", "10ffdfdbc0aafb89d94528f359425de0c7a81986", "5f9253997ce7ad510989e58c5c2b98e5af017fb0", "5fc662287842e5cb2d23b5fa917354e957c573bf", 
"71969ee27916d545c63fe852946dd6dcc015d1a8", "f6c7fbd84e6ac61af40e670e589ec52fa435f396", "46d728356b5090bc28461b30cb21a08c3a690195", "91d216e72a774b10c1eac9bce5b1046fac8c8a97", "52e0c03dd661d032865dfedd91ca49542ccfc2a3", "6f9c01a9b861882c6676227942005cef13f3cb29", "30ea6c4991ceabc8d197ccff3e45fb0a00a3f6c5", "f254cbfe9710de5e41589f8b7898112b06872ed2"] \ No newline at end of file
diff --git a/scraper/pdf_dump_first_page.sh b/scraper/pdf_dump_first_page.sh
new file mode 100644
index 00000000..2749915d
--- /dev/null
+++ b/scraper/pdf_dump_first_page.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+for i in datasets/s2/pdf/*/*/*.pdf
+ do
+ OUTPUT="${i%.*}.txt"
+ if [[ ! -e $OUTPUT ]]
+ then
+ pdf2txt.py -p 1 $i > $OUTPUT
+ echo $OUTPUT
+ else
+ if [ -s $OUTPUT ]
+ then
+ echo "found $OUTPUT"
+ else
+ echo "rm empty $OUTPUT"
+ rm -f $OUTPUT
+ fi
+ fi
+ done
diff --git a/scraper/reports/all_institutions.csv b/scraper/reports/all_institutions.csv
new file mode 100644
index 00000000..7ff27b0d
--- /dev/null
+++ b/scraper/reports/all_institutions.csv
@@ -0,0 +1,1499 @@
+"University of Delaware, USA",39.6810328,-75.7540184,"University of Delaware, South College Avenue, Newark, New Castle County, Delaware, 19713, USA"
+AALTO UNIVERSITY,60.18558755,24.824273298775,"Aalto, 24, Otakaari, Otaniemi, Suur-Tapiola, Espoo, Helsingin seutukunta, Uusimaa, Etelä-Suomi, Manner-Suomi, 02150, Suomi"
+"AGH University of Science and Technology, Kraków, Poland",50.0657033,19.9189586670586,"AGH, Władysława Reymonta, Czarna Wieś, Krowodrza, Kraków, małopolskie, 30-059, RP"
+AI Institute,-34.6102167,-58.3752244291708,"INDEC, 609, Avenida Presidente Julio A. Roca, Microcentro, Comuna 1, Monserrat, CABA, C1067ABB, Argentina"
+ALICE Institute,-8.82143045,13.2347076178375,"Instituto Superior de Ciências da Educação (ISCED), Rua Salvador Allende (Salvador Guillermo Allende Gossens), Maculusso, Maianga, Município de Luanda, Luanda, 927, Angola"
+ARISTOTLE UNIVERSITY OF THESSALONIKI,40.62984145,22.9588934957528,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα"
+"Aalborg University, Denmark",57.01590275,9.97532826658991,"AAU, Pontoppidanstræde, Sønder Tranders, Aalborg, Aalborg Kommune, Region Nordjylland, 9220, Danmark"
+"Aalto University, Finland",60.18558755,24.824273298775,"Aalto, 24, Otakaari, Otaniemi, Suur-Tapiola, Espoo, Helsingin seutukunta, Uusimaa, Etelä-Suomi, Manner-Suomi, 02150, Suomi"
+"Aberystwyth University, UK",52.4107358,-4.05295500914411,"Aberystwyth University, Llanbadarn Campus, Cefn Esgair, Waun Fawr, Comins Coch, Ceredigion, Wales, SY23 3JG, UK"
+"Ahmedabad University, Gujarat, India 380009",23.0378743,72.5518004573221,"School of Science and Technology, University Road, Gurukul, Gulbai tekra, Ahmedabad, Ahmedabad District, Gujarat, 380001, India"
+Ajou Univ.,37.2830003,127.045484689222,"아주대학교, 성호대교, 이의동, 영통구, 수원시, 경기, 16499, 대한민국"
+Ajou University,37.2830003,127.045484689222,"아주대학교, 성호대교, 이의동, 영통구, 수원시, 경기, 16499, 대한민국"
+Akita Prefectural University,39.8011499,140.045911602376,"秋田県立大学, 秋田天王線, 潟上市, 秋田県, 東北地方, 011-0946, 日本"
+"Akita Prefectural University, Yurihonjo, Japan",39.39325745,140.073500465928,"秋田県立大学, 日本海東北自動車道(無料区間), 八幡前, 由利本荘市, 秋田県, 東北地方, 〒015-0836, 日本"
+Akita University,39.7278142,140.133225661449,"秋田大学手形キャンパス, 秋田八郎潟線, 手形字扇田, 広面, 秋田市, 秋田県, 東北地方, 010-0864, 日本"
+"Akita University, Akita, Japan",39.7291921,140.136565773585,"秋田大学鉱業博物館, 2, 秋田八郎潟線, 手形字扇田, 広面, 秋田市, 秋田県, 東北地方, 010-8502, 日本"
+"Alexandria University, Alexandria, Egypt",31.21051105,29.9131456239399,"جامعة الإسكندرية, الكورنيش, إبراهيمية, الإسكندرية, 21522, مصر"
+"Alibaba Group, Hangzhou, China",30.2810654,120.021390874339,"Alibaba Group, 五常街道, 余杭区 (Yuhang), 杭州市 Hangzhou, 浙江省, 中国"
+"Amazon, Berkshire, U.K.",51.43522855,-1.07155123817349,"Amazon Logistics, Exeter Road, Theale, West Berkshire, South East, England, RG7 4PL, UK"
+"American University, Washington, DC, USA",38.93804505,-77.0893922365193,"American University, 4400, Massachusetts Avenue Northwest, Spring Valley, American University Park, D.C., 20016, USA"
+Amherst College,42.37289,-72.518814,"Amherst College, Boltwood Avenue, Amherst, Hampshire, Massachusetts, 01004, USA"
+Amirkabir University of Technology,35.704514,51.4097205774739,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎"
+"Amirkabir University of Technology, Tehran",35.704514,51.4097205774739,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎"
+"Amirkabir University of Technology, Tehran, Iran",35.704514,51.4097205774739,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎"
+"Amirkabir University of Technology, Tehran. Iran",35.704514,51.4097205774739,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎"
+"Amity University Uttar Pradesh, Noida",28.54322285,77.3327482973395,"Amity University, Noida, Greater Noida Expressway, Noida Special Economic Zone, Bakhtawarpur, Ghaziabad, Uttar Pradesh, 201304, India"
+"Amity University, Lucknow, India",26.85095965,81.0495096452828,"Amity University, Faizabad Road, Uttardhauna, Gomti Nagar, Tiwariganj, Lucknow, Uttar Pradesh, 226010, India"
+"Anhui Polytechnic University, Wuhu, China",31.34185955,118.407397117034,"安徽工程大学, 鸠江北路, 芜湖市, 芜湖市区, 芜湖市 / Wuhu, 安徽省, 241000, 中国"
+"Anhui University, Hefei, China",31.76909325,117.17795091346,"安徽大学(磬苑校区), 111, 九龙路, 弘泰苑, 合肥国家级经济技术开发区, 芙蓉社区, 合肥经济技术开发区, 合肥市区, 合肥市, 安徽省, 230601, 中国"
+Anna University,13.0105838,80.2353736,"Anna University, Nuclear Physics Road, Srinagar Colony, Ward 171, Zone 13 Adyar, Chennai, Chennai district, Tamil Nadu, 600025, India"
+"Anna University Chennai, India",13.0105838,80.2353736,"Anna University, Nuclear Physics Road, Srinagar Colony, Ward 171, Zone 13 Adyar, Chennai, Chennai district, Tamil Nadu, 600025, India"
+"Anna University, Chennai",13.0105838,80.2353736,"Anna University, Nuclear Physics Road, Srinagar Colony, Ward 171, Zone 13 Adyar, Chennai, Chennai district, Tamil Nadu, 600025, India"
+Aristotle University of Thessaloniki,40.62984145,22.9588934957528,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα"
+Aristotle University of Thessaloniki GR,40.62984145,22.9588934957528,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα"
+"Aristotle University of Thessaloniki, GR-54124 Thessaloniki, Greece",40.62984145,22.9588934957528,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα"
+"Aristotle University of Thessaloniki, Greece",40.62984145,22.9588934957528,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα"
+"Aristotle University of Thessaloniki, Thessaloniki, 54124, Greece",40.62984145,22.9588934957528,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα"
+"Aristotle University of Thessaloniki, Thessaloniki, Greece",40.62984145,22.9588934957528,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα"
+Arizona State University,33.30715065,-111.676531568996,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA"
+"Arizona State University, AZ, USA",33.30715065,-111.676531568996,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA"
+"Arizona State University, Tempe AZ",33.4206602,-111.932634924965,"Arizona State University, Palm Walk, Tempe, Maricopa County, Arizona, 85287, USA"
+"Arizona State University, Tempe, AZ, USA",33.4206602,-111.932634924965,"Arizona State University, Palm Walk, Tempe, Maricopa County, Arizona, 85287, USA"
+"Asia Pacific University of Technology and Innovation, Kuala Lumpur 57000, Malaysia",3.0552109,101.7005831,"Asia Pacific University of Technology and Innovation (APU), Astro North Entrance, Astro, Sungai Besi, KL, 57000, Malaysia"
+"Assiut University, Asyut, Egypt",27.18794105,31.1700949818453,"Assiut University, El Shaheed Ellwaa Hasn Kamel street, الوليدية, أسيوط, مصر"
+"Aston University, Birmingham, U.K.",52.48620785,-1.88849915088515,"Aston University, Aston Street, Digbeth, Birmingham, West Midlands Combined Authority, West Midlands, England, B4, UK"
+Australia,-24.7761086,134.755,Australia
+Australian Institute of Sport,-35.24737535,149.104454269689,"Australian Institute of Sport, Glenn McGrath Street, Bruce, Belconnen, Australian Capital Territory, 2617, Australia"
+Australian National University,-37.81354365,144.971791681654,"Australian National University, 52, Collins Street, Melbourne City, City of Melbourne, Victoria, 3000, Australia"
+"Australian National University, Canberra",-35.28121335,149.11665331324,"Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia"
+"Australian National University, Canberra, ACT 0200, Australia",-35.28121335,149.11665331324,"Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia"
+"Azad University, Qazvin, Iran",36.3173432,50.0367286,"پل دانشگاه آزاد, باراجین, پونک ۳, قزوین, بخش مرکزی, شهرستان قزوین, استان قزوین, ‏ایران‎"
+B.S. University of Central Florida,28.59899755,-81.1971250118395,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA"
+Bahcesehir University,41.02451875,28.9769795349346,"BAU Galata, 24, Kemeraltı Caddesi, Müeyyedzade, Beyoğlu, İstanbul, Marmara Bölgesi, 34425, Türkiye"
+"Bahcesehir University, Istanbul, Turkey",41.02451875,28.9769795349346,"BAU Galata, 24, Kemeraltı Caddesi, Müeyyedzade, Beyoğlu, İstanbul, Marmara Bölgesi, 34425, Türkiye"
+Banaras Hindu University,25.2662887,82.9927969,"काशी हिन्दू विश्वविद्यालय, Semi Circle Road 2, ワーラーナシー, Jodhpur Colony, Vārānasi, Varanasi, Uttar Pradesh, 221005, India"
+Bangalore Institute of Technology,12.9551259,77.5741985,"Bangalore Institute of Technology, Krishna Rajendra Road, Mavalli, Vishveshwara Puram, South Zone, Bengaluru, Bangalore Urban, Karnataka, 560004, India"
+"Bapuji Institute of Engineering and Technology Davanagere, Karnataka, India",14.4443949,75.9027655185535,"Bapuji Institute of Engineering and Technology, 2nd Cross Road, K.T. Jambanna Nagara, Davanagere, Davanagere taluku, Davanagere district, Karnataka, 577000, India"
+"Bar Ilan University, Israel",32.06932925,34.8433433861531,"אוניברסיטת בר אילן, כביש גהה, גבעת שמואל, קריית מטלון, גבעת שמואל, מחוז תל אביב, NO, ישראל"
+Bas kent University,52.08340265,5.14828494152362,"University College Utrecht 'Babel', 7, Campusplein, Utrecht, Nederland, 3584 ED, Nederland"
+Beckman Institute,40.11571585,-88.2275077179639,"Beckman Institute, The Presidents' Walk, Urbana, Champaign County, Illinois, 61801-2341, USA"
+Beihang University,39.9808333,116.341012492788,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国"
+"Beihang University, Beijing 100191, China",39.9808333,116.341012492788,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国"
+"Beihang University, Beijing, China",39.9808333,116.341012492788,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国"
+"Beijing Institute of Technology University, P. R. China",39.9586652,116.309712808455,"北京理工大学, 5, 中关村南大街, 中关村, 稻香园南社区, 海淀区, 北京市, 100872, 中国"
+"Beijing Institute of Technology, Beijing 100081 CHINA",39.9586652,116.309712808455,"北京理工大学, 5, 中关村南大街, 中关村, 稻香园南社区, 海淀区, 北京市, 100872, 中国"
+"Beijing Institute of Technology, Beijing, China",39.9586652,116.309712808455,"北京理工大学, 5, 中关村南大街, 中关村, 稻香园南社区, 海淀区, 北京市, 100872, 中国"
+"Beijing Institute of Technology, China",39.9586652,116.309712808455,"北京理工大学, 5, 中关村南大街, 中关村, 稻香园南社区, 海淀区, 北京市, 100872, 中国"
+Beijing Jiaotong University,39.94976005,116.33629045844,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国"
+"Beijing Jiaotong University, Beijing, 100044, China",39.94976005,116.33629045844,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国"
+"Beijing Normal University, China",39.96014155,116.359704380265,"北京师范大学, 19, 新街口外大街, 西城区, 100875, 中国"
+"Beijing Union University, 100101, China",39.9890068,116.420677175386,"北京联合大学, 北四环东路, 飘亮阳光广场, 太阳宫乡, 朝阳区 / Chaoyang, 北京市, 100012, 中国"
+Beijing University of Posts and Telecommunications,39.9601488,116.351939210403,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国"
+"Beijing University of Posts and Telecommunications, Beijing",39.9601488,116.351939210403,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国"
+"Beijing University of Posts and Telecommunications, Beijing, China",39.9601488,116.351939210403,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国"
+"Beijing University of Posts and Telecommunications, Beijing, P.R. China",39.9601488,116.351939210403,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国"
+"Beijing University of Posts and Telecommunications, China",39.9601488,116.351939210403,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国"
+"Beijing University of Technology, Beijing 100022, China",39.87391435,116.477222846574,"北京工业大学, 银杏大道, 大郊亭村, 朝阳区 / Chaoyang, 北京市, 3208, 中国"
+"Beijing, China",39.906217,116.3912757,"北京市, 东城区, 北京市, 100010, 中国"
+"Beijing, Haidian, China",39.96014155,116.359704380265,"北京师范大学, 19, 新街口外大街, 西城区, 100875, 中国"
+"Benha University, Egypt",30.0818727,31.2445484105016,"كلية الهندسة بشبرا جامعة بنها, شارع اليازجي, روض الفرج, القاهرة, محافظة القاهرة, 2466, مصر"
+"Bharathidasan University, Trichy, India",10.7778845,78.6966319,"Bharathidasan University Road, Kajamalai, Ponmalai, Ponmalai Zone, Tiruchchirāppalli, Tiruchchirappalli district, Tamil Nadu, 620020, India"
+Bielefeld University,52.0280421,8.51148270115395,"Fachhochschule Bielefeld FB Gestaltung, 3, Lampingstraße, Mitte, Bielefeld, Regierungsbezirk Detmold, Nordrhein-Westfalen, 33615, Deutschland"
+"Bilkent University, 06800 Cankaya, Turkey",39.8720489,32.7539515466323,"Bilkent Üniversitesi, 3. Cadde, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye"
+"Binghamton University, Binghamton, NY",42.0958077,-75.9145568939543,"Binghamton University Downtown Center, Washington Street, Downtown, Binghamton, Broome County, New York, 13901, USA"
+"Bogazici University, Bebek",41.0868841,29.0441316722649,"Boğaziçi Üniversitesi Kuzey Yerleşkesi, Okulaltı 1. Sokak, Rumelihisarı, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34467, Türkiye"
+"Bogazici University, Turkey",41.08327335,29.0503931951846,"Boğaziçi Üniversitesi Güney Yerleşkesi, Sehitlikdergahı Sokağı, Beşiktaş, İstanbul, Marmara Bölgesi, 33345, Türkiye"
+Boston College,42.3354481,-71.1681386402306,"Boston College, 140, Commonwealth Avenue, Chestnut Hill, Newton, Middlesex County, Massachusetts, 02467, USA"
+"Boston College, USA",42.3354481,-71.1681386402306,"Boston College, 140, Commonwealth Avenue, Chestnut Hill, Newton, Middlesex County, Massachusetts, 02467, USA"
+Boston University,42.3504253,-71.1005611418395,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA"
+"Boston University, Boston, MA",42.3504253,-71.1005611418395,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA"
+"Boston University, USA",42.3504253,-71.1005611418395,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA"
+Bournemouth University,50.74223495,-1.89433738695589,"Bournemouth University, BU footpaths, Poole, South West England, England, BH10 4HX, UK"
+"Bournemouth University, UK",50.74223495,-1.89433738695589,"Bournemouth University, BU footpaths, Poole, South West England, England, BH10 4HX, UK"
+Brown University,41.8268682,-71.4012314581107,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA"
+"Brown University, Providence Rhode Island, 02912, USA",41.8268682,-71.4012314581107,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA"
+"Brown University, Providence, RI",41.8268682,-71.4012314581107,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA"
+"Brown University, United States",41.8268682,-71.4012314581107,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA"
+Brunel University,51.53255315,-0.473993562050575,"Brunel University London, The Strip, Hillingdon, London, Greater London, England, UB8 3PH, UK"
+CALIFORNIA INSTITUTE OF TECHNOLOGY,34.13710185,-118.125274866116,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA"
+CARNEGIE MELLON UNIVERSITY,37.4102193,-122.059654865858,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA"
+COLUMBIA UNIVERSITY,40.8419836,-73.9436897071772,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA"
+COMSATS Institute of Information Technology,31.4006332,74.2137296,"COMSATS Institute of Information Technology, Ali Akbar Road, Dawood Residency, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54700, ‏پاکستان‎"
+"COMSATS Institute of Information Technology, Islamabad",33.65010145,73.1551494914791,"COMSATS Institute of Information Technology, Fence, Chak Shehzad, وفاقی دارالحکومت اسلام آباد, 45550, ‏پاکستان‎"
+"COMSATS Institute of Information Technology, Lahore 54000, Pakistan",31.4006332,74.2137296,"COMSATS Institute of Information Technology, Ali Akbar Road, Dawood Residency, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54700, ‏پاکستان‎"
+"COMSATS Institute of Information Technology, Pakistan",31.4006332,74.2137296,"COMSATS Institute of Information Technology, Ali Akbar Road, Dawood Residency, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54700, ‏پاکستان‎"
+CUNY City College,45.5546608,5.4065255,"Cuny, La Tour-du-Pin, Isère, Auvergne-Rhône-Alpes, France métropolitaine, 38110, France"
+California Institute of Technology,34.13710185,-118.125274866116,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA"
+"California Institute of Technology, Pasadena, CA",34.13710185,-118.125274866116,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA"
+"California Institute of Technology, Pasadena, CA, USA",34.13710185,-118.125274866116,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA"
+"California Institute of Technology, Pasadena, California, USA",34.13710185,-118.125274866116,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA"
+"California Institute of Technology, USA",34.13710185,-118.125274866116,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA"
+"Callaghan, NSW 2308, Australia",-32.8892352,151.6998983,"Callaghan, Newcastle-Maitland, Newcastle, NSW, 2308, Australia"
+Cambridge Research Laboratory,52.17333465,0.149899463173698,"Strangeways Research Laboratory, Babraham Road, Romsey, Cambridge, Cambridgeshire, East of England, England, CB1 8RN, UK"
+Cambridge University,50.7944026,-1.0971748,"University, Cambridge Road, Old Portsmouth, Portsmouth, South East, England, PO1 2HB, UK"
+"Capital Normal University, 100048, China",39.92864575,116.30104052087,"首都师范大学, 岭南路, 西冉村, 海淀区, 100048, 中国"
+Cardi University,10.6435074,-61.4022996445292,"CARDI, University of the West Indies, Saint Augustine, Tunapuna-Piarco, 686, Trinidad and Tobago"
+"Cardiff University, UK",51.4879961,-3.17969747443907,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK"
+Carleton University,45.3860843,-75.6953926739404,"Carleton University, 1125, Colonel By Drive, Billings Bridge, Capital, Ottawa, Ontario, K1S 5B7, Canada"
+Carnegie Mellon University,37.4102193,-122.059654865858,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA"
+"Carnegie Mellon University Pittsburgh, PA - 15213, USA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+"Carnegie Mellon University Pittsburgh, PA, USA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+"Carnegie Mellon University, Pittsburgh PA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+"Carnegie Mellon University, Pittsburgh, PA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+"Carnegie Mellon University, Pittsburgh, PA 15213, USA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+"Carnegie Mellon University, Pittsburgh, PA, 15213, USA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+"Carnegie Mellon University, Pittsburgh, PA, USA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+"Carnegie Mellon University, Pittsburgh, Pennsylvania 15213, USA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+"Carnegie Mellon University, Pittsburgh, USA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+"Carnegie Mellon University, USA",37.4102193,-122.059654865858,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA"
+"Central Tehran Branch, Azad University",35.753318,51.370631,"دانشگاه آزاد شعبه مرکزی تربیت بدنی, بلوار ایران زمین, شهرک غرب, منطقه ۲ شهر تهران, تهران, بخش رودبارقصران, شهرستان شمیرانات, استان تهران, 14658, ‏ایران‎"
+Central Washington University,47.00646895,-120.53673039883,"Central Washington University, Dean Nicholson Boulevard, Ellensburg, Kittitas County, Washington, 98926, USA"
+"Centre de Visió per Computador, Universitat Autònoma de Barcelona, Barcelona, Spain",41.5007811,2.11143663166357,"Centre de Visió per Computador (CVC), Carrer de l'Albareda, Serraperera, UAB, Cerdanyola del Vallès, Vallès Occidental, BCN, CAT, 08214, España"
+"Chang Gung University, Taoyuan, Taiwan",25.030438,121.390095126629,"長庚科技大學林口校區, 261, 文化一路, A7合宜住宅, 樂善里, 木尾, 龜山區, 桃園市, 33301, 臺灣"
+Charles Sturt University,-35.0636071,147.3552234,"Charles Sturt University, Wagga Wagga, NSW, 2678, Australia"
+China,35.000074,104.999927,中国
+"China University of Mining and Technology, Xuzhou, China",34.2152538,117.1398541,"China University of Mining and Technology, 1号, 大学路, 泉山区 (Quanshan), 徐州市 / Xuzhou, 江苏省, 221116, 中国"
+Chinese Academy of Sciences,40.0044795,116.370238,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国"
+"Chinese Academy of Sciences, Beijing",40.0044795,116.370238,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国"
+"Chinese Academy of Sciences, China",40.0044795,116.370238,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国"
+"Chittagong University of Engineering and Technology, Chittagong, 4349, Bangladesh",22.46221665,91.9694226317318,"Shaheed Tareq Huda Hall, Goal Chattar, চট্টগ্রাম, চট্টগ্রাম জেলা, চট্টগ্রাম বিভাগ, 4349, বাংলাদেশ"
+"Chonbuk National University, Jeonju-si",35.84658875,127.135013303058,"전북대학교, 567, 백제대로, 금암동, 덕진구, 전주시, 전북, 54896, 대한민국"
+"Chongqing University of Posts and Telecommunications, Chongqing, China",29.5357046,106.604824742826,"重庆邮电大学, 崇文路, 渝中区, 黄桷垭, 重庆市中心, 南岸区 (Nan'an), 重庆市, 400065, 中国"
+"Chongqing University, China",29.5084174,106.578585515028,"重庆工商大学, 19, 翠林路, 重庆市, 重庆市中心, 南岸区 (Nan'an), 重庆市, 400067, 中国"
+"Chongqing University, Chongqing, China",29.5084174,106.578585515028,"重庆工商大学, 19, 翠林路, 重庆市, 重庆市中心, 南岸区 (Nan'an), 重庆市, 400067, 中国"
+Chosun University,35.1441031,126.9257858,"조선대역, 서남로, 남동, 동구, 광주, 61473, 대한민국"
+"Chu Hai College of Higher Education, Hong Kong",22.3760643,113.987153890134,"珠海學院 Chu Hai College of Higher Education, 80, 青盈路 Tsing Ying Road, 嘉和里 Ka Wo Lei, 小秀村 Siu Sau Tsuen, 屯門區 Tuen Mun District, 新界 New Territories, HK, DD132 586, 中国"
+"Chu Hai College of Higher Education, Tsuen Wan, Hong Kong",22.375601,113.987140797925,"珠海學院, 80, 青山公路-青山灣段 Castle Peak Road – Castle Peak Bay, 良田村 Leung Tin Tsuen, 青山灣 Castle Peak Bay, 小秀村 Siu Sau Tsuen, 屯門區 Tuen Mun District, 新界 New Territories, HK, DD132 586, 中国"
+Chubu University,35.2742655,137.013278412463,"中部大学, 国道19号, 春日井市, 愛知県, 中部地方, 487-8501, 日本"
+"Chulalongkorn University Bangkok, Thailand",13.74311795,100.532879009091,"จุฬาลงกรณ์มหาวิทยาลัย, 254, ถนนพญาไท, สยาม, แขวงปทุมวัน, เขตปทุมวัน, กรุงเทพมหานคร, 10330, ประเทศไทย"
+"Chulalongkorn University, Bangkok",13.74311795,100.532879009091,"จุฬาลงกรณ์มหาวิทยาลัย, 254, ถนนพญาไท, สยาม, แขวงปทุมวัน, เขตปทุมวัน, กรุงเทพมหานคร, 10330, ประเทศไทย"
+"Chung-Ang University, Seoul, Korea",37.50882,126.9619,"중앙대학교, 서달로15길, 흑석동, 동작구, 서울특별시, 06981, 대한민국"
+"Chung-Ang University, Seoul, South Korea",37.50882,126.9619,"중앙대학교, 서달로15길, 흑석동, 동작구, 서울특별시, 06981, 대한민국"
+Chungnam National University,36.37029045,127.347804575184,"충남대학교, 대덕사이언스길 2코스, 온천2동, 온천동, 유성구, 대전, 34140, 대한민국"
+City University of Hong Kong,22.34000115,114.169702912423,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国"
+"City University of Hong Kong, Hong Kong",22.34000115,114.169702912423,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国"
+"City University of Hong Kong, Hong Kong, China",22.34000115,114.169702912423,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国"
+"City University of Hong Kong, Kowloon, Hong Kong",22.34000115,114.169702912423,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国"
+Clemson University,34.66869155,-82.837434756078,"Clemson University, Old Stadium Road, Clemson Heights, Pickens County, South Carolina, 29631, USA"
+"Clemson University, Clemson, SC",34.67871075,-82.8346790794026,"E-06 Parking, Parkway Drive, Pickens County, South Carolina, SC, USA"
+Coburg University,50.26506145,10.9519648264628,"Hochschule für angewandte Wissenschaften Coburg, 2, Friedrich-Streib-Straße, Callenberg, Coburg, Oberfranken, Bayern, 96450, Deutschland"
+"College Heights Blvd, Bowling Green, KY",36.9881671,-86.4542111,"College Heights Boulevard, Bowling Green, Warren County, Kentucky, 42101, USA"
+"College Park, MD",38.980666,-76.9369189,"College Park, Prince George's County, Maryland, USA"
+"College Park, MD 20742 USA",38.980666,-76.9369189,"College Park, Prince George's County, Maryland, USA"
+"College Park, MD, 20740, USA",38.980666,-76.9369189,"College Park, Prince George's County, Maryland, USA"
+"College Park, Maryland",38.980666,-76.9369189,"College Park, Prince George's County, Maryland, USA"
+"College Park, USA",38.980666,-76.9369189,"College Park, Prince George's County, Maryland, USA"
+"College Park, United States",38.980666,-76.9369189,"College Park, Prince George's County, Maryland, USA"
+College of Computer and Information Science,42.3192923,-83.2343465549018,"Computer & Information Science, John Montieth Boulevard, Dearborn, Wayne County, Michigan, 48128, USA"
+College of Computing,-6.1992922,39.3081862,"computing, Tunguu, Unguja Kusini, Zanzibar, 146, Tanzania"
+College of Electrical and Information Engineering,42.0049791,21.40834315,"Факултет за електротехника и информациски технологии, Орце Николов, Карпош 2, Карпош, Скопје, Општина Карпош, Град Скопје, Скопски Регион, 1000, Македонија"
+"College of Engineering Pune, India",18.52930005,73.8568253702551,"College of Engineering, Pune, NH753F, Mangalwar Peth, Pune, Pune District, Maharashtra, 411011, India"
+College of Engineering and Computer Science,25.7589624,-80.3738881489383,"ECS, University Drive, Sweetwater, Lil Abner Mobile Home Park, Miami-Dade County, Florida, 33199, USA"
+"College of Engineering, Pune, India",18.52930005,73.8568253702551,"College of Engineering, Pune, NH753F, Mangalwar Peth, Pune, Pune District, Maharashtra, 411011, India"
+College of Informatics,14.6173885,121.101327315511,"Informatics, F.P. Felix Avenue, Dela Paz, San Isidro, Cainta, Rizal, Metro Manila, 1900, Philippines"
+Colorado State University,40.5709358,-105.086552556269,"Colorado State University, West Pitkin Street, Woodwest, Fort Collins, Larimer County, Colorado, 80526-2002, USA"
+"Colorado State University, Fort Collins",40.5709358,-105.086552556269,"Colorado State University, West Pitkin Street, Woodwest, Fort Collins, Larimer County, Colorado, 80526-2002, USA"
+"Colorado State University, Fort Collins, Colorado, USA",40.5709358,-105.086552556269,"Colorado State University, West Pitkin Street, Woodwest, Fort Collins, Larimer County, Colorado, 80526-2002, USA"
+Columbia University in the City of New York,40.8071772,-73.9625279772072,"Columbia University In The City Of New York, College Walk, Morningside Heights, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10027, USA"
+"Columbia University, New York",40.8419836,-73.9436897071772,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA"
+"Columbia University, New York NY 10027, USA",40.81779415,-73.9578531933627,"Columbia University, West 131st Street, Manhattanville Houses, Manhattanville, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10027, USA"
+"Columbia University, New York, NY",40.8419836,-73.9436897071772,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA"
+"Columbia University, New York, NY 10027, USA",40.81779415,-73.9578531933627,"Columbia University, West 131st Street, Manhattanville Houses, Manhattanville, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10027, USA"
+"Columbia University, New York, NY, USA",40.8419836,-73.9436897071772,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA"
+"Columbia University, New York, USA",40.8419836,-73.9436897071772,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA"
+"Columbia University, USA",40.8419836,-73.9436897071772,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA"
+"Columbia University, United States",40.8419836,-73.9436897071772,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA"
+"Communication University of China, Beijing, China",39.91199955,116.551891408714,"中国传媒大学, 朝阳路, 定福庄, 朝阳区 / Chaoyang, 北京市, 100024, 中国"
+"Computer Science, Loughborough University, Loughborough, UK",52.7663577,-1.2292461,"Computer Science, University Road, Charnwood, Leicestershire, East Midlands, England, LE11 3TP, UK"
+Concordia University,45.57022705,-122.637093463826,"Concordia University, 2811, Northeast Holman Street, Concordia, Portland, Multnomah County, Oregon, 97211, USA"
+"Concordia University, Canada",45.4955911,-73.5775043,"FOFA Gallery, 1515, Rue Sainte-Catherine Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3H 2T2, Canada"
+"Concordia University, Montreal, QC, Canada",45.4955911,-73.5775043,"FOFA Gallery, 1515, Rue Sainte-Catherine Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3H 2T2, Canada"
+Cornell University,42.4505507,-76.4783512955428,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA"
+"Cornell University, Ithaca, NY, USA",42.4505507,-76.4783512955428,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA"
+"Cornell University, Ithaca, New York",42.4505507,-76.4783512955428,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA"
+"Cornell University, USA",42.4505507,-76.4783512955428,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA"
+Courant Institute,40.7286994,-73.9957151,"NYU Courant Institute of Mathematical Sciences, 251, Mercer Street, Washington Square Village, Greenwich Village, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA"
+Courant Institute of Mathematical Sciences,40.7286484,-73.9956863,"Courant Institute of Mathematical Sciences, 251, Mercer Street, Washington Square Village, Greenwich Village, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA"
+"Courant Institute of Mathematical Sciences, New York, NY",40.7286484,-73.9956863,"Courant Institute of Mathematical Sciences, 251, Mercer Street, Washington Square Village, Greenwich Village, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA"
+"Curtin University, Perth WA 6102, Australia",-32.00686365,115.89691775,"Curtin University, Brand Drive, Waterford, Perth, Western Australia, 6102, Australia"
+"Curtin University, Perth WA, Australia",-32.00319745,115.891774804686,"A1, Beazley Avenue, Karawara, Perth, Western Australia, 6102, Australia"
+"Curtin University, Perth, Australia",-32.00574155,115.892864389257,"Curtin University, B201 L2 Entry South, Waterford, Perth, Western Australia, 6102, Australia"
+"Curtin University, Perth, Western Australia 6012",-32.00319745,115.891774804686,"A1, Beazley Avenue, Karawara, Perth, Western Australia, 6102, Australia"
+Cyprus University of Technology,34.67567405,33.0457764820597,"Mitropoli Building - Cyprus University of Technology, Anexartisias, Limasol - Λεμεσός, Limassol - Λεμεσός, Κύπρος - Kıbrıs, 3036, Κύπρος - Kıbrıs"
+"Cyprus University of Technology, Cyprus",34.67567405,33.0457764820597,"Mitropoli Building - Cyprus University of Technology, Anexartisias, Limasol - Λεμεσός, Limassol - Λεμεσός, Κύπρος - Kıbrıs, 3036, Κύπρος - Kıbrıs"
+Czech Technical University,50.0764296,14.418023122743,"České vysoké učení technické v Praze, Resslova, Nové Město, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 11121, Česko"
+"DIT UNIVERSITY, DEHRADUN",30.3983396,78.0753455,"DIT University, Dehradun-Mussoorie Road, Rājpur, Kincraig, Dehra Dūn, Uttarakhand, 248009, India"
+DUBLIN CITY UNIVERSITY,53.38522185,-6.25740874081493,"Dublin City University Glasnevin Campus, Lower Car Park, Wad, Whitehall A ED, Dublin 9, Dublin, County Dublin, Leinster, D09 FW22, Ireland"
+Dalian University of Technology,38.88140235,121.522810980755,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国"
+"Dalian University of Technology, China",38.88140235,121.522810980755,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国"
+"Dalian University of Technology, Dalian 116024, China",38.88140235,121.522810980755,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国"
+"Dalian University of Technology, Dalian, China",38.88140235,121.522810980755,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国"
+"Dalian University of Technology, Dalian, Liaoning, 116024, China",38.88140235,121.522810980755,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国"
+"Dankook University, 126 Jukjeon-dong, Suji-gu, Yongin-si, Gyeonggi-do, Korea",37.3219575,127.1250723,"단국대학교 치과병원, 죽전로, 죽전동, 수지구, 용인시, 경기, 16900, 대한민국"
+"Dankook University, Yongin, South Korea",37.3219575,127.1250723,"단국대학교 치과병원, 죽전로, 죽전동, 수지구, 용인시, 경기, 16900, 대한민국"
+Dartmouth College,43.7047927,-72.2925909,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA"
+"Dartmouth College, NH 03755 USA",43.7070046,-72.2869048,"Dartmouth College, Maynard Street, Hanover, Grafton County, New Hampshire, 03755, USA"
+"Deakin University, Geelong, VIC 3216, Australia",-38.19928505,144.303652287331,"Deakin University, Pigdons Lane, Waurn Ponds, Geelong, City of Greater Geelong, Barwon South West, Victoria, 3216, Australia"
+Delft University of Technology,51.99882735,4.37396036815404,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland"
+"Delft University of Technology, Mekelweg 4, Netherlands",51.99882735,4.37396036815404,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland"
+"Delft University of Technology, The Netherlands",51.99882735,4.37396036815404,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland"
+Democritus University of Thrace,40.84941785,25.8344493892098,"Δημοκρίτειο Πανεπιστήμιο Θράκης, Μάκρη - Αλεξανδρούπολη, Αλεξανδρούπολη, Δήμος Αλεξανδρούπολης, Περιφερειακή Ενότητα Έβρου, Περιφέρεια Ανατολικής Μακεδονίας και Θράκης, Μακεδονία - Θράκη, 68100, Ελλάδα"
+"Dermalog Identification Systems GmbH, Hamburg, Germany",53.5722826,9.9947826,"DERMALOG Identification Systems GmbH, 120, Mittelweg, Rotherbaum, Eimsbüttel, Hamburg, 20148, Deutschland"
+"Deutsche Welle, Bonn, Germany",50.7171497,7.12825184326238,"DW, Gronau, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland"
+Dhaka University,23.7317915,90.3805625,"Faculty of Social Welfare, Dhaka University, Azimpur Koborsthan Road, বস্তি, হাজারীবাগ, ঢাকা, ঢাকা বিভাগ, 1950, বাংলাদেশ"
+"Disney Research, CH",47.3804685,8.5430355,"Disney Research Zürich, 48, Stampfenbachstrasse, Unterstrass, Kreis 6, Zürich, Bezirk Zürich, Zürich, 8006, Schweiz/Suisse/Svizzera/Svizra"
+"Donghua University, China",31.2061939,121.410471009388,"东华大学, 新华路, 长宁区, 上海市, 210011, 中国"
+Dr. B. C. Roy Engineering College,23.54409755,87.342697070434,"Dr. B. C. Roy Engineering College, Lenin Sarani, Durgapur, Bānkurā, West Bengal, 713200, India"
+Dr. Babasaheb Ambedkar Marathwada University,19.8960918,75.3089470267316,"Boys Hostel No. 3, Shantipura road, Cantonment, Bidri workshop, Aurangabad, Maharashtra, 431004, India"
+Drexel University,39.9574,-75.1902670552555,"Drexel University, Arch Street, Powelton Village, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA"
+Duke University,35.9990522,-78.9290629011139,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA"
+East China Normal University,31.2284923,121.402113889769,"华东师范大学, 3663, 中山北路, 曹家渡, 普陀区, 普陀区 (Putuo), 上海市, 200062, 中国"
+Eastern Mediterranean University,35.14479945,33.90492318497,"Eastern Mediterranean University (EMU) - Stadium, Nehir Caddesi, Gazimağusa, Αμμόχωστος - Mağusa, Kuzey Kıbrıs, 99450, Κύπρος - Kıbrıs"
+Eastern University,40.0505672,-75.3710932636663,"Eastern University, Huston Road, Radnor Township, Delaware County, Pennsylvania, 19087, USA"
+"Ecole Centrale de Lyon, Lyon, 69134, France",45.7833631,4.76877035614228,"EC de Lyon, 36, Avenue Guy de Collongue, Écully, Lyon, Métropole de Lyon, Circonscription départementale du Rhône, Auvergne-Rhône-Alpes, France métropolitaine, 69134, France"
+Edge Hill University,53.5582155,-2.86904651022128,"Edge Hill University, St Helens Road, West Lancashire, Lancs, North West England, England, L39 4QP, UK"
+"Eindhoven University of Technology, The Netherlands",51.4486602,5.49039956550805,"Technische Universiteit Eindhoven, 2, De Rondom, Villapark, Eindhoven, Noord-Brabant, Nederland, 5600 MB, Nederland"
+"Electrical Engineering, University of",47.6532412,-122.3061707,"Electrical Engineering, 185, Loading Dock, Montlake, University District, Seattle, King County, Washington, 98195-2350, USA"
+Electrical and Computer Engineering,33.5866784,-101.875392037548,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA"
+Elon University,36.1017956,-79.501733,"Amphitheater, North Antioch Avenue, Elon, Alamance County, North Carolina, 27244, USA"
+Eskisehir Osmangazi University,39.7487516,30.4765307102195,"Eskişehir Osmangazi Üniversitesi Meşelik Yerleşkesi, Kütahya-Eskişehir yolu, Sazova Mahallesi, Karagözler, Tepebaşı, Eskişehir, İç Anadolu Bölgesi, 26160, Türkiye"
+FL,27.7567667,-81.4639835,"Florida, USA"
+"Facebook Inc., San Francisco, CA, USA",37.4828007,-122.150711572363,"Facebook Inc., San Francisco Bay Trail, Menlo Park, San Mateo County, California, 94025-1246, USA"
+"Facebook, Singapore",1.3170417,103.8321041,"Ewe Boon back lane, between Palm Spring, City Towers and Wing On Life Garden, Farrer Park Gardens, Novena, Singapore, Central, 259803, Singapore"
+"Feng Chia University, Taichung, Taiwan",24.18005755,120.648360719503,"逢甲大學, 100, 文華路, 西平里, 西屯區, 臺中市, 40724, 臺灣"
+"Ferdowsi University of Mashhad, Mashhad, Iran",36.3076616,59.5269051097667,"دانشگاه فردوسی مشهد, بولوار دانش, رضاشهر, منطقه ۹, مشهد, شهرستان مشهد, استان خراسان رضوی, 9177146164, ‏ایران‎"
+Firat University,39.7275037,39.4712703382844,"Erzincan Üniversitesi Hukuk Fakültesi Dekanlığı, Sivas-Erzincan yolu, Üçkonak, Erzincan, Erzincan merkez, Erzincan, Doğu Anadolu Bölgesi, 24000, Türkiye"
+"Florida Institute Of Technology, Melbourne Fl",28.0642296,-80.6230097241205,"Florida Institute of Technology, West University Boulevard, Melbourne, Brevard County, Florida, 32901, USA"
+"Florida Institute of Technology, Melbourne, USA",28.0642296,-80.6230097241205,"Florida Institute of Technology, West University Boulevard, Melbourne, Brevard County, Florida, 32901, USA"
+Florida International University,25.75533775,-80.3762889746807,"FIU, Southwest 14th Street, Sweetwater, University Park, Miami-Dade County, Florida, 33199, USA"
+"Florida International University, Miami, FL",25.75533775,-80.3762889746807,"FIU, Southwest 14th Street, Sweetwater, University Park, Miami-Dade County, Florida, 33199, USA"
+Florida State University,30.44235995,-84.2974786716626,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA"
+"Florida State University, Tallahassee, FL 32306, USA",30.44235995,-84.2974786716626,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA"
+"Fordham University, New York, 10023, USA",40.7710604,-73.9852807046561,"Fordham University Lincoln Center Campus, West 61st Street, 1 West End Ave trade area, Lincoln Square, Manhattan, Manhattan Community Board 7, New York County, NYC, New York, 10023, USA"
+"Foundation University Rawalpindi Campus, Pakistan",33.5609504,73.0712596618793,"Foundation University Rawalpindi Campus, Main Parking Road, Police Lines, راولپنڈی, Rawalpindi Cantt, پنجاب, 46600, ‏پاکستان‎"
+Fraser University,44.9689836,-93.2094162948556,"Fraser, 3333, University Avenue Southeast, Prospect Park - East River Road, Minneapolis, Hennepin County, Minnesota, 55414, USA"
+Fudan University,31.30104395,121.500454969435,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国"
+"Fudan University, Shanghai, China",31.30104395,121.500454969435,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国"
+GE Global Research,42.8298248,-73.8771938492793,"GE Global Research Center, Aqueduct, Niskayuna, Schenectady County, New York, USA"
+GE Global Research Center,42.8298248,-73.8771938492793,"GE Global Research Center, Aqueduct, Niskayuna, Schenectady County, New York, USA"
+"GIPSA-Lab, Grenoble, France",45.1929245,5.7661983,"GIPSA-lab, 11, Rue des Mathématiques, Médiat Rhône-Alpes, Saint-Martin-d'Hères, Grenoble, Isère, Auvergne-Rhône-Alpes, France métropolitaine, 38400, France"
+Gdansk University of Technology,54.37086525,18.6171601574695,"PG, Romualda Traugutta, Królewska Dolina, Wrzeszcz Górny, Gdańsk, pomorskie, 80-233, RP"
+George Mason University,38.83133325,-77.3079883887912,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA"
+"George Mason University, Fairfax Virginia, USA",38.83133325,-77.3079883887912,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA"
+"George Mason University, Fairfax, VA 22030",38.8345539,-77.3152142,"George Mason University, University Drive, Ardmore, Fairfax, Fairfax County, Virginia, 22030, USA"
+"George Mason University, Fairfax, VA, USA",38.83133325,-77.3079883887912,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA"
+Georgia Institute of Technology,33.776033,-84.3988408600158,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA"
+"Georgia Institute of Technology, Atlanta, 30332-0250, USA",33.776033,-84.3988408600158,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA"
+"Georgia Institute of Technology, Atlanta, Georgia, USA",33.776033,-84.3988408600158,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA"
+"Georgia Southern University, Statesboro, USA",32.42143805,-81.7845052864662,"Georgia Southern University, Forrest Drive, Pine Cove, Statesboro, Bulloch County, Georgia, 30460, USA"
+Glyndwr University,53.05373795,-3.00482075353073,"Glyndŵr University, Mold Road, Rhosrobin, Wrexham, Wales, LL11 2AW, UK"
+"Golden, CO, USA",39.755543,-105.2210997,"Golden, Jefferson County, Colorado, USA"
+Graz University of Technology,47.05821,15.460195677136,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich"
+"Graz University of Technology, Austria",47.05821,15.460195677136,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich"
+Griffith University,-27.5533975,153.053362338641,"Griffith University Nathan Campus, Johnson Path, Nathan, Nathan Heights, QLD, 4111, Australia"
+"Griffith University, Australia",-27.5533975,153.053362338641,"Griffith University Nathan Campus, Johnson Path, Nathan, Nathan Heights, QLD, 4111, Australia"
+"Griffith University, Brisbane",-27.5533975,153.053362338641,"Griffith University Nathan Campus, Johnson Path, Nathan, Nathan Heights, QLD, 4111, Australia"
+"Griffith University, Nathan, QLD, Australia",-27.5533975,153.053362338641,"Griffith University Nathan Campus, Johnson Path, Nathan, Nathan Heights, QLD, 4111, Australia"
+Guangdong Medical College,23.1294489,113.343761097683,"医学院, 真如路, 凤凰新村, 天河区, 广州市, 广东省, 510635, 中国"
+"Guangdong University of Technology, China",23.1353836,113.294704958268,"广东工业大学, 东风东路, 黄花岗街道, 越秀区 (Yuexiu), 广州市, 广东省, 510080, 中国"
+"Guangzhou University, Guangzhou, China",23.04436505,113.366684576444,"广州大学, 大学城中环西路, 广州大学城, 南村镇, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国"
+"Guilin University of Electronic Technology Guangxi Guilin, China",25.2873992,110.332427699352,"桂林电子科技大学金鸡岭校区, 1号, 金鸡路, 七星区, 黄莺岩村, 七星区, 桂林市, 广西壮族自治区, 541004, 中国"
+Hacettepe University,39.86742125,32.7351907206768,"Hacettepe Üniversitesi Beytepe Kampüsü, Hacettepe-Beytepe Kampüs Yolu, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye"
+Halmstad University,56.66340325,12.8792972689712,"Högskolan i Halmstad, 3, Kristian IV:s väg, Larsfrid, Nyhem, Halmstad, Hallands län, Götaland, 301 18, Sverige"
+"Halmstad University, Halmstad, Sweden",56.66340325,12.8792972689712,"Högskolan i Halmstad, 3, Kristian IV:s väg, Larsfrid, Nyhem, Halmstad, Hallands län, Götaland, 301 18, Sverige"
+"Hangzhou Dianzi University, Hangzhou, China",30.3125525,120.3430946,"杭州电子科技大学, 2号大街, 白杨街道, 江干区 (Jianggan), 杭州市 Hangzhou, 浙江省, 310018, 中国"
+"Hankuk University of Foreign Studies, South Korea",37.5953979,127.0630499,"외대앞, 휘경로, 이문동, 이문2동, 동대문구, 서울특별시, 02407, 대한민국"
+Hanoi University of Science and Technology,21.003952,105.843601832826,"HUST, Trần Đại Nghĩa, Hai Bà Trưng, Hà Nội, 10999, Việt Nam"
+Hanyang University,37.5557271,127.0436642,"한양대, 206, 왕십리로, 사근동, 성동구, 서울특별시, 04763, 대한민국"
+"Harbin Engineering University, Harbin, Heilongjiang, 150001, China",45.77445695,126.676849168143,"哈尔滨工程大学, 文庙街 - Wenmiao Street, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国"
+Harbin Institute of Technology,45.7413921,126.625527550394,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国"
+"Harbin Institute of Technology, China",45.7413921,126.625527550394,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国"
+"Harbin Institute of Technology, China, 150001",45.7413921,126.625527550394,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国"
+"Harbin Institute of Technology, Harbin 150001, China",45.7413921,126.625527550394,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国"
+"Harbin Institute of Technology, Harbin, China",45.7413921,126.625527550394,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国"
+Harbin Institute of Technology;Shenzhen University,22.5895016,113.965710495775,"哈工大(深圳), 平山一路, 深圳大学城, 珠光村, 南山区, 深圳市, 广东省, 518000, 中国"
+Harvard University,42.36782045,-71.1266665287448,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA"
+"Harvard University, Cambridge",42.36782045,-71.1266665287448,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA"
+"Harvard University, Cambridge, MA",42.36782045,-71.1266665287448,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA"
+"Harvard University, Cambridge, MA 02138",42.36300645,-71.1245674978516,"Harvard University, Rotterdam Street, North Brighton, Allston, Boston, Suffolk County, Massachusetts, 02163, USA"
+"Harvard University, Cambridge, MA, USA",42.36782045,-71.1266665287448,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA"
+"Harvard University, USA",42.36782045,-71.1266665287448,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA"
+Harvard and Massachusetts Institute,42.5268445,-71.6525446,"Massachusetts Correctional Institute Shirley Minimum Security Library, Harvard Road, Shaker Village, Shirley, Middlesex County, Massachusetts, 01464, USA"
+"Hebei, China",39.0000001,116.0,"河北省, 中国"
+"Hefei University of Technology, Hefei, Anhui, 230601, China",31.846918,117.290533667908,"合肥工业大学(屯溪路校区), 193号, 南一环路, 航运南村, 包公街道, 合肥市区, 合肥市, 安徽省, 230009, 中国"
+"Hefei University of Technology, Hefei, China",31.846918,117.290533667908,"合肥工业大学(屯溪路校区), 193号, 南一环路, 航运南村, 包公街道, 合肥市区, 合肥市, 安徽省, 230009, 中国"
+"Hengyang Normal University, Hengyang, China",26.8661136,112.620921219792,"衡阳师范学院, 黄白路, 雁峰区, 衡阳市 / Hengyang, 湖南省, 中国"
+Heriot-Watt University,55.91029135,-3.32345776559167,"Heriot-Watt University - Edinburgh Campus, Third Gait, Currie, Gogarbank, City of Edinburgh, Scotland, EH14 4AS, UK"
+"Hiroshima University, Japan",34.4019766,132.7123195,"Hiroshima University 広島大学 東広島キャンパス, 出会いの道 Deai-no-michi Str., 西条下見, 東広島市, 広島県, 中国地方, 739-0047, 日本"
+HoHai University,32.05765485,118.755000398628,"河海大学, 河海路, 小市桥, 鼓楼区, 南京市, 江苏省, 210013, 中国"
+"Hofstra University, Hempstead, NY 11549",40.71703345,-73.599835005538,"Hofstra University, Hempstead Turnpike Bike Path, East Garden City, Nassau County, New York, 11549, USA"
+Hong Kong Baptist University,22.3874201,114.2082222,"香港浸會大學 Hong Kong Baptist University, 安明街 On Ming Street, 石門 Shek Mun, 石古壟 Shek Kwu Lung, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1132, 中国"
+"Hong Kong Baptist University, Hong Kong",22.3874201,114.2082222,"香港浸會大學 Hong Kong Baptist University, 安明街 On Ming Street, 石門 Shek Mun, 石古壟 Shek Kwu Lung, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1132, 中国"
+Hong Kong Polytechnic University,22.304572,114.179762852269,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国"
+"Hong Kong Polytechnic University, Hong Kong",22.304572,114.179762852269,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国"
+"Hong Kong Polytechnic University, Hong Kong, China",22.304572,114.179762852269,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国"
+Hong Kong University of Science and Technology,22.3386304,114.2620337,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国"
+"Hong Kong University of Science and Technology, Hong Kong",22.3386304,114.2620337,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国"
+"Howard University, Washington DC",38.921525,-77.019535656678,"Howard University, College Street Northwest, Howard University, Washington, D.C., 20001, USA"
+"Huaqiao University, Xiamen, China",24.6004712,118.0816574,"华侨大学站 HuaQiao University (BRT), 集美大道, 集美区, 集美区 (Jimei), 厦门市 / Xiamen, 福建省, 361024, 中国"
+Huazhong University of,22.53367445,113.917874206261,"深圳市第六人民医院, 89号, 桃园路, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518000, 中国"
+Huazhong University of Science and Technology,30.5097537,114.4062881,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国"
+"Huazhong University of Science and Technology, Wuhan, China",30.5097537,114.4062881,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国"
+"Huazhong University of Science and Technology, Wuhan, China 430074",30.5097537,114.4062881,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国"
+"Humboldt-University, Berlin, Germany",52.51875685,13.3935604936378,"Humboldt-Universität zu Berlin, Dorotheenstraße, Spandauer Vorstadt, Mitte, Berlin, 10117, Deutschland"
+Hunan University,26.88111275,112.628506656425,"Yejin University for Employees, 冶金西路, 和平乡, 珠晖区, 衡阳市 / Hengyang, 湖南省, 中国"
+"IBM Almaden Research Center, San Jose CA",37.21095605,-121.807486683178,"IBM Almaden Research Center, San José, Santa Clara County, California, USA"
+IBM Research,35.9042272,-78.8556576330566,"IBM, East Cornwallis Road, Research Triangle Park, Nelson, Durham County, North Carolina, 27709, USA"
+"IBM Research, USA",35.9042272,-78.8556576330566,"IBM, East Cornwallis Road, Research Triangle Park, Nelson, Durham County, North Carolina, 27709, USA"
+IBM Thomas J. Watson Research Center,41.21002475,-73.8040705573196,"IBM Yorktown research lab, Adams Road, Millwood, Town of New Castle, Westchester County, New York, 10562, USA"
+IDIAP RESEARCH INSTITUTE,46.109237,7.08453548522408,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra"
+IDIAP Research Institute,46.109237,7.08453548522408,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra"
+"IDIAP Research Institute, Martigny, Switzerland",46.109237,7.08453548522408,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra"
+"IDIAP, Martigny, Switzerland",46.109237,7.08453548522408,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra"
+IIIT-Delhi,28.54632595,77.2732550434418,"IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India"
+"IIIT-Delhi, India",28.54632595,77.2732550434418,"IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India"
+"IIT Guwahati, Guwahati, India",26.19247875,91.6946356873113,"Indian Institute of Technology Guwahati - IIT Guwahati, NH27, Amingaon, Guwahati, Kamrup, Assam, 781015, India"
+IMPERIAL COLLEGE,39.9458551,116.406973072869,"国子监, 五道营胡同, Naga上院, 北京市, 东城区, 北京市, 100010, 中国"
+"INRIA Grenoble Rhone-Alpes, FRANCE",45.2182986,5.80703193086113,"INRIA, 655, Avenue de l'Europe, Innovallée Montbonnot, Montbonnot-Saint-Martin, Grenoble, Isère, Auvergne-Rhône-Alpes, France métropolitaine, 38330, France"
+Idiap Research Institute,46.109237,7.08453548522408,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra"
+"Idiap Research Institute, Martigny, Switzerland",46.109237,7.08453548522408,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra"
+"Idiap Research Institute, Switzerland",46.109237,7.08453548522408,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra"
+Illinois Institute of Technology,41.8361963,-87.6265591274291,"Illinois Institute of Technology, South State Street, Bronzeville, Chicago, Cook County, Illinois, 60616, USA"
+"Illinois Institute of Technology, Chicago, Illinois, USA",41.8361963,-87.6265591274291,"Illinois Institute of Technology, South State Street, Bronzeville, Chicago, Cook County, Illinois, 60616, USA"
+Imperial College London,51.49887085,-0.175607973937072,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK"
+"Imperial College London, London, U.K.",51.49887085,-0.175607973937072,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK"
+"Imperial College London, London, UK",51.49887085,-0.175607973937072,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK"
+"Imperial College London, U.K",51.49887085,-0.175607973937072,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK"
+"Imperial College London, U.K.",51.49887085,-0.175607973937072,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK"
+"Imperial College London, UK",51.49887085,-0.175607973937072,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK"
+"Imperial College London, United Kingdom",51.49887085,-0.175607973937072,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK"
+"Imperial College, London, UK",51.5004171,-0.1782711,"Sung Chuan Kung Fu, Imperial College, Prince Consort Road, City of Westminster, London, Greater London, England, SW7 2QU, UK"
+India,22.3511148,78.6677428,India
+Indian Institute of Science,13.0222347,77.5671832476811,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India"
+Indian Institute of Science Bangalore,13.0222347,77.5671832476811,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India"
+"Indian Institute of Science, India",13.0222347,77.5671832476811,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India"
+Indian Institute of Technology,28.5444176,77.1893001,"Indian Institute Of Technology, IIT Delhi Main Road, Adchini, Lado Sarai, Mehrauli, South Delhi, Delhi, 110066, India"
+"Indian Institute of Technology Delhi, New Delhi, India",28.5444176,77.1893001,"Indian Institute Of Technology, IIT Delhi Main Road, Adchini, Lado Sarai, Mehrauli, South Delhi, Delhi, 110066, India"
+Indian Institute of Technology Kanpur,26.513188,80.2365194538339,"Indian Institute of Technology Kanpur, 4th Avenue, Panki, Kanpur, Kanpur Nagar, Uttar Pradesh, 208016, India"
+"Indian Institute of Technology Kanpur, Kanpur, India",26.513188,80.2365194538339,"Indian Institute of Technology Kanpur, 4th Avenue, Panki, Kanpur, Kanpur Nagar, Uttar Pradesh, 208016, India"
+"Indian Institute of Technology, Roorkee",29.8662461,77.8958708109136,"Indian Institute of Technology (IIT), Roorkee, LBS Jogging Track, Roorkee, Haridwar, Uttarakhand, 247667, India"
+Indiana University,39.86948105,-84.8795690544362,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA"
+Indiana University Bloomington,39.17720475,-86.5154003022128,"Indiana University Bloomington, East 17th Street, Bloomington, Monroe County, Indiana, 47408, USA"
+"Industrial Technology Research Institute, Hsinchu, Taiwan",24.7741756,121.045092787653,"工研院, 195, 中興路四段, 頭重里, 竹東鎮, 新竹縣, 31040, 臺灣"
+Information Technologies Institute,33.5934539,130.3557837,"公益財団法人九州先端科学技術研究所, Fukuoka SRP Center Building 7F, 百道ランプ下り入り口, 早良区, 福岡市, 福岡県, 九州地方, 814-0001, 日本"
+"Information Technology University (ITU), Punjab, Lahore, Pakistan",31.4760299,74.3427526,"Information Technology University (ITU), Ferozepur Road, Sher Shah Block, Garden Town, Al Noor Town, Lahore District, پنجاب, 54600, ‏پاکستان‎"
+"Information, Keio University",35.5416969,139.6347184,"綱島市民の森, けつわり坂, 港北区, 横浜市, 神奈川県, 関東地方, 223-0053, 日本"
+Institute,38.3836097,-81.7654665,"Institute, Kanawha County, West Virginia, 25112, USA"
+Institute for Advanced,38.7468877,139.824707282407,"Institute for Advanced Biosciences, 鶴岡市, 山形県, 東北地方, 日本"
+Institute for Communication Systems,51.2433692,-0.593220895014599,"Institute for Communication Systems, Spine Road, Woodbridge Hill, Guildford, Surrey, South East, England, GU2 7XS, UK"
+Institute for System Programming,55.7449881,37.6645042069876,"ИСП РАН, 25, улица Александра Солженицына, Швивая горка, Таганский район, Центральный административный округ, Москва, ЦФО, 109004, РФ"
+Institute of,38.3836097,-81.7654665,"Institute, Kanawha County, West Virginia, 25112, USA"
+Institute of Automation,54.1720834,12.0790983,"Institut für Automatisierungstechnik, 31, Richard-Wagner-Straße, Warnemünde, Ortsbeirat 1 : Diedrichshagen,Seebad Warnemünde, Rostock, Mecklenburg-Vorpommern, 18119, Deutschland"
+Institute of Communications Engineering,54.1718573,12.0784417,"Institut für Nachrichtentechnik, 31, Richard-Wagner-Straße, Warnemünde, Ortsbeirat 1 : Diedrichshagen,Seebad Warnemünde, Rostock, Mecklenburg-Vorpommern, 18119, Deutschland"
+Institute of Computer Science,35.15456615,128.098476040221,"Institute of Computer Science, 8, 내동로, 신율리, 진주시, 경남, 52669, 대한민국"
+Institute of Computer Science III,35.15456615,128.098476040221,"Institute of Computer Science, 8, 내동로, 신율리, 진주시, 경남, 52669, 대한민국"
+Institute of Computing,43.47878995,-80.5548480959375,"Institute for Quantum Computing, Wes Graham Way, Lakeshore Village, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 6R2, Canada"
+Institute of Computing Technology,34.6988529,135.1936779,"神戸情報大学院大学, フラワーロード, 中央区, 神戸市, 兵庫県, 近畿地方, 650-0001, 日本"
+Institute of Digital Media,20.28907925,85.84232125,"Institute of Digital Media Technology, Way to Csa Odisha Office, Ward 35, South East Zone, Bhubaneswar Municipal Corporation, Khordha, Odisha, 751022, India"
+Institute of Electronics and Computer Science,56.97734805,24.1951425550775,"EDI, 14, Dzērbenes iela, Biķerziedi, Teika, Ozolkalni, Rīga, Vidzeme, LV-1006, Latvija"
+"Institute of Engineering and Management, Kolkata, India",22.57423855,88.4337303,"Institute of Engineering and Management, Block -EP, Ring Road, GP Block, Kolkata, Twenty-four Parganas, West Bengal, 700091, India"
+Institute of Industrial Science,36.05238585,140.118523607658,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本"
+Institute of Information Science,25.0410728,121.614756201755,"資訊科學研究所, 數理大道, 中研里, 南港子, 南港區, 臺北市, 11574, 臺灣"
+Institute of Information Technology,23.7289899,90.3982682,"Institute of Information Technology, Sir Sayed Road, ফকিরাপুল, সিদ্দিক বাজার, ঢাকা, ঢাকা বিভাগ, 1000, বাংলাদেশ"
+Institute of Media Innovation,1.3433937,103.6793303,"Institute for Media Innovation, 50, Nanyang Drive, Pioneer, Southwest, 637553, Singapore"
+Institute of Road and,38.3836097,-81.7654665,"Institute, Kanawha County, West Virginia, 25112, USA"
+Institute of Systems and Robotics,53.8338371,10.7035939,"Institut für Robotik und Kognitive Systeme, 160, Ratzeburger Allee, Strecknitz, Sankt Jürgen, Strecknitz, Lübeck, Schleswig-Holstein, 23562, Deutschland"
+International Institute of Information Technology,17.4454957,78.3485469754447,"International Institute of Information Technology, Hyderabad, Campus Road, Ward 105 Gachibowli, Greater Hyderabad Municipal Corporation West Zone, Hyderabad, Rangareddy District, Telangana, 500032, India"
+"International Institute of Information Technology (IIIT) Hyderabad, India",17.4454957,78.3485469754447,"International Institute of Information Technology, Hyderabad, Campus Road, Ward 105 Gachibowli, Greater Hyderabad Municipal Corporation West Zone, Hyderabad, Rangareddy District, Telangana, 500032, India"
+"International Institute of Information Technology, Hyderabad, India",17.4454957,78.3485469754447,"International Institute of Information Technology, Hyderabad, Campus Road, Ward 105 Gachibowli, Greater Hyderabad Municipal Corporation West Zone, Hyderabad, Rangareddy District, Telangana, 500032, India"
+"International Institute of Information Technology, Hyderabad, Telangana, India",17.4454957,78.3485469754447,"International Institute of Information Technology, Hyderabad, Campus Road, Ward 105 Gachibowli, Greater Hyderabad Municipal Corporation West Zone, Hyderabad, Rangareddy District, Telangana, 500032, India"
+International University of,11.5744201,104.8775841,"International University, ផ្លូវ ១៩៨៤, ភូមិភ្នំពេញថ្មី, ខណ្ឌសែនសុខ, រាជធានីភ្នំពេញ, 12101, ព្រះរាជាណាចក្រ​កម្ពុជា"
+Ionian University,38.2899482,21.7886469,"Πανεπιστήμιο Πατρών, Λεωφ. Ιπποκράτους, κ. Ρίου (Αγίου Γεωργίου Ρίου), Πάτρα, Δήμος Πατρέων, Περιφερειακή Ενότητα Αχαΐας, Περιφέρεια Δυτικής Ελλάδας, Πελοπόννησος, Δυτική Ελλάδα και Ιόνιο, 26443, Ελλάδα"
+"Iowa State University, Ames, IA, USA",42.02791015,-93.6446441473745,"Iowa State University, Farm House Road, Ames, Story County, Iowa, 50014, USA"
+Iran,32.9407495,52.9471344,‏ایران‎
+Islamic Azad University,34.8452999,48.5596212013643,"دانشگاه آزاد اسلامی, همدان, بخش مرکزی شهرستان همدان, شهرستان همدان, استان همدان, ‏ایران‎"
+Islamic University of Gaza - Palestine,31.51368535,34.4401934143135,"The Islamic University of Gaza, Mostafa Hafez Street, South Remal, محافظة غزة, ‏قطاع غزة‎, PO BOX 108, الأراضي الفلسطينية"
+Istanbul Technical University,41.10427915,29.022311592943,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye"
+"Istanbul Technical University (ITU), Turkey",41.10539,29.0213673,"ITU Open Air Theater, Arı Yolu, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34485, Türkiye"
+"Istanbul Technical University, Istanbul, 34469, TURKEY",41.10427915,29.022311592943,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye"
+"Istanbul Technical University, Istanbul, Turkey",41.10427915,29.022311592943,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye"
+"Istanbul Technical University, Turkey",41.10427915,29.022311592943,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye"
+Istanbul University,41.0132424,28.9637609,"İstanbul Üniversitesi, Besim Ömerpaşa Caddesi, Süleymaniye, Fatih, İstanbul, Marmara Bölgesi, 34116, Türkiye"
+"Istanbul University, Istanbul, Turkey",41.0132424,28.9637609,"İstanbul Üniversitesi, Besim Ömerpaşa Caddesi, Süleymaniye, Fatih, İstanbul, Marmara Bölgesi, 34116, Türkiye"
+Jacobs University,53.4129148,-2.96897915394896,"Liverpool Hope University, Shaw Street, Everton, Liverpool, North West England, England, L6 1HP, UK"
+Jadavpur University,22.5611537,88.4131019353334,"Jadavpur University, Chingrighata Flyover, Basani Devi Colony, Kolkata, Hāora, West Bengal, 700098, India"
+"Jadavpur University, India",22.5611537,88.4131019353334,"Jadavpur University, Chingrighata Flyover, Basani Devi Colony, Kolkata, Hāora, West Bengal, 700098, India"
+Jahangirnagar University,23.883312,90.2693921,"Jahangirnagar University, 1342, University Main Road, সাভার, সাভার উপজেলা, ঢাকা জেলা, ঢাকা বিভাগ, 1342, বাংলাদেশ"
+"Jahangirnagar University, Savar, Dhaka 1342, Bangladesh",23.88277575,90.2671009927283,"Jahangirnagar University, 1342, Dhaka - Aricha Highway, Nobinagar, সাভার উপজেলা, ঢাকা জেলা, ঢাকা বিভাগ, 1342, বাংলাদেশ"
+"Jaipur, Rajasthan, India",26.916194,75.820349,"Jaipur, Rajasthan, 302001, India"
+Japan,36.5748441,139.2394179,日本
+Japan Advanced Institute of Science and Technology,36.4442949,136.5928587,"JAIST (北陸先端科学技術大学院大学), 石川県道55号小松辰口線, Ishikawa Science Park, 能美市, 石川県, 中部地方, 923-1206, 日本"
+"Japan Advanced Institute of Science and Technology, Ishikawa-ken 923-1211, Japan",36.4442949,136.5928587,"JAIST (北陸先端科学技術大学院大学), 石川県道55号小松辰口線, Ishikawa Science Park, 能美市, 石川県, 中部地方, 923-1206, 日本"
+Jaypee Institute of Information Technology,28.6300443,77.3720823,"Jaypee Institute of Information Technology, Noida, A-10, National Highway 24 Bypass, Asha Pushp Vihar, Kaushambi, Ghaziabad, Uttar Pradesh, 201001, India"
+"Jiangnan University Jiangsu Wuxi, PR China",31.4854255,120.2739581,"江南大学站, 蠡湖大道, 滨湖区, 南场村, 滨湖区 (Binhu), 无锡市 / Wuxi, 江苏省, 214121, 中国"
+"Jiangnan University, Jiangsu Wuxi, PR China",31.4854255,120.2739581,"江南大学站, 蠡湖大道, 滨湖区, 南场村, 滨湖区 (Binhu), 无锡市 / Wuxi, 江苏省, 214121, 中国"
+"Jiangnan University, Wuxi",31.4854255,120.2739581,"江南大学站, 蠡湖大道, 滨湖区, 南场村, 滨湖区 (Binhu), 无锡市 / Wuxi, 江苏省, 214121, 中国"
+"Jiangsu University of Science and Technology, Zhenjiang, China",32.198055,119.4632679083,"江苏科技大学, 学府路, 京口区, 象山街道, 京口区 (Jingkou), 镇江市 / Zhenjiang, 江苏省, 212000, 中国"
+"Jiangsu University, ZhenJiang, Jiangsu, 212013, P. R. China",32.20302965,119.509683619281,"江苏大学, 301, 学府路, 京口区, 象山街道, 京口区 (Jingkou), 镇江市 / Zhenjiang, 江苏省, 212013, 中国"
+"Jiangsu University, Zhenjiang, China",32.20302965,119.509683619281,"江苏大学, 301, 学府路, 京口区, 象山街道, 京口区 (Jingkou), 镇江市 / Zhenjiang, 江苏省, 212013, 中国"
+"Jilin University, China",22.053565,113.39913285497,"吉林大学珠海校区, 丹桂路, 圣堂村, 金湾区, 珠海市, 广东省, 中国"
+"Joint Research Institute, Foshan, China",22.83388935,113.285418245471,"广东顺德中山大学卡内基梅隆大学国际联合研究院, 南国东路, 顺德区, 五村, 顺德区 (Shunde), 佛山市 / Foshan, 广东省, 0757, 中国"
+"Jordan University of Science and Technology, Irbid, Jordan",32.49566485,35.9916071719283,"Jordan University of Science and Technology, شارع الأردن, إربد‎, إربد, الأردن"
+"K.N. Toosi University of Technology, Tehran, Iran",35.76427925,51.409702762313,"دانشکده مهندسی عمران و نقشه برداری, ولی عصر, کاووسیه, منطقه ۳ شهر تهران, تجریش, بخش رودبارقصران, شهرستان شمیرانات, استان تهران, 1968653111, ‏ایران‎"
+"KAIST, Daejeon, Korea",36.3646244,127.352251416793,"궁동 카이스트 아파트 (Gungdong KAIST Apartments), 온천2동, 온천동, 유성구, 대전, 대한민국"
+"KAIST, Korea",36.3646244,127.352251416793,"궁동 카이스트 아파트 (Gungdong KAIST Apartments), 온천2동, 온천동, 유성구, 대전, 대한민국"
+"KTH Royal Institute of Technology, Stockholm",59.34986645,18.0706321329842,"KTH, Teknikringen, Lärkstaden, Norra Djurgården, Östermalms stadsdelsområde, Sthlm, Stockholm, Stockholms län, Svealand, 114 28, Sverige"
+"KTH Royal Institute of Technology, 100 44 Stockholm, Sweden",59.34986645,18.0706321329842,"KTH, Teknikringen, Lärkstaden, Norra Djurgården, Östermalms stadsdelsområde, Sthlm, Stockholm, Stockholms län, Svealand, 114 28, Sverige"
+"KTH Royal Institute of Technology, Stockholm, Sweden",59.34986645,18.0706321329842,"KTH, Teknikringen, Lärkstaden, Norra Djurgården, Östermalms stadsdelsområde, Sthlm, Stockholm, Stockholms län, Svealand, 114 28, Sverige"
+Karlsruhe Institute of,49.10184375,8.43312559623876,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland"
+Karlsruhe Institute of Technology,49.10184375,8.43312559623876,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland"
+"Karlsruhe Institute of Technology (KIT), Germany",49.10184375,8.43312559623876,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland"
+"Karlsruhe Institute of Technology, 76131 Karlsruhe, Germany",49.10184375,8.43312559623876,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland"
+"Karlsruhe Institute of Technology, Germany",49.10184375,8.43312559623876,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland"
+"Karlsruhe Institute of Technology, Karlsruhe, Germany",49.10184375,8.43312559623876,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland"
+Katholieke Universiteit Leuven,50.8830686,4.7019503,"Laboratorium voor Bos, natuur en landschap, 102, Vital Decosterstraat, Sint-Maartensdal, Leuven, Vlaams-Brabant, Vlaanderen, 3000, België / Belgique / Belgien"
+"Keio University, Yokohama 223-8522, Japan",35.55536215,139.654582444136,"慶應義塾大学 (矢上キャンパス), 理工坂, 港北区, 横浜市, 神奈川県, 関東地方, 223-8522, 日本"
+Kent State University,41.1443525,-81.3398283284572,"Kent State University, Lester A. Lefton Esplanade, Whitehall Terrace, Kent, Portage County, Ohio, 44242-0001, USA"
+"Kent State University, Kent, Ohio, USA",41.1443525,-81.3398283284572,"Kent State University, Lester A. Lefton Esplanade, Whitehall Terrace, Kent, Portage County, Ohio, 44242-0001, USA"
+"Khalifa University, Abu Dhabi, United Arab Emirates",24.4469025,54.3942563,"Khalifa University, شارع طَوِي مُوَيلِح, قصر الشاطئ, حدبة الزَّعْفرانة, أبوظبي, أبو ظبي, 31757, الإمارات العربية المتحدة"
+"Khon Kaen University, Khon Kaen, 40002, Thailand",16.46007565,102.812117979662,"มหาวิทยาลัยขอนแก่น, 4, บ้านหนองหัวช้าง, ขอนแก่น, จังหวัดขอนแก่น, 40002, ประเทศไทย"
+"King Abdullah University of Science and Technology 4700, Thuwal, Saudi Arabia",22.31055485,39.1051548637793,"KAUST, Collaboration Avenue, ثول, منطقة مكة المكرمة, 23955, السعودية"
+King Faisal University,26.397778,50.183056,"University of Dammam, King Faisal Rd, العقربية, الخبر, المنطقة الشرقية, ٣١٩٥٢, السعودية"
+"King Saud University, Riyadh",24.7246403,46.623350123456,"King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية"
+"King Saud University, Riyadh 11543, Saudi Arabia",24.7246403,46.623350123456,"King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية"
+"King Saud University, Riyadh, Saudi Arabia",24.7246403,46.623350123456,"King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية"
+Kingston University,51.4293086,-0.2684044,"Kingston University, Kingston Hill, Kingston Vale, Kingston-upon-Thames, London, Greater London, England, KT2 7TF, UK"
+"Kingston University, UK",51.4293086,-0.2684044,"Kingston University, Kingston Hill, Kingston Vale, Kingston-upon-Thames, London, Greater London, England, KT2 7TF, UK"
+Kobe University,34.7275714,135.237099997686,"神戸大学, 灘三田線, 灘区, 神戸市, 兵庫県, 近畿地方, 657-00027, 日本"
+"Kobe University, Japan",34.7275714,135.237099997686,"神戸大学, 灘三田線, 灘区, 神戸市, 兵庫県, 近畿地方, 657-00027, 日本"
+"Kogakuin University, Tokyo, Japan",35.6902784,139.695400958171,"工学院大学, 東通り, 新宿区, 東京都, 関東地方, 163-8677, 日本"
+"Kookmin University, Seoul, Korea",37.6107554,126.9946635,"국민대학교앞, 정릉로, 정릉2동, 정릉동, 성북구, 서울특별시, 02708, 대한민국"
+Korea Advanced Institute of Science and Technology,36.3697191,127.362537001151,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국"
+"Korea Advanced Institute of Science and Technology, Daejeon, Korea",36.3697191,127.362537001151,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국"
+"Korea Advanced Institute of Science and Technology, Daejeon, Republic of Korea",36.3697191,127.362537001151,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국"
+"Korea Advanced Institute of Science and Technology, Daejeon, South Korea",36.3697191,127.362537001151,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국"
+"Korea Advanced Institute of Science and Technology, Korea",36.3697191,127.362537001151,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국"
+Korea Advanced institute of Science and Technology,36.3697191,127.362537001151,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국"
+Korea University,37.5901411,127.0362318,"고려대, 안암로, 제기동, 동대문구, 서울특별시, 02796, 대한민국"
+"Korea University, Seoul, South Korea",37.5901411,127.0362318,"고려대, 안암로, 제기동, 동대문구, 서울특별시, 02796, 대한민국"
+"Kumamoto University, Kumamoto, Japan",32.8164178,130.727039687562,"熊本大学黒髪キャンパス, 熊本菊陽線, 中央区, 熊本市, 熊本県, 九州地方, 860-0863, 日本"
+"Kurukshetra University, Kurukshetra",29.95826275,76.8156304467532,"Kurukshetra University, SH6, Kurukshetra, Haryana, 132118, India"
+"Kurukshetra University, Kurukshetra, India",29.95826275,76.8156304467532,"Kurukshetra University, SH6, Kurukshetra, Haryana, 132118, India"
+"Kyoto University, Kyoto, Japan",35.0274996,135.781545126193,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本"
+Kyung Hee University,32.8536333,-117.2035286,"Kyung Hee Tae Kwon Do, Vons 2370 Truck Service Ramp, University City, San Diego, San Diego County, California, 92122, USA"
+"Kyung Hee University, Korea",37.5948716,127.0530887,"경희사이버대학교, 26, 경희대로, 회기동, 동대문구, 서울특별시, 02447, 대한민국"
+"Kyung Hee University, Seoul, South Korea",37.5948716,127.0530887,"경희사이버대학교, 26, 경희대로, 회기동, 동대문구, 서울특별시, 02447, 대한민국"
+"Kyung Hee University, South Korea",37.5948716,127.0530887,"경희사이버대학교, 26, 경희대로, 회기동, 동대문구, 서울특별시, 02447, 대한민국"
+"Kyung Hee University, Yongin, South Korea",37.24244405,127.080937489679,"경희대학교 국제캠퍼스, 서천동로21번길, 서천동, 기흥구, 용인시, 경기, 17108, 대한민국"
+Kyushu University,33.59914655,130.223598480987,"伊都ゲストハウス, 桜井太郎丸線, 西区, 福岡市, 福岡県, 九州地方, 819−0395, 日本"
+"La Trobe University, Australia",-36.7784754,144.298047,"La Trobe University, Keck Street, Flora Hill, Bendigo, City of Greater Bendigo, Loddon Mallee, Victoria, 3550, Australia"
+"Lancaster University, Lancaster, UK",54.00975365,-2.78757490881378,"Lancaster University, Library Avenue, Bowland College, Hala, Lancaster, Lancs, North West England, England, LA1 4AP, UK"
+"Lehigh University, Bethlehem, PA 18015, USA",40.6068028,-75.3782488,"Lehigh University, Library Drive, Sayre Park, Bethlehem, Northampton County, Pennsylvania, 18015, USA"
+Liverpool John Moores University,53.4050747,-2.97030028586709,"John Lennon Art and Design Building, Duckinfield Street, Knowledge Quarter, Liverpool, North West England, England, L3 5YD, UK"
+Lomonosov Moscow State University,55.70229715,37.5317977694291,"МГУ, улица Академика Хохлова, Московский государственный университет им. М. В. Ломоносова, район Раменки, Западный административный округ, Москва, ЦФО, 119234, РФ"
+"London, United Kingdom",51.5073219,-0.1276474,"London, Greater London, England, SW1A 2DU, UK"
+Louisiana State University,30.40550035,-91.1862047410405,"LSU, Gourrier Avenue, Baton Rouge, East Baton Rouge Parish, Louisiana, 70803, USA"
+"Lund University, Lund, Sweden",55.7039571,13.1902011,"TEM at Lund University, 9, Klostergatan, Stadskärnan, Centrum, Lund, Skåne, Götaland, 22222, Sverige"
+"M S Ramaiah Institute of Technology, Bangalore, Karnataka, India",13.0309553,77.5648559396817,"M S Ramaiah Institute of Technology, MSRIT Quadrangle Path, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560054, India"
+MASSACHUSETTS INSTITUTE OF TECHNOLOGY,42.3583961,-71.0956778766393,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+MASSACHUSETTS INSTITUTE OF TECHNOLOGY (MIT,42.3583961,-71.0956778766393,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+METs Institute of Engineering,28.2140454,83.9607104993073,"Dihiko Paton, Pokhara Lekhnath Metropolitan Ward No. 6, Pokhara, Pokhara Lekhnath Metropolitan, कास्की, गण्डकी अञ्चल, पश्चिमाञ्चल विकास क्षेत्र, नेपाल"
+"MO, USA",38.7604815,-92.5617875,"Missouri, USA"
+"MPI Informatics, Germany",49.2579566,7.04577416640431,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland"
+MULTIMEDIA UNIVERSITY,2.92749755,101.641853013536,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia"
+Maastricht University,50.8336712,5.71589,"UNS60, Professor Ten Hoorlaan, Randwyck, Maastricht, Limburg, Nederland, 6229EV, Nederland"
+"Maastricht University, Maastricht, Netherlands",50.8444528,5.6884711,"University College Maastricht, 4, Zwingelput, Jekerkwartier, Maastricht, Limburg, Nederland, 6211KH, Nederland"
+Macau University of Science and,22.3358031,114.265903983304,"HKUST, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国"
+Macau University of Science and Technology,22.15263985,113.568032061523,"Universidade de Ciência e Tecnologia de Macau 澳門科技大學 Macau University of Science and Technology, 偉龍馬路 Avenida Wai Long, 氹仔Taipa, 氹仔舊城區 Vila de Taipa, 嘉模堂區 Nossa Senhora do Carmo, 氹仔 Taipa, 澳門 Macau, 853, 中国"
+"Macau University of Science and Technology, Macau",22.15263985,113.568032061523,"Universidade de Ciência e Tecnologia de Macau 澳門科技大學 Macau University of Science and Technology, 偉龍馬路 Avenida Wai Long, 氹仔Taipa, 氹仔舊城區 Vila de Taipa, 嘉模堂區 Nossa Senhora do Carmo, 氹仔 Taipa, 澳門 Macau, 853, 中国"
+Mahanakorn University of Technology,13.84450465,100.856208183836,"มหาวิทยาลัยเทคโนโลยีมหานคร, 140, ถนนเชื่อมสัมพันธ์, กรุงเทพมหานคร, เขตหนองจอก, กรุงเทพมหานคร, 10530, ประเทศไทย"
+"Manchester University, UK",53.47020165,-2.23932183309859,"Manchester Metropolitan University – All Saints Campus, Lower Ormond Street, Hulme, Manchester, Greater Manchester, North West England, England, M15 6BX, UK"
+"Mangalore University, India",12.81608485,74.9244927772961,"Mangalore University, LR, ದಕ್ಷಿಣ ಕನ್ನಡ, Bantwal taluk, Dakshina Kannada, Karnataka, 574153, India"
+"Manonmaniam Sundaranar University, India",8.76554685,77.65100444813,"Manonmaniam Sundaranar University, Tenkasi-Tirunelveli, Gandhi Nagar, Tirunelveli, Tirunelveli Kattabo, Tamil Nadu, 627808, India"
+"Manonmaniam Sundaranar University, Tirunelveli",8.76554685,77.65100444813,"Manonmaniam Sundaranar University, Tenkasi-Tirunelveli, Gandhi Nagar, Tirunelveli, Tirunelveli Kattabo, Tamil Nadu, 627808, India"
+"Manonmaniam Sundaranar University, Tirunelveli, India",8.76554685,77.65100444813,"Manonmaniam Sundaranar University, Tenkasi-Tirunelveli, Gandhi Nagar, Tirunelveli, Tirunelveli Kattabo, Tamil Nadu, 627808, India"
+Marquette University,43.03889625,-87.9315544990507,"Marquette University, West Wisconsin Avenue, University Hill, Milwaukee, Milwaukee County, Wisconsin, 53226, USA"
+"Massachusetts General Hospital, Boston, MA, USA",42.36291795,-71.0687374226199,"Mass General, 55, Fruit Street, Downtown Crossing, Beacon Hill, Boston, Suffolk County, Massachusetts, 02114, USA"
+Massachusetts Institute,42.3583961,-71.0956778766393,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+Massachusetts Institute of Technology,42.3583961,-71.0956778766393,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+Massachusetts Institute of Technology (MIT,42.3583961,-71.0956778766393,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+"Massachusetts Institute of Technology, Cambridge, MA 02139, USA",42.3583961,-71.0956778766393,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+Math Institute,43.65879595,-79.3975504060101,"Fields Institute for Research in Math Science, 222, College Street, Kensington Market, Old Toronto, Toronto, Ontario, M5T 3A1, Canada"
+Max Planck Institute for Biological Cybernetics,48.5369125,9.05922532743396,"Max-Planck-Institut für Biologische Kybernetik, 8, Max-Planck-Ring, Max-Planck-Institut, Wanne, Tübingen, Landkreis Tübingen, Regierungsbezirk Tübingen, Baden-Württemberg, 72076, Deutschland"
+Max Planck Institute for Informatics,49.2579566,7.04577416640431,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland"
+"Max Planck Institute for Informatics, Germany",49.2579566,7.04577416640431,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland"
+"Max Planck Institute for Informatics, Saarbrucken, Germany",49.2579566,7.04577416640431,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland"
+Max-Planck Institute for Informatics,49.2579566,7.04577416640431,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland"
+McGill University,45.5039761,-73.5749687,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada"
+"McGill University, Montreal, Canada",45.50691775,-73.5791162596496,"McGill University, Avenue Docteur Penfield, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 2T8, Canada"
+McGovern Institute,42.3626295,-71.0914481,"McGovern Institute for Brain Research (MIT), Main Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+McGovern Institute for Brain Research,42.3626295,-71.0914481,"McGovern Institute for Brain Research (MIT), Main Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+McMaster University,43.26336945,-79.9180968401692,"McMaster University, Westdale, Hamilton, Ontario, Canada"
+Meiji University,35.6975029,139.761391749285,"明治大学, 錦華坂, 猿楽町1, 猿楽町, 東京, 千代田区, 東京都, 関東地方, 101-0051, 日本"
+"Memorial University of Newfoundland, Canada",47.5727251,-52.7330544350478,"Memorial University of Newfoundland, Overpass, St. John's, Newfoundland and Labrador, A1B 5S7, Canada"
+"Memorial University of Newfoundland, Saint John's, NL, Canada",47.5727251,-52.7330544350478,"Memorial University of Newfoundland, Overpass, St. John's, Newfoundland and Labrador, A1B 5S7, Canada"
+Michigan State University,42.718568,-84.4779157093052,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA"
+"Michigan State University, E. Lansing, MI 48823, USA",42.7337998,-84.4804243,"Dero Fixit Bike Station, Grand River Avenue, East Lansing, Ingham County, Michigan, 48824, USA"
+"Michigan State University, East Lansing 48824, USA",42.718568,-84.4779157093052,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA"
+"Michigan State University, East Lansing MI",42.718568,-84.4779157093052,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA"
+"Michigan State University, East Lansing, 48824, USA",42.718568,-84.4779157093052,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA"
+"Michigan State University, East Lansing, MI",42.718568,-84.4779157093052,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA"
+"Michigan State University, East Lansing, MI 48824, USA",42.718568,-84.4779157093052,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA"
+"Michigan State University, East Lansing, MI, USA",42.718568,-84.4779157093052,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA"
+"Michigan State University, USA",42.718568,-84.4779157093052,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA"
+"Michigan State University, United States of America",42.718568,-84.4779157093052,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA"
+"Microsoft Res. Asia, Beijing, China",39.97834785,116.304119070565,"微软亚洲研究院, 善缘街, 中关村, 稻香园南社区, 海淀区, 北京市, 100080, 中国"
+Microsoft Research,52.19495145,0.135010835076038,"Microsoft Research, 21, Station Road, Petersfield, Cambridge, Cambridgeshire, East of England, England, CB1 2FB, UK"
+"Microsoft Research Asia, Beijing, China",39.97834785,116.304119070565,"微软亚洲研究院, 善缘街, 中关村, 稻香园南社区, 海淀区, 北京市, 100080, 中国"
+"Microsoft Research Asia, China",39.97834785,116.304119070565,"微软亚洲研究院, 善缘街, 中关村, 稻香园南社区, 海淀区, 北京市, 100080, 中国"
+"Microsoft Research, Beijing, China",39.97834785,116.304119070565,"微软亚洲研究院, 善缘街, 中关村, 稻香园南社区, 海淀区, 北京市, 100080, 中国"
+"Microsoft, Bellevue, WA, USA",47.6164826,-122.2008506,"Microsoft, 10455, Northeast 8th Street, Bellevue, King County, Washington, 98004-5002, USA"
+"Microsoft, Redmond, WA",47.6592914,-122.140633217997,"Microsoft Cafe RedW-F, Bridle Crest Trail, Microsoft Redwest Campus, Redmond, King County, Washington, W LAKE SAMMAMISH PKWY NE, USA"
+Middle East Technical University,39.87549675,32.7855350558467,"ODTÜ, 1, 1591.sk(315.sk), Çiğdem Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye"
+Middlebury College,44.0090777,-73.1767946,"Middlebury College, Old Chapel Road, Middlebury, Addison County, Vermont, 05753, USA"
+Middlesex University London,51.59029705,-0.229632209454029,"Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK"
+"Middlesex University London, London, UK",51.59029705,-0.229632209454029,"Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK"
+"Middlesex University London, UK",51.59029705,-0.229632209454029,"Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK"
+"Middlesex University, London",51.59029705,-0.229632209454029,"Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK"
+Monash University,-37.78397455,144.958674326093,"Monash University, Mile Lane, Parkville, City of Melbourne, Victoria, 3000, Australia"
+"Monash University Malaysia, Bandar Sunway, Malaysia",3.06405715,101.6005974,"Monash University Malaysia, Jalan Lagoon Selatan, Kampung Lembah Kinrara, SS13, Subang Jaya, Selangor, 47500, Malaysia"
+"Monash University, Caulfield East, Australia",-37.8774135,145.044982494489,"Monash University (Caulfield campus), Queens Avenue, Caulfield East, City of Glen Eira, Victoria, 3163, Australia"
+"Monash University, Victoria, Australia",-37.9011951,145.130584919767,"Monash University, Business Park Drive, Monash Business Park, Notting Hill, City of Monash, Victoria, 3800, Australia"
+"Moscow Institute of Physics and Technology, Russia",55.929035,37.5186680829482,"МФТИ, 9, Институтский переулок, Виноградовские Горки, Лихачёво, Долгопрудный, городской округ Долгопрудный, Московская область, ЦФО, 141700, РФ"
+Muhlenberg College,40.5967637,-75.5124062,"Muhlenberg College, 2400, West Chew Street, Rose Garden, Allentown, Lehigh County, Pennsylvania, 18104, USA"
+Multimedia University,2.92749755,101.641853013536,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia"
+"Multimedia University, Cyberjaya, Malaysia",2.92749755,101.641853013536,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia"
+Myongji University,37.2381023,127.1903431,"명지대, 금학로, 역북동, 처인구, 용인시, 경기, 17144, 대한민국"
+"NCCU, USA",44.2962202,-84.7116495,"nccu, South Reserve Road, Houghton Lake, Roscommon County, Michigan, 48629, USA"
+"Nagaoka University of Technology, Japan",37.42354445,138.77807276029,"長岡技術科学大学 (Nagaoka University of Technology), 長岡西山線, 長岡市, 新潟県, 中部地方, 日本"
+Nagoya University,43.53750985,143.60768225282,"SuperDARN (Hokkaido West), 太辛第1支線林道, 陸別町, 足寄郡, 十勝総合振興局, 北海道, 北海道地方, 日本"
+"Nagoya University, Japan",43.53750985,143.60768225282,"SuperDARN (Hokkaido West), 太辛第1支線林道, 陸別町, 足寄郡, 十勝総合振興局, 北海道, 北海道地方, 日本"
+"Nanjing Normal University, China",32.1066811,118.90863080932,"南京师范大学仙林校区, 敏行路, 仙林大学城, 栖霞区, 南京市, 江苏省, 210046, 中国"
+"Nanjing Normal University, Nanjing, China",32.1066811,118.90863080932,"南京师范大学仙林校区, 敏行路, 仙林大学城, 栖霞区, 南京市, 江苏省, 210046, 中国"
+"Nanjing University of Aeronautics and Astronautics, China",32.0373496,118.8140686,"南京航空航天大学, 御道街, 白下区, 新世纪广场, 秦淮区, 南京市, 江苏省, 210016, 中国"
+"Nanjing University of Aeronautics and Astronautics, Nanjing 210016, China",32.0373496,118.8140686,"南京航空航天大学, 御道街, 白下区, 新世纪广场, 秦淮区, 南京市, 江苏省, 210016, 中国"
+"Nanjing University of Aeronautics and Astronautics, Nanjing, China",32.0373496,118.8140686,"南京航空航天大学, 御道街, 白下区, 新世纪广场, 秦淮区, 南京市, 江苏省, 210016, 中国"
+Nanjing University of Information Science and Technology,32.2068102,118.718472893883,"南京信息工程大学, 龙山北路, 第十六街区, 浦口区, 南京市, 江苏省, 210032, 中国"
+"Nanjing University of Information Science and Technology, Nanjing, China",32.2068102,118.718472893883,"南京信息工程大学, 龙山北路, 第十六街区, 浦口区, 南京市, 江苏省, 210032, 中国"
+Nanjing University of Science and Technology,32.031826,118.852142742792,"南京理工大学, 友谊路, 余粮庄, 玄武区, 南京市, 江苏省, 210016, 中国"
+"Nanjing University of Science and Technology, China",32.031826,118.852142742792,"南京理工大学, 友谊路, 余粮庄, 玄武区, 南京市, 江苏省, 210016, 中国"
+"Nanjing University of Science and Technology, Nanjing, China",32.031826,118.852142742792,"南京理工大学, 友谊路, 余粮庄, 玄武区, 南京市, 江苏省, 210016, 中国"
+"Nanjing University, China",32.0565957,118.774088328078,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国"
+"Nanjing University, Nanjing 210023, China",32.0565957,118.774088328078,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国"
+"Nanjing University, Nanjing 210093, China",32.0565957,118.774088328078,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国"
+"Nanjing University, Nanjing 210093, P.R.China",32.0565957,118.774088328078,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国"
+"Nantong University, Nantong, China",31.9747463,120.907792637552,"南通大学, 狼山镇街道, 崇川区 (Chongchuan), 南通市 / Nantong, 江苏省, 226000, 中国"
+Nanyang Technological University,1.3484104,103.682979653067,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore"
+"Nanyang Technological University, Singapore",1.3484104,103.682979653067,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore"
+"Nanyang Technological University, Singapore 639798",1.3484104,103.682979653067,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore"
+"Nanyang Technological University, Singapore 639798, Singapore",1.3484104,103.682979653067,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore"
+"Nanyang Technological University, Singapore, 639798",1.3484104,103.682979653067,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore"
+"Nanyang Technological University, Singapore, Singapore",1.3484104,103.682979653067,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore"
+"National Central University, Taoyuan County, Taiwan",24.96841805,121.191396961005,"NCU, 300, 中大路, 上三座屋, 五權里, 樹林子, 中壢區, 桃園市, 320, 臺灣"
+National Cheng Kung University,22.9991916,120.216251337909,"成大, 1, 大學路, 大學里, 前甲, 東區, 臺南市, 70101, 臺灣"
+"National Cheng Kung University, Tainan, Taiwan",22.9991916,120.216251337909,"成大, 1, 大學路, 大學里, 前甲, 東區, 臺南市, 70101, 臺灣"
+"National Chiao Tung University, Hsinchu, Taiwan",24.78676765,120.997244116807,"NCTU;交大;交通大學;交大光復校區;交通大學光復校區, 1001, 大學路, 光明里, 赤土崎, 東區, 新竹市, 30010, 臺灣"
+"National Chiao Tung University, Taiwan",24.78676765,120.997244116807,"NCTU;交大;交通大學;交大光復校區;交通大學光復校區, 1001, 大學路, 光明里, 赤土崎, 東區, 新竹市, 30010, 臺灣"
+National Chiao-Tung University,24.78676765,120.997244116807,"NCTU;交大;交通大學;交大光復校區;交通大學光復校區, 1001, 大學路, 光明里, 赤土崎, 東區, 新竹市, 30010, 臺灣"
+"National Chung Cheng University, Chiayi, Taiwan",23.56306355,120.475105312324,"國立中正大學, 168, 鳳凰大道, 民雄鄉, 嘉義縣, 62102, 臺灣"
+"National Chung Hsing University, Taichung",24.12084345,120.675711652432,"國立中興大學, 145, 興大路, 積善里, 頂橋子頭, 南區, 臺中市, 402, 臺灣"
+"National Chung Hsing University, Taiwan",24.12084345,120.675711652432,"國立中興大學, 145, 興大路, 積善里, 頂橋子頭, 南區, 臺中市, 402, 臺灣"
+National Institute of Advanced Industrial Science and Technology,36.05238585,140.118523607658,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本"
+National Institute of Standards and Technology,39.1254938,-77.2229347515,"National Institute of Standards and Technology, Summer Walk Drive, Diamond Farms, Gaithersburg, Montgomery County, Maryland, 20878, USA"
+"National Institute of Standards and Technology, Gaithersburg, MD 20899, USA",39.1254938,-77.2229347515,"National Institute of Standards and Technology, Summer Walk Drive, Diamond Farms, Gaithersburg, Montgomery County, Maryland, 20878, USA"
+National Institute of Technology Karnataka,13.01119095,74.7949882494716,"National Institute of Technology, Karnataka, NH66, ದಕ್ಷಿಣ ಕನ್ನಡ, Mangaluru taluk, Dakshina Kannada, Karnataka, 575025, India"
+National Institute of Technology Rourkela,22.2501589,84.9066855698087,"National Institute of Technology, inside the department, Koel Nagar, Rourkela, Sundargarh, Odisha, 769002, India"
+"National Institute of Technology, Durgapur, India",23.54869625,87.291057119111,"National Institute Of Technology, Durgapur, Priyadarshini Indira Sarani, Durgapur, Bānkurā, West Bengal, 713209, India"
+"National Institute of Technology, Durgapur, West Bengal, India",23.54869625,87.291057119111,"National Institute Of Technology, Durgapur, Priyadarshini Indira Sarani, Durgapur, Bānkurā, West Bengal, 713209, India"
+"National Institute of Technology, Rourkela (Odisha), India",22.2501589,84.9066855698087,"National Institute of Technology, inside the department, Koel Nagar, Rourkela, Sundargarh, Odisha, 769002, India"
+National Institutes of Health,39.00041165,-77.1032777503325,"NIH, Pooks Hill, Bethesda, Montgomery County, Maryland, USA"
+"National Institutes of Health, Bethesda, Maryland 20892",39.00041165,-77.1032777503325,"NIH, Pooks Hill, Bethesda, Montgomery County, Maryland, USA"
+"National Sun Yat Sen University, 804 Kaohsiung, Taiwan",22.62794005,120.266318480249,"國立中山大學, 70, 蓮海路, 桃源里, 柴山, 鼓山區, 高雄市, 804, 臺灣"
+"National Taichung University of science and Technology, Taichung",24.15031065,120.683255008879,"臺中科大, 129, 三民路三段, 錦平里, 賴厝廍, 北區, 臺中市, 40401, 臺灣"
+National Taipei University,24.94314825,121.368629787836,"國立臺北大學, 151, 大學路, 龍恩里, 隆恩埔, 三峽區, 新北市, 23741, 臺灣"
+"National Taipei University of Technology, Taipei, Taiwan",25.04306355,121.534687724212,"NTUT, 1, 忠孝東路三段, 民輝里, 東區商圈, 大安區, 臺北市, 10608, 臺灣"
+National Taiwan Normal University,25.00823205,121.535771533186,"師大分部, 88, 汀州路四段, 萬年里, 文山區, 臺北市, 11677, 臺灣"
+National Taiwan University,25.01682835,121.538469235773,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣"
+National Taiwan University of Science and Technology,25.01353105,121.541737363138,"臺科大, 43, 基隆路四段, 學府里, 下內埔, 大安區, 臺北市, 10607, 臺灣"
+"National Taiwan University of Science and Technology, Taipei 10607, Taiwan",25.01353105,121.541737363138,"臺科大, 43, 基隆路四段, 學府里, 下內埔, 大安區, 臺北市, 10607, 臺灣"
+"National Taiwan University of Science and Technology, Taipei, Taiwan",25.01353105,121.541737363138,"臺科大, 43, 基隆路四段, 學府里, 下內埔, 大安區, 臺北市, 10607, 臺灣"
+"National Taiwan University, 10647, Taipei, Taiwan",25.01682835,121.538469235773,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣"
+"National Taiwan University, Taipei, Taiwan",25.01682835,121.538469235773,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣"
+"National Taiwan University, Taiwan",25.01682835,121.538469235773,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣"
+National Technical University of Athens,37.98782705,23.7317973260904,"Εθνικό Μετσόβιο Πολυτεχνείο, Στουρνάρη, Μουσείο, Αθήνα, Δήμος Αθηναίων, Π.Ε. Κεντρικού Τομέα Αθηνών, Περιφέρεια Αττικής, Αττική, 11250, Ελλάδα"
+"National Tsing Hua University, Hsinchu, Taiwan",24.7925484,120.9951183,"國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣"
+"National Tsing Hua University, Taiwan",24.7925484,120.9951183,"國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣"
+National University,14.6042947,120.994285201104,"National University, M.F. Jocson, Royal Plaza, Sampaloc, Fourth District, Manila, Metro Manila, 1008, Philippines"
+National University of Defense Technology,28.2290209,112.994832044032,"国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国"
+"National University of Defense Technology, Changsha 410073, China",28.2290209,112.994832044032,"国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国"
+"National University of Defense Technology, Changsha, China",28.2290209,112.994832044032,"国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国"
+National University of Defense and Technology,28.2290209,112.994832044032,"国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国"
+"National University of Ireland Galway, Galway, Ireland",53.27639715,-9.05829960688327,"National University of Ireland, Galway, Earl's Island, Townparks, Nun's Island, Galway Municipal District, Cathair na Gaillimhe, County Galway, Connacht, H91 F5TE, Ireland"
+"National University of Ireland Maynooth, Co. Kildare, Ireland",53.3846975,-6.60039458177959,"National University of Ireland Maynooth, River Apartments, Maynooth, Maynooth ED, Maynooth Municipal District, County Kildare, Leinster, KILDARE, Ireland"
+"National University of Kaohsiung, 811 Kaohsiung, Taiwan",22.73424255,120.283497550993,"國立高雄大學, 中央廣場, 藍田, 藍田里, 楠梓區, 高雄市, 811, 臺灣"
+National University of Science and Technology,33.6450855,72.9915892221655,"National University of Science and Technology, Indus Loop, H-11, ICT, وفاقی دارالحکومت اسلام آباد, 44000, ‏پاکستان‎"
+"National University of Sciences and Technology (NUST), Islamabad, Pakistan",33.644347,72.9885079,"National University of Sciences and Technology (NUST), Kashmir Highway, جی - 10, ICT, وفاقی دارالحکومت اسلام آباد, 44000, ‏پاکستان‎"
+National University of Singapore,1.2962018,103.776899437848,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore"
+"National University of Singapore, Singapore",1.2962018,103.776899437848,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore"
+"National University of Singapore, Singapore 117576",1.2962018,103.776899437848,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore"
+"National University of Singapore, Singapore, Singapore",1.2962018,103.776899437848,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore"
+National University of Technology Technology,33.3120263,44.4471829434368,"الجامعة التكنلوجية, A86;N11;D383, محلة 103, Al Saadoom Park, Rusafa, بغداد, Al Resafa, محافظة بغداد, 3241, العراق"
+National University of singapore,1.2962018,103.776899437848,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore"
+"Naval Research Laboratory, Washington DC",38.8231381,-77.0178902,"Naval Research Laboratory Post Office, 4555, Overlook Avenue Southwest, Washington, D.C., 20375, USA"
+"Nazarbayev University, Astana, Kazakhstan",51.0902854,71.3972526281434,"Назарбаев Университет, проспект Туран, BI village, Астана, район Есиль, Астана, 010000, Казахстан"
+"Neurological Institute, USA",40.84211085,-73.9428460313244,"Neurological Institute of New York, Haven Avenue, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10032, USA"
+New Jersey Institute of Technology,40.7423025,-74.1792817237128,"New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA"
+"New Jersey Institute of Technology, Newark , NJ, USA",40.7423025,-74.1792817237128,"New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA"
+"New Jersey Institute of Technology, Newark, USA",40.7423025,-74.1792817237128,"New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA"
+"New Jersey Institute of Technology, USA",40.7423025,-74.1792817237128,"New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA"
+"New Jersey Institute of Technology, University Heights Newark, NJ 07102 USA",40.7423025,-74.1792817237128,"New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA"
+"New Jersey Institute of Technology, University Heights, Newark, New Jersey 07102, USA",40.7423025,-74.1792817237128,"New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA"
+New York University,40.72925325,-73.9962539360963,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA"
+"Newcastle University, Newcastle upon Tyne",54.98023235,-1.61452627035949,"Newcastle University, Claremont Walk, Haymarket, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE1 7RU, UK"
+"Normal University, Kunming, China",25.0580509,102.6955241,"云南师范大学, 一二一大街, 志城家园, 五华区, 五华区 (Wuhua), 昆明市 (Kunming), 云南省, 650030, 中国"
+"North Acton, London",51.52344665,-0.259735350000002,"North Acton, Victoria Road, Acton, London Borough of Ealing, London, Greater London, England, W3 6UP, UK"
+North Carolina Central University,35.97320905,-78.897550537484,"North Carolina Central University, George Street, Hayti, Durham, Durham County, North Carolina, 27707, USA"
+"North Carolina State University, Raleigh, United States of America",35.77184965,-78.6740869545263,"North Carolina State University, Oval Drive, West Raleigh, Raleigh, Wake County, North Carolina, 27695, USA"
+"North China Electric Power University, Baoding, China",38.8760446,115.4973873,"华北电力大学, 永华北大街, 莲池区, 保定市, 莲池区 (Lianchi), 保定市, 河北省, 071000, 中国"
+"North Dakota State University, Fargo, ND 58108-6050, USA",46.897155,-96.8182760282419,"North Dakota State University, 15th Avenue North, Fargo, Cass County, North Dakota, 58102, USA"
+Northeastern University,42.3383668,-71.0879352428284,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA"
+"Northeastern University, Boston, MA",42.3383668,-71.0879352428284,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA"
+"Northeastern University, Boston, MA, USA",42.3383668,-71.0879352428284,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA"
+"Northeastern University, Boston, USA",42.3383668,-71.0879352428284,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA"
+"Northeastern University, Boston, USA, 02115",42.34255795,-71.0905490240477,"Northeastern University, Public Alley 807, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA"
+"Northeastern University, MA, USA",42.3383668,-71.0879352428284,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA"
+Northumbria University,55.0030632,-1.57463231052026,"Northumbria University, Birkdale Close, High Heaton, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE7 7TP, UK"
+"Northumbria University, Newcastle Upon Tyne, Tyne and Wear",55.0030632,-1.57463231052026,"Northumbria University, Birkdale Close, High Heaton, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE7 7TP, UK"
+"Northumbria University, Newcastle upon Tyne, NE1 8ST, UK",54.9781026,-1.6067699,"Northumbria University, Northumberland Road, Cradlewell, Haymarket, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE1 8SG, UK"
+"Northumbria University, Newcastle upon Tyne, U.K.",55.0030632,-1.57463231052026,"Northumbria University, Birkdale Close, High Heaton, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE7 7TP, UK"
+Northwestern Polytechnical University,34.2469152,108.910619816771,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国"
+"Northwestern Polytechnical University, Xian 710072, Shaanxi, China",34.2469152,108.910619816771,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国"
+"Northwestern Polytechnical University, Xi’an, China",34.2469152,108.910619816771,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国"
+Northwestern University,42.0551164,-87.6758111348217,"Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA"
+"Northwestern University, Evanston, IL, USA",42.0551164,-87.6758111348217,"Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA"
+Nottingham Trent University,52.9577322,-1.15617099267709,"Nottingham Trent University, Waverley Terrace, Lace Market, The Park, City of Nottingham, East Midlands, England, NG1 5JD, UK"
+"Nottingham Trent University, Nottingham, UK",52.9577322,-1.15617099267709,"Nottingham Trent University, Waverley Terrace, Lace Market, The Park, City of Nottingham, East Midlands, England, NG1 5JD, UK"
+"Nottingham University Hospital, Nottingham, UK",52.9434967,-1.18631123153121,"Nottingham University Hospital, Central Route, Dunkirk, Wollaton, City of Nottingham, East Midlands, England, NG7 2UH, UK"
+OF PRINCETON UNIVERSITY,40.34829285,-74.66308325,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA"
+OF STANFORD UNIVERSITY,37.43131385,-122.169365354983,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA"
+"Oak Ridge National Laboratory, USA",35.93006535,-84.3124003215133,"Oak Ridge National Laboratory, Oak Ridge, Roane County, Tennessee, USA"
+Oakland University,42.66663325,-83.2065575175658,"Oakland University, 201, Meadow Brook Road, Rochester Hills, Oakland County, Michigan, 48309-4401, USA"
+"Ocean University of China, Qingdao, China",36.16161795,120.493552763931,"中国海洋大学, 238, 松岭路 Sōnglǐng Road, 朱家洼, 崂山区 (Laoshan), 青岛市, 山东省, 266100, 中国"
+Okayama University,34.6893393,133.9222272,"岡山大学, 津高法界院停車場線, 津島東2, 津島東, 北区, 岡山市, 岡山県, 中国地方, 700-0081, 日本"
+"Okayama University, Okayama, Japan",34.6893393,133.9222272,"岡山大学, 津高法界院停車場線, 津島東2, 津島東, 北区, 岡山市, 岡山県, 中国地方, 700-0081, 日本"
+"Oklahoma State University, Stillwater, OK, USA",36.1244756,-97.050043825,"Walmart East Bus Stop, East Virginia Avenue, Stillwater, Payne County, Oklahoma, 74075, USA"
+"Old Dominion University, Norfolk, VA 23529, USA",36.885682,-76.3076857937011,"Old Dominion University, Elkhorn Avenue, Lamberts Point, Norfolk, Virginia, 23508, USA"
+"Old Dominion University, Norfolk, VA, 23529",36.885682,-76.3076857937011,"Old Dominion University, Elkhorn Avenue, Lamberts Point, Norfolk, Virginia, 23508, USA"
+Open University of Israel,32.77824165,34.9956567288188,"האוניברסיטה הפתוחה, 15, אבא חושי, חיפה, גבעת דאונס, חיפה, מחוז חיפה, NO, ישראל"
+"Orange Labs, R&D, Meylan, France",45.21011775,5.79551075456301,"Orange Labs, 28, Chemin du Vieux Chêne, Inovallée Meylan, Le Mas du Bruchet, Meylan, Grenoble, Isère, Auvergne-Rhône-Alpes, France métropolitaine, 38240, France"
+Oregon State University,45.5198289,-122.677979643331,"OSU Beaver Store, 538, Southwest 6th Avenue, Portland Downtown, Portland, Multnomah County, Oregon, 97204, USA"
+"Osaka university, Japan",34.80809035,135.45785218408,"大阪大学清明寮, 服部西町四丁目, 豊中市, 大阪府, 近畿地方, 日本"
+Otto von Guericke University,52.14005065,11.6447124822347,"Otto-von-Guericke-Universität Magdeburg, 2, Universitätsplatz, Krökentorviertel/Breiter Weg NA, Alte Neustadt, Magdeburg, Sachsen-Anhalt, 39106, Deutschland"
+Otto-von-Guericke University Magdeburg,52.14005065,11.6447124822347,"Otto-von-Guericke-Universität Magdeburg, 2, Universitätsplatz, Krökentorviertel/Breiter Weg NA, Alte Neustadt, Magdeburg, Sachsen-Anhalt, 39106, Deutschland"
+Oxford Brookes University,51.7555205,-1.2261597,"Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK"
+"Oxford Brookes University, Oxford, United Kingdom",51.7555205,-1.2261597,"Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK"
+Oxford University,51.7520849,-1.25166460220888,"University College, Logic Lane, Grandpont, Oxford, Oxon, South East, England, OX1 4EX, UK"
+"Oxford University, UK",51.7488051,-1.23874457456279,"James Mellon Hall, Rectory Road, New Marston, Oxford, Oxon, South East, England, OX4 1BU, UK"
+"PA, 15213, USA",44.289627,-70.042577,"Pa, North Monmouth, Kennebec County, Maine, 04265, USA"
+"POSTECH, Pohang, South Korea, 37673",36.01773095,129.321075092352,"포스텍, 77, 청암로, 효곡동, 남구, 포항시, 경북, 37673, 대한민국"
+"PSG College of Technology, Coimbatore, Tamil Nadu, India",11.0246833,77.0028424564731,"PSG College of Technology, Avinashi Road, Ward 38, North Zone, Coimbatore, Coimbatore district, Tamil Nadu, 641001, India"
+Peking University,39.9922379,116.303938156219,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国"
+"Peking University, Beijing",39.9922379,116.303938156219,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国"
+"Peking University, Beijing 100871, China",39.9922379,116.303938156219,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国"
+"Peking University, Beijing, China",39.9922379,116.303938156219,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国"
+Pennsylvania,40.9699889,-77.7278831,"Pennsylvania, USA"
+"Perth, Western Australia 6012",-31.9527121,115.8604796,"Perth, Western Australia, 6000, Australia"
+"Philipps-Universität Marburg, D-35032, Germany",50.8142701,8.771435,"FB 09 | Germanistik und Kunstwissenschaften (Dekanat), 3, Deutschhausstraße, Biegenhausen, Biegenviertel, Marburg, Landkreis Marburg-Biedenkopf, Regierungsbezirk Gießen, Hessen, 35037, Deutschland"
+"Pittsburgh Univ., PA, USA",40.4462779,-79.9637743112056,"WQEX-TV (Pittsburgh);WQED-TV (Pittsburgh);WQED-FM (Pittsburgh);WINP-TV (Pittsburgh);WEPA-CD (Pittsburgh), 3801, University Drive, North Oakland, PGH, Allegheny County, Pennsylvania, 15213, USA"
+Plymouth University,50.3755269,-4.13937687442817,"Plymouth University, Portland Square, Barbican, Plymouth, South West England, England, PL4 6AP, UK"
+Pohang University of Science and Technology,36.01773095,129.321075092352,"포스텍, 77, 청암로, 효곡동, 남구, 포항시, 경북, 37673, 대한민국"
+"Pohang University of Science and Technology (POSTECH), Pohang, Republic of Korea",36.01773095,129.321075092352,"포스텍, 77, 청암로, 효곡동, 남구, 포항시, 경북, 37673, 대한민국"
+"Pohang University of Science and Technology (POSTECH), South Korea",36.01773095,129.321075092352,"포스텍, 77, 청암로, 효곡동, 남구, 포항시, 경북, 37673, 대한민국"
+"Pohang University of Science and Technology, Pohang, Korea",36.01773095,129.321075092352,"포스텍, 77, 청암로, 효곡동, 남구, 포항시, 경북, 37673, 대한민국"
+"Politecnico di Torino, Italy",45.0636974,7.65752730185847,"Politecnico di Torino, Corso Castelfidardo, Crocetta, Circoscrizione 3, Torino, TO, PIE, 10129, Italia"
+"Politecnico di Torino, Torino, Italy",45.0636974,7.65752730185847,"Politecnico di Torino, Corso Castelfidardo, Crocetta, Circoscrizione 3, Torino, TO, PIE, 10129, Italia"
+Politehnica University of Timisoara,45.746189,21.2275507517647,"UPT, Bulevardul Vasile Pârvan, Elisabetin, Timișoara, Timiș, 300223, România"
+Pondicherry Engineering College,12.0148693,79.8480910431981,"Pondicherry Engineering College, PEC MAIN ROAD, Sri Ma, Puducherry, Puducherry district, Puducherry, 605001, India"
+Pontificia Universidad Catolica de Chile,-33.41916095,-70.6178224038096,"Pontificia Universidad Católica de Chile - Campus Lo Contador, 1916, El Comendador, Pedro de Valdivia Norte, Providencia, Provincia de Santiago, Región Metropolitana de Santiago, 7500000, Chile"
+Portland State University,45.51181205,-122.684929993829,"Portland State University, Southwest Park Avenue, University District, Portland Downtown, Portland, Multnomah County, Oregon, 97201, USA"
+"Portland State University, USA",45.51181205,-122.684929993829,"Portland State University, Southwest Park Avenue, University District, Portland Downtown, Portland, Multnomah County, Oregon, 97201, USA"
+Portugal,40.033265,-7.8896263,Portugal
+Poznan University of Technology,52.4004837,16.9515808278647,"Dom Studencki nr 3, 3, Kórnicka, Święty Roch, Rataje, Poznań, wielkopolskie, 61-141, RP"
+Princeton University,40.34829285,-74.66308325,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA"
+"Princeton University, Princeton, NJ, USA",40.34725815,-74.6513455119257,"Lot 25, Ivy Lane, Princeton Township, Mercer County, New Jersey, 08544, USA"
+"Princeton University, Princeton, New Jersey, USA",40.34829285,-74.66308325,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA"
+"Pune Institute of Computer Technology, Pune, ( India",18.4575638,73.8507352,"Pune Institute of Computer Technology, Mediacal College Road, Vadgaon Budruk, Katraj, Pune, Pune District, Maharashtra, 411043, India"
+Punjabi University Patiala,30.3568981,76.4551272,"Punjabi University Patiala, Rajpura Road, Patiala, Punjab, 147001, India"
+Purdue University,40.4319722,-86.923893679845,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA"
+"Purdue University, West Lafayette, IN 47907, USA",40.4262569,-86.9157551,"Mathematical Sciences Library, 105, University Street, West Lafayette, Tippecanoe County, Indiana, 47907, USA"
+"Purdue University, West Lafayette, IN, USA",40.4319722,-86.923893679845,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA"
+"Purdue University, West Lafayette, IN. 47907, USA",40.4262569,-86.9157551,"Mathematical Sciences Library, 105, University Street, West Lafayette, Tippecanoe County, Indiana, 47907, USA"
+"Purdue University, West Lafayette, Indiana, 47906, USA",40.4319722,-86.923893679845,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA"
+"Qatar University, Doha, Qatar",25.37461295,51.4898035392337,"Qatar University, Roindabout 3, Al Tarfa (68), أم صلال, 24685, ‏قطر‎"
+"Qatar University, Qatar",25.37461295,51.4898035392337,"Qatar University, Roindabout 3, Al Tarfa (68), أم صلال, 24685, ‏قطر‎"
+"Quanzhou Normal University, Quanzhou, China",24.87147415,118.667386868962,"泉州师范学院, 东滨路, 丰泽区, 丰泽区 (Fengze), 泉州市 / Quanzhou, 福建省, 362000, 中国"
+Queen Mary University,51.5247272,-0.0393103466301624,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK"
+Queen Mary University of London,51.5247272,-0.0393103466301624,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK"
+"Queen Mary University of London, London",51.5247272,-0.0393103466301624,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK"
+"Queen Mary University of London, London E1 4NS, UK",51.5247272,-0.0393103466301624,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK"
+"Queen Mary University of London, London, U.K.",51.5247272,-0.0393103466301624,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK"
+"Queen Mary University of London, UK",51.5247272,-0.0393103466301624,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK"
+Queensland University of Technology,-27.47715625,153.028410039129,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia"
+Queensland University of Technology (QUT,-27.4770485,153.028373791304,"QUT Gardens Point Main Library, V, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia"
+Queensland University of Technology(QUT,-27.4770485,153.028373791304,"QUT Gardens Point Main Library, V, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia"
+"Queensland University of Technology, Australia",-27.47715625,153.028410039129,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia"
+"Queensland University of Technology, Brisbane, QLD, Australia",-27.47715625,153.028410039129,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia"
+"R V College of Engineering, Bangalore, India",12.9231039,77.5006395299617,"R. V. College of Engineering, Bangalore-Mysore Road, Kengeri, Rajarajeshwari Nagar Zone, Bengaluru, Bangalore Urban, Karnataka, 560059, India"
+"RMIT University, Australia",-37.8087465,144.9638875,"RMIT University, 124, La Trobe Street, Melbourne City, City of Melbourne, Victoria, 3000, Australia"
+"RMIT University, Melbourne, Australia",-37.8087465,144.9638875,"RMIT University, 124, La Trobe Street, Melbourne City, City of Melbourne, Victoria, 3000, Australia"
+"RMIT University, Melbourne, VIC, Australia",-37.8087465,144.9638875,"RMIT University, 124, La Trobe Street, Melbourne City, City of Melbourne, Victoria, 3000, Australia"
+"RMIT University, Vietnam",10.72991265,106.693208239997,"RMIT University Vietnam - Saigon South Campus, 702, Nguyễn Văn Linh, Khu 3 - Khu Đại học, Phường Tân Phong, Quận 7, Tp HCM, 756604, Việt Nam"
+RWTH Aachen University,50.7791703,6.06728732851292,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland"
+"RWTH Aachen University, Aachen, Germany",50.7791703,6.06728732851292,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland"
+Raipur institute of technology,21.2262243,81.8013664,"Raipur institute of technology, NH53, Raipur, Chhattisgarh, 492101, India"
+"Rajasthan, India",26.8105777,73.7684549,"Rajasthan, India"
+Rensselaer Polytechnic Institute,42.7298459,-73.6795021620135,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA"
+"Rensselaer Polytechnic Institute, Troy, NY 12180, USA",42.73280325,-73.6622354488153,"Rensselaer Polytechnic Institute, Tibbits Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA"
+"Rensselaer Polytechnic Institute, USA",42.7298459,-73.6795021620135,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA"
+Research Center,24.7261991,46.6365468966391,"مركز البحوث, طريق تركي الأول بن عبدالعزيز آل سعود, المحمدية, Al Muhammadiyah District حي المحمدية, Al Maather Municipality, الرياض, منطقة الرياض, 12371, السعودية"
+Reutlingen University,48.48187645,9.18682403998887,"Campus Hohbuch, Campus Hochschule Reutlingen, Reutlingen, Landkreis Reutlingen, Regierungsbezirk Tübingen, Baden-Württemberg, 72762, Deutschland"
+"RheinAhrCampus der Hochschule Koblenz, Remagen, Germany",50.5722562,7.25318610053143,"RheinAhrCampus, 2, Joseph-Rovan-Allee, Remagen, Landkreis Ahrweiler, Rheinland-Pfalz, 53424, Deutschland"
+"Rheinische-Friedrich-Wilhelms University, Bonn, Germany",50.7338124,7.1022465,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland"
+Rice University,29.71679145,-95.4047811339379,"Rice University, Stockton Drive, Houston, Harris County, Texas, 77005-1890, USA"
+"Rice University, Houston, TX, 77005, USA",29.71679145,-95.4047811339379,"Rice University, Stockton Drive, Houston, Harris County, Texas, 77005-1890, USA"
+"Rio de Janeiro State University, Brazil",-22.91117105,-43.2357797110467,"UERJ, 524, Rua São Francisco Xavier, Maracanã, Zona Norte do Rio de Janeiro, Rio de Janeiro, Microrregião Rio de Janeiro, Região Metropolitana do Rio de Janeiro, RJ, Região Sudeste, 20550-900, Brasil"
+"Ritsumeikan University, Japan",35.0333281,135.7249154,"立命館大学 (Ritsumeikan University), 衣笠宇多野線, 北区, 京都市, 京都府, 近畿地方, 6038577, 日本"
+"Ritsumeikan University, Kyoto, Japan",35.0333281,135.7249154,"立命館大学 (Ritsumeikan University), 衣笠宇多野線, 北区, 京都市, 京都府, 近畿地方, 6038577, 日本"
+"Ritsumeikan, University",49.26007165,-123.253442836235,"Ritsumeikan House, Lower Mall, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada"
+Robotics Institute,13.65450525,100.494231705059,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย"
+Rochester Institute of Technology,43.08250655,-77.6712166264273,"Rochester Institute of Technology (RIT), 1, Lomb Memorial Drive, Bailey, Henrietta Town, Monroe County, New York, 14623, USA"
+Rowan University,39.7103526,-75.1193266647699,"Rowan University, Esbjornson Walk, Glassboro, Gloucester County, New Jersey, 08028, USA"
+"Rowan University, Glassboro, NJ- 08028",39.7082432,-75.1170342529732,"Wellness Center (Winans Hall), Mullica Hill Road, Beau Rivage, Glassboro, Gloucester County, New Jersey, 08028:08062, USA"
+Rowland Institute,42.3639862,-71.0778293,"Rowland Research Institute, Land Boulevard, East Cambridge, Cambridge, Middlesex County, Massachusetts, 02142, USA"
+Ruhr University Bochum,51.44415765,7.26096541306078,"RUB, 150, Universitätsstraße, Ruhr-Universität, Querenburg, Bochum-Süd, Bochum, Regierungsbezirk Arnsberg, Nordrhein-Westfalen, 44801, Deutschland"
+"Ruhr-University Bochum, Germany",51.44415765,7.26096541306078,"RUB, 150, Universitätsstraße, Ruhr-Universität, Querenburg, Bochum-Süd, Bochum, Regierungsbezirk Arnsberg, Nordrhein-Westfalen, 44801, Deutschland"
+Rutgers University,40.47913175,-74.431688684404,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA"
+"Rutgers University, New Brunswick, NJ",40.50007595,-74.4457915242934,"Zimmerli Art Museum, 71, Hamilton Street, New Brunswick, Middlesex County, New Jersey, 08901-1248, USA"
+"Rutgers University, Newark, NJ, USA",40.7417586,-74.1750462269524,"Dana Library, Bleeker Street, Teachers Village, Newark, Essex County, New Jersey, 07102, USA"
+"Rutgers University, Piscataway",40.52251655,-74.4373851411688,"James Dickson Carr Library, 75, Avenue E, Piscataway Township, Middlesex County, New Jersey, 08854-8040, USA"
+"Rutgers University, Piscataway NJ 08854, USA",40.5234675,-74.436975,"The Rock Cafe, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA"
+"Rutgers University, Piscataway, NJ",40.5234675,-74.436975,"The Rock Cafe, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA"
+"Rutgers University, Piscataway, NJ 08854, USA",40.5234675,-74.436975,"The Rock Cafe, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA"
+"Rutgers University, Piscataway, NJ, USA",40.5234675,-74.436975,"The Rock Cafe, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA"
+"Rutgers University, Piscataway, New Jersey 08854, USA",40.5234675,-74.436975,"The Rock Cafe, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA"
+"Rutgers University, USA",40.47913175,-74.431688684404,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA"
+"Ryerson University, Canada",43.65815275,-79.3790801045263,"Ryerson University, Gould Street, Downtown Yonge, Old Toronto, Toronto, Ontario, M5B 2G9, Canada"
+"Ryerson University, Toronto, ON, Canada",43.65815275,-79.3790801045263,"Ryerson University, Gould Street, Downtown Yonge, Old Toronto, Toronto, Ontario, M5B 2G9, Canada"
+"SASTRA University, Thanjavur, Tamil Nadu, India",10.9628655,79.3853065130097,"SASTRA University, SRC Campus, Big Bazaar Street, கும்பகோணம், Thanjavur district, Tamil Nadu, 612001, India"
+SIMON FRASER UNIVERSITY,49.2767454,-122.917773749103,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada"
+"SRI International, Menlo Park, USA",37.4585796,-122.17560525105,"SRI International Building, West 1st Street, Menlo Park, San Mateo County, California, 94025, USA"
+SUNY Buffalo,42.9336278,-78.8839447903448,"SUNY College at Buffalo, Academic Drive, Elmwood Village, Buffalo, Erie County, New York, 14222, USA"
+Sabanci University,40.8927159,29.3786332263582,"Sabanci Universitesi, Preveze Cad., Orta Mahallesi, Tepeören, Tuzla, İstanbul, Marmara Bölgesi, 34953, Türkiye"
+Sakarya University,40.76433515,30.3940787517111,"Sakarya Üniversitesi Diş Hekimliği Fakültesi, Adnan Menderes Caddesi, Güneşler, Adapazarı, Sakarya, Marmara Bölgesi, 54050, Türkiye"
+"San Jose State University, San Jose, CA",37.3351908,-121.881260081527,"SJSU, El Paseo de Cesar E. Chavez, Downtown Historic District, Japantown, San José, Santa Clara County, California, 95113, USA"
+"Santa Clara University, Santa Clara, CA. 95053, USA",37.34820285,-121.935635412063,"Cowell Center, Accolti Way, Santa Clara, Santa Clara County, California, 95053, USA"
+Santa Fe Institute,35.7002878,-105.908648471331,"Santa Fe Institute, Hyde Park Road, Santa Fe, Santa Fe County, New Mexico, 87501, USA"
+"School, The University of Sydney, Sydney, NSW, Australia",-33.8893229,151.180068,"Royal Prince Alfred Hospital School, 57-59, Grose Street, Camperdown, Sydney, NSW, 2050, Australia"
+"Science, University of Amsterdam",52.3553655,4.9501644,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland"
+"Selçuk University, Konya, Turkey",38.02420685,32.5057052418378,"Selçuk Üniversitesi, Ali Fuat Cebesoy Cad., Ardıçlı Mahallesi, Konya, Selçuklu, Konya, İç Anadolu Bölgesi, Türkiye"
+Semarang State University,-7.00349485,110.417749486905,"Mandiri University, Jalan Tambora, RW 10, Tegalsari, Candisari, Semarang, Jawa Tengah, 50252, Indonesia"
+"Semnan University, Semnan, Iran",35.6037444,53.434458770112,"دانشگاه سمنان, بزرگراه امام رضا, شهرک مسکن مهر مصلی, ناسار, سمنان, بخش مرکزی, شهرستان سمنان, استان سمنان, ‏ایران‎"
+Seoul Nat'l Univ.,37.46685,126.94892,"서울대학교, 1, 관악로, 서림동, 신림동, 관악구, 서울특별시, 08825, 대한민국"
+Seoul National University,37.26728,126.9841151,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국"
+"Seoul National University, Korea",37.26728,126.9841151,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국"
+"Seoul National University, Seoul, Korea",37.46685,126.94892,"서울대학교, 1, 관악로, 서림동, 신림동, 관악구, 서울특별시, 08825, 대한민국"
+Shaheed Zulfikar Ali Bhutto Institute of,24.8186587,67.0316585,"Shaheed Zulfikar Ali Bhutto Institute of Science and Technology - Karachi Campus, Block 5, Clifton Block 5, CBC, ڪراچي Karachi, Karāchi District, سنڌ, 75600, ‏پاکستان‎"
+Shandong University of Science and Technology,36.00146435,120.116240565627,"山东科技大学, 579, 前湾港路, 牛王庙, 北下庄, 黄岛区 (Huangdao), 青岛市, 山东省, 266500, 中国"
+"Shandong University, Shandong, China",36.3693473,120.673818,"山东大学, 泰安街, 鳌山卫街道, 即墨区, 青岛市, 山东省, 266200, 中国"
+"Shanghai Institute of Technology, Shanghai, China",31.1678395,121.417382632476,"上海应用技术大学, 康健路, 长桥, 徐汇区, 上海市, 200233, 中国"
+Shanghai Jiao Tong University,31.20081505,121.428406809373,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国"
+"Shanghai Jiao Tong University, China",31.20081505,121.428406809373,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国"
+"Shanghai Jiao Tong University, People's Republic of China",31.20081505,121.428406809373,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国"
+"Shanghai Jiao Tong University, Shanghai 200240, China",31.02775885,121.432219256081,"上海交通大学(闵行校区), 宣怀大道, 紫竹科技园区, 英武, 闵行区, 上海市, 200240, 中国"
+"Shanghai Jiao Tong University, Shanghai, China",31.20081505,121.428406809373,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国"
+Shanghai University,31.32235655,121.384009410929,"上海大学, 锦秋路, 大场镇, 宝山区 (Baoshan), 上海市, 201906, 中国"
+"Shanghai University, Shanghai, China",31.32235655,121.384009410929,"上海大学, 锦秋路, 大场镇, 宝山区 (Baoshan), 上海市, 201906, 中国"
+Shanghai university,31.32235655,121.384009410929,"上海大学, 锦秋路, 大场镇, 宝山区 (Baoshan), 上海市, 201906, 中国"
+"Sharda University, Greater Noida, India",28.4737512,77.4836148,"Sharda University, Yamuna Expressway, Greater Noida, Gautam Buddha Nagar, Uttar Pradesh, 201308, India"
+Sharif University of Technology,35.7036227,51.351250969544,"دانشگاه صنعتی شریف, خیابان آزادی, زنجان, منطقه ۹ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, 14588, ‏ایران‎"
+"Sharif University of Technology, Tehran. Iran",35.7036227,51.351250969544,"دانشگاه صنعتی شریف, خیابان آزادی, زنجان, منطقه ۹ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, 14588, ‏ایران‎"
+Shenzhen Institutes of Advanced Technology,22.59805605,113.985337841399,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国"
+"Shenzhen University, Shenzhen China",22.53521465,113.931591101679,"深圳大学, 3688, 南海大道, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518060, 中国"
+"Shenzhen University, Shenzhen, China",22.53521465,113.931591101679,"深圳大学, 3688, 南海大道, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518060, 中国"
+"Shibaura Institute of Technology, Tokyo, Japan",35.66053325,139.795031213151,"芝浦工業大学 豊洲キャンパス, 晴海通り, 豊洲2, 豊洲, 富岡一丁目, 江東区, 東京都, 関東地方, 135-6001, 日本"
+Shiraz University,29.6385474,52.5245706,"دانشگاه شیراز, میدان ارم, محدوده شهرداری منطقه یک - شهرداری شیراز, شیراز, بخش مرکزی شهرستان شیراز, شهرستان شیراز, استان فارس, 71348-34689, ‏ایران‎"
+"Sichuan Univ., Chengdu",30.642769,104.067511751425,"四川大学(华西校区), 校东路, 武侯区, 武侯区 (Wuhou), 成都市 / Chengdu, 四川省, 610014, 中国"
+Simon Fraser University,49.2767454,-122.917773749103,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada"
+Singapore,1.3408528,103.878446863736,"Singapore, Southeast, Singapore"
+"Singapore Management University, Singapore",1.29500195,103.849092139632,"Singapore Management University, Fort Canning Tunnel, Clarke Quay, City Hall, Singapore, Central, 178895, Singapore"
+"Singapore University of Technology and Design, Singapore",1.340216,103.965089,"Singapore University of Technology and Design, Simpang Bedok, Changi Business Park, Southeast, 486041, Singapore"
+Sinhgad College of,19.0993293,74.7691424,"SINHGAD, NH61, Foi, Ahmadnagar, Ahmednagar, Maharashtra, 414001, India"
+"Soochow University, Suzhou, China",31.3070951,120.635739868117,"苏州大学(天赐庄校区), 清荫路, 钟楼社区, 双塔街道, 姑苏区, 苏州市, 江苏省, 215001, 中国"
+"South China Normal University, Guangzhou, China",23.143197,113.34009651145,"华师, 五山路, 华南理工大学南新村, 天河区, 广州市, 广东省, 510630, 中国"
+South China University of China,23.0490047,113.3971571,"华工站, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国"
+South China University of Technology,23.0502042,113.398803226836,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国"
+"South China University of Technology, China",23.0502042,113.398803226836,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国"
+"South China University of Technology, Guangzhou, China",23.0502042,113.398803226836,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国"
+"South China University of Technology, Guangzhou, Guangdong, China",23.0502042,113.398803226836,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国"
+South College Road,39.2715228,-76.6936807,"South College Road, Beechfield, Baltimore, Maryland, 21229, USA"
+"South East European University, Tetovo, Macedonia",41.98676415,20.9625451620439,"Универзитет на Југоисточна Европа, 335, Мајка Тереза, Тетово, Општина Тетово, Полошки Регион, 1200, Македонија"
+"Southeast University, Nanjing, China",32.0575279,118.786822520439,"SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国"
+Southwest Jiaotong University,30.697847,104.0520811,"西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国"
+"Southwest Jiaotong University, Chengdu, China",30.697847,104.0520811,"西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国"
+"Southwest Jiaotong University, Chengdu, P.R. China",30.697847,104.0520811,"西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国"
+"Southwest University, China",29.82366295,106.420500156445,"西南大学, 天生路, 北碚区 (Beibei), 北碚区, 北碚区 (Beibei), 重庆市, 400711, 中国"
+"Southwest University, Chongqing 400715, China",29.82366295,106.420500156445,"西南大学, 天生路, 北碚区 (Beibei), 北碚区, 北碚区 (Beibei), 重庆市, 400711, 中国"
+"Southwest University, Chongqing, China",29.82366295,106.420500156445,"西南大学, 天生路, 北碚区 (Beibei), 北碚区, 北碚区 (Beibei), 重庆市, 400711, 中国"
+"Sri krishna College of Technology, Coimbatore, India",10.925861,76.9224672855261,"Sri Krishna College of Technology, Kovaipudur to Golf Course Road dirt track, Ward 89, South Zone, Coimbatore, Coimbatore district, Tamil Nadu, 641001, India"
+"Stamford University Bangladesh, Dhaka-1209, Bangladesh",23.7448166,90.4084351355108,"Stamford University Bangladesh, Siddeshwari Road, ফকিরাপুল, Paltan, ঢাকা, ঢাকা বিভাগ, 1217, বাংলাদেশ"
+Stanford University,37.43131385,-122.169365354983,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA"
+"Stanford University, CA",37.43131385,-122.169365354983,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA"
+"Stanford University, CA, United States",37.43131385,-122.169365354983,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA"
+"Stanford University, Stanford, CA, USA",37.43131385,-122.169365354983,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA"
+"Stanford University, Stanford, California",37.43131385,-122.169365354983,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA"
+"Stanford University, USA",37.43131385,-122.169365354983,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA"
+"State University of New York Polytechnic Institute, Utica, New York",43.13800205,-75.2294359077068,"State University of New York Polytechnic Institute, 100, Seymour Road, Maynard, Town of Marcy, Oneida County, New York, 13502, USA"
+State University of New York at Binghamton,42.08779975,-75.9706606561486,"State University of New York at Binghamton, East Drive, Hinman, Willow Point, Vestal Town, Broome County, New York, 13790, USA"
+"State University of New York at Binghamton, USA",42.08779975,-75.9706606561486,"State University of New York at Binghamton, East Drive, Hinman, Willow Point, Vestal Town, Broome County, New York, 13790, USA"
+State University of New York at Buffalo,42.95485245,-78.8178238693065,"University at Buffalo, The State University of New York, South Campus, Norton Circle, University Heights, Buffalo, Erie County, New York, 14226, USA"
+"Statistics, University of",32.0731522,72.6814703364947,"Department Of Statistics, University Road, Satellite Town, Cantonment, سرگودھا, Sargodha District, پنجاب, 40100, ‏پاکستان‎"
+Stevens Institute of Technology,40.742252,-74.0270949,"Stevens Institute of Technology, River Terrace, Hoboken, Hudson County, New Jersey, 07030, USA"
+"Stevens Institute of Technology, Hoboken, New Jersey, 07030",40.7451724,-74.027314,"Stevens Institute of Technology, Hudson Street, Hoboken, Hudson County, New Jersey, 07030, USA"
+Stony Brook University,40.9153196,-73.1270626,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA"
+Stony Brook University Hospital,40.90826665,-73.1152089127966,"Stony Brook University Hospital, 101, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA"
+"Stony Brook University, NY 11794, USA",40.9153196,-73.1270626,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA"
+"Stony Brook University, NY, USA",40.9153196,-73.1270626,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA"
+"Stony Brook University, Stony Brook NY 11794, USA",40.9153196,-73.1270626,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA"
+"Stony Brook University, Stony Brook, NY 11794, USA",40.9153196,-73.1270626,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA"
+"Stony Brook University, Stony Brook, USA",40.9153196,-73.1270626,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA"
+Sun Yat-Sen University,23.09461185,113.287889943975,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国"
+"Sun Yat-Sen University, China",23.09461185,113.287889943975,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国"
+"Sun Yat-Sen University, GuangZhou, China",23.09461185,113.287889943975,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国"
+"Sun Yat-Sen University, Guangzhou, China",23.09461185,113.287889943975,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国"
+"Sun Yat-Sen University, Guangzhou, P.R. China",23.09461185,113.287889943975,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国"
+Sun Yat-sen University,23.09461185,113.287889943975,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国"
+"Sun Yat-sen University, China",23.09461185,113.287889943975,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国"
+"Sun Yat-sen University, Guangzhou, China",23.09461185,113.287889943975,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国"
+SungKyunKwan University,37.3003127,126.972123,"성균관대, 덕영대로, 천천동, 장안구, 수원시, 경기, 16357, 대한민국"
+Sungkyunkwan University,37.3003127,126.972123,"성균관대, 덕영대로, 천천동, 장안구, 수원시, 경기, 16357, 대한민국"
+"Sungkyunkwan University, Suwon, Republic of Korea",37.3003127,126.972123,"성균관대, 덕영대로, 천천동, 장안구, 수원시, 경기, 16357, 대한민국"
+Swansea University,51.6091578,-3.97934429228629,"Swansea University, University Footbridge, Sketty, Swansea, Wales, SA2 8PZ, UK"
+"Swansea University, Swansea, UK",51.6091578,-3.97934429228629,"Swansea University, University Footbridge, Sketty, Swansea, Wales, SA2 8PZ, UK"
+Swiss Federal Institute of Technology,47.3764534,8.54770931489751,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra"
+THE UNIVERSITY OF ARIZONA,32.2351726,-110.950958317648,"University of Arizona, North Highland Avenue, Rincon Heights, Barrio Viejo, Tucson, Pima County, Arizona, 85721, USA"
+THE UNIVERSITY OF CHICAGO,41.78468745,-87.6007493265106,"University of Chicago, South Ellis Avenue, Woodlawn, Chicago, Cook County, Illinois, 60637, USA"
+"TU Darmstadt, D-64283, Germany",49.8754648,8.6594332,"Institut für Psychologie, 10, Alexanderstraße, Darmstadt-Mitte, Darmstadt, Regierungsbezirk Darmstadt, Hessen, 64283, Deutschland"
+Tafresh University,34.68092465,50.0534135183902,"دانشگاه تفرش, پاسداران, خرازان, بخش مرکزی, شهرستان تفرش, استان مرکزی, ‏ایران‎"
+"Tafresh University, Tafresh, Iran",34.68092465,50.0534135183902,"دانشگاه تفرش, پاسداران, خرازان, بخش مرکزی, شهرستان تفرش, استان مرکزی, ‏ایران‎"
+"Tamkang University, Taipei, Taiwan",25.17500615,121.450767514156,"淡江大學, 151, 英專路, 中興里, 鬼仔坑, 淡水區, 新北市, 25137, 臺灣"
+Tampere University of Technology,61.44964205,23.8587746189096,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi"
+"Tampere University of Technology, Finland",61.44964205,23.8587746189096,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi"
+"Tampere University of Technology, Tampere 33720, Finland",61.44964205,23.8587746189096,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi"
+"Tampere University of Technology, Tampere, Finland",61.44964205,23.8587746189096,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi"
+"Technical University Munich, Germany",48.14955455,11.5677531417838,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland"
+"Technicolor, France",48.831533,2.28066282926829,"Technicolor, Rue d'Oradour-sur-Glane, Javel, 15e, Paris, Île-de-France, France métropolitaine, 75015, France"
+"Technicolor, Paris, France",48.831533,2.28066282926829,"Technicolor, Rue d'Oradour-sur-Glane, Javel, 15e, Paris, Île-de-France, France métropolitaine, 75015, France"
+Technion,32.774576,35.0236399,"טכניון, חיפה, קרית הטכניון, חיפה, מחוז חיפה, NO, ישראל"
+Technion Israel Institute of Technology,32.7767536,35.0241452903301,"הטכניון - מכון טכנולוגי לישראל, דוד רוז, חיפה, קרית הטכניון, חיפה, מחוז חיפה, NO, ישראל"
+"Technological University, Davanagere, Karnataka, India",14.4525199,75.9179512,"UBDT College of Engineering, College Private Road, K.T. Jambanna Nagara, Davanagere, Davanagere taluku, Davanagere district, Karnataka, 577000, India"
+"Teesside University, Middlesbrough, UK",54.5703695,-1.23509661862823,"Teesside University, Southfield Road, Southfield, Linthorpe, Middlesbrough, North East England, England, TS1 3BZ, UK"
+"Teesside University, UK",54.5703695,-1.23509661862823,"Teesside University, Southfield Road, Southfield, Linthorpe, Middlesbrough, North East England, England, TS1 3BZ, UK"
+Tel Aviv University,32.1119889,34.8045970204252,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל"
+"Tel Aviv University, Israel",32.1119889,34.8045970204252,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל"
+"Tel-Aviv University, Israel",32.1119889,34.8045970204252,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל"
+Temple University,39.95472495,-75.1534690525548,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA"
+"Temple University, Philadelphia, PA 19122, USA",39.9808569,-75.149594,"Temple University, West Berks Street, Hartranft, Philadelphia, Philadelphia County, Pennsylvania, 19122, USA"
+"Temple University, Philadelphia, PA, 19122, USA",39.9808569,-75.149594,"Temple University, West Berks Street, Hartranft, Philadelphia, Philadelphia County, Pennsylvania, 19122, USA"
+"Temple University, Philadelphia, PA, USA",39.981188,-75.1562826952332,"Temple University, Beasley's Walk, Stanton, Philadelphia, Philadelphia County, Pennsylvania, 19132:19133, USA"
+"Temple University, Philadelphia, USA",39.95472495,-75.1534690525548,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA"
+"Texas A&M University, College Station, TX, USA",30.6108365,-96.3521280026443,"Texas A&M University, Horticulture Street, Park West, College Station, Brazos County, Texas, 77841, USA"
+Thapar University,30.35566105,76.3658164148513,"Thapar University, Hostel Road, Patiala, Punjab, 147001, India"
+The American University in Cairo,30.04287695,31.2366413899265,"الجامعة الأمريكية بالقاهرة, شارع القصر العينى, القاهرة القديمة, جاردن سيتي, القاهرة, محافظة القاهرة, 11582, مصر"
+"The American University in Cairo, Egypt",30.04287695,31.2366413899265,"الجامعة الأمريكية بالقاهرة, شارع القصر العينى, القاهرة القديمة, جاردن سيتي, القاهرة, محافظة القاهرة, 11582, مصر"
+The Australian National University,-35.28121335,149.11665331324,"Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia"
+"The Australian National University Canberra ACT 2601, Australia",-35.28121335,149.11665331324,"Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia"
+"The Australian National University, Canberra, ACT, Australia",-35.28121335,149.11665331324,"Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia"
+"The Australian National University, Canberra, Australia",-35.28121335,149.11665331324,"Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia"
+The Chinese University of Hong Kong,22.42031295,114.207886442805,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国"
+"The Chinese University of Hong Kong, China",22.42031295,114.207886442805,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国"
+"The Chinese University of Hong Kong, Hong Kong",22.42031295,114.207886442805,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国"
+"The Chinese University of Hong Kong, Hong Kong, China",22.413656,114.2099405,"香港中文大學 Chinese University of Hong Kong, 車站路 Station Road, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国"
+"The Chinese University of Hong Kong, New Territories, Hong Kong",22.413656,114.2099405,"香港中文大學 Chinese University of Hong Kong, 車站路 Station Road, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国"
+The City College and the Graduate Center,37.76799565,-122.400099572569,"Graduate Center, 184, Hooper Street, Mission Bay, SF, California, 94158, USA"
+"The City College of New York, New York, NY 10031, USA",40.81819805,-73.9510089793336,"CCNY, 160, Convent Avenue, Manhattanville, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10031, USA"
+The City University of New York,40.8722825,-73.8948917141949,"Lehman College of the City University of New York, 250, Bedford Park Boulevard West, Bedford Park, The Bronx, Bronx County, NYC, New York, 10468, USA"
+The Education University of Hong Kong,22.46935655,114.19474193618,"香港教育大學 The Education University of Hong Kong, 露屏路 Lo Ping Road, 鳳園 Fung Yuen, 下坑 Ha Hang, 新界 New Territories, HK, DD5 1119, 中国"
+The Florida State University,30.44235995,-84.2974786716626,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA"
+"The Hebrew University of Jerusalem, Israel",31.7918555,35.244723,"האוניברסיטה העברית בירושלים, Reagan Plaza, קרית מנחם בגין, הר הצופים, ירושלים, מחוז ירושלים, NO, ישראל"
+The Hong Kong Polytechnic University,22.304572,114.179762852269,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国"
+"The Hong Kong Polytechnic University, China",22.304572,114.179762852269,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国"
+"The Hong Kong Polytechnic University, Hong Kong",22.304572,114.179762852269,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国"
+"The Hong Kong Polytechnic University, Hong Kong, China",22.304572,114.179762852269,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国"
+"The Hong Kong Polytechnic University, Kowloon, Hong Kong",22.304572,114.179762852269,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国"
+The Hong Kong University of Science and Technology,22.3386304,114.2620337,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国"
+"The Hong Kong University of Science and Technology, Hong Kong",22.3386304,114.2620337,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国"
+The Institute of Electronics,12.8447999,77.6632389626693,"International Institute of Information Technology Bangalore - IIITB, Infosys Avenue, Konappana Agrahara, Electronics City Phase 1, Vittasandra, Bangalore Urban, Karnataka, 560100, India"
+"The Nanyang Technological University, Singapore",1.3484104,103.682979653067,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore"
+The Ohio State University,40.00471095,-83.0285936787604,"The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA"
+"The Ohio State University, Columbus, OH, USA",40.00471095,-83.0285936787604,"The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA"
+"The Ohio State University, OH",40.00471095,-83.0285936787604,"The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA"
+The Open University,52.02453775,-0.709274809394501,"The Open University, East Lane, Walton, Monkston, Milton Keynes, South East, England, MK7 6AE, UK"
+The Open University of Israel,32.77824165,34.9956567288188,"האוניברסיטה הפתוחה, 15, אבא חושי, חיפה, גבעת דאונס, חיפה, מחוז חיפה, NO, ישראל"
+The Robotics Institute,13.65450525,100.494231705059,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย"
+The State University of New Jersey,40.51865195,-74.4409980124119,"Rutgers New Brunswick: Livingston Campus, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA"
+"The State University of New York at Buffalo, New York, USA",42.95485245,-78.8178238693065,"University at Buffalo, The State University of New York, South Campus, Norton Circle, University Heights, Buffalo, Erie County, New York, 14226, USA"
+"The Univ of Hong Kong, China",22.2081469,114.259641148719,"海洋科學研究所 The Swire Institute of Marine Science, 鶴咀道 Cape D'Aguilar Road, 鶴咀低電台 Cape D'Aguilar Low-Level Radio Station, 石澳 Shek O, 芽菜坑村 Nga Choy Hang Tsuen, 南區 Southern District, 香港島 Hong Kong Island, HK, 中国"
+"The University of Adelaide, Adelaide, SA, Australia",-34.9189226,138.604236675404,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia"
+"The University of Adelaide, Australia",-34.9189226,138.604236675404,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia"
+The University of British Columbia,49.25839375,-123.246581610019,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada"
+The University of Cambridge,52.17638955,0.143088815415187,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK"
+"The University of Edinburgh, Edinburgh, U.K.",55.94951105,-3.19534912525441,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK"
+The University of Electro-Communications,35.6572957,139.542558677257,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本"
+"The University of Electro-Communications, JAPAN",35.6572957,139.542558677257,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本"
+"The University of Electro-Communications, Japan",35.6572957,139.542558677257,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本"
+"The University of Electro-Communications, Tokyo",35.6572957,139.542558677257,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本"
+The University of Hong Kong,22.2081469,114.259641148719,"海洋科學研究所 The Swire Institute of Marine Science, 鶴咀道 Cape D'Aguilar Road, 鶴咀低電台 Cape D'Aguilar Low-Level Radio Station, 石澳 Shek O, 芽菜坑村 Nga Choy Hang Tsuen, 南區 Southern District, 香港島 Hong Kong Island, HK, 中国"
+The University of Manchester,53.46600455,-2.23300880782987,"University of Manchester - Main Campus, Brunswick Street, Curry Mile, Ardwick, Manchester, Greater Manchester, North West England, England, M13 9NR, UK"
+The University of Maryland,39.2899685,-76.6219610316858,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA"
+"The University of New South Wales, Australia",-33.91758275,151.231240246527,"UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia"
+The University of Newcastle,-33.3578899,151.37834708231,"University of Newcastle Central Coast Campus, Technology Bridge, Ourimbah, Central Coast, NSW, 2258, Australia"
+"The University of Newcastle, Callaghan 2308, Australia",-32.8930923,151.705656,"University of Newcastle, Huxley Library, University Drive, Callaghan, Newcastle-Maitland, Newcastle, NSW, 2308, Australia"
+The University of North Carolina at Charlotte,35.3103441,-80.732616166699,"Lot 20, Poplar Terrace Drive, Charlotte, Mecklenburg County, North Carolina, 28223, USA"
+"The University of North Carolina at Charlotte, USA",35.3103441,-80.732616166699,"Lot 20, Poplar Terrace Drive, Charlotte, Mecklenburg County, North Carolina, 28223, USA"
+"The University of North Carolina, Chapel Hill",35.90503535,-79.0477532652511,"University of North Carolina, Emergency Room Drive, Chapel Hill, Orange County, North Carolina, 27599, USA"
+The University of Nottingham,52.9387428,-1.20029569274574,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK"
+"The University of Nottingham, UK",52.9387428,-1.20029569274574,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK"
+The University of Queensland,-27.49741805,153.013169559836,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia"
+"The University of Queensland, Australia",-27.49741805,153.013169559836,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia"
+"The University of Queensland, Brisbane, Australia",-27.49741805,153.013169559836,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia"
+"The University of Queensland, QLD 4072, Australia",-27.49741805,153.013169559836,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia"
+"The University of Sheffield, Sheffield, U.K.",53.3815248,-1.480681425,"University of Sheffield, Portobello, Port Mahon, Saint George's, Sheffield, Yorkshire and the Humber, England, S1 4DP, UK"
+The University of Sydney,-33.88890695,151.189433661925,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia"
+"The University of Sydney, NSW 2006, Australia",-33.88890695,151.189433661925,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia"
+"The University of Sydney, Sydney, Australia",-33.88890695,151.189433661925,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia"
+"The University of Tennessee, Knoxville",35.9542493,-83.9307395,"University of Tennessee, Melrose Avenue, Fort Sanders, Knoxville, Knox County, Tennessee, 37916, USA"
+The University of Texas,32.3163078,-95.2536994379459,"The University of Texas at Tyler, 3900, University Boulevard, Tyler, Smith County, Texas, 75799, USA"
+The University of Texas at,32.3163078,-95.2536994379459,"The University of Texas at Tyler, 3900, University Boulevard, Tyler, Smith County, Texas, 75799, USA"
+The University of Texas at Austin,30.284151,-97.7319559808022,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA"
+"The University of Texas at Austin Austin, Texas, USA",30.284151,-97.7319559808022,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA"
+"The University of Texas at Dallas, Richardson, TX",32.9820799,-96.7566278,"University of Texas at Dallas, Richardson, Dallas County, Texas, 78080, USA"
+"The University of Texas at San Antonio, San Antonio, TX, USA",29.42182005,-98.5016869955163,"Lot D3, South PanAm Expressway, Cattleman's Square, San Antonio, Bexar County, Texas, 78205, USA"
+"The University of Tokushima, Japan",34.0788068,134.558981,"大学前, 国道11号, 徳島市, 徳島県, 四国地方, 770-0815, 日本"
+The University of Tokyo,35.9020448,139.936220089117,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本"
+"The University of Tokyo, Japan",35.9020448,139.936220089117,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本"
+The University of Western Australia,-31.95040445,115.797900374251,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia"
+"The University of Western Australia, Crawley, WA, Australia",-31.98027975,115.818084637301,"University of Western Australia (Crawley Campus), 35, Stirling Highway, Crawley, Perth, Western Australia, 6009, Australia"
+The University of York,53.94540365,-1.0313887829649,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK"
+"The University of York, Heslington, York YO10 5DD, United Kingdom",53.94830175,-1.05154975017361,"Campus Central Car Park, University Road, Heslington, York, Yorkshire and the Humber, England, YO10 5NH, UK"
+"The University of York, UK",53.94540365,-1.0313887829649,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK"
+"The University of York, United Kingdom",53.94540365,-1.0313887829649,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK"
+The University of the Humanities,47.9218937,106.919552402206,"Хүмүүнлэгийн ухааны их сургууль, Ж.Самбуугийн гудамж, Гандан, Улаанбаатар, 975, Монгол улс"
+The Weizmann Institute of,31.904187,34.807378,"מכון ויצמן, הרצל, מעונות וולפסון, נווה עמית, רחובות, מחוז המרכז, NO, ישראל"
+The Weizmann Institute of Science,31.9078499,34.8133409244421,"מכון ויצמן למדע, שדרת מרכוס זיו, מעונות שיין, אחוזות הנשיא, רחובות, מחוז המרכז, NO, ישראל"
+"Tianjin University, 300072, China",36.20304395,117.058421125807,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国"
+"Tianjin University, China",38.99224515,117.306075265115,"Tianjin University, South Qinmin Road, Haihe Education Park, 辛庄镇, 津南区 (Jinnan), 天津市, 中国"
+"Tianjin University, Tianjin, China",38.99224515,117.306075265115,"Tianjin University, South Qinmin Road, Haihe Education Park, 辛庄镇, 津南区 (Jinnan), 天津市, 中国"
+"Tohoku University, Japan",38.2530945,140.8736593,"Tohoku University, 五橋通, 青葉区, 仙台市, 宮城県, 東北地方, 980-0811, 日本"
+"Tohoku University, Sendai, Japan",38.2530945,140.8736593,"Tohoku University, 五橋通, 青葉区, 仙台市, 宮城県, 東北地方, 980-0811, 日本"
+Tokyo Denki University,35.6572957,139.542558677257,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本"
+Tokyo Institute of Technology,35.5167538,139.483422513406,"東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本"
+"Tokyo Institute of Technology, Japan",35.5167538,139.483422513406,"東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本"
+"Tokyo Institute of Technology, Kanagawa, Japan",35.5167538,139.483422513406,"東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本"
+Tokyo Metropolitan University,35.6200925,139.38296706394,"首都大学東京, 由木緑道, 八王子市, 東京都, 関東地方, 1920364, 日本"
+Tomsk Polytechnic University,56.46255985,84.955654946724,"Томский политехнический университет, улица Пирогова, Южная, Кировский район, Томск, городской округ Томск, Томская область, СФО, 634034, РФ"
+Tongji University,31.28473925,121.496949085887,"同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国"
+"Tongji University, Shanghai 201804, China",31.28473925,121.496949085887,"同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国"
+"Tongji University, Shanghai, China",31.28473925,121.496949085887,"同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国"
+Toyota Research Institute,37.40253645,-122.116551067984,"Toyota Research Institute, 4440, West El Camino Real, Los Altos, Santa Clara County, California, 94022, USA"
+"Toyota Technological Institute (Chicago, US",41.7847112,-87.5926056707507,"Toyota Technological Institute, 6045, South Kenwood Avenue, Woodlawn, Chicago, Cook County, Illinois, 60637, USA"
+Tsinghua University,40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+"Tsinghua University, 100084 Beijing, China",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+"Tsinghua University, Beijing",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+"Tsinghua University, Beijing 100084, China",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+"Tsinghua University, Beijing 100084, P.R. China",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+"Tsinghua University, Beijing 100084, P.R.China",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+"Tsinghua University, Beijing, 100084, China",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+"Tsinghua University, Beijing, China",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+"Tsinghua University, Beijing, P. R. China",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+"Tsinghua University, Beijing,China",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+"Tsinghua University, China",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+"UC Merced, USA",37.302827,-120.484819845561,"UC Merced Venture Lab, 1735, M Street, Merced, Merced County, California, 95340, USA"
+UIUC,40.04650815,-88.2619752357129,"UIUC Golf Course, Hartwell Drive, Savoy, Champaign County, Illinois, 61874, USA"
+"UNCW, USA",34.16271505,-78.1162477961939,"Uncw- Ecological Botanical Gardens, Henrytown, Brunswick County, North Carolina, USA"
+UNIVERSITY IN PRAGUE,50.0714761,14.4542642,"Business Institut EDU, Kodaňská, Vršovice, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 10100, Česko"
+UNIVERSITY OF CALIFORNIA,37.87631055,-122.238859269443,"UC Berkeley, Centennial Drive, Oakland, Alameda County, California, 94720-1076, USA"
+"UNIVERSITY OF CALIFORNIA, BERKELEY",37.8687126,-122.255868148743,"Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA"
+"UNIVERSITY OF CALIFORNIA, SAN DIEGO",32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+UNIVERSITY OF OULU,65.0592157,25.466326012507,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi"
+UNIVERSITY OF TAMPERE,61.49412325,23.7792067776763,"Tampereen yliopisto, 4, Kalevantie, Ratinanranta, Tulli, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33100, Suomi"
+UNIVERSITY OF TARTU,58.38131405,26.7207808104523,"Paabel, University of Tartu, 17, Ülikooli, Kesklinn, Tartu linn, Tartu, Tartu linn, Tartu maakond, 53007, Eesti"
+UNIVERSITY OF WISCONSIN MADISON,43.07982815,-89.4306642542901,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA"
+"Ulm University, Germany",48.38044335,10.0101011516362,"HNU, John-F.-Kennedy-Straße, Vorfeld, Wiley, Neu-Ulm, Landkreis Neu-Ulm, Schwaben, Bayern, 89231, Deutschland"
+Universidad Autonoma de Madrid,40.48256135,-3.69060789542556,"Facultad de Medicina de la Universidad Autónoma de Madrid, Calle de Arturo Duperier, Fuencarral, Fuencarral-El Pardo, Madrid, Área metropolitana de Madrid y Corredor del Henares, Comunidad de Madrid, 28001, España"
+"Universidad Tecnica Federico Santa Maria, Valparaiso, Chile",-33.0362526,-71.595382,"Universidad Técnica Federico Santa María, Condominio Esmeralda, Valparaíso, Provincia de Valparaíso, V Región de Valparaíso, 2390382, Chile"
+"Universitat Oberta de Catalunya, Barcelona, Spain",41.40657415,2.1945341,"Universitat Oberta de Catalunya, 156, Rambla del Poblenou, Provençals del Poblenou, Sant Martí, Barcelona, BCN, CAT, 08018, España"
+"Universitat Pompeu Fabra, Barcelona, Spain",41.39044285,2.18891949251166,"Dipòsit de les Aigües, Carrer de Wellington, la Vila Olímpica del Poblenou, Ciutat Vella, Barcelona, BCN, CAT, 08071, España"
+"Universitat de València, Valencia, Spain",39.47787665,-0.342577110177694,"Campus dels Tarongers, Plaza de Manuel Broseta i Pont, Ciutat Jardí, Algirós, València, Comarca de València, València / Valencia, Comunitat Valenciana, 46022, España"
+"Universiti Teknologi PETRONAS, Seri Iskandar, 32610, Perak Malaysia",4.3830464,100.970015404936,"UTP, Universiti Teknologi Petronas, Persiaran Desa Kediaman, Puncak Iskandar, Seri Iskandar, PRK, 32610, Malaysia"
+University,51.7520849,-1.25166460220888,"University College, Logic Lane, Grandpont, Oxford, Oxon, South East, England, OX1 4EX, UK"
+University Politehnica of Bucharest,44.43918115,26.0504456538413,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România"
+University (ITU,55.65965525,12.5910768893446,"IT-Universitetet i København, Emil Holms Kanal, Christianshavn, København, Københavns Kommune, Region Hovedstaden, 1424, Danmark"
+"University City Blvd., Charlotte, NC",35.312224,-80.7084736,"University City Boulevard, Charlotte, Mecklenburg County, North Carolina, 28223, USA"
+University College London,51.5231607,-0.1282037,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK"
+"University College London, London WC1N 3BG, United Kingdom",51.5231607,-0.1282037,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK"
+"University College London, London, UK",51.5231607,-0.1282037,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK"
+"University College London, UK",51.5231607,-0.1282037,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK"
+University Drive,-21.1753214,149.1432747,"University Drive, Ooralea, Mackay, QLD, 4740, Australia"
+"University Drive, Fairfax, VA 22030-4444, USA",38.835411,-77.316447,"University Drive, Ardmore, Fairfax, Fairfax County, Virginia, 22030, USA"
+University Institute of Engineering and Technology,26.9302879,80.9278433,"Maharishi University Of Information Technology, NH230, Jankipuram, Lucknow, Uttar Pradesh, 226021, India"
+"University Library, Singapore",1.30604775,103.7728987705,"University Town, College Avenue East, Rochester Hill, Clementi, Southwest, 138608, Singapore"
+University Of California San Diego,32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+University Of Maryland,39.2899685,-76.6219610316858,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA"
+"University POLITEHNICA Timisoara, Timisoara, 300223, Romania",45.746189,21.2275507517647,"UPT, Bulevardul Vasile Pârvan, Elisabetin, Timișoara, Timiș, 300223, România"
+"University POLITEHNICA of Bucharest, Bucharest, Romania",44.43918115,26.0504456538413,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România"
+University Politehnica of Bucharest,44.43918115,26.0504456538413,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România"
+"University Politehnica of Bucharest, Romania",44.43918115,26.0504456538413,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România"
+University of,29.3758342,71.7528712910287,"University of ..., University Road, بہاولپور, Bahāwalpur District, پنجاب, 63100, ‏پاکستان‎"
+University of Aberdeen,57.1646143,-2.10186013407315,"University of Aberdeen, High Street, Old Aberdeen, Aberdeen, Aberdeen City, Scotland, AB24 3EJ, UK"
+University of Abertay,56.46323375,-2.97447511707098,"Abertay University, Bell Street, City Centre, Dundee, Dundee City, Scotland, DD1 1HG, UK"
+University of Adelaide,-34.9189226,138.604236675404,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia"
+"University of Adelaide, Australia",-34.9189226,138.604236675404,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia"
+"University of Adelaide, SA, Australia",-34.9189226,138.604236675404,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia"
+"University of Agder, Kristiansand, Norway",58.16308805,8.00144965545071,"UiA, Vegard Hauges plass, Gimlemoen, Kvadraturen, Kristiansand, Vest-Agder, 4630, Norge"
+"University of Aizu, Japan",37.5236728,139.938072464124,"会津大学, 磐越自動車道, 会津若松市, 福島県, 東北地方, 965-8580, 日本"
+"University of Akron, Akron",41.0789035,-81.5197127229943,"University of Akron, East State Street, Stadium District, Cascade Valley, Akron, Summit County, Ohio, 44308, USA"
+"University of Alberta, Edmonton, Canada",53.5238572,-113.522826652346,"University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada"
+University of Amsterdam,52.3553655,4.9501644,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland"
+"University of Amsterdam, Amsterdam, The",52.3553655,4.9501644,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland"
+"University of Amsterdam, Amsterdam, The Netherlands",52.3553655,4.9501644,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland"
+"University of Amsterdam, The Netherlands",52.3553655,4.9501644,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland"
+"University of Amsterdam, the Netherlands",52.3553655,4.9501644,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland"
+University of Arizona,32.2351726,-110.950958317648,"University of Arizona, North Highland Avenue, Rincon Heights, Barrio Viejo, Tucson, Pima County, Arizona, 85721, USA"
+University of Arkansas at Little Rock,34.72236805,-92.3383025526859,"University of Arkansas At Little Rock (UALR), 2801, U A L R Campus Drive, Little Rock, Pulaski County, Arkansas, 72204, USA"
+University of Barcelona,41.3868913,2.16352384576632,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España"
+"University of Barcelona, Spain",41.3868913,2.16352384576632,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España"
+University of Basel,47.5612651,7.5752961,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra"
+"University of Basel, Switzerland",47.5612651,7.5752961,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra"
+University of Bath,51.3791442,-2.3252332,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK"
+"University of Bath, Bath, Somerset, United Kingdom",51.3791442,-2.3252332,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK"
+"University of Bath, Bath, United Kingdom",51.3791442,-2.3252332,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK"
+University of Birmingham,52.45044325,-1.93196134052244,"University of Birmingham Edgbaston Campus, Ring Road North, Bournbrook, Birmingham, West Midlands Combined Authority, West Midlands, England, B15 2TP, UK"
+University of Bonn,50.7338124,7.1022465,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland"
+"University of Bonn, Germany",50.7338124,7.1022465,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland"
+University of Brescia,37.7689374,-87.1113859,"Brescia University, West 7th Street, Owensboro, Daviess County, Kentucky, 42303, USA"
+University of Bridgeport,41.1664858,-73.1920564,"University of Bridgeport, Park Avenue, Bridgeport Downtown South Historic District, Bridgeport, Fairfield County, Connecticut, 06825, USA"
+University of Bristol,51.4584837,-2.60977519828372,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK"
+"University of Bristol, Bristol, BS8 1UB, UK",51.4562363,-2.602779,"University of Bristol, Cantock's Close, Kingsdown, Canon's Marsh, Bristol, City of Bristol, South West England, England, BS8, UK"
+"University of Bristol, Bristol, UK",51.4584837,-2.60977519828372,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK"
+University of British Columbia,49.25839375,-123.246581610019,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada"
+"University of British Columbia, Canada",49.25839375,-123.246581610019,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada"
+"University of British Columbia, Vancouver, Canada",49.25839375,-123.246581610019,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada"
+University of Buffalo,42.95485245,-78.8178238693065,"University at Buffalo, The State University of New York, South Campus, Norton Circle, University Heights, Buffalo, Erie County, New York, 14226, USA"
+University of Caen,35.0274996,135.781545126193,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本"
+"University of Calgary, Calgary, Alberta, Canada",51.0784038,-114.1287077,"University of Calgary, Service Tunnel, University Heights, Calgary, Alberta, T2N 1N7, Canada"
+University of California,37.87631055,-122.238859269443,"UC Berkeley, Centennial Drive, Oakland, Alameda County, California, 94720-1076, USA"
+University of California Berkeley,37.87631055,-122.238859269443,"UC Berkeley, Centennial Drive, Oakland, Alameda County, California, 94720-1076, USA"
+University of California Berkeley,37.87631055,-122.238859269443,"UC Berkeley, Centennial Drive, Oakland, Alameda County, California, 94720-1076, USA"
+University of California Davis,38.5336349,-121.790772639747,"University of California, Davis, Apiary Drive, Yolo County, California, 95616-5270, USA"
+University of California San Diego,32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+"University of California San Diego, USA",32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+"University of California San Diego, United States of America",32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+University of California Santa Barbara,34.4145937,-119.84581949869,"UCSB, Santa Barbara County, California, 93106, USA"
+"University of California, Berkeley",37.8687126,-122.255868148743,"Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA"
+"University of California, Berkeley, Berkeley CA 94720, USA",37.8756681,-122.257979979865,"Goldman School of Public Policy, Hearst Avenue, Northside, Berkeley, Alameda County, California, 94720, USA"
+"University of California, Irvine",33.6431901,-117.84016493553,"University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA"
+"University of California, Irvine, USA",33.6431901,-117.84016493553,"University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA"
+"University of California, Merced",37.36566745,-120.421588883632,"University of California, Merced, Ansel Adams Road, Merced County, California, USA"
+"University of California, Merced, CA 95344, USA",37.36566745,-120.421588883632,"University of California, Merced, Ansel Adams Road, Merced County, California, USA"
+"University of California, Merced, USA",37.36566745,-120.421588883632,"University of California, Merced, Ansel Adams Road, Merced County, California, USA"
+"University of California, Riverside",33.98071305,-117.332610354677,"University of California, Riverside, Linden Street, Riverside, Riverside County, California, 92521, USA"
+"University of California, Riverside CA 92521-0425, USA",33.9743275,-117.32558236636,"UCR, North Campus Drive, Riverside, Riverside County, California, 92521, USA"
+"University of California, Riverside, California 92521, USA",33.9743275,-117.32558236636,"UCR, North Campus Drive, Riverside, Riverside County, California, 92521, USA"
+"University of California, Riverside, Riverside CA, California 92521 United States",33.9743275,-117.32558236636,"UCR, North Campus Drive, Riverside, Riverside County, California, 92521, USA"
+"University of California, San Diego",32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+"University of California, San Diego, CA, USA",32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+"University of California, San Diego, California, USA",32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+"University of California, San Diego, La Jolla",32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+"University of California, San Diego, USA",32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+"University of California, Santa Barbara",34.4145937,-119.84581949869,"UCSB, Santa Barbara County, California, 93106, USA"
+University of Cambridge,52.17638955,0.143088815415187,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK"
+"University of Cambridge, United Kingdom",52.17638955,0.143088815415187,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK"
+University of Campinas,-22.8224781,-47.0642599309425,"Universidade Estadual de Campinas - UNICAMP, Rua Josué de Castro, Barão Geraldo, Campinas, Microrregião de Campinas, RMC, Mesorregião de Campinas, SP, Região Sudeste, 13083-970, Brasil"
+University of Campinas (Unicamp,-22.8224781,-47.0642599309425,"Universidade Estadual de Campinas - UNICAMP, Rua Josué de Castro, Barão Geraldo, Campinas, Microrregião de Campinas, RMC, Mesorregião de Campinas, SP, Região Sudeste, 13083-970, Brasil"
+University of Canberra,-35.23656905,149.084469935058,"University of Canberra, University Drive, Bruce, Belconnen, Australian Capital Territory, 2617, Australia"
+"University of Canterbury, New Zealand",-43.5240528,172.580306253669,"University of Canterbury, Uni-Cycle, Ilam, Christchurch, Christchurch City, Canterbury, 8040, New Zealand/Aotearoa"
+University of Cape Town,-33.95828745,18.4599734888018,"University of Cape Town, Engineering Mall, Cape Town Ward 59, Cape Town, City of Cape Town, Western Cape, CAPE TOWN, South Africa"
+"University of Cape Town, South Africa",-33.95828745,18.4599734888018,"University of Cape Town, Engineering Mall, Cape Town Ward 59, Cape Town, City of Cape Town, Western Cape, CAPE TOWN, South Africa"
+University of Central Florida,28.59899755,-81.1971250118395,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA"
+"University of Central Florida, Orlando",28.42903955,-81.4421617727936,"Rosen College of Hospitality Management, 9907, Universal Boulevard, Orange County, Florida, 32819, USA"
+"University of Central Florida, Orlando, 32816, United States of America",28.42903955,-81.4421617727936,"Rosen College of Hospitality Management, 9907, Universal Boulevard, Orange County, Florida, 32819, USA"
+"University of Central Florida, Orlando, FL, USA",28.42903955,-81.4421617727936,"Rosen College of Hospitality Management, 9907, Universal Boulevard, Orange County, Florida, 32819, USA"
+"University of Central Florida, Orlando, USA",28.42903955,-81.4421617727936,"Rosen College of Hospitality Management, 9907, Universal Boulevard, Orange County, Florida, 32819, USA"
+"University of Central Florida, USA",28.59899755,-81.1971250118395,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA"
+"University of Central Punjab, Pakistan",31.4466149,74.2679762,"University of Central Punjab, Khyaban-e-Jinnah, PECHS, Wapda Town, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54000, ‏پاکستان‎"
+University of Chinese Academy of Sciences,39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+University of Chinese Academy of Sciences (UCAS,39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+"University of Chinese Academy of Sciences (UCAS), Beijing, 100049, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+"University of Chinese Academy of Sciences, Beijing 100190, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+"University of Chinese Academy of Sciences, Beijing 101408, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+"University of Chinese Academy of Sciences, Beijing, 100049, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+"University of Chinese Academy of Sciences, Beijing, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+"University of Chinese Academy of Sciences, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+"University of Coimbra, Portugal",40.2075951,-8.42566147540816,"Reitoria da Universidade de Coimbra, Rua de Entre-Colégios, Almedina, Alta, Almedina, Sé Nova, Santa Cruz, Almedina e São Bartolomeu, CBR, Coimbra, Baixo Mondego, Centro, 3000-062, Portugal"
+University of Colorado Colorado Springs,38.8920756,-104.797163894584,"Main Hall, The Spine, Colorado Springs, El Paso County, Colorado, 80907, USA"
+"University of Colorado Denver, Denver, CO, USA",39.74287785,-105.005963984841,"University of Colorado (Denver Auraria campus), Lawrence Way, Auraria, Denver, Denver County, Colorado, 80217, USA"
+"University of Colorado, Boulder",40.01407945,-105.266959437621,"Naropa University, Arapahoe Avenue, The Hill, Boulder, Boulder County, Colorado, 80309, USA"
+University of Connecticut,41.8093779,-72.2536414,"University of Connecticut, Glenbrook Road, Storrs, Tolland County, Connecticut, 06269, USA"
+University of Copenhagen,55.6801502,12.5723270014063,"Københavns Universitet, Krystalgade, Kødbyen, Vesterbro, København, Københavns Kommune, Region Hovedstaden, 1165, Danmark"
+"University of Crete, Crete, 73100, Greece",35.3713024,24.4754408,"House of Europe, Μακεδονίας, Ρέθυμνο, Δήμος Ρεθύμνης, Περιφερειακή Ενότητα Ρεθύμνου, Περιφέρεια Κρήτης, Κρήτη, 930100, Ελλάδα"
+"University of Dammam, Saudi Arabia",26.39793625,50.1980792430511,"University of Dammam, King Faisal Rd, العقربية, الخبر, المنطقة الشرقية, ٣١٩٥٢, السعودية"
+"University of Dayton, Dayton, OH, USA",39.738444,-84.1791874663107,"University of Dayton, Caldwell Street, South Park Historic District, Dayton, Montgomery, Ohio, 45409, USA"
+"University of Dayton, Ohio, USA",39.738444,-84.1791874663107,"University of Dayton, Caldwell Street, South Park Historic District, Dayton, Montgomery, Ohio, 45409, USA"
+"University of Delaware, Newark, 19716, USA",39.6810328,-75.7540184,"University of Delaware, South College Avenue, Newark, New Castle County, Delaware, 19713, USA"
+"University of Delaware, Newark, DE, USA",39.6810328,-75.7540184,"University of Delaware, South College Avenue, Newark, New Castle County, Delaware, 19713, USA"
+"University of Delaware, Newark, DE. USA",39.6810328,-75.7540184,"University of Delaware, South College Avenue, Newark, New Castle County, Delaware, 19713, USA"
+"University of Denver, Denver, CO",39.6766541,-104.962203,"University of Denver, Driscoll Bridge, Denver, Denver County, Colorado, 80208, USA"
+University of Dhaka,23.7316957,90.3965275,"World War Memorial, Shahid Minar Rd, Jagannath Hall, DU, জিগাতলা, ঢাকা, ঢাকা বিভাগ, 1000, বাংলাদেশ"
+"University of Dhaka, Bangladesh",23.7316957,90.3965275,"World War Memorial, Shahid Minar Rd, Jagannath Hall, DU, জিগাতলা, ঢাকা, ঢাকা বিভাগ, 1000, বাংলাদেশ"
+"University of Dschang, Cameroon",5.4409448,10.0712056113589,"Université de Dschang, Départementale 65, Fokoué, Menoua, OU, Cameroun"
+University of Dundee,56.45796755,-2.98214831353755,"University of Dundee, Park Wynd, Law, Dundee, Dundee City, Scotland, DD1 4HN, UK"
+"University of East Anglia, Norwich, U.K.",52.6221571,1.2409136,"Arts (Lower Walkway Level), The Square, Westfield View, Earlham, Norwich, Norfolk, East of England, England, NR4 7TJ, UK"
+University of Edinburgh,55.94951105,-3.19534912525441,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK"
+"University of Edinburgh, Edinburgh, UK",55.94951105,-3.19534912525441,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK"
+University of Engineering and Technology,31.6914689,74.2465617,"University of Engineering and Technology, Lahore Bypass, لاہور, Shekhūpura District, پنجاب, ‏پاکستان‎"
+University of Exeter,50.7369302,-3.53647671702167,"University of Exeter, Stocker Road, Exwick, Exeter, Devon, South West England, England, EX4 4QN, UK"
+"University of Exeter, UK",50.7369302,-3.53647671702167,"University of Exeter, Stocker Road, Exwick, Exeter, Devon, South West England, England, EX4 4QN, UK"
+University of Florida,29.6328784,-82.3490133048243,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA"
+"University of Florida, Gainesville, FL",29.6328784,-82.3490133048243,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA"
+"University of Florida, Gainesville, FL, 32611, USA",29.6447739,-82.3575193392276,"University of Florida, Museum Road, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32601, USA"
+University of Frankfurt,50.13053055,8.69234223934388,"Frankfurt University of Applied Sciences, Kleiststraße, Nordend West, Frankfurt, Regierungsbezirk Darmstadt, Hessen, 60318, Deutschland"
+University of Geneva,46.1993003,6.1450567,"Université de Genève, 24, Rue du Général-Dufour, Genève, GE, 1204, Suisse"
+University of Glasgow,55.87231535,-4.28921783557444,"University of Glasgow, University Avenue, Yorkhill, Hillhead, Glasgow, Glasgow City, Scotland, G, UK"
+University of Groningen,53.21967825,6.56251482206542,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland"
+"University of Groningen, Netherlands",53.21967825,6.56251482206542,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland"
+"University of Groningen, The Netherlands",53.21967825,6.56251482206542,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland"
+"University of Gujrat, Pakistan",32.63744845,74.1617455759799,"University of Gujrat, University Road, Chandhar, Gujrāt District, پنجاب, 50700, ‏پاکستان‎"
+"University of Haifa, Haifa, Israel",32.76162915,35.0198630428453,"אוניברסיטת חיפה, חיפה, מחוז חיפה, ישראל"
+"University of Hawaii, Manoa, Honolulu, HI, 96822",21.2982795,-157.818692295846,"University of Hawaii at Manoa, Bachman Place, Lower Mānoa, Moiliili, Honolulu, Honolulu County, Hawaii, 96848, USA"
+"University of Hong Kong, China",22.2081469,114.259641148719,"海洋科學研究所 The Swire Institute of Marine Science, 鶴咀道 Cape D'Aguilar Road, 鶴咀低電台 Cape D'Aguilar Low-Level Radio Station, 石澳 Shek O, 芽菜坑村 Nga Choy Hang Tsuen, 南區 Southern District, 香港島 Hong Kong Island, HK, 中国"
+University of Houston,29.7207902,-95.3440627149137,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA"
+"University of Houston, Houston, TX 77204, USA",29.7207902,-95.3440627149137,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA"
+"University of Houston, Houston, TX, USA",29.7207902,-95.3440627149137,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA"
+University of Iceland,64.137274,-21.9456145356869,"Háskóli Íslands, Sturlugata, Háskóli, Reykjavík, Reykjavíkurborg, Höfuðborgarsvæðið, 121, Ísland"
+University of Illinois,40.11116745,-88.2258766477716,"B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA"
+University of Illinois Urbana Champaign,40.11116745,-88.2258766477716,"B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA"
+University of Illinois at,40.1006938,-88.2313043272112,"University of Illinois at Urbana-Champaign, West Pennsylvania Avenue, West Urbana Residential Area, Urbana, Champaign County, Illinois, 61801, USA"
+University of Illinois at Chicago,41.86898915,-87.6485625597018,"University of Illinois at Chicago, West Taylor Street, Greektown, Chicago, Cook County, Illinois, 60607, USA"
+"University of Illinois at Chicago, Chicago, IL",41.86898915,-87.6485625597018,"University of Illinois at Chicago, West Taylor Street, Greektown, Chicago, Cook County, Illinois, 60607, USA"
+University of Illinois at Urbana,40.1006938,-88.2313043272112,"University of Illinois at Urbana-Champaign, West Pennsylvania Avenue, West Urbana Residential Area, Urbana, Champaign County, Illinois, 61801, USA"
+University of Illinois at Urbana Champaign,40.101976,-88.2314378,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA"
+"University of Illinois at Urbana Champaign, Urbana",40.101976,-88.2314378,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA"
+"University of Illinois at Urbana Champaign, Urbana, IL 61801, USA",40.1066501,-88.2240260725426,"University of Illinois at Urbana-Champaign, South Goodwin Avenue, Urbana, Champaign County, Illinois, 61801, USA"
+University of Illinois at Urbana-Champaign,40.101976,-88.2314378,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA"
+"University of Illinois at Urbana-Champaign, IL USA",40.101976,-88.2314378,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA"
+"University of Illinois at Urbana-Champaign, USA",40.101976,-88.2314378,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA"
+"University of Illinois at Urbana-Champaign, Urbana, IL",40.101976,-88.2314378,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA"
+"University of Illinois at Urbana-Champaign, Urbana, IL, USA",40.101976,-88.2314378,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA"
+"University of Illinois at Urbana—Champaign, Champaign, IL, USA",40.101976,-88.2314378,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA"
+"University of Illinois, Urbana-Champaign",40.11116745,-88.2258766477716,"B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA"
+University of Information,34.17980475,-117.325843648456,"Information, University Parkway, San Bernardino, San Bernardino County, California, 92407, USA"
+"University of Ioannina, 45110, Greece",39.6162306,20.8396301098796,"Πανεπιστήμιο Ιωαννίνων, Πανεπιστημίου, Κάτω Νεοχωρόπουλο, Νεοχωρόπουλο, Δήμος Ιωαννιτών, Π.Ε. Ιωαννίνων, Περιφέρεια Ηπείρου, Ήπειρος - Δυτική Μακεδονία, 45110, Ελλάδα"
+"University of Ioannina, Ioannina, Greece",39.6162306,20.8396301098796,"Πανεπιστήμιο Ιωαννίνων, Πανεπιστημίου, Κάτω Νεοχωρόπουλο, Νεοχωρόπουλο, Δήμος Ιωαννιτών, Π.Ε. Ιωαννίνων, Περιφέρεια Ηπείρου, Ήπειρος - Δυτική Μακεδονία, 45110, Ελλάδα"
+University of Iowa,41.6659,-91.573103065,"University of Iowa, Hawkeye Court, Iowa City, Johnson County, Iowa, 52246, USA"
+"University of Karlsruhe, Germany",49.00664235,8.39405151637065,"Karlshochschule International University, 36-38, Karlstraße, Innenstadt-West Westlicher Teil, Innenstadt-West, Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76133, Deutschland"
+"University of Kent, Canterbury, U.K.",51.2975344,1.0729616473445,"University of Kent, St. Stephen's Hill, Hackington, Canterbury, Kent, South East, England, CT2 7AS, UK"
+University of Kentucky,38.0333742,-84.5017758,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA"
+"University of Kentucky, USA",38.0333742,-84.5017758,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA"
+University of Leeds,53.80387185,-1.55245712031677,"University of Leeds, Inner Ring Road, Woodhouse, Leeds, Yorkshire and the Humber, England, LS2 9NS, UK"
+"University of Lincoln, U. K.",53.22853665,-0.548734723802121,"University of Lincoln, Brayford Way, Whitton Park, New Boultham, Lincoln, Lincolnshire, East Midlands, England, LN6 7TS, UK"
+"University of Lincoln, U.K",53.22853665,-0.548734723802121,"University of Lincoln, Brayford Way, Whitton Park, New Boultham, Lincoln, Lincolnshire, East Midlands, England, LN6 7TS, UK"
+"University of Lincoln, UK",53.22853665,-0.548734723802121,"University of Lincoln, Brayford Way, Whitton Park, New Boultham, Lincoln, Lincolnshire, East Midlands, England, LN6 7TS, UK"
+University of Liverpool,53.406179,-2.96670818619252,"Victoria Building, Brownlow Hill, Knowledge Quarter, Liverpool, North West England, England, L3, UK"
+"University of Liverpool, Liverpool, U.K.",53.406179,-2.96670818619252,"Victoria Building, Brownlow Hill, Knowledge Quarter, Liverpool, North West England, England, L3, UK"
+University of Ljubljana,46.0501558,14.4690732689076,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija"
+University of Ljubljana Faculty,46.0501558,14.4690732689076,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija"
+"University of Ljubljana, Ljubljana, Slovenia",46.0501558,14.4690732689076,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija"
+University of London,51.5217668,-0.130190717056655,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK"
+"University of Louisville, Louisville, KY 40292 USA",38.2167565,-85.7572502291168,"University of Louisville, South Brook Street, Louisville, Jefferson County, Kentucky, 40208, USA"
+"University of Macau, Taipa, Macau",22.1240187,113.545109009671,"研究生宿舍 Residência de Estudantes de Pós-Graduação da Universidade de Macau, 澳門大學 Universidade de Macau, 嘉模堂區 Nossa Senhora do Carmo, 氹仔 Taipa, Universidade de Macau em Ilha de Montanha 澳門大學橫琴校區, 中国"
+"University of Malaya, 50603 Kuala Lumpur, Malaysia",3.12267405,101.65356103394,"UM, Lingkaran Wawasan, Bukit Pantai, Bangsar, KL, 50603, Malaysia"
+"University of Malaya, Kuala Lumpur, Malaysia",3.12267405,101.65356103394,"UM, Lingkaran Wawasan, Bukit Pantai, Bangsar, KL, 50603, Malaysia"
+University of Malta,35.9023226,14.4834189,"University of Malta, Ring Road, Japanese Garden, L-Imsida, Malta, MSD 9027, Malta"
+"University of Malta, Msida, Malta",35.9023226,14.4834189,"University of Malta, Ring Road, Japanese Garden, L-Imsida, Malta, MSD 9027, Malta"
+"University of Manchester, Manchester, U.K.",53.46600455,-2.23300880782987,"University of Manchester - Main Campus, Brunswick Street, Curry Mile, Ardwick, Manchester, Greater Manchester, North West England, England, M13 9NR, UK"
+University of Manitoba,49.8091536,-97.133041790072,"University of Manitoba, Gillson Street, Normand Park, Saint Vital, Winnipeg, Manitoba, R3T 2N2, Canada"
+University of Maryland,39.2899685,-76.6219610316858,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA"
+University of Maryland College Park,38.99203005,-76.9461029019905,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA"
+"University of Maryland, College Park, MD, USA",39.2899685,-76.6219610316858,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA"
+"University of Maryland-College Park, USA",38.99203005,-76.9461029019905,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA"
+University of Massachusetts,42.3889785,-72.5286987,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA"
+University of Massachusetts - Amherst,42.3869382,-72.5299147706745,"UMass Amherst, Commonwealth Avenue, Amherst, Hampshire, Massachusetts, 01003, USA"
+University of Massachusetts Amherst,42.3869382,-72.5299147706745,"UMass Amherst, Commonwealth Avenue, Amherst, Hampshire, Massachusetts, 01003, USA"
+"University of Massachusetts Amherst, Amherst MA, 01003",42.3919154,-72.5270705589714,"Murray D. Lincoln Campus Center, 1, Campus Center Way, Amherst, Hampshire, Massachusetts, 01003, USA"
+"University of Massachusetts Dartmouth, Dartmouth, MA, USA",41.62772475,-71.0072450098225,"University of Massachusetts Dartmouth, University Ring Road, Dartmouth, Bristol County, Massachusetts, 02747, USA"
+"University of Massachusetts, Amherst",42.3889785,-72.5286987,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA"
+"University of Massachusetts, Amherst MA, USA",42.3889785,-72.5286987,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA"
+"University of Massachusetts, Amherst, MA",42.3889785,-72.5286987,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA"
+University of Memphis,35.1189387,-89.9372195996589,"The University of Memphis, Desoto Avenue, Memphis, Shelby County, Tennessee, 38152, USA"
+University of Miami,25.7173339,-80.2786688657706,"University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA"
+"University of Miami, Coral Gables, FL",25.7173339,-80.2786688657706,"University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA"
+"University of Miami, USA",25.7173339,-80.2786688657706,"University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA"
+University of Michigan,42.2942142,-83.710038935096,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA"
+"University of Michigan, Ann Arbor",42.2942142,-83.710038935096,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA"
+"University of Michigan, Ann Arbor, MI",42.2942142,-83.710038935096,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA"
+"University of Michigan, Ann Arbor, MI 48109 USA",42.2808797,-83.7357152493893,"Power Center for the Performing Arts, 121, Fletcher Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA"
+"University of Michigan, Ann Arbor, MI, USA",42.2942142,-83.710038935096,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA"
+"University of Michigan, Ann Arbor, USA",42.2942142,-83.710038935096,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA"
+"University of Michigan, Ann, Arbor, MI USA",42.2942142,-83.710038935096,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA"
+University of Milan,45.4600907,9.1943132,"Università degli Studi di Milano, 7, Via Festa del Perdono, Milano, MI, LOM, 20122, Italia"
+University of Minnesota,44.97308605,-93.2370881262941,"WeismanArt, 333, East River Parkway, Marcy-Holmes, Phillips, Minneapolis, Hennepin County, Minnesota, 55455, USA"
+"University of Missouri, Columbia, MO",38.926761,-92.2919378337447,"L1, Maguire Boulevard, Lemone Industrial Park, Columbia, Boone County, Missouri, 65201, USA"
+University of Nebraska - Lincoln,40.8174723,-96.7044468,"Sheldon Museum of Art, North 12th Street, West Lincoln, Lincoln, Lancaster County, Nebraska, 68588-0300, USA"
+"University of Nevada, Reno, Reno, NV, USA",39.5469449,-119.813465660936,"Orange 1, Evans Avenue, Reno, Washoe County, Nevada, 89557, USA"
+"University of Nevada, Reno, USA",39.5469449,-119.813465660936,"Orange 1, Evans Avenue, Reno, Washoe County, Nevada, 89557, USA"
+"University of New South Wales, Sydney, NSW, Australia",-33.91758275,151.231240246527,"UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia"
+University of Newcastle,-33.3578899,151.37834708231,"University of Newcastle Central Coast Campus, Technology Bridge, Ourimbah, Central Coast, NSW, 2258, Australia"
+"University of Newcastle, Newcastle, Australia",-32.9276256,151.77133087091,"University of Newcastle, Christie Street, Newcastle, Newcastle-Maitland, Newcastle, NSW, 2300, Australia"
+University of North Carolina,35.90503535,-79.0477532652511,"University of North Carolina, Emergency Room Drive, Chapel Hill, Orange County, North Carolina, 27599, USA"
+University of North Carolina Wilmington,34.2375581,-77.9270129,"Kenan House, 1705, Market Street, Wilmington, New Hanover County, North Carolina, 28403, USA"
+"University of North Carolina Wilmington, USA",34.2375581,-77.9270129,"Kenan House, 1705, Market Street, Wilmington, New Hanover County, North Carolina, 28403, USA"
+"University of North Carolina Wilmington, Wilmington, NC, USA",34.2377352,-77.92673494788,"Kenan House parking lot, Princess Street, Wilmington, New Hanover County, North Carolina, 28405, USA"
+"University of North Carolina Wilmington, Wilmington, United States",34.2375581,-77.9270129,"Kenan House, 1705, Market Street, Wilmington, New Hanover County, North Carolina, 28403, USA"
+University of North Carolina at Chapel Hill,35.9113971,-79.0504529,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA"
+"University of North Carolina at Chapel Hill, Chapel Hill, NC",35.9105975,-79.0517871,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA"
+"University of North Carolina at Chapel Hill, Chapel Hill, NC, USA",35.9105975,-79.0517871,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA"
+"University of North Carolina at Chapel Hill, NC, USA",35.9113971,-79.0504529,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA"
+"University of North Carolina at Chapel Hill, USA",35.9113971,-79.0504529,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA"
+University of North Carolina at Charlotte,35.3103441,-80.732616166699,"Lot 20, Poplar Terrace Drive, Charlotte, Mecklenburg County, North Carolina, 28223, USA"
+"University of North Carolina at Wilmington, USA",34.2249827,-77.8690774374448,"University of North Carolina at Wilmington, Price Drive, University Suites, Wilmington, New Hanover County, North Carolina, 28403, USA"
+University of North Texas,33.2098879,-97.1514748776857,"University of North Texas, West Highland Street, Denton, Denton County, Texas, 76201, USA"
+"University of North Texas, Denton, Texas, USA",33.2098879,-97.1514748776857,"University of North Texas, West Highland Street, Denton, Denton County, Texas, 76201, USA"
+University of Northern British Columbia,53.8925662,-122.814715920529,"UNBC, Campus Ring Road, College Heights, Prince George, Regional District of Fraser-Fort George, British Columbia, V2M 5K7, Canada"
+"University of Northern British Columbia, Canada",53.8925662,-122.814715920529,"UNBC, Campus Ring Road, College Heights, Prince George, Regional District of Fraser-Fort George, British Columbia, V2M 5K7, Canada"
+"University of Northern British Columbia, Prince George, Canada",53.8925662,-122.814715920529,"UNBC, Campus Ring Road, College Heights, Prince George, Regional District of Fraser-Fort George, British Columbia, V2M 5K7, Canada"
+University of Notre Dame,41.70456775,-86.2382202601727,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA"
+"University of Notre Dame, Notre Dame, IN, USA",41.70456775,-86.2382202601727,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA"
+"University of Notre Dame, USA",41.70456775,-86.2382202601727,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA"
+"University of Notre Dame. Notre Dame, IN 46556.USA",41.70456775,-86.2382202601727,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA"
+University of Nottingham,52.9387428,-1.20029569274574,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK"
+"University of Nottingham, Nottingham, UK",52.9387428,-1.20029569274574,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK"
+University of Oradea,47.0570222,21.922709,"Universitatea Creștină Partium - Clădirea Sulyok, 27, Strada Primăriei, Orașul Nou, Oradea, Bihor, 410209, România"
+"University of Oslo, Oslo, Norway",59.93891665,10.7217076488427,"UiO, Moltke Moes vei, Blindern, Nordre Aker, Oslo, 0851, Norge"
+University of Ottawa,45.42580475,-75.6874011819989,"University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada"
+"University of Ottawa, Canada",45.42580475,-75.6874011819989,"University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada"
+"University of Ottawa, Ottawa, On, Canada",45.42580475,-75.6874011819989,"University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada"
+University of Oulu,65.0592157,25.466326012507,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi"
+"University of Oulu, Finland",65.0592157,25.466326012507,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi"
+University of Oxford,51.7534538,-1.25400997048855,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK"
+"University of Oxford, Oxford, United Kingdom",51.7534538,-1.25400997048855,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK"
+"University of Oxford, UK",51.7534538,-1.25400997048855,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK"
+"University of Oxford, United Kingdom",51.7534538,-1.25400997048855,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK"
+"University of Patras, Greece",38.2899482,21.7886469,"Πανεπιστήμιο Πατρών, Λεωφ. Ιπποκράτους, κ. Ρίου (Αγίου Γεωργίου Ρίου), Πάτρα, Δήμος Πατρέων, Περιφερειακή Ενότητα Αχαΐας, Περιφέρεια Δυτικής Ελλάδας, Πελοπόννησος, Δυτική Ελλάδα και Ιόνιο, 26443, Ελλάδα"
+University of Pennsylvania,39.9492344,-75.191989851901,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA"
+"University of Pennsylvania, Philadelphia, PA",39.95455675,-75.2029503620423,"40th Street Parking Lot, Walnut Street, Southwest Schuylkill, Philadelphia, Philadelphia County, Pennsylvania, 19104-1469, USA"
+University of Perugia,43.1119613,12.3890104,"Università degli Studi di Perugia, 1, Piazza dell'Università, Perugia, PG, UMB, 06123, Italia"
+"University of Peshawar, Pakistan",34.0092004,71.4877494739102,"University of Peshawar, Road 2, JAHANGIR ABAD / جهانگیرآباد, پشاور‎, Peshāwar District, خیبر پختونخوا, 2500, ‏پاکستان‎"
+"University of Peshawar, Peshawar, Pakistan",34.0092004,71.4877494739102,"University of Peshawar, Road 2, JAHANGIR ABAD / جهانگیرآباد, پشاور‎, Peshāwar District, خیبر پختونخوا, 2500, ‏پاکستان‎"
+University of Piraeus,37.94173275,23.6530326182197,"Πανεπιστήμιο Πειραιώς, 80, Καραολή και Δημητρίου, Απόλλωνας, Νέο Φάληρο, Πειραιάς, Δήμος Πειραιώς, Περιφερειακή Ενότητα Πειραιώς, Περιφέρεια Αττικής, Αττική, 185 34, Ελλάδα"
+"University of Pisa, Pisa, Italy",43.7201299,10.4078976,"Dipartimento di Fisica 'E. Fermi', 3, Largo Bruno Pontecorvo, San Francesco, Pisa, PI, TOS, 56127, Italia"
+University of Pittsburgh,40.44415295,-79.9624399276271,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA"
+"University of Pittsburgh, PA 15213, USA",40.4444651,-79.9532347,"Nationality Rooms, 4200, Omicron Delta Kappa Walk, North Oakland, PGH, Allegheny County, Pennsylvania, 15213, USA"
+"University of Pittsburgh, PA, 15260, USA",40.4437547,-79.9529557,"Stephen Foster Memorial Museum, Forbes Avenue, Panther Hollow, Central Oakland, PGH, Allegheny County, Pennsylvania, 15213, USA"
+"University of Pittsburgh, PA, USA",40.44415295,-79.9624399276271,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA"
+"University of Pittsburgh, Pittsburgh",40.44415295,-79.9624399276271,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA"
+"University of Pittsburgh, Pittsburgh PA",40.4495417,-79.8957457221781,"Visitor Parking, Thomas Boulevard, Homewood, Point Breeze North, Wilkinsburg, Allegheny County, Pennsylvania, 15208, USA"
+"University of Pittsburgh, Pittsburgh, PA",40.4495417,-79.8957457221781,"Visitor Parking, Thomas Boulevard, Homewood, Point Breeze North, Wilkinsburg, Allegheny County, Pennsylvania, 15208, USA"
+"University of Pittsburgh, Pittsburgh, PA , USA",40.4495417,-79.8957457221781,"Visitor Parking, Thomas Boulevard, Homewood, Point Breeze North, Wilkinsburg, Allegheny County, Pennsylvania, 15208, USA"
+"University of Pittsburgh, Pittsburgh, PA 15260, USA",40.4437547,-79.9529557,"Stephen Foster Memorial Museum, Forbes Avenue, Panther Hollow, Central Oakland, PGH, Allegheny County, Pennsylvania, 15213, USA"
+"University of Pittsburgh, Pittsburgh, PA, USA",40.4495417,-79.8957457221781,"Visitor Parking, Thomas Boulevard, Homewood, Point Breeze North, Wilkinsburg, Allegheny County, Pennsylvania, 15208, USA"
+"University of Pittsburgh, Pittsburgh, USA",40.44415295,-79.9624399276271,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA"
+"University of Pittsburgh, USA",40.44415295,-79.9624399276271,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA"
+"University of Plymouth, UK",50.3752501,-4.13927692297343,"Charles Seale-Hayne Library, Portland Square, Barbican, Plymouth, South West England, England, PL4 6AP, UK"
+"University of Portsmouth, United Kingdom",50.79805775,-1.09834911234691,"University of Portsmouth - North Zone, Portland Street, Portsea, Portsmouth, South East, England, PO1 3DE, UK"
+University of Posts and Telecommunications,32.11527165,118.925956600436,"南京邮电大学仙林校区, 9, 文苑路, 仙林大学城, 栖霞区, 南京市, 江苏省, 210023, 中国"
+"University of Queensland, Australia",-27.49741805,153.013169559836,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia"
+"University of Queensland, St Lucia, QLD, Australia",-27.497151,153.0117305,"Anthropology Museum, Chancellors Place, Hill End, St Lucia, Brisbane, QLD, 4072, Australia"
+University of Rochester,43.1576969,-77.5882915756007,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA"
+"University of Rochester, NY 14627, USA",43.1242954,-77.6288352530005,"Central Utilities Lot, Firemans, Rochester, Monroe County, New York, 14627, USA"
+"University of Rochester, Rochester, NY, USA",43.1576969,-77.5882915756007,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA"
+"University of Salzburg, Austria",47.79475945,13.0541752486067,"Universität Salzburg - Unipark, 1, Erzabt-Klotz-Straße, Nonntal, Salzburg, 5020, Österreich"
+University of Science and,5.35755715,100.303850375,"USM, Lengkok Sastera, The LIGHT, Batu Uban, George Town, PNG, 11700, Malaysia"
+University of Science and Technology of China,31.83907195,117.264207478576,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国"
+"University of Science and Technology of China, Hefei 230026, P. R. China",31.83907195,117.264207478576,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国"
+"University of Science and Technology of China, Hefei, 230027, China",31.83907195,117.264207478576,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国"
+"University of Science and Technology of China, Hefei, Anhui, China",31.83907195,117.264207478576,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国"
+"University of Science and Technology of China, Hefei, Anhui, P. R. China",31.83907195,117.264207478576,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国"
+"University of Science and Technology of China, Hefei, China",31.83907195,117.264207478576,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国"
+University of Siena,43.3216378,11.3325813,"Università degli Studi di Siena, 55, Banchi di Sotto, Siena, SI, TOS, 53100, Italia"
+"University of South Carolina, Columbia, USA",33.9928298,-81.0268516781225,"University of South Carolina, Wheat Street, Columbia, Richland County, South Carolina, 29205, USA"
+"University of South Carolina, USA",33.9928298,-81.0268516781225,"University of South Carolina, Wheat Street, Columbia, Richland County, South Carolina, 29205, USA"
+"University of South Florida, Tampa, Florida 33620",28.0599999,-82.4138361902512,"University of South Florida, Leroy Collins Boulevard, Tampa, Hillsborough County, Florida, 33620, USA"
+University of Southampton,50.89273635,-1.39464294664816,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK"
+"University of Southampton, SO17 1BJ, UK",50.89273635,-1.39464294664816,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK"
+"University of Southampton, Southampton, U.K.",50.89273635,-1.39464294664816,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK"
+"University of Southampton, United Kingdom",50.89273635,-1.39464294664816,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK"
+University of Southern California,34.0224149,-118.286344073446,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA"
+"University of Southern California, Los Angeles, CA",34.0224149,-118.286344073446,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA"
+"University of Southern California, Los Angeles, CA 90089, USA",34.0224149,-118.286344073446,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA"
+"University of Southern California, Los Angeles, USA",34.0224149,-118.286344073446,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA"
+"University of St Andrews, United Kingdom",56.3411984,-2.7930938,"University of St Andrews, North Street, Albany Park Student accommodation, Carngour, St Andrews, Fife, Scotland, KY16 9AJ, UK"
+University of Stuttgart,48.7813696,9.1754992,"Universität Stuttgart, 7, Keplerstraße, Stadtmitte, Stuttgart, Regierungsbezirk Stuttgart, Baden-Württemberg, 70174, Deutschland"
+University of Surrey,51.24303255,-0.590013824660236,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK"
+"University of Surrey, Guildford, Surrey GU2 7XH, UK",51.24303255,-0.590013824660236,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK"
+"University of Surrey, Guildford, Surrey, GU2 7XH, UK",51.24303255,-0.590013824660236,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK"
+"University of Surrey, United Kingdom",51.24303255,-0.590013824660236,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK"
+University of Sydney,-33.88890695,151.189433661925,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia"
+"University of Sydney, Australia",-33.88890695,151.189433661925,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia"
+"University of Sydney, Sydney, NSW, Australia",-33.88578245,151.182068591379,"Sand Roll House, Parramatta Road, Camperdown, Sydney, NSW, 2050, Australia"
+"University of Tabriz, Tabriz, Iran",38.0612553,46.3298484,"دانشگاه تبریز, شهید ایرج خلوتی, کوی انقلاب, تبریز, بخش مرکزی, شهرستان تبریز, استان آذربایجان شرقی, 5166616471, ‏ایران‎"
+University of Tampere,61.49412325,23.7792067776763,"Tampereen yliopisto, 4, Kalevantie, Ratinanranta, Tulli, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33100, Suomi"
+University of Technology Sydney,-33.8809651,151.201072985483,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia"
+"University of Technology Sydney, New South Wales, Australia",-33.8809651,151.201072985483,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia"
+"University of Technology Sydney, Sydney, NSW, Australia",-33.8830909,151.20217235558,"University of Technology Sydney, Harris Street, Ultimo, Sydney, NSW, 2007, Australia"
+"University of Technology Sydney, Ultimo, NSW, Australia",-33.8830909,151.20217235558,"University of Technology Sydney, Harris Street, Ultimo, Sydney, NSW, 2007, Australia"
+"University of Technology, Baghdad, Iraq",33.3120263,44.4471829434368,"الجامعة التكنلوجية, A86;N11;D383, محلة 103, Al Saadoom Park, Rusafa, بغداد, Al Resafa, محافظة بغداد, 3241, العراق"
+"University of Technology, Sydney",-33.8828784,151.200682779726,"UTS, Thomas Street, Ultimo, Sydney, NSW, 2007, Australia"
+"University of Technology, Sydney, Australia",-33.8828784,151.200682779726,"UTS, Thomas Street, Ultimo, Sydney, NSW, 2007, Australia"
+"University of Technology, Sydney, NSW, Australia",-33.8828784,151.200682779726,"UTS, Thomas Street, Ultimo, Sydney, NSW, 2007, Australia"
+"University of Technology, Sydney, Sydney, Australia",-33.8828784,151.200682779726,"UTS, Thomas Street, Ultimo, Sydney, NSW, 2007, Australia"
+"University of Tennessee, Knoxville",35.9542493,-83.9307395,"University of Tennessee, Melrose Avenue, Fort Sanders, Knoxville, Knox County, Tennessee, 37916, USA"
+University of Texas,32.3163078,-95.2536994379459,"The University of Texas at Tyler, 3900, University Boulevard, Tyler, Smith County, Texas, 75799, USA"
+University of Texas at,32.3163078,-95.2536994379459,"The University of Texas at Tyler, 3900, University Boulevard, Tyler, Smith County, Texas, 75799, USA"
+University of Texas at Arlington,32.7283683,-97.112018348404,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA"
+"University of Texas at Arlington, Arlington, TX",32.7283683,-97.112018348404,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA"
+"University of Texas at Arlington, Arlington, TX, USA",32.7283683,-97.112018348404,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA"
+"University of Texas at Arlington, Arlington, Texas 76019, USA",32.7283683,-97.112018348404,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA"
+"University of Texas at Arlington, TX, USA",32.7283683,-97.112018348404,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA"
+University of Texas at Austin,30.284151,-97.7319559808022,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA"
+"University of Texas at Dallas, Richardson, 75080, USA",32.9820799,-96.7566278,"University of Texas at Dallas, Richardson, Dallas County, Texas, 75080, USA"
+University of Texas at San Antonio,29.58333105,-98.6194450505688,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA"
+"University of Texas at San Antonio, 78249, USA",29.58333105,-98.6194450505688,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA"
+"University of Texas at San Antonio, San Antonio, TX",29.42182005,-98.5016869955163,"Lot D3, South PanAm Expressway, Cattleman's Square, San Antonio, Bexar County, Texas, 78205, USA"
+"University of Texas at San Antonio, San Antonio, TX, USA",29.42182005,-98.5016869955163,"Lot D3, South PanAm Expressway, Cattleman's Square, San Antonio, Bexar County, Texas, 78205, USA"
+"University of Texas at San Antonio, San Antonio, Texas",29.42182005,-98.5016869955163,"Lot D3, South PanAm Expressway, Cattleman's Square, San Antonio, Bexar County, Texas, 78205, USA"
+"University of Texas at San Antonio, San Antonio, United States",29.58333105,-98.6194450505688,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA"
+"University of Texas, Austin, TX 78712-1188, USA",30.284458,-97.7342106,"University of Texas at Austin, 2152, San Jacinto Boulevard, Medical District, Austin, Travis County, Texas, 78712, USA"
+"University of Texas, San Antonio, TX, USA",29.58333105,-98.6194450505688,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA"
+University of Thessaloniki,40.62984145,22.9588934957528,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα"
+University of Tokyo,35.9020448,139.936220089117,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本"
+"University of Tokyo, Japan",35.9020448,139.936220089117,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本"
+University of Toronto,43.66333345,-79.3976997498952,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada"
+"University of Toronto Toronto, Canada",43.66333345,-79.3976997498952,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada"
+"University of Toronto, Toronto, ON, Canada",43.66333345,-79.3976997498952,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada"
+University of Toulouse,43.5930883,1.4479956,"Université de Toulouse, 41, Allées Jules Guesde, Toulouse, Haute-Garonne, Occitanie, 31013, France"
+University of Trento,46.0658836,11.1159894,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia"
+"University of Trento, Italy",46.0658836,11.1159894,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia"
+"University of Trento, Trento, Italy",46.0658836,11.1159894,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia"
+"University of Trento, Trento, TN, Italy",46.0658836,11.1159894,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia"
+University of Tsukuba,36.1112058,140.1055176,"University of Tsukuba, つばき通り, Kananemoto-satsukabe village, つくば市, 茨城県, 関東地方, 305-8377, 日本"
+"University of Tsukuba, Japan",36.1112058,140.1055176,"University of Tsukuba, つばき通り, Kananemoto-satsukabe village, つくば市, 茨城県, 関東地方, 305-8377, 日本"
+University of Twente,52.2380139,6.8566761,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland"
+"University of Twente, Netherlands",52.2380139,6.8566761,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland"
+"University of Twente, The Netherlands",52.2380139,6.8566761,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland"
+University of Venezia,45.4312742,12.3265377,"University, Fondamenta Toffetti, Dorsoduro, Venezia-Murano-Burano, Venezia, VE, VEN, 30123, Italia"
+"University of Vermont, 33 Colchester Avenue, Burlington",44.48116865,-73.2002178989123,"University of Vermont, Colchester Avenue, Burlington, Chittenden County, Vermont, 05401, USA"
+"University of Vienna, Austria",48.2131302,16.3606865338016,"Uni Wien, 1, Universitätsring, Schottenviertel, KG Innere Stadt, Innere Stadt, Wien, 1010, Österreich"
+University of Virginia,38.0353682,-78.5035322,"University of Virginia, Rotunda Alley, Carr's Hill, Albemarle County, Virginia, 22904-4119, USA"
+"University of Virginia, Charlottesville, VA",38.0410576,-78.5054996018357,"University of Virginia, Emmet Street North, Charlottesville, Virginia, 22901, USA"
+University of Warwick,52.3793131,-1.5604252,"University of Warwick, University Road, Kirby Corner, Cannon Park, Coventry, West Midlands Combined Authority, West Midlands, England, CV4 7AL, UK"
+"University of Warwick, Coventry, U.K.",52.3793131,-1.5604252,"University of Warwick, University Road, Kirby Corner, Cannon Park, Coventry, West Midlands Combined Authority, West Midlands, England, CV4 7AL, UK"
+University of Washington,47.6543238,-122.308008943203,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA"
+"University of Washington, Seattle, USA",47.6543238,-122.308008943203,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA"
+"University of Washington, Seattle, WA 98195, United States",47.6547795,-122.305818,"University of Washington, Yakima Lane, Montlake, University District, Seattle, King County, Washington, 98195, USA"
+"University of Washington, Seattle, WA, USA",47.65249975,-122.2998748,"University of Washington, Northeast Walla Walla Road, Montlake, University District, Seattle, King County, Washington, 98195-2350, USA"
+University of Waterloo,43.47061295,-80.5472473165632,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada"
+University of Western Australia,-31.95040445,115.797900374251,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia"
+"University of Windsor, Canada",42.30791465,-83.0717691461703,"Bridge AA, Ambassador Bridge, Windsor, Essex, Ontario, N9C 2J9, Canada"
+"University of Windsor, Canada N9B 3P4",42.30791465,-83.0717691461703,"Bridge AA, Ambassador Bridge, Windsor, Essex, Ontario, N9C 2J9, Canada"
+"University of Windsor, Ontario, Canada",42.30791465,-83.0717691461703,"Bridge AA, Ambassador Bridge, Windsor, Essex, Ontario, N9C 2J9, Canada"
+University of Wisconsin Madison,43.07982815,-89.4306642542901,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA"
+University of Wisconsin - Madison,43.07982815,-89.4306642542901,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA"
+University of Wisconsin Madison,43.07982815,-89.4306642542901,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA"
+University of Wisconsin-Madison,43.07982815,-89.4306642542901,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA"
+"University of Wisconsin-Madison, Madison, WI, USA",43.0705257,-89.4059387,"UW Geology Museum, 1215, West Dayton Street, South Campus, Madison, Dane County, Wisconsin, 53715, USA"
+University of Witwatersrand,-26.1888813,28.0247907319205,"University of the Witwatersrand, Empire Road, Johannesburg Ward 60, Johannesburg, City of Johannesburg Metropolitan Municipality, Gauteng, 2001, South Africa"
+University of Wollongong,-34.40505545,150.878346547278,"University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia"
+"University of Wollongong, Wollongong, Australia",-34.40505545,150.878346547278,"University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia"
+"University of York, UK",53.94540365,-1.0313887829649,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK"
+"University of York, York, UK",53.94540365,-1.0313887829649,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK"
+"University of York, York, United Kingdom",53.94540365,-1.0313887829649,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK"
+"University of Zurich, Zurich, Switzerland",47.3743222,8.5491211,"Universität Zürich, 71, Rämistrasse, Hochschulen, Zürich, Bezirk Zürich, Zürich, 8006, Schweiz/Suisse/Svizzera/Svizra"
+"University of telecommunications and post, Sofia, Bulgaria",42.6560524,23.3476108351659,"Висше Училище по Телекомуникации и Пощи, 1, бул. Акад. Стефан Младенов, ж.к. Студентски град, район Студентски, Столична, София-град, 1700, Бългaрия"
+"University of the Basque Country, San Sebastian, Spain",43.30927695,-2.01066784661227,"Euskal Herriko Unibertsitatea, Ibaeta Campusa, Paseo Arriola pasealekua, Ibaeta, Donostia/San Sebastián, Donostialdea, Gipuzkoa, Euskadi, 20008, España"
+University of the Western Cape,-33.9327762,18.6291540714825,"University of the Western Cape, Park Road, Cape Town Ward 9, Bellville, City of Cape Town, Western Cape, 7493, South Africa"
+University of the Witwatersrand,-26.1888813,28.0247907319205,"University of the Witwatersrand, Empire Road, Johannesburg Ward 60, Johannesburg, City of Johannesburg Metropolitan Municipality, Gauteng, 2001, South Africa"
+"University of the Witwatersrand, Johannesburg, South Africa",-26.1888813,28.0247907319205,"University of the Witwatersrand, Empire Road, Johannesburg Ward 60, Johannesburg, City of Johannesburg Metropolitan Municipality, Gauteng, 2001, South Africa"
+"University, China",22.4133862,114.210058,"大學 University, 澤祥街 Chak Cheung Street, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国"
+"University, Guangzhou, China",23.1314851,113.2852239,"中山大学第一课室, 74号大院, 中山二路, 马棚岗, 农林街道, 越秀区 (Yuexiu), 广州市, 广东省, 510080, 中国"
+"University, Hong Kong",22.4133862,114.210058,"大學 University, 澤祥街 Chak Cheung Street, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国"
+"University, Singapore",1.2962018,103.776899437848,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore"
+"University, USA",25.7147949,-80.276947,"University, South Dixie Highway, Coral Gables, Miami-Dade County, Florida, 33124-6310, USA"
+"University, Xi an Shaanxi Province, Xi an 710049, China",34.2707834,108.94449949951,"西五路, 新城区, 新城区 (Xincheng), 西安市, 陕西省, 710003, 中国"
+"Università degli Studi di Milano, Italy",45.47567215,9.23336232066359,"Università degli Studi di Milano, Via Camillo Golgi, Città Studi, Milano, MI, LOM, 20133, Italia"
+Università di Salerno Italy,40.7646949,14.7889151,"Università, Autostrada del Mediterraneo, Fisciano, SA, CAM, 84084, Italia"
+Université du Québec à Chicoutimi (UQAC),48.4200469,-71.0525344,"Université du Québec à Chicoutimi (UQAC), Chicoutimi, Ville de Saguenay, Saguenay - Lac-Saint-Jean, Québec, G7H 2B1, Canada"
+Ural Federal University (UrFU,56.8435083,60.6454805,"УрФУ, улица Гагарина, Эврика, Втузгородок, Кировский район, Екатеринбург, городской округ Екатеринбург, Свердловская область, Уральский федеральный округ, 620062, РФ"
+"Urmia University, Urmia, Iran",37.52914535,45.0488607694682,"دانشگاه ارومیه, خیابان اداره گاز (منصور افشار), دانشکده, ارومیه, بخش مرکزی, شهرستان ارومیه, استان آذربایجان غربی, 444655677, ‏ایران‎"
+"Ursinus College, Collegeville, PA",40.1917705,-75.4568484,"Ursinus College, East Main Street, Collegeville, Montgomery County, Pennsylvania, 19426, USA"
+"Utah State University, Logan UT",41.7411504,-111.8122309,"Utah State University, Champ Drive, Logan, Cache County, Utah, 84322, USA"
+"Utah State University, Logan, UT 84322-4205, USA",41.7411504,-111.8122309,"Utah State University, Champ Drive, Logan, Cache County, Utah, 84322, USA"
+"Varendra University, Rajshahi, Bangladesh",24.3643231,88.6333105,"department of english Vrendra University, Dhaka - Rajshahi Highway, Talaimari, রাজশাহী, রাজশাহী বিভাগ, 6204, বাংলাদেশ"
+Victoria University of Wellington,-41.29052775,174.768469187426,"Victoria University of Wellington, Waiteata Road, Aro Valley, Wellington, Wellington City, Wellington, 6040, New Zealand/Aotearoa"
+Vienna University of Technology,48.19853965,16.3698616762866,"TU Wien, Hauptgebäude, Hoftrakt, Freihausviertel, KG Wieden, Wieden, Wien, 1040, Österreich"
+"Vignan University, Andhra Pradesh, India",16.2329008,80.5475018,"Vignan university, Sangam Dairy Entry, Sangam Dairy, Gowdapalem, Guntur District, Andhra Pradesh, 522213, India"
+Villanova University,40.0367774,-75.342023320028,"Villanova University, East Lancaster Avenue, Radnor Township, Delaware County, Pennsylvania, 19010, USA"
+"Virginia Commonwealth University, Richmond, VA, USA",37.548215,-77.4530642444471,"Virginia Commonwealth University, The Compass, Oregon Hill, Richmond, Richmond City, Virginia, 23284, USA"
+Virginia Polytechnic Institute and State University,37.21872455,-80.4254251869494,"Virginia Polytechnic Institute and State University, Duck Pond Drive, Blacksburg, Montgomery County, Virginia, 24061-9517, USA"
+"Virginia Polytechnic Institute and State University, Blacksburg",37.21872455,-80.4254251869494,"Virginia Polytechnic Institute and State University, Duck Pond Drive, Blacksburg, Montgomery County, Virginia, 24061-9517, USA"
+"Virginia Polytechnic Institute and State University, Blacksburg, Virginia",37.21872455,-80.4254251869494,"Virginia Polytechnic Institute and State University, Duck Pond Drive, Blacksburg, Montgomery County, Virginia, 24061-9517, USA"
+Virginia Tech Carilion Research Institute,37.2579548,-79.9423329131356,"Virginia Tech Carilion Research Institute, South Jefferson Street, Crystal Spring, Roanoke, Virginia, 24016, USA"
+"Vogt-Koelln-Strasse 30, 22527 Hamburg - Germany",53.599482,9.93353435970931,"Informatikum, 30, Vogt-Kölln-Straße, Stellingen, Eimsbüttel, Hamburg, 22527, Deutschland"
+Vrije Universiteit Brussel,50.8411007,4.32377555279953,"Vrije Universiteit Brussel, 170, Quai de l'Industrie - Nijverheidskaai, Anderlecht, Brussel-Hoofdstad - Bruxelles-Capitale, Région de Bruxelles-Capitale - Brussels Hoofdstedelijk Gewest, 1070, België / Belgique / Belgien"
+"Vrije Universiteit Brussel, 1050 Brussels, Belgium",50.8223021,4.3967361,"Vrije Universiteit Brussel, 2, Boulevard de la Plaine - Pleinlaan, Ixelles - Elsene, Brussel-Hoofdstad - Bruxelles-Capitale, Région de Bruxelles-Capitale - Brussels Hoofdstedelijk Gewest, 1050, België / Belgique / Belgien"
+"Vulcan Inc, Seattle, WA 98104",47.5980546,-122.3284865,"Vulcan Inc., 505, Downtown Seattle Transit Tunnel, Seattle Downtown, International District/Chinatown, Seattle, King County, Washington, 98191, USA"
+"WVU, USA",39.6349398,-79.9570056423469,"Stansbury Hall (WVU), Caperton Trail, Brewer Hill, Star City, Monongalia County, West Virginia, 26504, USA"
+"Walt Disney Imagineering, USA",34.1619174,-118.28837020278,"Walt Disney Imagineering, 1401, Flower Street, Grand Central Creative Campus, Glendale, Los Angeles County, California, 91201, USA"
+Warsaw University of Technology,52.22165395,21.0073577612511,"Politechnika Warszawska, 1, Plac Politechniki, VIII, Śródmieście, Warszawa, mazowieckie, 00-661, RP"
+"Warsaw University of Technology, Poland",52.22165395,21.0073577612511,"Politechnika Warszawska, 1, Plac Politechniki, VIII, Śródmieście, Warszawa, mazowieckie, 00-661, RP"
+Waseda University,33.8898728,130.708562047107,"早稲田大学 北九州キャンパス, 2-2, 有毛引野線, 八幡西区, 北九州市, 福岡県, 九州地方, 808-0135, 日本"
+"Waseda University, Kitakyushu, Japan 808-0135",33.8898728,130.708562047107,"早稲田大学 北九州キャンパス, 2-2, 有毛引野線, 八幡西区, 北九州市, 福岡県, 九州地方, 808-0135, 日本"
+"Washington University, St. Louis, MO, USA",38.6480445,-90.3099667,"Dero, Wallace Drive, St. Louis County, Missouri, MO 63130, USA"
+Wayne State University,42.357757,-83.0628671134125,"Parking Structure 3, East Warren Avenue, New Center, Detroit, Wayne County, Michigan, 48236, USA"
+"Wayne State University, Detroit, MI 48202, USA",42.3656423,-83.0711533990367,"Wayne State University, Burroughs Street, New Center, Detroit, Wayne County, Michigan, 48202, USA"
+"Wayne State University, Detroit, MI, USA",42.3656423,-83.0711533990367,"Wayne State University, Burroughs Street, New Center, Detroit, Wayne County, Michigan, 48202, USA"
+Weizmann Institute of Science,31.9078499,34.8133409244421,"מכון ויצמן למדע, שדרת מרכוס זיו, מעונות שיין, אחוזות הנשיא, רחובות, מחוז המרכז, NO, ישראל"
+"Weizmann Institute of Science, Rehovot, 76100, Israel",31.9078499,34.8133409244421,"מכון ויצמן למדע, שדרת מרכוס זיו, מעונות שיין, אחוזות הנשיא, רחובות, מחוז המרכז, NO, ישראל"
+West Virginia University,39.65404635,-79.96475355,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA"
+"West Virginia University, Morgantown WV 26506, USA",39.65404635,-79.96475355,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA"
+"West Virginia University, Morgantown, USA",39.65404635,-79.96475355,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA"
+"West Virginia University, Morgantown, WV",39.65404635,-79.96475355,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA"
+"West Virginia University, Morgantown, WV 26506, USA",39.65404635,-79.96475355,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA"
+"West Virginia University, Morgantown, WV, USA",39.65404635,-79.96475355,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA"
+Western Kentucky University,36.9845317,-86.4576443016944,"Western Kentucky University, Avenue of Champions, Bowling Green, Warren County, Kentucky, 42101, USA"
+"Western Sydney University, Parramatta, NSW 2150, Australia",-33.8160848,151.00560034186,"Western Sydney University, Parramatta City Campus, Smith Street, Parramatta, Sydney, Parramatta, NSW, 2150, Australia"
+Wolfson College,51.7711076,-1.25361700492597,"Wolfson College, Linton Road, Norham Manor, Oxford, Oxon, South East, England, OX2 6UD, UK"
+"Wuhan University of Technology, Wuhan, China",30.60903415,114.351428398184,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国"
+Xerox Research Center,43.5129109,-79.6664076152913,"Xerox Research Centre of Canada, 2660, Speakman Drive, Sheridan Park, Erin Mills, Ont., Peel Region, Ontario, L5J 2M4, Canada"
+"Xi'an Jiaotong University, Xi'an, China",34.2474949,108.978987508847,"西安交通大学兴庆校区, 文治路, 乐居场, 碑林区 (Beilin), 西安市, 陕西省, 710048, 中国"
+Xiamen University,24.4399419,118.093017809127,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国"
+"Xiamen University, Xiamen 361005, China",24.4399419,118.093017809127,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国"
+"Xiamen University, Xiamen, China",24.4399419,118.093017809127,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国"
+"Xiamen University, Xiamen, Fujian, China",24.4399419,118.093017809127,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国"
+"Xiamen University, Xiamen, P. R. China",24.4399419,118.093017809127,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国"
+"Xiangtan University, Xiangtan, China",27.88707585,112.857109176016,"湘潭大学图书馆, 文化广场, 羊牯塘街道, 雨湖区, 湘潭市 / Xiangtan, 湖南省, 中国"
+Xidian University,34.1235825,108.83546,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国"
+"Xidian University, Xi an, China",34.1235825,108.83546,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国"
+"Xidian University, Xi'an, China",34.1235825,108.83546,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国"
+"Xidian University, Xi’an, China",34.1235825,108.83546,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国"
+"Y. Li, University of Maryland",39.2864694,-76.6263409932124,"Penn Street Garage, 120, Penn Street, Ridgleys Delight, Baltimore, Maryland, 21201, USA"
+Yale University,41.25713055,-72.9896696015223,"Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA"
+Yaroslavl State University,57.6252103,39.8845656,"ЯрГУ им. Демидова (Экономический факультет), 3, Комсомольская улица, Кировский район, Ярославль, городской округ Ярославль, Ярославская область, ЦФО, 150000, РФ"
+Yeungnam University,35.8365403,128.7534309,"영남대, 대학로, 부적리, 경산시, 경북, 712-749, 대한민국"
+"Yonsei University, 50 Yonsei-ro, SEOUL, Republic of Korea",37.5600406,126.9369248,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국"
+"Yonsei University, 50 Yonsei-ro, Seodaemun-gu, SEOUL, Republic of Korea",37.5600406,126.9369248,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국"
+York University,43.7743911,-79.5048108538813,"York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada"
+"York University, Toronto",43.7743911,-79.5048108538813,"York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada"
+"York University, Toronto, Canada",43.7743911,-79.5048108538813,"York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada"
+"Yunnan University, Kunming, P. R. China",25.05703205,102.700275254918,"云南大学, 一二一大街, 志城家园, 五华区, 五华区 (Wuhua), 昆明市 (Kunming), 云南省, 650030, 中国"
+Zaragoza University,41.6406218,-0.900793992168927,"Colegio Mayor Universitario Santa Isabel, Calle de Domingo Miral, Romareda, Zaragoza, Aragón, 50009, España"
+"Zhejiang Normal University, Jinhua, China",29.13646725,119.637686517179,"浙江师范大学, 688, 迎宾大道, 柳湖花园, 金华市, 婺城区 (Wucheng), 金华市 / Jinhua, 浙江省, 321004, 中国"
+Zhejiang University,30.19331415,120.119308216677,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国"
+Zhejiang University of Technology,30.2931534,120.1620458,"浙江工业大学, 潮王路, 朝晖街道, 杭州市 Hangzhou, 浙江省, 310014, 中国"
+"Zhejiang University of Technology, Hangzhou, China",30.2931534,120.1620458,"浙江工业大学, 潮王路, 朝晖街道, 杭州市 Hangzhou, 浙江省, 310014, 中国"
+"Zhejiang University, Hangzhou, China",30.19331415,120.119308216677,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国"
+"Zhengzhou University, China",34.8088168,113.5352664,"科学大道, 中原区 (Zhongyuan), 郑州市 / Zhengzhou, 河南省, 450001, 中国"
+"Zhengzhou University, Zhengzhou, Henan 450052, China",34.8088168,113.5352664,"科学大道, 中原区 (Zhongyuan), 郑州市 / Zhengzhou, 河南省, 450001, 中国"
+a The University of Nottingham Malaysia Campus,2.9438432,101.8736196,"The University of Nottingham Malaysia Campus, Jalan Broga, Bandar Rinching, Semenyih, Selangor, 43500, Malaysia"
+any other University,53.8012316,-1.5476213,"Northern Film School, Millennium Square, Steander, Woodhouse, Leeds, Yorkshire and the Humber, England, LS1 3DW, UK"
+college of Engineering,13.0110912,80.2354520862161,"College of Engineering, Sardar Patel Road, Srinagar Colony, Ward 171, Zone 13 Adyar, Chennai, Chennai district, Tamil Nadu, 600025, India"
+of Cornell University,42.4505507,-76.4783512955428,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA"
+of bilkent university,39.8720489,32.7539515466323,"Bilkent Üniversitesi, 3. Cadde, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye"
+of the University of Notre Dame,41.70456775,-86.2382202601727,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA"
+the Chinese University of Hong Kong,22.42031295,114.207886442805,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国"
+"the Hong Kong Polytechnic University, Hong Kong",22.304572,114.179762852269,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国"
+"the University of Queensland, Brisbane, Qld, Australia",-27.49741805,153.013169559836,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia"
+to Michigan State University,42.7231021,-84.4449848597663,"Red Cedar River, Small Acres Lane, Okemos, Ingham County, Michigan, 48864, USA"
+"university, Shiraz, Iran",29.6284395,52.5181728343761,"دانشکده مهندسی دانشگاه شیراز, ملاصدرا, فلسطین, محدوده شهرداری منطقه یک - شهرداری شیراز, شیراز, بخش مرکزی شهرستان شیراز, شهرستان شیراز, استان فارس, 71936, ‏ایران‎"
+y National Institute of Advanced Industrial Science and Technology,36.05238585,140.118523607658,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本"
+"École Polytechnique Fédérale de Lausanne (EPFL), Switzerland",46.5184121,6.5684654,"Bibliothèque de l'EPFL, Route des Noyerettes, Ecublens, District de l'Ouest lausannois, Vaud, 1024, Schweiz/Suisse/Svizzera/Svizra"
diff --git a/scraper/reports/all_institutions_sorted.csv b/scraper/reports/all_institutions_sorted.csv
new file mode 100644
index 00000000..67604598
--- /dev/null
+++ b/scraper/reports/all_institutions_sorted.csv
@@ -0,0 +1,1745 @@
+AALTO UNIVERSITY,AALTO UNIVERSITY,"Aalto, 24, Otakaari, Otaniemi, Suur-Tapiola, Espoo, Helsingin seutukunta, Uusimaa, Etelä-Suomi, Manner-Suomi, 02150, Suomi",60.18558755,24.824273298775
+AGH University of Science and Technology,AGH University of Science and Technology,"AGH, Władysława Reymonta, Czarna Wieś, Krowodrza, Kraków, małopolskie, 30-059, RP",50.0657033,19.9189586670586
+AGH University of Science and Technology,"AGH University of Science and Technology, Kraków, Poland","AGH, Władysława Reymonta, Czarna Wieś, Krowodrza, Kraków, małopolskie, 30-059, RP",50.0657033,19.9189586670586
+AI Institute,AI Institute,"INDEC, 609, Avenida Presidente Julio A. Roca, Microcentro, Comuna 1, Monserrat, CABA, C1067ABB, Argentina",-34.6102167,-58.3752244291708
+ALICE Institute,ALICE Institute,"Instituto Superior de Ciências da Educação (ISCED), Rua Salvador Allende (Salvador Guillermo Allende Gossens), Maculusso, Maianga, Município de Luanda, Luanda, 927, Angola",-8.82143045,13.2347076178375
+ARISTOTLE UNIVERSITY OF THESSALONIKI,ARISTOTLE UNIVERSITY OF THESSALONIKI,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.9588934957528
+Aalborg University,Aalborg University,"AAU, Pontoppidanstræde, Sønder Tranders, Aalborg, Aalborg Kommune, Region Nordjylland, 9220, Danmark",57.01590275,9.97532826658991
+Aalborg University,"Aalborg University, Denmark","AAU, Pontoppidanstræde, Sønder Tranders, Aalborg, Aalborg Kommune, Region Nordjylland, 9220, Danmark",57.01590275,9.97532826658991
+Aalto University,Aalto University,"Aalto, 24, Otakaari, Otaniemi, Suur-Tapiola, Espoo, Helsingin seutukunta, Uusimaa, Etelä-Suomi, Manner-Suomi, 02150, Suomi",60.18558755,24.824273298775
+Aalto University,"Aalto University, Finland","Aalto, 24, Otakaari, Otaniemi, Suur-Tapiola, Espoo, Helsingin seutukunta, Uusimaa, Etelä-Suomi, Manner-Suomi, 02150, Suomi",60.18558755,24.824273298775
+Aberystwyth University,Aberystwyth University,"Aberystwyth University, Llanbadarn Campus, Cefn Esgair, Waun Fawr, Comins Coch, Ceredigion, Wales, SY23 3JG, UK",52.4107358,-4.05295500914411
+Aberystwyth University,"Aberystwyth University, UK","Aberystwyth University, Llanbadarn Campus, Cefn Esgair, Waun Fawr, Comins Coch, Ceredigion, Wales, SY23 3JG, UK",52.4107358,-4.05295500914411
+Ahmedabad University,Ahmedabad University,"School of Science and Technology, University Road, Gurukul, Gulbai tekra, Ahmedabad, Ahmedabad District, Gujarat, 380001, India",23.0378743,72.5518004573221
+Ahmedabad University,"Ahmedabad University, Gujarat, India 380009","School of Science and Technology, University Road, Gurukul, Gulbai tekra, Ahmedabad, Ahmedabad District, Gujarat, 380001, India",23.0378743,72.5518004573221
+Ajou University,Ajou University,"아주대학교, 성호대교, 이의동, 영통구, 수원시, 경기, 16499, 대한민국",37.2830003,127.045484689222
+Akita Prefectural University,Akita Prefectural University,"秋田県立大学, 秋田天王線, 潟上市, 秋田県, 東北地方, 011-0946, 日本",39.8011499,140.045911602376
+Akita Prefectural University,"Akita Prefectural University, Yurihonjo, Japan","秋田県立大学, 日本海東北自動車道(無料区間), 八幡前, 由利本荘市, 秋田県, 東北地方, 〒015-0836, 日本",39.39325745,140.073500465928
+Akita University,Akita University,"秋田大学手形キャンパス, 秋田八郎潟線, 手形字扇田, 広面, 秋田市, 秋田県, 東北地方, 010-0864, 日本",39.7278142,140.133225661449
+Akita University,"Akita University, Akita, Japan","秋田大学鉱業博物館, 2, 秋田八郎潟線, 手形字扇田, 広面, 秋田市, 秋田県, 東北地方, 010-8502, 日本",39.7291921,140.136565773585
+Alexandria University,Alexandria University,"جامعة الإسكندرية, الكورنيش, إبراهيمية, الإسكندرية, 21522, مصر",31.21051105,29.9131456239399
+Alexandria University,"Alexandria University, Alexandria, Egypt","جامعة الإسكندرية, الكورنيش, إبراهيمية, الإسكندرية, 21522, مصر",31.21051105,29.9131456239399
+"Alibaba Group, Hangzhou, China","Alibaba Group, Hangzhou, China","Alibaba Group, 五常街道, 余杭区 (Yuhang), 杭州市 Hangzhou, 浙江省, 中国",30.2810654,120.021390874339
+"Amazon, Berkshire, U.K.","Amazon, Berkshire, U.K.","Amazon Logistics, Exeter Road, Theale, West Berkshire, South East, England, RG7 4PL, UK",51.43522855,-1.07155123817349
+American University,American University,"American University, 4400, Massachusetts Avenue Northwest, Spring Valley, American University Park, D.C., 20016, USA",38.93804505,-77.0893922365193
+American University,"American University, Washington, DC, USA","American University, 4400, Massachusetts Avenue Northwest, Spring Valley, American University Park, D.C., 20016, USA",38.93804505,-77.0893922365193
+Amherst College,Amherst College,"Amherst College, Boltwood Avenue, Amherst, Hampshire, Massachusetts, 01004, USA",42.37289,-72.518814
+Amirkabir University of Technology,Amirkabir University of Technology,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎",35.704514,51.4097205774739
+Amirkabir University of Technology,"Amirkabir University of Technology, Tehran","دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎",35.704514,51.4097205774739
+Amirkabir University of Technology,"Amirkabir University of Technology, Tehran, Iran","دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎",35.704514,51.4097205774739
+Amirkabir University of Technology,"Amirkabir University of Technology, Tehran. Iran","دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎",35.704514,51.4097205774739
+Amity University,Amity University,"Amity University, Faizabad Road, Uttardhauna, Gomti Nagar, Tiwariganj, Lucknow, Uttar Pradesh, 226010, India",26.85095965,81.0495096452828
+Amity University,"Amity University, Lucknow, India","Amity University, Faizabad Road, Uttardhauna, Gomti Nagar, Tiwariganj, Lucknow, Uttar Pradesh, 226010, India",26.85095965,81.0495096452828
+Amity University Uttar Pradesh,Amity University Uttar Pradesh,"Amity University, Noida, Greater Noida Expressway, Noida Special Economic Zone, Bakhtawarpur, Ghaziabad, Uttar Pradesh, 201304, India",28.54322285,77.3327482973395
+Amity University Uttar Pradesh,"Amity University Uttar Pradesh, Noida","Amity University, Noida, Greater Noida Expressway, Noida Special Economic Zone, Bakhtawarpur, Ghaziabad, Uttar Pradesh, 201304, India",28.54322285,77.3327482973395
+Anhui Polytechnic University,Anhui Polytechnic University,"安徽工程大学, 鸠江北路, 芜湖市, 芜湖市区, 芜湖市 / Wuhu, 安徽省, 241000, 中国",31.34185955,118.407397117034
+Anhui Polytechnic University,"Anhui Polytechnic University, Wuhu, China","安徽工程大学, 鸠江北路, 芜湖市, 芜湖市区, 芜湖市 / Wuhu, 安徽省, 241000, 中国",31.34185955,118.407397117034
+Anhui University,Anhui University,"安徽大学(磬苑校区), 111, 九龙路, 弘泰苑, 合肥国家级经济技术开发区, 芙蓉社区, 合肥经济技术开发区, 合肥市区, 合肥市, 安徽省, 230601, 中国",31.76909325,117.17795091346
+Anhui University,"Anhui University, Hefei, China","安徽大学(磬苑校区), 111, 九龙路, 弘泰苑, 合肥国家级经济技术开发区, 芙蓉社区, 合肥经济技术开发区, 合肥市区, 合肥市, 安徽省, 230601, 中国",31.76909325,117.17795091346
+Anna University,Anna University,"Anna University, Nuclear Physics Road, Srinagar Colony, Ward 171, Zone 13 Adyar, Chennai, Chennai district, Tamil Nadu, 600025, India",13.0105838,80.2353736
+Anna University,"Anna University, Chennai","Anna University, Nuclear Physics Road, Srinagar Colony, Ward 171, Zone 13 Adyar, Chennai, Chennai district, Tamil Nadu, 600025, India",13.0105838,80.2353736
+Anna University Chennai,Anna University Chennai,"Anna University, Nuclear Physics Road, Srinagar Colony, Ward 171, Zone 13 Adyar, Chennai, Chennai district, Tamil Nadu, 600025, India",13.0105838,80.2353736
+Anna University Chennai,"Anna University Chennai, India","Anna University, Nuclear Physics Road, Srinagar Colony, Ward 171, Zone 13 Adyar, Chennai, Chennai district, Tamil Nadu, 600025, India",13.0105838,80.2353736
+Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.9588934957528
+Aristotle University of Thessaloniki,"Aristotle University of Thessaloniki, GR-54124 Thessaloniki, Greece","Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.9588934957528
+Aristotle University of Thessaloniki,"Aristotle University of Thessaloniki, Greece","Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.9588934957528
+Aristotle University of Thessaloniki,"Aristotle University of Thessaloniki, Thessaloniki, 54124, Greece","Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.9588934957528
+Aristotle University of Thessaloniki,"Aristotle University of Thessaloniki, Thessaloniki, Greece","Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.9588934957528
+Aristotle University of Thessaloniki GR,Aristotle University of Thessaloniki GR,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.9588934957528
+Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.676531568996
+Arizona State University,"Arizona State University, AZ, USA","Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.676531568996
+Arizona State University,"Arizona State University, Tempe AZ","Arizona State University, Palm Walk, Tempe, Maricopa County, Arizona, 85287, USA",33.4206602,-111.932634924965
+Arizona State University,"Arizona State University, Tempe, AZ, USA","Arizona State University, Palm Walk, Tempe, Maricopa County, Arizona, 85287, USA",33.4206602,-111.932634924965
+Asia Pacific University of Technology and Innovation,Asia Pacific University of Technology and Innovation,"Asia Pacific University of Technology and Innovation (APU), Astro North Entrance, Astro, Sungai Besi, KL, 57000, Malaysia",3.0552109,101.7005831
+Asia Pacific University of Technology and Innovation,"Asia Pacific University of Technology and Innovation, Kuala Lumpur 57000, Malaysia","Asia Pacific University of Technology and Innovation (APU), Astro North Entrance, Astro, Sungai Besi, KL, 57000, Malaysia",3.0552109,101.7005831
+Assiut University,Assiut University,"Assiut University, El Shaheed Ellwaa Hasn Kamel street, الوليدية, أسيوط, مصر",27.18794105,31.1700949818453
+Assiut University,"Assiut University, Asyut, Egypt","Assiut University, El Shaheed Ellwaa Hasn Kamel street, الوليدية, أسيوط, مصر",27.18794105,31.1700949818453
+Aston University,Aston University,"Aston University, Aston Street, Digbeth, Birmingham, West Midlands Combined Authority, West Midlands, England, B4, UK",52.48620785,-1.88849915088515
+Aston University,"Aston University, Birmingham, U.K.","Aston University, Aston Street, Digbeth, Birmingham, West Midlands Combined Authority, West Midlands, England, B4, UK",52.48620785,-1.88849915088515
+Australian Institute of Sport,Australian Institute of Sport,"Australian Institute of Sport, Glenn McGrath Street, Bruce, Belconnen, Australian Capital Territory, 2617, Australia",-35.24737535,149.104454269689
+Australian National University,Australian National University,"Australian National University, 52, Collins Street, Melbourne City, City of Melbourne, Victoria, 3000, Australia",-37.81354365,144.971791681654
+Australian National University,"Australian National University, Canberra","Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia",-35.28121335,149.11665331324
+Australian National University,"Australian National University, Canberra, ACT 0200, Australia","Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia",-35.28121335,149.11665331324
+Azad University,Azad University,"پل دانشگاه آزاد, باراجین, پونک ۳, قزوین, بخش مرکزی, شهرستان قزوین, استان قزوین, ‏ایران‎",36.3173432,50.0367286
+Azad University,"Azad University, Qazvin, Iran","پل دانشگاه آزاد, باراجین, پونک ۳, قزوین, بخش مرکزی, شهرستان قزوین, استان قزوین, ‏ایران‎",36.3173432,50.0367286
+Azad University,"Central Tehran Branch, Azad University","دانشگاه آزاد شعبه مرکزی تربیت بدنی, بلوار ایران زمین, شهرک غرب, منطقه ۲ شهر تهران, تهران, بخش رودبارقصران, شهرستان شمیرانات, استان تهران, 14658, ‏ایران‎",35.753318,51.370631
+B.S. University of Central Florida,B.S. University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.1971250118395
+Bahcesehir University,Bahcesehir University,"BAU Galata, 24, Kemeraltı Caddesi, Müeyyedzade, Beyoğlu, İstanbul, Marmara Bölgesi, 34425, Türkiye",41.02451875,28.9769795349346
+Bahcesehir University,"Bahcesehir University, Istanbul, Turkey","BAU Galata, 24, Kemeraltı Caddesi, Müeyyedzade, Beyoğlu, İstanbul, Marmara Bölgesi, 34425, Türkiye",41.02451875,28.9769795349346
+Banaras Hindu University,Banaras Hindu University,"काशी हिन्दू विश्वविद्यालय, Semi Circle Road 2, ワーラーナシー, Jodhpur Colony, Vārānasi, Varanasi, Uttar Pradesh, 221005, India",25.2662887,82.9927969
+Bangalore Institute of Technology,Bangalore Institute of Technology,"Bangalore Institute of Technology, Krishna Rajendra Road, Mavalli, Vishveshwara Puram, South Zone, Bengaluru, Bangalore Urban, Karnataka, 560004, India",12.9551259,77.5741985
+"Bapuji Institute of Engineering and Technology Davanagere, Karnataka, India","Bapuji Institute of Engineering and Technology Davanagere, Karnataka, India","Bapuji Institute of Engineering and Technology, 2nd Cross Road, K.T. Jambanna Nagara, Davanagere, Davanagere taluku, Davanagere district, Karnataka, 577000, India",14.4443949,75.9027655185535
+Bar Ilan University,Bar Ilan University,"אוניברסיטת בר אילן, כביש גהה, גבעת שמואל, קריית מטלון, גבעת שמואל, מחוז תל אביב, NO, ישראל",32.06932925,34.8433433861531
+Bar Ilan University,"Bar Ilan University, Israel","אוניברסיטת בר אילן, כביש גהה, גבעת שמואל, קריית מטלון, גבעת שמואל, מחוז תל אביב, NO, ישראל",32.06932925,34.8433433861531
+Bas kent University,Bas kent University,"University College Utrecht 'Babel', 7, Campusplein, Utrecht, Nederland, 3584 ED, Nederland",52.08340265,5.14828494152362
+Beckman Institute,Beckman Institute,"Beckman Institute, The Presidents' Walk, Urbana, Champaign County, Illinois, 61801-2341, USA",40.11571585,-88.2275077179639
+Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.9808333,116.341012492788
+Beihang University,"Beihang University, Beijing 100191, China","北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.9808333,116.341012492788
+Beihang University,"Beihang University, Beijing, China","北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.9808333,116.341012492788
+Beijing Institute of Technology University,Beijing Institute of Technology University,"北京理工大学, 5, 中关村南大街, 中关村, 稻香园南社区, 海淀区, 北京市, 100872, 中国",39.9586652,116.309712808455
+Beijing Institute of Technology University,"Beijing Institute of Technology University, P. R. China","北京理工大学, 5, 中关村南大街, 中关村, 稻香园南社区, 海淀区, 北京市, 100872, 中国",39.9586652,116.309712808455
+"Beijing Institute of Technology, Beijing 100081 CHINA","Beijing Institute of Technology, Beijing 100081 CHINA","北京理工大学, 5, 中关村南大街, 中关村, 稻香园南社区, 海淀区, 北京市, 100872, 中国",39.9586652,116.309712808455
+"Beijing Institute of Technology, Beijing, China","Beijing Institute of Technology, Beijing, China","北京理工大学, 5, 中关村南大街, 中关村, 稻香园南社区, 海淀区, 北京市, 100872, 中国",39.9586652,116.309712808455
+"Beijing Institute of Technology, China","Beijing Institute of Technology, China","北京理工大学, 5, 中关村南大街, 中关村, 稻香园南社区, 海淀区, 北京市, 100872, 中国",39.9586652,116.309712808455
+Beijing Jiaotong University,Beijing Jiaotong University,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国",39.94976005,116.33629045844
+Beijing Jiaotong University,"Beijing Jiaotong University, Beijing, 100044, China","北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国",39.94976005,116.33629045844
+Beijing Normal University,Beijing Normal University,"北京师范大学, 19, 新街口外大街, 西城区, 100875, 中国",39.96014155,116.359704380265
+Beijing Normal University,"Beijing Normal University, China","北京师范大学, 19, 新街口外大街, 西城区, 100875, 中国",39.96014155,116.359704380265
+Beijing Union University,Beijing Union University,"北京联合大学, 北四环东路, 飘亮阳光广场, 太阳宫乡, 朝阳区 / Chaoyang, 北京市, 100012, 中国",39.9890068,116.420677175386
+Beijing Union University,"Beijing Union University, 100101, China","北京联合大学, 北四环东路, 飘亮阳光广场, 太阳宫乡, 朝阳区 / Chaoyang, 北京市, 100012, 中国",39.9890068,116.420677175386
+Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.9601488,116.351939210403
+Beijing University of Posts and Telecommunications,"Beijing University of Posts and Telecommunications, Beijing","北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.9601488,116.351939210403
+Beijing University of Posts and Telecommunications,"Beijing University of Posts and Telecommunications, Beijing, China","北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.9601488,116.351939210403
+Beijing University of Posts and Telecommunications,"Beijing University of Posts and Telecommunications, Beijing, P.R. China","北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.9601488,116.351939210403
+Beijing University of Posts and Telecommunications,"Beijing University of Posts and Telecommunications, China","北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.9601488,116.351939210403
+Beijing University of Technology,Beijing University of Technology,"北京工业大学, 银杏大道, 大郊亭村, 朝阳区 / Chaoyang, 北京市, 3208, 中国",39.87391435,116.477222846574
+Beijing University of Technology,"Beijing University of Technology, Beijing 100022, China","北京工业大学, 银杏大道, 大郊亭村, 朝阳区 / Chaoyang, 北京市, 3208, 中国",39.87391435,116.477222846574
+"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.906217,116.3912757
+"Beijing, Haidian, China","Beijing, Haidian, China","北京师范大学, 19, 新街口外大街, 西城区, 100875, 中国",39.96014155,116.359704380265
+Benha University,Benha University,"كلية الهندسة بشبرا جامعة بنها, شارع اليازجي, روض الفرج, القاهرة, محافظة القاهرة, 2466, مصر",30.0818727,31.2445484105016
+Benha University,"Benha University, Egypt","كلية الهندسة بشبرا جامعة بنها, شارع اليازجي, روض الفرج, القاهرة, محافظة القاهرة, 2466, مصر",30.0818727,31.2445484105016
+Bharathidasan University,Bharathidasan University,"Bharathidasan University Road, Kajamalai, Ponmalai, Ponmalai Zone, Tiruchchirāppalli, Tiruchchirappalli district, Tamil Nadu, 620020, India",10.7778845,78.6966319
+Bharathidasan University,"Bharathidasan University, Trichy, India","Bharathidasan University Road, Kajamalai, Ponmalai, Ponmalai Zone, Tiruchchirāppalli, Tiruchchirappalli district, Tamil Nadu, 620020, India",10.7778845,78.6966319
+Bielefeld University,Bielefeld University,"Fachhochschule Bielefeld FB Gestaltung, 3, Lampingstraße, Mitte, Bielefeld, Regierungsbezirk Detmold, Nordrhein-Westfalen, 33615, Deutschland",52.0280421,8.51148270115395
+Bilkent University,Bilkent University,"Bilkent Üniversitesi, 3. Cadde, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.8720489,32.7539515466323
+Bilkent University,"Bilkent University, 06800 Cankaya, Turkey","Bilkent Üniversitesi, 3. Cadde, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.8720489,32.7539515466323
+Binghamton University,Binghamton University,"Binghamton University Downtown Center, Washington Street, Downtown, Binghamton, Broome County, New York, 13901, USA",42.0958077,-75.9145568939543
+Binghamton University,"Binghamton University, Binghamton, NY","Binghamton University Downtown Center, Washington Street, Downtown, Binghamton, Broome County, New York, 13901, USA",42.0958077,-75.9145568939543
+Bogazici University,Bogazici University,"Boğaziçi Üniversitesi Kuzey Yerleşkesi, Okulaltı 1. Sokak, Rumelihisarı, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34467, Türkiye",41.0868841,29.0441316722649
+Bogazici University,"Bogazici University, Bebek","Boğaziçi Üniversitesi Kuzey Yerleşkesi, Okulaltı 1. Sokak, Rumelihisarı, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34467, Türkiye",41.0868841,29.0441316722649
+Bogazici University,"Bogazici University, Turkey","Boğaziçi Üniversitesi Güney Yerleşkesi, Sehitlikdergahı Sokağı, Beşiktaş, İstanbul, Marmara Bölgesi, 33345, Türkiye",41.08327335,29.0503931951846
+Boston College,Boston College,"Boston College, 140, Commonwealth Avenue, Chestnut Hill, Newton, Middlesex County, Massachusetts, 02467, USA",42.3354481,-71.1681386402306
+"Boston College, USA","Boston College, USA","Boston College, 140, Commonwealth Avenue, Chestnut Hill, Newton, Middlesex County, Massachusetts, 02467, USA",42.3354481,-71.1681386402306
+Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.3504253,-71.1005611418395
+Boston University,"Boston University, Boston, MA","BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.3504253,-71.1005611418395
+Boston University,"Boston University, USA","BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.3504253,-71.1005611418395
+Bournemouth University,Bournemouth University,"Bournemouth University, BU footpaths, Poole, South West England, England, BH10 4HX, UK",50.74223495,-1.89433738695589
+Bournemouth University,"Bournemouth University, UK","Bournemouth University, BU footpaths, Poole, South West England, England, BH10 4HX, UK",50.74223495,-1.89433738695589
+Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.8268682,-71.4012314581107
+Brown University,"Brown University, Providence Rhode Island, 02912, USA","Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.8268682,-71.4012314581107
+Brown University,"Brown University, Providence, RI","Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.8268682,-71.4012314581107
+Brown University,"Brown University, United States","Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.8268682,-71.4012314581107
+Brunel University,Brunel University,"Brunel University London, The Strip, Hillingdon, London, Greater London, England, UB8 3PH, UK",51.53255315,-0.473993562050575
+CALIFORNIA INSTITUTE OF TECHNOLOGY,CALIFORNIA INSTITUTE OF TECHNOLOGY,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.125274866116
+CARNEGIE MELLON UNIVERSITY,CARNEGIE MELLON UNIVERSITY,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.4102193,-122.059654865858
+COLUMBIA UNIVERSITY,COLUMBIA UNIVERSITY,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.8419836,-73.9436897071772
+COMSATS Institute of Information Technology,COMSATS Institute of Information Technology,"COMSATS Institute of Information Technology, Ali Akbar Road, Dawood Residency, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54700, ‏پاکستان‎",31.4006332,74.2137296
+"COMSATS Institute of Information Technology, Islamabad","COMSATS Institute of Information Technology, Islamabad","COMSATS Institute of Information Technology, Fence, Chak Shehzad, وفاقی دارالحکومت اسلام آباد, 45550, ‏پاکستان‎",33.65010145,73.1551494914791
+"COMSATS Institute of Information Technology, Lahore 54000, Pakistan","COMSATS Institute of Information Technology, Lahore 54000, Pakistan","COMSATS Institute of Information Technology, Ali Akbar Road, Dawood Residency, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54700, ‏پاکستان‎",31.4006332,74.2137296
+"COMSATS Institute of Information Technology, Pakistan","COMSATS Institute of Information Technology, Pakistan","COMSATS Institute of Information Technology, Ali Akbar Road, Dawood Residency, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54700, ‏پاکستان‎",31.4006332,74.2137296
+CUNY City College,CUNY City College,"Cuny, La Tour-du-Pin, Isère, Auvergne-Rhône-Alpes, France métropolitaine, 38110, France",45.5546608,5.4065255
+California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.125274866116
+"California Institute of Technology, Pasadena, CA","California Institute of Technology, Pasadena, CA","California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.125274866116
+"California Institute of Technology, Pasadena, CA, USA","California Institute of Technology, Pasadena, CA, USA","California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.125274866116
+"California Institute of Technology, Pasadena, California, USA","California Institute of Technology, Pasadena, California, USA","California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.125274866116
+"California Institute of Technology, USA","California Institute of Technology, USA","California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.125274866116
+"Callaghan, NSW 2308, Australia","Callaghan, NSW 2308, Australia","Callaghan, Newcastle-Maitland, Newcastle, NSW, 2308, Australia",-32.8892352,151.6998983
+Cambridge Research Laboratory,Cambridge Research Laboratory,"Strangeways Research Laboratory, Babraham Road, Romsey, Cambridge, Cambridgeshire, East of England, England, CB1 8RN, UK",52.17333465,0.149899463173698
+Cambridge University,Cambridge University,"University, Cambridge Road, Old Portsmouth, Portsmouth, South East, England, PO1 2HB, UK",50.7944026,-1.0971748
+Capital Normal University,Capital Normal University,"首都师范大学, 岭南路, 西冉村, 海淀区, 100048, 中国",39.92864575,116.30104052087
+Capital Normal University,"Capital Normal University, 100048, China","首都师范大学, 岭南路, 西冉村, 海淀区, 100048, 中国",39.92864575,116.30104052087
+Cardi University,Cardi University,"CARDI, University of the West Indies, Saint Augustine, Tunapuna-Piarco, 686, Trinidad and Tobago",10.6435074,-61.4022996445292
+Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.4879961,-3.17969747443907
+Cardiff University,"Cardiff University, UK","Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.4879961,-3.17969747443907
+Carleton University,Carleton University,"Carleton University, 1125, Colonel By Drive, Billings Bridge, Capital, Ottawa, Ontario, K1S 5B7, Canada",45.3860843,-75.6953926739404
+Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.4102193,-122.059654865858
+Carnegie Mellon University,"Carnegie Mellon University, Pittsburgh PA","Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA",40.4441619,-79.942728259225
+Carnegie Mellon University,"Carnegie Mellon University, Pittsburgh, PA","Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA",40.4441619,-79.942728259225
+Carnegie Mellon University,"Carnegie Mellon University, Pittsburgh, PA 15213, USA","Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA",40.4441619,-79.942728259225
+Carnegie Mellon University,"Carnegie Mellon University, Pittsburgh, PA, 15213, USA","Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA",40.4441619,-79.942728259225
+Carnegie Mellon University,"Carnegie Mellon University, Pittsburgh, PA, USA","Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA",40.4441619,-79.942728259225
+Carnegie Mellon University,"Carnegie Mellon University, Pittsburgh, Pennsylvania 15213, USA","Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA",40.4441619,-79.942728259225
+Carnegie Mellon University,"Carnegie Mellon University, Pittsburgh, USA","Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA",40.4441619,-79.942728259225
+Carnegie Mellon University,"Carnegie Mellon University, USA","Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.4102193,-122.059654865858
+Carnegie Mellon University Pittsburgh,Carnegie Mellon University Pittsburgh,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA",40.4441619,-79.942728259225
+Carnegie Mellon University Pittsburgh,"Carnegie Mellon University Pittsburgh, PA - 15213, USA","Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA",40.4441619,-79.942728259225
+Carnegie Mellon University Pittsburgh,"Carnegie Mellon University Pittsburgh, PA, USA","Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA",40.4441619,-79.942728259225
+Central Washington University,Central Washington University,"Central Washington University, Dean Nicholson Boulevard, Ellensburg, Kittitas County, Washington, 98926, USA",47.00646895,-120.53673039883
+Chang Gung University,Chang Gung University,"長庚科技大學林口校區, 261, 文化一路, A7合宜住宅, 樂善里, 木尾, 龜山區, 桃園市, 33301, 臺灣",25.030438,121.390095126629
+Chang Gung University,"Chang Gung University, Taoyuan, Taiwan","長庚科技大學林口校區, 261, 文化一路, A7合宜住宅, 樂善里, 木尾, 龜山區, 桃園市, 33301, 臺灣",25.030438,121.390095126629
+Charles Sturt University,Charles Sturt University,"Charles Sturt University, Wagga Wagga, NSW, 2678, Australia",-35.0636071,147.3552234
+China University of Mining and Technology,China University of Mining and Technology,"China University of Mining and Technology, 1号, 大学路, 泉山区 (Quanshan), 徐州市 / Xuzhou, 江苏省, 221116, 中国",34.2152538,117.1398541
+China University of Mining and Technology,"China University of Mining and Technology, Xuzhou, China","China University of Mining and Technology, 1号, 大学路, 泉山区 (Quanshan), 徐州市 / Xuzhou, 江苏省, 221116, 中国",34.2152538,117.1398541
+Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.0044795,116.370238
+"Chinese Academy of Sciences, Beijing","Chinese Academy of Sciences, Beijing","中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.0044795,116.370238
+"Chinese Academy of Sciences, China","Chinese Academy of Sciences, China","中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.0044795,116.370238
+Chittagong University of Engineering and Technology,Chittagong University of Engineering and Technology,"Shaheed Tareq Huda Hall, Goal Chattar, চট্টগ্রাম, চট্টগ্রাম জেলা, চট্টগ্রাম বিভাগ, 4349, বাংলাদেশ",22.46221665,91.9694226317318
+Chittagong University of Engineering and Technology,"Chittagong University of Engineering and Technology, Chittagong, 4349, Bangladesh","Shaheed Tareq Huda Hall, Goal Chattar, চট্টগ্রাম, চট্টগ্রাম জেলা, চট্টগ্রাম বিভাগ, 4349, বাংলাদেশ",22.46221665,91.9694226317318
+Chonbuk National University,Chonbuk National University,"전북대학교, 567, 백제대로, 금암동, 덕진구, 전주시, 전북, 54896, 대한민국",35.84658875,127.135013303058
+Chonbuk National University,"Chonbuk National University, Jeonju-si","전북대학교, 567, 백제대로, 금암동, 덕진구, 전주시, 전북, 54896, 대한민국",35.84658875,127.135013303058
+Chongqing University,Chongqing University,"重庆工商大学, 19, 翠林路, 重庆市, 重庆市中心, 南岸区 (Nan'an), 重庆市, 400067, 中国",29.5084174,106.578585515028
+Chongqing University,"Chongqing University, China","重庆工商大学, 19, 翠林路, 重庆市, 重庆市中心, 南岸区 (Nan'an), 重庆市, 400067, 中国",29.5084174,106.578585515028
+Chongqing University,"Chongqing University, Chongqing, China","重庆工商大学, 19, 翠林路, 重庆市, 重庆市中心, 南岸区 (Nan'an), 重庆市, 400067, 中国",29.5084174,106.578585515028
+Chongqing University of Posts and Telecommunications,Chongqing University of Posts and Telecommunications,"重庆邮电大学, 崇文路, 渝中区, 黄桷垭, 重庆市中心, 南岸区 (Nan'an), 重庆市, 400065, 中国",29.5357046,106.604824742826
+Chongqing University of Posts and Telecommunications,"Chongqing University of Posts and Telecommunications, Chongqing, China","重庆邮电大学, 崇文路, 渝中区, 黄桷垭, 重庆市中心, 南岸区 (Nan'an), 重庆市, 400065, 中国",29.5357046,106.604824742826
+Chosun University,Chosun University,"조선대역, 서남로, 남동, 동구, 광주, 61473, 대한민국",35.1441031,126.9257858
+"Chu Hai College of Higher Education, Hong Kong","Chu Hai College of Higher Education, Hong Kong","珠海學院 Chu Hai College of Higher Education, 80, 青盈路 Tsing Ying Road, 嘉和里 Ka Wo Lei, 小秀村 Siu Sau Tsuen, 屯門區 Tuen Mun District, 新界 New Territories, HK, DD132 586, 中国",22.3760643,113.987153890134
+"Chu Hai College of Higher Education, Tsuen Wan, Hong Kong","Chu Hai College of Higher Education, Tsuen Wan, Hong Kong","珠海學院, 80, 青山公路-青山灣段 Castle Peak Road – Castle Peak Bay, 良田村 Leung Tin Tsuen, 青山灣 Castle Peak Bay, 小秀村 Siu Sau Tsuen, 屯門區 Tuen Mun District, 新界 New Territories, HK, DD132 586, 中国",22.375601,113.987140797925
+Chubu University,Chubu University,"中部大学, 国道19号, 春日井市, 愛知県, 中部地方, 487-8501, 日本",35.2742655,137.013278412463
+Chulalongkorn University,Chulalongkorn University,"จุฬาลงกรณ์มหาวิทยาลัย, 254, ถนนพญาไท, สยาม, แขวงปทุมวัน, เขตปทุมวัน, กรุงเทพมหานคร, 10330, ประเทศไทย",13.74311795,100.532879009091
+Chulalongkorn University,"Chulalongkorn University, Bangkok","จุฬาลงกรณ์มหาวิทยาลัย, 254, ถนนพญาไท, สยาม, แขวงปทุมวัน, เขตปทุมวัน, กรุงเทพมหานคร, 10330, ประเทศไทย",13.74311795,100.532879009091
+Chulalongkorn University Bangkok,Chulalongkorn University Bangkok,"จุฬาลงกรณ์มหาวิทยาลัย, 254, ถนนพญาไท, สยาม, แขวงปทุมวัน, เขตปทุมวัน, กรุงเทพมหานคร, 10330, ประเทศไทย",13.74311795,100.532879009091
+Chulalongkorn University Bangkok,"Chulalongkorn University Bangkok, Thailand","จุฬาลงกรณ์มหาวิทยาลัย, 254, ถนนพญาไท, สยาม, แขวงปทุมวัน, เขตปทุมวัน, กรุงเทพมหานคร, 10330, ประเทศไทย",13.74311795,100.532879009091
+Chung-Ang University,Chung-Ang University,"중앙대학교, 서달로15길, 흑석동, 동작구, 서울특별시, 06981, 대한민국",37.50882,126.9619
+Chung-Ang University,"Chung-Ang University, Seoul, Korea","중앙대학교, 서달로15길, 흑석동, 동작구, 서울특별시, 06981, 대한민국",37.50882,126.9619
+Chung-Ang University,"Chung-Ang University, Seoul, South Korea","중앙대학교, 서달로15길, 흑석동, 동작구, 서울특별시, 06981, 대한민국",37.50882,126.9619
+Chungnam National University,Chungnam National University,"충남대학교, 대덕사이언스길 2코스, 온천2동, 온천동, 유성구, 대전, 34140, 대한민국",36.37029045,127.347804575184
+City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.169702912423
+City University of Hong Kong,"City University of Hong Kong, Hong Kong","香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.169702912423
+City University of Hong Kong,"City University of Hong Kong, Hong Kong, China","香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.169702912423
+City University of Hong Kong,"City University of Hong Kong, Kowloon, Hong Kong","香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.169702912423
+Clemson University,Clemson University,"Clemson University, Old Stadium Road, Clemson Heights, Pickens County, South Carolina, 29631, USA",34.66869155,-82.837434756078
+Clemson University,"Clemson University, Clemson, SC","E-06 Parking, Parkway Drive, Pickens County, South Carolina, SC, USA",34.67871075,-82.8346790794026
+Coburg University,Coburg University,"Hochschule für angewandte Wissenschaften Coburg, 2, Friedrich-Streib-Straße, Callenberg, Coburg, Oberfranken, Bayern, 96450, Deutschland",50.26506145,10.9519648264628
+"College Heights Blvd, Bowling Green, KY","College Heights Blvd, Bowling Green, KY","College Heights Boulevard, Bowling Green, Warren County, Kentucky, 42101, USA",36.9881671,-86.4542111
+"College Park, MD","College Park, MD","College Park, Prince George's County, Maryland, USA",38.980666,-76.9369189
+"College Park, MD 20742 USA","College Park, MD 20742 USA","College Park, Prince George's County, Maryland, USA",38.980666,-76.9369189
+"College Park, MD, 20740, USA","College Park, MD, 20740, USA","College Park, Prince George's County, Maryland, USA",38.980666,-76.9369189
+"College Park, Maryland","College Park, Maryland","College Park, Prince George's County, Maryland, USA",38.980666,-76.9369189
+"College Park, USA","College Park, USA","College Park, Prince George's County, Maryland, USA",38.980666,-76.9369189
+"College Park, United States","College Park, United States","College Park, Prince George's County, Maryland, USA",38.980666,-76.9369189
+College of Computer and Information Science,College of Computer and Information Science,"Computer & Information Science, John Montieth Boulevard, Dearborn, Wayne County, Michigan, 48128, USA",42.3192923,-83.2343465549018
+College of Computing,College of Computing,"computing, Tunguu, Unguja Kusini, Zanzibar, 146, Tanzania",-6.1992922,39.3081862
+College of Electrical and Information Engineering,College of Electrical and Information Engineering,"Факултет за електротехника и информациски технологии, Орце Николов, Карпош 2, Карпош, Скопје, Општина Карпош, Град Скопје, Скопски Регион, 1000, Македонија",42.0049791,21.40834315
+"College of Engineering Pune, India","College of Engineering Pune, India","College of Engineering, Pune, NH753F, Mangalwar Peth, Pune, Pune District, Maharashtra, 411011, India",18.52930005,73.8568253702551
+College of Engineering and Computer Science,College of Engineering and Computer Science,"ECS, University Drive, Sweetwater, Lil Abner Mobile Home Park, Miami-Dade County, Florida, 33199, USA",25.7589624,-80.3738881489383
+"College of Engineering, Pune, India","College of Engineering, Pune, India","College of Engineering, Pune, NH753F, Mangalwar Peth, Pune, Pune District, Maharashtra, 411011, India",18.52930005,73.8568253702551
+College of Informatics,College of Informatics,"Informatics, F.P. Felix Avenue, Dela Paz, San Isidro, Cainta, Rizal, Metro Manila, 1900, Philippines",14.6173885,121.101327315511
+Colorado State University,Colorado State University,"Colorado State University, West Pitkin Street, Woodwest, Fort Collins, Larimer County, Colorado, 80526-2002, USA",40.5709358,-105.086552556269
+Colorado State University,"Colorado State University, Fort Collins","Colorado State University, West Pitkin Street, Woodwest, Fort Collins, Larimer County, Colorado, 80526-2002, USA",40.5709358,-105.086552556269
+Colorado State University,"Colorado State University, Fort Collins, Colorado, USA","Colorado State University, West Pitkin Street, Woodwest, Fort Collins, Larimer County, Colorado, 80526-2002, USA",40.5709358,-105.086552556269
+Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.8419836,-73.9436897071772
+Columbia University,"Columbia University, New York","Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.8419836,-73.9436897071772
+Columbia University,"Columbia University, New York NY 10027, USA","Columbia University, West 131st Street, Manhattanville Houses, Manhattanville, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10027, USA",40.81779415,-73.9578531933627
+Columbia University,"Columbia University, New York, NY","Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.8419836,-73.9436897071772
+Columbia University,"Columbia University, New York, NY 10027, USA","Columbia University, West 131st Street, Manhattanville Houses, Manhattanville, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10027, USA",40.81779415,-73.9578531933627
+Columbia University,"Columbia University, New York, NY, USA","Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.8419836,-73.9436897071772
+Columbia University,"Columbia University, New York, USA","Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.8419836,-73.9436897071772
+Columbia University,"Columbia University, USA","Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.8419836,-73.9436897071772
+Columbia University,"Columbia University, United States","Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.8419836,-73.9436897071772
+Columbia University in the City of New York,Columbia University in the City of New York,"Columbia University In The City Of New York, College Walk, Morningside Heights, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10027, USA",40.8071772,-73.9625279772072
+Communication University of China,Communication University of China,"中国传媒大学, 朝阳路, 定福庄, 朝阳区 / Chaoyang, 北京市, 100024, 中国",39.91199955,116.551891408714
+Communication University of China,"Communication University of China, Beijing, China","中国传媒大学, 朝阳路, 定福庄, 朝阳区 / Chaoyang, 北京市, 100024, 中国",39.91199955,116.551891408714
+Concordia University,Concordia University,"Concordia University, 2811, Northeast Holman Street, Concordia, Portland, Multnomah County, Oregon, 97211, USA",45.57022705,-122.637093463826
+Concordia University,"Concordia University, Canada","FOFA Gallery, 1515, Rue Sainte-Catherine Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3H 2T2, Canada",45.4955911,-73.5775043
+Concordia University,"Concordia University, Montreal, QC, Canada","FOFA Gallery, 1515, Rue Sainte-Catherine Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3H 2T2, Canada",45.4955911,-73.5775043
+Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.4505507,-76.4783512955428
+Cornell University,"Cornell University, Ithaca, NY, USA","Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.4505507,-76.4783512955428
+Cornell University,"Cornell University, Ithaca, New York","Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.4505507,-76.4783512955428
+Cornell University,"Cornell University, USA","Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.4505507,-76.4783512955428
+Courant Institute,Courant Institute,"NYU Courant Institute of Mathematical Sciences, 251, Mercer Street, Washington Square Village, Greenwich Village, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.7286994,-73.9957151
+Courant Institute of Mathematical Sciences,Courant Institute of Mathematical Sciences,"Courant Institute of Mathematical Sciences, 251, Mercer Street, Washington Square Village, Greenwich Village, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.7286484,-73.9956863
+"Courant Institute of Mathematical Sciences, New York, NY","Courant Institute of Mathematical Sciences, New York, NY","Courant Institute of Mathematical Sciences, 251, Mercer Street, Washington Square Village, Greenwich Village, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.7286484,-73.9956863
+Curtin University,Curtin University,"Curtin University, Brand Drive, Waterford, Perth, Western Australia, 6102, Australia",-32.00686365,115.89691775
+Curtin University,"Curtin University, Perth WA 6102, Australia","Curtin University, Brand Drive, Waterford, Perth, Western Australia, 6102, Australia",-32.00686365,115.89691775
+Curtin University,"Curtin University, Perth WA, Australia","A1, Beazley Avenue, Karawara, Perth, Western Australia, 6102, Australia",-32.00319745,115.891774804686
+Curtin University,"Curtin University, Perth, Australia","Curtin University, B201 L2 Entry South, Waterford, Perth, Western Australia, 6102, Australia",-32.00574155,115.892864389257
+Curtin University,"Curtin University, Perth, Western Australia 6012","A1, Beazley Avenue, Karawara, Perth, Western Australia, 6102, Australia",-32.00319745,115.891774804686
+Cyprus University of Technology,Cyprus University of Technology,"Mitropoli Building - Cyprus University of Technology, Anexartisias, Limasol - Λεμεσός, Limassol - Λεμεσός, Κύπρος - Kıbrıs, 3036, Κύπρος - Kıbrıs",34.67567405,33.0457764820597
+Cyprus University of Technology,"Cyprus University of Technology, Cyprus","Mitropoli Building - Cyprus University of Technology, Anexartisias, Limasol - Λεμεσός, Limassol - Λεμεσός, Κύπρος - Kıbrıs, 3036, Κύπρος - Kıbrıs",34.67567405,33.0457764820597
+Czech Technical University,Czech Technical University,"České vysoké učení technické v Praze, Resslova, Nové Město, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 11121, Česko",50.0764296,14.418023122743
+DIT UNIVERSITY,DIT UNIVERSITY,"DIT University, Dehradun-Mussoorie Road, Rājpur, Kincraig, Dehra Dūn, Uttarakhand, 248009, India",30.3983396,78.0753455
+DIT UNIVERSITY,"DIT UNIVERSITY, DEHRADUN","DIT University, Dehradun-Mussoorie Road, Rājpur, Kincraig, Dehra Dūn, Uttarakhand, 248009, India",30.3983396,78.0753455
+DUBLIN CITY UNIVERSITY,DUBLIN CITY UNIVERSITY,"Dublin City University Glasnevin Campus, Lower Car Park, Wad, Whitehall A ED, Dublin 9, Dublin, County Dublin, Leinster, D09 FW22, Ireland",53.38522185,-6.25740874081493
+Dalian University of Technology,Dalian University of Technology,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国",38.88140235,121.522810980755
+Dalian University of Technology,"Dalian University of Technology, China","大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国",38.88140235,121.522810980755
+Dalian University of Technology,"Dalian University of Technology, Dalian 116024, China","大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国",38.88140235,121.522810980755
+Dalian University of Technology,"Dalian University of Technology, Dalian, China","大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国",38.88140235,121.522810980755
+Dalian University of Technology,"Dalian University of Technology, Dalian, Liaoning, 116024, China","大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国",38.88140235,121.522810980755
+Dankook University,Dankook University,"단국대학교 치과병원, 죽전로, 죽전동, 수지구, 용인시, 경기, 16900, 대한민국",37.3219575,127.1250723
+Dankook University,"Dankook University, 126 Jukjeon-dong, Suji-gu, Yongin-si, Gyeonggi-do, Korea","단국대학교 치과병원, 죽전로, 죽전동, 수지구, 용인시, 경기, 16900, 대한민국",37.3219575,127.1250723
+Dankook University,"Dankook University, Yongin, South Korea","단국대학교 치과병원, 죽전로, 죽전동, 수지구, 용인시, 경기, 16900, 대한민국",37.3219575,127.1250723
+Dartmouth College,Dartmouth College,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA",43.7047927,-72.2925909
+"Dartmouth College, NH 03755 USA","Dartmouth College, NH 03755 USA","Dartmouth College, Maynard Street, Hanover, Grafton County, New Hampshire, 03755, USA",43.7070046,-72.2869048
+Deakin University,Deakin University,"Deakin University, Pigdons Lane, Waurn Ponds, Geelong, City of Greater Geelong, Barwon South West, Victoria, 3216, Australia",-38.19928505,144.303652287331
+Deakin University,"Deakin University, Geelong, VIC 3216, Australia","Deakin University, Pigdons Lane, Waurn Ponds, Geelong, City of Greater Geelong, Barwon South West, Victoria, 3216, Australia",-38.19928505,144.303652287331
+Delft University of Technology,Delft University of Technology,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland",51.99882735,4.37396036815404
+Delft University of Technology,"Delft University of Technology, Mekelweg 4, Netherlands","TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland",51.99882735,4.37396036815404
+Delft University of Technology,"Delft University of Technology, The Netherlands","TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland",51.99882735,4.37396036815404
+Democritus University of Thrace,Democritus University of Thrace,"Δημοκρίτειο Πανεπιστήμιο Θράκης, Μάκρη - Αλεξανδρούπολη, Αλεξανδρούπολη, Δήμος Αλεξανδρούπολης, Περιφερειακή Ενότητα Έβρου, Περιφέρεια Ανατολικής Μακεδονίας και Θράκης, Μακεδονία - Θράκη, 68100, Ελλάδα",40.84941785,25.8344493892098
+"Dermalog Identification Systems GmbH, Hamburg, Germany","Dermalog Identification Systems GmbH, Hamburg, Germany","DERMALOG Identification Systems GmbH, 120, Mittelweg, Rotherbaum, Eimsbüttel, Hamburg, 20148, Deutschland",53.5722826,9.9947826
+"Deutsche Welle, Bonn, Germany","Deutsche Welle, Bonn, Germany","DW, Gronau, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.7171497,7.12825184326238
+Dhaka University,Dhaka University,"Faculty of Social Welfare, Dhaka University, Azimpur Koborsthan Road, বস্তি, হাজারীবাগ, ঢাকা, ঢাকা বিভাগ, 1950, বাংলাদেশ",23.7317915,90.3805625
+"Disney Research, CH","Disney Research, CH","Disney Research Zürich, 48, Stampfenbachstrasse, Unterstrass, Kreis 6, Zürich, Bezirk Zürich, Zürich, 8006, Schweiz/Suisse/Svizzera/Svizra",47.3804685,8.5430355
+Donghua University,Donghua University,"东华大学, 新华路, 长宁区, 上海市, 210011, 中国",31.2061939,121.410471009388
+Donghua University,"Donghua University, China","东华大学, 新华路, 长宁区, 上海市, 210011, 中国",31.2061939,121.410471009388
+Dr. B. C. Roy Engineering College,Dr. B. C. Roy Engineering College,"Dr. B. C. Roy Engineering College, Lenin Sarani, Durgapur, Bānkurā, West Bengal, 713200, India",23.54409755,87.342697070434
+Dr. Babasaheb Ambedkar Marathwada University,Dr. Babasaheb Ambedkar Marathwada University,"Boys Hostel No. 3, Shantipura road, Cantonment, Bidri workshop, Aurangabad, Maharashtra, 431004, India",19.8960918,75.3089470267316
+Drexel University,Drexel University,"Drexel University, Arch Street, Powelton Village, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.9574,-75.1902670552555
+Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.9990522,-78.9290629011139
+East China Normal University,East China Normal University,"华东师范大学, 3663, 中山北路, 曹家渡, 普陀区, 普陀区 (Putuo), 上海市, 200062, 中国",31.2284923,121.402113889769
+Eastern Mediterranean University,Eastern Mediterranean University,"Eastern Mediterranean University (EMU) - Stadium, Nehir Caddesi, Gazimağusa, Αμμόχωστος - Mağusa, Kuzey Kıbrıs, 99450, Κύπρος - Kıbrıs",35.14479945,33.90492318497
+Eastern University,Eastern University,"Eastern University, Huston Road, Radnor Township, Delaware County, Pennsylvania, 19087, USA",40.0505672,-75.3710932636663
+"Ecole Centrale de Lyon, Lyon, 69134, France","Ecole Centrale de Lyon, Lyon, 69134, France","EC de Lyon, 36, Avenue Guy de Collongue, Écully, Lyon, Métropole de Lyon, Circonscription départementale du Rhône, Auvergne-Rhône-Alpes, France métropolitaine, 69134, France",45.7833631,4.76877035614228
+Edge Hill University,Edge Hill University,"Edge Hill University, St Helens Road, West Lancashire, Lancs, North West England, England, L39 4QP, UK",53.5582155,-2.86904651022128
+Eindhoven University of Technology,Eindhoven University of Technology,"Technische Universiteit Eindhoven, 2, De Rondom, Villapark, Eindhoven, Noord-Brabant, Nederland, 5600 MB, Nederland",51.4486602,5.49039956550805
+Eindhoven University of Technology,"Eindhoven University of Technology, The Netherlands","Technische Universiteit Eindhoven, 2, De Rondom, Villapark, Eindhoven, Noord-Brabant, Nederland, 5600 MB, Nederland",51.4486602,5.49039956550805
+Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.5866784,-101.875392037548
+Elon University,Elon University,"Amphitheater, North Antioch Avenue, Elon, Alamance County, North Carolina, 27244, USA",36.1017956,-79.501733
+Eskisehir Osmangazi University,Eskisehir Osmangazi University,"Eskişehir Osmangazi Üniversitesi Meşelik Yerleşkesi, Kütahya-Eskişehir yolu, Sazova Mahallesi, Karagözler, Tepebaşı, Eskişehir, İç Anadolu Bölgesi, 26160, Türkiye",39.7487516,30.4765307102195
+"Facebook Inc., San Francisco, CA, USA","Facebook Inc., San Francisco, CA, USA","Facebook Inc., San Francisco Bay Trail, Menlo Park, San Mateo County, California, 94025-1246, USA",37.4828007,-122.150711572363
+"Facebook, Singapore","Facebook, Singapore","Ewe Boon back lane, between Palm Spring, City Towers and Wing On Life Garden, Farrer Park Gardens, Novena, Singapore, Central, 259803, Singapore",1.3170417,103.8321041
+Feng Chia University,Feng Chia University,"逢甲大學, 100, 文華路, 西平里, 西屯區, 臺中市, 40724, 臺灣",24.18005755,120.648360719503
+Feng Chia University,"Feng Chia University, Taichung, Taiwan","逢甲大學, 100, 文華路, 西平里, 西屯區, 臺中市, 40724, 臺灣",24.18005755,120.648360719503
+Ferdowsi University of Mashhad,Ferdowsi University of Mashhad,"دانشگاه فردوسی مشهد, بولوار دانش, رضاشهر, منطقه ۹, مشهد, شهرستان مشهد, استان خراسان رضوی, 9177146164, ‏ایران‎",36.3076616,59.5269051097667
+Ferdowsi University of Mashhad,"Ferdowsi University of Mashhad, Mashhad, Iran","دانشگاه فردوسی مشهد, بولوار دانش, رضاشهر, منطقه ۹, مشهد, شهرستان مشهد, استان خراسان رضوی, 9177146164, ‏ایران‎",36.3076616,59.5269051097667
+Firat University,Firat University,"Erzincan Üniversitesi Hukuk Fakültesi Dekanlığı, Sivas-Erzincan yolu, Üçkonak, Erzincan, Erzincan merkez, Erzincan, Doğu Anadolu Bölgesi, 24000, Türkiye",39.7275037,39.4712703382844
+"Florida Institute Of Technology, Melbourne Fl","Florida Institute Of Technology, Melbourne Fl","Florida Institute of Technology, West University Boulevard, Melbourne, Brevard County, Florida, 32901, USA",28.0642296,-80.6230097241205
+"Florida Institute of Technology, Melbourne, USA","Florida Institute of Technology, Melbourne, USA","Florida Institute of Technology, West University Boulevard, Melbourne, Brevard County, Florida, 32901, USA",28.0642296,-80.6230097241205
+Florida International University,Florida International University,"FIU, Southwest 14th Street, Sweetwater, University Park, Miami-Dade County, Florida, 33199, USA",25.75533775,-80.3762889746807
+Florida International University,"Florida International University, Miami, FL","FIU, Southwest 14th Street, Sweetwater, University Park, Miami-Dade County, Florida, 33199, USA",25.75533775,-80.3762889746807
+Florida State University,Florida State University,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA",30.44235995,-84.2974786716626
+Florida State University,"Florida State University, Tallahassee, FL 32306, USA","Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA",30.44235995,-84.2974786716626
+Fordham University,Fordham University,"Fordham University Lincoln Center Campus, West 61st Street, 1 West End Ave trade area, Lincoln Square, Manhattan, Manhattan Community Board 7, New York County, NYC, New York, 10023, USA",40.7710604,-73.9852807046561
+Fordham University,"Fordham University, New York, 10023, USA","Fordham University Lincoln Center Campus, West 61st Street, 1 West End Ave trade area, Lincoln Square, Manhattan, Manhattan Community Board 7, New York County, NYC, New York, 10023, USA",40.7710604,-73.9852807046561
+Foundation University Rawalpindi Campus,Foundation University Rawalpindi Campus,"Foundation University Rawalpindi Campus, Main Parking Road, Police Lines, راولپنڈی, Rawalpindi Cantt, پنجاب, 46600, ‏پاکستان‎",33.5609504,73.0712596618793
+Foundation University Rawalpindi Campus,"Foundation University Rawalpindi Campus, Pakistan","Foundation University Rawalpindi Campus, Main Parking Road, Police Lines, راولپنڈی, Rawalpindi Cantt, پنجاب, 46600, ‏پاکستان‎",33.5609504,73.0712596618793
+Fraser University,Fraser University,"Fraser, 3333, University Avenue Southeast, Prospect Park - East River Road, Minneapolis, Hennepin County, Minnesota, 55414, USA",44.9689836,-93.2094162948556
+Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.500454969435
+Fudan University,"Fudan University, Shanghai, China","复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.500454969435
+GE Global Research,GE Global Research,"GE Global Research Center, Aqueduct, Niskayuna, Schenectady County, New York, USA",42.8298248,-73.8771938492793
+GE Global Research Center,GE Global Research Center,"GE Global Research Center, Aqueduct, Niskayuna, Schenectady County, New York, USA",42.8298248,-73.8771938492793
+"GIPSA-Lab, Grenoble, France","GIPSA-Lab, Grenoble, France","GIPSA-lab, 11, Rue des Mathématiques, Médiat Rhône-Alpes, Saint-Martin-d'Hères, Grenoble, Isère, Auvergne-Rhône-Alpes, France métropolitaine, 38400, France",45.1929245,5.7661983
+Gdansk University of Technology,Gdansk University of Technology,"PG, Romualda Traugutta, Królewska Dolina, Wrzeszcz Górny, Gdańsk, pomorskie, 80-233, RP",54.37086525,18.6171601574695
+George Mason University,George Mason University,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA",38.83133325,-77.3079883887912
+George Mason University,"George Mason University, Fairfax Virginia, USA","George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA",38.83133325,-77.3079883887912
+George Mason University,"George Mason University, Fairfax, VA 22030","George Mason University, University Drive, Ardmore, Fairfax, Fairfax County, Virginia, 22030, USA",38.8345539,-77.3152142
+George Mason University,"George Mason University, Fairfax, VA, USA","George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA",38.83133325,-77.3079883887912
+Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.776033,-84.3988408600158
+"Georgia Institute of Technology, Atlanta, 30332-0250, USA","Georgia Institute of Technology, Atlanta, 30332-0250, USA","Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.776033,-84.3988408600158
+"Georgia Institute of Technology, Atlanta, Georgia, USA","Georgia Institute of Technology, Atlanta, Georgia, USA","Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.776033,-84.3988408600158
+Georgia Southern University,Georgia Southern University,"Georgia Southern University, Forrest Drive, Pine Cove, Statesboro, Bulloch County, Georgia, 30460, USA",32.42143805,-81.7845052864662
+Georgia Southern University,"Georgia Southern University, Statesboro, USA","Georgia Southern University, Forrest Drive, Pine Cove, Statesboro, Bulloch County, Georgia, 30460, USA",32.42143805,-81.7845052864662
+Glyndwr University,Glyndwr University,"Glyndŵr University, Mold Road, Rhosrobin, Wrexham, Wales, LL11 2AW, UK",53.05373795,-3.00482075353073
+"Golden, CO, USA","Golden, CO, USA","Golden, Jefferson County, Colorado, USA",39.755543,-105.2210997
+Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821,15.460195677136
+Graz University of Technology,"Graz University of Technology, Austria","TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821,15.460195677136
+Griffith University,Griffith University,"Griffith University Nathan Campus, Johnson Path, Nathan, Nathan Heights, QLD, 4111, Australia",-27.5533975,153.053362338641
+Griffith University,"Griffith University, Australia","Griffith University Nathan Campus, Johnson Path, Nathan, Nathan Heights, QLD, 4111, Australia",-27.5533975,153.053362338641
+Griffith University,"Griffith University, Brisbane","Griffith University Nathan Campus, Johnson Path, Nathan, Nathan Heights, QLD, 4111, Australia",-27.5533975,153.053362338641
+Griffith University,"Griffith University, Nathan, QLD, Australia","Griffith University Nathan Campus, Johnson Path, Nathan, Nathan Heights, QLD, 4111, Australia",-27.5533975,153.053362338641
+Guangdong Medical College,Guangdong Medical College,"医学院, 真如路, 凤凰新村, 天河区, 广州市, 广东省, 510635, 中国",23.1294489,113.343761097683
+Guangdong University of Technology,Guangdong University of Technology,"广东工业大学, 东风东路, 黄花岗街道, 越秀区 (Yuexiu), 广州市, 广东省, 510080, 中国",23.1353836,113.294704958268
+Guangdong University of Technology,"Guangdong University of Technology, China","广东工业大学, 东风东路, 黄花岗街道, 越秀区 (Yuexiu), 广州市, 广东省, 510080, 中国",23.1353836,113.294704958268
+Guangzhou University,Guangzhou University,"广州大学, 大学城中环西路, 广州大学城, 南村镇, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.04436505,113.366684576444
+Guangzhou University,"Guangzhou University, Guangzhou, China","广州大学, 大学城中环西路, 广州大学城, 南村镇, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.04436505,113.366684576444
+Guilin University of Electronic Technology Guangxi Guilin,Guilin University of Electronic Technology Guangxi Guilin,"桂林电子科技大学金鸡岭校区, 1号, 金鸡路, 七星区, 黄莺岩村, 七星区, 桂林市, 广西壮族自治区, 541004, 中国",25.2873992,110.332427699352
+Guilin University of Electronic Technology Guangxi Guilin,"Guilin University of Electronic Technology Guangxi Guilin, China","桂林电子科技大学金鸡岭校区, 1号, 金鸡路, 七星区, 黄莺岩村, 七星区, 桂林市, 广西壮族自治区, 541004, 中国",25.2873992,110.332427699352
+Hacettepe University,Hacettepe University,"Hacettepe Üniversitesi Beytepe Kampüsü, Hacettepe-Beytepe Kampüs Yolu, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.86742125,32.7351907206768
+Halmstad University,Halmstad University,"Högskolan i Halmstad, 3, Kristian IV:s väg, Larsfrid, Nyhem, Halmstad, Hallands län, Götaland, 301 18, Sverige",56.66340325,12.8792972689712
+Halmstad University,"Halmstad University, Halmstad, Sweden","Högskolan i Halmstad, 3, Kristian IV:s väg, Larsfrid, Nyhem, Halmstad, Hallands län, Götaland, 301 18, Sverige",56.66340325,12.8792972689712
+Hangzhou Dianzi University,Hangzhou Dianzi University,"杭州电子科技大学, 2号大街, 白杨街道, 江干区 (Jianggan), 杭州市 Hangzhou, 浙江省, 310018, 中国",30.3125525,120.3430946
+Hangzhou Dianzi University,"Hangzhou Dianzi University, Hangzhou, China","杭州电子科技大学, 2号大街, 白杨街道, 江干区 (Jianggan), 杭州市 Hangzhou, 浙江省, 310018, 中国",30.3125525,120.3430946
+Hankuk University of Foreign Studies,Hankuk University of Foreign Studies,"외대앞, 휘경로, 이문동, 이문2동, 동대문구, 서울특별시, 02407, 대한민국",37.5953979,127.0630499
+Hankuk University of Foreign Studies,"Hankuk University of Foreign Studies, South Korea","외대앞, 휘경로, 이문동, 이문2동, 동대문구, 서울특별시, 02407, 대한민국",37.5953979,127.0630499
+Hanoi University of Science and Technology,Hanoi University of Science and Technology,"HUST, Trần Đại Nghĩa, Hai Bà Trưng, Hà Nội, 10999, Việt Nam",21.003952,105.843601832826
+Hanyang University,Hanyang University,"한양대, 206, 왕십리로, 사근동, 성동구, 서울특별시, 04763, 대한민국",37.5557271,127.0436642
+Harbin Engineering University,Harbin Engineering University,"哈尔滨工程大学, 文庙街 - Wenmiao Street, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.77445695,126.676849168143
+Harbin Engineering University,"Harbin Engineering University, Harbin, Heilongjiang, 150001, China","哈尔滨工程大学, 文庙街 - Wenmiao Street, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.77445695,126.676849168143
+Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.7413921,126.625527550394
+"Harbin Institute of Technology, China","Harbin Institute of Technology, China","哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.7413921,126.625527550394
+"Harbin Institute of Technology, China, 150001","Harbin Institute of Technology, China, 150001","哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.7413921,126.625527550394
+"Harbin Institute of Technology, Harbin 150001, China","Harbin Institute of Technology, Harbin 150001, China","哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.7413921,126.625527550394
+"Harbin Institute of Technology, Harbin, China","Harbin Institute of Technology, Harbin, China","哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.7413921,126.625527550394
+Harbin Institute of Technology;Shenzhen University,Harbin Institute of Technology;Shenzhen University,"哈工大(深圳), 平山一路, 深圳大学城, 珠光村, 南山区, 深圳市, 广东省, 518000, 中国",22.5895016,113.965710495775
+Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.1266665287448
+Harvard University,"Harvard University, Cambridge","Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.1266665287448
+Harvard University,"Harvard University, Cambridge, MA","Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.1266665287448
+Harvard University,"Harvard University, Cambridge, MA 02138","Harvard University, Rotterdam Street, North Brighton, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36300645,-71.1245674978516
+Harvard University,"Harvard University, Cambridge, MA, USA","Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.1266665287448
+Harvard University,"Harvard University, USA","Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.1266665287448
+Harvard and Massachusetts Institute,Harvard and Massachusetts Institute,"Massachusetts Correctional Institute Shirley Minimum Security Library, Harvard Road, Shaker Village, Shirley, Middlesex County, Massachusetts, 01464, USA",42.5268445,-71.6525446
+"Hebei, China","Hebei, China","河北省, 中国",39.0000001,116.0
+Hefei University of Technology,Hefei University of Technology,"合肥工业大学(屯溪路校区), 193号, 南一环路, 航运南村, 包公街道, 合肥市区, 合肥市, 安徽省, 230009, 中国",31.846918,117.290533667908
+Hefei University of Technology,"Hefei University of Technology, Hefei, Anhui, 230601, China","合肥工业大学(屯溪路校区), 193号, 南一环路, 航运南村, 包公街道, 合肥市区, 合肥市, 安徽省, 230009, 中国",31.846918,117.290533667908
+Hefei University of Technology,"Hefei University of Technology, Hefei, China","合肥工业大学(屯溪路校区), 193号, 南一环路, 航运南村, 包公街道, 合肥市区, 合肥市, 安徽省, 230009, 中国",31.846918,117.290533667908
+Hengyang Normal University,Hengyang Normal University,"衡阳师范学院, 黄白路, 雁峰区, 衡阳市 / Hengyang, 湖南省, 中国",26.8661136,112.620921219792
+Hengyang Normal University,"Hengyang Normal University, Hengyang, China","衡阳师范学院, 黄白路, 雁峰区, 衡阳市 / Hengyang, 湖南省, 中国",26.8661136,112.620921219792
+Heriot-Watt University,Heriot-Watt University,"Heriot-Watt University - Edinburgh Campus, Third Gait, Currie, Gogarbank, City of Edinburgh, Scotland, EH14 4AS, UK",55.91029135,-3.32345776559167
+Hiroshima University,Hiroshima University,"Hiroshima University 広島大学 東広島キャンパス, 出会いの道 Deai-no-michi Str., 西条下見, 東広島市, 広島県, 中国地方, 739-0047, 日本",34.4019766,132.7123195
+Hiroshima University,"Hiroshima University, Japan","Hiroshima University 広島大学 東広島キャンパス, 出会いの道 Deai-no-michi Str., 西条下見, 東広島市, 広島県, 中国地方, 739-0047, 日本",34.4019766,132.7123195
+HoHai University,HoHai University,"河海大学, 河海路, 小市桥, 鼓楼区, 南京市, 江苏省, 210013, 中国",32.05765485,118.755000398628
+Hofstra University,Hofstra University,"Hofstra University, Hempstead Turnpike Bike Path, East Garden City, Nassau County, New York, 11549, USA",40.71703345,-73.599835005538
+Hofstra University,"Hofstra University, Hempstead, NY 11549","Hofstra University, Hempstead Turnpike Bike Path, East Garden City, Nassau County, New York, 11549, USA",40.71703345,-73.599835005538
+Hong Kong Baptist University,Hong Kong Baptist University,"香港浸會大學 Hong Kong Baptist University, 安明街 On Ming Street, 石門 Shek Mun, 石古壟 Shek Kwu Lung, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1132, 中国",22.3874201,114.2082222
+Hong Kong Baptist University,"Hong Kong Baptist University, Hong Kong","香港浸會大學 Hong Kong Baptist University, 安明街 On Ming Street, 石門 Shek Mun, 石古壟 Shek Kwu Lung, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1132, 中国",22.3874201,114.2082222
+Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.304572,114.179762852269
+Hong Kong Polytechnic University,"Hong Kong Polytechnic University, Hong Kong","hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.304572,114.179762852269
+Hong Kong Polytechnic University,"Hong Kong Polytechnic University, Hong Kong, China","hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.304572,114.179762852269
+Hong Kong University of Science and Technology,Hong Kong University of Science and Technology,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.3386304,114.2620337
+Hong Kong University of Science and Technology,"Hong Kong University of Science and Technology, Hong Kong","香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.3386304,114.2620337
+Howard University,Howard University,"Howard University, College Street Northwest, Howard University, Washington, D.C., 20001, USA",38.921525,-77.019535656678
+Howard University,"Howard University, Washington DC","Howard University, College Street Northwest, Howard University, Washington, D.C., 20001, USA",38.921525,-77.019535656678
+Huaqiao University,Huaqiao University,"华侨大学站 HuaQiao University (BRT), 集美大道, 集美区, 集美区 (Jimei), 厦门市 / Xiamen, 福建省, 361024, 中国",24.6004712,118.0816574
+Huaqiao University,"Huaqiao University, Xiamen, China","华侨大学站 HuaQiao University (BRT), 集美大道, 集美区, 集美区 (Jimei), 厦门市 / Xiamen, 福建省, 361024, 中国",24.6004712,118.0816574
+Huazhong University of,Huazhong University of,"深圳市第六人民医院, 89号, 桃园路, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518000, 中国",22.53367445,113.917874206261
+Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.5097537,114.4062881
+Huazhong University of Science and Technology,"Huazhong University of Science and Technology, Wuhan, China","华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.5097537,114.4062881
+Huazhong University of Science and Technology,"Huazhong University of Science and Technology, Wuhan, China 430074","华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.5097537,114.4062881
+Humboldt-University,Humboldt-University,"Humboldt-Universität zu Berlin, Dorotheenstraße, Spandauer Vorstadt, Mitte, Berlin, 10117, Deutschland",52.51875685,13.3935604936378
+Humboldt-University,"Humboldt-University, Berlin, Germany","Humboldt-Universität zu Berlin, Dorotheenstraße, Spandauer Vorstadt, Mitte, Berlin, 10117, Deutschland",52.51875685,13.3935604936378
+Hunan University,Hunan University,"Yejin University for Employees, 冶金西路, 和平乡, 珠晖区, 衡阳市 / Hengyang, 湖南省, 中国",26.88111275,112.628506656425
+"IBM Almaden Research Center, San Jose CA","IBM Almaden Research Center, San Jose CA","IBM Almaden Research Center, San José, Santa Clara County, California, USA",37.21095605,-121.807486683178
+IBM Research,IBM Research,"IBM, East Cornwallis Road, Research Triangle Park, Nelson, Durham County, North Carolina, 27709, USA",35.9042272,-78.8556576330566
+"IBM Research, USA","IBM Research, USA","IBM, East Cornwallis Road, Research Triangle Park, Nelson, Durham County, North Carolina, 27709, USA",35.9042272,-78.8556576330566
+IBM Thomas J. Watson Research Center,IBM Thomas J. Watson Research Center,"IBM Yorktown research lab, Adams Road, Millwood, Town of New Castle, Westchester County, New York, 10562, USA",41.21002475,-73.8040705573196
+IDIAP RESEARCH INSTITUTE,IDIAP RESEARCH INSTITUTE,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.109237,7.08453548522408
+IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.109237,7.08453548522408
+"IDIAP Research Institute, Martigny, Switzerland","IDIAP Research Institute, Martigny, Switzerland","Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.109237,7.08453548522408
+"IDIAP, Martigny, Switzerland","IDIAP, Martigny, Switzerland","Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.109237,7.08453548522408
+"IIIT-Delhi, India","IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.2732550434418
+"IIT Guwahati, Guwahati, India","IIT Guwahati, Guwahati, India","Indian Institute of Technology Guwahati - IIT Guwahati, NH27, Amingaon, Guwahati, Kamrup, Assam, 781015, India",26.19247875,91.6946356873113
+IMPERIAL COLLEGE,IMPERIAL COLLEGE,"国子监, 五道营胡同, Naga上院, 北京市, 东城区, 北京市, 100010, 中国",39.9458551,116.406973072869
+"INRIA Grenoble Rhone-Alpes, FRANCE","INRIA Grenoble Rhone-Alpes, FRANCE","INRIA, 655, Avenue de l'Europe, Innovallée Montbonnot, Montbonnot-Saint-Martin, Grenoble, Isère, Auvergne-Rhône-Alpes, France métropolitaine, 38330, France",45.2182986,5.80703193086113
+Idiap Research Institute,Idiap Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.109237,7.08453548522408
+"Idiap Research Institute, Martigny, Switzerland","Idiap Research Institute, Martigny, Switzerland","Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.109237,7.08453548522408
+"Idiap Research Institute, Switzerland","Idiap Research Institute, Switzerland","Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.109237,7.08453548522408
+Illinois Institute of Technology,Illinois Institute of Technology,"Illinois Institute of Technology, South State Street, Bronzeville, Chicago, Cook County, Illinois, 60616, USA",41.8361963,-87.6265591274291
+"Illinois Institute of Technology, Chicago, Illinois, USA","Illinois Institute of Technology, Chicago, Illinois, USA","Illinois Institute of Technology, South State Street, Bronzeville, Chicago, Cook County, Illinois, 60616, USA",41.8361963,-87.6265591274291
+Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.175607973937072
+"Imperial College London, London, U.K.","Imperial College London, London, U.K.","Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.175607973937072
+"Imperial College London, London, UK","Imperial College London, London, UK","Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.175607973937072
+"Imperial College London, U.K","Imperial College London, U.K","Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.175607973937072
+"Imperial College London, U.K.","Imperial College London, U.K.","Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.175607973937072
+"Imperial College London, UK","Imperial College London, UK","Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.175607973937072
+"Imperial College London, United Kingdom","Imperial College London, United Kingdom","Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.175607973937072
+"Imperial College, London, UK","Imperial College, London, UK","Sung Chuan Kung Fu, Imperial College, Prince Consort Road, City of Westminster, London, Greater London, England, SW7 2QU, UK",51.5004171,-0.1782711
+Indian Institute of Science,Indian Institute of Science,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.0222347,77.5671832476811
+Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.0222347,77.5671832476811
+"Indian Institute of Science, India","Indian Institute of Science, India","IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.0222347,77.5671832476811
+Indian Institute of Technology,Indian Institute of Technology,"Indian Institute Of Technology, IIT Delhi Main Road, Adchini, Lado Sarai, Mehrauli, South Delhi, Delhi, 110066, India",28.5444176,77.1893001
+"Indian Institute of Technology Delhi, New Delhi, India","Indian Institute of Technology Delhi, New Delhi, India","Indian Institute Of Technology, IIT Delhi Main Road, Adchini, Lado Sarai, Mehrauli, South Delhi, Delhi, 110066, India",28.5444176,77.1893001
+Indian Institute of Technology Kanpur,Indian Institute of Technology Kanpur,"Indian Institute of Technology Kanpur, 4th Avenue, Panki, Kanpur, Kanpur Nagar, Uttar Pradesh, 208016, India",26.513188,80.2365194538339
+"Indian Institute of Technology Kanpur, Kanpur, India","Indian Institute of Technology Kanpur, Kanpur, India","Indian Institute of Technology Kanpur, 4th Avenue, Panki, Kanpur, Kanpur Nagar, Uttar Pradesh, 208016, India",26.513188,80.2365194538339
+"Indian Institute of Technology, Roorkee","Indian Institute of Technology, Roorkee","Indian Institute of Technology (IIT), Roorkee, LBS Jogging Track, Roorkee, Haridwar, Uttarakhand, 247667, India",29.8662461,77.8958708109136
+Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.8795690544362
+Indiana University Bloomington,Indiana University Bloomington,"Indiana University Bloomington, East 17th Street, Bloomington, Monroe County, Indiana, 47408, USA",39.17720475,-86.5154003022128
+"Industrial Technology Research Institute, Hsinchu, Taiwan","Industrial Technology Research Institute, Hsinchu, Taiwan","工研院, 195, 中興路四段, 頭重里, 竹東鎮, 新竹縣, 31040, 臺灣",24.7741756,121.045092787653
+Information Technologies Institute,Information Technologies Institute,"公益財団法人九州先端科学技術研究所, Fukuoka SRP Center Building 7F, 百道ランプ下り入り口, 早良区, 福岡市, 福岡県, 九州地方, 814-0001, 日本",33.5934539,130.3557837
+Information Technology University (ITU),Information Technology University (ITU),"Information Technology University (ITU), Ferozepur Road, Sher Shah Block, Garden Town, Al Noor Town, Lahore District, پنجاب, 54600, ‏پاکستان‎",31.4760299,74.3427526
+Information Technology University (ITU),"Information Technology University (ITU), Punjab, Lahore, Pakistan","Information Technology University (ITU), Ferozepur Road, Sher Shah Block, Garden Town, Al Noor Town, Lahore District, پنجاب, 54600, ‏پاکستان‎",31.4760299,74.3427526
+Institute for Advanced,Institute for Advanced,"Institute for Advanced Biosciences, 鶴岡市, 山形県, 東北地方, 日本",38.7468877,139.824707282407
+Institute for Communication Systems,Institute for Communication Systems,"Institute for Communication Systems, Spine Road, Woodbridge Hill, Guildford, Surrey, South East, England, GU2 7XS, UK",51.2433692,-0.593220895014599
+Institute for System Programming,Institute for System Programming,"ИСП РАН, 25, улица Александра Солженицына, Швивая горка, Таганский район, Центральный административный округ, Москва, ЦФО, 109004, РФ",55.7449881,37.6645042069876
+Institute of,Institute of,"Institute, Kanawha County, West Virginia, 25112, USA",38.3836097,-81.7654665
+Institute of Automation,Institute of Automation,"Institut für Automatisierungstechnik, 31, Richard-Wagner-Straße, Warnemünde, Ortsbeirat 1 : Diedrichshagen,Seebad Warnemünde, Rostock, Mecklenburg-Vorpommern, 18119, Deutschland",54.1720834,12.0790983
+Institute of Communications Engineering,Institute of Communications Engineering,"Institut für Nachrichtentechnik, 31, Richard-Wagner-Straße, Warnemünde, Ortsbeirat 1 : Diedrichshagen,Seebad Warnemünde, Rostock, Mecklenburg-Vorpommern, 18119, Deutschland",54.1718573,12.0784417
+Institute of Computer Science,Institute of Computer Science,"Institute of Computer Science, 8, 내동로, 신율리, 진주시, 경남, 52669, 대한민국",35.15456615,128.098476040221
+Institute of Computer Science III,Institute of Computer Science III,"Institute of Computer Science, 8, 내동로, 신율리, 진주시, 경남, 52669, 대한민국",35.15456615,128.098476040221
+Institute of Computing,Institute of Computing,"Institute for Quantum Computing, Wes Graham Way, Lakeshore Village, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 6R2, Canada",43.47878995,-80.5548480959375
+Institute of Computing Technology,Institute of Computing Technology,"神戸情報大学院大学, フラワーロード, 中央区, 神戸市, 兵庫県, 近畿地方, 650-0001, 日本",34.6988529,135.1936779
+Institute of Digital Media,Institute of Digital Media,"Institute of Digital Media Technology, Way to Csa Odisha Office, Ward 35, South East Zone, Bhubaneswar Municipal Corporation, Khordha, Odisha, 751022, India",20.28907925,85.84232125
+Institute of Electronics and Computer Science,Institute of Electronics and Computer Science,"EDI, 14, Dzērbenes iela, Biķerziedi, Teika, Ozolkalni, Rīga, Vidzeme, LV-1006, Latvija",56.97734805,24.1951425550775
+"Institute of Engineering and Management, Kolkata, India","Institute of Engineering and Management, Kolkata, India","Institute of Engineering and Management, Block -EP, Ring Road, GP Block, Kolkata, Twenty-four Parganas, West Bengal, 700091, India",22.57423855,88.4337303
+Institute of Industrial Science,Institute of Industrial Science,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本",36.05238585,140.118523607658
+Institute of Information Science,Institute of Information Science,"資訊科學研究所, 數理大道, 中研里, 南港子, 南港區, 臺北市, 11574, 臺灣",25.0410728,121.614756201755
+Institute of Information Technology,Institute of Information Technology,"Institute of Information Technology, Sir Sayed Road, ফকিরাপুল, সিদ্দিক বাজার, ঢাকা, ঢাকা বিভাগ, 1000, বাংলাদেশ",23.7289899,90.3982682
+Institute of Media Innovation,Institute of Media Innovation,"Institute for Media Innovation, 50, Nanyang Drive, Pioneer, Southwest, 637553, Singapore",1.3433937,103.6793303
+Institute of Road and,Institute of Road and,"Institute, Kanawha County, West Virginia, 25112, USA",38.3836097,-81.7654665
+Institute of Systems and Robotics,Institute of Systems and Robotics,"Institut für Robotik und Kognitive Systeme, 160, Ratzeburger Allee, Strecknitz, Sankt Jürgen, Strecknitz, Lübeck, Schleswig-Holstein, 23562, Deutschland",53.8338371,10.7035939
+International Institute of Information Technology,International Institute of Information Technology,"International Institute of Information Technology, Hyderabad, Campus Road, Ward 105 Gachibowli, Greater Hyderabad Municipal Corporation West Zone, Hyderabad, Rangareddy District, Telangana, 500032, India",17.4454957,78.3485469754447
+"International Institute of Information Technology (IIIT) Hyderabad, India","International Institute of Information Technology (IIIT) Hyderabad, India","International Institute of Information Technology, Hyderabad, Campus Road, Ward 105 Gachibowli, Greater Hyderabad Municipal Corporation West Zone, Hyderabad, Rangareddy District, Telangana, 500032, India",17.4454957,78.3485469754447
+"International Institute of Information Technology, Hyderabad, India","International Institute of Information Technology, Hyderabad, India","International Institute of Information Technology, Hyderabad, Campus Road, Ward 105 Gachibowli, Greater Hyderabad Municipal Corporation West Zone, Hyderabad, Rangareddy District, Telangana, 500032, India",17.4454957,78.3485469754447
+"International Institute of Information Technology, Hyderabad, Telangana, India","International Institute of Information Technology, Hyderabad, Telangana, India","International Institute of Information Technology, Hyderabad, Campus Road, Ward 105 Gachibowli, Greater Hyderabad Municipal Corporation West Zone, Hyderabad, Rangareddy District, Telangana, 500032, India",17.4454957,78.3485469754447
+International University of,International University of,"International University, ផ្លូវ ១៩៨៤, ភូមិភ្នំពេញថ្មី, ខណ្ឌសែនសុខ, រាជធានីភ្នំពេញ, 12101, ព្រះរាជាណាចក្រ​កម្ពុជា",11.5744201,104.8775841
+Ionian University,Ionian University,"Πανεπιστήμιο Πατρών, Λεωφ. Ιπποκράτους, κ. Ρίου (Αγίου Γεωργίου Ρίου), Πάτρα, Δήμος Πατρέων, Περιφερειακή Ενότητα Αχαΐας, Περιφέρεια Δυτικής Ελλάδας, Πελοπόννησος, Δυτική Ελλάδα και Ιόνιο, 26443, Ελλάδα",38.2899482,21.7886469
+Iowa State University,Iowa State University,"Iowa State University, Farm House Road, Ames, Story County, Iowa, 50014, USA",42.02791015,-93.6446441473745
+Iowa State University,"Iowa State University, Ames, IA, USA","Iowa State University, Farm House Road, Ames, Story County, Iowa, 50014, USA",42.02791015,-93.6446441473745
+Islamic Azad University,Islamic Azad University,"دانشگاه آزاد اسلامی, همدان, بخش مرکزی شهرستان همدان, شهرستان همدان, استان همدان, ‏ایران‎",34.8452999,48.5596212013643
+Islamic University of Gaza - Palestine,Islamic University of Gaza - Palestine,"The Islamic University of Gaza, Mostafa Hafez Street, South Remal, محافظة غزة, ‏قطاع غزة‎, PO BOX 108, الأراضي الفلسطينية",31.51368535,34.4401934143135
+Istanbul Technical University,Istanbul Technical University,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye",41.10427915,29.022311592943
+Istanbul Technical University,"Istanbul Technical University, Istanbul, 34469, TURKEY","Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye",41.10427915,29.022311592943
+Istanbul Technical University,"Istanbul Technical University, Istanbul, Turkey","Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye",41.10427915,29.022311592943
+Istanbul Technical University,"Istanbul Technical University, Turkey","Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye",41.10427915,29.022311592943
+Istanbul Technical University (ITU),Istanbul Technical University (ITU),"ITU Open Air Theater, Arı Yolu, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34485, Türkiye",41.10539,29.0213673
+Istanbul Technical University (ITU),"Istanbul Technical University (ITU), Turkey","ITU Open Air Theater, Arı Yolu, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34485, Türkiye",41.10539,29.0213673
+Istanbul University,Istanbul University,"İstanbul Üniversitesi, Besim Ömerpaşa Caddesi, Süleymaniye, Fatih, İstanbul, Marmara Bölgesi, 34116, Türkiye",41.0132424,28.9637609
+Istanbul University,"Istanbul University, Istanbul, Turkey","İstanbul Üniversitesi, Besim Ömerpaşa Caddesi, Süleymaniye, Fatih, İstanbul, Marmara Bölgesi, 34116, Türkiye",41.0132424,28.9637609
+Jacobs University,Jacobs University,"Liverpool Hope University, Shaw Street, Everton, Liverpool, North West England, England, L6 1HP, UK",53.4129148,-2.96897915394896
+Jadavpur University,Jadavpur University,"Jadavpur University, Chingrighata Flyover, Basani Devi Colony, Kolkata, Hāora, West Bengal, 700098, India",22.5611537,88.4131019353334
+Jadavpur University,"Jadavpur University, India","Jadavpur University, Chingrighata Flyover, Basani Devi Colony, Kolkata, Hāora, West Bengal, 700098, India",22.5611537,88.4131019353334
+Jahangirnagar University,Jahangirnagar University,"Jahangirnagar University, 1342, University Main Road, সাভার, সাভার উপজেলা, ঢাকা জেলা, ঢাকা বিভাগ, 1342, বাংলাদেশ",23.883312,90.2693921
+Jahangirnagar University,"Jahangirnagar University, Savar, Dhaka 1342, Bangladesh","Jahangirnagar University, 1342, Dhaka - Aricha Highway, Nobinagar, সাভার উপজেলা, ঢাকা জেলা, ঢাকা বিভাগ, 1342, বাংলাদেশ",23.88277575,90.2671009927283
+"Jaipur, Rajasthan, India","Jaipur, Rajasthan, India","Jaipur, Rajasthan, 302001, India",26.916194,75.820349
+Japan Advanced Institute of Science and Technology,Japan Advanced Institute of Science and Technology,"JAIST (北陸先端科学技術大学院大学), 石川県道55号小松辰口線, Ishikawa Science Park, 能美市, 石川県, 中部地方, 923-1206, 日本",36.4442949,136.5928587
+"Japan Advanced Institute of Science and Technology, Ishikawa-ken 923-1211, Japan","Japan Advanced Institute of Science and Technology, Ishikawa-ken 923-1211, Japan","JAIST (北陸先端科学技術大学院大学), 石川県道55号小松辰口線, Ishikawa Science Park, 能美市, 石川県, 中部地方, 923-1206, 日本",36.4442949,136.5928587
+Jaypee Institute of Information Technology,Jaypee Institute of Information Technology,"Jaypee Institute of Information Technology, Noida, A-10, National Highway 24 Bypass, Asha Pushp Vihar, Kaushambi, Ghaziabad, Uttar Pradesh, 201001, India",28.6300443,77.3720823
+Jiangnan University,Jiangnan University,"江南大学站, 蠡湖大道, 滨湖区, 南场村, 滨湖区 (Binhu), 无锡市 / Wuxi, 江苏省, 214121, 中国",31.4854255,120.2739581
+Jiangnan University,"Jiangnan University, Jiangsu Wuxi, PR China","江南大学站, 蠡湖大道, 滨湖区, 南场村, 滨湖区 (Binhu), 无锡市 / Wuxi, 江苏省, 214121, 中国",31.4854255,120.2739581
+Jiangnan University,"Jiangnan University, Wuxi","江南大学站, 蠡湖大道, 滨湖区, 南场村, 滨湖区 (Binhu), 无锡市 / Wuxi, 江苏省, 214121, 中国",31.4854255,120.2739581
+Jiangnan University Jiangsu Wuxi,Jiangnan University Jiangsu Wuxi,"江南大学站, 蠡湖大道, 滨湖区, 南场村, 滨湖区 (Binhu), 无锡市 / Wuxi, 江苏省, 214121, 中国",31.4854255,120.2739581
+Jiangnan University Jiangsu Wuxi,"Jiangnan University Jiangsu Wuxi, PR China","江南大学站, 蠡湖大道, 滨湖区, 南场村, 滨湖区 (Binhu), 无锡市 / Wuxi, 江苏省, 214121, 中国",31.4854255,120.2739581
+Jiangsu University,Jiangsu University,"江苏大学, 301, 学府路, 京口区, 象山街道, 京口区 (Jingkou), 镇江市 / Zhenjiang, 江苏省, 212013, 中国",32.20302965,119.509683619281
+Jiangsu University,"Jiangsu University, ZhenJiang, Jiangsu, 212013, P. R. China","江苏大学, 301, 学府路, 京口区, 象山街道, 京口区 (Jingkou), 镇江市 / Zhenjiang, 江苏省, 212013, 中国",32.20302965,119.509683619281
+Jiangsu University,"Jiangsu University, Zhenjiang, China","江苏大学, 301, 学府路, 京口区, 象山街道, 京口区 (Jingkou), 镇江市 / Zhenjiang, 江苏省, 212013, 中国",32.20302965,119.509683619281
+Jiangsu University of Science and Technology,Jiangsu University of Science and Technology,"江苏科技大学, 学府路, 京口区, 象山街道, 京口区 (Jingkou), 镇江市 / Zhenjiang, 江苏省, 212000, 中国",32.198055,119.4632679083
+Jiangsu University of Science and Technology,"Jiangsu University of Science and Technology, Zhenjiang, China","江苏科技大学, 学府路, 京口区, 象山街道, 京口区 (Jingkou), 镇江市 / Zhenjiang, 江苏省, 212000, 中国",32.198055,119.4632679083
+Jilin University,Jilin University,"吉林大学珠海校区, 丹桂路, 圣堂村, 金湾区, 珠海市, 广东省, 中国",22.053565,113.39913285497
+Jilin University,"Jilin University, China","吉林大学珠海校区, 丹桂路, 圣堂村, 金湾区, 珠海市, 广东省, 中国",22.053565,113.39913285497
+"Joint Research Institute, Foshan, China","Joint Research Institute, Foshan, China","广东顺德中山大学卡内基梅隆大学国际联合研究院, 南国东路, 顺德区, 五村, 顺德区 (Shunde), 佛山市 / Foshan, 广东省, 0757, 中国",22.83388935,113.285418245471
+Jordan University of Science and Technology,Jordan University of Science and Technology,"Jordan University of Science and Technology, شارع الأردن, إربد‎, إربد, الأردن",32.49566485,35.9916071719283
+Jordan University of Science and Technology,"Jordan University of Science and Technology, Irbid, Jordan","Jordan University of Science and Technology, شارع الأردن, إربد‎, إربد, الأردن",32.49566485,35.9916071719283
+K.N. Toosi University of Technology,K.N. Toosi University of Technology,"دانشکده مهندسی عمران و نقشه برداری, ولی عصر, کاووسیه, منطقه ۳ شهر تهران, تجریش, بخش رودبارقصران, شهرستان شمیرانات, استان تهران, 1968653111, ‏ایران‎",35.76427925,51.409702762313
+K.N. Toosi University of Technology,"K.N. Toosi University of Technology, Tehran, Iran","دانشکده مهندسی عمران و نقشه برداری, ولی عصر, کاووسیه, منطقه ۳ شهر تهران, تجریش, بخش رودبارقصران, شهرستان شمیرانات, استان تهران, 1968653111, ‏ایران‎",35.76427925,51.409702762313
+"KAIST, Daejeon, Korea","KAIST, Daejeon, Korea","궁동 카이스트 아파트 (Gungdong KAIST Apartments), 온천2동, 온천동, 유성구, 대전, 대한민국",36.3646244,127.352251416793
+"KAIST, Korea","KAIST, Korea","궁동 카이스트 아파트 (Gungdong KAIST Apartments), 온천2동, 온천동, 유성구, 대전, 대한민국",36.3646244,127.352251416793
+"KTH Royal Institute of Technology, Stockholm","KTH Royal Institute of Technology, Stockholm","KTH, Teknikringen, Lärkstaden, Norra Djurgården, Östermalms stadsdelsområde, Sthlm, Stockholm, Stockholms län, Svealand, 114 28, Sverige",59.34986645,18.0706321329842
+"KTH Royal Institute of Technology, 100 44 Stockholm, Sweden","KTH Royal Institute of Technology, 100 44 Stockholm, Sweden","KTH, Teknikringen, Lärkstaden, Norra Djurgården, Östermalms stadsdelsområde, Sthlm, Stockholm, Stockholms län, Svealand, 114 28, Sverige",59.34986645,18.0706321329842
+"KTH Royal Institute of Technology, Stockholm, Sweden","KTH Royal Institute of Technology, Stockholm, Sweden","KTH, Teknikringen, Lärkstaden, Norra Djurgården, Östermalms stadsdelsområde, Sthlm, Stockholm, Stockholms län, Svealand, 114 28, Sverige",59.34986645,18.0706321329842
+Karlsruhe Institute of,Karlsruhe Institute of,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312559623876
+Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312559623876
+"Karlsruhe Institute of Technology (KIT), Germany","Karlsruhe Institute of Technology (KIT), Germany","KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312559623876
+"Karlsruhe Institute of Technology, 76131 Karlsruhe, Germany","Karlsruhe Institute of Technology, 76131 Karlsruhe, Germany","KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312559623876
+"Karlsruhe Institute of Technology, Germany","Karlsruhe Institute of Technology, Germany","KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312559623876
+"Karlsruhe Institute of Technology, Karlsruhe, Germany","Karlsruhe Institute of Technology, Karlsruhe, Germany","KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312559623876
+Katholieke Universiteit Leuven,Katholieke Universiteit Leuven,"Laboratorium voor Bos, natuur en landschap, 102, Vital Decosterstraat, Sint-Maartensdal, Leuven, Vlaams-Brabant, Vlaanderen, 3000, België / Belgique / Belgien",50.8830686,4.7019503
+Keio University,Keio University,"綱島市民の森, けつわり坂, 港北区, 横浜市, 神奈川県, 関東地方, 223-0053, 日本",35.5416969,139.6347184
+Keio University,"Information, Keio University","綱島市民の森, けつわり坂, 港北区, 横浜市, 神奈川県, 関東地方, 223-0053, 日本",35.5416969,139.6347184
+Keio University,"Keio University, Yokohama 223-8522, Japan","慶應義塾大学 (矢上キャンパス), 理工坂, 港北区, 横浜市, 神奈川県, 関東地方, 223-8522, 日本",35.55536215,139.654582444136
+Kent State University,Kent State University,"Kent State University, Lester A. Lefton Esplanade, Whitehall Terrace, Kent, Portage County, Ohio, 44242-0001, USA",41.1443525,-81.3398283284572
+Kent State University,"Kent State University, Kent, Ohio, USA","Kent State University, Lester A. Lefton Esplanade, Whitehall Terrace, Kent, Portage County, Ohio, 44242-0001, USA",41.1443525,-81.3398283284572
+Khalifa University,Khalifa University,"Khalifa University, شارع طَوِي مُوَيلِح, قصر الشاطئ, حدبة الزَّعْفرانة, أبوظبي, أبو ظبي, 31757, الإمارات العربية المتحدة",24.4469025,54.3942563
+Khalifa University,"Khalifa University, Abu Dhabi, United Arab Emirates","Khalifa University, شارع طَوِي مُوَيلِح, قصر الشاطئ, حدبة الزَّعْفرانة, أبوظبي, أبو ظبي, 31757, الإمارات العربية المتحدة",24.4469025,54.3942563
+Khon Kaen University,Khon Kaen University,"มหาวิทยาลัยขอนแก่น, 4, บ้านหนองหัวช้าง, ขอนแก่น, จังหวัดขอนแก่น, 40002, ประเทศไทย",16.46007565,102.812117979662
+Khon Kaen University,"Khon Kaen University, Khon Kaen, 40002, Thailand","มหาวิทยาลัยขอนแก่น, 4, บ้านหนองหัวช้าง, ขอนแก่น, จังหวัดขอนแก่น, 40002, ประเทศไทย",16.46007565,102.812117979662
+King Abdullah University of Science and Technology 4700,King Abdullah University of Science and Technology 4700,"KAUST, Collaboration Avenue, ثول, منطقة مكة المكرمة, 23955, السعودية",22.31055485,39.1051548637793
+King Abdullah University of Science and Technology 4700,"King Abdullah University of Science and Technology 4700, Thuwal, Saudi Arabia","KAUST, Collaboration Avenue, ثول, منطقة مكة المكرمة, 23955, السعودية",22.31055485,39.1051548637793
+King Faisal University,King Faisal University,"University of Dammam, King Faisal Rd, العقربية, الخبر, المنطقة الشرقية, ٣١٩٥٢, السعودية",26.397778,50.183056
+King Saud University,King Saud University,"King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية",24.7246403,46.623350123456
+King Saud University,"King Saud University, Riyadh","King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية",24.7246403,46.623350123456
+King Saud University,"King Saud University, Riyadh 11543, Saudi Arabia","King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية",24.7246403,46.623350123456
+King Saud University,"King Saud University, Riyadh, Saudi Arabia","King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية",24.7246403,46.623350123456
+Kingston University,Kingston University,"Kingston University, Kingston Hill, Kingston Vale, Kingston-upon-Thames, London, Greater London, England, KT2 7TF, UK",51.4293086,-0.2684044
+Kingston University,"Kingston University, UK","Kingston University, Kingston Hill, Kingston Vale, Kingston-upon-Thames, London, Greater London, England, KT2 7TF, UK",51.4293086,-0.2684044
+Kobe University,Kobe University,"神戸大学, 灘三田線, 灘区, 神戸市, 兵庫県, 近畿地方, 657-00027, 日本",34.7275714,135.237099997686
+Kobe University,"Kobe University, Japan","神戸大学, 灘三田線, 灘区, 神戸市, 兵庫県, 近畿地方, 657-00027, 日本",34.7275714,135.237099997686
+Kogakuin University,Kogakuin University,"工学院大学, 東通り, 新宿区, 東京都, 関東地方, 163-8677, 日本",35.6902784,139.695400958171
+Kogakuin University,"Kogakuin University, Tokyo, Japan","工学院大学, 東通り, 新宿区, 東京都, 関東地方, 163-8677, 日本",35.6902784,139.695400958171
+Kookmin University,Kookmin University,"국민대학교앞, 정릉로, 정릉2동, 정릉동, 성북구, 서울특별시, 02708, 대한민국",37.6107554,126.9946635
+Kookmin University,"Kookmin University, Seoul, Korea","국민대학교앞, 정릉로, 정릉2동, 정릉동, 성북구, 서울특별시, 02708, 대한민국",37.6107554,126.9946635
+Korea Advanced Institute of Science and Technology,Korea Advanced Institute of Science and Technology,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.3697191,127.362537001151
+"Korea Advanced Institute of Science and Technology, Daejeon, Korea","Korea Advanced Institute of Science and Technology, Daejeon, Korea","카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.3697191,127.362537001151
+"Korea Advanced Institute of Science and Technology, Daejeon, Republic of Korea","Korea Advanced Institute of Science and Technology, Daejeon, Republic of Korea","카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.3697191,127.362537001151
+"Korea Advanced Institute of Science and Technology, Daejeon, South Korea","Korea Advanced Institute of Science and Technology, Daejeon, South Korea","카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.3697191,127.362537001151
+"Korea Advanced Institute of Science and Technology, Korea","Korea Advanced Institute of Science and Technology, Korea","카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.3697191,127.362537001151
+Korea Advanced institute of Science and Technology,Korea Advanced institute of Science and Technology,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.3697191,127.362537001151
+Korea University,Korea University,"고려대, 안암로, 제기동, 동대문구, 서울특별시, 02796, 대한민국",37.5901411,127.0362318
+Korea University,"Korea University, Seoul, South Korea","고려대, 안암로, 제기동, 동대문구, 서울특별시, 02796, 대한민국",37.5901411,127.0362318
+Kumamoto University,Kumamoto University,"熊本大学黒髪キャンパス, 熊本菊陽線, 中央区, 熊本市, 熊本県, 九州地方, 860-0863, 日本",32.8164178,130.727039687562
+Kumamoto University,"Kumamoto University, Kumamoto, Japan","熊本大学黒髪キャンパス, 熊本菊陽線, 中央区, 熊本市, 熊本県, 九州地方, 860-0863, 日本",32.8164178,130.727039687562
+Kurukshetra University,Kurukshetra University,"Kurukshetra University, SH6, Kurukshetra, Haryana, 132118, India",29.95826275,76.8156304467532
+Kurukshetra University,"Kurukshetra University, Kurukshetra","Kurukshetra University, SH6, Kurukshetra, Haryana, 132118, India",29.95826275,76.8156304467532
+Kurukshetra University,"Kurukshetra University, Kurukshetra, India","Kurukshetra University, SH6, Kurukshetra, Haryana, 132118, India",29.95826275,76.8156304467532
+Kyoto University,Kyoto University,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.0274996,135.781545126193
+Kyoto University,"Kyoto University, Kyoto, Japan","京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.0274996,135.781545126193
+Kyung Hee University,Kyung Hee University,"Kyung Hee Tae Kwon Do, Vons 2370 Truck Service Ramp, University City, San Diego, San Diego County, California, 92122, USA",32.8536333,-117.2035286
+Kyung Hee University,"Kyung Hee University, Korea","경희사이버대학교, 26, 경희대로, 회기동, 동대문구, 서울특별시, 02447, 대한민국",37.5948716,127.0530887
+Kyung Hee University,"Kyung Hee University, Seoul, South Korea","경희사이버대학교, 26, 경희대로, 회기동, 동대문구, 서울특별시, 02447, 대한민국",37.5948716,127.0530887
+Kyung Hee University,"Kyung Hee University, South Korea","경희사이버대학교, 26, 경희대로, 회기동, 동대문구, 서울특별시, 02447, 대한민국",37.5948716,127.0530887
+Kyung Hee University,"Kyung Hee University, Yongin, South Korea","경희대학교 국제캠퍼스, 서천동로21번길, 서천동, 기흥구, 용인시, 경기, 17108, 대한민국",37.24244405,127.080937489679
+Kyushu University,Kyushu University,"伊都ゲストハウス, 桜井太郎丸線, 西区, 福岡市, 福岡県, 九州地方, 819-0395, 日本",33.59914655,130.223598480987
+La Trobe University,La Trobe University,"La Trobe University, Keck Street, Flora Hill, Bendigo, City of Greater Bendigo, Loddon Mallee, Victoria, 3550, Australia",-36.7784754,144.298047
+La Trobe University,"La Trobe University, Australia","La Trobe University, Keck Street, Flora Hill, Bendigo, City of Greater Bendigo, Loddon Mallee, Victoria, 3550, Australia",-36.7784754,144.298047
+Lancaster University,Lancaster University,"Lancaster University, Library Avenue, Bowland College, Hala, Lancaster, Lancs, North West England, England, LA1 4AP, UK",54.00975365,-2.78757490881378
+Lancaster University,"Lancaster University, Lancaster, UK","Lancaster University, Library Avenue, Bowland College, Hala, Lancaster, Lancs, North West England, England, LA1 4AP, UK",54.00975365,-2.78757490881378
+Lehigh University,Lehigh University,"Lehigh University, Library Drive, Sayre Park, Bethlehem, Northampton County, Pennsylvania, 18015, USA",40.6068028,-75.3782488
+Lehigh University,"Lehigh University, Bethlehem, PA 18015, USA","Lehigh University, Library Drive, Sayre Park, Bethlehem, Northampton County, Pennsylvania, 18015, USA",40.6068028,-75.3782488
+Liverpool John Moores University,Liverpool John Moores University,"John Lennon Art and Design Building, Duckinfield Street, Knowledge Quarter, Liverpool, North West England, England, L3 5YD, UK",53.4050747,-2.97030028586709
+Lomonosov Moscow State University,Lomonosov Moscow State University,"МГУ, улица Академика Хохлова, Московский государственный университет им. М. В. Ломоносова, район Раменки, Западный административный округ, Москва, ЦФО, 119234, РФ",55.70229715,37.5317977694291
+"London, United Kingdom","London, United Kingdom","London, Greater London, England, SW1A 2DU, UK",51.5073219,-0.1276474
+Loughborough University,Loughborough University,"Computer Science, University Road, Charnwood, Leicestershire, East Midlands, England, LE11 3TP, UK",52.7663577,-1.2292461
+Loughborough University,"Computer Science, Loughborough University, Loughborough, UK","Computer Science, University Road, Charnwood, Leicestershire, East Midlands, England, LE11 3TP, UK",52.7663577,-1.2292461
+Louisiana State University,Louisiana State University,"LSU, Gourrier Avenue, Baton Rouge, East Baton Rouge Parish, Louisiana, 70803, USA",30.40550035,-91.1862047410405
+Lund University,Lund University,"TEM at Lund University, 9, Klostergatan, Stadskärnan, Centrum, Lund, Skåne, Götaland, 22222, Sverige",55.7039571,13.1902011
+Lund University,"Lund University, Lund, Sweden","TEM at Lund University, 9, Klostergatan, Stadskärnan, Centrum, Lund, Skåne, Götaland, 22222, Sverige",55.7039571,13.1902011
+"M S Ramaiah Institute of Technology, Bangalore, Karnataka, India","M S Ramaiah Institute of Technology, Bangalore, Karnataka, India","M S Ramaiah Institute of Technology, MSRIT Quadrangle Path, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560054, India",13.0309553,77.5648559396817
+MASSACHUSETTS INSTITUTE OF TECHNOLOGY,MASSACHUSETTS INSTITUTE OF TECHNOLOGY,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.3583961,-71.0956778766393
+MASSACHUSETTS INSTITUTE OF TECHNOLOGY (MIT,MASSACHUSETTS INSTITUTE OF TECHNOLOGY (MIT,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.3583961,-71.0956778766393
+METs Institute of Engineering,METs Institute of Engineering,"Dihiko Paton, Pokhara Lekhnath Metropolitan Ward No. 6, Pokhara, Pokhara Lekhnath Metropolitan, कास्की, गण्डकी अञ्चल, पश्चिमाञ्चल विकास क्षेत्र, नेपाल",28.2140454,83.9607104993073
+"MPI Informatics, Germany","MPI Informatics, Germany","MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.2579566,7.04577416640431
+MULTIMEDIA UNIVERSITY,MULTIMEDIA UNIVERSITY,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.641853013536
+Maastricht University,Maastricht University,"UNS60, Professor Ten Hoorlaan, Randwyck, Maastricht, Limburg, Nederland, 6229EV, Nederland",50.8336712,5.71589
+Maastricht University,"Maastricht University, Maastricht, Netherlands","University College Maastricht, 4, Zwingelput, Jekerkwartier, Maastricht, Limburg, Nederland, 6211KH, Nederland",50.8444528,5.6884711
+Macau University of Science and,Macau University of Science and,"HKUST, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.3358031,114.265903983304
+Macau University of Science and Technology,Macau University of Science and Technology,"Universidade de Ciência e Tecnologia de Macau 澳門科技大學 Macau University of Science and Technology, 偉龍馬路 Avenida Wai Long, 氹仔Taipa, 氹仔舊城區 Vila de Taipa, 嘉模堂區 Nossa Senhora do Carmo, 氹仔 Taipa, 澳門 Macau, 853, 中国",22.15263985,113.568032061523
+Macau University of Science and Technology,"Macau University of Science and Technology, Macau","Universidade de Ciência e Tecnologia de Macau 澳門科技大學 Macau University of Science and Technology, 偉龍馬路 Avenida Wai Long, 氹仔Taipa, 氹仔舊城區 Vila de Taipa, 嘉模堂區 Nossa Senhora do Carmo, 氹仔 Taipa, 澳門 Macau, 853, 中国",22.15263985,113.568032061523
+Mahanakorn University of Technology,Mahanakorn University of Technology,"มหาวิทยาลัยเทคโนโลยีมหานคร, 140, ถนนเชื่อมสัมพันธ์, กรุงเทพมหานคร, เขตหนองจอก, กรุงเทพมหานคร, 10530, ประเทศไทย",13.84450465,100.856208183836
+Manchester University,Manchester University,"Manchester Metropolitan University – All Saints Campus, Lower Ormond Street, Hulme, Manchester, Greater Manchester, North West England, England, M15 6BX, UK",53.47020165,-2.23932183309859
+Manchester University,"Manchester University, UK","Manchester Metropolitan University – All Saints Campus, Lower Ormond Street, Hulme, Manchester, Greater Manchester, North West England, England, M15 6BX, UK",53.47020165,-2.23932183309859
+Mangalore University,Mangalore University,"Mangalore University, LR, ದಕ್ಷಿಣ ಕನ್ನಡ, Bantwal taluk, Dakshina Kannada, Karnataka, 574153, India",12.81608485,74.9244927772961
+Mangalore University,"Mangalore University, India","Mangalore University, LR, ದಕ್ಷಿಣ ಕನ್ನಡ, Bantwal taluk, Dakshina Kannada, Karnataka, 574153, India",12.81608485,74.9244927772961
+Manonmaniam Sundaranar University,Manonmaniam Sundaranar University,"Manonmaniam Sundaranar University, Tenkasi-Tirunelveli, Gandhi Nagar, Tirunelveli, Tirunelveli Kattabo, Tamil Nadu, 627808, India",8.76554685,77.65100444813
+Manonmaniam Sundaranar University,"Manonmaniam Sundaranar University, India","Manonmaniam Sundaranar University, Tenkasi-Tirunelveli, Gandhi Nagar, Tirunelveli, Tirunelveli Kattabo, Tamil Nadu, 627808, India",8.76554685,77.65100444813
+Manonmaniam Sundaranar University,"Manonmaniam Sundaranar University, Tirunelveli","Manonmaniam Sundaranar University, Tenkasi-Tirunelveli, Gandhi Nagar, Tirunelveli, Tirunelveli Kattabo, Tamil Nadu, 627808, India",8.76554685,77.65100444813
+Manonmaniam Sundaranar University,"Manonmaniam Sundaranar University, Tirunelveli, India","Manonmaniam Sundaranar University, Tenkasi-Tirunelveli, Gandhi Nagar, Tirunelveli, Tirunelveli Kattabo, Tamil Nadu, 627808, India",8.76554685,77.65100444813
+Marquette University,Marquette University,"Marquette University, West Wisconsin Avenue, University Hill, Milwaukee, Milwaukee County, Wisconsin, 53226, USA",43.03889625,-87.9315544990507
+"Massachusetts General Hospital, Boston, MA, USA","Massachusetts General Hospital, Boston, MA, USA","Mass General, 55, Fruit Street, Downtown Crossing, Beacon Hill, Boston, Suffolk County, Massachusetts, 02114, USA",42.36291795,-71.0687374226199
+Massachusetts Institute,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.3583961,-71.0956778766393
+Massachusetts Institute of Technology,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.3583961,-71.0956778766393
+Massachusetts Institute of Technology (MIT,Massachusetts Institute of Technology (MIT,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.3583961,-71.0956778766393
+"Massachusetts Institute of Technology, Cambridge, MA 02139, USA","Massachusetts Institute of Technology, Cambridge, MA 02139, USA","MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.3583961,-71.0956778766393
+Math Institute,Math Institute,"Fields Institute for Research in Math Science, 222, College Street, Kensington Market, Old Toronto, Toronto, Ontario, M5T 3A1, Canada",43.65879595,-79.3975504060101
+Max Planck Institute for Biological Cybernetics,Max Planck Institute for Biological Cybernetics,"Max-Planck-Institut für Biologische Kybernetik, 8, Max-Planck-Ring, Max-Planck-Institut, Wanne, Tübingen, Landkreis Tübingen, Regierungsbezirk Tübingen, Baden-Württemberg, 72076, Deutschland",48.5369125,9.05922532743396
+Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.2579566,7.04577416640431
+"Max Planck Institute for Informatics, Germany","Max Planck Institute for Informatics, Germany","MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.2579566,7.04577416640431
+"Max Planck Institute for Informatics, Saarbrucken, Germany","Max Planck Institute for Informatics, Saarbrucken, Germany","MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.2579566,7.04577416640431
+Max-Planck Institute for Informatics,Max-Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.2579566,7.04577416640431
+McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.5039761,-73.5749687
+McGill University,"McGill University, Montreal, Canada","McGill University, Avenue Docteur Penfield, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 2T8, Canada",45.50691775,-73.5791162596496
+McGovern Institute,McGovern Institute,"McGovern Institute for Brain Research (MIT), Main Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.3626295,-71.0914481
+McGovern Institute for Brain Research,McGovern Institute for Brain Research,"McGovern Institute for Brain Research (MIT), Main Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.3626295,-71.0914481
+McMaster University,McMaster University,"McMaster University, Westdale, Hamilton, Ontario, Canada",43.26336945,-79.9180968401692
+Meiji University,Meiji University,"明治大学, 錦華坂, 猿楽町1, 猿楽町, 東京, 千代田区, 東京都, 関東地方, 101-0051, 日本",35.6975029,139.761391749285
+Memorial University of Newfoundland,Memorial University of Newfoundland,"Memorial University of Newfoundland, Overpass, St. John's, Newfoundland and Labrador, A1B 5S7, Canada",47.5727251,-52.7330544350478
+Memorial University of Newfoundland,"Memorial University of Newfoundland, Canada","Memorial University of Newfoundland, Overpass, St. John's, Newfoundland and Labrador, A1B 5S7, Canada",47.5727251,-52.7330544350478
+Memorial University of Newfoundland,"Memorial University of Newfoundland, Saint John's, NL, Canada","Memorial University of Newfoundland, Overpass, St. John's, Newfoundland and Labrador, A1B 5S7, Canada",47.5727251,-52.7330544350478
+Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.718568,-84.4779157093052
+Michigan State University,"Michigan State University, E. Lansing, MI 48823, USA","Dero Fixit Bike Station, Grand River Avenue, East Lansing, Ingham County, Michigan, 48824, USA",42.7337998,-84.4804243
+Michigan State University,"Michigan State University, East Lansing 48824, USA","Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.718568,-84.4779157093052
+Michigan State University,"Michigan State University, East Lansing MI","Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.718568,-84.4779157093052
+Michigan State University,"Michigan State University, East Lansing, 48824, USA","Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.718568,-84.4779157093052
+Michigan State University,"Michigan State University, East Lansing, MI","Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.718568,-84.4779157093052
+Michigan State University,"Michigan State University, East Lansing, MI 48824, USA","Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.718568,-84.4779157093052
+Michigan State University,"Michigan State University, East Lansing, MI, USA","Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.718568,-84.4779157093052
+Michigan State University,"Michigan State University, USA","Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.718568,-84.4779157093052
+Michigan State University,"Michigan State University, United States of America","Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.718568,-84.4779157093052
+"Microsoft Res. Asia, Beijing, China","Microsoft Res. Asia, Beijing, China","微软亚洲研究院, 善缘街, 中关村, 稻香园南社区, 海淀区, 北京市, 100080, 中国",39.97834785,116.304119070565
+Microsoft Research,Microsoft Research,"Microsoft Research, 21, Station Road, Petersfield, Cambridge, Cambridgeshire, East of England, England, CB1 2FB, UK",52.19495145,0.135010835076038
+"Microsoft Research Asia, Beijing, China","Microsoft Research Asia, Beijing, China","微软亚洲研究院, 善缘街, 中关村, 稻香园南社区, 海淀区, 北京市, 100080, 中国",39.97834785,116.304119070565
+"Microsoft Research Asia, China","Microsoft Research Asia, China","微软亚洲研究院, 善缘街, 中关村, 稻香园南社区, 海淀区, 北京市, 100080, 中国",39.97834785,116.304119070565
+"Microsoft Research, Beijing, China","Microsoft Research, Beijing, China","微软亚洲研究院, 善缘街, 中关村, 稻香园南社区, 海淀区, 北京市, 100080, 中国",39.97834785,116.304119070565
+"Microsoft, Bellevue, WA, USA","Microsoft, Bellevue, WA, USA","Microsoft, 10455, Northeast 8th Street, Bellevue, King County, Washington, 98004-5002, USA",47.6164826,-122.2008506
+"Microsoft, Redmond, WA","Microsoft, Redmond, WA","Microsoft Cafe RedW-F, Bridle Crest Trail, Microsoft Redwest Campus, Redmond, King County, Washington, W LAKE SAMMAMISH PKWY NE, USA",47.6592914,-122.140633217997
+Middle East Technical University,Middle East Technical University,"ODTÜ, 1, 1591.sk(315.sk), Çiğdem Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87549675,32.7855350558467
+Middlebury College,Middlebury College,"Middlebury College, Old Chapel Road, Middlebury, Addison County, Vermont, 05753, USA",44.0090777,-73.1767946
+Middlesex University,Middlesex University,"Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK",51.59029705,-0.229632209454029
+Middlesex University,"Middlesex University, London","Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK",51.59029705,-0.229632209454029
+Middlesex University London,Middlesex University London,"Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK",51.59029705,-0.229632209454029
+Middlesex University London,"Middlesex University London, London, UK","Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK",51.59029705,-0.229632209454029
+Middlesex University London,"Middlesex University London, UK","Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK",51.59029705,-0.229632209454029
+Monash University,Monash University,"Monash University, Mile Lane, Parkville, City of Melbourne, Victoria, 3000, Australia",-37.78397455,144.958674326093
+Monash University,"Monash University, Caulfield East, Australia","Monash University (Caulfield campus), Queens Avenue, Caulfield East, City of Glen Eira, Victoria, 3163, Australia",-37.8774135,145.044982494489
+Monash University,"Monash University, Victoria, Australia","Monash University, Business Park Drive, Monash Business Park, Notting Hill, City of Monash, Victoria, 3800, Australia",-37.9011951,145.130584919767
+Monash University Malaysia,Monash University Malaysia,"Monash University Malaysia, Jalan Lagoon Selatan, Kampung Lembah Kinrara, SS13, Subang Jaya, Selangor, 47500, Malaysia",3.06405715,101.6005974
+Monash University Malaysia,"Monash University Malaysia, Bandar Sunway, Malaysia","Monash University Malaysia, Jalan Lagoon Selatan, Kampung Lembah Kinrara, SS13, Subang Jaya, Selangor, 47500, Malaysia",3.06405715,101.6005974
+"Moscow Institute of Physics and Technology, Russia","Moscow Institute of Physics and Technology, Russia","МФТИ, 9, Институтский переулок, Виноградовские Горки, Лихачёво, Долгопрудный, городской округ Долгопрудный, Московская область, ЦФО, 141700, РФ",55.929035,37.5186680829482
+Muhlenberg College,Muhlenberg College,"Muhlenberg College, 2400, West Chew Street, Rose Garden, Allentown, Lehigh County, Pennsylvania, 18104, USA",40.5967637,-75.5124062
+Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.641853013536
+Multimedia University,"Multimedia University, Cyberjaya, Malaysia","Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.641853013536
+Myongji University,Myongji University,"명지대, 금학로, 역북동, 처인구, 용인시, 경기, 17144, 대한민국",37.2381023,127.1903431
+Nagaoka University of Technology,Nagaoka University of Technology,"長岡技術科学大学 (Nagaoka University of Technology), 長岡西山線, 長岡市, 新潟県, 中部地方, 日本",37.42354445,138.77807276029
+Nagaoka University of Technology,"Nagaoka University of Technology, Japan","長岡技術科学大学 (Nagaoka University of Technology), 長岡西山線, 長岡市, 新潟県, 中部地方, 日本",37.42354445,138.77807276029
+Nagoya University,Nagoya University,"SuperDARN (Hokkaido West), 太辛第1支線林道, 陸別町, 足寄郡, 十勝総合振興局, 北海道, 北海道地方, 日本",43.53750985,143.60768225282
+Nagoya University,"Nagoya University, Japan","SuperDARN (Hokkaido West), 太辛第1支線林道, 陸別町, 足寄郡, 十勝総合振興局, 北海道, 北海道地方, 日本",43.53750985,143.60768225282
+Nanjing Normal University,Nanjing Normal University,"南京师范大学仙林校区, 敏行路, 仙林大学城, 栖霞区, 南京市, 江苏省, 210046, 中国",32.1066811,118.90863080932
+Nanjing Normal University,"Nanjing Normal University, China","南京师范大学仙林校区, 敏行路, 仙林大学城, 栖霞区, 南京市, 江苏省, 210046, 中国",32.1066811,118.90863080932
+Nanjing Normal University,"Nanjing Normal University, Nanjing, China","南京师范大学仙林校区, 敏行路, 仙林大学城, 栖霞区, 南京市, 江苏省, 210046, 中国",32.1066811,118.90863080932
+Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.0565957,118.774088328078
+Nanjing University,"Nanjing University, China","NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.0565957,118.774088328078
+Nanjing University,"Nanjing University, Nanjing 210023, China","NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.0565957,118.774088328078
+Nanjing University,"Nanjing University, Nanjing 210093, China","NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.0565957,118.774088328078
+Nanjing University,"Nanjing University, Nanjing 210093, P.R.China","NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.0565957,118.774088328078
+Nanjing University of Aeronautics and Astronautics,Nanjing University of Aeronautics and Astronautics,"南京航空航天大学, 御道街, 白下区, 新世纪广场, 秦淮区, 南京市, 江苏省, 210016, 中国",32.0373496,118.8140686
+Nanjing University of Aeronautics and Astronautics,"Nanjing University of Aeronautics and Astronautics, China","南京航空航天大学, 御道街, 白下区, 新世纪广场, 秦淮区, 南京市, 江苏省, 210016, 中国",32.0373496,118.8140686
+Nanjing University of Aeronautics and Astronautics,"Nanjing University of Aeronautics and Astronautics, Nanjing 210016, China","南京航空航天大学, 御道街, 白下区, 新世纪广场, 秦淮区, 南京市, 江苏省, 210016, 中国",32.0373496,118.8140686
+Nanjing University of Aeronautics and Astronautics,"Nanjing University of Aeronautics and Astronautics, Nanjing, China","南京航空航天大学, 御道街, 白下区, 新世纪广场, 秦淮区, 南京市, 江苏省, 210016, 中国",32.0373496,118.8140686
+Nanjing University of Information Science and Technology,Nanjing University of Information Science and Technology,"南京信息工程大学, 龙山北路, 第十六街区, 浦口区, 南京市, 江苏省, 210032, 中国",32.2068102,118.718472893883
+Nanjing University of Information Science and Technology,"Nanjing University of Information Science and Technology, Nanjing, China","南京信息工程大学, 龙山北路, 第十六街区, 浦口区, 南京市, 江苏省, 210032, 中国",32.2068102,118.718472893883
+Nanjing University of Science and Technology,Nanjing University of Science and Technology,"南京理工大学, 友谊路, 余粮庄, 玄武区, 南京市, 江苏省, 210016, 中国",32.031826,118.852142742792
+Nanjing University of Science and Technology,"Nanjing University of Science and Technology, China","南京理工大学, 友谊路, 余粮庄, 玄武区, 南京市, 江苏省, 210016, 中国",32.031826,118.852142742792
+Nanjing University of Science and Technology,"Nanjing University of Science and Technology, Nanjing, China","南京理工大学, 友谊路, 余粮庄, 玄武区, 南京市, 江苏省, 210016, 中国",32.031826,118.852142742792
+Nantong University,Nantong University,"南通大学, 狼山镇街道, 崇川区 (Chongchuan), 南通市 / Nantong, 江苏省, 226000, 中国",31.9747463,120.907792637552
+Nantong University,"Nantong University, Nantong, China","南通大学, 狼山镇街道, 崇川区 (Chongchuan), 南通市 / Nantong, 江苏省, 226000, 中国",31.9747463,120.907792637552
+Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.3484104,103.682979653067
+Nanyang Technological University,"Nanyang Technological University, Singapore","NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.3484104,103.682979653067
+Nanyang Technological University,"Nanyang Technological University, Singapore 639798","NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.3484104,103.682979653067
+Nanyang Technological University,"Nanyang Technological University, Singapore 639798, Singapore","NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.3484104,103.682979653067
+Nanyang Technological University,"Nanyang Technological University, Singapore, 639798","NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.3484104,103.682979653067
+Nanyang Technological University,"Nanyang Technological University, Singapore, Singapore","NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.3484104,103.682979653067
+National Central University,National Central University,"NCU, 300, 中大路, 上三座屋, 五權里, 樹林子, 中壢區, 桃園市, 320, 臺灣",24.96841805,121.191396961005
+National Central University,"National Central University, Taoyuan County, Taiwan","NCU, 300, 中大路, 上三座屋, 五權里, 樹林子, 中壢區, 桃園市, 320, 臺灣",24.96841805,121.191396961005
+National Cheng Kung University,National Cheng Kung University,"成大, 1, 大學路, 大學里, 前甲, 東區, 臺南市, 70101, 臺灣",22.9991916,120.216251337909
+National Cheng Kung University,"National Cheng Kung University, Tainan, Taiwan","成大, 1, 大學路, 大學里, 前甲, 東區, 臺南市, 70101, 臺灣",22.9991916,120.216251337909
+National Chiao Tung University,National Chiao Tung University,"NCTU;交大;交通大學;交大光復校區;交通大學光復校區, 1001, 大學路, 光明里, 赤土崎, 東區, 新竹市, 30010, 臺灣",24.78676765,120.997244116807
+National Chiao Tung University,"National Chiao Tung University, Hsinchu, Taiwan","NCTU;交大;交通大學;交大光復校區;交通大學光復校區, 1001, 大學路, 光明里, 赤土崎, 東區, 新竹市, 30010, 臺灣",24.78676765,120.997244116807
+National Chiao Tung University,"National Chiao Tung University, Taiwan","NCTU;交大;交通大學;交大光復校區;交通大學光復校區, 1001, 大學路, 光明里, 赤土崎, 東區, 新竹市, 30010, 臺灣",24.78676765,120.997244116807
+National Chiao-Tung University,National Chiao-Tung University,"NCTU;交大;交通大學;交大光復校區;交通大學光復校區, 1001, 大學路, 光明里, 赤土崎, 東區, 新竹市, 30010, 臺灣",24.78676765,120.997244116807
+National Chung Cheng University,National Chung Cheng University,"國立中正大學, 168, 鳳凰大道, 民雄鄉, 嘉義縣, 62102, 臺灣",23.56306355,120.475105312324
+National Chung Cheng University,"National Chung Cheng University, Chiayi, Taiwan","國立中正大學, 168, 鳳凰大道, 民雄鄉, 嘉義縣, 62102, 臺灣",23.56306355,120.475105312324
+National Chung Hsing University,National Chung Hsing University,"國立中興大學, 145, 興大路, 積善里, 頂橋子頭, 南區, 臺中市, 402, 臺灣",24.12084345,120.675711652432
+National Chung Hsing University,"National Chung Hsing University, Taichung","國立中興大學, 145, 興大路, 積善里, 頂橋子頭, 南區, 臺中市, 402, 臺灣",24.12084345,120.675711652432
+National Chung Hsing University,"National Chung Hsing University, Taiwan","國立中興大學, 145, 興大路, 積善里, 頂橋子頭, 南區, 臺中市, 402, 臺灣",24.12084345,120.675711652432
+National Institute of Advanced Industrial Science and Technology,National Institute of Advanced Industrial Science and Technology,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本",36.05238585,140.118523607658
+National Institute of Standards and Technology,National Institute of Standards and Technology,"National Institute of Standards and Technology, Summer Walk Drive, Diamond Farms, Gaithersburg, Montgomery County, Maryland, 20878, USA",39.1254938,-77.2229347515
+"National Institute of Standards and Technology, Gaithersburg, MD 20899, USA","National Institute of Standards and Technology, Gaithersburg, MD 20899, USA","National Institute of Standards and Technology, Summer Walk Drive, Diamond Farms, Gaithersburg, Montgomery County, Maryland, 20878, USA",39.1254938,-77.2229347515
+National Institute of Technology Karnataka,National Institute of Technology Karnataka,"National Institute of Technology, Karnataka, NH66, ದಕ್ಷಿಣ ಕನ್ನಡ, Mangaluru taluk, Dakshina Kannada, Karnataka, 575025, India",13.01119095,74.7949882494716
+National Institute of Technology Rourkela,National Institute of Technology Rourkela,"National Institute of Technology, inside the department, Koel Nagar, Rourkela, Sundargarh, Odisha, 769002, India",22.2501589,84.9066855698087
+"National Institute of Technology, Durgapur, India","National Institute of Technology, Durgapur, India","National Institute Of Technology, Durgapur, Priyadarshini Indira Sarani, Durgapur, Bānkurā, West Bengal, 713209, India",23.54869625,87.291057119111
+"National Institute of Technology, Durgapur, West Bengal, India","National Institute of Technology, Durgapur, West Bengal, India","National Institute Of Technology, Durgapur, Priyadarshini Indira Sarani, Durgapur, Bānkurā, West Bengal, 713209, India",23.54869625,87.291057119111
+"National Institute of Technology, Rourkela (Odisha), India","National Institute of Technology, Rourkela (Odisha), India","National Institute of Technology, inside the department, Koel Nagar, Rourkela, Sundargarh, Odisha, 769002, India",22.2501589,84.9066855698087
+National Institutes of Health,National Institutes of Health,"NIH, Pooks Hill, Bethesda, Montgomery County, Maryland, USA",39.00041165,-77.1032777503325
+"National Institutes of Health, Bethesda, Maryland 20892","National Institutes of Health, Bethesda, Maryland 20892","NIH, Pooks Hill, Bethesda, Montgomery County, Maryland, USA",39.00041165,-77.1032777503325
+National Sun Yat Sen University,National Sun Yat Sen University,"國立中山大學, 70, 蓮海路, 桃源里, 柴山, 鼓山區, 高雄市, 804, 臺灣",22.62794005,120.266318480249
+National Sun Yat Sen University,"National Sun Yat Sen University, 804 Kaohsiung, Taiwan","國立中山大學, 70, 蓮海路, 桃源里, 柴山, 鼓山區, 高雄市, 804, 臺灣",22.62794005,120.266318480249
+National Taichung University of science and Technology,National Taichung University of science and Technology,"臺中科大, 129, 三民路三段, 錦平里, 賴厝廍, 北區, 臺中市, 40401, 臺灣",24.15031065,120.683255008879
+National Taichung University of science and Technology,"National Taichung University of science and Technology, Taichung","臺中科大, 129, 三民路三段, 錦平里, 賴厝廍, 北區, 臺中市, 40401, 臺灣",24.15031065,120.683255008879
+National Taipei University,National Taipei University,"國立臺北大學, 151, 大學路, 龍恩里, 隆恩埔, 三峽區, 新北市, 23741, 臺灣",24.94314825,121.368629787836
+National Taipei University of Technology,National Taipei University of Technology,"NTUT, 1, 忠孝東路三段, 民輝里, 東區商圈, 大安區, 臺北市, 10608, 臺灣",25.04306355,121.534687724212
+National Taipei University of Technology,"National Taipei University of Technology, Taipei, Taiwan","NTUT, 1, 忠孝東路三段, 民輝里, 東區商圈, 大安區, 臺北市, 10608, 臺灣",25.04306355,121.534687724212
+National Taiwan Normal University,National Taiwan Normal University,"師大分部, 88, 汀州路四段, 萬年里, 文山區, 臺北市, 11677, 臺灣",25.00823205,121.535771533186
+National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.538469235773
+National Taiwan University,"National Taiwan University, 10647, Taipei, Taiwan","臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.538469235773
+National Taiwan University,"National Taiwan University, Taipei, Taiwan","臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.538469235773
+National Taiwan University,"National Taiwan University, Taiwan","臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.538469235773
+National Taiwan University of Science and Technology,National Taiwan University of Science and Technology,"臺科大, 43, 基隆路四段, 學府里, 下內埔, 大安區, 臺北市, 10607, 臺灣",25.01353105,121.541737363138
+National Taiwan University of Science and Technology,"National Taiwan University of Science and Technology, Taipei 10607, Taiwan","臺科大, 43, 基隆路四段, 學府里, 下內埔, 大安區, 臺北市, 10607, 臺灣",25.01353105,121.541737363138
+National Taiwan University of Science and Technology,"National Taiwan University of Science and Technology, Taipei, Taiwan","臺科大, 43, 基隆路四段, 學府里, 下內埔, 大安區, 臺北市, 10607, 臺灣",25.01353105,121.541737363138
+National Technical University of Athens,National Technical University of Athens,"Εθνικό Μετσόβιο Πολυτεχνείο, Στουρνάρη, Μουσείο, Αθήνα, Δήμος Αθηναίων, Π.Ε. Κεντρικού Τομέα Αθηνών, Περιφέρεια Αττικής, Αττική, 11250, Ελλάδα",37.98782705,23.7317973260904
+National Tsing Hua University,National Tsing Hua University,"國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.7925484,120.9951183
+National Tsing Hua University,"National Tsing Hua University, Hsinchu, Taiwan","國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.7925484,120.9951183
+National Tsing Hua University,"National Tsing Hua University, Taiwan","國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.7925484,120.9951183
+National University,National University,"National University, M.F. Jocson, Royal Plaza, Sampaloc, Fourth District, Manila, Metro Manila, 1008, Philippines",14.6042947,120.994285201104
+National University of Defense Technology,National University of Defense Technology,"国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.2290209,112.994832044032
+National University of Defense Technology,"National University of Defense Technology, Changsha 410073, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.2290209,112.994832044032
+National University of Defense Technology,"National University of Defense Technology, Changsha, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.2290209,112.994832044032
+National University of Defense and Technology,National University of Defense and Technology,"国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.2290209,112.994832044032
+National University of Ireland Galway,National University of Ireland Galway,"National University of Ireland, Galway, Earl's Island, Townparks, Nun's Island, Galway Municipal District, Cathair na Gaillimhe, County Galway, Connacht, H91 F5TE, Ireland",53.27639715,-9.05829960688327
+National University of Ireland Galway,"National University of Ireland Galway, Galway, Ireland","National University of Ireland, Galway, Earl's Island, Townparks, Nun's Island, Galway Municipal District, Cathair na Gaillimhe, County Galway, Connacht, H91 F5TE, Ireland",53.27639715,-9.05829960688327
+National University of Ireland Maynooth,National University of Ireland Maynooth,"National University of Ireland Maynooth, River Apartments, Maynooth, Maynooth ED, Maynooth Municipal District, County Kildare, Leinster, KILDARE, Ireland",53.3846975,-6.60039458177959
+National University of Ireland Maynooth,"National University of Ireland Maynooth, Co. Kildare, Ireland","National University of Ireland Maynooth, River Apartments, Maynooth, Maynooth ED, Maynooth Municipal District, County Kildare, Leinster, KILDARE, Ireland",53.3846975,-6.60039458177959
+National University of Kaohsiung,National University of Kaohsiung,"國立高雄大學, 中央廣場, 藍田, 藍田里, 楠梓區, 高雄市, 811, 臺灣",22.73424255,120.283497550993
+National University of Kaohsiung,"National University of Kaohsiung, 811 Kaohsiung, Taiwan","國立高雄大學, 中央廣場, 藍田, 藍田里, 楠梓區, 高雄市, 811, 臺灣",22.73424255,120.283497550993
+National University of Science and Technology,National University of Science and Technology,"National University of Science and Technology, Indus Loop, H-11, ICT, وفاقی دارالحکومت اسلام آباد, 44000, ‏پاکستان‎",33.6450855,72.9915892221655
+National University of Sciences and Technology (NUST),National University of Sciences and Technology (NUST),"National University of Sciences and Technology (NUST), Kashmir Highway, جی - 10, ICT, وفاقی دارالحکومت اسلام آباد, 44000, ‏پاکستان‎",33.644347,72.9885079
+National University of Sciences and Technology (NUST),"National University of Sciences and Technology (NUST), Islamabad, Pakistan","National University of Sciences and Technology (NUST), Kashmir Highway, جی - 10, ICT, وفاقی دارالحکومت اسلام آباد, 44000, ‏پاکستان‎",33.644347,72.9885079
+National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.2962018,103.776899437848
+National University of Singapore,"National University of Singapore, Singapore","NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.2962018,103.776899437848
+National University of Singapore,"National University of Singapore, Singapore 117576","NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.2962018,103.776899437848
+National University of Singapore,"National University of Singapore, Singapore, Singapore","NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.2962018,103.776899437848
+National University of Technology Technology,National University of Technology Technology,"الجامعة التكنلوجية, A86;N11;D383, محلة 103, Al Saadoom Park, Rusafa, بغداد, Al Resafa, محافظة بغداد, 3241, العراق",33.3120263,44.4471829434368
+National University of singapore,National University of singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.2962018,103.776899437848
+"Naval Research Laboratory, Washington DC","Naval Research Laboratory, Washington DC","Naval Research Laboratory Post Office, 4555, Overlook Avenue Southwest, Washington, D.C., 20375, USA",38.8231381,-77.0178902
+Nazarbayev University,Nazarbayev University,"Назарбаев Университет, проспект Туран, BI village, Астана, район Есиль, Астана, 010000, Казахстан",51.0902854,71.3972526281434
+Nazarbayev University,"Nazarbayev University, Astana, Kazakhstan","Назарбаев Университет, проспект Туран, BI village, Астана, район Есиль, Астана, 010000, Казахстан",51.0902854,71.3972526281434
+"Neurological Institute, USA","Neurological Institute, USA","Neurological Institute of New York, Haven Avenue, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10032, USA",40.84211085,-73.9428460313244
+New Jersey Institute of Technology,New Jersey Institute of Technology,"New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA",40.7423025,-74.1792817237128
+"New Jersey Institute of Technology, Newark , NJ, USA","New Jersey Institute of Technology, Newark , NJ, USA","New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA",40.7423025,-74.1792817237128
+"New Jersey Institute of Technology, Newark, USA","New Jersey Institute of Technology, Newark, USA","New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA",40.7423025,-74.1792817237128
+"New Jersey Institute of Technology, USA","New Jersey Institute of Technology, USA","New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA",40.7423025,-74.1792817237128
+New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.9962539360963
+Newcastle University,Newcastle University,"Newcastle University, Claremont Walk, Haymarket, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE1 7RU, UK",54.98023235,-1.61452627035949
+Newcastle University,"Newcastle University, Newcastle upon Tyne","Newcastle University, Claremont Walk, Haymarket, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE1 7RU, UK",54.98023235,-1.61452627035949
+Normal University,Normal University,"云南师范大学, 一二一大街, 志城家园, 五华区, 五华区 (Wuhua), 昆明市 (Kunming), 云南省, 650030, 中国",25.0580509,102.6955241
+Normal University,"Normal University, Kunming, China","云南师范大学, 一二一大街, 志城家园, 五华区, 五华区 (Wuhua), 昆明市 (Kunming), 云南省, 650030, 中国",25.0580509,102.6955241
+"North Acton, London","North Acton, London","North Acton, Victoria Road, Acton, London Borough of Ealing, London, Greater London, England, W3 6UP, UK",51.52344665,-0.259735350000002
+North Carolina Central University,North Carolina Central University,"North Carolina Central University, George Street, Hayti, Durham, Durham County, North Carolina, 27707, USA",35.97320905,-78.897550537484
+North Carolina State University,North Carolina State University,"North Carolina State University, Oval Drive, West Raleigh, Raleigh, Wake County, North Carolina, 27695, USA",35.77184965,-78.6740869545263
+North Carolina State University,"North Carolina State University, Raleigh, United States of America","North Carolina State University, Oval Drive, West Raleigh, Raleigh, Wake County, North Carolina, 27695, USA",35.77184965,-78.6740869545263
+North China Electric Power University,North China Electric Power University,"华北电力大学, 永华北大街, 莲池区, 保定市, 莲池区 (Lianchi), 保定市, 河北省, 071000, 中国",38.8760446,115.4973873
+North China Electric Power University,"North China Electric Power University, Baoding, China","华北电力大学, 永华北大街, 莲池区, 保定市, 莲池区 (Lianchi), 保定市, 河北省, 071000, 中国",38.8760446,115.4973873
+North Dakota State University,North Dakota State University,"North Dakota State University, 15th Avenue North, Fargo, Cass County, North Dakota, 58102, USA",46.897155,-96.8182760282419
+North Dakota State University,"North Dakota State University, Fargo, ND 58108-6050, USA","North Dakota State University, 15th Avenue North, Fargo, Cass County, North Dakota, 58102, USA",46.897155,-96.8182760282419
+Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.3383668,-71.0879352428284
+Northeastern University,"Northeastern University, Boston, MA","Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.3383668,-71.0879352428284
+Northeastern University,"Northeastern University, Boston, MA, USA","Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.3383668,-71.0879352428284
+Northeastern University,"Northeastern University, Boston, USA","Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.3383668,-71.0879352428284
+Northeastern University,"Northeastern University, Boston, USA, 02115","Northeastern University, Public Alley 807, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.34255795,-71.0905490240477
+Northeastern University,"Northeastern University, MA, USA","Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.3383668,-71.0879352428284
+Northumbria University,Northumbria University,"Northumbria University, Birkdale Close, High Heaton, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE7 7TP, UK",55.0030632,-1.57463231052026
+Northumbria University,"Northumbria University, Newcastle Upon Tyne, Tyne and Wear","Northumbria University, Birkdale Close, High Heaton, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE7 7TP, UK",55.0030632,-1.57463231052026
+Northumbria University,"Northumbria University, Newcastle upon Tyne, NE1 8ST, UK","Northumbria University, Northumberland Road, Cradlewell, Haymarket, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE1 8SG, UK",54.9781026,-1.6067699
+Northumbria University,"Northumbria University, Newcastle upon Tyne, U.K.","Northumbria University, Birkdale Close, High Heaton, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE7 7TP, UK",55.0030632,-1.57463231052026
+Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.2469152,108.910619816771
+Northwestern Polytechnical University,"Northwestern Polytechnical University, Xian 710072, Shaanxi, China","西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.2469152,108.910619816771
+Northwestern Polytechnical University,"Northwestern Polytechnical University, Xi’an, China","西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.2469152,108.910619816771
+Northwestern University,Northwestern University,"Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA",42.0551164,-87.6758111348217
+Northwestern University,"Northwestern University, Evanston, IL, USA","Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA",42.0551164,-87.6758111348217
+Nottingham Trent University,Nottingham Trent University,"Nottingham Trent University, Waverley Terrace, Lace Market, The Park, City of Nottingham, East Midlands, England, NG1 5JD, UK",52.9577322,-1.15617099267709
+Nottingham Trent University,"Nottingham Trent University, Nottingham, UK","Nottingham Trent University, Waverley Terrace, Lace Market, The Park, City of Nottingham, East Midlands, England, NG1 5JD, UK",52.9577322,-1.15617099267709
+Nottingham University Hospital,Nottingham University Hospital,"Nottingham University Hospital, Central Route, Dunkirk, Wollaton, City of Nottingham, East Midlands, England, NG7 2UH, UK",52.9434967,-1.18631123153121
+Nottingham University Hospital,"Nottingham University Hospital, Nottingham, UK","Nottingham University Hospital, Central Route, Dunkirk, Wollaton, City of Nottingham, East Midlands, England, NG7 2UH, UK",52.9434967,-1.18631123153121
+OF PRINCETON UNIVERSITY,OF PRINCETON UNIVERSITY,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325
+OF STANFORD UNIVERSITY,OF STANFORD UNIVERSITY,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.169365354983
+"Oak Ridge National Laboratory, USA","Oak Ridge National Laboratory, USA","Oak Ridge National Laboratory, Oak Ridge, Roane County, Tennessee, USA",35.93006535,-84.3124003215133
+Oakland University,Oakland University,"Oakland University, 201, Meadow Brook Road, Rochester Hills, Oakland County, Michigan, 48309-4401, USA",42.66663325,-83.2065575175658
+Ocean University of China,Ocean University of China,"中国海洋大学, 238, 松岭路 Sōnglǐng Road, 朱家洼, 崂山区 (Laoshan), 青岛市, 山东省, 266100, 中国",36.16161795,120.493552763931
+Ocean University of China,"Ocean University of China, Qingdao, China","中国海洋大学, 238, 松岭路 Sōnglǐng Road, 朱家洼, 崂山区 (Laoshan), 青岛市, 山东省, 266100, 中国",36.16161795,120.493552763931
+Okayama University,Okayama University,"岡山大学, 津高法界院停車場線, 津島東2, 津島東, 北区, 岡山市, 岡山県, 中国地方, 700-0081, 日本",34.6893393,133.9222272
+Okayama University,"Okayama University, Okayama, Japan","岡山大学, 津高法界院停車場線, 津島東2, 津島東, 北区, 岡山市, 岡山県, 中国地方, 700-0081, 日本",34.6893393,133.9222272
+Oklahoma State University,Oklahoma State University,"Walmart East Bus Stop, East Virginia Avenue, Stillwater, Payne County, Oklahoma, 74075, USA",36.1244756,-97.050043825
+Oklahoma State University,"Oklahoma State University, Stillwater, OK, USA","Walmart East Bus Stop, East Virginia Avenue, Stillwater, Payne County, Oklahoma, 74075, USA",36.1244756,-97.050043825
+Old Dominion University,Old Dominion University,"Old Dominion University, Elkhorn Avenue, Lamberts Point, Norfolk, Virginia, 23508, USA",36.885682,-76.3076857937011
+Old Dominion University,"Old Dominion University, Norfolk, VA 23529, USA","Old Dominion University, Elkhorn Avenue, Lamberts Point, Norfolk, Virginia, 23508, USA",36.885682,-76.3076857937011
+Old Dominion University,"Old Dominion University, Norfolk, VA, 23529","Old Dominion University, Elkhorn Avenue, Lamberts Point, Norfolk, Virginia, 23508, USA",36.885682,-76.3076857937011
+Open University of Israel,Open University of Israel,"האוניברסיטה הפתוחה, 15, אבא חושי, חיפה, גבעת דאונס, חיפה, מחוז חיפה, NO, ישראל",32.77824165,34.9956567288188
+"Orange Labs, R&D, Meylan, France","Orange Labs, R&D, Meylan, France","Orange Labs, 28, Chemin du Vieux Chêne, Inovallée Meylan, Le Mas du Bruchet, Meylan, Grenoble, Isère, Auvergne-Rhône-Alpes, France métropolitaine, 38240, France",45.21011775,5.79551075456301
+Oregon State University,Oregon State University,"OSU Beaver Store, 538, Southwest 6th Avenue, Portland Downtown, Portland, Multnomah County, Oregon, 97204, USA",45.5198289,-122.677979643331
+Osaka university,Osaka university,"大阪大学清明寮, 服部西町四丁目, 豊中市, 大阪府, 近畿地方, 日本",34.80809035,135.45785218408
+Osaka university,"Osaka university, Japan","大阪大学清明寮, 服部西町四丁目, 豊中市, 大阪府, 近畿地方, 日本",34.80809035,135.45785218408
+Otto von Guericke University,Otto von Guericke University,"Otto-von-Guericke-Universität Magdeburg, 2, Universitätsplatz, Krökentorviertel/Breiter Weg NA, Alte Neustadt, Magdeburg, Sachsen-Anhalt, 39106, Deutschland",52.14005065,11.6447124822347
+Otto-von-Guericke University Magdeburg,Otto-von-Guericke University Magdeburg,"Otto-von-Guericke-Universität Magdeburg, 2, Universitätsplatz, Krökentorviertel/Breiter Weg NA, Alte Neustadt, Magdeburg, Sachsen-Anhalt, 39106, Deutschland",52.14005065,11.6447124822347
+Oxford Brookes University,Oxford Brookes University,"Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK",51.7555205,-1.2261597
+Oxford Brookes University,"Oxford Brookes University, Oxford, United Kingdom","Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK",51.7555205,-1.2261597
+Oxford University,Oxford University,"University College, Logic Lane, Grandpont, Oxford, Oxon, South East, England, OX1 4EX, UK",51.7520849,-1.25166460220888
+Oxford University,"Oxford University, UK","James Mellon Hall, Rectory Road, New Marston, Oxford, Oxon, South East, England, OX4 1BU, UK",51.7488051,-1.23874457456279
+"PA, 15213, USA","PA, 15213, USA","Pa, North Monmouth, Kennebec County, Maine, 04265, USA",44.289627,-70.042577
+"POSTECH, Pohang, South Korea, 37673","POSTECH, Pohang, South Korea, 37673","포스텍, 77, 청암로, 효곡동, 남구, 포항시, 경북, 37673, 대한민국",36.01773095,129.321075092352
+"PSG College of Technology, Coimbatore, Tamil Nadu, India","PSG College of Technology, Coimbatore, Tamil Nadu, India","PSG College of Technology, Avinashi Road, Ward 38, North Zone, Coimbatore, Coimbatore district, Tamil Nadu, 641001, India",11.0246833,77.0028424564731
+Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.9922379,116.303938156219
+Peking University,"Peking University, Beijing","北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.9922379,116.303938156219
+Peking University,"Peking University, Beijing 100871, China","北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.9922379,116.303938156219
+Peking University,"Peking University, Beijing, China","北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.9922379,116.303938156219
+Pennsylvania,Pennsylvania,"Pennsylvania, USA",40.9699889,-77.7278831
+"Perth, Western Australia 6012","Perth, Western Australia 6012","Perth, Western Australia, 6000, Australia",-31.9527121,115.8604796
+Philipps-Universität Marburg,Philipps-Universität Marburg,"FB 09 | Germanistik und Kunstwissenschaften (Dekanat), 3, Deutschhausstraße, Biegenhausen, Biegenviertel, Marburg, Landkreis Marburg-Biedenkopf, Regierungsbezirk Gießen, Hessen, 35037, Deutschland",50.8142701,8.771435
+Philipps-Universität Marburg,"Philipps-Universität Marburg, D-35032, Germany","FB 09 | Germanistik und Kunstwissenschaften (Dekanat), 3, Deutschhausstraße, Biegenhausen, Biegenviertel, Marburg, Landkreis Marburg-Biedenkopf, Regierungsbezirk Gießen, Hessen, 35037, Deutschland",50.8142701,8.771435
+"Pittsburgh Univ., PA, USA","Pittsburgh Univ., PA, USA","WQEX-TV (Pittsburgh);WQED-TV (Pittsburgh);WQED-FM (Pittsburgh);WINP-TV (Pittsburgh);WEPA-CD (Pittsburgh), 3801, University Drive, North Oakland, PGH, Allegheny County, Pennsylvania, 15213, USA",40.4462779,-79.9637743112056
+Plymouth University,Plymouth University,"Plymouth University, Portland Square, Barbican, Plymouth, South West England, England, PL4 6AP, UK",50.3755269,-4.13937687442817
+Pohang University of Science and Technology,Pohang University of Science and Technology,"포스텍, 77, 청암로, 효곡동, 남구, 포항시, 경북, 37673, 대한민국",36.01773095,129.321075092352
+Pohang University of Science and Technology,"Pohang University of Science and Technology, Pohang, Korea","포스텍, 77, 청암로, 효곡동, 남구, 포항시, 경북, 37673, 대한민국",36.01773095,129.321075092352
+Pohang University of Science and Technology (POSTECH),Pohang University of Science and Technology (POSTECH),"포스텍, 77, 청암로, 효곡동, 남구, 포항시, 경북, 37673, 대한민국",36.01773095,129.321075092352
+Pohang University of Science and Technology (POSTECH),"Pohang University of Science and Technology (POSTECH), Pohang, Republic of Korea","포스텍, 77, 청암로, 효곡동, 남구, 포항시, 경북, 37673, 대한민국",36.01773095,129.321075092352
+Pohang University of Science and Technology (POSTECH),"Pohang University of Science and Technology (POSTECH), South Korea","포스텍, 77, 청암로, 효곡동, 남구, 포항시, 경북, 37673, 대한민국",36.01773095,129.321075092352
+"Politecnico di Torino, Italy","Politecnico di Torino, Italy","Politecnico di Torino, Corso Castelfidardo, Crocetta, Circoscrizione 3, Torino, TO, PIE, 10129, Italia",45.0636974,7.65752730185847
+"Politecnico di Torino, Torino, Italy","Politecnico di Torino, Torino, Italy","Politecnico di Torino, Corso Castelfidardo, Crocetta, Circoscrizione 3, Torino, TO, PIE, 10129, Italia",45.0636974,7.65752730185847
+Politehnica University of Timisoara,Politehnica University of Timisoara,"UPT, Bulevardul Vasile Pârvan, Elisabetin, Timișoara, Timiș, 300223, România",45.746189,21.2275507517647
+Pondicherry Engineering College,Pondicherry Engineering College,"Pondicherry Engineering College, PEC MAIN ROAD, Sri Ma, Puducherry, Puducherry district, Puducherry, 605001, India",12.0148693,79.8480910431981
+Pontificia Universidad Catolica de Chile,Pontificia Universidad Catolica de Chile,"Pontificia Universidad Católica de Chile - Campus Lo Contador, 1916, El Comendador, Pedro de Valdivia Norte, Providencia, Provincia de Santiago, Región Metropolitana de Santiago, 7500000, Chile",-33.41916095,-70.6178224038096
+Portland State University,Portland State University,"Portland State University, Southwest Park Avenue, University District, Portland Downtown, Portland, Multnomah County, Oregon, 97201, USA",45.51181205,-122.684929993829
+Portland State University,"Portland State University, USA","Portland State University, Southwest Park Avenue, University District, Portland Downtown, Portland, Multnomah County, Oregon, 97201, USA",45.51181205,-122.684929993829
+Poznan University of Technology,Poznan University of Technology,"Dom Studencki nr 3, 3, Kórnicka, Święty Roch, Rataje, Poznań, wielkopolskie, 61-141, RP",52.4004837,16.9515808278647
+Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325
+Princeton University,"Princeton University, Princeton, NJ, USA","Lot 25, Ivy Lane, Princeton Township, Mercer County, New Jersey, 08544, USA",40.34725815,-74.6513455119257
+Princeton University,"Princeton University, Princeton, New Jersey, USA","Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325
+"Pune Institute of Computer Technology, Pune, ( India","Pune Institute of Computer Technology, Pune, ( India","Pune Institute of Computer Technology, Mediacal College Road, Vadgaon Budruk, Katraj, Pune, Pune District, Maharashtra, 411043, India",18.4575638,73.8507352
+Punjabi University Patiala,Punjabi University Patiala,"Punjabi University Patiala, Rajpura Road, Patiala, Punjab, 147001, India",30.3568981,76.4551272
+Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.4319722,-86.923893679845
+Purdue University,"Purdue University, West Lafayette, IN 47907, USA","Mathematical Sciences Library, 105, University Street, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.4262569,-86.9157551
+Purdue University,"Purdue University, West Lafayette, IN, USA","Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.4319722,-86.923893679845
+Purdue University,"Purdue University, West Lafayette, IN. 47907, USA","Mathematical Sciences Library, 105, University Street, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.4262569,-86.9157551
+Purdue University,"Purdue University, West Lafayette, Indiana, 47906, USA","Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.4319722,-86.923893679845
+Qatar University,Qatar University,"Qatar University, Roindabout 3, Al Tarfa (68), أم صلال, 24685, ‏قطر‎",25.37461295,51.4898035392337
+Qatar University,"Qatar University, Doha, Qatar","Qatar University, Roindabout 3, Al Tarfa (68), أم صلال, 24685, ‏قطر‎",25.37461295,51.4898035392337
+Qatar University,"Qatar University, Qatar","Qatar University, Roindabout 3, Al Tarfa (68), أم صلال, 24685, ‏قطر‎",25.37461295,51.4898035392337
+Quanzhou Normal University,Quanzhou Normal University,"泉州师范学院, 东滨路, 丰泽区, 丰泽区 (Fengze), 泉州市 / Quanzhou, 福建省, 362000, 中国",24.87147415,118.667386868962
+Quanzhou Normal University,"Quanzhou Normal University, Quanzhou, China","泉州师范学院, 东滨路, 丰泽区, 丰泽区 (Fengze), 泉州市 / Quanzhou, 福建省, 362000, 中国",24.87147415,118.667386868962
+Queen Mary University,Queen Mary University,"Universitatea Creștină Partium - Clădirea Sulyok, 27, Strada Primăriei, Orașul Nou, Oradea, Bihor, 410209, România",47.0570222,21.922709
+Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.5247272,-0.0393103466301624
+Queen Mary University of London,"Queen Mary University of London, London","Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.5247272,-0.0393103466301624
+Queen Mary University of London,"Queen Mary University of London, London E1 4NS, UK","Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.5247272,-0.0393103466301624
+Queen Mary University of London,"Queen Mary University of London, London, U.K.","Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.5247272,-0.0393103466301624
+Queen Mary University of London,"Queen Mary University of London, UK","Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.5247272,-0.0393103466301624
+Queensland University of Technology,Queensland University of Technology,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.47715625,153.028410039129
+Queensland University of Technology,"Queensland University of Technology, Australia","Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.47715625,153.028410039129
+Queensland University of Technology,"Queensland University of Technology, Brisbane, QLD, Australia","Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.47715625,153.028410039129
+Queensland University of Technology (QUT,Queensland University of Technology (QUT,"QUT Gardens Point Main Library, V, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.4770485,153.028373791304
+Queensland University of Technology(QUT,Queensland University of Technology(QUT,"QUT Gardens Point Main Library, V, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.4770485,153.028373791304
+"R V College of Engineering, Bangalore, India","R V College of Engineering, Bangalore, India","R. V. College of Engineering, Bangalore-Mysore Road, Kengeri, Rajarajeshwari Nagar Zone, Bengaluru, Bangalore Urban, Karnataka, 560059, India",12.9231039,77.5006395299617
+RMIT University,RMIT University,"RMIT University, 124, La Trobe Street, Melbourne City, City of Melbourne, Victoria, 3000, Australia",-37.8087465,144.9638875
+RMIT University,"RMIT University, Australia","RMIT University, 124, La Trobe Street, Melbourne City, City of Melbourne, Victoria, 3000, Australia",-37.8087465,144.9638875
+RMIT University,"RMIT University, Melbourne, Australia","RMIT University, 124, La Trobe Street, Melbourne City, City of Melbourne, Victoria, 3000, Australia",-37.8087465,144.9638875
+RMIT University,"RMIT University, Melbourne, VIC, Australia","RMIT University, 124, La Trobe Street, Melbourne City, City of Melbourne, Victoria, 3000, Australia",-37.8087465,144.9638875
+RMIT University,"RMIT University, Vietnam","RMIT University Vietnam - Saigon South Campus, 702, Nguyễn Văn Linh, Khu 3 - Khu Đại học, Phường Tân Phong, Quận 7, Tp HCM, 756604, Việt Nam",10.72991265,106.693208239997
+RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.7791703,6.06728732851292
+RWTH Aachen University,"RWTH Aachen University, Aachen, Germany","RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.7791703,6.06728732851292
+Raipur institute of technology,Raipur institute of technology,"Raipur institute of technology, NH53, Raipur, Chhattisgarh, 492101, India",21.2262243,81.8013664
+"Rajasthan, India","Rajasthan, India","Rajasthan, India",26.8105777,73.7684549
+Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.7298459,-73.6795021620135
+"Rensselaer Polytechnic Institute, Troy, NY 12180, USA","Rensselaer Polytechnic Institute, Troy, NY 12180, USA","Rensselaer Polytechnic Institute, Tibbits Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.73280325,-73.6622354488153
+"Rensselaer Polytechnic Institute, USA","Rensselaer Polytechnic Institute, USA","Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.7298459,-73.6795021620135
+Research Center,Research Center,"مركز البحوث, طريق تركي الأول بن عبدالعزيز آل سعود, المحمدية, Al Muhammadiyah District حي المحمدية, Al Maather Municipality, الرياض, منطقة الرياض, 12371, السعودية",24.7261991,46.6365468966391
+Reutlingen University,Reutlingen University,"Campus Hohbuch, Campus Hochschule Reutlingen, Reutlingen, Landkreis Reutlingen, Regierungsbezirk Tübingen, Baden-Württemberg, 72762, Deutschland",48.48187645,9.18682403998887
+"RheinAhrCampus der Hochschule Koblenz, Remagen, Germany","RheinAhrCampus der Hochschule Koblenz, Remagen, Germany","RheinAhrCampus, 2, Joseph-Rovan-Allee, Remagen, Landkreis Ahrweiler, Rheinland-Pfalz, 53424, Deutschland",50.5722562,7.25318610053143
+Rheinische-Friedrich-Wilhelms University,Rheinische-Friedrich-Wilhelms University,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.7338124,7.1022465
+Rheinische-Friedrich-Wilhelms University,"Rheinische-Friedrich-Wilhelms University, Bonn, Germany","Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.7338124,7.1022465
+Rice University,Rice University,"Rice University, Stockton Drive, Houston, Harris County, Texas, 77005-1890, USA",29.71679145,-95.4047811339379
+Rice University,"Rice University, Houston, TX, 77005, USA","Rice University, Stockton Drive, Houston, Harris County, Texas, 77005-1890, USA",29.71679145,-95.4047811339379
+Rio de Janeiro State University,Rio de Janeiro State University,"UERJ, 524, Rua São Francisco Xavier, Maracanã, Zona Norte do Rio de Janeiro, Rio de Janeiro, Microrregião Rio de Janeiro, Região Metropolitana do Rio de Janeiro, RJ, Região Sudeste, 20550-900, Brasil",-22.91117105,-43.2357797110467
+Rio de Janeiro State University,"Rio de Janeiro State University, Brazil","UERJ, 524, Rua São Francisco Xavier, Maracanã, Zona Norte do Rio de Janeiro, Rio de Janeiro, Microrregião Rio de Janeiro, Região Metropolitana do Rio de Janeiro, RJ, Região Sudeste, 20550-900, Brasil",-22.91117105,-43.2357797110467
+Ritsumeikan University,Ritsumeikan University,"立命館大学 (Ritsumeikan University), 衣笠宇多野線, 北区, 京都市, 京都府, 近畿地方, 6038577, 日本",35.0333281,135.7249154
+Ritsumeikan University,"Ritsumeikan University, Japan","立命館大学 (Ritsumeikan University), 衣笠宇多野線, 北区, 京都市, 京都府, 近畿地方, 6038577, 日本",35.0333281,135.7249154
+Ritsumeikan University,"Ritsumeikan University, Kyoto, Japan","立命館大学 (Ritsumeikan University), 衣笠宇多野線, 北区, 京都市, 京都府, 近畿地方, 6038577, 日本",35.0333281,135.7249154
+Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.494231705059
+Rochester Institute of Technology,Rochester Institute of Technology,"Rochester Institute of Technology (RIT), 1, Lomb Memorial Drive, Bailey, Henrietta Town, Monroe County, New York, 14623, USA",43.08250655,-77.6712166264273
+Rowan University,Rowan University,"Rowan University, Esbjornson Walk, Glassboro, Gloucester County, New Jersey, 08028, USA",39.7103526,-75.1193266647699
+Rowan University,"Rowan University, Glassboro, NJ- 08028","Wellness Center (Winans Hall), Mullica Hill Road, Beau Rivage, Glassboro, Gloucester County, New Jersey, 08028:08062, USA",39.7082432,-75.1170342529732
+Rowland Institute,Rowland Institute,"Rowland Research Institute, Land Boulevard, East Cambridge, Cambridge, Middlesex County, Massachusetts, 02142, USA",42.3639862,-71.0778293
+Ruhr University Bochum,Ruhr University Bochum,"RUB, 150, Universitätsstraße, Ruhr-Universität, Querenburg, Bochum-Süd, Bochum, Regierungsbezirk Arnsberg, Nordrhein-Westfalen, 44801, Deutschland",51.44415765,7.26096541306078
+Ruhr-University Bochum,Ruhr-University Bochum,"RUB, 150, Universitätsstraße, Ruhr-Universität, Querenburg, Bochum-Süd, Bochum, Regierungsbezirk Arnsberg, Nordrhein-Westfalen, 44801, Deutschland",51.44415765,7.26096541306078
+Ruhr-University Bochum,"Ruhr-University Bochum, Germany","RUB, 150, Universitätsstraße, Ruhr-Universität, Querenburg, Bochum-Süd, Bochum, Regierungsbezirk Arnsberg, Nordrhein-Westfalen, 44801, Deutschland",51.44415765,7.26096541306078
+Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.431688684404
+Rutgers University,"Rutgers University, New Brunswick, NJ","Zimmerli Art Museum, 71, Hamilton Street, New Brunswick, Middlesex County, New Jersey, 08901-1248, USA",40.50007595,-74.4457915242934
+Rutgers University,"Rutgers University, Newark, NJ, USA","Dana Library, Bleeker Street, Teachers Village, Newark, Essex County, New Jersey, 07102, USA",40.7417586,-74.1750462269524
+Rutgers University,"Rutgers University, Piscataway","James Dickson Carr Library, 75, Avenue E, Piscataway Township, Middlesex County, New Jersey, 08854-8040, USA",40.52251655,-74.4373851411688
+Rutgers University,"Rutgers University, Piscataway NJ 08854, USA","The Rock Cafe, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA",40.5234675,-74.436975
+Rutgers University,"Rutgers University, Piscataway, NJ","The Rock Cafe, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA",40.5234675,-74.436975
+Rutgers University,"Rutgers University, Piscataway, NJ 08854, USA","The Rock Cafe, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA",40.5234675,-74.436975
+Rutgers University,"Rutgers University, Piscataway, NJ, USA","The Rock Cafe, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA",40.5234675,-74.436975
+Rutgers University,"Rutgers University, Piscataway, New Jersey 08854, USA","The Rock Cafe, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA",40.5234675,-74.436975
+Rutgers University,"Rutgers University, USA","Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.431688684404
+Ryerson University,Ryerson University,"Ryerson University, Gould Street, Downtown Yonge, Old Toronto, Toronto, Ontario, M5B 2G9, Canada",43.65815275,-79.3790801045263
+Ryerson University,"Ryerson University, Canada","Ryerson University, Gould Street, Downtown Yonge, Old Toronto, Toronto, Ontario, M5B 2G9, Canada",43.65815275,-79.3790801045263
+Ryerson University,"Ryerson University, Toronto, ON, Canada","Ryerson University, Gould Street, Downtown Yonge, Old Toronto, Toronto, Ontario, M5B 2G9, Canada",43.65815275,-79.3790801045263
+SASTRA University,SASTRA University,"SASTRA University, SRC Campus, Big Bazaar Street, கும்பகோணம், Thanjavur district, Tamil Nadu, 612001, India",10.9628655,79.3853065130097
+SASTRA University,"SASTRA University, Thanjavur, Tamil Nadu, India","SASTRA University, SRC Campus, Big Bazaar Street, கும்பகோணம், Thanjavur district, Tamil Nadu, 612001, India",10.9628655,79.3853065130097
+SIMON FRASER UNIVERSITY,SIMON FRASER UNIVERSITY,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.2767454,-122.917773749103
+"SRI International, Menlo Park, USA","SRI International, Menlo Park, USA","SRI International Building, West 1st Street, Menlo Park, San Mateo County, California, 94025, USA",37.4585796,-122.17560525105
+SUNY Buffalo,SUNY Buffalo,"SUNY College at Buffalo, Academic Drive, Elmwood Village, Buffalo, Erie County, New York, 14222, USA",42.9336278,-78.8839447903448
+Sabanci University,Sabanci University,"Sabanci Universitesi, Preveze Cad., Orta Mahallesi, Tepeören, Tuzla, İstanbul, Marmara Bölgesi, 34953, Türkiye",40.8927159,29.3786332263582
+Sakarya University,Sakarya University,"Sakarya Üniversitesi Diş Hekimliği Fakültesi, Adnan Menderes Caddesi, Güneşler, Adapazarı, Sakarya, Marmara Bölgesi, 54050, Türkiye",40.76433515,30.3940787517111
+San Jose State University,San Jose State University,"SJSU, El Paseo de Cesar E. Chavez, Downtown Historic District, Japantown, San José, Santa Clara County, California, 95113, USA",37.3351908,-121.881260081527
+San Jose State University,"San Jose State University, San Jose, CA","SJSU, El Paseo de Cesar E. Chavez, Downtown Historic District, Japantown, San José, Santa Clara County, California, 95113, USA",37.3351908,-121.881260081527
+Santa Clara University,Santa Clara University,"Cowell Center, Accolti Way, Santa Clara, Santa Clara County, California, 95053, USA",37.34820285,-121.935635412063
+Santa Clara University,"Santa Clara University, Santa Clara, CA. 95053, USA","Cowell Center, Accolti Way, Santa Clara, Santa Clara County, California, 95053, USA",37.34820285,-121.935635412063
+Santa Fe Institute,Santa Fe Institute,"Santa Fe Institute, Hyde Park Road, Santa Fe, Santa Fe County, New Mexico, 87501, USA",35.7002878,-105.908648471331
+Selçuk University,Selçuk University,"Selçuk Üniversitesi, Ali Fuat Cebesoy Cad., Ardıçlı Mahallesi, Konya, Selçuklu, Konya, İç Anadolu Bölgesi, Türkiye",38.02420685,32.5057052418378
+Selçuk University,"Selçuk University, Konya, Turkey","Selçuk Üniversitesi, Ali Fuat Cebesoy Cad., Ardıçlı Mahallesi, Konya, Selçuklu, Konya, İç Anadolu Bölgesi, Türkiye",38.02420685,32.5057052418378
+Semarang State University,Semarang State University,"Mandiri University, Jalan Tambora, RW 10, Tegalsari, Candisari, Semarang, Jawa Tengah, 50252, Indonesia",-7.00349485,110.417749486905
+Semnan University,Semnan University,"دانشگاه سمنان, بزرگراه امام رضا, شهرک مسکن مهر مصلی, ناسار, سمنان, بخش مرکزی, شهرستان سمنان, استان سمنان, ‏ایران‎",35.6037444,53.434458770112
+Semnan University,"Semnan University, Semnan, Iran","دانشگاه سمنان, بزرگراه امام رضا, شهرک مسکن مهر مصلی, ناسار, سمنان, بخش مرکزی, شهرستان سمنان, استان سمنان, ‏ایران‎",35.6037444,53.434458770112
+Seoul Nat'l Univ.,Seoul Nat'l Univ.,"서울대입구, 지하 1822, 남부순환로, 중앙동, 봉천동, 관악구, 서울특별시, 08787, 대한민국",37.481223,126.9527151
+Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728,126.9841151
+Seoul National University,"Seoul National University, Korea","서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728,126.9841151
+Seoul National University,"Seoul National University, Seoul, Korea","서울대학교, 1, 관악로, 서림동, 신림동, 관악구, 서울특별시, 08825, 대한민국",37.46685,126.94892
+Shaheed Zulfikar Ali Bhutto Institute of,Shaheed Zulfikar Ali Bhutto Institute of,"Shaheed Zulfikar Ali Bhutto Institute of Science and Technology - Karachi Campus, Block 5, Clifton Block 5, CBC, ڪراچي Karachi, Karāchi District, سنڌ, 75600, ‏پاکستان‎",24.8186587,67.0316585
+Shandong University,Shandong University,"山东大学, 泰安街, 鳌山卫街道, 即墨区, 青岛市, 山东省, 266200, 中国",36.3693473,120.673818
+Shandong University,"Shandong University, Shandong, China","山东大学, 泰安街, 鳌山卫街道, 即墨区, 青岛市, 山东省, 266200, 中国",36.3693473,120.673818
+Shandong University of Science and Technology,Shandong University of Science and Technology,"山东科技大学, 579, 前湾港路, 牛王庙, 北下庄, 黄岛区 (Huangdao), 青岛市, 山东省, 266500, 中国",36.00146435,120.116240565627
+"Shanghai Institute of Technology, Shanghai, China","Shanghai Institute of Technology, Shanghai, China","上海应用技术大学, 康健路, 长桥, 徐汇区, 上海市, 200233, 中国",31.1678395,121.417382632476
+Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.428406809373
+Shanghai Jiao Tong University,"Shanghai Jiao Tong University, China","上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.428406809373
+Shanghai Jiao Tong University,"Shanghai Jiao Tong University, People's Republic of China","上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.428406809373
+Shanghai Jiao Tong University,"Shanghai Jiao Tong University, Shanghai 200240, China","上海交通大学(闵行校区), 宣怀大道, 紫竹科技园区, 英武, 闵行区, 上海市, 200240, 中国",31.02775885,121.432219256081
+Shanghai Jiao Tong University,"Shanghai Jiao Tong University, Shanghai, China","上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.428406809373
+Shanghai University,Shanghai University,"上海大学, 锦秋路, 大场镇, 宝山区 (Baoshan), 上海市, 201906, 中国",31.32235655,121.384009410929
+Shanghai University,"Shanghai University, Shanghai, China","上海大学, 锦秋路, 大场镇, 宝山区 (Baoshan), 上海市, 201906, 中国",31.32235655,121.384009410929
+Shanghai university,Shanghai university,"上海大学, 锦秋路, 大场镇, 宝山区 (Baoshan), 上海市, 201906, 中国",31.32235655,121.384009410929
+Sharda University,Sharda University,"Sharda University, Yamuna Expressway, Greater Noida, Gautam Buddha Nagar, Uttar Pradesh, 201308, India",28.4737512,77.4836148
+Sharda University,"Sharda University, Greater Noida, India","Sharda University, Yamuna Expressway, Greater Noida, Gautam Buddha Nagar, Uttar Pradesh, 201308, India",28.4737512,77.4836148
+Sharif University of Technology,Sharif University of Technology,"دانشگاه صنعتی شریف, خیابان آزادی, زنجان, منطقه ۹ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, 14588, ‏ایران‎",35.7036227,51.351250969544
+Sharif University of Technology,"Sharif University of Technology, Tehran. Iran","دانشگاه صنعتی شریف, خیابان آزادی, زنجان, منطقه ۹ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, 14588, ‏ایران‎",35.7036227,51.351250969544
+Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.985337841399
+Shenzhen University,Shenzhen University,"深圳大学, 3688, 南海大道, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518060, 中国",22.53521465,113.931591101679
+Shenzhen University,"Shenzhen University, Shenzhen China","深圳大学, 3688, 南海大道, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518060, 中国",22.53521465,113.931591101679
+Shenzhen University,"Shenzhen University, Shenzhen, China","深圳大学, 3688, 南海大道, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518060, 中国",22.53521465,113.931591101679
+"Shibaura Institute of Technology, Tokyo, Japan","Shibaura Institute of Technology, Tokyo, Japan","芝浦工業大学 豊洲キャンパス, 晴海通り, 豊洲2, 豊洲, 富岡一丁目, 江東区, 東京都, 関東地方, 135-6001, 日本",35.66053325,139.795031213151
+Shiraz University,Shiraz University,"دانشگاه شیراز, میدان ارم, محدوده شهرداری منطقه یک - شهرداری شیراز, شیراز, بخش مرکزی شهرستان شیراز, شهرستان شیراز, استان فارس, 71348-34689, ‏ایران‎",29.6385474,52.5245706
+"Sichuan Univ., Chengdu","Sichuan Univ., Chengdu","四川大学(华西校区), 校东路, 武侯区, 武侯区 (Wuhou), 成都市 / Chengdu, 四川省, 610014, 中国",30.642769,104.067511751425
+Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.2767454,-122.917773749103
+Singapore Management University,Singapore Management University,"Singapore Management University, Fort Canning Tunnel, Clarke Quay, City Hall, Singapore, Central, 178895, Singapore",1.29500195,103.849092139632
+Singapore Management University,"Singapore Management University, Singapore","Singapore Management University, Fort Canning Tunnel, Clarke Quay, City Hall, Singapore, Central, 178895, Singapore",1.29500195,103.849092139632
+Singapore University of Technology and Design,Singapore University of Technology and Design,"Singapore University of Technology and Design, Simpang Bedok, Changi Business Park, Southeast, 486041, Singapore",1.340216,103.965089
+Singapore University of Technology and Design,"Singapore University of Technology and Design, Singapore","Singapore University of Technology and Design, Simpang Bedok, Changi Business Park, Southeast, 486041, Singapore",1.340216,103.965089
+Sinhgad College of,Sinhgad College of,"SINHGAD, NH61, Foi, Ahmadnagar, Ahmednagar, Maharashtra, 414001, India",19.0993293,74.7691424
+Soochow University,Soochow University,"苏州大学(天赐庄校区), 清荫路, 钟楼社区, 双塔街道, 姑苏区, 苏州市, 江苏省, 215001, 中国",31.3070951,120.635739868117
+Soochow University,"Soochow University, Suzhou, China","苏州大学(天赐庄校区), 清荫路, 钟楼社区, 双塔街道, 姑苏区, 苏州市, 江苏省, 215001, 中国",31.3070951,120.635739868117
+South China Normal University,South China Normal University,"华师, 五山路, 华南理工大学南新村, 天河区, 广州市, 广东省, 510630, 中国",23.143197,113.34009651145
+South China Normal University,"South China Normal University, Guangzhou, China","华师, 五山路, 华南理工大学南新村, 天河区, 广州市, 广东省, 510630, 中国",23.143197,113.34009651145
+South China University of China,South China University of China,"华工站, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.0490047,113.3971571
+South China University of Technology,South China University of Technology,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.0502042,113.398803226836
+South China University of Technology,"South China University of Technology, China","华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.0502042,113.398803226836
+South China University of Technology,"South China University of Technology, Guangzhou, China","华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.0502042,113.398803226836
+South China University of Technology,"South China University of Technology, Guangzhou, Guangdong, China","华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.0502042,113.398803226836
+South College Road,South College Road,"South College Road, Beechfield, Baltimore, Maryland, 21229, USA",39.2715228,-76.6936807
+South East European University,South East European University,"Универзитет на Југоисточна Европа, 335, Мајка Тереза, Тетово, Општина Тетово, Полошки Регион, 1200, Македонија",41.98676415,20.9625451620439
+South East European University,"South East European University, Tetovo, Macedonia","Универзитет на Југоисточна Европа, 335, Мајка Тереза, Тетово, Општина Тетово, Полошки Регион, 1200, Македонија",41.98676415,20.9625451620439
+Southeast University,Southeast University,"SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国",32.0575279,118.786822520439
+Southeast University,"Southeast University, Nanjing, China","SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国",32.0575279,118.786822520439
+Southwest Jiaotong University,Southwest Jiaotong University,"西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国",30.697847,104.0520811
+Southwest Jiaotong University,"Southwest Jiaotong University, Chengdu, China","西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国",30.697847,104.0520811
+Southwest Jiaotong University,"Southwest Jiaotong University, Chengdu, P.R. China","西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国",30.697847,104.0520811
+Southwest University,Southwest University,"西南大学, 天生路, 北碚区 (Beibei), 北碚区, 北碚区 (Beibei), 重庆市, 400711, 中国",29.82366295,106.420500156445
+Southwest University,"Southwest University, China","西南大学, 天生路, 北碚区 (Beibei), 北碚区, 北碚区 (Beibei), 重庆市, 400711, 中国",29.82366295,106.420500156445
+Southwest University,"Southwest University, Chongqing 400715, China","西南大学, 天生路, 北碚区 (Beibei), 北碚区, 北碚区 (Beibei), 重庆市, 400711, 中国",29.82366295,106.420500156445
+Southwest University,"Southwest University, Chongqing, China","西南大学, 天生路, 北碚区 (Beibei), 北碚区, 北碚区 (Beibei), 重庆市, 400711, 中国",29.82366295,106.420500156445
+"Sri krishna College of Technology, Coimbatore, India","Sri krishna College of Technology, Coimbatore, India","Sri Krishna College of Technology, Kovaipudur to Golf Course Road dirt track, Ward 89, South Zone, Coimbatore, Coimbatore district, Tamil Nadu, 641001, India",10.925861,76.9224672855261
+Stamford University Bangladesh,Stamford University Bangladesh,"Stamford University Bangladesh, Siddeshwari Road, ফকিরাপুল, Paltan, ঢাকা, ঢাকা বিভাগ, 1217, বাংলাদেশ",23.7448166,90.4084351355108
+Stamford University Bangladesh,"Stamford University Bangladesh, Dhaka-1209, Bangladesh","Stamford University Bangladesh, Siddeshwari Road, ফকিরাপুল, Paltan, ঢাকা, ঢাকা বিভাগ, 1217, বাংলাদেশ",23.7448166,90.4084351355108
+Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.169365354983
+Stanford University,"Stanford University, CA","Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.169365354983
+Stanford University,"Stanford University, CA, United States","Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.169365354983
+Stanford University,"Stanford University, Stanford, CA, USA","Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.169365354983
+Stanford University,"Stanford University, Stanford, California","Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.169365354983
+Stanford University,"Stanford University, USA","Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.169365354983
+State University of New York Polytechnic Institute,State University of New York Polytechnic Institute,"State University of New York Polytechnic Institute, 100, Seymour Road, Maynard, Town of Marcy, Oneida County, New York, 13502, USA",43.13800205,-75.2294359077068
+State University of New York Polytechnic Institute,"State University of New York Polytechnic Institute, Utica, New York","State University of New York Polytechnic Institute, 100, Seymour Road, Maynard, Town of Marcy, Oneida County, New York, 13502, USA",43.13800205,-75.2294359077068
+State University of New York at Binghamton,State University of New York at Binghamton,"State University of New York at Binghamton, East Drive, Hinman, Willow Point, Vestal Town, Broome County, New York, 13790, USA",42.08779975,-75.9706606561486
+State University of New York at Binghamton,"State University of New York at Binghamton, USA","State University of New York at Binghamton, East Drive, Hinman, Willow Point, Vestal Town, Broome County, New York, 13790, USA",42.08779975,-75.9706606561486
+State University of New York at Buffalo,State University of New York at Buffalo,"University at Buffalo, The State University of New York, South Campus, Norton Circle, University Heights, Buffalo, Erie County, New York, 14226, USA",42.95485245,-78.8178238693065
+Stevens Institute of Technology,Stevens Institute of Technology,"Stevens Institute of Technology, River Terrace, Hoboken, Hudson County, New Jersey, 07030, USA",40.742252,-74.0270949
+"Stevens Institute of Technology, Hoboken, New Jersey, 07030","Stevens Institute of Technology, Hoboken, New Jersey, 07030","Stevens Institute of Technology, Hudson Street, Hoboken, Hudson County, New Jersey, 07030, USA",40.7451724,-74.027314
+Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.9153196,-73.1270626
+Stony Brook University,"Stony Brook University, NY 11794, USA","Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.9153196,-73.1270626
+Stony Brook University,"Stony Brook University, NY, USA","Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.9153196,-73.1270626
+Stony Brook University,"Stony Brook University, Stony Brook NY 11794, USA","Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.9153196,-73.1270626
+Stony Brook University,"Stony Brook University, Stony Brook, NY 11794, USA","Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.9153196,-73.1270626
+Stony Brook University,"Stony Brook University, Stony Brook, USA","Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.9153196,-73.1270626
+Stony Brook University Hospital,Stony Brook University Hospital,"Stony Brook University Hospital, 101, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.90826665,-73.1152089127966
+Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.287889943975
+Sun Yat-Sen University,"Sun Yat-Sen University, China","中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.287889943975
+Sun Yat-Sen University,"Sun Yat-Sen University, GuangZhou, China","中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.287889943975
+Sun Yat-Sen University,"Sun Yat-Sen University, Guangzhou, China","中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.287889943975
+Sun Yat-Sen University,"Sun Yat-Sen University, Guangzhou, P.R. China","中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.287889943975
+Sun Yat-sen University,Sun Yat-sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.287889943975
+Sun Yat-sen University,"Sun Yat-sen University, China","中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.287889943975
+Sun Yat-sen University,"Sun Yat-sen University, Guangzhou, China","中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.287889943975
+SungKyunKwan University,SungKyunKwan University,"성균관대, 덕영대로, 천천동, 장안구, 수원시, 경기, 16357, 대한민국",37.3003127,126.972123
+Sungkyunkwan University,Sungkyunkwan University,"성균관대, 덕영대로, 천천동, 장안구, 수원시, 경기, 16357, 대한민국",37.3003127,126.972123
+Sungkyunkwan University,"Sungkyunkwan University, Suwon, Republic of Korea","성균관대, 덕영대로, 천천동, 장안구, 수원시, 경기, 16357, 대한민국",37.3003127,126.972123
+Swansea University,Swansea University,"Swansea University, University Footbridge, Sketty, Swansea, Wales, SA2 8PZ, UK",51.6091578,-3.97934429228629
+Swansea University,"Swansea University, Swansea, UK","Swansea University, University Footbridge, Sketty, Swansea, Wales, SA2 8PZ, UK",51.6091578,-3.97934429228629
+Swiss Federal Institute of Technology,Swiss Federal Institute of Technology,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.3764534,8.54770931489751
+THE UNIVERSITY OF ARIZONA,THE UNIVERSITY OF ARIZONA,"University of Arizona, North Highland Avenue, Rincon Heights, Barrio Viejo, Tucson, Pima County, Arizona, 85721, USA",32.2351726,-110.950958317648
+THE UNIVERSITY OF CHICAGO,THE UNIVERSITY OF CHICAGO,"University of Chicago, South Ellis Avenue, Woodlawn, Chicago, Cook County, Illinois, 60637, USA",41.78468745,-87.6007493265106
+"TU Darmstadt, D-64283, Germany","TU Darmstadt, D-64283, Germany","Institut für Psychologie, 10, Alexanderstraße, Darmstadt-Mitte, Darmstadt, Regierungsbezirk Darmstadt, Hessen, 64283, Deutschland",49.8754648,8.6594332
+Tafresh University,Tafresh University,"دانشگاه تفرش, پاسداران, خرازان, بخش مرکزی, شهرستان تفرش, استان مرکزی, ‏ایران‎",34.68092465,50.0534135183902
+Tafresh University,"Tafresh University, Tafresh, Iran","دانشگاه تفرش, پاسداران, خرازان, بخش مرکزی, شهرستان تفرش, استان مرکزی, ‏ایران‎",34.68092465,50.0534135183902
+Tamkang University,Tamkang University,"淡江大學, 151, 英專路, 中興里, 鬼仔坑, 淡水區, 新北市, 25137, 臺灣",25.17500615,121.450767514156
+Tamkang University,"Tamkang University, Taipei, Taiwan","淡江大學, 151, 英專路, 中興里, 鬼仔坑, 淡水區, 新北市, 25137, 臺灣",25.17500615,121.450767514156
+Tampere University of Technology,Tampere University of Technology,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi",61.44964205,23.8587746189096
+Tampere University of Technology,"Tampere University of Technology, Finland","TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi",61.44964205,23.8587746189096
+Tampere University of Technology,"Tampere University of Technology, Tampere 33720, Finland","TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi",61.44964205,23.8587746189096
+Tampere University of Technology,"Tampere University of Technology, Tampere, Finland","TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi",61.44964205,23.8587746189096
+Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.5677531417838
+Technical University Munich,"Technical University Munich, Germany","TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.5677531417838
+"Technicolor, France","Technicolor, France","Technicolor, Rue d'Oradour-sur-Glane, Javel, 15e, Paris, Île-de-France, France métropolitaine, 75015, France",48.831533,2.28066282926829
+"Technicolor, Paris, France","Technicolor, Paris, France","Technicolor, Rue d'Oradour-sur-Glane, Javel, 15e, Paris, Île-de-France, France métropolitaine, 75015, France",48.831533,2.28066282926829
+Technion Israel Institute of Technology,Technion Israel Institute of Technology,"הטכניון - מכון טכנולוגי לישראל, דוד רוז, חיפה, קרית הטכניון, חיפה, מחוז חיפה, NO, ישראל",32.7767536,35.0241452903301
+Technological University,Technological University,"UBDT College of Engineering, College Private Road, K.T. Jambanna Nagara, Davanagere, Davanagere taluku, Davanagere district, Karnataka, 577000, India",14.4525199,75.9179512
+Technological University,"Technological University, Davanagere, Karnataka, India","UBDT College of Engineering, College Private Road, K.T. Jambanna Nagara, Davanagere, Davanagere taluku, Davanagere district, Karnataka, 577000, India",14.4525199,75.9179512
+Teesside University,Teesside University,"Teesside University, Southfield Road, Southfield, Linthorpe, Middlesbrough, North East England, England, TS1 3BZ, UK",54.5703695,-1.23509661862823
+Teesside University,"Teesside University, Middlesbrough, UK","Teesside University, Southfield Road, Southfield, Linthorpe, Middlesbrough, North East England, England, TS1 3BZ, UK",54.5703695,-1.23509661862823
+Teesside University,"Teesside University, UK","Teesside University, Southfield Road, Southfield, Linthorpe, Middlesbrough, North East England, England, TS1 3BZ, UK",54.5703695,-1.23509661862823
+Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.1119889,34.8045970204252
+Tel Aviv University,"Tel Aviv University, Israel","אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.1119889,34.8045970204252
+Tel-Aviv University,Tel-Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.1119889,34.8045970204252
+Tel-Aviv University,"Tel-Aviv University, Israel","אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.1119889,34.8045970204252
+Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.1534690525548
+Temple University,"Temple University, Philadelphia, PA 19122, USA","Temple University, West Berks Street, Hartranft, Philadelphia, Philadelphia County, Pennsylvania, 19122, USA",39.9808569,-75.149594
+Temple University,"Temple University, Philadelphia, PA, 19122, USA","Temple University, West Berks Street, Hartranft, Philadelphia, Philadelphia County, Pennsylvania, 19122, USA",39.9808569,-75.149594
+Temple University,"Temple University, Philadelphia, PA, USA","Temple University, Beasley's Walk, Stanton, Philadelphia, Philadelphia County, Pennsylvania, 19132:19133, USA",39.981188,-75.1562826952332
+Temple University,"Temple University, Philadelphia, USA","Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.1534690525548
+Texas A&M University,Texas A&M University,"Texas A&M University, Horticulture Street, Park West, College Station, Brazos County, Texas, 77841, USA",30.6108365,-96.3521280026443
+Texas A&M University,"Texas A&M University, College Station, TX, USA","Texas A&M University, Horticulture Street, Park West, College Station, Brazos County, Texas, 77841, USA",30.6108365,-96.3521280026443
+Thapar University,Thapar University,"Thapar University, Hostel Road, Patiala, Punjab, 147001, India",30.35566105,76.3658164148513
+The American University in Cairo,The American University in Cairo,"الجامعة الأمريكية بالقاهرة, شارع القصر العينى, القاهرة القديمة, جاردن سيتي, القاهرة, محافظة القاهرة, 11582, مصر",30.04287695,31.2366413899265
+The American University in Cairo,"The American University in Cairo, Egypt","الجامعة الأمريكية بالقاهرة, شارع القصر العينى, القاهرة القديمة, جاردن سيتي, القاهرة, محافظة القاهرة, 11582, مصر",30.04287695,31.2366413899265
+The Australian National University,The Australian National University,"Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia",-35.28121335,149.11665331324
+The Australian National University,"The Australian National University, Canberra, ACT, Australia","Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia",-35.28121335,149.11665331324
+The Australian National University,"The Australian National University, Canberra, Australia","Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia",-35.28121335,149.11665331324
+The Australian National University Canberra ACT 2601,The Australian National University Canberra ACT 2601,"Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia",-35.28121335,149.11665331324
+The Australian National University Canberra ACT 2601,"The Australian National University Canberra ACT 2601, Australia","Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia",-35.28121335,149.11665331324
+The Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.207886442805
+The Chinese University of Hong Kong,"The Chinese University of Hong Kong, China","中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.207886442805
+The Chinese University of Hong Kong,"The Chinese University of Hong Kong, Hong Kong","中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.207886442805
+The Chinese University of Hong Kong,"The Chinese University of Hong Kong, Hong Kong, China","香港中文大學 Chinese University of Hong Kong, 車站路 Station Road, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.413656,114.2099405
+The Chinese University of Hong Kong,"The Chinese University of Hong Kong, New Territories, Hong Kong","香港中文大學 Chinese University of Hong Kong, 車站路 Station Road, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.413656,114.2099405
+The City College and the Graduate Center,The City College and the Graduate Center,"Graduate Center, 184, Hooper Street, Mission Bay, SF, California, 94158, USA",37.76799565,-122.400099572569
+"The City College of New York, New York, NY 10031, USA","The City College of New York, New York, NY 10031, USA","CCNY, 160, Convent Avenue, Manhattanville, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10031, USA",40.81819805,-73.9510089793336
+The City University of New York,The City University of New York,"Lehman College of the City University of New York, 250, Bedford Park Boulevard West, Bedford Park, The Bronx, Bronx County, NYC, New York, 10468, USA",40.8722825,-73.8948917141949
+The Education University of Hong Kong,The Education University of Hong Kong,"香港教育大學 The Education University of Hong Kong, 露屏路 Lo Ping Road, 鳳園 Fung Yuen, 下坑 Ha Hang, 新界 New Territories, HK, DD5 1119, 中国",22.46935655,114.19474193618
+The Florida State University,The Florida State University,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA",30.44235995,-84.2974786716626
+The Hebrew University of Jerusalem,The Hebrew University of Jerusalem,"האוניברסיטה העברית בירושלים, Reagan Plaza, קרית מנחם בגין, הר הצופים, ירושלים, מחוז ירושלים, NO, ישראל",31.7918555,35.244723
+The Hebrew University of Jerusalem,"The Hebrew University of Jerusalem, Israel","האוניברסיטה העברית בירושלים, Reagan Plaza, קרית מנחם בגין, הר הצופים, ירושלים, מחוז ירושלים, NO, ישראל",31.7918555,35.244723
+The Hong Kong Polytechnic University,The Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.304572,114.179762852269
+The Hong Kong Polytechnic University,"The Hong Kong Polytechnic University, China","hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.304572,114.179762852269
+The Hong Kong Polytechnic University,"The Hong Kong Polytechnic University, Hong Kong","hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.304572,114.179762852269
+The Hong Kong Polytechnic University,"The Hong Kong Polytechnic University, Hong Kong, China","hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.304572,114.179762852269
+The Hong Kong Polytechnic University,"The Hong Kong Polytechnic University, Kowloon, Hong Kong","hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.304572,114.179762852269
+The Hong Kong University of Science and Technology,The Hong Kong University of Science and Technology,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.3386304,114.2620337
+The Hong Kong University of Science and Technology,"The Hong Kong University of Science and Technology, Hong Kong","香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.3386304,114.2620337
+The Institute of Electronics,The Institute of Electronics,"International Institute of Information Technology Bangalore - IIITB, Infosys Avenue, Konappana Agrahara, Electronics City Phase 1, Vittasandra, Bangalore Urban, Karnataka, 560100, India",12.8447999,77.6632389626693
+The Nanyang Technological University,The Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.3484104,103.682979653067
+The Nanyang Technological University,"The Nanyang Technological University, Singapore","NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.3484104,103.682979653067
+The Ohio State University,The Ohio State University,"The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA",40.00471095,-83.0285936787604
+The Ohio State University,"The Ohio State University, Columbus, OH, USA","The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA",40.00471095,-83.0285936787604
+The Ohio State University,"The Ohio State University, OH","The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA",40.00471095,-83.0285936787604
+The Open University,The Open University,"The Open University, East Lane, Walton, Monkston, Milton Keynes, South East, England, MK7 6AE, UK",52.02453775,-0.709274809394501
+The Open University of Israel,The Open University of Israel,"האוניברסיטה הפתוחה, 15, אבא חושי, חיפה, גבעת דאונס, חיפה, מחוז חיפה, NO, ישראל",32.77824165,34.9956567288188
+The Robotics Institute,The Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.494231705059
+The State University of New Jersey,The State University of New Jersey,"Rutgers New Brunswick: Livingston Campus, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA",40.51865195,-74.4409980124119
+The State University of New York at Buffalo,The State University of New York at Buffalo,"University at Buffalo, The State University of New York, South Campus, Norton Circle, University Heights, Buffalo, Erie County, New York, 14226, USA",42.95485245,-78.8178238693065
+The State University of New York at Buffalo,"The State University of New York at Buffalo, New York, USA","University at Buffalo, The State University of New York, South Campus, Norton Circle, University Heights, Buffalo, Erie County, New York, 14226, USA",42.95485245,-78.8178238693065
+"The Univ of Hong Kong, China","The Univ of Hong Kong, China","海洋科學研究所 The Swire Institute of Marine Science, 鶴咀道 Cape D'Aguilar Road, 鶴咀低電台 Cape D'Aguilar Low-Level Radio Station, 石澳 Shek O, 芽菜坑村 Nga Choy Hang Tsuen, 南區 Southern District, 香港島 Hong Kong Island, HK, 中国",22.2081469,114.259641148719
+The University of Adelaide,The University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.9189226,138.604236675404
+The University of Adelaide,"The University of Adelaide, Adelaide, SA, Australia","University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.9189226,138.604236675404
+The University of Adelaide,"The University of Adelaide, Australia","University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.9189226,138.604236675404
+The University of British Columbia,The University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.246581610019
+The University of Cambridge,The University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.143088815415187
+The University of Edinburgh,The University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534912525441
+The University of Edinburgh,"The University of Edinburgh, Edinburgh, U.K.","New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534912525441
+The University of Electro-Communications,The University of Electro-Communications,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本",35.6572957,139.542558677257
+The University of Electro-Communications,"The University of Electro-Communications, JAPAN","電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本",35.6572957,139.542558677257
+The University of Electro-Communications,"The University of Electro-Communications, Japan","電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本",35.6572957,139.542558677257
+The University of Electro-Communications,"The University of Electro-Communications, Tokyo","電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本",35.6572957,139.542558677257
+The University of Hong Kong,The University of Hong Kong,"海洋科學研究所 The Swire Institute of Marine Science, 鶴咀道 Cape D'Aguilar Road, 鶴咀低電台 Cape D'Aguilar Low-Level Radio Station, 石澳 Shek O, 芽菜坑村 Nga Choy Hang Tsuen, 南區 Southern District, 香港島 Hong Kong Island, HK, 中国",22.2081469,114.259641148719
+The University of Manchester,The University of Manchester,"University of Manchester - Main Campus, Brunswick Street, Curry Mile, Ardwick, Manchester, Greater Manchester, North West England, England, M13 9NR, UK",53.46600455,-2.23300880782987
+The University of Maryland,The University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.2899685,-76.6219610316858
+The University of New South Wales,The University of New South Wales,"UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia",-33.91758275,151.231240246527
+The University of New South Wales,"The University of New South Wales, Australia","UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia",-33.91758275,151.231240246527
+The University of Newcastle,The University of Newcastle,"University of Newcastle Central Coast Campus, Technology Bridge, Ourimbah, Central Coast, NSW, 2258, Australia",-33.3578899,151.37834708231
+The University of Newcastle,"The University of Newcastle, Callaghan 2308, Australia","University of Newcastle, Huxley Library, University Drive, Callaghan, Newcastle-Maitland, Newcastle, NSW, 2308, Australia",-32.8930923,151.705656
+The University of North Carolina,The University of North Carolina,"University of North Carolina, Emergency Room Drive, Chapel Hill, Orange County, North Carolina, 27599, USA",35.90503535,-79.0477532652511
+The University of North Carolina,"The University of North Carolina, Chapel Hill","University of North Carolina, Emergency Room Drive, Chapel Hill, Orange County, North Carolina, 27599, USA",35.90503535,-79.0477532652511
+The University of North Carolina at Charlotte,The University of North Carolina at Charlotte,"Lot 20, Poplar Terrace Drive, Charlotte, Mecklenburg County, North Carolina, 28223, USA",35.3103441,-80.732616166699
+The University of North Carolina at Charlotte,"The University of North Carolina at Charlotte, USA","Lot 20, Poplar Terrace Drive, Charlotte, Mecklenburg County, North Carolina, 28223, USA",35.3103441,-80.732616166699
+The University of Nottingham,The University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.9387428,-1.20029569274574
+The University of Nottingham,"The University of Nottingham, UK","University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.9387428,-1.20029569274574
+The University of Queensland,The University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.013169559836
+The University of Queensland,"The University of Queensland, Australia","University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.013169559836
+The University of Queensland,"The University of Queensland, Brisbane, Australia","University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.013169559836
+The University of Queensland,"The University of Queensland, QLD 4072, Australia","University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.013169559836
+The University of Sheffield,The University of Sheffield,"University of Sheffield, Portobello, Port Mahon, Saint George's, Sheffield, Yorkshire and the Humber, England, S1 4DP, UK",53.3815248,-1.480681425
+The University of Sheffield,"The University of Sheffield, Sheffield, U.K.","University of Sheffield, Portobello, Port Mahon, Saint George's, Sheffield, Yorkshire and the Humber, England, S1 4DP, UK",53.3815248,-1.480681425
+The University of Sydney,The University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.189433661925
+The University of Sydney,"School, The University of Sydney, Sydney, NSW, Australia","Royal Prince Alfred Hospital School, 57-59, Grose Street, Camperdown, Sydney, NSW, 2050, Australia",-33.8893229,151.180068
+The University of Sydney,"The University of Sydney, NSW 2006, Australia","USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.189433661925
+The University of Sydney,"The University of Sydney, Sydney, Australia","USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.189433661925
+The University of Tennessee,The University of Tennessee,"University of Tennessee, Melrose Avenue, Fort Sanders, Knoxville, Knox County, Tennessee, 37916, USA",35.9542493,-83.9307395
+The University of Tennessee,"The University of Tennessee, Knoxville","University of Tennessee, Melrose Avenue, Fort Sanders, Knoxville, Knox County, Tennessee, 37916, USA",35.9542493,-83.9307395
+The University of Texas,The University of Texas,"The University of Texas at Tyler, 3900, University Boulevard, Tyler, Smith County, Texas, 75799, USA",32.3163078,-95.2536994379459
+The University of Texas at,The University of Texas at,"The University of Texas at Tyler, 3900, University Boulevard, Tyler, Smith County, Texas, 75799, USA",32.3163078,-95.2536994379459
+The University of Texas at Austin,The University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.284151,-97.7319559808022
+The University of Texas at Austin Austin,The University of Texas at Austin Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.284151,-97.7319559808022
+The University of Texas at Austin Austin,"The University of Texas at Austin Austin, Texas, USA","University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.284151,-97.7319559808022
+The University of Texas at Dallas,The University of Texas at Dallas,"University of Texas at Dallas, Richardson, Dallas County, Texas, 78080, USA",32.9820799,-96.7566278
+The University of Texas at Dallas,"The University of Texas at Dallas, Richardson, TX","University of Texas at Dallas, Richardson, Dallas County, Texas, 78080, USA",32.9820799,-96.7566278
+The University of Texas at San Antonio,The University of Texas at San Antonio,"Lot D3, South PanAm Expressway, Cattleman's Square, San Antonio, Bexar County, Texas, 78205, USA",29.42182005,-98.5016869955163
+The University of Texas at San Antonio,"The University of Texas at San Antonio, San Antonio, TX, USA","Lot D3, South PanAm Expressway, Cattleman's Square, San Antonio, Bexar County, Texas, 78205, USA",29.42182005,-98.5016869955163
+The University of Tokushima,The University of Tokushima,"大学前, 国道11号, 徳島市, 徳島県, 四国地方, 770-0815, 日本",34.0788068,134.558981
+The University of Tokushima,"The University of Tokushima, Japan","大学前, 国道11号, 徳島市, 徳島県, 四国地方, 770-0815, 日本",34.0788068,134.558981
+The University of Tokyo,The University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.9020448,139.936220089117
+The University of Tokyo,"The University of Tokyo, Japan","東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.9020448,139.936220089117
+The University of Western Australia,The University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.797900374251
+The University of Western Australia,"The University of Western Australia, Crawley, WA, Australia","University of Western Australia (Crawley Campus), 35, Stirling Highway, Crawley, Perth, Western Australia, 6009, Australia",-31.98027975,115.818084637301
+The University of York,The University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.0313887829649
+The University of York,"The University of York, Heslington, York YO10 5DD, United Kingdom","Campus Central Car Park, University Road, Heslington, York, Yorkshire and the Humber, England, YO10 5NH, UK",53.94830175,-1.05154975017361
+The University of York,"The University of York, UK","University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.0313887829649
+The University of York,"The University of York, United Kingdom","University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.0313887829649
+The University of the Humanities,The University of the Humanities,"Хүмүүнлэгийн ухааны их сургууль, Ж.Самбуугийн гудамж, Гандан, Улаанбаатар, 975, Монгол улс",47.9218937,106.919552402206
+The Weizmann Institute of,The Weizmann Institute of,"מכון ויצמן, הרצל, מעונות וולפסון, נווה עמית, רחובות, מחוז המרכז, NO, ישראל",31.904187,34.807378
+The Weizmann Institute of Science,The Weizmann Institute of Science,"מכון ויצמן למדע, שדרת מרכוס זיו, מעונות שיין, אחוזות הנשיא, רחובות, מחוז המרכז, NO, ישראל",31.9078499,34.8133409244421
+Tianjin University,Tianjin University,"Tianjin University, South Qinmin Road, Haihe Education Park, 辛庄镇, 津南区 (Jinnan), 天津市, 中国",38.99224515,117.306075265115
+Tianjin University,"Tianjin University, 300072, China","Tianjin University, South Qinmin Road, Haihe Education Park, 辛庄镇, 津南区 (Jinnan), 天津市, 中国",38.99224515,117.306075265115
+Tianjin University,"Tianjin University, China","Tianjin University, South Qinmin Road, Haihe Education Park, 辛庄镇, 津南区 (Jinnan), 天津市, 中国",38.99224515,117.306075265115
+Tianjin University,"Tianjin University, Tianjin, China","Tianjin University, South Qinmin Road, Haihe Education Park, 辛庄镇, 津南区 (Jinnan), 天津市, 中国",38.99224515,117.306075265115
+Tohoku University,Tohoku University,"Tohoku University, 五橋通, 青葉区, 仙台市, 宮城県, 東北地方, 980-0811, 日本",38.2530945,140.8736593
+Tohoku University,"Tohoku University, Japan","Tohoku University, 五橋通, 青葉区, 仙台市, 宮城県, 東北地方, 980-0811, 日本",38.2530945,140.8736593
+Tohoku University,"Tohoku University, Sendai, Japan","Tohoku University, 五橋通, 青葉区, 仙台市, 宮城県, 東北地方, 980-0811, 日本",38.2530945,140.8736593
+Tokyo Denki University,Tokyo Denki University,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本",35.6572957,139.542558677257
+Tokyo Institute of Technology,Tokyo Institute of Technology,"東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本",35.5167538,139.483422513406
+"Tokyo Institute of Technology, Japan","Tokyo Institute of Technology, Japan","東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本",35.5167538,139.483422513406
+"Tokyo Institute of Technology, Kanagawa, Japan","Tokyo Institute of Technology, Kanagawa, Japan","東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本",35.5167538,139.483422513406
+Tokyo Metropolitan University,Tokyo Metropolitan University,"首都大学東京, 由木緑道, 八王子市, 東京都, 関東地方, 1920364, 日本",35.6200925,139.38296706394
+Tomsk Polytechnic University,Tomsk Polytechnic University,"Томский политехнический университет, улица Пирогова, Южная, Кировский район, Томск, городской округ Томск, Томская область, СФО, 634034, РФ",56.46255985,84.955654946724
+Tongji University,Tongji University,"同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国",31.28473925,121.496949085887
+Tongji University,"Tongji University, Shanghai 201804, China","同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国",31.28473925,121.496949085887
+Tongji University,"Tongji University, Shanghai, China","同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国",31.28473925,121.496949085887
+Toyota Research Institute,Toyota Research Institute,"Toyota Research Institute, 4440, West El Camino Real, Los Altos, Santa Clara County, California, 94022, USA",37.40253645,-122.116551067984
+"Toyota Technological Institute (Chicago, US","Toyota Technological Institute (Chicago, US","Toyota Technological Institute, 6045, South Kenwood Avenue, Woodlawn, Chicago, Cook County, Illinois, 60637, USA",41.7847112,-87.5926056707507
+Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.320989081778
+Tsinghua University,"Tsinghua University, 100084 Beijing, China","清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.320989081778
+Tsinghua University,"Tsinghua University, Beijing","清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.320989081778
+Tsinghua University,"Tsinghua University, Beijing 100084, China","清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.320989081778
+Tsinghua University,"Tsinghua University, Beijing 100084, P.R. China","清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.320989081778
+Tsinghua University,"Tsinghua University, Beijing 100084, P.R.China","清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.320989081778
+Tsinghua University,"Tsinghua University, Beijing, 100084, China","清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.320989081778
+Tsinghua University,"Tsinghua University, Beijing, China","清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.320989081778
+Tsinghua University,"Tsinghua University, Beijing, P. R. China","清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.320989081778
+Tsinghua University,"Tsinghua University, Beijing,China","清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.320989081778
+Tsinghua University,"Tsinghua University, China","清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.320989081778
+"UC Merced, USA","UC Merced, USA","UC Merced Venture Lab, 1735, M Street, Merced, Merced County, California, 95340, USA",37.302827,-120.484819845561
+UNIVERSITY IN PRAGUE,UNIVERSITY IN PRAGUE,"Business Institut EDU, Kodaňská, Vršovice, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 10100, Česko",50.0714761,14.4542642
+UNIVERSITY OF CALIFORNIA,UNIVERSITY OF CALIFORNIA,"UC Berkeley, Centennial Drive, Oakland, Alameda County, California, 94720-1076, USA",37.87631055,-122.238859269443
+UNIVERSITY OF CALIFORNIA,"UNIVERSITY OF CALIFORNIA, BERKELEY","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.8687126,-122.255868148743
+UNIVERSITY OF CALIFORNIA,"UNIVERSITY OF CALIFORNIA, SAN DIEGO","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.231100493855
+UNIVERSITY OF OULU,UNIVERSITY OF OULU,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.0592157,25.466326012507
+UNIVERSITY OF TAMPERE,UNIVERSITY OF TAMPERE,"Tampereen yliopisto, 4, Kalevantie, Ratinanranta, Tulli, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33100, Suomi",61.49412325,23.7792067776763
+UNIVERSITY OF TARTU,UNIVERSITY OF TARTU,"Paabel, University of Tartu, 17, Ülikooli, Kesklinn, Tartu linn, Tartu, Tartu linn, Tartu maakond, 53007, Eesti",58.38131405,26.7207808104523
+UNIVERSITY OF WISCONSIN MADISON,UNIVERSITY OF WISCONSIN MADISON,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.4306642542901
+Ulm University,Ulm University,"HNU, John-F.-Kennedy-Straße, Vorfeld, Wiley, Neu-Ulm, Landkreis Neu-Ulm, Schwaben, Bayern, 89231, Deutschland",48.38044335,10.0101011516362
+Ulm University,"Ulm University, Germany","HNU, John-F.-Kennedy-Straße, Vorfeld, Wiley, Neu-Ulm, Landkreis Neu-Ulm, Schwaben, Bayern, 89231, Deutschland",48.38044335,10.0101011516362
+Universidad Autonoma de Madrid,Universidad Autonoma de Madrid,"Facultad de Medicina de la Universidad Autónoma de Madrid, Calle de Arturo Duperier, Fuencarral, Fuencarral-El Pardo, Madrid, Área metropolitana de Madrid y Corredor del Henares, Comunidad de Madrid, 28001, España",40.48256135,-3.69060789542556
+"Universidad Tecnica Federico Santa Maria, Valparaiso, Chile","Universidad Tecnica Federico Santa Maria, Valparaiso, Chile","Universidad Técnica Federico Santa María, Condominio Esmeralda, Valparaíso, Provincia de Valparaíso, V Región de Valparaíso, 2390382, Chile",-33.0362526,-71.595382
+Universitat Autònoma de Barcelona,Universitat Autònoma de Barcelona,"Centre de Visió per Computador (CVC), Carrer de l'Albareda, Serraperera, UAB, Cerdanyola del Vallès, Vallès Occidental, BCN, CAT, 08214, España",41.5007811,2.11143663166357
+Universitat Autònoma de Barcelona,"Centre de Visió per Computador, Universitat Autònoma de Barcelona, Barcelona, Spain","Centre de Visió per Computador (CVC), Carrer de l'Albareda, Serraperera, UAB, Cerdanyola del Vallès, Vallès Occidental, BCN, CAT, 08214, España",41.5007811,2.11143663166357
+Universitat Oberta de Catalunya,Universitat Oberta de Catalunya,"Universitat Oberta de Catalunya, 156, Rambla del Poblenou, Provençals del Poblenou, Sant Martí, Barcelona, BCN, CAT, 08018, España",41.40657415,2.1945341
+Universitat Oberta de Catalunya,"Universitat Oberta de Catalunya, Barcelona, Spain","Universitat Oberta de Catalunya, 156, Rambla del Poblenou, Provençals del Poblenou, Sant Martí, Barcelona, BCN, CAT, 08018, España",41.40657415,2.1945341
+Universitat Pompeu Fabra,Universitat Pompeu Fabra,"Dipòsit de les Aigües, Carrer de Wellington, la Vila Olímpica del Poblenou, Ciutat Vella, Barcelona, BCN, CAT, 08071, España",41.39044285,2.18891949251166
+Universitat Pompeu Fabra,"Universitat Pompeu Fabra, Barcelona, Spain","Dipòsit de les Aigües, Carrer de Wellington, la Vila Olímpica del Poblenou, Ciutat Vella, Barcelona, BCN, CAT, 08071, España",41.39044285,2.18891949251166
+Universitat de València,Universitat de València,"Campus dels Tarongers, Plaza de Manuel Broseta i Pont, Ciutat Jardí, Algirós, València, Comarca de València, València / Valencia, Comunitat Valenciana, 46022, España",39.47787665,-0.342577110177694
+Universitat de València,"Universitat de València, Valencia, Spain","Campus dels Tarongers, Plaza de Manuel Broseta i Pont, Ciutat Jardí, Algirós, València, Comarca de València, València / Valencia, Comunitat Valenciana, 46022, España",39.47787665,-0.342577110177694
+Universiti Teknologi PETRONAS,Universiti Teknologi PETRONAS,"UTP, Universiti Teknologi Petronas, Persiaran Desa Kediaman, Puncak Iskandar, Seri Iskandar, PRK, 32610, Malaysia",4.3830464,100.970015404936
+Universiti Teknologi PETRONAS,"Universiti Teknologi PETRONAS, Seri Iskandar, 32610, Perak Malaysia","UTP, Universiti Teknologi Petronas, Persiaran Desa Kediaman, Puncak Iskandar, Seri Iskandar, PRK, 32610, Malaysia",4.3830464,100.970015404936
+University,University,"Ritsumeikan House, Lower Mall, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.26007165,-123.253442836235
+University,"Ritsumeikan, University","Ritsumeikan House, Lower Mall, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.26007165,-123.253442836235
+University,"University, China","大學 University, 澤祥街 Chak Cheung Street, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.4133862,114.210058
+University,"University, Guangzhou, China","中山大学第一课室, 74号大院, 中山二路, 马棚岗, 农林街道, 越秀区 (Yuexiu), 广州市, 广东省, 510080, 中国",23.1314851,113.2852239
+University,"University, Hong Kong","大學 University, 澤祥街 Chak Cheung Street, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.4133862,114.210058
+University,"University, Singapore","NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.2962018,103.776899437848
+University,"University, USA","University, South Dixie Highway, Coral Gables, Miami-Dade County, Florida, 33124-6310, USA",25.7147949,-80.276947
+University,"University, Xi an Shaanxi Province, Xi an 710049, China","西五路, 新城区, 新城区 (Xincheng), 西安市, 陕西省, 710003, 中国",34.2707834,108.94449949951
+University Politehnica of Bucharest,University Politehnica of Bucharest,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România",44.43918115,26.0504456538413
+University (ITU,University (ITU,"IT-Universitetet i København, Emil Holms Kanal, Christianshavn, København, Københavns Kommune, Region Hovedstaden, 1424, Danmark",55.65965525,12.5910768893446
+University City Blvd.,University City Blvd.,"University City Boulevard, Charlotte, Mecklenburg County, North Carolina, 28223, USA",35.312224,-80.7084736
+University City Blvd.,"University City Blvd., Charlotte, NC","University City Boulevard, Charlotte, Mecklenburg County, North Carolina, 28223, USA",35.312224,-80.7084736
+University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.5231607,-0.1282037
+University College London,"University College London, London WC1N 3BG, United Kingdom","UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.5231607,-0.1282037
+University College London,"University College London, London, UK","UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.5231607,-0.1282037
+University College London,"University College London, UK","UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.5231607,-0.1282037
+University Drive,University Drive,"University Drive, Ooralea, Mackay, QLD, 4740, Australia",-21.1753214,149.1432747
+University Drive,"University Drive, Fairfax, VA 22030-4444, USA","University Drive, Ardmore, Fairfax, Fairfax County, Virginia, 22030, USA",38.835411,-77.316447
+University Heights,University Heights,"New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA",40.7423025,-74.1792817237128
+University Heights,"New Jersey Institute of Technology, University Heights, Newark, New Jersey 07102, USA","New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA",40.7423025,-74.1792817237128
+University Heights Newark,University Heights Newark,"New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA",40.7423025,-74.1792817237128
+University Heights Newark,"New Jersey Institute of Technology, University Heights Newark, NJ 07102 USA","New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA",40.7423025,-74.1792817237128
+University Institute of Engineering and Technology,University Institute of Engineering and Technology,"Maharishi University Of Information Technology, NH230, Jankipuram, Lucknow, Uttar Pradesh, 226021, India",26.9302879,80.9278433
+University Library,University Library,"University Town, College Avenue East, Rochester Hill, Clementi, Southwest, 138608, Singapore",1.30604775,103.7728987705
+University Library,"University Library, Singapore","University Town, College Avenue East, Rochester Hill, Clementi, Southwest, 138608, Singapore",1.30604775,103.7728987705
+University Of California San Diego,University Of California San Diego,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.231100493855
+University Of Maryland,University Of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.2899685,-76.6219610316858
+University POLITEHNICA Timisoara,University POLITEHNICA Timisoara,"UPT, Bulevardul Vasile Pârvan, Elisabetin, Timișoara, Timiș, 300223, România",45.746189,21.2275507517647
+University POLITEHNICA Timisoara,"University POLITEHNICA Timisoara, Timisoara, 300223, Romania","UPT, Bulevardul Vasile Pârvan, Elisabetin, Timișoara, Timiș, 300223, România",45.746189,21.2275507517647
+University POLITEHNICA of Bucharest,University POLITEHNICA of Bucharest,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România",44.43918115,26.0504456538413
+University POLITEHNICA of Bucharest,"University POLITEHNICA of Bucharest, Bucharest, Romania","Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România",44.43918115,26.0504456538413
+University Politehnica of Bucharest,University Politehnica of Bucharest,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România",44.43918115,26.0504456538413
+University Politehnica of Bucharest,"University Politehnica of Bucharest, Romania","Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România",44.43918115,26.0504456538413
+University of,University of,"University of ..., University Road, بہاولپور, Bahāwalpur District, پنجاب, 63100, ‏پاکستان‎",29.3758342,71.7528712910287
+University of,"Electrical Engineering, University of","Electrical Engineering, 185, Loading Dock, Montlake, University District, Seattle, King County, Washington, 98195-2350, USA",47.6532412,-122.3061707
+University of,"Statistics, University of","Department Of Statistics, University Road, Satellite Town, Cantonment, سرگودھا, Sargodha District, پنجاب, 40100, ‏پاکستان‎",32.0731522,72.6814703364947
+University of Aberdeen,University of Aberdeen,"University of Aberdeen, High Street, Old Aberdeen, Aberdeen, Aberdeen City, Scotland, AB24 3EJ, UK",57.1646143,-2.10186013407315
+University of Abertay,University of Abertay,"Abertay University, Bell Street, City Centre, Dundee, Dundee City, Scotland, DD1 1HG, UK",56.46323375,-2.97447511707098
+University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.9189226,138.604236675404
+University of Adelaide,"University of Adelaide, Australia","University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.9189226,138.604236675404
+University of Adelaide,"University of Adelaide, SA, Australia","University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.9189226,138.604236675404
+University of Agder,University of Agder,"UiA, Vegard Hauges plass, Gimlemoen, Kvadraturen, Kristiansand, Vest-Agder, 4630, Norge",58.16308805,8.00144965545071
+University of Agder,"University of Agder, Kristiansand, Norway","UiA, Vegard Hauges plass, Gimlemoen, Kvadraturen, Kristiansand, Vest-Agder, 4630, Norge",58.16308805,8.00144965545071
+University of Aizu,University of Aizu,"会津大学, 磐越自動車道, 会津若松市, 福島県, 東北地方, 965-8580, 日本",37.5236728,139.938072464124
+University of Aizu,"University of Aizu, Japan","会津大学, 磐越自動車道, 会津若松市, 福島県, 東北地方, 965-8580, 日本",37.5236728,139.938072464124
+University of Akron,University of Akron,"University of Akron, East State Street, Stadium District, Cascade Valley, Akron, Summit County, Ohio, 44308, USA",41.0789035,-81.5197127229943
+University of Akron,"University of Akron, Akron","University of Akron, East State Street, Stadium District, Cascade Valley, Akron, Summit County, Ohio, 44308, USA",41.0789035,-81.5197127229943
+University of Alberta,University of Alberta,"University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada",53.5238572,-113.522826652346
+University of Alberta,"University of Alberta, Edmonton, Canada","University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada",53.5238572,-113.522826652346
+University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.3553655,4.9501644
+University of Amsterdam,"Science, University of Amsterdam","Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.3553655,4.9501644
+University of Amsterdam,"University of Amsterdam, Amsterdam, The","Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.3553655,4.9501644
+University of Amsterdam,"University of Amsterdam, Amsterdam, The Netherlands","Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.3553655,4.9501644
+University of Amsterdam,"University of Amsterdam, The Netherlands","Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.3553655,4.9501644
+University of Amsterdam,"University of Amsterdam, the Netherlands","Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.3553655,4.9501644
+University of Arizona,University of Arizona,"University of Arizona, North Highland Avenue, Rincon Heights, Barrio Viejo, Tucson, Pima County, Arizona, 85721, USA",32.2351726,-110.950958317648
+University of Arkansas at Little Rock,University of Arkansas at Little Rock,"University of Arkansas At Little Rock (UALR), 2801, U A L R Campus Drive, Little Rock, Pulaski County, Arkansas, 72204, USA",34.72236805,-92.3383025526859
+University of Barcelona,University of Barcelona,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España",41.3868913,2.16352384576632
+University of Barcelona,"University of Barcelona, Spain","Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España",41.3868913,2.16352384576632
+University of Basel,University of Basel,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra",47.5612651,7.5752961
+University of Basel,"University of Basel, Switzerland","Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra",47.5612651,7.5752961
+University of Bath,University of Bath,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK",51.3791442,-2.3252332
+University of Bath,"University of Bath, Bath, Somerset, United Kingdom","University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK",51.3791442,-2.3252332
+University of Bath,"University of Bath, Bath, United Kingdom","University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK",51.3791442,-2.3252332
+University of Birmingham,University of Birmingham,"University of Birmingham Edgbaston Campus, Ring Road North, Bournbrook, Birmingham, West Midlands Combined Authority, West Midlands, England, B15 2TP, UK",52.45044325,-1.93196134052244
+University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.7338124,7.1022465
+University of Bonn,"University of Bonn, Germany","Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.7338124,7.1022465
+University of Brescia,University of Brescia,"Università degli Studi di Brescia, Via Branze, Brescia, Lombardia, 25123, Italia",45.5649027,10.2306515
+University of Bridgeport,University of Bridgeport,"University of Bridgeport, Park Avenue, Bridgeport Downtown South Historic District, Bridgeport, Fairfield County, Connecticut, 06825, USA",41.1664858,-73.1920564
+University of Bristol,University of Bristol,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK",51.4584837,-2.60977519828372
+University of Bristol,"University of Bristol, Bristol, BS8 1UB, UK","University of Bristol, Cantock's Close, Kingsdown, Canon's Marsh, Bristol, City of Bristol, South West England, England, BS8, UK",51.4562363,-2.602779
+University of Bristol,"University of Bristol, Bristol, UK","Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK",51.4584837,-2.60977519828372
+University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.246581610019
+University of British Columbia,"University of British Columbia, Canada","University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.246581610019
+University of British Columbia,"University of British Columbia, Vancouver, Canada","University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.246581610019
+University of Buffalo,University of Buffalo,"University at Buffalo North Campus, Putnam Way, Amherst, Erie County, New York, 14260, USA",43.0008098,-78.7889689
+University of Caen,University of Caen,"Université de Caen Normandie, Esplanade de la Paix, Calvaire Saint-Pierre, Caen, Calvados, Normandie, 14000, France",49.2137349,-0.3650937
+University of Calgary,University of Calgary,"University of Calgary, Service Tunnel, University Heights, Calgary, Alberta, T2N 1N7, Canada",51.0784038,-114.1287077
+University of Calgary,"University of Calgary, Calgary, Alberta, Canada","University of Calgary, Service Tunnel, University Heights, Calgary, Alberta, T2N 1N7, Canada",51.0784038,-114.1287077
+University of California,University of California,"UC Berkeley, Centennial Drive, Oakland, Alameda County, California, 94720-1076, USA",37.87631055,-122.238859269443
+University of California,"University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.8687126,-122.255868148743
+University of California,"University of California, Berkeley, Berkeley CA 94720, USA","Goldman School of Public Policy, Hearst Avenue, Northside, Berkeley, Alameda County, California, 94720, USA",37.8756681,-122.257979979865
+University of California,"University of California, Irvine","University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA",33.6431901,-117.84016493553
+University of California,"University of California, Irvine, USA","University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA",33.6431901,-117.84016493553
+University of California,"University of California, Merced","University of California, Merced, Ansel Adams Road, Merced County, California, USA",37.36566745,-120.421588883632
+University of California,"University of California, Merced, CA 95344, USA","University of California, Merced, Ansel Adams Road, Merced County, California, USA",37.36566745,-120.421588883632
+University of California,"University of California, Merced, USA","University of California, Merced, Ansel Adams Road, Merced County, California, USA",37.36566745,-120.421588883632
+University of California,"University of California, Riverside","University of California, Riverside, Linden Street, Riverside, Riverside County, California, 92521, USA",33.98071305,-117.332610354677
+University of California,"University of California, Riverside CA 92521-0425, USA","UCR, North Campus Drive, Riverside, Riverside County, California, 92521, USA",33.9743275,-117.32558236636
+University of California,"University of California, Riverside, California 92521, USA","UCR, North Campus Drive, Riverside, Riverside County, California, 92521, USA",33.9743275,-117.32558236636
+University of California,"University of California, Riverside, Riverside CA, California 92521 United States","UCR, North Campus Drive, Riverside, Riverside County, California, 92521, USA",33.9743275,-117.32558236636
+University of California,"University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.231100493855
+University of California,"University of California, San Diego, CA, USA","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.231100493855
+University of California,"University of California, San Diego, California, USA","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.231100493855
+University of California,"University of California, San Diego, La Jolla","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.231100493855
+University of California,"University of California, San Diego, USA","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.231100493855
+University of California,"University of California, Santa Barbara","UCSB, Santa Barbara County, California, 93106, USA",34.4145937,-119.84581949869
+University of California Berkeley,University of California Berkeley,"UC Berkeley, Centennial Drive, Oakland, Alameda County, California, 94720-1076, USA",37.87631055,-122.238859269443
+University of California Berkeley,University of California Berkeley,"UC Berkeley, Centennial Drive, Oakland, Alameda County, California, 94720-1076, USA",37.87631055,-122.238859269443
+University of California Davis,University of California Davis,"University of California, Davis, Apiary Drive, Yolo County, California, 95616-5270, USA",38.5336349,-121.790772639747
+University of California San Diego,University of California San Diego,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.231100493855
+University of California San Diego,"University of California San Diego, USA","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.231100493855
+University of California San Diego,"University of California San Diego, United States of America","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.231100493855
+University of California Santa Barbara,University of California Santa Barbara,"UCSB, Santa Barbara County, California, 93106, USA",34.4145937,-119.84581949869
+University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.143088815415187
+University of Cambridge,"University of Cambridge, United Kingdom","Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.143088815415187
+University of Campinas,University of Campinas,"USJ, 97, Rua Sílvia Maria Fabro, Kobrasol, Campinas, São José, Microrregião de Florianópolis, Mesorregião da Grande Florianópolis, SC, Região Sul, 88102-130, Brasil",-27.5953995,-48.6154218
+University of Campinas (Unicamp,University of Campinas (Unicamp,"Universidade Estadual de Campinas - UNICAMP, Rua Josué de Castro, Barão Geraldo, Campinas, Microrregião de Campinas, RMC, Mesorregião de Campinas, SP, Região Sudeste, 13083-970, Brasil",-22.8224781,-47.0642599309425
+University of Canberra,University of Canberra,"University of Canberra, University Drive, Bruce, Belconnen, Australian Capital Territory, 2617, Australia",-35.23656905,149.084469935058
+University of Canterbury,University of Canterbury,"University of Canterbury, Uni-Cycle, Ilam, Christchurch, Christchurch City, Canterbury, 8040, New Zealand/Aotearoa",-43.5240528,172.580306253669
+University of Canterbury,"University of Canterbury, New Zealand","University of Canterbury, Uni-Cycle, Ilam, Christchurch, Christchurch City, Canterbury, 8040, New Zealand/Aotearoa",-43.5240528,172.580306253669
+University of Cape Town,University of Cape Town,"University of Cape Town, Engineering Mall, Cape Town Ward 59, Cape Town, City of Cape Town, Western Cape, CAPE TOWN, South Africa",-33.95828745,18.4599734888018
+University of Cape Town,"University of Cape Town, South Africa","University of Cape Town, Engineering Mall, Cape Town Ward 59, Cape Town, City of Cape Town, Western Cape, CAPE TOWN, South Africa",-33.95828745,18.4599734888018
+University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.1971250118395
+University of Central Florida,"University of Central Florida, Orlando","Rosen College of Hospitality Management, 9907, Universal Boulevard, Orange County, Florida, 32819, USA",28.42903955,-81.4421617727936
+University of Central Florida,"University of Central Florida, Orlando, 32816, United States of America","Rosen College of Hospitality Management, 9907, Universal Boulevard, Orange County, Florida, 32819, USA",28.42903955,-81.4421617727936
+University of Central Florida,"University of Central Florida, Orlando, FL, USA","Rosen College of Hospitality Management, 9907, Universal Boulevard, Orange County, Florida, 32819, USA",28.42903955,-81.4421617727936
+University of Central Florida,"University of Central Florida, Orlando, USA","Rosen College of Hospitality Management, 9907, Universal Boulevard, Orange County, Florida, 32819, USA",28.42903955,-81.4421617727936
+University of Central Florida,"University of Central Florida, USA","University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.1971250118395
+University of Central Punjab,University of Central Punjab,"University of Central Punjab, Khyaban-e-Jinnah, PECHS, Wapda Town, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54000, ‏پاکستان‎",31.4466149,74.2679762
+University of Central Punjab,"University of Central Punjab, Pakistan","University of Central Punjab, Khyaban-e-Jinnah, PECHS, Wapda Town, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54000, ‏پاکستان‎",31.4466149,74.2679762
+University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.9082804,116.2458527
+University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, Beijing 100190, China","University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.9082804,116.2458527
+University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, Beijing 101408, China","University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.9082804,116.2458527
+University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, Beijing, 100049, China","University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.9082804,116.2458527
+University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, Beijing, China","University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.9082804,116.2458527
+University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, China","University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.9082804,116.2458527
+University of Chinese Academy of Sciences (UCAS,University of Chinese Academy of Sciences (UCAS,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.9082804,116.2458527
+University of Chinese Academy of Sciences (UCAS),University of Chinese Academy of Sciences (UCAS),"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.9082804,116.2458527
+University of Chinese Academy of Sciences (UCAS),"University of Chinese Academy of Sciences (UCAS), Beijing, 100049, China","University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.9082804,116.2458527
+University of Coimbra,University of Coimbra,"Reitoria da Universidade de Coimbra, Rua de Entre-Colégios, Almedina, Alta, Almedina, Sé Nova, Santa Cruz, Almedina e São Bartolomeu, CBR, Coimbra, Baixo Mondego, Centro, 3000-062, Portugal",40.2075951,-8.42566147540816
+University of Coimbra,"University of Coimbra, Portugal","Reitoria da Universidade de Coimbra, Rua de Entre-Colégios, Almedina, Alta, Almedina, Sé Nova, Santa Cruz, Almedina e São Bartolomeu, CBR, Coimbra, Baixo Mondego, Centro, 3000-062, Portugal",40.2075951,-8.42566147540816
+University of Colorado,University of Colorado,"Naropa University, Arapahoe Avenue, The Hill, Boulder, Boulder County, Colorado, 80309, USA",40.01407945,-105.266959437621
+University of Colorado,"University of Colorado, Boulder","Naropa University, Arapahoe Avenue, The Hill, Boulder, Boulder County, Colorado, 80309, USA",40.01407945,-105.266959437621
+University of Colorado Colorado Springs,University of Colorado Colorado Springs,"Main Hall, The Spine, Colorado Springs, El Paso County, Colorado, 80907, USA",38.8920756,-104.797163894584
+University of Colorado Denver,University of Colorado Denver,"University of Colorado (Denver Auraria campus), Lawrence Way, Auraria, Denver, Denver County, Colorado, 80217, USA",39.74287785,-105.005963984841
+University of Colorado Denver,"University of Colorado Denver, Denver, CO, USA","University of Colorado (Denver Auraria campus), Lawrence Way, Auraria, Denver, Denver County, Colorado, 80217, USA",39.74287785,-105.005963984841
+University of Connecticut,University of Connecticut,"University of Connecticut, Glenbrook Road, Storrs, Tolland County, Connecticut, 06269, USA",41.8093779,-72.2536414
+University of Copenhagen,University of Copenhagen,"Københavns Universitet, Krystalgade, Kødbyen, Vesterbro, København, Københavns Kommune, Region Hovedstaden, 1165, Danmark",55.6801502,12.5723270014063
+University of Crete,University of Crete,"House of Europe, Μακεδονίας, Ρέθυμνο, Δήμος Ρεθύμνης, Περιφερειακή Ενότητα Ρεθύμνου, Περιφέρεια Κρήτης, Κρήτη, 930100, Ελλάδα",35.3713024,24.4754408
+University of Crete,"University of Crete, Crete, 73100, Greece","House of Europe, Μακεδονίας, Ρέθυμνο, Δήμος Ρεθύμνης, Περιφερειακή Ενότητα Ρεθύμνου, Περιφέρεια Κρήτης, Κρήτη, 930100, Ελλάδα",35.3713024,24.4754408
+University of Dammam,University of Dammam,"University of Dammam, King Faisal Rd, العقربية, الخبر, المنطقة الشرقية, ٣١٩٥٢, السعودية",26.39793625,50.1980792430511
+University of Dammam,"University of Dammam, Saudi Arabia","University of Dammam, King Faisal Rd, العقربية, الخبر, المنطقة الشرقية, ٣١٩٥٢, السعودية",26.39793625,50.1980792430511
+University of Dayton,University of Dayton,"University of Dayton, Caldwell Street, South Park Historic District, Dayton, Montgomery, Ohio, 45409, USA",39.738444,-84.1791874663107
+University of Dayton,"University of Dayton, Dayton, OH, USA","University of Dayton, Caldwell Street, South Park Historic District, Dayton, Montgomery, Ohio, 45409, USA",39.738444,-84.1791874663107
+University of Dayton,"University of Dayton, Ohio, USA","University of Dayton, Caldwell Street, South Park Historic District, Dayton, Montgomery, Ohio, 45409, USA",39.738444,-84.1791874663107
+University of Delaware,University of Delaware,"University of Delaware, South College Avenue, Newark, New Castle County, Delaware, 19713, USA",39.6810328,-75.7540184
+University of Delaware,"University of Delaware, USA","University of Delaware, South College Avenue, Newark, New Castle County, Delaware, 19713, USA",39.6810328,-75.7540184
+University of Delaware,"University of Delaware, Newark, 19716, USA","University of Delaware, South College Avenue, Newark, New Castle County, Delaware, 19713, USA",39.6810328,-75.7540184
+University of Delaware,"University of Delaware, Newark, DE, USA","University of Delaware, South College Avenue, Newark, New Castle County, Delaware, 19713, USA",39.6810328,-75.7540184
+University of Delaware,"University of Delaware, Newark, DE. USA","University of Delaware, South College Avenue, Newark, New Castle County, Delaware, 19713, USA",39.6810328,-75.7540184
+University of Denver,University of Denver,"University of Denver, Driscoll Bridge, Denver, Denver County, Colorado, 80208, USA",39.6766541,-104.962203
+University of Denver,"University of Denver, Denver, CO","University of Denver, Driscoll Bridge, Denver, Denver County, Colorado, 80208, USA",39.6766541,-104.962203
+University of Dhaka,University of Dhaka,"World War Memorial, Shahid Minar Rd, Jagannath Hall, DU, জিগাতলা, ঢাকা, ঢাকা বিভাগ, 1000, বাংলাদেশ",23.7316957,90.3965275
+University of Dhaka,"University of Dhaka, Bangladesh","World War Memorial, Shahid Minar Rd, Jagannath Hall, DU, জিগাতলা, ঢাকা, ঢাকা বিভাগ, 1000, বাংলাদেশ",23.7316957,90.3965275
+University of Dschang,University of Dschang,"Université de Dschang, Départementale 65, Fokoué, Menoua, OU, Cameroun",5.4409448,10.0712056113589
+University of Dschang,"University of Dschang, Cameroon","Université de Dschang, Départementale 65, Fokoué, Menoua, OU, Cameroun",5.4409448,10.0712056113589
+University of Dundee,University of Dundee,"University of Dundee, Park Wynd, Law, Dundee, Dundee City, Scotland, DD1 4HN, UK",56.45796755,-2.98214831353755
+University of East Anglia,University of East Anglia,"Arts (Lower Walkway Level), The Square, Westfield View, Earlham, Norwich, Norfolk, East of England, England, NR4 7TJ, UK",52.6221571,1.2409136
+University of East Anglia,"University of East Anglia, Norwich, U.K.","Arts (Lower Walkway Level), The Square, Westfield View, Earlham, Norwich, Norfolk, East of England, England, NR4 7TJ, UK",52.6221571,1.2409136
+University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534912525441
+University of Edinburgh,"University of Edinburgh, Edinburgh, UK","New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534912525441
+University of Engineering and Technology,University of Engineering and Technology,"University of Engineering and Technology, Lahore Bypass, لاہور, Shekhūpura District, پنجاب, ‏پاکستان‎",31.6914689,74.2465617
+University of Exeter,University of Exeter,"University of Exeter, Stocker Road, Exwick, Exeter, Devon, South West England, England, EX4 4QN, UK",50.7369302,-3.53647671702167
+University of Exeter,"University of Exeter, UK","University of Exeter, Stocker Road, Exwick, Exeter, Devon, South West England, England, EX4 4QN, UK",50.7369302,-3.53647671702167
+University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.6328784,-82.3490133048243
+University of Florida,"University of Florida, Gainesville, FL","University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.6328784,-82.3490133048243
+University of Florida,"University of Florida, Gainesville, FL, 32611, USA","University of Florida, Museum Road, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32601, USA",29.6447739,-82.3575193392276
+University of Frankfurt,University of Frankfurt,"Frankfurt University of Applied Sciences, Kleiststraße, Nordend West, Frankfurt, Regierungsbezirk Darmstadt, Hessen, 60318, Deutschland",50.13053055,8.69234223934388
+University of Geneva,University of Geneva,"University of Chicago-Yerkes Observatory, 373, West Geneva Street, Williams Bay, Walworth County, Wisconsin, 53191, USA",42.57054745,-88.5557862661765
+University of Glasgow,University of Glasgow,"University of Glasgow, University Avenue, Yorkhill, Hillhead, Glasgow, Glasgow City, Scotland, G, UK",55.87231535,-4.28921783557444
+University of Groningen,University of Groningen,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland",53.21967825,6.56251482206542
+University of Groningen,"University of Groningen, Netherlands","Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland",53.21967825,6.56251482206542
+University of Groningen,"University of Groningen, The Netherlands","Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland",53.21967825,6.56251482206542
+University of Gujrat,University of Gujrat,"University of Gujrat, University Road, Chandhar, Gujrāt District, پنجاب, 50700, ‏پاکستان‎",32.63744845,74.1617455759799
+University of Gujrat,"University of Gujrat, Pakistan","University of Gujrat, University Road, Chandhar, Gujrāt District, پنجاب, 50700, ‏پاکستان‎",32.63744845,74.1617455759799
+University of Haifa,University of Haifa,"אוניברסיטת חיפה, חיפה, מחוז חיפה, ישראל",32.76162915,35.0198630428453
+University of Haifa,"University of Haifa, Haifa, Israel","אוניברסיטת חיפה, חיפה, מחוז חיפה, ישראל",32.76162915,35.0198630428453
+University of Hawaii,University of Hawaii,"University of Hawaii at Manoa, Bachman Place, Lower Mānoa, Moiliili, Honolulu, Honolulu County, Hawaii, 96848, USA",21.2982795,-157.818692295846
+University of Hawaii,"University of Hawaii, Manoa, Honolulu, HI, 96822","University of Hawaii at Manoa, Bachman Place, Lower Mānoa, Moiliili, Honolulu, Honolulu County, Hawaii, 96848, USA",21.2982795,-157.818692295846
+University of Hong Kong,University of Hong Kong,"海洋科學研究所 The Swire Institute of Marine Science, 鶴咀道 Cape D'Aguilar Road, 鶴咀低電台 Cape D'Aguilar Low-Level Radio Station, 石澳 Shek O, 芽菜坑村 Nga Choy Hang Tsuen, 南區 Southern District, 香港島 Hong Kong Island, HK, 中国",22.2081469,114.259641148719
+University of Hong Kong,"University of Hong Kong, China","海洋科學研究所 The Swire Institute of Marine Science, 鶴咀道 Cape D'Aguilar Road, 鶴咀低電台 Cape D'Aguilar Low-Level Radio Station, 石澳 Shek O, 芽菜坑村 Nga Choy Hang Tsuen, 南區 Southern District, 香港島 Hong Kong Island, HK, 中国",22.2081469,114.259641148719
+University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.7207902,-95.3440627149137
+University of Houston,"University of Houston, Houston, TX 77204, USA","UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.7207902,-95.3440627149137
+University of Houston,"University of Houston, Houston, TX, USA","UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.7207902,-95.3440627149137
+University of Iceland,University of Iceland,"Háskóli Íslands, Sturlugata, Háskóli, Reykjavík, Reykjavíkurborg, Höfuðborgarsvæðið, 121, Ísland",64.137274,-21.9456145356869
+University of Illinois,University of Illinois,"B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.2258766477716
+University of Illinois,"University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.2258766477716
+University of Illinois Urbana Champaign,University of Illinois Urbana Champaign,"B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.2258766477716
+University of Illinois at,University of Illinois at,"University of Illinois at Urbana-Champaign, West Pennsylvania Avenue, West Urbana Residential Area, Urbana, Champaign County, Illinois, 61801, USA",40.1006938,-88.2313043272112
+University of Illinois at Chicago,University of Illinois at Chicago,"University of Illinois at Chicago, West Taylor Street, Greektown, Chicago, Cook County, Illinois, 60607, USA",41.86898915,-87.6485625597018
+University of Illinois at Chicago,"University of Illinois at Chicago, Chicago, IL","University of Illinois at Chicago, West Taylor Street, Greektown, Chicago, Cook County, Illinois, 60607, USA",41.86898915,-87.6485625597018
+University of Illinois at Urbana,University of Illinois at Urbana,"University of Illinois at Urbana-Champaign, West Pennsylvania Avenue, West Urbana Residential Area, Urbana, Champaign County, Illinois, 61801, USA",40.1006938,-88.2313043272112
+University of Illinois at Urbana Champaign,University of Illinois at Urbana Champaign,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA",40.101976,-88.2314378
+University of Illinois at Urbana Champaign,"University of Illinois at Urbana Champaign, Urbana","Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA",40.101976,-88.2314378
+University of Illinois at Urbana Champaign,"University of Illinois at Urbana Champaign, Urbana, IL 61801, USA","University of Illinois at Urbana-Champaign, South Goodwin Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.1066501,-88.2240260725426
+University of Illinois at Urbana-Champaign,University of Illinois at Urbana-Champaign,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA",40.101976,-88.2314378
+University of Illinois at Urbana-Champaign,"University of Illinois at Urbana-Champaign, IL USA","Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA",40.101976,-88.2314378
+University of Illinois at Urbana-Champaign,"University of Illinois at Urbana-Champaign, USA","Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA",40.101976,-88.2314378
+University of Illinois at Urbana-Champaign,"University of Illinois at Urbana-Champaign, Urbana, IL","Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA",40.101976,-88.2314378
+University of Illinois at Urbana-Champaign,"University of Illinois at Urbana-Champaign, Urbana, IL, USA","Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA",40.101976,-88.2314378
+University of Illinois at Urbana—Champaign,University of Illinois at Urbana—Champaign,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA",40.101976,-88.2314378
+University of Illinois at Urbana—Champaign,"University of Illinois at Urbana—Champaign, Champaign, IL, USA","Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA",40.101976,-88.2314378
+University of Information,University of Information,"Information, University Parkway, San Bernardino, San Bernardino County, California, 92407, USA",34.17980475,-117.325843648456
+University of Ioannina,University of Ioannina,"Πανεπιστήμιο Ιωαννίνων, Πανεπιστημίου, Κάτω Νεοχωρόπουλο, Νεοχωρόπουλο, Δήμος Ιωαννιτών, Π.Ε. Ιωαννίνων, Περιφέρεια Ηπείρου, Ήπειρος - Δυτική Μακεδονία, 45110, Ελλάδα",39.6162306,20.8396301098796
+University of Ioannina,"University of Ioannina, 45110, Greece","Πανεπιστήμιο Ιωαννίνων, Πανεπιστημίου, Κάτω Νεοχωρόπουλο, Νεοχωρόπουλο, Δήμος Ιωαννιτών, Π.Ε. Ιωαννίνων, Περιφέρεια Ηπείρου, Ήπειρος - Δυτική Μακεδονία, 45110, Ελλάδα",39.6162306,20.8396301098796
+University of Ioannina,"University of Ioannina, Ioannina, Greece","Πανεπιστήμιο Ιωαννίνων, Πανεπιστημίου, Κάτω Νεοχωρόπουλο, Νεοχωρόπουλο, Δήμος Ιωαννιτών, Π.Ε. Ιωαννίνων, Περιφέρεια Ηπείρου, Ήπειρος - Δυτική Μακεδονία, 45110, Ελλάδα",39.6162306,20.8396301098796
+University of Iowa,University of Iowa,"University of Iowa, Hawkeye Court, Iowa City, Johnson County, Iowa, 52246, USA",41.6659,-91.573103065
+University of Karlsruhe,University of Karlsruhe,"Karlshochschule International University, 36-38, Karlstraße, Innenstadt-West Westlicher Teil, Innenstadt-West, Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76133, Deutschland",49.00664235,8.39405151637065
+University of Karlsruhe,"University of Karlsruhe, Germany","Karlshochschule International University, 36-38, Karlstraße, Innenstadt-West Westlicher Teil, Innenstadt-West, Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76133, Deutschland",49.00664235,8.39405151637065
+University of Kent,University of Kent,"University of Kent, St. Stephen's Hill, Hackington, Canterbury, Kent, South East, England, CT2 7AS, UK",51.2975344,1.0729616473445
+University of Kent,"University of Kent, Canterbury, U.K.","University of Kent, St. Stephen's Hill, Hackington, Canterbury, Kent, South East, England, CT2 7AS, UK",51.2975344,1.0729616473445
+University of Kentucky,University of Kentucky,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA",38.0333742,-84.5017758
+University of Kentucky,"University of Kentucky, USA","University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA",38.0333742,-84.5017758
+University of Leeds,University of Leeds,"University of Leeds, Inner Ring Road, Woodhouse, Leeds, Yorkshire and the Humber, England, LS2 9NS, UK",53.80387185,-1.55245712031677
+University of Lincoln,University of Lincoln,"University of Lincoln, Brayford Way, Whitton Park, New Boultham, Lincoln, Lincolnshire, East Midlands, England, LN6 7TS, UK",53.22853665,-0.548734723802121
+University of Lincoln,"University of Lincoln, U. K.","University of Lincoln, Brayford Way, Whitton Park, New Boultham, Lincoln, Lincolnshire, East Midlands, England, LN6 7TS, UK",53.22853665,-0.548734723802121
+University of Lincoln,"University of Lincoln, U.K","University of Lincoln, Brayford Way, Whitton Park, New Boultham, Lincoln, Lincolnshire, East Midlands, England, LN6 7TS, UK",53.22853665,-0.548734723802121
+University of Lincoln,"University of Lincoln, UK","University of Lincoln, Brayford Way, Whitton Park, New Boultham, Lincoln, Lincolnshire, East Midlands, England, LN6 7TS, UK",53.22853665,-0.548734723802121
+University of Liverpool,University of Liverpool,"Victoria Building, Brownlow Hill, Knowledge Quarter, Liverpool, North West England, England, L3, UK",53.406179,-2.96670818619252
+University of Liverpool,"University of Liverpool, Liverpool, U.K.","Victoria Building, Brownlow Hill, Knowledge Quarter, Liverpool, North West England, England, L3, UK",53.406179,-2.96670818619252
+University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.0501558,14.4690732689076
+University of Ljubljana,"University of Ljubljana, Ljubljana, Slovenia","UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.0501558,14.4690732689076
+University of Ljubljana Faculty,University of Ljubljana Faculty,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.0501558,14.4690732689076
+University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.5217668,-0.130190717056655
+University of Louisville,University of Louisville,"University of Louisville, South Brook Street, Louisville, Jefferson County, Kentucky, 40208, USA",38.2167565,-85.7572502291168
+University of Louisville,"University of Louisville, Louisville, KY 40292 USA","University of Louisville, South Brook Street, Louisville, Jefferson County, Kentucky, 40208, USA",38.2167565,-85.7572502291168
+University of Macau,University of Macau,"研究生宿舍 Residência de Estudantes de Pós-Graduação da Universidade de Macau, 澳門大學 Universidade de Macau, 嘉模堂區 Nossa Senhora do Carmo, 氹仔 Taipa, Universidade de Macau em Ilha de Montanha 澳門大學橫琴校區, 中国",22.1240187,113.545109009671
+University of Macau,"University of Macau, Taipa, Macau","研究生宿舍 Residência de Estudantes de Pós-Graduação da Universidade de Macau, 澳門大學 Universidade de Macau, 嘉模堂區 Nossa Senhora do Carmo, 氹仔 Taipa, Universidade de Macau em Ilha de Montanha 澳門大學橫琴校區, 中国",22.1240187,113.545109009671
+University of Malaya,University of Malaya,"UM, Lingkaran Wawasan, Bukit Pantai, Bangsar, KL, 50603, Malaysia",3.12267405,101.65356103394
+University of Malaya,"University of Malaya, 50603 Kuala Lumpur, Malaysia","UM, Lingkaran Wawasan, Bukit Pantai, Bangsar, KL, 50603, Malaysia",3.12267405,101.65356103394
+University of Malaya,"University of Malaya, Kuala Lumpur, Malaysia","UM, Lingkaran Wawasan, Bukit Pantai, Bangsar, KL, 50603, Malaysia",3.12267405,101.65356103394
+University of Malta,University of Malta,"University of Malta, Ring Road, Japanese Garden, L-Imsida, Malta, MSD 9027, Malta",35.9023226,14.4834189
+University of Malta,"University of Malta, Msida, Malta","University of Malta, Ring Road, Japanese Garden, L-Imsida, Malta, MSD 9027, Malta",35.9023226,14.4834189
+University of Manchester,University of Manchester,"University of Manchester - Main Campus, Brunswick Street, Curry Mile, Ardwick, Manchester, Greater Manchester, North West England, England, M13 9NR, UK",53.46600455,-2.23300880782987
+University of Manchester,"University of Manchester, Manchester, U.K.","University of Manchester - Main Campus, Brunswick Street, Curry Mile, Ardwick, Manchester, Greater Manchester, North West England, England, M13 9NR, UK",53.46600455,-2.23300880782987
+University of Manitoba,University of Manitoba,"University of Manitoba, Gillson Street, Normand Park, Saint Vital, Winnipeg, Manitoba, R3T 2N2, Canada",49.8091536,-97.133041790072
+University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.2899685,-76.6219610316858
+University of Maryland,"University of Maryland, College Park, MD, USA","The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.2899685,-76.6219610316858
+University of Maryland,"Y. Li, University of Maryland","Penn Street Garage, 120, Penn Street, Ridgleys Delight, Baltimore, Maryland, 21201, USA",39.2864694,-76.6263409932124
+University of Maryland College Park,University of Maryland College Park,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",38.99203005,-76.9461029019905
+University of Maryland-College Park,University of Maryland-College Park,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",38.99203005,-76.9461029019905
+University of Maryland-College Park,"University of Maryland-College Park, USA","University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",38.99203005,-76.9461029019905
+University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.3889785,-72.5286987
+University of Massachusetts,"University of Massachusetts, Amherst","University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.3889785,-72.5286987
+University of Massachusetts,"University of Massachusetts, Amherst MA, USA","University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.3889785,-72.5286987
+University of Massachusetts,"University of Massachusetts, Amherst, MA","University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.3889785,-72.5286987
+University of Massachusetts - Amherst,University of Massachusetts - Amherst,"UMass Amherst, Commonwealth Avenue, Amherst, Hampshire, Massachusetts, 01003, USA",42.3869382,-72.5299147706745
+University of Massachusetts Amherst,University of Massachusetts Amherst,"UMass Amherst, Commonwealth Avenue, Amherst, Hampshire, Massachusetts, 01003, USA",42.3869382,-72.5299147706745
+University of Massachusetts Amherst,"University of Massachusetts Amherst, Amherst MA, 01003","Murray D. Lincoln Campus Center, 1, Campus Center Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.3919154,-72.5270705589714
+University of Massachusetts Dartmouth,University of Massachusetts Dartmouth,"University of Massachusetts Dartmouth, University Ring Road, Dartmouth, Bristol County, Massachusetts, 02747, USA",41.62772475,-71.0072450098225
+University of Massachusetts Dartmouth,"University of Massachusetts Dartmouth, Dartmouth, MA, USA","University of Massachusetts Dartmouth, University Ring Road, Dartmouth, Bristol County, Massachusetts, 02747, USA",41.62772475,-71.0072450098225
+University of Memphis,University of Memphis,"The University of Memphis, Desoto Avenue, Memphis, Shelby County, Tennessee, 38152, USA",35.1189387,-89.9372195996589
+University of Miami,University of Miami,"University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA",25.7173339,-80.2786688657706
+University of Miami,"University of Miami, Coral Gables, FL","University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA",25.7173339,-80.2786688657706
+University of Miami,"University of Miami, USA","University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA",25.7173339,-80.2786688657706
+University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.2942142,-83.710038935096
+University of Michigan,"University of Michigan, Ann Arbor","University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.2942142,-83.710038935096
+University of Michigan,"University of Michigan, Ann Arbor, MI","University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.2942142,-83.710038935096
+University of Michigan,"University of Michigan, Ann Arbor, MI 48109 USA","Power Center for the Performing Arts, 121, Fletcher Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.2808797,-83.7357152493893
+University of Michigan,"University of Michigan, Ann Arbor, MI, USA","University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.2942142,-83.710038935096
+University of Michigan,"University of Michigan, Ann Arbor, USA","University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.2942142,-83.710038935096
+University of Michigan,"University of Michigan, Ann, Arbor, MI USA","University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.2942142,-83.710038935096
+University of Milan,University of Milan,"Milan Avenue, Ray Mar Terrace, University City, St. Louis County, Missouri, 63130, USA",38.6796662,-90.3262816
+University of Minnesota,University of Minnesota,"WeismanArt, 333, East River Parkway, Marcy-Holmes, Phillips, Minneapolis, Hennepin County, Minnesota, 55455, USA",44.97308605,-93.2370881262941
+University of Missouri,University of Missouri,"L1, Maguire Boulevard, Lemone Industrial Park, Columbia, Boone County, Missouri, 65201, USA",38.926761,-92.2919378337447
+University of Missouri,"University of Missouri, Columbia, MO","L1, Maguire Boulevard, Lemone Industrial Park, Columbia, Boone County, Missouri, 65201, USA",38.926761,-92.2919378337447
+University of Nebraska - Lincoln,University of Nebraska - Lincoln,"Sheldon Museum of Art, North 12th Street, West Lincoln, Lincoln, Lancaster County, Nebraska, 68588-0300, USA",40.8174723,-96.7044468
+University of Nevada,University of Nevada,"Orange 1, Evans Avenue, Reno, Washoe County, Nevada, 89557, USA",39.5469449,-119.813465660936
+University of Nevada,"University of Nevada, Reno, Reno, NV, USA","Orange 1, Evans Avenue, Reno, Washoe County, Nevada, 89557, USA",39.5469449,-119.813465660936
+University of Nevada,"University of Nevada, Reno, USA","Orange 1, Evans Avenue, Reno, Washoe County, Nevada, 89557, USA",39.5469449,-119.813465660936
+University of New South Wales,University of New South Wales,"UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia",-33.91758275,151.231240246527
+University of New South Wales,"University of New South Wales, Sydney, NSW, Australia","UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia",-33.91758275,151.231240246527
+University of Newcastle,University of Newcastle,"University of Newcastle Central Coast Campus, Technology Bridge, Ourimbah, Central Coast, NSW, 2258, Australia",-33.3578899,151.37834708231
+University of Newcastle,"University of Newcastle, Newcastle, Australia","University of Newcastle, Christie Street, Newcastle, Newcastle-Maitland, Newcastle, NSW, 2300, Australia",-32.9276256,151.77133087091
+University of North Carolina,University of North Carolina,"University of North Carolina, Emergency Room Drive, Chapel Hill, Orange County, North Carolina, 27599, USA",35.90503535,-79.0477532652511
+University of North Carolina Wilmington,University of North Carolina Wilmington,"Kenan House, 1705, Market Street, Wilmington, New Hanover County, North Carolina, 28403, USA",34.2375581,-77.9270129
+University of North Carolina Wilmington,"University of North Carolina Wilmington, USA","Kenan House, 1705, Market Street, Wilmington, New Hanover County, North Carolina, 28403, USA",34.2375581,-77.9270129
+University of North Carolina Wilmington,"University of North Carolina Wilmington, Wilmington, NC, USA","Kenan House parking lot, Princess Street, Wilmington, New Hanover County, North Carolina, 28405, USA",34.2377352,-77.92673494788
+University of North Carolina Wilmington,"University of North Carolina Wilmington, Wilmington, United States","Kenan House, 1705, Market Street, Wilmington, New Hanover County, North Carolina, 28403, USA",34.2375581,-77.9270129
+University of North Carolina at Chapel Hill,University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.9113971,-79.0504529
+University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, Chapel Hill, NC","University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.9105975,-79.0517871
+University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, Chapel Hill, NC, USA","University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.9105975,-79.0517871
+University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, NC, USA","University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.9113971,-79.0504529
+University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, USA","University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.9113971,-79.0504529
+University of North Carolina at Charlotte,University of North Carolina at Charlotte,"Lot 20, Poplar Terrace Drive, Charlotte, Mecklenburg County, North Carolina, 28223, USA",35.3103441,-80.732616166699
+University of North Carolina at Wilmington,University of North Carolina at Wilmington,"University of North Carolina at Wilmington, Price Drive, University Suites, Wilmington, New Hanover County, North Carolina, 28403, USA",34.2249827,-77.8690774374448
+University of North Carolina at Wilmington,"University of North Carolina at Wilmington, USA","University of North Carolina at Wilmington, Price Drive, University Suites, Wilmington, New Hanover County, North Carolina, 28403, USA",34.2249827,-77.8690774374448
+University of North Texas,University of North Texas,"University of North Texas, West Highland Street, Denton, Denton County, Texas, 76201, USA",33.2098879,-97.1514748776857
+University of North Texas,"University of North Texas, Denton, Texas, USA","University of North Texas, West Highland Street, Denton, Denton County, Texas, 76201, USA",33.2098879,-97.1514748776857
+University of Northern British Columbia,University of Northern British Columbia,"UNBC, Campus Ring Road, College Heights, Prince George, Regional District of Fraser-Fort George, British Columbia, V2M 5K7, Canada",53.8925662,-122.814715920529
+University of Northern British Columbia,"University of Northern British Columbia, Canada","UNBC, Campus Ring Road, College Heights, Prince George, Regional District of Fraser-Fort George, British Columbia, V2M 5K7, Canada",53.8925662,-122.814715920529
+University of Northern British Columbia,"University of Northern British Columbia, Prince George, Canada","UNBC, Campus Ring Road, College Heights, Prince George, Regional District of Fraser-Fort George, British Columbia, V2M 5K7, Canada",53.8925662,-122.814715920529
+University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.2382202601727
+University of Notre Dame,"University of Notre Dame, Notre Dame, IN, USA","University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.2382202601727
+University of Notre Dame,"University of Notre Dame, USA","University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.2382202601727
+University of Notre Dame. Notre Dame,University of Notre Dame. Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.2382202601727
+University of Notre Dame. Notre Dame,"University of Notre Dame. Notre Dame, IN 46556.USA","University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.2382202601727
+University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.9387428,-1.20029569274574
+University of Nottingham,"University of Nottingham, Nottingham, UK","University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.9387428,-1.20029569274574
+University of Oradea,University of Oradea,"Universitatea Creștină Partium - Clădirea Sulyok, 27, Strada Primăriei, Orașul Nou, Oradea, Bihor, 410209, România",47.0570222,21.922709
+University of Oslo,University of Oslo,"UiO, Moltke Moes vei, Blindern, Nordre Aker, Oslo, 0851, Norge",59.93891665,10.7217076488427
+University of Oslo,"University of Oslo, Oslo, Norway","UiO, Moltke Moes vei, Blindern, Nordre Aker, Oslo, 0851, Norge",59.93891665,10.7217076488427
+University of Ottawa,University of Ottawa,"University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada",45.42580475,-75.6874011819989
+University of Ottawa,"University of Ottawa, Canada","University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada",45.42580475,-75.6874011819989
+University of Ottawa,"University of Ottawa, Ottawa, On, Canada","University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada",45.42580475,-75.6874011819989
+University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.0592157,25.466326012507
+University of Oulu,"University of Oulu, Finland","Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.0592157,25.466326012507
+University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.7534538,-1.25400997048855
+University of Oxford,"University of Oxford, Oxford, United Kingdom","Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.7534538,-1.25400997048855
+University of Oxford,"University of Oxford, UK","Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.7534538,-1.25400997048855
+University of Oxford,"University of Oxford, United Kingdom","Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.7534538,-1.25400997048855
+University of Patras,University of Patras,"Πανεπιστήμιο Πατρών, Λεωφ. Ιπποκράτους, κ. Ρίου (Αγίου Γεωργίου Ρίου), Πάτρα, Δήμος Πατρέων, Περιφερειακή Ενότητα Αχαΐας, Περιφέρεια Δυτικής Ελλάδας, Πελοπόννησος, Δυτική Ελλάδα και Ιόνιο, 26443, Ελλάδα",38.2899482,21.7886469
+University of Patras,"University of Patras, Greece","Πανεπιστήμιο Πατρών, Λεωφ. Ιπποκράτους, κ. Ρίου (Αγίου Γεωργίου Ρίου), Πάτρα, Δήμος Πατρέων, Περιφερειακή Ενότητα Αχαΐας, Περιφέρεια Δυτικής Ελλάδας, Πελοπόννησος, Δυτική Ελλάδα και Ιόνιο, 26443, Ελλάδα",38.2899482,21.7886469
+University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.9492344,-75.191989851901
+University of Pennsylvania,"University of Pennsylvania, Philadelphia, PA","40th Street Parking Lot, Walnut Street, Southwest Schuylkill, Philadelphia, Philadelphia County, Pennsylvania, 19104-1469, USA",39.95455675,-75.2029503620423
+University of Perugia,University of Perugia,"Caffe Perugia, 2350, Health Sciences Mall, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.2622421,-123.2450052
+University of Peshawar,University of Peshawar,"University of Peshawar, Road 2, JAHANGIR ABAD / جهانگیرآباد, پشاور‎, Peshāwar District, خیبر پختونخوا, 2500, ‏پاکستان‎",34.0092004,71.4877494739102
+University of Peshawar,"University of Peshawar, Pakistan","University of Peshawar, Road 2, JAHANGIR ABAD / جهانگیرآباد, پشاور‎, Peshāwar District, خیبر پختونخوا, 2500, ‏پاکستان‎",34.0092004,71.4877494739102
+University of Peshawar,"University of Peshawar, Peshawar, Pakistan","University of Peshawar, Road 2, JAHANGIR ABAD / جهانگیرآباد, پشاور‎, Peshāwar District, خیبر پختونخوا, 2500, ‏پاکستان‎",34.0092004,71.4877494739102
+University of Piraeus,University of Piraeus,"Πανεπιστήμιο Πειραιώς, 80, Καραολή και Δημητρίου, Απόλλωνας, Νέο Φάληρο, Πειραιάς, Δήμος Πειραιώς, Περιφερειακή Ενότητα Πειραιώς, Περιφέρεια Αττικής, Αττική, 185 34, Ελλάδα",37.94173275,23.6530326182197
+University of Pisa,University of Pisa,"Dipartimento di Fisica 'E. Fermi', 3, Largo Bruno Pontecorvo, San Francesco, Pisa, PI, TOS, 56127, Italia",43.7201299,10.4078976
+University of Pisa,"University of Pisa, Pisa, Italy","Dipartimento di Fisica 'E. Fermi', 3, Largo Bruno Pontecorvo, San Francesco, Pisa, PI, TOS, 56127, Italia",43.7201299,10.4078976
+University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.9624399276271
+University of Pittsburgh,"University of Pittsburgh, PA 15213, USA","Nationality Rooms, 4200, Omicron Delta Kappa Walk, North Oakland, PGH, Allegheny County, Pennsylvania, 15213, USA",40.4444651,-79.9532347
+University of Pittsburgh,"University of Pittsburgh, PA, 15260, USA","Stephen Foster Memorial Museum, Forbes Avenue, Panther Hollow, Central Oakland, PGH, Allegheny County, Pennsylvania, 15213, USA",40.4437547,-79.9529557
+University of Pittsburgh,"University of Pittsburgh, PA, USA","University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.9624399276271
+University of Pittsburgh,"University of Pittsburgh, Pittsburgh","University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.9624399276271
+University of Pittsburgh,"University of Pittsburgh, Pittsburgh PA","Visitor Parking, Thomas Boulevard, Homewood, Point Breeze North, Wilkinsburg, Allegheny County, Pennsylvania, 15208, USA",40.4495417,-79.8957457221781
+University of Pittsburgh,"University of Pittsburgh, Pittsburgh, PA","Visitor Parking, Thomas Boulevard, Homewood, Point Breeze North, Wilkinsburg, Allegheny County, Pennsylvania, 15208, USA",40.4495417,-79.8957457221781
+University of Pittsburgh,"University of Pittsburgh, Pittsburgh, PA , USA","Visitor Parking, Thomas Boulevard, Homewood, Point Breeze North, Wilkinsburg, Allegheny County, Pennsylvania, 15208, USA",40.4495417,-79.8957457221781
+University of Pittsburgh,"University of Pittsburgh, Pittsburgh, PA 15260, USA","Stephen Foster Memorial Museum, Forbes Avenue, Panther Hollow, Central Oakland, PGH, Allegheny County, Pennsylvania, 15213, USA",40.4437547,-79.9529557
+University of Pittsburgh,"University of Pittsburgh, Pittsburgh, PA, USA","Visitor Parking, Thomas Boulevard, Homewood, Point Breeze North, Wilkinsburg, Allegheny County, Pennsylvania, 15208, USA",40.4495417,-79.8957457221781
+University of Pittsburgh,"University of Pittsburgh, Pittsburgh, USA","University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.9624399276271
+University of Pittsburgh,"University of Pittsburgh, USA","University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.9624399276271
+University of Plymouth,University of Plymouth,"Charles Seale-Hayne Library, Portland Square, Barbican, Plymouth, South West England, England, PL4 6AP, UK",50.3752501,-4.13927692297343
+University of Plymouth,"University of Plymouth, UK","Charles Seale-Hayne Library, Portland Square, Barbican, Plymouth, South West England, England, PL4 6AP, UK",50.3752501,-4.13927692297343
+University of Portsmouth,University of Portsmouth,"University of Portsmouth - North Zone, Portland Street, Portsea, Portsmouth, South East, England, PO1 3DE, UK",50.79805775,-1.09834911234691
+University of Portsmouth,"University of Portsmouth, United Kingdom","University of Portsmouth - North Zone, Portland Street, Portsea, Portsmouth, South East, England, PO1 3DE, UK",50.79805775,-1.09834911234691
+University of Posts and Telecommunications,University of Posts and Telecommunications,"南京邮电大学仙林校区, 9, 文苑路, 仙林大学城, 栖霞区, 南京市, 江苏省, 210023, 中国",32.11527165,118.925956600436
+University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.013169559836
+University of Queensland,"University of Queensland, Australia","University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.013169559836
+University of Queensland,"University of Queensland, St Lucia, QLD, Australia","Anthropology Museum, Chancellors Place, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.497151,153.0117305
+University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.1576969,-77.5882915756007
+University of Rochester,"University of Rochester, NY 14627, USA","Central Utilities Lot, Firemans, Rochester, Monroe County, New York, 14627, USA",43.1242954,-77.6288352530005
+University of Rochester,"University of Rochester, Rochester, NY, USA","Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.1576969,-77.5882915756007
+University of Salzburg,University of Salzburg,"Universität Salzburg - Unipark, 1, Erzabt-Klotz-Straße, Nonntal, Salzburg, 5020, Österreich",47.79475945,13.0541752486067
+University of Salzburg,"University of Salzburg, Austria","Universität Salzburg - Unipark, 1, Erzabt-Klotz-Straße, Nonntal, Salzburg, 5020, Österreich",47.79475945,13.0541752486067
+University of Science and,University of Science and,"USM, Lengkok Sastera, The LIGHT, Batu Uban, George Town, PNG, 11700, Malaysia",5.35755715,100.303850375
+University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.264207478576
+University of Science and Technology of China,"University of Science and Technology of China, Hefei 230026, P. R. China","中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.264207478576
+University of Science and Technology of China,"University of Science and Technology of China, Hefei, 230027, China","中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.264207478576
+University of Science and Technology of China,"University of Science and Technology of China, Hefei, Anhui, China","中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.264207478576
+University of Science and Technology of China,"University of Science and Technology of China, Hefei, Anhui, P. R. China","中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.264207478576
+University of Science and Technology of China,"University of Science and Technology of China, Hefei, China","中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.264207478576
+University of Siena,University of Siena,"大學 University, 澤祥街 Chak Cheung Street, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.4133862,114.210058
+University of South Carolina,University of South Carolina,"University of South Carolina, Wheat Street, Columbia, Richland County, South Carolina, 29205, USA",33.9928298,-81.0268516781225
+University of South Carolina,"University of South Carolina, Columbia, USA","University of South Carolina, Wheat Street, Columbia, Richland County, South Carolina, 29205, USA",33.9928298,-81.0268516781225
+University of South Carolina,"University of South Carolina, USA","University of South Carolina, Wheat Street, Columbia, Richland County, South Carolina, 29205, USA",33.9928298,-81.0268516781225
+University of South Florida,University of South Florida,"University of South Florida, Leroy Collins Boulevard, Tampa, Hillsborough County, Florida, 33620, USA",28.0599999,-82.4138361902512
+University of South Florida,"University of South Florida, Tampa, Florida 33620","University of South Florida, Leroy Collins Boulevard, Tampa, Hillsborough County, Florida, 33620, USA",28.0599999,-82.4138361902512
+University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464294664816
+University of Southampton,"University of Southampton, SO17 1BJ, UK","Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464294664816
+University of Southampton,"University of Southampton, Southampton, U.K.","Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464294664816
+University of Southampton,"University of Southampton, United Kingdom","Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464294664816
+University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.0224149,-118.286344073446
+University of Southern California,"University of Southern California, Los Angeles, CA","University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.0224149,-118.286344073446
+University of Southern California,"University of Southern California, Los Angeles, CA 90089, USA","University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.0224149,-118.286344073446
+University of Southern California,"University of Southern California, Los Angeles, USA","University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.0224149,-118.286344073446
+University of St Andrews,University of St Andrews,"University of St Andrews, North Street, Albany Park Student accommodation, Carngour, St Andrews, Fife, Scotland, KY16 9AJ, UK",56.3411984,-2.7930938
+University of St Andrews,"University of St Andrews, United Kingdom","University of St Andrews, North Street, Albany Park Student accommodation, Carngour, St Andrews, Fife, Scotland, KY16 9AJ, UK",56.3411984,-2.7930938
+University of Stuttgart,University of Stuttgart,"Pädagogische Hochschule Ludwigsburg, 46, Reuteallee, Ludwigsburg-Nord, Ludwigsburg, Landkreis Ludwigsburg, Regierungsbezirk Stuttgart, Baden-Württemberg, 71634, Deutschland",48.9095338,9.1831892
+University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.590013824660236
+University of Surrey,"University of Surrey, Guildford, Surrey GU2 7XH, UK","University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.590013824660236
+University of Surrey,"University of Surrey, Guildford, Surrey, GU2 7XH, UK","University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.590013824660236
+University of Surrey,"University of Surrey, United Kingdom","University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.590013824660236
+University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.189433661925
+University of Sydney,"University of Sydney, Australia","USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.189433661925
+University of Sydney,"University of Sydney, Sydney, NSW, Australia","Sand Roll House, Parramatta Road, Camperdown, Sydney, NSW, 2050, Australia",-33.88578245,151.182068591379
+University of Tabriz,University of Tabriz,"دانشگاه تبریز, شهید ایرج خلوتی, کوی انقلاب, تبریز, بخش مرکزی, شهرستان تبریز, استان آذربایجان شرقی, 5166616471, ‏ایران‎",38.0612553,46.3298484
+University of Tabriz,"University of Tabriz, Tabriz, Iran","دانشگاه تبریز, شهید ایرج خلوتی, کوی انقلاب, تبریز, بخش مرکزی, شهرستان تبریز, استان آذربایجان شرقی, 5166616471, ‏ایران‎",38.0612553,46.3298484
+University of Tampere,University of Tampere,"Tampereen yliopisto, 4, Kalevantie, Ratinanranta, Tulli, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33100, Suomi",61.49412325,23.7792067776763
+University of Technology,University of Technology,"الجامعة التكنلوجية, A86;N11;D383, محلة 103, Al Saadoom Park, Rusafa, بغداد, Al Resafa, محافظة بغداد, 3241, العراق",33.3120263,44.4471829434368
+University of Technology,"University of Technology, Baghdad, Iraq","الجامعة التكنلوجية, A86;N11;D383, محلة 103, Al Saadoom Park, Rusafa, بغداد, Al Resafa, محافظة بغداد, 3241, العراق",33.3120263,44.4471829434368
+University of Technology,"University of Technology, Sydney","UTS, Thomas Street, Ultimo, Sydney, NSW, 2007, Australia",-33.8828784,151.200682779726
+University of Technology,"University of Technology, Sydney, Australia","UTS, Thomas Street, Ultimo, Sydney, NSW, 2007, Australia",-33.8828784,151.200682779726
+University of Technology,"University of Technology, Sydney, NSW, Australia","UTS, Thomas Street, Ultimo, Sydney, NSW, 2007, Australia",-33.8828784,151.200682779726
+University of Technology,"University of Technology, Sydney, Sydney, Australia","UTS, Thomas Street, Ultimo, Sydney, NSW, 2007, Australia",-33.8828784,151.200682779726
+University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.8809651,151.201072985483
+University of Technology Sydney,"University of Technology Sydney, New South Wales, Australia","University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.8809651,151.201072985483
+University of Technology Sydney,"University of Technology Sydney, Sydney, NSW, Australia","University of Technology Sydney, Harris Street, Ultimo, Sydney, NSW, 2007, Australia",-33.8830909,151.20217235558
+University of Technology Sydney,"University of Technology Sydney, Ultimo, NSW, Australia","University of Technology Sydney, Harris Street, Ultimo, Sydney, NSW, 2007, Australia",-33.8830909,151.20217235558
+University of Tennessee,University of Tennessee,"University of Tennessee, Melrose Avenue, Fort Sanders, Knoxville, Knox County, Tennessee, 37916, USA",35.9542493,-83.9307395
+University of Tennessee,"University of Tennessee, Knoxville","University of Tennessee, Melrose Avenue, Fort Sanders, Knoxville, Knox County, Tennessee, 37916, USA",35.9542493,-83.9307395
+University of Texas,University of Texas,"The University of Texas at Tyler, 3900, University Boulevard, Tyler, Smith County, Texas, 75799, USA",32.3163078,-95.2536994379459
+University of Texas,"University of Texas, Austin, TX 78712-1188, USA","University of Texas at Austin, 2152, San Jacinto Boulevard, Medical District, Austin, Travis County, Texas, 78712, USA",30.284458,-97.7342106
+University of Texas,"University of Texas, San Antonio, TX, USA","University of Texas at Austin, 2152, San Jacinto Boulevard, Medical District, Austin, Travis County, Texas, 78712, USA",30.284458,-97.7342106
+University of Texas at,University of Texas at,"The University of Texas at Tyler, 3900, University Boulevard, Tyler, Smith County, Texas, 75799, USA",32.3163078,-95.2536994379459
+University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.7283683,-97.112018348404
+University of Texas at Arlington,"University of Texas at Arlington, Arlington, TX","University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.7283683,-97.112018348404
+University of Texas at Arlington,"University of Texas at Arlington, Arlington, TX, USA","University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.7283683,-97.112018348404
+University of Texas at Arlington,"University of Texas at Arlington, Arlington, Texas 76019, USA","University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.7283683,-97.112018348404
+University of Texas at Arlington,"University of Texas at Arlington, TX, USA","University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.7283683,-97.112018348404
+University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.284151,-97.7319559808022
+University of Texas at Dallas,University of Texas at Dallas,"University of Texas at Dallas, Richardson, Dallas County, Texas, 75080, USA",32.9820799,-96.7566278
+University of Texas at Dallas,"University of Texas at Dallas, Richardson, 75080, USA","University of Texas at Dallas, Richardson, Dallas County, Texas, 75080, USA",32.9820799,-96.7566278
+University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.6194450505688
+University of Texas at San Antonio,"University of Texas at San Antonio, 78249, USA","UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.6194450505688
+University of Texas at San Antonio,"University of Texas at San Antonio, San Antonio, TX","Lot D3, South PanAm Expressway, Cattleman's Square, San Antonio, Bexar County, Texas, 78205, USA",29.42182005,-98.5016869955163
+University of Texas at San Antonio,"University of Texas at San Antonio, San Antonio, TX, USA","Lot D3, South PanAm Expressway, Cattleman's Square, San Antonio, Bexar County, Texas, 78205, USA",29.42182005,-98.5016869955163
+University of Texas at San Antonio,"University of Texas at San Antonio, San Antonio, Texas","Lot D3, South PanAm Expressway, Cattleman's Square, San Antonio, Bexar County, Texas, 78205, USA",29.42182005,-98.5016869955163
+University of Texas at San Antonio,"University of Texas at San Antonio, San Antonio, United States","UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.6194450505688
+University of Thessaloniki,University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.9588934957528
+University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.9020448,139.936220089117
+University of Tokyo,"University of Tokyo, Japan","東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.9020448,139.936220089117
+University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.3976997498952
+University of Toronto,"University of Toronto, Toronto, ON, Canada","University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.3976997498952
+University of Toronto Toronto,University of Toronto Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.3976997498952
+University of Toronto Toronto,"University of Toronto Toronto, Canada","University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.3976997498952
+University of Toulouse,University of Toulouse,"Toulouse, Lake Charles, Calcasieu Parish, Louisiana, 70605, USA",30.1781816,-93.2360581
+University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.0658836,11.1159894
+University of Trento,"University of Trento, Italy","University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.0658836,11.1159894
+University of Trento,"University of Trento, Trento, Italy","University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.0658836,11.1159894
+University of Trento,"University of Trento, Trento, TN, Italy","University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.0658836,11.1159894
+University of Tsukuba,University of Tsukuba,"University of Tsukuba, つばき通り, Kananemoto-satsukabe village, つくば市, 茨城県, 関東地方, 305-8377, 日本",36.1112058,140.1055176
+University of Tsukuba,"University of Tsukuba, Japan","University of Tsukuba, つばき通り, Kananemoto-satsukabe village, つくば市, 茨城県, 関東地方, 305-8377, 日本",36.1112058,140.1055176
+University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.2380139,6.8566761
+University of Twente,"University of Twente, Netherlands","University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.2380139,6.8566761
+University of Twente,"University of Twente, The Netherlands","University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.2380139,6.8566761
+University of Venezia,University of Venezia,"University, Fondamenta Toffetti, Dorsoduro, Venezia-Murano-Burano, Venezia, VE, VEN, 30123, Italia",45.4312742,12.3265377
+University of Vermont,University of Vermont,"University of Vermont, Colchester Avenue, Burlington, Chittenden County, Vermont, 05401, USA",44.48116865,-73.2002178989123
+University of Vermont,"University of Vermont, 33 Colchester Avenue, Burlington","University of Vermont, Colchester Avenue, Burlington, Chittenden County, Vermont, 05401, USA",44.48116865,-73.2002178989123
+University of Vienna,University of Vienna,"Uni Wien, 1, Universitätsring, Schottenviertel, KG Innere Stadt, Innere Stadt, Wien, 1010, Österreich",48.2131302,16.3606865338016
+University of Vienna,"University of Vienna, Austria","Uni Wien, 1, Universitätsring, Schottenviertel, KG Innere Stadt, Innere Stadt, Wien, 1010, Österreich",48.2131302,16.3606865338016
+University of Virginia,University of Virginia,"University of Virginia, Rotunda Alley, Carr's Hill, Albemarle County, Virginia, 22904-4119, USA",38.0353682,-78.5035322
+University of Virginia,"University of Virginia, Charlottesville, VA","University of Virginia, Emmet Street North, Charlottesville, Virginia, 22901, USA",38.0410576,-78.5054996018357
+University of Warwick,University of Warwick,"University of Warwick, University Road, Kirby Corner, Cannon Park, Coventry, West Midlands Combined Authority, West Midlands, England, CV4 7AL, UK",52.3793131,-1.5604252
+University of Warwick,"University of Warwick, Coventry, U.K.","University of Warwick, University Road, Kirby Corner, Cannon Park, Coventry, West Midlands Combined Authority, West Midlands, England, CV4 7AL, UK",52.3793131,-1.5604252
+University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.6543238,-122.308008943203
+University of Washington,"University of Washington, Seattle, USA","University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.6543238,-122.308008943203
+University of Washington,"University of Washington, Seattle, WA 98195, United States","University of Washington, Yakima Lane, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.6547795,-122.305818
+University of Washington,"University of Washington, Seattle, WA, USA","University of Washington, Northeast Walla Walla Road, Montlake, University District, Seattle, King County, Washington, 98195-2350, USA",47.65249975,-122.2998748
+University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.5472473165632
+University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.797900374251
+University of Windsor,University of Windsor,"Bridge AA, Ambassador Bridge, Windsor, Essex, Ontario, N9C 2J9, Canada",42.30791465,-83.0717691461703
+University of Windsor,"University of Windsor, Canada","Bridge AA, Ambassador Bridge, Windsor, Essex, Ontario, N9C 2J9, Canada",42.30791465,-83.0717691461703
+University of Windsor,"University of Windsor, Canada N9B 3P4","Bridge AA, Ambassador Bridge, Windsor, Essex, Ontario, N9C 2J9, Canada",42.30791465,-83.0717691461703
+University of Windsor,"University of Windsor, Ontario, Canada","Bridge AA, Ambassador Bridge, Windsor, Essex, Ontario, N9C 2J9, Canada",42.30791465,-83.0717691461703
+University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.4306642542901
+University of Wisconsin - Madison,University of Wisconsin - Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.4306642542901
+University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.4306642542901
+University of Wisconsin-Madison,University of Wisconsin-Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.4306642542901
+University of Wisconsin-Madison,"University of Wisconsin-Madison, Madison, WI, USA","UW Geology Museum, 1215, West Dayton Street, South Campus, Madison, Dane County, Wisconsin, 53715, USA",43.0705257,-89.4059387
+University of Witwatersrand,University of Witwatersrand,"University of the Witwatersrand, Empire Road, Johannesburg Ward 60, Johannesburg, City of Johannesburg Metropolitan Municipality, Gauteng, 2001, South Africa",-26.1888813,28.0247907319205
+University of Wollongong,University of Wollongong,"University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia",-34.40505545,150.878346547278
+University of Wollongong,"University of Wollongong, Wollongong, Australia","University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia",-34.40505545,150.878346547278
+University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.0313887829649
+University of York,"University of York, UK","University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.0313887829649
+University of York,"University of York, York, UK","University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.0313887829649
+University of York,"University of York, York, United Kingdom","University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.0313887829649
+University of Zurich,University of Zurich,"ZHAW, Rosenstrasse, Heiligberg, Altstadt, Winterthur, Bezirk Winterthur, Zürich, 8400, Schweiz/Suisse/Svizzera/Svizra",47.4968476,8.72981767380829
+University of Zurich,"University of Zurich, Zurich, Switzerland","ZHAW, Rosenstrasse, Heiligberg, Altstadt, Winterthur, Bezirk Winterthur, Zürich, 8400, Schweiz/Suisse/Svizzera/Svizra",47.4968476,8.72981767380829
+University of telecommunications and post,University of telecommunications and post,"Висше Училище по Телекомуникации и Пощи, 1, бул. Акад. Стефан Младенов, ж.к. Студентски град, район Студентски, Столична, София-град, 1700, Бългaрия",42.6560524,23.3476108351659
+University of telecommunications and post,"University of telecommunications and post, Sofia, Bulgaria","Висше Училище по Телекомуникации и Пощи, 1, бул. Акад. Стефан Младенов, ж.к. Студентски град, район Студентски, Столична, София-град, 1700, Бългaрия",42.6560524,23.3476108351659
+University of the Basque Country,University of the Basque Country,"Euskal Herriko Unibertsitatea, Ibaeta Campusa, Paseo Arriola pasealekua, Ibaeta, Donostia/San Sebastián, Donostialdea, Gipuzkoa, Euskadi, 20008, España",43.30927695,-2.01066784661227
+University of the Basque Country,"University of the Basque Country, San Sebastian, Spain","Euskal Herriko Unibertsitatea, Ibaeta Campusa, Paseo Arriola pasealekua, Ibaeta, Donostia/San Sebastián, Donostialdea, Gipuzkoa, Euskadi, 20008, España",43.30927695,-2.01066784661227
+University of the Western Cape,University of the Western Cape,"University of the Western Cape, Park Road, Cape Town Ward 9, Bellville, City of Cape Town, Western Cape, 7493, South Africa",-33.9327762,18.6291540714825
+University of the Witwatersrand,University of the Witwatersrand,"University of the Witwatersrand, Empire Road, Johannesburg Ward 60, Johannesburg, City of Johannesburg Metropolitan Municipality, Gauteng, 2001, South Africa",-26.1888813,28.0247907319205
+University of the Witwatersrand,"University of the Witwatersrand, Johannesburg, South Africa","University of the Witwatersrand, Empire Road, Johannesburg Ward 60, Johannesburg, City of Johannesburg Metropolitan Municipality, Gauteng, 2001, South Africa",-26.1888813,28.0247907319205
+Università degli Studi di Milano,Università degli Studi di Milano,"Università degli Studi di Milano, Via Camillo Golgi, Città Studi, Milano, MI, LOM, 20133, Italia",45.47567215,9.23336232066359
+Università degli Studi di Milano,"Università degli Studi di Milano, Italy","Università degli Studi di Milano, Via Camillo Golgi, Città Studi, Milano, MI, LOM, 20133, Italia",45.47567215,9.23336232066359
+Università di Salerno Italy,Università di Salerno Italy,"Università, Autostrada del Mediterraneo, Fisciano, SA, CAM, 84084, Italia",40.7646949,14.7889151
+Université du Québec à Chicoutimi (UQAC),Université du Québec à Chicoutimi (UQAC),"Université du Québec à Chicoutimi (UQAC), Chicoutimi, Ville de Saguenay, Saguenay - Lac-Saint-Jean, Québec, G7H 2B1, Canada",48.4200469,-71.0525344
+Ural Federal University (UrFU,Ural Federal University (UrFU,"УрФУ, улица Гагарина, Эврика, Втузгородок, Кировский район, Екатеринбург, городской округ Екатеринбург, Свердловская область, Уральский федеральный округ, 620062, РФ",56.8435083,60.6454805
+Urmia University,Urmia University,"دانشگاه ارومیه, خیابان اداره گاز (منصور افشار), دانشکده, ارومیه, بخش مرکزی, شهرستان ارومیه, استان آذربایجان غربی, 444655677, ‏ایران‎",37.52914535,45.0488607694682
+Urmia University,"Urmia University, Urmia, Iran","دانشگاه ارومیه, خیابان اداره گاز (منصور افشار), دانشکده, ارومیه, بخش مرکزی, شهرستان ارومیه, استان آذربایجان غربی, 444655677, ‏ایران‎",37.52914535,45.0488607694682
+"Ursinus College, Collegeville, PA","Ursinus College, Collegeville, PA","Ursinus College, East Main Street, Collegeville, Montgomery County, Pennsylvania, 19426, USA",40.1917705,-75.4568484
+Utah State University,Utah State University,"Utah State University, Champ Drive, Logan, Cache County, Utah, 84322, USA",41.7411504,-111.8122309
+Utah State University,"Utah State University, Logan UT","Utah State University, Champ Drive, Logan, Cache County, Utah, 84322, USA",41.7411504,-111.8122309
+Utah State University,"Utah State University, Logan, UT 84322-4205, USA","Utah State University, Champ Drive, Logan, Cache County, Utah, 84322, USA",41.7411504,-111.8122309
+Varendra University,Varendra University,"department of english Vrendra University, Dhaka - Rajshahi Highway, Talaimari, রাজশাহী, রাজশাহী বিভাগ, 6204, বাংলাদেশ",24.3643231,88.6333105
+Varendra University,"Varendra University, Rajshahi, Bangladesh","department of english Vrendra University, Dhaka - Rajshahi Highway, Talaimari, রাজশাহী, রাজশাহী বিভাগ, 6204, বাংলাদেশ",24.3643231,88.6333105
+Victoria University of Wellington,Victoria University of Wellington,"Victoria University of Wellington, Waiteata Road, Aro Valley, Wellington, Wellington City, Wellington, 6040, New Zealand/Aotearoa",-41.29052775,174.768469187426
+Vienna University of Technology,Vienna University of Technology,"TU Wien, Hauptgebäude, Hoftrakt, Freihausviertel, KG Wieden, Wieden, Wien, 1040, Österreich",48.19853965,16.3698616762866
+Vignan University,Vignan University,"Vignan university, Sangam Dairy Entry, Sangam Dairy, Gowdapalem, Guntur District, Andhra Pradesh, 522213, India",16.2329008,80.5475018
+Vignan University,"Vignan University, Andhra Pradesh, India","Vignan university, Sangam Dairy Entry, Sangam Dairy, Gowdapalem, Guntur District, Andhra Pradesh, 522213, India",16.2329008,80.5475018
+Villanova University,Villanova University,"Villanova University, East Lancaster Avenue, Radnor Township, Delaware County, Pennsylvania, 19010, USA",40.0367774,-75.342023320028
+Virginia Commonwealth University,Virginia Commonwealth University,"Virginia Commonwealth University, The Compass, Oregon Hill, Richmond, Richmond City, Virginia, 23284, USA",37.548215,-77.4530642444471
+Virginia Commonwealth University,"Virginia Commonwealth University, Richmond, VA, USA","Virginia Commonwealth University, The Compass, Oregon Hill, Richmond, Richmond City, Virginia, 23284, USA",37.548215,-77.4530642444471
+Virginia Polytechnic Institute and State University,Virginia Polytechnic Institute and State University,"Virginia Polytechnic Institute and State University, Duck Pond Drive, Blacksburg, Montgomery County, Virginia, 24061-9517, USA",37.21872455,-80.4254251869494
+Virginia Polytechnic Institute and State University,"Virginia Polytechnic Institute and State University, Blacksburg","Virginia Polytechnic Institute and State University, Duck Pond Drive, Blacksburg, Montgomery County, Virginia, 24061-9517, USA",37.21872455,-80.4254251869494
+Virginia Polytechnic Institute and State University,"Virginia Polytechnic Institute and State University, Blacksburg, Virginia","Virginia Polytechnic Institute and State University, Duck Pond Drive, Blacksburg, Montgomery County, Virginia, 24061-9517, USA",37.21872455,-80.4254251869494
+Virginia Tech Carilion Research Institute,Virginia Tech Carilion Research Institute,"Virginia Tech Carilion Research Institute, South Jefferson Street, Crystal Spring, Roanoke, Virginia, 24016, USA",37.2579548,-79.9423329131356
+"Vogt-Koelln-Strasse 30, 22527 Hamburg - Germany","Vogt-Koelln-Strasse 30, 22527 Hamburg - Germany","Informatikum, 30, Vogt-Kölln-Straße, Stellingen, Eimsbüttel, Hamburg, 22527, Deutschland",53.599482,9.93353435970931
+Vrije Universiteit Brussel,Vrije Universiteit Brussel,"Vrije Universiteit Brussel, 170, Quai de l'Industrie - Nijverheidskaai, Anderlecht, Brussel-Hoofdstad - Bruxelles-Capitale, Région de Bruxelles-Capitale - Brussels Hoofdstedelijk Gewest, 1070, België / Belgique / Belgien",50.8411007,4.32377555279953
+Vrije Universiteit Brussel,"Vrije Universiteit Brussel, 1050 Brussels, Belgium","Vrije Universiteit Brussel, 2, Boulevard de la Plaine - Pleinlaan, Ixelles - Elsene, Brussel-Hoofdstad - Bruxelles-Capitale, Région de Bruxelles-Capitale - Brussels Hoofdstedelijk Gewest, 1050, België / Belgique / Belgien",50.8223021,4.3967361
+"Vulcan Inc, Seattle, WA 98104","Vulcan Inc, Seattle, WA 98104","Vulcan Inc., 505, Downtown Seattle Transit Tunnel, Seattle Downtown, International District/Chinatown, Seattle, King County, Washington, 98191, USA",47.5980546,-122.3284865
+"Walt Disney Imagineering, USA","Walt Disney Imagineering, USA","Walt Disney Imagineering, 1401, Flower Street, Grand Central Creative Campus, Glendale, Los Angeles County, California, 91201, USA",34.1619174,-118.28837020278
+Warsaw University of Technology,Warsaw University of Technology,"Politechnika Warszawska, 1, Plac Politechniki, VIII, Śródmieście, Warszawa, mazowieckie, 00-661, RP",52.22165395,21.0073577612511
+Warsaw University of Technology,"Warsaw University of Technology, Poland","Politechnika Warszawska, 1, Plac Politechniki, VIII, Śródmieście, Warszawa, mazowieckie, 00-661, RP",52.22165395,21.0073577612511
+Waseda University,Waseda University,"早稲田大学 北九州キャンパス, 2-2, 有毛引野線, 八幡西区, 北九州市, 福岡県, 九州地方, 808-0135, 日本",33.8898728,130.708562047107
+Waseda University,"Waseda University, Kitakyushu, Japan 808-0135","早稲田大学 北九州キャンパス, 2-2, 有毛引野線, 八幡西区, 北九州市, 福岡県, 九州地方, 808-0135, 日本",33.8898728,130.708562047107
+Washington University,Washington University,"Dero, Wallace Drive, St. Louis County, Missouri, MO 63130, USA",38.6480445,-90.3099667
+Washington University,"Washington University, St. Louis, MO, USA","Dero, Wallace Drive, St. Louis County, Missouri, MO 63130, USA",38.6480445,-90.3099667
+Wayne State University,Wayne State University,"Parking Structure 3, East Warren Avenue, New Center, Detroit, Wayne County, Michigan, 48236, USA",42.357757,-83.0628671134125
+Wayne State University,"Wayne State University, Detroit, MI 48202, USA","Wayne State University, Burroughs Street, New Center, Detroit, Wayne County, Michigan, 48202, USA",42.3656423,-83.0711533990367
+Wayne State University,"Wayne State University, Detroit, MI, USA","Wayne State University, Burroughs Street, New Center, Detroit, Wayne County, Michigan, 48202, USA",42.3656423,-83.0711533990367
+Weizmann Institute of Science,Weizmann Institute of Science,"מכון ויצמן למדע, שדרת מרכוס זיו, מעונות שיין, אחוזות הנשיא, רחובות, מחוז המרכז, NO, ישראל",31.9078499,34.8133409244421
+"Weizmann Institute of Science, Rehovot, 76100, Israel","Weizmann Institute of Science, Rehovot, 76100, Israel","מכון ויצמן למדע, שדרת מרכוס זיו, מעונות שיין, אחוזות הנשיא, רחובות, מחוז המרכז, NO, ישראל",31.9078499,34.8133409244421
+West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355
+West Virginia University,"West Virginia University, Morgantown WV 26506, USA","88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355
+West Virginia University,"West Virginia University, Morgantown, USA","88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355
+West Virginia University,"West Virginia University, Morgantown, WV","88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355
+West Virginia University,"West Virginia University, Morgantown, WV 26506, USA","88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355
+West Virginia University,"West Virginia University, Morgantown, WV, USA","88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355
+Western Kentucky University,Western Kentucky University,"Western Kentucky University, Avenue of Champions, Bowling Green, Warren County, Kentucky, 42101, USA",36.9845317,-86.4576443016944
+Western Sydney University,Western Sydney University,"Western Sydney University, Parramatta City Campus, Smith Street, Parramatta, Sydney, Parramatta, NSW, 2150, Australia",-33.8160848,151.00560034186
+Western Sydney University,"Western Sydney University, Parramatta, NSW 2150, Australia","Western Sydney University, Parramatta City Campus, Smith Street, Parramatta, Sydney, Parramatta, NSW, 2150, Australia",-33.8160848,151.00560034186
+Wolfson College,Wolfson College,"Wolfson College, Linton Road, Norham Manor, Oxford, Oxon, South East, England, OX2 6UD, UK",51.7711076,-1.25361700492597
+Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.351428398184
+Wuhan University of Technology,"Wuhan University of Technology, Wuhan, China","武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.351428398184
+Xerox Research Center,Xerox Research Center,"Xerox Research Centre of Canada, 2660, Speakman Drive, Sheridan Park, Erin Mills, Ont., Peel Region, Ontario, L5J 2M4, Canada",43.5129109,-79.6664076152913
+Xi'an Jiaotong University,Xi'an Jiaotong University,"西安交通大学兴庆校区, 文治路, 乐居场, 碑林区 (Beilin), 西安市, 陕西省, 710048, 中国",34.2474949,108.978987508847
+Xi'an Jiaotong University,"Xi'an Jiaotong University, Xi'an, China","西安交通大学兴庆校区, 文治路, 乐居场, 碑林区 (Beilin), 西安市, 陕西省, 710048, 中国",34.2474949,108.978987508847
+Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.4399419,118.093017809127
+Xiamen University,"Xiamen University, Xiamen 361005, China","厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.4399419,118.093017809127
+Xiamen University,"Xiamen University, Xiamen, China","厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.4399419,118.093017809127
+Xiamen University,"Xiamen University, Xiamen, Fujian, China","厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.4399419,118.093017809127
+Xiamen University,"Xiamen University, Xiamen, P. R. China","厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.4399419,118.093017809127
+Xiangtan University,Xiangtan University,"湘潭大学图书馆, 文化广场, 羊牯塘街道, 雨湖区, 湘潭市 / Xiangtan, 湖南省, 中国",27.88707585,112.857109176016
+Xiangtan University,"Xiangtan University, Xiangtan, China","湘潭大学图书馆, 文化广场, 羊牯塘街道, 雨湖区, 湘潭市 / Xiangtan, 湖南省, 中国",27.88707585,112.857109176016
+Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.1235825,108.83546
+Xidian University,"Xidian University, Xi an, China","Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.1235825,108.83546
+Xidian University,"Xidian University, Xi'an, China","Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.1235825,108.83546
+Xidian University,"Xidian University, Xi’an, China","Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.1235825,108.83546
+Yale University,Yale University,"Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA",41.25713055,-72.9896696015223
+Yaroslavl State University,Yaroslavl State University,"ЯрГУ им. Демидова (Экономический факультет), 3, Комсомольская улица, Кировский район, Ярославль, городской округ Ярославль, Ярославская область, ЦФО, 150000, РФ",57.6252103,39.8845656
+Yeungnam University,Yeungnam University,"영남대, 대학로, 부적리, 경산시, 경북, 712-749, 대한민국",35.8365403,128.7534309
+Yonsei University,Yonsei University,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.5600406,126.9369248
+Yonsei University,"Yonsei University, 50 Yonsei-ro, SEOUL, Republic of Korea","연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.5600406,126.9369248
+Yonsei University,"Yonsei University, 50 Yonsei-ro, Seodaemun-gu, SEOUL, Republic of Korea","연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.5600406,126.9369248
+York University,York University,"York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada",43.7743911,-79.5048108538813
+York University,"York University, Toronto","York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada",43.7743911,-79.5048108538813
+York University,"York University, Toronto, Canada","York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada",43.7743911,-79.5048108538813
+Yunnan University,Yunnan University,"云南大学, 一二一大街, 志城家园, 五华区, 五华区 (Wuhua), 昆明市 (Kunming), 云南省, 650030, 中国",25.05703205,102.700275254918
+Yunnan University,"Yunnan University, Kunming, P. R. China","云南大学, 一二一大街, 志城家园, 五华区, 五华区 (Wuhua), 昆明市 (Kunming), 云南省, 650030, 中国",25.05703205,102.700275254918
+Zaragoza University,Zaragoza University,"Colegio Mayor Universitario Santa Isabel, Calle de Domingo Miral, Romareda, Zaragoza, Aragón, 50009, España",41.6406218,-0.900793992168927
+Zhejiang Normal University,Zhejiang Normal University,"浙江师范大学, 688, 迎宾大道, 柳湖花园, 金华市, 婺城区 (Wucheng), 金华市 / Jinhua, 浙江省, 321004, 中国",29.13646725,119.637686517179
+Zhejiang Normal University,"Zhejiang Normal University, Jinhua, China","浙江师范大学, 688, 迎宾大道, 柳湖花园, 金华市, 婺城区 (Wucheng), 金华市 / Jinhua, 浙江省, 321004, 中国",29.13646725,119.637686517179
+Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.119308216677
+Zhejiang University,"Zhejiang University, Hangzhou, China","浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.119308216677
+Zhejiang University of Technology,Zhejiang University of Technology,"浙江工业大学, 潮王路, 朝晖街道, 杭州市 Hangzhou, 浙江省, 310014, 中国",30.2931534,120.1620458
+Zhejiang University of Technology,"Zhejiang University of Technology, Hangzhou, China","浙江工业大学, 潮王路, 朝晖街道, 杭州市 Hangzhou, 浙江省, 310014, 中国",30.2931534,120.1620458
+Zhengzhou University,Zhengzhou University,"科学大道, 中原区 (Zhongyuan), 郑州市 / Zhengzhou, 河南省, 450001, 中国",34.8088168,113.5352664
+Zhengzhou University,"Zhengzhou University, China","科学大道, 中原区 (Zhongyuan), 郑州市 / Zhengzhou, 河南省, 450001, 中国",34.8088168,113.5352664
+Zhengzhou University,"Zhengzhou University, Zhengzhou, Henan 450052, China","科学大道, 中原区 (Zhongyuan), 郑州市 / Zhengzhou, 河南省, 450001, 中国",34.8088168,113.5352664
+a The University of Nottingham Malaysia Campus,a The University of Nottingham Malaysia Campus,"The University of Nottingham Malaysia Campus, Jalan Broga, Bandar Rinching, Semenyih, Selangor, 43500, Malaysia",2.9438432,101.8736196
+any other University,any other University,"Northern Film School, Millennium Square, Steander, Woodhouse, Leeds, Yorkshire and the Humber, England, LS1 3DW, UK",53.8012316,-1.5476213
+college of Engineering,college of Engineering,"College of Engineering, Sardar Patel Road, Srinagar Colony, Ward 171, Zone 13 Adyar, Chennai, Chennai district, Tamil Nadu, 600025, India",13.0110912,80.2354520862161
+of Cornell University,of Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.4505507,-76.4783512955428
+of bilkent university,of bilkent university,"Bilkent Üniversitesi, 3. Cadde, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.8720489,32.7539515466323
+of the University of Notre Dame,of the University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.2382202601727
+the Chinese University of Hong Kong,the Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.207886442805
+the Hong Kong Polytechnic University,the Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.304572,114.179762852269
+the Hong Kong Polytechnic University,"the Hong Kong Polytechnic University, Hong Kong","hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.304572,114.179762852269
+the University of Queensland,the University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.013169559836
+the University of Queensland,"the University of Queensland, Brisbane, Qld, Australia","University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.013169559836
+to Michigan State University,to Michigan State University,"Red Cedar River, Small Acres Lane, Okemos, Ingham County, Michigan, 48864, USA",42.7231021,-84.4449848597663
+university,university,"دانشکده مهندسی دانشگاه شیراز, ملاصدرا, فلسطین, محدوده شهرداری منطقه یک - شهرداری شیراز, شیراز, بخش مرکزی شهرستان شیراز, شهرستان شیراز, استان فارس, 71936, ‏ایران‎",29.6284395,52.5181728343761
+university,"university, Shiraz, Iran","دانشکده مهندسی دانشگاه شیراز, ملاصدرا, فلسطین, محدوده شهرداری منطقه یک - شهرداری شیراز, شیراز, بخش مرکزی شهرستان شیراز, شهرستان شیراز, استان فارس, 71936, ‏ایران‎",29.6284395,52.5181728343761
+y National Institute of Advanced Industrial Science and Technology,y National Institute of Advanced Industrial Science and Technology,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本",36.05238585,140.118523607658
+"École Polytechnique Fédérale de Lausanne (EPFL), Switzerland","École Polytechnique Fédérale de Lausanne (EPFL), Switzerland","Bibliothèque de l'EPFL, Route des Noyerettes, Ecublens, District de l'Ouest lausannois, Vaud, 1024, Schweiz/Suisse/Svizzera/Svizra",46.5184121,6.5684654
diff --git a/scraper/reports/doi_domains.html b/scraper/reports/doi_domains.html
new file mode 100644
index 00000000..957060d7
--- /dev/null
+++ b/scraper/reports/doi_domains.html
@@ -0,0 +1 @@
+<!doctype html><html><head><meta charset='utf-8'><title>DOI Domains</title><link rel='stylesheet' href='reports.css'></head><body><h2>DOI Domains</h2><table border='1' cellpadding='3' cellspacing='3'><tr><td>ieeexplore.ieee.org</td><td>1846</td></tr><tr><td>link.springer.com</td><td>388</td></tr><tr><td>dl.acm.org</td><td>259</td></tr><tr><td>www.computer.org</td><td>193</td></tr><tr><td>www.sciencedirect.com</td><td>139</td></tr><tr><td>linkinghub.elsevier.com</td><td>54</td></tr><tr><td>www.worldscientific.com</td><td>18</td></tr><tr><td>arxiv.org</td><td>15</td></tr><tr><td>www.ncbi.nlm.nih.gov</td><td>14</td></tr><tr><td>www.crossref.org</td><td>11</td></tr><tr><td>www.spiedigitallibrary.org</td><td>9</td></tr><tr><td>onlinelibrary.wiley.com</td><td>7</td></tr><tr><td>www.nature.com</td><td>6</td></tr><tr><td>www.mitpressjournals.org</td><td>5</td></tr><tr><td>mr.crossref.org</td><td>5</td></tr><tr><td>jivp-eurasipjournals.springeropen.com</td><td>4</td></tr><tr><td>www.tandfonline.com</td><td>3</td></tr><tr><td>www.inderscience.com</td><td>2</td></tr><tr><td>www.hindawi.com</td><td>2</td></tr><tr><td>www.scitepress.org</td><td>2</td></tr><tr><td>epubs.siam.org</td><td>1</td></tr><tr><td>www.jstage.jst.go.jp</td><td>1</td></tr><tr><td>annals-csis.org</td><td>1</td></tr><tr><td>ora.ox.ac.uk</td><td>1</td></tr><tr><td>www.emeraldinsight.com</td><td>1</td></tr><tr><td>spiral.imperial.ac.uk:8443</td><td>1</td></tr><tr><td>autosoftjournal.net</td><td>1</td></tr><tr><td>www.liebertpub.com</td><td>1</td></tr></table></body></html> \ No newline at end of file
diff --git a/scraper/reports/doi_institutions.csv b/scraper/reports/doi_institutions.csv
new file mode 100644
index 00000000..61467c23
--- /dev/null
+++ b/scraper/reports/doi_institutions.csv
@@ -0,0 +1,2171 @@
+,1637
+"School of Electrical and Electronic Engineering, Nanyang Technological University, Singapore",37
+"Department of Informatics, Aristotle University of Thessaloniki, Thessaloniki, Greece",24
+"Department of Electrical and Computer Engineering, National University of Singapore, Singapore",21
+"School of Computer Engineering, Nanyang Technological University, Singapore",19
+"Department of Automation, Tsinghua University, Beijing, China",16
+"South China University of Technology, Guangzhou, China",14
+"National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences",14
+"College of Computer Science and Technology, Zhejiang University, Hangzhou, China",14
+"School of Computer Science and Engineering, University of Electronic Science and Technology of China, Chengdu, China",13
+"Beijing University of Posts and Telecommunications, China",13
+"School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, China",12
+"Universit&#x00E9; de Lyon, CNRS, INSA-Lyon, LIRIS, UMR5205, F-69621, France",12
+"National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, Beijing, China",12
+"College of Computer Science, Sichuan University, Chengdu, China",12
+"College of Computer Science, Zhejiang University, Hangzhou, China",12
+"School of Computer Science and Technology, Nanjing University of Science and Technology, Nanjing, China",11
+"Department of Information Engineering and Computer Science, University of Trento, Trento, Italy",11
+"Department of Electrical Engineering, Korea Advanced Institute of Science and Technology, Daejeon, Korea",11
+"State Key Laboratory of Management and Control of Complex Systems, CASIA, Beijing, China",10
+"School of Computing, National University of Singapore, Singapore",10
+"University of Maryland, College Park",10
+"School of Computer Science and Engineering, Nanyang Technological University, Singapore",10
+"School of Computer Science and Technology, Tianjin University, Tianjin, China",10
+"Department of Computer Engineering, Kyung Hee University, South Korea",9
+"Dept. of Computer Science and Information Engineering, National Central University, Jhongli, Taiwan",9
+"Noblis, Falls Church, VA, U.S.A.",9
+"National University of Ireland Galway, Galway, Ireland",9
+"School of Electrical and Electronic Engineering, College of Engineering, Yonsei University, Seoul, South Korea",9
+"Department of Electrical Engineering, National Taiwan University of Science and Technology, Taipei, Taiwan",9
+"P.G. Demidov Yaroslavl State University, Yaroslavl, Russia",8
+"School of Electronic Information Engineering, Tianjin University, Tianjin, China",8
+"Faculty of Electrical Engineering and Information Technology, Slovak University of Technology, Bratislava, Bratislava, Slovakia",8
+"National Laboratory of Pattern Recognition, Center for Research on Intelligent Perception and Computing, Institute of Automation, Chinese Academy of Sciences, Beijing, China",8
+"Department of Electronic and Information Engineering, The Hong Kong Polytechnic University",8
+"CAS Center for Excellence in Brain Science and Intelligence Technology; National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences; University of Chinese Academy of Sciences, Beijing, China",8
+"Institute of Computer Science and Technology, Peking University, Beijing, P.R. China, 100871",8
+"School of Information and Communication Engineering, Beijing University of Posts and Telecommunications, Beijing, China",8
+"State Key Laboratory of Intelligent Technology and Systems, Tsinghua National Laboratory for Information Science and Technology, Department of Electronic Engineering, Tsinghua University, Beijing 100084, China",8
+"Department of Computing, Imperial College London, London, U.K.",8
+"Samsung R&amp;D Institute, China",8
+"Department of Computer Science and Engineering, Shanghai Jiao Tong University, China",8
+"IIIT-Delhi, India",7
+"National Key Laboratory of Fundamental Science on Synthetic Vision, College of Computer Science, Sichuan University, China",7
+"Bio-Computing Research Center, Shenzhen Graduate School, Harbin Institute of Technology, Shenzhen, China",7
+"Institute of Image Processing and Pattern Recognition, Shanghai Jiao Tong University, Shanghai, China",7
+"Stony Brook University, Stony Brook University, NY 11794, USA",7
+"CyLab Biometrics Center and the Department of Electrical and Computer Engineering (ECE), Carnegie Mellon University, Pittsburgh, USA",7
+"State Key Laboratory of Integrated Services Networks, Xidian University, Xi’an, China",7
+"School of Electronic Engineering, University of Electronic Science and Technology of China, Chengdu, China",7
+"Department of Computer Science, Jiangnan University, No. 1800 LiHu Avenue, WuXi, China",7
+"Center for Automation Research, UMIACS, University of Maryland, College Park, 20740, United States of America",7
+"Beijing University of Posts and Telecommunications, Beijing, 100876, China",7
+"Visual Computing Group, Microsoft Research, Beijing, China",7
+"School of Electrical Engineering, Korea Advanced Institute of Science and Technology, Daejeon, South Korea",7
+"Samsung Advanced Institute of Technology, Korea",7
+"Indraprastha Institute of Information Technology Delhi, Delhi, India",7
+"Department of Electrical Engineering, KAIST, Daejeon, Korea",6
+"Colorado State University, Fort Collins",6
+"Department of Information Engineering and Computer Science, University of Trento, Italy",6
+"College of Information Technical Science, NanKai University, CITS, TianJin, China",6
+"SAIT India, Samsung India Software Operations Pvt. Ltd (SISO), Bangalore, India, 560093",6
+"State University of New York at Binghamton, USA",6
+"Computer Science, U.Illinois at Urbana Champaign, Urbana, United States",6
+"Department of Computer Science, Università degli Studi di Milano, Italy",6
+"College of Information Science and Engineering, Northeastern University, Shenyang, 110819, PR China",6
+"Advanced Digital Sciences Center, Singapore",6
+"School of Electrical, Computer and Energy Engineering, Arizona State University, Tempe, AZ, USA",6
+Shanghai Jiao Tong University,6
+"Dept. of Computer Science, Johns Hopkins University, 3400 N. Charles St, Baltimore, MD 21218, USA",6
+"School of Communication and Information Engineering, University of Electronic Science and Technology of China, Chengdu, China",6
+"Key Lab of Intelligent Perception and Image Understanding of Ministry of Education of China, Xidian University, Xi'an, China",6
+"University of Ljubljana, Ljubljana, Slovenia",6
+"University of Notre Dame, Notre Dame, IN, USA",6
+"School of Electronic and Information Engineering, Beihang University, Beijing, China",6
+"DIA, University of Trieste, Italy",6
+"Shanghai Key Laboratory of Multidimensional Information Processing, East China Normal University, Shanghai, China",6
+"School of Information and Control Engineering, China University of Mining and Technology, Xuzhou, China",6
+"Center for Machine Vision and Signal Analysis, University of Oulu, Finland",6
+"School of Information and Software Engineering, University of Electronic Science and Technology of China (UESTC), Chengdu, 610054, China P.R.C",6
+"State Key Laboratory of Management and Control for Complex Systems, Institute of Automation, Chinese Academy of Sciences, Beijing, China",6
+"Institute of Automation, Chinese Academy of Sciences, China",6
+"Department of Automation, University of Science and Technology of China, Hefei, China",6
+"Department of Mechanical Engineering, Faculty of Engineering, National University of Singapore, 117576, Singapore",6
+"IIT Guwahati, Guwahati, India",6
+"School of Software, Dalian University of Technology, Dalian, China",6
+"Department of Computer Science and Engineering, Varendra University, Rajshahi, Bangladesh",6
+"Department of Electronic Engineering, Tsinghua University, Beijing, China",6
+"Indraprastha Institute of Information Technology Delhi, New Delhi, India",6
+"Key Lab of Computing and Communication Software of Anhui Province, School of Computer Science and Technology, University of Science and Technology of China, Hefei, China, 230027",6
+"Chongqing Key Laboratory of Computational Intelligence, Chongqing University of Posts and Telecommunications, Chongqing 400065, PR China",6
+"Department of Computing, The Hong Kong Polytechnic University, Hong Kong",6
+"Facultad de Ingeniería, Universidad de la República, Montevideo, Uruguay",6
+"School of Software, Dalian University of Technology, China 116620",6
+"School of Computer Science &amp; Technology, Harbin Institute of Technology",6
+"Microsoft Res. Asia, Beijing, China",5
+"LUNAM Université, LIUM, Le Mans, France",5
+"School of Electronics and Information, Northwestern Polytechnical University",5
+"Electronics and Telecommunications Research Institute, Korea",5
+"Institute for Microsensors, Actuators and Systems, University of Bremen, Bremen, Germany",5
+"Center for Research on Intelligent Perception and Computing, Institute of Automation, Chinese Academy of Sciences, Beijing, P.R. China, 100190",5
+"Institute of Automation, Chinese Academy of Sciences, Beijing, China",5
+"Nokia Research Center, Beijing",5
+"College of Computer Science, Zhejiang University of Technology, Hangzhou, China",5
+"Frontier Research Group, Samsung India Software Operations, India",5
+"Faculty of Information Technology, Beijing University of Technology, Beijing, China",5
+"Elektrik-Elektronik Mühendisliği Bölümü, Bahçeşehir Üniversitesi, İstanbul, Türkiye",5
+"School of Computer Science, Carnegie Mellon University, Pittsburgh, PA, USA",5
+IIIT-Delhi,5
+"School of Electrical Engineering, Korea Advanced Institute of Science and Technology (KAIST), Daejeon, Republic of Korea",5
+"Institute of Artificial Intelligence and Robotics, Xi’an Jiaotong University, Xi’an, China",5
+"State Key Laboratory of Management and Control for Complex Systems, Institute of Automation, Chinese Academy of Science, Beijing, 100190, China",5
+"Human Language Technology Center of Excellence, The Johns Hopkins University, Baltimore, MD, 21218, USA",5
+"Department of Electronic Engineering/Graduate School at Shenzhen, Tsinghua University, China",5
+"Dalian University of Technology, China",5
+Chinese Academy of Sciences,5
+"Nanyang Technological University, Singapore",5
+"College of Information Science and Technology, Beijing Normal University, Beijing, P.R. China",5
+"Visea İnovatif Bilgi Teknolojileri, ETGB Teknoparkı, Eskişehir, Türkiye",5
+"Ocean University of China, Department of Educational Technology, Qingdao, China",5
+"Chongqing Institute of Green and Intelligent Technology, Chinese Academy of Sciences, China",5
+"Disney Research, UK",5
+"Asian Institute of Technology (AIT), Pathum Thani 12120, Thailand",5
+"Department of Computer Science and Engineering, Michigan State University, East Lansing, MI",5
+"Telecommun. & Ind. Phys., CSIRO, Epping, NSW, Australia",5
+"Centre for Health Technologies, Faculty of Engineering and Information Technology, University of Technology, Sydney, New South Wales, Australia",5
+"Chongqing Institute of Green and Intelligent Technology, Chinese Academy of Sciences",5
+"Pattern Recognition and Intelligent System Laboratory, School of Information and Communication Engineering, Beijing University of Posts and Telecommunications, Beijing, China",5
+"Artificial Vision Laboratory, National Taiwan University of Science and Technology, Taipei, Taiwan",5
+"Hangzhou Dianzi University, Hangzhou, China",5
+"Biometric Recognition Group - ATVS, Escuela Politecnica Superior, Universidad Autonoma de Madrid, Avda. Francisco Tomas y Valiente, 11 - Campus de Cantoblanco - 28049, Spain",5
+"Queen Mary University of London, UK",5
+"Institute of Microelectronics, Tsinghua University, Beijing, China",5
+"Department of Computer Science and Engineering, National Institute of Technology, Durgapur, India",5
+"DUT-RU International School of Information &amp; Software Engineering, Dalian University of Technology",5
+"Department of Software Engineering, College of Computer and Information Sciences, King Saud University, Riyadh, Saudi Arabia",5
+"The University of Queensland, Brisbane, Australia",5
+"Department of Information Science and Engineering, Ritsumeikan University, Shiga, Japan",5
+"Department of Automation, State Key Lab of Intelligent Technologies and Systems and Tsinghua National Laboratory for Information Science and Technology (TNList), Tsinghua University, Beijing, China",5
+"Chongqing Institute of Green and Intelligent Technology, CAS, Chongqing, 400714",5
+"School of Data and Computer Science, Sun Yat-Sen University, China",5
+"Centre of Development of Advanced Computing (CDAC) Mumbai, 400049, India",5
+"Chongqing Institute of Technology, China",5
+"Center for Biometrics and Security Research &amp; National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, Beijing, China",5
+"Center for Biometrics and Security Research &amp; National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, 95 Zhongguancun East Road, Haidian District, Beijing, China",5
+"College of Computer Science and Technology, Zhejiang University of Technology, Hangzhou, China",5
+"IBM China Research Laboratory, Beijing, China",5
+"Department of Electronics and Telecommunications, Politecnico di Torino, Torino, Italy",5
+"Department of Computer Science, Xiamen University, Xiamen, P. R. China",5
+"School of Electrical and Electronic Engineering, Nanyang Technological University, Singapore 639798",5
+"The Institute of Optics and Electronics Chinese Academy of Sciences, University of the Chinese Academy of Sciences, Chengdu, China",5
+"Lane Department of Computer Science and Electrical Engineering, West Virginia University, Morgantown, WV, USA",5
+"School of Computer and Information, Hefei University of Technology, Hefei, China",5
+"NICTA, PO Box 6020, St Lucia, QLD 4067, Australia",5
+"College of Information and Control Engineering, China University of Petroleum, Qingdao, 266580, China",5
+"Department of Electronics and Telecommunication Engineering, Don Bosco Institute of Technology, Kurla (W), Mumbai, India",5
+"Dept. of Electrical and Computer Engineering, University of Illinois at Urbana-Champaign, USA",5
+"R V College of Engineering, Department of Computer Science and Engineering, Bangalore, India",5
+"Centre for Machine Vision Research, University of Oulu, Finland",5
+"Knowledge Technology Institute, Department of Informatics, University of Hamburg, Hamburg, Germany",5
+"School of Electrical Engineering Department, Korea University, Rep. of Korea",5
+"Inst. Nat. des Telecommun., Evry, France",5
+"National Science and Technology Development Agency, National Electronics and Computer Technology Center, Pathum Thani, 12120, Thailand",4
+"Dalian University of Technology, Dalian, Liaoning, 116024, China",4
+"School of Engineering &amp; Applied Science, Ahmedabad University, Gujarat, India 380009",4
+Shanghai Jiao Tong University School of Electronic Information and Electrical Engineering,4
+"University of Technology, Sydney, P.O. Box 123, Broadway, NSW, 2007, Australia",4
+"The Australian Centre for Visual Technologies, The university of Adelaide",4
+"School of Engineering and Computer Science, Victoria University of Wellington, PO Box 600, 6140, New Zealand",4
+"National Taiwan University, Taipei, Taiwan",4
+"School of Computer Science &amp; Technology, Nanjing University of Science and Technology, China",4
+"Department of Electrical and Computer Engineering, Rowan University, Glassboro, NJ- 08028",4
+"School of Electronic and Information Engineering, Xi'an Jiaotong University, Xi'an, China, 710049",4
+"Media Integration and Communication Center - MICC, University of Florence, Italy",4
+"School of Computer Science, University of the Witwatersrand, Johannesburg, South Africa",4
+"Department of Microelectornics and Computer Science, Lodz University of Technology, ul. Wolczanska 221/223, 90-924, Poland",4
+"School of Computer Science and Telecommunication Engineering, Jiangsu University, ZhenJiang, Jiangsu, 212013, P. R. China",4
+Seoul Nat'l Univ.,4
+"School of Information and Communication Engineering, Beijing University of Posts and Telecommunications, Beijing, China 100876",4
+"Key Laboratory of Specialty Fiber Optics and Optical Access Networks, Shanghai University, China",4
+"Institute of Computer Science and Technology, Peking University, Beijing, China, 100871",4
+"Department of Electronic Engineering, Tsinghua University, Beijing 100084, China",4
+"School of Computer Science and Engineering, Nanjing University of Science and Technology, China",4
+"Faculty of electrical engineering, University of Ljubljana, Slovenia",4
+"Department of Information Management and Security, Korea University",4
+"Pattern Recognition and Intelligent System Lab (PRIS) Beijing University of Posts and Telecommunications, Beijing 100876, P. R. China",4
+"Institute of Intelligence Information Processing, Xidian University, Xi¿an, China, 710071",4
+"Research Center for Information Technology Innovation (CITI), Academia Sinica, Taipei, 115 Taiwan",4
+"Univ. Orléans, INSA CVL, PRISME EA 4229, Bourges, France",4
+"Institute of Systems and Robotics (ISR), University of Coimbra, Portugal",4
+"School of Electrical and Electronics Engineering, Yonsei University, 50 Yonsei-ro, Seodaemun-gu, SEOUL, Republic of Korea",4
+"School of Information Science and Engineering, Southeast University, Nanjing, 210096, P.R. China",4
+"Department of Electrical Engineering, Korea Advanced Institute of Science and Technology, Daejeon, Republic of Korea",4
+"INRIA Grenoble Rhone-Alpes, FRANCE",4
+"Department of Automation, Shanghai Jiao Tong University, Shanghai, China",4
+"North China Electric Power University Department of Electronic and Communication Engineering Baoding, Hebei, China",4
+Seoul National University,4
+"School of Engineering, Computer and Mathematical Sciences, Auckland University of Technology, Auckland, New Zealand",4
+"Center for Research on Intelligent Perception and Computing, National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, Beijing 100190, China",4
+"School of Mechanical and Electrical Engineering, Guilin University of Electronic Technology Guangxi Guilin, China",4
+"University of Portsmouth, United Kingdom",4
+"Bilgisayar Mühendisliği, Başkent Üniversitesi, Ankara, Türkiye",4
+Universidad Autonoma de Madrid,4
+"University of Oulu, Machine Vision Group, PO Box 4500, 90014, Finland",4
+"Pohang University of Science and Technology (POSTECH), Pohang, Republic of Korea",4
+"Center for Computer Vision and Department of Mathematics, Sun Yat-Sen University, Guangzhou, China",4
+"Signal Processing Laboratory (LTS5), Ecole Polytechnique Fédérale de Lausanne, Lausanne, Switzerland",4
+"Institute of Computer, Hangzhou Dianzi University, China",4
+"State Key Laboratory of Management and Control for Complex Systems, Institute of Automation, Chinese Academy of Sciences, No.95 East Road of Zhongguancun, Beijing, China",4
+"Dept. of Computing, Curtin University GPO Box U1987, Perth, WA 6845",4
+"Department of Computer Science and Communication Engineering, Jiangsu University, Zhenjiang, China",4
+"Department of Electronic Engineering, Shanghai Jiao Tong University, Shanghai, China",4
+"Dalle Molle Instituite for Artificial Intelligence (IDSIA), Lugano, Switzerland",4
+"Dept of Electrical and Computer Engineering, University of Calgary, Calgary, CANADA",4
+"Department of Computer Science, University of Colorado at Colorado Springs",4
+"EECS Department, University of Kansas, Lawrence, KS",4
+"Center for Research on Intelligent Perception and Computing, Institute of Automation, Chinese Academy of Sciences, Beijing, China, 100190",4
+"The Queensland Brain Institute, University of Queensland, St Lucia, QLD, Australia",4
+"School of Information and Communication, Guilin University of Electronic Technology Guangxi Guilin, China",4
+"College of Information and Control Engineering, China University of Petroleum (East China), Qingdao, P.R. China",4
+Department of Mathematics and Computer Science University of Basel,4
+"Xi'an Jiaotong University, Xi'an, China",4
+"Department of Information Engineering, University of Brescia, Via Branze, 38 - 25123, Italy",4
+"Idiap Research Institute, Martigny, Switzerland",4
+"Department of Computer Science and Technology, Zhejiang University, Hangzhou 310027, China",4
+"Norwegian Biometrics Laboratory, NTNU - Gj⊘vik, Norway",4
+"Department of Electrical Engineering and Electronics, University of Liverpool, Liverpool, U.K.",4
+"Beijing Key Lab of Intelligent Information Technology, School of Computer Science, Beijing Institute of Technology, Beijing 100081, China",4
+"Key Laboratory of Intelligent Perception and Image Understanding of the Ministry of Education, International Research Center for Intelligent Perception and Computation, Joint International Research Laboratory of Intelligent Perception and Computation of China, Xidian University, Xi’an, China",4
+"VUB-NPU Joint AVSP Research Lab, Vrije Universiteit Brussel (VUB), Deptartment of Electronics & Informatics (ETRO), Pleinlaan 2, 1050 Brussel, Belgium",4
+"National Key Laboratory of Fundamental Science on Synthetic Vision, College of Computer Science, Sichuan University, Chengdu, China",4
+"State Key Laboratory of Integrated Services Networks, Xidian University, Xi'an, China",4
+"Department of Electronic and Electrical Engineering, Pohang University of Science and Technology (POSTECH), South Korea",4
+"Graduate School of Information Science, Nara Institute of Science and Technology, Takayama-cho 8916-5, Ikoma-shi, Nara, Japan",4
+"Department of Computer Science, University of North Carolina, Charlotte, NC, USA",4
+"Institute of Machine Learning and Systems Biology, College of Electronics and Information Engineering, Tongji University, Caoan Road 4800, Shanghai 201804, China",4
+"Intel Labs, Hillsboro, Oregon, USA",4
+"Smart Surveillance Interest Group, Department of Computer Science, Universidade Federal de Minas Gerais, Minas Gerais, Brazil",4
+"Department of Automation, State Key Lab of Intelligent Technologies and Systems, and Tsinghua National Laboratory for Information Science and Technology (TNList), Tsinghua University, Beijing, China",4
+"Université de Lyon, CNRS, UMR5205, F-69622, France",4
+"Department of Informatics, Aristotle University of Thessaloniki, Thessaloniki, 54124, Greece",4
+"Shanghai University School of Communication and Information Engineering Shanghai, China",4
+"University of Chinese Academy of Sciences, Beijing, China",4
+"Computer Science and Engineering, Pennsylvania State University, PA, USA SiliconScapes, LLC, PA, USA",4
+"Key Laboratory of Intelligent Information Processing, Chinese Academy of Sciences (CAS), Institute of Computing Technology, CAS, Beijing, China",4
+"Queen Mary University of London, London",4
+"Dept. of Computer Engineering, Keimyung University, Daegu, Korea",4
+"Department of Cognitive Science, Xiamen University, Xiamen, Fujian, China",4
+"State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, China",4
+"Universit&#x00E9; de Lyon, CNRS, UMR5205, F-69622, France",4
+"School of Information and Communication Engineering, Dalian University of Technology, Dalian, Liaoning, 116024, China",4
+"DII, University of Brescia, Brescia, Italy",4
+"Institute for Creative Technologies, University of Southern California",4
+"University of California, San Diego, USA",4
+"The University of Queensland, School of ITEE, QLD 4072, Australia",4
+"Department of Computer Science, University of York, UK",4
+"Department of Automation, State Key Lab of Intelligent Technologies and Systems, Tsinghua National Laboratory for Information Science and Technology (TNList), Tsinghua University, Beijing, China",4
+"Dept. of Computer Science, National Tsing Hua University, Hsinchu, Taiwan",4
+"SRI International, Menlo Park, USA",4
+"Universit&#x00E9; de Lyon, CNRS, France",4
+"School of Computer Science and Technology & Joint International Research Laboratory of Machine Learning and Neuromorphic Computing, Soochow University, Suzhou, China",4
+"Department of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, China",4
+"Computer Science and Engineering Department, University of South Florida, Tampa, FL, USA",4
+"Advanced Technologies Application Center 7a #21406 b/ 214 and 216, P.C. 12200, Siboney, Playa, Havana, Cuba",4
+"&#x00C7;o&#x011F;ulortam &#x0130;&#x015F;aret &#x0130;&#x015F;leme ve &#x00D6;r&#x00FC;nt&#x00FC; Tan&#x0131;ma Grubu, &#x0130;stanbul Teknik &#x00DC;niversitesi, &#x0130;stanbul, T&#x00FC;rkiye",4
+"School of Computer Science and Technology, Huazhong University of Science and Technology, Wuhan, China",4
+"Department of Electrical and Computer Engineering, Beckman Institute Advanced Science and Technology, University of Illinois at Urbana–Champaign, Urbana, IL, USA",4
+"National Laboratory of Pattern Recognition, CASIA, Center for Research on Intelligent Perception and Computing, CASIA, Center for Excellence in Brain Science and Intelligence Technology, CAS, University of Chinese Academy of Sciences, Beijing, 100049, China",4
+"Machine Learning and Cybernetics Research Center, School of Computer Science and Engineering, South China University of Technology, 510006, Guangzhou, China",4
+"IC Design Group, CSIR-Central Electronics Engineering Research Institute, Pilani, Rajasthan, India",4
+"College of Information Engineering, Yangzhou University, Yangzhou, China",4
+"Department of Mathematics, Intelligent Data Center, Sun Yat-sen University, Guangzhou, China",4
+"State Key Laboratory of Intelligent Technology and Systems Tsinghua National Laboratory for Information Science and Technology Department of Electronic Engineering, Tsinghua University, Beijing 100084, China",4
+"National Laboratory of Radar Signal Processing, Xidian University, Xi&#x2019;an, China",4
+"Department of Computer Engineering, College of Computer and Information Sciences, King Saud University, Riyadh, Saudi Arabia",4
+"Information Sciences Institute, University of Southern California, 4676 Admiralty Way, Marina Del Rey, 90292, USA",4
+"Faculty of Information Science and Technology, Multimedia University, Melaka, Malaysia",4
+"National Digital Switching System Engineering and Technological Research Center, Zhengzhou, China",4
+"Department of Electrical and Computer Engineering, Northeastern University, Boston, MA, USA",4
+University of Electronic Science and Technology of China,4
+National Taiwan University of Science and Technology,4
+"Samsung R&D Institute, Bangalore, India",4
+"Yaroslavl State University, Yaroslavl, Russia",4
+"Department of Electrical and Computer Engineering, Seoul National University",4
+"School of Electronics and Information Technology, Sun Yat-sen University, China",4
+"University of Tunis, The National Higher school of engineers of Tunis (ENSIT), Laboratory of Signal Image and Energy Mastery, LR13ES03 (SIME), Tunis, Tunisia",4
+"College of Information Science and Electronic Engineering, Zhejiang University, Hangzhou, China",4
+"Department of Computer Science and Engineering, Sejong University, 98 Gunja, Gwangjin, Seoul 143-747, Korea",4
+"University of Maryland, College Park, Maryland 20740 United States",4
+"Face Aging Group, University of North Carolina, Wilmington, NC, USA",4
+"Key Laboratory of Intelligent Information Processing, Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China",4
+"North Carolina State University, Department of Electrical and Computer Engineering, Raleigh, United States of America",4
+"Institute of Computer Science and Technology, Peking University, Beijing, China",4
+"College of Computer and Information Sciences, King Saud University, Riyadh, Saudi Arabia",4
+"Media Technology Lab, Huawei Technologies Co., Ltd",4
+"Institute of Artificial Intelligence and Robotics, Xi'an Jiaotong University, 28 Xianning West Road, Xi'an, Shaanxi, China",4
+"National University of Defense Technology, Changsha, China",4
+"Hewlett-Packard Laboratories, Hewlett-Packard Company, Palo Alto, CA, USA",4
+"School of Computer Science and Engineering, Center for Robotics, University of Electronic Science and Technology of China, Chengdu, China",4
+"School of Electronic and Computer Engineering, Peking University",4
+"Centre for Vision, Speech and Signal Processing University of Surrey, Guildford, UK",4
+"Shenzhen Key Laboratory of Information Science and Technology, Shenzhen Engineering Laboratory of IS&DCP and the Department of Electronic Engineering, Graduate School at Shenzhen, Tsinghua University, Beijing, China",4
+"Department of Computer Graphics and Multimedia, University of Brno, Brno, Czech Republic",4
+"Department of Information and Communication Technologies, Universitat Pompeu Fabra, Barcelona, Spain",4
+"Center for Research on Intelligent Perception and Computing, National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, China",4
+"Shanghai Jiao Tong University, School of Electronic Information and Electrical Engineering, People's Republic of China",4
+"Video/Image Modeling and Synthesis Laboratory, Department of Computer and Information Sciences, University of Delaware, Newark, DE",4
+"Multimedia Processing Lab., Samsung Advanced Institute of Technology (SAIT), Suwon-si, Korea",4
+"Center for Unified Biometrics and Sensors, University at Buffalo, NY, USA",4
+"Osaka university, Japan",4
+"IBJ, Inc., Tokyo, Japan",4
+"Faculty of Engineering, Ain Shams University, Computer and Systems Engineering Department, Cairo, Egypt",4
+"School of Automation and Information Engineering, Xi'an University of Technology, Xi'an, China",4
+College of electronic and information engineer Changchun University of Science and Technology Changchun China,4
+"School of Electrical, Computer and Telecommunication Engineering, University of Wollongong, NSW 2522, Australia",4
+"The University of Texas at Austin Austin, Texas, USA",4
+"Amity University Uttar Pradesh, Noida",4
+"Intelligent Media Laboratory, Digital Contents Research Institute, Sejong University, Seoul, South Korea",4
+"Computer Science and Engineering Dept., University of Nevada Reno, USA",4
+"Dept of Computer Engineering, Kyung Hee University, Yongin-si, South Korea",4
+"Computational Biomedicine Lab, Department of Computer Science, University of Houston, TX, USA",4
+"Department of Information and Control, B-DAT Laboratory, Nanjing University of Information and Technology, Nanjing, China",4
+"State Key Laboratory of Virtual Reality Technology and Systems, School of Computer Science and Engineering, Beihang University, Beijing, China",4
+"State Key Laboratory of Robotics, Chinese Academy of Sciences, Shenyang Institute of Automation, Shenyang, 110016, China",4
+"Hefei University of Technology, Hefei, China",4
+"Sharp Laboratories of America, Camas, WA",4
+"National University of Singapore, Singapore",4
+"Department of Data Science and Knowledge Engineering, Maastricht University, Maastricht, Netherlands",4
+"Center for Biometrics and Security Research and the National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, Beijing, China",4
+"Geintra Research Group, University of Alcala",4
+"National Engineering Research Center for Multimedia Software, Computer School, Wuhan University, Wuhan, China",4
+"Electrical and Computer Systems Engineering, School of Engineering, Monash University Malaysia, 46150 Selangor, Malaysia",4
+"Beijing Laboratory of Intelligent Information Technology, School of Computer Science, Beijing Institute of Technology, Beijing, China",4
+"Department of Computer Science, Hong Kong Baptist University, Hong Kong",4
+"Beijing, Haidian, China",4
+"Dept. of Comput. Sci. & Technol., Tsinghua Univ., Beijing, China",4
+"School of Electrical and Computer Engineering, Purdue University, West Lafayette, IN 47907, U.S.A.",4
+"Institute for Human-Machine Communication, Technische Universität München, Germany",4
+"School of Information Technology and Electrical Engineering, The University of Queensland, Brisbane, Australia",4
+"Department of Electrical and Computer Engineering, Ryerson University, Toronto, ON, Canada",4
+"Faculty of Engineering Science, Department of Systems Innovation, Arai Laboratory at Osaka University, Japan",4
+"School of Electronic Engineering, Xidian University, Xi'an, China",4
+"Biometric Recognition Group - ATVS, EPS, Universidad Autonoma de Madrid, Avda. Francisco Tomas y Valiente, 11 - Campus de Cantoblanco - 28049 Madrid, Spain",4
+"Department of Mathematics, Intelligent Data Center, Sun Yat-sen University, Guangzhou, China",4
+"University of Trento, Italy",4
+"Centre for Imaging Sciences, The University of Manchester, Manchester, United Kingdom",4
+"National Laboratory of Pattern Recognition, CASIA, University of Chinese Academy of Sciences, Beijing, 100049, China",4
+"School of Electronic and Electrical Engineering, Shanghai Jiao Tong University, National Engineering Lab on Information Content Analysis Techniques, GT036001 Shanghai, China",4
+"State Key Laboratory of Management and Control for Complex Systems, Institute of Automation, Chinese Academy of Sciences",4
+"Department of Informatics, University of Oslo, Oslo, Norway",4
+"Speech, Audio, Image and Video Technology (SAIVT) Laboratory, Queensland University of Technology, Australia",4
+"Technicolor, France",4
+"School of Mathematics and Computational Science, Sun Yat-sen University, Guangzhou, China",4
+"School of Computer Science and Technology, Wuhan University of Technology, Wuhan, China",3
+"CyLab Biometrics Center and the Department of Electrical and Computer Engineering, Carnegie Mellon University, Pittsburgh, PA, USA",3
+"Department of Software and Information Systems Engineering, Ben-Gurion University of the Negev, Beersheba, Israel",3
+"National Laboratory of Pattern Recognition, Center for Excellence in Brain Science and Intelligence Technology, Institute of Automation, Chinese Academy of Sciences, University of Chinese Academy of Sciences, Beijing, China",3
+"Vision Lab at Department of Electrical and Computer Engineering, Old Dominion University, Norfolk, VA 23529, USA",3
+"IBM T. J. Watson Research, Yorktown Heights, NY, USA",3
+"Computer Science and Technology, University of Science and Technology of China",3
+"School of Information Technologies, University of Sydney, Australia",3
+"Department of Electronic Engineering, The Chinese University of Hong Kong, China",3
+"Key Laboratory of Machine Perception (Ministry of Education) Engineering Lab on Intelligent Perception for Internet of Things (ELIP), Shenzhen Graduate School, Peking University, China",3
+"School of Electrical and Electronic Engineering, Changchun University of Technology, Changchun, CO 130012 China",3
+"Center for Cognitive Ubiquitous Computing, Arizona State University, USA",3
+"School of Marine Science and Technology, Northwestern Polytechnical University, Xi’an, China",3
+"Institute of Computing, State University of Campinas, Campinas, Brazil",3
+"Guangdong Key Laboratory of Data Security and Privacy Preserving, Guangdong Engineering Research Center of Data Security and Privacy Preserving, College of Information Science and Technology, Jinan University, Guangzhou, China",3
+"State Key Laboratory of Intelligent Technology and Systems, Department of Electronic Engineering, Tsinghua University, Beijing 100084, P.R. China",3
+"CAS Key Laboratory of Technology in Geo-spatial Information Processing and Application System, University of Science and Technology of China, Hefei 230027, China",3
+"Algılayıcılar, Görüntü ve Sinyal İşleme Grubu, HAVELSAN A.Ş. Ankara, Türkiye",3
+"C & C Innovation Research Labs, NEC Corporation, Nara, Japan",3
+"Dept. of Audio Visual Technology, Technische Universität, Ilmenau, Germany",3
+"Imperial College London, UK",3
+"School of Electrical and Computer Engineering, Royal Melbourne Institute of Technology University , Melbourne, Australia",3
+"School of Information Technology and Electrical Engineering, The University of Queensland, Australia",3
+"Department of Computer Science and Engineering, Kyung Hee University, Seoul, South Korea",3
+"Institute for Infocomm Research, 1 Fusionpolis Way, #21-01, Connexis Singapore 138632, Singapore",3
+"School of Computer Science and Engineering, South China University of Technology, China",3
+"Department of Radiology and the Biomedical Research Imaging Center, University of North Carolina at Chapel Hill, Chapel Hill, NC, USA",3
+"Dept. of Electrical Engineering and Comp. Sc., Northwestern University, Evanston, IL 60208, USA",3
+"School of Electronics and Computer Science, University of Southampton, Southampton, U.K.",3
+"School of Electronic and Information Engineering, Xi'an Jiaotong University, Xi'an, China",3
+"Program of Electrical Engineering, COPPE/UFRJ, Universidade Federal do Rio de Janeiro, Rio de Janeiro-RJ CEP, Brazil",3
+"Bilgisayar Mühendisliği, İstanbul Teknik Üniversitesi, İstanbul, Turkey",3
+"Department of Electrical and Computer Engineering and the Center for Automation Research, UMIACS, University of Maryland, College Park, USA",3
+"Department of Computer Engineering, Kyung Hee University, Seoul, South Korea",3
+"Michigan State University, United States of America",3
+"School of Engineering, University of Baja California, Tijuana, México",3
+"Department of Computer Science and Engineering, University of South Florida, Tampa, Florida 33620",3
+"KTH Royal Institute of Technology, 100 44 Stockholm, Sweden",3
+"School of Software, Huazhong University of Science and Technology, Wuhan, China",3
+"Department of Computer Science and Engineering, Lehigh University, Bethlehem, PA 18015, USA",3
+"School of Computer Science, Center for Optical Imagery Analysis and Learning (OPTIMAL)",3
+"Department of Computing, Curtin University, Perth WA 6102, Australia",3
+"Department of Systems and Computing, Federal University of Campina Grande, Av. Apríigio Veloso, 882, 58429-900 Campina Grande, PB, Brazil",3
+"Institute of Imaging and Computer Vision, RWTH Aachen University, Templergraben 55, 52056, Aachen, Germany",3
+"Universidade Federal do Rio de Janeiro, Cx.P. 68504, Rio de Janeiro, RJ, CEP 21945-970, Brazil",3
+"R&D Centre Algoritmi, School of Engineering, University of Minho, Portugal",3
+"National Laboratory for Parallel and Distributed Processing, School of Computer, College of Computer, National University of Defense Technology, Changsha, China",3
+"Department of Computer and Information Science, Temple University, Philadelphia, PA, 19122, USA",3
+"Department of Control and Computer Engineering, Politecnico di Torino, Italy",3
+"Key Laboratory of System Control and Information Processing MOE, Department of Automation, Shanghai Jiao Tong University",3
+"College of Computer Science, Zhejiang University, China",3
+"Institute of Industrial Information Technology (IIIT), Karlsruhe Institute of Technology (KIT), 76187 Karlsruhe, Germany",3
+"School of Electronics and Information Technology, Sun Yat-Sen University, Guangzhou, China",3
+"Institute for Electronics, Signal Processing and Communications (IESK), Otto-von-Guericke-University Magdeburg, D-39106, P.O. Box 4210 Germany",3
+"Institute for Human-Machine Communication, TU München, Theresienstraße 90, 80333 München, Germany",3
+"School of Computer Science and Technology, Harbin Institute of Technology, China",3
+"Oak Ridge National Laboratory, USA",3
+"Center for Research in Intelligent Systems, University of California, Riverside Riverside, CA 92521-0425, USA",3
+"Department of Electrical and Computer Engineering, University of Windsor, 401 Sunset Avenue, Windsor, N9B 3P4, Canada",3
+"Elektrik ve Elektronik Mühendisliği Bölümü, Eskişehir Osmangazi Üniversitesi, Türkiye",3
+"Department of Computer Science, Zhejiang University, Hangzhou, China",3
+"Software Solution Laboratory, Samsung Advanced Institute of Technology, Suwon-si, South Korea",3
+Rice University,3
+"Department of electronic engineering, Key Lab of Intelligent Perception and Image Understanding of Ministry of Education, Xi'an, China",3
+"Centre of Informatics, Federal University of Pernambuco, Recife-PE, Brazil. Bruno J. T. Fernandes is also with the Polytechnic School, University of Pernambuco, Brazil",3
+"Computer Vision Laboratory, ETH Zurich, Sternwartstrasse 7, 8092, Switzerland",3
+"VNU HCMC, University of Science, Ho Chi Minh City, Vietnam",3
+"Department of Electrical and Computer Engineering, Peking University, Beijing, China",3
+"Instrumentation, IT and Systems Lab IRSEEM Rouen, FR",3
+"Aristotle University of Thessaloniki, Greece",3
+"School of Automation, Northwestern Polytechnical University, Xi’an, China",3
+"Department of Computer Science and Engineering, Arizona State University, Tempe, AZ, USA",3
+"College of Information and Control Engineering, China University of Petroleum (East China), Qingdao, Shandong, 266580, China",3
+"Center for Research on Intelligent Perception and Computing, Institute of Automation, National Laboratory of Pattern Recognition, Chinese Academy of Sciences",3
+"National Laboratory of Pattern Recognition CAS Center for Excellence in Brain Science and Intelligence Technology Institute of Automation, Chinese Academy of Sciences, 100190, China",3
+"Univ. Bordeaux, LaBRI, PICTURA, UMR 5800, F-33400 Talence, France",3
+"The Univ of Hong Kong, China",3
+"Advanced Technologies Application Center (CENATAV), 7A #21406 Siboney, Playa, P.C.12200, Havana, Cuba",3
+"GIPSA-Lab, Grenoble, France",3
+"Samsung Research and Development Institute Bangalore Pvt Ltd., Bangalore, India",3
+"Inst. of Autom., Shanghai Jiao Tong Univ., China",3
+"Department of Computer Science, New Jersey Institute of Technology, Newark, USA",3
+"State Key Laboratory of Integrated Services Networks, Xidian University, Xi’an, China",3
+"Department of Electrical and Electronic Engineering, Imperial College London, London, U.K.",3
+"Center for Cognitive Ubiquitous Computing (CUbiC), Arizona State University, Tempe, AZ, USA",3
+"Department of Computing, Curtin University, Perth WA, Australia",3
+SUNY Buffalo,3
+"Graduate School of System Design Tokyo Metropolitan University Tokyo, Japan",3
+"Bilgisayar Mühendisliği Bölümü, TOBB Ekonomi ve Teknoloji Üniversitesi, Ankara, Türkiye",3
+"Intelligent Data Center, School of Mathematics and Computational Science, Sun Yat-sen University, Guangzhou, China",3
+"Indian Institute of Information Technology at Allahabad, Allahabad, India",3
+"Face Aging Group, Computer Science Department, UNCW, USA",3
+"Department of Computer Science and Digital Technologies, Faculty of Engineering and Environment, Northumbria University, Newcastle Upon Tyne, U.K.",3
+"Faculty of Information Technology, University of Technology, Sydney, Australia",3
+"Department of Computer Science and Engineering, Visual Learning and Intelligence Group, IIT Hyderabad, Hyderabad, India",3
+"School of Computing, Communications and Electronics, University of Plymouth, UK",3
+"Department of Electrical and Computer Engineering, Carnegie Mellon University, Pittsburgh, PA",3
+"University of California San Diego, United States of America",3
+"Key Lab of Intelligent Perception and Image Understanding of Ministry of Education, Xidian University, Xi'an, China",3
+"Singapore University of Technology and Design, Singapore",3
+"School of Information Science and Technology, Xiamen University, Xiamen, P. R. China",3
+"Research Center for Information Technology Innovation, Academia Sinica, Taipei, Taiwan",3
+"School of Information Technology and Electrical Engineering, The University of Queensland",3
+"Center for Automation Research, UMIACS University of Maryland, College Park, MD 20742",3
+"School of Electronics, Electrical Engineering and Computer Science, Queen’s University Belfast, Belfast, U.K.",3
+"Advanced Digital Sciences Center (ADSC), University of Illinois at Urbana-Champaign, Singapore",3
+"University of Zagreb, Faculty of Electrical Engineering and Computing, Croatia",3
+"Department of Computer Science, University of Hamburg, Germany",3
+"Peking University, Beijing, China",3
+"Department of Computer Science, Multimedia Processing Laboratory, National Tsing Hua University, Hsinchu, Taiwan",3
+"West Virginia University, Lane Dept. of CSEE, Morgantown, WV",3
+University of California San Diego,3
+"School of Computer Science and Technology, University of Science and Technology of China, Hefei, Anhui, China",3
+"School of Information Technologies, The University of Sydney, NSW 2006, Australia, Sydney",3
+"Department of Electrical Engineering, University of Windsor, Ontario, Canada",3
+"School of Information and Communication Engineering, Beijing University of Posts and Telcommunications, Beijing, China",3
+"INRIA Grenoble Rhône-Alpes Research Center, 655 avenue de l'Europe, 38 334 Saint Ismier Cedex, France",3
+"National Institutes of Health, Bethesda, Maryland 20892",3
+"Robotics Institute, Carnegie Mellon University, Pittsburgh, PA, USA",3
+"Gwangju Institute of Science and Technology, 123, Cheomdangwagi-ro, Buk-gu, Gwangju, South Korea",3
+"Department of Computing, The Hong Kong Polytechnic University, China",3
+Harvard University,3
+"School of Computing and Information Sciences, Florida International University, Miami, FL",3
+"College of Electronic Information and Automation, Civil Aviation University of China, Tianjin",3
+"Department of Automation, Tsinghua University, 100084 Beijing, China",3
+"NICTA, Canberra ACT, Australia and CECS, Australian National University, Australia",3
+"Research Center of Intelligent Robotics, Shanghai Jiao Tong University, Shanghai 200240, P.R China",3
+"UtopiaCompression Corporation, 11150 W. Olympic Blvd, Suite 820, Los Angeles, CA 90064, USA",3
+"Laboratoire des Systèmes de Télécommunication et Ingénierie de la Décision (LASTID) Université Ibn Tofail BP 133, Kenitra 14000, Maroc",3
+"Sorbonne Universités, UPMC Univ Paris 06, CNRS, UMR 7222, F-75005, Paris, France",3
+"Graduate School of Informatics and Engineering, The University of Electro-Communications, 1-5-1 Chofugaoka, Chofu, Tokyo 182-8585, Japan",3
+"School of Computer and Information Technology, Beijing Jiaotong University, Beijing, 100044, China",3
+University of Wisconsin - Madison,3
+"Mines-Télécom/Télécom Lille, CRIStAL (UMR CNRS 9189), Villeneuve d'Ascq, France",3
+"Kyung Hee University, Korea",3
+"Departamento de Computación, Facultad de Ciencias Exactas y Naturales, Universidad de Buenos Aires, Argentina",3
+"Stony Brook University, Stony Brook, NY 11794, USA",3
+"University of Delaware, Newark, 19716, USA",3
+"Vision and Fusion Lab, Karlsruhe Institute of Technology KIT, Adenauerring 4, 76131, Germany",3
+"University at Buffalo, The State University of New York, Buffalo, NY 14203, USA",3
+UIUC,3
+"Computational Biomedicine Lab, Department of Computer Science, University of Houston, 4800 Calhoun Rd., TX, 77004, USA",3
+"Pattern Recognition and Intelligent Systems Laboratory, Beijing University of Posts and Telecommunications, Beijing, China",3
+"Laboratory of Intelligent Recognition and Image Processing, School of Computer Science and Engineering, Beihang University, 100191, Beijing, China",3
+"Face Aging Group, UNCW",3
+"College of Computer Science and Technology, Xinjiang Normal University, Urumchi, 830054, China",3
+"School of Information Technology, Deakin University, Geelong, VIC 3216, Australia",3
+"Dept. of Informatics, Aristotle Univ. of Thessaloniki, Greece",3
+"University of Southern California, Institute for Robotics and Intelligent Systems, Los Angeles, CA 90089, USA",3
+"Computer Science, University of Houston, Texas 77004, United States of America",3
+"School of Communication and Information Engineering, Beijing University of Posts and Telecommunications, Beijing, China",3
+"Department of Computer Science and Technology, Shanghai Key Laboratory of Multidimensional Information Processing, East China Normal University, Shanghai, China",3
+"Department of Computer Science and Engineering, Michigan State University, USA",3
+"Tsinghua University, Beijing,China",3
+"Media & Inf. Res. Labs., NEC Corp., Kanagawa, Japan",3
+"Department of Electronic Engineering, Shanghai Jiao Tong University, China",3
+"Department of Computer Science and Technology, State Key Laboratory of Intelligent Technology and Systems, Tsinghua National Laboratory for Information Science and Technology, Tsinghua University, Beijing, China",3
+"School of Software, Tsinghua University, Beijing, P. R. China",3
+"Research Center of Intelligent Robotics Shanghai Jiao Tong University, Shanghai, 200240, P.R. China",3
+"Center for Research on Intelligent Perception and Computing National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences",3
+"School of Software, University of Technology Sydney, New South Wales, Australia",3
+"School of Telecommunications Engineering, Xidian University, Xi’an, China",3
+"Department of Computer Science, Shenzhen Graduate School, Harbin Institute of Technology, Guangdong 518055, China",3
+"Carnegie Mellon University, Pittsburgh, PA, USA",3
+"Azbil Corporation 1-12-2, Kawana, Fujisawa-shi, 251-8522, Japan",3
+"Graduate School of Information Sciences, Tohoku University, 6-6-05., Aramaki Aza Aoba., Sendai-shi., 980-8579., Japan",3
+"Department of Computer Science and Information Engineering, National Cheng Kung University, Tainan, Taiwan",3
+"National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, Beijing 100190, P. R. China",3
+"Institute for Electronics, Signal Processing and Communications (IESK) Otto-von-Guericke-University Magdeburg D-39016 Magdeburg, P.O. Box 4210 Germany",3
+"Department of Computer, the University of Suwon, Korea",3
+"Institute for Anthropomatics, Karlsruhe Institute of Technology, Germany",3
+"Department of Electrical and Computer Engineering, Florida Institute of Technology, Melbourne, USA",3
+"Dept. of Computer Science and Engineering, St. Joseph's College of Engineering and Technology, Palai, Kerala, India",3
+"Beijing University of Posts and Telecommunications, Beijing, P.R. China",3
+"Department of Computer Science and Engineering, Michigan State University, East Lansing 48824, USA",3
+"Centre for Vision, Speech and Signal Processing, University of Surrey, UK",3
+"School of Electronic Information and Communications, Huazhong University of Science and Technology, Wuhan, China",3
+"Department of Electrical Engineering, Indian Institute of Technology Kanpur, PIN 208016, Uttar Pradesh, India",3
+"Dept. of Computer Science and Electrical Engineering, University of Missouri-Kansas City, MO, USA",3
+"University of North Carolina Wilmington, USA",3
+"Shenzhen Key Laboratory of Computer Vision and Pattern Recognition, Shenzhen Institute of Advanced Technology, Chinese Academy of Sciences, Shenzhen, China",3
+"Pattern Recognition and Intelligent System Laboratory, Beijing University of Posts and Telecommunications, Beijing 100876, China",3
+"Visual Media Computing Lab, Department of Multimedia and Graphic Arts, Cyprus University of Technology, Limassol, Cyprus",3
+"Department of Computer Science, Computational Biomedicine Laboratory, University of Houston, Houston, TX, USA",3
+"Centro de Informática, Universidade Federal de Pernambuco, Recife, Brazil",3
+"Xi’an Institute of Optics and Precision Mechanics, Chinese Academy of Sciences, Xi’an, China",3
+"Swiss Federal Institute of Technology, Lausanne (EPFL), Switzerland",3
+"Australian Centre for Field Robotics University of Sydney, 2006, Australia",3
+"Université de Lyon, Laboratoire d’InfoRmatique en Image et Systèmes d’information, Centre National de Recherche Scientifique 5205, Ecole Centrale de Lyon, France",3
+"Department of Computer Science, Face Aging Group Research Laboratory, Institute for Interdisciplinary Studies in Identity Sciences, University of North Carolina at Wilmington, Wilmington, NC, USA",3
+"School of Software, Tsinghua University, Beijing, China",3
+"Media Laboratory, Massachusetts Institute of Technology, Cambridge, MA 02139, USA",3
+Center for Research on Intelligent Perception and Computing,3
+"UC Merced, USA",3
+"Centre for Quantum Computation & Information Systems, Faculty of Engineering and IT, University of Technology, Sydney, 235 Jones Street, Ultimo, NSW, Australia",3
+"Samsung Research Center-Beijing, SAIT China Lab Beijing, China",3
+"IT - Instituto de Telecomunicações, University of Beira Interior, Portugal",3
+"Center for Cognitive, Connected & Computational Imaging, College of Engineering & Informatics, NUI Galway, Ireland",3
+"Institute for Anthropomatics and Robotics, Karlsruhe Institute of Technology, Karlsruhe, Germany",3
+"Institute of Information Science, Beijing jiaotong University, Beijing, China",3
+"Key Laboratory of Intelligent Perception and Image Understanding of Ministry of Education, School of Artificial Intelligence, Xidian University, Xi’an, China",3
+"Center for Automation Research, University of Maryland, College Park, 20742, USA",3
+"Hasso Plattner Institute, University of Potsdam, Prof.-Dr.-Helmert-Str. 2-3, 14482, Germany",3
+"Dalian University of Technology, School of Software Tuqiang St. 321, Dalian, 116620, China",3
+"Department of Computer Science, University of Central Florida, Orlando, 32816, United States of America",3
+"Department of Electronic Engineering, Shanghai Jiao Tong University, Shanghai 200240, China",3
+"College of Computer Science and Technology, Jilin University, Changchun, China",3
+"Key Lab of Intelligent Information Processing of Chinese Academy of Sciences (CAS), Institute of Computing Technology, CAS, Beijing 100190, China",3
+"University of Nottingham, Ningbo China",3
+"National Key Laboratory of Fundamental Science on Synthetic Vision, College of Computer Science, Sichuan University Chengdu, 610065, China",3
+"Institute of Forensic Science, Ministry of Justice, Shanghai 200063, China",3
+"Department of Information Engineering, University of Florence, Florence, Italy",3
+"Dept. of Computer Science and Information Engineering, National Dong Hwa University, Hualien, Taiwan",3
+"West Virginia University, Morgantown, WV, USA",3
+"EUP Mataró, Spain",3
+Université du Québec à Chicoutimi (UQAC),3
+"Dept. of Computer Sciences, ASIA Team, Moulay Ismail University, Faculty of Science and Techniques, BP 509 Boutalamine 52000 Errachidia, Morocco",3
+"School of Electrical and Electronic Engineering, Singapore",3
+"Center for Future Media and School of Computer Science and Engineering, University of Electronic Science and Technology of China, Chengdu, China",3
+"Advanced Technologies Application Center, 7a #21406 b/ 214 and 216, P.C. 12200, Playa, Havana, Cuba",3
+"Artificial Vision Laboratory, National Taiwan University of Science and Technology",3
+"Department of Electrical Engineering, Indian Institute of Technology Kanpur, Kanpur, India",3
+"Key Laboratory of Machine Perception (Ministry of Education), Engineering Lab on Intelligent Perception for Internet of Things (ELIP), Shenzhen Graduate School, Peking University, China",3
+"Shenyang Institute of Automation, Chinese Academy of Sciences, Shenyang, China",3
+"Beijing Key Laboratory of Traffic Data Analysis and Mining, Beijing Jiaotong University, Beijing, China",3
+"State Key Laboratory of Management and Control for Complex Systems, Institute of Automation Chinese Academy of Sciences, Beijing, China 100190",3
+"Department of Electronic and Information Engineering, The Hong Kong Polytechnic University, Kowloon, Hong Kong",3
+"Bilgisayar Mühendisliği Bölümü, İstanbul Teknik Üniversitesi, İstanbul, Türkiye",3
+Sharif University of Technology,3
+"Department of Computer Science and Engineering, National Institute of Technology Uttarakhand, Srinagar Garhwal, India",3
+"Dept. of Mediamatics, Delft Univ. of Technol., Netherlands",3
+"Disney Research Pittsburgh, Pittsburgh, PA, USA",3
+Electrical and Computer Engineering,3
+"School of Electronics and Information Engineering, Tianjin University, Tianjin, China",3
+"Cornell University, USA",3
+"Department of Information Science and Engineering, Changzhou University, Changzhou, China",3
+"International Center of Excellence on Intelligent Robotics and Automation Research, National Taiwan University, Taiwan",3
+"Department of Informatics, University of Thessaloniki, 54124, Greece",3
+"Department of Electrical and Computer Engineering, University of Dayton, Ohio, USA",3
+"Department of Electrical and Computer Engineering, University of Windsor, Canada",3
+"Graduate School of Shenzhen, Tsinghua University, Beijing, China",3
+"Department of Electrical Engineering, Korea Advanced Institute of Science and Technology, Korea",3
+"Institute of Computational Science, University of Lugano, Switzerland",3
+"Norwegian Biometrics Laboratory, NTNU - Gjøvik, Norway",3
+"Institute of Technology and Science, Tokushima University, 2-1 Minamijyousanjima, 770-8506, Japan",3
+"LTCI, CNRS, Télécom ParisTech, Université Paris-Saclay, 75013, France",3
+"National Lab of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, Beijing, China",3
+"School of Electronic Information and Electrical Engineering, Shanghai Jiao Tong University, Dongchuan Road 800, Minhang District, Shanghai, 200240, China",3
+"Multimedia and Intelligent Software Technology Beijing Municipal Key Lab., College of Computer Science, Beijing University of Technology Beijing, China.",3
+"Institute of Imaging & Computer Vision, RWTH Aachen University, Aachen, Germany",3
+Korea University,3
+"Centre for Quantum Computation and Intelligent Systems, University of Technology Sydney, Sydney, NSW, Australia",3
+"College of Computer Science and Software Engineering, Shenzhen University, Shenzhen, China",3
+"Shenzhen Key Laboratory of Broadband Network and Multimedia, Graduate School at Shenzhen, Tsinghua University, Shenzhen, China",3
+"TCS Research, New Delhi, India",3
+"Faculty of Electrical Engineering, University of Ljubljana, Trzaska cesta 25, SI-1000 Ljubljana, Slovenia",3
+"Research Institute of Shenzhen, Wuhan University, Shenzhen, China",3
+"Department of Electrical and Computer Engineering, Binghamton University, Binghamton, NY",3
+"School of Electronics and Computer Science, University of Southampton, United Kingdom",3
+"Department of Computer Science, University of Massachusetts Amherst, Amherst MA, 01003",3
+"Key Laboratory of Intelligent Perception and Image Understanding of Ministry of Education, International Research Center for Intelligent Perception and Computation, Xidian University, Xi’an, China",3
+"Center for Machine Perception, Department of Cybernetics, Faculty of Electrical Engineering, Czech Technical University in Prague, 166 27 Prague 6, Technická 2 Czech Republic",3
+"Computer Laboratory, University of Cambridge, United Kingdom",3
+"Department of Computer Science, University of Texas at San Antonio, San Antonio, TX, USA",3
+"South China University of Technology, China",3
+"Visionlab, Heriot-Watt University, Edinburgh, UK",3
+"Institute for Infocomm Research, A*STAR, Singapore",3
+"Korea Advanced Institute of Science and Technology (KAIST), Republic of Korea",3
+"Xerox Research Center, Webster, NY, USA",3
+"Department of Engineering and Environment, Northumbria University, Newcastle Upon Tyne, Tyne and Wear",3
+"Institute of Applied Computer Science, Kiel University of Applied Sciences, Kiel, Germany",3
+"School of Creative Technologies, University of Portsmouth, Portsmouth, POI 2DJ, UK",3
+"Université de Lyon, CNRS, INSA-Lyon, LIRIS, UMR5205, France",3
+"Faculty of Electronic Information and Electrical Engineering, School of Information and Communication Engineering, Dalian University of Technology, Dalian, China",3
+"Image Processing Center, Beihang University, Beijing, China",3
+"Affectiva Inc., Waltham, MA, USA",3
+"Department of Electronics and Communication Engineering, Sun Yat-Sen University, Guangzhou, China",3
+"Department of Computer Science, Korea Advanced Institute of Science and Technology, 291 Daehak-ro, Yuseong-gu, Daejeon 305-701, Republic of Korea",3
+"Dept. of Electrical and Computer Engineering & Centre for Intelligent Machines, McGill University, Montreal, Quebec, Canada",3
+"Centre for Vision, Speech and Signal Processing, University of Surrey, Surrey, UK",3
+"Computer Vision and Image Processing Lab, Institute for Integrated and Intelligent Systems, Griffith University, Australia",3
+"Department of Computer Science and Engineering, Michigan State University, East Lansing, MI, USA",3
+"Institute of Software, College of Computer, National University of Defense Technology, Changsha, Hunan, China, 410073",3
+"Department of Electrical and Computer Engineering, Northeastern University, Boston, USA, 02115",3
+"AltumView Systems Inc., Burnaby, BC, Canada",3
+Sapienza University of Rome,3
+"Center for Biometrics and Security Research & National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences",3
+"Computer Vision Lab, Sungkyunkwan University Suwon, South Korea",3
+"The University of Tokyo, Japan",3
+"Department of Computer Science, Vogt-Koelln-Strasse 30, 22527 Hamburg - Germany",3
+"Faculty of Engineering, Shinshu University, Nagano, Japan",3
+"Institute for Creative Technologies, University of Southern California, 12015 E Waterfront Dr, Los Angeles, CA, USA",3
+"Department of Electrical Engineering, National Taiwan University, Taipei, Taiwan, R.O.C",3
+"Vision Lab in Department of Electrical and Computer Engineering, Old Dominion University, Norfolk, VA, 23529",3
+"Center for Research of E-life DIgital Technology (CREDIT), Department of Computer Science and Information Engineering, National Cheng Kung University, Tainan, Taiwan",3
+"Graduate School of Convergence Science and Technology, Seoul National University, Seoul, Korea",3
+"E-Comm Research Lab, Infosys Limited, Bangalore, India",3
+"College of Mechanical and Electrical Engineering, Nanjing University of Aeronautics and Astronautics, Nanjing, China",3
+"National Laboratory of Pattern Recognition, Center for Biometrics and Security Research, Institute of Automation, Chinese Academy of Sciences, Beijing, China",3
+"School of Computer Science and Software Engineering University of Wollongong, Australia",3
+"Phonexia, Brno-Krlovo Pole, Czech Republic",3
+"Expert Systems, Modena, Italy",3
+"Chair of Complex & Intelligent Systems, University of Passau, Passau, Germany",3
+"Department of Applied Mechanics, Chalmers University of Technology, SE-412 96 Göteborg, Sweden",3
+"Laboratory for Intelligent and Safe Automobiles, University of California, San Diego, USA",3
+Toyota Research Institute,3
+"University of California, Merced",3
+"Image and Video Research Lab, Queensland University of Technology, 2 George Street, GPO Box 2434, Brisbane, QLD 4001, Australia",3
+"School of Computer Science and Engineering, Nanjing University of Science and Technology",3
+"Instituto Nacional de Astrofísica, Óptica y Electrónica, Luis Enrique Erro #1, Tonantzintla, Puebla, Mexico",3
+"NLPR, Institute of Automation, Chinese Academy of Sciences",3
+"Center for Research on Intelligent Perception and Computing, National Laboratory of Pattern Recognition, Institute of Automation, CAS Center for Excellence in Brain Science and Intelligence Technology, Chinese Academy of Sciences, Beijing 100190, China",3
+"Télécom Lille, CRIStAL UMR (CNRS 9189), France",3
+"IMPCA, Curtin University, Australia",3
+"Faculty of Informatics and Computing, Universiti Sultan Zainal Abidin, Besut Campus, 22200 Besut, Terengganu, Malaysia",3
+Concordia University,3
+"State Key Laboratory of Digital Manufacturing Equipment and Technology, Huazhong University of Science and Technology, Wuhan, China",3
+"University of California, Los Angeles, CA Dept. of Electrical Engineering",3
+"University Of Electronic Science And Technology Of China, China",3
+"University of Texas at San Antonio, San Antonio, TX, USA",3
+IBM Research,3
+"Faculty of Electrical Engineering, University of Ljubljana, Trzaska 25, SI-1000 Ljubljana, Slovenia",3
+"Computational Biomedicine Lab, Department of Computer Science, University of Houston, Houston, TX, USA",3
+"Center for Digital Media Computing, Software School, Xiamen University, Xiamen 361005, China",3
+"Department of Informatics, Aristotle University of Thessaloniki, Greece",3
+"State Key Laboratory on Intelligent Technology and Systems, National Laboratory for Information Science and Technology, Department of Computer Science and Technology, Tsinghua University, China",3
+"School of Electronic and Information Engineering, South China University of Technology, Guangzhou, Guangdong, China",3
+"Dept. of Cybernetics and Artificial Intelligence, FEI TU of Košice, Slovak Republic",3
+"Key Lab of Intelligent Information Processing, Institute of Computer Technology, Chinese Academy of Science (CAS), Beijing, 100190, China",3
+"Image and Video Systems Lab, School of Electrical Engineering, KAIST, Republic of Korea",3
+"Evolutionary Computation Research Group, Victoria University of Wellington, PO Box 600, Wellington 6140, New Zealand",3
+"National Laboratory of Pattern Recognition, Center for Research on Intelligent Perception and Computing, CAS Center for Excellence in Brain Science and Intelligence Technology, Institute of Automation, Chinese Academy of Sciences, Beijing, China",3
+"Faculty of Electrical Engineering, Mathematics and Computer Science, University of Twente, P.O.Box 217 7500 AE Enschede, The Netherlands",3
+"MindLAB Research Group, Universidad Nacional de Colombia, Colombia",3
+"IntelliView Technologies Inc., Calgary, AB, Canada",3
+"Department of Electronic Engineering, National Taipei University of Technology, Taipei, Taiwan",3
+"Information and media processing laboratories, NEC Corporation",3
+"School of Behavioral and Brain Sciences, University of Texas at Dallas, Richardson, 75080, USA",3
+"Dept. of Automation and Applied Informatics, Politehnica University of Timisoara, Romania",3
+Queen Mary University of London,3
+"School of Automation and Electrical Engineering, University of Science and Technology Beijing, 100083, China",3
+"Michigan State University, East Lansing, 48824, USA",3
+"Department of Electronics, AGH University of Science and Technology, Kraków, Poland",3
+"School of Software, Jiangxi Normal University, Nanchang, China",3
+"Department of Computer Science, Pontificia Universidad Católica de Chile",3
+"Faculty of Information Technology, Ho Chi Minh City University of Science, VNU-HCM, District 5, Ho Chi Minh City, Vietnam",3
+Department of Electronic and Computer Engineering National Taiwan University of Science and Technology,3
+"School of Computer Science and Technology, Harbin Institute of Technology, Harbin, China",3
+"Georgia Institute of Technology, Atlanta, 30332-0250, USA",3
+"Tongji University, Shanghai, China",3
+"Department of Electrical and Computer Engineering, Vision Laboratory, Old Dominion University, Norfolk, VA, USA",3
+"Centre for Quantum Computation and Intelligent Systems, Faculty of Engineering and Information Technology, University of Technology Sydney, Ultimo, NSW, Australia",3
+"School of Information Science and Engineering, Xiamen University, Xiamen 361005, China",3
+"University of California San Diego, USA",3
+"HCC Lab, Vision & Sensing Group, University of Canberra, Australia",3
+"Guangdong Provincial Key Laboratory of Computer Vision and Virtual Reality Technology, Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences, Shenzhen, China",3
+"REGIM-Labo: REsearch Groups in Intelligent Machines, University of Sfax, ENIS, BP 1173, Sfax, 3038, Tunisia",3
+"School of physics and engineering, Sun Yat-Sen University, GuangZhou, China",3
+"Intelligent Vision Research Lab, Department of Computer Science, Federal University of Bahia",3
+"FDNA inc., Herzliya, Israel",3
+"Department of Mathematics & Computer Science, Philipps-Universität Marburg, D-35032, Germany",3
+"Australian Center for Visual Technologies, and School of Computer Science, The University of Adelaide, Adelaide, Australia",3
+"Department of Electronic Measuring systems, Moscow Engineering Physics Institute, National Research Nuclear University MEPhI, Moscow, Russia",3
+"School of Electrical and Electronic Engineering, Nanyang Technological University, Singapore, 639798",3
+"National University of Defence Technology, Changsha 410000, China",2
+"Elektrik-Elektronik Mühendisliği Bölümü, Eskişehir Osmangazi Üniversitesi, Turkey",2
+"Elektrik - Elektronik Mühendisliği Bölümü, Atılım Üniversitesi, Ankara, Türkiye",2
+"China Electronics Standardization Institute, Beijing, 100007",2
+"School of Reliability and System Engineering, Science and Technology on Reliability and Environmental Engineering Laboratory, Beihang University, Beijing, China",2
+"Department of Computer Science, Kent State University, OH 44242, U.S.A.",2
+"Machine Intelligence Unit, Indian Statistical Institute, Kolkata, India",2
+"Computational Biomedicine Lab, University of Houston",2
+"Lane Department of Computer Science and Electrical Engineering, West Virginia University, Morgantown WV 26506, USA",2
+"Bilgisayar Mühendisliği Bölümü, İstanbul Teknik Üniversitesi, İstanbul, Turkiye",2
+"Universidad Tecnica Federico Santa Maria, Department of Electronic Engineering, Valparaiso, Chile",2
+"Dept. of Comput. Syst., Univ. of Technol., Sydney, NSW, Australia",2
+"Harvard University, Cambridge, MA, USA",2
+"Michigan State University, East Lansing, MI, U.S.A.",2
+"Department of Computer Science, National Tsing Hua University, Taiwan",2
+"Dept. of Comput. Sci., York Univ., UK",2
+"CSE, SUNY at Buffalo, USA",2
+"Department of Computer Engineering, Mahanakorn University of Technology, 140 Cheum-Sampan Rd., Nong Chok, Bangkok THAILAND 10530",2
+"Dept. of Computer Science, YiLi Normal College, Yining, China 835000",2
+"School of Computing and Communications, Faculty of Engineering and Information Technology, University of Technology, Sydney, Australia",2
+"DISI, University of Trento, Italy",2
+"LAPI, University Politehnica of Bucharest, Romania",2
+"University of Colorado at Colorado Springs, Colorado Springs, CO, USA",2
+"Department of Mechanical Engineering, National Taiwan University, 10647, Taipei, Taiwan",2
+"Institution for Infocomm Research, Connexis, Singapore",2
+"Department of d’Informàtica, Universitat de València, Valencia, Spain",2
+"Toyota Research Institute, Cambridge, MA, USA",2
+"Research Centre for Computers, Communication and Social Innovation La Trobe University, Victoria - 3086, Australia",2
+"IBM Thomas J. Watson, Research Center, Yorktown Heights, New York 10598, USA",2
+"Institute of Computing, University of Campinas (UNICAMP), SP, 13083-852, Brazil",2
+"IFRJDL, Institute of Computing Technology, CAS, P.O.Box 2704, Beijing, China, 100080",2
+"Computer Science Department, University of Southern California, Los Angeles, 90089, United States of America",2
+"Department of Signal Processing, Tampere University of Technology, Tampere, Finland",2
+"JD Artificial Intelligence Research, Beijing, China",2
+"STARS team, Inria Sophia Antipolis-Méditerranée, Sophia Antipolis, France",2
+"Agency for Science, Technology and Research (A*STAR), Institute of High Performance Computing, Singapore",2
+"Carnegie Mellon University, Pittsburgh, Pennsylvania 15213, USA",2
+"Department of Electrical and Computer Engineering, Singapore",2
+"Dept. of ECE & Digital Technology Center, Univ. of Minnesota, USA",2
+"Department of Computer Science, Wayne State University, Detroit, MI, USA",2
+"Dept. of Computer Science, Yonsei University, Seoul, South Korea, 120-749",2
+"Division of Graduate Studies, Tijuana Institute of Technology, México",2
+"Faculty of Science and Technology, University of Macau, Macau, China",2
+"Department of Electrical and Computer Engineering and the Center for Automation Research, UMIACS, University of Maryland, College Park, MD",2
+"Visual Analysis of People (VAP) laboratory, Aalborg University, Denmark",2
+"Instituto de Telecomunicações, Instituto Superior Técnico, Universidade de Lisboa, Portugal",2
+"School of Computer Science, Northwestern Polytechnical University, Xi’an, China",2
+"Escuela Politecnica Superior, Universidad Autonoma de Madrid, Madrid, Spain",2
+"SUPELEC / IETR, Avenue de la Boulaie, 35576 Cesson Sevigne, France",2
+"IT - Instituto de Telecomunicações, University of Beira Interior, Covilhã, Portugal",2
+"Dept. of Computer Science & Engineering, University of South Florida, Tampa, 33620, United States of America",2
+"Department of Information Management, National Formosa University, Huwei, Yulin 632, Taiwan",2
+"Dept of Computer and Informatics Engineering, Eastern Macedonia and Thrace Institute of Technology, Kavala, Greece",2
+"Fraunhofer Institute of Optronics, System Technologies and Image Exploitation IOSB, Fraunhoferstrasse 1, 76131 Karlsruhe, Germany",2
+"Department of Electrical and Computer Engineering, New Jersey Institute of Technology, University Heights, Newark, New Jersey 07102, USA",2
+"Department of Electronic Engineering, Shanghai Jiao Tong University",2
+"College of Computer and Information, Hohai University, Nanjing, China",2
+"Department of Information Systems and Cyber Security and the Department of Electrical and Computer Engineering, University of Texas at San Antonio, San Antonio, TX, USA",2
+"Department of Electrical and Computer Engineering, University of Texas at San Antonio, San Antonio, TX, USA",2
+"Electronics & Telecommunications Research Institute (ETRI), Daejeon, Korea",2
+"Electrical and Computer Engineering Department, University of Windsor, Ontario, Canada N9B 3P4",2
+"National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, China",2
+"Department of Computer Science, The University of Hong Kong",2
+"Dept. of Eng. Sci., Oxford Univ., UK",2
+"Vision and Fusion Lab, Karlsruhe Institute of Technology KIT, Adenauerring 4, Karlsruhe, Germany",2
+"Department of Electrical Engineering, Korea Advanced Institute of Science and Technology (KAIST), Republic of Korea",2
+"Facial Image Processing and Analysis Group, Institute for Anthropomatics, Karlsruhe Institute of Technology, D-76131 Karlsruhe, P.O. Box 6980 Germany",2
+"Delft University of Technology, Mekelweg 4, Netherlands",2
+"Human-Machines Interaction (HMI) Laboratory, Department of Industrial Informatics, TEI of Kavala, Kavala, Greece",2
+"Department of Computer Science and Engineering, Michigan State University",2
+"Dept. of ECE, Maryland Univ., College Park, MD, USA",2
+"Research Institute of Intelligent Control and Systems, Harbin Institute of Technology, Harbin, China",2
+National University of Defense and Technology,2
+"School of Computer Science, CECS, Australian National University, Australia",2
+"Electrical & Electronic Engineering Department, Mevlana University Konya, Turkey",2
+"Institute for Computational and Mathematical Engineering, Stanford University, Stanford, CA, USA",2
+"Institute of Electronics, National Chiao Tung University, Hsinchu, Taiwan",2
+"GIPSA Laboratory, Image and Signal Department, Grenoble Institute of Technology, Grenoble, France",2
+"Gradate School of Information Production and System, Waseda University, Kitakyushu, Japan 808-0135",2
+"Graduate School of Information, Production and Systems, Waseda University, Japan",2
+"Department of Computer Science and Engineering, National Taiwan Ocean University, No.2, Beining Rd., Keelung 202, Taiwan",2
+"Tampere University of Technology, Finland",2
+"Department of Electrical Engineering, National Chiao Tung University, Hsinchu, Taiwan",2
+"Biodata Mining Group, Technical Faculty, Bielefeld University, Germany",2
+"Department of Electrical and Computer Engineering, National University of Singapore, Singapore, Singapore",2
+"Bilgisayar Mühendisliği Bölümü, Deniz Harp Okulu, İstanbul, Türkiye",2
+"IETR, CNRS UMR 6164, Supelec, Cesson-Sevigne, France",2
+"Institute of Intelligent Systems and Robotics (ISIR), Pierre and Marie Curie University , Paris, France",2
+"University of Technology, Sydney, NSW, Australia",2
+"Statistical Machine Intelligence & LEarning, School of Computer Science & Engineering University of Electronic Science and Technology of China, 611731, China",2
+"Department of Computer Science and Information Engineering, College of Electrical Engineering and Computer Science, National Central University, Jhongli, Taiwan",2
+"Department of Electrical and Electronics Engineering, Universiti Teknologi PETRONAS, Seri Iskandar, 32610, Perak Malaysia",2
+"School of Computer Science and Engineering, Nanyang Technological University, Singapore639798",2
+West Virginia University,2
+"Dept. of Electr. & Comput. Eng., McMaster Univ., Hamilton, Ont., Canada",2
+"Nanjing Children's Hospital Affiliated to Nanjing Medical University, Nanjing, China",2
+"College of Telecommunications & Information Engineering, Nanjing University of Posts and Telecommunications, Nanjing, China",2
+"School of Telecommunications Engineering, Xidian University, Xi’an, China",2
+"London Healthcare Sciences Centre, London, ON, Canada",2
+"Department of Computer and Informatics Engineering, Eastern Macedonia and Thrace Institute of Technology, Human Machines Interaction (HMI) Laboratory, 65404 Kavala, Greece",2
+"School of Electrical Engineering and Computer Science, Seoul National University, Korea",2
+"Jordan University of Science and Technology, Irbid, Jordan",2
+"College of Medical Information Engineering, Guangdong Pharmaceutical University, Guangzhou, China",2
+"Biometric Technologies Laboratory, Department of Electrical and Computer Engineering, University of Calgary, Alberta, T2N 1N4 Canada",2
+"Morpho, SAFRAN Group, 11 Boulevard Galliéni 92130 Issy-Les-Moulineaux - France",2
+"Department of Computer Science, Aalto University, Finland",2
+"Norwegian Biometrics Laboratory, Norwegian University of Science and Technology (NTNU), 2802 Gjøvik, Norway",2
+"International Institute of Information Technology (IIIT) Hyderabad, India",2
+"Computer Laboratory, University of Cambridge, Cambridge, UK",2
+"Department of Electronic Systems, Aalborg University, Denmark",2
+"Artificial Intelligence and Information Analysis Lab, Department of Informatics, Aristotle University of Thessaloniki, Greece",2
+University of British Columbia Department of Electrical and Computer Engineering,2
+"Department of Computer Science, Swansea University, Swansea, UK",2
+"Computer Science and Technology, IIEST, Shibpur",2
+"Amirkabir University of Technology, Tehran, Iran",2
+"EURECOM, Sophia Antipolis, France",2
+"School of Computer Science and Technology, Harbin Institute of Technology Shenzhen Graduate School, Shenzhen, China",2
+"Department of Electrical and Computer Engineering, University of Delaware, Newark, DE, USA",2
+"School of Electrical and Electronic Engineering, Yonsei University, 50 Yonsei-ro, SEOUL, Republic of Korea",2
+"Department of Computer Science and Engineering, University of Califonia, San Diego",2
+"Department of Computer Science and Technology, Tsinghua University, Beijing",2
+"University of Missouri Department of Electrical and Computer Engineering Columbia, MO, USA",2
+"School of Electrical and Computer Engineering, RMIT University, Melbourne, Australia",2
+"Inf. Syst. Dept., Buckingham Univ., UK",2
+"Key Laboratory of Machine Perception, Shenzhen Graduate School, Peking University, China",2
+"Dept. of Electr. & Comput. Eng., Old Dominion Univ., Norfolk, VA, USA",2
+"Department of Computer Science, Edge Hill University",2
+"Department of Psychology, University of Pittsburgh, PA, 15260, USA",2
+"The Robotics Institute, Carnegie Mellon University, Pittsburgh, PA, 15213, USA",2
+"National Central University, Taoyuan County, Taiwan",2
+"Department of Computer Science & Engineering, POSTECH, Pohang, Sourth Korea, 37673",2
+"Signals and Systems Group, Faculty of EEMCS, University of Twente, the Netherlands",2
+"Research Center of Machine Learning and Data Analysis, School of Computer Science and Technology, Soochow University, Suzhou, China",2
+"School of Computer Science, University of Windsor, Canada N9B 3P4",2
+"Laboratory Heudiasyc, University of Technology of Compiègne, BP 20529. F-60205, France",2
+"Dept. Electrical Engineering, National Taiwan University, Taipei, Taiwan",2
+"Elektrik-Elektronik Mühendisliği Bölümü, Bahçeşehir Üniversitesi, İstanbul, Turkey",2
+University of Notre Dame,2
+University of Ljubljana,2
+Istanbul Technical University,2
+"Polytechnic School, University of Pernambuco, Recife, Brazil",2
+"Faculty of Technical Sciences, Singidunum University, Belgrade 11000, Serbia",2
+"Dept. of CSEE, University of Maryland, Baltimore County, Baltimore, MD 21250",2
+"Dept. of Electron. & Inf., Toyota Technol. Inst., Nagoya, Japan",2
+"Department of Computer Science, University of Maryland, College Park, MD",2
+"Department of Computer Science and Engineering, POSTECH, Pohang 790-784, Republic of Korea",2
+"School of Electronic Engineering and Computer Science, Queen Mary University of London, UK",2
+"Star Technologies, USA",2
+"Dept. of Comput. Sci., New York State Univ., Binghamton, NY, USA",2
+"Dept. of Electrical Engineering, National Institute of Technology, Rourkela, India 769008",2
+"Division of Control, EEE, Nanyang Tech. Univ., Singapore",2
+"Department of Computer Science & Engineering, University of Ioannina, 45110, Greece",2
+"Jiangsu University of Science and Technology, Zhenjiang, China",2
+"University of Valladolid (Spain), Dep. Of Systems Engineering and Automatic Control, Industrial Engineering School",2
+"Department of Computer Science, Mangalore University, India",2
+"Department of Computer Science, Instituto de Telecomunicações, University of Beira Interior, Covilhã, Portugal",2
+"Key Laboratory of Intelligent Information Processing of Chinese Academy of Sciences (CAS), Institute of Computing Technology, CAS, Beijing, China",2
+"School of Electronic and Information Engineering, South China University of Technology, Guangzhou, China",2
+"Video Analytics Laboratory, SERC, Indian Institute of Science, Bangalore, India",2
+"NPU-VUB Joint AVSP Research Lab, School of Computer Science, Northwestern Polytechnical University (NPU) Shaanxi Key Lab on Speech and Image Information Processing, 127 Youyi Xilu, Xi'an 710072, China",2
+"CAS Center for Excellence in Brain Science and Intelligence Technology, National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, Beijing, China",2
+Australian National University,2
+"Institute of Computing, University of Campinas (UNICAMP), Campinas, SP, 13083-852, Brazil",2
+"Sichuan Univ., Chengdu",2
+"Laboratory for Intelligent and Safe Automobiles, University of California San Diego, La Jolla, CA 92093 USA",2
+"Department of Computing, Imperial College London, London, 180 Queen’s Gate, UK",2
+"Australian Center for Visual Technologies, and School of Computer Science, University of Adelaide, Canberra, Australia",2
+"Bilgisayar Mühendisligi Bölümü, İstanbul Teknik Üniversitesi",2
+"Research&Advanced Technology Division of SAIC Motor Corporation Limited, Shanghai 201804, P.R China",2
+"Center for Research on Intelligent Perception and Computing, National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences (CASIA), Beijing, China",2
+"Center for Research on Intelligent Perception and Computing, National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, Beijing, China",2
+"Departamento de Computação, Universidade Federal do Piauí, Teresina, Brasil",2
+"Bilgisayar Mühendisliği Bölümü, Marmara Üniversitesi, İstanbul, Türkiye",2
+"Le2i FRE2005, CNRS, Arts et Métiers, Univ. Bourgogne Franche-Comté, UTBM, F-90010 Belfort, France",2
+"Graduate School of Engineering, Osaka University, 2-1 Yamadaoka, Suita, Osaka, 565-0871 Japan",2
+"Department of Computing, Imperial College London, U.K.",2
+"Dept. of Computer Science and Information Engineering, Southern Taiwan University of Science and Technology, Tainan City, Taiwan",2
+"Corp. Res. & Dev., Toshiba Corp., Tokyo, Japan",2
+"Dept. of Electronics and Telecommunication Engg., KCT's Late G.N. Sapkal college of Engineering, Nashik, India",2
+"Bilgisayar Mühendisliği Bölümü, Gebze Teknik Üniversitesi, Kocaeli, 41400, Türkiye",2
+"State Key Laboratory of Rail Traffic Control and Safety, Beijing Jiaotong University, China",2
+Tencent Inc,2
+"State Key Laboratory of Management and Control for Complex Systems, Institute of Automation, Chinese Academy of Sciences, Beijing 100190",2
+"Faculty of Information Science and Technology (FIST), Multimedia University, Melaka, Malaysia",2
+"Fraunhofer IOSB, Fraunhoferstrasse 1, 76131 Karlsruhe, Germany",2
+"Naval Research Laboratory, Washington DC",2
+"Department of Electrical and Electronic Engineering, Imperial College London, United Kingdom",2
+"Department of Computer Science, National Tsing Hua University, Hsinchu, Taiwan",2
+"SPAWAR Systems Center Pacific, San Diego, California, USA",2
+"Department of Electrical Engineering, National Taiwan University, Taiwan",2
+"Department of Electrical Engineering, Ferdowsi University of Mashhad, Mashhad, Iran",2
+"Artificial Vision Laboratory, Dept. of Mechanical Engineering, National Taiwan University of Science and Technology, Taipei City, Taiwan 106",2
+"Microsoft Corporation, Redmond, WA, USA",2
+"Dept. of Electrical Engineering, National Tsing-Hua University, Taiwan",2
+"Elektronik ve Haberleşme Mühendisliği Bölümü, Yıldız Teknik Üniversitesi, İstanbul, Türkiye",2
+"Centre for Quantum Computation and Intelligent Systems, Faculty of Engineering and Information Technology, University of Technology, Sydney, Ultimo, NSW, Australia",2
+"Department of Electronics and Information Engineering, Huazhong University of Science and Technology, Wuhan, China",2
+"Department of Artificial Intelligence, Faculty of Computer Science & Information Technology, University of Malaya, Kuala Lumpur, 50603, Malaysia",2
+"Department of Computer Science and Engineering of Systems, University of Zaragoza, Escuela Universitaria Politécnica de Teruel, Teruel, Spain",2
+"Department of Automation, North-China University of Technology, Beijing, China",2
+"University of Bern, Neubrückstrasse 10, Bern, Switzerland",2
+"Centre for Signal Processing, Department of Electronic and Information Engineering, The Hong Kong Polytechnic University, Kowloon, Hong Kong",2
+"Computer Science, Fudan University, Shanghai, 201203, China",2
+"Electronic Engineering and Computer Science, Queen Mary University, London, United Kingdom",2
+"Department of Computer Science and Engineering, Pohang University of Science and Technology, Pohang, Korea",2
+"Swiss Federal, Institute of Technology, Lausanne (EPFL), Switzerland",2
+"Disney Research, CH",2
+"Water Optics Technology Pte. Ltd, Singapore",2
+"School of Electrical & Electronic Engineering, Nanyang Technological University, Singapore",2
+"National Laboratory of Pattern Recognition, CAS Center for Excellence in Brain Science and Intelligence Technology, Institute of Automation, Chinese Academy of Sciences, Beijing, China",2
+"Department of Information Science and Electronic Engineering, Zhejiang University, Hangzhou, China",2
+"Orange Labs International Center Beijing, Beijing, 100876, China",2
+"Beijing University of Posts and Telecommunications, Beijing 100876, China",2
+"Norwegian Biometrics Lab, NTNU, Gjøvik, Norway",2
+"The Edward S. Rogers Sr. Department of Electrical and Computer Engineering, University of Toronto, 10 King's College Road, Toronto, Canada",2
+"School of Computer Science and Technology, University of Science and Technology of China",2
+"Department of Electrical Engineering Indian Institute of Technology Delhi New Delhi, India",2
+"Department of Electronics and Communication Engineering Malaviya National Institute of Technology Jaipur, Rajasthan, India",2
+"Department of Electrical Engineering Malaviya National Institute of Technology Jaipur, Rajasthan, India",2
+"Center for Automation Research, University of Maryland Institute for Advanced Computer Studies, University of Maryland, College Park, MD, USA",2
+"School of EECS, Queen Mary University of London, UK",2
+"College of Software, Shenyang Normal University, Shenyang, China",2
+"Zhejiang University of Technology, Hangzhou, China",2
+"School of Computer Science and Technology, Nanjing Normal University, China",2
+"University of Technology Sydney, Ultimo, NSW, Australia",2
+"Center for Special Needs Education, Nara University of Education, Takabatake-cho, Nara-shi, Nara, Japan",2
+"Sch. of Electr. & Electron. Eng., Nanyang Technol. Univ., Singapore, Singapore",2
+"Samovar CNRS UMR 5157, Télécom SudParis, Université Paris-Saclay, Evry, France",2
+"Beijing E-Hualu Info Technology Co., Ltd, Beijing, China",2
+"Machine Learning Center, Faculty of Mathematics and Computer Science, Hebei University, Baoding 071002, China",2
+"Applied Informatics, Faculty of Technology, Bielefeld University, Germany",2
+"Institut de Robòtica i Informàtica Industrial, CSIC-UPC, Barcelona, Spain",2
+"Department of Computer Science, Universitat Oberta de Catalunya, Barcelona, Spain",2
+"University of Groningen, Nijenborgh 9, 9747 AG, The Netherlands",2
+"University of Science and Technology of China, NO.443, Huangshan Road, Hefei, Anhui, China",2
+"Shenyang SIASUN Robot & Automation Co., LTD., Shenyang, China",2
+"State Key Laboratory of Transient Optics and Photonics, Xi'an Institute of Optics and Precision Mechanics, Chinese Academy of Sciences, Xi'an, China",2
+"Bilgisayar Mühendisliği Bölümü, Bahçeşehir Üniversitesi, İstanbul, Türkiye",2
+"Department of National Laboratory of Pattern Recognition, Chinese Academy of Sciences, Institute of Automation, Beijing, China",2
+"Department of Computer Science Faculty of Science, Khon Kaen University, Khon Kaen, 40002, Thailand",2
+"Academy of Broadcasting Science, Beijing, P.R. China",2
+"Engineering Lab on Intelligent Perception for Internet of Things (ELIP), Shenzhen Graduate School, Peking University, Beijing, China",2
+"Key Laboratory of Machine Perception, Ministry of Eduction, Peking University, Beijing, China",2
+"College of Computer Science, Zhejiang University, Hangzhou, Zhejiang, China",2
+"Department of Computer Engineering, Istanbul Technical University, Istanbul, Turkey",2
+"Microsoft Research Asia, Beijing, China",2
+"Department of Information Engineering, The Chinese University of Hong Kong",2
+"School of Computing, Teesside University, Middlesbrough, UK",2
+"Department of Computer Science and Digital Technologies, Faculty of Engineering and Environment, Northumbria University, Newcastle, UK, NE1 8ST",2
+"Faculty of Telecommunications, Technical University of Sofia, Bulgaria",2
+"Key Lab of Intelligent Information Processing, Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China",2
+"Computer Science, University of Haifa, Carmel, 31905, Israel",2
+"Fernuniversitt in Hagen FUH Hagen, Germany",2
+"Research institute for Telecommunication and Cooperation, FTK, Dortmund, Germany",2
+"Core Technology Center, OMRON Corporation, Kyoto, Japan",2
+"College of Computer Science and Technology, Chongqing University of Posts and Telecommunications, Chongqing 404100, China",2
+"College of Software Engineering, Chongqing University of Posts and Telecommunications, Chongqing 404100, China",2
+"USP - University of São Paulo / ICMC, SSC - LRM (Mobile Robots Lab.), São Carlos, 13566-590, Brazil",2
+"Department of Automation, Tsinghua National Laboratory for Information Science and Technology (TNList), State Key Lab of Intelligent Technologies and Systems, Tsinghua University, Beijing, China",2
+"Department of Electric and Electronics, Selçuk University, Konya, Turkey",2
+"Research Center of Intelligent Robotics, Department of Automation, Shanghai Jiao Tong University, 200240, China",2
+"Institute of Automation, Chinese Academy of Sciences",2
+"Department of Electrical Engineering, KAIST, Deajeon, Daejeon, Republic of Korea",2
+"Department of Electrical Engineering, Tafresh University, Tafresh, Iran",2
+"Department of Electrical and Electronic Engineering, Chittagong University of Engineering and Technology, Chittagong, 4349, Bangladesh",2
+"Department of Mechanical Engineering, Chittagong University of Engineering and Technology, Chittagong, 4349, Bangladesh",2
+"Kochi University of Technology, Kochi, 782-8502, Japan",2
+"Hefei University of Technology, School of Computer and Information, Hefei, Anhui, 230601, China",2
+"Karlsruhe Institute of Technology, Institute for Anthropomatics, Karlsruhe, Germany",2
+"Pattern Recognition and Intelligent System Lab., Beijing University of Posts and Telecommunications, China",2
+"NCCU, USA",2
+"WVU, USA",2
+"University of Nottingham Malaysia Campus, Selangor Darul Ehsan, Malaysia",2
+"Centre for Quantum Computation and Intelligent Systems, the Faculty of Engineering and Information Technology, University of Technology, Sydney, Ultimo, Australia",2
+"Shahid Bahonar University of Kerman Computer Engineering Department, Kerman, Iran",2
+"Department of Computer and Information Sciences, University of Delaware, Newark, DE, USA",2
+"Department of Electrical Engineering, University of Hawaii, Manoa, Honolulu, HI, 96822",2
+"Samsung Electronics, SAIT Suwon-si, Korea",2
+"Department of Automation, University of Science and Technology of China",2
+"Centre for Intelligent Sensing, Queen Mary University of London, London, U.K.",2
+"CETUC, Pontifical Catholic University of Rio de Janeiro, Brazil",2
+"İstanbul Teknik Üniversitesi, İstanbul, Türkiye",2
+"Islamic Azad University, South Tehran Branch, Electrical Engineering Department, Iran",2
+"Shenzhen Graduate School, Harbin Institute of Technology, China",2
+"Human Language Technology and Pattern Recognition Group, RWTH Aachen University",2
+"Rensselaer Polytechnic Institute, USA",2
+"Electrical Engineering Department, Amirkabir University of Technology, Tehran, Iran",2
+"Vision Lab, School of Engineering and Physical Sciences, Heriot-Watt University, Edinburgh, United Kingdom",2
+"University of Southern California, Los Angeles, USA",2
+"University of Amsterdam, The Netherlands",2
+"Academia Sinica, Institute of Information Science, Taipei, Taiwan",2
+"Centre for Communication Systems Research, University of Surrey, Guildford, Surrey, United Kingdom",2
+"School of Computer Engineering and Science, Shanghai University",2
+"Department of Information Engineering, The Chinese University of Hong Kong, Hong Kong, China",2
+"Corp. Res. & Dev. Center, Toshiba Corp., Kawasaki, Japan",2
+"School of Computer Science and Technology, Tianjin University, 300072, China",2
+"Department of Information & Communication Technologies, Universitat Pompeu Fabra, Barcelona, Spain",2
+"Computer Engineering, Rochester Institute of Technology, USA",2
+"Department of Computer Applications, National Institute of Technology, Tiruchirappalli, India",2
+"B. Tech Graduate, ECE, MSIT, C-4 Janakpuri, New Delhi, India",2
+"Department of Electrical, Computer and IT Engineering, Qazvin Branch, Islamic Azad University, Qazvin, Iran",2
+"Computer Vision Institute, School of Computer Science and Software Engineering, and the Shenzhen Key Laboratory of Spatial Information Smart Sensing and Services, Shenzhen University, Shenzhen, China",2
+"RSISE, Australian National University, Australia",2
+"HumanRobot Interaction Research Center, Department of Mechanical Engineering, Korea Advanced Institute of Science and Technology, Republic of Korea seojh",2
+"Panasonic Singapore Laboratories Pte Ltd (PSL), Tai Seng Industrial Estate 534415, Singapore",2
+"Software School, Xiamen University, Xiamen, China",2
+"Massachusetts General Hospital, Boston, MA, USA",2
+"Department of Computer Science, Virginia Commonwealth University, Richmond, VA, USA",2
+"Dept. of Electrical and Electronics Engineering, Bahcesehir University, Istanbul, Turkey",2
+"Imaging Software Technol. Center, Fuji Photo Film Co. Ltd., Japan",2
+"Dept. of ECE & Digital Technology Center, University of Minnesota, USA",2
+"Shenzhen University, Shenzhen China",2
+"National Lab of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, 95 Zhongguancun East Road, 100190, Beijing, China",2
+"Islamic University of Technology, Bangladesh",2
+"Institute of Computer and Communication Engineering, Department of Electrical Engineering, National Cheng Kung University, Tainan, 70101, Taiwan",2
+Technion,2
+"Department of Computer Science, Okayama University, Okayama, Japan",2
+Cyprus University of Technology,2
+"Dept of Electronics and Communication, Manipal Institute Of Technology, Karnataka, India",2
+"University of Technology, Sydney, Sydney, Australia",2
+"Polytechnic School of Pernambuco, University of Pernambuco, Recife-PE, Brazil",2
+"Dept. of Electrical Engineering, National Taiwan University, Taiwan",2
+"Research Center for Information Technology Innovation, Academia Sinica, Taiwan",2
+"University of Illinois at Urbana-Champaign, 201 N Goodwin, 61820, USA",2
+"Research School of Engineering, The Australian National University, Canberra, ACT, Australia",2
+"CyLab Biometrics Center and the Department of Electrical and Computer Engineering, Carnegie Mellon University, Pittsburgh, PA",2
+"Pittsburgh Univ., PA, USA",2
+"Computer Vision and Remote Sensing, Berlin University of Technology, Sekr. FR 3-1, Franklinstr. 28/29, 10587, Germany",2
+"Department of Information Engineering, the Chinese University of Hong Kong, Shatin",2
+"Department of Signal Processing, Tampere University of Technology, Finland",2
+"Bilgisayar M&#x00FC;hendisli&#x011F;i B&#x00F6;l&#x00FC;m&#x00FC;, Ankara Y&#x0131;ld&#x0131;r&#x0131;m Beyaz&#x0131;t &#x00DC;niversitesi, Ankara, T&#x00FC;rkiye",2
+"Department of Electronic and Communication Engineering, Universiti Tenaga Nasional Km 7, Jalan IKRAM-UNITEN, 43000, Kajang, Selangor, Malaysia",2
+"Senior Member, IEEE, Pohang University of Science and Technology (POSTECH), Pohang, Gyeongbuk, 790-784, South Korea. phone: 82-54-279-2880, 2214; fax: 82-54-279-5594; e-mail: dreaming@postech.ac.kr, syoh@postech.ac.kr",2
+"Pohang University of Science and Technology (POSTECH), Pohang, Gyeongbuk, 790-784, South Korea. phone: 82-54-279-2880, 2214; fax: 82-54-279-5594; e-mail: dreaming@postech.ac.kr",2
+"Center of Machine Vision Research, Department of Computer Science and Engineering, University of Oulu, Oulu, Finland",2
+"Key Laboratory of Child Development and Learning Science (Ministry of Education), Research Center for Learning Science, Southeast University, Nanjing, China",2
+"Dirección General de la Guardia Civil - DGGC Madrid, Spain",2
+"School of Information Science and Technology, Huaqiao University, Xiamen, China",2
+"Computer Laboratory, University of Cambridge, UK",2
+"Rutgers University, Piscataway",2
+"University of Hong Kong, China",2
+"Department of Automation, State Key Laboratory of Intelligent Technologies and Systems, Tsinghua National Laboratory for Information Science and Technology, Tsinghua University, Beijing, China",2
+"School of Electronics and Information Technology, Sun Yat-Sen University",2
+"Hexi University, Center for Information Technology, Zhangye, China",2
+"Department of Computer Engineering, Rochester Institute of Technology, Rochester, NY, USA",2
+"School of Communication and Information Engineering, Shanghai University, Shanghai, China",2
+"Columbia University, New York, USA",2
+"School of Computer Science and Engineering, South China University of Technology, Guangzhou, China",2
+"Department of Communications and Computer Engineering, University of Malta, Msida, Malta",2
+"Multimedia Communications Dept., EURECOM, Sophia Antipolis, France",2
+"Michigan State University, East Lansing, U.S.A.",2
+"Dept. of E & TC Engineering, Maharashtra Institute of Technology, Pune, India",2
+"Commonwealth Scientific and Industrial Research Organisation, Clayton South, Vic. , Australia",2
+"Speech, Audio, Image and Video Technology Laboratory, Queensland University of Technology, Brisbane, Australia",2
+"School of Computer Science and Technology and the Tianjin Key Laboratory of Cognitive Computing and Application, Tianjin University, Tianjin, China",2
+"School of Electrical Engineering and Computer Science, Queen Mary University of London, London, U.K.",2
+"Institute of Communications Engineering, National Tsing Hua University, Hsinchu, Taiwan",2
+"Dept. of ECE and Digital Technology Center, Univ. of Minnesota, USA",2
+"Department of Computer Science, National Tsing Hua University, Taiwan, R.O.C",2
+"Department of Computer Science, Chu Hai College of Higher Education, Hong Kong",2
+"School of Electrical and Computer Engineering, Ulsan National Institute of Science and Technology (UNIST), UNIST-gil 50, 689-798, Korea",2
+"Dept. of Comp. Sci. and Inf. Eng, Chung Hua University, Hsinchu, Taiwan",2
+"Dept. of Comp. Sci, National Chiao Tung University, Hsinchu, Taiwan",2
+"HEUDIASYC Mixed Res. Unit, Compiegne Univ. of Technol., France",2
+"Universit&#x00E0; di Salerno v. Ponte don Melillo, 84084, Fisciano (IT)",2
+"National Taiwan University of Science and Technology, No.43, Keelung Rd., Sec.4, Da'an Dist., Taipei City 10607, Taiwan",2
+"Department of Computer Science and Technology, Nanjing University of Aeronautics and Astronautics, Nanjing, China",2
+"Laboratory LAROSERI, Department of Computer Science, Faculty of Sciences, University of Chouaib Doukkali, El Jadida - Morocco",2
+"Department of Electrical Engineering, Shanghai Jiao Tong University, Shanghai, China",2
+"Computer Vision, Video and Image Processing (CvviP) Research Lab, Faculty of Electrical Engineering, Universiti Teknologi Malaysia, 81310 UTM Skudai, Johor, Malaysia",2
+"Centre for Quantum Computation & Intelligent Systems and the Faculty of Engineering & Information Technology, University of Technology, Sydney, Australia",2
+"Department of Electronics, University of Goa, India",2
+"Department of Computer Science, School of Science and Technology, Meiji University, 1-1-1 Higashimita, Tama-ku, Kawasaki, Kanagawa",2
+"Department of Computer Science, Graduate School of Science and Technology, Meiji University, 1-1-1 Higashimita, Tama-ku, Kawasaki, Kanagawa",2
+"Multimedia Processing Laboratory, Department of Computer Science, National Tsing Hua University, Hsinchu, Taiwan",2
+"Robotics Institute, Carnegie Mellon University, Pittsburgh, PA",2
+"Electric Power Research Institute, State Grid Shanghai Electric Power Company Shanghai, 200093, China",2
+"South East European University, Tetovo, Macedonia",2
+"Computer Science and Engineering, Arizona State University, Tempe, AZ",2
+"School of EE, Xidian University, Xi'an 710071, China",2
+"School of Electrical and Electronic Engineering, Nanyang Technological University, Singapore, Singapore",2
+"Department of ECE, National Institute of Technology, Rourkela (Odisha), India",2
+University of Houston,2
+Korea Electronics Technology Institute,2
+"Computer Science and Engineering Dept., University of North Texas, Denton, TX, USA",2
+"Machine Learning Department, Carnegie Mellon University, Pittsburgh, PA",2
+"Computer Science and Engineering Michigan State University, East Lansing, USA",2
+"Organization of Advanced Science and Technology, Kobe University, Japan",2
+"IBM T.J. Watson Research Center, Yorktown Heights, NY 10598, USA",2
+"University of Illinois’ Advanced Digital Sciences Center, Singapore",2
+"Institute for Advanced Computer Studies, University of Maryland, College Park, Maryland 20740 United States",2
+"B-DAT Laboratory, School of Information and Control, Nanjing University of Information and Technology, Nanjing, China",2
+"Intelligent Data Center (IDC) and Department of Mathematics, Sun Yat-Sen University, Guangzhou, China",2
+"National Institute of Informatics, Tokyo, Japan",2
+Jaypee Institute of Information Technology,2
+"Samsung Advanced Institute of Technology (SAIT), Republic of Korea",2
+"Institute of Computing Technology, CAS, Key Laboratory of Intelligent Information Processing of Chinese Academy of Sciences (CAS), Beijing, China",2
+"School of Computer Science and Engineering, University of Aizu, Tsuruga, Ikkimachi, Aizuwakamatsu, Japan",2
+"Computer Science Department, Hong Kong Baptist University",2
+"Department of Electrical Engineering and Computer Science, Northwestern University, Evanston, IL, USA",2
+"Institute of Computing, University of Campinas, Campinas, SP, 13083-852, Brazil",2
+"College of Information Science and Engineering, Ritsumeikan University, Kusatsu, Japan",2
+"Robotics Lab, Futurewei Technologies Inc., Santa Clara, USA",2
+"Institute of Automatic Control Engineering (LSR), TU München, Germany",2
+"Image Understanding and Knowledge-Based Systems, TU München, Germany",2
+"HRL Laboratories, LLC, Information Systems and Sciences Lab, Malibu, CA 90265 USA",2
+"School of Computer Science, Communication University of China, Beijing, China",2
+"Department of Electrical, Computer, and Systems Engineering, Rensselaer Polytechnic Institute, Troy, NY, USA",2
+"Computer Science and Technology, University of Science and Technology of China, Hefei, Anhui, China",2
+"Thales Services, ThereSIS, Palaiseau, France",2
+"School of Electrical and Electronic Engineering, Tianjin University of Technology, China",2
+"Faculty of Computers and Information, Cairo University, Egypt",2
+"Dept. of Electrical and Computer Engineering, National University of Singapore",2
+"Department of Computing, the Hong Kong Polytechnic University, Hong Kong",2
+"Institute of Computing, University of Campinas, Campinas, SP, Brazil, 13083-852",2
+"Tsinghua National Laboratory for Information Science and Technology, Department of Computer Science and Technology, Tsinghua University, Beijing 100084, China",2
+"CyLab Biometrics Center, Electrical and Computer Engineering, Carnegie Mellon University, Pittsburgh, PA 15213, USA",2
+"La Trobe University, Australia",2
+"State key Laboratory of High Performance Computing, National University of Defense Technology, Changsha, Hunan, China, 410073",2
+"Beijing Key Laboratory of Multimedia and Intelligent Software Technology, College of Metropolitan Transportation, Beijing University of Technology, Beijing, China",2
+"Science and Engineering Faculty, Queensland University of Technology, Australia",2
+"Department of Computer Technology, Shanghai Jiao Tong University, Shanghai, China",2
+"School of Computer Science and Software Engineering, The University of Western Australia, Nedlands, WA, Australia",2
+"National Tsing Hua University, Hsinchu, Taiwan",2
+"Rutgers, The State University of New Jersey",2
+"Dhirubhai Ambani Institute of Information and Communication Technology, India",2
+"Aix Marseille Univ LIF/CNRS, France",2
+"Swiss Federal Institute of Technology Lausanne (EPFL), Switzerland",2
+"National Engineering Research Center for E-Learning, Central China Normal University, Wuhan, China",2
+Institut de Rob&#x00F2;tica i Inform&#x00E0;tica Industrial (CSIC-UPC),2
+"TeV, Fondazione Bruno Kessler, Trento, Italy",2
+"Department of Computer Science, IT: Instituto de Telecomunicações, University of Beira Interior, Covilhã, Portugal",2
+"Institute of Computer and Communication Engineering, Department of Electrical Engineering, National Cheng Kung University, 1 University Road, Tainan, Taiwan",2
+"New Jersey Institute of Technology, Department of Electrical &amp; Computer Engineering, University Heights Newark, NJ 07102 USA",2
+Korea Advanced Institute of Science and Technology,2
+"School of Information Science, Japan Advanced Institute of Science and Technology, Asahidai 1-1, Nomi-shi, Ishikawa, Japan, 923-1211",2
+"Chinese Academy of Sciences, Beijing",2
+"Tsinghua University, Beijing",2
+"Electrical and Control Engineering, National Chiao Tung University, Hsinchu, Taiwan",2
+"Artificial Intelligence Laboratory, University of Tsukuba, Japan",2
+"Dept. of Electr. & Comput. Eng., Carnegie Mellon Univ., Pittsburgh, PA, USA",2
+"Brno University of Technology, Brno-střed, Czech Republic",2
+"Deutsche Welle, Bonn, Germany",2
+"GSI Universidad Polit-écnica de Madrid, Madrid, Spain",2
+"Department of Computer Science, University of Calgary, Calgary, Alberta, Canada",2
+"National Institute of Standards and Technology (NIST), Gaithersburg, MD",2
+"Räven AB, SE-411 14 Göteborg, Sweden",2
+"School of Mathematics and Statistics, Xi'an Jiaotong University, Xi'an, 710049, China",2
+"School of Engineering and Applied Sciences, Harvard University, Cambridge, MA 02138",2
+"The Rowland Institute at Harvard, Harvard University, Cambridge, MA 02142",2
+"Halmstad University, Halmstad, Sweden",2
+"Dept. of Appl. Phys. & Electron., Umea Univ., Sweden",2
+"Department of Electronic and Information Engineering, The Hong Kong Polytechnic University, Hong Kong, SAR",2
+"Department of Radiology, University of Pennsylvania, Philadelphia, PA",2
+"Institute of VLSI Design, Zhejiang University, Hangzhou, China",2
+"Faculty of Engineering Technology, Hasselt University, Diepenbeek, Belgium",2
+"DUT-RU International School of Information and Software Engineering, Dalian University of Technology, Dalian, China",2
+"Universita degli Studi di Palermo, Dipartimento di Ingegneria Informatica, Viale delle Scienze, 90128, ITALY",2
+"Mechatronic Engineering Department, Mevlana University, Konya, Turkey",2
+"Tokyo Metropolitan University, Hino, Tokyo 191-0065, Japan",2
+"Department of Electrical and Computer, Engineering, University of Denver, Denver, CO 80208",2
+"TÜBİTAK-BİLGEM-UEKAE, Anibal Cad., P.K.74, 41470, Gebze-KOCAELİ, Turkey",2
+"State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China",2
+"School of Electronics Engineering and Computer Science, Peking University, Beijing 100871, China",2
+"Department of Electronic Engineering, Tsinghua University, Beijing 100084, P.R. China",2
+"Department of Electrical and Computer Engineering, University of British Columbia, Vancouver, Canada",2
+"The 28th Research Institute of China Electronics Technology Group Corporation, China",2
+"Indian Statistical Institute, 203, B. T. Road, Kolkata 700108, India",2
+"Institute of VLSI Design, Zhejiang University",2
+"Faculty of Engineering Technology, University Hasselt",2
+"Institute of Information Science, Beijing Jiaotong University, 100044, China",2
+"Department of Computer and Information Sciences, Temple University",2
+"Department of Computing Sciences, Elon University",2
+"University of Maryland, College Park, MD, USA",2
+"Department of Electrical and Computer Engineering, University of Maryland, College Park, MD",2
+"Raytheon BBN Technologies, 10 Moulton St, Cambridge, MA",2
+"General Electric Global Research, 1 Research Circle, Niskayuna, NY",2
+"Concordia University, Montreal, QC, Canada",2
+"Charles Perkin Centre, Faculty of Medicine, University of Sydney, Australia",2
+"Charles Perkin Centre, Faculty of Engineering, University of Sydney, Australia",2
+"Department of Computing, The Hong Kong Polytechnic University, Hong Kong, China",2
+"Department of Electrical and Computer Engineering, Nazarbayev University, Astana, Kazakhstan",2
+"Department of Information and Communication Engineering, The University of Tokyo, 7-3-1 Hongo, Bunkyo-ku, Tokyo 113-8656, Japan",2
+"Sch. of Comput. Sci., Carnegie Mellon Univ., Pittsburgh, PA, USA",2
+"Tsinghua National Lab for Info. Sci. &amp; Tech., Depart. of Computer Sci. &amp; Tech., Tsinghua University, Beijing, China",2
+Harbin Institute of Technology,2
+"National Laboratory of Pattern Recognition (NLPR), Institute of Automation, Chinese Academy of Sciences, Beijing, China",2
+"School of Electrical Engineering and Computer Science at the University of Newcastle, Callaghan, NSW 2308, Australia",2
+"School of Electrical Engineering and Computing, University of Newcastle, Newcastle, Australia",2
+"Department of Computer Science and Engineering, Dankook University, Yongin, South Korea",2
+"KTH Royal Institute of Technology, Stockholm, Sweden",2
+"Division of Graduate Studies of Tijuana Institute Technology, Mexico",2
+"Department of Psychology and the Center for Brain Science, Harvard University, Cambridge",2
+"School of Engineering and Applied Sciences, Department of Molecular and Cellular Biology, and the Center for Brain Science, Harvard University, Cambridge",2
+"Department of Computer Science, The University of Texas at San Antonio, San Antonio, TX, USA",2
+"Sch. of Infor. Sci. and Tech., Huizhou University, Huizhou, China",2
+"Institute of Advanced Manufacturing Technology, Ningbo Institute of Industrial Technology, Chinese Academy of Sciences, Ningbo, China",2
+"School of Mechatronic Engineering and Automation, Shanghai University, Shanghai, China",2
+"Waseda University The Graduate School of Information, Production and Systems 2-7, Hibikino, Wakamatsu-ku, Kitakyushu-shi, Fukuoka, Japan",2
+"London, United Kingdom",2
+"Shenzhen VisuCA Key Lab / SIAT, Chinese Academy of Sciences, China",2
+"Department of Mathematics, Center for Computer Vision, Sun Yat-Sen University, Guangzhou, China",2
+"Majority Report, France",2
+"Imaging Science and Engineering Laboratory Tokyo Institute of Technology Yokohama 226-8503, Japan",2
+"University of Montreal, Department of Computer Science and Operations Research (DIRO), 2920 Chemin de la tour, QC, Canada, H3C 3J7",2
+"College of Computer Science and Technology, Harbin Engineering University, Harbin, China",2
+"Microsoft Research, Haidian, Beijing, P. R. China",2
+"Video and Image Processing System Laboratory, School of Electronic Engineering, Xidian University , Xi'an, China",2
+"Department of Computing, Imperial College London, United Kingdom",2
+"Robert BOSCH Research and Technology Center, Palo Alto, CA 94304, USA",2
+"Advanced Technologies Application Center 7a #21406 b/ 214 and 216, P.C. 12200, Siboney Playa, Havana, Cuba",2
+"National Chung Hsing University, Taichung",2
+"School of Information and Communication Engineering, Beijing University of Posts and Telecommunications, Beijing",2
+"School of Electronic Engineering, Xi'an University of Posts and Telecommunications, Xi'an, China",2
+"Department of Sciences and Information Technology, University of Sassari, Viale Mancini 5, 07100 Sassari, Italy",2
+"Institute for Information Technology and Communications (IIKT), Otto-von-Guericke-University Magdeburg, D-39016, P.O. Box 4210 Germany",2
+"ISIR, CNRS UMR 7222, Universite Pierre et Marie Curie, Paris",2
+"National Taiwan University of Science and Technology, Taipei, Taiwan",2
+Beijing Institute of Science and Technology Information,2
+"University of Maryland, College Park, MD, 20742",2
+"Department of Computer Science, University of Rochester, Rochester, NY, USA",2
+"School of Computer Science and Educational Software, Guangzhou University, Guangzhou, China",2
+"Department of Electrical Engineering and Computer Science, University of Siegen, Siegen, Germany",2
+"State Key Laboratory of Brain and Cognitive Science, Institute of Psychology, Chinese Academy of Sciences, Beijing, 100101, China",2
+"Electrical and Computer Engineering, Wayne State University, Detroit, Michigan 48202",2
+"University of Udine, Italy",2
+"Department of Computer Science and Information Engineering, National Formosa University, Yunlin 632, Taiwan",2
+"Broadcasting &amp; Telecommunications, Convergence Media Research Department, Electronics and Telecommunications Research Institute, Daejeon, Korea",2
+"Graduate Institute of Networking and Multimedia and the Department of Computer Science and Information Engineering, National Taiwan University, Taipei, Taiwan",2
+"Innovation Center, Canon USA Inc., San Jose, California",2
+"University of Texas at San Antonio, San Antonio, Texas",2
+"Dept. of ECE & Digital Technology Center, Univ. of Minnesota, USA",2
+"Bradley Department of Electrical and Computer Engineering, Virginia Polytechnic Institute and State University (Virginia Tech), Blacksburg, VA 24061, USA",2
+"FMV I&#x015E;IK &#x00DC;niversitesi, &#x015E;ile, Istanbul",2
+"Istanbul Technical University, Informatics Institute, 34469, Turkey",2
+"School of Mathematical Sciences, Anhui University, Hefei, China",2
+"Electrical and Computer Engineering, Michigan State University, East Lansing, MI, 48824, USA",2
+"University of Trento, Trento, Italy",2
+"Agency for Science, Technology and Research, Institute for Infocomm Research, Singapore",2
+"School of Electrical &amp; Electronic Engineering, Nanyang Technological University, Singapore 639798, Singapore",2
+Artificial Vision Laboratory National Taiwan University of Science and Technology,2
+"Computational Imaging Laboratory, School of Electrical Engineering and Computer Science, University of Central Florida, Orlando, FL, USA",1
+"Key Laboratory of Machine Perception(MOE), EECS, Peking University, Beijing, 100871",1
+"College of Engineering and Computer Science, The Australian National University, Canberra, ACT, Australia",1
+"Griffith University, Australia",1
+"Department of Computer Science, University of California at Davis, Davis, USA",1
+"School of Computer Science, Fudan University, Shanghai, China",1
+"Department of Electrical Engineering, Korea Advanced Institute of Science and Technology, Daejeon, South Korea",1
+"Korea Institute of Oriental Medicine, Daejeon, South Korea",1
+"Microsoft Research Asia, 49 Zhichun Road, Beijing, 100190, China",1
+"Department of Information Engineering, The Chinese University of Hong Kong, China",1
+"Harbin Engineering University, Harbin, Heilongjiang, 150001, China",1
+"Dept. of Computer Science and Computer Engineering, University of Louisville, KY, USA",1
+"Dept. of Advanced Technologies, Alcorn State University, MS, USA",1
+"Baiyun District Bureau of Justice, Guangzhou, China",1
+"Guangdong Key Laboratory of Information Security Technology, School of Data and Computer Science, Sun Yat-sen University, Guangzhou, China",1
+IBM T. J. Watson Research Center,1
+"AI Lab, TAL Education Group, College of Electronics and Information Engineering, Sichuan University, Chengdu, China",1
+"Institute of High Performance Computing, A*STAR, Singapore",1
+"3OmniVision Technologies Singapore Pte. Ltd., Singapore",1
+"Department of ECE, National University of Singapore, Singapore",1
+"Department of Electrical and Computer Engineering, University of Toronto Toronto, Canada",1
+"School of Information Science and Engineering, Yunnan University, Kunming, P. R. China",1
+"Res. Center for Learning Sci., Southeast Univ., Jiangsu, China",1
+"CSE, SUNY at Buffalo, USA and Southeast University, China",1
+"Knowledge Enterprise Development, Arizona State University, Tempe, 85287-5406 United States",1
+"Computer Science, Florida State University, Tallahassee, United States",1
+"Computing Informatics and Decision Systems Engineering, Arizona State University, Tempe, United States",1
+"Department of Psychology, University of Northern British Columbia, Prince George, Canada",1
+"Speech, Audio, Image, and Video Technology Laboratory, Queensland University of Technology , Brisbane, Australia",1
+"Speech, Audio, Image, and Video Technology Laboratory, Queensland University of Technology, Brisbane, Australia",1
+"Commonwealth Scientific and Industrial Research Organization, Pullenvale, Australia",1
+"Department of Psychology, University of Pittsburgh, Pittsburgh, PA , USA",1
+"Department of Psychology, University of Pittsburgh, Pittsburgh, PA, USA",1
+"School of computer Science and Engineering, Nanyang Technological University, Singapore",1
+"Institute for Infocomm Research, Agency for Science, Technology and Research, Singapore",1
+"Dhirubhai Ambani Institute of Information and Communication Technology, Gandhinagar, India",1
+"Department of Electrical and Computer Engineering, Stevens Institute of Technology, Hoboken, New Jersey, 07030",1
+"Department of Electrical, Computer and Biomedical Engineering, University of Rhode Island, Kingston, RI 02881",1
+"Vulcan Inc, Seattle, WA 98104",1
+"Department of Computer Science, Hofstra University, Hempstead, NY 11549",1
+"Dept. of Computing, Curtin University of Technology, WA 6102, USA",1
+"School of Software, Faculty of Engineering and Information Technology, University of Technology, Sydney, Australia",1
+"Language Technologies Institute, Carnegie Mellon University, Pittsburgh, PA",1
+"Center for OPTical Imagery Analysis and Learning, Northwestern Polytechnical University, Shaanxi, China",1
+"Beijing Etrol Technologies Co., Ltd, Beijing, China",1
+"Securics, Inc. Colorado Springs, CO, USA",1
+"Institute of Computing, University of Campinas (Unicamp) Campinas, SP, Brazil",1
+"Department of Electrical and Computer Engineering, Saginaw Valley State University, University Ctr, MI- 48710",1
+"IDIAP Research Institute, Martigny, Switzerland",1
+"Centre de Visió per Computador, Universitat Autònoma de Barcelona, Barcelona, Spain",1
+"University of Michigan, Ann, Arbor, MI USA",1
+"Department of Computer Science, Rutgers University, Piscataway, New Jersey 08854, USA",1
+"Department of Embedded Systems, Institute for Infocomm Research, Singapore",1
+"IBM Research, USA",1
+"IBM Hursley Labs, UK",1
+"Monash University, Caulfield East, Australia",1
+"School of Math and Geospatial Sciences, Royal Melbourne Institute of Technology University , Melbourne, Australia",1
+"Department of Computer Science, Harbin Institute of Technology, China, 150001",1
+"Carnegie Mellon University, 5000 Forbes Ave, Pittsburgh, PA 15213. Marios.Savvides@ri.cmu.edu",1
+"Carnegie Mellon University, 5000 Forbes Ave, Pittsburgh, PA 15213. yunghui@cmu.edu",1
+"College of Computer Science and Technology, Nanjing University of Science and Technology, Nanjing, China",1
+"Department of Software Engineering, King Saud University, Riyadh, Saudi Arabia",1
+"Institute of Information Technology, University of Dhaka, Dhaka, Bangladesh",1
+"Department of Information, The Third Affiliated Hospital, Sun Yat-sen University, China",1
+"OmniVision Technologies Singapore Pte. Ltd., Singapore",1
+"TCL Research America, San Jose, CA 95134, USA",1
+"Dept. of Eng. Sciences and Appl. Mathematics, Northwestern University, Evanston, IL 60208, USA",1
+GE Global Research,1
+"Xerox Research Center India, India",1
+"Palo Alto Research Center, Webster, NY",1
+"Facebook, Singapore",1
+"Elektrik-Elektronik Mühendisliği Bölümü, Bahçeşehir Üniversitesi, Turkey",1
+"Elektrik-Elektronik Mühendisliği Bölümü, Boğaziçi Üniversitesi, Turkey",1
+"School of Computer and Software, Nanjing University of Information Science and Technology, Nanjing, China",1
+"Beijing Advanced Innovation Center for Imaging Technology, Beijing 100048, China",1
+"Lane Department of CSEE, West Virginia University, Morgantown, WV 26506, USA",1
+"Institute of Computing, University of Campinas, Campinas-SP, CEP, Brazil",1
+"Department of Electronics and Computing and the Electronics and Information Technology Research & Development Center, Universidade Federal do Amazonas, Manaus-AM, CEP, Brazil",1
+"National Chiao-Tung University, Hsinchu, Taiwan",1
+"Department of Computer and Information Science, University of Macau, Taipa, Macau",1
+"General Electric Global Research, Niskayuna, NY, USA",1
+"Institute of Computing, University of Campinas, Campinas, Brazil",1
+"Department of Mathematics, Wayne State University, Detroit, MI, USA",1
+"Artificial Intelligence Key Laboratory, of Sichuan Province, Zigong, Sichuan, 643000, P. R. China",1
+"School of Big Data and Computer, Science, Guizhou Normal University, Guiyang, Guizhou, 550025, P. R. China",1
+"School of Electrical & Electronic Engineering, Yonsei University, Seoul, South Korea, 120-749",1
+"Inria M&#x00E9;diterran&#x00E9;e, France",1
+"Massachusetts Institute of Technology, Cambridge, MA 02139, USA",1
+"The Rowland Institute at Harvard, Harvard University, Cambridge, MA 02142, USA",1
+"Data and Analytics Department, KPMG AGWPG, Düsseldorf, Germany",1
+"Faculty of Mathematics and Statistics, Hubei University, Wuhan, China",1
+"West Virginia University, Morgantown, WV",1
+Ajou Univ.,1
+"State Key Laboratory of Transient Optics and Photonics, Center for OPTical IMagery Analysis and Learning, Xi’an Institute of Optics and Precision Mechanics, Chinese Academy of Sciences, Xi’an, China",1
+"School of Information Technology, Halmstad University, Halmstad, Sweden",1
+"Nokia Bell-Labs, Madrid, Spain",1
+"Technicolor, Paris, France",1
+"MPI Informatics, Germany",1
+"Dept. de Ciência da Computacão, Universidade Federal de Ouro Preto, MG Brazil",1
+"Interactive and Digital Media Institute, National University of Singapore, Singapore",1
+"Alibaba Group, Hangzhou, China",1
+"Shin-Guang Elementary School, Yulin 646, Taiwan",1
+"Department of Computer Science, Brown University, Providence Rhode Island, 02912, USA",1
+"School of Management, New Jersey Institute of Technology, University Heights, Newark, New Jersey 07102, USA",1
+"School of Electrical Engineering, Nantong University, Nantong, China",1
+"Vesalis company, Clermont-Ferrand, France",1
+"University of Calgary, Calgary, T3G 2T6 AB, CANADA",1
+"University of Louisville, Louisville, KY 40292 USA",1
+"School of Electrical Engineering and Computer Science, University of Central Florida, Orlando, FL, USA",1
+"Max Planck Institute for Informatics, Saarbrucken, Germany",1
+"School of Software Engineering, Chongqing University, Chongqing, China",1
+"College of Information Engineering, Capital Normal University, Beijing, China",1
+"School of Automation, Beijing University of Posts and Telecommunications, Beijing, China",1
+"Department of Electrical Engineering, University of California, Riverside, Riverside CA, California 92521 United States",1
+"Art History, University of California, Riverside, Riverside, California United States",1
+"Electrical Engineering, University of California, Riverside, Riverside, California 92521 United States",1
+"University of Science &amp; Technology (UST), Daejeon, Korea",1
+"Universidade Estadual de Campinas, Cx.P. 6176 Campinas-SP, CEP 13084-971, Brazil",1
+"Embodied Emotion, Cognition and (Inter-)Action Lab, University of Hertfordshire, United Kingdom",1
+"Institute on Children Studies, University of Minho, Portugal",1
+"College of Aerospace and Material Engineering, National University of Defense Technology, Changsha, China",1
+"Air Force Research Lab, Rome, NY, 13441, USA",1
+"Department of Electronic Engineering, Institute of Image Communication and Information Processing, Shanghai Jiao Tong University, Shanghai, China",1
+"School of Computer Engineering, The Nanyang Technological University, Singapore",1
+"Fraunhofer Institute of Optronics, System Technologies and Image Exploitation IOSB, Fraunhoferstrasse 1, Karlsruhe, Germany",1
+"Department of Electrical and Computer Engineering, University of Toronto, Toronto, ON, Canada",1
+"Department of Computer Science, University of Texas at San Antonio",1
+"Department of Computer Science, University of Rochester",1
+"School of Computer Science and Technology, Tianjin University, China",1
+"Philips Research Eindhoven, HTC 34, Netherlands",1
+"Epson Research and Development Inc., San Jose, CA",1
+"GE Global Research, Bangalore, India",1
+"Advanced Media Lab. Samsung Advance Institute of Technology, Republic of Korea",1
+"Department of Business Planning &amp; Information Systems, TEI of Crete, Agios Nikolaos, Greece",1
+"National Institute of Informatics, Japan",1
+"School of Information Technology Jawaharlal Nehru Technological University Hyderabad Andhra Pradesh, India",1
+"Department of CSE, Vignan University, Andhra Pradesh, India",1
+"University of North Carolina at Wilmington, USA",1
+"UNCW, USA",1
+"Department of EngineeringFaculty of Engineering and Science, University of Agder, Kristiansand, Norway",1
+"Yahoo Inc., New York, NY, USA",1
+"Queen Mary, University of London",1
+Brunel University,1
+"Vision &amp; Sensing Group, Faculty of Information Sciences and Engineering, University of Canberra, Australia",1
+"School of Engineering, CECS, Australian National University, Australia",1
+"Comput. Control Lab, Nanyang Technol. Univ., Singapore",1
+School of Computer ScienceThe University of Adelaide,1
+"Instituto de Sistemas e Rob&#x00F3;tica, Instituto Superior T&#x00E9;cnico, Lisboa, Portugal",1
+"Shenzhen Graduate School, Harbin Institute of Technology, Bio-Computing Research Center, Shenzhen, China",1
+"Department of Computing, Biometrics Research Centre, The Hong Kong Polytechnic University, Hong Kong",1
+"School of Computer Science, Nanjing University of Science and Technology, Nanjing, China",1
+"Centre for Quantum Computation &amp; Intelligent Systems, University of Technology, Sydney, Australia",1
+"CSIE, National Cheng Kung University, Tainan, 701 Taiwan",1
+"CSIE, National Taiwan University of Science and Technology, Taipei, 106 Taiwan",1
+"Computer Science and Engineering Department, University of Texas at Arlington, Arlington, TX, USA",1
+"INSA CVL, Univ. Orléans, PRISME EA 4229, Bourges, France",1
+"LITIS, Universite de Rouen - INSA de Rouen, Rouen, FR",1
+"Department of Learning and Digital Technology, Fo Guang University, Yilan, Taiwan",1
+"Department of Computer Science and Engineering, The Hong Kong University of Science and Technology, Hong Kong",1
+"Noah’s Ark Laboratory, Hong Kong",1
+"Noah.s Ark Laboratory and Hong Kong University of Science and Technology, Hong Kong",1
+"Department of Computer Science, University of Texas at San Antonio, San Antonio, United States",1
+"Chongqing Institute of Green and Intelligent Technology, Chinese Academy of China, Hefei University of Technology, Hefei, China",1
+Carnegie Mellon University,1
+"Fac. of Mathematics and Computer Sciences, University of Science, Ho Chi Minh City, Viet Nam",1
+"Graduate Institute of Communication Engineering, National Taiwan University, Taipei, Taiwan",1
+"LAMIA, University of French West Indies and Guiana, EA 4540, Pointe-à-Pitre, France",1
+"Institute of Intelligent Systems and Robotics (ISIR), Pierre and Marie Curie University, Paris, France",1
+"Xiamen University of Technology, Xiamen, China",1
+"Chulalongkorn University Bangkok, Thailand",1
+"Department of Computer Science, City University of Hong Kong, Kowloon, Hong Kong",1
+"School of Automation, Huazhong University of Science and Technology, Wuhan, China",1
+"Department of Computer Science and Information Engineering, National Yunlin University of Science and Technology, Taiwan 640, R.O.C.",1
+"Bordeaux INP, LaBRI, PICTURA, UMR 5800, F-33400 Talence, France",1
+"Department of Electrical Engineering, College of Electrical Engineering and Computer Science, National United University, Miaoli, Taiwan",1
+"Department of Information Management, College of Management, National United University, Miaoli, Taiwan",1
+"Department of Computer Science and Information Engineering, College of Electrical Engineering and Computer Science, National United University, Miaoli, Taiwan",1
+"Fundamental and Applied Science Department, Universiti Teknologi PETRONAS, Seri Iskandar, 32610, Perak Malaysia",1
+"Sch. of Electr. Eng. & Comput. Sci., Newcastle Univ., NSW, Australia",1
+"University of Sassari, Computer Vision Laboratory, PolComing Viale Mancini, 5 07100 Sassari, Italy",1
+"Azure Storage, Microsoft, Seattle, WA, USA",1
+"Department of Electronics Engineering, Mokpo National University, Republic of Korea",1
+"School of Information and Communication Engineering, Sungkyunkwan University, Suwon, Republic of Korea",1
+"Graduate Program on Electrical Engineering, Institute of Informatics, Federal University of Rio Grande do Sul, Porto Alegre, Brazil",1
+"Graduate Program on Electrical Engineering, University of Passo Fundo, Passo Fundo, Brazil",1
+"Institute of Informatics, Federal University of Rio Grande do Sul, Porto Alegre, Brazil",1
+"Northwestern Polytechnical University, Xi&#x2019;an, China",1
+"Delft University of Technology, EEMCS, Delft, The Netherlands, reinierz@gmail.com",1
+"Imperial College London, Computing Department, London, U.K., m.pantic@imperial.ac.uk",1
+"Departments of Medical Imaging and Medical Biophysics, University of Western Ontario, London, ON, Canada",1
+"St. Joseph’s Health Care, London, ON, Canada",1
+"Northumbria University, Newcastle upon Tyne, U.K.",1
+"Department of Medical Biophysics, University of Western Ontario, London, ON, Canada",1
+"School of Electrical Engineering, Kookmin University, Seoul, Korea",1
+"The School of Physics and Telecommunication Engineering, South China Normal University, Guangzhou, China",1
+"College of Automation, Shenyang Aerospace University, China",1
+"Universit&#x00E9; de Lyon, CNRS, Ecole Centrale de Lyon, LIRIS UMR5205, F-69134, France",1
+"Center for Machine Vision Research, University of Oulu, Finland",1
+"Faculty of Computer Science & Information Technology University of Malaya Kuala Lumpur, Malaysia",1
+Nanyang Technological University School of Computer Engineering,1
+"College of Engineering, Shibaura Institute of Technology, Tokyo, Japan",1
+"Graduate School of Engineering, Shibaura Institute of Technology, Tokyo, Japan",1
+"Department of Mathematics, JiaYing University, Meizhou, China",1
+"Hebei University of Technology, School of Science, Tianjin, P. R. China",1
+"Department of Electrical and Computer Engineering, College of Engineering, and College of Computer and Information Science (Affiliated), Northeastern University, MA, USA",1
+"Semnan University, Semnan, Iran",1
+"Institut Mines-Telecom, Telecom ParisTech, CNRS LTCI, Sophia Antipolis, France",1
+"Centre for Intelligent Systems Research, Deakin University, Geelong, VIC, Australia",1
+"Faculty of Engineering, Technology, and Built Environment, UCSI University, Kuala Lumpur, Malaysia",1
+"Department of Computer Science, Cornell University and Cornell NYC Tech",1
+"Department of Electrical and Computer Engineering, New Jersey Institute of Technology, Newark , NJ, USA",1
+"Microsoft Research , Redmond, WA, USA",1
+"Microsoft Visual Perception Laboratory, Zhejiang University, Hangzhou, China",1
+"Coll. of Electron. & Inf., Northwestern Polytech. Univ., Xi'an, China",1
+"Nanyang Technological University and the Institute for Infocomm Research, Singapore",1
+Ajou University,1
+"Faculty of Applied Mathematics, Guangdong University of Technology, Guangzhou, China",1
+"Faculty of Information Science and Technology, Sun Yat-Sen University, Guangzhou, China",1
+"Department of Computer Science, University of Loughborogh",1
+"Department of Electrical Engineering and Electronics, University of Liverpool",1
+"Department of Creative IT Engineering, POSTECH, Pohang, South Korea, 37673",1
+"Viterbi School of Engineering, University of Southern California, Los Angeles, CA",1
+"University of Pittsburgh, USA",1
+"Donghua University, China",1
+"Department of Information Management, Yuan Ze University, Taoyuan, China",1
+"AI Speech Ltd., Suzhou, China",1
+"Department of Electronic Engineering, Kyung Hee University, Yongin, South Korea",1
+"Faculty of Computer Science and Information Technology, University of Malaya, Kuala Lumpur, Malaysia",1
+"Dept. of Comput. Sci. &amp; Info. Eng., National Yunlin Univ. of Science &amp; Technology, Taiwan",1
+"Bilgisayar Mühendisliği Bölümü, İstanbul Üniversitesi, Turkey",1
+"Bilgisayar Mühendisliği Bölümü, Bahçeşehir Üniversitesi, İstanbul, Turkey",1
+"Centre for Vision, Speech and Signal Processing, University of Surrey, United Kingdom. k.messer@surrey.ac.uk",1
+"Centre for Vision, Speech and Signal Processing, University of Surrey, United Kingdom. j.kittler@surrey.ac.uk",1
+"Centre for Vision, Speech and Signal Processing, University of Surrey, United Kingdom. x.zou@surrey.ac.uk",1
+"University of Twente, Human Media Interaction Group, Enschede, The Netherlands",1
+Biometric and Imaging Processing Laboratory (BIPLab),1
+University of Naples Federico II,1
+Warsaw University of Technology,1
+Research and Academic Computer Network (NASK),1
+SensoMotoric Instruments (SMI),1
+Maastricht University,1
+Università di Salerno Italy,1
+University of Southampton,1
+"University of Beira Interior, IT: Instituto de Telecomunicações",1
+"SAP Innovation Center Networks, Singapore",1
+"National Research University Higher School of Economics, Laboratory of Algorithms and Technologies for Network Analysis, Nizhny Novgorod, Russia",1
+"Bioinformatics Institute, A&#x2217;STAR, Singapore",1
+"iCV Research Group, Institute of Technology, University of Tartu, 50411, Estonia",1
+"Dept. Mathematics and Informatics, University of Barcelona, Computer Vision Center, Spain",1
+"Institute of Technology, University of Tartu, 50411, Estonia",1
+"Amazon.com Cambridge, MA, USA",1
+"Dept. of EMPH, Icahn School of Medicine at Mount Sinai, New York, NY 10029",1
+"Dept. of ENME College Park, University of Maryland, College Park, MD, 20742",1
+"Eskişehir Osmangazi Üniversitesi, Bilgisayar Mühendisliği Bölümü, Eskişehir, Türkiye",1
+"Anadolu Üniversitesi, Elek., Elektronik Mühendisliği Bölümü, Eskişehir, Türkiye",1
+"Department of Computer Science, University of Texas at San Antonio, San Antonio, TX",1
+"College of Communication Engineering, Chongqing University, Chongqing, China",1
+"Vision Semantics Ltd, UK",1
+"Rutgers University, USA",1
+"Computer Science, SUNY Stony Brook, Stony Brook, United States",1
+"Department of Mathematics, Colorado State University, Fort Collins, CO 80523-1874 U.S.A. peterson@math.colostate.edu",1
+"Department of Mathematics, Colorado State University, Fort Collins, CO 80523-1874 U.S.A. kirby@math.colostate.edu",1
+"Department of Mathematics, Colorado State University, Fort Collins, CO 80523-1874 U.S.A. chang@math.colostate.edu",1
+"The University of Electro-Communications, Japan",1
+"Institute for Infocomm Research, A-star, Singapore",1
+"Inst. Dalle Molle d'Intelligence Artificielle Perceptive, Martigny, Switzerland",1
+"Transmural Biotech, Barcelona, Spain",1
+"George Mason University, Fairfax, VA 22030",1
+"Computational Biomedicine Lab, University of Houston, 4800 Calhoun Rd., Houston, TX 77204, USA",1
+"Purdue University, West Lafayette, IN, USA",1
+"Moshanghua Tech Company, Ltd., Beijing, China",1
+"College of Information Engineering, Xiangtan University, Xiangtan, China",1
+"CARTIF Centro Tecnológico, Robotics and Computer Vision Division, Boecillo (Valladolid, Spain)",1
+"University of California, San Diego",1
+"Dept. of Computer Science and Information Engineering, Providence University, Taichung, Taiwan",1
+"360 AI Institute, Beijing, China",1
+"Tencent YouTu Lab, Tencent Shanghai, China",1
+"Sun Yat-sen University, China",1
+"Centeye, Inc.",1
+"Center for Optical Imagery Analysis and Learning (OPTIMAL), State Key Laboratory of Transient Optics and Photonics, Xi'an Institute of Optics and Precision Mechanics, Chinese Academy of Sciences, Xi'an, China",1
+"Institute of Information and Control, Hangzhou Dianzi University, China",1
+"NPU-VUB Joint AVSP Research Lab, Vrije Universitiet Brussel (VUB), Department of Electronics & Informatics (ETRO) Pleinlaan 2, 1050 Brussel, Belgium",1
+"School of Communication and Information Engineering, Shanghai University",1
+"IRISA, University of Rennes 1",1
+INRIA Rennes-Bretagne-Atlantique,1
+"Advanced Digital Sciences Center, University of Illinois at Urbana-Champaign, Singapore",1
+"International Institute of Information Technology, Hyderabad, Telangana, India",1
+"Shenzhen Graduate School, Harbin Institute of Technology, 518055, China",1
+"Research Institution of Intelligent Control and Testing, Graduate School of Tsinghua University at Shenzhen, 518055, China",1
+Commonwealth Scientific and Industrial Research Organization (CSIRO),1
+"University of Canberra, Austrlia",1
+"Department of Computer Science, Rutgers University, 110 Frelinghuysen Road, Piscataway",1
+"Ocean University of China, Teaching Center of Fundamental Courses, Qingdao, China",1
+"Clínica Otocenter, Teresina, Piauí, Brasil",1
+"Department of Computer Science and Engineering, The State University of New York at Buffalo, New York, USA",1
+"Elektrik-Elektronik Mühendisliği Bölümü, Trakya Üniversitesi, Edirne, Türkiye",1
+"Chinese Academy of Sciences, China",1
+"Amrita E-Learning Research Laboratory, Amrita Vishwa Vidyapeetham, Amritapuri, Kollam, India",1
+"Department of Computer Science, Amrita Vishwa Vidyapeetham, Amritapuri Campus, Kollam, India",1
+"Amrita E-Learning Research Laboratory and the Department of Computer Science, Amrita Vishwa Vidyapeetham, Amritapuri Campus, Kollam, India",1
+"IKERBASQUE, Basque Foundation for Science, and the University of the Basque Country, San Sebastian, Spain",1
+"Computer Vision Center, Edifici &#x201C;O&#x201D; - Campus UAB, 08193 Bellaterra (Barcelona), Spain",1
+"Amazon Research, Berlin, Germany",1
+"State Key Laboratory of Rail Traffic Control and Safety, Beijing Jiaotong University, Beijing, 100044, China",1
+"Samsung Advanced Institute of Technology, Samsung Electronics, Gyeonggi-do, Korea",1
+"Department of Information Engineering, University of Florence, Firenze, Italy",1
+"Tsinghua National Lab for Information Science and Technology, Beijing, China",1
+"Universidad Argentina de la Empresa (UADE), Lima 717, Buenos Aires, Argentina",1
+"US Army Research Laboratory, 2800 Powder Mill Rd, Adelphi, MD 20783, USA",1
+"Dept. of Comput. Sci., North Carolina Univ., Wilmington, NC, USA",1
+"Institute of Applied Mathematics, AMSS, Chinese Academy of Sciences, Beijing 100190",1
+"Research Center of Precision Sensing and Control, Institute of Automation, Chinese Academy of Sciences, Beijing, 100190",1
+"Biometrics Engineering Research Center, Yonsei University, Seoul, Korea",1
+"Departamento de Informtica e Matemtica Aplicada/University of Rio Grande do Norte, Natal, Brazil",1
+"Computer Engineering Department, Girne American University, Kyrenia, Cyprus 90",1
+"School of Engineering and Digital Arts, University of Kent, Canterbury, U.K.",1
+"Office of Naval Research, Arlington",1
+"Microsoft Research, Redmond, WA",1
+"Adobe Research Department, Adobe Systems Inc, San Jose, CA",1
+"Department of Computer Science, National Chung Cheng University, Chiayi, Taiwan",1
+"Microsoft, Redmond, WA",1
+"BIWI, ETH Zurich Zurich, Switzerland",1
+"Department of Electrical Engineering, National Chung Hsing University, Taiwan",1
+"Integrated Circuits and Electronics Laboratory, Department of Engineering, Aarhus University, Denmark",1
+"Dept. of Comput. Sci., California Inst. of Technol., Pasadena, CA, USA",1
+"Utechzone Co. Ltd., New Taipei City, Taiwan 235",1
+"Department of Cognitive Science, University of California, San Diego, CA, USA",1
+"Department of Communication Engineering, Shanghai University, Shanghai, China",1
+Department of Electronic Engineering Shanghai Jiao Tong University,1
+"Institute of Communication Engineering, National Tsing-Hua University, Taiwan",1
+"ICT Center, CSIRO",1
+"Bili&#x015F;im Teknolojileri Enstit&#x00FC;s&#x00FC;, T&#x00FC;bitak B&#x0130;LGEM, Kocaeli, T&#x00FC;rkiye",1
+"Karlsruhe Institute of Technology (KIT), Germany",1
+"Istanbul Technical University (ITU), Turkey",1
+"École Polytechnique Fédérale de Lausanne (EPFL), Switzerland",1
+"Department of Electronics and Communication Engineering, Faculty of Electrical & Electronic Engineering, Khulna University of Engineering & Technology, Bangladesh",1
+"Space Application Laboratory, Research Center for Advanced Science and Technology, University of Tokyo, Japan",1
+"Department of Aeronautics and Astronautics Engineering, Graduate School of Engineering, University of Tokyo, Japan",1
+"University of Central Florida 4000 Central Florida Blvd., Orlando, 328816, USA",1
+"Carnegie Mellon University 5000 Forbes Ave Pittsburgh, PA 15213, USA",1
+"Integrated Management Coastal Research Institute, Universitat Politècnica de València, València, Spain",1
+"Department of Computer Science, Madrid Open University, Madrid, Spain",1
+"Department of Research and Diagnostic Methods, Faculty of Education, Pontificia University of Salamanca, Salamanca, Spain",1
+"The University of Tokushima, Japan",1
+"Computer Science Department, University of Maryland, College Park, MD",1
+"Department of Computer Science, Memorial University of Newfoundland, Saint John's, NL, Canada",1
+"Computer Science Department, Tel-Aviv University, Ramat Aviv, Tel-Aviv, Israel",1
+"Shenzhen University, Shenzhen, China",1
+"U.S. Army Res. Lab., Adelphi, MD, USA",1
+"Department of Electrical Engineering, Assiut University, Asyut, Egypt",1
+"Visual Computation, Queen Mary University, London, United Kingdom",1
+"University of British Columbia, Canada",1
+"Graduate School at Shenzhen, Tsinghua University, China",1
+"Department of Computer Science, Cornell University, Ithaca, NY, USA",1
+"Department of Computer Science and Technology, Tongji University, Shanghai, China",1
+"Walt Disney Imagineering, USA",1
+"AEBC, Nanyang Environment &amp; Water Research Institute, Nanyang Technological University, Singapore",1
+"Australian Centre for Visual Technologies, University of Adelaide, Adelaide, Australia",1
+"Center for OPTical IMagery Analysis and Learning (OPTIMAL), State Key Laboratory of Transient Optics and Photonics, Xi’an Institute of Optics and Precision Mechanics, Chinese Academy of Sciences, Xi’an, P. R. China",1
+"University of Massachusetts at Amherst, Amherst, MA, USA",1
+"School of Computer Science, The University of Adelaide, Adelaide, SA, Australia",1
+"Department of Engineering and MaintenanceChina Mobile Group, Jiangsu Company, Ltd., Changzhou, China",1
+"School of Computer Sciences and Technology, Nanjing Normal University, Nanjing, China",1
+"School of Mathematical Sciences, Nanjing Normal University, Nanjing, China",1
+"School of Information Engineering, Zhengzhou University, China",1
+"Dept. of Computer Science, Unit of Medical Technology and Intelligent Information Systems, University of Ioannina, Greece",1
+"Dept. of Medical Physics, Medical School, Unit of Medical Technology and Intelligent Information Systems, University of Ioannina, Greece",1
+"Dermalog Identification Systems GmbH, Hamburg, Germany",1
+"Research & Advanced Technology Division of SAIC Motor Corporation Limited, Shanghai, 201804, P.R. China",1
+"ECSE Department, Rensselaer Polytechnic Institute",1
+"Centre of Excellence for Research in Computational Intelligence and Applications, School of Computer Science, University of Birmingham, Birmingham, U.K.",1
+"VUB-NPU Joint AVSP Research Lab, Northwestern Polytechnical University (NPU), Shaanxi Key Lab on Speech and Image Information Processing, 127 Youyi Xilu, X'ian 710072, China",1
+"Institute of Computer Science, Faculty of Electronics and Information Technology, Warsaw University of Technology, Nowowiejska 15/19, 00-665 Warsaw, Poland",1
+"KT Future Technology Laboratory, Seoul, South Korea",1
+"Department of Electrical and Computer Engineering, Rutgers University, Piscataway, NJ, USA",1
+"Jiangsu Key Laboratory of Big Data Analysis Technology, Nanjing University of Information Science and Technology, Nanjing, China",1
+"State Key Laboratory of CAD&CG, Zhejiang University, Hangzhou, China",1
+"Microsoft Key Laboratory of Visual Perception, Zhejiang University, Hangzhou, China",1
+"School of Automation, Northwestern Polytechnical University, Xi’an, China",1
+"SAIIP, School of Computer Science, Northwestern Polytechnical University, Xi’an, China",1
+"Department of Computer Science, Shenzhen Graduate School, Harbin Institute of Technology, China",1
+"State Key Laboratory of Synthetical Automation for Process Industries, Northeastern University, Shenyang, Liaoning 110004, China",1
+"University of Pittsburgh and Adjunct Faculty at the Robotics Institute, Carnegie Mellon University: 3137 Sennott Square, 210 S. Bouquet St., PA 15260 USA",1
+"AI Institute, Qihoo/360 Company, Beijing, China",1
+"Intelligent Media Technique Research Center, Chongqing Institute of Green and Intelligent Technology, Chinese Academy of Sciences, Chongqing, P.R. China",1
+"CAS Center for Excellence in Brain Science and Intelligence Technology, National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, University of Chinese Academy of Sciences, Beijing, P.R. China",1
+"AI Institute of Qihoo/360 Company, Beijing, P.R. China",1
+"Advanced Engineering Electronics & Safety, Delphi Deutschland GMBH, Delphiplatz 1, Wuppertal, North Rhine-Westfalia, Germany",1
+"National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, Beijing, P.R. China",1
+"LIRIS, UMR 5205 CNRS, INSA-Lyon, F-69621, France",1
+"Orange Labs, R&D, Meylan, France",1
+"Department of Computer Science, Chu Hai College of Higher Education, Tsuen Wan, Hong Kong",1
+"Teaching and research of section of mathematics, Hebei Information Engineering School, Baoding 071000, China",1
+"RheinAhrCampus der Hochschule Koblenz, Remagen, Germany",1
+"Singapore Polytechnic, 500 Dover Road, Singapore 139651",1
+"Singapore University of Technology and Design, 20 Dover Road, Singapore 138682",1
+"State Key Laboratory of Robotics, Shenyang Institute of Automation Chinese Academy of Sciences, Shenyang, China",1
+"Technische Universitt Darmstadt, Computer Systems Group, Darmstadt, Germany",1
+"School of Engineering and Applied Science, Aston University, Birmingham, U.K.",1
+"PERCEPTION Team, INRIA Grenoble Rhône-Alpes, France",1
+"Digital World Research Centre, University of Surrey, UK",1
+"ARM, Inc., San Jose, CA",1
+"Department of Information Engineering, Henan University of Science and Technology, Luoyang, China",1
+"School of Computing Sciences, University of East Anglia, Norwich, U.K.",1
+Department of mechatronic technology of National Taiwan Normal University,1
+"Department of Computer Science, Taipei Municipal University of Education",1
+"Computer Vision Center 08193 Bellaterra, Barcelona, SPAIN",1
+"Computer Science Division, University of Central Florida, Orlando, FL, USA",1
+"GuangXi Cast Animation Company, Ltd., Nanning, China",1
+"Department of Electrical Engineering and Computer Science, Colorado School of Mines, Golden, CO, USA",1
+"School of Information Engineering, Xiangtan University, Xiangtan, China",1
+"Baidu International Technology (Shenzhen) Company, Ltd., Shenzhen, China",1
+"Department of Electronic Engineering, The Chinese University of Hong Kong",1
+"School of Communication and Information Engineering, Chongqing University of Posts and Telecommunications, Chongqing, China",1
+"School of Computing, Teesside University, UK",1
+"Teleinfrastructure R&amp;D Lab, Technical University of Sofia, Bulgaria",1
+"Dept. of Inf. Network Technol., Hsiuping Inst. of Technol., Taichung, Taiwan",1
+"Cork Institute of Technology, CIT, Cork Ireland",1
+"Biomedical Engineering Program, University of Manitoba, Winnipeg, Canada",1
+"Department of Computer Science, University of North Carolina Wilmington, Wilmington, United States",1
+School of ComputingNational University of Singapore,1
+"UFSC - Federal University of Santa Catarina / INE - CTC, Florianópolis, 88040-900, Brazil",1
+"UDESC - Santa Catarina State University, DCC - CCT, Joinville, 89219-710, Brazil",1
+"School of Electrical and Electronic Engineering, University of Manchester, Manchester, U.K.",1
+"Fordham University, New York, 10023, USA",1
+"Rapid-Rich Object Search (ROSE) Lab, Nanyang Technological University, Interdisciplinary Graduate School, SingaporeSingapore",1
+"Department of Electrical Engineering, Semnan University, Semnan, Iran",1
+"Department of Electrical Engineering, Amirkabir University of Technology, Tehran, Iran",1
+"Dept. of EE, UESTC, 2006 Xiyuan Ave, Chengdu, Sichuan 611731, China",1
+"Dept. of Radiation Oncology, Johns Hopkins University, 3400 N. Charles St, Baltimore, MD 21218, USA",1
+"Dept. of Electrical &amp; Computer Engineering, Johns Hopkins University, 3400 N. Charles St, Baltimore, MD 21218, USA",1
+"Film Department ELTE University, Budapest, Hungary",1
+"IIIT Hyderabad, 500032, A.P, India",1
+"School of Computing and Electrical Engineering, IIT Mandi, H.P, 175001, India",1
+"School of Computer Science and Software Engineering, The University of Western Australia, Crawley, WA, Australia",1
+"School of Engineering, Griffith University, Nathan, QLD, Australia",1
+"Faculty of Engineering and Information Technology, Centre for Quantum Computation and Intelligent Systems, University of Technology Sydney, Sydney, NSW, Australia",1
+"NASA Jet Propulsion Laboratory, California Institute of Technology, Pasadena, CA",1
+"Department of Naval Architecture and Marine Engineering, University of Michigan, Ann Arbor, MI 48109 USA",1
+"Beijing Key Laboratory of Digital Media, State Key Laboratory of Virtual Reality Technology and Systems, and School of Computer Science and Engineering , Beihang University, China",1
+"Philips Research , The Netherlands",1
+"Istanbul Technical University, Faculty of Computer and Informatics, Istanbul, Turkey",1
+"Signal and Information Processing section (SIP), Department of Electronic Systems, Aalborg University, Denmark",1
+"Section of Image Analysis and Computer Graphics, DTU Compute, Technical University of Denmark, Kgs. Lyngby, Denmark",1
+"
+ University of Delaware, USA",1
+"Department of Cognitive Science, School of Information Science and Engineering, Xiamen University, Xiamen, China",1
+"Taylor's University Lakeside Campus, Selangor Darul Ehsan, Malaysia",1
+"Department of Mathematical Sciences, Georgia Southern University, Statesboro, USA",1
+"School of Computer and Communication Science, École Polytechnique Fédérale de Lausanne, Lausanne, Switzerland",1
+"Department of Electronic EngineeringCentre for Vision, Speech and Signal Processing, University of Surrey, Surrey, U.K.",1
+"Department of Electrical EngineeringFaculty of Engineering, Urmia University, Urmia, Iran",1
+"ICT-ISVISION Joint R&D Lab. for Face Recognition, Chinese Acad. of Sci., Beijing, China",1
+"International School, Beijing University of Posts and Telecommunications, Beijing, China",1
+"Department of Social and Decision Sciences, Carnigie Mellon University, Pittsburgh, PA 15224, USA",1
+"Department of Computer Science and Engineering, Beijing Institute of Technology, Beijing 100081, CHINA. zhangxiaoxun@bit.edu.cn",1
+"Department of Computer Science and Engineering, Beijing Institute of Technology, Beijing 100081, CHINA. jiayunde@bit.edu.cn",1
+"Department of Computer Science and Engineering, Beijing Institute of Technology, Beijing 100081, CHINA. xushuang@bit.edu.cn",1
+"Microsoft Research Asia, China",1
+"Microsoft Live Labs Research, China",1
+"Baidu Research, USA",1
+"Applied Network Technology (ANT), Department of Computer Science, Faculty of Science, Khon Kaen University, Khon Kaen, Thailand",1
+"Department of Business Computer, Faculty of Management Science, Nakhon Ratchasima Rajabhat University, Nakhon Ratchasima, Thailand",1
+Microsoft Research,1
+MIT CSAIL,1
+Affectiva,1
+Yahoo! Research,1
+"Institute for Computational and Mathematical Engineering, Stanford University",1
+"Computer Laboratory, University of Cambridge, Cambridge, U.K.",1
+"Department of Mathematics and Computer Science, University of Cagliari, Italy",1
+"Center for OPTical IMagery Analysis and Learning, State Key Laboratory of Transient Optics and Photonics, Xi'an Institute of Optics and Precision Mechanics, Chinese Academy of Sciences, Xi'an, China",1
+"Department of Electronic and Electrical Engineering, The University of Sheffield, Sheffield, U.K.",1
+"College of Electronic and Information Engineering, Nanjing University of Information Science and Technology, Nanjing, China",1
+"Fotonation LTD, Galway, Ireland",1
+"School of Computer Science and Center for OPTical IMagery Analysis and Learning (OPTIMAL), Northwestern Polytechnical University, Xian 710072, Shaanxi, China",1
+Elektronik ve Haberle&#x015F;me M&#x00FC;hendisli&#x011F;i B&#x00F6;l&#x00FC;m&#x00FC;,1
+"Faculty of Computer and Informatics, Istanbul Technical University, Istanbul, Turkey",1
+"Amirkabir University of Technology, Electrical Engineering Department, Tehran, Iran",1
+"Office of Safety Research and Development, Federal Highway Administration, U.S. Department of Transportation, Virginia, USA",1
+"School of Computer Science and Technology, Guangdong University of Technology, China",1
+"College of Mathematics and Informatics, South China Agricultural University, China",1
+"Computer Vision and Multimodal Computing, MPI Informatics, Saarbruecken",1
+"Computer Vision Laboratory, ETH Zurich",1
+"School of Information and Electrical Engineering, China University of Mining and Technology, Xuzhou, China",1
+"Curtin University Department of Mechanical Engineering, Perth, Western Australia 6012",1
+"Department of Mechanical Engineering, Curtin University, Perth, Western Australia 6012",1
+Vols Taipei,1
+"Intel Labs Europe, London, United Kingdom",1
+"Technion - Israel Inst. of Technology, Haifa, 32000, Israel",1
+"The Open University of Israel, Raanana, 43107, Israel",1
+"Weizmann Institute of Science, Rehovot, 76100, Israel",1
+"Department of Electronic Engineering, The Chinese University of Hong Kong, Hong Kong, China",1
+"Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences, Shenzhen, China",1
+"Faculty of Science and Technology, Communication University of China, Beijing, China",1
+"Science and Technology Department, Communication University of China, Beijing, China",1
+"Collaborative Innovation Center, Communication University of China, Beijing, China",1
+"School of Computer Software, Tianjin University, 300072, China",1
+"Computer Vision Laboratory, ETH Zürich, Zürich, Switzerland",1
+"Universitat Pompeu Fabra, Universidad Pompeu Fabra (Edificio França), Passeig de Circumvallacio, 8, Barcelona, Spain",1
+"Departamento de estadística, Universidad Carlos III de Madrid, Barcelona, Spain",1
+"Southeast University, Nanjing, China",1
+"Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China",1
+"Computer Science, Rochester Institute of Technology, USA",1
+"Center for Imaging Science, Rochester Institute of Technology, USA",1
+"Space and Naval Warfare Systems Center Pacific, San Diego, CA, 92152, United States",1
+"Electrical and Computer Engineering, University of California, San Diego",1
+"ECE, Department MSIT, C-4 Janakpuri, New Delhi, India",1
+"Dept. of Comput. Sci., New Jersey Inst. of Technol., Newark, NJ, USA",1
+"Electrical Engineering Department, National Tsing-Hua University, Hsin-Chu, Taiwan. e-mail: chihming.fu@gmail.com",1
+"Electrical Engineering Department, National Tsing-Hua University, Hsin-Chu, Taiwan; Informatics Department, Fo-Guang University, I-Lan, Taiwan. e-mail: clhuang@ee.nthu.edu.tw",1
+"Electrical Engineering Department, National Tsing-Hua University, Hsin-Chu, Taiwan",1
+"Research Institute for Future Media Computing, Shenzhen University, Shenzhen, China",1
+"School of Computer and Information, Anhui Polytechnic University, Wuhu, China",1
+"Faculty of Information Sciences and Engineering, University of Canberra, Australia",1
+"Robotics Institute, Carnegie Mellon University, USA",1
+"Pediatrics Department, University of South Florida, Tampa, FL, USA",1
+"Department of Computer and Communication Engineering, University of Science and Technology Beijing, Beijing, China",1
+"Raytheon BBN Technologies, Cambridge, MA, USA",1
+"Pontifical Catholic University of Minas Gerais - Department of Computer Science, R. Dom Jose Gaspar, 500, Belo Horizonte MG, 30535901, Brazil",1
+"Human-Robot Interaction Research Center, Department of Mechanical Engineering, Korea Advanced Institute of Science and Technology, Republic of Korea",1
+"Tsinghua University, Beijing, 100084, China",1
+"Department of Electrical and Computer Engineering, University of Alberta, Edmonton, Canada",1
+"Department of Statistics and Operational Research, Faculty of Mathematics, Complutense University of Madrid, Madrid, Spain",1
+"Distributed Infinity, Inc., Larkspur, CO, USA",1
+"University of Colorado Denver, Denver, CO, USA",1
+"IBM Thomas J. Watson Research Center, Yorktown Heights, NY, USA",1
+"Department of Computer Science and Engineering, Texas A&M University, College Station, TX, USA",1
+"Facebook Inc., San Francisco, CA, USA",1
+"Adobe Systems Inc., San Jose, CA, USA",1
+"Dept. of Mathematics and Computer Science, University of Udine, Italy",1
+"Faculty of Information Technology, University of Technology, Sydney, Sydney, Australia. email: hintz@it.uts.edu.au",1
+"Faculty of Information Technology, University of Technology, Sydney, Sydney, Australia. email: jant@it.uts.edu.au",1
+"Faculty of Information Technology, University of Technology, Sydney, Sydney, Australia. email: pohsiang@it.uts.edu.au",1
+"School of Information and Communication Engineering, Beijing University of Posts and Telecommunications, Beijing 100876, China",1
+"School of Computer Science, Beijing University of Posts and Telecommunications, Beijing 100876, China",1
+"Advanced Electronics System, Academy of Scientific and Industrial Research, CSIR-Central Electronics Research Institute, Pilani, India",1
+"Mobile Communications Department, Eurecom, Biot, France",1
+"STARS Team, Institut National de Recherche en Informatique et en Automatique, Sophia Antipolis, France",1
+"Institute of Industrial Science, the University of Tokyo, Tokyo, Japan",1
+"Department of Electrical Engineering, KAIST, Korea",1
+"Electronic R&D Center, Mando Corp., Korea",1
+"Department of New Media, Korean German Institute of Technology, Korea",1
+"SAIT Beijing Lab, Samsung Advanced Institute of Technology, China",1
+"Mechatronics & Manufacturing Technology Center, Samsung Electronics Co., Korea",1
+"Department of Electrical, Computer and Biomedical Engineering, University of Pavia, Pavia, Italy",1
+Open University of Israel,1
+"Concordia Institute for Information Systems Engineering (CIISE), Concordia University, Montreal, QC, H3G 1T7, Canada",1
+"Department of Electrical and Computer Engineering, Concordia University, QC, Canada, H3G 1T7",1
+"University of KwaZulu-Natal, School of Maths, Statistics &amp; Computer Science, Durban - South Africa",1
+"Sudan University of Science and Technology, College of Computer Science and Information Technology, Khartoum - Sudan",1
+"Digital Media Institute, Hunan University, Changsha, 410082 P.R. China",1
+"College of information science and engineering, Hunan University, Changsha, 410082 P.R. China",1
+ACM Professional Specialist in Artificial Intelligence,1
+Université du Quebec a Rimouski (UQAR),1
+Shanghai university,1
+"CNRS, IMB, UMR 5251, Talence, France",1
+"UMR 5800, CNRS, LaBRI, Talence, France",1
+"UMR 5800, University of Bordeaux, LaBRI, Talence, France",1
+"UMR 5800, Bordeaux INP, LaBRI, Talence, France",1
+"UMR 5800, LaBRI, Talence, France",1
+"Dept. of Electrical Engineering, National Chung Hsing University, Taiwan",1
+"Division of Design of Intelligent Machines, Center for Development of Advanced Technologies, Algiers, Algeria",1
+"Microsoft Research, Beijing, China",1
+"University of Science and Technology of China, Hefei, China",1
+"AI Laboratories, Alibaba Group, Hangzhou, China",1
+"National ICT Australia, Canberra, ACT, Australia",1
+"MIT Media Laboratory, Cambridge, MA, USA",1
+"Industrial Technology Research Institute, Hsinchu, Taiwan",1
+"Garmin Corporation, New Taipei, Taiwan",1
+"Graduate Institute of Networking and Multimedia, National Taiwan University, Taipei, Taiwan",1
+"School of Information Technologies, University of Sydney, Sydney, NSW, Australia",1
+"Tencent AI Laboratory, Shenzhen, China",1
+"Malong Technologies Company, Ltd., Shenzhen, China",1
+"Department of Information Engineering, the Chinese University of Hong Kong",1
+"Department of Electronic Engineering, the Chinese University of Hong Kong, Shatin, Hong Kong",1
+"Instituto Nacional de Astrofísica, Óptica y Electrónica, Luis Enrique Erro No.1, Tonantzintla, Puebla, México. CP 72840",1
+"Pontifical Catholic University of Rio de Janeiro, Rua Marquês de São Vicente 225, Gávea, Brasil",1
+"Department of Electrical Engineering, National Taiwan University of Science and Technology",1
+"Department of Computer Science and Technology, Key Laboratory of Embedded System and Service Computing, Ministry of Education, Tongji University, Shanghai, China",1
+"Computational Vision Group, University of California at Irvine, Irvine, CA, USA",1
+"Computer Vision Laboratory, ETH Zurich, Zurich, Switzerland",1
+"Department of Computer Science and Technology, The Key Laboratory of Embedded System and Service Computing, Ministry of Education, Tongji University, Shanghai, China",1
+"Tohoku University, Japan",1
+"Department of Mechanical Engineering, Universiti Tenaga Nasional Km 7, Jalan IKRAM-UNITEN, 43000, Kajang, Selangor, Malaysia",1
+"Dept. of Electron. Eng., Hannam Univ., Daejeon, South Korea",1
+"School of Information Science and Technology, Sun Yat-sen University, Guangzhou, China",1
+"Centre for Autism Research, Philadelphia, US",1
+University of Cambridge,1
+"Department of Electrical Engineering, Chang Gung University, Taoyuan, Taiwan",1
+"Dept. of Electr. & Comput. Eng., Toronto Univ., Ont., Canada",1
+"Center for Advance Imaging Innovation and Research, New York University, New York, NY, USA",1
+"Key Laboratory of Machine Perception (Ministry of Education), School of Electronics Engineering and Computer Science, Peking University, Beijing, China",1
+"Machine Learning Department, Carnegie Mellon University, Pittsburgh, PA, USA",1
+"Department of Computer Science, University of Warwick, Coventry, U.K.",1
+"Laboratoire MIA, University of La Rochelle, La Rochelle, France",1
+"College of Cyber Security, Jinan University, Guangzhou, China",1
+"Columbia University, New York",1
+"Department of Electrical and Computer Engineering, College of Computer and Information Science, Northeastern University, Boston, MA, USA",1
+"School of Engineering, University of Illinois, Urban Champagne, USA",1
+"ECIT, School of Electronics, Electrical Engineering &amp; Computer Science, Queen's University Belfast, Belfast, UK",1
+"Computer Science, Loughborough University, Loughborough, UK",1
+"Dept. of Computer Engineering, Science and Reaserch Branch, Islamic Azad University, Tehran, Iran",1
+"School of Electrical and Computer Engineering, College of Engineering, University of Tehran, Iran",1
+"School of Computing and Communications, Lancaster University, Lancaster, UK",1
+"Center for Optical Imagery Analysis and Learning, Northwestern Polytechnical University, Xi’an, China",1
+"School of Instrumentation Science and Opto-electronics Engineering, Beihang University, Beijing, China",1
+"School of Automation, Huazhong University of Science and Technology, Wuhan, China 430074",1
+"College of Electronics and Information Engineering, Sichuan University, Chengdu, China 610064",1
+"Dept. of Comp. Sci. and Tech., Shenzhen Graduate School, Harbin Institute of Technology, China",1
+Imperial College London,1
+"Machine Vision Group, University of Oulu, Oulu, Finland",1
+"Inst. of Autom., Chinese Acad. of Sci., Beijing, China",1
+"School of Computing, Computing 1, 13 Computing Drive, National University of Singapore, Singapore 117417",1
+"Institute for Infocomm Research, 1 Fusionopolis Way, #21-01 Connexis, Singapore 138632",1
+"Department of Electrical and Computer Engineering, National University of Singapore, Singapore 117576",1
+"Institute of Mathematical and Computer Sciences, University of São Paulo, São Carlos, Brazil",1
+"Graduate Sch. of Inf. Sci. & Technol., Tokyo Univ., Japan",1
+"Key Laboratory of Child Development and Learning Science, Ministry of Education, Research Center for Learning Science, Southeast University, Nanjing, China",1
+"Department of Psychology, University of Pittsburgh/Robotics Institute, Carnegie Mellon University , Pittsburgh, PA, USA",1
+"School of Computer Science and Software Engineering, Shenzhen University, Shenzhen, China",1
+"Dept.of Intelligence Science and Technology, The Kyoto University of JAPAN",1
+"Dept.of Computational Intelligence and Systems Science, Tokyo Institute of Technology of JAPAN",1
+"Microsoft Research, Redmond, WA, USA",1
+"Centre for Quantum Computation and Intelligent Systems, Faculty of Engineering and Information Technology, University of Technology, Sydney, NSW, Australia",1
+"School of Software Technology, Dalian University of Technology",1
+"School of Computer and Information Science, Southwest University, Chongqing, China",1
+"School of Computer Science and Technology, Shandong University, Shandong, China",1
+"Facebook Inc., Palo Alto, CA, USA",1
+"Stanford University, USA",1
+"Electrical and Computer Engineering Department, University of California, Santa Barbara, CA 93106 USA",1
+"Psychology Department, University of California, Santa Barbara, CA 93106 USA",1
+"Computer Science and Information Engineering Department, National Taiwan Normal University, Taipei, Taiwan",1
+"Dept. of Comp. Sci. and Inf. Eng, National United University, Miaoli, Taiwan",1
+"College of Mechanical and Electrical, Changzhou Textile Garment Institute, Changzhou, China",1
+"University of IIllinois, Urbana-Champaign",1
+"Institut EURECOM, Sophia Antipolis, (France)",1
+"Sapienza Universit&#x00E0; di Roma, v. Salaria 113, 00198, Rome, (IT)",1
+"Chinese Academy of Sciences, Shenzhen Institutes of Advanced Technology, Shenzhen, China",1
+"Department of Electrical and Computer Engineering, Iowa State University, Ames, IA, USA",1
+"Laboratory LIM, Department of Computer Science, Faculty of Sciences and Technologies, University Hassan II, Casablanca-Morocco",1
+"College of Electrical Engineering and Automation, Anhui University, Hefei, China",1
+"DCNS Research, 5 rue de l'Halbrane, 44340 Bouguenais, France",1
+"Department of Information Engineering and Computer Science, University of Trento, Trento, TN, Italy",1
+"Snapchat Research, Venice, CA90291",1
+"Beauty Cosmetic Research Lab, Kao Corporation, Tokyo, Japan",1
+"Department of CS, University of Texas at San Antonio, 78249, USA",1
+"Department of CSE, University at Buffalo (SUNY), NY 14260, USA",1
+University of Waterloo,1
+"College of Information, Capital University of Economics and Business, Beijing, China.sanyecunfu@emails.bjut.edu.cn",1
+"Bio-Computing Research Center, Harbin Institute of Technology Shenzhen Graduate School, China",1
+"Guangdong Industry Training Centre, Guangdong Polytechnic Normal University, Guangzhou, China",1
+"Korea University, Seoul, South Korea",1
+"Department of Electrical and Computer Engineering, Ajou University",1
+"Advanced Digital Sciences Center , Singapore",1
+"National Laboratory of Pattern Recognition, Institute of Automation, CAS, Beijing, 100190, China",1
+"Computer Science and Electrical Engineering West Virginia University, Morgantown, USA",1
+"Department of ComputingBiometrics Research Centre, The Hong Kong Polytechnic University, Hong Kong",1
+"School of Computer Science and Information Technology, RMIT University, Melbourne, VIC, Australia",1
+"School of Automation Engineering, University of Electronic Science and Technology of China, Chengdu, China",1
+"School of Digital Media, Jiangnan University Jiangsu Wuxi, PR China",1
+"School of Digital Media, Jiangnan University, Jiangsu Wuxi, PR China",1
+"School of Maths, Statistics &amp; Computer Science, University of KwaZulu-Natal, Durban, South Africa",1
+"Faculty of Science and Technology, Sudan University of Science and Technology, Khartoum, Sudan",1
+"Graduate School of System Informatics, Kobe University, Japan",1
+"Center for Research in Computer Vision, University of Central Florida, Orlando, USA",1
+"Department of Electrical and Computer Engineering, University of Florida, Gainesville, FL",1
+"Beijing FaceAll Co. Beijing, China",1
+University of Science and Technology of China,1
+"Amazon, Berkshire, U.K.",1
+"International Institute of Information Technology, Hyderabad, India",1
+"Electrical and Computer Engineering, University of Maryland, College Park, Maryland 20740 United States",1
+"Electrical and Computer Engineering, Rutgers University, Piscataway, New Jersey 08854 United States",1
+"Centre for Quantum Computation & Intelligent Systems and the Faculty of Engineering and Information Technology, University of Technology Sydney, 81 Broadway Street, Ultimo, NSW, Australia",1
+"Department of Multimedia and Graphic Arts, Cyprus University of Technology, P.O. Box 50329, 3036, Lemesos, Cyprus",1
+"Department of Electronic Engineering, City University of Hong Kong, 83 Tat Chee Avenue, Kowloon, Hong Kong",1
+"National Computer Network Emergency Response Technical Team/Coordination Center of China, Beijing, China",1
+"Department of Computer Science and Technology, Tsinghua University, Beijing, China",1
+"Army Research Office, RTP, Raliegh, NC, United States of America",1
+"Department of Informatics, Modeling, Electronics, and Systems, University of Calabria, Rende, Italy",1
+"The University of New South Wales, Australia",1
+"Advanced Technologies Application, Center (CENATAV), Cuba",1
+"Institute of Digital Media, Peking University, Beijing, China",1
+"GREYC, CNRS UMR6072, University of Caen, Caen, France",1
+"IDIAP, Martigny, Switzerland",1
+"Department of Electrical and Computer Engineering, Carnegie Mellon University, Pittsburgh, PA 15213. msavvid@cs.cmu.edu",1
+"Information Sciences Institute, University of Southern California, Marina del Rey, CA 90292. mitra@isi.edu",1
+"Department of Computer Science and Engineering, The Chinese University of Hong Kong, Shatin, N.T., Hong Kong. E-mail: king@cse.cuhk.edu.hk",1
+"Department of Computer Science and Engineering, The Chinese University of Hong Kong, Shatin, N.T., Hong Kong. E-mail: lyu@cse.cuhk.edu.hk",1
+"Department of Computer Science and Engineering, The Chinese University of Hong Kong, Shatin, N.T., Hong Kong. E-mail: jkzhu@cse.cuhk.edu.hk",1
+"Department of Computer Science and Engineering, The Chinese University of Hong Kong, Shatin, N.T., Hong Kong. E-mail: hbdeng@cse.cuhk.edu.hk",1
+"Electronics and Telecommunications Research Institute (ETRI), Republic of Korea",1
+"Xerox Research Center, Europe, France",1
+"Department of Electronic Engineering, State Key Laboratory of Intelligent Technology and Systems, Tsinghua National Laboratory for Information Science and Technology, Tsinghua University, Beijing, China",1
+"Science and Technology on Integrated Information System Laboratory, Institute of Software, Chinese Academy of Sciences, Beijing, China",1
+"Graduate School of Information Science and Engineering, Ritsumeikan University, Kusatsu, Japan",1
+"Department of Automation, State Key Laboratory of Intelligent Technologies and Systems and Tsinghua National Laboratory for Information Science and Technology (TNList), Tsinghua University, Beijing, China",1
+"Laboratory of Media Audio & Video, Communication University of China, Beijing, China",1
+CNRS LTCI; T&#x00E9;l&#x00E9;com ParisTech,1
+Institut Mines-T&#x00E9;l&#x00E9;com; T&#x00E9;l&#x00E9;com ParisTech; CNRS LTCI,1
+"School of Science, Southwest Petroleum University, Chengdu, China",1
+"Research Center for Learning Science, Southeast University, China",1
+"School of Computer Science and Engineering, Tianjin University of Technology, China",1
+"Department of Electrical and Computer Engineering, College of the Computer and Information Science, Northeastern University, Boston, MA, USA",1
+"Department of Computer Science and Engineering, University of Notre Dame, Notre Dame, IN, USA",1
+"Department of Computer and Information Science, University of Massachusetts Dartmouth, Dartmouth, MA, USA",1
+"Department of Computer Science, University of Brasília, DF, Brazil 70910-900",1
+"Department of Mechanical Engineering, University of Brasília, DF, Brazil 70910-900",1
+"Department of Neurosurgery, University of Pittsburgh, PA 15213, USA",1
+"Faculty of Computers and Information, Ain Shams University, Egypt",1
+"Faculty of Computers and Information, BeniSuef University, Egypt",1
+"LIAMA, French National Institute for Research in Computer Science and Control, Paris, France",1
+"Intel Laboratory China, Beijing, China",1
+"School of Computing, National University of Singapore",1
+"Institute for Infocomm Research, Singapore",1
+"Harbin Institute of Technology Shenzhen Graduate School, Shenzhen, China",1
+"Rapid-Rich Object Search Laboratory, Interdisciplinary Graduate School, Nanyang Technological University, Singapore",1
+"Department of Computer Science and Technology, College of Computer, National University of Defense Technology, Changsha, Hunan, China, 410073",1
+Sapienza Univertsity of Rome,1
+"Hohai University, No. 1 Xikang Road, Nanjing, Jiangsu Province, China",1
+"Institute of Intelligent Information Processing, Xidian University, Xi'an, China",1
+"College of Metropolitan Transportation, Beijing University of Technology, Beijing, China",1
+"School of Computer Science and Technology, Harbin Institute of Technology at Weihai, Weihai, China",1
+"School of Computer and Control Engineering, University of Chinese Academy of Sciences, Beijing, China",1
+"Institute of Computing Technology, Chinese Academy of Sciences, Key Laboratory of Intelligent Information Processing, Beijing, China",1
+"Institute of Computing Technology, CAS, No.6 Kexueyuan South Road, Beijing, 100080, China",1
+"School of Computer Science Carnegie Mellon University Pittsburgh, PA, 15213, USA",1
+"Dept. of Computer Science, Purdue University",1
+"Center of Image and Signal Processing, Faculty of Computer Science & Information Technology, University of Malaya, Kuala Lumpur, Malaysia",1
+"Language Technologies Institute, Carnegie Mellon University, 5000 Forbes Ave, Pittsburgh, PA, USA",1
+"Pudong Branch, China Mobile Group Shanghai, Company Limited, Shanghai, China",1
+"School of Mathematics and Statistics, The University of Western Australia, Nedlands, WA, Australia",1
+"Department of Computer Science and Engineering, Qatar University, Doha, Qatar",1
+"France Telecom - Orange Labs, Lannion, France",1
+"National Key Laboratory of Cognitive Neuroscience and Learning, Beijing Normal University, Beijing, China",1
+"School of Computer Science, China University of Geosciences, Wuhan, China",1
+"Department of Computer Science, Hong Kong Baptist University, Kowloon, Hong Kong",1
+Waseda University,1
+Wide Eyes Technologies,1
+"Department of Electrical and Computer Engineering, University of Illinois at Urbana—Champaign, Champaign, IL, USA",1
+ThyssenKrupp Elevator Americas,1
+"State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China",1
+"Dept. of Autom. Test & Control, Harbin Inst. of Technol., China",1
+"Department of Information and Control, B-DAT Laboratory, Nanjing University of Information Science and Technology, Nanjing, China",1
+"University of Maryland, College Park, USA",1
+"Institute of Engineering and Management, Kolkata, India",1
+"Inst. de Telecomunica&#x00E7;&#x00F5;es, Fac. de Ci&#x00EA;ncias da Universidade do Porto, Porto, Portugal",1
+"Peking University, Beijing",1
+"Siren Solutions, Dublin, Ireland",1
+"Paradigma Digital, Madrid, Spain",1
+"School of Mathematical Sciences, University of Science and Technology of China, Hefei, China",1
+"School of Computer Science and Technology, University of Science and Technology of China, Hefei, China",1
+"Department of Computer Science, Digital Image Processing Laboratory, Islamia College Peshawar, Peshawar, Pakistan",1
+"Department of Electrical and Computer Engineering, University of Calgary, Calgary, Alberta, Canada",1
+"Faculty of Applied Science, University of British Columbia, Vancouver, British Columbia, Canada",1
+"Rutgers University, 94 Brett Road, Piscataway, NJ 08854, United States of America",1
+"Volvo Car Corporation, SE-405 31 Göteborg, Sweden",1
+"Smart Eye AB, SE-413 27 Göteborg, Sweden",1
+"Department of Mathematics and Informatics, Ecole Centrale de Lyon, Lyon, 69134, France",1
+Toyota Research Institute - North America,1
+Griffith University,1
+"School of Computer, Beijing University of Posts and Telecommunications, Beijing, China",1
+"School of Information, Singapore Management University, Singapore",1
+"Agency for Science, Technology and Research, Singapore",1
+"School of Software, Beijing Institute of Technology, Beijing, China",1
+"Griffith School of Engineering, Queensland Research Laboratory, National ICT Australia, Griffith University, Nathan, Australia",1
+"Queensland Research Laboratory, National ICT Australia and Institute for Integrated and Intelligent Systems, Griffith University, Nathan, Australia",1
+"PRaDA, Deakin University, Australia",1
+"Department of Computer and Information Science, University of Pennsylvania, Philadelphia, PA",1
+"Neuropsychiatry Section, Department of Psychiatry, University of Pennsylvania",1
+"Department of Psychology, University of Illinois at Chicago, Chicago, IL",1
+"Department of Mathematics and Computer Science, Ursinus College, Collegeville, PA",1
+"Center for Quantum Computation and Intelligent Systems, Faculty of Engineering and Information Technology, University of Technology, Sydney, NSW, Australia",1
+"Chongqing University, China",1
+"University College London, UK, Dept. of Electronic and Electrical Engineering",1
+"School of Mathematical Sciences, DUT-RU International School of Information and Software Engineering, Dalian University of Technology, Dalian, China",1
+"Computing Department, Imperial College London, UK. M.Pantic@imperial.ic.ac.uk",1
+"Computing Department, Imperial College London, UK. M.F.Valstar@imperial.ic.ac.uk",1
+"Consiglio Nazionale delle Ricerche, Istituto di Calcolo e Reti ad Alte Prestazioni, Viale delle Scienze, 90128 Palermo, ITALY",1
+"Institute of Intelligent Machines, Chinese Academy of Sciences, Hefei, China",1
+"NTT Network Innovation Laboratories, Nippon Telegraph and Telephone Corp.",1
+"Faculty of Engineering, Tunku Abdul Rahman University College, Setapak, Malaysia",1
+"Faculty of Computing and Information Technology, Setapak, Malaysia",1
+"Dep. Inteligencia Artificial, U. Politécnica Madrid, Spain",1
+"Dep. Ciencias de la Computación, U. Rey Juan Carlos, Spain",1
+"Dep. Comp. Sci. and Engr., Fudan University, China",1
+"Computer Science Department, University of Maryland, College Park, MD, USA",1
+"Cernium Corporation, Reston, VA, USA",1
+"Computer Science Department, University of California, Los Angeles, CA, USA",1
+"Department of Computer and Information Science, Temple University, Philadelphia, PA, USA",1
+"Department of Electrical and Computer Engineering and the College of Computer and Information Science, Northeastern University, Boston, MA",1
+"Department of Electrical and Computer Engineering, Northeastern University, Boston, MA",1
+"North Acton, London",1
+"Imaging Science and Engineering Laboratory, Tokyo Institute of Technology, Kanagawa, Japan",1
+"Interdisciplinary Graduate School of Science and Engineering, Tokyo Institute of Technology, Kanagawa, Japan",1
+"Department of ECE, University of Dayton, Dayton, OH, USA",1
+"ODU Vision Lab, Old Dominion University, Norfolk, VA, USA",1
+"School of Mathematical Sciences, Dalian University of Technology, Dalian, China",1
+"School of Mathematics and Computer Sciences, Gannan Normal University, Ganzhou, China",1
+"RMIT University, Vietnam",1
+"Tolendata Singapore R&amp;D Centre Private Limited, Singapore",1
+"College of Computer Science &amp; Software Engineering, Shenzhen University, China 518060",1
+"Concordia Institute for Information Systems Engineering (CIISE), 1515 St. Catherine West, Montreal, Quebec H3G 2W1, CANADA",1
+"Dept. of Computer Science and Software Engineering, Concordia University, 1515 St. Catherine West, Montreal, Quebec H3G 2W1, CANADA",1
+"Department of Computer Science and Engineering, University of Notre Dame",1
+"Department of Computer Science, Pontificia Universidad Catolica de Chile",1
+"Centre for Quantum Computation and Intelligent Systems, Faculty of Engineering and Information Technology, University of Technology at Sydney, Sydney, NSW, Australia",1
+"School of Engineering, The University of Edinburgh, Edinburgh, U.K.",1
+"Harbin Institute of Technology, Harbin, China",1
+"College of Computing, Georgia Tech",1
+"Graduate School of Advanced Imaging Science, Multimedia, and Film, Chung-Ang University, Korea",1
+"Taxes Instruments, Dallas, TX, United States",1
+"Energy Research Institute @NTU (ERI@N), Interdisciplinary Graduate School, Nanyang Technological University, Singapore 639798",1
+"Visual Analysis of People Laboratory, Aalborg University, Aalborg, Denmark",1
+"Computer Vision Team, ARS Traffic & Transport Technology, Trivandrum, India",1
+"Computer Science Dept., Columbia University, USA",1
+"Computer Science Dept., SUNY Stony Brook, USA",1
+Rensselaer Polytechnic Institute,1
+"School of Mathematical and Physical Sciences at the University of Newcastle, Callaghan, NSW 2308, Australia",1
+"School of Computer Science and Technology, Xiamen University, Xiamen, China",1
+"Collaborative Innovation Center for Geospatial Information Technology, Wuhan, China",1
+"Department of Electrical Engineering, Indian Institute of Science, C.V. Raman Avenue, Bangalore, KA 560-012, India",1
+"School of Software, Shenyang University of Technology, Shenyang, China",1
+"Department of Internal Medicine, Chung-Ang University, Seoul, South Korea",1
+"Department of Data Science, Dankook University, Yongin, South Korea",1
+"School of Engineering of UABC, University of Baja California, Tijuana, Mexico",1
+"Department of Electrical and Electronic Engineering, Nazarbayev University, Astana, Kazakhstan",1
+"University of Electronic Science and Technology of China, Chendu, China",1
+"Inception Institute of Artificial Intelligence, Abu Dhabi, United Arab Emirates",1
+"School of Electronics and Information Engineering, Beihang University, Beijing, China",1
+"College of Computer Science, Guangdong University of Petrochemical Technology, Maoming, China",1
+"Department of Electrical, Computer, and Systems Engineering, Rensselaer Polytechnic Institute, Troy, NY, USA, 12180",1
+"Department of Computer Engineering, Istanbul University, Istanbul, Turkey",1
+"Department of Computer Engineering, Bah&#x00E7;e&#x015F;ehir University, Istanbul, Turkey",1
+"National Institute of Standards and Technology, 100 Bureau Drive, Gaithersburg, MD 20899, USA",1
+"Col. of Comp. Sci. and Comm. Eng., Jiangsu University, Zhenjiang, China",1
+"School of Electronic and Information Engineering, Ningbo University of Technology, Ningbo, China",1
+"Center for Automation Research, University of Maryland, College Park, MD 20742, USA",1
+"School of Mathematics, Jilin University, China",1
+"Department of Computer Science, Memorial University of Newfoundland, Canada",1
+"School of Computer Science, Carnegie Mellon University, Pittsburgh, PA, 15213, USA",1
+"Dept. of Computer Science, Purdue University, West Lafayette, IN, 47907, USA",1
+"Griffith University, Brisbane",1
+"Griffith University, Brisbane and University of the South Pacific, Fiji",1
+Vision Semantics Ltd,1
+"Department of Electronic Engineering, City University of Hong Kong, Kowloon, Hong Kong",1
+"Concordia Institute for Information Systems Engineering Concordia University, Montreal, Canada",1
+"Department of Computer Science and Technology, Huaqiao University, Xiamen, China",1
+"Université des Antilles et de la Guyane (UAG), France",1
+"Institut des Systèmes intelligents et de Robotique, UPMC, France",1
+"School of Computer Science and Information Engineering, Shanghai Institute of Technology, Shanghai, China",1
+"College of Computer Science and Information Technology, Northeast Normal University, Changchun, China",1
+"College of Information Science and Engineering, Northeastern University, Shenyang, China",1
+"Dept. of Artificial Intelligence, Faculty of Computer Engineering, University of Isfahan, Iran",1
+"Department of Information Processing Interdisciplinary Graduate School of Science and Engineering, Tokyo Institute of Technology Yokohama 226-8503, Japan",1
+"School of Computer Science and Engineering, University of New South Wales, Sydney, NSW, Australia",1
+"School of Information Technology and Electrical Engineering, University of Queensland, St Lucia, QLD, Australia",1
+"Microsoft, Bellevue, WA, USA",1
+"M5001, Department of Computer Science, City University of Hong Kong, Kowloon, Hong Kong",1
+"Department of Computer Science, University of Texas, San Antonio, TX, USA",1
+"School of Electrical and Computer Engineering, Oklahoma State University, Stillwater, OK, USA",1
+"Imperial College London, London, U.K.",1
+"Alcohol Countermeasure Systems Corporation, Toronto, ON, Canada",1
+"Institute of Information and System Sciences, Faculty of Mathematics and Statistics, Xi’an Jiaotong University, Xi’an, China",1
+"Research Division, Educational Testing Service, Princeton, NJ, USA",1
+"Key Laboratory of Machine Intelligence and Advanced Computing, Ministry of Education, Sun Yat-sen University, Guangzhou, China",1
+"Division of Biomedical Engineering, Hong Kong University of Science and Technology, Kowloon, Hong Kong SAR",1
+"Department of Electronic and Computer Engineering, Hong Kong University of Science and Technology, Kowloon, Hong Kong SAR",1
+"Kumamoto University, Kumamoto, Japan",1
+"Center for Research on Intelligent Perception and Computing (CRIPAC), NLPR, CASIA, Beijing, China",1
+"National Taichung University of science and Technology, Taichung",1
+"University of Technology Sydney, Sydney, NSW, Australia",1
+"SAP Innovation Center Network, Singapore",1
+"Agency for Science, Technology and Research, Institute of High Performance Computing, Singapore",1
+"Center for Unified Biometrics and Sensors, University at Buffalo, NY, USA. venu@cedar.buffalo.edu",1
+"Center for Unified Biometrics and Sensors, University at Buffalo, NY, USA. zhizhang@cedar.buffalo.edu",1
+"CUBRC, Buffalo, NY, USA. slowe@cubrc.org",1
+"Center for Unified Biometrics and Sensors, University at Buffalo, NY, USA. tulyakov@cedar.buffalo.edu",1
+"School of Computer and Information Engineering, Xiamen University of Technology, Xiamen 361024, China",1
+"Department of Computing, University of Surrey, Guildford, Surrey, GU2 7XH, UK",1
+"Shenzhen College of Advanced Technology, University of Chinese Academy of Sciences, China",1
+"LAMIA, EA 4540, University of French West Indies &amp; Guyana",1
+"Institut Telecom - Telecom ParisTech CNRS/LTCI, Paris",1
+"ITI Department Telecom Bretagne, Brest, France",1
+"Adobe Systems Incorporated, San Jose, CA, 95110",1
+"University of Technology at Sydney, Sydney, NSW, Australia",1
+"College of Engineeing & Informatics, National University of Ireland Galway, Galway, Ireland",1
+"Faculty of Electronics and Telecommunications “POLITEHNICA” University from Timişoara Timişoara, România",1
+"College of Humanities, Jiangxi University of Traditional Chinese Medicine, Nanchang, 330004, China",1
+"Sch. of Electr. & Electron. Eng., Nanyang Technol. Univ., Singapore",1
+"Bahcesehir University, Istanbul, Turkey",1
+"University of Lincoln, U. K.",1
+"School of Mathematics and Computer Science, Quanzhou Normal University, Quanzhou, China",1
+"Department of Electrical Engineering, Chang Gung University, Taipei, Taiwan",1
+"School of Information Technology, Monash University Malaysia, Bandar Sunway, Malaysia",1
+"College of Engineering, Huaqiao University, Fujian, China",1
+"Department of Electrical Engineering and Information Technology, TU Darmstadt, D-64283, Germany",1
+"School of Computer Science and Software Engineering, University of Wollongong, Wollongong, Australia",1
+"Defence Science and Technology Organisation (DSTO), Edinburgh, Australia",1
+Reallusion Corporation,1
+National Taiwan Normal University,1
+University College London,1
+"Research Center for Institute of Information Science, Academia Sinica, Taiwan",1
+"Department of Computer Science and Information Engineering, National Taiwan University",1
+"Dept. of EE, Univ. at Buffalo, SUNY, USA",1
+"Istanbul Technical University, Computer Engineering Department, 34469, Turkey",1
+"Department of Electronic Engineering, City University of Hong Kong, Hong Kong",1
+"School of Information Technology and Electrical Engineering, the University of Queensland, Brisbane, Qld, Australia",1
+"School of Computer Science and Engineering, University of Electronic Science and Technology of China, Chengdu",1
+"University of Electronic Science and Technology of China, Chengdu",1
+"IBM Research, Singapore",1
+"Center for Applied Mathematics, Tianjin University, Tianjin, China",1
+"Department of Mathematics, School of Science, Tianjin University, Tianjin, China",1
+"Faculty of Applied Mathematics, Shanxi University of Finance and Economics",1
diff --git a/scraper/reports/doi_institutions.html b/scraper/reports/doi_institutions.html
new file mode 100644
index 00000000..8846cd56
--- /dev/null
+++ b/scraper/reports/doi_institutions.html
@@ -0,0 +1 @@
+<!doctype html><html><head><meta charset='utf-8'><title>Institutions from IEEE</title><link rel='stylesheet' href='reports.css'></head><body><h2>Institutions from IEEE</h2><table border='1' cellpadding='3' cellspacing='3'><tr><td><b>School of Electrical and Electronic Engineering, Nanyang Technological University, Singapore</b></td><td>37</td></tr><tr><td><b>Department of Informatics, Aristotle University of Thessaloniki, Thessaloniki, Greece</b></td><td>29</td></tr><tr><td><b>National Taiwan University, Taipei, Taiwan Roc</b></td><td>26</td></tr><tr><td><b>Department of Electrical and Computer Engineering, National University of Singapore, Singapore</b></td><td>21</td></tr><tr><td><b>Fudan University, Shanghai, China</b></td><td>21</td></tr><tr><td><b>National University of Singapore, Singapore, Singapore</b></td><td>20</td></tr><tr><td><b>Universität Hamburg, Hamburg, Germany</b></td><td>19</td></tr><tr><td><b>School of Computer Engineering, Nanyang Technological University, Singapore</b></td><td>19</td></tr><tr><td><b>School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, China</b></td><td>18</td></tr><tr><td><b>South China University of Technology, Guangzhou, China</b></td><td>16</td></tr><tr><td><b>Department of Automation, Tsinghua University, Beijing, China</b></td><td>16</td></tr><tr><td><b>School of Computer Science and Engineering, University of Electronic Science and Technology of China, Chengdu, China</b></td><td>15</td></tr><tr><td><b>National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences</b></td><td>14</td></tr><tr><td><b>College of Computer Science and Technology, Zhejiang University, Hangzhou, China</b></td><td>14</td></tr><tr><td><b>National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, Beijing, China</b></td><td>14</td></tr><tr><td><b>Beijing University of Posts and Telecommunications, 
China</b></td><td>13</td></tr><tr><td><b>School of Computer Science and Technology, Nanjing University of Science and Technology, Nanjing, China</b></td><td>12</td></tr><tr><td><b>Institute of Automation, Chinese Academy of Sciences, Beijing, China</b></td><td>12</td></tr><tr><td><b>Universit&#x00E9; de Lyon, CNRS, INSA-Lyon, LIRIS, UMR5205, F-69621, France</b></td><td>12</td></tr><tr><td><b>Zhejiang University, Hangzhou, China</b></td><td>12</td></tr><tr><td><b>School of Computer Science and Technology, Tianjin University, Tianjin, China</b></td><td>12</td></tr><tr><td><b>College of Computer Science, Sichuan University, Chengdu, China</b></td><td>12</td></tr><tr><td><b>College of Computer Science, Zhejiang University, Hangzhou, China</b></td><td>12</td></tr><tr><td><b>Department of Information Engineering and Computer Science, University of Trento, Trento, Italy</b></td><td>11</td></tr><tr><td><b>National University of Singapore, Singapore</b></td><td>11</td></tr><tr><td><b>Harbin Institute of Technology, Harbin, China</b></td><td>11</td></tr><tr><td><b>Department of Electrical Engineering, Korea Advanced Institute of Science and Technology, Daejeon, Korea</b></td><td>11</td></tr><tr><td><b>State Key Laboratory of Management and Control of Complex Systems, CASIA, Beijing, China</b></td><td>10</td></tr><tr><td><b>Shanghai Jiao Tong University</b></td><td>10</td></tr><tr><td><b>Stanford University</b></td><td>10</td></tr><tr><td><b>School of Computing, National University of Singapore, Singapore</b></td><td>10</td></tr><tr><td><b>Northeastern University, Boston, MA, USA</b></td><td>10</td></tr><tr><td><b>University of Maryland, College Park</b></td><td>10</td></tr><tr><td><b>School of Computer Science and Engineering, Nanyang Technological University, Singapore</b></td><td>10</td></tr><tr><td><b>Peking University, Beijing, China</b></td><td>10</td></tr><tr><td><b>Department of Computer Engineering, Kyung Hee University, South 
Korea</b></td><td>9</td></tr><tr><td><b>Dept. of Computer Science and Information Engineering, National Central University, Jhongli, Taiwan</b></td><td>9</td></tr><tr><td><b>Noblis, Falls Church, VA, U.S.A.</b></td><td>9</td></tr><tr><td><b>School of Electronic Information Engineering, Tianjin University, Tianjin, China</b></td><td>9</td></tr><tr><td><b>Shanghai Jiao Tong University, Shanghai, China</b></td><td>9</td></tr><tr><td><b>Beihang University, Beijing, China</b></td><td>9</td></tr><tr><td><b>National University of Ireland Galway, Galway, Ireland</b></td><td>9</td></tr><tr><td><b>Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China</b></td><td>9</td></tr><tr><td><b>School of Electrical and Electronic Engineering, College of Engineering, Yonsei University, Seoul, South Korea</b></td><td>9</td></tr><tr><td><b>Department of Electrical Engineering, National Taiwan University of Science and Technology, Taipei, Taiwan</b></td><td>9</td></tr><tr><td><b>Singapore Management University, Singapore, Singapore</b></td><td>8</td></tr><tr><td><b>P.G. 
Demidov Yaroslavl State University, Yaroslavl, Russia</b></td><td>8</td></tr><tr><td><b>Faculty of Electrical Engineering and Information Technology, Slovak University of Technology, Bratislava, Bratislava, Slovakia</b></td><td>8</td></tr><tr><td><b>National Laboratory of Pattern Recognition, Center for Research on Intelligent Perception and Computing, Institute of Automation, Chinese Academy of Sciences, Beijing, China</b></td><td>8</td></tr><tr><td><b>Department of Electronic and Information Engineering, The Hong Kong Polytechnic University</b></td><td>8</td></tr><tr><td><b>CAS Center for Excellence in Brain Science and Intelligence Technology; National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences; University of Chinese Academy of Sciences, Beijing, China</b></td><td>8</td></tr><tr><td><b>Institute of Computer Science and Technology, Peking University, Beijing, P.R. China, 100871</b></td><td>8</td></tr><tr><td><b>School of Information and Communication Engineering, Beijing University of Posts and Telecommunications, Beijing, China</b></td><td>8</td></tr><tr><td><b>Institute of Image Processing and Pattern Recognition, Shanghai Jiao Tong University, Shanghai, China</b></td><td>8</td></tr><tr><td><b>Tsinghua University, Beijing, China</b></td><td>8</td></tr><tr><td><b>State Key Laboratory of Intelligent Technology and Systems, Tsinghua National Laboratory for Information Science and Technology, Department of Electronic Engineering, Tsinghua University, Beijing 100084, China</b></td><td>8</td></tr><tr><td><b>University of Chinese Academy of Sciences, Beijing, China</b></td><td>8</td></tr><tr><td><b>Arizona State University, Tempe, AZ, USA</b></td><td>8</td></tr><tr><td><b>Department of Computing, Imperial College London, London, U.K.</b></td><td>8</td></tr><tr><td><b>Samsung R&amp;D Institute, China</b></td><td>8</td></tr><tr><td><b>University of Texas at San Antonio, San Antonio, TX, 
USA</b></td><td>8</td></tr><tr><td><b>Department of Computer Science and Engineering, Shanghai Jiao Tong University, China</b></td><td>8</td></tr><tr><td><b>IIIT-Delhi, India</b></td><td>7</td></tr><tr><td><b>University of Texas at Arlington, Arlington, TX, USA</b></td><td>7</td></tr><tr><td><b>National Key Laboratory of Fundamental Science on Synthetic Vision, College of Computer Science, Sichuan University, China</b></td><td>7</td></tr><tr><td><b>Huazhong University of Science and Technology, Wuhan, China</b></td><td>7</td></tr><tr><td><b>Bio-Computing Research Center, Shenzhen Graduate School, Harbin Institute of Technology, Shenzhen, China</b></td><td>7</td></tr><tr><td><b>Stony Brook University, Stony Brook University, NY 11794, USA</b></td><td>7</td></tr><tr><td><b>CyLab Biometrics Center and the Department of Electrical and Computer Engineering (ECE), Carnegie Mellon University, Pittsburgh, USA</b></td><td>7</td></tr><tr><td><b>School of Electronic and Information Engineering, Beihang University, Beijing, China</b></td><td>7</td></tr><tr><td><b>Faculty of Information Science and Technology, Multimedia University, Melaka, Malaysia</b></td><td>7</td></tr><tr><td><b>State Key Laboratory of Integrated Services Networks, Xidian University, Xi’an, China</b></td><td>7</td></tr><tr><td><b>School of Electronic Engineering, University of Electronic Science and Technology of China, Chengdu, China</b></td><td>7</td></tr><tr><td><b>Department of Computer Science, Jiangnan University, No. 
1800 LiHu Avenue, WuXi, China</b></td><td>7</td></tr><tr><td><b>College of Computer Science and Technology, Zhejiang University of Technology, Hangzhou, China</b></td><td>7</td></tr><tr><td><b>Institute of Automation, Chinese Academy of Sciences, China</b></td><td>7</td></tr><tr><td><b>School of Computer and Information Technology, Beijing Jiaotong University, Beijing, China</b></td><td>7</td></tr><tr><td><b>Ulm University, Ulm, Germany</b></td><td>7</td></tr><tr><td><b>Center for Automation Research, UMIACS, University of Maryland, College Park, 20740, United States of America</b></td><td>7</td></tr><tr><td><b>Beijing University of Posts and Telecommunications, Beijing, 100876, China</b></td><td>7</td></tr><tr><td><b>Department of Electronic Engineering, Tsinghua University, Beijing, China</b></td><td>7</td></tr><tr><td><b>Visual Computing Group, Microsoft Research, Beijing, China</b></td><td>7</td></tr><tr><td><b>School of Electrical Engineering, Korea Advanced Institute of Science and Technology, Daejeon, South Korea</b></td><td>7</td></tr><tr><td><b>Samsung Advanced Institute of Technology, Korea</b></td><td>7</td></tr><tr><td><b>Indraprastha Institute of Information Technology Delhi, Delhi, India</b></td><td>7</td></tr><tr><td><b>Southeast University, China</b></td><td>6</td></tr><tr><td><b>Department of Electrical Engineering, KAIST, Daejeon, Korea</b></td><td>6</td></tr><tr><td><b>Colorado State University, Fort Collins</b></td><td>6</td></tr><tr><td><b>Indian Institute of Technology (BHU) Varanasi, India</b></td><td>6</td></tr><tr><td><b>Department of Information Engineering and Computer Science, University of Trento, Italy</b></td><td>6</td></tr><tr><td><b>College of Information Technical Science, NanKai University, CITS, TianJin, China</b></td><td>6</td></tr><tr><td><b>SAIT India, Samsung India Software Operations Pvt. 
Ltd (SISO), Bangalore, India, 560093</b></td><td>6</td></tr><tr><td><b>Fudan University, Shang Hai, China</b></td><td>6</td></tr><tr><td><b>State University of New York at Binghamton, USA</b></td><td>6</td></tr><tr><td><b>Department of Electrical and Computer Engineering, National University of Singapore, Singapore, Singapore</b></td><td>6</td></tr><tr><td><b>Computer Science, U.Illinois at Urbana Champaign, Urbana, United States</b></td><td>6</td></tr><tr><td><b>Indian Statistical Institute, Kolkata</b></td><td>6</td></tr><tr><td>NC A&T State University, Greensboro, NC, USA</td><td>6</td></tr><tr><td><b>Department of Computer Science, Università degli Studi di Milano, Italy</b></td><td>6</td></tr><tr><td><b>College of Information Science and Engineering, Northeastern University, Shenyang, 110819, PR China</b></td><td>6</td></tr><tr><td><b>Wuyi University, Jiangmen, China</b></td><td>6</td></tr><tr><td><b>Advanced Digital Sciences Center, Singapore</b></td><td>6</td></tr><tr><td><b>School of Electrical, Computer and Energy Engineering, Arizona State University, Tempe, AZ, USA</b></td><td>6</td></tr><tr><td><b>Dept. of Computer Science, Johns Hopkins University, 3400 N. 
Charles St, Baltimore, MD 21218, USA</b></td><td>6</td></tr><tr><td><b>Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China</b></td><td>6</td></tr><tr><td><b>School of Communication and Information Engineering, University of Electronic Science and Technology of China, Chengdu, China</b></td><td>6</td></tr><tr><td><b>Key Lab of Intelligent Perception and Image Understanding of Ministry of Education of China, Xidian University, Xi'an, China</b></td><td>6</td></tr><tr><td><b>School of Computer Science and Technology, Huazhong University of Science and Technology, Wuhan, China</b></td><td>6</td></tr><tr><td><b>University of Ljubljana, Ljubljana, Slovenia</b></td><td>6</td></tr><tr><td><b>University of Notre Dame, Notre Dame, IN, USA</b></td><td>6</td></tr><tr><td><b>DIA, University of Trieste, Italy</b></td><td>6</td></tr><tr><td><b>Beijing Normal University, China</b></td><td>6</td></tr><tr><td><b>The University of Queensland, Brisbane, Australia</b></td><td>6</td></tr><tr><td><b>University of Houston</b></td><td>6</td></tr><tr><td><b>Shanghai Key Laboratory of Multidimensional Information Processing, East China Normal University, Shanghai, China</b></td><td>6</td></tr><tr><td><b>School of Information and Control Engineering, China University of Mining and Technology, Xuzhou, China</b></td><td>6</td></tr><tr><td><b>Center for Machine Vision and Signal Analysis, University of Oulu, Finland</b></td><td>6</td></tr><tr><td>School of Information and Software Engineering, University of Electronic Science and Technology of China (UESTC), Chengdu, 610054, China P.R.C</td><td>6</td></tr><tr><td><b>State Key Laboratory of Management and Control for Complex Systems, Institute of Automation, Chinese Academy of Sciences, Beijing, China</b></td><td>6</td></tr><tr><td><b>Department of Automation, University of Science and Technology of China, Hefei, China</b></td><td>6</td></tr><tr><td><b>Department of Mechanical Engineering, Faculty of Engineering, 
National University of Singapore, 117576, Singapore</b></td><td>6</td></tr><tr><td><b>Shanghai Advanced Research Institute, Chinese Academy of Sciences, Shanghai, China</b></td><td>6</td></tr><tr><td><b>IIT Guwahati, Guwahati, India</b></td><td>6</td></tr><tr><td><b>School of Software, Dalian University of Technology, Dalian, China</b></td><td>6</td></tr><tr><td><b>Department of Computer Science and Engineering, Varendra University, Rajshahi, Bangladesh</b></td><td>6</td></tr><tr><td><b>Indraprastha Institute of Information Technology Delhi, New Delhi, India</b></td><td>6</td></tr><tr><td><b>School of Computer and Information, Hefei University of Technology, Hefei, China</b></td><td>6</td></tr><tr><td><b>Key Lab of Computing and Communication Software of Anhui Province, School of Computer Science and Technology, University of Science and Technology of China, Hefei, China, 230027</b></td><td>6</td></tr><tr><td><b>Chongqing Key Laboratory of Computational Intelligence, Chongqing University of Posts and Telecommunications, Chongqing 400065, PR China</b></td><td>6</td></tr><tr><td><b>Queen Mary University of London, UK</b></td><td>6</td></tr><tr><td><b>Department of Computing, The Hong Kong Polytechnic University, Hong Kong</b></td><td>6</td></tr><tr><td><b>Facultad de Ingeniería, Universidad de la República, Montevideo, Uruguay</b></td><td>6</td></tr><tr><td><b>School of Software, Dalian University of Technology, China 116620</b></td><td>6</td></tr><tr><td><b>School of Computer Science &amp; Technology, Harbin Institute of Technology</b></td><td>6</td></tr><tr><td><b>School of Computer Science and Engineering, South China University of Technology, Guangzhou, China</b></td><td>6</td></tr><tr><td><b>Microsoft Res. 
Asia, Beijing, China</b></td><td>5</td></tr><tr><td><b>LUNAM Université, LIUM, Le Mans, France</b></td><td>5</td></tr><tr><td><b>Computer Science and Artificial Intelligence Laboratory, Massachusetts Institute of Technology, Cambridge, MA, USA</b></td><td>5</td></tr><tr><td><b>School of Electronics and Information, Northwestern Polytechnical University</b></td><td>5</td></tr><tr><td>Electronics and Telecommunications Research Institute, Korea</td><td>5</td></tr><tr><td><b>Institute for Microsensors, Actuators and Systems, University of Bremen, Bremen, Germany</b></td><td>5</td></tr><tr><td><b>Center for Research on Intelligent Perception and Computing, Institute of Automation, Chinese Academy of Sciences, Beijing, P.R. China, 100190</b></td><td>5</td></tr><tr><td><b>Nokia Research Center, Beijing</b></td><td>5</td></tr><tr><td><b>College of Computer Science, Zhejiang University of Technology, Hangzhou, China</b></td><td>5</td></tr><tr><td><b>Frontier Research Group, Samsung India Software Operations, India</b></td><td>5</td></tr><tr><td><b>Faculty of Information Technology, Beijing University of Technology, Beijing, China</b></td><td>5</td></tr><tr><td><b>Elektrik-Elektronik Mühendisliği Bölümü, Bahçeşehir Üniversitesi, İstanbul, Türkiye</b></td><td>5</td></tr><tr><td><b>School of Computer Science, Carnegie Mellon University, Pittsburgh, PA, USA</b></td><td>5</td></tr><tr><td><b>IIIT-Delhi</b></td><td>5</td></tr><tr><td><b>Georgia Institute of Technology</b></td><td>5</td></tr><tr><td><b>School of Electrical Engineering, Korea Advanced Institute of Science and Technology (KAIST), Daejeon, Republic of Korea</b></td><td>5</td></tr><tr><td><b>Institute of Artificial Intelligence and Robotics, Xi’an Jiaotong University, Xi’an, China</b></td><td>5</td></tr><tr><td><b>State Key Laboratory of Management and Control for Complex Systems, Institute of Automation, Chinese Academy of Science, Beijing, 100190, China</b></td><td>5</td></tr><tr><td><b>Human Language Technology 
Center of Excellence, The Johns Hopkins University, Baltimore, MD, 21218, USA</b></td><td>5</td></tr><tr><td><b>Department of Electronic Engineering/Graduate School at Shenzhen, Tsinghua University, China</b></td><td>5</td></tr><tr><td><b>Dalian University of Technology, China</b></td><td>5</td></tr><tr><td><b>Chinese Academy of Sciences</b></td><td>5</td></tr><tr><td><b>Nanyang Technological University, Singapore</b></td><td>5</td></tr><tr><td><b>College of Information Science and Technology, Beijing Normal University, Beijing, P.R. China</b></td><td>5</td></tr><tr><td><b>Visea İnovatif Bilgi Teknolojileri, ETGB Teknoparkı, Eskişehir, Türkiye</b></td><td>5</td></tr><tr><td><b>Ocean University of China, Department of Educational Technology, Qingdao, China</b></td><td>5</td></tr><tr><td><b>Chongqing Institute of Green and Intelligent Technology, Chinese Academy of Sciences, China</b></td><td>5</td></tr><tr><td><b>Disney Research, UK</b></td><td>5</td></tr><tr><td>Asian Institute of Technology (AIT), Pathum Thani 12120, Thailand</td><td>5</td></tr><tr><td>Chonnam National University, Gwangju, Korea</td><td>5</td></tr><tr><td><b>Department of Computer Science and Engineering, Michigan State University, East Lansing, MI</b></td><td>5</td></tr><tr><td><b>Microsoft Research Asia, Beijing, China</b></td><td>5</td></tr><tr><td><b>Carnegie Mellon University, ForbesAvenue, Pittsburgh PA</b></td><td>5</td></tr><tr><td><b>Telecommun. & Ind. 
Phys., CSIRO, Epping, NSW, Australia</b></td><td>5</td></tr><tr><td><b>Centre for Health Technologies, Faculty of Engineering and Information Technology, University of Technology, Sydney, New South Wales, Australia</b></td><td>5</td></tr><tr><td><b>Chongqing Institute of Green and Intelligent Technology, Chinese Academy of Sciences</b></td><td>5</td></tr><tr><td><b>Pattern Recognition and Intelligent System Laboratory, School of Information and Communication Engineering, Beijing University of Posts and Telecommunications, Beijing, China</b></td><td>5</td></tr><tr><td><b>Artificial Vision Laboratory, National Taiwan University of Science and Technology, Taipei, Taiwan</b></td><td>5</td></tr><tr><td><b>Hangzhou Dianzi University, Hangzhou, China</b></td><td>5</td></tr><tr><td><b>Department of Automation, Shanghai Jiao Tong University, Shanghai, China</b></td><td>5</td></tr><tr><td><b>Department of Informatics, Aristotle University of Thessaloniki, Thessaloniki, 54124, Greece</b></td><td>5</td></tr><tr><td><b>University of Trento, Italy, Via Sommarive, Trento (Italy)</b></td><td>5</td></tr><tr><td><b>Biometric Recognition Group - ATVS, Escuela Politecnica Superior, Universidad Autonoma de Madrid, Avda. 
Francisco Tomas y Valiente, 11 - Campus de Cantoblanco - 28049, Spain</b></td><td>5</td></tr><tr><td><b>Institute of Microelectronics, Tsinghua University, Beijing, China</b></td><td>5</td></tr><tr><td>Department of Computer Science and Engineering, National Institute of Technology, Durgapur, India</td><td>5</td></tr><tr><td><b>DUT-RU International School of Information &amp; Software Engineering, Dalian University of Technology</b></td><td>5</td></tr><tr><td><b>East China Normal University, Shanghai, China</b></td><td>5</td></tr><tr><td><b>Department of Software Engineering, College of Computer and Information Sciences, King Saud University, Riyadh, Saudi Arabia</b></td><td>5</td></tr><tr><td><b>Department of Information Science and Engineering, Ritsumeikan University, Shiga, Japan</b></td><td>5</td></tr><tr><td><b>Carnegie Mellon University, Pittsburgh, PA, USA</b></td><td>5</td></tr><tr><td><b>Department of Automation, State Key Lab of Intelligent Technologies and Systems and Tsinghua National Laboratory for Information Science and Technology (TNList), Tsinghua University, Beijing, China</b></td><td>5</td></tr><tr><td>Chongqing Institute of Green and Intelligent Technology, CAS, Chongqing, 400714</td><td>5</td></tr><tr><td><b>Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences, Shenzhen, China</b></td><td>5</td></tr><tr><td><b>School of Data and Computer Science, Sun Yat-Sen University, China</b></td><td>5</td></tr><tr><td><b>Centre of Development of Advanced Computing (CDAC) Mumbai, 400049, India</b></td><td>5</td></tr><tr><td><b>National Institute of Informatics, Tokyo, Japan</b></td><td>5</td></tr><tr><td><b>University of Southern California</b></td><td>5</td></tr><tr><td><b>Chongqing Institute of Technology, China</b></td><td>5</td></tr><tr><td><b>Center for Biometrics and Security Research &amp; National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, Beijing, 
China</b></td><td>5</td></tr><tr><td><b>Northwestern Polytechnical University, Xi'an Shaanxi, China</b></td><td>5</td></tr><tr><td><b>Center for Biometrics and Security Research &amp; National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, 95 Zhongguancun East Road, Haidian District, Beijing, China</b></td><td>5</td></tr><tr><td><b>Chinese Academy of Sciences, Beijing, China</b></td><td>5</td></tr><tr><td>SIAT at Chinese Academy of Sciences, China</td><td>5</td></tr><tr><td><b>IBM China Research Laboratory, Beijing, China</b></td><td>5</td></tr><tr><td><b>Stanford University, Stanford, CA, USA</b></td><td>5</td></tr><tr><td><b>University of California, Merced</b></td><td>5</td></tr><tr><td><b>Tsinghua National Laboratory for Information Science and Technology Institute of Microelectronics, Tsinghua University, Beijing, China</b></td><td>5</td></tr><tr><td>Department of Electronics and Telecommunications, Politecnico di Torino, Torino, Italy</td><td>5</td></tr><tr><td><b>Hefei University of Technology, Hefei, China</b></td><td>5</td></tr><tr><td><b>Department of Computer Science, Xiamen University, Xiamen, P. R. 
China</b></td><td>5</td></tr><tr><td>University of Southern California Institute for Creative Technologies, Los Angeles, CA</td><td>5</td></tr><tr><td><b>University of Maryland, College Park, MD, USA</b></td><td>5</td></tr><tr><td><b>School of Electrical and Electronic Engineering, Nanyang Technological University, Singapore 639798</b></td><td>5</td></tr><tr><td><b>The Institute of Optics and Electronics Chinese Academy of Sciences, University of the Chinese Academy of Sciences, Chengdu, China</b></td><td>5</td></tr><tr><td><b>Lane Department of Computer Science and Electrical Engineering, West Virginia University, Morgantown, WV, USA</b></td><td>5</td></tr><tr><td><b>Idiap Research Institute, Martigny, Switzerland</b></td><td>5</td></tr><tr><td><b>NICTA, PO Box 6020, St Lucia, QLD 4067, Australia</b></td><td>5</td></tr><tr><td><b>College of Information and Control Engineering, China University of Petroleum, Qingdao, 266580, China</b></td><td>5</td></tr><tr><td>Department of Electronics and Telecommunication Engineering, Don Bosco Institute of Technology, Kurla (W), Mumbai, India</td><td>5</td></tr><tr><td><b>Dept. 
of Electrical and Computer Engineering, University of Illinois at Urbana-Champaign, USA</b></td><td>5</td></tr><tr><td><b>School of Information Technology and Electrical Engineering, The University of Queensland, Brisbane, Australia</b></td><td>5</td></tr><tr><td><b>School of Computer Science and Technology, Harbin Institute of Technology, Harbin, China</b></td><td>5</td></tr><tr><td><b>School of Electronic Engineering, Xidian University, Xi'an, China</b></td><td>5</td></tr><tr><td><b>University of Science and Technology of China, Hefei, Anhui, China</b></td><td>5</td></tr><tr><td>R V College of Engineering, Department of Computer Science and Engineering, Bangalore, India</td><td>5</td></tr><tr><td><b>Centre for Machine Vision Research, University of Oulu, Finland</b></td><td>5</td></tr><tr><td><b>Knowledge Technology Institute, Department of Informatics, University of Hamburg, Hamburg, Germany</b></td><td>5</td></tr><tr><td><b>School of Electrical Engineering Department, Korea University, Rep. of Korea</b></td><td>5</td></tr><tr><td>Inst. Nat. des Telecommun., Evry, France</td><td>5</td></tr><tr><td><b>University of Trento, Trento, Italy</b></td><td>5</td></tr><tr><td><b>National Science and Technology Development Agency, National Electronics and Computer Technology Center, Pathum Thani, 12120, Thailand</b></td><td>4</td></tr><tr><td><b>Dalian University of Technology, Dalian, Liaoning, 116024, China</b></td><td>4</td></tr><tr><td><b>School of Engineering &amp; Applied Science, Ahmedabad University, Gujarat, India 380009</b></td><td>4</td></tr><tr><td>Shanghai Jiao Tong University School of Electronic Information and Electrical Engineering</td><td>4</td></tr><tr><td><b>University of Technology, Sydney, P.O. 
Box 123, Broadway, NSW, 2007, Australia</b></td><td>4</td></tr><tr><td><b>The Australian Centre for Visual Technologies, The university of Adelaide</b></td><td>4</td></tr><tr><td><b>University of the Western Cape, Bellville, Western Cape</b></td><td>4</td></tr><tr><td><b>School of Engineering and Computer Science, Victoria University of Wellington, PO Box 600, 6140, New Zealand</b></td><td>4</td></tr><tr><td><b>Tsinghua University</b></td><td>4</td></tr><tr><td><b>National Taiwan University, Taipei, Taiwan</b></td><td>4</td></tr><tr><td><b>School of Computer Science &amp; Technology, Nanjing University of Science and Technology, China</b></td><td>4</td></tr><tr><td><b>Department of Electrical and Computer Engineering, Rowan University, Glassboro, NJ- 08028</b></td><td>4</td></tr><tr><td><b>School of Electronic and Information Engineering, Xi'an Jiaotong University, Xi'an, China, 710049</b></td><td>4</td></tr><tr><td><b>Media Integration and Communication Center - MICC, University of Florence, Italy</b></td><td>4</td></tr><tr><td><b>School of Computer Science, University of the Witwatersrand, Johannesburg, South Africa</b></td><td>4</td></tr><tr><td>Department of Microelectornics and Computer Science, Lodz University of Technology, ul. Wolczanska 221/223, 90-924, Poland</td><td>4</td></tr><tr><td><b>School of Computer Science and Telecommunication Engineering, Jiangsu University, ZhenJiang, Jiangsu, 212013, P. R. 
China</b></td><td>4</td></tr><tr><td><b>Seoul Nat'l Univ.</b></td><td>4</td></tr><tr><td><b>School of Information and Communication Engineering, Beijing University of Posts and Telecommunications, Beijing, China 100876</b></td><td>4</td></tr><tr><td><b>Key Laboratory of Specialty Fiber Optics and Optical Access Networks, Shanghai University, China</b></td><td>4</td></tr><tr><td><b>Institute of Computer Science and Technology, Peking University, Beijing, China, 100871</b></td><td>4</td></tr><tr><td><b>Department of Electronic Engineering, Tsinghua University, Beijing 100084, China</b></td><td>4</td></tr><tr><td><b>School of Computer Science and Engineering, Nanjing University of Science and Technology, China</b></td><td>4</td></tr><tr><td><b>Faculty of electrical engineering, University of Ljubljana, Slovenia</b></td><td>4</td></tr><tr><td><b>Department of Information Management and Security, Korea University</b></td><td>4</td></tr><tr><td><b>Pattern Recognition and Intelligent System Lab (PRIS) Beijing University of Posts and Telecommunications, Beijing 100876, P. R. China</b></td><td>4</td></tr><tr><td><b>Institute of Intelligence Information Processing, Xidian University, Xi¿an, China, 710071</b></td><td>4</td></tr><tr><td><b>Research Center for Information Technology Innovation (CITI), Academia Sinica, Taipei, 115 Taiwan</b></td><td>4</td></tr><tr><td><b>University of Miami, Coral Gables, FL</b></td><td>4</td></tr><tr><td><b>Univ. 
Orléans, INSA CVL, PRISME EA 4229, Bourges, France</b></td><td>4</td></tr><tr><td><b>Institute of Systems and Robotics (ISR), University of Coimbra, Portugal</b></td><td>4</td></tr><tr><td><b>Department of Computer Science, City University of Hong Kong, Kowloon, Hong Kong</b></td><td>4</td></tr><tr><td><b>School of Electrical and Electronics Engineering, Yonsei University, 50 Yonsei-ro, Seodaemun-gu, SEOUL, Republic of Korea</b></td><td>4</td></tr><tr><td><b>School of Information Science and Engineering, Southeast University, Nanjing, 210096, P.R. China</b></td><td>4</td></tr><tr><td><b>Department of Electrical Engineering, Korea Advanced Institute of Science and Technology, Daejeon, Republic of Korea</b></td><td>4</td></tr><tr><td><b>Hong Kong University of Science and Technology, Hong Kong</b></td><td>4</td></tr><tr><td><b>INRIA Grenoble Rhone-Alpes, FRANCE</b></td><td>4</td></tr><tr><td>North China Electric Power University Department of Electronic and Communication Engineering Baoding, Hebei, China</td><td>4</td></tr><tr><td><b>Seoul National University</b></td><td>4</td></tr><tr><td>School of Engineering, Computer and Mathematical Sciences, Auckland University of Technology, Auckland, New Zealand</td><td>4</td></tr><tr><td><b>Center for Research on Intelligent Perception and Computing, National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, Beijing 100190, China</b></td><td>4</td></tr><tr><td><b>School of Mechanical and Electrical Engineering, Guilin University of Electronic Technology Guangxi Guilin, China</b></td><td>4</td></tr><tr><td><b>University of Portsmouth, United Kingdom</b></td><td>4</td></tr><tr><td><b>Carnegie Mellon University</b></td><td>4</td></tr><tr><td><b>Bilgisayar Mühendisliği, Başkent Üniversitesi, Ankara, Türkiye</b></td><td>4</td></tr><tr><td><b>Universidad Autonoma de Madrid</b></td><td>4</td></tr><tr><td><b>University of Oulu, Machine Vision Group, PO Box 4500, 90014, 
Finland</b></td><td>4</td></tr><tr><td><b>Pohang University of Science and Technology (POSTECH), Pohang, Republic of Korea</b></td><td>4</td></tr><tr><td><b>Center for Computer Vision and Department of Mathematics, Sun Yat-Sen University, Guangzhou, China</b></td><td>4</td></tr><tr><td><b>Signal Processing Laboratory (LTS5), Ecole Polytechnique Fédérale de Lausanne, Lausanne, Switzerland</b></td><td>4</td></tr><tr><td>KU Leuven, Leuven, Belgium</td><td>4</td></tr><tr><td>Academia Sinica, Taipei, Taiwan</td><td>4</td></tr><tr><td><b>Institute of Computer, Hangzhou Dianzi University, China</b></td><td>4</td></tr><tr><td><b>State Key Laboratory of Management and Control for Complex Systems, Institute of Automation, Chinese Academy of Sciences, No.95 East Road of Zhongguancun, Beijing, China</b></td><td>4</td></tr><tr><td>LIARA Laboratory, University of Quebec at Chicoutimi (UQAC), Boulevard de l'Université, Chicoutimi (Quebec), Canada</td><td>4</td></tr><tr><td>Dept. of Computing, Curtin University GPO Box U1987, Perth, WA 6845</td><td>4</td></tr><tr><td><b>Department of Computer Science and Communication Engineering, Jiangsu University, Zhenjiang, China</b></td><td>4</td></tr><tr><td>NTT Software Innovation Center, Tokyo, Japan</td><td>4</td></tr><tr><td><b>Department of Electronic Engineering, Shanghai Jiao Tong University, Shanghai, China</b></td><td>4</td></tr><tr><td><b>University of Electronic Science and Technology of China, Chengdu, China</b></td><td>4</td></tr><tr><td><b>Dalle Molle Instituite for Artificial Intelligence (IDSIA), Lugano, Switzerland</b></td><td>4</td></tr><tr><td><b>Dept of Electrical and Computer Engineering, University of Calgary, Calgary, CANADA</b></td><td>4</td></tr><tr><td><b>Department of Computer Science, University of Colorado at Colorado Springs</b></td><td>4</td></tr><tr><td>EECS Department, University of Kansas, Lawrence, KS</td><td>4</td></tr><tr><td><b>Science and Technology on Information Systems Engineering Laboratory, 
National University of Defense Technology, Changsha, Hunan, P.R. China</b></td><td>4</td></tr><tr><td><b>Center for Research on Intelligent Perception and Computing, Institute of Automation, Chinese Academy of Sciences, Beijing, China, 100190</b></td><td>4</td></tr><tr><td><b>The Queensland Brain Institute, University of Queensland, St Lucia, QLD, Australia</b></td><td>4</td></tr><tr><td><b>School of Information and Communication, Guilin University of Electronic Technology Guangxi Guilin, China</b></td><td>4</td></tr><tr><td><b>College of Information and Control Engineering, China University of Petroleum (East China), Qingdao, P.R. China</b></td><td>4</td></tr><tr><td>Department of Mathematics and Computer Science University of Basel</td><td>4</td></tr><tr><td><b>Xi'an Jiaotong University, Xi'an, China</b></td><td>4</td></tr><tr><td><b>Department of Information Engineering, University of Brescia, Via Branze, 38 - 25123, Italy</b></td><td>4</td></tr><tr><td><b>Department of Computer Science and Technology, Zhejiang University, Hangzhou 310027, China</b></td><td>4</td></tr><tr><td>Goa University, India</td><td>4</td></tr><tr><td><b>University of Texas at Arlington, Arlington, TX</b></td><td>4</td></tr><tr><td><b>Norwegian Biometrics Laboratory, NTNU - Gj⊘vik, Norway</b></td><td>4</td></tr><tr><td><b>Department of Electrical Engineering and Electronics, University of Liverpool, Liverpool, U.K.</b></td><td>4</td></tr><tr><td><b>University of Nottingham, UK</b></td><td>4</td></tr><tr><td>Beijing Key Lab of Intelligent Information Technology, School of Computer Science, Beijing Institute of Technology, Beijing 100081, China</td><td>4</td></tr><tr><td><b>Key Laboratory of Intelligent Perception and Image Understanding of the Ministry of Education, International Research Center for Intelligent Perception and Computation, Joint International Research Laboratory of Intelligent Perception and Computation of China, Xidian University, Xi’an, 
China</b></td><td>4</td></tr><tr><td>VUB-NPU Joint AVSP Research Lab, Vrije Universiteit Brussel (VUB), Deptartment of Electronics & Informatics (ETRO), Pleinlaan 2, 1050 Brussel, Belgium</td><td>4</td></tr><tr><td><b>National Key Laboratory of Fundamental Science on Synthetic Vision, College of Computer Science, Sichuan University, Chengdu, China</b></td><td>4</td></tr><tr><td><b>State Key Laboratory of Integrated Services Networks, Xidian University, Xi'an, China</b></td><td>4</td></tr><tr><td><b>Department of Electronic and Electrical Engineering, Pohang University of Science and Technology (POSTECH), South Korea</b></td><td>4</td></tr><tr><td><b>University of Canberra, Canberra, Australia</b></td><td>4</td></tr><tr><td>Graduate School of Information Science, Nara Institute of Science and Technology, Takayama-cho 8916-5, Ikoma-shi, Nara, Japan</td><td>4</td></tr><tr><td><b>Department of Computer Science, University of North Carolina, Charlotte, NC, USA</b></td><td>4</td></tr><tr><td><b>Institute of Machine Learning and Systems Biology, College of Electronics and Information Engineering, Tongji University, Caoan Road 4800, Shanghai 201804, China</b></td><td>4</td></tr><tr><td><b>Intel Labs, Hillsboro, Oregon, USA</b></td><td>4</td></tr><tr><td>Smart Surveillance Interest Group, Department of Computer Science, Universidade Federal de Minas Gerais, Minas Gerais, Brazil</td><td>4</td></tr><tr><td><b>Department of Automation, State Key Lab of Intelligent Technologies and Systems, and Tsinghua National Laboratory for Information Science and Technology (TNList), Tsinghua University, Beijing, China</b></td><td>4</td></tr><tr><td><b>Université de Lyon, CNRS, UMR5205, F-69622, France</b></td><td>4</td></tr><tr><td>Shanghai University School of Communication and Information Engineering Shanghai, China</td><td>4</td></tr><tr><td>Microsoft, Redmond, WA, USA</td><td>4</td></tr><tr><td>Computer Science and Engineering, Pennsylvania State University, PA, USA SiliconScapes, LLC, 
PA, USA</td><td>4</td></tr><tr><td><b>Key Laboratory of Intelligent Information Processing, Chinese Academy of Sciences (CAS), Institute of Computing Technology, CAS, Beijing, China</b></td><td>4</td></tr><tr><td><b>Queen Mary University of London, London</b></td><td>4</td></tr><tr><td>Dept. of Computer Engineering, Keimyung University, Daegu, Korea</td><td>4</td></tr><tr><td><b>Department of Cognitive Science, Xiamen University, Xiamen, Fujian, China</b></td><td>4</td></tr><tr><td><b>State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, China</b></td><td>4</td></tr><tr><td><b>Universit&#x00E9; de Lyon, CNRS, UMR5205, F-69622, France</b></td><td>4</td></tr><tr><td><b>School of Information and Communication Engineering, Dalian University of Technology, Dalian, Liaoning, 116024, China</b></td><td>4</td></tr><tr><td><b>DII, University of Brescia, Brescia, Italy</b></td><td>4</td></tr><tr><td><b>School of Software, Tsinghua University, Beijing, China</b></td><td>4</td></tr><tr><td>National ICT Australia and UNSW, Sydney, Australia</td><td>4</td></tr><tr><td><b>Institute for Creative Technologies, University of Southern California</b></td><td>4</td></tr><tr><td><b>School of Information Science and Technology, Xiamen University, Xiamen, China</b></td><td>4</td></tr><tr><td><b>University of California, San Diego, USA</b></td><td>4</td></tr><tr><td><b>The University of Queensland, School of ITEE, QLD 4072, Australia</b></td><td>4</td></tr><tr><td><b>Department of Computer Science, University of York, UK</b></td><td>4</td></tr><tr><td><b>Department of Automation, State Key Lab of Intelligent Technologies and Systems, Tsinghua National Laboratory for Information Science and Technology (TNList), Tsinghua University, Beijing, China</b></td><td>4</td></tr><tr><td><b>Dept. 
of Computer Science, National Tsing Hua University, Hsinchu, Taiwan</b></td><td>4</td></tr><tr><td><b>SRI International, Menlo Park, USA</b></td><td>4</td></tr><tr><td><b>Universit&#x00E9; de Lyon, CNRS, France</b></td><td>4</td></tr><tr><td><b>School of Computer Science and Technology & Joint International Research Laboratory of Machine Learning and Neuromorphic Computing, Soochow University, Suzhou, China</b></td><td>4</td></tr><tr><td><b>Department of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, China</b></td><td>4</td></tr><tr><td><b>Computer Science and Engineering Department, University of South Florida, Tampa, FL, USA</b></td><td>4</td></tr><tr><td><b>Advanced Technologies Application Center 7a #21406 b/ 214 and 216, P.C. 12200, Siboney, Playa, Havana, Cuba</b></td><td>4</td></tr><tr><td><b>&#x00C7;o&#x011F;ulortam &#x0130;&#x015F;aret &#x0130;&#x015F;leme ve &#x00D6;r&#x00FC;nt&#x00FC; Tan&#x0131;ma Grubu, &#x0130;stanbul Teknik &#x00DC;niversitesi, &#x0130;stanbul, T&#x00FC;rkiye</b></td><td>4</td></tr><tr><td>Department of Electrical and Computer Engineering, Beckman Institute Advanced Science and Technology, University of Illinois at Urbana–Champaign, Urbana, IL, USA</td><td>4</td></tr><tr><td><b>National Laboratory of Pattern Recognition, CASIA, Center for Research on Intelligent Perception and Computing, CASIA, Center for Excellence in Brain Science and Intelligence Technology, CAS, University of Chinese Academy of Sciences, Beijing, 100049, China</b></td><td>4</td></tr><tr><td><b>Machine Learning and Cybernetics Research Center, School of Computer Science and Engineering, South China University of Technology, 510006, Guangzhou, China</b></td><td>4</td></tr><tr><td><b>IC Design Group, CSIR-Central Electronics Engineering Research Institute, Pilani, Rajasthan, India</b></td><td>4</td></tr><tr><td><b>College of Information Engineering, Yangzhou University, Yangzhou, 
China</b></td><td>4</td></tr><tr><td><b>Department of Mathematics, Intelligent Data Center, Sun Yat-sen University, Guangzhou, China</b></td><td>4</td></tr><tr><td><b>State Key Laboratory of Intelligent Technology and Systems Tsinghua National Laboratory for Information Science and Technology Department of Electronic Engineering, Tsinghua University, Beijing 100084, China</b></td><td>4</td></tr><tr><td>Universiti Kuala Lumpur, Kuala Lumpur</td><td>4</td></tr><tr><td><b>Max Planck Institute for Informatics, Saarland Informatics Campus, Germany</b></td><td>4</td></tr><tr><td><b>National Laboratory of Radar Signal Processing, Xidian University, Xi&#x2019;an, China</b></td><td>4</td></tr><tr><td><b>Department of Computer Engineering, College of Computer and Information Sciences, King Saud University, Riyadh, Saudi Arabia</b></td><td>4</td></tr><tr><td>Beijing Normal Univeristy, Beijing, China</td><td>4</td></tr><tr><td><b>Information Sciences Institute, University of Southern California, 4676 Admiralty Way, Marina Del Rey, 90292, USA</b></td><td>4</td></tr><tr><td><b>National University of Defense Technology, China</b></td><td>4</td></tr><tr><td><b>National Digital Switching System Engineering and Technological Research Center, Zhengzhou, China</b></td><td>4</td></tr><tr><td><b>Department of Electronic and Information Engineering, The Hong Kong Polytechnic University, Kowloon, Hong Kong</b></td><td>4</td></tr><tr><td><b>Department of Electrical and Computer Engineering, Northeastern University, Boston, MA, USA</b></td><td>4</td></tr><tr><td><b>University of Tsukuba</b></td><td>4</td></tr><tr><td><b>University of Electronic Science and Technology of China</b></td><td>4</td></tr><tr><td><b>National Taiwan University of Science and Technology</b></td><td>4</td></tr><tr><td><b>Samsung R&D Institute, Bangalore, India</b></td><td>4</td></tr><tr><td><b>Yaroslavl State University, Yaroslavl, Russia</b></td><td>4</td></tr><tr><td><b>Department of Electrical and Computer 
Engineering, Seoul National University</b></td><td>4</td></tr><tr><td><b>College of Computer Science and Software Engineering, Shenzhen University, Shenzhen, China</b></td><td>4</td></tr><tr><td><b>School of Electronics and Information Technology, Sun Yat-sen University, China</b></td><td>4</td></tr><tr><td>University of Tunis, The National Higher school of engineers of Tunis (ENSIT), Laboratory of Signal Image and Energy Mastery, LR13ES03 (SIME), Tunis, Tunisia</td><td>4</td></tr><tr><td><b>College of Information Science and Electronic Engineering, Zhejiang University, Hangzhou, China</b></td><td>4</td></tr><tr><td><b>Department of Computer Science and Engineering, Sejong University, 98 Gunja, Gwangjin, Seoul 143-747, Korea</b></td><td>4</td></tr><tr><td><b>University of Maryland, College Park, Maryland 20740 United States</b></td><td>4</td></tr><tr><td><b>Face Aging Group, University of North Carolina, Wilmington, NC, USA</b></td><td>4</td></tr><tr><td><b>Key Laboratory of Intelligent Information Processing, Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China</b></td><td>4</td></tr><tr><td><b>North Carolina State University, Department of Electrical and Computer Engineering, Raleigh, United States of America</b></td><td>4</td></tr><tr><td><b>Institute of Computer Science and Technology, Peking University, Beijing, China</b></td><td>4</td></tr><tr><td><b>College of Computer and Information Sciences, King Saud University, Riyadh, Saudi Arabia</b></td><td>4</td></tr><tr><td>Media Technology Lab, Huawei Technologies Co., Ltd</td><td>4</td></tr><tr><td><b>Institute of Artificial Intelligence and Robotics, Xi'an Jiaotong University, 28 Xianning West Road, Xi'an, Shaanxi, China</b></td><td>4</td></tr><tr><td><b>Keio University, Japan</b></td><td>4</td></tr><tr><td><b>National University of Defense Technology, Changsha, China</b></td><td>4</td></tr><tr><td><b>Hewlett-Packard Laboratories, Hewlett-Packard Company, Palo Alto, CA, 
USA</b></td><td>4</td></tr><tr><td><b>School of Computer Science and Engineering, Center for Robotics, University of Electronic Science and Technology of China, Chengdu, China</b></td><td>4</td></tr><tr><td><b>Image Processing Center, Beihang University, Beijing, China</b></td><td>4</td></tr><tr><td><b>School of Electronic and Computer Engineering, Peking University</b></td><td>4</td></tr><tr><td><b>Centre for Vision, Speech and Signal Processing University of Surrey, Guildford, UK</b></td><td>4</td></tr><tr><td><b>Shenzhen Key Laboratory of Information Science and Technology, Shenzhen Engineering Laboratory of IS&DCP and the Department of Electronic Engineering, Graduate School at Shenzhen, Tsinghua University, Beijing, China</b></td><td>4</td></tr><tr><td>Department of Computer Graphics and Multimedia, University of Brno, Brno, Czech Republic</td><td>4</td></tr><tr><td><b>Department of Information and Communication Technologies, Universitat Pompeu Fabra, Barcelona, Spain</b></td><td>4</td></tr><tr><td><b>Center for Research on Intelligent Perception and Computing, National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, China</b></td><td>4</td></tr><tr><td><b>Shanghai Jiao Tong University, School of Electronic Information and Electrical Engineering, People's Republic of China</b></td><td>4</td></tr><tr><td><b>Video/Image Modeling and Synthesis Laboratory, Department of Computer and Information Sciences, University of Delaware, Newark, DE</b></td><td>4</td></tr><tr><td><b>Multimedia Processing Lab., Samsung Advanced Institute of Technology (SAIT), Suwon-si, Korea</b></td><td>4</td></tr><tr><td>Center for Unified Biometrics and Sensors, University at Buffalo, NY, USA</td><td>4</td></tr><tr><td><b>Osaka university, Japan</b></td><td>4</td></tr><tr><td><b>IBJ, Inc., Tokyo, Japan</b></td><td>4</td></tr><tr><td><b>The University of Tokyo, Japan</b></td><td>4</td></tr><tr><td>Faculty of Engineering, Ain Shams University, Computer 
and Systems Engineering Department, Cairo, Egypt</td><td>4</td></tr><tr><td>School of Automation and Information Engineering, Xi'an University of Technology, Xi'an, China</td><td>4</td></tr><tr><td><b>College of electronic and information engineer Changchun University of Science and Technology Changchun China</b></td><td>4</td></tr><tr><td><b>School of Electrical, Computer and Telecommunication Engineering, University of Wollongong, NSW 2522, Australia</b></td><td>4</td></tr><tr><td><b>The University of Texas at Austin Austin, Texas, USA</b></td><td>4</td></tr><tr><td><b>Amity University Uttar Pradesh, Noida</b></td><td>4</td></tr><tr><td><b>Intelligent Media Laboratory, Digital Contents Research Institute, Sejong University, Seoul, South Korea</b></td><td>4</td></tr><tr><td>Computer Science and Engineering Dept., University of Nevada Reno, USA</td><td>4</td></tr><tr><td><b>Dept of Computer Engineering, Kyung Hee University, Yongin-si, South Korea</b></td><td>4</td></tr><tr><td><b>Computational Biomedicine Lab, Department of Computer Science, University of Houston, TX, USA</b></td><td>4</td></tr><tr><td><b>University of Surrey, Guildford</b></td><td>4</td></tr><tr><td>Department of Information and Control, B-DAT Laboratory, Nanjing University of Information and Technology, Nanjing, China</td><td>4</td></tr><tr><td><b>State Key Laboratory of Virtual Reality Technology and Systems, School of Computer Science and Engineering, Beihang University, Beijing, China</b></td><td>4</td></tr><tr><td><b>State Key Laboratory of Robotics, Chinese Academy of Sciences, Shenyang Institute of Automation, Shenyang, 110016, China</b></td><td>4</td></tr><tr><td>Inha University, South Korea</td><td>4</td></tr><tr><td><b>Sharp Laboratories of America, Camas, WA</b></td><td>4</td></tr><tr><td><b>Department of Informatics, Aristotle University of Thessaloniki, Greece</b></td><td>4</td></tr><tr><td><b>Department of Data Science and Knowledge Engineering, Maastricht University, Maastricht, 
Netherlands</b></td><td>4</td></tr><tr><td><b>Center for Biometrics and Security Research and the National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, Beijing, China</b></td><td>4</td></tr><tr><td><b>Geintra Research Group, University of Alcala</b></td><td>4</td></tr><tr><td><b>National Engineering Research Center for Multimedia Software, Computer School, Wuhan University, Wuhan, China</b></td><td>4</td></tr><tr><td><b>Electrical and Computer Systems Engineering, School of Engineering, Monash University Malaysia, 46150 Selangor, Malaysia</b></td><td>4</td></tr><tr><td><b>Beijing Laboratory of Intelligent Information Technology, School of Computer Science, Beijing Institute of Technology, Beijing, China</b></td><td>4</td></tr><tr><td><b>Department of Computer Science, Hong Kong Baptist University, Hong Kong</b></td><td>4</td></tr><tr><td><b>University of Science and Technology of China, Hefei, China</b></td><td>4</td></tr><tr><td><b>Beijing, Haidian, China</b></td><td>4</td></tr><tr><td>Dept. of Comput. Sci. & Technol., Tsinghua Univ., Beijing, China</td><td>4</td></tr><tr><td><b>School of Electrical and Computer Engineering, Purdue University, West Lafayette, IN 47907, U.S.A.</b></td><td>4</td></tr><tr><td>Institute for Human-Machine Communication, Technische Universit&#x00E4;t M&#x00FC;nchen, Germany</td><td>4</td></tr><tr><td><b>Peking University, Shenzhen, China</b></td><td>4</td></tr><tr><td><b>Department of Electrical and Computer Engineering, Ryerson University, Toronto, ON, Canada</b></td><td>4</td></tr><tr><td>Faculty of Engineering Science, Department of Systems Innovation, Arai Laboratory at Osaka University, Japan</td><td>4</td></tr><tr><td><b>Biometric Recognition Group - ATVS, EPS, Universidad Autonoma de Madrid, Avda. 
Francisco Tomas y Valiente, 11 - Campus de Cantoblanco - 28049 Madrid, Spain</b></td><td>4</td></tr><tr><td><b>Department of MathematicsIntelligent Data Center, Sun Yat-sen University, Guangzhou, China</b></td><td>4</td></tr><tr><td><b>University of Trento, Italy</b></td><td>4</td></tr><tr><td><b>Centre for Imaging Sciences, The University of Manchester, Manchester, United Kingdom</b></td><td>4</td></tr><tr><td><b>National Laboratory of Pattern Recognition, CASIA, University of Chinese Academy of Sciences, Beijing, 100049, China</b></td><td>4</td></tr><tr><td><b>School of Electronic and Electrical Engineering, Shanghai Jiao Tong University, National Engineering Lab on Information Content Analysis Techniques, GT036001 Shanghai, China</b></td><td>4</td></tr><tr><td><b>State Key Laboratory of Management and Control for Complex Systems, Institute of Automation, Chinese Academy of Sciences</b></td><td>4</td></tr><tr><td><b>Department of Informatics, University of Oslo, Oslo, Norway</b></td><td>4</td></tr><tr><td><b>Speech, Audio, Image and Video Technology (SAIVT) Laboratory, Queensland University of Technology, Australia</b></td><td>4</td></tr><tr><td><b>Technicolor, France</b></td><td>4</td></tr><tr><td><b>Korea Advanced Institute of Science and Technology, Daejeon, South Korea</b></td><td>4</td></tr><tr><td><b>School of Mathematics and Computational Science, Sun Yat-sen University, Guangzhou, China</b></td><td>4</td></tr><tr><td><b>School of Computer Science and Technology, Wuhan University of Technology, Wuhan, China</b></td><td>3</td></tr><tr><td><b>CyLab Biometrics Center and the Department of Electrical and Computer Engineering, Carnegie Mellon University, Pittsburgh, PA, USA</b></td><td>3</td></tr><tr><td>NTT Corporation, Atsugi, Japan</td><td>3</td></tr><tr><td>Department of Software and Information Systems Engineering, Ben-Gurion University of the Negev, Beersheba, Israel</td><td>3</td></tr><tr><td><b>National Laboratory of Pattern Recognition, Center for 
Excellence in Brain Science and Intelligence Technology, Institute of Automation, Chinese Academy of Sciences, University of Chinese Academy of Sciences, Beijing, China</b></td><td>3</td></tr><tr><td><b>Vision Lab at Department of Electrical and Computer Engineering, Old Dominion University, Norfolk, VA 23529, USA</b></td><td>3</td></tr><tr><td>Intel Labs China, Beijing, China</td><td>3</td></tr><tr><td><b>IBM T. J. Watson Research, Yorktown Heights, NY, USA</b></td><td>3</td></tr><tr><td><b>Computer Science and Technology, University of Science and Technology of China</b></td><td>3</td></tr><tr><td><b>School of Information Technologies, University of Sydney, Australia</b></td><td>3</td></tr><tr><td><b>Department of Electronic Engineering, The Chinese University of Hong Kong, China</b></td><td>3</td></tr><tr><td><b>Key Laboratory of Machine Perception (Ministry of Education) Engineering Lab on Intelligent Perception for Internet of Things (ELIP), Shenzhen Graduate School, Peking University, China</b></td><td>3</td></tr><tr><td>School of Electrical and Electronic Engineering, Changchun University of Technology, Changchun, CO 130012 China</td><td>3</td></tr><tr><td><b>Center for Cognitive Ubiquitous Computing, Arizona State University, USA</b></td><td>3</td></tr><tr><td><b>School of Marine Science and Technology, Northwestern Polytechnical University, Xi’an, China</b></td><td>3</td></tr><tr><td><b>Institute of Computing, State University of Campinas, Campinas, Brazil</b></td><td>3</td></tr><tr><td><b>Guangdong Key Laboratory of Data Security and Privacy Preserving, Guangdong Engineering Research Center of Data Security and Privacy Preserving, College of Information Science and Technology, Jinan University, Guangzhou, China</b></td><td>3</td></tr><tr><td><b>State Key Laboratory of Intelligent Technology and Systems, Department of Electronic Engineering, Tsinghua University, Beijing 100084, P.R. 
China</b></td><td>3</td></tr><tr><td><b>CAS Key Laboratory of Technology in Geo-spatial Information Processing and Application System, University of Science and Technology of China, Hefei 230027, China</b></td><td>3</td></tr><tr><td><b>Tokyo Institute of Technology, Tokyo, Japan</b></td><td>3</td></tr><tr><td>Radboud University, Nijmegen, Netherlands</td><td>3</td></tr><tr><td>Algılayıcılar, Görüntü ve Sinyal İşleme Grubu, HAVELSAN A.Ş. Ankara, Türkiye</td><td>3</td></tr><tr><td><b>C &amp; C Innovation Research Labs, NEC Corporation, Nara, Japan</b></td><td>3</td></tr><tr><td>Dept. of Audio Visual Technology, Technische Universitt, Ilmenau, Germany</td><td>3</td></tr><tr><td><b>Imperial College London, UK</b></td><td>3</td></tr><tr><td>School of Electrical and Computer Engineering, Royal Melbourne Institute of Technology University , Melbourne, Australia</td><td>3</td></tr><tr><td><b>School of Information Technology and Electrical Engineering, The University of Queensland, Australia</b></td><td>3</td></tr><tr><td><b>Department of Computer Science and Engineering, Kyung Hee University, Seoul, South Korea</b></td><td>3</td></tr><tr><td><b>Institute for Infocomm Research, 1 Fusionpolis Way, #21-01, Connexis Singapore 138632, Singapore</b></td><td>3</td></tr><tr><td><b>School of Computer Science and Engineering, South China University of Technology, China</b></td><td>3</td></tr><tr><td><b>Department of Radiology and the Biomedical Research Imaging Center, University of North Carolina at Chapel Hill, Chapel Hill, NC, USA</b></td><td>3</td></tr><tr><td><b>Dept. of Electrical Engineering and Comp. 
Sc., Northwestern University, Evanston, IL 60208, USA</b></td><td>3</td></tr><tr><td><b>School of Electronics and Computer Science, University of Southampton, Southampton, U.K.</b></td><td>3</td></tr><tr><td><b>School of Electronic and Information Engineering, Xi'an Jiaotong University, Xi'an, China</b></td><td>3</td></tr><tr><td><b>Program of Electrical Engineering, COPPE/UFRJ, Universidade Federal do Rio de Janeiro, Rio de Janeiro-RJ CEP, Brazil</b></td><td>3</td></tr><tr><td><b>Bilgisayar Mühendisliği, İstanbul Teknik Üniversitesi, İstanbul, Turkey</b></td><td>3</td></tr><tr><td><b>Department of Electrical and Computer Engineering and the Center for Automation Research, UMIACS, University of Maryland, College Park, USA</b></td><td>3</td></tr><tr><td><b>Department of Computer Engineering, Kyung Hee University, Seoul, South Korea</b></td><td>3</td></tr><tr><td><b>Michigan State University, United States of America</b></td><td>3</td></tr><tr><td>School of Engineering, University of Baja California, Tijuana, M&#x00E9;xico</td><td>3</td></tr><tr><td><b>Center for Machine Vision Research, University of Oulu</b></td><td>3</td></tr><tr><td><b>Department of Computer Science and Engineering, University of South Florida, Tampa, Florida 33620</b></td><td>3</td></tr><tr><td><b>KTH Royal Institute of Technology, 100 44 Stockholm, Sweden</b></td><td>3</td></tr><tr><td><b>School of Software, Huazhong University of Science and Technology, Wuhan, China</b></td><td>3</td></tr><tr><td><b>Department of Computer Science and Engineering, Lehigh University, Bethlehem, PA 18015, USA</b></td><td>3</td></tr><tr><td>School of Computer Science, Center for Optical Imagery Analysis and Learning (OPTIMAL)</td><td>3</td></tr><tr><td><b>Department of Computing, Curtin University, Perth WA 6102, Australia</b></td><td>3</td></tr><tr><td>Department of Systems and Computing, Federal University of Campina Grande, Av. 
Apríigio Veloso, 882, 58429-900 Campina Grande, PB, Brazil</td><td>3</td></tr><tr><td><b>Institute of Imaging and Computer Vision, RWTH Aachen University, Templergraben 55, 52056, Aachen, Germany</b></td><td>3</td></tr><tr><td><b>Universidade Federal do Rio de Janeiro, Cx.P. 68504, Rio de Janeiro, RJ, CEP 21945-970, Brazil</b></td><td>3</td></tr><tr><td>R&D Centre Algoritmi, School of Engineering, University of Minho, Portugal</td><td>3</td></tr><tr><td><b>National Laboratory for Parallel and Distributed Processing, School of Computer, College of Computer, National University of Defense Technology, Changsha, China</b></td><td>3</td></tr><tr><td><b>Department of Computer and Information Science, Temple University, Philadelphia, PA, 19122, USA</b></td><td>3</td></tr><tr><td>Department of Control and Computer Engineering, Politecnico di Torino, Italy</td><td>3</td></tr><tr><td><b>Key Laboratory of System Control and Information Processing MOE, Department of Automation, Shanghai Jiao Tong University</b></td><td>3</td></tr><tr><td><b>College of Computer Science, Zhejiang University, China</b></td><td>3</td></tr><tr><td><b>Institute of Industrial Information Technology (IIIT), Karlsruhe Institute of Technology (KIT), 76187 Karlsruhe, Germany</b></td><td>3</td></tr><tr><td><b>School of Electronics and Information Technology, Sun Yat-Sen University, Guangzhou, China</b></td><td>3</td></tr><tr><td><b>Institute for Electronics, Signal Processing and Communications (IESK), Otto-von-Guericke-University Magdeburg, D-39106, P.O. 
Box 4210 Germany</b></td><td>3</td></tr><tr><td><b>Institute for Human-Machine Communication, TU M&#x00FC;nchen, Theresienstrae 90, 80333 M&#x00FC;nchen, Germany</b></td><td>3</td></tr><tr><td><b>School of Computer Science and Technology, Harbin Institute of Technology, China</b></td><td>3</td></tr><tr><td><b>Oak Ridge National Laboratory, USA</b></td><td>3</td></tr><tr><td>Center for Research in Intelligent Systems, University of California, Riverside Riverside, CA 92521-0425, USA</td><td>3</td></tr><tr><td><b>Department of CS&E, Indian Institute of Technology, Madras, India</b></td><td>3</td></tr><tr><td><b>Department of Electrical and Computer Engineering, University of Windsor, 401 Sunset Avenue, Windsor, N9B 3P4, Canada</b></td><td>3</td></tr><tr><td>Shanghai Advanced Research Institute, CAS, Shanghai, China</td><td>3</td></tr><tr><td><b>Elektrik ve Elektronik Mühendisliği Bölümü, Eskişehir Osmangazi Üniversitesi, Türkiye</b></td><td>3</td></tr><tr><td><b>Department of Computer Science, Zhejiang University, Hangzhou, China</b></td><td>3</td></tr><tr><td><b>Software Solution Laboratory, Samsung Advanced Institute of Technology, Suwon-si, South Korea</b></td><td>3</td></tr><tr><td><b>Florida International University, Miami, FL</b></td><td>3</td></tr><tr><td><b>Rice University</b></td><td>3</td></tr><tr><td>Department of electronic engineering, Key Lab of Intelligent Perception and Image Understanding of Ministry of Education, Xi'an, China</td><td>3</td></tr><tr><td><b>Centre of Informatics, Federal University of Pernambuco, Recife-PE, Brazil. Bruno J. T. 
Fernandes is also with the Polytechnic School, University of Pernambuco, Brazil</b></td><td>3</td></tr><tr><td>Computer Vision Laboratory, ETH Zurich, Sternwartstrasse 7, 8092, Switzerland</td><td>3</td></tr><tr><td><b>VNU HCMC, University of Science, Ho Chi Minh City, Vietnam</b></td><td>3</td></tr><tr><td><b>Department of Electrical and Computer Engineering, Peking University, Beijing, China</b></td><td>3</td></tr><tr><td><b>Instrumentation, IT and Systems Lab IRSEEM Rouen, FR</b></td><td>3</td></tr><tr><td><b>Aristotle University of Thessaloniki, Greece</b></td><td>3</td></tr><tr><td><b>School of Automation, Northwestern Polytechnical University, Xi&#x2019;an, China</b></td><td>3</td></tr><tr><td><b>Department of Computer Science and Engineering, Arizona State University, Tempe, AZ, USA</b></td><td>3</td></tr><tr><td>BITS Pilani, Pilani , India</td><td>3</td></tr><tr><td>Department of Computer Science and Technology, Indian Institute of Engineering Science and Technology, Shibpur, Howrah-711 103, India</td><td>3</td></tr><tr><td><b>College of Information and Control Engineering, China University of Petroleum (East China), Qingdao, Shandong, 266580, China</b></td><td>3</td></tr><tr><td><b>Center for Research on Intelligent Perception and Computing, Institute of Automation, National Laboratory of Pattern Recognition, Chinese Academy of Sciences</b></td><td>3</td></tr><tr><td><b>National Laboratory of Pattern Recognition CAS Center for Excellence in Brain Science and Intelligence Technology Institute of Automation, Chinese Academy of Sciences, 100190, China</b></td><td>3</td></tr><tr><td><b>Univ. 
Bordeaux, LaBRI, PICTURA, UMR 5800, F-33400 Talence, France</b></td><td>3</td></tr><tr><td><b>Tianjin University, China</b></td><td>3</td></tr><tr><td><b>The Univ of Hong Kong, China</b></td><td>3</td></tr><tr><td><b>Advanced Technologies Application Center (CENATAV), 7A ♯21406 Siboney, Playa, P.C.12200, Havana, Cuba</b></td><td>3</td></tr><tr><td><b>GIPSA-Lab, Grenoble, France</b></td><td>3</td></tr><tr><td><b>University of Maryland, Baltimore County, Baltimore, MD, USA</b></td><td>3</td></tr><tr><td>Dept. of CS&E, IIT Madras, India</td><td>3</td></tr><tr><td><b>Samsung Research and Development Institute Bangalore Pvt Ltd., Bangalore, India</b></td><td>3</td></tr><tr><td><b>Inst. of Autom., Shanghai Jiao Tong Univ., China</b></td><td>3</td></tr><tr><td><b>Department of Computer Science, New Jersey Institute of Technology, Newark, USA</b></td><td>3</td></tr><tr><td><b>State Key Laboratory of Integrated Services Networks, Xidian University, Xi&#x2019;an, China</b></td><td>3</td></tr><tr><td><b>Department of Electrical and Electronic Engineering, Imperial College London, London, U.K.</b></td><td>3</td></tr><tr><td><b>Center for Cognitive Ubiquitous Computing (CUbiC), Arizona State University, Tempe, AZ, USA</b></td><td>3</td></tr><tr><td><b>Department of Computing, Curtin University, Perth WA, Australia</b></td><td>3</td></tr><tr><td><b>SUNY Buffalo</b></td><td>3</td></tr><tr><td>Graduate School of System Design Tokyo Metropolitan University Tokyo, Japan</td><td>3</td></tr><tr><td><b>Bilgisayar Mühendisliği Bölümü, TOBB Ekonomi ve Teknoloji Üniversitesi, Ankara, Türkiye</b></td><td>3</td></tr><tr><td><b>Intelligent Data Center, School of Mathematics and Computational Science, Sun Yat-sen University, Guangzhou, China</b></td><td>3</td></tr><tr><td><b>Indian Institute of Information Technology at Allahabad, Allahabad, India</b></td><td>3</td></tr><tr><td>Face Aging Group, Computer Science Department, UNCW, USA</td><td>3</td></tr><tr><td>City University of New York, New 
York, NY, USA</td><td>3</td></tr><tr><td><b>Department of Computer Science and Digital Technologies, Faculty of Engineering and Environment, Northumbria University, Newcastle Upon Tyne, U.K.</b></td><td>3</td></tr><tr><td><b>Faculty of Information Technology, University of Technology, Sydney, Australia</b></td><td>3</td></tr><tr><td>Department of Computer Science and Engineering, Visual Learning and Intelligence Group, IIT Hyderabad, Hyderabad, India</td><td>3</td></tr><tr><td><b>School of Computing, Communications and Electronics, University of Plymouth, UK</b></td><td>3</td></tr><tr><td>Ghent University, Ghent, Belgium</td><td>3</td></tr><tr><td><b>Department of Electrical and Computer Engineering, Carnegie Mellon University, Pittsburgh, PA</b></td><td>3</td></tr><tr><td><b>University of California San Diego, United States of America</b></td><td>3</td></tr><tr><td>Columbia Univeristy, New York, NY, USA</td><td>3</td></tr><tr><td><b>Key Lab of Intelligent Perception and Image Understanding of Ministry of Education, Xidian University, Xi'an, China</b></td><td>3</td></tr><tr><td>Microsoft Research Cambridge</td><td>3</td></tr><tr><td><b>Singapore University of Technology and Design, Singapore</b></td><td>3</td></tr><tr><td><b>School of Information Science and Technology, Xiamen University, Xiamen, P. R. 
China</b></td><td>3</td></tr><tr><td><b>Research Center for Information Technology Innovation, Academia Sinica, Taipei, Taiwan</b></td><td>3</td></tr><tr><td><b>School of Information Technology and Electrical Engineering, The University of Queensland</b></td><td>3</td></tr><tr><td>Center for Automation Research, UMIACS University of Maryland, College Park, MD 20742</td><td>3</td></tr><tr><td>School of Electronics, Electrical Engineering and Computer Science, Queen&#x2019;s University Belfast, Belfast, U.K.</td><td>3</td></tr><tr><td><b>Advanced Digital Sciences Center (ADSC), University of Illinois at Urbana-Champaign, Singapore</b></td><td>3</td></tr><tr><td><b>University of Zagreb, Faculty of Electrical Engineering and Computing, Croatia</b></td><td>3</td></tr><tr><td><b>Department of Computer Science, University of Hamburg, Germany</b></td><td>3</td></tr><tr><td><b>Department of Computer ScienceMultimedia Processing Laboratory, National Tsing Hua University, Hsinchu, Taiwan</b></td><td>3</td></tr><tr><td><b>West Virginia University, Lane Dept. 
of CSEE, Morgantown, WV</b></td><td>3</td></tr><tr><td><b>University of California San Diego</b></td><td>3</td></tr><tr><td><b>School of Computer Science and Technology, University of Science and Technology of China, Hefei, Anhui, China</b></td><td>3</td></tr><tr><td><b>School of Information Technologies, The University of Sydney, NSW 2006, Australia, Sydney</b></td><td>3</td></tr><tr><td><b>Department of Electrical Engineering, University of Windsor, Ontario, Canada</b></td><td>3</td></tr><tr><td>School of Information and Communication Engineering, Beijing University of Posts and Telcommunications, Beijing, China</td><td>3</td></tr><tr><td><b>INRIA Grenoble Rh&#x00F4;ne-Alpes Research Center, 655 avenue de l'Europe, 38 334 Saint Ismier Cedex, France</b></td><td>3</td></tr><tr><td><b>National Institutes of Health, Bethesda, Maryland 20892</b></td><td>3</td></tr><tr><td><b>Robotics Institute, Carnegie Mellon University, Pittsburgh, PA, USA</b></td><td>3</td></tr><tr><td>Gwangju Institute of Science and Technology, 123, Cheomdangwagi-ro, Buk-gu, Gwangju, South Korea</td><td>3</td></tr><tr><td><b>Department of Computing, The Hong Kong Polytechnic University, China</b></td><td>3</td></tr><tr><td><b>Harvard University</b></td><td>3</td></tr><tr><td><b>School of Computing and Information Sciences, Florida International University, Miami, FL</b></td><td>3</td></tr><tr><td><b>College of Electronic Information and Automation, Civil Aviation University of China, Tianjin</b></td><td>3</td></tr><tr><td><b>Department of Automation, Tsinghua University, 100084 Beijing, China</b></td><td>3</td></tr><tr><td><b>NICTA, Canberra ACT, Australia and CECS, Australian National University, Australia</b></td><td>3</td></tr><tr><td><b>Beijing University of Posts and Telecommunications, Beijing, China</b></td><td>3</td></tr><tr><td><b>Research Center of Intelligent Robotics, Shanghai Jiao Tong University, Shanghai 200240, P.R China</b></td><td>3</td></tr><tr><td><b>UtopiaCompression 
Corporation, 11150 W. Olympic Blvd, Suite 820, Los Angeles, CA 90064, USA</b></td><td>3</td></tr><tr><td><b>Chinese Academy of Sciences, China</b></td><td>3</td></tr><tr><td><b>Laboratoire des Syst&#x00E8;mes de T&#x00E9;l&#x00E9;communication et Ing&#x00E9;nierie de la D&#x00E9;cision (LASTID) Universit&#x00E9; Ibn Tofail BP 133, Kenitra 14000, Maroc</b></td><td>3</td></tr><tr><td><b>Sorbonne Universités, UPMC Univ Paris 06, CNRS, UMR 7222, F-75005, Paris, France</b></td><td>3</td></tr><tr><td><b>Graduate School of Informatics and Engineering, The University of Electro-Communications, 1-5-1 Chofugaoka, Chofu, Tokyo 182-8585, Japan</b></td><td>3</td></tr><tr><td><b>School of Computer and Information Technology, Beijing Jiaotong University, Beijing, 100044, China</b></td><td>3</td></tr><tr><td><b>University of Wisconsin - Madison</b></td><td>3</td></tr><tr><td><b>Mines-Télécom/Télécom Lille, CRIStAL (UMR CNRS 9189), Villeneuve d'Ascq, France</b></td><td>3</td></tr><tr><td><b>Kyung Hee University, Korea</b></td><td>3</td></tr><tr><td><b>Departamento de Computación, Facultad de Ciencias Exactas y Naturales, Universidad de Buenos Aires, Argentina</b></td><td>3</td></tr><tr><td><b>Stony Brook University, Stony Brook, NY 11794, USA</b></td><td>3</td></tr><tr><td><b>University of Delaware, Newark, 19716, USA</b></td><td>3</td></tr><tr><td><b>Vision and Fusion Lab, Karlsruhe Institute of Technology KIT, Adenauerring 4, 76131, Germany</b></td><td>3</td></tr><tr><td><b>University at Buffalo, The State University of New York, Buffalo, NY 14203, USA</b></td><td>3</td></tr><tr><td><b>UIUC</b></td><td>3</td></tr><tr><td><b>Computational Biomedicine Lab, Department of Computer Science, University of Houston, 4800 Calhoun Rd., TX, 77004, USA</b></td><td>3</td></tr><tr><td><b>Pattern Recognition and Intelligent Systems Laboratory, Beijing University of Posts and Telecommunications, Beijing, China</b></td><td>3</td></tr><tr><td><b>Laboratory of Intelligent Recognition and Image 
Processing, School of Computer Science and Engineering, Beihang University, 100191, Beijing, China</b></td><td>3</td></tr><tr><td><b>Face Aging Group, UNCW</b></td><td>3</td></tr><tr><td><b>University of Texas at San Antonio, San Antonio, USA</b></td><td>3</td></tr><tr><td><b>College of Computer Science and Technology, Xinjiang Normal University, Urumchi, 830054, China</b></td><td>3</td></tr><tr><td><b>School of Information Technology, Deakin University, Geelong, VIC 3216, Australia</b></td><td>3</td></tr><tr><td>Institute of Mathematical and Computer Sciences, University of Sao Paulo, Sao Paulo, Brazil</td><td>3</td></tr><tr><td>Dept. of Informatics, Aristotle Univ. of Thessaloniki, Greece</td><td>3</td></tr><tr><td><b>Zhejiang University</b></td><td>3</td></tr><tr><td><b>Northwestern Polytechnical University, Xi'an, P. R. China</b></td><td>3</td></tr><tr><td><b>University of Southern California, Institute for Robotics and Intelligent Systems, Los Angeles, CA 90089, USA</b></td><td>3</td></tr><tr><td>NTT Media Intelligence Laboratories, Tokyo, Japan</td><td>3</td></tr><tr><td><b>Computer Science, University of Houston, Texas 77004, United States of America</b></td><td>3</td></tr><tr><td><b>School of Communication and Information Engineering, Beijing University of Posts and Telecommunications, Beijing, China</b></td><td>3</td></tr><tr><td>Beijing Institute of Graphic Communication, Beijing</td><td>3</td></tr><tr><td><b>Department of Computer Science and Technology, Shanghai Key Laboratory of Multidimensional Information Processing, East China Normal University, Shanghai, China</b></td><td>3</td></tr><tr><td><b>Department of Computer Science and Engineering, Michigan State University, USA</b></td><td>3</td></tr><tr><td><b>Tsinghua University, Beijing,China</b></td><td>3</td></tr><tr><td><b>Media & Inf. Res. 
Labs., NEC Corp., Kanagawa, Japan</b></td><td>3</td></tr><tr><td><b>Centre de Visió per Computador, Universitat Autònoma de Barcelona, Barcelona, Spain</b></td><td>3</td></tr><tr><td><b>Department of Electronic Engineering, Shanghai Jiao Tong University, China</b></td><td>3</td></tr><tr><td><b>Department of Computer Science and TechnologyState Key Laboratory of Intelligent Technology and Systems, Tsinghua National Laboratory for Information Science and Technology, Tsinghua University, Beijing, China</b></td><td>3</td></tr><tr><td><b>School of Software, Tsinghua University, Beijing, P. R. China</b></td><td>3</td></tr><tr><td><b>Research Center of Intelligent Robotics Shanghai Jiao Tong University, Shanghai, 200240, P.R. China</b></td><td>3</td></tr><tr><td><b>Center for Research on Intelligent Perception and Computing National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences</b></td><td>3</td></tr><tr><td><b>School of Software, University of Technology Sydney, New South Wales, Australia</b></td><td>3</td></tr><tr><td><b>School of Telecommunications Engineering, Xidian University, Xi’an, China</b></td><td>3</td></tr><tr><td><b>Department of Computer Science, Shenzhen Graduate School, Harbin Institute of Technology, Guangdong 518055, China</b></td><td>3</td></tr><tr><td><b>Azbil Corporation 1-12-2, Kawana, Fujisawa-shi, 251-8522, Japan</b></td><td>3</td></tr><tr><td><b>Graduate School of Information Sciences, Tohoku University, 6-6-05., Aramaki Aza Aoba., Sendai-shi., 980-8579., Japan</b></td><td>3</td></tr><tr><td><b>Australian National University, Canberra, Australia</b></td><td>3</td></tr><tr><td><b>Visualisation Group, University of Warwick, Coventry, UK</b></td><td>3</td></tr><tr><td><b>School of Software Engineering, Chongqing University, Chongqing, China</b></td><td>3</td></tr><tr><td><b>Beijing University of Posts and Telecommunications</b></td><td>3</td></tr><tr><td><b>Department of Computer Science and Information 
Engineering, National Cheng Kung University, Tainan, Taiwan</b></td><td>3</td></tr><tr><td><b>National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, Beijing 100190, P. R. China</b></td><td>3</td></tr><tr><td>Nara Institute of Science and Technology, Japan</td><td>3</td></tr><tr><td><b>Institute for Electronics, Signal Processing and Communications (IESK) Otto-von-Guericke-University Magdeburg D-39016 Magdeburg, P.O. Box 4210 Germany</b></td><td>3</td></tr><tr><td>Department of Computer, the University of Suwon, Korea</td><td>3</td></tr><tr><td><b>Institute for Anthropomatics, Karlsruhe Institute of Technology, Germany</b></td><td>3</td></tr><tr><td>Department of Electrical and Computer Engineering, Florida Institute of Technology, Melbourne, USA</td><td>3</td></tr><tr><td><b>Fujian Key laboratory of Sensing and Computing for Smart City, School of Information Science and Technology, Xiamen University, Xiamen, China</b></td><td>3</td></tr><tr><td>Dept. of Computer Science and Engineering, St. Joseph's College of Engineering and Technology, Palai, Kerala, India</td><td>3</td></tr><tr><td><b>Beijing University of Posts and Telecommunications, Beijing, P.R. China</b></td><td>3</td></tr><tr><td><b>Department of Computer Science and Engineering, Michigan State University, East Lansing 48824, USA</b></td><td>3</td></tr><tr><td><b>Centre for Vision, Speech and Signal Processing, University of Surrey, UK</b></td><td>3</td></tr><tr><td><b>School of Electronic Information and Communications, Huazhong University of Science and Technology, Wuhan, China</b></td><td>3</td></tr><tr><td><b>Department of Electrical Engineering, Indian Institute of Technology Kanpur, PIN 208016, Uttar Pradesh, India</b></td><td>3</td></tr><tr><td>Dept. 
of Computer Science and Electrical Engineering, University of Missouri-Kansas City, MO, USA</td><td>3</td></tr><tr><td><b>University of North Carolina Wilmington, USA</b></td><td>3</td></tr><tr><td><b>Shenzhen Key Laboratory of Computer Vision and Pattern Recognition, Shenzhen Institute of Advanced Technology, Chinese Academy of Sciences, Shenzhen, China</b></td><td>3</td></tr><tr><td><b>Pattern Recognition and Intelligent System Laboratory, Beijing University of Posts and Telecommunications, Beijing 100876, China</b></td><td>3</td></tr><tr><td><b>Visual Media Computing Lab, Department of Multimedia and Graphic Arts, Cyprus University of Technology, Limassol, Cyprus</b></td><td>3</td></tr><tr><td><b>Department of Computer Science, Computational Biomedicine Laboratory, University of Houston, Houston, TX, USA</b></td><td>3</td></tr><tr><td><b>Centro de Inform&#x00E1;tica, Universidade Federal de Pernambuco, Recife, Brazil</b></td><td>3</td></tr><tr><td><b>Xi’an Institute of Optics and Precision Mechanics, Chinese Academy of Sciences, Xi’an, China</b></td><td>3</td></tr><tr><td>Inha University, Incheon, South Korea</td><td>3</td></tr><tr><td><b>Swiss Federal Institute of Technology, Lausanne (EPFL), Switzerland</b></td><td>3</td></tr><tr><td><b>Australian Centre for Field Robotics University of Sydney, 2006, Australia</b></td><td>3</td></tr><tr><td><b>Universit&#x00E9; de Lyon, Laboratoire d&#x2019;InfoRmatique en Image et Syst&#x00E8;mes d&#x2019;information, Centre National de Recherche Scientifique 5205, Ecole Centrale de Lyon, France</b></td><td>3</td></tr><tr><td><b>Department of Computer ScienceFace Aging Group Research Laboratory, Institute for Interdisciplinary Studies in Identity Sciences, University of North Carolina at Wilmington, Wilmington, NC, USA</b></td><td>3</td></tr><tr><td><b>Media Laboratory, Massachusetts Institute of Technology, Cambridge, MA 02139, USA</b></td><td>3</td></tr><tr><td>Center for Research on Intelligent Perception and 
Computing</td><td>3</td></tr><tr><td><b>UC Merced, USA</b></td><td>3</td></tr><tr><td><b>Centre for Quantum Computation & Information Systems, Faculty of Engineering and IT, University of Technology, Sydney, 235 Jones Street, Ultimo, NSW, Australia</b></td><td>3</td></tr><tr><td><b>Samsung Research Center-Beijing, SAIT China Lab Beijing, China</b></td><td>3</td></tr><tr><td><b>IT - Instituto de Telecomunicações, University of Beira Interior, Portugal</b></td><td>3</td></tr><tr><td>Thiagarajar College of Engineering, Madurai, Tamilnadu, India</td><td>3</td></tr><tr><td><b>Center for Cognitive, Connected &amp; Computational Imaging, College of Engineering &amp; Informatics, NUI Galway, Ireland</b></td><td>3</td></tr><tr><td><b>Institute for Anthropomatics and Robotics, Karlsruhe Institute of Technology, Karlsruhe, Germany</b></td><td>3</td></tr><tr><td><b>Institute of Information Science, Beijing jiaotong University, Beijing, China</b></td><td>3</td></tr><tr><td><b>Key Laboratory of Intelligent Perception and Image Understanding of Ministry of Education, School of Artificial Intelligence, Xidian University, Xi’an, China</b></td><td>3</td></tr><tr><td><b>Center for Automation Research, University of Maryland, College Park, 20742, USA</b></td><td>3</td></tr><tr><td><b>Hasso Plattner Institute, University of Potsdam, Prof.-Dr.-Helmert-Str. 2-3, 14482, Germany</b></td><td>3</td></tr><tr><td><b>Dalian University of Technology, School of Software Tuqiang St. 
321, Dalian, 116620, China</b></td><td>3</td></tr><tr><td><b>Shenzhen Graduate School, Peking University, Shenzhen, China</b></td><td>3</td></tr><tr><td><b>Department of Computer Science, University of Central Florida, Orlando, 32816, United States of America</b></td><td>3</td></tr><tr><td><b>Department of Electronic Engineering, Shanghai Jiao Tong University, Shanghai 200240, China</b></td><td>3</td></tr><tr><td><b>College of Computer Science and Technology, Jilin University, Changchun, China</b></td><td>3</td></tr><tr><td><b>University of Technology, Sydney</b></td><td>3</td></tr><tr><td><b>Key Lab of Intelligent Information Processing of Chinese Academy of Sciences (CAS), Institute of Computing Technology, CAS, Beijing 100190, China</b></td><td>3</td></tr><tr><td><b>Software School, Xiamen University, Xiamen, China</b></td><td>3</td></tr><tr><td><b>University of Nottingham, Ningbo China</b></td><td>3</td></tr><tr><td><b>National Key Laboratory of Fundamental Science on Synthetic Vision, College of Computer Science, Sichuan University Chengdu, 610065, China</b></td><td>3</td></tr><tr><td><b>Institute of Forensic Science, Ministry of Justice, Shanghai 200063, China</b></td><td>3</td></tr><tr><td><b>Department of Information Engineering, University of Florence, Florence, Italy</b></td><td>3</td></tr><tr><td>Dept. of Computer Science and Information Engineering, National Dong Hwa University, Hualien, Taiwan</td><td>3</td></tr><tr><td><b>West Virginia University, Morgantown, WV, USA</b></td><td>3</td></tr><tr><td><b>EUP Mataró, Spain</b></td><td>3</td></tr><tr><td><b>Université du Québec à Chicoutimi (UQAC)</b></td><td>3</td></tr><tr><td><b>Dept. 
of Computer Sciences, ASIA Team, Moulay Ismail University, Faculty of Science and Techniques, BP 509 Boutalamine 52000 Errachidia, Morocco</b></td><td>3</td></tr><tr><td>School of Electrical and Electronic Engineering, Singapore</td><td>3</td></tr><tr><td><b>Microsoft Research, Beijing, China</b></td><td>3</td></tr><tr><td><b>Northeastern University, Boston, USA</b></td><td>3</td></tr><tr><td><b>Center for Future Media and School of Computer Science and Engineering, University of Electronic Science and Technology of China, Chengdu, China</b></td><td>3</td></tr><tr><td><b>Advanced Technologies Application Center, 7a #21406 b/ 214 and 216, P.C. 12200, Playa, Havana, Cuba</b></td><td>3</td></tr><tr><td><b>Artificial Vision Laboratory, National Taiwan University of Science and Technology</b></td><td>3</td></tr><tr><td><b>Department of Electrical Engineering, Indian Institute of Technology Kanpur, Kanpur, India</b></td><td>3</td></tr><tr><td>Universidade Nova Lisboa, Lisboa, Portugal</td><td>3</td></tr><tr><td><b>Wuhan University, Wuhan, China</b></td><td>3</td></tr><tr><td><b>Key Laboratory of Machine Perception (Ministry of Education), Engineering Lab on Intelligent Perception for Internet of Things (ELIP), Shenzhen Graduate School, Peking University, China</b></td><td>3</td></tr><tr><td><b>Shenyang Institute of Automation, Chinese Academy of Sciences, Shenyang, China</b></td><td>3</td></tr><tr><td><b>Beijing Key Laboratory of Traffic Data Analysis and Mining, Beijing Jiaotong University, Beijing, China</b></td><td>3</td></tr><tr><td>State Key Laboratory of Management and Control for Complex Systems, Institute of Automation Chinese Academy of Sciences, Beijing, China 100190</td><td>3</td></tr><tr><td><b>Bilgisayar M&#x00FC;hendisli&#x011F;i B&#x00F6;l&#x00FC;m&#x00FC;, &#x0130;stanbul Teknik &#x00DC;niversitesi, &#x0130;stanbul, T&#x00FC;rkiye</b></td><td>3</td></tr><tr><td><b>Department of Computer Science and Engineering, Jadavpur University, Kolkata, 
India</b></td><td>3</td></tr><tr><td><b>Indian Statistical Institute, Kolkata, India</b></td><td>3</td></tr><tr><td><b>Jiangsu University, Zhenjiang, China</b></td><td>3</td></tr><tr><td><b>Sharif University of Technology</b></td><td>3</td></tr><tr><td>Department of Computer Science and Engineering, National Institute of Technology Uttarakhand, Srinagar Garhwal, India</td><td>3</td></tr><tr><td>Dept. of Mediamatics, Delft Univ. of Technol., Netherlands</td><td>3</td></tr><tr><td><b>Disney Research Pittsburgh, Pittsburgh, PA, USA</b></td><td>3</td></tr><tr><td><b>Electrical and Computer Engineering</b></td><td>3</td></tr><tr><td><b>Video Analytics Laboratory, SERC, Indian Institute of Science, Bangalore, India</b></td><td>3</td></tr><tr><td><b>School of Electronics and Information Engineering, Tianjin University, Tianjin, China</b></td><td>3</td></tr><tr><td><b>Cornell University, USA</b></td><td>3</td></tr><tr><td>Department of Information Science and Engineering, Changzhou University, Changzhou, China</td><td>3</td></tr><tr><td><b>International Center of Excellence on Intelligent Robotics and Automation Research, National Taiwan University, Taiwan</b></td><td>3</td></tr><tr><td><b>Department of Informatics, University of Thessaloniki, 54124, Greece</b></td><td>3</td></tr><tr><td><b>Department of Electrical and Computer Engineering, University of Dayton, Ohio, USA</b></td><td>3</td></tr><tr><td><b>Department of Electrical and Computer Engineering, University of Windsor, Canada</b></td><td>3</td></tr><tr><td><b>Graduate School of Shenzhen, Tsinghua University, Beijing, China</b></td><td>3</td></tr><tr><td><b>Hanoi University of Science and Technology, Hanoi, Vietnam</b></td><td>3</td></tr><tr><td><b>Department of Electrical Engineering, Korea Advanced Institute of Science and Technology, Korea</b></td><td>3</td></tr><tr><td><b>Institute of Computational Science, University of Lugano, Switzerland</b></td><td>3</td></tr><tr><td><b>Norwegian Biometrics Laboratory, NTNU 
- Gj&#x00F8;vik, Norway</b></td><td>3</td></tr><tr><td><b>Institute of Technology and Science, Tokushima University, 2-1 Minamijyousanjima, 770-8506, Japan</b></td><td>3</td></tr><tr><td><b>LTCI, CNRS, T&#x00E9;l&#x00E9;com ParisTech, Universit&#x00E9; Paris-Saclay, 75013, France</b></td><td>3</td></tr><tr><td><b>National Lab of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, Beijing, China</b></td><td>3</td></tr><tr><td><b>School of Electronic Information and Electrical Engineering, Shanghai Jiao Tong University, Dongchuan Road 800, Minhang District, Shanghai, 200240, China</b></td><td>3</td></tr><tr><td><b>School of Electrical and Electronic Engineering, Nanyang Technological University, Singapore, Singapore</b></td><td>3</td></tr><tr><td><b>Multimedia and Intelligent Software Technology Beijing Municipal Key Lab., College of Computer Science, Beijing University of Technology Beijing, China.</b></td><td>3</td></tr><tr><td><b>Institute of Imaging & Computer Vision, RWTH Aachen University, Aachen, Germany</b></td><td>3</td></tr><tr><td><b>Korea University</b></td><td>3</td></tr><tr><td><b>Centre for Quantum Computation and Intelligent Systems, University of Technology Sydney, Sydney, NSW, Australia</b></td><td>3</td></tr><tr><td><b>Shenzhen Key Laboratory of Broadband Network and Multimedia, Graduate School at Shenzhen, Tsinghua University, Shenzhen, China</b></td><td>3</td></tr><tr><td><b>TCS Research, New Delhi, India</b></td><td>3</td></tr><tr><td><b>University of North Carolina Wilmington, Wilmington, NC</b></td><td>3</td></tr><tr><td><b>Faculty of Electrical Engineering, University of Ljubljana, Trzaska cesta 25, SI-1000 Ljubljana, Slovenia</b></td><td>3</td></tr><tr><td><b>Research Institute of Shenzhen, Wuhan University, Shenzhen, China</b></td><td>3</td></tr><tr><td><b>Shanghai University</b></td><td>3</td></tr><tr><td><b>Department of Electrical and Computer Engineering, Binghamton University, Binghamton, 
NY</b></td><td>3</td></tr><tr><td><b>Nanyang Technological University, Singapore, Singapore</b></td><td>3</td></tr><tr><td><b>New York University, New York, NY, USA</b></td><td>3</td></tr><tr><td><b>School of Electronics and Computer Science, University of Southampton, United Kingdom</b></td><td>3</td></tr><tr><td><b>Department of Computer Science, University of Massachusetts Amherst, Amherst MA, 01003</b></td><td>3</td></tr><tr><td><b>Key Laboratory of Intelligent Perception and Image Understanding of Ministry of Education, International Research Center for Intelligent Perception and Computation, Xidian University, Xi’an, China</b></td><td>3</td></tr><tr><td><b>Center for Machine Perception, Department of Cybernetics, Faculty of Electrical Engineering, Czech Technical University in Prague, 166 27 Prague 6, Technická 2 Czech Republic</b></td><td>3</td></tr><tr><td><b>Computer Laboratory, University of Cambridge, United Kingdom</b></td><td>3</td></tr><tr><td><b>Department of Computer Science, University of Texas at San Antonio, San Antonio, TX, USA</b></td><td>3</td></tr><tr><td>Institute for Infocomm Research, A*STAR, Singapore, Singapore</td><td>3</td></tr><tr><td><b>South China University of Technology, China</b></td><td>3</td></tr><tr><td><b>Visionlab, Heriot-Watt University, Edinburgh, UK</b></td><td>3</td></tr><tr><td><b>Institute for Infocomm Research, A*STAR, Singapore</b></td><td>3</td></tr><tr><td><b>Korea Advanced Institute of Science and Technology (KAIST), Republic of Korea</b></td><td>3</td></tr><tr><td><b>Xerox Research Center, Webster, NY, USA</b></td><td>3</td></tr><tr><td>Ashikaga Institute of Technology, Ashikaga, Japan</td><td>3</td></tr><tr><td><b>Department of Engineering and Environment, Northumbria University, Newcastle Upon Tyne, Tyne and Wear</b></td><td>3</td></tr><tr><td><b>College of Information Science and Engineering, Ritsumeikan University, Kusatsu, Japan</b></td><td>3</td></tr><tr><td>Institute of Applied Computer Science, Kiel 
University of Applied Sciences, Kiel, Germany</td><td>3</td></tr><tr><td><b>School of Creative Technologies, University of Portsmouth, Portsmouth, POI 2DJ, UK</b></td><td>3</td></tr><tr><td><b>Universit&#x00E9; de Lyon, CNRS, INSA-Lyon, LIRIS, UMR5205, France</b></td><td>3</td></tr><tr><td><b>Faculty of Electronic Information and Electrical Engineering, School of Information and Communication Engineering, Dalian University of Technology, Dalian, China</b></td><td>3</td></tr><tr><td><b>Affectiva Inc., Waltham, MA, USA</b></td><td>3</td></tr><tr><td><b>Department of Electronics and Communication Engineering, Sun Yat-Sen University, Guangzhou, China</b></td><td>3</td></tr><tr><td><b>Department of Computer Science, Korea Advanced Institute of Science and Technology, 291 Daehak-ro, Yuseong-gu, Daejeon 305-701, Republic of Korea</b></td><td>3</td></tr><tr><td><b>Dept. of Electrical and Computer Engineering & Centre for Intelligent Machines, McGill University, Montreal, Quebec, Canada</b></td><td>3</td></tr><tr><td><b>Centre for Vision, Speech and Signal Processing, University of Surrey, Surrey, UK</b></td><td>3</td></tr><tr><td><b>Computer Vision and Image Processing Lab, Institute for Integrated and Intelligent Systems, Griffith University, Australia</b></td><td>3</td></tr><tr><td><b>Department of Computer Science and Engineering, Michigan State University, East Lansing, MI, USA</b></td><td>3</td></tr><tr><td><b>Institute of Software, College of Computer, National University of Defense Technology, Changsha, Hunan, China, 410073</b></td><td>3</td></tr><tr><td><b>Department of Electrical and Computer Engineering, Northeastern University, Boston, USA, 02115</b></td><td>3</td></tr><tr><td><b>AltumView Systems Inc., Burnaby, BC, Canada</b></td><td>3</td></tr><tr><td>Central China Normal University, Wuhan, China</td><td>3</td></tr><tr><td><b>Sapienza University of Rome</b></td><td>3</td></tr><tr><td><b>Center for Biometrics and Security Research &amp; National Laboratory of 
Pattern Recognition, Institute of Automation, Chinese Academy of Sciences</b></td><td>3</td></tr><tr><td><b>Computer Vision Lab, Sungkyunkwan University Suwon, South Korea</b></td><td>3</td></tr><tr><td><b>Beijing Key Laboratory of Multimedia and Intelligent Software Technology, College of Metropolitan Transportation, Beijing University of Technology, Beijing, China</b></td><td>3</td></tr><tr><td><b>Department of Computer Science, Vogt-Koelln-Strasse 30, 22527 Hamburg - Germany</b></td><td>3</td></tr><tr><td><b>Faculty of Engineering, Shinshu University, Nagano, Japan</b></td><td>3</td></tr><tr><td><b>Institute for Creative Technologies, University of Southern California, 12015 E Waterfront Dr, Los Angeles, CA, USA</b></td><td>3</td></tr><tr><td><b>National Engineering Research Center for E-Learning, Central China Normal University, Wuhan, China</b></td><td>3</td></tr><tr><td><b>Department of Electrical Engineering, National Taiwan University, Taipei, Taiwan, R.O.C</b></td><td>3</td></tr><tr><td><b>Vision Lab in Department of Electrical and Computer Engineering, Old Dominion University, Norfolk, VA, 23529</b></td><td>3</td></tr><tr><td><b>Center for Research of E-life DIgital Technology (CREDIT), Department of Computer Science and Information Engineering, National Cheng Kung University, Tainan, Taiwan</b></td><td>3</td></tr><tr><td><b>Graduate School of Convergence Science and Technology, Seoul National University, Seoul, Korea</b></td><td>3</td></tr><tr><td><b>E-Comm Research Lab, Infosys Limited, Bangalore, India</b></td><td>3</td></tr><tr><td><b>College of Mechanical and Electrical Engineering, Nanjing University of Aeronautics and Astronautics, Nanjing, China</b></td><td>3</td></tr><tr><td>Chongqing University of Posts and Telecommunications Chongqing, China</td><td>3</td></tr><tr><td><b>National Laboratory of Pattern Recognition, Center for Biometrics and Security Research, Institute of Automation, Chinese Academy of Sciences, Beijing, 
China</b></td><td>3</td></tr><tr><td>School of Computer Science and Software Engineering University of Wollongong, Australia</td><td>3</td></tr><tr><td>Phonexia, Brno-Krlovo Pole, Czech Republic</td><td>3</td></tr><tr><td><b>Expert Systems, Modena, Italy</b></td><td>3</td></tr><tr><td><b>Chair of Complex & Intelligent Systems, University of Passau, Passau, Germany</b></td><td>3</td></tr><tr><td><b>Stanford University, Palo Alto, CA, USA</b></td><td>3</td></tr><tr><td>Department of Applied Mechanics, Chalmers University of Technology, SE-412 96 Göteborg, Sweden</td><td>3</td></tr><tr><td>Technische Universität München, Munich, Germany</td><td>3</td></tr><tr><td><b>Laboratory for Intelligent and Safe Automobiles, University of California, San Diego, USA</b></td><td>3</td></tr><tr><td><b>Toyota Research Institute</b></td><td>3</td></tr><tr><td><b>Image and Video Research Lab, Queensland University of Technology, 2 George Street, GPO Box 2434, Brisbane, QLD 4001, Australia</b></td><td>3</td></tr><tr><td><b>School of Computer Science and Engineering, Nanjing University of Science and Technology</b></td><td>3</td></tr><tr><td><b>The University of Newcastle, NSW, Australia</b></td><td>3</td></tr><tr><td><b>Instituto Nacional de Astrofísica, Óptica y Electrónica, Luis Enrique Erro #1, Tonantzintla, Puebla, Mexico</b></td><td>3</td></tr><tr><td><b>NLPR, Institute of Automation, Chinese Academy of Sciences</b></td><td>3</td></tr><tr><td><b>Center for Research on Intelligent Perception and Computing, National Laboratory of Pattern Recognition, Institute of Automation, CAS Center for Excellence in Brain Science and Intelligence Technology, Chinese Academy of Sciences, Beijing 100190, China</b></td><td>3</td></tr><tr><td><b>Columbia University, New York, USA</b></td><td>3</td></tr><tr><td><b>T&#x00E9;l&#x00E9;com Lille, CRIStAL UMR (CNRS 9189), France</b></td><td>3</td></tr><tr><td><b>IMPCA, Curtin University, Australia</b></td><td>3</td></tr><tr><td><b>Faculty of Informatics 
and Computing, Universiti Sultan Zainal Abidin, Besut Campus, 22200 Besut, Terengganu, Malaysia</b></td><td>3</td></tr><tr><td><b>Concordia University</b></td><td>3</td></tr><tr><td><b>State Key Laboratory of Digital Manufacturing Equipment and Technology, Huazhong University of Science and Technology, Wuhan, China</b></td><td>3</td></tr><tr><td>University of California, Los Angeles, CA Dept. of Electrical Engineering</td><td>3</td></tr><tr><td><b>University Of Electronic Science And Technology Of China, China</b></td><td>3</td></tr><tr><td><b>IBM Research</b></td><td>3</td></tr><tr><td>Academia Sinica, Taipei, Taiwan Roc</td><td>3</td></tr><tr><td><b>Faculty of Electrical Engineering, University of Ljubljana, Trzaska 25, SI-1000 Ljubljana, Slovenia</b></td><td>3</td></tr><tr><td><b>Computational Biomedicine Lab, Department of Computer Science, University of Houston, Houston, TX, USA</b></td><td>3</td></tr><tr><td><b>Center for Digital Media Computing, Software School, Xiamen University, Xiamen 361005, China</b></td><td>3</td></tr><tr><td><b>University of Milan, Italy</b></td><td>3</td></tr><tr><td><b>State Key Laboratory on Intelligent Technology and Systems, National Laboratory for Information Science and Technology, Department of Computer Science and Technology, Tsinghua University, China</b></td><td>3</td></tr><tr><td><b>School of Electronic and Information Engineering, South China University of Technology, Guangzhou, Guangdong, China</b></td><td>3</td></tr><tr><td>Dept. 
of Cybernetics and Artificial Intelligence, FEI TU of Košice, Slovak Republic</td><td>3</td></tr><tr><td><b>Department of Mathematics and Informatics, University of Florence, Florence, Italy</b></td><td>3</td></tr><tr><td><b>Key Lab of Intelligent Information Processing, Institute of Computer Technology, Chinese Academy of Science (CAS), Beijing, 100190, China</b></td><td>3</td></tr><tr><td>Image and Video Systems Lab, School of Electrical Engineering, KAIST, Republic of Korea</td><td>3</td></tr><tr><td><b>Evolutionary Computation Research Group, Victoria University of Wellington, PO Box 600, Wellington 6140, New Zealand</b></td><td>3</td></tr><tr><td><b>School of Electronics and Information, Northwestern Polytechnical University, Xi’an, China</b></td><td>3</td></tr><tr><td><b>National Laboratory of Pattern Recognition, Center for Research on Intelligent Perception and Computing, CAS Center for Excellence in Brain Science and Intelligence Technology, Institute of Automation, Chinese Academy of Sciences, Beijing, China</b></td><td>3</td></tr><tr><td><b>Faculty of Electrical Engineering, Mathematics and Computer Science, University of Twente, P.O.Box 217 7500 AE Enschede, The Netherlands</b></td><td>3</td></tr><tr><td><b>MindLAB Research Group, Universidad Nacional de Colombia, Colombia</b></td><td>3</td></tr><tr><td><b>IntelliView Technologies Inc., Calgary, AB, Canada</b></td><td>3</td></tr><tr><td><b>Department of Electronic Engineering, National Taipei University of Technology, Taipei, Taiwan</b></td><td>3</td></tr><tr><td>Information and media processing laboratories, NEC Corporation</td><td>3</td></tr><tr><td>Southern Illinois University at Carbondale, IL, USA</td><td>3</td></tr><tr><td><b>School of Behavioral and Brain Sciences, University of Texas at Dallas, Richardson, 75080, USA</b></td><td>3</td></tr><tr><td><b>Dept. 
of Automation and Applied Informatics, Politehnica University of Timisoara, Romania</b></td><td>3</td></tr><tr><td><b>Queen Mary University of London</b></td><td>3</td></tr><tr><td>School of Automation and Electrical Engineering, University of Science and Technology Beijing, 100083, China</td><td>3</td></tr><tr><td><b>Michigan State University, East Lansing, 48824, USA</b></td><td>3</td></tr><tr><td><b>The Hong Kong Polytechnic University, Hong Kong, China</b></td><td>3</td></tr><tr><td><b>Peking University, China / Shanghai Jiao Tong University, China</b></td><td>3</td></tr><tr><td><b>Department of Electronics, AGH University of Science and Technology, Kraków, Poland</b></td><td>3</td></tr><tr><td>School of Software, Jiangxi Normal University, Nanchang, China</td><td>3</td></tr><tr><td>Department of Computer Science, Pontificia Universidad Cato&#x00B4;lica de Chile</td><td>3</td></tr><tr><td><b>Faculty of Information Technology, Ho Chi Minh City University of Science, VNU-HCM, District 5, Ho Chi Minh City, Vietnam</b></td><td>3</td></tr><tr><td>Fujitsu Laboratories, Kawasaki, Kanagawa, Japan</td><td>3</td></tr><tr><td>Department of Electronic and Computer Engineering National Taiwan University of Science and Technology</td><td>3</td></tr><tr><td><b>Georgia Institute of Technology, Atlanta, 30332-0250, USA</b></td><td>3</td></tr><tr><td><b>Tongji University, Shanghai, China</b></td><td>3</td></tr><tr><td><b>Department of Electrical and Computer Engineering, Vision Laboratory, Old Dominion University, Norfolk, VA, USA</b></td><td>3</td></tr><tr><td><b>Centre for Quantum Computation and Intelligent Systems, Faculty of Engineering and Information Technology, University of Technology Sydney, Ultimo, NSW, Australia</b></td><td>3</td></tr><tr><td><b>School of Information Science and Engineering, Xiamen University, Xiamen 361005, China</b></td><td>3</td></tr><tr><td><b>University of California San Diego, USA</b></td><td>3</td></tr><tr><td><b>HCC Lab, Vision &amp; Sensing 
Group, University of Canberra, Australia</b></td><td>3</td></tr><tr><td><b>Guangdong Provincial Key Laboratory of Computer Vision and Virtual Reality Technology, Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences, Shenzhen, China</b></td><td>3</td></tr><tr><td><b>REGIM-Labo: REsearch Groups in Intelligent Machines, University of Sfax, ENIS, BP 1173, Sfax, 3038, Tunisia</b></td><td>3</td></tr><tr><td><b>School of Computer Science and Educational Software, Guangzhou University, Guangzhou, China</b></td><td>3</td></tr><tr><td><b>IBM Thomas J. Watson Research Center, Yorktown Heights, NY, USA</b></td><td>3</td></tr><tr><td><b>School of physics and engineering, Sun Yat-Sen University, GuangZhou, China</b></td><td>3</td></tr><tr><td>New York University Abu Dhabi & NYU Tandon School of Engineering, Abu Dhabi, Uae</td><td>3</td></tr><tr><td>Intelligent Vision Research Lab, Department of Computer Science, Federal University of Bahia</td><td>3</td></tr><tr><td><b>FDNA inc., Herzliya, Israel</b></td><td>3</td></tr><tr><td><b>Department of Mathematics & Computer Science, Philipps-Universität Marburg, D-35032, Germany</b></td><td>3</td></tr><tr><td><b>Australian Center for Visual Technologies, and School of Computer Science, The University of Adelaide, Adelaide, Australia</b></td><td>3</td></tr><tr><td>Department of Electronic Measuring systems, Moscow Engineering Physics Institute, National Research Nuclear University MEPhI, Moscow, Russia</td><td>3</td></tr><tr><td><b>School of Electrical and Electronic Engineering, Nanyang Technological University, Singapore, 639798</b></td><td>3</td></tr><tr><td><b>IT - Instituto de Telecomunicações, University of Beira Interior, Covilhã, Portugal</b></td><td>3</td></tr><tr><td><b>National University of Defence Technology, Changsha 410000, China</b></td><td>2</td></tr><tr><td>National Ilan University, Ilan, Taiwan Roc</td><td>2</td></tr><tr><td><b>Elektrik-Elektronik Mühendisliği Bölümü, Eskişehir Osmangazi 
Üniversitesi, Turkey</b></td><td>2</td></tr><tr><td><b>Elektrik - Elektronik Mühendisliği Bölümü, Atılım Üniversitesi, Ankara, Türkiye</b></td><td>2</td></tr><tr><td>China Electronics Standardization Institute, Beijing, 100007</td><td>2</td></tr><tr><td><b>School of Reliability and System Engineering, Science and Technology on Reliability and Environmental Engineering Laboratory, Beihang University, Beijing, China</b></td><td>2</td></tr><tr><td><b>Department of Computer Science, Kent State University, OH 44242, U.S.A.</b></td><td>2</td></tr><tr><td><b>Machine Intelligence Unit, Indian Statistical Institute, Kolkata, India</b></td><td>2</td></tr><tr><td><b>Computational Biomedicine Lab, University of Houston</b></td><td>2</td></tr><tr><td><b>Lane Department of Computer Science and Electrical Engineering, West Virginia University, Morgantown WV 26506, USA</b></td><td>2</td></tr><tr><td><b>Bilgisayar Mühendisliği Bölümü, İstanbul Teknik Üniversitesi, İstanbul, Turkiye</b></td><td>2</td></tr><tr><td>Universidade Nova de Lisboa, Caparica, Portugal</td><td>2</td></tr><tr><td>Universidad Tecnica Federico Santa Maria, Department of Electronic Engineering, Valparaiso, Chile</td><td>2</td></tr><tr><td>Dept. of Comput. Syst., Univ. of Technol., Sydney, NSW, Australia</td><td>2</td></tr><tr><td><b>Harvard University, Cambridge, MA, USA</b></td><td>2</td></tr><tr><td><b>Michigan State University, East Lansing, MI, U.S.A.</b></td><td>2</td></tr><tr><td><b>Department of Computer Science, National Tsing Hua University, Taiwan</b></td><td>2</td></tr><tr><td>Dept. of Comput. 
Sci., York Univ., UK</td><td>2</td></tr><tr><td><b>CSE, SUNY at Buffalo, USA</b></td><td>2</td></tr><tr><td><b>Department of Computer Engineering, Mahanakorn University of Technology, 140 Cheum-Sampan Rd., Nong Chok, Bangkok THAILAND 10530</b></td><td>2</td></tr><tr><td>The Australian National University RSCS, ANU, Canberra, Australia</td><td>2</td></tr><tr><td><b>University of Newcastle, Australia</b></td><td>2</td></tr><tr><td>Dept. of Computer Science, YiLi Normal College, Yining, China 835000</td><td>2</td></tr><tr><td>School of Computing and Communications, Faculty of Engineering and Information Technology, University of Technology, Sydney, Australia</td><td>2</td></tr><tr><td><b>DISI, University of Trento, Italy</b></td><td>2</td></tr><tr><td><b>LAPI, University Politehnica of Bucharest, Romania</b></td><td>2</td></tr><tr><td><b>University of Colorado at Colorado Springs, Colorado Springs, CO, USA</b></td><td>2</td></tr><tr><td><b>University of Twente, Enschede, Netherlands</b></td><td>2</td></tr><tr><td><b>Department of Mechanical Engineering, National Taiwan University, 10647, Taipei, Taiwan</b></td><td>2</td></tr><tr><td><b>Institution for Infocomm Research, Connexis, Singapore</b></td><td>2</td></tr><tr><td><b>Department of d’Informàtica, Universitat de València, Valencia, Spain</b></td><td>2</td></tr><tr><td><b>Toyota Research Institute, Cambridge, MA, USA</b></td><td>2</td></tr><tr><td><b>Research Centre for Computers, Communication and Social Innovation La Trobe University, Victoria - 3086, Australia</b></td><td>2</td></tr><tr><td><b>IBM Thomas J. 
Watson, Research Center, Yorktown Heights, New York 10598, USA</b></td><td>2</td></tr><tr><td><b>Institute of Computing, University of Campinas (UNICAMP), SP, 13083-852, Brazil</b></td><td>2</td></tr><tr><td><b>IFRJDL, Institute of Computing Technology, CAS, P.O.Box 2704, Beijing, China, 100080</b></td><td>2</td></tr><tr><td><b>Computer Science Department, University of Southern California, Los Angeles, 90089, United States of America</b></td><td>2</td></tr><tr><td><b>Department of Signal Processing, Tampere University of Technology, Tampere, Finland</b></td><td>2</td></tr><tr><td><b>JD Artificial Intelligence Research, Beijing, China</b></td><td>2</td></tr><tr><td><b>STARS team, Inria Sophia Antipolis-Méditerranée, Sophia Antipolis, France</b></td><td>2</td></tr><tr><td><b>Agency for Science, Technology and Research (A*STAR), Institute of High Performance Computing, Singapore</b></td><td>2</td></tr><tr><td><b>Delft University of Technology</b></td><td>2</td></tr><tr><td><b>Carnegie Mellon University, Pittsburgh, Pennsylvania 15213, USA</b></td><td>2</td></tr><tr><td>Department of Electrical and Computer Engineering, Singapore</td><td>2</td></tr><tr><td>Dept. of ECE &amp; Digital Technology Center, Univ. of Minnesota, USA</td><td>2</td></tr><tr><td><b>Department of Computer Science, Wayne State University, Detroit, MI, USA</b></td><td>2</td></tr><tr><td><b>Dept. 
of Computer Science, Yonsei University, Seoul, South Korea, 120-749</b></td><td>2</td></tr><tr><td><b>Division of Graduate Studies, Tijuana Institute of Technology, M&#x00E9;xico</b></td><td>2</td></tr><tr><td><b>School of Engineering and Digital Arts, University of Kent, Canterbury, Kent CT2 7NT, United Kingdom</b></td><td>2</td></tr><tr><td>Instituto de Telecomunicações & Faculdade de Ciěncias da Universidade do Porto</td><td>2</td></tr><tr><td><b>Faculty of Science and Technology, University of Macau, Macau, China</b></td><td>2</td></tr><tr><td><b>Department of Electrical and Computer Engineering and the Center for Automation Research, UMIACS, University of Maryland, College Park, MD</b></td><td>2</td></tr><tr><td><b>Visual Analysis of People (VAP) laboratory, Aalborg University, Denmark</b></td><td>2</td></tr><tr><td><b>Instituto de Telecomunicações, Instituto Superior Técnico, Universidade de Lisboa, Portugal</b></td><td>2</td></tr><tr><td><b>School of Computer Science, Northwestern Polytechnical University, Xi’an, China</b></td><td>2</td></tr><tr><td><b>Escuela Politecnica Superior, Universidad Autonoma de Madrid, Madrid, Spain</b></td><td>2</td></tr><tr><td><b>SUPELEC / IETR, Avenue de la Boulaie, 35576 Cesson Sevigne, France</b></td><td>2</td></tr><tr><td><b>Dept. 
of Computer Science & Engineering, University of South Florida, Tampa, 33620, United States of America</b></td><td>2</td></tr><tr><td>Department of Information Management, National Formosa University, Huwei, Yulin 632, Taiwan</td><td>2</td></tr><tr><td>Dept of Computer and Informatics Engineering, Eastern Macedonia and Thrace Institute of Technology, Kavala, Greece</td><td>2</td></tr><tr><td><b>Fraunhofer Institute of Optronics, System Technologies and Image Exploitation IOSB, Fraunhoferstrasse 1, 76131 Karlsruhe, Germany</b></td><td>2</td></tr><tr><td><b>Department of Electrical and Computer Engineering, New Jersey Institute of Technology, University Heights, Newark, New Jersey 07102, USA</b></td><td>2</td></tr><tr><td><b>Department of Electronic Engineering, Shanghai Jiao Tong University</b></td><td>2</td></tr><tr><td><b>College of Computer and Information, Hohai University, Nanjing, China</b></td><td>2</td></tr><tr><td><b>Department of Information Systems and Cyber Security and the Department of Electrical and Computer Engineering, University of Texas at San Antonio, San Antonio, TX, USA</b></td><td>2</td></tr><tr><td><b>Department of Electrical and Computer Engineering, University of Texas at San Antonio, San Antonio, TX, USA</b></td><td>2</td></tr><tr><td><b>Electronics &amp; Telecommunications Research Institute (ETRI), Daejeon, Korea</b></td><td>2</td></tr><tr><td>University of Ulm, Ulm, Germany</td><td>2</td></tr><tr><td><b>Electrical and Computer Engineering Department, University of Windsor, Ontario, Canada N9B 3P4</b></td><td>2</td></tr><tr><td><b>National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, China</b></td><td>2</td></tr><tr><td><b>Department of Computer Science, The University of Hong Kong</b></td><td>2</td></tr><tr><td>Dept. of Eng. 
Sci., Oxford Univ., UK</td><td>2</td></tr><tr><td><b>Vision and Fusion Lab, Karlsruhe Institute of Technology KIT, Adenauerring 4, Karlsruhe, Germany</b></td><td>2</td></tr><tr><td><b>Department of Electrical Engineering, Korea Advanced Institute of Science and Technology (KAIST), Republic of Korea</b></td><td>2</td></tr><tr><td><b>Facial Image Processing and Analysis Group, Institute for Anthropomatics, Karlsruhe Institute of Technology, D-76131 Karlsruhe, P.O. Box 6980 Germany</b></td><td>2</td></tr><tr><td><b>Delft University of Technology, Mekelweg 4, Netherlands</b></td><td>2</td></tr><tr><td>Human-Machines Interaction (HMI) Laboratory, Department of Industrial Informatics, TEI of Kavala, Kavala, Greece</td><td>2</td></tr><tr><td><b>Department of Computer Science and Engineering, Michigan State University</b></td><td>2</td></tr><tr><td>Dept. of ECE, Maryland Univ., College Park, MD, USA</td><td>2</td></tr><tr><td><b>Research Institute of Intelligent Control and Systems, Harbin Institute of Technology, Harbin, China</b></td><td>2</td></tr><tr><td>Department of Computer Engineering, TOBB University of Economics and Technology, Ankara, Turkey</td><td>2</td></tr><tr><td><b>National University of Defense and Technology</b></td><td>2</td></tr><tr><td><b>School of Computer Science, CECS, Australian National University, Australia</b></td><td>2</td></tr><tr><td>Electrical &amp; Electronic Engineering Department, Mevlana University Konya, Turkey</td><td>2</td></tr><tr><td><b>Institute for Computational and Mathematical Engineering, Stanford University, Stanford, CA, USA</b></td><td>2</td></tr><tr><td><b>Institute of Electronics, National Chiao Tung University, Hsinchu, Taiwan</b></td><td>2</td></tr><tr><td>GIPSA Laboratory, Image and Signal Department, Grenoble Institute of Technology, Grenoble, France</td><td>2</td></tr><tr><td><b>Florida International University</b></td><td>2</td></tr><tr><td><b>Gradate School of Information Production and System, Waseda University, 
Kitakyushu, Japan 808-0135</b></td><td>2</td></tr><tr><td>Department of Computer Science and Engineering, National Institute of Technology, Rourkela, Odisha, India</td><td>2</td></tr><tr><td><b>Graduate School of Information, Production and Systems, Waseda University, Japan</b></td><td>2</td></tr><tr><td>Department of Computer Science and Engineering, National Taiwan Ocean University, No.2, Beining Rd., Keelung 202, Taiwan</td><td>2</td></tr><tr><td><b>Tampere University of Technology, Finland</b></td><td>2</td></tr><tr><td><b>Department of Electrical Engineering, National Chiao Tung University, Hsinchu, Taiwan</b></td><td>2</td></tr><tr><td><b>Biodata Mining Group, Technical Faculty, Bielefeld University, Germany</b></td><td>2</td></tr><tr><td><b>Chungnam National University, Daejeon, South Korea</b></td><td>2</td></tr><tr><td>Bilgisayar M&#x00FC;hendisli&#x011F;i B&#x00F6;l&#x00FC;m&#x00FC;, Deniz Harp Okulu, &#x0130;stanbul, T&#x00FC;rkiye</td><td>2</td></tr><tr><td><b>IETR, CNRS UMR 6164, Supelec, Cesson-Sevigne, France</b></td><td>2</td></tr><tr><td><b>Institute of Intelligent Systems and Robotics (ISIR), Pierre and Marie Curie University , Paris, France</b></td><td>2</td></tr><tr><td><b>University of Technology, Sydney, NSW, Australia</b></td><td>2</td></tr><tr><td>Statistical Machine Intelligence &amp; LEarning, School of Computer Science &amp; Engineering University of Electronic Science and Technology of China, 611731, China</td><td>2</td></tr><tr><td><b>Department of Computer Science and Information Engineering, College of Electrical Engineering and Computer Science, National Central University, Jhongli, Taiwan</b></td><td>2</td></tr><tr><td><b>Department of Electrical and Electronics Engineering, Universiti Teknologi PETRONAS, Seri Iskandar, 32610, Perak Malaysia</b></td><td>2</td></tr><tr><td><b>School of Computer Science and Engineering, Nanyang Technological University, Singapore639798</b></td><td>2</td></tr><tr><td><b>West Virginia 
University</b></td><td>2</td></tr><tr><td><b>Czech Technical University in Prague, Prague, Czech Rep</b></td><td>2</td></tr><tr><td>Masaryk University, Brno, Czech Rep</td><td>2</td></tr><tr><td>Charles University, Prague, Czech Rep</td><td>2</td></tr><tr><td>Dept. of Electr. & Comput. Eng., McMaster Univ., Hamilton, Ont., Canada</td><td>2</td></tr><tr><td><b>Nanjing Children's Hospital Affiliated to Nanjing Medical University, Nanjing, China</b></td><td>2</td></tr><tr><td><b>College of Telecommunications &amp; Information Engineering, Nanjing University of Posts and Telecommunications, Nanjing, China</b></td><td>2</td></tr><tr><td><b>School of Telecommunications Engineering, Xidian University, Xi&#x2019;an, China</b></td><td>2</td></tr><tr><td><b>Nanjing University of Science and Technology, Xiaolingwei, Xuanwu, Nanjing, China</b></td><td>2</td></tr><tr><td><b>London Healthcare Sciences Centre, London, ON, Canada</b></td><td>2</td></tr><tr><td>Department of Computer and Informatics Engineering, Eastern Macedonia and Thrace Institute of Technology, Human Machines Interaction (HMI) Laboratory, 65404 Kavala, Greece</td><td>2</td></tr><tr><td><b>School of Electrical Engineering and Computer Science, Seoul National University, Korea</b></td><td>2</td></tr><tr><td><b>Jordan University of Science and Technology, Irbid, Jordan</b></td><td>2</td></tr><tr><td><b>College of Medical Information Engineering, Guangdong Pharmaceutical University, Guangzhou, China</b></td><td>2</td></tr><tr><td><b>University of Michigan</b></td><td>2</td></tr><tr><td><b>Biometric Technologies Laboratory, Department of Electrical and Computer Engineering, University of Calgary, Alberta, T2N 1N4 Canada</b></td><td>2</td></tr><tr><td><b>Morpho, SAFRAN Group, 11 Boulevard Galli&#x00E9;ni 92130 Issy-Les-Moulineaux - France</b></td><td>2</td></tr><tr><td><b>Center for Machine Vision Research, University of Oulu, Finland</b></td><td>2</td></tr><tr><td><b>Department of Computer Science, Aalto University, 
Finland</b></td><td>2</td></tr><tr><td><b>Norwegian Biometrics Laboratory, Norwegian University of Science and Technology (NTNU), 2802 Gjøvik, Norway</b></td><td>2</td></tr><tr><td><b>International Institute of Information Technology (IIIT) Hyderabad, India</b></td><td>2</td></tr><tr><td><b>Computer Laboratory, University of Cambridge, Cambridge, UK</b></td><td>2</td></tr><tr><td><b>Department of Electronic Systems, Aalborg University, Denmark</b></td><td>2</td></tr><tr><td><b>Artificial Intelligence and Information Analysis Lab, Department of Informatics, Aristotle University of Thessaloniki, Greece</b></td><td>2</td></tr><tr><td>University of British Columbia Department of Electrical and Computer Engineering</td><td>2</td></tr><tr><td><b>Department of Computer Science, Swansea University, Swansea, UK</b></td><td>2</td></tr><tr><td><b>Computer Science and Technology, IIEST, Shibpur</b></td><td>2</td></tr><tr><td><b>Amirkabir University of Technology, Tehran, Iran</b></td><td>2</td></tr><tr><td><b>EURECOM, Sophia Antipolis, France</b></td><td>2</td></tr><tr><td>School of Computer Science and Technology, Harbin Institute of Technology Shenzhen Graduate School, Shenzhen, China</td><td>2</td></tr><tr><td><b>Department of Electrical and Computer Engineering, University of Delaware, Newark, DE, USA</b></td><td>2</td></tr><tr><td><b>School of Electrical and Electronic Engineering, Yonsei University, 50 Yonsei-ro, SEOUL, Republic of Korea</b></td><td>2</td></tr><tr><td>Department of Computer Science and Engineering, University of California, San Diego</td><td>2</td></tr><tr><td><b>Department of Computer Science and Technology, Tsinghua University, Beijing</b></td><td>2</td></tr><tr><td>University of Missouri Department of Electrical and Computer Engineering Columbia, MO, USA</td><td>2</td></tr><tr><td><b>School of Electrical and Computer Engineering, RMIT University, Melbourne, Australia</b></td><td>2</td></tr><tr><td>Inf. Syst. 
Dept., Buckingham Univ., UK</td><td>2</td></tr><tr><td><b>Key Laboratory of Machine Perception, Shenzhen Graduate School, Peking University, China</b></td><td>2</td></tr><tr><td>Dept. of Electr. & Comput. Eng., Old Dominion Univ., Norfolk, VA, USA</td><td>2</td></tr><tr><td><b>Department of Computer Science, Edge Hill University</b></td><td>2</td></tr><tr><td><b>Department of Psychology, University of Pittsburgh, PA, 15260, USA</b></td><td>2</td></tr><tr><td><b>The Robotics Institute, Carnegie Mellon University, Pittsburgh, PA, 15213, USA</b></td><td>2</td></tr><tr><td><b>National Central University, Taoyuan County, Taiwan</b></td><td>2</td></tr><tr><td>Department of Computer Science &amp; Engineering, POSTECH, Pohang, South Korea, 37673</td><td>2</td></tr><tr><td><b>Anhui University, HeFei, China</b></td><td>2</td></tr><tr><td><b>Signals and Systems Group, Faculty of EEMCS, University of Twente, the Netherlands</b></td><td>2</td></tr><tr><td><b>Research Center of Machine Learning and Data Analysis, School of Computer Science and Technology, Soochow University, Suzhou, China</b></td><td>2</td></tr><tr><td>Coursera and Stanford University</td><td>2</td></tr><tr><td><b>School of Computer Science, University of Windsor, Canada N9B 3P4</b></td><td>2</td></tr><tr><td><b>Laboratory Heudiasyc, University of Technology of Compiègne, BP 20529. F-60205, France</b></td><td>2</td></tr><tr><td><b>Dept. 
Electrical Engineering, National Taiwan University, Taipei, Taiwan</b></td><td>2</td></tr><tr><td><b>Elektrik-Elektronik Mühendisliği Bölümü, Bahçeşehir Üniversitesi, İstanbul, Turkey</b></td><td>2</td></tr><tr><td><b>University of Notre Dame</b></td><td>2</td></tr><tr><td><b>University of Ljubljana</b></td><td>2</td></tr><tr><td><b>Istanbul Technical University</b></td><td>2</td></tr><tr><td><b>Polytechnic School, University of Pernambuco, Recife, Brazil</b></td><td>2</td></tr><tr><td><b>Faculty of Technical Sciences, Singidunum University, Belgrade 11000, Serbia</b></td><td>2</td></tr><tr><td><b>Dept. of CSEE, University of Maryland, Baltimore County, Baltimore, MD 21250</b></td><td>2</td></tr><tr><td>Dept. of Electron. & Inf., Toyota Technol. Inst., Nagoya, Japan</td><td>2</td></tr><tr><td><b>Department of Computer Science, University of Maryland, College Park, MD</b></td><td>2</td></tr><tr><td>Department of Computer Science and Engineering, POSTECH, Pohang 790-784, Republic of Korea</td><td>2</td></tr><tr><td><b>School of Electronic Engineering and Computer Science, Queen Mary University of London, UK</b></td><td>2</td></tr><tr><td><b>University of the Witwatersrand</b></td><td>2</td></tr><tr><td><b>Star Technologies, USA</b></td><td>2</td></tr><tr><td>Dept. of Comput. Sci., New York State Univ., Binghamton, NY, USA</td><td>2</td></tr><tr><td>Dept. of Electrical Engineering, National Institute of Technology, Rourkela, India 769008</td><td>2</td></tr><tr><td><b>Division of Control, EEE, Nanyang Tech. Univ., Singapore</b></td><td>2</td></tr><tr><td><b>Department of Computer Science &amp; Engineering, University of Ioannina, 45110, Greece</b></td><td>2</td></tr><tr><td><b>Jiangsu University of Science and Technology, Zhenjiang, China</b></td><td>2</td></tr><tr><td><b>University of Valladolid (Spain), Dep. 
Of Systems Engineering and Automatic Control, Industrial Engineering School</b></td><td>2</td></tr><tr><td><b>Department of Computer Science, Mangalore University, India</b></td><td>2</td></tr><tr><td><b>Department of Computer Education, Sungkyunkwan University, Seoul, Republic of Korea</b></td><td>2</td></tr><tr><td>Department of Computer Science, Instituto de Telecomunicações, University of Beira Interior, Covilhã, Portugal</td><td>2</td></tr><tr><td><b>Key Laboratory of Intelligent Information Processing of Chinese Academy of Sciences (CAS), Institute of Computing Technology, CAS, Beijing, China</b></td><td>2</td></tr><tr><td><b>University of Pittsburgh, Pittsburgh, PA, USA</b></td><td>2</td></tr><tr><td><b>Xidian University, Xi'an, China</b></td><td>2</td></tr><tr><td><b>School of Electronic and Information Engineering, South China University of Technology, Guangzhou, China</b></td><td>2</td></tr><tr><td>School of Computer Science and Technology, Tianjin University&Tianjin Key Laboratory of Cognitive Computing and Application, Tianjin, China</td><td>2</td></tr><tr><td>NPU-VUB Joint AVSP Research Lab, School of Computer Science, Northwestern Polytechnical University (NPU) Shaanxi Key Lab on Speech and Image Information Processing, 127 Youyi Xilu, Xi'an 710072, China</td><td>2</td></tr><tr><td><b>CAS Center for Excellence in Brain Science and Intelligence Technology, National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, Beijing, China</b></td><td>2</td></tr><tr><td><b>Australian National University</b></td><td>2</td></tr><tr><td><b>Institute of Computing, University of Campinas (UNICAMP), Campinas, SP, 13083-852, Brazil</b></td><td>2</td></tr><tr><td><b>Sichuan Univ., Chengdu</b></td><td>2</td></tr><tr><td><b>Laboratory for Intelligent and Safe Automobiles, University of California San Diego, La Jolla, CA 92093 USA</b></td><td>2</td></tr><tr><td><b>Department of Computing, Imperial College London, London, 180 Queen’s 
Gate, UK</b></td><td>2</td></tr><tr><td><b>Australian Center for Visual Technologies, and School of Computer Science, University of Adelaide, Canberra, Australia</b></td><td>2</td></tr><tr><td><b>Bilgisayar Mühendisligi Bölümü, İstanbul Teknik Üniversitesi</b></td><td>2</td></tr><tr><td>Research&Advanced Technology Division of SAIC Motor Corporation Limited, Shanghai 201804, P.R China</td><td>2</td></tr><tr><td>Center for Research on Intelligent Perception and Computing, National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences (CASIA), Beijing, China</td><td>2</td></tr><tr><td><b>Center for Research on Intelligent Perception and Computing, National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, Beijing, China</b></td><td>2</td></tr><tr><td><b>Departamento de Computação, Universidade Federal do Piauí, Teresina, Brasil</b></td><td>2</td></tr><tr><td><b>Bilgisayar Mühendisliği Bölümü, Marmara Üniversitesi, İstanbul, Türkiye</b></td><td>2</td></tr><tr><td><b>Le2i FRE2005, CNRS, Arts et M&#x00E9;tiers, Univ. Bourgogne Franche-Comt&#x00E9;, UTBM, F-90010 Belfort, France</b></td><td>2</td></tr><tr><td><b>Graduate School of Engineering, Osaka University, 2-1 Yamadaoka, Suita, Osaka, 565-0871 Japan</b></td><td>2</td></tr><tr><td><b>Department of Computing, Imperial College London, U.K.</b></td><td>2</td></tr><tr><td>Dept. of Computer Science and Information Engineering, Southern Taiwan University of Science and Technology, Tainan City, Taiwan</td><td>2</td></tr><tr><td><b>Corp. Res. & Dev., Toshiba Corp., Tokyo, Japan</b></td><td>2</td></tr><tr><td>Dept. of Electronics and Telecommunication Engg., KCT's Late G.N. 
Sapkal college of Engineering, Nashik, India</td><td>2</td></tr><tr><td><b>Bilgisayar Mühendisliği Bölümü, Gebze Teknik Üniversitesi, Kocaeli, 41400, Türkiye</b></td><td>2</td></tr><tr><td><b>State Key Laboratory of Rail Traffic Control and Safety, Beijing Jiaotong University, China</b></td><td>2</td></tr><tr><td>Tencent Inc</td><td>2</td></tr><tr><td><b>State Key Laboratory of Management and Control for Complex Systems, Institute of Automation, Chinese Academy of Sciences, Beijing 100190</b></td><td>2</td></tr><tr><td><b>Faculty of Information Science and Technology (FIST), Multimedia University, Melaka, Malaysia</b></td><td>2</td></tr><tr><td><b>Fraunhofer IOSB, Fraunhoferstrasse 1, 76131 Karlsruhe, Germany</b></td><td>2</td></tr><tr><td>Facebook Inc., Menlo Park, CA, USA</td><td>2</td></tr><tr><td><b>Naval Research Laboratory, Washington DC</b></td><td>2</td></tr><tr><td>Computational Intelligence Lab, Institute of Informatics and Telecommunications, NCSR Demokritos, Athens, Greece</td><td>2</td></tr><tr><td><b>Computational Biomedicine Lab, Department of Computer Science, University of Houston, Houston, TX and Computational Intelligence Lab, Institute of Informatics and Telecommunications, NCSR Demokrit ...</b></td><td>2</td></tr><tr><td><b>Department of Electrical and Electronic Engineering, Imperial College London, United Kingdom</b></td><td>2</td></tr><tr><td><b>Department of Computer Science, National Tsing Hua University, Hsinchu, Taiwan</b></td><td>2</td></tr><tr><td><b>SPAWAR Systems Center Pacific, San Diego, California, USA</b></td><td>2</td></tr><tr><td><b>Department of Electrical Engineering, National Taiwan University, Taiwan</b></td><td>2</td></tr><tr><td><b>Department of Electrical Engineering, Ferdowsi University of Mashhad, Mashhad, Iran</b></td><td>2</td></tr><tr><td><b>Artificial Vision Laboratory, Dept. 
of Mechanical Engineering, National Taiwan University of Science and Technology, Taipei City, Taiwan 106</b></td><td>2</td></tr><tr><td><b>Microsoft Corporation, Redmond, WA, USA</b></td><td>2</td></tr><tr><td>Dept. of Electrical Engineering, National Tsing-Hua University, Taiwan</td><td>2</td></tr><tr><td>Department Informatik, Hamburg University of Applied Sciences, Hamburg, Germany</td><td>2</td></tr><tr><td>Department Informatik, Hamburg University of Applied Sciences, Engineering and Computing, University of the West of Scotland</td><td>2</td></tr><tr><td><b>University of Siena, Siena, Italy</b></td><td>2</td></tr><tr><td>Elektronik ve Haberle&#x015F;me M&#x00FC;hendisli&#x011F;i B&#x00F6;l&#x00FC;m&#x00FC;, Y&#x0131;ld&#x0131;z Teknik &#x00DC;niversitesi, &#x0130;stanbul, T&#x00FC;rkiye</td><td>2</td></tr><tr><td><b>Stony Brook University, Stony Brook, NY</b></td><td>2</td></tr><tr><td><b>Centre for Quantum Computation and Intelligent Systems, Faculty of Engineering and Information Technology, University of Technology, Sydney, Ultimo, NSW, Australia</b></td><td>2</td></tr><tr><td><b>Department of Electronics and Information Engineering, Huazhong University of Science and Technology, Wuhan, China</b></td><td>2</td></tr><tr><td><b>Department of Artificial Intelligence, Faculty of Computer Science & Information Technology, University of Malaya, Kuala Lumpur, 50603, Malaysia</b></td><td>2</td></tr><tr><td>Department of Computer Science and Engineering of Systems, University of Zaragoza, Escuela Universitaria Politécnica de Teruel, Teruel, Spain</td><td>2</td></tr><tr><td><b>Department of DMC Engineering, Sungkyunkwan University, Suwon, South Korea</b></td><td>2</td></tr><tr><td>Department of Automation, North-China University of Technology, Beijing, China</td><td>2</td></tr><tr><td><b>University of Bern, Neubrückstrasse 10, Bern, Switzerland</b></td><td>2</td></tr><tr><td><b>Centre for Signal Processing, Department of Electronic and Information Engineering, The 
Hong Kong Polytechnic University, Kowloon, Hong Kong</b></td><td>2</td></tr><tr><td><b>Computer Science, Fudan University, Shanghai, 201203, China</b></td><td>2</td></tr><tr><td><b>Electronic Engineering and Computer Science, Queen Mary University, London, United Kingdom</b></td><td>2</td></tr><tr><td><b>Department of Computer Science and Engineering, Pohang University of Science and Technology, Pohang, Korea</b></td><td>2</td></tr><tr><td><b>Swiss Federal, Institute of Technology, Lausanne (EPFL), Switzerland</b></td><td>2</td></tr><tr><td><b>Disney Research, CH</b></td><td>2</td></tr><tr><td>Faculty of Electrical Engineering and Computing, University of Zagreb, Zagreb, Croatia</td><td>2</td></tr><tr><td><b>Water Optics Technology Pte. Ltd, Singapore</b></td><td>2</td></tr><tr><td><b>School of Electrical &amp; Electronic Engineering, Nanyang Technological University, Singapore</b></td><td>2</td></tr><tr><td><b>Faculty of Computer Science and Information Technology, University of Malaya, Kuala Lumpur, Malaysia</b></td><td>2</td></tr><tr><td><b>National Laboratory of Pattern Recognition, CAS Center for Excellence in Brain Science and Intelligence Technology, Institute of Automation, Chinese Academy of Sciences, Beijing, China</b></td><td>2</td></tr><tr><td><b>Department of Information Science and Electronic Engineering, Zhejiang University, Hangzhou, China</b></td><td>2</td></tr><tr><td><b>Orange Labs International Center Beijing, Beijing, 100876, China</b></td><td>2</td></tr><tr><td><b>Beijing University of Posts and Telecommunications, Beijing 100876, China</b></td><td>2</td></tr><tr><td>Xi'an Jiaotong-Liverpool University, Suzhou, Jiangsu, P.R. 
China</td><td>2</td></tr><tr><td>Indian Statistical Insitute, Kolkata 700108</td><td>2</td></tr><tr><td>Centre for Secure Information Technologies, Queen’s University Belfast, Belfast, UK</td><td>2</td></tr><tr><td><b>National University of Defense Technology, Hunan, China</b></td><td>2</td></tr><tr><td><b>Rutgers University, Piscataway, USA</b></td><td>2</td></tr><tr><td>Wrocław University of Science and Technology, Wrocław, Poland</td><td>2</td></tr><tr><td><b>Norwegian Biometrics Lab, NTNU, Gj⊘vik, Norway</b></td><td>2</td></tr><tr><td><b>The Edward S. Rogers Sr. Department of Electrical and Computer Engineering, University of Toronto, 10 King's College Road, Toronto, Canada</b></td><td>2</td></tr><tr><td><b>School of Computer Science and Technology, University of Science and Technology of China</b></td><td>2</td></tr><tr><td><b>Zhejiang University, HangZhou, China</b></td><td>2</td></tr><tr><td>Department of Electrical Engineering Indian Institute of Technology Delhi New Delhi, India</td><td>2</td></tr><tr><td>Department of Electronics and Communication Engineering Malaviya National Institute of Technology Jaipur, Rajasthan, India</td><td>2</td></tr><tr><td>Department of Electrical Engineering Malaviya National Institute of Technology Jaipur, Rajasthan, India</td><td>2</td></tr><tr><td><b>Center for Automation Research, University of Maryland Institute for Advanced Computer Studies, University of Maryland, College Park, MD, USA</b></td><td>2</td></tr><tr><td><b>School of EECS, Queen Mary University of London, UK</b></td><td>2</td></tr><tr><td><b>College of Software, Shenyang Normal University, Shenyang, China</b></td><td>2</td></tr><tr><td><b>Zhejiang University of Technology, Hangzhou, China</b></td><td>2</td></tr><tr><td><b>School of Computer Science and Technology, Nanjing Normal University, China</b></td><td>2</td></tr><tr><td><b>University of Technology Sydney, Ultimo, NSW, Australia</b></td><td>2</td></tr><tr><td><b>Center for Special Needs Education, 
Nara University of Education, Takabatake-cho, Nara-shi, Nara, Japan</b></td><td>2</td></tr><tr><td>Key Laboratory of Dependable Service Computing in Cyber Physical Society Ministry of Education, Chongqing, China</td><td>2</td></tr><tr><td><b>Sch. of Electr. & Electron. Eng., Nanyang Technol. Univ., Singapore, Singapore</b></td><td>2</td></tr><tr><td><b>Samovar CNRS UMR 5157, Télécom SudParis, Université Paris-Saclay, Evry, France</b></td><td>2</td></tr><tr><td><b>Beijing E-Hualu Info Technology Co., Ltd, Beijing, China</b></td><td>2</td></tr><tr><td><b>Machine Learning Center, Faculty of Mathematics and Computer Science, Hebei University, Baoding 071002, China</b></td><td>2</td></tr><tr><td><b>Applied Informatics, Faculty of Technology, Bielefeld University, Germany</b></td><td>2</td></tr><tr><td>Osaka University Health Care Center, Japan</td><td>2</td></tr><tr><td><b>Institut de Robòtica i Informàtica Industrial, CSIC-UPC, Barcelona, Spain</b></td><td>2</td></tr><tr><td><b>Department of Computer Science, Universitat Oberta de Catalunya, Barcelona, Spain</b></td><td>2</td></tr><tr><td><b>University of Groningen, Nijenborgh 9, 9747 AG, The Netherlands</b></td><td>2</td></tr><tr><td><b>University of Science and Technology of China, NO.443, Huangshan Road, Hefei, Anhui, China</b></td><td>2</td></tr><tr><td><b>Shenyang SIASUN Robot &amp; Automation Co., LTD., Shenyang, China</b></td><td>2</td></tr><tr><td><b>State Key Laboratory of Transient Optics and Photonics, Xi'an Institute of Optics and Precision Mechanics, Chinese Academy of Sciences, Xi'an, China</b></td><td>2</td></tr><tr><td><b>Bilgisayar Mühendisliği Bölümü, Bahçeşehir Üniversitesi, İstanbul, Türkiye</b></td><td>2</td></tr><tr><td><b>Department of National Laboratory of Pattern Recognition, Chinese Academy of Sciences, Institute of Automation, Beijing, China</b></td><td>2</td></tr><tr><td><b>Department of Computer Science Faculty of Science, Khon Kaen University, Khon Kaen, 40002, 
Thailand</b></td><td>2</td></tr><tr><td><b>Queen Mary University of London, London, United Kingdom</b></td><td>2</td></tr><tr><td><b>Academy of Broadcasting Science, Beijing, P.R. China</b></td><td>2</td></tr><tr><td><b>Engineering Lab on Intelligent Perception for Internet of Things (ELIP), Shenzhen Graduate School, Peking University, Beijing, China</b></td><td>2</td></tr><tr><td><b>Key Laboratory of Machine Perception, Ministry of Eduction, Peking University, Beijing, China</b></td><td>2</td></tr><tr><td><b>College of Computer Science, Zhejiang University, Hangzhou, Zhejiang, China</b></td><td>2</td></tr><tr><td><b>Department of Computer Engineering, Istanbul Technical University, Istanbul, Turkey</b></td><td>2</td></tr><tr><td><b>Department of Information Engineering, The Chinese University of Hong Kong</b></td><td>2</td></tr><tr><td><b>School of Computing, Teesside University, Middlesbrough, UK</b></td><td>2</td></tr><tr><td><b>Department of Computer Science and Digital Technologies, Faculty of Engineering and Environment, Northumbria University, Newcastle, UK, NE1 8ST</b></td><td>2</td></tr><tr><td><b>Faculty of Telecommunications, Technical University of Sofia, Bulgaria</b></td><td>2</td></tr><tr><td><b>Key Lab of Intelligent Information Processing, Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China</b></td><td>2</td></tr><tr><td><b>Computer Science, University of Haifa, Carmel, 31905, Israel</b></td><td>2</td></tr><tr><td><b>Fernuniversitt in Hagen FUH Hagen, Germany</b></td><td>2</td></tr><tr><td><b>Research institute for Telecommunication and Cooperation, FTK, Dortmund, Germany</b></td><td>2</td></tr><tr><td><b>Core Technology Center, OMRON Corporation, Kyoto, Japan</b></td><td>2</td></tr><tr><td><b>College of Computer Science and Technology, Chongqing University of Posts and Telecommunications, Chongqing 404100, China</b></td><td>2</td></tr><tr><td><b>College of Software Engineering, Chongqing University of Posts and 
Telecommunications, Chongqing 404100, China</b></td><td>2</td></tr><tr><td><b>USP - University of São Paulo / ICMC, SSC - LRM (Mobile Robots Lab.), São Carlos, 13566-590, Brazil</b></td><td>2</td></tr><tr><td><b>Department of Automation, Tsinghua National Laboratory for Information Science and Technology (TNList), State Key Lab of Intelligent Technologies and Systems, Tsinghua University, Beijing, China</b></td><td>2</td></tr><tr><td><b>Department of Electric and Electronics, Selçuk University, Konya, Turkey</b></td><td>2</td></tr><tr><td><b>Research Center of Intelligent Robotics, Department of Automation, Shanghai Jiao Tong University, 200240, China</b></td><td>2</td></tr><tr><td><b>Institute of Automation, Chinese Academy of Sciences</b></td><td>2</td></tr><tr><td><b>Department of Electrical Engineering, KAIST, Deajeon, Daejeon, Republic of Korea</b></td><td>2</td></tr><tr><td><b>Department of Electrical Engineering, Tafresh University, Tafresh, Iran</b></td><td>2</td></tr><tr><td><b>Department of Electrical and Electronic Engineering, Chittagong University of Engineering and Technology, Chittagong, 4349, Bangladesh</b></td><td>2</td></tr><tr><td><b>Department of Mechanical Engineering, Chittagong University of Engineering and Technology, Chittagong, 4349, Bangladesh</b></td><td>2</td></tr><tr><td><b>University of Ottawa, Ottawa, ON, Canada</b></td><td>2</td></tr><tr><td><b>Kochi University of Technology, Kochi, 782-8502, Japan</b></td><td>2</td></tr><tr><td><b>Hefei University of Technology, School of Computer and Information, Hefei, Anhui, 230601, China</b></td><td>2</td></tr><tr><td><b>Karlsruhe Institute of Technology, Institute for Anthropomatics, Karlsruhe, Germany</b></td><td>2</td></tr><tr><td><b>Chinese Academy of Sciences, Shenzhen, China</b></td><td>2</td></tr><tr><td><b>Pattern Recognition and Intelligent System Lab., Beijing University of Posts and Telecommunications, China</b></td><td>2</td></tr><tr><td><b>NCCU, 
USA</b></td><td>2</td></tr><tr><td><b>WVU, USA</b></td><td>2</td></tr><tr><td><b>University of Nottingham Malaysia Campus, Selangor Darul Ehsan, Malaysia</b></td><td>2</td></tr><tr><td><b>Centre for Quantum Computation and Intelligent Systems, the Faculty of Engineering and Information Technology, University of Technology, Sydney, Ultimo, Australia</b></td><td>2</td></tr><tr><td>Shahid Bahonar University of Kerman Computer Engineering Department, Kerman, Iran</td><td>2</td></tr><tr><td><b>Department of Computer and Information Sciences, University of Delaware, Newark, DE, USA</b></td><td>2</td></tr><tr><td><b>Department of Electrical Engineering, University of Hawaii, Manoa, Honolulu, HI, 96822</b></td><td>2</td></tr><tr><td><b>Samsung Electronics, SAIT Suwon-si, Korea</b></td><td>2</td></tr><tr><td><b>Department of Automation, University of Science and Technology of China</b></td><td>2</td></tr><tr><td><b>Centre for Intelligent Sensing, Queen Mary University of London, London, U.K.</b></td><td>2</td></tr><tr><td><b>CETUC, Pontifical Catholic University of Rio de Janeiro, Brazil</b></td><td>2</td></tr><tr><td><b>&#x0130;stanbul Teknik &#x00DC;niversitesi, &#x0130;stanbul, T&#x00FC;rkiye</b></td><td>2</td></tr><tr><td><b>School of Electronic Engineering, Xidian University, Xi’an, China</b></td><td>2</td></tr><tr><td><b>Islamic Azad University, South Tehran Branch, Electrical Engineering Department, Iran</b></td><td>2</td></tr><tr><td>Istituto Italiano di Tecnologia (IIT) & Università di Torino, Genova, Italy</td><td>2</td></tr><tr><td>Istituto Italiano di Tecnologia (IIT) & Università degli Studi di Genova, Genova, Italy</td><td>2</td></tr><tr><td><b>Shenzhen Graduate School, Harbin Institute of Technology, China</b></td><td>2</td></tr><tr><td><b>Human Language Technology and Pattern Recognition Group, RWTH Aachen University</b></td><td>2</td></tr><tr><td><b>Rensselaer Polytechnic Institute, USA</b></td><td>2</td></tr><tr><td><b>Electrical Engineering Department, 
Amirkabir University of Technology, Tehran, Iran</b></td><td>2</td></tr><tr><td><b>Vision Lab, School of Engineering and Physical Sciences, Heriot-Watt University, Edinburgh, United Kingdom</b></td><td>2</td></tr><tr><td><b>Center for Machine Vision Research, Computer Science and Engineering, University of Oulu, Oulu, Finland</b></td><td>2</td></tr><tr><td><b>University of Southern California, Los Angeles, USA</b></td><td>2</td></tr><tr><td><b>University of Amsterdam, The Netherlands</b></td><td>2</td></tr><tr><td><b>Academia Sinica, Institute of Information Science, Taipei, Taiwan</b></td><td>2</td></tr><tr><td><b>Centre for Communication Systems Research, University of Surrey, Guildford, Surrey, United Kingdom</b></td><td>2</td></tr><tr><td>Norwegian Biometric Laboratory, Norwegian University of Science and Technology (NTNU), Gjøvik, Norway</td><td>2</td></tr><tr><td><b>School of Computer Engineering and Science, Shanghai University</b></td><td>2</td></tr><tr><td><b>Department of Information Engineering, The Chinese University of Hong Kong, Hong Kong, China</b></td><td>2</td></tr><tr><td><b>Corp. Res. & Dev. Center, Toshiba Corp., Kawasaki, Japan</b></td><td>2</td></tr><tr><td><b>School of Computer Science and Technology, Tianjin University, 300072, China</b></td><td>2</td></tr><tr><td><b>Department of Information & Communication Technologies, Universitat Pompeu Fabra, Barcelona, Spain</b></td><td>2</td></tr><tr><td><b>Computer Engineering, Rochester Institute of Technology, USA</b></td><td>2</td></tr><tr><td><b>University of Notre Dame, Notre Dame, Indiana</b></td><td>2</td></tr><tr><td>Department of Computer Applications, National Institute of Technology, Tiruchirappalli, India</td><td>2</td></tr><tr><td>B. 
Tech Graduate, ECE, MSIT, C-4 Janakpuri, New Delhi, India</td><td>2</td></tr><tr><td><b>Department of Electrical, Computer and IT Engineering, Qazvin Branch, Islamic Azad University, Qazvin, Iran</b></td><td>2</td></tr><tr><td><b>Computer Vision Institute, School of Computer Science and Software Engineering, and the Shenzhen Key Laboratory of Spatial Information Smart Sensing and Services, Shenzhen University, Shenzhen, China</b></td><td>2</td></tr><tr><td><b>University of Tokyo, Tokyo, Japan</b></td><td>2</td></tr><tr><td><b>RSISE, Australian National University, Australia</b></td><td>2</td></tr><tr><td>San Diego State University, San Diego, CA, USA</td><td>2</td></tr><tr><td><b>University of Memphis, Memphis, TN</b></td><td>2</td></tr><tr><td><b>HumanRobot Interaction Research Center, Department of Mechanical Engineering, Korea Advanced Institute of Science and Technology, Republic of Korea seojh</b></td><td>2</td></tr><tr><td><b>Panasonic Singapore Laboratories Pte Ltd (PSL), Tai Seng Industrial Estate 534415, Singapore</b></td><td>2</td></tr><tr><td><b>University of Texas at Arlington, Arlington, USA</b></td><td>2</td></tr><tr><td><b>Massachusetts General Hospital, Boston, MA, USA</b></td><td>2</td></tr><tr><td><b>Department of Computer Science, Virginia Commonwealth University, Richmond, VA, USA</b></td><td>2</td></tr><tr><td><b>Dept. of Electrical and Electronics Engineering, Bahcesehir University, Istanbul, Turkey</b></td><td>2</td></tr><tr><td>MIT, Cambridge, MA, USA</td><td>2</td></tr><tr><td><b>Department of Computer Science, University of York, York, UK</b></td><td>2</td></tr><tr><td><b>Imaging Software Technol. Center, Fuji Photo Film Co. Ltd., Japan</b></td><td>2</td></tr><tr><td><b>Dept. 
of ECE & Digital Technology Center, University of Minnesota, USA</b></td><td>2</td></tr><tr><td><b>Shenzhen University, Shenzhen China</b></td><td>2</td></tr><tr><td><b>National Lab of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, 95 Zhongguancun East Road, 100190, Beijing, China</b></td><td>2</td></tr><tr><td><b>Islamic University of Technology, Bangladesh</b></td><td>2</td></tr><tr><td><b>Institute of Computer and Communication Engineering, Department of Electrical Engineering, National Cheng Kung University, Tainan, 70101, Taiwan</b></td><td>2</td></tr><tr><td><b>Technion</b></td><td>2</td></tr><tr><td><b>Department of Computer Science, Okayama University, Okayama, Japan</b></td><td>2</td></tr><tr><td><b>Cyprus University of Technology</b></td><td>2</td></tr><tr><td>Dept of Electronics and Communication, Manipal Institute Of Technology, Karnataka, India</td><td>2</td></tr><tr><td><b>University of Technology, Sydney, Sydney, Australia</b></td><td>2</td></tr><tr><td>LMU Munich, Germany</td><td>2</td></tr><tr><td>Polytechnic School of Pernambuco, University of Pernambuco, Recife-PE, Brazil</td><td>2</td></tr><tr><td><b>Dept. 
of Electrical Engineering, National Taiwan University, Taiwan</b></td><td>2</td></tr><tr><td><b>Research Center for Information Technology Innovation, Academia Sinica, Taiwan</b></td><td>2</td></tr><tr><td><b>University of Illinois at Urbana-Champaign, 201 N Goodwin, 61820, USA</b></td><td>2</td></tr><tr><td><b>Research School of Engineering, The Australian National University, Canberra, ACT, Australia</b></td><td>2</td></tr><tr><td><b>CyLab Biometrics Center and the Department of Electrical and Computer Engineering, Carnegie Mellon University, Pittsburgh, PA</b></td><td>2</td></tr><tr><td><b>Faculty of Electrical Engineering, Mathematics and Computer Science, University of Twente, Enschede, The Netherlands</b></td><td>2</td></tr><tr><td><b>Department of Computing, Imperial College London, London, UK</b></td><td>2</td></tr><tr><td><b>Pittsburgh Univ., PA, USA</b></td><td>2</td></tr><tr><td><b>Computer Vision and Remote Sensing, Berlin University of Technology, Sekr. FR 3-1, Franklinstr. 28/29, 10587, Germany</b></td><td>2</td></tr><tr><td><b>Department of Information Engineering, the Chinese University of Hong Kong, Shatin</b></td><td>2</td></tr><tr><td>Başkent University, Ankara, TURKEY</td><td>2</td></tr><tr><td><b>Department of Signal Processing, Tampere University of Technology, Finland</b></td><td>2</td></tr><tr><td><b>Bilgisayar M&#x00FC;hendisli&#x011F;i B&#x00F6;l&#x00FC;m&#x00FC;, Ankara Y&#x0131;ld&#x0131;r&#x0131;m Beyaz&#x0131;t &#x00DC;niversitesi, Ankara, T&#x00FC;rkiye</b></td><td>2</td></tr><tr><td><b>Department of Computer and Information Science, University of Macau, Taipa, Macau</b></td><td>2</td></tr><tr><td>Department of Electronic and Communication Engineering, Universiti Tenaga Nasional Km 7, Jalan IKRAM-UNITEN, 43000, Kajang, Selangor, Malaysia</td><td>2</td></tr><tr><td><b>Senior Member, IEEE, Pohang University of Science and Technology (POSTECH), Pohang, Gyeongbuk, 790-784, South Korea. 
phone: 82-54-279-2880, 2214; fax: 82-54-279-5594; e-mail: dreaming@postech.ac.kr, syoh@postech.ac.kr</b></td><td>2</td></tr><tr><td><b>Pohang University of Science and Technology (POSTECH), Pohang, Gyeongbuk, 790-784, South Korea. phone: 82-54-279-2880, 2214; fax: 82-54-279-5594; e-mail: dreaming@postech.ac.kr</b></td><td>2</td></tr><tr><td><b>Center of Machine Vision Research, Department of Computer Science and Engineering, University of Oulu, Oulu, Finland</b></td><td>2</td></tr><tr><td><b>Key Laboratory of Child Development and Learning Science (Ministry of Education), Research Center for Learning Science, Southeast University, Nanjing, China</b></td><td>2</td></tr><tr><td><b>School of Information Science and Technology, Sun Yat-sen University, Guangzhou, China</b></td><td>2</td></tr><tr><td><b>Dirección General de la Guardia Civil - DGGC Madrid, Spain</b></td><td>2</td></tr><tr><td><b>School of Information Science and Technology, Huaqiao University, Xiamen, China</b></td><td>2</td></tr><tr><td><b>Computer Laboratory, University of Cambridge, UK</b></td><td>2</td></tr><tr><td><b>School of Automation, Southeast University, Nanjing, China</b></td><td>2</td></tr><tr><td><b>Rutgers University, Piscataway</b></td><td>2</td></tr><tr><td><b>University of Hong Kong, China</b></td><td>2</td></tr><tr><td><b>Department of Automation, State Key Laboratory of Intelligent Technologies and Systems, Tsinghua National Laboratory for Information Science and Technology, Tsinghua University, Beijing, China</b></td><td>2</td></tr><tr><td><b>School of Electronics and Information Technology, Sun Yat-Sen University</b></td><td>2</td></tr><tr><td><b>Charles Sturt University, Wagga Wagga NSW, Australia</b></td><td>2</td></tr><tr><td>Sunway University, Selangor, Malaysia</td><td>2</td></tr><tr><td><b>Hexi University, Center for Information Technology, Zhangye, China</b></td><td>2</td></tr><tr><td><b>Department of Computer Engineering, Rochester Institute of Technology, Rochester, NY, 
USA</b></td><td>2</td></tr><tr><td><b>School of Communication and Information Engineering, Shanghai University, Shanghai, China</b></td><td>2</td></tr><tr><td><b>Department of Communications and Computer Engineering, University of Malta, Msida, Malta</b></td><td>2</td></tr><tr><td><b>Multimedia Communications Dept., EURECOM, Sophia Antipolis, France</b></td><td>2</td></tr><tr><td>Northwestern Polytechnical University Xian, P. R. China</td><td>2</td></tr><tr><td><b>Northwestern Polytechnical University, Xian, P. R. China, and UNC-Charlotte, Charlotte, NC</b></td><td>2</td></tr><tr><td><b>Michigan State University, East Lansing, U.S.A.</b></td><td>2</td></tr><tr><td>Dept. of E & TC Engineering, Maharashtra Institute of Technology, Pune, India</td><td>2</td></tr><tr><td><b>Commonwealth Scientific and Industrial Research Organisation, Clayton South, Vic. , Australia</b></td><td>2</td></tr><tr><td><b>Speech, Audio, Image and Video Technology Laboratory, Queensland University of Technology, Brisbane, Australia</b></td><td>2</td></tr><tr><td><b>School of Computer Science and Technology and the Tianjin Key Laboratory of Cognitive Computing and Application, Tianjin University, Tianjin, China</b></td><td>2</td></tr><tr><td><b>School of Electrical Engineering and Computer Science, Queen Mary University of London, London, U.K.</b></td><td>2</td></tr><tr><td><b>Institute of Communications Engineering, National Tsing Hua University, Hsinchu, Taiwan</b></td><td>2</td></tr><tr><td>Dept. of ECE and Digital Technology Center, Univ. of Minnesota, USA</td><td>2</td></tr><tr><td><b>Department of Computer Science, National Tsing Hua University, Taiwan, R.O.C</b></td><td>2</td></tr><tr><td>Department of Computer Science, Chu Hai College of Higher Education, Hong Kong</td><td>2</td></tr><tr><td>School of Electrical and Computer Engineering, Ulsan National Institute of Science and Technology (UNIST), UNIST-gil 50, 689-798, Korea</td><td>2</td></tr><tr><td>Dept. of Comp. Sci. and Inf. 
Eng, Chung Hua University, Hsinchu, Taiwan</td><td>2</td></tr><tr><td><b>Dept. of Comp. Sci, National Chiao Tung University, Hsinchu, Taiwan</b></td><td>2</td></tr><tr><td><b>Department of Computing, Curtin University, Perth, Australia</b></td><td>2</td></tr><tr><td><b>HEUDIASYC Mixed Res. Unit, Compiegne Univ. of Technol., France</b></td><td>2</td></tr><tr><td><b>Universit&#x00E0; di Salerno v. Ponte don Melillo, 84084, Fisciano (IT)</b></td><td>2</td></tr><tr><td>Shanghai Jiao Tong University & Alibaba Group, Shanghai, China</td><td>2</td></tr><tr><td><b>National Taiwan University of Science and Technology, No.43, Keelung Rd., Sec.4, Da'an Dist., Taipei City 10607, Taiwan</b></td><td>2</td></tr><tr><td>School of Computer Science, Kyungpook National University, Buk-gu, Daegu, The Republic of Korea</td><td>2</td></tr><tr><td><b>Department of Computer Science and Technology, Nanjing University of Aeronautics and Astronautics, Nanjing, China</b></td><td>2</td></tr><tr><td>Laboratory LAROSERI, Department of Computer Science, Faculty of Sciences, University of Chouaib Doukkali, El Jadida - Morocco</td><td>2</td></tr><tr><td><b>Department of Electrical Engineering, Shanghai Jiao Tong University, Shanghai, China</b></td><td>2</td></tr><tr><td><b>Computer Vision, Video and Image Processing (CvviP) Research Lab, Faculty of Electrical Engineering, Universiti Teknologi Malaysia, 81310 UTM Skudai, Johor, Malaysia</b></td><td>2</td></tr><tr><td><b>Microsoft Research Asia, China</b></td><td>2</td></tr><tr><td><b>Centre for Quantum Computation & Intelligent Systems and the Faculty of Engineering & Information Technology, University of Technology, Sydney, Australia</b></td><td>2</td></tr><tr><td>Microsoft Research India Pvt. 
Ltd, Bangalore, Karnataka, India</td><td>2</td></tr><tr><td><b>Indiana University Bloomington, Bloomington, IN, USA</b></td><td>2</td></tr><tr><td>Department of Electronics, University of Goa, India</td><td>2</td></tr><tr><td><b>Department of Computer Science, School of Science and Technology, Meiji University, 1-1-1 Higashimita, Tama-ku, Kawasaki, Kanagawa</b></td><td>2</td></tr><tr><td><b>Department of Computer Science, Graduate School of Science and Technology, Meiji University, 1-1-1 Higashimita, Tama-ku, Kawasaki, Kanagawa</b></td><td>2</td></tr><tr><td><b>Multimedia Processing Laboratory, Department of Computer Science, National Tsing Hua University, Hsinchu, Taiwan</b></td><td>2</td></tr><tr><td><b>Robotics Institute, Carnegie Mellon University, Pittsburgh, PA</b></td><td>2</td></tr><tr><td><b>Electric Power Research Institute, State Grid Shanghai Electric Power Company Shanghai, 200093, China</b></td><td>2</td></tr><tr><td><b>South East European University, Tetovo, Macedonia</b></td><td>2</td></tr><tr><td><b>Computer Science and Engineering, Arizona State University, Tempe, AZ</b></td><td>2</td></tr><tr><td><b>Villanova University, Villanova, PA, USA</b></td><td>2</td></tr><tr><td><b>University of Technology Sydney, Sydney, Australia</b></td><td>2</td></tr><tr><td><b>School of EE, Xidian University, Xi'an 710071, China</b></td><td>2</td></tr><tr><td>Department of ECE, National Institute of Technology, Rourkela (Odisha), India</td><td>2</td></tr><tr><td><b>Korea Electronics Technology Institute</b></td><td>2</td></tr><tr><td><b>Computer Science and Engineering Dept., University of North Texas, Denton, TX, USA</b></td><td>2</td></tr><tr><td>Beijing Key Laboratory of Advanced Information Science and Network Technology, Beijing, China</td><td>2</td></tr><tr><td><b>Institute of Information Science, Beijing Jiaotong University, Beijing, China</b></td><td>2</td></tr><tr><td><b>Machine Learning Department, Carnegie Mellon University, Pittsburgh, 
PA</b></td><td>2</td></tr><tr><td><b>Computer Science and Engineering Michigan State University, East Lansing, USA</b></td><td>2</td></tr><tr><td><b>College of Information Science and Technology, Beijing Normal University, Beijing, China</b></td><td>2</td></tr><tr><td><b>Organization of Advanced Science and Technology, Kobe University, Japan</b></td><td>2</td></tr><tr><td><b>Center for Research in Computer Vision, University of Central Florida, Orlando, USA</b></td><td>2</td></tr><tr><td><b>IBM T.J. Watson Research Center, Yorktown Heights, NY 10598, USA</b></td><td>2</td></tr><tr><td><b>International Institute of Information Technology, Hyderabad, India</b></td><td>2</td></tr><tr><td><b>University of Illinois’ Advanced Digital Sciences Center, Singapore</b></td><td>2</td></tr><tr><td><b>Institute for Advanced Computer Studies, University of Maryland, College Park, Maryland 20740 United States</b></td><td>2</td></tr><tr><td>B-DAT Laboratory, School of Information and Control, Nanjing University of Information and Technology, Nanjing, China</td><td>2</td></tr><tr><td><b>University of Cambridge, Cambridge, United Kingdom</b></td><td>2</td></tr><tr><td><b>Intelligent Data Center (IDC) and Department of Mathematics, Sun Yat-Sen University, Guangzhou, China</b></td><td>2</td></tr><tr><td><b>Jaypee Institute of Information Technology</b></td><td>2</td></tr><tr><td><b>Samsung Advanced Institute of Technology (SAIT), Republic of Korea</b></td><td>2</td></tr><tr><td><b>Department of Computer Science and Technology, Tsinghua University, Beijing, China</b></td><td>2</td></tr><tr><td><b>Institute of Computing Technology, CAS, Key Laboratory of Intelligent Information Processing of Chinese Academy of Sciences (CAS), Beijing, China</b></td><td>2</td></tr><tr><td><b>School of Computer Science and Engineering, University of Aizu, Tsuruga, Ikkimachi, Aizuwakamatsu, Japan</b></td><td>2</td></tr><tr><td><b>Comnuter Science Department, Hong Kong Baptist 
University</b></td><td>2</td></tr><tr><td><b>Department of Electrical Engineering and Computer Science, Northwestern University, Evanston, IL, USA</b></td><td>2</td></tr><tr><td><b>Institute of Computing, University of Campinas, Campinas, SP, 13083-852, Brazil</b></td><td>2</td></tr><tr><td><b>Robotics Lab, Futurewei Technologies Inc., Santa Clara, USA</b></td><td>2</td></tr><tr><td><b>Institute of Automatic Control Engineering (LSR), TU München, Germany</b></td><td>2</td></tr><tr><td><b>Image Understanding and Knowledge-Based Systems, TU München, Germany</b></td><td>2</td></tr><tr><td><b>University of Delaware, Newark, DE</b></td><td>2</td></tr><tr><td><b>HRL Laboratories, LLC, Information Systems and Sciences Lab, Malibu, CA 90265 USA</b></td><td>2</td></tr><tr><td><b>Division of Computing Systems, School of Computer Engineering, Nanyang Technological University, Singapore, Singapore</b></td><td>2</td></tr><tr><td><b>School of Computer Science, Communication University of China, Beijing, China</b></td><td>2</td></tr><tr><td><b>Department of Electrical, Computer, and Systems Engineering, Rensselaer Polytechnic Institute, Troy, NY, USA</b></td><td>2</td></tr><tr><td><b>Computer Science and Technology, University of Science and Technology of China, Hefei, Anhui, China</b></td><td>2</td></tr><tr><td>Thales Services, ThereSIS, Palaiseau, France</td><td>2</td></tr><tr><td>School of Electrical and Electronic Engineering, Tianjin University of Technology, China</td><td>2</td></tr><tr><td><b>Faculty of Computers and Information, Cairo University, Egypt</b></td><td>2</td></tr><tr><td><b>Dept. 
of Electrical and Computer Engineering, National University of Singapore</b></td><td>2</td></tr><tr><td><b>Department of Computing, the Hong Kong Polytechnic University, Hong Kong</b></td><td>2</td></tr><tr><td><b>Institute of Computing, University of Campinas, Campinas, SP, Brazil, 13083-852</b></td><td>2</td></tr><tr><td><b>Tsinghua National Laboratory for Information Science and Technology, Department of Computer Science and Technology, Tsinghua University, Beijing 100084, China</b></td><td>2</td></tr><tr><td><b>CyLab Biometrics Center, Electrical and Computer Engineering, Carnegie Mellon University, Pittsburgh, PA 15213, USA</b></td><td>2</td></tr><tr><td><b>La Trobe University, Australia</b></td><td>2</td></tr><tr><td><b>State key Laboratory of High Performance Computing, National University of Defense Technology, Changsha, Hunan, China, 410073</b></td><td>2</td></tr><tr><td><b>University of South Carolina, Columbia, SC, USA</b></td><td>2</td></tr><tr><td><b>Science and Engineering Faculty, Queensland University of Technology, Australia</b></td><td>2</td></tr><tr><td>Department of Computer Science and Engineering, Mepco Schlenk Engineering College, Sivakasi, India</td><td>2</td></tr><tr><td><b>Department of Computer Technology, Shanghai Jiao Tong University, Shanghai, China</b></td><td>2</td></tr><tr><td><b>School of Computer Science and Software Engineering, The University of Western Australia, Nedlands, WA, Australia</b></td><td>2</td></tr><tr><td><b>National Tsing Hua University, Hsinchu, Taiwan</b></td><td>2</td></tr><tr><td><b>Rutgers, The State University of New Jersey</b></td><td>2</td></tr><tr><td><b>Dhirubhai Ambani Institute of Information and Communication Technology, India</b></td><td>2</td></tr><tr><td><b>Aix Marseille Univ LIF/CNRS, France</b></td><td>2</td></tr><tr><td><b>Swiss Federal Institute of Technology Lausanne (EPFL), Switzerland</b></td><td>2</td></tr><tr><td><b>Department of Computer Science, San Jose State University, San Jose, 
CA</b></td><td>2</td></tr><tr><td>IIIT Bangalore, India</td><td>2</td></tr><tr><td>Institut de Rob&#x00F2;tica i Inform&#x00E0;tica Industrial (CSIC-UPC)</td><td>2</td></tr><tr><td><b>TeV, Fondazione Bruno Kessler, Trento, Italy</b></td><td>2</td></tr><tr><td>Department of Computer Science, IT: Instituto de Telecomunicações, University of Beira Interior, Covilhã, Portugal</td><td>2</td></tr><tr><td>Xinjiang University, Urumqi, China</td><td>2</td></tr><tr><td><b>Jiangsu Key Laboratory of Big Data Analysis Technology, Nanjing University of Information Science and Technology, Nanjing, China</b></td><td>2</td></tr><tr><td><b>School of Computer Science and Technology, Nanjing University of Science and Technology, Nanjing, People’s Republic of China</b></td><td>2</td></tr><tr><td><b>Institute of Computer and Communication Engineering, Department of Electrical Engineering, National Cheng Kung University, 1 University Road, Tainan, Taiwan</b></td><td>2</td></tr><tr><td><b>New Jersey Institute of Technology, Department of Electrical &amp; Computer Engineering, University Heights Newark, NJ 07102 USA</b></td><td>2</td></tr><tr><td><b>Korea Advanced Institute of Science and Technology</b></td><td>2</td></tr><tr><td><b>College of Communication Engineering, Chongqing University, Chongqing, China</b></td><td>2</td></tr><tr><td><b>Department of Forestry and Management of the Environment, Democritus University of Thrace, Orestiada, Greece</b></td><td>2</td></tr><tr><td>School of Computing Science and Engineering, VIT University, Vellore, India</td><td>2</td></tr><tr><td><b>School of Information Science, Japan Advanced Institute of Science and Technology, Asahidai 1-1, Nomi-shi, Ishikawa, Japan, 923-1211</b></td><td>2</td></tr><tr><td><b>Chinese Academy of Sciences, Beijing</b></td><td>2</td></tr><tr><td><b>Tsinghua University, Beijing</b></td><td>2</td></tr><tr><td><b>Electrical and Control Engineering, National Chiao Tung University, Hsinchu, 
Taiwan</b></td><td>2</td></tr><tr><td><b>Artificial Intelligence Laboratory, University of Tsukuba, Japan</b></td><td>2</td></tr><tr><td>Dept. of Electr. & Comput. Eng., Carnegie Mellon Univ., Pittsburgh, PA, USA</td><td>2</td></tr><tr><td><b>Brno University of Technology, Brno-střed, Czech Republic</b></td><td>2</td></tr><tr><td><b>Deutsche Welle, Bonn, Germany</b></td><td>2</td></tr><tr><td>GSI Universidad Polit-écnica de Madrid, Madrid, Spain</td><td>2</td></tr><tr><td><b>University of Waterloo, Canada</b></td><td>2</td></tr><tr><td><b>The University of Tokyo, Tokyo, Japan</b></td><td>2</td></tr><tr><td><b>Department of Computer Science, University of Calgary, Calgary, Alberta, Canada</b></td><td>2</td></tr><tr><td><b>National Institute of Standards and Technology (NIST), Gaithersburg, MD</b></td><td>2</td></tr><tr><td><b>Räven AB, SE-411 14 Göteborg, Sweden</b></td><td>2</td></tr><tr><td><b>School of Mathematics and Statistics, Xi'an Jiaotong University, Xi'an, 710049, China</b></td><td>2</td></tr><tr><td><b>University of Illinois at Urbana-Champaign, Urbana, USA</b></td><td>2</td></tr><tr><td><b>School of Engineering and Applied Sciences, Harvard University, Cambridge, MA 02138</b></td><td>2</td></tr><tr><td><b>The Rowland Insitute at Harvard, Harvard University, Cambridge, MA 02142</b></td><td>2</td></tr><tr><td><b>The Open University of Israel, Israel</b></td><td>2</td></tr><tr><td><b>Halmstad University, Halmstad, Sweden</b></td><td>2</td></tr><tr><td>Department of Electronic Engineering, Kwangwoon University, Seoul, Republic of Korea</td><td>2</td></tr><tr><td><b>Division of Information and Computer Engineering, Ajou University, Suwon, Republic of Korea</b></td><td>2</td></tr><tr><td><b>Department of Computer Engineering, Kyung Hee University, Suwon, Republic of Korea</b></td><td>2</td></tr><tr><td><b>School of Computer Science, Carnegie Mellon University, Pittsburgh, USA</b></td><td>2</td></tr><tr><td>Dept. of Appl. Phys. 
& Electron., Umea Univ., Sweden</td><td>2</td></tr><tr><td>Universidade Federal do Paraná, Curitiba, Brazil</td><td>2</td></tr><tr><td><b>Department of Electronic and Information Engineering, The Hong Kong Polytechnic University, Hong Kong, SAR</b></td><td>2</td></tr><tr><td><b>Department of Radiology, University of Pennsylvania, Philadelphia, PA</b></td><td>2</td></tr><tr><td><b>Institute of VLSI Design, Zhejiang University, Hangzhou, China</b></td><td>2</td></tr><tr><td><b>Faculty of Engineering Technology, Hasselt University, Diepenbeek, Belgium</b></td><td>2</td></tr><tr><td><b>DUT-RU International School of Information and Software Engineering, Dalian University of Technology, Dalian, China</b></td><td>2</td></tr><tr><td><b>University of Barcelona, Barcelona, Spain</b></td><td>2</td></tr><tr><td>Università degli Studi di Verona, Verona, Italy</td><td>2</td></tr><tr><td>CEA, Gif-Sur-Yvette, France</td><td>2</td></tr><tr><td>UMR CNRS - Univ. Bourgogne, Dijon, France</td><td>2</td></tr><tr><td><b>Universita degli Studi di Palermo, Dipartimento di Ingegegneria Informatica, Viale delle Scienze, 90128, ITALY</b></td><td>2</td></tr><tr><td><b>Robotics Institute, Carnegie Mellon University, Pittsburgh, USA</b></td><td>2</td></tr><tr><td>Mechatronic Engineering Department, Mevlana University, Konya, Turkey</td><td>2</td></tr><tr><td><b>Tokyo Metropolitan University, Hino, Tokyo 191-0065, Japan</b></td><td>2</td></tr><tr><td><b>Department of Electrical and Computer, Engineering, University of Denver, Denver, CO 80208</b></td><td>2</td></tr><tr><td>TÜBİITAK-BİILGEM-UEKAE, Anibal Cad., P.K.74, 41470, Gebze-KOCAELİ, Turkey</td><td>2</td></tr><tr><td><b>State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China</b></td><td>2</td></tr><tr><td><b>School of Electronics Engineering and Computer Science, Peking University, Beijing 100871, China</b></td><td>2</td></tr><tr><td><b>Department of Electronic Engineering, Tsinghua University, Beijing 
100084, P.R. China</b></td><td>2</td></tr><tr><td><b>Department of Electrical and Computer Engineering, University of British Columbia, Vancouver, Canada</b></td><td>2</td></tr><tr><td>The 28th Research Institute of China Electronics Technology Group Corporation, China</td><td>2</td></tr><tr><td><b>Indian Statistical Institute, 203, B. T. Road, Kolkata 700108, India</b></td><td>2</td></tr><tr><td><b>Institute of Neural Information Processing, Ulm University, Ulm, Germany</b></td><td>2</td></tr><tr><td><b>Institute of VLSI Design, Zhejiang University</b></td><td>2</td></tr><tr><td><b>Faculty of Engineering Technology, University Hasselt</b></td><td>2</td></tr><tr><td><b>Massachusetts Institute of Technology, Cambridge, MA</b></td><td>2</td></tr><tr><td><b>Institute of Information Science, Academia Sinica, Taipei, Taiwan Roc</b></td><td>2</td></tr><tr><td><b>Institute of Information Science, Beijing Jiaotong University, 100044, China</b></td><td>2</td></tr><tr><td><b>Department of Computer and Information Sciences, Temple University</b></td><td>2</td></tr><tr><td><b>Department of Computing Sciences, Elon University</b></td><td>2</td></tr><tr><td><b>Department of Electrical and Computer Engineering, University of Maryland, College Park, MD</b></td><td>2</td></tr><tr><td>Raytheon BBN Technologies, 10 Moulton St, Cambridge, MA</td><td>2</td></tr><tr><td><b>General Electric Global Research, 1 Research Circle, Niskayuna, NY</b></td><td>2</td></tr><tr><td><b>Concordia University, Montreal, QC, Canada</b></td><td>2</td></tr><tr><td><b>Charles Perkin Centre, Faculty of Medicine, University of Sydney, Australia</b></td><td>2</td></tr><tr><td><b>Charles Perkin Centre, Faculty of Engineering, University of Sydney, Australia</b></td><td>2</td></tr><tr><td><b>Department of Computing, The Hong Kong Polytechnic University, Hong Kong, China</b></td><td>2</td></tr><tr><td><b>Department of Electrical and Computer Engineering, Nazarbayev University, Astana, 
Kazakhstan</b></td><td>2</td></tr><tr><td><b>Department of Information and Communication Engineering, The University of Tokyo, 7-3-1 Hongo, Bunkyo-ku, Tokyo 113-8656, Japan</b></td><td>2</td></tr><tr><td><b>Sch. of Comput. Sci., Carnegie Mellon Univ., Pittsburgh, PA, USA</b></td><td>2</td></tr><tr><td><b>Tsinghua National Lab for Info. Sci. &amp; Tech., Depart. of Computer Sci. &amp; Tech., Tsinghua University, Beijing, China</b></td><td>2</td></tr><tr><td><b>Harbin Institute of Technology</b></td><td>2</td></tr><tr><td><b>National Laboratory of Pattern Recognition (NLPR), Institute of Automation, Chinese Academy of Sciences, Beijing, China</b></td><td>2</td></tr><tr><td><b>School of Information and Communication Engineering, Beijing University of Posts and Telecommunications, Beijing 100876, China</b></td><td>2</td></tr><tr><td>School of Electrical Engineering and Computer Science at the University of Newcastle, Callaghan, NSW 2308, Australia</td><td>2</td></tr><tr><td><b>School of Electrical Engineering and Computing, University of Newcastle, Newcastle, Australia</b></td><td>2</td></tr><tr><td><b>School of Computer Science, University of Windsor, Windsor, Canada</b></td><td>2</td></tr><tr><td><b>Department of Computer Science and Engineering, Dankook University, Yongin, South Korea</b></td><td>2</td></tr><tr><td>Electronics and Communication Sciences Unit, Indian Statistical Institute, Kolkata, India</td><td>2</td></tr><tr><td><b>KTH Royal Institute of Technology, Stockholm, Sweden</b></td><td>2</td></tr><tr><td><b>Division of Graduate Studies of Tijuana Institute Technology, Mexico</b></td><td>2</td></tr><tr><td>Pontifícia Universidade Católica do RS, Porto Alegre-RS, Brazil</td><td>2</td></tr><tr><td><b>Department of Psychology and the Center for Brain Science, Harvard University, Cambridge</b></td><td>2</td></tr><tr><td><b>School of Engineering and Applied Sciences, Department of Molecular and Cellular Biology, and the Center for Brain Science, Harvard 
University, Cambridge</b></td><td>2</td></tr><tr><td><b>Department of Computer Science, The University of Texas at San Antonio, San Antonio, TX, USA</b></td><td>2</td></tr><tr><td><b>Sch. of Infor. Sci. and Tech., Huizhou Unversity, Huizhou, China</b></td><td>2</td></tr><tr><td><b>Institute of Advanced Manufacturing Technology, Ningbo Institute of Industrial Technology, Chinese Academy of Sciences, Ningbo, China</b></td><td>2</td></tr><tr><td><b>School of Mechatronic Engineering and Automation, Shanghai University, Shanghai, China</b></td><td>2</td></tr><tr><td>Waseda University The Graduate School of Information, Production and Systems 2-7, Hibikino, Wakamatsu-ku, Kitakyushu-shi, Fukuoka, Japan</td><td>2</td></tr><tr><td><b>London, United Kingdom</b></td><td>2</td></tr><tr><td><b>Shenzhen VisuCA Key Lab / SIAT, Chinese Academy of Sciences, China</b></td><td>2</td></tr><tr><td><b>Department of Mathematics, Center for Computer Vision, Sun Yat-Sen University, Guangzhou, China</b></td><td>2</td></tr><tr><td><b>Department of Computer Science and Technology, Huaqiao University, Xiamen, China</b></td><td>2</td></tr><tr><td><b>Xiamen University, Fujian, China</b></td><td>2</td></tr><tr><td>Majority Report, France</td><td>2</td></tr><tr><td><b>Imaging Science and Engineering Laboratory Tokyo Institute of Technology Yokohama 226-8503, Japan</b></td><td>2</td></tr><tr><td>SITI Laboratory, National Engineering School of Tunis (ENIT), University of Tunis El Manar, Tunis, Tunisia</td><td>2</td></tr><tr><td>University of Montreal, Department of Computer Science and Operations Research (DIRO), 2920 Chemin de la tour, QC, Canada, H3C 3J7</td><td>2</td></tr><tr><td><b>College of Computer Science and Technology, Harbin Engineering University, Harbin, China</b></td><td>2</td></tr><tr><td><b>Keio University, Kanagawa, Japan</b></td><td>2</td></tr><tr><td><b>Microsoft Research, Haidian, Beijing, P. R. 
China</b></td><td>2</td></tr><tr><td><b>Video and Image Processing System Laboratory, School of Electronic Engineering, Xidian University , Xi'an, China</b></td><td>2</td></tr><tr><td><b>Department of Computing, Imperial College London, United Kingdom</b></td><td>2</td></tr><tr><td><b>Robert BOSCH Research and Technology Center, Palo Alto, CA 94304, USA</b></td><td>2</td></tr><tr><td>Università di Salerno, Fisciano (SA), Italy</td><td>2</td></tr><tr><td>Advanced Technologies Application Center 7a #21406 b/ 214 and 216, P.C. 12200, Siboney Playa, Havana, Cuba</td><td>2</td></tr><tr><td><b>National Chung Hsing University, Taichung</b></td><td>2</td></tr><tr><td><b>School of Information and Communication Engineering, Beijing University of Posts and Telecommunications, Beijing</b></td><td>2</td></tr><tr><td>School of Electronic Engineering, Xi'an University of Posts and Telecommunications, Xi'an, China</td><td>2</td></tr><tr><td><b>The University of Tokyo</b></td><td>2</td></tr><tr><td>Department of Sciences and Information Technology, University of Sassari, Viale Mancini 5, 07100 Sassari, Italy</td><td>2</td></tr><tr><td><b>Institute for Information Technology and Communications (IIKT), Otto-von-Guericke-University Magdeburg, D-39016, P.O. 
Box 4210 Germany</b></td><td>2</td></tr><tr><td><b>ISIR, CNRS UMR 7222, Universite Pierre et Marie Curie, Paris</b></td><td>2</td></tr><tr><td><b>National Taiwan University of Science and Technology, Taipei, Taiwan</b></td><td>2</td></tr><tr><td><b>Beijing Institute of Science and Technology Information</b></td><td>2</td></tr><tr><td><b>University of Maryland, College Park, MD, 20742</b></td><td>2</td></tr><tr><td><b>Department of Computer Science, University of Rochester, Rochester, NY, USA</b></td><td>2</td></tr><tr><td>Department of Electrical Engineering and Computer Science, University of Siegen, Siegen, Germany</td><td>2</td></tr><tr><td><b>State Key Laboratory of Brain and Cognitive Science, Institute of Psychology, Chinese Academy of Sciences, Beijing, 100101, China</b></td><td>2</td></tr><tr><td><b>Electrical and Computer Engineering, Wayne State University, Detroit, Michigan 48202</b></td><td>2</td></tr><tr><td><b>Bahcesehir University, Istanbul, Turkey</b></td><td>2</td></tr><tr><td><b>University of Udine, Italy</b></td><td>2</td></tr><tr><td>Department of Computer Science and Information Engineering, National Formosa University, Yunlin 632, Taiwan</td><td>2</td></tr><tr><td>Broadcasting &amp; Telecommunications, Convergence Media Research Department, Electronics and Telecommunications Research Institute, Daejeon, Korea</td><td>2</td></tr><tr><td><b>Keio University, Yokohama, Japan</b></td><td>2</td></tr><tr><td><b>Graduate Institute of Networking and Multimedia and the Department of Computer Science and Information Engineering, National Taiwan University, Taipei, Taiwan</b></td><td>2</td></tr><tr><td><b>Electrical and Computer Engineering Department, Carnegie Mellon University, Pittsburgh, USA</b></td><td>2</td></tr><tr><td><b>Innovation Center, Canon USA Inc., San Jose, California</b></td><td>2</td></tr><tr><td><b>University of Texas at San Antonio, San Antonio, Texas</b></td><td>2</td></tr><tr><td>Dept. of ECE & Digital Technology Center, Univ. 
of Minnesota, USA</td><td>2</td></tr><tr><td><b>Bradley Department of Electrical and Computer Engineering, Virginia Polytechnic Institute and State University (Virginia Tech), Blacksburg, VA 24061, USA</b></td><td>2</td></tr><tr><td>FMV I&#x015E;IK &#x00DC;niversitesi, &#x015E;ile, Istanbul</td><td>2</td></tr><tr><td><b>Istanbul Technical University, Informatics Institute, 34469, Turkey</b></td><td>2</td></tr><tr><td><b>School of Mathematical Sciences, Anhui University, Hefei, China</b></td><td>2</td></tr><tr><td><b>Electrical and Computer Engineering, Michigan State University, East Lansing, MI, 48824, USA</b></td><td>2</td></tr><tr><td><b>Agency for Science, Technology and Research, Institute for Infocomm Research, Singapore</b></td><td>2</td></tr><tr><td><b>School of Electrical &amp; Electronic Engineering, Nanyang Technological University, Singapore 639798, Singapore</b></td><td>2</td></tr><tr><td><b>Artificial Vision Laboratory National Taiwan University of Science and Technology</b></td><td>2</td></tr><tr><td><b>Computational Imaging Laboratory, School of Electrical Engineering and Computer Science, University of Central Florida, Orlando, FL, USA</b></td><td>1</td></tr><tr><td>Lehrstuhl für Mustererkennung, FAU Erlangen – Nürnberg, Erlangen, Germany</td><td>1</td></tr><tr><td><b>College of Computer Science and Electronic Engineering, Hunan Key Laboratory of Dependable Systems and Network, Hunan University, Changsha, People’s Republic of China</b></td><td>1</td></tr><tr><td><b>Department of Electrical and Computer Engineering, and ASRI, Seoul National University, Republic of Korea</b></td><td>1</td></tr><tr><td><b>Istanbul Technical University, Turkey</b></td><td>1</td></tr><tr><td><b>Sabanci University, Turkey</b></td><td>1</td></tr><tr><td><b>Key Laboratory of Machine Perception(MOE), EECS, Peking University, Beijing, 100871</b></td><td>1</td></tr><tr><td>Nanjing University of Posts and Telecommunications, China</td><td>1</td></tr><tr><td><b>Information 
Sciences Institute, University of Southern California, Marina del Rey, USA</b></td><td>1</td></tr><tr><td><b>Tianjin University, Tianjin, China</b></td><td>1</td></tr><tr><td>Department of Electrical Engineering, National Chin-Yi University of Technology, Taichung, Taiwan</td><td>1</td></tr><tr><td><b>College of Engineering and Computer Science, The Australian National University, Canberra, ACT, Australia</b></td><td>1</td></tr><tr><td><b>Griffith University, Australia</b></td><td>1</td></tr><tr><td>Laboratoire d’interprétation et de traitement d’images et vidéo, Polytechnique Montréal, Montreal, Canada</td><td>1</td></tr><tr><td>Laboratoire d’imagerie de vision et d’intelligence artificielle, École de technologie supérieure, Université du Québec, Montreal, Canada</td><td>1</td></tr><tr><td><b>Department of Multimedia Design, National Taichung University of Science and Technology, Taichung, Taiwan</b></td><td>1</td></tr><tr><td><b>Department of Computer Science and Information Engineering, National Chung Cheng University, Chia-Yi, Taiwan</b></td><td>1</td></tr><tr><td><b>Centre for Vision, Speech and Signal Processing, University of Surrey, Guildford, Surrey GU2 7XH, UK</b></td><td>1</td></tr><tr><td>University of Bern, Bern, Switzerland</td><td>1</td></tr><tr><td><b>Department of Electrical and Computer Engineering, University of Denver, Denver, USA</b></td><td>1</td></tr><tr><td>School of Physics and Electronic Engineering, Taizhou University, Taizhou 318000, PR China</td><td>1</td></tr><tr><td>Department of Computer Science, University of California at Davis, Davis, USA</td><td>1</td></tr><tr><td><b>School of Computer Science, Fudan University, Shanghai, China</b></td><td>1</td></tr><tr><td><b>Department of Electrical Engineering, Korea Advanced Institute of Science and Technology, Daejeon, South Korea</b></td><td>1</td></tr><tr><td><b>Korea Institute of Oriental Medicine, Daejeon, South Korea</b></td><td>1</td></tr><tr><td><b>Microsoft Research Asia, 49 Zhichun 
Road, Beijing, 100190, China</b></td><td>1</td></tr><tr><td>Collaborative Innovation Center of Electric Vehicles in Beijing, Beijing, China</td><td>1</td></tr><tr><td><b>School of Software Technology, Dalian University of Technology, Dalian, China</b></td><td>1</td></tr><tr><td>Department of Computer Science and Engineering, Shri Shankaracharya Technical Campus, Bhilai, District-Durg, India</td><td>1</td></tr><tr><td>Department of Computer Science and Engineering, Jaypee University of Information Technology, Solan, India</td><td>1</td></tr><tr><td>Department of Computer Science and Engineering, Indian Institute of Technology (Banaras Hindu University), Varanasi, India</td><td>1</td></tr><tr><td><b>Department of Information Engineering, The Chinese University of Hong Kong, China</b></td><td>1</td></tr><tr><td><b>Institute of Education, Xiamen University, Xiamen Shi, China</b></td><td>1</td></tr><tr><td>College of Artificial Intelligenge and Big Data, ChongQing University of Electronic Engineering, Chongqing, China</td><td>1</td></tr><tr><td><b>Harbin Engineering University, Harbin, Heilongjiang, 150001, China</b></td><td>1</td></tr><tr><td>Laboratoire Jean Kuntzmann, Grenoble, France</td><td>1</td></tr><tr><td><b>Electrical and Electronics Engineering Department, Eskisehir Osmangazi University, Eskisehir, Turkey</b></td><td>1</td></tr><tr><td><b>Dept. of Computer Science and Computer Engineering, University of Louisville, KY, USA</b></td><td>1</td></tr><tr><td>Dept. 
of Advanced Technologies, Alcorn State University, MS, USA</td><td>1</td></tr><tr><td><b>Baiyun District Bureau of Justice, Guangzhou, China</b></td><td>1</td></tr><tr><td><b>Guangdong Key Laboratory of Information Security Technology, School of Data and Computer Science, Sun Yat-sen University, Guangzhou, China</b></td><td>1</td></tr><tr><td><b>Department of Information Management, Tamkang University, New Taipei City, Taiwan</b></td><td>1</td></tr><tr><td>Department of Industrial Design, Tatung University, Taipei 104, Republic of China</td><td>1</td></tr><tr><td>Department of Computer Science and Engineering, Tatung University, Taipei 104, Republic of China</td><td>1</td></tr><tr><td><b>IBM T. J. Watson Research Center</b></td><td>1</td></tr><tr><td><b>AI Lab, TAL Education Group, College of Electronics and Information Engineering, Sichuan University, Chengdu, China</b></td><td>1</td></tr><tr><td><b>Institute of High Performance Computing, A*STAR, Singapore</b></td><td>1</td></tr><tr><td><b>3OmniVision Technologies Singapore Pte. Ltd., Singapore</b></td><td>1</td></tr><tr><td><b>Department of ECE, National University of Singapore, Singapore</b></td><td>1</td></tr><tr><td>Department of Computer Science and Technology, Yangzhou University, Yangzhou, People’s Republic of China</td><td>1</td></tr><tr><td>School of Computer Science and Engineering, Nanjing University of Science & Technology, Nanjing, People’s Republic of China</td><td>1</td></tr><tr><td><b>Department of Electrical and Computer Engineering, University of Toronto Toronto, Canada</b></td><td>1</td></tr><tr><td><b>School of Information Science and Engineering, Yunnan University, Kunming, P. R. 
China</b></td><td>1</td></tr><tr><td><b>Department of Computer Science and Technology, Jiangnan University, Wuxi, China</b></td><td>1</td></tr><tr><td>School of Information Engineering, Yangzhou University, Yangzhou, China</td><td>1</td></tr><tr><td><b>Key Laboratory of Intelligent Processing, Institute of Computing Technology, CAS, Chinese Academy of Sciences (CAS), Beijing, People’s Republic of China</b></td><td>1</td></tr><tr><td>Res. Center for Learning Sci., Southeast Univ., Jiangsu, China</td><td>1</td></tr><tr><td>Eedoo Inc, Beijing, China</td><td>1</td></tr><tr><td><b>School of Mechanical Engineering and Automation, Northeastern University, Shenyang 110819, China</b></td><td>1</td></tr><tr><td>CSE, SUNY at Buffalo, USA and Southeast University, China</td><td>1</td></tr><tr><td><b>Knowledge Enterprise Development, Arizona State University, Tempe, 85287-5406 United States</b></td><td>1</td></tr><tr><td><b>Computer Science, Florida State University, Tallahassee, United States</b></td><td>1</td></tr><tr><td><b>Computing Informatics and Decision Systems Engineering, Arizona State University, Tempe, United States</b></td><td>1</td></tr><tr><td><b>Department of Psychology, University of Northern British Columbia, Prince George, Canada</b></td><td>1</td></tr><tr><td><b>Speech, Audio, Image, and Video Technology Laboratory, Queensland University of Technology , Brisbane, Australia</b></td><td>1</td></tr><tr><td><b>Speech, Audio, Image, and Video Technology Laboratory, Queensland University of Technology, Brisbane, Australia</b></td><td>1</td></tr><tr><td><b>Commonwealth Scientific and Industrial Research Organization, Pullenvale, Australia</b></td><td>1</td></tr><tr><td><b>Department of Psychology, University of Pittsburgh, Pittsburgh, PA , USA</b></td><td>1</td></tr><tr><td><b>Department of Psychology, University of Pittsburgh, Pittsburgh, PA, USA</b></td><td>1</td></tr><tr><td><b>School of computer Science and Engineering, Nanyang Technological University, 
Singapore</b></td><td>1</td></tr><tr><td><b>Institute for Infocomm Research, Agency for Science, Technology and Research, Singapore</b></td><td>1</td></tr><tr><td><b>Dhirubhai Ambani Institute of Information and Communication Technology, Gandhinagar, India</b></td><td>1</td></tr><tr><td><b>Yonsei University, Seoul, South Korea</b></td><td>1</td></tr><tr><td><b>Multimedia University, Melaka, Malaysia</b></td><td>1</td></tr><tr><td>School of Information Technology and Engineering, VIT University, Vellore, India</td><td>1</td></tr><tr><td><b>Department of Electrical and Computer Engineering, Stevens Institute of Technology, Hoboken, New Jersey, 07030</b></td><td>1</td></tr><tr><td>Department of Electrical, Computer and Biomedical Engineering, University of Rhode Island, Kingston, RI 02881</td><td>1</td></tr><tr><td>Vulcan Inc, Seattle, WA 98104</td><td>1</td></tr><tr><td><b>Department of Computer Science, Hofstra University, Hempstead, NY 11549</b></td><td>1</td></tr><tr><td>Dept. of Computing, Curtin University of Technology, WA 6102, USA</td><td>1</td></tr><tr><td>School of Software, Faculty of Engineering and Information Technology, University of Technology, Sydney, Australia</td><td>1</td></tr><tr><td><b>University of Washington, Tacoma & Ghent University, Tacoma, WA, USA</b></td><td>1</td></tr><tr><td>University of California, Santa Cruz & Ghent University, Santa Cruz, CA, USA</td><td>1</td></tr><tr><td>Computer Vision Research lab, Electrical Engineering Faculty, Sahand University of Technology, Tabriz, Iran</td><td>1</td></tr><tr><td><b>Language Technologies Institute, Carnegie Mellon University, Pittsburgh, PA</b></td><td>1</td></tr><tr><td><b>Center for OPTical Imagery Analysis and Learning, Northwestern Polytechnical University, Shaanxi, China</b></td><td>1</td></tr><tr><td><b>Beijing Etrol Technologies Co., Ltd, Beijing, China</b></td><td>1</td></tr><tr><td><b>Securics, Inc. 
Colorado Springs, CO, USA</b></td><td>1</td></tr><tr><td><b>Institute of Computing, University of Campinas (Unicamp) Campinas, SP, Brazil</b></td><td>1</td></tr><tr><td>HAN University of Applied Sciences, Arnhem, Netherlands</td><td>1</td></tr><tr><td>Faculty of Information Technology, Barrett Hodgson University, Karachi, Pakistan</td><td>1</td></tr><tr><td>Laboratoire Hubert Curien, UMR5516, Université Jean Monnet, Saint-Etienne, France</td><td>1</td></tr><tr><td>Université de Lyon, CNRS, LIRIS, UMR5205, Université Lyon 1, Lyon, France</td><td>1</td></tr><tr><td>Department of Electrical and Computer Engineering, Saginaw Valley State University, University Ctr, MI- 48710</td><td>1</td></tr><tr><td>TCTS Lab, Faculté Polytechnique de Mons, Belgium</td><td>1</td></tr><tr><td>Speech Technology Group, Technical University of Madrid, Spain</td><td>1</td></tr><tr><td>TALP Research Center, Universitat Politècnica de Catalunya, Spain</td><td>1</td></tr><tr><td><b>Electrical and Electronics Engineering Dept., Bogazici University, Turkey</b></td><td>1</td></tr><tr><td><b>AIIA Lab, Aristotle University of Thessaloniki, Greece</b></td><td>1</td></tr><tr><td>TELE Lab, Université catholique de Louvain, Belgium</td><td>1</td></tr><tr><td><b>DISI, University of Trento, Trento, Italy</b></td><td>1</td></tr><tr><td><b>LAPI, University Politehnica of Bucharest, Bucharest, Romania</b></td><td>1</td></tr><tr><td><b>IDIAP Research Institute, Martigny, Switzerland</b></td><td>1</td></tr><tr><td><b>University of Michigan, Ann, Arbor, MI USA</b></td><td>1</td></tr><tr><td><b>Department of Computer Science, Memorial University of Newfoundland, St. 
John’s, Canada</b></td><td>1</td></tr><tr><td>INRIA Grenoble-Rhône-Alpes Research Center, France</td><td>1</td></tr><tr><td><b>Department of Computer Science, Rutgers University, Piscataway, New Jersey 08854, USA</b></td><td>1</td></tr><tr><td><b>School of Computer Science and Information, Anhui Polytechnic University, Wuhu, China</b></td><td>1</td></tr><tr><td><b>Language Technologies Institute, Carnegie Mellon University, Pittsburgh, USA</b></td><td>1</td></tr><tr><td><b>Department of Computer Science, Stanford University, Stanford, USA</b></td><td>1</td></tr><tr><td><b>School of Mathematics, Beihang University, Beijing, China</b></td><td>1</td></tr><tr><td>Department of Embedded Systems, Institute for Infocomm Research, Singapore</td><td>1</td></tr><tr><td><b>IBM Research, USA</b></td><td>1</td></tr><tr><td><b>IBM Hursley Labs, UK</b></td><td>1</td></tr><tr><td>E.T.S. Ingenieros Industriales, Universidad de Castilla-La Mancha Campus Universitario, Ciudad Real, Spain</td><td>1</td></tr><tr><td>Universidad de Las Palmas de Gran Canaria, SIANI, Edificio Central del Parque Científico-Tecnológico, Las Palmas, Spain</td><td>1</td></tr><tr><td><b>Monash University, Caulfield East, Australia</b></td><td>1</td></tr><tr><td>School of Math and Geospatial Sciences, Royal Melbourne Institute of Technology University , Melbourne, Australia</td><td>1</td></tr><tr><td><b>Department of Computer Science, Harbin Institute of Technology, China, 150001</b></td><td>1</td></tr><tr><td>Department of Computer Science and Application, Zhengzhou Institute of Aeronautical Industry Management, Zhengzhou, China</td><td>1</td></tr><tr><td>School of Information Engineering and Automation, Kunming University of Science and Technology, Kunming, China</td><td>1</td></tr><tr><td><b>Computer Science Department, School of Information Science and Engineering, Xiamen University, Xiamen, China</b></td><td>1</td></tr><tr><td><b>Carnegie Mellon University, 5000 Forbes Ave, Pittsburgh, PA 15213. 
Marios.Savvides@ri.cmu.edu</b></td><td>1</td></tr><tr><td><b>Carnegie Mellon University, 5000 Forbes Ave, Pittsburgh, PA 15213. yunghui@cmu.edu</b></td><td>1</td></tr><tr><td><b>College of Computer Science and Technology, Nanjing University of Science and Technology, Nanjing, China</b></td><td>1</td></tr><tr><td><b>Department of Software Engineering, King Saud University, Riyadh, Saudi Arabia</b></td><td>1</td></tr><tr><td><b>Institute of Information Technology, University of Dhaka, Dhaka, Bangladesh</b></td><td>1</td></tr><tr><td><b>Department of Information, The Third Affiliated Hospital, Sun Yat-sen University, China</b></td><td>1</td></tr><tr><td><b>OmniVision Technologies Singapore Pte. Ltd., Singapore</b></td><td>1</td></tr><tr><td><b>Electrical and Computer Engineering, Ryerson University, Toronto, Canada</b></td><td>1</td></tr><tr><td><b>Department of Electrical and Computing Engineering, Ryerson University, Toronto, Canada</b></td><td>1</td></tr><tr><td>Department of Electrical and Computer Engineering, Naresuan University, Muang, Thailand</td><td>1</td></tr><tr><td>Department of Computer Science, Christian-Albrechts University, Kiel, Germany</td><td>1</td></tr><tr><td>Engineering Lab on Intelligent Perception for Internet of Things, Peking University Shenzhen Graduate School, Shenzhen, China</td><td>1</td></tr><tr><td><b>MOE Key Laboratory of Machine Perception, Peking University, Beijing, China</b></td><td>1</td></tr><tr><td><b>Eletrical and Computer Engineering Department, Drexel University, Philadelphia, USA</b></td><td>1</td></tr><tr><td><b>TCL Research America, San Jose, CA 95134, USA</b></td><td>1</td></tr><tr><td><b>Dept. of Eng. Sciences and Appl. 
Mathematics, Northwestern University, Evanston, IL 60208, USA</b></td><td>1</td></tr><tr><td>Delft University of Technology and Sensor Technology, Netherlands Defense Academy</td><td>1</td></tr><tr><td><b>GE Global Research</b></td><td>1</td></tr><tr><td><b>Xerox Research Center India, India</b></td><td>1</td></tr><tr><td><b>Palo Alto Research Center, Webster, NY</b></td><td>1</td></tr><tr><td><b>Facebook, Singapore</b></td><td>1</td></tr><tr><td><b>Elektrik-Elektronik Mühendisliği Bölümü, Bahçeşehir Üniversitesi, Turkey</b></td><td>1</td></tr><tr><td><b>Elektrik-Elektronik Mühendisliği Bölümü, Boğaziçi Üniversitesi, Turkey</b></td><td>1</td></tr><tr><td><b>School of Information Technologies, The University of Sydney, NSW, Australia</b></td><td>1</td></tr><tr><td>School of Electrical Engineering and Automation, Qilu University of Technology, Jinan, China</td><td>1</td></tr><tr><td>School of Electronic and Information Engineering, Xi’an Jiaotong University, Xi’an, China</td><td>1</td></tr><tr><td><b>School of Computer and Software, Nanjing University of Information Science and Technology, Nanjing, China</b></td><td>1</td></tr><tr><td><b>Beijing Advanced Innovation Center for Imaging Technology, Beijing 100048, China</b></td><td>1</td></tr><tr><td><b>Lane Department of CSEE, West Virginia University, Morgantown, WV 26506, USA</b></td><td>1</td></tr><tr><td><b>Institute of Computing, University of Campinas, Campinas-SP, CEP, Brazil</b></td><td>1</td></tr><tr><td>Department of Electronics and Computing and the Electronics and Information Technology Research & Development Center, Universidade Federal do Amazonas, Manaus-AM, CEP, Brazil</td><td>1</td></tr><tr><td><b>National Chiao-Tung University, Hsinchiu, Taiwan</b></td><td>1</td></tr><tr><td><b>General Electric Global Research, Niskayuna, NY, USA</b></td><td>1</td></tr><tr><td><b>Institute of Computing, University of Campinas, Campinas, Brazil</b></td><td>1</td></tr><tr><td>University of California at Merced, Merced, 
USA</td><td>1</td></tr><tr><td><b>University of Adelaide, Adelaide, Australia</b></td><td>1</td></tr><tr><td>Technische Universität München, Garching, Germany</td><td>1</td></tr><tr><td><b>Department of Mathematics, Wayne State University, Detroit, MI, USA</b></td><td>1</td></tr><tr><td><b>Artificial Intelligence Key Laboratory, of Sichuan Province, Zigong, Sichuan, 643000, P. R. China</b></td><td>1</td></tr><tr><td>School of Big Data and Computer, Science, Guizhou Normal University, Guiyang, Guizhou, 550025, P. R. China</td><td>1</td></tr><tr><td><b>School of Electrical & Electronic Engineering, Yonsei University, Seoul, South Korea, 120-749</b></td><td>1</td></tr><tr><td><b>Inria M&#x00E9;diterran&#x00E9;e, France</b></td><td>1</td></tr><tr><td><b>Microsoft Research, Mountain View, California</b></td><td>1</td></tr><tr><td>University of California at Santa Cruz, Santa Cruz, California</td><td>1</td></tr><tr><td><b>Massachusetts Institute of Technology, Cambridge, MA 02139, USA</b></td><td>1</td></tr><tr><td><b>The Rowland Institute at Harvard, Harvard University, Cambridge, MA 02142, USA</b></td><td>1</td></tr><tr><td><b>School of Electrical and Electronic Engineering, The University of Manchester, Manchester, UK</b></td><td>1</td></tr><tr><td>Network Center, Huizhou University, Huizhou, China</td><td>1</td></tr><tr><td><b>School of Advanced Computing, Sun Yat-sen University, Guangzhou, China</b></td><td>1</td></tr><tr><td><b>School of Software, Sun Yat-sen University, Guangzhou, China</b></td><td>1</td></tr><tr><td>Faculty of Engineering, Computer Engineering Department, Akdeniz University, Dumlupinar Bulvari, Turkey</td><td>1</td></tr><tr><td>IRCICA, Parc Scientifique de la Haute Borne, Lille 1 University, Villeneuve d’Ascq, France</td><td>1</td></tr><tr><td><b>University of Bath</b></td><td>1</td></tr><tr><td>Data and Analytics Department, KPMG AGWPG, Düsseldorf, Germany</td><td>1</td></tr><tr><td><b>Faculty of Mathematics and Statistics, Hubei University, 
Wuhan, China</b></td><td>1</td></tr><tr><td><b>West Virginia University, Morgantown, WV</b></td><td>1</td></tr><tr><td><b>Ajou Univ.</b></td><td>1</td></tr><tr><td><b>State Key Laboratory of Transient Optics and Photonics, Center for OPTical IMagery Analysis and Learning, Xi’an Institute of Optics and Precision Mechanics, Chinese Academy of Sciences, Xi’an, China</b></td><td>1</td></tr><tr><td><b>School of Information Technology, Halmstad University, Halmstad, Sweden</b></td><td>1</td></tr><tr><td><b>Nokia Bell-Labs, Madrid, Spain</b></td><td>1</td></tr><tr><td>Department of Electronic Information Engineering, Suzhou Vocational University, Suzhou, Jiangsu, China</td><td>1</td></tr><tr><td>JiangSu Province Support Software Engineering R&D Center for Modern Information Technology Application in Enterprise, Suzhou, China</td><td>1</td></tr><tr><td>Université de Lorraine, LORIA, UMR 7503</td><td>1</td></tr><tr><td><b>Department of Psychology, University of Pittsburgh, Pittsburgh, USA</b></td><td>1</td></tr><tr><td>Department of Electrical Engineering, The City College of New York, New York, USA</td><td>1</td></tr><tr><td>Robótica y Manufactura Avanzada, Centro de Investigación y de Estudios Avanzados del Instituto Politécnico Nacional, Ramos Arizpe, Mexico</td><td>1</td></tr><tr><td><b>Technicolor, Paris, France</b></td><td>1</td></tr><tr><td><b>MPI Informatics, Germany</b></td><td>1</td></tr><tr><td>School of Computing and Electrical Engineering, Indian Institute of Technology Mandi, India</td><td>1</td></tr><tr><td>Dept. 
de Ciência da Computacão, Universidade Federal de Ouro Preto, MG Brazil</td><td>1</td></tr><tr><td><b>Interactive and Digital Media Institute, National University of Singapore, Singapore</b></td><td>1</td></tr><tr><td><b>Alibaba Group, Hangzhou, China</b></td><td>1</td></tr><tr><td><b>School of Electronic Engineering and Computer Science, Queen Mary University of London, London, UK</b></td><td>1</td></tr><tr><td><b>Shin-Guang Elementary School, Yulin 646, Taiwan</b></td><td>1</td></tr><tr><td><b>Department of Computer Science, Brown University, Providence Rhode Island, 02912, USA</b></td><td>1</td></tr><tr><td><b>School of Management, New Jersey Institute of Technology, University Heights, Newark, New Jersey 07102, USA</b></td><td>1</td></tr><tr><td><b>SLAC National Laboratory, Stanford University, Stanford, USA</b></td><td>1</td></tr><tr><td><b>IWE II, RWTH Aachen University, Aachen, Germany</b></td><td>1</td></tr><tr><td><b>School of Computer Science and Engineering, Nanyang Technological University, Singapore, Singapore</b></td><td>1</td></tr><tr><td><b>School of Electrical Engineering, Nantong University, Nantong, China</b></td><td>1</td></tr><tr><td><b>Vesalis company, Clermont-Ferrand, France</b></td><td>1</td></tr><tr><td><b>University of Calgary, Calgary, T3G 2T6 AB, CANADA</b></td><td>1</td></tr><tr><td><b>University of Louisville, Louisville, KY 40292 USA</b></td><td>1</td></tr><tr><td><b>School of Electrical Engineering and Computer Science, University of Central Florida, Orlando, FL, USA</b></td><td>1</td></tr><tr><td><b>Max Planck Institute for Informatics, Saarbrucken, Germany</b></td><td>1</td></tr><tr><td><b>College of Information Engineering, Capital Normal University, Beijing, China</b></td><td>1</td></tr><tr><td><b>School of Automation, Beijing University of Posts and Telecommunications, Beijing, China</b></td><td>1</td></tr><tr><td>Department of Electrical Engineering, University of California, Riverside, Riverside CA, California 92521 United 
States</td><td>1</td></tr><tr><td><b>Art History, University of California, Riverside, Riverside, California United States</b></td><td>1</td></tr><tr><td><b>Electrical Engineering, University of California, Riverside, Riverside, California 92521 United States</b></td><td>1</td></tr><tr><td><b>University of Science &amp; Technology (UST), Daejeon, Korea</b></td><td>1</td></tr><tr><td><b>Chinese Academy of Sciences, Chongqing, China</b></td><td>1</td></tr><tr><td><b>Chinese Academy of Sciences, Chongqing, Singapore</b></td><td>1</td></tr><tr><td><b>Universidade Estadual de Campinas, Cx.P. 6176 Campinas-SP, CEP 13084-971, Brazil</b></td><td>1</td></tr><tr><td>Department of CSE, Regional Campus of Anna University, Tirunelveli 627007, India</td><td>1</td></tr><tr><td><b>Embodied Emotion, Cognition and (Inter-)Action Lab, University of Hertfordshire, United Kingdom</b></td><td>1</td></tr><tr><td><b>Institute on Children Studies, University of Minho, Portugal</b></td><td>1</td></tr><tr><td><b>College of Aerospace and Material Engineering, National University of Defense Technology, Changsha, China</b></td><td>1</td></tr><tr><td><b>Air Force Research Lab, Rome, NY, 13441, USA</b></td><td>1</td></tr><tr><td><b>Department of Electronic Engineering, Institute of Image Communication and Information Processing, Shanghai Jiao Tong University, Shanghai, China</b></td><td>1</td></tr><tr><td><b>School of Computer Engineering, The Nanyang Technological University, Singapore</b></td><td>1</td></tr><tr><td><b>Fraunhofer Institute of Optronics, System Technologies and Image Exploitation IOSB, Fraunhoferstrasse 1, Karlsruhe, Germany</b></td><td>1</td></tr><tr><td><b>Department of Electrical and Computer Engineering, University of Toronto, Toronto, ON, Canada</b></td><td>1</td></tr><tr><td><b>Department of Computer Science, University of Texas at San Antonio</b></td><td>1</td></tr><tr><td><b>Department of Computer Science, University of Rochester</b></td><td>1</td></tr><tr><td><b>School 
of Computer Science and Technology, Tianjin University, China</b></td><td>1</td></tr><tr><td><b>Institute of Systems Science, National University of Singapore, Singapore, Singapore</b></td><td>1</td></tr><tr><td>Dalian Key Laboratory of Digital Technology for National Culture, Dalian Minzu University, Dalian, China</td><td>1</td></tr><tr><td><b>Institute of Systems Science, Northeastern University, Shenyang, China</b></td><td>1</td></tr><tr><td><b>Philips Research Eindhoven, HTC 34, Netherlands</b></td><td>1</td></tr><tr><td><b>Epson Research and Development Inc., San Jose, CA</b></td><td>1</td></tr><tr><td><b>GE Global Research, Bangalore, India</b></td><td>1</td></tr><tr><td><b>Advanced Media Lab. Samsung Advance Institute of Technology, Republic of Korea</b></td><td>1</td></tr><tr><td>Department of Business Planning &amp; Information Systems, TEI of Crete, Agios Nikolaos, Greece</td><td>1</td></tr><tr><td><b>National Institute of Informatics, Japan</b></td><td>1</td></tr><tr><td>School of Information Technology Jawaharlal Nehru Technological University Hyderabad Andhra Pradesh, India</td><td>1</td></tr><tr><td><b>Department of CSE, Vignan University, Andhra Pradesh, India</b></td><td>1</td></tr><tr><td><b>University of North Carolina at Wilmington, USA</b></td><td>1</td></tr><tr><td><b>UNCW, USA</b></td><td>1</td></tr><tr><td><b>Department of EngineeringFaculty of Engineering and Science, University of Agder, Kristiansand, Norway</b></td><td>1</td></tr><tr><td><b>Yahoo Inc., New York, NY, USA</b></td><td>1</td></tr><tr><td>Sorbonne Universités, UPMC Univ Paris 06, UMR 7606, LIP6, Paris, France</td><td>1</td></tr><tr><td><b>Queen Mary, University of London</b></td><td>1</td></tr><tr><td><b>Brunel University</b></td><td>1</td></tr><tr><td><b>Vision &amp; Sensing Group, Faculty of Information Sciences and Engineering, University of Canberra, Australia</b></td><td>1</td></tr><tr><td><b>School of Engineering, CECS, Australian National University, 
Australia</b></td><td>1</td></tr><tr><td><b>Comput. Control Lab, Nanyang Technol. Univ., Singapore</b></td><td>1</td></tr><tr><td>School of Computer ScienceThe University of Adelaide</td><td>1</td></tr><tr><td><b>Instituto de Sistemas e Rob&#x00F3;tica, Instituto Superior T&#x00E9;cnico, Lisboa, Portugal</b></td><td>1</td></tr><tr><td><b>University of Washington, Seattle, WA, USA</b></td><td>1</td></tr><tr><td>Shanghai Advanced Research Institute, CAS & Qiniu AI Lab, Shanghai, China</td><td>1</td></tr><tr><td>University of Lyon, LIRIS - CNRS, National Institute of Applied Sciences (INSA), Lyon, France</td><td>1</td></tr><tr><td><b>Shenzhen Graduate School, Harbin Institute of Technology, Bio-Computing Research Center, Shenzhen, China</b></td><td>1</td></tr><tr><td>Toyohashi University of Technology, Toyohashi, Japan</td><td>1</td></tr><tr><td><b>Department of Computing, Biometrics Research Centre, The Hong Kong Polytechnic University, Hong Kong</b></td><td>1</td></tr><tr><td><b>School of Computer Science, Nanjing University of Science and Technology, Nanjing, China</b></td><td>1</td></tr><tr><td>Department of Informatics, King’s College London, Strand, London, UK</td><td>1</td></tr><tr><td><b>Centre for Quantum Computation &amp; Intelligent Systems, University of Technology, Sydney, Australia</b></td><td>1</td></tr><tr><td>School of Computer Science and Engineering, Hebei University of Technology, Tianjin, China</td><td>1</td></tr><tr><td><b>CSIE, National Cheng Kung University, Tainan, 701 Taiwan</b></td><td>1</td></tr><tr><td><b>CSIE, National Taiwan University of Science and Technology, Taipei, 106 Taiwan</b></td><td>1</td></tr><tr><td><b>Computer Science and Engineering Department, University of Texas at Arlington, Arlington, TX, USA</b></td><td>1</td></tr><tr><td><b>INSA CVL, Univ. 
Orléans, PRISME EA 4229, Bourges, France</b></td><td>1</td></tr><tr><td><b>LITIS, Universite de Rouen - INSA de Rouen, Rouen, FR</b></td><td>1</td></tr><tr><td>Department of Learning and Digital Technology, Fo Guang University, Yilan, Taiwan</td><td>1</td></tr><tr><td><b>Department of Computer Science and Engineering, The Hong Kong University of Science and Technology, Hong Kong</b></td><td>1</td></tr><tr><td><b>Noah’s Ark Laboratory, Hong Kong</b></td><td>1</td></tr><tr><td><b>Noah.s Ark Laboratory and Hong Kong University of Science and Technology, Hong Kong</b></td><td>1</td></tr><tr><td><b>La Trobe University, Melbourne, Australia</b></td><td>1</td></tr><tr><td>BITS Pilani, India , India</td><td>1</td></tr><tr><td>College of Computer and Information Sciences, Al Imam Mohammad Ibn Saud Islamic University (IMSIU), Riyadh, Saudi Arabia</td><td>1</td></tr><tr><td><b>COMSATS, Institute of Information Technology, Sahiwal, Pakistan</b></td><td>1</td></tr><tr><td>National University of Computer and Emerging Sciences, Islamabad, Islamabad, Pakistan</td><td>1</td></tr><tr><td><b>Concordia Institute for Information Systems Engineering (CIISE), Concordia University, Montreal, Canada</b></td><td>1</td></tr><tr><td><b>Department of Electrical and Computer Engineering, Concordia University, Montreal, Canada</b></td><td>1</td></tr><tr><td>Research Team on Audio Visual Signal Processing (AVSP), Vrije Universiteit Brussel (VUB), Electronics and Informatics Department, VUB-ETRO, Pleinlaan 2, 1050 Brussel, Belgium</td><td>1</td></tr><tr><td><b>School of Engineering and Information Technology, Deakin University, Geelong, Australia</b></td><td>1</td></tr><tr><td><b>Griffith University, Queensland, Australia</b></td><td>1</td></tr><tr><td><b>Department of Computer Science, University of Texas at San Antonio, San Antonio, United States</b></td><td>1</td></tr><tr><td><b>Chongqing Institute of Green and Intelligent Technology, Chinese Academy of China, Hefei University of Technology, 
Hefei, China</b></td><td>1</td></tr><tr><td><b>Fac. of Mathematics and Computer Sciences, University of Science, Ho Chi Minh City, Viet Nam</b></td><td>1</td></tr><tr><td><b>Graduate Institute of Communication Engineering, National Taiwan University, Taipei, Taiwan</b></td><td>1</td></tr><tr><td><b>LAMIA, University of French West Indies and Guiana, EA 4540, Pointe-à-Pitre, France</b></td><td>1</td></tr><tr><td><b>Institute of Intelligent Systems and Robotics (ISIR), Pierre and Marie Curie University, Paris, France</b></td><td>1</td></tr><tr><td><b>Xiamen University of Technology, Xiamen, China</b></td><td>1</td></tr><tr><td><b>Chulalongkorn University Bangkok, Thailand</b></td><td>1</td></tr><tr><td>College of Computer Science and Technology of Huaqiao University Xiamen, Xiamen, China</td><td>1</td></tr><tr><td><b>School of Automation, Huazhong University of Science and Technology, Wuhan, China</b></td><td>1</td></tr><tr><td>Department of Computer Science and Information Engineering, National Yunlin University of Science and Technology, Taiwan 640, R.O.C.</td><td>1</td></tr><tr><td><b>Bordeaux INP, LaBRI, PICTURA, UMR 5800, F-33400 Talence, France</b></td><td>1</td></tr><tr><td>Institute of Image Processing and Pattern Recognition, Henan University, Kaifeng 475004, China</td><td>1</td></tr><tr><td><b>Department of Systems and Control Engineering, University of Malta, Msida, Malta</b></td><td>1</td></tr><tr><td>Department of Electrical Engineering, College of Electrical Engineering and Computer Science, National United University, Miaoli, Taiwan</td><td>1</td></tr><tr><td>Department of Information Management, College of Management, National United University, Miaoli, Taiwan</td><td>1</td></tr><tr><td>Department of Computer Science and Information Engineering, College of Electrical Engineering and Computer Science, National United University, Miaoli, Taiwan</td><td>1</td></tr><tr><td><b>Fundamental and Applied Science Department, Universiti Teknologi PETRONAS, Seri 
Iskandar, 32610, Perak Malaysia</b></td><td>1</td></tr><tr><td>Department of Statistics, University of California at Berkeley, Berkeley, USA</td><td>1</td></tr><tr><td>International Computer Science Institute, University of California at Berkeley, Berkeley, USA</td><td>1</td></tr><tr><td><b>Computer Science Department, Rensselaer Polytechnic Institute, Troy, USA</b></td><td>1</td></tr><tr><td>College of Information Science and Technology, Agricultural University of Hebei, Baoding, China</td><td>1</td></tr><tr><td><b>Beijing Key Lab of Traffic Data Analysis and Mining, Beijing Jiaotong University, Beijing, China</b></td><td>1</td></tr><tr><td><b>Sch. of Electr. Eng. & Comput. Sci., Newcastle Univ., NSW, Australia</b></td><td>1</td></tr><tr><td><b>University of Sassari, Computer Vision Laboratory, PolComing Viale Mancini, 5 07100 Sassari, Italy</b></td><td>1</td></tr><tr><td><b>Centre for Intelligent Machines, McGill University, Montréal, Canada</b></td><td>1</td></tr><tr><td><b>Azure Storage, Microsoft, Seattle, WA, USA</b></td><td>1</td></tr><tr><td>Department of Electronics Engineering, Mokpo National University, Republic of Korea</td><td>1</td></tr><tr><td><b>School of Information and Communication Engineering, Sungkyunkwan University, Suwon, Republic of Korea</b></td><td>1</td></tr><tr><td><b>Institute of Computer Science and Technology, Peking university, Beijing, China</b></td><td>1</td></tr><tr><td>FX Palo Alto Laboratory</td><td>1</td></tr><tr><td>Department of Applied Optics and Photonics, University of Calcutta, Kolkata, India</td><td>1</td></tr><tr><td>Department of Electrical Engineering, Future Institute of Engineering and Management, Kolkata, India</td><td>1</td></tr><tr><td><b>School of Electronics and Information, Northwestern Polytechnical University, Xian, China</b></td><td>1</td></tr><tr><td><b>Department of Computer Science, University of North Carolina, Charlotte, USA</b></td><td>1</td></tr><tr><td><b>Graduate Program on Electrical Engineering, 
Institute of Informatics, Federal University of Rio Grande do Sul, Porto Alegre, Brazil</b></td><td>1</td></tr><tr><td><b>Graduate Program on Electrical Engineering, University of Passo Fundo, Passo Fundo, Brazil</b></td><td>1</td></tr><tr><td><b>Institute of Informatics, Federal University of Rio Grande do Sul, Porto Alegre, Brazil</b></td><td>1</td></tr><tr><td>Department of Electronics and Communication Engineering, Visvesvaraya National Institute of Technology, Nagpur, India</td><td>1</td></tr><tr><td><b>Department of Information Systems, College of Computer and Information Sciences, King Saud University, Riyadh, Saudi Arabia</b></td><td>1</td></tr><tr><td><b>Northwestern Polytechnical University, Xi&#x2019;an, China</b></td><td>1</td></tr><tr><td><b>Osaka University, Japan</b></td><td>1</td></tr><tr><td>Telecom Division, Centre de Développement des Technologies Avancées, Algiers, Algeria</td><td>1</td></tr><tr><td><b>Delft University of Technology, EEMCS, Delft, The Netherlands, reinierz@gmail.com</b></td><td>1</td></tr><tr><td><b>Imperial College London, Computing Department, London, U.K., m.pantic@imperial.ac.uk</b></td><td>1</td></tr><tr><td><b>Key Laboratory of Specialty Fiber Optics and Optical Access Networks, Shanghai University, Shanghai, China</b></td><td>1</td></tr><tr><td>Departments of Medical Imaging and Medical Biophysics, University of Western Ontario, London, ON, Canada</td><td>1</td></tr><tr><td><b>St. Joseph’s Health Care, London, ON, Canada</b></td><td>1</td></tr><tr><td><b>Northumbria University, Newcastle upon Tyne, U.K.</b></td><td>1</td></tr><tr><td>Department of Medical Biophysics, University of Western Ontario, London, ON, Canada</td><td>1</td></tr><tr><td><b>School of Computer Science, University of Nottingham, Nottingham, UK</b></td><td>1</td></tr><tr><td><b>School of Electrical Engineering, Kookmin University, Seoul, Korea</b></td><td>1</td></tr><tr><td><b>University of Science and Technology of China, Hefei, P.R. 
China</b></td><td>1</td></tr><tr><td><b>The School of Physics and Telecommunication Engineering, South China Normal University, Guangzhou, China</b></td><td>1</td></tr><tr><td>School of Computer Science, Shaanxi Normal University, Xi’an, China</td><td>1</td></tr><tr><td>Engineering Laboratory of Teaching Information Technology of Shaanxi Province, Xi’an, China</td><td>1</td></tr><tr><td>Key Laboratory of Modern Teaching Technology, Ministry of Education, Xi’an, China</td><td>1</td></tr><tr><td><b>College of Automation, Shenyang Aerospace University, China</b></td><td>1</td></tr><tr><td><b>Universit&#x00E9; de Lyon, CNRS, Ecole Centrale de Lyon, LIRIS UMR5205, F-69134, France</b></td><td>1</td></tr><tr><td>College of Control Engineering, Northeastern University at Qinhuangdao, Qinhuangdao, P.R. China</td><td>1</td></tr><tr><td><b>Institute of Automation, Chinese Academy of Sciences, Beijing, P.R. China</b></td><td>1</td></tr><tr><td><b>Faculty of Computer Science & Information Technology University of Malaya Kuala Lumpur, Malaysia</b></td><td>1</td></tr><tr><td>Nanyang Technological University School of Computer Engineering</td><td>1</td></tr><tr><td><b>College of Engineering, Shibaura Institute of Technology, Tokyo, Japan</b></td><td>1</td></tr><tr><td><b>Graduate School of Engineering, Shibaura Institute of Technology, Tokyo, Japan</b></td><td>1</td></tr><tr><td>Department of Electronics and Electrical Engineering, Indian Institute of Technology (IIT) Guwahati, Guwahati, India</td><td>1</td></tr><tr><td>Technology Section, Israel National Police, Jerusalem, Israel</td><td>1</td></tr><tr><td>Department of Electro-Optics Engineering, Ben-Gurion University, Beer Sheva, Israel</td><td>1</td></tr><tr><td>Department of Mathematics, JiaYing University, Meizhou, China</td><td>1</td></tr><tr><td>Hebei University of Technology, School of Science, Tianjin, P. R. 
China</td><td>1</td></tr><tr><td><b>Department of Electrical and Computer Engineering, College of Engineering, and College of Computer and Information Science (Affiliated), Northeastern University, MA, USA</b></td><td>1</td></tr><tr><td><b>Chongqing University, Chongqing, China</b></td><td>1</td></tr><tr><td><b>Semnan University, Semnan, Iran</b></td><td>1</td></tr><tr><td>YiLi Normal College, Yining, China</td><td>1</td></tr><tr><td><b>Curtin University, Perth WA, Australia</b></td><td>1</td></tr><tr><td>Faculty of Electronic Information and Electrical Engineering, Dalian University, Dalian, China</td><td>1</td></tr><tr><td><b>Department of Industrial Engineering and Engineering Management, National Tsing Hua University, Taiwan</b></td><td>1</td></tr><tr><td>Centre for Innovation in IT Services and Applications (iNEXT), University of Technology, Sydney, Australia</td><td>1</td></tr><tr><td>Video Surveillance Laboratory, Guizhou University for Nationalities, Guiyang, China</td><td>1</td></tr><tr><td><b>Institut Mines-Telecom, Telecom ParisTech, CNRS LTCI, Sophia Antipolis, France</b></td><td>1</td></tr><tr><td>College of Arts and Sciences, Shanxi Agricultural University, Shanxi, China</td><td>1</td></tr><tr><td><b>Centre for Intelligent Systems Research, Deakin University, Geelong, VIC, Australia</b></td><td>1</td></tr><tr><td><b>Faculty of Engineering, Technology, and Built Environment, UCSI University, Kuala Lumpur, Malaysia</b></td><td>1</td></tr><tr><td><b>Sichuan Province Key Lab of Signal and Information Processing, Southwest Jiaotong University, Chengdu 610031, PR China</b></td><td>1</td></tr><tr><td><b>Department of Computer Science, Cornell University and Cornell NYC Tech</b></td><td>1</td></tr><tr><td><b>Dept of Computer Science, University of North Carolina at Charlotte, Charlotte, NC, USA</b></td><td>1</td></tr><tr><td><b>Department of Computer Science, Xiamen University, Xiamen, China</b></td><td>1</td></tr><tr><td><b>Department of Electrical and 
Computer Engineering, New Jersey Institute of Technology, Newark , NJ, USA</b></td><td>1</td></tr><tr><td><b>Microsoft Research , Redmond, WA, USA</b></td><td>1</td></tr><tr><td><b>Microsoft Visual Perception Laboratory, Zhejiang University, Hangzhou, China</b></td><td>1</td></tr><tr><td><b>Coll. of Electron. & Inf., Northwestern Polytech. Univ., Xi'an, China</b></td><td>1</td></tr><tr><td><b>Nanyang Technological University and the Institute for Infocomm Research, Singapore</b></td><td>1</td></tr><tr><td><b>Intelligent Systems Laboratory, University of Bristol, Merchant Venturers Building, Woodland Rd, Bristol BS8 1UB, UK</b></td><td>1</td></tr><tr><td>IRDA Group, ADMIR Laboratory, Rabat IT Center, ENSIAS, CNRST (URAC29), Mohammed V University of Rabat, Morocco</td><td>1</td></tr><tr><td>LRIT, CNRST (URAC29), Mohammed V University of Rabat, Morocco</td><td>1</td></tr><tr><td><b>Ajou University</b></td><td>1</td></tr><tr><td>Queen’s University, Kingston, Canada</td><td>1</td></tr><tr><td>University of Science Technology, Wuhan, China</td><td>1</td></tr><tr><td>Tunku Abdul Rahman University College, Kuala Lumpur, Malaysia</td><td>1</td></tr><tr><td>University at Qatar, Doha, Qatar</td><td>1</td></tr><tr><td>University of Istanbul, Istanbul, Turkey</td><td>1</td></tr><tr><td><b>Institute for Information and System Sciences and Ministry of Education Key Lab for Intelligent Networks and Network Security, Xi'an Jiaotong University, Xi'an 710049, PR China</b></td><td>1</td></tr><tr><td><b>Faculty of Applied Mathematics, Guangdong University of Technology, Guangzhou, China</b></td><td>1</td></tr><tr><td><b>Faculty of Information Science and Technology, Sun Yat-Sen University, Guangzhou, China</b></td><td>1</td></tr><tr><td>Department of Computer Science, Chu Hai College of Higher Education, Tuen Mun, Hong Kong</td><td>1</td></tr><tr><td>PolyU Shenzhen Research Institute, Shenzhen, China</td><td>1</td></tr><tr><td><b>Department of Computer Science, University of 
Loughborogh</b></td><td>1</td></tr><tr><td><b>Department of Electrical Engineering and Electronics, University of Liverpool</b></td><td>1</td></tr><tr><td><b>University of Bristol, Bristol, United Kingdom</b></td><td>1</td></tr><tr><td>German National Library of Science and Technology & Leibniz Universität Hannover, Hannover, Germany</td><td>1</td></tr><tr><td>University of Applied Sciences Jena, Jena, Germany</td><td>1</td></tr><tr><td>Department of Creative IT Engineering, POSTECH, Pohang, South Korea, 37673</td><td>1</td></tr><tr><td><b>Viterbi School of Engineering, University of Southern California, Los Angeles, CA</b></td><td>1</td></tr><tr><td><b>Centre for Multimedia Signal Processing and Department of Computing, Hong Kong Polytechnic University, Flat PQ717, Kowloon, Hung Hom, Hong Kong</b></td><td>1</td></tr><tr><td>Department of Computer Science, University of Western Ontario, London, Canada</td><td>1</td></tr><tr><td><b>University of Pittsburgh, USA</b></td><td>1</td></tr><tr><td><b>Anhui University, HeFei, China and Chinese Academy of Sciences, Beijing, China</b></td><td>1</td></tr><tr><td><b>Rensselaer Polytechnic Institute, Troy, NY, USA</b></td><td>1</td></tr><tr><td>Vision Laboratory, LARSyS, University of the Algarve, Faro, Portugal</td><td>1</td></tr><tr><td><b>Donghua University, China</b></td><td>1</td></tr><tr><td>Department of Information Management, Yuan Ze University, Taoyuan, China</td><td>1</td></tr><tr><td><b>AI Speech Ltd., Suzhou, China</b></td><td>1</td></tr><tr><td><b>Rail Traffic Control and Safety, Beijing Jiaotong University, Beijing, China</b></td><td>1</td></tr><tr><td>DICGIM, Universitá degli Studi di Palermo, V.le delle Scienze, Ed. 
6, 90128 Palermo, Italy</td><td>1</td></tr><tr><td>Department of Computer Engineering, Karadeniz Technical University, Trabzon, Turkey</td><td>1</td></tr><tr><td>Department of Computer Technologies, Trabzon Vocational School, Karadeniz Technical University, Trabzon, Turkey</td><td>1</td></tr><tr><td><b>Department of Electronic Engineering, Kyung Hee University, Yongin, South Korea</b></td><td>1</td></tr><tr><td>Stanford University and Coursera</td><td>1</td></tr><tr><td>Dept. of Comput. Sci. &amp; Info. Eng., National Yunlin Univ. of Science &amp; Technology, Taiwan</td><td>1</td></tr><tr><td><b>Bilgisayar Mühendisliği Bölümü, İstanbul Üniversitesi, Turkey</b></td><td>1</td></tr><tr><td><b>Bilgisayar Mühendisliği Bölümü, Bahçeşehir Üniversitesi, İstanbul, Turkey</b></td><td>1</td></tr><tr><td><b>Institute of Industrial Science, The University of Tokyo, Tokyo, Japan</b></td><td>1</td></tr><tr><td><b>School of Computer Science and Technology, Tianjin Key Laboratory of Cognitive Computing and Application, Tianjin University, Tianjin, China</b></td><td>1</td></tr><tr><td>Faculty of Mathematics and Statistics, Hubei Key Laboratory of Applied Mathematics, Hubei University, Wuhan, China</td><td>1</td></tr><tr><td><b>Centre for Vision, Speech and Signal Processing, University of Surrey, United Kingdom. k.messer@surrey.ac.uk</b></td><td>1</td></tr><tr><td><b>Centre for Vision, Speech and Signal Processing, University of Surrey, United Kingdom. j.kittler@surrey.ac.uk</b></td><td>1</td></tr><tr><td><b>Centre for Vision, Speech and Signal Processing, University of Surrey, United Kingdom. 
x.zou@surrey.ac.uk</b></td><td>1</td></tr><tr><td><b>University of Twente, Human Media Interaction Group, Enschede, The Netherlands</b></td><td>1</td></tr><tr><td>Biometric and Imaging Processing Laboratory (BIPLab)</td><td>1</td></tr><tr><td><b>University of Naples Federico II</b></td><td>1</td></tr><tr><td><b>Warsaw University of Technology</b></td><td>1</td></tr><tr><td>Research and Academic Computer Network (NASK)</td><td>1</td></tr><tr><td><b>SensoMotoric Instruments (SMI)</b></td><td>1</td></tr><tr><td><b>Maastricht University</b></td><td>1</td></tr><tr><td><b>Università di Salerno Italy</b></td><td>1</td></tr><tr><td><b>University of Southampton</b></td><td>1</td></tr><tr><td><b>University of Beira Interior, IT: Instituto de Telecomunicações</b></td><td>1</td></tr><tr><td>Philips Applied Technologies, Eindhoven, Netherlands</td><td>1</td></tr><tr><td><b>Delft University of Technology, Delft, Netherlands</b></td><td>1</td></tr><tr><td>Philips Research Eindhoven, Eindhoven, Netherlands</td><td>1</td></tr><tr><td>Key Lab Complex System & Intelligence Science, Institute of Automation, Chinese Academy of Science, Beijing, China</td><td>1</td></tr><tr><td>College of Computer Science and Technology, Wuhan University of Science and Technology, Wuhan, China</td><td>1</td></tr><tr><td><b>State Key Laboratory of Software Engineering, Wuhan University, Wuhan, China</b></td><td>1</td></tr><tr><td><b>SAP Innovation Center Networks, Singapore</b></td><td>1</td></tr><tr><td><b>Department of Computer Science and Engineering, Southeast University, Nanjing, China</b></td><td>1</td></tr><tr><td>National Research University Higher School of Economics, Laboratory of Algorithms and Technologies for Network Analysis, Nizhny Novgorod, Russia</td><td>1</td></tr><tr><td><b>Bioinformatics Institute, A&#x2217;STAR, Singapore</b></td><td>1</td></tr><tr><td>Emory University School of Medicine, Atlanta, USA</td><td>1</td></tr><tr><td><b>School of Electrical and Computer Engineering, 
Georgia Institute of Technology, Atlanta, USA</b></td><td>1</td></tr><tr><td>Department of Radiology and Imaging Sciences, Winship Cancer Institute, Emory University School of Medicine, Atlanta, USA</td><td>1</td></tr><tr><td><b>iCV Research Group, Institute of Technology, University of Tartu, 50411, Estonia</b></td><td>1</td></tr><tr><td><b>Dept. Mathematics and Informatics, University of Barcelona, Computer Vision Center, Spain</b></td><td>1</td></tr><tr><td><b>Institute of Technology, University of Tartu, 50411, Estonia</b></td><td>1</td></tr><tr><td><b>Amazon.com Cambridge, MA, USA</b></td><td>1</td></tr><tr><td>Dept. of EMPH, Icahn School of Medicine at Mount Sinai, New York, NY 10029</td><td>1</td></tr><tr><td><b>Dept. of ENME College Park, University of Maryland, College Park, MD, 20742</b></td><td>1</td></tr><tr><td><b>Eskişehir Osmangazi Üniversitesi, Bilgisayar Mühendisliği Bölümü, Eskişehir, Türkiye</b></td><td>1</td></tr><tr><td><b>Anadolu Üniversitesi, Elek., Elektronik Mühendisliği Bölümü, Eskişehir, Türkiye</b></td><td>1</td></tr><tr><td><b>Department of Computer Science, University of Texas at San Antonio, San Antonio, TX</b></td><td>1</td></tr><tr><td>Electrical-Electronics Engineering Department, Izmir University of Economics, Balcova, Turkey</td><td>1</td></tr><tr><td><b>Electrical-Electronics Engineering Department, Firat University, Elazig, Turkey</b></td><td>1</td></tr><tr><td><b>Mechatronics Engineering Department, Firat University, Elazig, Turkey</b></td><td>1</td></tr><tr><td>Department of Computer Science, Solapur University, Solapur, India</td><td>1</td></tr><tr><td><b>Vision Semantics Ltd, UK</b></td><td>1</td></tr><tr><td><b>Rutgers University, USA</b></td><td>1</td></tr><tr><td><b>Computer Science, SUNY Stony Brook, Stony Brook, United States</b></td><td>1</td></tr><tr><td>Computer Vision Research Group, School of Computer Sciences, Universiti Sains Malaysia, Penang, Malaysia</td><td>1</td></tr><tr><td><b>Department of Mathematics, 
Colorado State University, Fort Collins, CO 80523-1874 U.S.A. peterson@math.colostate.edu</b></td><td>1</td></tr><tr><td><b>Department of Mathematics, Colorado State University, Fort Collins, CO 80523-1874 U.S.A. kirby@math.colostate.edu</b></td><td>1</td></tr><tr><td><b>Department of Mathematics, Colorado State University, Fort Collins, CO 80523-1874 U.S.A. chang@math.colostate.edu</b></td><td>1</td></tr><tr><td><b>Department of Electrical Engineering, Faculty of Engineering, Urmia university, Urmia, Iran</b></td><td>1</td></tr><tr><td>Department of Information Technology, Netaji Subhas Engineering College, Kolkata, India</td><td>1</td></tr><tr><td>Computer Engineering College, Jimei University, Xiamen, China</td><td>1</td></tr><tr><td>Fujian Key Laboratory of the Brain-like Intelligent Systems, Xiamen, China</td><td>1</td></tr><tr><td>School of Information, Hunan University of Humanities, Science and Technology, Loudi, China</td><td>1</td></tr><tr><td><b>Cognitive Science Department, Xiamen University, Xiamen, China</b></td><td>1</td></tr><tr><td>Al Imam Mohammad Ibn Saud Islamic University, Riyadh, Saudi Arabia</td><td>1</td></tr><tr><td>School of Information and Mechatronics, Gwangju Institute of Science and Technology, Gwangju, Korea</td><td>1</td></tr><tr><td><b>Department of Computer Science, COMSATS, Institute of Information Technology, Sahiwal, Pakistan</b></td><td>1</td></tr><tr><td><b>The University of Electro-Communications, Japan</b></td><td>1</td></tr><tr><td><b>Institute for Infocomm Research, A-star, Singapore</b></td><td>1</td></tr><tr><td><b>Inst. 
Dalle Molle d'Intelligence Artificielle Perceptive, Martigny, Switzerland</b></td><td>1</td></tr><tr><td><b>Transmural Biotech, Barcelona, Spain</b></td><td>1</td></tr><tr><td><b>George Mason University, Fairfax, VA 22030</b></td><td>1</td></tr><tr><td>Gwangju Institute of Science and Technology (GIST), Gwangju, Republic of Korea</td><td>1</td></tr><tr><td><b>Computational Biomedicine Lab, University of Houston, 4800 Calhoun Rd., Houston, TX 77204, USA</b></td><td>1</td></tr><tr><td><b>Purdue University, West Lafayette, IN, USA</b></td><td>1</td></tr><tr><td><b>Moshanghua Tech Company, Ltd., Beijing, China</b></td><td>1</td></tr><tr><td><b>College of Information Engineering, Xiangtan University, Xiangtan, China</b></td><td>1</td></tr><tr><td><b>CARTIF Centro Tecnológico, Robotics and Computer Vision Division, Boecillo (Valladolid, Spain)</b></td><td>1</td></tr><tr><td><b>University of California, San Diego</b></td><td>1</td></tr><tr><td><b>School of Software Engineering, South China University of Technology, Guangzhou, China</b></td><td>1</td></tr><tr><td><b>School of Computer Science, South China Normal University, Guangzhou, China</b></td><td>1</td></tr><tr><td>Dept. 
of Computer Science and Information Engineering, Providence University, Taichung, Taiwan</td><td>1</td></tr><tr><td>360 AI Institute, Beijing, China</td><td>1</td></tr><tr><td><b>Tencent YouTu Lab, Tencent Shanghai, China</b></td><td>1</td></tr><tr><td><b>Sun Yat-sen University, China</b></td><td>1</td></tr><tr><td><b>Centeye, Inc.</b></td><td>1</td></tr><tr><td><b>Center for Optical Imagery Analysis and Learning (OPTIMAL), State Key Laboratory of Transient Optics and Photonics, Xi'an Institute of Optics and Precision Mechanics, Chinese Academy of Sciences, Xi'an, China</b></td><td>1</td></tr><tr><td><b>Institute of Information and Control, Hangzhou Dianzi University, China</b></td><td>1</td></tr><tr><td><b>Hong Kong Baptist University and BNU-HKBU United International College</b></td><td>1</td></tr><tr><td>NPU-VUB Joint AVSP Research Lab, Vrije Universitiet Brussel (VUB), Department of Electronics & Informatics (ETRO) Pleinlaan 2, 1050 Brussel, Belgium</td><td>1</td></tr><tr><td><b>Department of Computer Science, School of Information Science and Engineering, Xiamen University, Xiamen, China</b></td><td>1</td></tr><tr><td><b>School of Communication and Information Engineering, Shanghai University</b></td><td>1</td></tr><tr><td>IRISA, University of Rennes 1</td><td>1</td></tr><tr><td><b>INRIA Rennes-Bretagne-Atlantique</b></td><td>1</td></tr><tr><td><b>Advanced Digital Sciences Center, University of Illinois at Urbana-Champaign, Singapore</b></td><td>1</td></tr><tr><td><b>International Institute of Information Technology, Hyderabad, Telangana, India</b></td><td>1</td></tr><tr><td><b>Shenzhen Graduate School, Harbin Institute of Technology, 518055, China</b></td><td>1</td></tr><tr><td>Research Institution of Intelligent Control and Testing, Graduate School of Tsinghua University at Shenzhen, 518055, China</td><td>1</td></tr><tr><td>Commonwealth Scientific and Industrial Research Organization (CSIRO)</td><td>1</td></tr><tr><td><b>University of Canberra, 
Austrlia</b></td><td>1</td></tr><tr><td><b>B-DAT Lab, School of Information and Control, Nanjing University of Information Science and Technology, No. 219, Ningliu Road, Nanjing, China</b></td><td>1</td></tr><tr><td><b>Department of Computer Science, Rutgers University, 110 Frelinghuysen Road, Piscataway</b></td><td>1</td></tr><tr><td><b>Ocean University of China, Teaching Center of Fundamental Courses, Qingdao, China</b></td><td>1</td></tr><tr><td>Indiana University-Bloomington, USA</td><td>1</td></tr><tr><td>Key Laboratory of Medical Image Computing (Northeastern University), Ministry of Education, Shenyang, China</td><td>1</td></tr><tr><td><b>School of Information Science and Engineering, Northeastern University, Shenyang, China</b></td><td>1</td></tr><tr><td>Clínica Otocenter, Teresina, Piauí, Brasil</td><td>1</td></tr><tr><td>Key Lab of Broadband Wireless Communication and Sensor Network Technology, Ministry of Education, Nanjing, China</td><td>1</td></tr><tr><td>Nanjing University of Posts and Telecommunications, Nanjing, China</td><td>1</td></tr><tr><td><b>Department of Computer Science and Engineering, The State University of New York at Buffalo, New York, USA</b></td><td>1</td></tr><tr><td><b>Elektrik-Elektronik Mühendisliği Bölümü, Trakya Üniversitesi, Edirne, Türkiye</b></td><td>1</td></tr><tr><td>Grupo de Aplicacion de Telecomunicaciones Visuales, Universidad Politecnica de Madrid, Av. Complutense 30, 28040 Madrid, Spain</td><td>1</td></tr><tr><td>Department of Management Information Systems, Universität Regensburg, Universitätsstr. 
31, 93053 Regensburg, Germany</td><td>1</td></tr><tr><td><b>Amrita E-Learning Research Laboratory, Amrita Vishwa Vidyapeetham, Amritapuri, Kollam, India</b></td><td>1</td></tr><tr><td>Department of Computer Science, Amrita Vishwa Vidyapeetham, Amritapuri Campus, Kollam, India</td><td>1</td></tr><tr><td>Amrita E-Learning Research Laboratory and the Department of Computer Science, Amrita Vishwa Vidyapeetham, Amritapuri Campus, Kollam, India</td><td>1</td></tr><tr><td><b>IKERBASQUE, Basque Foundation for Science, and the University of the Basque Country, San Sebastian, Spain</b></td><td>1</td></tr><tr><td><b>Computer Vision Center, Edifici &#x201C;O&#x201D; - Campus UAB, 08193 Bellaterra (Barcelona), Spain</b></td><td>1</td></tr><tr><td><b>Amazon Research, Berlin, Germany</b></td><td>1</td></tr><tr><td><b>DISI-Alma Mater Studiorum, Università di Bologna, Bologna, Italy</b></td><td>1</td></tr><tr><td><b>State Key Laboratory of Rail Traffic Control and Safety, Beijing Jiaotong University, Beijing, 100044, China</b></td><td>1</td></tr><tr><td>Department of ECE, PEC University of Technology, Chandigarh, India</td><td>1</td></tr><tr><td>Biomedical Instrumentation (V-02), CSIR-Central Scientific Instruments Organisation (CSIO)|, Chandigarh, India</td><td>1</td></tr><tr><td>CEERI, Pilani, India</td><td>1</td></tr><tr><td>MNIT, Jaipur, India</td><td>1</td></tr><tr><td><b>Samsung Advanced Institute of Technology, Samsung Electronics, Gyeonggi-do, Korea</b></td><td>1</td></tr><tr><td><b>Department of Information Engineering, University of Florence, Firenze, Italy</b></td><td>1</td></tr><tr><td><b>Carnegie Mellon University, Pittsburgh, USA</b></td><td>1</td></tr><tr><td><b>Center for Biometrics and Security Research & National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, Beijing, China</b></td><td>1</td></tr><tr><td>Department of Arts and Humanities, College of Business, Arts and Social Sciences, Brunel University London, Uxbridge, 
UK</td><td>1</td></tr><tr><td><b>Product/Industrial Design, Northumbria School of Design, Northumbria University, Newcastle upon Tyne, UK</b></td><td>1</td></tr><tr><td>Department of Design, College of Engineering, Design and Physical Sciences, Brunel University London, Uxbridge, UK</td><td>1</td></tr><tr><td>Department of Computer Science and Engineering, Indian Institute of Technology Madras, Chennai, India</td><td>1</td></tr><tr><td><b>The Organization of Advanced Science and Technology, Kobe University, Kobe, Japan</b></td><td>1</td></tr><tr><td><b>RIEB, Kobe University, Kobe, Japan</b></td><td>1</td></tr><tr><td>NTT Service Evolution Laboratories, Kanagawa, Japan</td><td>1</td></tr><tr><td><b>Tsinghua National Lab for Information Science and Technology, Beijing, China</b></td><td>1</td></tr><tr><td><b>Universidad Argentina de la Empresa (UADE), Lima 717, Buenos Aires, Argentina</b></td><td>1</td></tr><tr><td><b>Columbia University, NEW YORK, NY, USA</b></td><td>1</td></tr><tr><td>Lee Kong Chian Faculty of Engineering and Science, Universiti Tunku Abdul Rahman, Kuala Lumpur, Malaysia</td><td>1</td></tr><tr><td><b>US Army Research Laboratory, 2800 Powder Mill Rd, Adelphi, MD 20783, USA</b></td><td>1</td></tr><tr><td>Dept. of Comput. 
Sci., North Carolina Univ., Wilmington, NC, USA</td><td>1</td></tr><tr><td><b>Institute of Applied Mathematics, AMSS, Chinese Academy of Sciences, Beijing 100190</b></td><td>1</td></tr><tr><td><b>Research Center of Precision Sensing and Control, Institute of Automation, Chinese Academy of Sciences, Beijing, 100190</b></td><td>1</td></tr><tr><td><b>Biometrics Engineering Research Center, Yonsei University, Seoul, Korea</b></td><td>1</td></tr><tr><td>University of Washington &Microsoft, Seattle, WA, USA</td><td>1</td></tr><tr><td><b>Department of Engineering Science, University of Oxford, Parks Road, Oxford OX1 3PJ, UK</b></td><td>1</td></tr><tr><td>Departamento de Informtica e Matemtica Aplicada/University of Rio Grande do Norte, Natal, Brazil</td><td>1</td></tr><tr><td>Computer Engineering Department, Girne American University, Kyrenia, Cyprus 90</td><td>1</td></tr><tr><td><b>School of Engineering and Digital Arts, University of Kent, Canterbury, U.K.</b></td><td>1</td></tr><tr><td><b>Cornell University, New York, NY, USA</b></td><td>1</td></tr><tr><td>Cornell University & Facebook Inc., New York, NY, USA</td><td>1</td></tr><tr><td><b>Office of Naval Research, Arlington</b></td><td>1</td></tr><tr><td>School of Computer Science and Technology, Nanjing University of Posts and Telecommunications, Nanjing, China</td><td>1</td></tr><tr><td>Fujian Provincial Key Laboratory of Information Processing and Intelligent Control, Fuzhou, China</td><td>1</td></tr><tr><td>School of Technology, Nanjing Audit University, Nanjing, China</td><td>1</td></tr><tr><td><b>School of Computer Science and Engineering, Southeast University, Nanjing, China</b></td><td>1</td></tr><tr><td><b>Microsoft Research, Redmond, WA</b></td><td>1</td></tr><tr><td>Adobe Research Department, Adobe Systems Inc, San Jose, CA</td><td>1</td></tr><tr><td><b>Department of Computer Science, National Chung Cheng University, Chiayi, Taiwan</b></td><td>1</td></tr><tr><td><b>School of Information and Control 
Engineering, Nanjing University of Information Science and Technology, Nanjing, China</b></td><td>1</td></tr><tr><td><b>Microsoft, Redmond, WA</b></td><td>1</td></tr><tr><td><b>BIWI, ETH Zurich Zurich, Switzerland</b></td><td>1</td></tr><tr><td><b>Video Analytics Lab, SERC, Indian Institute of Science, Bangalore, India</b></td><td>1</td></tr><tr><td><b>Department of Electrical Engineering, National Chung Hsing University, Taiwan</b></td><td>1</td></tr><tr><td>Integrated Circuits and Electronics Laboratory, Department of Engineering, Aarhus University, Denmark</td><td>1</td></tr><tr><td>Key Lab of Intelligent Information Processing of Chinese Academy of Sciences (CAS), Institute of Computing Technology (ICT), CAS, Beijing, China</td><td>1</td></tr><tr><td>Dept. of Comput. Sci., California Inst. of Technol., Pasadena, CA, USA</td><td>1</td></tr><tr><td>Utechzone Co. Ltd., New Taipei City, Taiwan 235</td><td>1</td></tr><tr><td>Department of Cognitive Science, University of California, San Diego, CA, USA</td><td>1</td></tr><tr><td><b>Department of Communication Engineering, Shanghai University, Shanghai, China</b></td><td>1</td></tr><tr><td>Department of Electronic Engineering Shanghai Jiao Tong University</td><td>1</td></tr><tr><td><b>Institute of Communication Engineering, National Tsing-Hua University, Taiwan</b></td><td>1</td></tr><tr><td>Innovations Kontakt Stelle (IKS) Hamburg, Hamburg University of Applied Sciences</td><td>1</td></tr><tr><td>School of Engineering and Computing, University of the West of Scotland</td><td>1</td></tr><tr><td>Computer Science Department, Central Washington University (CWU)</td><td>1</td></tr><tr><td>ICT Center, CSIRO</td><td>1</td></tr><tr><td><b>CSE Department, Regional Campus, Anna University, Tirunelveli, India</b></td><td>1</td></tr><tr><td>Technische Universität München, München, Germany</td><td>1</td></tr><tr><td><b>National defense acquisition and system engineering management, National University of Defense Technology, 
Changsha, Hunan, P.R. China</b></td><td>1</td></tr><tr><td>Electrical Engineering and Computer Science, School of Engineering, University of California at Merced, Merced, USA</td><td>1</td></tr><tr><td><b>Bili&#x015F;im Teknolojileri Enstit&#x00FC;s&#x00FC;, T&#x00FC;bitak B&#x0130;LGEM, Kocaeli, T&#x00FC;rkiye</b></td><td>1</td></tr><tr><td><b>School of Computer Science and Software Engineering, University of Western Australia, Crawley, Australia</b></td><td>1</td></tr><tr><td><b>College of Engineering & Computer Science, Australian National University, Canberra, Australia</b></td><td>1</td></tr><tr><td>Data61, Commonwealth Scientific and Industrial Research Organization (CSIRO), Canberra, Australia</td><td>1</td></tr><tr><td><b>Human-Centered Technology Research Centre, University of Canberra, Bruce, Australia</b></td><td>1</td></tr><tr><td><b>Karlsruhe Institute of Technology (KIT), Germany</b></td><td>1</td></tr><tr><td><b>Istanbul Technical University (ITU), Turkey</b></td><td>1</td></tr><tr><td><b>École Polytechnique Fédérale de Lausanne (EPFL), Switzerland</b></td><td>1</td></tr><tr><td>Department of Electronics and Communication Engineering, Faculty of Electrical & Electronic Engineering, Khulna University of Engineering & Technology, Bangladesh</td><td>1</td></tr><tr><td>Pennsylvania State University, University Park, PA</td><td>1</td></tr><tr><td>University of Sao Paulo</td><td>1</td></tr><tr><td><b>University of Southern California, Southern California, USA</b></td><td>1</td></tr><tr><td>School of Software, Henan University, Kaifeng, China</td><td>1</td></tr><tr><td><b>Department of Electrical and Computer Engineering, Texas A&M University, College Station, USA</b></td><td>1</td></tr><tr><td>School of Computer Science and Engineering, Wuhan Institute of Technology, Wuhan, China</td><td>1</td></tr><tr><td><b>Space Application Laboratory, Research Center for Advanced Science and Technology, University of Tokyo, 
Japan</b></td><td>1</td></tr><tr><td><b>Department of Aeronautics and Astronautics Engineering, Graduate School of Engineering, University of Tokyo, Japan</b></td><td>1</td></tr><tr><td>Department of Electrical Engineering, Computer Vision Laboratory, Linköping University, Linköping, Sweden</td><td>1</td></tr><tr><td>Computer Vision Research Laboratory, Electrical Engineering Faculty, Sahand University of Technology, Tabriz, Iran</td><td>1</td></tr><tr><td>Treelogic, Technological Scientific Park of Asturias, Llanera, Spain</td><td>1</td></tr><tr><td>Department of Computer Science and Engineering, University of Oviedo, Gijón, Spain</td><td>1</td></tr><tr><td>Fundación CTIC (Technological Center), Technological Scientific Park of Gijón, Gijón, Spain</td><td>1</td></tr><tr><td><b>University of Central Florida 4000 Central Florida Blvd., Orlando, 328816, USA</b></td><td>1</td></tr><tr><td><b>Carnegie Mellon University 5000 Forbes Ave Pittsburgh, PA 15213, USA</b></td><td>1</td></tr><tr><td><b>School of Tai-an, Shandong University of Science and Technology, Tai-an, China</b></td><td>1</td></tr><tr><td><b>Integrated Management Coastal Research Institute, Universitat Politècnica de València, València, Spain</b></td><td>1</td></tr><tr><td>Department of Computer Science, Madrid Open University, Madrid, Spain</td><td>1</td></tr><tr><td>Department of Research and Diagnostic Methods, Faculty of Education, Pontificia University of Salamanca, Salamanca, Spain</td><td>1</td></tr><tr><td><b>The University of Tokushima, Japan</b></td><td>1</td></tr><tr><td><b>Department of Signal Processing, Tampere University of Technology, FIN-Tampere, 33720, Finland</b></td><td>1</td></tr><tr><td><b>Computer Science Department, University of Maryland, College Park, MD</b></td><td>1</td></tr><tr><td><b>Department of Computer Science, Memorial University of Newfoundland, Saint John's, NL, Canada</b></td><td>1</td></tr><tr><td><b>Computer Science Department, Tel-Aviv University, Ramat Aviv, 
Tel-Aviv, Israel</b></td><td>1</td></tr><tr><td><b>Shenzhen University, Shenzhen, China</b></td><td>1</td></tr><tr><td><b>U.S. Army Res. Lab., Adelphi, MD, USA</b></td><td>1</td></tr><tr><td><b>Department of Electrical Engineering, Assiut University, Asyut, Egypt</b></td><td>1</td></tr><tr><td>Graduate School of Science and Technology, Niigata University, 8050, Ikarashi 2-Nocho, Nishi-ku Niigata, 950-2181, Japan</td><td>1</td></tr><tr><td>Dept. of Information Engineering, Faculty of Engineering, Niigata University, 8050, Ikarashi 2-Nocho, Nishi-ku Niigata, 950-2181, Japan</td><td>1</td></tr><tr><td>Graduate School of Science and Technology, Niigata University, 8050, Ikarashi 2-Nocho, Nishi-ku Niigata, 950-2181, Japan, +81 25 262 7499</td><td>1</td></tr><tr><td><b>Visual Computation, Queen Mary University, London, United Kingdom</b></td><td>1</td></tr><tr><td><b>University of British Columbia, Canada</b></td><td>1</td></tr><tr><td>NTNU, Norway</td><td>1</td></tr><tr><td>Institute of Informatics, Wroclaw University of Technology, Wroclaw, Poland</td><td>1</td></tr><tr><td><b>Department of Computer Engineering, Yeungnam University, Korea</b></td><td>1</td></tr><tr><td><b>Graduate School at Shenzhen, Tsinghua University, China</b></td><td>1</td></tr><tr><td><b>Department of Computer Science, Cornell University, Ithaca, NY, USA</b></td><td>1</td></tr><tr><td>Polish-Japanese Institute of Information Technology, Warszawa, Poland</td><td>1</td></tr><tr><td>Faculty of Applied Informatics and Mathematics, Department of Informatics, Warsaw University of Life Sciences (SGGW), Warsaw, Poland</td><td>1</td></tr><tr><td><b>AGH University of Science and Technology, Kraków, Poland</b></td><td>1</td></tr><tr><td>Polish-Japanese Institute of Information Technology, Warsaw, Poland</td><td>1</td></tr><tr><td><b>Department of Computer Science and Technology, Tongji University, Shanghai, China</b></td><td>1</td></tr><tr><td>FernUniversität , Hagen, 
Germany</td><td>1</td></tr><tr><td>Universidad Tecnica Federico Santa Maria , Valparaiso, Chile</td><td>1</td></tr><tr><td>Staffordshire University , Staffordshire, United Kingdom</td><td>1</td></tr><tr><td><b>The University of North Carolina at Charlotte, Charlotte, USA</b></td><td>1</td></tr><tr><td><b>Walt Disney Imagineering, USA</b></td><td>1</td></tr><tr><td><b>AEBC, Nanyang Environment &amp; Water Research Institute, Nanyang Technological University, Singapore</b></td><td>1</td></tr><tr><td>Faculty of Engineering, Computing and Science, Swinburne University of Technology Sarawak Campus, Kuching, Malaysia</td><td>1</td></tr><tr><td><b>Australian Centre for Visual Technologies, University of Adelaide, Adelaide, Australia</b></td><td>1</td></tr><tr><td><b>Center for OPTical IMagery Analysis and Learning (OPTIMAL), State Key Laboratory of Transient Optics and Photonics, Xi’an Institute of Optics and Precision Mechanics, Chinese Academy of Sciences, Xi’an, P. R. China</b></td><td>1</td></tr><tr><td>University of Massachusetts at Amherst, Amherst, MA, USA</td><td>1</td></tr><tr><td><b>School of Computer Science, The University of Adelaide, Adelaide, SA, Australia</b></td><td>1</td></tr><tr><td><b>Department of Electrical and Computer Engineering, Rutgers University, Piscataway, USA</b></td><td>1</td></tr><tr><td><b>University of Maryland, College Park, College Park, USA</b></td><td>1</td></tr><tr><td><b>Chinese Academy of Sciences, Beijing, P.R.China</b></td><td>1</td></tr><tr><td><b>School of Science, Jiangnan University, Wuxi, China</b></td><td>1</td></tr><tr><td><b>School of Internet of Things Engineering, Jiangnan University, Wuxi, China</b></td><td>1</td></tr><tr><td>Department of Engineering and MaintenanceChina Mobile Group, Jiangsu Company, Ltd., Changzhou, China</td><td>1</td></tr><tr><td><b>School of Computer Sciences and Technology, Nanjing Normal University, Nanjing, China</b></td><td>1</td></tr><tr><td><b>School of Mathematical Sciences, Nanjing 
Normal University, Nanjing, China</b></td><td>1</td></tr><tr><td>Indian Statistical Institute, Kolkata 700108</td><td>1</td></tr><tr><td>Departament d’Informàtica, Universitat de Valencia, Valencia, Spain</td><td>1</td></tr><tr><td><b>Department of Computer Science, George Mason University, Fairfax, USA</b></td><td>1</td></tr><tr><td><b>School of Information Technology, Deakin University, Geelong, Australia</b></td><td>1</td></tr><tr><td><b>School of Sciences, South China University of Technology, Guangzhou, China</b></td><td>1</td></tr><tr><td><b>College of Computer and Information Science, Southwest University, Chongqing, China</b></td><td>1</td></tr><tr><td>Department of Mathematics and Computer Science, Gannan Normal University, Ganzhou, People’s Republic of China</td><td>1</td></tr><tr><td><b>Key Laboratory of Embedded System and Service Computing, Ministry of Education, Tongji University, Shanghai, People’s Republic of China</b></td><td>1</td></tr><tr><td><b>Department of Computer Science and Technology, Tongji University, Shanghai, People’s Republic of China</b></td><td>1</td></tr><tr><td><b>School of Information Engineering, Zhengzhou University, China</b></td><td>1</td></tr><tr><td>National Laboratory of Pattern Recognition, Beijing, China</td><td>1</td></tr><tr><td><b>National University of Kaohsiung, Kaohsiung, Taiwan</b></td><td>1</td></tr><tr><td>Quang Binh University, Dong Hoi City, Vietnam</td><td>1</td></tr><tr><td><b>School of Computer Science and Engineering, Jiangsu University of Science and Technology, Zhenjiang, China</b></td><td>1</td></tr><tr><td><b>Dept. of Computer Science, Unit of Medical Technology and Intelligent Information Systems, University of Ioannina, Greece</b></td><td>1</td></tr><tr><td><b>Dept. 
of Medical Physics, Medical School, Unit of Medical Technology and Intelligent Information Systems, University of Ioannina, Greece</b></td><td>1</td></tr><tr><td><b>Dermalog Identification Systems GmbH, Hamburg, Germany</b></td><td>1</td></tr><tr><td>School of Mathematics and Information Technology, Nanjing Xiao Zhuang University, Nanjing, People’s Republic of China</td><td>1</td></tr><tr><td><b>Research & Advanced Technology Division of SAIC Motor Corporation Limited, Shanghai, 201804, P.R. China</b></td><td>1</td></tr><tr><td><b>ECSE Department, Rensselaer Polytechnic Institute</b></td><td>1</td></tr><tr><td><b>Centre of Excellence for Research in Computational Intelligence and Applications, School of Computer Science, University of Birmingham, Birmingham, U.K.</b></td><td>1</td></tr><tr><td><b>VUB-NPU Joint AVSP Research Lab, Northwestern Polytechnical University (NPU), Shaanxi Key Lab on Speech and Image Information Processing, 127 Youyi Xilu, X'ian 710072, China</b></td><td>1</td></tr><tr><td><b>Arizona State University, Phoenix, AZ, USA</b></td><td>1</td></tr><tr><td>School of Computing, Electronics and Mathematics, Faculty of Engineering, Environment and Computing, Coventry University, Coventry, UK</td><td>1</td></tr><tr><td><b>Department of Computer Science and Information Engineering, National Taipei University, Taipei, Taiwan</b></td><td>1</td></tr><tr><td>Institute of Computer Science, Christian-Albrechts-Universität Kiel, Kiel, Germany</td><td>1</td></tr><tr><td><b>Institute of Computer Science, Faculty of Electronics and Information Technology, Warsaw University of Technology, Nowowiejska 15/19, 00-665 Warsaw, Poland</b></td><td>1</td></tr><tr><td>KT Future Technology Laboratory, Seoul, South Korea</td><td>1</td></tr><tr><td><b>Department of Electrical and Computer Engineering, Rutgers University, Piscataway, NJ, USA</b></td><td>1</td></tr><tr><td><b>School of Computer Science and Technology, Anhui University, Hefei, 
China</b></td><td>1</td></tr><tr><td><b>School of Mathematical Sciences, Xiamen University, Xiamen, China</b></td><td>1</td></tr><tr><td><b>State Key Laboratory of CAD&CG, Zhejiang University, Hangzhou, China</b></td><td>1</td></tr><tr><td><b>Microsoft Key Laboratory of Visual Perception, Zhejiang University, Hangzhou, China</b></td><td>1</td></tr><tr><td><b>Institute of Automation, National Laboratory of Pattern Recognition, Chinese Academy of Sciences, Beijing, P.R. China</b></td><td>1</td></tr><tr><td><b>School of Interactive Computing, Georgia Institute of Technology, Atlanta, USA</b></td><td>1</td></tr><tr><td>Microsoft Research Asia, Beijing, P.R. China</td><td>1</td></tr><tr><td><b>Department of Electrical and Computer Engineering, Seoul National University, Seoul, Korea</b></td><td>1</td></tr><tr><td><b>Nanyang Technological University, 50 Nanyang Drive, 637553, Singapore</b></td><td>1</td></tr><tr><td><b>Department of Computer Science, Northumbria University, Newcastle, UK</b></td><td>1</td></tr><tr><td><b>School of Automation, Northwestern Polytechnical University, Xi’an, China</b></td><td>1</td></tr><tr><td><b>SAIIP, School of Computer Science, Northwestern Polytechnical University, Xi’an, China</b></td><td>1</td></tr><tr><td>Shanghai Maritime University, Shanghai, China</td><td>1</td></tr><tr><td>Machine Intelligence Research Institute, Rockville, USA</td><td>1</td></tr><tr><td><b>Department of Computer Science, Shenzhen Graduate School, Harbin Institute of Technology, China</b></td><td>1</td></tr><tr><td><b>State Key Laboratory of Synthetical Automation for Process Industries, Northeastern University, Shenyang, Liaoning 110004, China</b></td><td>1</td></tr><tr><td><b>University of Pittsburgh and Adjunct Faculty at the Robotics Institute, Carnegie Mellon University: 3137 Sennott Square, 210 S. 
Bouquet St., PA 15260 USA</b></td><td>1</td></tr><tr><td><b>AI Institute, Qihoo/360 Company, Beijing, China</b></td><td>1</td></tr><tr><td><b>Intelligent Media Technique Research Center, Chongqing Institute of Green and Intelligent Technology, Chinese Academy of Sciences, Chongqing, P.R. China</b></td><td>1</td></tr><tr><td><b>CAS Center for Excellence in Brain Science and Intelligence Technology, National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, University of Chinese Academy of Sciences, Beijing, P.R. China</b></td><td>1</td></tr><tr><td><b>AI Institute of Qihoo/360 Company, Beijing, P.R. China</b></td><td>1</td></tr><tr><td><b>Advanced Engineering Electronics & Safety, Delphi Deutschland GMBH, Delphiplatz 1, Wuppertal, North Rhine-Westfalia, Germany</b></td><td>1</td></tr><tr><td><b>National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, Beijing, P.R. China</b></td><td>1</td></tr><tr><td>Orange—France Telecom Division R&D—TECH/IRIS, Cesson Sévigné Cedex, France</td><td>1</td></tr><tr><td>IIT-Madras, Chennai, India</td><td>1</td></tr><tr><td>Department of Computer Science, Innopolis University, Kazan, Russia</td><td>1</td></tr><tr><td><b>Center for Telematics and Information Technology, University of Twente, Enschede, Netherlands</b></td><td>1</td></tr><tr><td>Department of Computer Science, University of Science & Technology, Bannu, Pakistan</td><td>1</td></tr><tr><td><b>Department of Biomedical Engineering, Kyung Hee University, Suwon, Korea</b></td><td>1</td></tr><tr><td><b>Department of Computer Engineering, Kyung Hee University, Suwon, Korea</b></td><td>1</td></tr><tr><td><b>Department of Computer Science and Engineering, Sungkyunkwan University, Suwon, Korea</b></td><td>1</td></tr><tr><td>Naver Labs Europe, Meylan, France</td><td>1</td></tr><tr><td><b>Image and Video Systems Lab, Dept. 
of Electrical Engineering, Korea Advanced Institute of Science and Technology (KAIST), Yuseong-Gu, Daejeon 305-701, Republic of Korea</b></td><td>1</td></tr><tr><td><b>LIRIS, UMR 5205 CNRS, INSA-Lyon, F-69621, France</b></td><td>1</td></tr><tr><td><b>Orange Labs, R&D, Meylan, France</b></td><td>1</td></tr><tr><td>School of Computer and Systems Sciences, JawaharLal Nehru University, New Delhi 110067, India</td><td>1</td></tr><tr><td>Univ. La Rochelle, La Rochelle, France</td><td>1</td></tr><tr><td>Department of Computer Science, Chu Hai College of Higher Education, Tsuen Wan, Hong Kong</td><td>1</td></tr><tr><td><b>University of Maryland, Center for Automation Research, 4411 A.V. Williams Building, College Park, MD 20742-3275, USA</b></td><td>1</td></tr><tr><td><b>Teaching and research of section of mathematics, Hebei Information Engineering School, Baoding 071000, China</b></td><td>1</td></tr><tr><td><b>George Mason University, Fairfax, USA</b></td><td>1</td></tr><tr><td><b>University of Naples Federico II, Napoli, Italy</b></td><td>1</td></tr><tr><td>University of Salerno, Salerno, Italy</td><td>1</td></tr><tr><td><b>Sapienza University of Rome, Rome, Italy</b></td><td>1</td></tr><tr><td><b>RheinAhrCampus der Hochschule Koblenz, Remagen, Germany</b></td><td>1</td></tr><tr><td>Google, Mountain View, USA</td><td>1</td></tr><tr><td>Computer Sciences Department, University of Wisconsin, Madison, USA</td><td>1</td></tr><tr><td>Google, Seattle, USA</td><td>1</td></tr><tr><td>Singapore Polytechnic, 500 Dover Road, Singapore 139651</td><td>1</td></tr><tr><td><b>Singapore University of Technology and Design, 20 Dover Road, Singapore 138682</b></td><td>1</td></tr><tr><td><b>State Key Laboratory of Robotics, Shenyang Institute of Automation Chinese Academy of Sciences, Shenyang, China</b></td><td>1</td></tr><tr><td><b>Bournemouth University, Poole, UK</b></td><td>1</td></tr><tr><td><b>Technische Universitt Darmstadt, Computer Systems Group, Darmstadt, 
Germany</b></td><td>1</td></tr><tr><td><b>School of Engineering and Applied Science, Aston University, Birmingham, U.K.</b></td><td>1</td></tr><tr><td>School of Computer and Information Engineering, Xiamen University of Technology, Xiamen, China</td><td>1</td></tr><tr><td>Centre for Machine Vision, Bristol Robotics Laboratory, University of the West of England, T Block, Frenchay Campus, Coldharbour Lane, Bristol, BS16 1QY, UK</td><td>1</td></tr><tr><td><b>PERCEPTION Team, INRIA Grenoble Rhône-Alpes, France</b></td><td>1</td></tr><tr><td><b>MIR@CL Laboratory, Faculty of Sciences of Sfax (FSS), University of Sfax, Sfax, Tunisia</b></td><td>1</td></tr><tr><td>Saudi Electronic University, Riyadh, Kingdom of Saudi Arabia</td><td>1</td></tr><tr><td><b>MIR@CL Laboratory, Faculty of Economics and Management of Sfax (FSEGS), University of Sfax, Sfax, Tunisia</b></td><td>1</td></tr><tr><td><b>Digital World Research Centre, University of Surrey, UK</b></td><td>1</td></tr><tr><td><b>Department of Electrical and Computer Engineering, Curtin University, Miri Sarawak, Malaysia</b></td><td>1</td></tr><tr><td>Information Security Group, City University London, London, UK</td><td>1</td></tr><tr><td><b>Faculty of Engineering, Multimedia University, Cyberjaya, Malaysia</b></td><td>1</td></tr><tr><td><b>Faculty of Computing and Informatics, Multimedia University, Cyberjaya, Malaysia</b></td><td>1</td></tr><tr><td>School of Physics and Electronic Information Engineering, Wenzhou University, Wenzhou, China</td><td>1</td></tr><tr><td>IIIT Chittoor, SriCity, Andhra Pradesh, India</td><td>1</td></tr><tr><td><b>ARM, Inc., San Jose, CA</b></td><td>1</td></tr><tr><td>Department of Information Engineering, Henan University of Science and Technology, Luoyang, China</td><td>1</td></tr><tr><td><b>School of Computing Sciences, University of East Anglia, Norwich, U.K.</b></td><td>1</td></tr><tr><td>Department of mechatronic technology of National Taiwan Normal 
University</td><td>1</td></tr><tr><td><b>Department of Computer Science, Taipei Municipal University of Education</b></td><td>1</td></tr><tr><td><b>Computer Vision Center 08193 Bellaterra, Barcelona, SPAIN</b></td><td>1</td></tr><tr><td><b>Computer Science Division, University of Central Florida, Orlando, FL, USA</b></td><td>1</td></tr><tr><td><b>GuangXi Cast Animation Company, Ltd., Nanning, China</b></td><td>1</td></tr><tr><td>Department of Electrical Engineering and Computer Science, Colorado School of Mines, Golden, CO, USA</td><td>1</td></tr><tr><td><b>School of Information Engineering, Xiangtan University, Xiangtan, China</b></td><td>1</td></tr><tr><td><b>Baidu International Technology (Shenzhen) Company, Ltd., Shenzhen, China</b></td><td>1</td></tr><tr><td>The Image Processing and Analysis Laboratory (LAPI), University “Politehnica” of Bucharest, 313 Splaiul Independeţei, Bucharest, Romania</td><td>1</td></tr><tr><td><b>Department of Electronic Engineering, The Chinese University of Hong Kong</b></td><td>1</td></tr><tr><td><b>School of Communication and Information Engineering, Chongqing University of Posts and Telecommunications, Chongqing, China</b></td><td>1</td></tr><tr><td>Division of Digital Media Engineering, Sang-Myung University, Suwon, Republic of Korea</td><td>1</td></tr><tr><td><b>CAS, Key Lab of Intell. Info. 
Process., Institute of Computing Technology, Beijing, China</b></td><td>1</td></tr><tr><td><b>School of Computing, Teesside University, UK</b></td><td>1</td></tr><tr><td><b>Teleinfrastructure R&amp;D Lab, Technical University of Sofia, Bulgaria</b></td><td>1</td></tr><tr><td><b>The State Key Laboratory of Management and Control for Complex Systems, Institute of Automation, Chinese Academy of Sciences, Beijing, China</b></td><td>1</td></tr><tr><td>Facebook AI Research (FAIR), Menlo Park, USA</td><td>1</td></tr><tr><td>Princeton University &Microsoft, Princeton, NJ, USA</td><td>1</td></tr><tr><td>Microsoft &University of Washington, Redmond, WA, USA</td><td>1</td></tr><tr><td>Intel Labs, Pittsburgh PA</td><td>1</td></tr><tr><td>Dept. of Inf. Network Technol., Hsiuping Inst. of Technol., Taichung, Taiwan</td><td>1</td></tr><tr><td>Alibaba Group, Zhejiang, People’s Republic of China</td><td>1</td></tr><tr><td><b>Computer Science, Arizona State University, Tempe, USA</b></td><td>1</td></tr><tr><td><b>Cork Institute of Technology, CIT, Cork Ireland</b></td><td>1</td></tr><tr><td><b>Biomedical Engineering Program, University of Manitoba, Winnipeg, Canada</b></td><td>1</td></tr><tr><td>Department of Electrical, Computer, and Systems Engineering, Rensselaer Polytechnic Institute Troy, Troy, USA</td><td>1</td></tr><tr><td>Key Lab of Computing and Communication Software of Anhui Province School of Computer Science and Technology, University of Science and Technology of China Hefei, Anhui, People’s Republic of China</td><td>1</td></tr><tr><td><b>Department of Computer Science, University of North Carolina Wilmington, Wilmington, United States</b></td><td>1</td></tr><tr><td><b>School of Electrical and Electronic Engineering, Yonsei University, Seoul, South Korea</b></td><td>1</td></tr><tr><td>School of ComputingNational University of Singapore</td><td>1</td></tr><tr><td><b>Centre for Intelligent Machines and Department of Electrical and Computer Engineering, McGill University, 
Montreal, Canada</b></td><td>1</td></tr><tr><td><b>UFSC - Federal University of Santa Catarina / INE - CTC, Florianópolis, 88040-900, Brazil</b></td><td>1</td></tr><tr><td><b>UDESC - Santa Catarina State University, DCC - CCT, Joinville, 89219-710, Brazil</b></td><td>1</td></tr><tr><td><b>School of Electrical and Electronic Engineering, University of Manchester, Manchester, U.K.</b></td><td>1</td></tr><tr><td><b>School of Computer Science and Information Technology, University of Nottingham, Nottingham, UK</b></td><td>1</td></tr><tr><td><b>Waseda University, Tokyo, Japan</b></td><td>1</td></tr><tr><td><b>Computer Science Department, Rutgers University, 110 Frelinghuysen Road, Piscataway, NJ 08854-8019, USA</b></td><td>1</td></tr><tr><td><b>Fordham University, New York, 10023, USA</b></td><td>1</td></tr><tr><td><b>Rapid-Rich Object Search (ROSE) Lab, Nanyang Technological University, Interdisciplinary Graduate School, SingaporeSingapore</b></td><td>1</td></tr><tr><td><b>Department of Electrical Engineering, Semnan University, Semnan, Iran</b></td><td>1</td></tr><tr><td><b>Department of Electrical Engineering, Amirkabir University of Technology, Tehran, Iran</b></td><td>1</td></tr><tr><td>Dept. of EE, UESTC, 2006 Xiyuan Ave, Chengdu, Sichuan 611731, China</td><td>1</td></tr><tr><td>Dept. of Radiation Oncology, Johns Hopkins University, 3400 N. Charles St, Baltimore, MD 21218, USA</td><td>1</td></tr><tr><td>Dept. of Electrical &amp; Computer Engineering, Johns Hopkins University, 3400 N. 
Charles St, Baltimore, MD 21218, USA</td><td>1</td></tr><tr><td>University of Nottingham (Malaysia Campus), Malaysia</td><td>1</td></tr><tr><td>South Valley University, Qena, Egypt</td><td>1</td></tr><tr><td>Film Department ELTE University, Budapest, Hungary</td><td>1</td></tr><tr><td>Gipsa-Lab, Saint Martin d’Heres, France</td><td>1</td></tr><tr><td>ICA Laboratory, Grenoble, France</td><td>1</td></tr><tr><td><b>IIIT Hyderabad, 500032, A.P, India</b></td><td>1</td></tr><tr><td>School of Computing and Electrical Engineering, IIT Mandi, H.P, 175001, India</td><td>1</td></tr><tr><td><b>School of Computer Science and Software Engineering, The University of Western Australia, Crawley, WA, Australia</b></td><td>1</td></tr><tr><td><b>School of Engineering, Griffith University, Nathan, QLD, Australia</b></td><td>1</td></tr><tr><td><b>Faculty of Engineering and Information Technology, Centre for Quantum Computation and Intelligent Systems, University of Technology Sydney, Sydney, NSW, Australia</b></td><td>1</td></tr><tr><td><b>NASA Jet Propulsion Laboratory, California Institute of Technology, Pasadena, CA</b></td><td>1</td></tr><tr><td><b>Department of Naval Architecture and Marine Engineering, University of Michigan, Ann Arbor, MI 48109 USA</b></td><td>1</td></tr><tr><td><b>Beijing Key Laboratory of Digital Media, State Key Laboratory of Virtual Reality Technology and Systems, and School of Computer Science and Engineering , Beihang University, China</b></td><td>1</td></tr><tr><td><b>Philips Research , The Netherlands</b></td><td>1</td></tr><tr><td><b>Istanbul Technical University, Faculty of Computer and Informatics, Istanbul, Turkey</b></td><td>1</td></tr><tr><td><b>Shenzhen Graduate School, Harbin Institute of Technology, Shenzhen, China</b></td><td>1</td></tr><tr><td>AICTE Emeritus Fellow,  </td><td>1</td></tr><tr><td><b>Department of Computer Science & Engineering, Jadavpur University, Kolkata, India</b></td><td>1</td></tr><tr><td>Department of Computer Science & 
Engineering, GCELT, Kolkata, India</td><td>1</td></tr><tr><td><b>Chinese Academy of Sciences, Shaanxi, P. R. China</b></td><td>1</td></tr><tr><td><b>University of Sydney, NSW, Australia</b></td><td>1</td></tr><tr><td>Chinese University of Hong Kong, Hong Kong</td><td>1</td></tr><tr><td><b>Signal and Information Processing section (SIP), Department of Electronic Systems, Aalborg University, Denmark</b></td><td>1</td></tr><tr><td><b>Section of Image Analysis and Computer Graphics, DTU Compute, Technical University of Denmark, Kgs. Lyngby, Denmark</b></td><td>1</td></tr><tr><td>Department of Computer System and Communication, Faculty of Information and Communication, Universiti Teknikal Malaysia Melaka, Durian Tunggal, Malaysia</td><td>1</td></tr><tr><td>Division Télécom, Centre de Développement des Technologies Avancées - CDTA, Algiers, Algeria</td><td>1</td></tr><tr><td><b>University of Delaware, USA</b></td><td>1</td></tr><tr><td><b>Department of Cognitive Science, School of Information Science and Engineering, Xiamen University, Xiamen, China</b></td><td>1</td></tr><tr><td><b>Taylor's University Lakeside Campus, Selangor Darul Ehsan, Malaysia</b></td><td>1</td></tr><tr><td><b>Department of Mathematical Sciences, Georgia Southern University, Statesboro, USA</b></td><td>1</td></tr><tr><td>School of Computer and Communication Science, École Polytechnique Fédérale de Lausanne, Lausanne, Switzerland</td><td>1</td></tr><tr><td><b>Department of Electronic EngineeringCentre for Vision, Speech and Signal Processing, University of Surrey, Surrey, U.K.</b></td><td>1</td></tr><tr><td><b>Department of Electrical EngineeringFaculty of Engineering, Urmia University, Urmia, Iran</b></td><td>1</td></tr><tr><td><b>ICT-ISVISION Joint R&D Lab. for Face Recognition, Chinese Acad. 
of Sci., Beijing, China</b></td><td>1</td></tr><tr><td><b>School of Electrical and Computer Engineering, Purdue University, West Lafayette, USA</b></td><td>1</td></tr><tr><td>Baidu Research - Institute of Deep Learning, Sunnyvale, USA</td><td>1</td></tr><tr><td>Jiaxing University, Jiaxing, China</td><td>1</td></tr><tr><td><b>International School, Beijing University of Posts and Telecommunications, Beijing, China</b></td><td>1</td></tr><tr><td><b>School of Computer Engineering, Nanyang Technological University, Singapore, Singapore</b></td><td>1</td></tr><tr><td><b>Department of Social and Decision Sciences, Carnigie Mellon University, Pittsburgh, PA 15224, USA</b></td><td>1</td></tr><tr><td>Department of Computer Science and Engineering, Beijing Institute of Technology, Beijing 100081, CHINA. zhangxiaoxun@bit.edu.cn</td><td>1</td></tr><tr><td>Department of Computer Science and Engineering, Beijing Institute of Technology, Beijing 100081, CHINA. jiayunde@bit.edu.cn</td><td>1</td></tr><tr><td>Department of Computer Science and Engineering, Beijing Institute of Technology, Beijing 100081, CHINA. 
xushuang@bit.edu.cn</td><td>1</td></tr><tr><td>Department of Electronics and Communication, University of Allahabadm Allahabad, India 211002</td><td>1</td></tr><tr><td><b>Microsoft Live Labs Research, China</b></td><td>1</td></tr><tr><td><b>Baidu Research, USA</b></td><td>1</td></tr><tr><td><b>Center for Machine Vision and Signal Analysis, Department of Computer Science and Engineering, University of Oulu, Oulu, Finland</b></td><td>1</td></tr><tr><td><b>Key Laboratory of Child Development and Learning Science of Ministry of Education, Research Center for Learning Science, Southeast University, Nanjing, People’s Republic of China</b></td><td>1</td></tr><tr><td><b>Applied Network Technology (ANT), Department of Computer Science, Faculty of Science, Khon Kaen University, Khon Kaen, Thailand</b></td><td>1</td></tr><tr><td>Department of Business Computer, Faculty of Management Science, Nakhon Ratchasima Rajabhat University, Nakhon Ratchasima, Thailand</td><td>1</td></tr><tr><td><b>Microsoft Research</b></td><td>1</td></tr><tr><td><b>MIT CSAIL</b></td><td>1</td></tr><tr><td><b>Affectiva</b></td><td>1</td></tr><tr><td>Yahoo! Research</td><td>1</td></tr><tr><td><b>University of Denver, 2390 S York Street, CMK 308, Denver, CO 80210, USA</b></td><td>1</td></tr><tr><td><b>Institute for Computational and Mathematical Engineering, Stanford University</b></td><td>1</td></tr><tr><td><b>Computer Laboratory, University of Cambridge, Cambridge, U.K.</b></td><td>1</td></tr><tr><td>Department of Mathematics and Computer Science, University of Cagliari, Italy</td><td>1</td></tr><tr><td><b>Institute of Computer Science and Technology, Chongqing University of Posts and Telecommunications, Chongqing, P.R. China</b></td><td>1</td></tr><tr><td><b>School of Information Science and Technology, Southwest Jiaotong University, Chengdou, P.R. 
China</b></td><td>1</td></tr><tr><td><b>Center for OPTical IMagery Analysis and Learning, State Key Laboratory of Transient Optics and Photonics, Xi'an Institute of Optics and Precision Mechanics, Chinese Academy of Sciences, Xi'an, China</b></td><td>1</td></tr><tr><td><b>Department of Electronic and Electrical Engineering, The University of Sheffield, Sheffield, U.K.</b></td><td>1</td></tr><tr><td><b>College of Electronic and Information Engineering, Nanjing University of Information Science and Technology, Nanjing, China</b></td><td>1</td></tr><tr><td><b>Fotonation LTD, Galway, Ireland</b></td><td>1</td></tr><tr><td><b>School of Computer Science and Center for OPTical IMagery Analysis and Learning (OPTIMAL), Northwestern Polytechnical University, Xian 710072, Shaanxi, China</b></td><td>1</td></tr><tr><td>Universidad de León, León, Spain</td><td>1</td></tr><tr><td>Elektronik ve Haberle&#x015F;me M&#x00FC;hendisli&#x011F;i B&#x00F6;l&#x00FC;m&#x00FC;</td><td>1</td></tr><tr><td>Robert Bosch Engineering and Business Solutions Limited, Bangalore, India</td><td>1</td></tr><tr><td>Department of Instrumentation and Control Engineering, PSG College of Technology, Coimbatore, India</td><td>1</td></tr><tr><td><b>Department of Biomedical Engineering, Shanghai Jiao Tong University, Shanghai, China</b></td><td>1</td></tr><tr><td><b>Faculty of Computer and Informatics, Istanbul Technical University, Istanbul, Turkey</b></td><td>1</td></tr><tr><td>China Airborne Missile Academy, Luoyang, China</td><td>1</td></tr><tr><td>Electronic Information Engineering College, Henan University of Science and Technology, Luoyang, China</td><td>1</td></tr><tr><td><b>Key Laboratory of Intelligent Perception and Image Understanding of Ministry of Education of China, Xidian University, Xi’an, People’s Republic of China</b></td><td>1</td></tr><tr><td><b>Amirkabir University of Technology, Electrical Engineering Department, Tehran, Iran</b></td><td>1</td></tr><tr><td><b>School of Computing and 
Communication, University of Technology Sydney, Sydney, Australia</b></td><td>1</td></tr><tr><td>School of Electronic and Information Engineering, Inner Mongolia University of Science and Technology, Baotou, People’s Republic of China</td><td>1</td></tr><tr><td><b>School of Electronic and Information Engineering, Beihang University, Beijing, People’s Republic of China</b></td><td>1</td></tr><tr><td>Istituto Italiano di Tecnologia & Università di Verona, Genova, Italy</td><td>1</td></tr><tr><td>Istituto Italiano di Tecnologia (IIT), Genova, Italy</td><td>1</td></tr><tr><td><b>Institute of Imaging and Computer Vision, RWTH Aachen University, Aachen, Germany</b></td><td>1</td></tr><tr><td>Office of Safety Research and Development, Federal Highway Administration, U.S. Department of Transportation, Virginia, USA</td><td>1</td></tr><tr><td><b>Department of Applied Mathematics, Beijing Jiaotong University, Beijing, People’s Republic of China</b></td><td>1</td></tr><tr><td>Xinjiang Vocational and Technical College of Communications, Wulumuqi, People’s Republic of China</td><td>1</td></tr><tr><td><b>School of Mechatronical Engineering and Automation, Shanghai University, Shanghai, People’s Republic of China</b></td><td>1</td></tr><tr><td><b>School of Computer Science and Technology, Guangdong University of Technology, China</b></td><td>1</td></tr><tr><td>College of Mathematics and Informatics, South China Agricultural University, China</td><td>1</td></tr><tr><td><b>Computer Vision and Multimodal Computing, MPI Informatics, Saarbruecken</b></td><td>1</td></tr><tr><td><b>Computer Vision Laboratory, ETH Zurich</b></td><td>1</td></tr><tr><td><b>School of Information and Electrical Engineering, China University of Mining and Technology, Xuzhou, China</b></td><td>1</td></tr><tr><td>Curtin University Department of Mechanical Engineering, Perth, Western Australia 6012</td><td>1</td></tr><tr><td><b>Department of Mechanical Engineering, Curtin University, Perth, Western Australia 
6012</b></td><td>1</td></tr><tr><td>Department of Information Engineering, HeNan Radio and Television University, Zhengzhou, People’s Republic of China</td><td>1</td></tr><tr><td><b>School of Computer and Information Science, Southwest University, Chongqing, People’s Republic of China</b></td><td>1</td></tr><tr><td><b>School of Computer Science and Engineering, Center for Robotics, Key Laboratory for NeuroInformation of Ministry of Education, University of Electronic Science and Technology of China, Chengdu, People’s Republic of China</b></td><td>1</td></tr><tr><td><b>National Institute of Diagnostics and Vaccine Development in Infectious Diseases, Xiamen University, Xiamen, People’s Republic of China</b></td><td>1</td></tr><tr><td>Computer Science Department, School of Information Science and Engineering, Xiamen, University, Xiamen, People’s Republic of China</td><td>1</td></tr><tr><td>PLA University of Science and Technology, China</td><td>1</td></tr><tr><td><b>PLA University of Science and Technology, China and State Key Lab. for Novel Software Technology, Nanjing University, China</b></td><td>1</td></tr><tr><td><b>College of Computer and Information, Hohai University, China</b></td><td>1</td></tr><tr><td><b>College of Computer and Information, Hohai University, China and Key Lab. of Image and Video Understanding for Social Safety, Nanjing University of Science & Technology, China</b></td><td>1</td></tr><tr><td><b>Vols Taipei</b></td><td>1</td></tr><tr><td><b>Intel Labs Europe, London, United Kingdom</b></td><td>1</td></tr><tr><td><b>Technion - Israel Inst. 
of Technology, Haifa, 32000, Israel</b></td><td>1</td></tr><tr><td><b>The Open University of Israel, Raanana, 43107, Israel</b></td><td>1</td></tr><tr><td><b>Weizmann Institute of Science, Rehovot, 76100, Israel</b></td><td>1</td></tr><tr><td>Department of Computer Science and Engineering, University of Nebraska-Lincoln, Lincoln, USA</td><td>1</td></tr><tr><td><b>Department of Electronic Engineering, The Chinese University of Hong Kong, Hong Kong, China</b></td><td>1</td></tr><tr><td><b>Department of Information and Communication Engineering, Chosun University, Gwangju, Korea</b></td><td>1</td></tr><tr><td>School of Electronics and Computer Eng., Chonnam National University, Gwangju, Korea</td><td>1</td></tr><tr><td><b>Department of Electrical and Computer Engineering, Old Dominion University, Norfolk, VA 23529, USA</b></td><td>1</td></tr><tr><td>FAST, Supélec, Avenue de la Boulaie, Cesson-Sévigné, France</td><td>1</td></tr><tr><td>ISIR laboratory, Pierre and Marie Curie university, Paris Cedex 05, France</td><td>1</td></tr><tr><td>Centre for Visual Computing, Faculty of Engineering and Informatics, University of Bradford, Bradford, UK</td><td>1</td></tr><tr><td><b>Faculty of Science and Technology, Communication University of China, Beijing, China</b></td><td>1</td></tr><tr><td><b>Science and Technology Department, Communication University of China, Beijing, China</b></td><td>1</td></tr><tr><td><b>Collaborative Innovation Center, Communication University of China, Beijing, China</b></td><td>1</td></tr><tr><td><b>School of Computer Software, Tianjin University, 300072, China</b></td><td>1</td></tr><tr><td><b>Computer Vision Laboratory, ETH Zürich, Zürich, Switzerland</b></td><td>1</td></tr><tr><td>Amsterdam University College, Amsterdam, The Netherlands</td><td>1</td></tr><tr><td><b>Informatics Institute, Faculty of Science, University of Amsterdam, Amsterdam, The Netherlands</b></td><td>1</td></tr><tr><td><b>Universitat Pompeu Fabra, Universidad Pompeu Fabra 
(Edificio França), Passeig de Circumvallacio, 8, Barcelona, Spain</b></td><td>1</td></tr><tr><td><b>Departamento de estadística, Universidad Carlos III de Madrid, Barcelona, Spain</b></td><td>1</td></tr><tr><td><b>Southeast University, Nanjing, China</b></td><td>1</td></tr><tr><td><b>Computer Science, Rochester Institute of Technology, USA</b></td><td>1</td></tr><tr><td><b>Center for Imaging Science, Rochester Institute of Technology, USA</b></td><td>1</td></tr><tr><td><b>Space and Naval Warfare Systems Center Pacific, San Diego, CA, 92152, United States</b></td><td>1</td></tr><tr><td><b>Electrical and Computer Engineering, University of California, San Diego</b></td><td>1</td></tr><tr><td>Key Laboratory of Intelligent Information Processing, Shandong Normal University, Jinan, China</td><td>1</td></tr><tr><td>School of Computer & Software, Nanjing University of Information Science & Technology, Nanjing, People’s Republic of China</td><td>1</td></tr><tr><td><b>School of Computer Science, Chongqing University, Chongqing, China</b></td><td>1</td></tr><tr><td>Institute of Life Sciences, Shandong Normal University, Jinan, China</td><td>1</td></tr><tr><td>School of Information Science and Engineering, Shandong Normal University, Jinan, China</td><td>1</td></tr><tr><td>FEECS, Department of Computer Science, Technical University of Ostrava, Ostrava-Poruba, Czech Republic</td><td>1</td></tr><tr><td>ECE, Department MSIT, C-4 Janakpuri, New Delhi, India</td><td>1</td></tr><tr><td>Dept. of Comput. Sci., New Jersey Inst. of Technol., Newark, NJ, USA</td><td>1</td></tr><tr><td>Electrical Engineering Department, National Tsing-Hua University, Hsin-Chu, Taiwan. e-mail: chihming.fu@gmail.com</td><td>1</td></tr><tr><td>Electrical Engineering Department, National Tsing-Hua University, Hsin-Chu, Taiwan; Informatics Department, Fo-Guang University, I-Lan, Taiwan. 
e-mail: clhuang@ee.nthu.edu.tw</td><td>1</td></tr><tr><td>Electrical Engineering Department, National Tsing-Hua University, Hsin-Chu, Taiwan</td><td>1</td></tr><tr><td><b>Research Institute for Future Media Computing, Shenzhen University, Shenzhen, China</b></td><td>1</td></tr><tr><td><b>University Politehnica of Bucharest, Bucharest, Romania</b></td><td>1</td></tr><tr><td><b>School of Computer and Information, Anhui Polytechnic University, Wuhu, China</b></td><td>1</td></tr><tr><td><b>Faculty of Information Sciences and Engineering, University of Canberra, Australia</b></td><td>1</td></tr><tr><td><b>Robotics Institute, Carnegie Mellon University, USA</b></td><td>1</td></tr><tr><td><b>Pediatrics Department, University of South Florida, Tampa, FL, USA</b></td><td>1</td></tr><tr><td>Department of Computer and Communication Engineering, University of Science and Technology Beijing, Beijing, China</td><td>1</td></tr><tr><td><b>Sun Yat-Sen University, Guangzhou, China</b></td><td>1</td></tr><tr><td>University of California at Los Angeles, Los Angeles, CA, USA</td><td>1</td></tr><tr><td><b>University of Queensland, Brisbane, Australia</b></td><td>1</td></tr><tr><td><b>University of Maryland, Baltimore County, Baltimore, MD</b></td><td>1</td></tr><tr><td><b>Jadavpur University, Kolkata, India</b></td><td>1</td></tr><tr><td>Department of Physics, Tripura University (A Central University), Suryamaninagar, India</td><td>1</td></tr><tr><td>Department of Computer Science and Engineering, Tripura University (A Central University), Suryamaninagar, India</td><td>1</td></tr><tr><td><b>Raytheon BBN Technologies, Cambridge, MA, USA</b></td><td>1</td></tr><tr><td>Pontifical Catholic University of Minas Gerais - Department of Computer Science, R. 
Dom Jose Gaspar, 500, Belo Horizonte MG, 30535901, Brazil</td><td>1</td></tr><tr><td><b>College of Computer and Information Science, Southwest University, Chongqing 400715, China</b></td><td>1</td></tr><tr><td><b>Human-Robot Interaction Research Center, Department of Mechanical Engineering, Korea Advanced Institute of Science and Technology, Republic of Korea</b></td><td>1</td></tr><tr><td><b>Tsinghua University, Beijing, 100084, China</b></td><td>1</td></tr><tr><td><b>Department of Electrical Engineering, Korea Advanced Institute of Science and Technology (KAIST), Daejeon, Republic of Korea</b></td><td>1</td></tr><tr><td><b>School of Information Science and Technology, Southwest Jiaotong University, Chengdu, China</b></td><td>1</td></tr><tr><td><b>Department of Electrical and Electronics Engineering, Bahcesehir University, Istanbul, Turkey</b></td><td>1</td></tr><tr><td><b>School of Science, Jiangnan University, Wuxi, People’s Republic of China</b></td><td>1</td></tr><tr><td><b>Medical Image Processing Group, Department of Radiology, University of Pennsylvania, Philadelphia, USA</b></td><td>1</td></tr><tr><td><b>Key Laboratory of Advanced Process Control for Light Industry, Jiangnan University, Ministry of Education, Wuxi, People’s Republic of China</b></td><td>1</td></tr><tr><td><b>School of Internet of Things, Jiangnan University, Wuxi, People’s Republic of China</b></td><td>1</td></tr><tr><td><b>Department of Electrical and Computer Engineering, University of Alberta, Edmonton, Canada</b></td><td>1</td></tr><tr><td>Department of Statistics and Operational Research, Faculty of Mathematics, Complutense University of Madrid, Madrid, Spain</td><td>1</td></tr><tr><td><b>Distributed Infinity, Inc., Larkspur, CO, USA</b></td><td>1</td></tr><tr><td><b>University of Colorado Denver, Denver, CO, USA</b></td><td>1</td></tr><tr><td><b>Department of Computer Science and Engineering, Texas A&M University, College Station, TX, USA</b></td><td>1</td></tr><tr><td><b>Facebook 
Inc., San Francisco, CA, USA</b></td><td>1</td></tr><tr><td><b>Adobe Systems Inc., San Jose, CA, USA</b></td><td>1</td></tr><tr><td>Dept. of Mathematics and Computer Science, University of Udine, Italy</td><td>1</td></tr><tr><td><b>University of Wisconsin-Madison, Madison, WI, USA</b></td><td>1</td></tr><tr><td>LIMSI-CNRS, Orsay Cedex, France</td><td>1</td></tr><tr><td>Istituto di Informatica e Telematica, Consiglio Nazionale delle Ricerche, Pisa, Italy</td><td>1</td></tr><tr><td><b>Faculty of Information Technology, University of Technology, Sydney, Sydney, Australia. email: hintz@it.uts.edu.au</b></td><td>1</td></tr><tr><td><b>Faculty of Information Technology, University of Technology, Sydney, Sydney, Australia. email: jant@it.uts.edu.au</b></td><td>1</td></tr><tr><td><b>Faculty of Information Technology, University of Technology, Sydney, Sydney, Australia. email: pohsiang@it.uts.edu.au</b></td><td>1</td></tr><tr><td>Faculty of Information Sciences and Engineering, Management and Science University, Selangor, Malaysia</td><td>1</td></tr><tr><td>UTM-Big Data Center, Universiti Teknologi Malaysia, Johor Bahru, Malaysia</td><td>1</td></tr><tr><td>Faculty of Computing, Universiti Teknologi Malaysia, Johor Bahru, Malaysia</td><td>1</td></tr><tr><td><b>School of Computer Science, Beijing University of Posts and Telecommunications, Beijing 100876, China</b></td><td>1</td></tr><tr><td>Faculty of Electrical Engineering and Computer Science, University of Maribor, Maribor, Slovenia</td><td>1</td></tr><tr><td>LAMIA, EA 4540, University of French West Indies and Guyana, Guadeloupe, France</td><td>1</td></tr><tr><td>ISIR, UPMC Univ Paris 06, CNRS, Paris, France</td><td>1</td></tr><tr><td><b>Advanced Electronics System, Academy of Scientific and Industrial Research, CSIR-Central Electronics Research Institute, Pilani, India</b></td><td>1</td></tr><tr><td><b>Mobile Communications Department, Eurecom, Biot, France</b></td><td>1</td></tr><tr><td><b>STARS Team, Institut National 
de Recherche en Informatique et en Automatique, Sophia Antipolis, France</b></td><td>1</td></tr><tr><td>Merchant Marine College, Shanghai Maritime University, Shanghai 201306, PR China</td><td>1</td></tr><tr><td><b>Institute of Industrial Science, the University of Tokyo, Tokyo, Japan</b></td><td>1</td></tr><tr><td>Department of Informatics, King’s College London, London, UK</td><td>1</td></tr><tr><td><b>DST INSPIRE Fellow, Department of Computer Science and Engineering, Jadavpur University, Kolkata, India</b></td><td>1</td></tr><tr><td>Department of Electrical Engineering, KAIST, Korea</td><td>1</td></tr><tr><td><b>Electronic R&D Center, Mando Corp., Korea</b></td><td>1</td></tr><tr><td>Department of New Media, Korean German Institute of Technology, Korea</td><td>1</td></tr><tr><td><b>SAIT Beijing Lab, Samsung Advanced Institute of Technology, China</b></td><td>1</td></tr><tr><td><b>Mechatronics & Manufacturing Technology Center, Samsung Electronics Co., Korea</b></td><td>1</td></tr><tr><td>Department of Electrical, Computer and Biomedical Engineering, University of Pavia, Pavia, Italy</td><td>1</td></tr><tr><td><b>Open University of Israel</b></td><td>1</td></tr><tr><td><b>The University of Western Australia, Crawley, Australia</b></td><td>1</td></tr><tr><td><b>Curtin University, Perth, Australia</b></td><td>1</td></tr><tr><td>Pontifical Catholic Univ of Rio de Janei, Department of Informatics, Rio de Janeiro, Brazil</td><td>1</td></tr><tr><td>Department of Informatics, Pontifical Catholic Univ of Rio de Janei, Rio de Janeiro, Brazil</td><td>1</td></tr><tr><td>School of Computing Sciences and Informatics, University of Cincinnati, Cincinnati, USA</td><td>1</td></tr><tr><td><b>Concordia University, Montreal, Canada</b></td><td>1</td></tr><tr><td>Universiti Kuala Lumpur, Kedah</td><td>1</td></tr><tr><td><b>Concordia Institute for Information Systems Engineering (CIISE), Concordia University, Montreal, QC, H3G 1T7, Canada</b></td><td>1</td></tr><tr><td><b>Department 
of Electrical and Computer Engineering, Concordia University, QC, Canada, H3G 1T7</b></td><td>1</td></tr><tr><td><b>Beijing University of Posts and Telecommunications, 100876, PR China</b></td><td>1</td></tr><tr><td><b>University of KwaZulu-Natal, School of Maths, Statistics &amp; Computer Science, Durban - South Africa</b></td><td>1</td></tr><tr><td>Sudan University of Science and Technology, College of Computer Science and Information Technology, Khartoum - Sudan</td><td>1</td></tr><tr><td>LMU Munich, Germany and Munich University of Applied Sciences, Germany</td><td>1</td></tr><tr><td>Department of Electric and Electronic Engineering, Avrasya University, Trabzon, Turkey</td><td>1</td></tr><tr><td><b>Department of Electric and Electronic Engineering, Selçuk University, Konya, Turkey</b></td><td>1</td></tr><tr><td><b>Digital Media Institute, Hunan University, Changsha, 410082 P.R. China</b></td><td>1</td></tr><tr><td><b>College of information science and engineering, Hunan University, Changsha, 410082 P.R. 
China</b></td><td>1</td></tr><tr><td>ACM Professional Specialist in Artificial Intelligence</td><td>1</td></tr><tr><td><b>Université du Quebec a Rimouski (UQAR)</b></td><td>1</td></tr><tr><td><b>School of Information Technology & Electrical Engineering, The University of Queensland, Brisbane, Australia</b></td><td>1</td></tr><tr><td><b>School of Computing, National University of Singapore, Singapore, Singapore</b></td><td>1</td></tr><tr><td>School of Computer and Information Engineering, Jiangxi Normal University, Nanchang, China</td><td>1</td></tr><tr><td><b>Shanghai university</b></td><td>1</td></tr><tr><td>University of Washington and Google Inc.</td><td>1</td></tr><tr><td>Google Inc.</td><td>1</td></tr><tr><td><b>University of Washington</b></td><td>1</td></tr><tr><td><b>CNRS, IMB, UMR 5251, Talence, France</b></td><td>1</td></tr><tr><td><b>UMR 5800, CNRS, LaBRI, Talence, France</b></td><td>1</td></tr><tr><td><b>UMR 5800, University of Bordeaux, LaBRI, Talence, France</b></td><td>1</td></tr><tr><td><b>UMR 5800, Bordeaux INP, LaBRI, Talence, France</b></td><td>1</td></tr><tr><td><b>UMR 5800, LaBRI, Talence, France</b></td><td>1</td></tr><tr><td><b>Dept. 
of Electrical Engineering, National Chung Hsing University, Taiwan</b></td><td>1</td></tr><tr><td><b>Division of Design of Intelligent Machines, Center for Development of Advanced Technologies, Algiers, Algeria</b></td><td>1</td></tr><tr><td><b>AI Laboratories, Alibaba Group, Hangzhou, China</b></td><td>1</td></tr><tr><td><b>Key Laboratory of Intelligent Perception and Image Understanding of Ministry of Education of China, Xidian University, Xi’an, China</b></td><td>1</td></tr><tr><td>CCCE, Nankai University Jinnan Campus, Tianjin, China</td><td>1</td></tr><tr><td><b>College of Information and Computer Sciences, University of Massachusetts Amherst, Amherst, USA</b></td><td>1</td></tr><tr><td><b>Department of Mathematics, University of Houston, Houston, USA</b></td><td>1</td></tr><tr><td><b>Research Group on Intelligent Machines, University of Sfax, ENIS, Sfax, Tunisia</b></td><td>1</td></tr><tr><td><b>Department of Management, Dalian University of Technology, Dalian Liaoning, China</b></td><td>1</td></tr><tr><td><b>Department of Electronic Engineering, Dalian University of Technology, Dalian Liaoning, China</b></td><td>1</td></tr><tr><td><b>College of Communication Engineering, Chongqing University, Shapingba district, Chongqing, China</b></td><td>1</td></tr><tr><td>Department of Computer Science, VHNSN College, Virudhunagar, India</td><td>1</td></tr><tr><td>Department of Computer Science, ANJA College, Sivakasi, India</td><td>1</td></tr><tr><td><b>Department of Information Engineering, The Chinese University of Hong Kong, Shatin, Hong Kong</b></td><td>1</td></tr><tr><td>Tsinghua National Laboratory for Information Science and Technology, Department of Computer Science and Technology Tsinghua University, Beijing, China</td><td>1</td></tr><tr><td><b>National ICT Australia, Canberra, ACT, Australia</b></td><td>1</td></tr><tr><td><b>MIT Media Laboratory, Cambridge, MA, USA</b></td><td>1</td></tr><tr><td>Foundation for Research & Technology – Hellas, Heraklion, Crete, 
Greece</td><td>1</td></tr><tr><td>Vrije Universiteit Amsterdam, Amsterdam, The Netherlands</td><td>1</td></tr><tr><td>Ruhr-Universität Bochum, Bochum, Germany</td><td>1</td></tr><tr><td><b>Department of Mathematics and Computer Science, University of Basel, Basel, Switzerland</b></td><td>1</td></tr><tr><td><b>Industrial Technology Research Institute, Hsinchu, Taiwan</b></td><td>1</td></tr><tr><td><b>Garmin Corporation, New Taipei, Taiwan</b></td><td>1</td></tr><tr><td><b>Graduate Institute of Networking and Multimedia, National Taiwan University, Taipei, Taiwan</b></td><td>1</td></tr><tr><td><b>School of Information Technologies, University of Sydney, Sydney, NSW, Australia</b></td><td>1</td></tr><tr><td><b>Tencent AI Laboratory, Shenzhen, China</b></td><td>1</td></tr><tr><td><b>Malong Technologies Company, Ltd., Shenzhen, China</b></td><td>1</td></tr><tr><td><b>Beijing Normal University, Beijing, China</b></td><td>1</td></tr><tr><td><b>Sun Yat-sen University, Guangzhou, China</b></td><td>1</td></tr><tr><td><b>Guangzhou University, Guangzhou, China</b></td><td>1</td></tr><tr><td><b>Department of Information Engineering, the Chinese University of Hong Kong</b></td><td>1</td></tr><tr><td><b>Department of Electronic Engineering, the Chinese University of Hong Kong, Shatin, Hong Kong</b></td><td>1</td></tr><tr><td>Instituto Nacional de Astrofísica, Óptica y Electrónica, Luis Enrique Erro No.1, Tonantzintla, Puebla, México. 
CP 72840</td><td>1</td></tr><tr><td><b>Pontifical Catholic University of Rio de Janeiro, Rua Marquês de São Vicente 225, Gávea, Brasil</b></td><td>1</td></tr><tr><td><b>Department of Electrical Engineering, National Taiwan University of Science and Technology</b></td><td>1</td></tr><tr><td><b>Department of Computer Science and Technology, Key Laboratory of Embedded System and Service Computing, Ministry of Education, Tongji University, Shanghai, China</b></td><td>1</td></tr><tr><td><b>Computational Vision Group, University of California at Irvine, Irvine, CA, USA</b></td><td>1</td></tr><tr><td><b>Computer Vision Laboratory, ETH Zurich, Zurich, Switzerland</b></td><td>1</td></tr><tr><td><b>Department of Computer Science and Technology, The Key Laboratory of Embedded System and Service Computing, Ministry of Education, Tongji University, Shanghai, China</b></td><td>1</td></tr><tr><td>Key Laboratory of System Control and Information Processing, Ministry of Education of China, Shanghai, People’s Republic of China</td><td>1</td></tr><tr><td><b>Department of Automation, Shanghai Jiao Tong University, Shanghai, People’s Republic of China</b></td><td>1</td></tr><tr><td><b>School of Psychology, University of Ottawa, Ottawa, Canada</b></td><td>1</td></tr><tr><td><b>School of Electrical Engineering and Computer Science, University of Ottawa, Ottawa, Canada</b></td><td>1</td></tr><tr><td><b>Faculty of Biomedical Engineering, Amirkabir University of Technology, Tehran, Iran</b></td><td>1</td></tr><tr><td><b>Tohoku University, Japan</b></td><td>1</td></tr><tr><td><b>Intelligent Multimedia Technique Research Center, Chongqing Institute of Green and Intelligent Technology, Chinese Academy of Sciences, Chongqing 400714, PR China</b></td><td>1</td></tr><tr><td>Department of Mechanical Engineering, Universiti Tenaga Nasional Km 7, Jalan IKRAM-UNITEN, 43000, Kajang, Selangor, Malaysia</td><td>1</td></tr><tr><td><b>University of Alberta, Canada</b></td><td>1</td></tr><tr><td><b>China 
University of Geosciences, Wuhan, China</b></td><td>1</td></tr><tr><td><b>College of Information Science and Engineering, Hunan University, Changsha, China</b></td><td>1</td></tr><tr><td>Dept. of Electron. Eng., Hannam Univ., Daejeon, South Korea</td><td>1</td></tr><tr><td><b>Centre for Autism Research, Philadelphia, US</b></td><td>1</td></tr><tr><td><b>University of Cambridge</b></td><td>1</td></tr><tr><td><b>Department of EE, Korea Advanced Institute of Science and Technology (KAIST), Daejeon, Korea</b></td><td>1</td></tr><tr><td><b>Department of Software and Computer Engineering, Ajou University, Suwon, Korea</b></td><td>1</td></tr><tr><td>Alibaba-Zhejiang University Joint Research Institute of Frontier Technologies, Zhejiang, China</td><td>1</td></tr><tr><td><b>College of Computer Science, Zhejiang University, Zhejiang, China</b></td><td>1</td></tr><tr><td><b>Department of Electrical Engineering, Chang Gung University, Taoyuan, Taiwan</b></td><td>1</td></tr><tr><td><b>School of Information Science and Engineering, Shandong University, Jinan, China</b></td><td>1</td></tr><tr><td><b>Key Laboratory of Measurement and Control of Complex Systems of Engineering, Ministry of Education, Southeast University, Nanjing, China</b></td><td>1</td></tr><tr><td>Dept. of Electr. & Comput. 
Eng., Toronto Univ., Ont., Canada</td><td>1</td></tr><tr><td><b>Center for Advance Imaging Innovation and Research, New York University, New York, NY, USA</b></td><td>1</td></tr><tr><td><b>Key Laboratory of Machine Perception (Ministry of Education), School of Electronics Engineering and Computer Science, Peking University, Beijing, China</b></td><td>1</td></tr><tr><td><b>Machine Learning Department, Carnegie Mellon University, Pittsburgh, PA, USA</b></td><td>1</td></tr><tr><td><b>Department of Computer Science, University of Warwick, Coventry, U.K.</b></td><td>1</td></tr><tr><td>Laboratoire MIA, University of La Rochelle, La Rochelle, France</td><td>1</td></tr><tr><td><b>College of Cyber Security, Jinan University, Guangzhou, China</b></td><td>1</td></tr><tr><td><b>Columbia University, New York</b></td><td>1</td></tr><tr><td><b>Department of Electrical and Computer Engineering, College of Computer and Information Science, Northeastern University, Boston, MA, USA</b></td><td>1</td></tr><tr><td>Fraunhofer Institute for Telecommunications, Berlin, Germany</td><td>1</td></tr><tr><td>Fraunhofer Institute for Digital Media Technology, Ilmenau, Germany</td><td>1</td></tr><tr><td>Siemens AG, Corporate Technology, Munich, Germany</td><td>1</td></tr><tr><td><b>School of Engineering, University of Illinois, Urban Champagne, USA</b></td><td>1</td></tr><tr><td>ECIT, School of Electronics, Electrical Engineering &amp; Computer Science, Queen's University Belfast, Belfast, UK</td><td>1</td></tr><tr><td><b>Computer Science, Loughborough University, Loughborough, UK</b></td><td>1</td></tr><tr><td>Department of Computer Science and Engineering, University of Calcutta, Kolkata, India</td><td>1</td></tr><tr><td><b>Graduate School of Information, Production and Systems, Waseda University, Kitakyushu, Japan</b></td><td>1</td></tr><tr><td><b>Dept. 
of Computer Engineering, Science and Reaserch Branch, Islamic Azad University, Tehran, Iran</b></td><td>1</td></tr><tr><td>School of Electrical and Computer Engineering, College of Engineering, University of Tehran, Iran</td><td>1</td></tr><tr><td>Luoyang Electro-Optical Equipment Research Institute, Luoyang, People’s Republic of China</td><td>1</td></tr><tr><td><b>Schepens Eye Research Institute, Harvard University, Cambridge, USA</b></td><td>1</td></tr><tr><td><b>Image Processing Center, Beihang University, Beijing, People’s Republic of China</b></td><td>1</td></tr><tr><td>Technological Educational Institute of Sterea Ellada, Psahna, Halkida, Greece</td><td>1</td></tr><tr><td>National Centre for Scientific Research “Demokritos”, Agia Paraskevi, Athens, Greece</td><td>1</td></tr><tr><td>University of Maastricht, Maastricht, The Netherlands</td><td>1</td></tr><tr><td>Centre of Research and Technology Hellas, Thermi, Thessaloniki, Greece</td><td>1</td></tr><tr><td><b>School of Computing and Communications, Lancaster University, Lancaster, UK</b></td><td>1</td></tr><tr><td><b>Center for Optical Imagery Analysis and Learning, Northwestern Polytechnical University, Xi’an, China</b></td><td>1</td></tr><tr><td><b>School of Instrumentation Science and Opto-electronics Engineering, Beihang University, Beijing, China</b></td><td>1</td></tr><tr><td>Department of Electronics and Communication Engineering, P.P.G. 
Institute of Technology, Coimbatore, India</td><td>1</td></tr><tr><td>Department of Electronics and Communication Engineering, Institute of Road and Transport Technology, Erode, India</td><td>1</td></tr><tr><td><b>Wayne State University, Detroit, USA</b></td><td>1</td></tr><tr><td><b>School of Automation, Huazhong University of Science and Technology, Wuhan, China 430074</b></td><td>1</td></tr><tr><td><b>College of Electronics and Information Engineering, Sichuan University, Chengdu, China 610064</b></td><td>1</td></tr><tr><td>Department of Computer Science, Banasthali Vidyapith, Banasthali, India</td><td>1</td></tr><tr><td>Computer Science and Engineering Department, SP Memorial Institute of Technology, Kaushambi, India</td><td>1</td></tr><tr><td><b>Dept. of Comp. Sci. and Tech., Shenzhen Graduate School, Harbin Institute of Technology, China</b></td><td>1</td></tr><tr><td>Department of Computer Science and Technology, Xi’an Jiaotong University, Xi’an, Shaanxi, China</td><td>1</td></tr><tr><td><b>Imperial College London</b></td><td>1</td></tr><tr><td><b>Machine Vision Group, University of Oulu, Oulu, Finland</b></td><td>1</td></tr><tr><td>Fujifilm Software, San Jose, USA</td><td>1</td></tr><tr><td><b>Inst. of Autom., Chinese Acad. 
of Sci., Beijing, China</b></td><td>1</td></tr><tr><td><b>School of Computing, Computing 1, 13 Computing Drive, National University of Singapore, Singapore 117417</b></td><td>1</td></tr><tr><td>Institute for Infocomm Research, 1 Fusionopolis Way, #21-01 Connexis, Singapore 138632</td><td>1</td></tr><tr><td><b>Department of Electrical and Computer Engineering, National University of Singapore, Singapore 117576</b></td><td>1</td></tr><tr><td><b>Institute of Mathematical and Computer Sciences, University of São Paulo, São Carlos, Brazil</b></td><td>1</td></tr><tr><td><b>Computational Brain Science Lab, Department of Computational Science and Technology, School of Computer Science and Communication, KTH Royal Institute of Technology, Stockholm, Sweden</b></td><td>1</td></tr><tr><td><b>Graduate Sch. of Inf. Sci. & Technol., Tokyo Univ., Japan</b></td><td>1</td></tr><tr><td><b>Key Laboratory of Child Development and Learning Science, Ministry of Education, Research Center for Learning Science, Southeast University, Nanjing, China</b></td><td>1</td></tr><tr><td><b>State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, People’s Republic of China</b></td><td>1</td></tr><tr><td><b>National Laboratory of Speech and Language Information Processing, University of Science and Technology of China, Hefei, China</b></td><td>1</td></tr><tr><td><b>Department of Psychology, University of Pittsburgh/Robotics Institute, Carnegie Mellon University , Pittsburgh, PA, USA</b></td><td>1</td></tr><tr><td><b>School of Computer Science and Software Engineering, Shenzhen University, Shenzhen, China</b></td><td>1</td></tr><tr><td><b>Dept.of Intelligence Science and Technology, The Kyoto University of JAPAN</b></td><td>1</td></tr><tr><td><b>Dept.of Computational Intelligence and Systems Science, Tokyo Institute of Technology of JAPAN</b></td><td>1</td></tr><tr><td><b>Microsoft Research, Redmond, WA, USA</b></td><td>1</td></tr><tr><td><b>Centre for Quantum Computation and 
Intelligent Systems, Faculty of Engineering and Information Technology, University of Technology, Sydney, NSW, Australia</b></td><td>1</td></tr><tr><td><b>School of Computer Science and Engineering, University of Electronic Science and Technology of China, Chengdu, People’s Republic of China</b></td><td>1</td></tr><tr><td>HTC Research, Beijing, China</td><td>1</td></tr><tr><td>QCIS, University of Technology, Sydney, Australia</td><td>1</td></tr><tr><td><b>IIIS, Tsinghua University, Beijing, China</b></td><td>1</td></tr><tr><td><b>School of Computer Science and Technology, Nanjing University of Science and Technology, Nanjing 210094, PR China</b></td><td>1</td></tr><tr><td><b>School of Software Technology, Dalian University of Technology</b></td><td>1</td></tr><tr><td>Interuniversity Microelectronics Centre, Heverlee, Belgium</td><td>1</td></tr><tr><td>NPU-VUB Joint AVSP Lab, Department ETRO, Vrije Universiteit Brussel (VUB), Brussels, Belgium</td><td>1</td></tr><tr><td>Shaanxi Key Laboratory on Speech and Image Information Processing, Xi’an, China</td><td>1</td></tr><tr><td>NPU-VUB Joint AVSP Lab, School of Computer Science, Northwestern Polytechnical University (NPU), Xi’an, China</td><td>1</td></tr><tr><td><b>School of Computer and Information Science, Southwest University, Chongqing, China</b></td><td>1</td></tr><tr><td><b>School of Computer Science and Technology, Shandong University, Shandong, China</b></td><td>1</td></tr><tr><td><b>Facebook Inc., Palo Alto, CA, USA</b></td><td>1</td></tr><tr><td><b>Stanford University, USA</b></td><td>1</td></tr><tr><td>Institute of Electronics and Computer Science, Riga, Latvia</td><td>1</td></tr><tr><td>Electrical and Computer Engineering Department, University of California, Santa Barbara, CA 93106 USA</td><td>1</td></tr><tr><td>Psychology Department, University of California, Santa Barbara, CA 93106 USA</td><td>1</td></tr><tr><td><b>Computer Science and Information Engineering Department, National Taiwan Normal 
University, Taipei, Taiwan</b></td><td>1</td></tr><tr><td>Dept. of Comp. Sci. and Inf. Eng, National United University, Miaoli, Taiwan</td><td>1</td></tr><tr><td>School of Control Science and Engineering DUT, Dalian, China</td><td>1</td></tr><tr><td><b>College of Mechanical and Electrical, Changzhou Textile Garment Institute, Changzhou, China</b></td><td>1</td></tr><tr><td>Information Technology R&D Center, Mitsubishi Electric Corporation, Kamakura, Japan</td><td>1</td></tr><tr><td>School of Information Science and Engineering, Hunan city University, Yiyang, China</td><td>1</td></tr><tr><td><b>School of Electronics and Information Engineering, Tongji University, Shanghai, China</b></td><td>1</td></tr><tr><td>KU Leuven, ESAT - PSI, iMinds, Leuven, Belgium</td><td>1</td></tr><tr><td>Max-Planck-Institut für Informatik, Saarbrücken, Germany</td><td>1</td></tr><tr><td><b>Faculty of Electrical Engineering, Department of Cybernetics, Czech Technical University in Prague, Prague 6, Czech Republic</b></td><td>1</td></tr><tr><td><b>Department of Computer Science, University of Toronto, Toronto, Canada</b></td><td>1</td></tr><tr><td>Research Center for Science and Technology in Medicine, Tehran University of Medical Sciences, Tehran, Iran</td><td>1</td></tr><tr><td>University of IIllinois, Urbana-Champaign</td><td>1</td></tr><tr><td><b>Department of ECE, National University of Singapore</b></td><td>1</td></tr><tr><td><b>Department of Computer Science, University of Dayton</b></td><td>1</td></tr><tr><td><b>Institut EURECOM, Sophia Antipolis, (France)</b></td><td>1</td></tr><tr><td><b>Sapienza Universit&#x00E0; di Roma, v. 
Salaria 113, 00198, Rome, (IT)</b></td><td>1</td></tr><tr><td><b>Chinese Academy of Sciences, Shenzhen Institutes of Advanced Technology, Shenzhen, China</b></td><td>1</td></tr><tr><td><b>Department of Electrical and Computer Engineering, Iowa State University, Ames, IA, USA</b></td><td>1</td></tr><tr><td>Zhejiang University & Alibaba Group, Hangzhou, China</td><td>1</td></tr><tr><td>Laboratory LIM, Department of Computer Science, Faculty of Sciences and Technologies, University Hassan II, Casablanca-Morocco</td><td>1</td></tr><tr><td><b>College of Electrical Engineering and Automation, Anhui University, Hefei, China</b></td><td>1</td></tr><tr><td>Electrical Engineering Department, Yazd University, Yazd, Iran</td><td>1</td></tr><tr><td><b>School of Computer and Science Technology, Tianjin University, Tianjin, China</b></td><td>1</td></tr><tr><td>School of Electronics and Information Engineering, Tianjin Polytechnic University, Tianjin, China</td><td>1</td></tr><tr><td>Tianjin Key Laboratory of Optoelectronic Detection Technology and Systems, Tianjin, China</td><td>1</td></tr><tr><td><b>Research School of Engineering, Australian National University, Canberra, Australia</b></td><td>1</td></tr><tr><td><b>DCNS Research, 5 rue de l'Halbrane, 44340 Bouguenais, France</b></td><td>1</td></tr><tr><td>Adjunct, Effat University, Jeddah, Saudi Arabia</td><td>1</td></tr><tr><td><b>Department of Electrical and Computer Engineering, University of Miami, Coral Gables, USA</b></td><td>1</td></tr><tr><td>School of Computer Science, Wuyi University, Jiangmen, China</td><td>1</td></tr><tr><td><b>Faculty of Electrical Engineering, University of Ljubljana, Ljubljana, Slovenia</b></td><td>1</td></tr><tr><td><b>School of Computer Engineering and Science, Shanghai University, Shanghai, China</b></td><td>1</td></tr><tr><td><b>Faculty of Education, East China Normal University, Shanghai, China</b></td><td>1</td></tr><tr><td><b>Department of Information Engineering and Computer Science, 
University of Trento, Trento, TN, Italy</b></td><td>1</td></tr><tr><td>Snapchat Research, Venice, CA90291</td><td>1</td></tr><tr><td><b>Beauty Cosmetic Research Lab, Kao Corporation, Tokyo, Japan</b></td><td>1</td></tr><tr><td><b>University of Waterloo, Waterloo, Canada</b></td><td>1</td></tr><tr><td><b>Department of CS, University of Texas at San Antonio, 78249, USA</b></td><td>1</td></tr><tr><td>Department of CSE, University at Buffalo (SUNY), NY 14260, USA</td><td>1</td></tr><tr><td><b>University of Waterloo</b></td><td>1</td></tr><tr><td>School of Information and Engineering, Jinhua Polytechnic, Jinhua, China</td><td>1</td></tr><tr><td>Department of Computer Science and Engineering, University of Texas, Arlington, USA</td><td>1</td></tr><tr><td>School of Medical Science, Jinhua Polytechnic, Jinhua, China</td><td>1</td></tr><tr><td><b>College of Information, Capital University of Economics and Business, Beijing, China.sanyecunfu@emails.bjut.edu.cn</b></td><td>1</td></tr><tr><td><b>Bio-Computing Research Center, Harbin Institute of Technology Shenzhen Graduate School, China</b></td><td>1</td></tr><tr><td><b>Guangdong Industry Training Centre, Guangdong Polytechnic Normal University, Guangzhou, China</b></td><td>1</td></tr><tr><td>S. S. 
College of Business Studies, University of Delhi, Delhi, India</td><td>1</td></tr><tr><td>School of Computer & System Sciences, Jawaharlal Nehru University, New Delhi, India</td><td>1</td></tr><tr><td>Department of Computer Science and Engineering, National Institute of Technology, Uttarakhand, India</td><td>1</td></tr><tr><td><b>Korea University, Seoul, South Korea</b></td><td>1</td></tr><tr><td><b>Department of Electrical and Computer Engineering, Ajou University</b></td><td>1</td></tr><tr><td><b>Advanced Digital Sciences Center , Singapore</b></td><td>1</td></tr><tr><td><b>National Laboratory of Pattern Recognition, Institute of Automation, CAS, Beijing, 100190, China</b></td><td>1</td></tr><tr><td>Baidu Online Network Technology (Beijing) Co. Ltd, Beijing, China</td><td>1</td></tr><tr><td><b>Computer Science and Electrical Engineering West Virginia University, Morgantown, USA</b></td><td>1</td></tr><tr><td><b>Shenzhen Institutes of Advanced Technology,Chinese Academy of Sciences, Shenzhen, China</b></td><td>1</td></tr><tr><td><b>Department of ComputingBiometrics Research Centre, The Hong Kong Polytechnic University, Hong Kong</b></td><td>1</td></tr><tr><td><b>School of Computer Science and Information Technology, RMIT University, Melbourne, VIC, Australia</b></td><td>1</td></tr><tr><td>Faculty of Engineering and Computing, Coventry University, UK</td><td>1</td></tr><tr><td>Dept. 
of Theoretical Electrical Engineering, Technical University of Sofia, Sofia, Bulgaria</td><td>1</td></tr><tr><td><b>Clemson University, Clemson, SC</b></td><td>1</td></tr><tr><td><b>School of Automation Engineering, University of Electronic Science and Technology of China, Chengdu, China</b></td><td>1</td></tr><tr><td><b>School of Digital Media, Jiangnan University Jiangsu Wuxi, PR China</b></td><td>1</td></tr><tr><td><b>School of Digital Media, Jiangnan University, Jiangsu Wuxi, PR China</b></td><td>1</td></tr><tr><td><b>School of Maths, Statistics &amp; Computer Science, University of KwaZulu-Natal, Durban, South Africa</b></td><td>1</td></tr><tr><td><b>Faculty of Science and Technology, Sudan University of Science and Technology, Khartoum, Sudan</b></td><td>1</td></tr><tr><td>Lawrence Berkeley National Laboratory, Berkeley, USA</td><td>1</td></tr><tr><td>No.1 Senior Middle School of Wendeng District, Weihai, China</td><td>1</td></tr><tr><td>Standards & Metrology Research Institute of CARS, Beijing, China</td><td>1</td></tr><tr><td>College of Information Science & Technology, Hebei Agricultural University, Baoding, China</td><td>1</td></tr><tr><td><b>Graduate School of System Informatics, Kobe University, Japan</b></td><td>1</td></tr><tr><td><b>Department of Electrical and Computer Engineering, University of Florida, Gainesville, FL</b></td><td>1</td></tr><tr><td><b>Beijing FaceAll Co. 
Beijing, China</b></td><td>1</td></tr><tr><td><b>University of Science and Technology of China</b></td><td>1</td></tr><tr><td>Amazon, Berkshire, U.K.</td><td>1</td></tr><tr><td>Tianjin Universtiy, Tianjin, China</td><td>1</td></tr><tr><td><b>Electrical and Computer Engineering, University of Maryland, College Park, Maryland 20740 United States</b></td><td>1</td></tr><tr><td><b>Electrical and Computer Engineering, Rutgers University, Piscataway, New Jersey 08854 United States</b></td><td>1</td></tr><tr><td><b>The Computer Laboratory, University of Cambridge, Cambridge, UK</b></td><td>1</td></tr><tr><td><b>New York University, New York City, NY, USA</b></td><td>1</td></tr><tr><td><b>Centre for Quantum Computation & Intelligent Systems and the Faculty of Engineering and Information Technology, University of Technology Sydney, 81 Broadway Street, Ultimo, NSW, Australia</b></td><td>1</td></tr><tr><td>University of Lancaster, Lancaster, United Kingdom</td><td>1</td></tr><tr><td>University of Helsinki, Helsinki, Finland</td><td>1</td></tr><tr><td><b>Department of Multimedia and Graphic Arts, Cyprus University of Technology, P.O. 
Box 50329, 3036, Lemesos, Cyprus</b></td><td>1</td></tr><tr><td><b>Ryerson Multimedia Research Laboratory, Ryerson University, Toronto, Ontario, Canada</b></td><td>1</td></tr><tr><td>Intelligent and Interactive Systems, Institute of Computer Science, University of Innsbruck, Innsbruck, Austria</td><td>1</td></tr><tr><td>Signal and Image Exploitation (INTELSIG), Montefiore Institute, University of Liège, Liège, Belgium</td><td>1</td></tr><tr><td>Megvii Inc., Beijing, China</td><td>1</td></tr><tr><td><b>Department of Electronic Engineering, City University of Hong Kong, 83 Tat Chee Avenue, Kowloon, Hong Kong</b></td><td>1</td></tr><tr><td><b>Department of Information Management, National Taiwan University of Science and Technology, Taipei, Taiwan, ROC</b></td><td>1</td></tr><tr><td><b>University of Ottawa, Ottawa, Canada</b></td><td>1</td></tr><tr><td><b>National Computer Network Emergency Response Technical Team/Coordination Center of China, Beijing, China</b></td><td>1</td></tr><tr><td><b>Army Research Office, RTP, Raliegh, NC, United States of America</b></td><td>1</td></tr><tr><td><b>The State Key Laboratory of Integrated Services Networks (ISN), Xidian University, Xi’an, China</b></td><td>1</td></tr><tr><td><b>Department of Electronic and Engineering, Xidian University, Xi’an, China</b></td><td>1</td></tr><tr><td>Department of Informatics, Modeling, Electronics, and Systems, University of Calabria, Rende, Italy</td><td>1</td></tr><tr><td><b>The University of New South Wales, Australia</b></td><td>1</td></tr><tr><td>School of Materials Science and Engineering, Central South University, Changsha, China</td><td>1</td></tr><tr><td>Institute of Energy, Jiangxi Academy of Sciences, Nanchang, China</td><td>1</td></tr><tr><td><b>Xiamen Key Laboratory of Computer Vision and Pattern Recognition, Huaqiao University, Xiamen, China</b></td><td>1</td></tr><tr><td>**</td><td>1</td></tr><tr><td><b>Advanced Technologies Application, Center (CENATAV), 
Cuba</b></td><td>1</td></tr><tr><td><b>Institute of Digital Media, Peking University, Beijing, China</b></td><td>1</td></tr><tr><td><b>GREYC, CNRS UMR6072, University of Caen, Caen, France</b></td><td>1</td></tr><tr><td><b>IDIAP, Martigny, Switzerland</b></td><td>1</td></tr><tr><td><b>Department of Electrical and Computer Engineering, Carnegie Mellon University, Pittsburgh, PA 15213. msavvid@cs.cmu.edu</b></td><td>1</td></tr><tr><td><b>Information Sciences Institute, University of Southern California, Marina del Rey, CA 90292. mitra@isi.edu</b></td><td>1</td></tr><tr><td><b>Department of Computer Science and Engineering, The Chinese University of Hong Kong, Shatin, N.T., Hong Kong. E-mail: king@cse.cuhk.edu.hk</b></td><td>1</td></tr><tr><td><b>Department of Computer Science and Engineering, The Chinese University of Hong Kong, Shatin, N.T., Hong Kong. E-mail: lyu@cse.cuhk.edu.hk</b></td><td>1</td></tr><tr><td><b>Department of Computer Science and Engineering, The Chinese University of Hong Kong, Shatin, N.T., Hong Kong. E-mail: jkzhu@cse.cuhk.edu.hk</b></td><td>1</td></tr><tr><td><b>Department of Computer Science and Engineering, The Chinese University of Hong Kong, Shatin, N.T., Hong Kong. 
E-mail: hbdeng@cse.cuhk.edu.hk</b></td><td>1</td></tr><tr><td>Electrical and Electronic Engineering Department, Faculty of Engineering, Shahed University, Tehran, Iran</td><td>1</td></tr><tr><td><b>Department of Electrical and Computer Engineering, Science and Research Branch, Islamic Azad University, Tehran, Iran</b></td><td>1</td></tr><tr><td><b>Electronics and Telecommunications Research Institute (ETRI), Republic of Korea</b></td><td>1</td></tr><tr><td><b>Xerox Research Center, Europe, France</b></td><td>1</td></tr><tr><td><b>Department of Electronic Engineering, State Key Laboratory of Intelligent Technology and Systems, Tsinghua National Laboratory for Information Science and Technology, Tsinghua University, Beijing, China</b></td><td>1</td></tr><tr><td><b>Science and Technology on Integrated Information System Laboratory, Institute of Software, Chinese Academy of Sciences, Beijing, China</b></td><td>1</td></tr><tr><td>College of Mathematics and Information Engineering, Jiaxing University, Jiaxing, China</td><td>1</td></tr><tr><td><b>State Key Laboratory of Fundamental Science on Synthetic Vision, College of Computer Science, Sichuan University, Chengdu, China</b></td><td>1</td></tr><tr><td>Dept. 
of Convergence, Daegu Gyeongbuk Institute of Science & Technology (DGIST), Daegu, Korea</td><td>1</td></tr><tr><td><b>Graduate School of Information Science and Engineering, Ritsumeikan University, Kusatsu, Japan</b></td><td>1</td></tr><tr><td><b>School of Electronic Science and Engineering, National ASIC Research and Engineering Center, Southeast University, Nanjing, China</b></td><td>1</td></tr><tr><td><b>Human Media Interaction Group, University of Twente, PO Box 217, 7500 AE Enschede, The Netherlands</b></td><td>1</td></tr><tr><td>School of Mechanical and Electrical Engineering, Shandong Management University, Jinan, China</td><td>1</td></tr><tr><td>School of Information Science and Technology, Shandong Normal University, Jinan, China</td><td>1</td></tr><tr><td>National Institute of Advanced Industrial Science Technology, Japan</td><td>1</td></tr><tr><td>Tilburg center for Cognition and Communication, Tilburg University, Tilburg, The Netherlands</td><td>1</td></tr><tr><td><b>Massachusetts Institute of Technology, Cambridge, USA</b></td><td>1</td></tr><tr><td><b>State Key Laboratory of Robotics and System, Harbin Institute of Technology, Harbin, China</b></td><td>1</td></tr><tr><td><b>MOE-Microsoft Laboratory for Intelligent Computing and Intelligent Systems, Department of Computer Science and Engineering, Shanghai Jiao Tong University, Shanghai, China</b></td><td>1</td></tr><tr><td><b>Department of Automation, State Key Laboratory of Intelligent Technologies and Systems and Tsinghua National Laboratory for Information Science and Technology (TNList), Tsinghua University, Beijing, China</b></td><td>1</td></tr><tr><td><b>Department of Computer Science, The University of Sheffield, Sheffield, UK</b></td><td>1</td></tr><tr><td>Automatics Research Group, Universidad Tecnológica de Pereira, Pereira, Colombia</td><td>1</td></tr><tr><td><b>Department of Computer Engineering, College of Computer & Information Sciences, King Saud University, Riyadh, Saudi 
Arabia</b></td><td>1</td></tr><tr><td><b>Department of Electronic Engineering, City University of Hong Kong, Kowloon Tong, Hong Kong, China</b></td><td>1</td></tr><tr><td><b>Laboratory of Media Audio & Video, Communication University of China, Beijing, China</b></td><td>1</td></tr><tr><td><b>Division of Electrical Engineering, School of Electrical Engineering and Computer Science, Korea Advanced Institute of Science and Technology (KAIST), 373-1 Guseong-Dong, Yuseong-Gu, Daejeon 305-701, Republic of Korea</b></td><td>1</td></tr><tr><td><b>CNRS LTCI; T&#x00E9;l&#x00E9;com ParisTech</b></td><td>1</td></tr><tr><td><b>Institut Mines-T&#x00E9;l&#x00E9;com; T&#x00E9;l&#x00E9;com ParisTech; CNRS LTCI</b></td><td>1</td></tr><tr><td>School of Science, Southwest Petroleum University, Chengdu, China</td><td>1</td></tr><tr><td><b>Amity University, Noida, India</b></td><td>1</td></tr><tr><td>Infosys Limited, Bhubaneswar, India</td><td>1</td></tr><tr><td><b>Quanzhou Institute of Equipment Manufacturing, Haixi Institutes, Chinese Academy of Sciences, Quanzhou, China</b></td><td>1</td></tr><tr><td><b>Research Center for Learning Science, Southeast University, China</b></td><td>1</td></tr><tr><td>School of Computer Science and Engineering, Tianjin University of Technology, China</td><td>1</td></tr><tr><td><b>Department of Electrical and Computer Engineering, College of the Computer and Information Science, Northeastern University, Boston, MA, USA</b></td><td>1</td></tr><tr><td><b>Department of Computer Science and Engineering, University of Notre Dame, Notre Dame, IN, USA</b></td><td>1</td></tr><tr><td><b>Department of Computer and Information Science, University of Massachusetts Dartmouth, Dartmouth, MA, USA</b></td><td>1</td></tr><tr><td>Department of Computer Science, University of Brasília, DF, Brazil 70910-900</td><td>1</td></tr><tr><td>Department of Mechanical Engineering, University of Brasília, DF, Brazil 70910-900</td><td>1</td></tr><tr><td><b>Department of Neurosurgery, 
University of Pittsburgh, PA 15213, USA</b></td><td>1</td></tr><tr><td><b>Faculty of Computers and Information, Ain Shams University, Egypt</b></td><td>1</td></tr><tr><td><b>Faculty of Computers and Information, BeniSuef University, Egypt</b></td><td>1</td></tr><tr><td>LIAMA, French National Institute for Research in Computer Science and Control, Paris, France</td><td>1</td></tr><tr><td><b>Intel Laboratory China, Beijing, China</b></td><td>1</td></tr><tr><td><b>School of Computing, National University of Singapore</b></td><td>1</td></tr><tr><td><b>Institute for Infocomm Research, Singapore</b></td><td>1</td></tr><tr><td><b>Harbin Institute of Technology Shenzhen Graduate School, Shenzhen, China</b></td><td>1</td></tr><tr><td>Leiden University, Leiden, The Netherlands</td><td>1</td></tr><tr><td>TNO, The Hague, The Netherlands</td><td>1</td></tr><tr><td>City University, Kowloon Tong, Hong Kong</td><td>1</td></tr><tr><td>Radboud University, EC Nijmegen, The Netherlands</td><td>1</td></tr><tr><td>TNO, Oude Waalsdorperweg, AK The Hague, The Netherlands</td><td>1</td></tr><tr><td>Liaocheng University, Liaocheng, China</td><td>1</td></tr><tr><td><b>Machine Vision Group, Infotech Oulu and Department of Electrical and Information Engineering, University of Oulu, Finland</b></td><td>1</td></tr><tr><td>Northwestern Polytechnic University, Xi’an, China</td><td>1</td></tr><tr><td>University of Science and Technology Beijing, Beijing, China</td><td>1</td></tr><tr><td><b>Center for Biometrics and Security Research & National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, 95 Zhongguancun Donglu, Beijing 100190, China</b></td><td>1</td></tr><tr><td><b>Institute of Image Processing and Pattern Recognition, Shanghai Jiao Tong University, Shanghai 200240, China</b></td><td>1</td></tr><tr><td><b>Rapid-Rich Object Search Laboratory, Interdisciplinary Graduate School, Nanyang Technological University, 
Singapore</b></td><td>1</td></tr><tr><td><b>Department of Industrial and Systems Engineering, University of Minnesota, Minneapolis, USA</b></td><td>1</td></tr><tr><td><b>Department of Mathematics, University of Portsmouth, Portsmouth, UK</b></td><td>1</td></tr><tr><td><b>Department of Automation, Xiamen University, Xiamen, China</b></td><td>1</td></tr><tr><td><b>Department of Computer Science and Technology, College of Computer, National University of Defense Technology, Changsha, Hunan, China, 410073</b></td><td>1</td></tr><tr><td>Faculty of Information Engineering, China University of Geosciences, Wuhan, China</td><td>1</td></tr><tr><td><b>University of Abertay, Dundee, UK</b></td><td>1</td></tr><tr><td>China University of Geosciences Wuhan, China</td><td>1</td></tr><tr><td>University of Udine, Udine, Italy</td><td>1</td></tr><tr><td>INRS-EMT, Montreal, Canada</td><td>1</td></tr><tr><td><b>Sapienza Univertsity of Rome</b></td><td>1</td></tr><tr><td><b>Queen Mary University of London, London, England UK</b></td><td>1</td></tr><tr><td><b>Fudan University, Shanghai , China</b></td><td>1</td></tr><tr><td><b>Hohai University, No. 
1 Xikang Road, Nanjing, Jiangsu Province, China</b></td><td>1</td></tr><tr><td><b>Institute of Intelligent Information Processing, Xidian University, Xi'an, China</b></td><td>1</td></tr><tr><td><b>College of Metropolitan Transportation, Beijing University of Technology, Beijing, China</b></td><td>1</td></tr><tr><td>School of Computer Science and Technology, Harbin Institute of Technology at Weihai, Weihai, China</td><td>1</td></tr><tr><td><b>School of Computer and Control Engineering, University of Chinese Academy of Sciences, Beijing, China</b></td><td>1</td></tr><tr><td><b>Institute of Computing Technology, Chinese Academy of Sciences, Key Laboratory of Intelligent Information Processing, Beijing, China</b></td><td>1</td></tr><tr><td><b>University of Southern California, Los Angeles, CA, USA</b></td><td>1</td></tr><tr><td><b>Institute of Computing Technology, CAS, No.6 Kexueyuan South Road, Beijing, 100080, China</b></td><td>1</td></tr><tr><td>School of Computer Science Carnegie Mellon University Pittsburgh, PA, 15213, USA</td><td>1</td></tr><tr><td><b>Dept. 
of Computer Science, Purdue University</b></td><td>1</td></tr><tr><td><b>Center for Cognitive Ubiquitous Computing, Arizona State University, Tempe, USA</b></td><td>1</td></tr><tr><td><b>Center of Image and Signal Processing, Faculty of Computer Science & Information Technology, University of Malaya, Kuala Lumpur, Malaysia</b></td><td>1</td></tr><tr><td><b>Graduate School of Engineering Science, Department of Systems Innovation, Osaka University, Toyonaka, Japan</b></td><td>1</td></tr><tr><td>College of Information and Technology, Incheon National University, Incheon, Korea</td><td>1</td></tr><tr><td><b>College of Electronics and Information Engineering, Sichuan University, Chengdu, China</b></td><td>1</td></tr><tr><td><b>School of Software Engineering, Beijing Jiaotong University, Beijing, China</b></td><td>1</td></tr><tr><td>Tianjin University & University of South Carolina, Tianjin, China</td><td>1</td></tr><tr><td><b>Laboratory of Genetics, National Institute on Aging, National Institutes of Health, Baltimore, USA</b></td><td>1</td></tr><tr><td>School of Electronics Engineering, Kyungpook National University, Taegu, South Korea</td><td>1</td></tr><tr><td>Department of Electrical & Electronics Engineering, Kalasalingam University, Krishnankoil, India</td><td>1</td></tr><tr><td><b>Language Technologies Institute, Carnegie Mellon University, 5000 Forbes Ave, Pittsburgh, PA, USA</b></td><td>1</td></tr><tr><td><b>Pudong Branch, China Mobile Group Shanghai, Company Limited, Shanghai, China</b></td><td>1</td></tr><tr><td><b>School of Mathematics and Statistics, The University of Western Australia, Nedlands, WA, Australia</b></td><td>1</td></tr><tr><td><b>Department of Computer Science and Engineering, Qatar University, Doha, Qatar</b></td><td>1</td></tr><tr><td>School of Computer Engineering, Hanshin University, Osan, Republic of Korea</td><td>1</td></tr><tr><td><b>Department of Electrical and Computer Engineering, Center for Automation Research, University of 
Maryland, College Park, USA</b></td><td>1</td></tr><tr><td><b>France Telecom - Orange Labs, Lannion, France</b></td><td>1</td></tr><tr><td><b>National Key Laboratory of Cognitive Neuroscience and Learning, Beijing Normal University, Beijing, China</b></td><td>1</td></tr><tr><td>School of Computer Science, China University of Geosciences, Wuhan, China</td><td>1</td></tr><tr><td>College of Computer Science and Technology of Huaqiao University, Xiamen, China</td><td>1</td></tr><tr><td><b>Department of Computer Science, Hong Kong Baptist University, Kowloon, Hong Kong</b></td><td>1</td></tr><tr><td><b>University of Windsor, Canada</b></td><td>1</td></tr><tr><td>CEA (iRSTV/BGE), INSERM (U1038), CNRS (FR3425), Université Grenoble-Alpes, Grenoble, France</td><td>1</td></tr><tr><td>NLPR, Institute of Automation, Chinese Academy of Science, Beijing, People’s Republic of China</td><td>1</td></tr><tr><td>Costel, Université de Rennes 2, Rennes, France</td><td>1</td></tr><tr><td>IRISA, Université de Bretagne Sud, Vannes, France</td><td>1</td></tr><tr><td>Research & Development, British Broadcasting Corporation (BBC), London, UK</td><td>1</td></tr><tr><td><b>Science and Engineering Faculty, Queensland University of Technology, Brisbane, Australia</b></td><td>1</td></tr><tr><td>Faculty of Computer Science and Engineering, Xi’an University of Technology, Xi’an, China</td><td>1</td></tr><tr><td><b>Waseda University</b></td><td>1</td></tr><tr><td>Wide Eyes Technologies</td><td>1</td></tr><tr><td><b>Department of Electrical and Computer Engineering, University of Illinois at Urbana—Champaign, Champaign, IL, USA</b></td><td>1</td></tr><tr><td><b>ThyssenKrupp Elevator Americas</b></td><td>1</td></tr><tr><td><b>Tsinghua University, Shenzhen, China</b></td><td>1</td></tr><tr><td><b>Center for Signal and Image Processing, School of Electrical and Computer Engineering, Georgia Institute of Technology, Atlanta</b></td><td>1</td></tr><tr><td>School of Information Engineering, Jiangxi 
Manufacturing Technology College, Nanchang, China</td><td>1</td></tr><tr><td>Department of Computer Science and Technology, Nanjing Forestry University and Shandong University, Jinan, China</td><td>1</td></tr><tr><td>Department of Language Studies, Nanjing Forestry University, Nanjing, China</td><td>1</td></tr><tr><td>Department of Computer Science and Technology, Nanjing Forestry University, Nanjing, China</td><td>1</td></tr><tr><td><b>State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China</b></td><td>1</td></tr><tr><td>Dept. of Autom. Test & Control, Harbin Inst. of Technol., China</td><td>1</td></tr><tr><td>Department of Computer Science and Engineering, Frederick University, Nicosia, Cyprus</td><td>1</td></tr><tr><td><b>Department of Digital Systems, University of Piraeus, Piraeus, Greece</b></td><td>1</td></tr><tr><td>The Maersk Mc-Kinney Moller Institute, University of Southern Denmark, Odense M, Denmark</td><td>1</td></tr><tr><td><b>Department of Information and Control, B-DAT Laboratory, Nanjing University of Information Science and Technology, Nanjing, China</b></td><td>1</td></tr><tr><td><b>Centre for Signal Processing, Department of Electronic and Information Engineering, The Hong Kong Polytechnic University, Hong Kong</b></td><td>1</td></tr><tr><td><b>University of Electronic Science and Technology of China, China</b></td><td>1</td></tr><tr><td><b>University of Maryland, College Park, USA</b></td><td>1</td></tr><tr><td><b>Institute of Engineering and Management, Kolkata, India</b></td><td>1</td></tr><tr><td><b>Inst. de Telecomunica&#x00E7;&#x00F5;es, Fac. 
de Ci&#x00EA;ncias da Universidade do Porto, Porto, Portugal</b></td><td>1</td></tr><tr><td><b>Peking University, Beijing</b></td><td>1</td></tr><tr><td><b>Korea Electronics Technology Institute, Bundang-gu, Seongnam-si, Republic of Korea</b></td><td>1</td></tr><tr><td><b>National Taiwan University, Taiwan</b></td><td>1</td></tr><tr><td><b>Siren Solutions, Dublin, Ireland</b></td><td>1</td></tr><tr><td><b>Paradigma Digital, Madrid, Spain</b></td><td>1</td></tr><tr><td><b>School of Mathematical Sciences, University of Science and Technology of China, Hefei, China</b></td><td>1</td></tr><tr><td><b>School of Computer Science and Technology, University of Science and Technology of China, Hefei, China</b></td><td>1</td></tr><tr><td><b>Australian National University, Australia</b></td><td>1</td></tr><tr><td><b>University of Canberra, Australia</b></td><td>1</td></tr><tr><td><b>Institute of Electrical and Control Engineering, National Chiao Tung University, Hsinchu, Taiwan, ROC</b></td><td>1</td></tr><tr><td>Department of Computer Science, Digital Image Processing Laboratory, Islamia College Peshawar, Peshawar, Pakistan</td><td>1</td></tr><tr><td><b>Department of Computer Science and Engineering, Michigan State University, East Lansing, USA</b></td><td>1</td></tr><tr><td><b>Research Institute for Future Media Computing, School of Computer Science & Software Engineering, Shenzhen University, Shenzhen, China</b></td><td>1</td></tr><tr><td><b>Computer Vision Institute, School of Computer Science & Software Engineering, Shenzhen University, Shenzhen, China</b></td><td>1</td></tr><tr><td><b>Department of Electrical and Computer Engineering, University of Calgary, Calgary, Alberta, Canada</b></td><td>1</td></tr><tr><td><b>Faculty of Applied Science, University of British Columbia, Vancouver, British Columbia, Canada</b></td><td>1</td></tr><tr><td>Department of Computer Science and Software Engineering, International Islamic University, Islamabad, 
Pakistan</td><td>1</td></tr><tr><td>Department of Computer Science, Shaheed Zulfikar Ali Bhutto Institute of Science and Technology, Islamabad, Pakistan</td><td>1</td></tr><tr><td><b>Rutgers University, 94 Brett Road, Piscataway, NJ 08854, United States of America</b></td><td>1</td></tr><tr><td><b>Volvo Car Corporation, SE-405 31 Göteborg, Sweden</b></td><td>1</td></tr><tr><td><b>Smart Eye AB, SE-413 27 Göteborg, Sweden</b></td><td>1</td></tr><tr><td>Technische Universität München / Imperial College London, Munich / London, England UK</td><td>1</td></tr><tr><td><b>University of Geneva, Geneva, Switzerland</b></td><td>1</td></tr><tr><td>Department of Mathematics and Informatics, Ecole Centrale de Lyon, Lyon, 69134, France</td><td>1</td></tr><tr><td>Department of Computer Science & Engineering, Jamia Hamdard University, New Delhi, India</td><td>1</td></tr><tr><td><b>Institute of Image Processing & Pattern Recognition, Shanghai Jiao Tong University, Shanghai, China</b></td><td>1</td></tr><tr><td><b>Toyota Research Institute - North America</b></td><td>1</td></tr><tr><td><b>Department of Computer Science & Engineering, Arizona State University, Tempe, USA</b></td><td>1</td></tr><tr><td>Department of Computer Science & Engineering, University of Minnesota-Twin Cities, Minneapolis, USA</td><td>1</td></tr><tr><td>Faculty of Electrical Engineering and Informatics, Budapest University of Technology and Economics, Budapest, Hungary</td><td>1</td></tr><tr><td>School of Information Science and Technology, Northwest University, Xi’an, China</td><td>1</td></tr><tr><td>Radboud University, Donders Institute for Brain, Cognition and Behaviour, Nijmegen, The Netherlands</td><td>1</td></tr><tr><td><b>Griffith University</b></td><td>1</td></tr><tr><td><b>School of Computer, Beijing University of Posts and Telecommunications, Beijing, China</b></td><td>1</td></tr><tr><td><b>School of Information, Singapore Management University, Singapore</b></td><td>1</td></tr><tr><td><b>Agency for 
Science, Technology and Research, Singapore</b></td><td>1</td></tr><tr><td>School of Software, Beijing Institute of Technology, Beijing, China</td><td>1</td></tr><tr><td><b>Department of Software Technology and Enterprize, Korea University, Seoul, Republic of Korea</b></td><td>1</td></tr><tr><td>University of St. Andrews, UK</td><td>1</td></tr><tr><td><b>University of Illinois at Urbana-Champaign, Champaign, IL, USA</b></td><td>1</td></tr><tr><td>University of Tunis El Manar, Tunis, Tunisia</td><td>1</td></tr><tr><td><b>Department of Informatics, Karlsruhe Institute of Technology, Karlsruhe, Germany</b></td><td>1</td></tr><tr><td><b>Department of Computer Science, Purdue University, West Lafayette, USA</b></td><td>1</td></tr><tr><td>College of Information and Control Engineering, China University of Petroleum, Qingdao, China</td><td>1</td></tr><tr><td><b>Griffith School of Engineering, Queensland Research Laboratory, National ICT Australia, Griffith University, Nathan, Australia</b></td><td>1</td></tr><tr><td><b>Queensland Research Laboratory, National ICT Australia and Institute for Integrated and Intelligent Systems, Griffith University, Nathan, Australia</b></td><td>1</td></tr><tr><td>Intel Labs Europe, Pipers Way, Swindon</td><td>1</td></tr><tr><td><b>PRaDA, Deakin University, Australia</b></td><td>1</td></tr><tr><td>Department of Computer Systems, Universidad Politécnica de Madrid, Madrid, Spain</td><td>1</td></tr><tr><td><b>Department of Computer and Information Science, University of Pennsylvania, Philadelphia, PA</b></td><td>1</td></tr><tr><td><b>Neuropsychiatry Section, Department of Psychiatry, University of Pennsylvania</b></td><td>1</td></tr><tr><td><b>Department of Psychology, University of Illinois at Chicago, Chicago, IL</b></td><td>1</td></tr><tr><td>Department of Mathematics and Computer Science, Ursinus College, Collegeville, PA</td><td>1</td></tr><tr><td><b>Center for Quantum Computation and Intelligent Systems, Faculty of Engineering and 
Information Technology, University of Technology, Sydney, NSW, Australia</b></td><td>1</td></tr><tr><td>Systems Engineering Institute, Xi’an Jiaotong University, Xi’an 710049, China</td><td>1</td></tr><tr><td><b>Chongqing University, China</b></td><td>1</td></tr><tr><td><b>University College London, UK, Dept. of Electronic and Electrical Engineering</b></td><td>1</td></tr><tr><td>Institute of Semiconductors, Chinese Academy of Sciences&University of Chinese Academy of Sciences, Beijing, China</td><td>1</td></tr><tr><td><b>School of Mathematical Sciences, DUT-RU International School of Information and Software Engineering, Dalian University of Technology, Dalian, China</b></td><td>1</td></tr><tr><td><b>Computing Department, Imperial College London, UK. M.Pantic@imperial.ic.ac.uk</b></td><td>1</td></tr><tr><td><b>Computing Department, Imperial College London, UK. M.F.Valstar@imperial.ic.ac.uk</b></td><td>1</td></tr><tr><td><b>Consiglio Nazionale delle Ricerche, Istituto di Calcolo e Reti ad Alte Prestazioni, Viale delle Scienze, 90128 Palermo, ITALY</b></td><td>1</td></tr><tr><td>School of Computer Science and Technology, Nanjing University of Science and Technology of China, Nanjing, People’s Republic of China</td><td>1</td></tr><tr><td><b>University of Rochester, New York, USA</b></td><td>1</td></tr><tr><td><b>Microsoft Research, Beijing, People’s Republic of China</b></td><td>1</td></tr><tr><td><b>University of Science and Technology of China, Hefei, People’s Republic of China</b></td><td>1</td></tr><tr><td><b>Institute of Intelligent Machines, Chinese Academy of Sciences, Hefei, China</b></td><td>1</td></tr><tr><td>NTT Network Innovation Laboratories, Nippon Telegraph and Telephone Corp.</td><td>1</td></tr><tr><td><b>Faculty of Engineering, Tunku Abdul Rahman University College, Setapak, Malaysia</b></td><td>1</td></tr><tr><td>Faculty of Computing and Information Technology, Setapak, Malaysia</td><td>1</td></tr><tr><td><b>Dep. Inteligencia Artificial, U. 
Politécnica Madrid, Spain</b></td><td>1</td></tr><tr><td><b>Dep. Ciencias de la Computación, U. Rey Juan Carlos, Spain</b></td><td>1</td></tr><tr><td><b>Dep. Comp. Sci. and Engr., Fudan University, China</b></td><td>1</td></tr><tr><td><b>Computer Science Department, University of Maryland, College Park, MD, USA</b></td><td>1</td></tr><tr><td><b>Cernium Corporation, Reston, VA, USA</b></td><td>1</td></tr><tr><td>Computer Science Department, University of California, Los Angeles, CA, USA</td><td>1</td></tr><tr><td><b>Department of Computer and Information Science, Temple University, Philadelphia, PA, USA</b></td><td>1</td></tr><tr><td><b>Department of Electrical and Computer Engineering and the College of Computer and Information Science, Northeastern University, Boston, MA</b></td><td>1</td></tr><tr><td><b>Department of Electrical and Computer Engineering, Northeastern University, Boston, MA</b></td><td>1</td></tr><tr><td><b>School of Automation, Beijing University of Posts and Telecommunications, Beijing 100876, China</b></td><td>1</td></tr><tr><td><b>North Acton, London</b></td><td>1</td></tr><tr><td><b>Imaging Science and Engineering Laboratory, Tokyo Institute of Technology, Kanagawa, Japan</b></td><td>1</td></tr><tr><td><b>Interdisciplinary Graduate School of Science and Engineering, Tokyo Institute of Technology, Kanagawa, Japan</b></td><td>1</td></tr><tr><td><b>Department of ECE, University of Dayton, Dayton, OH, USA</b></td><td>1</td></tr><tr><td><b>ODU Vision Lab, Old Dominion University, Norfolk, VA, USA</b></td><td>1</td></tr><tr><td><b>EURECOM, Route des Chappes, France</b></td><td>1</td></tr><tr><td>INRIA, Sophia Antipolis, France</td><td>1</td></tr><tr><td><b>School of Mathematical Sciences, Dalian University of Technology, Dalian, China</b></td><td>1</td></tr><tr><td>School of Mathematics and Computer Sciences, Gannan Normal University, Ganzhou, China</td><td>1</td></tr><tr><td>University of Maribor, Faculty of Electrical Engineering and Computer 
Science, Koroška cesta 46, SI-2000, Slovenia</td><td>1</td></tr><tr><td><b>RMIT University, Vietnam</b></td><td>1</td></tr><tr><td><b>Tolendata Singapore R&amp;D Centre Private Limited, Singapore</b></td><td>1</td></tr><tr><td><b>College of Computer Science &amp; Software Engineering, Shenzhen University, China 518060</b></td><td>1</td></tr><tr><td>University of Tours, France</td><td>1</td></tr><tr><td><b>Concordia Institute for Information Systems Engineering (CIISE), 1515 St. Catherine West, Montreal, Quebec H3G 2W1, CANADA</b></td><td>1</td></tr><tr><td><b>Dept. of Computer Science and Software Engineering, Concordia University, 1515 St. Catherine West, Montreal, Quebec H3G 2W1, CANADA</b></td><td>1</td></tr><tr><td><b>Department of Computer Science and Engineering, University of Notre Dame</b></td><td>1</td></tr><tr><td><b>Department of Computer Science, Pontificia Universidad Catolica de Chile</b></td><td>1</td></tr><tr><td><b>Centre for Quantum Computation and Intelligent Systems, Faculty of Engineering and Information Technology, University of Technology at Sydney, Sydney, NSW, Australia</b></td><td>1</td></tr><tr><td><b>School of Engineering, The University of Edinburgh, Edinburgh, U.K.</b></td><td>1</td></tr><tr><td><b>Changzhou University, Changzhou, China</b></td><td>1</td></tr><tr><td><b>High Magnetic Field Laboratory, Chinese Academy of Sciences, Hefei, China</b></td><td>1</td></tr><tr><td><b>Medical Psychology, Ulm University, Ulm, Germany</b></td><td>1</td></tr><tr><td>Department of Information Management, Hwa Hsia University of Technology, New Taipei City, Taiwan</td><td>1</td></tr><tr><td>Department of Electronic Engineering, National Ilan University, Yilan City, Taiwan</td><td>1</td></tr><tr><td><b>School of Computer Science, Guangzhou University, Guangzhou, China</b></td><td>1</td></tr><tr><td>Faculty of Electrical Engineering and Computer Science, Ningbo University, Ningbo, China</td><td>1</td></tr><tr><td>College of Information and Electrical 
Engineering, Ludong University, Yantai, China</td><td>1</td></tr><tr><td><b>College of Computing, Georgia Tech</b></td><td>1</td></tr><tr><td><b>Graduate School of Advanced Imaging Science, Multimedia, and Film, Chung-Ang University, Korea</b></td><td>1</td></tr><tr><td><b>Taxes Instruments, Dallas, TX, United States</b></td><td>1</td></tr><tr><td>Wakayama University</td><td>1</td></tr><tr><td><b>Energy Research Institute @NTU (ERI@N), Interdisciplinary Graduate School, Nanyang Technological University, Singapore 639798</b></td><td>1</td></tr><tr><td>Computer Science College, Xi’an Polytechnic University, Xi’an, China</td><td>1</td></tr><tr><td><b>Visual Analysis of People Laboratory, Aalborg University, Aalborg, Denmark</b></td><td>1</td></tr><tr><td><b>Computer Vision Team, ARS Traffic & Transport Technology, Trivandrum, India</b></td><td>1</td></tr><tr><td><b>Computer Science Dept., Columbia University, USA</b></td><td>1</td></tr><tr><td>Computer Science Dept., SUNY Stony Brook, USA</td><td>1</td></tr><tr><td><b>Rensselaer Polytechnic Institute</b></td><td>1</td></tr><tr><td>School of Mathematical and Physical Sciences at the University of Newcastle, Callaghan, NSW 2308, Australia</td><td>1</td></tr><tr><td>Department of Electronics and Communication Engineering, JNTU College of Engineering, Hyderabad, India</td><td>1</td></tr><tr><td>Department of Physics, JNTU College of Engineering, Kakinada, India</td><td>1</td></tr><tr><td>Department of Computer Science and Engineering, JNTU College of Engineering, Kakinada, India</td><td>1</td></tr><tr><td><b>School of Computer Science and Technology, Xiamen University, Xiamen, China</b></td><td>1</td></tr><tr><td><b>Collaborative Innovation Center for Geospatial Information Technology, Wuhan, China</b></td><td>1</td></tr><tr><td><b>Center for Machine Perception, Department of Cybernetics, Faculty of Electrical Engineering, Czech Technical University in Prague, Prague 6, Czech 
Republic</b></td><td>1</td></tr><tr><td>Department of Telecommunications and Information Processing, Image Processing and Interpretation, UGent/iMinds, Ghent, Belgium</td><td>1</td></tr><tr><td><b>Department of Electrical Engineering, Indian Institute of Science, C.V. Raman Avenue, Bangalore, KA 560-012, India</b></td><td>1</td></tr><tr><td><b>Department of Electrical and Computer Engineering, Ryerson University, Toronto, Canada</b></td><td>1</td></tr><tr><td>School of Software, Shenyang University of Technology, Shenyang, China</td><td>1</td></tr><tr><td><b>Department of Internal Medicine, Chung-Ang University, Seoul, South Korea</b></td><td>1</td></tr><tr><td><b>Department of Data Science, Dankook University, Yongin, South Korea</b></td><td>1</td></tr><tr><td><b>Tianjin Key Laboratory of Cognitive Computing and Application, Tianjin University, Tianjin, China</b></td><td>1</td></tr><tr><td>School of Engineering of UABC, University of Baja California, Tijuana, Mexico</td><td>1</td></tr><tr><td>University of Hawaii at Hilo, HI, USA</td><td>1</td></tr><tr><td>Yuncheng University, Shanxi Province, China</td><td>1</td></tr><tr><td><b>Department of Electrical and Electronic Engineering, Nazarbayev University, Astana, Kazakhstan</b></td><td>1</td></tr><tr><td><b>Key Laboratory of Intelligent Perception and Systems for High-Dimensional Information of Ministry of Education, Nanjing University of Science and Technology, Nanjing, China</b></td><td>1</td></tr><tr><td><b>State Key Laboratory of Information Security, Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China</b></td><td>1</td></tr><tr><td><b>University of Electronic Science and Technology of China, Chendu, China</b></td><td>1</td></tr><tr><td><b>Inception Institute of Artificial Intelligence, Abu Dhabi, United Arab Emirates</b></td><td>1</td></tr><tr><td><b>School of Electronics and Information Engineering, Beihang University, Beijing, China</b></td><td>1</td></tr><tr><td><b>College of 
Computer Science, Guangdong University of Petrochemical Technology, Maoming, China</b></td><td>1</td></tr><tr><td><b>Department of Electrical, Computer, and Systems Engineering, Rensselaer Polytechnic Institute, Troy, NY, USA, 12180</b></td><td>1</td></tr><tr><td><b>Department of Computer Engineering, Istanbul University, Istanbul, Turkey</b></td><td>1</td></tr><tr><td>Department of Computer Engineering, Bah&#x00E7;e&#x015F;ehir University, Istanbul, Turkey</td><td>1</td></tr><tr><td>Sichuan University West China Hospital of Stomatology, Chengdu, China</td><td>1</td></tr><tr><td><b>Center for Future Media, University of Electronic Science and Technology of China, Chengdu, China</b></td><td>1</td></tr><tr><td>School of Software Engineering, Chengdu University of Information Technology, Chengdu, China</td><td>1</td></tr><tr><td><b>School of Information and Software Engineering, University of Electronic Science and Technology of China, Chengdu, China</b></td><td>1</td></tr><tr><td><b>National Institute of Standards and Technology, 100 Bureau Drive, Gaithersburg, MD 20899, USA</b></td><td>1</td></tr><tr><td>Department of Computer Science and Engineering, East China University of Science and Technology, Shanghai, People’s Republic of China</td><td>1</td></tr><tr><td><b>Col. of Comp. Sci. and Comm. 
Eng., Jiangsu University, Zhenjiang, China</b></td><td>1</td></tr><tr><td>School of Electronic and Information Engineering, Ningbo University of Technology, Ningbo, China</td><td>1</td></tr><tr><td><b>Center for Automation Research, University of Maryland, College Park, MD 20742, USA</b></td><td>1</td></tr><tr><td><b>Delft University of Technology, Delft, The Netherlands</b></td><td>1</td></tr><tr><td>Department of Computer Engineering, Bogaziçi University, Bebek, Turkey</td><td>1</td></tr><tr><td>Department of Electrical and Electronic Engineering, Auckland University of Technology , Auckland, New Zealand</td><td>1</td></tr><tr><td>Department of Computer Engineering, Qazvin Islamic Azad University , Qazvin, Iran</td><td>1</td></tr><tr><td>Shanghai University of Finance and Economics, Shanghai, China</td><td>1</td></tr><tr><td><b>School of Mathematics, Jilin University, China</b></td><td>1</td></tr><tr><td><b>Department of Computer Science, Memorial University of Newfoundland, Canada</b></td><td>1</td></tr><tr><td>Graduate School of Engineering, Nagasaki University, Nagasaki, Japan</td><td>1</td></tr><tr><td>Institute of Management and Information Technologies, Chiba University, Chiba, Japan</td><td>1</td></tr><tr><td>Graduate School of Advanced Integration Science, Chiba University, Chiba, Japan</td><td>1</td></tr><tr><td><b>School of Computer Science, Carnegie Mellon University, Pittsburgh, PA, 15213, USA</b></td><td>1</td></tr><tr><td><b>Dept. 
of Computer Science, Purdue University, West Lafayette, IN, 47907, USA</b></td><td>1</td></tr><tr><td><b>Department of Electrical and Electronic Engineering, Yonsei University, Seoul, South Korea</b></td><td>1</td></tr><tr><td><b>Griffith University, Brisbane</b></td><td>1</td></tr><tr><td><b>Griffith University, Brisbane and University of the South Pacific, Fiji</b></td><td>1</td></tr><tr><td>Vision Semantics Ltd</td><td>1</td></tr><tr><td><b>Department of Electronic Engineering, City University of Hong Kong, Kowloon, Hong Kong</b></td><td>1</td></tr><tr><td>Department of Film and Digital Media, Seokyeong University, Seoul, Republic of Korea</td><td>1</td></tr><tr><td>Department of MediaSoftware, Sungkyul University, Anyang-si, Republic of Korea</td><td>1</td></tr><tr><td><b>Department of Computer Science and Engineering, Korea University, Seoul, Republic of Korea</b></td><td>1</td></tr><tr><td>Pusan National University, Busan, Korea</td><td>1</td></tr><tr><td><b>Graduate School at Shenzhen, Tsinghua University, Shenzhen, China</b></td><td>1</td></tr><tr><td>School of Engineering, Swiss Federal Institute of Technology Lausanne (EPFL), Lausanne, Switzerland</td><td>1</td></tr><tr><td>Department of Computer Science, Auckland University of Technology, Auckland, New Zealand</td><td>1</td></tr><tr><td>L3S Research Center, Leibniz Universität Hannover, Hannover, Germany</td><td>1</td></tr><tr><td>German National Library of Science and Technology (TIB), Hannover, Germany</td><td>1</td></tr><tr><td>taglicht media Film- & Fernsehproduktion GmbH, Köln, Germany</td><td>1</td></tr><tr><td>Department of Mathematics and Computer Science, University of Marburg, Marburg, Germany</td><td>1</td></tr><tr><td><b>The Hong Kong Polytechnic University, Chu Hai College of Higher Education, Hong Kong, China</b></td><td>1</td></tr><tr><td>School of Mathematics and Computational Science, Anqing Normal University, Anqing, People’s Republic of China</td><td>1</td></tr><tr><td><b>School of IoT 
Engineering, Jiangnan University, Wuxi, People’s Republic of China</b></td><td>1</td></tr><tr><td>Concordia Institute for Information Systems Engineering Concordia University, Montreal, Canada</td><td>1</td></tr><tr><td>IKERBASQUE, Basque Foundation for Science, Bilbao, Spain</td><td>1</td></tr><tr><td>University of the Basque Country UPV/EHU, San Sebastian, Spain</td><td>1</td></tr><tr><td>Computer Vision Center, Edifici “O”, Campus UAB, Bellaterra, Spain</td><td>1</td></tr><tr><td>Graduate School of Biomedical Sciences, Nagasaki University, Nagasaki City, Japan</td><td>1</td></tr><tr><td>Xiamen University of Technology, Fujian, China</td><td>1</td></tr><tr><td><b>Université des Antilles et de la Guyane (UAG), France</b></td><td>1</td></tr><tr><td><b>Institut des Systèmes intelligents et de Robotique, UPMC, France</b></td><td>1</td></tr><tr><td>School of Computer Science and Information Engineering, Shanghai Institute of Technology, Shanghai, China</td><td>1</td></tr><tr><td><b>College of Computer Science and Information Technology, Northeast Normal University, Changchun, China</b></td><td>1</td></tr><tr><td><b>College of Information Science and Engineering, Northeastern University, Shenyang, China</b></td><td>1</td></tr><tr><td>Dept. 
of Artificial Intelligence, Faculty of Computer Engineering, University of Isfahan, Iran</td><td>1</td></tr><tr><td>Department of Information Processing Interdisciplinary Graduate School of Science and Engineering, Tokyo Institute of Technology Yokohama 226-8503, Japan</td><td>1</td></tr><tr><td><b>Research Groups on Intelligent Machines, University of Sfax, Sfax, Tunisia</b></td><td>1</td></tr><tr><td><b>School of Computer Science and Engineering, University of New South Wales, Sydney, NSW, Australia</b></td><td>1</td></tr><tr><td><b>School of Information Technology and Electrical Engineering, University of Queensland, St Lucia, QLD, Australia</b></td><td>1</td></tr><tr><td><b>School of Computer Science and Engineering, Jiangsu University of Science and Technology, Zhenjiang City 212003, China</b></td><td>1</td></tr><tr><td><b>Microsoft, Bellevue, WA, USA</b></td><td>1</td></tr><tr><td><b>M5001, Department of Computer Science, City University of Hong Kong, Kowloon, Hong Kong</b></td><td>1</td></tr><tr><td>Department of Computer Science, University of Texas, San Antonio, TX, USA</td><td>1</td></tr><tr><td><b>School of Electrical and Computer Engineering, Oklahoma State University, Stillwater, OK, USA</b></td><td>1</td></tr><tr><td><b>Imperial College London, London, U.K.</b></td><td>1</td></tr><tr><td><b>University of East Anglia, Norwich, United Kingdom</b></td><td>1</td></tr><tr><td>University of Sheffield, Sheffield, United Kingdom</td><td>1</td></tr><tr><td>Insititute of Automation, Chinese Academy of Sciences (CAS), Beijing, China</td><td>1</td></tr><tr><td><b>Alcohol Countermeasure Systems Corporation, Toronto, ON, Canada</b></td><td>1</td></tr><tr><td><b>Center for Ubiquitous Computing, University of Oulu, Oulu, Finland</b></td><td>1</td></tr><tr><td>School of Computing and Information Systems, University of Melbourne, Melbourne, Australia</td><td>1</td></tr><tr><td><b>Center for Machine Vision and Signal Analysis, University of Oulu, Oulu, 
Finland</b></td><td>1</td></tr><tr><td><b>Institute of Information and System Sciences, Faculty of Mathematics and Statistics, Xi’an Jiaotong University, Xi’an, China</b></td><td>1</td></tr><tr><td><b>Research Division, Educational Testing Service, Princeton, NJ, USA</b></td><td>1</td></tr><tr><td><b>Key Laboratory of Machine Intelligence and Advanced Computing, Ministry of Education, Sun Yat-sen University, Guangzhou, China</b></td><td>1</td></tr><tr><td><b>Division of Biomedical Engineering, Hong Kong University of Science and Technology, Kowloon, Hong Kong SAR</b></td><td>1</td></tr><tr><td><b>Department of Electronic and Computer Engineering, Hong Kong University of Science and Technology, Kowloon, Hong Kong SAR</b></td><td>1</td></tr><tr><td><b>University of York, York, United Kingdom</b></td><td>1</td></tr><tr><td><b>Kumamoto University, Kumamoto, Japan</b></td><td>1</td></tr><tr><td>Sapienza Università di Roma, Roma, Italy</td><td>1</td></tr><tr><td><b>Center for Research on Intelligent Perception and Computing (CRIPAC), NLPR, CASIA, Beijing, China</b></td><td>1</td></tr><tr><td><b>National Taichung University of science and Technology, Taichung</b></td><td>1</td></tr><tr><td><b>University of Technology Sydney, Sydney, NSW, Australia</b></td><td>1</td></tr><tr><td><b>SAP Innovation Center Network, Singapore</b></td><td>1</td></tr><tr><td><b>Agency for Science, Technology and Research, Institute of High Performance Computing, Singapore</b></td><td>1</td></tr><tr><td><b>Center for Unified Biometrics and Sensors, University at Buffalo, NY, USA. venu@cedar.buffalo.edu</b></td><td>1</td></tr><tr><td><b>Center for Unified Biometrics and Sensors, University at Buffalo, NY, USA. zhizhang@cedar.buffalo.edu</b></td><td>1</td></tr><tr><td><b>CUBRC, Buffalo, NY, USA. slowe@cubrc.org</b></td><td>1</td></tr><tr><td>Center for Unified Biometrics and Sensors, University at Buffalo, NY, USA. 
tulyakov@cedar.buffalo.edu</td><td>1</td></tr><tr><td><b>Computational Biomedicine Lab, Department of Computer Science, University of Houston, Houston, USA</b></td><td>1</td></tr><tr><td>School of Computer and Information Engineering, Xiamen University of Technology, Xiamen 361024, China</td><td>1</td></tr><tr><td><b>Department of Computing, University of Surrey, Guildford, Surrey, GU2 7XH, UK</b></td><td>1</td></tr><tr><td><b>Shenzhen College of Advanced Technology, University of Chinese Academy of Sciences, China</b></td><td>1</td></tr><tr><td>College of Control Engineering, Northeastern University at Qinhuangdao, Qinhuangdao, China</td><td>1</td></tr><tr><td><b>National ASIC Design and Engineering Center, Institute of Automation, Chinese Academy of Sciences, Beijing, China</b></td><td>1</td></tr><tr><td>LAMIA, EA 4540, University of French West Indies &amp; Guyana</td><td>1</td></tr><tr><td><b>Institut Telecom - Telecom ParisTech CNRS/LTCI, Paris</b></td><td>1</td></tr><tr><td>Peking University & Shanghai Jaio Tong University, Beijing, China</td><td>1</td></tr><tr><td>School of Information Technology, Madurai Kamarai University, Madurai, India</td><td>1</td></tr><tr><td>Computer Science and Engineering, Sanjivani College of Engineering, Kopargaon, India</td><td>1</td></tr><tr><td>Computer Science and Engineering, St.Peter’s University, Chennai, India</td><td>1</td></tr><tr><td>Computer Science and Engineering, Panimalar Engineering College, Chennai, India</td><td>1</td></tr><tr><td>Department of Computer Science, IT-Instituto de Telecomunicações, University of Beira Interior, Covilhã, Portugal</td><td>1</td></tr><tr><td>ITI Department Telecom Bretagne, Brest, France</td><td>1</td></tr><tr><td>Adobe Systems Incorporated, San Jose, CA, 95110</td><td>1</td></tr><tr><td><b>University of Technology at Sydney, Sydney, NSW, Australia</b></td><td>1</td></tr><tr><td><b>College of Engineeing & Informatics, National University of Ireland Galway, Galway, 
Ireland</b></td><td>1</td></tr><tr><td><b>Department of Computer Engineering, Bogazici University, Bebek, Istanbul, Turkey</b></td><td>1</td></tr><tr><td><b>Department of Computer Engineering, Istanbul University, Avcilar, Istanbul, Turkey</b></td><td>1</td></tr><tr><td><b>Department of Computer Engineering, Bahcesehir University, Besiktas, Istanbul, Turkey</b></td><td>1</td></tr><tr><td><b>School of Computer Science and Technology, China University of Mining and Technology, Xuzhou, China</b></td><td>1</td></tr><tr><td><b>Faculty of Electronics and Telecommunications “POLITEHNICA” University from Timişoara Timişoara, România</b></td><td>1</td></tr><tr><td><b>College of Humanities, Jiangxi University of Traditional Chinese Medicine, Nanchang, 330004, China</b></td><td>1</td></tr><tr><td><b>Sch. of Electr. & Electron. Eng., Nanyang Technol. Univ., Singapore</b></td><td>1</td></tr><tr><td><b>Division of Computer Science and Engineering, Center for Advanced Image and Information Technology, Chonbuk National University, Jeonju, Republic of Korea</b></td><td>1</td></tr><tr><td><b>Division of Computer Science and Engineering, Chonbuk National University, Jeonju, Republic of Korea</b></td><td>1</td></tr><tr><td><b>University of Lincoln, U. 
K.</b></td><td>1</td></tr><tr><td><b>School of Mathematics and Computer Science, Quanzhou Normal University, Quanzhou, China</b></td><td>1</td></tr><tr><td><b>Department of Electrical Engineering, Chang Gung University, Taipei, Taiwan</b></td><td>1</td></tr><tr><td><b>School of Information Technology, Monash University Malaysia, Bandar Sunway, Malaysia</b></td><td>1</td></tr><tr><td><b>College of Engineering, Huaqiao University, Fujian, China</b></td><td>1</td></tr><tr><td>Department of Electrical Engineering and Information Technology, TU Darmstadt, D-64283, Germany</td><td>1</td></tr><tr><td>Institute of Neural Information Processing, University of Ulm, Ulm, Germany</td><td>1</td></tr><tr><td>Institute for Information Technology and Communications (IIKT), Otto-von-Guericke-University Magdeburg, Magdeburg, Germany</td><td>1</td></tr><tr><td>Faculty of Biomedical Engineering, Amirkabir University of Technology (Tehran Polytechnic), Tehran, Iran</td><td>1</td></tr><tr><td><b>University of Technology Sydney, Broadway, Australia</b></td><td>1</td></tr><tr><td><b>School of Computer Science and Software Engineering, University of Wollongong, Wollongong, Australia</b></td><td>1</td></tr><tr><td>Defence Science and Technology Organisation (DSTO), Edinburgh, Australia</td><td>1</td></tr><tr><td>Reallusion Corporation</td><td>1</td></tr><tr><td><b>National Taiwan Normal University</b></td><td>1</td></tr><tr><td><b>University College London</b></td><td>1</td></tr><tr><td><b>Keio University, Tokyo, Japan</b></td><td>1</td></tr><tr><td><b>The University of Tokyo, Bunkyo, Tokyo, Japan</b></td><td>1</td></tr><tr><td><b>Keio University, Yokohama City, Kanagawa, Japan</b></td><td>1</td></tr><tr><td><b>Keio University, Yokohama City, Japan</b></td><td>1</td></tr><tr><td>National Institute of Advanced Industrial Science and Technology (AIST), Koto, Tokyo, Japan</td><td>1</td></tr><tr><td><b>Research Center for Institute of Information Science, Academia Sinica, 
Taiwan</b></td><td>1</td></tr><tr><td><b>Department of Computer Science and Information Engineering, National Taiwan University</b></td><td>1</td></tr><tr><td><b>Department of Statistics, Carnegie Mellon University, Pittsburgh, USA</b></td><td>1</td></tr><tr><td>Computer Application Research Center, Harbin Institute of Technology Shenzhen Graduate School, Shenzhen, China</td><td>1</td></tr><tr><td>Dept. of EE, Univ. at Buffalo, SUNY, USA</td><td>1</td></tr><tr><td>Department of Computer Science, Minjiang University, Fuzhou, People’s Republic of China</td><td>1</td></tr><tr><td><b>Automotive Engineering Research Institute, Jiangsu University, Zhenjiang, People’s Republic of China</b></td><td>1</td></tr><tr><td><b>Istanbul Technical University, Computer Engineering Department, 34469, Turkey</b></td><td>1</td></tr><tr><td><b>Department of Electronic Engineering, City University of Hong Kong, Hong Kong</b></td><td>1</td></tr><tr><td><b>School of Information Technology and Electrical Engineering, the University of Queensland, Brisbane, Qld, Australia</b></td><td>1</td></tr><tr><td><b>School of Computer Science and Engineering, University of Electronic Science and Technology of China, Chengdu</b></td><td>1</td></tr><tr><td><b>University of Electronic Science and Technology of China, Chengdu</b></td><td>1</td></tr><tr><td>Institute of High Performance Computing and Networking, National Research Council of Italy (ICAR-CNR), Naples, Italy</td><td>1</td></tr><tr><td><b>IBM Research, Singapore</b></td><td>1</td></tr><tr><td><b>Center for Applied Mathematics, Tianjin University, Tianjin, China</b></td><td>1</td></tr><tr><td><b>Department of Mathematics, School of Science, Tianjin University, Tianjin, China</b></td><td>1</td></tr><tr><td><b>Faculty of Applied Mathematics, Shanxi University of Finance and Economics</b></td><td>1</td></tr></table></body></html> \ No newline at end of file
diff --git a/scraper/reports/doi_institutions_geocoded.csv b/scraper/reports/doi_institutions_geocoded.csv
new file mode 100644
index 00000000..843ab2ba
--- /dev/null
+++ b/scraper/reports/doi_institutions_geocoded.csv
@@ -0,0 +1,1430 @@
+61831364ddc8db869618f1c7f0ad35ab2ab6bcf7,Heterogeneous feature code for expression recognition,National Taiwan University of Science and Technology,National Taiwan University of Science and Technology,"臺科大, 43, 基隆路四段, 學府里, 下內埔, 大安區, 臺北市, 10607, 臺灣",25.01353105,121.54173736,edu,
+61a3c45c9f802f9d5fa8d94fee811e203bac6487,A Customized Sparse Representation Model With Mixed Norm for Undersampled Face Recognition,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+6159908dec4bc2c1102f416f8a52a31bf3e666a4,Local gradient increasing pattern for facial expression recognition,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+610779e90b644cc18696d7ac7820d3e0598e24d0,Robust Representation and Recognition of Facial Emotions Using Extreme Sparse Learning,"IBM Research, North Carolina",IBM Research,"IBM, East Cornwallis Road, Research Triangle Park, Nelson, Durham County, North Carolina, 27709, USA",35.90422720,-78.85565763,company,
+61e2044184d86d0f13e50ecaa3da6a4913088c76,Beyond Frame-level CNN: Saliency-Aware 3-D CNN With LSTM for Video Action Recognition,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+61329bc767152f01aa502989abc854b53047e52c,A Two-Stage Approach to Robust Tensor Decomposition,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+61b22b1016bf13aca8d2e57c4e5e004d423f4865,Sliced Inverse Regression With Adaptive Spectral Sparsity for Dimension Reduction,City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+0d7fcdb99dc0d65b510f2b0b09d3d3cfed390261,Robust face recognition with class dependent factor analysis,Istanbul Technical University,Istanbul Technical University,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye",41.10427915,29.02231159,edu,
+0d8cec1b3f9b6e25d9d31eeb54d8894a1f2ef84f,Deep Coupled ResNet for Low-Resolution Face Recognition,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+0de1450369cb57e77ef61cd334c3192226e2b4c2,"In defense of low-level structural features and SVMs for facial attribute classification: Application to detection of eye state, Mouth State, and eyeglasses in the wild",Virginia Polytechnic Institute and State University,Virginia Polytechnic Institute and State University,"Virginia Polytechnic Institute and State University, Duck Pond Drive, Blacksburg, Montgomery County, Virginia, 24061-9517, USA",37.21872455,-80.42542519,edu,
+95f1790da3d0a4a5310a050512ce355b3c5aac86,Towards temporal adaptive representation for video action recognition,Canon,"Innovation Center, Canon USA Inc., San Jose, California","2680 Zanker Rd #100, San Jose, CA 95134, USA",37.38976400,-121.92459300,company,
+95b9df34bcf4ae04beea55c11cf0cc4095aa38dc,A One Bit Facial Asymmetry Code (FAC) in Fourier Domain for Human Recognition,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+95289007f2f336e6636cf8f920225b8d47c6e94f,Automatic Training Image Acquisition and Effective Feature Selection From Community-Contributed Photos for Facial Attribute Detection,"Academia Sinica, Taiwan","Research Center for Institute of Information Science, Academia Sinica, Taiwan","115, Taiwan, Taipei City, Nangang District, 研究院路二段128號",25.04117270,121.61465180,edu,
+95023e3505263fac60b1759975f33090275768f3,Facial Expression Recognition in Daily Life by Embedded Photo Reflective Sensors on Smart Eyewear,Keio University,Keio University,"綱島市民の森, けつわり坂, 港北区, 横浜市, 神奈川県, 関東地方, 223-0053, 日本",35.54169690,139.63471840,edu,
+952138ae6534fad573dca0e6b221cdf042a36412,Flexible Template and Model Matching Using Intensity,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+9590b09c34fffda08c8f54faffa379e478f84b04,Efficient Dual Approach to Distance Metric Learning,University of Wollongong,University of Wollongong,"University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia",-34.40505545,150.87834655,edu,
+95d858b39227edeaf75b7fad71f3dc081e415d16,Minimum-risk temporal alignment of videos,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+5957936195c10521dadc9b90ca9b159eb1fc4871,LBP-ferns-based feature extraction for robust facial recognition,Korea University,Korea University,"고려대, 안암로, 제기동, 동대문구, 서울특별시, 02796, 대한민국",37.59014110,127.03623180,edu,
+59fe66eeb06d1a7e1496a85f7ffc7b37512cd7e5,Robust feature encoding for age-invariant face recognition,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+592370b4c7b58a2a141e507f3a2cc5bbd247a62e,Teaching emotion expressions to a human companion robot using deep neural architectures,Universität Hamburg,Universität Hamburg,"Informatikum, 30, Vogt-Kölln-Straße, Stellingen, Eimsbüttel, Hamburg, 22527, Deutschland",53.59948200,9.93353436,edu,
+59b83666c1031c3f509f063b9963c7ad9781ca23,Hierarchical Committee of Deep CNNs with Exponentially-Weighted Decision Fusion for Static Facial Expression Recognition,Korea Advanced Institute of Science and Technology,Korea Advanced Institute of Science and Technology,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.36971910,127.36253700,edu,
+9255d3b2bfee4aaae349f68e67c76a077d2d07ad,Ocular Recognition for Blinking Eyes,Quanzhou Normal University,Quanzhou Normal University,"泉州师范学院, 东滨路, 丰泽区, 丰泽区 (Fengze), 泉州市 / Quanzhou, 福建省, 362000, 中国",24.87147415,118.66738687,edu,
+92de9a54515f4ac8cc8e4e6b0dfab20e5e6bb09d,Quality scores for deep regression systems,FDNA Israel,"FDNA inc., Herzliya, Israel","Sapir St 5, Herzliya, Israel",32.16388240,34.81158620,company,"5 Sapir St., Ampa House, Herzliya, Israel 4685209"
+0c378c8dcf707145e1e840a9951519d4176a301f,Dynamic detection rate-based bit allocation for biometric discretization,Yonsei University,Yonsei University,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.56004060,126.93692480,edu,
+0c2370e156a4eb8d84a5fdb049c5a894c3431f1c,Biometric template update under facial aging,Bahçeşehir University,Bahcesehir University,"BAU Galata, 24, Kemeraltı Caddesi, Müeyyedzade, Beyoğlu, İstanbul, Marmara Bölgesi, 34425, Türkiye",41.02451875,28.97697953,edu,
+0c6a18b0cee01038eb1f9373c369835b236373ae,Learning warps based similarity for pose-unconstrained face recognition,Chonbuk National University,Chonbuk National University,"전북대학교, 567, 백제대로, 금암동, 덕진구, 전주시, 전북, 54896, 대한민국",35.84658875,127.13501330,edu,
+6689aee6c9599c1af4c607ea5385ac0c2cf0c4b3,Fast and compact Kronecker-structured dictionary learning for classification and representation,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+66ec085c362f698b40d6e0e7b10629462280c062,Illumination compensation and normalization using logarithm and discrete cosine transform,"Nanyang Technological University, Singapore","Comput. Control Lab, Nanyang Technol. Univ., Singapore","50 Nanyang Avenue, Block N4 #02a-32, Singapore 639798",1.34619520,103.68154990,edu,
+661c78a0e2b63cbdb9c20dcf89854ba029b6bc87,Structure-aware multi-object discovery for weakly supervised tracking,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+66f4d7c381bd1798703977de2e38b696c6641b77,Recognizing fleeting facial expressions with different viewpoints,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+6688b2b1c1162bc00047075005ec5c7fca7219fd,Face expression recognition: A brief overview of the last decade,West University of Timișoara,"Faculty of Electronics and Telecommunications “POLITEHNICA” University from Timişoara Timişoara, România","Bulevardul Vasile Pârvan, Timișoara, Romania",45.74728570,21.22630020,edu,
+660c99ac408b535bb0468ab3708d0d1d5db30180,An improved redundant dictionary based on sparse representation for face recognition,China University of Mining and Technology,China University of Mining and Technology,"China University of Mining and Technology, 1号, 大学路, 泉山区 (Quanshan), 徐州市 / Xuzhou, 江苏省, 221116, 中国",34.21525380,117.13985410,edu,
+3e01f2fefe219bfeb112f1d82e76ebba4c0e2aac,Transfer learning via attributes for improved on-the-fly classification,"Technicolor, France","Technicolor, France","Technicolor, Rue d'Oradour-sur-Glane, Javel, 15e, Paris, Île-de-France, France métropolitaine, 75015, France",48.83153300,2.28066283,edu,
+3e0035b447d0d4e11ceda45936c898256f321382,Combining shape regression model and isophotes curvature information for eye center localization,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+3e1190655cc7c1159944d88bdbe591b53f48d761,Ensemble canonical correlation analysis,Bogazici University,Bogazici University,"Boğaziçi Üniversitesi Kuzey Yerleşkesi, Okulaltı 1. Sokak, Rumelihisarı, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34467, Türkiye",41.08688410,29.04413167,edu,
+3e452ca67e17e4173ec8dfbd4a2b803ad2ee5a48,A privacy framework for the Internet of Things,National University of Ireland Galway,National University of Ireland Galway,"National University of Ireland, Galway, Earl's Island, Townparks, Nun's Island, Galway Municipal District, Cathair na Gaillimhe, County Galway, Connacht, H91 F5TE, Ireland",53.27639715,-9.05829961,edu,
+3e0377af0087b9b836bf6d95bc1c7085dfde4897,Heterogeneous Semantic Level Features Fusion for Action Recognition,IBM Thomas J. Watson Research Center,IBM Thomas J. Watson Research Center,"IBM Yorktown research lab, Adams Road, Millwood, Town of New Castle, Westchester County, New York, 10562, USA",41.21002475,-73.80407056,company,
+3e7070323bca6106f19bea4c97ef67bd6249cb5d,Discovery of facial motions using deep machine perception,Queensland University of Technology,Queensland University of Technology,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.47715625,153.02841004,edu,
+3e03d19b950edadc74ca047dec86227282eccf71,Facial Expression Recognition Using Salient Features and Convolutional Neural Network,University of Oslo,University of Oslo,"UiO, Moltke Moes vei, Blindern, Nordre Aker, Oslo, 0851, Norge",59.93891665,10.72170765,edu,
+3e2b9ffeb708b4362ebfad95fa7bb0101db1579d,"A Spontaneous Micro-expression Database: Inducement, collection and baseline",University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+503b6a490c2b24b9d2aaf642a0fdaf797a8cdb99,Patch-Based Principal Covariance Discriminative Learning for Image Set Classification,Guangzhou University,Guangzhou University,"广州大学, 大学城中环西路, 广州大学城, 南村镇, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.04436505,113.36668458,edu,
+504d2675da7a56a36386568ee668938df6d82bbe,Regularized Deep Belief Network for Image Attribute Detection,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+502d30c5eac92c7db587d85d080343fbd9bc469e,Domain Specific Learning for Newborn Face Recognition,"Indraprastha Institute of Information Technology Delhi, India","Indraprastha Institute of Information Technology Delhi, Delhi, India","Okhla Industrial Estate, Phase III, Near Govind Puri Metro Station, New Delhi, Delhi 110020, India",28.54562820,77.27315050,edu,
+50333790dd98c052dfafe1f9bf7bf8b4fc9530ba,Sparse concept discriminant matrix factorization for image representation,Dalian University of Technology,Dalian University of Technology,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国",38.88140235,121.52281098,edu,
+5039834df68600a24e7e8eefb6ba44a5124e67fc,Modular hierarchical feature learning with deep neural networks for face verification,Beijing Institute of Science and Technology Information,Beijing Institute of Science and Technology Information,"China, Beijing, Haidian, 清河四拔子",40.04332040,116.34181090,edu,
+68c5b4d9ce2a0c75ba515870923a4bd1b7d8f9b5,Crowded scene understanding algorithm based on Two-Stream Residual Network,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+68eb6e0e3660009e8a046bff15cef6fe87d46477,Multi-dropout regression for wide-angle landmark localization,National Taiwan University of Science and Technology,National Taiwan University of Science and Technology,"臺科大, 43, 基隆路四段, 學府里, 下內埔, 大安區, 臺北市, 10607, 臺灣",25.01353105,121.54173736,edu,
+68e6cfb0d7423d3fae579919046639c8e2d04ad7,Multi-task ConvNet for blind face inpainting with application to face verification,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+68f19f06f49aa98b676fc6e315b25e23a1efb1f0,Robust pose normalization for face recognition under varying views,"Samsung SAIT, Korea","Advanced Media Lab. Samsung Advance Institute of Technology, Republic of Korea","130 Samseong-ro, Maetan 3(sam)-dong, Yeongtong-gu, Suwon, Gyeonggi-do, South Korea",37.25202260,127.05550190,company,
+68c1090f912b69b76437644dd16922909dd40d60,Robust and Accurate Shape Model Matching Using Random Forest Regression-Voting,University of Manchester,University of Manchester,"University of Manchester - Main Campus, Brunswick Street, Curry Mile, Ardwick, Manchester, Greater Manchester, North West England, England, M13 9NR, UK",53.46600455,-2.23300881,edu,
+68021c333559ab95ca10e0dbbcc8a4840c31e157,A framework for joint facial expression recognition and point localization,Otto-von-Guericke-University Magdeburg,"Institute for Information Technology and Communications (IIKT), Otto-von-Guericke-University Magdeburg, D-39016, P.O. Box 4210 Germany","Universitätspl. 2, 39106 Magdeburg, Germany",52.14020530,11.64419910,edu,
+57eeaceb14a01a2560d0b90d38205e512dcca691,Recurrent Spatial-Temporal Attention Network for Action Recognition in Videos,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+5763b09ebca9a756b4adebf74d6d7de27e80e298,Picture-specific cohort score normalization for face pair matching,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+572dbaee6648eefa4c9de9b42551204b985ff863,The more the merrier: Analysing the affect of a group of people in images,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+57ca530e9acb63487e8591cb6efb89473aa1e5b4,Multilayer Surface Albedo for Face Recognition With Reference Images in Bad Lighting Conditions,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+578117ff493d691166fefc52fd61bad70d8752a9,Dealing with occlusions in face recognition by region-based fusion,Universidad Autonoma de Madrid,Universidad Autonoma de Madrid,"Facultad de Medicina de la Universidad Autónoma de Madrid, Calle de Arturo Duperier, Fuencarral, Fuencarral-El Pardo, Madrid, Área metropolitana de Madrid y Corredor del Henares, Comunidad de Madrid, 28001, España",40.48256135,-3.69060790,edu,
+5721cd4b898f0e7df8de1e0215f630af94656be9,Retouch transfer for 3D printed face replica with automatic alignment,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+5779e3e439c90d43648db107e848aeb954d3e347,Graph Regularized Restricted Boltzmann Machine,"Sichuan University, Chengdu","Sichuan Univ., Chengdu","四川大学(华西校区), 校东路, 武侯区, 武侯区 (Wuhou), 成都市 / Chengdu, 四川省, 610014, 中国",30.64276900,104.06751175,edu,
+5748652924084b7b0220cddcd28f6b2222004359,Large-Cone Nonnegative Matrix Factorization,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+3b350afd8b82487aa97097170c269a25daa0c82d,Sparse Simultaneous Recurrent Deep Learning for Robust Facial Expression Recognition,Old Dominion University,Old Dominion University,"Old Dominion University, Elkhorn Avenue, Lamberts Point, Norfolk, Virginia, 23508, USA",36.88568200,-76.30768579,edu,
+3ba74755c530347f14ec8261996dd9eae896e383,A Low-Power Convolutional Neural Network Face Recognition Processor and a CIS Integrated With Always-on Face Detector,Korea Advanced Institute of Science and Technology,Korea Advanced Institute of Science and Technology,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.36971910,127.36253700,edu,
+3b8c830b200f1df8ef705de37cbfe83945a3d307,Annotated face model-based alignment: a robust landmark-free pose estimation approach for 3D model registration,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+3bf8e4d89b9e6d004de6ea52e3e9d68f6015f94b,"Binary ""proximity patches motion"" descriptor for action recognition in videos",University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+3bcb93aa2a5e5eda039679516292af2f7c0ff9ac,Terrain classification of hyperspectral remote sensing images based on kernel maximum margin criterion,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+3bf579baf0903ee4d4180a29739bf05cbe8f4a74,Facial Expression Biometrics Using Tracker Displacement Features,University at Buffalo,State University of New York at Buffalo,"Buffalo, NY 14260, USA",43.00080930,-78.78896970,edu,
+3bfa630a6dc6d1ca98e7b43c90dd9e8b98e361d6,Deep CCA based super vector for action recognition,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+3bd10f7603c4f5a4737c5613722124787d0dd818,An Efficient Joint Formulation for Bayesian Face Verification,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+3b75681f0162752865d85befd8b15e7d954ebfe6,Evaluation of a face recognition system performance's variation on a citizen passports database,"Universidad de la República, Uruguay","Facultad de Ingeniería, Universidad de la República, Montevideo, Uruguay","Ave Julio Herrera y Reissig 565, 11300 Montevideo, Uruguay",-34.91817060,-56.16657250,edu,
+3b64b8be33887e77e6def4c385985e43e2c15eea,Understanding Deep Representations Learned in Modeling Users Likes,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+6feafc5c1d8b0e9d65ebe4c1512b7860c538fbdc,Smart Facial Age Estimation with Stacked Deep Network Fusion,National Chung Hsing University,National Chung Hsing University,"國立中興大學, 145, 興大路, 積善里, 頂橋子頭, 南區, 臺中市, 402, 臺灣",24.12084345,120.67571165,edu,
+6f22324fab61fbc5df1aac2c0c9c497e0a7db608,Volume structured ordinal features with background similarity measure for video face recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+6f16f4bd01aeefdd03d6783beacb7de118f5af8a,A multi-label classification approach for Facial Expression Recognition,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+6fdf2f4f7ae589af6016305a17d460617d9ef345,Robust facial landmark localization using multi partial features,Kumamoto University,Kumamoto University,"熊本大学黒髪キャンパス, 熊本菊陽線, 中央区, 熊本市, 熊本県, 九州地方, 860-0863, 日本",32.81641780,130.72703969,edu,
+6f48e5e258da11e6ba45eeabe65a5698f17e58ef,Online whole-word and stroke-based modeling for hand-written letter recognition in in-car environments,Bosch Research and Technology Center,"Robert BOSCH Research and Technology Center, Palo Alto, CA 94304, USA","4009 Miranda Ave, Palo Alto, CA 94304, USA",37.40048670,-122.13643830,company,
+0387b32d0ebd034dc778972367e7d4194223785d,Emotion recognition with boosted tree classifiers,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+03333e7ec198208c13627066bc76b0367f5e270f,Action unit selective feature maps in deep networks for facial expression recognition,Hong Kong University of Science and Technology,Hong Kong University of Science and Technology,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.33863040,114.26203370,edu,
+03babadaaa7e71d4b65203e27e8957db649155c6,Distance Metric Learning via Iterated Support Vector Machines,Xi’an Jiaotong University,"Institute of Information and System Sciences, Faculty of Mathematics and Statistics, Xi’an Jiaotong University, Xi’an, China","28 Xianning W Rd, JiaoDa ShangYe JieQu, Beilin Qu, Xian Shi, Shaanxi Sheng, China",34.25080300,108.98369300,edu,
+035c8632c1ffbeb75efe16a4ec50c91e20e6e189,Kinship verification from facial images and videos: human versus machine,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+034b3f3bac663fb814336a69a9fd3514ca0082b9,Unifying holistic and Parts-Based Deformable Model fitting,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+0343f9401b98de36be957a30209fef45dd684270,On video based face recognition through adaptive sparse dictionary,Alcohol Countermeasure Systems Corporation,"Alcohol Countermeasure Systems Corporation, Toronto, ON, Canada","60 International Blvd, Etobicoke, ON M9W 6J2, Canada",43.67813310,-79.58748570,company,
+9b9f6e5eb6d7fa50300d67502e8fda1006594b84,Learning to Recognise Unseen Classes by A Few Similes,University of East Anglia,University of East Anglia,"Arts (Lower Walkway Level), The Square, Westfield View, Earlham, Norwich, Norfolk, East of England, England, NR4 7TJ, UK",52.62215710,1.24091360,edu,
+9b1022a01ca4ecf8c1fa99b1b39a93924de2fcfb,IPST: Incremental Pictorial Structures for Model-Free Tracking of Deformable Objects,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+9bd3cafa16a411815f8f87ed3eb3cafefc25e5a3,Landmark manifold: Revisiting the Riemannian manifold approach for facial emotion recognition,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+9b6d9f0923e1d42c86a1154897b1a9bd7ba6716c,Mining Latent Attributes From Click-Through Logs for Image Recognition,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+9b4d2cd2e5edbf5c8efddbdcce1db9a02a853534,Exponential Discriminant Locality Preserving Projection for face recognition,Jiangsu University of Science and Technology,Jiangsu University of Science and Technology,"江苏科技大学, 学府路, 京口区, 象山街道, 京口区 (Jingkou), 镇江市 / Zhenjiang, 江苏省, 212000, 中国",32.19805500,119.46326791,edu,
+9e8382aa1de8f2012fd013d3b39838c6dad8fb4d,Learning Object-Centric Transformation for Video Prediction,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+9efdb73c6833df57732b727c6aeac510cadb53fe,Face image generation system using attribute information with DCGANs,Keio University,Keio University,"綱島市民の森, けつわり坂, 港北区, 横浜市, 神奈川県, 関東地方, 223-0053, 日本",35.54169690,139.63471840,edu,
+9e7646b7e9e89be525cda1385cc1351cc28a896e,Sensor-Assisted Multi-View Face Recognition System on Smart Glass,University of New South Wales,University of New South Wales,"UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia",-33.91758275,151.23124025,edu,
+9e2ab407ff36f3b793d78d9118ea25622f4b7434,Local generic representation for patch uLBP-based face recognition with single training sample per subject,"University of Sfax, Tunisia","REGIM-Labo: REsearch Groups in Intelligent Machines, University of Sfax, ENIS, BP 1173, Sfax, 3038, Tunisia","Université de Route de l'Aéroport Km 0.5 BP 1169 .3029 Sfax, Sfax, Tunisia",34.73610660,10.74272750,edu,"University of Sfax, Tunisia"
+9eaa967d19fc66010b7ade7d94eaf7971a1957f3,Segmentation-based illumination normalization for face detection,Tokyo Institute of Technology,Tokyo Institute of Technology,"東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本",35.51675380,139.48342251,edu,
+9eb13f8e8d948146bfbae1260e505ba209c7fdc1,Demo: Robust face recognition via sparse representation,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+9ef06cc958af2274afd193a1dca705c08234bcd3,Facial expression recognition using statistical subspace,Ho Chi Minh City University of Science,"Faculty of Information Technology, Ho Chi Minh City University of Science, VNU-HCM, District 5, Ho Chi Minh City, Vietnam","227 Đường Nguyễn Văn Cừ, Phường 4, Quận 5, Hồ Chí Minh, Vietnam",10.76252160,106.68232620,edu,
+047d3cb2a6a9628b28cac077b97d95b04ca9044c,A robust composite metric for head pose tracking using an accurate face model,University of the French West Indies,"Université des Antilles et de la Guyane (UAG), France","Fouillole, Pointe-à-Pitre 97157, Guadeloupe",16.22427240,-61.52893250,edu,University of the French West Indies
+041b51a81a977b5c64682c55414ad8d165c1f2ce,Voice authentication embedded solution for secured access control,AGH University of Science and Technology,AGH University of Science and Technology,"AGH, Władysława Reymonta, Czarna Wieś, Krowodrza, Kraków, małopolskie, 30-059, RP",50.06570330,19.91895867,edu,
+04c07ecaf5e962ac847059ece3ae7b6962b4e5c4,Multi-view common space learning for emotion recognition in the wild,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+047ce307ad0c871bc2c9a5c1e4649cefae2ba50d,Real-time emotion identification for socially intelligent robots,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+6a38e4bb35673a73f041e34d3f2db7067482a9b5,Emotion Recognition in the Wild with Feature Fusion and Multiple Kernel Learning,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+6adecb82edbf84a0097ff623428f4f1936e31de0,Client-specific A-stack model for adult face verification across aging,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+6a5d7d20a8c4993d56bcf702c772aa3f95f99450,Face recognition with temporal invariance: A 3D aging model,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+6aa0a47f4b986870370c622be51f00f3a1b9d364,Coupled Kernel Embedding for Low-Resolution Face Image Recognition,City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+3266fbaaa317a796d0934b9a3f3bb7c64992ac7d,Rotational Linear Discriminant Analysis Technique for Dimensionality Reduction,Griffith University,Griffith University,"Griffith University Nathan Campus, Johnson Path, Nathan, Nathan Heights, QLD, 4111, Australia",-27.55339750,153.05336234,edu,
+32f62da99ec9f58dd93e3be667612abcf00df16a,Octagonal prism LBP representation for face recognition,Yonsei University,Yonsei University,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.56004060,126.93692480,edu,
+32b76220ed3a76310e3be72dab4e7d2db34aa490,Class specific subspace learning for collaborative representation,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+32c5c65db2af9691f8bb749c953c978959329f8f,Recovering intrinsic images from image sequences using total variation models,Jilin University,Jilin University,"吉林大学珠海校区, 丹桂路, 圣堂村, 金湾区, 珠海市, 广东省, 中国",22.05356500,113.39913285,edu,
+328da943e22adef5957c08b6909bda09d931a350,On intelligent surveillance systems and face recognition for mass transport security,"CSIRO, Australia","NICTA, PO Box 6020, St Lucia, QLD 4067, Australia","Research Way, Clayton VIC 3168, Australia",-37.90627370,145.13194490,edu,f.k.a. NICTA
+3288e16c62a215254e2ed7c39675482b356c3bef,Facial expression recognition system based on a face statistical model and Support Vector Machines,Politehnica University of Timisoara,Politehnica University of Timisoara,"UPT, Bulevardul Vasile Pârvan, Elisabetin, Timișoara, Timiș, 300223, România",45.74618900,21.22755075,edu,
+329b2781007604652deb72139d14315df3bc2771,Fusing Pointwise and Pairwise Labels for Supporting User-adaptive Image Retrieval,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+32e9c9520cf6acb55dde672b73760442b2f166f5,Joint Semantic and Latent Attribute Modelling for Cross-Class Transfer Learning,"London, United Kingdom","London, United Kingdom","London, Greater London, England, SW1A 2DU, UK",51.50732190,-0.12764740,edu,
+3251f40ed1113d592c61d2017e67beca66e678bb,Improving Face Pose Estimation Using Long-Term Temporal Averaging for Stochastic Optimization,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+356a144d2aa5cc5e74d178dae3963003871aa8a1,Learning Relative Aesthetic Quality with a Pairwise Approach,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+359edbaa9cf56857dd5c7c94aaef77003ba8b860,Human Behavior Understanding,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+35d90beea6b4dca8d949aae93f86cf53da72971f,Face tracking in low resolution videos under illumination variations,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+3598d10d7d4f2b543afa8bcf6b2c34a3696ef155,Fusion of probabilistic collaborative and sparse representation for robust image classification,Jiangsu University,Jiangsu University,"江苏大学, 301, 学府路, 京口区, 象山街道, 京口区 (Jingkou), 镇江市 / Zhenjiang, 江苏省, 212013, 中国",32.20302965,119.50968362,edu,
+35d272877b178aa97c678e3fcbb619ff512af4c2,A multi-scale fusion convolutional neural network for face detection,Chongqing University of Posts and Telecommunications,Chongqing University of Posts and Telecommunications,"重庆邮电大学, 崇文路, 渝中区, 黄桷垭, 重庆市中心, 南岸区 (Nan'an), 重庆市, 400065, 中国",29.53570460,106.60482474,edu,
+35d42f4e7a1d898bc8e2d052c38e1106f3e80188,Human and algorithm performance on the PaSC face Recognition Challenge,University of Texas at Dallas,University of Texas at Dallas,"University of Texas at Dallas, Richardson, Dallas County, Texas, 78080, USA",32.98207990,-96.75662780,edu,
+69a9cf9bc8e585782824666fa3fb5ce5cf07cef2,Captioning Videos Using Large-Scale Image Corpus,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+69ba86f7aac7b7be0ac41d990f5cd38400158f96,Discriminative Feature Extraction by a Neural Implementation of Canonical Correlation Analysis,Istanbul University,Istanbul University,"İstanbul Üniversitesi, Besim Ömerpaşa Caddesi, Süleymaniye, Fatih, İstanbul, Marmara Bölgesi, 34116, Türkiye",41.01324240,28.96376090,edu,
+69ad67e204fb3763d4c222a6c3d05d6725b638ed,Capture expression-dependent AU relations for expression recognition,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+69a41c98f6b71764913145dbc2bb4643c9bc4b0a,Learning Match Kernels on Grassmann Manifolds for Action Recognition,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"Columbus, OH 43210, USA",40.01419050,-83.03091430,edu,
+695426275dee2ec56bc0c0afe1c5b4227a350840,Pooling the Convolutional Layers in Deep ConvNets for Video Action Recognition,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+696236fb6f986f6d5565abb01f402d09db68e5fa,Learning adaptive receptive fields for deep image parsing networks,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+3c1b73509cc09200e96ab9cfb28ebfd9d1d6aa9a,Nonoptimality of the Maximum-Weight Dependence Tree in Classification,Nazarbayev University,Nazarbayev University,"Назарбаев Университет, проспект Туран, BI village, Астана, район Есиль, Астана, 010000, Казахстан",51.09028540,71.39725263,edu,
+3cb057a24a8adba6fe964b5d461ba4e4af68af14,Perceptual Annotation: Measuring Human Vision to Improve Computer Vision,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+3c09fb7fe1886072670e0c4dd632d052102a3733,Content-Attention Representation by Factorized Action-Scene Network for Action Recognition,Beijing Institute of Technology,Beijing Institute of Technology University,"北京理工大学, 5, 中关村南大街, 中关村, 稻香园南社区, 海淀区, 北京市, 100872, 中国",39.95866520,116.30971281,edu,
+3c09d15b3e78f38618b60388ec9402e616fc6f8e,Neural networks recognition rate as index to compare the performance of fuzzy edge detectors,"Tijuana Institute of Technology, Mexico","Division of Graduate Studies of Tijuana Institute Technology, Mexico","San Diego, CA 92161, USA",32.87853490,-117.23583070,edu,
+3ce96f03874d42345c0727edc78b6949b20b4a11,Image attribute learning with ontology guided fused lasso,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+3c18fb8ff0f5003fefa8e9dc9bebaf88908d255c,Is block matching an alternative tool to LBP for face recognition?,"KTH Royal Institute of Technology, Stockholm","KTH Royal Institute of Technology, Stockholm","KTH, Teknikringen, Lärkstaden, Norra Djurgården, Östermalms stadsdelsområde, Sthlm, Stockholm, Stockholms län, Svealand, 114 28, Sverige",59.34986645,18.07063213,edu,
+3ce37af3ac0ed2eba08267a3605730b2e0433da5,Hierarchical Representation Learning for Kinship Verification,"Indraprastha Institute of Information Technology Delhi, India","Indraprastha Institute of Information Technology Delhi, Delhi, India","Okhla Industrial Estate, Phase III, Near Govind Puri Metro Station, New Delhi, Delhi 110020, India",28.54562820,77.27315050,edu,
+3cd22b5b81a0172d608ff14be71b755d1f68c201,Face Recognition Using Composite Features Based on Discriminant Analysis,Chung-Ang University,Chung-Ang University,"중앙대학교, 서달로15길, 흑석동, 동작구, 서울특별시, 06981, 대한민국",37.50882000,126.96190000,edu,
+3cc2a2eaaacbf96c6b9abc1cf91bfefabf6fcfdd,A High-Efficiency and High-Accuracy Fully Automatic Collaborative Face Annotation System for Distributed Online Social Networks,National Taipei University of Technology,National Taipei University of Technology,"NTUT, 1, 忠孝東路三段, 民輝里, 東區商圈, 大安區, 臺北市, 10608, 臺灣",25.04306355,121.53468772,edu,
+562f7555e5cb79ce0fe834c4613264d8378dd007,Spatio-temporal texture-based feature extraction for spontaneous facial expression recognition,Monash University Malaysia,Monash University Malaysia,"Monash University Malaysia, Jalan Lagoon Selatan, Kampung Lembah Kinrara, SS13, Subang Jaya, Selangor, 47500, Malaysia",3.06405715,101.60059740,edu,
+5642bafa7955b69f05c11230151cd59fcbe43b8e,SN-SVM: a sparse nonparametric support vector machine classifier,University of Windsor,University of Windsor,"Bridge AA, Ambassador Bridge, Windsor, Essex, Ontario, N9C 2J9, Canada",42.30791465,-83.07176915,edu,
+56fb30b24e7277b47d366ca2c491749eee4d6bb1,Using Bayesian statistics and Gabor Wavelets for recognition of human faces,Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.02223470,77.56718325,edu,
+56e25358ebfaf8a8b3c7c33ed007e24f026065d0,V-shaped interval insensitive loss for ordinal classification,Czech Technical University,Czech Technical University,"České vysoké učení technické v Praze, Resslova, Nové Město, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 11121, Česko",50.07642960,14.41802312,edu,
+568ced900cbf7437c9e87b60a17e16f0c1e0c442,A novel algorithm for illumination invariant DCT-based face recognition,"IntelliView Technologies, Calgary, Canada","IntelliView Technologies Inc., Calgary, AB, Canada","205, 327 - 41st Ave NE, Calgary, AB T2E 2N4, Canada",51.08994730,-114.05591380,company,
+5613cb13ab381c8a8b81181ac786255705691626,DeepEmo: Real-world facial expression analysis via deep learning,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+566563a02dbaebec07429046122426acd7039166,Improving Convolutional Neural Networks Via Compacting Features,"Collaborative Innovation Center for Geospatial Information Technology, Wuhan, China","Collaborative Innovation Center for Geospatial Information Technology, Wuhan, China","Wuhan, Hubei, China",30.59284900,114.30553900,edu,
+5632ba72b2652df3b648b2ee698233e76a4eee65,Reconstruction of 3D facial image using a single 2D image,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+56f57786516dcc8ea3c0ffe877c1363bfb9981d2,Online multimodal matrix factorization for human action video indexing,Universidad Nacional de Colombia,"MindLAB Research Group, Universidad Nacional de Colombia, Colombia","Cra 45, Bogotá, Colombia",4.63819380,-74.08404640,edu,
+565f7c767e6b150ebda491e04e6b1de759fda2d4,"Fine-grained face verification: FGLFW database, baselines, and human-DCMN partnership",Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+51b42da0706a1260430f27badcf9ee6694768b9b,Shape initialization without ground truth for face alignment,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+51410d6bd9a41eacb105f15dbdaee520e050d646,Facial Expression Recognition based on Support Vector Machine using Gabor Wavelet Filter,Varendra University,Varendra University,"department of english Vrendra University, Dhaka - Rajshahi Highway, Talaimari, রাজশাহী, রাজশাহী বিভাগ, 6204, বাংলাদেশ",24.36432310,88.63331050,edu,
+51d6a8a61ea9588a795b20353c97efccec73f5db,Simultaneous facial activity tracking and recognition,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+51f626540860ad75b68206025a45466a6d087aa6,Cluster convolutional neural networks for facial age estimation,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+518a3ce2a290352afea22027b64bf3950bffc65a,Finding iconic images,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+51dcb36a6c247189be4420562f19feb00c9487f8,Towards robust face recognition from multiple views,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+51b770e6b2af994ffc8793f59b24a9f619033a3a,Human attribute analysis using a top-view camera based on multi-stage classification,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+516f8728ad1d4f9f2701a2b5385f8c8e71b9d356,Edge-Aware Spatial Denoising Filtering Based on a Psychological Model of Stimulus Similarity,Nazarbayev University,Nazarbayev University,"Назарбаев Университет, проспект Туран, BI village, Астана, район Есиль, Астана, 010000, Казахстан",51.09028540,71.39725263,edu,
+5167e16b53283be5587659ea8eaa3b8ef3fddd33,Model-based reconstruction for illumination variation in face images,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+51bb86dc8748088a198b216f7e97616634147388,Face age estimation by using Bisection Search Tree,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+3d4b76fe73ea16400d62d0d776b3f43cc5ecf72b,Complementary Cohort Strategy for Multimodal Face Pair Matching,Aalborg University,Aalborg University,"AAU, Pontoppidanstræde, Sønder Tranders, Aalborg, Aalborg Kommune, Region Nordjylland, 9220, Danmark",57.01590275,9.97532827,edu,
+3db6fd6a0e9bb30f2421e84ee5e433683d17d9c1,An automatic region detection and processing approach in genetic programming for binary image classification,Victoria University of Wellington,Victoria University of Wellington,"Victoria University of Wellington, Waiteata Road, Aro Valley, Wellington, Wellington City, Wellington, 6040, New Zealand/Aotearoa",-41.29052775,174.76846919,edu,
+3d89f9b4da3d6fb1fdb33dea7592b5992069a096,Face recognition based on convolution siamese networks,University of the Chinese Academy of Sciences,"The Institute of Optics and Electronics Chinese Academy of Sciences, University of the Chinese Academy of Sciences, Chengdu, China","Chengdu, Sichuan, China",30.57281500,104.06680100,edu,
+5810ce61fda464d4de2769bd899e12727bee0382,Smile detection using Pair-wise Distance Vector and Extreme Learning Machine,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+588bed36b3cc9e2f26c39b5d99d6687f36ae1177,Sparsely Encoded Local Descriptor for face recognition,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+58d43e32660446669ff54f29658961fe8bb6cc72,Automatic detection of obstructive sleep apnea using facial images,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+58684a925693a0e3e4bb1dd2ebe604885be034d2,A kernelized discriminant analysis algorithm based on modified generalized singular value decomposition,Texas Instruments,"Texas Instruments, Dallas, TX, United States","12500 T I Blvd, Dallas, TX 75243, USA",32.91119080,-96.75231540,company,
+58483028445bf6b2d1ad6e4b1382939587513fe1,Modelling and correction of multipath interference in time of flight cameras,University of Alcala,"Geintra Research Group, University of Alcala","Plaza de San Diego, s/n, 28801 Alcalá de Henares, Madrid, Spain",40.48247220,-3.36286740,edu,
+5865b6d83ba6dbbf9167f1481e9339c2ef1d1f6b,Regularized metric adaptation for unconstrained face verification,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+58217ae5423828ed5e1569bee93d491569d79970,Multi-Modal Human Verification Using Face and Speech,Chung-Ang University,Chung-Ang University,"중앙대학교, 서달로15길, 흑석동, 동작구, 서울특별시, 06981, 대한민국",37.50882000,126.96190000,edu,
+58eb9174211d58af76023ce33ee05769de57236c,Submodular Attribute Selection for Visual Recognition,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+58d0c140597aa658345230615fb34e2c750d164c,Continuous Biometric Verification for Non-Repudiation of Remote Services,University of Florence,University of Florence,"Piazza di San Marco, 4, 50121 Firenze FI, Italy",43.77764260,11.25976500,edu,
+5811944e93a1f3e35ece7a70a43a3de95c69b5ab,Convolutional neural networks for attribute-based active authentication on mobile devices,University of Maryland College Park,University of Maryland College Park,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",38.99203005,-76.94610290,edu,
+58df849378fbcfb6b1a8ebddfbe4caa450226b9d,Head pose estimation using learned discretization,Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+58538cc418bf41197fad4fc4ee2449b2daeb08b1,Face recognition based on the fusion of wavelet packet sub-images and fisher linear discriminant,Guangzhou University,Guangzhou University,"广州大学, 大学城中环西路, 广州大学城, 南村镇, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.04436505,113.36668458,edu,
+67e6ddce6fea17bb2b171c949ee224936d36c0d1,Discriminant spectral analysis for facial expression recognition,Institute of Information Science,Institute of Information Science,"資訊科學研究所, 數理大道, 中研里, 南港子, 南港區, 臺北市, 11574, 臺灣",25.04107280,121.61475620,edu,
+675b1fd2aaebe9c62be6b22b9ac6d278193cc581,2D Cascaded AdaBoost for Eye Localization,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+0b45aeb0aede5e0c19b508ede802bdfec668aefd,Learning facial attributes by crowdsourcing in social media,Institute of Information Science,Institute of Information Science,"資訊科學研究所, 數理大道, 中研里, 南港子, 南港區, 臺北市, 11574, 臺灣",25.04107280,121.61475620,edu,
+0b3144cdc9d6d5a1498d6178db20d1c49fb64de9,"Eliciting, capturing and tagging spontaneous facial affect in autism spectrum disorder",MIT,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+0b8839945259ec764ef0fad47471f34db39f40c3,SVM point-based real-time emotion detection,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+0be015e2f9a1d2acebc3afb6e0f6948dd2f9d23d,Using unlabeled data to improve classification of emotional states in human computer interaction,Ulm University,Ulm University,"HNU, John-F.-Kennedy-Straße, Vorfeld, Wiley, Neu-Ulm, Landkreis Neu-Ulm, Schwaben, Bayern, 89231, Deutschland",48.38044335,10.01010115,edu,
+93af335bf8c610f34ce0cadc15d1dd592debc706,Auxiliary Demographic Information Assisted Age Estimation With Cascaded Structure,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+93d903d2e48d6a8ad3e3d2aff2e57622efe649cd,Local saliency-inspired binary patterns for automatic recognition of multi-view facial expression,"Indian Statistical Institute, Kolkata","Indian Statistical Institute, Kolkata","Plot No. 203, Barrackpore Trunk Road, Baranagar, Kolkata, West Bengal 700108, India",22.64815210,88.37681700,edu,
+93cd5c47e4a3425d23e3db32c6eaef53745bb32e,Adaptive face representation via class-specific and intra-class variation dictionaries for recognition,Changzhou University,"Changzhou University, Changzhou, China","1 Gehu Middle Rd, Wujin Qu, Changzhou Shi, Jiangsu Sheng, China",31.68423700,119.95514100,edu,
+93e1e195f294c463f4832c4686775bf386b3de39,Temporal Variance Analysis for Action Recognition,University of Technology at Sydney,"University of Technology at Sydney, Sydney, NSW, Australia","15 Broadway, Ultimo NSW 2007, Australia",-33.88323760,151.20049420,edu,
+93c0405b1f5432eab11cb5180229720604ffd030,Recognition of Faces and Facial Attributes Using Accumulative Local Sparse Representations,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+93108f1548e8766621565bdb780455023349d2b2,Facial expression synthesis based on motion patterns learned from face database,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+939f9fa056f8be445da19b43da64bd2405851a43,Classifying facial expressions using point-based analytic face model and Support Vector Machines,Concordia Institute for Information Systems Engineering,"Concordia Institute for Information Systems Engineering (CIISE), 1515 St. Catherine West, Montreal, Quebec H3G 2W1, CANADA","1455 Boulevard de Maisonneuve O, Montréal, QC H3G 1M8, Canada",45.49726570,-73.57902270,edu,
+9378ead3a09bc9f89fb711e2746facf399dd942e,Adaptive Region-Based Image Enhancement Method for Robust Face Recognition Under Variable Illumination Conditions,University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+93eb3963bc20e28af26c53ef3bce1e76b15e3209,Occlusion robust face recognition based on mask learning,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+93dd4e512cd7647aecbfc0cd4767adf5d9289c3d,Pose-based composition improvement for portrait photographs,"RMIT University, Vietnam","RMIT University, Vietnam","RMIT University Vietnam - Saigon South Campus, 702, Nguyễn Văn Linh, Khu 3 - Khu Đại học, Phường Tân Phong, Quận 7, Tp HCM, 756604, Việt Nam",10.72991265,106.69320824,edu,
+947cdeb52f694fb1c87fc16836f8877cd83dc652,High-performance and lightweight real-time deep face emotion recognition,Maastricht University,Maastricht University,"UNS60, Professor Ten Hoorlaan, Randwyck, Maastricht, Limburg, Nederland, 6229EV, Nederland",50.83367120,5.71589000,edu,
+9436170c648c40b6f4cc3751fca3674aa82ffe9a,Maximum Margin Discriminant Projections for facial expression recognition,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+946b4d840b026d91608758d04f2763e9b981234e,LUI: lip in multimodal mobile GUI interaction,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+947ee3452e4f3d657b16325c6b959f8b8768efad,Deep multi-view robust representation learning,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+940e5c45511b63f609568dce2ad61437c5e39683,Fiducial Facial Point Extraction Using a Novel Projective Invariant,Dalian University of Technology,Dalian University of Technology,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国",38.88140235,121.52281098,edu,
+0e05b365af662bc6744106a7cdf5e77c9900e967,"Assessment of female facial beauty based on anthropometric, non-permanent and acquisition characteristics",EURECOM,EURECOM,"Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France",43.61438600,7.07112500,edu,
+0e37d70794d5ccfef8b4cc22b4203245f33eec6e,A second order polynomial based subspace projection method for dimensionality reduction,University of Dayton,University of Dayton,"University of Dayton, Caldwell Street, South Park Historic District, Dayton, Montgomery, Ohio, 45409, USA",39.73844400,-84.17918747,edu,
+0e8a28511d8484ad220d3e8dde39220c74fab14b,MSDLSR: Margin Scalable Discriminative Least Squares Regression for Multicategory Classification,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+0e454686f83284ced2ffc5740829552a032671a3,Estimating multimodal attributes for unknown objects,Tokyo Institute of Technology,Tokyo Institute of Technology,"東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本",35.51675380,139.48342251,edu,
+0e5557a0cc58194ad53fab5dd6f4d4195d19ce4e,Deep Aging Face Verification With Large Gaps,"North Acton, London","North Acton, London","North Acton, Victoria Road, Acton, London Borough of Ealing, London, Greater London, England, W3 6UP, UK",51.52344665,-0.25973535,edu,
+0ed4b4d6d1a0c49c4eb619aab36db559b620d99f,Biased subspace learning for misalignment-robust facial expression recognition,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+0ef20991e0ecc7dc3f6e0e5fd6ee93c4970206f3,Facial video super resolution using semantic exemplar components,"Sharp Laboratories of America, Camas, WA","Sharp Laboratories of America, Camas, WA","5750 NW Pacific Rim Blvd, Camas, WA 98607, USA",45.59332750,-122.46110560,company,
+0e2d956790d3b8ab18cee8df6c949504ee78ad42,Scalable face image retrieval integrating multi-feature quantization and constrained reference re-ranking,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+0ed96cc68b1b61e9eb4096f67d3dcab9169148b9,Emotion Recognition in Real-world Conditions with Acoustic and Visual Features,Ulm University,Ulm University,"HNU, John-F.-Kennedy-Straße, Vorfeld, Wiley, Neu-Ulm, Landkreis Neu-Ulm, Schwaben, Bayern, 89231, Deutschland",48.38044335,10.01010115,edu,
+0e02dadab802128f6155e099135d03ca6b72f42c,Learning Balanced and Unbalanced Graphs via Low-Rank Coding,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+608b01c70f0d1166c10c3829c411424d9ef550e7,Facial expression recognition by learning spatiotemporal features with multi-layer independent subspace analysis,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+606dff86a34c67c79d93f1e536487847a5bb7002,Localized support vector machines using Parzen window for incomplete sets of categories,University of Denver,University of Denver,"University of Denver, Driscoll Bridge, Denver, Denver County, Colorado, 80208, USA",39.67665410,-104.96220300,edu,
+604a281100784b4d5bc1a6db993d423abc5dc8f0,Face Verification Across Age Progression Using Discriminative Methods,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+60821d447e5b8a96dd9294a0514911e1141ff620,Real-time facial expression recognition with illumination-corrected image sequences,"Universidad Politécnica Madrid, Spain","Dep. Inteligencia Artificial, U. Politécnica Madrid, Spain","Calle de los Ciruelos, 28660 Boadilla del Monte, Madrid, Spain",40.40462810,-3.83964120,edu,
+607aebe7568407421e8ffc7b23a5fda52650ad93,Face alignment via an ensemble of random ferns,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+605f6817018a572797095b83bec7fae7195b2abc,Principal Gabor filters for face recognition,University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.05015580,14.46907327,edu,
+602f772c69e4a1a65de00443c30d51fdd47a80aa,Face recognition based on the feature fusion of 2DLDA and LBP,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+609d81ddf393164581b3e3bf11609a712ac47522,Fuzzy qualitative approach for micro-expression recognition,"Tunku Abdul Rahman University College, Malaysia","Faculty of Engineering, Tunku Abdul Rahman University College, Setapak, Malaysia","Danau Kota, 53100 Kuala Lumpur, Federal Territory of Kuala Lumpur, Malaysia",3.20685320,101.71829430,edu,
+34c062e2b8a3f6421b9f4ff22f115a36d4aba823,A Dataset and a Technique for Generalized Nuclear Segmentation for Computational Pathology,Indian Institute of Technology Guwahati,"IIT Guwahati, Guwahati, India","Indian Institute of Technology Guwahati - IIT Guwahati, NH27, Amingaon, Guwahati, Kamrup, Assam, 781015, India",26.19247875,91.69463569,edu,
+344c0917c8d9e13c6b3546da8695332f86b57bd3,Semi-supervised multi-output image manifold regression,"IBM Research, North Carolina",IBM Research,"IBM, East Cornwallis Road, Research Triangle Park, Nelson, Durham County, North Carolina, 27709, USA",35.90422720,-78.85565763,company,
+346752e3ab96c93483413be4feaa024ccfe9499f,An Attribute-Assisted Reranking Model for Web Image Search,Hefei University of Technology,Hefei University of Technology,"合肥工业大学(屯溪路校区), 193号, 南一环路, 航运南村, 包公街道, 合肥市区, 合肥市, 安徽省, 230009, 中国",31.84691800,117.29053367,edu,
+34dd83115195676e7a8b008eb0e9abe84b330b32,Optimized recognition with few instances based on semantic distance,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+34fd227f4fdbc7fe028cc1f7d92cb59204333718,A Deep Face Recognition Method Based on Model Fine-tuning and Principal Component Analysis,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+5a259f2f5337435f841d39dada832ab24e7b3325,Face Recognition via Active Annotation and Learning,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+5a12e1d4d74fe1a57929eaaa14f593b80f907ea3,Learning hierarchical video representation for action recognition,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+5feee69ed183954fa76c58735daa7dd3549e434d,Mean shift clustering for personal photo album organization,"Istituto di Calcolo e Reti ad Alte Prestazioni, Palermo, Italy","Consiglio Nazionale delle Ricerche, Istituto di Calcolo e Reti ad Alte Prestazioni, Viale delle Scienze, 90128 Palermo, ITALY","Viale delle Scienze, 90128 Palermo PA, Italy",38.10304820,13.34789420,edu,
+5f2c210644c1e567435d78522258e0ae036deedb,Biologically vs. Logic Inspired Encoding of Facial Actions and Emotions in Video,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+5f7094ba898a248e1e6b37e3d9fb795e59131cdc,Frame-skip Convolutional Neural Networks for action recognition,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"Columbus, OH 43210, USA",40.01419050,-83.03091430,edu,
+5fb9944b18f5a4a6d20778816290ed647f5e3853,Wearable for Wearable: A Social Signal Processing Perspective for Clothing Analysis using Wearable Devices,University of Barcelona,University of Barcelona,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España",41.38689130,2.16352385,edu,
+5f1cd82343f4bd6972f674d50aecb453d06f04ad,The Impact of Personalisation on Human-Robot Interaction in Learning Scenarios,Universität Hamburg,Universität Hamburg,"Informatikum, 30, Vogt-Kölln-Straße, Stellingen, Eimsbüttel, Hamburg, 22527, Deutschland",53.59948200,9.93353436,edu,
+5f4219118556d2c627137827a617cf4e26242a6e,Explicit Shape Regression With Characteristic Number for Facial Landmark Localization,Dalian University of Technology,Dalian University of Technology,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国",38.88140235,121.52281098,edu,
+5fb59cf5b31a80d8c70d91660092ef86494be577,Real-time SVM-based emotion recognition algorithm,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+5f0d4657eab4152a1785ee0a25b5b499cd1163ec,Bandit Framework for Systematic Learning in Wireless Video-Based Face Recognition,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+5f448ab700528888019542e6fea1d1e0db6c35f2,Transferred Deep Convolutional Neural Network Features for Extensive Facial Landmark Localization,Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+5f9dc3919fb088eb84accb1e490921a134232466,Pose Estimation Based on Two Images from Different Views,Chongqing University,Chongqing University,"重庆工商大学, 19, 翠林路, 重庆市, 重庆市中心, 南岸区 (Nan'an), 重庆市, 400067, 中国",29.50841740,106.57858552,edu,
+33b915476f798ca18ae80183bf40aea4aaf57d1e,Face Illumination Manipulation Using a Single Reference Image by Adaptive Layer Decomposition,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+336488746cc76e7f13b0ec68ccfe4df6d76cdc8f,Adaptive Cascade Regression Model For Robust Face Alignment,University of Technology,"Center for Quantum Computation and Intelligent Systems, Faculty of Engineering and Information Technology, University of Technology, Sydney, NSW, Australia","11 University of Technology Sydney 81, Broadway, Ultimo NSW 2007, Australia",-33.88405040,151.19922540,edu,
+335435a94f8fa9c128b9f278d929c9d0e45e2510,CREMA-D: Crowd-Sourced Emotional Multimodal Actors Dataset,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+3337cfc3de2c16dee6f7cbeda5f263409a9ad81e,Age prediction on face features via multiple classifiers,"University Sultan Zainal Abidin, Malaysia","Faculty of Informatics and Computing, Universiti Sultan Zainal Abidin, Besut Campus, 22200 Besut, Terengganu, Malaysia","22020 Kampung Raja, Terengganu, Malaysia",5.76488480,102.62817020,edu,
+33b61be191e63b0c9974be708180275c9d5b3057,Cross-dataset facial expression recognition,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+33bbf01413910bca26ed287112d32fe88c1cc0df,Region-based feature fusion for facial-expression recognition,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+05184f01e66d7139530729b281da74db35a178d2,Optimal metric selection for improved multi-pose face recognition with group information,Deakin University,Deakin University,"Deakin University, Pigdons Lane, Waurn Ponds, Geelong, City of Greater Geelong, Barwon South West, Victoria, 3216, Australia",-38.19928505,144.30365229,edu,
+05785cb0dcaace54801aa486d4f8fdad3245b27a,Novel generative model for facial expressions based on statistical shape analysis of landmarks trajectories,"CRIStAL UMR, France","Télécom Lille, CRIStAL UMR (CNRS 9189), France","Lille, France",50.62925000,3.05725600,edu,
+052c5ef6b20bf3e88bc955b6b2e86571be08ba64,Heterogeneous Specular and Diffuse 3-D Surface Approximation for Face Recognition Across Pose,Griffith University,Griffith University,"Griffith University Nathan Campus, Johnson Path, Nathan, Nathan Heights, QLD, 4111, Australia",-27.55339750,153.05336234,edu,
+057b80e235b10799d03876ad25465208a4c64caf,Video Question Answering via Gradually Refined Attention over Appearance and Motion,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+0532cbcf616f27e5f6a4054f818d4992b99d201d,Class specific centralized dictionary learning for face recognition,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+055cd8173536031e189628c879a2acad6cf2a5d0,Fast multi-view face alignment via multi-task auto-encoders,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+05c5134125a333855e8d25500bf97a31496c9b3f,Robust Multi-Modal Cues for Dyadic Human Interaction Recognition,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+052cec9fdbfe12ccd02688f3b7f538c0d73555b3,Learning weighted hashing on local structured data,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+9d5bfaf6191484022a6731ce13ac1b866d21ad18,Hierarchical multi-label framework for robust face recognition,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+9d4692e243e25eb465a0480376beb60a5d2f0f13,Positional Ternary Pattern (PTP): An edge based image descriptor for human age recognition,Kyung Hee University,Kyung Hee University,"Kyung Hee Tae Kwon Do, Vons 2370 Truck Service Ramp, University City, San Diego, San Diego County, California, 92122, USA",32.85363330,-117.20352860,edu,
+9d46485ca2c562d5e295251530a99dd5df99b589,Real-time face recognition for human-robot interaction,"Instituto Nacional de Astrofísica, Óptica y Electrónica, Mexico","Instituto Nacional de Astrofísica, Óptica y Electrónica, Luis Enrique Erro #1, Tonantzintla, Puebla, Mexico","Luis Enrique Erro 1, Sta María Tonanzintla, 72840 Puebla, Pue., Mexico",19.03231070,-98.31537020,edu,
+9df86395c11565afa8683f6f0a9ca005485c5589,"Facial expression recognition using active contour-based face detection, facial movement-based feature extraction, and non-linear feature selection",Korea University,Korea University,"고려대, 안암로, 제기동, 동대문구, 서울특별시, 02796, 대한민국",37.59014110,127.03623180,edu,
+9d3377313759dfdc1a702b341d8d8e4b1469460c,Cast2Face: Assigning Character Names Onto Faces in Movie With Actor-Character Correspondence,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+9dcfa771a7e87d7681348dd9f6cf9803699b16ce,Multilevel Quadratic Variation Minimization for 3D Face Modeling and Virtual View Synthesis,Griffith University,Griffith University,"Griffith University Nathan Campus, Johnson Path, Nathan, Nathan Heights, QLD, 4111, Australia",-27.55339750,153.05336234,edu,
+9cda3e56cec21bd8f91f7acfcefc04ac10973966,"Periocular biometrics: databases, algorithms and directions",Halmstad University,Halmstad University,"Högskolan i Halmstad, 3, Kristian IV:s väg, Larsfrid, Nyhem, Halmstad, Hallands län, Götaland, 301 18, Sverige",56.66340325,12.87929727,edu,
+9ce97efc1d520dadaa0d114192ca789f23442727,Teaching Computer Vision: Bringing Research Benchmarks to the Classroom,Open University of Israel,Open University of Israel,"האוניברסיטה הפתוחה, 15, אבא חושי, חיפה, גבעת דאונס, חיפה, מחוז חיפה, NO, ישראל",32.77824165,34.99565673,edu,
+9c2f20ed168743071db6268480a966d5d238a7ee,A face-house paradigm for architectural scene analysis,University of Newcastle,University of Newcastle,"University of Newcastle Central Coast Campus, Technology Bridge, Ourimbah, Central Coast, NSW, 2258, Australia",-33.35788990,151.37834708,edu,
+9cb7b3b14fd01cc2ed76784ab76304132dab6ff3,Facial landmark detection via pose-induced auto-encoder networks,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+9c23859ec7313f2e756a3e85575735e0c52249f4,Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+9c59bb28054eee783a40b467c82f38021c19ff3e,Logistic similarity metric learning for face verification,University of Lyon,"Université de Lyon, CNRS, INSA-Lyon, LIRIS, UMR5205, F-69621, France","20 Avenue Albert Einstein, 69100 Villeurbanne, France",45.78332440,4.87819840,edu,
+023decb4c56f2e97d345593e4f7b89b667a6763d,Generalized Low Rank Approximations of Matrices,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+026e96c3c4751e1583bfe78b8c28bdfe854c4988,Facial analysis in the wild with LSTM networks,Queensland University of Technology,Queensland University of Technology,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.47715625,153.02841004,edu,
+0247998a1c045e601dc4d65c53282b5e655be62b,Learning to tell brake and turn signals in videos using CNN-LSTM structure,"Toyota Research Institute, North America",Toyota Research Institute - North America,"2311 Green Rd Suite E, Ann Arbor, MI 48105, USA",42.30985050,-83.69329530,company,
+021469757d626a39639e260492eea7d3e8563820,3D Face Processing,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+a4898f55f12e6393b1c078803909ea715bf71730,"Where is the driver looking: Analysis of head, eye and iris for robust gaze zone estimation","University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+a4e75766ef93b43608c463c233b8646439ce2415,Automatic real-time FACS-coder to anonymise drivers in eye tracker videos,"Volvo, Sweden","Volvo Car Corporation, SE-405 31 Göteborg, Sweden","Karossvägen 2, 405 31 Göteborg, Sweden",57.72288600,11.84620530,company,
+a35ed55dc330d470be2f610f4822f5152fcac4e1,Tattoo recognition technology - challenge (Tatt-C): an open tattoo database for developing tattoo recognition research,NIST,"National Institute of Standards and Technology (NIST), Gaithersburg, MD","100 Bureau Dr, Gaithersburg, MD 20899, USA",39.14004000,-77.21850600,edu,
+a3201e955d6607d383332f3a12a7befa08c5a18c,VLAD encoded Deep Convolutional features for unconstrained face verification,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+a3ed0f15824802359e05d9777cacd5488dfa7dba,A Wearable Social Interaction Aid for Children with Autism,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+a3bf6129d1ae136709063a5639eafd8018f50feb,A linear regression model for estimating facial image quality,University of Calgary,University of Calgary,"University of Calgary, Service Tunnel, University Heights, Calgary, Alberta, T2N 1N7, Canada",51.07840380,-114.12870770,edu,
+b5f9180666924a3215ab0b1faf712e70b353444d,Facial expression synthesis with direction field preservation based mesh deformation and lighting fitting based wrinkle mapping,Shenzhen University,Shenzhen University,"深圳大学, 3688, 南海大道, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518060, 中国",22.53521465,113.93159110,edu,
+b53485dbdd2dc5e4f3c7cff26bd8707964bb0503,Pose-Invariant Face Alignment via CNN-Based Dense 3D Model Fitting,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+b5bda4e1374acc7414107cde529ad8b3263fae4b,Online learning design of an image-based facial expression recognition system,National Chiao Tung University,National Chiao Tung University,"NCTU;交大;交通大學;交大光復校區;交通大學光復校區, 1001, 大學路, 光明里, 赤土崎, 東區, 新竹市, 30010, 臺灣",24.78676765,120.99724412,edu,
+b54fe193b6faf228e5ffc4b88818d6aa234b5bb9,Video Generation Using 3D Convolutional Neural Network,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+b5690409be6c4e98bd37181d41121adfef218537,Improving image clustering: An unsupervised feature weight learning framework,IBM China Research Laboratory,"IBM China Research Laboratory, Beijing, China","Beijing, China",39.90419990,116.40739630,company,
+b5f3b0f45cf7f462a9c463a941e34e102a029506,From individual to group-level emotion recognition: EmotiW 5.0,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+b51d11fa400d66b9f9d903a60c4ebe03fd77c8f2,Proposal and implementation of a novel scheme for image and emotion recognition using Hadoop,Amity University Uttar Pradesh,Amity University Uttar Pradesh,"Amity University, Noida, Greater Noida Expressway, Noida Special Economic Zone, Bakhtawarpur, Ghaziabad, Uttar Pradesh, 201304, India",28.54322285,77.33274830,edu,
+b5f79df712ad535d88ae784a617a30c02e0551ca,Locating Facial Landmarks Using Probabilistic Random Forest,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+b55e70df03d9b80c91446a97957bc95772dcc45b,MixedEmotions: An Open-Source Toolbox for Multimodal Emotion Analysis,"Siren Solutions, Dublin, Ireland","Siren Solutions, Dublin, Ireland","Dublin, Ireland",53.34980530,-6.26030970,company,
+b50edfea790f86373407a964b4255bf8e436d377,Group emotion recognition with individual facial emotion CNNs and global image based CNNs,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+b26e8f6ad7c2d4c838660d5a17337ce241442ed9,A Cascaded Framework for Model-Based 3D Face Reconstruction,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+b2470969e4fba92f7909eac26b77d08cc5575533,Profit Maximization Mechanism and Data Management for Data Analytics Services,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+b259f57f41f4b3b5b7ca29c5acb6f42186bbcf23,An interactive virtual mirror to support makeup for visually impaired persons,University of Tsukuba,University of Tsukuba,"University of Tsukuba, つばき通り, Kananemoto-satsukabe village, つくば市, 茨城県, 関東地方, 305-8377, 日本",36.11120580,140.10551760,edu,
+b2f9e0497901d22b05b9699b0ea8147861c2e2cc,Facial Expression Recognition Using Local Region Specific Dense Optical Flow and LBP Features,Korea Electronics Technology Institute,Korea Electronics Technology Institute,"South Korea, Gyeonggi-do, Seongnam-si, Bundang-gu, 새나리로 25 (야탑동) KETI 전자부품연구원",37.40391700,127.15978600,edu,
+b22317a0bbbcc79425f7c8a871b2bf211ba2e9c4,Adaptive Feature Mapping for Customizing Deep Learning Based Facial Expression Recognition Model,National Chiao Tung University,National Chiao Tung University,"NCTU;交大;交通大學;交大光復校區;交通大學光復校區, 1001, 大學路, 光明里, 赤土崎, 東區, 新竹市, 30010, 臺灣",24.78676765,120.99724412,edu,
+b234d429c9ea682e54fca52f4b889b3170f65ffc,A Concatenational Graph Evolution Aging Model,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+b2ddea9c71cd73fa63e09e8121bc7a098fae70b4,An interactive game for teaching facial expressions to children with Autism Spectrum Disorders,"Universidade do Porto, Portugal","Inst. de Telecomunicações, Fac. de Ciências da Universidade do Porto, Porto, Portugal","Instituto Superior Técnico, Av. Rovisco Pais 1, 1049-001 Lisboa, Portugal",38.73684640,-9.13934570,edu,
+b2cb335ded99b10f37002d09753bd5a6ea522ef1,Analysis of adaptability of deep features for verifying blurred and cross-resolution images,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+d9e66b877b277d73f8876f537206395e71f58269,Learning Stacked Image Descriptor for Face Recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+d963bdff2ce5212fa585a83ca8fad96875bc0057,Combining multi-representation for multimedia event detection using co-training,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"Columbus, OH 43210, USA",40.01419050,-83.03091430,edu,
+d9218c2bbc7449dbccac351f55675efd810535db,Feature selection for facial expression recognition,University of Wollongong,University of Wollongong,"University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia",-34.40505545,150.87834655,edu,
+d983dda8b03ed60fa3afafe5c50f1d9a495f260b,Face recognition using elastic local reconstruction based on a single face image,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+d9eed86e53ce5f7cba379fe77bbefb42e83c0d88,Implicit Block Diagonal Low-Rank Representation,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+d9b4b49378fcd77dcd5e755975b99ed4c7962f17,Stroke Detector and Structure Based Models for Character Recognition: A Comparative Study,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+d91f9e8cbf271004ef1a293401197a10a26ccd1b,Facial action units detection by robust temporal features,Japan Advanced Institute of Science and Technology,Japan Advanced Institute of Science and Technology,"JAIST (北陸先端科学技術大学院大学), 石川県道55号小松辰口線, Ishikawa Science Park, 能美市, 石川県, 中部地方, 923-1206, 日本",36.44429490,136.59285870,edu,
+acab402d706dbde4bea4b7df52812681011f435e,Robust face recognition with illumination normalization using a reference profile,"Infosys Limited, Bangalore, India","E-Comm Research Lab, Infosys Limited, Bangalore, India","Bengaluru, Karnataka, India",12.97159870,77.59456270,company,
+acd4280453b995cb071c33f7c9db5760432f4279,Deep transformation learning for face recognition in the unconstrained scene,Chongqing University,Chongqing University,"重庆工商大学, 19, 翠林路, 重庆市, 重庆市中心, 南岸区 (Nan'an), 重庆市, 400067, 中国",29.50841740,106.57858552,edu,
+ac2e166c76c103f17fdea2b4ecb137200b8d4703,Cognitive face analysis system for future interactive TV,Korea Advanced Institute of Science and Technology,Korea Advanced Institute of Science and Technology,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.36971910,127.36253700,edu,
+ac48ecbc7c3c1a7eab08820845d47d6ce197707c,Iterative Re-Constrained Group Sparse Face Recognition With Adaptive Weights Learning,Zhejiang University of Technology,Zhejiang University of Technology,"浙江工业大学, 潮王路, 朝晖街道, 杭州市 Hangzhou, 浙江省, 310014, 中国",30.29315340,120.16204580,edu,
+ac03849956ac470c41585d2ee34d8bb58bb3c764,Automatic inference of mental states from spontaneous facial expressions,New Jersey Institute of Technology,New Jersey Institute of Technology,"New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA",40.74230250,-74.17928172,edu,
+ad08426ca57da2be0e9f8c1f673e491582edb896,Convergence Analysis of Graph Regularized Non-Negative Matrix Factorization,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+adad7446e371d27fdaee39475856e2058f3045e5,A two-stage low complexity face recognition system for face images with alignment errors,National Cheng Kung University,National Cheng Kung University,"成大, 1, 大學路, 大學里, 前甲, 東區, 臺南市, 70101, 臺灣",22.99919160,120.21625134,edu,
+ad4d1ecf5c5473c050e11f6876ce148de1c8920a,Matching video net: Memory-based embedding for video action recognition,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+ad8bd7016132a2f98ff1f41dac695285e71cc4b1,A face alignment method based on SURF features,"Changchun University of Science and Technology, China",College of electronic and information engineer Changchun University of Science and Technology Changchun China,"7989 Weixing Rd, Chaoyang Qu, Changchun Shi, Jilin Sheng, China, 130012",43.83467700,125.30313500,edu,
+ad5a35a251e07628dd035c68e44a64c53652be6b,Robust facial landmark tracking via cascade regression,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+ad339a5fdaab95f3c8aad83b60ceba8d76107fa2,Segmented Linear Subspaces for Illumination-Robust Face Recognition,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+ada063ce9a1ff230791c48b6afa29c401a9007f1,Biometric Recognition,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+ad50f6899103eff0ee4504e539c38eb965fd1309,Emotion recognition based on a novel triangular facial feature extraction method,National Cheng Kung University,National Cheng Kung University,"成大, 1, 大學路, 大學里, 前甲, 東區, 臺南市, 70101, 臺灣",22.99919160,120.21625134,edu,
+bbc21d6b7c6e807c6886d237a04b501158ca6bb8,Learning Personalized Models for Facial Expression Analysis and Gesture Recognition,"TeV, Fondazione Bruno Kessler, Trento, Italy","TeV, Fondazione Bruno Kessler, Trento, Italy","Trento, Province of Trento, Italy",46.07477930,11.12174860,edu,
+bbc47f421ab161f22f2699ee7bbb7fc8aec1cb7b,Constrained versus unconstrained learning in generalized recurrent network for image processing,ThyssenKrupp Elevator Americas,ThyssenKrupp Elevator Americas,"4511 N Himes Ave, Tampa, FL 33614, USA",27.98411770,-82.50021990,company,
+bb4be8e24d7b8ed56d81edec435b7b59bad96214,Localized Multifeature Metric Learning for Image-Set-Based Face Recognition,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+bb2f61a057bbf176e402d171d79df2635ccda9f6,Multi-modal joint embedding for fashion product retrieval,Waseda University,Waseda University,"早稲田大学 北九州キャンパス, 2-2, 有毛引野線, 八幡西区, 北九州市, 福岡県, 九州地方, 808-0135, 日本",33.88987280,130.70856205,edu,
+bb3698df3b4f40c0b7cc523d26ffb8c5276d5a1c,"An improved approach for face detection using superpixels, moment-based matching, and isosceles triangle matching",National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+d7b8f285b0701ba7b1a11d1c7dd3d1e7e304083f,A Hybrid Approach for Facial Expression Recognition,San Jose State University,San Jose State University,"SJSU, El Paseo de Cesar E. Chavez, Downtown Historic District, Japantown, San José, Santa Clara County, California, 95113, USA",37.33519080,-121.88126008,edu,
+d7c87f4ca39f79d93c954ffacac32bc6eb527e2c,Curvelet Entropy for Facial Expression Recognition,University of Windsor,University of Windsor,"Bridge AA, Ambassador Bridge, Windsor, Essex, Ontario, N9C 2J9, Canada",42.30791465,-83.07176915,edu,
+d75bd05865224a1341731da66b8d812a7924d6f6,Dynamic Detection-Rate-Based Bit Allocation With Genuine Interval Concealment for Binary Biometric Representation,Hong Kong Baptist University,Hong Kong Baptist University,"香港浸會大學 Hong Kong Baptist University, 安明街 On Ming Street, 石門 Shek Mun, 石古壟 Shek Kwu Lung, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1132, 中国",22.38742010,114.20822220,edu,
+d7b7253f7d8b397d9d74057e1e72ed9c58e2ba6d,Towards Improving Social Communication Skills With Multimodal Sensory Information,Beijing Normal University,Beijing Normal University,"北京师范大学, 19, 新街口外大街, 西城区, 100875, 中国",39.96014155,116.35970438,edu,
+d7a84db2a1bf7b97657b0250f354f249394dd700,Global and local feature based multi-classifier A-stack model for aging face identification,"Swiss Federal Institute of Technology Lausanne, Switzerland","Swiss Federal Institute of Technology Lausanne (EPFL), Switzerland","Route Cantonale, 1015 Lausanne, Switzerland",46.51905570,6.56675760,edu,
+d77f18917a58e7d4598d31af4e7be2762d858370,Detecting person presence in TV shows with linguistic and structural features,"Orange Labs, Lannion, France","France Telecom - Orange Labs, Lannion, France","2 Avenue Pierre Marzin, 22300 Lannion, France",48.75416800,-3.45845860,company,
+d05759932001aa6f1f71e7dc261c4716f57a5397,Locality Preserving Discriminant Projection,"Dhirubhai Ambani Institute of Information and Communication Technology, India","Dhirubhai Ambani Institute of Information and Communication Technology, India","Near Reliance Chowkdi, DA IICT Road, Gandhinagar, Gujarat 382007, India",23.18854690,72.62902940,edu,
+d00e9a6339e34c613053d3b2c132fccbde547b56,A cascaded convolutional neural network for age estimation of unconstrained faces,State University of New Jersey,The State University of New Jersey,"Rutgers New Brunswick: Livingston Campus, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA",40.51865195,-74.44099801,edu,
+d0b67ec62086b55f00dc461ab58dc87b85388b2b,Online facial expression recognition based on combining texture and geometric information,National Tsing Hua University,National Tsing Hua University,"國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.79254840,120.99511830,edu,
+d0a8889f694422614bf3ecccd69aa1d4f7822606,Image and Video-Based Biometrics,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+d0d75a7116a76ccd98a3aeb6f6fff10ba91de1c1,Constrained Metric Learning by Permutation Inducing Isometries,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+d09fd7e0bb5d997963cfef45452724416b2bb052,Research on algorithm of state recognition of students based on facial expression,China Mobile Group Shanghai,"Pudong Branch, China Mobile Group Shanghai, Company Limited, Shanghai, China","Pudong, Shanghai, China",31.22151700,121.54437900,company,
+bef4df99e1dc6f696f9b3732ab6bac8e85d3fb3c,A multi-label convolutional neural network approach to cross-domain action unit detection,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+be632b206f1cd38eab0c01c5f2004d1e8fc72880,Gradual training of cascaded shape regression for facial landmark localization and pose estimation,Queensland University of Technology,Queensland University of Technology,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.47715625,153.02841004,edu,
+beae35eb5b2c7f63dfa9115f07b5ba0319709951,Discriminative 3D morphable model fitting,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+b313751548018e4ecd5ae2ce6b3b94fbd9cae33e,Evaluation of Face Datasets as Tools for Assessing the Performance of Face Recognition Methods,National Institutes of Health,National Institutes of Health,"NIH, Pooks Hill, Bethesda, Montgomery County, Maryland, USA",39.00041165,-77.10327775,edu,
+b3050dc48600acf2f75edf1f580a1f9e9cb3c14a,Face relighting using discriminative 2D spherical spaces for face recognition,Osaka University,Osaka University,"大阪大学清明寮, 服部西町四丁目, 豊中市, 大阪府, 近畿地方, 日本",34.80809035,135.45785218,edu,
+b388bf63c79e429dafee16c62b2732bcbea0d026,Ceci n'est pas une pipe: A deep convolutional network for fine-art paintings classification,University of Malaya,University of Malaya,"UM, Lingkaran Wawasan, Bukit Pantai, Bangsar, KL, 50603, Malaysia",3.12267405,101.65356103,edu,
+b351575e3eab724d62d0703e24ecae55025eef00,Person-centered accessible technologies and computing solutions through interdisciplinary and integrated perspectives from disability research,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+df767f62a6bf3b09e6417d801726f2d5d642a202,Face Recognition under Varying Lighting Based on the Probabilistic Model of Gabor Phase,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+df87193e15a19d5620f5a6458b05fee0cf03729f,Emotional expression recognition with a cross-channel convolutional neural network for human-robot interaction,Universität Hamburg,Universität Hamburg,"Informatikum, 30, Vogt-Kölln-Straße, Stellingen, Eimsbüttel, Hamburg, 22527, Deutschland",53.59948200,9.93353436,edu,
+dfbbe8100fcd70322a431bd5d2c2d52a65fd4bbd,Challenges in Deep Learning for Multimodal Applications,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+df550cb749858648209707bec5410431ea95e027,Local Laplacian Coding From Theoretical Analysis of Local Coding Schemes for Locally Linear Classification,Beijing University of Technology,Beijing University of Technology,"北京工业大学, 银杏大道, 大郊亭村, 朝阳区 / Chaoyang, 北京市, 3208, 中国",39.87391435,116.47722285,edu,
+df7ff512e8324894d20103fd8ab5da650e4d86db,Linking names and faces by person-based subset clustering,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+df7af280771a6c8302b75ed0a14ffe7854cca679,Prediction of users' facial attractiveness on an online dating website,"IBJ, Inc., Tokyo, Japan","IBJ, Inc., Tokyo, Japan","Tokyo, Japan",35.68948750,139.69170640,company,
+da1477b4a65ae5a013e646b57e004f0cd60619a2,Nose tip detection from 3D facial mesh data using a rotationally invariant local shape descriptor,Sungkyunkwan University Suwon,"Computer Vision Lab, Sungkyunkwan University Suwon, South Korea","25-2 Sungkyunkwan-ro, Myeongnyun 3(sam)ga-dong, Jongno-gu, Seoul, South Korea",37.58822700,126.99360600,edu,
+da7bbfa905d88834f8929cb69f41a1b683639f4b,Discriminant analysis with Gabor phase for robust face recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+dad6b36fd515bda801f3d22a462cc62348f6aad8,Gait-based age estimation using a whole-generation gait database,Osaka University,Osaka University,"大阪大学清明寮, 服部西町四丁目, 豊中市, 大阪府, 近畿地方, 日本",34.80809035,135.45785218,edu,
+dac8fc521dfafb2d082faa4697f491eae00472c7,Learning to Generate and Edit Hairstyles,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+dac6e9d708a9757f848409f25df99c5a561c863c,SVD Face: Illumination-Invariant Face Representation,"Samsung SAIT, Korea","Advanced Media Lab. Samsung Advance Institute of Technology, Republic of Korea","130 Samseong-ro, Maetan 3(sam)-dong, Yeongtong-gu, Suwon, Gyeonggi-do, South Korea",37.25202260,127.05550190,company,
+dac34b590adddef2fc31f26e2aeb0059115d07a1,House in the (Biometric) Cloud: A Possible Application,Sapienza University of Rome,Sapienza University of Rome,"Piazzale Aldo Moro, 5, 00185 Roma RM, Italy",41.90376260,12.51443840,edu,
+b472f91390781611d4e197564b0016d9643a5518,Facial expression recognition using geometric and appearance features,University of Abertay,University of Abertay,"Abertay University, Bell Street, City Centre, Dundee, Dundee City, Scotland, DD1 1HG, UK",56.46323375,-2.97447512,edu,
+b484141b99d3478a12b8a6854864c4b875d289b8,Low-resolution face recognition via Simultaneous Discriminant Analysis,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+b44f03b5fa8c6275238c2d13345652e6ff7e6ea9,Lapped convolutional neural networks for embedded systems,"AltumView Systems Inc., Burnaby, BC, Canada","AltumView Systems Inc., Burnaby, BC, Canada","8525 Baxter Pl, Burnaby, BC V5A 4V7, Canada",49.25938790,-122.91518930,company,
+a216f7863fc6ab15e2bb7a538dfe00924e1da0ab,Block-wise constrained sparse graph for face image representation,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+a2646865d7c3d7fb346cf714caf146de2ea0e68f,Distributed graph regularized non-negative matrix factorization with greedy coordinate descent,"National University of Defense Technology, China","National University of Defence Technology, Changsha 410000, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.22902090,112.99483204,edu,
+a2b4a6c6b32900a066d0257ae6d4526db872afe2,Learning Face Image Quality From Human Assessments,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+a200885bf6bfa0493d85e7617e65cdabe30a2dab,An efficient face classification method based on shared and class-specific dictionary learning,Jiangnan University,Jiangnan University,"江南大学站, 蠡湖大道, 滨湖区, 南场村, 滨湖区 (Binhu), 无锡市 / Wuxi, 江苏省, 214121, 中国",31.48542550,120.27395810,edu,
+a20036b7fbf6c0db454c8711e72d78f145560dc8,On averaging face images for recognition under pose variations,Griffith University,Griffith University,"Griffith University Nathan Campus, Johnson Path, Nathan, Nathan Heights, QLD, 4111, Australia",-27.55339750,153.05336234,edu,
+a2af07176a38fe844b0e2fdf4abae65472628b38,Dog breed classification via landmarks,University of Delaware,University of Delaware,"University of Delaware, South College Avenue, Newark, New Castle County, Delaware, 19713, USA",39.68103280,-75.75401840,edu,
+a2b76ab614d92f5e71312b530f0b6281d0c500f7,On optimal low rank Tucker approximation for tensors: the case for an adjustable core size,University of Minnesota,University of Minnesota,"WeismanArt, 333, East River Parkway, Marcy-Holmes, Phillips, Minneapolis, Hennepin County, Minnesota, 55455, USA",44.97308605,-93.23708813,edu,
+a26fd9df58bb76d6c7a3254820143b3da5bd584b,Monitor Pupils' Attention by Image Super-Resolution and Anomaly Detection,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+a5eb36f1e77245dfc9e5c0c03998529331e4c89b,An optimal set of code words and correntropy for rotated least squares regression,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+a53d13b9110cddb2a5f38b9d7ed69d328e3c6db9,Cost-Sensitive Local Binary Feature Learning for Facial Age Estimation,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+a5b9c6aa52f91092b5a8ab04ed1f7b60c0ea5260,Mediating Human Decision Making with Emotional Attitudes in Web Based Decision Support Systems,La Trobe University,La Trobe University,"La Trobe University, Keck Street, Flora Hill, Bendigo, City of Greater Bendigo, Loddon Mallee, Victoria, 3550, Australia",-36.77847540,144.29804700,edu,
+a5d4cc596446517dfaa4d92276a12d5e1c0a284c,Kernel Grassmannian distances and discriminant analysis for face recognition from image sets,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+a5f35880477ae82902c620245e258cf854c09be9,Face detection by structural models,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+a53f988d16f5828c961553e8efd38fed15e70bcc,Pokerface: Partial order keeping and energy repressing method for extreme face illumination normalization,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+a52a69bf304d49fba6eac6a73c5169834c77042d,Margin Loss: Making Faces More Separable,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+bdf5434648356ce22bdbf81d2951e4bb00228e4d,A Half Face Recognition Scheme,"Chongqing Institute of Technology, China","Chongqing Institute of Technology, China","69 Hongguang Ave, Banan Qu, Chongqing Shi, China",29.45832600,106.52994700,edu,
+bddc822cf20b31d8f714925bec192c39294184f7,Facial expression recognition based on local binary patterns,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+bdd203bcd3c41c336c5635fb026a78279d75b4be,Shannon information based adaptive sampling for action recognition,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+bd9e0b6a90b51cc19b65f51dacd08ce1a7ccaac5,Avatar recommendation method based on facial attributes,Korea Advanced Institute of Science and Technology,Korea Advanced Institute of Science and Technology,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.36971910,127.36253700,edu,
+bd66dc891270d858de3adf97d42ed714860ae94d,Non-semantic facial parts for face verification,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+d1b5b3e4b803dc4e50c5b80c1bc69c6d98751698,Modified Hidden Factor Analysis for Cross-Age Face Recognition,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+d1ee9e63c8826a39d75fa32711fddbcc58d5161a,A facial symmetry prior for improved illumination fitting of 3D morphable model,Speech and Signal Processing University of Surrey,"Centre for Vision, Speech and Signal Processing University of Surrey, Guildford, UK","388 Stag Hill, Guildford GU2 7XH Stag Hill, Guildford GU2 7XH, UK",51.24354510,-0.58857440,edu,
+d10cfcf206b0991e3bc20ac28df1f61c63516f30,Smile or smirk? Automatic detection of spontaneous asymmetric smiles to understand viewer experience,"Affectiva, Inc.","Affectiva Inc., Waltham, MA, USA","294 Washington St, Boston, MA 02108, USA",42.35730460,-71.05824150,company,MIT spinoff
+d116bac3b6ad77084c12bea557d42ed4c9d78433,Recognition of occluded facial expressions based on CENTRIST features,University of Campinas,University of Campinas,"USJ, 97, Rua Sílvia Maria Fabro, Kobrasol, Campinas, São José, Microrregião de Florianópolis, Mesorregião da Grande Florianópolis, SC, Região Sul, 88102-130, Brasil",-27.59539950,-48.61542180,edu,
+d1079444ceddb1de316983f371ecd1db7a0c2f38,Sparse residue for occluded face image reconstruction and classification,"Harbin Institute of Technology, Shenzhen","Harbin Institute of Technology Shenzhen Graduate School, Shenzhen, China","China, Guangdong, Shenzhen, Nanshan, 平山一路",22.58675200,113.96878000,edu,
+d1dd80d77655876fb45b9420fe72444c303b219e,Accumulated motion images for facial expression recognition in videos,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+d60e3eef429ed2a51bbd806125fa31f5bea072a4,Hajj human event classification system using machine learning techniques,Ain Shams University,"Faculty of Engineering, Ain Shams University, Cairo, Egypt","1 El Sarayat St.، ABBASSEYA، Al Waili, Cairo Governorate 11535, Egypt",30.06456570,31.27886080,edu,
+d6ae7941dcec920d5726d50d1b1cdfe4dde34d35,Avatar digitization from a single image for real-time rendering,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+d691440030394c2e00a2ab47aba4f8b5fca5f25a,Tube ConvNets: Better exploiting motion for action recognition,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+d63bd06340dd35590a22222509e455c49165ee13,Recurrent Temporal Sparse Autoencoder for attention-based action recognition,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+d6e08345ba293565086cb282ba08b225326022fc,Occlusion-Aware Fragment-Based Tracking With Spatial-Temporal Consistency,Dalian University of Technology,Dalian University of Technology,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国",38.88140235,121.52281098,edu,
+d6791b98353aa113d79f6fb96335aa6c7ea3b759,Collaborative Random Faces-Guided Encoders for Pose-Invariant Face Representation Learning,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+d62d82c312c40437bc4c1c91caedac2ba5beb292,Super Wide Regression Network for Unsupervised Cross-Database Facial Expression Recognition,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+bc607bee2002c6c6bf694a15efd0a5d049767237,A novel large-scale multimedia image data classification algorithm based on mapping assisted deep neural network,Amity University,Amity University,"Amity University, Faizabad Road, Uttardhauna, Gomti Nagar, Tiwariganj, Lucknow, Uttar Pradesh, 226010, India",26.85095965,81.04950965,edu,
+bc66685acc64fa3c425c0ee6c443d3fa87db7364,Personal Clothing Retrieval on Photo Collections by Color and Attributes,"HP Labs, Palo Alto, CA","Hewlett-Packard Laboratories, Hewlett-Packard Company, Palo Alto, CA, USA","1501 Page Mill Rd, Palo Alto, CA 94304, USA",37.41233890,-122.14795950,company,
+bcead1a92744e76c38caaa13159de4abfb81b1d0,Bags-of-daglets for action recognition,Télécom ParisTech,"Institut Mines-Telecom, Telecom ParisTech, CNRS LTCI, Sophia Antipolis, France","Business Pôle. 1047 route des Dolines. Allée Pierre Ziller, 06560 Sophia Antipolis, France",43.62716550,7.04109170,edu,
+bc08dfa22949fbe54e15b1a6379afade71835968,Multiple Facial Action Unit recognition by learning joint features and label relations,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+bca39960ba46dc3193defe0b286ee0bea4424041,A decision-boundary-oriented feature selection method and its application to face recognition,Korea Advanced Institute of Science and Technology,Korea Advanced Institute of Science and Technology,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.36971910,127.36253700,edu,
+bc6a7390135bf127b93b90a21b1fdebbfb56ad30,Bimodal Vein Data Mining via Cross-Selected-Domain Knowledge Transfer,East China Normal University,East China Normal University,"华东师范大学, 3663, 中山北路, 曹家渡, 普陀区, 普陀区 (Putuo), 上海市, 200062, 中国",31.22849230,121.40211389,edu,
+ae78469de00ea1e7602ca468dcf188cdfe2c80d4,Temporal Convolutional Neural Network for Gesture Recognition,Communication University of China,Communication University of China,"中国传媒大学, 朝阳路, 定福庄, 朝阳区 / Chaoyang, 北京市, 100024, 中国",39.91199955,116.55189141,edu,
+ae425a2654a1064c2eda29b08a492c8d5aab27a2,An incremental face recognition system based on deep learning,"National University of Defense Technology, China","National University of Defence Technology, Changsha 410000, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.22902090,112.99483204,edu,
+ae89e464576209b1082da38e0cee7aeabd03d932,Robust face recognition using generalized neural reflectance model,City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+ae7604b1840753e9c2e1ab7a97e02f91a9d81860,Automatic facial emotion recognition using weber local descriptor for e-Healthcare system,King Saud University,King Saud University,"King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية",24.72464030,46.62335012,edu,
+aea977a3b5556957ed5fb3ef21685ee84921eaa3,Dynamic facial landmarking selection for emotion recognition using Gaussian processes,University of Sheffield,The University of Sheffield,"University of Sheffield, Portobello, Port Mahon, Saint George's, Sheffield, Yorkshire and the Humber, England, S1 4DP, UK",53.38152480,-1.48068143,edu,
+aef58a54d458ab76f62c9b6de61af4f475e0f616,A spiking thalamus model for form and motion processing of images,"HRL Laboratories, Malibu, CA","HRL Laboratories, LLC, Information Systems and Sciences Lab, Malibu, CA 90265 USA","4797, 3011 Malibu Canyon Rd, Malibu, CA 90265, United States",34.04286290,-118.69525780,company,
+aed6af12148b43e4a24ee6e2bc3604ca59bd99a5,Discriminative Deep Metric Learning for Face and Kinship Verification,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+aee3427d0814d8a398fd31f4f46941e9e5488d83,Face verification with aging using AdaBoost and local binary patterns,University of Delaware,University of Delaware,"University of Delaware, South College Avenue, Newark, New Castle County, Delaware, 19713, USA",39.68103280,-75.75401840,edu,
+ae96fc36c89e5c6c3c433c1163c25db1359e13ea,Linear discriminant analysis with spectral regularization,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+d8c9bad8d07ae4196027dfb8343b9d9aefb130ff,Power difference template for action recognition,MIT,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+d8b99eada922bd2ce4e20dc09c61a0e3cc640a62,Image factorization and feature fusion for enhancing robot vision in human face recognition,University of Portsmouth,University of Portsmouth,"University of Portsmouth - North Zone, Portland Street, Portsea, Portsmouth, South East, England, PO1 3DE, UK",50.79805775,-1.09834911,edu,
+d89a754d7c59e025d2bfcdb872d2d061e2e371ba,Towards robotic facial mimicry: System development and evaluation,"TU München, Germany","Image Understanding and Knowledge-Based Systems, TU München, Germany","Arcisstraße 21, 80333 München, Germany",48.14966000,11.56786020,edu,
+d8288322f32ee4501cef5a9b667e5bb79ebd7018,Facing scalability: Naming faces in an online social network,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+d8fbd3a16d2e2e59ce0cff98b3fd586863878dc1,Face detection and recognition for home service robots with end-to-end deep neural networks,"Futurewei Technologies Inc., Santa Clara, CA","Robotics Lab, Futurewei Technologies Inc., Santa Clara, USA","2330 Central Expy, Santa Clara, CA 95050, USA",37.37344400,-121.96487270,company,
+ab7923968660d04434271559c4634790dc68c58e,Facial landmark detection via cascade multi-channel convolutional neural network,Xi'an Jiaotong University,Xi'an Jiaotong University,"西安交通大学兴庆校区, 文治路, 乐居场, 碑林区 (Beilin), 西安市, 陕西省, 710048, 中国",34.24749490,108.97898751,edu,
+abf0aa1d8869d87f4ef62e2da058ccfb4bf46d18,A survey on aggregating methods for action recognition with dense trajectories,Southeast University,Southeast University,"SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国",32.05752790,118.78682252,edu,
+abfba1dc9a9991897acd0e0d3d4ef9d4aef4151c,Development of facial expression recognition for training video customer service representatives,Ritsumeikan University,Ritsumeikan University,"立命館大学 (Ritsumeikan University), 衣笠宇多野線, 北区, 京都市, 京都府, 近畿地方, 6038577, 日本",35.03332810,135.72491540,edu,
+ab8ecf98f457e29b000c44d49f5bf49ec92e571c,Emotion Recognition from Occluded Facial Expressions Using Weber Local Descriptor,University of Campinas,University of Campinas,"USJ, 97, Rua Sílvia Maria Fabro, Kobrasol, Campinas, São José, Microrregião de Florianópolis, Mesorregião da Grande Florianópolis, SC, Região Sul, 88102-130, Brasil",-27.59539950,-48.61542180,edu,
+ab0981d1da654f37620ca39c6b42de21d7eb58eb,Unsupervised Hierarchical Dynamic Parsing and Encoding for Action Recognition,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+abe4c1d6b964c4f5443b0334a44f0b03dd1909f4,Deep learning based image description generation,Northumbria University,Northumbria University,"Northumbria University, Birkdale Close, High Heaton, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE7 7TP, UK",55.00306320,-1.57463231,edu,
+ab00ea1aa2f81fbe139b4632ec3682dfb7312ef0,Comparison of face detection and image classification for detecting front seat passengers in vehicles,Xerox Research Center,Xerox Research Center,"Xerox Research Centre of Canada, 2660, Speakman Drive, Sheridan Park, Erin Mills, Ont., Peel Region, Ontario, L5J 2M4, Canada",43.51291090,-79.66640762,company,
+abbc6dcbd032ff80e0535850f1bc27c4610b0d45,Facial age estimation via extended curvature Gabor filter,"Electronics and Telecommunications Research Institute, Korea","Electronics and Telecommunications Research Institute (ETRI), Republic of Korea",South Korea,35.90775700,127.76692200,edu,
+ab80582807506c0f840bd1ba03a8b84f8ac72f79,Aphash: Anchor-Based Probability Hashing for Image Retrieval,"A*STAR, Singapore","Institute for Infocomm Research, A*STAR, Singapore","1 Fusionopolis Way, #21-01 Connexis, Singapore 138632",1.29889260,103.78731070,edu,
+ab6886252aea103b3d974462f589b4886ef2735a,Two-stage Multi-class AdaBoost for Facial Expression Recognition,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+e51f1ee5535017e10a5f77100ff892509ec6b221,Rough common vector: A new approach to face recognition,University of Aizu,University of Aizu,"会津大学, 磐越自動車道, 会津若松市, 福島県, 東北地方, 965-8580, 日本",37.52367280,139.93807246,edu,
+e5ea7295b89ef679e74919bf957f58d55ad49489,Gaussian Mixture Models based on the Phase Spectra for Illumination Invariant Face Identification on the Yale Database,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+e57108607d94aa158eb22ae50540ae6080e48d4b,Head-Pose Invariant Facial Expression Recognition Using Convolutional Neural Networks,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+e5c687c8c84f1cdb9d9fbc9b6ff7518ff4d71056,Classifiability-Based Discriminatory Projection Pursuit,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+e57ce6244ec696ff9aa42d6af7f09eed176153a8,Instantaneous real-time head pose at a distance,Heriot-Watt University,Heriot-Watt University,"Heriot-Watt University - Edinburgh Campus, Third Gait, Currie, Gogarbank, City of Edinburgh, Scotland, EH14 4AS, UK",55.91029135,-3.32345777,edu,
+e50ec6b6d1c189edc127eb403c41a64f34fc0a6c,Learning Flexible Block based Local Binary Patterns for unconstrained face detection,"Advanced Technologies Application Center, Havana, Cuba","Advanced Technologies Application Center, Siboney Playa, Havana, Cuba","Playa, Havana, Cuba",23.08862140,-82.44819440,edu,
+e546572f8205570de4518bcf8d0345465e51d7a0,Residue boundary histograms for action recognition in the compressed domain,University of New South Wales,University of New South Wales,"UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia",-33.91758275,151.23124025,edu,
+e287ff7997297ce1197359ed0fb2a0bd381638c9,Joint-Feature Guided Depth Map Super-Resolution With Face Priors,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+e2faaebd17d10e2919bd69492787e7565546a63f,Exploring hybrid spatio-temporal convolutional networks for human action recognition,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+e2106bb3febb4fc8fe91f0fcbc241bcda0e56b1e,Image classification: A hierarchical dictionary learning approach,"Army Research Office, Raleigh, NC","Army Research Office, RTP, Raleigh, NC, United States of America","800 Park Offices Dr, Durham, NC 27703, USA",35.89612180,-78.87039630,mil,
+e2f78d2f75a807b89a13115a206da4661361fa71,Trip Outfits Advisor: Location-Oriented Clothing Recommendation,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+f472cb8380a41c540cfea32ebb4575da241c0288,Cross-dataset learning and person-specific normalisation for automatic Action Unit detection,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+f41d7f891a1fc4569fe2df66e67f277a1adef229,"Combining nonuniform sampling, hybrid super vector, and random forest with discriminative decision trees for action recognition","Samsung SAIT, Korea","Advanced Media Lab. Samsung Advanced Institute of Technology, Republic of Korea","130 Samseong-ro, Maetan 3(sam)-dong, Yeongtong-gu, Suwon, Gyeonggi-do, South Korea",37.25202260,127.05550190,company,
+f4411787688ca40466ee059ec64bf56d746733c1,Human emotion and cognition recognition from body language of the head using soft computing techniques,University of Ottawa,University of Ottawa,"University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada",45.42580475,-75.68740118,edu,
+f4465454811acb2021a46d84d94fc88e2dda00a6,An interactive facial expression generation system,National Taiwan University of Science and Technology,National Taiwan University of Science and Technology,"臺科大, 43, 基隆路四段, 學府里, 下內埔, 大安區, 臺北市, 10607, 臺灣",25.01353105,121.54173736,edu,
+f41e80f941a45b5880f4c88e5bf721872db3400f,Differential evolution-based subspace clustering via thresholding ridge regression,Jaypee Institute of Information Technology,Jaypee Institute of Information Technology,"Jaypee Institute of Information Technology, Noida, A-10, National Highway 24 Bypass, Asha Pushp Vihar, Kaushambi, Ghaziabad, Uttar Pradesh, 201001, India",28.63004430,77.37208230,edu,
+f4ba07d2ae6c9673502daf50ee751a5e9262848f,Real-time multi-view facial landmark detector learned by the structured output SVM,"National Institute of Informatics, Japan","National Institute of Informatics, Japan","2 Chome-1-2 Hitotsubashi, Chiyoda, Tokyo 100-0003, Japan",35.69248530,139.75825330,edu,
+f449c85b8ba5fa67ead341c7ad4ec396f4ab2dd6,Sample Weighting: An Inherent Approach for Outlier Suppressing Discriminant Analysis,City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+f486624efa750d718a670fba3c7f21b1c84ebaeb,Discriminative Dictionary Learning With Two-Level Low Rank and Group Sparse Decomposition for Image Classification,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+f4b5a8f6462a68e79d643648c780efe588e4b6ca,Enforcing similarity constraints with integer programming for better scene text recognition,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+f39783847499dd56ba39c1f3b567f64dfdfa8527,On categorising gender in surveillance imagery,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+f374ac9307be5f25145b44931f5a53b388a77e49,Improvements in Active Appearance Model based synthetic age progression for adult aging,University of North Carolina,University of North Carolina,"University of North Carolina, Emergency Room Drive, Chapel Hill, Orange County, North Carolina, 27599, USA",35.90503535,-79.04775327,edu,
+eb38f20eaa1b849cabec99815883390f84daf279,Automatic face detection in video sequences using local normalization and optimal adaptive correlation techniques,Ryerson University,Ryerson University,"Ryerson University, Gould Street, Downtown Yonge, Old Toronto, Toronto, Ontario, M5B 2G9, Canada",43.65815275,-79.37908010,edu,
+eb9867f5efc98d3203ce1037f9a8814b0d15d0aa,Periocular recognition based on Gabor and Parzen PNN,"Centre of Development of Advanced Computing, Mumbai, India","Centre of Development of Advanced Computing (CDAC) Mumbai, 400049, India","9, 10th Gulmohar Cross Rd, Gulmohar Road, JVPD Scheme, Juhu, Mumbai, Maharashtra 400049, India",19.11471490,72.83383690,edu,
+ebbceab4e15bf641f74e335b70c6c4490a043961,Evaluating the performance of face-aging algorithms,Cyprus University of Technology,Cyprus University of Technology,"Mitropoli Building - Cyprus University of Technology, Anexartisias, Limasol - Λεμεσός, Limassol - Λεμεσός, Κύπρος - Kıbrıs, 3036, Κύπρος - Kıbrıs",34.67567405,33.04577648,edu,
+ebc2a3e8a510c625353637e8e8f07bd34410228f,Dual Sparse Constrained Cascade Regression for Robust Face Alignment,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+eb5c1e526fe2d17778c68f60c874c3da0129fabd,A robust facial landmark detection method in multi-views,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+ebce3f5c1801511de9e2e14465482260ba5933cc,More than a Feeling: The MiFace Framework for Defining Facial Communication Mappings,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+eb240521d008d582af37f0497f12c51f4bab16c8,Statistical Richness of Visual Phase Information: Update on Recognizing Persons by Iris Patterns,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+ebb3d5c70bedf2287f9b26ac0031004f8f617b97,"Deep Learning for Understanding Faces: Machines May Be Just as Good, or Better, than Humans",Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+ebeb0546efeab2be404c41a94f586c9107952bc3,Multi-cue Augmented Face Clustering,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+eb87151fd2796ff5b4bbcf1906d41d53ac6c5595,Enhanced face detection using body part detections for wearable cameras,IBM Thomas J. Watson Research Center,IBM Thomas J. Watson Research Center,"IBM Yorktown research lab, Adams Road, Millwood, Town of New Castle, Westchester County, New York, 10562, USA",41.21002475,-73.80407056,company,
+c7745f941532b7d6fa70db09e81eb1167f70f8a7,Rank-one Projections with Adaptive Margins for Face Recognition,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+c7cd490e43ee4ff81e8f86f790063695369c2830,Use fast R-CNN and cascade structure for face detection,"Beijing FaceAll Co., Beijing, China","Beijing FaceAll Co. Beijing, China","Beijing, China",39.90419990,116.40739630,company,
+c07ab025d9e3c885ad5386e6f000543efe091c4b,Preserving Model Privacy for Machine Learning in Distributed Systems,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+c0f9fae059745e50658d9605bd8875fc3a2d0b4b,Vision-based animation of 3D facial avatars,Sejong University,"Intelligent Media Laboratory, Digital Contents Research Institute, Sejong University, Seoul, South Korea","209 Neungdong-ro, Gunja-dong, Gwangjin-gu, Seoul, South Korea",37.55025960,127.07313900,edu,
+c0b02be66a5a1907e8cfb8117de50f80b90a65a8,Manifold learning in sparse selected feature subspaces,Shanghai University,Shanghai University,"上海大学, 锦秋路, 大场镇, 宝山区 (Baoshan), 上海市, 201906, 中国",31.32235655,121.38400941,edu,
+c0f67e850176bb778b6c048d81c3d7e4d8c41003,Action recognition with gradient boundary convolutional network,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+ee2217f9d22d6a18aaf97f05768035c38305d1fa,Detection of facial parts via deformable part model using part annotation,Kobe University,Kobe University,"神戸大学, 灘三田線, 灘区, 神戸市, 兵庫県, 近畿地方, 657-00027, 日本",34.72757140,135.23710000,edu,
+ee56823f2f00c8c773e4ebc725ca57d2f9242947,Modest face recognition,University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.05015580,14.46907327,edu,
+ee2ec0836ded2f3f37bf49fa0e985280a8addaca,Face verification across age progression: A survey of the state-of-the-art,University of KwaZulu-Natal,University of KwaZulu-Natal,"238 Mazisi Kunene Rd, Glenwood, Durban, 4041, South Africa",-29.86742190,30.98072720,edu,
+eeaeca3a601d65d2d978bf3da43ab42fa5e08ed2,Fisher discrimination sparse learning based on graph embedding for image classification,Jiangnan University,Jiangnan University,"江南大学站, 蠡湖大道, 滨湖区, 南场村, 滨湖区 (Binhu), 无锡市 / Wuxi, 江苏省, 214121, 中国",31.48542550,120.27395810,edu,
+ee65cee5151928c63d3ef36fcbb582fabb2b6d2c,Structure-Aware Slow Feature Analysis for Age Estimation,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"Columbus, OH 43210, USA",40.01419050,-83.03091430,edu,
+c92e36689ef561df726a7ae861d9c166c3934908,Face hallucination by deep traversal network,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+c9efcd8e32dced6efa2bba64789df8d0a8e4996a,Deep Convolutional Neural Network with Independent Softmax for Large Scale Face Recognition,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+c907104680ad53bdc673f2648d713e4d26335825,Dataset and Metrics for Adult Age-Progression Evaluation,University of North Carolina Wilmington,University of North Carolina Wilmington,"Kenan House, 1705, Market Street, Wilmington, New Hanover County, North Carolina, 28403, USA",34.23755810,-77.92701290,edu,
+c9832564d5dc601113b4d80e5a05ede6fee9f7dd,Pre-trained classifiers with One Shot Similarity for context aware face verification and identification,"TCS Research, New Delhi, India","TCS Research, New Delhi, India","23, Prithviraj Rd, Tughlak Road Area, New Delhi, Delhi 110003, India",28.60208600,77.22407600,company,
+c90427085909029afd2af01d1967e80b78e01b88,Gaze-Assisted Multi-Stream Deep Neural Network for Action Recognition,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"Chengdu, Sichuan, China",30.67994420,104.06797500,edu,
+fc7b34a2e43bb3d3585e1963bb64a488e2f278a0,A Framework of Joint Graph Embedding and Sparse Regression for Dimensionality Reduction,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+fcd3d557863e71dd5ce8bcf918adbe22ec59e62f,Facial landmark localization based on hierarchical pose regression with cascaded random ferns,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+fcc6fe6007c322641796cb8792718641856a22a7,Automatic facial makeup detection with application in face recognition,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+fc8fb68a7e3b79c37108588671c0e1abf374f501,Semantic Pooling for Complex Event Analysis in Untrimmed Videos,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+fc8990088e0f1f017540900bc3f5a4996192ff05,Hierarchical bilinear network for high performance face detection,Chinese Academy of Science,"Key Lab of Intelligent Information Processing, Institute of Computer Technology, Chinese Academy of Science (CAS), Beijing, 100190, China","Beijing, China",39.90419990,116.40739630,edu,
+fcb97ede372c5bddde7a61924ac2fd29788c82ce,Ordinary Preserving Manifold Analysis for Human Age and Head Pose Estimation,"Advanced Digital Sciences Center, Singapore","Advanced Digital Sciences Center, Singapore","1 Create Way, 14-02 Create Tower, Singapore 138602",1.30372570,103.77377630,edu,
+fc5538e60952f86fff22571c334a403619c742c3,SampleBoost: Improving boosting performance by destabilizing weak learners based on weighted error analysis,University of North Texas,University of North Texas,"University of North Texas, West Highland Street, Denton, Denton County, Texas, 76201, USA",33.20988790,-97.15147488,edu,
+fcceea054cb59f1409dda181198ed4070ed762c9,Multiple face tracking method in the wild using color histogram features,Ajou University,Ajou University,"아주대학교, 성호대교, 이의동, 영통구, 수원시, 경기, 16499, 대한민국",37.28300030,127.04548469,edu,
+fc7f140fcedfe54dd63769268a36ff3f175662b5,FASTEN: An FPGA-Based Secure System for Big Data Processing,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+fd809ee36fa6832dda57a0a2403b4b52c207549d,A fully annotated thermal face database and its application for thermal facial expression recognition,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+fdd19fee07f2404952e629cc7f7ffaac14febe01,Face recognition based on dictionary learning with the locality constraints of atoms,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+fd38163654a0551ed7f4e442851508106e6105d9,Face Recognition Assisted With 3D Face Model,Capital University of Economics and Business,"College of Information, Capital University of Economics and Business, Beijing, China.sanyecunfu@emails.bjut.edu.cn","121 Shoujingmao S Rd, Huaxiang, Fengtai Qu, Beijing Shi, China, 100070",39.84117100,116.31644700,edu,
+f2902f5956d7e2dca536d9131d4334f85f52f783,Facial age estimation using Clustered Multi-task Support Vector Regression Machine,University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.54724732,edu,
+f2d605985821597773bc6b956036bdbc5d307386,Sharable and Individual Multi-View Metric Learning,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+f2896dd2701fbb3564492a12c64f11a5ad456a67,Cross-database age estimation based on transfer learning,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+f27e5a13c1c424504b63a9084c50f491c1b17978,Robust Top-k Multiclass SVM for Visual Category Recognition,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+f25aa838fb44087668206bf3d556d31ffd75235d,Vinereactor: Crowdsourced Spontaneous Facial Expression Data,Villanova University,Villanova University,"Villanova University, East Lancaster Avenue, Radnor Township, Delaware County, Pennsylvania, 19010, USA",40.03677740,-75.34202332,edu,
+f2d15482e7055dd5f54cf4a8a8f60d8e75af7edf,High frequency compensated face hallucination,"Kao Corporation, Tokyo, Japan","Beauty Cosmetic Research Lab, Kao Corporation, Tokyo, Japan","Tokyo, Japan",35.68948750,139.69170640,company,
+f2eab39cf68de880ee7264b454044a55098e8163,Discriminative K-SVD for dictionary learning in face recognition,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+f2cc459ada3abd9d8aa82e92710676973aeff275,Object class recognition using range of multiple computer vision algorithms,South East European University,South East European University,"Универзитет на Југоисточна Европа, 335, Мајка Тереза, Тетово, Општина Тетово, Полошки Регион, 1200, Македонија",41.98676415,20.96254516,edu,
+f27fd2a1bc229c773238f1912db94991b8bf389a,How do you develop a face detector for the unconstrained environment?,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+f28ef0a61a45a8b9cd03aa0ca81863e1d54a31d1,An accurate eye pupil localization approach based on adaptive gradient boosting decision tree,"State Grid Shanghai Electric Power Company, Shanghai, China","Electric Power Research Institute, State Grid Shanghai Electric Power Company Shanghai, 200093, China","Shanghai, China",31.23039040,121.47370210,company,
+f201baf618574108bcee50e9a8b65f5174d832ee,Viewpoint-Consistent 3D Face Alignment,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+f5c57979ec3d8baa6f934242965350865c0121bd,An Across-Target Study on Visual Attentions in Facial Expression Recognition,Shanghai University,Shanghai University,"上海大学, 锦秋路, 大场镇, 宝山区 (Baoshan), 上海市, 201906, 中国",31.32235655,121.38400941,edu,
+f5a95f857496db376d69f7ac844d1f56e3577b75,"The LDOS-PerAff-1 corpus of facial-expression video clips with affective, personality and user-interaction metadata",University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.05015580,14.46907327,edu,
+e3b9863e583171ac9ae7b485f88e503852c747b6,Deep Relative Attributes,King Saud University,King Saud University,"King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية",24.72464030,46.62335012,edu,
+cf4c1099bef189838877c8785812bc9baa5441ed,Semantic-free attributes for image classification,Paristech,"LTCI, CNRS, T&#x00E9;l&#x00E9;com ParisTech, Universit&#x00E9; Paris-Saclay, 75013, France","3 Rue Michel Ange, 75016 Paris, France",48.84760370,2.26399340,edu,
+cfaf61bacf61901b7e1ac25b779a1f87c1e8cf7f,Application for video analysis based on machine learning and computer vision algorithms,Yaroslavl State University,Yaroslavl State University,"ЯрГУ им. Демидова (Экономический факультет), 3, Комсомольская улица, Кировский район, Ярославль, городской округ Ярославль, Ярославская область, ЦФО, 150000, РФ",57.62521030,39.88456560,edu,
+cf736f596bf881ca97ec4b29776baaa493b9d50e,Low Dimensional Deep Features for facial landmark alignment,"Samsung R&D Institute, Bangalore, India","Samsung R&D Institute, Bangalore, India","#2870, Phoenix Building, 4th Floor Bagmane Constellation Business Park, Outer Ring Rd, Doddanekundi, Marathahalli, Bengaluru, Karnataka 560037, India",12.98035370,77.69751010,company,
+cf7a4442a6aad0e08d4aade8ec379c44f84bca8a,Learning parts-based representation for face transition,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+cf784156547c3be146706e2763c1a52d939d1722,Breaking video into pieces for action recognition,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+cfa40560fa74b2fb5c26bdd6ea7c610ba5130e2f,Subspace Learning for Facial Age Estimation Via Pairwise Age Ranking,National Tsing Hua University,National Tsing Hua University,"國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.79254840,120.99511830,edu,
+cf54e9776d799aa183d7466094525251d66389a4,Key point localization for 3d model generation from facial illustrations using SURF and color features,Meiji University,Meiji University,"明治大学, 錦華坂, 猿楽町1, 猿楽町, 東京, 千代田区, 東京都, 関東地方, 101-0051, 日本",35.69750290,139.76139175,edu,
+cf6851c24f489dabff0238e01554edea6aa0fc7c,An analytical method for face detection based on image patterns of EEG signals in the time-frequency domain,Tokushima University,"Institute of Technology and Science, Tokushima University, 2-1 Minamijyousanjima, 770-8506, Japan","2 Chome-24 Shinkuracho, Tokushima, Tokushima Prefecture 770-8501, Japan",34.07010740,134.55979330,edu,
+cae41c3d5508f57421faf672ee1bea0da4be66e0,Palmprint recognition via discriminative index learning,University of Lugano,"Institute of Computational Science, University of Lugano, Switzerland","Via Giuseppe Buffi 13, 6900 Lugano, Switzerland",46.01073700,8.95810900,edu,
+ca902aeec4fa54d32a4fed9ba89a7fb2f7131734,Entropy-based active sparse subspace clustering,Tianjin University,Tianjin University,"天津大学, 92, 卫津路, 南开区, 天津市, 300072, 中国",39.10751000,117.16990000,edu,
+ca458f189c1167e42d3a5aaf81efc92a4c008976,Double Shrinking Sparse Dimension Reduction,University of Technology,"Center for Quantum Computation and Intelligent Systems, Faculty of Engineering and Information Technology, University of Technology, Sydney, NSW, Australia","11 University of Technology Sydney 81, Broadway, Ultimo NSW 2007, Australia",-33.88405040,151.19922540,edu,
+ca8f23d9b9a40016eaf0467a3df46720ac718e1d,Face detection using Local Hybrid Patterns,Korea Advanced Institute of Science and Technology,Korea Advanced Institute of Science and Technology,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.36971910,127.36253700,edu,
+ca44a838da4187617dca9f6249d8c4b604661ec7,Multi-pose face hallucination via neighbor embedding for facial components,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+cacce7f4ce74e3269f5555aa6fd83e48baaf9c96,Circle & Search: Attribute-Aware Shoe Retrieval,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+ca37933b6297cdca211aa7250cbe6b59f8be40e5,"Multi-task learning for smile detection, emotion recognition and gender classification",Hanoi University of Science and Technology,Hanoi University of Science and Technology,"HUST, Trần Đại Nghĩa, Hai Bà Trưng, Hà Nội, 10999, Việt Nam",21.00395200,105.84360183,edu,
+e41246837c25d629ca0fad74643fb9eb8bf38009,Multi-color ULBP with wavelet transform in invariant pose face recognition,Universiti Teknologi Malaysia,"Computer Vision, Video and Image Processing (CvviP) Research Lab, Faculty of Electrical Engineering, Universiti Teknologi Malaysia, 81310 UTM Skudai, Johor, Malaysia","Sultan Ibrahim Chancellery Building, Jalan Iman, 81310 Skudai, Johor, Malaysia",1.56327890,103.63821900,edu,
+e4d53e7f4c2052940841abc08f9574655f3f7fb4,TaiChi: A Fine-Grained Action Recognition Dataset,East China Normal University,East China Normal University,"华东师范大学, 3663, 中山北路, 曹家渡, 普陀区, 普陀区 (Putuo), 上海市, 200062, 中国",31.22849230,121.40211389,edu,
+e4d7b8eb0a8e6d2bb5b90b027c1bf32bad320ba5,Learning Semantic-Aligned Action Representation,Anhui University,Anhui University,"安徽大学(磬苑校区), 111, 九龙路, 弘泰苑, 合肥国家级经济技术开发区, 芙蓉社区, 合肥经济技术开发区, 合肥市区, 合肥市, 安徽省, 230601, 中国",31.76909325,117.17795091,edu,
+e4fa062bff299a0bcef9f6b2e593c85be116c9f1,Cascaded Elastically Progressive Model for Accurate Face Alignment,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+e47e8fa44decf9adbcdb02f8a64b802fe33b29ef,Robust Distance Metric Learning via Bayesian Inference,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+e42f3c27391821f9873539fc3da125b83bffd5a2,An efficient method for face recognition under illumination variations,University of Windsor,University of Windsor,"Bridge AA, Ambassador Bridge, Windsor, Essex, Ontario, N9C 2J9, Canada",42.30791465,-83.07176915,edu,
+e4e07f5f201c6986e93ddb42dcf11a43c339ea2e,Cross-pose landmark localization using multi-dropout framework,National Taiwan University of Science and Technology,National Taiwan University of Science and Technology,"臺科大, 43, 基隆路四段, 學府里, 下內埔, 大安區, 臺北市, 10607, 臺灣",25.01353105,121.54173736,edu,
+e4c3587392d477b7594086c6f28a00a826abf004,Face recognition by facial attribute assisted network,National Taiwan University of Science and Technology,National Taiwan University of Science and Technology,"臺科大, 43, 基隆路四段, 學府里, 下內埔, 大安區, 臺北市, 10607, 臺灣",25.01353105,121.54173736,edu,
+fe14d8177cbdb7e5b4085302e6e044f7a4c19cb2,Gradient feature matching for expression invariant face recognition using single reference image,University of Dayton,University of Dayton,"University of Dayton, Caldwell Street, South Park Historic District, Dayton, Montgomery, Ohio, 45409, USA",39.73844400,-84.17918747,edu,
+fe5d6c65e51386f4d36f7434fe6fcd9494fe9361,Discriminant Manifold Learning via Sparse Coding for Robust Feature Extraction,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+c847de9faa1f1a06d5647949a23f523f84aba7f3,Moving face spoofing detection via 3D projective invariants,EURECOM,EURECOM,"Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France",43.61438600,7.07112500,edu,
+c83d142a47babe84e8c4addafa9e2bb9e9b757a5,Facial expression recognition with robust covariance estimation and Support Vector Machines,University of Thessaloniki,University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+c83e26622b275fdf878135e71c23325a31d0e5fc,Denser Trajectories of Anchor Points for Action Recognition,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+c8fb8872203ee694d95da47a1f9929ac27186d87,View and texture-independent facial expression recognition in videos using dynamic programming,"Compiegne University of Technology, France","HEUDIASYC Mixed Res. Unit, Compiegne Univ. of Technol., France","57 Avenue de Landshut, 60200 Compiègne, France",49.40075300,2.79528080,edu,
+c8bc8c99acd009e4d27ddd8d9a6e0b899d48543e,Confidence fusion based emotion recognition of multiple persons for human-robot interaction,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+c81b27932069e6c7016bfcaa5e861b99ac617934,Leveraging geometric correlation for input-adaptive facial landmark regression,Dalian University of Technology,Dalian University of Technology,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国",38.88140235,121.52281098,edu,
+fb3aaf18ea07b30d1836e7cf2ab9fa898627fe93,Facial Expression Recognition Using Weighted Mixture Deep Neural Network Based on Double-Channel Facial Images,"Changzhou Textile Garment Institute, Changzhou, China","College of Mechanical and Electrical, Changzhou Textile Garment Institute, Changzhou, China","China, Jiangsu, Changzhou, Wujin, 武宜南路鸣凰镇北庙桥加油站北50米",31.68423300,119.93616400,edu,
+fb1b6138aeb081adf853316c0d83ef4c5626a7fa,SCNN: Sequential convolutional neural network for human action recognition in videos,Chinese Academy of Science,"Key Lab of Intelligent Information Processing, Institute of Computer Technology, Chinese Academy of Science (CAS), Beijing, 100190, China","Beijing, China",39.90419990,116.40739630,edu,
+fbc9ba70e36768efff130c7d970ce52810b044ff,Face-graph matching for classifying groups of people,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+ed9de242a23ad546902e1d5ec022dbb029cc2282,Local binary pattern orientation based face recognition,National Tsing Hua University,National Tsing Hua University,"國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.79254840,120.99511830,edu,
+edfce091688bc88389dd4877950bd58e00ff1253,A talking profile to distinguish identical twins,Facebook,Facebook,"250 Bryant St, Mountain View, CA 94041, USA",37.39367170,-122.08072620,company,"Facebook, Mountain View, CA"
+edbddf8c176d6e914f0babe64ad56c051597d415,Predicting Image Memorability Through Adaptive Transfer Learning From External Sources,Shandong University,Shandong University,"山东大学, 泰安街, 鳌山卫街道, 即墨区, 青岛市, 山东省, 266200, 中国",36.36934730,120.67381800,edu,
+ed273b5434013dcdb9029c1a9f1718da494a23a2,Off-Feature Information Incorporated Metric Learning for Face Recognition,Southwest University,Southwest University,"西南大学, 天生路, 北碚区 (Beibei), 北碚区, 北碚区 (Beibei), 重庆市, 400711, 中国",29.82366295,106.42050016,edu,
+ed0d8997a4b7b80a7cd3592e98bdbe5c3aab0cee,A survey on compressed domain video analysis techniques,Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.02223470,77.56718325,edu,
+ed70d1a9435c0b32c0c75c1a062f4f07556f7016,Correlated warped Gaussian processes for gender-specific age estimation,Dalian University of Technology,Dalian University of Technology,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国",38.88140235,121.52281098,edu,
+edd6ed94207ab614c71ac0591d304a708d708e7b,Reconstructive discriminant analysis: A feature extraction method induced from linear regression classification,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+ede16b198b83d04b52dc3f0dafc11fd82c5abac4,LBP edge-mapped descriptor using MGM interest points for face recognition,National Tsing Hua University,National Tsing Hua University,"國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.79254840,120.99511830,edu,
+c12260540ec14910f5ec6e38d95bdb606826b32e,Privileged Information-Based Conditional Structured Output Regression Forest for Facial Point Detection,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+c1a70d63d1667abfb1f6267f3564110d55c79c0d,Shadow compensation and illumination normalization of face image,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"Chengdu, Sichuan, China",30.67994420,104.06797500,edu,
+c138c76809b8da9e5822fb0ae38457e5d75287e0,Random Forest Construction With Robust Semisupervised Node Splitting,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+c1581b5175994e33549b8e6d07b4ea0baf7fe517,Online incremental clustering with distance metric learning for high dimensional data,"Kyoto University, Japan",The Kyoto University of JAPAN,"Yoshidahonmachi, Sakyo Ward, Kyoto, Kyoto Prefecture 606-8501, Japan",35.02624440,135.78082180,edu,
+c18a03568d4b512a0d8380cbb1fbf6bd56d11f05,A Wearable IoT with Complex Artificial Perception Embedding for Alzheimer Patients,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+c1173b8d8efb8c2d989ce0e51fe21f6b0b8d1478,Semi-Supervised Image-to-Video Adaptation for Video Action Recognition,Shenzhen University,Shenzhen University,"深圳大学, 3688, 南海大道, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518060, 中国",22.53521465,113.93159110,edu,
+c1c2775e19d6fd2ad6616f69bda92ac8927106a2,In the Pursuit of Effective Affective Computing: The Relationship Between Features and Registration,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+c1fb854d9a04b842ff38bd844b50115e33113539,A Video-Based Facial Motion Tracking and Expression Recognition System,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+c17c7b201cfd0bcd75441afeaa734544c6ca3416,Layerwise Class-Aware Convolutional Neural Network,Southeast University,Southeast University,"SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国",32.05752790,118.78682252,edu,
+c175ebe550761b18bac24d394d85bdfaf3b7718c,Facial expression recognition using Fisher weight maps,"Tokyo University, Japan","Graduate Sch. of Inf. Sci. & Technol., Tokyo Univ., Japan","Tokyo, Japan",35.68948750,139.69170640,edu,
+c61eaf172820fcafaabf39005bd4536f0c45f995,Spatio-Temporal Scale Selection in Video Data,"KTH Royal Institute of Technology, Stockholm","KTH Royal Institute of Technology, Stockholm","KTH, Teknikringen, Lärkstaden, Norra Djurgården, Östermalms stadsdelsområde, Sthlm, Stockholm, Stockholms län, Svealand, 114 28, Sverige",59.34986645,18.07063213,edu,
+c6382de52636705be5898017f2f8ed7c70d7ae96,Unconstrained face detection: State of the art baseline and challenges,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+c631a31be2c793d398175ceef7daff1848bb6408,Emotional condition in the Health Smart Homes environment: emotion recognition using ensemble of classifiers,University of São Paulo,"Institute of Mathematical and Computer Sciences, University of São Paulo, São Carlos, Brazil","Av. Trab. São Carlense, 400 - Centro, São Carlos - SP, 13566-590, Brazil",-22.00703470,-47.89493230,edu,
+c60601bdb5465d8270fdf444e5d8aeccab744e29,Rotation invariant Facial Expression Recognition in image sequences,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+ecac3da2ff8bc2ba55981467f7fdea9de80e2092,Face recognition under varying lighting conditions using self quotient image,Chinese Academy of Science,"Key Lab of Intelligent Information Processing, Institute of Computer Technology, Chinese Academy of Science (CAS), Beijing, 100190, China","Beijing, China",39.90419990,116.40739630,edu,
+ecc4be938f0e61a9c6b5111e0a99013f2edc54b9,Improving the recognition of faces occluded by facial accessories,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+ec5c63609cf56496715b0eba0e906de3231ad6d1,Private and Scalable Personal Data Analytics Using Hybrid Edge-to-Cloud Deep Learning,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+4efd58102ff46b7435c9ec6d4fc3dd21d93b15b4,"Matching Software-Generated Sketches to Face Photographs With a Very Deep CNN, Morphed Faces, and Transfer Learning",University of Malta,University of Malta,"University of Malta, Ring Road, Japanese Garden, L-Imsida, Malta, MSD 9027, Malta",35.90232260,14.48341890,edu,
+4e1d89149fc4aa057a8becce2d730ec6afd60efa,IPCM separability ratio for supervised feature selection,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+4ea63435d7b58d41a5cbcdd34812201f302ca061,Robust blurred face recognition using sample-wise kernel estimation and random compressed multi-scale local binary pattern histograms,Shanghai University,Shanghai University,"上海大学, 锦秋路, 大场镇, 宝山区 (Baoshan), 上海市, 201906, 中国",31.32235655,121.38400941,edu,
+4e6e5cb93e7e564bc426b5b27888d55101504c50,Analyzing user behavior in online advertising with facial expressions,Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+4eeccbbb98de4f2e992600482fd6b881ace014bb,Multi-pose Facial Expression Recognition Using Transformed Dirichlet Process,Wayne State University,Wayne State University,"Parking Structure 3, East Warren Avenue, New Center, Detroit, Wayne County, Michigan, 48236, USA",42.35775700,-83.06286711,edu,
+4e581831d24fd90b0b5228b9136e76fa3e8f8279,LGE-KSVD: Robust Sparse Representation Classification,Rochester Institute of Technology,Rochester Institute of Technology,"Rochester Institute of Technology (RIT), 1, Lomb Memorial Drive, Bailey, Henrietta Town, Monroe County, New York, 14623, USA",43.08250655,-77.67121663,edu,
+4eb8030b31ff86bdcb063403eef24e53b9ad4329,LSTM for dynamic emotion and group emotion recognition in the wild,Beijing Normal University,Beijing Normal University,"北京师范大学, 19, 新街口外大街, 西城区, 100875, 中国",39.96014155,116.35970438,edu,
+4e37cd250130c6fd60e066f0c8efb3cbb778c421,"Discriminant Analysis via Joint Euler Transform and $\ell_{2,1}$ -Norm",Lancaster University,Lancaster University,"Lancaster University, Library Avenue, Bowland College, Hala, Lancaster, Lancs, North West England, England, LA1 4AP, UK",54.00975365,-2.78757491,edu,
+20d6a4aaf5abf2925fdce2780e38ab1771209f76,Double Supervision Face Recognition Based on Deep Learning,"Hexi University, China","Hexi University, Center for Information Technology, Zhangye, China","China, Gansu, Zhangye, Ganzhou, N Ring Rd, 环城北路",38.94385400,100.44471980,edu,
+204f1cf56794bb23f9516b5f225a6ae00d3d30b8,An AdaBoost-Based Face Detection System Using Parallel Configurable Architecture With Optimized Computation,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+20b405d658b7bb88d176653758384e2e3e367039,Face recognition with manifold-based kernel discriminant analysis,Islamic Azad University,Islamic Azad University,"دانشگاه آزاد اسلامی, همدان, بخش مرکزی شهرستان همدان, شهرستان همدان, استان همدان, ‏ایران‎",34.84529990,48.55962120,edu,
+20eabf10e9591443de95b726d90cda8efa7e53bb,Discriminative Histogram Intersection Metric Learning and Its Applications,Waseda University,Waseda University,"早稲田大学 北九州キャンパス, 2-2, 有毛引野線, 八幡西区, 北九州市, 福岡県, 九州地方, 808-0135, 日本",33.88987280,130.70856205,edu,
+205f035ec90a7fa50fd04fdca390ce83c0eea958,Emotion Recognition Using Multiple Kernel Learning toward E-learning Applications,Charles Sturt University,Charles Sturt University,"Charles Sturt University, Wagga Wagga, NSW, 2678, Australia",-35.06360710,147.35522340,edu,
+189e5a2fa51ed471c0e7227d82dffb52736070d8,Cross-age face recognition using reference coding with kernel direct discriminant analysis,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+18855be5e7a60269c0652e9567484ce5b9617caa,Local Centre of Mass Face for face recognition under varying illumination,Jadavpur University,Jadavpur University,"Jadavpur University, Chingrighata Flyover, Basani Devi Colony, Kolkata, Hāora, West Bengal, 700098, India",22.56115370,88.41310194,edu,
+1860b8f63ce501bd0dfa9e6f2debc080e88d9baa,Local Large-Margin Multi-Metric Learning for Face and Kinship Verification,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+18bfda16116e76c2b21eb2b54494506cbb25e243,Face Recognition in Global Harmonic Subspace,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+184dba921b932143d196c833310dee6884fa4a0a,Distributed face recognition system for indoor environments,"Istanbul Technical University, Turkey","Bilgisayar M&#x00FC;hendisli&#x011F;i B&#x00F6;l&#x00FC;m&#x00FC;, &#x0130;stanbul Teknik &#x00DC;niversitesi, &#x0130;stanbul, T&#x00FC;rkiye","Maslak, 34467 Sarıyer/İstanbul, Turkey",41.10559410,29.02534010,edu,
+18dd3867d68187519097c84b7be1da71771d01a3,Efficient evaluation of human-powered joins with crowdsourced join pre-filters,University of Tsukuba,University of Tsukuba,"University of Tsukuba, つばき通り, Kananemoto-satsukabe village, つくば市, 茨城県, 関東地方, 305-8377, 日本",36.11120580,140.10551760,edu,
+187f3ee3bc50a1f2471edc80d707e4fa1cac5b0b,Random Subspace Supervised Descent Method for Regression Problems in Computer Vision,University of Hong Kong,University of Hong Kong,"海洋科學研究所 The Swire Institute of Marine Science, 鶴咀道 Cape D'Aguilar Road, 鶴咀低電台 Cape D'Aguilar Low-Level Radio Station, 石澳 Shek O, 芽菜坑村 Nga Choy Hang Tsuen, 南區 Southern District, 香港島 Hong Kong Island, HK, 中国",22.20814690,114.25964115,edu,
+18e54b74ed1f3c02b7569f53a7d930d72fc329f5,Robust Multiview Data Analysis Through Collective Low-Rank Subspace,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+188abc5bad3a3663d042ce98c7a7327e5a1ae298,Generalized Projection-Based M-Estimator,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+180bd019eab85bbf01d9cddc837242e111825750,A Content-Adaptive Joint Image Compression and Encryption Scheme,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+270acff7916589a6cc9ca915b0012ffcb75d4899,On the Applications of Robust PCA in Image and Video Processing,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+27812db1d2f68611cc284d65d11818082e572008,Computer vision for the blind: A dataset for experiments on face detection and recognition,University of Trieste,"DIA, University of Trieste, Italy","Via Alfonso Valerio, 6/1, Edificio C8, 34127 Trieste TS, Italy",45.66025100,13.79458400,edu,
+27e5b7ae3506a0f7472ee9089cd2472442e71c14,Low-resolution degradation face recognition over long distance based on CCA,Shandong University,Shandong University,"山东大学, 泰安街, 鳌山卫街道, 即墨区, 青岛市, 山东省, 266200, 中国",36.36934730,120.67381800,edu,
+27b451abfe321a696c852215bb7efb4c2e50c89f,Panoramic Face Recognition,Chang Gung University,Chang Gung University,"長庚科技大學林口校區, 261, 文化一路, A7合宜住宅, 樂善里, 木尾, 龜山區, 桃園市, 33301, 臺灣",25.03043800,121.39009513,edu,
+279459cbbc5c6db4802e9c737cc72a612d76f7fc,DMMLN: A deep multi-task and metric learning based network for video classification,"National Digital Switching System Engineering and Technological Research Center, Zhengzhou, China","National Digital Switching System Engineering and Technological Research Center, Zhengzhou, China","Zhengzhou, Henan, China",34.74661100,113.62532800,edu,
+272e487dfa32f241b622ac625f42eae783b7d9aa,Face recognition via semi-supervised discriminant local analysis,Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.64185301,edu,
+2744e6d526b8f2c1b297ac2d2458aaa08b0cda11,Example image-based feature extraction for face recognition,Korea Advanced Institute of Science and Technology,Korea Advanced Institute of Science and Technology,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.36971910,127.36253700,edu,
+4b0cb10c6c3f2d581ac9eb654412f70bc72ed661,Automatic replication of teleoperator head movements and facial expressions on a humanoid robot,"Centre for Autism Research, Philadelphia, US","Centre for Autism Research, Philadelphia, US","5, 2716 South St, Philadelphia, PA 19104, USA",39.94558380,-75.18632190,edu,
+4b9b30066a05bdeb0e05025402668499ebf99a6b,Real-time face detection using Gentle AdaBoost algorithm and nesting cascade structure,Huaqiao University,Huaqiao University,"华侨大学站 HuaQiao University (BRT), 集美大道, 集美区, 集美区 (Jimei), 厦门市 / Xiamen, 福建省, 361024, 中国",24.60047120,118.08165740,edu,
+4b5ff8c67f3496a414f94e35cb35a601ec98e5cf,Understanding the discrimination power of facial regions in forensic casework,"Dirección General de la Guardia Civil, Madrid, Spain","Dirección General de la Guardia Civil - DGGC Madrid, Spain","Calle de Guzmán el Bueno, 110, 28003 Madrid, Spain",40.44455650,-3.71227850,edu,
+4b9ec224949c79a980a5a66664d0ac6233c3d575,Human Facial Age Estimation by Cost-Sensitive Label Ranking and Trace Norm Regularization,Beijing Jiaotong University,Beijing Jiaotong University,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国",39.94976005,116.33629046,edu,
+4ba2f445fcbbad464f107b036c57aa807ac5c0c2,Sparse Discriminative Multimanifold Grassmannian Analysis for Face Recognition With Image Sets,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+4bf85ef995c684b841d0a5a002d175fadd922ff0,Ensemble of Deep Models for Event Recognition,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+4b9c47856f8314ecbe4d0efc65278c2ededb2738,Spatiotemporal Local Monogenic Binary Patterns for Facial Expression Recognition,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+4b936847f39094d6cb0bde68cea654d948c4735d,Face alignment under occlusion based on local and global feature regression,Hunan University,Hunan University,"Yejin University for Employees, 冶金西路, 和平乡, 珠晖区, 衡阳市 / Hengyang, 湖南省, 中国",26.88111275,112.62850666,edu,
+11bb2abe0ca614c15701961428eb2f260e3e2eef,Joint Normalization and Dimensionality Reduction on Grassmannian: A Generalized Perspective,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+11ba01ce7d606bab5c2d7e998c6d94325521b8a0,Regression based landmark estimation and multi-feature fusion for visual speech recognition,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+110919f803740912e02bb7e1424373d325f558a9,Statistical Inference of Gaussian-Laplace Distribution for Person Verification,China University of Geosciences,"China University of Geosciences, Wuhan, China","388 Lumo Rd, Hongshan Qu, Wuhan Shi, Hubei Sheng, China, 430073",30.52715100,114.40076200,edu,
+11e6cf1cbb33d67a3e3c87dcaf7031d6654bc0de,Object class detection: A survey,University of Alberta,University of Alberta,"University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada",53.52385720,-113.52282665,edu,
+113cd9e5a4081ce5a0585107951a0d36456ce7a8,Real-time Recognition of Facial Expression using Active Appearance Model with Second Order Minimization and Neural Network,Pohang University of Science and Technology,Pohang University of Science and Technology,"포스텍, 77, 청암로, 효곡동, 남구, 포항시, 경북, 37673, 대한민국",36.01773095,129.32107509,edu,
+11f8d0a54e55c5e6537eef431cd548fa292ef90b,Deep learning algorithms for discriminant autoencoding,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+110359824a0e3b6480102b108372793265a24a86,Landmark perturbation-based data augmentation for unconstrained face recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+1125760c14ea6182b85a09bf3f5bad1bdad43ef5,A Probabilistic Approach to Linear Subspace Fitting for Computer Vision Problems,Tohoku University,Tohoku University,"Tohoku University, 五橋通, 青葉区, 仙台市, 宮城県, 東北地方, 980-0811, 日本",38.25309450,140.87365930,edu,
+11a6593e6e35f95ebeb5233897d1d8bcad6f9c87,A Brain-Inspired Method of Facial Expression Generation Using Chaotic Feature Extracting Bidirectional Associative Memory,University of Ottawa,University of Ottawa,"University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada",45.42580475,-75.68740118,edu,
+7d61b70d922d20c52a4e629b09465076af71ddfd,Nonnegative class-specific entropy component analysis with adaptive step search criterion,University of Macau,University of Macau,"研究生宿舍 Residência de Estudantes de Pós-Graduação da Universidade de Macau, 澳門大學 Universidade de Macau, 嘉模堂區 Nossa Senhora do Carmo, 氹仔 Taipa, Universidade de Macau em Ilha de Montanha 澳門大學橫琴校區, 中国",22.12401870,113.54510901,edu,
+7d45f1878d8048f6b3de5b3ec912c49742d5e968,Automatic Facial Expression Recognition System Using Deep Network-Based Data Fusion,Indian Institute of Technology Kanpur,Indian Institute of Technology Kanpur,"Indian Institute of Technology Kanpur, 4th Avenue, Panki, Kanpur, Kanpur Nagar, Uttar Pradesh, 208016, India",26.51318800,80.23651945,edu,
+7d7b036ed01765c9473d695f029142128d442aaa,Real-Time Action Recognition With Deeply Transferred Motion Vector CNNs,Tongji University,Tongji University,"同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国",31.28473925,121.49694909,edu,
+7dc498d45f9fcb97acee552c6f587b65d5122c35,Face detection and landmark localization using Bilayer Tree Structured Model,National Taiwan University of Science and Technology,National Taiwan University of Science and Technology,"臺科大, 43, 基隆路四段, 學府里, 下內埔, 大安區, 臺北市, 10607, 臺灣",25.01353105,121.54173736,edu,
+7de8a8b437ec7a18e395be9bf7c8f2d502025cc6,Robust face recognition under illumination variation and occlusion (in english),"Beyazıt University, Ankara, Turkey","Bilgisayar Mühendisliği Bölümü, Ankara Yıldırım Beyazıt Üniversitesi, Ankara, Türkiye","Ankara, Turkey",39.93336350,32.85974190,edu,
+298c2be98370de8af538c06c957ce35d00e93af8,Prototype-based class-specific nonlinear subspace learning for large-scale face verification,Tampere University of Technology,Tampere University of Technology,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi",61.44964205,23.85877462,edu,
+2961e14c327341d22d5f266a6872aa174add8ac4,Web Image Re-Ranking Using Query-Specific Semantic Signatures,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+29f298dd5f806c99951cb434834bc8dcc765df18,Computationally efficient template-based face recognition,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+293d69d042fe9bc4fea256c61915978ddaf7cc92,Face Recognition by Coarse-to-Fine Landmark Regression with Application to ATM Surveillance,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+2983cf95743be82671a71528004036bd19172712,Asymmetric Binary Coding for Image Search,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+29a5d38390857e234c111f8bb787724c08f39110,Statistical appearance models for automatic pose invariant face recognition,TU Berlin,TU Berlin,"Franklinstraße 28-29, 10587 Berlin, Germany",52.51806410,13.32504250,edu,"Franklinstr. 28/29, 10587, Germany"
+292e1c88d43a77dbe5c610f4f611cfdb6d3212b6,"Multimodal coordination of facial action, head rotation, and eye motion during spontaneous smiles",Pittsburgh University,"Pittsburgh Univ., PA, USA","4200 Fifth Ave, Pittsburgh, PA 15260, USA",40.44435330,-79.96083500,edu,
+7c57ac7c9f84fbd093f6393e2b63c18078bf0fdf,Investigating 3-D Model and Part Information for Improving Content-Based Vehicle Retrieval,"Industrial Technology Research Institute, Hsinchu, Taiwan","Industrial Technology Research Institute, Hsinchu, Taiwan","工研院, 195, 中興路四段, 頭重里, 竹東鎮, 新竹縣, 31040, 臺灣",24.77417560,121.04509279,edu,
+7caa3a74313f9a7a2dd5b4c2cd7f825d895d3794,Markov Chain Monte Carlo for Automated Face Image Analysis,University of Basel,University of Basel,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra",47.56126510,7.57529610,edu,
+7cdf3bc1de6c7948763c0c2dfa4384dcbd3677a0,Robust Statistical Frontalization of Human and Animal Faces,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+7c457c9a658327af6f6490729b4cab1239c22005,An Emotion Recognition System for Mobile Applications,King Saud University,King Saud University,"King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية",24.72464030,46.62335012,edu,
+7cfbf90368553333b47731729e0e358479c25340,"Towards a Unified Framework for Pose, Expression, and Occlusion Tolerant Automatic Facial Alignment",Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+7c13fa0c742123a6a927771ce67da270492b588c,Deep Bidirectional Cross-Triplet Embedding for Online Clothing Shopping,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+7c66e7f357553fd4b362d00ff377bffb9197410e,Gaussian Process Domain Experts for Modeling of Facial Affect,MIT Media Lab,"MIT Media Laboratory, Cambridge, MA, USA","75 Amherst St, Cambridge, MA 02139, USA",42.36035700,-71.08726400,edu,
+16b0c171fb094f677fcdf78bbb9aaef0d5404942,Category-Specific Object Image Denoising,"CSIRO, Australia","NICTA, PO Box 6020, St Lucia, QLD 4067, Australia","Research Way, Clayton VIC 3168, Australia",-37.90627370,145.13194490,edu,f.k.a. NICTA
+16eaa26a84468b27e559215db01c53286808ec2a,MoFAP: A Multi-level Representation for Action Recognition,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+16c1b592d85d13f1ba4eff0afb4441bb78650785,Multilinear Spatial Discriminant Analysis for Dimensionality Reduction,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+16fadde3e68bba301f9829b3f99157191106bd0f,Utility data annotation with Amazon Mechanical Turk,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+16fc82d44188eb49a151bd5836a29911b3bfabcb,Sparse Softmax Vector Coding Based Deep Cascade Model,Chongqing University,Chongqing University,"重庆工商大学, 19, 翠林路, 重庆市, 重庆市中心, 南岸区 (Nan'an), 重庆市, 400067, 中国",29.50841740,106.57858552,edu,
+42441f1fee81c8fd42a74504df21b3226a648739,Automatic gender recognition based on pixel-pattern-based texture feature,Dalian University of Technology,Dalian University of Technology,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国",38.88140235,121.52281098,edu,
+42fff5b37006009c2dbfab63c0375c7c7d7d8ee3,Regularized directional feature learning for face recognition,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+42a6beed493c69d5bad99ae47ea76497c8e5fdae,Joint salient object detection and existence prediction,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+425ea5656c7cf57f14781bafed51182b2e6da65f,Structured Kernel Dictionary Learning With Correlation Constraint for Object Recognition,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+89e31777f221ddb3bc9940d7f520c8114c4148a2,Integrating Spectral Kernel Learning and Constraints in Semi-Supervised Classification,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+89497854eada7e32f06aa8f3c0ceedc0e91ecfef,Deep Context-Sensitive Facial Landmark Detection With Tree-Structured Modeling,Alibaba,"AI Laboratories, Alibaba Group, Hangzhou, China","Hangzhou, Zhejiang, China",30.27408400,120.15507000,company,
+4551194408383b12db19a22cca5db0f185cced5c,Nonlinear Topological Component Analysis: Application to Age-Invariant Face Recognition,"Center for Development of Advanced Technologies, Algeria","Division of Design of Intelligent Machines, Center for Development of Advanced Technologies, Algiers, Algeria","haouch oukil، Cité 20 aout 1956 Baba Hassen 5 juillet 1962، Alger 16303, Algeria",36.68948700,2.98187700,edu,"Center for Development of Advanced Technologies, Algiers, Algeria"
+45e043dffc57a9070f483ac4aec2c5cd2cec22cb,SuperpowerGlass: A Wearable Aid for the At-Home Therapy of Children with Autism,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+452ea180cf4d08d7500fc4bc046fd7141fd3d112,A robust approach to facial ethnicity classification on large scale face databases,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+45edb29fb7eed5a52040300e1fd3cd53f1bdb429,Facial makeup detection via selected gradient orientation of entropy information,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+4512b87d68458d9ba0956c0f74b60371b6c69df4,SuperPatchMatch: An Algorithm for Robust Correspondences Using Superpixel Patches,University of Bordeaux,"UMR 5800, LaBRI, Talence, France","Domaine universitaire, 351, cours de la Libération, 33405 Talence, France",44.80837500,-0.59670500,edu,
+459eb3cfd9b52a0d416571e4bc4e75f979f4b901,Vision development of humanoid head robot SHFR-III,Shanghai University,Shanghai University,"上海大学, 锦秋路, 大场镇, 宝山区 (Baoshan), 上海市, 201906, 中国",31.32235655,121.38400941,edu,
+453bf941f77234cb5abfda4e015b2b337cea4f17,Robust regression based face recognition with fast outlier removal,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+1fd7a17a6c630a122c1a3d1c0668d14c0c375de0,"Facial landmark localization: Past, present and future",Moulay Ismail University,Moulay Ismail University,"Marjane 2, BP: 298، Meknes 50050, Morocco",33.85611100,-5.57439100,edu,Moulay Ismail University
+1fe1a78c941e03abe942498249c041b2703fd3d2,Face alignment based on improved shape searching,Hunan University,Hunan University,"Yejin University for Employees, 冶金西路, 和平乡, 珠晖区, 衡阳市 / Hengyang, 湖南省, 中国",26.88111275,112.62850666,edu,
+1f5725a4a2eb6cdaefccbc20dccadf893936df12,On the relevance of age in handwritten biometric recognition,"EUP Mataró, Spain","EUP Mataró, Spain","Mataró, Barcelona, Spain",41.53811240,2.44474060,edu,
+1fb980e137b2c9f8781a0d98c026e164b497ddb1,GazeDrone: Mobile Eye-Based Interaction in Public Space Without Augmenting the User,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+73ba33e933e834b815f62a50aa1a0e15c6547e83,Invariant feature extraction for facial recognition: A survey of the state-of-the-art,University of KwaZulu-Natal,University of KwaZulu-Natal,"238 Mazisi Kunene Rd, Glenwood, Durban, 4041, South Africa",-29.86742190,30.98072720,edu,
+7343f0b7bcdaf909c5e37937e295bf0ac7b69499,Adaptive Cascade Deep Convolutional Neural Networks for face alignment,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+73f341ff68caa9f8802e9e81bfa90d88bbdbd9d2,Report on the BTAS 2016 Video Person Recognition Evaluation,University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.05015580,14.46907327,edu,
+87610276ccbc12d0912b23fd493019f06256f94e,Unsupervised feature selection for proportional data clustering via expectation propagation,Concordia University,Concordia University,"Concordia University, 2811, Northeast Holman Street, Concordia, Portland, Multnomah County, Oregon, 97211, USA",45.57022705,-122.63709346,edu,
+872ff48a3acfbf96376fd048348372f5137615e4,Parallelized deformable part models with effective hypothesis pruning,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+876583a059154def7a4bc503b21542f80859affd,On the analysis of factors influencing the performance of facial age progression,Cyprus University of Technology,Cyprus University of Technology,"Mitropoli Building - Cyprus University of Technology, Anexartisias, Limasol - Λεμεσός, Limassol - Λεμεσός, Κύπρος - Kıbrıs, 3036, Κύπρος - Kıbrıs",34.67567405,33.04577648,edu,
+8027a9093f9007200e8e69e05616778a910f4a5f,Generating face images under multiple illuminations based on a single front-lighted sample without 3D models,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+80d42f74ee9bf03f3790c8d0f5a307deffe0b3b7,Learning Kernel Extended Dictionary for Face Recognition,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+80aa455068018c63237c902001b58844fcc6f160,Sparse eigentracker augmented by associative mapping to 3D shape,Okayama University,Okayama University,"岡山大学, 津高法界院停車場線, 津島東2, 津島東, 北区, 岡山市, 岡山県, 中国地方, 700-0081, 日本",34.68933930,133.92222720,edu,
+80ed678ef28ccc1b942e197e0393229cd99d55c8,Face recognition based on Kinect,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+809e5884cf26b71dc7abc56ac0bad40fb29c671c,On SIFTs and their scales,Technion,Technion,"Haifa, 3200003, Israel",32.77677830,35.02312710,edu,
+74cec83ee694b5d0e07d5d0bacd0aa48a80776aa,Improved discriminant nearest feature space analysis for variable lighting face recognition,National Cheng Kung University,National Cheng Kung University,"成大, 1, 大學路, 大學里, 前甲, 東區, 臺南市, 70101, 臺灣",22.99919160,120.21625134,edu,
+745e74ae84e1b2b8690d07db523531642023d6c4,Face recognition via non-negative sparse low-rank representation classification,Yangzhou University,"College of Information Engineering, Yangzhou University, Yangzhou, China","196 Huayang W Rd, Hanjiang Qu, Yangzhou Shi, Jiangsu Sheng, China",32.33934870,119.39704100,edu,
+1ab4fdcd431286a2fe9538cb9a9e3c67016fa98a,UGC-JU face database and its benchmarking using linear regression classifier,Jadavpur University,Jadavpur University,"Jadavpur University, Chingrighata Flyover, Basani Devi Colony, Kolkata, Hāora, West Bengal, 700098, India",22.56115370,88.41310194,edu,
+1aa61dd85d3a5a2fe819cba21192ec4471c08628,Deep learning based forensic face verification in videos,"Institute of Forensic Science, Ministry of Justice, Shanghai, China","Institute of Forensic Science, Ministry of Justice, Shanghai 200063, China","1347 Guangfu W Rd, Putuo Qu, Shanghai Shi, China, 200063",31.22665700,121.41905100,gov,
+1ad5cb4c1eec5a9666b5dbbb6fab43576d0935db,Rank-constrained PCA for intrinsic images decomposition,Institute of Industrial Science,Institute of Industrial Science,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本",36.05238585,140.11852361,edu,
+1a8d40bcfb087591cc221086440d9891749d47b8,Directional ternary pattern ( DTP) for facial expression recognition,"Islamic University of Technology, Bangladesh","Islamic University of Technology, Bangladesh","Board Bazar, Dhaka- Mymensingh Highway, Gazipur 1704, Bangladesh",23.94726100,90.37993250,edu,
+1a53ca294bbe5923c46a339955e8207907e9c8c6,What Else Does Your Biometric Data Reveal? A Survey on Soft Biometrics,EURECOM,EURECOM,"Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France",43.61438600,7.07112500,edu,
+1afef6b389bd727c566cd6fbcd99adefe4c0cf32,Towards resolution invariant face recognition in uncontrolled scenarios,"Sichuan University, Chengdu","Sichuan Univ., Chengdu","四川大学(华西校区), 校东路, 武侯区, 武侯区 (Wuhou), 成都市 / Chengdu, 四川省, 610014, 中国",30.64276900,104.06751175,edu,
+1aeef2ab062c27e0dbba481047e818d4c471ca57,Analyzing impact of image scaling algorithms on viola-jones face detection framework,"Central Electronics Research Institute, Pilani, India","Advanced Electronics System, Academy of Scientific and Industrial Research, CSIR-Central Electronics Research Institute, Pilani, India","Central Electronics Engineering Research Institute, Pilani, Rajasthan 333031, India",28.36561930,75.58349530,edu,
+1a81c722727299e45af289d905d7dcf157174248,BabyTalk: Understanding and Generating Simple Image Descriptions,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+1a40c2a2d17c52c8b9d20648647d0886e30a60fa,Hybrid hypergraph construction for facial expression recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+1ad780e02edf155c09ea84251289a054b671b98a,Facial expression recognition via Gabor wavelet and structured sparse representation,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+287de191c49a3caa38ad7594093045dfba1eb420,Object specific deep feature and its application to face detection,Shenzhen University,Shenzhen University,"深圳大学, 3688, 南海大道, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518060, 中国",22.53521465,113.93159110,edu,
+281b91c35a1af97b1405bc724a04e2be6e24971b,A novel gradient synthesis-based illumination formalization method for face recognition under varying lighting conditions,South China University of Technology,South China University of Technology,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.05020420,113.39880323,edu,
+28d55935cc36df297fe21b98b4e2b07b5720612e,Efficient subspace clustering of large-scale data streams with misses,University of Minnesota,University of Minnesota,"WeismanArt, 333, East River Parkway, Marcy-Holmes, Phillips, Minneapolis, Hennepin County, Minnesota, 55455, USA",44.97308605,-93.23708813,edu,
+28a45770faf256f294ce3bbd5de25c6d5700976e,Accurate mouth state estimation via convolutional neural networks,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+283d381c5c2ba243013b1c4f5e3b29eb906fa823,Shape parameter optimization for Adaboosted active shape model,Fuji Photo Film,"Imaging Software Technol. Center, Fuji Photo Film Co. Ltd., Japan","Japan, 〒010-0001 Akita Prefecture, Akita, Nakadori, 2 Chome−2−32 山二ビル 6F",39.71635900,140.12378700,company,
+170aa0f16cd655fdd4d087f5e9c99518949a1b5c,Facial Shape-from-shading and Recognition Using Principal Geodesic Analysis and Robust Statistics,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+179545c1fc645cb2ad9b31a30f48352d541876ff,Kernel-based Subspace Analysis for Face Recognition,University of Technology,"Center for Quantum Computation and Intelligent Systems, Faculty of Engineering and Information Technology, University of Technology, Sydney, NSW, Australia","11 University of Technology Sydney 81, Broadway, Ultimo NSW 2007, Australia",-33.88405040,151.19922540,edu,
+179564f157a96787b1b3380a9f79701e3394013d,MACH: my automated conversation coach,University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+1773d65c1dc566fd6128db65e907ac91b4583bed,Learning Temporal Dynamics for Video Super-Resolution: A Deep Learning Approach,Texas A&M University,Texas A&M University,"Texas A&M University, Horticulture Street, Park West, College Station, Brazos County, Texas, 77841, USA",30.61083650,-96.35212800,edu,
+17d03da4db3bb89537d644b682b2a091d563af4a,Recognition of Partially Occluded and Rotated Images With a Network of Spiking Neurons,University of Alberta,University of Alberta,"University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada",53.52385720,-113.52282665,edu,
+7b47dd9302b3085cd6705614b88d7bdbc8ae5c13,Face Recognition Using Gabor-Based Feature Extraction and Feature Space Transformation Fusion Method for Single Image per Person Problem,Jiangnan University,Jiangnan University,"江南大学站, 蠡湖大道, 滨湖区, 南场村, 滨湖区 (Binhu), 无锡市 / Wuxi, 江苏省, 214121, 中国",31.48542550,120.27395810,edu,
+7b618a699b79c1272f6c83101917ad021a58d96b,BAUM-2: a multilingual audio-visual affective face database,Bahçeşehir University,Bahcesehir University,"BAU Galata, 24, Kemeraltı Caddesi, Müeyyedzade, Beyoğlu, İstanbul, Marmara Bölgesi, 34425, Türkiye",41.02451875,28.97697953,edu,
+8f71c97206a03c366ddefaa6812f865ac6df87e9,A face tracking framework based on convolutional neural networks and Kalman filter,Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+8f3675e979629ca9cee9436d37763f546edb8d40,Video action classification by deep learning,Istanbul Technical University,Istanbul Technical University,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye",41.10427915,29.02231159,edu,
+8fee7b38358815e443f8316fa18768d76dba12e3,Robust nonnegative matrix factorization using L21-norm,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+8fa9cb5dac394e30e4089bf5f4ffecc873d1da96,Personalized clothing recommendation combining user social circle and fashion style consistency,Southwest Jiaotong University,Southwest Jiaotong University,"西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国",30.69784700,104.05208110,edu,
+8fba84af61ac9b5e2bcb69b6730a597d7521ad73,Context constrained facial landmark localization based on discontinuous Haar-like feature,Panasonic Singapore,"Panasonic Singapore Laboratories Pte Ltd (PSL), Tai Seng Industrial Estate 534415, Singapore",Singapore,1.33926090,103.89160770,company,"Tai Seng Industrial Estate 534415, Singapore"
+8fe5feeaa72eddc62e7e65665c98e5cb0acffa87,Hierarchical committee of deep convolutional neural networks for robust facial expression recognition,Korea Advanced Institute of Science and Technology,Korea Advanced Institute of Science and Technology,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.36971910,127.36253700,edu,
+8fb2ec3bbd862f680be05ef348b595e142463524,Multi-View Active Shape Model with Robust Parameter Estimation,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+8f73af52d87c94d0bd43242462fd68d974eda331,Improving faces/non-faces discrimination in video sequences by using a local spatio-temporal representation,"Advanced Technologies Application Center, Havana, Cuba","Advanced Technologies Application Center, Siboney Playa, Havana, Cuba","Playa, Havana, Cuba",23.08862140,-82.44819440,edu,
+8f99f7ccb85af6d4b9e015a9b215c529126e7844,Face image-based age and gender estimation with consideration of ethnic difference,Korea Advanced Institute of Science and Technology,Korea Advanced Institute of Science and Technology,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.36971910,127.36253700,edu,
+8f051647bd8d23482c6c3866c0ce1959b8bd40f6,Semi-supervised classification by discriminative regularization,Southwest University,Southwest University,"西南大学, 天生路, 北碚区 (Beibei), 北碚区, 北碚区 (Beibei), 重庆市, 400711, 中国",29.82366295,106.42050016,edu,
+8fc36452a49cb0fd43d986da56f84b375a05b4c1,Crowdsourcing facial expressions using popular gameplay,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+8aff9c8a0e17be91f55328e5be5e94aea5227a35,Sparse Tensor Discriminant Color Space for Face Verification,Raytheon BBN Technologies,"Raytheon BBN Technologies, Cambridge, MA, USA","10 Moulton St, Cambridge, MA 02138, USA",42.38980550,-71.14759860,company,
+8ad0a88a7583af819af66cf2d9e8adb860cf9c34,Layer-wise supervised neural network for face alignment with multi-task regularization,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+8ac2d704f27a2ddf19b40c8e4695da629aa52a54,Expressions Recognition of North-East Indian (NEI) Faces,Jadavpur University,Jadavpur University,"Jadavpur University, Chingrighata Flyover, Basani Devi Colony, Kolkata, Hāora, West Bengal, 700098, India",22.56115370,88.41310194,edu,
+8a2210bedeb1468f223c08eea4ad15a48d3bc894,Do you see what I see?: designing a sensory substitution device to access non-verbal modes of communication,University of Memphis,University of Memphis,"The University of Memphis, Desoto Avenue, Memphis, Shelby County, Tennessee, 38152, USA",35.11893870,-89.93721960,edu,
+8ae642c87f0d6eeff1c6362571e7cd36dcda60ae,Place-centric Visual Urban Perception with Deep Multi-instance Regression,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+8a63a2b10068b6a917e249fdc73173f5fd918db0,"A Review of Automated Pain Assessment in Infants: Features, Classification Tasks, and Databases",University of South Florida,University of South Florida,"University of South Florida, Leroy Collins Boulevard, Tampa, Hillsborough County, Florida, 33620, USA",28.05999990,-82.41383619,edu,
+8a4893d825db22f398b81d6a82ad2560832cd890,Evaluating AAM fitting methods for facial expression recognition,University of Canberra,University of Canberra,"University of Canberra, University Drive, Bruce, Belconnen, Australian Capital Territory, 2617, Australia",-35.23656905,149.08446994,edu,
+8a2bedaa38abf173823944f0de2c84f5b2549609,Robust Image Regression Based on the Extended Matrix Variate Power Exponential Distribution of Dependent Noise,Anhui Polytechnic University,Anhui Polytechnic University,"安徽工程大学, 鸠江北路, 芜湖市, 芜湖市区, 芜湖市 / Wuhu, 安徽省, 241000, 中国",31.34185955,118.40739712,edu,
+8aa1591bf8fcb44f2e9f2f10d1029720ccbb8832,"Simple, Efficient and Effective Encodings of Local Deep Features for Video Action Recognition",University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+7ebfa8f1c92ac213ff35fa27287dee94ae5735a1,A Novel Transient Wrinkle Detection Algorithm and Its Application for Expression Synthesis,Shenzhen University,Shenzhen University,"深圳大学, 3688, 南海大道, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518060, 中国",22.53521465,113.93159110,edu,
+7e456e94f3080c761f858264428ee4c91cd187b2,Recognition of facial expressions using locally weighted and adjusted order Pseudo Zernike Moments,Islamic Azad University,Islamic Azad University,"دانشگاه آزاد اسلامی, همدان, بخش مرکزی شهرستان همدان, شهرستان همدان, استان همدان, ‏ایران‎",34.84529990,48.55962120,edu,
+7e27d946d23229220bcb6672aacab88e09516d39,DLSTM approach to video modeling with hashing for large-scale video retrieval,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+10cb39e93fac194220237f15dae084136fdc6740,Informativeness of Degraded Data in Training a Classification System,"Space and Naval Warfare Systems Center Pacific, San Diego, CA","Space and Naval Warfare Systems Center Pacific, San Diego, CA, 92152, United States","53560 Hull St, San Diego, CA 92152, USA",32.70865800,-117.24724910,mil,
+10e2f2ad1dedec6066e063cb2098b089b35905a8,Crowd Scene Understanding from Video: A Survey,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+10df1d4b278da991848fb71b572f687bd189c10e,Key frame extraction for salient activity recognition,Rochester Institute of Technology,Rochester Institute of Technology,"Rochester Institute of Technology (RIT), 1, Lomb Memorial Drive, Bailey, Henrietta Town, Monroe County, New York, 14623, USA",43.08250655,-77.67121663,edu,
+104ee18b513b52386f871e959c1f9e5072604e93,A new dataset for hand gesture estimation,Southeast University,Southeast University,"SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国",32.05752790,118.78682252,edu,
+10f4bbf87a44bab3d79e330e486c897e95f5f33f,An Experimental Evaluation of Three Classifiers for Use in Self-Updating Face Recognition Systems,Universitat Pompeu Fabra,Universitat Pompeu Fabra,"Dipòsit de les Aigües, Carrer de Wellington, la Vila Olímpica del Poblenou, Ciutat Vella, Barcelona, BCN, CAT, 08071, España",41.39044285,2.18891949,edu,
+10bfa4cecd64b9584c901075d6b50f4fad898d0b,Optimized 3D Lighting Environment Estimation for Image Forgery Detection,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+10bf35bf98cfe555dfc03b5f03f2769d330e3af9,Robust Adaptive Embedded Label Propagation With Weight Learning for Inductive Classification,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+1050cd9bf281d0b7367c03d931e6e0b4fc08ccd3,Facial expression recognition via deep learning,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+193474d008cab9fa1c1fa81ce094d415f00b075c,A Review of Human Action Recognition in Video,Communication University of China,Communication University of China,"中国传媒大学, 朝阳路, 定福庄, 朝阳区 / Chaoyang, 北京市, 100024, 中国",39.91199955,116.55189141,edu,
+1966bddc083886a9b547e1817fe6abc352a00ec3,Gender Classification Using Pyramid Segmentation for Unconstrained Back-facing Video Sequences,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+19705579b8e7d955092ef54a22f95f557a455338,Fiducial facial point extraction with cross ratio,Dalian University of Technology,Dalian University of Technology,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国",38.88140235,121.52281098,edu,
+1979e270093b343d62e97816eeed956062e155a0,Multi-lane architecture for eigenface based real-time face recognition,Old Dominion University,Old Dominion University,"Old Dominion University, Elkhorn Avenue, Lamberts Point, Norfolk, Virginia, 23508, USA",36.88568200,-76.30768579,edu,
+194f5d3c240d06575403c9a422a0ebc86d43b91e,Real-time face detection and phone-to-face distance measuring for speech recognition for multi-modal interface in mobile device,Chosun University,Chosun University,"조선대역, 서남로, 남동, 동구, 광주, 61473, 대한민국",35.14410310,126.92578580,edu,
+1902288256839539aeb5feb3e1699b963a15aa1a,Exploring multimodal video representation for action recognition,University of Potsdam,"Hasso Plattner Institute, University of Potsdam, Prof.-Dr.-Helmert-Str. 2-3, 14482, Germany","Prof.-Dr.-Helmert-Straße 2-3, 14482 Potsdam, Germany",52.39399650,13.13336570,edu,
+191b70fdd6678ef9a00fd63710c70b022d075362,Synthesizing pose and lighting variation from object motion,Toshiba,"Corp. Res. & Dev. Center, Toshiba Corp., Kawasaki, Japan","Japan, 〒212-8582 Kanagawa Prefecture, Kawasaki, Saiwai Ward, 小向東芝町1",35.54931130,139.69201440,company,
+4c141534210df53e58352f30bab558a077fec3c6,Bridging Music and Image via Cross-Modal Ranking Analysis,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+4c6886c489e93ccab5a1124555a6f3e5b0104464,Metric learning based on attribute hypergraph,Shanghai University,Shanghai University,"上海大学, 锦秋路, 大场镇, 宝山区 (Baoshan), 上海市, 201906, 中国",31.32235655,121.38400941,edu,
+4ca9753ab023accbfa75a547a65344ee17b549ba,A general framework for Approximate Nearest Subspace search,Technion,Technion,"Haifa, 3200003, Israel",32.77677830,35.02312710,edu,
+4c0846bcfa64d9e810802c5b7ef0f8b43523fe54,Adaptive Anomaly Detection with Kernel Eigenspace Splitting and Merging,"Intel Labs Europe, London, United Kingdom","Intel Labs Europe, London, United Kingdom","40 Bank St, Canary Wharf, London E14 5NR, UK",51.50280900,-0.01945300,company,
+4cfe921ac4650470b0473fd52a2b801f4494ee64,Human vision inspired framework for facial expressions recognition,University of Lyon,"Université de Lyon, CNRS, INSA-Lyon, LIRIS, UMR5205, F-69621, France","20 Avenue Albert Einstein, 69100 Villeurbanne, France",45.78332440,4.87819840,edu,
+4c71b0cdb6b80889b976e8eb4457942bd4dd7b66,A Learning Framework for Age Rank Estimation Based on Face Images With Scattering Transform,Institute of Information Science,Institute of Information Science,"資訊科學研究所, 數理大道, 中研里, 南港子, 南港區, 臺北市, 11574, 臺灣",25.04107280,121.61475620,edu,
+4cec3e5776090852bef015a8bbe74fed862aa2dd,Class-Discriminative Kernel Sparse Representation-Based Classification Using Multi-Objective Optimization,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+4c0cc732314ba3ccccd9036e019b1cfc27850c17,Late fusion and calibration for multimedia event detection using few examples,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+4c842fbd4c032dd4d931eb6ff1eaa2a13450b7af,A review of recent advances in visual speech decoding,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+268c4bb54902433bf00d11391178a162e5d674c9,Learning spatial weighting via quadratic programming for facial expression analysis,National Tsing Hua University,National Tsing Hua University,"國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.79254840,120.99511830,edu,
+261a80216dda39b127d2b7497c068ec7e0fdf183,A Framework for Making Face Detection Benchmark Databases,Vols Taipei,Vols Taipei,"Taipei, Taiwan",25.03296940,121.56541770,edu,
+2601b679fdd637f3cd978753ae2f15e8759dd267,Joint classification of actions with matrix completion,Heriot-Watt University,Heriot-Watt University,"Heriot-Watt University - Edinburgh Campus, Third Gait, Currie, Gogarbank, City of Edinburgh, Scotland, EH14 4AS, UK",55.91029135,-3.32345777,edu,
+2696d3708d6c6cccbd701f0dac14cc94d72dd76d,Nonnegative matrix factorization with Hessian regularizer,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+265a88a8805f6ba3efae3fcc93d810be1ea68866,Approximated Chi-square distance for histogram matching in facial image analysis: Face and expression recognition,Amirkabir University of Technology,Amirkabir University of Technology,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎",35.70451400,51.40972058,edu,
+26949c1ba7f55f0c389000aa234238bf01a32d3b,Coupled cascade regression for simultaneous facial landmark detection and head pose estimation,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+21d5c838d19fcb4d624b69fe9d98e84d88f18e79,Attribute-based continuous user authentication on mobile devices,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+21bd60919e2e182a29af455353141ba4907b1b41,Attended Visual Content Degradation Based Reduced Reference Image Quality Assessment,China University of Mining and Technology,China University of Mining and Technology,"China University of Mining and Technology, 1号, 大学路, 泉山区 (Quanshan), 徐州市 / Xuzhou, 江苏省, 221116, 中国",34.21525380,117.13985410,edu,
+21b5af67618fcc047b495d2d5d7c2bf145753633,Warp that smile on your face: Optimal and smooth deformations for face recognition,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+21cbf46c6adfb3a44ed2b30ff0b21a8391c18b13,Learning robust latent subspace for discriminative regression,Guangdong University of Technology,Guangdong University of Technology,"广东工业大学, 东风东路, 黄花岗街道, 越秀区 (Yuexiu), 广州市, 广东省, 510080, 中国",23.13538360,113.29470496,edu,
+21959bc56a160ebd450606867dce1462a913afab,Face recognition based on manifold constrained joint sparse sensing with K-SVD,Beijing Jiaotong University,Beijing Jiaotong University,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国",39.94976005,116.33629046,edu,
+21f5f65e832c5472d6d08f6ee280d65ff0202e29,Face Detection in Thermal Infrared Images: A Comparison of Algorithm- and Machine-Learning-Based Approaches,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+2149d49c84a83848d6051867290d9c8bfcef0edb,Label-Sensitive Deep Metric Learning for Facial Age Estimation,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+4da4e58072c15904d4ce31076061ebd3ab1cdcd5,Learning deep facial expression features from image and optical flow sequences using 3D CNN,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+4d1f77d9418a212c61a3c75c04a5b3884f6441ba,Hierarchical and Spatio-Temporal Sparse Representation for Human Action Recognition,Institute of Information Science,Institute of Information Science,"資訊科學研究所, 數理大道, 中研里, 南港子, 南港區, 臺北市, 11574, 臺灣",25.04107280,121.61475620,edu,
+4d19401e44848fe65b721971bc71a9250870ed5f,Mgn: Multi-Glimpse Network for Action Recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+4d4736173a5e72c266e52f3a43bdcb2b58f237a2,Locally nonlinear regression based on kernel for pose-invariant face recognition,Amirkabir University of Technology,Amirkabir University of Technology,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎",35.70451400,51.40972058,edu,
+4d6d6369664a49f6992f65af4148cefef95055bc,Seamless texture stitching on a 3D mesh by poisson blending in patches,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+7535e3995deb84a879dc13857e2bc0796a2f7ce2,Fast density-weighted low-rank approximation spectral clustering,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+75d7ba926ef1cc2adab6c5019afbb2f69a5ca27d,Face recognition under varying illumination,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+75ce75c1a5c35ecdba99dd8b7ba900d073e35f78,Action unit intensity estimation using hierarchical partial least squares,Istanbul Technical University,Istanbul Technical University,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye",41.10427915,29.02231159,edu,
+75b51140d08acdc7f0af11b0ffa1edb40ebbd059,Selecting discriminant eigenfaces by using binary feature selection,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+754626bd5fb06fee5e10962fdfeddd495513e84b,Facial expression pair matching,Istanbul Technical University,Istanbul Technical University,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye",41.10427915,29.02231159,edu,
+81a4397d5108f6582813febc9ddbeff905474120,Unsupervised automatic attribute discovery method via multi-graph clustering,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+8184a92e1ccc7fdeb4a198b226feb325c63d6870,Deep learning for facial expression recognition: A step closer to a smartphone that knows your moods,"Fotonation LTD, Galway, Ireland","Fotonation LTD, Galway, Ireland","Galway, Ireland",53.27066800,-9.05679050,company,
+8185be0689442db83813b49e215bf30870017459,Feature Learning for Image Classification Via Multiobjective Genetic Programming,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+81b8a6cabcd6451b21d5b44e69b0a355d9229cc4,Low-rank and sparse matrix recovery based on a randomized rank-revealing decomposition,Pontifical Catholic University of Rio de Janeiro,"Pontifical Catholic University of Rio de Janeiro, Rua Marquês de São Vicente 225, Gávea, Brasil","R. Marquês de São Vicente, 225 - Gávea, Rio de Janeiro - RJ, 22451-045, Brazil",-22.97910740,-43.23308250,edu,
+81d81a2060366f29fd100f793c11acf000bd2a7f,Facial Expression Recognition Based on Rough Set Theory and SVM,Chongqing University of Posts and Telecommunications,Chongqing University of Posts and Telecommunications,"重庆邮电大学, 崇文路, 渝中区, 黄桷垭, 重庆市中心, 南岸区 (Nan'an), 重庆市, 400065, 中国",29.53570460,106.60482474,edu,
+81513764b73dae486a9d2df28269c7db75e9beb3,Learning Bases of Activity for Facial Expression Recognition,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+81d232e1f432db7de67baf4f30f240c62d1a9055,Improving human action recognitionby temporal attention,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+8127b7654d6e5c46caaf2404270b74c6b0967e19,Computer Expression Recognition Toolbox,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+81b0550c58e7409b4f1a1cd7838669cfaa512eb3,Task-dependent multi-task multiple kernel learning for facial action unit detection,University of Denver,University of Denver,"University of Denver, Driscoll Bridge, Denver, Denver County, Colorado, 80208, USA",39.67665410,-104.96220300,edu,
+81f101cea3c451754506bf1c7edf80a661fa4dd1,Exploiting sparsity and co-occurrence structure for action unit recognition,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+81a80b26979b40d5ebe3f5ba70b03cb9f19dd7a5,Histogram equalized deep PCA with ELM classification for expressive face recognition,Khon Kaen University,Khon Kaen University,"มหาวิทยาลัยขอนแก่น, 4, บ้านหนองหัวช้าง, ขอนแก่น, จังหวัดขอนแก่น, 40002, ประเทศไทย",16.46007565,102.81211798,edu,
+86fa086d02f424705bbea53943390f009191740a,Precise eye localization with improved SDM,"Samsung SAIT, Korea","Advanced Media Lab. Samsung Advance Institute of Technology, Republic of Korea","130 Samseong-ro, Maetan 3(sam)-dong, Yeongtong-gu, Suwon, Gyeonggi-do, South Korea",37.25202260,127.05550190,company,
+865d4ce1751ff3c0a8eb41077a9aa7bd94603c47,Emotion recognition in the wild via sparse transductive transfer linear discriminant analysis,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+86afb1e38a96f2ac00e792ef353a971fd13c8474,How interesting images are: An atypicality approach for social networks,University of Hawaii,University of Hawaii,"University of Hawaii at Manoa, Bachman Place, Lower Mānoa, Moiliili, Honolulu, Honolulu County, Hawaii, 96848, USA",21.29827950,-157.81869230,edu,
+8686b15802529ff8aea50995ef14079681788110,Deformed Graph Laplacian for Semisupervised Learning,University of Technology,"Center for Quantum Computation and Intelligent Systems, Faculty of Engineering and Information Technology, University of Technology, Sydney, NSW, Australia","11 University of Technology Sydney 81, Broadway, Ultimo NSW 2007, Australia",-33.88405040,151.19922540,edu,
+864d50327a88d1ff588601bf14139299ced2356f,Generating face images based on 3D morphable model,Slovak University of Technology,"Faculty of Electrical Engineering and Information Technology, Slovak University of Technology, Bratislava, Bratislava, Slovakia","Ilkovičova 2961/3, 841 04 Karlova Ves, Slovakia",48.15185320,17.07334460,edu,
+8633732d9f787f8497c2696309c7d70176995c15,Multi-objective convolutional learning for face labeling,"Baidu Research, USA","Baidu Research, USA","1195 Bordeaux Dr, Sunnyvale, CA 94089, USA",37.40922650,-122.02366150,company,
+72345fed8d068229e50f9ea694c4babfd23244a0,Comparative Study: Face Recognition via the Correlation Filter Technique,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+720763bcb5e0507f13a8a319018676eb24270ff0,What can visual content analysis do for text based image search?,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+72167c9e4e03e78152f6df44c782571c3058050e,Acume: A new visualization tool for understanding facial expression and gesture data,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+72119cb98f9502ec639de317dccea57fd4b9ee55,A new approach for face recognition under makeup changes,University of Delaware,University of Delaware,"University of Delaware, South College Avenue, Newark, New Castle County, Delaware, 19713, USA",39.68103280,-75.75401840,edu,
+72d110df78a7931f5f2beaa29f1eb528cf0995d3,Facial emotion recognition system for autistic children: a feasible study based on FPGA implementation,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+44b827df6c433ca49bcf44f9f3ebfdc0774ee952,Deep Correlation Feature Learning for Face Verification in the Wild,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+443f4421e44d4f374c265e6f2551bf9830de5597,Effect of illumination on automatic expression recognition: A novel 3D relightable facial database,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+4490b8d8ab2ac693c670751d4c2bff0a56d7393d,Cognitive Gravity Model Based Semi-Supervised Dimension Reduction,South China University of Technology,South China University of Technology,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.05020420,113.39880323,edu,
+44855e53801d09763c1fb5f90ab73e5c3758a728,Sentence Directed Video Object Codiscovery,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+44d93039eec244083ac7c46577b9446b3a071f3e,Empirical comparisons of several preprocessing methods for illumination insensitive face recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+2a92bda6dbd5cce5894f7d370d798c07fa8783f4,Class-Specific Kernel Fusion of Multiple Descriptors for Face Verification Using Multiscale Binarised Statistical Image Features,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+2afde207bd6f2e5fa20f3cf81940b18cc14e7dbb,Grassmannian Regularized Structured Multi-View Embedding for Image Classification,University of Technology,"Center for Quantum Computation and Intelligent Systems, Faculty of Engineering and Information Technology, University of Technology, Sydney, NSW, Australia","11 University of Technology Sydney 81, Broadway, Ultimo NSW 2007, Australia",-33.88405040,151.19922540,edu,
+2a98b850139b911df5a336d6ebf33be7819ae122,Maximum entropy regularized group collaborative representation for face recognition,Georgia Southern University,Georgia Southern University,"Georgia Southern University, Forrest Drive, Pine Cove, Statesboro, Bulloch County, Georgia, 30460, USA",32.42143805,-81.78450529,edu,
+2ae2e29c3e9cc2d94a26da5730df7845de0d631b,Audio-Visual Recognition System in Compression Domain,University of Nottingham Malaysia,"University of Nottingham Malaysia Campus, Selangor Darul Ehsan, Malaysia","Semenyih, Selangor, Malaysia",2.94513320,101.87609130,edu,
+2a6783ae51d7ee781d584ef9a3eb8ab1997d0489,A study of large-scale ethnicity estimation with gender and age variations,North Carolina Central University,North Carolina Central University,"North Carolina Central University, George Street, Hayti, Durham, Durham County, North Carolina, 27707, USA",35.97320905,-78.89755054,edu,
+2a7058a720fa9da4b9b607ea00bfdb63652dff95,Continuous Probability Distribution Prediction of Image Emotions via Multitask Shared Sparse Regression,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+2a79bd36c56fd1634ca0f8089fe8aa9343eb92ce,Integrating Spatial and Discriminant Strength for Feature Selection and Linear Dimensionality Reduction,University of Delaware,University of Delaware,"University of Delaware, South College Avenue, Newark, New Castle County, Delaware, 19713, USA",39.68103280,-75.75401840,edu,
+2a84f7934365f05b6707ea0ac225210f78e547af,A joint facial point detection method of deep convolutional network and shape regression,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"Columbus, OH 43210, USA",40.01419050,-83.03091430,edu,
+2a4984fb48c175d1e42c6460c5f00963da9f26b6,Binary pattern flavored feature extractors for Facial Expression Recognition: An overview,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+2a7e6a1b2638550370a47f2f6f6e38e76fe9ac13,Multifeature Anisotropic Orthogonal Gaussian Process for Automatic Age Estimation,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+2fd007088a75916d0bf50c493d94f950bf55c5e6,Projective Representation Learning for Discriminative Face Recognition,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+2f43b614607163abf41dfe5d17ef6749a1b61304,Investigating the Periocular-Based Face Recognition Across Gender Transformation,University of North Carolina at Wilmington,University of North Carolina at Wilmington,"University of North Carolina at Wilmington, Price Drive, University Suites, Wilmington, New Hanover County, North Carolina, 28403, USA",34.22498270,-77.86907744,edu,
+2f67d5448b5372f639633d8d29aac9c0295b4d72,Facial expression classification on web images,Istanbul Technical University,Istanbul Technical University,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye",41.10427915,29.02231159,edu,
+2f837ff8b134b785ee185a9c24e1f82b4e54df04,Local Binary Patterns and Its Application to Facial Image Analysis: A Survey,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+2f73203fd71b755a9601d00fc202bbbd0a595110,Micro-expression Analysis by Fusing Deep Convolutional Neural Network and Optical Flow,Kochi University of Technology,"Kochi University of Technology, Kochi, 782-8502, Japan","185 Tosayamadacho Miyanokuchi, Kami, Kōchi Prefecture 782-0003, Japan",33.62081300,133.71975500,edu,
+2f841ff062053f38725030aa1b77db903dad1efb,Crowdsourced saliency for mining robotically gathered 3D maps using multitouch interaction on smartphones and tablets,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+2facf3e85240042a02f289a0d40fee376c478d0f,Aging face verification in score-age space using single reference image template,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+43fce0c6b11eb50f597aa573611ac6dc47e088d3,IoT and Computer Vision Based Driver Safety Monitoring System with Risk Prediction,Chittagong University of Engineering and Technology,Chittagong University of Engineering and Technology,"Shaheed Tareq Huda Hall, Goal Chattar, চট্টগ্রাম, চট্টগ্রাম জেলা, চট্টগ্রাম বিভাগ, 4349, বাংলাদেশ",22.46221665,91.96942263,edu,
+4349f17ec319ac8b25c14c2ec8c35f374b958066,Dynamic Texture Comparison Using Derivative Sparse Representation: Application to Video-Based Face Recognition,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+43cbe3522f356fbf07b1ff0def73756391dc3454,Laplacian of smoothed image as representation for face recognition,"IIIT Hyderabad, India","IIIT Hyderabad, 500032, A.P, India","IIIT, Gachibowli, Hyderabad, Telangana 500032, India",17.44479180,78.34830980,edu,
+4398afa0aeb5749a12772f2d81ca688066636019,Partial Matching of Facial Expression Sequence Using Over-Complete Transition Dictionary for Emotion Recognition,Korea Advanced Institute of Science and Technology,Korea Advanced Institute of Science and Technology,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.36971910,127.36253700,edu,
+43fe03ec1acb6ea9d05d2b22eeddb2631bd30437,Weakly supervised multiscale-inception learning for web-scale face recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+4328933890f5a89ad0af69990926d8484f403e4b,Personalized portraits ranking,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+88780bd55615c58d9bacc4d66fc2198e603a1714,Classification of facial-emotion expression in the application of psychotherapy using Viola-Jones and Edge-Histogram of Oriented Gradient,University of Technology,"Center for Quantum Computation and Intelligent Systems, Faculty of Engineering and Information Technology, University of Technology, Sydney, NSW, Australia","11 University of Technology Sydney 81, Broadway, Ultimo NSW 2007, Australia",-33.88405040,151.19922540,edu,
+884a9ce87d4d2338cb97bf4c8df3cdb079a87d5e,Discriminant dictionary learning with sparse embedding on face recognition,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+88ed558bff3600f5354963d1abe762309f66111e,Real-World and Rapid Face Recognition Toward Pose and Expression Variations via Feature Library Matrix,Semnan University,Semnan University,"دانشگاه سمنان, بزرگراه امام رضا, شهرک مسکن مهر مصلی, ناسار, سمنان, بخش مرکزی, شهرستان سمنان, استان سمنان, ‏ایران‎",35.60374440,53.43445877,edu,
+88535dba55b0a80975df179d31a6cc80cae1cc92,Facial expression recognition with an optimized radial basis kernel,Selçuk University,Selçuk University,"Selçuk Üniversitesi, Ali Fuat Cebesoy Cad., Ardıçlı Mahallesi, Konya, Selçuklu, Konya, İç Anadolu Bölgesi, Türkiye",38.02420685,32.50570524,edu,
+885c37f94e9edbbb2177cfba8cb1ad840b2a5f20,Simultaneous Local Binary Feature Learning and Encoding for Homogeneous and Heterogeneous Face Recognition,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+8882d39edae556a351b6445e7324ec2c473cadb1,Hierarchical Recurrent Neural Hashing for Image Retrieval With Hierarchical Convolutional Features,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+88c21e06ed44da518a7e346fce416efedc771704,Feature extraction via multi-view non-negative matrix factorization with local graph regularization,Fordham University,Fordham University,"Fordham University Lincoln Center Campus, West 61st Street, 1 West End Ave trade area, Lincoln Square, Manhattan, Manhattan Community Board 7, New York County, NYC, New York, 10023, USA",40.77106040,-73.98528070,edu,
+9f5e22fbc22e1b0a61bcd75202d299232e68de5d,Facial expression Recognition based on Motion Estimation,"Universidade Federal de Pernambuco, Brazil","Centro de Informática, Universidade Federal de Pernambuco, Recife, Brazil","Av. Jorn. Aníbal Fernandes, s/n - Cidade Universitária, Recife - PE, 50740-560, Brazil",-8.05566810,-34.95157800,edu,
+9ff931ca721d50e470e1a38e583c7b18b6cdc2cc,An Overview and Empirical Comparison of Distance Metric Learning Methods,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+9f3c9e41f46df9c94d714b1f080dafad6b4de1de,On the detection of images containing child-pornographic material,Cyprus University of Technology,Cyprus University of Technology,"Mitropoli Building - Cyprus University of Technology, Anexartisias, Limasol - Λεμεσός, Limassol - Λεμεσός, Κύπρος - Kıbrıs, 3036, Κύπρος - Kıbrıs",34.67567405,33.04577648,edu,
+9f428db0d3cf26b9b929dd333a0445bcc7514cdf,Dynamic soft encoded patterns for facial event analysis,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+9f131b4e036208f2402182a1af2a59e3c5d7dd44,Face Retrieval Framework Relying on User's Visual Memory,Waseda University,Waseda University,"早稲田大学 北九州キャンパス, 2-2, 有毛引野線, 八幡西区, 北九州市, 福岡県, 九州地方, 808-0135, 日本",33.88987280,130.70856205,edu,
+9fd1b8abbad25cb38f0c009288fb5db0fc862db6,Soft margin AdaBoost for face pose classification,"CSIRO, Australia","NICTA, PO Box 6020, St Lucia, QLD 4067, Australia","Research Way, Clayton VIC 3168, Australia",-37.90627370,145.13194490,edu,f.k.a. NICTA
+9fbcf40b0649c03ba0f38f940c34e7e6c9e04c03,A review on Gabor wavelets for face recognition,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+9f43caad22803332400f498ca4dd0429fe7da0aa,Exploring human visual system: Study to aid the development of automatic facial expression recognition framework,University of Lyon,"Université de Lyon, CNRS, INSA-Lyon, LIRIS, UMR5205, F-69621, France","20 Avenue Albert Einstein, 69100 Villeurbanne, France",45.78332440,4.87819840,edu,
+6ba3cb67bcdb7aea8a07e144c03b8c5a79c19bc0,Cross-Modal Metric Learning for AUC Optimization,University of Manchester,University of Manchester,"University of Manchester - Main Campus, Brunswick Street, Curry Mile, Ardwick, Manchester, Greater Manchester, North West England, England, M13 9NR, UK",53.46600455,-2.23300881,edu,
+6b0a2f9ab9b134d66a325525ea5d90ad546fe2b7,A hybrid vision system for detecting use of mobile phones while driving,Federal University of Santa Catarina,"UFSC - Federal University of Santa Catarina / INE - CTC, Florianópolis, 88040-900, Brazil","R. Eng. Agronômico Andrei Cristian Ferreira, s/n - Trindade, Florianópolis - SC, 88040-900, Brazil",-27.60070340,-48.51917750,edu,
+6bacd4347f67ec60a69e24ed7cc0ac8073004e6f,Kinship classification based on discriminative facial patches,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+6b742055a664bcbd1c6a85ae6796bd15bc945367,Face recognition using localized features based on non-negative sparse coding,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+07a31bd7a0bd7118f8ac0bc735feef90e304fb08,Unconstrained face verification with a dual-layer block-based metric learning,Yonsei University,Yonsei University,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.56004060,126.93692480,edu,
+071ec4f3fb4bfe6ae9980477d208a7b12691710e,Learning Multimodal Latent Attributes,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+0750c796467b6ef60b0caff5fb199337d54d431e,Face detection method based on histogram of sparse code in tree deformable model,University of North Carolina Wilmington,University of North Carolina Wilmington,"Kenan House, 1705, Market Street, Wilmington, New Hanover County, North Carolina, 28403, USA",34.23755810,-77.92701290,edu,
+38e7f3fe450b126367ec358be9b4cc04e82fa8c7,Maximal Likelihood Correspondence Estimation for Face Recognition Across Pose,"OMRON Corporation, Kyoto, Japan","Core Technology Center, OMRON Corporation, Kyoto, Japan","Kyoto, Kyoto Prefecture, Japan",35.01163630,135.76802940,company,
+3888d7a40f3cea5e4a851c8ca97a2d7810a62867,A new margin-based AdaBoost algorithm: Even more robust than RobustBoost to class-label noise,University of Manitoba,University of Manitoba,"University of Manitoba, Gillson Street, Normand Park, Saint Vital, Winnipeg, Manitoba, R3T 2N2, Canada",49.80915360,-97.13304179,edu,
+387b54cf6c186c12d83f95df6bd458c5eb1254ee,Deep probabilities for age estimation,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+38345264a9ca188c4facffe6e18a7e6865fb2966,The role of reproducibility in affective computing,"Fernuniversität, Hagen, Germany","Fernuniversität in Hagen FUH Hagen, Germany","Universitätsstraße 25, 58097 Hagen, Germany",51.37675480,7.49564310,edu,
+00049f989067d082f7f8d0581608ad5441d09f8b,Adaptive Part-Level Model Knowledge Transfer for Gender Classification,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+00301c250d667700276b1e573640ff2fd7be574d,Establishing a test set and initial comparisons for quantitatively evaluating synthetic age progression for adult aging,University of North Carolina Wilmington,University of North Carolina Wilmington,"Kenan House, 1705, Market Street, Wilmington, New Hanover County, North Carolina, 28403, USA",34.23755810,-77.92701290,edu,
+009bf86913f1c366d9391bf236867d84d12fa20c,Illumination invariant representation for privacy preserving face identification,University of Haifa,University of Haifa,"אוניברסיטת חיפה, חיפה, מחוז חיפה, ישראל",32.76162915,35.01986304,edu,
+0034e37a0faf0f71395245b266aacbf5412f190a,Face Distortion Recovery Based on Online Learning Database for Conversational Video,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+6e2041a9b5d840b0c3e4195241cd110640b1f5f3,Robust relative attributes for human action recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+6e7ffd67329ca6027357a133437505bc56044e65,Facial expressions recognition system using Bayesian inference,Indian Institute of Technology Kanpur,Indian Institute of Technology Kanpur,"Indian Institute of Technology Kanpur, 4th Avenue, Panki, Kanpur, Kanpur Nagar, Uttar Pradesh, 208016, India",26.51318800,80.23651945,edu,
+6ec275755f8776b620d0a4550be0e65caf2bc87a,Action Unit recognition in still images using graph-based feature selection,Technical University of Sofia,"Faculty of Telecommunications, Technical University of Sofia, Bulgaria","ulitsa ""Akademik Stefan Mladenov"" 1, 1700 Studentski Kompleks, Sofia, Bulgaria",42.65608530,23.34765230,edu,
+9abf6d56a7d336bc58f4e3328d2ee807032589f1,Facial expression recongition using firefly-based feature optimization,Teesside University,Teesside University,"Teesside University, Southfield Road, Southfield, Linthorpe, Middlesbrough, North East England, England, TS1 3BZ, UK",54.57036950,-1.23509662,edu,
+9ab126760f68071a78cabe006cf92995d6427025,An efficient multi-threshold AdaBoost approach to detecting faces in images,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+9a98dd6d6aaba05c9e46411ea263f74df908203d,LETRIST: Locally Encoded Transform Feature Histogram for Rotation-Invariant Texture Classification,Chongqing University of Posts and Telecommunications,Chongqing University of Posts and Telecommunications,"重庆邮电大学, 崇文路, 渝中区, 黄桷垭, 重庆市中心, 南岸区 (Nan'an), 重庆市, 400065, 中国",29.53570460,106.60482474,edu,
+9a59abdf3460970de53e09cb397f47d86744f472,Query-specific visual semantic spaces for web image re-ranking,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+9aade3d26996ce7ef6d657130464504b8d812534,Face Alignment With Deep Regression,Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+361eaef45fccfffd5b7df12fba902490a7d24a8d,Robust deep learning features for face recognition under mismatched conditions,Istanbul Technical University,Istanbul Technical University,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye",41.10427915,29.02231159,edu,
+36486944b4feeb88c0499fecd253c5a53034a23f,Deep feature selection and projection for cross-age face retrieval,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+36b13627ee8a5a8cd04645213aabfa917bbd32f5,Edge-Aware Label Propagation for Mobile Facial Enhancement on the Cloud,"Baidu, Inc.","Baidu International Technology (Shenzhen) Company, Ltd., Shenzhen, China","Shenzhen, Guangdong, China",22.54309600,114.05786500,company,
+5c91fc106cfe9d57a9b149c1af29ca84d403fc7e,3D Pose Tracking With Multitemplate Warping and SIFT Correspondences,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+5c3eb40b06543f00b2345f3291619a870672c450,Facial Expression Recognition using Auto-regressive Models,"Computer Vision Center, Barcelona","Computer Vision Center 08193 Bellaterra, Barcelona, SPAIN","Campus UAB, Edifici O, s/n, 08193 Cerdanyola del Vallès, Barcelona, Spain",41.50089570,2.11155300,edu,
+0974677f59e78649a40f0a1d85735410d21b906a,A real-time 17-scale object detection accelerator with adaptive 2000-stage classification in 65nm CMOS,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+099053f2cbfa06c0141371b9f34e26970e316426,Effective recognition of facial micro-expressions with video motion magnification,Curtin University,Curtin University,"Curtin University, Brand Drive, Waterford, Perth, Western Australia, 6102, Australia",-32.00686365,115.89691775,edu,
+5dbb2d556f2e63a783a695a517f5deb11aafd7ea,Fine-grained face verification: Dataset and baseline results,"Advanced Digital Sciences Center, Singapore","Advanced Digital Sciences Center, Singapore","1 Create Way, 14-02 Create Tower, Singapore 138602",1.30372570,103.77377630,edu,
+5dafab3c936763294257af73baf9fb3bb1696654,Towards inclusive design in mobile biometry,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+5d9971c6a9d5c56463ea186850b16f8969a58e67,Facial-expression recognition based on a low-dimensional temporal feature space,"University of Sfax, Tunisia","REGIM-Labo: REsearch Groups in Intelligent Machines, University of Sfax, ENIS, BP 1173, Sfax, 3038, Tunisia","Université de Route de l'Aéroport Km 0.5 BP 1169 .3029 Sfax, Sfax, Tunisia",34.73610660,10.74272750,edu,"University of Sfax, Tunisia"
+5da827fe558fb2e1124dcc84ef08311241761726,Attribute preserved face de-identification,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+5df17c81c266cf2ebb0778e48e825905e161a8d9,A Novel Lip Descriptor for Audio-Visual Keyword Spotting Based on Adaptive Decision Fusion,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+5d2e5833ca713f95adcf4267148ac2ccf2318539,Facial expression recognition using entropy and brightness features,University of Lyon,"Université de Lyon, CNRS, INSA-Lyon, LIRIS, UMR5205, F-69621, France","20 Avenue Albert Einstein, 69100 Villeurbanne, France",45.78332440,4.87819840,edu,
+5d9f468a2841ea2f27bbe3ef2c6fe531d444be68,PT-NET: Improve object and face detection via a pre-trained CNN model,"Academy of Broadcasting Science, Beijing, China","Academy of Broadcasting Science, Beijing, P.R. China","Beijing, China",39.90419990,116.40739630,edu,
+31cdaaa7a47efe2ce0e78ebec29df4d2d81df265,Adaptive 3D Face Reconstruction from Unconstrained Photo Collections,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+3150e329e01be31ba08b6d76fc46b0da88a5ddeb,Action Recognition Using Convolutional Restricted Boltzmann Machines,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+31dd6bafd6e7c6095eb8d0591abac3b0106a75e3,Face Recognition In Unconstrained Environment,Khon Kaen University,Khon Kaen University,"มหาวิทยาลัยขอนแก่น, 4, บ้านหนองหัวช้าง, ขอนแก่น, จังหวัดขอนแก่น, 40002, ประเทศไทย",16.46007565,102.81211798,edu,
+31ffc95167a2010ce7aab23db7d5fc7ec439f5fb,Groupwise Retargeted Least-Squares Regression,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+31f905d40a4ac3c16c91d5be8427762fa91277f1,Learning Rotation-Invariant Local Binary Descriptor,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+914d7527678b514e3ee9551655f55ffbd3f0eb0a,Facial action unit detection using deep neural networks in videos,Bahçeşehir University,Bahcesehir University,"BAU Galata, 24, Kemeraltı Caddesi, Müeyyedzade, Beyoğlu, İstanbul, Marmara Bölgesi, 34425, Türkiye",41.02451875,28.97697953,edu,
+91167aceafbc9c1560381b33c8adbc32a417231b,Robust Tensor Analysis With L1-Norm,Aston University,Aston University,"Aston University, Aston Street, Digbeth, Birmingham, West Midlands Combined Authority, West Midlands, England, B4, UK",52.48620785,-1.88849915,edu,
+919bdc161485615d5ee571b1585c1eb0539822c8,A ranking model for face alignment with Pseudo Census Transform,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+9166f46aa3e58befaefd3537e5a11b31ebeea4d0,Low-complexity HOG for efficient video saliency,Technische Universität Darmstadt,"Technische Universität Darmstadt, Computer Systems Group, Darmstadt, Germany","Hochschulstraße 10, 64289 Darmstadt, Germany",49.87741510,8.65461020,edu,
+91f0a95b8eb76e8fa24c8267e4a7a17815fc7a11,Robust facial landmark detection and tracking across poses and expressions for in-the-wild monocular video,Bournemouth University,Bournemouth University,"Bournemouth University, BU footpaths, Poole, South West England, England, BH10 4HX, UK",50.74223495,-1.89433739,edu,
+657e702326a1cbc561e059476e9be4d417c37795,Face detection based on multi task learning and multi layer feature fusion,"SIASUN Robot and Automation, Shenyang, China","Shenyang SIASUN Robot & Automation Co., LTD., Shenyang, China","Shenyang, Liaoning, China",41.80569900,123.43147200,company,
+659dc6aa517645a118b79f0f0273e46ab7b53cd9,Age-invariant face recognition using a feature progressing model,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+6584c3c877400e1689a11ef70133daa86a238602,Supervised Committee of Convolutional Neural Networks in Automated Facial Expression Analysis,Universitat Oberta de Catalunya,Universitat Oberta de Catalunya,"Universitat Oberta de Catalunya, 156, Rambla del Poblenou, Provençals del Poblenou, Sant Martí, Barcelona, BCN, CAT, 08018, España",41.40657415,2.19453410,edu,
+65fc8393610fceec665726fe4e48f00dc90f55fb,The effectiveness of using geometrical features for facial expression recognition,Otto-von-Guericke-University Magdeburg,"Institute for Information Technology and Communications (IIKT), Otto-von-Guericke-University Magdeburg, D-39016, P.O. Box 4210 Germany","Universitätspl. 2, 39106 Magdeburg, Germany",52.14020530,11.64419910,edu,
+62fddae74c553ac9e34f511a2957b1614eb4f937,Action Recognition Based on Efficient Deep Feature Learning in the Spatio-Temporal Domain,RheinAhrCampus der Hochschule Koblenz,"RheinAhrCampus der Hochschule Koblenz, Remagen, Germany","RheinAhrCampus, 2, Joseph-Rovan-Allee, Remagen, Landkreis Ahrweiler, Rheinland-Pfalz, 53424, Deutschland",50.57225620,7.25318610,edu,
+62750d78e819d745b9200b0c5c35fcae6fb9f404,Leveraging implicit demographic information for face recognition using a multi-expert system,George Mason University,George Mason University,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA",38.83133325,-77.30798839,edu,
+62e834114b58a58a2ea2d7b6dd7b0ce657a64317,Adaptive facial feature extraction,Bielefeld University,Bielefeld University,"Fachhochschule Bielefeld FB Gestaltung, 3, Lampingstraße, Mitte, Bielefeld, Regierungsbezirk Detmold, Nordrhein-Westfalen, 33615, Deutschland",52.02804210,8.51148270,edu,
+6267dbeb54889be5bdb50c338a7c6ef82287084c,Face recognition based on 2DPCA and fuzzy-rough technique,"Hebei Information Engineering School, Baoding, China","Teaching and research of section of mathematics, Hebei Information Engineering School, Baoding 071000, China","Lianchi, Baoding, Hebei, China, 071000",38.86371910,115.51483260,edu,
+963a004e208ce4bd26fa79a570af61d31651b3c3,Computational methods for modeling facial aging: A survey,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+9635493998ad60764d7bbf883351af57a668d159,Cross-validated smooth multi-instance learning,"Beijing E-Hualu Info Technology Co., Ltd, Beijing, China","Beijing E-Hualu Info Technology Co., Ltd, Beijing, China","165 Fushi Rd, Shijingshan Qu, Beijing Shi, China, 100144",39.92532100,116.19579500,company,
+96b1f2bde46fe4f6cc637398a6a71e8454291a6e,Structured Max-Margin Learning for Inter-Related Classifier Training and Multilabel Image Annotation,University of North Carolina,University of North Carolina,"University of North Carolina, Emergency Room Drive, Chapel Hill, Orange County, North Carolina, 27599, USA",35.90503535,-79.04775327,edu,
+96e318f8ff91ba0b10348d4de4cb7c2142eb8ba9,State-of-the-art face recognition performance using publicly available software and datasets,"Université Paris-Saclay, France","Samovar CNRS UMR 5157, Télécom SudParis, Université Paris-Saclay, Evry, France","3 Rue Michel Ange, 75016 Paris, France",48.84760370,2.26399340,edu,
+96fbadc5fa1393d59ce0b8fd3d71aebc1fe35b40,Pursuing face identity from view-specific representation to view-invariant representation,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+96d34c1a749e74af0050004162d9dc5132098a79,High-speed face recognition based on discrete cosine transform and RBF neural networks,"Nanyang Technological University, Singapore","Comput. Control Lab, Nanyang Technol. Univ., Singapore","50 Nanyang Avenue, Block N4 #02a-32, Singapore 639798",1.34619520,103.68154990,edu,
+3a0558ebfde592bd8bd07cb72b8ca8f700715bfb,Learning a bag of features based nonlinear metric for facial similarity,"LIRIS, INSA-Lyon, France","LIRIS, UMR 5205 CNRS, INSA-Lyon, F-69621, France","40 Avenue Guy de Collongue, 69130 Écully, France",45.78359660,4.76789480,edu,
+3a9fbd05aaab081189a8eea6f23ed730fa6db03c,Facial action unit prediction under partial occlusion based on Error Weighted Cross-Correlation Model,National Cheng Kung University,National Cheng Kung University,"成大, 1, 大學路, 大學里, 前甲, 東區, 臺南市, 70101, 臺灣",22.99919160,120.21625134,edu,
+3ac3a714042d3ebc159546c26321a1f8f4f5f80c,Clustering lightened deep representation for large scale face identification,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+3a6334953cd2775fab7a8e7b72ed63468c71dee7,Automated social skills training with audiovisual information,Nara University of Education,"Center for Special Needs Education, Nara University of Education, Takabatake-cho, Nara-shi, Nara, Japan","Takabatakecho, Nara, Nara Prefecture 630-8528, Japan",34.67412100,135.84217100,edu,
+3ad56aed164190e1124abea4a3c4e1e868b07dee,Collaborative expression representation using peak expression and intra class variation face images for practical subject-independent emotion recognition in videos,Korea Advanced Institute of Science and Technology,Korea Advanced Institute of Science and Technology,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.36971910,127.36253700,edu,
+5491478ae2c58af21389ed3af21babd362511a8e,Towards HDR Based Facial Expression Recognition under Complex Lighting,University of Warwick,University of Warwick,"University of Warwick, University Road, Kirby Corner, Cannon Park, Coventry, West Midlands Combined Authority, West Midlands, England, CV4 7AL, UK",52.37931310,-1.56042520,edu,
+54e988bc0764073a5db2955705d4bfa8365b7fa9,Emotion recognition in the wild challenge (EmotiW) challenge and workshop summary,University of Canberra,University of Canberra,"University of Canberra, University Drive, Bruce, Belconnen, Australian Capital Territory, 2617, Australia",-35.23656905,149.08446994,edu,
+54f169ad7d1f6c9ce94381e9b5ccc1a07fd49cc6,Towards Robust and Accurate Multi-View and Partially-Occluded Face Alignment,"360 AI Institute, Beijing, China","360 AI Institute, Beijing, China","Beijing, China",39.90419990,116.40739630,company,
+982fcead58be419e4f34df6e806204674a4bc579,Performance improvement of face recognition algorithms using occluded-region detection,"Azbil Corporation, Kawana, Japan","Azbil Corporation 1-12-2, Kawana, Fujisawa-shi, 251-8522, Japan","2 Chome Kawana, Fujisawa, Kanagawa Prefecture 251-0015, Japan",35.33414870,139.49433560,company,
+9888edfb6276887eb56a6da7fe561e508e72a517,Layer-Centric Memory Reuse and Data Migration for Extreme-Scale Deep Learning on Many-Core Architectures,Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+98d1b5515b079492c8e7f0f9688df7d42d96da8e,Use of Active Appearance Models for analysis and synthesis of naturally occurring behavior,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+982ede05154c1afdcf6fc623ba45186a34f4b9f2,The Many Shades of Negativity,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+982d4f1dee188f662a4b5616a045d69fc5c21b54,Learning to link human objects in videos and advertisements with clothes retrieval,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+98e098ba9ff98fc58f22fed6d3d8540116284b91,Global Temporal Representation Based CNNs for Infrared Action Recognition,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+98fd92d68a143a5ced4a016fa3b7addd6b4a0122,Attribute-based supervised deep learning model for action recognition,Northumbria University,Northumbria University,"Northumbria University, Birkdale Close, High Heaton, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE7 7TP, UK",55.00306320,-1.57463231,edu,
+539cb169fb65a5542c84f42efcd5d2d925e87ebb,A transfer learning approach to cross-database facial expression recognition,"Sichuan University, Chengdu","Sichuan Univ., Chengdu","四川大学(华西校区), 校东路, 武侯区, 武侯区 (Wuhou), 成都市 / Chengdu, 四川省, 610014, 中国",30.64276900,104.06751175,edu,
+53507e2de66eaba996f14fd2f54a5535056f1e59,Feature fusion with covariance matrix regularization in face recognition,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+5375a3344017d9502ebb4170325435de3da1fa16,Computer Vision – ACCV 2012,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+5304cd17f9d6391bf31276e4419100f17d4423b2,Local histogram specification using learned histograms for face recognition,Nanjing Normal University,Nanjing Normal University,"南京师范大学仙林校区, 敏行路, 仙林大学城, 栖霞区, 南京市, 江苏省, 210046, 中国",32.10668110,118.90863081,edu,
+53873fe7bbd5a2d171e2b1babc9cacaad6cabe45,Coupled Dictionary Learning for the Detail-Enhanced Synthesis of 3-D Facial Expressions,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+535cdce8264ac0813d5bb8b19ceafa77a1674adf,Discriminative Lasso,Anhui University,Anhui University,"安徽大学(磬苑校区), 111, 九龙路, 弘泰苑, 合肥国家级经济技术开发区, 芙蓉社区, 合肥经济技术开发区, 合肥市区, 合肥市, 安徽省, 230601, 中国",31.76909325,117.17795091,edu,
+53f5cb365806c57811319a42659c9f68b879454a,Research on face recognition based on deep learning,Shenyang Normal University,"College of Software, Shenyang Normal University, Shenyang, China","Fanglin Rd, Shenbei Xinqu, Shenyang Shi, Liaoning Sheng, China",41.91299790,123.41795810,edu,
+3f2a44dcf0ba3fc72b24c7f09bb08e25797398c1,Recent advances in video-based human action recognition using deep learning: A review,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+3fa628e7cff0b1dad3f15de98f99b0fdb09df834,People recognition in ambiguously labeled Photo Collections,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+3f88ea8cf2eade325b0f32832561483185db5c10,Low-Rank and Joint Sparse Representations for Multi-Modal Recognition,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+3ff79cf6df1937949cc9bc522041a9a39d314d83,Adversarial examples: A survey,Warsaw University of Technology,Warsaw University of Technology,"Politechnika Warszawska, 1, Plac Politechniki, VIII, Śródmieście, Warszawa, mazowieckie, 00-661, RP",52.22165395,21.00735776,edu,
+30cc1ddd7a9b4878cca7783a59086bdc49dc4044,Intensity contrast masks for gender classification,National Taipei University,National Taipei University,"國立臺北大學, 151, 大學路, 龍恩里, 隆恩埔, 三峽區, 新北市, 23741, 臺灣",24.94314825,121.36862979,edu,
+30fb5c24cc15eb8cde5e389bf368d65fb96513e4,Interpretable Partitioned Embedding for Customized Multi-item Fashion Outfit Composition,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+30188b836f2fa82209d7afbf0e4d0ee29c6b9a87,Stable Orthogonal Local Discriminant Embedding for Linear Dimensionality Reduction,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+3080026f2f0846d520bd5bacb0cb2acea0ffe16b,2.5D cascaded regression for robust facial landmark detection,"Sichuan University, Chengdu","Sichuan Univ., Chengdu","四川大学(华西校区), 校东路, 武侯区, 武侯区 (Wuhou), 成都市 / Chengdu, 四川省, 610014, 中国",30.64276900,104.06751175,edu,
+30cace74a7d51e9a928287e25bcefb968c49f331,Monocular 3D facial information retrieval for automated facial expression analysis,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+5e6fc99d8f5ebaab0e9c29bc0969530d201e0708,Fuzzy Sparse Autoencoder Framework for Single Image Per Person Face Recognition,University of Birmingham,University of Birmingham,"University of Birmingham Edgbaston Campus, Ring Road North, Bournbrook, Birmingham, West Midlands Combined Authority, West Midlands, England, B15 2TP, UK",52.45044325,-1.93196134,edu,
+5ee0103048e1ce46e34a04c45ff2c2c31529b466,Learning occlusion patterns using semantic phrases for object detection,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+5ed66fb992bfefb070b5c39dc45b6e3ff5248c10,Multi-instance Hidden Markov Model for facial expression recognition,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+5e87f5076952cd442718d6b4addce905bae1a1a4,Facial expression recognition based on salient patch selection,"SAIC Motor Corporation, Shanghai","Research & Advanced Technology Division of SAIC Motor Corporation Limited, Shanghai, 201804, P.R. China","Shanghai, China",31.23039040,121.47370210,company,
+5ed5e534c8defd683909200c1dc31692942b7b5f,A Multimodal Approach to Assessing User Experiences with Agent Helpers,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+5e806d8fa48216041fe719309534e3fa903f7b5b,An expression transformation for improving the recognition of expression-variant faces from one sample image per person,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+5efdf48ca56b78e34dc2f2f0ce107a25793d3fc2,Real-Time 3D Eye Performance Reconstruction for RGBD Cameras,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+5bed2453a5b0c54a4a4a294f29c9658658a9881e,Angular-Similarity-Preserving Binary Signatures for Linear Subspaces,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+5b64584d6b01e66dfd0b6025b2552db1447ccdeb,Deep expectation for estimation of fingerprint orientation fields,"Dermalog Identification Systems, Hamburg, Germany","Dermalog Identification Systems GmbH, Hamburg, Germany","Mittelweg 120, 20148 Hamburg, Germany",53.57227000,9.99472000,company,
+5b4bbba68053d67d12bd3789286e8a9be88f7b9d,An automatic region based methodology for facial expression recognition,University of Ioannina,University of Ioannina,"Πανεπιστήμιο Ιωαννίνων, Πανεπιστημίου, Κάτω Νεοχωρόπουλο, Νεοχωρόπουλο, Δήμος Ιωαννιτών, Π.Ε. Ιωαννίνων, Περιφέρεια Ηπείρου, Ήπειρος - Δυτική Μακεδονία, 45110, Ελλάδα",39.61623060,20.83963011,edu,
+37c5e3b6175db9eaadee425dc51bc7ce05b69a4e,RETRACTED ARTICLE: Sparse tensor CCA for color face recognition,Jiangsu University of Science and Technology,Jiangsu University of Science and Technology,"江苏科技大学, 学府路, 京口区, 象山街道, 京口区 (Jingkou), 镇江市 / Zhenjiang, 江苏省, 212000, 中国",32.19805500,119.46326791,edu,
+378418fdd28f9022b02857ef7dbab6b0b9a02dbe,Intelligent Information and Database Systems,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+37866fea39deeff453802cde529dd9d32e0205a5,"Sense beauty via face, dressing, and/or voice","National University of Defense Technology, China","National University of Defence Technology, Changsha 410000, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.22902090,112.99483204,edu,
+373c4d6af0ee233f0d669c3955c3a3ef2a009638,Eye corner detection with texture image fusion,Zhengzhou University,Zhengzhou University,"科学大道, 中原区 (Zhongyuan), 郑州市 / Zhengzhou, 河南省, 450001, 中国",34.80881680,113.53526640,edu,
+0874734e2af06883599ed449532a015738a1e779,Semi-supervised classification based on subspace sparse representation,George Mason University,George Mason University,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA",38.83133325,-77.30798839,edu,
+6d2fd0a9cbea13e840f962ba7c8a9771ec437d3a,Robust Low Rank Subspace Segmentation via Joint $$\ell _{21} $$ ℓ21 -Norm Minimization,Jiangnan University,Jiangnan University,"江南大学站, 蠡湖大道, 滨湖区, 南场村, 滨湖区 (Binhu), 无锡市 / Wuxi, 江苏省, 214121, 中国",31.48542550,120.27395810,edu,
+6d5f876a73799cc628e4ad2d9cfcd88091272342,An appearance model constructed on 3-D surface for robust face recognition against pose and illumination variations,"NEC, Kanagawa, Japan","Media & Inf. Res. Labs., NEC Corp., Kanagawa, Japan","Kanagawa Prefecture, Japan",35.44750730,139.64234460,company,
+6dcf6b028a6042a9904628a3395520995b1d0ef9,Field support vector machines,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+6da3ff4250103369f4a6a39c8fb982438a97525c,Binary Data Embedding Framework for Multiclass Classification,University of Liverpool,University of Liverpool,"Victoria Building, Brownlow Hill, Knowledge Quarter, Liverpool, North West England, England, L3, UK",53.40617900,-2.96670819,edu,
+6de935a02f87aa31e33245c3b85ea3b7f8b1111c,Unconstrained Still/Video-Based Face Verification with Deep Convolutional Neural Networks,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+6da711d07b63c9f24d143ca3991070736baeb412,Spatio-temporal constraint for fast face tracking in movies,"Orange Labs International Center, Beijing, China","Orange Labs International Center Beijing, Beijing, 100876, China","Beitaipingzhuang, Haidian, Beijing, China, 100876",39.96416860,116.35725230,company,
+6d4c64ca6936f868d793e1b164ddaf19243c19a7,Scalable Linear Visual Feature Learning via Online Parallel Nonnegative Matrix Factorization,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+6d70344ae6f6108144a15e9debc7b0be4e3335f1,THU Face Database for Real-Time Automatic Video Scoring Model,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+013d0acff1e5410fd9f6e15520d16f4ea02f03f6,Learning Representative Deep Features for Image Set Analysis,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+0133d1fe8a3138871075cd742c761a3de93a42ec,An SVM based scoring evaluation system for fluorescence microscopic image classification,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+017e94ad51c9be864b98c9b75582753ce6ee134f,Rapid one-shot acquisition of dynamic VR avatars,Walt Disney Imagineering,"Walt Disney Imagineering, USA","Walt Disney Imagineering, 1401, Flower Street, Grand Central Creative Campus, Glendale, Los Angeles County, California, 91201, USA",34.16191740,-118.28837020,company,
+01e27b6d1af4c9c2f50e2908b5f3b2331ff24846,Toward Personalized Modeling: Incremental and Ensemble Alignment for Sequential Faces in the Wild,University of North Carolina at Charlotte,University of North Carolina at Charlotte,"Lot 20, Poplar Terrace Drive, Charlotte, Mecklenburg County, North Carolina, 28223, USA",35.31034410,-80.73261617,edu,
+0141cb33c822e87e93b0c1bad0a09db49b3ad470,Unconstrained 3D face reconstruction,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+0647c9d56cf11215894d57d677997826b22f6a13,Transgender face recognition with off-the-shelf pre-trained CNNs: A comprehensive study,"Norwegian Biometrics Lab, NTNU, Norway","Norwegian Biometrics Lab, NTNU, Gj⊘vik, Norway","Teknologivegen 22, 2815 Gjøvik, Norway",60.78973180,10.68219270,edu,
+067fe74aec42cb82b92cf6742c7cfb4a65f16951,Robust Manifold Nonnegative Matrix Factorization,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+06518858bd99cddf9bc9200fac5311fc29ac33b4,Sparse Low-Rank Component-Based Representation for Face Recognition With Low-Quality Images,Tongji University,Tongji University,"同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国",31.28473925,121.49694909,edu,
+06b4e41185734f70ce432fdb2b121a7eb01140af,Domain Invariant and Class Discriminative Feature Learning for Visual Domain Adaptation,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+6c6f0e806e4e286f3b18b934f42c72b67030ce17,Combination of age and head pose for adult face verification,"Swiss Federal, Institute of Technology, Lausanne","Swiss Federal, Institute of Technology, Lausanne (EPFL), Switzerland","Route Cantonale, 1015 Lausanne, Switzerland",46.51905570,6.56675760,edu,
+6c28b3550f57262889fe101e5d027912eb39564e,Hybrid Approach for Facial Feature Detection and Tracking under Occlusion,Pohang University of Science and Technology,Pohang University of Science and Technology,"포스텍, 77, 청암로, 효곡동, 남구, 포항시, 경북, 37673, 대한민국",36.01773095,129.32107509,edu,
+6c7a42b4f43b3a2f9b250f5803b697857b1444ac,Multiple feature fusion for face recognition,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+6cbde27d9a287ae926979dbb18dfef61cf49860e,Recent Advances in Zero-Shot Recognition: Toward Data-Efficient Understanding of Visual Content,Queen Mary University,Queen Mary University,"Queen Mary University of London, Mile End Road, Bethnal Green, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52459000,-0.03840000,edu,
+39c8ed5213882d4dbc74332245ffe201882c5de1,Multi-view face hallucination based on sparse representation,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+397257783ccc8cace5b67cc71e0c73034d559a4f,A 3D-Based Pose Invariant Face Recognition at a Distance Framework,Assiut University,Assiut University,"Assiut University, El Shaheed Ellwaa Hasn Kamel street, الوليدية, أسيوط, مصر",27.18794105,31.17009498,edu,
+396b2963f0403109d92a4d4f26205f279ea79d2c,A joint compression-discrimination neural transformation applied to target detection,US Army Research Laboratory,"US Army Research Laboratory, 2800 Powder Mill Rd, Adelphi, MD 20783, USA","2800 Powder Mill Rd, Adelphi, MD 20783, USA",39.02985870,-76.96380270,mil,
+397022a4460750c762dbb0aaebcacc829dee8002,Attribute Regularization Based Human Action Recognition,"CASIA, Beijing, China","State Key Laboratory of Management and Control of Complex Systems, CASIA, Beijing, China","Beijing, China",39.90419990,116.40739630,edu,
+39c10888a470b92b917788c57a6fd154c97b421c,Joint multi-feature fusion and attribute relationships for facial attribute prediction,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+39d0de660e2116f32088ce07c3376759d0fdaff5,Regression-based metric learning,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+39af06d29a74ad371a1846259e01c14b5343e3d1,Structure-Aware Data Consolidation,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+39d6339a39151b5f88ec2d7acc38fe0618d71b5f,Tracking characters in movies within logical story units,University of Brescia,University of Brescia,"Università degli Studi di Brescia, Via Branze 38, 25123 Brescia BS, Italy",45.56494000,10.23172000,edu,
+39d6f8b791995dc5989f817373391189d7ac478a,On the kernel Extreme Learning Machine speedup,Tampere University of Technology,Tampere University of Technology,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi",61.44964205,23.85877462,edu,
+3960882a7a1cd19dfb711e35a5fc1843ed9002e7,On the equivalent of low-rank linear regressions and linear discriminant analysis based regressions,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+993934822a42e70dd35fb366693d847164ca15ff,Example-based performance driven facial shape animation,University of Tokushima,The University of Tokushima,"大学前, 国道11号, 徳島市, 徳島県, 四国地方, 770-0815, 日本",34.07880680,134.55898100,edu,
+99a1180c3d39532efecfc5fa251d6893375c91a1,Facial expression recognition based on Gabor features and sparse representation,China University of Petroleum,"College of Information and Control Engineering, China University of Petroleum, Qingdao, 266580, China","China, Shandong, Weifang, Kuiwen, Dongfeng E St, 49号潍坊学院经济管理学院附近",36.71684600,119.18339500,edu,
+99d06fe2f4d6d76acf40b6da67c5052e82055f5a,Mobile App Classification Method Using Machine Learning Based User Emotion Recognition,SungKyunKwan University,SungKyunKwan University,"성균관대, 덕영대로, 천천동, 장안구, 수원시, 경기, 16357, 대한민국",37.30031270,126.97212300,edu,
+99b8a24aacaa53fa3f8a7e48734037c7b16f1c40,A Proposal to Improve the Authentication Process in m-Health Environments,Universitat Politècnica de València,"Integrated Management Coastal Research Institute, Universitat Politècnica de València, València, Spain","Camí de Vera, s/n, 46022 València, Valencia, Spain",39.48083760,-0.34095220,edu,
+9989ad33b64accea8042e386ff3f1216386ba7f1,Facial feature extraction method based on shallow and deep fusion CNN,Guilin University of Electronic Technology Guangxi Guilin,Guilin University of Electronic Technology Guangxi Guilin,"桂林电子科技大学金鸡岭校区, 1号, 金鸡路, 七星区, 黄莺岩村, 七星区, 桂林市, 广西壮族自治区, 541004, 中国",25.28739920,110.33242770,edu,
+9961f1e5cf8fda29912344773bc75c47f18333a0,An automatic decision approach to coal–rock recognition in top coal caving based on MF-Score,Shandong University of Science and Technology,Shandong University of Science and Technology,"山东科技大学, 579, 前湾港路, 牛王庙, 北下庄, 黄岛区 (Huangdao), 青岛市, 山东省, 266500, 中国",36.00146435,120.11624057,edu,
+998542e5e3882bb0ce563d390b1e1bff5460e80c,Evaluation of face recognition techniques for application to facebook,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+9989eda2f5392cfe1f789bb0f6213a46d92d1302,Activity recognition and prediction with pose based discriminative patch model,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+993374c1c9d58a3dec28160188ff6ac1227d02f5,WHoG: A weighted HoG-based scheme for the detection of birds and identification of their poses in natural environments,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+5217ab9b723158b3ba2235e807d165e72fd33007,A novel facial expression database construction method based on web images,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+52e270ca8f5b53eabfe00a21850a17b5cc10f6d5,A comparison study of feature spaces and classification methods for facial expression recognition,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+5213549200bccec57232fc3ff788ddf1043af7b3,Displaced dynamic expression regression for real-time facial tracking and animation,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+52b102620fff029b80b3193bec147fe6afd6f42e,Benchmark of a large scale database for facial beauty prediction,Wuyi University,"Wuyi University, Jiangmen, China","Pengjiang, Jiangmen, China, 529030",22.59924800,113.08663400,edu,
+559645d2447004355c83737a19c9a811b45780f1,Combining view-based pose normalization and feature transform for cross-pose face recognition,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+550351edcfd59d3666984771f5248d95548f465a,Diverse Expected Gradient Active Learning for Relative Attributes,University of Technology,"Center for Quantum Computation and Intelligent Systems, Faculty of Engineering and Information Technology, University of Technology, Sydney, NSW, Australia","11 University of Technology Sydney 81, Broadway, Ultimo NSW 2007, Australia",-33.88405040,151.19922540,edu,
+556b05ab6eff48d32ffbd04f9008b9a5c78a4ad7,EyeOpener: Editing Eyes in the Wild,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+55fdff2881d43050a8c51c7fdc094dbfbbe6fa46,Transferring deep representation for NIR-VIS heterogeneous face recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+552122432b92129d7e7059ef40dc5f6045f422b5,Empowering Simple Binary Classifiers for Image Set Based Face Recognition,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+553a605243b77a76c1ed4c1ad4f9a43ff45e391b,Experimental evaluation of facial expression recognition,Xinjiang Normal University,"College of Computer Science and Technology, Xinjiang Normal University, Urumchi, 830054, China","311 Nongda E Rd, Shayibake Qu, Wulumuqi Shi, Xinjiang Weiwuerzizhiqu, China, 830000",43.81413200,87.56629700,edu,
+55c4efc082a8410b528af7325de8148b80cf41e3,"Integrated System for Face Detection, Clustering and Recognition","National University of Defense Technology, China","National University of Defence Technology, Changsha 410000, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.22902090,112.99483204,edu,
+9745a7f38c9bba9d2fd076813fc9ab7a128a3e19,Attribute-assisted reranking for web image retrieval,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+971cb1bfe3d10fcb2037e684c48bd99842f42fa4,Novel directional patterns and a Generalized Supervised Dimension Reduction System (GSDRS) for facial emotion recognition,Anna University,Anna University,"Anna University, Nuclear Physics Road, Srinagar Colony, Ward 171, Zone 13 Adyar, Chennai, Chennai district, Tamil Nadu, 600025, India",13.01058380,80.23537360,edu,
+972b1a7ef8cc9c83c2c6d8d126f94f27b567d7d0,Artificial Neural Networks in Pattern Recognition,University of Siena,University of Siena,"Università degli Studi di Siena, Banchi di Sotto 55, 53100 Siena SI, Italy",43.31929000,11.33154000,edu,
+978b32ff990d636f7e2050bb05b8df7dfcbb42a1,Age invariant face recognition based on texture embedded discriminative graph model,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+9729930ab0f9cbcd07f1105bc69c540330cda50a,Compressing Fisher Vector for Robust Face Recognition,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+635d2696aa597a278dd6563f079be06aa76a33c0,Age estimation via fusion of multiple binary age grouping systems,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+6343bc0013343b6a5f96154f02d18dcd36a3f74c,Compressed domain human action recognition in H.264/AVC video streams,Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.02223470,77.56718325,edu,
+6316a4b689706b0f01b40f9a3cef47b92bc52411,Rotation-Invariant Neoperceptron,ETH Zurich,"Computer Vision Laboratory, ETH Zurich, Zurich, Switzerland","Sternwartstrasse 7, 8092 Zürich, Switzerland",47.37723980,8.55216180,edu,
+0f7e9199dad3237159e985e430dd2bf619ef2db5,Learning Social Circles in Ego-Networks Based on Multi-View Network Structure,Microsoft,"Microsoft Corporation, Redmond, WA, USA","One Microsoft Way, Redmond, WA 98052, USA",47.64233180,-122.13693020,company,
+0f2461a265be997c962fa562ae48378fb964b7b4,Automated big security text pruning and classification,University of Colorado at Colorado Springs,University of Colorado at Colorado Springs,"1420 Austin Bluffs Pkwy, Colorado Springs, CO 80918, USA",38.89646790,-104.80505940,edu,
+0f22b89341d162a7a0ebaa3c622d9731e5551064,Object recognition in ocean imagery using feature selection and compressive sensing,"SPAWAR Systems Center Pacific, San Diego","SPAWAR Systems Center Pacific, San Diego, California, USA","53560 Hull St, San Diego, CA 92152, USA",32.70865800,-117.24724910,mil,
+0fdc3cbf92027cb1200f3f94927bef017d7325ae,Joint prototype and metric learning for set-to-set matching: Application to biometrics,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+0f1cb558b32c516e2b6919fea0f97a307aaa9091,Face image retrieval based on shape and texture feature fusion,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+0fcf04fda0bea5265b73c85d2cc2f7f70416537b,A Multiattribute Sparse Coding Approach for Action Recognition From a Single Unknown Viewpoint,National Chung Cheng University,National Chung Cheng University,"國立中正大學, 168, 鳳凰大道, 民雄鄉, 嘉義縣, 62102, 臺灣",23.56306355,120.47510531,edu,
+0f64e26d6dd6f1c99fe2050887fac26cafe9ed60,Bridging the Gap Between Forensics and Biometric-Enabled Watchlists for e-Borders,University of Calgary,University of Calgary,"University of Calgary, Service Tunnel, University Heights, Calgary, Alberta, T2N 1N7, Canada",51.07840380,-114.12870770,edu,
+0a4a8768c1ed419baebe1c420bd9051760875cbe,An approximate message passing algorithm for robust face recognition,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+0a5b2e642683ff20b6f0cee16a32a68ba0099908,Beyond Mahalanobis distance: Learning second-order discriminant function for people verification,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+0aebe97a92f590bdf21cdadfddec8061c682cdb2,Probabilistic Elastic Part Model: A Pose-Invariant Representation for Real-World Face Verification,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+0a451fc7d2c6b3509d213c210ae880645edf90ed,Semi-supervised local-learning-based feature selection,SUNY Buffalo,SUNY Buffalo,"SUNY College at Buffalo, Academic Drive, Elmwood Village, Buffalo, Erie County, New York, 14222, USA",42.93362780,-78.88394479,edu,
+0abfb5b89e9546f8a5c569ab35b39b888e7cea46,Toward Development of a Face Recognition System for Watchlist Surveillance,"Office of Naval Research, Arlington","Office of Naval Research, Arlington","875 N Randolph St, Arlington, VA 22217, USA",38.88079270,-77.10869400,mil,
+0aaf785d7f21d2b5ad582b456896495d30b0a4e2,A Face Recognition Application for People with Visual Impairments: Understanding Use Beyond the Lab,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+642a386c451e94d9c44134e03052219a7512b9de,Taking the bite out of automated naming of characters in TV video,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+64e216c128164f56bc91a33c18ab461647384869,Low-resolution Convolutional Neural Networks for video face recognition,Fraunhofer,"Fraunhofer IOSB, Fraunhoferstrasse 1, 76131 Karlsruhe, Germany","Fraunhoferstraße 1, 76131 Karlsruhe, Germany",49.01546000,8.42579990,company,
+64ec02e1056de4b400f9547ce56e69ba8393e2ca,Multi-RPN Fusion-based Sparse PCA-CNN Approach to Object Detection and Recognition for Robot-aided Visual System,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+6489ad111fee8224b34f99d1bcfb5122786508cd,Learning symmetric face pose models online using locally weighted projectron regression,Dalle Molle Institute for Artificial Intelligence,"Dalle Molle Institute for Artificial Intelligence (IDSIA), Lugano, Switzerland","Via Cantonale 2C, 6928 Manno TI, Switzerland",46.02619220,8.91848180,edu,
+64b9ad39d115f3e375bde4f70fb8fdef5d681df8,Bootstrapping Joint Bayesian model for robust face verification,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+64fd48fae4d859583c4a031b51ce76ecb5de614c,Illuminated face normalization technique by using wavelet fusion and local binary patterns,Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.64185301,edu,
+90ddf1aabf1c73b5fc45254a2de46e53a0bde857,An improved eLBPH method for facial identity recognition: Expression-specific weighted local binary pattern histogram,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+90eb66e75381cce7146b3953a2ae479a7beec539,Hybrid sensing face detection and recognition,US Army Research Laboratory,"US Army Research Laboratory, 2800 Powder Mill Rd, Adelphi, MD 20783, USA","2800 Powder Mill Rd, Adelphi, MD 20783, USA",39.02985870,-76.96380270,mil,
+90221884fe2643b80203991686af78a9da0f9791,High level describable attributes for predicting aesthetics and interestingness,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+90c4a6c6f790dbcef9a29c9a755458be09e319b6,Attention-based LSTM with Semantic Consistency for Videos Captioning,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+bf30477f4bd70a585588528355b7418d2f37953e,Facial expression recognition based on static and dynamic approaches,"Universidad Argentina de la Empresa, Argentina","Universidad Argentina de la Empresa (UADE), Lima 717, Buenos Aires, Argentina","Lima 775, C1073 AAO, Buenos Aires, Argentina",-34.61709410,-58.38204780,edu,
+bf3bf5400b617fef2825eb987eb496fea99804b9,Recognizing Minimal Facial Sketch by Generating Photorealistic Faces With the Guidance of Descriptive Attributes,"Tsinghua National Lab for Information Science and Technology, Beijing","Tsinghua National Lab for Information Science and Technology, Beijing, China","Shuangqing Rd, Haidian Qu, Beijing Shi, China, 100083",39.99674580,116.33229460,edu,
+bf0836e5c10add0b13005990ba019a9c4b744b06,An enhanced independent component-based human facial expression recognition from video,Kyung Hee University,Kyung Hee University,"경희대학교, 26, 경희대로, 회기동, 동대문구, 서울특별시, 02447, 대한민국",37.59688000,127.05187000,edu,
+bf37a81d572bb154581845b65a766fab1e5c7dda,Rotation-reversal invariant HOG cascade for facial expression recognition,Kobe University,Kobe University,"神戸大学, 灘三田線, 灘区, 神戸市, 兵庫県, 近畿地方, 657-00027, 日本",34.72757140,135.23710000,edu,
+d34f546e61eccbac2450ca7490f558e751e13ec3,A Flexible Dirty Model Dictionary Learning Approach for Classification,Beijing Jiaotong University,Beijing Jiaotong University,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国",39.94976005,116.33629046,edu,
+d37ca68742b2999667faf464f78d2fbf81e0cb07,DFDnet: Discriminant Face Descriptor Network for Facial Age Estimation,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+d3008b4122e50a28f6cc1fa98ac6af28b42271ea,Searching Persuasively: Joint Event Detection and Evidence Recounting with Limited Supervision,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+d3dea0cd65ab3da14cb7b3bd0ec59531d98508aa,Spontaneous Expression Detection from 3D Dynamic Sequences by Analyzing Trajectories on Grassmann Manifolds,University of Florence,University of Florence,"Piazza di San Marco, 4, 50121 Firenze FI, Italy",43.77764260,11.25976500,edu,
+d383ba7bbf8b7b49dcef9f8abab47521966546bb,Face image retrieval by shape manipulation,University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+d3a3d15a32644beffaac4322b9f165ed51cfd99b,Eye detection by using deep learning,"Gebze Technical University, Turkey","Bilgisayar Mühendisliği Bölümü, Gebze Teknik Üniversitesi, Kocaeli, 41400, Türkiye","Cumhuriyet Mah, 2254. Sk. No:2, 41420 Gebze/Kocaeli, Turkey",40.80805620,29.35612020,edu,
+d4331a8dd47b03433f8390da2eaa618751861c64,Probabilistic Approach to Realistic Face Synthesis With a Single Uncalibrated Image,"Samsung Advanced Institute of Technology, Samsung Electronics, Gyeonggi-do, Korea","Samsung Advanced Institute of Technology, Samsung Electronics, Gyeonggi-do, Korea","130 Samseong-ro, Maetan 3(sam)-dong, Yeongtong-gu, Suwon, Gyeonggi-do, South Korea",37.25202260,127.05550190,company,
+d40c16285d762f7a1c862b8ac05a0fdb24af1202,Coarse-to-fine facial landmarks localization based on convolutional feature,Beijing Jiaotong University,Beijing Jiaotong University,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国",39.94976005,116.33629046,edu,
+d4df31006798ee091b86e091a7bf5dce6e51ba3e,Face recognition using the classified appearance-based quotient image,Toshiba,"Corp. Res. & Dev. Center, Toshiba Corp., Kawasaki, Japan","Japan, 〒212-8582 Kanagawa Prefecture, Kawasaki, Saiwai Ward, 小向東芝町1",35.54931130,139.69201440,company,
+d4fba386caca1b5b2ee35ee5310b5fce50b2b1c3,Analysis of in- and out-group differences between Western and East-Asian facial expression recognition,University of Electro-Communications,The University of Electro-Communications,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本",35.65729570,139.54255868,edu,
+ba01dbfa29dc86d1279b2e9b9eeca1c52509bbda,A general framework for real-time analysis of massive multimedia streams,University of Bologna,Università di Bologna,"Via Zamboni, 33, 40126 Bologna BO, Italy",44.49623180,11.35415700,edu,
+bad2df94fa771869fa35bd11a1a7ab2e3f6d1da3,Multi-pose facial expression recognition based on SURF boosting,Jiangsu University,Jiangsu University,"江苏大学, 301, 学府路, 京口区, 象山街道, 京口区 (Jingkou), 镇江市 / Zhenjiang, 江苏省, 212013, 中国",32.20302965,119.50968362,edu,
+ba1c0600d3bdb8ed9d439e8aa736a96214156284,Complex representations for learning statistical shape priors,"Amazon Research, Berlin","Amazon Research, Berlin, Germany","Krausenstraße 38, 10117 Berlin, Germany",52.50986860,13.39845130,company,
+badb95dbdfb3f044a46d7ba0ee69dba929c511b1,Yet another gaze detector: An embodied calibration free system for the iCub robot,Osaka University,Osaka University,"大阪大学清明寮, 服部西町四丁目, 豊中市, 大阪府, 近畿地方, 日本",34.80809035,135.45785218,edu,
+ba17782ca5fc0d932317389c2adf94b5dbd3ebfe,Dynamic facial expression recognition using Laplacian Eigenmaps-based manifold learning,University of the Basque Country,University of the Basque Country,"Euskal Herriko Unibertsitatea, Ibaeta Campusa, Paseo Arriola pasealekua, Ibaeta, Donostia/San Sebastián, Donostialdea, Gipuzkoa, Euskadi, 20008, España",43.30927695,-2.01066785,edu,
+a082c77e9a6c2e2313d8255e8e4c0677d325ce3e,Dynamic facial expression recognition by joint static and multi-time gap transition classification,Sorbonne,"Sorbonne Universités, UPMC Univ Paris 06, CNRS, UMR 7222, F-75005, Paris, France","4 Place Jussieu, 75005 Paris, France",48.84710360,2.35749900,edu,
+a00fdf49e5e0a73eb24345cb25a0bd1383a10021,Evaluation of face image quality metrics in person identification problem,Yaroslavl State University,Yaroslavl State University,"ЯрГУ им. Демидова (Экономический факультет), 3, Комсомольская улица, Кировский район, Ярославль, городской округ Ярославль, Ярославская область, ЦФО, 150000, РФ",57.62521030,39.88456560,edu,
+a094e52771baabe4ab37ef7853f9a4f534227457,Estimation of Driver Head Yaw Angle Using a Generic Geometric Model,"Amrita Vishwa Vidyapeetham, India","Amrita Vishwa Vidyapeetham, Amritapuri Campus, Kollam, India","Amritapuri, Vallikavu, Kerala 690546, India",9.09368520,76.49158540,edu,
+a7c066e636b8953481b4a8d8ff25a43a96dd348f,Facial expression recognition using face-regions,"Université Ibn Tofail, Morocco","Laboratoire des Systèmes de Télécommunication et Ingénierie de la Décision (LASTID) Université Ibn Tofail BP 133, Kenitra 14000, Maroc","Av. de L'Université, Kénitra, Morocco",34.24608690,-6.58530760,edu,
+a7ec294373ccc0598cbb0bbb6340c4e56fe5d979,Face Recognition with Relative Difference Space and SVM,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+a78025f39cf78f2fc66c4b2942fbe5bad3ea65fc,A comparison of facial landmark detection methods,"Trakya University, Edirne, Turkey","Elektrik-Elektronik Mühendisliği Bölümü, Trakya Üniversitesi, Edirne, Türkiye","Edirne, Edirne Merkez/Edirne, Turkey",41.67712970,26.55571450,edu,
+a7a3ec1128f920066c25cb86fbc33445ce613919,Joint facial landmark detection and action estimation based on deep probabilistic random forest,SUNY Buffalo,SUNY Buffalo,"SUNY College at Buffalo, Academic Drive, Elmwood Village, Buffalo, Erie County, New York, 14222, USA",42.93362780,-78.88394479,edu,
+a713a01971e73d0c3118d0409dc7699a24f521d6,Age estimation based on face images and pre-trained convolutional neural networks,Università degli Studi di Milano,Università degli Studi di Milano,"Università degli Studi di Milano, Via Camillo Golgi, Città Studi, Milano, MI, LOM, 20133, Italia",45.47567215,9.23336232,edu,
+a7f188a7161b6605d58e48b2537c18a69bd2446f,An intelligent multi-modal affect recognition system for persistent and non-invasive personal health monitoring,"UtopiaCompression Corp., Los Angeles, CA","UtopiaCompression Corporation, 11150 W. Olympic Blvd, Suite 820, Los Angeles, CA 90064, USA","11150 W Olympic Blvd #820, Los Angeles, CA 90064",34.03927660,-118.43863880,company,
+b8a16fcb65a8cee8dd32310a03fe36b5dff9266a,Facial expression recognition from static images,Istanbul Technical University,Istanbul Technical University,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye",41.10427915,29.02231159,edu,
+b84f164dbccb16da75a61323adaca730f528edde,Approximate Least Trimmed Sum of Squares Fitting and Applications in Image Analysis,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+b8048a7661bdb73d3613fde9d710bd45a20d13e7,An Academic Emotion Database and the Baseline Evaluation,Ocean University of China,Ocean University of China,"中国海洋大学, 238, 松岭路 Sōnglǐng Road, 朱家洼, 崂山区 (Laoshan), 青岛市, 山东省, 266100, 中国",36.16161795,120.49355276,edu,
+b8bcf9c773da1c5ee76db4bf750c9ff5d159f1a0,Homemade TS-Net for Automatic Face Recognition,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+b82f89d6ef94d26bf4fec4d49437346b727c3bd4,Context-Sensitive Dynamic Ordinal Regression for Intensity Estimation of Facial Action Units,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+b8978a5251b6e341a1171e4fd9177aec1432dd3a,FaceHunter: A multi-task convolutional neural network based face detector,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+b806a31c093b31e98cc5fca7e3ec53f2cc169db9,Gaze fixations and dynamics for behavior modeling and prediction of on-road driving maneuvers,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+b8f64a94f536b46ef34a0223272e02f9be785ef9,An face-based visual fixation system for prosthetic vision,"CSIRO, Australia","NICTA, PO Box 6020, St Lucia, QLD 4067, Australia","Research Way, Clayton VIC 3168, Australia",-37.90627370,145.13194490,edu,f.k.a. NICTA
+b14e3fe0d320c0d7c09154840250d70bc88bb6c0,The Role of Featural and Configural Information in Face Classification A Simulation of the Expertise Hypothesis,"Sichuan University, Chengdu","Sichuan Univ., Chengdu","四川大学(华西校区), 校东路, 武侯区, 武侯区 (Wuhou), 成都市 / Chengdu, 四川省, 610014, 中国",30.64276900,104.06751175,edu,
+b161d261fabb507803a9e5834571d56a3b87d147,Gender recognition from face images using a geometric descriptor,University of Campinas (UNICAMP),"Institute of Computing, University of Campinas (UNICAMP), Campinas, SP, 13083-852, Brazil","Universidade Estadual de Campinas - Av. Albert Einstein, 1251 - Cidade Universitária, Campinas - SP, 13083-852, Brazil",-22.81483740,-47.06477080,edu,
+b1efefcc9a5d30be90776571a6cc0071f3679753,BRoPH: A compact and efficient binary 3D feature descriptor,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+b11b71b704629357fe13ed97b216b9554b0e7463,ASCERTAIN: Emotion and Personality Recognition Using Commercial Sensors,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+b1534888673e6119f324082246016d28eba249aa,Saliency-based navigation in omnidirectional image,Shanghai University,Shanghai University,"上海大学, 锦秋路, 大场镇, 宝山区 (Baoshan), 上海市, 201906, 中国",31.32235655,121.38400941,edu,
+ddd0f1c53f76d7fc20e11b7e33bbdc0437516d2b,Deep learning-based learning to rank with ties for image re-ranking,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+dd0086da7c4efe61abb70dd012538f5deb9a8d16,Face recognition by decision fusion of two-dimensional linear discriminant analysis and local binary pattern,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+dd8a851f2a0c63bb97e33aaff1841695f601c863,Still-to-Video face recognition via weighted scenario oriented discriminant analysis,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+dd6826e9520a6e72bcd24d1bdb930e78c1083b31,HapFACS 3.0: FACS-Based Facial Expression Generator for 3D Speaking Virtual Characters,Florida International University,Florida International University,"FIU, Southwest 14th Street, Sweetwater, University Park, Miami-Dade County, Florida, 33199, USA",25.75533775,-80.37628897,edu,
+ddf577e8b7c86b1122c1bc90cba79f641d2b33fa,A framework of face synthesis based on multilinear analysis,Hong Kong Baptist University,Hong Kong Baptist University,"香港浸會大學 Hong Kong Baptist University, 安明街 On Ming Street, 石門 Shek Mun, 石古壟 Shek Kwu Lung, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1132, 中国",22.38742010,114.20822220,edu,
+dcb50e1f439d1f9b14ae85866f4542e51b830a07,Spatiotemporal local orientational binary patterns for facial expression recognition from video sequences,Hangzhou Dianzi University,Hangzhou Dianzi University,"杭州电子科技大学, 2号大街, 白杨街道, 江干区 (Jianggan), 杭州市 Hangzhou, 浙江省, 310018, 中国",30.31255250,120.34309460,edu,
+dcdece0d0ee382e2f388dcd7f5bd9721bb7354d6,Facial Skin Beautification Using Adaptive Region-Aware Masks,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+dc107e7322f7059430b4ef4991507cb18bcc5d95,Wide-angle micro sensors for vision on a tight budget,"Centeye, Inc.","Centeye, Inc.","4905 Reno Rd NW, Washington, DC 20008, USA",38.95353190,-77.07186700,company,
+dc2f16f967eac710cb9b7553093e9c977e5b761d,Learning a lightweight deep convolutional network for joint age and gender recognition,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+dc5d9399b3796db7fd850990402dce221b98c8be,New Robust Metric Learning Model Using Maximum Correntropy Criterion,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+dc6ad30c7a4bc79bb06b4725b16e202d3d7d8935,Age classification with deep learning face representation,South China University of Technology,South China University of Technology,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.05020420,113.39880323,edu,
+b69e7e2a7705a58a0e3f1b80ae542907b89ce02e,A depth video-based facial expression recognition system utilizing generalized local directional deviation-based binary pattern feature discriminant analysis,SungKyunKwan University,SungKyunKwan University,"성균관대, 덕영대로, 천천동, 장안구, 수원시, 경기, 16357, 대한민국",37.30031270,126.97212300,edu,
+b68452e28951bf8db5f1193eca3a8fd9e2d0d7ef,Approximate radial gradient transform based face recognition,Mangalore University,Mangalore University,"Mangalore University, LR, ದಕ್ಷಿಣ ಕನ್ನಡ, Bantwal taluk, Dakshina Kannada, Karnataka, 574153, India",12.81608485,74.92449278,edu,
+b6f15bf8723b2d5390122442ab04630d2d3878d8,Dense 3D face alignment from 2D videos in real-time,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+b69bcb5f73999ea12ff4ac1ac853b72cd5096b2d,Fully Automatic Facial Action Recognition in Spontaneous Behavior,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+b63b6ed78b39166d87d4c56f8890873aa65976a2,Nonverbal communication with a multimodal agent via facial expression recognition,"University of Valladolid, Spain","University of Valladolid (Spain), Dep. Of Systems Engineering and Automatic Control, Industrial Engineering School","2061 Rathbone Hall, 66506, 1701B Platt St, Manhattan, KS 66502, United States",39.19063310,-96.58404850,edu,
+a9fc8efd1aa3d58f89c0f53f0cb112725b5bda10,Three-Dimensional Attention-Based Deep Ranking Model for Video Highlight Detection,"Moshanghua Tech Company, Ltd., Beijing, China","Moshanghua Tech Company, Ltd., Beijing, China","Beijing, China",39.90419990,116.40739630,company,
+a92147bed9c17c311c6081beb0ef4c3165b6268e,Toward Large-Population Face Identification in Unconstrained Videos,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+a939e287feb3166983e36b8573cd161d12097ad8,Exploiting privileged information for facial expression recognition,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+a9215666b4bcdf8d510de8952cf0d55b635727dc,Action Units and Their Cross-Correlations for Prediction of Cognitive Load during Driving,Ecole Polytechnique Fédérale de Lausanne,"Signal Processing Laboratory (LTS5), Ecole Polytechnique Fédérale de Lausanne, Lausanne, Switzerland","Route Cantonale, 1015 Lausanne, Switzerland",46.51905570,6.56675760,edu,
+a9426cb98c8aedf79ea19839643a7cf1e435aeaa,Cascaded regression for 3D pose estimation for mouse in fisheye lens distorted monocular images,"Transmural Biotech, Barcelona, Spain","Transmural Biotech, Barcelona, Spain","Carrer de Sabino Arana, 38, 08028 Barcelona, Spain",41.38617590,2.12487170,company,
+d569c3e62f471aa75ed53e631ec05c1a3d594595,Facial expression analysis using shape and motion information extracted by convolutional neural networks,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+d5b445c5716952be02172ca4d40c44f4f04067fa,Person independent facial expression analysis using Gabor features and Genetic Algorithm,"A*STAR, Singapore","Institute for Infocomm Research, A*STAR, Singapore","1 Fusionopolis Way, #21-01 Connexis, Singapore 138632",1.29889260,103.78731070,edu,
+d57ce0ff4acb2910c2d1afee2ebb7aa1e72a4584,Tensor-Jet: A tensorial representation of Local Binary Gaussian Jet maps,INRIA Grenoble,"INRIA Grenoble Rhone-Alpes, FRANCE","INRIA, 655, Avenue de l'Europe, Innovallée Montbonnot, Montbonnot-Saint-Martin, Grenoble, Isère, Auvergne-Rhône-Alpes, France métropolitaine, 38330, France",45.21829860,5.80703193,edu,
+d5c66a48bc0a324750db3d295803f47f6060043d,Support Vector Machine with Weighted Summation Kernel Obtained by Adaboost,University of Electro-Communications,The University of Electro-Communications,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本",35.65729570,139.54255868,edu,
+d289ce63055c10937e5715e940a4bb9d0af7a8c5,DeepMon: Mobile GPU-based Deep Learning Framework for Continuous Vision Applications,Singapore Management University,Singapore Management University,"Singapore Management University, Fort Canning Tunnel, Clarke Quay, City Hall, Singapore, Central, 178895, Singapore",1.29500195,103.84909214,edu,
+d2a415365f997c8fe2dbdd4e06ceab2e654172f6,Synthesis of emotional expressions specific to facial structure,"Indian Statistical Institute, Kolkata","Indian Statistical Institute, Kolkata","Plot No. 203, Barrackpore Trunk Road, Baranagar, Kolkata, West Bengal 700108, India",22.64815210,88.37681700,edu,
+d2fac640086ba89271ad7c1ebf36239ecd64605e,Illumination suppression for illumination invariant face recognition,University of Windsor,University of Windsor,"Bridge AA, Ambassador Bridge, Windsor, Essex, Ontario, N9C 2J9, Canada",42.30791465,-83.07176915,edu,
+d2f2b10a8f29165d815e652f8d44955a12d057e6,Multiscale binarised statistical image features for symmetric face matching using multiple descriptor fusion based on class-specific LDA,Urmia University,Urmia University,"دانشگاه ارومیه, خیابان اداره گاز (منصور افشار), دانشکده, ارومیه, بخش مرکزی, شهرستان ارومیه, استان آذربایجان غربی, 444655677, ‏ایران‎",37.52914535,45.04886077,edu,
+d20ea5a4fa771bc4121b5654a7483ced98b39148,Set-to-Set Face Recognition Under Variations in Pose and Illumination,Colorado State University,Colorado State University,"Colorado State University, West Pitkin Street, Woodwest, Fort Collins, Larimer County, Colorado, 80526-2002, USA",40.57093580,-105.08655256,edu,
+aa7c72f874951ff7ca3769439f2f39b7cfd4b202,It's All About the Data,"University of Illinois, Urbana Champaign","Computer Science, U.Illinois at Urbana Champaign, Urbana, United States","Champaign, IL, USA",40.10195230,-88.22716150,edu,
+aa892fe17c06e2b18db2b12314499a741e755df7,Improved performance of face recognition using CNN with constrained triplet loss layer,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+aab9a617be6e5507beb457b1e6c2e5b046f9cff0,Face recognition using a pictorial-edit distance,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+aa4af9b3811db6a30e1c7cc1ebf079078c1ee152,Deformable part models with CNN features for facial landmark detection under occlusion,University of the Witwatersrand,University of the Witwatersrand,"University of the Witwatersrand, Empire Road, Johannesburg Ward 60, Johannesburg, City of Johannesburg Metropolitan Municipality, Gauteng, 2001, South Africa",-26.18888130,28.02479073,edu,
+aa581b481d400982a7e2a88830a33ec42ad0414f,Learning a joint discriminative-generative model for action recognition,"Vision Semantics Ltd, UK","Vision Semantics Ltd, UK",United Kingdom,55.37805100,-3.43597300,company,
+aa5a7a9900548a1f1381389fc8695ced0c34261a,Multiple facial action unit recognition enhanced by facial expressions,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+aa6e8a2a9d3ed59d2ae72add84176e7b7f4b2912,ORCHARD: Visual object recognition accelerator based on approximate in-memory processing,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+af12a79892bd030c19dfea392f7a7ccb0e7ebb72,A study on human age estimation under facial expression changes,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+af7553d833886663550ce83b087a592a04b36419,Dual Subspace Nonnegative Graph Embedding for Identity-Independent Expression Recognition,National Tsing Hua University,National Tsing Hua University,"國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.79254840,120.99511830,edu,
+af4745a3c3c7b51dab0fd90d68b53e60225aa4a9,Multi-Task Learning with Low Rank Attribute Embedding for Multi-Camera Person Re-Identification,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+af3b803188344971aa89fee861a6a598f30c6f10,Facial expression recognition with FHOG features,"Eskişehir Osmangazi Üniversitesi, Eskişehir, Turkey","Eskişehir Osmangazi Üniversitesi, Bilgisayar Mühendisliği Bölümü, Eskişehir, Türkiye","Büyükdere Mahallesi, Osmangazi Ünv. No:38, 26040 Odunpazarı/Eskişehir, Turkey",39.74887790,30.47581540,edu,
+b749ca71c60904d7dad6fc8fa142bf81f6e56a62,Band-Reweighed Gabor Kernel Embedding for Face Image Representation and Recognition,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+b712f08f819b925ff7587b6c09a8855bc295d795,Independent Component Analysis Using Semi-Parametric Density Estimation Via Entropy Maximization,"Amazon Research, Cambridge","Amazon.com Cambridge, MA, USA","101 Main St, Cambridge, MA 02142, USA",42.36229600,-71.08100960,company,
+b7128e0fe18dcb42e8a2ac5cf6794f64a8e37bd0,Automatic facial expression recognition based on a deep convolutional-neural-network structure,Beijing Normal University,Beijing Normal University,"北京师范大学, 19, 新街口外大街, 西城区, 100875, 中国",39.96014155,116.35970438,edu,
+b7845e0b0ce17cde7db37d5524ef2a61dee3e540,Fusion of classifier predictions for audio-visual emotion recognition,University of Tartu,UNIVERSITY OF TARTU,"Paabel, University of Tartu, 17, Ülikooli, Kesklinn, Tartu linn, Tartu, Tartu linn, Tartu maakond, 53007, Eesti",58.38131405,26.72078081,edu,
+b7b8e7813fbc12849f2daba5cab604abd8cbaab6,Face recognition using affine dense SURF-like descriptors,Pohang University of Science and Technology,Pohang University of Science and Technology,"포스텍, 77, 청암로, 효곡동, 남구, 포항시, 경북, 37673, 대한민국",36.01773095,129.32107509,edu,
+b7a0c70a320c1ac3e92f4bf0b50a7d8ceb757c41,Understanding how deep neural networks learn face expressions,University of Pernambuco,"University of Pernambuco, Recife-PE, Brazil","Av. Gov. Agamenon Magalhães - Santo Amaro, Recife - PE, 50100-010, Brazil",-8.04406030,-34.88611670,edu,
+b759936982d6fb25c55c98955f6955582bdaeb27,Efficient object feature selection for action recognition,"A*STAR, Singapore","Institute for Infocomm Research, A*STAR, Singapore","1 Fusionopolis Way, #21-01 Connexis, Singapore 138632",1.29889260,103.78731070,edu,
+b7461aac36fc0b8a24ecadf6c5b5caf54f2aa2f7,Two-stage cascade model for unconstrained face detection,University of Zagreb,"University of Zagreb, Faculty of Electrical Engineering and Computing, Croatia","Unska ul. 3, 10000, Zagreb, Croatia",45.80112100,15.97084090,edu,
+b7043048b4ba748c9c6317b6d8206192c34f57ff,Shaping datasets: Optimal data selection for specific target distributions across dimensions,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+b7c6df1ae0e8348feecd65e9ad574d1e04d212a5,Soft video parsing by label distribution learning,Southeast University,Southeast University,"SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国",32.05752790,118.78682252,edu,
+db0379c9b02e514f10f778cccff0d6a6acf40519,Differentiating spontaneous from posed facial expressions within a generic facial expression recognition framework,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+dbb9601a1d2febcce4c07dd2b819243d81abb2c2,Landmark Free Face Attribute Prediction,"SAP Innovation Center Network, Singapore","SAP Innovation Center Network, Singapore","30 Pasir Panjang Rd, Singapore 117440",1.27486000,103.79778700,company,"30 Pasir Panjang Road, Singapore"
+a8faeef97e2a00eddfb17a44d4892c179a7cc277,Scalable face labeling in online social networks,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+a8e7561ada380f2f50211c67fc45c3b3dea96bdb,Illumination Invariant Face Recognition: A Survey,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+a8fd23934e5039bb818b8d1c47ccb540ce2c253c,Sparse matrix transform-based linear discriminant analysis for hyperspectral image classification,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+def934edb7c7355757802a95218c6e4ed6122a72,Computer Vision,Institute of Industrial Science,Institute of Industrial Science,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本",36.05238585,140.11852361,edu,
+ded8252fc6df715753e75ba7b7fee518361266ef,Feature extraction for facial expression recognition by canonical correlation analysis,Bahçeşehir University,Bahcesehir University,"BAU Galata, 24, Kemeraltı Caddesi, Müeyyedzade, Beyoğlu, İstanbul, Marmara Bölgesi, 34425, Türkiye",41.02451875,28.97697953,edu,
+de79437f74e8e3b266afc664decf4e6e4bdf34d7,To face or not to face: Towards reducing false positive of face detection,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+dec76940896a41a8a7b6e9684df326b23737cd5d,Seeing through the expression: Bridging the gap between expression and emotion recognition,"Academia Sinica, Taiwan","Research Center for Institute of Information Science, Academia Sinica, Taiwan","115, Taiwan, Taipei City, Nangang District, 研究院路二段128號",25.04117270,121.61465180,edu,
+de92951ea021ec56492d76381a8ae560a972dd68,Discriminative filter based regression learning for facial expression recognition,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+de0df8b2b4755da9f70cf1613d7b12040d0ce8ef,Cross-spectral cross-resolution video database for face recognition,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+dec5b11b01f35f72adb41d2be26b9b95870c5c00,Facial expression recognition by combination of classifiers,University of Technology of Compiègne,"Laboratory Heudiasyc, University of Technology of Compiègne, BP 20529. F-60205, France","57 Avenue de Landshut, 60200 Compiègne, France",49.40075300,2.79528080,edu,
+de45bf9e5593a5549a60ca01f2988266d04d77da,Continuous valence prediction using recurrent neural networks with facial expressions and EEG signals,"Başkent University, Ankara, Turkey","Bilgisayar Mühendisliği, Başkent Üniversitesi, Ankara, Türkiye","Fatih Sultan Mahallesi, Eskişehir Yolu 20. Km. Bağlıca Kampüsü, 06790 Etimesgut/Ankara, Turkey",39.88792060,32.65519900,edu,
+b05943b05ef45e8ea8278e8f0870f23db5c83b23,User authentication on mobile devices with dynamical selection of biometric techniques for optimal performance,University of Windsor,University of Windsor,"Bridge AA, Ambassador Bridge, Windsor, Essex, Ontario, N9C 2J9, Canada",42.30791465,-83.07176915,edu,
+a6b5ca99432c23392cec682aebb8295c0283728b,Content-Aware Proactive Caching for Backhaul Offloading in Cellular Network,Kyung Hee University,Kyung Hee University,"Kyung Hee Tae Kwon Do, Vons 2370 Truck Service Ramp, University City, San Diego, San Diego County, California, 92122, USA",32.85363330,-117.20352860,edu,
+a60db9ca8bc144a37fe233b08232d9c91641cbb5,Spatial alignment network for facial landmark localization,Beijing Jiaotong University,Beijing Jiaotong University,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国",39.94976005,116.33629046,edu,
+a6ce1a1de164f41cb8999c728bceedf65d66bb23,Dynamic facial expression recognition using local patch and LBP-TOP,University of Portsmouth,University of Portsmouth,"University of Portsmouth - North Zone, Portland Street, Portsea, Portsmouth, South East, England, PO1 3DE, UK",50.79805775,-1.09834911,edu,
+b944cc4241d195b1609a7a9d87fce0e9ba1498bc,Kernel Sparse Representation-Based Classifier,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+b934f730a81c071dbfc08eb4c360d6fca2daa08f,Characteristic number regression for facial feature extraction,Donghua University,Donghua University,"东华大学, 新华路, 长宁区, 上海市, 210011, 中国",31.20619390,121.41047101,edu,
+b97c7f82c1439fa1e4525e5860cb05a39cc412ea,Illumination Normalization Based on Simplified Local Binary Patterns for A Face Verification System,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+b9d68dbeb8e5fdc5984b49a317ea6798b378e5ae,What Shall I Look Like after N Years?,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+b9dc8cc479cacda1f23b91df00eb03f88cc0c260,Event Specific Multimodal Pattern Mining for Knowledge Base Construction,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+b910590a0eb191d03e1aedb3d55c905129e92e6b,Robust gender classification on unconstrained face images,Anhui University,Anhui University,"安徽大学(磬苑校区), 111, 九龙路, 弘泰苑, 合肥国家级经济技术开发区, 芙蓉社区, 合肥经济技术开发区, 合肥市区, 合肥市, 安徽省, 230601, 中国",31.76909325,117.17795091,edu,
+b91f54e1581fbbf60392364323d00a0cd43e493c,A high-resolution spontaneous 3D dynamic facial expression database,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+a180dc9766490416246e7fbafadca14a3c500a46,Improvements on the linear discrimination technique with application to face recognition,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+a1e07c31184d3728e009d4d1bebe21bf9fe95c8e,"On looking at faces in an automobile: Issues, algorithms and evaluation on naturalistic driving dataset","University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+a1cda8e30ce35445e4f51b47ab65b775f75c9f18,Normalized face image generation with perceptron generative adversarial networks,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+a192845a7695bdb372cccf008e6590a14ed82761,A Novel Local Pattern Descriptor—Local Vector Pattern in High-Order Derivative Space for Face Recognition,National Central University,National Central University,"NCU, 300, 中大路, 上三座屋, 五權里, 樹林子, 中壢區, 桃園市, 320, 臺灣",24.96841805,121.19139696,edu,
+a119844792fd9157dec87e3937685c8319cac62f,"Multifarious distances, cameras and illuminations face database",Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+a168ca2e199121258fbb2b6c821207456e5bf994,"Continuous AU intensity estimation using localized, sparse facial feature space",University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+a1081cb856faae25df14e25045cd682db8028141,Audio-Visual Person Recognition in Multimedia Data From the Iarpa Janus Program,Johns Hopkins University,"Johns Hopkins University, 3400 N. Charles St, Baltimore, MD 21218, USA","3400 N Charles St, Baltimore, MD 21218, USA",39.32905300,-76.61942500,edu,
+ef2bb8bd93fa8b44414565b32735334fa6823b56,An accurate and efficient face recognition method based on hash coding,Guilin University of Electronic Technology Guangxi Guilin,Guilin University of Electronic Technology Guangxi Guilin,"桂林电子科技大学金鸡岭校区, 1号, 金鸡路, 七星区, 黄莺岩村, 七星区, 桂林市, 广西壮族自治区, 541004, 中国",25.28739920,110.33242770,edu,
+efc78a7d95b14abacdfde5c78007eabf9a21689c,Subjectively Interesting Component Analysis: Data Projections that Contrast with Prior Expectations,University of Bristol,University of Bristol,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK",51.45848370,-2.60977520,edu,
+ef7b8f73e95faa7a747e0b04363fced0a38d33b0,Fast and reliable human action recognition in video sequences by sequential analysis,University of Loughborough,University of Loughborough,"Epinal Way, Loughborough LE11 3TU, UK",52.76508140,-1.23205340,edu,
+ef3a0b454370991a9c18ac7bfd228cf15ad53da0,Two-dimensional Sparse Principal Component Analysis: A new technique for feature extraction,Guangdong University of Technology,Guangdong University of Technology,"广东工业大学, 东风东路, 黄花岗街道, 越秀区 (Yuexiu), 广州市, 广东省, 510080, 中国",23.13538360,113.29470496,edu,
+c3c463a9ee464bb610423b7203300a83a166b500,Transform-invariant dictionary learning for face recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+c362116a358320e71fb6bc8baa559142677622d2,Improve robustness of sparse PCA by L1-norm maximization,Xi'an Jiaotong University,Xi'an Jiaotong University,"西安交通大学兴庆校区, 文治路, 乐居场, 碑林区 (Beilin), 西安市, 陕西省, 710048, 中国",34.24749490,108.97898751,edu,
+c4a2cd5ec81cdfd894c9a20d4ffb8cda637aab1f,Towards retro-projected robot faces: An alternative to mechatronic and android faces,University of Plymouth,University of Plymouth,"Charles Seale-Hayne Library, Portland Square, Barbican, Plymouth, South West England, England, PL4 6AP, UK",50.37525010,-4.13927692,edu,
+c43dc4ae68a317b34a79636fadb3bcc4d1ccb61c,Age and gender estimation using deep residual learning network,Ajou University,Ajou University,"아주대학교, 성호대교, 이의동, 영통구, 수원시, 경기, 16499, 대한민국",37.28300030,127.04548469,edu,
+c4f3185f010027a0a97fcb9753d74eb27a9cfd3e,Learning to classify gender from four million images,University of Bristol,University of Bristol,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK",51.45848370,-2.60977520,edu,
+c4ca092972abb74ee1c20b7cae6e69c654479e2c,Linear canonical correlation analysis based ranking approach for facial age estimation,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+c444c4dab97dd6d6696f56c1cacda051dde60448,Multiview Face Detection and Registration Requiring Minimal Manual Intervention,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+c48b68dc780c71ab0f0f530cd160aa564ed08ade,Facial expression recognition based on local binary patterns and coarse-to-fine classification,"Northwestern Polytechnic University, Xi'an, China","Coll. of Electron. & Inf., Northwestern Polytech. Univ., Xi'an, China","633 Clark St, Evanston, IL 60208, USA",42.05645940,-87.67526700,edu,
+c49075ead6eb07ede5ada4fe372899bd0cfb83ac,Multi-stage classification network for automatic age estimation from facial images,RMIT University,RMIT University,"RMIT University, 124, La Trobe Street, Melbourne City, City of Melbourne, Victoria, 3000, Australia",-37.80874650,144.96388750,edu,
+c4541802086461420afb1ecb5bb8ccd5962a9f02,Image Ratio Features for Facial Expression Recognition Application,New Jersey Institute of Technology,New Jersey Institute of Technology,"New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA",40.74230250,-74.17928172,edu,
+c4d439fe07a65b735d0c8604bd5fdaea13f6b072,Parallel AP Clustering and Re-ranking for Automatic Image-Text Alignment and Large-Scale Web Image Search,University of North Carolina at Charlotte,University of North Carolina at Charlotte,"Lot 20, Poplar Terrace Drive, Charlotte, Mecklenburg County, North Carolina, 28223, USA",35.31034410,-80.73261617,edu,
+c4d0d09115a0df856cdb389fbccb20f62b07b14e,Environment coupled metrics learning for unconstrained face verification,Chinese Academy of Science,"Key Lab of Intelligent Information Processing, Institute of Computer Technology, Chinese Academy of Science (CAS), Beijing, 100190, China","Beijing, China",39.90419990,116.40739630,edu,
+eac97959f2fcd882e8236c5dd6035870878eb36b,Adaptive ranking of facial attractiveness,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+ea026456729f0ec54c697198e1fd089310de4ae2,Face identity verification based on sinusoidal projection,Yonsei University,Yonsei University,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.56004060,126.93692480,edu,
+ea86b75427f845f04e96bdaadfc0d67b3f460005,Label consistent recursive least squares dictionary learning for image classification,University of Delaware,University of Delaware,"University of Delaware, South College Avenue, Newark, New Castle County, Delaware, 19713, USA",39.68103280,-75.75401840,edu,
+e12b2c468850acb456b0097d5535fc6a0d34efe3,Illumination robust single sample face recognition using multi-directional orthogonal gradient phase faces,Southwest Jiaotong University,Southwest Jiaotong University,"西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国",30.69784700,104.05208110,edu,
+e1c50cf0c08d70ff90cf515894b2b360b2bc788b,Facial behavior as behavior biometric? an empirical study,University of Technology,"Center for Quantum Computation and Intelligent Systems, Faculty of Engineering and Information Technology, University of Technology, Sydney, NSW, Australia","11 University of Technology Sydney 81, Broadway, Ultimo NSW 2007, Australia",-33.88405040,151.19922540,edu,
+e101bab97bce2733222db9cfbb92a82779966508,A Micro-GA Embedded PSO Feature Selection Approach to Intelligent Facial Emotion Recognition,Deakin University,Deakin University,"Deakin University, Pigdons Lane, Waurn Ponds, Geelong, City of Greater Geelong, Barwon South West, Victoria, 3216, Australia",-38.19928505,144.30365229,edu,
+e1179a5746b4bf12e1c8a033192326bf7f670a4d,Facial makeup detection technique based on texture and shape analysis,Télécom ParisTech,"Institut Mines-Telecom, Telecom ParisTech, CNRS LTCI, Sophia Antipolis, France","Business Pôle. 1047 route des Dolines. Allée Pierre Ziller, 06560 Sophia Antipolis, France",43.62716550,7.04109170,edu,
+e14b046a564604508ea8e3369e7e9f612e148511,Facial Expression Recognition on Hexagonal Structure Using LBP-Based Histogram Variances,National Tsing Hua University,National Tsing Hua University,"國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.79254840,120.99511830,edu,
+e1449be4951ba7519945cd1ad50656c3516113da,Local Gradient Hexa Pattern: A Descriptor for Face Recognition and Retrieval,"IIIT Allahabad, India","Indian Institute of Information Technology at Allahabad, Allahabad, India","Indian Institute of Information Technology, Jhalwa, Prayagraj, Uttar Pradesh, India",25.42991140,81.77118270,edu,
+cdf0dc4e06d56259f6c621741b1ada5c88963c6d,Makeup-insensitive face recognition by facial depth reconstruction and Gabor filter bank from women's real-world images,Semnan University,Semnan University,"دانشگاه سمنان, بزرگراه امام رضا, شهرک مسکن مهر مصلی, ناسار, سمنان, بخش مرکزی, شهرستان سمنان, استان سمنان, ‏ایران‎",35.60374440,53.43445877,edu,
+cd85f71907f1c27349947690b48bfb84e44a3db0,Visual Pattern Discovery and Recognition,Chongqing University,Chongqing University,"重庆工商大学, 19, 翠林路, 重庆市, 重庆市中心, 南岸区 (Nan'an), 重庆市, 400067, 中国",29.50841740,106.57858552,edu,
+cdfa7dccbc9e9d466f8a5847004973a33c7fcc89,Multiple Subcategories Parts-Based Representation for One Sample Face Identification,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+cd2f8d661ea2c6d6818a278eb4f0548751c3b1ae,Improving CNN Performance Accuracies With Min–Max Objective,Xi'an Jiaotong University,Xi'an Jiaotong University,"西安交通大学兴庆校区, 文治路, 乐居场, 碑林区 (Beilin), 西安市, 陕西省, 710048, 中国",34.24749490,108.97898751,edu,
+ccb95192001b07bb25fc924587f9682b0df3de8e,Head pose estimation for recognizing face images using collaborative representation based classification,"IIEST Shibpur, India","Computer Science and Technology, IIEST, Shibpur","P.O. - Botanic Garden, Howrah, West Bengal 711103, India",22.55518080,88.30713790,edu,
+cc70fb1ab585378c79a2ab94776723e597afe379,Detect face in the wild using CNN cascade with feature aggregation at multi-resolution,Swansea University,Swansea University,"Swansea University, University Footbridge, Sketty, Swansea, Wales, SA2 8PZ, UK",51.60915780,-3.97934429,edu,
+cc5edaa1b0e91bc3577547fc30ea094aa2722bf0,"The design, implementation and evaluation of a relaxation service with facial emotion detection",Shibaura Institute of Technology,Shibaura Institute of Technology,"芝浦工業大学 豊洲キャンパス, 晴海通り, 豊洲2, 豊洲, 富岡一丁目, 江東区, 東京都, 関東地方, 135-6001, 日本",35.66053325,139.79503121,edu,
+cc1ed45b02d7fffb42a0fd8cffe5f11792b6ea74,Analysis of the effect of image resolution on automatic face gender and age classification,"TOBB Economy and Technology University, Ankara, Turkey","Bilgisayar Mühendisliği Bölümü, TOBB Ekonomi ve Teknoloji Üniversitesi, Ankara, Türkiye","Söğütözü Mahallesi, Söğütözü Cd. No:43, 06510 Çankaya/Ankara, Turkey",39.92130970,32.79882330,edu,
+ccebd3bf069f5c73ea2ccc5791976f894bc6023d,Face detection based on deep convolutional neural networks exploiting incremental facial part learning,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+cceec87bad847b9b87178bde8ce5cce6bf1a8e99,Robust face recognition via transfer learning for robot partner,"University of Malaya, Kuala Lumpur","Faculty of Computer Science & Information Technology University of Malaya Kuala Lumpur, Malaysia","University of Malaya, 50603 Kuala Lumpur, Wilayah Persekutuan Kuala Lumpur, Malaysia",3.12821340,101.65069480,edu,
+ccfebdf7917cb50b5fcd56fb837f841a2246a149,A feature subtraction method for image based kinship verification under uncontrolled environments,Aalborg University,Aalborg University,"AAU, Pontoppidanstræde, Sønder Tranders, Aalborg, Aalborg Kommune, Region Nordjylland, 9220, Danmark",57.01590275,9.97532827,edu,
+cc2a9f4be1e465cb4ba702539f0f088ac3383834,Automated recognition of complex categorical emotions from facial expressions and head motions,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+e6d46d923f201da644ae8d8bd04721dd9ac0e73d,Robust transgender face recognition: Approach based on appearance and therapy factors,"Norwegian Biometrics Laboratory, NTNU, Norway","Norwegian Biometrics Laboratory, Norwegian University of Science and Technology (NTNU), 2802 Gjøvik, Norway","Teknologivegen 22, 2815 Gjøvik, Norway",60.78973180,10.68219270,edu,
+e6d6203fa911429d76f026e2ec2de260ec520432,Siamese network features for image matching,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+e6c834c816b5366875cf3060ccc20e16f19a9fc6,Subspace learning via low rank projections for dimensionality reduction,SUNY Buffalo,SUNY Buffalo,"SUNY College at Buffalo, Academic Drive, Elmwood Village, Buffalo, Erie County, New York, 14222, USA",42.93362780,-78.88394479,edu,
+e66a6ae542907d6a0ebc45da60a62d3eecf17839,3D-aided face recognition from videos,University of Lyon,"Universit&#x00E9; de Lyon, CNRS, INSA-Lyon, LIRIS, UMR5205, F-69621, France","20 Avenue Albert Einstein, 69100 Villeurbanne, France",45.78332440,4.87819840,edu,
+e66b4aa85524f493dafde8c75176ac0afad5b79c,Watchlist risk assessment using multiparametric cost and relative entropy,University of Calgary,University of Calgary,"University of Calgary, Service Tunnel, University Heights, Calgary, Alberta, T2N 1N7, Canada",51.07840380,-114.12870770,edu,
+e6d6d1b0a8b414160f67142fc18e1321fe3f1c49,Semantic facial description via axiomatic Fuzzy Set based clustering,Shenyang Aerospace University,"College of Automation, Shenyang Aerospace University, China","Hanqing S Rd, Heping Qu, Shenyang Shi, Liaoning Sheng, China, 110016",41.76538300,123.41744800,edu,
+e69a765d033ef6ea55c57ca41c146b27964c5cf2,A 0.53mW ultra-low-power 3D face frontalization processor for face recognition with human-level accuracy in wearable devices,Korea Advanced Institute of Science and Technology,Korea Advanced Institute of Science and Technology,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.36971910,127.36253700,edu,
+f9fb7979af4233c2dd14813da94ec7c38ce9232a,Detecting Gaze Towards Eyes in Natural Social Interactions and Its Use in Child Assessment,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+f95321f4348cfacc52084aae2a19127d74426047,A novel facial feature extraction method based on Empirical Mode Decomposition,South China Normal University,South China Normal University,"华师, 五山路, 华南理工大学南新村, 天河区, 广州市, 广东省, 510630, 中国",23.14319700,113.34009651,edu,
+f03a82fd4a039c1b94a0e8719284a777f776fb22,Video content analysis using convolutional neural networks,Jordan University of Science and Technology,Jordan University of Science and Technology,"Jordan University of Science and Technology, شارع الأردن, إربد‎, إربد, الأردن",32.49566485,35.99160717,edu,
+f0dac9a55443aa39fd9832bdff202a579b835e88,Social Interaction Assistant: A Person-Centered Approach to Enrich Social Interactions for Individuals With Visual Impairments,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+f0a9d69028edd1a39147848ad1116ca308d7491e,Case-Based Facial Action Units Recognition Using Interactive Genetic Algorithm,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+f09d5b6433f63d7403df5650893b78cdcf7319b3,Pixel selection in a face image based on discriminant features for face recognition,Kookmin University,Kookmin University,"국민대학교앞, 정릉로, 정릉2동, 정릉동, 성북구, 서울특별시, 02708, 대한민국",37.61075540,126.99466350,edu,
+f095b5770f0ff13ba9670e3d480743c5e9ad1036,Fast Algorithms for Fitting Active Appearance Models to Unconstrained Images,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+f76a6b1d6029769e2dc1be4dadbee6a7ba777429,Compressing and Accelerating Neural Network for Facial Point Localization,Shanghai University,Shanghai University,"上海大学, 锦秋路, 大场镇, 宝山区 (Baoshan), 上海市, 201906, 中国",31.32235655,121.38400941,edu,
+f7911b9ff58d07d19c68f4a30f40621f63c0f385,Discriminative-Element-Aware Sparse Representation for Action Recognition,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+f7be8956639e66e534ed6195d929aed4e0b90cad,Active Learning of Introductory Machine Learning,Delft University of Technology,Delft University of Technology,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland",51.99882735,4.37396037,edu,
+f7bebb2d5ef7c9bd38808b8e615756efafc2a1e7,Facial expression recognition based on Local Sign Directional Pattern,Kyung Hee University,Kyung Hee University,"Kyung Hee Tae Kwon Do, Vons 2370 Truck Service Ramp, University City, San Diego, San Diego County, California, 92122, USA",32.85363330,-117.20352860,edu,
+e853484dc585bed4b0ed0c5eb4bc6d9d93a16211,InvisibleEye: Mobile Eye Tracking Using Multiple Low-Resolution Cameras and Learning-Based Gaze Estimation,Osaka University,Osaka University,"大阪大学清明寮, 服部西町四丁目, 豊中市, 大阪府, 近畿地方, 日本",34.80809035,135.45785218,edu,
+e8f4ded98f5955aad114f55e7aca6b540599236b,Convolutional Fusion Network for Face Verification in the Wild,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+e896389891ba84af58a8c279cf8ab5de3e9320ee,Audio-visual speaker localization via weighted clustering,INRIA Grenoble,"INRIA Grenoble Rhone-Alpes, FRANCE","INRIA, 655, Avenue de l'Europe, Innovallée Montbonnot, Montbonnot-Saint-Martin, Grenoble, Isère, Auvergne-Rhône-Alpes, France métropolitaine, 38330, France",45.21829860,5.80703193,edu,
+e865908ed5e5d7469b412b081ca8abd738c72121,A Non-Greedy Algorithm for L1-Norm LDA,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+faf19885431cb39360158982c3a1127f6090a1f6,Inheritable Fisher vector feature for kinship verification,New Jersey Institute of Technology,New Jersey Institute of Technology,"New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA",40.74230250,-74.17928172,edu,
+fa72e39971855dff6beb8174b5fa654e0ab7d324,"A depth video-based facial expression recognition system using radon transform, generalized discriminant analysis, and hidden Markov model",King Saud University,King Saud University,"King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية",24.72464030,46.62335012,edu,
+fa80344137c4d158bf59be4ac5591d074483157a,Face recognition based on constructive neural networks covering learning algorithm,"Shanghai Jiao Tong University, China","Inst. of Autom., Shanghai Jiao Tong Univ., China","1954 Huashan Rd, JiaoTong DaXue, Xuhui Qu, Shanghai Shi, China, 200030",31.20100100,121.43284100,edu,
+fa08b52dda21ccf71ebc91bc0c4d206ac0aa3719,Customized Orthogonal Locality Preserving Projections With Soft-Margin Maximization for Face Recognition,Federal University of Rio Grande do Sul,"Institute of Informatics, Federal University of Rio Grande do Sul, Porto Alegre, Brazil","Av. Paulo Gama, 110 - Farroupilha, Porto Alegre - RS, 90040-060, Brazil",-30.03382480,-51.21882800,edu,
+fadbb3a447d697d52771e237173b80782caaa936,Multi-label multi-instance learning with missing object tags,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+fa9610c2dc7e2a79e0096ac033b11508d8ae7ed7,Sparse representation based facial expression classification for pain assessment in neonates,"Nanjing Children's Hospital, Nanjing Medical University","Nanjing Children's Hospital Affiliated to Nanjing Medical University, Nanjing, China","140 Hanzhong Rd, Gulou Qu, Nanjing Shi, Jiangsu Sheng, China, 210029",32.04384600,118.77476300,edu,
+fa5ab4b1b45bf22ce7b194c20c724946de2f2dd4,Neutral Face Classification Using Personalized Appearance Models for Fast and Robust Emotion Detection,"Samsung R&D Institute, Bangalore, India","Samsung R&D Institute, Bangalore, India","#2870, Phoenix Building, 4th Floor Bagmane Constellation Business Park, Outer Ring Rd, Doddanekundi, Marathahalli, Bengaluru, Karnataka 560037, India",12.98035370,77.69751010,company,
+ffea4184a0b24807b5f4ed87f9a985c2a27027d9,Cross-media retrieval by intra-media and inter-media correlation mining,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+ff76ff05aa1ab17e5ca9864df2252e6bb44c8a17,Gender Recognition or Gender Reductionism?: The Social Implications of Embedded Gender Recognition Systems,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+ff8db3810f927506f3aa594d66d5e8658f3cf4d5,Visual Descriptors in Methods for Video Hyperlinking,Czech Technical University,Czech Technical University,"České vysoké učení technické v Praze, Resslova, Nové Město, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 11121, Česko",50.07642960,14.41802312,edu,
+ffc81ced9ee8223ab0adb18817321cbee99606e6,A multibiometrics-based CAPTCHA for improved online security,"IIIT Delhi, India","IIIT Delhi, India","Okhla Industrial Estate, Phase III, Near Govind Puri Metro Station, New Delhi, Delhi 110020, India",28.54562820,77.27315050,edu,
+fffe5ab3351deab81f7562d06764551422dbd9c4,Fully automated facial picture evaluation using high level attributes,"GIPSA-Lab, Grenoble, France","GIPSA-Lab, Grenoble, France","GIPSA-lab, 11, Rue des Mathématiques, Médiat Rhône-Alpes, Saint-Martin-d'Hères, Grenoble, Isère, Auvergne-Rhône-Alpes, France métropolitaine, 38400, France",45.19292450,5.76619830,edu,
+ff012c56b9b1de969328dacd13e26b7138ff298b,Facial Age Estimation With Age Difference,Microsoft,"Microsoft Corporation, Redmond, WA, USA","One Microsoft Way, Redmond, WA 98052, USA",47.64233180,-122.13693020,company,
+c570d1247e337f91e555c3be0e8c8a5aba539d9f,Robust semi-automatic head pose labeling for real-world face video sequences,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+c5eba789aeb41904aa1b03fad1dc7cea5d0cd3b6,Age and gender classification using local appearance descriptors from facial components,University of Sassari,"University of Sassari, Computer Vision Laboratory, PolComing Viale Mancini, 5 07100 Sassari, Italy","Viale Pasquale Stanislao Mancini, 5, 07100 Sassari SS, Italy",40.72401760,8.55789470,edu,
+c5022fbeb65b70f6fe11694575b8ad1b53412a0d,Lighting normalisation for face recognition,"Newcastle University, Australia","Sch. of Electr. Eng. & Comput. Sci., Newcastle Univ., NSW, Australia","University Dr, Callaghan NSW 2308, Australia",-32.89277180,151.70417750,edu,
+c5e37630d0672e4d44f7dee83ac2c1528be41c2e,Multi-task Deep Neural Network for Joint Face Recognition and Facial Attribute Prediction,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+c553f0334fcadf43607925733685adef81fbe406,Adaptive feature learning CNN for behavior recognition in crowd scene,Universiti Teknologi Petronas,Universiti Teknologi PETRONAS,"UTP, Universiti Teknologi Petronas, Persiaran Desa Kediaman, Puncak Iskandar, Seri Iskandar, PRK, 32610, Malaysia",4.38304640,100.97001540,edu,
+c59a9151cef054984607b7253ef189c12122a625,Model-free non-rigid head pose tracking by joint shape and pose estimation,University of Malta,University of Malta,"University of Malta, Ring Road, Japanese Garden, L-Imsida, Malta, MSD 9027, Malta",35.90232260,14.48341890,edu,
+c59b62864a6d86eead075c88137a87070a984550,Facial expression recognition by correlated Topic Models and Bayes modeling,University of Hong Kong,University of Hong Kong,"海洋科學研究所 The Swire Institute of Marine Science, 鶴咀道 Cape D'Aguilar Road, 鶴咀低電台 Cape D'Aguilar Low-Level Radio Station, 石澳 Shek O, 芽菜坑村 Nga Choy Hang Tsuen, 南區 Southern District, 香港島 Hong Kong Island, HK, 中国",22.20814690,114.25964115,edu,
+c270aff2b066ee354b4fe7e958a40a37f7bfca45,Expression recognition in the wild with transfer learning,Southeast University,Southeast University,"SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国",32.05752790,118.78682252,edu,
+c291f0e29871c8b9509d1a2876c3e305839ad4ac,A single layer feedforward fusion network for face verification,Yonsei University,Yonsei University,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.56004060,126.93692480,edu,
+c244c3c797574048d6931b6714ebac64d820dbb3,Exploiting the locality information of dense trajectory feature for human action recognition,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+c222f8079c246ead285894c47bdbb2dfc7741044,Face de-identification with expressions preservation,"Bordeaux INP, France","Bordeaux INP, LaBRI, PICTURA, UMR 5800, F-33400 Talence, France","Avenue des Facultés, 33400 Talence, France",44.80557160,-0.60519720,edu,
+c2474202d56bb80663e7bece5924245978425fc1,Localize heavily occluded human faces via deep segmentation,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+c2422c975d9f9b62fbb19738e5ce5e818a6e1752,"$L_{1}$-Minimization Algorithms for Sparse Signal Reconstruction Based on a Projection Neural Network",City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+c2dc29e0db76122dfed075c3b9ee48503b027809,How scenes imply actions in realistic videos?,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+f64574ee0e6247b84d573ddb5c6e2c4ba798ffff,Class-Specific Subspace-Based Two-Dimensional Principal Component Analysis for Face Recognition,Chulalongkorn University,Chulalongkorn University Bangkok,"จุฬาลงกรณ์มหาวิทยาลัย, 254, ถนนพญาไท, สยาม, แขวงปทุมวัน, เขตปทุมวัน, กรุงเทพมหานคร, 10330, ประเทศไทย",13.74311795,100.53287901,edu,
+f6fc112ff7e4746b040c13f28700a9c47992045e,Bi-Level Semantic Representation Analysis for Multimedia Event Detection,Xiamen University of Technology,"Xiamen University of Technology, Xiamen 361024, China","600 Ligong Rd, Jimei Qu, Xiamen Shi, Fujian Sheng, China, 361024",24.62053900,118.08463100,edu,
+f6532bf13a4649b7599eb40f826aa5281e392c61,Facial Action Recognition Combining Heterogeneous Features via Multikernel Learning,University of French West Indies and Guiana,"LAMIA, University of French West Indies and Guiana, EA 4540, Pointe-à-Pitre, France","Fouillole, Pointe-à-Pitre 97157, Guadeloupe",16.22427240,-61.52893250,edu,
+f6311d6b3f4d3bd192d866d2e898c30eea37d7d5,Facial expression recognition based on discriminative dictionary learning,China University of Petroleum,"College of Information and Control Engineering, China University of Petroleum, Qingdao, 266580, China","China, Shandong, Weifang, Kuiwen, Dongfeng E St, 49号潍坊学院经济管理学院附近",36.71684600,119.18339500,edu,
+f6f2a212505a118933ef84110e487551b6591553,Exemplar-embed complex matrix factorization for facial expression recognition,"University of Science, Vietnam","Fac. of Mathematics and Computer Sciences, University of Science, Ho Chi Minh City, Viet Nam","227 Đường Nguyễn Văn Cừ, Phường 4, Quận 5, Hồ Chí Minh, Vietnam",10.76241650,106.68120130,edu,
+f6e6b4d0b7c16112dcb71ff502033a2187b1ec9b,Understanding Blooming Human Groups in Social Networks,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+f652cb159a2cf2745aabcbf6a7beed4415e79e34,An efficient image normalization method for face recognition under varying illuminations,Chungnam National University,Chungnam National University,"충남대학교, 대덕사이언스길 2코스, 온천2동, 온천동, 유성구, 대전, 34140, 대한민국",36.37029045,127.34780458,edu,
+e957d0673af7454dbf0a14813201b0e2570577e9,COATL - a learning architecture for online real-time detection and classification assistance for environmental data,Bielefeld University,Bielefeld University,"Fachhochschule Bielefeld FB Gestaltung, 3, Lampingstraße, Mitte, Bielefeld, Regierungsbezirk Detmold, Nordrhein-Westfalen, 33615, Deutschland",52.02804210,8.51148270,edu,
+e9809c0c6bf33cfe232a63b0a13f9b1263c58cb8,Bayesian Constrained Local Models Revisited,University of Coimbra,University of Coimbra,"Reitoria da Universidade de Coimbra, Rua de Entre-Colégios, Almedina, Alta, Almedina, Sé Nova, Santa Cruz, Almedina e São Bartolomeu, CBR, Coimbra, Baixo Mondego, Centro, 3000-062, Portugal",40.20759510,-8.42566148,edu,
+e9cebf627c204c6949dcc077d04c57eb66b2c038,A method for extraction of affective audio-visual facial clips from movies,Bahçeşehir University,Bahcesehir University,"BAU Galata, 24, Kemeraltı Caddesi, Müeyyedzade, Beyoğlu, İstanbul, Marmara Bölgesi, 34425, Türkiye",41.02451875,28.97697953,edu,
+e96cef8732f3021080c362126518455562606f2d,Binary Coding by Matrix Classifier for Efficient Subspace Retrieval,Griffith University,Griffith University,"Griffith University Nathan Campus, Johnson Path, Nathan, Nathan Heights, QLD, 4111, Australia",-27.55339750,153.05336234,edu,
+e9b731f00d16a10a31ceea446b2baa38719a31f1,Facial expression recognition based on mixture of basic expressions and intensities,National Chiao Tung University,National Chiao Tung University,"NCTU;交大;交通大學;交大光復校區;交通大學光復校區, 1001, 大學路, 光明里, 赤土崎, 東區, 新竹市, 30010, 臺灣",24.78676765,120.99724412,edu,
+e9d1b3767c06c896f89690deea7a95401ae4582b,Hierarchical class-specific kernel discriminant analysis for face verification,Tampere University of Technology,Tampere University of Technology,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi",61.44964205,23.85877462,edu,
+e96ce25d11296fce4e2ecc2da03bd207dc118724,Classification of face images using local iterated function systems,Deakin University,Deakin University,"Deakin University, Pigdons Lane, Waurn Ponds, Geelong, City of Greater Geelong, Barwon South West, Victoria, 3216, Australia",-38.19928505,144.30365229,edu,
+f1e44e64957397d167d13f8f551cae99e5c16c75,Face detection and facial expression recognition using simultaneous clustering and feature selection via an expectation propagation statistical learning framework,Concordia University,Concordia University,"Concordia University, 2811, Northeast Holman Street, Concordia, Portland, Multnomah County, Oregon, 97211, USA",45.57022705,-122.63709346,edu,
+f16599e4ec666c6390c90ff9a253162178a70ef5,Linguistic Patterns and Cross Modality-based Image Retrieval for Complex Queries,La Trobe University,La Trobe University,"La Trobe University, Keck Street, Flora Hill, Bendigo, City of Greater Bendigo, Loddon Mallee, Victoria, 3550, Australia",-36.77847540,144.29804700,edu,
+f1da4d705571312b244ebfd2b450692fd875cd1f,Max-Margin Multiattribute Learning With Low-Rank Constraint,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+f1d6da83dcf71eda45a56a86c5ae13e7f45a8536,A Secure Face-Verification Scheme Based on Homomorphic Encryption and Deep Neural Networks,Beijing University of Technology,Beijing University of Technology,"北京工业大学, 银杏大道, 大郊亭村, 朝阳区 / Chaoyang, 北京市, 3208, 中国",39.87391435,116.47722285,edu,
+f1061b2b5b7ca32edd5aa486aecc63a0972c84f3,Duplex Metric Learning for Image Set Classification,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+f180cb7111e9a6ba7cfe0b251c0c35daaef4f517,Modeling Neuron Selectivity Over Simple Midlevel Features for Image Classification,Hong Kong University of Science and Technology,Hong Kong University of Science and Technology,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.33863040,114.26203370,edu,
+f1af714b92372c8e606485a3982eab2f16772ad8,The MUG facial expression database,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+e7436b8e68bb7139b823a7572af3decd96241e78,A new approach for face detection with omnidirectional sensors,"University of Rouen, France","LITIS, Universite de Rouen - INSA de Rouen, Rouen, FR","685 Avenue de l'Université, 76800 Saint-Étienne-du-Rouvray, France",49.38497570,1.06832570,edu,
+e75a589ca27dc4f05c2715b9d54206dee37af266,Multiscale Deep Alternative Neural Network for Large-Scale Video Classification,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+e73f2839fc232c03e9f027c78bc419ee15810fe8,Flexible 3D neighborhood cascade deformable part models for object detection,"University of Science, Vietnam","Fac. of Mathematics and Computer Sciences, University of Science, Ho Chi Minh City, Viet Nam","227 Đường Nguyễn Văn Cừ, Phường 4, Quận 5, Hồ Chí Minh, Vietnam",10.76241650,106.68120130,edu,
+e790a2538579c8e2ef9b314962ab26197d6664c6,A jointly local structured sparse deep learning network for face recognition,Waseda University,Waseda University,"早稲田大学 北九州キャンパス, 2-2, 有毛引野線, 八幡西区, 北九州市, 福岡県, 九州地方, 808-0135, 日本",33.88987280,130.70856205,edu,
+e7e8c0bbee09b5af6f7df1de8f0f26da992737c4,Autoassociative Pyramidal Neural Network for face verification,University of Pernambuco,"University of Pernambuco, Recife-PE, Brazil","Av. Gov. Agamenon Magalhães - Santo Amaro, Recife - PE, 50100-010, Brazil",-8.04406030,-34.88611670,edu,
+cb992fe67f0d4025e876161bfd2dda467eaec741,Random forest-based feature selection for emotion recognition,"University of Orléans, France","Univ. Orléans, INSA CVL, PRISME EA 4229, Bourges, France","Château de la Source, 45100 Orléans, France",47.84457440,1.93369650,edu,
+cbc2de9b919bc63590b6ee2dfd9dda134af45286,Direct face detection and video reconstruction from event cameras,Rice University,Rice University,"Rice University, Stockton Drive, Houston, Harris County, Texas, 77005-1890, USA",29.71679145,-95.40478113,edu,
+cbf3e848c5d2130dd640d9bd546403b8d78ce0f9,Local linear discriminant analysis with composite kernel for face recognition,Waseda University,Waseda University,"早稲田大学 北九州キャンパス, 2-2, 有毛引野線, 八幡西区, 北九州市, 福岡県, 九州地方, 808-0135, 日本",33.88987280,130.70856205,edu,
+cbfcd1ec8aa30e31faf205c73d350d447704afee,Angle 2DPCA: A New Formulation for 2DPCA,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+cb8a1b8d87a3fef15635eb4a32173f9c6f966055,"A Survey on Deep Learning: Algorithms, Techniques, and Applications",Florida International University,Florida International University,"FIU, Southwest 14th Street, Sweetwater, University Park, Miami-Dade County, Florida, 33199, USA",25.75533775,-80.37628897,edu,
+cb7a743b9811d20682c13c4ee7b791ff01c62155,VRank: Voting system on Ranking model for human age estimation,National Cheng Kung University,National Cheng Kung University,"成大, 1, 大學路, 大學里, 前甲, 東區, 臺南市, 70101, 臺灣",22.99919160,120.21625134,edu,
+cb27b45329d61f5f95ed213798d4b2a615e76be2,Deep Facial Age Estimation Using Conditional Multitask Learning With Weak Label Expansion,"Samsung SAIT, Korea","Advanced Media Lab. Samsung Advance Institute of Technology, Republic of Korea","130 Samseong-ro, Maetan 3(sam)-dong, Yeongtong-gu, Suwon, Gyeonggi-do, South Korea",37.25202260,127.05550190,company,
+cbaa17be8c22e219a9c656559e028867dfb2c2ed,Which face is more attractive?,University of Technology,"Center for Quantum Computation and Intelligent Systems, Faculty of Engineering and Information Technology, University of Technology, Sydney, NSW, Australia","11 University of Technology Sydney 81, Broadway, Ultimo NSW 2007, Australia",-33.88405040,151.19922540,edu,
+cb160c5c2a0b34aba7b0f39f5dda6aca8135f880,Facial expression recognition based on locational features,"Eskişehir Osmangazi University, Turkey","Elektrik ve Elektronik Mühendisliği Bölümü, Eskişehir Osmangazi Üniversitesi, Türkiye","Eskişehir Osmangazi Üniversitesi, Mühendislik Fakültesi, 26480 Odunpazarı/Eskişehir, Turkey",39.74871200,30.47595620,edu,
+f85ccab7173e543f2bfd4c7a81fb14e147695740,A method to infer emotions from facial Action Units,"Samsung R&D Institute, Bangalore, India","Samsung R&D Institute, Bangalore, India","#2870, Phoenix Building, 4th Floor Bagmane Constellation Business Park, Outer Ring Rd, Doddanekundi, Marathahalli, Bengaluru, Karnataka 560037, India",12.98035370,77.69751010,company,
+f8162276f3b21a3873dde7a507fd68b4ab858bcc,Generalized Nonlinear Discriminant Analysis,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+f88ce52c5042f9f200405f58dbe94b4e82cf0d34,A Locality-Constrained and Label Embedding Dictionary Learning Algorithm for Image Classification,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+f86c6942a7e187c41dd0714531efd2be828e18ad,Low-rank and structured sparse subspace clustering,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+cefaad8241bceb24827a71bf7c2556e458e57faa,Local Structure-Based Image Decomposition for Feature Extraction With Applications to Face Recognition,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+cef73d305e5368ee269baff53ec20ea3ae7cdd82,Correlation-Based Face Detection for Recognizing Faces in Videos,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+cec8936d97dea2fcf04f175d3facaaeb65e574bf,Large-Scale Video Classification with Elastic Streaming Sequential Data Processing System,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+ce8db0fe11e7c96d08de561506f9f8f399dabbb2,Weighted sparse representation using a learned distance metric for face recognition,Korea University,Korea University,"고려대, 안암로, 제기동, 동대문구, 서울특별시, 02796, 대한민국",37.59014110,127.03623180,edu,
+ced7811f2b694e54e3d96ec5398e4b6afca67fc0,Illumination compensation and normalization for robust face recognition using discrete cosine transform in logarithm domain,"Nanyang Technological University, Singapore","Comput. Control Lab, Nanyang Technol. Univ., Singapore","50 Nanyang Avenue, Block N4 #02a-32, Singapore 639798",1.34619520,103.68154990,edu,
+ce30ddb5ceaddc0e7d308880a45c135287573d0e,Exploiting implicit affective labeling for image recommendations,University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.05015580,14.46907327,edu,
+ce2945e369603fcec1fcdc6e19aac5996325cba9,Emotion recognition using PHOG and LPQ features,University of Canberra,University of Canberra,"University of Canberra, University Drive, Bruce, Belconnen, Australian Capital Territory, 2617, Australia",-35.23656905,149.08446994,edu,
+e084b0e477ee07d78c32c3696ea22c94f5fdfbec,Semi-supervised visual recognition with constrained graph regularized non negative matrix factorization,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+e0ab926cd48a47a8c7b16e27583421141f71f6df,Human activity recognition using an ensemble of support vector machines,University of Windsor,University of Windsor,"Bridge AA, Ambassador Bridge, Windsor, Essex, Ontario, N9C 2J9, Canada",42.30791465,-83.07176915,edu,
+e03f69bad7e6537794a50a99da807c9df4ff5186,Unsupervised method of Domain Adaptation on representation of discriminatory regions of the face image for surveillance face datasets,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+46b2ecef197b465abc43e0e017543b1af61921ac,Face alignment with Cascaded Bidirectional LSTM Neural Networks,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+464ef1b3dcbe84099c904b6f9e9281c5f6fd75eb,LogDet Divergence-Based Metric Learning With Triplet Constraints and Its Applications,University of Agder,University of Agder,"UiA, Vegard Hauges plass, Gimlemoen, Kvadraturen, Kristiansand, Vest-Agder, 4630, Norge",58.16308805,8.00144966,edu,
+2cf3564d7421b661e84251d280d159d4b3ebb336,Discriminating projections for estimating face age in wild images,University of North Carolina at Wilmington,University of North Carolina at Wilmington,"University of North Carolina at Wilmington, Price Drive, University Suites, Wilmington, New Hanover County, North Carolina, 28403, USA",34.22498270,-77.86907744,edu,
+2cd426f10178bd95fef3dede69ae7b67e73bb70c,Real-time face alignment enhancement by tracking,Zhejiang University of Technology,Zhejiang University of Technology,"浙江工业大学, 潮王路, 朝晖街道, 杭州市 Hangzhou, 浙江省, 310014, 中国",30.29315340,120.16204580,edu,
+2ce84465b9759166effc7302c2f5339766cc523d,Sparsity-based joint gaze correction and face beautification for conferencing video,"National Institute of Informatics, Japan","National Institute of Informatics, Japan","2 Chome-1-2 Hitotsubashi, Chiyoda, Tokyo 100-0003, Japan",35.69248530,139.75825330,edu,
+7923742e2af655dee4f9a99e39916d164bc30178,Soft biometric privacy: Retaining biometric utility of face images while perturbing gender,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+7914c3f510e84a3d83d66717aad0d852d6a4d148,Relative attribute guided dictionary learning,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+7935f644c8044c0d3b81e2842e5ecc3672698bbb,Frequency guided bilateral symmetry Gabor Wavelet Network,"Samsung SAIT, Korea","Advanced Media Lab. Samsung Advance Institute of Technology, Republic of Korea","130 Samseong-ro, Maetan 3(sam)-dong, Yeongtong-gu, Suwon, Gyeonggi-do, South Korea",37.25202260,127.05550190,company,
+7918e3e15099b4b2943746e1f6c9e3992a79c5f3,Non-negative matrix factorization as a feature selection tool for maximum margin classifiers,"Epson Research and Development Inc., San Jose, CA","Epson Research and Development Inc., San Jose, CA","214 Devcon Dr, San Jose, CA 95112, USA",37.37445600,-121.91274020,company,
+794a51097385648e3909a1acae7188f5ab881710,Accurate eye localization in low and standard definition content,"Philips Research Eindhoven, HTC 34, Netherlands","Philips Research Eindhoven, HTC 34, Netherlands","High Tech Campus 34, 5656 AE Eindhoven, Netherlands",51.41175220,5.45866520,company,
+79fd4baca5f840d6534a053b22e0029948b9075e,Neutral-independent geometric features for facial expression recognition,"Otto-von-Guericke-University, Magdeburg","Institute for Electronics, Signal Processing and Communications (IESK), Otto-von-Guericke-University Magdeburg, D-39106, P.O. Box 4210 Germany","Universitätspl. 2, 39106 Magdeburg, Germany",52.14020530,11.64419910,edu,
+2d94dfa9c8f6708e071ef38d58f9f9bcb374cd84,A common framework for real-time emotion recognition and facial action unit detection,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+2d7c2c015053fff5300515a7addcd74b523f3f66,Age-Related Factor Guided Joint Task Modeling Convolutional Neural Network for Cross-Age Face Recognition,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+2dbc57abf3ceda80827b85593ce1f457b76a870b,Facial expression classification using salient pattern driven integrated geometric and textual features,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+2d79dece7890121469f515a6e773ba0251fc2d98,Integration of precise iris localization into active appearance models for automatic initialization and robust deformable face tracking,"Institute of Industrial Information Technology (IIIT), Karlsruhe Institute of Technology (KIT), 76187 Karlsruhe, Germany","Institute of Industrial Information Technology (IIIT), Karlsruhe Institute of Technology (KIT), 76187 Karlsruhe, Germany","76131 Karlsruhe, Germany",49.01191990,8.41703030,edu,
+2df4d0c06f4f68060cecbbb8e2088d9c6b20d04f,Attribute prediction with long-range interactions via path coding,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+2d2fb01f761d21a459cfb34935bc47ab45a9913b,Intra-Class Variation Reduction Using Training Expression Images for Sparse Representation Based Facial Expression Recognition,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+417c2fa930bb7078fdf10cb85c503bd5270b9dc2,Low-resolution video face recognition with face normalization and feature adaptation,Fraunhofer,"Fraunhofer IOSB, Fraunhoferstrasse 1, 76131 Karlsruhe, Germany","Fraunhoferstraße 1, 76131 Karlsruhe, Germany",49.01546000,8.42579990,company,
+41c42cb001f34c43d4d8dd8fb72a982854e173fb,Evolutionary Cross-Domain Discriminative Hessian Eigenmaps,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+83b54b8c97dc14e302dad191327407ec0d5fb4a6,Temporal action localization with two-stream segment-based RNN,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+8383faea09b4b4bef8117a1da897495ebd68691b,Good Practices for Learning to Recognize Actions Using FV and VLAD,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+838dad9d1d68d29be280d92e69410eaac40084bc,Effectiveness of various classification techniques on human face recognition,University of Windsor,University of Windsor,"Bridge AA, Ambassador Bridge, Windsor, Essex, Ontario, N9C 2J9, Canada",42.30791465,-83.07176915,edu,
+834736698f2cc5c221c22369abe95515243a9fc3,GARP-face: Balancing privacy protection and utility preservation in face de-identification,"Air Force Research Lab, Rome, NY","Air Force Research Lab, Rome, NY, 13441, USA","26 Electronics Parkway, Rome, NY 13441, USA",43.22135160,-75.40855770,mil,
+1b8541ec28564db66a08185510c8b300fa4dc793,Affine-Transformation Parameters Regression for Face Alignment,"National University of Defense Technology, China","National University of Defence Technology, Changsha 410000, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.22902090,112.99483204,edu,
+1b211f8221162ce7ef212956b637b50e30ad48f4,Saliency-context two-stream convnets for action recognition,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+1b9976fea3c1cf13f0a102a884f027d9d80a14b3,Building a game scenario to encourage children with autism to recognize and label emotions using a humanoid robot,University of Hertfordshire,"Embodied Emotion, Cognition and (Inter-)Action Lab, University of Hertfordshire, United Kingdom","De Havilland Campus, Mosquito Way, Hatfield AL10 9EU, UK",51.76175610,-0.24679970,edu,
+1bd8ab47177997acb3b0cca4b6a801e6e6ec3eac,Fast eye localization without a face model using inner product detectors,"Universidade Estadual de Campinas, Brazil","Universidade Estadual de Campinas, Cx.P. 6176 Campinas-SP, CEP 13084-971, Brazil","Cidade Universitária Zeferino Vaz - Barão Geraldo, Campinas - SP, 13083-970, Brazil",-22.81843930,-47.06472060,edu,
+1b2d9a1c067f692dd48991beff03cd62b9faebf2,Local primitive code mining for fast and accurate face recognition,"Nokia Research Center, Beijing","Nokia Research Center, Beijing","China, Beijing Shi, Xicheng Qu, XiDan, Xidan N St, 118号西单商场2楼 邮政编码: 100031",39.91229390,116.37418790,company,
+771a6a80dd08212d83a4e976522e1ce108881401,An automated method for realistic face simulation and facial landmark annotation and its application to active appearance models,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+7783095a565094ae5b3dccf082d504ddd7255a5c,"""Wow! you are so beautiful today!""",Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+77c5437107f8138d48cb7e10b2b286fa51473678,A pseudo ensemble convolutional neural networks,"Electronics and Telecommunications Research Institute, Daejeon, Korea","Electronics & Telecommunications Research Institute (ETRI), Daejeon, Korea","Electronics and Telecommunications Research Institute, Sinseong-dong, Daejeon, South Korea",36.38376500,127.36694000,edu,"Electronics and Telecommunications Research Institute, Daejeon, Korea"
+77cea27494499dd162221d1476bf70a87391790a,Neighborhood repulsed correlation metric learning for kinship verification,Capital Normal University,Capital Normal University,"首都师范大学, 岭南路, 西冉村, 海淀区, 100048, 中国",39.92864575,116.30104052,edu,
+779d3f0cf74b7d33344eea210170c7c981a7e27b,Fast-PADMA: Rapidly Adapting Facial Affect Model From Similar Individuals,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+77816b9567d5fed1f6085f33e1ddbcc73af2010e,Artimetrics: Biometrics for Artificial Entities,University of Calgary,University of Calgary,"University of Calgary, Service Tunnel, University Heights, Calgary, Alberta, T2N 1N7, Canada",51.07840380,-114.12870770,edu,
+7788fa76f1488b1597ee2bebc462f628e659f61e,A Privacy-Aware Architecture at the Edge for Autonomous Real-Time Identity Reidentification in Crowds,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+778c1e95b6ea4ccf89067b83364036ab08797256,Exploring Patterns of Gradient Orientations and Magnitudes for Face Recognition,"VESALIS SAS, France","Vesalis company, Clermont-Ferrand, France","8 - 10 Allée Evariste Galois, 63000 Clermont-Ferrand, France",45.75976430,3.13102130,company,
+771505abd38641454757de75fe751d41e87f89a4,Learning structured sparse representation for single sample face recognition,Nantong University,Nantong University,"南通大学, 狼山镇街道, 崇川区 (Chongchuan), 南通市 / Nantong, 江苏省, 226000, 中国",31.97474630,120.90779264,edu,
+48dcf45a1e38adbb9826594f7ffaa5e95ef78395,Illumination invariant feature based on neighboring radiance ratio,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+48255c9e1d6e1d030728d33a71699757e337be08,Person-independent facial expression recognition via hierarchical classification,Curtin University,Curtin University,"Curtin University, Brand Drive, Waterford, Perth, Western Australia, 6102, Australia",-32.00686365,115.89691775,edu,
+48906f609446afcdaacbe1d65770d7a6165a8eee,Storages Are Not Forever,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+484bac2a9ff3a43a6f85d109bbc579a4346397f5,Finding VIPs - A visual image persons search using a content property reasoner and web ontology,Lehigh University,Lehigh University,"Lehigh University, Library Drive, Sayre Park, Bethlehem, Northampton County, Pennsylvania, 18015, USA",40.60680280,-75.37824880,edu,
+486f5e85944404a1b57333443070b0b8c588c262,The power of fear: Facial emotion analysis of CEOs to forecast firm performance,New Jersey Institute of Technology,New Jersey Institute of Technology,"New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA",40.74230250,-74.17928172,edu,
+7049187c5155d9652747413ce1ebc8dbb209fd69,Facial depth map enhancement via neighbor embedding,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+70341f61dfe2b92d8607814b52dfd0863a94310e,Impact of resolution and image quality on video face analysis,Fraunhofer,"Fraunhofer IOSB, Fraunhoferstrasse 1, 76131 Karlsruhe, Germany","Fraunhoferstraße 1, 76131 Karlsruhe, Germany",49.01546000,8.42579990,company,
+70444627cb765a67a2efba17b0f4b81ce1fc20ff,Joint Sparse Representation and Embedding Propagation Learning: A Framework for Graph-Based Semisupervised Learning,Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+70d8bda4aafb0272ac4b93cd43e2448446b8e94d,Using SVM to design facial expression recognition for shape and texture features,"Shin-Guang Elementary School, Taiwan","Shin-Guang Elementary School, Yulin 646, Taiwan","Yulin, Guangxi, China",22.65403200,110.18122000,edu,
+1e3068886b138304ec5a7296702879cc8788143d,Active Rare Class Discovery and Classification Using Dirichlet Processes,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+1e62ca5845a6f0492574a5da049e9b43dbeadb1b,Cross-Modality Face Recognition via Heterogeneous Joint Bayesian,"Alibaba Group, Hangzhou, China","Alibaba Group, Hangzhou, China","Alibaba Group, 五常街道, 余杭区 (Yuhang), 杭州市 Hangzhou, 浙江省, 中国",30.28106540,120.02139087,edu,
+1eb9c859ff7537182a25556635954bcd11830822,Multi-features fusion based CRFs for face segmentation,Shanghai University,Shanghai University,"上海大学, 锦秋路, 大场镇, 宝山区 (Baoshan), 上海市, 201906, 中国",31.32235655,121.38400941,edu,
+1ed617d14dbc53b20287d3405b14c68d8dad3965,Benchmarking a Multimodal and Multiview and Interactive Dataset for Human Action Recognition,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+1ed49161e58559be399ce7092569c19ddd39ca0b,Transferring from face recognition to face attribute prediction through adaptive selection of off-the-shelf CNN representations,"KTH Royal Institute of Technology, Stockholm","KTH Royal Institute of Technology, Stockholm","KTH, Teknikringen, Lärkstaden, Norra Djurgården, Östermalms stadsdelsområde, Sthlm, Stockholm, Stockholms län, Svealand, 114 28, Sverige",59.34986645,18.07063213,edu,
+84f3c4937cd006888b82f2eb78e884f2247f0c4e,Beyond touch: Natural interactions using facial expressions,"Samsung SAIT, Bangalore","SAIT India, Samsung India Software Operations Pvt. Ltd (SISO), Bangalore, India, 560093","Bengaluru, Karnataka 560093, India",12.98586720,77.67130310,company,
+84574aa43a98ad8a29470977e7b091f5a5ec2366,Latent max-margin metric learning for comparing video face tubes,"Technicolor, France","Technicolor, France","Technicolor, Rue d'Oradour-sur-Glane, Javel, 15e, Paris, Île-de-France, France métropolitaine, 75015, France",48.83153300,2.28066283,edu,
+84a74ef8680b66e6dccbc69ae80321a52780a68e,Facial Expression Recognition,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+844e3e6992c98e53b45e4eb88368d0d6e27fc1d6,Structure-constrained low-rank and partial sparse representation for image classification,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+84ec0983adb8821f0655f83b8ce47f36896ca9ee,Finding label noise examples in large scale datasets,University of South Florida,University of South Florida,"University of South Florida, Leroy Collins Boulevard, Tampa, Hillsborough County, Florida, 33620, USA",28.05999990,-82.41383619,edu,
+4aea1213bdb5aa6c74b99fca1afc72d8a99503c6,Facial feature extraction using hybrid genetic-simplex optimization in multi-objective Active Appearance Model,"SUPELEC / IETR, France","SUPELEC / IETR, Avenue de la Boulaie, 35576 Cesson Sevigne, France","Campus de Rennes, Avenue de la Boulaie, 35510 Cesson-Sévigné, France",48.12523160,-1.62340120,edu,
+24b5ea4e262e22768813e7b6581f60e4ab9a8de7,"Facial Soft Biometrics for Recognition in the Wild: Recent Works, Annotation, and COTS Evaluation",Halmstad University,Halmstad University,"Högskolan i Halmstad, 3, Kristian IV:s väg, Larsfrid, Nyhem, Halmstad, Hallands län, Götaland, 301 18, Sverige",56.66340325,12.87929727,edu,
+244293024aebbb0ff42a7cf2ba49b1164697a127,Multiscale representation for partial face recognition under near infrared illumination,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+24eeb748a5e431510381ec7c8253bcb70eff8526,Convex Multiview Semi-Supervised Classification,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+24286ef164f0e12c3e9590ec7f636871ba253026,Age and gender classification using wide convolutional neural network and Gabor filter,Ajou University,Ajou University,"아주대학교, 성호대교, 이의동, 영통구, 수원시, 경기, 16499, 대한민국",37.28300030,127.04548469,edu,
+2400c4994655c4dd59f919c4d6e9640f57f2009f,Super-resolution of facial images in forensics scenarios,Aalborg University,Aalborg University,"AAU, Pontoppidanstræde, Sønder Tranders, Aalborg, Aalborg Kommune, Region Nordjylland, 9220, Danmark",57.01590275,9.97532827,edu,
+24e82eaf3257e761d6ca0ffcc2cbca30dfca82e9,An analysis of the robustness of deep face recognition networks to noisy training labels,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+24b637c98b22cd932f74acfeecdb50533abea9ae,Robust Face Recognition via Minimum Error Entropy-Based Atomic Representation,Hubei University,"Faculty of Mathematics and Statistics, Hubei University, Wuhan, China","Hongshan, Wuhan, China",30.48176100,114.31096000,edu,
+24e42e6889314099549583c7e19b1cb4cc995226,Research of face recognition under active infrared lighting based on embedded system,NanKai University,"College of Information Technical Science, NanKai University, CITS, TianJin, China","China, Tianjin, Nankai, Lequn N Rd, 南开大学综合实验楼A区604",39.10335500,117.16492700,edu,
+24f3dfeb95bdecdc604d630acdfcafa1dc7c9124,Behavioural facial animation using motion graphs and mind maps,University of Bath,University of Bath,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK",51.37914420,-2.32523320,edu,
+2360ecf058393141ead1ca6b587efa2461e120e4,Facial expression analysis and expression-invariant face recognition by manifold-based synthesis,University of Manchester,University of Manchester,"University of Manchester - Main Campus, Brunswick Street, Curry Mile, Ardwick, Manchester, Greater Manchester, North West England, England, M13 9NR, UK",53.46600455,-2.23300881,edu,
+23ee7b7a9ca5948e81555aaf3a044cfec778f148,Beyond simple features: A large-scale feature search approach to unconstrained face recognition,MIT,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+2336de3a81dada63eb00ea82f7570c4069342fb5,A methodological framework for investigating age factors on the performance of biometric systems,University of Kent,University of Kent,"University of Kent, St. Stephen's Hill, Hackington, Canterbury, Kent, South East, England, CT2 7AS, UK",51.29753440,1.07296165,edu,
+239e305c24155add73f2a0ba5ccbd66b37f77e14,Fast computation of low-rank matrix approximations,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+23e824d1dfc33f3780dd18076284f07bd99f1c43,Spoofing faces using makeup: An investigative study,INRIA Méditerranée,"Inria Méditerranée, France","2004 Route des Lucioles, 06902 Valbonne, France",43.61581310,7.06838000,edu,"Inria Méditerranée, France"
+2340d810c515dc0c9fd319f598fa8012dc0368a0,A collaborative face recognition framework on a social network platform,Yonsei University,Yonsei University,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.56004060,126.93692480,edu,
+4f03ba35440436cfa06a2ed2a571fea01cb36598,The extended collaborative representation-based classification,"Artificial Intelligence Key Laboratory of Sichuan Province, China","Artificial Intelligence Key Laboratory, of Sichuan Province, Zigong, Sichuan, 643000, P. R. China","Ziliujing, Zigong, Sichuan, China, 643000",29.33909180,104.77858020,gov,
+4f1249369127cc2e2894f6b2f1052d399794919a,Deep Age Estimation: From Classification to Ranking,Wayne State University,Wayne State University,"Parking Structure 3, East Warren Avenue, New Center, Detroit, Wayne County, Michigan, 48236, USA",42.35775700,-83.06286711,edu,
+8dd3f05071fd70fb1c349460b526b0e69dcc65bf,Local Directional Ternary Pattern for Facial Expression Recognition,University of Campinas,University of Campinas,"USJ, 97, Rua Sílvia Maria Fabro, Kobrasol, Campinas, São José, Microrregião de Florianópolis, Mesorregião da Grande Florianópolis, SC, Região Sul, 88102-130, Brasil",-27.59539950,-48.61542180,edu,
+8d3e95c31c93548b8c71dbeee2e9f7180067a888,Template regularized sparse coding for face verification,GE Global Research,"General Electric Global Research, Niskayuna, NY, USA","1 Research Cir, Niskayuna, NY 12309, USA",42.82715560,-73.87804810,company,
+8dd9c97b85e883c16e5b1ec260f9cd610df52dec,Rule based assessment of hearing-impaired children's facial expressions,Istanbul Technical University,Istanbul Technical University,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye",41.10427915,29.02231159,edu,
+15ef65fd68d61f3d47326e358c446b0f054f093a,Learning guided convolutional neural networks for cross-resolution face recognition,National Chiao Tung University,National Chiao Tung University,"NCTU;交大;交通大學;交大光復校區;交通大學光復校區, 1001, 大學路, 光明里, 赤土崎, 東區, 新竹市, 30010, 臺灣",24.78676765,120.99724412,edu,
+159b1e3c3ed0982061dae3cc8ab7d9b149a0cdb1,Weak Classifier for Density Estimation in Eye Localization and Tracking,University of Campinas,University of Campinas,"USJ, 97, Rua Sílvia Maria Fabro, Kobrasol, Campinas, São José, Microrregião de Florianópolis, Mesorregião da Grande Florianópolis, SC, Região Sul, 88102-130, Brasil",-27.59539950,-48.61542180,edu,
+159caaa56c2291bedbd41d12af5546a7725c58d4,A joint optimization scheme to combine different levels of features for face recognition with makeup changes,Beijing Advanced Innovation Center for Imaging Technology,"Beijing Advanced Innovation Center for Imaging Technology, Beijing 100048, China","Haidian, Beijing, China, 100048",39.92907420,116.31093150,edu,
+12c4ba96eaa37586f07be0d82b2e99964048dcb5,Local Adaptive Binary Patterns Using Diamond Sampling Structure for Texture Classification,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+1221e25763c3be95c1b6626ca9e7feaa3b636d9a,Exploiting spatial-temporal context for trajectory based action video retrieval,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+126076774da192d4d3f4efcd1accc719ee5f9683,A hybrid facial expression recognition method based on neutral face shape estimation,Bahçeşehir University,Bahcesehir University,"BAU Galata, 24, Kemeraltı Caddesi, Müeyyedzade, Beyoğlu, İstanbul, Marmara Bölgesi, 34425, Türkiye",41.02451875,28.97697953,edu,
+12b533f7c6847616393591dcfe4793cfe9c4bb17,Semantic Face Signatures: Recognizing and Retrieving Faces by Verbal Descriptions,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+8cffe360a05085d4bcba111a3a3cd113d96c0369,Learning universal multi-view age estimator using video context,"Facebook, Singapore","Facebook, Singapore","Ewe Boon back lane, between Palm Spring, City Towers and Wing On Life Garden, Farrer Park Gardens, Novena, Singapore, Central, 259803, Singapore",1.31704170,103.83210410,company,
+8c85ef961826575bc2c2f4da7784bc3bfcf8b188,Pareto-optimal discriminant analysis,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+8c50869b745fc094a4fb1b27861934c3c14d7199,A study of the effect of subject motion to pulse rate estimation,GE Global Research Center,GE Global Research Center,"GE Global Research Center, Aqueduct, Niskayuna, Schenectady County, New York, USA",42.82982480,-73.87719385,edu,
+8c4042191431e9eb43f00b0f14c23765ab9c6688,Multi-model robust error correction for face recognition,"TCL Research America, San Jose, CA","TCL Research America, San Jose, CA 95134, USA","2870 Zanker Rd Suite 200, San Jose, CA 95134, USA",37.39521480,-121.92778100,company,
+8c3f7bd8ae50337dd812b370ce4c4ea9375a9f58,Facial action unit intensity estimation using rotation invariant features and regression analysis,University of the Witwatersrand,University of the Witwatersrand,"University of the Witwatersrand, Empire Road, Johannesburg Ward 60, Johannesburg, City of Johannesburg Metropolitan Municipality, Gauteng, 2001, South Africa",-26.18888130,28.02479073,edu,
+8c37bd06e1a637c6f249dcd1d2c4bc9589ae24b3,Component-Based Active Appearance Models for Face Modelling,Drexel University,Drexel University,"Drexel University, Arch Street, Powelton Village, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.95740000,-75.19026706,edu,
+8c2b663f8be1702ed3e377b5e6e85921fe7c6389,An accurate eye localization approach for smart embedded system,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+8cd0855ca967ce47b0225b58bbadd38d8b1b41a1,Detecting Anatomical Landmarks From Limited Medical Imaging Data Using Two-Stage Task-Oriented Deep Neural Networks,University of North Carolina at Chapel Hill,University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.91139710,-79.05045290,edu,
+85785ae222c6a9e01830d73a120cdac75d0b838a,Multimedia Database Retrieval,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+85a136b48c2036b16f444f93b086e2bd8539a498,Orthogonal Principal Coefficients Embedding for Unsupervised Subspace Learning,OmniVision Technologies Singapore,"OmniVision Technologies Singapore Pte. Ltd., Singapore","4275 Burton Dr, Santa Clara, CA 95054, USA",37.39099960,-121.96450710,edu,
+85f27ec70474fe93f32864dd03c1d0f321979100,Integrating Local and Global Manifold structures for unsupervised dimensionality reduction,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+85f7f03b79d03da5fae3a7f79d9aac228a635166,Age categorization via ECOC with fused gabor and LBP features,"Institute for Infocomm Research, Singapore","Institute for Infocomm Research, Singapore","1 Fusionopolis Way, #21-01 Connexis, Singapore 138632",1.29889260,103.78731070,edu,
+854b1f0581f5d3340f15eb79452363cbf38c04c8,Directional Age-Primitive Pattern (DAPP) for Human Age Group Recognition and Age Estimation,King Saud University,King Saud University,"King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية",24.72464030,46.62335012,edu,
+85ec86f8320ba2ed8b3da04d1c291ce88b8969c0,RF-Based Fall Monitoring Using Convolutional Neural Networks,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+85ae6fa48e07857e17ac4bd48fb804785483e268,Gender Estimation Based on Smile-Dynamics,INRIA Méditerranée,"Inria M&#x00E9;diterran&#x00E9;e, France","2004 Route des Lucioles, 06902 Valbonne, France",43.61581310,7.06838000,edu,"Inria Méditerranée, France"
+8562b4f63e49847692b8cb31ef0bdec416b9a87a,Marginal Representation Learning With Graph Structure Self-Adaptation,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+1d30f813798c55ae4fe454829be6e2948ee841da,Kernel Fukunaga-Koontz Transform Subspaces For Enhanced Face Recognition,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+1d51b256af68c5546d230f3e6f41da029e0f5852,Class-Specific Kernel Discriminant Analysis Revisited: Further Analysis and Extensions,Tampere University of Technology,Tampere University of Technology,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi",61.44964205,23.85877462,edu,
+1dabb080e3e968633f4b3774f19192f8378f5b67,Exploring deep learning based solutions in fine grained activity recognition in the wild,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+1da1299088a6bf28167c58bbd46ca247de41eb3c,Face identification from a single example image based on Face-Specific Subspace (FSS),Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+71d786fdb563bdec6ca0bbf69eba8e3f37c48c6f,Part-based representation and classification for face recognition,University of Campinas (UNICAMP),"Institute of Computing, University of Campinas (UNICAMP), Campinas, SP, 13083-852, Brazil","Universidade Estadual de Campinas - Av. Albert Einstein, 1251 - Cidade Universitária, Campinas - SP, 13083-852, Brazil",-22.81483740,-47.06477080,edu,
+710c3aaffef29730ffd909a63798e9185f488327,The GIST of aligning faces,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+71a9d7cf8cf1e206cb5fa18795f5ab7588c61aba,Robust Shape-Feature-Vector-Based Face Recognition System,Monash University,Monash University,"Monash University, Mile Lane, Parkville, City of Melbourne, Victoria, 3000, Australia",-37.78397455,144.95867433,edu,
+7123e510dea783035b02f6c35e35a1a09677c5ab,Back to the future: A fully automatic method for robust age progression,"IBM Research, North Carolina",IBM Research,"IBM, East Cornwallis Road, Research Triangle Park, Nelson, Durham County, North Carolina, 27709, USA",35.90422720,-78.85565763,company,
+71ca8b6e84c17b3e68f980bfb8cddc837100f8bf,Effective 3D based frontalization for unconstrained face recognition,University of Florence,University of Florence,"Piazza di San Marco, 4, 50121 Firenze FI, Italy",43.77764260,11.25976500,edu,
+7196b3832065aec49859c61318037b0c8c12363a,Probabilistic modeling of scenes using object frames,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+7644b3a0871b8e0e7e1cdf06099e295f1e5fbdf7,Graph Maximum Margin Criterion for Face Recognition,Anhui Polytechnic University,Anhui Polytechnic University,"安徽工程大学, 鸠江北路, 芜湖市, 芜湖市区, 芜湖市 / Wuhu, 安徽省, 241000, 中国",31.34185955,118.40739712,edu,
+76669f166ddd3fb830dbaacb3daa875cfedc24d9,Learning face recognition from limited training data using deep neural networks,IBM Thomas J. Watson Research Center,IBM Thomas J. Watson Research Center,"IBM Yorktown research lab, Adams Road, Millwood, Town of New Castle, Westchester County, New York, 10562, USA",41.21002475,-73.80407056,company,
+76640cb1a683a479ce2e0d6681d821ff39126d63,Innovative embodiment of job interview in emotionally aware communication robot,"NEC Corporation, Nara, Japan","C &amp; C Innovation Research Labs, NEC Corporation, Nara, Japan","Nara, Nara Prefecture, Japan",34.68508690,135.80500020,edu,
+1c25a3c8ef3e2c4dbff337aa727d13f5eba40fb2,Artistic stylization of face photos based on a single exemplar,Memorial University of Newfoundland,Memorial University of Newfoundland,"Memorial University of Newfoundland, Overpass, St. John's, Newfoundland and Labrador, A1B 5S7, Canada",47.57272510,-52.73305444,edu,
+1c0acf9c2f2c43be47b34acbd4e7338de360e555,A Multi-Camera Deep Neural Network for Detecting Elevated Alertness in Drivers,Toyota Research Institute,Toyota Research Institute,"Toyota Research Institute, 4440, West El Camino Real, Los Altos, Santa Clara County, California, 94022, USA",37.40253645,-122.11655107,edu,
+8274069feeff6392b6c5d45d8bfaaacd36daedad,Face recognition using extended generalized Rayleigh quotient,Xi'an Jiaotong University,Xi'an Jiaotong University,"西安交通大学兴庆校区, 文治路, 乐居场, 碑林区 (Beilin), 西安市, 陕西省, 710048, 中国",34.24749490,108.97898751,edu,
+82953e7b3d28ccd1534eedbb6de7984c59d38cd4,Incremental Generalized Discriminative Common Vectors for Image Classification,Universitat de València,Universitat de València,"Campus dels Tarongers, Plaza de Manuel Broseta i Pont, Ciutat Jardí, Algirós, València, Comarca de València, València / Valencia, Comunitat Valenciana, 46022, España",39.47787665,-0.34257711,edu,
+8229f2735a0db0ad41f4d7252129311f06959907,Active Learning for Solving the Incomplete Data Problem in Facial Age Classification by the Furthest Nearest-Neighbor Criterion,"Institution for Infocomm Research, Singapore","Institution for Infocomm Research, Connexis, Singapore","1 Fusionopolis Way, #21-01 Connexis, Singapore 138632",1.29889260,103.78731070,edu,
+82a0a5d0785fb2c2282ed901a15c3ff02f8567df,Comparison of two methods for unsupervised person identification in TV shows,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+82e3f4099503633c042a425e9217bfe47cfe9d4b,A modified vector of locally aggregated descriptors approach for fast video classification,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+82dad0941a7cada11d2e2f2359293fe5fabf913f,A pool of deep models for event recognition,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+4909ed22b1310f1c6f2005be5ce3349e3259ff6a,Face recognition using AAM and global shape features,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+492116d16a39eb54454c7ffb1754cea27ad3a171,Making Facial Expressions of Emotions Accessible for Visually Impaired Persons,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+49068538b7eef66b4254cc11914128097302fab8,Difficult detection: A comparison of two different approaches to eye detection for unconstrained environments,"Securics, Colorado Springs, CO","Securics, Inc. Colorado Springs, CO, USA","2805 Jet Wing Dr, Colorado Springs, CO 80916, USA",38.79202200,-104.75308990,company,
+496f3d14cf466f054d395a3c71fa2cd6a3dda61d,A fast identity-independent expression recognition system for robust cartoonification using smart devices,Indian Institute of Technology Varanasi,"Indian Institute of Technology (BHU) Varanasi, India","IIT-BHU, Banaras Hindu University Campus, Uttar Pradesh 221005, India",25.26232470,82.98937350,edu,
+49be50efc87c5df7a42905e58b092729ea04c2f5,Beyond Bag-of-Words: Fast video classification with Fisher Kernel Vector of Locally Aggregated Descriptors,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+496d62741e8baf3859c24bb22eaccd3043322126,Beyond Trace Ratio: Weighted Harmonic Mean of Trace Ratios for Multiclass Discriminant Analysis,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+493c8591d6a1bef5d7b84164a73761cefb9f5a25,User Profiling through Deep Multimodal Fusion,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+4014d74e8f5ea4d76c2c1add81d0c88d6e342478,Group emotion recognition in the wild by combining deep neural networks for facial expression classification and scene-context analysis,University of Newcastle,University of Newcastle,"University of Newcastle Central Coast Campus, Technology Bridge, Ourimbah, Central Coast, NSW, 2258, Australia",-33.35788990,151.37834708,edu,
+4014e8c1a1b49ad2b9b2c45c328ec9f1fd56f676,ADL: Active dictionary learning for sparse representation,Stevens Institute of Technology,Stevens Institute of Technology,"Stevens Institute of Technology, River Terrace, Hoboken, Hudson County, New Jersey, 07030, USA",40.74225200,-74.02709490,edu,
+4097fef623185557bb1842501cfdc97f812fc66d,CTC Network with Statistical Language Modeling for Action Sequence Recognition in Videos,Tokyo Institute of Technology,Tokyo Institute of Technology,"東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本",35.51675380,139.48342251,edu,
+407806f5fe3c5ecc2dc15b75d3d2b0359b4ee7e0,Enhanced independent spectral histogram representations in face recognition,Yonsei University,Yonsei University,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.56004060,126.93692480,edu,
+40f06e5c052d34190832b8c963b462ade739cbf0,Face recognition based on the quotient image method and sparse representation,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+405cf40f3ce74210f7e9862b2b828ce002b409ed,Comprehensive study of features for subject-independent emotion recognition,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+2e7e1ee7e3ee1445939480efd615e8828b9838f8,Automatically Detecting Pain in Video Through Facial Action Units,University of Northern British Columbia,University of Northern British Columbia,"UNBC, Campus Ring Road, College Heights, Prince George, Regional District of Fraser-Fort George, British Columbia, V2M 5K7, Canada",53.89256620,-122.81471592,edu,
+2e5d173ee0d1d7f88c335ade6a7b879b2d987ab4,Identify Visual Human Signature in community via wearable camera,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+2e535b8cd02c2f767670ba47a43ad449fa1faad7,Deep-Learning Systems for Domain Adaptation in Computer Vision: Learning Transferable Feature Representations,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+2ed7d95588200c8c738c7dd61b8338538e04ea30,Local and holistic texture analysis approach for face recognition,Mahanakorn University of Technology,Mahanakorn University of Technology,"มหาวิทยาลัยเทคโนโลยีมหานคร, 140, ถนนเชื่อมสัมพันธ์, กรุงเทพมหานคร, เขตหนองจอก, กรุงเทพมหานคร, 10530, ประเทศไทย",13.84450465,100.85620818,edu,
+2ef1b1b5ed732634e005df779fd9b21da0ffe60c,Pair of projections based on sparse consistence with applications to efficient face recognition,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+2e12c5ea432004de566684b29a8e148126ef5b70,Video modeling and learning on Riemannian manifold for emotion recognition in the wild,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+2b286ed9f36240e1d11b585d65133db84b52122c,Real-time 3D eyelids tracking from semantic edges,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+2b300985a507533db3ec9bd38ade16a32345968e,Laplacian multiset canonical correlations for multiview feature extraction and image recognition,Jiangnan University,Jiangnan University,"江南大学站, 蠡湖大道, 滨湖区, 南场村, 滨湖区 (Binhu), 无锡市 / Wuxi, 江苏省, 214121, 中国",31.48542550,120.27395810,edu,
+2b5005c2abf2d9a8c16afa50306b6959dfc72275,LBP-based biometric hashing scheme for human authentication,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+2b43100a13811b33cc9f905fa1334bfd8b1873ba,A hybrid Genetic Programming approach to feature detection and image classification,Victoria University of Wellington,Victoria University of Wellington,"Victoria University of Wellington, Waiteata Road, Aro Valley, Wellington, Wellington City, Wellington, 6040, New Zealand/Aotearoa",-41.29052775,174.76846919,edu,
+2bf646a6efd15ab830344ae9d43e10cc89e29f34,Structured AutoEncoders for Subspace Clustering,"Sichuan University, Chengdu","Sichuan Univ., Chengdu","四川大学(华西校区), 校东路, 武侯区, 武侯区 (Wuhou), 成都市 / Chengdu, 四川省, 610014, 中国",30.64276900,104.06751175,edu,
+2bcd9b2b78eb353ea57cf50387083900eae5384a,Image ranking and retrieval based on multi-attribute queries,IBM Thomas J. Watson Research Center,IBM Thomas J. Watson Research Center,"IBM Yorktown research lab, Adams Road, Millwood, Town of New Castle, Westchester County, New York, 10562, USA",41.21002475,-73.80407056,company,
+47cd161546c59ab1e05f8841b82e985f72e5ddcb,Gender classification in live videos,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+4786638ffb3b2fb385cec80720cc6e7c3588b773,Effective semantic features for facial expressions recognition using SVM,Tamkang University,Tamkang University,"淡江大學, 151, 英專路, 中興里, 鬼仔坑, 淡水區, 新北市, 25137, 臺灣",25.17500615,121.45076751,edu,
+4735fa28fa2a2af98f7b266efd300a00e60dddf7,Dual subspace nonnegative matrix factorization for person-invariant facial expression recognition,National Tsing Hua University,National Tsing Hua University,"國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.79254840,120.99511830,edu,
+7831ab4f8c622d91974579c1ff749dadc170c73c,Video-to-video face matching: Establishing a baseline for unconstrained face recognition,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+78d4d861c766af2a8da8855bece5da4e6eed2e1c,A comparison of facial feature representation methods for automatic facial expression recognition,University of the Western Cape,University of the Western Cape,"University of the Western Cape, Park Road, Cape Town Ward 9, Bellville, City of Cape Town, Western Cape, 7493, South Africa",-33.93277620,18.62915407,edu,
+78e1798c3077f4f8a4df04ca35cd73f82e9a38f3,A hierarchical algorithm with multi-feature fusion for facial expression recognition,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+78cec49ca0acd3b961021bc27d5cf78cbbbafc7e,Is face recognition really a Compressive Sensing problem?,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+782a05fbe30269ff8ab427109f5c4d0a577e5284,Deep Manifold Learning Combined With Convolutional Neural Networks for Action Recognition,"Baiyun District Bureau of Justice, Guangzhou, China","Baiyun District Bureau of Justice, Guangzhou, China","100 Xianlie Middle Rd, Yuexiu Qu, Guangzhou Shi, Guangdong Sheng, China",23.13941620,113.30038740,company,
+780c8a795baca1ba4cb4956cded877dd3d1ca313,Simulation of face recognition at a distance by scaling down images,University of Louisville,University of Louisville,"University of Louisville, South Brook Street, Louisville, Jefferson County, Kentucky, 40208, USA",38.21675650,-85.75725023,edu,
+785eeac2e236a85a45b4e0356c0745279c31e089,Learning Person-Specific Representations From Faces in the Wild,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+782eee555067b2d6d24db87775e1ded5fb047491,Adaptive Multiple Experts System for personal identification using facial behaviour biometrics,University of Technology,"Center for Quantum Computation and Intelligent Systems, Faculty of Engineering and Information Technology, University of Technology, Sydney, NSW, Australia","11 University of Technology Sydney 81, Broadway, Ultimo NSW 2007, Australia",-33.88405040,151.19922540,edu,
+8bf945166305eb8e304a9471c591139b3b01a1e1,Retrieval of TV Talk-Show Speakers by Associating Audio Transcript to Visual Clusters,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+8b1fa60b9164b60d1ca2705611fab063505a3ef5,Latent Facial Topics for affect analysis,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+8bebb26880274bdb840ebcca530caf26c393bf45,Real-time face recognition in HD videos: Algorithms and framework,Ahmedabad University,Ahmedabad University,"School of Science and Technology, University Road, Gurukul, Gulbai tekra, Ahmedabad, Ahmedabad District, Gujarat, 380001, India",23.03787430,72.55180046,edu,
+136aae348c7ebc6fd9df970b0657241983075795,Semi-supervised learning based on group sparse for relative attributes,Harbin Engineering University,Harbin Engineering University,"哈尔滨工程大学, 文庙街 - Wenmiao Street, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.77445695,126.67684917,edu,
+13179bb3f2867ea44647b6fe0c8fb4109207e9f5,Recurrent learning of context for salient region detection,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+7f5346a169c9784ca79aca5d95ae8bf2ebab58e3,Two-level multi-task metric learning with application to multi-classification,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+7fcecaef60a681c47f0476e54e08712ee05d6154,Deeply learned attributes for crowded scene understanding,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+7f203f2ff6721e73738720589ea83adddb7fdd27,Face alignment under variable illumination,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+7fb7ccc1aa093ca526f2d8b6f2c404d2c886f69a,A multi-view face database from Turkish TV series,Istanbul Technical University,Istanbul Technical University,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye",41.10427915,29.02231159,edu,
+7f904093e6933cab876e87532111db94c71a304f,Evaluation of gender classification methods on thermal and near-infrared face images,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+7fcd03407c084023606c901e8933746b80d2ad57,Local classifier chains for deep face recognition,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+7a595800b490ff437ab06fe7612a678d5fe2b57d,Improved concept similarity measuring in the visual domain,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+7a09e8f65bd85d4c79f0ae90d4e2685869a9894f,Face and Hair Region Labeling Using Semi-Supervised Spectral Clustering-Based Multiple Segmentations,Korea Advanced Institute of Science and Technology,Korea Advanced Institute of Science and Technology,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.36971910,127.36253700,edu,
+7a6e3ed956f71b20c41fbec008b1fa8dacad31a6,Enhanced facial expression recognition by age,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+7a91617ec959acedc5ec8b65e55b9490b76ab871,An efficient illumination invariant face recognition technique using two dimensional linear discriminant analysis,"Indian Statistical Institute, Kolkata","Indian Statistical Institute, Kolkata","Plot No. 203, Barrackpore Trunk Road, Baranagar, Kolkata, West Bengal 700108, India",22.64815210,88.37681700,edu,
+7adfc2f854e2ea45c29d22d6e2dcccdd527f46a8,Facial expression recognition using $${l}_{p}$$ l p -norm MKL multiclass-SVM,University of Denver,University of Denver,"University of Denver, Driscoll Bridge, Denver, Denver County, Colorado, 80208, USA",39.67665410,-104.96220300,edu,
+14d7bce17265738f10f48987bb7bffb3eafc676e,An integrated approach for efficient analysis of facial expressions,Kent State University,Kent State University,"Kent State University, Lester A. Lefton Esplanade, Whitehall Terrace, Kent, Portage County, Ohio, 44242-0001, USA",41.14435250,-81.33982833,edu,
+143571c2fc9b1b69d3172f8a35b8fad50bc8202a,Facial action unit recognition using multi-class classification,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+142e233adceed9171f718a214a7eba8497af4324,A new transfer learning Boosting approach based on distribution measure with an application on facial expression recognition,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+14ae16e9911f6504d994503989db34d2d1cb2cd4,Facial expression recognition using bag of distances,National Taichung University of Science and Technology,National Taichung University of science and Technology,"臺中科大, 129, 三民路三段, 錦平里, 賴厝廍, 北區, 臺中市, 40401, 臺灣",24.15031065,120.68325501,edu,
+1473e6f2d250307f0421f1e2ea68b6485d3bd481,Efficient feature extraction with simultaneous recurrent network for metric learning,Old Dominion University,Old Dominion University,"Old Dominion University, Elkhorn Avenue, Lamberts Point, Norfolk, Virginia, 23508, USA",36.88568200,-76.30768579,edu,
+8e63868e552e433dc536ba732f4c2af095602869,Automatic Texture Synthesis for Face Recognition from Single Views,Griffith University,Griffith University,"Griffith University Nathan Campus, Johnson Path, Nathan, Nathan Heights, QLD, 4111, Australia",-27.55339750,153.05336234,edu,
+8eb40d0a0a1339469a05711f532839e8ffd8126c,Facial Expression Recognition Based on Deep Evolutional Spatial-Temporal Networks,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+8e452379fda31744d4a4383fcb8a9eab6dbc4ae4,Principal Angles Separate Subject Illumination Spaces in YDB and CMU-PIE,Colorado State University,Colorado State University,"Colorado State University, West Pitkin Street, Woodwest, Fort Collins, Larimer County, Colorado, 80526-2002, USA",40.57093580,-105.08655256,edu,
+8e55486aa456cae7f04fe922689b3e99a0e409fe,LEAF: Latent Extended Attribute Features Discovery for Visual Classification,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+8ebe2df4d82af79f0f082ced70f3a73d7fb93b66,Relative attributes with deep Convolutional Neural Network,Korea Advanced Institute of Science and Technology,Korea Advanced Institute of Science and Technology,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.36971910,127.36253700,edu,
+8e272978dd1500ce6e4c2ef5e91d4332078ff757,Human Face Identification from Video Based on Frequency Domain Asymmetry Representation Using Hidden Markov Models,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+8e21399bb102e993edd82b003c306a068a2474da,A complete discriminative subspace for robust face recognition,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+228ea13041910c41b50d0052bdce924037c3bc6a,A Review Paper Between Open Source and Commercial SDK and Performance Comparisons of Face Matchers,"National Science and Technology Development Agency, Thailand","National Science and Technology Development Agency, National Electronics and Computer Technology Center, Pathum Thani, 12120, Thailand","Pathum Thani 12120, Thailand",14.09502500,100.66471010,gov,
+2238dddb76499b19035641d97711cf30d899dadb,Lip shape based emotion identification,"Atılım University, Ankara, Turkey","Elektrik - Elektronik Mühendisliği Bölümü, Atılım Üniversitesi, Ankara, Türkiye","Kızılcaşar Mahallesi, 06830 İncek Gölbaşı/Gölbaşı/Gölbaşı/Ankara, Turkey",39.81573110,32.72386770,edu,
+22894c7a84984bd4822dcfe7c76a74673a242c36,Automatic emotion recognition in the wild using an ensemble of static and dynamic representations,Istanbul Technical University,Istanbul Technical University,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye",41.10427915,29.02231159,edu,
+22a10d8d2a2cb9055557a3b335d6706100890afb,Comparison of matrix decomposition and SIFT descriptor based methods for face alignment,"Eskişehir Osmangazi University, Turkey","Elektrik ve Elektronik Mühendisliği Bölümü, Eskişehir Osmangazi Üniversitesi, Türkiye","Eskişehir Osmangazi Üniversitesi, Mühendislik Fakültesi, 26480 Odunpazarı/Eskişehir, Turkey",39.74871200,30.47595620,edu,
+22dbdace88c8f4bda2843ed421e3708ec0744237,"Real-time facial shape recovery from a single image under general, unknown lighting by rank relaxation",Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+22e121a8dea49e3042de305574356477ecacadda,Directional gradients integration image for illumination insensitive face representation,Hunan University,Hunan University,"Yejin University for Employees, 冶金西路, 和平乡, 珠晖区, 衡阳市 / Hengyang, 湖南省, 中国",26.88111275,112.62850666,edu,
+250b73ec5a4f78b7b4ea3aba65c27fc1352154d5,Constrained Multi-View Video Face Clustering,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+256b46b12ab47283e6ada05fad6a2b501de35323,Pose estimation using Spectral and Singular Value recomposition,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+258b3b1df82186dd76064ef86b28555e91389b73,Initial Shape Pool Construction for Facial Landmark Localization Under Occlusion,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+2564920d6976be68bb22e299b0b8098090bbf259,Face recognition algorithm based on cascading BGP feature fusion,"National University of Defense Technology, China","National University of Defence Technology, Changsha 410000, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.22902090,112.99483204,edu,
diff --git a/scraper/reports/doi_institutions_unattributed.csv b/scraper/reports/doi_institutions_unattributed.csv
new file mode 100644
index 00000000..bf5d6ea8
--- /dev/null
+++ b/scraper/reports/doi_institutions_unattributed.csv
@@ -0,0 +1,406 @@
+6196f4be3b28684f6528b8687adccbdf9ac5c67c,Recommending Outfits from Personal Closet
+61971f8e6fff5b35faed610d02ad14ccfc186c70,Identity-Adaptive Facial Expression Recognition through Expression Regeneration Using Conditional Generative Adversarial Networks
+0d90c992dd08bfb06df50ab5c5c77ce83061e830,An Automatic Face Annotation System Featuring High Accuracy for Online Social Networks
+0d3ff34d8490a9a53de1aac1dea70172cb02e013,Cross-Database Evaluation of Normalized Raw Pixels for Gender Recognition under Unconstrained Settings
+95288fa7ff4683e32fe021a78cbf7d3376e6e400,Football Action Recognition Using Hierarchical LSTM
+592f14f4b12225fc691477a180a2a3226a5ef4f0,"Inferring Visual Persuasion via Body Language, Setting, and Deep Features"
+9285f4a6a06e975bde3ae3267fccd971d4fff98a,Attentional Push: A Deep Convolutional Network for Augmenting Image Salience with Shared Attention Modeling in Social Scenes
+0cf1287c8fd41dcef4ac03ebeab20482f02dce20,User-Demand-Oriented Privacy-Preservation in Video Delivering
+66490b5869822b31d32af7108eaff193fbdb37b0,Cascade Multi-View Hourglass Model for Robust 3D Face Alignment
+663efaa0671eace1100fdbdecacd94216a17b1db,A Max-Margin Riffled Independence Model for Image Tag Ranking
+3ebb0209d5e99b22c67e425a67a959f4db8d1f47,Subspace-Based Convolutional Network for Handwritten Character Recognition
+3e3227c8e9f44593d2499f4d1302575c77977b2e,Facial Expression Recognition Using a Large Out-of-Context Dataset
+3e9ab40e6e23f09d16c852b74d40264067ac6abc,Learning Locally-Adaptive Decision Functions for Person Verification
+3ec860cfbd5d953f29c43c4e926d3647e532c8b0,Gabor-Based Region Covariance Matrices for Face Recognition
+57f4e54a63ef95596dbc743f391c3fff461f278b,On the Application of the Probabilistic Linear Discriminant Analysis to Face Recognition across Expression
+57178b36c21fd7f4529ac6748614bb3374714e91,IARPA Janus Benchmark - C: Face Dataset and Protocol
+57dc55edade7074f0b32db02939c00f4da8fe3a6,Yaw Estimation Using Cylindrical and Ellipsoidal Face Models
+3b21aaf7def52964cf1fcc5f11520a7618c8fae3,Joint Discriminative Bayesian Dictionary and Classifier Learning
+03e1480f1de2ffbd85655d68aae63a01685c5862,Indian Classical Dance Classification on Manifold Using Jensen-Bregman LogDet Divergence
+9ba358281f2946cba12fff266019193a2b059590,Local Normalization with Optimal Adaptive Correlation for Automatic and Robust Face Detection on Video Sequences
+9ba4a5e0b7bf6e26563d294f1f3de44d95b7f682,To Frontalize or Not to Frontalize: Do We Really Need Elaborate Pre-processing to Improve Face Recognition?
+9b0ead0a20a2b7c4ae40568d8d1c0c2b23a6b807,Efficient Multi-attribute Similarity Learning Towards Attribute-Based Fashion Search
+9e105c4a176465d14434fb3f5bae67f57ff5fba2,SmartPartNet: Part-Informed Person Detection for Body-Worn Smartphones
+9e28243f047cc9f62a946bf87abedb65b0da0f0a,Can We Minimize the Influence Due to Gender and Race in Age Estimation?
+32bab8fe6db08c9d1e906be8a9c7e8cf7a0f0b99,Audio-Visual Recognition System with Intra-Modal Fusion
+35208eda874591eac70286441d19785726578946,Deep Secure Encoding for Face Template Protection
+352a620f0b96a7e76b9195a7038d5eec257fd994,Kinship Classification through Latent Adaptive Subspace
+699b8250fb93b3fa64b2fc8f59fef036e172564d,Spontaneous Facial Expression Recognition: A Part Based Approach
+6932baa348943507d992aba75402cfe8545a1a9b,Stacked Hourglass Network for Robust Facial Landmark Localisation
+6966d9d30fa9b7c01523425726ab417fd8428790,Exemplar-Based Face Parsing
+3c0fffd4cdbfef4ccd92d528d8b8a60ab0929827,An Empirical Study of Face Recognition under Variations
+3c6542295cf7fe362d7d629ac10670bf30cdabce,Hierarchical Aggregation Based Deep Aging Feature for Age Prediction
+56fd4c05869e11e4935d48aa1d7abb96072ac242,OpenFace 2.0: Facial Behavior Analysis Toolkit
+519f1486f0755ef3c1f05700ea8a05f52f83387b,A Fast Extension for Sparse Representation on Robust Face Recognition
+5180c98815d7034e753a14ef6f54583f115da3aa,Challenging 3D Head Tracking and Evaluation Using Unconstrained Test Data Set
+3dce635ce4b55fb63fc6d41b38640403b152a048,The Impact of Age and Threshold Variation on Facial Recognition Algorithm Performance Using Images of Children
+3d2c89676fcc9d64aaed38718146055152d22b39,Nuclear Norm Based 2DPCA
+3dfbd17bd9caf7bd1d908ff469dec2b61e8a9548,Usability of Pilot's Gaze in Aeronautic Cockpit for Safer Aircraft
+3d6f59e0f0e16d01b9c588a53d3b6b3b984e991e,Learning Local Responses of Facial Landmarks with Conditional Variational Auto-Encoder for Face Alignment
+67214e8d2f83eb41c14bfc86698eb6620e72e87c,What Makes a Style: Experimental Analysis of Fashion Prediction
+0bab5213911c19c40e936b08d2f8fba01e286b85,Cascaded Pose Regression Revisited: Face Alignment in Videos
+0ba5369c5e1e87ea172089d84a5610435c73de00,A Multi-task Convolutional Neural Network for Joint Iris Detection and Presentation Attack Detection
+0b82bf595e76898993ed4f4b2883c42720c0f277,Improving Face Recognition by Exploring Local Features with Visual Attention
+93dcea2419ca95b96a47e541748c46220d289d77,Multi-scale Fully Convolutional Network for Face Detection in the Wild
+94806f0967931d376d1729c29702f3d3bb70167c,Discriminative Hierarchical Rank Pooling for Activity Recognition
+942f6eb2ec56809430c2243a71d03cc975d0a673,Fooling Neural Networks in Face Attractiveness Evaluation: Adversarial Examples with High Attractiveness Score But Low Subjective Score
+94b729f9d9171e7c4489995e6e1cb134c8521f4e,Geometrical Approaches for Facial Expression Recognition Using Support Vector Machines
+0e4fa61871755b5548a5c970c8103f7b2ada24f3,Partial Face Recognition Based on Template Matching
+60777fbca8bff210398ec8b1179bc4ecb72dfec0,A Deformable Mixture Parsing Model with Parselets
+60462b981fda63c5f9d780528a37c46884fe0b54,Statistical Machine Learning vs Deep Learning in Information Fusion: Competition or Collaboration?
+34c1e9a6166f4732d1738db803467f7abc47ba87,Image Set Classification Using Sparse Bayesian Regression
+5a547df635a9a56ac224d556333d36ff68cbf088,Cross Local Gabor Binary Pattern Descriptor with Probabilistic Linear Discriminant Analysis for Pose-Invariant Face Recognition
+5fce9d893a40c4e0f2ae335b2e68bfd02f1cb2c6,A Methodology for Detecting Faces from Different Views
+050e51268b0fb03033428ac777ccfef2db752ab3,"Fast, Accurate and Robust Recognition Based On Local Normalized Linear Summation Kernel"
+9d1cebed7672210f9c411c5ba422a931980da833,Relational Learning Based Happiness Intensity Analysis in a Group
+9cc8cf0c7d7fa7607659921b6ff657e17e135ecc,Detecting Masked Faces in the Wild with LLE-CNNs
+9ca542d744149f0efc8b8aac8289f5e38e6d200c,Gender and Smile Classification Using Deep Convolutional Neural Networks
+028e237cb539b01ec72c244f57fdcfb65bbe53d4,An Improved DLDA Based Method- Nonparametric DLDA
+021e008282714eaefc0796303f521c9e4f199d7e,NCC-Net: Normalized Cross Correlation Based Deep Matcher with Robustness to Illumination Variations
+a325d5ea42a0b6aeb0390318e9f65f584bd67edd,Fine-Grained Visual Comparisons with Local Learning
+b5747ecfa0f3be0adaad919d78763b1133c4d662,Attribute-Assisted Domain Transfer from Image to Sketch
+b58d381f9f953bfe24915246b65da872aa94f9aa,Recommending New Links in Social Networks Using Face Recognition
+b5fdd7778503f27c9d9bf77fab193b475fab6076,Changes in Facial Expression as Biometric: A Database and Benchmarks of Identification
+b598f7761b153ecb26e9d08d3c5817aac5b34b52,A Simulated Annealing and 2DPCA Based Method for Face Recognition
+b5ca8d4f259f35c1f3edfd9f108ce29881e478b0,Disentangled Representation Learning GAN for Pose-Invariant Face Recognition
+b299c292b84aeb4f080a8b39677a8e0d07d51b27,Part-Level Regularized Semi-Nonnegative Coding for Semi-Supervised Learning
+b2add9fad0bcf7bf0660f99f389672cdf7cc6a70,Doppelganger Mining for Face Representation Learning
+b262a2a543971e10fcbfc7f65f46115ae895d69e,Illumination Invariant Efficient Face Recognition Using a Single Training Image
+d916602f694ebb9cf95d85e08dd53f653b6196c3,A Novel Space-Time Representation on the Positive Semidefinite Cone for Facial Expression Recognition
+d9072e6b7999bc2d5750eb58c67a643f38d176d6,Learning Kernel in Kernel-Based LDA for Face Recognition Under Illumination Variations
+d92084e376a795d3943df577d3b3f3b7d12eeae5,Face and Image Representation in Deep CNN Features
+d9deafd9d9e60657a7f34df5f494edff546c4fb8,Learning the Multilinear Structure of Visual Data
+ad7b6d2e8d66f720cc83323a0700c25006d49609,Face Recognition Under Varying Illumination Using Gradientfaces
+adb040081974369c46b943e9f75be4e405623102,Cascade Two-dimensional Locality Preserving Projections for Face Recognition
+bb070c019c0885232f114c7dca970d2afd9cd828,A Novel Landmark Detector System for Multi Resolution Frontal Faces
+bb4f83458976755e9310b241a689c8d21b481238,Improving Face Verification and Person Re-Identification Accuracy Using Hyperplane Similarity
+bb0ecedde7d6e837dc9a5e115302a2aaad1035e1,Face Verification: Strategies for Employing Deep Models
+d790093cb85fc556c0089610026e0ec3466ab845,Learning Assignment Order of Instances for the Constrained K-Means Clustering Algorithm
+d0b7d3f9a59034d44e7cd1b434cfd27136a7c029,Facial Emotion Recognition Using PHOG and a Hierarchical Expression Model
+be51854ef513362bc236b85dd6f0e2c2da51614b,Learning to Identify While Failing to Discriminate
+be6bd94322dd0ecfc8ea99eb7f40a9a14dd3471f,Automatic Face Annotation System Used Pyramid Database Architecture for Online Social Networks
+be7444c891caf295d162233bdae0e1c79791d566,Face Recognition Performance under Aging
+beb2f1a6f3f781443580ffec9161d9ce6852bf48,Deep Spatio-Temporal Representation Learning for Multi-Class Imbalanced Data Classification
+b36a80d15c3e48870ea6118b855055cc34307658,Facial 3D Shape Estimation from Images for Visual Speech Animation
+daa120032d8f141bc6aae20e23b1b754a0dd7d5f,Kernel ELM and CNN Based Facial Age Estimation
+daca9d03c1c951ed518248de7f75ff51e5c272cb,Feature Learning Using Bayesian Linear Regression Model
+daa4cfde41d37b2ab497458e331556d13dd14d0b,Multi-view Constrained Local Models for Large Head Angle Facial Tracking
+da23d90bacf246b75ef752a2cbb138c4fcd789b7,Facial Action Unit Detection Using Active Learning and an Efficient Non-linear Kernel Approximation
+b42b535fcd0d9bd41a6594a910ea4623e907ceb9,Model Representation for Facial Expression Recognition Based on Shape and Texture
+a2e0966f303f38b58b898d388d1c83e40b605262,ECLIPSE: Ensembles of Centroids Leveraging Iteratively Processed Spatial Eclipse Clustering
+a5acda0e8c0937bfed013e6382da127103e41395,Extended DISFA Dataset: Investigating Posed and Spontaneous Facial Expressions
+bd74c3ca2ff03396109ac2d1131708636bd0d4d3,Low-Shot Face Recognition with Hybrid Classifiers
+d6c8f5674030cf3f5a2f7cc929bad37a422b26a0,Face Aging Simulation with Deep Convolutional Generative Adversarial Networks
+d6e3bd948aae43f7654ea1d9e89d88f20d8cf25f,Recognizing Conversational Expressions Using Latent Dynamic Conditional Random Fields
+d6bdc70d259b38bbeb3a78db064232b4b4acc88f,Video-Based Face Association and Identification
+bccb35704cdd3f2765b1a3f0296d1bff3be019c1,ADHD and ASD Classification Based on Emotion Recognition Data
+bc36badb6606b8162d821a227dda09a94aac537f,An Optimization Model for Human Activity Recognition Inspired by Information on Human-Object Interaction
+ae73f771d0e429a74b04a6784b1b46dfe98f53e4,Simultaneous Detection of Multiple Facial Action Units via Hierarchical Task Structure Learning
+ae5e92abd5929ee7f0a5aa1622aa094bac4fae29,RGB-D Face Recognition via Deep Complementary and Common Feature Learning
+aeb6b9aba5bb08cde2aebfeda7ced6c38c84df4a,Evaluating Automated Facial Age Estimation Techniques for Digital Forensics
+d8526863f35b29cbf8ac2ae756eaae0d2930ffb1,Face Generation for Low-Shot Learning Using Generative Adversarial Networks
+d8c9ce0bd5e4b6d1465402a760845e23af5ac259,Robust Face Detection with Eyes Occluded by the Shadow from Dazzling Avoidance System
+e52f73c77c7eaece6f2d8fdd0f15327f9f007261,Flexible Spatio-Temporal Networks for Video Prediction
+e5fbaeddbf98c667ec7c5575bda2158a36b55409,Facial Expression Recognition in Image Sequences Using Active Shape Model and SVM
+e295c1aa47422eb35123053038e62e9aa50a2e3a,ChaLearn Looking at People 2015: Apparent Age and Cultural Event Recognition Datasets and Results
+f402e088dddfaad7667bd4def26092d05f247206,Passenger Compartment Violation Detection in HOV/HOT Lanes
+f38813f1c9dac44dcb992ebe51c5ede66fd0f491,Modeling Temporal Structure with LSTM for Online Action Detection
+f33bd953d2df0a5305fc8a93a37ff754459a906c,Deformable Models of Ears in-the-Wild for Alignment and Recognition
+eba4cfd76f99159ccc0a65cab0a02db42b548d85,Spoken Attributes: Mixing Binary and Relative Attributes to Say the Right Thing
+ebde9b9c714ed326157f41add8c781f826c1d864,Classification of Puck Possession Events in Ice Hockey
+eb3066de677f9f6131aab542d9d426aaf50ed2ce,Deep Transfer Network with 3D Morphable Models for Face Recognition
+c05ae45c262b270df1e99a32efa35036aae8d950,Predicting Facial Attributes in Video Using Temporal Coherence and Motion-Attention
+c0270a57ad78da6c3982a4034ffa195b9e932fda,Multi-level Feature Learning for Face Recognition under Makeup Changes
+c0c0b8558b17aa20debc4611275a4c69edd1e2a7,Facial Expression Recognition via a Boosted Deep Belief Network
+eece52bd0ed4d7925c49b34e67dbb6657d2d649b,Aff-Wild: Valence and Arousal ‘In-the-Wild’ Challenge
+c91da328fe50821182e1ae4e7bcbe2b62496f8b9,Semantic Subspace Projection and Its Applications in Image Retrieval
+c9c9ade2ef4dffb7582a629a47ea70c31be7a35e,Detecting Faces Using Inside Cascaded Contextual CNN
+c900e0ad4c95948baaf0acd8449fde26f9b4952a,"EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild"
+c98b13871a3bc767df0bdd51ff00c5254ede8b22,Fast and Exact: ADMM-Based Discriminative Shape Segmentation with Loopy Part Models
+fd9ab411dc6258763c95b7741e3d51adf5504040,Eigenbubbles: An Enhanced Apparent BRDF Representation
+fd5376fcb09001a3acccc03159e8ff5801129683,Facial Micro-Expressions Grand Challenge 2018 Summary
+f2700e3d69d3cce2e0b1aea0d7f87e74aff437cd,Attention-Aware Deep Reinforcement Learning for Video Face Recognition
+f2004fff215a17ac132310882610ddafe25ba153,Facial Expression Recognition via Deep Learning
+f5603ceaebe3caf6a812edef9c4b38def78cbf34,Tailoring Model-Based Techniques to Facial Expression Interpretation
+f531ce18befc03489f647560ad3e5639566b39dc,Boosting Speed and Accuracy in Deformable Part Models for Face Image in the Wild
+f545b121b9612707339dfdc40eca32def5e60430,A Novel Gabor Filter Selection Based on Spectral Difference and Minimum Error Rate for Facial Expression Recognition
+f58f30932e3464fc808e539897efa4ee4e7ac59f,Complex Event Detection Using Joint Max Margin and Semantic Features
+e3ce4c3e1279e3dc0c14ff3bb2920aced9e62638,Spatio-Temporal Vector of Locally Max Pooled Features for Action Recognition in Videos
+e3d76f1920c5bf4a60129516abb4a2d8683e48ae,I Know That Person: Generative Full Body and Face De-identification of People in Images
+ca447d6479554b27b4afbd0fd599b2ed39f2c335,Automatic Video Genre Classification Using Multiple SVM Votes
+e4df98e4b45a598661a47a0a8900065716dafd6d,Weakly Supervised Object Class Learning Via Discriminative Subspace Models
+fe866887d3c26ee72590c440ed86ffc80e980293,Understanding Human Aging Patterns from a Machine Perspective
+fe50efe9e282c63941ec23eb9b8c7510b6283228,A Facial Expression Recognition System Using Convolutional Networks
+fecccc79548001ecbd6cafd3067bcf14de80b11a,Camera Selection for Broadcasting Soccer Games
+c8585c95215bc53e28edb740678b3a0460ca8aa4,Facial Action Unit Recognition Augmented by Their Dependencies
+c808c784237f167c78a87cc5a9d48152579c27a4,Know You at One Glance: A Compact Vector Representation for Low-Shot Learning
+ed82f10e5bfe1825b9fa5379a1d0017b96fa1ebf,A Face-Recognition Approach Using Deep Reinforcement Learning Approach for User Authentication
+c15b68986ecfa1e13e3791686ae9024f66983f14,Inferring Hidden Statuses and Actions in Video by Causal Reasoning
+ec89f2307e29cc4222b887eb0619e0b697cf110d,Face Recognition Using Dual-Tree<newline/> Complex Wavelet Features
+ec39e9c21d6e2576f21936b1ecc1574dadaf291e,Pose-Robust Face Verification by Exploiting Competing Tasks
+ec00ecb64fa206cea8b2e716955a738a96424084,Intelligent Synthesis Driven Model Calibration: Framework and Face Recognition Application
+4e8f301dbedc9063831da1306b294f2bd5b10477,Discriminating Power of FISWG Characteristic Descriptors Under Different Forensic Use Cases
+4e061a302816f5890a621eb278c6efa6e37d7e2f,Discriminative Deep Metric Learning for Face Verification in the Wild
+4e343c66c5fe7426132869d552f0f205d1bc5307,Automatic Image Attribute Selection for Zero-Shot Learning of Object Categories
+4e1258db62e4762fd8647b250fda9c3567f86eb8,Online Facial Expression Recognition Based on Finite Beta-Liouville Mixture Models
+4ee94572ae1d9c090fe81baa7236c7efbe1ca5b4,"TenniSet: A Dataset for Dense Fine-Grained Event Recognition, Localisation and Description"
+18010284894ed0edcca74e5bf768ee2e15ef7841,DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations
+2724ba85ec4a66de18da33925e537f3902f21249,Robust Face Landmark Estimation under Occlusion
+4b7f21b48c7e0dc7334e36108f558d54642c17c0,Describing Unseen Classes by Exemplars: Zero-Shot Learning Using Grouped Simile Ensemble
+113b06e70b7eead8ae7450bafe9c91656705024c,Face Alignment across Large Pose via MT-CNN Based 3D Shape Reconstruction
+116f9e9cda25ff3187bc777ceb3ecd28077a7eca,Barycentric Representation and Metric Learning for Facial Expression Recognition
+1135a818b756b057104e45d976546970ba84e612,"Age, Gender, and Fine-Grained Ethnicity Prediction Using Convolutional Neural Networks for the East Asian Face Dataset"
+29db16efc3b378c50511f743e5197a4c0b9e902f,Deeply Learned Rich Coding for Cross-Dataset Facial Age Estimation
+7c8909da44e89a78fe88e815c83a4ced34f99149,Multi-classifier Q-stack Aging Model for Adult Face Verification
+7c11fa4fd91cb57e6e216117febcdd748e595760,Discriminant Feature Manifold for Facial Aging Estimation
+163ba5a998973f9ead6be0ca873aed5934d5022e,PFW: A Face Database in the Wild for Studying Face Identification and Verification in Uncontrolled Environment
+166ef5d3fd96d99caeabe928eba291c082ec75a0,A 3D Morphable Model of Craniofacial Shape and Texture Variation
+42a5dc91852c8c14ed5f4c3b451c9dc98348bc02,A Data Augmentation Methodology to Improve Age Estimation Using Convolutional Neural Networks
+89272b78b651038ff4d294b9ccca0018d2c9033b,Low Computation Face Verification Using Class Center Analysis
+891b31be76e2baa83745f24c2e2013851dc83cbb,Improved Face Representation by Nonuniform Multilevel Selection of Gabor Convolution Features
+45877ff4694576f59c2a9ca45aa65f935378492a,A Novel Clustering-Based Feature Extraction Method for an Automatic Facial Expression Analysis System
+1fa426496ed6bcd0c0b17b8b935a14c84a7ee1c2,Binary Coding for Partial Action Analysis with Limited Observation Ratios
+7360a2adcd6e3fe744b7d7aec5c08ee31094dfd4,Deep and Deformable: Convolutional Mixtures of Deformable Part-Based Models
+73d53a7c27716ae9a6d3484e78883545e53117ae,A Multi-local Means Based Nearest Neighbor Classifier
+87806c51dc8c1077953178367dcf5c75c553ce34,VISAGE: A Support Vector Machine Approach to Group Dynamic Analysis
+87b607b8d4858a16731144d17f457a54e488f15d,Cross-Age Face Recognition on a Very Large Database: The Performance versus Age Intervals and Improvement Using Soft Biometric Traits
+8706c3d49d1136035f298041f03bb70dc074f24d,Averaged Gabor Filter Features for Facial Expression Recognition
+7477cf04c6b086108f459f693a60272523c134db,Learning Structured Low-Rank Representations for Image Classification
+289cfcd081c4393c7d6f63510747b5372202f855,Detecting Decision Ambiguity from Facial Images
+28715fc79bd5ff8dd8b6fc68a4f2641e5d1b8a08,Deep Label Distribution Learning for Apparent Age Estimation
+28f1542c63f5949ee6f2d51a6422244192b5a900,"You Lead, We Exceed: Labor-Free Video Concept Learning by Jointly Exploiting Web Videos and Images"
+7bd37e6721d198c555bf41a2d633c4f0a5aeecc1,Fusing Local Patterns of Gabor and Non-subsampled Contourlet Transform for Face Recognition
+7b455cbb320684f78cd8f2443f14ecf5f50426db,A Fast and Robust Negative Mining Approach for Enrollment in Face Recognition Systems
+8a8127a06f432982bfb0150df3212f379b36840b,Analysis of Yawning Behaviour in Spontaneous Expressions of Drowsy Drivers
+8a6033cbba8598945bfadd2dd04023c2a9f31681,3D-Assisted Coarse-to-Fine Extreme-Pose Facial Landmark Detection
+7e8c8b1d72c67e2e241184448715a8d4bd88a727,Face Verification Based on Relational Disparity Features and Partial Least Squares Models
+1071dde48a77f81c35ad5f0ca90a9daedb54e893,A Monocular Video-Based Facial Expression Recognition System by Combining Static and Dynamic Knowledge
+10e4172dd4f4a633f10762fc5d4755e61d52dc36,Learning Multifunctional Binary Codes for Both Category and Attribute Oriented Retrieval Tasks
+197efbef17f92e5cb5076961b6cd9f59e88ffd9a,Human Action Recognition Using Optical Flow and Convolutional Neural Networks
+193bc8b663d041bc34134a8407adc3e546daa9cc,A Quantitative Comparison of Methods for 3D Face Reconstruction from 2D Images
+19c82eacd77b35f57ac8815b979716e08e3339ca,Facial Expression Recognition Using Multiple Feature Sets
+4c72a51a7c7288e6e17dfefe4f87df47929608e7,"Automatic Face Recognition of Newborns, Infants, and Toddlers: A Longitudinal Evaluation"
+26b9d546a4e64c1d759c67cd134120f98a43c2a6,Polynomial Correlation Filters for Human Face Recognition
+26575ad9e75efb440a7dc4ef8e548eed4e19dbd1,Video Frame Interpolation Based on Multi-scale Convolutional Network and Adversarial Training
+4d90d7834ae25ee6176c096d5d6608555766c0b1,Face and Body Association for Video-Based Face Recognition
+814369f171337ee1d8809446b7dbfc5e1ef9f4b5,3D Active Shape Model for Automatic Facial Landmark Location Trained with Automatically Generated Landmark Points
+86597fe787e0bdd05935d25158790727257a40bd,Synthetic Prior Design for Real-Time Face Tracking
+863ad2838b9b90d4461995f498a39bcd2fb87c73,Learning Spatiotemporal Features Using 3DCNN and Convolutional LSTM for Gesture Recognition
+440b94b1624ca516b07e72ea8b3488072adc5e26,Comparison of Early and Late Information Fusion for Multi-camera HOV Lane Enforcement
+44c278cbecd6c1123bfa5df92e0bda156895fa48,Head Pose Estimation by Instance Parameterization
+2a826273e856939b58be8779d2136bffa0dddb08,Investigating Deep Neural Forests for Facial Expression Recognition
+2ac7bb3fb014d27d3928a9b4bc1bf019627e0c1a,Exploring Textures in Traffic Matrices to Classify Data Center Communications
+2a612a7037646276ff98141d3e7abbc9c91fccb8,A Compact and Discriminative Face Track Descriptor
+2f1485994ef2c09a7bb2874eb8252be8fe710db1,Dynamic Image Networks for Action Recognition
+2f5b51af8053cf82ab52bbfd46b56999222ec21c,Online Regression of Grandmother-Cell Responses with Visual Experience Learning for Face Recognition
+2f69e9964f3b6bdc0d18749b48bb6b44a4171c64,Learning Neural Networks with Ranking-Based Losses for Action Retrieval
+437642cfc8c34e445ea653929e2d183aaaeeb704,Component Biologically Inspired Features with Moving Segmentation for Age Estimation
+4342a2b63c9c344d78cf153600cd918a5fecad59,Synergy between Face Alignment and Tracking via Discriminative Global Consensus Optimization
+88e2efab01e883e037a416c63a03075d66625c26,Convolutional Experts Constrained Local Model for 3D Facial Landmark Detection
+9fab78015e6e91ba7241a923222acd6c576c6e27,Clothes Advertising by Targeting Principal Actors in Video
+9f62ac43a1086c22b9a3d9f192c975d1a5a4b31f,Lighting Analysis and Texture Modification of 3D Human Face Scans
+6baaa8b763cc5553715766e7fbe7abb235fae33c,Facial Attributes Classification Using Multi-task Representation Learning
+6b99cd366f2ea8e1c9abadf73b05388c0e24fec3,Jointly Learning Energy Expenditures and Activities Using Egocentric Multimodal Signals
+6b8329730b2e13178a577b878631735a1cd58a71,A Real-Time Big Data Architecture for Glasses Detection Using Computer Vision Techniques
+070c8ee3876c06f9a65693e536d61097ace40417,How Do Facial Expressions Contribute to Age Prediction?
+0733ec1953f6c774eb3a723618e1268586b46359,Recognition of facial expressions and measurement of levels of interest from video
+3826e47f0572ab4d0fe34f0ed6a49aa8303e0428,Joint Alignment and Clustering via Low-Rank Representation
+3827f1cab643a57e3cd22fbffbf19dd5e8a298a8,One-Shot Face Recognition via Generative Learning
+6e38011e38a1c893b90a48e8f8eae0e22d2008e8,A Computer Vision Based Approach for Understanding Emotional Involvements in Children with Autism Spectrum Disorders
+9aab33ce8d6786b3b77900a9b25f5f4577cea461,Automatic Semantic Face Recognition
+09f9409430bba2afb84aa8214dbbb43bfd4cf056,Uncertainty Estimation Using Fuzzy Measures for Multiclass Classification
+09138ad5ad1aeef381f825481d1b4f6b345c438c,Low-resolution Face Recognition with Variable Illumination Based on Differential Images
+098363b29eef1471c494382338687f2fe98f6e15,Metadata-Based Feature Aggregation Network for Face Recognition
+5ddfd3d372f7679518db8fd763d5f8bc5899ed67,"Cascaded Fusion of Dynamic, Spatial, and Textural Feature Sets for Person-Independent Facial Emotion Recognition"
+31ba7f5e09a2f0fe9cf7ea95314723206dcb6059,UHDB31: A Dataset for Better Understanding Face Recognition Across Pose and Illumination Variation
+310fe4e6cb6d090f7817de4c1034e35567b56e34,Robust Multi-pose Facial Expression Recognition
+31d51e48dbd9e7253eafe0719f3788adb564a971,Visual Phrases for Exemplar Face Detection
+3157be811685c93d0cef7fa4c489efea581f9b8e,Multi-spectral Imaging for Robust Ocular Biometrics
+31ec1e5c3b5e020af4a5a3c1be2724c7429a7c78,Instance-Aware Detailed Action Labeling in Videos
+913062218c7498b2617bb9d7821fe1201659c5cc,Cross-Domain Facial Expression Recognition Using Supervised Kernel Mean Matching
+65869cc5ef00d581c637ae8ea6ca02ae4bb2b996,A Pairwise Covariance-Preserving Projection Method for Dimension Reduction
+629a973ca5f3c7d2f4a9befab97d0044dfd3167a,Facial Expression Recognition: A Fully Integrated Approach
+9649a19b49607459cef32f43db4f6e6727080bdb,Offset Neural Network for Document Orientation Identification
+9806d3dc7805dd8c9c20d7222c915fc4beee7099,Self-Stimulatory Behaviours in the Wild for Autism Diagnosis
+53de11d144cd2eda7cf1bb644ae27f8ef2489289,Extending Detection with Privileged Information via Generalized Distillation
+5305bfdff39ae74d2958ba28d42c16495ce2ff86,Regularized Least-Squares Coding with Unlabeled Dictionary for Image-Set Based Face Recognition
+3ffbc912de7bad720c995385e1fdc439b1046148,A Face Recognition Algorithm Decreasing the Effect of Illumination
+3fe3d6ff7e5320f4395571131708ecaef6ef4550,TV News Retrieval Based on Story Segmentation and Concept Association
+3f0c6dbfd3c9cd5625ba748327d69324baa593a6,Head Pose Estimation on Low-Quality Images
+3fc173805ed43602eebb7f64eea4d60c0386c612,Semi-supervised Bi-dictionary Learning Using Smooth Representation-Based Label Propagation
+30044dd951133187cb8b57e53a22cf9306fa7612,Predicting the Perceptual Demands of Urban Driving with Video Regression
+5b5b568a0ba63d00e16a263051c73e09ab83e245,Scaling Datacenter Accelerators with Compute-Reuse Architectures
+6dcf418c778f528b5792104760f1fbfe90c6dd6a,"AgeDB: The First Manually Collected, In-the-Wild Age Database"
+01e14d8ffd6767336d50c2b817a7b7744903e567,Deep Network Shrinkage Applied to Cross-Spectrum Face Recognition
+06ab24721d7117974a6039eb2e57d1545eee5e46,Biomechanical-Based Approach to Data Augmentation for One-Shot Gesture Recognition
+6c1227659878e867a01888eef472dd96b679adb6,Temporal Difference Networks for Video Action Recognition
+6c01b349edb2d33530e8bb07ba338f009663a9dd,Cross-Media Alignment of Names and Faces
+398558817e05e8de184cc4c247d4ea51ab9d4d58,Extraction and Selection of Muscle Based Features for Facial Expression Recognition
+9939498315777b40bed9150d8940fc1ac340e8ba,ChaLearn Looking at People and Faces of the World: Face AnalysisWorkshop and Challenge 2016
+99cd84a62edb2bda2fc2fdc362a72413941f6aa4,Support Vector Regression of Sparse Dictionary-Based Features for View-Independent Action Unit Intensity Estimation
+521aa8dcd66428b07728b91722cc8f2b5a73944b,Pseudo-Labeling Using Gaussian Process for Semi-Supervised Deep Learning
+525da67fb524d46f2afa89478cd482a68be8a42b,Learning to Generate 3D Stylized Character Expressions from Humans
+55ee484f9cbd62111512485e3c1c3eadbf2e15c0,Multi-Output Random Forests for Facial Action Unit Detection
+556875fb04ed6043620d7ca04dfe3d8b3a9284f5,Interaction Recognition Using Sparse Portraits
+97b5800e144a8df48f1f7e91383b0f37bc37cf60,Weakly Supervised Summarization of Web Videos
+9774430006f1ed017156b17f3cf669071e398c58,Discriminant Multi-component Face Analysis
+6318d3842b36362bb45527b717e1a45ae46151d5,Harnessing Object and Scene Semantics for Large-Scale Video Understanding
+633c851ebf625ad7abdda2324e9de093cf623141,Apparent and Real Age Estimation in Still Images with Deep Residual Regressors on Appa-Real Database
+0fee3b9191dc1cef21f54232a23530cd8169d3b2,A Fast Iterative Algorithm for Improved Unsupervised Feature Selection
+0ac2e8bd5a77d83bae9b49daab2c6f321e9b7a4e,Design of Radial Basis Function Neural Networks with Principal Component Analysis and Linear Discriminant Analysis for Black Plastic Identification
+640e12837241d52d04379d3649d050ee3760048c,Automatic Recognition of Smiling and Neutral Facial Expressions
+9057044c0347fb9798a9b552910a9aff150385db,Sparse Representation Based Face Recognition with Limited Labeled Samples
+9077365c9486e54e251dd0b6f6edaeda30ae52b9,Convolutional Neural Network-Based Video Super-Resolution for Action Recognition
+90c4deaa538da42b9b044d7b68c3692cced66036,Full Controllable Face Detection System Architecture for Robotic Vision
+bfdafe932f93b01632a5ba590627f0d41034705d,Transfer Learning for Human Action Recognition
+d31328b12eef33e7722b8e5505d0f9d9abe2ffd9,Deep Unsupervised Domain Adaptation for Face Recognition
+d36a1e4637618304c2093f72702dcdcc4dcd41d1,Identity-Aware Convolutional Neural Network for Facial Expression Recognition
+d3d39e419ac98db2de1a9d5a05cb0b4ca5cae8fd,The SVM-Minus Similarity Score for Video Face Recognition
+d340a135a55ecf7506010e153d5f23155dcfa7e8,MAVI: An Embedded Device to Assist Mobility of Visually Impaired
+d44e6baf3464bf56d3a29daf280b1b525ac30f7d,Spatial-Temporal Weighted Pyramid Using Spatial Orthogonal Pooling
+d4ec62efcc631fa720dfaa1cbc5692b39e649008,New Robust Clustering Model for Identifying Cancer Genome Landscapes
+d4fb26f5528b9a1f04ea773cc2b920e01fc0edd4,A Team of Continuous-Action Learning Automata for Noise-Tolerant Learning of Half-Spaces
+ba931c3f90dd40a5db4301a8f0c71779a23043d6,A General Nonlinear Embedding Framework Based on Deep Neural Network
+a03448488950ee5bf50e9e1d744129fbba066c50,Deep Manifold Embedding Active Shape Model for Pose Invarient Face Tracking
+a006cd95c14de399706c5709b86ac17fce93fcba,Multi-label Learning with Missing Labels
+a76969df111f9ee9f0b898b51ad23a721d289bdc,A Model of Local Binary Pattern Feature Descriptor for Valence Facial Expression Classification
+b839bc95794dc65340b6e5fea098fa6e6ea5e430,Soft Biometrics in Online Social Networks: A Case Study on Twitter User Gender Recognition
+b8e5800dfc590f82a0f7eedefce9abebf8088d12,How to Train Your Neural Network with Dictionary Learning
+b86c49c6e3117ea116ec2d8174fa957f83502e89,A Correlated Topic Modeling Approach for Facial Expression Recognition
+b8fc620a1563511744f1a9386bdfa09a2ea0f71b,Boosting Face in Video Recognition via CNN Based Key Frame Extraction
+b8b9cef0938975c5b640b7ada4e3dea6c06d64e9,Metric-Promoted Siamese Network for Gender Classification
+b85c198ce09ffc4037582a544c7ffb6ebaeff198,Efficient Multiple Instance Metric Learning Using Weakly Supervised Data
+dc1510110c23f7b509035a1eda22879ef2506e61,3D-Aided Face Recognition Robust to Expression and Pose Variations
+dcf6ecd51ba135d432fcb7697fc6c52e4e7b0a43,Factorized Variational Autoencoders for Modeling Audience Reactions to Movies
+dc5d04d34b278b944097b8925a9147773bbb80cc,A Temporal Sequence Learning for Action Recognition and Prediction
+a9ae55c83a8047c6cdf7c958fd3d4a6bfb0a13df,AcFR: Active Face Recognition Using Convolutional Neural Networks
+a92c207031b0778572bf41803dba1a21076e128b,Unobtrusive Students' Engagement Analysis in Computer Science Laboratory Using Deep Learning Techniques
+d57982dc55dbed3d0f89589e319dc2d2bd598532,Reliable Crowdsourcing and Deep Locality-Preserving Learning for Expression Recognition in the Wild
+aafeb3d76155ec28e8ab6b4d063105d5e04e471d,Reconstructing Intensity Images from Binary Spatial Gradient Cameras
+afdc303b3325fbc1baa9f18a66bcad59d5aa675b,Subclass Error Correcting Output Codes Using Fisher's Linear Discriminant Ratio
+af97a51f56cd6b793cf96692931a8d1ddbe4e3cc,Learning Semantic Binary Codes by Encoding Attributes for Image Retrieval
+b784bb1d2b2720dac8d4b92851a8d6360c35b0b2,New Probabilistic Multi-graph Decomposition Model to Identify Consistent Human Brain Network Modules
+b72eebffe697008048781ab7b768e0c96e52236a,Discriminative Covariance Oriented Representation Learning for Face Recognition with Image Sets
+db1a9b8d8ce9a5696a96f8db4206b6f72707730e,Cross-Modal Facial Attribute Recognition with Geometric Features
+db3984b143c59584a32d762d712d21c0e8cf38b8,Weighted Fusion of Bit Plane-Specific Local Image Descriptors for Facial Expression Recognition
+dbc8ffd6457147ff06cd3f56834e3ec6dccb2057,SmileNet: Registration-Free Smiling Face Detection In The Wild
+dbfe62c02b544b48354fac741d90eb4edf815db5,Performance Review of a Multi-Layer Feed-Forward Neural Network and Normalized Cross Correlation for Facial Expression Identification
+a89cbc90bbb4477a48aec185f2a112ea7ebe9b4d,High Performance Large Scale Face Recognition with Multi-cognition Softmax and Feature Retrieval
+a8c62833f5e57d4cd060d6b5f0f9cfe486ee6825,Boosting for Learning a Similarity Measure in 2DPCA Based Face Recognition
+de162d4b8450bf2b80f672478f987f304b7e6ae4,Weakly Supervised Manifold Learning for Dense Semantic Object Correspondence
+dee6609615b73b10540f32537a242baa3c9fca4d,Temporal Domain Neural Encoder for Video Representation Learning
+b0358af78b7c5ee7adc883ef513bbcc84a18a02b,Universal Skin Detection Without Color Information
+b034cc919af30e96ee7bed769b93ea5828ae361b,Soft-Margin Mixture of Regressions
+a62997208fec1b2fbca6557198eb7bc9340b2409,An New Algorithm on Feature Selection with L-Norm PCA
+c3d3d2229500c555c7a7150a8b126ef874cbee1c,Shape Augmented Regression Method for Face Alignment
+c3a53b308c7a75c66759cbfdf52359d9be4f552b,On Detecting Partially Occluded Faces with Pose Variations
+c36f3cabeddce0263c944e9fe4afd510b5bae816,Action Parsing Using Context Features
+c4cfdcf19705f9095fb60fb2e569a9253a475f11,Towards Context-Aware Interaction Recognition for Visual Relationship Detection
+c41a3c31972cf0c1be6b6895f3bf97181773fcfb,Accurate Facial Landmarks Detection for Frontal Faces with Extended Tree-Structured Models
+ea8fa68b74ffefbe79a3576d7e4ae4365a1346ff,View-Independent Facial Action Unit Detection
+eacf974e235add458efb815ada1e5b82a05878fa,Dynamic Facial Expression Analysis and Synthesis With MPEG-4 Facial Animation Parameters
+e16f73f3a63c44cf285b8c1bc630eb8377b85b6d,Symmetric Shape Morphing for 3D Face and Head Modelling
+cdcfc75f54405c77478ab776eb407c598075d9f8,Learning Binary Codes for Maximum Inner Product Search
+cc9d068cf6c4a30da82fd6350a348467cb5086d4,Protecting Your Faces: MeshFaces Generation and Removal via High-Order Relation-Preserving CycleGAN
+cc1b093cfb97475faabab414878fa7e4a2d97cd7,Building a Face Expression Recognizer and a Face Expression Database for an Intelligent Tutoring System
+ccb2ecb30a50460c9189bb55ba594f2300882747,Robust Gender Classification Using Multi-Spectral Imaging
+cc7c63473c5bef5ae09f26b2258691d9ffdd5f93,Subject-Independent Facial Expression Recognition with Biologically Inspired Features
+cce2f036d0c5f47c25e459b2f2c49fa992595654,Harvesting Web Images for Realistic Facial Expression Recognition
+cce332405ce9cd9dccc45efac26d1d614eaa982d,A Ranking Approach for Human Ages Estimation Based on Face Images
+e68869499471bcd6fa8b4dc02aa00633673c0917,Diffusion-Based Face Selective Smoothing in DCT Domain to Illumination Invariant Face Recognition
+f9752fd07b14505d0438bc3e14b23d7f0fe7f48b,Incremental and Decremental Multi-category Classification by Support Vector Machines
+f0f854f8cfe826fd08385c0c3c8097488f468076,Injury Mechanism Classification in Soccer Videos
+f702a6cf6bc5e4cf53ea72baa4fc9d80cdbbae93,Reconstruction and Recognition of Tensor-Based Objects With Concurrent Subspaces Analysis
+f73174cfcc5c329b63f19fffdd706e1df4cc9e20,Automatic Vehicle Detection and Driver Identification Framework for Secure Vehicle Parking
+e8951cc76af80da43e3528fe6d984071f17f57e7,Online Cost Efficient Customer Recognition System for Retail Analytics
+fa052fd40e717773c6dc9cc4a2f5c10b8760339f,Robust Regression for Face Recognition
+fa641327dc5873276f0af453a2caa1634c16f143,ChaLearn Looking at People RGB-D Isolated and Continuous Datasets for Gesture Recognition
+fa32b29e627086d4302db4d30c07a9d11dcd6b84,Weakly Supervised Facial Attribute Manipulation via Deep Adversarial Network
+ff42ec628b0980909bbb84225d0c4f8d9ac51e03,Convergent 2-D Subspace Learning With Null Space Analysis
+c535d4d61aa0f1d8aadb4082bdcc19f4cbdf0eaf,Unsupervised Action Discovery and Localization in Videos
+c26b43c2e1e2da96e7caabd46e1d7314acac0992,Facial Expression Recognition Using Facial Landmarks and Random Forest Classifier
+c2bd9322fa2d0a00fc62075cc0f1996fc75d42a8,Age Estimation Guided Convolutional Neural Network for Age-Invariant Face Recognition
+f61829274cfe64b94361e54351f01a0376cd1253,Regressing a 3D Face Shape from a Single Image
+f65b47093e4d45013f54c3ba09bbcce7140af6bb,Multiple Anthropological Fisher Kernel Framework and Its Application to Kinship Verification
+f6511d8156058737ec5354c66ef6fdcf035d714d,Response Surface Learning for Face Misalignment Correction
+e9d77a85bc2fa672cc1bd10258c896c8d89b41e8,On the Theoretical and Computational Analysis between SDA and Lap-LDA
+e94168c35be1d4b4d2aaf42ef892e64a3874ed8c,Two-Dimensional Maximum Margin Feature Extraction for Face Recognition
+e98551055bdcf8e25e07f4ffdbf39d0a4a57bffc,Model Semantic Relations with Extended Attributes
+f1ae9f5338fcff577b1ae9becdb66007fe57bd45,Deep Sequential Context Networks for Action Prediction
+f1280f76933ba8b7f4a6b8662580504f02bb4ab6,Gender Classification by Deep Learning on Millions of Weakly Labelled Images
+e7144f5c19848e037bb96e225d1cfd961f82bd9f,Heterogeneous Face Recognition: Recent Advances in Infrared-to-Visible Matching
+e73b1137099368dd7909d203b80c3d5164885e44,Facial Action Units Recognition Based on Fuzzy Kernel Clustering
+e79bacc03152ea55343e6af97bcd17d8904cf5ef,Recursive Spatial Transformer (ReST) for Alignment-Free Face Recognition
+cba090a5bfae7dd8a60a973259f0870ed68c4dd3,Human Action Classification Using Temporal Slicing for Deep Convolutional Neural Networks
+cbe021d840f9fc1cb191cba79d3f7e3bbcda78d3,Facial Landmark Detection via Progressive Initialization
+cb4d8cef8cec9406b1121180d47c14dfef373882,Makeup-Invariant Face Recognition by 3D Face: Modeling and Dual-Tree Complex Wavelet Transform from Women's 2D Real-World Images
+cb2470aade8e5630dcad5e479ab220db94ecbf91,Exploring Facial Differences in European Countries Boundary by Fine-Tuned Neural Networks
+f856532a729bd337fae1eb7dbe55129ae7788f45,Isolated Word Recognition Using Low Dimensional Features and Kernel Based Classification
+cead57f2f7f7b733f4524c4b5a7ba7f271749b5f,Improving Face Detection Performance by Skin Detection Post-Processing
+ce75deb5c645eeb08254e9a7962c74cab1e4c480,Emotion-Preserving Representation Learning via Generative Adversarial Network for Multi-View Facial Expression Recognition
+e060e32f8ad98f10277b582393df50ac17f2836c,Zero-Shot Action Recognition with Error-Correcting Output Codes
+e0162dea3746d58083dd1d061fb276015d875b2e,Unconstrained Face Alignment Without Face Detection
+468bb5344f74842a9a43a7e1a3333ebd394929b4,From Macro to Micro Expression Recognition: Deep Learning on Small Datasets Using Transfer Learning
+46c82cfadd9f885f5480b2d7155f0985daf949fc,3D Shape Attributes
+2c052a1c77a3ec2604b3deb702d77c41418c7d3e,What Is the Challenge for Deep Learning in Unconstrained Face Recognition?
+2ce1bac5ddc4cf668bbbb8879cd21dfb94b5cfe4,Spatiotemporal Pyramid Network for Video Action Recognition
+41c56c69b20b3f0b6c8a625009fc0a4d317e047a,Integral Local Binary Patterns: A Novel Approach Suitable for Texture-Based Object Detection Tasks
+8355d095d3534ef511a9af68a3b2893339e3f96b,DEX: Deep EXpectation of Apparent Age from a Single Image
+83f80fd4eb614777285202fa99e8314e3e5b169c,Towards Automated Visual Monitoring of Individual Gorillas in the Wild
+1b4b3d0ce900996a6da8928e16370e21d15ed83e,A Review of Performance Evaluation on 2D Face Databases
+77d929b3c4bf546557815b41ed5c076a5792dc6b,Using Synthetic Data to Improve Facial Expression Analysis with 3D Convolutional Networks
+480858e55abdbc07ca47b7dc10204613fdd9783c,Early Facial Expression Recognition Using Hidden Markov Models
+48de3ca194c3830daa7495603712496fe908375c,Capturing Complex Spatio-temporal Relations among Facial Muscles for Facial Expression Recognition
+70e14e216b12bed2211c4df66ef5f0bdeaffe774,Attribute-Enhanced Face Recognition with Neural Tensor Fusion Networks
+708f4787bec9d7563f4bb8b33834de445147133b,Wavelet-SRNet: A Wavelet-Based CNN for Multi-scale Face Super Resolution
+70516aede32cf0dbc539abd9416c44faafc868bd,Automatic Emotion Recognition through Facial Expression Analysis in Merged Images Based on an Artificial Neural Network
+1e0d92b9b4011822825d1f7dc0eba6d83504d45d,Generalized Face Super-Resolution
+1eb48895d86404251aa21323e5a811c19f9a55f9,A Hybrid Image Feature Descriptor for Classification
+84c5b45328dee855c4855a104ac9c0558cc8a328,Conformal Mapping of a 3D Face Representation onto a 2D Image for CNN Based Face Recognition
+845f45f8412905137bf4e46a0d434f5856cd3aec,The Spyware Used in Intimate Partner Violence
+84f86f8c559a38752ddfb417e58f98e1f8402f17,Modified Multiscale Vesselness Filter for Facial Feature Detection
+4a733a0862bd5f7be73fb4040c1375a6d17c9276,Designing Category-Level Attributes for Discriminative Visual Recognition
+4a8480d58c30dc484bda08969e754cd13a64faa1,Offline Deformable Face Tracking in Arbitrary Videos
+24603ed946cb9385ec541c86d2e42db47361c102,Reverse Engineering Psychologically Valid Facial Expressions of Emotion into Social Robots
+2480f8dccd9054372d696e1e521e057d9ac9de17,Ontology-Driven Hierarchical Deep Learning for Fashion Recognition
+4f8345f31e38f65f1155569238d14bd8517606f4,Learning by Associating Ambiguously Labeled Images
+8da32ff9e3759dc236878ac240728b344555e4e9,Investigating Nuisance Factors in Face Recognition with DCNN Representation
+8de5dc782178114d9424d33d9adabb2f29a1ab17,Driver Gaze Tracking and Eyes Off the Road Detection System
+151b87de997e55db892b122c211f9c749f4293de,Joint Learning of Object and Action Detectors
+1280b35e4a20036fcfd82ee09f45a3fca190276f,Face Verification Based on Feature Transfer via PCA-SVM Framework
+12226bca7a891e25b7d1e1a34a089521bba75731,Hand-Crafted Feature Guided Deep Learning for Facial Expression Recognition
+126204b377029feb500e9b081136e7a9010e3b6b,Efficient Dimensionality Reduction on Undersampled Problems through Incremental Discriminative Common Vectors
+120b9c271c3a4ea0ad12bbc71054664d4d460bc3,Face Recognition against Mouth Shape Variations
+8ccbbd9da0749d96f09164e28480d54935ee171c,Improved Facial Expression Recognition with Trainable 2-D Filters and Support Vector Machines
+8598d31c7ca9c8f5bb433409af5e472a75037b4d,Active Learning for Interactive Multimedia Retrieval
+85e78aa374d85f9a61da693e5010e40decd3f986,Top-Down Segmentation of Non-rigid Visual Objects Using Derivative-Based Search on Sparse Manifolds
+85c90ad5eebb637f048841ebfded05942bb786b7,A Joint Evaluation of Dictionary Learning and Feature Encoding for Action Recognition
+1ddea58d04e29069b583ac95bc0ae9bebb0bed07,"An Efficient Model for Simultaneous Face Detection, Pose Estimation and Landmark Localisation"
+715d3eb3665f46cd2fab74d35578a72aafbad799,A Peak Detection Method for Understanding User States for Empathetic Intelligent Agents
+7195cb08ba2248f3214f5dc5d7881533dd1f46d9,Age Regression Based on Local Image Features
+765be0c44a67e41e0f8f0b5d8a3af0ff40a00c7d,Cross-Generating GAN for Facial Identity Preserving
+768f6a14a7903099729872e0db231ea814eb05e9,De-Mark GAN: Removing Dense Watermark with Generative Adversarial Network
+82e1692467969940a6d6ac40eae606b8b4981f7e,How Many Frames Does Facial Expression Recognition Require?
+826015d9ade1637b3fcbeca071e3137d3ac1ef56,A Deep Learning Frame-Work for Recognizing Developmental Disorders
+49e4f05fa98f63510de76e7abd8856ff8db0f38d,Facial Action Units Detection with Multi-Features and -AUs Fusion
+49fdafef327069516d887d8e69b5e96c983c3dd0,Face Retrieval in Video Sequences Using a Single Face Sample
+405d9a71350c9a13adea41f9d7f7f9274793824f,Enhancing Interior and Exterior Deep Facial Features for Face Detection in the Wild
+2e3b981b9f3751fc5873f77ad2aa7789c3e1d1d2,Comprehensive Dataset of Broadcast Soccer Videos
+2bb36c875754a2a8919f2f9b00a336c00006e453,Eigen-Evolution Dense Trajectory Descriptors
+471bef061653366ba66a7ac4f29268e8444f146e,Semi-supervised Component Analysis
+47fb74785fbd8870c2e819fc91d04b9d9722386f,Recurrent Assistance: Cross-Dataset Training of LSTMs on Kitchen Tasks
+78f2c8671d1a79c08c80ac857e89315197418472,Recurrent 3D-2D Dual Learning for Large-Pose Facial Landmark Detection
+784a83437b3dba49c0d7ccc10ac40497b84661a5,Generative Attribute Controller with Conditional Filtered Generative Adversarial Networks
+78f244dc2a171944836a89874b8f60e9fe80865d,Affective Video Classification Based on Spatio-temporal Feature Fusion
+7813d405450013bbdb0b3a917319d5964a89484a,From Affine Rank Minimization Solution to Sparse Modeling
+13f065d4e6dfe2a130bd64d73eee97d10d9f7d33,A Study of the Region Covariance Descriptor: Impact of Feature Selection and Image Transformations
+7f26c615dd187ca5e4b15759d5cb23ab3ea9d9a9,Leveraging the User's Face for Absolute Scale Estimation in Handheld Monocular SLAM
+7f2a234ad5c256733a837dbf98f25ed5aad214e8,Optimal Feature Extraction and Classification of Tensors via Matrix Product State Decomposition
+7f5b379b12505d60f9303aab1fea48515d36d098,Performance Comparison of Deep Learning Techniques for Recognizing Birds in Aerial Images
+7f68a5429f150f9eb7550308bb47a363f2989cb3,Multiple-Facial Action Unit Recognition by Shared Feature Learning and Semantic Relation Modeling
+14bdd23ea8f4f6d7f4c193e5cbb0622362e12ae1,Feature Extraction Using Recursive Cluster-Based Linear Discriminant With Application to Face Recognition
+8e9b92a805d1ce0bf4e0c04133d26e28db036e6a,Evaluation of Triple-Stream Convolutional Networks for Action Recognition
+22ccd537857aca1ee4b961f081f07c58d42a7f32,Face Recognition Despite Wearing Glasses
+25960f0a2ed38a89fa8076a448ca538de2f1e183,The Dark Side of the Face: Exploring the Ultraviolet Spectrum for Face Biometrics
+2563fc1797f187e2f6f9d9f4387d4bcadd3fbd02,Just Noticeable Differences in Visual Attributes
+252f202bfb14d363a969fce19df2972b83fa7ec0,Boosting-POOF: Boosting Part Based One vs One Feature for Facial Expression Recognition in the Wild
diff --git a/scraper/reports/doi_institutions_unknown.csv b/scraper/reports/doi_institutions_unknown.csv
new file mode 100644
index 00000000..bc3bc41c
--- /dev/null
+++ b/scraper/reports/doi_institutions_unknown.csv
@@ -0,0 +1,856 @@
+61262450d4d814865a4f9a84299c24daa493f66e,Biometric recognition in surveillance scenarios: a survey,"Institute of High Performance Computing and Networking, National Research Council of Italy (ICAR-CNR), Naples, Italy"
+0d6d9c4b5dd282b8f29cd3c200df02a00141f0a9,Anatomy based features for facial expression recognition,"FMV I&#x015E;IK &#x00DC;niversitesi, &#x015E;ile, Istanbul"
+0d9815f62498db21f06ee0a9cc8b166acc93888e,Image recognition system based on novel measures of image similarity and cluster validity,a
+0d9815f62498db21f06ee0a9cc8b166acc93888e,Image recognition system based on novel measures of image similarity and cluster validity,b
+0d9815f62498db21f06ee0a9cc8b166acc93888e,Image recognition system based on novel measures of image similarity and cluster validity,c
+0d9815f62498db21f06ee0a9cc8b166acc93888e,Image recognition system based on novel measures of image similarity and cluster validity,d
+0d9815f62498db21f06ee0a9cc8b166acc93888e,Image recognition system based on novel measures of image similarity and cluster validity,e
+0da3c329ae14a4032b3ba38d4ea808cf6d115c4a,Discriminant feature extraction for image recognition using complete robust maximum margin criterion,"Department of Computer Science, Minjiang University, Fuzhou, People’s Republic of China"
+0d75c7d9a00f859cffe7d0bd78dd35d0b4bc7fa6,Active differential CMOS imaging device for human face recognition,"Inst. Nat. des Telecommun., Evry, France"
+0d98750028ea7b84b86e6fec3e67d61e4f690d09,Large-scale subspace clustering using random sketching and validation,"Dept. of ECE & Digital Technology Center, Univ. of Minnesota, USA"
+0d98750028ea7b84b86e6fec3e67d61e4f690d09,Large-scale subspace clustering using random sketching and validation,"Dept. of EE, Univ. at Buffalo, SUNY, USA"
+0db371a6bc8794557b1bffc308814f53470e885a,Adaptive semi-supervised dimensionality reduction based on pairwise constraints weighting and graph optimizing,"Computer Application Research Center, Harbin Institute of Technology Shenzhen Graduate School, Shenzhen, China"
+95023e3505263fac60b1759975f33090275768f3,Facial Expression Recognition in Daily Life by Embedded Photo Reflective Sensors on Smart Eyewear,"National Institute of Advanced Industrial Science and Technology (AIST), Koto, Tokyo, Japan"
+950bf95da60fd4e77d5159254fed906d5ed5fbcb,Clustering Faces in Movies Using an Automatically Constructed Social Network,Reallusion Corporation
+95b5296f7ec70455b0cf1748cddeaa099284bfed,Measurement of Static and Dynamic Bio- Parameters of a Person in Remote Systems for Current Psycho- Emotional and Functional State Monitoring,"Department of Electronic Measuring systems, Moscow Engineering Physics Institute, National Research Nuclear University MEPhI, Moscow, Russia"
+9590b09c34fffda08c8f54faffa379e478f84b04,Efficient Dual Approach to Distance Metric Learning,"Defence Science and Technology Organisation (DSTO), Edinburgh, Australia"
+95e7cf27a8ee62b63ed9d1ecb02a7016e9a680a6,Simultaneous Learning of Nonlinear Manifolds Based on the Bottleneck Neural Network,"Faculty of Biomedical Engineering, Amirkabir University of Technology (Tehran Polytechnic), Tehran, Iran"
+95e3b78eb4d5b469f66648ed4f37e45e0e01e63e,Facial point localization via neural networks in a cascade regression framework,"Institute for Information Technology and Communications (IIKT), Otto-von-Guericke-University Magdeburg, Magdeburg, Germany"
+95e3b78eb4d5b469f66648ed4f37e45e0e01e63e,Facial point localization via neural networks in a cascade regression framework,"Institute of Neural Information Processing, University of Ulm, Ulm, Germany"
+5981c309bd0ffd849c51b1d8a2ccc481a8ec2f5c,SmartFace: Efficient face detection on smartphones for wireless on-demand emergency networks,"Department of Electrical Engineering and Information Technology, TU Darmstadt, D-64283, Germany"
+9227c1a5b26556b9c34015b3ea5f9ae5f50e9b23,Development of deep learning-based facial expression recognition system,"Broadcasting &amp; Telecommunications, Convergence Media Research Department, Electronics and Telecommunications Research Institute, Daejeon, Korea"
+0c65226edb466204189b5aec8f1033542e2c17aa,A study of CNN outside of training conditions,"Intelligent Vision Research Lab, Department of Computer Science, Federal University of Bahia"
+0c247ac797a5d4035469abc3f9a0a2ccba49f4d8,An efficient landmark localization for face occlusion,"Department of Computer Science and Information Engineering, National Formosa University, Yunlin 632, Taiwan"
+0c0db39cac8cb76b52cfdbe10bde1c53d68d202f,Metric-based Generative Adversarial Network,"New York University Abu Dhabi & NYU Tandon School of Engineering, Abu Dhabi, Uae"
+3e59d97d42f36fc96d33a5658951856a555e997b,"Realistic inverse lighting from a single 2D image of a face, taken under unknown and complex lighting","Department of Electrical Engineering and Computer Science, University of Siegen, Siegen, Germany"
+50ee027c63dcc5ab5cd0a6cdffb1994f83916a46,Learning a discriminative dictionary for sparse coding via label consistent K-SVD,"Adobe Systems Incorporated, San Jose, CA, 95110"
+501076313de90aca7848e0249e7f0e7283d669a1,Face recognition based on geometric features using Support Vector Machines,"ITI Department Telecom Bretagne, Brest, France"
+681d222f91b12b00e9a4217b80beaa11d032f540,Periocular recognition: how much facial expressions affect performance?,"Department of Computer Science, IT-Instituto de Telecomunicações, University of Beira Interior, Covilhã, Portugal"
+68604e7e1b01cdbd3c23832976d66f1a86edaa8f,Facial Recognition System for Suspect Identification Using a Surveillance Camera,"Computer Science and Engineering, Panimalar Engineering College, Chennai, India"
+68604e7e1b01cdbd3c23832976d66f1a86edaa8f,Facial Recognition System for Suspect Identification Using a Surveillance Camera,"Computer Science and Engineering, St.Peter’s University, Chennai, India"
+68604e7e1b01cdbd3c23832976d66f1a86edaa8f,Facial Recognition System for Suspect Identification Using a Surveillance Camera,"Computer Science and Engineering, Sanjivani College of Engineering, Kopargaon, India"
+68604e7e1b01cdbd3c23832976d66f1a86edaa8f,Facial Recognition System for Suspect Identification Using a Surveillance Camera,"School of Information Technology, Madurai Kamarai University, Madurai, India"
+6813208b94ffa1052760d318169307d1d1c2438e,Multiple Models Fusion for Emotion Recognition in the Wild,"Peking University & Shanghai Jaio Tong University, Beijing, China"
+6856a11b98ffffeff6e2f991d3d1a1232c029ea1,Multiple kernel learning SVM and statistical validation for facial landmark detection,"LAMIA, EA 4540, University of French West Indies &amp; Guyana"
+68d566ed4041a7519acb87753036610bd64dcc09,Lighting Estimation of a Convex Lambertian Object Using Redundant Spherical Harmonic Frames,"College of Control Engineering, Northeastern University at Qinhuangdao, Qinhuangdao, China"
+5760d29574d78e79e8343b74e6e30b3555e48676,An Intelligent Music Player Based on Emotion Recognition,"R V College of Engineering, Department of Computer Science and Engineering, Bangalore, India"
+5763b09ebca9a756b4adebf74d6d7de27e80e298,Picture-specific cohort score normalization for face pair matching,"Department of Sciences and Information Technology, University of Sassari, Viale Mancini 5, 07100 Sassari, Italy"
+57ba4b6de23a6fc9d45ff052ed2563e5de00b968,An efficient deep neural networks training framework for robust face recognition,"School of Computer and Information Engineering, Xiamen University of Technology, Xiamen 361024, China"
+3bcb93aa2a5e5eda039679516292af2f7c0ff9ac,Terrain classification of hyperspectral remote sensing images based on kernel maximum margin criterion,"School of Electronic Engineering, Xi'an University of Posts and Telecommunications, Xi'an, China"
+3bdaf59665e6effe323a1b61308bcac2da4c1b73,2D spherical spaces for objects recognition under harsh lighting conditions,"Faculty of Engineering Science, Department of Systems Innovation, Arai Laboratory at Osaka University, Japan"
+3bf579baf0903ee4d4180a29739bf05cbe8f4a74,Facial Expression Biometrics Using Tracker Displacement Features,"Center for Unified Biometrics and Sensors, University at Buffalo, NY, USA. tulyakov@cedar.buffalo.edu"
+6f22324fab61fbc5df1aac2c0c9c497e0a7db608,Volume structured ordinal features with background similarity measure for video face recognition,"Advanced Technologies Application Center 7a #21406 b/ 214 and 216, P.C. 12200, Siboney Playa, Havana, Cuba"
+6ff0f804b8412a50ae2beea5cd020c94a5de5764,Measuring sample distortions in face recognition,"Sapienza Università di Roma, Roma, Italy"
+6ff0f804b8412a50ae2beea5cd020c94a5de5764,Measuring sample distortions in face recognition,"Università di Salerno, Fisciano (SA), Italy"
+035c8632c1ffbeb75efe16a4ec50c91e20e6e189,Kinship verification from facial images and videos: human versus machine,"School of Computing and Information Systems, University of Melbourne, Melbourne, Australia"
+9b4b2a575641f3a7f8a5ce28b6a06c36694a9ddf,A lighting robust fitting approach of 3D morphable model for face reconstruction,"Insititute of Automation, Chinese Academy of Sciences (CAS), Beijing, China"
+9b9f6e5eb6d7fa50300d67502e8fda1006594b84,Learning to Recognise Unseen Classes by A Few Similes,"University of Sheffield, Sheffield, United Kingdom"
+9b8830655d4a5a837e3ffe835d14d6d71932a4f2,Multiview Face Recognition: From TensorFace to V-TensorFace and K-TensorFace,"Department of Computer Science, University of Texas, San Antonio, TX, USA"
+9e5690cdb4dfa30d98dff653be459e1c270cde7f,Multiple path search for action tube detection in videos,Department of Electronic and Computer Engineering National Taiwan University of Science and Technology
+9e5809122c0880183c7e42c7edd997f92de6d81e,Eye corner detector robust to shape and illumination changes,"Fujitsu Laboratories, Kawasaki, Kanagawa, Japan"
+9e99f818b37d44ec6aac345fb2c5356d83d511c7,Sift-flow registration for facial expression analysis using Gabor wavelets,"University of Montreal, Department of Computer Science and Operations Research (DIRO), 2920 Chemin de la tour, QC, Canada, H3C 3J7"
+9e2ab407ff36f3b793d78d9118ea25622f4b7434,Local generic representation for patch uLBP-based face recognition with single training sample per subject,"SITI Laboratory, National Engineering School of Tunis (ENIT), University of Tunis El Manar, Tunis, Tunisia"
+9eaa967d19fc66010b7ade7d94eaf7971a1957f3,Segmentation-based illumination normalization for face detection,"Department of Information Processing Interdisciplinary Graduate School of Science and Engineering, Tokyo Institute of Technology Yokohama 226-8503, Japan"
+9e60614fd57afe381ae42c6ee0b18f32f60bb493,Attribute constrained subspace learning,"Institute for Human-Machine Communication, Technische Universit&#x00E4;t M&#x00FC;nchen, Germany"
+9e60614fd57afe381ae42c6ee0b18f32f60bb493,Attribute constrained subspace learning,"Dept. of Artificial Intelligence, Faculty of Computer Engineering, University of Isfahan, Iran"
+9e10ea753b9767aa2f91dafe8545cd6f44befd7f,Learning discriminative local binary patterns for face recognition,"Department of Computer Science, Pontificia Universidad Cato&#x00B4;lica de Chile"
+049186d674173ebb76496f9ecee55e17ed1ca41b,Inner Product Regularized Nonnegative Self Representation for Image Classification and Clustering,"School of Software, Jiangxi Normal University, Nanchang, China"
+049186d674173ebb76496f9ecee55e17ed1ca41b,Inner Product Regularized Nonnegative Self Representation for Image Classification and Clustering,"School of Computer Science and Information Engineering, Shanghai Institute of Technology, Shanghai, China"
+047d3cb2a6a9628b28cac077b97d95b04ca9044c,A robust composite metric for head pose tracking using an accurate face model,"Majority Report, France"
+04f56dc5abee683b1e00cbb493d031d303c815fd,Scene character recognition using PCANet,"Xiamen University of Technology, Fujian, China"
+0450dacc43171c6e623d0d5078600dd570de777e,Emotional faces influence numerosity estimation without awareness,"Graduate School of Biomedical Sciences, Nagasaki University, Nagasaki City, Japan"
+045275adac94cced8a898a815293700401e9955f,Texture-independent recognition of facial expressions in image snapshots and videos,"Computer Vision Center, Edifici “O”, Campus UAB, Bellaterra, Spain"
+045275adac94cced8a898a815293700401e9955f,Texture-independent recognition of facial expressions in image snapshots and videos,"University of the Basque Country UPV/EHU, San Sebastian, Spain"
+045275adac94cced8a898a815293700401e9955f,Texture-independent recognition of facial expressions in image snapshots and videos,"IKERBASQUE, Basque Foundation for Science, Bilbao, Spain"
+6af75a8572965207c2b227ad35d5c61a5bd69f45,An Accelerated Variational Framework for Face Expression Recognition,"Concordia Institute for Information Systems Engineering Concordia University, Montreal, Canada"
+6a6269e591e11f41d59c2ca1e707aaa1f0d57de6,KPCA method based on within-class auxiliary training samples and its application to pattern classification,"School of Mathematics and Computational Science, Anqing Normal University, Anqing, People’s Republic of China"
+6afe1f668eea8dfdd43f0780634073ed4545af23,Deep learning for content-based video retrieval in film and television production,"Department of Mathematics and Computer Science, University of Marburg, Marburg, Germany"
+6afe1f668eea8dfdd43f0780634073ed4545af23,Deep learning for content-based video retrieval in film and television production,"taglicht media Film- & Fernsehproduktion GmbH, Köln, Germany"
+6afe1f668eea8dfdd43f0780634073ed4545af23,Deep learning for content-based video retrieval in film and television production,"German National Library of Science and Technology (TIB), Hannover, Germany"
+6afe1f668eea8dfdd43f0780634073ed4545af23,Deep learning for content-based video retrieval in film and television production,"L3S Research Center, Leibniz Universität Hannover, Hannover, Germany"
+6a527eeb0b2480109fe987ed7eb671e0d847fca8,Introduction to Intelligent Surveillance,"Department of Computer Science, Auckland University of Technology, Auckland, New Zealand"
+6adecb82edbf84a0097ff623428f4f1936e31de0,Client-specific A-stack model for adult face verification across aging,"School of Engineering, Swiss Federal Institute of Technology Lausanne (EPFL), Lausanne, Switzerland"
+6a931e7b7475635f089dd33e8d9a2899ae963804,Unified convolutional neural network for direct facial keypoints detection,"Pusan National University, Busan, Korea"
+6a6406906470be10f6d6d94a32741ba370a1db68,Emotion extraction based on multi bio-signal using back-propagation neural network,"Department of MediaSoftware, Sungkyul University, Anyang-si, Republic of Korea"
+6a6406906470be10f6d6d94a32741ba370a1db68,Emotion extraction based on multi bio-signal using back-propagation neural network,"Department of Film and Digital Media, Seokyeong University, Seoul, Republic of Korea"
+6ad5ac867c5ca56e0edaece153269d989b383b59,Local feature extraction and recognition under expression variations based on multimodal face and ear spherical map,"School of Automation and Electrical Engineering, University of Science and Technology Beijing, 100083, China"
+3266fcd1886e8ad883714e38203e66c0c6487f7b,Exploring synonyms as context in zero-shot action recognition,Vision Semantics Ltd
+321db1059032b828b223ca30f3304257f0c41e4c,Comparative evaluation of age classification from facial images,"Department of Electronics and Telecommunication Engineering, Don Bosco Institute of Technology, Kurla (W), Mumbai, India"
+327ae6742cca4a6a684a632b0d160dd84d0d8632,Dimension Reduction and Construction of Feature Space for Image Pattern Recognition,"Graduate School of Advanced Integration Science, Chiba University, Chiba, Japan"
+327ae6742cca4a6a684a632b0d160dd84d0d8632,Dimension Reduction and Construction of Feature Space for Image Pattern Recognition,"Institute of Management and Information Technologies, Chiba University, Chiba, Japan"
+327ae6742cca4a6a684a632b0d160dd84d0d8632,Dimension Reduction and Construction of Feature Space for Image Pattern Recognition,"Graduate School of Engineering, Nagasaki University, Nagasaki, Japan"
+32e4fc2f0d9c535b1aca95aeb5bcc0623bcd2cf2,Real time facial expression recognition with AdaBoost,"Dept. of Comput. Sci. & Technol., Tsinghua Univ., Beijing, China"
+322488c4000c686e9bfb7514ccdeacae33e53358,People News Search via Name-Face Association Analysis,"Shanghai University of Finance and Economics, Shanghai, China"
+32dfd4545c87d9820cc92ca912c7d490794a81d6,Computer Vision for Driver Assistance,"Department of Computer Engineering, Qazvin Islamic Azad University , Qazvin, Iran"
+32dfd4545c87d9820cc92ca912c7d490794a81d6,Computer Vision for Driver Assistance,"Department of Electrical and Electronic Engineering, Auckland University of Technology , Auckland, New Zealand"
+32a440720ee988b7b41de204b2910775171ee12c,Front view gait recognition using Spherical Space Model with Human Point Clouds,"Waseda University The Graduate School of Information, Production and Systems 2-7, Hibikino, Wakamatsu-ku, Kitakyushu-shi, Fukuoka, Japan"
+359edbaa9cf56857dd5c7c94aaef77003ba8b860,Human Behavior Understanding,"Department of Computer Engineering, Bogaziçi University, Bebek, Turkey"
+35ccc836df60cd99c731412fe44156c7fd057b99,A cascade framework for masked face detection,"School of Electronic and Information Engineering, Ningbo University of Technology, Ningbo, China"
+359b4a4c6cb58c8ab5e8eaaed0e8562c8c43a0f9,A modified kernel clustering method with multiple factors,"Department of Computer Science and Engineering, East China University of Science and Technology, Shanghai, People’s Republic of China"
+69a9cf9bc8e585782824666fa3fb5ce5cf07cef2,Captioning Videos Using Large-Scale Image Corpus,"School of Software Engineering, Chengdu University of Information Technology, Chengdu, China"
+69a9cf9bc8e585782824666fa3fb5ce5cf07cef2,Captioning Videos Using Large-Scale Image Corpus,"Sichuan University West China Hospital of Stomatology, Chengdu, China"
+69ba86f7aac7b7be0ac41d990f5cd38400158f96,Discriminative Feature Extraction by a Neural Implementation of Canonical Correlation Analysis,"Department of Computer Engineering, Bahçeşehir University, Istanbul, Turkey"
+3c7825dcf5a027bd07eb0fe4cce23910b89cf050,Nonnegative Matrix Factorization with Integrated Graph and Feature Learning,"Southern Illinois University at Carbondale, IL, USA"
+3c7825dcf5a027bd07eb0fe4cce23910b89cf050,Nonnegative Matrix Factorization with Integrated Graph and Feature Learning,"Yuncheng University, Shanxi Province, China"
+3c7825dcf5a027bd07eb0fe4cce23910b89cf050,Nonnegative Matrix Factorization with Integrated Graph and Feature Learning,"University of Hawaii at Hilo, HI, USA"
+3c086601ce0bac61047b5b931b253bd4035e1e7a,Occlusion handling in feature point tracking using ranked parts based models,"Information and media processing laboratories, NEC Corporation"
+3cbd3124b1b4f95fcdf53abd358d7ceec7861dda,Convolutions through time for multi-label movie genre classification,"Pontifícia Universidade Católica do RS, Porto Alegre-RS, Brazil"
+3c09d15b3e78f38618b60388ec9402e616fc6f8e,Neural networks recognition rate as index to compare the performance of fuzzy edge detectors,"School of Engineering of UABC, University of Baja California, Tijuana, Mexico"
+3cd380bd0f3b164b44c49e3b01f6ac9798b6b6f9,Anubhav: recognizing emotions through facial expression,"Electronics and Communication Sciences Unit, Indian Statistical Institute, Kolkata, India"
+3ca6adc90aae5912baa376863807191ffd56b34e,Exploring Facial Asymmetry Using Optical Flow,"School of Software, Shenyang University of Technology, Shenyang, China"
+56bcc89fb1e05d21a8b7b880c6b4df79271ceca5,Segmented face approximation with adaptive region growing based on low-degree polynomial fitting,"Department of Telecommunications and Information Processing, Image Processing and Interpretation, UGent/iMinds, Ghent, Belgium"
+56fa0872ed73f7acfbfe83677fecb2dbc6eaa2fe,Performance evaluation of incremental training method for face recognition using PCA,"Department of Computer Science and Engineering, JNTU College of Engineering, Kakinada, India"
+56fa0872ed73f7acfbfe83677fecb2dbc6eaa2fe,Performance evaluation of incremental training method for face recognition using PCA,"Department of Physics, JNTU College of Engineering, Kakinada, India"
+56fa0872ed73f7acfbfe83677fecb2dbc6eaa2fe,Performance evaluation of incremental training method for face recognition using PCA,"Department of Electronics and Communication Engineering, JNTU College of Engineering, Hyderabad, India"
+569988e19ab36582d4bd0ec98e344cbacf177f45,Affective Visual Perception Using Machine Pareidolia of Facial Expressions,"School of Electrical Engineering and Computer Science at the University of Newcastle, Callaghan, NSW 2308, Australia"
+569988e19ab36582d4bd0ec98e344cbacf177f45,Affective Visual Perception Using Machine Pareidolia of Facial Expressions,"School of Mathematical and Physical Sciences at the University of Newcastle, Callaghan, NSW 2308, Australia"
+518a3ce2a290352afea22027b64bf3950bffc65a,Finding iconic images,"Computer Science Dept., SUNY Stony Brook, USA"
+3d1f976db6495e2bb654115b939b863d13dd3d05,Labeling faces with names based on the name semantic network,"Computer Science College, Xi’an Polytechnic University, Xi’an, China"
+3d0b2da6169d38b56c58fe5f13342cf965992ece,Spatio-temporal representation for face authentication by using multi-task learning with human attributes,"Image and Video Systems Lab, School of Electrical Engineering, KAIST, Republic of Korea"
+583e0d218e1e7aaf9763a5493e7c18c2b8dd7464,Coded facial expression,Wakayama University
+58eb9174211d58af76023ce33ee05769de57236c,Submodular Attribute Selection for Visual Recognition,"Raytheon BBN Technologies, 10 Moulton St, Cambridge, MA"
+587b8c147c6253878128ddacf6e5faf8272842a4,Driving High-Resolution Facial Scans with Video Performance Capture,"University of Southern California Institute for Creative Technologies, Los Angeles, CA"
+58538cc418bf41197fad4fc4ee2449b2daeb08b1,Face recognition based on the fusion of wavelet packet sub-images and fisher linear discriminant,"College of Information and Electrical Engineering, Ludong University, Yantai, China"
+58538cc418bf41197fad4fc4ee2449b2daeb08b1,Face recognition based on the fusion of wavelet packet sub-images and fisher linear discriminant,"Faculty of Electrical Engineering and Computer Science, Ningbo University, Ningbo, China"
+67af3ec65f1dc535018f3671624e72c96a611c39,Safe binary particle swam algorithm for an enhanced unsupervised label refinement in automatic face annotation,"Department of Electronic Engineering, National Ilan University, Yilan City, Taiwan"
+67af3ec65f1dc535018f3671624e72c96a611c39,Safe binary particle swam algorithm for an enhanced unsupervised label refinement in automatic face annotation,"Department of Information Management, Hwa Hsia University of Technology, New Taipei City, Taiwan"
+0b58b3a5f153f653c138257426bf8d572ae35a67,Cloud-based facial emotion recognition for real-time emotional atmosphere assessment during a lecture,"Dept. of Cybernetics and Artificial Intelligence, FEI TU of Košice, Slovak Republic"
+939d28859c8bd2cca2d692901e174cfd599dac74,Facial expression recognition based on texture and shape,"The 28th Research Institute of China Electronics Technology Group Corporation, China"
+93978ba84c8e95ff82e8b5960eab64e54ca36296,AMHUSE: a multimodal dataset for HUmour SEnsing,"University of Tours, France"
+948f35344e6e063ffc35f10c547d5dd9204dee4e,Multi-Objective Differential Evolution for feature selection in Facial Expression Recognition systems,"University of Maribor, Faculty of Electrical Engineering and Computer Science, Koroška cesta 46, SI-2000, Slovenia"
+940e5c45511b63f609568dce2ad61437c5e39683,Fiducial Facial Point Extraction Using a Novel Projective Invariant,"School of Mathematics and Computer Sciences, Gannan Normal University, Ganzhou, China"
+0e05b365af662bc6744106a7cdf5e77c9900e967,"Assessment of female facial beauty based on anthropometric, non-permanent and acquisition characteristics","INRIA, Sophia Antipolis, France"
+0e4baf74dfccef7a99c6954bb0968a2e35315c1f,Gender identification from face images,"TÜBİTAK-BİLGEM-UEKAE, Anibal Cad., P.K.74, 41470, Gebze-KOCAELİ, Turkey"
+604a281100784b4d5bc1a6db993d423abc5dc8f0,Face Verification Across Age Progression Using Discriminative Methods,"Computer Science Department, University of California, Los Angeles, CA, USA"
+609d81ddf393164581b3e3bf11609a712ac47522,Fuzzy qualitative approach for micro-expression recognition,"Faculty of Computing and Information Technology, Setapak, Malaysia"
+603231c507bb98cc8807b6cbe2c860f79e8f6645,Unitary transform-based template protection and its properties,"NTT Network Innovation Laboratories, Nippon Telegraph and Telephone Corp."
+60284c37249532fe7ff6b14834a2ae4d2a7fda02,Compressive sensing based facial expression recognition,"Mechatronic Engineering Department, Mevlana University, Konya, Turkey"
+6014eeb333998c2b2929657d233ebbcb1c3412c9,Discovering the City by Mining Diverse and Multimodal Data Streams,"Academia Sinica, Taipei, Taiwan Roc"
+34c2ea3c7e794215588c58adf0eaad6dc267d082,Multi-modal emotion recognition using semi-supervised learning and multiple neural networks in the wild,"Inha University, South Korea"
+5a0ae814be58d319dfc9fd98b058a2476801201c,Sparse margin–based discriminant analysis for feature extraction,"School of Computer Science and Technology, Nanjing University of Science and Technology of China, Nanjing, People’s Republic of China"
+5fea59ccdab484873081eaa37af88e26e3db2aed,Capacitive Sensor for Tagless Remote Human Identification Using Body Frequency Absorption Signatures,"Department of Electronics and Telecommunications, Politecnico di Torino, Torino, Italy"
+5fc97d6cb5af21ed196e44f22cee31ce8c51ef13,NeuroDSP Accelerator for Face Detection Application,"UMR CNRS - Univ. Bourgogne, Dijon, France"
+5fc97d6cb5af21ed196e44f22cee31ce8c51ef13,NeuroDSP Accelerator for Face Detection Application,"CEA, Gif-Sur-Yvette, France"
+5fb9944b18f5a4a6d20778816290ed647f5e3853,Wearable for Wearable: A Social Signal Processing Perspective for Clothing Analysis using Wearable Devices,"Università degli Studi di Verona, Verona, Italy"
+5fa6f72d3fe16f9160d221e28da35c1e67a5d951,A 700fps optimized coarse-to-fine shape searching based hardware accelerator for face alignment,"Institute of Semiconductors, Chinese Academy of Sciences&University of Chinese Academy of Sciences, Beijing, China"
+5f0d4657eab4152a1785ee0a25b5b499cd1163ec,Bandit Framework for Systematic Learning in Wireless Video-Based Face Recognition,"University of California, Los Angeles, CA Dept. of Electrical Engineering"
+33c2131cc85c0f0fef0f15ac18f28312347d9ba4,Edited AdaBoost by weighted kNN,"Systems Engineering Institute, Xi’an Jiaotong University, Xi’an 710049, China"
+336488746cc76e7f13b0ec68ccfe4df6d76cdc8f,Adaptive Cascade Regression Model For Robust Face Alignment,"Department of Information and Control, B-DAT Laboratory, Nanjing University of Information and Technology, Nanjing, China"
+335435a94f8fa9c128b9f278d929c9d0e45e2510,CREMA-D: Crowd-Sourced Emotional Multimodal Actors Dataset,"Department of Mathematics and Computer Science, Ursinus College, Collegeville, PA"
+331d6ace8d59fa211e5bc84a93fdc65695238c69,Iterative column subset selection,"Department of Computer Systems, Universidad Politécnica de Madrid, Madrid, Spain"
+052fb35f731680d9d4e7d89c8f70f14173efb015,A Survey on Mobile Social Signal Processing,"Intel Labs Europe, Pipers Way, Swindon"
+053ee4a4793f54b02dfabde5436fd7ee479e79eb,Landmark-based Facial Expression Parametrization for Sign Languages Avatar Animation,"Universidade Federal do Paraná, Curitiba, Brazil"
+0553c6b9ee3f7d24f80e204d758c94a9d6b375d2,Face identification from one single sample face image,"Dept. of Appl. Phys. & Electron., Umea Univ., Sweden"
+0532cbcf616f27e5f6a4054f818d4992b99d201d,Class specific centralized dictionary learning for face recognition,"College of Information and Control Engineering, China University of Petroleum, Qingdao, China"
+05c5134125a333855e8d25500bf97a31496c9b3f,Robust Multi-Modal Cues for Dyadic Human Interaction Recognition,"University of Tunis El Manar, Tunis, Tunisia"
+9d01eca806e0f98c5b3c9a865cec1bd8c78e0f0c,A decentralised multimodal integration of social signals: a bio-inspired approach,"University of St. Andrews, UK"
+9df86395c11565afa8683f6f0a9ca005485c5589,"Facial expression recognition using active contour-based face detection, facial movement-based feature extraction, and non-linear feature selection","Department of Electronic Engineering, Kwangwoon University, Seoul, Republic of Korea"
+9d3377313759dfdc1a702b341d8d8e4b1469460c,Cast2Face: Assigning Character Names Onto Faces in Movie With Actor-Character Correspondence,"School of Software, Beijing Institute of Technology, Beijing, China"
+9c686b318cb7774b6da5e2c712743a5a6cafa423,Increasingly complex representations of natural movies across the dorsal stream are shared between subjects,"Radboud University, Donders Institute for Brain, Cognition and Behaviour, Nijmegen, The Netherlands"
+9c81d436b300494bc88d4de3ac3ec3cc9c43c161,Discriminative unsupervised 2D dimensionality reduction with graph embedding,"School of Information Science and Technology, Northwest University, Xi’an, China"
+9c81d436b300494bc88d4de3ac3ec3cc9c43c161,Discriminative unsupervised 2D dimensionality reduction with graph embedding,"Faculty of Electrical Engineering and Informatics, Budapest University of Technology and Economics, Budapest, Hungary"
+023decb4c56f2e97d345593e4f7b89b667a6763d,Generalized Low Rank Approximations of Matrices,"Department of Computer Science & Engineering, University of Minnesota-Twin Cities, Minneapolis, USA"
+02fc9e7283b79183eb3757a9b6ddeb8c91c209bb,High-dimensional multimedia classification using deep CNN and extended residual units,"Department of Computer Science & Engineering, Jamia Hamdard University, New Delhi, India"
+a4bb791b135bdc721c8fcc5bdef612ca654d7377,Location-sensitive sparse representation of deep normal patterns for expression-robust 3D face recognition,"Department of Mathematics and Informatics, Ecole Centrale de Lyon, Lyon, 69134, France"
+a4725a5b43e7c36d9e30028dff66958f892254a0,Emotion Recognition in the Wild: Incorporating Voice and Lip Activity in Multimodal Decision-Level Fusion,"Technische Universität München, Munich, Germany"
+a4725a5b43e7c36d9e30028dff66958f892254a0,Emotion Recognition in the Wild: Incorporating Voice and Lip Activity in Multimodal Decision-Level Fusion,"Technische Universität München / Imperial College London, Munich / London, England UK"
+a4e75766ef93b43608c463c233b8646439ce2415,Automatic real-time FACS-coder to anonymise drivers in eye tracker videos,"Department of Applied Mechanics, Chalmers University of Technology, SE-412 96 Göteborg, Sweden"
+a313851ed00074a4a6c0fccf372acb6a68d9bc0b,Reliable facial expression recognition for multi-scale images using weber local binary image based cosine transform features,"Department of Computer Science, Shaheed Zulfikar Ali Bhutto Institute of Science and Technology, Islamabad, Pakistan"
+a313851ed00074a4a6c0fccf372acb6a68d9bc0b,Reliable facial expression recognition for multi-scale images using weber local binary image based cosine transform features,"Department of Computer Science and Software Engineering, International Islamic University, Islamabad, Pakistan"
+a38dd439209b0913b14b1c3c71143457d8cf9b78,Face recognition in unconstrained environments,"Computer Science and Engineering Dept., University of Nevada Reno, USA"
+b5979489e11edd76607c219a8bdc83ba4a88ab38,Action Recognition in Video Sequences using Deep Bi-Directional LSTM With CNN Features,"Department of Computer Science, Digital Image Processing Laboratory, Islamia College Peshawar, Peshawar, Pakistan"
+b55e70df03d9b80c91446a97957bc95772dcc45b,MixedEmotions: An Open-Source Toolbox for Multimodal Emotion Analysis,"GSI Universidad Polit-écnica de Madrid, Madrid, Spain"
+b55e70df03d9b80c91446a97957bc95772dcc45b,MixedEmotions: An Open-Source Toolbox for Multimodal Emotion Analysis,"Phonexia, Brno-Krlovo Pole, Czech Republic"
+b5f9306c3207ac12ac761e7d028c78b3009a219c,Age estimation based on extended non-negative matrix factorization,"School of Computer Science and Software Engineering University of Wollongong, Australia"
+b50edfea790f86373407a964b4255bf8e436d377,Group emotion recognition with individual facial emotion CNNs and global image based CNNs,"SIAT at Chinese Academy of Sciences, China"
+b208f2fc776097e98b41a4ff71c18b393e0a0018,Efficient Design of Advanced Correlation Filters for Robust Distortion-Tolerant Face Recognition,"Dept. of Electr. & Comput. Eng., Carnegie Mellon Univ., Pittsburgh, PA, USA"
+d91a5589fd870bf62b7e4979d9d47e8acf6c655d,Face recognition method based on dynamic threshold local binary pattern,"Chongqing University of Posts and Telecommunications Chongqing, China"
+aca728cab26b95fbe04ec230b389878656d8af5b,Knowledge Computing and its Applications,"School of Computing Science and Engineering, VIT University, Vellore, India"
+aca728cab26b95fbe04ec230b389878656d8af5b,Knowledge Computing and its Applications,"The Maersk Mc-Kinney Moller Institute, University of Southern Denmark, Odense M, Denmark"
+acff2dc5d601887741002a78f8c0c35a799e6403,Artificial Intelligence Applications and Innovations,"Department of Computer Science and Engineering, Frederick University, Nicosia, Cyprus"
+ac37285f2f5ccf99e9054735a36465ee35a6afdd,Complete Kernel Fisher discriminant analysis of Gabor features with fractional power polynomial models for face recognition,"Dept. of Autom. Test & Control, Harbin Inst. of Technol., China"
+ad6cc071b2585e4bdb6233b7ad8d63e12538537d,Effective multiplicative updates for non-negative discriminative learning in multimodal dimensionality reduction,"Department of Computer Science and Technology, Nanjing Forestry University, Nanjing, China"
+ad6cc071b2585e4bdb6233b7ad8d63e12538537d,Effective multiplicative updates for non-negative discriminative learning in multimodal dimensionality reduction,"Department of Language Studies, Nanjing Forestry University, Nanjing, China"
+ad6cc071b2585e4bdb6233b7ad8d63e12538537d,Effective multiplicative updates for non-negative discriminative learning in multimodal dimensionality reduction,"Department of Computer Science and Technology, Nanjing Forestry University and Shandong University, Jinan, China"
+ad9ba7eade9d4299159512d6d5d07d7d3d26ae58,Feature Extraction Based on Maximum Nearest Subspace Margin Criterion,"School of Information Engineering, Jiangxi Manufacturing Technology College, Nanchang, China"
+ad77056780328bdcc6b7a21bce4ddd49c49e2013,Face verification based on deep reconstruction network,"School of Automation and Information Engineering, Xi'an University of Technology, Xi'an, China"
+ada063ce9a1ff230791c48b6afa29c401a9007f1,Biometric Recognition,"Xinjiang University, Urumqi, China"
+ada56c9ceef50aa5159f1f8aa45ca2040d1ed15c,Soft Biometrics: Globally Coherent Solutions for Hair Segmentation and Style Recognition Based on Hierarchical MRFs,"Department of Computer Science, IT: Instituto de Telecomunicações, University of Beira Interior, Covilhã, Portugal"
+bb2f61a057bbf176e402d171d79df2635ccda9f6,Multi-modal joint embedding for fashion product retrieval,Institut de Robòtica i Informàtica Industrial (CSIC-UPC)
+bb2f61a057bbf176e402d171d79df2635ccda9f6,Multi-modal joint embedding for fashion product retrieval,Wide Eyes Technologies
+bb83d5c7c17832d1eef14aa5d303d9dd65748956,Predicting student engagement in classrooms using facial behavioral cues,"IIIT Bangalore, India"
+bbc8ccd3f62615e3c0ce2c3aee5e4a223d215bbd,Towards robust automatic affective classification of images using facial expressions for practical applications,"Faculty of Computer Science and Engineering, Xi’an University of Technology, Xi’an, China"
+bbc8ccd3f62615e3c0ce2c3aee5e4a223d215bbd,Towards robust automatic affective classification of images using facial expressions for practical applications,"Research & Development, British Broadcasting Corporation (BBC), London, UK"
+d7dd35a86117e46d24914ef49ccd99ea0a7bf705,SAGA: sparse and geometry-aware non-negative matrix factorization through non-linear local embedding,"IRISA, Université de Bretagne Sud, Vannes, France"
+d7dd35a86117e46d24914ef49ccd99ea0a7bf705,SAGA: sparse and geometry-aware non-negative matrix factorization through non-linear local embedding,"Costel, Université de Rennes 2, Rennes, France"
+d7dd35a86117e46d24914ef49ccd99ea0a7bf705,SAGA: sparse and geometry-aware non-negative matrix factorization through non-linear local embedding,"NLPR, Institute of Automation, Chinese Academy of Science, Beijing, People’s Republic of China"
+d7dd35a86117e46d24914ef49ccd99ea0a7bf705,SAGA: sparse and geometry-aware non-negative matrix factorization through non-linear local embedding,"CEA (iRSTV/BGE), INSERM (U1038), CNRS (FR3425), Université Grenoble-Alpes, Grenoble, France"
+d79530e1745b33f3b771d0b38d090b40afc04191,A new method to estimate ages of facial image for large database,"College of Computer Science and Technology of Huaqiao University, Xiamen, China"
+d7b7253f7d8b397d9d74057e1e72ed9c58e2ba6d,Towards Improving Social Communication Skills With Multimodal Sensory Information,"School of Computer Science, China University of Geosciences, Wuhan, China"
+d046030f7138e5a2dbe2b3eec1b948ad8c787538,Illumination invariant face recognition in logarithm Discrete Cosine Transform domain,"Faculty of Engineering, Ain Shams University, Computer and Systems Engineering Department, Cairo, Egypt"
+d0f9143f6f43a39bff47daf8c596681581db72ea,Implementation of an improved facial emotion retrieval method in multimedia system,"School of Computer Engineering, Hanshin University, Osan, Republic of Korea"
+d066575b48b552a38e63095bb1f7b56cbb1fbea4,The performance of corrected learning network for object recognition,"School of Information and Software Engineering, University of Electronic Science and Technology of China (UESTC), Chengdu, 610054, China P.R.C"
+bec0c33d330385d73a5b6a05ad642d6954a6d632,"Ranking, clustering and fusing the normalized LBP temporal facial features for face recognition in video sequences","Department of Computer Science and Engineering, Mepco Schlenk Engineering College, Sivakasi, India"
+bec0c33d330385d73a5b6a05ad642d6954a6d632,"Ranking, clustering and fusing the normalized LBP temporal facial features for face recognition in video sequences","Department of Electrical & Electronics Engineering, Kalasalingam University, Krishnankoil, India"
+be4faea0971ef74096ec9800750648b7601dda65,Feature Analysis of Unsupervised Learning for Multi-task Classification Using Convolutional Neural Network,"School of Electronics Engineering, Kyungpook National University, Taegu, South Korea"
+b3ad7bc128b77d9254aa38c5e1ead7fa10b07d29,Recognizing Actions in Wearable-Camera Videos by Training Classifiers on Fixed-Camera Videos,"Tianjin University & University of South Carolina, Tianjin, China"
+b3add9bc9e70b6b28ba31e843e9155e7c37f3958,Parallel Heat Kernel Volume Based Local Binary Pattern on Multi-Orientation Planes for Face Representation,"College of Information and Technology, Incheon National University, Incheon, Korea"
+b3e60bb5627312b72c99c5ef18aa41bcc1d21aea,Class specific dictionary learning for face recognition,"School of Computer Science Carnegie Mellon University Pittsburgh, PA, 15213, USA"
+df550cb749858648209707bec5410431ea95e027,Local Laplacian Coding From Theoretical Analysis of Local Coding Schemes for Locally Linear Classification,"School of Computer Science and Technology, Harbin Institute of Technology at Weihai, Weihai, China"
+da2b2be4c33e221c7f417875a6c5c74043b1b227,Score normalization in stratified biometric systems,"Center for Unified Biometrics and Sensors, University at Buffalo, NY, USA"
+dae9d0a9b77366f0cd52e38847e47691ee97bc1f,Multimodal emotion recognition based on peak frame selection from video,"INRS-EMT, Montreal, Canada"
+dae9d0a9b77366f0cd52e38847e47691ee97bc1f,Multimodal emotion recognition based on peak frame selection from video,"University of Udine, Udine, Italy"
+b472f91390781611d4e197564b0016d9643a5518,Facial expression recognition using geometric and appearance features,"Central China Normal University, Wuhan, China"
+b472f91390781611d4e197564b0016d9643a5518,Facial expression recognition using geometric and appearance features,"China University of Geosciences Wuhan, China"
+b41d585246360646c677a8238ec35e8605b083b0,Student engagement study based on multi-cue detection and recognition in an intelligent learning environment,"Faculty of Information Engineering, China University of Geosciences, Wuhan, China"
+a59c0cf3d2c5bf144ee0dbc1152b1b5dd7634990,Big Data Analysis for Media Production,"Department of Computer Graphics and Multimedia, University of Brno, Brno, Czech Republic"
+a5f70e0cd7da2b2df05fadb356a24743f3cf459a,Robust Video Face Recognition Under Pose Variation,"University of Science and Technology Beijing, Beijing, China"
+bddc822cf20b31d8f714925bec192c39294184f7,Facial expression recognition based on local binary patterns,"Northwestern Polytechnic University, Xi’an, China"
+d141c31e3f261d7d5214f07886c1a29ac734d6fc,Unsupervised Video Hashing via Deep Neural Network,"Liaocheng University, Liaocheng, China"
+d1edb8ba9d50817dbfec7e30f25b1846941e84d8,Blind late fusion in multimedia event retrieval,"TNO, Oude Waalsdorperweg, AK The Hague, The Netherlands"
+d1edb8ba9d50817dbfec7e30f25b1846941e84d8,Blind late fusion in multimedia event retrieval,"Radboud University, EC Nijmegen, The Netherlands"
+d1edb8ba9d50817dbfec7e30f25b1846941e84d8,Blind late fusion in multimedia event retrieval,"City University, Kowloon Tong, Hong Kong"
+d1edb8ba9d50817dbfec7e30f25b1846941e84d8,Blind late fusion in multimedia event retrieval,"TNO, The Hague, The Netherlands"
+d1edb8ba9d50817dbfec7e30f25b1846941e84d8,Blind late fusion in multimedia event retrieval,"Leiden University, Leiden, The Netherlands"
+d12bea587989fc78b47584470fd8f689b6ab81d2,Robust Face Representation Using Hybrid Spatial Feature Interdependence Matrix,"LIAMA, French National Institute for Research in Computer Science and Control, Paris, France"
+d628aabf1a666a875e77c3d3fee857cd25891947,Eye detection in unrestrained settings using efficient match kernels and SVM classification,"Department of Mechanical Engineering, University of Brasília, DF, Brazil 70910-900"
+d628aabf1a666a875e77c3d3fee857cd25891947,Eye detection in unrestrained settings using efficient match kernels and SVM classification,"Department of Computer Science, University of Brasília, DF, Brazil 70910-900"
+d62d82c312c40437bc4c1c91caedac2ba5beb292,Super Wide Regression Network for Unsupervised Cross-Database Facial Expression Recognition,"School of Computer Science and Engineering, Tianjin University of Technology, China"
+d62d82c312c40437bc4c1c91caedac2ba5beb292,Super Wide Regression Network for Unsupervised Cross-Database Facial Expression Recognition,"School of Electrical and Electronic Engineering, Tianjin University of Technology, China"
+bc607bee2002c6c6bf694a15efd0a5d049767237,A novel large-scale multimedia image data classification algorithm based on mapping assisted deep neural network,"Infosys Limited, Bhubaneswar, India"
+bcf2710d46941695e421226372397c9544994214,Facial expression recognition based on transfer learning from deep convolutional networks,"School of Science, Southwest Petroleum University, Chengdu, China"
+bc9bad25f8149318314971d8b8c170064e220ea8,Triplet CNN and pedestrian attribute recognition for improved person re-identification,"Thales Services, ThereSIS, Palaiseau, France"
+aea977a3b5556957ed5fb3ef21685ee84921eaa3,Dynamic facial landmarking selection for emotion recognition using Gaussian processes,"Automatics Research Group, Universidad Tecnológica de Pereira, Pereira, Colombia"
+d878a67b2ef6a0a5dec72db15291f12419040ab1,Using web images as additional training resource for the discriminative generalized hough transform,"Institute of Applied Computer Science, Kiel University of Applied Sciences, Kiel, Germany"
+d8e5d94c3c8688f0ca0ee656c79847c7df04c77d,Voice activity detection based on facial movement,"Tilburg center for Cognition and Communication, Tilburg University, Tilburg, The Netherlands"
+d855791bc23b4aa8e751d6a4e2ae7f5566a991e8,"Evaluation of Facial Expression Recognition by a Smart Eyewear for Facial Direction Changes, Repeatability, and Positional Drift","National Institute of Advanced Industrial Science Technology, Japan"
+d833268a8ea9278e68aaf3bd9bc2c11a5bb0bab7,Two-class 3D-CNN classifiers combination for video copy detection,"School of Information Science and Technology, Shandong Normal University, Jinan, China"
+d833268a8ea9278e68aaf3bd9bc2c11a5bb0bab7,Two-class 3D-CNN classifiers combination for video copy detection,"School of Mechanical and Electrical Engineering, Shandong Management University, Jinan, China"
+ab68837d09986c592dcab7d08ee6dfb40e02916f,Enhanced Face Preprocessing and Feature Extraction Methods Robust to Illumination Variation,"Dept. of Convergence, Daegu Gyeongbuk Institute of Science & Technology (DGIST), Daegu, Korea"
+ab703224e3d6718bc28f7b9987eb6a5e5cce3b01,Unseen head pose prediction using dense multivariate label distribution,"College of Mathematics and Information Engineering, Jiaxing University, Jiaxing, China"
+ab2c07c9867243fad2d66fa6aeabfb780433f319,An Unscented Kalman filter based novel face detector and its robust system for illumination variant images using stochastic resonance,"Ashikaga Institute of Technology, Ashikaga, Japan"
+abf573864b8fbc0f1c491ca60b60527a3e75f0f5,A new deformable mesh model for face tracking using edge based features and novel sets of energy functions,"Electrical and Electronic Engineering Department, Faculty of Engineering, Shahed University, Tehran, Iran"
+e50ec6b6d1c189edc127eb403c41a64f34fc0a6c,Learning Flexible Block based Local Binary Patterns for unconstrained face detection,"Media Technology Lab, Huawei Technologies Co., Ltd"
+e55f7250f3b8ee722814f8809620a851c31e5b0e,Feature Extraction and Filter Design for Eye Pattern Analysis,*
+e55f7250f3b8ee722814f8809620a851c31e5b0e,Feature Extraction and Filter Design for Eye Pattern Analysis,**
+e57014b4106dd1355e69a0f60bb533615a705606,Following event detection method based on human skeleton motion analysis by Kinect sensor,"Institute of Energy, Jiangxi Academy of Sciences, Nanchang, China"
+e57014b4106dd1355e69a0f60bb533615a705606,Following event detection method based on human skeleton motion analysis by Kinect sensor,"School of Materials Science and Engineering, Central South University, Changsha, China"
+e26a7e343fe109e2b52d1eeea5b02dae836f3502,Facial Expression Recognition Utilizing Local Direction-Based Robust Features and Deep Belief Network,"Department of Informatics, Modeling, Electronics, and Systems, University of Calabria, Rende, Italy"
+e2b3aae594035e58f72125e313e92c7c4cc9d5bb,Real-time moustache detection by combining image decolorization and texture detection with applications to facial gender recognition,"Institute for Infocomm Research, A*STAR, Singapore, Singapore"
+f423d8be5e13d9ef979debd3baf0a1b2e1d3682f,Approaching human level facial landmark localization by deep learning,"Megvii Inc., Beijing, China"
+f3553148e322f4f64545d6667dfbc7607c82703a,Can computer vision problems benefit from structured hierarchical classification?,"Signal and Image Exploitation (INTELSIG), Montefiore Institute, University of Liège, Liège, Belgium"
+f3553148e322f4f64545d6667dfbc7607c82703a,Can computer vision problems benefit from structured hierarchical classification?,"Intelligent and Interactive Systems, Institute of Computer Science, University of Innsbruck, Innsbruck, Austria"
+eb02daee558e483427ebcf5d1f142f6443a6de6b,The Science and Detection of Tilting,"University of Helsinki, Helsinki, Finland"
+eb02daee558e483427ebcf5d1f142f6443a6de6b,The Science and Detection of Tilting,"University of Lancaster, Lancaster, United Kingdom"
+ebc2a3e8a510c625353637e8e8f07bd34410228f,Dual Sparse Constrained Cascade Regression for Robust Face Alignment,"B-DAT Laboratory, School of Information and Control, Nanjing University of Information and Technology, Nanjing, China"
+ebeb0546efeab2be404c41a94f586c9107952bc3,Multi-cue Augmented Face Clustering,"Tianjin Universtiy, Tianjin, China"
+eb8a3948c4be0d23eb7326d27f2271be893b3409,A Probabilistic Approach to People-Centric Photo Selection and Sequencing,"Amazon, Berkshire, U.K."
+ee1f9637f372d2eccc447461ef834a9859011ec1,Optimized learning instance-based image retrieval,"College of Information Science & Technology, Hebei Agricultural University, Baoding, China"
+ee1f9637f372d2eccc447461ef834a9859011ec1,Optimized learning instance-based image retrieval,"Standards & Metrology Research Institute of CARS, Beijing, China"
+ee1f9637f372d2eccc447461ef834a9859011ec1,Optimized learning instance-based image retrieval,"No.1 Senior Middle School of Wendeng District, Weihai, China"
+ee1f9637f372d2eccc447461ef834a9859011ec1,Optimized learning instance-based image retrieval,"Lawrence Berkeley National Laboratory, Berkeley, USA"
+c997744db532767ee757197491d8ac28d10f1c0f,A real-time emotion recognition system for disabled persons,"University of Tunis, The National Higher school of engineers of Tunis (ENSIT), Laboratory of Signal Image and Energy Mastery, LR13ES03 (SIME), Tunis, Tunisia"
+c914d2ba06ec3fd1baa0010dcc4d16c7c34fc225,Engineering Applications of Neural Networks,"Dept. of Theoretical Electrical Engineering, Technical University of Sofia, Sofia, Bulgaria"
+c914d2ba06ec3fd1baa0010dcc4d16c7c34fc225,Engineering Applications of Neural Networks,"Faculty of Engineering and Computing, Coventry University, UK"
+fcf393a90190e376b617cc02e4a473106684d066,A sparse neighborhood preserving non-negative tensor factorization algorithm for facial expression recognition,"Beijing Key Laboratory of Advanced Information Science and Network Technology, Beijing, China"
+fcf393a90190e376b617cc02e4a473106684d066,A sparse neighborhood preserving non-negative tensor factorization algorithm for facial expression recognition,"Baidu Online Network Technology (Beijing) Co. Ltd, Beijing, China"
+fc8990088e0f1f017540900bc3f5a4996192ff05,Hierarchical bilinear network for high performance face detection,"Chongqing Institute of Green and Intelligent Technology, CAS, Chongqing, 400714"
+fcb276874cd932c8f6204f767157420500c64bd0,A Comparative Study of Linear Discriminant and Linear Regression Based Methods for Expression Invariant Face Recognition,"Department of Computer Science and Engineering, National Institute of Technology, Uttarakhand, India"
+fcb276874cd932c8f6204f767157420500c64bd0,A Comparative Study of Linear Discriminant and Linear Regression Based Methods for Expression Invariant Face Recognition,"School of Computer & System Sciences, Jawaharlal Nehru University, New Delhi, India"
+fcb276874cd932c8f6204f767157420500c64bd0,A Comparative Study of Linear Discriminant and Linear Regression Based Methods for Expression Invariant Face Recognition,"S. S. College of Business Studies, University of Delhi, Delhi, India"
+fde611bf25a89fe11e077692070f89dcdede043a,Facial Expression recognition using Local Binary Patterns and Kullback Leibler divergence,"Department of ECE, National Institute of Technology, Rourkela (Odisha), India"
+f28d549feffd414f38147d5e0460883fb487e2d3,Modular discriminant analysis and its applications,"School of Medical Science, Jinhua Polytechnic, Jinhua, China"
+f28d549feffd414f38147d5e0460883fb487e2d3,Modular discriminant analysis and its applications,"Department of Computer Science and Engineering, University of Texas, Arlington, USA"
+f28d549feffd414f38147d5e0460883fb487e2d3,Modular discriminant analysis and its applications,"School of Information and Engineering, Jinhua Polytechnic, Jinhua, China"
+f2896dd2701fbb3564492a12c64f11a5ad456a67,Cross-database age estimation based on transfer learning,"Department of CSE, University at Buffalo (SUNY), NY 14260, USA"
+f201baf618574108bcee50e9a8b65f5174d832ee,Viewpoint-Consistent 3D Face Alignment,"Snapchat Research, Venice, CA90291"
+f557df59cd088ffb8e27506d8612d062407e96f4,Multimedia event detection with ℓ2-regularized logistic Gaussian mixture regression,"School of Computer Science, Wuyi University, Jiangmen, China"
+f557df59cd088ffb8e27506d8612d062407e96f4,Multimedia event detection with ℓ2-regularized logistic Gaussian mixture regression,"Adjunct, Effat University, Jeddah, Saudi Arabia"
+cfba667644508853844c45bfe5d0b8a2ffb756d3,Robust gender classification using extended multi-spectral imaging by exploring the spectral angle mapper,"Department of Electronics, University of Goa, India"
+ca096e158912080493a898b0b8a4bd2902674fed,Up to a Limit?: Privacy Concerns of Bystanders and Their Willingness to Share Additional Information with Visually Impaired Users of Assistive Technologies,"Microsoft Research India Pvt. Ltd, Bangalore, Karnataka, India"
+ca902aeec4fa54d32a4fed9ba89a7fb2f7131734,Entropy-based active sparse subspace clustering,"Tianjin Key Laboratory of Optoelectronic Detection Technology and Systems, Tianjin, China"
+ca902aeec4fa54d32a4fed9ba89a7fb2f7131734,Entropy-based active sparse subspace clustering,"School of Electronics and Information Engineering, Tianjin Polytechnic University, Tianjin, China"
+cab3c6069387461c3a9e5d77defe9a84fe9c9032,A Novel multiple kernel-based dictionary learning for distributive and collective sparse representation based classifiers,"Electrical Engineering Department, Yazd University, Yazd, Iran"
+e4ad82afc563b783475ed45e9f2cd4c9e2a53e83,New distances combination for facial expression recognition from image sequences,"Laboratory LAROSERI, Department of Computer Science, Faculty of Sciences, University of Chouaib Doukkali, El Jadida - Morocco"
+e4ad82afc563b783475ed45e9f2cd4c9e2a53e83,New distances combination for facial expression recognition from image sequences,"Laboratory LIM, Department of Computer Science, Faculty of Sciences and Technologies, University Hassan II, Casablanca-Morocco"
+e4b825bf9d5df47e01e8d7829371d05208fc272d,Recognition of Facial Attributes Using Multi-Task Learning of Deep Networks,"School of Computer Science, Kyungpook National University, Buk-gu, Daegu, The Republic of Korea"
+fef6f1e04fa64f2f26ac9f01cd143dd19e549790,Spatio-Temporal AutoEncoder for Video Anomaly Detection,"Shanghai Jiao Tong University & Alibaba Group, Shanghai, China"
+fef6f1e04fa64f2f26ac9f01cd143dd19e549790,Spatio-Temporal AutoEncoder for Video Anomaly Detection,"Zhejiang University & Alibaba Group, Hangzhou, China"
+c833c2fb73decde1ad5b5432d16af9c7bee1c165,Homotopic Image Pseudo-Invariants for Openset Object Recognition and Image Retrieval,"University of Illinois, Urbana-Champaign"
+c8fb8994190c1aa03c5c54c0af64c2c5c99139b4,Pose-invariant descriptor for facial emotion recognition,"Research Center for Science and Technology in Medicine, Tehran University of Medical Sciences, Tehran, Iran"
+c858c74d30c02be2d992f82a821b925669bfca13,Computer Vision – ECCV 2014,"Max-Planck-Institut für Informatik, Saarbrücken, Germany"
+c858c74d30c02be2d992f82a821b925669bfca13,Computer Vision – ECCV 2014,"KU Leuven, ESAT - PSI, iMinds, Leuven, Belgium"
+c843f591658ca9dbb77944a89372a92006defe68,Learning motion and content-dependent features with convolutions for action recognition,"School of Information Science and Engineering, Hunan city University, Yiyang, China"
+c872d6310f2079db0cee0e69cc96da1470055225,Heterogeneous Multi-task Learning on Non-overlapping Datasets for Facial Landmark Detection,"Information Technology R&D Center, Mitsubishi Electric Corporation, Kamakura, Japan"
+fb3aaf18ea07b30d1836e7cf2ab9fa898627fe93,Facial Expression Recognition Using Weighted Mixture Deep Neural Network Based on Double-Channel Facial Images,"Department of Information Science and Engineering, Changzhou University, Changzhou, China"
+fb7bf10cbc583db5d5eee945aa633fcb968e01ad,A novel weighted fuzzy LDA for face recognition using the genetic algorithm,"School of Control Science and Engineering DUT, Dalian, China"
+fbc591cde7fb7beb985437a22466f9cf4b16f8b1,Illumination Robust Face Recognition Using Spatial Expansion Local Histogram Equalization and Locally Linear Regression Classification,"Dept. of Comp. Sci. and Inf. Eng, Chung Hua University, Hsinchu, Taiwan"
+fbc591cde7fb7beb985437a22466f9cf4b16f8b1,Illumination Robust Face Recognition Using Spatial Expansion Local Histogram Equalization and Locally Linear Regression Classification,"Dept. of Comp. Sci. and Inf. Eng, National United University, Miaoli, Taiwan"
+fb6cc23fd6bd43bd4cacf6a57cd2c7c8dfe5269d,An experimental study on content-based face annotation of photos,"Psychology Department, University of California, Santa Barbara, CA 93106 USA"
+fb6cc23fd6bd43bd4cacf6a57cd2c7c8dfe5269d,An experimental study on content-based face annotation of photos,"Electrical and Computer Engineering Department, University of California, Santa Barbara, CA 93106 USA"
+fb3ff56ab12bd250caf8254eca30cd97984a949a,Face recognition Face2vec based on deep learning: Small database case,"Institute of Electronics and Computer Science, Riga, Latvia"
+fb2bd6c2959a4f811b712840e599f695dad2967e,Environmental illumination invariant face recognition using near infrared imaging system,"School of Electrical and Computer Engineering, Ulsan National Institute of Science and Technology (UNIST), UNIST-gil 50, 689-798, Korea"
+fba386ac63fe87ee5a0cf64bf4fb90324b657d61,Dynamic texture and geometry features for facial expression recognition in video,"Department of Computer Science, Chu Hai College of Higher Education, Hong Kong"
+ed94e7689cdae87891f08428596dec2a2dc6a002,Distributed sketched subspace clustering for large-scale datasets,"Dept. of ECE and Digital Technology Center, Univ. of Minnesota, USA"
+ed779cc4f026f6ac22f5ef0c34126138e1ebc8b2,Audio Visual Recognition of Spontaneous Emotions In-the-Wild,"NPU-VUB Joint AVSP Lab, School of Computer Science, Northwestern Polytechnical University (NPU), Xi’an, China"
+ed779cc4f026f6ac22f5ef0c34126138e1ebc8b2,Audio Visual Recognition of Spontaneous Emotions In-the-Wild,"Shaanxi Key Laboratory on Speech and Image Information Processing, Xi’an, China"
+ed779cc4f026f6ac22f5ef0c34126138e1ebc8b2,Audio Visual Recognition of Spontaneous Emotions In-the-Wild,"NPU-VUB Joint AVSP Lab, Department ETRO, Vrije Universiteit Brussel (VUB), Brussels, Belgium"
+ed779cc4f026f6ac22f5ef0c34126138e1ebc8b2,Audio Visual Recognition of Spontaneous Emotions In-the-Wild,"Interuniversity Microelectronics Centre, Heverlee, Belgium"
+edf60d081ffdfa80243217a50a411ab5407c961d,Recognizing an Action Using Its Name: A Knowledge-Based Approach,"QCIS, University of Technology, Sydney, Australia"
+edf60d081ffdfa80243217a50a411ab5407c961d,Recognizing an Action Using Its Name: A Knowledge-Based Approach,"HTC Research, Beijing, China"
+c65cfc9d3568c586faf18611c4124f6b7c0c1a13,A framework for face classification under pose variations,"Dept. of E & TC Engineering, Maharashtra Institute of Technology, Pune, India"
+c6bbb56a26222bdb8ce7dd829cff38b67d4b03cd,Multiple instance learning with missing object tags,"Northwestern Polytechnical University Xian, P. R. China"
+c61a8940d66eed9850b35dd3768f18b59471ca34,Facial action unit recognition using temporal templates,"Dept. of Mediamatics, Delft Univ. of Technol., Netherlands"
+c60601bdb5465d8270fdf444e5d8aeccab744e29,Rotation invariant Facial Expression Recognition in image sequences,"Institute for Infocomm Research, 1 Fusionopolis Way, #21-01 Connexis, Singapore 138632"
+ec1a57e609eda72b4eb60155fac12db1da31f6c0,Probabilistic Linear Discriminant Analysis,"Fujifilm Software, San Jose, USA"
+ec1bec7344d07417fb04e509a9d3198da850349f,Determine attention of faces through growing level of emotion using deep Convolution Neural Network,"Department of Computer Science and Engineering, National Institute of Technology, Durgapur, India"
+ec983394f800da971d243f4143ab7f8421aa967c,D-FES: Deep facial expression recognition system,"Department of Computer Science and Engineering, National Institute of Technology Uttarakhand, Srinagar Garhwal, India"
+ecdd83002f69c2ccc644d07abb44dd939542d89d,Linear dimensionality reduction based on Hybrid structure preserving projections,"Department of Computer Science and Technology, Xi’an Jiaotong University, Xi’an, Shaanxi, China"
+4e43408a59852c1bbaa11596a5da3e42034d9380,Efficient facial expression recognition using histogram of oriented gradients in wavelet domain,"Computer Science and Engineering Department, SP Memorial Institute of Technology, Kaushambi, India"
+4e43408a59852c1bbaa11596a5da3e42034d9380,Efficient facial expression recognition using histogram of oriented gradients in wavelet domain,"Department of Computer Science, Banasthali Vidyapith, Banasthali, India"
+4ef09fe9f7fa027427414cf1f2e9050ac7f5e34d,A proposed method for the improvement in biometric facial image recognition using document-based classification,"Department of Electronics and Communication Engineering, Institute of Road and Transport Technology, Erode, India"
+4ef09fe9f7fa027427414cf1f2e9050ac7f5e34d,A proposed method for the improvement in biometric facial image recognition using document-based classification,"Department of Electronics and Communication Engineering, P.P.G. Institute of Technology, Coimbatore, India"
+20a0f71d2c667f3c69df18f097f2b5678ac7d214,Exploiting sensing devices availability in AR/VR deployments to foster engagement,"Centre of Research and Technology Hellas, Thermi, Thessaloniki, Greece"
+20a0f71d2c667f3c69df18f097f2b5678ac7d214,Exploiting sensing devices availability in AR/VR deployments to foster engagement,"University of Maastricht, Maastricht, The Netherlands"
+20a0f71d2c667f3c69df18f097f2b5678ac7d214,Exploiting sensing devices availability in AR/VR deployments to foster engagement,"National Centre for Scientific Research “Demokritos”, Agia Paraskevi, Athens, Greece"
+20a0f71d2c667f3c69df18f097f2b5678ac7d214,Exploiting sensing devices availability in AR/VR deployments to foster engagement,"Technological Educational Institute of Sterea Ellada, Psahna, Halkida, Greece"
+20eeb83a8b6fea64c746bf993f9c991bb34a4b30,End-to-end temporal attention extraction and human action recognition,"Luoyang Electro-Optical Equipment Research Institute, Luoyang, People’s Republic of China"
+20b405d658b7bb88d176653758384e2e3e367039,Face recognition with manifold-based kernel discriminant analysis,"School of Electrical and Computer Engineering, College of Engineering, University of Tehran, Iran"
+205f035ec90a7fa50fd04fdca390ce83c0eea958,Emotion Recognition Using Multiple Kernel Learning toward E-learning Applications,"Sunway University, Selangor, Malaysia"
+18855be5e7a60269c0652e9567484ce5b9617caa,Local Centre of Mass Face for face recognition under varying illumination,"Department of Computer Science and Engineering, University of Calcutta, Kolkata, India"
+18bfda16116e76c2b21eb2b54494506cbb25e243,Face Recognition in Global Harmonic Subspace,"ECIT, School of Electronics, Electrical Engineering & Computer Science, Queen's University Belfast, Belfast, UK"
+18145b0b13aa477eeabef9ceec4299b60e87c563,Role-based identity recognition for TV broadcasts,"Siemens AG, Corporate Technology, Munich, Germany"
+18145b0b13aa477eeabef9ceec4299b60e87c563,Role-based identity recognition for TV broadcasts,"Fraunhofer Institute for Digital Media Technology, Ilmenau, Germany"
+18145b0b13aa477eeabef9ceec4299b60e87c563,Role-based identity recognition for TV broadcasts,"Fraunhofer Institute for Telecommunications, Berlin, Germany"
+270acff7916589a6cc9ca915b0012ffcb75d4899,On the Applications of Robust PCA in Image and Video Processing,"Laboratoire MIA, University of La Rochelle, La Rochelle, France"
+27e0684fa5b57715162ac6c58a6ea283c7db1719,Select eigenfaces for face recognition with one training sample per subject,"Dept. of Electr. & Comput. Eng., Toronto Univ., Ont., Canada"
+27aa23d7a05368a6b5e3d95627f9bab34284e5c4,Sparse representation based on matrix rank minimization and k-means clustering for recognition,"State Key Laboratory of Management and Control for Complex Systems, Institute of Automation Chinese Academy of Sciences, Beijing, China 100190"
+27a586a435efdcecb151c275947fe5b5b21cf59b,Very Fast Semantic Image Segmentation Using Hierarchical Dilation and Feature Refining,"Alibaba-Zhejiang University Joint Research Institute of Frontier Technologies, Zhejiang, China"
+4b94f531c203743a9f7f1e9dd009cdbee22ea197,Face recognition by using neural network classifiers based on PCA and LDA,"Dept. of Electron. Eng., Hannam Univ., Daejeon, South Korea"
+11c2d40fc63ecd88febadd8a9cac9521a6b7de66,Discriminant Tchebichef based moment features for face recognition,"Department of Electronic and Communication Engineering, Universiti Tenaga Nasional Km 7, Jalan IKRAM-UNITEN, 43000, Kajang, Selangor, Malaysia"
+11c2d40fc63ecd88febadd8a9cac9521a6b7de66,Discriminant Tchebichef based moment features for face recognition,"Department of Mechanical Engineering, Universiti Tenaga Nasional Km 7, Jalan IKRAM-UNITEN, 43000, Kajang, Selangor, Malaysia"
+7d8798e7430dcc68fcdbd93053c884fc44978906,Crowdsourcing for affective-interaction in computer games,"Universidade Nova Lisboa, Lisboa, Portugal"
+7d18e9165312cf669b799aa1b883c6bbe95bf40e,Simultaneous dimensionality reduction and dictionary learning for sparse representation based classification,"Key Laboratory of System Control and Information Processing, Ministry of Education of China, Shanghai, People’s Republic of China"
+2945cc9e821ab87fa17afc8802f3858435d1264c,Efficient video face recognition by using Fisher Vector encoding of binary features,"Instituto Nacional de Astrofísica, Óptica y Electrónica, Luis Enrique Erro No.1, Tonantzintla, Puebla, México. CP 72840"
+2960500033eb31777ed1af1fcb133dcab1b4a857,Recognizing facial expressions of emotion using action unit specific decision thresholds,"Başkent University, Ankara, TURKEY"
+29fd98f096fc9d507cd5ee7d692600b1feaf7ed1,Exploring Multimodal Visual Features for Continuous Affect Recognition,"Beijing Normal Univeristy, Beijing, China"
+7c8e0f3053e09da6d8f9a1812591a35bccd5c669,"Research in Attacks, Intrusions, and Defenses","Ruhr-Universität Bochum, Bochum, Germany"
+7c8e0f3053e09da6d8f9a1812591a35bccd5c669,"Research in Attacks, Intrusions, and Defenses","Vrije Universiteit Amsterdam, Amsterdam, The Netherlands"
+7c8e0f3053e09da6d8f9a1812591a35bccd5c669,"Research in Attacks, Intrusions, and Defenses","Foundation for Research & Technology – Hellas, Heraklion, Crete, Greece"
+1617f56c86bf8ea61de62062a97961d23fcf03d3,Facial Similarity Learning with Humans in the Loop,"Tsinghua National Laboratory for Information Science and Technology, Department of Computer Science and Technology Tsinghua University, Beijing, China"
+163d0e6ea8c8b88b4383a4eaa740870e2458b9b0,"Face Recognition by RBF with Wavelet, DCV and Modified LBP Operator Face Representation Methods","Department of Computer Science, ANJA College, Sivakasi, India"
+163d0e6ea8c8b88b4383a4eaa740870e2458b9b0,"Face Recognition by RBF with Wavelet, DCV and Modified LBP Operator Face Representation Methods","Department of Computer Science, VHNSN College, Virudhunagar, India"
+42a6beed493c69d5bad99ae47ea76497c8e5fdae,Joint salient object detection and existence prediction,"CCCE, Nankai University Jinnan Campus, Tianjin, China"
+892400017e5c93611dc8361e7749135520d66f25,A comparative study of age-invariant face recognition with different feature representations,"School of Electrical and Electronic Engineering, Singapore"
+4500888fd4db5d7c453617ee2b0047cedccf2a27,Moving portraits,Google Inc.
+4500888fd4db5d7c453617ee2b0047cedccf2a27,Moving portraits,University of Washington and Google Inc.
+453bf941f77234cb5abfda4e015b2b337cea4f17,Robust regression based face recognition with fast outlier removal,"School of Computer and Information Engineering, Jiangxi Normal University, Nanchang, China"
+1f5f67d315c9dad341d39129d8f8fe7fa58e564c,Facial expressions based error detection for smart environment using deep learning,ACM Professional Specialist in Artificial Intelligence
+1f41bf5e8b8562ac7ef0013f4d0cf1c9e1a431f9,A novel constructive algorithm for CANet,"Polytechnic School of Pernambuco, University of Pernambuco, Recife-PE, Brazil"
+1f02bf412a82ad99fe99dc3cfb3adec9dd41eabb,Facial expression recognition based on image pyramid and single-branch decision tree,"Department of Electric and Electronic Engineering, Avrasya University, Trabzon, Turkey"
+1fb980e137b2c9f8781a0d98c026e164b497ddb1,GazeDrone: Mobile Eye-Based Interaction in Public Space Without Augmenting the User,"LMU Munich, Germany"
+1fb980e137b2c9f8781a0d98c026e164b497ddb1,GazeDrone: Mobile Eye-Based Interaction in Public Space Without Augmenting the User,"LMU Munich, Germany and Munich University of Applied Sciences, Germany"
+73ba33e933e834b815f62a50aa1a0e15c6547e83,Invariant feature extraction for facial recognition: A survey of the state-of-the-art,"Sudan University of Science and Technology, College of Computer Science and Information Technology, Khartoum - Sudan"
+73dcb4c452badb3ee39a2f222298b234d08c21eb,Face recognition and facial expression identification using PCA,"Dept of Electronics and Communication, Manipal Institute Of Technology, Karnataka, India"
+87ee56feefdb39938cda7f872e784d9d986713af,Fusion of face recognition and facial expression detection for authentication: a proposed model,"Universiti Kuala Lumpur, Kuala Lumpur"
+87ee56feefdb39938cda7f872e784d9d986713af,Fusion of face recognition and facial expression detection for authentication: a proposed model,"Universiti Kuala Lumpur, Kedah"
+80677676b127b67938c8db06a15d87f5dd4bd7f1,A method for determining the number of features in the kernel space required for preserving classifiability,"School of Computing Sciences and Informatics, University of Cincinnati, Cincinnati, USA"
+80d4cf7747abfae96328183dd1f84133023c2668,Face retrieval in face track using sparse representation,"Dept. of Computer Science and Information Engineering, National Dong Hwa University, Hualien, Taiwan"
+80a5afeb6968c7e736adc48bd4d5ec5b45b13f71,Beauty Technology,"Department of Informatics, Pontifical Catholic Univ of Rio de Janeiro, Rio de Janeiro, Brazil"
+80a5afeb6968c7e736adc48bd4d5ec5b45b13f71,Beauty Technology,"Pontifical Catholic Univ of Rio de Janeiro, Department of Informatics, Rio de Janeiro, Brazil"
+746c0205fdf191a737df7af000eaec9409ede73f,Investigating Nuisances in DCNN-Based Face Recognition,"Department of Electrical, Computer and Biomedical Engineering, University of Pavia, Pavia, Italy"
+1ab19e516b318ed6ab64822efe9b2328836107a4,Face Recognition System Using Multiple Face Model of Hybrid Fourier Feature Under Uncontrolled Illumination Variation,"Department of New Media, Korean German Institute of Technology, Korea"
+1ab19e516b318ed6ab64822efe9b2328836107a4,Face Recognition System Using Multiple Face Model of Hybrid Fourier Feature Under Uncontrolled Illumination Variation,"Department of Electrical Engineering, KAIST, Korea"
+1a0e1ba4408d12f8a28049da0ff8cad4f91690d5,A Hierarchical Predictive Coding Model of Object Recognition in Natural Images,"Department of Informatics, King’s College London, London, UK"
+1a47f12a2490f6775c0ad863ac856de27f5b3e03,An ℓ2/ℓ1 regularization framework for diverse learning tasks,"Merchant Marine College, Shanghai Maritime University, Shanghai 201306, PR China"
+1addc5c1fa80086d1ed58f71a9315ad13bd87ca2,Impact of action unit detection in automatic emotion recognition,"ISIR, UPMC Univ Paris 06, CNRS, Paris, France"
+1addc5c1fa80086d1ed58f71a9315ad13bd87ca2,Impact of action unit detection in automatic emotion recognition,"LAMIA, EA 4540, University of French West Indies and Guyana, Guadeloupe, France"
+1a03dcc811131b0b702bd5a75c54ed26cd27151a,Automated facial expression recognition based on histograms of oriented gradient feature vector differences,"Faculty of Electrical Engineering and Computer Science, University of Maribor, Maribor, Slovenia"
+28e1982d20b6eff33989abbef3e9e74400dbf508,Automated kinship verification and identification through human facial images: a survey,"Faculty of Computing, Universiti Teknologi Malaysia, Johor Bahru, Malaysia"
+28e1982d20b6eff33989abbef3e9e74400dbf508,Automated kinship verification and identification through human facial images: a survey,"UTM-Big Data Center, Universiti Teknologi Malaysia, Johor Bahru, Malaysia"
+28e1982d20b6eff33989abbef3e9e74400dbf508,Automated kinship verification and identification through human facial images: a survey,"Faculty of Information Sciences and Engineering, Management and Science University, Selangor, Malaysia"
+17de5a9ce09f4834629cd76b8526071a956c9c6d,Smart Parental Advisory: A Usage Control and Deep Learning-Based Framework for Dynamic Parental Control on Smart TV,"Istituto di Informatica e Telematica, Consiglio Nazionale delle Ricerche, Pisa, Italy"
+179564f157a96787b1b3380a9f79701e3394013d,MACH: my automated conversation coach,"MIT, Cambridge, MA, USA"
+179564f157a96787b1b3380a9f79701e3394013d,MACH: my automated conversation coach,"LIMSI-CNRS, Orsay Cedex, France"
+1723227710869a111079be7d61ae3df48604e653,Multimodal emotion recognition with automatic peak frame selection,"Dept. of Mathematics and Computer Science, University of Udine, Italy"
+1773d65c1dc566fd6128db65e907ac91b4583bed,Learning Temporal Dynamics for Video Super-Resolution: A Deep Learning Approach,"Department of Electrical and Computer Engineering, Beckman Institute Advanced Science and Technology, University of Illinois at Urbana–Champaign, Urbana, IL, USA"
+17d03da4db3bb89537d644b682b2a091d563af4a,Recognition of Partially Occluded and Rotated Images With a Network of Spiking Neurons,"Department of Statistics and Operational Research, Faculty of Mathematics, Complutense University of Madrid, Madrid, Spain"
+8f713e3c5b6b166c213e00a3873f750fb5939c9a,The 2D factor analysis and its application to face recognition with a single sample per person,"Pontifical Catholic University of Minas Gerais - Department of Computer Science, R. Dom Jose Gaspar, 500, Belo Horizonte MG, 30535901, Brazil"
+8ac2d704f27a2ddf19b40c8e4695da629aa52a54,Expressions Recognition of North-East Indian (NEI) Faces,"Department of Computer Science and Engineering, Tripura University (A Central University), Suryamaninagar, India"
+8ac2d704f27a2ddf19b40c8e4695da629aa52a54,Expressions Recognition of North-East Indian (NEI) Faces,"Department of Physics, Tripura University (A Central University), Suryamaninagar, India"
+8ae642c87f0d6eeff1c6362571e7cd36dcda60ae,Place-centric Visual Urban Perception with Deep Multi-instance Regression,"San Diego State University, San Diego, CA, USA"
+8ae642c87f0d6eeff1c6362571e7cd36dcda60ae,Place-centric Visual Urban Perception with Deep Multi-instance Regression,"University of California at Los Angeles, Los Angeles, CA, USA"
+8a63a2b10068b6a917e249fdc73173f5fd918db0,"A Review of Automated Pain Assessment in Infants: Features, Classification Tasks, and Databases","Department of Computer and Communication Engineering, University of Science and Technology Beijing, Beijing, China"
+7eb8476024413269bfb2abd54e88d3e131d0aa0e,Hybrid-Boost Learning for Multi-Pose Face Detection and Facial Expression Recognition,"Electrical Engineering Department, National Tsing-Hua University, Hsin-Chu, Taiwan"
+7eb8476024413269bfb2abd54e88d3e131d0aa0e,Hybrid-Boost Learning for Multi-Pose Face Detection and Facial Expression Recognition,"Electrical Engineering Department, National Tsing-Hua University, Hsin-Chu, Taiwan; Informatics Department, Fo-Guang University, I-Lan, Taiwan. e-mail: clhuang@ee.nthu.edu.tw"
+7eb8476024413269bfb2abd54e88d3e131d0aa0e,Hybrid-Boost Learning for Multi-Pose Face Detection and Facial Expression Recognition,"Electrical Engineering Department, National Tsing-Hua University, Hsin-Chu, Taiwan. e-mail: chihming.fu@gmail.com"
+7e56d9ebd47490bb06a8ff0bd5bcd8672ec52364,Enhanced independent component analysis and its application to content based face image retrieval,"Dept. of Comput. Sci., New Jersey Inst. of Technol., Newark, NJ, USA"
+7ee7b0602ef517b445316ca8aa525e28ea79307e,Emotion detection through fusion of complementary facial features,"ECE, Department MSIT, C-4 Janakpuri, New Delhi, India"
+7ee7b0602ef517b445316ca8aa525e28ea79307e,Emotion detection through fusion of complementary facial features,"B. Tech Graduate, ECE, MSIT, C-4 Janakpuri, New Delhi, India"
+7e48711c627edf90e9b232f2cbc0e3576c8f2f2a,Energy transfer features combined with DCT for object detection,"FEECS, Department of Computer Science, Technical University of Ostrava, Ostrava-Poruba, Czech Republic"
+7e2f7c0eeaeb47b163a7258665324643669919e8,Classifying advertising video by topicalizing high-level semantic concepts,"School of Information Science and Engineering, Shandong Normal University, Jinan, China"
+7e2f7c0eeaeb47b163a7258665324643669919e8,Classifying advertising video by topicalizing high-level semantic concepts,"Institute of Life Sciences, Shandong Normal University, Jinan, China"
+7e2f7c0eeaeb47b163a7258665324643669919e8,Classifying advertising video by topicalizing high-level semantic concepts,"School of Computer & Software, Nanjing University of Information Science & Technology, Nanjing, People’s Republic of China"
+7e2f7c0eeaeb47b163a7258665324643669919e8,Classifying advertising video by topicalizing high-level semantic concepts,"Key Laboratory of Intelligent Information Processing, Shandong Normal University, Jinan, China"
+7ec431e36919e29524eceb1431d3e1202637cf19,Object detection and tracking in crowd environment — A review,"Department of Computer Applications, National Institute of Technology, Tiruchirappalli, India"
+1025c4922491745534d5d4e8c6e74ba2dc57b138,Auto-Calibrated Gaze Estimation Using Human Gaze Patterns,"Amsterdam University College, Amsterdam, The Netherlands"
+196c12571ab51273f44ea3469d16301d5b8d2828,Is gender encoded in the smile? A computational framework for the analysis of the smile driven dynamic face for gender recognition,"Centre for Visual Computing, Faculty of Engineering and Informatics, University of Bradford, Bradford, UK"
+19b492d426f092d80825edba3b02e354c312295f,A survey on face modeling: building a bridge between face analysis and synthesis,"ISIR laboratory, Pierre and Marie Curie university, Paris Cedex 05, France"
+19b492d426f092d80825edba3b02e354c312295f,A survey on face modeling: building a bridge between face analysis and synthesis,"FAST, Supélec, Avenue de la Boulaie, Cesson-Sévigné, France"
+194f5d3c240d06575403c9a422a0ebc86d43b91e,Real-time face detection and phone-to-face distance measuring for speech recognition for multi-modal interface in mobile device,"School of Electronics and Computer Eng., Chonnam National University, Gwangju, Korea"
+4cc326fc977cf967eef5f3135bf0c48d07b79e2d,Advances in computational facial attractiveness methods,"Department of Computer Science and Engineering, University of Nebraska-Lincoln, Lincoln, USA"
+4c648fe9b7bfd25236164333beb51ed364a73253,Presentation Attack Detection Methods for Face Recognition Systems: A Comprehensive Survey,"Norwegian Biometric Laboratory, Norwegian University of Science and Technology (NTNU), Gjøvik, Norway"
+262cdbc57ecf5c18756046c0d8b9aa7eb10e3b19,Local Similarity based Linear Graph Embedding: A Robust Face Recognition Framework for SSPP problem,"PLA University of Science and Technology, China"
+2696d3708d6c6cccbd701f0dac14cc94d72dd76d,Nonnegative matrix factorization with Hessian regularizer,"Computer Science Department, School of Information Science and Engineering, Xiamen, University, Xiamen, People’s Republic of China"
+26bbe76d1ae9e05da75b0507510b92e7e6308c73,Learning to pool high-level features for face representation,"Department of Information Engineering, HeNan Radio and Television University, Zhengzhou, People’s Republic of China"
+2138ccf78dcf428c22951cc066a11ba397f6fcef,Efficacy of biophysiological measurements at FTFPs for facial expression classification: A validation,"Curtin University Department of Mechanical Engineering, Perth, Western Australia 6012"
+21cbf46c6adfb3a44ed2b30ff0b21a8391c18b13,Learning robust latent subspace for discriminative regression,"College of Mathematics and Informatics, South China Agricultural University, China"
+21959bc56a160ebd450606867dce1462a913afab,Face recognition based on manifold constrained joint sparse sensing with K-SVD,"Xinjiang Vocational and Technical College of Communications, Wulumuqi, People’s Republic of China"
+214072c84378802a0a0fde0b93ffb17bc04f3759,Driver cell phone usage detection on Strategic Highway Research Program (SHRP2) face view videos,"Office of Safety Research and Development, Federal Highway Administration, U.S. Department of Transportation, Virginia, USA"
+217aa3aa0b3d9f6f394b5d26f03418187d775596,Predicting Human Intentions from Motion Cues Only: A 2D+3D Fusion Approach,"Istituto Italiano di Tecnologia (IIT) & Università degli Studi di Genova, Genova, Italy"
+217aa3aa0b3d9f6f394b5d26f03418187d775596,Predicting Human Intentions from Motion Cues Only: A 2D+3D Fusion Approach,"Istituto Italiano di Tecnologia (IIT), Genova, Italy"
+217aa3aa0b3d9f6f394b5d26f03418187d775596,Predicting Human Intentions from Motion Cues Only: A 2D+3D Fusion Approach,"Istituto Italiano di Tecnologia (IIT) & Università di Torino, Genova, Italy"
+217aa3aa0b3d9f6f394b5d26f03418187d775596,Predicting Human Intentions from Motion Cues Only: A 2D+3D Fusion Approach,"Istituto Italiano di Tecnologia & Università di Verona, Genova, Italy"
+4da4e58072c15904d4ce31076061ebd3ab1cdcd5,Learning deep facial expression features from image and optical flow sequences using 3D CNN,"School of Electronic and Information Engineering, Inner Mongolia University of Science and Technology, Baotou, People’s Republic of China"
+75d7ba926ef1cc2adab6c5019afbb2f69a5ca27d,Face recognition under varying illumination,"Electronic Information Engineering College, Henan University of Science and Technology, Luoyang, China"
+75d7ba926ef1cc2adab6c5019afbb2f69a5ca27d,Face recognition under varying illumination,"China Airborne Missile Academy, Luoyang, China"
+758d481bbf24d12615b751fd9ec121500a648bce,Robust face identification using DTCWT and PCA subspace based sparse representation,"Department of Instrumentation and Control Engineering, PSG College of Technology, Coimbatore, India"
+758d481bbf24d12615b751fd9ec121500a648bce,Robust face identification using DTCWT and PCA subspace based sparse representation,"Robert Bosch Engineering and Business Solutions Limited, Bangalore, India"
+754626bd5fb06fee5e10962fdfeddd495513e84b,Facial expression pair matching,Elektronik ve Haberleşme Mühendisliği Bölümü
+751fb994b2c553dc843774a5620bfcab8bc657fd,Data Mining Techniques for the Estimation of Variables in Health-Related Noisy Data,"Universidad de León, León, Spain"
+81af86e3d343a40ce06a3927b6aa8c8853f6811a,MUSA: a banana database for ripening level determination,"Thiagarajar College of Engineering, Madurai, Tamilnadu, India"
+81c21f4aafab39b7f5965829ec9e0f828d6a6182,Acquiring high-resolution face images in outdoor environments: A master-slave calibration algorithm,"Department of Mathematics and Computer Science, University of Cagliari, Italy"
+81f101cea3c451754506bf1c7edf80a661fa4dd1,Exploiting sparsity and co-occurrence structure for action unit recognition,Yahoo! Research
+81a80b26979b40d5ebe3f5ba70b03cb9f19dd7a5,Histogram equalized deep PCA with ELM classification for expressive face recognition,"Department of Business Computer, Faculty of Management Science, Nakhon Ratchasima Rajabhat University, Nakhon Ratchasima, Thailand"
+728b1b2a86a7ffda402e7ec1a97cd1988dcde868,An Ontology Based Framework for Retrieval of Museum Artifacts,"Department of Electronics and Communication, University of Allahabad, Allahabad, India 211002"
+72a3bb0fb490355a926c5a689e12268bff9ff842,Coding Facial Expression with Oriented Steerable Filters,"Department of Computer Science and Engineering, Beijing Institute of Technology, Beijing 100081, CHINA. xushuang@bit.edu.cn"
+72a3bb0fb490355a926c5a689e12268bff9ff842,Coding Facial Expression with Oriented Steerable Filters,"Department of Computer Science and Engineering, Beijing Institute of Technology, Beijing 100081, CHINA. jiayunde@bit.edu.cn"
+72a3bb0fb490355a926c5a689e12268bff9ff842,Coding Facial Expression with Oriented Steerable Filters,"Department of Computer Science and Engineering, Beijing Institute of Technology, Beijing 100081, CHINA. zhangxiaoxun@bit.edu.cn"
+7234468db46b37e2027ab2978c67b48b8581f796,Mirrored non-maximum suppression for accurate object part localization,Center for Research on Intelligent Perception and Computing
+4490b8d8ab2ac693c670751d4c2bff0a56d7393d,Cognitive Gravity Model Based Semi-Supervised Dimension Reduction,"Jiaxing University, Jiaxing, China"
+44855e53801d09763c1fb5f90ab73e5c3758a728,Sentence Directed Video Object Codiscovery,"Baidu Research - Institute of Deep Learning, Sunnyvale, USA"
+44834929e56f2a8f16844fde519039d647006216,Improving object detection by removing noisy samples from training sets,"National ICT Australia and UNSW, Sydney, Australia"
+44b91268fbbf62e1d2ba1d5331ec7aedac30dbe8,Structured supervised dictionary learning based on class-specific and shared sub-dictionaries,"Shahid Bahonar University of Kerman Computer Engineering Department, Kerman, Iran"
+2afde207bd6f2e5fa20f3cf81940b18cc14e7dbb,Grassmannian Regularized Structured Multi-View Embedding for Image Classification,"School of Computer and Communication Science, École Polytechnique Fédérale de Lausanne, Lausanne, Switzerland"
+2a2df7e790737a026434187f9605c4763ff71292,Towards nonuniform illumination face enhancement via adaptive contrast stretching,"Division Télécom, Centre de Développement des Technologies Avancées - CDTA, Algiers, Algeria"
+2a2df7e790737a026434187f9605c4763ff71292,Towards nonuniform illumination face enhancement via adaptive contrast stretching,"Department of Computer System and Communication, Faculty of Information and Communication, Universiti Teknikal Malaysia Melaka, Durian Tunggal, Malaysia"
+2a7e6a1b2638550370a47f2f6f6e38e76fe9ac13,Multifeature Anisotropic Orthogonal Gaussian Process for Automatic Age Estimation,"Chinese University of Hong Kong, Hong Kong"
+2f8ef56c1007a02cdc016219553479d6b7e097fb,Face Recognition Using Kernel Fisher Linear Discriminant Analysis and RBF Neural Network,"Department of Computer Science & Engineering, GCELT, Kolkata, India"
+2f8ef56c1007a02cdc016219553479d6b7e097fb,Face Recognition Using Kernel Fisher Linear Discriminant Analysis and RBF Neural Network,"AICTE Emeritus Fellow,  "
+2f17c0514bb71e0ca20780d71ea0d50ff0da4938,Photo search in a personal photo diary by drawing face position with people tagging,"Inha University, Incheon, South Korea"
+43cbe3522f356fbf07b1ff0def73756391dc3454,Laplacian of smoothed image as representation for face recognition,"School of Computing and Electrical Engineering, IIT Mandi, H.P, 175001, India"
+4344ba6e33faaa616d01248368e66799548ca48b,Unsupervised joint face alignment with gradient correlation coefficient,"ICA Laboratory, Grenoble, France"
+4344ba6e33faaa616d01248368e66799548ca48b,Unsupervised joint face alignment with gradient correlation coefficient,"Gipsa-Lab, Saint Martin d’Heres, France"
+434f1442533754b3098afd4e24abf1e3792b24db,Over-the-shoulder shot detection in art films,"Film Department ELTE University, Budapest, Hungary"
+43c3b6a564b284382fdf8ae33f974f4e7a89600e,An Integrated Signature-Based Framework for Efficient Visual Similarity Detection and Measurement in Video Shots,"South Valley University, Qena, Egypt"
+43c3b6a564b284382fdf8ae33f974f4e7a89600e,An Integrated Signature-Based Framework for Efficient Visual Similarity Detection and Measurement in Video Shots,"University of Nottingham (Malaysia Campus), Malaysia"
+4317856a1458baa427dc00e8ea505d2fc5f118ab,Regularizing face verification nets for pain intensity regression,"Dept. of Electrical & Computer Engineering, Johns Hopkins University, 3400 N. Charles St, Baltimore, MD 21218, USA"
+4317856a1458baa427dc00e8ea505d2fc5f118ab,Regularizing face verification nets for pain intensity regression,"Dept. of Radiation Oncology, Johns Hopkins University, 3400 N. Charles St, Baltimore, MD 21218, USA"
+4317856a1458baa427dc00e8ea505d2fc5f118ab,Regularizing face verification nets for pain intensity regression,"Dept. of EE, UESTC, 2006 Xiyuan Ave, Chengdu, Sichuan 611731, China"
+6ba6045e4b404c44f9b4dfce2d946019f0e85a72,Facial landmark detection based on an ensemble of local weighted regressors during real driving situation,"Dept. of Computer Engineering, Keimyung University, Daegu, Korea"
+07dc9f3b34284cc915dea7575f40ef0c04338126,Hierarchical Clustering Multi-Task Learning for Joint Human Action Grouping and Recognition,"School of Computing, National University of Singapore"
+0701b01bc99bf3b64050690ceadb58a8800e81ed,Facial expression recognition through modeling age-related spatial patterns,"Key Lab of Computing and Communication Software of Anhui Province School of Computer Science and Technology, University of Science and Technology of China Hefei, Anhui, People’s Republic of China"
+0701b01bc99bf3b64050690ceadb58a8800e81ed,Facial expression recognition through modeling age-related spatial patterns,"Department of Electrical, Computer, and Systems Engineering, Rensselaer Polytechnic Institute Troy, Troy, USA"
+383ff2d66fecdc2fd02a31ac1fa392f48e578296,An efficient multimodal 2D + 3D feature-based approach to automatic facial expression recognition,f
+008528d5e27919ee95c311266041e4fb1711c254,User-adaptive image retrieval via fusing pointwise and pairwise labels,"Alibaba Group, Zhejiang, People’s Republic of China"
+00d4c2db10f3a32d505d7b8adc7179e421443dec,Data driven adaptation for QoS aware embedded vision systems,"Computer Science and Engineering, Pennsylvania State University, PA, USA SiliconScapes, LLC, PA, USA"
+003ba2001bd2614d309d6ec15e9e2cbe86db03a1,A novel post-nonlinear ICA-based reflectance model for 3D surface reconstruction,"Dept. of Inf. Network Technol., Hsiuping Inst. of Technol., Taichung, Taiwan"
+00a38ebce124879738b04ffc1536018e75399193,Convolutional neural network for age classification from smart-phone based ocular images,"Dept. of Computer Science and Electrical Engineering, University of Missouri-Kansas City, MO, USA"
+007fbc7a1d7eae33b2bb59b175dd1033e5e178f3,Enabling Live Video Analytics with a Scalable and Privacy-Aware Framework,"Intel Labs, Pittsburgh PA"
+6e46d8aa63db3285417c8ebb65340b5045ca106f,Accelerating Machine Learning Inference with Probabilistic Predicates,"Microsoft & University of Washington, Redmond, WA, USA"
+6e46d8aa63db3285417c8ebb65340b5045ca106f,Accelerating Machine Learning Inference with Probabilistic Predicates,"Princeton University & Microsoft, Princeton, NJ, USA"
+6e46d8aa63db3285417c8ebb65340b5045ca106f,Accelerating Machine Learning Inference with Probabilistic Predicates,"Microsoft, Redmond, WA, USA"
+6e9de9c3af3258dd18142e9bef2977b7ce153bd5,Computer Vision – ECCV 2016 Workshops,"Facebook AI Research (FAIR), Menlo Park, USA"
+9a84588fe7e758cfbe7062686a648fab787fc32f,Human facial expression recognition using curvelet feature extraction and normalized mutual information feature selection,"Division of Digital Media Engineering, Sang-Myung University, Suwon, Republic of Korea"
+36219a3196aac2bd149bc786f083957a6e6da125,Recognition of the gaze direction: Anchoring with the eyebrows,"The Image Processing and Analysis Laboratory (LAPI), University “Politehnica” of Bucharest, 313 Splaiul Independenţei, Bucharest, Romania"
+36bb93c4f381adca267191811abb8cc7812363f9,Quick retrieval method of massive face images based on global feature and local feature fusion,"Shanghai University School of Communication and Information Engineering Shanghai, China"
+5c91fc106cfe9d57a9b149c1af29ca84d403fc7e,3D Pose Tracking With Multitemplate Warping and SIFT Correspondences,"Department of Electrical Engineering and Computer Science, Colorado School of Mines, Golden, CO, USA"
+5c526ee00ec0e80ba9678fee5134dae3f497ff08,Contrast compensation by fuzzy classification and image illumination analysis for back-lit and front-lit color face images,Department of mechatronic technology of National Taiwan Normal University
+5c4f9260762a450892856b189df240f25b5ed333,Discriminative Elastic-Net Regularized Linear Regression,"Department of Information Engineering, Henan University of Science and Technology, Luoyang, China"
+09903df21a38e069273b80e94c8c29324963a832,Human action and event recognition using a novel descriptor based on improved dense trajectories,"IIIT Chittoor, SriCity, Andhra Pradesh, India"
+099053f2cbfa06c0141371b9f34e26970e316426,Effective recognition of facial micro-expressions with video motion magnification,"School of Physics and Electronic Information Engineering, Wenzhou University, Wenzhou, China"
+099053f2cbfa06c0141371b9f34e26970e316426,Effective recognition of facial micro-expressions with video motion magnification,"Information Security Group, City University London, London, UK"
+5d9971c6a9d5c56463ea186850b16f8969a58e67,Facial-expression recognition based on a low-dimensional temporal feature space,"Saudi Electronic University, Riyadh, Kingdom of Saudi Arabia"
+5dd3c9ac3c6d826e17c5b378d1575b68d02432d7,A survey on soft Biometrics and their application in person recognition at a distance,"Dept. of Computer Science and Engineering, St. Joseph's College of Engineering and Technology, Palai, Kerala, India"
+31ba9d0bfaa2a44bae039e5625eb580afd962892,Gender and gaze gesture recognition for human-computer interaction,"Centre for Machine Vision, Bristol Robotics Laboratory, University of the West of England, T Block, Frenchay Campus, Coldharbour Lane, Bristol, BS16 1QY, UK"
+314c4c95694ff12b3419733db387476346969932,Adaptive Metric Learning with the Low Rank Constraint,"School of Computer and Information Engineering, Xiamen University of Technology, Xiamen, China"
+915ff2bedfa0b73eded2e2e08b17f861c0e82a58,Automated facial expression recognition app development on smart phones using cloud computing,"Department of Electrical and Computer Engineering, Florida Institute of Technology, Melbourne, USA"
+91e17338a12b5e570907e816bff296b13177971e,Towards open-set face recognition using hashing functions,"Smart Surveillance Interest Group, Department of Computer Science, Universidade Federal de Minas Gerais, Minas Gerais, Brazil"
+65475ce4430fb524675ebab6bcb570dfa07e0041,Mapping method between image and natural sentence,"Department of Computer, the University of Suwon, Korea"
+651cafb2620ab60a0e4f550c080231f20ae6d26e,4D unconstrained real-time face recognition using a commodity depth camera,"Singapore Polytechnic, 500 Dover Road, Singapore 139651"
+6256b47342f080c62acd106095cf164df2be6020,FaceSimile: A mobile application for face image search based on interactive shape manipulation,"Google, Seattle, USA"
+6256b47342f080c62acd106095cf164df2be6020,FaceSimile: A mobile application for face image search based on interactive shape manipulation,"Computer Sciences Department, University of Wisconsin, Madison, USA"
+6256b47342f080c62acd106095cf164df2be6020,FaceSimile: A mobile application for face image search based on interactive shape manipulation,"Google, Mountain View, USA"
+62750d78e819d745b9200b0c5c35fcae6fb9f404,Leveraging implicit demographic information for face recognition using a multi-expert system,"University of Salerno, Salerno, Italy"
+628f9c1454b85ff528a60cd8e43ec7874cf17931,Automatic detection of very early stage of dementia through multimodal interaction with computer avatars,"Nara Institute of Science and Technology, Japan"
+628f9c1454b85ff528a60cd8e43ec7874cf17931,Automatic detection of very early stage of dementia through multimodal interaction with computer avatars,"Osaka University Health Care Center, Japan"
+96a8f115df9e2c938453282feb7d7b9fde6f4f95,Facial Expression Recognition in Video with Multiple Feature Fusion,"Department of Computer Science, Chu Hai College of Higher Education, Tsuen Wan, Hong Kong"
+9652f154f4ae7807bdaff32d3222cc0c485a6762,An efficient and sparse approach for large scale human action recognition in videos,"Univ. La Rochelle, La Rochelle, France"
+96e0b67f34208b85bd90aecffdb92bc5134befc8,Perturbation scheme for online learning of features: Incremental principal component analysis,"School of Computer and Systems Sciences, JawaharLal Nehru University, New Delhi 110067, India"
+3a3e55cf5bfd689d6c922e082efa0cd71cd2ae5c,Face tracking with convolutional neural network heat-map,"Chonnam National University, Gwangju, Korea"
+3a49507c46a2b8c6411809c81ac47b2b1d2282c3,Exploring joint encoding of multi-direction local binary patterns for image classification,"Key Laboratory of Dependable Service Computing in Cyber Physical Society Ministry of Education, Chongqing, China"
+3a6334953cd2775fab7a8e7b72ed63468c71dee7,Automated social skills training with audiovisual information,"Graduate School of Information Science, Nara Institute of Science and Technology, Takayama-cho 8916-5, Ikoma-shi, Nara, Japan"
+3a0425c25beea6c4c546771adaf5d2ced4954e0d,Domain Adaptation in Computer Vision Applications,"Naver Labs Europe, Meylan, France"
+5435d5f8b9f4def52ac84bee109320e64e58ab8f,Evaluating real-life performance of the state-of-the-art in facial expression recognition using a novel YouTube-based datasets,"Department of Computer Science, University of Science & Technology, Bannu, Pakistan"
+5435d5f8b9f4def52ac84bee109320e64e58ab8f,Evaluating real-life performance of the state-of-the-art in facial expression recognition using a novel YouTube-based datasets,"Department of Computer Science, Innopolis University, Kazan, Russia"
+54058859a2ddf4ecfc0fe7ccbea7bb5f29d9201d,Age Estimation by LS-SVM Regression on Facial Images,"IIT-Madras, Chennai, India"
+548233d67f859491e50c5c343d7d77a7531d4221,Robust detection of outliers for projection-based face recognition methods,"Orange—France Telecom Division R&D—TECH/IRIS, Cesson Sévigné Cedex, France"
+984edce0b961418d81203ec477b9bfa5a8197ba3,Customer and target individual face analysis for retail analytics,"Asian Institute of Technology (AIT), Pathum Thani 12120, Thailand"
+985bbe1d47b843fa0b974b4db91be23f218d1ce7,Intelligence Science I,"Machine Intelligence Research Institute, Rockville, USA"
+985bbe1d47b843fa0b974b4db91be23f218d1ce7,Intelligence Science I,"Shanghai Maritime University, Shanghai, China"
+5375a3344017d9502ebb4170325435de3da1fa16,Computer Vision – ACCV 2012,"Microsoft Research Asia, Beijing, P.R. China"
+3f4711c315d156a972af37fe23642dc970a60acf,Pose invariant face recognition with 3D morphable model and neural network,"KT Future Technology Laboratory, Seoul, South Korea"
+3ff418ac82df0b5c2f09f3571557e8a4b500a62c,Robust GPU-assisted camera tracking using free-form surface models,"Institute of Computer Science, Christian-Albrechts-Universität Kiel, Kiel, Germany"
+3060ac37dec4633ef69e7bc63488548ab3511f61,A hybrid deep learning neural approach for emotion recognition from facial expressions for socially assistive robots,"School of Computing, Electronics and Mathematics, Faculty of Engineering, Environment and Computing, Coventry University, Coventry, UK"
+3083bd7a442af6a1d72cdc04ae1ad7c30697a4e8,Facial expression recognition with enhanced feature extraction using graph fourier transform,"Department of Electrical Engineering Malaviya National Institute of Technology Jaipur, Rajasthan, India"
+3083bd7a442af6a1d72cdc04ae1ad7c30697a4e8,Facial expression recognition with enhanced feature extraction using graph fourier transform,"Department of Electronics and Communication Engineering Malaviya National Institute of Technology Jaipur, Rajasthan, India"
+3083bd7a442af6a1d72cdc04ae1ad7c30697a4e8,Facial expression recognition with enhanced feature extraction using graph fourier transform,"Department of Electrical Engineering Indian Institute of Technology Delhi New Delhi, India"
+30cace74a7d51e9a928287e25bcefb968c49f331,Monocular 3D facial information retrieval for automated facial expression analysis,"VUB-NPU Joint AVSP Research Lab, Vrije Universiteit Brussel (VUB), Deptartment of Electronics & Informatics (ETRO), Pleinlaan 2, 1050 Brussel, Belgium"
+5e19d7307ea67799eb830d5ce971f893e2b8a9ca,Heteroscedastic Sparse Representation Based Classification for Face Recognition,"School of Mathematics and Information Technology, Nanjing Xiao Zhuang University, Nanjing, People’s Republic of China"
+5e0b691e9e5812dd3cb120a8d77619a45aa8e4c4,Pose-indexed based multi-view method for face alignment,"Beijing Key Lab of Intelligent Information Technology, School of Computer Science, Beijing Institute of Technology, Beijing 100081, China"
+378418fdd28f9022b02857ef7dbab6b0b9a02dbe,Intelligent Information and Database Systems,"Wrocław University of Science and Technology, Wrocław, Poland"
+378418fdd28f9022b02857ef7dbab6b0b9a02dbe,Intelligent Information and Database Systems,"Quang Binh University, Dong Hoi City, Vietnam"
+37866fea39deeff453802cde529dd9d32e0205a5,"Sense beauty via face, dressing, and/or voice","National Laboratory of Pattern Recognition, Beijing, China"
+0831794eddcbac1f601dcb9be9d45531a56dbf7e,Learning correlations for human action recognition in videos,"Department of Mathematics and Computer Science, Gannan Normal University, Ganzhou, People’s Republic of China"
+080e0efc3cf71260bfe9bdc62cd86614d1ebca46,Fast Kernel Generalized Discriminative Common Vectors for Feature Extraction,"Centre for Secure Information Technologies, Queen’s University Belfast, Belfast, UK"
+080e0efc3cf71260bfe9bdc62cd86614d1ebca46,Fast Kernel Generalized Discriminative Common Vectors for Feature Extraction,"Departament d’Informàtica, Universitat de Valencia, Valencia, Spain"
+08872d801f134e41753601e85971769b28314ca2,Recognizing Facial Expressions in the Orthogonal Complement of Principal Subspace,"Indian Statistical Institute, Kolkata 700108"
+08872d801f134e41753601e85971769b28314ca2,Recognizing Facial Expressions in the Orthogonal Complement of Principal Subspace,"Indian Statistical Institute, Kolkata 700108"
+080ab68a898a3703feead145e2c38361ae84a0a8,Pairwise Costs in Semisupervised Discriminant Analysis for Face Recognition,"Department of Engineering and Maintenance, China Mobile Group, Jiangsu Company, Ltd., Changzhou, China"
+6dcf6b028a6042a9904628a3395520995b1d0ef9,Field support vector machines,"Xi'an Jiaotong-Liverpool University, Suzhou, Jiangsu, P.R. China"
+6d4c64ca6936f868d793e1b164ddaf19243c19a7,Scalable Linear Visual Feature Learning via Online Parallel Nonnegative Matrix Factorization,"University of Massachusetts at Amherst, Amherst, MA, USA"
+013305c13cfabaea82c218b841dbe71e108d2b97,Incremental Clustering-Based Facial Feature Tracking Using Bayesian ART,"Faculty of Engineering, Computing and Science, Swinburne University of Technology Sarawak Campus, Kuching, Malaysia"
+016194dbcd538ab5a129ef1bcff3c6e073db63f9,"An insight into multimodal databases for social signal processing: acquisition, efforts, and directions","Faculty of Electrical Engineering and Computing, University of Zagreb, Zagreb, Croatia"
+06a799ad89a2a45aee685b9e892805e3e0251770,Learning Technology for Education in Cloud – The Changing Face of Education,"Staffordshire University, Staffordshire, United Kingdom"
+06a799ad89a2a45aee685b9e892805e3e0251770,Learning Technology for Education in Cloud – The Changing Face of Education,"Universidad Tecnica Federico Santa Maria, Valparaiso, Chile"
+06a799ad89a2a45aee685b9e892805e3e0251770,Learning Technology for Education in Cloud – The Changing Face of Education,"FernUniversität, Hagen, Germany"
+060f67c8a0de8fee9c1732b63ab40627993f93d0,Computer Vision and Graphics,"Polish-Japanese Institute of Information Technology, Warsaw, Poland"
+060f67c8a0de8fee9c1732b63ab40627993f93d0,Computer Vision and Graphics,"Faculty of Applied Informatics and Mathematics, Department of Informatics, Warsaw University of Life Sciences (SGGW), Warsaw, Poland"
+060f67c8a0de8fee9c1732b63ab40627993f93d0,Computer Vision and Graphics,"Polish-Japanese Institute of Information Technology, Warszawa, Poland"
+6ca6ade6c9acb833790b1b4e7ee8842a04c607f7,Deep Transfer Network for Unconstrained Face Verification,"Beijing Institute of Graphic Communication, Beijing"
+6cb8c52bb421ce04898fa42cb997c04097ddd328,Computational Collective Intelligence. Technologies and Applications,"Institute of Informatics, Wroclaw University of Technology, Wroclaw, Poland"
+6c0ad77af4c0850bd01bb118e175ecc313476f27,Extended multi-spectral face recognition across two different age groups: an empirical study,"Goa University, India"
+6c0ad77af4c0850bd01bb118e175ecc313476f27,Extended multi-spectral face recognition across two different age groups: an empirical study,"NTNU, Norway"
+6ca7a82ec1c51417c4f0b8eebddb53a73a3874b1,Interactive Facial Expression Reader and Extension to First Impression Improver,"Graduate School of Science and Technology, Niigata University, 8050, Ikarashi 2-Nocho, Nishi-ku Niigata, 950-2181, Japan, +81 25 262 7499"
+6ca7a82ec1c51417c4f0b8eebddb53a73a3874b1,Interactive Facial Expression Reader and Extension to First Impression Improver,"Dept. of Information Engineering, Faculty of Engineering, Niigata University, 8050, Ikarashi 2-Nocho, Nishi-ku Niigata, 950-2181, Japan"
+6ca7a82ec1c51417c4f0b8eebddb53a73a3874b1,Interactive Facial Expression Reader and Extension to First Impression Improver,"Graduate School of Science and Technology, Niigata University, 8050, Ikarashi 2-Nocho, Nishi-ku Niigata, 950-2181, Japan"
+3980dadd27933d99b2f576c3b36fe0d22ffc4746,A facial expression recognition method based on cubic spline interpolation and HOG features,"Department of Automation, North-China University of Technology, Beijing, China"
+9944c451b4a487940d3fd8819080fe16d627892d,Human face shape analysis under spherical harmonics illumination considering self occlusion,Department of Mathematics and Computer Science University of Basel
+997b9ffe2f752ba84a66730cfd320d040e7ba2e2,Exploiting Objects with LSTMs for Video Categorization,"NTT Media Intelligence Laboratories, Tokyo, Japan"
+99b8a24aacaa53fa3f8a7e48734037c7b16f1c40,A Proposal to Improve the Authentication Process in m-Health Environments,"Department of Research and Diagnostic Methods, Faculty of Education, Pontificia University of Salamanca, Salamanca, Spain"
+99b8a24aacaa53fa3f8a7e48734037c7b16f1c40,A Proposal to Improve the Authentication Process in m-Health Environments,"Department of Computer Science and Engineering of Systems, University of Zaragoza, Escuela Universitaria Politécnica de Teruel, Teruel, Spain"
+99b8a24aacaa53fa3f8a7e48734037c7b16f1c40,A Proposal to Improve the Authentication Process in m-Health Environments,"Department of Computer Science, Madrid Open University, Madrid, Spain"
+992e4119d885f866cb715f4fbf0250449ce0db05,Glasses detection on real images based on robust alignment,"Fundación CTIC (Technological Center), Technological Scientific Park of Gijón, Gijón, Spain"
+992e4119d885f866cb715f4fbf0250449ce0db05,Glasses detection on real images based on robust alignment,"Department of Computer Science and Engineering, University of Oviedo, Gijón, Spain"
+992e4119d885f866cb715f4fbf0250449ce0db05,Glasses detection on real images based on robust alignment,"Treelogic, Technological Scientific Park of Asturias, Llanera, Spain"
+5278b7a6f1178bf5f90cd3388908925edff5ad46,3D object retrieval based on histogram of local orientation using one-shot score support vector machine,"Computer Vision Research Laboratory, Electrical Engineering Faculty, Sahand University of Technology, Tabriz, Iran"
+52af7625f7e7a0bd9f9d8eeafd631c4d431e67e7,Fast facial expression recognition using local binary features and shallow neural networks,"Department of Electrical Engineering, Computer Vision Laboratory, Linköping University, Linköping, Sweden"
+522a4ca705c06a0436bbe62f46efe24d67a82422,Robust and efficient face recognition via low-rank supported extreme learning machine,"School of Computer Science and Engineering, Wuhan Institute of Technology, Wuhan, China"
+522a4ca705c06a0436bbe62f46efe24d67a82422,Robust and efficient face recognition via low-rank supported extreme learning machine,"School of Software, Henan University, Kaifeng, China"
+526c79c6ce39882310b814b7918449d48662e2a9,Facial expression analysis under partial occlusion,"Dept. of Informatics, Aristotle Univ. of Thessaloniki, Greece"
+524f6dc7441a3899ea8eb5d3e0d5d70e50ba566a,Exploiting the Use of Ensemble Classifiers to Enhance the Precision of User's Emotion Classification,"Institute of Mathematical and Computer Sciences, University of Sao Paulo, Sao Paulo, Brazil"
+524f6dc7441a3899ea8eb5d3e0d5d70e50ba566a,Exploiting the Use of Ensemble Classifiers to Enhance the Precision of User's Emotion Classification,University of Sao Paulo
+55c46ae1154ed310610bdf5f6d9e7023d14c7eb4,Adaptive multimodal recognition of voluntary and involuntary gestures of people with motor disabilities,"Pennsylvania State University, University Park, PA"
+55432723c728a2ce90d817e9e9877ae9fbad6fe5,"Performance of SVM, CNN, and ANN with BoW, HOG, and Image Pixels in Face Recognition","Department of Electronics and Communication Engineering, Faculty of Electrical & Electronic Engineering, Khulna University of Engineering & Technology, Bangladesh"
+552122432b92129d7e7059ef40dc5f6045f422b5,Empowering Simple Binary Classifiers for Image Set Based Face Recognition,"Data61, Commonwealth Scientific and Industrial Research Organization (CSIRO), Canberra, Australia"
+55266ddbe9d5366e8cd1b0b645971cad6d12157a,Face recognition classifier based on dimension reduction in deep learning properties,"Elektronik ve Haberleşme Mühendisliği Bölümü, Yıldız Teknik Üniversitesi, İstanbul, Türkiye"
+55aafdef9d9798611ade1a387d1e4689f2975e51,Hallucinating Compressed Face Images,"Electrical Engineering and Computer Science, School of Engineering, University of California at Merced, Merced, USA"
+972e044f69443dfc5c987e29250b2b88a6d2f986,Face model fitting with learned displacement experts and multi-band images,"Technische Universität München, München, Germany"
+97c59db934ff85c60c460a4591106682b5ab9caa,Extremely dense face registration: Comparing automatic landmarking algorithms for general and ethno-gender models,"ICT Center, CSIRO"
+9790ec6042fb2665c7d9369bf28566b0ce75a936,Towards More Robust Automatic Facial Expression Recognition in Smart Environments,"Department Informatik, Hamburg University of Applied Sciences, Engineering and Computing, University of the West of Scotland"
+9790ec6042fb2665c7d9369bf28566b0ce75a936,Towards More Robust Automatic Facial Expression Recognition in Smart Environments,"Department Informatik, Hamburg University of Applied Sciences, Hamburg, Germany"
+9790ec6042fb2665c7d9369bf28566b0ce75a936,Towards More Robust Automatic Facial Expression Recognition in Smart Environments,"Computer Science Department, Central Washington University (CWU)"
+9790ec6042fb2665c7d9369bf28566b0ce75a936,Towards More Robust Automatic Facial Expression Recognition in Smart Environments,"School of Engineering and Computing, University of the West of Scotland"
+9790ec6042fb2665c7d9369bf28566b0ce75a936,Towards More Robust Automatic Facial Expression Recognition in Smart Environments,"Innovations Kontakt Stelle (IKS) Hamburg, Hamburg University of Applied Sciences"
+97c1f68fb7162af326cd0f1bc546908218ec5da6,Supervised-learning based face hallucination for enhancing face recognition,"Dept. of Electrical Engineering, National Tsing-Hua University, Taiwan"
+97c1f68fb7162af326cd0f1bc546908218ec5da6,Supervised-learning based face hallucination for enhancing face recognition,Department of Electronic Engineering Shanghai Jiao Tong University
+63fd7a159e58add133b9c71c4b1b37b899dd646f,Exemplar-Based Human Action Pose Correction,"Department of Cognitive Science, University of California, San Diego, CA, USA"
+637b31157386efbde61505365c0720545248fbae,Deep learning with time-frequency representation for pulse estimation from facial videos,"Utechzone Co. Ltd., New Taipei City, Taiwan 235"
+636b8ffc09b1b23ff714ac8350bb35635e49fa3c,Pruning training sets for learning of object categories,"Dept. of Comput. Sci., California Inst. of Technol., Pasadena, CA, USA"
+6359fcb0b4546979c54818df8271debc0d653257,Fusing magnitude and phase features with multiple face models for robust face recognition,"Key Lab of Intelligent Information Processing of Chinese Academy of Sciences (CAS), Institute of Computing Technology (ICT), CAS, Beijing, China"
+6345c0062885b82ccb760c738a9ab7fdce8cd577,Pain detection from facial images using unsupervised feature learning approach,"Integrated Circuits and Electronics Laboratory, Department of Engineering, Aarhus University, Denmark"
+0f7e9199dad3237159e985e430dd2bf619ef2db5,Learning Social Circles in Ego-Networks Based on Multi-View Network Structure,"EECS Department, University of Kansas, Lawrence, KS"
+0aebe97a92f590bdf21cdadfddec8061c682cdb2,Probabilistic Elastic Part Model: A Pose-Invariant Representation for Real-World Face Verification,"Adobe Research Department, Adobe Systems Inc, San Jose, CA"
+0a0007cfd40ae9694c84f109aea11ec4f2b6cf39,Unsupervised multi-manifold linear differential projection(UMLDP) for face recognition,"School of Technology, Nanjing Audit University, Nanjing, China"
+0a0007cfd40ae9694c84f109aea11ec4f2b6cf39,Unsupervised multi-manifold linear differential projection(UMLDP) for face recognition,"Fujian Provincial Key Laboratory of Information Processing and Intelligent Control, Fuzhou, China"
+0a0007cfd40ae9694c84f109aea11ec4f2b6cf39,Unsupervised multi-manifold linear differential projection(UMLDP) for face recognition,"School of Computer Science and Technology, Nanjing University of Posts and Telecommunications, Nanjing, China"
+0a0b9a9ff827065e4ff11022b0e417ddf1d3734e,Fusing active orientation models and mid-term audio features for automatic depression estimation,"Computational Intelligence Lab, Institute of Informatics and Telecommunications, NCSR Demokritos, Athens, Greece"
+0aaf785d7f21d2b5ad582b456896495d30b0a4e2,A Face Recognition Application for People with Visual Impairments: Understanding Use Beyond the Lab,"Cornell University & Facebook Inc., New York, NY, USA"
+0aaf785d7f21d2b5ad582b456896495d30b0a4e2,A Face Recognition Application for People with Visual Impairments: Understanding Use Beyond the Lab,"Facebook Inc., Menlo Park, CA, USA"
+6486b36c6f7fd7675257d26e896223a02a1881d9,Selective Review and Analysis of Aging Effects in Biometric System Implementation,"Computer Engineering Department, Girne American University, Kyrenia, Cyprus 90"
+6486b36c6f7fd7675257d26e896223a02a1881d9,Selective Review and Analysis of Aging Effects in Biometric System Implementation,"Departamento de Informática e Matemática Aplicada/University of Rio Grande do Norte, Natal, Brazil"
+645f09f4bc2e6a13663564ee9032ca16e35fc52d,Interactive Demonstration of Probabilistic Predicates,"University of Washington &Microsoft, Seattle, WA, USA"
+9048732c8591a92a1f4f589b520a733f07578f80,Improved CNN-based facial landmarks tracking via ridge regression at 150 Fps on mobile devices,Tencent Inc
+9055b155cbabdce3b98e16e5ac9c0edf00f9552f,MORPH: a longitudinal image database of normal adult age-progression,"Dept. of Comput. Sci., North Carolina Univ., Wilmington, NC, USA"
+90ae02da16b750a9fd43f8a38440f848309c2fe0,A review of facial gender recognition,"Lee Kong Chian Faculty of Engineering and Science, Universiti Tunku Abdul Rahman, Kuala Lumpur, Malaysia"
+bf4f79fd31493648d80d0a4a8da5edeeaba74055,Adaptive Message Update for Fast Affinity Propagation,"NTT Software Innovation Center, Tokyo, Japan"
+bf4f79fd31493648d80d0a4a8da5edeeaba74055,Adaptive Message Update for Fast Affinity Propagation,"NTT Service Evolution Laboratories, Kanagawa, Japan"
+bf2eb77e9b795a4a0a38ed4b1c8dd4b2c9a74317,Two-Stream Convolutional Network with Multi-level Feature Fusion for Categorization of Human Action from Videos,"Department of Computer Science and Engineering, Indian Institute of Technology Madras, Chennai, India"
+bf1ebcaad91c2c0ed35544159415b3ad388cc7a9,Cultural-based visual expression: emotional analysis of human face via Peking Opera Painted Faces (POPF),"Department of Design, College of Engineering, Design and Physical Sciences, Brunel University London, Uxbridge, UK"
+bf1ebcaad91c2c0ed35544159415b3ad388cc7a9,Cultural-based visual expression: emotional analysis of human face via Peking Opera Painted Faces (POPF),"Department of Arts and Humanities, College of Business, Arts and Social Sciences, Brunel University London, Uxbridge, UK"
+d4353952a408e1eae8c27a45cc358976d38dde00,Features classification using geometrical deformation feature vector of support vector machine and active appearance algorithm for automatic facial expression recognition,"MNIT, Jaipur, India"
+d4353952a408e1eae8c27a45cc358976d38dde00,Features classification using geometrical deformation feature vector of support vector machine and active appearance algorithm for automatic facial expression recognition,"CEERI, Pilani, India"
+d4453ec649dbde752e74da8ab0984c6f15cc6e06,An augmented image gradients based supervised regression technique for iris center localization,"Biomedical Instrumentation (V-02), CSIR-Central Scientific Instruments Organisation (CSIO), Chandigarh, India"
+d4453ec649dbde752e74da8ab0984c6f15cc6e06,An augmented image gradients based supervised regression technique for iris center localization,"Department of ECE, PEC University of Technology, Chandigarh, India"
+d4b4020e289c095ce2c2941685c6cd37667f5cc9,Facial expression recognition,"Dept. of Electronics and Telecommunication Engg., KCT's Late G.N. Sapkal college of Engineering, Nashik, India"
+bab2f4949a38a712a78aafbc0a3c392227c65f56,Eye detection using gradient histogram matching for cornea localization in refractive eye surgery,"Dept. of Computer Science and Information Engineering, Southern Taiwan University of Science and Technology, Tainan City, Taiwan"
+a07f78124f83eef1ed3a6f54ba982664ae7ca82a,Tensor based robust color face recognition,"Dept. of Computing, Curtin University GPO Box U1987, Perth, WA 6845"
+a094e52771baabe4ab37ef7853f9a4f534227457,Estimation of Driver Head Yaw Angle Using a Generic Geometric Model,"Amrita E-Learning Research Laboratory and the Department of Computer Science, Amrita Vishwa Vidyapeetham, Amritapuri Campus, Kollam, India"
+a094e52771baabe4ab37ef7853f9a4f534227457,Estimation of Driver Head Yaw Angle Using a Generic Geometric Model,"Department of Computer Science, Amrita Vishwa Vidyapeetham, Amritapuri Campus, Kollam, India"
+a0f6196d27a39cde2dbf62c08d89cbe489600bb0,Development of two novel face-recognition CAPTCHAs: A security and usability study,"Department of Management Information Systems, Universität Regensburg, Universitätsstr. 31, 93053 Regensburg, Germany"
+a76e57c1b2e385b68ffdf7609802d71244804c1d,Improving retail efficiency through sensing technologies: A survey,"Grupo de Aplicacion de Telecomunicaciones Visuales, Universidad Politecnica de Madrid, Av. Complutense 30, 28040 Madrid, Spain"
+a78b5495a4223b9784cc53670cc10b6f0beefd32,Time-varying LSTM networks for action recognition,"Nanjing University of Posts and Telecommunications, Nanjing, China"
+a78b5495a4223b9784cc53670cc10b6f0beefd32,Time-varying LSTM networks for action recognition,"Key Lab of Broadband Wireless Communication and Sensor Network Technology, Ministry of Education, Nanjing, China"
+a735c6330430c0ff0752d117c54281b1396b16bf,Automatic location of facial landmarks for plastic surgery procedures,"Clínica Otocenter, Teresina, Piauí, Brasil"
+a73405038fdc0d8bf986539ef755a80ebd341e97,Conditional High-Order Boltzmann Machines for Supervised Relation Learning,"Center for Research on Intelligent Perception and Computing, National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences (CASIA), Beijing, China"
+b85d0aef3ee2883daca2835a469f5756917e76b7,Semantic movie summarization based on string of IE-RoleNets,"Key Laboratory of Medical Image Computing (Northeastern University), Ministry of Education, Shenyang, China"
+b856d8d6bff745bb1b4beb67e4b821fc20073840,Joint dimensionality reduction for face recognition based on D-KSVD,"Research&Advanced Technology Division of SAIC Motor Corporation Limited, Shanghai 201804, P.R China"
+b84dde74dddf6a3281a0b22c68999942d2722919,A New Approach of Facial Expression Recognition for Ambient Assisted Living,"LIARA Laboratory, University of Quebec at Chicoutimi (UQAC), Boulevard de l'Université, Chicoutimi (Quebec), Canada"
+b85d953de16eecaecccaa8fad4081bd6abda9b1b,Something to sink your teeth into: The presence of teeth augments ERPs to mouth expressions,"Indiana University-Bloomington, USA"
+b1f4423c227fa37b9680787be38857069247a307,"Collecting Large, Richly Annotated Facial-Expression Databases from Movies",Commonwealth Scientific and Industrial Research Organization (CSIRO)
+b1891010a0722117c57e98809e1f2b26cd8e9ee3,Analyzing the cross-generalization ability of a hybrid genetic & evolutionary application for multibiometric feature weighting and selection,"NC A&T State University, Greensboro, NC, USA"
+b1efefcc9a5d30be90776571a6cc0071f3679753,BRoPH: A compact and efficient binary 3D feature descriptor,"Research Institution of Intelligent Control and Testing, Graduate School of Tsinghua University at Shenzhen, 518055, China"
+b1534888673e6119f324082246016d28eba249aa,Saliency-based navigation in omnidirectional image,"IRISA, University of Rennes 1"
+dd031dbf634103ff3c58ce87aa74ec6921b2e21d,3D emotional facial animation synthesis with factored conditional Restricted Boltzmann Machines,"NPU-VUB Joint AVSP Research Lab, School of Computer Science, Northwestern Polytechnical University (NPU) Shaanxi Key Lab on Speech and Image Information Processing, 127 Youyi Xilu, Xi'an 710072, China"
+dd031dbf634103ff3c58ce87aa74ec6921b2e21d,3D emotional facial animation synthesis with factored conditional Restricted Boltzmann Machines,"NPU-VUB Joint AVSP Research Lab, Vrije Universitiet Brussel (VUB), Department of Electronics & Informatics (ETRO) Pleinlaan 2, 1050 Brussel, Belgium"
+ddd9d7cb809589b701fba9f326d7cf998a63b14f,What Can We Learn about Motion Videos from Still Images?,"School of Computer Science and Technology, Tianjin University&Tianjin Key Laboratory of Cognitive Computing and Application, Tianjin, China"
+dc84d3f29c52e6d296b5d457962c02074aa75d0f,Relative Forest for Visual Attribute Prediction,"360 AI Institute, Beijing, China"
+dc34ab49d378ddcf6c8e2dbf5472784c5bfa8006,Image Representation Using Supervised and Unsupervised Learning Methods on Complex Domain,"Dept. of Computer Science and Information Engineering, Providence University, Taichung, Taiwan"
+dca2bb023b076de1ccd0c6b8d71faeb3fccb3978,Joint Estimation of Age and Expression by Combining Scattering and Convolutional Networks,"Academia Sinica, Taipei, Taiwan"
+b6bb883dd14f2737d0d6225cf4acbf050d307634,“A Leopard Cannot Change Its Spots”: Improving Face Recognition Using 3D-Based Caricatures,"Department of Computer Science, Instituto de Telecomunicações, University of Beira Interior, Covilhã, Portugal"
+b6259115b819424de53bb92f64cc459dcb649f31,Learning Feature Representation for Face Verification,"Gwangju Institute of Science and Technology, 123, Cheomdangwagi-ro, Buk-gu, Gwangju, South Korea"
+b6ac33d2c470077fa8dcbfe9b113beccfbd739f8,Cross-modal alignment for wildlife recognition,"KU Leuven, Leuven, Belgium"
+a96c45ed3a44ad79a72499be238264ae38857988,Encouraging second-order consistency for multiple graph matching,"Gwangju Institute of Science and Technology (GIST), Gwangju, Republic of Korea"
+d57c8d46a869c63fb20e33bc21bc2a3c4628f5b4,A novel spontaneous facial expression recognition using dynamically weighted majority voting based ensemble classifier,"School of Information and Mechatronics, Gwangju Institute of Science and Technology, Gwangju, Korea"
+d57c8d46a869c63fb20e33bc21bc2a3c4628f5b4,A novel spontaneous facial expression recognition using dynamically weighted majority voting based ensemble classifier,"Al Imam Mohammad Ibn Saud Islamic University, Riyadh, Saudi Arabia"
+d5d5cc27ca519d1300e77e3c1a535a089f52f646,Stratified pooling based deep convolutional neural networks for human action recognition,"School of Information, Hunan University of Humanities, Science and Technology, Loudi, China"
+d5d5cc27ca519d1300e77e3c1a535a089f52f646,Stratified pooling based deep convolutional neural networks for human action recognition,"Fujian Key Laboratory of the Brain-like Intelligent Systems, Xiamen, China"
+d5d5cc27ca519d1300e77e3c1a535a089f52f646,Stratified pooling based deep convolutional neural networks for human action recognition,"Computer Engineering College, Jimei University, Xiamen, China"
+d5dc78eae7a3cb5c953c89376e06531d39b34836,High-speed face recognition using self-adaptive radial basis function neural networks,"Department of Information Technology, Netaji Subhas Engineering College, Kolkata, India"
+d2598c088b0664c084413796f39697c6f821d56e,Cross-modal face matching: Tackling visual abstraction using fine-grained attributes,"School of Information and Communication Engineering, Beijing University of Posts and Telcommunications, Beijing, China"
+d2b3166b8a6a3e6e7bc116257e718e4fe94a0638,An unsupervised approach to learn the kernel functions: from global influence to local similarity,"Computer Vision Research Group, School of Computer Sciences, Universiti Sains Malaysia, Penang, Malaysia"
+aad4c94fd55d33a3f3a5377bbe441c9474cdbd1e,A low rank model based improved eye detection under spectacles,"Dept. of Electrical Engineering, National Institute of Technology, Rourkela, India 769008"
+aad6fc5bd7631d2e68b7a5a01ac5d578899c43e5,Multi-scale primal feature based facial expression modeling and identification,"Dept. of Comput. Sci., New York State Univ., Binghamton, NY, USA"
+aad7b12936e0ced60bc0be95e8670b60b5d5ce20,Face identification using affine simulated dense local descriptors,"Department of Computer Science and Engineering, POSTECH, Pohang 790-784, Republic of Korea"
+af29ad70ab148c83e1faa8b3098396bc1cd87790,Unconstrained face detection: a Deep learning and Machine learning combined approach,"Department of Computer Science, Solapur University, Solapur, India"
+af8e22ef8c405f9cc9ad26314cb7a9e7d3d4eec2,A new facial expression recognition based on curvelet transform and online sequential extreme learning machine initialized with spherical clustering,"Electrical-Electronics Engineering Department, Izmir University of Economics, Balcova, Turkey"
+af97e792827438ddea1d5900960571939fc0533e,Face recognition under variable lighting using the mean-field method and the gray-level pyramid,"Dept. of Electron. & Inf., Toyota Technol. Inst., Nagoya, Japan"
+b712f08f819b925ff7587b6c09a8855bc295d795,Independent Component Analysis Using Semi-Parametric Density Estimation Via Entropy Maximization,"Dept. of EMPH, Icahn School of Medicine at Mount Sinai, New York, NY 10029"
+b75eecc879da38138bf3ace9195ae1613fb6e3cc,Improvement in Detection of Wrong-Patient Errors When Radiologists Include Patient Photographs in Their Interpretation of Portable Chest Radiographs,"Department of Radiology and Imaging Sciences, Winship Cancer Institute, Emory University School of Medicine, Atlanta, USA"
+b75eecc879da38138bf3ace9195ae1613fb6e3cc,Improvement in Detection of Wrong-Patient Errors When Radiologists Include Patient Photographs in Their Interpretation of Portable Chest Radiographs,"Emory University School of Medicine, Atlanta, USA"
+b7ec41005ce4384e76e3be854ecccd564d2f89fb,Granular Computing and Sequential Analysis of Deep Embeddings in Fast Still-to-Video Face Recognition,"National Research University Higher School of Economics, Laboratory of Algorithms and Technologies for Network Analysis, Nizhny Novgorod, Russia"
+dba7d8c4d2fca41269a2c96b1ea594e2d0b9bdda,Largest Matching Areas for Illumination and Occlusion Robust Face Recognition,"School of Electronics, Electrical Engineering and Computer Science, Queen's University Belfast, Belfast, U.K."
+dbcfefa92edab8d1ffe8bc1cc66ad80fb13d2b6a,Feature extraction using maximum variance sparse mapping,"College of Computer Science and Technology, Wuhan University of Science and Technology, Wuhan, China"
+dbcfefa92edab8d1ffe8bc1cc66ad80fb13d2b6a,Feature extraction using maximum variance sparse mapping,"Key Lab Complex System & Intelligence Science, Institute of Automation, Chinese Academy of Science, Beijing, China"
+dbc3ab8c9f564f038e7779b87900c4a0426f3dd1,Eye localization for face matching: is it always useful and under what conditions?,"Philips Research Eindhoven, Eindhoven, Netherlands"
+dbc3ab8c9f564f038e7779b87900c4a0426f3dd1,Eye localization for face matching: is it always useful and under what conditions?,"Philips Applied Technologies, Eindhoven, Netherlands"
+a85f691c9f82a248aa2c86d4a63b9036d6cf47ab,Trends and Controversies,Research and Academic Computer Network (NASK)
+a85f691c9f82a248aa2c86d4a63b9036d6cf47ab,Trends and Controversies,Biometric and Imaging Processing Laboratory (BIPLab)
+a88ced67f4ed7940c76b666e1c9c0f08b59f9cf8,Towards view-invariant expression analysis using analytic shape manifolds,"Center for Automation Research, UMIACS University of Maryland, College Park, MD 20742"
+a8fd23934e5039bb818b8d1c47ccb540ce2c253c,Sparse matrix transform-based linear discriminant analysis for hyperspectral image classification,"Faculty of Mathematics and Statistics, Hubei Key Laboratory of Applied Mathematics, Hubei University, Wuhan, China"
+dec76940896a41a8a7b6e9684df326b23737cd5d,Seeing through the expression: Bridging the gap between expression and emotion recognition,"Dept. of Comput. Sci. & Info. Eng., National Yunlin Univ. of Science & Technology, Taiwan"
+b0b944b3a783c2d9f12637b471fe1efb44deb52b,Offering Verified Credentials in Massive Open Online Courses: MOOCs and technology to advance learning and learning research (Ubiquity symposium),Stanford University and Coursera
+b0b944b3a783c2d9f12637b471fe1efb44deb52b,Offering Verified Credentials in Massive Open Online Courses: MOOCs and technology to advance learning and learning research (Ubiquity symposium),Coursera and Stanford University
+a6e4f924cf9a12625e85c974f0ed136b43c2f3b5,A new facial age estimation method using centrally overlapped block based local texture features,"Department of Computer Technologies, Trabzon Vocational School, Karadeniz Technical University, Trabzon, Turkey"
+a6e4f924cf9a12625e85c974f0ed136b43c2f3b5,A new facial age estimation method using centrally overlapped block based local texture features,"Department of Computer Engineering, Karadeniz Technical University, Trabzon, Turkey"
+a6e75b4ccc793a58ef0f6dbe990633f7658c7241,Boosting Hankel matrices for face emotion recognition and pain detection,"DICGIM, Universitá degli Studi di Palermo, V.le delle Scienze, Ed. 6, 90128 Palermo, Italy"
+a6902db7972a7631d186bbf59c5ef116c205b1e8,Photo clip art,Microsoft Research Cambridge
+b944cc4241d195b1609a7a9d87fce0e9ba1498bc,Kernel Sparse Representation-Based Classifier,"Department of Information Management, Yuan Ze University, Taoyuan, China"
+b999364980e4c21d9c22cc5a9f14501432999ca4,Human action recognition in videos with articulated pose information by deep networks,"Vision Laboratory, LARSyS, University of the Algarve, Faro, Portugal"
+b9dc8cc479cacda1f23b91df00eb03f88cc0c260,Event Specific Multimodal Pattern Mining for Knowledge Base Construction,"Columbia Univeristy, New York, NY, USA"
+b961e512242ddad7712855ab00b4d37723376e5d,A real-time framework for eye detection and tracking,"Department of Computer Science, University of Western Ontario, London, Canada"
+a13a27e65c88b6cb4a414fd4f6bca780751a59db,Deep convolution neural network with stacks of multi-scale convolutional layer block using triplet of faces for face recognition in the wild,"Department of Creative IT Engineering, POSTECH, Pohang, South Korea, 37673"
+a13a27e65c88b6cb4a414fd4f6bca780751a59db,Deep convolution neural network with stacks of multi-scale convolutional layer block using triplet of faces for face recognition in the wild,"Department of Computer Science & Engineering, POSTECH, Pohang, South Korea, 37673"
+a11ce3c9b78bf3f868b1467b620219ff651fe125,Semi-supervised Identification of Rarely Appearing Persons in Video by Correcting Weak Labels,"University of Applied Sciences Jena, Jena, Germany"
+a11ce3c9b78bf3f868b1467b620219ff651fe125,Semi-supervised Identification of Rarely Appearing Persons in Video by Correcting Weak Labels,"German National Library of Science and Technology & Leibniz Universität Hannover, Hannover, Germany"
+efc78a7d95b14abacdfde5c78007eabf9a21689c,Subjectively Interesting Component Analysis: Data Projections that Contrast with Prior Expectations,"Ghent University, Ghent, Belgium"
+ef35c30529df914a6975af62aca1b9428f678e9f,Smile detection in the wild with deep convolutional neural networks,"PolyU Shenzhen Research Institute, Shenzhen, China"
+ef35c30529df914a6975af62aca1b9428f678e9f,Smile detection in the wild with deep convolutional neural networks,"Department of Computer Science, Chu Hai College of Higher Education, Tuen Mun, Hong Kong"
+c38b1fa00f1f370c029984c55d4d2d40b529d00c,Neural Information Processing,"University of Istanbul, Istanbul, Turkey"
+c38b1fa00f1f370c029984c55d4d2d40b529d00c,Neural Information Processing,"University at Qatar, Doha, Qatar"
+c38b1fa00f1f370c029984c55d4d2d40b529d00c,Neural Information Processing,"Tunku Abdul Rahman University College, Kuala Lumpur, Malaysia"
+c38b1fa00f1f370c029984c55d4d2d40b529d00c,Neural Information Processing,"University of Science Technology, Wuhan, China"
+c3e53788370341afe426f2216bed452cbbdaf117,A crowd sourced framework for neighbour assisted medical emergency system,"School of Engineering, Computer and Mathematical Sciences, Auckland University of Technology, Auckland, New Zealand"
+c4b00e86841db3fced2a5d8ac65f80d0d3bbe352,A multi-view approach on modular PCA for illumination and pose invariant face recognition,"Dept. of Electr. & Comput. Eng., Old Dominion Univ., Norfolk, VA, USA"
+c4e2d5ebfebbb9dcee6a9866c3d6290481496df5,Adaptive linear discriminant analysis for online feature extraction,"Queen’s University, Kingston, Canada"
+c47bd9f6eb255da525dbcdfc111609c90bc4d2ae,An optimization of the K-Nearest Neighbor using Dynamic Time Warping as a measurement similarity for facial expressions recognition,"LRIT, CNRST (URAC29), Mohammed V University of Rabat, Morocco"
+c47bd9f6eb255da525dbcdfc111609c90bc4d2ae,An optimization of the K-Nearest Neighbor using Dynamic Time Warping as a measurement similarity for facial expressions recognition,"IRDA Group, ADMIR Laboratory, Rabat IT Center, ENSIAS, CNRST (URAC29), Mohammed V University of Rabat, Morocco"
+c459014131cbcd85f5bd5c0a89115b5cc1512be9,Face recognition in the presence of expression and/or illumination variation,"Inf. Syst. Dept., Buckingham Univ., UK"
+ea227e47b8a1e8f55983c34a17a81e5d3fa11cfd,Age group classification in the wild with deep RoR architecture,"North China Electric Power University Department of Electronic and Communication Engineering Baoding, Hebei, China"
+ea227e47b8a1e8f55983c34a17a81e5d3fa11cfd,Age group classification in the wild with deep RoR architecture,"University of Missouri Department of Electrical and Computer Engineering Columbia, MO, USA"
+eac97959f2fcd882e8236c5dd6035870878eb36b,Adaptive ranking of facial attractiveness,"Department of Computer Science and Engineering, University of California, San Diego"
+ea1303f6746f815b7518c82c9c4d4a00cd6328b9,Spontaneous Expression Recognition Using Universal Attribute Model,"Department of Computer Science and Engineering, Visual Learning and Intelligence Group, IIT Hyderabad, Hyderabad, India"
+ea5c9d5438cde6d907431c28c2f1f35e02b64b33,Robust principal component analysis via feature self-representation,"School of Computer Science and Technology, Harbin Institute of Technology Shenzhen Graduate School, Shenzhen, China"
+e1d1540a718bb7a933e21339f1a2d90660af7353,Discriminative Probabilistic Latent Semantic Analysis with Application to Single Sample Face Recognition,"College of Arts and Sciences, Shanxi Agricultural University, Shanxi, China"
+e14b046a564604508ea8e3369e7e9f612e148511,Facial Expression Recognition on Hexagonal Structure Using LBP-Based Histogram Variances,"Video Surveillance Laboratory, Guizhou University for Nationalities, Guiyang, China"
+e14b046a564604508ea8e3369e7e9f612e148511,Facial Expression Recognition on Hexagonal Structure Using LBP-Based Histogram Variances,"Centre for Innovation in IT Services and Applications (iNEXT), University of Technology, Sydney, Australia"
+e14cc2715b806288fe457d88c1ad07ef55c65318,A Deep Feature based Multi-kernel Learning Approach for Video Emotion Recognition,"City University of New York, New York, NY, USA"
+e198a7b9e61dd19c620e454aaa81ae8f7377ade0,A hierarchical approach to facial aging,"Face Aging Group, Computer Science Department, UNCW, USA"
+cdf2c8752f1070b0385a94c7bf22e8b54cac521b,An Innovative Weighted 2DLDA Approach for Face Recognition,"Faculty of Electronic Information and Electrical Engineering, Dalian University, Dalian, China"
+cdf2c8752f1070b0385a94c7bf22e8b54cac521b,An Innovative Weighted 2DLDA Approach for Face Recognition,"YiLi Normal College, Yining, China"
+cd63759842a56bd2ede3999f6e11a74ccbec318b,Simultaneous dimensionality reduction and human age estimation via kernel partial least squares regression,"Hebei University of Technology, School of Science, Tianjin, P. R. China"
+cd74d606e76ecddee75279679d9770cdc0b49861,Transfer Learning of Structured Representation for Face Recognition,"Department of Mathematics, JiaYing University, Meizhou, China"
+ccca2263786429b1b3572886ce6a2bea8f0dfb26,Improved nuisance attribute projection for face recognition,"Department of Electro-Optics Engineering, Ben-Gurion University, Beer Sheva, Israel"
+ccca2263786429b1b3572886ce6a2bea8f0dfb26,Improved nuisance attribute projection for face recognition,"Technology Section, Israel National Police, Jerusalem, Israel"
+cccd0edb5dafb3a160179a60f75fd8c835c0be82,Extraction of texture and geometrical features from informative facial regions for sign language recognition,"Department of Electronics and Electrical Engineering, Indian Institute of Technology (IIT) Guwahati, Guwahati, India"
+cc05f758ccdf57d77b06b96b9d601bf2795a6cc4,Physiological parameter monitoring of drivers based on video data and independent vector analysis,University of British Columbia Department of Electrical and Computer Engineering
+cc05f758ccdf57d77b06b96b9d601bf2795a6cc4,Physiological parameter monitoring of drivers based on video data and independent vector analysis,Nanyang Technological University School of Computer Engineering
+cceec87bad847b9b87178bde8ce5cce6bf1a8e99,Robust face recognition via transfer learning for robot partner,"Graduate School of System Design Tokyo Metropolitan University Tokyo, Japan"
+e6f3707a75d760c8590292b54bc8a48582da2cd4,Lighting estimation of a convex Lambertian object using weighted spherical harmonic frames,"College of Control Engineering, Northeastern University at Qinhuangdao, Qinhuangdao, P.R. China"
+f9d9b2a1197cdb73e977490756c0ff8a30cafc3e,Discriminative face recognition via kernel sparse representation,"Key Laboratory of Modern Teaching Technology, Ministry of Education, Xi’an, China"
+f9d9b2a1197cdb73e977490756c0ff8a30cafc3e,Discriminative face recognition via kernel sparse representation,"Engineering Laboratory of Teaching Information Technology of Shaanxi Province, Xi’an, China"
+f9d9b2a1197cdb73e977490756c0ff8a30cafc3e,Discriminative face recognition via kernel sparse representation,"School of Computer Science, Shaanxi Normal University, Xi’an, China"
+f0b4f5104571020206b2d5e606c4d70f496983f9,Lattice computing (LC) meta-representation for pattern classification,"Department of Computer and Informatics Engineering, Eastern Macedonia and Thrace Institute of Technology, Human Machines Interaction (HMI) Laboratory, 65404 Kavala, Greece"
+f7ae38a073be7c9cd1b92359131b9c8374579b13,Descriptor Learning via Supervised Manifold Regularization for Multioutput Regression,"Department of Medical Biophysics, University of Western Ontario, London, ON, Canada"
+f7ae38a073be7c9cd1b92359131b9c8374579b13,Descriptor Learning via Supervised Manifold Regularization for Multioutput Regression,"Departments of Medical Imaging and Medical Biophysics, University of Western Ontario, London, ON, Canada"
+e82a0976db908e6f074b926f58223ac685533c65,Audiovisual synchrony assessment for replay attack detection in talking face biometrics,"Telecom Division, Centre de Développement des Technologies Avancées, Algiers, Algeria"
+faa46ef96493b04694555738100d9f983915cf9b,"Expression invariant face recognition using semidecimated DWT, Patch-LDSMT, feature and score level fusion","Department of Electronics and Communication Engineering, Visvesvaraya National Institute of Technology, Nagpur, India"
+ff82825a04a654ca70e6d460c8d88080ee4a7fcc,"Face Recognition in Surveillance Conditions with Bag-of-words, using Unsupervised Domain Adaptation","Dept. of CS&E, IIT Madras, India"
+ff9e042cccbed7e350a25b7d806cd17fb79dfdf9,Band-pass correlation filter for illumination- and noise-tolerant face recognition,"Department of Electrical Engineering, Future Institute of Engineering and Management, Kolkata, India"
+ff9e042cccbed7e350a25b7d806cd17fb79dfdf9,Band-pass correlation filter for illumination- and noise-tolerant face recognition,"Department of Applied Optics and Photonics, University of Calcutta, Kolkata, India"
+fff31548617f208cd5ae5c32917afd48abc4ff6a,Mobile situated analytics of ego-centric network data,FX Palo Alto Laboratory
+ff402bd06c9c4e94aa47ad80ccc4455efa869af3,ICA filters for lighting invariant face recognition,"Dept. of Electr. & Comput. Eng., McMaster Univ., Hamilton, Ont., Canada"
+ff8db3810f927506f3aa594d66d5e8658f3cf4d5,Visual Descriptors in Methods for Video Hyperlinking,"Charles University, Prague, Czech Rep"
+ff8db3810f927506f3aa594d66d5e8658f3cf4d5,Visual Descriptors in Methods for Video Hyperlinking,"Masaryk University, Brno, Czech Rep"
+ffb1cb0f9fd65247f02c92cfcb152590a5d68741,Complexity reduction of kernel discriminant analysis,"Department of Electronics Engineering, Mokpo National University, Republic of Korea"
+c5c53d42e551f3c8f6ca2c13335af80a882009fa,Group Collaborative Representation for Image Set Classification,"College of Information Science and Technology, Agricultural University of Hebei, Baoding, China"
+c5c53d42e551f3c8f6ca2c13335af80a882009fa,Group Collaborative Representation for Image Set Classification,"International Computer Science Institute, University of California at Berkeley, Berkeley, USA"
+c5c53d42e551f3c8f6ca2c13335af80a882009fa,Group Collaborative Representation for Image Set Classification,"Department of Statistics, University of California at Berkeley, Berkeley, USA"
+c58ece1a3fa23608f022e424ec5a93cddda31308,Extraction of Visual Facial Features for Health Management,"Department of Computer Science and Information Engineering, College of Electrical Engineering and Computer Science, National United University, Miaoli, Taiwan"
+c58ece1a3fa23608f022e424ec5a93cddda31308,Extraction of Visual Facial Features for Health Management,"Department of Information Management, College of Management, National United University, Miaoli, Taiwan"
+c58ece1a3fa23608f022e424ec5a93cddda31308,Extraction of Visual Facial Features for Health Management,"Department of Electrical Engineering, College of Electrical Engineering and Computer Science, National United University, Miaoli, Taiwan"
+c252bc84356ed69ccf53507752135b6e98de8db4,Sparse representation-based robust face recognition by graph regularized low-rank sparse representation recovery,"Institute of Image Processing and Pattern Recognition, Henan University, Kaifeng 475004, China"
+c23bd1917badd27093c8284bd324332b8c45bfcf,Personalized facial expression recognition in indoor environments,"Department of Computer Science and Information Engineering, National Yunlin University of Science and Technology, Taiwan 640, R.O.C."
+c2474202d56bb80663e7bece5924245978425fc1,Localize heavily occluded human faces via deep segmentation,"Statistical Machine Intelligence & LEarning, School of Computer Science & Engineering University of Electronic Science and Technology of China, 611731, China"
+c29fe5ed41d2240352fcb8d8196eb2f31d009522,Age estimation with dynamic age range,"College of Computer Science and Technology of Huaqiao University Xiamen, Xiamen, China"
+f63b3b8388bc4dcd4a0330402af37a59ce37e4f3,Random attributes for image classification,"Bilgisayar Mühendisliği Bölümü, Deniz Harp Okulu, İstanbul, Türkiye"
+e95c5aaa72e72761b05f00fad6aec11c3e2f8d0f,Gabor Filter Based Face Recognition Using Non-frontal Face Images,"Department of Computer Science and Technology, Indian Institute of Engineering Science and Technology, Shibpur, Howrah-711 103, India"
+e9331ae2a887c02e0a908ebae2810a681aedee29,Smooth Adaptive Fitting of 3D Face Model for the Estimation of Rigid and Non-rigid Facial Motion in Video Sequences,"Research Team on Audio Visual Signal Processing (AVSP), Vrije Universiteit Brussel (VUB), Electronics and Informatics Department, VUB-ETRO, Pleinlaan 2, 1050 Brussel, Belgium"
+f17d8f14651c123d39e13a39dc79b7eb3659fe68,An adaptive training based on classification system for patterns in facial expressions using SURF descriptor templates,"National University of Computer and Emerging Sciences, Islamabad, Islamabad, Pakistan"
+f17d8f14651c123d39e13a39dc79b7eb3659fe68,An adaptive training based on classification system for patterns in facial expressions using SURF descriptor templates,"College of Computer and Information Sciences, Al Imam Mohammad Ibn Saud Islamic University (IMSIU), Riyadh, Saudi Arabia"
+f16599e4ec666c6390c90ff9a253162178a70ef5,Linguistic Patterns and Cross Modality-based Image Retrieval for Complex Queries,"BITS Pilani, Pilani , India"
+f16599e4ec666c6390c90ff9a253162178a70ef5,Linguistic Patterns and Cross Modality-based Image Retrieval for Complex Queries,"BITS Pilani, India , India"
+f19bf8b5c1860cd81b5339804d5db9e791085aa7,Video action classification using symmelets and deep learning,"Department of Computer Science and Engineering, National Taiwan Ocean University, No.2, Beining Rd., Keelung 202, Taiwan"
+f19bf8b5c1860cd81b5339804d5db9e791085aa7,Video action classification using symmelets and deep learning,"Department of Learning and Digital Technology, Fo Guang University, Yilan, Taiwan"
+e7697c7b626ba3a426106d83f4c3a052fcde02a4,Real time 3D face alignment with Random Forests-based Active Appearance Models,"Computer Vision Laboratory, ETH Zurich, Sternwartstrasse 7, 8092, Switzerland"
+e7b7df786cf5960d55cbac4e696ca37b7cee8dcd,A sparse kernel representation method for image classification,"Department of electronic engineering, Key Lab of Intelligent Perception and Image Understanding of Ministry of Education, Xi'an, China"
+cb8382f43ce073322eba82809f02d3084dad7969,Facial Expression Recognition using 2D Stationary Wavelet Transform and Gray-Level Co-occurrence MatrixP@13-17,"Department of Computer Science and Engineering, National Institute of Technology, Rourkela, Odisha, India"
+cb9921d5fc4ffa50be537332e111f03d74622442,Face Occlusion Detection Using Cascaded Convolutional Neural Network,"School of Computer Science and Engineering, Hebei University of Technology, Tianjin, China"
+f812347d46035d786de40c165a158160bb2988f0,Predictive coding as a model of cognition,"Department of Informatics, King’s College London, Strand, London, UK"
+f8fe1b57347cdcbea755722bf1ae85c4b26f3e5c,OptiFuzz: a robust illumination invariant face recognition system and its implementation,"Toyohashi University of Technology, Toyohashi, Japan"
+f834c50e249c9796eb7f03da7459b71205dc0737,Enhanced Patterns of Oriented Edge Magnitudes for Face Recognition and Image Matching,"GIPSA Laboratory, Image and Signal Department, Grenoble Institute of Technology, Grenoble, France"
+cea2911ccabab40e9c1e5bcc0aa1127cab0c789f,Siamese multi-layer perceptrons for dimensionality reduction and face identification,"University of Lyon, LIRIS - CNRS, National Institute of Applied Sciences (INSA), Lyon, France"
+cec8936d97dea2fcf04f175d3facaaeb65e574bf,Large-Scale Video Classification with Elastic Streaming Sequential Data Processing System,"Shanghai Advanced Research Institute, CAS & Qiniu AI Lab, Shanghai, China"
+cec8936d97dea2fcf04f175d3facaaeb65e574bf,Large-Scale Video Classification with Elastic Streaming Sequential Data Processing System,"Shanghai Advanced Research Institute, CAS, Shanghai, China"
+ce70dd0d613b840754dce528c14c0ebadd20ffaa,Deep Learning on Sparse Manifolds for Faster Object Segmentation,School of Computer ScienceThe University of Adelaide
+ceba8ca45bad226c401a509e6b8ccbf31361b0c9,Facial expression recognition using Support Vector Machines,"Electrical &amp; Electronic Engineering Department, Mevlana University Konya, Turkey"
+e0423788eb91772de9d708a17799179cf3230d63,Age Classification Using an Optimized CNN Architecture,"Department of Computer Engineering, TOBB University of Economics and Technology, Ankara, Turkey"
+46f48211716062744ddec5824e9de9322704dea1,Learning a Distance Metric from Relative Comparisons between Quadruplets of Images,"Sorbonne Universités, UPMC Univ Paris 06, UMR 7606, LIP6, Paris, France"
+4686df20f0ee40cd411e4b43860ef56de5531d9e,Illuminating light field: image-based face recognition across illuminations and poses,"Dept. of ECE, Maryland Univ., College Park, MD, USA"
+46976097c54e86032932d559c8eb82ffea4bb6bb,Facial emotion recognition with anisotropic inhibited Gabor energy histograms,"Center for Research in Intelligent Systems, University of California, Riverside Riverside, CA 92521-0425, USA"
+2c6ab32a03c4862ee3e2bc02e7e74745cd523ad2,Gabor based face recognition with dynamic time warping,"School of Information Technology Jawaharlal Nehru Technological University Hyderabad Andhra Pradesh, India"
+798e58c181f3ba3aecbe41acd1881860c5e2df3a,Lattice Computing Extension of the FAM Neural Classifier for Human Facial Expression Recognition,"Human-Machines Interaction (HMI) Laboratory, Department of Industrial Informatics, TEI of Kavala, Kavala, Greece"
+798e58c181f3ba3aecbe41acd1881860c5e2df3a,Lattice Computing Extension of the FAM Neural Classifier for Human Facial Expression Recognition,"Department of Business Planning &amp; Information Systems, TEI of Crete, Agios Nikolaos, Greece"
+2d3af3ee03793f76fb8ff15e7d7515ff1e03f34c,New semantic descriptor construction for facial expression recognition based on axiomatic fuzzy set,"Dalian Key Laboratory of Digital Technology for National Culture, Dalian Minzu University, Dalian, China"
+4113269f916117f975d5d2a0e60864735b73c64c,Regression and classification approaches to eye localization in face images,"Dept. of Eng. Sci., Oxford Univ., UK"
+83d50257eb4c0aa8d16d27bf2ee8d0614fd63bf6,A multi-perspective holistic approach to Kinship Verification in the Wild,"Department of Control and Computer Engineering, Politecnico di Torino, Italy"
+1b9976fea3c1cf13f0a102a884f027d9d80a14b3,Building a game scenario to encourage children with autism to recognize and label emotions using a humanoid robot,"R&D Centre Algoritmi, School of Engineering, University of Minho, Portugal"
+1bcb1c6d6cebc9737f9933fcefbf3da8a612f994,A novel Monogenic Directional Pattern (MDP) and pseudo-Voigt kernel for facilitating the identification of facial emotions,"Department of CSE, Regional Campus of Anna University, Tirunelveli 627007, India"
+7782627fa2e545276996ff9e9a1686ac496df081,Enhanced Autocorrelation in Real World Emotion Recognition,"University of Ulm, Ulm, Germany"
+77c3574a020757769b2ca807ff4b95a88eaa2a37,Computerized Face Recognition in Renaissance Portrait Art: A quantitative measure for identifying uncertain subjects in ancient portraits,"Department of Electrical Engineering, University of California, Riverside, Riverside CA, California 92521 United States"
+4848a48a2b8bacd2092e87961cd86818da8e7151,Comparative evaluation of facial fiducial point detection approaches,"Department of Systems and Computing, Federal University of Campina Grande, Av. Apríigio Veloso, 882, 58429-900 Campina Grande, PB, Brazil"
+480ccd25cb2a851745f5e6e95d33edb703efb49e,Cross-Modal Message Passing for Two-Stream Fusion,"School of Computer Science, Center for Optical Imagery Analysis and Learning (OPTIMAL)"
+7081958a390d3033f5f33e22bbfec7055ea8d601,Learning Distributions of Image Features by Interactive Fuzzy Lattice Reasoning in Pattern Recognition Applications,"Dept of Computer and Informatics Engineering, Eastern Macedonia and Thrace Institute of Technology, Kavala, Greece"
+70d8bda4aafb0272ac4b93cd43e2448446b8e94d,Using SVM to design facial expression recognition for shape and texture features,"Department of Information Management, National Formosa University, Huwei, Yulin 632, Taiwan"
+1eb1fdc5c933d2483ba1acbfa8c457fae87e71e5,Building semantic understanding beyond deep learning from sound and vision,"Dept. de Ciência da Computacão, Universidade Federal de Ouro Preto, MG Brazil"
+1ea4347def5868c622d7ce57cbe171fa68207e2b,Analysis of WD Face Dictionary for Sparse Coding Based Face Recognition,"School of Computing and Electrical Engineering, Indian Institute of Technology Mandi, India"
+84be18c7683417786c13d59026f30daeed8bd8c9,A photometric sampling method for facial shape recovery,"Robótica y Manufactura Avanzada, Centro de Investigación y de Estudios Avanzados del Instituto Politécnico Nacional, Ramos Arizpe, Mexico"
+84a74ef8680b66e6dccbc69ae80321a52780a68e,Facial Expression Recognition,"Department of Electrical Engineering, The City College of New York, New York, USA"
+4aa27c1f8118dbb39809a0f79a28c0cbc3ede276,3D Visual Speech Animation from Image Sequences,"Université de Lorraine, LORIA, UMR 7503"
+4a7e5a0f6a0df8f5ed25ef356cd67745cd854bea,Face Recognition Using the Feature Fusion Technique Based on LNMF and NNSC Algorithms,"JiangSu Province Support Software Engineering R&D Center for Modern Information Technology Application in Enterprise, Suzhou, China"
+4a7e5a0f6a0df8f5ed25ef356cd67745cd854bea,Face Recognition Using the Feature Fusion Technique Based on LNMF and NNSC Algorithms,"Department of Electronic Information Engineering, Suzhou Vocational University, Suzhou, Jiangsu, China"
+247a8040447b6577aa33648395d95d80441a0cf3,A Fungus Spores Dataset and a Convolutional Neural Network Based Approach for Fungus Detection,"Data and Analytics Department, KPMG AGWPG, Düsseldorf, Germany"
+24f3dfeb95bdecdc604d630acdfcafa1dc7c9124,Behavioural facial animation using motion graphs and mind maps,Instituto de Telecomunicações & Faculdade de Ciěncias da Universidade do Porto
+245d98726674297208e76308c3a11ce3fc43bee2,In-plane face orientation estimation in still images,"IRCICA, Parc Scientifique de la Haute Borne, Lille 1 University, Villeneuve d’Ascq, France"
+245d98726674297208e76308c3a11ce3fc43bee2,In-plane face orientation estimation in still images,"Faculty of Engineering, Computer Engineering Department, Akdeniz University, Dumlupinar Bulvari, Turkey"
+23edcd0d2011d9c0d421193af061f2eb3e155da3,Facial age estimation by using stacked feature composition and selection,"Network Center, Huizhou University, Huizhou, China"
+235bebe7d0db37e6727dfa1246663be34027d96b,General Type-2 fuzzy edge detectors applied to face recognition systems,"School of Engineering, University of Baja California, Tijuana, M&#x00E9;xico"
+239e305c24155add73f2a0ba5ccbd66b37f77e14,Fast computation of low-rank matrix approximations,"University of California at Santa Cruz, Santa Cruz, California"
+4f03ba35440436cfa06a2ed2a571fea01cb36598,The extended collaborative representation-based classification,"School of Big Data and Computer, Science, Guizhou Normal University, Guiyang, Guizhou, 550025, P. R. China"
+8d0bc14589dea1f4f88914ffcb57a5c54830f2cc,Computer Vision -- ACCV 2014,"Technische Universität München, Garching, Germany"
+8d0bc14589dea1f4f88914ffcb57a5c54830f2cc,Computer Vision -- ACCV 2014,"University of California at Merced, Merced, USA"
+158aa18c724107587bcc4137252d0ba10debf417,A randomized approach to large-scale subspace clustering,"Dept. of ECE &amp; Digital Technology Center, Univ. of Minnesota, USA"
+159b1e3c3ed0982061dae3cc8ab7d9b149a0cdb1,Weak Classifier for Density Estimation in Eye Localization and Tracking,"Department of Electronics and Computing and the Electronics and Information Technology Research & Development Center, Universidade Federal do Amazonas, Manaus-AM, CEP, Brazil"
+152683f3ac99f829b476ea1b1b976dec6e17b911,Evaluation of head pose estimation methods for a non-cooperative biometric system,"Department of Microelectornics and Computer Science, Lodz University of Technology, ul. Wolczanska 221/223, 90-924, Poland"
+127c7f87f289b1d32e729738475b337a6b042cf7,Real-Time Drowsiness Detection Algorithm for Driver State Monitoring Systems,"Electronics and Telecommunications Research Institute, Korea"
+1252727e8096f48096ef89483d30c3a74500dd15,Action recognition using edge trajectories and motion acceleration descriptor,"School of Electronic and Information Engineering, Xi’an Jiaotong University, Xi’an, China"
+1252727e8096f48096ef89483d30c3a74500dd15,Action recognition using edge trajectories and motion acceleration descriptor,"School of Electrical Engineering and Automation, Qilu University of Technology, Jinan, China"
+8cffe360a05085d4bcba111a3a3cd113d96c0369,Learning universal multi-view age estimator using video context,"Department of Electrical and Computer Engineering, Singapore"
+8cedb92694845854f3ad0daf6c9adb6b81c293de,Products appreciation by facial expressions analysis,"Delft University of Technology and Sensor Technology, Netherlands Defense Academy"
+8c048be9dd2b601808b893b5d3d51f00907bdee0,Spontaneous versus posed smile recognition via region-specific texture descriptor and geometric facial dynamics,"Engineering Lab on Intelligent Perception for Internet of Things, Peking University Shenzhen Graduate School, Shenzhen, China"
+8c048be9dd2b601808b893b5d3d51f00907bdee0,Spontaneous versus posed smile recognition via region-specific texture descriptor and geometric facial dynamics,"Department of Computer Science, Christian-Albrechts University, Kiel, Germany"
+85785ae222c6a9e01830d73a120cdac75d0b838a,Multimedia Database Retrieval,"Department of Electrical and Computer Engineering, Naresuan University, Muang, Thailand"
+1de23d7fe718d9fab0159f58f422099e44ad3f0a,Locality Preserving Collaborative Representation for Face Recognition,"School of Information Engineering and Automation, Kunming University of Science and Technology, Kunming, China"
+1de23d7fe718d9fab0159f58f422099e44ad3f0a,Locality Preserving Collaborative Representation for Face Recognition,"Department of Computer Science and Application, Zhengzhou Institute of Aeronautical Industry Management, Zhengzhou, China"
+71a9d7cf8cf1e206cb5fa18795f5ab7588c61aba,Robust Shape-Feature-Vector-Based Face Recognition System,"School of Electrical and Computer Engineering, Royal Melbourne Institute of Technology University , Melbourne, Australia"
+71a9d7cf8cf1e206cb5fa18795f5ab7588c61aba,Robust Shape-Feature-Vector-Based Face Recognition System,"School of Math and Geospatial Sciences, Royal Melbourne Institute of Technology University , Melbourne, Australia"
+71f07c95a2b039cc21854c602f29e5be053f2aba,A comparison of face and facial feature detectors based on the Viola–Jones general object detection framework,"Universidad de Las Palmas de Gran Canaria, SIANI, Edificio Central del Parque Científico-Tecnológico, Las Palmas, Spain"
+71f07c95a2b039cc21854c602f29e5be053f2aba,A comparison of face and facial feature detectors based on the Viola–Jones general object detection framework,"E.T.S. Ingenieros Industriales, Universidad de Castilla-La Mancha Campus Universitario, Ciudad Real, Spain"
+71d68af11df855f886b511e4fc1635c1e9e789b0,A Self-Configurable Systolic Architecture for Face Recognition System Based on Principal Component Neural Network,"Department of Embedded Systems, Institute for Infocomm Research, Singapore"
+71c4b8e1bb25ee80f4317411ea8180dae6499524,Extended Features using Machine Learning Techniques for Photo Liking Prediction,"Dept. of Audio Visual Technology, Technische Universitt, Ilmenau, Germany"
+761304bbd259a9e419a2518193e1ff1face9fd2d,Robust and Computationally Efficient Face Detection Using Gaussian Derivative Features of Higher Orders,"INRIA Grenoble-Rhône-Alpes Research Center, France"
+49358915ae259271238c7690694e6a887b16f7ed,Synthesis of expressive facial animations: A multimodal caricatural mirror,"TELE Lab, Université catholique de Louvain, Belgium"
+49358915ae259271238c7690694e6a887b16f7ed,Synthesis of expressive facial animations: A multimodal caricatural mirror,"TALP Research Center, Universitat Politècnica de Catalunya, Spain"
+49358915ae259271238c7690694e6a887b16f7ed,Synthesis of expressive facial animations: A multimodal caricatural mirror,"Speech Technology Group, Technical University of Madrid, Spain"
+49358915ae259271238c7690694e6a887b16f7ed,Synthesis of expressive facial animations: A multimodal caricatural mirror,"TCTS Lab, Faculté Polytechnique de Mons, Belgium"
+493bc7071e35e7428336a515d1d26020a5fb9015,Automated human behavioral analysis framework using facial feature extraction and machine learning,"Department of Electrical and Computer Engineering, Saginaw Valley State University, University Ctr, MI- 48710"
+4983076c1a8b80ff5cd68b924b11df58a68b6c84,Saliency-based framework for facial expression recognition,"Université de Lyon, CNRS, LIRIS, UMR5205, Université Lyon 1, Lyon, France"
+4983076c1a8b80ff5cd68b924b11df58a68b6c84,Saliency-based framework for facial expression recognition,"Laboratoire Hubert Curien, UMR5516, Université Jean Monnet, Saint-Etienne, France"
+4983076c1a8b80ff5cd68b924b11df58a68b6c84,Saliency-based framework for facial expression recognition,"Faculty of Information Technology, Barrett Hodgson University, Karachi, Pakistan"
+4932b929a2e09ddebedcb1abe8c62f269e7d4e33,Color based vehicle classification in surveillance videos,"Algılayıcılar, Görüntü ve Sinyal İşleme Grubu, HAVELSAN A.Ş. Ankara, Türkiye"
+492116d16a39eb54454c7ffb1754cea27ad3a171,Making Facial Expressions of Emotions Accessible for Visually Impaired Persons,"Radboud University, Nijmegen, Netherlands"
+492116d16a39eb54454c7ffb1754cea27ad3a171,Making Facial Expressions of Emotions Accessible for Visually Impaired Persons,"HAN University of Applied Sciences, Arnhem, Netherlands"
+49fe4f387ac7e5852a78b327ec42cc7300c5f8e0,3D model retrieval based on linear prediction coding in cylindrical and spherical projections using SVM-OSS,"Computer Vision Research lab, Electrical Engineering Faculty, Sahand University of Technology, Tabriz, Iran"
+493c8591d6a1bef5d7b84164a73761cefb9f5a25,User Profiling through Deep Multimodal Fusion,"University of California, Santa Cruz & Ghent University, Santa Cruz, CA, USA"
+40c9dce0a4c18829c4100bff5845eb7799b54ca1,Expression-invariant facial identification,"School of Computing and Communications, Faculty of Engineering and Information Technology, University of Technology, Sydney, Australia"
+40c9dce0a4c18829c4100bff5845eb7799b54ca1,Expression-invariant facial identification,"School of Software, Faculty of Engineering and Information Technology, University of Technology, Sydney, Australia"
+4033ac52dba394e390a86cd149b9838f1d7834b5,A modularly vectorized two dimensional LDA for face recognition,"Dept. of Computer Science, YiLi Normal College, Yining, China 835000"
+4033ac52dba394e390a86cd149b9838f1d7834b5,A modularly vectorized two dimensional LDA for face recognition,"Dept. of Computing, Curtin University of Technology, WA 6102, USA"
+4014e8c1a1b49ad2b9b2c45c328ec9f1fd56f676,ADL: Active dictionary learning for sparse representation,"Vulcan Inc, Seattle, WA 98104"
+4014e8c1a1b49ad2b9b2c45c328ec9f1fd56f676,ADL: Active dictionary learning for sparse representation,"Department of Electrical, Computer and Biomedical Engineering, University of Rhode Island, Kingston, RI 02881"
+4007bf090887d8a0e907ab5e17ecfcdbbdafc2e4,Robustness of DR-LDP over PCANet for face analysis,"School of Information Technology and Engineering, VIT University, Vellore, India"
+2e36b63fdf1353425a57a0665b0d0274efe92963,Discriminating real and posed smiles: human and avatar smiles,"The Australian National University RSCS, ANU, Canberra, Australia"
+2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d,Genealogical face recognition based on UB KinFace database,"CSE, SUNY at Buffalo, USA and Southeast University, China"
+2e27667421a7eeab278e0b761db4d2c725683c3f,Effective human age estimation using a two-stage approach based on Lie Algebrized Gaussians feature,"Eedoo Inc, Beijing, China"
+2e6776cd582c015b46faf616f29c98ce9cff51a2,Facial expression recognition using kernel canonical correlation analysis (KCCA),"Res. Center for Learning Sci., Southeast Univ., Jiangsu, China"
+2b300985a507533db3ec9bd38ade16a32345968e,Laplacian multiset canonical correlations for multiview feature extraction and image recognition,"School of Information Engineering, Yangzhou University, Yangzhou, China"
+2b0d14dbd079b3d78631117b1304d6c1579e1940,Fractional-Order Embedding Supervised Canonical Correlations Analysis with Applications to Feature Extraction and Recognition,"School of Computer Science and Engineering, Nanjing University of Science & Technology, Nanjing, People’s Republic of China"
+2b0d14dbd079b3d78631117b1304d6c1579e1940,Fractional-Order Embedding Supervised Canonical Correlations Analysis with Applications to Feature Extraction and Recognition,"Department of Computer Science and Technology, Yangzhou University, Yangzhou, People’s Republic of China"
+2be9284d531b8c573a4c39503ca50606446041a3,Recovering facial shape using a statistical surface normal model,"Dept. of Comput. Sci., York Univ., UK"
+4786638ffb3b2fb385cec80720cc6e7c3588b773,Effective semantic features for facial expressions recognition using SVM,"Department of Computer Science and Engineering, Tatung University, Taipei 104, Republic of China"
+4786638ffb3b2fb385cec80720cc6e7c3588b773,Effective semantic features for facial expressions recognition using SVM,"Department of Industrial Design, Tatung University, Taipei 104, Republic of China"
+780c8a795baca1ba4cb4956cded877dd3d1ca313,Simulation of face recognition at a distance by scaling down images,"Dept. of Advanced Technologies, Alcorn State University, MS, USA"
+8be60114634caa0eff8566f3252cb9a1b7d5ef10,Multiple target tracking with structural inference,Shanghai Jiao Tong University School of Electronic Information and Electrical Engineering
+8b4124bb68e5b3e6b8b77888beae7350dc594a40,Expression-invariant face recognition system using subspace model analysis,"Dept. of Comput. Syst., Univ. of Technol., Sydney, NSW, Australia"
+8bbd40558a99e33fac18f6736b8fe99f4a97d9b1,Visual Object Detection Using Cascades of Binary and One-Class Classifiers,"Laboratoire Jean Kuntzmann, Grenoble, France"
+13d430257d595231bda216ef859950caa736ad1d,Finding a Proper Approach to Obtain Cognitive Parameters from Human Faces Under Illumination Variations,"Universidad Tecnica Federico Santa Maria, Department of Electronic Engineering, Valparaiso, Chile"
+13179bb3f2867ea44647b6fe0c8fb4109207e9f5,Recurrent learning of context for salient region detection,"College of Artificial Intelligenge and Big Data, ChongQing University of Electronic Engineering, Chongqing, China"
+7f9be0e08784835de0f8bc3a82fcca02b3721dc1,Facial expression recognition under random block occlusion based on maximum likelihood estimation sparse representation,"School of Electrical and Electronic Engineering, Changchun University of Technology, Changchun, CO 130012 China"
+7f4040b482d16354d5938c1d1b926b544652bf5b,Competitive affective gaming: winning with a smile,"Universidade Nova de Lisboa, Caparica, Portugal"
+7f44e5929b11ce2192c3ae81fbe602081a7ab5c4,Real-time recognition of cattle using animal biometrics,"Department of Computer Science and Engineering, Indian Institute of Technology (Banaras Hindu University), Varanasi, India"
+7f44e5929b11ce2192c3ae81fbe602081a7ab5c4,Real-time recognition of cattle using animal biometrics,"Department of Computer Science and Engineering, Jaypee University of Information Technology, Solan, India"
+7f44e5929b11ce2192c3ae81fbe602081a7ab5c4,Real-time recognition of cattle using animal biometrics,"Department of Computer Science and Engineering, Shri Shankaracharya Technical Campus, Bhilai, District-Durg, India"
+7f1078a2ebfa23a58adb050084d9034bd48a8a99,"Fisher discrimination-based $$l_{2,1} $$ l 2 , 1 -norm sparse representation for face recognition","Collaborative Innovation Center of Electric Vehicles in Beijing, Beijing, China"
+7acbf0b060e948589b38d5501ca217463cfd5c2f,Learning Multiple Relative Attributes With Humans in the Loop,"Department of Computer Science, University of California at Davis, Davis, USA"
+7a666a91a47da0d371a9ba288912673bcd5881e4,Enhanced supervised locally linear embedding,"School of Physics and Electronic Engineering, Taizhou University, Taizhou 318000, PR China"
+7a94936ce558627afde4d5b439ec15c59dbcdaa4,"A Closed-Form, Consistent and Robust Solution to Uncalibrated Photometric Stereo Via Local Diffuse Reflectance Maxima","University of Bern, Bern, Switzerland"
+14efb131bed66f1874dd96170f714def8db45d90,Capturing AU-Aware Facial Features and Their Latent Relations for Emotion Recognition in the Wild,"Intel Labs China, Beijing, China"
+14aed1b7c08c941b1d2ba6c1c2ffb6255c306c74,Robust watch-list screening using dynamic ensembles of SVMs based on multiple face representations,"Laboratoire d’imagerie de vision et d’intelligence artificielle, École de technologie supérieure, Université du Québec, Montreal, Canada"
+14aed1b7c08c941b1d2ba6c1c2ffb6255c306c74,Robust watch-list screening using dynamic ensembles of SVMs based on multiple face representations,"Laboratoire d’interprétation et de traitement d’images et vidéo, Polytechnique Montréal, Montreal, Canada"
+8ef465ff12ee1d2be2a99d1c628117a4ce890a6b,An embedded system for real-time facial expression recognition based on the extension theory,"Department of Electrical Engineering, National Chin-Yi University of Technology, Taichung, Taiwan"
+8e8a6623b4abd2452779c43f3c2085488dfcb323,Multi-clue fusion for emotion recognition in the wild,"Nanjing University of Posts and Telecommunications, China"
+8e21399bb102e993edd82b003c306a068a2474da,A complete discriminative subspace for robust face recognition,"China Electronics Standardization Institute, Beijing, 100007"
+22648dcd3100432fe0cc71e09de5ee855c61f12b,Automatic generation of composite image descriptions,"Department of Software and Information Systems Engineering, Ben-Gurion University of the Negev, Beersheba, Israel"
+22c06284a908d8ad0994ad52119773a034eed7ee,Adaptive Visual Feedback Generation for Facial Expression Improvement with Multi-task Deep Neural Networks,"NTT Corporation, Atsugi, Japan"
+22d5aeb25bb034f6ae2fc50b5cdd9934a85d6505,An Effective Machine Learning Approach for Refining the Labels of Web Facial Images,"National Ilan University, Ilan, Taiwan Roc"
+259ddd3c618feec51576baac7eaaf80ea924b791,Private emotions versus social interaction: a data-driven approach towards analysing emotion in speech,"Lehrstuhl für Mustererkennung, FAU Erlangen – Nürnberg, Erlangen, Germany"
diff --git a/scraper/reports/doi_institutions_unknown.html b/scraper/reports/doi_institutions_unknown.html
new file mode 100644
index 00000000..633f482b
--- /dev/null
+++ b/scraper/reports/doi_institutions_unknown.html
@@ -0,0 +1 @@
+<!doctype html><html><head><meta charset='utf-8'><title>Unknown Institutions from DOI</title><link rel='stylesheet' href='reports.css'></head><body><h2>Unknown Institutions from DOI</h2><table border='1' cellpadding='3' cellspacing='3'><tr><td>NC A&T State University, Greensboro, NC, USA</td><td>6</td></tr><tr><td>School of Information and Software Engineering, University of Electronic Science and Technology of China (UESTC), Chengdu, 610054, China P.R.C</td><td>6</td></tr><tr><td>Electronics and Telecommunications Research Institute, Korea</td><td>5</td></tr><tr><td>Asian Institute of Technology (AIT), Pathum Thani 12120, Thailand</td><td>5</td></tr><tr><td>Chonnam National University, Gwangju, Korea</td><td>5</td></tr><tr><td>Department of Computer Science and Engineering, National Institute of Technology, Durgapur, India</td><td>5</td></tr><tr><td>Chongqing Institute of Green and Intelligent Technology, CAS, Chongqing, 400714</td><td>5</td></tr><tr><td>SIAT at Chinese Academy of Sciences, China</td><td>5</td></tr><tr><td>Department of Electronics and Telecommunications, Politecnico di Torino, Torino, Italy</td><td>5</td></tr><tr><td>University of Southern California Institute for Creative Technologies, Los Angeles, CA</td><td>5</td></tr><tr><td>Department of Electronics and Telecommunication Engineering, Don Bosco Institute of Technology, Kurla (W), Mumbai, India</td><td>5</td></tr><tr><td>R V College of Engineering, Department of Computer Science and Engineering, Bangalore, India</td><td>5</td></tr><tr><td>Inst. Nat. des Telecommun., Evry, France</td><td>5</td></tr><tr><td>Shanghai Jiao Tong University School of Electronic Information and Electrical Engineering</td><td>4</td></tr><tr><td>Department of Microelectornics and Computer Science, Lodz University of Technology, ul. 
Wolczanska 221/223, 90-924, Poland</td><td>4</td></tr><tr><td>North China Electric Power University Department of Electronic and Communication Engineering Baoding, Hebei, China</td><td>4</td></tr><tr><td>School of Engineering, Computer and Mathematical Sciences, Auckland University of Technology, Auckland, New Zealand</td><td>4</td></tr><tr><td>KU Leuven, Leuven, Belgium</td><td>4</td></tr><tr><td>Academia Sinica, Taipei, Taiwan</td><td>4</td></tr><tr><td>LIARA Laboratory, University of Quebec at Chicoutimi (UQAC), Boulevard de l'Université, Chicoutimi (Quebec), Canada</td><td>4</td></tr><tr><td>Dept. of Computing, Curtin University GPO Box U1987, Perth, WA 6845</td><td>4</td></tr><tr><td>NTT Software Innovation Center, Tokyo, Japan</td><td>4</td></tr><tr><td>EECS Department, University of Kansas, Lawrence, KS</td><td>4</td></tr><tr><td>Department of Mathematics and Computer Science University of Basel</td><td>4</td></tr><tr><td>Goa University, India</td><td>4</td></tr><tr><td>Beijing Key Lab of Intelligent Information Technology, School of Computer Science, Beijing Institute of Technology, Beijing 100081, China</td><td>4</td></tr><tr><td>VUB-NPU Joint AVSP Research Lab, Vrije Universiteit Brussel (VUB), Deptartment of Electronics & Informatics (ETRO), Pleinlaan 2, 1050 Brussel, Belgium</td><td>4</td></tr><tr><td>Graduate School of Information Science, Nara Institute of Science and Technology, Takayama-cho 8916-5, Ikoma-shi, Nara, Japan</td><td>4</td></tr><tr><td>Smart Surveillance Interest Group, Department of Computer Science, Universidade Federal de Minas Gerais, Minas Gerais, Brazil</td><td>4</td></tr><tr><td>Shanghai University School of Communication and Information Engineering Shanghai, China</td><td>4</td></tr><tr><td>Microsoft, Redmond, WA, USA</td><td>4</td></tr><tr><td>Computer Science and Engineering, Pennsylvania State University, PA, USA SiliconScapes, LLC, PA, USA</td><td>4</td></tr><tr><td>Dept. 
of Computer Engineering, Keimyung University, Daegu, Korea</td><td>4</td></tr><tr><td>National ICT Australia and UNSW, Sydney, Australia</td><td>4</td></tr><tr><td>Department of Electrical and Computer Engineering, Beckman Institute Advanced Science and Technology, University of Illinois at Urbana–Champaign, Urbana, IL, USA</td><td>4</td></tr><tr><td>Universiti Kuala Lumpur, Kuala Lumpur</td><td>4</td></tr><tr><td>Beijing Normal Univeristy, Beijing, China</td><td>4</td></tr><tr><td>University of Tunis, The National Higher school of engineers of Tunis (ENSIT), Laboratory of Signal Image and Energy Mastery, LR13ES03 (SIME), Tunis, Tunisia</td><td>4</td></tr><tr><td>Media Technology Lab, Huawei Technologies Co., Ltd</td><td>4</td></tr><tr><td>Department of Computer Graphics and Multimedia, University of Brno, Brno, Czech Republic</td><td>4</td></tr><tr><td>Center for Unified Biometrics and Sensors, University at Buffalo, NY, USA</td><td>4</td></tr><tr><td>Faculty of Engineering, Ain Shams University, Computer and Systems Engineering Department, Cairo, Egypt</td><td>4</td></tr><tr><td>School of Automation and Information Engineering, Xi'an University of Technology, Xi'an, China</td><td>4</td></tr><tr><td>Computer Science and Engineering Dept., University of Nevada Reno, USA</td><td>4</td></tr><tr><td>Department of Information and Control, B-DAT Laboratory, Nanjing University of Information and Technology, Nanjing, China</td><td>4</td></tr><tr><td>Inha University, South Korea</td><td>4</td></tr><tr><td>Dept. of Comput. Sci. 
& Technol., Tsinghua Univ., Beijing, China</td><td>4</td></tr><tr><td>Institute for Human-Machine Communication, Technische Universit&#x00E4;t M&#x00FC;nchen, Germany</td><td>4</td></tr><tr><td>Faculty of Engineering Science, Department of Systems Innovation, Arai Laboratory at Osaka University, Japan</td><td>4</td></tr><tr><td>NTT Corporation, Atsugi, Japan</td><td>3</td></tr><tr><td>Department of Software and Information Systems Engineering, Ben-Gurion University of the Negev, Beersheba, Israel</td><td>3</td></tr><tr><td>Intel Labs China, Beijing, China</td><td>3</td></tr><tr><td>School of Electrical and Electronic Engineering, Changchun University of Technology, Changchun, CO 130012 China</td><td>3</td></tr><tr><td>Radboud University, Nijmegen, Netherlands</td><td>3</td></tr><tr><td>Algılayıcılar, Görüntü ve Sinyal İşleme Grubu, HAVELSAN A.Ş. Ankara, Türkiye</td><td>3</td></tr><tr><td>Dept. of Audio Visual Technology, Technische Universitt, Ilmenau, Germany</td><td>3</td></tr><tr><td>School of Electrical and Computer Engineering, Royal Melbourne Institute of Technology University , Melbourne, Australia</td><td>3</td></tr><tr><td>School of Engineering, University of Baja California, Tijuana, M&#x00E9;xico</td><td>3</td></tr><tr><td>School of Computer Science, Center for Optical Imagery Analysis and Learning (OPTIMAL)</td><td>3</td></tr><tr><td>Department of Systems and Computing, Federal University of Campina Grande, Av. 
Apríigio Veloso, 882, 58429-900 Campina Grande, PB, Brazil</td><td>3</td></tr><tr><td>R&D Centre Algoritmi, School of Engineering, University of Minho, Portugal</td><td>3</td></tr><tr><td>Department of Control and Computer Engineering, Politecnico di Torino, Italy</td><td>3</td></tr><tr><td>Center for Research in Intelligent Systems, University of California, Riverside Riverside, CA 92521-0425, USA</td><td>3</td></tr><tr><td>Shanghai Advanced Research Institute, CAS, Shanghai, China</td><td>3</td></tr><tr><td>Department of electronic engineering, Key Lab of Intelligent Perception and Image Understanding of Ministry of Education, Xi'an, China</td><td>3</td></tr><tr><td>Computer Vision Laboratory, ETH Zurich, Sternwartstrasse 7, 8092, Switzerland</td><td>3</td></tr><tr><td>BITS Pilani, Pilani , India</td><td>3</td></tr><tr><td>Department of Computer Science and Technology, Indian Institute of Engineering Science and Technology, Shibpur, Howrah-711 103, India</td><td>3</td></tr><tr><td>Dept. 
of CS&E, IIT Madras, India</td><td>3</td></tr><tr><td>Graduate School of System Design Tokyo Metropolitan University Tokyo, Japan</td><td>3</td></tr><tr><td>Face Aging Group, Computer Science Department, UNCW, USA</td><td>3</td></tr><tr><td>City University of New York, New York, NY, USA</td><td>3</td></tr><tr><td>Department of Computer Science and Engineering, Visual Learning and Intelligence Group, IIT Hyderabad, Hyderabad, India</td><td>3</td></tr><tr><td>Ghent University, Ghent, Belgium</td><td>3</td></tr><tr><td>Columbia Univeristy, New York, NY, USA</td><td>3</td></tr><tr><td>Microsoft Research Cambridge</td><td>3</td></tr><tr><td>Center for Automation Research, UMIACS University of Maryland, College Park, MD 20742</td><td>3</td></tr><tr><td>School of Electronics, Electrical Engineering and Computer Science, Queen&#x2019;s University Belfast, Belfast, U.K.</td><td>3</td></tr><tr><td>School of Information and Communication Engineering, Beijing University of Posts and Telcommunications, Beijing, China</td><td>3</td></tr><tr><td>Gwangju Institute of Science and Technology, 123, Cheomdangwagi-ro, Buk-gu, Gwangju, South Korea</td><td>3</td></tr><tr><td>Institute of Mathematical and Computer Sciences, University of Sao Paulo, Sao Paulo, Brazil</td><td>3</td></tr><tr><td>Dept. of Informatics, Aristotle Univ. of Thessaloniki, Greece</td><td>3</td></tr><tr><td>NTT Media Intelligence Laboratories, Tokyo, Japan</td><td>3</td></tr><tr><td>Beijing Institute of Graphic Communication, Beijing</td><td>3</td></tr><tr><td>Nara Institute of Science and Technology, Japan</td><td>3</td></tr><tr><td>Department of Computer, the University of Suwon, Korea</td><td>3</td></tr><tr><td>Department of Electrical and Computer Engineering, Florida Institute of Technology, Melbourne, USA</td><td>3</td></tr><tr><td>Dept. of Computer Science and Engineering, St. Joseph's College of Engineering and Technology, Palai, Kerala, India</td><td>3</td></tr><tr><td>Dept. 
of Computer Science and Electrical Engineering, University of Missouri-Kansas City, MO, USA</td><td>3</td></tr><tr><td>Inha University, Incheon, South Korea</td><td>3</td></tr><tr><td>Center for Research on Intelligent Perception and Computing</td><td>3</td></tr><tr><td>Thiagarajar College of Engineering, Madurai, Tamilnadu, India</td><td>3</td></tr><tr><td>Dept. of Computer Science and Information Engineering, National Dong Hwa University, Hualien, Taiwan</td><td>3</td></tr><tr><td>School of Electrical and Electronic Engineering, Singapore</td><td>3</td></tr><tr><td>Universidade Nova Lisboa, Lisboa, Portugal</td><td>3</td></tr><tr><td>State Key Laboratory of Management and Control for Complex Systems, Institute of Automation Chinese Academy of Sciences, Beijing, China 100190</td><td>3</td></tr><tr><td>Department of Computer Science and Engineering, National Institute of Technology Uttarakhand, Srinagar Garhwal, India</td><td>3</td></tr><tr><td>Dept. of Mediamatics, Delft Univ. of Technol., Netherlands</td><td>3</td></tr><tr><td>Department of Information Science and Engineering, Changzhou University, Changzhou, China</td><td>3</td></tr><tr><td>Institute for Infocomm Research, A*STAR, Singapore, Singapore</td><td>3</td></tr><tr><td>Ashikaga Institute of Technology, Ashikaga, Japan</td><td>3</td></tr><tr><td>Institute of Applied Computer Science, Kiel University of Applied Sciences, Kiel, Germany</td><td>3</td></tr><tr><td>Central China Normal University, Wuhan, China</td><td>3</td></tr><tr><td>Chongqing University of Posts and Telecommunications Chongqing, China</td><td>3</td></tr><tr><td>School of Computer Science and Software Engineering University of Wollongong, Australia</td><td>3</td></tr><tr><td>Phonexia, Brno-Krlovo Pole, Czech Republic</td><td>3</td></tr><tr><td>Department of Applied Mechanics, Chalmers University of Technology, SE-412 96 Göteborg, Sweden</td><td>3</td></tr><tr><td>Technische Universität München, Munich, 
Germany</td><td>3</td></tr><tr><td>University of California, Los Angeles, CA Dept. of Electrical Engineering</td><td>3</td></tr><tr><td>Academia Sinica, Taipei, Taiwan Roc</td><td>3</td></tr><tr><td>Dept. of Cybernetics and Artificial Intelligence, FEI TU of Košice, Slovak Republic</td><td>3</td></tr><tr><td>Image and Video Systems Lab, School of Electrical Engineering, KAIST, Republic of Korea</td><td>3</td></tr><tr><td>Information and media processing laboratories, NEC Corporation</td><td>3</td></tr><tr><td>Southern Illinois University at Carbondale, IL, USA</td><td>3</td></tr><tr><td>School of Automation and Electrical Engineering, University of Science and Technology Beijing, 100083, China</td><td>3</td></tr><tr><td>School of Software, Jiangxi Normal University, Nanchang, China</td><td>3</td></tr><tr><td>Department of Computer Science, Pontificia Universidad Cato&#x00B4;lica de Chile</td><td>3</td></tr><tr><td>Fujitsu Laboratories, Kawasaki, Kanagawa, Japan</td><td>3</td></tr><tr><td>Department of Electronic and Computer Engineering National Taiwan University of Science and Technology</td><td>3</td></tr><tr><td>New York University Abu Dhabi & NYU Tandon School of Engineering, Abu Dhabi, Uae</td><td>3</td></tr><tr><td>Intelligent Vision Research Lab, Department of Computer Science, Federal University of Bahia</td><td>3</td></tr><tr><td>Department of Electronic Measuring systems, Moscow Engineering Physics Institute, National Research Nuclear University MEPhI, Moscow, Russia</td><td>3</td></tr><tr><td>National Ilan University, Ilan, Taiwan Roc</td><td>2</td></tr><tr><td>China Electronics Standardization Institute, Beijing, 100007</td><td>2</td></tr><tr><td>Universidade Nova de Lisboa, Caparica, Portugal</td><td>2</td></tr><tr><td>Universidad Tecnica Federico Santa Maria, Department of Electronic Engineering, Valparaiso, Chile</td><td>2</td></tr><tr><td>Dept. of Comput. Syst., Univ. of Technol., Sydney, NSW, Australia</td><td>2</td></tr><tr><td>Dept. of Comput. 
Sci., York Univ., UK</td><td>2</td></tr><tr><td>The Australian National University RSCS, ANU, Canberra, Australia</td><td>2</td></tr><tr><td>Dept. of Computer Science, YiLi Normal College, Yining, China 835000</td><td>2</td></tr><tr><td>School of Computing and Communications, Faculty of Engineering and Information Technology, University of Technology, Sydney, Australia</td><td>2</td></tr><tr><td>Department of Electrical and Computer Engineering, Singapore</td><td>2</td></tr><tr><td>Dept. of ECE &amp; Digital Technology Center, Univ. of Minnesota, USA</td><td>2</td></tr><tr><td>Instituto de Telecomunicações & Faculdade de Ciěncias da Universidade do Porto</td><td>2</td></tr><tr><td>Department of Information Management, National Formosa University, Huwei, Yulin 632, Taiwan</td><td>2</td></tr><tr><td>Dept of Computer and Informatics Engineering, Eastern Macedonia and Thrace Institute of Technology, Kavala, Greece</td><td>2</td></tr><tr><td>University of Ulm, Ulm, Germany</td><td>2</td></tr><tr><td>Dept. of Eng. Sci., Oxford Univ., UK</td><td>2</td></tr><tr><td>Human-Machines Interaction (HMI) Laboratory, Department of Industrial Informatics, TEI of Kavala, Kavala, Greece</td><td>2</td></tr><tr><td>Dept. 
of ECE, Maryland Univ., College Park, MD, USA</td><td>2</td></tr><tr><td>Department of Computer Engineering, TOBB University of Economics and Technology, Ankara, Turkey</td><td>2</td></tr><tr><td>Electrical &amp; Electronic Engineering Department, Mevlana University Konya, Turkey</td><td>2</td></tr><tr><td>GIPSA Laboratory, Image and Signal Department, Grenoble Institute of Technology, Grenoble, France</td><td>2</td></tr><tr><td>Department of Computer Science and Engineering, National Institute of Technology, Rourkela, Odisha, India</td><td>2</td></tr><tr><td>Department of Computer Science and Engineering, National Taiwan Ocean University, No.2, Beining Rd., Keelung 202, Taiwan</td><td>2</td></tr><tr><td>Bilgisayar M&#x00FC;hendisli&#x011F;i B&#x00F6;l&#x00FC;m&#x00FC;, Deniz Harp Okulu, &#x0130;stanbul, T&#x00FC;rkiye</td><td>2</td></tr><tr><td>Statistical Machine Intelligence &amp; LEarning, School of Computer Science &amp; Engineering University of Electronic Science and Technology of China, 611731, China</td><td>2</td></tr><tr><td>Masaryk University, Brno, Czech Rep</td><td>2</td></tr><tr><td>Charles University, Prague, Czech Rep</td><td>2</td></tr><tr><td>Dept. of Electr. & Comput. Eng., McMaster Univ., Hamilton, Ont., Canada</td><td>2</td></tr><tr><td>Department of Computer and Informatics Engineering, Eastern Macedonia and Thrace Institute of Technology, Human Machines Interaction (HMI) Laboratory, 65404 Kavala, Greece</td><td>2</td></tr><tr><td>University of British Columbia Department of Electrical and Computer Engineering</td><td>2</td></tr><tr><td>School of Computer Science and Technology, Harbin Institute of Technology Shenzhen Graduate School, Shenzhen, China</td><td>2</td></tr><tr><td>Department of Computer Science and Engineering, University of Califonia, San Diego</td><td>2</td></tr><tr><td>University of Missouri Department of Electrical and Computer Engineering Columbia, MO, USA</td><td>2</td></tr><tr><td>Inf. Syst. 
Dept., Buckingham Univ., UK</td><td>2</td></tr><tr><td>Dept. of Electr. & Comput. Eng., Old Dominion Univ., Norfolk, VA, USA</td><td>2</td></tr><tr><td>Department of Computer Science &amp; Engineering, POSTECH, Pohang, Sourth Korea, 37673</td><td>2</td></tr><tr><td>Coursera and Stanford University</td><td>2</td></tr><tr><td>Dept. of Electron. & Inf., Toyota Technol. Inst., Nagoya, Japan</td><td>2</td></tr><tr><td>Department of Computer Science and Engineering, POSTECH, Pohang 790-784, Republic of Korea</td><td>2</td></tr><tr><td>Dept. of Comput. Sci., New York State Univ., Binghamton, NY, USA</td><td>2</td></tr><tr><td>Dept. of Electrical Engineering, National Institute of Technology, Rourkela, India 769008</td><td>2</td></tr><tr><td>Department of Computer Science, Instituto de Telecomunicações, University of Beira Interior, Covilhã, Portugal</td><td>2</td></tr><tr><td>School of Computer Science and Technology, Tianjin University&Tianjin Key Laboratory of Cognitive Computing and Application, Tianjin, China</td><td>2</td></tr><tr><td>NPU-VUB Joint AVSP Research Lab, School of Computer Science, Northwestern Polytechnical University (NPU) Shaanxi Key Lab on Speech and Image Information Processing, 127 Youyi Xilu, Xi'an 710072, China</td><td>2</td></tr><tr><td>Research&Advanced Technology Division of SAIC Motor Corporation Limited, Shanghai 201804, P.R China</td><td>2</td></tr><tr><td>Center for Research on Intelligent Perception and Computing, National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences (CASIA), Beijing, China</td><td>2</td></tr><tr><td>Dept. of Computer Science and Information Engineering, Southern Taiwan University of Science and Technology, Tainan City, Taiwan</td><td>2</td></tr><tr><td>Dept. of Electronics and Telecommunication Engg., KCT's Late G.N. 
Sapkal college of Engineering, Nashik, India</td><td>2</td></tr><tr><td>Tencent Inc</td><td>2</td></tr><tr><td>Facebook Inc., Menlo Park, CA, USA</td><td>2</td></tr><tr><td>Computational Intelligence Lab, Institute of Informatics and Telecommunications, NCSR Demokritos, Athens, Greece</td><td>2</td></tr><tr><td>Dept. of Electrical Engineering, National Tsing-Hua University, Taiwan</td><td>2</td></tr><tr><td>Department Informatik, Hamburg University of Applied Sciences, Hamburg, Germany</td><td>2</td></tr><tr><td>Department Informatik, Hamburg University of Applied Sciences, Engineering and Computing, University of the West of Scotland</td><td>2</td></tr><tr><td>Elektronik ve Haberle&#x015F;me M&#x00FC;hendisli&#x011F;i B&#x00F6;l&#x00FC;m&#x00FC;, Y&#x0131;ld&#x0131;z Teknik &#x00DC;niversitesi, &#x0130;stanbul, T&#x00FC;rkiye</td><td>2</td></tr><tr><td>Department of Computer Science and Engineering of Systems, University of Zaragoza, Escuela Universitaria Politécnica de Teruel, Teruel, Spain</td><td>2</td></tr><tr><td>Department of Automation, North-China University of Technology, Beijing, China</td><td>2</td></tr><tr><td>Faculty of Electrical Engineering and Computing, University of Zagreb, Zagreb, Croatia</td><td>2</td></tr><tr><td>Xi'an Jiaotong-Liverpool University, Suzhou, Jiangsu, P.R. 
China</td><td>2</td></tr><tr><td>Indian Statistical Insitute, Kolkata 700108</td><td>2</td></tr><tr><td>Centre for Secure Information Technologies, Queen’s University Belfast, Belfast, UK</td><td>2</td></tr><tr><td>Wrocław University of Science and Technology, Wrocław, Poland</td><td>2</td></tr><tr><td>Department of Electrical Engineering Indian Institute of Technology Delhi New Delhi, India</td><td>2</td></tr><tr><td>Department of Electronics and Communication Engineering Malaviya National Institute of Technology Jaipur, Rajasthan, India</td><td>2</td></tr><tr><td>Department of Electrical Engineering Malaviya National Institute of Technology Jaipur, Rajasthan, India</td><td>2</td></tr><tr><td>Key Laboratory of Dependable Service Computing in Cyber Physical Society Ministry of Education, Chongqing, China</td><td>2</td></tr><tr><td>Osaka University Health Care Center, Japan</td><td>2</td></tr><tr><td>Shahid Bahonar University of Kerman Computer Engineering Department, Kerman, Iran</td><td>2</td></tr><tr><td>Istituto Italiano di Tecnologia (IIT) & Università di Torino, Genova, Italy</td><td>2</td></tr><tr><td>Istituto Italiano di Tecnologia (IIT) & Università degli Studi di Genova, Genova, Italy</td><td>2</td></tr><tr><td>Norwegian Biometric Laboratory, Norwegian University of Science and Technology (NTNU), Gjøvik, Norway</td><td>2</td></tr><tr><td>Department of Computer Applications, National Institute of Technology, Tiruchirappalli, India</td><td>2</td></tr><tr><td>B. 
Tech Graduate, ECE, MSIT, C-4 Janakpuri, New Delhi, India</td><td>2</td></tr><tr><td>San Diego State University, San Diego, CA, USA</td><td>2</td></tr><tr><td>MIT, Cambridge, MA, USA</td><td>2</td></tr><tr><td>Dept of Electronics and Communication, Manipal Institute Of Technology, Karnataka, India</td><td>2</td></tr><tr><td>LMU Munich, Germany</td><td>2</td></tr><tr><td>Polytechnic School of Pernambuco, University of Pernambuco, Recife-PE, Brazil</td><td>2</td></tr><tr><td>Başkent University, Ankara, TURKEY</td><td>2</td></tr><tr><td>Department of Electronic and Communication Engineering, Universiti Tenaga Nasional Km 7, Jalan IKRAM-UNITEN, 43000, Kajang, Selangor, Malaysia</td><td>2</td></tr><tr><td>Sunway University, Selangor, Malaysia</td><td>2</td></tr><tr><td>Northwestern Polytechnical University Xian, P. R. China</td><td>2</td></tr><tr><td>Dept. of E & TC Engineering, Maharashtra Institute of Technology, Pune, India</td><td>2</td></tr><tr><td>Dept. of ECE and Digital Technology Center, Univ. of Minnesota, USA</td><td>2</td></tr><tr><td>Department of Computer Science, Chu Hai College of Higher Education, Hong Kong</td><td>2</td></tr><tr><td>School of Electrical and Computer Engineering, Ulsan National Institute of Science and Technology (UNIST), UNIST-gil 50, 689-798, Korea</td><td>2</td></tr><tr><td>Dept. of Comp. Sci. and Inf. Eng, Chung Hua University, Hsinchu, Taiwan</td><td>2</td></tr><tr><td>Shanghai Jiao Tong University & Alibaba Group, Shanghai, China</td><td>2</td></tr><tr><td>School of Computer Science, Kyungpook National University, Buk-gu, Daegu, The Republic of Korea</td><td>2</td></tr><tr><td>Laboratory LAROSERI, Department of Computer Science, Faculty of Sciences, University of Chouaib Doukkali, El Jadida - Morocco</td><td>2</td></tr><tr><td>Microsoft Research India Pvt. 
Ltd, Bangalore, Karnataka, India</td><td>2</td></tr><tr><td>Department of Electronics, University of Goa, India</td><td>2</td></tr><tr><td>Department of ECE, National Institute of Technology, Rourkela (Odisha), India</td><td>2</td></tr><tr><td>Beijing Key Laboratory of Advanced Information Science and Network Technology, Beijing, China</td><td>2</td></tr><tr><td>B-DAT Laboratory, School of Information and Control, Nanjing University of Information and Technology, Nanjing, China</td><td>2</td></tr><tr><td>Thales Services, ThereSIS, Palaiseau, France</td><td>2</td></tr><tr><td>School of Electrical and Electronic Engineering, Tianjin University of Technology, China</td><td>2</td></tr><tr><td>Department of Computer Science and Engineering, Mepco Schlenk Engineering College, Sivakasi, India</td><td>2</td></tr><tr><td>IIIT Bangalore, India</td><td>2</td></tr><tr><td>Institut de Rob&#x00F2;tica i Inform&#x00E0;tica Industrial (CSIC-UPC)</td><td>2</td></tr><tr><td>Department of Computer Science, IT: Instituto de Telecomunicações, University of Beira Interior, Covilhã, Portugal</td><td>2</td></tr><tr><td>Xinjiang University, Urumqi, China</td><td>2</td></tr><tr><td>School of Computing Science and Engineering, VIT University, Vellore, India</td><td>2</td></tr><tr><td>Dept. of Electr. & Comput. Eng., Carnegie Mellon Univ., Pittsburgh, PA, USA</td><td>2</td></tr><tr><td>GSI Universidad Polit-écnica de Madrid, Madrid, Spain</td><td>2</td></tr><tr><td>Department of Electronic Engineering, Kwangwoon University, Seoul, Republic of Korea</td><td>2</td></tr><tr><td>Dept. of Appl. Phys. & Electron., Umea Univ., Sweden</td><td>2</td></tr><tr><td>Universidade Federal do Paraná, Curitiba, Brazil</td><td>2</td></tr><tr><td>Università degli Studi di Verona, Verona, Italy</td><td>2</td></tr><tr><td>CEA, Gif-Sur-Yvette, France</td><td>2</td></tr><tr><td>UMR CNRS - Univ. 
Bourgogne, Dijon, France</td><td>2</td></tr><tr><td>Mechatronic Engineering Department, Mevlana University, Konya, Turkey</td><td>2</td></tr><tr><td>TÜBİITAK-BİILGEM-UEKAE, Anibal Cad., P.K.74, 41470, Gebze-KOCAELİ, Turkey</td><td>2</td></tr><tr><td>The 28th Research Institute of China Electronics Technology Group Corporation, China</td><td>2</td></tr><tr><td>Raytheon BBN Technologies, 10 Moulton St, Cambridge, MA</td><td>2</td></tr><tr><td>School of Electrical Engineering and Computer Science at the University of Newcastle, Callaghan, NSW 2308, Australia</td><td>2</td></tr><tr><td>Electronics and Communication Sciences Unit, Indian Statistical Institute, Kolkata, India</td><td>2</td></tr><tr><td>Pontifícia Universidade Católica do RS, Porto Alegre-RS, Brazil</td><td>2</td></tr><tr><td>Waseda University The Graduate School of Information, Production and Systems 2-7, Hibikino, Wakamatsu-ku, Kitakyushu-shi, Fukuoka, Japan</td><td>2</td></tr><tr><td>Majority Report, France</td><td>2</td></tr><tr><td>SITI Laboratory, National Engineering School of Tunis (ENIT), University of Tunis El Manar, Tunis, Tunisia</td><td>2</td></tr><tr><td>University of Montreal, Department of Computer Science and Operations Research (DIRO), 2920 Chemin de la tour, QC, Canada, H3C 3J7</td><td>2</td></tr><tr><td>Università di Salerno, Fisciano (SA), Italy</td><td>2</td></tr><tr><td>Advanced Technologies Application Center 7a #21406 b/ 214 and 216, P.C. 
12200, Siboney Playa, Havana, Cuba</td><td>2</td></tr><tr><td>School of Electronic Engineering, Xi'an University of Posts and Telecommunications, Xi'an, China</td><td>2</td></tr><tr><td>Department of Sciences and Information Technology, University of Sassari, Viale Mancini 5, 07100 Sassari, Italy</td><td>2</td></tr><tr><td>Department of Electrical Engineering and Computer Science, University of Siegen, Siegen, Germany</td><td>2</td></tr><tr><td>Department of Computer Science and Information Engineering, National Formosa University, Yunlin 632, Taiwan</td><td>2</td></tr><tr><td>Broadcasting &amp; Telecommunications, Convergence Media Research Department, Electronics and Telecommunications Research Institute, Daejeon, Korea</td><td>2</td></tr><tr><td>Dept. of ECE & Digital Technology Center, Univ. of Minnesota, USA</td><td>2</td></tr><tr><td>FMV I&#x015E;IK &#x00DC;niversitesi, &#x015E;ile, Istanbul</td><td>2</td></tr><tr><td>Lehrstuhl für Mustererkennung, FAU Erlangen – Nürnberg, Erlangen, Germany</td><td>1</td></tr><tr><td>Nanjing University of Posts and Telecommunications, China</td><td>1</td></tr><tr><td>Department of Electrical Engineering, National Chin-Yi University of Technology, Taichung, Taiwan</td><td>1</td></tr><tr><td>Laboratoire d’interprétation et de traitement d’images et vidéo, Polytechnique Montréal, Montreal, Canada</td><td>1</td></tr><tr><td>Laboratoire d’imagerie de vision et d’intelligence artificielle, École de technologie supérieure, Université du Québec, Montreal, Canada</td><td>1</td></tr><tr><td>University of Bern, Bern, Switzerland</td><td>1</td></tr><tr><td>School of Physics and Electronic Engineering, Taizhou University, Taizhou 318000, PR China</td><td>1</td></tr><tr><td>Department of Computer Science, University of California at Davis, Davis, USA</td><td>1</td></tr><tr><td>Collaborative Innovation Center of Electric Vehicles in Beijing, Beijing, China</td><td>1</td></tr><tr><td>Department of Computer Science and Engineering, Shri 
Shankaracharya Technical Campus, Bhilai, District-Durg, India</td><td>1</td></tr><tr><td>Department of Computer Science and Engineering, Jaypee University of Information Technology, Solan, India</td><td>1</td></tr><tr><td>Department of Computer Science and Engineering, Indian Institute of Technology (Banaras Hindu University), Varanasi, India</td><td>1</td></tr><tr><td>College of Artificial Intelligenge and Big Data, ChongQing University of Electronic Engineering, Chongqing, China</td><td>1</td></tr><tr><td>Laboratoire Jean Kuntzmann, Grenoble, France</td><td>1</td></tr><tr><td>Dept. of Advanced Technologies, Alcorn State University, MS, USA</td><td>1</td></tr><tr><td>Department of Industrial Design, Tatung University, Taipei 104, Republic of China</td><td>1</td></tr><tr><td>Department of Computer Science and Engineering, Tatung University, Taipei 104, Republic of China</td><td>1</td></tr><tr><td>Department of Computer Science and Technology, Yangzhou University, Yangzhou, People’s Republic of China</td><td>1</td></tr><tr><td>School of Computer Science and Engineering, Nanjing University of Science & Technology, Nanjing, People’s Republic of China</td><td>1</td></tr><tr><td>School of Information Engineering, Yangzhou University, Yangzhou, China</td><td>1</td></tr><tr><td>Res. Center for Learning Sci., Southeast Univ., Jiangsu, China</td><td>1</td></tr><tr><td>Eedoo Inc, Beijing, China</td><td>1</td></tr><tr><td>CSE, SUNY at Buffalo, USA and Southeast University, China</td><td>1</td></tr><tr><td>School of Information Technology and Engineering, VIT University, Vellore, India</td><td>1</td></tr><tr><td>Department of Electrical, Computer and Biomedical Engineering, University of Rhode Island, Kingston, RI 02881</td><td>1</td></tr><tr><td>Vulcan Inc, Seattle, WA 98104</td><td>1</td></tr><tr><td>Dept. 
of Computing, Curtin University of Technology, WA 6102, USA</td><td>1</td></tr><tr><td>School of Software, Faculty of Engineering and Information Technology, University of Technology, Sydney, Australia</td><td>1</td></tr><tr><td>University of California, Santa Cruz & Ghent University, Santa Cruz, CA, USA</td><td>1</td></tr><tr><td>Computer Vision Research lab, Electrical Engineering Faculty, Sahand University of Technology, Tabriz, Iran</td><td>1</td></tr><tr><td>HAN University of Applied Sciences, Arnhem, Netherlands</td><td>1</td></tr><tr><td>Faculty of Information Technology, Barrett Hodgson University, Karachi, Pakistan</td><td>1</td></tr><tr><td>Laboratoire Hubert Curien, UMR5516, Université Jean Monnet, Saint-Etienne, France</td><td>1</td></tr><tr><td>Université de Lyon, CNRS, LIRIS, UMR5205, Université Lyon 1, Lyon, France</td><td>1</td></tr><tr><td>Department of Electrical and Computer Engineering, Saginaw Valley State University, University Ctr, MI- 48710</td><td>1</td></tr><tr><td>TCTS Lab, Faculté Polytechnique de Mons, Belgium</td><td>1</td></tr><tr><td>Speech Technology Group, Technical University of Madrid, Spain</td><td>1</td></tr><tr><td>TALP Research Center, Universitat Politècnica de Catalunya, Spain</td><td>1</td></tr><tr><td>TELE Lab, Université catholique de Louvain, Belgium</td><td>1</td></tr><tr><td>INRIA Grenoble-Rhône-Alpes Research Center, France</td><td>1</td></tr><tr><td>Department of Embedded Systems, Institute for Infocomm Research, Singapore</td><td>1</td></tr><tr><td>E.T.S. 
Ingenieros Industriales, Universidad de Castilla-La Mancha Campus Universitario, Ciudad Real, Spain</td><td>1</td></tr><tr><td>Universidad de Las Palmas de Gran Canaria, SIANI, Edificio Central del Parque Científico-Tecnológico, Las Palmas, Spain</td><td>1</td></tr><tr><td>School of Math and Geospatial Sciences, Royal Melbourne Institute of Technology University , Melbourne, Australia</td><td>1</td></tr><tr><td>Department of Computer Science and Application, Zhengzhou Institute of Aeronautical Industry Management, Zhengzhou, China</td><td>1</td></tr><tr><td>School of Information Engineering and Automation, Kunming University of Science and Technology, Kunming, China</td><td>1</td></tr><tr><td>Department of Electrical and Computer Engineering, Naresuan University, Muang, Thailand</td><td>1</td></tr><tr><td>Department of Computer Science, Christian-Albrechts University, Kiel, Germany</td><td>1</td></tr><tr><td>Engineering Lab on Intelligent Perception for Internet of Things, Peking University Shenzhen Graduate School, Shenzhen, China</td><td>1</td></tr><tr><td>Delft University of Technology and Sensor Technology, Netherlands Defense Academy</td><td>1</td></tr><tr><td>School of Electrical Engineering and Automation, Qilu University of Technology, Jinan, China</td><td>1</td></tr><tr><td>School of Electronic and Information Engineering, Xi’an Jiaotong University, Xi’an, China</td><td>1</td></tr><tr><td>Department of Electronics and Computing and the Electronics and Information Technology Research & Development Center, Universidade Federal do Amazonas, Manaus-AM, CEP, Brazil</td><td>1</td></tr><tr><td>University of California at Merced, Merced, USA</td><td>1</td></tr><tr><td>Technische Universität München, Garching, Germany</td><td>1</td></tr><tr><td>School of Big Data and Computer, Science, Guizhou Normal University, Guiyang, Guizhou, 550025, P. R. 
China</td><td>1</td></tr><tr><td>University of California at Santa Cruz, Santa Cruz, California</td><td>1</td></tr><tr><td>Network Center, Huizhou University, Huizhou, China</td><td>1</td></tr><tr><td>Faculty of Engineering, Computer Engineering Department, Akdeniz University, Dumlupinar Bulvari, Turkey</td><td>1</td></tr><tr><td>IRCICA, Parc Scientifique de la Haute Borne, Lille 1 University, Villeneuve d’Ascq, France</td><td>1</td></tr><tr><td>Data and Analytics Department, KPMG AGWPG, Düsseldorf, Germany</td><td>1</td></tr><tr><td>Department of Electronic Information Engineering, Suzhou Vocational University, Suzhou, Jiangsu, China</td><td>1</td></tr><tr><td>JiangSu Province Support Software Engineering R&D Center for Modern Information Technology Application in Enterprise, Suzhou, China</td><td>1</td></tr><tr><td>Université de Lorraine, LORIA, UMR 7503</td><td>1</td></tr><tr><td>Department of Electrical Engineering, The City College of New York, New York, USA</td><td>1</td></tr><tr><td>Robótica y Manufactura Avanzada, Centro de Investigación y de Estudios Avanzados del Instituto Politécnico Nacional, Ramos Arizpe, Mexico</td><td>1</td></tr><tr><td>School of Computing and Electrical Engineering, Indian Institute of Technology Mandi, India</td><td>1</td></tr><tr><td>Dept. 
de Ciência da Computacão, Universidade Federal de Ouro Preto, MG Brazil</td><td>1</td></tr><tr><td>Department of Electrical Engineering, University of California, Riverside, Riverside CA, California 92521 United States</td><td>1</td></tr><tr><td>Department of CSE, Regional Campus of Anna University, Tirunelveli 627007, India</td><td>1</td></tr><tr><td>Dalian Key Laboratory of Digital Technology for National Culture, Dalian Minzu University, Dalian, China</td><td>1</td></tr><tr><td>Department of Business Planning &amp; Information Systems, TEI of Crete, Agios Nikolaos, Greece</td><td>1</td></tr><tr><td>School of Information Technology Jawaharlal Nehru Technological University Hyderabad Andhra Pradesh, India</td><td>1</td></tr><tr><td>Sorbonne Universités, UPMC Univ Paris 06, UMR 7606, LIP6, Paris, France</td><td>1</td></tr><tr><td>School of Computer ScienceThe University of Adelaide</td><td>1</td></tr><tr><td>Shanghai Advanced Research Institute, CAS & Qiniu AI Lab, Shanghai, China</td><td>1</td></tr><tr><td>University of Lyon, LIRIS - CNRS, National Institute of Applied Sciences (INSA), Lyon, France</td><td>1</td></tr><tr><td>Toyohashi University of Technology, Toyohashi, Japan</td><td>1</td></tr><tr><td>Department of Informatics, King’s College London, Strand, London, UK</td><td>1</td></tr><tr><td>School of Computer Science and Engineering, Hebei University of Technology, Tianjin, China</td><td>1</td></tr><tr><td>Department of Learning and Digital Technology, Fo Guang University, Yilan, Taiwan</td><td>1</td></tr><tr><td>BITS Pilani, India , India</td><td>1</td></tr><tr><td>College of Computer and Information Sciences, Al Imam Mohammad Ibn Saud Islamic University (IMSIU), Riyadh, Saudi Arabia</td><td>1</td></tr><tr><td>National University of Computer and Emerging Sciences, Islamabad, Islamabad, Pakistan</td><td>1</td></tr><tr><td>Research Team on Audio Visual Signal Processing (AVSP), Vrije Universiteit Brussel (VUB), Electronics and Informatics Department, 
VUB-ETRO, Pleinlaan 2, 1050 Brussel, Belgium</td><td>1</td></tr><tr><td>College of Computer Science and Technology of Huaqiao University Xiamen, Xiamen, China</td><td>1</td></tr><tr><td>Department of Computer Science and Information Engineering, National Yunlin University of Science and Technology, Taiwan 640, R.O.C.</td><td>1</td></tr><tr><td>Institute of Image Processing and Pattern Recognition, Henan University, Kaifeng 475004, China</td><td>1</td></tr><tr><td>Department of Electrical Engineering, College of Electrical Engineering and Computer Science, National United University, Miaoli, Taiwan</td><td>1</td></tr><tr><td>Department of Information Management, College of Management, National United University, Miaoli, Taiwan</td><td>1</td></tr><tr><td>Department of Computer Science and Information Engineering, College of Electrical Engineering and Computer Science, National United University, Miaoli, Taiwan</td><td>1</td></tr><tr><td>Department of Statistics, University of California at Berkeley, Berkeley, USA</td><td>1</td></tr><tr><td>International Computer Science Institute, University of California at Berkeley, Berkeley, USA</td><td>1</td></tr><tr><td>College of Information Science and Technology, Agricultural University of Hebei, Baoding, China</td><td>1</td></tr><tr><td>Department of Electronics Engineering, Mokpo National University, Republic of Korea</td><td>1</td></tr><tr><td>FX Palo Alto Laboratory</td><td>1</td></tr><tr><td>Department of Applied Optics and Photonics, University of Calcutta, Kolkata, India</td><td>1</td></tr><tr><td>Department of Electrical Engineering, Future Institute of Engineering and Management, Kolkata, India</td><td>1</td></tr><tr><td>Department of Electronics and Communication Engineering, Visvesvaraya National Institute of Technology, Nagpur, India</td><td>1</td></tr><tr><td>Telecom Division, Centre de Développement des Technologies Avancées, Algiers, Algeria</td><td>1</td></tr><tr><td>Departments of Medical Imaging and Medical 
Biophysics, University of Western Ontario, London, ON, Canada</td><td>1</td></tr><tr><td>Department of Medical Biophysics, University of Western Ontario, London, ON, Canada</td><td>1</td></tr><tr><td>School of Computer Science, Shaanxi Normal University, Xi’an, China</td><td>1</td></tr><tr><td>Engineering Laboratory of Teaching Information Technology of Shaanxi Province, Xi’an, China</td><td>1</td></tr><tr><td>Key Laboratory of Modern Teaching Technology, Ministry of Education, Xi’an, China</td><td>1</td></tr><tr><td>College of Control Engineering, Northeastern University at Qinhuangdao, Qinhuangdao, P.R. China</td><td>1</td></tr><tr><td>Nanyang Technological University School of Computer Engineering</td><td>1</td></tr><tr><td>Department of Electronics and Electrical Engineering, Indian Institute of Technology (IIT) Guwahati, Guwahati, India</td><td>1</td></tr><tr><td>Technology Section, Israel National Police, Jerusalem, Israel</td><td>1</td></tr><tr><td>Department of Electro-Optics Engineering, Ben-Gurion University, Beer Sheva, Israel</td><td>1</td></tr><tr><td>Department of Mathematics, JiaYing University, Meizhou, China</td><td>1</td></tr><tr><td>Hebei University of Technology, School of Science, Tianjin, P. R. 
China</td><td>1</td></tr><tr><td>YiLi Normal College, Yining, China</td><td>1</td></tr><tr><td>Faculty of Electronic Information and Electrical Engineering, Dalian University, Dalian, China</td><td>1</td></tr><tr><td>Centre for Innovation in IT Services and Applications (iNEXT), University of Technology, Sydney, Australia</td><td>1</td></tr><tr><td>Video Surveillance Laboratory, Guizhou University for Nationalities, Guiyang, China</td><td>1</td></tr><tr><td>College of Arts and Sciences, Shanxi Agricultural University, Shanxi, China</td><td>1</td></tr><tr><td>IRDA Group, ADMIR Laboratory, Rabat IT Center, ENSIAS, CNRST (URAC29), Mohammed V University of Rabat, Morocco</td><td>1</td></tr><tr><td>LRIT, CNRST (URAC29), Mohammed V University of Rabat, Morocco</td><td>1</td></tr><tr><td>Queen’s University, Kingston, Canada</td><td>1</td></tr><tr><td>University of Science Technology, Wuhan, China</td><td>1</td></tr><tr><td>Tunku Abdul Rahman University College, Kuala Lumpur, Malaysia</td><td>1</td></tr><tr><td>University at Qatar, Doha, Qatar</td><td>1</td></tr><tr><td>University of Istanbul, Istanbul, Turkey</td><td>1</td></tr><tr><td>Department of Computer Science, Chu Hai College of Higher Education, Tuen Mun, Hong Kong</td><td>1</td></tr><tr><td>PolyU Shenzhen Research Institute, Shenzhen, China</td><td>1</td></tr><tr><td>German National Library of Science and Technology & Leibniz Universität Hannover, Hannover, Germany</td><td>1</td></tr><tr><td>University of Applied Sciences Jena, Jena, Germany</td><td>1</td></tr><tr><td>Department of Creative IT Engineering, POSTECH, Pohang, South Korea, 37673</td><td>1</td></tr><tr><td>Department of Computer Science, University of Western Ontario, London, Canada</td><td>1</td></tr><tr><td>Vision Laboratory, LARSyS, University of the Algarve, Faro, Portugal</td><td>1</td></tr><tr><td>Department of Information Management, Yuan Ze University, Taoyuan, China</td><td>1</td></tr><tr><td>DICGIM, Universitá degli Studi di Palermo, V.le 
delle Scienze, Ed. 6, 90128 Palermo, Italy</td><td>1</td></tr><tr><td>Department of Computer Engineering, Karadeniz Technical University, Trabzon, Turkey</td><td>1</td></tr><tr><td>Department of Computer Technologies, Trabzon Vocational School, Karadeniz Technical University, Trabzon, Turkey</td><td>1</td></tr><tr><td>Stanford University and Coursera</td><td>1</td></tr><tr><td>Dept. of Comput. Sci. &amp; Info. Eng., National Yunlin Univ. of Science &amp; Technology, Taiwan</td><td>1</td></tr><tr><td>Faculty of Mathematics and Statistics, Hubei Key Laboratory of Applied Mathematics, Hubei University, Wuhan, China</td><td>1</td></tr><tr><td>Biometric and Imaging Processing Laboratory (BIPLab)</td><td>1</td></tr><tr><td>Research and Academic Computer Network (NASK)</td><td>1</td></tr><tr><td>Philips Applied Technologies, Eindhoven, Netherlands</td><td>1</td></tr><tr><td>Philips Research Eindhoven, Eindhoven, Netherlands</td><td>1</td></tr><tr><td>Key Lab Complex System & Intelligence Science, Institute of Automation, Chinese Academy of Science, Beijing, China</td><td>1</td></tr><tr><td>College of Computer Science and Technology, Wuhan University of Science and Technology, Wuhan, China</td><td>1</td></tr><tr><td>National Research University Higher School of Economics, Laboratory of Algorithms and Technologies for Network Analysis, Nizhny Novgorod, Russia</td><td>1</td></tr><tr><td>Emory University School of Medicine, Atlanta, USA</td><td>1</td></tr><tr><td>Department of Radiology and Imaging Sciences, Winship Cancer Institute, Emory University School of Medicine, Atlanta, USA</td><td>1</td></tr><tr><td>Dept. 
of EMPH, Icahn School of Medicine at Mount Sinai, New York, NY 10029</td><td>1</td></tr><tr><td>Electrical-Electronics Engineering Department, Izmir University of Economics, Balcova, Turkey</td><td>1</td></tr><tr><td>Department of Computer Science, Solapur University, Solapur, India</td><td>1</td></tr><tr><td>Computer Vision Research Group, School of Computer Sciences, Universiti Sains Malaysia, Penang, Malaysia</td><td>1</td></tr><tr><td>Department of Information Technology, Netaji Subhas Engineering College, Kolkata, India</td><td>1</td></tr><tr><td>Computer Engineering College, Jimei University, Xiamen, China</td><td>1</td></tr><tr><td>Fujian Key Laboratory of the Brain-like Intelligent Systems, Xiamen, China</td><td>1</td></tr><tr><td>School of Information, Hunan University of Humanities, Science and Technology, Loudi, China</td><td>1</td></tr><tr><td>Al Imam Mohammad Ibn Saud Islamic University, Riyadh, Saudi Arabia</td><td>1</td></tr><tr><td>School of Information and Mechatronics, Gwangju Institute of Science and Technology, Gwangju, Korea</td><td>1</td></tr><tr><td>Gwangju Institute of Science and Technology (GIST), Gwangju, Republic of Korea</td><td>1</td></tr><tr><td>Dept. 
of Computer Science and Information Engineering, Providence University, Taichung, Taiwan</td><td>1</td></tr><tr><td>360 AI Institute, Beijing, China</td><td>1</td></tr><tr><td>NPU-VUB Joint AVSP Research Lab, Vrije Universitiet Brussel (VUB), Department of Electronics & Informatics (ETRO) Pleinlaan 2, 1050 Brussel, Belgium</td><td>1</td></tr><tr><td>IRISA, University of Rennes 1</td><td>1</td></tr><tr><td>Research Institution of Intelligent Control and Testing, Graduate School of Tsinghua University at Shenzhen, 518055, China</td><td>1</td></tr><tr><td>Commonwealth Scientific and Industrial Research Organization (CSIRO)</td><td>1</td></tr><tr><td>Indiana University-Bloomington, USA</td><td>1</td></tr><tr><td>Key Laboratory of Medical Image Computing (Northeastern University), Ministry of Education, Shenyang, China</td><td>1</td></tr><tr><td>Clínica Otocenter, Teresina, Piauí, Brasil</td><td>1</td></tr><tr><td>Key Lab of Broadband Wireless Communication and Sensor Network Technology, Ministry of Education, Nanjing, China</td><td>1</td></tr><tr><td>Nanjing University of Posts and Telecommunications, Nanjing, China</td><td>1</td></tr><tr><td>Grupo de Aplicacion de Telecomunicaciones Visuales, Universidad Politecnica de Madrid, Av. Complutense 30, 28040 Madrid, Spain</td><td>1</td></tr><tr><td>Department of Management Information Systems, Universität Regensburg, Universitätsstr. 
31, 93053 Regensburg, Germany</td><td>1</td></tr><tr><td>Department of Computer Science, Amrita Vishwa Vidyapeetham, Amritapuri Campus, Kollam, India</td><td>1</td></tr><tr><td>Amrita E-Learning Research Laboratory and the Department of Computer Science, Amrita Vishwa Vidyapeetham, Amritapuri Campus, Kollam, India</td><td>1</td></tr><tr><td>Department of ECE, PEC University of Technology, Chandigarh, India</td><td>1</td></tr><tr><td>Biomedical Instrumentation (V-02), CSIR-Central Scientific Instruments Organisation (CSIO)|, Chandigarh, India</td><td>1</td></tr><tr><td>CEERI, Pilani, India</td><td>1</td></tr><tr><td>MNIT, Jaipur, India</td><td>1</td></tr><tr><td>Department of Arts and Humanities, College of Business, Arts and Social Sciences, Brunel University London, Uxbridge, UK</td><td>1</td></tr><tr><td>Department of Design, College of Engineering, Design and Physical Sciences, Brunel University London, Uxbridge, UK</td><td>1</td></tr><tr><td>Department of Computer Science and Engineering, Indian Institute of Technology Madras, Chennai, India</td><td>1</td></tr><tr><td>NTT Service Evolution Laboratories, Kanagawa, Japan</td><td>1</td></tr><tr><td>Lee Kong Chian Faculty of Engineering and Science, Universiti Tunku Abdul Rahman, Kuala Lumpur, Malaysia</td><td>1</td></tr><tr><td>Dept. of Comput. 
Sci., North Carolina Univ., Wilmington, NC, USA</td><td>1</td></tr><tr><td>University of Washington &Microsoft, Seattle, WA, USA</td><td>1</td></tr><tr><td>Departamento de Informtica e Matemtica Aplicada/University of Rio Grande do Norte, Natal, Brazil</td><td>1</td></tr><tr><td>Computer Engineering Department, Girne American University, Kyrenia, Cyprus 90</td><td>1</td></tr><tr><td>Cornell University & Facebook Inc., New York, NY, USA</td><td>1</td></tr><tr><td>School of Computer Science and Technology, Nanjing University of Posts and Telecommunications, Nanjing, China</td><td>1</td></tr><tr><td>Fujian Provincial Key Laboratory of Information Processing and Intelligent Control, Fuzhou, China</td><td>1</td></tr><tr><td>School of Technology, Nanjing Audit University, Nanjing, China</td><td>1</td></tr><tr><td>Adobe Research Department, Adobe Systems Inc, San Jose, CA</td><td>1</td></tr><tr><td>Integrated Circuits and Electronics Laboratory, Department of Engineering, Aarhus University, Denmark</td><td>1</td></tr><tr><td>Key Lab of Intelligent Information Processing of Chinese Academy of Sciences (CAS), Institute of Computing Technology (ICT), CAS, Beijing, China</td><td>1</td></tr><tr><td>Dept. of Comput. Sci., California Inst. of Technol., Pasadena, CA, USA</td><td>1</td></tr><tr><td>Utechzone Co. 
Ltd., New Taipei City, Taiwan 235</td><td>1</td></tr><tr><td>Department of Cognitive Science, University of California, San Diego, CA, USA</td><td>1</td></tr><tr><td>Department of Electronic Engineering Shanghai Jiao Tong University</td><td>1</td></tr><tr><td>Innovations Kontakt Stelle (IKS) Hamburg, Hamburg University of Applied Sciences</td><td>1</td></tr><tr><td>School of Engineering and Computing, University of the West of Scotland</td><td>1</td></tr><tr><td>Computer Science Department, Central Washington University (CWU)</td><td>1</td></tr><tr><td>ICT Center, CSIRO</td><td>1</td></tr><tr><td>Technische Universität München, München, Germany</td><td>1</td></tr><tr><td>Electrical Engineering and Computer Science, School of Engineering, University of California at Merced, Merced, USA</td><td>1</td></tr><tr><td>Data61, Commonwealth Scientific and Industrial Research Organization (CSIRO), Canberra, Australia</td><td>1</td></tr><tr><td>Department of Electronics and Communication Engineering, Faculty of Electrical & Electronic Engineering, Khulna University of Engineering & Technology, Bangladesh</td><td>1</td></tr><tr><td>Pennsylvania State University, University Park, PA</td><td>1</td></tr><tr><td>University of Sao Paulo</td><td>1</td></tr><tr><td>School of Software, Henan University, Kaifeng, China</td><td>1</td></tr><tr><td>School of Computer Science and Engineering, Wuhan Institute of Technology, Wuhan, China</td><td>1</td></tr><tr><td>Department of Electrical Engineering, Computer Vision Laboratory, Linköping University, Linköping, Sweden</td><td>1</td></tr><tr><td>Computer Vision Research Laboratory, Electrical Engineering Faculty, Sahand University of Technology, Tabriz, Iran</td><td>1</td></tr><tr><td>Treelogic, Technological Scientific Park of Asturias, Llanera, Spain</td><td>1</td></tr><tr><td>Department of Computer Science and Engineering, University of Oviedo, Gijón, Spain</td><td>1</td></tr><tr><td>Fundación CTIC (Technological Center), Technological 
Scientific Park of Gijón, Gijón, Spain</td><td>1</td></tr><tr><td>Department of Computer Science, Madrid Open University, Madrid, Spain</td><td>1</td></tr><tr><td>Department of Research and Diagnostic Methods, Faculty of Education, Pontificia University of Salamanca, Salamanca, Spain</td><td>1</td></tr><tr><td>Graduate School of Science and Technology, Niigata University, 8050, Ikarashi 2-Nocho, Nishi-ku Niigata, 950-2181, Japan</td><td>1</td></tr><tr><td>Dept. of Information Engineering, Faculty of Engineering, Niigata University, 8050, Ikarashi 2-Nocho, Nishi-ku Niigata, 950-2181, Japan</td><td>1</td></tr><tr><td>Graduate School of Science and Technology, Niigata University, 8050, Ikarashi 2-Nocho, Nishi-ku Niigata, 950-2181, Japan, +81 25 262 7499</td><td>1</td></tr><tr><td>NTNU, Norway</td><td>1</td></tr><tr><td>Institute of Informatics, Wroclaw University of Technology, Wroclaw, Poland</td><td>1</td></tr><tr><td>Polish-Japanese Institute of Information Technology, Warszawa, Poland</td><td>1</td></tr><tr><td>Faculty of Applied Informatics and Mathematics, Department of Informatics, Warsaw University of Life Sciences (SGGW), Warsaw, Poland</td><td>1</td></tr><tr><td>Polish-Japanese Institute of Information Technology, Warsaw, Poland</td><td>1</td></tr><tr><td>FernUniversität , Hagen, Germany</td><td>1</td></tr><tr><td>Universidad Tecnica Federico Santa Maria , Valparaiso, Chile</td><td>1</td></tr><tr><td>Staffordshire University , Staffordshire, United Kingdom</td><td>1</td></tr><tr><td>Faculty of Engineering, Computing and Science, Swinburne University of Technology Sarawak Campus, Kuching, Malaysia</td><td>1</td></tr><tr><td>University of Massachusetts at Amherst, Amherst, MA, USA</td><td>1</td></tr><tr><td>Department of Engineering and MaintenanceChina Mobile Group, Jiangsu Company, Ltd., Changzhou, China</td><td>1</td></tr><tr><td>Indian Statistical Institute, Kolkata 700108</td><td>1</td></tr><tr><td>Departament d’Informàtica, Universitat de Valencia, 
Valencia, Spain</td><td>1</td></tr><tr><td>Department of Mathematics and Computer Science, Gannan Normal University, Ganzhou, People’s Republic of China</td><td>1</td></tr><tr><td>National Laboratory of Pattern Recognition, Beijing, China</td><td>1</td></tr><tr><td>Quang Binh University, Dong Hoi City, Vietnam</td><td>1</td></tr><tr><td>School of Mathematics and Information Technology, Nanjing Xiao Zhuang University, Nanjing, People’s Republic of China</td><td>1</td></tr><tr><td>School of Computing, Electronics and Mathematics, Faculty of Engineering, Environment and Computing, Coventry University, Coventry, UK</td><td>1</td></tr><tr><td>Institute of Computer Science, Christian-Albrechts-Universität Kiel, Kiel, Germany</td><td>1</td></tr><tr><td>KT Future Technology Laboratory, Seoul, South Korea</td><td>1</td></tr><tr><td>Microsoft Research Asia, Beijing, P.R. China</td><td>1</td></tr><tr><td>Shanghai Maritime University, Shanghai, China</td><td>1</td></tr><tr><td>Machine Intelligence Research Institute, Rockville, USA</td><td>1</td></tr><tr><td>Orange—France Telecom Division R&D—TECH/IRIS, Cesson Sévigné Cedex, France</td><td>1</td></tr><tr><td>IIT-Madras, Chennai, India</td><td>1</td></tr><tr><td>Department of Computer Science, Innopolis University, Kazan, Russia</td><td>1</td></tr><tr><td>Department of Computer Science, University of Science & Technology, Bannu, Pakistan</td><td>1</td></tr><tr><td>Naver Labs Europe, Meylan, France</td><td>1</td></tr><tr><td>School of Computer and Systems Sciences, JawaharLal Nehru University, New Delhi 110067, India</td><td>1</td></tr><tr><td>Univ. 
La Rochelle, La Rochelle, France</td><td>1</td></tr><tr><td>Department of Computer Science, Chu Hai College of Higher Education, Tsuen Wan, Hong Kong</td><td>1</td></tr><tr><td>University of Salerno, Salerno, Italy</td><td>1</td></tr><tr><td>Google, Mountain View, USA</td><td>1</td></tr><tr><td>Computer Sciences Department, University of Wisconsin, Madison, USA</td><td>1</td></tr><tr><td>Google, Seattle, USA</td><td>1</td></tr><tr><td>Singapore Polytechnic, 500 Dover Road, Singapore 139651</td><td>1</td></tr><tr><td>School of Computer and Information Engineering, Xiamen University of Technology, Xiamen, China</td><td>1</td></tr><tr><td>Centre for Machine Vision, Bristol Robotics Laboratory, University of the West of England, T Block, Frenchay Campus, Coldharbour Lane, Bristol, BS16 1QY, UK</td><td>1</td></tr><tr><td>Saudi Electronic University, Riyadh, Kingdom of Saudi Arabia</td><td>1</td></tr><tr><td>Information Security Group, City University London, London, UK</td><td>1</td></tr><tr><td>School of Physics and Electronic Information Engineering, Wenzhou University, Wenzhou, China</td><td>1</td></tr><tr><td>IIIT Chittoor, SriCity, Andhra Pradesh, India</td><td>1</td></tr><tr><td>Department of Information Engineering, Henan University of Science and Technology, Luoyang, China</td><td>1</td></tr><tr><td>Department of mechatronic technology of National Taiwan Normal University</td><td>1</td></tr><tr><td>Department of Electrical Engineering and Computer Science, Colorado School of Mines, Golden, CO, USA</td><td>1</td></tr><tr><td>The Image Processing and Analysis Laboratory (LAPI), University “Politehnica” of Bucharest, 313 Splaiul Independeţei, Bucharest, Romania</td><td>1</td></tr><tr><td>Division of Digital Media Engineering, Sang-Myung University, Suwon, Republic of Korea</td><td>1</td></tr><tr><td>Facebook AI Research (FAIR), Menlo Park, USA</td><td>1</td></tr><tr><td>Princeton University &Microsoft, Princeton, NJ, USA</td><td>1</td></tr><tr><td>Microsoft 
&University of Washington, Redmond, WA, USA</td><td>1</td></tr><tr><td>Intel Labs, Pittsburgh PA</td><td>1</td></tr><tr><td>Dept. of Inf. Network Technol., Hsiuping Inst. of Technol., Taichung, Taiwan</td><td>1</td></tr><tr><td>Alibaba Group, Zhejiang, People’s Republic of China</td><td>1</td></tr><tr><td>Department of Electrical, Computer, and Systems Engineering, Rensselaer Polytechnic Institute Troy, Troy, USA</td><td>1</td></tr><tr><td>Key Lab of Computing and Communication Software of Anhui Province School of Computer Science and Technology, University of Science and Technology of China Hefei, Anhui, People’s Republic of China</td><td>1</td></tr><tr><td>School of ComputingNational University of Singapore</td><td>1</td></tr><tr><td>Dept. of EE, UESTC, 2006 Xiyuan Ave, Chengdu, Sichuan 611731, China</td><td>1</td></tr><tr><td>Dept. of Radiation Oncology, Johns Hopkins University, 3400 N. Charles St, Baltimore, MD 21218, USA</td><td>1</td></tr><tr><td>Dept. of Electrical &amp; Computer Engineering, Johns Hopkins University, 3400 N. 
Charles St, Baltimore, MD 21218, USA</td><td>1</td></tr><tr><td>University of Nottingham (Malaysia Campus), Malaysia</td><td>1</td></tr><tr><td>South Valley University, Qena, Egypt</td><td>1</td></tr><tr><td>Film Department ELTE University, Budapest, Hungary</td><td>1</td></tr><tr><td>Gipsa-Lab, Saint Martin d’Heres, France</td><td>1</td></tr><tr><td>ICA Laboratory, Grenoble, France</td><td>1</td></tr><tr><td>School of Computing and Electrical Engineering, IIT Mandi, H.P, 175001, India</td><td>1</td></tr><tr><td>AICTE Emeritus Fellow,  </td><td>1</td></tr><tr><td>Department of Computer Science & Engineering, GCELT, Kolkata, India</td><td>1</td></tr><tr><td>Chinese University of Hong Kong, Hong Kong</td><td>1</td></tr><tr><td>Department of Computer System and Communication, Faculty of Information and Communication, Universiti Teknikal Malaysia Melaka, Durian Tunggal, Malaysia</td><td>1</td></tr><tr><td>Division Télécom, Centre de Développement des Technologies Avancées - CDTA, Algiers, Algeria</td><td>1</td></tr><tr><td>School of Computer and Communication Science, École Polytechnique Fédérale de Lausanne, Lausanne, Switzerland</td><td>1</td></tr><tr><td>Baidu Research - Institute of Deep Learning, Sunnyvale, USA</td><td>1</td></tr><tr><td>Jiaxing University, Jiaxing, China</td><td>1</td></tr><tr><td>Department of Computer Science and Engineering, Beijing Institute of Technology, Beijing 100081, CHINA. zhangxiaoxun@bit.edu.cn</td><td>1</td></tr><tr><td>Department of Computer Science and Engineering, Beijing Institute of Technology, Beijing 100081, CHINA. jiayunde@bit.edu.cn</td><td>1</td></tr><tr><td>Department of Computer Science and Engineering, Beijing Institute of Technology, Beijing 100081, CHINA. 
xushuang@bit.edu.cn</td><td>1</td></tr><tr><td>Department of Electronics and Communication, University of Allahabadm Allahabad, India 211002</td><td>1</td></tr><tr><td>Department of Business Computer, Faculty of Management Science, Nakhon Ratchasima Rajabhat University, Nakhon Ratchasima, Thailand</td><td>1</td></tr><tr><td>Yahoo! Research</td><td>1</td></tr><tr><td>Department of Mathematics and Computer Science, University of Cagliari, Italy</td><td>1</td></tr><tr><td>Universidad de León, León, Spain</td><td>1</td></tr><tr><td>Elektronik ve Haberle&#x015F;me M&#x00FC;hendisli&#x011F;i B&#x00F6;l&#x00FC;m&#x00FC;</td><td>1</td></tr><tr><td>Robert Bosch Engineering and Business Solutions Limited, Bangalore, India</td><td>1</td></tr><tr><td>Department of Instrumentation and Control Engineering, PSG College of Technology, Coimbatore, India</td><td>1</td></tr><tr><td>China Airborne Missile Academy, Luoyang, China</td><td>1</td></tr><tr><td>Electronic Information Engineering College, Henan University of Science and Technology, Luoyang, China</td><td>1</td></tr><tr><td>School of Electronic and Information Engineering, Inner Mongolia University of Science and Technology, Baotou, People’s Republic of China</td><td>1</td></tr><tr><td>Istituto Italiano di Tecnologia & Università di Verona, Genova, Italy</td><td>1</td></tr><tr><td>Istituto Italiano di Tecnologia (IIT), Genova, Italy</td><td>1</td></tr><tr><td>Office of Safety Research and Development, Federal Highway Administration, U.S. 
Department of Transportation, Virginia, USA</td><td>1</td></tr><tr><td>Xinjiang Vocational and Technical College of Communications, Wulumuqi, People’s Republic of China</td><td>1</td></tr><tr><td>College of Mathematics and Informatics, South China Agricultural University, China</td><td>1</td></tr><tr><td>Curtin University Department of Mechanical Engineering, Perth, Western Australia 6012</td><td>1</td></tr><tr><td>Department of Information Engineering, HeNan Radio and Television University, Zhengzhou, People’s Republic of China</td><td>1</td></tr><tr><td>Computer Science Department, School of Information Science and Engineering, Xiamen, University, Xiamen, People’s Republic of China</td><td>1</td></tr><tr><td>PLA University of Science and Technology, China</td><td>1</td></tr><tr><td>Department of Computer Science and Engineering, University of Nebraska-Lincoln, Lincoln, USA</td><td>1</td></tr><tr><td>School of Electronics and Computer Eng., Chonnam National University, Gwangju, Korea</td><td>1</td></tr><tr><td>FAST, Supélec, Avenue de la Boulaie, Cesson-Sévigné, France</td><td>1</td></tr><tr><td>ISIR laboratory, Pierre and Marie Curie university, Paris Cedex 05, France</td><td>1</td></tr><tr><td>Centre for Visual Computing, Faculty of Engineering and Informatics, University of Bradford, Bradford, UK</td><td>1</td></tr><tr><td>Amsterdam University College, Amsterdam, The Netherlands</td><td>1</td></tr><tr><td>Key Laboratory of Intelligent Information Processing, Shandong Normal University, Jinan, China</td><td>1</td></tr><tr><td>School of Computer & Software, Nanjing University of Information Science & Technology, Nanjing, People’s Republic of China</td><td>1</td></tr><tr><td>Institute of Life Sciences, Shandong Normal University, Jinan, China</td><td>1</td></tr><tr><td>School of Information Science and Engineering, Shandong Normal University, Jinan, China</td><td>1</td></tr><tr><td>FEECS, Department of Computer Science, Technical University of Ostrava, 
Ostrava-Poruba, Czech Republic</td><td>1</td></tr><tr><td>ECE, Department MSIT, C-4 Janakpuri, New Delhi, India</td><td>1</td></tr><tr><td>Dept. of Comput. Sci., New Jersey Inst. of Technol., Newark, NJ, USA</td><td>1</td></tr><tr><td>Electrical Engineering Department, National Tsing-Hua University, Hsin-Chu, Taiwan. e-mail: chihming.fu@gmail.com</td><td>1</td></tr><tr><td>Electrical Engineering Department, National Tsing-Hua University, Hsin-Chu, Taiwan; Informatics Department, Fo-Guang University, I-Lan, Taiwan. e-mail: clhuang@ee.nthu.edu.tw</td><td>1</td></tr><tr><td>Electrical Engineering Department, National Tsing-Hua University, Hsin-Chu, Taiwan</td><td>1</td></tr><tr><td>Department of Computer and Communication Engineering, University of Science and Technology Beijing, Beijing, China</td><td>1</td></tr><tr><td>University of California at Los Angeles, Los Angeles, CA, USA</td><td>1</td></tr><tr><td>Department of Physics, Tripura University (A Central University), Suryamaninagar, India</td><td>1</td></tr><tr><td>Department of Computer Science and Engineering, Tripura University (A Central University), Suryamaninagar, India</td><td>1</td></tr><tr><td>Pontifical Catholic University of Minas Gerais - Department of Computer Science, R. Dom Jose Gaspar, 500, Belo Horizonte MG, 30535901, Brazil</td><td>1</td></tr><tr><td>Department of Statistics and Operational Research, Faculty of Mathematics, Complutense University of Madrid, Madrid, Spain</td><td>1</td></tr><tr><td>Dept. 
of Mathematics and Computer Science, University of Udine, Italy</td><td>1</td></tr><tr><td>LIMSI-CNRS, Orsay Cedex, France</td><td>1</td></tr><tr><td>Istituto di Informatica e Telematica, Consiglio Nazionale delle Ricerche, Pisa, Italy</td><td>1</td></tr><tr><td>Faculty of Information Sciences and Engineering, Management and Science University, Selangor, Malaysia</td><td>1</td></tr><tr><td>UTM-Big Data Center, Universiti Teknologi Malaysia, Johor Bahru, Malaysia</td><td>1</td></tr><tr><td>Faculty of Computing, Universiti Teknologi Malaysia, Johor Bahru, Malaysia</td><td>1</td></tr><tr><td>Faculty of Electrical Engineering and Computer Science, University of Maribor, Maribor, Slovenia</td><td>1</td></tr><tr><td>LAMIA, EA 4540, University of French West Indies and Guyana, Guadeloupe, France</td><td>1</td></tr><tr><td>ISIR, UPMC Univ Paris 06, CNRS, Paris, France</td><td>1</td></tr><tr><td>Merchant Marine College, Shanghai Maritime University, Shanghai 201306, PR China</td><td>1</td></tr><tr><td>Department of Informatics, King’s College London, London, UK</td><td>1</td></tr><tr><td>Department of Electrical Engineering, KAIST, Korea</td><td>1</td></tr><tr><td>Department of New Media, Korean German Institute of Technology, Korea</td><td>1</td></tr><tr><td>Department of Electrical, Computer and Biomedical Engineering, University of Pavia, Pavia, Italy</td><td>1</td></tr><tr><td>Pontifical Catholic Univ of Rio de Janei, Department of Informatics, Rio de Janeiro, Brazil</td><td>1</td></tr><tr><td>Department of Informatics, Pontifical Catholic Univ of Rio de Janei, Rio de Janeiro, Brazil</td><td>1</td></tr><tr><td>School of Computing Sciences and Informatics, University of Cincinnati, Cincinnati, USA</td><td>1</td></tr><tr><td>Universiti Kuala Lumpur, Kedah</td><td>1</td></tr><tr><td>Sudan University of Science and Technology, College of Computer Science and Information Technology, Khartoum - Sudan</td><td>1</td></tr><tr><td>LMU Munich, Germany and Munich University of 
Applied Sciences, Germany</td><td>1</td></tr><tr><td>Department of Electric and Electronic Engineering, Avrasya University, Trabzon, Turkey</td><td>1</td></tr><tr><td>ACM Professional Specialist in Artificial Intelligence</td><td>1</td></tr><tr><td>School of Computer and Information Engineering, Jiangxi Normal University, Nanchang, China</td><td>1</td></tr><tr><td>University of Washington and Google Inc.</td><td>1</td></tr><tr><td>Google Inc.</td><td>1</td></tr><tr><td>CCCE, Nankai University Jinnan Campus, Tianjin, China</td><td>1</td></tr><tr><td>Department of Computer Science, VHNSN College, Virudhunagar, India</td><td>1</td></tr><tr><td>Department of Computer Science, ANJA College, Sivakasi, India</td><td>1</td></tr><tr><td>Tsinghua National Laboratory for Information Science and Technology, Department of Computer Science and Technology Tsinghua University, Beijing, China</td><td>1</td></tr><tr><td>Foundation for Research & Technology – Hellas, Heraklion, Crete, Greece</td><td>1</td></tr><tr><td>Vrije Universiteit Amsterdam, Amsterdam, The Netherlands</td><td>1</td></tr><tr><td>Ruhr-Universität Bochum, Bochum, Germany</td><td>1</td></tr><tr><td>Instituto Nacional de Astrofísica, Óptica y Electrónica, Luis Enrique Erro No.1, Tonantzintla, Puebla, México. CP 72840</td><td>1</td></tr><tr><td>Key Laboratory of System Control and Information Processing, Ministry of Education of China, Shanghai, People’s Republic of China</td><td>1</td></tr><tr><td>Department of Mechanical Engineering, Universiti Tenaga Nasional Km 7, Jalan IKRAM-UNITEN, 43000, Kajang, Selangor, Malaysia</td><td>1</td></tr><tr><td>Dept. of Electron. Eng., Hannam Univ., Daejeon, South Korea</td><td>1</td></tr><tr><td>Alibaba-Zhejiang University Joint Research Institute of Frontier Technologies, Zhejiang, China</td><td>1</td></tr><tr><td>Dept. of Electr. & Comput. 
Eng., Toronto Univ., Ont., Canada</td><td>1</td></tr><tr><td>Laboratoire MIA, University of La Rochelle, La Rochelle, France</td><td>1</td></tr><tr><td>Fraunhofer Institute for Telecommunications, Berlin, Germany</td><td>1</td></tr><tr><td>Fraunhofer Institute for Digital Media Technology, Ilmenau, Germany</td><td>1</td></tr><tr><td>Siemens AG, Corporate Technology, Munich, Germany</td><td>1</td></tr><tr><td>ECIT, School of Electronics, Electrical Engineering &amp; Computer Science, Queen's University Belfast, Belfast, UK</td><td>1</td></tr><tr><td>Department of Computer Science and Engineering, University of Calcutta, Kolkata, India</td><td>1</td></tr><tr><td>School of Electrical and Computer Engineering, College of Engineering, University of Tehran, Iran</td><td>1</td></tr><tr><td>Luoyang Electro-Optical Equipment Research Institute, Luoyang, People’s Republic of China</td><td>1</td></tr><tr><td>Technological Educational Institute of Sterea Ellada, Psahna, Halkida, Greece</td><td>1</td></tr><tr><td>National Centre for Scientific Research “Demokritos”, Agia Paraskevi, Athens, Greece</td><td>1</td></tr><tr><td>University of Maastricht, Maastricht, The Netherlands</td><td>1</td></tr><tr><td>Centre of Research and Technology Hellas, Thermi, Thessaloniki, Greece</td><td>1</td></tr><tr><td>Department of Electronics and Communication Engineering, P.P.G. 
Institute of Technology, Coimbatore, India</td><td>1</td></tr><tr><td>Department of Electronics and Communication Engineering, Institute of Road and Transport Technology, Erode, India</td><td>1</td></tr><tr><td>Department of Computer Science, Banasthali Vidyapith, Banasthali, India</td><td>1</td></tr><tr><td>Computer Science and Engineering Department, SP Memorial Institute of Technology, Kaushambi, India</td><td>1</td></tr><tr><td>Department of Computer Science and Technology, Xi’an Jiaotong University, Xi’an, Shaanxi, China</td><td>1</td></tr><tr><td>Fujifilm Software, San Jose, USA</td><td>1</td></tr><tr><td>Institute for Infocomm Research, 1 Fusionopolis Way, #21-01 Connexis, Singapore 138632</td><td>1</td></tr><tr><td>HTC Research, Beijing, China</td><td>1</td></tr><tr><td>QCIS, University of Technology, Sydney, Australia</td><td>1</td></tr><tr><td>Interuniversity Microelectronics Centre, Heverlee, Belgium</td><td>1</td></tr><tr><td>NPU-VUB Joint AVSP Lab, Department ETRO, Vrije Universiteit Brussel (VUB), Brussels, Belgium</td><td>1</td></tr><tr><td>Shaanxi Key Laboratory on Speech and Image Information Processing, Xi’an, China</td><td>1</td></tr><tr><td>NPU-VUB Joint AVSP Lab, School of Computer Science, Northwestern Polytechnical University (NPU), Xi’an, China</td><td>1</td></tr><tr><td>Institute of Electronics and Computer Science, Riga, Latvia</td><td>1</td></tr><tr><td>Electrical and Computer Engineering Department, University of California, Santa Barbara, CA 93106 USA</td><td>1</td></tr><tr><td>Psychology Department, University of California, Santa Barbara, CA 93106 USA</td><td>1</td></tr><tr><td>Dept. of Comp. Sci. and Inf. 
Eng, National United University, Miaoli, Taiwan</td><td>1</td></tr><tr><td>School of Control Science and Engineering DUT, Dalian, China</td><td>1</td></tr><tr><td>Information Technology R&D Center, Mitsubishi Electric Corporation, Kamakura, Japan</td><td>1</td></tr><tr><td>School of Information Science and Engineering, Hunan city University, Yiyang, China</td><td>1</td></tr><tr><td>KU Leuven, ESAT - PSI, iMinds, Leuven, Belgium</td><td>1</td></tr><tr><td>Max-Planck-Institut für Informatik, Saarbrücken, Germany</td><td>1</td></tr><tr><td>Research Center for Science and Technology in Medicine, Tehran University of Medical Sciences, Tehran, Iran</td><td>1</td></tr><tr><td>University of IIllinois, Urbana-Champaign</td><td>1</td></tr><tr><td>Zhejiang University & Alibaba Group, Hangzhou, China</td><td>1</td></tr><tr><td>Laboratory LIM, Department of Computer Science, Faculty of Sciences and Technologies, University Hassan II, Casablanca-Morocco</td><td>1</td></tr><tr><td>Electrical Engineering Department, Yazd University, Yazd, Iran</td><td>1</td></tr><tr><td>School of Electronics and Information Engineering, Tianjin Polytechnic University, Tianjin, China</td><td>1</td></tr><tr><td>Tianjin Key Laboratory of Optoelectronic Detection Technology and Systems, Tianjin, China</td><td>1</td></tr><tr><td>Adjunct, Effat University, Jeddah, Saudi Arabia</td><td>1</td></tr><tr><td>School of Computer Science, Wuyi University, Jiangmen, China</td><td>1</td></tr><tr><td>Snapchat Research, Venice, CA90291</td><td>1</td></tr><tr><td>Department of CSE, University at Buffalo (SUNY), NY 14260, USA</td><td>1</td></tr><tr><td>School of Information and Engineering, Jinhua Polytechnic, Jinhua, China</td><td>1</td></tr><tr><td>Department of Computer Science and Engineering, University of Texas, Arlington, USA</td><td>1</td></tr><tr><td>School of Medical Science, Jinhua Polytechnic, Jinhua, China</td><td>1</td></tr><tr><td>S. S. 
College of Business Studies, University of Delhi, Delhi, India</td><td>1</td></tr><tr><td>School of Computer & System Sciences, Jawaharlal Nehru University, New Delhi, India</td><td>1</td></tr><tr><td>Department of Computer Science and Engineering, National Institute of Technology, Uttarakhand, India</td><td>1</td></tr><tr><td>Baidu Online Network Technology (Beijing) Co. Ltd, Beijing, China</td><td>1</td></tr><tr><td>Faculty of Engineering and Computing, Coventry University, UK</td><td>1</td></tr><tr><td>Dept. of Theoretical Electrical Engineering, Technical University of Sofia, Sofia, Bulgaria</td><td>1</td></tr><tr><td>Lawrence Berkeley National Laboratory, Berkeley, USA</td><td>1</td></tr><tr><td>No.1 Senior Middle School of Wendeng District, Weihai, China</td><td>1</td></tr><tr><td>Standards & Metrology Research Institute of CARS, Beijing, China</td><td>1</td></tr><tr><td>College of Information Science & Technology, Hebei Agricultural University, Baoding, China</td><td>1</td></tr><tr><td>Amazon, Berkshire, U.K.</td><td>1</td></tr><tr><td>Tianjin Universtiy, Tianjin, China</td><td>1</td></tr><tr><td>University of Lancaster, Lancaster, United Kingdom</td><td>1</td></tr><tr><td>University of Helsinki, Helsinki, Finland</td><td>1</td></tr><tr><td>Intelligent and Interactive Systems, Institute of Computer Science, University of Innsbruck, Innsbruck, Austria</td><td>1</td></tr><tr><td>Signal and Image Exploitation (INTELSIG), Montefiore Institute, University of Liège, Liège, Belgium</td><td>1</td></tr><tr><td>Megvii Inc., Beijing, China</td><td>1</td></tr><tr><td>Department of Informatics, Modeling, Electronics, and Systems, University of Calabria, Rende, Italy</td><td>1</td></tr><tr><td>School of Materials Science and Engineering, Central South University, Changsha, China</td><td>1</td></tr><tr><td>Institute of Energy, Jiangxi Academy of Sciences, Nanchang, China</td><td>1</td></tr><tr><td>**</td><td>1</td></tr><tr><td>Electrical and Electronic Engineering 
Department, Faculty of Engineering, Shahed University, Tehran, Iran</td><td>1</td></tr><tr><td>College of Mathematics and Information Engineering, Jiaxing University, Jiaxing, China</td><td>1</td></tr><tr><td>Dept. of Convergence, Daegu Gyeongbuk Institute of Science & Technology (DGIST), Daegu, Korea</td><td>1</td></tr><tr><td>School of Mechanical and Electrical Engineering, Shandong Management University, Jinan, China</td><td>1</td></tr><tr><td>School of Information Science and Technology, Shandong Normal University, Jinan, China</td><td>1</td></tr><tr><td>National Institute of Advanced Industrial Science Technology, Japan</td><td>1</td></tr><tr><td>Tilburg center for Cognition and Communication, Tilburg University, Tilburg, The Netherlands</td><td>1</td></tr><tr><td>Automatics Research Group, Universidad Tecnológica de Pereira, Pereira, Colombia</td><td>1</td></tr><tr><td>School of Science, Southwest Petroleum University, Chengdu, China</td><td>1</td></tr><tr><td>Infosys Limited, Bhubaneswar, India</td><td>1</td></tr><tr><td>School of Computer Science and Engineering, Tianjin University of Technology, China</td><td>1</td></tr><tr><td>Department of Computer Science, University of Brasília, DF, Brazil 70910-900</td><td>1</td></tr><tr><td>Department of Mechanical Engineering, University of Brasília, DF, Brazil 70910-900</td><td>1</td></tr><tr><td>LIAMA, French National Institute for Research in Computer Science and Control, Paris, France</td><td>1</td></tr><tr><td>Leiden University, Leiden, The Netherlands</td><td>1</td></tr><tr><td>TNO, The Hague, The Netherlands</td><td>1</td></tr><tr><td>City University, Kowloon Tong, Hong Kong</td><td>1</td></tr><tr><td>Radboud University, EC Nijmegen, The Netherlands</td><td>1</td></tr><tr><td>TNO, Oude Waalsdorperweg, AK The Hague, The Netherlands</td><td>1</td></tr><tr><td>Liaocheng University, Liaocheng, China</td><td>1</td></tr><tr><td>Northwestern Polytechnic University, Xi’an, China</td><td>1</td></tr><tr><td>University 
of Science and Technology Beijing, Beijing, China</td><td>1</td></tr><tr><td>Faculty of Information Engineering, China University of Geosciences, Wuhan, China</td><td>1</td></tr><tr><td>China University of Geosciences Wuhan, China</td><td>1</td></tr><tr><td>University of Udine, Udine, Italy</td><td>1</td></tr><tr><td>INRS-EMT, Montreal, Canada</td><td>1</td></tr><tr><td>School of Computer Science and Technology, Harbin Institute of Technology at Weihai, Weihai, China</td><td>1</td></tr><tr><td>School of Computer Science Carnegie Mellon University Pittsburgh, PA, 15213, USA</td><td>1</td></tr><tr><td>College of Information and Technology, Incheon National University, Incheon, Korea</td><td>1</td></tr><tr><td>Tianjin University & University of South Carolina, Tianjin, China</td><td>1</td></tr><tr><td>School of Electronics Engineering, Kyungpook National University, Taegu, South Korea</td><td>1</td></tr><tr><td>Department of Electrical & Electronics Engineering, Kalasalingam University, Krishnankoil, India</td><td>1</td></tr><tr><td>School of Computer Engineering, Hanshin University, Osan, Republic of Korea</td><td>1</td></tr><tr><td>School of Computer Science, China University of Geosciences, Wuhan, China</td><td>1</td></tr><tr><td>College of Computer Science and Technology of Huaqiao University, Xiamen, China</td><td>1</td></tr><tr><td>CEA (iRSTV/BGE), INSERM (U1038), CNRS (FR3425), Université Grenoble-Alpes, Grenoble, France</td><td>1</td></tr><tr><td>NLPR, Institute of Automation, Chinese Academy of Science, Beijing, People’s Republic of China</td><td>1</td></tr><tr><td>Costel, Université de Rennes 2, Rennes, France</td><td>1</td></tr><tr><td>IRISA, Université de Bretagne Sud, Vannes, France</td><td>1</td></tr><tr><td>Research & Development, British Broadcasting Corporation (BBC), London, UK</td><td>1</td></tr><tr><td>Faculty of Computer Science and Engineering, Xi’an University of Technology, Xi’an, China</td><td>1</td></tr><tr><td>Wide Eyes 
Technologies</td><td>1</td></tr><tr><td>School of Information Engineering, Jiangxi Manufacturing Technology College, Nanchang, China</td><td>1</td></tr><tr><td>Department of Computer Science and Technology, Nanjing Forestry University and Shandong University, Jinan, China</td><td>1</td></tr><tr><td>Department of Language Studies, Nanjing Forestry University, Nanjing, China</td><td>1</td></tr><tr><td>Department of Computer Science and Technology, Nanjing Forestry University, Nanjing, China</td><td>1</td></tr><tr><td>Dept. of Autom. Test & Control, Harbin Inst. of Technol., China</td><td>1</td></tr><tr><td>Department of Computer Science and Engineering, Frederick University, Nicosia, Cyprus</td><td>1</td></tr><tr><td>The Maersk Mc-Kinney Moller Institute, University of Southern Denmark, Odense M, Denmark</td><td>1</td></tr><tr><td>Department of Computer Science, Digital Image Processing Laboratory, Islamia College Peshawar, Peshawar, Pakistan</td><td>1</td></tr><tr><td>Department of Computer Science and Software Engineering, International Islamic University, Islamabad, Pakistan</td><td>1</td></tr><tr><td>Department of Computer Science, Shaheed Zulfikar Ali Bhutto Institute of Science and Technology, Islamabad, Pakistan</td><td>1</td></tr><tr><td>Technische Universität München / Imperial College London, Munich / London, England UK</td><td>1</td></tr><tr><td>Department of Mathematics and Informatics, Ecole Centrale de Lyon, Lyon, 69134, France</td><td>1</td></tr><tr><td>Department of Computer Science & Engineering, Jamia Hamdard University, New Delhi, India</td><td>1</td></tr><tr><td>Department of Computer Science & Engineering, University of Minnesota-Twin Cities, Minneapolis, USA</td><td>1</td></tr><tr><td>Faculty of Electrical Engineering and Informatics, Budapest University of Technology and Economics, Budapest, Hungary</td><td>1</td></tr><tr><td>School of Information Science and Technology, Northwest University, Xi’an, China</td><td>1</td></tr><tr><td>Radboud 
University, Donders Institute for Brain, Cognition and Behaviour, Nijmegen, The Netherlands</td><td>1</td></tr><tr><td>School of Software, Beijing Institute of Technology, Beijing, China</td><td>1</td></tr><tr><td>University of St. Andrews, UK</td><td>1</td></tr><tr><td>University of Tunis El Manar, Tunis, Tunisia</td><td>1</td></tr><tr><td>College of Information and Control Engineering, China University of Petroleum, Qingdao, China</td><td>1</td></tr><tr><td>Intel Labs Europe, Pipers Way, Swindon</td><td>1</td></tr><tr><td>Department of Computer Systems, Universidad Politécnica de Madrid, Madrid, Spain</td><td>1</td></tr><tr><td>Department of Mathematics and Computer Science, Ursinus College, Collegeville, PA</td><td>1</td></tr><tr><td>Systems Engineering Institute, Xi’an Jiaotong University, Xi’an 710049, China</td><td>1</td></tr><tr><td>Institute of Semiconductors, Chinese Academy of Sciences&University of Chinese Academy of Sciences, Beijing, China</td><td>1</td></tr><tr><td>School of Computer Science and Technology, Nanjing University of Science and Technology of China, Nanjing, People’s Republic of China</td><td>1</td></tr><tr><td>NTT Network Innovation Laboratories, Nippon Telegraph and Telephone Corp.</td><td>1</td></tr><tr><td>Faculty of Computing and Information Technology, Setapak, Malaysia</td><td>1</td></tr><tr><td>Computer Science Department, University of California, Los Angeles, CA, USA</td><td>1</td></tr><tr><td>INRIA, Sophia Antipolis, France</td><td>1</td></tr><tr><td>School of Mathematics and Computer Sciences, Gannan Normal University, Ganzhou, China</td><td>1</td></tr><tr><td>University of Maribor, Faculty of Electrical Engineering and Computer Science, Koroška cesta 46, SI-2000, Slovenia</td><td>1</td></tr><tr><td>University of Tours, France</td><td>1</td></tr><tr><td>Department of Information Management, Hwa Hsia University of Technology, New Taipei City, Taiwan</td><td>1</td></tr><tr><td>Department of Electronic Engineering, National Ilan 
University, Yilan City, Taiwan</td><td>1</td></tr><tr><td>Faculty of Electrical Engineering and Computer Science, Ningbo University, Ningbo, China</td><td>1</td></tr><tr><td>College of Information and Electrical Engineering, Ludong University, Yantai, China</td><td>1</td></tr><tr><td>Wakayama University</td><td>1</td></tr><tr><td>Computer Science College, Xi’an Polytechnic University, Xi’an, China</td><td>1</td></tr><tr><td>Computer Science Dept., SUNY Stony Brook, USA</td><td>1</td></tr><tr><td>School of Mathematical and Physical Sciences at the University of Newcastle, Callaghan, NSW 2308, Australia</td><td>1</td></tr><tr><td>Department of Electronics and Communication Engineering, JNTU College of Engineering, Hyderabad, India</td><td>1</td></tr><tr><td>Department of Physics, JNTU College of Engineering, Kakinada, India</td><td>1</td></tr><tr><td>Department of Computer Science and Engineering, JNTU College of Engineering, Kakinada, India</td><td>1</td></tr><tr><td>Department of Telecommunications and Information Processing, Image Processing and Interpretation, UGent/iMinds, Ghent, Belgium</td><td>1</td></tr><tr><td>School of Software, Shenyang University of Technology, Shenyang, China</td><td>1</td></tr><tr><td>School of Engineering of UABC, University of Baja California, Tijuana, Mexico</td><td>1</td></tr><tr><td>University of Hawaii at Hilo, HI, USA</td><td>1</td></tr><tr><td>Yuncheng University, Shanxi Province, China</td><td>1</td></tr><tr><td>Department of Computer Engineering, Bah&#x00E7;e&#x015F;ehir University, Istanbul, Turkey</td><td>1</td></tr><tr><td>Sichuan University West China Hospital of Stomatology, Chengdu, China</td><td>1</td></tr><tr><td>School of Software Engineering, Chengdu University of Information Technology, Chengdu, China</td><td>1</td></tr><tr><td>Department of Computer Science and Engineering, East China University of Science and Technology, Shanghai, People’s Republic of China</td><td>1</td></tr><tr><td>School of Electronic and 
Information Engineering, Ningbo University of Technology, Ningbo, China</td><td>1</td></tr><tr><td>Department of Computer Engineering, Bogaziçi University, Bebek, Turkey</td><td>1</td></tr><tr><td>Department of Electrical and Electronic Engineering, Auckland University of Technology , Auckland, New Zealand</td><td>1</td></tr><tr><td>Department of Computer Engineering, Qazvin Islamic Azad University , Qazvin, Iran</td><td>1</td></tr><tr><td>Shanghai University of Finance and Economics, Shanghai, China</td><td>1</td></tr><tr><td>Graduate School of Engineering, Nagasaki University, Nagasaki, Japan</td><td>1</td></tr><tr><td>Institute of Management and Information Technologies, Chiba University, Chiba, Japan</td><td>1</td></tr><tr><td>Graduate School of Advanced Integration Science, Chiba University, Chiba, Japan</td><td>1</td></tr><tr><td>Vision Semantics Ltd</td><td>1</td></tr><tr><td>Department of Film and Digital Media, Seokyeong University, Seoul, Republic of Korea</td><td>1</td></tr><tr><td>Department of MediaSoftware, Sungkyul University, Anyang-si, Republic of Korea</td><td>1</td></tr><tr><td>Pusan National University, Busan, Korea</td><td>1</td></tr><tr><td>School of Engineering, Swiss Federal Institute of Technology Lausanne (EPFL), Lausanne, Switzerland</td><td>1</td></tr><tr><td>Department of Computer Science, Auckland University of Technology, Auckland, New Zealand</td><td>1</td></tr><tr><td>L3S Research Center, Leibniz Universität Hannover, Hannover, Germany</td><td>1</td></tr><tr><td>German National Library of Science and Technology (TIB), Hannover, Germany</td><td>1</td></tr><tr><td>taglicht media Film- & Fernsehproduktion GmbH, Köln, Germany</td><td>1</td></tr><tr><td>Department of Mathematics and Computer Science, University of Marburg, Marburg, Germany</td><td>1</td></tr><tr><td>School of Mathematics and Computational Science, Anqing Normal University, Anqing, People’s Republic of China</td><td>1</td></tr><tr><td>Concordia Institute for Information 
Systems Engineering Concordia University, Montreal, Canada</td><td>1</td></tr><tr><td>IKERBASQUE, Basque Foundation for Science, Bilbao, Spain</td><td>1</td></tr><tr><td>University of the Basque Country UPV/EHU, San Sebastian, Spain</td><td>1</td></tr><tr><td>Computer Vision Center, Edifici “O”, Campus UAB, Bellaterra, Spain</td><td>1</td></tr><tr><td>Graduate School of Biomedical Sciences, Nagasaki University, Nagasaki City, Japan</td><td>1</td></tr><tr><td>Xiamen University of Technology, Fujian, China</td><td>1</td></tr><tr><td>School of Computer Science and Information Engineering, Shanghai Institute of Technology, Shanghai, China</td><td>1</td></tr><tr><td>Dept. of Artificial Intelligence, Faculty of Computer Engineering, University of Isfahan, Iran</td><td>1</td></tr><tr><td>Department of Information Processing Interdisciplinary Graduate School of Science and Engineering, Tokyo Institute of Technology Yokohama 226-8503, Japan</td><td>1</td></tr><tr><td>Department of Computer Science, University of Texas, San Antonio, TX, USA</td><td>1</td></tr><tr><td>University of Sheffield, Sheffield, United Kingdom</td><td>1</td></tr><tr><td>Insititute of Automation, Chinese Academy of Sciences (CAS), Beijing, China</td><td>1</td></tr><tr><td>School of Computing and Information Systems, University of Melbourne, Melbourne, Australia</td><td>1</td></tr><tr><td>Sapienza Università di Roma, Roma, Italy</td><td>1</td></tr><tr><td>Center for Unified Biometrics and Sensors, University at Buffalo, NY, USA. 
tulyakov@cedar.buffalo.edu</td><td>1</td></tr><tr><td>School of Computer and Information Engineering, Xiamen University of Technology, Xiamen 361024, China</td><td>1</td></tr><tr><td>College of Control Engineering, Northeastern University at Qinhuangdao, Qinhuangdao, China</td><td>1</td></tr><tr><td>LAMIA, EA 4540, University of French West Indies &amp; Guyana</td><td>1</td></tr><tr><td>Peking University & Shanghai Jaio Tong University, Beijing, China</td><td>1</td></tr><tr><td>School of Information Technology, Madurai Kamarai University, Madurai, India</td><td>1</td></tr><tr><td>Computer Science and Engineering, Sanjivani College of Engineering, Kopargaon, India</td><td>1</td></tr><tr><td>Computer Science and Engineering, St.Peter’s University, Chennai, India</td><td>1</td></tr><tr><td>Computer Science and Engineering, Panimalar Engineering College, Chennai, India</td><td>1</td></tr><tr><td>Department of Computer Science, IT-Instituto de Telecomunicações, University of Beira Interior, Covilhã, Portugal</td><td>1</td></tr><tr><td>ITI Department Telecom Bretagne, Brest, France</td><td>1</td></tr><tr><td>Adobe Systems Incorporated, San Jose, CA, 95110</td><td>1</td></tr><tr><td>Department of Electrical Engineering and Information Technology, TU Darmstadt, D-64283, Germany</td><td>1</td></tr><tr><td>Institute of Neural Information Processing, University of Ulm, Ulm, Germany</td><td>1</td></tr><tr><td>Institute for Information Technology and Communications (IIKT), Otto-von-Guericke-University Magdeburg, Magdeburg, Germany</td><td>1</td></tr><tr><td>Faculty of Biomedical Engineering, Amirkabir University of Technology (Tehran Polytechnic), Tehran, Iran</td><td>1</td></tr><tr><td>Defence Science and Technology Organisation (DSTO), Edinburgh, Australia</td><td>1</td></tr><tr><td>Reallusion Corporation</td><td>1</td></tr><tr><td>National Institute of Advanced Industrial Science and Technology (AIST), Koto, Tokyo, Japan</td><td>1</td></tr><tr><td>Computer Application 
Research Center, Harbin Institute of Technology Shenzhen Graduate School, Shenzhen, China</td><td>1</td></tr><tr><td>Dept. of EE, Univ. at Buffalo, SUNY, USA</td><td>1</td></tr><tr><td>Department of Computer Science, Minjiang University, Fuzhou, People’s Republic of China</td><td>1</td></tr><tr><td>Institute of High Performance Computing and Networking, National Research Council of Italy (ICAR-CNR), Naples, Italy</td><td>1</td></tr></table></body></html> \ No newline at end of file
diff --git a/scraper/reports/first_pages.html b/scraper/reports/first_pages.html
new file mode 100644
index 00000000..1fa50094
--- /dev/null
+++ b/scraper/reports/first_pages.html
@@ -0,0 +1,48171 @@
+<!doctype html><html><head><title>First pages</title><link rel='stylesheet' href='reports.css'></head><body><h2>First pages</h2><table border='1' cellpadding='3' cellspacing='3'><tr><td>611961abc4dfc02b67edd8124abb08c449f5280a</td><td>Exploiting Image-trained CNN Architectures
+<br/>for Unconstrained Video Classification
+<br/><b>Northwestern University</b><br/>Evanston IL USA
+<br/>Raytheon BBN Technologies
+<br/>Cambridge, MA USA
+<br/><b>University of Toronto</b></td><td>('2815926', 'Shengxin Zha', 'shengxin zha')<br/>('1689313', 'Florian Luisier', 'florian luisier')<br/>('2996926', 'Walter Andrews', 'walter andrews')<br/>('2897313', 'Nitish Srivastava', 'nitish srivastava')<br/>('1776908', 'Ruslan Salakhutdinov', 'ruslan salakhutdinov')</td><td>szha@u.northwestern.edu
+<br/>{fluisier,wandrews}@bbn.com
+<br/>{nitish,rsalakhu}@cs.toronto.edu
+</td></tr><tr><td>610a4451423ad7f82916c736cd8adb86a5a64c59</td><td> Volume 4, Issue 11, November 2014 ISSN: 2277 128X
+<br/>International Journal of Advanced Research in
+<br/> Computer Science and Software Engineering
+<br/> Research Paper
+<br/> Available online at: www.ijarcsse.com
+<br/>A Survey on Search Based Face Annotation Using Weakly
+<br/>Labelled Facial Images
+<br/>Department of Computer Engg, DYPIET Pimpri,
+<br/><b>Savitri Bai Phule Pune University, Maharashtra India</b></td><td>('15731441', 'Shital A. Shinde', 'shital a. shinde')<br/>('3392505', 'Archana Chaugule', 'archana chaugule')</td><td></td></tr><tr><td>6156eaad00aad74c90cbcfd822fa0c9bd4eb14c2</td><td>Complex Bingham Distribution for Facial
+<br/>Feature Detection
+<br/>Eslam Mostafa1,2 and Aly Farag1
+<br/><b>CVIP Lab, University of Louisville, Louisville, KY, USA</b><br/><b>Alexandria University, Alexandria, Egypt</b></td><td></td><td>{eslam.mostafa,aly.farag}@louisville.edu
+</td></tr><tr><td>61ffedd8a70a78332c2bbdc9feba6c3d1fd4f1b8</td><td>Greedy Feature Selection for Subspace Clustering
+<br/>Greedy Feature Selection for Subspace Clustering
+<br/>Department of Electrical & Computer Engineering
+<br/><b>Rice University, Houston, TX, 77005, USA</b><br/>Department of Electrical & Computer Engineering
+<br/><b>Carnegie Mellon University, Pittsburgh, PA, 15213, USA</b><br/>Department of Electrical & Computer Engineering
+<br/><b>Rice University, Houston, TX, 77005, USA</b><br/>Editor:
+</td><td>('1746363', 'Eva L. Dyer', 'eva l. dyer')<br/>('1745861', 'Aswin C. Sankaranarayanan', 'aswin c. sankaranarayanan')<br/>('1746260', 'Richard G. Baraniuk', 'richard g. baraniuk')</td><td>e.dyer@rice.edu
+<br/>saswin@ece.cmu.edu
+<br/>richb@rice.edu
+</td></tr><tr><td>61084a25ebe736e8f6d7a6e53b2c20d9723c4608</td><td></td><td></td><td></td></tr><tr><td>61542874efb0b4c125389793d8131f9f99995671</td><td>Fair comparison of skin detection approaches on publicly available datasets
+<br/>a. DISI, Università di Bologna, Via Sacchi 3, 47521 Cesena, Italy.
+<br/><b>b DEI - University of Padova, Via Gradenigo, 6 - 35131- Padova, Italy</b></td><td>('1707759', 'Alessandra Lumini', 'alessandra lumini')<br/>('1804258', 'Loris Nanni', 'loris nanni')</td><td></td></tr><tr><td>61f93ed515b3bfac822deed348d9e21d5dffe373</td><td>Deep Image Set Hashing
+<br/><b>Columbia University</b><br/><b>Columbia University</b></td><td>('1710567', 'Jie Feng', 'jie feng')<br/>('2602265', 'Svebor Karaman', 'svebor karaman')<br/>('9546964', 'Shih-Fu Chang', 'shih-fu chang')</td><td>jiefeng@cs.columbia.edu
+<br/>svebor.karaman@columbia.edu, sfchang@ee.columbia.edu
+</td></tr><tr><td>6180bc0816b1776ca4b32ced8ea45c3c9ce56b47</td><td>Fast Randomized Algorithms for Convex Optimization and
+<br/>Statistical Estimation
+<br/>Electrical Engineering and Computer Sciences
+<br/><b>University of California at Berkeley</b><br/>Technical Report No. UCB/EECS-2016-147
+<br/>http://www.eecs.berkeley.edu/Pubs/TechRpts/2016/EECS-2016-147.html
+<br/>August 14, 2016
+</td><td>('3173667', 'Mert Pilanci', 'mert pilanci')</td><td></td></tr><tr><td>61f04606528ecf4a42b49e8ac2add2e9f92c0def</td><td>Deep Deformation Network for Object Landmark
+<br/>Localization
+<br/>NEC Laboratories America, Department of Media Analytics
+</td><td>('39960064', 'Xiang Yu', 'xiang yu')<br/>('46468682', 'Feng Zhou', 'feng zhou')</td><td>{xiangyu,manu}@nec-labs.com, zhfe99@gmail.com
+</td></tr><tr><td>612075999e82596f3b42a80e6996712cc52880a3</td><td>CNNs with Cross-Correlation Matching for Face Recognition in Video
+<br/>Surveillance Using a Single Training Sample Per Person
+<br/><b>University of Texas at Arlington, TX, USA</b><br/>2École de technologie supérieure, Université du Québec, Montreal, Canada
+</td><td>('3046171', 'Mostafa Parchami', 'mostafa parchami')<br/>('2805645', 'Saman Bashbaghi', 'saman bashbaghi')<br/>('1697195', 'Eric Granger', 'eric granger')</td><td>mostafa.parchami@mavs.uta.edu, bashbaghi@livia.etsmtl.ca and eric.granger@etsmtl.ca
+</td></tr><tr><td>61efeb64e8431cfbafa4b02eb76bf0c58e61a0fa</td><td>Merging Datasets Through Deep learning
+<br/>IBM Research
+<br/><b>Yeshiva University</b><br/>IBM Research
+</td><td>('35970154', 'Kavitha Srinivas', 'kavitha srinivas')<br/>('51428397', 'Abraham Gale', 'abraham gale')<br/>('2828094', 'Julian Dolby', 'julian dolby')</td><td></td></tr><tr><td>61e9e180d3d1d8b09f1cc59bdd9f98c497707eff</td><td>Semi-supervised learning of
+<br/>facial attributes in video
+<br/>1INRIA, WILLOW, Laboratoire d’Informatique de l’Ecole Normale Sup´erieure,
+<br/>ENS/INRIA/CNRS UMR 8548
+<br/><b>University of Oxford</b></td><td>('1877079', 'Neva Cherniavsky', 'neva cherniavsky')<br/>('1785596', 'Ivan Laptev', 'ivan laptev')<br/>('1782755', 'Josef Sivic', 'josef sivic')<br/>('1688869', 'Andrew Zisserman', 'andrew zisserman')</td><td></td></tr><tr><td>6193c833ad25ac27abbde1a31c1cabe56ce1515b</td><td>Trojaning Attack on Neural Networks
+<br/><b>Purdue University, 2Nanjing University</b></td><td>('3347155', 'Yingqi Liu', 'yingqi liu')<br/>('2026855', 'Shiqing Ma', 'shiqing ma')<br/>('3216258', 'Yousra Aafer', 'yousra aafer')<br/>('2547748', 'Wen-Chuan Lee', 'wen-chuan lee')<br/>('3293342', 'Juan Zhai', 'juan zhai')<br/>('3155328', 'Weihang Wang', 'weihang wang')<br/>('1771551', 'Xiangyu Zhang', 'xiangyu zhang')</td><td>liu1751@purdue.edu, ma229@purdue.edu, yaafer@purdue.edu, lee1938@purdue.edu, zhaijuan@nju.edu.cn,
+<br/>wang1315@cs.purdue.edu, xyzhang@cs.purdue.edu
+</td></tr><tr><td>614a7c42aae8946c7ad4c36b53290860f6256441</td><td>1
+<br/>Joint Face Detection and Alignment using
+<br/>Multi-task Cascaded Convolutional Networks
+</td><td>('3393556', 'Kaipeng Zhang', 'kaipeng zhang')<br/>('3152448', 'Zhanpeng Zhang', 'zhanpeng zhang')<br/>('32787758', 'Zhifeng Li', 'zhifeng li')<br/>('33427555', 'Yu Qiao', 'yu qiao')</td><td></td></tr><tr><td>614079f1a0d0938f9c30a1585f617fa278816d53</td><td>Automatic Detection of ADHD and ASD from Expressive Behaviour in
+<br/>RGBD Data
+<br/><b>School of Computer Science, The University of Nottingham</b><br/>2Nottingham City Asperger Service & ADHD Clinic
+<br/><b>Institute of Mental Health, The University of Nottingham</b></td><td>('2736086', 'Shashank Jaiswal', 'shashank jaiswal')<br/>('1795528', 'Michel F. Valstar', 'michel f. valstar')<br/>('38690723', 'Alinda Gillott', 'alinda gillott')<br/>('2491166', 'David Daley', 'david daley')</td><td></td></tr><tr><td>0d746111135c2e7f91443869003d05cde3044beb</td><td>PARTIAL FACE DETECTION FOR CONTINUOUS AUTHENTICATION
+<br/>(cid:63)Department of Electrical and Computer Engineering and the Center for Automation Research,
+<br/><b>Rutgers, The State University of New Jersey, 723 CoRE, 94 Brett Rd, Piscataway, NJ</b><br/><b>UMIACS, University of Maryland, College Park, MD</b><br/>§Google Inc., 1600 Amphitheatre Parkway, Mountain View, CA 94043
+</td><td>('3152615', 'Upal Mahbub', 'upal mahbub')<br/>('1741177', 'Vishal M. Patel', 'vishal m. patel')<br/>('2406413', 'Brandon Barbello', 'brandon barbello')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td>umahbub@umiacs.umd.edu, vishal.m.patel@rutgers.edu,
+<br/>dchandra@google.com, bbarbello@google.com, rama@umiacs.umd.edu
+</td></tr><tr><td>0da75b0d341c8f945fae1da6c77b6ec345f47f2a</td><td>121
+<br/>The Effect of Computer-Generated Descriptions on Photo-
+<br/>Sharing Experiences of People With Visual Impairments
+<br/><b>YUHANG ZHAO, Information Science, Cornell Tech, Cornell University</b><br/>SHAOMEI WU, Facebook Inc.
+<br/>LINDSAY REYNOLDS, Facebook Inc.
+<br/><b>SHIRI AZENKOT, Information Science, Cornell Tech, Cornell University</b><br/>Like sighted people, visually impaired people want to share photographs on social networking services, but
+<br/>find it difficult to identify and select photos from their albums. We aimed to address this problem by
+<br/>incorporating state-of-the-art computer-generated descriptions into Facebook’s photo-sharing feature. We
+<br/>interviewed 12 visually impaired participants to understand their photo-sharing experiences and designed
+<br/>a photo description feature for the Facebook mobile application. We evaluated this feature with six
+<br/>participants in a seven-day diary study. We found that participants used the descriptions to recall and
+<br/>organize their photos, but they hesitated to upload photos without a sighted person’s input. In addition to
+<br/>basic information about photo content, participants wanted to know more details about salient objects and
+<br/>people, and whether the photos reflected their personal aesthetic. We discuss these findings from the lens
+<br/>of self-disclosure and self-presentation theories and propose new computer vision research directions that
+<br/>will better support visual content sharing by visually impaired people.
+<br/>CCS Concepts: • Information interfaces and presentations → Multimedia and information systems; •
+<br/>Computer and society → Social issues
+<br/>impairments; computer-generated descriptions; SNSs; photo sharing; self-disclosure; self-
+<br/>KEYWORDS
+<br/>Visual
+<br/>presentation
+<br/>ACM Reference format:
+<br/>2017. The Effect of Computer-Generated Descriptions On Photo-Sharing Experiences of People With Visual
+<br/>Impairments. Proc. ACM Hum.-Comput. Interact. 1, 1. 121 (January 2017), 24 pages.
+<br/>DOI: 10.1145/3134756
+<br/>1 INTRODUCTION
+<br/>Sharing memories and experiences via photos is a common way to engage with others on social
+<br/>networking services (SNSs) [39,46,51]. For instance, Facebook users uploaded more than 350
+<br/>million photos a day [24] and Twitter, which initially supported only text in tweets, now has
+<br/>more than 28.4% of tweets containing images [39]. Visually impaired people (both blind and low
+<br/>vision) have a strong presence on SNS and are interested in sharing photos [50]. They take
+<br/>photos for the same reasons that sighted people do: sharing daily moments with their sighted
+<br/>
+<br/>Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee
+<br/>provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and
+<br/>the full citation on the first page. Copyrights for components of this work owned by others than ACM must be honored.
+</td><td></td><td></td></tr><tr><td>0d88ab0250748410a1bc990b67ab2efb370ade5d</td><td>Author(s) :
+<br/>ERROR HANDLING IN MULTIMODAL BIOMETRIC SYSTEMS USING
+<br/>RELIABILITY MEASURES (ThuPmOR6)
+<br/>(EPFL, Switzerland)
+<br/>(EPFL, Switzerland)
+<br/>(EPFL, Switzerland)
+<br/>(EPFL, Switzerland)
+<br/>Plamen Prodanov
+</td><td>('1753932', 'Krzysztof Kryszczuk', 'krzysztof kryszczuk')<br/>('1994765', 'Jonas Richiardi', 'jonas richiardi')<br/>('2439888', 'Andrzej Drygajlo', 'andrzej drygajlo')</td><td></td></tr><tr><td>0db43ed25d63d801ce745fe04ca3e8b363bf3147</td><td>Kernel Principal Component Analysis and its Applications in
+<br/>Face Recognition and Active Shape Models
+<br/><b>Rensselaer Polytechnic Institute, 110 Eighth Street, Troy, NY 12180 USA</b></td><td>('4019552', 'Quan Wang', 'quan wang')</td><td>wangq10@rpi.edu
+</td></tr><tr><td>0daf696253a1b42d2c9d23f1008b32c65a9e4c1e</td><td>Unsupervised Discovery of Facial Events
+<br/>CMU-RI-TR-10-10
+<br/>May 2010
+<br/><b>Robotics Institute</b><br/><b>Carnegie Mellon University</b><br/>Pittsburgh, Pennsylvania 15213
+<br/><b>c(cid:13) Carnegie Mellon University</b></td><td>('1757386', 'Feng Zhou', 'feng zhou')</td><td></td></tr><tr><td>0d538084f664b4b7c0e11899d08da31aead87c32</td><td>Deformable Part Descriptors for
+<br/>Fine-grained Recognition and Attribute Prediction
+<br/>Forrest Iandola1
+<br/><b>ICSI / UC Berkeley 2Brigham Young University</b></td><td>('40565777', 'Ning Zhang', 'ning zhang')<br/>('2071606', 'Ryan Farrell', 'ryan farrell')<br/>('1753210', 'Trevor Darrell', 'trevor darrell')</td><td>1{nzhang,forresti,trevor}@eecs.berkeley.edu
+<br/>2farrell@cs.byu.edu
+</td></tr><tr><td>0dccc881cb9b474186a01fd60eb3a3e061fa6546</td><td>Effective Face Frontalization in Unconstrained Images
+<br/><b>The open University of Israel. 2Adience</b><br/>Figure 1: Frontalized faces. Top: Input photos; bottom: our frontalizations,
+<br/>obtained without estimating 3D facial shapes.
+<br/>“Frontalization” is the process of synthesizing frontal facing views of faces
+<br/>appearing in single unconstrained photos. Recent reports have suggested
+<br/>that this process may substantially boost the performance of face recogni-
+<br/>tion systems. This, by transforming the challenging problem of recognizing
+<br/>faces viewed from unconstrained viewpoints to the easier problem of rec-
+<br/>ognizing faces in constrained, forward facing poses. Previous frontalization
+<br/>methods did this by attempting to approximate 3D facial shapes for each
+<br/>query image. We observe that 3D face shape estimation from unconstrained
+<br/>photos may be a harder problem than frontalization and can potentially in-
+<br/>troduce facial misalignments. Instead, we explore the simpler approach of
+<br/>using a single, unmodified, 3D surface as an approximation to the shape of
+<br/>all input faces. We show that this leads to a straightforward, efficient and
+<br/>easy to implement method for frontalization. More importantly, it produces
+<br/>aesthetic new frontal views and is surprisingly effective when used for face
+<br/>recognition and gender estimation.
+<br/>Observation 1: For frontalization, one rough estimate of the 3D facial shape
+<br/>seems as good as another, demonstrated by the following example:
+<br/>Figure 2: Frontalization process. (a) facial features detected on a query
+<br/>face and on a reference face (b) which was produced by rendering a tex-
+<br/>tured 3D, CG model (c); (d) 2D query coordinates and corresponding 3D
+<br/>coordinates on the model provide an estimated projection matrix, used to
+<br/>back-project query texture to the reference coordinate system; (e) estimated
+<br/>self-occlusions shown overlaid on the frontalized result (warmer colors re-
+<br/>flect more occlusions.) Facial appearances in these regions are borrowed
+<br/>from corresponding symmetric face regions; (f) our final frontalized result.
+<br/>The top row shows surfaces estimated for the same query (left) by Hass-
+<br/>ner [2] (mid) and DeepFaces [6] (right). Frontalizations are shown at the
+<br/>bottom using our single-3D approach (left), Hassner (mid) and DeepFaces
+<br/>(right). Clearly, both surfaces are rough approximations to the facial shape.
+<br/>Moreover, despite the different surfaces, all results seem qualitatively simi-
+<br/>lar, calling to question the need for shape estimation for frontalization.
+<br/>Result 1: A novel frontalization method using a single, unmodified 3D ref-
+<br/>erence shape is described in the paper (illustrated in Fig. 2).
+<br/>Observation 2: A single, unmodified 3D reference shape produces aggres-
+<br/>sively aligned faces, as can be observed in Fig. 3.
+<br/>Result 2: Frontalized, strongly aligned faces elevate LFW [5] verification
+<br/>accuracy and gender estimation rates on the Adience benchmark [1].
+<br/>Conclusion: On the role of 2D appearance vs. 3D shape in face recognition,
+<br/>our results suggest that 3D shape estimation may be unnecessary.
+</td><td>('1756099', 'Tal Hassner', 'tal hassner')<br/>('35840854', 'Shai Harel', 'shai harel')<br/>('1753918', 'Eran Paz', 'eran paz')<br/>('1792038', 'Roee Enbar', 'roee enbar')</td><td></td></tr><tr><td>0d467adaf936b112f570970c5210bdb3c626a717</td><td></td><td></td><td></td></tr><tr><td>0d6b28691e1aa2a17ffaa98b9b38ac3140fb3306</td><td>Review of Perceptual Resemblance of Local
+<br/>Plastic Surgery Facial Images using Near Sets
+<br/>1,2 Department of Computer Technology,
+<br/>YCCE Nagpur, India
+</td><td>('9083090', 'Prachi V. Wagde', 'prachi v. wagde')<br/>('9218400', 'Roshni Khedgaonkar', 'roshni khedgaonkar')</td><td></td></tr><tr><td>0de91641f37b0a81a892e4c914b46d05d33fd36e</td><td>RAPS: Robust and Efficient Automatic Construction of Person-Specific
+<br/>Deformable Models
+<br/>∗Department of Computing,
+<br/><b>Imperial College London</b><br/>180 Queens Gate,
+<br/>†EEMCS,
+<br/><b>University of Twente</b><br/>Drienerlolaan 5,
+<br/>London SW7 2AZ, U.K.
+<br/>7522 NB Enschede, The Netherlands
+</td><td>('3320415', 'Christos Sagonas', 'christos sagonas')<br/>('1780393', 'Yannis Panagakis', 'yannis panagakis')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')<br/>('1694605', 'Maja Pantic', 'maja pantic')</td><td>{c.sagonas, i.panagakis, s.zafeiriou, m.pantic}@imperial.ac.uk
+</td></tr><tr><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td><td>Appearance-Based Gaze Estimation in the Wild
+<br/>1Perceptual User Interfaces Group, 2Scalable Learning and Perception Group
+<br/><b>Max Planck Institute for Informatics, Saarbr ucken, Germany</b></td><td>('2520795', 'Xucong Zhang', 'xucong zhang')<br/>('1751242', 'Yusuke Sugano', 'yusuke sugano')<br/>('1739548', 'Mario Fritz', 'mario fritz')<br/>('3194727', 'Andreas Bulling', 'andreas bulling')</td><td>{xczhang,sugano,mfritz,bulling}@mpi-inf.mpg.de
+</td></tr><tr><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td><td>A DATA-DRIVEN APPROACH TO CLEANING LARGE FACE DATASETS
+<br/><b>Advanced Digital Sciences Center (ADSC), University of Illinois at Urbana-Champaign, Singapore</b></td><td>('1702224', 'Stefan Winkler', 'stefan winkler')</td><td></td></tr><tr><td>0db8e6eb861ed9a70305c1839eaef34f2c85bbaf</td><td></td><td></td><td></td></tr><tr><td>0d0b880e2b531c45ee8227166a489bf35a528cb9</td><td>Structure Preserving Object Tracking
+<br/><b>Computer Vision Lab, Delft University of Technology</b><br/>Mekelweg 4, 2628 CD Delft, The Netherlands
+</td><td>('2883723', 'Lu Zhang', 'lu zhang')<br/>('1803520', 'Laurens van der Maaten', 'laurens van der maaten')</td><td>{lu.zhang, l.j.p.vandermaaten}@tudelft.nl
+</td></tr><tr><td>0d3882b22da23497e5de8b7750b71f3a4b0aac6b</td><td>Research Article
+<br/>Context Is Routinely Encoded
+<br/>During Emotion Perception
+<br/>21(4) 595 –599
+<br/>© The Author(s) 2010
+<br/>Reprints and permission:
+<br/>sagepub.com/journalsPermissions.nav
+<br/>DOI: 10.1177/0956797610363547
+<br/>http://pss.sagepub.com
+<br/><b>Boston College; 2Psychiatric Neuroimaging Program, Massachusetts General Hospital, Harvard Medical School; and 3Athinoula A. Martinos</b><br/>Center for Biomedical Imaging, Massachusetts General Hospital, Harvard Medical School
+</td><td>('1731779', 'Lisa Feldman Barrett', 'lisa feldman barrett')</td><td></td></tr><tr><td>0dbf4232fcbd52eb4599dc0760b18fcc1e9546e9</td><td></td><td></td><td></td></tr><tr><td>0d760e7d762fa449737ad51431f3ff938d6803fe</td><td>LCDet: Low-Complexity Fully-Convolutional Neural Networks for
+<br/>Object Detection in Embedded Systems
+<br/>UC San Diego ∗
+<br/>Gokce Dane
+<br/>Qualcomm Inc.
+<br/>UC San Diego
+<br/>Qualcomm Inc.
+<br/>UC San Diego
+</td><td>('2906509', 'Subarna Tripathi', 'subarna tripathi')<br/>('1801046', 'Byeongkeun Kang', 'byeongkeun kang')<br/>('3484765', 'Vasudev Bhaskaran', 'vasudev bhaskaran')<br/>('30518518', 'Truong Nguyen', 'truong nguyen')</td><td>stripathi@ucsd.edu
+<br/>gokced@qti.qualcomm.com
+<br/>bkkang@ucsd.edu
+<br/>vasudevb@qti.qualcomm.com
+<br/>tqn001@eng.ucsd.edu
+</td></tr><tr><td>0d3068b352c3733c9e1cc75e449bf7df1f7b10a4</td><td>Context based Facial Expression Analysis in the
+<br/>Wild
+<br/><b>School of Computer Science, CECS, Australian National University, Australia</b><br/>http://users.cecs.anu.edu.au/∼adhall
+</td><td>('1735697', 'Abhinav Dhall', 'abhinav dhall')</td><td>abhinav.dhall@anu.edu.au
+</td></tr><tr><td>0dd72887465046b0f8fc655793c6eaaac9c03a3d</td><td>Real-time Head Orientation from a Monocular
+<br/>Camera using Deep Neural Network
+<br/>KAIST, Republic of Korea
+</td><td>('3250619', 'Byungtae Ahn', 'byungtae ahn')<br/>('2870153', 'Jaesik Park', 'jaesik park')</td><td>[btahn,jspark]@rcv.kaist.ac.kr, iskweon77@kaist.ac.kr
+</td></tr><tr><td>0d087aaa6e2753099789cd9943495fbbd08437c0</td><td></td><td></td><td></td></tr><tr><td>0d8415a56660d3969449e77095be46ef0254a448</td><td></td><td></td><td></td></tr><tr><td>0dfa460a35f7cab4705726b6367557b9f7842c65</td><td>Modeling Spatial-Temporal Clues in a Hybrid Deep
+<br/>Learning Framework for Video Classification
+<br/>School of Computer Science, Shanghai Key Lab of Intelligent Information Processing,
+<br/><b>Fudan University, Shanghai, China</b></td><td>('3099139', 'Zuxuan Wu', 'zuxuan wu')<br/>('31825486', 'Xi Wang', 'xi wang')<br/>('1717861', 'Yu-Gang Jiang', 'yu-gang jiang')<br/>('1743864', 'Hao Ye', 'hao ye')<br/>('1713721', 'Xiangyang Xue', 'xiangyang xue')</td><td>{zxwu, xwang10, ygj, haoye10, xyxue}@fudan.edu.cn
+</td></tr><tr><td>0d14261e69a4ad4140ce17c1d1cea76af6546056</td><td>Adding Facial Actions into 3D Model Search to Analyse
+<br/>Behaviour in an Unconstrained Environment
+<br/><b>Imaging Science and Biomedical Engineering, The University of Manchester, UK</b></td><td>('1753123', 'Angela Caunce', 'angela caunce')</td><td></td></tr><tr><td>0dbacb4fd069462841ebb26e1454b4d147cd8e98</td><td>Recent Advances in Discriminant Non-negative
+<br/>Matrix Factorization
+<br/><b>Aristotle University of Thessaloniki</b><br/>Thessaloniki, Greece, 54124
+</td><td>('1793625', 'Symeon Nikitidis', 'symeon nikitidis')<br/>('1737071', 'Anastasios Tefas', 'anastasios tefas')<br/>('1698588', 'Ioannis Pitas', 'ioannis pitas')</td><td>Email: {nikitidis,tefas,pitas}@aiia.csd.auth.gr
+</td></tr><tr><td>0db36bf08140d53807595b6313201a7339470cfe</td><td>Moving Vistas: Exploiting Motion for Describing Scenes
+<br/>Department of Electrical and Computer Engineering
+<br/><b>Center for Automation Research, UMIACS, University of Maryland, College Park, MD</b></td><td>('34711525', 'Nitesh Shroff', 'nitesh shroff')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td>{nshroff,pturaga,rama}@umiacs.umd.edu
+</td></tr><tr><td>0d781b943bff6a3b62a79e2c8daf7f4d4d6431ad</td><td>EmotiW 2016: Video and Group-Level Emotion
+<br/>Recognition Challenges
+<br/>Roland Goecke
+<br/>David R. Cheriton School of
+<br/>Human-Centred Technology
+<br/>David R. Cheriton School of
+<br/>Computer Science
+<br/><b>University of Waterloo</b><br/>Canada
+<br/><b>University of Canberra</b><br/>Centre
+<br/>Australia
+<br/>Computer Science
+<br/><b>University of Waterloo</b><br/>Canada
+<br/>Tom Gedeon
+<br/>David R. Cheriton School of
+<br/>Information Human Centred
+<br/>Computer Science
+<br/><b>University of Waterloo</b><br/>Canada
+<br/><b>Australian National University</b><br/>Computing
+<br/>Australia
+</td><td>('1735697', 'Abhinav Dhall', 'abhinav dhall')<br/>('2942991', 'Jyoti Joshi', 'jyoti joshi')<br/>('1773895', 'Jesse Hoey', 'jesse hoey')</td><td>abhinav.dhall@uwaterloo.ca
+<br/>roland.goecke@ieee.org
+<br/>jyoti.joshi@uwaterloo.ca
+<br/>jhoey@cs.uwaterloo.ca
+<br/>tom.gedeon@anu.edu.au
+</td></tr><tr><td>0d735e7552af0d1dcd856a8740401916e54b7eee</td><td></td><td></td><td></td></tr><tr><td>0d06b3a4132d8a2effed115a89617e0a702c957a</td><td></td><td></td><td></td></tr><tr><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td><td></td><td></td><td></td></tr><tr><td>0d33b6c8b4d1a3cb6d669b4b8c11c2a54c203d1a</td><td>Detection and Tracking of Faces in Videos: A Review
+<br/>© 2016 IJEDR | Volume 4, Issue 2 | ISSN: 2321-9939
+<br/>of Related Work
+<br/>1Student, 2Assistant Professor
+<br/>1, 2Dept. of Electronics & Comm., S S I E T, Punjab, India
+<br/>________________________________________________________________________________________________________
+</td><td>('48816689', 'Seema Saini', 'seema saini')</td><td></td></tr><tr><td>0d1d9a603b08649264f6e3b6d5a66bf1e1ac39d2</td><td><b>University of Nebraska - Lincoln</b><br/>US Army Research
+<br/>2015
+<br/>U.S. Department of Defense
+<br/>Effects of emotional expressions on persuasion
+<br/><b>University of Southern California</b><br/><b>University of Southern California</b><br/><b>University of Southern California</b><br/><b>University of Southern California</b><br/>Follow this and additional works at: http://digitalcommons.unl.edu/usarmyresearch
+<br/>Wang, Yuqiong; Lucas, Gale; Khooshabeh, Peter; de Melo, Celso; and Gratch, Jonathan, "Effects of emotional expressions on
+<br/>persuasion" (2015). US Army Research. Paper 340.
+<br/>http://digitalcommons.unl.edu/usarmyresearch/340
+</td><td>('2522587', 'Yuqiong Wang', 'yuqiong wang')<br/>('2419453', 'Gale Lucas', 'gale lucas')<br/>('2635945', 'Peter Khooshabeh', 'peter khooshabeh')<br/>('1977901', 'Celso de Melo', 'celso de melo')<br/>('1730824', 'Jonathan Gratch', 'jonathan gratch')</td><td>DigitalCommons@University of Nebraska - Lincoln
+<br/>University of Southern California, wangyuqiong@ymail.com
+<br/>This Article is brought to you for free and open access by the U.S. Department of Defense at DigitalCommons@University of Nebraska - Lincoln. It has
+<br/>been accepted for inclusion in US Army Research by an authorized administrator of DigitalCommons@University of Nebraska - Lincoln.
+</td></tr><tr><td>0da4c3d898ca2fff9e549d18f513f4898e960aca</td><td>Wang, Y., Thomas, J., Weissgerber, S. C., Kazemini, S., Ul-Haq, I., &
+<br/>Quadflieg, S. (2015). The Headscarf Effect Revisited: Further Evidence for a
+<br/>336. 10.1068/p7940
+<br/>Peer reviewed version
+<br/>Link to published version (if available):
+<br/>10.1068/p7940
+<br/>Link to publication record in Explore Bristol Research
+<br/>PDF-document
+<br/><b>University of Bristol - Explore Bristol Research</b><br/>General rights
+<br/>This document is made available in accordance with publisher policies. Please cite only the published
+<br/>version using the reference above. Full terms of use are available:
+<br/>http://www.bristol.ac.uk/pure/about/ebr-terms.html
+<br/>Take down policy
+<br/>Explore Bristol Research is a digital archive and the intention is that deposited content should not be
+<br/>removed. However, if you believe that this version of the work breaches copyright law please contact
+<br/>• Your contact details
+<br/><b>Bibliographic details for the item, including a URL</b><br/>• An outline of the nature of the complaint
+<br/>On receipt of your message the Open Access Team will immediately investigate your claim, make an
+<br/>initial judgement of the validity of the claim and, where appropriate, withdraw the item in question
+<br/>from public view.
+<br/> </td><td></td><td>open-access@bristol.ac.uk and include the following information in your message:
+</td></tr><tr><td>951368a1a8b3c5cd286726050b8bdf75a80f7c37</td><td>A Family of Online Boosting Algorithms
+<br/><b>University of California, San Diego</b><br/><b>University of California, Merced</b><br/><b>University of California, San Diego</b></td><td>('2490700', 'Boris Babenko', 'boris babenko')<br/>('37144787', 'Ming-Hsuan Yang', 'ming-hsuan yang')<br/>('1769406', 'Serge Belongie', 'serge belongie')</td><td>bbabenko@cs.ucsd.edu
+<br/>mhyang@ucmerced.edu
+<br/>sjb@cs.ucsd.edu
+</td></tr><tr><td>956e9b69b3366ed3e1670609b53ba4a7088b8b7e</td><td>Semi-supervised dimensionality reduction for image retrieval
+<br/><b>aIBM China Research Lab, Beijing, China</b><br/><b>bTsinghua University, Beijing, China</b></td><td></td><td></td></tr><tr><td>956317de62bd3024d4ea5a62effe8d6623a64e53</td><td>Lighting Analysis and Texture Modification of 3D Human
+<br/>Face Scans
+<br/>Author
+<br/>Zhang, Paul, Zhao, Sanqiang, Gao, Yongsheng
+<br/>Published
+<br/>2007
+<br/>Conference Title
+<br/>Digital Image Computing Techniques and Applications
+<br/>DOI
+<br/>https://doi.org/10.1109/DICTA.2007.4426825
+<br/>Copyright Statement
+<br/>© 2007 IEEE. Personal use of this material is permitted. However, permission to reprint/
+<br/>republish this material for advertising or promotional purposes or for creating new collective
+<br/>works for resale or redistribution to servers or lists, or to reuse any copyrighted component of
+<br/>this work in other works must be obtained from the IEEE.
+<br/>Downloaded from
+<br/>http://hdl.handle.net/10072/17889
+<br/>Link to published version
+<br/>http://www.ieee.org/
+<br/>Griffith Research Online
+<br/>https://research-repository.griffith.edu.au
+</td><td></td><td></td></tr><tr><td>959bcb16afdf303c34a8bfc11e9fcc9d40d76b1c</td><td>Temporal Coherency based Criteria for Predicting
+<br/>Video Frames using Deep Multi-stage Generative
+<br/>Adversarial Networks
+<br/>Visualization and Perception Laboratory
+<br/>Department of Computer Science and Engineering
+<br/><b>Indian Institute of Technology Madras, Chennai, India</b></td><td>('29901316', 'Prateep Bhattacharjee', 'prateep bhattacharjee')<br/>('1680398', 'Sukhendu Das', 'sukhendu das')</td><td>1prateepb@cse.iitm.ac.in, 2sdas@iitm.ac.in
+</td></tr><tr><td>951f21a5671a4cd14b1ef1728dfe305bda72366f</td><td>International Journal of Science and Research (IJSR)
+<br/>ISSN (Online): 2319-7064
+<br/>Impact Factor (2012): 3.358
+<br/>Use of ℓ2/3-norm Sparse Representation for Facial
+<br/>Expression Recognition
+<br/><b>MATS University, MATS School of Engineering and Technology, Arang, Raipur, India</b><br/><b>MATS University, MATS School of Engineering and Technology, Arang, Raipur, India</b><br/>in
+<br/>three
+<br/>to discriminate
+<br/>it
+<br/>from
+<br/>represents emotion,
+</td><td></td><td></td></tr><tr><td>95f26d1c80217706c00b6b4b605a448032b93b75</td><td>New Robust Face Recognition Methods Based on Linear
+<br/>Regression
+<br/><b>Bio-Computing Research Center, Shenzhen Graduate School, Harbin Institute of Technology, Shenzhen, Guangdong Province, China, 2 Key Laboratory of Network</b><br/>Oriented Intelligent Computation, Shenzhen, Guangdong Province, China
+</td><td>('2208128', 'Jian-Xun Mi', 'jian-xun mi')<br/>('2650895', 'Jin-Xing Liu', 'jin-xing liu')<br/>('40342210', 'Jiajun Wen', 'jiajun wen')</td><td></td></tr><tr><td>95f12d27c3b4914e0668a268360948bce92f7db3</td><td>Interactive Facial Feature Localization
+<br/><b>University of Illinois at Urbana Champaign, Urbana, IL 61801, USA</b><br/>2 Adobe Systems Inc., San Jose, CA 95110, USA
+<br/>3 Facebook Inc., Menlo Park, CA 94025, USA
+</td><td>('36474335', 'Vuong Le', 'vuong le')<br/>('1721019', 'Jonathan Brandt', 'jonathan brandt')<br/>('1739208', 'Thomas S. Huang', 'thomas s. huang')</td><td></td></tr><tr><td>9547a7bce2b85ef159b2d7c1b73dea82827a449f</td><td>Facial Expression Recognition Using Gabor Motion Energy Filters
+<br/>Dept. Computer Science Engineering
+<br/>UC San Diego
+<br/>Marian S. Bartlett
+<br/><b>Institute for Neural Computation</b><br/>UC San Diego
+</td><td>('4072965', 'Tingfan Wu', 'tingfan wu')<br/>('1741200', 'Javier R. Movellan', 'javier r. movellan')</td><td>tingfan@gmail.com
+<br/>{marni,movellan}@mplab.ucsd.edu
+</td></tr><tr><td>9513503867b29b10223f17c86e47034371b6eb4f</td><td>Comparison of optimisation algorithms for
+<br/>deformable template matching
+<br/><b>Link oping University, Computer Vision Laboratory</b><br/>ISY, SE-581 83 Link¨oping, SWEDEN
+</td><td>('1797883', 'Vasileios Zografos', 'vasileios zografos')</td><td>zografos@isy.liu.se ⋆
+</td></tr><tr><td>955e2a39f51c0b6f967199942d77625009e580f9</td><td>NAMING FACES ON THE WEB
+<br/>a thesis
+<br/>submitted to the department of computer engineering
+<br/><b>and the institute of engineering and science</b><br/><b>of bilkent university</b><br/>in partial fulfillment of the requirements
+<br/>for the degree of
+<br/>master of science
+<br/>By
+<br/>July, 2010
+</td><td>('34946851', 'Hilal Zitouni', 'hilal zitouni')</td><td></td></tr><tr><td>956c634343e49319a5e3cba4f2bd2360bdcbc075</td><td>IEEE TRANSACTIONS ON SYSTEMS, MAN, AND CYBERNETICS—PART B: CYBERNETICS, VOL. 36, NO. 4, AUGUST 2006
+<br/>873
+<br/>A Novel Incremental Principal Component Analysis
+<br/>and Its Application for Face Recognition
+</td><td>('1776124', 'Haitao Zhao', 'haitao zhao')<br/>('1768574', 'Pong Chi Yuen', 'pong chi yuen')</td><td></td></tr><tr><td>95ea564bd983129ddb5535a6741e72bb1162c779</td><td>Multi-Task Learning by Deep Collaboration and
+<br/>Application in Facial Landmark Detection
+<br/><b>Laval University, Qu bec, Canada</b></td><td>('2758280', 'Ludovic Trottier', 'ludovic trottier')<br/>('2310695', 'Philippe Giguère', 'philippe giguère')<br/>('1700926', 'Brahim Chaib-draa', 'brahim chaib-draa')</td><td>ludovic.trottier.1@ulaval.ca
+<br/>{philippe.giguere,brahim.chaib-draa}@ift.ulaval.ca
+</td></tr><tr><td>958c599a6f01678513849637bec5dc5dba592394</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Generalized Zero-Shot Learning for Action
+<br/>Recognition with Web-Scale Video Data
+<br/>Received: date / Accepted: date
+</td><td>('2473509', 'Kun Liu', 'kun liu')<br/>('8984539', 'Wenbing Huang', 'wenbing huang')</td><td></td></tr><tr><td>950171acb24bb24a871ba0d02d580c09829de372</td><td>Speeding up 2D-Warping for Pose-Invariant Face Recognition
+<br/><b>Human Language Technology and Pattern Recognition Group, RWTH Aachen University, Germany</b></td><td>('1804963', 'Harald Hanselmann', 'harald hanselmann')<br/>('1685956', 'Hermann Ney', 'hermann ney')</td><td>surname@cs.rwth-aachen.de
+</td></tr><tr><td>59be98f54bb4ed7a2984dc6a3c84b52d1caf44eb</td><td>A Deep-Learning Approach to Facial Expression Recognition
+<br/>with Candid Images
+<br/><b>CUNY City College</b><br/>Alibaba. Inc
+<br/><b>IBM China Research Lab</b><br/><b>CUNY Graduate Center and City College</b></td><td>('40617554', 'Wei Li', 'wei li')<br/>('1713016', 'Min Li', 'min li')<br/>('1703625', 'Zhong Su', 'zhong su')<br/>('4697712', 'Zhigang Zhu', 'zhigang zhu')</td><td>lwei000@citymail.cuny.edu
+<br/>mushi.lm@alibaba.inc
+<br/>suzhong@cn.ibm.com
+<br/>zhu@cs.ccny.cuny.edu
+</td></tr><tr><td>59fc69b3bc4759eef1347161e1248e886702f8f7</td><td>Final Report of Final Year Project
+<br/>HKU-Face: A Large Scale Dataset for
+<br/>Deep Face Recognition
+<br/>3035141841
+<br/>COMP4801 Final Year Project
+<br/>Project Code: 17007
+</td><td>('40456402', 'Haoyu Li', 'haoyu li')</td><td></td></tr><tr><td>591a737c158be7b131121d87d9d81b471c400dba</td><td>Affect Valence Inference From Facial Action Unit Spectrograms
+<br/>MIT Media Lab
+<br/>MA 02139, USA
+<br/>MIT Media Lab
+<br/>MA 02139, USA
+<br/><b>Harvard University</b><br/>MA 02138, USA
+<br/>Rosalind Picard
+<br/>MIT Media Lab
+<br/>MA 02139, USA
+</td><td>('1801452', 'Daniel McDuff', 'daniel mcduff')<br/>('1754451', 'Rana El Kaliouby', 'rana el kaliouby')<br/>('2010950', 'Karim Kassam', 'karim kassam')</td><td>djmcduff@mit.edu
+<br/>kaliouby@mit.edu
+<br/>kskassam@fas.harvard.edu
+<br/>picard@mit.edu
+</td></tr><tr><td>59bfeac0635d3f1f4891106ae0262b81841b06e4</td><td>Face Verification Using the LARK Face
+<br/>Representation
+</td><td>('3326805', 'Hae Jong Seo', 'hae jong seo')<br/>('1718280', 'Peyman Milanfar', 'peyman milanfar')</td><td></td></tr><tr><td>59efb1ac77c59abc8613830787d767100387c680</td><td>DIF : Dataset of Intoxicated Faces for Drunk Person
+<br/>Identification
+<br/><b>Indian Institute of Technology Ropar</b><br/><b>Indian Institute of Technology Ropar</b></td><td>('46241736', 'Devendra Pratap Yadav', 'devendra pratap yadav')<br/>('1735697', 'Abhinav Dhall', 'abhinav dhall')</td><td>2014csb1010@iitrpr.ac.in
+<br/>abhinav@iitrpr.ac.in
+</td></tr><tr><td>590628a9584e500f3e7f349ba7e2046c8c273fcf</td><td></td><td></td><td></td></tr><tr><td>593234ba1d2e16a887207bf65d6b55bbc7ea2247</td><td>Combining Language Sources and Robust
+<br/>Semantic Relatedness for Attribute-Based
+<br/>Knowledge Transfer
+<br/>1 Department of Computer Science, TU Darmstadt
+<br/><b>Max Planck Institute for Informatics, Saarbr ucken, Germany</b></td><td>('34849128', 'Marcus Rohrbach', 'marcus rohrbach')<br/>('37718254', 'Michael Stark', 'michael stark')<br/>('1697100', 'Bernt Schiele', 'bernt schiele')</td><td></td></tr><tr><td>59eefa01c067a33a0b9bad31c882e2710748ea24</td><td>IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY
+<br/>Fast Landmark Localization
+<br/>with 3D Component Reconstruction and CNN for
+<br/>Cross-Pose Recognition
+</td><td>('24020847', 'Hung-Cheng Shie', 'hung-cheng shie')<br/>('9640380', 'Cheng-Hua Hsieh', 'cheng-hua hsieh')</td><td></td></tr><tr><td>59e2037f5079794cb9128c7f0900a568ced14c2a</td><td>Clothing and People - A Social Signal Processing Perspective
+<br/><b>Faculty of Mathematics and Computer Science, University of Barcelona, Barcelona, Spain</b><br/>2 Computer Vision Center, Barcelona, Spain
+<br/><b>University of Verona, Verona, Italy</b></td><td>('2084534', 'Maedeh Aghaei', 'maedeh aghaei')<br/>('10724083', 'Federico Parezzan', 'federico parezzan')<br/>('2837527', 'Mariella Dimiccoli', 'mariella dimiccoli')<br/>('1724155', 'Petia Radeva', 'petia radeva')<br/>('1723008', 'Marco Cristani', 'marco cristani')</td><td></td></tr><tr><td>59dac8b460a89e03fa616749a08e6149708dcc3a</td><td>A Convergent Solution to Matrix Bidirectional Projection Based Feature
+<br/>Extraction with Application to Face Recognition ∗
+<br/><b>School of Computer, National University of Defense Technology</b><br/>No 137, Yanwachi Street, Kaifu District,
+<br/>Changsha, Hunan Province, 410073, P.R. China
+</td><td>('3144121', 'Yubin Zhan', 'yubin zhan')<br/>('1969736', 'Jianping Yin', 'jianping yin')<br/>('33793976', 'Xinwang Liu', 'xinwang liu')</td><td>E-mail: {YubinZhan,JPYin,XWLiu}@nudt.edu.cn
+</td></tr><tr><td>59e9934720baf3c5df3a0e1e988202856e1f83ce</td><td>UA-DETRAC: A New Benchmark and Protocol for
+<br/>Multi-Object Detection and Tracking
+<br/><b>University at Albany, SUNY</b><br/>2 School of Computer and Control Engineering, UCAS
+<br/>3 Department of Electrical and Computer Engineering, UCSD
+<br/>4 National Laboratory of Pattern Recognition, CASIA
+<br/><b>University at Albany, SUNY</b><br/><b>Division of Computer Science and Engineering, Hanyang University</b><br/>7 Electrical Engineering and Computer Science, UCM
+</td><td>('39774417', 'Longyin Wen', 'longyin wen')<br/>('1910738', 'Dawei Du', 'dawei du')<br/>('1773408', 'Zhaowei Cai', 'zhaowei cai')<br/>('39643145', 'Ming-Ching Chang', 'ming-ching chang')<br/>('3245785', 'Honggang Qi', 'honggang qi')<br/>('33047058', 'Jongwoo Lim', 'jongwoo lim')<br/>('1715634', 'Ming-Hsuan Yang', 'ming-hsuan yang')</td><td></td></tr><tr><td>59d225486161b43b7bf6919b4a4b4113eb50f039</td><td>Complex Event Recognition from Images with Few Training Examples
+<br/>Irfan Essa∗
+<br/><b>Georgia Institute of Technology</b><br/><b>University of Southern California</b></td><td>('2308598', 'Unaiza Ahsan', 'unaiza ahsan')<br/>('1726241', 'Chen Sun', 'chen sun')<br/>('1945508', 'James Hays', 'james hays')</td><td>uahsan3@gatech.edu
+<br/>chensun@google.com
+<br/>hays@gatech.edu
+<br/>irfan@cc.gatech.edu
+</td></tr><tr><td>5945464d47549e8dcaec37ad41471aa70001907f</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Every Moment Counts: Dense Detailed Labeling of Actions in Complex
+<br/>Videos
+<br/>Received: date / Accepted: date
+</td><td>('34149749', 'Serena Yeung', 'serena yeung')<br/>('3216322', 'Li Fei-Fei', 'li fei-fei')</td><td></td></tr><tr><td>59c9d416f7b3d33141cc94567925a447d0662d80</td><td>Universität des Saarlandes
+<br/>Max-Planck-Institut für Informatik
+<br/>AG5
+<br/>Matrix factorization over max-times
+<br/>algebra for data mining
+<br/>Masterarbeit im Fach Informatik
+<br/>Master’s Thesis in Computer Science
+<br/>von / by
+<br/>angefertigt unter der Leitung von / supervised by
+<br/>begutachtet von / reviewers
+<br/>November 2013
+<br/>UNIVERSITASSARAVIENSIS </td><td>('2297723', 'Sanjar Karaev', 'sanjar karaev')<br/>('1804891', 'Pauli Miettinen', 'pauli miettinen')<br/>('1804891', 'Pauli Miettinen', 'pauli miettinen')<br/>('1751591', 'Gerhard Weikum', 'gerhard weikum')</td><td></td></tr><tr><td>59bece468ed98397d54865715f40af30221aa08c</td><td>Deformable Part-based Robust Face Detection
+<br/>under Occlusion by Using Face Decomposition
+<br/>into Face Components
+<br/>Darijan Marčetić, Slobodan Ribarić
+<br/><b>University of Zagreb, Faculty of Electrical Engineering and Computing, Croatia</b></td><td></td><td>{darijan.marcetic, slobodan.ribaric}@fer.hr
+</td></tr><tr><td>59a35b63cf845ebf0ba31c290423e24eb822d245</td><td>The FaceSketchID System: Matching Facial
+<br/>Composites to Mugshots
+<br/>tedious, and may not
+</td><td>('34393045', 'Hu Han', 'hu han')<br/>('6680444', 'Anil K. Jain', 'anil k. jain')</td><td></td></tr><tr><td>59f325e63f21b95d2b4e2700c461f0136aecc171</td><td>3070
+<br/>978-1-4577-1302-6/11/$26.00 ©2011 IEEE
+<br/>FOR FACE RECOGNITION
+<br/>1. INTRODUCTION
+</td><td></td><td></td></tr><tr><td>59420fd595ae745ad62c26ae55a754b97170b01f</td><td>Objects as Attributes for Scene Classification
+<br/><b>Stanford University</b></td><td>('33642044', 'Li-Jia Li', 'li-jia li')<br/>('2888806', 'Hao Su', 'hao su')<br/>('7892285', 'Yongwhan Lim', 'yongwhan lim')<br/>('3216322', 'Li Fei-Fei', 'li fei-fei')</td><td></td></tr><tr><td>599adc0dcd4ebcc2a868feedd243b5c3c1bd1d0a</td><td>How Robust is 3D Human Pose Estimation to Occlusion?
+<br/><b>Visual Computing Institute, RWTH Aachen University</b><br/>2Robert Bosch GmbH, Corporate Research
+</td><td>('2699877', 'Timm Linder', 'timm linder')<br/>('1789756', 'Bastian Leibe', 'bastian leibe')</td><td>{sarandi,leibe}@vision.rwth-aachen.de
+<br/>{timm.linder,kaioliver.arras}@de.bosch.com
+</td></tr><tr><td>5922e26c9eaaee92d1d70eae36275bb226ecdb2e</td><td>Boosting Classification Based Similarity
+<br/>Learning by using Standard Distances
+<br/>Departament d’Informàtica, Universitat de València
+<br/>Av. de la Universitat s/n. 46100-Burjassot (Spain)
+</td><td>('2275648', 'Emilia López-Iñesta', 'emilia lópez-iñesta')<br/>('3138833', 'Miguel Arevalillo-Herráez', 'miguel arevalillo-herráez')<br/>('2627759', 'Francisco Grimaldo', 'francisco grimaldo')</td><td>eloi@alumni.uv.es,miguel.arevalillo@uv.es
+<br/>francisco.grimaldo@uv.es
+</td></tr><tr><td>59d8fa6fd91cdb72cd0fa74c04016d79ef5a752b</td><td>The Menpo Facial Landmark Localisation Challenge:
+<br/>A step towards the solution
+<br/>Department of Computing
+<br/><b>Imperial College London</b></td><td>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')<br/>('2814229', 'George Trigeorgis', 'george trigeorgis')<br/>('1688922', 'Grigorios Chrysos', 'grigorios chrysos')<br/>('3234063', 'Jiankang Deng', 'jiankang deng')<br/>('1719912', 'Jie Shen', 'jie shen')</td><td>{s.zafeiriou, g.trigeorgis, g.chrysos, j.deng16, jie.shen07}@imperial.ac.uk
+</td></tr><tr><td>59e75aad529b8001afc7e194e21668425119b864</td><td>Membrane Nonrigid Image Registration
+<br/>Department of Computer Science
+<br/><b>Drexel University</b><br/>Philadelphia, PA
+</td><td>('1708819', 'Ko Nishino', 'ko nishino')</td><td></td></tr><tr><td>59d45281707b85a33d6f50c6ac6b148eedd71a25</td><td>Rank Minimization across Appearance and Shape for AAM Ensemble Fitting
+<br/>2The Commonwealth Scientific and Industial Research Organization (CSIRO)
+<br/><b>Queensland University of Technology</b></td><td>('2699730', 'Xin Cheng', 'xin cheng')<br/>('1729760', 'Sridha Sridharan', 'sridha sridharan')<br/>('1820249', 'Simon Lucey', 'simon lucey')</td><td>1{x2.cheng,s.sridharan}@qut.edu.au
+<br/>2{jason.saragih,simon.lucey}@csiro.au
+</td></tr><tr><td>59319c128c8ac3c88b4ab81088efe8ae9c458e07</td><td>Effective Computer Model For Recognizing
+<br/>Nationality From Frontal Image
+<br/>Bat-Erdene.B
+<br/>Information and Communication Management School
+<br/><b>The University of the Humanities</b><br/>Ulaanbaatar, Mongolia
+</td><td></td><td>e-mail: basubaer@gmail.com
+</td></tr><tr><td>59a6c9333c941faf2540979dcfcb5d503a49b91e</td><td>Sampling Clustering
+<br/><b>School of Computer Science and Technology, Shandong University, China</b></td><td>('51016741', 'Ching Tarn', 'ching tarn')<br/>('2413471', 'Yinan Zhang', 'yinan zhang')<br/>('48260402', 'Ye Feng', 'ye feng')</td><td>∗i@ctarn.io
+</td></tr><tr><td>59031a35b0727925f8c47c3b2194224323489d68</td><td>Sparse Variation Dictionary Learning for Face Recognition with A Single
+<br/>Training Sample Per Person
+<br/>ETH Zurich
+<br/>Switzerland
+</td><td>('5828998', 'Meng Yang', 'meng yang')<br/>('1681236', 'Luc Van Gool', 'luc van gool')</td><td>{yang,vangool}@vision.ee.ethz.ch
+</td></tr><tr><td>926c67a611824bc5ba67db11db9c05626e79de96</td><td>1913
+<br/>Enhancing Bilinear Subspace Learning
+<br/>by Element Rearrangement
+</td><td>('38188040', 'Dong Xu', 'dong xu')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')<br/>('1686911', 'Stephen Lin', 'stephen lin')<br/>('1739208', 'Thomas S. Huang', 'thomas s. huang')<br/>('9546964', 'Shih-Fu Chang', 'shih-fu chang')</td><td></td></tr><tr><td>923ede53b0842619831e94c7150e0fc4104e62f7</td><td>978-1-4799-9988-0/16/$31.00 ©2016 IEEE
+<br/>1293
+<br/>ICASSP 2016
+</td><td></td><td></td></tr><tr><td>92b61b09d2eed4937058d0f9494d9efeddc39002</td><td>Under review in IJCV manuscript No.
+<br/>(will be inserted by the editor)
+<br/>BoxCars: Improving Vehicle Fine-Grained Recognition using
+<br/>3D Bounding Boxes in Traffic Surveillance
+<br/>Received: date / Accepted: date
+</td><td>('34891870', 'Jakub Sochor', 'jakub sochor')</td><td></td></tr><tr><td>9264b390aa00521f9bd01095ba0ba4b42bf84d7e</td><td>Displacement Template with Divide-&-Conquer
+<br/>Algorithm for Significantly Improving
+<br/>Descriptor based Face Recognition Approaches
+<br/><b>Wenzhou University, China</b><br/><b>University of Northern British Columbia, Canada</b><br/><b>Aberystwyth University, UK</b></td><td>('1692551', 'Liang Chen', 'liang chen')<br/>('33500699', 'Ling Yan', 'ling yan')<br/>('1990125', 'Yonghuai Liu', 'yonghuai liu')<br/>('39388942', 'Lixin Gao', 'lixin gao')<br/>('3779849', 'Xiaoqin Zhang', 'xiaoqin zhang')</td><td></td></tr><tr><td>92be73dffd3320fe7734258961fe5a5f2a43390e</td><td>TRANSFERRING FACE VERIFICATION NETS TO PAIN AND EXPRESSION REGRESSION
+<br/>Dept. of {Computer Science1, Electrical & Computer Engineering2, Radiation Oncology3, Cognitive Science4}
+<br/><b>Johns Hopkins University, 3400 N. Charles St, Baltimore, MD 21218, USA</b><br/>5Dept. of EE, UESTC, 2006 Xiyuan Ave, Chengdu, Sichuan 611731, China
+<br/><b>Tsinghua University, Beijing 100084, China</b></td><td>('39369840', 'Feng Wang', 'feng wang')<br/>('40031188', 'Xiang Xiang', 'xiang xiang')<br/>('1692867', 'Chang Liu', 'chang liu')<br/>('1709073', 'Trac D. Tran', 'trac d. tran')<br/>('3207112', 'Austin Reiter', 'austin reiter')<br/>('1678633', 'Gregory D. Hager', 'gregory d. hager')<br/>('2095823', 'Harry Quon', 'harry quon')<br/>('1709439', 'Jian Cheng', 'jian cheng')<br/>('1746141', 'Alan L. Yuille', 'alan l. yuille')</td><td></td></tr><tr><td>920a92900fbff22fdaaef4b128ca3ca8e8d54c3e</td><td>LEARNING PATTERN TRANSFORMATION MANIFOLDS WITH PARAMETRIC ATOM
+<br/>SELECTION
+<br/>Ecole Polytechnique F´ed´erale de Lausanne (EPFL)
+<br/>Signal Processing Laboratory (LTS4)
+<br/>Switzerland-1015 Lausanne
+</td><td>('12636684', 'Elif Vural', 'elif vural')<br/>('1703189', 'Pascal Frossard', 'pascal frossard')</td><td></td></tr><tr><td>9207671d9e2b668c065e06d9f58f597601039e5e</td><td>Face Detection Using a 3D Model on
+<br/>Face Keypoints
+</td><td>('2455529', 'Adrian Barbu', 'adrian barbu')<br/>('3019469', 'Gary Gramajo', 'gary gramajo')</td><td></td></tr><tr><td>924b14a9e36d0523a267293c6d149bca83e73f3b</td><td>Volume 5, Number 2, pp. 133 -164
+<br/>Development and Evaluation of a Method
+<br/>Employed to Identify Internal State
+<br/>Utilizing Eye Movement Data
+<br/>(cid:2) Graduate School of Media and
+<br/><b>Governance, Keio University</b><br/>(JAPAN)
+<br/>(cid:3) Faculty of Environmental
+<br/><b>Information, Keio University</b><br/>(JAPAN)
+</td><td>('31726964', 'Noriyuki Aoyama', 'noriyuki aoyama')<br/>('1889276', 'Tadahiko Fukuda', 'tadahiko fukuda')</td><td></td></tr><tr><td>9282239846d79a29392aa71fc24880651826af72</td><td>Antonakos et al. EURASIP Journal on Image and Video Processing 2014, 2014:14
+<br/>http://jivp.eurasipjournals.com/content/2014/1/14
+<br/>RESEARCH
+<br/>Open Access
+<br/>Classification of extreme facial events in sign
+<br/>language videos
+</td><td>('2788012', 'Epameinondas Antonakos', 'epameinondas antonakos')<br/>('1738119', 'Vassilis Pitsikalis', 'vassilis pitsikalis')<br/>('1750686', 'Petros Maragos', 'petros maragos')</td><td></td></tr><tr><td>92115b620c7f653c847f43b6c4ff0470c8e55dab</td><td>Training Deformable Object Models for Human
+<br/>Detection Based on Alignment and Clustering
+<br/>Department of Computer Science,
+<br/>Centre of Biological Signalling Studies (BIOSS),
+<br/><b>University of Freiburg, Germany</b></td><td>('2127987', 'Benjamin Drayer', 'benjamin drayer')<br/>('1710872', 'Thomas Brox', 'thomas brox')</td><td>{drayer,brox}@cs.uni-freiburg.de
+</td></tr><tr><td>928b8eb47288a05611c140d02441660277a7ed54</td><td>Exploiting Images for Video Recognition with Hierarchical Generative
+<br/>Adversarial Networks
+<br/>1 Beijing Laboratory of Intelligent Information Technology, School of Computer Science,
+<br/><b>Big Data Research Center, University of Electronic Science and Technology of China</b><br/><b>Beijing Institute of Technology</b></td><td>('3450614', 'Feiwu Yu', 'feiwu yu')<br/>('2125709', 'Xinxiao Wu', 'xinxiao wu')<br/>('9177510', 'Yuchao Sun', 'yuchao sun')<br/>('2055900', 'Lixin Duan', 'lixin duan')</td><td>{yufeiwu,wuxinxiao,sunyuchao}@bit.edu.cn, lxduan@uestc.edu.cn
+</td></tr><tr><td>926e97d5ce2a6e070f8ec07c5aa7f91d3df90ba0</td><td>Facial Expression Recognition Using Enhanced Deep 3D Convolutional Neural
+<br/>Networks
+<br/>Department of Electrical and Computer Engineering
+<br/><b>University of Denver, Denver, CO</b></td><td>('3093835', 'Mohammad H. Mahoor', 'mohammad h. mahoor')</td><td>behzad.hasani@du.edu and mmahoor@du.edu
+</td></tr><tr><td>92c2dd6b3ac9227fce0a960093ca30678bceb364</td><td>Provided by the author(s) and NUI Galway in accordance with publisher policies. Please cite the published
+<br/>version when available.
+<br/>Title
+<br/>On color texture normalization for active appearance models
+<br/>Author(s)
+<br/>Ionita, Mircea C.; Corcoran, Peter M.; Buzuloiu, Vasile
+<br/>Publication
+<br/>Date
+<br/>2009-05-12
+<br/>Publication
+<br/>Information
+<br/>Ionita, M. C., Corcoran, P., & Buzuloiu, V. (2009). On Color
+<br/>Texture Normalization for Active Appearance Models. Image
+<br/>Processing, IEEE Transactions on, 18(6), 1372-1378.
+<br/>Publisher
+<br/>IEEE
+<br/>Link to
+<br/>publisher's
+<br/>version
+<br/>http://dx.doi.org/10.1109/TIP.2009.2017163
+<br/>Item record
+<br/>http://hdl.handle.net/10379/1350
+<br/>Some rights reserved. For more information, please see the item record link above.
+<br/>Downloaded 2018-11-06T00:40:53Z
+</td><td></td><td></td></tr><tr><td>92e464a5a67582d5209fa75e3b29de05d82c7c86</td><td>Reconstruction for Feature Disentanglement in Pose-invariant Face Recognition
+<br/><b>Rutgers University, NJ, USA</b><br/>2NEC Labs America, CA, USA
+</td><td>('4340744', 'Xi Peng', 'xi peng')<br/>('39960064', 'Xiang Yu', 'xiang yu')<br/>('1729571', 'Kihyuk Sohn', 'kihyuk sohn')</td><td>{xpeng.cs, dnm}@rutgers.edu, {xiangyu, ksohn, manu}@nec-labs.com
+</td></tr><tr><td>927ba64123bd4a8a31163956b3d1765eb61e4426</td><td>Customer satisfaction measuring based on the most
+<br/>significant facial emotion
+<br/>To cite this version:
+<br/>most significant facial emotion. 15th IEEE International Multi-Conference on Systems, Signals
+<br/>Devices (SSD 2018), Mar 2018, Hammamet, Tunisia. <hal-01790317>
+<br/>HAL Id: hal-01790317
+<br/>https://hal-upec-upem.archives-ouvertes.fr/hal-01790317
+<br/>Submitted on 11 May 2018
+<br/>HAL is a multi-disciplinary open access
+<br/>archive for the deposit and dissemination of sci-
+<br/>entific research documents, whether they are pub-
+<br/>lished or not. The documents may come from
+<br/>teaching and research institutions in France or
+<br/><b>abroad, or from public or private research centers</b><br/>L’archive ouverte pluridisciplinaire HAL, est
+<br/>destinée au dépôt et à la diffusion de documents
+<br/>scientifiques de niveau recherche, publiés ou non,
+<br/>émanant des établissements d’enseignement et de
+<br/>recherche français ou étrangers, des laboratoires
+<br/>publics ou privés.
+</td><td>('50101862', 'Rostom Kachouri', 'rostom kachouri')<br/>('50101862', 'Rostom Kachouri', 'rostom kachouri')</td><td></td></tr><tr><td>922838dd98d599d1d229cc73896d55e7a769aa7c</td><td>Learning Hierarchical Representations for Face Verification
+<br/>with Convolutional Deep Belief Networks
+<br/>Erik Learned-Miller
+<br/><b>University of Massachusetts</b><br/><b>University of Michigan</b><br/><b>University of Massachusetts</b><br/>Amherst, MA
+<br/>Ann Arbor, MI
+<br/>Amherst, MA
+</td><td>('3219900', 'Gary B. Huang', 'gary b. huang')<br/>('1697141', 'Honglak Lee', 'honglak lee')</td><td>gbhuang@cs.umass.edu
+<br/>honglak@eecs.umich.edu
+<br/>elm@cs.umass.edu
+</td></tr><tr><td>9294739e24e1929794330067b84f7eafd286e1c8</td><td>Expression Recognition using Elastic Graph Matching
+<br/>21,
+<br/>21,
+<br/>21,
+<br/>, Cairong Zhou 2
+<br/><b>Research Center for Learning Science, Southeast University, Nanjing 210096, China</b><br/><b>Southeast University, Nanjing 210096, China</b></td><td>('40622743', 'Yujia Cao', 'yujia cao')<br/>('40608983', 'Wenming Zheng', 'wenming zheng')<br/>('1718117', 'Li Zhao', 'li zhao')</td><td>Email: yujia_cao@seu.edu.cn
+</td></tr><tr><td>92fada7564d572b72fd3be09ea3c39373df3e27c</td><td></td><td></td><td></td></tr><tr><td>927ad0dceacce2bb482b96f42f2fe2ad1873f37a</td><td>Interest-Point based Face Recognition System
+<br/>87
+<br/>X
+<br/>Interest-Point based Face Recognition System
+<br/>Spain
+<br/>1. Introduction
+<br/>Among all applications of face recognition systems, surveillance is one of the most
+<br/>challenging ones. In such an application, the goal is to detect known criminals in crowded
+<br/>environments, like airports or train stations. Some attempts have been made, like those of
+<br/>Tokio (Engadget, 2006) or Mainz (Deutsche Welle, 2006), with limited success.
+<br/>The first task to be carried out in an automatic surveillance system involves the detection of
+<br/>all the faces in the images taken by the video cameras. Current face detection algorithms are
+<br/>highly reliable and thus, they will not be the focus of our work. Some of the best performing
+<br/>examples are the Viola-Jones algorithm (Viola & Jones, 2004) or the Schneiderman-Kanade
+<br/>algorithm (Schneiderman & Kanade, 2000).
+<br/>The second task to be carried out involves the comparison of all detected faces among the
+<br/>database of known criminals. The ideal behaviour of an automatic system performing this
+<br/>task would be to get a 100% correct identification rate, but this behaviour is far from the
+<br/>capabilities of current face recognition algorithms. Assuming that there will be false
+<br/>identifications, supervised surveillance systems seem to be the most realistic option: the
+<br/>automatic system issues an alarm whenever it detects a possible match with a criminal, and
+<br/>a human decides whether it is a false alarm or not. Figure 1 shows an example.
+<br/>However, even in a supervised scenario the requirements for the face recognition algorithm
+<br/>are extremely high: the false alarm rate must be low enough as to allow the human operator
+<br/>to cope with it; and the percentage of undetected criminals must be kept to a minimum in
+<br/>order to ensure security. Fulfilling both requirements at the same time is the main challenge,
+<br/>as a reduction in false alarm rate usually implies an increase of the percentage of undetected
+<br/>criminals.
+<br/>We propose a novel face recognition system based in the use of interest point detectors and
+<br/>local descriptors. In order to check the performances of our system, and particularly its
+<br/>performances in a surveillance application, we present experimental results in terms of
+<br/>Receiver Operating Characteristic curves or ROC curves. From the experimental results, it
+<br/>becomes clear that our system outperforms classical appearance based approaches.
+<br/>www.intechopen.com
+</td><td>('35178717', 'Cesar Fernandez', 'cesar fernandez')<br/>('3686544', 'Maria Asuncion Vicente', 'maria asuncion vicente')<br/>('2422580', 'Miguel Hernandez', 'miguel hernandez')</td><td></td></tr><tr><td>929bd1d11d4f9cbc638779fbaf958f0efb82e603</td><td>This is the author’s version of a work that was submitted/accepted for pub-
+<br/>lication in the following source:
+<br/>Zhang, Ligang & Tjondronegoro, Dian W. (2010) Improving the perfor-
+<br/>mance of facial expression recognition using dynamic, subtle and regional
+<br/>features.
+<br/>In Kok, WaiWong, B. Sumudu, U. Mendis, & Abdesselam ,
+<br/>Bouzerdoum (Eds.) Neural Information Processing. Models and Applica-
+<br/>tions, Lecture Notes in Computer Science, Sydney, N.S.W, pp. 582-589.
+<br/>This file was downloaded from: http://eprints.qut.edu.au/43788/
+<br/>c(cid:13) Copyright 2010 Springer-Verlag
+<br/>Conference proceedings published, by Springer Verlag, will be available
+<br/>via Lecture Notes in Computer Science http://www.springer.de/comp/lncs/
+<br/>Notice: Changes introduced as a result of publishing processes such as
+<br/>copy-editing and formatting may not be reflected in this document. For a
+<br/>definitive version of this work, please refer to the published source:
+<br/>http://dx.doi.org/10.1007/978-3-642-17534-3_72
+</td><td></td><td></td></tr><tr><td>923ec0da8327847910e8dd71e9d801abcbc93b08</td><td>Hide-and-Seek: Forcing a Network to be Meticulous for
+<br/>Weakly-supervised Object and Action Localization
+<br/><b>University of California, Davis</b></td><td>('19553871', 'Krishna Kumar Singh', 'krishna kumar singh')<br/>('1883898', 'Yong Jae Lee', 'yong jae lee')</td><td></td></tr><tr><td>0c741fa0966ba3ee4fc326e919bf2f9456d0cd74</td><td>Facial Age Estimation by Learning from Label Distributions
+<br/><b>School of Mathematical Sciences, Monash University, VIC 3800, Australia</b><br/><b>School of Computer Science and Engineering, Southeast University, Nanjing 210096, China</b><br/><b>National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210093, China</b></td><td>('1735299', 'Xin Geng', 'xin geng')<br/>('2848275', 'Kate Smith-Miles', 'kate smith-miles')<br/>('1692625', 'Zhi-Hua Zhou', 'zhi-hua zhou')</td><td></td></tr><tr><td>0c435e7f49f3e1534af0829b7461deb891cf540a</td><td>Capturing Global Semantic Relationships for Facial Action Unit Recognition
+<br/><b>Rensselaer Polytechnic Institute</b><br/><b>School of Electrical Engineering and Automation, Harbin Institute of Technology</b><br/><b>School of Computer Science and Technology, University of Science and Technology of China</b></td><td>('2860279', 'Ziheng Wang', 'ziheng wang')<br/>('1830523', 'Yongqiang Li', 'yongqiang li')<br/>('1791319', 'Shangfei Wang', 'shangfei wang')<br/>('1726583', 'Qiang Ji', 'qiang ji')</td><td>{wangz10,liy23,jiq}@rpi.edu
+<br/>sfwang@ustc.edu.cn
+</td></tr><tr><td>0cb7e4c2f6355c73bfc8e6d5cdfad26f3fde0baf</td><td>International Journal of Artificial Intelligence & Applications (IJAIA), Vol. 5, No. 3, May 2014
+<br/>FACIAL EXPRESSION RECOGNITION BASED ON
+<br/><b>Computer Science, Engineering and Mathematics School, Flinders University, Australia</b><br/><b>Computer Science, Engineering and Mathematics School, Flinders University, Australia</b></td><td>('3105876', 'Humayra Binte Ali', 'humayra binte ali')<br/>('1739260', 'David M W Powers', 'david m w powers')</td><td></td></tr><tr><td>0c30f6303dc1ff6d05c7cee4f8952b74b9533928</td><td>Pareto Discriminant Analysis
+<br/>Karim T. Abou–Moustafa
+<br/>Centre of Intelligent Machines
+<br/><b>The Robotics Institute</b><br/>Centre of Intelligent Machines
+<br/><b>McGill University</b><br/><b>Carnegie Mellon University</b><br/><b>McGill University</b></td><td>('1707876', 'Fernando De la Torre', 'fernando de la torre')<br/>('1701344', 'Frank P. Ferrie', 'frank p. ferrie')</td><td>karimt@cim.mcgill.ca
+<br/>ftorre@cs.cmu.edu
+<br/>ferrie@cim.mcgill.ca
+</td></tr><tr><td>0ccc535d12ad2142a8310d957cc468bbe4c63647</td><td>Better Exploiting OS-CNNs for Better Event Recognition in Images
+<br/><b>Shenzhen Key Lab of CVPR, Shenzhen Institutes of Advanced Technology, CAS, China</b></td><td>('33345248', 'Limin Wang', 'limin wang')<br/>('1915826', 'Zhe Wang', 'zhe wang')<br/>('2072196', 'Sheng Guo', 'sheng guo')<br/>('33427555', 'Yu Qiao', 'yu qiao')</td><td>{07wanglimin, buptwangzhe2012, guosheng1001}@gmail.com, yu.qiao@siat.ac.cn
+</td></tr><tr><td>0c8a0a81481ceb304bd7796e12f5d5fa869ee448</td><td>International Journal of Fuzzy Logic and Intelligent Systems, vol. 10, no. 2, June 2010, pp. 95-100
+<br/>A Spatial Regularization of LDA for Face Recognition
+<br/><b>Gangnung-Wonju National University</b><br/>123 Chibyun-Dong, Kangnung, 210-702, Korea
+</td><td>('39845108', 'Lae-Jeong Park', 'lae-jeong park')</td><td>Tel : +82-33-640-2389, Fax : +82-33-646-0740, E-mail : ljpark@gwnu.ac.kr
+</td></tr><tr><td>0c36c988acc9ec239953ff1b3931799af388ef70</td><td>Face Detection Using Improved Faster RCNN
+<br/>Huawei Cloud BU, China
+<br/>Figure1.Face detection results of FDNet1.0
+</td><td>('2568329', 'Changzheng Zhang', 'changzheng zhang')<br/>('5084124', 'Xiang Xu', 'xiang xu')<br/>('2929196', 'Dandan Tu', 'dandan tu')</td><td>{zhangzhangzheng, xuxiang12, tudandan}@huawei.com
+</td></tr><tr><td>0c5ddfa02982dcad47704888b271997c4de0674b</td><td></td><td></td><td></td></tr><tr><td>0c79a39a870d9b56dc00d5252d2a1bfeb4c295f1</td><td>Face Recognition in Videos by Label Propagation
+<br/><b>International Institute of Information Technology, Hyderabad, India</b></td><td>('37956314', 'Vijay Kumar', 'vijay kumar')<br/>('3185334', 'Anoop M. Namboodiri', 'anoop m. namboodiri')</td><td>{vijaykumar.r@research., anoop@, jawahar@}iiit.ac.in
+</td></tr><tr><td>0cccf576050f493c8b8fec9ee0238277c0cfd69a</td><td></td><td></td><td></td></tr><tr><td>0cdb49142f742f5edb293eb9261f8243aee36e12</td><td>Combined Learning of Salient Local Descriptors and Distance Metrics
+<br/>for Image Set Face Verification
+<br/>NICTA, PO Box 6020, St Lucia, QLD 4067, Australia
+<br/><b>University of Queensland, School of ITEE, QLD 4072, Australia</b></td><td>('1781182', 'Conrad Sanderson', 'conrad sanderson')<br/>('3026404', 'Yongkang Wong', 'yongkang wong')<br/>('2270092', 'Brian C. Lovell', 'brian c. lovell')</td><td></td></tr><tr><td>0c069a870367b54dd06d0da63b1e3a900a257298</td><td>Author manuscript, published in "ICANN 2011 - International Conference on Artificial Neural Networks (2011)"
+</td><td></td><td></td></tr><tr><td>0c75c7c54eec85e962b1720755381cdca3f57dfb</td><td>2212
+<br/>Face Landmark Fitting via Optimized Part
+<br/>Mixtures and Cascaded Deformable Model
+</td><td>('39960064', 'Xiang Yu', 'xiang yu')<br/>('1768190', 'Junzhou Huang', 'junzhou huang')<br/>('1753384', 'Shaoting Zhang', 'shaoting zhang')<br/>('1711560', 'Dimitris N. Metaxas', 'dimitris n. metaxas')</td><td></td></tr><tr><td>0cf2eecf20cfbcb7f153713479e3206670ea0e9c</td><td>Privacy-Protective-GAN for Face De-identification
+<br/><b>Temple University</b></td><td>('50117915', 'Yifan Wu', 'yifan wu')<br/>('46319628', 'Fan Yang', 'fan yang')<br/>('1805398', 'Haibin Ling', 'haibin ling')</td><td>{yifan.wu, fyang, hbling} @temple.edu
+</td></tr><tr><td>0ca36ecaf4015ca4095e07f0302d28a5d9424254</td><td>Improving Bag-of-Visual-Words Towards Effective Facial Expressive
+<br/>Image Classification
+<br/>1Univ. Grenoble Alpes, CNRS, Grenoble INP∗ , GIPSA-lab, 38000 Grenoble, France
+<br/>Keywords:
+<br/>BoVW, k-means++, Relative Conjunction Matrix, SIFT, Spatial Pyramids, TF.IDF.
+</td><td>('10762131', 'Dawood Al Chanti', 'dawood al chanti')<br/>('1788869', 'Alice Caplier', 'alice caplier')</td><td>dawood.alchanti@gmail.com
+</td></tr><tr><td>0c1d85a197a1f5b7376652a485523e616a406273</td><td>Joint Registration and Representation Learning for Unconstrained Face
+<br/>Identification
+<br/><b>University of Canberra, Australia, Data61 - CSIRO and ANU, Australia</b><br/><b>Khalifa University, Abu Dhabi, United Arab Emirates</b></td><td>('2008898', 'Munawar Hayat', 'munawar hayat')<br/>('1802072', 'Naoufel Werghi', 'naoufel werghi')</td><td>{munawar.hayat,roland.goecke}@canberra.edu.au, salman.khan@csiro.au, naoufel.werghi@kustar.ac.ae
+</td></tr><tr><td>0ca66283f4fb7dbc682f789fcf6d6732006befd5</td><td>Active Dictionary Learning for Image Representation
+<br/>Department of Electrical and Computer Engineering
+<br/><b>Rutgers, The State University of New Jersey, Piscataway, NJ</b></td><td>('37799945', 'Tong Wu', 'tong wu')<br/>('9208982', 'Anand D. Sarwate', 'anand d. sarwate')<br/>('2138101', 'Waheed U. Bajwa', 'waheed u. bajwa')</td><td></td></tr><tr><td>0c7f27d23a162d4f3896325d147f412c40160b52</td><td>Models and Algorithms for
+<br/>Vision through the Atmosphere
+<br/>Submitted in partial fulfillment of the
+<br/>requirements for the degree
+<br/>of Doctor of Philosophy
+<br/>in the Graduate School of Arts and Sciences
+<br/><b>COLUMBIA UNIVERSITY</b><br/>2003
+</td><td>('1779052', 'Srinivasa G. Narasimhan', 'srinivasa g. narasimhan')</td><td></td></tr><tr><td>0cfca73806f443188632266513bac6aaf6923fa8</td><td>Predictive Uncertainty in Large Scale Classification
+<br/>using Dropout - Stochastic Gradient Hamiltonian
+<br/>Monte Carlo.
+<br/>Vergara, Diego∗1, Hern´andez, Sergio∗2, Valdenegro-Toro, Mat´ıas∗∗3 and Jorquera, Felipe∗4.
+<br/>∗Laboratorio de Procesamiento de Informaci´on Geoespacial, Universidad Cat´olica del Maule, Chile.
+<br/>∗∗German Research Centre for Artificial Intelligence, Bremen, Germany.
+</td><td></td><td>Email: 1diego.vergara@alu.ucm.cl, 2shernandez@ucm.cl,3matias.valdenegro@dfki.de,
+<br/>4f.jorquera.uribe@gmail.com
+</td></tr><tr><td>0c20fd90d867fe1be2459223a3cb1a69fa3d44bf</td><td>A Monte Carlo Strategy to Integrate Detection
+<br/>and Model-Based Face Analysis
+<br/>Department for Mathematics and Computer Science
+<br/><b>University of Basel, Switzerland</b></td><td>('2591294', 'Andreas Forster', 'andreas forster')<br/>('34460642', 'Bernhard Egger', 'bernhard egger')<br/>('1687079', 'Thomas Vetter', 'thomas vetter')</td><td>sandro.schoenborn,andreas.forster,bernhard.egger,thomas.vetter@unibas.ch
+</td></tr><tr><td>0c2875bb47db3698dbbb3304aca47066978897a4</td><td>Recurrent Models for Situation Recognition
+<br/><b>University of Illinois at Urbana-Champaign</b></td><td>('36508529', 'Arun Mallya', 'arun mallya')<br/>('1749609', 'Svetlana Lazebnik', 'svetlana lazebnik')</td><td>{amallya2,slazebni}@illinois.edu
+</td></tr><tr><td>0c3f7272a68c8e0aa6b92d132d1bf8541c062141</td><td>Hindawi Publishing Corporation
+<br/>e Scientific World Journal
+<br/>Volume 2014, Article ID 672630, 6 pages
+<br/>http://dx.doi.org/10.1155/2014/672630
+<br/>Research Article
+<br/>Kruskal-Wallis-Based Computationally Efficient Feature
+<br/>Selection for Face Recognition
+<br/><b>Foundation University, Rawalpindi 46000, Pakistan</b><br/><b>Shaheed Zulfikar Ali Bhutto Institute of Science and Technology Islamabad</b><br/>Islamabad 44000, Pakistan
+<br/><b>International Islamic University, Islamabad 44000, Pakistan</b><br/>Received 5 December 2013; Accepted 10 February 2014; Published 21 May 2014
+<br/>Academic Editors: S. Balochian, V. Bhatnagar, and Y. Zhang
+<br/>which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
+<br/>Face recognition in today’s technological world, and face recognition applications attain much more importance. Most of the
+<br/>existing work used frontal face images to classify face image. However these techniques fail when applied on real world face images.
+<br/>The proposed technique effectively extracts the prominent facial features. Most of the features are redundant and do not contribute
+<br/>to representing face. In order to eliminate those redundant features, computationally efficient algorithm is used to select the more
+<br/>discriminative face features. Extracted features are then passed to classification step. In the classification step, different classifiers
+<br/>are ensemble to enhance the recognition accuracy rate as single classifier is unable to achieve the high accuracy. Experiments are
+<br/>performed on standard face database images and results are compared with existing techniques.
+<br/>1. Introduction
+<br/>Face recognition is becoming more acceptable in the domain
+<br/>of computer vision and pattern recognition. The authenti-
+<br/>cation systems based on the traditional ID card and pass-
+<br/>word are nowadays replaced by the techniques which are
+<br/>more preferable in order to handle the security issues. The
+<br/>authentication systems based on biometrics are one of the
+<br/>substitutes which are independent of the user’s memory and
+<br/>not subjected to loss. Among those systems, face recognition
+<br/>gains special attention because of the security it provides and
+<br/>because it is independent of the high accuracy equipment
+<br/>unlike iris and recognition based on the fingerprints.
+<br/>Feature selection in pattern recognition is specifying the
+<br/>subset of significant features to decrease the data dimensions
+<br/>and at the same time it provides the set of selective features.
+<br/>Image is represented by set of features in methods used for
+<br/>feature extraction and each feature plays a vital role in the
+<br/>process of recognition. The feature selection algorithm drops
+<br/>all the unrelated features with the highly acceptable precision
+<br/>rate as compared to some other pattern classification problem
+<br/>in which higher precision rate cannot be obtained by greater
+<br/>number of feature sets [1].
+<br/>The feature selected by the classifiers plays a vital role
+<br/>in producing the best features that are vigorous to the
+<br/>inconsistent environment, for example, change in expressions
+<br/>and other barriers. Local (texture-based) and global (holistic)
+<br/>approaches are the two approaches used for face recognition
+<br/>[2]. Local approaches characterized the face in the form of
+<br/>geometric measurements which matches the unfamiliar face
+<br/>with the closest face from database. Geometric measurements
+<br/>contain angles and the distance of different facial points,
+<br/>for example, mouth position, nose length, and eyes. Global
+<br/>features are extracted by the use of algebraic methods like
+<br/>PCA (principle component analysis) and ICA (independent
+<br/>component analysis) [3]. PCA shows a quick response to
+<br/>light and variation as it serves inner and outer classes
+<br/>fairly. In face recognition, LDA (linear discriminate analysis)
+<br/>usually performs better than PCA but separable creation is
+<br/>not precise in classification. Good recognition rates can be
+<br/>produced by transformation techniques like DCT (discrete
+<br/>cosine transform) and DWT (discrete wavelet transform) [4].
+</td><td>('8652075', 'Sajid Ali Khan', 'sajid ali khan')<br/>('9955306', 'Ayyaz Hussain', 'ayyaz hussain')<br/>('1959869', 'Abdul Basit', 'abdul basit')<br/>('2388005', 'Sheeraz Akram', 'sheeraz akram')<br/>('8652075', 'Sajid Ali Khan', 'sajid ali khan')</td><td>Correspondence should be addressed to Sajid Ali Khan; sajidalibn@gmail.com
+</td></tr><tr><td>0cbc4dcf2aa76191bbf641358d6cecf38f644325</td><td>Visage: A Face Interpretation Engine for
+<br/>Smartphone Applications
+<br/><b>Dartmouth College, 6211 Sudiko Lab, Hanover, NH 03755, USA</b><br/><b>Intel Lab, 2200 Mission College Blvd, Santa Clara, CA 95054, USA</b><br/>3 Microsoft Research Asia, No. 5 Dan Ling St., Haidian District, Beijing, China
+</td><td>('1840450', 'Xiaochao Yang', 'xiaochao yang')<br/>('1702472', 'Chuang-Wen You', 'chuang-wen you')<br/>('1884089', 'Hong Lu', 'hong lu')<br/>('1816301', 'Mu Lin', 'mu lin')<br/>('2772904', 'Nicholas D. Lane', 'nicholas d. lane')<br/>('1690035', 'Andrew T. Campbell', 'andrew t. campbell')</td><td>{Xiaochao.Yang,chuang-wen.you}@dartmouth.edu,hong.lu@intel.com,
+<br/>mu.lin@dartmouth.edu,niclane@microsoft.com,campbell@cs.dartmouth.edu
+</td></tr><tr><td>0ce8a45a77e797e9d52604c29f4c1e227f604080</td><td>International Journal of Computer Science, Engineering and Information Technology (IJCSEIT), Vol.3,No. 6,December 2013
+<br/>ZERNIKE MOMENT-BASED FEATURE EXTRACTION
+<br/>FOR FACIAL RECOGNITION OF IDENTICAL TWINS
+<br/>1Department of Electrical,Computer and Biomedical Engineering, Qazvin branch,
+<br/><b>Amirkabir University of Technology, Tehran</b><br/><b>IslamicAzad University, Qazvin, Iran</b><br/>Iran
+</td><td>('13302047', 'Hoda Marouf', 'hoda marouf')<br/>('1692435', 'Karim Faez', 'karim faez')</td><td></td></tr><tr><td>0ce3a786aed896d128f5efdf78733cc675970854</td><td>Learning the Face Prior
+<br/>for Bayesian Face Recognition
+<br/>Department of Information Engineering,
+<br/><b>The Chinese University of Hong Kong, China</b></td><td>('2312486', 'Chaochao Lu', 'chaochao lu')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td></td></tr><tr><td>0c54e9ac43d2d3bab1543c43ee137fc47b77276e</td><td></td><td></td><td></td></tr><tr><td>0c5afb209b647456e99ce42a6d9d177764f9a0dd</td><td>97
+<br/>Recognizing Action Units for
+<br/>Facial Expression Analysis
+</td><td>('40383812', 'Ying-li Tian', 'ying-li tian')<br/>('1733113', 'Takeo Kanade', 'takeo kanade')<br/>('1737918', 'Jeffrey F. Cohn', 'jeffrey f. cohn')</td><td></td></tr><tr><td>0c59071ddd33849bd431165bc2d21bbe165a81e0</td><td>Person Recognition in Personal Photo Collections
+<br/><b>Max Planck Institute for Informatics</b><br/>Saarbrücken, Germany
+</td><td>('2390510', 'Seong Joon Oh', 'seong joon oh')<br/>('1798000', 'Rodrigo Benenson', 'rodrigo benenson')<br/>('1739548', 'Mario Fritz', 'mario fritz')<br/>('1697100', 'Bernt Schiele', 'bernt schiele')</td><td>{joon,benenson,mfritz,schiele}@mpi-inf.mpg.de
+</td></tr><tr><td>0c377fcbc3bbd35386b6ed4768beda7b5111eec6</td><td>258
+<br/>A Unified Probabilistic Framework
+<br/>for Spontaneous Facial Action Modeling
+<br/>and Understanding
+</td><td>('1686235', 'Yan Tong', 'yan tong')<br/>('1713712', 'Jixu Chen', 'jixu chen')<br/>('1726583', 'Qiang Ji', 'qiang ji')</td><td></td></tr><tr><td>0c12cbb9b9740dfa2816b8e5cde69c2f5a715c58</td><td>Memory-Augmented Attribute Manipulation Networks for
+<br/>Interactive Fashion Search
+<br/><b>Southwest Jiaotong University</b><br/><b>National University of Singapore</b><br/><b>AI Institute</b></td><td>('33901950', 'Bo Zhao', 'bo zhao')<br/>('33221685', 'Jiashi Feng', 'jiashi feng')<br/>('1814091', 'Xiao Wu', 'xiao wu')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')</td><td>zhaobo@my.swjtu.edu.cn, elezhf@nus.edu.sg, wuxiaohk@swjtu.edu.cn, yanshuicheng@360.cn
+</td></tr><tr><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td><td></td><td>('2964917', 'Cameron Whitelam', 'cameron whitelam')<br/>('1885566', 'Emma Taborsky', 'emma taborsky')<br/>('1917247', 'Austin Blanton', 'austin blanton')<br/>('8033275', 'Brianna Maze', 'brianna maze')<br/>('15282121', 'Tim Miller', 'tim miller')<br/>('6680444', 'Anil K. Jain', 'anil k. jain')<br/>('40205896', 'James A. Duncan', 'james a. duncan')<br/>('2040584', 'Kristen Allen', 'kristen allen')<br/>('39403529', 'Jordan Cheney', 'jordan cheney')<br/>('2136478', 'Patrick Grother', 'patrick grother')</td><td></td></tr><tr><td>0cd8895b4a8f16618686f622522726991ca2a324</td><td>Discrete Choice Models for Static Facial Expression
+<br/>Recognition
+<br/><b>Ecole Polytechnique Federale de Lausanne, Signal Processing Institute</b><br/>2 Ecole Polytechnique Federale de Lausanne, Operation Research Group
+<br/>Ecublens, 1015 Lausanne, Switzerland
+<br/>Ecublens, 1015 Lausanne, Switzerland
+</td><td>('1794461', 'Gianluca Antonini', 'gianluca antonini')<br/>('2916630', 'Matteo Sorci', 'matteo sorci')<br/>('1690395', 'Michel Bierlaire', 'michel bierlaire')<br/>('1710257', 'Jean-Philippe Thiran', 'jean-philippe thiran')</td><td>{Matteo.Sorci,Gianluca.Antonini,JP.Thiran}@epfl.ch
+<br/>Michel.Bierlaire@epfl.ch
+</td></tr><tr><td>0cf7da0df64557a4774100f6fde898bc4a3c4840</td><td>Shape Matching and Object Recognition using Low Distortion Correspondences
+<br/>Department of Electrical Engineering and Computer Science
+<br/>U.C. Berkeley
+</td><td>('39668247', 'Alexander C. Berg', 'alexander c. berg')<br/>('1689212', 'Jitendra Malik', 'jitendra malik')</td><td>faberg,millert,malikg@eecs.berkeley.edu
+</td></tr><tr><td>0cbe059c181278a373292a6af1667c54911e7925</td><td>Owl and Lizard: Patterns of Head Pose and Eye
+<br/>Pose in Driver Gaze Classification
+<br/><b>Massachusetts Institute of Technology (MIT</b><br/><b>Chalmers University of Technology, SAFER</b></td><td>('7137846', 'Joonbum Lee', 'joonbum lee')<br/>('1901227', 'Bryan Reimer', 'bryan reimer')<br/>('35816778', 'Trent Victor', 'trent victor')</td><td></td></tr><tr><td>0c4659b35ec2518914da924e692deb37e96d6206</td><td>1236
+<br/>Registering a MultiSensor Ensemble of Images
+</td><td>('1822837', 'Jeff Orchard', 'jeff orchard')<br/>('6056877', 'Richard Mann', 'richard mann')</td><td></td></tr><tr><td>0c6e29d82a5a080dc1db9eeabbd7d1529e78a3dc</td><td>Learning Bayesian Network Classifiers for Facial Expression Recognition using
+<br/>both Labeled and Unlabeled Data
+<br/><b>Beckman Institute, University of Illinois at Urbana-Champaign, IL, USA</b><br/>iracohen, huang
+<br/> Escola Polit´ecnica, Universidade de S˜ao Paulo, S˜ao Paulo, Brazil
+<br/>fgcozman, marcelo.cirelo
+</td><td>('1774778', 'Ira Cohen', 'ira cohen')<br/>('1703601', 'Nicu Sebe', 'nicu sebe')<br/>('1739208', 'Thomas S. Huang', 'thomas s. huang')</td><td>@ifp.uiuc.edu
+<br/> Leiden Institute of Advanced Computer Science, Leiden University, The Netherlands, nicu@liacs.nl
+<br/>@usp.br
+</td></tr><tr><td>0ced7b814ec3bb9aebe0fcf0cac3d78f36361eae</td><td>Available Online at www.ijcsmc.com
+<br/>International Journal of Computer Science and Mobile Computing
+<br/> A Monthly Journal of Computer Science and Information Technology
+<br/>ISSN 2320–088X
+<br/>IMPACT FACTOR: 6.017
+<br/>
+<br/> IJCSMC, Vol. 6, Issue. 1, January 2017, pg.221 – 227
+<br/>Central Local Directional Pattern Value
+<br/>Flooding Co-occurrence Matrix based
+<br/>Features for Face Recognition
+<br/><b>Gokaraju Rangaraju Institute of Engineering and Technology, Hyderabad</b></td><td>('40221166', 'Chandra Sekhar Reddy', 'chandra sekhar reddy')<br/>('40221166', 'Chandra Sekhar Reddy', 'chandra sekhar reddy')</td><td></td></tr><tr><td>0c53ef79bb8e5ba4e6a8ebad6d453ecf3672926d</td><td>SUBMITTED TO JOURNAL
+<br/>Weakly Supervised PatchNets: Describing and
+<br/>Aggregating Local Patches for Scene Recognition
+</td><td>('40184588', 'Zhe Wang', 'zhe wang')<br/>('39709927', 'Limin Wang', 'limin wang')<br/>('40457196', 'Yali Wang', 'yali wang')<br/>('3047890', 'Bowen Zhang', 'bowen zhang')<br/>('40285012', 'Yu Qiao', 'yu qiao')</td><td></td></tr><tr><td>0c60eebe10b56dbffe66bb3812793dd514865935</td><td></td><td></td><td></td></tr><tr><td>0c05f60998628884a9ac60116453f1a91bcd9dda</td><td>Optimizing Open-Ended Crowdsourcing: The Next Frontier in
+<br/>Crowdsourced Data Management
+<br/><b>University of Illinois</b><br/><b>cid:63)Stanford University</b></td><td>('32953042', 'Akash Das Sarma', 'akash das sarma')<br/>('8336538', 'Vipul Venkataraman', 'vipul venkataraman')</td><td></td></tr><tr><td>6601a0906e503a6221d2e0f2ca8c3f544a4adab7</td><td>SRTM-2 2/9/06 3:27 PM Page 321
+<br/>Detection of Ancient Settlement Mounds:
+<br/>Archaeological Survey Based on the
+<br/>SRTM Terrain Model
+<br/>B.H. Menze, J.A. Ur, and A.G. Sherratt
+</td><td></td><td></td></tr><tr><td>660b73b0f39d4e644bf13a1745d6ee74424d4a16</td><td></td><td></td><td>3,250+OPEN ACCESS BOOKS106,000+INTERNATIONALAUTHORS AND EDITORS113+ MILLIONDOWNLOADSBOOKSDELIVERED TO151 COUNTRIESAUTHORS AMONGTOP 1%MOST CITED SCIENTIST12.2%AUTHORS AND EDITORSFROM TOP 500 UNIVERSITIESSelection of our books indexed in theBook Citation Index in Web of Science™Core Collection (BKCI)Chapter from the book Reviews, Refinements and New Ideas in Face RecognitionDownloaded from: http://www.intechopen.com/books/reviews-refinements-and-new-ideas-in-face-recognitionPUBLISHED BYWorld's largest Science,Technology & Medicine Open Access book publisherInterested in publishing with InTechOpen?Contact us at book.department@intechopen.com </td></tr><tr><td>66d512342355fb77a4450decc89977efe7e55fa2</td><td>Under review as a conference paper at ICLR 2018
+<br/>LEARNING NON-LINEAR TRANSFORM WITH DISCRIM-
+<br/>INATIVE AND MINIMUM INFORMATION LOSS PRIORS
+<br/>Anonymous authors
+<br/>Paper under double-blind review
+</td><td></td><td></td></tr><tr><td>66aad5b42b7dda077a492e5b2c7837a2a808c2fa</td><td>A Novel PCA-Based Bayes Classifier
+<br/>and Face Analysis
+<br/>1 Centre de Visi´o per Computador,
+<br/>Universitat Aut`onoma de Barcelona, Barcelona, Spain
+<br/>2 Department of Computer Science,
+<br/><b>Nanjing University of Science and Technology</b><br/>Nanjing, People’s Republic of China
+<br/>3 HEUDIASYC - CNRS Mixed Research Unit,
+<br/><b>Compi`egne University of Technology</b><br/>60205 Compi`egne cedex, France
+</td><td>('1761329', 'Zhong Jin', 'zhong jin')<br/>('1742818', 'Franck Davoine', 'franck davoine')<br/>('35428318', 'Zhen Lou', 'zhen lou')</td><td>zhong.jin@cvc.uab.es
+<br/>jyyang@mail.njust.edu.cn
+<br/>franck.davoine@hds.utc.fr
+</td></tr><tr><td>66b9d954dd8204c3a970d86d91dd4ea0eb12db47</td><td>Evaluation of Gabor-Wavelet-Based Facial Action Unit Recognition
+<br/>in Image Sequences of Increasing Complexity
+<br/><b>IBM T. J. Watson Research Center, PO Box 704, Yorktown Heights, NY</b><br/><b>Robotics Institute, Carnegie Mellon University, Pittsburgh, PA</b><br/><b>University of Pittsburgh, Pittsburgh, PA</b></td><td>('40383812', 'Ying-li Tian', 'ying-li tian')<br/>('1737918', 'Jeffrey F. Cohn', 'jeffrey f. cohn')</td><td>Email: yltian@us.ibm.com,
+<br/>tk@cs.cmu.edu
+<br/>jeffcohn@pitt.edu
+</td></tr><tr><td>6643a7feebd0479916d94fb9186e403a4e5f7cbf</td><td>Chapter 8
+<br/>3D Face Recognition
+</td><td>('1737428', 'Nick Pears', 'nick pears')</td><td></td></tr><tr><td>661ca4bbb49bb496f56311e9d4263dfac8eb96e9</td><td>Datasheets for Datasets
+</td><td>('2076288', 'Timnit Gebru', 'timnit gebru')<br/>('1722360', 'Hal Daumé', 'hal daumé')</td><td></td></tr><tr><td>66dcd855a6772d2731b45cfdd75f084327b055c2</td><td>Quality Classified Image Analysis with Application
+<br/>to Face Detection and Recognition
+<br/>International Doctoral Innovation Centre
+<br/><b>University of Nottingham Ningbo China</b><br/>School of Computer Science
+<br/><b>University of Nottingham Ningbo China</b><br/><b>College of Information Engineering</b><br/><b>Shenzhen University, Shenzhen, China</b></td><td>('1684164', 'Fei Yang', 'fei yang')<br/>('1737486', 'Qian Zhang', 'qian zhang')<br/>('2155597', 'Miaohui Wang', 'miaohui wang')<br/>('1698461', 'Guoping Qiu', 'guoping qiu')</td><td></td></tr><tr><td>666939690c564641b864eed0d60a410b31e49f80</td><td>What Visual Attributes Characterize an Object Class ?
+<br/><b>National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of</b><br/>Sciences, No.95, Zhongguancun East Road, Beijing, 100190, China
+<br/>2Microsoft Research, No.5, Dan Ling Street, Haidian District, Beijing 10080, China
+</td><td>('3247966', 'Jianlong Fu', 'jianlong fu')<br/>('1783122', 'Jinqiao Wang', 'jinqiao wang')<br/>('3349534', 'Xin-Jing Wang', 'xin-jing wang')<br/>('3663422', 'Yong Rui', 'yong rui')<br/>('1694235', 'Hanqing Lu', 'hanqing lu')</td><td>1fjlfu, jqwang, luhqg@nlpr.ia.ac.cn, 2fxjwang, yongruig@microsoft.com
+</td></tr><tr><td>66330846a03dcc10f36b6db9adf3b4d32e7a3127</td><td>Polylingual Multimodal Learning
+<br/><b>Institute AIFB, Karlsruhe Institute of Technology, Germany</b></td><td>('3219864', 'Aditya Mogadala', 'aditya mogadala')</td><td>{aditya.mogadala}@kit.edu
+</td></tr><tr><td>66d087f3dd2e19ffe340c26ef17efe0062a59290</td><td>Dog Breed Identification
+<br/>Brian Mittl
+<br/>Vijay Singh
+</td><td></td><td>wlarow@stanford.edu
+<br/>bmittl@stanford.edu
+<br/>vpsingh@stanford.edu
+</td></tr><tr><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td><td>Ordinal Regression with Multiple Output CNN for Age Estimation
+<br/><b>Xidian University 2Xi an Jiaotong University 3Microsoft Research Asia</b></td><td>('1786361', 'Zhenxing Niu', 'zhenxing niu')<br/>('1745420', 'Gang Hua', 'gang hua')<br/>('10699750', 'Xinbo Gao', 'xinbo gao')<br/>('36497527', 'Mo Zhou', 'mo zhou')<br/>('40367806', 'Le Wang', 'le wang')</td><td>{zhenxingniu,cdluminate}@gmail.com, lewang@mail.xjtu.edu.cn, xinbogao@mail.xidian.edu.cn
+<br/>ganghua@gmail.com
+</td></tr><tr><td>666300af8ffb8c903223f32f1fcc5c4674e2430b</td><td>Changing Fashion Cultures
+<br/><b>National Institute of Advanced Industrial Science and Technology (AIST</b><br/>Tsukuba, Ibaraki, Japan
+<br/><b>Tokyo Denki University</b><br/>Adachi, Tokyo, Japan
+</td><td>('3408038', 'Kaori Abe', 'kaori abe')<br/>('5014206', 'Teppei Suzuki', 'teppei suzuki')<br/>('9935341', 'Shunya Ueta', 'shunya ueta')<br/>('1732705', 'Yutaka Satoh', 'yutaka satoh')<br/>('1730200', 'Hirokatsu Kataoka', 'hirokatsu kataoka')<br/>('2462801', 'Akio Nakamura', 'akio nakamura')</td><td>{abe.keroko, suzuki-teppei, shunya.ueta, yu.satou, hirokatsu.kataoka}@aist.go.jp
+<br/>nkmr-a@cck.dendai.ac.jp
+</td></tr><tr><td>66029f1be1a5cee9a4e3e24ed8fcb65d5d293720</td><td>HWANG AND GRAUMAN: ACCOUNTING FOR IMPORTANCE IN IMAGE RETRIEVAL
+<br/>Accounting for the Relative Importance of
+<br/>Objects in Image Retrieval
+<br/><b>The University of Texas</b><br/>Austin, TX, USA
+</td><td>('35788904', 'Sung Ju Hwang', 'sung ju hwang')<br/>('1794409', 'Kristen Grauman', 'kristen grauman')</td><td>sjhwang@cs.utexas.edu
+<br/>grauman@cs.utexas.edu
+</td></tr><tr><td>6691dfa1a83a04fdc0177d8d70e3df79f606b10f</td><td>Illumination Modeling and Normalization for Face Recognition
+<br/><b>Institute of Automation</b><br/>Chinese Academy of Sciences
+<br/>Beijing, 100080, China
+</td><td>('29948255', 'Haitao Wang', 'haitao wang')<br/>('34679741', 'Stan Z. Li', 'stan z. li')<br/>('1744302', 'Yangsheng Wang', 'yangsheng wang')<br/>('38248052', 'Weiwei Zhang', 'weiwei zhang')</td><td>{htwang, wys, wwzhang}@nlpr.ia.ac.cn
+</td></tr><tr><td>66a2c229ac82e38f1b7c77a786d8cf0d7e369598</td><td>Proceedings of the 2016 Industrial and Systems Engineering Research Conference
+<br/>H. Yang, Z. Kong, and MD Sarder, eds.
+<br/>A Probabilistic Adaptive Search System
+<br/>for Exploring the Face Space
+<br/>Escuela Superior Politecnica del Litoral (ESPOL)
+<br/>Guayaquil-Ecuador
+</td><td>('3123974', 'Andres G. Abad', 'andres g. abad')<br/>('3044670', 'Luis I. Reyes Castro', 'luis i. reyes castro')</td><td></td></tr><tr><td>66886997988358847615375ba7d6e9eb0f1bb27f</td><td></td><td></td><td></td></tr><tr><td>66837add89caffd9c91430820f49adb5d3f40930</td><td></td><td></td><td></td></tr><tr><td>66a9935e958a779a3a2267c85ecb69fbbb75b8dc</td><td>FAST AND ROBUST FIXED-RANK MATRIX RECOVERY
+<br/>Fast and Robust Fixed-Rank Matrix
+<br/>Recovery
+<br/>Antonio Lopez
+</td><td>('34210410', 'Julio Guerrero', 'julio guerrero')</td><td></td></tr><tr><td>66533107f9abdc7d1cb8f8795025fc7e78eb1122</td><td>Vi a
+<br/>i a Whee
+<br/>W y g Sgy Dae i iy g S g iz ad Ze ga Biey
+<br/>y EECS AST 373 1  g Dg Y g G  Taej 305 701 REA
+<br/>z VR Cee ETR 161 ajg Dg Y g G  Taej 305 350 REA
+<br/>Abac
+<br/>Thee exi he c eaive aciviy bewee a h
+<br/>a beig ad ehabi
+<br/>a eae ehabi
+<br/>e ad ha he bee(cid:12) f ehabi
+<br/> ch a ai
+<br/>eadig i e f he eeia
+<br/>fied
+<br/>cf ad afey f a
+<br/>a
+<br/>bic a ye ARES  ad i h a b
+<br/>ieaci ech
+<br/>ech
+<br/>a
+<br/>vi a
+<br/>ecgizig he iive ad egaive eaig f he
+<br/> e i efed  he bai f chage f he facia
+<br/>exei a d
+<br/> e iei whi
+<br/> e wih a beveage. F he eÆcie vi a
+<br/>i ceig
+<br/>c
+<br/>ed e(cid:11)ec f he bic a. The vi a
+<br/>wih e(cid:11)ecive iei eadig i  ccef
+<br/> eve a beveage f he e.
+<br/>d ci
+<br/>Whee
+<br/>ai he e
+<br/>ca i ey ad  f ci i
+<br/>ye ci f a weed whee
+<br/>a ad ha  
+<br/>he whee
+<br/>he bic a ad h  ake ib
+<br/>exiece f a e ad a b i he ae evi
+<br/>e.
+<br/> hi cae he e eed  ieac wih
+<br/>he bic a i cfab
+<br/>Fig e 1: The whee
+<br/>h a b ieaci ech
+<br/>eve i ha bee eed ha ay diÆc
+<br/>i h a bf ieaci i exiig ehabi
+<br/>b. F exa
+<br/>a ake a high cgiive
+<br/>hyica
+<br/>eaig jyick dexe 
+<br/>de
+<br/>ai e eed ha he  diÆc
+<br/>ig ehabi
+<br/>a
+<br/>id a he begiig [4]. Theefe h a fied
+<br/>h a b ieaci i e f eeia
+<br/>i a whee
+<br/> hi ae we cide he whee
+<br/>bic ye ARES AST Rehabi
+<br/>gieeig Sevice ye  which we ae deve
+<br/>a a evice bic ye f he diab
+<br/>e
+<br/>i e Fig. 1. Ag h a b ieaci ech
+<br/>i e vi a
+</td><td></td><td>zbie@ee.kai.ac.k
+</td></tr><tr><td>66810438bfb52367e3f6f62c24f5bc127cf92e56</td><td>Face Recognition of Illumination Tolerance in 2D
+<br/>Subspace Based on the Optimum Correlation
+<br/>Filter
+<br/>Xu Yi
+<br/>Department of Information Engineering, Hunan Industry Polytechnic, Changsha, China
+<br/>images will be tested to project
+</td><td></td><td></td></tr><tr><td>66af2afd4c598c2841dbfd1053bf0c386579234e</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Context Assisted Face Clustering Framework with
+<br/>Human-in-the-Loop
+<br/>Received: date / Accepted: date
+</td><td>('3338094', 'Liyan Zhang', 'liyan zhang')<br/>('1686199', 'Sharad Mehrotra', 'sharad mehrotra')</td><td></td></tr><tr><td>66f02fbcad13c6ee5b421be2fc72485aaaf6fcb5</td><td>The AAAI-17 Workshop on
+<br/>Human-Aware Artificial Intelligence
+<br/>WS-17-10
+<br/>Using Co-Captured Face, Gaze and Verbal Reactions to Images of
+<br/>Varying Emotional Content for Analysis and Semantic Alignment
+<br/><b>Muhlenberg College</b><br/><b>Rochester Institute of Technology</b><br/><b>Rochester Institute of Technology</b></td><td>('40114708', 'Trevor Walden', 'trevor walden')<br/>('2459642', 'Preethi Vaidyanathan', 'preethi vaidyanathan')<br/>('37459359', 'Reynold Bailey', 'reynold bailey')<br/>('1695716', 'Cecilia O. Alm', 'cecilia o. alm')</td><td>ag249083@muhlenberg.edu
+<br/>tjw5866@rit.edu
+<br/>{pxv1621, emilypx, rjbvcs, coagla}@rit.edu
+</td></tr><tr><td>66e9fb4c2860eb4a15f713096020962553696e12</td><td>A New Urban Objects Detection Framework
+<br/>Using Weakly Annotated Sets
+<br/><b>University of S ao Paulo - USP, S ao Paulo - Brazil</b><br/><b>New York University</b></td><td>('40014199', 'Claudio Silva', 'claudio silva')<br/>('1748049', 'Roberto M. Cesar', 'roberto m. cesar')</td><td>{keiji, gabriel.augusto.ferreira, rmcesar}@usp.br
+<br/>csilva@nyu.edu
+</td></tr><tr><td>66e6f08873325d37e0ec20a4769ce881e04e964e</td><td>Int J Comput Vis (2014) 108:59–81
+<br/>DOI 10.1007/s11263-013-0695-z
+<br/>The SUN Attribute Database: Beyond Categories for Deeper Scene
+<br/>Understanding
+<br/>Received: 27 February 2013 / Accepted: 28 December 2013 / Published online: 18 January 2014
+<br/>© Springer Science+Business Media New York 2014
+</td><td>('40541456', 'Genevieve Patterson', 'genevieve patterson')<br/>('12532254', 'James Hays', 'james hays')</td><td></td></tr><tr><td>661da40b838806a7effcb42d63a9624fcd684976</td><td>53
+<br/>An Illumination Invariant Accurate
+<br/>Face Recognition with Down Scaling
+<br/>of DCT Coefficients
+<br/>Department of Computer Science and Engineering, Amity School of Engineering and Technology, New Delhi, India
+<br/>In this paper, a novel approach for illumination normal-
+<br/>ization under varying lighting conditions is presented.
+<br/>Our approach utilizes the fact that discrete cosine trans-
+<br/>form (DCT) low-frequency coefficients correspond to
+<br/>illumination variations in a digital image. Under varying
+<br/>illuminations, the images captured may have low con-
+<br/>trast; initially we apply histogram equalization on these
+<br/>for contrast stretching. Then the low-frequency DCT
+<br/>coefficients are scaled down to compensate the illumi-
+<br/>nation variations. The value of scaling down factor and
+<br/>the number of low-frequency DCT coefficients, which
+<br/>are to be rescaled, are obtained experimentally. The
+<br/>classification is done using k−nearest neighbor classi-
+<br/>fication and nearest mean classification on the images
+<br/>obtained by inverse DCT on the processed coefficients.
+<br/>The correlation coefficient and Euclidean distance ob-
+<br/>tained using principal component analysis are used as
+<br/>distance metrics in classification. We have tested our
+<br/>face recognition method using Yale Face Database B.
+<br/>The results show that our method performs without any
+<br/>error (100% face recognition performance), even on the
+<br/>most extreme illumination variations. There are different
+<br/>schemes in the literature for illumination normalization
+<br/>under varying lighting conditions, but no one is claimed
+<br/>to give 100% recognition rate under all illumination
+<br/>variations for this database. The proposed technique is
+<br/>computationally efficient and can easily be implemented
+<br/>for real time face recognition system.
+<br/>Keywords: discrete cosine transform, correlation co-
+<br/>efficient, face recognition, illumination normalization,
+<br/>nearest neighbor classification
+<br/>1. Introduction
+<br/>Two-dimensional pattern classification plays a
+<br/>crucial role in real-world applications. To build
+<br/>high-performance surveillance or information
+<br/>security systems, face recognition has been
+<br/>known as the key application attracting enor-
+<br/>mous researchers highlighting on related topics
+<br/>[1,2]. Even though current machine recognition
+<br/>systems have reached a certain level of matu-
+<br/>rity, their success is limited by the real appli-
+<br/>cations constraints, like pose, illumination and
+<br/>expression. The FERET evaluation shows that
+<br/>the performance of a face recognition system
+<br/>decline seriously with the change of pose and
+<br/>illumination conditions [31].
+<br/>To solve the variable illumination problem a
+<br/>variety of approaches have been proposed [3, 7-
+<br/>11, 26-29]. Early work in illumination invariant
+<br/>face recognition focused on image representa-
+<br/>tions that are mostly insensitive to changes in
+<br/>illumination. There were approaches in which
+<br/>the image representations and distance mea-
+<br/>sures were evaluated on a tightly controlled face
+<br/>database that varied the face pose, illumination,
+<br/>and expression. The image representations in-
+<br/>clude edge maps, 2D Gabor-like filters, first and
+<br/>second derivatives of the gray-level image, and
+<br/>the logarithmic transformations of the intensity
+<br/>image along with these representations [4].
+<br/>The different approaches to solve the prob-
+<br/>lem of illumination invariant face recognition
+<br/>can be broadly classified into two main cate-
+<br/>gories. The first category is named as passive
+<br/>approach in which the visual spectrum images
+<br/>are analyzed to overcome this problem. The
+<br/>approaches belonging to other category named
+<br/>active, attempt to overcome this problem by
+<br/>employing active imaging techniques to obtain
+<br/>face images captured in consistent illumina-
+<br/>tion condition, or images of illumination invari-
+<br/>ant modalities. There is a hierarchical catego-
+<br/>rization of these two approaches. An exten-
+<br/>sive review of both approaches is given in [5].
+</td><td>('2650871', 'Virendra P. Vishwakarma', 'virendra p. vishwakarma')<br/>('2100294', 'Sujata Pandey', 'sujata pandey')<br/>('11690561', 'M. N. Gupta', 'm. n. gupta')</td><td></td></tr><tr><td>66886f5af67b22d14177119520bd9c9f39cdd2e6</td><td>T. KOBAYASHI: LEARNING ADDITIVE KERNEL
+<br/>Learning Additive Kernel For Feature
+<br/>Transformation and Its Application to CNN
+<br/>Features
+<br/><b>National Institute of Advanced Industrial</b><br/>Science and Technology
+<br/>Tsukuba, Japan
+</td><td>('1800592', 'Takumi Kobayashi', 'takumi kobayashi')</td><td>takumi.kobayashi@aist.go.jp
+</td></tr><tr><td>3edb0fa2d6b0f1984e8e2c523c558cb026b2a983</td><td>Automatic Age Estimation Based on
+<br/>Facial Aging Patterns
+</td><td>('1735299', 'Xin Geng', 'xin geng')<br/>('1692625', 'Zhi-Hua Zhou', 'zhi-hua zhou')<br/>('2848275', 'Kate Smith-Miles', 'kate smith-miles')</td><td></td></tr><tr><td>3e69ed088f588f6ecb30969bc6e4dbfacb35133e</td><td>ACEEE Int. J. on Information Technology, Vol. 01, No. 02, Sep 2011
+<br/>Improving Performance of Texture Based Face
+<br/>Recognition Systems by Segmenting Face Region
+<br/><b>St. Xavier s Catholic College of Engineering, Nagercoil, India</b><br/><b>Manonmaniam Sundaranar University, Tirunelveli, India</b></td><td>('9375880', 'R. Reena Rose', 'r. reena rose')<br/>('3311251', 'A. Suruliandi', 'a. suruliandi')</td><td>mailtoreenarose@yahoo.in
+<br/>suruliandi@yahoo.com
+</td></tr><tr><td>3e0a1884448bfd7f416c6a45dfcdfc9f2e617268</td><td>Understanding and Controlling User Linkability in
+<br/>Decentralized Learning
+<br/><b>Max Planck Institute for Informatics</b><br/>Saarland Informatics Campus
+<br/>Saarbrücken, Germany
+</td><td>('9517443', 'Tribhuvanesh Orekondy', 'tribhuvanesh orekondy')<br/>('2390510', 'Seong Joon Oh', 'seong joon oh')<br/>('1697100', 'Bernt Schiele', 'bernt schiele')</td><td>{orekondy,joon,schiele,mfritz}@mpi-inf.mpg.de
+</td></tr><tr><td>3e4b38b0574e740dcbd8f8c5dfe05dbfb2a92c07</td><td>FACIAL EXPRESSION RECOGNITION WITH LOCAL BINARY PATTERNS
+<br/>AND LINEAR PROGRAMMING
+<br/>Xiaoyi Feng1, 2, Matti Pietikäinen1, Abdenour Hadid1
+<br/>1 Machine Vision Group, Infotech Oulu and Dept. of Electrical and Information Engineering
+<br/><b>P. O. Box 4500 Fin-90014 University of Oulu, Finland</b><br/><b>College of Electronics and Information, Northwestern Polytechnic University</b><br/>710072 Xi’an, China
+<br/>In this work, we propose a novel approach to recognize facial expressions from static
+<br/>images. First, the Local Binary Patterns (LBP) are used to efficiently represent the facial
+<br/>images and then the Linear Programming (LP) technique is adopted to classify the seven
+<br/>facial expressions anger, disgust, fear, happiness, sadness, surprise and neutral.
+<br/>Experimental results demonstrate an average recognition accuracy of 93.8% on the JAFFE
+<br/>database, which outperforms the rates of all other reported methods on the same database.
+<br/>Introduction
+<br/>Facial expression recognition from static
+<br/>images is a more challenging problem
+<br/>than from image sequences because less
+<br/>information for expression actions
+<br/>is
+<br/>available. However, information in a
+<br/>single image is sometimes enough for
+<br/>expression recognition, and
+<br/>in many
+<br/>applications it is also useful to recognize
+<br/>single image’s facial expression.
+<br/>In the recent years, numerous approaches
+<br/>to facial expression analysis from static
+<br/>images have been proposed [1] [2]. These
+<br/>methods
+<br/>face
+<br/>representation and similarity measure.
+<br/>For instance, Zhang [3] used two types of
+<br/>features: the geometric position of 34
+<br/>manually selected fiducial points and a
+<br/>set of Gabor wavelet coefficients at these
+<br/>points. These two types of features were
+<br/>used both independently and jointly with
+<br/>a multi-layer perceptron for classification.
+<br/>Guo and Dyer [4] also adopted a similar
+<br/>face representation, combined with linear
+<br/>to carry out
+<br/>programming
+<br/>selection
+<br/>simultaneous
+<br/>and
+<br/>classifier
+<br/>they reported
+<br/>technique
+<br/>feature
+<br/>training, and
+<br/>differ
+<br/>generally
+<br/>in
+<br/>a
+<br/>simple
+<br/>imperative question
+<br/>better result. Lyons et al. used a similar face
+<br/>representation with
+<br/>LDA-based
+<br/>classification scheme [5]. All the above methods
+<br/>required the manual selection of fiducial points.
+<br/>Buciu et al. used ICA and Gabor representation for
+<br/>facial expression recognition and reported good result
+<br/>on the same database [6]. However, a suitable
+<br/>combination of feature extraction and classification is
+<br/>still one
+<br/>for expression
+<br/>recognition.
+<br/>In this paper, we propose a novel method for facial
+<br/>expression recognition. In the feature extraction step,
+<br/>the Local Binary Pattern (LBP) operator is used to
+<br/>describe facial expressions. In the classification step,
+<br/>seven expressions (anger, disgust, fear, happiness,
+<br/>sadness, surprise and neutral) are decomposed into 21
+<br/>expression pairs such as anger-fear, happiness-
+<br/>sadness etc. 21 classifiers are produced by the Linear
+<br/>Programming (LP) technique, each corresponding to
+<br/>one of the 21 expression pairs. A simple binary tree
+<br/>tournament scheme with pairwise comparisons is
+<br/>used for classifying unknown expressions.
+<br/>Face Representation with Local Binary Patterns
+<br/>
+<br/>Fig.1 shows the basic LBP operator [7], in which the
+<br/>original 3×3 neighbourhood at the left is thresholded
+<br/>by the value of the centre pixel, and a binary pattern
+</td><td></td><td>{xiaoyi,mkp,hadid}@ee.oulu.fi
+<br/>fengxiao@nwpu.edu.cn
+</td></tr><tr><td>3ee7a8107a805370b296a53e355d111118e96b7c</td><td></td><td></td><td></td></tr><tr><td>3ebce6710135d1f9b652815e59323858a7c60025</td><td>Component-based Face Detection
+<br/>(cid:1)Center for Biological and Computational Learning, M.I.T., Cambridge, MA, USA
+<br/><b>cid:2)Honda RandD Americas, Inc., Boston, MA, USA</b><br/><b>University of Siena, Siena, Italy</b></td><td>('1684626', 'Bernd Heisele', 'bernd heisele')</td><td>(cid:1)heisele, serre, tp(cid:2) @ai.mit.edu pontil@dii.unisi.it
+</td></tr><tr><td>3e4acf3f2d112fc6516abcdddbe9e17d839f5d9b</td><td>Deep Value Networks Learn to
+<br/>Evaluate and Iteratively Refine Structured Outputs
+</td><td>('3037160', 'Michael Gygli', 'michael gygli')</td><td></td></tr><tr><td>3e3f305dac4fbb813e60ac778d6929012b4b745a</td><td>Feature sampling and partitioning for visual vocabulary
+<br/>generation on large action classification datasets.
+<br/><b>Oxford Brookes University</b><br/><b>University of Oxford</b></td><td>('3019396', 'Michael Sapienza', 'michael sapienza')<br/>('1754181', 'Fabio Cuzzolin', 'fabio cuzzolin')</td><td></td></tr><tr><td>3ea8a6dc79d79319f7ad90d663558c664cf298d4</td><td></td><td>('40253814', 'IRA COHEN', 'ira cohen')</td><td></td></tr><tr><td>3e4f84ce00027723bdfdb21156c9003168bc1c80</td><td>1979
+<br/>© EURASIP, 2011 - ISSN 2076-1465
+<br/>19th European Signal Processing Conference (EUSIPCO 2011)
+<br/>INTRODUCTION
+</td><td></td><td></td></tr><tr><td>3e04feb0b6392f94554f6d18e24fadba1a28b65f</td><td>14
+<br/>Subspace Image Representation for Facial
+<br/>Expression Analysis and Face Recognition
+<br/>and its Relation to the Human Visual System
+<br/><b>Aristotle University of Thessaloniki GR</b><br/>Thessaloniki, Box 451, Greece.
+<br/>2 Electronics Department, Faculty of Electrical Engineering and Information
+<br/><b>Technology, University of Oradea 410087, Universitatii 1, Romania</b><br/>Summary. Two main theories exist with respect to face encoding and representa-
+<br/>tion in the human visual system (HVS). The first one refers to the dense (holistic)
+<br/>representation of the face, where faces have “holon”-like appearance. The second one
+<br/>claims that a more appropriate face representation is given by a sparse code, where
+<br/>only a small fraction of the neural cells corresponding to face encoding is activated.
+<br/>Theoretical and experimental evidence suggest that the HVS performs face analysis
+<br/>(encoding, storing, face recognition, facial expression recognition) in a structured
+<br/>and hierarchical way, where both representations have their own contribution and
+<br/>goal. According to neuropsychological experiments, it seems that encoding for face
+<br/>recognition, relies on holistic image representation, while a sparse image represen-
+<br/>tation is used for facial expression analysis and classification. From the computer
+<br/>vision perspective, the techniques developed for automatic face and facial expres-
+<br/>sion recognition fall into the same two representation types. Like in Neuroscience,
+<br/>the techniques which perform better for face recognition yield a holistic image rep-
+<br/>resentation, while those techniques suitable for facial expression recognition use a
+<br/>sparse or local image representation. The proposed mathematical models of image
+<br/>formation and encoding try to simulate the efficient storing, organization and coding
+<br/>of data in the human cortex. This is equivalent with embedding constraints in the
+<br/>model design regarding dimensionality reduction, redundant information minimiza-
+<br/>tion, mutual information minimization, non-negativity constraints, class informa-
+<br/>tion, etc. The presented techniques are applied as a feature extraction step followed
+<br/>by a classification method, which also heavily influences the recognition results.
+<br/>Key words: Human Visual System; Dense, Sparse and Local Image Repre-
+<br/>sentation and Encoding, Face and Facial Expression Analysis and Recogni-
+<br/>tion.
+<br/>R.P. W¨urtz (ed.), Organic Computing. Understanding Complex Systems,
+<br/>doi: 10.1007/978-3-540-77657-4 14, © Springer-Verlag Berlin Heidelberg 2008
+</td><td>('2336758', 'Ioan Buciu', 'ioan buciu')<br/>('1698588', 'Ioannis Pitas', 'ioannis pitas')</td><td>pitas@zeus.csd.auth.gr
+<br/>ibuciu@uoradea.ro
+</td></tr><tr><td>3e685704b140180d48142d1727080d2fb9e52163</td><td>Single Image Action Recognition by Predicting
+<br/>Space-Time Saliency
+</td><td>('32998919', 'Marjaneh Safaei', 'marjaneh safaei')<br/>('1691260', 'Hassan Foroosh', 'hassan foroosh')</td><td></td></tr><tr><td>3e51d634faacf58e7903750f17111d0d172a0bf1</td><td>A COMPRESSIBLE TEMPLATE PROTECTION SCHEME
+<br/>FOR FACE RECOGNITION BASED ON SPARSE REPRESENTATION
+<br/><b>Tokyo Metropolitan University</b><br/>6–6 Asahigaoka, Hino-shi, Tokyo 191–0065, Japan
+<br/>† NTT Network Innovation Laboratories, Japan
+</td><td>('32403098', 'Yuichi Muraki', 'yuichi muraki')<br/>('11129971', 'Masakazu Furukawa', 'masakazu furukawa')<br/>('1728060', 'Masaaki Fujiyoshi', 'masaaki fujiyoshi')<br/>('34638424', 'Yoshihide Tonomura', 'yoshihide tonomura')<br/>('1737217', 'Hitoshi Kiya', 'hitoshi kiya')</td><td></td></tr><tr><td>3e40991ab1daa2a4906eb85a5d6a01a958b6e674</td><td>LIPNET: END-TO-END SENTENCE-LEVEL LIPREADING
+<br/><b>University of Oxford, Oxford, UK</b><br/>Google DeepMind, London, UK 2
+<br/>CIFAR, Canada 3
+<br/>{yannis.assael,brendan.shillingford,
+</td><td>('3365565', 'Yannis M. Assael', 'yannis m. assael')<br/>('3144580', 'Brendan Shillingford', 'brendan shillingford')<br/>('1766767', 'Shimon Whiteson', 'shimon whiteson')</td><td>shimon.whiteson,nando.de.freitas}@cs.ox.ac.uk
+</td></tr><tr><td>3e687d5ace90c407186602de1a7727167461194a</td><td>Photo Tagging by Collection-Aware People Recognition
+<br/>UFF
+<br/>UFF
+<br/>Asla S´a
+<br/>FGV
+<br/>IMPA
+</td><td>('2901520', 'Cristina Nader Vasconcelos', 'cristina nader vasconcelos')<br/>('19264449', 'Vinicius Jardim', 'vinicius jardim')<br/>('1746637', 'Paulo Cezar Carvalho', 'paulo cezar carvalho')</td><td>crisnv@ic.uff.br
+<br/>vinicius@id.uff.br
+<br/>asla.sa@fgv.br
+<br/>pcezar@impa.br
+</td></tr><tr><td>3e3a87eb24628ab075a3d2bde3abfd185591aa4c</td><td>Effects of sparseness and randomness of
+<br/>pairwise distance matrix on t-SNE results
+<br/><b>BECS, Aalto University, Helsinki, Finland</b></td><td>('32430508', 'Eli Parviainen', 'eli parviainen')</td><td></td></tr><tr><td>3e207c05f438a8cef7dd30b62d9e2c997ddc0d3f</td><td>Objects as context for detecting their semantic parts
+<br/><b>University of Edinburgh</b></td><td>('20758701', 'Abel Gonzalez-Garcia', 'abel gonzalez-garcia')<br/>('1996209', 'Davide Modolo', 'davide modolo')<br/>('1749692', 'Vittorio Ferrari', 'vittorio ferrari')</td><td>a.gonzalez-garcia@sms.ed.ac.uk
+<br/>davide.modolo@gmail.com
+<br/>vferrari@staffmail.ed.ac.uk
+</td></tr><tr><td>5040f7f261872a30eec88788f98326395a44db03</td><td>PAPAMAKARIOS, PANAGAKIS, ZAFEIRIOU: GENERALISED SCALABLE ROBUST PCA
+<br/>Generalised Scalable Robust Principal
+<br/>Component Analysis
+<br/>Department of Computing
+<br/><b>Imperial College London</b><br/>London, UK
+</td><td>('2369138', 'Georgios Papamakarios', 'georgios papamakarios')<br/>('1780393', 'Yannis Panagakis', 'yannis panagakis')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')</td><td>georgios.papamakarios13@imperial.ac.uk
+<br/>i.panagakis@imperial.ac.uk
+<br/>s.zafeiriou@imperial.ac.uk
+</td></tr><tr><td>50f0c495a214b8d57892d43110728e54e413d47d</td><td>Submitted 8/11; Revised 3/12; Published 8/12
+<br/>Pairwise Support Vector Machines and their Application to Large
+<br/>Scale Problems
+<br/><b>Institute for Numerical Mathematics</b><br/>Technische Universit¨at Dresden
+<br/>01062 Dresden, Germany
+<br/>Cognitec Systems GmbH
+<br/>Grossenhainer Str. 101
+<br/>01127 Dresden, Germany
+<br/>Editor: Corinna Cortes
+</td><td>('25796572', 'Carl Brunner', 'carl brunner')<br/>('1833903', 'Andreas Fischer', 'andreas fischer')<br/>('2201239', 'Klaus Luig', 'klaus luig')<br/>('2439730', 'Thorsten Thies', 'thorsten thies')</td><td>C.BRUNNER@GMX.NET
+<br/>ANDREAS.FISCHER@TU-DRESDEN.DE
+<br/>LUIG@COGNITEC.COM
+<br/>THIES@COGNITEC.COM
+</td></tr><tr><td>501096cca4d0b3d1ef407844642e39cd2ff86b37</td><td>Illumination Invariant Face Image
+<br/>Representation using Quaternions
+<br/>Dayron Rizo-Rodr´ıguez, Heydi M´endez-V´azquez, and Edel Garc´ıa-Reyes
+<br/>Advanced Technologies Application Center. 7a # 21812 b/ 218 and 222,
+<br/>Rpto. Siboney, Playa, P.C. 12200, La Habana, Cuba.
+</td><td></td><td>{drizo,hmendez,egarcia}@cenatav.co.cu
+</td></tr><tr><td>500fbe18afd44312738cab91b4689c12b4e0eeee</td><td>ChaLearn Looking at People 2015 new competitions:
+<br/>Age Estimation and Cultural Event Recognition
+<br/><b>University of Barcelona</b><br/>Computer Vision Center, UAB
+<br/>Jordi Gonz`alez
+<br/>Xavier Bar´o
+<br/>Univ. Aut`onoma de Barcelona
+<br/>Computer Vision Center, UAB
+<br/>Universitat Oberta de Catalunya
+<br/>Computer Vision Center, UAB
+<br/><b>University of Barcelona</b><br/>Univ. Aut`onoma de Barcelona
+<br/>Computer Vision Center, UAB
+<br/><b>University of Barcelona</b><br/>Computer Vision Center, UAB
+<br/>INAOE
+<br/>Ivan Huerta
+<br/><b>University of Venezia</b><br/>Clopinet, Berkeley
+</td><td>('7855312', 'Sergio Escalera', 'sergio escalera')<br/>('40378482', 'Pablo Pardo', 'pablo pardo')<br/>('37811966', 'Junior Fabian', 'junior fabian')<br/>('3305641', 'Marc Oliu', 'marc oliu')<br/>('1742688', 'Hugo Jair Escalante', 'hugo jair escalante')<br/>('1743797', 'Isabelle Guyon', 'isabelle guyon')</td><td>Email: sergio@maia.ub.es
+<br/>Email: ppardoga7@gmail.com
+<br/>Email: poal@cvc.uab.es
+<br/>Email: xbaro@uoc.edu
+<br/>Email: jfabian@cvc.uab.es
+<br/>Email: moliusimon@gmail.com
+<br/>Email: hugo.jair@gmail.com
+<br/>Email: huertacasado@iuav.it
+<br/>Email: guyon@chalearn.org
+</td></tr><tr><td>501eda2d04b1db717b7834800d74dacb7df58f91</td><td></td><td>('3846862', 'Pedro Miguel Neves Marques', 'pedro miguel neves marques')</td><td></td></tr><tr><td>5083c6be0f8c85815ead5368882b584e4dfab4d1</td><td> Please do not quote. In press, Handbook of affective computing. New York, NY: Oxford
+<br/>Automated Face Analysis for Affective Computing
+</td><td>('1737918', 'Jeffrey F. Cohn', 'jeffrey f. cohn')</td><td></td></tr><tr><td>506c2fbfa9d16037d50d650547ad3366bb1e1cde</td><td>Convolutional Channel Features: Tailoring CNN to Diverse Tasks
+<br/>Junjie Yan
+<br/>Zhen Lei
+<br/>Center for Biometrics and Security Research & National Laboratory of Pattern Recognition
+<br/><b>Institute of Automation, Chinese Academy of Sciences, China</b></td><td>('1716231', 'Bin Yang', 'bin yang')<br/>('34679741', 'Stan Z. Li', 'stan z. li')</td><td>{zlei, szli}@nlpr.ia.ac.cn
+<br/>{yb.derek, yanjjie}@gmail.com
+</td></tr><tr><td>500b92578e4deff98ce20e6017124e6d2053b451</td><td></td><td></td><td></td></tr><tr><td>504028218290d68859f45ec686f435f473aa326c</td><td>Multi-Fiber Networks for Video Recognition
+<br/><b>National University of Singapore</b><br/>2 Facebook Research
+<br/><b>Qihoo 360 AI Institute</b></td><td>('1713312', 'Yunpeng Chen', 'yunpeng chen')<br/>('1944225', 'Yannis Kalantidis', 'yannis kalantidis')<br/>('2757639', 'Jianshu Li', 'jianshu li')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')<br/>('33221685', 'Jiashi Feng', 'jiashi feng')</td><td>{chenyunpeng, jianshu}@u.nus.edu, yannisk@fb.com,
+<br/>{eleyans, elefjia}@nus.edu.sg
+</td></tr><tr><td>5058a7ec68c32984c33f357ebaee96c59e269425</td><td>A Comparative Evaluation of Regression Learning
+<br/>Algorithms for Facial Age Estimation
+<br/>1 Herta Security
+<br/>Pau Claris 165 4-B, 08037 Barcelona, Spain
+<br/><b>DPDCE, University IUAV</b><br/>Santa Croce 1957, 30135 Venice, Italy
+</td><td>('1733945', 'Andrea Prati', 'andrea prati')</td><td>carles.fernandez@hertasecurity.com
+<br/>huertacasado@iuav.it, aprati@iuav.it
+</td></tr><tr><td>50ff21e595e0ebe51ae808a2da3b7940549f4035</td><td>IEEE TRANSACTIONS ON LATEX CLASS FILES, VOL. XX, NO. X, AUGUST 2017
+<br/>Age Group and Gender Estimation in the Wild with
+<br/>Deep RoR Architecture
+</td><td>('32164792', 'Ke Zhang', 'ke zhang')<br/>('35038034', 'Ce Gao', 'ce gao')<br/>('3451321', 'Liru Guo', 'liru guo')<br/>('2598874', 'Miao Sun', 'miao sun')<br/>('3451660', 'Xingfang Yuan', 'xingfang yuan')<br/>('3244463', 'Tony X. Han', 'tony x. han')<br/>('2626320', 'Zhenbing Zhao', 'zhenbing zhao')<br/>('2047712', 'Baogang Li', 'baogang li')</td><td></td></tr><tr><td>5042b358705e8d8e8b0655d07f751be6a1565482</td><td>International Journal of
+<br/>Emerging Research in Management &Technology
+<br/>ISSN: 2278-9359 (Volume-4, Issue-8)
+<br/>Research Article
+<br/> August
+<br/> 2015
+<br/>Review on Emotion Detection in Image
+<br/>CSE & PCET, PTU HOD, CSE & PCET, PTU
+<br/> Punjab, India Punj ab, India
+</td><td></td><td></td></tr><tr><td>50e47857b11bfd3d420f6eafb155199f4b41f6d7</td><td>International Journal of Computer, Consumer and Control (IJ3C), Vol. 2, No.1 (2013)
+<br/>3D Human Face Reconstruction Using a Hybrid of Photometric
+<br/>Stereo and Independent Component Analysis
+</td><td>('1734467', 'Cheng-Jian Lin', 'cheng-jian lin')<br/>('3318507', 'Shyi-Shiun Kuo', 'shyi-shiun kuo')<br/>('18305737', 'Hsueh-Yi Lin', 'hsueh-yi lin')<br/>('2911354', 'Cheng-Yi Yu', 'cheng-yi yu')</td><td></td></tr><tr><td>50eb75dfece76ed9119ec543e04386dfc95dfd13</td><td>Learning Visual Entities and their Visual Attributes from Text Corpora
+<br/>Dept. of Computer Science
+<br/>K.U.Leuven, Belgium
+<br/>Dept. of Computer Science
+<br/>K.U.Leuven, Belgium
+<br/>Dept. of Computer Science
+<br/>K.U.Leuven, Belgium
+</td><td>('2955093', 'Erik Boiy', 'erik boiy')<br/>('1797588', 'Koen Deschacht', 'koen deschacht')<br/>('1802161', 'Marie-Francine Moens', 'marie-francine moens')</td><td>erik.boiy@cs.kuleuven.be
+<br/>koen.deschacht@cs.kuleuven.be
+<br/>sien.moens@cs.kuleuven.be
+</td></tr><tr><td>5050807e90a925120cbc3a9cd13431b98965f4b9</td><td>To appear in the ECCV Workshop on Parts and Attributes, Oct. 2012.
+<br/>Unsupervised Learning of Discriminative
+<br/>Relative Visual Attributes
+<br/><b>Boston University</b><br/><b>Hacettepe University</b></td><td>('2863531', 'Shugao Ma', 'shugao ma')<br/>('2011587', 'Nazli Ikizler-Cinbis', 'nazli ikizler-cinbis')</td><td></td></tr><tr><td>50a0930cb8cc353e15a5cb4d2f41b365675b5ebf</td><td></td><td></td><td></td></tr><tr><td>508702ed2bf7d1b0655ea7857dd8e52d6537e765</td><td>ZUO, ORGANISCIAK, SHUM, YANG: SST-VLAD AND SST-FV FOR VAR
+<br/>Saliency-Informed Spatio-Temporal Vector
+<br/>of Locally Aggregated Descriptors and
+<br/>Fisher Vectors for Visual Action Recognition
+<br/>Department of Computer and
+<br/>Information Sciences
+<br/><b>Northumbria University</b><br/>Newcastle upon Tyne, NE1 8ST, UK
+</td><td>('40760781', 'Zheming Zuo', 'zheming zuo')<br/>('34975328', 'Daniel Organisciak', 'daniel organisciak')<br/>('2840036', 'Hubert P. H. Shum', 'hubert p. h. shum')<br/>('1706028', 'Longzhi Yang', 'longzhi yang')</td><td>zheming.zuo@northumbria.ac.uk
+<br/>daniel.organisciak@northumbria.ac.uk
+<br/>hubert.shum@northumbria.ac.uk
+<br/>longzhi.yang@northumbria.ac.uk
+</td></tr><tr><td>50eb2ee977f0f53ab4b39edc4be6b760a2b05f96</td><td>Australian Journal of Basic and Applied Sciences, 11(5) April 2017, Pages: 1-11
+<br/>AUSTRALIAN JOURNAL OF BASIC AND
+<br/>APPLIED SCIENCES
+<br/>ISSN:1991-8178 EISSN: 2309-8414
+<br/>Journal home page: www.ajbasweb.com
+<br/>Emotion Recognition Based on Texture Analysis of Facial Expressions
+<br/>Using Wavelets Transform
+<br/>1Suhaila N. Mohammed and 2Loay E. George
+<br/><b>Assistant Lecturer, College of Science, Baghdad University, Baghdad, Iraq</b><br/><b>College of Science, Baghdad University, Baghdad, Iraq</b><br/>Address For Correspondence:
+<br/><b>Suhaila N. Mohammed, Baghdad University, College of Science, Baghdad, Iraq</b><br/>A R T I C L E I N F O
+<br/>Article history:
+<br/>Received 18 January 2017
+<br/>Accepted 28 March 2017
+<br/>Available online 15 April 2017
+<br/>Keywords:
+<br/>Facial Emotion, Face Detection,
+<br/>Template Based Methods, Texture
+<br/>Based Features, Haar Wavelets
+<br/>Transform, Image Blocking, Neural
+<br/>Network.
+<br/>A B S T R A C T
+<br/>Background: The interests toward developing accurate automatic facial emotion
+<br/>recognition methodologies are growing vastly and still an ever growing research field in
+<br/>the region of computer vision, artificial intelligent and automation. Auto emotion
+<br/>detection systems are demanded in various fields such as medicine, education, driver
+<br/>safety, games, etc. Despite the importance of this issue it still remains an unsolved
+<br/>problem Objective: In this paper a facial based emotion recognition system is
+<br/>introduced. Template based method is used for face region extraction by exploiting
+<br/>human knowledge about face components and the corresponding symmetry property.
+<br/>The system is based on texture features to work as identical feature vector. These
+<br/>features are extracted from face region through using Haar wavelets transform and
+<br/>blocking idea by calculating the energy of each block The feed forward neural network
+<br/>classifier is used for classification task. The network is trained using a training set of
+<br/>samples, and then the generated weights are used to test the recognition ability of the
+<br/>system. Results: JAFFE public dataset is used for system evaluation purpose; it holds
+<br/>213 facial samples for seven basic emotions. The conducted tests on the developed
+<br/>system gave accuracy around 90.05% when the number of blocks is set 4x4.
+<br/>Conclusion: This result is considered the highest when compared with the results of
+<br/>other newly published works, especially those based on texture features in which
+<br/>blocking idea allows the extraction of statistical features according to local energy of
+<br/>each block; this gave chance for more features to work more effectively.
+<br/>INTRODUCTION
+<br/>Due to the rapid development of technologies, it is being required to build a smart system for understanding
+<br/>human emotion (Ruivo et al., 2016). There are different ways to distinguish person emotions such as facial
+<br/>image, voice, shape of body and others. Mehrabian explained that person impression can be expressed through
+<br/>words (verbal part) by 7%, and 38% through tone of voice (vocal part) while the facial image can give the
+<br/>largest rate which reaches to 55% (Rani and Garg, 2014). Also, he indicated that one of the most important ways
+<br/>to display emotions is through facial expressions; where facial image contains much information (such as,
+<br/>person's identification and also about mood and state of mind) which can be used to distinguish human
+<br/>inspiration (Saini and Rana, 2014).
+<br/>Facial emotion recognition is an active area of research with several fields of applications. Some of the
+<br/>significant applications are: feedback system for e-learning, alert system for driving, social robot emotion
+<br/>recognition system, medical practices...etc (Dubey and Singh, 2016).
+<br/>Human emotion is composed of thousands of expressions but in the last decade the focus on analyzing only
+<br/>seven basic facial expressions such as happiness, sadness, surprise, disgust, fear, natural, and anger (Singh and
+<br/>Open Access Journal
+<br/>Published BY AENSI Publication
+<br/>© 2017 AENSI Publisher All rights reserved
+<br/>This work is licensed under the Creative Commons Attribution International License (CC BY).
+<br/>http://creativecommons.org/licenses/by/4.0/
+<br/>To Cite This Article: Suhaila N. Mohammed and Loay E. George., Emotion Recognition Based on Texture Analysis of Facial Expressions
+<br/>Using Wavelets Transform. Aust. J. Basic & Appl. Sci., 11(5): 1-11, 2017
+</td><td></td><td></td></tr><tr><td>50e45e9c55c9e79aaae43aff7d9e2f079a2d787b</td><td>Hindawi Publishing Corporation
+<br/>e Scientific World Journal
+<br/>Volume 2015, Article ID 471371, 18 pages
+<br/>http://dx.doi.org/10.1155/2015/471371
+<br/>Research Article
+<br/>Unbiased Feature Selection in Learning Random Forests for
+<br/>High-Dimensional Data
+<br/><b>Shenzhen Key Laboratory of High Performance Data Mining, Shenzhen Institutes of Advanced Technology</b><br/>Chinese Academy of Sciences, Shenzhen 518055, China
+<br/><b>University of Chinese Academy of Sciences, Beijing 100049, China</b><br/><b>School of Computer Science and Engineering, Water Resources University, Hanoi 10000, Vietnam</b><br/><b>College of Computer Science and Software Engineering, Shenzhen University, Shenzhen 518060, China</b><br/><b>Faculty of Information Technology, Vietnam National University of Agriculture, Hanoi 10000, Vietnam</b><br/>Received 20 June 2014; Accepted 20 August 2014
+<br/>Academic Editor: Shifei Ding
+<br/>License, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly
+<br/>cited.
+<br/>Random forests (RFs) have been widely used as a powerful classification method. However, with the randomization in both bagging
+<br/>samples and feature selection, the trees in the forest tend to select uninformative features for node splitting. This makes RFs
+<br/>have poor accuracy when working with high-dimensional data. Besides that, RFs have bias in the feature selection process where
+<br/>multivalued features are favored. Aiming at debiasing feature selection in RFs, we propose a new RF algorithm, called xRF, to select
+<br/>good features in learning RFs for high-dimensional data. We first remove the uninformative features using 𝑝-value assessment,
+<br/>and the subset of unbiased features is then selected based on some statistical measures. This feature subset is then partitioned into
+<br/>two subsets. A feature weighting sampling technique is used to sample features from these two subsets for building trees. This
+<br/>approach enables one to generate more accurate trees, while allowing one to reduce dimensionality and the amount of data needed
+<br/>for learning RFs. An extensive set of experiments has been conducted on 47 high-dimensional real-world datasets including image
+<br/>datasets. The experimental results have shown that RFs with the proposed approach outperformed the existing random forests in
+<br/>increasing the accuracy and the AUC measures.
+<br/>1. Introduction
+<br/>Random forests (RFs) [1] are a nonparametric method that
+<br/>builds an ensemble model of decision trees from random
+<br/>subsets of features and bagged samples of the training data.
+<br/>RFs have shown excellent performance for both clas-
+<br/>sification and regression problems. RF model works well
+<br/>even when predictive features contain irrelevant features
+<br/>(or noise); it can be used when the number of features is
+<br/>much larger than the number of samples. However, with
+<br/>randomizing mechanism in both bagging samples and feature
+<br/>selection, RFs could give poor accuracy when applied to high
+<br/>dimensional data. The main cause is that, in the process of
+<br/>growing a tree from the bagged sample data, the subspace
+<br/>of features randomly sampled from thousands of features to
+<br/>split a node of the tree is often dominated by uninformative
+<br/>features (or noise), and the tree grown from such bagged
+<br/>subspace of features will have a low accuracy in prediction
+<br/>which affects the final prediction of the RFs. Furthermore,
+<br/>Breiman et al. noted that feature selection is biased in the
+<br/>classification and regression tree (CART) model because it is
+<br/>based on an information criteria, called multivalue problem
+<br/>[2]. It tends in favor of features containing more values, even if
+<br/>these features have lower importance than other ones or have
+<br/>no relationship with the response feature (i.e., containing
+<br/>less missing values, many categorical or distinct numerical
+<br/>values) [3, 4].
+<br/>In this paper, we propose a new random forests algo-
+<br/>rithm using an unbiased feature sampling method to build
+<br/>a good subspace of unbiased features for growing trees.
+</td><td>('40538635', 'Thanh-Tung Nguyen', 'thanh-tung nguyen')<br/>('8192216', 'Joshua Zhexue Huang', 'joshua zhexue huang')<br/>('39340373', 'Thuy Thi Nguyen', 'thuy thi nguyen')<br/>('40538635', 'Thanh-Tung Nguyen', 'thanh-tung nguyen')</td><td>Correspondence should be addressed to Thanh-Tung Nguyen; tungnt@wru.vn
+</td></tr><tr><td>5003754070f3a87ab94a2abb077c899fcaf936a6</td><td>Evaluation of LC-KSVD on UCF101 Action Dataset
+<br/><b>University of Maryland, College Park</b><br/>2Noah’s Ark Lab, Huawei Technologies
+</td><td>('3146162', 'Hyunjong Cho', 'hyunjong cho')<br/>('2445131', 'Hyungtae Lee', 'hyungtae lee')<br/>('34145947', 'Zhuolin Jiang', 'zhuolin jiang')</td><td>cho@cs.umd.edu, htlee@umd.edu, zhuolin.jiang@huawei.com
+</td></tr><tr><td>503db524b9a99220d430e741c44cd9c91ce1ddf8</td><td>Who’s Better, Who’s Best: Skill Determination in Video using Deep Ranking
+<br/><b>University of Bristol, Bristol, UK</b><br/>Walterio Mayol-Cuevas
+</td><td>('28798386', 'Hazel Doughty', 'hazel doughty')<br/>('1728459', 'Dima Damen', 'dima damen')</td><td><Firstname>.<Surname>@bristol.ac.uk
+</td></tr><tr><td>50d15cb17144344bb1879c0a5de7207471b9ff74</td><td>Divide, Share, and Conquer: Multi-task
+<br/>Attribute Learning with Selective Sharing
+</td><td>('3197570', 'Chao-Yeh Chen', 'chao-yeh chen')<br/>('2228235', 'Dinesh Jayaraman', 'dinesh jayaraman')<br/>('1693054', 'Fei Sha', 'fei sha')<br/>('1794409', 'Kristen Grauman', 'kristen grauman')</td><td></td></tr><tr><td>50d961508ec192197f78b898ff5d44dc004ef26d</td><td>International Journal of Computer science & Information Technology (IJCSIT), Vol 1, No 2, November 2009
+<br/>A LOW INDEXED CONTENT BASED
+<br/>NEURAL NETWORK APPROACH FOR
+<br/>NATURAL OBJECTS RECOGNITION
+<br/>1Research Scholar, JNTUH, Hyderabad, AP. India
+<br/><b>Principal, JNTUH College of Engineering, jagitial, Karimnagar, AP, India</b><br/><b>Principal, Chaithanya Institute of Engineering and Technology, Kakinada, AP, India</b></td><td></td><td> shyam_gunda2002@yahoo.co.in
+<br/>govardhan_cse@yahoo.co.in
+<br/>tv_venkat@yahoo.com
+</td></tr><tr><td>50ccc98d9ce06160cdf92aaf470b8f4edbd8b899</td><td>Towards Robust Cascaded Regression for Face Alignment in the Wild
+<br/>J¨urgen Beyerer2,1
+<br/><b>Vision and Fusion Laboratory (IES), Karlsruhe Institute of Technology (KIT</b><br/><b>Fraunhofer Institute of Optronics, System Technologies and Image Exploitation (Fraunhofer IOSB</b><br/>3Signal Processing Laboratory (LTS5), ´Ecole Polytechnique F´ed´erale de Lausanne (EPFL)
+</td><td>('1797975', 'Chengchao Qu', 'chengchao qu')<br/>('1697965', 'Hua Gao', 'hua gao')<br/>('2233872', 'Eduardo Monari', 'eduardo monari')<br/>('1710257', 'Jean-Philippe Thiran', 'jean-philippe thiran')</td><td>firstname.lastname@iosb.fraunhofer.de
+<br/>firstname.lastname@epfl.ch
+</td></tr><tr><td>5028c0decfc8dd623c50b102424b93a8e9f2e390</td><td>Published as a conference paper at ICLR 2017
+<br/>REVISITING CLASSIFIER TWO-SAMPLE TESTS
+<br/>1Facebook AI Research, 2WILLOW project team, Inria / ENS / CNRS
+</td><td>('3016461', 'David Lopez-Paz', 'david lopez-paz')<br/>('2093491', 'Maxime Oquab', 'maxime oquab')</td><td>dlp@fb.com, maxime.oquab@inria.fr
+</td></tr><tr><td>505e55d0be8e48b30067fb132f05a91650666c41</td><td>A Model of Illumination Variation for Robust Face Recognition
+<br/>Institut Eur´ecom
+<br/>Multimedia Communications Department
+<br/>BP 193, 06904 Sophia Antipolis Cedex, France
+</td><td>('1723883', 'Florent Perronnin', 'florent perronnin')<br/>('1709849', 'Jean-Luc Dugelay', 'jean-luc dugelay')</td><td>fflorent.perronnin, jean-luc.dugelayg@eurecom.fr
+</td></tr><tr><td>507c9672e3673ed419075848b4b85899623ea4b0</td><td>Faculty of Informatics
+<br/><b>Institute for Anthropomatics</b><br/>Chair Prof. Dr.-Ing. R. Stiefelhagen
+<br/>Facial Image Processing and Analysis Group
+<br/>Multi-View Facial Expression
+<br/>Classification
+<br/>ADVISORS
+<br/>MARCH 2011
+<br/><b>KIT University of the State of Baden-W rttemberg and National Laboratory of the Helmholtz Association</b><br/>www.kit.edu
+</td><td>('33357889', 'Nikolas Hesse', 'nikolas hesse')<br/>('38113750', 'Hua Gao', 'hua gao')<br/>('40303076', 'Tobias Gehrig', 'tobias gehrig')</td><td></td></tr><tr><td>50c0de2cccf7084a81debad5fdb34a9139496da0</td><td>ORIGINAL RESEARCH
+<br/>published: 30 November 2016
+<br/>doi: 10.3389/fict.2016.00027
+<br/>The Influence of Annotation, Corpus
+<br/>Design, and Evaluation on the
+<br/>Outcome of Automatic Classification
+<br/>of Human Emotions
+<br/><b>Institute of Neural Information Processing, Ulm University, Ulm, Germany</b><br/>The integration of emotions into human–computer interaction applications promises a
+<br/>more natural dialog between the user and the technical system operators. In order
+<br/>to construct such machinery, continuous measuring of the affective state of the user
+<br/>becomes essential. While basic research that is aimed to capture and classify affective
+<br/>signals has progressed, many issues are still prevailing that hinder easy integration
+<br/>of affective signals into human–computer interaction. In this paper, we identify and
+<br/>investigate pitfalls in three steps of the work-flow of affective classification studies. It starts
+<br/>with the process of collecting affective data for the purpose of training suitable classifiers.
+<br/>Emotional data have to be created in which the target emotions are present. Therefore,
+<br/>human participants have to be stimulated suitably. We discuss the nature of these stimuli,
+<br/>their relevance to human–computer interaction, and the repeatability of the data recording
+<br/>setting. Second, aspects of annotation procedures are investigated, which include the
+<br/>variances of
+<br/>individual raters, annotation delay, the impact of the used annotation
+<br/>tool, and how individual ratings are combined to a unified label. Finally, the evaluation
+<br/>protocol
+<br/>is examined, which includes, among others, the impact of the performance
+<br/>measure on the accuracy of a classification model. We hereby focus especially on the
+<br/>evaluation of classifier outputs against continuously annotated dimensions. Together with
+<br/>the discussed problems and pitfalls and the ways how they affect the outcome, we
+<br/>provide solutions and alternatives to overcome these issues. As the final part of the paper,
+<br/>we sketch a recording scenario and a set of supporting technologies that can contribute
+<br/>to solve many of the issues mentioned above.
+<br/>Keywords: affective computing, affective labeling, human–computer interaction, performance measures, machine
+<br/>guided labeling
+<br/>1. INTRODUCTION
+<br/>The integration of affective signals into human–computer interaction (HCI) is generally considered
+<br/>beneficial to improve the interaction process (Picard, 2000). The analysis of affective data in HCI
+<br/>can be considered both cumbersome and prone to errors. The main reason for this is that the
+<br/>important steps in affective classification are particularly difficult. This includes difficulties that arise
+<br/>in the recording of suitable data collections comprising episodes of affective HCI, in the uncertainty
+<br/>and subjectivity of the annotations of these data, and finally in the evaluation protocol that should
+<br/>account for the continuous nature of the application.
+<br/>Edited by:
+<br/>Anna Esposito,
+<br/>Seconda Università degli Studi di
+<br/>Napoli, Italy
+<br/>Reviewed by:
+<br/>Anna Pribilova,
+<br/><b>Slovak University of Technology in</b><br/>Bratislava, Slovakia
+<br/>Alda Troncone,
+<br/>Seconda Università degli Studi di
+<br/>Napoli, Italy
+<br/>*Correspondence:
+<br/>contributed equally to this work.
+<br/>Specialty section:
+<br/>This article was submitted to
+<br/>Human-Media Interaction, a section
+<br/>of the journal Frontiers in ICT
+<br/>Received: 15 May 2016
+<br/>Accepted: 26 October 2016
+<br/>Published: 30 November 2016
+<br/>Citation:
+<br/>Kächele M, Schels M and
+<br/>Schwenker F (2016) The Influence of
+<br/>Annotation, Corpus Design, and
+<br/>Evaluation on the Outcome of
+<br/>Automatic Classification of Human
+<br/>Emotions.
+<br/>doi: 10.3389/fict.2016.00027
+<br/>Frontiers in ICT | www.frontiersin.org
+<br/>November 2016 | Volume 3 | Article 27
+</td><td>('2144395', 'Markus Kächele', 'markus kächele')<br/>('3037635', 'Martin Schels', 'martin schels')<br/>('1685857', 'Friedhelm Schwenker', 'friedhelm schwenker')<br/>('2144395', 'Markus Kächele', 'markus kächele')<br/>('2144395', 'Markus Kächele', 'markus kächele')<br/>('3037635', 'Martin Schels', 'martin schels')</td><td>markus.kaechele@uni-ulm.de
+</td></tr><tr><td>680d662c30739521f5c4b76845cb341dce010735</td><td>Int J Comput Vis (2014) 108:82–96
+<br/>DOI 10.1007/s11263-014-0716-6
+<br/>Part and Attribute Discovery from Relative Annotations
+<br/>Received: 25 February 2013 / Accepted: 14 March 2014 / Published online: 26 April 2014
+<br/>© Springer Science+Business Media New York 2014
+</td><td>('35208858', 'Subhransu Maji', 'subhransu maji')</td><td></td></tr><tr><td>68f89c1ee75a018c8eff86e15b1d2383c250529b</td><td>Final Report for Project Localizing Objects and
+<br/>Actions in Videos Using Accompanying Text
+<br/><b>Johns Hopkins University, Center for Speech and Language Processing</b><br/>Summer Workshop 2010
+<br/>J. Neumann, StreamSage/Comcast
+<br/><b>F.Ferraro, University of Rochester</b><br/><b>H. He, Honkong Polytechnic University</b><br/><b>Y. Li, University of Maryland</b><br/><b>C.L. Teo, University of Maryland</b><br/>November 4, 2010
+</td><td>('3167986', 'C. Fermueller', 'c. fermueller')<br/>('1743020', 'J. Kosecka', 'j. kosecka')<br/>('2601166', 'E. Tzoukermann', 'e. tzoukermann')<br/>('2995090', 'R. Chaudhry', 'r. chaudhry')<br/>('1937619', 'I. Perera', 'i. perera')<br/>('9133363', 'B. Sapp', 'b. sapp')<br/>('38873583', 'G. Singh', 'g. singh')<br/>('1870728', 'X. Yi', 'x. yi')</td><td></td></tr><tr><td>68a2ee5c5b76b6feeb3170aaff09b1566ec2cdf5</td><td>AGE CLASSIFICATION BASED ON
+<br/>SIMPLE LBP TRANSITIONS
+<br/><b>Aditya institute of Technology and Management, Tekkalli-532 201, A.P</b><br/>2Dr. V.Vijaya Kumar
+<br/>3A. Obulesu
+<br/>2Dean-Computer Sciences (CSE & IT), Anurag Group of Institutions, Hyderabad – 500088, A.P., India.,
+<br/> 3Asst. Professor, Dept. Of CSE, Anurag Group of Institutions, Hyderabad – 500088, A.P., India.
+</td><td>('34964075', 'Satyanarayana Murty', 'satyanarayana murty')</td><td>India, 1gsn_73@yahoo.co.in
+<br/>2drvvk144@gmail.com
+<br/>3obulesh.a@gmail.com
+</td></tr><tr><td>68d2afd8c5c1c3a9bbda3dd209184e368e4376b9</td><td>Representation Learning by Rotating Your Faces
+</td><td>('1849929', 'Luan Tran', 'luan tran')<br/>('2399004', 'Xi Yin', 'xi yin')<br/>('1759169', 'Xiaoming Liu', 'xiaoming liu')</td><td></td></tr><tr><td>68a3f12382003bc714c51c85fb6d0557dcb15467</td><td></td><td></td><td></td></tr><tr><td>6859b891a079a30ef16f01ba8b85dc45bd22c352</td><td>International Journal of Emerging Technology and Advanced Engineering
+<br/>Website: www.ijetae.com (ISSN 2250-2459, ISO 9001:2008 Certified Journal, Volume 4, Issue 10, October 2014)
+<br/>2D Face Recognition Based on PCA & Comparison of
+<br/>Manhattan Distance, Euclidean Distance & Chebychev
+<br/>Distance
+<br/><b>RCC Institute of Information Technology, Kolkata, India</b></td><td>('2467416', 'Rajib Saha', 'rajib saha')<br/>('2144187', 'Sayan Barman', 'sayan barman')</td><td></td></tr><tr><td>68d08ed9470d973a54ef7806318d8894d87ba610</td><td>Drive Video Analysis for the Detection of Traffic Near-Miss Incidents
+</td><td>('1730200', 'Hirokatsu Kataoka', 'hirokatsu kataoka')<br/>('5014206', 'Teppei Suzuki', 'teppei suzuki')<br/>('6881850', 'Shoko Oikawa', 'shoko oikawa')<br/>('1720770', 'Yasuhiro Matsui', 'yasuhiro matsui')<br/>('1732705', 'Yutaka Satoh', 'yutaka satoh')</td><td></td></tr><tr><td>68caf5d8ef325d7ea669f3fb76eac58e0170fff0</td><td></td><td></td><td></td></tr><tr><td>68003e92a41d12647806d477dd7d20e4dcde1354</td><td>ISSN: 0976-9102 (ONLINE)
+<br/>DOI: 10.21917/ijivp.2013.0101
+<br/> ICTACT JOURNAL ON IMAGE AND VIDEO PROCESSING, NOVEMBER 2013, VOLUME: 04, ISSUE: 02
+<br/>FUZZY BASED IMAGE DIMENSIONALITY REDUCTION USING SHAPE
+<br/>PRIMITIVES FOR EFFICIENT FACE RECOGNITION
+<br/>1Deprtment of Computer Science and Engineering, Nalla Narasimha Reddy Education Society’s Group of Institutions, India
+<br/><b>Deprtment of Computer Science and Engineering, JNTUA College of Engineering, India</b><br/>3Deprtment of Computer Science and Engineering, Anurag Group of Institutions, India
+</td><td>('2086540', 'P. Chandra', 'p. chandra')<br/>('2803943', 'B. Eswara Reddy', 'b. eswara reddy')<br/>('36754879', 'Vijaya Kumar', 'vijaya kumar')</td><td>E-Mail: pchandureddy@yahoo.com
+<br/>E-mail: eswarcsejntu@gmail.com
+<br/>E-mail: vijayvakula@yahoo.com
+</td></tr><tr><td>68d4056765c27fbcac233794857b7f5b8a6a82bf</td><td>Example-Based Face Shape Recovery Using the
+<br/>Zenith Angle of the Surface Normal
+<br/>Mario Castel´an1, Ana J. Almaz´an-Delf´ın2, Marco I. Ram´ırez-Sosa-Mor´an3,
+<br/>and Luz A. Torres-M´endez1
+<br/>1 CINVESTAV Campus Saltillo, Ramos Arizpe 25900, Coahuila, M´exico
+<br/>2 Universidad Veracruzana, Facultad de F´ısica e Inteligencia Artificial, Xalapa 91000,
+<br/>3 ITESM, Campus Saltillo, Saltillo 25270, Coahuila, M´exico
+<br/>Veracruz, M´exico
+</td><td></td><td>mario.castelan@cinvestav.edu.mx
+</td></tr><tr><td>684f5166d8147b59d9e0938d627beff8c9d208dd</td><td>IEEE TRANS. NNLS, JUNE 2017
+<br/>Discriminative Block-Diagonal Representation
+<br/>Learning for Image Recognition
+</td><td>('38448016', 'Zheng Zhang', 'zheng zhang')<br/>('40065614', 'Yong Xu', 'yong xu')<br/>('40799321', 'Ling Shao', 'ling shao')<br/>('49500178', 'Jian Yang', 'jian yang')</td><td></td></tr><tr><td>68c5238994e3f654adea0ccd8bca29f2a24087fc</td><td>PLSA-BASED ZERO-SHOT LEARNING
+<br/>Centre of Image and Signal Processing
+<br/>Faculty of Computer Science & Information Technology
+<br/><b>University of Malaya, 50603 Kuala Lumpur, Malaysia</b></td><td>('2800072', 'Wai Lam Hoo', 'wai lam hoo')<br/>('2863960', 'Chee Seng Chan', 'chee seng chan')</td><td>{wailam88@siswa.um.edu.my; cs.chan@um.edu.my}
+</td></tr><tr><td>68cf263a17862e4dd3547f7ecc863b2dc53320d8</td><td></td><td></td><td></td></tr><tr><td>68e9c837431f2ba59741b55004df60235e50994d</td><td>Detecting Faces Using Region-based Fully
+<br/>Convolutional Networks
+<br/>Tencent AI Lab, China
+</td><td>('1996677', 'Yitong Wang', 'yitong wang')</td><td>{yitongwang,denisji,encorezhou,hawelwang,michaelzfli}@tencent.com
+</td></tr><tr><td>685f8df14776457c1c324b0619c39b3872df617b</td><td>Master of Science Thesis in Electrical Engineering
+<br/><b>Link ping University</b><br/>Face Recognition with
+<br/>Preprocessing and Neural
+<br/>Networks
+</td><td></td><td></td></tr><tr><td>68484ae8a042904a95a8d284a7f85a4e28e37513</td><td>Spoofing Deep Face Recognition with Custom Silicone Masks
+<br/>S´ebastien Marcel
+<br/><b>Idiap Research Institute. Centre du Parc, Rue Marconi 19, Martigny (VS), Switzerland</b></td><td>('1952348', 'Sushil Bhattacharjee', 'sushil bhattacharjee')</td><td>{sushil.bhattacharjee; amir.mohammadi; sebastien.marcel}@idiap.ch
+</td></tr><tr><td>687e17db5043661f8921fb86f215e9ca2264d4d2</td><td>A Robust Elastic and Partial Matching Metric for Face Recognition
+<br/>Microsoft Corporate
+<br/>One Microsoft Way, Redmond, WA 98052
+</td><td>('1745420', 'Gang Hua', 'gang hua')<br/>('33474090', 'Amir Akbarzadeh', 'amir akbarzadeh')</td><td>{ganghua, amir}@microsoft.com
+</td></tr><tr><td>688754568623f62032820546ae3b9ca458ed0870</td><td>bioRxiv preprint first posted online Sep. 27, 2016;
+<br/>doi:
+<br/>http://dx.doi.org/10.1101/077784
+<br/>.
+<br/>The copyright holder for this preprint (which was not
+<br/>peer-reviewed) is the author/funder. It is made available under a
+<br/>CC-BY-NC-ND 4.0 International license
+<br/>.
+<br/>Resting high frequency heart rate variability is not associated with the
+<br/>recognition of emotional facial expressions in healthy human adults.
+<br/>1 Univ. Grenoble Alpes, LPNC, F-38040, Grenoble, France
+<br/>2 CNRS, LPNC UMR 5105, F-38040, Grenoble, France
+<br/>3 IPSY, Université Catholique de Louvain, Louvain-la-Neuve, Belgium
+<br/>4 Fund for Scientific Research (FRS-FNRS), Brussels, Belgium
+<br/>Correspondence concerning this article should be addressed to Brice Beffara, Office E250, Institut
+<br/>de Recherches en Sciences Psychologiques, IPSY - Place du Cardinal Mercier, 10 bte L3.05.01 B-1348
+<br/>Author note
+<br/>This study explores whether the myelinated vagal connection between the heart and the brain
+<br/>is involved in emotion recognition. The Polyvagal theory postulates that the activity of the
+<br/>myelinated vagus nerve underlies socio-emotional skills. It has been proposed that the perception
+<br/>of emotions could be one of this skills dependent on heart-brain interactions. However, this
+<br/>assumption was differently supported by diverging results suggesting that it could be related to
+<br/>confounded factors. In the current study, we recorded the resting state vagal activity (reflected by
+<br/>High Frequency Heart Rate Variability, HF-HRV) of 77 (68 suitable for analysis) healthy human
+<br/>adults and measured their ability to identify dynamic emotional facial expressions. Results show
+<br/>that HF-HRV is not related to the recognition of emotional facial expressions in healthy human
+<br/>adults. We discuss this result in the frameworks of the polyvagal theory and the neurovisceral
+<br/>integration model.
+<br/>Keywords: HF-HRV; autonomic flexibility; emotion identification; dynamic EFEs; Polyvagal
+<br/>theory; Neurovisceral integration model
+<br/>Word count: 9810
+<br/>10
+<br/>11
+<br/>12
+<br/>13
+<br/>14
+<br/>15
+<br/>16
+<br/>17
+<br/>Introduction
+<br/>The behavior of an animal is said social when involved in in-
+<br/>teractions with other animals (Ward & Webster, 2016). These
+<br/>interactions imply an exchange of information, signals, be-
+<br/>tween at least two animals. In humans, the face is an efficient
+<br/>communication channel, rapidly providing a high quantity of
+<br/>information. Facial expressions thus play an important role
+<br/>in the transmission of emotional information during social
+<br/>interactions. The result of the communication is the combina-
+<br/>tion of transmission from the sender and decoding from the
+<br/>receiver (Jack & Schyns, 2015). As a consequence, the quality
+<br/>of the interaction depends on the ability to both produce and
+<br/>identify facial expressions. Emotions are therefore a core
+<br/>feature of social bonding (Spoor & Kelly, 2004). Health
+<br/>of individuals and groups depend on the quality of social
+<br/>bonds in many animals (Boyer, Firat, & Leeuwen, 2015; S. L.
+<br/>Brown & Brown, 2015; Neuberg, Kenrick, & Schaller, 2011),
+<br/>18
+<br/>19
+<br/>20
+<br/>21
+<br/>22
+<br/>23
+<br/>24
+<br/>25
+<br/>26
+<br/>27
+<br/>28
+<br/>29
+<br/>30
+<br/>31
+<br/>32
+<br/>33
+<br/>34
+<br/>35
+<br/>especially in highly social species such as humans (Singer &
+<br/>Klimecki, 2014).
+<br/>The recognition of emotional signals produced by others is
+<br/>not independent from its production by oneself (Niedenthal,
+<br/>2007). The muscles of the face involved in the production of
+<br/>a facial expressions are also activated during the perception of
+<br/>the same facial expressions (Dimberg, Thunberg, & Elmehed,
+<br/>2000). In other terms, the facial mimicry of the perceived
+<br/>emotional facial expression (EFE) triggers its sensorimotor
+<br/>simulation in the brain, which improves the recognition abili-
+<br/>ties (Wood, Rychlowska, Korb, & Niedenthal, 2016). Beyond
+<br/>that, the emotion can be seen as the body -including brain-
+<br/>dynamic itself (Gallese & Caruana, 2016) which helps to un-
+<br/>derstand why behavioral simulation is necessary to understand
+<br/>the emotion.
+<br/>The interplay between emotion production, emotion percep-
+<br/>tion, social communication and body dynamics has been sum-
+<br/>marized in the framework of the polyvagal theory (Porges,
+</td><td>('37799937', 'Nicolas Vermeulen', 'nicolas vermeulen')<br/>('2634712', 'Martial Mermillod', 'martial mermillod')</td><td>Louvain-la-Neuve, Belgium. E-mail: brice.beffara@univ-grenoble-alpes.fr
+</td></tr><tr><td>68f9cb5ee129e2b9477faf01181cd7e3099d1824</td><td>ALDA Algorithms for Online Feature Extraction
+</td><td>('2784763', 'Youness Aliyari Ghassabeh', 'youness aliyari ghassabeh')<br/>('2060085', 'Hamid Abrishami Moghaddam', 'hamid abrishami moghaddam')</td><td></td></tr><tr><td>68bf34e383092eb827dd6a61e9b362fcba36a83a</td><td></td><td></td><td></td></tr><tr><td>68d40176e878ebffbc01ffb0556e8cb2756dd9e9</td><td>International Journal of Engineering Research and Applications (IJERA) ISSN: 2248-9622
+<br/>International Conference on Humming Bird ( 01st March 2014)
+<br/>RESEARCH ARTICLE
+<br/> OPEN ACCESS
+<br/>Locality Repulsion Projection and Minutia Extraction Based
+<br/>Similarity Measure for Face Recognition
+<br/><b>AgnelAnushya P. is currently pursuing M.E (Computer Science and engineering) at Vins Christian college of</b><br/>2Ramya P. is currently working as an Asst. Professor in the dept. of Information Technology at Vins Christian
+<br/><b>college of Engineering</b></td><td></td><td>Engineering. e-mail:anushyase@gmail.com.
+</td></tr><tr><td>68c4a1d438ea1c6dfba92e3aee08d48f8e7f7090</td><td>AgeNet: Deeply Learned Regressor and Classifier for
+<br/>Robust Apparent Age Estimation
+<br/>1Key Lab of Intelligent Information Processing of Chinese Academy of Sciences (CAS),
+<br/><b>Institute of Computing Technology, CAS, Beijing, 100190, China</b><br/>2Tencent BestImage Team, Shanghai, 100080, China
+</td><td>('1731144', 'Xin Liu', 'xin liu')<br/>('1688086', 'Shaoxin Li', 'shaoxin li')<br/>('1693589', 'Meina Kan', 'meina kan')<br/>('1698586', 'Jie Zhang', 'jie zhang')<br/>('3126238', 'Shuzhe Wu', 'shuzhe wu')<br/>('13323391', 'Wenxian Liu', 'wenxian liu')<br/>('34393045', 'Hu Han', 'hu han')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('1710220', 'Xilin Chen', 'xilin chen')</td><td>{xin.liu, meina.kan, jie.zhang, shuzhe.wu, wenxian.liu, hu.han}@vipl.ict.ac.cn
+<br/>{darwinli}@tencent.com, {sgshan, xlchen}@ict.ac.cn
+</td></tr><tr><td>6889d649c6bbd9c0042fadec6c813f8e894ac6cc</td><td>Analysis of Robust Soft Learning Vector
+<br/>Quantization and an application to Facial
+<br/>Expression Recognition
+</td><td></td><td></td></tr><tr><td>68f69e6c6c66cfde3d02237a6918c9d1ee678e1b</td><td>Enhancing Concept Detection by Pruning Data with MCA-based Transaction
+<br/>Weights
+<br/>Department of Electrical and
+<br/>Computer Engineering
+<br/><b>University of Miami</b><br/>Coral Gables, FL 33124, USA
+<br/>School of Computing and
+<br/>Information Sciences
+<br/><b>Florida International University</b><br/>Miami, FL 33199, USA
+</td><td>('1685202', 'Lin Lin', 'lin lin')<br/>('1693826', 'Mei-Ling Shyu', 'mei-ling shyu')<br/>('1705664', 'Shu-Ching Chen', 'shu-ching chen')</td><td>Email: l.lin2@umiami.edu, shyu@miami.edu
+<br/>Email: chens@cs.fiu.edu
+</td></tr><tr><td>682760f2f767fb47e1e2ca35db3becbb6153756f</td><td>The Effect of Pets on Happiness: A Large-scale Multi-Factor
+<br/>Analysis using Social Multimedia
+<br/>From reducing stress and loneliness, to boosting productivity and overall well-being, pets are believed to play
+<br/>a significant role in people’s daily lives. Many traditional studies have identified that frequent interactions
+<br/>with pets could make individuals become healthier and more optimistic, and ultimately enjoy a happier life.
+<br/>However, most of those studies are not only restricted in scale, but also may carry biases by using subjective
+<br/>self-reports, interviews, and questionnaires as the major approaches. In this paper, we leverage large-scale
+<br/>data collected from social media and the state-of-the-art deep learning technologies to study this phenomenon
+<br/>in depth and breadth. Our study includes four major steps: 1) collecting timeline posts from around 20,000
+<br/>Instagram users; 2) using face detection and recognition on 2-million photos to infer users’ demographics,
+<br/>relationship status, and whether having children, 3) analyzing a user’s degree of happiness based on images
+<br/>and captions via smiling classification and textual sentiment analysis; 3) applying transfer learning techniques
+<br/>to retrain the final layer of the Inception v3 model for pet classification; and 4) analyzing the effects of pets
+<br/>on happiness in terms of multiple factors of user demographics. Our main results have demonstrated the
+<br/>efficacy of our proposed method with many new insights. We believe this method is also applicable to other
+<br/>domains as a scalable, efficient, and effective methodology for modeling and analyzing social behaviors and
+<br/>psychological well-being. In addition, to facilitate the research involving human faces, we also release our
+<br/>dataset of 700K analyzed faces.
+<br/>CCS Concepts: • Human-centered computing → Social media;
+<br/>Additional Key Words and Phrases: Happiness analysis, happiness, user demographics, pet and happiness,
+<br/>social multimedia, social media.
+<br/>ACM Reference format:
+<br/>Analysis using Social Multimedia. ACM Trans. Intell. Syst. Technol. 9, 4, Article 39 (June 2017), 15 pages.
+<br/>https://doi.org/0000001.0000001
+<br/>1 INTRODUCTION
+<br/>Happiness has always been a subjective and multidimensional matter; its definition varies individu-
+<br/>ally, and the factors impacting our feeling of happiness are diverse. A study in [21] has constructed
+<br/><b>We thank the support of New York State through the Goergen Institute for Data Science, our corporate research sponsors</b><br/>Xerox and VisualDX, and NSF Award #1704309.
+<br/><b>Author s addresses: X. Peng, University of Rochester; L. Chi</b><br/><b>University of Rochester and J. Luo, University of Rochester</b><br/>Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee
+<br/>provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the
+<br/>full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored.
+</td><td>('1901094', 'Xuefeng Peng', 'xuefeng peng')<br/>('35678395', 'Li-Kai Chi', 'li-kai chi')<br/>('33642939', 'Jiebo Luo', 'jiebo luo')<br/>('1901094', 'Xuefeng Peng', 'xuefeng peng')<br/>('35678395', 'Li-Kai Chi', 'li-kai chi')<br/>('33642939', 'Jiebo Luo', 'jiebo luo')</td><td></td></tr><tr><td>683ec608442617d11200cfbcd816e86ce9ec0899</td><td>Dual Linear Regression Based Classification for Face Cluster Recognition
+<br/><b>University of Northern British Columbia</b><br/>Prince George, BC, Canada V2N 4Z9
+</td><td>('1692551', 'Liang Chen', 'liang chen')</td><td>chen.liang.97@gmail.com
+</td></tr><tr><td>68c17aa1ecbff0787709be74d1d98d9efd78f410</td><td>International Journal of Optomechatronics, 6: 92–119, 2012
+<br/>Copyright # Taylor & Francis Group, LLC
+<br/>ISSN: 1559-9612 print=1559-9620 online
+<br/>DOI: 10.1080/15599612.2012.663463
+<br/>GENDER CLASSIFICATION FROM FACE IMAGES
+<br/>USING MUTUAL INFORMATION AND FEATURE
+<br/>FUSION
+<br/>Department of Electrical Engineering and Advanced Mining Technology
+<br/>Center, Universidad de Chile, Santiago, Chile
+<br/>In this article we report a new method for gender classification from frontal face images
+<br/>using feature selection based on mutual information and fusion of features extracted from
+<br/>intensity, shape, texture, and from three different spatial scales. We compare the results of
+<br/>three different mutual information measures: minimum redundancy and maximal relevance
+<br/>(mRMR), normalized mutual information feature selection (NMIFS), and conditional
+<br/>mutual information feature selection (CMIFS). We also show that by fusing features
+<br/>extracted from six different methods we significantly improve the gender classification
+<br/>results relative to those previously published, yielding 99.13% of the gender classification
+<br/>rate on the FERET database.
+<br/>Keywords: Feature fusion, feature selection, gender classification, mutual information, real-time gender
+<br/>classification
+<br/>1. INTRODUCTION
+<br/>During the 90’s, one of the main issues addressed in the area of computer
+<br/>vision was face detection. Many methods and applications were developed including
+<br/>the face detection used in many digital cameras nowadays. Gender classification is
+<br/>important in many possible applications including electronic marketing. Displays
+<br/>at retail stores could show products and offers according to the person gender as
+<br/>the person passes in front of a camera at the store. This is not a simple task since
+<br/>faces are not rigid and depend on illumination, pose, gestures, facial expressions,
+<br/>occlusions (glasses), and other facial features (makeup, beard). The high variability
+<br/>in the appearance of the face directly affects their detection and classification. Auto-
+<br/>matic classification of gender from face images has a wide range of possible applica-
+<br/>tions, ranging from human-computer interaction to applications in real-time
+<br/>electronic marketing in retail stores (Shan 2012; Bekios-Calfa et al. 2011; Chu
+<br/>et al. 2010; Perez et al. 2010a).
+<br/>Automatic gender classification has a wide range of possible applications for
+<br/>improving human-machine interaction and face identification methods (Irick et al.
+<br/>ing.uchile.cl
+<br/>92
+</td><td>('32271973', 'Claudio Perez', 'claudio perez')<br/>('40333310', 'Juan Tapia', 'juan tapia')<br/>('32723983', 'Claudio Held', 'claudio held')<br/>('32271973', 'Claudio Perez', 'claudio perez')<br/>('32271973', 'Claudio Perez', 'claudio perez')</td><td>Engineering, Universidad de Chile Casilla 412-3, Av. Tupper 2007, Santiago, Chile. E-mail: clperez@
+</td></tr><tr><td>68f61154a0080c4aae9322110c8827978f01ac2e</td><td>Research Article
+<br/>Journal of the Optical Society of America A
+<br/>Recognizing blurred, non-frontal, illumination and
+<br/>expression variant partially occluded faces
+<br/><b>Indian Institute of Technology Madras, Chennai 600036, India</b><br/>Compiled June 26, 2016
+<br/>The focus of this paper is on the problem of recognizing faces across space-varying motion blur, changes
+<br/>in pose, illumination, and expression, as well as partial occlusion, when only a single image per subject
+<br/>is available in the gallery. We show how the blur incurred due to relative motion between the camera and
+<br/>the subject during exposure can be estimated from the alpha matte of pixels that straddle the boundary
+<br/>between the face and the background. We also devise a strategy to automatically generate the trimap re-
+<br/>quired for matte estimation. Having computed the motion via the matte of the probe, we account for pose
+<br/>variations by synthesizing from the intensity image of the frontal gallery, a face image that matches the
+<br/>pose of the probe. To handle illumination and expression variations, and partial occlusion, we model the
+<br/>probe as a linear combination of nine blurred illumination basis images in the synthesized non-frontal
+<br/>pose, plus a sparse occlusion. We also advocate a recognition metric that capitalizes on the sparsity of the
+<br/>occluded pixels. The performance of our method is extensively validated on synthetic as well as real face
+<br/>data. © 2016 Optical Society of America
+<br/>OCIS codes:
+<br/>(150.0150) Machine vision.
+<br/>http://dx.doi.org/10.1364/ao.XX.XXXXXX
+<br/>(100.0100) Image processing; (100.5010) Pattern recognition; (100.3008) Image recognition, algorithms and filters;
+<br/>1. INTRODUCTION
+<br/>State-of-the-art face recognition (FR) systems can outperform
+<br/>even humans when presented with images captured under con-
+<br/>trolled environments. However, their performance drops quite
+<br/>rapidly in unconstrained settings due to image degradations
+<br/>arising from blur, variations in pose, illumination, and expres-
+<br/>sion, partial occlusion etc. Motion blur is commonplace today
+<br/>owing to the exponential rise in the use and popularity of light-
+<br/>weight and cheap hand-held imaging devices, and the ubiquity
+<br/>of mobile phones equipped with cameras. Photographs cap-
+<br/>tured using a hand-held device usually contain blur when the
+<br/>illumination is poor because larger exposure times are needed
+<br/>to compensate for the lack of light, and this increases the possi-
+<br/>bility of camera shake. On the other hand, reducing the shutter
+<br/>speed results in noisy images while tripods inevitably restrict
+<br/>mobility. Even for a well-lit scene, the face might be blurred if
+<br/>the subject is in motion. The problem is further compounded
+<br/>in the case of poorly-lit dynamic scenes since the blur observed
+<br/>on the face is due to the combined effects of the blur induced
+<br/>by the motion of the camera and the independent motion of
+<br/>the subject. In addition to blur and illumination, practical face
+<br/>recognition algorithms must also possess the ability to recognize
+<br/>faces across reasonable variations in pose. Partial occlusion and
+<br/>facial expression changes, common in real-world applications,
+<br/>escalate the challenges further. Yet another factor that governs
+<br/>the performance of face recognition algorithms is the number
+<br/>of images per subject available for training. In many practical
+<br/>application scenarios such as law enforcement, driver license or
+<br/>passport identification, where there is usually only one training
+<br/>sample per subject in the database, techniques that rely on the
+<br/>size and representation of the training set suffer a serious perfor-
+<br/>mance drop or even fail to work. Face recognition algorithms
+<br/>can broadly be classified into either discriminative or genera-
+<br/>tive approaches. While the availability of large labeled datasets
+<br/>and greater computing power has boosted the performance of
+<br/>discriminative methods [1, 2] recently, generative approaches
+<br/>continue to remain very popular [3, 4], and there is concurrent
+<br/>research in both directions. The model we present in this paper
+<br/>falls into the latter category. In fact, generative models are even
+<br/>useful for producing training samples for learning algorithms.
+<br/>Literature on face recognition from blurred images can be
+<br/>broadly classified into four categories. It is important to note
+<br/>that all of them (except our own earlier work in [4]) are restricted
+<br/>to the convolution model for uniform blur. In the first approach
+<br/>[5, 6], the blurred probe image is first deblurred using standard
+<br/>deconvolution algorithms before performing recognition. How-
+</td><td></td><td>*Corresponding author: jithuthatswho@gmail.com
+</td></tr><tr><td>6821113166b030d2123c3cd793dd63d2c909a110</td><td>STUDIA INFORMATICA
+<br/>Volume 36
+<br/>2015
+<br/>Number 1 (119)
+<br/><b>Gdansk University of Technology, Faculty of Electronics, Telecommunication</b><br/>and Informatics
+<br/>ACQUISITION AND INDEXING OF RGB-D RECORDINGS FOR
+<br/>FACIAL EXPRESSIONS AND EMOTION RECOGNITION1
+<br/>Summary. In this paper KinectRecorder comprehensive tool is described which
+<br/>provides for convenient and fast acquisition, indexing and storing of RGB-D video
+<br/>streams from Microsoft Kinect sensor. The application is especially useful as a sup-
+<br/>porting tool for creation of fully indexed databases of facial expressions and emotions
+<br/>that can be further used for learning and testing of emotion recognition algorithms for
+<br/>affect-aware applications. KinectRecorder was successfully exploited for creation of
+<br/>Facial Expression and Emotion Database (FEEDB) significantly reducing the time of
+<br/>the whole project consisting of data acquisition, indexing and validation. FEEDB has
+<br/>already been used as a learning and testing dataset for a few emotion recognition al-
+<br/>gorithms which proved utility of the database, and the KinectRecorder tool.
+<br/>Keywords: RGB-D data acquisition and indexing, facial expression recognition,
+<br/>emotion recognition
+<br/>AKWIZYCJA ORAZ INDEKSACJA NAGRAŃ RGB-D DO
+<br/>Streszczenie. W pracy przedstawiono kompleksowe narzędzie, które pozwala na
+<br/>wygodną i szybką akwizycję, indeksowanie i przechowywanie nagrań strumieni
+<br/>RGB-D z czujnika Microsoft Kinect. Aplikacja jest szczególnie przydatna jako na-
+<br/>mogą być następnie wykorzystywane do nauki i testowania algorytmów rozpoznawa-
+<br/>nia emocji użytkownika dla aplikacji je uwzględniających. KinectRecorder został
+<br/>skracając czas całego procesu, obejmującego akwizycję, indeksowanie i walidację
+<br/>nagrań. Baza FEEDB została już z powodzeniem wykorzystana jako uczący i testują-
+<br/>
+<br/>1 The research leading to these results has received funding from the Polish-Norwegian Research Programme
+<br/>operated by the National Centre for Research and Development under the Norwegian Financial Mechanism
+<br/>2009-2014 in the frame of Project Contract No Pol-Nor/210629/51/2013.
+</td><td>('3271448', 'Mariusz SZWOCH', 'mariusz szwoch')</td><td></td></tr><tr><td>68a04a3ae2086986877fee2c82ae68e3631d0356</td><td>THERMAL & REFLECTANCE BASED IDENTIFICATION IN CHALLENGING VARIABLE ILLUMINATIONS
+<br/>Thermal and Reflectance Based Personal
+<br/>Identification Methodology in Challenging
+<br/>Variable Illuminations
+<br/>†Department of Engineering
+<br/><b>University of Cambridge</b><br/>‡Delphi Corporation,
+<br/>Delphi Electronics and Safety
+<br/>Cambridge, CB2 1PZ, UK
+<br/>Kokomo, IN 46901-9005, USA
+<br/>February 15, 2007
+<br/>DRAFT
+</td><td>('2214319', 'Riad Hammoud', 'riad hammoud')</td><td>{oa214,cipolla}@eng.cam.ac.uk
+<br/>riad.hammoud@delphi.com
+</td></tr><tr><td>6888f3402039a36028d0a7e2c3df6db94f5cb9bb</td><td>Under review as a conference paper at ICLR 2018
+<br/>CLASSIFIER-TO-GENERATOR ATTACK: ESTIMATION
+<br/>OF TRAINING DATA DISTRIBUTION FROM CLASSIFIER
+<br/>Anonymous authors
+<br/>Paper under double-blind review
+</td><td></td><td></td></tr><tr><td>57f5711ca7ee5c7110b7d6d12c611d27af37875f</td><td>Illumination Invariance for Face Verification
+<br/>Submitted for the Degree of
+<br/>Doctor of Philosophy
+<br/>from the
+<br/><b>University of Surrey</b><br/>Centre for Vision, Speech and Signal Processing
+<br/>School of Electronics and Physical Sciences
+<br/><b>University of Surrey</b><br/>Guildford, Surrey GU2 7XH, U.K.
+<br/>August 2006
+</td><td>('28467739', 'J. Short', 'j. short')<br/>('28467739', 'J. Short', 'j. short')</td><td></td></tr><tr><td>570308801ff9614191cfbfd7da88d41fb441b423</td><td>Unsupervised Synchrony Discovery in Human Interaction
+<br/><b>Robotics Institute, Carnegie Mellon University 3University of Pittsburgh, USA</b><br/><b>Beihang University, Beijing, China</b><br/><b>University of Miami, USA</b></td><td>('39336289', 'Wen-Sheng Chu', 'wen-sheng chu')<br/>('1874236', 'Daniel S. Messinger', 'daniel s. messinger')</td><td></td></tr><tr><td>57bf9888f0dfcc41c5ed5d4b1c2787afab72145a</td><td>Robust Facial Expression Recognition Based on
+<br/>Local Directional Pattern
+<br/>Automatic facial expression recognition has many
+<br/>potential applications
+<br/>in different areas of human
+<br/>computer interaction. However, they are not yet fully
+<br/>realized due to the lack of an effective facial feature
+<br/>descriptor. In this paper, we present a new appearance-
+<br/>based feature descriptor, the local directional pattern
+<br/>(LDP), to represent facial geometry and analyze its
+<br/>performance in expression recognition. An LDP feature is
+<br/>obtained by computing the edge response values in 8
+<br/>directions at each pixel and encoding them into an 8 bit
+<br/>binary number using the relative strength of these edge
+<br/>responses. The LDP descriptor, a distribution of LDP
+<br/>codes within an image or image patch, is used to describe
+<br/>each expression image. The effectiveness of dimensionality
+<br/>reduction techniques, such as principal component
+<br/>analysis and AdaBoost, is also analyzed in terms of
+<br/>computational cost saving and classification accuracy. Two
+<br/>well-known machine
+<br/>template
+<br/>matching and support vector machine, are used for
+<br/>classification using the Cohn-Kanade and Japanese
+<br/>female facial expression databases. Better classification
+<br/>accuracy shows the superiority of LDP descriptor against
+<br/>other appearance-based feature descriptors.
+<br/>learning methods,
+<br/>Keywords: Image representation, facial expression
+<br/>recognition, local directional pattern, features extraction,
+<br/>principal component analysis, support vector machine.
+<br/>
+<br/>Manuscript received Mar. 15, 2010; revised July 15, 2010; accepted Aug. 2, 2010.
+<br/>This work was supported by the Korea Research Foundation Grant funded by the Korean
+<br/>Government (KRF-2010-0015908).
+<br/><b>Kyung Hee University, Yongin, Rep. of Korea</b><br/>doi:10.4218/etrij.10.1510.0132
+<br/>I. Introduction
+<br/>Facial expression provides the most natural and immediate
+<br/>indication about a person’s emotions and intentions [1], [2].
+<br/>Therefore, automatic facial expression analysis is an important
+<br/>and challenging task that has had great impact in such areas as
+<br/>human-computer
+<br/>interaction and data-driven animation.
+<br/>Furthermore, video cameras have recently become an integral
+<br/>part of many consumer devices [3] and can be used for
+<br/>capturing facial images for recognition of people and their
+<br/>emotions. This ability to recognize emotions can enable
+<br/>customized applications [4], [5]. Even though much work has
+<br/>already been done on automatic facial expression recognition
+<br/>[6], [7], higher accuracy with reasonable speed still remains a
+<br/>great challenge [8]. Consequently, a fast but robust facial
+<br/>expression recognition system is very much needed to support
+<br/>these applications.
+<br/>The most critical aspect for any successful facial expression
+<br/>recognition system is to find an efficient facial feature
+<br/>representation [9]. An extracted facial feature can be considered
+<br/>an efficient representation if it can fulfill three criteria: first, it
+<br/>minimizes within-class variations of expressions while
+<br/>maximizes between-class variations; second, it can be easily
+<br/>extracted from the raw face image; and third, it can be
+<br/>described in a low-dimensional feature space to ensure
+<br/>computational speed during the classification step [10], [11].
+<br/>The goal of the facial feature extraction is thus to find an
+<br/>efficient and effective representation of the facial images which
+<br/>would provide robustness during recognition process. Two
+<br/>types of approaches have been proposed to extract facial
+<br/>features for expression recognition: a geometric feature-based
+<br/>system and an appearance-based system [12].
+<br/>In the geometric feature extraction system, the shape and
+<br/>© 2010
+<br/> ETRI Journal, Volume 32, Number 5, October 2010
+</td><td>('3182680', 'Taskeed Jabid', 'taskeed jabid')<br/>('9408912', 'Hasanul Kabir', 'hasanul kabir')<br/>('1685505', 'Oksam Chae', 'oksam chae')<br/>('3182680', 'Taskeed Jabid', 'taskeed jabid')</td><td>Taskeed Jabid (phone: +82 31 201 2948, email: taskeed@khu.ac.kr), Md. Hasanul Kabir
+<br/>(email: hasanul@khu.ac.kr), and Oksam Chae (email: oschae@khu.ac.kr) are with the
+</td></tr><tr><td>57ebeff9273dea933e2a75c306849baf43081a8c</td><td>Deep Convolutional Network Cascade for Facial Point Detection
+<br/><b>The Chinese University of Hong Kong</b><br/><b>The Chinese University of Hong Kong</b><br/><b>Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences</b></td><td>('1681656', 'Yi Sun', 'yi sun')<br/>('31843833', 'Xiaogang Wang', 'xiaogang wang')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td>sy011@ie.cuhk.edu.hk
+<br/>xgwang@ee.cuhk.edu.hk
+<br/>xtang@ie.cuhk.edu.hk
+</td></tr><tr><td>574751dbb53777101502419127ba8209562c4758</td><td></td><td></td><td></td></tr><tr><td>5778d49c8d8d127351eee35047b8d0dc90defe85</td><td>Probabilistic Subpixel Temporal Registration
+<br/>for Facial Expression Analysis
+<br/><b>Queen Mary University of London</b><br/>Centre for Intelligent Sensing
+</td><td>('1781916', 'Hatice Gunes', 'hatice gunes')<br/>('1713138', 'Andrea Cavallaro', 'andrea cavallaro')</td><td>fe.sariyanidi, h.gunes, a.cavallarog@qmul.ac.uk
+</td></tr><tr><td>57ee3a8b0cafe211d1e9b477d210bb78b9d43bc1</td><td>Modeling the joint density of two images under a variety of transformations
+<br/>Joshua Susskind
+<br/><b>Institute for Neural Computation</b><br/><b>University of California, San Diego</b><br/>United States
+<br/>Department of Computer Science
+<br/><b>University of Frankfurt</b><br/>Germany
+<br/>Department of Computer Science
+<br/>Department of Computer Science
+<br/>ETH Zurich
+<br/>Switzerland
+<br/>Geoffrey Hinton
+<br/><b>University of Toronto</b><br/>Canada
+</td><td>('1710604', 'Roland Memisevic', 'roland memisevic')<br/>('1742208', 'Marc Pollefeys', 'marc pollefeys')</td><td>josh@mplab.ucsd.edu
+<br/>ro@cs.uni-frankfurt.de
+<br/>hinton@cs.toronto.edu
+<br/>marc.pollefeys@inf.ethz.ch
+</td></tr><tr><td>57fd229097e4822292d19329a17ceb013b2cb648</td><td>Proceedings of the Twenty-Fifth International Joint Conference on Artificial Intelligence (IJCAI-16)
+<br/>Fast Structural Binary Coding
+<br/><b>University of California, San Diego</b><br/><b>University of California, San Diego</b></td><td>('2451800', 'Dongjin Song', 'dongjin song')<br/>('1722649', 'Wei Liu', 'wei liu')<br/>('3520515', 'David A. Meyer', 'david a. meyer')</td><td>La Jolla, USA, 92093-0409. Email: dosong@ucsd.edu
+<br/>] Didi Research, Didi Kuaidi, Beijing, China. Email: wliu@ee.columbia.edu
+<br/>La Jolla, USA, 92093-0112. Email: dmeyer@math.ucsd.edu
+</td></tr><tr><td>57c59011614c43f51a509e10717e47505c776389</td><td>Unsupervised Human Action Detection by Action Matching
+<br/><b>The Australian National University Queensland University of Technology</b></td><td>('1688071', 'Basura Fernando', 'basura fernando')</td><td>firstname.lastname@anu.edu.au
+<br/>s.shirazi@qut.edu.au
+</td></tr><tr><td>57b8b28f8748d998951b5a863ff1bfd7ca4ae6a5</td><td></td><td></td><td></td></tr><tr><td>57101b29680208cfedf041d13198299e2d396314</td><td></td><td></td><td></td></tr><tr><td>57893403f543db75d1f4e7355283bdca11f3ab1b</td><td></td><td></td><td></td></tr><tr><td>571f493c0ade12bbe960cfefc04b0e4607d8d4b2</td><td>International Journal of Research Studies in Science, Engineering and Technology
+<br/>Volume 3, Issue 2, February 2016, PP 18-41
+<br/>ISSN 2349-4751 (Print) & ISSN 2349-476X (Online)
+<br/>Review on Content Based Image Retrieval: From Its Origin to the
+<br/>New Age
+<br/>Assistant Professor, ECE
+<br/>Dr. B. L. Malleswari
+<br/>Principal
+<br/><b>Mahatma Gandhi Institute of Technology</b><br/><b>Sridevi Women's Engineering College</b><br/>Hyderabad, India
+<br/>Hyderabad, India
+</td><td></td><td>pasumarthinalini@gmil.com
+<br/>blmalleswari@gmail.com
+</td></tr><tr><td>57f8e1f461ab25614f5fe51a83601710142f8e88</td><td>Region Selection for Robust Face Verification using UMACE Filters
+<br/>Department of Electrical, Electronic and Systems Engineering, Faculty of Engineering,
+<br/>Universiti Kebangsaan Malaysia, 43600 Bangi, Selangor, Malaysia.
+<br/>In this paper, we investigate the verification performances of four subdivided face images with varying expressions. The
+<br/>objective of this study is to evaluate which part of the face image is more tolerant to facial expression and still retains its personal
+<br/>characteristics due to the variations of the image. The Unconstrained Minimum Average Correlation Energy (UMACE) filter is
+<br/>implemented to perform the verification process because of its advantages such as shift–invariance, ability to trade-off between
+<br/>discrimination and distortion tolerance, e.g. variations in pose, illumination and facial expression. The database obtained from the
+<br/>facial expression database of Advanced Multimedia Processing (AMP) Lab at CMU is used in this study. Four equal
+<br/>sizes of face regions i.e. bottom, top, left and right halves are used for the purpose of this study. The results show that the bottom
+<br/>half of the face region gives the best performance in terms of the PSR values with zero false accepted rate (FAR) and zero false
+<br/>rejection rate (FRR) compared to the other three regions.
+<br/>1. Introduction
+<br/>Face recognition is a well established field of research,
+<br/>and a large number of algorithms have been proposed in the
+<br/>literature. Various classifiers have been explored to improve
+<br/>the accuracy of face classification. The basic approach is to
+<br/>use distance-base methods which measure Euclidean distance
+<br/>between any two vectors and then compare it with the preset
+<br/>threshold. Neural Networks are often used as classifiers due
+<br/>to their powerful generation ability [1]. Support Vector
+<br/>Machines (SVM) have been applied with encouraging results
+<br/>[2].
+<br/>In biometric applications, one of the important tasks is the
+<br/>matching process between an individual biometrics against
+<br/>the database that has been prepared during the enrolment
+<br/>stage. For biometrics systems such as face authentication that
+<br/>use images as personal characteristics, biometrics sensor
+<br/>output and image pre-processing play an important role since
+<br/>the quality of a biometric input can change significantly due
+<br/>to illumination, noise and pose variations. Over the years,
+<br/>researchers have studied the role of illumination variation,
+<br/>pose variation, facial expression, and occlusions in affecting
+<br/>the performance of face verification systems [3].
+<br/>The Minimum Average Correlation Energy (MACE)
+<br/>filters have been reported to be an alternative solution to these
+<br/>problems because of the advantages such as shift-invariance,
+<br/>close-form expressions and distortion-tolerance. MACE
+<br/>filters have been successfully applied in the field of automatic
+<br/>target recognition as well as in biometric verification [3][4].
+<br/>Face and fingerprint verification using correlation filters have
+<br/>been investigated in [5] and [6], respectively. Savvides et.al
+<br/>performed face authentication and identification using
+<br/>correlation filters based on illumination variation [7]. In the
+<br/>process of implementing correlation filters, the number of
+<br/>training images used depends on the level of distortions
+<br/>applied to the images [5], [6].
+<br/>In this study, we investigate which part of a face image is
+<br/>more tolerant to facial expression and retains its personal
+<br/>characteristics for the verification process. Four subdivided
+<br/>face images, i.e. bottom, top, left and right halves, with
+<br/>varying expressions are investigated. By identifying only the
+<br/>region of the face that gives the highest verification
+<br/>performance, that region can be used instead of the full-face
+<br/>to reduce storage requirements.
+<br/>2. Unconstrained Minimum Average Correlation
+<br/>Energy (UMACE) Filter
+<br/>Correlation filter theory and the descriptions of the design
+<br/>of the correlation filter can be found in a tutorial survey paper
+<br/>[8]. According to [4][6], correlation filter evolves from
+<br/>matched filters which are optimal for detecting a known
+<br/>reference image in the presence of additive white Gaussian
+<br/>noise. However, the detection rate of matched filters
+<br/>decreases significantly due to even the small changes of scale,
+<br/>rotation and pose of the reference image.
+<br/>the pre-specified peak values
+<br/>In an effort to solve this problem, the Synthetic
+<br/>Discriminant Function (SDF) filter and the Equal Correlation
+<br/>Peak SDF (ECP SDF) filter ware introduced which allowed
+<br/>several training images to be represented by a single
+<br/>correlation filter. SDF filter produces pre-specified values
+<br/>called peak constraints. These peak values correspond to the
+<br/>authentic class or impostor class when an image is tested.
+<br/>However,
+<br/>to
+<br/>misclassifications when the sidelobes are larger than the
+<br/>controlled values at the origin.
+<br/>Savvides et.al developed
+<br/>the Minimum Average
+<br/>Correlation Energy (MACE) filters [5]. This filter reduces the
+<br/>large sidelobes and produces a sharp peak when the test
+<br/>image is from the same class as the images that have been
+<br/>used to design the filter. There are two kinds of variants that
+<br/>can be used in order to obtain a sharp peak when the test
+<br/>image belongs to the authentic class. The first MACE filter
+<br/>variant minimizes the average correlation energy of the
+<br/>training images while constraining the correlation output at
+<br/>the origin to a specific value for each of the training images.
+<br/>The second MACE filter variant is the Unconstrained
+<br/>Minimum Average Correlation Energy (UMACE) filter
+<br/>which also minimizes the average correlation output while
+<br/>maximizing the correlation output at the origin [4].
+<br/>lead
+<br/>Proceedings of the International Conference onElectrical Engineering and InformaticsInstitut Teknologi Bandung, Indonesia June 17-19, 2007B-67ISBN 978-979-16338-0-2611 </td><td>('5461819', 'Salina Abdul Samad', 'salina abdul samad')<br/>('2864147', 'Dzati Athiar Ramli', 'dzati athiar ramli')<br/>('2573778', 'Aini Hussain', 'aini hussain')</td><td>* E-mail: salina@vlsi.eng.ukm.my
+</td></tr><tr><td>57a1466c5985fe7594a91d46588d969007210581</td><td>A Taxonomy of Face-models for System Evaluation
+<br/>Motivation and Data Types
+<br/>Synthetic Data Types
+<br/>Unverified – Have no underlying physical or
+<br/>statistical basis
+<br/>Physics -Based – Based on structure and
+<br/>materials combined with the properties
+<br/>formally modeled in physics.
+<br/>Statistical – Use statistics from real
+<br/>data/experiments to estimate/learn model
+<br/>parameters. Generally have measurements
+<br/>of accuracy
+<br/>Guided Synthetic – Individual models based
+<br/>on individual people. No attempt to capture
+<br/>properties of large groups, a unique model
+<br/>per person. For faces, guided models are
+<br/>composed of 3D structure models and skin
+<br/>textures, capturing many artifacts not
+<br/>easily parameterized. Can be combined with
+<br/>physics-based rendering to generate samples
+<br/>under different conditions.
+<br/>Semi–Synethetic – Use measured data such
+<br/>as 2D images or 3D facial scans. These are
+<br/>not truly synthetic as they are re-rendering’s
+<br/>of real measured data.
+<br/>Semi and Guided Synthetic data provide
+<br/>higher operational relevance while
+<br/>maintaining a high degree of control.
+<br/>Generating statistically significant size
+<br/>datasets for face matching system
+<br/>evaluation is both a laborious and
+<br/>expensive process.
+<br/>There is a gap in datasets that allow for
+<br/>evaluation of system issues including:
+<br/> Long distance recognition
+<br/> Blur caused by atmospherics
+<br/> Various weather conditions
+<br/> End to end systems evaluation
+<br/>Our contributions:
+<br/> Define a taxonomy of face-models
+<br/>for controlled experimentations
+<br/> Show how Synthetic addresses gaps
+<br/>in system evaluation
+<br/> Show a process for generating and
+<br/>validating synthetic models
+<br/> Use these models in long distance
+<br/>face recognition system evaluation
+<br/>Experimental Setup
+<br/>Results and Conclusions
+<br/>Example Models
+<br/>Original Pie
+<br/>Semi-
+<br/>Synthetic
+<br/>FaceGen
+<br/>Animetrics
+<br/>http://www.facegen.com
+<br/>http://www.animetrics.com/products/Forensica.php
+<br/>Guided-
+<br/>Synthetic
+<br/>Models
+<br/> Models generated using the well
+<br/>known CMU PIE [18] dataset. Each of
+<br/>the 68 subjects of PIE were modeled
+<br/>using a right profile and frontal
+<br/>image from the lights subset.
+<br/> Two modeling programs were used,
+<br/>Facegen and Animetrics. Both
+<br/>programs create OBJ files and
+<br/>textures
+<br/> Models are re-rendered using
+<br/>custom display software built with
+<br/>OpenGL, GLUT and DevIL libraries
+<br/> Custom Display Box housing a BENQ SP820 high
+<br/>powered projector rated at 4000 ANSI Lumens
+<br/> Canon EOS 7D withd a Sigma 800mm F5.6 EX APO
+<br/>DG HSM lens a 2x adapter imaging the display
+<br/>from 214 meters
+<br/>Normalized Example Captures
+<br/>Real PIE 1 Animetrics
+<br/>FaceGen
+<br/>81M inside 214M outside
+<br/>Real PIE 2
+<br/> Pre-cropped images were used for the
+<br/>commercial core
+<br/> Ground truth eye points + geometric/lighting
+<br/>normalization pre processing before running
+<br/>through the implementation of the V1
+<br/>recognition algorithm found in [1].
+<br/> Geo normalization highlights how the feature
+<br/>region of the models looks very similar to
+<br/>that of the real person.
+<br/>Each test consisted of using 3 approximately frontal gallery images NOT used to
+<br/>make the 3D model used as the probe, best score over 3 images determined score.
+<br/>Even though the PIE-3D-20100224A–D sets were imaged on the same day, the V1
+<br/>core scored differently on each highlighting the synthetic data’s ability to help
+<br/>evaluate data capture methods and effects of varying atmospherics. The ISO setting
+<br/>varied which effects the shutter speed, with higher ISO generally yielding less blur.
+<br/>Dataset
+<br/>Range(m)
+<br/>Iso
+<br/>V1
+<br/>Comm.
+<br/>Original PIE Images
+<br/>FaceGen ScreenShots
+<br/>Animetrics Screenshots
+<br/>PIE-3D-20100210B
+<br/>PIE-3D-20100224A
+<br/>PIE-3D-20100224B
+<br/>PIE-3D-20100224C
+<br/>PIE-3D-20100224D
+<br/>N/A
+<br/>N/A
+<br/>N/A
+<br/>81m
+<br/>214m
+<br/>214m
+<br/>214m
+<br/>214m
+<br/>N/A
+<br/>N/A
+<br/>N/A
+<br/>500
+<br/>125
+<br/>125
+<br/>250
+<br/>400
+<br/>100
+<br/>47.76
+<br/>100
+<br/>100
+<br/>58.82
+<br/>45.59
+<br/>81.82
+<br/>79.1
+<br/>100
+<br/>100
+<br/>100
+<br/>100
+<br/>100
+<br/>100
+<br/> The same (100 percent) recognition rate on screenshots as original images
+<br/>validate the Anmetrics guided synthetic models and fails FaceGen Models.
+<br/> 100% recognition means dataset is too small/easy; exapanding pose and models
+<br/>underway.
+<br/> Expanded the photohead methodology into 3D
+<br/> Developed a robust modeling system allowing for multiple configurations of a
+<br/>single real life data set.
+<br/> Gabor+SVM based V1[15] significantly more impacted by atmospheric blur than
+<br/>the commercial algorithm
+<br/>Key References:
+<br/>[6 of 21] R. Bevridge, D. Bolme, M Teixeira, and B. Draper. The CSU Face Identification Evaluation System Users Guide: Version 5.0. Technical report, CSU 2003
+<br/>[8 of 21] T. Boult and W. Scheirer. Long range facial image acquisition and quality. In M. Tisarelli, S. Li, and R. Chellappa.
+<br/>[15 of 21] N. Pinto, J. J. DiCarlo, and D. D. Cox. How far can you get with a modern face recognition test set using only simple features? In IEEE CVPR, 2009.
+<br/>[18 of 21] T. Sim, S. Baker, and M. Bsat. The CMU Pose, Illumination and Expression (PIE) Database. In Proceedings of the IEEE F&G, May 2002.
+</td><td>('31552290', 'Brian C. Parks', 'brian c. parks')<br/>('2613438', 'Walter J. Scheirer', 'walter j. scheirer')</td><td>{viyer,skirkbride,bparks,wscheirer,tboult}@vast.uccs.edu
+</td></tr><tr><td>574b62c845809fd54cc168492424c5fac145bc83</td><td>Learning Warped Guidance for Blind Face
+<br/>Restoration
+<br/><b>School of Computer Science and Technology, Harbin Institute of Technology, China</b><br/><b>School of Data and Computer Science, Sun Yat-sen University, China</b><br/><b>University of Kentucky, USA</b></td><td>('21515518', 'Xiaoming Li', 'xiaoming li')<br/>('40508248', 'Yuting Ye', 'yuting ye')<br/>('1724520', 'Wangmeng Zuo', 'wangmeng zuo')<br/>('1737218', 'Liang Lin', 'liang lin')<br/>('38958903', 'Ruigang Yang', 'ruigang yang')</td><td>csxmli@hit.edu.cn, csmliu@outlook.com, yeyuting.jlu@gmail.com,
+<br/>wmzuo@hit.edu.cn
+<br/>linliang@ieee.org
+<br/>ryang@cs.uky.edu
+</td></tr><tr><td>57246142814d7010d3592e3a39a1ed819dd01f3b</td><td><b>MITSUBISHI ELECTRIC RESEARCH LABORATORIES</b><br/>http://www.merl.com
+<br/>Verification of Very Low-Resolution Faces Using An
+<br/>Identity-Preserving Deep Face Super-resolution Network
+<br/>TR2018-116 August 24, 2018
+</td><td></td><td></td></tr><tr><td>5721216f2163d026e90d7cd9942aeb4bebc92334</td><td></td><td></td><td></td></tr><tr><td>575141e42740564f64d9be8ab88d495192f5b3bc</td><td>Age Estimation based on Multi-Region
+<br/>Convolutional Neural Network
+<br/>1Center for Biometrics and Security Research & National Laboratory of Pattern
+<br/><b>Recognition, Institute of Automation, Chinese Academy of Sciences, Beijing, China</b><br/><b>University of Chinese Academy of Sciences</b></td><td>('40282288', 'Ting Liu', 'ting liu')<br/>('1756538', 'Jun Wan', 'jun wan')<br/>('39974958', 'Tingzhao Yu', 'tingzhao yu')<br/>('1718623', 'Zhen Lei', 'zhen lei')<br/>('34679741', 'Stan Z. Li', 'stan z. li')</td><td>{ting.liu,jun.wan,zlei,szli}@nlpr.ia.ac.cn,yutingzhao2013@ia.ac.cn
+</td></tr><tr><td>5789f8420d8f15e7772580ec373112f864627c4b</td><td>Efficient Global Illumination for Morphable Models
+<br/><b>University of Basel, Switzerland</b></td><td>('1801001', 'Andreas Schneider', 'andreas schneider')<br/>('34460642', 'Bernhard Egger', 'bernhard egger')<br/>('32013053', 'Lavrenti Frobeen', 'lavrenti frobeen')<br/>('1687079', 'Thomas Vetter', 'thomas vetter')</td><td>{andreas.schneider,sandro.schoenborn,bernhard.egger,l.frobeen,thomas.vetter}@unibas.ch
+</td></tr><tr><td>574705812f7c0e776ad5006ae5e61d9b071eebdb</td><td>Available Online at www.ijcsmc.com
+<br/>International Journal of Computer Science and Mobile Computing
+<br/>A Monthly Journal of Computer Science and Information Technology
+<br/>ISSN 2320–088X
+<br/>IJCSMC, Vol. 3, Issue. 5, May 2014, pg.780 – 787
+<br/> RESEARCH ARTICLE
+<br/>A Novel Approach for Face Recognition
+<br/>Using PCA and Artificial Neural Network
+<br/><b>Dayananda Sagar College of Engg., India</b><br/><b>Dayananda Sagar College of Engg., India</b></td><td>('9856026', 'Karthik G', 'karthik g')<br/>('9856026', 'Karthik G', 'karthik g')</td><td>1 email : karthik.knocks@gmail.com; 2 email : hcsateesh@gmail.com
+</td></tr><tr><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td><td></td><td></td><td></td></tr><tr><td>571b83f7fc01163383e6ca6a9791aea79cafa7dd</td><td>SeqFace: Make full use of sequence information for face recognition
+<br/><b>College of Information Science and Technology</b><br/><b>Beijing University of Chemical Technology, China</b><br/>YUNSHITU Corp., China
+</td><td>('48594708', 'Wei Hu', 'wei hu')<br/>('7524887', 'Yangyu Huang', 'yangyu huang')<br/>('8451319', 'Guodong Yuan', 'guodong yuan')<br/>('47191084', 'Fan Zhang', 'fan zhang')<br/>('50391855', 'Ruirui Li', 'ruirui li')<br/>('47113208', 'Wei Li', 'wei li')</td><td></td></tr><tr><td>574ad7ef015995efb7338829a021776bf9daaa08</td><td>AdaScan: Adaptive Scan Pooling in Deep Convolutional Neural Networks
+<br/>for Human Action Recognition in Videos
+<br/>1IIT Kanpur‡
+<br/>2SRI International
+<br/>3UCSD
+</td><td>('24899770', 'Amlan Kar', 'amlan kar')<br/>('12692625', 'Nishant Rai', 'nishant rai')<br/>('39707211', 'Karan Sikka', 'karan sikka')<br/>('39396475', 'Gaurav Sharma', 'gaurav sharma')</td><td></td></tr><tr><td>57a14a65e8ae15176c9afae874854e8b0f23dca7</td><td>UvA-DARE (Digital Academic Repository)
+<br/>Seeing mixed emotions: The specificity of emotion perception from static and dynamic
+<br/>facial expressions across cultures
+<br/>Fang, X.; Sauter, D.A.; van Kleef, G.A.
+<br/>Published in:
+<br/>Journal of Cross-Cultural Psychology
+<br/>DOI:
+<br/>10.1177/0022022117736270
+<br/>Link to publication
+<br/>Citation for published version (APA):
+<br/>Fang, X., Sauter, D. A., & van Kleef, G. A. (2018). Seeing mixed emotions: The specificity of emotion perception
+<br/>from static and dynamic facial expressions across cultures. Journal of Cross-Cultural Psychology, 49(1), 130-
+<br/>148. DOI: 10.1177/0022022117736270
+<br/>General rights
+<br/>It is not permitted to download or to forward/distribute the text or part of it without the consent of the author(s) and/or copyright holder(s),
+<br/>other than for strictly personal, individual use, unless the work is under an open content license (like Creative Commons).
+<br/>Disclaimer/Complaints regulations
+<br/>If you believe that digital publication of certain material infringes any of your rights or (privacy) interests, please let the Library know, stating
+<br/>your reasons. In case of a legitimate complaint, the Library will make the material inaccessible and/or remove it from the website. Please Ask
+<br/><b>the Library: http://uba.uva.nl/en/contact, or a letter to: Library of the University of Amsterdam, Secretariat, Singel 425, 1012 WP Amsterdam</b><br/>The Netherlands. You will be contacted as soon as possible.
+<br/>Download date: 08 Aug 2018
+<br/><b>UvA-DARE is a service provided by the library of the University of Amsterdam (http://dare.uva.nl</b></td><td></td><td></td></tr><tr><td>57b052cf826b24739cd7749b632f85f4b7bcf90b</td><td>Fast Fashion Guided Clothing Image Retrieval:
+<br/>Delving Deeper into What Feature Makes
+<br/>Fashion
+<br/><b>School of Data and Computer Science, Sun Yat-sen University</b><br/>Guangzhou, P.R China
+</td><td>('3079146', 'Yuhang He', 'yuhang he')<br/>('40451106', 'Long Chen', 'long chen')</td><td>*Corresponding Author: chenl46@mail.sysu.edu.cn
+</td></tr><tr><td>57d37ad025b5796457eee7392d2038910988655a</td><td>GEERATVEEETATF
+<br/>
+<br/>by
+<br/>DagaEha
+<br/>UdeheS eviif
+<br/>f.DahaWeiha
+<br/>ATheiS biediaia
+<br/>Re ieefheDegeef
+<br/>aefSciece
+<br/>a
+<br/>TheSch
+<br/>
+<br/>Decebe2009
+</td><td></td><td></td></tr><tr><td>57f7d8c6ec690bd436e70d7761bc5f46e993be4c</td><td>Facial Expression Recognition Using Histogram Variances Faces
+<br/><b>University of Technology, Sydney, 15 Broadway, Ultimo, NSW 2007, Australia</b><br/><b>University of Aizu, Japan</b></td><td>('32796151', 'Ruo Du', 'ruo du')<br/>('37046680', 'Qiang Wu', 'qiang wu')<br/>('1706670', 'Xiangjian He', 'xiangjian he')<br/>('1714410', 'Wenjing Jia', 'wenjing jia')<br/>('40394300', 'Daming Wei', 'daming wei')</td><td>{ruodu, wuq, sean, wejia}@it.uts.edu.au
+<br/>dm-wei@u-aizu.ac.jp
+</td></tr><tr><td>3b1260d78885e872cf2223f2c6f3d6f6ea254204</td><td></td><td></td><td></td></tr><tr><td>3b1aaac41fc7847dd8a6a66d29d8881f75c91ad5</td><td>Sparse Representation-based Open Set Recognition
+</td><td>('2310707', 'He Zhang', 'he zhang')<br/>('1741177', 'Vishal M. Patel', 'vishal m. patel')</td><td></td></tr><tr><td>3b092733f428b12f1f920638f868ed1e8663fe57</td><td>On the Size of Convolutional Neural Networks and
+<br/>Generalization Performance
+<br/>Center for Automation Research, UMIACS*
+<br/>Department of Electrical and Computer Engineering†
+<br/><b>University of Maryland, College Park</b></td><td>('2747758', 'Maya Kabkab', 'maya kabkab')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td>Email: {mayak, emhand, rama}@umiacs.umd.edu
+</td></tr><tr><td>3b73f8a2b39751efb7d7b396bf825af2aaadee24</td><td>Connecting Pixels to Privacy and Utility:
+<br/>Automatic Redaction of Private Information in Images
+<br/><b>Max Planck Institute for Informatics</b><br/>Saarland Informatics Campus
+<br/>Saabr¨ucken, Germany
+</td><td>('9517443', 'Tribhuvanesh Orekondy', 'tribhuvanesh orekondy')<br/>('1739548', 'Mario Fritz', 'mario fritz')<br/>('1697100', 'Bernt Schiele', 'bernt schiele')</td><td>{orekondy,mfritz,schiele}@mpi-inf.mpg.de
+</td></tr><tr><td>3b2d5585af59480531616fe970cb265bbdf63f5b</td><td>Robust Face Recognition under Varying Light
+<br/>Based on 3D Recovery
+<br/>Center of Computer Vision, School of
+<br/>Mathematics and Computing, Sun Yat-sen
+<br/><b>University, Guangzhou, China</b><br/>Ching Y Suen
+<br/>Centre for Pattern Recognition and Machine
+<br/><b>Intelligence, Concordia University, Montreal</b><br/>Canada, H3G 1M8
+</td><td>('3246510', 'Guan Yang', 'guan yang')</td><td>mcsfgc@mail.sysu.edu.cn
+<br/>parmidir@cenparmi.concordia.ca
+</td></tr><tr><td>3b64efa817fd609d525c7244a0e00f98feacc8b4</td><td>A Comprehensive Survey on Pose-Invariant
+<br/>Face Recognition
+<br/>Centre for Quantum Computation and Intelligent Systems
+<br/>Faculty of Engineering and Information Technology
+<br/><b>University of Technology, Sydney</b><br/>81-115 Broadway, Ultimo, NSW
+<br/>Australia
+<br/>15 March 2016
+</td><td>('37990555', 'Changxing Ding', 'changxing ding')<br/>('1692693', 'Dacheng Tao', 'dacheng tao')</td><td>Emails: chx.ding@gmail.com, dacheng.tao@uts.edu.au
+</td></tr><tr><td>3bc776eb1f4e2776f98189e17f0d5a78bb755ef4</td><td></td><td></td><td></td></tr><tr><td>3b7f6035a113b560760c5e8000540fc46f91fed5</td><td>COUPLING ALIGNMENTS WITH RECOGNITION FOR STILL-TO-VIDEO
+<br/><b>Institute of Computing Technology, Chinese Academy of Sciences, Beijing, 100190, China</b><br/>FACE RECOGNITION
+<br/>MOTIVATION
+<br/>Problem: Still-to-Video face recognition
+<br/>1. Gallery: high quality still face images (e.g., sharp and
+<br/>high face resolution ones)
+<br/>2. Probe: low quality video face frames (e.g., blur and low
+<br/>face resolution ones)
+<br/>Solution: Couple alignments with recognition
+<br/>1. Quality Alignment (QA): select the frames of ‘best
+<br/>quality’ from videos
+<br/>2. Geometric Alignment (GA): jointly align the selected
+<br/>frames to the still faces
+<br/>3. Sparse Representation (SR): sparsely represent the
+<br/>frames on the still faces
+<br/>Frame 1
+<br/>20
+<br/>220
+<br/>301
+<br/>333
+<br/>image
+<br/>OVERVIEW
+<br/>GA: Geometric Alignment
+<br/>SR: Sparse Representation
+<br/>QA: Quality Alignment
+<br/>T : Alignment parameters
+<br/>L: Identity labels
+<br/>C: Selecting confidences
+<br/>FORMULATION
+<br/>{ ˆT , ˆL} = arg minT,L (cid:107)Z(cid:107)1 +(cid:80)c
+<br/>s.t. Y ◦ T = B + E, B = AZ, Si = {j|Lj = i}.
+<br/>i=1 (cid:107)BSi(cid:107)∗ + (cid:107)E(cid:107)1,
+<br/>• Couple GA with SR: Y ◦T = B+E, B = AZ, (cid:107)Z(cid:107)1 ≤ t
+<br/>DATASETS
+<br/>1. YouTube-S2V dataset: 100 subjects, privately
+<br/>collected from YouTube Face DB [Wolf et al., CVPR’ 11]
+<br/>2. COX-S2V dataset: 1,000 subjects, publicly released
+<br/>in our prior work [Huang et al., ACCV ’12]
+<br/>– Y : Video faces, A: dictionary (still faces)
+<br/>– ◦ and T : Alignment operator and parameters
+<br/>– B: Sparse representations, E: residual errors
+<br/>Examples of still faces
+<br/>Examples of still faces
+<br/>• Couple SR with QA: Si = {j|Lj = i},(cid:80) (cid:107)BSi(cid:107)∗ ≤ k
+<br/>– Identity label: Lj = arg mink (cid:107)yj ◦τj −Akzjk(cid:107)2
+<br/>– Confidence: Ci =(cid:80)
+<br/>(cid:16) −(cid:107)ej(cid:107)1
+<br/>(cid:17)
+<br/>j∈Si
+<br/>exp
+<br/>σ2
+<br/>Frame 1
+<br/>RESULTS
+<br/>Frame 1
+<br/>31
+<br/>45
+<br/>72
+<br/>84
+<br/>Frame 1
+<br/>14
+<br/>25
+<br/>35
+<br/>46
+<br/>89
+<br/>Examples of video faces
+<br/>118
+<br/>Frame 1
+<br/>Examples of video faces
+<br/>14
+<br/>25
+<br/>OPTIMIZATION
+<br/>{ ˆT , ˆL} = arg minT,L (cid:107)Z(cid:107)1 +(cid:80)c
+<br/>Linearization:
+<br/>i=1 (cid:107)BSi(cid:107)∗ + (cid:107)E(cid:107)1,
+<br/>s.t.Y ◦ T + J∆T = B + E, B = AZ, Si = {j|Lj = i}.
+<br/>∂T Y ◦ T : Jacobian matrices w.r.t transformations
+<br/>J = ∂
+<br/>Main algorithm:
+<br/>Comparative methods:
+<br/>1. Baseline: SRC[1], CRC[2]
+<br/>2. Blind Geometric Alignment: RASL[3]
+<br/>3. Joint Geometric Alignment and Recognition: MRR[4]
+<br/>4. Our method: Couping Alignments with Recognition
+<br/>(CAR)
+<br/>Evaluation terms:
+<br/>1. Face Alignments (QA and GA)
+<br/>2. Sparse Reprentation (SR) for Face Recognition
+<br/>INPUT: Gallery data matrix A, probe video sequence
+<br/>data matrix Y and initial transformation T of Y
+<br/>1. WHILE not converged DO
+<br/>2. Compute Jacobian matrices w.r.t transformations
+<br/>3. Warp and normalize the images:
+<br/>(cid:20) vec(Y1 ◦ τ1)
+<br/>Y ◦ T =
+<br/>Set the segments at coarse search stage:
+<br/>vec((cid:107)Y1 ◦ τ1(cid:107)2)
+<br/>S1 = {1, . . . , n}, Si = φ, i = 2, . . . , c
+<br/>, . . . ,
+<br/>vec(Yn ◦ τn)
+<br/>vec((cid:107)Yn ◦ τn(cid:107)2)
+<br/>5. Apply Augmented Lagrange Multiplier to solve:
+<br/>(cid:107)BSi(cid:107)∗ + (cid:107)E(cid:107)1,
+<br/>{ ˆT , ˆZ} = arg min
+<br/>(cid:107)Z(cid:107)1 +
+<br/>c(cid:88)
+<br/>4.
+<br/>(cid:21)
+<br/>T,Z
+<br/>i=1
+<br/>s.t. Y ◦ T + J∆T = B + E, B = AZ;
+<br/>6. Update transformations: T = T + ∆T ∗
+<br/>7. Update segments at fine search stage:
+<br/>Si = {j|i = arg min
+<br/>(cid:107)yj ◦ τj − Akzjk(cid:107)2}.
+<br/>8. END WHILE
+<br/>9. Compute Ci of Si, i = 1, . . . , n for voting class label.
+<br/>OUTPUT: Class label of the probe video sequence.
+<br/>QA, GA, SR results.
+<br/>: correctly identified, (cid:3): finally selected
+<br/>CONCLUSION
+<br/>• The proposed method jointly performs GA, QA and SR
+<br/>in a unified optimization.
+<br/>• We employ an iterative EM-like algorithm to jointly op-
+<br/>timize the three tasks.
+<br/>• Experimental results demonstrate that GA, QA and SR
+<br/>benefit from each other.
+<br/>QA and GA results. Average faces of video frames finally
+<br/>selected for face recognition
+<br/>Methods
+<br/>SRC[1]
+<br/>CRC[2]
+<br/>RASL[3] -SRC
+<br/>RASL[3]-CRC
+<br/>MRR[4]
+<br/>CAR
+<br/>10.78
+<br/>10.34
+<br/>26.29
+<br/>29.74
+<br/>28.45
+<br/>36.21
+<br/>C1
+<br/>15.57
+<br/>14.43
+<br/>22.14
+<br/>19.43
+<br/>26.43
+<br/>43.42
+<br/>C2
+<br/>42.29
+<br/>43.57
+<br/>39.00
+<br/>41.29
+<br/>44.14
+<br/>55.00
+<br/>C3
+<br/>2.86
+<br/>4.14
+<br/>4.57
+<br/>4.00
+<br/>3.57
+<br/>10.71
+<br/>C4
+<br/>18.71
+<br/>19.71
+<br/>18.29
+<br/>19.43
+<br/>13.57
+<br/>28.86
+<br/>Face recognition results. Intensity feature, Y: YouTube-S2V, Ci:
+<br/>the i-the testing scenario of COX-S2V
+<br/>REFERENCES
+<br/>[1]
+<br/>J. Wright, A. Yang, A. Ganesh, S. Sastry, Y. Ma. Robust face recognition via sparse representa-
+<br/>tion. In TPAMI ’09
+<br/>[2] L. Zhang, M. Yang, X. Feng. Sparse representation or collaborative representation which helps
+<br/>face recognition? In ICCV ’11
+<br/>[3] Y. Peng, A. Ganesh, J. Wright, W. Xu, Y. Ma. RASL: Robust alignement by sparse and low-rank
+<br/>decomposition for linearly correlated images. In CVPR ’10
+<br/>[4] M. Yang, L. Zhang, D. Zhang. Efficient misalignment-robust representaion for real-time face
+<br/>recognition. In ECCV ’12
+</td><td>('7945869', 'Zhiwu Huang', 'zhiwu huang')<br/>('1874505', 'Xiaowei Zhao', 'xiaowei zhao')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('3373117', 'Ruiping Wang', 'ruiping wang')<br/>('1710220', 'Xilin Chen', 'xilin chen')</td><td></td></tr><tr><td>3b2a2357b12cf0a5c99c8bc06ef7b46e40dd888e</td><td>Learning Person Trajectory Representations for Team Activity Analysis
+<br/><b>Simon Fraser University</b></td><td>('10386960', 'Nazanin Mehrasa', 'nazanin mehrasa')<br/>('19198359', 'Yatao Zhong', 'yatao zhong')<br/>('2123865', 'Frederick Tung', 'frederick tung')<br/>('3004771', 'Luke Bornn', 'luke bornn')<br/>('10771328', 'Greg Mori', 'greg mori')</td><td>{nmehrasa, yataoz, ftung, lbornn}@sfu.ca, mori@cs.sfu.ca
+</td></tr><tr><td>3bd1d41a656c8159305ba2aa395f68f41ab84f31</td><td>Entity-based Opinion Mining from Text and
+<br/>Multimedia
+<br/>1 Introduction
+<br/>Social web analysis is all about the users who are actively engaged and generate
+<br/>content. This content is dynamic, reflecting the societal and sentimental fluctuations
+<br/>of the authors as well as the ever-changing use of language. Social networks are
+<br/>pools of a wide range of articulation methods, from simple ”Like” buttons to com-
+<br/>plete articles, their content representing the diversity of opinions of the public. User
+<br/>activities on social networking sites are often triggered by specific events and re-
+<br/>lated entities (e.g. sports events, celebrations, crises, news articles) and topics (e.g.
+<br/>global warming, financial crisis, swine flu).
+<br/>With the rapidly growing volume of resources on the Web, archiving this material
+<br/>becomes an important challenge. The notion of community memories extends tradi-
+<br/>tional Web archives with related data from a variety of sources. In order to include
+<br/>this information, a semantically-aware and socially-driven preservation model is a
+<br/>natural way to go: the exploitation of Web 2.0 and the wisdom of crowds can make
+<br/>web archiving a more selective and meaning-based process. The analysis of social
+<br/>media can help archivists select material for inclusion, while social media mining
+<br/>can enrich archives, moving towards structured preservation around semantic cat-
+<br/>egories. In this paper, we focus on the challenges in the development of opinion
+<br/>mining tools from both textual and multimedia content.
+<br/>We focus on two very different domains: socially aware federated political
+<br/>archiving (realised by the national parliaments of Greece and Austria), and socially
+<br/>contextualized broadcaster web archiving (realised by two large multimedia broad-
+<br/><b>University of Shef eld, Regent Court, 211 Portobello, Shef eld</b><br/>Jonathon Hare
+<br/><b>Electronics and Computer Science, University of Southampton, Southampton, Hampshire</b></td><td>('2144272', 'Diana Maynard', 'diana maynard')<br/>('2144272', 'Diana Maynard', 'diana maynard')</td><td>S1 4DP, UK e-mail: diana@dcs.shef.ac.uk
+<br/>SO17 1BJ, UK e-mail: jsh2@ecs.soton.ac.uk
+</td></tr><tr><td>3bcd72be6fbc1a11492df3d36f6d51696fd6bdad</td><td>Multi-Task Zero-Shot Action Recognition with
+<br/>Prioritised Data Augmentation
+<br/>School of Electronic Engineering and Computer Science,
+<br/><b>Queen Mary University of London</b></td><td>('1735328', 'Xun Xu', 'xun xu')<br/>('1697755', 'Timothy M. Hospedales', 'timothy m. hospedales')<br/>('2073354', 'Shaogang Gong', 'shaogang gong')</td><td>{xun.xu,t.hospedales,s.gong}@qmul.ac.uk
+</td></tr><tr><td>3b9c08381282e65649cd87dfae6a01fe6abea79b</td><td>CUHK & ETHZ & SIAT Submission to ActivityNet Challenge 2016
+<br/><b>Multimedia Laboratory, The Chinese University of Hong Kong, Hong Kong</b><br/>2Computer Vision Lab, ETH Zurich, Switzerland
+<br/><b>Shenzhen Institutes of Advanced Technology, CAS, China</b></td><td>('3331521', 'Yuanjun Xiong', 'yuanjun xiong')<br/>('33345248', 'Limin Wang', 'limin wang')<br/>('1915826', 'Zhe Wang', 'zhe wang')<br/>('3047890', 'Bowen Zhang', 'bowen zhang')<br/>('2313919', 'Hang Song', 'hang song')<br/>('1688012', 'Wei Li', 'wei li')<br/>('1807606', 'Dahua Lin', 'dahua lin')<br/>('1681236', 'Luc Van Gool', 'luc van gool')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td></td></tr><tr><td>3b84d074b8622fac125f85ab55b63e876fed4628</td><td>End-to-End Localization and Ranking for
+<br/>Relative Attributes
+<br/><b>University of California, Davis</b></td><td>('19553871', 'Krishna Kumar Singh', 'krishna kumar singh')<br/>('1883898', 'Yong Jae Lee', 'yong jae lee')</td><td></td></tr><tr><td>3b4fd2aec3e721742f11d1ed4fa3f0a86d988a10</td><td>Glimpse: Continuous, Real-Time Object Recognition on
+<br/>Mobile Devices
+<br/>MIT CSAIL
+<br/>Microsoft Research
+<br/>MIT CSAIL
+<br/>Microsoft Research
+<br/>MIT CSAIL
+</td><td>('32214366', 'Tiffany Yu-Han Chen', 'tiffany yu-han chen')<br/>('40125198', 'Lenin Ravindranath', 'lenin ravindranath')<br/>('1904357', 'Shuo Deng', 'shuo deng')<br/>('2292948', 'Paramvir Bahl', 'paramvir bahl')<br/>('1712771', 'Hari Balakrishnan', 'hari balakrishnan')</td><td>yuhan@csail.mit.edu
+<br/>lenin@microsoft.com
+<br/>shuodeng@csail.mit.edu
+<br/>bahl@microsoft.com
+<br/>hari@csail.mit.edu
+</td></tr><tr><td>3be8f1f7501978287af8d7ebfac5963216698249</td><td>Deep Cascaded Regression for Face Alignment
+<br/><b>School of Data and Computer Science, Sun Yat-Sen University, China</b><br/><b>National University of Singapore, Singapore</b><br/>algorithm refines the shape by estimating a shape increment
+<br/>∆S. In particular, a shape increment at stage k is calculated
+<br/>as:
+</td><td>('3124720', 'Shengtao Xiao', 'shengtao xiao')<br/>('10338111', 'Zhen Cui', 'zhen cui')<br/>('48815683', 'Yan Pan', 'yan pan')<br/>('48258938', 'Chunyan Xu', 'chunyan xu')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')</td><td></td></tr><tr><td>3bc376f29bc169279105d33f59642568de36f17f</td><td>Active Shape Models with SIFT Descriptors and MARS
+<br/><b>University of Cape Town, South Africa</b><br/>Keywords:
+<br/>Facial Landmark, Active Shape Model, Multivariate Adaptive Regression Splines
+</td><td>('2822258', 'Stephen Milborrow', 'stephen milborrow')<br/>('2537623', 'Fred Nicolls', 'fred nicolls')</td><td>milbo@sonic.net
+</td></tr><tr><td>3b38c06caf54f301847db0dd622a6622c3843957</td><td>RESEARCH ARTICLE
+<br/>Gender differences in emotion perception
+<br/>and self-reported emotional intelligence: A
+<br/>test of the emotion sensitivity hypothesis
+<br/><b>University of Amsterdam, Amsterdam, the Netherlands, 2 Leiden University</b><br/><b>Leiden, the Netherlands, 3 Delft University of Technology</b><br/>Intelligent Systems, Delft, the Netherlands
+</td><td>('1735303', 'Joost Broekens', 'joost broekens')</td><td>* a.h.fischer@uva.nl
+</td></tr><tr><td>3b15a48ffe3c6b3f2518a7c395280a11a5f58ab0</td><td>On Knowledge Transfer in
+<br/>Object Class Recognition
+<br/>A dissertation approved by
+<br/>TECHNISCHE UNIVERSITÄT DARMSTADT
+<br/>Fachbereich Informatik
+<br/>for the degree of
+<br/>Doktor-Ingenieur (Dr.-Ing.)
+<br/>presented by
+<br/>Dipl.-Inform.
+<br/>born in Mainz, Germany
+<br/>Prof. Dr.-Ing. Michael Goesele, examiner
+<br/>Prof. Martial Hebert, Ph.D., co-examiner
+<br/>Prof. Dr. Bernt Schiele, co-examiner
+<br/>Date of Submission: 12th of August, 2010
+<br/>Date of Defense: 23rd of September, 2010
+<br/>Darmstadt, 2010
+<br/>D17
+</td><td>('37718254', 'Michael Stark', 'michael stark')</td><td></td></tr><tr><td>3baa3d5325f00c7edc1f1427fcd5bdc6a420a63f</td><td>Enhancing Convolutional Neural Networks for Face Recognition with
+<br/>Occlusion Maps and Batch Triplet Loss
+<br/><b>aSchool of Engineering and Technology, University of Hertfordshire, Hat eld AL10 9AB, UK</b><br/>bIDscan Biometrics (a GBG company), London E14 9QD, UK
+</td><td>('2133352', 'Li Meng', 'li meng')<br/>('46301106', 'Margaret Hartnett', 'margaret hartnett')</td><td></td></tr><tr><td>3b9b200e76a35178da940279d566bbb7dfebb787</td><td>Learning Channel Inter-dependencies at Multiple Scales on Dense
+<br/>Networks for Face Recognition
+<br/>109 Research Way — PO Box 6109 Morgantown, West Virginia
+<br/><b>West Virginia University</b><br/>November 29, 2017
+</td><td>('16145333', 'Qiangchang Wang', 'qiangchang wang')<br/>('1822413', 'Guodong Guo', 'guodong guo')<br/>('23981570', 'Mohammad Iqbal Nouyed', 'mohammad iqbal nouyed')</td><td>qw0007@mix.wvu.edu, guodong.guo@mail.wvu.edu, monouyed@mix.wvu.edu
+</td></tr><tr><td>3b408a3ca6fb39b0fda4d77e6a9679003b2dc9ab</td><td>Improving Classification by Improving Labelling:
+<br/>Introducing Probabilistic Multi-Label Object Interaction Recognition
+<br/>Walterio Mayol-Cuevas
+<br/><b>University of Bristol</b></td><td>('2052236', 'Michael Wray', 'michael wray')<br/>('3420479', 'Davide Moltisanti', 'davide moltisanti')<br/>('1728459', 'Dima Damen', 'dima damen')</td><td><FirstName>.<LastName>@bristol.ac.uk
+</td></tr><tr><td>3b02aaccc9f063ae696c9d28bb06a8cd84b2abb8</td><td>Who Leads the Clothing Fashion: Style, Color, or Texture?
+<br/>A Computational Study
+<br/><b>School of Computer Science, Wuhan University, P.R. China</b><br/><b>Shenzhen Key Laboratory of Spatial Smart Sensing and Service, Shenzhen University, P.R. China</b><br/><b>School of Data of Computer Science, Sun Yat-sen University, P.R. China</b><br/><b>University of South Carolina, USA</b></td><td>('4793870', 'Qin Zou', 'qin zou')<br/>('37361540', 'Zheng Zhang', 'zheng zhang')<br/>('40102806', 'Qian Wang', 'qian wang')<br/>('1720431', 'Qingquan Li', 'qingquan li')<br/>('40451106', 'Long Chen', 'long chen')<br/>('10829233', 'Song Wang', 'song wang')</td><td></td></tr><tr><td>3ba8f8b6bfb36465018430ffaef10d2caf3cfa7e</td><td>Local Directional Number Pattern for Face
+<br/>Analysis: Face and Expression Recognition
+</td><td>('2525887', 'Adin Ramirez Rivera', 'adin ramirez rivera')<br/>('1685505', 'Oksam Chae', 'oksam chae')</td><td></td></tr><tr><td>3b80bf5a69a1b0089192d73fa3ace2fbb52a4ad5</td><td></td><td></td><td></td></tr><tr><td>3b9d94752f8488106b2c007e11c193f35d941e92</td><td>CVPR
+<br/>#2052
+<br/>000
+<br/>001
+<br/>002
+<br/>003
+<br/>004
+<br/>005
+<br/>006
+<br/>007
+<br/>008
+<br/>009
+<br/>010
+<br/>011
+<br/>012
+<br/>013
+<br/>014
+<br/>015
+<br/>016
+<br/>017
+<br/>018
+<br/>019
+<br/>020
+<br/>021
+<br/>022
+<br/>023
+<br/>024
+<br/>025
+<br/>026
+<br/>027
+<br/>028
+<br/>029
+<br/>030
+<br/>031
+<br/>032
+<br/>033
+<br/>034
+<br/>035
+<br/>036
+<br/>037
+<br/>038
+<br/>039
+<br/>040
+<br/>041
+<br/>042
+<br/>043
+<br/>044
+<br/>045
+<br/>046
+<br/>047
+<br/>048
+<br/>049
+<br/>050
+<br/>051
+<br/>052
+<br/>053
+<br/>CVPR 2013 Submission #2052. CONFIDENTIAL REVIEW COPY. DO NOT DISTRIBUTE.
+<br/>CVPR
+<br/>#2052
+<br/>Appearance, Visual and Social Ensembles for
+<br/>Face Recognition in Personal Photo Collections
+<br/>Anonymous CVPR submission
+<br/>Paper ID 2052
+</td><td></td><td></td></tr><tr><td>3bb6570d81685b769dc9e74b6e4958894087f3f1</td><td>Hu-Fu: Hardware and Software Collaborative
+<br/>Attack Framework against Neural Networks
+<br/><b>Beijing National Research Center for Information Science and Technology</b><br/><b>Tsinghua University</b></td><td>('3493074', 'Wenshuo Li', 'wenshuo li')<br/>('1909938', 'Jincheng Yu', 'jincheng yu')<br/>('6636914', 'Xuefei Ning', 'xuefei ning')<br/>('2892980', 'Pengjun Wang', 'pengjun wang')<br/>('49988678', 'Qi Wei', 'qi wei')<br/>('47904166', 'Yu Wang', 'yu wang')<br/>('39150998', 'Huazhong Yang', 'huazhong yang')</td><td>{lws17@mails.tsinghua.edu.cn, yu-wang@tsinghua.edu.cn}
+</td></tr><tr><td>3b557c4fd6775afc80c2cf7c8b16edde125b270e</td><td>Face Recognition: Perspectives from the
+<br/>Real-World
+<br/><b>Institute for Infocomm Research, A*STAR</b><br/>1 Fusionopolis Way, #21-01 Connexis (South Tower), Singapore 138632.
+<br/>Phone: +65 6408 2071; Fax: +65 6776 1378;
+</td><td>('1709001', 'Bappaditya Mandal', 'bappaditya mandal')</td><td>E-mail: bmandal@i2r.a-star.edu.sg
+</td></tr><tr><td>3b3482e735698819a6a28dcac84912ec01a9eb8a</td><td>Individual Recognition Using Gait Energy Image
+<br/>Center for Research in Intelligent Systems
+<br/><b>University of California, Riverside, California 92521, USA</b><br/>
+</td><td>('1699904', 'Ju Han', 'ju han')<br/>('1707159', 'Bir Bhanu', 'bir bhanu')</td><td>@cris.ucr.edu
+</td></tr><tr><td>3b37d95d2855c8db64bd6b1ee5659f87fce36881</td><td>ADA: A Game-Theoretic Perspective on Data Augmentation for Object Detection
+<br/><b>University of Illinois at Chicago</b><br/><b>Carnegie Mellon University</b><br/><b>University of Illinois at Chicago</b></td><td>('2761655', 'Sima Behpour', 'sima behpour')<br/>('37991449', 'Kris M. Kitani', 'kris m. kitani')<br/>('1753269', 'Brian D. Ziebart', 'brian d. ziebart')</td><td>sbehpo2@uic.edu
+<br/>kkitani@cs.cmu.edu
+<br/>bziebart@uic.edu
+</td></tr><tr><td>3be7b7eb11714e6191dd301a696c734e8d07435f</td><td></td><td></td><td></td></tr><tr><td>3be027448ad49a79816cd21dcfcce5f4e1cec8a8</td><td>Actively Selecting Annotations Among Objects and Attributes
+<br/><b>University of Texas at Austin</b></td><td>('1770205', 'Adriana Kovashka', 'adriana kovashka')<br/>('2259154', 'Sudheendra Vijayanarasimhan', 'sudheendra vijayanarasimhan')<br/>('1794409', 'Kristen Grauman', 'kristen grauman')</td><td>{adriana, svnaras, grauman}@cs.utexas.edu
+</td></tr><tr><td>3bd56f4cf8a36dd2d754704bcb71415dcbc0a165</td><td>Robust Regression
+<br/><b>Robotics Institute, Carnegie Mellon University</b></td><td>('39792229', 'Dong Huang', 'dong huang')<br/>('1707876', 'Fernando De la Torre', 'fernando de la torre')</td><td></td></tr><tr><td>3b410ae97e4564bc19d6c37bc44ada2dcd608552</td><td>Scalability Analysis of Audio-Visual Person
+<br/>Identity Verification
+<br/>1 Communications Laboratory,
+<br/>Universit´e catholique de Louvain, B-1348 Belgium,
+<br/>2 IDIAP, CH-1920 Martigny,
+<br/>Switzerland
+</td><td>('34964585', 'Jacek Czyz', 'jacek czyz')<br/>('1751569', 'Samy Bengio', 'samy bengio')<br/>('2510802', 'Christine Marcel', 'christine marcel')<br/>('1698047', 'Luc Vandendorpe', 'luc vandendorpe')</td><td>czyz@tele.ucl.ac.be,
+<br/>{Samy.Bengio,Christine.Marcel}@idiap.ch
+</td></tr><tr><td>3b470b76045745c0ef5321e0f1e0e6a4b1821339</td><td>Consensus of Regression for Occlusion-Robust
+<br/>Facial Feature Localization
+<br/><b>Rutgers University, Piscataway, NJ 08854, USA</b><br/>2 Adobe Research, San Jose, CA 95110, USA
+</td><td>('39960064', 'Xiang Yu', 'xiang yu')<br/>('1721019', 'Jonathan Brandt', 'jonathan brandt')<br/>('1711560', 'Dimitris N. Metaxas', 'dimitris n. metaxas')</td><td></td></tr><tr><td>6f5ce5570dc2960b8b0e4a0a50eab84b7f6af5cb</td><td>Low Resolution Face Recognition Using a
+<br/>Two-Branch Deep Convolutional Neural Network
+<br/>Architecture
+</td><td>('19189138', 'Erfan Zangeneh', 'erfan zangeneh')<br/>('1772623', 'Mohammad Rahmati', 'mohammad rahmati')<br/>('3071758', 'Yalda Mohsenzadeh', 'yalda mohsenzadeh')</td><td></td></tr><tr><td>6f288a12033fa895fb0e9ec3219f3115904f24de</td><td>Learning Expressionlets via Universal Manifold
+<br/>Model for Dynamic Facial Expression Recognition
+</td><td>('1730228', 'Mengyi Liu', 'mengyi liu')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('3373117', 'Ruiping Wang', 'ruiping wang')<br/>('1710220', 'Xilin Chen', 'xilin chen')</td><td></td></tr><tr><td>6fa0c206873dcc5812f7ea74a48bb4bf4b273494</td><td>Real-time Mobile Facial Expression Recognition System – A Case Study
+<br/>Department of Computer Engineering
+<br/><b>The University of Texas at Dallas, Richardson, TX</b></td><td>('2774175', 'Myunghoon Suk', 'myunghoon suk')</td><td>{mhsuk, praba}@utdallas.edu
+</td></tr><tr><td>6f9824c5cb5ac08760b08e374031cbdabc953bae</td><td>Unconstrained Human Identification Using Comparative Facial Soft Biometrics
+<br/>Nawaf Y. Almudhahka
+<br/><b>University of Southampton</b><br/>Southampton, United Kingdom
+</td><td>('1727698', 'Mark S. Nixon', 'mark s. nixon')<br/>('31534955', 'Jonathon S. Hare', 'jonathon s. hare')</td><td>{nya1g14,msn,jsh2}@ecs.soton.ac.uk
+</td></tr><tr><td>6f2dc51d607f491dbe6338711c073620c85351ac</td><td></td><td></td><td></td></tr><tr><td>6fed504da4e192fe4c2d452754d23d3db4a4e5e3</td><td>Learning Deep Features via Congenerous Cosine Loss for Person Recognition
+<br/>1 SenseTime Group Ltd., Beijing, China
+<br/><b>The Chinese University of Hong Kong, New Territories, Hong Kong</b></td><td>('1715752', 'Yu Liu', 'yu liu')<br/>('1929886', 'Hongyang Li', 'hongyang li')<br/>('31843833', 'Xiaogang Wang', 'xiaogang wang')</td><td>liuyu@sensetime.com, {yangli, xgwang}@ee.cuhk.edu.hk
+</td></tr><tr><td>6f957df9a7d3fc4eeba53086d3d154fc61ae88df</td><td>Mod´elisation et suivi des d´eformations faciales :
+<br/>applications `a la description des expressions du visage
+<br/>dans le contexte de la langue des signes
+<br/>To cite this version:
+<br/>des expressions du visage dans le contexte de la langue des signes. Interface homme-machine
+<br/>[cs.HC]. Universit´e Paul Sabatier - Toulouse III, 2007. Fran¸cais. <tel-00185084>
+<br/>HAL Id: tel-00185084
+<br/>https://tel.archives-ouvertes.fr/tel-00185084
+<br/>Submitted on 5 Nov 2007
+<br/>HAL is a multi-disciplinary open access
+<br/>archive for the deposit and dissemination of sci-
+<br/>entific research documents, whether they are pub-
+<br/>lished or not. The documents may come from
+<br/>teaching and research institutions in France or
+<br/><b>abroad, or from public or private research centers</b><br/>L’archive ouverte pluridisciplinaire HAL, est
+<br/>destin´ee au d´epˆot et `a la diffusion de documents
+<br/>scientifiques de niveau recherche, publi´es ou non,
+<br/>´emanant des ´etablissements d’enseignement et de
+<br/>recherche fran¸cais ou ´etrangers, des laboratoires
+<br/>publics ou priv´es.
+</td><td>('3029015', 'Hugo Mercier', 'hugo mercier')<br/>('3029015', 'Hugo Mercier', 'hugo mercier')</td><td></td></tr><tr><td>6f26ab7edd971148723d9b4dc8ddf71b36be9bf7</td><td>Differences in Abundances of Cell-Signalling Proteins in
+<br/>Blood Reveal Novel Biomarkers for Early Detection Of
+<br/>Clinical Alzheimer’s Disease
+<br/><b>Centre for Bioinformatics, Biomarker Discovery and Information-Based Medicine, The University of Newcastle, Callaghan, Australia, 2 Departamento de Engenharia de</b><br/>Produc¸a˜o, Universidade Federal de Minas Gerais (UFMG), Belo Horizonte, Brazil
+</td><td>('8423987', 'Mateus Rocha de Paula', 'mateus rocha de paula')<br/>('34861417', 'Regina Berretta', 'regina berretta')<br/>('1738680', 'Pablo Moscato', 'pablo moscato')</td><td></td></tr><tr><td>6f75697a86d23d12a14be5466a41e5a7ffb79fad</td><td></td><td></td><td></td></tr><tr><td>6f7d06ced04ead3b9a5da86b37e7c27bfcedbbdd</td><td>Pages 51.1-51.12
+<br/>DOI: https://dx.doi.org/10.5244/C.30.51
+</td><td></td><td></td></tr><tr><td>6f7a8b3e8f212d80f0fb18860b2495be4c363eac</td><td>Creating Capsule Wardrobes from Fashion Images
+<br/>UT-Austin
+<br/>UT-Austin
+</td><td>('22211024', 'Wei-Lin Hsiao', 'wei-lin hsiao')<br/>('1794409', 'Kristen Grauman', 'kristen grauman')</td><td>kimhsiao@cs.utexas.edu
+<br/>grauman@cs.utexas.edu
+</td></tr><tr><td>6f6b4e2885ea1d9bea1bb2ed388b099a5a6d9b81</td><td>Structured Output SVM Prediction of Apparent Age,
+<br/>Gender and Smile From Deep Features
+<br/>Michal Uˇriˇc´aˇr
+<br/>CMP, Dept. of Cybernetics
+<br/>FEE, CTU in Prague
+<br/>Computer Vision Lab
+<br/>D-ITET, ETH Zurich
+<br/>Computer Vision Lab
+<br/>D-ITET, ETH Zurich
+<br/>PSI, ESAT, KU Leuven
+<br/>CVL, D-ITET, ETH Zurich
+<br/>Jiˇr´ı Matas
+<br/>CMP, Dept. of Cybernetics
+<br/>FEE, CTU in Prague
+</td><td>('1732855', 'Radu Timofte', 'radu timofte')<br/>('2173683', 'Rasmus Rothe', 'rasmus rothe')<br/>('1681236', 'Luc Van Gool', 'luc van gool')</td><td>uricamic@cmp.felk.cvut.cz
+<br/>radu.timofte@vision.ee.ethz.ch
+<br/>rrothe@vision.ee.ethz.ch
+<br/>vangool@vision.ee.ethz.ch
+<br/>matas@cmp.felk.cvut.cz
+</td></tr><tr><td>6f08885b980049be95a991f6213ee49bbf05c48d</td><td>This article appeared in a journal published by Elsevier. The attached
+<br/>copy is furnished to the author for internal non-commercial research
+<br/><b>and education use, including for instruction at the authors institution</b><br/>and sharing with colleagues.
+<br/><b>Other uses, including reproduction and distribution, or selling or</b><br/>licensing copies, or posting to personal, institutional or third party
+<br/>websites are prohibited.
+<br/>In most cases authors are permitted to post their version of the
+<br/>article (e.g. in Word or Tex form) to their personal website or
+<br/>institutional repository. Authors requiring further information
+<br/>regarding Elsevier’s archiving and manuscript policies are
+<br/>encouraged to visit:
+<br/>http://www.elsevier.com/authorsrights
+</td><td></td><td></td></tr><tr><td>6f0900a7fe8a774a1977c5f0a500b2898bcbe149</td><td>1
+<br/>Quotient Based Multiresolution Image Fusion of Thermal
+<br/>and Visual Images Using Daubechies Wavelet Transform
+<br/>for Human Face Recognition
+<br/><b>Tripura University (A Central University</b><br/>Suryamaninagar, Tripura 799130, India
+<br/><b>Jadavpur University</b><br/>Kolkata, West Bengal 700032, India
+<br/>*AICTE Emeritus Fellow
+</td><td>('1694317', 'Mrinal Kanti Bhowmik', 'mrinal kanti bhowmik')<br/>('1721942', 'Debotosh Bhattacharjee', 'debotosh bhattacharjee')<br/>('1729425', 'Mita Nasipuri', 'mita nasipuri')<br/>('1679476', 'Dipak Kumar Basu', 'dipak kumar basu')<br/>('1727663', 'Mahantapas Kundu', 'mahantapas kundu')</td><td>mkb_cse@yahoo.co.in
+<br/>debotosh@indiatimes.com, mitanasipuri@gmail.com, dipakkbasu@gmail.com, mkundu@cse.jdvu.ac.in
+</td></tr><tr><td>6fea198a41d2f6f73e47f056692f365c8e6b04ce</td><td>Video Captioning with Boundary-aware Hierarchical Language
+<br/>Decoding and Joint Video Prediction
+<br/><b>Nanyang Technological University</b><br/><b>Nanyang Technological University</b><br/>Singapore, Singapore
+<br/>Singapore, Singapore
+<br/><b>Nanyang Technological University</b><br/>Singapore, Singapore
+<br/>Shafiq Joty
+<br/><b>Nanyang Technological University</b><br/>Singapore, Singapore
+</td><td>('8668622', 'Xiangxi Shi', 'xiangxi shi')<br/>('1688642', 'Jianfei Cai', 'jianfei cai')<br/>('2174964', 'Jiuxiang Gu', 'jiuxiang gu')</td><td>xxshi@ntu.edu.sg
+<br/>JGU004@e.ntu.edu.sg
+<br/>asjfcai@ntu.edu.sg
+<br/>srjoty@ntu.edu.sg
+</td></tr><tr><td>6fbb179a4ad39790f4558dd32316b9f2818cd106</td><td>Input Aggregated Network for Face Video Representation
+<br/><b>Beijing Laboratory of IIT, School of Computer Science, Beijing Institute of Technology, Beijing, China</b><br/><b>Stony Brook University, Stony Brook, USA</b></td><td>('40061483', 'Zhen Dong', 'zhen dong')<br/>('3306427', 'Su Jia', 'su jia')<br/>('1690083', 'Chi Zhang', 'chi zhang')<br/>('35371203', 'Mingtao Pei', 'mingtao pei')</td><td></td></tr><tr><td>6f84e61f33564e5188136474f9570b1652a0606f</td><td>Dual Motion GAN for Future-Flow Embedded Video Prediction
+<br/><b>Carnegie Mellon University</b></td><td>('40250403', 'Xiaodan Liang', 'xiaodan liang')<br/>('3682478', 'Lisa Lee', 'lisa lee')</td><td>{xiaodan1,lslee}@cs.cmu.edu
+</td></tr><tr><td>6f35b6e2fa54a3e7aaff8eaf37019244a2d39ed3</td><td>DOI 10.1007/s00530-005-0177-4
+<br/>R E G U L A R PA P E R
+<br/>Learning probabilistic classifiers for human–computer
+<br/>interaction applications
+<br/>Published online: 10 May 2005
+<br/>c(cid:1) Springer-Verlag 2005
+<br/>intelligent
+<br/>interaction,
+</td><td>('1703601', 'Nicu Sebe', 'nicu sebe')<br/>('1695527', 'Theo Gevers', 'theo gevers')</td><td></td></tr><tr><td>6f3054f182c34ace890a32fdf1656b583fbc7445</td><td>Article
+<br/>Age Estimation Robust to Optical and Motion
+<br/>Blurring by Deep Residual CNN
+<br/><b>Division of Electronics and Electrical Engineering, Dongguk University, 30 Pildong-ro, 1-gil, Jung-gu</b><br/>Received: 9 March 2018; Accepted: 10 April 2018; Published: 13 April 2018
+</td><td>('31515471', 'Jeon Seong Kang', 'jeon seong kang')<br/>('31864414', 'Chan Sik Kim', 'chan sik kim')<br/>('29944844', 'Se Woon Cho', 'se woon cho')<br/>('4634733', 'Kang Ryoung Park', 'kang ryoung park')</td><td>Seoul 100-715, Korea; kjs2605@dgu.edu (J.S.K.); kimchsi9004@naver.com (C.S.K.);
+<br/>lyw941021@dongguk.edu (Y.W.L.); jsu319@naver.com (S.W.C.)
+<br/>* Correspondence: parkgr@dongguk.edu; Tel.: +82-10-3111-7022; Fax: +82-2-2277-8735
+</td></tr><tr><td>6fa3857faba887ed048a9e355b3b8642c6aab1d8</td><td>Face Recognition in Challenging Environments:
+<br/>An Experimental and Reproducible Research
+<br/>Survey
+</td><td>('2121764', 'Laurent El Shafey', 'laurent el shafey')</td><td></td></tr><tr><td>6fda12c43b53c679629473806c2510d84358478f</td><td>Journal of Academic and Applied Studies
+<br/>Vol. 1(1), June 2011, pp. 29-38
+<br/>A Training Model for Fuzzy Classification
+<br/>System
+<br/>
+<br/><b>Islamic Azad University</b><br/>Iran
+</td><td></td><td>Available online @ www.academians.org
+<br/>Email:a.jamshidnejad@yahoo.com
+</td></tr><tr><td>6fef65bd7287b57f0c3b36bf8e6bc987fd161b7d</td><td>Deep Discriminative Model for Video
+<br/>Classification
+<br/>Center for Machine Vision and Signal Analysis (CMVS)
+<br/><b>University of Oulu, Finland</b></td><td>('2014145', 'Mohammad Tavakolian', 'mohammad tavakolian')<br/>('1751372', 'Abdenour Hadid', 'abdenour hadid')</td><td>firstname.lastname@oulu.fi
+</td></tr><tr><td>6f7ce89aa3e01045fcd7f1c1635af7a09811a1fe</td><td>978-1-4673-0046-9/12/$26.00 ©2012 IEEE
+<br/>937
+<br/>ICASSP 2012
+</td><td></td><td></td></tr><tr><td>6fe2efbcb860767f6bb271edbb48640adbd806c3</td><td>SOFT BIOMETRICS: HUMAN IDENTIFICATION USING COMPARATIVE DESCRIPTIONS
+<br/>Soft Biometrics; Human Identification using
+<br/>Comparative Descriptions
+</td><td>('34386180', 'Daniel A. Reid', 'daniel a. reid')<br/>('1727698', 'Mark S. Nixon', 'mark s. nixon')<br/>('2093843', 'Sarah V. Stevenage', 'sarah v. stevenage')</td><td></td></tr><tr><td>6fdc0bc13f2517061eaa1364dcf853f36e1ea5ae</td><td>DAISEE: Dataset for Affective States in
+<br/>E-Learning Environments
+<br/>1 Microsoft India R&D Pvt. Ltd.
+<br/>2 Department of Computer Science, IIT Hyderabad
+</td><td>('50178849', 'Abhay Gupta', 'abhay gupta')<br/>('3468123', 'Richik Jaiswal', 'richik jaiswal')<br/>('3468212', 'Sagar Adhikari', 'sagar adhikari')<br/>('1973980', 'Vineeth Balasubramanian', 'vineeth balasubramanian')</td><td>abhgup@microsoft.com
+<br/>{cs12b1032, cs12b1034, vineethnb}@iith.ac.in
+</td></tr><tr><td>6f5151c7446552fd6a611bf6263f14e729805ec7</td><td>5KHHAO /7 %:0 7
+<br/>)>IJH=?J 9EJDE JDA ?JANJ B=?A ANFHAIIE ?=IIE?=JE KIEC JDA
+<br/>FH>=>EEJEAI JD=J A=?D A B IALAH= ?O ??KHHEC )7 CHKFI EI
+<br/>?=IIIAF=H=>EEJO MAECDJEC
+<br/>/=>H M=LAAJI H FHE?EF= ?FAJI ==OIEI 2+) ! 1 JDEI F=FAH MA
+</td><td></td><td>.=?E= )?JE 7EJ 4A?CEJE KIEC .EJAHA@
+<br/>?= *E=HO 2=JJAH .A=JKHAI MEJD *JIJH=FFA@
+<br/>=@ 9AECDJA@ -++ +=IIEAHI
+<br/>455EJD =@ 69E@A=JJ
+<br/>+AJHA BH 8EIE 5FAA?D =@ 5EC= 2H?AIIEC 7ELAHIEJO B 5KHHAO /KE@BH@
+<br/>4=O@5EJD 69E@A=JJ(IKHHAO=?K
+<br/>B=?E= =?JE ?@EC IOIJA .)+5 MA =@@HAII JDA FH>A B @AJA?J
+<br/>EC B=?E= =?JE KEJI )7I 6DA AJD@ =@FJA@ EI J JH=E = IECA
+<br/>AHHH?HHA?JEC KJFKJ ?@A -++ KJE?=II ?=IIEAH J AIJE=JA JDA
+<br/>FHAIAJ E JDA FH>A E=CA 2=JJ I?=EC EI KIA@ J ?=E>H=JA JDA -++
+<br/>KJFKJI J FH>=>EEJEAI =@ =FFHFHE=JA IKI B JDAIA FH>=>EEJEAI =HA
+<br/>J=A J >J=E = IAF=H=JA FH>=>EEJO BH A=?D )7 E@ELE@K=O .A=JKHA
+<br/>ANJH=?JE EI FAHBHA@ >O CAAH=JEC = =HCA K>AH B ?= >E=HO F=J
+<br/>JAH *2 BA=JKHAI =@ JDA IAA?JEC BH JDAIA KIEC B=IJ ?HHA=JE
+<br/>>=IA@ JAHEC .+*. 6DA >E=I =@ L=HE=?A FHFAHJEAI B JDA ?=IIEAH
+<br/>=HA A=IKHA@ =@ MA IDM JD=J >JD JDAIA IKH?AI B AHHH ?= >A HA
+<br/>@K?A@ >O AD=?EC -++ JDHKCD JDA =FFE?=JE B >JIJH=FFEC =@
+<br/>1JH@K?JE
+<br/>6DA B=?E==?JE ?@EC IOIJA .)+5 B -= =@ .HEAIA   EI ?O
+<br/>AFOA@ E =FFE?=JEI MDE?D FAHBH =KJ=JE? B=?E= ANFHAIIE HA?CEJE
+<br/>1 JDEI AJD@ E@ELE@K= B=?E= LAAJI =HA ?D=H=?JAHEIA@ =I A B "" JOFAI
+<br/>M =I =?JE KEJI )7I /HKFI B )7I =O JDA >A =FFA@ J AJEI
+<br/>KIEC = IJ=@=H@ ?@A > JA DMALAH JD=J )7I =HA J A?AII=HEO E@A
+<br/>FA@AJ =I JDA FHAIA?A B A )7 =O =A?J JDA =FFA=H=?A B =JDAH 6DAO
+<br/>=O =I ??KH =J @EAHAJ EJAIEJEAI =@ =O ??KH  O A IE@A B JDA
+<br/>B=?A 1 JDEI F=FAH MA B?KI  HA?CEIEC IEN )7I BH JDA HACE =HK@ JDA
+<br/>AOAI =I EKIJH=JA@ E .EC 
+<br/>1EJE= HAFHAIAJ=JE AJD@I BH )7 ?=IIE?=JE MAHA >=IA@  A=IKHEC
+<br/>JDA HA=JELA FIEJE B = =HCA K>AH B =@=H FEJI  JDA B=?A   1J
+<br/>D=I >AA BK@ DMALAH JD=J ?F=H=>A H >AJJAH HAIKJI ?= >A >J=EA@ >O
+<br/>J=EC = HA DEIJE? =FFH=?D J BA=JKHA ANJH=?JE KIEC AJD@I IK?D =I
+<br/>?F=HA JM IK?D AJD@I =AO 2+) " =@ ?= >E=HO F=JJAH *2
+</td></tr><tr><td>03c56c176ec6377dddb6a96c7b2e95408db65a7a</td><td>A Novel Geometric Framework on Gram Matrix
+<br/>Trajectories for Human Behavior Understanding
+</td><td>('46243486', 'Anis Kacem', 'anis kacem')<br/>('2909056', 'Mohamed Daoudi', 'mohamed daoudi')<br/>('2125606', 'Boulbaba Ben Amor', 'boulbaba ben amor')<br/>('2507859', 'Stefano Berretti', 'stefano berretti')</td><td></td></tr><tr><td>03d9ccce3e1b4d42d234dba1856a9e1b28977640</td><td></td><td></td><td></td></tr><tr><td>0322e69172f54b95ae6a90eb3af91d3daa5e36ea</td><td>Face Classification using Adjusted Histogram in
+<br/>Grayscale
+</td><td></td><td></td></tr><tr><td>036c41d67b49e5b0a578a401eb31e5f46b3624e0</td><td>The Tower Game Dataset: A Multimodal Dataset
+<br/>for Analyzing Social Interaction Predicates
+<br/>∗ SRI International
+<br/><b>University of California, Santa Cruz</b><br/><b>University of California, Berkeley</b></td><td>('1955011', 'David A. Salter', 'david a. salter')<br/>('1860011', 'Amir Tamrakar', 'amir tamrakar')<br/>('1832513', 'Behjat Siddiquie', 'behjat siddiquie')<br/>('4599641', 'Mohamed R. Amer', 'mohamed r. amer')<br/>('1696401', 'Ajay Divakaran', 'ajay divakaran')<br/>('40530418', 'Brian Lande', 'brian lande')<br/>('2108704', 'Darius Mehri', 'darius mehri')</td><td>Email: {david.salter, amir.tamrakar, behjat.siddiquie, mohamed.amer, ajay.divakaran}@sri.com
+<br/>Email: brianlande@soe.ucsc.edu
+<br/>Email: darius mehri@berkeley.edu
+</td></tr><tr><td>03b03f5a301b2ff88ab3bb4969f54fd9a35c7271</td><td>Multi-kernel learning of deep convolutional features for action recognition
+<br/><b>Imperial College London</b><br/>Noah’s Ark Lab (Huawei Technologies UK)
+<br/>Cortexica Vision Systems Limited
+</td><td>('39599054', 'Biswa Sengupta', 'biswa sengupta')<br/>('29742002', 'Yu Qian', 'yu qian')</td><td>b.sengupta@imperial.ac.uk
+</td></tr><tr><td>03f7041515d8a6dcb9170763d4f6debd50202c2b</td><td>Clustering Millions of Faces by Identity
+</td><td>('40653304', 'Charles Otto', 'charles otto')<br/>('7496032', 'Dayong Wang', 'dayong wang')<br/>('40217643', 'Anil K. Jain', 'anil k. jain')</td><td></td></tr><tr><td>03ce2ff688f9b588b6f264ca79c6857f0d80ceae</td><td>Attention Clusters: Purely Attention Based
+<br/>Local Feature Integration for Video Classification
+<br/><b>Tsinghua University, 2Rutgers University, 3Massachusetts Institute of Technology, 4Baidu IDL</b></td><td>('1716690', 'Xiang Long', 'xiang long')<br/>('2551285', 'Chuang Gan', 'chuang gan')<br/>('1732213', 'Gerard de Melo', 'gerard de melo')<br/>('3045089', 'Jiajun Wu', 'jiajun wu')<br/>('48033101', 'Xiao Liu', 'xiao liu')<br/>('35247507', 'Shilei Wen', 'shilei wen')</td><td></td></tr><tr><td>03b99f5abe0e977ff4c902412c5cb832977cf18e</td><td>CROWLEY AND ZISSERMAN: OF GODS AND GOATS
+<br/>Of Gods and Goats: Weakly Supervised
+<br/>Learning of Figurative Art
+<br/>Elliot J. Crowley
+<br/>Department of Engineering Science
+<br/><b>University of Oxford</b></td><td>('1688869', 'Andrew Zisserman', 'andrew zisserman')</td><td>elliot@robots.ox.ac.uk
+<br/>az@robots.ox.ac.uk
+</td></tr><tr><td>038ce930a02d38fb30d15aac654ec95640fe5cb0</td><td>Approximate Structured Output Learning for Constrained Local
+<br/>Models with Application to Real-time Facial Feature Detection and
+<br/>Tracking on Low-power Devices
+</td><td>('40474289', 'Shuai Zheng', 'shuai zheng')<br/>('3274976', 'Paul Sturgess', 'paul sturgess')<br/>('1730268', 'Philip H. S. Torr', 'philip h. s. torr')</td><td></td></tr><tr><td>03167776e17bde31b50f294403f97ee068515578</td><td>Chapter 11. Facial Expression Analysis
+<br/><b>University of Pittsburgh, Pittsburgh, PA 15260, USA</b><br/>1 Principles of Facial Expression Analysis
+<br/>1.1 What Is Facial Expression Analysis?
+<br/>Facial expressions are the facial changes in response to a person’s internal emotional states,
+<br/>intentions, or social communications. Facial expression analysis has been an active research
+<br/>topic for behavioral scientists since the work of Darwin in 1872 [18, 22, 25, 71]. Suwa et
+<br/>al. [76] presented an early attempt to automatically analyze facial expressions by tracking the
+<br/>motion of 20 identified spots on an image sequence in 1978. After that, much progress has
+<br/>been made to build computer systems to help us understand and use this natural form of human
+<br/>communication [6, 7, 17, 20, 28, 39, 51, 55, 65, 78, 81, 92, 93, 94, 96].
+<br/>In this chapter, facial expression analysis refers to computer systems that attempt to auto-
+<br/>matically analyze and recognize facial motions and facial feature changes from visual informa-
+<br/>tion. Sometimes the facial expression analysis has been confused with emotion analysis in the
+<br/>computer vision domain. For emotion analysis, higher level knowledge is required. For exam-
+<br/>ple, although facial expressions can convey emotion, they can also express intention, cognitive
+<br/>processes, physical effort, or other intra- or interpersonal meanings. Interpretation is aided by
+<br/>context, body gesture, voice, individual differences, and cultural factors as well as by facial
+<br/>configuration and timing [10, 67, 68]. Computer facial expression analysis systems need to
+<br/>analyze the facial actions regardless of context, culture, gender, and so on.
+<br/>The accomplishments in the related areas such as psychological studies, human movement
+<br/>analysis, face detection, face tracking, and recognition make the automatic facial expression
+<br/>analysis possible. Automatic facial expression analysis can be applied in many areas such as
+<br/>emotion and paralinguistic communication, clinical psychology, psychiatry, neurology, pain
+<br/>assessment, lie detection, intelligent environments, and multimodal human computer interface
+<br/>(HCI).
+<br/>1.2 Basic Structure of Facial Expression Analysis Systems
+<br/>Facial expression analysis includes both measurement of facial motion and recognition of ex-
+<br/>pression. The general approach to automatic facial expression analysis (AFEA) consists of
+</td><td>('40383812', 'Ying-Li Tian', 'ying-li tian')<br/>('1733113', 'Takeo Kanade', 'takeo kanade')<br/>('1737918', 'Jeffrey F. Cohn', 'jeffrey f. cohn')</td><td>1 IBM T. J. Watson Research Center, Hawthorne, NY 10532, USA. yltian@us.ibm.com
+<br/>2 Robotics Institute, Carnegie Mellon University, Pittsburgh, PA 15213, USA. tk@cs.cmu.edu
+<br/>jeffcohn@pitt.edu
+</td></tr><tr><td>0334a8862634988cc684dacd4279c5c0d03704da</td><td>FaceNet2ExpNet: Regularizing a Deep Face Recognition Net for
+<br/>Expression Recognition
+<br/><b>University of Maryland, College Park</b><br/>2 Siemens Healthcare Technology Center, Princeton, New Jersey
+</td><td>('1700765', 'Hui Ding', 'hui ding')<br/>('1682187', 'Shaohua Kevin Zhou', 'shaohua kevin zhou')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td></td></tr><tr><td>03c1fc9c3339813ed81ad0de540132f9f695a0f8</td><td>Proceedings of Machine Learning Research 81:1–15, 2018
+<br/>Conference on Fairness, Accountability, and Transparency
+<br/>Gender Shades: Intersectional Accuracy Disparities in
+<br/>Commercial Gender Classification∗
+<br/>MIT Media Lab 75 Amherst St. Cambridge, MA 02139
+<br/>Microsoft Research 641 Avenue of the Americas, New York, NY 10011
+<br/>Editors: Sorelle A. Friedler and Christo Wilson
+</td><td>('38222513', 'Joy Buolamwini', 'joy buolamwini')<br/>('2076288', 'Timnit Gebru', 'timnit gebru')</td><td>joyab@mit.edu
+<br/>timnit.gebru@microsoft.com
+</td></tr><tr><td>0339459a5b5439d38acd9c40a0c5fea178ba52fb</td><td>D|C|I&I 2009 Prague
+<br/>Multimodal recognition of emotions in car
+<br/>environments
+</td><td></td><td></td></tr><tr><td>030ef31b51bd4c8d0d8f4a9a32b80b9192fe4c3f</td><td>11936 • The Journal of Neuroscience, August 26, 2015 • 35(34):11936 –11945
+<br/>Behavioral/Cognitive
+<br/>Inhibition-Induced Forgetting Results from Resource
+<br/>Competition between Response Inhibition and Memory
+<br/>Encoding Processes
+<br/><b>Center for Cognitive Neuroscience, Duke University, Durham, North Carolina</b><br/>Response inhibition is a key component of executive control, but its relation to other cognitive processes is not well understood. We
+<br/>recently documented the “inhibition-induced forgetting effect”: no-go cues are remembered more poorly than go cues. We attributed this
+<br/>effect to central-resource competition, whereby response inhibition saps attention away from memory encoding. However, this proposal
+<br/>is difficult to test with behavioral means alone. We therefore used fMRI in humans to test two neural predictions of the “common resource
+<br/>hypothesis”: (1) brain regions associated with response inhibition should exhibit greater resource demands during encoding of subse-
+<br/>quently forgotten than remembered no-go cues; and (2) this higher inhibitory resource demand should lead to memory encoding regions
+<br/>having less resources available during encoding of subsequently forgotten no-go cues. Participants categorized face stimuli by gender in
+<br/>a go/no-go task and, following a delay, performed a surprise recognition memory test for those faces. Replicating previous findings,
+<br/>memory was worse for no-go than for go stimuli. Crucially, forgetting of no-go cues was predicted by high inhibitory resource demand, as
+<br/>quantified by the trial-by-trial ratio of activity in neural “no-go” versus “go” networks. Moreover, this index of inhibitory demand
+<br/>exhibited an inverse trial-by-trial relationship with activity in brain regions responsible for the encoding of no-go cues into memory,
+<br/>notably the ventrolateral prefrontal cortex. This seesaw pattern between the neural resource demand of response inhibition and activity
+<br/>related to memory encoding directly supports the hypothesis that response inhibition temporarily saps attentional resources away from
+<br/>stimulus processing.
+<br/>Key words: attention; cognitive control; memory; response inhibition
+<br/>Significance Statement
+<br/>Recent behavioral experiments showed that inhibiting a motor response to a stimulus (a “no-go cue”) impairs subsequent
+<br/>memory for that cue. Here, we used fMRI to test whether this “inhibition-induced forgetting effect” is caused by competition for
+<br/>neural resources between the processes of response inhibition and memory encoding. We found that trial-by-trial variations in
+<br/>neural inhibitory resource demand predicted subsequent forgetting of no-go cues and that higher inhibitory demand was further-
+<br/>more associated with lower concurrent activation in brain regions responsible for successful memory encoding of no-go cues.
+<br/>Thus, motor inhibition and stimulus encoding appear to compete with each other: when more resources have to be devoted to
+<br/>inhibiting action, less are available for encoding sensory stimuli.
+<br/>Introduction
+<br/>Response inhibition, the ability to preempt or cancel goal-
+<br/>inappropriate actions, is considered a core cognitive control
+<br/>Received Feb. 6, 2015; revised July 22, 2015; accepted July 24, 2015.
+<br/>Author contributions: Y.-C.C. and T.E. designed research; Y.-C.C. performed research; Y.-C.C. analyzed data;
+<br/>Y.-C.C. and T.E. wrote the paper.
+<br/><b>This work was supported in part by National Institute of Mental Health Award R01 MH 087610 to T.E</b><br/>The authors declare no competing financial interests.
+<br/>DOI:10.1523/JNEUROSCI.0519-15.2015
+<br/>Copyright © 2015 the authors
+<br/>0270-6474/15/3511936-10$15.00/0
+<br/>function (Logan and Cowan, 1984; Aron, 2007), an impairment
+<br/>that contributes to impulsive symptoms of multiple psychiatric
+<br/>diseases,
+<br/>including obsessive-compulsive disorder, substance
+<br/>abuse, and attention-deficit/hyperactivity disorder (Horn et al.,
+<br/>2003; de Wit, 2009). However, the relation of response inhibition
+<br/>to other cognitive control functions, and to traditional cognitive
+<br/>domains, such as perception, memory, and attention, remains
+<br/>poorly understood (Jurado and Rosselli, 2007; Miyake and Fried-
+<br/>man, 2012).
+<br/>A recent behavioral study has shed new light on this issue by
+<br/>documenting an “inhibition-induced forgetting” effect, whereby
+<br/>inhibiting responses to no-go or stop cues impaired subsequent
+</td><td>('2846298', 'Yu-Chin Chiu', 'yu-chin chiu')<br/>('1900710', 'Tobias Egner', 'tobias egner')<br/>('2846298', 'Yu-Chin Chiu', 'yu-chin chiu')</td><td>LSRC, Box 90999, Durham, NC 27708. E-mail: chiu.yuchin@duke.edu.
+</td></tr><tr><td>03f98c175b4230960ac347b1100fbfc10c100d0c</td><td>Supervised Descent Method and its Applications to Face Alignment
+<br/><b>The Robotics Institute, Carnegie Mellon University, Pittsburgh PA</b></td><td>('3182065', 'Xuehan Xiong', 'xuehan xiong')<br/>('1707876', 'Fernando De la Torre', 'fernando de la torre')</td><td>xxiong@andrew.cmu.edu
+<br/>ftorre@cs.cmu.edu
+</td></tr><tr><td>032825000c03b8ab4c207e1af4daeb1f225eb025</td><td>J. Appl. Environ. Biol. Sci., 7(10)159-164, 2017
+<br/>ISSN: 2090-4274
+<br/>© 2017, TextRoad Publication
+<br/>Journal of Applied Environmental
+<br/>and Biological Sciences
+<br/>www.textroad.com
+<br/>A Novel Approach for Human Face Detection in Color Images Using Skin
+<br/>Color and Golden Ratio
+<br/><b>Bacha Khan University, Charsadda, KPK, Pakistan</b><br/><b>Abdul WaliKhan University, Mardan, KPK, Pakistan</b><br/>Received: May 9, 2017
+<br/>Accepted: August 2, 2017
+</td><td>('12144785', 'Faizan Ullah', 'faizan ullah')<br/>('49669073', 'Dilawar Shah', 'dilawar shah')<br/>('46463663', 'Sabir Shah', 'sabir shah')<br/>('47160013', 'Abdus Salam', 'abdus salam')<br/>('12579194', 'Shujaat Ali', 'shujaat ali')</td><td></td></tr><tr><td>03264e2e2709d06059dd79582a5cc791cbef94b1</td><td>Convolutional Neural Networks for Facial Attribute-based Active Authentication
+<br/>On Mobile Devices
+<br/><b>University of Maryland, College Park</b><br/><b>University of Maryland, College Park</b><br/>MD, USA
+<br/>MD, USA
+</td><td>('9215658', 'Rama Chellappa', 'rama chellappa')<br/>('3383048', 'Pouya Samangouei', 'pouya samangouei')</td><td>pouya@umiacs.umd.org
+<br/>rama@umiacs.umd.edu
+</td></tr><tr><td>03a8f53058127798bc2bc0245d21e78354f6c93b</td><td>Max-Margin Additive Classifiers for Detection
+<br/>Sam Hare
+<br/>VGG Reading Group
+<br/>October 30, 2009
+</td><td>('35208858', 'Subhransu Maji', 'subhransu maji')<br/>('39668247', 'Alexander C. Berg', 'alexander c. berg')</td><td></td></tr><tr><td>03fc466fdbc8a2efb6e3046fcc80e7cb7e86dc20</td><td>A Real Time System for Model-based Interpretation of
+<br/>the Dynamics of Facial Expressions
+<br/>Technische Universit¨at M¨unchen
+<br/>Boltzmannstr. 3, 85748 Garching
+<br/>1. Motivation
+<br/>Recent progress in the field of Computer Vision allows
+<br/>intuitive interaction via speech, gesture or facial expressions
+<br/>between humans and technical systems.Model-based tech-
+<br/>niques facilitate accurately interpreting images with faces
+<br/>by exploiting a priori knowledge, such as shape and texture
+<br/>information. This renders them an inevitable component
+<br/>to realize the paradigm of intuitive human-machine interac-
+<br/>tion.
+<br/>Our demonstration shows model-based recognition of
+<br/>facial expressions in real-time via the state-of-the-art
+<br/>Candide-3 face model [1] as visible in Figure 1. This three-
+<br/>dimensional and deformable model is highly appropriate
+<br/>for real-world face interpretation applications. However,
+<br/>its complexity challenges the task of model fitting and we
+<br/>tackle this challenge with an algorithm that has been auto-
+<br/>matically learned from a large set of images. This solution
+<br/>provides both, high accuracy and runtime. Note, that our
+<br/>system is not limited to facial expression estimation. Gaze
+<br/>direction, gender and age are also estimated.
+<br/>2. Face Model Fitting
+<br/>Models reduce the large amount of image data to a
+<br/>small number of model parameters to describe the im-
+<br/>age content, which facilitates and accelerates the subse-
+<br/>quent interpretation task. Cootes et al. [3] introduced mod-
+<br/>elling shapes with Active Contours. Further enhancements
+<br/>emerged the idea of expanding shape models with texture
+<br/>information [2]. Recent research considers modelling faces
+<br/>in 3D space [1, 10].
+<br/>Fitting the face model is the computational challenge of
+<br/>finding the parameters that best describe the face within a
+<br/>given image. This task is often addressed by minimizing
+<br/>an objective function, such as the pixel error between the
+<br/>model’s rendered surface and the underlying image content.
+<br/>This section describes the four main components of model-
+<br/>based techniques, see [9].
+<br/>The face model contains a parameter vector p that repre-
+<br/>sents its configurations. We integrate the complex and de-
+<br/>formable 3D wire frame Candide-3 face model [1]. The
+<br/>model consists of 116 anatomical landmarks and its param-
+<br/>eter vector p = (rx, ry, rz, s, tx, ty, σ, α)T describes the
+<br/>affine transformation (rx, ry, rz, s, tx, ty) and the deforma-
+<br/>tion (σ, α). The 79 deformation parameters indicate the
+<br/>shape of facial components such as the mouth, the eyes, or
+<br/>the eye brows, etc., see Figure 2.
+<br/>The localization algorithm computes an initial estimate of
+<br/>the model parameters that is further refined by the subse-
+<br/>quent fitting algorithm. Our system integrates the approach
+<br/>of [8], which detects the model’s affine transformation in
+<br/>case the image shows a frontal view face.
+<br/>The objective function yields a comparable value that
+<br/>specifies how accurately a parameterized model matches an
+<br/>image. Traditional approaches manually specify the objec-
+<br/>tive function in a laborious and erroneous task. In contrast,
+<br/>we automatically learn the objective function from a large
+<br/>set of training data based on objective information theoretic
+<br/>measures [9]. This approach does not require expert knowl-
+<br/>edge and it is domain-independently applicable. As a re-
+<br/>sult, this approach yields more robust and accurate objective
+<br/>functions, which greatly facilitate the task of the associated
+<br/>fitting algorithms. Accurately estimated model parameters
+<br/>in turn are required to infer correct high-level information,
+<br/>such as facial expression or gaze direction.
+<br/>Figure 1. Interpreting expressions with the Candide-3 face model.
+</td><td>('1685773', 'Christoph Mayer', 'christoph mayer')<br/>('32131501', 'Matthias Wimmer', 'matthias wimmer')<br/>('1704997', 'Freek Stulp', 'freek stulp')<br/>('1725709', 'Zahid Riaz', 'zahid riaz')<br/>('36401753', 'Anton Roth', 'anton roth')<br/>('34667371', 'Martin Eggers', 'martin eggers')<br/>('1699132', 'Bernd Radig', 'bernd radig')</td><td>{mayerc,wimmerm,stulp,riaz,roth,eggers,radig}@in.tum.de
+</td></tr><tr><td>03b98b4a2c0b7cc7dae7724b5fe623a43eaf877b</td><td>Acume: A Novel Visualization Tool for Understanding Facial
+<br/>Expression and Gesture Data
+</td><td></td><td></td></tr><tr><td>03adcf58d947a412f3904a79f2ab51cfdf0e838a</td><td>World Journal of Science and Technology 2012, 2(4):136-139
+<br/>ISSN: 2231 – 2587
+<br/>Available Online: www.worldjournalofscience.com
+<br/>_________________________________________________________________
+<br/> Proceedings of "Conference on Advances in Communication and Computing (NCACC'12)”
+<br/><b>Held at R.C.Patel Institute of Technology, Shirpur, Dist. Dhule, Maharastra, India</b><br/> April 21, 2012
+<br/>Video-based face recognition: a survey
+<br/><b>R.C.Patel Institute of Technology, Shirpur, Dist.Dhule.Maharashtra, India</b></td><td>('40628915', 'Shailaja A Patil', 'shailaja a patil')<br/>('30751046', 'Pramod J Deore', 'pramod j deore')</td><td></td></tr><tr><td>03104f9e0586e43611f648af1132064cadc5cc07</td><td></td><td></td><td></td></tr><tr><td>03f14159718cb495ca50786f278f8518c0d8c8c9</td><td>2015 IEEE International Conference on Control System, Computing and Engineering, Nov 27 – Nov 29, 2015 Penang, Malaysia
+<br/>2015 IEEE International Conference on Control System,
+<br/>Computing and Engineering (ICCSCE2015)
+<br/>Technical Session 1A – DAY 1 – 27th Nov 2015
+<br/>Time: 3.00 pm – 4.30 pm
+<br/>Venue: Jintan
+<br/>Topic: Signal and Image Processing
+<br/>3.00 pm – 3.15pm
+<br/>3.15 pm – 3.30pm
+<br/>3.30 pm – 3.45pm
+<br/>3.45 pm – 4.00pm
+<br/>4.00 pm – 4.15pm
+<br/>4.15 pm – 4.30pm
+<br/>4.30 pm – 4.45pm
+<br/>1A 01 ID3
+<br/>Can Subspace Based Learning Approach Perform on Makeup Face
+<br/>Recognition?
+<br/>Khor Ean Yee, Pang Ying Han, Ooi Shih Yin and Wee Kuok Kwee
+<br/>1A 02 ID35
+<br/>Performance Evaluation of HOG and Gabor Features for Vision-based
+<br/>Vehicle Detection
+<br/>1A 03 ID23
+<br/>Experimental Method to Pre-Process Fuzzy Bit Planes before Low-Level
+<br/>Feature Extraction in Thermal Images
+<br/>Chan Wai Ti and Sim Kok Swee
+<br/>1A 04 ID84
+<br/>Fractal-based Texture and HSV Color Features for Fabric Image Retrieval
+<br/>Nanik Suciati, Darlis Herumurti and Arya Yudhi Wijaya
+<br/>1A 05 ID168
+<br/>Study of Automatic Melody Extraction Methods for Philippine Indigenous
+<br/>Music
+<br/>Jason Disuanco, Vanessa Tan, Franz de Leon
+<br/>1A 06 ID211
+<br/>Acoustical Comparison between Voiced and Voiceless Arabic Phonemes of
+<br/>Malay
+<br/>Speakers
+<br/>Ali Abd Almisreb, Ahmad Farid Abidin, Nooritawati Md Tahir
+<br/>*shaded cell is the proposed session chair
+<br/>viii
+<br/>©Faculty of Electrical Engineering, Universiti Teknologi MARA
+</td><td>('2715116', 'Soo Siang Teoh', 'soo siang teoh')</td><td>Tea Break @ Foyer
+</td></tr><tr><td>0394040749195937e535af4dda134206aa830258</td><td>Geodesic Entropic Graphs for Dimension and
+<br/>Entropy Estimation in Manifold Learning
+<br/>December 16, 2003
+</td><td>('1759109', 'Jose A. Costa', 'jose a. costa')<br/>('1699402', 'Alfred O. Hero', 'alfred o. hero')</td><td></td></tr><tr><td>0334cc0374d9ead3dc69db4816d08c917316c6c4</td><td></td><td></td><td></td></tr><tr><td>03c48d8376990cff9f541d542ef834728a2fcda2</td><td>Temporal Action Localization in Untrimmed Videos via Multi-stage CNNs
+<br/><b>Columbia University</b><br/>New York, NY, USA
+</td><td>('2195345', 'Zheng Shou', 'zheng shou')<br/>('2704179', 'Dongang Wang', 'dongang wang')<br/>('9546964', 'Shih-Fu Chang', 'shih-fu chang')</td><td>{zs2262,dw2648,sc250}@columbia.edu
+</td></tr><tr><td>0319332ded894bf1afe43f174f5aa405b49305f0</td><td>Shearlet Network-based Sparse Coding Augmented by
+<br/>Facial Texture Features for Face Recognition
+<br/>Ben Amar1
+<br/><b>Research Groups on Intelligent Machines, University of Sfax, Sfax 3038, Tunisia</b><br/><b>University of Houston, Houston, TX 77204, USA</b></td><td>('2791150', 'Mohamed Anouar Borgi', 'mohamed anouar borgi')<br/>('8847309', 'Demetrio Labate', 'demetrio labate')</td><td>{anoir.borgi@ieee.org ; dlabate@math.uh.edu ;
+<br/>maher.elarbi@gmail.com; chokri.benamar@ieee.org}
+</td></tr><tr><td>03ac1c694bc84a27621da6bfe73ea9f7210c6d45</td><td>Chapter 1
+<br/>Introduction to information security
+<br/>foundations and applications
+<br/>1.1 Background
+<br/>Information security has extended to include several research directions like user
+<br/>authentication and authorization, network security, hardware security, software secu-
+<br/>rity, and data cryptography. Information security has become a crucial need for
+<br/>protecting almost all information transaction applications. Security is considered as
+<br/>an important science discipline whose many multifaceted complexities deserve the
+<br/>synergy of the computer science and engineering communities.
+<br/>Recently, due to the proliferation of Information and Communication Tech-
+<br/>nologies, information security has started to cover emerging topics such as cloud
+<br/>computing security, smart cities’ security and privacy, healthcare and telemedicine,
+<br/>the Internet-of-Things (IoT) security [1], the Internet-of-Vehicles security, and sev-
+<br/>eral types of wireless sensor networks security [2,3]. In addition, information security
+<br/>has extended further to cover not only technical security problems but also social and
+<br/>organizational security challenges [4,5].
+<br/>Traditional systems’ development approaches were focusing on the system’s
+<br/>usability where security was left to the last stage with less priority. However, the
+<br/>new design approaches consider security-in-design process where security is consid-
+<br/>ered at the early phase of the design process. The new designed systems should be
+<br/>well protected against the available security attacks. Having new systems such as IoT
+<br/>or healthcare without enough security may lead to a leakage of sensitive data and, in
+<br/>some cases, life threatening situations.
+<br/>Taking the social aspect into account, security education is a vital need for both
+<br/>practitioners and system users [6]. Users’ misbehaviour due to a lack of security
+<br/>knowledge is the weakest point in the system security chain. The users’ misbehaviour
+<br/>is considered as a security vulnerability that may be exploited for launching security
+<br/>attacks. A successful security attack such as distributed denial-of-service attack will
+<br/>impose incident recovery cost in addition to the downtime cost.
+<br/><b>Electrical and Space Engineering, Lule University of Technology</b><br/>Sweden
+<br/><b>Faculty of Engineering, Al Azhar University, Qena, Egypt</b></td><td>('4073409', 'Ali Ismail Awad', 'ali ismail awad')</td><td></td></tr><tr><td>03baf00a3d00887dd7c828c333d4a29f3aacd5f5</td><td>Entropy Based Feature Selection for 3D Facial
+<br/>Expression Recognition
+<br/>Submitted to the
+<br/><b>Institute of Graduate Studies and Research</b><br/>in partial fulfillment of the requirements for the Degree of
+<br/>Doctor of Philosophy
+<br/>in
+<br/> Electrical and Electronic Engineering
+<br/><b>Eastern Mediterranean University</b><br/> September 2014
+<br/>Gazimağusa, North Cyprus
+</td><td>('1974278', 'Kamil Yurtkan', 'kamil yurtkan')</td><td></td></tr><tr><td>0359f7357ea8191206b9da45298902de9f054c92</td><td>Going Deeper in Facial Expression Recognition using Deep Neural Networks
+<br/>1 Department of Electrical and Computer Engineering
+<br/>2 Department of Computer Science
+<br/><b>University of Denver, Denver, CO</b></td><td>('2314025', 'Ali Mollahosseini', 'ali mollahosseini')<br/>('38461715', 'David Chan', 'david chan')<br/>('3093835', 'Mohammad H. Mahoor', 'mohammad h. mahoor')</td><td>ali.mollahosseini@du.edu, davidchan@cs.du.edu, and mmahoor@du.edu ∗ †
+</td></tr><tr><td>0394e684bd0a94fc2ff09d2baef8059c2652ffb0</td><td>Median Robust Extended Local Binary Pattern
+<br/>for Texture Classification
+<br/>Index Terms— Texture descriptors, rotation invariance, local
+<br/>binary pattern (LBP), feature extraction, texture analysis.
+<br/>how the texture recognition process works in humans as
+<br/>well as in the important role it plays in the wide variety of
+<br/>applications of computer vision and image analysis [1], [2].
+<br/>The many applications of texture classification include medical
+<br/>image analysis and understanding, object recognition, biomet-
+<br/>rics, content-based image retrieval, remote sensing, industrial
+<br/>inspection, and document classification.
+<br/>As a classical pattern recognition problem, texture classifi-
+<br/>cation primarily consists of two critical subproblems: feature
+<br/>extraction and classifier designation [1], [2]. It is generally
+<br/>agreed that the extraction of powerful texture features plays a
+<br/>relatively more important role, since if poor features are used
+<br/>even the best classifier will fail to achieve good recognition
+<br/>results. Consequently, most research in texture classification
+<br/>focuses on the feature extraction part and numerous texture
+<br/>feature extraction methods have been developed, with excellent
+<br/>surveys given in [1]–[5]. Most existing methods have not,
+<br/>however, been capable of performing sufficiently well for
+<br/>real-world applications, which have demanding requirements
+<br/>including database size, nonideal environmental conditions,
+<br/>and running in real-time.
+</td><td>('39695518', 'Li Liu', 'li liu')<br/>('1716428', 'Songyang Lao', 'songyang lao')<br/>('1731709', 'Paul W. Fieguth', 'paul w. fieguth')<br/>('1714724', 'Matti Pietikäinen', 'matti pietikäinen')</td><td></td></tr><tr><td>03e88bf3c5ddd44ebf0e580d4bd63072566613ad</td><td></td><td></td><td></td></tr><tr><td>03f4c0fe190e5e451d51310bca61c704b39dcac8</td><td>J Ambient Intell Human Comput
+<br/>DOI 10.1007/s12652-016-0406-z
+<br/>O R I G I N A L R E S E A R C H
+<br/>CHEAVD: a Chinese natural emotional audio–visual database
+<br/>Received: 30 March 2016 / Accepted: 22 August 2016
+<br/>Ó Springer-Verlag Berlin Heidelberg 2016
+</td><td>('1704841', 'Ya Li', 'ya li')<br/>('37670752', 'Jianhua Tao', 'jianhua tao')<br/>('1850313', 'Linlin Chao', 'linlin chao')<br/>('1694779', 'Wei Bao', 'wei bao')<br/>('3095820', 'Yazhu Liu', 'yazhu liu')</td><td></td></tr><tr><td>03bd58a96f635059d4bf1a3c0755213a51478f12</td><td>Smoothed Low Rank and Sparse Matrix Recovery by
+<br/>Iteratively Reweighted Least Squares Minimization
+<br/>This work presents a general framework for solving the low
+<br/>rank and/or sparse matrix minimization problems, which may
+<br/>involve multiple non-smooth terms. The Iteratively Reweighted
+<br/>Least Squares (IRLS) method is a fast solver, which smooths the
+<br/>objective function and minimizes it by alternately updating the
+<br/>variables and their weights. However, the traditional IRLS can
+<br/>only solve a sparse only or low rank only minimization problem
+<br/>with squared loss or an affine constraint. This work generalizes
+<br/>IRLS to solve joint/mixed low rank and sparse minimization
+<br/>problems, which are essential formulations for many tasks. As a
+<br/>concrete example, we solve the Schatten-p norm and (cid:96)2,q-norm
+<br/>regularized Low-Rank Representation (LRR) problem by IRLS,
+<br/>and theoretically prove that the derived solution is a stationary
+<br/>point (globally optimal if p, q ≥ 1). Our convergence proof of
+<br/>IRLS is more general than previous one which depends on
+<br/>the special properties of the Schatten-p norm and (cid:96)2,q-norm.
+<br/>Extensive experiments on both synthetic and real data sets
+<br/>demonstrate that our IRLS is much more efficient.
+<br/>Index Terms—Low-rank and sparse minimization, Iteratively
+<br/>Reweighted Least Squares.
+<br/>I. INTRODUCTION
+<br/>I N recent years, the low rank and sparse matrix learning
+<br/>problems have been hot research topics and lead to broad
+<br/>applications in computer vision and machine learning, such
+<br/>as face recognition [1], collaborative filtering [2], background
+<br/>modeling [3], and subspace segmentation [4], [5]. The (cid:96)1-
+<br/>norm and nuclear norm are popular choices for sparse and
+<br/>low rank matrix minimizations with theoretical guarantees
+<br/>and competitive performance in practice. The models can be
+<br/>formulated as a joint low rank and sparse matrix minimization
+<br/>problem as follow:
+<br/>T(cid:88)
+<br/>nuclear norm ||M||∗ = (cid:80)
+<br/>min
+<br/>i=1
+<br/>where x and bi can be either vectors or matrices, Fi is a
+<br/>convex function (the Frobenius norm ||M||2
+<br/>ij;
+<br/>ij M 2
+<br/>i σi(M ), the sum of all singular
+<br/>F = (cid:80)
+<br/>Fi(Ai(x) + bi),
+<br/>(1)
+<br/>Copyright (c) 2014 IEEE. Personal use of this material
+<br/>is permitted.
+<br/>However, permission to use this material for any other purposes must be
+<br/>This research is supported by the Singapore National Research Foundation
+<br/>administered by the IDM Programme Office. Z. Lin is supported by NSF
+<br/>China (grant nos. 61272341 and 61231002), 973 Program of China (grant no.
+<br/>2015CB3525) and MSRA Collaborative Research Program.
+<br/>C. Lu and S. Yan are with the Department of Electrical and Com-
+<br/><b>puter Engineering, National University of Singapore, Singapore (e-mails</b><br/>Z. Lin is with the Key Laboratory of Machine Perception (MOE), School
+<br/>values of a matrix; (cid:96)1-norm ||M||1 = (cid:80)
+<br/>norm ||M||2,1 =(cid:80)
+<br/>= (cid:80)
+<br/>ij |Mij|; and (cid:96)2,1-
+<br/>j ||Mj||2, the sum of the (cid:96)2-norm of each
+<br/>column of a matrix) and Ai : Rd → Rm is a linear mapping.
+<br/>In this work, we further consider the nonconvex Schatten-p
+<br/>norm ||M||p
+<br/>ij |Mij|p
+<br/>and (cid:96)2,p-norm ||M||p
+<br/>j ||Mj||p
+<br/>2 with 0 < p < 1 for
+<br/>pursuing lower rank or sparser solutions.
+<br/>i σp(M ), (cid:96)p-norm ||M||p
+<br/>2,p = (cid:80)
+<br/>p = (cid:80)
+<br/>Sp
+<br/>Problem (1) is general which involves a wide range of
+<br/>problems, such as Lasso [6], group Lasso [7], trace Lasso [4],
+<br/>matrix completion [8], Robust Principle Component Analysis
+<br/>(RPCA) [3] and Low-Rank Representation (LRR) [5]. In this
+<br/>work, we aim to propose a general solver for (1). For the ease
+<br/>of discussion, we focus on the following two representative
+<br/>problems,
+<br/>RPCA:
+<br/>s.t. X = Z + E,
+<br/>(2)
+<br/>||Z||∗ + λ||E||1,
+<br/>min
+<br/>Z,E
+<br/>||Z||∗ + λ||E||2,1,
+<br/>min
+<br/>Z,E
+<br/>s.t. X = XZ + E,
+<br/>LRR:
+<br/>(3)
+<br/>where X ∈ Rd×n is a given data matrix, Z and E are with
+<br/>compatible dimensions and λ > 0 is the model parameter. No-
+<br/>tice that these problems can be reformulated as unconstrained
+<br/>problems (by representing E by Z) as that in problem (1).
+<br/>A. Related Works
+<br/>The sparse and low rank minimization problems can be
+<br/>solved by various methods, such as Semi-Definite Program-
+<br/>ming (SDP) [9], Accelerated Proximal Gradient (APG) [10],
+<br/>and Alternating Direction Method (ADM) [11]. However, SDP
+<br/>has a complexity of O(n6) for an n × n sized matrix, which
+<br/>is unbearable for large scale applications. APG requires that
+<br/>at
+<br/>least one term of the objective function has Lipschitz
+<br/>continuous gradient. Such an assumption is violated in many
+<br/>problems, e.g., problem (2) and (3). Compared with SDP
+<br/>and APG, ADM is the most widely used one. But it usually
+<br/>requires introducing several auxiliary variables corresponding
+<br/>to non-smooth terms. The auxiliary variables may slow down
+<br/>the convergence, or even lead to divergence when there are
+<br/>too many variables. Linearized ADM (LADM) [12] may
+<br/>reduce the number of auxiliary variables, but suffer the same
+<br/>convergence issue. The work [12] proposes an accelerated
+<br/>LADM with Adaptive Penalty (LADMAP) with lower per-
+<br/>iteration cost. However, the accelerating trick is special for the
+<br/>LRR problem. And thus are not general for other problems.
+<br/>Another drawback for many low rank minimization solvers is
+<br/>that they have to perform the soft singular value thresholding:
+<br/>λ||Z||∗ +
+<br/>||Z − Y ||2
+<br/>F ,
+<br/>min
+<br/>(4)
+</td><td>('33224509', 'Canyi Lu', 'canyi lu')<br/>('33383055', 'Zhouchen Lin', 'zhouchen lin')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')</td><td>obtained from the IEEE by sending a request to pubs-permissions@ieee.org.
+<br/>under its International Research Centre @Singapore Funding Initiative and
+<br/>canyilu@gmail.com; eleyans@nus.edu.sg).
+<br/>of EECS, Peking University, China (e-mail: zlin@pku.edu.cn).
+</td></tr><tr><td>031055c241b92d66b6984643eb9e05fd605f24e2</td><td>Multi-fold MIL Training for Weakly Supervised Object Localization
+<br/>Inria∗
+</td><td>('1939006', 'Ramazan Gokberk Cinbis', 'ramazan gokberk cinbis')<br/>('2462253', 'Cordelia Schmid', 'cordelia schmid')</td><td></td></tr><tr><td>0332ae32aeaf8fdd8cae59a608dc8ea14c6e3136</td><td>Int J Comput Vis
+<br/>DOI 10.1007/s11263-017-1009-7
+<br/>Large Scale 3D Morphable Models
+<br/>Received: 15 March 2016 / Accepted: 24 March 2017
+<br/>© The Author(s) 2017. This article is an open access publication
+</td><td>('1848903', 'James Booth', 'james booth')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')<br/>('2931390', 'Anastasios Roussos', 'anastasios roussos')<br/>('5137183', 'Allan Ponniah', 'allan ponniah')</td><td></td></tr><tr><td>034addac4637121e953511301ef3a3226a9e75fd</td><td>Implied Feedback: Learning Nuances of User Behavior in Image Search
+<br/>Virginia Tech
+</td><td>('1713589', 'Devi Parikh', 'devi parikh')</td><td>parikh@vt.edu
+</td></tr><tr><td>03701e66eda54d5ab1dc36a3a6d165389be0ce79</td><td>179
+<br/>Improved Principal Component Regression for Face
+<br/>Recognition Under Illumination Variations
+</td><td>('1776127', 'Shih-Ming Huang', 'shih-ming huang')<br/>('1749263', 'Jar-Ferr Yang', 'jar-ferr yang')</td><td></td></tr><tr><td>03fe3d031afdcddf38e5cc0d908b734884542eeb</td><td>DOI: http://dx.doi.org/10.14236/ewic/EVA2017.60
+<br/>Engagement with Artificial Intelligence
+<br/>through Natural Interaction Models
+<br/>Sara (Salevati) Feldman
+<br/><b>Simon Fraser University</b><br/>Vancouver, Canada
+<br/><b>Simon Fraser University</b><br/>Vancouver, Canada
+<br/><b>Simon Fraser University</b><br/>Vancouver, Canada
+<br/>As Artificial Intelligence (AI) systems become more ubiquitous, what user experience design
+<br/>paradigms will be used by humans to impart their needs and intents to an AI system, in order to
+<br/>engage in a more social interaction? In our work, we look mainly at expression and creativity
+<br/>based systems, where the AI both attempts to model or understand/assist in processes of human
+<br/>expression and creativity. We therefore have designed and implemented a prototype system with
+<br/>more natural interaction modes for engagement with AI as well as other human computer
+<br/>interaction (HCI) where a more open natural communication stream is beneficial. Our proposed
+<br/>conversational agent system makes use of the affective signals from the gestural behaviour of the
+<br/>user and the semantic information from the speech input in order to generate a personalised,
+<br/>human-like conversation that is expressed in the visual and conversational output of the 3D virtual
+<br/>avatar system. We describe our system and two application spaces we are using it in – a care
+<br/>advisor / assistant for the elderly and an interactive creative assistant for uses to produce art
+<br/>forms.
+<br/>Artificial Intelligence. Natural user interfaces. Voice systems. Expression systems. ChatBots.
+<br/>1. INTRODUCTION
+<br/>is
+<br/>for
+<br/>way
+<br/>there
+<br/>sensor
+<br/>natural
+<br/>devices,
+<br/>understand
+<br/>requirement
+<br/>to
+<br/>the human
+<br/>Due to the increase of natural user interfaces and
+<br/>untethered
+<br/>a
+<br/>corresponding
+<br/>for computational
+<br/>models that can utilise interactive and affective
+<br/>user data in order to understand and emulate a
+<br/>more
+<br/>conversational
+<br/>communication. From an emulation standpoint, it is
+<br/>the mechanisms
+<br/>important
+<br/>underlying
+<br/>to human multilayered
+<br/>semantic communication to achieve a more natural
+<br/>user experience. Humans tend to make use of
+<br/>gestures and expressions
+<br/>in a conversational
+<br/>setting in addition to the linguistic components that
+<br/>allow them to express more than the semantics of
+<br/>is usually
+<br/>the utterances. This phenomenon
+<br/>automated
+<br/>current
+<br/>disregarded
+<br/>to
+<br/>conversational
+<br/>due
+<br/>being
+<br/>computationally demanding and
+<br/>requiring a
+<br/>cognitive component to be able to model the
+<br/>complexity of the additional signals. With the
+<br/>advances in the current technology we are now
+<br/>closer to achieve more natural-like conversational
+<br/>systems. Gesture capture and recognition systems
+<br/>for video and sound input can be combined with
+<br/>output systems such as Artificial Intelligence (AI)
+<br/>based conversational
+<br/>tools and 3D modelling
+<br/>systems
+<br/>the
+<br/>in
+<br/>© Feldman et al. Published by
+<br/>BCS Learning and Development Ltd.
+<br/>Proceedings of Proceedings of EVA London 2017, UK
+<br/>296
+<br/>to
+<br/>include
+<br/>in order
+<br/>systems
+<br/>to achieve human-level
+<br/>meaningful communication. This may allow the
+<br/>interaction to be more intuitive, open and fluent that
+<br/>can be more helpful in certain situations. In this
+<br/>work, we attempt
+<br/>the affective
+<br/>components from these input signals in order to
+<br/>generate a compatible and personalised character
+<br/>that can reflect some human-like qualities.
+<br/>Given
+<br/>these goals, we overview our 3D
+<br/>conversational avatar system and describe its use
+<br/>in our two application spaces, stressing its use
+<br/>where AI systems are involved. Our first application
+<br/>space is CareAdvisor, for maintaining active and
+<br/>healthy aging in older adults through a multi-
+<br/>modular Personalised Virtual Coaching system.
+<br/>Here the natural communication system is better
+<br/>suited for the elderly, who are technologically less
+<br/>experienced,
+<br/>non-
+<br/>confrontationally and as an assistant conduit to
+<br/>health data from other less conversational devices.
+<br/>Our second application space is in the interactive
+<br/>art exhibition area, where our avatar system is able
+<br/>to converse with users in a more open way,
+<br/>compared to say forms and input systems, on
+<br/>issues of art and creativity. This allows for more
+<br/>open,
+<br/>to an
+<br/>intuitive conversation
+<br/>especially when
+<br/>leading
+<br/>used
+</td><td>('22588208', 'Ozge Nilay Yalcin', 'ozge nilay yalcin')<br/>('1700040', 'Steve DiPaola', 'steve dipaola')</td><td>sara_salevati@sfu.ca
+<br/>oyalcin@sfu.ca
+<br/>sdipaola@sfu.ca
+</td></tr><tr><td>9b318098f3660b453fbdb7a579778ab5e9118c4c</td><td>3931
+<br/>Joint Patch and Multi-label Learning for Facial
+<br/>Action Unit and Holistic Expression Recognition
+<br/>classifiers without
+</td><td>('2393320', 'Kaili Zhao', 'kaili zhao')<br/>('39336289', 'Wen-Sheng Chu', 'wen-sheng chu')<br/>('1707876', 'Fernando De la Torre', 'fernando de la torre')<br/>('1737918', 'Jeffrey F. Cohn', 'jeffrey f. cohn')<br/>('1720776', 'Honggang Zhang', 'honggang zhang')</td><td></td></tr><tr><td>9be94fa0330dd493f127d51e4ef7f9fd64613cfc</td><td>Research Article
+<br/>Effects of pose and image resolution on
+<br/>automatic face recognition
+<br/>ISSN 2047-4938
+<br/>Received on 5th February 2015
+<br/>Revised on 16th May 2015
+<br/>Accepted on 14th September 2015
+<br/>doi: 10.1049/iet-bmt.2015.0008
+<br/>www.ietdl.org
+<br/><b>North Dakota State University, Fargo, ND 58108-6050, USA</b><br/><b>Faculty of Computer Science, Mathematics, and Engineering, University of Twente, Enschede, Netherlands</b></td><td>('3001880', 'Zahid Mahmood', 'zahid mahmood')<br/>('1798087', 'Tauseef Ali', 'tauseef ali')</td><td>✉ E-mail: zahid.mahmood@ndsu.edu
+</td></tr><tr><td>9bd35145c48ce172b80da80130ba310811a44051</td><td>Face Detection with End-to-End Integration of a
+<br/>ConvNet and a 3D Model
+<br/>1Nat’l Engineering Laboratory for Video Technology,
+<br/>Key Laboratory of Machine Perception (MoE),
+<br/>Cooperative Medianet Innovation Center, Shanghai
+<br/><b>Sch l of EECS, Peking University, Beijing, 100871, China</b><br/>2Department of ECE and the Visual Narrative Cluster,
+<br/><b>North Carolina State University, Raleigh, USA</b></td><td>('3422021', 'Yunzhu Li', 'yunzhu li')<br/>('3423002', 'Benyuan Sun', 'benyuan sun')<br/>('47353858', 'Tianfu Wu', 'tianfu wu')<br/>('1717863', 'Yizhou Wang', 'yizhou wang')</td><td>{leo.liyunzhu, sunbenyuan, Yizhou.Wang}@pku.edu.cn, tianfu wu@ncsu.edu
+</td></tr><tr><td>9b000ccc04a2605f6aab867097ebf7001a52b459</td><td></td><td></td><td></td></tr><tr><td>9b0489f2d5739213ef8c3e2e18739c4353c3a3b7</td><td>Visual Data Augmentation through Learning
+<br/><b>Imperial College London, UK</b><br/><b>Middlesex University London, UK</b></td><td>('34586458', 'Grigorios G. Chrysos', 'grigorios g. chrysos')<br/>('1780393', 'Yannis Panagakis', 'yannis panagakis')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')</td><td>{g.chrysos, i.panagakis, s.zafeiriou}@imperial.ac.uk
+</td></tr><tr><td>9b474d6e81e3b94e0c7881210e249689139b3e04</td><td>VG-RAM Weightless Neural Networks for
+<br/>Face Recognition
+<br/>Departamento de Inform´atica
+<br/>Universidade Federal do Esp´ırito Santo
+<br/>Av. Fernando Ferrari, 514, 29075-910 - Vit´oria-ES
+<br/>Brazil
+<br/>1. Introduction
+<br/>Computerized human face recognition has many practical applications, such as access control,
+<br/>security monitoring, and surveillance systems, and has been one of the most challenging and
+<br/>active research areas in computer vision for many decades (Zhao et al.; 2003). Even though
+<br/>current machine recognition systems have reached a certain level of maturity, the recognition
+<br/>of faces with different facial expressions, occlusions, and changes in illumination and/or pose
+<br/>is still a hard problem.
+<br/>A general statement of the problem of machine recognition of faces can be formulated as fol-
+<br/>lows: given an image of a scene, (i) identify or (ii) verify one or more persons in the scene
+<br/>using a database of faces. In identification problems, given a face as input, the system reports
+<br/>back the identity of an individual based on a database of known individuals; whereas in veri-
+<br/>fication problems, the system confirms or rejects the claimed identity of the input face. In both
+<br/>cases, the solution typically involves segmentation of faces from scenes (face detection), fea-
+<br/>ture extraction from the face regions, recognition, or verification. In this chapter, we examine
+<br/>the recognition of frontal face images required in the context of identification problems.
+<br/>Many approaches have been proposed to tackle the problem of face recognition. One can
+<br/>roughly divide these into (i) holistic approaches, (ii) feature-based approaches, and (iii) hybrid
+<br/>approaches (Zhao et al.; 2003). Holistic approaches use the whole face region as the raw input
+<br/>to a recognition system (a classifier). In feature-based approaches, local features, such as the
+<br/>eyes, nose, and mouth, are first extracted and their locations and local statistics (geometric
+<br/>and/or appearance based) are fed into a classifier. Hybrid approaches use both local features
+<br/>and the whole face region to recognize a face.
+<br/>Among
+<br/>fisher-
+<br/>faces (Belhumeur et al.; 1997; Etemad and Chellappa; 1997) have proved to be effective
+<br/>(Turk and Pentland;
+<br/>eigenfaces
+<br/>holistic
+<br/>approaches,
+<br/>1991)
+<br/>and
+</td><td>('1699216', 'Alberto F. De Souza', 'alberto f. de souza')<br/>('3015563', 'Claudine Badue', 'claudine badue')<br/>('3158075', 'Felipe Pedroni', 'felipe pedroni')<br/>('3169286', 'Hallysson Oliveira', 'hallysson oliveira')</td><td></td></tr><tr><td>9b928c0c7f5e47b4480cb9bfdf3d5b7a29dfd493</td><td>Close the Loop: Joint Blind Image Restoration and Recognition
+<br/>with Sparse Representation Prior
+<br/><b>School of Computer Science, Northwestern Polytechnical University, Xi an China</b><br/><b>Beckman Institute, University of Illinois at Urbana-Champaign, IL USA</b><br/><b>U.S. Army Research Laboratory, 2800 Powder Mill Road, Adelphi, MD USA</b></td><td>('40479011', 'Haichao Zhang', 'haichao zhang')<br/>('1706007', 'Jianchao Yang', 'jianchao yang')<br/>('1801395', 'Yanning Zhang', 'yanning zhang')<br/>('8147588', 'Nasser M. Nasrabadi', 'nasser m. nasrabadi')<br/>('1739208', 'Thomas S. Huang', 'thomas s. huang')</td><td>‡{hczhang,jyang29,huang}@ifp.uiuc.edu †ynzhang@nwpu.edu.cn §nasser.m.nasrabadi.civ@mail.mil
+</td></tr><tr><td>9bc01fa9400c231e41e6a72ec509d76ca797207c</td><td></td><td></td><td></td></tr><tr><td>9b2c359c36c38c289c5bacaeb5b1dd06b464f301</td><td>Dense Face Alignment
+<br/><b>Michigan State University, MI</b><br/>2Monta Vista High School, Cupertino, CA
+</td><td>('6797891', 'Yaojie Liu', 'yaojie liu')<br/>('2357264', 'Amin Jourabloo', 'amin jourabloo')<br/>('26365310', 'William Ren', 'william ren')<br/>('1759169', 'Xiaoming Liu', 'xiaoming liu')</td><td>1{liuyaoj1,jourablo,liuxm}@msu.edu, 2williamyren@gmail.com
+</td></tr><tr><td>9bcfadd22b2c84a717c56a2725971b6d49d3a804</td><td>How to Detect a Loss of Attention in a Tutoring System
+<br/>using Facial Expressions and Gaze Direction
+</td><td>('2975858', 'Mark ter Maat', 'mark ter maat')</td><td></td></tr><tr><td>9b1bcef8bfef0fb5eb5ea9af0b699aa0534fceca</td><td>Position-Squeeze and Excitation Module
+<br/>for Facial Attribute Analysis
+<br/>Shanghai Key Laboratory of
+<br/>Multidimensional Information
+<br/>Processing,
+<br/><b>East China Normal University</b><br/>200241 Shanghai, China
+</td><td>('36124320', 'Yan Zhang', 'yan zhang')<br/>('7962836', 'Wanxia Shen', 'wanxia shen')<br/>('49755228', 'Li Sun', 'li sun')<br/>('12493943', 'Qingli Li', 'qingli li')<br/>('36124320', 'Yan Zhang', 'yan zhang')<br/>('7962836', 'Wanxia Shen', 'wanxia shen')<br/>('49755228', 'Li Sun', 'li sun')<br/>('12493943', 'Qingli Li', 'qingli li')</td><td>452642781@qq.com
+<br/>51151214005@ecnu.cn
+<br/>sunli@ee.ecnu.edu.cn
+<br/>qlli@cs.ecnu.edu.cn
+</td></tr><tr><td>9b07084c074ba3710fee59ed749c001ae70aa408</td><td>698535 CDPXXX10.1177/0963721417698535MartinezComputational Models of Face Perception
+<br/>research-article2017
+<br/>Computational Models of Face Perception
+<br/>Aleix M. Martinez
+<br/>Department of Electrical and Computer Engineering, Center for Cognitive and Brain Sciences,
+<br/><b>and Mathematical Biosciences Institute, The Ohio State University</b><br/>Current Directions in Psychological
+<br/>Science
+<br/> 1 –7
+<br/>© The Author(s) 2017
+<br/>Reprints and permissions:
+<br/>sagepub.com/journalsPermissions.nav
+<br/>DOI: 10.1177/0963721417698535
+<br/>https://doi.org/10.1177/0963721417698535
+<br/>www.psychologicalscience.org/CDPS
+</td><td></td><td></td></tr><tr><td>9be653e1bc15ef487d7f93aad02f3c9552f3ee4a</td><td>Computer Vision for Head Pose Estimation:
+<br/>Review of a Competition
+<br/><b>Tampere University of Technology, Finland</b><br/><b>University of Paderborn, Germany</b><br/>3 Zorgon, The Netherlands
+</td><td>('1847889', 'Heikki Huttunen', 'heikki huttunen')<br/>('40394658', 'Ke Chen', 'ke chen')<br/>('2364638', 'Abhishek Thakur', 'abhishek thakur')<br/>('2558923', 'Artus Krohn-Grimberghe', 'artus krohn-grimberghe')<br/>('2300445', 'Oguzhan Gencoglu', 'oguzhan gencoglu')<br/>('3328835', 'Xingyang Ni', 'xingyang ni')<br/>('2067035', 'Mohammed Al-Musawi', 'mohammed al-musawi')<br/>('40448210', 'Lei Xu', 'lei xu')<br/>('3152947', 'Hendrik Jacob van Veen', 'hendrik jacob van veen')</td><td></td></tr><tr><td>9b246c88a0435fd9f6d10dc88f47a1944dd8f89e</td><td>PICODES: Learning a Compact Code for
+<br/>Novel-Category Recognition
+<br/><b>Dartmouth College</b><br/>Hanover, NH, U.S.A.
+<br/>Andrew Fitzgibbon
+<br/>Microsoft Research
+<br/>Cambridge, United Kingdom
+</td><td>('34338883', 'Alessandro Bergamo', 'alessandro bergamo')<br/>('1732879', 'Lorenzo Torresani', 'lorenzo torresani')</td><td>{aleb, lorenzo}@cs.dartmouth.edu
+<br/>awf@microsoft.com
+</td></tr><tr><td>9b164cef4b4ad93e89f7c1aada81ae7af802f3a4</td><td> Research Journal of Recent Sciences _________________________________________________ ISSN 2277-2502
+<br/> Vol. 2(1), 17-20, January (2013)
+<br/>Res.J.Recent Sci.
+<br/>A Fully Automatic and Haar like Feature Extraction-Based Method for Lip
+<br/>Contour Detection
+<br/><b>School of Computer Engineering, Shahrood University of Technology, Shahrood, IRAN</b><br/>Received 26th September 2012, revised 27th October 2012, accepted 6th November 2012
+<br/>Available online at: www.isca.in
+</td><td></td><td></td></tr><tr><td>9bac481dc4171aa2d847feac546c9f7299cc5aa0</td><td>Matrix Product State for Higher-Order Tensor
+<br/>Compression and Classification
+</td><td>('2852180', 'Johann A. Bengua', 'johann a. bengua')<br/>('2839912', 'Ho N. Phien', 'ho n. phien')<br/>('1834451', 'Minh N. Do', 'minh n. do')</td><td></td></tr><tr><td>9b93406f3678cf0f16451140ea18be04784faeee</td><td>A Bayesian Approach to Alignment-Based
+<br/>Image Hallucination
+<br/><b>University of Central Florida</b><br/>2 Microsoft Research New England
+</td><td>('1802944', 'Marshall F. Tappen', 'marshall f. tappen')<br/>('1681442', 'Ce Liu', 'ce liu')</td><td>mtappen@eecs.ucf.edu
+<br/>celiu@microsoft.com
+</td></tr><tr><td>9b7974d9ad19bb4ba1ea147c55e629ad7927c5d7</td><td>Faical Expression Recognition by Combining
+<br/>Texture and Geometrical Features
+</td><td>('3057167', 'Renjie Liu', 'renjie liu')<br/>('36485086', 'Ruofei Du', 'ruofei du')<br/>('40371477', 'Bao-Liang Lu', 'bao-liang lu')</td><td></td></tr><tr><td>9b6d0b3fbf7d07a7bb0d86290f97058aa6153179</td><td>NII, Japan at the first THUMOS Workshop 2013
+<br/><b>National Institute of Informatics</b><br/>2-1-2 Hitotsubashi, Chiyoda-ku, Tokyo, Japan 101-8430
+</td><td>('39814149', 'Sang Phan', 'sang phan')<br/>('1802416', 'Duy-Dinh Le', 'duy-dinh le')<br/>('40693818', 'Shin’ichi Satoh', 'shin’ichi satoh')</td><td>{plsang,ledduy,satoh}@nii.ac.jp
+</td></tr><tr><td>9b684e2e2bb43862f69b12c6be94db0e7a756187</td><td>Differentiating Objects by Motion:
+<br/>Joint Detection and Tracking of Small Flying Objects
+<br/><b>The University of Tokyo</b><br/>CSIRO-Data61
+<br/><b>Australian National University</b><br/><b>The University of Tokyo</b><br/>Figure 1: Importance of multi-frame information for recognizing apparently small flying objects (birds in these examples).
+<br/><b>While visual features in single frames are vague and limited, multi-frame information, including deformation and pose</b><br/>changes, provides better clues with which to recognize birds. To extract such useful motion patterns, tracking is necessary for
+<br/>compensating translation of objects, but the tracking itself is a challenge due to the limited visual information. The blue boxes
+<br/>are birds tracked by our method that utilizes multi-frame representation for detection, while the red boxes are the results of a
+<br/>single-frame handcrafted-feature-based tracker [11] , which tends to fail when tracking small objects.
+</td><td>('1890560', 'Ryota Yoshihashi', 'ryota yoshihashi')<br/>('38621343', 'Tu Tuan Trinh', 'tu tuan trinh')<br/>('48727803', 'Rei Kawakami', 'rei kawakami')<br/>('2941564', 'Shaodi You', 'shaodi you')<br/>('33313329', 'Makoto Iida', 'makoto iida')<br/>('48795689', 'Takeshi Naemura', 'takeshi naemura')</td><td>{yoshi, tu, rei, naemura}@hc.ic.i.u-tokyo.ac.jp
+<br/>iida@ilab.eco.rcast.u-tokyo.ac.jp
+<br/>shaodi.you@data61.csiro.au
+</td></tr><tr><td>9e8637a5419fec97f162153569ec4fc53579c21e</td><td>Segmentation and Normalization of Human Ears
+<br/>using Cascaded Pose Regression
+<br/><b>University of Applied Sciences Darmstadt - CASED</b><br/>Haardtring 100,
+<br/>64295 Darmstadt, Germany
+<br/>http://www.h-da.de
+</td><td>('1742085', 'Christoph Busch', 'christoph busch')</td><td>anika.pflug@cased.de
+<br/>christoph.busch@hig.no
+</td></tr><tr><td>9ea223c070ec9a00f4cb5ca0de35d098eb9a8e32</td><td>Exploring Temporal Preservation Networks for Precise Temporal Action
+<br/>Localization
+<br/>National Laboratory for Parallel and Distributed Processing,
+<br/><b>National University of Defense Technology</b><br/>Changsha, China
+</td><td>('2352864', 'Ke Yang', 'ke yang')<br/>('2292038', 'Peng Qiao', 'peng qiao')<br/>('1718853', 'Dongsheng Li', 'dongsheng li')<br/>('1893776', 'Shaohe Lv', 'shaohe lv')<br/>('1791001', 'Yong Dou', 'yong dou')</td><td>{yangke13,pengqiao,dongshengli,yongdou,shaohelv}@nudt.edu.cn
+</td></tr><tr><td>9e4b052844d154c3431120ec27e78813b637b4fc</td><td>Journal of AI and Data Mining
+<br/>Vol. 2, No .1, 2014, 33-38.
+<br/>Local gradient pattern - A novel feature representation for facial
+<br/>expression recognition
+<br/><b>School of Applied Statistics, National Institute of Development Administration, Bangkok, Thailand</b><br/>Received 23 April 2013; accepted 16 June 2013
+</td><td>('31914125', 'M. Shahidul Islam', 'm. shahidul islam')</td><td>*Corresponding author: suva.93@grads.nida.ac.th (M.Shahidul Islam)
+</td></tr><tr><td>9e42d44c07fbd800f830b4e83d81bdb9d106ed6b</td><td>Learning Discriminative Aggregation Network for Video-based Face Recognition
+<br/><b>Tsinghua University, Beijing, China</b><br/>2State Key Lab of Intelligent Technologies and Systems, Beijing, China
+<br/>3Tsinghua National Laboratory for Information Science and Technology (TNList), Beijing, China
+</td><td>('39358728', 'Yongming Rao', 'yongming rao')<br/>('2772283', 'Ji Lin', 'ji lin')<br/>('1697700', 'Jiwen Lu', 'jiwen lu')<br/>('39491387', 'Jie Zhou', 'jie zhou')</td><td>raoyongming95@gmail.com; lin-j14@mails.tsinghua.edu.cn; {lujiwen,jzhou}@tsinghua.edu.cn
+</td></tr><tr><td>9eb86327c82b76d77fee3fd72e2d9eff03bbe5e0</td><td>Max-Margin Invariant Features from Transformed
+<br/>Unlabeled Data
+<br/>Department of Electrical and Computer Engineering
+<br/><b>Carnegie Mellon University</b><br/>Pittsburgh, PA 15213
+</td><td>('2628116', 'Dipan K. Pal', 'dipan k. pal')<br/>('27756148', 'Ashwin A. Kannan', 'ashwin a. kannan')<br/>('27693929', 'Gautam Arakalgud', 'gautam arakalgud')<br/>('1794486', 'Marios Savvides', 'marios savvides')</td><td>{dipanp,aalapakk,garakalgud,marioss}@cmu.edu
+</td></tr><tr><td>9ea73660fccc4da51c7bc6eb6eedabcce7b5cead</td><td>Talking Head Detection by Likelihood-Ratio Test†
+<br/>MIT Lincoln Laboratory,
+<br/>Lexington MA 02420, USA
+</td><td>('2877010', 'Carl Quillen', 'carl quillen')</td><td>wcampbell@ll.mit.edu
+</td></tr><tr><td>9e9052256442f4e254663ea55c87303c85310df9</td><td>International Journal of Advanced Research in Computer Engineering & Technology (IJARCET)
+<br/>Volume 4 Issue 10, October 2015
+<br/>Review On Attribute-assisted Reranking for
+<br/>Image Search
+<br/>
+</td><td></td><td></td></tr><tr><td>9eeada49fc2cba846b4dad1012ba8a7ee78a8bb7</td><td>A New Facial Expression Recognition Method Based on Local Gabor Filter Bank and PCA plus LDA
+<br/>A New Facial Expression Recognition Method Based on
+<br/>Local Gabor Filter Bank and PCA plus LDA
+<br/>1 School of Electronic and Information Engineering, South China
+<br/><b>University of Technology, Guangzhou, 510640, P.R.China</b><br/><b>Motorola China Research Center, Shanghai, 210000, P.R.China</b></td><td>('15414934', 'Hong-Bo Deng', 'hong-bo deng')<br/>('2949795', 'Lian-Wen Jin', 'lian-wen jin')<br/>('1751744', 'Li-Xin Zhen', 'li-xin zhen')<br/>('34824270', 'Jian-Cheng Huang', 'jian-cheng huang')<br/>('15414934', 'Hong-Bo Deng', 'hong-bo deng')<br/>('2949795', 'Lian-Wen Jin', 'lian-wen jin')<br/>('1751744', 'Li-Xin Zhen', 'li-xin zhen')<br/>('34824270', 'Jian-Cheng Huang', 'jian-cheng huang')</td><td>{hbdeng, eelwjin}@scut.edu.cn
+<br/>{Li-Xin.Zhen, Jian-Cheng.Huang}@motorola.com
+</td></tr><tr><td>9ef2b2db11ed117521424c275c3ce1b5c696b9b3</td><td>Robust Face Alignment Using a Mixture of Invariant Experts
+<br/>‡Intel Corporation
+<br/><b>Mitsubishi Electric Research Labs (MERL</b></td><td>('2577513', 'Oncel Tuzel', 'oncel tuzel')<br/>('14939251', 'Salil Tambe', 'salil tambe')<br/>('34749896', 'Tim K. Marks', 'tim k. marks')</td><td>{oncel, tmarks}@merl.com,
+<br/>salil.tambe@intel.com
+</td></tr><tr><td>9e5acdda54481104aaf19974dca6382ed5ff21ed</td><td>Yulia Gizatdinova and Veikko Surakka 
+<br/>Automatic localization of facial
+<br/>landmarks from expressive images
+<br/>of high complexity
+<br/>DEPARTMENT OF COMPUTER SCIENCES 
+<br/><b>UNIVERSITY OF TAMPERE</b><br/>D‐2008‐9 
+<br/>TAMPERE 2008 
+</td><td></td><td></td></tr><tr><td>9ed943f143d2deaac2efc9cf414b3092ed482610</td><td>Independent subspace of dynamic Gabor features for facial expression classification
+<br/>School of Information Science
+<br/><b>Japan Advanced Institute of Science and Technology</b><br/>Asahidai 1-1, Nomi-city, Ishikawa, Japan
+</td><td>('2847306', 'Prarinya Siritanawan', 'prarinya siritanawan')<br/>('1791753', 'Kazunori Kotani', 'kazunori kotani')<br/>('1753878', 'Fan Chen', 'fan chen')</td><td>Email: {p.siritanawan, ikko, chen-fan}@jaist.ac.jp
+</td></tr><tr><td>9e1c3b8b1653337094c1b9dba389e8533bc885b0</td><td>Demographic Classification with Local Binary
+<br/>Patterns
+<br/>Department of Computer Science and Technology,
+<br/><b>Tsinghua University, Beijing 100084, China</b></td><td>('4381671', 'Zhiguang Yang', 'zhiguang yang')<br/>('1679380', 'Haizhou Ai', 'haizhou ai')</td><td>ahz@mail.tsinghua.edu.cn
+</td></tr><tr><td>9e0285debd4b0ba7769b389181bd3e0fd7a02af6</td><td>From face images and attributes to attributes
+<br/>Computer Vision Laboratory, ETH Zurich, Switzerland
+</td><td>('9664434', 'Robert Torfason', 'robert torfason')<br/>('2794259', 'Eirikur Agustsson', 'eirikur agustsson')<br/>('2173683', 'Rasmus Rothe', 'rasmus rothe')<br/>('1732855', 'Radu Timofte', 'radu timofte')</td><td></td></tr><tr><td>9ed4ad41cbad645e7109e146ef6df73f774cd75d</td><td>SARFRAZ, SIDDIQUE, STIEFELHAGEN: RPM FOR PAIR-WISE FACE-SIMILARITY
+<br/>RPM: Random Points Matching for Pair-wise
+<br/>Face-Similarity
+<br/><b>Institute for Anthropomatics</b><br/><b>Karlsruhe Institute of Technology</b><br/>Karlsruhe, Germany
+<br/><b>Swiss Federal Institute of Technology</b><br/>(ETH) Zurich
+<br/>Zurich, Switzerland
+</td><td>('4241648', 'M. Saquib Sarfraz', 'm. saquib sarfraz')<br/>('6262445', 'Muhammad Adnan Siddique', 'muhammad adnan siddique')<br/>('1742325', 'Rainer Stiefelhagen', 'rainer stiefelhagen')</td><td>saquib.sarfraz@kit.edu
+<br/>siddique@ifu.baug.ethz.ch
+<br/>rainer.stiefelhagen@kit.edu
+</td></tr><tr><td>9e182e0cd9d70f876f1be7652c69373bcdf37fb4</td><td>Talking Face Generation by Adversarially
+<br/>Disentangled Audio-Visual Representation
+<br/><b>The Chinese University of Hong Kong</b></td><td>('40576774', 'Hang Zhou', 'hang zhou')<br/>('1715752', 'Yu Liu', 'yu liu')<br/>('3243969', 'Ziwei Liu', 'ziwei liu')<br/>('47571885', 'Ping Luo', 'ping luo')<br/>('31843833', 'Xiaogang Wang', 'xiaogang wang')</td><td></td></tr><tr><td>9e8d87dc5d8a6dd832716a3f358c1cdbfa97074c</td><td>What Makes an Image Popular?
+<br/><b>Massachusetts Institute</b><br/>of Technology
+<br/><b>eBay Research Labs</b><br/>DigitalGlobe
+</td><td>('2556428', 'Aditya Khosla', 'aditya khosla')<br/>('2541992', 'Atish Das Sarma', 'atish das sarma')<br/>('37164887', 'Raffay Hamid', 'raffay hamid')</td><td>khosla@csail.mit.edu
+<br/>atish.dassarma@gmail.com
+<br/>raffay@gmail.com
+</td></tr><tr><td>9e5c2d85a1caed701b68ddf6f239f3ff941bb707</td><td></td><td></td><td></td></tr><tr><td>044d9a8c61383312cdafbcc44b9d00d650b21c70</td><td>300 Faces in-the-Wild Challenge: The first facial landmark localization
+<br/>Challenge
+<br/><b>Imperial College London, UK</b><br/><b>School of Computer Science, University of Lincoln, U.K</b><br/><b>EEMCS, University of Twente, The Netherlands</b></td><td>('3320415', 'Christos Sagonas', 'christos sagonas')<br/>('2610880', 'Georgios Tzimiropoulos', 'georgios tzimiropoulos')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')<br/>('1694605', 'Maja Pantic', 'maja pantic')</td><td>{c.sagonas, gt204, s.zafeiriou, m.pantic}@imperial.ac.uk
+</td></tr><tr><td>04bb3fa0824d255b01e9db4946ead9f856cc0b59</td><td></td><td></td><td></td></tr><tr><td>040dc119d5ca9ea3d5fc39953a91ec507ed8cc5d</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Large-scale Bisample Learning on ID vs. Spot Face Recognition
+<br/>Received: date / Accepted: date
+</td><td>('8362374', 'Xiangyu Zhu', 'xiangyu zhu')<br/>('34679741', 'Stan Z. Li', 'stan z. li')</td><td></td></tr><tr><td>04f0292d9a062634623516edd01d92595f03bd3f</td><td>Distribution-based Iterative Pairwise Classification of
+<br/>Emotions in the Wild Using LGBP-TOP
+<br/><b>The University of Nottingham</b><br/>Mised Reality Lab
+<br/>Anıl Yüce
+<br/>Signal Processing
+<br/>Laboratory(LTS5)
+<br/>École Polytechnique Fédérale
+<br/>de Lausanne, Switzerland
+<br/><b>The University of Nottingham</b><br/>Mixed Reality Lab
+<br/><b>The University of Nottingham</b><br/>Mixed Reality Lab
+</td><td>('2449665', 'Timur R. Almaev', 'timur r. almaev')<br/>('1795528', 'Michel F. Valstar', 'michel f. valstar')<br/>('2321668', 'Alexandru Ghitulescu', 'alexandru ghitulescu')</td><td>psxta4@nottingham.ac.uk
+<br/>anil.yuce@epfl.ch
+<br/>psyadg@nottingham.ac.uk
+<br/>michel.valstar@nottingham.ac.uk
+</td></tr><tr><td>047f6afa87f48de7e32e14229844d1587185ce45</td><td>An Improvement of Energy-Transfer Features
+<br/>Using DCT for Face Detection
+<br/><b>Technical University of Ostrava, FEECS</b><br/>17. listopadu 15, 708 33 Ostrava-Poruba, Czech Republic
+</td><td>('2467747', 'Radovan Fusek', 'radovan fusek')<br/>('2557877', 'Eduard Sojka', 'eduard sojka')</td><td>{radovan.fusek,eduard.sojka,karel.mozdren,milan.surkala}@vsb.cz
+</td></tr><tr><td>04b851f25d6d49e61a528606953e11cfac7df2b2</td><td>Optical Flow Guided Feature: A Fast and Robust Motion Representation for
+<br/>Video Action Recognition
+<br/><b>The University of Sydney 2SenseTime Research 3The Chinese University of Hong Kong</b></td><td>('1837024', 'Shuyang Sun', 'shuyang sun')<br/>('1874900', 'Zhanghui Kuang', 'zhanghui kuang')<br/>('37145669', 'Lu Sheng', 'lu sheng')<br/>('3001348', 'Wanli Ouyang', 'wanli ouyang')<br/>('1726357', 'Wei Zhang', 'wei zhang')</td><td>{shuyang.sun wanli.ouyang}@sydney.edu.au
+<br/>{wayne.zhang kuangzhanghui}@sensetime.com
+<br/>lsheng@ee.cuhk.edu.hk
+</td></tr><tr><td>04522dc16114c88dfb0ebd3b95050fdbd4193b90</td><td>Appears in 2nd Canadian Conference on Computer and Robot Vision, Victoria, Canada, 2005.
+<br/>Minimum Bayes Error Features for Visual Recognition by Sequential Feature
+<br/>Selection and Extraction
+<br/>Department of Computer Science
+<br/><b>University of British Columbia</b><br/>Department of Electrical and Computer engineering
+<br/><b>University of California San Diego</b></td><td>('3265767', 'Gustavo Carneiro', 'gustavo carneiro')<br/>('1699559', 'Nuno Vasconcelos', 'nuno vasconcelos')</td><td>carneiro@cs.ubc.ca
+<br/>nuno@ece.ucsd.edu
+</td></tr><tr><td>04470861408d14cc860f24e73d93b3bb476492d0</td><td></td><td></td><td></td></tr><tr><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td><td>Patch-based Probabilistic Image Quality Assessment for
+<br/>Face Selection and Improved Video-based Face Recognition
+<br/>NICTA, PO Box 6020, St Lucia, QLD 4067, Australia ∗
+<br/><b>The University of Queensland, School of ITEE, QLD 4072, Australia</b></td><td>('3026404', 'Yongkang Wong', 'yongkang wong')<br/>('3104113', 'Shaokang Chen', 'shaokang chen')<br/>('40080354', 'Sandra Mau', 'sandra mau')<br/>('1781182', 'Conrad Sanderson', 'conrad sanderson')<br/>('2270092', 'Brian C. Lovell', 'brian c. lovell')</td><td></td></tr><tr><td>0447bdb71490c24dd9c865e187824dee5813a676</td><td>Manifold Estimation in View-based Feature
+<br/>Space for Face Synthesis Across Pose
+<br/>Paper 27
+</td><td></td><td></td></tr><tr><td>0435a34e93b8dda459de49b499dd71dbb478dc18</td><td>VEGAC: Visual Saliency-based Age, Gender, and Facial Expression Classification
+<br/>Using Convolutional Neural Networks
+<br/>Department of Electronics and Communication Engineering and
+<br/><b>Computer Vision Group, L. D. College of Engineering, Ahmedabad, India</b><br/>the need for handcrafted facial descriptors and data
+<br/>preprocessing. D-CNN models have been not only
+<br/>successfully applied to human face analysis, but also for
+<br/>the visual saliency detection [21, 22, 23]. Visual Saliency
+<br/>is fundamentally an intensity map where higher intensity
+<br/>signifies regions, where a general human being would
+<br/>look, and lower intensities mean decreasing level of visual
+<br/>attention. It’s a measure of visual attention of humans
+<br/>based on the content of the image. It has numerous
+<br/>applications in computer vision and image processing
+<br/>tasks. It is still an open problem when considering the MIT
+<br/>Saliency Benchmark [24].
+<br/>In previous five years, considering age estimation,
+<br/>gender classification and facial expression classification
+<br/>accuracies
+<br/>increased rapidly on several benchmarks.
+<br/>However, in unconstrained environments, i.e. low to high
+<br/>occluded face and
+<br/>this
+<br/>classification tasks are still facing challenges to achieve
+<br/>competitive results. Some of the sample images are shown
+<br/>in the Fig. 1.
+<br/>low-resolution facial
+<br/>image,
+<br/>Figure 1: Sample images having unconstrained environments i.e.
+<br/>occlusion, low resolution.
+<br/>In this paper, we tackle the age, gender, and facial
+<br/>expression classification problem from different angle. We
+<br/>are inspired by the recent progress in the domain of image
+<br/>classification and visual saliency prediction using deep
+<br/>learning to achieve the competitive results. Based on the
+<br/>above motivation our work
+<br/>this multi-task
+<br/>classification of the facial image is as follows:
+<br/> Our VEGAC method uses off-the-shelf face detector
+<br/>proposed by Mathias et al. [2] to obtain the location of the
+<br/>face in the test image. Then, we increase the margin of
+<br/>detected face by 30% and crop the face. After getting the
+<br/>cropped face, we pass the cropped face on the Deep Multi-
+<br/>for
+</td><td>('27343041', 'Ayesha Gurnani', 'ayesha gurnani')<br/>('23922616', 'Vandit Gajjar', 'vandit gajjar')<br/>('22239413', 'Viraj Mavani', 'viraj mavani')<br/>('26425477', 'Yash Khandhediya', 'yash khandhediya')</td><td>{gurnani.ayesha.52, gajjar.vandit.381, mavani.viraj.604, khandhediya.yash.364}@ldce.ac.in
+</td></tr><tr><td>043efe5f465704ced8d71a067d2b9d5aa5b59c29</td><td>EGGER ET AL.: OCCLUSION-AWARE 3D MORPHABLE FACE MODELS
+<br/>Occlusion-aware 3D Morphable Face Models
+<br/>Department of Mathematics and
+<br/>Computer Science
+<br/><b>University of Basel</b><br/>Basel Switzerland
+<br/>http://gravis.cs.unibas.ch
+<br/>Andreas Morel-Forster
+</td><td>('34460642', 'Bernhard Egger', 'bernhard egger')<br/>('49462138', 'Andreas Schneider', 'andreas schneider')<br/>('39550224', 'Clemens Blumer', 'clemens blumer')<br/>('1987368', 'Sandro Schönborn', 'sandro schönborn')<br/>('1687079', 'Thomas Vetter', 'thomas vetter')</td><td>bernhard.egger@unibas.ch
+<br/>andreas.schneider@unibas.ch
+<br/>clemens.blumer@unibas.ch
+<br/>andreas.forster@unibas.ch
+<br/>sandro.schoenborn@unibas.ch
+<br/>thomas.vetter@unibas.ch
+</td></tr><tr><td>044ba70e6744e80c6a09fa63ed6822ae241386f2</td><td>TO APPEAR IN AUTONOMOUS ROBOTS, SPECIAL ISSUE IN LEARNING FOR HUMAN-ROBOT COLLABORATION
+<br/>Early Prediction for Physical Human Robot
+<br/>Collaboration in the Operating Room
+</td><td>('2641330', 'Tian Zhou', 'tian zhou')</td><td></td></tr><tr><td>04661729f0ff6afe4b4d6223f18d0da1d479accf</td><td>From Facial Parts Responses to Face Detection: A Deep Learning Approach
+<br/><b>The Chinese University of Hong Kong</b><br/><b>Shenzhen Key Lab of Comp. Vis. and Pat. Rec., Shenzhen Institutes of Advanced Technology, CAS, China</b></td><td>('1692609', 'Shuo Yang', 'shuo yang')<br/>('1693209', 'Ping Luo', 'ping luo')<br/>('1717179', 'Chen Change Loy', 'chen change loy')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td>{ys014, pluo, ccloy, xtang}@ie.cuhk,edu.hk
+</td></tr><tr><td>04dcdb7cb0d3c462bdefdd05508edfcff5a6d315</td><td>Assisting the training of deep neural networks
+<br/>with applications to computer vision
+<br/>tesi doctoral està subjecta a
+<br/>la
+<br/>Aquesta
+<br/>CompartirIgual 4.0. Espanya de Creative Commons.
+<br/>Esta tesis doctoral está sujeta a la licencia Reconocimiento - NoComercial – CompartirIgual
+<br/>4.0. España de Creative Commons.
+<br/>This doctoral thesis is licensed under the Creative Commons Attribution-NonCommercial-
+<br/>ShareAlike 4.0. Spain License.
+<br/>llicència Reconeixement- NoComercial –
+</td><td>('3995639', 'Adriana Romero', 'adriana romero')</td><td></td></tr><tr><td>044fdb693a8d96a61a9b2622dd1737ce8e5ff4fa</td><td>Dynamic Texture Recognition Using Local Binary
+<br/>Patterns with an Application to Facial Expressions
+</td><td>('1757287', 'Guoying Zhao', 'guoying zhao')</td><td></td></tr><tr><td>04f55f81bbd879773e2b8df9c6b7c1d324bc72d8</td><td>Multi-view Face Analysis Based on Gabor Features
+<br/><b>College of Information and Control Engineering in China University of Petroleum</b><br/>Qingdao 266580, China
+<br/>
+</td><td>('1707922', 'Hongli Liu', 'hongli liu')</td><td></td></tr><tr><td>04250e037dce3a438d8f49a4400566457190f4e2</td><td></td><td></td><td></td></tr><tr><td>0431e8a01bae556c0d8b2b431e334f7395dd803a</td><td>Learning Localized Perceptual Similarity Metrics for Interactive Categorization
+<br/>Google Inc.
+<br/>google.com
+</td><td>('2367820', 'Catherine Wah', 'catherine wah')</td><td></td></tr><tr><td>04b4c779b43b830220bf938223f685d1057368e9</td><td>Video retrieval based on deep convolutional
+<br/>neural network
+<br/>Yajiao Dong
+<br/>School of Information and Electronics,
+<br/>Beijing Institution of Technology, Beijing, China
+<br/>Jianguo Li
+<br/>School of Information and Electronics,
+<br/>Beijing Institution of Technology, Beijing, China
+</td><td></td><td>yajiaodong@bit.edu.cn
+<br/>jianguoli@bit.edu.cn
+</td></tr><tr><td>04616814f1aabe3799f8ab67101fbaf9fd115ae4</td><td><b>UNIVERSIT´EDECAENBASSENORMANDIEU.F.R.deSciences´ECOLEDOCTORALESIMEMTH`ESEPr´esent´eeparM.GauravSHARMAsoutenuele17D´ecembre2012envuedel’obtentionduDOCTORATdel’UNIVERSIT´EdeCAENSp´ecialit´e:InformatiqueetapplicationsArrˆet´edu07aoˆut2006Titre:DescriptionS´emantiquedesHumainsPr´esentsdansdesImagesVid´eo(SemanticDescriptionofHumansinImages)TheworkpresentedinthisthesiswascarriedoutatGREYC-UniversityofCaenandLEAR–INRIAGrenobleJuryM.PatrickPEREZDirecteurdeRechercheINRIA/Technicolor,RennesRapporteurM.FlorentPERRONNINPrincipalScientistXeroxRCE,GrenobleRapporteurM.JeanPONCEProfesseurdesUniversit´esENS,ParisExaminateurMme.CordeliaSCHMIDDirectricedeRechercheINRIA,GrenobleDirectricedeth`eseM.Fr´ed´ericJURIEProfesseurdesUniversit´esUniversit´edeCaenDirecteurdeth`ese</b></td><td></td><td></td></tr><tr><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td><td>Hipster Wars: Discovering Elements
+<br/>of Fashion Styles
+<br/><b>University of North Carolina at Chapel Hill, NC, USA</b><br/><b>Tohoku University, Japan</b></td><td>('1772294', 'M. Hadi Kiapour', 'm. hadi kiapour')<br/>('1721910', 'Kota Yamaguchi', 'kota yamaguchi')<br/>('39668247', 'Alexander C. Berg', 'alexander c. berg')<br/>('1685538', 'Tamara L. Berg', 'tamara l. berg')</td><td>{hadi,aberg,tlberg}@cs.unc.edu
+<br/>kyamagu@vision.is.tohoku.ac.jp
+</td></tr><tr><td>04ff69aa20da4eeccdabbe127e3641b8e6502ec0</td><td>Sequential Face Alignment via Person-Specific Modeling in the Wild
+<br/><b>Rutgers University</b><br/><b>University of Texas at Arlington</b><br/>Piscataway, NJ 08854
+<br/>Arlington, TX 76019
+<br/><b>Rutgers University</b><br/>Piscataway, NJ 08854
+</td><td>('4340744', 'Xi Peng', 'xi peng')<br/>('1768190', 'Junzhou Huang', 'junzhou huang')<br/>('1711560', 'Dimitris N. Metaxas', 'dimitris n. metaxas')</td><td>xpeng.nb@cs.rutgers.edu
+<br/>jzhuang@uta.edu
+<br/>dnm@cs.rutgers.edu
+</td></tr><tr><td>046a694bbb3669f2ff705c6c706ca3af95db798c</td><td>Conditional Convolutional Neural Network for Modality-aware Face Recognition
+<br/><b>Imperial College London</b><br/><b>National University of Singapore</b><br/>3Panasonic R&D Center Singapore
+</td><td>('34336393', 'Chao Xiong', 'chao xiong')<br/>('1874505', 'Xiaowei Zhao', 'xiaowei zhao')<br/>('40245930', 'Danhang Tang', 'danhang tang')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')<br/>('1700968', 'Tae-Kyun Kim', 'tae-kyun kim')</td><td>{chao.xiong10, x.zhao, d.tang11}@imperial.ac.uk, Karlekar.Jayashree@sg.panasonic.com, eleyans@nus.edu.sg, tk.kim@imperial.ac.uk
+</td></tr><tr><td>047d7cf4301cae3d318468fe03a1c4ce43b086ed</td><td>Co-Localization of Audio Sources in Images Using
+<br/>Binaural Features and Locally-Linear Regression
+<br/>To cite this version:
+<br/>Sources in Images Using Binaural Features and Locally-Linear Regression. IEEE Transactions
+<br/>on Audio Speech and Language Processing, 2015, 15p. <hal-01112834>
+<br/>HAL Id: hal-01112834
+<br/>https://hal.inria.fr/hal-01112834
+<br/>Submitted on 3 Feb 2015
+<br/>HAL is a multi-disciplinary open access
+<br/>archive for the deposit and dissemination of sci-
+<br/>entific research documents, whether they are pub-
+<br/>lished or not. The documents may come from
+<br/>teaching and research institutions in France or
+<br/><b>abroad, or from public or private research centers</b><br/>L’archive ouverte pluridisciplinaire HAL, est
+<br/>destin´ee au d´epˆot et `a la diffusion de documents
+<br/>scientifiques de niveau recherche, publi´es ou non,
+<br/>´emanant des ´etablissements d’enseignement et de
+<br/>recherche fran¸cais ou ´etrangers, des laboratoires
+<br/>publics ou priv´es.
+</td><td>('3307172', 'Antoine Deleforge', 'antoine deleforge')<br/>('1794229', 'Radu Horaud', 'radu horaud')<br/>('2159538', 'Yoav Y. Schechner', 'yoav y. schechner')<br/>('1780746', 'Laurent Girin', 'laurent girin')<br/>('3307172', 'Antoine Deleforge', 'antoine deleforge')<br/>('1794229', 'Radu Horaud', 'radu horaud')<br/>('2159538', 'Yoav Y. Schechner', 'yoav y. schechner')<br/>('1780746', 'Laurent Girin', 'laurent girin')</td><td></td></tr><tr><td>04317e63c08e7888cef480fe79f12d3c255c5b00</td><td>Face Recognition Using a Unified 3D Morphable Model
+<br/>Hu, G., Yan, F., Chan, C-H., Deng, W., Christmas, W., Kittler, J., & Robertson, N. M. (2016). Face Recognition
+<br/>Using a Unified 3D Morphable Model. In Computer Vision – ECCV 2016: 14th European Conference,
+<br/>Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part VIII (pp. 73-89). (Lecture Notes in
+<br/>Computer Science; Vol. 9912). Springer Verlag. DOI: 10.1007/978-3-319-46484-8_5
+<br/>Published in:
+<br/>Computer Vision – ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14,
+<br/>2016, Proceedings, Part VIII
+<br/>Document Version:
+<br/>Peer reviewed version
+<br/><b>Queen's University Belfast - Research Portal</b><br/><b>Link to publication record in Queen's University Belfast Research Portal</b><br/>Publisher rights
+<br/>The final publication is available at Springer via http://dx.doi.org/10.1007/978-3-319-46484-8_5
+<br/>General rights
+<br/><b>Copyright for the publications made accessible via the Queen's University Belfast Research Portal is retained by the author(s) and / or other</b><br/>copyright owners and it is a condition of accessing these publications that users recognise and abide by the legal requirements associated
+<br/>with these rights.
+<br/>Take down policy
+<br/>The Research Portal is Queen's institutional repository that provides access to Queen's research output. Every effort has been made to
+<br/>ensure that content in the Research Portal does not infringe any person's rights, or applicable UK laws. If you discover content in the
+<br/>Download date:12. Sep. 2018
+</td><td></td><td>Research Portal that you believe breaches copyright or violates any law, please contact openaccess@qub.ac.uk.
+</td></tr><tr><td>046865a5f822346c77e2865668ec014ec3282033</td><td>Discovering Informative Social Subgraphs and Predicting
+<br/>Pairwise Relationships from Group Photos
+<br/><b>National Taiwan University, Taipei, Taiwan</b><br/>†Academia Sinica, Taipei, Taiwan
+</td><td>('35081710', 'Yan-Ying Chen', 'yan-ying chen')<br/>('1716836', 'Winston H. Hsu', 'winston h. hsu')<br/>('1704678', 'Hong-Yuan Mark Liao', 'hong-yuan mark liao')</td><td>yanying@cmlab.csie.ntu.edu.tw, winston@csie.ntu.edu.tw, liao@iis.sinica.edu.tw
+</td></tr><tr><td>047bb1b1bd1f19b6c8d7ee7d0324d5ecd1a3efff</td><td>Unsupervised Training for 3D Morphable Model Regression
+<br/><b>Princeton University</b><br/>2Google Research
+<br/>3MIT CSAIL
+</td><td>('32627314', 'Kyle Genova', 'kyle genova')<br/>('39578349', 'Forrester Cole', 'forrester cole')</td><td></td></tr><tr><td>0470b0ab569fac5bbe385fa5565036739d4c37f8</td><td>Automatic Face Naming with Caption-based Supervision
+<br/>To cite this version:
+<br/>with Caption-based Supervision. CVPR 2008 - IEEE Conference on Computer Vision
+<br/>Pattern Recognition,
+<br/>ciety,
+<br/><10.1109/CVPR.2008.4587603>. <inria-00321048v2>
+<br/>Jun
+<br/>2008,
+<br/>pp.1-8,
+<br/>2008, Anchorage, United
+<br/>so-
+<br/><http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=4587603>.
+<br/>IEEE Computer
+<br/>States.
+<br/>HAL Id: inria-00321048
+<br/>https://hal.inria.fr/inria-00321048v2
+<br/>Submitted on 11 Apr 2011
+<br/>HAL is a multi-disciplinary open access
+<br/>archive for the deposit and dissemination of sci-
+<br/>entific research documents, whether they are pub-
+<br/>lished or not. The documents may come from
+<br/>teaching and research institutions in France or
+<br/><b>abroad, or from public or private research centers</b><br/>L’archive ouverte pluridisciplinaire HAL, est
+<br/>destinée au dépôt et à la diffusion de documents
+<br/>scientifiques de niveau recherche, publiés ou non,
+<br/>émanant des établissements d’enseignement et de
+<br/>recherche français ou étrangers, des laboratoires
+<br/>publics ou privés.
+</td><td>('2737253', 'Matthieu Guillaumin', 'matthieu guillaumin')<br/>('1722052', 'Thomas Mensink', 'thomas mensink')<br/>('2462253', 'Cordelia Schmid', 'cordelia schmid')<br/>('2737253', 'Matthieu Guillaumin', 'matthieu guillaumin')<br/>('1722052', 'Thomas Mensink', 'thomas mensink')<br/>('2462253', 'Cordelia Schmid', 'cordelia schmid')</td><td></td></tr><tr><td>6a3a07deadcaaab42a0689fbe5879b5dfc3ede52</td><td>Learning to Estimate Pose by Watching Videos
+<br/>Department of Computer Science and Engineering
+<br/>IIT Kanpur
+</td><td>('36668573', 'Prabuddha Chakraborty', 'prabuddha chakraborty')<br/>('1744135', 'Vinay P. Namboodiri', 'vinay p. namboodiri')</td><td>{prabudc, vinaypn} @iitk.ac.in
+</td></tr><tr><td>6a67e6fbbd9bcd3f724fe9e6cecc9d48d1b6ad4d</td><td>Cooperative Learning with Visual Attributes
+<br/><b>Carnegie Mellon University</b><br/>Georgia Tech
+</td><td>('32519394', 'Tanmay Batra', 'tanmay batra')<br/>('1713589', 'Devi Parikh', 'devi parikh')</td><td>tbatra@cmu.edu
+<br/>parikh@gatech.edu
+</td></tr><tr><td>6afed8dc29bc568b58778f066dc44146cad5366c</td><td>Kernel Hebbian Algorithm for Single-Frame
+<br/>Super-Resolution
+<br/><b>Max Planck Institute f ur biologische Kybernetik</b><br/>Spemannstr. 38, D-72076 T¨ubingen, Germany
+<br/>http://www.kyb.tuebingen.mpg.de/
+</td><td>('1808255', 'Kwang In Kim', 'kwang in kim')<br/>('30541601', 'Matthias O. Franz', 'matthias o. franz')</td><td>{kimki, mof, bs}@tuebingen.mpg.de
+</td></tr><tr><td>6ad107c08ac018bfc6ab31ec92c8a4b234f67d49</td><td></td><td></td><td></td></tr><tr><td>6a184f111d26787703f05ce1507eef5705fdda83</td><td></td><td></td><td></td></tr><tr><td>6a16b91b2db0a3164f62bfd956530a4206b23fea</td><td>A Method for Real-Time Eye Blink Detection and Its Application
+<br/>Mahidol Wittayanusorn School
+<br/>Puttamonton, Nakornpatom 73170, Thailand
+</td><td></td><td>Chinnawat.Deva@gmail.com
+</td></tr><tr><td>6a806978ca5cd593d0ccd8b3711b6ef2a163d810</td><td>Facial feature tracking for Emotional Dynamic
+<br/>Analysis
+<br/>1ISIR, CNRS UMR 7222
+<br/>Univ. Pierre et Marie Curie, Paris
+<br/>2LAMIA, EA 4540
+<br/>Univ. of Fr. West Indies & Guyana
+</td><td>('3093849', 'Thibaud Senechal', 'thibaud senechal')<br/>('3074790', 'Vincent Rapp', 'vincent rapp')<br/>('2554802', 'Lionel Prevost', 'lionel prevost')</td><td>{rapp, senechal}@isir.upmc.fr
+<br/>lionel.prevost@univ-ag.fr
+</td></tr><tr><td>6a8a3c604591e7dd4346611c14dbef0c8ce9ba54</td><td>ENTERFACE’10, JULY 12TH - AUGUST 6TH, AMSTERDAM, THE NETHERLANDS.
+<br/>58
+<br/>An Affect-Responsive Interactive Photo Frame
+</td><td>('1713360', 'Ilkka Kosunen', 'ilkka kosunen')<br/>('32062164', 'Marcos Ortega Hortas', 'marcos ortega hortas')<br/>('1764521', 'Albert Ali Salah', 'albert ali salah')</td><td></td></tr><tr><td>6aa43f673cc42ed2fa351cbc188408b724cb8d50</td><td></td><td></td><td></td></tr><tr><td>6a2b83c4ae18651f1a3496e48a35b0cd7a2196df</td><td>Top Rank Supervised Binary Coding for Visual Search
+<br/>Department of ECE
+<br/>School of Electronic Engineering
+<br/>School of Information Science
+<br/>UC San Diego
+<br/><b>Xidian University</b><br/>and Engineering
+<br/><b>Xiamen University</b><br/>Department of Mathematics
+<br/>UC San Diego
+<br/>IBM T. J. Watson
+<br/><b>Research Center</b></td><td>('2451800', 'Dongjin Song', 'dongjin song')<br/>('39059457', 'Wei Liu', 'wei liu')<br/>('1725599', 'Rongrong Ji', 'rongrong ji')<br/>('3520515', 'David A. Meyer', 'david a. meyer')<br/>('1732563', 'John R. Smith', 'john r. smith')</td><td>dosong@ucsd.edu
+<br/>wliu@ee.columbia.edu
+<br/>rrji@xmu.edu.cn
+<br/>dmeyer@math.ucsd.edu
+<br/>jsmith@us.ibm.com
+</td></tr><tr><td>6a52e6fce541126ff429f3c6d573bc774f5b8d89</td><td>Role of Facial Emotion in Social Correlation
+<br/>Department of Computer Science and Engineering
+<br/><b>Nagoya Institute of Technology, Gokiso, Showa-ku, Nagoya, 466-8555 Japan</b></td><td>('2159044', 'Pankaj Mishra', 'pankaj mishra')<br/>('47865262', 'Takayuki Ito', 'takayuki ito')</td><td>{pankaj.mishra, rafik}@itolab.nitech.ac.jp,
+<br/>ito.takayuki@nitech.ac.jp
+</td></tr><tr><td>6a5fe819d2b72b6ca6565a0de117c2b3be448b02</td><td>Supervised and Projected Sparse Coding for Image Classification
+<br/>Computer Science and Engineering Department
+<br/><b>University of Texas at Arlington</b><br/>Arlington,TX,76019
+</td><td>('39122448', 'Jin Huang', 'jin huang')<br/>('1688370', 'Feiping Nie', 'feiping nie')<br/>('1748032', 'Heng Huang', 'heng huang')</td><td>huangjinsuzhou@gmail.com, feipingnie@gmail.com, heng@uta.edu, chqding@uta.edu
+</td></tr><tr><td>6afeb764ee97fbdedfa8f66810dfc22feae3fa1f</td><td>Robust Principal Component Analysis with Complex Noise
+<br/><b>School of Mathematics and Statistics, Xi an Jiaotong University, Xi an, China</b><br/><b>School of Computer Science and Technology, Harbin Institute of Technology, Harbin, China</b><br/><b>The Hong Kong Polytechnic University, Hong Kong, China</b></td><td>('40209122', 'Qian Zhao', 'qian zhao')<br/>('1803714', 'Deyu Meng', 'deyu meng')<br/>('7814629', 'Zongben Xu', 'zongben xu')<br/>('1724520', 'Wangmeng Zuo', 'wangmeng zuo')<br/>('36685537', 'Lei Zhang', 'lei zhang')</td><td>TIMMY.ZHAOQIAN@GMAIL.COM
+<br/>DYMENG@MAIL.XJTU.EDU.CN
+<br/>ZBXU@MAIL.XJTU.EDU.CN
+<br/>CSWMZUO@GMAIL.COM
+<br/>CSLZHANG@COMP.POLYU.EDU.HK
+</td></tr><tr><td>6aa61d28750629febe257d1cb69379e14c66c67f</td><td>Max–Planck–Institut f¨ur biologische Kybernetik
+<br/><b>Max Planck Institute for Biological Cybernetics</b><br/>Technical Report No. 109
+<br/>Kernel Hebbian Algorithm for
+<br/>Iterative Kernel Principal
+<br/>Component Analysis
+<br/>Sch¨olkopf1
+<br/>June 2003
+<br/>This report is available in PDF–format via anonymous ftp at ftp://ftp.kyb.tuebingen.mpg.de/pub/mpi-memos/pdf/kha.pdf. The com-
+<br/>plete series of Technical Reports is documented at: http://www.kyb.tuebingen.mpg.de/techreports.html
+</td><td>('1808255', 'Kwang In Kim', 'kwang in kim')<br/>('30541601', 'Matthias O. Franz', 'matthias o. franz')</td><td>1 Department Sch¨olkopf, email: kimki;mof;bs@tuebingen.mpg.de
+</td></tr><tr><td>6ae96f68187f1cdb9472104b5431ec66f4b2470f</td><td><b>Carnegie Mellon University</b><br/><b>Dietrich College Honors Theses</b><br/><b>Dietrich College of Humanities and Social Sciences</b><br/>4-30-2012
+<br/>Improving Task Performance in an Affect-mediated
+<br/>Computing System
+<br/>Follow this and additional works at: http://repository.cmu.edu/hsshonors
+<br/>Part of the Databases and Information Systems Commons
+</td><td>('29120285', 'Vivek Pai', 'vivek pai')</td><td>Research Showcase @ CMU
+<br/>Carnegie Mellon University, vpai@cmu.edu
+<br/>This Thesis is brought to you for free and open access by the Dietrich College of Humanities and Social Sciences at Research Showcase @ CMU. It has
+<br/>been accepted for inclusion in Dietrich College Honors Theses by an authorized administrator of Research Showcase @ CMU. For more information,
+<br/>please contact research-showcase@andrew.cmu.edu.
+</td></tr><tr><td>6a4419ce2338ea30a570cf45624741b754fa52cb</td><td>Statistical transformer networks: learning shape
+<br/>and appearance models via self supervision
+<br/><b>University of York</b></td><td>('39180407', 'Anil Bas', 'anil bas')<br/>('1687021', 'William A. P. Smith', 'william a. p. smith')</td><td>{ab1792,william.smith}@york.ac.uk
+</td></tr><tr><td>6af65e2a1eba6bd62843e7bf717b4ccc91bce2b8</td><td>A New Weighted Sparse Representation Based
+<br/>on MSLBP and Its Application to Face Recognition
+<br/><b>School of IoT Engineering, Jiangnan University, Wuxi 214122, China</b></td><td>('1823451', 'He-Feng Yin', 'he-feng yin')<br/>('37020604', 'Xiao-Jun Wu', 'xiao-jun wu')</td><td>yinhefeng@126.com, wu_xiaojun@yahoo.com.cn
+</td></tr><tr><td>6a657995b02bc9dee130701138ea45183c18f4ae</td><td>THE TIMING OF FACIAL MOTION IN POSED AND SPONTANEOUS SMILES
+<br/>J.F. COHN* and K.L.SCHMIDT
+<br/><b>University of Pittsburgh</b><br/>Department of Psychology
+<br/>4327 Sennott Square, 210 South Bouquet Street
+<br/>Pittsburgh, PA 15260, USA
+<br/>Revised 19 March 2004
+<br/>Almost all work in automatic facial expression analysis has focused on recognition of prototypic
+<br/>expressions rather than dynamic changes in appearance over time. To investigate the relative
+<br/>contribution of dynamic features to expression recognition, we used automatic feature tracking to
+<br/>measure the relation between amplitude and duration of smile onsets in spontaneous and deliberate
+<br/>smiles of 81 young adults of Euro- and African-American background. Spontaneous smiles were of
+<br/>smaller amplitude and had a larger and more consistent relation between amplitude and duration than
+<br/>deliberate smiles. A linear discriminant classifier using timing and amplitude measures of smile
+<br/>onsets achieved a 93% recognition rate. Using timing measures alone, recognition rate declined only
+<br/>marginally to 89%. These findings suggest that by extracting and representing dynamic as well as
+<br/>morphological features, automatic facial expression analysis can begin to discriminate among the
+<br/>message values of morphologically similar expressions.
+<br/> Keywords: automatic facial expression analysis, timing, spontaneous facial behavior
+<br/> AMS Subject Classification:
+<br/>1. Introduction
+<br/>Almost all work in automatic facial expression analysis has sought to recognize either
+<br/>prototypic expressions of emotion (e.g., joy or anger) or more molecular appearance
+<br/>prototypes such as FACS action units. This emphasis on prototypic expressions follows
+<br/>from the work of Darwin10and more recently Ekman12 who proposed that basic emotions
+<br/>have corresponding prototypic expressions and described their components, such as
+<br/>crows-feet wrinkles lateral to the outer eye corners, in emotion-specified joy expressions.
+<br/>Considerable evidence suggests that six prototypic expressions (joy, surprise, anger,
+<br/>sadness, disgust, and fear) are universal in their performance and in their perception12
+<br/>and can communicate subjective emotion, communicative
+<br/>intent, and action
+<br/>tendencies.18, 19, 26
+</td><td></td><td>*jeffcohn@pitt.edu
+<br/>kschmidt@pitt.edu
+</td></tr><tr><td>6a0368b4e132f4aa3bbdeada8d894396f201358a</td><td>One-Class Multiple Instance Learning via
+<br/>Robust PCA for Common Object Discovery
+<br/><b>Huazhong University of Science and Technology</b><br/>2Visual Computing Group, Microsoft Research Asia
+<br/>3Lab of Neuro Imaging and Department of Computer Science, UCLA
+</td><td>('2443233', 'Xinggang Wang', 'xinggang wang')<br/>('2554701', 'Zhengdong Zhang', 'zhengdong zhang')<br/>('1700297', 'Yi Ma', 'yi ma')<br/>('1686737', 'Xiang Bai', 'xiang bai')<br/>('1743698', 'Wenyu Liu', 'wenyu liu')<br/>('1736745', 'Zhuowen Tu', 'zhuowen tu')</td><td>{wxghust,zhangzdfaint}@gmail.com, mayi@microsoft.com,
+<br/>{xbai,liuwy}@hust.edu.cn, ztu@loni.ucla.edu
+</td></tr><tr><td>6ab33fa51467595f18a7a22f1d356323876f8262</td><td>Ordinal Hyperplanes Ranker with Cost Sensitivities for Age Estimation
+<br/><b>Institute of Information Science, Academia Sinica, Taipei, Taiwan</b><br/><b>Research Center for Information Technology Innovation, Academia Sinica, Taipei, Taiwan</b><br/><b>National Taiwan University, Taipei, Taiwan</b><br/><b>Graduate Institute of Networking and Multimedia, National Taiwan University, Taipei, Taiwan</b></td><td>('34692779', 'Kuang-Yu Chang', 'kuang-yu chang')<br/>('1720473', 'Chu-Song Chen', 'chu-song chen')<br/>('1732064', 'Yi-Ping Hung', 'yi-ping hung')</td><td>{kuangyu, song}@iis.sinica.edu.tw, hung@csie.ntu.edu.tw
+</td></tr><tr><td>6aefe7460e1540438ffa63f7757c4750c844764d</td><td>Non-rigid Segmentation using Sparse Low Dimensional Manifolds and
+<br/>Deep Belief Networks ∗
+<br/>Instituto de Sistemas e Rob´otica
+<br/>Instituto Superior T´ecnico, Portugal
+</td><td>('3259175', 'Jacinto C. Nascimento', 'jacinto c. nascimento')</td><td></td></tr><tr><td>6a2ac4f831bd0f67db45e7d3cdaeaaa075e7180a</td><td>Excitation Dropout:
+<br/>Encouraging Plasticity in Deep Neural Networks
+<br/>1Pattern Analysis & Computer Vision (PAVIS), Istituto Italiano di Tecnologia
+<br/><b>Boston University</b><br/>3Adobe Research
+<br/><b>University of Verona</b></td><td>('40063519', 'Andrea Zunino', 'andrea zunino')<br/>('3298267', 'Sarah Adel Bargal', 'sarah adel bargal')<br/>('2322579', 'Pietro Morerio', 'pietro morerio')<br/>('1701293', 'Jianming Zhang', 'jianming zhang')<br/>('1749590', 'Stan Sclaroff', 'stan sclaroff')<br/>('1727204', 'Vittorio Murino', 'vittorio murino')</td><td>{andrea.zunino,vittorio.murino}@iit.it,
+<br/>{sbargal,sclaroff}@bu.edu, jianmzha@adobe.com
+</td></tr><tr><td>6a4ebd91c4d380e21da0efb2dee276897f56467a</td><td>HOG ACTIVE APPEARANCE MODELS
+<br/><b>cid:2)Imperial College London, U.K</b><br/><b>University of Lincoln, School of Computer Science, U.K</b></td><td>('2788012', 'Epameinondas Antonakos', 'epameinondas antonakos')<br/>('2575567', 'Joan Alabort-i-Medina', 'joan alabort-i-medina')<br/>('2610880', 'Georgios Tzimiropoulos', 'georgios tzimiropoulos')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')</td><td></td></tr><tr><td>6a1beb34a2dfcdf36ae3c16811f1aef6e64abff2</td><td></td><td></td><td></td></tr><tr><td>6a7e464464f70afea78552c8386f4d2763ea1d9c</td><td>Review Article
+<br/>International Journal of Current Engineering and Technology
+<br/>E-ISSN 2277 – 4106, P-ISSN 2347 - 5161
+<br/>©2014 INPRESSCO
+<br/>, All Rights Reserved
+<br/>Available at http://inpressco.com/category/ijcet
+<br/>Facial Landmark Localization – A Literature Survey
+<br/><b>PES Institute of Technology, Bangalore, Karnataka, India</b><br/>Accepted 25 May 2014, Available online 01 June2014, Vol.4, No.3 (June 2014)
+</td><td></td><td></td></tr><tr><td>32925200665a1bbb4fc8131cd192cb34c2d7d9e3</td><td>3-9
+<br/>MVA2009 IAPR Conference on Machine Vision Applications, May 20-22, 2009, Yokohama, JAPAN
+<br/>An Active Appearance Model with a Derivative-Free
+<br/>Optimization
+<br/><b>CNRS , Institute of Automation of the Chinese Academy of Sciences</b><br/>95, Zhongguancun Dong Lu, PO Box 2728 − Beijing 100190 − PR China
+<br/>LIAMA Sino-French IT Lab.
+</td><td>('8214735', 'Jixia Zhang', 'jixia zhang')<br/>('1742818', 'Franck Davoine', 'franck davoine')<br/>('3364363', 'Chunhong Pan', 'chunhong pan')</td><td>Franck.Davoine@gmail.com
+</td></tr><tr><td>322c063e97cd26f75191ae908f09a41c534eba90</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Improving Image Classification using Semantic Attributes
+<br/>Received: date / Accepted: date
+</td><td>('1758652', 'Yu Su', 'yu su')</td><td></td></tr><tr><td>325b048ecd5b4d14dce32f92bff093cd744aa7f8</td><td>CVPR
+<br/>#2670
+<br/>000
+<br/>001
+<br/>002
+<br/>003
+<br/>004
+<br/>005
+<br/>006
+<br/>007
+<br/>008
+<br/>009
+<br/>010
+<br/>011
+<br/>012
+<br/>013
+<br/>014
+<br/>015
+<br/>016
+<br/>017
+<br/>018
+<br/>019
+<br/>020
+<br/>021
+<br/>022
+<br/>023
+<br/>024
+<br/>025
+<br/>026
+<br/>027
+<br/>028
+<br/>029
+<br/>030
+<br/>031
+<br/>032
+<br/>033
+<br/>034
+<br/>035
+<br/>036
+<br/>037
+<br/>038
+<br/>039
+<br/>040
+<br/>041
+<br/>042
+<br/>043
+<br/>044
+<br/>045
+<br/>046
+<br/>047
+<br/>048
+<br/>049
+<br/>050
+<br/>051
+<br/>052
+<br/>053
+<br/>CVPR 2008 Submission #2670. CONFIDENTIAL REVIEW COPY. DO NOT DISTRIBUTE.
+<br/>CVPR
+<br/>#2670
+<br/>Multi-Image Graph Cut Clothing Segmentation for Recognizing People
+<br/>Anonymous CVPR submission
+<br/>Paper ID 2670
+</td><td></td><td></td></tr><tr><td>32f7e1d7fa62b48bedc3fcfc9d18fccc4074d347</td><td>HIERARCHICAL SPARSE AND COLLABORATIVE LOW-RANK REPRESENTATION FOR
+<br/>EMOTION RECOGNITION
+<br/><b>Johns Hopkins University, 3400 N. Charles Street, Baltimore, MD 21218, USA</b></td><td>('40031188', 'Xiang Xiang', 'xiang xiang')<br/>('31507586', 'Minh Dao', 'minh dao')<br/>('1678633', 'Gregory D. Hager', 'gregory d. hager')<br/>('1709073', 'Trac D. Tran', 'trac d. tran')</td><td>{xxiang, minh.dao, ghager1, trac}@jhu.edu
+</td></tr><tr><td>32d8e555441c47fc27249940991f80502cb70bd5</td><td>Machine Learning Models that Remember Too Much
+<br/><b>Cornell University</b><br/>Cornell Tech
+<br/>Cornell Tech
+</td><td>('3469125', 'Congzheng Song', 'congzheng song')<br/>('1723945', 'Vitaly Shmatikov', 'vitaly shmatikov')<br/>('1707461', 'Thomas Ristenpart', 'thomas ristenpart')</td><td>cs2296@cornell.edu
+<br/>ristenpart@cornell.edu
+<br/>shmat@cs.cornell.edu
+</td></tr><tr><td>3294e27356c3b1063595885a6d731d625b15505a</td><td>Illumination Face Spaces are Idiosyncratic
+<br/>2, H. Kley1, C. Peterson1 ∗
+<br/><b>Colorado State University, Fort Collins, CO 80523, USA</b></td><td>('2640182', 'Jen-Mei Chang', 'jen-mei chang')</td><td></td></tr><tr><td>324f39fb5673ec2296d90142cf9a909e595d82cf</td><td>Hindawi Publishing Corporation
+<br/>Mathematical Problems in Engineering
+<br/>Volume 2011, Article ID 864540, 15 pages
+<br/>doi:10.1155/2011/864540
+<br/>Research Article
+<br/>Relationship Matrix Nonnegative
+<br/>Decomposition for Clustering
+<br/>Faculty of Science and State Key Laboratory for Manufacturing Systems Engineering, Xi’an Jiaotong
+<br/><b>University, Xi an Shaanxi Province, Xi an 710049, China</b><br/>Received 18 January 2011; Revised 28 February 2011; Accepted 9 March 2011
+<br/>Copyright q 2011 J.-Y. Pan and J.-S. Zhang. This is an open access article distributed under
+<br/>the Creative Commons Attribution License, which permits unrestricted use, distribution, and
+<br/>reproduction in any medium, provided the original work is properly cited.
+<br/>Nonnegative matrix factorization (cid:2)NMF(cid:3) is a popular tool for analyzing the latent structure of non-
+<br/>negative data. For a positive pairwise similarity matrix, symmetric NMF (cid:2)SNMF(cid:3) and weighted
+<br/>NMF (cid:2)WNMF(cid:3) can be used to cluster the data. However, both of them are not very efficient
+<br/>for the ill-structured pairwise similarity matrix. In this paper, a novel model, called relationship
+<br/>matrix nonnegative decomposition (cid:2)RMND(cid:3), is proposed to discover the latent clustering structure
+<br/>from the pairwise similarity matrix. The RMND model is derived from the nonlinear NMF
+<br/>algorithm. RMND decomposes a pairwise similarity matrix into a product of three low rank
+<br/>nonnegative matrices. The pairwise similarity matrix is represented as a transformation of a
+<br/>positive semidefinite matrix which pops out the latent clustering structure. We develop a learning
+<br/>procedure based on multiplicative update rules and steepest descent method to calculate the
+<br/>nonnegative solution of RMND. Experimental results in four different databases show that the
+<br/>proposed RMND approach achieves higher clustering accuracy.
+<br/>1. Introduction
+<br/>Nonnegative matrix factorization (cid:2)NMF(cid:3) (cid:6)1(cid:7) has been introduced as an effective technique for
+<br/>analyzing the latent structure of nonnegative data such as images and documents. A variety
+<br/>of real-world applications of NMF has been found in many areas such as machine learning,
+<br/>signal processing (cid:6)2–4(cid:7), data clustering (cid:6)5, 6(cid:7), and computer vision (cid:6)7(cid:7).
+<br/>Most applications focus on the clustering aspect of NMF (cid:6)8, 9(cid:7). Each sample can be
+<br/>represented as a linear combination of clustering centroids. Recently, a theoretic analysis
+<br/>has shown the equivalence between NMF and K-means/spectral clustering (cid:6)10(cid:7). Symmetric
+<br/>NMF (cid:2)SNMF(cid:3) (cid:6)10(cid:7) is an extension of NMF. It aims at learning clustering structure from
+<br/>the kernel matrix or pairwise similarity matrix which is positive semidefinite. When the simi-
+<br/>larity matrix is not positive semidefinite, SNMF is not able to capture the clustering structure
+</td><td>('9416881', 'Ji-Yuan Pan', 'ji-yuan pan')<br/>('2265568', 'Jiang-She Zhang', 'jiang-she zhang')<br/>('14464924', 'Angelo Luongo', 'angelo luongo')</td><td>Correspondence should be addressed to Ji-Yuan Pan, panjiyuan@gmail.com
+</td></tr><tr><td>321bd4d5d80abb1bae675a48583f872af3919172</td><td>Wang et al. EURASIP Journal on Image and Video Processing (2016) 2016:44
+<br/>DOI 10.1186/s13640-016-0152-3
+<br/>EURASIP Journal on Image
+<br/>and Video Processing
+<br/>R EV I E W
+<br/>Entropy-weighted feature-fusion method
+<br/>for head-pose estimation
+<br/>Open Access
+</td><td>('40579241', 'Kang Liu', 'kang liu')<br/>('2076553', 'Xu Qian', 'xu qian')</td><td></td></tr><tr><td>3240c9359061edf7a06bfeb7cc20c103a65904c2</td><td>PPR-FCN: Weakly Supervised Visual Relation Detection via Parallel Pairwise
+<br/>R-FCN
+<br/><b>Columbia University, National University of Singapore</b></td><td>('5462268', 'Hanwang Zhang', 'hanwang zhang')<br/>('26538630', 'Zawlin Kyaw', 'zawlin kyaw')<br/>('46380822', 'Jinyang Yu', 'jinyang yu')<br/>('9546964', 'Shih-Fu Chang', 'shih-fu chang')</td><td>{hanwangzhang, kzl.zawlin, yjy941124}@gmail.com; shih.fu.chang@columbia.edu
+</td></tr><tr><td>32b8c9fd4e3f44c371960eb0074b42515f318ee7</td><td></td><td></td><td></td></tr><tr><td>32575ffa69d85bbc6aef5b21d73e809b37bf376d</td><td>-)5741/ *1-641+ 5)2- 37)16; 1 6-45 . *1-641+ 1.4)61
+<br/>7ELAHIEJO B JJ=M=
+<br/>)*564)+6
+<br/>IKHA L=HE=JEI E >EAJHE? I=FA GK=EJO 9A >ACE MEJD
+<br/>IKHAAJI 9A JDA IDM JD=J JDA >EAJHE? EBH=JE BH
+<br/>JA EI JDA A= D(p(cid:107)q) BH = FAHII E JDA FFK=JE 1
+<br/>BH I= ALAI B >KH MEJD = =IOFJJE? >AD=LEH =J =HCAH
+<br/>>KH
+<br/> 164,7+61
+<br/>*EAJHE? I=FA GK=EJO EI = A=IKHA B JDA KIABKAII B =
+<br/>GK=EJO
+<br/>F=FAH MA FHFIA = AM =FFH=?D J A=IKHA JDEI GK=JEJO
+<br/>JDA EJKEJELA >IAHL=JE JD=J = DECD GK=EJO >EAJHE? E=CA
+<br/>>EAJHE? EBH=JE
+<br/>EIIKAI E >EAJHE? JA?DCO .H AN=FA A B JDA IJ
+<br/>? >EAJHE? GKAIJEI EI JD=J B KEGKAAII AC J MD=J
+<br/>ANJAJ =HA CAHFHEJI KEGKA .H JDA FEJ B LEAM B
+<br/>=>A EBH=JE EI =L=E=>A BH = CELA JA?DCO IK?D
+<br/>  $  "
+<br/>1 JDEI F=FAH MA A=>H=JA = =FFH=?D J
+<br/>BMI
+<br/>AJI
+<br/> >ABHA = >EAJHE? A=IKHAAJ t0 =J MDE?D JEA MA O
+<br/>M = FAHI p EI F=HJ B = FFK=JE q MDE?D =O >A JDA
+</td><td></td><td>4E?D=H@ ;K=H= =@ )@O )@AH
+<br/>5?D B 1BH=JE 6A?DCO =@ -CEAAHEC
+<br/>J=HE +==@=
+<br/>6DEI F=FAH @ALAFI = AM =FFH=?D J K@AHIJ=@ =@ A=
+<br/>JDA EJKEJE JD=J @ACH=@=JEI J = >EAJHE? I=FA ME HA
+<br/>@K?A JDA =KJ B E@AJE=>A EBH=JE =L=E=>A 1 H
+<br/>@AH J A=IKHA JDA =KJ B E@AJE=>A EBH=JE MA
+<br/>@AA >EAJHE? EBH=JE =I JDA @A?HA=IA E K?AHJ=EJO
+<br/>=>KJ JDA E@AJEJO B = FAHI @KA J = IAJ B >EAJHE? A=
+<br/>= FAHI =O >A ?=?K=JA@ >O JDA HA=JELA AJHFO D(p(cid:107)q)
+<br/>>AJMAA JDA FFK=JE BA=JKHA @EIJHE>KJE q =@ JDA FAHII
+<br/>BA=JKHA @EIJHE>KJE p 6DA >EAJHE? EBH=JE BH = IOI
+<br/>H@AH J FH=?JE?=O A=IKHA D(p(cid:107)q) MEJD EEJA@ @=J= I=
+<br/>FAI MA EJH@K?A = =CHEJD MDE?D HACK=HEAI = /=KIIE=
+<br/>@A B JDA BA=JKHA ?L=HE=?AI ) AN=FA B JDEI AJD@
+<br/>EI IDM BH 2+) .EIDAH EA=H @EI?HEE=J ., =@ 1+)
+<br/>>=IA@ B=?A HA?CEJE MEJD >EAJHE? EBH=JE ?=?K=JA@
+<br/>J >A 45.0 >EJI 2+) 37.0 >EJI ., 39.0 >EJI 1+) =@
+<br/>55.6 >EJI BKIE B 2+) =@ ., BA=JKHAI *=IA@  JDEI
+<br/>@AEJE B >EAJHE? EBH=JE MA IEK=JA @ACH=@=JEI
+<br/>B >EAJHE? E=CAI =@ ?=?K=JA JDA HAIKJEC @A?HA=IA E
+<br/>>EAJHE? EBH=JE 4AIKJI IDM = GK=IEEA=H @A?HA=IA
+<br/>>EAJHE? E=CA ' A HA?AJ @ALAFAJ EI JDA IECEB
+<br/>E?=J ALA B EJAHAIJ E IJ=@=H@I BH A=IKHAAJ B >E
+<br/>AJHE? GK=EJO .H AN=FA 15 D=I HA?AJO AIJ=>EIDA@ =
+<br/>>EAJHE? I=FA GK=EJO @H=BJ IJ=@=H@ ' )??H@EC J '
+<br/>>EAJHE? I=FA GK=EJO =O >A ?IE@AHA@ BH JDA FEJ B
+<br/>LEAM B ?D=H=?JAH EDAHAJ BA=JKHAI @AEJO =??KH=?O B BA=
+<br/>JKHAI H KJEEJO FHA@E?JA@ >EAJHE?I FAHBH=?A ) CA
+<br/>AH= ?IAIKI D=I @ALAFA@ JD=J JDA IJ EFHJ=J A=IKHA
+<br/>B = GK=EJO AJHE? EI EJI KJEEJO ` E=CAI AL=K=JA@ =I DECDAH
+<br/>GK=EJO KIJ >A JDIA JD=J HAIKJ E >AJJAH E@AJE?=JE B E
+<br/>@ELE@K=I =I A=IKHA@ >O = E?HA=IA@ IAF=H=JE B CAKEA
+<br/>=@ EFIJH =J?D I?HA @EIJHE>KJEI 6DA =JKHA B >E
+<br/>AJHE? I=FA @AEJO D=I IAA EJJA ELAIJEC=JE =JDKCD
+<br/>BH IFA?E? >EAJHE? @=EJEAI =CHEJDI J A=IKHA >E
+<br/>AJHE? GK=EJO D=LA >AA FHFIA@ .H AN=FA JDA .13
+<br/>=CHEJD   EI = ME@AO KIA@ A=IKHA BH CAHFHEJ E=CA
+<br/>A ?KHHAJ @EB?KJO EI JD=J JDAHA EI  ?IAIKI =I J MD=J
+<br/>= A=IKHA B >EAJHE? I=FA @AEJO IDK@ CELA 1 JDEI
+<br/>>=IA@  = EBH=JE JDAHAJE? BH=AMH 9A >ACE MEJD
+<br/>EI HA KIABK J E@AJEBO JDA E@ELE@K= JD= = M GK=EJO
+<br/>E=CA 6DEI IKCCAIJI JD=J JDA GK=JEJO B E@AJE=>A EBH
+<br/>=JE @A?HA=IAI MEJD = HA@K?JE E GK=EJO /ELA = M=O J
+<br/>A=IKHA JDA @A?HA=IA E EBH=JE ?=KIA@ >O = CELA E
+<br/>=CA @ACH=@=JE A ?= A=IKHA JDA =II?E=JA@ @A?HA=IA E
+<br/>A=IKHEC >EAJHE? EBH=JE ?JAJ EI HA=JA@ J =O
+<br/>E@AJE=>EEJO A =O >A EJAHAIJA@ E DM K?D E@AJE
+<br/>=I LE@A IKHLAE=?A 1 JDA ?JANJ B >EAJHE? BKIE 
+<br/>A MK@ EA J >A =>A J GK=JEBO JDA >EAJHE? EBH=
+<br/>JE E A=?D IOIJA E@ELE@K=O =@ JDA FJAJE= C=E BH
+<br/>BKIEC JDA IOIJAI )@@EJE=O IK?D = A=IKHA EI HAAL=J
+<br/>J >EAJHE? ?HOFJIOIJAI =@ FHEL=?O A=IKHAI 5ALAH=
+<br/>=KJDHI D=LA FHAIAJA@ =FFH=?DAI HAAL=J J JDEI GKAIJE
+<br/>=@@HAII JDEI GKAIJE >=IA@  @AEJEI BH EBH=JE
+<br/>JDAHO   9A @AA JDA JAH ]>EAJHE? EBH=JE^ =I
+<br/>>EAJHE? EBH=JE *1 JDA @A?HA=IA E K?AHJ=EJO =>KJ
+<br/>JDA E@AJEJO B = FAHI @KA J = IAJ B >EAJHE? A=IKHA
+<br/>1 H@AH J EJAHFHAJ JDEI @AEJE MA HABAH J JM EIJ=JI
+<br/>MDA F=AJ =@  =BJAH HA?AELEC = IAJ B A=IKHAAJI
+<br/>t1 MA D=LA HA EBH=JE =@ AII K?AHJ=EJO =>KJ JDA
+<br/>FAHII E@AJEJO
+<br/>*=IA@  JDAIA A=IKHAI MA JDA @AA JDA EBH=JE II
+<br/>@KA J = @ACH=@=JE E E=CA GK=EJO =I JDA HA=JELA ?D=CA
+</td></tr><tr><td>32ecbbd76fdce249f9109594eee2d52a1cafdfc7</td><td>Object Specific Deep Learning Feature and Its Application to Face Detection
+<br/><b>University of Nottingham, Ningbo, China</b><br/><b>University of Nottingham, Ningbo, China</b><br/><b>Shenzhen University, Shenzhen, China</b><br/><b>University of Nottingham, Ningbo, China</b></td><td>('3468964', 'Xianxu Hou', 'xianxu hou')<br/>('39508183', 'Ke Sun', 'ke sun')<br/>('1687690', 'LinLin Shen', 'linlin shen')<br/>('1698461', 'Guoping Qiu', 'guoping qiu')</td><td>xianxu.hou@nottingham.edu.cn
+<br/>ke.sun@nottingham.edu.cn
+<br/>llshen@szu.edu.cn
+<br/>guoping.qiu@nottingham.edu.cn
+</td></tr><tr><td>32c20afb5c91ed7cdbafb76408c3a62b38dd9160</td><td>Viewing Real-World Faces in 3D
+<br/><b>The Open University of Israel, Israel</b></td><td>('1756099', 'Tal Hassner', 'tal hassner')</td><td>hassner@openu.ac.il
+</td></tr><tr><td>32a40c43a9bc1f1c1ed10be3b9f10609d7e0cb6b</td><td>Lighting Aware Preprocessing for Face
+<br/>Recognition across Varying Illumination
+<br/>1 Key Lab of Intelligent Information Processing of Chinese Academy of Sciences
+<br/><b>CAS), Institute of Computing Technology, CAS, Beijing 100190, China</b><br/><b>Graduate University of Chinese Academy of Sciences, Beijing 100049, China</b><br/><b>Institute of Digital Media, Peking University, Beijing 100871, China</b></td><td>('34393045', 'Hu Han', 'hu han')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('2343895', 'Laiyun Qing', 'laiyun qing')<br/>('1710220', 'Xilin Chen', 'xilin chen')<br/>('1698902', 'Wen Gao', 'wen gao')</td><td>{hhan,sgshan,lyqing,xlchen,wgao}@jdl.ac.cn
+</td></tr><tr><td>329394480fc5e9e96de4250cc1a2b060c3677c94</td><td>Improved Dense Trajectory with Cross Streams
+<br/>Graduate School of
+<br/>Information
+<br/>Science and Technology
+<br/><b>University of Tokyo</b><br/>tokyo.ac.jp
+<br/>Graduate School of
+<br/>Information
+<br/>Science and Technology
+<br/><b>University of Tokyo</b><br/>tokyo.ac.jp
+<br/>Graduate School of
+<br/>Information
+<br/>Science and Technology
+<br/><b>University of Tokyo</b><br/>tokyo.ac.jp
+</td><td>('8197937', 'Katsunori Ohnishi', 'katsunori ohnishi')<br/>('2859204', 'Masatoshi Hidaka', 'masatoshi hidaka')<br/>('1790553', 'Tatsuya Harada', 'tatsuya harada')</td><td>ohnishi@mi.t.u-
+<br/>hidaka@mi.t.u-
+<br/>harada@mi.t.u-
+</td></tr><tr><td>32728e1eb1da13686b69cc0bd7cce55a5c963cdd</td><td>Automatic Facial Emotion Recognition Method Based on Eye
+<br/>Region Changes
+<br/><b>Faculty of Electrical and Computer Engineering, Tarbiat Modares University, Tehran, Iran</b><br/><b>Faculty of Electrical and Computer Engineering, Tarbiat Modares University, Tehran, Iran</b><br/><b>Faculty of Electrical and Computer Engineering, Bu-Ali Sina University, Hamadan, Iran</b><br/>Received: 19/Apr/2015 Revised: 19/Mar/2016 Accepted: 19/Apr/2016
+</td><td>('35191740', 'Nasrollah Moghadam Charkari', 'nasrollah moghadam charkari')<br/>('2239524', 'Muharram Mansoorizadeh', 'muharram mansoorizadeh')</td><td>m.navran@modares.ac.ir
+<br/>charkari@modares.ac.ir
+<br/>mansoorm@basu.ac.ir
+</td></tr><tr><td>32c9ebd2685f522821eddfc19c7c91fd6b3caf22</td><td>Finding Correspondence from Multiple Images
+<br/>via Sparse and Low-Rank Decomposition
+<br/><b>School of Computer Engineering, Nanyang Technological University, Singapore</b><br/>2 Advanced Digital Sciences Center, Singapore
+</td><td>('1920683', 'Zinan Zeng', 'zinan zeng')<br/>('1926757', 'Tsung-Han Chan', 'tsung-han chan')<br/>('2370507', 'Kui Jia', 'kui jia')<br/>('1714390', 'Dong Xu', 'dong xu')</td><td>{znzeng,dongxu}@ntu.edu.sg, {Th.chan,Chris.jia}@adsc.com.sg
+</td></tr><tr><td>3270b2672077cc345f188500902eaf7809799466</td><td>Multibiometric Systems: Fusion Strategies and
+<br/>Template Security
+<br/>By
+<br/>A Dissertation
+<br/>Submitted to
+<br/><b>Michigan State University</b><br/>in partial fulfillment of the requirements
+<br/>for the degree of
+<br/>Doctor of Philosophy
+<br/>Department of Computer Science and Engineering
+<br/>2008
+</td><td>('34633765', 'Karthik Nandakumar', 'karthik nandakumar')</td><td></td></tr><tr><td>321c8ba38db118d8b02c0ba209be709e6792a2c7</td><td>Learn to Combine Multiple Hypotheses for Accurate Face Alignment
+<br/>Center for Biometrics and Security Research & National Laboratory of Pattern Recognition
+<br/><b>Institute of Automation, Chinese Academy of Sciences, China</b></td><td>('1721677', 'Junjie Yan', 'junjie yan')<br/>('1718623', 'Zhen Lei', 'zhen lei')<br/>('1716143', 'Dong Yi', 'dong yi')<br/>('34679741', 'Stan Z. Li', 'stan z. li')</td><td>{jjyan,zlei,dyi,szli}@nlpr.ia.ac.cn
+</td></tr><tr><td>324b9369a1457213ec7a5a12fe77c0ee9aef1ad4</td><td>Dynamic Facial Analysis: From Bayesian Filtering to Recurrent Neural Network
+<br/>NVIDIA
+</td><td>('2931118', 'Jinwei Gu', 'jinwei gu')</td><td>{jinweig,xiaodongy,shalinig,jkautz}@nvidia.com
+</td></tr><tr><td>329d58e8fb30f1bf09acb2f556c9c2f3e768b15c</td><td>Leveraging Intra and Inter-Dataset Variations for
+<br/>Robust Face Alignment
+<br/>Department of Computer Science and Technology
+<br/><b>Tsinghua University</b><br/>Department of Information Engineering
+<br/><b>The Chinese University of Hong Kong</b></td><td>('38766009', 'Wenyan Wu', 'wenyan wu')<br/>('1692609', 'Shuo Yang', 'shuo yang')</td><td>wwy15@mails.tsinghua.edu.cn
+<br/>ys014@ie.cuhk.edu.hk
+</td></tr><tr><td>32df63d395b5462a8a4a3c3574ae7916b0cd4d1d</td><td>978-1-4577-0539-7/11/$26.00 ©2011 IEEE
+<br/>1489
+<br/>ICASSP 2011
+</td><td></td><td></td></tr><tr><td>35308a3fd49d4f33bdbd35fefee39e39fe6b30b7</td><td></td><td>('1799216', 'Jeong-Jik Seo', 'jeong-jik seo')<br/>('1780155', 'Jisoo Son', 'jisoo son')<br/>('7627712', 'Wesley De Neve', 'wesley de neve')<br/>('1692847', 'Yong Man Ro', 'yong man ro')</td><td></td></tr><tr><td>353b6c1f431feac6edde12b2dde7e6e702455abd</td><td>Multi-scale Patch based Collaborative
+<br/>Representation for Face Recognition with
+<br/>Margin Distribution Optimization
+<br/><b>Biometric Research Center</b><br/><b>The Hong Kong Polytechnic University</b><br/><b>School of Computer Science and Technology, Tianjin University</b></td><td>('2873638', 'Pengfei Zhu', 'pengfei zhu')<br/>('36685537', 'Lei Zhang', 'lei zhang')<br/>('1688792', 'Qinghua Hu', 'qinghua hu')</td><td>{cspzhu,cslzhang}@comp.polyu.edu.hk
+</td></tr><tr><td>352d61eb66b053ae5689bd194840fd5d33f0e9c0</td><td>Analysis Dictionary Learning based
+<br/>Classification: Structure for Robustness
+</td><td>('49501811', 'Wen Tang', 'wen tang')<br/>('1733181', 'Ashkan Panahi', 'ashkan panahi')<br/>('1769928', 'Hamid Krim', 'hamid krim')<br/>('2622498', 'Liyi Dai', 'liyi dai')</td><td></td></tr><tr><td>350da18d8f7455b0e2920bc4ac228764f8fac292</td><td>From: AAAI Technical Report SS-03-08. Compilation copyright © 2003, AAAI (www.aaai.org). All rights reserved.
+<br/>Automatic Detecting Neutral Face for Face Authentication and
+<br/>Facial Expression Analysis
+<br/>Exploratory Computer Vision Group
+<br/><b>IBM Thomas J. Watson Research Center</b><br/>PO Box 704, Yorktown Heights, NY 10598
+</td><td>('40383812', 'Ying-li Tian', 'ying-li tian')<br/>('1773140', 'Ruud M. Bolle', 'ruud m. bolle')</td><td>{yltian, bolle}@us.ibm.com
+</td></tr><tr><td>3538d2b5f7ab393387ce138611ffa325b6400774</td><td>A DSP-BASED APPROACH FOR THE IMPLEMENTATION OF FACE RECOGNITION
+<br/>ALGORITHMS
+<br/>A. U. Batur
+<br/>B. E. Flinchbaugh
+<br/>M. H. Hayes IIl
+<br/>Center for Signal and Image Proc.
+<br/>Georgia Inst. Of Technology
+<br/>Atlanta, GA
+<br/>Imaging and Audio Lab.
+<br/>Texas Instruments
+<br/>Dallas, TX
+<br/>Center for Signal and Image Proc.
+<br/>Georgia Inst. Of Technology
+<br/>Atlanta, CA
+</td><td></td><td></td></tr><tr><td>3504907a2e3c81d78e9dfe71c93ac145b1318f9c</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Unconstrained Still/Video-Based Face Verification with Deep
+<br/>Convolutional Neural Networks
+<br/>Received: date / Accepted: date
+</td><td>('36407236', 'Jun-Cheng Chen', 'jun-cheng chen')<br/>('2682056', 'Ching-Hui Chen', 'ching-hui chen')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')<br/>('26988560', 'Rajeev Ranjan', 'rajeev ranjan')</td><td></td></tr><tr><td>35b1c1f2851e9ac4381ef41b4d980f398f1aad68</td><td>Geometry Guided Convolutional Neural Networks for
+<br/>Self-Supervised Video Representation Learning
+</td><td>('2551285', 'Chuang Gan', 'chuang gan')<br/>('40206014', 'Boqing Gong', 'boqing gong')<br/>('2473509', 'Kun Liu', 'kun liu')<br/>('49466491', 'Hao Su', 'hao su')<br/>('1744254', 'Leonidas J. Guibas', 'leonidas j. guibas')</td><td></td></tr><tr><td>351c02d4775ae95e04ab1e5dd0c758d2d80c3ddd</td><td>ActionSnapping: Motion-based Video
+<br/>Synchronization
+<br/>Disney Research
+</td><td>('2893744', 'Alexander Sorkine-Hornung', 'alexander sorkine-hornung')</td><td></td></tr><tr><td>35f03f5cbcc21a9c36c84e858eeb15c5d6722309</td><td>Placing Broadcast News Videos in their Social Media
+<br/>Context using Hashtags
+<br/><b>Columbia University</b></td><td>('2136860', 'Joseph G. Ellis', 'joseph g. ellis')<br/>('2602265', 'Svebor Karaman', 'svebor karaman')<br/>('1786871', 'Hongzhi Li', 'hongzhi li')<br/>('36009509', 'Hong Bin Shim', 'hong bin shim')<br/>('9546964', 'Shih-Fu Chang', 'shih-fu chang')</td><td>{jge2105, svebor.karaman, hongzhi.li, h.shim, sc250}@columbia.edu
+</td></tr><tr><td>35e4b6c20756cd6388a3c0012b58acee14ffa604</td><td>Gender Classification in Large Databases
+<br/>E. Ram´on-Balmaseda, J. Lorenzo-Navarro, and M. Castrill´on-Santana (cid:63)
+<br/>Universidad de Las Palmas de Gran Canaria
+<br/>SIANI
+<br/>Spain
+</td><td></td><td>enrique.de101@.alu.ulpgc.es{jlorenzo,mcastrillon}@siani.es
+</td></tr><tr><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td><td>Recognize Complex Events from Static Images by Fusing Deep Channels
+<br/><b>The Chinese University of Hong Kong</b><br/><b>Shenzhen key lab of Comp. Vis. and Pat. Rec., Shenzhen Institutes of Advanced Technology</b><br/>CAS, China
+</td><td>('3331521', 'Yuanjun Xiong', 'yuanjun xiong')</td><td>xy012@ie.cuhk.edu.hk
+<br/>zk013@ie.cuhk.edu.hk
+<br/>dhlin@ie.cuhk.edu.hk
+<br/>xtang@ie.cuhk.edu.hk
+</td></tr><tr><td>35f921def890210dda4b72247849ad7ba7d35250</td><td>Exemplar-based Graph Matching
+<br/>for Robust Facial Landmark Localization
+<br/><b>Carnegie Mellon University</b><br/>Pittsburgh, PA 15213
+<br/>http://www.f-zhou.com
+<br/>Adobe Research
+<br/>San Jose, CA 95110
+</td><td>('1757386', 'Feng Zhou', 'feng zhou')<br/>('1721019', 'Jonathan Brandt', 'jonathan brandt')</td><td>{jbrandt, zlin}@adobe.com
+</td></tr><tr><td>357963a46dfc150670061dbc23da6ba7d6da786e</td><td></td><td></td><td></td></tr><tr><td>35ec9b8811f2d755c7ad377bdc29741b55b09356</td><td>Efficient, Robust and Accurate Fitting of a 3D Morphable Model
+<br/><b>University of Basel</b><br/>Bernoullistrasse 16, CH - 4056 Basel, Switzerland
+</td><td>('3293655', 'Sami Romdhani', 'sami romdhani')<br/>('1687079', 'Thomas Vetter', 'thomas vetter')</td><td>fsami.romdhani, thomas.vetterg@unibas.ch
+</td></tr><tr><td>35f1bcff4552632419742bbb6e1927ef5e998eb4</td><td></td><td></td><td></td></tr><tr><td>35c973dba6e1225196566200cfafa150dd231fa8</td><td></td><td></td><td></td></tr><tr><td>35f084ddee49072fdb6e0e2e6344ce50c02457ef</td><td>A Bilinear Illumination Model
+<br/>for Robust Face Recognition
+<br/>The Harvard community has made this
+<br/>article openly available. Please share how
+<br/>this access benefits you. Your story matters
+<br/>Citation
+<br/>Machiraju. 2005. A bilinear illumination model for robust face
+<br/>recognition. Proceedings of the Tenth IEEE International Conference
+<br/>on Computer Vision: October 17-21, 2005, Beijing, China. 1177-1184.
+<br/>Los Almamitos, C.A.: IEEE Computer Society.
+<br/>Published Version
+<br/>doi:10.1109/ICCV.2005.5
+<br/>Citable link
+<br/>http://nrs.harvard.edu/urn-3:HUL.InstRepos:4238979
+<br/>Terms of Use
+<br/><b></b><br/>repository, and is made available under the terms and conditions
+<br/>applicable to Other Posted Material, as set forth at http://
+<br/>nrs.harvard.edu/urn-3:HUL.InstRepos:dash.current.terms-of-
+<br/>use#LAA
+</td><td>('1780935', 'Baback Moghaddam', 'baback moghaddam')<br/>('1701371', 'Hanspeter Pfister', 'hanspeter pfister')</td><td></td></tr><tr><td>3505c9b0a9631539e34663310aefe9b05ac02727</td><td>A Joint Discriminative Generative Model for Deformable Model
+<br/>Construction and Classification
+<br/><b>Imperial College London, UK</b><br/><b>Faculty of Electrical Engineering, Mathematics and Computer Science, University of Twente, The</b><br/>Netherlands
+</td><td>('2000297', 'Ioannis Marras', 'ioannis marras')<br/>('1793625', 'Symeon Nikitidis', 'symeon nikitidis')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')<br/>('1694605', 'Maja Pantic', 'maja pantic')</td><td>2 Yoti Ltd, London, UK, e-mail: symeon.nikitidis@yoti.com
+</td></tr><tr><td>3506518d616343d3083f4fe257a5ee36b376b9e1</td><td>Unsupervised Domain Adaptation for
+<br/>Personalized Facial Emotion Recognition
+<br/><b>University of Trento</b><br/>Trento, Italy
+<br/>FBK
+<br/><b>University of Perugia</b><br/>Trento, Italy
+<br/>Perugia, Italy
+<br/><b>University of Trento</b><br/>Trento, Italy
+</td><td>('2933565', 'Gloria Zen', 'gloria zen')<br/>('1716310', 'Enver Sangineto', 'enver sangineto')<br/>('40811261', 'Elisa Ricci', 'elisa ricci')<br/>('1703601', 'Nicu Sebe', 'nicu sebe')</td><td></td></tr><tr><td>353a89c277cca3e3e4e8c6a199ae3442cdad59b5</td><td></td><td></td><td></td></tr><tr><td>35e0256b33212ddad2db548484c595334f15b4da</td><td>Attentive Fashion Grammar Network for
+<br/>Fashion Landmark Detection and Clothing Category Classification
+<br/><b>Beijing Lab of Intelligent Information Technology, School of Computer Science, Beijing Institute of Technology, China</b><br/><b>University of California, Los Angeles, USA</b></td><td>('2693875', 'Wenguan Wang', 'wenguan wang')<br/>('2762640', 'Yuanlu Xu', 'yuanlu xu')<br/>('34926055', 'Jianbing Shen', 'jianbing shen')<br/>('3133970', 'Song-Chun Zhu', 'song-chun zhu')</td><td></td></tr><tr><td>35e6f6e5f4f780508e5f58e87f9efe2b07d8a864</td><td>This paper is a preprint (IEEE accepted status). IEEE copyright notice. 2018 IEEE.
+<br/>Personal use of this material is permitted. Permission from IEEE must be obtained for all
+<br/><b>other uses, in any current or future media, including reprinting/republishing this material for</b><br/>advertising or promotional purposes, creating new collective works, for resale or redistribu-
+<br/>tion to servers or lists, or reuse of any copyrighted.
+<br/>A. Tejero-de-Pablos, Y. Nakashima, T. Sato, N. Yokoya, M. Linna and E. Rahtu, ”Sum-
+<br/>marization of User-Generated Sports Video by Using Deep Action Recognition Features,” in
+<br/>doi: 10.1109/TMM.2018.2794265
+<br/>keywords: Cameras; Feature extraction; Games; Hidden Markov models; Semantics;
+<br/>Three-dimensional displays; 3D convolutional neural networks; Sports video summarization;
+<br/>action recognition; deep learning; long short-term memory; user-generated video,
+<br/>URL: http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8259321&isnumber=4456689
+</td><td></td><td></td></tr><tr><td>35e87e06cf19908855a16ede8c79a0d3d7687b5c</td><td>Strategies for Multi-View Face Recognition for
+<br/>Identification of Human Faces: A Review
+<br/>Department of Computer Science
+<br/>Mahatma Gandhi Shikshan Mandal’s,
+<br/><b>Arts, Science and Commerce College, Chopda</b><br/>Dist: Jalgaon (M.S)
+<br/>Dr. R.R.Manza
+<br/>Department of Computer Science and IT
+<br/><b>Dr. Babasaheb Ambedkar Marathwada University</b><br/>Aurangabad.
+</td><td>('21182750', 'Pritesh G. Shah', 'pritesh g. shah')</td><td>pritshah143@gmail.com
+<br/>manzaramesh@gmail.com
+</td></tr><tr><td>352110778d2cc2e7110f0bf773398812fd905eb1</td><td>TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. X, NO. X, JUNE 2014
+<br/>Matrix Completion for Weakly-supervised
+<br/>Multi-label Image Classification
+</td><td>('31671904', 'Ricardo Cabral', 'ricardo cabral')<br/>('1683568', 'Fernando De la Torre', 'fernando de la torre')<br/>('2884203', 'Alexandre Bernardino', 'alexandre bernardino')</td><td></td></tr><tr><td>6964af90cf8ac336a2a55800d9c510eccc7ba8e1</td><td>Temporal Relational Reasoning in Videos
+<br/>MIT CSAIL
+</td><td>('1804424', 'Bolei Zhou', 'bolei zhou')<br/>('50112310', 'Alex Andonian', 'alex andonian')<br/>('1690178', 'Antonio Torralba', 'antonio torralba')</td><td>{bzhou,aandonia,oliva,torralba}@csail.mit.edu
+</td></tr><tr><td>697b0b9630213ca08a1ae1d459fabc13325bdcbb</td><td></td><td></td><td></td></tr><tr><td>69ff40fd5ce7c3e6db95a2b63d763edd8db3a102</td><td>HUMAN AGE ESTIMATION VIA GEOMETRIC AND TEXTURAL
+<br/>FEATURES
+<br/>Merve KILINC1 and Yusuf Sinan AKGUL2
+<br/>1TUBITAK BILGEM UEKAE, Anibal Street, 41470, Gebze, Kocaeli, Turkey
+<br/><b>GIT Vision Lab, http://vision.gyte.edu.tr/, Gebze Institute of Technology</b><br/>Kocaeli, Turkey
+<br/>Keywords:
+<br/>Age estimation:age classification:geometric features:LBP:Gabor:LGBP:cross ratio:FGNET:MORPH
+</td><td></td><td>mkilinc@uekae.tubitak.gov.tr1, mkilinc@gyte.edu.tr2, akgul@bilmuh.gyte.edu.tr2
+</td></tr><tr><td>69adbfa7b0b886caac15ebe53b89adce390598a3</td><td>Face hallucination using cascaded
+<br/>super-resolution and identity priors
+<br/><b>University of Ljubljana, Faculty of Electrical Engineering</b><br/><b>University of Notre Dame</b><br/>Fig. 1. Sample face hallucination results generated with the proposed method.
+</td><td>('3387470', 'Klemen Grm', 'klemen grm')<br/>('2613438', 'Walter J. Scheirer', 'walter j. scheirer')</td><td></td></tr><tr><td>69d29012d17cdf0a2e59546ccbbe46fa49afcd68</td><td>Subspace clustering of dimensionality-reduced data
+<br/>ETH Zurich, Switzerland
+</td><td>('1730683', 'Reinhard Heckel', 'reinhard heckel')<br/>('2208878', 'Michael Tschannen', 'michael tschannen')</td><td>Email: {heckel,boelcskei}@nari.ee.ethz.ch, michaelt@student.ethz.ch
+</td></tr><tr><td>69a68f9cf874c69e2232f47808016c2736b90c35</td><td>Learning Deep Representation for Imbalanced Classification
+<br/><b>The Chinese University of Hong Kong</b><br/>2SenseTime Group Limited
+<br/><b>Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences</b></td><td>('2000034', 'Chen Huang', 'chen huang')<br/>('9263285', 'Yining Li', 'yining li')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td>{chuang,ly015,ccloy,xtang}@ie.cuhk.edu.hk
+</td></tr><tr><td>69de532d93ad8099f4d4902c4cad28db958adfea</td><td></td><td></td><td></td></tr><tr><td>69a55c30c085ad1b72dd2789b3f699b2f4d3169f</td><td>International Journal of Computer Trends and Technology (IJCTT) – Volume 34 Number 3 - April 2016
+<br/>Automatic Happiness Strength Analysis of a
+<br/>Group of People using Facial Expressions
+<br/>Sagiri Prasanthi#1, Maddali M.V.M. Kumar*2,
+<br/>#1PG Student, #2Assistant Professor
+<br/><b>St. Ann s College of Engineering and Technology, Andhra Pradesh, India</b><br/>is a collective concern
+</td><td></td><td></td></tr><tr><td>69b18d62330711bfd7f01a45f97aaec71e9ea6a5</td><td>RESEARCH ARTICLE
+<br/>M-Track: A New Software for Automated
+<br/>Detection of Grooming Trajectories in Mice
+<br/><b>State University of New York Polytechnic Institute, Utica, New York</b><br/><b>United States of America, State University of New York Albany, Albany, New York</b><br/><b>United States of America, State University of New York Albany, Albany</b><br/>New York, United States of America
+<br/>☯ These authors contributed equally to this work.
+<br/>a11111
+</td><td>('35820210', 'Sheldon L. Reeves', 'sheldon l. reeves')<br/>('8626210', 'Kelsey E. Fleming', 'kelsey e. fleming')<br/>('1708615', 'Lin Zhang', 'lin zhang')<br/>('3976998', 'Annalisa Scimemi', 'annalisa scimemi')</td><td>* scimemia@gmail.com, ascimemi@albany.edu
+</td></tr><tr><td>69526cdf6abbfc4bcd39616acde544568326d856</td><td>636
+<br/>[17] B. Moghaddam, T. Jebara, and A. Pentland, “Bayesian face recogni-
+<br/>tion,” Pattern Recognit., vol. 33, no. 11, pp. 1771–1782, Nov. 2000.
+<br/>[18] A. Nefian, “A hidden Markov model-based approach for face detection
+<br/>and recognition,” Ph.D. dissertation, Dept. Elect. Comput. Eng. Elect.
+<br/>Eng., Georgia Inst. Technol., Atlanta, 1999.
+<br/>[19] P. J. Phillips et al., “Overview of the face recognition grand challenge,”
+<br/>presented at the IEEE CVPR, San Diego, CA, Jun. 2005.
+<br/>[20] H. T. Tanaka, M. Ikeda, and H. Chiaki, “Curvature-based face surface
+<br/>recognition using spherical correlation-principal direction for curved
+<br/>object recognition,” in Proc. Int. Conf. Automatic Face and Gesture
+<br/>Recognition, 1998, pp. 372–377.
+<br/>[21] M. Turk and A. Pentland, “Eigenfaces for recognition,” J. Cognit. Sci.,
+<br/>pp. 71–86, 1991.
+<br/>[22] V. N. Vapnik, Statistical Learning Theory. New York: Wiley, 1998.
+<br/>[23] W. Zhao, R. Chellappa, A. Rosenfeld, and P. Phillips, “Face recogni-
+<br/>tion: A literature survey,” ACM Comput. Surveys, vol. 35, no. 44, pp.
+<br/>399–458, 2003.
+<br/>[24] W. Zhao, R. Chellappa, and P. J. Phillips, “Subspace linear discrimi-
+<br/>nant analysis for face recognition,” UMD TR4009, 1999.
+<br/>Face Verification Using Template Matching
+</td><td>('2627097', 'Anil Kumar Sao', 'anil kumar sao')</td><td></td></tr><tr><td>690d669115ad6fabd53e0562de95e35f1078dfbb</td><td>Progressive versus Random Projections for Compressive Capture of Images,
+<br/>Lightfields and Higher Dimensional Visual Signals
+<br/>MIT Media Lab
+<br/>75 Amherst St, Cambridge, MA
+<br/>MERL
+<br/>201 Broadway, Cambridge MA
+<br/>MIT Media Lab
+<br/>75 Amherst St, Cambridge, MA
+</td><td>('1912905', 'Rohit Pandharkar', 'rohit pandharkar')<br/>('1785066', 'Ashok Veeraraghavan', 'ashok veeraraghavan')<br/>('1717566', 'Ramesh Raskar', 'ramesh raskar')</td><td></td></tr><tr><td>6993bca2b3471f26f2c8a47adfe444bfc7852484</td><td>The Do’s and Don’ts for CNN-based Face Verification
+<br/>Carlos Castillo
+<br/><b>University of Maryland, College Park</b><br/>UMIACS
+</td><td>('2068427', 'Ankan Bansal', 'ankan bansal')<br/>('48467498', 'Rajeev Ranjan', 'rajeev ranjan')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td>{ankan,carlos,rranjan1,rama}@umiacs.umd.edu
+</td></tr><tr><td>69eb6c91788e7c359ddd3500d01fb73433ce2e65</td><td>CAMGRAPH: Distributed Graph Processing for
+<br/>Camera Networks
+<br/><b>College of Computing</b><br/><b>Georgia Institute of Technology</b><br/>Atlanta, GA, USA
+</td><td>('3427189', 'Steffen Maass', 'steffen maass')<br/>('5540701', 'Kirak Hong', 'kirak hong')<br/>('1751741', 'Umakishore Ramachandran', 'umakishore ramachandran')</td><td>steffen.maass@gatech.edu,khong9@cc.gatech.edu,rama@cc.gatech.edu
+</td></tr><tr><td>691964c43bfd282f6f4d00b8b0310c554b613e3b</td><td>Temporal Hallucinating for Action Recognition with Few Still Images
+<br/>2†
+<br/><b>Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences, China</b><br/><b>The Chinese University of Hong Kong 3 SenseTime Group Limited</b></td><td>('46696518', 'Lei Zhou', 'lei zhou')<br/>('33427555', 'Yu Qiao', 'yu qiao')</td><td></td></tr><tr><td>69063f7e0a60ad6ce16a877bc8f11b59e5f7348e</td><td>Class-Specific Image Deblurring
+<br/>2, Fatih Porikli1
+<br/><b>The Australian National University Canberra ACT 2601, Australia</b><br/>2NICTA, Locked Bag 8001, Canberra ACT 2601, Australia
+</td><td>('33672969', 'Saeed Anwar', 'saeed anwar')<br/>('1774721', 'Cong Phuoc Huynh', 'cong phuoc huynh')</td><td></td></tr><tr><td>69a9da55bd20ce4b83e1680fbc6be2c976067631</td><td></td><td></td><td></td></tr><tr><td>69c2ac04693d53251500557316c854a625af84ee</td><td>JID: PATREC
+<br/>ARTICLE IN PRESS
+<br/>Contents lists available at ScienceDirect
+<br/>Pattern Recognition Letters
+<br/>journal homepage: www.elsevier.com/locate/patrec
+<br/>[m5G; April 22, 2016;10:30 ]
+<br/>50 years of biometric research: Accomplishments, challenges,
+<br/>and opportunities
+<br/>a , 1 ,
+<br/>a
+<br/><b>Michigan State University, East Lansing, MI 48824, USA</b><br/>b IBM Research Singapore, 9 Changi Business Park Central 1, 486048 Singapore
+<br/>a r t i c l e
+<br/>i n f o
+<br/>a b s t r a c t
+<br/>Article history:
+<br/>Received 4 February 2015
+<br/>Available online xxx
+<br/>Keywords:
+<br/>Biometrics
+<br/>Fingerprints
+<br/>Face
+<br/>Iris
+<br/>Security
+<br/>Privacy
+<br/>Forensics
+<br/>Biometric recognition refers to the automated recognition of individuals based on their biological and
+<br/>behavioral characteristics such as fingerprint, face, iris, and voice. The first scientific paper on automated
+<br/>fingerprint matching was published by Mitchell Trauring in the journal Nature in 1963. The first objec-
+<br/>tive of this paper is to document the significant progress that has been achieved in the field of biometric
+<br/>recognition in the past 50 years since Trauring’s landmark paper. This progress has enabled current state-
+<br/>of-the-art biometric systems to accurately recognize individuals based on biometric trait(s) acquired un-
+<br/>der controlled environmental conditions from cooperative users. Despite this progress, a number of chal-
+<br/>lenging issues continue to inhibit the full potential of biometrics to automatically recognize humans. The
+<br/>second objective of this paper is to enlist such challenges, analyze the solutions proposed to overcome
+<br/>them, and highlight the research opportunities in this field. One of the foremost challenges is the de-
+<br/>sign of robust algorithms for representing and matching biometric samples obtained from uncooperative
+<br/>subjects under unconstrained environmental conditions (e.g., recognizing faces in a crowd). In addition,
+<br/>fundamental questions such as the distinctiveness and persistence of biometric traits need greater atten-
+<br/>tion. Problems related to the security of biometric data and robustness of the biometric system against
+<br/>spoofing and obfuscation attacks, also remain unsolved. Finally, larger system-level issues like usability,
+<br/>user privacy concerns, integration with the end application, and return on investment have not been ad-
+<br/>equately addressed. Unlocking the full potential of biometrics through inter-disciplinary research in the
+<br/>above areas will not only lead to widespread adoption of this promising technology, but will also result
+<br/>in wider user acceptance and societal impact.
+<br/>© 2016 Published by Elsevier B.V.
+<br/>1. Introduction
+<br/>“It is the purpose of this article to present, together with some evi-
+<br/>dence of its feasibility, a method by which decentralized automatic
+<br/>identity verification, such as might be desired for credit, banking
+<br/>or security purposes, can be accomplished through automatic com-
+<br/>parison of the minutiae in finger-ridge patterns.”
+<br/>– Mitchell Trauring, Nature, March 1963
+<br/>In modern society, the ability to reliably identify individu-
+<br/>als in real-time is a fundamental requirement in many applica-
+<br/>tions including forensics, international border crossing, financial
+<br/>transactions, and computer security. Traditionally, an exclusive pos-
+<br/> This paper has been recommended for acceptance by S. Sarkar.
+<br/>Corresponding author. Tel.: +1 517 355 9282; fax: +1 517 432 1061.
+<br/>1 IAPR Fellow.
+<br/>http://dx.doi.org/10.1016/j.patrec.2015.12.013
+<br/>0167-8655/© 2016 Published by Elsevier B.V.
+<br/>session of a token, such as a passport or an ID card, has been ex-
+<br/>tensively used for identifying individuals. In the context of com-
+<br/>puter systems and applications, knowledge-based schemes based
+<br/>on passwords and PINs are commonly used for person authentica-
+<br/>2 Since both token-based and knowledge-based mechanisms
+<br/>tion.
+<br/>have their own strengths and limitations, the use of two-factor
+<br/>authentication schemes that combine both these authentication
+<br/>mechanisms are also popular.
+<br/>Biometric recognition, or simply biometrics, refers to the auto-
+<br/>mated recognition of individuals based on their biological and be-
+<br/>havioral characteristics [39] . Examples of biometric traits that have
+<br/>been successfully used in practical applications include face, fin-
+<br/>gerprint, palmprint, iris, palm/finger vein, and voice. The use of
+<br/>DNA, in the context of biometrics (as opposed to just forensics), is
+<br/>also beginning to gain traction. Since biometric traits are generally
+<br/>inherent to an individual, there is a strong and reasonably
+<br/>2 Authentication involves verifying the claimed identity of a person.
+<br/>Please cite this article as: A.K. Jain et al., 50 years of biometric research: Accomplishments, challenges, and opportunities, Pattern Recog-
+<br/>nition Letters (2016), http://dx.doi.org/10.1016/j.patrec.2015.12.013
+</td><td>('6680444', 'Anil K. Jain', 'anil k. jain')<br/>('34633765', 'Karthik Nandakumar', 'karthik nandakumar')<br/>('1698707', 'Arun Ross', 'arun ross')</td><td>E-mail addresses: jain@cse.msu.edu (A.K. Jain), nkarthik@sg.ibm.com
+<br/>(K. Nandakumar), rossarun@cse.msu.edu (A. Ross).
+</td></tr><tr><td>6974449ce544dc208b8cc88b606b03d95c8fd368</td><td></td><td></td><td></td></tr><tr><td>69fb98e11df56b5d7ec7d45442af274889e4be52</td><td>Harnessing the Deep Net Object Models for
+<br/>enhancing Human Action Recognition
+<br/>O.V. Ramana Murthy1 and Roland Goecke1,2
+<br/><b>Vision and Sensing, HCC Lab, ESTeM, University of Canberra</b><br/><b>IHCC, RSCS, CECS, Australian National University</b></td><td></td><td>Email: O.V.RamanaMurthy@ieee.org, roland.goecke@ieee.org
+</td></tr><tr><td>3cb2841302af1fb9656f144abc79d4f3d0b27380</td><td>See discussions, stats, and author profiles for this publication at: https://www.researchgate.net/publication/319928941
+<br/>When 3D-Aided 2D Face Recognition Meets Deep
+<br/>Learning: An extended UR2D for Pose-Invariant
+<br/>Face Recognition
+<br/>Article · September 2017
+<br/>CITATIONS
+<br/>4 authors:
+<br/>READS
+<br/>33
+<br/>Xiang Xu
+<br/><b>University of Houston</b><br/>Pengfei Dou
+<br/><b>University of Houston</b><br/>8 PUBLICATIONS 10 CITATIONS
+<br/>9 PUBLICATIONS 29 CITATIONS
+<br/>SEE PROFILE
+<br/>SEE PROFILE
+<br/>Ha Le
+<br/><b>University of Houston</b><br/>7 PUBLICATIONS 2 CITATIONS
+<br/>Ioannis A Kakadiaris
+<br/><b>University of Houston</b><br/>468 PUBLICATIONS 5,233 CITATIONS
+<br/>SEE PROFILE
+<br/>SEE PROFILE
+<br/>Some of the authors of this publication are also working on these related projects:
+<br/>3D-Aided 2D Face Recognition View project
+<br/>iRay: mobile medical AR View project
+<br/>All content following this page was uploaded by Xiang Xu on 27 September 2017.
+<br/>The user has requested enhancement of the downloaded file.
+</td><td></td><td></td></tr><tr><td>3c78b642289d6a15b0fb8a7010a1fb829beceee2</td><td>Analysis of Facial Dynamics
+<br/>Using a Tensor Framework
+<br/><b>University of Bristol</b><br/>Department of Computer Science
+<br/>Bristol, United Kingdom
+<br/><b>University of Bristol</b><br/>Department of Experimental Psychology
+<br/>Bristol, United Kingdom
+</td><td>('2903159', 'Lisa Gralewski', 'lisa gralewski')<br/>('23725787', 'Edward Morrison', 'edward morrison')<br/>('2022210', 'Ian Penton-Voak', 'ian penton-voak')</td><td>gralewsk@cs.bris.ac.uk
+</td></tr><tr><td>3cc3cf57326eceb5f20a02aefae17108e8c8ab57</td><td>BENCHMARK FOR EVALUATING BIOLOGICAL IMAGE ANALYSIS TOOLS
+<br/>Center for Bio-Image Informatics, Electrical and Computer Engineering Department,
+<br/><b>University of California, Santa Barbara</b><br/>http://www.bioimage.ucsb.edu
+<br/>Biological images are critical components for a detailed understanding of the structure and functioning of cells and proteins.
+<br/>Image processing and analysis tools increasingly play a significant role in better harvesting this vast amount of data, most of
+<br/>which is currently analyzed manually and qualitatively. A number of image analysis tools have been proposed to automatically
+<br/>extract the image information. As the studies relying on image analysis tools have become widespread, the validation of
+<br/>these methods, in particular, segmentation methods, has become more critical. There have been very few efforts at creating
+<br/>benchmark datasets in the context of cell and tissue imaging, while, there have been successful benchmarks in other fields, such
+<br/>as the Berkeley segmentation dataset [1], the handwritten digit recognition dataset MNIST [2] and face recognition dataset [3, 4].
+<br/>In the field of biomedical image processing, most of standardized benchmark data sets concentrates on macrobiological images
+<br/>such as mammograms and magnet resonance imaging (MRI) images [5], however, there is still a lack of a standardized dataset
+<br/>for microbiological structures (e.g. cells and tissues) and it is well known in biomedical imaging [5].
+<br/>We propose a benchmark for biological images to: 1) provide image collections with well defined ground truth; 2) provide
+<br/>image analysis tools and evaluation methods to compare and validate analysis tools. We include a representative dataset of
+<br/>microbiological structures whose scales range from a subcellular level (nm) to a tissue level (µm), inheriting intrinsic challenges
+<br/>in the domain of biomedical image analysis (Fig. 1). The dataset is acquired through two of the main microscopic imaging
+<br/>techniques: transmitted light microscopy and confocal laser scanning microscopy. The analysis tools1in the benchmark are
+<br/>designed to obtain different quantitative measures from the dataset including microtubule tracing, cell segmentation, and retinal
+<br/>layer segmentation.
+<br/>Fig. 1. Example dataset provided in the benchmark.
+<br/>This research is supported by NSF ITR-0331697.
+<br/>1All analysis tools mentioned in this work can be found at http://www.bioimage.ucsb.edu/publications/.
+<br/>ScaleConfocal microscopyLight microscopymicrotubulehorizontal cellSubcellular(< 1 µm)photoreceptorsbreast cancer cellsCOS1 cellsCellularTissue(< 10 µm)(< 30 µm)(< 350 µm)(≈10-50 µm in width)retinal layers </td><td>('8451780', 'Elisa Drelie Gelasca', 'elisa drelie gelasca')<br/>('3045933', 'Jiyun Byun', 'jiyun byun')<br/>('3064236', 'Boguslaw Obara', 'boguslaw obara')</td><td></td></tr><tr><td>3cb488a3b71f221a8616716a1fc2b951dd0de549</td><td>Facial Age Estimation by
+<br/>Adaptive Label Distribution Learning
+<br/>School of Computer Science and Engineering
+<br/>Key Lab of Computer Network and Information Integration, Ministry of Education
+<br/><b>Southeast University, Nanjing 211189, China</b></td><td>('1735299', 'Xin Geng', 'xin geng')<br/>('1794816', 'Qin Wang', 'qin wang')<br/>('40228279', 'Yu Xia', 'yu xia')</td><td>Email: {xgeng, qinwang, xiayu}@seu.edu.cn
+</td></tr><tr><td>3cfbe1f100619a932ba7e2f068cd4c41505c9f58</td><td>A Realistic Simulation Tool for Testing Face Recognition
+<br/>Systems under Real-World Conditions∗
+<br/>M. Correa, J. Ruiz-del-Solar, S. Parra-Tsunekawa, R. Verschae
+<br/>Department of Electrical Engineering, Universidad de Chile
+<br/>Advanced Mining Technology Center, Universidad de Chile
+</td><td></td><td></td></tr><tr><td>3c563542db664321aa77a9567c1601f425500f94</td><td>TV-GAN: Generative Adversarial Network Based Thermal to Visible Face
+<br/>Recognition
+<br/><b>The University of Queensland, School of ITEE, QLD 4072, Australia</b></td><td>('50615828', 'Teng Zhang', 'teng zhang')<br/>('2331880', 'Arnold Wiliem', 'arnold wiliem')<br/>('1973322', 'Siqi Yang', 'siqi yang')<br/>('2270092', 'Brian C. Lovell', 'brian c. lovell')</td><td>[patrick.zhang, a.williem, siqi.yang]@uq.edu.au, lovell@itee.uq.edu.au
+</td></tr><tr><td>3c03d95084ccbe7bf44b6d54151625c68f6e74d0</td><td></td><td></td><td></td></tr><tr><td>3cd7b15f5647e650db66fbe2ce1852e00c05b2e4</td><td></td><td></td><td></td></tr><tr><td>3c6cac7ecf546556d7c6050f7b693a99cc8a57b3</td><td>Robust Facial Landmark Detection in the Wild
+<br/>Submitted for the Degree of
+<br/>Doctor of Philosophy
+<br/>from the
+<br/><b>University of Surrey</b><br/>Centre for Vision, Speech and Signal Processing
+<br/>Faculty of Engineering and Physical Sciences
+<br/><b>University of Surrey</b><br/>Guildford, Surrey GU2 7XH, U.K.
+<br/>January 2016
+</td><td>('37705062', 'Zhenhua Feng', 'zhenhua feng')<br/>('37705062', 'Zhenhua Feng', 'zhenhua feng')</td><td></td></tr><tr><td>3c57e28a4eb463d532ea2b0b1ba4b426ead8d9a0</td><td>Defeating Image Obfuscation with Deep Learning
+<br/><b>The University of Texas at</b><br/>Austin
+<br/>Cornell Tech
+<br/>Cornell Tech
+</td><td>('34861228', 'Richard McPherson', 'richard mcpherson')<br/>('2520493', 'Reza Shokri', 'reza shokri')<br/>('1723945', 'Vitaly Shmatikov', 'vitaly shmatikov')</td><td>richard@cs.utexas.edu
+<br/>shokri@cornell.edu
+<br/>shmat@cs.cornell.edu
+</td></tr><tr><td>3cd9b0a61bdfa1bb8a0a1bf0369515a76ecd06e3</td><td>Submitted 2/11; Revised 10/11; Published ??/11
+<br/>Distance Metric Learning with Eigenvalue Optimization
+<br/><b>College of Engineering, Mathematics and Physical Sciences</b><br/><b>University of Exeter</b><br/>Harrison Building, North Park Road
+<br/>Exeter, EX4 4QF, UK
+<br/>Department of Engineering Mathematics
+<br/><b>University of Bristol</b><br/>Merchant Venturers Building, Woodland Road
+<br/>Bristol, BS8 1UB, UK
+<br/>Editor:
+</td><td>('38954213', 'Yiming Ying', 'yiming ying')<br/>('1695363', 'Peng Li', 'peng li')</td><td>y.ying@exeter.ac.uk
+<br/>lipeng@ieee.org
+</td></tr><tr><td>3c97c32ff575989ef2869f86d89c63005fc11ba9</td><td>Face Detection with the Faster R-CNN
+<br/>Erik Learned-Miller
+<br/><b>University of Massachusetts Amherst</b><br/><b>University of Massachusetts Amherst</b><br/>Amherst MA 01003
+<br/>Amherst MA 01003
+</td><td>('40175280', 'Huaizu Jiang', 'huaizu jiang')</td><td>hzjiang@cs.umass.edu
+<br/>elm@cs.umass.edu
+</td></tr><tr><td>3ce2ecf3d6ace8d80303daf67345be6ec33b3a93</td><td></td><td></td><td></td></tr><tr><td>3c1aef7c2d32a219bdbc89a44d158bc2695e360a</td><td>Adversarial Attack Type I: Generating False Positives
+<br/><b>Shanghai Jiao Tong University</b><br/>Shanghai, P.R. China 200240
+<br/><b>Shanghai Jiao Tong University</b><br/>Shanghai, P.R. China 200240
+<br/><b>Shanghai Jiao Tong University</b><br/>Shanghai, P.R. China 200240
+<br/><b>Shanghai Jiao Tong University</b><br/>Shanghai, P.R. China 200240
+</td><td>('51428687', 'Sanli Tang', 'sanli tang')<br/>('13858459', 'Mingjian Chen', 'mingjian chen')<br/>('2182657', 'Xiaolin Huang', 'xiaolin huang')<br/>('1688428', 'Jie Yang', 'jie yang')</td><td>tangsanli@sjtu.edu.cn
+<br/>w179261466@sjtu.edu.cn
+<br/>xiaolinhuang@sjtu.edu.cn
+<br/>jieyang@sjtu.edu.cn
+</td></tr><tr><td>3c374cb8e730b64dacb9fbf6eb67f5987c7de3c8</td><td>Measuring Gaze Orientation for Human-Robot
+<br/>Interaction
+<br/>∗ CNRS; LAAS; 7 avenue du Colonel Roche, 31077 Toulouse Cedex, France
+<br/>† Universit´e de Toulouse; UPS; LAAS-CNRS : F-31077 Toulouse, France
+<br/>Introduction
+<br/>In the context of Human-Robot interaction estimating gaze orientation brings
+<br/>useful information about human focus of attention. This is a contextual infor-
+<br/>mation : when you point something you usually look at it. Estimating gaze
+<br/>orientation requires head pose estimation. There are several techniques to esti-
+<br/>mate head pose from images, they are mainly based on training [3, 4] or on local
+<br/>face features tracking [6]. The approach described here is based on local face
+<br/>features tracking in image space using online learning, it is a mixed approach
+<br/>since we track face features using some learning at feature level. It uses SURF
+<br/>features [2] to guide detection and tracking. Such key features can be matched
+<br/>between images, used for object detection or object tracking [10]. Several ap-
+<br/>proaches work on fixed size images like training techniques which mainly work
+<br/>on low resolution images because of computation costs whereas approaches based
+<br/>on local features tracking work on high resolution images. Tracking face features
+<br/>such as eyes, nose and mouth is a common problem in many applications such as
+<br/>detection of facial expression or video conferencing [8] but most of those appli-
+<br/>cations focus on front face images [9]. We developed an algorithm based on face
+<br/>features tracking using a parametric model. First we need face detection, then
+<br/>we detect face features in following order: eyes, mouth, nose. In order to achieve
+<br/>full profile detection we use sets of SURF to learn what eyes, mouth and nose
+<br/>look like once tracking is initialized. Once those sets of SURF are known they
+<br/>are used to detect and track face features. SURF have a descriptor which is often
+<br/>used to identify a key point and here we add some global geometry information
+<br/>by using the relative position between key points. Then we use a particle filter to
+<br/>track face features using those SURF based detectors, we compute the head pose
+<br/>angles from features position and pass the results through a median filter. This
+<br/>paper is organized as follows. Section 2 describes our modeling of visual features,
+<br/>section 3 presents our tracking implementation. Section 4 presents results we get
+<br/>with our implementation and future works in section 5.
+<br/>2 Visual features
+<br/>We use some basic properties of facial features to initialize our algorithm : eyes
+<br/>are dark and circular, mouth is an horizontal dark line with a specific color,...
+</td><td>('5253126', 'R. Brochard', 'r. brochard')<br/>('2667229', 'B. Burger', 'b. burger')<br/>('2325221', 'A. Herbulot', 'a. herbulot')<br/>('1797260', 'F. Lerasle', 'f. lerasle')</td><td></td></tr><tr><td>3c0bbfe664fb083644301c67c04a7f1331d9515f</td><td>The Role of Color and Contrast in Facial Age Estimation
+<br/>Paper ID: 7
+<br/><b>No Institute Given</b></td><td></td><td></td></tr><tr><td>3c4f6d24b55b1fd3c5b85c70308d544faef3f69a</td><td>A Hybrid Deep Learning Architecture for
+<br/>Privacy-Preserving Mobile Analytics
+<br/><b>cid:63)Sharif University of Technology, University College London, Queen Mary University of London</b></td><td>('8201306', 'Seyed Ali Ossia', 'seyed ali ossia')<br/>('9920557', 'Ali Shahin Shamsabadi', 'ali shahin shamsabadi')<br/>('2251846', 'Ali Taheri', 'ali taheri')<br/>('1688652', 'Hamid R. Rabiee', 'hamid r. rabiee')<br/>('1763096', 'Hamed Haddadi', 'hamed haddadi')</td><td></td></tr><tr><td>3cb0ef5aabc7eb4dd8d32a129cb12b3081ef264f</td><td>Absolute Head Pose Estimation From Overhead Wide-Angle Cameras
+<br/><b>IBM T.J. Watson Research Center</b><br/>19 Skyline Drive, Hawthorne, NY 10532 USA
+</td><td>('40383812', 'Ying-li Tian', 'ying-li tian')<br/>('1690709', 'Arun Hampapur', 'arun hampapur')</td><td>{ yltian,lisabr,jconnell,sharat,arunh,aws,bolle }@us.ibm.com
+</td></tr><tr><td>3cb64217ca2127445270000141cfa2959c84d9e7</td><td></td><td></td><td></td></tr><tr><td>3c11a1f2bd4b9ce70f699fb6ad6398171a8ad3bd</td><td>International Journal of Computer Information Systems and Industrial Management Applications (IJCISIM)
+<br/>ISSN: 2150-7988 Vol.2 (2010), pp.262-278
+<br/>http://www.mirlabs.org/ijcisim
+<br/>Simulating Pareidolia of Faces for Architectural Image Analysis
+<br/>Newcastle Robotics Laboratory
+<br/>School of Electrical Engineering and Computer Science
+<br/><b>The University of Newcastle, Callaghan 2308, Australia</b><br/>School of Architecture and Built Environment
+<br/><b>The University of Newcastle</b><br/>Callaghan 2308, Australia
+</td><td>('1716539', 'Stephan K. Chalup', 'stephan k. chalup')<br/>('40211094', 'Michael J. Ostwald', 'michael j. ostwald')</td><td>Stephan.Chalup@newcastle.edu.au, Kenny.Hong@uon.edu.au
+<br/>Michael.Ostwald@newcastle.edu.au
+</td></tr><tr><td>3cd8ab6bb4b038454861a36d5396f4787a21cc68</td><td> Video‐Based Facial Expression Recognition Using Hough Forest
+<br/><b>National Tsing Hua University, Hsin-Chu, Taiwan</b><br/><b>Asian University, Taichung, Taiwan</b></td><td>('2790846', 'Shih-Chung Hsu', 'shih-chung hsu')<br/>('1793389', 'Chung-Lin Huang', 'chung-lin huang')</td><td>E-mail: d9761817@oz.nthu.edu.tw, clhuang@asia.edu.tw
+</td></tr><tr><td>3cd5da596060819e2b156e8b3a28331ef633036b</td><td></td><td></td><td></td></tr><tr><td>3ca5d3b8f5f071148cb50f22955fd8c1c1992719</td><td>EVALUATING RACE AND SEX DIVERSITY IN THE WORLD’S LARGEST
+<br/>COMPANIES USING DEEP NEURAL NETWORKS
+<br/>1 ​Youth Laboratories, Ltd, Diversity AI Group, Skolkovo Innovation Center, Nobel Street 5,
+<br/>143026, Moscow, Russia
+<br/>2 ​Insilico Medicine, Emerging Technology Centers, JHU, 1101 33rd Street, Baltimore, MD,
+<br/>21218, USA
+<br/><b>University of Oxford, Oxford, United Kingdom</b><br/><b>Computer Engineering and Computer Science, Duthie Center for Engineering, University of</b><br/>Louisville, Louisville, KY 40292, USA
+<br/>5 ​Computer Vision Lab, Department of Information Technology and Electrical Engineering, ETH
+<br/>Zürich, Switzerland
+<br/><b>Center for Healthy Aging, University of</b><br/>Copenhagen, Denmark
+<br/>7 ​The Biogerontology Research Foundation, 2354 Chynoweth House, Trevissome Park, Truro,
+<br/>TR4 8UN, UK.
+<br/><b>Moscow Institute of Physics and Technology, Institutskiy per., 9, Dolgoprudny, 141701, Russia</b></td><td>('3888942', 'Konstantin Chekanov', 'konstantin chekanov')<br/>('4017984', 'Polina Mamoshina', 'polina mamoshina')<br/>('1976753', 'Roman V. Yampolskiy', 'roman v. yampolskiy')<br/>('1732855', 'Radu Timofte', 'radu timofte')<br/>('40336662', 'Alex Zhavoronkov', 'alex zhavoronkov')</td><td>Morten Scheibye-Knudsen: ​mscheibye@sund.ku.dk
+<br/>Alex Zhavoronkov: ​alex@biogerontology.org
+</td></tr><tr><td>3c56acaa819f4e2263638b67cea1ec37a226691d</td><td>Body Joint guided 3D Deep Convolutional
+<br/>Descriptors for Action Recognition
+</td><td>('3201156', 'Congqi Cao', 'congqi cao')<br/>('46867228', 'Yifan Zhang', 'yifan zhang')<br/>('1713887', 'Chunjie Zhang', 'chunjie zhang')<br/>('1694235', 'Hanqing Lu', 'hanqing lu')</td><td></td></tr><tr><td>3cc46bf79fb9225cf308815c7d41c8dd5625cc29</td><td>AGE INTERVAL AND GENDER PREDICTION USING PARAFAC2 APPLIED TO SPEECH
+<br/>UTTERANCES
+<br/><b>Aristotle University of Thessaloniki</b><br/>Thessaloniki 54124, GREECE
+<br/><b>Cyprus University of Technology</b><br/>3040 Limassol, Cyprus
+</td><td>('3352401', 'Evangelia Pantraki', 'evangelia pantraki')<br/>('1736143', 'Constantine Kotropoulos', 'constantine kotropoulos')<br/>('1830709', 'Andreas Lanitis', 'andreas lanitis')</td><td>{pantraki@|costas@aiia}.csd.auth.gr
+<br/>andreas.lanitis@cut.ac.cy
+</td></tr><tr><td>3c8da376576938160cbed956ece838682fa50e9f</td><td>Chapter 4
+<br/>Aiding Face Recognition with
+<br/>Social Context Association Rule
+<br/>based Re-Ranking
+<br/>Humans are very efficient at recognizing familiar face images even in challenging condi-
+<br/>tions. One reason for such capabilities is the ability to understand social context between
+<br/>individuals. Sometimes the identity of the person in a photo can be inferred based on the
+<br/>identity of other persons in the same photo, when some social context between them is
+<br/>known. This chapter presents an algorithm to utilize the co-occurrence of individuals as
+<br/>the social context to improve face recognition. Association rule mining is utilized to infer
+<br/>multi-level social context among subjects from a large repository of social transactions.
+<br/>The results are demonstrated on the G-album and on the SN-collection pertaining to 4675
+<br/>identities prepared by the authors from a social networking website. The results show that
+<br/>association rules extracted from social context can be used to augment face recognition and
+<br/>improve the identification performance.
+<br/>4.1
+<br/>Introduction
+<br/>Face recognition capabilities of humans have inspired several researchers to understand
+<br/>the science behind it and use it in developing automated algorithms. Recently, it is also
+<br/>argued that encoding social context among individuals can be leveraged for improved
+<br/>automatic face recognition [175]. As shown in Figure 4.1, often times a person’s identity
+<br/>can be inferred based on the identity of other persons in the same photo, when some social
+<br/>context between them is known. A subject’s face in consumer photos generally co-occur
+<br/>along with their socially relevant people. With the advent of social networking services,
+<br/>the social context between individuals is readily available. Face recognition performance
+<br/>105
+</td><td></td><td></td></tr><tr><td>56e4dead93a63490e6c8402a3c7adc493c230da5</td><td>World Journal of Computer Application and Technology 1(2): 41-50, 2013
+<br/>DOI: 10.13189/wjcat.2013.010204
+<br/> http://www.hrpub.org
+<br/>Face Recognition Techniques: A Survey
+<br/>V.Vijayakumari
+<br/><b>Sri krishna College of Technology, Coimbatore, India</b><br/>Copyright © 2013 Horizon Research Publishing All rights reserved.
+</td><td></td><td>*Corresponding Author: ebinviji@rediffmail.com
+</td></tr><tr><td>56e885b9094391f7d55023a71a09822b38b26447</td><td>FREQUENCY DECODED LOCAL BINARY PATTERN
+<br/>Face Retrieval using Frequency Decoded Local
+<br/>Descriptor
+</td><td>('34992579', 'Shiv Ram Dubey', 'shiv ram dubey')</td><td></td></tr><tr><td>56c700693b63e3da3b985777da6d9256e2e0dc21</td><td>Global Refinement of Random Forest
+<br/><b>University of Science and Technology of China</b><br/>Microsoft Research
+</td><td>('3080683', 'Shaoqing Ren', 'shaoqing ren')<br/>('2032273', 'Xudong Cao', 'xudong cao')<br/>('1732264', 'Yichen Wei', 'yichen wei')<br/>('40055995', 'Jian Sun', 'jian sun')</td><td>sqren@mail.ustc.edu.cn
+<br/>{xudongca,yichenw,jiansun}@microsoft.com
+</td></tr><tr><td>56359d2b4508cc267d185c1d6d310a1c4c2cc8c2</td><td>Shape Driven Kernel Adaptation in
+<br/>Convolutional Neural Network for Robust Facial Trait Recognition
+<br/>1Key Lab of Intelligent Information Processing of Chinese Academy of Sciences (CAS),
+<br/><b>Institute of Computing Technology, CAS, Beijing, 100190, China</b><br/><b>National Laboratory of Pattern Recognition, Institute of Automation, CAS, Beijing, 100190, China</b><br/><b>National University of Singapore, Singapore</b></td><td>('1688086', 'Shaoxin Li', 'shaoxin li')<br/>('1757173', 'Junliang Xing', 'junliang xing')<br/>('1773437', 'Zhiheng Niu', 'zhiheng niu')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')</td><td></td></tr><tr><td>56e079f4eb40744728fd1d7665938b06426338e5</td><td>Bayesian Approaches to Distribution Regression
+<br/><b>University of Oxford</b><br/><b>University College London</b><br/><b>University of Oxford</b><br/><b>Imperial College London</b></td><td>('35142231', 'Ho Chung Leon Law', 'ho chung leon law')<br/>('36326783', 'Dougal J. Sutherland', 'dougal j. sutherland')<br/>('1698032', 'Dino Sejdinovic', 'dino sejdinovic')<br/>('2127497', 'Seth Flaxman', 'seth flaxman')</td><td>ho.law@spc.ox.ac.uk
+<br/>dougal@gmail.com
+<br/>dino.sejdinovic@stats.ox.ac.uk
+<br/>s.flaxman@imperial.ac.uk
+</td></tr><tr><td>56e6f472090030a6f172a3e2f46ef9daf6cad757</td><td>Asian Face Image Database PF01
+<br/>Intelligent Multimedia Lab.
+<br/>†Department of Computer Science and Engineering
+<br/><b>Pohang University of Science and Technology</b><br/>San 31, Hyoja-Dong, Nam-Gu, Pohang, 790-784, Korea
+</td><td></td><td></td></tr><tr><td>56a653fea5c2a7e45246613049fb16b1d204fc96</td><td>3287
+<br/>Quaternion Collaborative and Sparse Representation
+<br/>With Application to Color Face Recognition
+<br/>representation-based
+</td><td>('2888882', 'Cuiming Zou', 'cuiming zou')<br/>('3369665', 'Kit Ian Kou', 'kit ian kou')<br/>('3154834', 'Yulong Wang', 'yulong wang')</td><td></td></tr><tr><td>56f86bef26209c85f2ef66ec23b6803d12ca6cd6</td><td>Pyramidal RoR for Image Classification
+<br/><b>North China Electric Power University, Baoding, China</b></td><td>('32164792', 'Ke Zhang', 'ke zhang')<br/>('3451321', 'Liru Guo', 'liru guo')<br/>('35038034', 'Ce Gao', 'ce gao')<br/>('2626320', 'Zhenbing Zhao', 'zhenbing zhao')</td><td>Eail:zhangke41616@126.com
+</td></tr><tr><td>5666ed763698295e41564efda627767ee55cc943</td><td>Manuscript
+<br/>Click here to download Manuscript: template.tex
+<br/>Click here to view linked References
+<br/>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Relatively-Paired Space Analysis: Learning a Latent Common
+<br/>Space from Relatively-Paired Observations
+<br/>Received: date / Accepted: date
+</td><td>('1874900', 'Zhanghui Kuang', 'zhanghui kuang')</td><td></td></tr><tr><td>566a39d753c494f57b4464d6bde61bf3593f7ceb</td><td>A Critical Review of Action Recognition Benchmarks
+<br/><b>The Open University of Israel</b></td><td>('1756099', 'Tal Hassner', 'tal hassner')</td><td>hassner@openu.ac.il
+</td></tr><tr><td>56c2fb2438f32529aec604e6fc3b06a595ddbfcc</td><td>MAICS 2016
+<br/>pp. 97–102
+<br/>Comparison of Recent Machine Learning Techniques for Gender Recognition
+<br/>from Facial Images
+<br/>Computer Science Department
+<br/><b>Central Washington University</b><br/>Ellensburg, WA, USA
+<br/>Computer Science Department
+<br/><b>Central Washington University</b><br/>Ellensburg, WA, USA
+<br/>R˘azvan Andonie
+<br/>Computer Science Department
+<br/><b>Central Washington University</b><br/>Computer Science Department
+<br/><b>Central Washington University</b><br/>Ellensburg, WA, USA
+<br/>Ellensburg, WA, USA
+<br/>and
+<br/>Electronics and Computers Department
+<br/><b>Transilvania University</b><br/>Bras¸ov, Romania
+</td><td>('9770023', 'Joseph Lemley', 'joseph lemley')<br/>('9770023', 'Joseph Lemley', 'joseph lemley')<br/>('40470929', 'Sami Abdul-Wahid', 'sami abdul-wahid')<br/>('35877118', 'Dipayan Banik', 'dipayan banik')</td><td></td></tr><tr><td>56f231fc40424ed9a7c93cbc9f5a99d022e1d242</td><td>Age Estimation Based on A Single Network with
+<br/>Soft Softmax of Aging Modeling
+<br/>1Center for Biometrics and Security Research & National Laboratory of Pattern
+<br/><b>Recognition, Institute of Automation, Chinese Academy of Sciences, Beijing, China</b><br/><b>University of Chinese Academy of Sciences</b><br/>3Faculty of Information Technology,
+<br/><b>Macau University of Science and Technology, Macau</b></td><td>('9645431', 'Zichang Tan', 'zichang tan')<br/>('2950852', 'Shuai Zhou', 'shuai zhou')<br/>('1756538', 'Jun Wan', 'jun wan')<br/>('1718623', 'Zhen Lei', 'zhen lei')<br/>('34679741', 'Stan Z. Li', 'stan z. li')</td><td></td></tr><tr><td>5615d6045301ecbc5be35e46cab711f676aadf3a</td><td>Discriminatively Learned Hierarchical Rank Pooling Networks
+<br/>Received: date / Accepted: date
+</td><td>('1688071', 'Basura Fernando', 'basura fernando')</td><td></td></tr><tr><td>561ae67de137e75e9642ab3512d3749b34484310</td><td>December 2017
+<br/>DeepGestalt - Identifying Rare Genetic Syndromes
+<br/>Using Deep Learning
+<br/>1FDNA Inc., Boston, Massachusetts, USA
+<br/><b>Sackler Faculty of Medicine, Tel Aviv University, Tel Aviv, Israel</b><br/><b>Recanati Genetic Institute, Rabin Medical Center and Schneider Children s Medical Center, Petah Tikva, Israel</b><br/><b>Institute for Genomic Statistic and Bioinformatics, University Hospital Bonn</b><br/><b>Rheinische-Friedrich-Wilhelms University, Bonn, Germany</b><br/><b>Institute of Human Genetics, University Hospital Magdeburg, Magdeburg, Germany</b><br/><b>University of California, San Diego, California, USA</b><br/>7Division of Genetics/Dysmorphology, Rady Children’s Hospital San Diego, San Diego, California, USA
+<br/>8Division of Medical Genetics, A. I. du Pont Hospital for Children/Nemours, Wilmington, Delaware,USA
+<br/>Boston 186 South St. 5th Floor, Boston, MA 02111 U.S.A., Tel: +1 (617) 412-7000
+<br/>Conflict of interest: YG, YH, OB, NF, DG are employees of FDNA; LBS is an advisor of FDNA;
+<br/>LBS, PK, LMB, KWG are members of the scientific advisory board of FDNA
+</td><td>('2916582', 'Yaron Gurovich', 'yaron gurovich')<br/>('1917486', 'Yair Hanani', 'yair hanani')<br/>('40142952', 'Omri Bar', 'omri bar')<br/>('40443403', 'Nicole Fleischer', 'nicole fleischer')<br/>('35487552', 'Dekel Gelbman', 'dekel gelbman')<br/>('20717247', 'Lina Basel-Salmon', 'lina basel-salmon')<br/>('4346029', 'Martin Zenker', 'martin zenker')<br/>('6335877', 'Lynne M. Bird', 'lynne m. bird')<br/>('5404116', 'Karen W. Gripp', 'karen w. gripp')</td><td></td></tr><tr><td>568cff415e7e1bebd4769c4a628b90db293c1717</td><td>Proceedings of the Thirtieth AAAI Conference on Artificial Intelligence (AAAI-16)
+<br/>Concepts Not Alone: Exploring Pairwise Relationships
+<br/>for Zero-Shot Video Activity Recognition
+<br/><b>IIIS, Tsinghua University, Beijing, China</b><br/><b>QCIS, University of Technology Sydney, Sydney, Australia</b><br/><b>DCMandB, University of Michigan, Ann Arbor, USA 4 SCS, Carnegie Mellon University, Pittsburgh, USA</b></td><td>('2551285', 'Chuang Gan', 'chuang gan')<br/>('2735055', 'Ming Lin', 'ming lin')<br/>('39033919', 'Yi Yang', 'yi yang')<br/>('1732213', 'Gerard de Melo', 'gerard de melo')<br/>('7661726', 'Alexander G. Hauptmann', 'alexander g. hauptmann')</td><td></td></tr><tr><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td><td>Face Recognition in Unconstrained Videos with Matched Background Similarity
+<br/><b>The Blavatnik School of Computer Science, Tel-Aviv University, Israel</b><br/><b>Computer Science Division, The Open University of Israel</b></td><td>('1776343', 'Lior Wolf', 'lior wolf')<br/>('3352629', 'Itay Maoz', 'itay maoz')</td><td></td></tr><tr><td>56a677c889e0e2c9f68ab8ca42a7e63acf986229</td><td>Mining Spatial and Spatio-Temporal ROIs for Action Recognition
+<br/>Jiang Wang2 Alan Yuille1,3
+<br/><b>University of California, Los Angeles</b><br/><b>Baidu Research, USA 3John Hopkins University</b></td><td>('5964529', 'Xiaochen Lian', 'xiaochen lian')</td><td>{lianxiaochen@,yuille@stat.}ucla.edu
+<br/>{chenzhuoyuan,yangyi05,wangjiang03}@baidu.com
+</td></tr><tr><td>566038a3c2867894a08125efe41ef0a40824a090</td><td>978-1-4244-2354-5/09/$25.00 ©2009 IEEE
+<br/>1945
+<br/>ICASSP 2009
+</td><td></td><td></td></tr><tr><td>56dca23481de9119aa21f9044efd7db09f618704</td><td>Riemannian Dictionary Learning and Sparse
+<br/>Coding for Positive Definite Matrices
+</td><td>('2691929', 'Anoop Cherian', 'anoop cherian')<br/>('3072326', 'Suvrit Sra', 'suvrit sra')</td><td></td></tr><tr><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td><td>Distance Estimation of an Unknown Person
+<br/>from a Portrait
+<br/>1 Technicolor - Cesson S´evign´e, France
+<br/><b>California Institute of Technology, Pasadena, CA, USA</b></td><td>('2232848', 'Xavier P. Burgos-Artizzu', 'xavier p. burgos-artizzu')<br/>('3339867', 'Matteo Ruggero Ronchi', 'matteo ruggero ronchi')<br/>('1690922', 'Pietro Perona', 'pietro perona')</td><td>xavier.burgos@technicolor.com, {mronchi,perona}@caltech.edu
+</td></tr><tr><td>56f812661c3248ed28859d3b2b39e033b04ae6ae</td><td>Multiple Feature Fusion by Subspace Learning
+<br/><b>Beckman Institute</b><br/><b>University of Illinois at</b><br/>Urbana-Champaign
+<br/>Urbana, IL 61801, USA
+<br/>Durham, NC 27707, USA
+<br/>Computer Science
+<br/>North Carolina Central
+<br/><b>University</b><br/><b>Beckman Institute</b><br/><b>University of Illinois at</b><br/>Urbana-Champaign
+<br/>Urbana, IL 61801, USA
+</td><td>('1708679', 'Yun Fu', 'yun fu')<br/>('37575012', 'Liangliang Cao', 'liangliang cao')<br/>('1822413', 'Guodong Guo', 'guodong guo')<br/>('1739208', 'Thomas S. Huang', 'thomas s. huang')</td><td>{yunfu2,cao4}@uiuc.edu
+<br/>gdguo@nccu.edu
+<br/>huang@ifp.uiuc.edu
+</td></tr><tr><td>516a27d5dd06622f872f5ef334313350745eadc3</td><td>> REPLACE THIS LINE WITH YOUR PAPER IDENTIFICATION NUMBER (DOUBLE-CLICK HERE TO EDIT) <
+<br/>1
+<br/>Fine-Grained Facial Expression Analysis Us-
+<br/>ing Dimensional Emotion Model
+<br/>
+</td><td>('41179750', 'Feng Zhou', 'feng zhou')<br/>('34362536', 'Shu Kong', 'shu kong')<br/>('3157443', 'Charless C. Fowlkes', 'charless c. fowlkes')<br/>('29889388', 'Tao Chen', 'tao chen')<br/>('40216538', 'Baiying Lei', 'baiying lei')</td><td></td></tr><tr><td>512befa10b9b704c9368c2fbffe0dc3efb1ba1bf</td><td>Evidence and a Computational Explanation of Cultural Differences in
+<br/>Facial Expression Recognition
+<br/>Matthew N. Dailey
+<br/>Computer Science and Information Management
+<br/><b>Asian Institute of Technology, Pathumthani, Thailand</b><br/>Computer Science and Engineering
+<br/><b>University of California, San Diego, USA</b><br/>Michael J. Lyons
+<br/><b>College of Image Arts and Sciences</b><br/><b>Ritsumeikan University, Kyoto, Japan</b><br/>Faculty of Informatics
+<br/><b>Kogakuin University, Tokyo, Japan</b><br/>Department of Design and Computer Applications
+<br/><b>Sendai National College of Technology, Natori, Japan</b><br/>Department of Psychology
+<br/><b>Tohoku University, Sendai, Japan</b><br/>Garrison W. Cottrell
+<br/>Computer Science and Engineering
+<br/><b>University of California, San Diego, USA</b><br/>Facial expressions are crucial to human social communication, but the extent to which they are
+<br/>innate and universal versus learned and culture dependent is a subject of debate. Two studies
+<br/>explored the effect of culture and learning on facial expression understanding. In Experiment
+<br/>better than the other at classifying facial expressions posed by members of the same culture.
+<br/>In Experiment 2, this reciprocal in-group advantage was reproduced by a neurocomputational
+<br/>model trained in either a Japanese cultural context or an American cultural context. The model
+<br/>demonstrates how each of us, interacting with others in a particular cultural context, learns to
+<br/>recognize a culture-specific facial expression dialect.
+<br/>The scientific literature on innate versus culture-specific
+<br/>years ago, Darwin (1872/1998) argued for innate production
+<br/>of facial expressions based on cross-cultural comparisons.
+<br/>Landis (1924), however, found little agreement between par-
+<br/>ticipants. Woodworth (1938) and Schlosberg (1952) found
+<br/>structure in the disagreement in interpretation, proposing a
+<br/>low-dimensional similarity space characterizing affective fa-
+<br/>cial expressions.
+<br/>Starting in the 1960’s, researchers found more support for
+<br/>facial expressions as innate, universal indicators of particular
+<br/>sions (Tomkins, 1962–1963; Tomkins & McCarter, 1964).
+<br/>Ekman and colleagues found cross-cultural consistency in
+<br/>pressions in both literate and preliterate cultures (Ekman,
+<br/>1972; Ekman, Friesen, O’Sullivan, et al., 1987; Ekman,
+<br/>Sorensen, & Friesen, 1969).
+<br/>Today, researchers disagree on the precise degree to which
+<br/>sal versus culture-specific (Ekman, 1994, 1999b; Fridlund,
+<br/>1994; Izard, 1994; Russell, 1994, 1995), but there appears
+<br/>to be consensus that universal factors interact to some extent
+<br/>with culture-specific learning to produce differences between
+<br/>cultures. A number of modern theories (Ekman, 1999a; Rus-
+<br/>sell & Bullock, 1986; Scherer, 1992; Russell, 1994) attempt
+<br/>to account for these universals and culture-specific varia-
+<br/>tions.
+<br/>Cultural differences in facial expression interpre-
+<br/>tation
+<br/>The early cross-cultural studies on facial expression
+<br/>recognition focused mainly on the question of universality
+<br/>sought to analyze and interpret the cultural differences that
+<br/>came up in those studies. However, a steadily increasing
+<br/>number of studies have focused on the factors underlying
+<br/>cultural differences. These studies either compare the fa-
+<br/>cial expression judgments made by participants from differ-
+<br/>ent cultures or attempt to find the relevant dimensions of
+<br/>culture predicting observed cultural differences. Much of
+<br/>the research was framed by Ekman’s “neuro-cultural” theory
+<br/>elicitors, display rules, and/or consequences due to culture-
+<br/>specific learning.
+<br/>Ekman (1972) and Friesen (1972) proposed display rules
+</td><td>('33597747', 'Carrie Joyce', 'carrie joyce')<br/>('40533190', 'Miyuki Kamachi', 'miyuki kamachi')<br/>('12030857', 'Hanae Ishi', 'hanae ishi')<br/>('8365437', 'Jiro Gyoba', 'jiro gyoba')</td><td></td></tr><tr><td>51c3050fb509ca685de3d9ac2e965f0de1fb21cc</td><td>Fantope Regularization in Metric Learning
+<br/>Marc T. Law
+<br/>Sorbonne Universit´es, UPMC Univ Paris 06, UMR 7606, LIP6, F-75005, Paris, France
+</td><td>('1728523', 'Nicolas Thome', 'nicolas thome')<br/>('1702233', 'Matthieu Cord', 'matthieu cord')</td><td></td></tr><tr><td>516d0d9eb08825809e4618ca73a0697137ebabd5</td><td>Regularizing Long Short Term
+<br/>Memory with 3D Human-Skeleton
+<br/>Sequences for Action Recognition
+<br/><b>Oregon State University</b><br/>CVPR 2016
+</td><td>('3112334', 'Behrooz Mahasseni', 'behrooz mahasseni')<br/>('34917793', 'Sinisa Todorovic', 'sinisa todorovic')</td><td></td></tr><tr><td>519a724426b5d9ad384d38aaf2a4632d3824f243</td><td>WANG et al.: LEARNING OBJECT RECOGNITION FROM DESCRIPTIONS
+<br/>Learning Models for Object Recognition
+<br/>from Natural Language Descriptions
+<br/>School of Computing
+<br/><b>University of Leeds</b><br/>Leeds, UK
+</td><td>('2635321', 'Josiah Wang', 'josiah wang')<br/>('1686341', 'Katja Markert', 'katja markert')<br/>('3056091', 'Mark Everingham', 'mark everingham')</td><td>scs6jwks@comp.leeds.ac.uk
+<br/>markert@comp.leeds.ac.uk
+<br/>me@comp.leeds.ac.uk
+</td></tr><tr><td>5180df9d5eb26283fb737f491623395304d57497</td><td>Scalable Angular Discriminative Deep Metric Learning
+<br/>for Face Recognition
+<br/><b>aCenter for Combinatorics, Nankai University, Tianjin 300071, China</b><br/><b>bCenter for Applied Mathematics, Tianjin University, Tianjin 300072, China</b></td><td>('2143751', 'Bowen Wu', 'bowen wu')</td><td></td></tr><tr><td>51c7c5dfda47647aef2797ac3103cf0e108fdfb4</td><td>CS 395T: Celebrity Look-Alikes ∗
+</td><td>('2362854', 'Adrian Quark', 'adrian quark')</td><td>quark@mail.utexas.edu
+</td></tr><tr><td>519f4eb5fe15a25a46f1a49e2632b12a3b18c94d</td><td>Non-Lambertian Reflectance Modeling and
+<br/>Shape Recovery of Faces using Tensor Splines
+</td><td>('9432255', 'Ritwik Kumar', 'ritwik kumar')<br/>('1765280', 'Angelos Barmpoutis', 'angelos barmpoutis')<br/>('3163927', 'Arunava Banerjee', 'arunava banerjee')<br/>('1733005', 'Baba C. Vemuri', 'baba c. vemuri')</td><td></td></tr><tr><td>518edcd112991a1717856841c1a03dd94a250090</td><td><b>Rice University</b><br/>Endogenous Sparse Recovery
+<br/>by
+<br/>A Thesis Submitted
+<br/>in Partial Fulfillment of the
+<br/>Requirements for the Degree
+<br/>Masters of Science
+<br/>Approved, Thesis Committee:
+<br/>Dr. Richard G. Baraniuk, Chair
+<br/>Victor E. Cameron Professor of Electrical
+<br/>and Computer Engineering
+<br/>Dr. Don H. Johnson
+<br/>J.S. Abercrombie Professor Emeritus of
+<br/>Electrical and Computer Engineering
+<br/>Dr. Wotao Yin
+<br/>Assistant Professor of Computational and
+<br/>Applied Mathematics
+<br/>Houston, Texas
+<br/>December 2011
+</td><td>('1746363', 'Eva L. Dyer', 'eva l. dyer')</td><td></td></tr><tr><td>51683eac8bbcd2944f811d9074a74d09d395c7f3</td><td>Automatic Analysis of Facial Actions:
+<br/>Learning from Transductive, Supervised and
+<br/>Unsupervised Frameworks
+<br/>CMU-RI-TR-17-01
+<br/>January 2017
+<br/><b>The Robotics Institute</b><br/><b>Carnegie Mellon University</b><br/>Pittsburgh, PA 15213
+<br/>Thesis Committee:
+<br/>Fernando De la Torre, Co-chair
+<br/>Submitted in partial fulfillment of the requirements
+<br/>for the degree of Doctor of Philosophy in Robotics.
+</td><td>('39336289', 'Wen-Sheng Chu', 'wen-sheng chu')<br/>('1737918', 'Jeffrey F. Cohn', 'jeffrey f. cohn')<br/>('1820249', 'Simon Lucey', 'simon lucey')<br/>('1770537', 'Deva Ramanan', 'deva ramanan')<br/>('1736042', 'Vladimir Pavlovic', 'vladimir pavlovic')<br/>('39336289', 'Wen-Sheng Chu', 'wen-sheng chu')</td><td></td></tr><tr><td>51faacfa4fb1e6aa252c6970e85ff35c5719f4ff</td><td>Zoom-Net: Mining Deep Feature Interactions for
+<br/>Visual Relationship Recognition
+<br/><b>University of Science and Technology of China, Key Laboratory of Electromagnetic</b><br/>Space Information, the Chinese Academy of Sciences, 2SenseTime Group Limited,
+<br/><b>CUHK-SenseTime Joint Lab, The Chinese University of Hong Kong</b><br/><b>SenseTime-NTU Joint AI Research Centre, Nanyang Technological University</b></td><td>('4332039', 'Guojun Yin', 'guojun yin')<br/>('37145669', 'Lu Sheng', 'lu sheng')<br/>('50677886', 'Bin Liu', 'bin liu')<br/>('1708598', 'Nenghai Yu', 'nenghai yu')<br/>('31843833', 'Xiaogang Wang', 'xiaogang wang')<br/>('49895575', 'Jing Shao', 'jing shao')<br/>('1717179', 'Chen Change Loy', 'chen change loy')</td><td>gjyin@mail.ustc.edu.cn, {flowice,ynh}@ustc.edu.cn, ccloy@ieee.org,
+<br/>{lsheng,xgwang}@ee.cuhk.edu.hk, shaojing@sensetime.com
+</td></tr><tr><td>51cc78bc719d7ff2956b645e2fb61bab59843d2b</td><td>Face and Facial Expression Recognition with an
+<br/>Embedded System for Human-Robot Interaction
+<br/><b>School of Computer Engineering, Sejong University, Seoul, Korea</b></td><td>('2241562', 'Yang-Bok Lee', 'yang-bok lee')<br/>('2706430', 'Yong-Guk Kim', 'yong-guk kim')</td><td>*ykim@sejong.ac.kr
+</td></tr><tr><td>511b06c26b0628175c66ab70dd4c1a4c0c19aee9</td><td>International Journal of Engineering Research and General ScienceVolume 2, Issue 5, August – September 2014
+<br/>ISSN 2091-2730
+<br/>Face Recognition using Laplace Beltrami Operator by Optimal Linear
+<br/>Approximations
+<br/><b>Institute of Engineering and Technology, Alwar, Rajasthan Technical University, Kota(Raj</b><br/><b>Research Scholar (M.Tech, IT), Institute of Engineering and Technology</b></td><td></td><td></td></tr><tr><td>51528cdce7a92835657c0a616c0806594de7513b</td><td></td><td></td><td></td></tr><tr><td>51cb09ee04831b95ae02e1bee9b451f8ac4526e3</td><td>Beyond Short Snippets: Deep Networks for Video Classification
+<br/>Matthew Hausknecht2
+<br/><b>University of Maryland, College Park</b><br/><b>University of Texas at Austin</b><br/><b>Google, Inc</b></td><td>('2340579', 'Joe Yue-Hei Ng', 'joe yue-hei ng')<br/>('1689108', 'Oriol Vinyals', 'oriol vinyals')<br/>('3089272', 'Rajat Monga', 'rajat monga')<br/>('2259154', 'Sudheendra Vijayanarasimhan', 'sudheendra vijayanarasimhan')<br/>('1805076', 'George Toderici', 'george toderici')</td><td>yhng@umiacs.umd.edu
+<br/>mhauskn@cs.utexas.edu
+<br/>svnaras@google.com
+<br/>vinyals@google.com
+<br/>rajatmonga@google.com
+<br/>gtoderici@google.com
+</td></tr><tr><td>514a74aefb0b6a71933013155bcde7308cad2b46</td><td><b>CARNEGIE MELLON UNIVERSITY</b><br/>OPTIMAL CLASSIFIER ENSEMBLES
+<br/>FOR IMPROVED BIOMETRIC VERIFICATION
+<br/>A Dissertation
+<br/>Submitted to the Faculty of Graduate School
+<br/>In Partial Fulfillment of the Requirements
+<br/>for The Degree of
+<br/>DOCTOR OF PHILOSOPHY
+<br/>in
+<br/>ELECTRICAL AND COMPUTER ENGINEERING
+<br/>by
+<br/>COMMITTEE:
+<br/>Advisor: Prof. Vijayakumar Bhagavatula
+<br/>Prof. Tsuhan Chen
+<br/>Prof. David Casasent
+<br/>Prof. Arun Ross
+<br/>Pittsburgh, Pennsylvania
+<br/>January, 2007
+</td><td>('2202489', 'Krithika Venkataramani', 'krithika venkataramani')<br/>('1794486', 'Marios Savvides', 'marios savvides')</td><td></td></tr><tr><td>51a8dabe4dae157aeffa5e1790702d31368b9161</td><td>SPI-J068 00418
+<br/>International Journal of Pattern Recognition
+<br/>and Artificial Intelligence
+<br/>Vol. 19, No. 4 (2005) 513–531
+<br/>c(cid:1) World Scientific Publishing Company
+<br/>FACE RECOGNITION UNDER GENERIC ILLUMINATION
+<br/>BASED ON HARMONIC RELIGHTING
+<br/>Graduate School of Chinese Academy Sciences
+<br/>No. 19, Yuquan Road, Beijing, 100039, P.R. China
+<br/><b>Institute of Computing Technology, CAS</b><br/>No. 6 Kexueyuan South Road, Beijing, 100080, P.R. China
+<br/>The performances of the current face recognition systems suffer heavily from the vari-
+<br/>ations in lighting. To deal with this problem, this paper presents an illumination nor-
+<br/>malization approach by relighting face images to a canonical illumination based on the
+<br/>harmonic images model. Benefiting from the observations that human faces share sim-
+<br/>ilar shape, and the albedos of the face surfaces are quasi-constant, we first estimate
+<br/>the nine low-frequency components of the illumination from the input facial image. The
+<br/>facial image is then normalized to the canonical illumination by re-rendering it using
+<br/>the illumination ratio image technique. For the purpose of face recognition, two kinds of
+<br/>canonical illuminations, the uniform illumination and a frontal flash with the ambient
+<br/>lights, are considered, among which the former encodes merely the texture information,
+<br/>while the latter encodes both the texture and shading information. Our experiments on
+<br/>the CMU-PIE face database and the Yale B face database have shown that the proposed
+<br/>relighting normalization can significantly improve the performance of a face recognition
+<br/>system when the probes are collected under varying lighting conditions.
+<br/>Keywords: Face recognition; varying lighting; harmonic images; lighting estimation;
+<br/>illumination normalization.
+<br/>1. Introduction
+<br/>Face recognition has various potential applications in public security, law enforce-
+<br/>ment and commerce such as mug-shot database matching, identity authentication
+<br/>for credit card or driver license, access control, information security, and video
+<br/>surveillance. In addition, there are many emerging fields that can benefit from face
+<br/><b>recognition, such as human computer interfaces and e-services, including e-home</b><br/>online-shopping and online-banking. Related research activities have significantly
+<br/>increased over the past few years.5,26
+<br/>513
+</td><td>('2343895', 'Laiyun Qing', 'laiyun qing')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('1698902', 'Wen Gao', 'wen gao')<br/>('1691233', 'Bo Du', 'bo du')</td><td>lyqing@jdl.ac.cn
+<br/>sgshan@jdl.ac.cn
+<br/>wgao@jdl.ac.cn
+<br/>bdu@jdl.ac.cn
+</td></tr><tr><td>512b4c8f0f3fb23445c0c2dab768bcd848fa8392</td><td> Analysis and Synthesis of Facial Expressions by Feature-
+<br/>Points Tracking and Deformable Model
+<br/> 1- Faculty of Electrical and Computer Eng.,
+<br/><b>University of Tabriz, Tabriz, Iran</b><br/>2- Department of Electrical Eng.,
+<br/><b>Tarbiat Modarres University, Tehran, Iran</b><br/>in
+<br/>an
+<br/>role
+<br/>essential
+<br/>facial expressions
+<br/>
+</td><td>('3210269', 'H. Seyedarabi', 'h. seyedarabi')<br/>('31092101', 'A. Aghagolzadeh', 'a. aghagolzadeh')<br/>('2052255', 'S. Khanmohammadi', 's. khanmohammadi')<br/>('2922912', 'E. Kabir', 'e. kabir')</td><td>seyedarabi@tahoo.com, aghagol@tabrizu.ac.ir, khan@tabrizu.ac.ir
+</td></tr><tr><td>51eba481dac6b229a7490f650dff7b17ce05df73</td><td>Situation Recognition:
+<br/>Visual Semantic Role Labeling for Image Understanding
+<br/><b>Computer Science and Engineering, University of Washington, Seattle, WA</b><br/><b>Allen Institute for Arti cial Intelligence (AI2), Seattle, WA</b><br/>Figure 1. Six images that depict situations where actors, objects, substances, and locations play roles in an activity. Below each image is a
+<br/>realized frame that summarizes the situation: the left columns (blue) list activity-specific roles (derived from FrameNet, a broad coverage
+<br/>verb lexicon) while the right columns (green) list values (from ImageNet) for each role. Three different activities are shown, highlighting
+<br/>that visual properties can vary widely between role values (e.g., clipping a sheep’s wool looks very different from clipping a dog’s nails).
+</td><td>('2064210', 'Mark Yatskar', 'mark yatskar')<br/>('2270286', 'Ali Farhadi', 'ali farhadi')</td><td>[my89, lsz, ali]@cs.washington.edu
+</td></tr><tr><td>5173a20304ea7baa6bfe97944a5c7a69ea72530f</td><td>Sensors 2013, 13, 12830-12851; doi:10.3390/s131012830
+<br/>OPEN ACCESS
+<br/>sensors
+<br/>ISSN 1424-8220
+<br/>www.mdpi.com/journal/sensors
+<br/>Article
+<br/>Best Basis Selection Method Using Learning Weights for
+<br/>Face Recognition
+<br/><b>The School of Electrical and Electronic Engineering, Yonsei University, 134 Shinchon-Dong</b><br/><b>The School of Electrical Electronic and Control Engineering, Kongju National University</b><br/>275 Budae-Dong, Seobuk-Gu, Cheonan, Chungnam 331-717, Korea
+<br/>Tel.: +82-41-521-9168; Fax: +82-41-563-3689.
+<br/>Received: 24 July 2013; in revised form: 26 August 2013 / Accepted: 16 September 2013/
+<br/>Published: 25 September 2013
+</td><td>('1801849', 'Wonju Lee', 'wonju lee')<br/>('2840643', 'Minkyu Cheon', 'minkyu cheon')<br/>('2638048', 'Chang-Ho Hyun', 'chang-ho hyun')<br/>('1718637', 'Mignon Park', 'mignon park')</td><td>Seodaemun-Gu, Seoul 120-749, Korea; E-Mails: delicado@yonsei.ac.kr (W.L.);
+<br/>1000minkyu@gmail.com (M.C.); mignpark@yonsei.ac.kr (M.P.)
+<br/>* Author to whom correspondence should be addressed; E-Mail: hyunch@kongju.ac.kr;
+</td></tr><tr><td>51ed4c92cab9336a2ac41fa8e0293c2f5f9bf3b6</td><td>Computing and Informatics, Vol. 22, 2003, ??–??
+<br/>A SURVEY OF FACE DETECTION, EXTRACTION
+<br/>AND RECOGNITION
+<br/>National Storage System Laboratory
+<br/>School of Software Engineering
+<br/><b>Huazhong University of Science and Technology</b><br/>Wuhan, 430074, P. R. China
+<br/>Manuscript received 23 June 2002; revised 27 January 2003
+<br/>Communicated by Ladislav Hluch´y
+</td><td>('2366162', 'Yongzhong Lu', 'yongzhong lu')<br/>('1711876', 'Jingli Zhou', 'jingli zhou')<br/>('1714618', 'Shengsheng Yu', 'shengsheng yu')</td><td>e-mail: luyongz0@sohu.com
+</td></tr><tr><td>5161e38e4ea716dcfb554ccb88901b3d97778f64</td><td>SSPP-DAN: DEEP DOMAIN ADAPTATION NETWORK FOR
+<br/>FACE RECOGNITION WITH SINGLE SAMPLE PER PERSON
+<br/>School of Computing, KAIST, Republic of Korea
+</td><td>('2487892', 'Sungeun Hong', 'sungeun hong')<br/>('40506942', 'Woobin Im', 'woobin im')</td><td></td></tr><tr><td>5121f42de7cb9e41f93646e087df82b573b23311</td><td>CLASSIFYING ONLINE DATING PROFILES ON TINDER USING FACENET FACIAL
+<br/>EMBEDDINGS
+<br/><b>FL</b></td><td></td><td>Charles F. Jekel (cjekel@ufl.edu; cj@jekel.me) and Raphael T. Haftka
+</td></tr><tr><td>51d1a6e15936727e8dd487ac7b7fd39bd2baf5ee</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+<br/>A Fast and Accurate System for Face Detection,
+<br/>Identification, and Verification
+</td><td>('48467498', 'Rajeev Ranjan', 'rajeev ranjan')<br/>('2068427', 'Ankan Bansal', 'ankan bansal')<br/>('7674316', 'Jingxiao Zheng', 'jingxiao zheng')<br/>('2680836', 'Hongyu Xu', 'hongyu xu')<br/>('35199438', 'Joshua Gleason', 'joshua gleason')<br/>('2927406', 'Boyu Lu', 'boyu lu')<br/>('8435884', 'Anirudh Nanduri', 'anirudh nanduri')<br/>('36407236', 'Jun-Cheng Chen', 'jun-cheng chen')<br/>('38171682', 'Carlos D. Castillo', 'carlos d. castillo')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td></td></tr><tr><td>5141cf2e59fb2ec9bb489b9c1832447d3cd93110</td><td>Learning Person Trajectory Representations for Team Activity Analysis
+<br/><b>Simon Fraser University</b></td><td>('10386960', 'Nazanin Mehrasa', 'nazanin mehrasa')<br/>('19198359', 'Yatao Zhong', 'yatao zhong')<br/>('2123865', 'Frederick Tung', 'frederick tung')<br/>('3004771', 'Luke Bornn', 'luke bornn')<br/>('10771328', 'Greg Mori', 'greg mori')</td><td>{nmehrasa, yataoz, ftung, lbornn}@sfu.ca, mori@cs.sfu.ca
+</td></tr><tr><td>5185f2a40836a754baaa7419a1abdd1e7ffaf2ad</td><td>A Multimodality Framework for Creating Speaker/Non-Speaker Profile
+<br/>Databases for Real-World Video
+<br/><b>Beckman Institute</b><br/><b>University of Illinois</b><br/>Urbana, IL 61801
+<br/><b>Beckman Institute</b><br/><b>University of Illinois</b><br/>Urbana, IL 61801
+<br/><b>Beckman Institute</b><br/><b>University of Illinois</b><br/>Urbana, IL 61801
+</td><td>('3082579', 'Jehanzeb Abbas', 'jehanzeb abbas')<br/>('1804874', 'Charlie K. Dagli', 'charlie k. dagli')<br/>('1739208', 'Thomas S. Huang', 'thomas s. huang')</td><td>jabbas2@ifp.uiuc.edu
+<br/>dagli@ifp.uiuc.edu
+<br/>huang@ifp.uiuc.edu
+</td></tr><tr><td>511a8cdf2127ef8aa07cbdf9660fe9e0e2dfbde7</td><td>Hindawi
+<br/>Computational Intelligence and Neuroscience
+<br/>Volume 2018, Article ID 4512473, 10 pages
+<br/>https://doi.org/10.1155/2018/4512473
+<br/>Research Article
+<br/>A Community Detection Approach to Cleaning Extremely
+<br/>Large Face Database
+<br/><b>Computer School, University of South China, Hengyang, China</b><br/><b>National Laboratory for Parallel and Distributed Processing, National University of Defense Technology, Changsha, China</b><br/>Received 11 December 2017; Accepted 12 March 2018; Published 22 April 2018
+<br/>Academic Editor: Amparo Alonso-Betanzos
+<br/>permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
+<br/>Though it has been easier to build large face datasets by collecting images from the Internet in this Big Data era, the time-consuming
+<br/>manual annotation process prevents researchers from constructing larger ones, which makes the automatic cleaning of noisy labels
+<br/>highly desirable. However, identifying mislabeled faces by machine is quite challenging because the diversity of a person’s face
+<br/>images that are captured wildly at all ages is extraordinarily rich. In view of this, we propose a graph-based cleaning method that
+<br/>mainly employs the community detection algorithm and deep CNN models to delete mislabeled images. As the diversity of faces is
+<br/>preserved in multiple large communities, our cleaning results have both high cleanness and rich data diversity. With our method, we
+<br/>clean the extremely large MS-Celeb-1M face dataset (approximately 10 million images with noisy labels) and obtain a clean version
+<br/>of it called C-MS-Celeb (6,464,018 images of 94,682 celebrities). By training a single-net model using our C-MS-Celeb dataset,
+<br/>without fine-tuning, we achieve 99.67% at Equal Error Rate on the LFW face recognition benchmark, which is comparable to other
+<br/>state-of-the-art results. This demonstrates the data cleaning positive effects on the model training. To the best of our knowledge,
+<br/>our C-MS-Celeb is the largest clean face dataset that is publicly available so far, which will benefit face recognition researchers.
+<br/>1. Introduction
+<br/>In the last few years, researchers have witnessed the remark-
+<br/>able progress in face recognition due to the significant success
+<br/>of deep convolutional neural networks [1] and the emergence
+<br/>of large scale face datasets [2]. Although the data explosion
+<br/>has made it easier to build datasets by collecting real world
+<br/>images from the Internet [3], constructing a large scale face
+<br/>dataset remains a highly time-consuming and costly task
+<br/>because the mislabeled images returned by search engines
+<br/>need to be manually removed [4]. Thus, automatic cleaning
+<br/>of noisy labels in the raw dataset is strongly desirable.
+<br/>However, identifying mislabeled faces automatically by
+<br/>machine is by no means easy. The main reason for this is that,
+<br/>for faces that are captured wildly, the variation of a man’s faces
+<br/>can be so large that some of his images may easily be identified
+<br/>as someone else’s [5]. Thus, a machine may be misled by this
+<br/>rich data diversity within one person and delete correctly
+<br/>labeled images. For example, if old faces of a man are the
+<br/>majority in the dataset, a young face of him may be regarded
+<br/>as someone else and removed. Another challenge is that, due
+<br/>to the ambiguity of people’s names, searching for someone’s
+<br/>pictures online usually returns images from multiple people
+<br/>[2], which requires the cleaning method to be tolerant to the
+<br/>high proportion of noisy labels in the raw dataset constructed
+<br/>by online searching.
+<br/>In order to clean noisy labels and meanwhile preserve
+<br/>the rich data diversity of various faces, we propose a three-
+<br/>stage graph-based method to clean large face datasets using
+<br/>the community detection algorithm. For each image in the
+<br/>raw dataset, we firstly use pretrained deep CNN models to
+<br/>align the face and extract a feature vector to represent each
+<br/>face. Secondly, for features of the same identity, based on the
+<br/>cosine similarity between different features, we construct an
+<br/>undirected graph, named “face similarity graph,” to quantify
+<br/>the similarity between different images. After deleting weak
+<br/>edges and applying the community detection algorithm, we
+<br/>delete mislabeled images by removing minor communities. In
+<br/>the last stage, we try to relabel each previously deleted image
+</td><td>('3335298', 'Chi Jin', 'chi jin')<br/>('9856301', 'Ruochun Jin', 'ruochun jin')<br/>('38536592', 'Kai Chen', 'kai chen')<br/>('1791001', 'Yong Dou', 'yong dou')<br/>('3335298', 'Chi Jin', 'chi jin')</td><td>Correspondence should be addressed to Ruochun Jin; sczjrc@163.com
+</td></tr><tr><td>51d048b92f6680aca4a8adf07deb380c0916c808</td><td>This is the accepted version of the following article: "State of the Art on Monocular 3D Face Reconstruction, Tracking, and Applications",
+<br/>which has been published in final form at http://onlinelibrary.wiley.com. This article may be used for non-commercial purposes in accordance
+<br/>with the Wiley Self-Archiving Policy [http://olabout.wiley.com/WileyCDA/Section/id-820227.html].
+<br/>EUROGRAPHICS 2018
+<br/>K. Hildebrandt and C. Theobalt
+<br/>(Guest Editors)
+<br/>Volume 37 (2018), Number 2
+<br/>STAR – State of The Art Report
+<br/>State of the Art on Monocular 3D Face
+<br/>Reconstruction, Tracking, and Applications
+<br/>M. Zollhöfer1,2
+<br/>J. Thies3 P. Garrido1,5 D. Bradley4 T. Beeler4 P. Pérez5 M. Stamminger6 M. Nießner3 C. Theobalt1
+<br/><b>Max Planck Institute for Informatics</b><br/><b>Stanford University</b><br/><b>Technical University of Munich</b><br/>4Disney Research
+<br/>5Technicolor
+<br/><b>University of Erlangen-Nuremberg</b><br/>Figure 1: This state-of-the-art report provides an overview of monocular 3D face reconstruction and tracking, and highlights applications.
+</td><td></td><td></td></tr><tr><td>5134353bd01c4ea36bd007c460e8972b1541d0ad</td><td>Face Recognition with Multi-Resolution Spectral Feature
+<br/>Images
+<br/><b>School of Electrical Engineering and Automation, Anhui University, Hefei, China, Hong Kong Polytechnic</b><br/><b>University, Hong Kong, China, 3 Center for Intelligent Electricity Networks, University of Newcastle, Newcastle, Australia, 4 School of Electrical and Electronic Engineering</b><br/><b>Nanyang Technological University, Singapore, Singapore</b></td><td>('31443079', 'Zhan-Li Sun', 'zhan-li sun')<br/>('1703078', 'Kin-Man Lam', 'kin-man lam')<br/>('50067626', 'Zhao-yang Dong', 'zhao-yang dong')<br/>('40465036', 'Han Wang', 'han wang')<br/>('29927490', 'Qing-wei Gao', 'qing-wei gao')</td><td></td></tr><tr><td>5160569ca88171d5fa257582d161e9063c8f898d</td><td>Local Binary Patterns as an Image Preprocessing for Face Authentication
+<br/><b>IDIAP Research Institute, Martigny, Switzerland</b><br/>Ecole Polytechnique F´ed´erale de Lausanne (EPFL), Switzerland
+</td><td>('16602458', 'Guillaume Heusch', 'guillaume heusch')<br/>('2820403', 'Yann Rodriguez', 'yann rodriguez')</td><td>fheusch, rodrig, marcelg@idiap.ch
+</td></tr><tr><td>5157dde17a69f12c51186ffc20a0a6c6847f1a29</td><td>Evolutionary Cost-sensitive Extreme Learning
+<br/>Machine
+<br/>1
+</td><td>('40613723', 'Lei Zhang', 'lei zhang')<br/>('1698371', 'David Zhang', 'david zhang')</td><td></td></tr><tr><td>51dc127f29d1bb076d97f515dca4cc42dda3d25b</td><td></td><td></td><td></td></tr><tr><td>3d18ce183b5a5b4dcaa1216e30b774ef49eaa46f</td><td>Face Alignment Across Large Poses: A 3D Solution
+<br/>Hailin Shi1
+<br/><b>Institute of Automation, Chinese Academy of Sciences</b><br/><b>Michigan State University</b></td><td>('8362374', 'Xiangyu Zhu', 'xiangyu zhu')<br/>('1718623', 'Zhen Lei', 'zhen lei')<br/>('1759169', 'Xiaoming Liu', 'xiaoming liu')<br/>('34679741', 'Stan Z. Li', 'stan z. li')</td><td>{xiangyu.zhu,zlei,hailin.shi,szli}@nlpr.ia.ac.cn
+<br/>liuxm@msu.edu
+</td></tr><tr><td>3d143cfab13ecd9c485f19d988242e7240660c86</td><td>Discriminative Collaborative Representation for
+<br/>Classification
+<br/><b>Academic Center for Computing and Media Studies, Kyoto University, Kyoto 606-8501, Japan</b><br/><b>Institute of Scienti c and Industrial Research, Osaka University, Ibaraki-shi 567-0047, Japan</b><br/>3 OMRON Social Solutions Co., LTD, Kyoto 619-0283, Japan
+</td><td>('2549020', 'Yang Wu', 'yang wu')<br/>('40400215', 'Wei Li', 'wei li')<br/>('1707934', 'Masayuki Mukunoki', 'masayuki mukunoki')<br/>('1681266', 'Michihiko Minoh', 'michihiko minoh')<br/>('1710195', 'Shihong Lao', 'shihong lao')</td><td>yangwu@mm.media.kyoto-u.ac.jp,seuliwei@126.com,
+<br/>{minoh,mukunoki}@media.kyoto-u.ac.jp,lao_shihong@oss.omron.co.jp
+</td></tr><tr><td>3daafe6389d877fe15d8823cdf5ac15fd919676f</td><td>Human Action Localization
+<br/>with Sparse Spatial Supervision
+</td><td>('2492127', 'Philippe Weinzaepfel', 'philippe weinzaepfel')<br/>('49142153', 'Xavier Martin', 'xavier martin')<br/>('2462253', 'Cordelia Schmid', 'cordelia schmid')</td><td></td></tr><tr><td>3dabf7d853769cfc4986aec443cc8b6699136ed0</td><td>In A. Esposito, N. Bourbakis, N. Avouris, and I. Hatzilygeroudis. (Eds.) Lecture Notes in
+<br/>Computer Science, Vol 5042: Verbal and Nonverbal Features of Human-human and Human-
+<br/>machine Interaction, Springer Verlag, p. 1-21.
+<br/>Data mining spontaneous facial behavior with
+<br/>automatic expression coding
+<br/><b>Institute for Neural Computation, University of California, San Diego, La Jolla, CA</b><br/><b>Human Development and Applied Psychology, University of Toronto, Ontario, Canada</b><br/>0445, USA
+<br/><b>Engineering and Natural Science, Sabanci University, Istanbul, Turkey</b></td><td>('2724380', 'Gwen Littlewort', 'gwen littlewort')<br/>('40322754', 'Esra Vural', 'esra vural')<br/>('2855884', 'Kang Lee', 'kang lee')</td><td>mbartlett@ucsd.edu; gwen@mpmlab.ucsd.edu, movellan@mplab.ucsd.edu,
+<br/>vesra@ucsd.edu, kang.lee@utoronto.ca
+</td></tr><tr><td>3db75962857a602cae65f60f202d311eb4627b41</td><td></td><td></td><td></td></tr><tr><td>3daf1191d43e21a8302d98567630b0e2025913b0</td><td>Can Autism be Catered with Artificial Intelligence-Assisted Intervention
+<br/>Technology? A Literature Review
+<br/><b>Faculty of Information Technology, Barrett Hodgson University, Karachi, Pakistan</b><br/>†Universit´e Claude Bernard Lyon 1, France
+</td><td>('38817141', 'Muhammad Shoaib Jaliawala', 'muhammad shoaib jaliawala')<br/>('1943666', 'Rizwan Ahmed Khan', 'rizwan ahmed khan')</td><td></td></tr><tr><td>3d36f941d8ec613bb25e80fb8f4c160c1a2848df</td><td>Out-of-sample generalizations for supervised
+<br/>manifold learning for classification
+</td><td>('12636684', 'Elif Vural', 'elif vural')<br/>('1780587', 'Christine Guillemot', 'christine guillemot')</td><td></td></tr><tr><td>3d5a1be4c1595b4805a35414dfb55716e3bf80d8</td><td>Hidden Two-Stream Convolutional Networks for
+<br/>Action Recognition
+</td><td>('1749901', 'Yi Zhu', 'yi zhu')<br/>('7661726', 'Alexander G. Hauptmann', 'alexander g. hauptmann')</td><td></td></tr><tr><td>3d62b2f9cef997fc37099305dabff356d39ed477</td><td>Joint Face Alignment and 3D Face
+<br/>Reconstruction with Application to Face
+<br/>Recognition
+</td><td>('33320460', 'Feng Liu', 'feng liu')<br/>('7345195', 'Qijun Zhao', 'qijun zhao')<br/>('1759169', 'Xiaoming Liu', 'xiaoming liu')<br/>('39422721', 'Dan Zeng', 'dan zeng')</td><td></td></tr><tr><td>3dc522a6576c3475e4a166377cbbf4ba389c041f</td><td></td><td></td><td></td></tr><tr><td>3dd4d719b2185f7c7f92cc97f3b5a65990fcd5dd</td><td>Ensemble of Hankel Matrices for
+<br/>Face Emotion Recognition
+<br/>DICGIM, Universit´a degli Studi di Palermo,
+<br/>V.le delle Scienze, Ed. 6, 90128 Palermo, Italy,
+<br/>DRAFT
+<br/>To appear in ICIAP 2015
+</td><td>('1711610', 'Liliana Lo Presti', 'liliana lo presti')<br/>('9127836', 'Marco La Cascia', 'marco la cascia')</td><td>liliana.lopresti@unipa.it
+</td></tr><tr><td>3d1a6a5fd5915e0efb953ede5af0b23debd1fc7f</td><td>Proceedings of the Pakistan Academy of Sciences 52 (1): 27–38 (2015)
+<br/>Copyright © Pakistan Academy of Sciences
+<br/>ISSN: 0377 - 2969 (print), 2306 - 1448 (online)
+<br/> Pakistan Academy of Sciences
+<br/>Research Article
+<br/>Bimodal Human Emotion Classification in the
+<br/>Speaker-Dependent Scenario
+<br/><b>University of Peshawar, Peshawar, Pakistan</b><br/><b>University of Engineering and Technology</b><br/><b>Sarhad University of Science and Information Technology</b><br/><b>University of Peshawar, Peshawar, Pakistan</b><br/>Peshawar, Pakistan
+<br/>Peshawar, Pakistan
+<br/>
+</td><td>('34267835', 'Sanaul Haq', 'sanaul haq')<br/>('3124216', 'Tariqullah Jan', 'tariqullah jan')<br/>('1766329', 'Muhammad Asif', 'muhammad asif')<br/>('1710701', 'Amjad Ali', 'amjad ali')<br/>('40332145', 'Naveed Ahmad', 'naveed ahmad')</td><td></td></tr><tr><td>3d0379688518cc0e8f896e30815d0b5e8452d4cd</td><td>Autotagging Facebook:
+<br/>Social Network Context Improves Photo Annotation
+<br/><b>Harvard University</b><br/>Todd Zickler
+<br/><b>Harvard University</b><br/>UC Berkeley EECS & ICSI
+</td><td>('2201347', 'Zak Stone', 'zak stone')<br/>('1753210', 'Trevor Darrell', 'trevor darrell')</td><td>zstone@fas.harvard.edu
+<br/>zickler@seas.harvard.edu
+<br/>trevor@eecs.berkeley.edu
+</td></tr><tr><td>3dda181be266950ba1280b61eb63ac11777029f9</td><td></td><td></td><td></td></tr><tr><td>3d24b386d003bee176a942c26336dbe8f427aadd</td><td>Sequential Person Recognition in Photo Albums with a Recurrent Network∗
+<br/><b>The University of Adelaide, Australia</b></td><td>('39948681', 'Yao Li', 'yao li')<br/>('2604251', 'Guosheng Lin', 'guosheng lin')<br/>('3194022', 'Bohan Zhuang', 'bohan zhuang')<br/>('2161037', 'Lingqiao Liu', 'lingqiao liu')<br/>('1780381', 'Chunhua Shen', 'chunhua shen')<br/>('5546141', 'Anton van den Hengel', 'anton van den hengel')</td><td></td></tr><tr><td>3dcebd4a1d66313dcd043f71162d677761b07a0d</td><td> Yerel Đkili Örüntü Ortamında Yerel Görünüme Dayalı Yüz Tanıma
+<br/>Local Binary Pattern Domain Local Appearance Face Recognition
+<br/>Hazım K. Ekenel1, Mika Fischer1, Erkin Tekeli2, Rainer Stiefelhagen1, Aytül Erçil2
+<br/>1Institut für Theorestische Informatik, Universität Karlsruhe (TH), Karlsruhe, Germany
+<br/><b>Faculty of Engineering and Natural Sciences, Sabanc University, stanbul, Turkey</b><br/>Özetçe
+<br/>Bu bildiride, ayrık kosinüs dönüşümü tabanlı yerel görünüme
+<br/>dayalı yüz tanıma algoritması ile yüz imgelerinin yerel ikili
+<br/>örüntüye (YĐÖ) dayalı betimlemesini birleştiren hızlı bir yüz
+<br/>tanıma algoritması sunulmuştur. Bu tümleştirmedeki amaç,
+<br/>yerel ikili örüntünün dayanıklı imge betimleme yeteneği ile
+<br/>ayrık kosinüs dönüşümünün derli-toplu veri betimleme
+<br/>yeteneğinden yararlanmaktır. Önerilen yaklaşımda, yerel
+<br/>görünümün modellenmesinden önce girdi yüz imgesi yerel
+<br/>ikili örüntü ile betimlenmiştir. Elde edilen YĐÖ betimlemesi,
+<br/>birbirleri ile örtüşmeyen bloklara ayrılmış ve her blok
+<br/>üzerinde yerel özniteliklerin çıkartımı için ayrık kosinüs
+<br/>dönüşümü uygulanmıştır. Çıkartımı yapılan yerel öznitelikler
+<br/>daha sonra arka arkaya eklenerek global öznitelik vektörü
+<br/>oluşturulmuştur. Önerilen algoritma, CMU PIE ve FRGC
+<br/>versiyon 2 veritabanlarından seçilen yüz imgeleri üzerinde
+<br/>sınanmıştır. Deney sonuçları, tümleşik yöntemin başarımı
+<br/>önemli ölçüde arttırdığını göstermiştir.
+</td><td></td><td>{ekenel,mika.fischer,stiefel}@ira.uka.de, {erkintekeli,aytulercil}@sabanciuniv.edu
+</td></tr><tr><td>3d0f9a3031bee4b89fab703ff1f1d6170493dc01</td><td>SVDD-Based Illumination Compensation
+<br/>for Face Recognition
+<br/><b>The Robotics Institute, Carnegie Mellon University</b><br/>5000 Forbes Ave., Pittsburgh, PA 15213, USA
+<br/><b>Center for Arti cial Vision Research, Korea University</b><br/>Anam-dong, Seongbuk-ku, Seoul 136-713, Korea
+</td><td>('2348968', 'Sang-Woong Lee', 'sang-woong lee')<br/>('1703007', 'Seong-Whan Lee', 'seong-whan lee')</td><td>rhiephil@cs.cmu.edu
+<br/>swlee@image.korea.ac.kr
+</td></tr><tr><td>3d6ee995bc2f3e0f217c053368df659a5d14d5b5</td><td></td><td></td><td></td></tr><tr><td>3d0c21d4780489bd624a74b07e28c16175df6355</td><td>Deep or Shallow Facial Descriptors? A Case for
+<br/>Facial Attribute Classification and Face Retrieval
+<br/>1 Faculty of Engineering,
+<br/><b>Multimedia University, Cyberjaya, Malaysia</b><br/>2 Faculty of Computing & Informatics,
+<br/><b>Multimedia University, Cyberjaya, Malaysia</b></td><td>('3366793', 'Rasoul Banaeeyan', 'rasoul banaeeyan')<br/>('31612015', 'Mohd Haris Lye', 'mohd haris lye')<br/>('4759494', 'Mohammad Faizal Ahmad Fauzi', 'mohammad faizal ahmad fauzi')<br/>('2339975', 'John See', 'john see')</td><td>banaeeyan@gmail.com, {haris.lye, faizal1, hezerul, johnsee}@mmu.edu.my
+</td></tr><tr><td>3df8cc0384814c3fb05c44e494ced947a7d43f36</td><td>The Pose Knows: Video Forecasting by Generating Pose Futures
+<br/><b>Carnegie Mellon University</b><br/>5000 Forbes Avenue, Pittsburgh, PA 15213
+</td><td>('14192361', 'Jacob Walker', 'jacob walker')<br/>('35789996', 'Kenneth Marino', 'kenneth marino')<br/>('1737809', 'Abhinav Gupta', 'abhinav gupta')<br/>('1709305', 'Martial Hebert', 'martial hebert')</td><td>{jcwalker, kdmarino, abhinavg, hebert}@cs.cmu.edu
+</td></tr><tr><td>3d42e17266475e5d34a32103d879b13de2366561</td><td>Proc.4thIEEEInt’lConf.AutomaticFace&GestureRecognition,Grenoble,France,pp264–270
+<br/>The Global Dimensionality of Face Space
+<br/>(cid:3)
+<br/>http://venezia.rockefeller.edu/
+<br/><b>The Rockefeller University</b><br/>Laboratory of Computational Neuroscience
+<br/>Laboratory for Applied Mathematics
+<br/>Mount Sinai School of Medicine
+<br/>c(cid:13) IEEE2000
+<br/>1230 York Avenue, New York, NY 10021
+<br/>One Gustave L. Levy Place, New York, NY 10029
+</td><td>('2939761', 'Penio S. Penev', 'penio s. penev')<br/>('3266322', 'Lawrence Sirovich', 'lawrence sirovich')</td><td>PenevPS@IEEE.org
+<br/>chico@camelot.mssm.edu
+</td></tr><tr><td>3dd906bc0947e56d2b7bf9530b11351bbdff2358</td><td></td><td></td><td></td></tr><tr><td>3dfd94d3fad7e17f52a8ae815eb9cc5471172bc0</td><td>Face2Text: Collecting an Annotated Image Description Corpus for the
+<br/>Generation of Rich Face Descriptions
+<br/><b>University of Malta</b><br/><b>University of Copenhagen</b></td><td>('1700894', 'Albert Gatt', 'albert gatt')<br/>('32227979', 'Marc Tanti', 'marc tanti')<br/>('35347012', 'Adrian Muscat', 'adrian muscat')<br/>('1782032', 'Patrizia Paggio', 'patrizia paggio')<br/>('2870709', 'Claudia Borg', 'claudia borg')<br/>('3356545', 'Lonneke van der Plas', 'lonneke van der plas')</td><td>{albert.gatt, marc.tanti.06, adrian.muscat, patrizia.paggio, reuben.farrugia}@um.edu.mt
+<br/>{claudia.borg, kenneth.camilleri, mike.rosner, lonneke.vanderplas}@um.edu.mt
+<br/>paggio@hum.ku.dk
+</td></tr><tr><td>3dbfd2fdbd28e4518e2ae05de8374057307e97b3</td><td>Improving Face Detection
+<br/><b>CISUC, University of Coimbra</b><br/><b>Faculty of Computer Science, University of A Coru na, Coru na, Spain</b></td><td>('2045142', 'Penousal Machado', 'penousal machado')<br/>('39583137', 'Juan Romero', 'juan romero')</td><td>3030 Coimbra, Portugal machado@dei.uc.pt, jncor@dei.uc.pt
+<br/>jj@udc.pt
+</td></tr><tr><td>3df7401906ae315e6aef3b4f13126de64b894a54</td><td>Robust Learning of Discriminative Projection for Multicategory Classification on
+<br/>the Stiefel Manifold
+<br/><b>Curtin University of Technology</b><br/>GPO Box U1987, Perth, WA 6845, Australia
+</td><td>('1725024', 'Duc-Son Pham', 'duc-son pham')<br/>('1679520', 'Svetha Venkatesh', 'svetha venkatesh')</td><td>dspham@ieee.org, svetha@cs.curtin.edu.au
+</td></tr><tr><td>3d68cedd80babfbb04ab197a0b69054e3c196cd9</td><td>Bimodal Information Analysis for Emotion Recognition
+<br/>Master of Engineering
+<br/>Department of Electrical and Computer Engineering
+<br/><b>McGill University</b><br/>Montreal, Quebec
+<br/>October 2009
+<br/>Revised: February 2010
+<br/><b>A Thesis submitted to McGill University in partial fulfillment of the requirements for the</b><br/>degree of Master of Engineering
+<br/>i
+</td><td>('2376514', 'Malika Meghjani', 'malika meghjani')<br/>('2376514', 'Malika Meghjani', 'malika meghjani')</td><td></td></tr><tr><td>3dfb822e16328e0f98a47209d7ecd242e4211f82</td><td>Cross-Age LFW: A Database for Studying Cross-Age Face Recognition in
+<br/>Unconstrained Environments
+<br/><b>Beijing University of Posts and Telecommunications</b><br/>Beijing 100876,China
+</td><td>('15523767', 'Tianyue Zheng', 'tianyue zheng')<br/>('1774956', 'Weihong Deng', 'weihong deng')<br/>('23224233', 'Jiani Hu', 'jiani hu')</td><td>2231135739@qq.com, whdeng@bupt.edu.cn, 40902063@qq.com
+</td></tr><tr><td>3d1af6c531ebcb4321607bcef8d9dc6aa9f0dc5a</td><td>1892
+<br/>Random Multispace Quantization as
+<br/>an Analytic Mechanism for BioHashing
+<br/>of Biometric and Random Identity Inputs
+</td><td>('2124820', 'Alwyn Goh', 'alwyn goh')</td><td></td></tr><tr><td>3d6943f1573f992d6897489b73ec46df983d776c</td><td></td><td></td><td></td></tr><tr><td>3d948e4813a6856e5b8b54c20e50cc5050e66abe</td><td>A Smart Phone Image Database for Single
+<br/>Image Recapture Detection
+<br/><b>Institute for Infocomm Research, A*STAR, Singapore</b><br/>2 Department of Electrical and Computer Engineering
+<br/><b>National University of Singapore, Singapore</b><br/>3 Department of Electrical and Computer Engineering
+<br/><b>New Jersey Institute of Technology, USA</b></td><td>('2740420', 'Xinting Gao', 'xinting gao')<br/>('2821964', 'Bo Qiu', 'bo qiu')<br/>('3138499', 'JingJing Shen', 'jingjing shen')<br/>('2475944', 'Tian-Tsong Ng', 'tian-tsong ng')</td><td>{xgao, qiubo, ttng}@i2r.a-star.eud.sg
+<br/>shenjingjing89@gmail.com
+<br/>shi@njit.edu
+</td></tr><tr><td>3d94f81cf4c3a7307e1a976dc6cb7bf38068a381</td><td>3846
+<br/>Data-Dependent Label Distribution Learning
+<br/>for Age Estimation
+</td><td>('3276410', 'Zhouzhou He', 'zhouzhou he')<br/>('40613648', 'Xi Li', 'xi li')<br/>('1720488', 'Zhongfei Zhang', 'zhongfei zhang')<br/>('28342797', 'Fei Wu', 'fei wu')<br/>('1735299', 'Xin Geng', 'xin geng')<br/>('2998634', 'Yaqing Zhang', 'yaqing zhang')<br/>('37144787', 'Ming-Hsuan Yang', 'ming-hsuan yang')<br/>('1755711', 'Yueting Zhuang', 'yueting zhuang')</td><td></td></tr><tr><td>3d9db1cacf9c3bb7af57b8112787b59f45927355</td><td>Original research
+<br/>published: 20 June 2016
+<br/>doi: 10.3389/fict.2016.00011
+<br/>improving Medical students’
+<br/>awareness of Their non-Verbal
+<br/>communication through automated
+<br/>non-Verbal Behavior Feedback
+<br/><b>School of Electrical and Information Engineering, The University of Sydney, Sydney, NSW, Australia, 2 Sydney Medical</b><br/><b>School, The University of Sydney, Sydney, NSW, Australia</b><br/>The non-verbal communication of clinicians has an impact on patients’ satisfaction and
+<br/>health outcomes. Yet medical students are not receiving enough training on the appropri-
+<br/>ate non-verbal behaviors in clinical consultations. Computer vision techniques have been
+<br/>used for detecting different kinds of non-verbal behaviors, and they can be incorporated
+<br/>in educational systems that help medical students to develop communication skills.
+<br/>We describe EQClinic, a system that combines a tele-health platform with automated
+<br/>non-verbal behavior recognition. The system aims to help medical students improve
+<br/>their communication skills through a combination of human and automatically generated
+<br/>feedback. EQClinic provides fully automated calendaring and video conferencing features
+<br/>for doctors or medical students to interview patients. We describe a pilot (18 dyadic
+<br/>interactions) in which standardized patients (SPs) (i.e., someone acting as a real patient)
+<br/>were interviewed by medical students and provided assessments and comments about
+<br/>their performance. After the interview, computer vision and audio processing algorithms
+<br/>were used to recognize students’ non-verbal behaviors known to influence the quality of
+<br/>a medical consultation: including turn taking, speaking ratio, sound volume, sound pitch,
+<br/>smiling, frowning, head leaning, head tilting, nodding, shaking, face-touch gestures and
+<br/>overall body movements. The results showed that students’ awareness of non-verbal
+<br/>communication was enhanced by the feedback information, which was both provided
+<br/>by the SPs and generated by the machines.
+<br/>Keywords: non-verbal communication, non-verbal behavior, clinical consultation, medical education,
+<br/>communication skills, non-verbal behavior detection, automated feedback
+<br/>inTrODUcTiOn
+<br/>Edited by:
+<br/>Leman Figen Gul,
+<br/><b>Istanbul Technical University, Turkey</b><br/>Reviewed by:
+<br/>Marc Aurel Schnabel,
+<br/><b>Victoria University of Wellington</b><br/>New Zealand
+<br/>Antonella Lotti,
+<br/><b>University of Genoa, Italy</b><br/>*Correspondence:
+<br/>Specialty section:
+<br/>This article was submitted
+<br/>to Digital Education,
+<br/>a section of the journal
+<br/>Frontiers in ICT
+<br/>Received: 28 April 2016
+<br/>Accepted: 07 June 2016
+<br/>Published: 20 June 2016
+<br/>Citation:
+<br/>Liu C, Calvo RA and Lim R (2016)
+<br/>Improving Medical Students’
+<br/>Awareness of Their Non-Verbal
+<br/>Communication through Automated
+<br/>Non-Verbal Behavior Feedback.
+<br/>doi: 10.3389/fict.2016.00011
+<br/>Over the last 10 years, we have witnessed a dramatic improvement in affective computing (Picard,
+<br/>2000; Calvo et  al., 2015) and behavior recognition techniques (Vinciarelli et  al., 2012). These
+<br/>techniques have progressed from the recognition of person-specific posed behavior to the more
+<br/>difficult person-independent recognition of behavior in “the-wild” (Vinciarelli et al., 2009). They
+<br/>are considered robust enough that they are being incorporated into new applications. For example,
+<br/>new learning technologies have been developed that detect a student’s emotions and use this to guide
+<br/>the learning experience (Calvo and D’Mello, 2011). They can also be used to support reflection by
+<br/>Frontiers in ICT | www.frontiersin.org
+<br/>June 2016 | Volume 3 | Article 11
+</td><td>('30772945', 'Chunfeng Liu', 'chunfeng liu')<br/>('1742162', 'Rafael A. Calvo', 'rafael a. calvo')<br/>('36807976', 'Renee Lim', 'renee lim')<br/>('1742162', 'Rafael A. Calvo', 'rafael a. calvo')</td><td>rafael.calvo@sydney.edu.au
+</td></tr><tr><td>580f86f1ace1feed16b592d05c2b07f26c429b4b</td><td>Dense-Captioning Events in Videos
+<br/><b>Stanford University</b></td><td>('2580593', 'Ranjay Krishna', 'ranjay krishna')<br/>('35163655', 'Kenji Hata', 'kenji hata')<br/>('3260219', 'Frederic Ren', 'frederic ren')<br/>('3216322', 'Li Fei-Fei', 'li fei-fei')<br/>('9200530', 'Juan Carlos Niebles', 'juan carlos niebles')</td><td>{ranjaykrishna, kenjihata, fren, feifeili, jniebles}@cs.stanford.edu
+</td></tr><tr><td>58d47c187b38b8a2bad319c789a09781073d052d</td><td>Factorizable Net: An Efficient Subgraph-based
+<br/>Framework for Scene Graph Generation
+<br/><b>The Chinese University of Hong Kong, Hong Kong SAR, China</b><br/><b>The University of Sydney, SenseTime Computer Vision Research Group</b><br/>3 MIT CSAIL, USA
+<br/>4 Sensetime Ltd, Beijing, China
+<br/><b>Samsung Telecommunication Research Institute, Beijing, China</b></td><td>('2180892', 'Yikang Li', 'yikang li')<br/>('3001348', 'Wanli Ouyang', 'wanli ouyang')<br/>('1804424', 'Bolei Zhou', 'bolei zhou')<br/>('1788070', 'Jianping Shi', 'jianping shi')<br/>('31843833', 'Xiaogang Wang', 'xiaogang wang')</td><td>{ykli, xgwang}@ee.cuhk.edu.hk, wanli.ouyang@sydney.edu.au,
+<br/>bzhou@csail.mit.edu, shijianping@sensetime.com, c0502.zhang@samsung.com
+</td></tr><tr><td>582edc19f2b1ab2ac6883426f147196c8306685a</td><td>Do We Really Need to Collect Millions of Faces
+<br/>for Effective Face Recognition?
+<br/><b>Institute for Robotics and Intelligent Systems, USC, CA, USA</b><br/><b>Information Sciences Institute, USC, CA, USA</b><br/><b>The Open University of Israel, Israel</b></td><td>('11269472', 'Iacopo Masi', 'iacopo masi')<br/>('2955822', 'Jatuporn Toy Leksut', 'jatuporn toy leksut')<br/>('1756099', 'Tal Hassner', 'tal hassner')</td><td></td></tr><tr><td>5859774103306113707db02fe2dd3ac9f91f1b9e</td><td></td><td></td><td></td></tr><tr><td>5892f8367639e9c1e3cf27fdf6c09bb3247651ed</td><td>Estimating Missing Features to Improve Multimedia Information Retrieval
+</td><td>('2666918', 'Abraham Bagherjeiran', 'abraham bagherjeiran')<br/>('35089151', 'Nicole S. Love', 'nicole s. love')<br/>('1696815', 'Chandrika Kamath', 'chandrika kamath')</td><td></td></tr><tr><td>5850aab97e1709b45ac26bb7d205e2accc798a87</td><td></td><td></td><td></td></tr><tr><td>587f81ae87b42c18c565694c694439c65557d6d5</td><td>DeepFace: Face Generation using Deep Learning
+</td><td>('31560532', 'Hardie Cate', 'hardie cate')<br/>('6415321', 'Fahim Dalvi', 'fahim dalvi')<br/>('8815003', 'Zeshan Hussain', 'zeshan hussain')</td><td>ccate@stanford.edu
+<br/>fdalvi@cs.stanford.edu
+<br/>zeshanmh@stanford.edu
+</td></tr><tr><td>580054294ca761500ada71f7d5a78acb0e622f19</td><td>1331
+<br/>A Subspace Model-Based Approach to Face
+<br/>Relighting Under Unknown Lighting and Poses
+</td><td>('2081318', 'Hyunjung Shim', 'hyunjung shim')<br/>('33642939', 'Jiebo Luo', 'jiebo luo')<br/>('1746230', 'Tsuhan Chen', 'tsuhan chen')</td><td></td></tr><tr><td>587c48ec417be8b0334fa39075b3bfd66cc29dbe</td><td>Journal of Vision (2016) 16(15):28, 1–8
+<br/>Serial dependence in the perception of attractiveness
+<br/><b>University of California</b><br/>Berkeley, CA, USA
+<br/><b>University of California</b><br/>Berkeley, CA, USA
+<br/><b>University of California</b><br/>Berkeley, CA, USA
+<br/><b>Helen Wills Neuroscience Institute, University of</b><br/>California, Berkeley, CA, USA
+<br/><b>Vision Science Group, University of California</b><br/>Berkeley, CA, USA
+<br/>The perception of attractiveness is essential for choices
+<br/>of food, object, and mate preference. Like perception of
+<br/>other visual features, perception of attractiveness is
+<br/>stable despite constant changes of image properties due
+<br/>to factors like occlusion, visual noise, and eye
+<br/>movements. Recent results demonstrate that perception
+<br/>of low-level stimulus features and even more complex
+<br/>attributes like human identity are biased towards recent
+<br/>percepts. This effect is often called serial dependence.
+<br/>Some recent studies have suggested that serial
+<br/>dependence also exists for perceived facial
+<br/>attractiveness, though there is also concern that the
+<br/>reported effects are due to response bias. Here we used
+<br/>an attractiveness-rating task to test the existence of
+<br/>serial dependence in perceived facial attractiveness. Our
+<br/>results demonstrate that perceived face attractiveness
+<br/>was pulled by the attractiveness level of facial images
+<br/>encountered up to 6 s prior. This effect was not due to
+<br/>response bias and did not rely on the previous motor
+<br/>response. This perceptual pull increased as the difference
+<br/>in attractiveness between previous and current stimuli
+<br/>increased. Our results reconcile previously conflicting
+<br/>findings and extend previous work, demonstrating that
+<br/>sequential dependence in perception operates across
+<br/>different levels of visual analysis, even at the highest
+<br/>levels of perceptual interpretation.
+<br/>Introduction
+<br/>Humans make aesthetic judgments all the time about
+<br/>the attractiveness or desirability of objects and scenes.
+<br/>Aesthetic judgments are not merely about judging
+<br/>works of art; they are constantly involved in our daily
+<br/>activity, influencing or determining our choices of food,
+<br/>object (Creusen & Schoormans, 2005), and mate
+<br/>preference (Rhodes, Simmons, & Peters, 2005).
+<br/>Aesthetic judgments are based on perceptual pro-
+<br/>cessing (Arnheim, 1954; Livingstone & Hubel, 2002;
+<br/>Solso, 1996). These judgments, like other perceptual
+<br/>experiences, are thought to be relatively stable in spite
+<br/>of fluctuations in the raw visual input we receive due to
+<br/>factors like occlusion, visual noise, and eye movements.
+<br/>One mechanism that allows the visual system to achieve
+<br/>this stability is serial dependence. Recent results have
+<br/>revealed that the perception of visual features such as
+<br/>orientation (Fischer & Whitney, 2014), numerosity
+<br/>(Cicchini, Anobile, & Burr, 2014), and facial identity
+<br/>(Liberman, Fischer, & Whitney, 2014) are systemati-
+<br/>cally assimilated toward visual input from the recent
+<br/>past. This perceptual pull has been distinguished from
+<br/>hysteresis in motor responses or decision processes, and
+<br/>has been shown to be tuned by the magnitude of the
+<br/>difference between previous and current visual inputs
+<br/>(Fischer & Whitney, 2014; Liberman, Fischer, &
+<br/>Whitney, 2014).
+<br/>Is aesthetics perception similarly stable like feature
+<br/>perception? Some previous studies have suggested that
+<br/>the answer is yes. It has been shown that there is a
+<br/>positive correlation between observers’ successive
+<br/>attractiveness ratings of facial images (Kondo, Taka-
+<br/>hashi, & Watanabe, 2012; Taubert, Van der Burg, &
+<br/>Alais, 2016). This suggests that there is an assimilative
+<br/>sequential dependence in attractiveness judgments.
+<br/>Citation: Xia, Y., Leib, A. Y., & Whitney, D. (2016). Serial dependence in the perception of attractiveness. Journal of Vision,
+<br/>16(15):28, 1–8, doi:10.1167/16.15.28.
+<br/>doi: 10 .116 7 /1 6. 15 . 28
+<br/>Received July 13, 2016; published December 22, 2016
+<br/>ISSN 1534-7362
+<br/>This work is licensed under a Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License.
+</td><td>('37397364', 'Ye Xia', 'ye xia')<br/>('6931574', 'Allison Yamanashi Leib', 'allison yamanashi leib')<br/>('1821337', 'David Whitney', 'david whitney')</td><td></td></tr><tr><td>58081cb20d397ce80f638d38ed80b3384af76869</td><td>Embedded Real-Time Fall Detection Using Deep
+<br/>Learning For Elderly Care
+<br/>Samsung Research, Samsung Electronics
+</td><td>('1729858', 'Hyunwoo Lee', 'hyunwoo lee')<br/>('1784186', 'Jooyoung Kim', 'jooyoung kim')<br/>('32671800', 'Dojun Yang', 'dojun yang')<br/>('3443235', 'Joon-Ho Kim', 'joon-ho kim')</td><td>{hyun0772.lee, joody.kim, dojun.yang, mythos.kim}@samsung.com
+</td></tr><tr><td>581e920ddb6ecfc2a313a3aa6fed3d933b917ab0</td><td>Automatic Mapping of Remote Crowd Gaze to
+<br/>Stimuli in the Classroom
+<br/><b>University of T ubingen, T ubingen, Germany</b><br/>2 Leibniz-Institut f¨ur Wissensmedien, T¨ubingen, Germany
+<br/><b>Hector Research Institute of Education Sciences and Psychology, T ubingen</b><br/>Germany
+</td><td>('2445102', 'Thiago Santini', 'thiago santini')<br/>('24003697', 'Lucas Draghetti', 'lucas draghetti')<br/>('3286609', 'Peter Gerjets', 'peter gerjets')<br/>('2446461', 'Ulrich Trautwein', 'ulrich trautwein')<br/>('1884159', 'Enkelejda Kasneci', 'enkelejda kasneci')</td><td></td></tr><tr><td>58fa85ed57e661df93ca4cdb27d210afe5d2cdcd</td><td>Cancún Center, Cancún, México, December 4-8, 2016
+<br/>978-1-5090-4847-2/16/$31.00 ©2016 IEEE
+<br/>4118
+</td><td></td><td></td></tr><tr><td>5860cf0f24f2ec3f8cbc39292976eed52ba2eafd</td><td>International Journal of Automated Identification Technology, 3(2), July-December 2011, pp. 51-60
+<br/>COMPUTATION EvaBio: A TOOL FOR PERFORMANCE
+<br/>EVALUATION IN BIOMETRICS
+<br/><b>GREYC Laboratory, ENSICAEN - University of Caen Basse Normandie - CNRS</b><br/> 6 Boulevard Maréchal Juin, 14000 Caen Cedex - France
+</td><td>('2774452', 'Julien Mahier', 'julien mahier')<br/>('3356614', 'Baptiste Hemery', 'baptiste hemery')<br/>('2174941', 'Mohamad El-Abed', 'mohamad el-abed')<br/>('1793765', 'Christophe Rosenberger', 'christophe rosenberger')</td><td></td></tr><tr><td>584909d2220b52c0d037e8761d80cb22f516773f</td><td>OCR-Free Transcript Alignment
+<br/>Dept. of Mathematics and Computer Science
+<br/>School of Computer Science
+<br/>School of Computer Science
+<br/><b>The Open University</b><br/>Israel
+<br/><b>Tel Aviv University</b><br/>Tel-Aviv, Israel
+<br/><b>Tel Aviv University</b><br/>Tel-Aviv, Israel
+</td><td>('1756099', 'Tal Hassner', 'tal hassner')<br/>('1776343', 'Lior Wolf', 'lior wolf')<br/>('1759551', 'Nachum Dershowitz', 'nachum dershowitz')</td><td>Email: hassner@openu.ac.il
+<br/>Email: wolf@cs.tau.ac.il
+<br/>Email: nachumd@tau.ac.il
+</td></tr><tr><td>58bf72750a8f5100e0c01e55fd1b959b31e7dbce</td><td>PyramidBox: A Context-assisted Single Shot
+<br/>Face Detector.
+<br/>Baidu Inc.
+</td><td>('48785141', 'Xu Tang', 'xu tang')<br/>('14931829', 'Daniel K. Du', 'daniel k. du')<br/>('31239588', 'Zeqiang He', 'zeqiang he')<br/>('2272123', 'Jingtuo Liu', 'jingtuo liu')</td><td>tangxu02@baidu.com,daniel.kang.du@gmail.com,{hezeqiang,liujingtuo}@baidu.com
+</td></tr><tr><td>58542eeef9317ffab9b155579256d11efb4610f2</td><td>International Journal of Science and Research (IJSR)
+<br/>ISSN (Online): 2319-7064
+<br/>Index Copernicus Value (2013): 6.14 | Impact Factor (2014): 5.611
+<br/>Face Recognition Revisited on Pose, Alignment,
+<br/>Color, Illumination and Expression-PyTen
+<br/>Computer Science, BIT Noida, India
+</td><td></td><td></td></tr><tr><td>58823377757e7dc92f3b70a973be697651089756</td><td>Technical Report
+<br/>UCAM-CL-TR-861
+<br/>ISSN 1476-2986
+<br/>Number 861
+<br/>Computer Laboratory
+<br/>Automatic facial expression analysis
+<br/>October 2014
+<br/>15 JJ Thomson Avenue
+<br/>Cambridge CB3 0FD
+<br/>United Kingdom
+<br/>phone +44 1223 763500
+<br/>http://www.cl.cam.ac.uk/
+</td><td>('1756344', 'Tadas Baltrusaitis', 'tadas baltrusaitis')</td><td></td></tr><tr><td>580e48d3e7fe1ae0ceed2137976139852b1755df</td><td>THE EFFECTS OF MOTION AND ORIENTATION ON PERCEPTION OF
+<br/>FACIAL EXPRESSIONS AND FACE RECOGNITION
+<br/>by
+<br/><b>B.S. University of Indonesia</b><br/><b>M.S. Brunel University of West London</b><br/>Submitted to the Graduate Faculty of
+<br/>Arts and Sciences in partial fulfillment
+<br/>of the requirements for the degree of
+<br/>Doctor of Philosophy
+<br/><b>University of Pittsburgh</b><br/>2002
+</td><td>('2059653', 'Zara Ambadar', 'zara ambadar')</td><td></td></tr><tr><td>5865e824e3d8560e07840dd5f75cfe9bf68f9d96</td><td>RESEARCH ARTICLE
+<br/>Embodied conversational agents for
+<br/>multimodal automated social skills training in
+<br/>people with autism spectrum disorders
+<br/><b>Graduate School of Information Science, Nara Institute of Science and Technology, Ikoma-shi, Nara</b><br/><b>Japan, 2 Center for Special Needs Education, Nara University of Education, Nara-shi, Nara</b><br/>Japan, 3 Developmental Center for Child and Adult, Shigisan Hospital, Ikoma-gun, Nara, 636-0815, Japan
+</td><td>('3162048', 'Hiroki Tanaka', 'hiroki tanaka')<br/>('1867578', 'Hideki Negoro', 'hideki negoro')<br/>('35238212', 'Hidemi Iwasaka', 'hidemi iwasaka')<br/>('40285672', 'Satoshi Nakamura', 'satoshi nakamura')</td><td>* hiroki-tan@is.naist.jp
+</td></tr><tr><td>58bb77dff5f6ee0fb5ab7f5079a5e788276184cc</td><td>Facial Expression Recognition with PCA and LBP
+<br/>Features Extracting from Active Facial Patches
+<br/>
+</td><td>('7895427', 'Yanpeng Liu', 'yanpeng liu')<br/>('16879896', 'Yuwen Cao', 'yuwen cao')<br/>('29275442', 'Yibin Li', 'yibin li')<br/>('1686211', 'Ming Liu', 'ming liu')<br/>('1772484', 'Rui Song', 'rui song')<br/>('1706513', 'Yafang Wang', 'yafang wang')<br/>('40395865', 'Zhigang Xu', 'zhigang xu')<br/>('1708045', 'Xin Ma', 'xin ma')</td><td></td></tr><tr><td>585260468d023ffc95f0e539c3fa87254c28510b</td><td>Cardea: Context–Aware Visual Privacy Protection
+<br/>from Pervasive Cameras
+<br/>HKUST-DT System and Media Laboratory
+<br/><b>Hong Kong University of Science and Technology, Hong Kong</b></td><td>('3432205', 'Jiayu Shu', 'jiayu shu')<br/>('2844817', 'Rui Zheng', 'rui zheng')<br/>('2119751', 'Pan Hui', 'pan hui')</td><td>Email: ∗jshuaa@ust.hk, †rzhengac@ust.hk, ‡panhui@ust.hk
+</td></tr><tr><td>58cb1414095f5eb6a8c6843326a6653403a0ee17</td><td></td><td></td><td></td></tr><tr><td>58db008b204d0c3c6744f280e8367b4057173259</td><td>International Journal of Current Engineering and Technology
+<br/>ISSN 2277 - 4106
+<br/> © 2012 INPRESSCO. All Rights Reserved.
+<br/>Available at http://inpressco.com/category/ijcet
+<br/>Research Article
+<br/>Facial Expression Recognition
+<br/><b>Jaipur, Rajasthan, India</b><br/>Accepted 3June 2012, Available online 8 June 2012
+</td><td>('40621542', 'Riti Kushwaha', 'riti kushwaha')<br/>('2117075', 'Neeta Nain', 'neeta nain')</td><td></td></tr><tr><td>58628e64e61bd2776a2a7258012eabe3c79ca90c</td><td>Active Grounding of Visual Situations
+<br/><b>Portland State University</b><br/><b>Santa Fe Institute</b><br/>Unpublished Draft
+</td><td>('3438473', 'Max H. Quinn', 'max h. quinn')<br/>('27572284', 'Erik Conser', 'erik conser')<br/>('38388831', 'Jordan M. Witte', 'jordan m. witte')<br/>('4421478', 'Melanie Mitchell', 'melanie mitchell')</td><td></td></tr><tr><td>676a136f5978783f75b5edbb38e8bb588e8efbbe</td><td>Matrix Completion for Resolving Label Ambiguity
+<br/><b>UMIACS, University of Maryland, College Park, USA</b><br/>Learning a visual classifier requires a large amount of labeled images
+<br/>and videos. However, labeling images is expensive and time-consuming
+<br/>due to the significant amount of human efforts involved. As a result, brief
+<br/>descriptions such as tags, captions and screenplays accompanying the im-
+<br/>ages and videos become important for training classifiers. Although such
+<br/>information is publicly available, it is not as explicitly labeled as human
+<br/>annotation. For instance, names in the caption of a news photo provide
+<br/>possible candidates for faces appearing in the image [1]. The names in the
+<br/>screenplays are only weakly associated with faces in the shots [4]. The prob-
+<br/>lem in which instead of a single label per instance, one is given a candidate
+<br/>set of labels, of which only one is correct is known as ambiguously labeled
+<br/>learning [2, 6].
+<br/>Ambiguous Labels
+<br/>Disambiguated Labels
+<br/>Class 2
+<br/>MCar
+<br/>Class 1
+<br/>L={1}
+<br/>L={2}
+<br/>L={3}
+<br/>L={1, 2}
+<br/>L={2, 3}
+<br/>L={1, 3}
+<br/>Class 3
+<br/>The ambiguously labeled data is denoted as L = {(x j , L j), j = 1, 2, . . . , N},
+<br/>Figure 1: MCar reassigns the labels for those ambiguously labeled in-
+<br/>stances such that instances of the same subjects cohesively form potentially-
+<br/>separable convex hulls.
+<br/>where N is the number of instances. There are c classes, and the class labels
+<br/>are denoted as Y = {1, 2, . . . , c}. Note that x j is the feature vector of the jth
+<br/>instance, and its ambiguous labeling set L j ⊆ Y consists of the candidate
+<br/>labels associated with the jth instance. The true label of the jth instance is
+<br/>l j ∈ L j. In other words, one of the labels in L j is the true label of x j. The
+<br/>objective is to resolve the ambiguity in L such that each predicted label ˆl j
+<br/>of x j matches its true label l j.
+<br/>We interpret the ambiguous labeling set L j with soft labeling vector p j,
+<br/>where pi, j indicates the probability that instance j belongs to class i. This
+<br/>allows us to quantitatively assign the likelihood of each class the instance
+<br/>belongs to if such information is provided. Without any prior knowledge,
+<br/>we assume equal probability for each candidate label. Let P ∈ Rc×N denotes
+<br/>the ambiguous labeling matrix with p j in its jth column. With this, one can
+<br/>model the ambiguous labeling as P = P0 + EP, where P0 and EP denote the
+<br/>true labeling matrix and the labeling noise, respectively. The jth column
+<br/>vector of P0 is p0
+<br/>j = el j , where el j is the canonical vector corresponding to
+<br/>the 1-of-K coding of its true label l j. Similarly, assuming that the feature
+<br/>vectors are corrupted by some noise or occlusion, the feature matrix X with
+<br/>x j in its jth column can be modeled as X = X0 + EX , where X ∈ Rm×N con-
+<br/>sists of N feature vectors of dimension m, X0 represents the feature matrix
+<br/>in the absence of noise and EX accounts for the noise.
+<br/>Figure 1 shows the geometric interpretation of our proposed method,
+<br/>Matrix Completion for Ambiguity Resolving (MCar). When each element
+<br/>in the ambiguous labeling set is trivially treated as the true label, the convex
+<br/>hulls of each class are erroneously expanded. MCar reassigns the ambiguous
+<br/>labels such that each over-expanded convex hull shrinks to its actual contour,
+<br/>and the convex hulls becomes potentially separable.
+<br/>In the paper, we show that the heterogeneous feature matrix, which is
+<br/>the concatenation of the labeling matrix P and feature matrix X, is ideally
+<br/>low-rank in the absence of noise (Figure 2), which allows us to convert the
+<br/>aforementioned label reassignment problem as a matrix completion prob-
+<br/>lem [5]. The proposed MCar takes the heterogeneous feature matrix as in-
+<br/>put, and returns the predicted labeling matrix Y by solving the following
+<br/>optimization problem
+<br/>=
+<br/>=
+<br/>+
+<br/>+
+<br/> ۾଴
+<br/> ܆଴
+<br/> ۾
+<br/> ܆
+<br/>
+<br/>۳௉
+<br/>۳௑
+<br/>Figure 2: Ideal decomposition of heterogeneous feature matrix using MCar.
+<br/>The underlying low-rank structure and the ambiguous labeling are recovered
+<br/>simultaneously.
+<br/>The proposed method inherits the benefit of low-rank recovery and pos-
+<br/>sesses the capability to resolve the label ambiguity via low-rank approxima-
+<br/>tion of the heterogeneous matrix. As a result, our method is more robust
+<br/>compared to some of the existing discriminative ambiguous learning meth-
+<br/>ods [3, 7], sparsity/dictionary-based method [2], and low-rank representation-
+<br/>based method [8]. Moreover, we generalize MCar to include the labeling
+<br/>constraints between the instances for practical applications. Compared to
+<br/>the state of the arts, our proposed framework achieves 2.9% improvement
+<br/>on the labeling accuracy of the Lost dataset and performs comparably on the
+<br/>Labeled Yahoo! News dataset.
+<br/>[1] T. L. Berg, A. C. Berg, J. Edwards, M. Maire, R. White, Y.-W. Teh,
+<br/>E. Learned-Miller, and D. A. Forsyth. Names and faces in the news. In
+<br/>CVPR, 2004.
+<br/>[2] Y.-C. Chen, V. M. Patel, J. K. Pillai, R. Chellappa, and P. J. Phillips.
+<br/>Dictionary learning from ambiguously labeled data. In CVPR, 2013.
+<br/>[3] T. Cour, B. Sapp, C. Jordan, and B. Taskar. Learning from ambiguously
+<br/>labeled images. In CVPR, 2009.
+<br/>[4] M. Everingham, J. Sivic, and A. Zisserman. Hello! My name is... Buffy
+<br/>(1)
+<br/>- Automatic naming of characters in TV video. In BMVC, 2006.
+<br/>[5] A. B. Goldberg, X. Zhu, B. Recht, J.-M. Xu, and R. D. Nowak. Trans-
+<br/>duction with matrix completion: Three birds with one stone. In NIPS,
+<br/>2010.
+<br/>[6] E. Hüllermeier and J. Beringer. Learning from ambiguously labeled
+<br/>examples. In Intell. Data Anal., 2006.
+<br/>[7] J. Luo and F. Orabona. Learning from candidate labeling sets. In NIPS,
+<br/>2010.
+<br/>min
+<br/>Y,EX
+<br/>rank(H) + λ kEX k0 + γkYk0
+<br/>Z(cid:21) =(cid:20)P
+<br/>X(cid:21) −(cid:20)EP
+<br/>EX(cid:21) ,
+<br/>N , Y ∈ Rc×N
+<br/>+ ,
+<br/>s.t. H =(cid:20)Y
+<br/>1T
+<br/>c Y = 1T
+<br/>yi, j = 0 if pi, j = 0,
+<br/>where λ ∈ R+ and γ ∈ R+ control the sparsity of data noise and predicted
+<br/>labeling matrix, respectively. Consequently, the predicted label of instance
+<br/>j can be obtained as
+<br/>ˆl j = arg max
+<br/>i∈Y
+<br/>yi, j .
+<br/>(2)
+</td><td>('2682056', 'Ching-Hui Chen', 'ching-hui chen')<br/>('1741177', 'Vishal M. Patel', 'vishal m. patel')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td></td></tr><tr><td>677585ccf8619ec2330b7f2d2b589a37146ffad7</td><td>A flexible model for training action localization
+<br/>with varying levels of supervision
+</td><td>('1902524', 'Guilhem Chéron', 'guilhem chéron')<br/>('2285263', 'Jean-Baptiste Alayrac', 'jean-baptiste alayrac')<br/>('1785596', 'Ivan Laptev', 'ivan laptev')<br/>('2462253', 'Cordelia Schmid', 'cordelia schmid')</td><td></td></tr><tr><td>676f9eabf4cfc1fd625228c83ff72f6499c67926</td><td>FACE IDENTIFICATION AND CLUSTERING
+<br/>A thesis submitted to the
+<br/>Graduate School—New Brunswick
+<br/><b>Rutgers, The State University of New Jersey</b><br/>in partial fulfillment of the requirements
+<br/>for the degree of
+<br/>Master of Science
+<br/>Graduate Program in Computer Science
+<br/>Written under the direction of
+<br/>Dr. Vishal Patel, Dr. Ahmed Elgammal
+<br/>and approved by
+<br/>New Brunswick, New Jersey
+<br/>May, 2017
+</td><td>('34805991', 'Atul Dhingra', 'atul dhingra')</td><td></td></tr><tr><td>677477e6d2ba5b99633aee3d60e77026fb0b9306</td><td></td><td></td><td></td></tr><tr><td>6789bddbabf234f31df992a3356b36a47451efc7</td><td>Unsupervised Generation of Free-Form and
+<br/>Parameterized Avatars
+</td><td>('33964593', 'Adam Polyak', 'adam polyak')<br/>('2188620', 'Yaniv Taigman', 'yaniv taigman')<br/>('1776343', 'Lior Wolf', 'lior wolf')</td><td></td></tr><tr><td>679b7fa9e74b2aa7892eaea580def6ed4332a228</td><td>Communication and automatic
+<br/>interpretation of affect from facial
+<br/>expressions1
+<br/><b>University of Amsterdam, the Netherlands</b><br/><b>University of Trento, Italy</b><br/><b>University of Amsterdam, the Netherlands</b></td><td>('1764521', 'Albert Ali Salah', 'albert ali salah')<br/>('1703601', 'Nicu Sebe', 'nicu sebe')<br/>('1695527', 'Theo Gevers', 'theo gevers')</td><td></td></tr><tr><td>675b2caee111cb6aa7404b4d6aa371314bf0e647</td><td>AVA: A Video Dataset of Spatio-temporally Localized Atomic Visual Actions
+<br/>Carl Vondrick∗
+</td><td>('39599498', 'Chunhui Gu', 'chunhui gu')<br/>('1758054', 'Yeqing Li', 'yeqing li')<br/>('1726241', 'Chen Sun', 'chen sun')<br/>('48536531', 'David A. Ross', 'david a. ross')<br/>('2259154', 'Sudheendra Vijayanarasimhan', 'sudheendra vijayanarasimhan')<br/>('1805076', 'George Toderici', 'george toderici')<br/>('2997956', 'Caroline Pantofaru', 'caroline pantofaru')<br/>('2262946', 'Susanna Ricco', 'susanna ricco')<br/>('1694199', 'Rahul Sukthankar', 'rahul sukthankar')<br/>('2462253', 'Cordelia Schmid', 'cordelia schmid')<br/>('1689212', 'Jitendra Malik', 'jitendra malik')</td><td></td></tr><tr><td>679b72d23a9cfca8a7fe14f1d488363f2139265f</td><td></td><td></td><td></td></tr><tr><td>67484723e0c2cbeb936b2e863710385bdc7d5368</td><td>Anchor Cascade for Efficient Face Detection
+</td><td>('2425630', 'Baosheng Yu', 'baosheng yu')<br/>('1692693', 'Dacheng Tao', 'dacheng tao')</td><td></td></tr><tr><td>670637d0303a863c1548d5b19f705860a23e285c</td><td>Face Swapping: Automatically Replacing Faces in Photographs
+<br/><b>Columbia University</b><br/>Peter Belhumeur
+<br/>Figure 1: We have developed a system that automatically replaces faces in an input image with ones selected from a large collection of
+<br/>face images, obtained by applying face detection to publicly available photographs on the internet. In this example, the faces of (a) two
+<br/>people are shown after (b) automatic replacement with the top three ranked candidates. Our system for face replacement can be used for face
+<br/>de-identification, personalized face replacement, and creating an appealing group photograph from a set of “burst” mode images. Original
+<br/>images in (a) used with permission from Retna Ltd. (top) and Getty Images Inc. (bottom).
+<br/>Rendering, Computational Photography
+<br/>1 Introduction
+<br/>it
+<br/>Advances in digital photography have made it possible to cap-
+<br/>ture large collections of high-resolution images and share them
+<br/>on the internet. While the size and availability of these col-
+<br/>lections is leading to many exciting new applications,
+<br/>is
+<br/>also creating new problems. One of the most
+<br/>important of
+<br/>these problems is privacy. Online systems such as Google
+<br/>Street View (http://maps.google.com/help/maps/streetview) and
+<br/>EveryScape (http://everyscape.com) allow users to interactively
+<br/>navigate through panoramic images of public places created using
+<br/>thousands of photographs. Many of the images contain people who
+<br/>have not consented to be photographed, much less to have these
+<br/>photographs publicly viewable. Identity protection by obfuscating
+<br/>the face regions in the acquired photographs using blurring, pixela-
+<br/>tion, or simply covering them with black pixels is often undesirable
+<br/>as it diminishes the visual appeal of the image. Furthermore, many
+</td><td>('2085183', 'Dmitri Bitouk', 'dmitri bitouk')<br/>('40631426', 'Neeraj Kumar', 'neeraj kumar')<br/>('2057606', 'Samreen Dhillon', 'samreen dhillon')<br/>('1750470', 'Shree K. Nayar', 'shree k. nayar')</td><td></td></tr><tr><td>6742c0a26315d7354ab6b1fa62a5fffaea06da14</td><td>BAS AND SMITH: WHAT DOES 2D GEOMETRIC INFORMATION REALLY TELL US ABOUT 3D FACE SHAPE?
+<br/>What does 2D geometric information
+<br/>really tell us about 3D face shape?
+</td><td>('39180407', 'Anil Bas', 'anil bas')<br/>('1687021', 'William A. P. Smith', 'william a. p. smith')</td><td></td></tr><tr><td>67a50752358d5d287c2b55e7a45cc39be47bf7d0</td><td></td><td></td><td></td></tr><tr><td>67c3c1194ee72c54bc011b5768e153a035068c43</td><td>StreetScenes: Towards Scene Understanding in
+<br/>Still Images
+<br/>by
+<br/>Stanley Michael Bileschi
+<br/>Submitted to the Department of Electrical Engineering and Computer
+<br/>Science
+<br/>in partial fulflllment of the requirements for the degree of
+<br/>Doctor of Philosophy in Computer Science and Engineering
+<br/>at the
+<br/><b>MASSACHUSETTS INSTITUTE OF TECHNOLOGY</b><br/>May 2006
+<br/><b>c(cid:176) Massachusetts Institute of Technology 2006. All rights reserved</b><br/>Author . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+<br/>Department of Electrical Engineering and Computer Science
+<br/>May 5, 2006
+<br/>Certifled by . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+<br/>Tomaso A. Poggio
+<br/>McDermott Professor
+<br/>Thesis Supervisor
+<br/>Accepted by . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+<br/>Arthur C. Smith
+<br/>Chairman, Department Committee on Graduate Students
+</td><td></td><td></td></tr><tr><td>673d4885370b27c863e11a4ece9189a6a45931cc</td><td>Recurrent Residual Module for Fast Inference in Videos
+<br/><b>Shanghai Jiao Tong University, 2Zhejiang University, 3Massachusetts Institute of Technology</b><br/>networks for video recognition are more challenging. For
+<br/>example, for Youtube-8M dataset [1] with over 8 million
+<br/>video clips, it will take 50 years for a CPU to extract the
+<br/>deep features using a standard CNN model.
+</td><td>('35654996', 'Bowen Pan', 'bowen pan')<br/>('35992009', 'Wuwei Lin', 'wuwei lin')<br/>('2126444', 'Xiaolin Fang', 'xiaolin fang')<br/>('35933894', 'Chaoqin Huang', 'chaoqin huang')<br/>('1804424', 'Bolei Zhou', 'bolei zhou')<br/>('1830034', 'Cewu Lu', 'cewu lu')</td><td>†{googletornado,linwuwei13, huangchaoqin}@sjtu.edu.cn, ¶fxlfang@gmail.com
+<br/>§bzhou@csail.mit.edu; ‡lu-cw@cs.sjtu.edu.cn
+</td></tr><tr><td>67c703a864aab47eba80b94d1935e6d244e00bcb</td><td> (IJACSA) International Journal of Advanced Computer Science and Applications
+<br/>Vol. 7, No. 6, 2016
+<br/>Face Retrieval Based On Local Binary Pattern and Its
+<br/>Variants: A Comprehensive Study
+<br/><b>University of Science, VNU-HCM, Viet Nam</b><br/>face searching,
+</td><td>('3911040', 'Phan Khoi', 'phan khoi')</td><td></td></tr><tr><td>6754c98ba73651f69525c770fb0705a1fae78eb5</td><td>Joint Cascade Face Detection and Alignment
+<br/><b>University of Science and Technology of China</b><br/>2 Microsoft Research
+</td><td>('39447786', 'Dong Chen', 'dong chen')<br/>('3080683', 'Shaoqing Ren', 'shaoqing ren')<br/>('1732264', 'Yichen Wei', 'yichen wei')<br/>('47300766', 'Xudong Cao', 'xudong cao')<br/>('40055995', 'Jian Sun', 'jian sun')</td><td>{chendong,sqren}@mail.ustc.edu.cn
+<br/>{yichenw,xudongca,jiansun}@microsoft.com
+</td></tr><tr><td>672fae3da801b2a0d2bad65afdbbbf1b2320623e</td><td>Pose-Selective Max Pooling for Measuring Similarity
+<br/>1Dept. of Computer Science
+<br/>2Dept. of Electrical & Computer Engineering
+<br/><b>Johns Hopkins University, 3400 N. Charles St, Baltimore, MD 21218, USA</b></td><td>('40031188', 'Xiang Xiang', 'xiang xiang')<br/>('1709073', 'Trac D. Tran', 'trac d. tran')</td><td>xxiang@cs.jhu.edu
+</td></tr><tr><td>677ebde61ba3936b805357e27fce06c44513a455</td><td>Facial Expression Recognition Based on Facial
+<br/>Components Detection and HOG Features
+<br/><b>The Hong Kong Polytechnic University, Hong Kong</b><br/><b>Chu Hai College of Higher Education, Hong Kong</b></td><td>('2366262', 'Junkai Chen', 'junkai chen')<br/>('1715231', 'Zenghai Chen', 'zenghai chen')<br/>('8590720', 'Zheru Chi', 'zheru chi')<br/>('1965426', 'Hong Fu', 'hong fu')</td><td>Email: Junkai.Chen@connect.polyu.hk
+</td></tr><tr><td>67ba3524e135c1375c74fe53ebb03684754aae56</td><td>978-1-5090-4117-6/17/$31.00 ©2017 IEEE
+<br/>1767
+<br/>ICASSP 2017
+</td><td></td><td></td></tr><tr><td>6769cfbd85329e4815bb1332b118b01119975a95</td><td>Tied factor analysis for face recognition across
+<br/>large pose changes
+</td><td></td><td></td></tr><tr><td>0be43cf4299ce2067a0435798ef4ca2fbd255901</td><td>Title
+<br/>A temporal latent topic model for facial expression recognition
+<br/>Author(s)
+<br/>Shang, L; Chan, KP
+<br/>Citation
+<br/>The 10th Asian Conference on Computer Vision (ACCV 2010),
+<br/>Queenstown, New Zealand, 8-12 November 2010. In Lecture
+<br/>Notes in Computer Science, 2010, v. 6495, p. 51-63
+<br/>Issued Date
+<br/>2011
+<br/>URL
+<br/>http://hdl.handle.net/10722/142604
+<br/>Rights
+<br/>Creative Commons: Attribution 3.0 Hong Kong License
+</td><td></td><td></td></tr><tr><td>0bc53b338c52fc635687b7a6c1e7c2b7191f42e5</td><td>ZHANG, BHALERAO: LOGLET SIFT FOR PART DESCRIPTION
+<br/>Loglet SIFT for Part Description in
+<br/>Deformable Part Models: Application to Face
+<br/>Alignment
+<br/>Department of Computer Science
+<br/><b>University of Warwick</b><br/>Coventry, UK
+</td><td>('39900385', 'Qiang Zhang', 'qiang zhang')<br/>('2227351', 'Abhir Bhalerao', 'abhir bhalerao')</td><td>q.zhang.13@warwick.ac.uk
+<br/>abhir.bhalerao@warwick.ac.uk
+</td></tr><tr><td>0b2277a0609565c30a8ee3e7e193ce7f79ab48b0</td><td>944
+<br/>Cost-Sensitive Semi-Supervised Discriminant
+<br/>Analysis for Face Recognition
+</td><td>('1697700', 'Jiwen Lu', 'jiwen lu')<br/>('3353607', 'Xiuzhuang Zhou', 'xiuzhuang zhou')<br/>('1689805', 'Yap-Peng Tan', 'yap-peng tan')<br/>('38152390', 'Yuanyuan Shang', 'yuanyuan shang')<br/>('39491387', 'Jie Zhou', 'jie zhou')</td><td></td></tr><tr><td>0b9ce839b3c77762fff947e60a0eb7ebbf261e84</td><td>Proceedings of the IASTED International Conference
+<br/>Computer Vision (CV 2011)
+<br/>June 1 - 3, 2011 Vancouver, BC, Canada
+<br/>LOGARITHMIC FOURIER PCA: A NEW APPROACH TO FACE
+<br/>RECOGNITION
+<br/>1 Lakshmiprabha Nattamai Sekar,
+<br/>omjyoti
+<br/>Majumder
+<br/>Surface Robotics Lab
+<br/><b>Central Mechanical Engineering Research Institute</b><br/>Mahatma Gandhi Avenue,
+<br/>Durgapur - 713209, West Bengal, India.
+</td><td>('9155672', 'Jhilik Bhattacharya', 'jhilik bhattacharya')</td><td>email: 1 n prabha mech@cmeri.res.in, 2 bjhilik@cmeri.res.in, 3 sjm@cmeri.res.in
+</td></tr><tr><td>0b8b8776684009e537b9e2c0d87dbd56708ddcb4</td><td>Adversarial Discriminative Heterogeneous Face Recognition
+<br/>National Laboratory of Pattern Recognition, CASIA
+<br/>Center for Research on Intelligent Perception and Computing, CASIA
+<br/>Center for Excellence in Brain Science and Intelligence Technology, CAS
+<br/><b>University of Chinese Academy of Sciences, Beijing 100190, China</b></td><td>('3051419', 'Lingxiao Song', 'lingxiao song')<br/>('2567523', 'Man Zhang', 'man zhang')<br/>('2225749', 'Xiang Wu', 'xiang wu')<br/>('1705643', 'Ran He', 'ran he')</td><td></td></tr><tr><td>0ba64f4157d80720883a96a73e8d6a5f5b9f1d9b</td><td></td><td></td><td></td></tr><tr><td>0b6a5200c33434cbfa9bf24ba482f6e06bf5fff7</td><td>1
+<br/>The Use of Deep Learning in Image
+<br/>Segmentation, Classification and Detection
+<br/><b>The Image Processing and Analysis Lab (LAPI), Politehnica University of Bucharest, Romania</b><br/>
+</td><td>('33789881', 'Mihai-Sorin Badea', 'mihai-sorin badea')<br/>('3407753', 'Laura Maria Florea', 'laura maria florea')<br/>('2905899', 'Constantin Vertan', 'constantin vertan')</td><td></td></tr><tr><td>0b605b40d4fef23baa5d21ead11f522d7af1df06</td><td>Label-Embedding for Attribute-Based Classification
+<br/>a Computer Vision Group∗, XRCE, France
+<br/>b LEAR†, INRIA, France
+</td><td>('2893664', 'Zeynep Akata', 'zeynep akata')<br/>('1723883', 'Florent Perronnin', 'florent perronnin')<br/>('2462253', 'Cordelia Schmid', 'cordelia schmid')</td><td></td></tr><tr><td>0b0eb562d7341231c3f82a65cf51943194add0bb</td><td>> REPLACE THIS LINE WITH YOUR PAPER IDENTIFICATION NUMBER (DOUBLE-CLICK HERE TO EDIT) <
+<br/>Facial Image Analysis Based on Local Binary
+<br/>Patterns: A Survey
+<br/>
+</td><td>('40451093', 'Di Huang', 'di huang')<br/>('10795229', 'Caifeng Shan', 'caifeng shan')<br/>('40703561', 'Mohsen Ardebilian', 'mohsen ardebilian')<br/>('40231048', 'Liming Chen', 'liming chen')</td><td></td></tr><tr><td>0b3a146c474166bba71e645452b3a8276ac05998</td><td>Who’s in the Picture?
+<br/>Berkeley, CA 94720
+<br/>Computer Science Division
+<br/>U.C. Berkeley
+</td><td>('1685538', 'Tamara L. Berg', 'tamara l. berg')<br/>('39668247', 'Alexander C. Berg', 'alexander c. berg')<br/>('34497462', 'Jaety Edwards', 'jaety edwards')</td><td>millert@cs.berkeley.edu
+</td></tr><tr><td>0b78fd881d0f402fd9b773249af65819e48ad36d</td><td>ANALYSIS AND MODELING OF AFFECTIVE AUDIO VISUAL SPEECH
+<br/>BASED ON PAD EMOTION SPACE
+<br/><b>Tsinghua University</b></td><td>('2180849', 'Shen Zhang', 'shen zhang')<br/>('1856341', 'Yingjin Xu', 'yingjin xu')<br/>('25714033', 'Jia Jia', 'jia jia')<br/>('7239047', 'Lianhong Cai', 'lianhong cai')</td><td>{zhangshen05, xuyj03, jiajia}@mails.tsinghua.edu.cn, clh-dcs@tsinghua.edu.cn
+</td></tr><tr><td>0b835284b8f1f45f87b0ce004a4ad2aca1d9e153</td><td>Cartooning for Enhanced Privacy in Lifelogging and Streaming Videos
+<br/>David Crandall
+<br/>School of Informatics and Computing
+<br/><b>Indiana University Bloomington</b></td><td>('3053390', 'Eman T. Hassan', 'eman t. hassan')<br/>('2221434', 'Rakibul Hasan', 'rakibul hasan')<br/>('34507388', 'Patrick Shaffer', 'patrick shaffer')<br/>('1996617', 'Apu Kapadia', 'apu kapadia')</td><td>{emhassan, rakhasan, patshaff, djcran, kapadia}@indiana.edu
+</td></tr><tr><td>0b5bd3ce90bf732801642b9f55a781e7de7fdde0</td><td></td><td></td><td></td></tr><tr><td>0b0958493e43ca9c131315bcfb9a171d52ecbb8a</td><td>A Unified Neural Based Model for Structured Output Problems
+<br/>Soufiane Belharbi∗1, Cl´ement Chatelain∗1, Romain H´erault∗1, and S´ebastien Adam∗2
+<br/>1LITIS EA 4108, INSA de Rouen, Saint ´Etienne du Rouvray 76800, France
+<br/>2LITIS EA 4108, UFR des Sciences, Universit´e de Rouen, France.
+<br/>April 13, 2015
+</td><td></td><td></td></tr><tr><td>0b51197109813d921835cb9c4153b9d1e12a9b34</td><td><b>THE UNIVERSITY OF CHICAGO</b><br/>JOINTLY LEARNING MULTIPLE SIMILARITY METRICS FROM TRIPLET
+<br/>CONSTRAINTS
+<br/>A DISSERTATION SUBMITTED TO
+<br/>THE FACULTY OF THE DIVISION OF THE PHYSICAL SCIENCES
+<br/>IN CANDIDACY FOR THE DEGREE OF
+<br/>MASTER OF SCIENCE
+<br/>DEPARTMENT OF COMPUTER SCIENCE
+<br/>BY
+<br/>CHICAGO, ILLINOIS
+<br/>WINTER, 2015
+</td><td>('40504838', 'LIWEN ZHANG', 'liwen zhang')</td><td></td></tr><tr><td>0bf3513d18ec37efb1d2c7934a837dabafe9d091</td><td>Robust Subspace Clustering via Thresholding Ridge Regression
+<br/><b>Institute for Infocomm Research, Agency for Science, Technology and Research (A*STAR), Singapore</b><br/><b>College of Computer Science, Sichuan University, Chengdu 610065, P.R. China</b></td><td>('8249791', 'Xi Peng', 'xi peng')<br/>('9276020', 'Zhang Yi', 'zhang yi')<br/>('3134548', 'Huajin Tang', 'huajin tang')</td><td>pangsaai@gmail.com, zhangyi@scu.edu.cn, htang@i2r.a-star.edu.sg.
+</td></tr><tr><td>0b20f75dbb0823766d8c7b04030670ef7147ccdd</td><td>1
+<br/>Feature selection using nearest attributes
+</td><td>('1744784', 'Alex Pappachen James', 'alex pappachen james')<br/>('1697594', 'Sima Dimitrijev', 'sima dimitrijev')</td><td></td></tr><tr><td>0b5a82f8c0ee3640503ba24ef73e672d93aeebbf</td><td>On Learning 3D Face Morphable Model
+<br/>from In-the-wild Images
+</td><td>('1849929', 'Luan Tran', 'luan tran')<br/>('1759169', 'Xiaoming Liu', 'xiaoming liu')</td><td></td></tr><tr><td>0b174d4a67805b8796bfe86cd69a967d357ba9b6</td><td> Research Journal of Recent Sciences _________________________________________________ ISSN 2277-2502
+<br/> Vol. 3(4), 56-62, April (2014)
+<br/>Res.J.Recent Sci.
+</td><td></td><td></td></tr><tr><td>0ba449e312894bca0d16348f3aef41ca01872383</td><td></td><td></td><td></td></tr><tr><td>0b87d91fbda61cdea79a4b4dcdcb6d579f063884</td><td>The Open Automation and Control Systems Journal, 2015, 7, 569-579
+<br/>569
+<br/>Open Access
+<br/>Research on Theory and Method for Facial Expression Recognition Sys-
+<br/>tem Based on Dynamic Image Sequence
+<br/><b>School of Computer and Information Engineering, Nanyang Institute of Technology, Henan, Nanyang, 473000, P.R</b><br/>China
+<br/><b>Henan University of Traditional Chinese Medicine, Henan, Zhengzhou, 450000, P.R. China</b></td><td>('9296838', 'Yang Xinfeng', 'yang xinfeng')<br/>('2083303', 'Jiang Shan', 'jiang shan')</td><td>Send Orders for Reprints to reprints@benthamscience.ae
+</td></tr><tr><td>0be2245b2b016de1dcce75ffb3371a5e4b1e731b</td><td>On the Variants of the Self-Organizing Map That Are
+<br/>Based on Order Statistics
+<br/><b>Aristotle University of Thessaloniki</b><br/>Box 451, Thessaloniki 54124, Greece
+</td><td>('1762248', 'Vassiliki Moschou', 'vassiliki moschou')<br/>('1711062', 'Dimitrios Ververidis', 'dimitrios ververidis')<br/>('1736143', 'Constantine Kotropoulos', 'constantine kotropoulos')</td><td>{vmoshou, jimver, costas}@aiia.csd.auth.gr
+</td></tr><tr><td>0b79356e58a0df1d0efcf428d0c7c4651afa140d</td><td>Appears In: Advances in Neural Information Processing Systems , MIT Press,  .
+<br/>Bayesian Modeling of Facial Similarity
+<br/><b>Mitsubishi Electric Research Laboratory</b><br/>
+<br/>Cambridge, MA
+<br/><b>Massachusettes Institute of Technology</b><br/>
+<br/>Cambridge, MA
+</td><td>('1780935', 'Baback Moghaddam', 'baback moghaddam')<br/>('1768120', 'Tony Jebara', 'tony jebara')<br/>('1682773', 'Alex Pentland', 'alex pentland')</td><td>baback@merl.com
+<br/>fjebara,sandyg@media.mit.edu
+</td></tr><tr><td>0b572a2b7052b15c8599dbb17d59ff4f02838ff7</td><td>Automatic Subspace Learning via Principal
+<br/>Coefficients Embedding
+</td><td>('8249791', 'Xi Peng', 'xi peng')<br/>('1697700', 'Jiwen Lu', 'jiwen lu')<br/>('1709367', 'Zhang Yi', 'zhang yi')<br/>('1680126', 'Rui Yan', 'rui yan')</td><td></td></tr><tr><td>0b85b50b6ff03a7886c702ceabad9ab8c8748fdc</td><td>http://www.journalofvision.org/content/11/3/17
+<br/>Is there a dynamic advantage for facial expressions?
+<br/><b>Institute of Child Health, University College London, UK</b><br/>Laboratory of Neuromotor Physiology, Santa Lucia
+<br/>Foundation, Rome, Italy
+<br/>Some evidence suggests that it is easier to identify facial expressions (FEs) shown as dynamic displays than as photographs
+<br/>(dynamic advantage hypothesis). Previously, this has been tested by using dynamic FEs simulated either by morphing a
+<br/>neutral face into an emotional one or by computer animations. For the first time, we tested the dynamic advantage hypothesis
+<br/>by using high-speed recordings of actors’ FEs. In the dynamic condition, stimuli were graded blends of two recordings
+<br/>(duration: 4.18 s), each describing the unfolding of an expression from neutral to apex. In the static condition, stimuli (duration:
+<br/>3 s) were blends of just the apex of the same recordings. Stimuli for both conditions were generated by linearly morphing one
+<br/>expression into the other. Performance was estimated by a forced-choice task asking participants to identify which prototype
+<br/>the morphed stimulus was more similar to. Identification accuracy was not different between conditions. Response times (RTs)
+<br/>measured from stimulus onset were shorter for static than for dynamic stimuli. Yet, most responses to dynamic stimuli were
+<br/>given before expressions reached their apex. Thus, with a threshold model, we tested whether discriminative information is
+<br/>integrated more effectively in dynamic than in static conditions. We did not find any systematic difference. In short, neither
+<br/>identification accuracy nor RTs supported the dynamic advantage hypothesis.
+<br/>Keywords: facial expressions, dynamic advantage, emotion, identification
+<br/>1–15, http://www.journalofvision.org/content/11/3/17, doi:10.1167/11.3.17.
+<br/>Introduction
+<br/>Research on emotion recognition has relied primarily on
+<br/>static images of intense facial expressions (FEs), which—
+<br/>despite being accurately identified (Ekman & Friesen,
+<br/>1982)—are fairly impoverished representations of real-life
+<br/>FEs. As a motor behavior determined by facial muscle
+<br/>actions, expressions are intrinsically dynamic. Insofar as
+<br/>detecting moment-to-moment changes in others’ affective
+<br/>states is fundamental for regulating social
+<br/>interactions
+<br/>(Yoshikawa & Sato, 2008), visual sensitivity to the
+<br/>dynamic properties of FEs might be an important aspect
+<br/>of our emotion recognition abilities.
+<br/>There is considerable evidence that dynamic information
+<br/>is not redundant and may be beneficial for various aspect of
+<br/><b>face processing, including age (Berry, 1990), sex (Hill</b><br/>Johnston, 2001; Mather & Murdoch, 1994), and identity
+<br/>(Hill & Johnston, 2001; Lander, Christie, & Bruce, 1999;
+<br/>see O’Toole, Roark, & Abdi, 2002 for a review) recogni-
+<br/>tion. In real life, static information—such as the invariant
+<br/>geometrical parameters of
+<br/>features—and
+<br/>dynamic information describing the contraction of the
+<br/>expressive muscles are closely intertwined and contribute
+<br/>jointly to the overall perception. The relative contribution
+<br/>of either type of cues, which is likely to depend on the
+<br/>meaning that one is asked to extract from the stimulus, is
+<br/>still poorly understood. Pure motion information is suffi-
+<br/>cient to recognize a person’s identity and sex (Hill &
+<br/>the facial
+<br/>Johnston, 2001). Other studies have shown that face
+<br/>identity is better recognized from dynamic than static
+<br/>displays when the stimuli are degraded (e.g., shown as
+<br/>negatives, upside down, thresholded, pixilated, or blurred).
+<br/>However,
+<br/>the advantage disappears with unmodified
+<br/>stimuli (Knight & Johnston, 1997; Lander et al., 1999). In
+<br/>short, insofar as recognition of identity from complete
+<br/>static images is already close to perfect, motion appears to
+<br/>be beneficial only when static information is insufficient or
+<br/>has been manipulated (Katsiri, 2006; O’Toole et al., 2002).
+<br/>In comparison to face identity, fewer studies have
+<br/>investigated the role of dynamic information in FE recog-
+<br/>nition (see Katsiri, 2006, for a review). Taken together,
+<br/>they seem to suggest that the process of emotion identi-
+<br/>fication is facilitated when expressions are dynamic rather
+<br/>than static. However, because of various methodological
+<br/>issues and conceptual inconsistencies across studies, this
+<br/>suggestion needs to be qualified. We can divide the avail-
+<br/>able studies in three main groups.
+<br/>First, there are studies showing that dynamic information
+<br/>improves expression recognition in a variety of suboptimal
+<br/>conditions, i.e., when static information is either unavail-
+<br/>able or is only partially accessible. As in the case of
+<br/>identity recognition, emotions can be inferred from
+<br/>animated point-light descriptions of the faces that neglect
+<br/>facial features (Bassili, 1978, 1979; see also Bruce &
+<br/>Valentine, 1988). Furthermore, in various neuropsycholog-
+<br/>ical and developmental conditions, there is evidence that
+<br/>dynamic presentation improves emotion recognition with
+<br/>doi: 10.1167/11.3.17
+<br/>Received November 18, 2010; published March 22, 2011
+<br/>ISSN 1534-7362 * ARVO
+<br/>Downloaded From: http://jov.arvojournals.org/pdfaccess.ashx?url=/data/journals/jov/933483/ on 03/30/2017 </td><td>('34569930', 'Chiara Fiorentini', 'chiara fiorentini')<br/>('32709245', 'Paolo Viviani', 'paolo viviani')</td><td></td></tr><tr><td>0b84f07af44f964817675ad961def8a51406dd2e</td><td>Person Re-identification in the Wild
+<br/>3USTC
+<br/>4UCSD
+<br/><b>University of Technology Sydney</b><br/>2UTSA
+</td><td>('14904242', 'Liang Zheng', 'liang zheng')<br/>('1983351', 'Hengheng Zhang', 'hengheng zhang')<br/>('3141359', 'Shaoyan Sun', 'shaoyan sun')<br/>('1698559', 'Yi Yang', 'yi yang')<br/>('1713616', 'Qi Tian', 'qi tian')</td><td>{liangzheng06,manu.chandraker,yee.i.yang,wywqtian}@gmail.com
+</td></tr><tr><td>0b242d5123f79defd5f775d49d8a7047ad3153bc</td><td>CBMM Memo No. 36
+<br/>September 15, 2015
+<br/>How Important is Weight Symmetry in
+<br/>Backpropagation?
+<br/>by
+<br/><b>Center for Brains, Minds and Machines, McGovern Institute, MIT</b></td><td>('1694846', 'Qianli Liao', 'qianli liao')<br/>('1700356', 'Joel Z. Leibo', 'joel z. leibo')</td><td></td></tr><tr><td>0ba1d855cd38b6a2c52860ae4d1a85198b304be4</td><td>Variable-state Latent Conditional Random Fields
+<br/>for Facial Expression Recognition and Action Unit Detection
+<br/><b>Imperial College London, UK</b><br/><b>Rutgers University, USA</b></td><td>('2616466', 'Robert Walecki', 'robert walecki')<br/>('1729713', 'Ognjen Rudovic', 'ognjen rudovic')<br/>('1736042', 'Vladimir Pavlovic', 'vladimir pavlovic')<br/>('1694605', 'Maja Pantic', 'maja pantic')</td><td></td></tr><tr><td>0b50e223ad4d9465bb92dbf17a7b79eccdb997fb</td><td>Implicit Elastic Matching with Random Projections for Pose-Variant Face
+<br/>Recognition
+<br/>Electrical and Computer Engineering
+<br/><b>University of Illinois at Urbana-Champaign</b><br/>Microsoft Live Labs Research
+</td><td>('1738310', 'John Wright', 'john wright')<br/>('1745420', 'Gang Hua', 'gang hua')</td><td>ganghua@microsoft.com
+<br/>jnwright@uiuc.edu
+</td></tr><tr><td>0badf61e8d3b26a0d8b60fe94ba5c606718daf0b</td><td>Rev. Téc. Ing. Univ. Zulia. Vol. 39, Nº 2, 384 - 392, 2016
+<br/>Facial Expression Recognition Using Deep Belief Network
+<br/><b>School of Information Science and Technology, Northwestern University, Xi an710127, Shanxi, China</b><br/><b>Teaching Affairs Office, Chongqing Normal University, Chongqing 401331, China</b><br/><b>School of Information Science and Technology, Northwestern University, Xi an710127, Shanxi, China</b><br/><b>School of Computer and Information Science, Chongqing Normal University 401331, China</b><br/>Deli Zhu
+</td><td>('3439338', 'Yunong Yang', 'yunong yang')<br/>('2068791', 'Dingyi Fang', 'dingyi fang')</td><td></td></tr><tr><td>0b02bfa5f3a238716a83aebceb0e75d22c549975</td><td>Learning Probabilistic Models for Recognizing Faces
+<br/>under Pose Variations
+<br/><b>Computer vision and Remote Sensing, Berlin university of Technology</b><br/>Sekr. FR-3-1, Franklinstr. 28/29, Berlin, Germany
+</td><td>('2326207', 'M. Saquib', 'm. saquib')<br/>('2962236', 'Olaf Hellwich', 'olaf hellwich')</td><td>{saquib;hellwich}@fpk.tu-berlin.de
+</td></tr><tr><td>0bce54bfbd8119c73eb431559fc6ffbba741e6aa</td><td>Published as a conference paper at ICLR 2018
+<br/>SKIP RNN: LEARNING TO SKIP STATE UPDATES IN
+<br/>RECURRENT NEURAL NETWORKS
+<br/>†Barcelona Supercomputing Center, ‡Google Inc,
+<br/><b>Universitat Polit`ecnica de Catalunya, Columbia University</b></td><td>('2447185', 'Brendan Jou', 'brendan jou')<br/>('1711068', 'Jordi Torres', 'jordi torres')<br/>('9546964', 'Shih-Fu Chang', 'shih-fu chang')</td><td>{victor.campos, jordi.torres}@bsc.es, bjou@google.com,
+<br/>xavier.giro@upc.edu, shih.fu.chang@columbia.edu
+</td></tr><tr><td>0b2966101fa617b90510e145ed52226e79351072</td><td>Beyond Verbs: Understanding Actions in Videos
+<br/>with Text
+<br/>Department of Computer Science
+<br/><b>University of Manitoba</b><br/>Winnipeg, MB, Canada
+<br/>Department of Computer Science
+<br/><b>University of Manitoba</b><br/>Winnipeg, MB, Canada
+</td><td>('3056962', 'Shujon Naha', 'shujon naha')<br/>('2295608', 'Yang Wang', 'yang wang')</td><td>Email: shujon@cs.umanitoba.ca
+<br/>Email: ywang@cs.umanitoba.ca
+</td></tr><tr><td>0ba0f000baf877bc00a9e144b88fa6d373db2708</td><td>Facial Expression Recognition Based on Local
+<br/>Directional Pattern Using SVM Decision-level Fusion
+<br/>1. Key Laboratory of Education Informalization for Nationalities, Ministry of
+<br/><b>Education, Yunnan NormalUniversity, Kunming, China2. College of Information, Yunnan</b><br/><b>Normal University, Kunming, China</b></td><td>('2535958', 'Juxiang Zhou', 'juxiang zhou')<br/>('3305175', 'Tianwei Xu', 'tianwei xu')<br/>('2411704', 'Jianhou Gan', 'jianhou gan')</td><td>{zjuxiang@126.com,xutianwei@ynnu.edu.cn,kmganjh@yahoo.com.cn}
+</td></tr><tr><td>0be80da851a17dd33f1e6ffdd7d90a1dc7475b96</td><td>Hindawi Publishing Corporation
+<br/>Computational Intelligence and Neuroscience
+<br/>Volume 2016, Article ID 7696035, 7 pages
+<br/>http://dx.doi.org/10.1155/2016/7696035
+<br/>Research Article
+<br/>Weighted Feature Gaussian Kernel SVM for
+<br/>Emotion Recognition
+<br/><b>School of Automation, Beijing University of Posts and Telecommunications, Beijing 100876, China</b><br/>Received 26 June 2016; Revised 14 August 2016; Accepted 14 September 2016
+<br/>Academic Editor: Francesco Camastra
+<br/>Copyright © 2016 W. Wei and Q. Jia. This is an open access article distributed under the Creative Commons Attribution License,
+<br/>which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
+<br/>Emotion recognition with weighted feature based on facial expression is a challenging research topic and has attracted great
+<br/>attention in the past few years. This paper presents a novel method, utilizing subregion recognition rate to weight kernel function.
+<br/>First, we divide the facial expression image into some uniform subregions and calculate corresponding recognition rate and weight.
+<br/>Then, we get a weighted feature Gaussian kernel function and construct a classifier based on Support Vector Machine (SVM). At
+<br/>last, the experimental results suggest that the approach based on weighted feature Gaussian kernel function has good performance
+<br/>on the correct rate in emotion recognition. The experiments on the extended Cohn-Kanade (CK+) dataset show that our method
+<br/>has achieved encouraging recognition results compared to the state-of-the-art methods.
+<br/>1. Introduction
+<br/>Emotion recognition has necessary applications in the real
+<br/>world. Its applications include but are not limited to artificial
+<br/>intelligence and human computer interaction. It remains a
+<br/>challenging and attractive topic. There are many methods
+<br/>which have been proposed for handling problems in emotion
+<br/>recognition. Speech [1, 2], physiological [3–5], and visual
+<br/>signals have been explored for emotion recognition. Speech
+<br/>signals are discontinuous signals, since they can be captured
+<br/>only when people are talking. Acquirement of physiological
+<br/>signal needs some special physiological sensors. Visual signal
+<br/>is the best choice for emotion recognition based on the above
+<br/>reasons. Although the visual information provided is useful,
+<br/>there are challenges regarding how to utilize this information
+<br/>reliably and robustly. According to Albert Mehrabian’s 7%–
+<br/>38%–55% rule, facial expression is an important mean of
+<br/>detecting emotions [6].
+<br/>Further studies have been carried out on emotion recog-
+<br/>nition problems in facial expression images during the last
+<br/>decade [7, 8]. Given a facial expression image, estimate the
+<br/>correct emotional state, such as anger, happiness, sadness, and
+<br/>surprise. The general process has two steps: feature extraction
+<br/>and classification. For feature extraction, geometric feature,
+<br/>texture feature, motion feature, and statistical feature are in
+<br/>common use. For classification, methods based on machine
+<br/>learning algorithm are frequently used. According to special-
+<br/>ity of features, applying weighted features to machine learning
+<br/>algorithm has become an active research topic.
+<br/>In recent years, emotion recognition with weighted fea-
+<br/>ture based on facial expression has become a new research
+<br/>topic and received more and more attention [9, 10]. The
+<br/>aim is to estimate emotion type from a facial expression
+<br/>image captured during physical facial expression process of
+<br/>a subject. But the emotion features captured from the facial
+<br/>expression image are strongly linked to not the whole face
+<br/>but some specific regions in the face. For instance, features
+<br/>of eyebrow, eye, nose, and mouth areas are closely related
+<br/>to facial expression [11]. Besides, the effect of each feature
+<br/>on recognition result is different. In order to make the best
+<br/>of feature, using feature weighting technique can further
+<br/>enhance recognition performance. While there are several
+<br/>approaches of confirming weight, it remains an open issue
+<br/>on how to select feature and calculate corresponding weight
+<br/>effectively.
+<br/>In this paper, a new emotion recognition method based
+<br/>on weighted feature facial expression is presented. It is
+<br/>motivated by the fact that emotion can be described by facial
+<br/>expression and each facial expression feature has different
+<br/>impact on recognition results. Different from previous works
+</td><td>('39248132', 'Wei Wei', 'wei wei')<br/>('2301733', 'Qingxuan Jia', 'qingxuan jia')</td><td>Correspondence should be addressed to Wei Wei; wei wei@bupt.edu.cn
+</td></tr><tr><td>0b183f5260667c16ef6f640e5da50272c36d599b</td><td>Spatio-temporal Event Classification Using
+<br/>Time-Series Kernel Based Structured Sparsity(cid:2)
+<br/>L´aszl´o A. Jeni1, Andr´as L˝orincz2, Zolt´an Szab´o3,
+<br/><b>Robotics Institute, Carnegie Mellon University, Pittsburgh, PA, USA</b><br/><b>Faculty of Informatics, E otv os Lor and University, Budapest, Hungary</b><br/><b>Gatsby Computational Neuroscience Unit, University College London, London, UK</b><br/><b>University of Pittsburgh, Pittsburgh, PA, USA</b></td><td>('1733113', 'Takeo Kanade', 'takeo kanade')</td><td>laszlo.jeni@ieee.org, andras.lorincz@elte.hu,
+<br/>zoltan.szabo@gatsby.ucl.ac.uk, {jeffcohn,tk}@cs.cmu.edu
+</td></tr><tr><td>0b4c4ea4a133b9eab46b217e22bda4d9d13559e6</td><td>MORF: Multi-Objective Random Forests for Face Characteristic Estimation
+<br/><b>MICC - University of Florence</b><br/>2CVC - Universitat Autonoma de Barcelona
+<br/><b>DVMM Lab - Columbia University</b></td><td>('37822746', 'Dario Di Fina', 'dario di fina')<br/>('2602265', 'Svebor Karaman', 'svebor karaman')<br/>('1749498', 'Andrew D. Bagdanov', 'andrew d. bagdanov')<br/>('8196487', 'Alberto Del Bimbo', 'alberto del bimbo')</td><td>{dario.difina, alberto.delbimbo}@unifi.it, svebor.karaman@columbia.edu, bagdanov@cvc.uab.es
+</td></tr><tr><td>0ba99a709cd34654ac296418a4f41a9543928149</td><td></td><td></td><td></td></tr><tr><td>0be764800507d2e683b3fb6576086e37e56059d1</td><td>Learning from Geometry
+<br/>by
+<br/>Department of Electrical and Computer Engineering
+<br/><b>Duke University</b><br/>Date:
+<br/>Approved:
+<br/>Robert Calderbank, Supervisor
+<br/>Lawrence Carin
+<br/>Ingrid Daubechies
+<br/>Gallen Reeves
+<br/>Guillermo Sapiro
+<br/>Dissertation submitted in partial fulfillment of the requirements for the degree of
+<br/>Doctor of Philosophy in the Department of Electrical and Computer Engineering
+<br/><b>in the Graduate School of Duke University</b><br/>2016
+</td><td>('34060310', 'Jiaji Huang', 'jiaji huang')</td><td></td></tr><tr><td>0b642f6d48a51df64502462372a38c50df2051b1</td><td>A Domain Adaptation Approach to Improve
+<br/>Speaker Turn Embedding Using Face Representation
+<br/><b>Idiap Research Institute, Martigny, Switzerland</b><br/>École Polytechnique Fédéral de Lausanne, Switzerland
+</td><td>('39560344', 'Nam Le', 'nam le')<br/>('1719610', 'Jean-Marc Odobez', 'jean-marc odobez')</td><td>nle@idiap.ch,odobez@idiap.ch
+</td></tr><tr><td>0b7d1386df0cf957690f0fe330160723633d2305</td><td>Learning American English Accents Using Ensemble Learning with GMMs
+<br/>Department of Computer Science
+<br/><b>Rensselaer Polytechnic Institute</b><br/>Troy, NY 12180
+<br/>Department of Computer Science
+<br/><b>Rensselaer Polytechnic Institute</b><br/>Troy, NY 12180
+</td><td>('38769302', 'Jonathan T. Purnell', 'jonathan t. purnell')<br/>('1705107', 'Malik Magdon-Ismail', 'malik magdon-ismail')</td><td>purnej@cs.rpi.edu
+<br/>magdon@cs.rpi.edu
+</td></tr><tr><td>0b6616f3ebff461e4b6c68205fcef1dae43e2a1a</td><td>Rectifying Self Organizing Maps
+<br/>for Automatic Concept Learning from Web Images
+<br/><b>Bilkent University</b><br/>06800 Ankara/Turkey
+<br/>Pinar Duygulu
+<br/><b>Bilkent University</b><br/>06800 Ankara/Turkey
+</td><td>('2540074', 'Eren Golge', 'eren golge')</td><td>eren.golge@bilkent.edu.tr
+<br/>pinar.duygulu@gmail.com
+</td></tr><tr><td>0b8c92463f8f5087696681fb62dad003c308ebe2</td><td>On Matching Sketches with Digital Face Images
+<br/>in local
+</td><td>('2559473', 'Himanshu S. Bhatt', 'himanshu s. bhatt')<br/>('34173298', 'Samarth Bharadwaj', 'samarth bharadwaj')<br/>('39129417', 'Richa Singh', 'richa singh')<br/>('2338122', 'Mayank Vatsa', 'mayank vatsa')</td><td></td></tr><tr><td>0bc0f9178999e5c2f23a45325fa50300961e0226</td><td>Recognizing facial expressions from videos using Deep
+<br/>Belief Networks
+<br/>CS 229 Project
+</td><td>('34699434', 'Andrew Ng', 'andrew ng')</td><td>Adithya Rao (adithyar@stanford.edu), Narendran Thiagarajan (naren@stanford.edu)
+</td></tr><tr><td>0ba402af3b8682e2aa89f76bd823ddffdf89fa0a</td><td>Squared Earth Mover’s Distance-based Loss for Training Deep Neural Networks
+<br/>Computer Science Department
+<br/><b>Stony Brook University</b><br/>Cognitive Neuroscience Lab
+<br/>Computer Science Department
+<br/><b>Harvard University</b><br/><b>Stony Brook University</b></td><td>('2321406', 'Le Hou', 'le hou')<br/>('2576295', 'Chen-Ping Yu', 'chen-ping yu')<br/>('1686020', 'Dimitris Samaras', 'dimitris samaras')</td><td>lehhou@cs.stonybrook.edu
+<br/>chenpingyu@fas.harvard.edu
+<br/>samaras@cs.stonybrook.edu
+</td></tr><tr><td>0bf0029c9bdb0ac61fda35c075deb1086c116956</td><td>Article
+<br/>Modelling of Orthogonal Craniofacial Profiles
+<br/><b>University of York, Heslington, York YO10 5GH, UK</b><br/>Received: 20 October 2017; Accepted: 23 November 2017; Published: 30 November 2017
+</td><td>('1694260', 'Hang Dai', 'hang dai')<br/>('1737428', 'Nick Pears', 'nick pears')<br/>('1678859', 'Christian Duncan', 'christian duncan')</td><td>nick.pears@york.ac.uk
+<br/>2 Alder Hey Children’s Hospital, Liverpool L12 2AP, UK; Christian.Duncan@alderhey.nhs.uk
+<br/>* Correspondence: hd816@york.ac.uk; Tel.: +44-1904-325-643
+</td></tr><tr><td>0b3f354e6796ef7416bf6dde9e0779b2fcfabed2</td><td></td><td></td><td></td></tr><tr><td>9391618c09a51f72a1c30b2e890f4fac1f595ebd</td><td>Globally Tuned Cascade Pose Regression via
+<br/>Back Propagation with Application in 2D Face
+<br/>Pose Estimation and Heart Segmentation in 3D
+<br/>CT Images
+<br/><b>Dalio Institute of Cardiovascular Imaging, Weill Cornell Medical College</b><br/>April 1, 2015
+<br/>This work was submitted to ICML 2015 but got rejected. We put the initial
+<br/>submission ”as is” in Page 2 - 11 and add updated contents at the tail. The
+<br/>code of this work is available at https://github.com/pengsun/bpcpr5.
+</td><td></td><td>Peng Sun pes2021@med.cornell.edu
+<br/>James K Min jkm2001@med.cornell.edu
+<br/>Guanglei Xiong gux2003@med.cornell.edu
+</td></tr><tr><td>93675f86d03256f9a010033d3c4c842a732bf661</td><td>Universit´edesSciencesetTechnologiesdeLilleEcoleDoctoraleSciencesPourl’ing´enieurUniversit´eLilleNord-de-FranceTHESEPr´esent´ee`al’Universit´edesSciencesetTechnologiesdeLillePourobtenirletitredeDOCTEURDEL’UNIVERSIT´ESp´ecialit´e:MicroetNanotechnologieParTaoXULocalizedgrowthandcharacterizationofsiliconnanowiresSoutenuele25Septembre2009Compositiondujury:Pr´esident:TuamiLASRIRapporteurs:ThierryBARONHenriMARIETTEExaminateurs:EricBAKKERSXavierWALLARTDirecteurdeth`ese:BrunoGRANDIDIER </td><td></td><td></td></tr><tr><td>935a7793cbb8f102924fa34fce1049727de865c2</td><td>AGE ESTIMATION UNDER CHANGES IN IMAGE QUALITY: AN EXPERIMENTAL STUDY
+<br/><b>ISLA Lab, Informatics Institute, University of Amsterdam</b></td><td>('1765602', 'Fares Alnajar', 'fares alnajar')<br/>('1695527', 'Theo Gevers', 'theo gevers')<br/>('1968574', 'Sezer Karaoglu', 'sezer karaoglu')</td><td></td></tr><tr><td>9326d1390e8601e2efc3c4032152844483038f3f</td><td>Landmark Based Facial Component Reconstruction
+<br/>for Recognition Across Pose
+<br/>Department of Mechanical Engineering
+<br/><b>National Taiwan University of Science and Technology</b><br/>Taipei, Taiwan
+</td><td>('38801529', 'Gee-Sern Hsu', 'gee-sern hsu')<br/>('3329222', 'Hsiao-Chia Peng', 'hsiao-chia peng')<br/>('2329565', 'Kai-Hsiang Chang', 'kai-hsiang chang')</td><td>Email: ∗jison@mail.ntust.edu.tw
+</td></tr><tr><td>93747de3d40376761d1ef83ffa72ec38cd385833</td><td>COGNITION AND EMOTION, 2015
+<br/>http://dx.doi.org/10.1080/02699931.2015.1039494
+<br/>Team members’ emotional displays as indicators
+<br/>of team functioning
+<br/><b>University of Amsterdam, Amsterdam, The</b><br/>Netherlands
+<br/><b>University of Amsterdam, Amsterdam, The Netherlands</b><br/><b>Ross School of Business, University of Michigan, Ann Arbor, MI, USA</b><br/>(Received 18 August 2014; accepted 6 April 2015)
+<br/>Emotions are inherent to team life, yet it is unclear how observers use team members’ emotional
+<br/>expressions to make sense of team processes. Drawing on Emotions as Social Information theory, we
+<br/>propose that observers use team members’ emotional displays as a source of information to predict the
+<br/>team’s trajectory. We argue and show that displays of sadness elicit more pessimistic inferences
+<br/>regarding team dynamics (e.g., trust, satisfaction, team effectiveness, conflict) compared to displays of
+<br/>happiness. Moreover, we find that this effect is strengthened when the future interaction between the
+<br/>team members is more ambiguous (i.e., under ethnic dissimilarity; Study 1) and when emotional
+<br/>displays can be clearly linked to the team members’ collective experience (Study 2). These studies shed
+<br/>light on when and how people use others’ emotional expressions to form impressions of teams.
+<br/>Keywords: Emotions as social information; Impression formation; Team functioning; Sense-making.
+<br/>How do people make sense of social collectives? This
+<br/>question has a long-standing interest in the social
+<br/>sciences (Hamilton & Sherman, 1996), because
+<br/>observers’ understanding of what goes on between
+<br/>other individuals informs their behavioural responses
+<br/>(Abelson, Dasgupta, Park, & Banaji, 1998; Magee &
+<br/>Tiedens, 2006). A special type of social collective is
+<br/>the team, in which individuals work together on a
+<br/>joint task (Ilgen, 1999). There are many reasons why
+<br/>outside observers may want to develop an under-
+<br/>standing of a team’s functioning and future trajectory,
+<br/>for instance because their task is to supervise the team
+<br/>or because they are considering sponsoring or poten-
+<br/>tially joining the team as a member. However,
+<br/>making sense of a team’s trajectory is an uncertain
+<br/>endeavour because explicit information about team
+<br/>functioning is often not available. This problem is
+<br/>further exacerbated by the fact that team ventures are
+<br/>simultaneously potent and precarious. When indivi-
+<br/>duals join forces in teams, great achievements can be
+<br/>obtained (Guzzo & Dickson, 1996), but teams are
+<br/>also a potential breeding ground for myriad negative
+<br/>outcomes such as intra-team conflicts, social inhibi-
+<br/>tion, decision-making biases and productivity losses
+<br/>(Jehn, 1995; Kerr & Tindale, 2004). We propose
+<br/>that, in their sense-making efforts, observers there-
+<br/>fore make use of dynamic signals that provide up-to-
+<br/>date diagnostic information about the likely trajectory
+<br/><b>Correspondence should be addressed to: Astrid C. Homan, University of Amsterdam, Weesperplein</b><br/>© 2015 Taylor & Francis
+</td><td>('2863272', 'Jeffrey Sanchez-Burks', 'jeffrey sanchez-burks')</td><td>1018 XA Amsterdam, The Netherlands. E-mail: ac.homan@uva.nl
+</td></tr><tr><td>936c7406de1dfdd22493785fc5d1e5614c6c2882</td><td>2012 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 762–772,
+<br/>Montr´eal, Canada, June 3-8, 2012. c(cid:13)2012 Association for Computational Linguistics
+<br/>762
+</td><td></td><td></td></tr><tr><td>93721023dd6423ab06ff7a491d01bdfe83db7754</td><td>ROBUST FACE ALIGNMENT USING CONVOLUTIONAL NEURAL
+<br/>NETWORKS
+<br/>Orange Labs, 4, Rue du Clos Courtel, 35512 Cesson-S´evign´e, France
+<br/>Keywords:
+<br/>Face alignment, Face registration, Convolutional Neural Networks.
+</td><td>('1762557', 'Stefan Duffner', 'stefan duffner')<br/>('34798028', 'Christophe Garcia', 'christophe garcia')</td><td>{stefan.duffner, christophe.garcia}@orange-ftgroup.com
+</td></tr><tr><td>93971a49ef6cc88a139420349a1dfd85fb5d3f5c</td><td>Scalable Probabilistic Models:
+<br/>Applied to Face Identification in the Wild
+<br/>Biometric Person Recognition Group
+<br/><b>Idiap Research Institute</b><br/>Rue Marconi 19 PO Box 592
+<br/>1920 Martigny
+</td><td>('2121764', 'Laurent El Shafey', 'laurent el shafey')</td><td>laurent.el-shafey@idiap.ch
+<br/>sebastien.marcel@idiap.ch
+</td></tr><tr><td>93420d9212dd15b3ef37f566e4d57e76bb2fab2f</td><td>An All-In-One Convolutional Neural Network for Face Analysis
+<br/><b>Center for Automation Research, UMIACS, University of Maryland, College Park, MD</b></td><td>('48467498', 'Rajeev Ranjan', 'rajeev ranjan')<br/>('2716670', 'Swami Sankaranarayanan', 'swami sankaranarayanan')<br/>('38171682', 'Carlos D. Castillo', 'carlos d. castillo')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td>{rranjan1,swamiviv,carlos,rama}@umiacs.umd.edu
+</td></tr><tr><td>93af36da08bf99e68c9b0d36e141ed8154455ac2</td><td>Workshop track - ICLR 2018
+<br/>ADDITIVE MARGIN SOFTMAX
+<br/>FOR FACE VERIFICATION
+<br/>Department of Information and Communication Engineering
+<br/><b>University of Electronic Science and Technology of China</b><br/>Chengdu, Sichuan 611731 China
+<br/><b>College of Computing</b><br/><b>Georgia Institute of Technology</b><br/>Atlanta, United States.
+<br/>Department of Information and Communication Engineering
+<br/><b>University of Electronic Science and Technology of China</b><br/>Chengdu, Sichuan 611731 China
+</td><td>('47939378', 'Feng Wang', 'feng wang')<br/>('51094998', 'Weiyang Liu', 'weiyang liu')<br/>('8424682', 'Haijun Liu', 'haijun liu')</td><td>feng.wff@gmail.com
+<br/>{wyliu, hanjundai}@gatech.edu
+<br/>haijun liu@126.com chengjian@uestc.edu.cn
+</td></tr><tr><td>93cbb3b3e40321c4990c36f89a63534b506b6daf</td><td>IEEE TRANSACTIONS ON SYSTEMS, MAN, AND CYBERNETICS—PART B: CYBERNETICS, VOL. 35, NO. 3, JUNE 2005
+<br/>477
+<br/>Learning From Examples in the Small Sample Case:
+<br/>Face Expression Recognition
+</td><td>('1822413', 'Guodong Guo', 'guodong guo')<br/>('1724754', 'Charles R. Dyer', 'charles r. dyer')</td><td></td></tr><tr><td>937ffb1c303e0595317873eda5ce85b1a17f9943</td><td>Eyes Do Not Lie: Spontaneous versus Posed Smiles
+<br/><b>Intelligent Systems Lab Amsterdam, University of Amsterdam</b><br/>Science Park 107, Amsterdam, The Netherlands
+</td><td>('9301018', 'Roberto Valenti', 'roberto valenti')<br/>('1764521', 'Albert Ali Salah', 'albert ali salah')<br/>('1695527', 'Theo Gevers', 'theo gevers')</td><td>h.dibeklioglu@uva.nl, r.valenti@uva.nl, a.a.salah@uva.nl, th.gevers@uva.nl
+</td></tr><tr><td>93f37c69dd92c4e038710cdeef302c261d3a4f92</td><td>Compressed Video Action Recognition
+<br/>Philipp Kr¨ahenb¨uhl1
+<br/><b>The University of Texas at Austin, 2Carnegie Mellon University</b><br/><b>University of Southern California, 4A9, 5Amazon</b></td><td>('2978413', 'Chao-Yuan Wu', 'chao-yuan wu')<br/>('1771307', 'Manzil Zaheer', 'manzil zaheer')<br/>('2804000', 'Hexiang Hu', 'hexiang hu')<br/>('1691629', 'Alexander J. Smola', 'alexander j. smola')<br/>('1758550', 'R. Manmatha', 'r. manmatha')</td><td>cywu@cs.utexas.edu
+<br/>manzil@cmu.edu
+<br/>smola@amazon.com
+<br/>hexiangh@usc.edu
+<br/>philkr@cs.utexas.edu
+<br/>manmatha@a9.com
+</td></tr><tr><td>936227f7483938097cc1cdd3032016df54dbd5b6</td><td>Learning to generalize to new compositions in image understanding
+<br/><b>Gonda Brain Research Center, Bar Ilan University, Israel</b><br/>3Google Research, Mountain View CA, USA
+<br/><b>Tel Aviv University, Israel</b></td><td>('34815079', 'Yuval Atzmon', 'yuval atzmon')<br/>('1750652', 'Jonathan Berant', 'jonathan berant')<br/>('3451674', 'Vahid Kezami', 'vahid kezami')<br/>('1786843', 'Amir Globerson', 'amir globerson')<br/>('1732280', 'Gal Chechik', 'gal chechik')</td><td>yuval.atzmon@biu.ac.il
+</td></tr><tr><td>939123cf21dc9189a03671484c734091b240183e</td><td>Within- and Cross- Database Evaluations for Gender
+<br/>Classification via BeFIT Protocols
+<br/><b>Idiap Research Institute</b><br/>Centre du Parc, Rue Marconi 19, CH-1920, Martigny, Switzerland
+</td><td>('2128163', 'Nesli Erdogmus', 'nesli erdogmus')<br/>('2059725', 'Matthias Vanoni', 'matthias vanoni')</td><td>Email: nesli.erdogmus, matthias.vanoni, marcel@idiap.ch
+</td></tr><tr><td>938ae9597f71a21f2e47287cca318d4a2113feb2</td><td>Classifier Learning with Prior Probabilities
+<br/>for Facial Action Unit Recognition
+<br/>1National Laboratory of Pattern Recognition, CASIA
+<br/><b>University of Chinese Academy of Sciences</b><br/><b>Rensselaer Polytechnic Institute</b></td><td>('49889545', 'Yong Zhang', 'yong zhang')<br/>('38690089', 'Weiming Dong', 'weiming dong')<br/>('39495638', 'Bao-Gang Hu', 'bao-gang hu')<br/>('1726583', 'Qiang Ji', 'qiang ji')</td><td>zhangyong201303@gmail.com, weiming.dong@ia.ac.cn, hubg@nlpr.ia.ac.cn, qji@ecse.rpi.edu
+</td></tr><tr><td>94b9c0a6515913bad345f0940ee233cdf82fffe1</td><td>International Journal of Science and Research (IJSR)
+<br/>ISSN (Online): 2319-7064
+<br/>Impact Factor (2012): 3.358
+<br/>Face Recognition using Local Ternary Pattern for
+<br/>Low Resolution Image
+<br/><b>Research Scholar, CGC Group of Colleges, Gharuan, Punjab, India</b><br/><b>Chandigarh University, Gharuan, Punjab, India</b></td><td>('40440964', 'Amanpreet Kaur', 'amanpreet kaur')</td><td></td></tr><tr><td>946017d5f11aa582854ac4c0e0f1b18b06127ef1</td><td>Tracking Persons-of-Interest
+<br/>via Adaptive Discriminative Features
+<br/><b>Xi an Jiaotong University</b><br/><b>Hanyang University</b><br/><b>University of Illinois, Urbana-Champaign</b><br/><b>University of California, Merced</b><br/>http://shunzhang.me.pn/papers/eccv2016/
+</td><td>('2481388', 'Shun Zhang', 'shun zhang')<br/>('1698965', 'Yihong Gong', 'yihong gong')<br/>('3068086', 'Jia-Bin Huang', 'jia-bin huang')<br/>('33047058', 'Jongwoo Lim', 'jongwoo lim')<br/>('32014778', 'Jinjun Wang', 'jinjun wang')<br/>('1752333', 'Narendra Ahuja', 'narendra ahuja')<br/>('1715634', 'Ming-Hsuan Yang', 'ming-hsuan yang')</td><td></td></tr><tr><td>94eeae23786e128c0635f305ba7eebbb89af0023</td><td>Journal of Machine Learning Research 18 (2018) 1-34
+<br/>Submitted 01/17; Revised 4/18; Published 6/18
+<br/>Emergence of Invariance and Disentanglement
+<br/>in Deep Representations∗
+<br/>Department of Computer Science
+<br/><b>University of California</b><br/>Los Angeles, CA 90095, USA
+<br/>Department of Computer Science
+<br/><b>University of California</b><br/>Los Angeles, CA 90095, USA
+<br/>Editor: Yoshua Bengio
+</td><td>('16163297', 'Alessandro Achille', 'alessandro achille')<br/>('1715959', 'Stefano Soatto', 'stefano soatto')</td><td>achille@cs.ucla.edu
+<br/>soatto@cs.ucla.edu
+</td></tr><tr><td>944faf7f14f1bead911aeec30cc80c861442b610</td><td>Action Tubelet Detector for Spatio-Temporal Action Localization
+</td><td>('1881509', 'Vicky Kalogeiton', 'vicky kalogeiton')<br/>('2492127', 'Philippe Weinzaepfel', 'philippe weinzaepfel')<br/>('1749692', 'Vittorio Ferrari', 'vittorio ferrari')<br/>('2462253', 'Cordelia Schmid', 'cordelia schmid')</td><td></td></tr><tr><td>9458c518a6e2d40fb1d6ca1066d6a0c73e1d6b73</td><td>5967
+<br/>A Benchmark and Comparative Study of
+<br/>Video-Based Face Recognition
+<br/>on COX Face Database
+</td><td>('7945869', 'Zhiwu Huang', 'zhiwu huang')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('3373117', 'Ruiping Wang', 'ruiping wang')<br/>('1705483', 'Haihong Zhang', 'haihong zhang')<br/>('1710195', 'Shihong Lao', 'shihong lao')<br/>('2378840', 'Alifu Kuerban', 'alifu kuerban')<br/>('1710220', 'Xilin Chen', 'xilin chen')</td><td></td></tr><tr><td>948af4b04b4a9ae4bff2777ffbcb29d5bfeeb494</td><td>Available online at www.sciencedirect.com
+<br/> Procedia Engineering 41 ( 2012 ) 465 – 472
+<br/>International Symposium on Robotics and Intelligent Sensors 2012 (IRIS 2012)
+<br/>Face Recognition From Single Sample Per Person by Learning of
+<br/>Generic Discriminant Vectors
+<br/><b>aFaculty of Electrical Engineering, University of Technology MARA, Shah Alam, 40450 Selangor, Malaysia</b><br/><b>bFaculty of Engineering, International Islamic University, Jalan Gombak, 53100 Kuala Lumpur, Malaysia</b></td><td>('7453141', 'Fadhlan Hafiz', 'fadhlan hafiz')<br/>('2412523', 'Amir A. Shafie', 'amir a. shafie')<br/>('9146253', 'Yasir Mohd Mustafah', 'yasir mohd mustafah')</td><td></td></tr><tr><td>94aa8a3787385b13ee7c4fdd2b2b2a574ffcbd81</td><td></td><td></td><td></td></tr><tr><td>94325522c9be8224970f810554611d6a73877c13</td><td></td><td></td><td></td></tr><tr><td>9487cea80f23afe9bccc94deebaa3eefa6affa99</td><td>Fast, Dense Feature SDM on an iPhone
+<br/><b>Queensland University of Technology, Brisbane, Queensland, Australia</b><br/><b>Carnegie Mellon University, Pittsburgh, PA, USA</b></td><td>('3231493', 'Ashton Fagg', 'ashton fagg')<br/>('1820249', 'Simon Lucey', 'simon lucey')<br/>('1729760', 'Sridha Sridharan', 'sridha sridharan')</td><td></td></tr><tr><td>9441253b638373a0027a5b4324b4ee5f0dffd670</td><td>A Novel Scheme for Generating Secure Face
+<br/>Templates Using BDA
+<br/>P.G. Student, Department of Computer Engineering,
+<br/>Associate Professor, Department of Computer
+<br/>MCERC,
+<br/>Nashik (M.S.), India
+</td><td>('40075681', 'Shraddha S. Shinde', 'shraddha s. shinde')<br/>('2590072', 'Anagha P. Khedkar', 'anagha p. khedkar')</td><td>e-mail: shraddhashinde@gmail.com
+</td></tr><tr><td>949699d0b865ef35b36f11564f9a4396f5c9cddb</td><td>Anders, Ende, Junghofer, Kissler & Wildgruber (Eds.)
+<br/>ISSN 0079-6123
+<br/>CHAPTER 18
+<br/>Processing of facial identity and expression: a
+<br/>psychophysical, physiological and computational
+<br/>perspective
+<br/>Sarah D. Chiller-Glaus2
+<br/><b>Max Planck Institute for Biological Cybernetics, Spemannstr. 38, 72076 T bingen, Germany</b><br/><b>University of Zurich, Zurich, Switzerland</b></td><td>('2388249', 'Adrian Schwaninger', 'adrian schwaninger')<br/>('1793750', 'Christian Wallraven', 'christian wallraven')</td><td></td></tr><tr><td>94ac3008bf6be6be6b0f5140a0bea738d4c75579</td><td></td><td></td><td></td></tr><tr><td>94e259345e82fa3015a381d6e91ec6cded3971b4</td><td>Classiflcation of Photometric Factors
+<br/>Based on Photometric Linearization
+<br/><b>The Institute of Scienti c and Industrial Research, Osaka University</b><br/>8-1 Mihogaoka, Ibaraki-shi, Osaka 567-0047, JAPAN
+<br/>2 Matsushita Electric Industrial Co., Ltd.
+<br/><b>Okayama University</b><br/>Okayama-shi, Okayama 700-8530, JAPAN
+</td><td>('3155610', 'Yasuhiro Mukaigawa', 'yasuhiro mukaigawa')<br/>('2740479', 'Yasunori Ishii', 'yasunori ishii')<br/>('1695509', 'Takeshi Shakunaga', 'takeshi shakunaga')</td><td>mukaigaw@am.sanken.osaka-u.ac.jp
+</td></tr><tr><td>94a11b601af77f0ad46338afd0fa4ccbab909e82</td><td></td><td></td><td></td></tr><tr><td>0efdd82a4753a8309ff0a3c22106c570d8a84c20</td><td>LDA WITH SUBGROUP PCA METHOD FOR FACIAL IMAGE RETRIEVAL
+<br/><b>Human Computer Interaction Lab., Samsung Advanced Institute of Technology, Korea</b></td><td>('34600044', 'Wonjun Hwang', 'wonjun hwang')<br/>('1700968', 'Tae-Kyun Kim', 'tae-kyun kim')<br/>('37980373', 'Seokcheol Kee', 'seokcheol kee')</td><td>wjhwang@sait.samsung.co.kr
+</td></tr><tr><td>0e5dcc6ae52625fd0637c6bba46a973e46d58b9c</td><td>Pareto Models for Multiclass Discriminative Linear
+<br/>Dimensionality Reduction
+<br/><b>University of Alberta, Edmonton, AB T6G 2E8, Canada</b><br/><b>bRobotics Institute, Carnegie Mellon University, Pittsburgh, PA 15213, U.S.A</b><br/><b>cCentre of Intelligent Machines, McGill University, Montr eal, QC H3A 0E9, Canada</b></td><td>('3141839', 'Fernando De La Torre', 'fernando de la torre')<br/>('1701344', 'Frank P. Ferrie', 'frank p. ferrie')</td><td></td></tr><tr><td>0e73d2b0f943cf8559da7f5002414ccc26bc77cd</td><td>Similarity Comparisons for Interactive Fine-Grained Categorization
+<br/><b>California Institute of Technology</b><br/>vision.caltech.edu
+<br/>Serge Belongie4
+<br/><b>Toyota Technological Institute at Chicago</b><br/>ttic.edu
+<br/>4 Cornell Tech
+<br/>tech.cornell.edu
+<br/>Approach
+<br/>1) Image
+<br/>Database w/
+<br/>Class Labels
+<br/>2) Collect Similarity
+<br/>Comparisons
+<br/>3) Learn Perceptual
+<br/>Embedding
+<br/>A
+<br/>Mallard
+<br/>Cardinal
+<br/>?
+<br/>1) Query Image
+<br/>2) Computer
+<br/>Vision
+<br/>B
+<br/>3) Human-in-the-Loop Categorization
+<br/>C
+<br/>(cid:1876)
+<br/>(cid:1855)
+<br/>(cid:1868)
+<br/>C
+<br/>D
+<br/>perceptual space
+<br/>where
+<br/>(cid:1826) True location of (cid:1876) in
+<br/>(cid:1872) Time step
+<br/>(cid:1847)(cid:3047) User responses at (cid:1872)
+<br/>(cid:1876) Query image
+<br/>(cid:1855) Class
+<br/>INTERACTIVE
+<br/>CATEGORIZATION
+<br/>• Compute per-class probabilities as:
+<br/>(cid:1826)
+<br/>(cid:1868)(cid:1855),|(cid:1876),(cid:1847)(cid:3047) (cid:1503)(cid:1868)(cid:1855),(cid:1847)(cid:3047)|(cid:1876) = (cid:3505) (cid:1868)(cid:1855),(cid:1826),(cid:1847)(cid:3047)|(cid:1876)(cid:1856)(cid:1826)
+<br/>(cid:1875)(cid:3047)=(cid:1868)(cid:1855),(cid:1826),(cid:1847)(cid:3047)|(cid:1876) =(cid:1868)(cid:1847)(cid:3047)| (cid:1855),(cid:1826),(cid:1876) (cid:1868)(cid:1855),(cid:1826)(cid:1876)
+<br/>(cid:1868)(cid:1855),|(cid:1876),(cid:1847)(cid:3047) (cid:3406)(cid:963)
+<br/>(cid:1875)(cid:3038)(cid:3047)
+<br/>(cid:3038),(cid:3030)(cid:3286)(cid:2880)(cid:3030)(cid:963) (cid:1875)(cid:3038)(cid:3047)
+<br/>i.e. sum of weights of examples of class (cid:1855)
+<br/>(cid:3038)
+<br/>where (cid:1863) enumerates training examples
+<br/>• Weight (cid:1875)(cid:3038) represents how likely (cid:1826)(cid:3038) is
+<br/>true location (cid:1826):
+<br/>(cid:1875)(cid:3038)(cid:3047)=(cid:1868)(cid:1855)(cid:3038),(cid:1826)(cid:3038),(cid:1847)(cid:3047)|(cid:1876) =(cid:1868)(cid:1847)(cid:3047)| (cid:1855)(cid:3038),(cid:1826)(cid:3038),(cid:1876) (cid:1868)(cid:1855)(cid:3038),(cid:1826)(cid:3038)(cid:1876)
+<br/>Efficient computation
+<br/>• Approximate per-class probabilities as:
+<br/>such that
+<br/>(cid:1875)(cid:3038)(cid:3047)(cid:2878)(cid:2869)=(cid:1868)(cid:1873)(cid:3047)(cid:2878)(cid:2869)(cid:1826)(cid:3038)(cid:1875)(cid:3038)(cid:3047)
+<br/>= (cid:2038)(cid:1845)(cid:3036)(cid:3038)
+<br/>(cid:1875)(cid:3038)(cid:3047)
+<br/>(cid:963)
+<br/>(cid:2038)(cid:1845)(cid:3037)(cid:3038)
+<br/>(cid:3037)(cid:1488)(cid:3005)
+<br/>(cid:3513) Initialize weights (cid:1875)(cid:3038)(cid:2868)= (cid:1868)(cid:1855)(cid:3038),(cid:1826)(cid:3038)(cid:1876)
+<br/>(cid:3514) Update weights (cid:1875)(cid:3038)(cid:3047)(cid:2878)(cid:2869) when user answers
+<br/>Efficient update rule:
+<br/>a similarity question
+<br/>(cid:3515) Update per-class probabilities
+<br/>?
+<br/>(cid:3047)
+<br/>(cid:1847)
+<br/>(cid:1876)
+<br/>(cid:1855)
+<br/>(cid:1868)
+<br/>D
+<br/>A
+<br/>Learning a Metric
+<br/>• Given set of triplet comparisons (cid:2286), learn
+<br/>embedding (cid:1800) of (cid:1840) training images with
+<br/>From (cid:1800), generate similarity matrix
+<br/>(cid:1845)(cid:1488)(cid:1840)×(cid:1840)
+<br/>stochastic triplet embedding [van der Maaten
+<br/>& Weinberger 2012]
+<br/>B
+<br/>D
+<br/>D
+<br/>Computer Vision
+<br/>• Easy to map off-the-shelf CV
+<br/>algorithms into framework, e.g.,
+<br/>multiclass classification scores
+<br/>(cid:1868)(cid:1855),(cid:1826)(cid:1876) (cid:1503)(cid:1868)(cid:1855)|(cid:1876)
+<br/>Incorporate independent user
+<br/>response as:
+<br/>Incorporating Users
+<br/>• (cid:1830) is grid of images for each question
+<br/>(cid:1868)(cid:1873)(cid:1826) = (cid:2038)(cid:1871)((cid:1826),(cid:1826)(cid:3036))
+<br/>(cid:963)
+<br/>(cid:2038)(cid:1871)((cid:1826),(cid:1826)(cid:3037))
+<br/>(cid:3037)(cid:1488)(cid:3005)
+<br/>entropy of (cid:1868)(cid:1855),(cid:1826)(cid:3038),(cid:1847)(cid:3047)|(cid:1876)
+<br/>largest (cid:1875)(cid:3038)(cid:3047)
+<br/>Selecting the Display
+<br/>• Approximate solution: maximizes
+<br/>[Fang & Geman 2005]
+<br/>From each cluster, select image with
+<br/>expected information gain in terms of
+<br/>• Group images into equal-weight clusters
+<br/>Results
+<br/>Learned Embedding
+<br/>Learn category-level embedding of
+<br/>• Category-level embedding requires
+<br/>(cid:1840)=200 nodes
+<br/>Simulated noisy users
+<br/>With computer vision
+<br/>Deterministic users
+<br/>No computer vision
+<br/>Deterministic users
+<br/>With computer vision
+<br/>Interactive Categorization
+<br/>• Using computer vision reduces the burden on the user
+<br/>• The system is robust to user noise
+<br/>much fewer comparisons compared to
+<br/>at the instance-level
+<br/>Similarity comparisons are advantageous compared to part/attribute questions
+<br/>Intelligently selecting image displays reduces effort
+<br/>System supports multiple similarity
+<br/>metrics as different types of
+<br/>questions
+<br/>Simulate perceptual spaces using
+<br/>CUB-200-2011 attribute
+<br/>annotations
+<br/>Multiple Metrics
+<br/>CV, Color Similarity
+<br/>CV, Shape Similarity
+<br/>CV, Pattern Similarity
+<br/>No CV, Color/Shape/Pattern Similarity
+<br/>CV, Color/Shape/Pattern Similarity
+<br/>Method
+<br/>Avg. #Qs
+<br/>2.70
+<br/>2.67
+<br/>2.67
+<br/>2.64
+<br/>4.21
+<br/>Qualitative Results
+<br/>Vermilion
+<br/>Fly-
+<br/>catcher
+<br/>Query Image
+<br/>Q1: Most Similar?
+<br/>Q2: Most Similar?
+<br/>Query Image
+<br/>Q1: Most Similar By Color?
+<br/>Q2: Most Similar By Pattern?
+<br/>Hooded
+<br/>Merganser
+<br/><b>University of California, San Diego</b><br/>vision.ucsd.edu
+<br/>Overview
+<br/>Problem
+<br/>• Parts and attributes exhibit weaknesses
+<br/>(cid:190) Scalability issues; costly; reliance on experts, but experts are scarce
+<br/>Proposed Solution
+<br/>• Use relative similarity comparisons to reduce dependence on expert-
+<br/>derived part and attribute vocabularies
+<br/>Contributions
+<br/>• We present an efficient, flexible, and scalable system for interactive
+<br/>fine-grained visual categorization
+<br/>(cid:190) Based on perceptual similarity
+<br/>(cid:190) Combines similarity metrics and computer vision methods in a
+<br/>unified framework
+<br/>• Outperforms state-of-the-art relevance feedback-based and
+<br/>part/attribute-based approaches
+<br/>Similarity Comparisons
+<br/>A
+<br/>A. Collect grid-based
+<br/>similarity comp-
+<br/>arisons that do not
+<br/>require prior expertise
+<br/>B. Broadcast grid-based
+<br/>comparisons to triplet
+<br/>comparisons
+<br/>B
+<br/>(cid:2286)= (cid:1861),(cid:1862),(cid:1864) (cid:1876)(cid:3036) more similar to (cid:1876)(cid:3037) than (cid:1876)(cid:3039)
+<br/>Is this more similar to… (cid:1876)(cid:3036)
+<br/>(cid:1876)(cid:3037)
+<br/>This one?
+<br/>(cid:1876)(cid:3039)
+<br/>Or this one?
+<br/>(cid:1871) , > (cid:1871) ,
+<br/>(cid:1871) , > (cid:1871) ,
+<br/>(cid:1871) , > (cid:1871) ,
+<br/>(cid:1871) , > (cid:1871) ,
+<br/>(cid:1871) , > (cid:1871) ,
+<br/>(cid:1871) , > (cid:1871) ,
+<br/>(cid:1871) , > (cid:1871) ,
+<br/>(cid:1871) , > (cid:1871) ,
+<br/>?
+<br/>(cid:1871)((cid:1861),(cid:1862)): perceptual similarity
+<br/>between images (cid:1876)(cid:3036) and (cid:1876)(cid:3037)
+</td><td>('2367820', 'Catherine Wah', 'catherine wah')<br/>('2996914', 'Grant Van Horn', 'grant van horn')<br/>('3251767', 'Steve Branson', 'steve branson')<br/>('35208858', 'Subhransu Maji', 'subhransu maji')<br/>('1690922', 'Pietro Perona', 'pietro perona')</td><td>{sbranson,perona}@caltech.edu
+<br/>smaji@ttic.edu
+<br/>sjb@cs.cornell.edu
+<br/>{cwah@cs,gvanhorn@}ucsd.edu
+</td></tr><tr><td>0ed0e48b245f2d459baa3d2779bfc18fee04145b</td><td>Semi-Supervised Dimensionality Reduction∗
+<br/>1National Laboratory for Novel Software Technology
+<br/><b>Nanjing University, Nanjing 210093, China</b><br/>2Department of Computer Science and Engineering
+<br/><b>Nanjing University of Aeronautics and Astronautics, Nanjing 210016, China</b></td><td>('1772283', 'Daoqiang Zhang', 'daoqiang zhang')<br/>('1692625', 'Zhi-Hua Zhou', 'zhi-hua zhou')<br/>('1680768', 'Songcan Chen', 'songcan chen')</td><td>dqzhang@nuaa.edu.cn
+<br/>zhouzh@nju.edu.cn
+<br/>s.chen@nuaa.edu.cn
+</td></tr><tr><td>0eac652139f7ab44ff1051584b59f2dc1757f53b</td><td>Efficient Branching Cascaded Regression
+<br/>for Face Alignment under Significant Head Rotation
+<br/><b>University of Wisconsin Madison</b></td><td>('2721523', 'Brandon M. Smith', 'brandon m. smith')<br/>('1724754', 'Charles R. Dyer', 'charles r. dyer')</td><td>bmsmith@cs.wisc.edu
+<br/>dyer@cs.wisc.edu
+</td></tr><tr><td>0ef96d97365899af797628e80f8d1020c4c7e431</td><td>Improving the Speed of Kernel PCA on Large Scale Datasets
+<br/><b>Institute for Vision Systems Engineering</b><br/><b>Monash University, Victoria, Australia</b></td><td>('2451050', 'Tat-Jun Chin', 'tat-jun chin')<br/>('2220700', 'David Suter', 'david suter')</td><td>{ tat.chin | d.suter }@eng.monash.edu.au
+</td></tr><tr><td>0e7f277538142fb50ce2dd9179cffdc36b794054</td><td>Combining Image Captions and Visual Analysis
+<br/>for Image Concept Classification
+<br/>Department of Information and
+<br/>Knowledge Engineering
+<br/>Faculty of Informatics and
+<br/><b>Statistics, University of</b><br/>Economics, Prague
+<br/>Multimedia and Vision
+<br/>Research Group
+<br/><b>Queen Mary University</b><br/>Mile End Road, London
+<br/>United Kingdom
+<br/>Department of Information and
+<br/>Knowledge Engineering
+<br/>Faculty of Informatics and
+<br/><b>Statistics, University of</b><br/>Economics, Prague
+<br/>Department of Information and
+<br/>Knowledge Engineering
+<br/>Faculty of Informatics and
+<br/><b>Statistics, University of</b><br/>Economics, Prague
+<br/>Multimedia and Vision
+<br/>Research Group
+<br/><b>Queen Mary University</b><br/>Mile End Road, London
+<br/>United Kingdom
+</td><td>('2005670', 'Tomas Kliegr', 'tomas kliegr')<br/>('3183509', 'Krishna Chandramouli', 'krishna chandramouli')<br/>('2073485', 'Jan Nemrava', 'jan nemrava')<br/>('1740821', 'Vojtech Svatek', 'vojtech svatek')<br/>('1732655', 'Ebroul Izquierdo', 'ebroul izquierdo')</td><td>tomas.kliegr@vse.cz
+<br/>krishna.c@ieee.org
+<br/>nemrava@vse.cz
+<br/>svatek@vse.cz
+<br/>ebroul.izquierdo@elec.qmul.ac.uk
+</td></tr><tr><td>0e8760fc198a7e7c9f4193478c0e0700950a86cd</td><td></td><td></td><td></td></tr><tr><td>0ec0fc9ed165c40b1ef4a99e944abd8aa4e38056</td><td>HHS Public Access
+<br/>Author manuscript
+<br/>Curr Res Psychol. Author manuscript; available in PMC 2017 January 17.
+<br/>Published in final edited form as:
+<br/>Curr Res Psychol. 2016 ; 6(2): 22–30. doi:10.3844/crpsp.2015.22.30.
+<br/>The Role of Perspective-Taking on Ability to Recognize Fear
+<br/><b>Virginia Polytechnic Institute and State University, Blacksburg</b><br/>Virginia, USA
+<br/><b>Virginia Polytechnic Institute and State University, Blacksburg, Virginia</b><br/>USA
+<br/><b>Virginia Tech Carilion Research Institute</b><br/>Roanoke, Virginia, USA
+<br/><b>Virginia Polytechnic Institute and State University, Blacksburg</b><br/>Virginia, USA
+</td><td>('2974674', 'Andrea Trubanova', 'andrea trubanova')<br/>('2359365', 'Inyoung Kim', 'inyoung kim')<br/>('3712207', 'Marika C. Coffman', 'marika c. coffman')<br/>('6057482', 'Martha Ann Bell', 'martha ann bell')<br/>('2294952', 'Stephen M. LaConte', 'stephen m. laconte')<br/>('1709677', 'Denis Gracanin', 'denis gracanin')<br/>('2197231', 'Susan W. White', 'susan w. white')</td><td></td></tr><tr><td>0e652a99761d2664f28f8931fee5b1d6b78c2a82</td><td>BERGSTRA, YAMINS, AND COX: MAKING A SCIENCE OF MODEL SEARCH
+<br/>Making a Science of Model Search
+<br/>J. Bergstra1
+<br/>D. Yamins2
+<br/>D. D. Cox1
+<br/><b>Rowland Institute at Harvard</b><br/>100 Edwin H. Land Boulevard
+<br/>Cambridge, MA 02142, USA
+<br/>2 Department of Brain and Cognitive
+<br/>Sciences
+<br/><b>Massachusetts Institute of Technology</b><br/>Cambridge, MA 02139, USA
+</td><td></td><td>bergstra@rowland.harvard.edu
+<br/>yamins@mit.edu
+<br/>davidcox@fas.harvard.edu
+</td></tr><tr><td>0e50fe28229fea45527000b876eb4068abd6ed8c</td><td>Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+<br/>2936
+</td><td></td><td></td></tr><tr><td>0eff410cd6a93d0e37048e236f62e209bc4383d1</td><td>Anchorage Convention District
+<br/>May 3-8, 2010, Anchorage, Alaska, USA
+<br/>978-1-4244-5040-4/10/$26.00 ©2010 IEEE
+<br/>4803
+</td><td></td><td></td></tr><tr><td>0ea7b7fff090c707684fd4dc13e0a8f39b300a97</td><td>Integrated Face Analytics Networks through
+<br/>Cross-Dataset Hybrid Training
+<br/><b>School of Computing, National University of Singapore, Singapore</b><br/><b>Electrical and Computer Engineering, National University of Singapore, Singapore</b><br/><b>Beijing Institute of Technology University, P. R. China</b><br/>4 SAP Innovation Center Network Singapore, Singapore
+</td><td>('2757639', 'Jianshu Li', 'jianshu li')<br/>('2052311', 'Jian Zhao', 'jian zhao')<br/>('1715286', 'Terence Sim', 'terence sim')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')<br/>('3124720', 'Shengtao Xiao', 'shengtao xiao')<br/>('33221685', 'Jiashi Feng', 'jiashi feng')<br/>('40345914', 'Fang Zhao', 'fang zhao')<br/>('1943724', 'Jianan Li', 'jianan li')</td><td>{jianshu,xiao_shengtao,zhaojian90}@u.nus.edu,lijianan15@gmail.com
+<br/>{elezhf,elefjia,eleyans}@nus.edu.sg,tsim@comp.nus.edu.sg
+</td></tr><tr><td>0ee737085af468f264f57f052ea9b9b1f58d7222</td><td>SiGAN: Siamese Generative Adversarial Network
+<br/>for Identity-Preserving Face Hallucination
+</td><td>('3192517', 'Chih-Chung Hsu', 'chih-chung hsu')<br/>('1685088', 'Chia-Wen Lin', 'chia-wen lin')<br/>('3404171', 'Weng-Tai Su', 'weng-tai su')<br/>('1705205', 'Gene Cheung', 'gene cheung')</td><td></td></tr><tr><td>0ee661a1b6bbfadb5a482ec643573de53a9adf5e</td><td>JOURNAL OF LATEX CLASS FILES, VOL. X, NO. X, MONTH YEAR
+<br/>On the Use of Discriminative Cohort Score
+<br/>Normalization for Unconstrained Face Recognition
+</td><td>('1725688', 'Massimo Tistarelli', 'massimo tistarelli')<br/>('2384894', 'Yunlian Sun', 'yunlian sun')<br/>('2404207', 'Norman Poh', 'norman poh')</td><td></td></tr><tr><td>0e36ada8cb9c91f07c9dcaf196d036564e117536</td><td>Much Ado About Time: Exhaustive Annotation of Temporal Data
+<br/><b>Carnegie Mellon University</b><br/>2Inria
+<br/><b>University of Washington 4The Allen Institute for AI</b><br/>http://allenai.org/plato/charades/
+</td><td>('34280810', 'Gunnar A. Sigurdsson', 'gunnar a. sigurdsson')<br/>('2192178', 'Olga Russakovsky', 'olga russakovsky')<br/>('2270286', 'Ali Farhadi', 'ali farhadi')<br/>('1785596', 'Ivan Laptev', 'ivan laptev')<br/>('1737809', 'Abhinav Gupta', 'abhinav gupta')</td><td></td></tr><tr><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td><td>Face Detection, Pose Estimation, and Landmark Localization in the Wild
+<br/><b>University of California, Irvine</b></td><td>('32542103', 'Xiangxin Zhu', 'xiangxin zhu')</td><td>{xzhu,dramanan}@ics.uci.edu
+</td></tr><tr><td>0ebc50b6e4b01eb5eba5279ce547c838890b1418</td><td>Similarity-Preserving Binary Signature for Linear Subspaces
+<br/>∗State Key Laboratory of Intelligent Technology and Systems,
+<br/>Tsinghua National Laboratory for Information Science and Technology (TNList),
+<br/><b>Tsinghua University, Beijing 100084, China</b><br/><b>National University of Singapore, Singapore</b></td><td>('1901939', 'Jianqiu Ji', 'jianqiu ji')<br/>('38376468', 'Jianmin Li', 'jianmin li')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')<br/>('1713616', 'Qi Tian', 'qi tian')<br/>('34997537', 'Bo Zhang', 'bo zhang')</td><td>jijq10@mails.tsinghua.edu.cn, {lijianmin, dcszb}@mail.tsinghua.edu.cn
+<br/>‡Department of Computer Science, University of Texas at San Antonio, qi.tian@utsa.edu
+<br/>eleyans@nus.edu.sg
+</td></tr><tr><td>0e49a23fafa4b2e2ac097292acf00298458932b4</td><td>Theory and Applications of Mathematics & Computer Science 3 (1) (2013) 13–31
+<br/>Unsupervised Detection of Outlier Images Using Multi-Order
+<br/>Image Transforms
+<br/><b>aLawrence Technological University, 21000 W Ten Mile Rd., South eld, MI 48075, United States</b></td><td></td><td></td></tr><tr><td>0ec1673609256b1e457f41ede5f21f05de0c054f</td><td>Blessing of Dimensionality: High-dimensional Feature and Its Efficient
+<br/>Compression for Face Verification
+<br/><b>University of Science and Technology of China</b><br/>Microsoft Research Asia
+</td><td>('39447786', 'Dong Chen', 'dong chen')<br/>('2032273', 'Xudong Cao', 'xudong cao')<br/>('1716835', 'Fang Wen', 'fang wen')<br/>('40055995', 'Jian Sun', 'jian sun')</td><td>chendong@mail.ustc.edu.cn
+<br/>{xudongca,fangwen,jiansun}@microsoft.com
+</td></tr><tr><td>0e3840ea3227851aaf4633133dd3cbf9bbe89e5b</td><td></td><td></td><td></td></tr><tr><td>0e5dad0fe99aed6978c6c6c95dc49c6dca601e6a</td><td></td><td></td><td></td></tr><tr><td>0ea38a5ba0c8739d1196da5d20efb13406bb6550</td><td>Relative Attributes
+<br/><b>Toyota Technological Institute Chicago (TTIC</b><br/><b>University of Texas at Austin</b></td><td>('1713589', 'Devi Parikh', 'devi parikh')<br/>('1794409', 'Kristen Grauman', 'kristen grauman')</td><td>dparikh@ttic.edu
+<br/>grauman@cs.utexas.edu
+</td></tr><tr><td>0e21c9e5755c3dab6d8079d738d1188b03128a31</td><td>Constrained Clustering and Its Application to Face Clustering in Videos
+<br/>1NLPR, CASIA, Beijing 100190, China
+<br/><b>Rensselaer Polytechnic Institute, Troy, NY 12180, USA</b></td><td>('2040015', 'Baoyuan Wu', 'baoyuan wu')<br/>('40382978', 'Yifan Zhang', 'yifan zhang')<br/>('39495638', 'Bao-Gang Hu', 'bao-gang hu')<br/>('1726583', 'Qiang Ji', 'qiang ji')</td><td></td></tr><tr><td>0e78af9bd0f9a0ce4ceb5f09f24bc4e4823bd698</td><td>Spontaneous Subtle Expression Recognition:
+<br/>Imbalanced Databases & Solutions (cid:63)
+<br/>1 Faculty of Engineering,
+<br/><b>Multimedia University (MMU), Cyberjaya, Malaysia</b><br/>2 Faculty of Computing & Informatics,
+<br/><b>Multimedia University (MMU), Cyberjaya, Malaysia</b></td><td>('2339975', 'John See', 'john see')</td><td>lengoanhcat@gmail.com, raphael@mmu.edu.my
+<br/>johnsee@mmu.edu.my
+</td></tr><tr><td>0e93a5a7f6dbdb3802173dca05717d27d72bfec0</td><td>Attribute Recognition by Joint Recurrent Learning of Context and Correlation
+<br/><b>Queen Mary University of London</b><br/>Vision Semantics Ltd.2
+</td><td>('48093957', 'Jingya Wang', 'jingya wang')<br/>('2171228', 'Xiatian Zhu', 'xiatian zhu')<br/>('2073354', 'Shaogang Gong', 'shaogang gong')<br/>('47113208', 'Wei Li', 'wei li')</td><td>{jingya.wang, s.gong, wei.li}@qmul.ac.uk
+<br/>eddy@visionsemantics.com
+</td></tr><tr><td>0e2ea7af369dbcaeb5e334b02dd9ba5271b10265</td><td></td><td></td><td></td></tr><tr><td>0ed1c1589ed284f0314ed2aeb3a9bbc760dcdeb5</td><td>Max-Margin Early Event Detectors
+<br/>Minh Hoai
+<br/><b>Robotics Institute, Carnegie Mellon University</b></td><td>('1707876', 'Fernando De la Torre', 'fernando de la torre')</td><td></td></tr><tr><td>0e7c70321462694757511a1776f53d629a1b38f3</td><td>NIST Special Publication 1136
+<br/>2012 Proceedings of the
+<br/>Performance Metrics for Intelligent
+<br/>Systems (PerMI ‘12) Workshop
+<br/>
+<br/>http://dx.doi.org/10.6028/NIST.SP.1136
+</td><td>('39737545', 'Rajmohan Madhavan', 'rajmohan madhavan')<br/>('2105056', 'Elena R. Messina', 'elena r. messina')<br/>('31797581', 'Brian A. Weiss', 'brian a. weiss')</td><td></td></tr><tr><td>0ec2049a1dd7ae14c7a4c22c5bcd38472214f44d</td><td>Fast Subspace Search via Grassmannian Based Hashing
+<br/><b>University of Minnesota</b><br/><b>Proto Labs, Inc</b><br/><b>Columbia University</b><br/><b>University of Minnesota</b></td><td>('1712593', 'Xu Wang', 'xu wang')<br/>('1734862', 'Stefan Atev', 'stefan atev')<br/>('1738310', 'John Wright', 'john wright')<br/>('1919996', 'Gilad Lerman', 'gilad lerman')</td><td>wang1591@umn.edu
+<br/>stefan.atev@gmail.com
+<br/>johnwright@ee.columbia.edu
+<br/>lerman@umn.edu
+</td></tr><tr><td>0ec67c69e0975cfcbd8ba787cc0889aec4cc5399</td><td>Locating Salient Object Features
+<br/>K.N.Walker, T.F.Cootes and C.J.Taylor
+<br/>Dept. Medical Biophysics,
+<br/><b>Manchester University, UK</b></td><td></td><td>knw@sv1.smb.man.ac.uk
+</td></tr><tr><td>0e1983e9d0e8cb4cbffef7af06f6bc8e3f191a64</td><td>Estimating Illumination Parameters In Real Space
+<br/>With Application To Image Relighting
+<br/><b>Key Laboratory of Pervasive Computing (Tsinghua University), Ministry of Education</b><br/>Guangyou Xu
+<br/><b>Tsinghua University, Beijing 100084, P.R.China</b><br/>Categories and Subject Descriptors
+<br/>I.4.8 [Image Processing and Computer Vision]: Scene Analysis
+<br/>– photometry, shading, shape.
+<br/>General Terms
+<br/>Algorithms
+<br/>Keywords
+<br/>Illumination parameters estimation, spherical harmonic, image
+<br/>relighting.
+<br/>1. INTRODUCTION
+<br/>Illumination condition is a fundamental problem in both computer
+<br/>vision and graphics. For instance, the estimation of lighting
+<br/>condition is important in face relighting and recognition, since
+<br/>synthesized realistic images can alleviate the small sample
+<br/>problem in face recognition applications.
+<br/>Recently Basri [2] and Ramamoorthi [3] independently apply the
+<br/>spherical harmonics techniques to explain the low dimensionality
+<br/>of differently illuminated images for convex Lambertian object.
+<br/>Ramamoorthi even derives analytically the principal components
+<br/>of this low dimensional image subspace. This method have
+<br/>already been widely applied to the areas of inverse rendering,
+<br/>image relighting, face recognition, etc.
+<br/>One of the limitations of this method is that the cast shadows are
+<br/>ignored. In the experiment results of [1], the cast shadows
+<br/>improve the face recognition result on the most extreme light
+<br/>directions. How to overcome this limitation is one of the
+<br/>motivations of our work. Furthermore, rendering realistic image
+<br/>need the real light direction. Although the spherical harmonics
+<br/>coefficient of illumination could be easily estimated, how to
+<br/>recover the real light direction from these coefficients is still a
+<br/>problem.
+<br/>We propose a novel algorithm for estimating the illumination
+<br/>parameters including the direction and strength of point light with
+<br/>the strength of ambient illumination. Images are projected into the
+<br/>analytical subspace derived in [3] according to a known 3D
+<br/>geometry, then the illumination parameters are estimated from
+<br/>these projected coefficients. Our primary experiments proved the
+<br/>stability and effectiveness of this method.
+<br/>Copyright is held by the author/owner(s).
+<br/>MM'05, November 6-11, 2005, Singapore.
+<br/>ACM 1-59593-044-2/05/0011.
+<br/>2. METHODOLOGY
+<br/>Consider a convex Lambertian object of known geometry with
+<br/>uniform albedo illuminated by distant isotropic light sources, the
+<br/>irradiance could be expressed as a linear combination of the
+<br/>spherical harmonic basis functions. In fact, 99% of the energy of
+<br/>the Lambertian BRDF filter is constrained by the first 9 basis [3].
+<br/>In this paper we consider a simple illumination model consisting
+<br/>of one distant directional point light source and ambient
+<br/>illumination. We could write the illumination coefficients as
+<br/>formula of four illumination parameters (Azimuth and Elevation
+<br/>angle for point light direction, Sp for point light strength and Sa
+<br/>for ambient illumination strength).
+<br/>One problem is that, although the spherical harmonic basis
+<br/>functions are orthogonal in the sphere coordinates, they are not
+<br/>orthogonal in the image space. This property causes the algorithm
+<br/>unstable in some case. We choose the analytical subspace
+<br/>constructed in [3], which requires no training data. The image is
+<br/>projected to this subspace and the PCA coefficients are computed.
+<br/>Then the illumination parameters could be estimated from these
+<br/>PCA coefficients by solving a nonlinear least-square problem.
+<br/>Finding a global extreme of nonlinear problem is very difficult.
+<br/>We choose the popular Gauss-Newton method to solve this
+<br/>minimal problem, which might stay on local minima. The
+<br/>experimental results show that if we choose enough PCA
+<br/>coefficients, the energy surface guarantee the local minima is
+<br/>same as the global minima.(Note that we can use only a part of
+<br/>the PCA coefficients to solve this nonlinear minimal problem.)
+<br/>Actually the first five PCA coefficients are enough for estimate
+<br/>these parameters stably. (For limited length of this paper, the
+<br/>equations and stability analysis of the result is omitted.)
+<br/>3. RESULTS
+<br/>We experimented on both synthesized sphere images and real face
+<br/>images in CMU PIE database [4] and Yale Database B [1].
+<br/>3.1 Synthesized sphere images result
+<br/>First, we randomly select the four illumination parameters and
+<br/>synthesize 600 sphere images under the different illumination, in
+<br/>which the incident directions are limited to the upper hemisphere
+<br/>and the light strength parameters are normalized to sum to unity.
+<br/>Then we test our algorithm on these synthesized sphere images.
+<br/>Similar to the Yale Database B, we divide the images into 5
+<br/>subsets (12°, 25°, 55°, 77°, 90°) according to the angle which the
+<br/>light source direction makes with the camera's axis.
+<br/>1039 </td><td>('13801076', 'Feng Xie', 'feng xie')<br/>('3265275', 'Linmi Tao', 'linmi tao')</td><td>xiefeng97@mails.tsinghua.edu.cn
+<br/>{linmi, xgy-dcs}@tsinghua.edu.cn
+</td></tr><tr><td>0ee5c4112208995bf2bb0fb8a87efba933a94579</td><td>Understanding Clothing Preference Based on Body Shape From Online Sources
+<br/>Fashion is Taking Shape:
+<br/>1Scalable Learning and Perception Group, 2Real Virtual Humans
+<br/><b>Max Planck Institute for Informatics, Saarbrücken, Germany</b></td><td>('26879574', 'Hosnieh Sattar', 'hosnieh sattar')<br/>('1739548', 'Mario Fritz', 'mario fritz')<br/>('2635816', 'Gerard Pons-Moll', 'gerard pons-moll')</td><td>{sattar,mfritz,gpons}@mpi-inf.mpg.de
+</td></tr><tr><td>0e1a18576a7d3b40fe961ef42885101f4e2630f8</td><td>Automated Detection and Identification of
+<br/>Persons in Video
+<br/>Visual Geometry Group
+<br/>Department of Engineering Science
+<br/><b>University of Oxford</b><br/>September 24, 2004
+</td><td>('3056091', 'Mark Everingham', 'mark everingham')</td><td>{me|az}@robots.ox.ac.uk
+</td></tr><tr><td>6080f26675e44f692dd722b61905af71c5260af8</td><td></td><td></td><td></td></tr><tr><td>60a006bdfe5b8bf3243404fae8a5f4a9d58fa892</td><td>A Reference-Based Framework for
+<br/>Pose Invariant Face Recognition
+<br/>1 HP Labs, Palo Alto, CA 94304, USA
+<br/>2 Google Inc., Mountain View, CA 94043, USA
+<br/><b>BRIC, University of North Carolina at Chapel Hill, NC 27599, USA</b><br/><b>Center for Research in Intelligent Systems, University of California, Riverside, CA 92521, USA</b></td><td>('1784929', 'Mehran Kafai', 'mehran kafai')<br/>('1745657', 'Kave Eshghi', 'kave eshghi')<br/>('39776603', 'Le An', 'le an')<br/>('1707159', 'Bir Bhanu', 'bir bhanu')</td><td>mehran.kafai@hp.com, kave@google.com, lan004@unc.edu, bhanu@cris.ucr.edu
+</td></tr><tr><td>6043006467fb3fd1e9783928d8040ee1f1db1f3a</td><td>Face Recognition with Learning-based Descriptor
+<br/><b>The Chinese University of Hong Kong</b><br/><b>ITCS, Tsinghua University</b><br/><b>Shenzhen Institutes of Advanced Technology</b><br/>4Microsoft Research Asia
+<br/>Chinese Academy of Sciences, China
+</td><td>('2695115', 'Zhimin Cao', 'zhimin cao')<br/>('2274228', 'Qi Yin', 'qi yin')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')<br/>('40055995', 'Jian Sun', 'jian sun')</td><td></td></tr><tr><td>600025c9a13ff09c6d8b606a286a79c823d89db8</td><td>Machine Learning and Applications: An International Journal (MLAIJ) Vol.1, No.1, September 2014
+<br/>A REVIEW ON LINEAR AND NON-LINEAR
+<br/>DIMENSIONALITY REDUCTION
+<br/>TECHNIQUES
+<br/>1Arunasakthi. K, 2KamatchiPriya. L
+<br/>1 Assistant Professor
+<br/>Department of Computer Science and Engineering
+<br/><b>Ultra College of Engineering and Technology for Women, India</b><br/>2Assistant Professor
+<br/>Department of Computer Science and Engineering
+<br/><b>Vickram College of Engineering, Enathi, Tamil Nadu, India</b></td><td></td><td></td></tr><tr><td>60d765f2c0a1a674b68bee845f6c02741a49b44e</td><td></td><td></td><td></td></tr><tr><td>60c24e44fce158c217d25c1bae9f880a8bd19fc3</td><td>Controllable Image-to-Video Translation:
+<br/>A Case Study on Facial Expression Generation
+<br/>MIT CSAIL
+<br/>Wenbing Huang
+<br/>Tencent AI Lab
+<br/>MIT-Watson Lab
+<br/>Tencent AI Lab
+<br/>Tencent AI Lab
+</td><td>('2548303', 'Lijie Fan', 'lijie fan')<br/>('2551285', 'Chuang Gan', 'chuang gan')<br/>('1768190', 'Junzhou Huang', 'junzhou huang')<br/>('40206014', 'Boqing Gong', 'boqing gong')</td><td></td></tr><tr><td>60e2b9b2e0db3089237d0208f57b22a3aac932c1</td><td>Frankenstein: Learning Deep Face Representations
+<br/>using Small Data
+</td><td>('38819702', 'Guosheng Hu', 'guosheng hu')<br/>('1766837', 'Xiaojiang Peng', 'xiaojiang peng')<br/>('2653152', 'Yongxin Yang', 'yongxin yang')<br/>('1697755', 'Timothy M. Hospedales', 'timothy m. hospedales')<br/>('34602236', 'Jakob Verbeek', 'jakob verbeek')</td><td></td></tr><tr><td>60542b1a857024c79db8b5b03db6e79f74ec8f9f</td><td>Learning to Detect Human-Object Interactions
+<br/><b>University of Michigan, Ann Arbor</b><br/><b>Washington University in St. Louis</b></td><td>('2820136', 'Yu-Wei Chao', 'yu-wei chao')<br/>('1860829', 'Yunfan Liu', 'yunfan liu')<br/>('9539636', 'Xieyang Liu', 'xieyang liu')<br/>('9344937', 'Huayi Zeng', 'huayi zeng')<br/>('8342699', 'Jia Deng', 'jia deng')</td><td>{ywchao,yunfan,lxieyang,jiadeng}@umich.edu
+<br/>{zengh}@wustl.edu
+</td></tr><tr><td>60d4cef56efd2f5452362d4d9ac1ae05afa970d1</td><td>Learning End-to-end Video Classification with Rank-Pooling
+<br/><b>Research School of Engineering, The Australian National University, ACT 2601, Australia</b><br/><b>Research School of Computer Science, The Australian National University, ACT 2601, Australia</b></td><td>('1688071', 'Basura Fernando', 'basura fernando')<br/>('2377076', 'Stephen Gould', 'stephen gould')</td><td>BASURA.FERNANDO@ANU.EDU.AU
+<br/>STEPHEN.GOULD@ANU.EDU.AU
+</td></tr><tr><td>60ce4a9602c27ad17a1366165033fe5e0cf68078</td><td>TECHNICAL NOTE
+<br/>DIGITAL & MULTIMEDIA SCIENCES
+<br/>J Forensic Sci, 2015
+<br/>doi: 10.1111/1556-4029.12800
+<br/>Available online at: onlinelibrary.wiley.com
+<br/>Ph.D.
+<br/>Combination of Face Regions in Forensic
+<br/>Scenarios*
+</td><td>('1808344', 'Pedro Tome', 'pedro tome')<br/>('1701431', 'Julian Fierrez', 'julian fierrez')<br/>('1692626', 'Ruben Vera-Rodriguez', 'ruben vera-rodriguez')<br/>('1732220', 'Javier Ortega-Garcia', 'javier ortega-garcia')</td><td></td></tr><tr><td>6097ea6fd21a5f86a10a52e6e4dd5b78a436d5bf</td><td></td><td></td><td></td></tr><tr><td>60c699b9ec71f7dcbc06fa4fd98eeb08e915eb09</td><td>Long-Term Video Interpolation with Bidirectional
+<br/>Predictive Network
+<br/><b>Peking University</b></td><td>('8082703', 'Xiongtao Chen', 'xiongtao chen')<br/>('1788029', 'Wenmin Wang', 'wenmin wang')<br/>('3258842', 'Jinzhuo Wang', 'jinzhuo wang')</td><td></td></tr><tr><td>60970e124aa5fb964c9a2a5d48cd6eee769c73ef</td><td>Subspace Clustering for Sequential Data
+<br/>School of Computing and Mathematics
+<br/><b>Charles Sturt University</b><br/>Bathurst, NSW 2795, Australia
+<br/>Division of Computational Informatics
+<br/>CSIRO
+<br/>North Ryde, NSW 2113, Australia
+</td><td>('40635684', 'Stephen Tierney', 'stephen tierney')<br/>('1750488', 'Junbin Gao', 'junbin gao')<br/>('1767638', 'Yi Guo', 'yi guo')</td><td>{stierney, jbgao}@csu.edu.au
+<br/>yi.guo@csiro.au
+</td></tr><tr><td>60efdb2e204b2be6701a8e168983fa666feac1be</td><td>Int J Comput Vis
+<br/>DOI 10.1007/s11263-017-1043-5
+<br/>Transferring Deep Object and Scene Representations for Event
+<br/>Recognition in Still Images
+<br/>Received: 31 March 2016 / Accepted: 1 September 2017
+<br/>© Springer Science+Business Media, LLC 2017
+</td><td>('33345248', 'Limin Wang', 'limin wang')<br/>('1915826', 'Zhe Wang', 'zhe wang')</td><td></td></tr><tr><td>60824ee635777b4ee30fcc2485ef1e103b8e7af9</td><td>Cascaded Collaborative Regression for Robust Facial
+<br/>Landmark Detection Trained using a Mixture of Synthetic and
+<br/>Real Images with Dynamic Weighting
+<br/>Life Member, IEEE, William Christmas, and Xiao-Jun Wu
+</td><td>('2976854', 'Zhen-Hua Feng', 'zhen-hua feng')<br/>('38819702', 'Guosheng Hu', 'guosheng hu')<br/>('1748684', 'Josef Kittler', 'josef kittler')</td><td></td></tr><tr><td>60643bdab1c6261576e6610ea64ea0c0b200a28d</td><td></td><td></td><td></td></tr><tr><td>60a20d5023f2bcc241eb9e187b4ddece695c2b9b</td><td>Invertible Nonlinear Dimensionality Reduction
+<br/>via Joint Dictionary Learning
+<br/>Department of Electrical and Computer Engineering
+<br/>Technische Universit¨at M¨unchen, Germany
+</td><td>('30013158', 'Xian Wei', 'xian wei')<br/>('1744239', 'Martin Kleinsteuber', 'martin kleinsteuber')<br/>('36559760', 'Hao Shen', 'hao shen')</td><td>{xian.wei, kleinsteuber, hao.shen}@tum.de.
+</td></tr><tr><td>60cdcf75e97e88638ec973f468598ae7f75c59b4</td><td>86
+<br/>Face Annotation Using Transductive
+<br/>Kernel Fisher Discriminant
+</td><td>('1704030', 'Jianke Zhu', 'jianke zhu')<br/>('1681775', 'Michael R. Lyu', 'michael r. lyu')</td><td></td></tr><tr><td>60040e4eae81ab6974ce12f1c789e0c05be00303</td><td>Center for Energy Harvesting
+<br/>Materials and Systems (CEHMS),
+<br/>Bio-Inspired Materials and
+<br/>Devices Laboratory (BMDL),
+<br/>Center for Intelligent Material
+<br/>Systems and Structure (CIMSS),
+<br/>Department of Mechanical Engineering,
+<br/>Virginia Tech,
+<br/>Blacksburg, VA 24061
+<br/>Graphical Facial Expression
+<br/>Analysis and Design Method:
+<br/>An Approach to Determine
+<br/>Humanoid Skin Deformation
+<br/>The architecture of human face is complex consisting of 268 voluntary muscles that perform
+<br/>coordinated action to create real-time facial expression. In order to replicate facial expres-
+<br/>sion on humanoid face by utilizing discrete actuators, the first and foremost step is the identi-
+<br/>fication of a pair of origin and sinking points (SPs). In this paper, we address this issue and
+<br/>present a graphical analysis technique that could be used to design expressive robotic faces.
+<br/>The underlying criterion in the design of faces being deformation of a soft elastomeric skin
+<br/>through tension in anchoring wires attached on one end to the skin through the sinking point
+<br/>and the other end to the actuator. The paper also addresses the singularity problem of facial
+<br/>control points and important phenomena such as slacking of actuators. Experimental charac-
+<br/>terization on a prototype humanoid face was performed to validate the model and demon-
+<br/>strate the applicability on a generic platform. [DOI: 10.1115/1.4006519]
+<br/>Keywords: humanoid prototype, facial expression, artificial skin, contractile actuator,
+<br/>graphical analysis
+<br/>Introduction
+<br/>Facial expression of humanoid is becoming a key research topic
+<br/>in recent years in the areas of social robotics. The embodiment of
+<br/>robotic head akin to that of human being promotes a more friendly
+<br/>communication between the humanoid and the user. There are
+<br/>many challenges in realizing human-like face such as material
+<br/>suitable for artificial skin, muscles, sensors, supporting structures,
+<br/>machine elements, vision, and audio systems. In addition to mate-
+<br/>rials and their integration, computational tools, static and dynamic
+<br/>analysis are required to fully understand the effect of each param-
+<br/>eter on the overall performance of a prototype humanoid face and
+<br/>provide optimum condition.
+<br/>This paper is organized in eight sections. First, we introduce the
+<br/>background and methodology for creating facial expression in
+<br/>robotic heads. A thorough description of the overall problem asso-
+<br/>ciated with expression analysis is presented along with pictorial
+<br/>representation of the muscle arrangement on a prototype face.
+<br/>Second, a literature survey is presented on facial expression analy-
+<br/>sis techniques applied to humanoid head. Third, the description of
+<br/>graphical facial expression analysis and design (GFEAD) method
+<br/>is presented focusing on two generic cases. Fourth, application
+<br/>of the GFEAD method on a prototype skull is presented and
+<br/>important manifestations that could not be obtained with other
+<br/>techniques are discussed. Fifth, results from experimental charac-
+<br/>terization of facial movement with a skin layer are discussed.
+<br/>Sixth, the effect of the skin properties and associated issues will
+<br/>be discussed. Section 7 discusses the significance of GFEAD
+<br/>method on practical platforms. Finally, the summary of this study
+<br/>is presented in Sec. 8.
+<br/>In the last few years, we have demonstrated humanoid heads
+<br/>using a variety of actuation technologies including: piezoelectric
+<br/>ultrasonic motors for actuation and macrofiber composite for sens-
+<br/>ing [1]; electromagnetic RC servo motor for actuation and embed-
+<br/><b>University of Texas at</b><br/>Dallas 800 West Campbell Rd., Richardson, TX 75080.
+<br/>2Corresponding author.
+<br/>Contributed by the Mechanisms and Robotics Committee of ASME for publica-
+<br/>tion in the JOURNAL OF MECHANISMS AND ROBOTICS. Manuscript received October 10,
+<br/>2010; final manuscript received February 23, 2012; published online April 25, 2012.
+<br/>Assoc. Editor: Qiaode Jeffrey Ge.
+<br/>ded unimorph for sensing [2,3], and recently shape memory alloy
+<br/>(SMA) based actuation for baby humanoid robot focusing on the
+<br/>face and jaw movement [4]. We have also reported facial muscles
+<br/>based on conducting polymer actuators to overcome the high
+<br/>power requirement of current actuation technologies including
+<br/>polypyrrole–polyvinylidene difluoride composite stripe and zig-
+<br/>zag actuators [5] and axial type helically wounded polypyrrole–
+<br/>platinum composite actuators [6]. All these studies have identified
+<br/>the issues related to the design of facial structure and artificial
+<br/>muscle requirements. Other types of actuators such as dielectric
+<br/>elastomer were also studied for general robotics application [7].
+<br/>There are several other studies reported in literature related to
+<br/>humanoid facial expression. Facial expression generation and ges-
+<br/>ture synthesis from sign language has been applied in the animation
+<br/>of an avatar [8], expressive humanoid robot Albert-HUBO with 31
+<br/>Degree of Freedom (DOF) head and 35 DOF body motions based
+<br/>on servo motors [9], facial expression imitation system for face rec-
+<br/>ognition and implementation on mascot type robotic system [10],
+<br/>facial expressive humanoid robot SAYA based on McKibben pneu-
+<br/>matic actuators [11], and android robot Repliee for studying psy-
+<br/>chological aspects [12]. However, none of these studies address the
+<br/>design strategy for humanoid head based on discrete actuators.
+<br/>Computational tools for precise analysis of the effect of actuator
+<br/>arrangement on the facial expression are missing.
+<br/>Even though significant efforts have been made, there is little
+<br/>fundamental understanding of the structural design questions.
+<br/>How these facial expressions can be precisely designed? How are
+<br/>the terminating points on the skull determined? What will be the
+<br/>effect of variation in arrangement of actuators? The answer to
+<br/>these questions requires the development of an accurate mathe-
+<br/>matical model that can be easily coded and visualized. For this
+<br/>purpose, we present a GFEAD method for application in human-
+<br/>oid head development. This method will be briefly discussed for
+<br/>generic cases to illustrate all the computational steps.
+<br/>The prime motivation behind using the graphical approach is
+<br/>that it provides both visual information as well as quantitative
+<br/>data required for the design and analysis of humanoid face. The
+<br/>deformation analysis and design is performed directly on the skull
+<br/>surface, which ultimately forms the platform for actuation. The
+<br/>graphical approach is simple to implement as it is conducted in
+<br/>2D. Generally, the skull is created from a scanned model; thus,
+<br/>Journal of Mechanisms and Robotics
+<br/>Copyright VC 2012 by ASME
+<br/>MAY 2012, Vol. 4 / 021010-1
+</td><td>('2248772', 'Yonas Tadesse', 'yonas tadesse')<br/>('25310631', 'Shashank Priya', 'shashank priya')</td><td>e-mail: yonas@vt.edu;
+<br/>yonas.tadesse@utdallas.edu
+<br/>e-mail: spriya@vt.edu
+</td></tr><tr><td>60b3601d70f5cdcfef9934b24bcb3cc4dde663e7</td><td>SUBMITTED TO IEEE TRANS. ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE
+<br/>Binary Gradient Correlation Patterns
+<br/>for Robust Face Recognition
+</td><td>('1739171', 'Weilin Huang', 'weilin huang')<br/>('1709042', 'Hujun Yin', 'hujun yin')</td><td></td></tr><tr><td>60737db62fb5fab742371709485e4b2ddf64b7b2</td><td>Crowdsourced Selection on Multi-Attribute Data
+<br/><b>Tsinghua University</b></td><td>('39163188', 'Xueping Weng', 'xueping weng')<br/>('23492509', 'Guoliang Li', 'guoliang li')<br/>('1802748', 'Huiqi Hu', 'huiqi hu')<br/>('33091680', 'Jianhua Feng', 'jianhua feng')</td><td>wxp15@mails.tsinghua.edu.cn, liguoliang@tsinghua.edu.cn, hqhu@sei.ecnu.edu.cn, fengjh@tsinghua.edu.cn
+</td></tr><tr><td>60496b400e70acfbbf5f2f35b4a49de2a90701b5</td><td>Avoiding Boosting Overfitting by Removing Confusing
+<br/>Samples
+<br/><b>Moscow State University, dept. of Computational Mathematics and Cybernetics</b><br/>Graphics and Media Lab
+<br/>119992 Moscow, Russia
+</td><td>('2918740', 'Alexander Vezhnevets', 'alexander vezhnevets')<br/>('3319972', 'Olga Barinova', 'olga barinova')</td><td>{avezhnevets, obarinova}@graphics.cs.msu.ru
+</td></tr><tr><td>60bffecd79193d05742e5ab8550a5f89accd8488</td><td>PhD Thesis Proposal
+<br/>Classification using sparse representation and applications to skin
+<br/>lesion diagnosis
+<br/>In only a few decades, sparse representation modeling has undergone a tremendous expansion with
+<br/>successful applications in many fields including signal and image processing, computer science,
+<br/>machine learning, statistics. Mathematically, it can be considered as the problem of finding the
+<br/>sparsest solution (the one with the fewest non-zeros entries) to an underdetermined linear system
+<br/>of equations [1]. Based on the observation for natural images (or images rich in textures) that small
+<br/>scale structures tend to repeat themselves in an image or in a group of similar images, a signal
+<br/>source can be sparsely represented over some well-chosen redundant basis (a dictionary). In other
+<br/>words, it can be approximately representable by a linear combination of a few elements (also called
+<br/>atoms or basis vectors) of a redundant/over-complete dictionary.
+<br/>Such models have been proven successful in many tasks including denoising [2]-[5], compression
+<br/>[6],[7], super-resolution [8],[9], classification and pattern recognition [10]-[16]. In the context of
+<br/>classification, the objective is to find the class to which a test signal belongs, given training data
+<br/>from multiple classes. Sparse representation has become a powerful technique in classification and
+<br/><b>applications, including texture classification [16], face recognition [12], object detection [10], and</b><br/>segmentation of medical images [17], [18]. In conventional Sparse Representation Classification
+<br/>(SRC) schemes, learned dictionaries and sparse representation are involved to classify image pixels
+<br/>(the image is divided into patches surrounding each image pixel). The performance of a SRC relies
+<br/>on a good dictionary, and on the sparse representation optimization model. Typically, a dictionary
+<br/>is learned for each signal class using training data, and classification of a new signal is achieved
+<br/>by associating it with the class whose dictionary allows the best approximation of the signal via an
+<br/>optimization problem that minimize the reconstruction error under some constraints including the
+<br/>sparsity one. It is important to note that the dictionary may not be a trained one [12]. In [12], the
+<br/>dictionary used for the face recognition is composed of many face images. Generally, the
+<br/>classification methods consider sparse modeling of natural high-dimensional signals and assume
+<br/>that the data belonging to the same class lie in the same subspace of a much lower dimension. Thus,
+<br/>the data can be modeled as a union of low dimensional linear subspaces. Then a union of a small
+<br/>subset of these linear subspaces is found to be a model of each class [19]. More advanced methods
+<br/>take into account the multi-subspace structure of the data of a high dimensional space. That is the
+<br/>case when data in multiple classes lie in multiple low-dimensional subspaces. Then, the
+<br/>classification problem can be formulated via a structured sparsity-based model, or group sparsity
+<br/>one [13, 20]. Other approach proposed to increase the performance of classification by using
+<br/>multiple disjoint sparse representation for the dictionary of each class instead of a single signal
+<br/>representation [21].
+<br/>II. Objective
+<br/>In this study, we focus on a highly accurate classification methods by sparse representation in order
+<br/>to improve existing methods. More specifically, we aim to improve the result of classification for
+<br/>-1-
+</td><td></td><td></td></tr><tr><td>601834a4150e9af028df90535ab61d812c45082c</td><td>A short review and primer on using video for
+<br/>psychophysiological observations in
+<br/>human-computer interaction applications
+<br/><b>Quantified Employee unit, Finnish Institute of Occupational Health</b><br/>POBox 40, 00250, Helsinki, Finland
+</td><td>('2612057', 'Teppo Valtonen', 'teppo valtonen')</td><td>teppo. valtonen @ttl. fi,
+</td></tr><tr><td>346dbc7484a1d930e7cc44276c29d134ad76dc3f</td><td><b></b><br/>On: 21 November 2007
+<br/>Access Details: [subscription number 785020433]
+<br/>Publisher: Informa Healthcare
+<br/>Informa Ltd Registered in England and Wales Registered Number: 1072954
+<br/>Registered office: Mortimer House, 37-41 Mortimer Street, London W1T 3JH, UK
+<br/>Systems
+<br/><b>Publication details, including instructions for authors and subscription information</b><br/>http://www.informaworld.com/smpp/title~content=t713663148
+<br/>Artists portray human faces with the Fourier statistics of
+<br/>complex natural scenes
+<br/><b>a Institute of Anatomy I, School of Medicine, Friedrich Schiller University, Germany</b><br/><b>Friedrich Schiller University, D-07740 Jena</b><br/>Germany
+<br/>First Published on: 28 August 2007
+<br/>To cite this Article: Redies, Christoph, Hänisch, Jan, Blickhan, Marko and Denzler,
+<br/>Joachim (2007) 'Artists portray human faces with the Fourier statistics of complex
+<br/>To link to this article: DOI: 10.1080/09548980701574496
+<br/>URL: http://dx.doi.org/10.1080/09548980701574496
+<br/>PLEASE SCROLL DOWN FOR ARTICLE
+<br/>Full terms and conditions of use: http://www.informaworld.com/terms-and-conditions-of-access.pdf
+<br/>This article maybe used for research, teaching and private study purposes. Any substantial or systematic reproduction,
+<br/>re-distribution, re-selling, loan or sub-licensing, systematic supply or distribution in any form to anyone is expressly
+<br/>forbidden.
+<br/>The publisher does not give any warranty express or implied or make any representation that the contents will be
+<br/>complete or accurate or up to date. The accuracy of any instructions,
+<br/>formulae and drug doses should be
+<br/>independently verified with primary sources. The publisher shall not be liable for any loss, actions, claims, proceedings,
+<br/>demand or costs or damages whatsoever or howsoever caused arising directly or indirectly in connection with or
+<br/>arising out of the use of this material.
+</td><td>('2485437', 'Christoph Redies', 'christoph redies')</td><td></td></tr><tr><td>34a41ec648d082270697b9ee264f0baf4ffb5c8d</td><td></td><td></td><td></td></tr><tr><td>34b3b14b4b7bfd149a0bd63749f416e1f2fc0c4c</td><td>The AXES submissions at TrecVid 2013
+<br/><b>University of Twente 2Dublin City University 3Oxford University</b><br/>4KU Leuven 5Fraunhofer Sankt Augustin 6INRIA Grenoble
+</td><td>('3157479', 'Robin Aly', 'robin aly')<br/>('3271933', 'Matthijs Douze', 'matthijs douze')<br/>('1688071', 'Basura Fernando', 'basura fernando')<br/>('9401491', 'Zaid Harchaoui', 'zaid harchaoui')<br/>('1767756', 'Kevin McGuinness', 'kevin mcguinness')<br/>('3095774', 'Dan Oneata', 'dan oneata')<br/>('3188342', 'Omkar M. Parkhi', 'omkar m. parkhi')<br/>('2319574', 'Danila Potapov', 'danila potapov')<br/>('3428663', 'Jérôme Revaud', 'jérôme revaud')<br/>('2462253', 'Cordelia Schmid', 'cordelia schmid')<br/>('1809436', 'Jochen Schwenninger', 'jochen schwenninger')<br/>('1783430', 'David Scott', 'david scott')<br/>('1704728', 'Tinne Tuytelaars', 'tinne tuytelaars')<br/>('34602236', 'Jakob Verbeek', 'jakob verbeek')<br/>('40465030', 'Heng Wang', 'heng wang')<br/>('1688869', 'Andrew Zisserman', 'andrew zisserman')</td><td></td></tr><tr><td>34bb11bad04c13efd575224a5b4e58b9249370f3</td><td>Towards Good Practices for Action Video Encoding
+<br/>National Key Laboratory for Novel Software Technology
+<br/><b>Nanyang Technological University</b><br/><b>Shanghai Jiao Tong University</b><br/><b>Nanjing University, China</b><br/>Singapore
+<br/>China
+</td><td>('1808816', 'Jianxin Wu', 'jianxin wu')<br/>('22183596', 'Yu Zhang', 'yu zhang')<br/>('8131625', 'Weiyao Lin', 'weiyao lin')</td><td>wujx2001@nju.edu.cn
+<br/>roykimbly@hotmail.com
+<br/>wylin@sjtu.edu.cn
+</td></tr><tr><td>3411ef1ff5ad11e45106f7863e8c7faf563f4ee1</td><td>Image Retrieval and Ranking via Consistently
+<br/>Reconstructing Multi-attribute Queries
+<br/><b>School of Computer Science and Technology, Tianjin University, Tianjin, China</b><br/>2 State Key Laboratory of Information Security, IIE, Chinese Academy of Sciences, China
+<br/><b>National University of Singapore</b><br/>4 State Key Laboratory of Virtual Reality Technology and Systems School of Computer Science
+<br/><b>and Engineering, Beihang University, Beijing, China</b></td><td>('1719250', 'Xiaochun Cao', 'xiaochun cao')<br/>('38188331', 'Hua Zhang', 'hua zhang')<br/>('33465926', 'Xiaojie Guo', 'xiaojie guo')<br/>('2705801', 'Si Liu', 'si liu')<br/>('33610144', 'Xiaowu Chen', 'xiaowu chen')</td><td>caoxiaochun@iie.ac.cn, huazhang@tju.edu.cn, xj.max.guo@gmail.com,
+<br/>dcslius@nus.edu.sg, chen@buaa.edu.cn
+</td></tr><tr><td>345cc31c85e19cea9f8b8521be6a37937efd41c2</td><td>Deep Manifold Traversal: Changing Labels with
+<br/>Convolutional Features
+<br/><b>Cornell University, Washington University in St. Louis</b><br/>*Authors contributing equally
+</td><td>('31693738', 'Jacob R. Gardner', 'jacob r. gardner')<br/>('3222840', 'Paul Upchurch', 'paul upchurch')<br/>('1940272', 'Matt J. Kusner', 'matt j. kusner')<br/>('7769997', 'Yixuan Li', 'yixuan li')<br/>('1706504', 'John E. Hopcroft', 'john e. hopcroft')</td><td></td></tr><tr><td>34d484b47af705e303fc6987413dc0180f5f04a9</td><td>RI:Medium: Unsupervised and Weakly-Supervised
+<br/>Discovery of Facial Events
+<br/>1 Introduction
+<br/>The face is one of the most powerful channels of nonverbal communication. Facial expression has been a
+<br/>focus of emotion research for over a hundred years [11]. It is central to several leading theories of emotion
+<br/>[16, 28, 44] and has been the focus of at times heated debate about issues in emotion science [17, 23, 40].
+<br/><b>Facial expression figures prominently in research on almost every aspect of emotion, including psychophys</b><br/>iology [30], neural correlates [18], development [31], perception [4], addiction [24], social processes [26],
+<br/>depression [39] and other emotion disorders [46], to name a few. In general, facial expression provides cues
+<br/>about emotional response, regulates interpersonal behavior, and communicates aspects of psychopathology.
+<br/>While people have believed for centuries that facial expressions can reveal what people are thinking and
+<br/>feeling, it is relatively recently that the face has been studied scientifically for what it can tell us about
+<br/>internal states, social behavior, and psychopathology.
+<br/>Faces possess their own language. Beginning with Darwin and his contemporaries, extensive efforts
+<br/>have been made to manually describe this language. A leading approach, the Facial Action Coding System
+<br/>(FACS) [19] , segments the visible effects of facial muscle activation into ”action units.” Because of its
+<br/>descriptive power, FACS has become the state of the art in manual measurement of facial expression and is
+<br/>widely used in studies of spontaneous facial behavior. The FACS taxonomy was develop by manually ob-
+<br/>serving graylevel variation between expressions in images and to a lesser extent by recording the electrical
+<br/>activity of underlying facial muscles [9]. Because of its importance to human social dynamics, person per-
+<br/>ception, biological bases of behavior, extensive efforts have been made to automatically detect this language
+<br/>(i.e., facial expression) using computer vision and machine learning. In part for these reasons, much effort
+<br/>in automatic facial image analysis seeks to automatically recognize FACS action units [5, 45, 38, 42]. With
+<br/>few exceptions, previous work on facial expression has been supervised in nature (i.e. event categories are
+<br/>defined in advance in labeled training data, see [5, 45, 38, 42] for a review of state-of-the-art algorithms)
+<br/>using either FACS or emotion labels (e.g. angry, surprise, sad). Because manual coding is highly labor
+<br/>intensive, progress in automated facial image analysis has been limited by lack of sufficient training data
+<br/>especially with respect to human behavior in naturally occurring settings (as opposed to posed facial be-
+<br/>havior). Little attention has been paid to the problem of unsupervised or weakly-supervised discovery of
+<br/>facial events prior to recognition. In this proposal we question whether the reliance on supervised learning
+<br/>is necessary. Specifically, Can unsupervised or weakly-supervised learning algorithms discover useful and
+<br/>meaningful facial events in video sequences of natural occurring behavior?. Three are the main contributions
+<br/>of this proposal:
+<br/>• We ask whether unsupervised or weakly-supervised learning algorithms can discover useful and
+<br/>meaningful facial events in video sequences of one or more persons with natural occurring behavior.
+<br/>Several issues contribute to the challenge of discovery of facial events; these include the large vari-
+<br/>ability in the temporal scale and periodicity of facial expressions, illumination and fast pose changes,
+<br/>the complexity of decoupling rigid and non-rigid motion from video, the exponential nature of all
+<br/>possible facial movement combinations, and characterization of subtle facial behavior.
+<br/>• We propose two novel non-parametric algorithms for unsupervised and weakly-supervised time-series
+<br/>analysis. In preliminary experiments these algorithms were able to discover meaningful facial events
+</td><td></td><td></td></tr><tr><td>341002fac5ae6c193b78018a164d3c7295a495e4</td><td>von Mises-Fisher Mixture Model-based Deep
+<br/>learning: Application to Face Verification
+</td><td>('1773090', 'Md. Abul Hasnat', 'md. abul hasnat')<br/>('34767162', 'Jonathan Milgram', 'jonathan milgram')<br/>('34086868', 'Liming Chen', 'liming chen')</td><td></td></tr><tr><td>34ce703b7e79e3072eed7f92239a4c08517b0c55</td><td>What impacts skin color in digital photos?
+<br/><b>Advanced Digital Sciences Center, University of Illinois at Urbana-Champaign, Singapore</b></td><td>('3213946', 'Albrecht Lindner', 'albrecht lindner')<br/>('1702224', 'Stefan Winkler', 'stefan winkler')</td><td></td></tr><tr><td>345bea5f7d42926f857f395c371118a00382447f</td><td>Transfiguring Portraits
+<br/><b>Computer Science and Engineering, University of Washington</b><br/>Figure 1: Our system’s goal is to let people imagine and explore how they may look like in a different country, era, hair style, hair color, age,
+<br/>and anything else that can be queried in an image search engine. The examples above show a single input photo (left) and automatically
+<br/>synthesized appearances of the input person with ”curly hair” (top row), in ”india” (2nd row), and at ”1930” (3rd row).
+</td><td>('2419955', 'Ira Kemelmacher-Shlizerman', 'ira kemelmacher-shlizerman')</td><td></td></tr><tr><td>34ec83c8ff214128e7a4a4763059eebac59268a6</td><td>Action Anticipation By Predicting Future
+<br/>Dynamic Images
+<br/>Australian Centre for Robotic Vision, ANU, Canberra, Australia
+</td><td>('46771280', 'Cristian Rodriguez', 'cristian rodriguez')<br/>('1688071', 'Basura Fernando', 'basura fernando')<br/>('40124570', 'Hongdong Li', 'hongdong li')</td><td>{cristian.rodriguez, basura.fernando, hongdong.li}@.anu.edu.au
+</td></tr><tr><td>3463f12ad434d256cd5f94c1c1bfd2dd6df36947</td><td>Article
+<br/>Facial Expression Recognition with Fusion Features
+<br/>Extracted from Salient Facial Areas
+<br/><b>School of Control Science and Engineering, Shandong University, Jinan 250061, China</b><br/>Academic Editors: Xue-Bo Jin; Shuli Sun; Hong Wei and Feng-Bao Yang
+<br/>Received: 23 January 2017; Accepted: 24 March 2017; Published: 29 March 2017
+</td><td>('7895427', 'Yanpeng Liu', 'yanpeng liu')<br/>('29275442', 'Yibin Li', 'yibin li')<br/>('1708045', 'Xin Ma', 'xin ma')<br/>('1772484', 'Rui Song', 'rui song')</td><td>liuyanpeng@sucro.org (Y.L.); liyb@sdu.edu.cn (Y.L.); maxin@sdu.edu.cn (X.M.)
+<br/>* Correspondence: rsong@sdu.edu.cn
+</td></tr><tr><td>346c9100b2fab35b162d7779002c974da5f069ee</td><td>Photo Search by Face Positions and Facial Attributes
+<br/>on Touch Devices
+<br/><b>National Taiwan University, Taipei, Taiwan</b></td><td>('2476032', 'Yu-Heng Lei', 'yu-heng lei')<br/>('35081710', 'Yan-Ying Chen', 'yan-ying chen')<br/>('2817570', 'Lime Iida', 'lime iida')<br/>('33970300', 'Bor-Chun Chen', 'bor-chun chen')<br/>('1776110', 'Hsiao-Hang Su', 'hsiao-hang su')<br/>('1716836', 'Winston H. Hsu', 'winston h. hsu')</td><td>{limeiida, siriushpa}@gmail.com, b95901019@ntu.edu.tw, winston@csie.ntu.edu.tw
+<br/>{ryanlei, yanying}@cmlab.csie.ntu.edu.tw,
+</td></tr><tr><td>34863ecc50722f0972e23ec117f80afcfe1411a9</td><td>An Efficient Face Recognition Algorithm Based
+<br/>on Robust Principal Component Analysis
+<br/>TNLIST and Department of Automation
+<br/><b>Tsinghua University</b><br/>Beijing, China
+</td><td>('2860279', 'Ziheng Wang', 'ziheng wang')<br/>('2842970', 'Xudong Xie', 'xudong xie')</td><td> zihengwang.thu@gmail.com, xdxie@tsinghua.edu.cn
+</td></tr><tr><td>34b7e826db49a16773e8747bc8dfa48e344e425d</td><td></td><td></td><td></td></tr><tr><td>34c594abba9bb7e5813cfae830e2c4db78cf138c</td><td>Transport-Based Single Frame Super Resolution of Very Low Resolution Face Images
+<br/><b>Carnegie Mellon University</b><br/>We describe a single-frame super-resolution method for reconstructing high-
+<br/>resolution (abbr. high-res) faces from very low-resolution (abbr. low-res)
+<br/>face images (e.g. smaller than 16× 16 pixels) by learning a nonlinear La-
+<br/>grangian model for the high-res face images. Our technique is based on the
+<br/>mathematics of optimal transport, and hence we denote it as transport-based
+<br/>SFSR (TB-SFSR). In the training phase, a nonlinear model of high-res fa-
+<br/>cial images is constructed based on transport maps that morph a reference
+<br/>image into the training face images. In the testing phase, the resolution of
+<br/>a degraded image is enhanced by finding the model parameters that best fit
+<br/>the given low resolution data.
+<br/>Generally speaking, most SFSR methods [2, 3, 4, 5] are based on a
+<br/>linear model for the high-res images. Hence, ultimately, the majority of
+<br/>SFSR models in the literature can be written as, Ih(x) = ∑i wiψi(x), where
+<br/>Ih is a high-res image or a high-res image patch, w’s are weight coefficients,
+<br/>and ψ’s are high-res images (or image patches), which are learned from the
+<br/>training images using a specific model. Here we propose a fundamentally
+<br/>different approach toward modeling high-res images. In our approach the
+<br/>high-res image is modeled as a mass preserving mapping of a high-res tem-
+<br/>plate image, I0, as follows
+<br/>Ih(x) = det(I +∑
+<br/>αiDvi(x))I0(x +∑
+<br/>αivi(x)),
+<br/>(1)
+<br/>where I is the identity matrix, αi is the weight coefficient of displacement
+<br/>field vi (i.e. a smooth vector field), and Dvi(x) is the Jacobian matrix of the
+<br/>displacement field vi, evaluated at x. The proposed method can be viewed
+<br/>as a linear modeling in the space of mass-preserving mappings, which cor-
+<br/>responds to a non-linear model in the image space. Thus (through the use of
+<br/>the optimal mapping function f(x) = x +∑i αivi(x)) our modeling approach
+<br/>can also displace pixels, in addition to changing their intensities.
+<br/>Given a training set of high-res face images, I1, ...,IN : Ω → R with
+<br/>Ω = [0,1]2 the image intensities are first normalized to integrate to 1. This
+<br/>is done so the images can be treated as distributions of a fixed amount of in-
+<br/>tensity values (i.e. fixed amount of mass). Next, the reference face is defined
+<br/>to be the average image, I0 = 1
+<br/>i=1 Ii, and the optimal transport distance
+<br/>between the reference image and the i’th training image, Ii, is defined to be,
+<br/>N ∑N
+<br/>(cid:90)
+<br/>dOT (I0,Ii) = minui
+<br/>|ui(x)|2Ii(x)dx
+<br/>s.t. det(I + Dui(x))I0(x + ui(x)) = Ii(x)
+<br/>(2)
+<br/>where (f(x) = x + u(x)) : Ω → Ω is a mass preserving transform from Ii to
+<br/>I0, u is the optimal displacement field, and Dui is the Jacobian matrix of
+<br/>u. The optimization problem above is well posed and has a unique min-
+<br/>imizer [1]. Having optimal displacement fields ui for i = 1, . . . ,N a sub-
+<br/>space, V , is learned for these displacement fields. Let v j for j = 1, ...,M
+<br/>be a basis for subspace V. Then, any combination of the basis displacement
+<br/>fields can be used to construct an arbitrary deformation field, fα (x) = x +
+<br/>∑M
+<br/>j=1 α jv j(x), which can then be used to construct a given image Iα (x) =
+<br/>det(Dfα (x))I0(fα (x)). Hence, subspace V provides a generative model for
+<br/>the high-res face image.
+<br/>In the testing phase, we constrain the space of
+<br/>possible high-res solutions to those, which are representable as Iα for some
+<br/>α ∈ RM. Hence, for a degraded input image, Il, and assuming that φ (.) is
+<br/>known and following the MAP criteria we can write,
+<br/>α∗ = argminα
+<br/>(cid:107)Il − φ (Iα )(cid:107)2
+<br/>(3)
+<br/>where a gradient descent approach is used to obtain a local optima α∗. Note
+<br/>that, images of faces (and other deformable objects) differ from each other
+<br/>s.t Iα (x) = det(Dfα (x))I0(fα (x))
+</td><td>('2062432', 'Soheil Kolouri', 'soheil kolouri')<br/>('1818350', 'Gustavo K. Rohde', 'gustavo k. rohde')</td><td></td></tr><tr><td>34108098e1a378bc15a5824812bdf2229b938678</td><td>Reconstructive Sparse Code Transfer for
+<br/>Contour Detection and Semantic Labeling
+<br/>1TTI Chicago
+<br/><b>California Institute of Technology</b><br/><b>University of California at Berkeley / ICSI</b></td><td>('1965929', 'Michael Maire', 'michael maire')<br/>('2251428', 'Stella X. Yu', 'stella x. yu')<br/>('1690922', 'Pietro Perona', 'pietro perona')</td><td>mmaire@ttic.edu, stellayu@berkeley.edu, perona@caltech.edu
+</td></tr><tr><td>341ed69a6e5d7a89ff897c72c1456f50cfb23c96</td><td>DAGER: Deep Age, Gender and Emotion
+<br/>Recognition Using Convolutional Neural
+<br/>Networks
+<br/>Computer Vision Lab, Sighthound Inc., Winter Park, FL
+</td><td>('1707795', 'Afshin Dehghan', 'afshin dehghan')<br/>('16131262', 'Enrique G. Ortiz', 'enrique g. ortiz')<br/>('37574860', 'Guang Shu', 'guang shu')<br/>('2234898', 'Syed Zain Masood', 'syed zain masood')</td><td>{afshindehghan, egortiz, guangshu, zainmasood}@sighthound.com
+</td></tr><tr><td>348a16b10d140861ece327886b85d96cce95711e</td><td>Finding Good Features for Object Recognition
+<br/>by
+<br/><b>B.S. (Cornell University</b><br/><b>M.S. (University of California, Berkeley</b><br/>A dissertation submitted in partial satisfaction
+<br/>of the requirements for the degree of
+<br/>Doctor of Philosophy
+<br/>in
+<br/>Computer Science
+<br/>in the
+<br/>GRADUATE DIVISION
+<br/>of the
+<br/><b>UNIVERSITY OF CALIFORNIA, BERKELEY</b><br/>Committee in charge:
+<br/>Professor Jitendra Malik, Chair
+<br/>Spring 2005
+</td><td>('3236352', 'Andras David Ferencz', 'andras david ferencz')<br/>('1744452', 'David A. Forsyth', 'david a. forsyth')<br/>('1678771', 'Peter J. Bickel', 'peter j. bickel')</td><td></td></tr><tr><td>3419af6331e4099504255a38de6f6b7b3b1e5c14</td><td>Modified Eigenimage Algorithm for Painting
+<br/>Image Retrieval
+<br/><b>Stanford University</b><br/>
+</td><td>('12833413', 'Qun Feng Tan', 'qun feng tan')</td><td></td></tr><tr><td>34c8de02a5064e27760d33b861b7e47161592e65</td><td>Video Action Recognition based on Deeper Convolution Networks with
+<br/>Pair-Wise Frame Motion Concatenation
+<br/><b>School of Computer Science, Northwestern Polytechnical University, China</b><br/><b>Sensor-enhanced Social Media (SeSaMe) Centre, National University of Singapore, Singapore</b><br/><b>School of Information Engineering, Nanchang University, China</b></td><td>('9229148', 'Yamin Han', 'yamin han')<br/>('40188000', 'Peng Zhang', 'peng zhang')<br/>('2628886', 'Tao Zhuo', 'tao zhuo')<br/>('1730584', 'Wei Huang', 'wei huang')<br/>('1801395', 'Yanning Zhang', 'yanning zhang')</td><td></td></tr><tr><td>340d1a9852747b03061e5358a8d12055136599b0</td><td>Audio-Visual Recognition System Insusceptible
+<br/>to Illumination Variation over Internet Protocol
+<br/>
+</td><td>('1968167', 'Yee Wan Wong', 'yee wan wong')</td><td></td></tr><tr><td>34ccdec6c3f1edeeecae6a8f92e8bdb290ce40fd</td><td>Proceedings of the Twenty-Fifth International Joint Conference on Artificial Intelligence (IJCAI-16)
+<br/>A Virtual Assistant to Help Dysphagia Patients Eat Safely at Home
+<br/><b>SRI International, Menlo Park California / *Brooklyn College, Brooklyn New York</b><br/>
+</td><td>('6647218', 'Michael Freed', 'michael freed')<br/>('1936842', 'Brian Burns', 'brian burns')<br/>('39451362', 'Aaron Heller', 'aaron heller')<br/>('3431324', 'Sharon Beaumont-Bowman', 'sharon beaumont-bowman')</td><td>{first name, last name}@sri.com, sharonb@brooklyn.cuny.edu
+</td></tr><tr><td>34b42bcf84d79e30e26413f1589a9cf4b37076f9</td><td>Learning Sparse Representations of High
+<br/>Dimensional Data on Large Scale Dictionaries
+<br/><b>Princeton University</b><br/>Princeton, NJ 08544, USA
+</td><td>('1730249', 'Zhen James Xiang', 'zhen james xiang')<br/>('1693135', 'Peter J. Ramadge', 'peter j. ramadge')</td><td>{zxiang,haoxu,ramadge}@princeton.edu
+</td></tr><tr><td>5a3da29970d0c3c75ef4cb372b336fc8b10381d7</td><td>CNN-based Real-time Dense Face Reconstruction
+<br/>with Inverse-rendered Photo-realistic Face Images
+</td><td>('8280113', 'Yudong Guo', 'yudong guo')<br/>('2938279', 'Juyong Zhang', 'juyong zhang')<br/>('1688642', 'Jianfei Cai', 'jianfei cai')<br/>('15679675', 'Boyi Jiang', 'boyi jiang')<br/>('48510441', 'Jianmin Zheng', 'jianmin zheng')</td><td></td></tr><tr><td>5a93f9084e59cb9730a498ff602a8c8703e5d8a5</td><td>HUSSAIN ET. AL: FACE RECOGNITION USING LOCAL QUANTIZED PATTERNS
+<br/>Face Recognition using Local Quantized
+<br/>Patterns
+<br/>Fréderic Jurie
+<br/>GREYC — CNRS UMR 6072,
+<br/><b>University of Caen Basse-Normandie</b><br/>Caen, France
+</td><td>('2695106', 'Sibt ul Hussain', 'sibt ul hussain')<br/>('3423479', 'Thibault Napoléon', 'thibault napoléon')</td><td>Sibt.ul.Hussain@gmail.com
+<br/>Thibault.Napoleon@unicaen.fr
+<br/>Frederic.Jurie@unicaen.fr
+</td></tr><tr><td>5a87bc1eae2ec715a67db4603be3d1bb8e53ace2</td><td>A Novel Convergence Scheme for Active Appearance Models
+<br/>School of Electrical and Computer Engineering
+<br/><b>Georgia Institute of Technology</b><br/>Atlanta, GA 30332
+</td><td>('38410822', 'Aziz Umit Batur', 'aziz umit batur')<br/>('2583044', 'Monson H. Hayes', 'monson h. hayes')</td><td>{batur, mhh3}@ece.gatech.edu
+</td></tr><tr><td>5aad56cfa2bac5d6635df4184047e809f8fecca2</td><td>A VISUAL DICTIONARY ATTACK ON PICTURE PASSWORDS
+<br/><b>Cornell University</b></td><td>('1803066', 'Amir Sadovnik', 'amir sadovnik')<br/>('1746230', 'Tsuhan Chen', 'tsuhan chen')</td><td></td></tr><tr><td>5a8ca0cfad32f04449099e2e3f3e3a1c8f6541c0</td><td>Available online at www.sciencedirect.com
+<br/>ScienceDirect
+<br/> Procedia Computer Science 87 ( 2016 ) 300 – 305
+<br/>4th International Conference on Recent Trends in Computer Science &Engineering
+<br/>Automatic Frontal Face Reconstruction Approach for Pose Invariant Face
+<br/>Recognition
+<br/>Kavitha.Ja,Mirnalinee.T.Tb
+<br/><b>aResearch Scholar, Anna University, Chennai, Inida</b><br/><b>SSN College of Engineering, Kalavakkam, Tamil Nadu, India</b></td><td></td><td></td></tr><tr><td>5ac80e0b94200ee3ecd58a618fe6afd077be0a00</td><td>Unifying Geometric Features and Facial Action Units for Improved
+<br/>Performance of Facial Expression Analysis
+<br/><b>Kent State University</b><br/>Keywords:
+<br/>Facial Action Unit, Facial Expression, Geometric features.
+</td><td>('1688430', 'Mehdi Ghayoumi', 'mehdi ghayoumi')</td><td>{mghayoum,akbansal}@kent.edu
+</td></tr><tr><td>5aadd85e2a77e482d44ac2a215c1f21e4a30d91b</td><td>Face Recognition using Principle Components and Linear
+<br/>Discriminant Analysis
+<br/>HATIM A.
+<br/>ABOALSAMH 1,2
+<br/>HASSAN I.
+<br/>MATHKOUR 1,2
+<br/>GHAZY M.R.
+<br/>ASSASSA 1,2
+<br/>MONA F.M.
+<br/>MURSI 1,3
+<br/>1 Center of Excellence in Information Assurance (CoEIA),
+<br/>2 Department of Computer Science
+<br/>3 Department of Information Technology
+<br/><b>College of Computer and Information Sciences</b><br/><b>King Saud University, Riyadh</b><br/>SAUDI ARABIA
+</td><td></td><td>hatim@ksu.edu.sa
+<br/>mathkour@ksu.edu.sa
+<br/>gassassa@coeia.edu.sa
+<br/>monmursi@coeia.edu.sa
+</td></tr><tr><td>5a34a9bb264a2594c02b5f46b038aa1ec3389072</td><td>Label-Embedding for Image Classification
+</td><td>('2893664', 'Zeynep Akata', 'zeynep akata')<br/>('1723883', 'Florent Perronnin', 'florent perronnin')<br/>('2462253', 'Cordelia Schmid', 'cordelia schmid')</td><td></td></tr><tr><td>5a5f9e0ed220ce51b80cd7b7ede22e473a62062c</td><td>Videos as Space-Time Region Graphs
+<br/><b>Robotics Institute, Carnegie Mellon University</b><br/>Figure 1. How do you recognize simple actions such as opening book? We argue action
+<br/>understanding requires appearance modeling but also capturing temporal dynamics
+<br/>(how shape of book changes) and functional relationships. We propose to represent
+<br/>videos as space-time region graphs followed by graph convolutions for inference.
+</td><td>('39849136', 'Xiaolong Wang', 'xiaolong wang')<br/>('1737809', 'Abhinav Gupta', 'abhinav gupta')</td><td></td></tr><tr><td>5ac946fc6543a445dd1ee6d5d35afd3783a31353</td><td>FEATURELESS: BYPASSING FEATURE EXTRACTION IN ACTION CATEGORIZATION
+<br/>S. L. Pinteaa, P. S. Mettesa
+<br/>J. C. van Gemerta,b, A. W. M. Smeuldersa
+<br/>aIntelligent Sensory Information Systems,
+<br/><b>University of Amsterdam</b><br/>Amsterdam, Netherlands
+</td><td></td><td></td></tr><tr><td>5a4c6246758c522f68e75491eb65eafda375b701</td><td>978-1-4244-4296-6/10/$25.00 ©2010 IEEE
+<br/>1118
+<br/>ICASSP 2010
+</td><td></td><td></td></tr><tr><td>5aad5e7390211267f3511ffa75c69febe3b84cc7</td><td>Driver Gaze Estimation
+<br/>Without Using Eye Movement
+<br/>MIT AgeLab
+</td><td>('2145054', 'Lex Fridman', 'lex fridman')<br/>('2180983', 'Philipp Langhans', 'philipp langhans')<br/>('7137846', 'Joonbum Lee', 'joonbum lee')<br/>('1901227', 'Bryan Reimer', 'bryan reimer')</td><td>fridman@mit.edu, philippl@mit.edu, joonbum@mit.edu, reimer@mit.edu
+</td></tr><tr><td>5a029a0b0ae8ae7fc9043f0711b7c0d442bfd372</td><td></td><td></td><td></td></tr><tr><td>5ae970294aaba5e0225122552c019eb56f20af74</td><td>International Journal of Computer and Electrical Engineering
+<br/>Establishing Dense Correspondence of High Resolution 3D
+<br/>Faces via Möbius Transformations
+<br/><b>College of Electronic Science and Engineering, National University of Defense Technology, Changsha, China</b><br/>Manuscript submitted July 14, 2014; accepted November 2, 2014.
+<br/>doi: 10. 17706/ijcee.2014.v6.866
+</td><td>('30373915', 'Jian Liu', 'jian liu')<br/>('37509862', 'Quan Zhang', 'quan zhang')<br/>('3224964', 'Chaojing Tang', 'chaojing tang')</td><td>* Corresponding author. Email: cjtang@263.net
+</td></tr><tr><td>5a86842ab586de9d62d5badb2ad8f4f01eada885</td><td>International Journal of Engineering Research and General Science Volume 3, Issue 3, May-June, 2015
+<br/>ISSN 2091-2730
+<br/>Facial Emotion Recognition and Classification Using Hybridization
+<br/>Method
+<br/><b>Chandigarh Engg. College, Mohali, Punjab, India</b></td><td>('6010530', 'Anchal Garg', 'anchal garg')<br/>('9744572', 'Rohit Bajaj', 'rohit bajaj')</td><td>anchalgarg949@gmail.com, 07696449500
+</td></tr><tr><td>5a4ec5c79f3699ba037a5f06d8ad309fb4ee682c</td><td>Downloaded From: https://www.spiedigitallibrary.org/journals/Journal-of-Electronic-Imaging on 12/17/2017 Terms of Use: https://www.spiedigitallibrary.org/terms-of-use
+<br/>AutomaticageandgenderclassificationusingsupervisedappearancemodelAliMainaBukarHassanUgailDavidConnahAliMainaBukar,HassanUgail,DavidConnah,“Automaticageandgenderclassificationusingsupervisedappearancemodel,”J.Electron.Imaging25(6),061605(2016),doi:10.1117/1.JEI.25.6.061605. </td><td></td><td></td></tr><tr><td>5aa57a12444dbde0f5645bd9bcec8cb2f573c6a0</td><td>The International Arab Journal of Information Technology, Vol. 11, No. 2, March 2014
+<br/>
+<br/>149
+<br/>
+<br/>Face Recognition using Adaptive Margin Fisher’s
+<br/>Criterion and Linear Discriminant Analysis
+<br/>
+<br/>(AMFC-LDA)
+<br/><b>COMSATS Institute of Information Technology, Pakistan</b></td><td>('2151799', 'Marryam Murtaza', 'marryam murtaza')<br/>('33088042', 'Muhammad Sharif', 'muhammad sharif')<br/>('36739230', 'Mudassar Raza', 'mudassar raza')<br/>('1814986', 'Jamal Hussain Shah', 'jamal hussain shah')</td><td></td></tr><tr><td>5aed0f26549c6e64c5199048c4fd5fdb3c5e69d6</td><td>International Journal of Computer Applications® (IJCA) (0975 – 8887)
+<br/>International Conference on Knowledge Collaboration in Engineering, ICKCE-2014
+<br/>Human Expression Recognition using Facial Features
+<br/>G.Saranya
+<br/>Post graduate student, Dept. of ECE
+<br/><b>Parisutham Institute of Technology and Science</b><br/>Thanjavur.
+<br/><b>Affiliated to Anna university, Chennai</b><br/>recognition can be used
+</td><td></td><td></td></tr><tr><td>5a7520380d9960ff3b4f5f0fe526a00f63791e99</td><td>The Indian Spontaneous Expression
+<br/>Database for Emotion Recognition
+</td><td>('38657440', 'Priyadarshi Patnaik', 'priyadarshi patnaik')<br/>('2680543', 'Aurobinda Routray', 'aurobinda routray')<br/>('2730256', 'Rajlakshmi Guha', 'rajlakshmi guha')</td><td></td></tr><tr><td>5a07945293c6b032e465d64f2ec076b82e113fa6</td><td>Pulling Actions out of Context: Explicit Separation for Effective Combination
+<br/><b>Stony Brook University, Stony Brook, NY 11794, USA</b></td><td>('50874742', 'Yang Wang', 'yang wang')</td><td>{wang33, minhhoai}@cs.stonybrook.edu
+</td></tr><tr><td>5fff61302adc65d554d5db3722b8a604e62a8377</td><td>Additive Margin Softmax for Face Verification
+<br/>UESTC
+<br/>Georgia Tech
+<br/>UESTC
+<br/>UESTC
+</td><td>('47939378', 'Feng Wang', 'feng wang')<br/>('51094998', 'Weiyang Liu', 'weiyang liu')<br/>('8424682', 'Haijun Liu', 'haijun liu')<br/>('1709439', 'Jian Cheng', 'jian cheng')</td><td>feng.wff@gmail.com
+<br/>wyliu@gatech.edu
+<br/>haijun liu@126.com
+<br/>chengjian@uestc.edu.cn
+</td></tr><tr><td>5f771fed91c8e4b666489ba2384d0705bcf75030</td><td>Understanding Humans in Crowded Scenes: Deep Nested Adversarial Learning
+<br/>and A New Benchmark for Multi-Human Parsing
+<br/><b>National University of Singapore</b><br/><b>National University of Defense Technology</b><br/><b>Qihoo 360 AI Institute</b></td><td>('46509484', 'Jian Zhao', 'jian zhao')<br/>('2757639', 'Jianshu Li', 'jianshu li')<br/>('48207454', 'Li Zhou', 'li zhou')<br/>('1715286', 'Terence Sim', 'terence sim')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')<br/>('33221685', 'Jiashi Feng', 'jiashi feng')</td><td>chengyu996@gmail.com zhouli2025@gmail.com
+<br/>{eleyans, elefjia}@nus.edu.sg
+<br/>{zhaojian90, jianshu}@u.nus.edu
+<br/>tsim@comp.nus.edu.sg
+</td></tr><tr><td>5fa04523ff13a82b8b6612250a39e1edb5066521</td><td>Dockerface: an Easy to Install and Use Faster R-CNN Face Detector in a Docker
+<br/>Container
+<br/>Center for Behavioral Imaging
+<br/><b>College of Computing</b><br/><b>Georgia Institute of Technology</b></td><td>('31601235', 'Nataniel Ruiz', 'nataniel ruiz')<br/>('1692956', 'James M. Rehg', 'james m. rehg')</td><td>nataniel.ruiz@gatech.edu
+<br/>rehg@gatech.edu
+</td></tr><tr><td>5fa6e4a23da0b39e4b35ac73a15d55cee8608736</td><td>IJCV special issue (Best papers of ECCV 2016) manuscript No.
+<br/>(will be inserted by the editor)
+<br/>RED-Net:
+<br/>A Recurrent Encoder-Decoder Network for Video-based Face Alignment
+<br/>Submitted: April 19 2017 / Revised: December 12 2017
+</td><td>('4340744', 'Xi Peng', 'xi peng')</td><td></td></tr><tr><td>5f871838710a6b408cf647aacb3b198983719c31</td><td>1716
+<br/>Locally Linear Regression for Pose-Invariant
+<br/>Face Recognition
+</td><td>('1695600', 'Xiujuan Chai', 'xiujuan chai')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('1710220', 'Xilin Chen', 'xilin chen')<br/>('1698902', 'Wen Gao', 'wen gao')</td><td></td></tr><tr><td>5f64a2a9b6b3d410dd60dc2af4a58a428c5d85f9</td><td></td><td></td><td></td></tr><tr><td>5f344a4ef7edfd87c5c4bc531833774c3ed23542</td><td>c
+</td><td></td><td></td></tr><tr><td>5f6ab4543cc38f23d0339e3037a952df7bcf696b</td><td>Video2Vec: Learning Semantic Spatial-Temporal
+<br/>Embeddings for Video Representation
+<br/>School of Computer Engineering
+<br/>School of Electrical Engineering
+<br/>School of Computer Science
+<br/><b>Arizona State University</b><br/>Tempe, Arizona 85281
+<br/><b>Arizona State University</b><br/>Tempe, Arizona 85281
+<br/><b>Arizona State University</b><br/>Tempe, Arizona 85281
+</td><td>('8060096', 'Sheng-hung Hu', 'sheng-hung hu')<br/>('2180892', 'Yikang Li', 'yikang li')<br/>('2913552', 'Baoxin Li', 'baoxin li')</td><td>Email:shenghun@asu.edu
+<br/>Email:yikangli@asu.edu
+<br/>Email:Baoxin.Li@asu.edu
+</td></tr><tr><td>5f7c4c20ae2731bfb650a96b69fd065bf0bb950e</td><td>Turk J Elec Eng & Comp Sci
+<br/>(2016) 24: 1797 { 1814
+<br/>c⃝ T (cid:127)UB_ITAK
+<br/>doi:10.3906/elk-1310-253
+<br/>A new fuzzy membership assignment and model selection approach based on
+<br/>dynamic class centers for fuzzy SVM family using the (cid:12)re(cid:13)y algorithm
+<br/><b>Young Researchers and Elite Club, Mashhad Branch, Islamic Azad University, Mashhad, Iran</b><br/><b>Faculty of Engineering, Ferdowsi University, Mashhad, Iran</b><br/>Received: 01.11.2013
+<br/>(cid:15)
+<br/>Accepted/Published Online: 30.06.2014
+<br/>(cid:15)
+<br/>Final Version: 23.03.2016
+</td><td>('9437627', 'Omid Naghash Almasi', 'omid naghash almasi')<br/>('4945660', 'Modjtaba Rouhani', 'modjtaba rouhani')</td><td></td></tr><tr><td>5f94969b9491db552ffebc5911a45def99026afe</td><td>Multimodal Learning and Reasoning for Visual
+<br/>Question Answering
+<br/>Integrative Sciences and Engineering
+<br/><b>National University of Singapore</b><br/>Electrical and Computer Engineering
+<br/><b>National University of Singapore</b></td><td>('3393294', 'Ilija Ilievski', 'ilija ilievski')<br/>('33221685', 'Jiashi Feng', 'jiashi feng')</td><td>ilija.ilievski@u.nus.edu
+<br/>elefjia@nus.edu.sg
+</td></tr><tr><td>5f758a29dae102511576c0a5c6beda264060a401</td><td>Fine-grained Video Attractiveness Prediction Using Multimodal
+<br/>Deep Learning on a Large Real-world Dataset
+<br/><b>Wuhan University, Tencent AI Lab, National University of Singapore, University of Rochester</b></td><td>('3179887', 'Xinpeng Chen', 'xinpeng chen')<br/>('47740660', 'Jingyuan Chen', 'jingyuan chen')<br/>('34264361', 'Lin Ma', 'lin ma')<br/>('1849993', 'Jian Yao', 'jian yao')<br/>('46641573', 'Wei Liu', 'wei liu')<br/>('33642939', 'Jiebo Luo', 'jiebo luo')<br/>('38144094', 'Tong Zhang', 'tong zhang')</td><td></td></tr><tr><td>5fa0e6da81acece7026ac1bc6dcdbd8b204a5f0a</td><td></td><td></td><td></td></tr><tr><td>5feb1341a49dd7a597f4195004fe9b59f67e6707</td><td>A Deep Ranking Model for Spatio-Temporal Highlight Detection
+<br/>from a 360◦ Video
+<br/><b>Seoul National University</b></td><td>('7877122', 'Youngjae Yu', 'youngjae yu')<br/>('1693291', 'Sangho Lee', 'sangho lee')<br/>('35272603', 'Joonil Na', 'joonil na')<br/>('35365676', 'Jaeyun Kang', 'jaeyun kang')<br/>('1743920', 'Gunhee Kim', 'gunhee kim')</td><td>{yj.yu, sangho.lee, joonil}@vision.snu.ac.kr, {kjy13411}@gmail.com, gunhee@snu.ac.kr
+</td></tr><tr><td>5f0d4a0b5f72d8700cdf8cb179263a8fa866b59b</td><td>CBMM Memo No. 85
+<br/>06/2018
+<br/>Deep Regression Forests for Age Estimation
+<br/><b>Key Laboratory of Specialty Fiber Optics and Optical Access Networks, Shanghai University</b><br/><b>Johns Hopkins University</b><br/><b>College of Computer and Control Engineering, Nankai University 4: Hikvision Research</b></td><td>('41187410', 'Wei Shen', 'wei shen')<br/>('9544564', 'Yilu Guo', 'yilu guo')<br/>('46394340', 'Yan Wang', 'yan wang')<br/>('1681247', 'Kai Zhao', 'kai zhao')<br/>('46172451', 'Bo Wang', 'bo wang')<br/>('35922327', 'Alan Yuille', 'alan yuille')</td><td></td></tr><tr><td>5f57a1a3a1e5364792b35e8f5f259f92ad561c1f</td><td>Implicit Sparse Code Hashing
+<br/><b>Institute of Information Science</b><br/>Academia Sinica, Taiwan
+</td><td>('2144284', 'Tsung-Yu Lin', 'tsung-yu lin')<br/>('2301765', 'Tsung-Wei Ke', 'tsung-wei ke')<br/>('1805102', 'Tyng-Luh Liu', 'tyng-luh liu')</td><td></td></tr><tr><td>5f27ed82c52339124aa368507d66b71d96862cb7</td><td>Semi-supervised Learning of Classifiers: Theory, Algorithms
+<br/>and Their Application to Human-Computer Interaction
+<br/>This work has been partially funded by NSF Grant IIS 00-85980.
+<br/>DRAFT
+</td><td>('1774778', 'Ira Cohen', 'ira cohen')<br/>('1703601', 'Nicu Sebe', 'nicu sebe')<br/>('1739208', 'Thomas S. Huang', 'thomas s. huang')</td><td>Ira Cohen: Hewlett-Packard Labs, Palo Alto, CA, USA, ira.cohen@hp.com
+<br/>Fabio G. Cozman and Marcelo C. Cirelo: Escola Polit´ecnica, Universidade de S˜ao Paulo, S˜ao Paulo,Brazil. fgcozman@usp.br,
+<br/>marcelo.cirelo@poli.usp.br
+<br/>Nicu Sebe: Faculty of Science, University of Amsterdam, The Netherlands. nicu@science.uva.nl
+<br/>Thomas S. Huang: Beckman Institute, University of Illinois at Urbana-Champaign, USA. huang@ifp.uiuc.edu
+</td></tr><tr><td>5fa932be4d30cad13ea3f3e863572372b915bec8</td><td></td><td></td><td></td></tr><tr><td>5fea26746f3140b12317fcf3bc1680f2746e172e</td><td>Dense Supervision for Visual Comparisons via Synthetic Images
+<br/>Semantic Jitter:
+<br/><b>University of Texas at Austin</b><br/><b>University of Texas at Austin</b><br/>Distinguishing subtle differences in attributes is valuable, yet
+<br/>learning to make visual comparisons remains non-trivial. Not
+<br/>only is the number of possible comparisons quadratic in the
+<br/>number of training images, but also access to images adequately
+<br/>spanning the space of fine-grained visual differences is limited.
+<br/>We propose to overcome the sparsity of supervision problem
+<br/>via synthetically generated images. Building on a state-of-the-
+<br/>art image generation engine, we sample pairs of training images
+<br/>exhibiting slight modifications of individual attributes. Augment-
+<br/>ing real training image pairs with these examples, we then train
+<br/>attribute ranking models to predict the relative strength of an
+<br/>attribute in novel pairs of real images. Our results on datasets of
+<br/>faces and fashion images show the great promise of bootstrapping
+<br/>imperfect image generators to counteract sample sparsity for
+<br/>learning to rank.
+<br/>INTRODUCTION
+<br/>Fine-grained analysis of images often entails making visual
+<br/>comparisons. For example, given two products in a fashion
+<br/>catalog, a shopper may judge which shoe appears more pointy
+<br/>at the toe. Given two selfies, a teen may gauge in which one he
+<br/>is smiling more. Given two photos of houses for sale on a real
+<br/>estate website, a home buyer may analyze which facade looks
+<br/>better maintained. Given a series of MRI scans, a radiologist
+<br/>may judge which pair exhibits the most shape changes.
+<br/>In these and many other such cases, we are interested in
+<br/>inferring how a pair of images compares in terms of a par-
+<br/>ticular property, or “attribute”. That is, which is more pointy,
+<br/>smiling, well-maintained, etc. Importantly, the distinctions of
+<br/>interest are often quite subtle. Subtle comparisons arise both
+<br/>in image pairs that are very similar in almost every regard
+<br/>(e.g., two photos of the same individual wearing the same
+<br/>clothing, yet smiling more in one photo than the other), as
+<br/>well as image pairs that are holistically different yet exhibit
+<br/>only slight differences in the attribute in question (e.g., two
+<br/>individuals different in appearance, and one is smiling slightly
+<br/>more than the other).
+<br/>A growing body of work explores computational models
+<br/>for visual comparisons [1], [2], [3], [4], [5], [6], [7], [8], [9],
+<br/>[10], [11], [12]. In particular, ranking models for “relative
+<br/>attributes” [2], [3], [4], [5], [9], [11] use human-ordered pairs
+<br/>of images to train a system to predict the relative ordering in
+<br/>novel image pairs.
+<br/>A major challenge in training a ranking model is the sparsity
+<br/>of supervision. That sparsity stems from two factors: label
+<br/>availability and image availability. Because training instances
+<br/>consist of pairs of images—together with the ground truth
+<br/>human judgment about which exhibits the property more
+<br/>Fig. 1: Our method “densifies” supervision for training ranking functions to
+<br/>make visual comparisons, by generating ordered pairs of synthetic images.
+<br/>Here, when learning the attribute smiling, real training images need not be
+<br/>representative of the entire attribute space (e.g., Web photos may cluster
+<br/>around commonly photographed expressions, like toothy smiles). Our idea
+<br/>“fills in” the sparsely sampled regions to enable fine-grained supervision.
+<br/>Given a novel pair (top), the nearest synthetic pairs (right) may present better
+<br/>training data than the nearest real pairs (left).
+<br/>or less—the space of all possible comparisons is quadratic
+<br/>in the number of potential
+<br/>training images. This quickly
+<br/>makes it intractable to label an image collection exhaustively
+<br/>for its comparative properties. At the same time, attribute
+<br/>comparisons entail a greater cognitive load than, for example,
+<br/>object category labeling. Indeed, the largest existing relative
+<br/>attribute datasets sample only less than 0.1% of all image pairs
+<br/>for ground truth labels [11], and there is a major size gap
+<br/>between standard datasets labeled for classification (now in
+<br/>the millions [13]) and those for comparisons (at best in the
+<br/>thousands [11]). A popular shortcut is to propagate category-
+<br/>level comparisons down to image instances [4], [14]—e.g.,
+<br/>deem all ocean scenes as “more open” than all forest scenes—
+<br/>but
+<br/>label noise and in practice
+<br/>underperforms training with instance-level comparisons [2].
+<br/>this introduces substantial
+<br/>Perhaps more insidious than the annotation cost, however,
+<br/>is the problem of even curating training images that suf-
+<br/>ficiently illustrate fine-grained differences. Critically, sparse
+<br/>supervision arises not simply because 1) we lack resources
+<br/>to get enough image pairs labeled, but also because 2) we
+<br/>lack a direct way to curate photos demonstrating all sorts
+<br/>of subtle attribute changes. For example, how might we
+<br/>gather unlabeled image pairs depicting all subtle differences
+<br/>Novel PairReal PairsSynthetic Pairsvs. </td><td>('2206630', 'Aron Yu', 'aron yu')<br/>('1794409', 'Kristen Grauman', 'kristen grauman')</td><td>aron.yu@utexas.edu
+<br/>grauman@cs.utexas.edu
+</td></tr><tr><td>5f5906168235613c81ad2129e2431a0e5ef2b6e4</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>A Unified Framework for Compositional Fitting of
+<br/>Active Appearance Models
+<br/>Received: date / Accepted: date
+</td><td>('2575567', 'Joan Alabort-i-Medina', 'joan alabort-i-medina')</td><td></td></tr><tr><td>5fb5d9389e2a2a4302c81bcfc068a4c8d4efe70c</td><td>Multiple Facial Attributes Estimation based on
+<br/>Weighted Heterogeneous Learning
+<br/>H.Fukui* T.Yamashita* Y.Kato* R.Matsui*
+<br/><b>Chubu University</b><br/>**Abeja Inc.
+<br/>1200, Matuoto-cho, Kasugai,
+<br/>4-1-20, Toranomon, Minato-ku,
+<br/>Aichi, Japan
+<br/>Tokyo, Japan
+</td><td>('2531207', 'T. Ogata', 't. ogata')</td><td></td></tr><tr><td>5f676d6eca4c72d1a3f3acf5a4081c29140650fb</td><td>To Skip or not to Skip? A Dataset of Spontaneous Affective Response
+<br/>of Online Advertising (SARA) for Audience Behavior Analysis
+<br/><b>College of Electronics and Information Engineering, Sichuan University, Chengdu 610064, China</b><br/><b>BRIC, University of North Carolina at Chapel Hill, NC 27599, USA</b><br/>3 HP Labs, Palo Alto, CA 94304, USA
+<br/><b>Center for Research in Intelligent Systems, University of California, Riverside, CA 92521, USA</b></td><td>('1803478', 'Songfan Yang', 'songfan yang')<br/>('39776603', 'Le An', 'le an')<br/>('1784929', 'Mehran Kafai', 'mehran kafai')<br/>('1707159', 'Bir Bhanu', 'bir bhanu')</td><td>syang@scu.edu.cn, lan004@unc.edu, mehran.kafai@hp.com, bhanu@cris.ucr.edu
+</td></tr><tr><td>5f453a35d312debfc993d687fd0b7c36c1704b16</td><td><b>Clemson University</b><br/>TigerPrints
+<br/>All Theses
+<br/>12-2015
+<br/>Theses
+<br/>A Training Assistant Tool for the Automated Visual
+<br/>Inspection System
+<br/>Follow this and additional works at: http://tigerprints.clemson.edu/all_theses
+<br/>Part of the Electrical and Computer Engineering Commons
+<br/>Recommended Citation
+<br/>Ramaraj, Mohan Karthik, "A Training Assistant Tool for the Automated Visual Inspection System" (2015). All Theses. Paper 2285.
+<br/>This Thesis is brought to you for free and open access by the Theses at TigerPrints. It has been accepted for inclusion in All Theses by an authorized
+</td><td>('4154752', 'Mohan Karthik Ramaraj', 'mohan karthik ramaraj')</td><td>Clemson University, rmohankarthik91@gmail.com
+<br/>administrator of TigerPrints. For more information, please contact awesole@clemson.edu.
+</td></tr><tr><td>5fc664202208aaf01c9b62da5dfdcd71fdadab29</td><td>arXiv:1504.05308v1 [cs.CV] 21 Apr 2015
+</td><td></td><td></td></tr><tr><td>5fac62a3de11125fc363877ba347122529b5aa50</td><td>AMTnet: Action-Micro-Tube Regression by
+<br/>End-to-end Trainable Deep Architecture
+<br/><b>Oxford Brookes University, Oxford, United Kingdom</b></td><td>('3017538', 'Suman Saha', 'suman saha')<br/>('1754181', 'Fabio Cuzzolin', 'fabio cuzzolin')<br/>('1931660', 'Gurkirt Singh', 'gurkirt singh')</td><td>{suman.saha-2014, gurkirt.singh-2015, fabio.cuzzolin}@brookes.ac.uk
+</td></tr><tr><td>5fa1724a79a9f7090c54925f6ac52f1697d6b570</td><td>Proceedings of the Workshop on Grammar and Lexicon: Interactions and Interfaces,
+<br/>pages 41–47, Osaka, Japan, December 11 2016.
+<br/>41
+</td><td></td><td></td></tr><tr><td>5fba1b179ac80fee80548a0795d3f72b1b6e49cd</td><td>Virtual U: Defeating Face Liveness Detection by Building Virtual Models
+<br/>From Your Public Photos
+<br/><b>University of North Carolina at Chapel Hill</b></td><td>('1734114', 'Yi Xu', 'yi xu')<br/>('39310157', 'True Price', 'true price')<br/>('40454588', 'Jan-Michael Frahm', 'jan-michael frahm')<br/>('1792232', 'Fabian Monrose', 'fabian monrose')</td><td>{yix, jtprice, jmf, fabian}@cs.unc.edu
+</td></tr><tr><td>33f7e78950455c37236b31a6318194cfb2c302a4</td><td>Parameterizing Object Detectors
+<br/>in the Continuous Pose Space
+<br/><b>Boston University, USA</b><br/>2 Disney Research Pittsburgh, USA
+</td><td>('1702188', 'Kun He', 'kun he')<br/>('14517812', 'Leonid Sigal', 'leonid sigal')</td><td>{hekun,sclaroff}@cs.bu.edu, lsigal@disneyresearch.com
+</td></tr><tr><td>33548531f9ed2ce6f87b3a1caad122c97f1fd2e9</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 104 – No.2, October 2014
+<br/>Facial Expression Recognition in Video using
+<br/>Adaboost and SVM
+<br/>Surabhi Prabhakar
+<br/>Department of CSE
+<br/><b>Amity University</b><br/>Noida, India
+<br/>Jaya Sharma
+<br/>Shilpi Gupta
+<br/>Department of CSE
+<br/>Department of CSE
+<br/><b>Amity University</b><br/>Noida, India
+<br/><b>Amity University</b><br/>Noida, India
+</td><td></td><td></td></tr><tr><td>33ac7fd3a622da23308f21b0c4986ae8a86ecd2b</td><td>Building an On-Demand Avatar-Based Health Intervention for Behavior Change
+<br/>School of Computing and Information Sciences
+<br/><b>Florida International University</b><br/>Miami, FL, 33199, USA
+<br/>Department of Computer Science
+<br/><b>University of Miami</b><br/>Coral Gables, FL, 33146, USA
+</td><td>('2671668', 'Ugan Yasavur', 'ugan yasavur')<br/>('2782570', 'Claudia de Leon', 'claudia de leon')<br/>('1809087', 'Reza Amini', 'reza amini')<br/>('1765935', 'Ubbo Visser', 'ubbo visser')</td><td></td></tr><tr><td>33030c23f6e25e30b140615bb190d5e1632c3d3b</td><td>Toward a General Framework for Words and
+<br/>Pictures
+<br/><b>Stony Brook University</b><br/><b>Stony Brook University</b><br/>Hal Daum´e III
+<br/><b>University of Maryland</b><br/>Jesse Dodge
+<br/><b>University of Washington</b><br/><b>University of Maryland</b><br/><b>Stony Brook University</b><br/>Alyssa Mensch
+<br/>M.I.T.
+<br/><b>University of Aberdeen</b><br/>Karl Stratos
+<br/><b>Columbia University</b><br/><b>Stony Brook University</b></td><td>('39668247', 'Alexander C. Berg', 'alexander c. berg')<br/>('1685538', 'Tamara L. Berg', 'tamara l. berg')<br/>('2694557', 'Amit Goyal', 'amit goyal')<br/>('1682965', 'Xufeng Han', 'xufeng han')<br/>('38390487', 'Margaret Mitchell', 'margaret mitchell')<br/>('1721910', 'Kota Yamaguchi', 'kota yamaguchi')</td><td></td></tr><tr><td>33ba256d59aefe27735a30b51caf0554e5e3a1df</td><td>Early Active Learning via Robust
+<br/>Representation and Structured Sparsity
+<br/>†Department of Computer Science and Engineering
+<br/><b>University of Texas at Arlington, Arlington, Texas 76019, USA</b><br/>‡Department of Electrical Engineering and Computer Science
+<br/>Colorado School of Mines, Golden, Colorado 80401, USA
+</td><td>('1688370', 'Feiping Nie', 'feiping nie')<br/>('1683402', 'Hua Wang', 'hua wang')<br/>('1748032', 'Heng Huang', 'heng huang')</td><td>feipingnie@gmail.com, huawangcs@gmail.com, heng@uta.edu, chqding@uta.edu
+</td></tr><tr><td>33c3702b0eee6fc26fc49f79f9133f3dd7fa3f13</td><td><b>Imperial College London</b><br/>Department of Computing
+<br/>Machine Learning Techniques
+<br/>for Automated Analysis of Facial
+<br/>Expressions
+<br/>December, 2013
+<br/>Supervised by Prof. Maja Pantic
+<br/>Submitted in part fulfilment of the requirements for the degree of PhD in Computing and
+<br/><b>the Diploma of Imperial College London. This thesis is entirely my own work, and, except</b><br/>where otherwise indicated, describes my own research.
+</td><td>('1729713', 'Ognjen Rudovic', 'ognjen rudovic')</td><td></td></tr><tr><td>33aff42530c2fd134553d397bf572c048db12c28</td><td>From Emotions to Action Units with Hidden and Semi-Hidden-Task Learning
+<br/>Universitat Pompeu Fabra
+<br/>Centre de Visio per Computador
+<br/>Universitat Pompeu Fabra
+<br/>Barcelona
+<br/>Barcelona
+<br/>Barcelona
+</td><td>('40097226', 'Adria Ruiz', 'adria ruiz')<br/>('2820687', 'Joost van de Weijer', 'joost van de weijer')<br/>('1692494', 'Xavier Binefa', 'xavier binefa')</td><td>adria.ruiz@upf.es
+<br/>joost@cvc.uab.es
+<br/>xavier.binefa@upf.es
+</td></tr><tr><td>33a1a049d15e22befc7ddefdd3ae719ced8394bf</td><td>FULL PAPER
+<br/> International Journal of Recent Trends in Engineering, Vol 2, No. 1, November 2009
+<br/>An Efficient Approach to Facial Feature Detection
+<br/>for Expression Recognition
+<br/>S.P. Khandait1, P.D. Khandait2 and Dr.R.C.Thool2
+<br/>1Deptt. of Info.Tech., K.D.K.C.E., Nagpur, India
+<br/> 2Deptt.of Electronics Engg., K.D.K.C.E., Nagpur, India, 2Deptt. of Info.Tech., SGGSIET, Nanded
+</td><td></td><td>Prapti_khandait@yahoo.co.in
+<br/>prabhakark_117@yahoo.co.in , rcthool@yahoo.com,
+</td></tr><tr><td>334e65b31ad51b1c1f84ce12ef235096395f1ca7</td><td>Emotion in Human-Computer Interaction
+<br/>Emotion in Human-Computer Interaction
+<br/>Brave, S. & Nass, C. (2002). Emotion in human-computer interaction. In J. Jacko & A.
+<br/>Sears (Eds.), Handbook of human-computer interaction (pp. 251-271). Hillsdale, NJ:
+<br/>Lawrence Erlbaum Associates.
+<br/>Scott Brave and Clifford Nass
+<br/>Department of Communication
+<br/><b>Stanford University</b><br/>Stanford, CA 94305-2050
+<br/>Phone: 650-428-1805,650-723-5499
+<br/>Fax: 650-725-2472
+</td><td></td><td>brave,nass@stanford.edu
+</td></tr><tr><td>3328413ee9944de1cc7c9c1d1bf2fece79718ba1</td><td>Co-Training of Audio and Video Representations
+<br/>from Self-Supervised Temporal Synchronization
+<br/><b>Dartmouth College</b><br/>Facebook Research
+<br/><b>Dartmouth College</b></td><td>('3443095', 'Bruno Korbar', 'bruno korbar')<br/>('1687325', 'Du Tran', 'du tran')<br/>('1732879', 'Lorenzo Torresani', 'lorenzo torresani')</td><td>bruno.18@dartmouth.edu
+<br/>trandu@fb.com
+<br/>LT@dartmouth.edu
+</td></tr><tr><td>3399f8f0dff8fcf001b711174d29c9d4fde89379</td><td>Face R-CNN
+<br/>Tencent AI Lab, China
+</td><td>('39049654', 'Hao Wang', 'hao wang')</td><td>{hawelwang,michaelzfli,denisji,yitongwang}@tencent.com
+</td></tr><tr><td>333aa36e80f1a7fa29cf069d81d4d2e12679bc67</td><td>Suggesting Sounds for Images
+<br/>from Video Collections
+<br/>1Computer Science Department, ETH Z¨urich, Switzerland
+<br/>2Disney Research, Switzerland
+</td><td>('39231399', 'Oliver Wang', 'oliver wang')<br/>('1734448', 'Andreas Krause', 'andreas krause')<br/>('2893744', 'Alexander Sorkine-Hornung', 'alexander sorkine-hornung')</td><td>{msoler,krausea}@ethz.ch
+<br/>{jean-charles.bazin,owang,alex}@disneyresearch.com
+</td></tr><tr><td>3312eb79e025b885afe986be8189446ba356a507</td><td>This is a post-print of the original paper published in ECCV 2016 (SpringerLink).
+<br/>MOON : A Mixed Objective Optimization
+<br/>Network for the Recognition of Facial Attributes
+<br/>Vision and Security Technology (VAST) Lab,
+<br/><b>University of Colorado at Colorado Springs</b></td><td>('39886114', 'Ethan M. Rudd', 'ethan m. rudd')<br/>('1760117', 'Terrance E. Boult', 'terrance e. boult')</td><td>{erudd,mgunther,tboult}@vast.uccs.edu
+</td></tr><tr><td>33792bb27ef392973e951ca5a5a3be4a22a0d0c6</td><td>Two-dimensional Whitening Reconstruction for
+<br/>Enhancing Robustness of Principal Component
+<br/>Analysis
+</td><td>('2766473', 'Xiaoshuang Shi', 'xiaoshuang shi')<br/>('1759643', 'Zhenhua Guo', 'zhenhua guo')<br/>('1688370', 'Feiping Nie', 'feiping nie')<br/>('1705066', 'Lin Yang', 'lin yang')<br/>('1748883', 'Jane You', 'jane you')<br/>('1692693', 'Dacheng Tao', 'dacheng tao')</td><td></td></tr><tr><td>3328674d71a18ed649e828963a0edb54348ee598</td><td>IEEE TRANSACTIONS ON SYSTEMS, MAN, AND CYBERNETICS—PART B: CYBERNETICS, VOL. 34, NO. 6, DECEMBER 2004
+<br/>2405
+<br/>A Face and Palmprint Recognition Approach Based
+<br/>on Discriminant DCT Feature Extraction
+</td><td>('15132338', 'Xiao-Yuan Jing', 'xiao-yuan jing')<br/>('1698371', 'David Zhang', 'david zhang')</td><td></td></tr><tr><td>339937141ffb547af8e746718fbf2365cc1570c8</td><td>Facial Emotion Recognition in Real Time
+</td><td>('1849233', 'Dan Duncan', 'dan duncan')<br/>('3133285', 'Gautam Shine', 'gautam shine')<br/>('3158339', 'Chris English', 'chris english')</td><td>duncand@stanford.edu
+<br/>gshine@stanford.edu
+<br/>chriseng@stanford.edu
+</td></tr><tr><td>33402ee078a61c7d019b1543bb11cc127c2462d2</td><td>Self-Supervised Video Representation Learning With Odd-One-Out Networks
+<br/><b>ACRV, The Australian National University University of Oxford QUVA Lab, University of Amsterdam</b></td><td>('1688071', 'Basura Fernando', 'basura fernando')</td><td></td></tr><tr><td>33aa980544a9d627f305540059828597354b076c</td><td></td><td></td><td></td></tr><tr><td>33ae696546eed070717192d393f75a1583cd8e2c</td><td></td><td></td><td></td></tr><tr><td>33f2b44742cc828347ccc5ec488200c25838b664</td><td>Pooling the Convolutional Layers in Deep ConvNets for Action Recognition
+<br/><b>School of Computer Science and Technology, Tianjin University, China</b><br/><b>School of Computer and Information, Hefei University of Technology, China</b></td><td>('2905510', 'Shichao Zhao', 'shichao zhao')<br/>('1732242', 'Yanbin Liu', 'yanbin liu')<br/>('2302512', 'Yahong Han', 'yahong han')<br/>('2248826', 'Richang Hong', 'richang hong')</td><td>{zhaoshichao, csyanbin, yahong}@tju.edu.cn, hongrc.hfut@gmail.com
+</td></tr><tr><td>3393459600368be2c4c9878a3f65a57dcc0c2cfa</td><td>Eigen-PEP for Video Face Recognition
+<br/><b>Stevens Institute of Technology Adobe Systems Inc</b></td><td>('3131569', 'Haoxiang Li', 'haoxiang li')<br/>('1745420', 'Gang Hua', 'gang hua')<br/>('1720987', 'Xiaohui Shen', 'xiaohui shen')<br/>('1721019', 'Jonathan Brandt', 'jonathan brandt')</td><td></td></tr><tr><td>3352426a67eabe3516812cb66a77aeb8b4df4d1b</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 4, NO. 5, APRIL 2015
+<br/>Joint Multi-view Face Alignment in the Wild
+</td><td>('3234063', 'Jiankang Deng', 'jiankang deng')<br/>('2814229', 'George Trigeorgis', 'george trigeorgis')<br/>('47943220', 'Yuxiang Zhou', 'yuxiang zhou')</td><td></td></tr><tr><td>334d6c71b6bce8dfbd376c4203004bd4464c2099</td><td>BICONVEX RELAXATION FOR SEMIDEFINITE PROGRAMMING IN
+<br/>COMPUTER VISION
+</td><td>('36861219', 'Sohil Shah', 'sohil shah')<br/>('1746575', 'Christoph Studer', 'christoph studer')<br/>('1962083', 'Tom Goldstein', 'tom goldstein')</td><td></td></tr><tr><td>33695e0779e67c7722449e9a3e2e55fde64cfd99</td><td>Riemannian Coding and Dictionary Learning: Kernels to the Rescue
+<br/><b>Australian National University and NICTA</b><br/>While sparse coding on non-flat Riemannian manifolds has recently become
+<br/>increasingly popular, existing solutions either are dedicated to specific man-
+<br/>ifolds, or rely on optimization problems that are difficult to solve, especially
+<br/>when it comes to dictionary learning. In this paper, we propose to make use
+<br/>of kernels to perform coding and dictionary learning on Riemannian man-
+<br/>ifolds. To this end, we introduce a general Riemannian coding framework
+<br/>with its kernel-based counterpart. This lets us (i) generalize beyond the spe-
+<br/>cial case of sparse coding; (ii) introduce efficient solutions to two coding
+<br/>schemes; (iii) learn the kernel parameters; (iv) perform unsupervised and
+<br/>supervised dictionary learning in a much simpler manner than previous Rie-
+<br/>mannian coding approaches.
+<br/>i=1, di ∈ M, be a dictionary on a Rie-
+<br/>mannian manifold M, and x ∈ M be a query point on the manifold. We
+<br/>(cid:17)
+<br/>define a general Riemannian coding formulation as
+<br/>More specifically, let D = {di}N
+<br/>(cid:93)N
+<br/>j=1 α jd j
+<br/>min
+<br/>s.t. α ∈ C,
+<br/>+ λγ(α;x,D)
+<br/>δ 2(cid:0)x,
+<br/>(1)
+<br/>on α. Moreover, (cid:85) : M×···×M× R× R···× R → M is an operator
+<br/>where δ : M×M → R+ is a metric on M, α ∈ RN is the vector of Rie-
+<br/>mannian codes, γ is a prior on the codes α and C is a set of constraints
+<br/>that combines multiple dictionary atoms {d j ∈ M} with weights {α j} and
+<br/>generates a point ˆx on M. This general formulation encapsulates intrinsic
+<br/>sparse coding [2, 5], but also lets us derive and intrinsic version of Locality-
+<br/>constrained Linear Coding [10]. Such intrinsic formulations, however, de-
+<br/>pend on the logarithm map, which may be highly nonlinear, or not even have
+<br/>an analytic solution.
+<br/>To overcome these weaknesses and obtain a general formulation of Rie-
+<br/>mannian coding, we propose to perform coding in RKHS. This has the
+<br/>twofold advantage of yielding simple solutions to several popular coding
+<br/>techniques and of resulting in a potentially better representation than stan-
+<br/>dard coding techniques due to the nonlinearity of the approach. To this
+<br/>end, let φ : M → H be a mapping to an RKHS induced by the kernel
+<br/>k(x,y) = φ (x)T φ (y). Coding in H can then be formulated as
+<br/>(cid:13)(cid:13)(cid:13)φ(cid:0)x)−∑N
+<br/>(cid:13)(cid:13)(cid:13)2
+<br/>j=1 α jφ(cid:0)d j)
+<br/>+ λγ(α;φ(cid:0)x),φ(cid:0)D))
+<br/>min
+<br/>s.t. α ∈ C.
+<br/>(2)
+<br/>As shown in the paper, the reconstruction term in (2) can be kernelized.
+<br/>More importantly, after kernelization, this term remains quadratic, convex
+<br/>and similar to its counterpart in Euclidean space. This lets us derive efficient
+<br/>solutions to two coding schemes: kernel Sparse Coding (kSC) and kernel
+<br/>Locality Constrained Coding (kLCC).
+<br/>In many cases, it is beneficial not only to compute the codes for a given
+<br/>dictionary, but also to optimize the dictionary to best suit the problem at
+<br/>hand. Given training data, and for fixed codes, we then show that, by relying
+<br/>on the Representer theorem [8], the dictionary update has an analytic form.
+<br/>Furthermore, we introduce an approach to supervised dictionary learning,
+<br/>which, given labeled data, jointly learns the dictionary and a classifier acting
+<br/>on the codes. The resulting supervised coding schemes are referred to as
+<br/>kSSC and kSLCC.
+<br/>We demonstrate the effectiveness of our approach on three different
+<br/>types of non-flat manifolds, as well as illustrate its generality by also ap-
+<br/>plying it to Euclidean space, which simply is a special type of Rieman-
+<br/>nian manifold. In particular, we evaluated our different techniques on two
+<br/>challenging classification datasets where the images are represented with
+<br/>region covariance descriptors (RCovDs) [9], which lie on SPD manifolds.
+</td><td>('2862871', 'Mathieu Salzmann', 'mathieu salzmann')</td><td></td></tr><tr><td>334ac2a459190b41923be57744aa6989f9a54a51</td><td>Apples to Oranges: Evaluating Image Annotations from Natural Language
+<br/>Processing Systems
+<br/>Brown Laboratory for Linguistic Information Processing (BLLIP)
+<br/><b>Brown University, Providence, RI</b></td><td>('2139196', 'Rebecca Mason', 'rebecca mason')<br/>('1749837', 'Eugene Charniak', 'eugene charniak')</td><td>{rebecca,ec}@cs.brown.edu
+</td></tr><tr><td>33e20449aa40488c6d4b430a48edf5c4b43afdab</td><td>TRANSACTIONS ON AFFECTIVE COMPUTING
+<br/>The Faces of Engagement: Automatic
+<br/>Recognition of Student Engagement from Facial
+<br/>Expressions
+</td><td>('1775637', 'Jacob Whitehill', 'jacob whitehill')<br/>('3089406', 'Zewelanji Serpell', 'zewelanji serpell')<br/>('3267606', 'Yi-Ching Lin', 'yi-ching lin')<br/>('39687351', 'Aysha Foster', 'aysha foster')<br/>('1741200', 'Javier R. Movellan', 'javier r. movellan')</td><td></td></tr><tr><td>333e7ad7f915d8ee3bb43a93ea167d6026aa3c22</td><td>This is the author's version of an article that has been published in this journal. Changes were made to this version by the publisher prior to publication.
+<br/>The final version of record is available at http://dx.doi.org/10.1109/TIFS.2014.2309851
+<br/>DRAFT
+<br/>3D Assisted Face Recognition: Dealing With
+<br/>Expression Variations
+<br/>
+</td><td>('2128163', 'Nesli Erdogmus', 'nesli erdogmus')<br/>('1709849', 'Jean-Luc Dugelay', 'jean-luc dugelay')</td><td></td></tr><tr><td>334166a942acb15ccc4517cefde751a381512605</td><td> International Research Journal of Engineering and Technology (IRJET) e-ISSN: 2395-0056
+<br/> Volume: 04 Issue: 10 | Oct -2017 www.irjet.net p-ISSN: 2395-0072
+<br/>Facial Expression Analysis using Deep Learning
+<br/><b>M.Tech Student, SSG Engineering College, Odisha, India</b><br/>---------------------------------------------------------------------***---------------------------------------------------------------------
+<br/>examination structures need to analyse the facial exercises
+</td><td>('13518951', 'Raman Patel', 'raman patel')</td><td></td></tr><tr><td>33403e9b4bbd913ae9adafc6751b52debbd45b0e</td><td></td><td></td><td></td></tr><tr><td>33ef419dffef85443ec9fe89a93f928bafdc922e</td><td>SelfKin: Self Adjusted Deep Model For
+<br/>Kinship Verification
+<br/><b>Faculty of Engineering, Bar-Ilan University, Israel</b></td><td>('32450996', 'Eran Dahan', 'eran dahan')<br/>('1926432', 'Yosi Keller', 'yosi keller')</td><td></td></tr><tr><td>33ad23377eaead8955ed1c2b087a5e536fecf44e</td><td>Augmenting CRFs with Boltzmann Machine Shape Priors for Image Labeling
+<br/>∗ indicates equal contribution
+</td><td>('2177037', 'Andrew Kae', 'andrew kae')<br/>('1729571', 'Kihyuk Sohn', 'kihyuk sohn')<br/>('1697141', 'Honglak Lee', 'honglak lee')</td><td>1 University of Massachusetts, Amherst, MA, USA, {akae,elm}@cs.umass.edu
+<br/>2 University of Michigan, Ann Arbor, MI, USA, {kihyuks,honglak}@umich.edu
+</td></tr><tr><td>053b263b4a4ccc6f9097ad28ebf39c2957254dfb</td><td>Cost-Effective HITs for Relative Similarity Comparisons
+<br/><b>Cornell University</b><br/><b>University of California, San Diego</b><br/><b>Cornell University</b></td><td>('3035230', 'Michael J. Wilber', 'michael j. wilber')<br/>('2064392', 'Iljung S. Kwak', 'iljung s. kwak')<br/>('1769406', 'Serge J. Belongie', 'serge j. belongie')</td><td></td></tr><tr><td>05b8673d810fadf888c62b7e6c7185355ffa4121</td><td>(will be inserted by the editor)
+<br/>A Comprehensive Survey to Face Hallucination
+<br/>Received: date / Accepted: date
+</td><td>('2870173', 'Nannan Wang', 'nannan wang')</td><td></td></tr><tr><td>056d5d942084428e97c374bb188efc386791e36d</td><td>Temporally Robust Global Motion
+<br/>Compensation by Keypoint-based Congealing
+<br/><b>Michigan State University</b></td><td>('2447931', 'Yousef Atoum', 'yousef atoum')<br/>('1759169', 'Xiaoming Liu', 'xiaoming liu')</td><td></td></tr><tr><td>05e658fed4a1ce877199a4ce1a8f8cf6f449a890</td><td></td><td></td><td></td></tr><tr><td>05ad478ca69b935c1bba755ac1a2a90be6679129</td><td>Attribute Dominance: What Pops Out?
+<br/>Georgia Tech
+</td><td>('3169410', 'Naman Turakhia', 'naman turakhia')</td><td>nturakhia@gatech.edu
+</td></tr><tr><td>0595d18e8d8c9fb7689f636341d8a55cc15b3e6a</td><td>Discriminant Analysis on Riemannian Manifold of Gaussian Distributions
+<br/>for Face Recognition with Image Sets
+<br/>1Key Laboratory of Intelligent Information Processing of Chinese Academy of Sciences (CAS),
+<br/><b>Institute of Computing Technology, CAS, Beijing, 100190, China</b><br/><b>University of Chinese Academy of Sciences, Beijing, 100049, China</b></td><td>('39792743', 'Wen Wang', 'wen wang')<br/>('39792743', 'Ruiping Wang', 'ruiping wang')<br/>('7945869', 'Zhiwu Huang', 'zhiwu huang')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('1710220', 'Xilin Chen', 'xilin chen')</td><td>{wen.wang, zhiwu.huang}@vipl.ict.ac.cn, {wangruiping, sgshan, xlchen}@ict.ac.cn
+</td></tr><tr><td>0573f3d2754df3a717368a6cbcd940e105d67f0b</td><td>Emotion Recognition In The Wild Challenge 2013∗
+<br/>Res. School of Computer
+<br/>Science
+<br/><b>Australian National University</b><br/>Roland Goecke
+<br/>Vision & Sensing Group
+<br/><b>University of Canberra</b><br/><b>Australian National University</b><br/>Vision & Sensing Group
+<br/><b>University of Canberra</b><br/>HCC Lab
+<br/><b>University of Canberra</b><br/><b>Australian National University</b></td><td>('1735697', 'Abhinav Dhall', 'abhinav dhall')<br/>('2942991', 'Jyoti Joshi', 'jyoti joshi')<br/>('1743035', 'Michael Wagner', 'michael wagner')</td><td>jyoti.joshi@canberra.edu.au
+<br/>abhinav.dhall@anu.edu.au
+<br/>roland.goecke@ieee.org
+<br/>michael.wagner@canberra.edu.au
+</td></tr><tr><td>05a0d04693b2a51a8131d195c68ad9f5818b2ce1</td><td>Dual-reference Face Retrieval
+<br/><b>School of Computing Sciences, University of East Anglia, Norwich, UK</b><br/><b>University of Pittsburgh, Pittsburgh, USA</b><br/>3JD Artificial Intelligence Research (JDAIR), Beijing, China
+</td><td>('19285980', 'BingZhang Hu', 'bingzhang hu')<br/>('40255667', 'Feng Zheng', 'feng zheng')<br/>('40799321', 'Ling Shao', 'ling shao')</td><td>bingzhang.hu@uea.ac.uk, feng.zheng@pitt.edu, ling.shao@ieee.org
+</td></tr><tr><td>0562fc7eca23d47096472a1d42f5d4d086e21871</td><td></td><td></td><td></td></tr><tr><td>054738ce39920975b8dcc97e01b3b6cc0d0bdf32</td><td>Towards the Design of an End-to-End Automated
+<br/>System for Image and Video-based Recognition
+</td><td>('9215658', 'Rama Chellappa', 'rama chellappa')<br/>('36407236', 'Jun-Cheng Chen', 'jun-cheng chen')<br/>('26988560', 'Rajeev Ranjan', 'rajeev ranjan')<br/>('2716670', 'Swami Sankaranarayanan', 'swami sankaranarayanan')<br/>('40080979', 'Amit Kumar', 'amit kumar')<br/>('1741177', 'Vishal M. Patel', 'vishal m. patel')<br/>('38171682', 'Carlos D. Castillo', 'carlos d. castillo')</td><td></td></tr><tr><td>05e03c48f32bd89c8a15ba82891f40f1cfdc7562</td><td>Scalable Robust Principal Component
+<br/>Analysis using Grassmann Averages
+</td><td>('2142792', 'Søren Hauberg', 'søren hauberg')<br/>('1808965', 'Aasa Feragen', 'aasa feragen')<br/>('2105795', 'Michael J. Black', 'michael j. black')</td><td></td></tr><tr><td>05a312478618418a2efb0a014b45acf3663562d7</td><td>Accelerated Sampling for the Indian Buffet Process
+<br/><b>Cambridge University, Trumpington Street, Cambridge CB21PZ, UK</b></td><td>('2292194', 'Finale Doshi-Velez', 'finale doshi-velez')<br/>('1983575', 'Zoubin Ghahramani', 'zoubin ghahramani')</td><td>finale@alum.mit.edu
+<br/>zoubin@eng.cam.ac.uk
+</td></tr><tr><td>056ba488898a1a1b32daec7a45e0d550e0c51ae4</td><td>Cascaded Continuous Regression for Real-time
+<br/>Incremental Face Tracking
+<br/>Enrique S´anchez-Lozano, Brais Martinez,
+<br/><b>Computer Vision Laboratory. University of Nottingham</b></td><td>('2610880', 'Georgios Tzimiropoulos', 'georgios tzimiropoulos')</td><td>{psxes1,yorgos.tzimiropoulos,michel.valstar}@nottingham.ac.uk
+</td></tr><tr><td>050fdbd2e1aa8b1a09ed42b2e5cc24d4fe8c7371</td><td>Contents
+<br/>Scale Space and PDE Methods
+<br/>Spatio-Temporal Scale Selection in Video Data . . . . . . . . . . . . . . . . . . . . .
+<br/>Dynamic Texture Recognition Using Time-Causal Spatio-Temporal
+<br/>Scale-Space Filters . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+<br/>Corner Detection Using the Affine Morphological Scale Space . . . . . . . . . . .
+<br/>Luis Alvarez
+<br/>Nonlinear Spectral Image Fusion. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+<br/>Martin Benning, Michael Möller, Raz Z. Nossek, Martin Burger,
+<br/>Daniel Cremers, Guy Gilboa, and Carola-Bibiane Schönlieb
+<br/>16
+<br/>29
+<br/>41
+<br/>Tubular Structure Segmentation Based on Heat Diffusion. . . . . . . . . . . . . . .
+<br/>54
+<br/>Fang Yang and Laurent D. Cohen
+<br/>Analytic Existence and Uniqueness Results for PDE-Based Image
+<br/>Reconstruction with the Laplacian . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+<br/>Laurent Hoeltgen, Isaac Harris, Michael Breuß, and Andreas Kleefeld
+<br/>Combining Contrast Invariant L1 Data Fidelities with Nonlinear
+<br/>Spectral Image Decomposition . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+<br/>Leonie Zeune, Stephan A. van Gils, Leon W.M.M. Terstappen,
+<br/>and Christoph Brune
+<br/>An Efficient and Stable Two-Pixel Scheme for 2D
+<br/>Forward-and-Backward Diffusion . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+<br/>Martin Welk and Joachim Weickert
+<br/>66
+<br/>80
+<br/>94
+<br/>Restoration and Reconstruction
+<br/>Blind Space-Variant Single-Image Restoration of Defocus Blur. . . . . . . . . . .
+<br/>109
+<br/>Leah Bar, Nir Sochen, and Nahum Kiryati
+<br/>Denoising by Inpainting. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+<br/>121
+<br/>Robin Dirk Adam, Pascal Peter, and Joachim Weickert
+<br/>Stochastic Image Reconstruction from Local Histograms
+<br/>of Gradient Orientation . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+<br/>Agnès Desolneux and Arthur Leclaire
+<br/>133
+</td><td>('3205375', 'Tony Lindeberg', 'tony lindeberg')<br/>('3205375', 'Tony Lindeberg', 'tony lindeberg')</td><td></td></tr><tr><td>056294ff40584cdce81702b948f88cebd731a93e</td><td></td><td></td><td></td></tr><tr><td>052880031be0a760a5b606b2ad3d22f237e8af70</td><td>Datasets on object manipulation and interaction: a survey
+</td><td>('3112203', 'Yongqiang Huang', 'yongqiang huang')<br/>('35760122', 'Yu Sun', 'yu sun')</td><td></td></tr><tr><td>055de0519da7fdf27add848e691087e0af166637</td><td>Joint Unsupervised Face Alignment
+<br/>and Behaviour Analysis(cid:2)
+<br/><b>Imperial College London, UK</b></td><td>('1786302', 'Lazaros Zafeiriou', 'lazaros zafeiriou')<br/>('2788012', 'Epameinondas Antonakos', 'epameinondas antonakos')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')<br/>('1694605', 'Maja Pantic', 'maja pantic')</td><td>{l.zafeiriou12,e.antonakos,s.zafeiriou,m.pantic}@imperial.ac.uk
+</td></tr><tr><td>0515e43c92e4e52254a14660718a9e498bd61cf5</td><td>MACHINE LEARNING SYSTEMS FOR DETECTING DRIVER DROWSINESS
+<br/><b>Sabanci University</b><br/>Faculty of
+<br/>Engineering and Natural Sciences
+<br/>Orhanli, Istanbul
+<br/><b>University Of California San Diego</b><br/><b>Institute of</b><br/>Neural Computation
+<br/>La Jolla, San Diego
+</td><td>('40322754', 'Esra Vural', 'esra vural')<br/>('2724380', 'Gwen Littlewort', 'gwen littlewort')<br/>('1858421', 'Marian Bartlett', 'marian bartlett')<br/>('29794862', 'Javier Movellan', 'javier movellan')</td><td></td></tr><tr><td>053c2f592a7f153e5f3746aa5ab58b62f2cf1d21</td><td>International Journal of Research in
+<br/>Engineering & Technology (IJRET)
+<br/>ISSN 2321-8843
+<br/>Vol. 1, Issue 2, July 2013, 11-20
+<br/>© Impact Journals
+<br/>PERFORMANCE EVALUATION OF ILLUMINATION NORMALIZATION TECHNIQUES
+<br/>FOR FACE RECOGNITION
+<br/><b>PSG College of Technology, Coimbatore, Tamil Nadu, India</b></td><td></td><td></td></tr><tr><td>05891725f5b27332836cf058f04f18d74053803f</td><td>One-shot Action Localization by Learning Sequence Matching Network
+<br/><b>The Australian National University</b><br/><b>ShanghaiTech University</b><br/>Fatih Porikli
+<br/><b>The Australian National University</b></td><td>('51050729', 'Hongtao Yang', 'hongtao yang')<br/>('33913193', 'Xuming He', 'xuming he')</td><td>u5226028@anu.edu.au
+<br/>hexm@shanghaitech.edu.cn
+<br/>fatih.porikli@anu.edu.au
+</td></tr><tr><td>0568fc777081cbe6de95b653644fec7b766537b2</td><td>Learning Expressionlets on Spatio-Temporal Manifold for Dynamic Facial
+<br/>Expression Recognition
+<br/>1Key Laboratory of Intelligent Information Processing of Chinese Academy of Sciences (CAS),
+<br/><b>Institute of Computing Technology, CAS, Beijing, 100190, China</b><br/><b>University of Chinese Academy of Sciences (UCAS), Beijing, 100049, China</b><br/><b>University of Oulu, Finland</b></td><td>('1730228', 'Mengyi Liu', 'mengyi liu')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('3373117', 'Ruiping Wang', 'ruiping wang')<br/>('1710220', 'Xilin Chen', 'xilin chen')</td><td>mengyi.liu@vipl.ict.ac.cn, {sgshan, wangruiping, xlchen}@ict.ac.cn
+</td></tr><tr><td>05d80c59c6fcc4652cfc38ed63d4c13e2211d944</td><td>On Sampling-based Approximate Spectral Decomposition
+<br/>Google Research, New York, NY
+<br/><b>Courant Institute of Mathematical Sciences and Google Research, New York, NY</b><br/><b>Courant Institute of Mathematical Sciences, New York, NY</b></td><td>('2794322', 'Sanjiv Kumar', 'sanjiv kumar')<br/>('1709415', 'Mehryar Mohri', 'mehryar mohri')<br/>('8395559', 'Ameet Talwalkar', 'ameet talwalkar')</td><td>sanjivk@google.com
+<br/>mohri@cs.nyu.edu
+<br/>ameet@cs.nyu.edu
+</td></tr><tr><td>05ea7930ae26165e7e51ff11b91c7aa8d7722002</td><td>Learning And-Or Model to Represent Context and
+<br/>Occlusion for Car Detection and Viewpoint Estimation
+</td><td>('3198440', 'Tianfu Wu', 'tianfu wu')<br/>('40479452', 'Bo Li', 'bo li')<br/>('3133970', 'Song-Chun Zhu', 'song-chun zhu')</td><td></td></tr><tr><td>055530f7f771bb1d5f352e2758d1242408d34e4d</td><td>A Facial Expression Recognition System from
+<br/>Depth Video
+<br/>Department of Computer Education
+<br/><b>Sungkyunkwan University</b><br/>Seoul, Republic of Korea
+</td><td>('3241032', 'Md. Zia Uddin', 'md. zia uddin')</td><td>Email: ziauddin@skku.edu
+</td></tr><tr><td>050eda213ce29da7212db4e85f948b812a215660</td><td>Combining Models and Exemplars for Face Recognition:
+<br/>An Illuminating Example
+<br/><b>Robotics Institute</b><br/><b>Carnegie Mellon University</b><br/>Pittsburgh, PA 15213
+</td><td>('1715286', 'Terence Sim', 'terence sim')<br/>('1733113', 'Takeo Kanade', 'takeo kanade')</td><td></td></tr><tr><td>051a84f0e39126c1ebeeb379a405816d5d06604d</td><td>Cogn Comput (2009) 1:257–267
+<br/>DOI 10.1007/s12559-009-9018-7
+<br/>Biometric Recognition Performing in a Bioinspired System
+<br/>Joan Fa`bregas Æ Marcos Faundez-Zanuy
+<br/>Published online: 20 May 2009
+<br/>Ó Springer Science+Business Media, LLC 2009
+</td><td></td><td></td></tr><tr><td>05e3acc8afabc86109d8da4594f3c059cf5d561f</td><td>Actor-Action Semantic Segmentation with Grouping Process Models
+<br/>Department of Electrical Engineering and Computer Science
+<br/><b>University of Michigan, Ann Arbor</b><br/>CVPR 2016
+<br/>OBJECTIVE
+<br/>We seek to label each pixel in a video with a pair of actor (e.g. adult, baby and
+<br/>dog) and action (e.g. eating, walking and jumping) labels.
+<br/>Overview of the Grouping Process Model
+<br/>Video Labeling
+<br/>- We propose a novel grouping process model (GPM) that adaptively adds
+<br/>long-ranging interactions of the supervoxel hierarchy to the labeling CRF.
+<br/>Input Video
+<br/>Segment-Level
+<br/>s.
+<br/>The Tree Slice Problem
+<br/>slice
+<br/>The Video Labeling Problem
+<br/>Selected Nodes
+<br/>Input Video
+<br/>- We incorporate the video-level recognition into segment-level labeling by
+<br/>the means of global labeling cost and the GPM.
+<br/> - a set of random variables defined on the segments taking
+<br/>Definition & Joint Modeling
+<br/>Segment-Level:
+<br/> - a video segmentation with n segments.
+<br/>V = {q1, q2, . . . , qN}
+<br/>L = {l1, l2, . . . , lN}
+<br/>labels from both actor space and action space, e.g. adult-eating, dog-crawling.
+<br/>Supervoxel Hierarchy:
+<br/>T = {T1, T2, . . . , TS}
+<br/>chy with S total supervoxels.
+<br/>s = {s1, s2, . . . , sS}
+<br/>voxels denoting its active or not.
+<br/> - a segmentation tree extracted from a supervoxel hierar-
+<br/>- a set of binary random variables defined on the super-
+<br/>The Overall Objective Function:
+<br/>(L∗, s∗) = arg min
+<br/>E(L, s|V,T )
+<br/>E(L, s|V,T ) = Ev(L|V) +E h(s|T )
+<br/>L,s
+<br/>+(cid:31)t∈T
+<br/>(Eh(Lt|st) +E h(st|Lt))
+<br/>Grouping Cues from Segment Labeling. The GPM uses evidence directly from
+<br/>the segment-level CRF to locate supervoxels across various scales that best cor-
+<br/>respond to the actor and its action.
+<br/>Eh(st|Lt) = (H(Lt)|Lt| + θh)st
+<br/>The Tree Slice Constraint. We seek a single labeling over the video. Each node
+<br/>in CRF is associated with one and only one supervoxel in the hierarchy. This con-
+<br/>straint is the same as our previous work in Xu et al. ICCV 2013.
+<br/>Eh(s|T ) =
+<br/>P(cid:31)p=1
+<br/>δ(PT
+<br/>p s (cid:31)= 1)θτ
+<br/>Labeling Cues from Supervoxel Hierarchy. Once the supervoxels are selected,
+<br/>they provide strong labeling cues to the segment-level CRF. The CRF nodes con-
+<br/>nected to the same active supervoxel are encouraged to have the same label.
+<br/>Eh(Lt|st) =(cid:31) (cid:30)i∈Lt(cid:30)j(cid:30)=i,j∈Lt
+<br/>ij(li, lj) =(cid:31) θt
+<br/>ψh
+<br/>if li (cid:31)= lj
+<br/>otherwise
+<br/>ψh
+<br/>ij(li, lj)
+<br/>if st = 1
+<br/>otherwise
+<br/>Segment-Level CRF
+<br/>The segment-level CRF considers the interplay of actors and actions.
+<br/>- denotes the set of actor labels (e.g. adult, baby and dog).
+<br/>- denotes the set of action labels (e.g. eating, running and crawling).
+<br/>Ev(L|V) =(cid:31)i∈V
+<br/>ξv
+<br/>i (li) +(cid:31)i∈V (cid:31)j∈E(i)
+<br/>ξv
+<br/>ij(li, lj)
+<br/>ξv
+<br/>i (li) = ψv
+<br/>i (lXi ) +φ v
+<br/>i (lYi ) +ϕ v
+<br/>i (lXi , lYj )
+<br/>ij(lXi , lXj )
+<br/>ψv
+<br/>ij(lYi , lYj )
+<br/>φv
+<br/>ij(lXi , lXj ) +φ v
+<br/>ψv
+<br/>ij(lYi , lYj )
+<br/>(cid:31)= lXj ∧ lYi = lYj
+<br/>if lXi
+<br/>(cid:31)= lYj
+<br/>if lXi = lXj ∧ lYi
+<br/>(cid:31)= lXj ∧ lYi
+<br/>(cid:31)= lYj
+<br/>if lXi
+<br/>if lXi = lXj ∧ lYi = lYj .
+<br/>ξv
+<br/>ij(li, lj) =
+<br/>Iterative Inference
+<br/>Directly solving the overall objective function is hard. We use an iterative inference
+<br/>schema to efficiently solve it.
+<br/>The Video Labeling Problem. Given a tree slice, we find the best labeling.
+<br/>L∗ = arg min
+<br/>= arg min
+<br/>E(L|s,V,T )
+<br/>Ev(L|V) +(cid:31)t∈T
+<br/>- Optimization depends on
+<br/>- Solvable by graph-cuts multi-label inference.
+<br/>
+<br/>Eh(Lt|st)
+<br/>The Tree Slice Problem. Given a labeling, we find the best tree slice.
+<br/>- Rewrite as a binary linear program.
+<br/>s∗ = arg min
+<br/>E(s|L,V,T )
+<br/>= arg min
+<br/>Eh(st|Lt)
+<br/>Eh(s|T ) +(cid:31)t∈T
+<br/>s.t. Ps = 1P and s ∈ {0, 1}S
+<br/>min(cid:31)t∈T
+<br/>αtst
+<br/>Experiments: The Actor-Action Semantic Segmentation
+<br/>- Dataset: the A2D large-scale video labeling dataset.
+<br/>One-third of videos have more than one actor performing different actions.
+<br/>- Two different hierarchies: TSP and GBH.
+<br/>- Video-level recognition is added through both global labeling cost and the GPM.
+<br/>It consists of 3782 YouTube videos with an average length of 136 frames.
+<br/>100.0
+<br/>80.0
+<br/>60.0
+<br/>40.0
+<br/>20.0
+<br/>0.0
+<br/>!-./'
+<br/>77.9
+<br/>74.6
+<br/>44.8
+<br/>45.7
+<br/>64.9
+<br/>38.0
+<br/>85.2
+<br/>84.9
+<br/>58.3
+<br/>59.4
+<br/>!"#$
+<br/>$%#$ &'()*+,' !"#$%& !"#$!&
+<br/>100.0
+<br/>80.0
+<br/>60.0
+<br/>40.0
+<br/>20.0
+<br/>0.0
+<br/>!-.(/0
+<br/>77.6
+<br/>74.6
+<br/>45.5
+<br/>47.0
+<br/>85.3
+<br/>84.8
+<br/>60.5
+<br/>61.2
+<br/>63.9
+<br/>29.0
+<br/>!"#$
+<br/>$%#$ &'()*+,' !"#$%& !"#$!&
+<br/>100.0
+<br/>80.0
+<br/>60.0
+<br/>40.0
+<br/>20.0
+<br/>0.0
+<br/>1!-./'23!-.(/04
+<br/>84.2
+<br/>76.2
+<br/>72.9
+<br/>63.0
+<br/>83.8
+<br/>43.3
+<br/>43.9
+<br/>25.4
+<br/>26.5
+<br/>13.9
+<br/>!"#$
+<br/>$%#$ &'()*+,' !"#$%& !"#$!&
+<br/>5)6,73'()*+,)-")*./0+11-'223*+24839,))/:73 ;)/<*)3=(>,)3!--6'*-+
+<br/>Visual example of the actor-action video labelings for all methods. (a) - (c) are
+<br/>videos where most methods get correct labelings; (d) - (e) are videos where GPM
+<br/>models outperform; (h) - (i) are different videos with partially correct labelings.
+<br/>(a)
+<br/>(b)
+<br/>(c)
+<br/>(d)
+<br/>(e)
+<br/>(f)
+<br/>(g)
+<br/>(h)
+<br/>(i)
+<br/>Ground-Truth
+<br/>AHRF
+<br/>FCRF
+<br/>adult-none
+<br/>adult-eating
+<br/>adult-eating
+<br/>adult-eating
+<br/>baby-crawling
+<br/>Trilayer
+<br/>GPM (TSP)
+<br/>GPM (GBH)
+<br/>adult-none
+<br/>adult-eating
+<br/>adult-eating
+<br/>adult-eating
+<br/>car-running
+<br/>car-running
+<br/>car-running
+<br/>car-running
+<br/>car-running
+<br/>car-running
+<br/>baby-rolling
+<br/>baby-rolling
+<br/>baby-rolling
+<br/>baby-rolling
+<br/>baby-rolling
+<br/>baby-rolling
+<br/>dog-eating
+<br/>baby-crawling
+<br/>dog-crawling
+<br/>adult-none
+<br/>car-rolling
+<br/>car-rolling
+<br/>dog-crawling
+<br/>dog-crawling
+<br/>bird-eating
+<br/>cat-climbing
+<br/>adult-walking
+<br/>adult-walking
+<br/>bird-eating
+<br/>bird-eating
+<br/>adult-walking
+<br/>bird-walking
+<br/>bird-flying
+<br/>car-running
+<br/>car-running
+<br/>bird-walking
+<br/>bird-walking
+<br/>car-running
+<br/>dog-walking
+<br/>dog-walking
+<br/>adult-walking
+<br/>car-jumping
+<br/>ball-flying
+<br/>adult-walking
+<br/>car-running
+<br/>adult-walking
+<br/>adult-walking
+<br/>adult-walking
+<br/>car-running
+<br/>adult-walking
+<br/>adult-running
+<br/>ball-rolling
+<br/>adult-none
+<br/>adult-walking
+<br/>adult-running
+<br/>adult-walking
+<br/>adult-walking
+<br/>dog-walking
+<br/>dog-rolling
+<br/>ball-rolling
+<br/>ball-rolling
+<br/>adult-none
+<br/>car-jumping
+<br/>adult-none
+<br/>bird-walking
+<br/>adult-crawling
+<br/>adult-jumping
+<br/>adult-crawling
+<br/>car-flying
+<br/>adult-crawling
+<br/>adult-crawling
+<br/>αt = H(Lt)|Lt| + θh
+<br/>Acknowledgements. This work has been supported in part by Google, Samsung, DARPA W32P4Q-15-C-0070
+<br/>and ARO W911NF-15-1-0354.
+</td><td>('2026123', 'Chenliang Xu', 'chenliang xu')<br/>('3587688', 'Jason J. Corso', 'jason j. corso')</td><td></td></tr><tr><td>05f4d907ee2102d4c63a3dc337db7244c570d067</td><td></td><td></td><td></td></tr><tr><td>0559fb9f5e8627fecc026c8ee6f7ad30e54ee929</td><td>4
+<br/>Facial Expression Recognition
+<br/><b>ADSIP Research Centre, University of Central Lancashire</b><br/>UK
+<br/>1. Introduction
+<br/>Facial expressions are visible signs of a person’s affective state, cognitive activity and
+<br/>personality. Humans can perform expression recognition with a remarkable robustness
+<br/>without conscious effort even under a variety of adverse conditions such as partially
+<br/>occluded faces, different appearances and poor illumination. Over the last two decades, the
+<br/>advances in imaging technology and ever increasing computing power have opened up a
+<br/>possibility of automatic facial expression recognition and this has led to significant research
+<br/>efforts from the computer vision and pattern recognition communities. One reason for this
+<br/>growing interest is due to a wide spectrum of possible applications in diverse areas, such as
+<br/>more engaging human-computer interaction (HCI) systems, video conferencing, augmented
+<br/>reality. Additionally from the biometric perspective, automatic recognition of facial
+<br/>expressions has been investigated in the context of monitoring patients in the intensive care
+<br/>and neonatal units for signs of pain and anxiety, behavioural research, identifying level of
+<br/>concentration, and improving face recognition.
+<br/>Automatic facial expression recognition is a difficult task due to its inherent subjective
+<br/>nature, which is additionally hampered by usual difficulties encountered in pattern
+<br/>recognition and computer vision research. The vast majority of the current state-of-the-art
+<br/>facial expression recognition systems are based on 2-D facial images or videos, which offer
+<br/>good performance only for the data captured under controlled conditions. As a result, there
+<br/>is currently a shift towards the use of 3-D facial data to yield better recognition performance.
+<br/>However, it requires more expensive data acquisition systems and sophisticated processing
+<br/>algorithms. The aim of this chapter is to provide an overview of the existing methodologies
+<br/>and recent advances in the facial expression recognition, as well as present a systematic
+<br/>description of the authors’ work on the use of 3-D facial data for automatic recognition of
+<br/>facial expressions, starting from data acquisition and database creation to data processing
+<br/>algorithms and performance evaluation.
+<br/>1.1 Facial expression
+<br/>Facial expressions are generated ... skin texture” (Pantic & Rothkrantz, 2000)” should be
+<br/>replaced by “Expressions shown on the face are produced by a combination of contraction
+<br/>activities made by facial muscles, with most noticeable temporal deformation around nose,
+<br/>lips, eyelids, and eyebrows as well as facial skin texture patterns (Pantic & Rothkrantz,
+<br/>2000). Typical facial expressions last for a few seconds, normally between 250 milliseconds
+<br/>and five seconds (Fasel & Luettin, 2003). According to psychologists Ekman and Friesen
+</td><td>('2647218', 'Bogdan J. Matuszewski', 'bogdan j. matuszewski')<br/>('2343120', 'Wei Quan', 'wei quan')</td><td></td></tr><tr><td>052f994898c79529955917f3dfc5181586282cf8</td><td>Unsupervised Domain Adaptation for Face Recognition in Unlabeled Videos
+<br/>1NEC Labs America
+<br/>2UC Merced
+<br/><b>Dalian University of Technology</b><br/>4UC San Diego
+</td><td>('1729571', 'Kihyuk Sohn', 'kihyuk sohn')</td><td></td></tr><tr><td>05a7be10fa9af8fb33ae2b5b72d108415519a698</td><td>Multilayer and Multimodal Fusion of Deep Neural Networks
+<br/>for Video Classification
+<br/>NVIDIA
+</td><td>('2214162', 'Xiaodong Yang', 'xiaodong yang')</td><td>{xiaodongy, pmolchanov, jkautz}@nvidia.com
+</td></tr><tr><td>050a149051a5d268fcc5539e8b654c2240070c82</td><td>MAGISTERSKÉ A DOKTORSKÉSTUDIJNÍ PROGRAMY31. 5. 2018SBORNÍKSTUDENTSKÁ VĚDECKÁ KONFERENCE </td><td></td><td></td></tr><tr><td>05318a267226f6d855d83e9338eaa9e718b2a8dd</td><td>_______________________________________________________PROCEEDING OF THE 16TH CONFERENCE OF FRUCT ASSOCIATION
+<br/>Age Estimation from Face Images: Challenging
+<br/>Problem for Audience Measurement Systems
+<br/><b>Yaroslavl State University</b><br/>Russia
+</td><td>('1857299', 'Alexander Ganin', 'alexander ganin')<br/>('39942308', 'Olga Stepanova', 'olga stepanova')<br/>('39635716', 'Anton Lebedev', 'anton lebedev')</td><td>vhr@yandex.ru, angnn@mail.ru, dcslab@uniyar.ac.ru, lebedevdes@gmail.com
+</td></tr><tr><td>057d5f66a873ec80f8ae2603f937b671030035e6</td><td>Newtonian Image Understanding:
+<br/>Unfolding the Dynamics of Objects in Static Images
+<br/><b>Allen Institute for Arti cial Intelligence (AI</b><br/><b>University of Washington</b></td><td>('3012475', 'Roozbeh Mottaghi', 'roozbeh mottaghi')<br/>('2456400', 'Hessam Bagherinezhad', 'hessam bagherinezhad')<br/>('2563325', 'Mohammad Rastegari', 'mohammad rastegari')<br/>('2270286', 'Ali Farhadi', 'ali farhadi')</td><td></td></tr><tr><td>0580edbd7865414c62a36da9504d1169dea78d6f</td><td>Baseline CNN structure analysis for facial expression recognition
+</td><td>('2448391', 'Minchul Shin', 'minchul shin')<br/>('1702520', 'Munsang Kim', 'munsang kim')<br/>('1750864', 'Dong-Soo Kwon', 'dong-soo kwon')</td><td></td></tr><tr><td>050a3346e44ca720a54afbf57d56b1ee45ffbe49</td><td>Multi-Cue Zero-Shot Learning with Strong Supervision
+<br/><b>Max-Planck Institute for Informatics</b></td><td>('2893664', 'Zeynep Akata', 'zeynep akata')<br/>('34070834', 'Mateusz Malinowski', 'mateusz malinowski')<br/>('1739548', 'Mario Fritz', 'mario fritz')<br/>('1697100', 'Bernt Schiele', 'bernt schiele')</td><td></td></tr><tr><td>0517d08da7550241fb2afb283fc05d37fce5d7b7</td><td>Sensors & Transducers, Vol. 153, Issue 6, June 2013, pp. 92-99
+<br/>
+<br/>SSSeeennnsssooorrrsss &&& TTTrrraaannnsssddduuuccceeerrrsss
+<br/>© 2013 by IFSA
+<br/>http://www.sensorsportal.com
+<br/>Combination of Local Multiple Patterns and Exponential
+<br/>Discriminant Analysis for Facial Recognition
+<br/><b>College of Computer Science, Chongqing University, Chongqing, 400030, China</b><br/><b>College of software, Chongqing University of Posts and Telecommunications Chongqing</b><br/><b>Institute of Computer Science and Technology, Chongqing University of Posts and</b><br/>400065, China
+<br/>Telecommunications, Chongqing 400065, China
+<br/>1 Tel.: 023-65112784, fax: 023-65112784
+<br/>Received: 26 April 2013 /Accepted: 14 June 2013 /Published: 25 June 2013
+</td><td>('2623870', 'Lifang Zhou', 'lifang zhou')<br/>('1713814', 'Bin Fang', 'bin fang')<br/>('1964987', 'Weisheng Li', 'weisheng li')<br/>('2103166', 'Lidou Wang', 'lidou wang')</td><td>1 E-mail: zhoulf@cqupt.edu.cn
+</td></tr><tr><td>053931267af79a89791479b18d1b9cde3edcb415</td><td>Proceedings of the Thirty-First AAAI Conference on Artificial Intelligence (AAAI-17)
+<br/>Attributes for Improved Attributes: A Multi-Task Network
+<br/>Utilizing Implicit and Explicit Relationships for Facial Attribute Classification
+<br/><b>University of Maryland, College Park</b><br/><b>College Park, MD</b></td><td>('3351637', 'Emily M. Hand', 'emily m. hand')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td>{emhand, rama}@umiacs.umd.edu
+</td></tr><tr><td>05f3d1e9fb254b275354ca69018e9ed321dd8755</td><td>Face Recognition using Optimal Representation
+<br/>Ensemble
+<br/><b>NICTA , Queensland Research Laboratory, QLD, Australia</b><br/><b>Grif th University, QLD, Australia</b><br/><b>University of Adelaide, SA, Australia</b><br/>29·4·2013
+</td><td>('1711119', 'Hanxi Li', 'hanxi li')<br/>('1780381', 'Chunhua Shen', 'chunhua shen')<br/>('1744926', 'Yongsheng Gao', 'yongsheng gao')</td><td></td></tr><tr><td>05e96d76ed4a044d8e54ef44dac004f796572f1a</td><td></td><td></td><td></td></tr><tr><td>051f03bc25ec633592aa2ff5db1d416b705eac6c</td><td>To appear in the International Joint Conference on Biometrics (IJCB 2011), Washington D.C., October 2011
+<br/>Partial Face Recognition: An Alignment Free Approach
+<br/>Department of Computer Science and Engineering
+<br/><b>Michigan State University, East Lansing, MI 48824, U.S.A</b></td><td>('40397682', 'Shengcai Liao', 'shengcai liao')<br/>('6680444', 'Anil K. Jain', 'anil k. jain')</td><td>{scliao,jain}@cse.msu.edu
+</td></tr><tr><td>9d58e8ab656772d2c8a99a9fb876d5611fe2fe20</td><td>Beyond Temporal Pooling: Recurrence and Temporal
+<br/>Convolutions for Gesture Recognition in Video
+<br/>{lionel.pigou,aaron.vandenoord,sander.dieleman,
+<br/><b>Ghent University</b><br/>February 11, 2016
+</td><td>('2660640', 'Lionel Pigou', 'lionel pigou')<br/>('48373216', 'Sander Dieleman', 'sander dieleman')<br/>('10182287', 'Mieke Van Herreweghe', 'mieke van herreweghe')</td><td>mieke.vanherreweghe, joni.dambre}@ugent.be
+</td></tr><tr><td>9d8ff782f68547cf72b7f3f3beda9dc3e8ecfce6</td><td>International Journal of Pattern Recognition
+<br/>and Arti¯cial Intelligence
+<br/>Vol. 26, No. 1 (2012) 1250002 (9 pages)
+<br/>#.c World Scienti¯c Publishing Company
+<br/>DOI: 10.1142/S0218001412500024
+<br/>IMPROVED PSEUDOINVERSE LINEAR
+<br/>DISCRIMINANT ANALYSIS METHOD FOR
+<br/>DIMENSIONALITY REDUCTION
+<br/>*Signal Processing Laboratory, School of Engineering
+<br/><b>Gri th University, QLD-4111, Brisbane, Australia</b><br/><b>University of the South Paci c, Fiji</b><br/>‡Laboratory of DNA Information Analysis
+<br/><b>Human Genome Center, Institute of Medical Science</b><br/><b>University of Tokyo, 4-6-1 Shirokanedai</b><br/>Minato-ku, Tokyo 108-8639, Japan
+<br/>Received 4 November 2010
+<br/>Accepted 22 September 2011
+<br/>Published 11 May 2012
+<br/>Pseudoinverse linear discriminant analysis (PLDA) is a classical method for solving small
+<br/>sample size problem. However, its performance is limited. In this paper, we propose an improved
+<br/>PLDA method which is faster and produces better classi¯cation accuracy when experimented on
+<br/>several datasets.
+<br/>Keywords : Pseudoinverse;
+<br/>tational complexity.
+<br/>linear discriminant analysis; dimensionality reduction; compu-
+<br/>1. Introduction
+<br/>Dimensionality reduction is an important aspect of pattern classi¯cation. It helps in
+<br/>improving the robustness (or generalization capability) of the pattern classi¯er and
+<br/>in reducing its computational complexity. The linear discriminant analysis (LDA)
+<br/>method5 is a well-known dimensionality reduction technique studied in the litera-
+<br/>ture. The LDA technique ¯nds an orientation matrix W that transforms high-
+<br/>dimensional feature vectors belonging to di®erent classes to lower dimensional
+<br/>feature vectors such that the projected feature vectors of a class are well separated
+<br/>from the feature vectors of other classes. The orientation W is obtained by max-
+<br/>imizing the Fisher's criterion function J1ðWÞ ¼ jW TSBWj=jW TSW Wj, where SB is
+<br/>between-class scatter matrix and SW is within-class scatter matrix. It has been shown
+<br/>in the literature that modi¯ed version of Fisher's criterion J2ðWÞ ¼ jW TSBWj=
+<br/>jW TST Wj produces similar results, where ST is total scatter matrix.6
+<br/>1250002-1
+<br/><b>Int. J. Patt. Recogn. Artif. Intell. 2012.26. Downloaded from www.worldscientific.comby GRIFFITH UNIVERSITY INFORMATION SERVICES on 09/05/12. For personal use only</b></td><td>('3150542', 'Kuldip K. Paliwal', 'kuldip k. paliwal')<br/>('40532633', 'Alok Sharma', 'alok sharma')</td><td>§aloks@ims.u-tokyo.ac.jp
+<br/>¶sharma_al@usp.ac.fj
+</td></tr><tr><td>9d42df42132c3d76e3447ea61e900d3a6271f5fe</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Advanced Computing and Communication Techniques for High Performance Applications (ICACCTHPA-2014)
+<br/>AutoCAP: An Automatic Caption Generation System
+<br/>based on the Text Knowledge Power Series
+<br/>Representation Model
+<br/>M.Tech Dept of CSE
+<br/><b>NSS College of Engineering</b><br/>Palakkad, Kerala
+</td><td>('24326432', 'Krishnapriya P S', 'krishnapriya p s')</td><td></td></tr><tr><td>9d55ec73cab779403cd933e6eb557fb04892b634</td><td>Kernel principal component analysis network for image classification1
+<br/><b>Key Laboratory of Computer Network and Information Integration of Ministry of Education, Southeast University, Nanjing</b><br/>210096, China)
+<br/>(2 Institut National de la Santé et de la Recherche Médicale U 1099, Rennes 35000, France)
+<br/>(3 Laboratoire Traitement du Signal et de l’Image, Université de Rennes 1, Rennes 35000, France)
+<br/>(4Centre de Recherche en Information Biomédicale Sino-français, Nanjing 210096, China)
+</td><td>('1684465', 'Lotfi Senhadji', 'lotfi senhadji')</td><td></td></tr><tr><td>9d8fd639a7aeab0dd1bc6eef9d11540199fd6fe2</td><td>Workshop track - ICLR 2018
+<br/>LEARNING TO CLUSTER
+<br/><b>ZHAW Datalab, Zurich University of Applied Sciences</b><br/>Winterthur, Switzerland
+</td><td>('40087403', 'Benjamin B. Meier', 'benjamin b. meier')<br/>('2793787', 'Thilo Stadelmann', 'thilo stadelmann')</td><td>benjamin.meier70@gmail.com, stdm@zhaw.ch, oliver.duerr@gmail.com
+</td></tr><tr><td>9d357bbf014289fb5f64183c32aa64dc0bd9f454</td><td>Face Identification by Fitting a 3D Morphable Model
+<br/>using Linear Shape and Texture Error Functions
+<br/><b>University of Freiburg, Instit ut f ur Informatik</b><br/>Georges-K¨ohler-Allee 52, 79110 Freiburg, Germany,
+</td><td>('3293655', 'Sami Romdhani', 'sami romdhani')<br/>('2880906', 'Volker Blanz', 'volker blanz')<br/>('1687079', 'Thomas Vetter', 'thomas vetter')</td><td>fromdhani, volker, vetterg@informatik.uni-freiburg.de
+</td></tr><tr><td>9d66de2a59ec20ca00a618481498a5320ad38481</td><td>POP: Privacy-preserving Outsourced Photo Sharing
+<br/>and Searching for Mobile Devices
+<br/><b>cid:3) School of Software, Tsinghua University</b><br/><b>Illinois Institute of Technology</b></td><td>('1718343', 'Lan Zhang', 'lan zhang')<br/>('8645024', 'Taeho Jung', 'taeho jung')<br/>('1773806', 'Cihang Liu', 'cihang liu')<br/>('1752660', 'Xuan Ding', 'xuan ding')<br/>('34569491', 'Xiang-Yang Li', 'xiang-yang li')<br/>('10258874', 'Yunhao Liu', 'yunhao liu')</td><td></td></tr><tr><td>9d839dfc9b6a274e7c193039dfa7166d3c07040b</td><td>Augmented Faces
+<br/>1ETH Z¨urich
+<br/>2Kooaba AG
+<br/>3K.U. Leuven
+</td><td>('1727791', 'Matthias Dantone', 'matthias dantone')<br/>('1696393', 'Lukas Bossard', 'lukas bossard')<br/>('1726249', 'Till Quack', 'till quack')<br/>('1681236', 'Luc Van Gool', 'luc van gool')</td><td>{dantone,bossard,tquack,vangool}@vision.ee.ethz.ch
+</td></tr><tr><td>9dcc6dde8d9f132577290d92a1e76b5decc6d755</td><td>Journal of Trends in the Development of Machinery
+<br/> and Associated Technology
+<br/>Vol. 16, No. 1, 2012, ISSN 2303-4009 (online), p.p. 175-178
+<br/>FACIAL EXPRESSION ANALYSIS BASED
+<br/>ON OPTIMIZED GABOR FEATURES
+<br/><b>Istanbul University</b><br/>Avcilar, 34320 Istanbul
+<br/>Turkey
+<br/>Yalçın Çekiç
+<br/><b>Bahcesehir University</b><br/>Besiktas, 34349 Istanbul
+<br/>Turkey
+</td><td>('40701205', 'Aydın Akan', 'aydın akan')</td><td></td></tr><tr><td>9d36c81b27e67c515df661913a54a797cd1260bb</td><td>Applications (IJERA) ISSN: 2248-9622 www.ijera.com
+<br/>Vol. 2, Issue 1,Jan-Feb 2012, pp.787-793
+<br/> 3D FACE RECOGNITION TECHNIQUES - A REVIEW
+<br/><b>Gujarat Technological University, India</b><br/><b>Gujarat Technological University, India</b><br/>security at many places
+</td><td>('9318822', 'Mahesh M. Goyani', 'mahesh m. goyani')<br/>('9198701', 'Preeti B. Sharma', 'preeti b. sharma')<br/>('9318822', 'Mahesh M. Goyani', 'mahesh m. goyani')</td><td></td></tr><tr><td>9d757c0fede931b1c6ac344f67767533043cba14</td><td>Search Based Face Annotation Using PCA and
+<br/>Unsupervised Label Refinement Algorithms
+<br/><b>Savitribai Phule Pune University</b><br/><b>D.Y.Patil Institute of Engineering and Technology, Pimpri, Pune</b><br/>Mahatma Phulenagar, 120/2 Mahaganpati soc, Chinchwad, Pune-19, MH, India
+<br/><b>D.Y.Patil Institute of Engineering and Technology, Pimpri, Pune</b><br/>Computer Department, D.Y.PIET, Pimpri, Pune-18, MH, India
+<br/>presents
+</td><td>('15731441', 'Shital Shinde', 'shital shinde')<br/>('3392505', 'Archana Chaugule', 'archana chaugule')</td><td></td></tr><tr><td>9d57c4036a0e5f1349cd11bc342ac515307b6720</td><td>Landmark Weighting for 3DMM Shape Fitting
+<br/><b>aSchool of Internet of Things Engineering, Jiangnan University, Wuxi 214122, China</b><br/><b>bCVSSP, University of Surrey, Guildford, GU2 7XH, UK</b><br/>A B S T R A C T
+</td><td>('51232704', 'Yu Yanga', 'yu yanga')<br/>('37020604', 'Xiao-Jun Wu', 'xiao-jun wu')<br/>('1748684', 'Josef Kittler', 'josef kittler')</td><td></td></tr><tr><td>9d941a99e6578b41e4e32d57ece580c10d578b22</td><td>Sensors 2015, 15, 4326-4352; doi:10.3390/s150204326
+<br/>OPEN ACCESS
+<br/>sensors
+<br/>ISSN 1424-8220
+<br/>www.mdpi.com/journal/sensors
+<br/>Article
+<br/>Illumination-Invariant and Deformation-Tolerant Inner Knuckle
+<br/>Print Recognition Using Portable Devices
+<br/><b>School of Computer Science and Engineering, South China University of Technology</b><br/>Higher Education Mega Center, Panyu, Guangzhou 510006, China;
+<br/>2 National-Regional Key Technology Engineering Laboratory for Medical Ultrasound,
+<br/><b>School of Medicine, Shenzhen University, Shenzhen 518060, China</b><br/><b>The Chinese University of Hong Kong</b><br/><b>Shenzhen Research Institute, The Chinese University of Hong Kong, Shenzhen 518057, China</b><br/>Academic Editor: Vittorio M.N. Passaro
+<br/>Received: 6 January 2015 / Accepted: 6 February 2015 / Published: 12 February 2015
+</td><td>('2884662', 'Xuemiao Xu', 'xuemiao xu')<br/>('35636977', 'Qiang Jin', 'qiang jin')<br/>('3041338', 'Le Zhou', 'le zhou')<br/>('38166238', 'Jing Qin', 'jing qin')<br/>('1720633', 'Tien-Tsin Wong', 'tien-tsin wong')<br/>('2513505', 'Guoqiang Han', 'guoqiang han')</td><td>E-Mails: jin.q@mail.scut.edu.cn (Q.J.); z.le02@mail.scut.edu.cn (L.Z.); csgqhan@scut.edu.cn (G.H.)
+<br/>Hong Kong 999077, China; E-Mail: ttwong@cse.cuhk.edu.hk
+<br/>* Authors to whom correspondence should be addressed; E-Mails: xuemx@scut.edu.cn (X.X.);
+<br/>jqin@szu.edu.cn (J.Q.); Tel.:+86-20-39380285 (X.X.); +86-755-86392117 (J.Q.).
+</td></tr><tr><td>9d60ad72bde7b62be3be0c30c09b7d03f9710c5f</td><td>A Survey: Face Recognition Techniques
+<br/>Assistant Professor, ITM GOI
+<br/>M Tech, ITM GOI
+<br/>face
+<br/>video
+<br/>(Eigen
+<br/>passport-verification,
+</td><td>('4122158', 'Arun Agrawal', 'arun agrawal')<br/>('3731551', 'Ranjana Sikarwar', 'ranjana sikarwar')</td><td></td></tr><tr><td>9d896605fbf93315b68d4ee03be0770077f84e40</td><td>Baby Talk: Understanding and Generating Image Descriptions
+<br/><b>Stony Brook University</b><br/><b>Stony Brook University, NY 11794, USA</b></td><td>('2170826', 'Girish Kulkarni', 'girish kulkarni')<br/>('1699545', 'Yejin Choi', 'yejin choi')<br/>('40305780', 'Siming Li', 'siming li')<br/>('1685538', 'Tamara L Berg', 'tamara l berg')<br/>('3128210', 'Visruth Premraj', 'visruth premraj')<br/>('2985883', 'Sagnik Dhar', 'sagnik dhar')<br/>('39668247', 'Alexander C Berg', 'alexander c berg')</td><td>{tlberg}@cs.stonybrook.edu
+</td></tr><tr><td>9d61b0beb3c5903fc3032655dc0fd834ec0b2af3</td><td>Learning a Locality Preserving Subspace for Visual Recognition
+<br/>Microsoft Research Asia, Beijing 100080, China
+<br/><b>School of Mathematical Science, Peking University, China</b></td><td>('3945955', 'Xiaofei He', 'xiaofei he')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')<br/>('1689532', 'Yuxiao Hu', 'yuxiao hu')</td><td>*Department of Computer Science, University of Chicago (xiaofei@cs.uchicago.edu)
+</td></tr><tr><td>9d24179aa33a94c8c61f314203bf9e906d6b64de</td><td>Searching for People through
+<br/>Textual and Visual Attributes
+<br/><b>Institute of Computing</b><br/><b>University of Campinas (Unicamp</b><br/>Campinas-SP, Brazil
+<br/>Fig. 1. The proposed approach aims at searching for people using textual and visual attributes. Given an image database of faces, we extract the points of
+<br/>interest (PoIs) to construct a visual dictionary that allow us to obtain the feature vectors by a quantization process (top). Then we train attribute classifiers to
+<br/>generate a score for each image (middle). Finally, given a textual query (e.g., male), we fusion obtained scores to return a unique final rank (bottom).
+</td><td>('37811966', 'Junior Fabian', 'junior fabian')<br/>('1820089', 'Ramon Pires', 'ramon pires')<br/>('2145405', 'Anderson Rocha', 'anderson rocha')</td><td></td></tr><tr><td>9d3aa3b7d392fad596b067b13b9e42443bbc377c</td><td>Facial Biometric Templates and Aging:
+<br/>Problems and Challenges for Artificial
+<br/>Intelligence
+<br/><b>Cyprus University of Technology</b><br/>P.O Box 50329, Lemesos, 3066, Cyprus
+</td><td>('1830709', 'Andreas Lanitis', 'andreas lanitis')</td><td>andreas.lanitis@cut.ac.cy
+</td></tr><tr><td>9db4b25df549555f9ffd05962b5adf2fd9c86543</td><td>Nonlinear 3D Face Morphable Model
+<br/>Department of Computer Science and Engineering
+<br/><b>Michigan State University, East Lansing MI</b></td><td>('1849929', 'Luan Tran', 'luan tran')<br/>('1759169', 'Xiaoming Liu', 'xiaoming liu')</td><td>{tranluan, liuxm}@msu.edu
+</td></tr><tr><td>9d06d43e883930ddb3aa6fe57c6a865425f28d44</td><td>Clustering Appearances of Objects Under Varying Illumination Conditions
+<br/>Computer Science & Engineering
+<br/><b>University of California at San Diego</b><br/><b>cid:1) Honda Research Institute</b><br/>David Kriegman
+<br/>Computer Science
+<br/>800 California Street
+<br/><b>University of Illinois at Urbana-Champaign</b><br/>La Jolla, CA 92093
+<br/>Mountain View, CA 94041
+<br/>Urbana, IL 61801
+</td><td>('1788818', 'Jeffrey Ho', 'jeffrey ho')<br/>('33047058', 'Jongwoo Lim', 'jongwoo lim')<br/>('1715634', 'Ming-Hsuan Yang', 'ming-hsuan yang')<br/>('2457452', 'Kuang-chih Lee', 'kuang-chih lee')</td><td>jho@cs.ucsd.edu myang@honda-ri.com jlim1@uiuc.edu
+<br/>klee10@uiuc.edu
+<br/>kriegman@cs.ucsd.edu
+</td></tr><tr><td>9c1305383ce2c108421e9f5e75f092eaa4a5aa3c</td><td>SPEAKER RETRIEVAL FOR TV SHOW VIDEOS BY ASSOCIATING AUDIO SPEAKER
+<br/>RECOGNITION RESULT TO VISUAL FACES∗
+<br/><b>School of Electrical and Information Engineering, Xi an Jiaotong University, Xi an, China</b><br/>’CNRS-LTCI, TELECOM-ParisTech, Paris, France
+</td><td>('1859487', 'Yina Han', 'yina han')<br/>('2485487', 'Joseph Razik', 'joseph razik')<br/>('1693574', 'Gerard Chollet', 'gerard chollet')<br/>('1774346', 'Guizhong Liu', 'guizhong liu')</td><td></td></tr><tr><td>9cfb3a68fb10a59ec2a6de1b24799bf9154a8fd1</td><td></td><td></td><td></td></tr><tr><td>9c1860de6d6e991a45325c997bf9651c8a9d716f</td><td>3D Reconstruction and Face Recognition Using Kernel-Based
+<br/> ICA and Neural Networks
+<br/> Chi-Yung Lee
+<br/>Dept. of Electrical Dept. of CSIE Dept. of CSIE
+<br/><b>Engineering Chaoyang University Nankai Institute of</b><br/><b>National University of Technology Technology</b></td><td>('1734467', 'Cheng-Jian Lin', 'cheng-jian lin')</td><td> of Kaohsiung s9527618@cyut.edu.tw cylee@nkc.edu.tw
+<br/>cjlin@nuk.edu.tw
+</td></tr><tr><td>9c9ef6a46fb6395702fad622f03ceeffbada06e5</td><td>EUROGRAPHICS 2004 / M.-P. Cani and M. Slater
+<br/>(Guest Editors)
+<br/>Volume 23 (2004), Number 3
+<br/>Exchanging Faces in Images
+<br/>1 Max-Planck-Institut für Informatik, Saarbrücken, Germany
+<br/><b>University of Basel, Departement Informatik, Basel, Switzerland</b></td><td>('2880906', 'Volker Blanz', 'volker blanz')<br/>('2658043', 'Kristina Scherbaum', 'kristina scherbaum')<br/>('1687079', 'Thomas Vetter', 'thomas vetter')<br/>('1746884', 'Hans-Peter Seidel', 'hans-peter seidel')</td><td></td></tr><tr><td>9c1cdb795fd771003da4378f9a0585730d1c3784</td><td>Stacked Deformable Part Model with Shape
+<br/>Regression for Object Part Localization
+<br/>Center for Biometrics and Security Research & National Laboratory
+<br/><b>of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, China</b></td><td>('1721677', 'Junjie Yan', 'junjie yan')<br/>('1718623', 'Zhen Lei', 'zhen lei')<br/>('1708973', 'Yang Yang', 'yang yang')<br/>('34679741', 'Stan Z. Li', 'stan z. li')</td><td>{jjyan,zlei,yang.yang,szli}@nlpr.ia.ac.cn
+</td></tr><tr><td>9ca7899338129f4ba6744f801e722d53a44e4622</td><td>Deep Neural Networks Regularization for Structured
+<br/>Output Prediction
+<br/>Soufiane Belharbi∗
+<br/>INSA Rouen, LITIS
+<br/>76000 Rouen, France
+<br/>INSA Rouen, LITIS
+<br/>76000 Rouen, France
+<br/>INSA Rouen, LITIS
+<br/>76000 Rouen, France
+<br/>INSA Rouen, LITIS
+<br/>76000 Rouen, France
+<br/>Normandie Univ, UNIROUEN, UNIHAVRE,
+<br/>Normandie Univ, UNIROUEN, UNIHAVRE,
+<br/>Normandie Univ, UNIROUEN, UNIHAVRE,
+<br/>Normandie Univ, UNIROUEN, UNIHAVRE,
+</td><td>('1712446', 'Clément Chatelain', 'clément chatelain')<br/>('1782268', 'Romain Hérault', 'romain hérault')<br/>('37078795', 'Sébastien Adam', 'sébastien adam')</td><td>soufiane.belharbi@insa-rouen.fr
+<br/>romain.herault@insa-rouen.fr
+<br/>clement.chatelain@insa-rouen.fr
+<br/>sebastien.adam@univ-rouen.fr
+</td></tr><tr><td>9c1664f69d0d832e05759e8f2f001774fad354d6</td><td>Action representations in robotics: A
+<br/>taxonomy and systematic classification
+<br/>Journal Title
+<br/>XX(X):1–32
+<br/>c(cid:13)The Author(s) 2016
+<br/>Reprints and permission:
+<br/>sagepub.co.uk/journalsPermissions.nav
+<br/>DOI: 10.1177/ToBeAssigned
+<br/>www.sagepub.com/
+</td><td>('33237072', 'Philipp Zech', 'philipp zech')<br/>('2898615', 'Erwan Renaudo', 'erwan renaudo')<br/>('36081156', 'Simon Haller', 'simon haller')<br/>('46447747', 'Xiang Zhang', 'xiang zhang')</td><td></td></tr><tr><td>9c25e89c80b10919865b9c8c80aed98d223ca0c6</td><td>GENDER PREDICTION BY GAIT ANALYSIS BASED ON TIME SERIES VARIATION OF
+<br/>JOINT POSITIONS
+<br/>Dept. of Computer Science
+<br/>School of Science and Technology
+<br/><b>Meiji University</b><br/>Dept. of Fundamental Science and Technology
+<br/>Graduate School of Science and Technology
+<br/><b>Meiji University</b><br/>1-1-1 Higashimita Tama-ku
+<br/>Kawasaki Kanagawa Japan
+<br/>1-1-1 Higashimita Tama-ku
+<br/>Kawasaki Kanagawa Japan
+</td><td>('1800246', 'Ryusuke Miyamoto', 'ryusuke miyamoto')<br/>('8187964', 'Risako Aoki', 'risako aoki')</td><td>E-mail: miya@cs.meiji.ac.jp
+<br/>E-mail: aori@cs.meiji.ac.jp
+</td></tr><tr><td>9c7444c6949427994b430787a153d5cceff46d5c</td><td>Journal of Computer Science 5 (11): 801-810, 2009
+<br/>ISSN 1549-3636
+<br/>© 2009 Science Publications
+<br/>Boosting Kernel Discriminative Common Vectors for Face Recognition
+<br/>1Department of Computer Science and Engineering,
+<br/><b>SRM University, Kattankulathur, Chennai-603 203, Tamilnadu, India</b><br/><b>Bharathidasan University, Trichy, India</b></td><td>('34608395', 'C. Lakshmi', 'c. lakshmi')<br/>('2594379', 'M. Ponnavaikko', 'm. ponnavaikko')</td><td></td></tr><tr><td>9c065dfb26ce280610a492c887b7f6beccf27319</td><td>Learning from Video and Text via Large-Scale Discriminative Clustering
+<br/>1 ´Ecole Normale Sup´erieure
+<br/>2Inria
+<br/>3CIIRC
+</td><td>('19200186', 'Antoine Miech', 'antoine miech')<br/>('2285263', 'Jean-Baptiste Alayrac', 'jean-baptiste alayrac')<br/>('2329288', 'Piotr Bojanowski', 'piotr bojanowski')<br/>('1785596', 'Ivan Laptev', 'ivan laptev')<br/>('1782755', 'Josef Sivic', 'josef sivic')</td><td></td></tr><tr><td>9c781f7fd5d8168ddae1ce5bb4a77e3ca12b40b6</td><td> International Research Journal of Engineering and Technology (IRJET) e-ISSN: 2395 -0056
+<br/> Volume: 03 Issue: 07 | July-2016 www.irjet.net p-ISSN: 2395-0072
+<br/>Attribute Based Face Classification Using Support Vector Machine
+<br/><b>Research Scholar, PSGR Krishnammal College for Women, Coimbatore</b><br/><b>PSGR Krishnammal College for Women, Coimbatore</b></td><td></td><td></td></tr><tr><td>9c373438285101d47ab9332cdb0df6534e3b93d1</td><td>Occupancy Detection in Vehicles Using Fisher Vector
+<br/>Image Representation
+<br/><b>Xerox Research Center</b><br/>Webster, NY 14580
+<br/><b>Xerox Research Center</b><br/>Webster, NY 14580
+</td><td>('1762503', 'Yusuf Artan', 'yusuf artan')<br/>('5942563', 'Peter Paul', 'peter paul')</td><td>Yusuf.Artan@xerox.com
+<br/>Peter.Paul@xerox.com
+</td></tr><tr><td>9cbb6e42a35f26cf1d19f4875cd7f6953f10b95d</td><td>Expression Recognition with Ri-HOG Cascade
+<br/><b>Graduate School of System Informatics, Kobe University, Kobe, 657-8501, Japan</b><br/><b>RIEB, Kobe University, Kobe, 657-8501, Japan</b></td><td>('2866465', 'Jinhui Chen', 'jinhui chen')<br/>('2834542', 'Zhaojie Luo', 'zhaojie luo')<br/>('1744026', 'Tetsuya Takiguchi', 'tetsuya takiguchi')<br/>('1678564', 'Yasuo Ariki', 'yasuo ariki')</td><td></td></tr><tr><td>9ce0d64125fbaf625c466d86221505ad2aced7b1</td><td>Saliency Based Framework for Facial Expression
+<br/>Recognition
+<br/>To cite this version:
+<br/>Facial Expression Recognition. Frontiers of Computer Science, 2017, <10.1007/s11704-017-6114-9>.
+<br/><hal-01546192>
+<br/>HAL Id: hal-01546192
+<br/>https://hal.archives-ouvertes.fr/hal-01546192
+<br/>Submitted on 23 Jun 2017
+<br/>HAL is a multi-disciplinary open access
+<br/>archive for the deposit and dissemination of sci-
+<br/>entific research documents, whether they are pub-
+<br/>lished or not. The documents may come from
+<br/>teaching and research institutions in France or
+<br/><b>abroad, or from public or private research centers</b><br/>L’archive ouverte pluridisciplinaire HAL, est
+<br/>destinée au dépôt et à la diffusion de documents
+<br/>scientifiques de niveau recherche, publiés ou non,
+<br/>émanant des établissements d’enseignement et de
+<br/>recherche français ou étrangers, des laboratoires
+<br/>publics ou privés.
+</td><td>('1943666', 'Rizwan Ahmed Khan', 'rizwan ahmed khan')<br/>('39469581', 'Alexandre Meyer', 'alexandre meyer')<br/>('1971616', 'Hubert Konik', 'hubert konik')<br/>('1768560', 'Saïda Bouakaz', 'saïda bouakaz')<br/>('1943666', 'Rizwan Ahmed Khan', 'rizwan ahmed khan')<br/>('39469581', 'Alexandre Meyer', 'alexandre meyer')<br/>('1971616', 'Hubert Konik', 'hubert konik')<br/>('1768560', 'Saïda Bouakaz', 'saïda bouakaz')</td><td></td></tr><tr><td>9c4cc11d0df2de42d6593f5284cfdf3f05da402a</td><td>Appears in the 14th International Conference on Pattern Recognition, ICPR’98, Queensland, Australia, August 17-20, 1998.
+<br/>Enhanced Fisher Linear Discriminant Models for Face Recognition
+<br/><b>George Mason University</b><br/><b>University Drive, Fairfax, VA 22030-4444, USA</b><br/>
+</td><td>('39664966', 'Chengjun Liu', 'chengjun liu')<br/>('1781577', 'Harry Wechsler', 'harry wechsler')</td><td>@cs.gmu.edu
+</td></tr><tr><td>9cd6a81a519545bf8aa9023f6e879521f85d4cd1</td><td>Domain-invariant Face Recognition using Learned Low-rank
+<br/>Transformation
+<br/><b>Duke University</b><br/>Durham, NC, 27708
+<br/><b>Duke University</b><br/>Durham, NC, 27708
+<br/><b>University of Maryland</b><br/><b>College Park, MD</b><br/>May 11, 2014
+</td><td>('2077648', 'Qiang Qiu', 'qiang qiu')<br/>('1699339', 'Guillermo Sapiro', 'guillermo sapiro')<br/>('2682056', 'Ching-Hui Chen', 'ching-hui chen')</td><td>qiang.qiu@duke.edu
+<br/>guillermo.sapiro@duke.edu
+<br/>ching@umd.edu
+</td></tr><tr><td>9cadd166893f1b8aaecb27280a0915e6694441f5</td><td>Appl. Math. Inf. Sci. 7, No. 2, 455-462 (2013)
+<br/>455
+<br/>Applied Mathematics & Information Sciences
+<br/>An International Journal
+<br/>c⃝ 2013 NSP
+<br/>Natural Sciences Publishing Cor.
+<br/>Multi-Modal Emotion Recognition Fusing Video and
+<br/>Audio
+<br/><b>School of Computer Software, Tianjin University, 300072 Tianjin, China</b><br/><b>School of Computer Science and Technology, Tianjin University, 300072 Tianjin, China</b><br/>Received: 7 Sep. 2012; Revised 15 Nov. 2012; Accepted 18 Nov. 2012
+<br/>Published online: 1 Mar. 2013
+</td><td>('29962190', 'Chao Xu', 'chao xu')<br/>('2531641', 'Pufeng Du', 'pufeng du')<br/>('38465490', 'Zhiyong Feng', 'zhiyong feng')<br/>('1889014', 'Zhaopeng Meng', 'zhaopeng meng')<br/>('2375971', 'Tianyi Cao', 'tianyi cao')<br/>('36675950', 'Caichao Dong', 'caichao dong')</td><td></td></tr><tr><td>02601d184d79742c7cd0c0ed80e846d95def052e</td><td>Graphical Representation for Heterogeneous
+<br/>Face Recognition
+</td><td>('2299758', 'Chunlei Peng', 'chunlei peng')<br/>('10699750', 'Xinbo Gao', 'xinbo gao')<br/>('2870173', 'Nannan Wang', 'nannan wang')<br/>('38158055', 'Jie Li', 'jie li')</td><td></td></tr><tr><td>02cc96ad997102b7c55e177ac876db3b91b4e72c</td><td>MuseumVisitors: a dataset for pedestrian and group detection, gaze estimation
+<br/>and behavior understanding
+</td><td>('36971654', 'Federico Bartoli', 'federico bartoli')<br/>('2973738', 'Giuseppe Lisanti', 'giuseppe lisanti')<br/>('2831602', 'Lorenzo Seidenari', 'lorenzo seidenari')<br/>('2602265', 'Svebor Karaman', 'svebor karaman')<br/>('8196487', 'Alberto Del Bimbo', 'alberto del bimbo')</td><td>1{firstname.lastname}@unifi.it, University of Florence
+<br/>2sk4089@columbia.edu, Columbia University
+</td></tr><tr><td>02e43d9ca736802d72824892c864e8cfde13718e</td><td>Transferring a Semantic Representation for Person Re-Identification and
+<br/>Search
+<br/>Shi, Z; Yang, Y; Hospedales, T; XIANG, T; IEEE Conference on Computer Vision and
+<br/>Pattern Recognition
+<br/>© 2015 IEEE. Personal use of this material is permitted. Permission from IEEE must be
+<br/><b>obtained for all other uses, in any current or future media, including reprinting/republishing</b><br/>this material for advertising or promotional purposes, creating new collective works, for resale
+<br/>or redistribution to servers or lists, or reuse of any copyrighted component of this work in
+<br/>other works.
+<br/>For additional information about this publication click this link.
+<br/>http://qmro.qmul.ac.uk/xmlui/handle/123456789/10075
+<br/>Information about this research object was correct at the time of download; we occasionally
+<br/>make corrections to records, please therefore check the published record when citing. For
+</td><td></td><td>more information contact scholarlycommunications@qmul.ac.uk
+</td></tr><tr><td>02fda07735bdf84554c193811ba4267c24fe2e4a</td><td>Illumination Invariant Face Recognition
+<br/>Using Near-Infrared Images
+</td><td>('34679741', 'Stan Z. Li', 'stan z. li')<br/>('1724841', 'Rufeng Chu', 'rufeng chu')<br/>('40397682', 'Shengcai Liao', 'shengcai liao')<br/>('39306651', 'Lun Zhang', 'lun zhang')</td><td></td></tr><tr><td>023ed32ac3ea6029f09b8c582efbe3866de7d00a</td><td>CENTER FOR
+<br/>MACHINE PERCEPTION
+<br/>Discriminative learning from
+<br/>partially annotated examples
+<br/>CZECH TECHNICAL
+<br/><b>UNIVERSITY IN PRAGUE</b><br/>Study Programme: Electrical Engineering and
+<br/>Information Technology
+<br/>Branch of Study: Artificial Intelligence and Biocybernetics
+<br/>CTU–CMP–2016–07
+<br/>June 14, 2016
+<br/>ftp://cmp.felk.cvut.cz/pub/cvl/articles/antoniuk/Antoniuk-TR-2016-07.pdf
+<br/>Available at
+<br/>Thesis Advisors: Ing. Vojtˇech Franc, Ph.D. ,
+<br/>prof. Ing. V´aclav Hlav´aˇc, CSc.
+<br/>Acknowledgements: SGS15/201/OHK3/3T/13, CAK/TE01020197,
+<br/>UP-Driving/688652, GACR/P103/12/G084.
+<br/><b>Research Reports of CMP, Czech Technical University in Prague, No</b><br/>Published by
+<br/>Center for Machine Perception, Department of Cybernetics
+<br/><b>Faculty of Electrical Engineering, Czech Technical University</b><br/>Technick´a 2, 166 27 Prague 6, Czech Republic
+<br/>fax +420 2 2435 7385, phone +420 2 2435 7637, www: http://cmp.felk.cvut.cz
+</td><td>('2742026', 'Kostiantyn Antoniuk', 'kostiantyn antoniuk')</td><td>antonkos@fel.cvut.cz
+</td></tr><tr><td>0241513eeb4320d7848364e9a7ef134a69cbfd55</td><td>Supervised Translation-Invariant Sparse
+<br/>Coding
+<br/><b>University of Illinois at Urbana Champaign</b><br/>²NEC Laboratories America at Cupertino
+</td><td>('1706007', 'Jianchao Yang', 'jianchao yang')<br/>('38701713', 'Kai Yu', 'kai yu')</td><td></td></tr><tr><td>02dd0af998c3473d85bdd1f77254ebd71e6158c6</td><td>PPP: Joint Pointwise and Pairwise Image Label Prediction
+<br/>1Department of Computer Science, Arizona State Univerity
+<br/>2Yahoo Research
+</td><td>('33513248', 'Yilin Wang', 'yilin wang')<br/>('1736632', 'Jiliang Tang', 'jiliang tang')</td><td>{yilinwang,suhang.wang,huan.liu,baoxin.li}@asu.edu
+<br/>jlt@yahoo-inc.com
+</td></tr><tr><td>0290523cabea481e3e147b84dcaab1ef7a914612</td><td>Generated Motion Maps
+<br/><b>Tokyo Denki University</b><br/><b>National Institute of Advanced Industrial Science and Technology (AIST</b></td><td>('20505300', 'Yuta Matsuzaki', 'yuta matsuzaki')<br/>('34935749', 'Kazushige Okayasu', 'kazushige okayasu')<br/>('2462801', 'Akio Nakamura', 'akio nakamura')<br/>('1730200', 'Hirokatsu Kataoka', 'hirokatsu kataoka')</td><td>matsuzaki.y, okayasu.k@is.dendai.ac.jp, nkmr-a@cck.dendai.ac.jp
+<br/>hirokatsu.kataoka@aist.go.jp
+</td></tr><tr><td>0229829e9a1eed5769a2b5eccddcaa7cd9460b92</td><td>Pooled Motion Features for First-Person Videos
+<br/><b>Jet Propulsion Laboratory, California Institute of Technology, Pasadena, CA</b><br/>Figure 1: Overall representation framework of our pooled time series (PoT). Given a sequence of per-frame feature descriptors (e.g., HOF or CNN
+<br/>features) from a video, PoT represents motion information in the video by computing short-term/long-term changes in each descriptor value.
+<br/>In this paper, we present a new feature representation for first-person videos.
+<br/>In first-person video understanding (e.g., activity recognition [4]), it is very
+<br/>important to capture both entire scene dynamics (i.e., egomotion) and salient
+<br/>local motion observed in videos. We describe a representation framework
+</td><td>('1904850', 'Brandon Rothrock', 'brandon rothrock')</td><td></td></tr><tr><td>025720574ef67672c44ba9e7065a83a5d6075c36</td><td>Unsupervised Learning of Video Representations using LSTMs
+<br/><b>University of Toronto, 6 Kings College Road, Toronto, ON M5S 3G4 CANADA</b></td><td>('2897313', 'Nitish Srivastava', 'nitish srivastava')<br/>('2711409', 'Elman Mansimov', 'elman mansimov')<br/>('1776908', 'Ruslan Salakhutdinov', 'ruslan salakhutdinov')</td><td>NITISH@CS.TORONTO.EDU
+<br/>EMANSIM@CS.TORONTO.EDU
+<br/>RSALAKHU@CS.TORONTO.EDU
+</td></tr><tr><td>029317f260b3303c20dd58e8404a665c7c5e7339</td><td>1276
+<br/>Character Identification in Feature-Length Films
+<br/>Using Global Face-Name Matching
+<br/>and Yeh-Min Huang, Member, IEEE
+</td><td>('1688633', 'Changsheng Xu', 'changsheng xu')<br/>('1694235', 'Hanqing Lu', 'hanqing lu')</td><td></td></tr><tr><td>026e4ee480475e63ae68570d73388f8dfd4b4cde</td><td>Evaluating gender portrayal in Bangladeshi TV
+<br/>Department of CSE
+<br/><b>Eastern University</b><br/>Dhaka, Bangladesh
+<br/>Department of Women and Gender Studies
+<br/>Rawshan E Fatima
+<br/><b>Dhaka University</b><br/>Dhaka, Bangladesh
+<br/><b>Khulna University of Engineering and Technology</b><br/><b>Massachusetts Institute of Technology</b><br/>Department of EEE
+<br/>Khulna, Bangladesh
+<br/>Media Lab
+<br/>Cambridge, MA, USA
+</td><td>('34688479', 'Md. Naimul Hoque', 'md. naimul hoque')<br/>('40081015', 'Manash Kumar Mandal', 'manash kumar mandal')<br/>('1706468', 'Nazmus Saquib', 'nazmus saquib')</td><td>naimul.et@easternuni.edu.bd
+<br/>rawshan.e.fatima@gmail.com
+<br/>manashmndl@gmail.com
+<br/>saquib@mit.edu
+</td></tr><tr><td>02e628e99f9a1b295458cb453c09863ea1641b67</td><td>Two-stage Convolutional Part Heatmap
+<br/>Regression for the 1st 3D Face Alignment in the
+<br/>Wild (3DFAW) Challenge
+<br/><b>Computer Vision Laboratory, University of Nottingham, Nottingham, UK</b></td><td>('3458121', 'Adrian Bulat', 'adrian bulat')<br/>('2610880', 'Georgios Tzimiropoulos', 'georgios tzimiropoulos')</td><td>{adrian.bulat,yorgos.tzimiropoulos}@nottingham.ac.uk
+</td></tr><tr><td>0273414ba7d56ab9ff894959b9d46e4b2fef7fd0</td><td>Photographic home styles in Congress: a
+<br/>computer vision approach∗
+<br/>December 1, 2016
+</td><td>('40845190', 'L. Jason Anastasopoulos', 'l. jason anastasopoulos')<br/>('2007721', 'Dhruvil Badani', 'dhruvil badani')<br/>('2647307', 'Crystal Lee', 'crystal lee')<br/>('2361255', 'Shiry Ginosar', 'shiry ginosar')<br/>('40411568', 'Jake Williams', 'jake williams')</td><td></td></tr><tr><td>02e133aacde6d0977bca01ffe971c79097097b7f</td><td></td><td></td><td></td></tr><tr><td>02567fd428a675ca91a0c6786f47f3e35881bcbd</td><td>ACCEPTED BY IEEE TIP
+<br/>Deep Label Distribution Learning
+<br/>With Label Ambiguity
+</td><td>('2226422', 'Bin-Bin Gao', 'bin-bin gao')<br/>('1694501', 'Chao Xing', 'chao xing')<br/>('3407628', 'Chen-Wei Xie', 'chen-wei xie')<br/>('1808816', 'Jianxin Wu', 'jianxin wu')<br/>('1735299', 'Xin Geng', 'xin geng')</td><td></td></tr><tr><td>02f4b900deabbe7efa474f2815dc122a4ddb5b76</td><td>Local and Global Optimization Techniques in Graph-based Clustering
+<br/><b>The University of Tokyo, Japan</b></td><td>('11682769', 'Daiki Ikami', 'daiki ikami')<br/>('2759239', 'Toshihiko Yamasaki', 'toshihiko yamasaki')<br/>('1712839', 'Kiyoharu Aizawa', 'kiyoharu aizawa')</td><td>{ikami, yamasaki, aizawa}@hal.t.u-tokyo.ac.jp
+</td></tr><tr><td>029b53f32079063047097fa59cfc788b2b550c4b</td><td></td><td></td><td></td></tr><tr><td>02bd665196bd50c4ecf05d6852a4b9ba027cd9d0</td><td></td><td></td><td></td></tr><tr><td>026b5b8062e5a8d86c541cfa976f8eee97b30ab8</td><td>MDLFace: Memorability Augmented Deep Learning for Video Face Recognition
+<br/>IIIT-Delhi, India
+</td><td>('1931069', 'Gaurav Goswami', 'gaurav goswami')<br/>('1875774', 'Romil Bhardwaj', 'romil bhardwaj')<br/>('39129417', 'Richa Singh', 'richa singh')<br/>('2338122', 'Mayank Vatsa', 'mayank vatsa')</td><td>{gauravgs,romil11092,rsingh,mayank}@iiitd.ac.in
+</td></tr><tr><td>0235b2d2ae306b7755483ac4f564044f46387648</td><td>Recognition of Facial Attributes
+<br/>using Adaptive Sparse Representations
+<br/>of Random Patches
+<br/>1 Department of Computer Science
+<br/>Pontificia Universidad Cat´olica de Chile
+<br/>http://dmery.ing.puc.cl
+<br/>2 Department of Computer Science & Engineering
+<br/><b>University of Notre Dame</b><br/>http://www.nd.edu/~kwb
+</td><td>('1797475', 'Domingo Mery', 'domingo mery')</td><td></td></tr><tr><td>02467703b6e087799e04e321bea3a4c354c5487d</td><td>To appear in the CVPR Workshop on Biometrics, June 2016
+<br/>Grouper: Optimizing Crowdsourced Face Annotations∗
+<br/>Noblis
+<br/>Noblis
+<br/>Noblis
+<br/>Noblis
+<br/><b>Michigan State University</b></td><td>('9453012', 'Jocelyn C. Adams', 'jocelyn c. adams')<br/>('7996649', 'Kristen C. Allen', 'kristen c. allen')<br/>('15282121', 'Tim Miller', 'tim miller')<br/>('1718102', 'Nathan D. Kalka', 'nathan d. kalka')<br/>('6680444', 'Anil K. Jain', 'anil k. jain')</td><td>jocelyn.adams@noblis.org
+<br/>kristen.allen@noblis.org
+<br/>timothy.miller@noblis.org
+<br/>nathan.kalka@noblis.org
+<br/>jain@cse.msu.edu
+</td></tr><tr><td>02e39f23e08c2cb24d188bf0ca34141f3cc72d47</td><td>REMOVING ILLUMINATION ARTIFACTS FROM FACE IMAGES USING THE NUISANCE
+<br/>ATTRIBUTE PROJECTION
+<br/>Vitomir ˇStruc, Boˇstjan Vesnicer, France Miheliˇc, Nikola Paveˇsi´c
+<br/><b>Faculty of Electrical Engineering, University of Ljubljana, Tr za ska 25, SI-1000 Ljubljana, Slovenia</b></td><td></td><td></td></tr><tr><td>023be757b1769ecb0db810c95c010310d7daf00b</td><td>YANG, MOU, ZHANG ET AL.: FACE ALIGNMENT ASSISTED BY HEAD POSE ESTIMATION1
+<br/>Face Alignment Assisted by Head Pose
+<br/>Estimation
+<br/>1 Computer Laboratory
+<br/><b>University of Cambridge</b><br/>Cambridge, UK
+<br/>2 School of EECS
+<br/><b>Queen Mary University of London</b><br/>London, UK
+<br/>3 Faculty of Arts & Sciences
+<br/><b>Harvard University</b><br/>Cambridge, MA, US
+</td><td>('2966679', 'Heng Yang', 'heng yang')<br/>('2734386', 'Wenxuan Mou', 'wenxuan mou')<br/>('40491398', 'Yichi Zhang', 'yichi zhang')<br/>('1744405', 'Ioannis Patras', 'ioannis patras')<br/>('1781916', 'Hatice Gunes', 'hatice gunes')<br/>('39626495', 'Peter Robinson', 'peter robinson')</td><td>heng.yang@cl.cam.ac.uk
+<br/>w.mou@qmul.ac.uk
+<br/>yichizhang@fas.harvard.edu
+<br/>i.patras@qmul.ac.uk
+<br/>h.gunes@qmul.ac.uk
+<br/>peter.robinson@cl.cam.ac.uk
+</td></tr><tr><td>0278acdc8632f463232e961563e177aa8c6d6833</td><td>Selective Transfer Machine for Personalized
+<br/>Facial Expression Analysis
+<br/>1 INTRODUCTION
+<br/>Index Terms—Facial expression analysis, personalization, domain adaptation, transfer learning, support vector machine (SVM)
+<br/>A UTOMATIC facial AU detection confronts a number of
+</td><td>('39336289', 'Wen-Sheng Chu', 'wen-sheng chu')<br/>('3141839', 'Fernando De la Torre', 'fernando de la torre')<br/>('1737918', 'Jeffrey F. Cohn', 'jeffrey f. cohn')</td><td></td></tr><tr><td>0209389b8369aaa2a08830ac3b2036d4901ba1f1</td><td>DenseReg: Fully Convolutional Dense Shape Regression In-the-Wild
+<br/>Rıza Alp G¨uler 1
+<br/>1INRIA-CentraleSup´elec, France
+<br/><b>Imperial College London, UK</b><br/><b>University College London, UK</b></td><td>('2814229', 'George Trigeorgis', 'george trigeorgis')<br/>('2788012', 'Epameinondas Antonakos', 'epameinondas antonakos')<br/>('2796644', 'Patrick Snape', 'patrick snape')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')<br/>('2010660', 'Iasonas Kokkinos', 'iasonas kokkinos')</td><td>1riza.guler@inria.fr
+<br/>2{g.trigeorgis, e.antonakos, p.snape,s.zafeiriou}@imperial.ac.uk
+<br/>3i.kokkinos@cs.ucl.ac.uk
+</td></tr><tr><td>02c993d361dddba9737d79e7251feca026288c9c</td><td></td><td></td><td></td></tr><tr><td>02239ae5e922075a354169f75f684cad8fdfd5ab</td><td>Commonly Uncommon:
+<br/>Semantic Sparsity in Situation Recognition
+<br/><b>Computer Science and Engineering, University of Washington, Seattle, WA</b><br/><b>Allen Institute for Arti cial Intelligence (AI2), Seattle, WA</b><br/><b>University of Virginia, Charlottesville, VA</b></td><td>('2064210', 'Mark Yatskar', 'mark yatskar')<br/>('2004053', 'Vicente Ordonez', 'vicente ordonez')<br/>('2270286', 'Ali Farhadi', 'ali farhadi')</td><td>[my89, lsz, ali]@cs.washington.edu, vicente@cs.virginia.edu
+</td></tr><tr><td>02d650d8a3a9daaba523433fbe93705df0a7f4b1</td><td>How Does Aging Affect Facial Components?
+<br/><b>Michigan State University</b></td><td>('40653304', 'Charles Otto', 'charles otto')<br/>('34393045', 'Hu Han', 'hu han')</td><td>{ottochar,hhan,jain}@cse.msu.edu
+</td></tr><tr><td>0294f992f8dfd8748703f953925f9aee14e1b2a2</td><td>Blur-Robust Face Recognition via
+<br/>Transformation Learning
+<br/><b>Beijing University of Posts and Telecommunications, Beijing, China</b></td><td>('40448827', 'Jun Li', 'jun li')<br/>('1690083', 'Chi Zhang', 'chi zhang')<br/>('23224233', 'Jiani Hu', 'jiani hu')<br/>('1774956', 'Weihong Deng', 'weihong deng')</td><td></td></tr><tr><td>02820c1491b10a1ff486fed32c269e4077c36551</td><td>Active User Authentication for Smartphones: A Challenge
+<br/>Data Set and Benchmark Results
+<br/>1Department of Electrical and Computer Engineering and the Center for Automation Research,
+<br/><b>UMIACS, University of Maryland, College Park, MD</b><br/><b>Rutgers, The State University of New Jersey, 508 CoRE, 94 Brett Rd, Piscataway, NJ</b></td><td>('3152615', 'Upal Mahbub', 'upal mahbub')<br/>('40599829', 'Sayantan Sarkar', 'sayantan sarkar')</td><td>{umahbub, ssarkar2, rama}@umiacs.umd.edu
+<br/>vishal.m.patel@rutgers.edu∗
+</td></tr><tr><td>a40edf6eb979d1ddfe5894fac7f2cf199519669f</td><td>Improving Facial Attribute Prediction using Semantic Segmentation
+<br/>Center for Research in Computer Vision
+<br/><b>University of Central Florida</b></td><td>('3222250', 'Mahdi M. Kalayeh', 'mahdi m. kalayeh')<br/>('40206014', 'Boqing Gong', 'boqing gong')<br/>('1745480', 'Mubarak Shah', 'mubarak shah')</td><td>Mahdi@eecs.ucf.edu
+<br/>bgong@crcv.ucf.edu
+<br/>shah@crcv.ucf.edu
+</td></tr><tr><td>a46283e90bcdc0ee35c680411942c90df130f448</td><td></td><td></td><td></td></tr><tr><td>a4a5ad6f1cc489427ac1021da7d7b70fa9a770f2</td><td>Yudistira and Kurita EURASIP Journal on Image and Video
+<br/>Processing (2017) 2017:85
+<br/>DOI 10.1186/s13640-017-0235-9
+<br/>EURASIP Journal on Image
+<br/>and Video Processing
+<br/>RESEARCH
+<br/>Open Access
+<br/>Gated spatio and temporal convolutional
+<br/>neural network for activity recognition:
+<br/>towards gated multimodal deep learning
+</td><td>('2035597', 'Novanto Yudistira', 'novanto yudistira')<br/>('1742728', 'Takio Kurita', 'takio kurita')</td><td></td></tr><tr><td>a4876b7493d8110d4be720942a0f98c2d116d2a0</td><td>Multi-velocity neural networks for gesture recognition in videos
+<br/><b>Massachusetts Institute of Technology</b><br/>Cambridge, MA
+</td><td>('37381309', 'Otkrist Gupta', 'otkrist gupta')<br/>('2283049', 'Dan Raviv', 'dan raviv')<br/>('1717566', 'Ramesh Raskar', 'ramesh raskar')</td><td>otkrist@mit.edu
+<br/>raviv@mit.edu
+<br/>raskar@media.mit.edu
+</td></tr><tr><td>a40f8881a36bc01f3ae356b3e57eac84e989eef0</td><td>End-to-end semantic face segmentation with conditional
+<br/>random fields as convolutional, recurrent and adversarial
+<br/>networks
+</td><td>('3038211', 'Umut Güçlü', 'umut güçlü')<br/>('1920611', 'Meysam Madadi', 'meysam madadi')<br/>('7855312', 'Sergio Escalera', 'sergio escalera')<br/>('1857280', 'Xavier Baró', 'xavier baró')<br/>('38485168', 'Rob van Lier', 'rob van lier')<br/>('2052286', 'Marcel van Gerven', 'marcel van gerven')</td><td></td></tr><tr><td>a4a0b5f08198f6d7ea2d1e81bd97fea21afe3fc3</td><td>E
+<br/>Feature Transfer
+<br/>MSc Thesis
+<br/>written by
+<br/>degree of
+<br/>Master of Science
+<br/><b>at the Delft University of Technology</b><br/>Date of the public defense: Members of the Thesis Committee:
+<br/>August 31, 2017
+<br/>Prof. Marcel Reinders
+<br/>Dr. Julian Urbano Merino
+<br/>Dr. Gonzalez Adrlana (Bosch)
+</td><td>('1694101', 'Yue Liu', 'yue liu')<br/>('37806314', 'Silvia-Laura Pintea', 'silvia-laura pintea')<br/>('30445013', 'Jan van Gemert', 'jan van gemert')<br/>('2372050', 'Ildiko Suveg', 'ildiko suveg')<br/>('30445013', 'Jan van Gemert', 'jan van gemert')<br/>('37806314', 'Silvia-Laura Pintea', 'silvia-laura pintea')<br/>('2372050', 'Ildiko Suveg', 'ildiko suveg')</td><td></td></tr><tr><td>a46086e210c98dcb6cb9a211286ef906c580f4e8</td><td>Fusing Multi-Stream Deep Networks for Video Classification
+<br/><b>Fudan University, Shanghai, China</b><br/>Alibaba Group, Seattle, USA
+</td><td>('3099139', 'Zuxuan Wu', 'zuxuan wu')<br/>('1717861', 'Yu-Gang Jiang', 'yu-gang jiang')<br/>('31825486', 'Xi Wang', 'xi wang')<br/>('1743864', 'Hao Ye', 'hao ye')<br/>('1713721', 'Xiangyang Xue', 'xiangyang xue')<br/>('1715001', 'Jun Wang', 'jun wang')</td><td>zxwu, ygj, xwang10, haoye10, xyxue@fudan.edu.cn
+<br/>wongjun@gmail.com
+</td></tr><tr><td>a44590528b18059b00d24ece4670668e86378a79</td><td>Learning the Hierarchical Parts of Objects by Deep
+<br/>Non-Smooth Nonnegative Matrix Factorization
+</td><td>('19275690', 'Jinshi Yu', 'jinshi yu')<br/>('1764724', 'Guoxu Zhou', 'guoxu zhou')<br/>('1747156', 'Andrzej Cichocki', 'andrzej cichocki')<br/>('1795838', 'Shengli Xie', 'shengli xie')</td><td></td></tr><tr><td>a472d59cff9d822f15f326a874e666be09b70cfd</td><td>VISUAL LEARNING WITH WEAKLY LABELED VIDEO
+<br/>A DISSERTATION
+<br/>SUBMITTED TO THE DEPARTMENT OF COMPUTER SCIENCE
+<br/>AND THE COMMITTEE ON GRADUATE STUDIES
+<br/><b>OF STANFORD UNIVERSITY</b><br/>IN PARTIAL FULFILLMENT OF THE REQUIREMENTS
+<br/>FOR THE DEGREE OF
+<br/>DOCTOR OF PHILOSOPHY
+<br/>May 2015
+</td><td>('3355264', 'Kevin Tang', 'kevin tang')</td><td></td></tr><tr><td>a4c430b7d849a8f23713dc283794d8c1782198b2</td><td>Video Concept Embedding
+<br/>1. Introduction
+<br/>In the area of natural language processing, there has been
+<br/>much success in learning distributed representations for
+<br/>words as vectors. Doing so has an advantage over using
+<br/>simple labels, or a one-hot coding scheme for representing
+<br/>individual words. In learning distributed vector representa-
+<br/>tions for words, we manage to capture semantic relatedness
+<br/>of words in vector distance. For example, the word vector
+<br/>for ”car” and ”road” should end up being closer together in
+<br/>the vector space representation than ”car” and ”penguin”.
+<br/>This has been very useful in NLP areas of machine transla-
+<br/>tion and semantic understanding.
+<br/>In the computer vision domain, video understanding is a
+<br/>very important topic.
+<br/>It is made hard due to the large
+<br/>amount of high dimensional data in videos. One strategy
+<br/>to address this is to summarize a video into concepts (eg.
+<br/>running, climbing, cooking). This allows us to represent a
+<br/>video in a very natural way to humans, such as a sequence
+<br/>of semantic events. However this has the same shortcom-
+<br/>ings that one-hot coding of words have.
+<br/>The goal of this project is to find a meaningful way to em-
+<br/>bed video concepts into a vector space. The hope would
+<br/>be to capture semantic relatedness of concepts in a vector
+<br/>representation, essentially doing for videos what word2vec
+<br/>did for text. Having a vector representation for video con-
+<br/>cepts would help in areas such as semantic video retrieval
+<br/>and video classification, as it would provide a statistically
+<br/>meaningful and robust way of representing videos as lower
+<br/>dimensional vectors. An interesting thing would be to ob-
+<br/>serve if such a vector representation would result in ana-
+<br/>logical reasoning using simple vector arithmetic.
+<br/>Figure 1 shows an example of concepts detected at differ-
+<br/>ent snapshots in the same video. For example, consider
+<br/>the scenario where the concepts Kicking a ball, Soccer and
+<br/>Running are detected in the three snapshots respectively
+<br/>(from left to right). Since, these snapshots belong in the
+<br/>same video, we expect that these concepts are semantically
+<br/>similar and that they should lie close in the resulting em-
+<br/>bedding space. The aim of this project is to find a vector
+<br/>space embedding for the space of concepts such that vector
+<br/>representations for semantically similar concepts (in this
+<br/>Figure 1. Example snapshots from the same video
+<br/>case, Running, Kicking and Soccer) lie in the vicinity of
+<br/>each other.
+<br/>2. Related Work
+<br/>(Mikolov et al., 2013a) introduces the popular skip-gram
+<br/>model to learn distributed representations of words from
+<br/>very large linguistic datasets. Specifically, it uses each
+<br/>word as an input to a log-linear classifier and predict words
+<br/>within a certain range before and after the current word in
+<br/>the dataset.
+<br/>(Mikolov et al., 2013b) extends this model
+<br/>to learn representations for phrases, in addition to words,
+<br/>and also improve the quality of vectors and training speed.
+<br/>These works also show that the skip-gram model exhibits
+<br/>a linear structure that enables it to perform reasoning using
+<br/>basic vector arithmetic. The skip-gram model from these
+<br/>works is the basis of our model in learning representations
+<br/>for concepts.
+<br/>(Le & Mikolov, 2014) extends the concept of word vectors
+<br/>to sentences and paragraphs. Their approach is more in-
+<br/>volved than a simple bag of words approach, in that it tries
+<br/>to capture the nature of the words in the paragraph. They
+<br/>construct the paragraph vector in such a way that it can be
+<br/>used to predict the word vectors that are contained inside
+<br/>the paragraph. They do this by first learning word vectors,
+<br/>such that the probability of a word vector given its context
+<br/>is maximized. To learn paragraph vectors, the paragraph
+<br/>is essentially treated as a word, and the words it contains
+<br/>become the context. This provides a key insight in how
+<br/>a set of concept vectors can be used together to provide a
+<br/>more meaningful vector representation for videos, which
+<br/>can then be used for retrieval.
+<br/>(Hu et al.) utilizes structured knowledge in the data to learn
+<br/>distributed representations that improve semantic related-
+</td><td>('2387189', 'Anirudh Vemula', 'anirudh vemula')<br/>('32203964', 'Rahul Nallamothu', 'rahul nallamothu')<br/>('9619757', 'Syed Zahir Bokhari', 'syed zahir bokhari')</td><td>AVEMULA1@ANDREW.CMU.EDU
+<br/>RNALLAMO@ANDREW.CMU.EDU
+<br/>SBOKHARI@ANDREW.CMU.EDU
+</td></tr><tr><td>a4cc626da29ac48f9b4ed6ceb63081f6a4b304a2</td><td></td><td></td><td></td></tr><tr><td>a4f37cfdde3af723336205b361aefc9eca688f5c</td><td>Recent Advances
+<br/>in Face Recognition
+</td><td></td><td></td></tr><tr><td>a481e394f58f2d6e998aa320dad35c0d0e15d43c</td><td>Selectively Guiding Visual Concept Discovery
+<br/><b>Colorado State University</b><br/>Fort Collins, Colorado
+</td><td>('2857477', 'Maggie Wigness', 'maggie wigness')<br/>('1694404', 'Bruce A. Draper', 'bruce a. draper')<br/>('1757322', 'J. Ross Beveridge', 'j. ross beveridge')</td><td>mwigness,draper,ross@cs.colostate.edu
+</td></tr><tr><td>a30869c5d4052ed1da8675128651e17f97b87918</td><td>Fine-Grained Comparisons with Attributes
+</td><td>('2206630', 'Aron Yu', 'aron yu')<br/>('1794409', 'Kristen Grauman', 'kristen grauman')</td><td></td></tr><tr><td>a3ebacd8bcbc7ddbd5753935496e22a0f74dcf7b</td><td>First International Workshop on Adaptive Shot Learning
+<br/>for Gesture Understanding and Production
+<br/>ASL4GUP 2017
+<br/>Held in conjunction with IEEE FG 2017, in May 30, 2017,
+<br/>Washington DC, USA
+</td><td></td><td></td></tr><tr><td>a3d8b5622c4b9af1f753aade57e4774730787a00</td><td>Pose-Aware Person Recognition
+<br/>Anoop Namboodiri (cid:63)
+<br/>(cid:63) CVIT, IIIT Hyderabad, India
+<br/>† Facebook AI Research
+</td><td>('37956314', 'Vijay Kumar', 'vijay kumar')<br/>('2210374', 'Manohar Paluri', 'manohar paluri')<br/>('1694502', 'C. V. Jawahar', 'c. v. jawahar')</td><td></td></tr><tr><td>a322479a6851f57a3d74d017a9cb6d71395ed806</td><td>Towards Pose Invariant Face Recognition in the Wild
+<br/><b>National University of Singapore</b><br/><b>National University of Defense Technology</b><br/><b>Nanyang Technological University</b><br/>4Panasonic R&D Center Singapore
+<br/><b>National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences</b><br/><b>Qihoo 360 AI Institute</b></td><td>('2668358', 'Sugiri Pranata', 'sugiri pranata')<br/>('3493398', 'Shengmei Shen', 'shengmei shen')<br/>('1757173', 'Junliang Xing', 'junliang xing')<br/>('46509407', 'Jian Zhao', 'jian zhao')<br/>('5524736', 'Yu Cheng', 'yu cheng')<br/>('33419682', 'Lin Xiong', 'lin xiong')<br/>('2757639', 'Jianshu Li', 'jianshu li')<br/>('40345914', 'Fang Zhao', 'fang zhao')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')<br/>('33221685', 'Jiashi Feng', 'jiashi feng')</td><td></td></tr><tr><td>a3017bb14a507abcf8446b56243cfddd6cdb542b</td><td>Face Localization and Recognition in Varied
+<br/>Expressions and Illumination
+<br/>Hui-Yu Huang, Shih-Hang Hsu
+<br/>
+</td><td></td><td></td></tr><tr><td>a3c8c7da177cd08978b2ad613c1d5cb89e0de741</td><td>A Spatio-temporal Approach for Multiple
+<br/>Object Detection in Videos Using Graphs
+<br/>and Probability Maps
+<br/><b>University of S ao Paulo, S ao Paulo, Brazil</b><br/>2 Institut Mines T´el´ecom, T´el´ecom ParisTech, CNRS LTCI, Paris, France
+</td><td>('1863046', 'Henrique Morimitsu', 'henrique morimitsu')<br/>('1695917', 'Isabelle Bloch', 'isabelle bloch')</td><td>henriquem87@gmail.com
+</td></tr><tr><td>a378fc39128107815a9a68b0b07cffaa1ed32d1f</td><td>Determining a Suitable Metric When using Non-negative Matrix Factorization∗
+<br/>Computer Vision Center, Dept. Inform`atica
+<br/>Universitat Aut`onoma de Barcelona
+<br/>08193 Bellaterra, Barcelona, Spain
+</td><td>('1761407', 'David Guillamet', 'david guillamet')</td><td>{davidg,jordi}@cvc.uab.es
+</td></tr><tr><td>a34d75da87525d1192bda240b7675349ee85c123</td><td>Naive-Deep Face Recognition: Touching the Limit of LFW Benchmark or Not?
+<br/>Face++, Megvii Inc.
+<br/>Face++, Megvii Inc.
+<br/>Face++, Megvii Inc.
+</td><td>('1848243', 'Erjin Zhou', 'erjin zhou')<br/>('2695115', 'Zhimin Cao', 'zhimin cao')<br/>('2274228', 'Qi Yin', 'qi yin')</td><td>zej@megvii.com
+<br/>czm@megvii.com
+<br/>yq@megvii.com
+</td></tr><tr><td>a301ddc419cbd900b301a95b1d9e4bb770afc6a3</td><td>Proceedings of the Thirty-First AAAI Conference on Artificial Intelligence (AAAI-17)
+<br/>DECK: Discovering Event Composition Knowledge from
+<br/>Web Images for Zero-Shot Event Detection and Recounting in Videos
+<br/><b>University of Southern California</b><br/><b>IIIS, Tsinghua University</b><br/>‡ Google Research
+</td><td>('2551285', 'Chuang Gan', 'chuang gan')<br/>('1726241', 'Chen Sun', 'chen sun')</td><td></td></tr><tr><td>a3dc109b1dff3846f5a2cc1fe2448230a76ad83f</td><td>J.Savitha et al, International Journal of Computer Science and Mobile Computing, Vol.4 Issue.4, April- 2015, pg. 722-731
+<br/>Available Online at www.ijcsmc.com
+<br/>International Journal of Computer Science and Mobile Computing
+<br/>A Monthly Journal of Computer Science and Information Technology
+<br/>ISSN 2320–088X
+<br/> IJCSMC, Vol. 4, Issue. 4, April 2015, pg.722 – 731
+<br/> RESEARCH ARTICLE
+<br/>ACTIVE APPEARANCE MODEL AND PCA
+<br/>BASED FACE RECOGNITION SYSTEM
+<br/>Mrs. J.Savitha M.Sc., M.Phil.
+<br/><b>Ph.D Research Scholar, Karpagam University, Coimbatore, Tamil Nadu, India</b><br/>Dr. A.V.Senthil Kumar
+<br/><b>Director, Hindustan College of Arts and Science, Coimbatore, Tamil Nadu, India</b></td><td></td><td>Email: savitha.sanjay1@gmail.com
+<br/>Email: avsenthilkumar@gmail.com
+</td></tr><tr><td>a3f69a073dcfb6da8038607a9f14eb28b5dab2db</td><td>Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intelligence (IJCAI-18)
+<br/>1184
+</td><td></td><td></td></tr><tr><td>a38045ed82d6800cbc7a4feb498e694740568258</td><td>UNLV Theses, Dissertations, Professional Papers, and Capstones
+<br/>5-2010
+<br/>African American and Caucasian males' evaluation
+<br/>of racialized female facial averages
+<br/>Rhea M. Watson
+<br/><b>University of Nevada Las Vegas</b><br/>Follow this and additional works at: http://digitalscholarship.unlv.edu/thesesdissertations
+<br/>Part of the Cognition and Perception Commons, Race and Ethnicity Commons, and the Social
+<br/>Psychology Commons
+<br/>Repository Citation
+<br/>Watson, Rhea M., "African American and Caucasian males' evaluation of racialized female facial averages" (2010). UNLV Theses,
+<br/>Dissertations, Professional Papers, and Capstones. 366.
+<br/>http://digitalscholarship.unlv.edu/thesesdissertations/366
+</td><td></td><td>This Thesis is brought to you for free and open access by Digital Scholarship@UNLV. It has been accepted for inclusion in UNLV Theses, Dissertations,
+<br/>Professional Papers, and Capstones by an authorized administrator of Digital Scholarship@UNLV. For more information, please contact
+<br/>digitalscholarship@unlv.edu.
+</td></tr><tr><td>a3f684930c5c45fcb56a2b407d26b63879120cbf</td><td>LPM for Fast Action Recognition with Large Number of Classes
+<br/>School of Electrical Engineering and Computer Scinece
+<br/><b>University of Ottawa, Ottawa, On, Canada</b><br/>Department of Electronics and Information Engineering
+<br/><b>Hua Zhong University of Science and Technology, Wuhan, China</b><br/>1. Introduction
+<br/>In this paper, we provide an overview of the Local Part
+<br/>Model system for the THUMOS 2013: Action Recognition
+<br/>with a Large Number of Classes1 evaluations. Our system
+<br/>uses a combination of fast random sampling feature extrac-
+<br/>tion and local part model feature representation.
+<br/>Over the last decade, the advances in the area of com-
+<br/>puter vision and pattern recognition have fuelled a large
+<br/>amount of research with great progress in human action
+<br/>recognition. Much of the early progress [1, 5, 14] has been
+<br/>reported on atomic actions with several categories based
+<br/>on staged videos captured under controlled settings, such
+<br/>as KTH [14] and Weizmann [1]. More recently, there are
+<br/>emerging interests for sophisticated algorithms in recogniz-
+<br/>ing actions from realistic video. Such interests involve two
+<br/>prospects: 1) In comparison to image classification evalu-
+<br/>ating millions of images with over one thousand categories,
+<br/>action recognition is still at its initial stage. It is important
+<br/>to develop reliable, automatic methods which scale to large
+<br/>numbers of action categories captured in realistic settings.
+<br/>2) With over 100 hours of videos are uploaded to YouTube
+<br/>every minute2, and millions of surveillance cameras all over
+<br/>the world, the need for efficient recognition of the visual
+<br/>events in the video is crucial for real world applications.
+<br/>Recent studies [5, 10, 11, 21] have shown that lo-
+<br/>cal spatio-temporal features can achieve remarkable per-
+<br/>formance when represented by popular bag-of-features
+<br/>method. A recent trend is the use of dense sampled points
+<br/>[16, 21] and trajectories [7, 19] to improve the perfor-
+<br/>mance. Local Part Model [15] achieved state-of-the-art per-
+<br/>formance on real-life datasets with high efficiency when
+<br/>combined with random sampling over high density sam-
+<br/>1http://crcv.ucf.edu/ICCV13-Action-Workshop/index.html
+<br/>2http://www.youtube.com/yt/press/statistics.html
+<br/>pling grids.
+<br/>In this paper, we focus on recognize human
+<br/>action “in the wild” with large number of classes. More
+<br/>specifically, we aim to improve the state-of-the-art Local
+<br/>Part Model method on large scale real-life action datasets.
+<br/>The paper is organized as follows: The next section re-
+<br/>views the LPM algorithm. Section 3 introduces four differ-
+<br/>ent descriptors we will use. In section 4, we present some
+<br/>experimental results and analysis. The paper is completed
+<br/>with a brief conclusion. The code for computing random
+<br/>sampling with Local Part Model is available on-line3.
+<br/>2. LPM algorithm
+<br/>Inspired by the multiscale, deformable part model [6]
+<br/>for object classification, we proposed a 3D multiscale part
+<br/>model in [16]. However, instead of adopting deformable
+<br/>“parts”, we used “parts” with fixed size and location on the
+<br/>purpose of maintaining both structural information and lo-
+<br/>cal events ordering for action recognition. As shown in Fig-
+<br/>ure 1, the local part model includes both a coarse primi-
+<br/>tive level root feature covering event-content statistics and
+<br/>higher resolution overlapping part filters incorporating lo-
+<br/>cal structural and temporal relations.
+<br/>More recently, we [15] applied random sampling method
+<br/>with local part model over a very dense sampling grid
+<br/>and achieved state-of-the-art performance on realistic large
+<br/>scale datasets with potential for real-time recognition. Un-
+<br/>der the local part model, a feature consists of a coarse global
+<br/>root filter and several fine overlapped part filters. The root
+<br/>filter is extracted on the video at half the resolution. This
+<br/>way, a high density grid can be defined with far less sam-
+<br/>ples. For every coarse root filter, a group of fine part filters
+<br/>are computed at full video resolution and at locations rela-
+<br/>tive to their root filter reference position. These part filters
+<br/>3https://github.com/fshi/actionMBH
+</td><td>('36925389', 'Feng Shi', 'feng shi')<br/>('1745632', 'Emil Petriu', 'emil petriu')</td><td>fshi98@gmail.com, {laganier, petriu}@site.uottawa.ca
+<br/>zhenhaiyu@mail.hust.edu.cn
+</td></tr><tr><td>a3f78cc944ac189632f25925ba807a0e0678c4d5</td><td>Action Recognition in Realistic Sports Videos
+</td><td>('1799979', 'Khurram Soomro', 'khurram soomro')<br/>('40029556', 'Amir Roshan Zamir', 'amir roshan zamir')</td><td></td></tr><tr><td>a33f20773b46283ea72412f9b4473a8f8ad751ae</td><td></td><td></td><td></td></tr><tr><td>a3a6a6a2eb1d32b4dead9e702824375ee76e3ce7</td><td>Multiple Local Curvature Gabor Binary
+<br/>Patterns for Facial Action Recognition
+<br/>Signal Processing Laboratory (LTS5),
+<br/>´Ecole Polytechnique F´ed´erale de Lausanne, Switzerland
+</td><td>('2383305', 'Nuri Murat Arar', 'nuri murat arar')<br/>('1710257', 'Jean-Philippe Thiran', 'jean-philippe thiran')</td><td>{anil.yuce,murat.arar,jean-philippe.thiran}@epfl.ch
+</td></tr><tr><td>a32c5138c6a0b3d3aff69bcab1015d8b043c91fb</td><td>Downloaded From: https://www.spiedigitallibrary.org/journals/Journal-of-Electronic-Imaging on 9/19/2018
+<br/>Terms of Use: https://www.spiedigitallibrary.org/terms-of-use
+<br/>Videoredaction:asurveyandcomparisonofenablingtechnologiesShaganSahAmeyaShringiRaymondPtuchaAaronBurryRobertLoceShaganSah,AmeyaShringi,RaymondPtucha,AaronBurry,RobertLoce,“Videoredaction:asurveyandcomparisonofenablingtechnologies,”J.Electron.Imaging26(5),051406(2017),doi:10.1117/1.JEI.26.5.051406. </td><td></td><td></td></tr><tr><td>a32d4195f7752a715469ad99cb1e6ebc1a099de6</td><td>Hindawi Publishing Corporation
+<br/>e Scientific World Journal
+<br/>Volume 2014, Article ID 749096, 10 pages
+<br/>http://dx.doi.org/10.1155/2014/749096
+<br/>Research Article
+<br/>The Potential of Using Brain Images for Authentication
+<br/><b>College of Mechatronic Engineering and Automation, National University of Defense Technology</b><br/>Changsha, Hunan 410073, China
+<br/>Received 6 May 2014; Accepted 19 June 2014; Published 10 July 2014
+<br/>Academic Editor: Wangmeng Zuo
+<br/>which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
+<br/>Biometric recognition (also known as biometrics) refers to the automated recognition of individuals based on their biological or
+<br/>behavioral traits. Examples of biometric traits include fingerprint, palmprint, iris, and face. The brain is the most important and
+<br/>complex organ in the human body. Can it be used as a biometric trait? In this study, we analyze the uniqueness of the brain and
+<br/>try to use the brain for identity authentication. The proposed brain-based verification system operates in two stages: gray matter
+<br/>extraction and gray matter matching. A modified brain segmentation algorithm is implemented for extracting gray matter from
+<br/>an input brain image. Then, an alignment-based matching algorithm is developed for brain matching. Experimental results on two
+<br/>data sets show that the proposed brain recognition system meets the high accuracy requirement of identity authentication. Though
+<br/>currently the acquisition of the brain is still time consuming and expensive, brain images are highly unique and have the potential
+<br/>possibility for authentication in view of pattern recognition.
+<br/>1. Introduction
+<br/>Identity authentication is an important task for different
+<br/>applications including access control, ATM card verification,
+<br/>and forensic affairs. Compared with conventional methods
+<br/>(e.g., key, ID card, and password), biometric recognition
+<br/>is more resistant to social engineering attacks (e.g., theft).
+<br/>Biometric recognition is also intrinsically superior that makes
+<br/>it unforgettable. During the past few decades, biometric tech-
+<br/>nologies have shown more and more importance in various
+<br/>applications [1, 2]. Among them, recognition technologies
+<br/>based on fingerprint [3, 4], palmprint [5, 6], iris [7, 8], and
+<br/>face [9, 10] are the most popular.
+<br/>The brain is the center of the nervous system and the most
+<br/>important and complex organ in the human body. Though
+<br/>different brains may be alike in the way they act and have
+<br/>similar traits, scientists have confirmed that no two brains are
+<br/>or will ever be the same [11]. Both genes (what we inherit)
+<br/>and experience (what we learn) could allow individual brains
+<br/>to develop in distinctly different ways. Recent studies show
+<br/>that the so-called jumping genes, which ensure that identical
+<br/>twins are different, may also influence the brains [12]. All
+<br/>these studies show that the human brain is a work of genius in
+<br/>its design and capabilities, and it is unique. Though brain gray
+<br/>matter will change with age or disease, it shows steadiness in
+<br/>adulthood [13, 14]. The question we are interested in this study
+<br/>is as follows: can we use the brain for identity authentication?
+<br/>This paper analyzes the uniqueness of human brain
+<br/>and proposes to use the brain for personal identification
+<br/>(authentication). Compared with other biometric techniques,
+<br/>brain recognition is more resistant to forgery (e.g., fake
+<br/>fingerprints [15]) and spoofing (e.g., face disguise [16]). Brain
+<br/>recognition is also more reliable to identify the escapee
+<br/>since one’s brain can hardly be modified, whereas other
+<br/>biologic traits may be altered, such as altered fingerprints [17].
+<br/>Palaniappan and Mandic [18] established a Visual Evoked
+<br/>Potential- (VEP-) based biometrics, and simulations have
+<br/>indicated the significant potential of brain electrical activity
+<br/>as a biometric tool. However, VEP is not robust to the
+<br/>activity of brain. Aloui et al. [19] extracted characteristics of
+<br/>brain images and used them in an application as a biometric
+<br/>tool to identify individuals. Their method just uses a single
+<br/>slice of the brain and thus suffers from the influence of
+<br/>noise. Another drawback of this method is that it only uses
+</td><td>('40326124', 'Fanglin Chen', 'fanglin chen')<br/>('8526311', 'Zongtan Zhou', 'zongtan zhou')<br/>('1730001', 'Hui Shen', 'hui shen')<br/>('2517668', 'Dewen Hu', 'dewen hu')<br/>('40326124', 'Fanglin Chen', 'fanglin chen')</td><td>Correspondence should be addressed to Dewen Hu; dwhu@nudt.edu.cn
+</td></tr><tr><td>a3d78bc94d99fdec9f44a7aa40c175d5a106f0b9</td><td>Recognizing Violence in Movies
+<br/>CIS400/401 Project Final Report
+<br/>Univ. of Pennsylvania
+<br/>Philadelphia, PA
+<br/>Univ. of Pennsylvania
+<br/>Philadelphia, PA
+<br/>Ben Sapp
+<br/>Univ. of Pennsylvania
+<br/>Philadelphia, PA
+<br/>Univ. of Pennsylvania
+<br/>Philadelphia, PA
+</td><td>('1908780', 'Lei Kang', 'lei kang')<br/>('1685978', 'Ben Taskar', 'ben taskar')</td><td>kanglei@seas.upenn.edu
+<br/>mjiawei@seas.upenn.edu
+<br/>bensapp@cis.upenn.edu
+<br/>taskar@cis.upenn.edu
+</td></tr><tr><td>a3eab933e1b3db1a7377a119573ff38e780ea6a3</td><td>978-1-4244-4296-6/10/$25.00 ©2010 IEEE
+<br/>838
+<br/>ICASSP 2010
+</td><td></td><td></td></tr><tr><td>a308077e98a611a977e1e85b5a6073f1a9bae6f0</td><td>Hindawi Publishing Corporation
+<br/>e Scientific World Journal
+<br/>Volume 2014, Article ID 810368, 15 pages
+<br/>http://dx.doi.org/10.1155/2014/810368
+<br/>Review Article
+<br/>Intelligent Screening Systems for Cervical Cancer
+<br/><b>Faculty of Engineering Building, University of Malaya, 50603 Kuala Lumpur, Malaysia</b><br/>Received 24 December 2013; Accepted 11 February 2014; Published 11 May 2014
+<br/>Academic Editors: S. Balochian, V. Bhatnagar, and Y. Zhang
+<br/>which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
+<br/>Advent of medical image digitalization leads to image processing and computer-aided diagnosis systems in numerous clinical
+<br/>applications. These technologies could be used to automatically diagnose patient or serve as second opinion to pathologists. This
+<br/>paper briefly reviews cervical screening techniques, advantages, and disadvantages. The digital data of the screening techniques
+<br/>are used as data for the computer screening system as replaced in the expert analysis. Four stages of the computer system are
+<br/>enhancement, features extraction, feature selection, and classification reviewed in detail. The computer system based on cytology
+<br/>data and electromagnetic spectra data achieved better accuracy than other data.
+<br/>1. Introduction
+<br/>Cervical cancer is a leading cause of mortality and morbidity,
+<br/>which comprises approximately 12% of all cancers in women
+<br/>worldwide according to World Health Organization (WHO).
+<br/>In fact, the annual global statistics of WHO estimated 470
+<br/>600 new cases and 233 400 deaths from cervical cancer
+<br/>around the year 2000. As reported in National Cervical
+<br/>Cancer Coalition (NCCC) in 2010, cervical cancer is a cancer
+<br/>of the cervix which is commonly caused by a virus named
+<br/>Human Papillomavirus (HPV) [1]. The virus can damage
+<br/>cells in the cervix, namely, squamous cells and glandular
+<br/>cells that may develop into squamous cell carcinoma (cancer
+<br/>of the squamous cells) and adenocarcinoma (cancer of the
+<br/>glandular cells), respectively. Squamous cell carcinoma can
+<br/>be thought of as similar to skin cancer because it begins on
+<br/>the surface of the ectocervix. Adenocarcinoma begins further
+<br/>inside the uterus, in the mucus-producing gland cells of the
+<br/>endocervix [2].
+<br/>Cervical cancer develops from normal to precancerous
+<br/>cells (dysplasia) over a period of two to three decades [3].
+<br/>Even though the dysplasia cells look like cancer cells, they
+<br/>are not malignant cells. These cells are known as cervical
+<br/>intraepithelial neoplasia (CIN) which is usually of low grade,
+<br/>and they only affect the surface of the cervical tissue. The
+<br/>majority will regress back to normal spontaneously. Over
+<br/>time, a small proportion will continue to develop into cancer.
+<br/>Based on WHO system, the level of CIN growth can be
+<br/>divided into grades 1, 2, and 3. It should be noted that at least
+<br/>two-thirds of the CIN 1 lesions, half of the CIN 2 lesions, and
+<br/>one-third of the CIN 3 lesions will regress back to normal [3].
+<br/>The median ages of patients with these different precursor
+<br/>grades are 25, 29, and 34 years, respectively. Ultimately, a
+<br/>small proportion will develop into infiltrating cancer, usually
+<br/>from the age of 45 years onwards.
+<br/>In 1994, the Bethesda system was introduced to simplify
+<br/>the WHO system. This system divided all cervical epithelial
+<br/>precursor lesions into two groups: the Low-grade Squamous
+<br/>Intraepithelial Lesion (LSIL) and High-grade Squamous
+<br/>Intraepithelial Lesion (HSIL). The LSIL corresponds to CIN1,
+<br/>while the HSIL includes CIN2 and CIN3 [4].
+<br/>Since a period of two to three decades is needed for
+<br/>cervical cancer to reach an invasive state, the incidence and
+<br/>mortality related to this disease can be significantly reduced
+<br/>through early detection and proper treatment. Realizing
+<br/>this fact, a variety of screening tests have therefore been
+<br/>developed in attempting to be implemented as early cervical
+<br/>precancerous screening tools.
+<br/>2. Methodology
+<br/>This paper reviews 103 journal papers. The papers are
+<br/>obtained electronically through 2 major scientific databases:
+</td><td>('2905656', 'Yessi Jusman', 'yessi jusman')<br/>('33102280', 'Siew Cheok Ng', 'siew cheok ng')<br/>('2784667', 'Noor Azuan Abu Osman', 'noor azuan abu osman')<br/>('2905656', 'Yessi Jusman', 'yessi jusman')</td><td>Correspondence should be addressed to Siew Cheok Ng; siewcng@um.edu.my and Noor Azuan Abu Osman; azuan@um.edu.my
+</td></tr><tr><td>a35dd69d63bac6f3296e0f1d148708cfa4ba80f6</td><td>Audio Visual Emotion Recognition with Temporal Alignment and Perception
+<br/>Attention
+<br/><b>National Laboratory of Pattern Recognition Institute of Automation, Chinese Academy of Sciences</b><br/><b>Institute of Neuroscience, State Key Laboratory of Neuroscience, CAS Center for Excellence in Brain</b><br/><b>Science and Intelligence Technology, Shanghai Institutes for Biological Sciences, CAS</b></td><td>('1850313', 'Linlin Chao', 'linlin chao')<br/>('37670752', 'Jianhua Tao', 'jianhua tao')<br/>('2740129', 'Minghao Yang', 'minghao yang')<br/>('1704841', 'Ya Li', 'ya li')<br/>('1718662', 'Zhengqi Wen', 'zhengqi wen')</td><td>{linlin.chao, jhtao, mhyang, yli, zqwen}@nlpr.ia.ac.cn
+</td></tr><tr><td>a3a34c1b876002e0393038fcf2bcb00821737105</td><td>Face Identification across Different Poses and Illuminations
+<br/>with a 3D Morphable Model
+<br/>V. Blanz, S. Romdhani, and T. Vetter
+<br/><b>University of Freiburg</b><br/>Georges-K¨ohler-Allee 52, 79110 Freiburg, Germany
+</td><td></td><td>fvolker, romdhani, vetterg@informatik.uni-freiburg.de
+</td></tr><tr><td>a3f1db123ce1818971a57330d82901683d7c2b67</td><td>Poselets and Their Applications in High-Level
+<br/>Computer Vision
+<br/>Lubomir Bourdev
+<br/>Electrical Engineering and Computer Sciences
+<br/><b>University of California at Berkeley</b><br/>Technical Report No. UCB/EECS-2012-52
+<br/>http://www.eecs.berkeley.edu/Pubs/TechRpts/2012/EECS-2012-52.html
+<br/>May 1, 2012
+</td><td></td><td></td></tr><tr><td>a36c8a4213251d3fd634e8893ad1b932205ad1ca</td><td>Videos from the 2013 Boston Marathon:
+<br/>An Event Reconstruction Dataset for
+<br/>Synchronization and Localization
+<br/>CMU-LTI-018
+<br/><b>Language Technologies Institute</b><br/>School of Computer Science
+<br/><b>Carnegie Mellon University</b><br/>5000 Forbes Ave., Pittsburgh, PA 15213
+<br/>www.lti.cs.cmu.edu
+<br/>© October 1, 2016
+</td><td>('1915796', 'Junwei Liang', 'junwei liang')<br/>('47896638', 'Han Lu', 'han lu')<br/>('2927024', 'Shoou-I Yu', 'shoou-i yu')<br/>('7661726', 'Alexander G. Hauptmann', 'alexander g. hauptmann')</td><td></td></tr><tr><td>a3a97bb5131e7e67316b649bbc2432aaa1a6556e</td><td>Cogn Affect Behav Neurosci
+<br/>DOI 10.3758/s13415-013-0170-x
+<br/>Role of the hippocampus and orbitofrontal cortex
+<br/>during the disambiguation of social cues in working memory
+<br/>Chantal E. Stern
+<br/><b>Psychonomic Society, Inc</b></td><td>('2973557', 'Karin Schon', 'karin schon')</td><td></td></tr><tr><td>a35d3ba191137224576f312353e1e0267e6699a1</td><td>Increasing security in DRM systems
+<br/>through biometric authentication.
+<br/>ecuring the exchange
+<br/>of intellectual property
+<br/>and providing protection
+<br/>to multimedia contents in
+<br/>distribution systems have enabled the
+<br/>advent of digital rights management
+<br/>(DRM) systems [5], [14], [21], [47],
+<br/>[51], [53]. Rights holders should be able to
+<br/>license, monitor, and track the usage of rights
+<br/>in a dynamic digital trading environment, espe-
+<br/>cially in the near future when universal multimedia
+<br/>access (UMA) becomes a reality, and any multimedia
+<br/>content will be available anytime, anywhere. In such
+<br/>DRM systems, encryption algorithms, access control,
+<br/>key management strategies, identification and tracing
+<br/>of contents, or copy control will play a prominent role
+<br/>to supervise and restrict access to multimedia data,
+<br/>avoiding unauthorized or fraudulent operations.
+<br/>A key component of any DRM system, also known
+<br/>as intellectual property management and protection
+<br/>(IPMP) systems in the MPEG-21 framework, is user
+<br/>authentication to ensure that
+<br/>only those with specific rights are
+<br/>able to access the digital informa-
+<br/>tion. It is here that biometrics can
+<br/>play an essential role, reinforcing securi-
+<br/>ty at all stages where customer authentica-
+<br/>tion is needed. The ubiquity of users and
+<br/>devices, where the same user might want to
+<br/>access to multimedia contents from different
+<br/>environments (home, car, work, jogging, etc.) and
+<br/>also from different devices or media (CD, DVD,
+<br/>home computer, laptop, PDA, 2G/3G mobile phones,
+<br/>game consoles, etc.) strengthens the need for reliable
+<br/>and universal authentication of users.
+<br/>Classical user authentication systems have been
+<br/>based in something that you have (like a key, an identi-
+<br/>fication card, etc.) and/or something that you know
+<br/>(like a password, or a PIN). With biometrics, a new
+<br/>user authentication paradigm is added: something that
+<br/>you are (e.g., fingerprints or face) or something that
+<br/>you do or produce (e.g., handwritten signature or
+<br/>50
+<br/>IEEE SIGNAL PROCESSING MAGAZINE
+<br/>1053-5888/04/$20.00©2004IEEE
+<br/>MARCH 2004
+</td><td>('1732220', 'Javier Ortega-Garcia', 'javier ortega-garcia')<br/>('5058247', 'Josef Bigun', 'josef bigun')<br/>('3127386', 'Douglas Reynolds', 'douglas reynolds')<br/>('1775227', 'Joaquin Gonzalez-Rodriguez', 'joaquin gonzalez-rodriguez')</td><td></td></tr><tr><td>a3a2f3803bf403262b56ce88d130af15e984fff0</td><td>Building a Compact Relevant Sample Coverage
+<br/>for Relevance Feedback in Content-Based Image
+<br/>Retrieval
+<br/><b>Tsinghua University, Beijing, China</b><br/>2 Sensing & Control Technology Laboratory, Omron Corporation, Kyoto, Japan
+</td><td>('38916673', 'Bangpeng Yao', 'bangpeng yao')<br/>('1679380', 'Haizhou Ai', 'haizhou ai')<br/>('1710195', 'Shihong Lao', 'shihong lao')</td><td></td></tr><tr><td>b56f3a7c50bfcd113d0ba84e6aa41189e262d7ae</td><td>Harvesting Motion Patterns in Still Images from the Internet
+<br/><b>ITCS, Institute for Interdisciplinary Information Sciences, Tsinghua University, Beijing</b><br/><b>University of California, San Diego, La Jolla</b></td><td></td><td>Jiajun Wu (jiajunwu.cs@gmail.com)
+<br/>Yining Wang (ynwang.yining@gmail.com)
+<br/>Zhulin Li (li-zl12@mails.tsinghua.edu.cn)
+<br/>Zhuowen Tu (ztu@ucsd.edu)
+</td></tr><tr><td>b5968e7bb23f5f03213178c22fd2e47af3afa04c</td><td>Multi-Human Parsing in the Wild
+<br/><b>National University of Singapore</b><br/><b>Beijing Jiaotong University</b><br/>March 16, 2018
+</td><td>('2757639', 'Jianshu Li', 'jianshu li')<br/>('2263674', 'Yidong Li', 'yidong li')<br/>('46509407', 'Jian Zhao', 'jian zhao')<br/>('1715286', 'Terence Sim', 'terence sim')<br/>('33221685', 'Jiashi Feng', 'jiashi feng')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')</td><td></td></tr><tr><td>b5cd9e5d81d14868f1a86ca4f3fab079f63a366d</td><td>Tag-based Video Retrieval by Embedding Semantic Content in a Continuous
+<br/>Word Space
+<br/><b>University of Southern California</b><br/>Ram Nevatia
+<br/>Cees G.M. Snoek
+<br/><b>University of Amsterdam</b></td><td>('3407713', 'Arnav Agharwal', 'arnav agharwal')<br/>('3407447', 'Rama Kovvuri', 'rama kovvuri')</td><td>{agharwal,nkovvuri,nevatia}@usc.edu
+<br/>cgmsnoek@uva.nl
+</td></tr><tr><td>b558be7e182809f5404ea0fcf8a1d1d9498dc01a</td><td>Bottom-up and top-down reasoning with convolutional latent-variable models
+<br/>UC Irvine
+<br/>UC Irvine
+</td><td>('2894848', 'Peiyun Hu', 'peiyun hu')<br/>('1770537', 'Deva Ramanan', 'deva ramanan')</td><td>peiyunh@ics.uci.edu
+<br/>dramanan@ics.uci.edu
+</td></tr><tr><td>b5cd8151f9354ee38b73be1d1457d28e39d3c2c6</td><td>Finding Celebrities in Video
+<br/>Electrical Engineering and Computer Sciences
+<br/><b>University of California at Berkeley</b><br/>Technical Report No. UCB/EECS-2006-77
+<br/>http://www.eecs.berkeley.edu/Pubs/TechRpts/2006/EECS-2006-77.html
+<br/>May 23, 2006
+</td><td>('3317048', 'Nazli Ikizler', 'nazli ikizler')<br/>('1865836', 'Jai Vasanth', 'jai vasanth')<br/>('1744452', 'David Forsyth', 'david forsyth')</td><td></td></tr><tr><td>b5fc4f9ad751c3784eaf740880a1db14843a85ba</td><td>SIViP (2007) 1:225–237
+<br/>DOI 10.1007/s11760-007-0016-5
+<br/>ORIGINAL PAPER
+<br/>Significance of image representation for face verification
+<br/>Received: 29 August 2006 / Revised: 28 March 2007 / Accepted: 28 March 2007 / Published online: 1 May 2007
+<br/>© Springer-Verlag London Limited 2007
+</td><td>('2627097', 'Anil Kumar Sao', 'anil kumar sao')<br/>('1783087', 'B. V. K. Vijaya Kumar', 'b. v. k. vijaya kumar')</td><td></td></tr><tr><td>b562def2624f59f7d3824e43ecffc990ad780898</td><td></td><td></td><td></td></tr><tr><td>b506aa23949b6d1f0c868ad03aaaeb5e5f7f6b57</td><td><b>UNIVERSITY OF CALIFORNIA</b><br/>RIVERSIDE
+<br/>Modeling Social and Temporal Context for Video Analysis
+<br/>A Dissertation submitted in partial satisfaction
+<br/>of the requirements for the degree of
+<br/>Doctor of Philosophy
+<br/>in
+<br/>Computer Science
+<br/>by
+<br/>June 2015
+<br/>Dissertation Committee:
+<br/>Dr. Christian R. Shelton, Chairperson
+<br/>Dr. Tao Jiang
+<br/>Dr. Stefano Lonardi
+<br/>Dr. Amit Roy-Chowdhury
+</td><td>('12561781', 'Zhen Qin', 'zhen qin')</td><td></td></tr><tr><td>b599f323ee17f12bf251aba928b19a09bfbb13bb</td><td>AUTONOMOUS QUADCOPTER VIDEOGRAPHER
+<br/>by
+<br/>REY R. COAGUILA
+<br/>B.S. Universidad Peruana de Ciencias Aplicadas, 2009
+<br/>A thesis submitted in partial fulfillment of the requirements
+<br/>for the degree of Master of Science in Computer Science
+<br/>in the Department of Electrical Engineering and Computer Science
+<br/><b>in the College of Engineering and Computer Science</b><br/><b>at the University of Central Florida</b><br/>Orlando, Florida
+<br/>Spring Term
+<br/>2015
+<br/>Major Professor: Gita R. Sukthankar
+</td><td></td><td></td></tr><tr><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td><td>UCF101: A Dataset of 101 Human Actions
+<br/>Classes From Videos in The Wild
+<br/>CRCV-TR-12-01
+<br/>November 2012
+<br/>Keywords: Action Dataset, UCF101, UCF50, Action Recognition
+<br/>Center for Research in Computer Vision
+<br/><b>University of Central Florida</b><br/>4000 Central Florida Blvd.
+<br/>Orlando, FL 32816-2365 USA
+</td><td>('1799979', 'Khurram Soomro', 'khurram soomro')<br/>('40029556', 'Amir Roshan Zamir', 'amir roshan zamir')<br/>('1745480', 'Mubarak Shah', 'mubarak shah')</td><td></td></tr><tr><td>b5da4943c348a6b4c934c2ea7330afaf1d655e79</td><td>Facial Landmarks Detection by Self-Iterative Regression based
+<br/>Landmarks-Attention Network
+<br/><b>University of Chinese Academy of Sciences, Beijing, China</b><br/>2 Microsoft Research Asia, Beijing, China
+</td><td>('33325349', 'Tao Hu', 'tao hu')<br/>('3245785', 'Honggang Qi', 'honggang qi')<br/>('1697982', 'Jizheng Xu', 'jizheng xu')<br/>('1689702', 'Qingming Huang', 'qingming huang')</td><td>hutao16@mails.ucas.ac.cn, hgqi@ucas.ac.cn
+</td></tr><tr><td>b5402c03a02b059b76be829330d38db8e921e4b5</td><td>Mei, et al, Hybridized KNN and SVM for gene expression data classification
+<br/>Hybridized KNN and SVM for gene expression data classification
+<br/><b>Zhengzhou University, Zhengzhou, Henan 450052, China</b><br/>Received October 22, 2008
+</td><td>('39156927', 'Zhen Mei', 'zhen mei')<br/>('2380760', 'Qi Shen', 'qi shen')<br/>('35476967', 'Baoxian Ye', 'baoxian ye')</td><td></td></tr><tr><td>b5160e95192340c848370f5092602cad8a4050cd</td><td>IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY, TO APPEAR
+<br/>Video Classification With CNNs: Using The Codec
+<br/>As A Spatio-Temporal Activity Sensor
+</td><td>('33998511', 'Aaron Chadha', 'aaron chadha')<br/>('2822935', 'Alhabib Abbas', 'alhabib abbas')<br/>('2747620', 'Yiannis Andreopoulos', 'yiannis andreopoulos')</td><td></td></tr><tr><td>b52c0faba5e1dc578a3c32a7f5cfb6fb87be06ad</td><td>Journal of Applied Research and
+<br/>Technology
+<br/>ISSN: 1665-6423
+<br/>Centro de Ciencias Aplicadas y
+<br/>Desarrollo Tecnológico
+<br/>México
+<br/>
+<br/>Hussain Shah, Jamal; Sharif, Muhammad; Raza, Mudassar; Murtaza, Marryam; Ur-Rehman, Saeed
+<br/>Robust Face Recognition Technique under Varying Illumination
+<br/>Journal of Applied Research and Technology, vol. 13, núm. 1, febrero, 2015, pp. 97-105
+<br/>Centro de Ciencias Aplicadas y Desarrollo Tecnológico
+<br/>Distrito Federal, México
+<br/>Available in: http://www.redalyc.org/articulo.oa?id=47436895009
+<br/> How to cite
+<br/> Complete issue
+<br/> More information about this article
+<br/> Journal's homepage in redalyc.org
+<br/>Scientific Information System
+<br/>Network of Scientific Journals from Latin America, the Caribbean, Spain and Portugal
+<br/>Non-profit academic project, developed under the open access initiative
+</td><td></td><td>jart@aleph.cinstrum.unam.mx
+</td></tr><tr><td>b56530be665b0e65933adec4cc5ed05840c37fc4</td><td>IEEE Computer Society Conference on Computer Vision and Pattern Recognition, June 2007
+<br/>©IEEE
+<br/>Reducing correspondence ambiguity in loosely labeled training data
+<br/><b>University of Arizona</b><br/>Tucson Arizona
+</td><td>('1728667', 'Kobus Barnard', 'kobus barnard')</td><td>kobus@cs.arizona.edu
+</td></tr><tr><td>b5f4e617ac3fc4700ec8129fcd0dcf5f71722923</td><td>Hierarchical Wavelet Networks for Facial Feature Localization
+<br/>Rog·erio S. Feris
+<br/>Microsoft Research
+<br/>Redmond, WA 98052
+<br/>U.S.A.
+<br/>Volker Kr¤uger
+<br/><b>University of Maryland, CFAR</b><br/><b>College Park, MD</b><br/>U.S.A.
+</td><td>('1936061', 'Jim Gemmell', 'jim gemmell')</td><td></td></tr><tr><td>b52886610eda6265a2c1aaf04ce209c047432b6d</td><td>Microexpression Identification and Categorization
+<br/>using a Facial Dynamics Map
+</td><td>('1684875', 'Feng Xu', 'feng xu')<br/>('2247926', 'Junping Zhang', 'junping zhang')</td><td></td></tr><tr><td>b51b4ef97238940aaa4f43b20a861eaf66f67253</td><td>Hindawi Publishing Corporation
+<br/>EURASIP Journal on Image and Video Processing
+<br/>Volume 2008, Article ID 184618, 16 pages
+<br/>doi:10.1155/2008/184618
+<br/>Research Article
+<br/>Unsupervised Modeling of Objects and Their Hierarchical
+<br/>Contextual Interactions
+<br/><b>Carnegie Mellon University, 5000 Forbes Avenue, Pittsburgh, PA 15213, USA</b><br/>Received 11 June 2008; Accepted 2 September 2008
+<br/>Recommended by Simon Lucey
+<br/>A successful representation of objects in literature is as a collection of patches, or parts, with a certain appearance and position.
+<br/>The relative locations of the different parts of an object are constrained by the geometry of the object. Going beyond a single
+<br/>object, consider a collection of images of a particular scene category containing multiple (recurring) objects. The parts belonging
+<br/>to different objects are not constrained by such a geometry. However, the objects themselves, arguably due to their semantic
+<br/>relationships, demonstrate a pattern in their relative locations. Hence, analyzing the interactions among the parts across the
+<br/>collection of images can allow for extraction of the foreground objects, and analyzing the interactions among these objects
+<br/>can allow for a semantically meaningful grouping of these objects, which characterizes the entire scene. These groupings are
+<br/>typically hierarchical. We introduce hierarchical semantics of objects (hSO) that captures this hierarchical grouping. We propose
+<br/>an approach for the unsupervised learning of the hSO from a collection of images of a particular scene. We also demonstrate the
+<br/>use of the hSO in providing context for enhanced object localization in the presence of significant occlusions, and show its superior
+<br/>performance over a fully connected graphical model for the same task.
+<br/>Copyright © 2008 D. Parikh and T. Chen. This is an open access article distributed under the Creative Commons Attribution
+<br/>License, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly
+<br/>cited.
+<br/>1.
+<br/>INTRODUCTION
+<br/>Objects that tend to cooccur in scenes are often semantically
+<br/>related. Hence, they demonstrate a characteristic grouping
+<br/>behavior according to their relative positions in the scene.
+<br/>Some groupings are tighter than others, and thus a hierarchy
+<br/>of these groupings among these objects can be observed in a
+<br/>collection of images of similar scenes. It is this hierarchy that
+<br/>we refer to as the hierarchical semantics of objects (hSO).
+<br/>This can be better understood with an example.
+<br/>Consider an office scene. Most offices, as seen in Figure 1,
+<br/>are likely to have, for instance, a chair, a phone, a monitor,
+<br/>and a keyboard. If we analyze a collection of images taken
+<br/>from such office settings, we would observe that across
+<br/>images, the monitor and keyboard are more or less in the
+<br/>same position with respect to each other, and hence can be
+<br/>considered to be part of the same super object at a lower level
+<br/>in the hSO structure, say a computer. Similarly, the computer
+<br/>may usually be somewhere in the vicinity of the phone, and
+<br/>so the computer and the phone belong to the same super
+<br/>object at a higher level, say the desk area. But the chair and
+<br/>the desk area may be placed relatively arbitrarily in the scene
+<br/>with respect to each other, more so than any of the other
+<br/>objects, and hence belong to a common super object only
+<br/>at the highest level in the hierarchy, that is, the scene itself.
+<br/>A possible hSO that would describe such an office scene is
+<br/>shown in Figure 1. Along with the structure, the hSO may
+<br/>also store other information such as the relative position of
+<br/>the objects and their cooccurrence counts as parameters.
+<br/>The hSO is motivated from an interesting thought
+<br/>exercise: at what scale is an object defined? Are the individual
+<br/>keys on a keyboard objects, or the entire keyboard, or is
+<br/>the entire computer an object? The definition of an object
+<br/>is blurry, and the hSO exploits this to allow incorporation
+<br/>of semantic information of the scene layout. The leaves of
+<br/>the hSO are a collection of parts and represent the objects,
+<br/>while the various levels in the hSO represent the super objects
+</td><td>('1713589', 'Devi Parikh', 'devi parikh')<br/>('1746230', 'Tsuhan Chen', 'tsuhan chen')</td><td>Correspondence should be addressed to Devi Parikh, dparikh@andrew.cmu.edu
+</td></tr><tr><td>b5d7c5aba7b1ededdf61700ca9d8591c65e84e88</td><td>INTERSPEECH 2010
+<br/>Data Pruning for Template-based Automatic Speech Recognition
+<br/><b>ESAT, Katholieke Universiteit Leuven, Leuven, Belgium</b></td><td>('1717646', 'Dino Seppi', 'dino seppi')</td><td>dino.seppi@esat.kuleuven.be, dirk.vancompernolle@esat.kuleuven.be
+</td></tr><tr><td>b5c749f98710c19b6c41062c60fb605e1ef4312a</td><td>Evaluating Two-Stream CNN for Video Classification
+<br/>School of Computer Science, Shanghai Key Lab of Intelligent Information Processing,
+<br/><b>Fudan University, Shanghai, China</b></td><td>('1743864', 'Hao Ye', 'hao ye')<br/>('3099139', 'Zuxuan Wu', 'zuxuan wu')<br/>('3066866', 'Rui-Wei Zhao', 'rui-wei zhao')<br/>('31825486', 'Xi Wang', 'xi wang')<br/>('1717861', 'Yu-Gang Jiang', 'yu-gang jiang')<br/>('1713721', 'Xiangyang Xue', 'xiangyang xue')</td><td>{haoye10, zxwu,rwzhao14, xwang10, ygj, xyxue}@fudan.edu.cn
+</td></tr><tr><td>b5857b5bd6cb72508a166304f909ddc94afe53e3</td><td>SSIG and IRISA at Multimodal Person Discovery
+<br/>1Department of Computer Science, Universidade Federal de Minas Gerais, Belo Horizonte, Brazil
+<br/>2IRISA & Inria Rennes , CNRS, Rennes, France
+</td><td>('2823797', 'Cassio E. dos Santos', 'cassio e. dos santos')<br/>('1708671', 'Guillaume Gravier', 'guillaume gravier')<br/>('1679142', 'William Robson Schwartz', 'william robson schwartz')</td><td>cass@dcc.ufmg.br, guig@irisa.fr, william@dcc.ufmg.br
+</td></tr><tr><td>b59f441234d2d8f1765a20715e227376c7251cd7</td><td></td><td></td><td></td></tr><tr><td>b51e3d59d1bcbc023f39cec233f38510819a2cf9</td><td>CBMM Memo No. 003
+<br/>March 27, 2014
+<br/>Can a biologically-plausible hierarchy effectively
+<br/>replace face detection, alignment, and
+<br/>recognition pipelines?
+<br/>by
+</td><td>('1694846', 'Qianli Liao', 'qianli liao')<br/>('2211263', 'Youssef Mroueh', 'youssef mroueh')</td><td></td></tr><tr><td>b54c477885d53a27039c81f028e710ca54c83f11</td><td>1201
+<br/>Semi-Supervised Kernel Mean Shift Clustering
+</td><td>('34817359', 'Saket Anand', 'saket anand')<br/>('3323332', 'Sushil Mittal', 'sushil mittal')<br/>('2577513', 'Oncel Tuzel', 'oncel tuzel')<br/>('1729185', 'Peter Meer', 'peter meer')</td><td></td></tr><tr><td>b503f481120e69b62e076dcccf334ee50559451e</td><td>Recognition of Facial Action Units with Action
+<br/>Unit Classifiers and An Association Network
+<br/>1Department of Electronic and Information Engineering, The Hong Kong Polytechnic
+<br/><b>University, Hong Kong</b><br/><b>Chu Hai College of Higher Education, Hong Kong</b></td><td>('2366262', 'JunKai Chen', 'junkai chen')<br/>('1715231', 'Zenghai Chen', 'zenghai chen')<br/>('8590720', 'Zheru Chi', 'zheru chi')<br/>('1965426', 'Hong Fu', 'hong fu')</td><td>Junkai.Chen@connect.polyu.hk, Zenghai.Chen@connect.polyu.hk
+<br/>chi.zheru@polyu.edu.hk, hongfu@chuhai.edu.hk
+</td></tr><tr><td>b55d0c9a022874fb78653a0004998a66f8242cad</td><td>Hybrid Facial Representations
+<br/>for Emotion Recognition
+<br/>Automatic facial expression recognition is a widely
+<br/>studied problem in computer vision and human-robot
+<br/>interaction. There has been a range of studies for
+<br/>representing facial descriptors for facial expression
+<br/>recognition. Some prominent descriptors were presented
+<br/>in the first facial expression recognition and analysis
+<br/>challenge (FERA2011). In that competition, the Local
+<br/>Gabor Binary Pattern Histogram Sequence descriptor
+<br/>showed the most powerful description capability. In this
+<br/>paper, we introduce hybrid facial representations for facial
+<br/>expression recognition, which have more powerful
+<br/>description capability with lower dimensionality. Our
+<br/>descriptors consist of a block-based descriptor and a pixel-
+<br/>based descriptor. The block-based descriptor represents
+<br/>the micro-orientation and micro-geometric structure
+<br/>information. The pixel-based descriptor represents texture
+<br/>information. We validate our descriptors on two public
+<br/>databases, and the results show that our descriptors
+<br/>perform well with a relatively low dimensionality.
+<br/>Keywords: Facial expression recognition, Histograms of
+<br/>Oriented Gradients, HOG, Local Binary Pattern, LBP,
+<br/>Rotated Local Binary Pattern, RLBP, Gabor filter, GF.
+<br/>
+<br/>Manuscript received Mar. 31, 2013; revised Aug. 29, 2013; accepted Sept. 23, 2013.
+<br/>This work was supported by the R&D program of the Korea Ministry of Knowledge and
+<br/><b>Economy (MKE) and the Korea Evaluation Institute of Industrial Technology (KEIT</b><br/>[10041826, Development of emotional features sensing, diagnostics and distribution s/w
+<br/>platform for measurement of multiple intelligence from young children].
+<br/>Jaehong Kim
+<br/>Daejeon, Rep. of Korea.
+<br/>and
+<br/>I. Introduction
+<br/>Facial expression is a natural and intuitive means for humans
+<br/>to express and sense their emotions and intentions. For this
+<br/>reason, automatic facial expression recognition has been an
+<br/>active research field in computer vision and human-robot
+<br/>interaction for a long time [1], [2]. In the case of robots living
+<br/>with a family, it is very useful to sense the family members’
+<br/>emotions through facial expressions and respond appropriately.
+<br/>There are three stages in the general automatic facial
+<br/>expression recognition systems. The first stage is to detect the
+<br/>faces and normalize the photographic images of the faces. This
+<br/>stage may be based on a holistic facial region or on facial
+<br/>components such as the eyes, nose, and mouth. The next stage
+<br/>is to extract the facial expression descriptors from the
+<br/>normalized faces. Finally, the system classifies the facial
+<br/>descriptors into the proper expression categories.
+<br/>In this paper, we introduce new facial expression descriptors.
+<br/>These descriptors adopt two representations, a block-based
+<br/>representation and a pixel-based representation, to reflect the
+<br/>micro-orientation, micro-geometric structure, and texture
+<br/>information. The descriptors show more powerful description
+<br/>capability with low dimensionality than the state-of-the-art
+<br/>descriptors.
+<br/>II. Previous Work
+<br/>Many researchers have shown a range of approaches to
+<br/>construct an automatic facial expression recognition system.
+<br/>Geometric approaches and texture-based approaches are the
+<br/>types. Texture-based approaches have
+<br/>most prominent
+<br/>generally shown a better performance
+<br/>than geometric
+<br/>approaches in previous research [3], [4]. In texture-based
+<br/>ETRI Journal, Volume 35, Number 6, December 2013 © 2013
+<br/>http://dx.doi.org/10.4218/etrij.13.2013.0054
+<br/>Woo-han Yun et al. 1021
+</td><td>('36034086', 'DoHyung Kim', 'dohyung kim')</td><td>Woo-han Yun (phone: +82 42 860 5804, yochin@etri.re.kr), DoHyung Kim
+<br/>(dhkim008@etri.re.kr), Chankyu Park
+<br/>(jhkim504@etri.re.kr) are with the IT Convergence Technology Research Laboratory, ETRI,
+<br/>(parkck@etri.re.kr),
+</td></tr><tr><td>b5930275813a7e7a1510035a58dd7ba7612943bc</td><td>JOURNAL OF INFORMATION SCIENCE AND ENGINEERING 26, 1525-1537 (2010)
+<br/>Short Paper__________________________________________________
+<br/>Face Recognition Using L-Fisherfaces*
+<br/><b>Institute of Information Science</b><br/><b>Beijing Jiaotong University</b><br/>Beijing, 100044 China
+<br/><b>College of Information and Electrical Engineering</b><br/><b>Shandong University of Science and Technology</b><br/>Qingdao, 266510 China
+<br/>An appearance-based face recognition approach called the L-Fisherfaces is pro-
+<br/>posed in this paper, By using Local Fisher Discriminant Embedding (LFDE), the face
+<br/>images are mapped into a face subspace for analysis. Different from Linear Discriminant
+<br/>Analysis (LDA), which effectively sees only the Euclidean structure of face space, LFDE
+<br/>finds an embedding that preserves local information, and obtains a face subspace that
+<br/>best detects the essential face manifold structure. Different from Locality Preserving
+<br/>Projections (LPP) and Unsupervised Discriminant projections (UDP), which ignore the
+<br/>class label information, LFDE searches for the project axes on which the data points of
+<br/>different classes are far from each other while requiring data points of the same class to
+<br/>be close to each other. We compare the proposed L-Fisherfaces approach with PCA,
+<br/>LDA, LPP, and UDP on three different face databases. Experimental results suggest that
+<br/>the proposed L-Fisherfaces provides a better representation and achieves higher accuracy
+<br/>in face recognition.
+<br/>Keywords: face recognition, local Fisher discriminant embedding, manifold learning, lo-
+<br/>cality preserving projections, unsupervised discriminant projections
+<br/>1. INTRODUCTION
+<br/>Face recognition has aroused wide concerns over the past few decades due to its
+<br/>potential applications, such as criminal identification, credit card verification, and secu-
+<br/>rity system and scene surveillance. In the literature, various algorithms have been proposed
+<br/>for this problem [1, 2]. PCA and LDA are two well-known linear subspace-learning tech-
+<br/>niques and have become the most popular methods for face recognition [3-5]. Recently, He
+<br/>et al. [6, 7] and Yang et al. [8, 9] proposed two manifold learning based methods,
+<br/>namely, Locality Preserving Projections (LPP) and unsupervised discriminant projection
+<br/>(UDP), for face recognition. LPP is a linear subspace method derived from Laplacian
+<br/>Eigenmap [10]. It results in a linear map that optimally preserves local neighborhood
+<br/>information and its objective function is to minimize the local scatter of the projected
+<br/>data. Unlike LPP, UDP finds a linear map based on the criterion that seeks to maximize
+<br/>Received July 29, 2008; revised October 30, 2008; accepted January 8, 2009.
+<br/>Communicated by H. Y. Mark Liao.
+<br/>* This work was partially supported by the National Natural Science Foundation of China (NSFC, No. 60672062)
+<br/>and the Major State Basic Research Development Program of China (973 Program No. 2004CB318005).
+<br/>1525
+</td><td>('7924002', 'Cheng-Yuan Zhang', 'cheng-yuan zhang')<br/>('2383779', 'Qiu-Qi Ruan', 'qiu-qi ruan')</td><td></td></tr><tr><td>b59c8b44a568587bc1b61d130f0ca2f7a2ae3b88</td><td>An Enhanced Intelligent Agent with Image Description
+<br/>Generation
+<br/>Department of Computer Science and Digital Technologies, Facutly of Engineering and
+<br/><b>Environment, Northumbria University, Newcastle, NE1 8ST, United Kingdom</b><br/>learning
+<br/>for
+<br/>techniques
+</td><td>('29695322', 'Ben Fielding', 'ben fielding')<br/>('1921534', 'Philip Kinghorn', 'philip kinghorn')<br/>('2801063', 'Kamlesh Mistry', 'kamlesh mistry')<br/>('1712838', 'Li Zhang', 'li zhang')</td><td>{ben.fielding, philip.kinghorn, kamlesh.mistry, li.zhang (corr. author)}@northumbria.ac.uk
+</td></tr><tr><td>b59cee1f647737ec3296ccb3daa25c890359c307</td><td>Continuously Reproducing Toolchains in Pattern
+<br/>Recognition and Machine Learning Experiments
+<br/>A. Anjos
+<br/><b>Idiap Research Institute</b><br/>Martigny, Switzerland
+<br/>M. G¨unther
+<br/>Vision and Security Technology
+<br/><b>University of Colorado</b><br/>Colorado Springs, USA
+</td><td></td><td>andre.anjos@idiap.ch
+<br/>mgunther@vast.uccs.edu
+</td></tr><tr><td>b249f10a30907a80f2a73582f696bc35ba4db9e2</td><td>Improved graph-based SFA: Information preservation
+<br/>complements the slowness principle
+<br/>Institut f¨ur Neuroinformatik
+<br/><b>Ruhr-University Bochum, Germany</b></td><td>('2366497', 'Alberto N. Escalante', 'alberto n. escalante')<br/>('1736245', 'Laurenz Wiskott', 'laurenz wiskott')</td><td></td></tr><tr><td>b2a0e5873c1a8f9a53a199eecae4bdf505816ecb</td><td>Hybrid VAE: Improving Deep Generative Models
+<br/>using Partial Observations
+<br/>Snap Research
+<br/>Microsoft Research
+</td><td>('1715440', 'Sergey Tulyakov', 'sergey tulyakov')<br/>('2388416', 'Sebastian Nowozin', 'sebastian nowozin')</td><td>stulyakov@snap.com
+<br/>{awf,Sebastian.Nowozin}@microsoft.com
+</td></tr><tr><td>b2cd92d930ed9b8d3f9dfcfff733f8384aa93de8</td><td>HyperFace: A Deep Multi-task Learning Framework for Face Detection,
+<br/>Landmark Localization, Pose Estimation, and Gender Recognition
+<br/><b>University of Maryland</b><br/><b>College Park, MD</b></td><td>('26988560', 'Rajeev Ranjan', 'rajeev ranjan')</td><td>rranjan1@umd.edu
+</td></tr><tr><td>b216040f110d2549f61e3f5a7261cab128cab361</td><td>2734
+<br/>IEICE TRANS. INF. & SYST., VOL.E100–D, NO.11 NOVEMBER 2017
+<br/>LETTER
+<br/>Weighted Voting of Discriminative Regions for Face Recognition∗
+<br/>SUMMARY
+<br/>This paper presents a strategy, Weighted Voting of Dis-
+<br/>criminative Regions (WVDR), to improve the face recognition perfor-
+<br/>mance, especially in Small Sample Size (SSS) and occlusion situations.
+<br/>In WVDR, we extract the discriminative regions according to facial key
+<br/>points and abandon the rest parts. Considering different regions of face
+<br/>make different contributions to recognition, we assign weights to regions
+<br/>for weighted voting. We construct a decision dictionary according to the
+<br/>recognition results of selected regions in the training phase, and this dic-
+<br/>tionary is used in a self-defined loss function to obtain weights. The final
+<br/>identity of test sample is the weighted voting of selected regions. In this
+<br/>paper, we combine the WVDR strategy with CRC and SRC separately, and
+<br/>extensive experiments show that our method outperforms the baseline and
+<br/>some representative algorithms.
+<br/>key words: discriminative regions, small sample size, occlusion, weighted
+<br/>strategy, face recognition
+<br/>1.
+<br/>Introduction
+<br/>Face recognition is one of the most popular and challenging
+<br/>problems in computer vision. Many representative methods,
+<br/>such as SRC [1] and CRC [2], have achieved good results in
+<br/>the controlled condition. However, face recognition with
+<br/>occlusion or small training size is still challenging.
+<br/>Wright et al. [1] first apply the Sparse Representation
+<br/>based Classification (SRC) for face recognition (FR). Zhang
+<br/>et al. [2] propose Collaborative Representation based Clas-
+<br/>sification (CRC) and claim that it is the CR instead of the
+<br/>l1-norm sparsity that truly improves the FR performance.
+<br/>However, the performance of classifiers (e.g. SVM [3], SRC
+<br/>and CRC) declines dramatically if the training sample size
+<br/>is small. Some works have been done to tackle the Small
+<br/>Sample Size (SSS) problem. The Extended SRC [4] algo-
+<br/>rithm constructs an auxiliary intra-class variant dictionary
+<br/>to represent the variations between training and test images,
+<br/>while the construction of the dictionary needs extra data.
+<br/>Patch-based methods are another effective way to solve the
+<br/>SSS problem.
+<br/>In [5], Zhu et al. propose the patch-based
+<br/>CRC and multi-scale ensemble. Gao et al. [6] propose the
+<br/>Regularized Patch-based Representation to solve the SSS
+<br/>problem. However, patch-based methods are sensitive to the
+<br/>patch size [7], and haven’t noticed the texture distribution of
+<br/>a face image.
+<br/>Images with disguise or occlusion are hard to clas-
+<br/>sify. The recognition rate of many classifiers (e.g. SVM and
+<br/>SRC) decreases rapidly when images occluded. Local Con-
+<br/>tourlet Combined Patterns (LCCP) [8] reports a good per-
+<br/>formance in non-occlusion images but the recognition rate
+<br/>decreases in occlusion condition. There are some improve-
+<br/>ments [9], [10] for occlusion problem. The recent prob-
+<br/>abilistic collaborative representation (ProCRC) [10] jointly
+<br/>maximizes the likelihood of test samples with multiple
+<br/>classes.
+<br/>Instead of splitting the image into patches of same size,
+<br/>we extract the face regions according to an alignment algo-
+<br/>rithm [11]. Some regions, such as eyes and nose, are dis-
+<br/>In addition, different regions
+<br/>criminative for recognition.
+<br/>have different representation abilities. As Fig. 1 shows, dis-
+<br/>criminative ability of regions is affected by type of region
+<br/>and training size. So it’s reasonable that the regions are as-
+<br/>signed with different weights.
+<br/>In this paper, we propose a method termed Weighted
+<br/>Voting of Discriminative Regions (WVDR), in which, dis-
+<br/>criminative regions are extracted from face images and
+<br/>weights are learned from a decision dictionary in training
+<br/>Manuscript received June 5, 2017.
+<br/>Manuscript revised July 16, 2017.
+<br/>Manuscript publicized August 4, 2017.
+<br/>The authors are with Shenzhen Key Lab. of Information Sci
+<br/>& Tech, Shenzhen Engineering Lab. of IS & DCP Department of
+<br/>Electronic Engineering, Graduate School at Shenzhen, Tsinghua
+<br/><b>University, China</b><br/>This work was supported by the Natural Science Foun-
+<br/>dation of China (No. 61471216, No. 61771276),
+<br/>the Na-
+<br/>tional Key Research and Development Program of China
+<br/>(No. 2016YFB0101001 and 2017YFC0112500) and the Spe-
+<br/>cial Foundation for the Development of Strategic Emerging In-
+<br/>dustries of Shenzhen (No. JCYJ20170307153940960 and No.
+<br/>JCYJ20150831192224146).
+<br/>thor)
+<br/>DOI: 10.1587/transinf.2017EDL8124
+<br/>Fig. 1
+<br/>Recognition rates (AR database) when using only a single region.
+<br/>The s represents the number of training samples per person. The X-axis
+<br/>represents the regions extracted from face, and the image means the whole
+<br/>face image.
+<br/><b>Copyright c(cid:3) 2017 The Institute of Electronics, Information and Communication Engineers</b></td><td>('3196016', 'Wenming Yang', 'wenming yang')<br/>('2183412', 'Riqiang Gao', 'riqiang gao')<br/>('2883861', 'Qingmin Liao', 'qingmin liao')</td><td>a) E-mail: grq15@mails.tsinghua.edu.cn (Corresponding au-
+</td></tr><tr><td>b261439b5cde39ec52d932a222450df085eb5a91</td><td>International Journal of Computer Trends and Technology (IJCTT) – volume 24 Number 2 – June 2015
+<br/>Facial Expression Recognition using Analytical Hierarchy
+<br/>Process
+<br/><b>MTech Student 1, 2, Disha Institute of</b><br/>Management and Technology, Raipur Chhattisgarh, India1, 2
+<br/>to
+<br/>its significant contribution
+</td><td></td><td></td></tr><tr><td>b234cd7788a7f7fa410653ad2bafef5de7d5ad29</td><td>Unsupervised Temporal Ensemble Alignment
+<br/>For Rapid Annotation
+<br/>1 CSIRO, Brisbane, QLD, Australia
+<br/><b>Queensland University of Technology, Brisbane, QLD, Australia</b><br/><b>Carnegie Mellon University, Pittsburgh, PA, USA</b></td><td>('3231493', 'Ashton Fagg', 'ashton fagg')<br/>('1729760', 'Sridha Sridharan', 'sridha sridharan')<br/>('1820249', 'Simon Lucey', 'simon lucey')</td><td>ashton@fagg.id.au, s.sridharan@qut.edu.au, slucey@cs.cmu.edu
+</td></tr><tr><td>b2c60061ad32e28eb1e20aff42e062c9160786be</td><td>Diverse and Controllable Image Captioning with
+<br/>Part-of-Speech Guidance
+<br/><b>University of Illinois at Urbana-Champaign</b></td><td>('2118997', 'Aditya Deshpande', 'aditya deshpande')<br/>('29956361', 'Jyoti Aneja', 'jyoti aneja')<br/>('46659761', 'Liwei Wang', 'liwei wang')</td><td>{ardeshp2, janeja2, lwang97, aschwing, daf}@illinois.edu
+</td></tr><tr><td>b2b535118c5c4dfcc96f547274cdc05dde629976</td><td>JOURNAL OF IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. X, XXX 2017
+<br/>Automatic Recognition of Facial Displays of
+<br/>Unfelt Emotions
+<br/>Escalera, Xavier Bar´o, Sylwia Hyniewska, Member, IEEE, J¨uri Allik,
+</td><td>('38370357', 'Kaustubh Kulkarni', 'kaustubh kulkarni')<br/>('22197083', 'Ciprian Adrian Corneanu', 'ciprian adrian corneanu')<br/>('22211769', 'Ikechukwu Ofodile', 'ikechukwu ofodile')<br/>('47608164', 'Gholamreza Anbarjafari', 'gholamreza anbarjafari')</td><td></td></tr><tr><td>b235b4ccd01a204b95f7408bed7a10e080623d2e</td><td>Regularizing Flat Latent Variables with Hierarchical Structures
+</td><td>('7246002', 'Rongcheng Lin', 'rongcheng lin')<br/>('2703486', 'Huayu Li', 'huayu li')<br/>('38472218', 'Xiaojun Quan', 'xiaojun quan')<br/>('2248826', 'Richang Hong', 'richang hong')<br/>('2737890', 'Zhiang Wu', 'zhiang wu')<br/>('1874059', 'Yong Ge', 'yong ge')</td><td>(cid:117)UNC Charlotte. Email: {rlin4, hli38, yong.ge}@uncc.edu,
+<br/>(cid:63) Hefei University of Technology. Email: hongrc@hfut.edu.cn
+<br/>† Institute for Infocomm Research. Email: quanx@i2r.a-star.edu.sg
+<br/>∓ Nanjing University of Finance and Economics. Email: zawu@seu.edu.cn
+</td></tr><tr><td>b29b42f7ab8d25d244bfc1413a8d608cbdc51855</td><td>EFFECTIVE FACE LANDMARK LOCALIZATION VIA SINGLE DEEP NETWORK
+<br/>1National Key Laboratory of Fundamental Science on Synthetic Vision
+<br/><b>School of Computer Science, Sichuan University, Chengdu, China</b></td><td>('3471145', 'Zongping Deng', 'zongping deng')<br/>('1691465', 'Ke Li', 'ke li')<br/>('7345195', 'Qijun Zhao', 'qijun zhao')<br/>('40600345', 'Yi Zhang', 'yi zhang')<br/>('1715100', 'Hu Chen', 'hu chen')</td><td>3huchen@scu.edu.cn
+</td></tr><tr><td>b2e5df82c55295912194ec73f0dca346f7c113f6</td><td>CUHK&SIAT Submission for THUMOS15 Action Recognition Challenge
+<br/><b>The Chinese University of Hong Kong, Hong Kong</b><br/><b>Shenzhen key lab of Comp. Vis. and Pat. Rec., Shenzhen Institutes of Advanced Technology, CAS, China</b></td><td>('39060754', 'Limin Wang', 'limin wang')<br/>('40184588', 'Zhe Wang', 'zhe wang')<br/>('3331521', 'Yuanjun Xiong', 'yuanjun xiong')<br/>('40612284', 'Yu Qiao', 'yu qiao')</td><td>07wanglimin@gmail.com, buptwangzhe2012@gmail.com, yjxiong@ie.cuhk.edu.hk, yu.qiao@siat.ac.cn
+</td></tr><tr><td>b2e6944bebab8e018f71f802607e6e9164ad3537</td><td>Mixed Error Coding for
+<br/>Face Recognition with Mixed Occlusions
+<br/><b>Zhejiang University of Technology</b><br/>Hangzhou, China
+</td><td>('4487395', 'Ronghua Liang', 'ronghua liang')<br/>('34478462', 'Xiao-Xin Li', 'xiao-xin li')</td><td>{rhliang, mordekai}@zjut.edu.cn
+</td></tr><tr><td>b2c25af8a8e191c000f6a55d5f85cf60794c2709</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>A Novel Dimensionality Reduction Technique based on
+<br/>Kernel Optimization Through Graph Embedding
+<br/>N. Vretos, A. Tefas and I. Pitas
+<br/>the date of receipt and acceptance should be inserted later
+</td><td></td><td></td></tr><tr><td>b239a756f22201c2780e46754d06a82f108c1d03</td><td>Robust Multimodal Recognition via Multitask
+<br/>Multivariate Low-Rank Representations
+<br/><b>Center for Automation Research, UMIACS, University of Maryland, College Park, MD 20742 USA</b></td><td>('9033105', 'Heng Zhang', 'heng zhang')<br/>('1741177', 'Vishal M. Patel', 'vishal m. patel')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td>{hzhang98, pvishalm, rama}@umiacs.umd.edu
+</td></tr><tr><td>b20cfbb2348984b4e25b6b9174f3c7b65b6aed9e</td><td>Learning with Ambiguous Label Distribution for
+<br/>Apparent Age Estimation
+<br/>Department of Signal Processing
+<br/><b>Tampere University of Technology</b><br/>Tampere 33720, Finland
+</td><td>('40394658', 'Ke Chen', 'ke chen')</td><td>firstname.lastname@tut.fi
+</td></tr><tr><td>d904f945c1506e7b51b19c99c632ef13f340ef4c</td><td>A scalable 3D HOG model for fast object detection and viewpoint estimation
+<br/>KU Leuven, ESAT/PSI - iMinds
+<br/>Kasteelpark Arenberg 10 B-3001 Leuven, Belgium
+</td><td>('3048367', 'Marco Pedersoli', 'marco pedersoli')<br/>('1704728', 'Tinne Tuytelaars', 'tinne tuytelaars')</td><td>firstname.lastname@esat.kuleuven.be
+</td></tr><tr><td>d949fadc9b6c5c8b067fa42265ad30945f9caa99</td><td>Rethinking Feature Discrimination and
+<br/>Polymerization for Large-scale Recognition
+<br/><b>The Chinese University of Hong Kong</b></td><td>('1715752', 'Yu Liu', 'yu liu')<br/>('46382329', 'Hongyang Li', 'hongyang li')<br/>('31843833', 'Xiaogang Wang', 'xiaogang wang')</td><td>{yuliu, yangli, xgwang}@ee.cuhk.edu.hk
+</td></tr><tr><td>d93baa5ecf3e1196b34494a79df0a1933fd2b4ec</td><td>Precise Temporal Action Localization by
+<br/>Evolving Temporal Proposals
+<br/><b>East China Normal University</b><br/>Shanghai, China
+<br/><b>University of Washington</b><br/>Seattle, WA, USA
+<br/>Shanghai Advanced Research
+<br/><b>Institute, CAS, China</b><br/><b>East China Normal University</b><br/>Shanghai, China
+<br/>Shanghai Advanced Research
+<br/><b>Institute, CAS, China</b><br/>Liang He
+<br/><b>East China Normal University</b><br/>Shanghai, China
+</td><td>('31567595', 'Haonan Qiu', 'haonan qiu')<br/>('1803391', 'Yao Lu', 'yao lu')<br/>('3015119', 'Yingbin Zheng', 'yingbin zheng')<br/>('47939010', 'Feng Wang', 'feng wang')<br/>('1743864', 'Hao Ye', 'hao ye')</td><td>hnqiu@ica.stc.sh.cn
+<br/>luyao@cs.washington.edu
+<br/>zhengyb@sari.ac.cn
+<br/>fwang@cs.ecnu.edu.cn
+<br/>yeh@sari.ac.cn
+<br/>lhe@cs.ecnu.edu.cn
+</td></tr><tr><td>d961617db4e95382ba869a7603006edc4d66ac3b</td><td>Experimenting Motion Relativity for Action Recognition
+<br/>with a Large Number of Classes
+<br/><b>East China Normal University</b><br/>500 Dongchuan Rd., Shanghai, China
+</td><td>('39586279', 'Feng Wang', 'feng wang')<br/>('38755510', 'Xiaoyan Li', 'xiaoyan li')</td><td></td></tr><tr><td>d9810786fccee5f5affaef59bc58d2282718af9b</td><td>Adaptive Frame Selection for
+<br/>Enhanced Face Recognition in
+<br/>Low-Resolution Videos
+<br/>by
+<br/>Thesis submitted to the
+<br/><b>College of Engineering and Mineral Resources</b><br/><b>at West Virginia University</b><br/>in partial fulfillment of the requirements
+<br/>for the degree of
+<br/>Master of Science
+<br/>in
+<br/>Electrical Engineering
+<br/>Arun Ross, PhD., Chair
+<br/>Xin Li, PhD.
+<br/>Donald Adjeroh, PhD.
+<br/>Lane Department of Computer Science and Electrical Engineering
+<br/>Morgantown, West Virginia
+<br/>2008
+<br/>Keywords: Face Biometrics, Super-Resolution, Optical Flow, Super-Resolution using
+<br/>Optical Flow, Adaptive Frame Selection, Inter-Frame Motion Parameter, Image Quality,
+<br/>Image-Level Fusion, Score-Level Fusion
+</td><td>('2531952', 'Raghavender Reddy Jillela', 'raghavender reddy jillela')<br/>('2531952', 'Raghavender Reddy Jillela', 'raghavender reddy jillela')</td><td></td></tr><tr><td>d94d7ff6f46ad5cab5c20e6ac14c1de333711a0c</td><td>978-1-5090-4117-6/17/$31.00 ©2017 IEEE
+<br/>3031
+<br/>ICASSP 2017
+</td><td></td><td></td></tr><tr><td>d930ec59b87004fd172721f6684963e00137745f</td><td>Face Pose Estimation using a
+<br/>Tree of Boosted Classifiers
+<br/><b>Signal Processing Institute</b><br/>´Ecole Polytechnique F´ed´erale de Lausanne (EPFL)
+<br/>September 11, 2006
+</td><td>('1768663', 'Julien Meynet', 'julien meynet')<br/>('1710257', 'Jean-Philippe Thiran', 'jean-philippe thiran')</td><td></td></tr><tr><td>d9739d1b4478b0bf379fe755b3ce5abd8c668f89</td><td></td><td></td><td></td></tr><tr><td>d9c4586269a142faee309973e2ce8cde27bda718</td><td>Contextual Visual Similarity
+<br/><b>The Robotics Institute</b><br/><b>Carnegie Mellon University</b></td><td>('2461523', 'Xiaofang Wang', 'xiaofang wang')<br/>('37991449', 'Kris M. Kitani', 'kris m. kitani')<br/>('1709305', 'Martial Hebert', 'martial hebert')</td><td>xiaofan2@andrew.cmu.edu {kkitani,hebert}@cs.cmu.edu
+</td></tr><tr><td>d912b8d88d63a2f0cb5d58164e7414bfa6b41dfa</td><td>Facial identification problem: A tracking based approach
+<br/>Department of Information Technology
+<br/><b>University of Milan</b><br/>via Bramante, 65 - 26013, Crema (CR), Italy
+<br/>Telephone: +390373898047, Fax: 0373899010
+<br/>AST Group, ST Microelectronics
+<br/>via Olivetti, 5 - 20041,
+<br/>Agrate Brianza (MI), Italy
+<br/>Telephone: +390396037234
+</td><td>('3330245', 'Marco Anisetti', 'marco anisetti')<br/>('2061298', 'Valerio Bellandi', 'valerio bellandi')<br/>('1746044', 'Ernesto Damiani', 'ernesto damiani')<br/>('2666794', 'Fabrizio Beverina', 'fabrizio beverina')</td><td>Email: {anisetti,bellandi,damiani}@dti.unimi.it
+<br/>Email: fabrizio.beverina@st.com
+</td></tr><tr><td>d9318c7259e394b3060b424eb6feca0f71219179</td><td>406
+<br/>Face Matching and Retrieval Using Soft Biometrics
+</td><td>('2222919', 'Unsang Park', 'unsang park')<br/>('6680444', 'Anil K. Jain', 'anil k. jain')</td><td></td></tr><tr><td>d9a1dd762383213741de4c1c1fd9fccf44e6480d</td><td></td><td></td><td></td></tr><tr><td>d963e640d0bf74120f147329228c3c272764932b</td><td>International Journal of Advanced Science and Technology
+<br/>Vol.64 (2014), pp.1-10
+<br/>http://dx.doi.org/10.14257/ijast.2014.64.01
+<br/>Image Processing for Face Recognition Rate Enhancement
+<br/><b>School of Computer and Information, Hefei University of Technology, Hefei</b><br/><b>University of Technology, Baghdad, Iraq</b><br/>People’s Republic of China
+</td><td></td><td>Israa_ameer@yahoo.com
+</td></tr><tr><td>d9ef1a80738bbdd35655c320761f95ee609b8f49</td><td> Volume 5, Issue 4, 2015 ISSN: 2277 128X
+<br/>International Journal of Advanced Research in
+<br/> Computer Science and Software Engineering
+<br/> Research Paper
+<br/> Available online at: www.ijarcsse.com
+<br/>A Research - Face Recognition by Using Near Set Theory
+<br/>Department of Computer Science and Engineering
+<br/><b>Abha Gaikwad -Patil College of Engineering, Nagpur, Maharashtra, India</b></td><td>('9231464', 'Bhakti Kurhade', 'bhakti kurhade')</td><td></td></tr><tr><td>d9c4b1ca997583047a8721b7dfd9f0ea2efdc42c</td><td>Learning Inference Models for Computer Vision
+</td><td></td><td></td></tr><tr><td>d9bad7c3c874169e3e0b66a031c8199ec0bc2c1f</td><td>It All Matters:
+<br/>Reporting Accuracy, Inference Time and Power Consumption
+<br/>for Face Emotion Recognition on Embedded Systems
+<br/><b>Institute of Telecommunications, TU Wien</b><br/>Movidius an Intel Company
+<br/>Dexmont Pe˜na
+<br/>Movidius an Intel Company
+<br/>Movidius an Intel Company
+<br/>ALaRI, Faculty of Informatics, USI
+</td><td>('48802034', 'Jelena Milosevic', 'jelena milosevic')<br/>('51129064', 'Andrew Forembsky', 'andrew forembsky')<br/>('9151916', 'David Moloney', 'david moloney')<br/>('1697550', 'Miroslaw Malek', 'miroslaw malek')</td><td>jelena.milosevic@tuwien.ac.at
+<br/>andrew.forembsky2@mail.dcu.ie
+<br/>dexmont.pena@intel.com
+<br/>david.moloney@intel.com
+<br/>miroslaw.malek@usi.ch
+</td></tr><tr><td>d9327b9621a97244d351b5b93e057f159f24a21e</td><td>SCIENCE CHINA
+<br/>Information Sciences
+<br/>. RESEARCH PAPERS .
+<br/>December 2010 Vol. 53 No. 12: 2415–2428
+<br/>doi: 10.1007/s11432-010-4099-1
+<br/>Laplacian smoothing transform for face recognition
+<br/>GU SuiCheng, TAN Ying
+<br/>& HE XinGui
+<br/>Key Laboratory of Machine Perception (MOE); Department of Machine Intelligence,
+<br/><b>School of Electronics Engineering and Computer Science; Peking University, Beijing 100871, China</b><br/>Received March 16, 2009; accepted April 1, 2010
+</td><td></td><td></td></tr><tr><td>d915e634aec40d7ee00cbea96d735d3e69602f1a</td><td>Two-Stream convolutional nets for action recognition in untrimmed video
+<br/><b>Stanford University</b><br/><b>Stanford University</b></td><td>('3308619', 'Kenneth Jung', 'kenneth jung')<br/>('5590869', 'Song Han', 'song han')</td><td>kjung@stanford.edu
+<br/>songhan@stanford.edu
+</td></tr><tr><td>aca232de87c4c61537c730ee59a8f7ebf5ecb14f</td><td>EBGM VS SUBSPACE PROJECTION FOR FACE RECOGNITION
+<br/>19.5 Km Markopoulou Avenue, P.O. Box 68, Peania, Athens, Greece
+<br/>Athens Information Technology
+<br/>Keywords:
+<br/>Human-Machine Interfaces, Computer Vision, Face Recognition.
+</td><td>('40089976', 'Andreas Stergiou', 'andreas stergiou')<br/>('1702943', 'Aristodemos Pnevmatikakis', 'aristodemos pnevmatikakis')<br/>('1725498', 'Lazaros Polymenakos', 'lazaros polymenakos')</td><td></td></tr><tr><td>ac1d97a465b7cc56204af5f2df0d54f819eef8a6</td><td>A Look at Eye Detection for Unconstrained
+<br/>Environments
+<br/>Key words: Unconstrained Face Recognition, Eye Detection, Machine Learning,
+<br/>Correlation Filters, Photo-head Testing Protocol
+<br/>1 Introduction
+<br/>Eye detection is a necessary processing step for many face recognition algorithms.
+<br/>For some of these algorithms, the eye coordinates are required for proper geomet-
+<br/>ric normalization before recognition. For others, the eyes serve as reference points
+<br/>to locate other significant features on the face, such as the nose and mouth. The
+<br/>eyes, containing significant discriminative information, can even be used by them-
+<br/>selves as features for recognition. Eye detection is a well studied problem for the
+<br/>constrained face recognition problem, where we find controlled distances, lighting,
+<br/>and limited pose variation. A far more difficult scenario for eye detection is the un-
+<br/>constrained face recognition problem, where we do not have any control over the
+<br/>environment or the subject. In this chapter, we will take a look at eye detection for
+<br/>the latter, which encompasses problems of flexible authentication, surveillance, and
+<br/>intelligence collection.
+<br/>A multitude of problems affect the acquisition of face imagery in unconstrained
+<br/>environments, with major problems related to lighting, distance, motion and pose.
+<br/>Existing work on lighting [14, 7] has focused on algorithmic issues (specifically,
+<br/>normalization), and not the direct impact of acquisition. Under difficult acquisition
+<br/><b>Vision and Security Technology Lab, University of Colorado at Colorado Springs, Colorado</b><br/>Anderson Rocha
+<br/><b>Institute of Computing, University of Campinas (Unicamp), Campinas, Brazil, e-mail: ander</b></td><td>('2613438', 'Walter J. Scheirer', 'walter j. scheirer')<br/>('1760117', 'Terrance E. Boult', 'terrance e. boult')<br/>('2613438', 'Walter J. Scheirer', 'walter j. scheirer')<br/>('1760117', 'Terrance E. Boult', 'terrance e. boult')</td><td>USA, e-mail: lastname@uccs.edu
+<br/>son.rocha@ic.unicamp.br
+</td></tr><tr><td>ac2e44622efbbab525d4301c83cb4d5d7f6f0e55</td><td>A 3D Morphable Model learnt from 10,000 faces
+<br/><b>Imperial College London, UK</b><br/>†Great Ormond Street Hospital, UK
+<br/><b>Center for Machine Vision and Signal Analysis (CMVS), University of Oulu, Finland</b></td><td>('1848903', 'James Booth', 'james booth')<br/>('2931390', 'Anastasios Roussos', 'anastasios roussos')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')<br/>('5137183', 'Allan Ponniah', 'allan ponniah')<br/>('2421231', 'David Dunaway', 'david dunaway')</td><td>⋆{james.booth,troussos,s.zafeiriou}@imperial.ac.uk, †{allan.ponniah,david.dunaway}@gosh.nhs.uk
+</td></tr><tr><td>ac6c3b3e92ff5fbcd8f7967696c7aae134bea209</td><td>Deep Cascaded Bi-Network for
+<br/>Face Hallucination(cid:63)
+<br/><b>The Chinese University of Hong Kong</b><br/><b>Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences</b><br/><b>University of California, Merced</b></td><td>('2226254', 'Shizhan Zhu', 'shizhan zhu')<br/>('2391885', 'Sifei Liu', 'sifei liu')<br/>('1717179', 'Chen Change Loy', 'chen change loy')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td></td></tr><tr><td>ac855f0de9086e9e170072cb37400637f0c9b735</td><td>Fast Geometrically-Perturbed Adversarial Faces
+<br/><b>West Virginia University</b></td><td>('35477977', 'Ali Dabouei', 'ali dabouei')<br/>('30319988', 'Sobhan Soleymani', 'sobhan soleymani')<br/>('8147588', 'Nasser M. Nasrabadi', 'nasser m. nasrabadi')</td><td>{ad0046, ssoleyma}@mix.wvu.edu, {jeremy.dawson, nasser.nasrabadi}@mail.wvu.edu
+</td></tr><tr><td>ac21c8aceea6b9495574f8f9d916e571e2fc497f</td><td>Pose-Independent Identity-based Facial Image
+<br/>Retrieval using Contextual Similarity
+<br/><b>King Abdullah University of Science and Technology 4700, Thuwal, Saudi Arabia</b></td><td>('3036634', 'Islam Almasri', 'islam almasri')</td><td></td></tr><tr><td>ac6a9f80d850b544a2cbfdde7002ad5e25c05ac6</td><td>779
+<br/>Privacy-Protected Facial Biometric Verification
+<br/>Using Fuzzy Forest Learning
+</td><td>('1690116', 'Ahmed Bouridane', 'ahmed bouridane')<br/>('1691478', 'Danny Crookes', 'danny crookes')<br/>('1739563', 'M. Emre Celebi', 'm. emre celebi')<br/>('39486168', 'Hua-Liang Wei', 'hua-liang wei')</td><td></td></tr><tr><td>aca273a9350b10b6e2ef84f0e3a327255207d0f5</td><td></td><td></td><td></td></tr><tr><td>aca75c032cfb0b2eb4c0ae56f3d060d8875e43f9</td><td>Co-Regularized Ensemble for Feature Selection
+<br/><b>School of Computer Science and Technology, Tianjin University, China</b><br/><b>School of Information Technology and Electrical Engineering, The University of Queensland</b><br/>3Tianjin Key Laboratory of Cognitive Computing and Application
+</td><td>('2302512', 'Yahong Han', 'yahong han')<br/>('1698559', 'Yi Yang', 'yi yang')<br/>('1720932', 'Xiaofang Zhou', 'xiaofang zhou')</td><td>yahong@tju.edu.cn, yee.i.yang@gmail.com, zxf@itee.uq.edu.au
+</td></tr><tr><td>accbd6cd5dd649137a7c57ad6ef99232759f7544</td><td>FACIAL EXPRESSION RECOGNITION WITH LOCAL BINARY PATTERNS
+<br/>AND LINEAR PROGRAMMING
+<br/>1 Machine Vision Group, Infotech Oulu and Dept. of Electrical and Information Engineering
+<br/><b>P. O. Box 4500 Fin-90014 University of Oulu, Finland</b><br/><b>College of Electronics and Information, Northwestern Polytechnic University</b><br/>710072 Xi’an, China
+<br/>In this work, we propose a novel approach to recognize facial expressions from static
+<br/>images. First, the Local Binary Patterns (LBP) are used to efficiently represent the facial
+<br/>images and then the Linear Programming (LP) technique is adopted to classify the seven
+<br/>facial expressions anger, disgust, fear, happiness, sadness, surprise and neutral.
+<br/>Experimental results demonstrate an average recognition accuracy of 93.8% on the JAFFE
+<br/>database, which outperforms the rates of all other reported methods on the same database.
+<br/>Introduction
+<br/>Facial expression recognition from static
+<br/>images is a more challenging problem
+<br/>than from image sequences because less
+<br/>information for expression actions
+<br/>is
+<br/>available. However, information in a
+<br/>single image is sometimes enough for
+<br/>expression recognition, and
+<br/>in many
+<br/>applications it is also useful to recognize
+<br/>single image’s facial expression.
+<br/>In the recent years, numerous approaches
+<br/>to facial expression analysis from static
+<br/>images have been proposed [1] [2]. These
+<br/>methods
+<br/>face
+<br/>representation and similarity measure.
+<br/>For instance, Zhang [3] used two types of
+<br/>features: the geometric position of 34
+<br/>manually selected fiducial points and a
+<br/>set of Gabor wavelet coefficients at these
+<br/>points. These two types of features were
+<br/>used both independently and jointly with
+<br/>a multi-layer perceptron for classification.
+<br/>Guo and Dyer [4] also adopted a similar
+<br/>face representation, combined with linear
+<br/>to carry out
+<br/>programming
+<br/>selection
+<br/>simultaneous
+<br/>and
+<br/>classifier
+<br/>they reported
+<br/>technique
+<br/>feature
+<br/>training, and
+<br/>differ
+<br/>generally
+<br/>in
+<br/>a
+<br/>simple
+<br/>imperative question
+<br/>better result. Lyons et al. used a similar face
+<br/>representation with
+<br/>LDA-based
+<br/>classification scheme [5]. All the above methods
+<br/>required the manual selection of fiducial points.
+<br/>Buciu et al. used ICA and Gabor representation for
+<br/>facial expression recognition and reported good result
+<br/>on the same database [6]. However, a suitable
+<br/>combination of feature extraction and classification is
+<br/>still one
+<br/>for expression
+<br/>recognition.
+<br/>In this paper, we propose a novel method for facial
+<br/>expression recognition. In the feature extraction step,
+<br/>the Local Binary Pattern (LBP) operator is used to
+<br/>describe facial expressions. In the classification step,
+<br/>seven expressions (anger, disgust, fear, happiness,
+<br/>sadness, surprise and neutral) are decomposed into 21
+<br/>expression pairs such as anger-fear, happiness-
+<br/>sadness etc. 21 classifiers are produced by the Linear
+<br/>Programming (LP) technique, each corresponding to
+<br/>one of the 21 expression pairs. A simple binary tree
+<br/>tournament scheme with pairwise comparisons is
+<br/>Face Representation with Local Binary Patterns
+<br/>
+<br/>Fig.1 shows the basic LBP operator [7], in which the
+<br/>original 3×3 neighbourhood at the left is thresholded
+<br/>by the value of the centre pixel, and a binary pattern
+</td><td>('4729239', 'Xiaoyi Feng', 'xiaoyi feng')<br/>('1714724', 'Matti Pietikäinen', 'matti pietikäinen')<br/>('1751372', 'Abdenour Hadid', 'abdenour hadid')</td><td>{xiaoyi,mkp,hadid}@ee.oulu.fi
+<br/>fengxiao@nwpu.edu.cn
+</td></tr><tr><td>ac51d9ddbd462d023ec60818bac6cdae83b66992</td><td>Hindawi Publishing Corporation
+<br/>Computational Intelligence and Neuroscience
+<br/>Volume 2015, Article ID 709072, 10 pages
+<br/>http://dx.doi.org/10.1155/2015/709072
+<br/>Research Article
+<br/>An Efficient Robust Eye Localization by Learning
+<br/>the Convolution Distribution Using Eye Template
+<br/>1Science and Technology on Parallel and Distributed Processing Laboratory, School of Computer,
+<br/><b>National University of Defense Technology, Changsha 410073, China</b><br/><b>Informatization Office, National University of Defense Technology, Changsha 410073, China</b><br/>Received 30 January 2015; Accepted 14 April 2015
+<br/>Academic Editor: Ye-Sho Chen
+<br/>permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
+<br/>Eye localization is a fundamental process in many facial analyses. In practical use, it is often challenged by illumination, head pose,
+<br/>facial expression, occlusion, and other factors. It remains great difficulty to achieve high accuracy with short prediction time and
+<br/>low training cost at the same time. This paper presents a novel eye localization approach which explores only one-layer convolution
+<br/>map by eye template using a BP network. Results showed that the proposed method is robust to handle many difficult situations. In
+<br/>experiments, accuracy of 98% and 96%, respectively, on the BioID and LFPW test sets could be achieved in 10 fps prediction rate
+<br/>with only 15-minute training cost. In comparison with other robust models, the proposed method could obtain similar best results
+<br/>with greatly reduced training time and high prediction speed.
+<br/>1. Introduction
+<br/>Eye localization is essential to many face analyses. In analysis
+<br/>of the human sentiment, eye focus, and head pose, the loca-
+<br/>tion of the eye is indispensable to extract the corresponding
+<br/>information there [1]. In face tracing, eye localization is often
+<br/>required in real time. In face recognition, many algorithms
+<br/>ask for the alignment of the face images based on eye location
+<br/>[2]. Inaccurate location may result in the failure of the
+<br/>recognition [3, 4].
+<br/>However, real-world eye localization is filled with chal-
+<br/>lenges. Face pictures are commonly taken by a projection
+<br/>from the 3D space to the 2D plane. Appearance of the face
+<br/>image could be influenced by the head pose, facial expression,
+<br/>and illumination. Texture around eyes is therefore full of
+<br/>change. Moreover, eyes may be occluded by stuffs like glasses
+<br/>and hair, as shown in Figure 1. To work in any unexpected
+<br/>cases, the algorithm should be robust to those impacts.
+<br/>In the design of the eye localization algorithm in practical
+<br/>use, prediction accuracy, rate, and the training cost are the
+<br/>most concerned factors. A robust algorithm should keep high
+<br/>prediction accuracy for varying cases with diverse face poses,
+<br/>facial expressions in complex environment with occlusion,
+<br/>and illumination changes. For real time applications, high
+<br/>prediction rate is required. For some online learning systems
+<br/>like the one used for public security, short training time is
+<br/>also in demand to quickly adapt the algorithm to different
+<br/>working places. Low training cost is also of benefit for the
+<br/>tuning of the algorithm. To improve the accuracy in the diffi-
+<br/>cult environment, complex model is often applied. However,
+<br/>the over complicated model will increase the training cost
+<br/>and the prediction time. How to select an approach with
+<br/>enough complexity to achieve high prediction accuracy, high
+<br/>prediction rate, and low training cost at the same time is still
+<br/>a challenge.
+<br/>Eye localization approaches could be mainly divided into
+<br/>the texture based and the structure based. Texture based
+<br/>methods [5–8] learn the features from the image textures. For
+<br/>the methods exploring local textures [5, 6], high prediction
+<br/>rate could be achieved with simple training. However, they
+<br/>are usually not robust to the situation with occlusion and
+<br/>distortion due to the limited information from the local area.
+<br/>On the other hand, methods like [7, 8] study the global texture
+<br/>feature from entire face image by convolution networks. High
+</td><td>('1790480', 'Xuan Li', 'xuan li')<br/>('1791001', 'Yong Dou', 'yong dou')<br/>('2223570', 'Xin Niu', 'xin niu')<br/>('2512580', 'Jiaqing Xu', 'jiaqing xu')<br/>('2672701', 'Ruorong Xiao', 'ruorong xiao')<br/>('1790480', 'Xuan Li', 'xuan li')</td><td>Correspondence should be addressed to Xuan Li; lixuan@nudt.edu.cn
+</td></tr><tr><td>acc548285f362e6b08c2b876b628efceceeb813e</td><td>Hindawi Publishing Corporation
+<br/>Computational and Mathematical Methods in Medicine
+<br/>Volume 2014, Article ID 427826, 12 pages
+<br/>http://dx.doi.org/10.1155/2014/427826
+<br/>Research Article
+<br/>Objectifying Facial Expressivity Assessment of Parkinson’s
+<br/>Patients: Preliminary Study
+<br/><b>Vrije Universiteit Brussel, 1050 Brussels, Belgium</b><br/><b>Shaanxi Provincial Key Lab on Speech and Image Information Processing, Northwestern Polytechnical University, Xi an, China</b><br/><b>Vrije Universiteit Brussel, 1050 Brussels, Belgium</b><br/><b>Vrije Universiteit Brussel, 1050 Brussels, Belgium</b><br/>Received 9 June 2014; Accepted 22 September 2014; Published 13 November 2014
+<br/>Academic Editor: Justin Dauwels
+<br/>permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
+<br/>Patients with Parkinson’s disease (PD) can exhibit a reduction of spontaneous facial expression, designated as “facial masking,” a
+<br/>symptom in which facial muscles become rigid. To improve clinical assessment of facial expressivity of PD, this work attempts
+<br/>to quantify the dynamic facial expressivity (facial activity) of PD by automatically recognizing facial action units (AUs) and
+<br/>estimating their intensity. Spontaneous facial expressivity was assessed by comparing 7 PD patients with 8 control participants. To
+<br/>voluntarily produce spontaneous facial expressions that resemble those typically triggered by emotions, six emotions (amusement,
+<br/>sadness, anger, disgust, surprise, and fear) were elicited using movie clips. During the movie clips, physiological signals (facial
+<br/>electromyography (EMG) and electrocardiogram (ECG)) and frontal face video of the participants were recorded. The participants
+<br/>were asked to report on their emotional states throughout the experiment. We first examined the effectiveness of the emotion
+<br/>manipulation by evaluating the participant’s self-reports. Disgust-induced emotions were significantly higher than the other
+<br/>emotions. Thus we focused on the analysis of the recorded data during watching disgust movie clips. The proposed facial expressivity
+<br/>assessment approach captured differences in facial expressivity between PD patients and controls. Also differences between PD
+<br/>patients with different progression of Parkinson’s disease have been observed.
+<br/>1. Introduction
+<br/>One of the manifestations of Parkinson’s disease (PD) is the
+<br/>gradual loss of facial mobility and “mask-like” appearance.
+<br/>Katsikitis and Pilowsky (1988) [1] stated that PD patients
+<br/>were rated as significantly less expressive than an aphasic
+<br/>and control group, on a task designed to assess spontaneous
+<br/>facial expression. In addition, the spontaneous smiles of PD
+<br/>patients are often perceived to be “unfelt,” because of the lack
+<br/>of accompanying cheek raises [2]. Jacobs et al. [3] confirmed
+<br/>that PD patients show reduced intensity of emotional facial
+<br/>expression compared to the controls. In order to assess facial
+<br/>expressivity, most research relies on subjective coding of the
+<br/>implied researchers, as in aforementioned studies. Tickle-
+<br/>Degnen and Lyons [4] found that decreased facial expressivity
+<br/>correlated with self-reports of PD patients as well as the
+<br/>Unified Parkinson’s Disease Rating Scale (UPDRS) [5]. PD
+<br/>patients, who rated their ability to facially express emotions
+<br/>as severely affected, did demonstrate less facial expressivity.
+<br/>In this paper, we investigate automatic measurements
+<br/>of facial expressivity from video recorded PD patients and
+<br/>control populations. To the best of our knowledge, in actual
+<br/>research, few attempts have been made for designing a
+<br/>computer-based quantitative analysis of facial expressivity of
+<br/>PD patient. To analyze whether Parkinson’s disease affected
+<br/>voluntary expression of facial emotions, Bowers et al. [6]
+<br/>videotaped PD patients and healthy control participants
+<br/>while they made voluntary facial expression (happy, sad, fear,
+<br/>anger, disgust, and surprise). In their approach, the amount of
+<br/>facial movements change and timing have been quantified by
+</td><td>('40432410', 'Peng Wu', 'peng wu')<br/>('34068333', 'Isabel Gonzalez', 'isabel gonzalez')<br/>('3348420', 'Dongmei Jiang', 'dongmei jiang')<br/>('1970907', 'Hichem Sahli', 'hichem sahli')<br/>('3041213', 'Eric Kerckhofs', 'eric kerckhofs')<br/>('2540163', 'Marie Vandekerckhove', 'marie vandekerckhove')<br/>('40432410', 'Peng Wu', 'peng wu')</td><td>Correspondence should be addressed to Peng Wu; pwu@etro.vub.ac.be
+</td></tr><tr><td>acee2201f8a15990551804dd382b86973eb7c0a8</td><td>To Boost or Not to Boost? On the Limits of
+<br/>Boosted Trees for Object Detection
+<br/><b>Computer Vision and Robotics Research Laboratory</b><br/><b>University of California San Diego</b></td><td>('1802326', 'Eshed Ohn-Bar', 'eshed ohn-bar')</td><td>{eohnbar, mtrivedi}@ucsd.edu
+</td></tr><tr><td>ac0d3f6ed5c42b7fc6d7c9e1a9bb80392742ad5e</td><td></td><td></td><td></td></tr><tr><td>ac820d67b313c38b9add05abef8891426edd5afb</td><td></td><td></td><td></td></tr><tr><td>ac9a331327cceda4e23f9873f387c9fd161fad76</td><td>Deep Convolutional Neural Network for Age Estimation based on
+<br/>VGG-Face Model
+<br/><b>University of Bridgeport</b><br/><b>University of Bridgeport</b><br/>Technology Building, Bridgeport CT 06604 USA
+</td><td>('7404315', 'Zakariya Qawaqneh', 'zakariya qawaqneh')<br/>('34792425', 'Arafat Abu Mallouh', 'arafat abu mallouh')<br/>('2791535', 'Buket D. Barkana', 'buket d. barkana')</td><td>Emails: {zqawaqneh; aabumall@my.bridgeport.edu}, bbarkana@bridgeport.edu
+</td></tr><tr><td>ac26166857e55fd5c64ae7194a169ff4e473eb8b</td><td>Personalized Age Progression with Bi-level
+<br/>Aging Dictionary Learning
+</td><td>('2287686', 'Xiangbo Shu', 'xiangbo shu')<br/>('8053308', 'Jinhui Tang', 'jinhui tang')<br/>('3233021', 'Zechao Li', 'zechao li')<br/>('2356867', 'Hanjiang Lai', 'hanjiang lai')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')</td><td></td></tr><tr><td>ac559873b288f3ac28ee8a38c0f3710ea3f986d9</td><td>Team DEEP-HRI Moments in Time Challenge 2018 Technical Report
+<br/><b>Hikvision Research Institute</b></td><td>('39816387', 'Chao Li', 'chao li')<br/>('48375401', 'Zhi Hou', 'zhi hou')<br/>('35843399', 'Jiaxu Chen', 'jiaxu chen')<br/>('9162532', 'Jiqiang Zhou', 'jiqiang zhou')<br/>('50322310', 'Di Xie', 'di xie')<br/>('3290437', 'Shiliang Pu', 'shiliang pu')</td><td></td></tr><tr><td>ac8e09128e1e48a2eae5fa90f252ada689f6eae7</td><td>Leolani: a reference machine with a theory of
+<br/>mind for social communication
+<br/><b>VU University Amsterdam, Computational Lexicology and Terminology Lab, De</b><br/>Boelelaan 1105, 1081HV Amsterdam, The Netherlands
+<br/>www.cltl.nl
+</td><td>('50998926', 'Bram Kraaijeveld', 'bram kraaijeveld')</td><td>{p.t.j.m.vossen,s.baezsantamaria,l.bajcetic,b.kraaijeveld}@vu.nl
+</td></tr><tr><td>ac8441e30833a8e2a96a57c5e6fede5df81794af</td><td>IEEE TRANSACTIONS ON IMAGE PROCESSING
+<br/>Hierarchical Representation Learning for Kinship
+<br/>Verification
+</td><td>('1952698', 'Naman Kohli', 'naman kohli')<br/>('2338122', 'Mayank Vatsa', 'mayank vatsa')<br/>('39129417', 'Richa Singh', 'richa singh')<br/>('2487227', 'Afzel Noore', 'afzel noore')<br/>('2641605', 'Angshul Majumdar', 'angshul majumdar')</td><td></td></tr><tr><td>ac86ccc16d555484a91741e4cb578b75599147b2</td><td>Morphable Face Models - An Open Framework
+<br/><b>Gravis Research Group, University of Basel</b></td><td>('3277377', 'Thomas Gerig', 'thomas gerig')<br/>('39550224', 'Clemens Blumer', 'clemens blumer')<br/>('34460642', 'Bernhard Egger', 'bernhard egger')<br/>('1687079', 'Thomas Vetter', 'thomas vetter')</td><td></td></tr><tr><td>ac12ba5bf81de83991210b4cd95b4ad048317681</td><td>Combining Deep Facial and Ambient Features
+<br/>for First Impression Estimation
+<br/><b>Program of Computational Science and Engineering, Bo gazi ci University</b><br/>Bebek, Istanbul, Turkey
+<br/><b>Nam k Kemal University</b><br/>C¸ orlu, Tekirda˘g, Turkey
+<br/><b>Bo gazi ci University</b><br/>Bebek, Istanbul, Turkey
+</td><td>('38007788', 'Heysem Kaya', 'heysem kaya')<br/>('1764521', 'Albert Ali Salah', 'albert ali salah')</td><td>furkan.gurpinar@boun.edu.tr
+<br/>hkaya@nku.edu.tr
+<br/>salah@boun.edu.tr
+</td></tr><tr><td>ac75c662568cbb7308400cc002469a14ff25edfd</td><td>REGULARIZATION STUDIES ON LDA FOR FACE RECOGNITION
+<br/>Bell Canada Multimedia Laboratory, The Edward S. Rogers Sr. Department of
+<br/><b>Electrical and Computer Engineering, University of Toronto, M5S 3G4, Canada</b></td><td>('1681365', 'Juwei Lu', 'juwei lu')</td><td></td></tr><tr><td>ac9dfbeb58d591b5aea13d13a83b1e23e7ef1fea</td><td>From Gabor Magnitude to Gabor Phase Features:
+<br/>Tackling the Problem of Face Recognition under Severe Illumination Changes
+<br/>215
+<br/>12
+<br/>X
+<br/>From Gabor Magnitude to Gabor Phase
+<br/>Features: Tackling the Problem of Face
+<br/>Recognition under Severe Illumination Changes
+<br/><b>Faculty of Electrical Engineering, University of Ljubljana</b><br/>Slovenia
+<br/>1. Introduction
+<br/>Among the numerous biometric systems presented in the literature, face recognition
+<br/>systems have received a great deal of attention in recent years. The main driving force in the
+<br/>development of these systems can be found in the enormous potential face recognition
+<br/>technology has in various application domains ranging from access control, human-machine
+<br/>interaction and entertainment to homeland security and surveillance (Štruc et al., 2008a).
+<br/>While contemporary face recognition techniques have made quite a leap in terms of
+<br/>performance over the last two decades, they still struggle with their performance when
+<br/>deployed in unconstrained and uncontrolled environments (Gross et al., 2004; Phillips et al.,
+<br/>2007). In such environments the external conditions present during the image acquisition
+<br/>stage heavily influence the appearance of a face in the acquired image and consequently
+<br/>affect the performance of the recognition system. It is said that face recognition techniques
+<br/>suffer from the so-called PIE problem, which refers to the problem of handling Pose,
+<br/>Illumination and Expression variations that are typically encountered in real-life operating
+<br/>conditions. In fact, it was emphasized by numerous researchers that the appearance of the
+<br/>same face can vary significantly from image to image due to changes of the PIE factors and
+<br/>that the variability in the images induced by the these factors can easily surpass the
+<br/>variability induced by the subjects’ identity (Gross et al., 2004; Short et al., 2005). To cope
+<br/>with image variability induced by the PIE factors, face recognition systems have to utilize
+<br/>feature extraction techniques capable of extracting stable and discriminative features from
+<br/>facial images regardless of the conditions governing the acquisition procedure. We will
+<br/>confine ourselves in this chapter to tackling the problem of illumination changes, as it
+<br/>represents the PIE factor which, in our opinion, is the hardest to control when deploying a
+<br/>face recognition system, e.g., in access control applications.
+<br/>Many feature extraction techniques, among them particularly the appearance based
+<br/>methods, have difficulties extracting stable features from images captured under varying
+<br/>illumination conditions and, hence, perform poorly when deployed in unconstrained
+<br/>environments. Researchers have, therefore, proposed a number of alternatives that should
+<br/>compensate for the illumination changes and thus ensure stable face recognition
+<br/>performance.
+</td><td>('2011218', 'Vitomir Štruc', 'vitomir štruc')<br/>('1753753', 'Nikola Pavešić', 'nikola pavešić')</td><td></td></tr><tr><td>acb83d68345fe9a6eb9840c6e1ff0e41fa373229</td><td>Kernel Methods in Computer Vision:
+<br/>Object Localization, Clustering,
+<br/>and Taxonomy Discovery
+<br/>vorgelegt von
+<br/>Matthew Brian Blaschko, M.S.
+<br/>aus La Jolla
+<br/>Von der Fakult¨at IV - Elektrotechnik und Informatik
+<br/>der Technischen Universit¨at Berlin
+<br/>zur Erlangung des akademischen Grades
+<br/>Doktor der Naturwissenschaften
+<br/>Dr. rer. nat.
+<br/>genehmigte Dissertation
+<br/>Promotionsausschuß:
+<br/>Vorsitzender: Prof. Dr. O. Hellwich
+<br/>Berichter: Prof. Dr. T. Hofmann
+<br/>Berichter: Prof. Dr. K.-R. M¨uller
+<br/>Berichter: Prof. Dr. B. Sch¨olkopf
+<br/>Tag der wissenschaftlichen Aussprache: 23.03.2009
+<br/>Berlin 2009
+<br/>D83
+</td><td></td><td></td></tr><tr><td>ade1034d5daec9e3eba1d39ae3f33ebbe3e8e9a7</td><td>Multimodal Caricatural Mirror
+<br/>(1) : Université catholique de Louvain, Belgium
+<br/>(2) Universitat Polytecnica de Barcelona, Spain
+<br/>(3) Universidad Polytècnica de Madrid, Spain
+<br/><b>Aristotle University of Thessaloniki, Greece</b><br/><b>Bogazici University, Turkey</b><br/>(6) Faculté Polytechnique de Mons, Belgium
+</td><td></td><td></td></tr><tr><td>ad8540379884ec03327076b562b63bc47e64a2c7</td><td>Int. J. Bio-Inspired Computation, Vol. 5, No. 3, 2013
+<br/>175
+<br/>Bee royalty offspring algorithm for improvement of
+<br/>facial expressions classification model
+<br/>Department of Computer Science,
+<br/>Mahshahr Branch,
+<br/><b>Islamic Azad University</b><br/>Mahshahr, Iran
+<br/>*Corresponding author
+<br/>Md Jan Nordin
+<br/>Centre for Artificial Intelligence Technology,
+<br/>Universiti Kebangsaan Malaysia,
+<br/>Bangi, Selangor, Malaysia
+</td><td>('1880066', 'Amir Jamshidnezhad', 'amir jamshidnezhad')</td><td>E-mail: a.jamshidnejad@yahoo.com
+<br/>E-mail: jan@ftsm.ukm.my
+</td></tr><tr><td>adce9902dca7f4e8a9b9cf6686ec6a7c0f2a0ba6</td><td>Two Birds, One Stone: Jointly Learning Binary Code for
+<br/>Large-scale Face Image Retrieval and Attributes Prediction
+<br/>1Key Lab of Intelligent Information Processing of Chinese Academy of Sciences (CAS),
+<br/><b>Institute of Computing Technology, CAS, Beijing, 100190, China</b><br/><b>University of Chinese Academy of Sciences, Beijing, 100049, China</b><br/><b>School of Information Science and Technology, ShanghaiTech University, Shanghai, 200031, China</b></td><td>('38751558', 'Yan Li', 'yan li')<br/>('3373117', 'Ruiping Wang', 'ruiping wang')<br/>('3035576', 'Haomiao Liu', 'haomiao liu')<br/>('3371529', 'Huajie Jiang', 'huajie jiang')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('1710220', 'Xilin Chen', 'xilin chen')</td><td>{yan.li, haomiao.liu, huajie.jiang}@vipl.ict.ac.cn, {wangruiping, sgshan, xlchen}@ict.ac.cn
+</td></tr><tr><td>adf7ccb81b8515a2d05fd3b4c7ce5adf5377d9be</td><td>Apprentissage de métrique appliqué à la
+<br/>détection de changement de page Web et
+<br/>aux attributs relatifs
+<br/>thieu Cord*
+<br/>* Sorbonne Universités, UPMC Univ Paris 06, UMR 7606, LIP6, F-75005, Paris,
+<br/>France
+<br/>RÉSUMÉ. Nous proposons dans cet article un nouveau schéma d’apprentissage de métrique.
+<br/>Basé sur l’exploitation de contraintes qui impliquent des quadruplets d’images, notre approche
+<br/>vise à modéliser des relations sémantiques de similarités riches ou complexes. Nous étudions
+<br/>comment ce schéma peut être utilisé dans des contextes tels que la détection de régions impor-
+<br/>tantes dans des pages Web ou la reconnaissance à partir d’attributs relatifs.
+</td><td>('1728523', 'Nicolas Thome', 'nicolas thome')</td><td></td></tr><tr><td>ada73060c0813d957576be471756fa7190d1e72d</td><td>VRPBench: A Vehicle Routing Benchmark Tool
+<br/>October 19, 2016
+</td><td>('7660594', 'Guilherme A. Zeni', 'guilherme a. zeni')<br/>('7809605', 'Mauro Menzori', 'mauro menzori')<br/>('1788152', 'Luis A. A. Meira', 'luis a. a. meira')</td><td></td></tr><tr><td>add50a7d882eb38e35fe70d11cb40b1f0059c96f</td><td>High-Fidelity Pose and Expression Normalization for Face Recognition in the Wild
+<br/><b>Center for Biometrics and Security Research and National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences</b><br/>Pose and expression normalization is a crucial step to recover the canonical
+<br/>view of faces under arbitrary conditions, so as to improve the face recogni-
+<br/>tion performance. Most normalization algorithms can be divided in to 2D
+<br/>and 3D methods. 2D methods either estimate a flow to simulate the 3D
+<br/>geometry transformation or learn appearance transformations between dif-
+<br/>ferent poses. 3D methods estimate the depth information with a face model
+<br/>and normalize faces through 3D transformations.
+<br/>An ideal normalization is desired to preserve the face appearance with
+<br/>little artifact and information loss, which we call high-fidelity. However,
+<br/>most previous methods fail to satisfy that. In this paper, we present a 3D
+<br/>pose and expression normalization method to recover the canonical-view,
+<br/>expression-free image with high fidelity. It contains three components: pose
+<br/>adaptive 3D Morphable Model (3DMM) fitting, identity preserving normal-
+<br/>ization and invisible region filling, which is briefly summarized in Fig. 1.
+<br/>Figure 1: Overview of the High-Fidelity Pose and Expression Normalization
+<br/>(HPEN) method
+<br/>With an input image, the landmarks are detected with the face alignment
+<br/>algorithm and we mark the corresponding 3D landmarks on the face model.
+<br/>Then the 3DMM can be fitted by minimizing the distance between the 2D
+<br/>landmarks and projected 3D landmarks:
+<br/>arg
+<br/>min
+<br/>f ,R,t3d ,αid ,αexp
+<br/>(cid:107)s2d − f PR(S + Aidαid + Aexpαexp +t3d)(cid:107)
+<br/>(1)
+<br/>where αid is the shape parameter, αexp is the expression parameter. f ,R,t3d
+<br/>are pose parameters. However, when faces deviate from the frontal pose, the
+<br/>correspondence between 2D and 3D landmarks will be broken, which we
+<br/>model as “landmark marching”: when pose changes, the contour landmarks
+<br/>move along the parallel to the visibility boundary, see Fig. 2(a). To deal with
+<br/>the phenomenon we propose an approximation method to adjust contour
+<br/>landmarks during 3DMM fitting. The 3D model are firstly projected with
+<br/>only yaw and pitch to eliminate in-plane rotation. Then for each parallel, the
+<br/>point with extreme x coordinate will be chosen as the marching destimation,
+<br/>see Fig. 2(b).
+<br/>With the fitted 3DMM, The face can be normalized through 3D trans-
+<br/>formations. In this paper we also normalize the external face region which
+<br/>contains discriminative information as well. Firstly we mark three groups of
+<br/>anchors which are located on the face boundary, face surrounding and image
+<br/>contour, see Fig. 3(a). Then their depth are estimated by enlarging the fitted
+</td><td>('8362374', 'Xiangyu Zhu', 'xiangyu zhu')<br/>('1718623', 'Zhen Lei', 'zhen lei')<br/>('1721677', 'Junjie Yan', 'junjie yan')<br/>('1716143', 'Dong Yi', 'dong yi')<br/>('34679741', 'Stan Z. Li', 'stan z. li')</td><td></td></tr><tr><td>ad784332cc37720f03df1c576e442c9c828a587a</td><td>Face Recognition Based on Face-Specific Subspace
+<br/><b>JDL, Institute of Computing Technology, CAS, P.O. Box 2704, Beijing, China</b><br/><b>Harbin Institute of Technology, Harbin, China</b></td><td>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('1698902', 'Wen Gao', 'wen gao')<br/>('1725937', 'Debin Zhao', 'debin zhao')</td><td></td></tr><tr><td>ada42b99f882ba69d70fff68c9ccbaff642d5189</td><td>Semantic Image Segmentation
+<br/>and
+<br/>Web-Supervised Visual Learning
+<br/>D.Phil Thesis
+<br/>Robotics Research Group
+<br/>Department of Engineering Science
+<br/><b>University of Oxford</b><br/>Supervisors:
+<br/>Professor Andrew Zisserman
+<br/>Dr. Antonio Criminisi
+<br/>Florian Schroff
+<br/><b>St. Anne s College</b><br/>Trinity, 2009
+</td><td></td><td></td></tr><tr><td>ad0d4d5c61b55a3ab29764237cd97be0ebb0ddff</td><td>Weakly Supervised Action Localization by Sparse Temporal Pooling Network
+<br/><b>University of California</b><br/>Irvine, CA, USA
+<br/>Google
+<br/>Venice, CA, USA
+<br/><b>Seoul National University</b><br/>Seoul, Korea
+</td><td>('1998374', 'Phuc Nguyen', 'phuc nguyen')<br/>('40282288', 'Ting Liu', 'ting liu')<br/>('2775959', 'Gautam Prasad', 'gautam prasad')<br/>('40030651', 'Bohyung Han', 'bohyung han')</td><td>nguyenpx@uci.edu
+<br/>{liuti, gautamprasad}@google.com
+<br/>bhhan@snu.ac.kr
+</td></tr><tr><td>adfaf01773c8af859faa5a9f40fb3aa9770a8aa7</td><td>LARGE SCALE VISUAL RECOGNITION
+<br/>A DISSERTATION
+<br/>PRESENTED TO THE FACULTY
+<br/><b>OF PRINCETON UNIVERSITY</b><br/>IN CANDIDACY FOR THE DEGREE
+<br/>OF DOCTOR OF PHILOSOPHY
+<br/>RECOMMENDED FOR ACCEPTANCE
+<br/>BY THE DEPARTMENT OF
+<br/>COMPUTER SCIENCE
+<br/>ADVISER: FEI-FEI LI
+<br/>JUNE 2012
+</td><td>('8342699', 'JIA DENG', 'jia deng')</td><td></td></tr><tr><td>adf5caca605e07ee40a3b3408f7c7c92a09b0f70</td><td>Line-based PCA and LDA approaches for Face Recognition
+<br/><b>Kyung Hee University South of Korea</b></td><td>('1687579', 'Vo Dinh Minh Nhat', 'vo dinh minh nhat')<br/>('1700806', 'Sungyoung Lee', 'sungyoung lee')</td><td>{vdmnhat, sylee}@oslab.khu.ac.kr
+</td></tr><tr><td>adaf2b138094981edd615dbfc4b7787693dbc396</td><td>Statistical Methods For Facial
+<br/>Shape-from-shading and Recognition
+<br/>Submitted for the degree of Doctor of Philosophy
+<br/>Department of Computer Science
+<br/>20th February 2007
+</td><td>('1687021', 'William A. P. Smith', 'william a. p. smith')</td><td></td></tr><tr><td>ad6745dd793073f81abd1f3246ba4102046da022</td><td></td><td></td><td></td></tr><tr><td>ad9cb522cc257e3c5d7f896fe6a526f6583ce46f</td><td>Real-Time Recognition of Facial Expressions for Affective
+<br/>Computing Applications
+<br/>by
+<br/>A M. Eng. Project submitted in conformity with the requirements
+<br/>for the degree of Master of Engineering
+<br/>Department of Mechanical and Industrial Engineering
+<br/><b>University of Toronto</b></td><td>('26301224', 'Christopher Wang', 'christopher wang')<br/>('26301224', 'Christopher Wang', 'christopher wang')</td><td></td></tr><tr><td>ad08c97a511091e0f59fc6a383615c0cc704f44a</td><td>Towards the improvement of self-service
+<br/>systems via emotional virtual agents
+<br/>Christopher Martin
+<br/>School of Computing &
+<br/>Engineering Systems
+<br/><b>University of Abertay</b><br/>Bell Street, Dundee
+<br/>School of Computing &
+<br/>Engineering Systems
+<br/><b>University of Abertay</b><br/>Bell Street, Dundee
+<br/>School of Computing &
+<br/>Engineering Systems
+<br/><b>University of Abertay</b><br/>Bell Street, Dundee
+<br/>School of Social & Health
+<br/>Sciences
+<br/><b>University of Abertay</b><br/>Bell Street, Dundee
+<br/>Affective computing and emotional agents have been found to have a positive effect on human-
+<br/>computer interactions. In order to develop an acceptable emotional agent for use in a self-service
+<br/>interaction, two stages of research were identified and carried out; the first to determine which
+<br/>facial expressions are present in such an interaction and the second to determine which emotional
+<br/>agent behaviours are perceived as appropriate during a problematic self-service shopping task. In
+<br/>the first stage, facial expressions associated with negative affect were found to occur during self-
+<br/>service shopping interactions, indicating that facial expression detection is suitable for detecting
+<br/>negative affective states during self-service interactions. In the second stage, user perceptions of
+<br/>the emotional facial expressions displayed by an emotional agent during a problematic self-service
+<br/>interaction were gathered. Overall, the expression of disgust was found to be perceived as
+<br/>inappropriate while emotionally neutral behaviour was perceived as appropriate, however gender
+<br/>differences suggested that females perceived surprise as inappropriate. Results suggest that
+<br/>agents should change their behaviour and appearance based on user characteristics such as
+<br/>gender.
+<br/>Keywords: affective computing, virtual agents, emotions, emotion detection, HCI, computer vision, empathy.
+<br/>1. INTRODUCTION
+<br/>This paper describes research which contributes
+<br/>towards the development of an empathetic system
+<br/>which will detect and improve a user’s affective
+<br/>state during a problematic self-service interaction
+<br/>(SSI) through the use of an affective agent. Self-
+<br/>Service Technologies (SSTs) are those which allow
+<br/>a person to obtain goods or services from a retailer
+<br/>or service provider without the need for another
+<br/>person to be involved in the transaction. SSTs are
+<br/>used in many situations including high street shops,
+<br/>supermarkets and ticket kiosks. The use of SSTs
+<br/>may provide benefits such as improved customer
+<br/>service (for example allowing 24 hour a day, 7 days
+<br/>a week service),
+<br/>labour costs and
+<br/>improved efficiency (Cho & Fiorito, 2010). Less
+<br/>than 5% of causes for dissatisfaction with SST
+<br/>interactions were found to be the fault of the
+<br/>customer (Meuter et al., 2000; Pujari, 2004),
+<br/>indicating that there is a need for businesses and
+<br/>SST manufacturers to improve these interactions in
+<br/>order to reduce causes for dissatisfaction (Martin et
+<br/>al., unpublished). The frustration caused by a
+<br/>negative SSI can have a detrimental effect on a
+<br/>user’s behavioural intentions towards the retailer,
+<br/>impacting the likelihood the user will continue doing
+<br/>reduced
+<br/>business with them in the future and whether they
+<br/>will recommend them to other potential users (Lin &
+<br/>Hsieh, 2006; Johnson et al., 2008). By adopting
+<br/>affective computing practices in SSI design, such
+<br/>as giving computers the ability to detect and react
+<br/>intelligently to human emotions and to express their
+<br/>own simulated emotions, user experiences may be
+<br/>improved (Klein et al., 1999; Jaksic et al., 2006;
+<br/>Wang et al., 2009).
+<br/>Affective agents have been
+<br/>to reduce
+<br/>found
+<br/>frustration during human-computer
+<br/>interactions
+<br/>(HCIs) (Klein et al., 1999; Jaksic et al., 2006),
+<br/>therefore we are investigating their effectiveness at
+<br/>improving negative affective states in a SST user
+<br/>during a shopping scenario. We propose a system
+<br/>which will detect negative affective states in a user
+<br/>and express appropriate empathetic reactions
+<br/>using an affective virtual agent.
+<br/>Two stages of research were identified. The
+<br/>purpose of stage 1 (reported in Martin et al., in
+<br/>press) was to investigate whether emotional facial
+<br/>expressions are present during SST use,
+<br/>to
+<br/>determine whether a vision-based emotion detector
+<br/>would be suitable for this system. The purpose of
+<br/>stage 2 (reported in Martin et al., unpublished) was
+<br/>© The Authors. Published by BISL. Proceedings of the BCS HCI 2012 People & Computers XXVI, Birmingham, UK351Work In Progress </td><td>('11111134', 'Leslie Ball', 'leslie ball')<br/>('2529392', 'Jacqueline Archibald', 'jacqueline archibald')<br/>('33069212', 'Lloyd Carson', 'lloyd carson')</td><td>c.martin@abertay.ac.uk
+<br/>l.ball@abertay.ac.uk
+<br/>j.archibald @abertay.ac.uk
+<br/>l.carson@abertay.ac.uk
+</td></tr><tr><td>ad2339c48ad4ffdd6100310dcbb1fb78e72fac98</td><td>Video Fill In the Blank using LR/RL LSTMs with Spatial-Temporal Attentions
+<br/><b>Center for Research in Computer Vision, University of Central Florida, Orlando, FL</b></td><td>('33209161', 'Amir Mazaheri', 'amir mazaheri')<br/>('46335319', 'Dong Zhang', 'dong zhang')<br/>('1745480', 'Mubarak Shah', 'mubarak shah')</td><td>amirmazaheri@cs.ucf.edu, dzhang@cs.ucf.edu, shah@crcv.ucf.edu
+</td></tr><tr><td>ad247138e751cefa3bb891c2fe69805da9c293d7</td><td>American Journal of Networks and Communications
+<br/>2015; 4(4): 90-94
+<br/>Published online July 7, 2015 (http://www.sciencepublishinggroup.com/j/ajnc)
+<br/>doi: 10.11648/j.ajnc.20150404.12
+<br/>ISSN: 2326-893X (Print); ISSN: 2326-8964 (Online)
+<br/>A Novel Hybrid Method for Face Recognition Based on 2d
+<br/>Wavelet and Singular Value Decomposition
+<br/><b>Computer Engineering, Faculty of Engineering, Kharazmi University of Tehran, Tehran, Iran</b><br/><b>Islamic Azad University, Shahrood, Iran</b><br/>Email address:
+<br/>To cite this article:
+<br/>Decomposition. American Journal of Networks and Communications. Vol. 4, No. 4, 2015, pp. 90-94. doi: 10.11648/j.ajnc.20150404.12
+</td><td>('2653670', 'Vahid Haji Hashemi', 'vahid haji hashemi')<br/>('2153844', 'Abdorreza Alavi Gharahbagh', 'abdorreza alavi gharahbagh')<br/>('2653670', 'Vahid Haji Hashemi', 'vahid haji hashemi')<br/>('2153844', 'Abdorreza Alavi Gharahbagh', 'abdorreza alavi gharahbagh')</td><td>hajihashemi.vahid@yahoo.com (V. H. Hashemi), R_alavi@iau-shahrood.ac.ir (A. A. Gharahbagh)
+</td></tr><tr><td>adf62dfa00748381ac21634ae97710bb80fc2922</td><td>ViFaI: A trained video face indexing scheme
+<br/>1. Introduction
+<br/>With the increasing prominence of inexpensive
+<br/>video recording devices (e.g., digital camcorders and
+<br/>video recording smartphones),
+<br/>the average user’s
+<br/>video collection today is increasing rapidly. With this
+<br/>development, there arises a natural desire to rapidly
+<br/>access a subset of one’s collection of videos. The solu-
+<br/>tion to this problem requires an effective video index-
+<br/>ing scheme. In particular, we must be able to easily
+<br/>process a video to extract such indexes.
+<br/>Today, there also exist large sets of labeled (tagged)
+<br/>face images. One important example is an individual’s
+<br/>Facebook profile. Such a set of of tagged images of
+<br/>one’s self, family, friends, and colleagues represents
+<br/>an extremely valuable potential training set.
+<br/>In this work, we explore how to leverage the afore-
+<br/>mentioned training set to solve the video indexing
+<br/>problem.
+<br/>2. Problem Statement
+<br/>Use a labeled (tagged) training set of face images
+<br/>to extract relevant indexes from a collection of videos,
+<br/>and use these indexes to answer boolean queries of the
+<br/>form: “videos with ‘Person 1’ OP1 ‘Person 2’ OP2 ...
+<br/>OP(N-1) ‘Person N’ ”, where ‘Person N’ corresponds
+<br/>to a training label (tag) and OPN is a boolean operand
+<br/>such as AND, OR, NOT, XOR, and so on.
+<br/>3. Proposed Scheme
+<br/>In this section, we outline our proposed scheme to
+<br/>address the problem we postulate in the previous sec-
+<br/>tion. We provide further details about the system im-
+<br/>plementation in Section 4.
+<br/>At a high level, we subdivide the problem into two
+<br/>key phases: the first ”off-line” executed once, and the
+<br/>second ”on-line” phase instantiated upon each query.
+<br/>For the purposes of this work, we define an index as
+<br/>follows: <video id, tag, frame #>.
+<br/>3.1. The training phase
+<br/>We first outline Phase 1 (the training or “off-line”
+<br/>phase):
+<br/>1. Use the labeled training set plus an additional set
+<br/>of ‘other’ faces to compute the Fisher Linear Dis-
+<br/>criminant (FLD) [1].
+<br/>2. Project the training data onto the space defined by
+<br/>the eigenvectors returned by the FLD, and train
+<br/>a classifier (first nearest neighbour, then SVM if
+<br/>required) using the training features.
+<br/>3. Iterate through each frame of each video, detect-
+<br/>ing faces [2], classifying detected results, and add
+<br/>an index if the detected face corresponds to one of
+<br/>the labeled classes from the previous step.
+<br/>3.2. The query phase
+<br/>Now, we outline Phase 2 (the query or “on-line”
+<br/>phase):
+<br/>1. Key the indexes on their video id.
+<br/>2. For each video, evaluate the boolean query for the
+<br/>set of corresponding indexes.
+<br/>3. Keep videos for which the boolean query evalu-
+<br/>ates true, and discard those for which it evaluates
+<br/>false.
+<br/>4. Implementation Details
+<br/>We are implementing the project in C++, leverag-
+<br/>ing the OpenCV v2.2 framework [4]. In this section,
+<br/>we will highlight some of the critical implementation
+<br/>details of our proposed system.
+</td><td>('30006340', 'Nayyar', 'nayyar')<br/>('47384529', 'Audrey Wei', 'audrey wei')</td><td>hnayyar@stanford.edu
+<br/>awei1001@stanford.edu
+</td></tr><tr><td>bbc4b376ebd296fb9848b857527a72c82828fc52</td><td>Attributes for Improved Attributes
+<br/><b>University of Maryland</b><br/><b>College Park, MD</b></td><td>('3351637', 'Emily Hand', 'emily hand')</td><td>emhand@cs.umd.edu
+</td></tr><tr><td>bb489e4de6f9b835d70ab46217f11e32887931a2</td><td>Everything you wanted to know about Deep Learning for Computer Vision but were
+<br/>afraid to ask
+<br/>Moacir A. Ponti, Leonardo S. F. Ribeiro, Tiago S. Nazare
+<br/><b>ICMC University of S ao Paulo</b><br/>S˜ao Carlos/SP, 13566-590, Brazil
+<br/><b>CVSSP University of Surrey</b><br/>Guildford, GU2 7XH, UK
+<br/>tools,
+</td><td>('2227956', 'Tu Bui', 'tu bui')<br/>('10710438', 'John Collomosse', 'john collomosse')</td><td>Email: [ponti, leonardo.sampaio.ribeiro, tiagosn]@usp.br
+<br/>Email: [t.bui, j.collomosse]@surrey.ac.uk
+</td></tr><tr><td>bba281fe9c309afe4e5cc7d61d7cff1413b29558</td><td>Social Cognitive and Affective Neuroscience, 2017, 984–992
+<br/>doi: 10.1093/scan/nsx030
+<br/>Advance Access Publication Date: 11 April 2017
+<br/>Original article
+<br/>An unpleasant emotional state reduces working
+<br/>memory capacity: electrophysiological evidence
+<br/>1Laboratorio de Neurofisiologia do Comportamento, Departamento de Fisiologia e Farmacologia, Instituto
+<br/>Biome´dico, Universidade Federal Fluminense, Niteroi, Brazil, 2MograbiLab, Departamento de Psicologia,
+<br/>Pontifıcia Universidade Catolica do Rio de Janeiro, Rio de Janeiro, Brazil, and 3Laboratorio de Engenharia
+<br/>Pulmonar, Programa de Engenharia Biome´dica, COPPE, Universidade Federal do Rio de Janeiro, Rio de Janeiro, Brazil
+</td><td>('18129331', 'Jessica S. B. Figueira', 'jessica s. b. figueira')<br/>('2993713', 'Leticia Oliveira', 'leticia oliveira')<br/>('38252417', 'Mirtes G. Pereira', 'mirtes g. pereira')<br/>('18138365', 'Luiza B. Pacheco', 'luiza b. pacheco')<br/>('6891211', 'Isabela Lobo', 'isabela lobo')<br/>('5663717', 'Gabriel C. Motta-Ribeiro', 'gabriel c. motta-ribeiro')<br/>('1837214', 'Isabel A. David', 'isabel a. david')<br/>('1837214', 'Isabel A. David', 'isabel a. david')</td><td>Fluminense, Rua Hernani Pires de Mello, 101, Niteroi, RJ 24210-130, Brazil. E-mail: isabeldavid@id.uff.br.
+</td></tr><tr><td>bb557f4af797cae9205d5c159f1e2fdfe2d8b096</td><td></td><td></td><td></td></tr><tr><td>bb06ef67a49849c169781657be0bb717587990e0</td><td>Impact of Temporal Subsampling on Accuracy and
+<br/>Performance in Practical Video Classification
+<br/>F. Scheidegger∗†, L. Cavigelli∗, M. Schaffner∗, A. C. I. Malossi†, C. Bekas†, L. Benini∗‡
+<br/>∗ETH Zürich, 8092 Zürich, Switzerland
+<br/>†IBM Research - Zürich, 8803 Rüschlikon, Switzerland
+<br/>‡Università di Bologna, Italy
+</td><td></td><td></td></tr><tr><td>bb22104d2128e323051fb58a6fe1b3d24a9e9a46</td><td>IAJ=JE BH ==OIEI 1 AIIA?A ?= EBH=JE =EO B?KIAI  JDA IK>JA
+<br/>ABBA?JELAAII B KH =CHEJD
+<br/>==OIEI 7IK=O = B=?E= ANFHAIIE ==OIEI IOIJA ?J=EI JDHAA IJ=CAI B=?A =?GKE
+<br/>9DAJDAH KIEC *=OAIE= ?=IIEAH " & IKFFHJ LA?JH =?DEA 58  H AKH=
+<br/>HACEI E = IECA ?=IIEAH EI = ? IJH=JACO & 0MALAH J = ?= HACEI
+</td><td></td><td>)=OEC .=?E= -NFHAIIE >O .KIEC =EB@I
+<br/>9A;= +D=C1,2 +DK5C +DA1,3 =@ ;E2EC 0KC1,2,3
+<br/>11IJEJKJA B 1BH=JE 5?EA?A )?=@AE= 5EE?= 6=EM=
+<br/>2,AFJ B +FKJAH 5?EA?A =@ 1BH=JE -CEAAHEC =JE= 6=EM= 7ELAHIEJO
+<br/>3/H=@K=JA 1IJEJKJA B AJMHEC =@ KJEA@E= =JE= 6=EM= 7ELAHIEJO
+<br/>{wychang, song}@iis.sinica.edu.tw; hung@csie.ntu.edu.tw
+<br/>)>IJH=?J .A=JKHA HAFHAIAJ=JE =@ ?=IIE?=JE =HA JM =H EIIKAI E B=?E=
+<br/>ANFHAIIE ==OIEI 1 JDA F=IJ IJ AJD@I KIA@ AEJDAH DEIJE? H ?= HAFHA
+<br/>L=HE=JEI B ANFHAIIEI =@ DEIJE? HAFHAIAJ=JE IJHAIIAI  C>= @ELAHIE
+<br/>JEAI 6 J=A JDA =@L=J=CAI B >JD = DO>HE@ HAFHAIAJ=JE EI IKCCAIJA@ E JDEI
+<br/>F=FAH =@ =EB@ A=HEC EI =FFEA@ J ?D=H=?JAHEA C>= =@ ?= EBH=
+<br/>JE @EI?HEE=JELAO 7EA IA AJD@I KIEC KIKFAHLEIA@ =EB@ A=H
+<br/>EC =FFH=?DAI A>A@@A@ =EB@I B JDA DO>HE@ HAFHAIAJ=JE =HA A=HA@ >O
+<br/>=@FJEC = IKFAHLEIA@ =EB@ A=HEC JA?DEGKA 6 EJACH=JA JDAIA =EB@I
+<br/>ABBA?JELAO = BKIE ?=IIEAH EI EJH@K?A@ MDE?D ?= DAF J AFO IKEJ=>A
+<br/>?>E=JE MAECDJI B B=?E= ?FAJI J E@AJEBO = ANFHAIIE +FHADA
+<br/>IELA ?F=HEII  B=?E= ANFHAIIE HA?CEJE =HA E?K@A@ J @AIJH=JA JDA
+<br/> 1JH@K?JE
+<br/>4A=EEC DK= AJEI F=OI = EFHJ=J HA E DK= ?KE?=JE 6 IJK@O
+<br/>DK= >AD=LEH I?EAJE?=O =@ IOIJA=JE?=O AJE ==OIEI EI = EJHECKEC HA
+<br/>IA=H?D EIIKA E =O A@I K?D =JJAJE D=I >AA @H=M J JDEI JFE? E ?FKJAH
+<br/>LEIE =FFE?=JEI IK?D =I DK=?FKJAH EJAH=?JE H>J ?CEJE =@ >AD=LEH
+<br/>IEJE BA=JKHA ANJH=?JE =@ ?=IIE?=JE
+<br/>.H BA=JKHA ANJH=?JE = J B AJD@I D=LA >AA FHFIA@ 1 CAAH= IJ AJD
+<br/>@I HAFHAIAJ BA=JKHAI E AEJDAH DEIJE? H ?= M=OI 0EIJE? HAFHAIAJ=JE KIAI JDA
+<br/>MDA B=?A BH HAFHAIAJ=JE =@ B?KIAI  JDA B=?E= L=HE=JEI B C>= =FFA=H=?A
+<br/>1 ?JH=IJ ?= HAFHAIAJ=JE =@FJI ?= B=?E= HACEI H BA=JKHAI =@ CELAI =JJA
+<br/>JE J JDA IK>JA @ELAHIEJEAI  = B=?A 6DKCD IJ HA?AJ IJK@EAI D=LA >AA @EHA?JA@
+<br/>JM=H@I ?= HAFHAIAJ=JE % & C@ HAIA=H?D HAIKJI =HA IJE >J=EA@ >O KIEC
+<br/>DEIJE? =FFH=?D   0A?A EJ EI EJAHAIJEC J ANFEJ >JD B JDAEH >AAJI J @A
+<br/>LAF = DO>HE@ HAFHAIAJ=JE
+<br/>1 =@@EJE J BA=JKHA HAFHAIAJ=JE MA =I EJH@K?A = AJD@ BH ?=IIE?=JE
+<br/>AJMHI @EC = IJHC ?=IIEAH EI JDA ?HA E JDA ANEIJEC B=?E= ANFHAIIE ==O
+<br/>IEI IJK@EAI 1 JDA =FFH=?DAI JD=J =@FJ ?= B=?E= EBH=JE MAECDJEC JDAIA ?=
+</td></tr><tr><td>bbf28f39e5038813afd74cf1bc78d55fcbe630f1</td><td>Style Aggregated Network for Facial Landmark Detection
+<br/><b>University of Technology Sydney, 2 The University of Sydney</b></td><td>('9929684', 'Xuanyi Dong', 'xuanyi dong')<br/>('1685212', 'Yan Yan', 'yan yan')<br/>('3001348', 'Wanli Ouyang', 'wanli ouyang')<br/>('1698559', 'Yi Yang', 'yi yang')</td><td>{xuanyi.dong,yan.yan-3}@student.uts.edu.au;
+<br/>wanli.ouyang@sydney.edu.au; yi.yang@uts.edu.au
+</td></tr><tr><td>bbe1332b4d83986542f5db359aee1fd9b9ba9967</td><td></td><td></td><td></td></tr><tr><td>bbe949c06dc4872c7976950b655788555fe513b8</td><td>Automatic Frequency Band Selection for
+<br/>Illumination Robust Face Recognition
+<br/><b>Institute of Anthropomatics, Karlsruhe Institute of Technology, Germany</b></td><td>('1742325', 'Rainer Stiefelhagen', 'rainer stiefelhagen')</td><td>{ekenel,rainer.stiefelhagen}@kit.edu
+</td></tr><tr><td>bbcb4920b312da201bf4d2359383fb4ee3b17ed9</td><td>RESEARCH ARTICLE
+<br/>Robust Face Recognition via Multi-Scale
+<br/>Patch-Based Matrix Regression
+<br/><b>Institute of Advanced Technology, Nanjing University of Posts and Telecommunications, Nanjing</b><br/><b>China, 2 School of Computer Science and Engineering, Nanjing University of Science and Technology</b><br/><b>Nanjing, 210094, China, 3 School of Automation, Nanjing University of Posts and Telecommunications</b><br/><b>Nanjing, 210023, China, 4 School of Computer Science and Technology, Nanjing University of Posts and</b><br/>Telecommunications, Nanjing, 210023, China
+<br/>a11111
+</td><td>('3306402', 'Guangwei Gao', 'guangwei gao')<br/>('2700773', 'Jian Yang', 'jian yang')<br/>('1712078', 'Xiaoyuan Jing', 'xiaoyuan jing')<br/>('35919708', 'Pu Huang', 'pu huang')<br/>('3359690', 'Juliang Hua', 'juliang hua')<br/>('1742990', 'Dong Yue', 'dong yue')</td><td>* csggao@gmail.com
+</td></tr><tr><td>bb6bf94bffc37ef2970410e74a6b6dc44a7f4feb</td><td>Situation Recognition with Graph Neural Networks
+<br/>Supplementary Material
+<br/><b>Uber Advanced Technologies Group, 5Vector Institute</b><br/>We present additional analysis and results of our approach in the supplementary material. First, we analyze the verb
+<br/>prediction performance in Sec. 1. In Sec. 2, we present t-SNE [2] plots to visualize the verb and role embeddings. We present
+<br/>several examples of the influence of different roles on predicting the verb-frame correctly. This is visualized in Sec. 3 through
+<br/>propagation matrices similar to Fig. 7 of the main paper. Finally, in Sec. 4 we include several example predictions that our
+<br/>model makes.
+<br/>1. Verb Prediction
+<br/>We present the verb prediction accuracies for our fully-connected model on the development set in Fig. 1. The random
+<br/>performance is close to 0.2% (504 verbs). About 22% of all verbs are classified correctly over 50% of the time. These
+<br/>include taxiing, erupting, flossing, microwaving, etc. On the other hand, verbs such as attaching,
+<br/>making, placing can have very different image representations, and show prediction accuracies of less than 10%.
+<br/>Our model helps improve the role-noun predictions by sharing information across all roles. Nevertheless, if the verb is
+<br/>predicted incorrectly, the whole situation is treated as incorrect. Thus, verb prediction performance plays a crucial role.
+<br/>Figure 1. Verb prediction accuracy on the development set. Some verbs such as taxiing typically have a similar image (a plane on the
+<br/>tarmac), while verbs such as rubbing or twisting can have very different corresponding images.
+<br/>taxiinglappingretrievingflickingminingwaxingjugglingcurtsyingcommutingdancingcrushingreadingexaminingdousingdecomposingchoppingdrawingcryingcalmingsniffingmourningsubmergingtwistingcarvingrubbingaskingVerbs0102030405060708090100Accuracy (%) </td><td>('8139953', 'Ruiyu Li', 'ruiyu li')<br/>('2103464', 'Makarand Tapaswi', 'makarand tapaswi')<br/>('2246396', 'Renjie Liao', 'renjie liao')<br/>('1729056', 'Jiaya Jia', 'jiaya jia')<br/>('2422559', 'Raquel Urtasun', 'raquel urtasun')<br/>('37895334', 'Sanja Fidler', 'sanja fidler')<br/>('2043324', 'Hong Kong', 'hong kong')</td><td>ryli@cse.cuhk.edu.hk, {makarand,rjliao,urtasun,fidler}@cs.toronto.edu, leojia9@gmail.com
+</td></tr><tr><td>bb7f2c5d84797742f1d819ea34d1f4b4f8d7c197</td><td>TO APPEAR IN TPAMI
+<br/>From Images to 3D Shape Attributes
+</td><td>('1786435', 'David F. Fouhey', 'david f. fouhey')<br/>('1737809', 'Abhinav Gupta', 'abhinav gupta')<br/>('1688869', 'Andrew Zisserman', 'andrew zisserman')</td><td></td></tr><tr><td>bbf01aa347982592b3e4c9e4f433e05d30e71305</td><td></td><td></td><td></td></tr><tr><td>bbc5f4052674278c96abe7ff9dc2d75071b6e3f3</td><td>Nonlinear Hierarchical Part-based Regression for Unconstrained Face Alignment
+<br/>†NEC Laboratories America, Media Analytics
+<br/>‡Adobe Research
+<br/><b>cid:93)University of North Carolina at Charlotte</b><br/><b>Rutgers, The State University of New Jersey</b></td><td>('39960064', 'Xiang Yu', 'xiang yu')<br/>('1753384', 'Shaoting Zhang', 'shaoting zhang')<br/>('1711560', 'Dimitris N. Metaxas', 'dimitris n. metaxas')</td><td>xiangyu@nec-labs.com, zlin@adobe.com, szhang16@uncc.edu, dnm@cs.rutgers.edu
+</td></tr><tr><td>bbfe0527e277e0213aafe068113d719b2e62b09c</td><td>Dog Breed Classification Using Part Localization
+<br/><b>Columbia University</b><br/><b>University of Maryland</b></td><td>('2454675', 'Jiongxin Liu', 'jiongxin liu')<br/>('20615377', 'Angjoo Kanazawa', 'angjoo kanazawa')</td><td></td></tr><tr><td>bbf1396eb826b3826c5a800975047beabde2f0de</td><td></td><td></td><td></td></tr><tr><td>bb451dc2420e1a090c4796c19716f93a9ef867c9</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 104 – No.5, October 2014
+<br/>A Review on: Automatic Movie Character Annotation
+<br/>by Robust Face-Name Graph Matching
+<br/>Research Scholar
+<br/><b>Sinhgad College of</b><br/>Engineering, korti, Pandharpur,
+<br/><b>Solapur University, INDIA</b><br/>Gadekar P.R.
+<br/>Assistant Professor
+<br/><b>Sinhgad College of</b><br/>Engineering, korti, Pandharpur,
+<br/><b>Solapur University, INDIA</b><br/>Bandgar Vishal V.
+<br/>Assistant Professor
+<br/><b>College of Engineering (Poly</b><br/>Pandharpur, Solapur, INDIA
+<br/>Bhise Avdhut S.
+<br/>HOD, Department of
+<br/>Information Technology,
+<br/><b>College of Engineering (Poly</b><br/>Pandharpur, Solapur, INDIA
+</td><td></td><td></td></tr><tr><td>bbd1eb87c0686fddb838421050007e934b2d74ab</td><td></td><td></td><td></td></tr><tr><td>d73d2c9a6cef79052f9236e825058d5d9cdc1321</td><td>2014-ENST-0040
+<br/>EDITE - ED 130
+<br/>Doctorat ParisTech
+<br/>T H È S E
+<br/>pour obtenir le grade de docteur délivré par
+<br/>TELECOM ParisTech
+<br/>Spécialité « Signal et Images »
+<br/>présentée et soutenue publiquement par
+<br/>le 08 juillet 2014
+<br/>Cutting the Visual World into Bigger Slices for Improved Video
+<br/>Concept Detection
+<br/>Amélioration de la détection des concepts dans les vidéos par de plus grandes tranches du Monde
+<br/>Visuel
+<br/>Directeur de thèse : Bernard Mérialdo
+<br/>Jury
+<br/>M. Philippe-Henri Gosselin, Professeur, INRIA
+<br/>M. Georges Quénot, Directeur de recherche CNRS, LIG
+<br/>M. Georges Linares, Professeur, LIA
+<br/>M. François Brémond, Professeur, INRIA
+<br/>M. Bernard Mérialdo, Professeur, EURECOM
+<br/>Rapporteur
+<br/>Rapporteur
+<br/>Examinateur
+<br/>Examinateur
+<br/>Encadrant
+<br/>TELECOM ParisTech
+<br/>école de l’Institut Télécom - membre de ParisTech
+</td><td>('2135932', 'Usman Farrokh Niaz', 'usman farrokh niaz')</td><td></td></tr><tr><td>d794ffece3533567d838f1bd7f442afee13148fd</td><td>Hand Detection and Tracking in Videos
+<br/>for Fine-grained Action Recognition
+<br/><b>The University of Electro-Communications, Tokyo</b><br/>1-5-1 Chofugaoka, Chofu, Tokyo, 182-8585 Japan
+</td><td>('1681659', 'Keiji Yanai', 'keiji yanai')</td><td></td></tr><tr><td>d78077a7aa8a302d4a6a09fb9737ab489ae169a6</td><td></td><td></td><td></td></tr><tr><td>d7593148e4319df7a288180d920f2822eeecea0b</td><td>LIU, YU, FUNES-MORA, ODOBEZ: DIFFERENTIAL APPROACH FOR GAZE ESTIMATION 1
+<br/>A Differential Approach for Gaze
+<br/>Estimation with Calibration
+<br/><b>Idiap Research Institute</b><br/>2 Eyeware Tech SA
+<br/>Kenneth A. Funes-Mora 2
+</td><td>('1697913', 'Gang Liu', 'gang liu')<br/>('50133842', 'Yu Yu', 'yu yu')<br/>('1719610', 'Jean-Marc Odobez', 'jean-marc odobez')</td><td>gang.liu@idiap.ch
+<br/>yu.yu@idiap.ch
+<br/>kenneth@eyeware.tech
+<br/>odobez@idiap.ch
+</td></tr><tr><td>d7312149a6b773d1d97c0c2b847609c07b5255ec</td><td></td><td></td><td></td></tr><tr><td>d7fe2a52d0ad915b78330340a8111e0b5a66513a</td><td>Unpaired Photo-to-Caricature Translation on Faces in
+<br/>the Wild
+<br/><b>aNo. 238 Songling Road, Ocean University of</b><br/>China, Qingdao, China
+</td><td>('4670300', 'Ziqiang Zheng', 'ziqiang zheng')<br/>('50077564', 'Zhibin Yu', 'zhibin yu')<br/>('2336297', 'Haiyong Zheng', 'haiyong zheng')<br/>('49297407', 'Bing Zheng', 'bing zheng')</td><td></td></tr><tr><td>d7cbedbee06293e78661335c7dd9059c70143a28</td><td>MobileFaceNets: Efficient CNNs for Accurate Real-
+<br/>Time Face Verification on Mobile Devices
+<br/><b>School of Computer and Information Technology, Beijing Jiaotong University, Beijing</b><br/><b>Research Institute, Watchdata Inc., Beijing, China</b><br/>China
+</td><td>('39326372', 'Sheng Chen', 'sheng chen')<br/>('1681842', 'Yang Liu', 'yang liu')<br/>('46757550', 'Xiang Gao', 'xiang gao')<br/>('2765914', 'Zhen Han', 'zhen han')</td><td>{sheng.chen, yang.liu.yj, xiang.gao}@watchdata.com,
+<br/>zhan@bjtu.edu.cn
+</td></tr><tr><td>d7d9c1fa77f3a3b3c2eedbeb02e8e7e49c955a2f</td><td>Automating Image Analysis by Annotating Landmarks with Deep
+<br/>Neural Networks
+<br/>February 3, 2017
+<br/>Running head: Automatic Annotation of Landmarks
+<br/><b>Boston University, Boston, MA</b><br/><b>University of North Carolina at Chapel Hill, Chapel Hill, NC</b><br/>Keywords: automatic landmark localization, annotation, pose estimation, deep neural networks, hawkmoths
+<br/>Contents
+</td><td>('2025025', 'Mikhail Breslav', 'mikhail breslav')<br/>('1711465', 'Tyson L. Hedrick', 'tyson l. hedrick')<br/>('1749590', 'Stan Sclaroff', 'stan sclaroff')<br/>('1723703', 'Margrit Betke', 'margrit betke')</td><td></td></tr><tr><td>d708ce7103a992634b1b4e87612815f03ba3ab24</td><td>FCVID: Fudan-Columbia Video Dataset
+<br/>Available at: http://bigvid.fudan.edu.cn/FCVID/
+<br/>1 OVERVIEW
+<br/>Recognizing visual contents in unconstrained videos
+<br/>has become a very important problem for many ap-
+<br/>plications, such as Web video search and recommen-
+<br/>dation, smart content-aware advertising, robotics, etc.
+<br/>Existing datasets for video content recognition are
+<br/>either small or do not have reliable manual labels.
+<br/>In this work, we construct and release a new Inter-
+<br/>net video dataset called Fudan-Columbia Video Dataset
+<br/>(FCVID), containing 91,223 Web videos (total duration
+<br/>4,232 hours) annotated manually according to 239
+<br/>categories. We believe that the release of FCVID can
+<br/>stimulate innovative research on this challenging and
+<br/>important problem.
+<br/>2 COLLECTION AND ANNOTATION
+<br/>The categories in FCVID cover a wide range of topics
+<br/>like social events (e.g., “tailgate party”), procedural
+<br/>events (e.g., “making cake”), objects (e.g., “panda”),
+<br/>scenes (e.g., “beach”), etc. These categories were de-
+<br/>fined very carefully. Specifically, we conducted user
+<br/>surveys and used the organization structures on
+<br/>YouTube and Vimeo as references, and browsed nu-
+<br/>merous videos to identify categories that satisfy the
+<br/>following three criteria: (1) utility — high relevance
+<br/>in supporting practical application needs; (2) cover-
+<br/>age — a good coverage of the contents that people
+<br/>record; and (3) feasibility — likely to be automatically
+<br/>recognized in the next several years, and a high
+<br/>frequency of occurrence that is sufficient for training
+<br/>a recognition algorithm.
+<br/>This definition effort led to a set of over 250 candi-
+<br/>date categories. For each category, in addition to the
+<br/>official name used in the public release, we manually
+<br/>defined another alternative name. Videos were then
+<br/>downloaded from YouTube searches using the official
+<br/>and the alternative names as search terms. The pur-
+<br/>pose of using the alternative names was to expand the
+<br/>candidate video sets. For each search, we downloaded
+<br/>1,000 videos, and after removing duplicate videos and
+<br/>some extremely long ones (longer than 30 minutes),
+<br/>there were around 1,000–1,500 candidate videos for
+<br/>each category.
+<br/>All the videos were annotated manually to ensure
+<br/>a high precision of the FCVID labels. In order to min-
+<br/>imize subjectivity, nearly 20 annotators were involved
+<br/>in the task, and a master annotator was assigned to
+<br/>monitor the entire process and double-check all the
+<br/>found positive videos. Some of the videos are multi-
+<br/>labeled, and thus filtering the 1,000–1,500 videos for
+<br/>each category with focus on just the single category
+<br/>label is not adequate. As checking the existence of all
+<br/>the 250+ classes for each video is extremely difficult,
+<br/>we use the following strategy to narrow down the “la-
+<br/>bel search space” for each video. We first grouped the
+<br/>categories according to subjective predictions of label
+<br/>co-occurrences, e.g., “wedding reception” & “wed-
+<br/>ding ceremony”, “waterfall” & “river”, “hiking” &
+<br/>“mountain”, and even “dog” & “birthday”. We then
+<br/>annotated the videos not only based on the target cat-
+<br/>egory label, but also according to the identified related
+<br/>labels. This helped produce a fairly complete label
+<br/>set for FCVID, but largely reduced the annotation
+<br/>workload. After removing the rare categories with
+<br/>less than 100 videos after annotation, the final FCVID
+<br/>dataset contains 91,223 videos and 239 categories,
+<br/>where 183 are events and 56 are objects, scenes, etc.
+<br/>Figure 1 shows the number of videos per category.
+<br/>“Dog” has the largest number of positive videos
+<br/>(1,136), while “making egg tarts” is the most infre-
+<br/>quent category containing only 108 samples. The total
+<br/>duration of FCVID is 4,232 hours with an average
+<br/>video duration of 167 seconds. Figure 2 further gives
+<br/>the average video duration of each category.
+<br/>The categories are organized using a hierarchy con-
+<br/>taining 11 high-level groups, as visualized in Figure 3.
+<br/>3 COMPARISON WITH RELATED DATASETS
+<br/>We compare FCVID with the following datasets. Most
+<br/>of them have been widely adopted in the existing
+<br/>works on video categorization.
+<br/>KTH and Weizmann: The KTH [1] and the Weiz-
+<br/>mann [2] datasets are well-known benchmarks for
+<br/>human action recognition. The former contains 600
+<br/>videos of 6 human actions performed by 25 people
+<br/>in four scenarios, and the latter consists of 81 videos
+<br/>associated with 9 actions performed by 9 actors.
+<br/>Hollywood Human Action: The Hollywood
+<br/>dataset [3] contains 8 action classes collected from
+<br/>32 Hollywood movies with a total of 430 videos.
+</td><td>('1717861', 'Yu-Gang Jiang', 'yu-gang jiang')<br/>('3099139', 'Zuxuan Wu', 'zuxuan wu')<br/>('39811558', 'Jun Wang', 'jun wang')<br/>('1713721', 'Xiangyang Xue', 'xiangyang xue')<br/>('9546964', 'Shih-Fu Chang', 'shih-fu chang')</td><td></td></tr><tr><td>d78734c54f29e4474b4d47334278cfde6efe963a</td><td>Exploring Disentangled Feature Representation Beyond Face Identification
+<br/><b>CUHK-SenseTime Joint Lab, The Chinese University of Hong Kong</b><br/><b>SenseTime Group Limited, 3Peking University</b></td><td>('1715752', 'Yu Liu', 'yu liu')<br/>('22181490', 'Fangyin Wei', 'fangyin wei')<br/>('49895575', 'Jing Shao', 'jing shao')<br/>('37145669', 'Lu Sheng', 'lu sheng')<br/>('1721677', 'Junjie Yan', 'junjie yan')<br/>('31843833', 'Xiaogang Wang', 'xiaogang wang')</td><td>{yuliu,lsheng,xgwang}@ee.cuhk.edu.hk, weifangyin@pku.edu.cn,
+<br/>{shaojing,yanjunjie}@sensetime.com
+</td></tr><tr><td>d785fcf71cb22f9c33473cba35f075c1f0f06ffc</td><td>Learning Active Facial Patches for Expression Analysis
+<br/><b>Rutgers University, Piscataway, NJ</b><br/><b>Nanjing University of Information Science and Technology, Nanjing, 210044, China</b><br/><b>University of Texas at Arlington, Arlington, TX</b></td><td>('29803023', 'Lin Zhong', 'lin zhong')<br/>('1734954', 'Qingshan Liu', 'qingshan liu')<br/>('39606160', 'Peng Yang', 'peng yang')<br/>('40107085', 'Bo Liu', 'bo liu')<br/>('1768190', 'Junzhou Huang', 'junzhou huang')<br/>('1711560', 'Dimitris N. Metaxas', 'dimitris n. metaxas')</td><td>{linzhong,qsliu,peyang,lb507,dnm}@cs.rutgers.edu, Jzhuang@uta.edu
+</td></tr><tr><td>d79365336115661b0e8dbbcd4b2aa1f504b91af6</td><td>Variational methods for Conditional Multimodal
+<br/>Deep Learning
+<br/>Department of Computer Science and Automation
+<br/><b>Indian Institute of Science</b></td><td>('2686270', 'Gaurav Pandey', 'gaurav pandey')<br/>('2440174', 'Ambedkar Dukkipati', 'ambedkar dukkipati')</td><td>Email{gp88, ad@csa.iisc.ernet.in
+</td></tr><tr><td>d7b6bbb94ac20f5e75893f140ef7e207db7cd483</td><td>Griffith Research Online
+<br/>https://research-repository.griffith.edu.au
+<br/>Face Recognition across Pose: A
+<br/>Review
+<br/>Author
+<br/>Zhang, Paul, Gao, Yongsheng
+<br/>Published
+<br/>2009
+<br/>Journal Title
+<br/>Pattern Recognition
+<br/>DOI
+<br/>https://doi.org/10.1016/j.patcog.2009.04.017
+<br/>Copyright Statement
+<br/>Copyright 2009 Elsevier. This is the author-manuscript version of this paper. Reproduced in accordance
+<br/>with the copyright policy of the publisher. Please refer to the journal's website for access to the
+<br/>definitive, published version.
+<br/>Downloaded from
+<br/>http://hdl.handle.net/10072/30193
+</td><td></td><td></td></tr><tr><td>d78373de773c2271a10b89466fe1858c3cab677f</td><td></td><td></td><td></td></tr><tr><td>d78fbd11f12cbc194e8ede761d292dc2c02d38a2</td><td>(IJACSA) International Journal of Advanced Computer Science and Applications,
+<br/>Vol. 8, No. 10, 2017
+<br/>Enhancing Gray Scale Images for Face Detection
+<br/>under Unstable Lighting Condition
+<br/>Department of Mathematics and Computer Science,
+<br/>Faculty of Sciences, PO Box 67 Dschang
+<br/><b>University of Dschang, Cameroon</b><br/>DJIMELI TSAMENE Charly
+<br/>Department of Mathematics and Computer Science,
+<br/>Faculty of Sciences, PO Box 67 Dschang,
+<br/><b>University of Dschang, Cameroon</b><br/>techniques compared are:
+</td><td></td><td></td></tr><tr><td>d72973a72b5d891a4c2d873daeb1bc274b48cddf</td><td>A New Supervised Dimensionality Reduction Algorithm Using Linear
+<br/>Discriminant Analysis and Locality Preserving Projection
+<br/>School of Information Engineering
+<br/><b>Guangdong Medical College</b><br/>Dongguan, Guangdong, China
+<br/>School of Electronics and Information
+<br/><b>South China University of Technology</b><br/>Guangzhou, Guangdong, China
+</td><td>('2588058', 'DI ZHANG', 'di zhang')<br/>('20374749', 'YUN ZHAO', 'yun zhao')<br/>('31866339', 'MINGHUI DU', 'minghui du')</td><td> haihaiwenqi@163.com, zyun@gdmc.edu.cn
+<br/>ecmhdu@scut.edu.cn
+</td></tr><tr><td>d700aedcb22a4be374c40d8bee50aef9f85d98ef</td><td>Rethinking Spatiotemporal Feature Learning:
+<br/>Speed-Accuracy Trade-offs in Video Classification
+<br/>1 Google Research
+<br/><b>University of California San Diego</b></td><td>('1817030', 'Saining Xie', 'saining xie')<br/>('40559421', 'Chen Sun', 'chen sun')<br/>('1808244', 'Jonathan Huang', 'jonathan huang')<br/>('1736745', 'Zhuowen Tu', 'zhuowen tu')<br/>('1702318', 'Kevin Murphy', 'kevin murphy')</td><td></td></tr><tr><td>d7d166aee5369b79ea2d71a6edd73b7599597aaa</td><td>Fast Subspace Clustering Based on the
+<br/>Kronecker Product
+<br/><b>Beihang University 2Gri th University 3University of York, UK</b></td><td>('38840844', 'Lei Zhou', 'lei zhou')<br/>('3042223', 'Xiao Bai', 'xiao bai')<br/>('6820648', 'Xianglong Liu', 'xianglong liu')<br/>('40582215', 'Jun Zhou', 'jun zhou')<br/>('38987678', 'Hancock Edwin', 'hancock edwin')</td><td></td></tr><tr><td>d79f9ada35e4410cd255db39d7cc557017f8111a</td><td>Journal of Eye Movement Research
+<br/>7(3):3, 1-8
+<br/>Evaluation of accurate eye corner detection methods for gaze
+<br/>estimation
+<br/><b>Public University of Navarra, Spain</b><br/>Childrens National Medical Center, USA
+<br/><b>Public University of Navarra, Spain</b><br/><b>Public University of Navarra, Spain</b><br/>Accurate detection of iris center and eye corners appears to be a promising
+<br/>approach for low cost gaze estimation.
+<br/>In this paper we propose novel eye
+<br/>inner corner detection methods. Appearance and feature based segmentation
+<br/>approaches are suggested. All these methods are exhaustively tested on a realistic
+<br/>dataset containing images of subjects gazing at different points on a screen.
+<br/>We have demonstrated that a method based on a neural network presents the
+<br/>best performance even in light changing scenarios.
+<br/>In addition to this method,
+<br/>algorithms based on AAM and Harris corner detector present better accuracies
+<br/>than recent high performance face points tracking methods such as Intraface.
+<br/>Keywords: eye tracking, low cost, eye inner corner
+<br/>Introduction
+<br/>Research on eye detection and tracking has attracted
+<br/>much attention in the last decades. Since it is one of the
+<br/>most stable and representative features of the subject,
+<br/>eye detection is used in a great variety of applications,
+<br/>such as subject identification, human computer inter-
+<br/>action as shown in Morimoto and Mimica (2005) and
+<br/>gesture recognition as described by Tian, Kanade, and
+<br/>Cohn (2000) and Bailenson et al. (2008).
+<br/>Human computer interaction based on eye informa-
+<br/>tion is one of the most challenging research topics in
+<br/>the recent years. According to the literature, the first
+<br/>attempts to track the human gaze using cameras be-
+<br/>gan in 1974 as shown in the work by Merchant, Mor-
+<br/>rissette, and Porterfield (1974). Since then, and espe-
+<br/>cially in the last decades, much effort has been devoted
+<br/>to improving the performance of eye tracking systems.
+<br/>The availability of high performance eye tracking sys-
+<br/>tems has provided advances in fields such as usabil-
+<br/>ity research as described by Ellis, Candrea, Misner,
+<br/>Craig, and Lankford (1998) Poole and Ball (2005) and
+<br/>interaction for severely disabled people in works such
+<br/>as Bolt (1982), Starker and Bolt (1990) and Vertegaal
+<br/>(1999). Gaze tracking systems can be used to deter-
+<br/>mine the fixation point of an individual on a computer
+<br/>screen, which can in turn be used as a pointer to in-
+<br/>teract with the computer. Thus, severely disabled peo-
+<br/>ple who cannot communicate with their environment
+<br/>using alternative interaction tools can perform several
+<br/>tasks by means of their gaze. Performance limitations,
+<br/>such as head movement constraints, limit the employ-
+<br/>ment of the gaze trackers as interaction tools in other
+<br/>areas. Moreover, the limited market for eye tracking
+<br/>systems and the specialized hardware they employ, in-
+<br/>crease their prices. The eye tracking community has
+<br/>identified new application fields, such as video games
+<br/>or the automotive industry, as potential markets for the
+<br/>technology (Zhang, Bulling, & Gellersen, 2013). How-
+<br/>ever, simpler (i.e., lower cost) hardware is needed to
+<br/>reach these areas.
+<br/>Although web cams offer acceptable resolutions for
+<br/>eye tracking purposes, the optics used provide a wider
+<br/>field of view in which the whole face appears. By con-
+<br/>trast, most of the existing high-performance eye track-
+<br/>ing systems employ infrared illumination.
+<br/>Infrared
+<br/>light-emitting diodes provide a higher image quality
+<br/>and produce bright pixels in the image from infrared
+<br/>light reflections on the cornea named as glints. Al-
+<br/>though some works suggest the combination of light
+<br/>sources and web cams to track the eyes as described in
+<br/>Sigut and Sidha (2011), the challenge of low-cost sys-
+<br/>tems is to avoid the use of light sources to keep the sys-
+<br/>tems as simple as possible; hence, the image quality de-
+<br/>creases. High-performance eye tracking systems usu-
+<br/>ally combine glints and pupil information to compute
+<br/>the gaze position on the screen. Accurate pupil detec-
+<br/>tion is not feasible in web cam images, and most works
+<br/>on this topic focus on iris center. In order to improve
+<br/>accuracy, other elements such as eye corners or head
+<br/>position are necessary for gaze estimation applications,
+<br/>apart from the estimation of both irises. In the work by
+<br/>Ince and Yang (2009), they consider that the horizontal
+<br/>and vertical deviation of eye movements through eye-
+</td><td>('2592332', 'Jose Javier Bengoechea', 'jose javier bengoechea')<br/>('2595143', 'Juan J. Cerrolaza', 'juan j. cerrolaza')<br/>('2175923', 'Arantxa Villanueva', 'arantxa villanueva')<br/>('1752979', 'Rafael Cabeza', 'rafael cabeza')</td><td></td></tr><tr><td>d0e895a272d684a91c1b1b1af29747f92919d823</td><td>Classification of Mouth Action Units using Local Binary Patterns
+<br/><b>The American University in Cairo</b><br/>Department of Computer Science, AUC, AUC
+<br/>Avenue, P.O. Box 74 New Cairo 11835, Egypt
+<br/><b>The American University in Cairo</b><br/>Department of Computer Science, AUC, AUC
+<br/>Avenue, P.O. Box 74 New Cairo 11835, Egypt
+</td><td>('3298267', 'Sarah Adel Bargal', 'sarah adel bargal')<br/>('3337337', 'Amr Goneid', 'amr goneid')</td><td>s_bargal@aucegypt.edu
+<br/>goneid@aucegypt.edu
+</td></tr><tr><td>d082f35534932dfa1b034499fc603f299645862d</td><td>TAMING WILD FACES: WEB-SCALE, OPEN-UNIVERSE FACE IDENTIFICATION IN
+<br/>STILL AND VIDEO IMAGERY
+<br/>by
+<br/><b>B.S. University of Central Florida</b><br/><b>M.S. University of Central Florida</b><br/>A dissertation submitted in partial fulfilment of the requirements
+<br/>for the degree of Doctor of Philosophy
+<br/>in the Department of Electrical Engineering and Computer Science
+<br/><b>in the College of Engineering and Computer Science</b><br/><b>at the University of Central Florida</b><br/>Orlando, Florida
+<br/>Spring Term
+<br/>2014
+<br/>Major Professor: Mubarak Shah
+</td><td>('1873759', 'G. ORTIZ', 'g. ortiz')</td><td></td></tr><tr><td>d03265ea9200a993af857b473c6bf12a095ca178</td><td>Multiple deep convolutional neural
+<br/>networks averaging for face
+<br/>alignment
+<br/>Zhouping Yin
+<br/>Downloaded From: http://electronicimaging.spiedigitallibrary.org/ on 05/28/2015 Terms of Use: http://spiedl.org/terms </td><td>('7671296', 'Shaohua Zhang', 'shaohua zhang')<br/>('39584289', 'Hua Yang', 'hua yang')</td><td></td></tr><tr><td>d0ac9913a3b1784f94446db2f1fb4cf3afda151f</td><td>Exploiting Multi-modal Curriculum in Noisy Web Data for
+<br/>Large-scale Concept Learning
+<br/><b>School of Computer Science, Carnegie Mellon University, PA, USA</b><br/><b>School of Mathematics and Statistics, Xi an Jiaotong University, P. R. China</b></td><td>('1915796', 'Junwei Liang', 'junwei liang')<br/>('38782499', 'Lu Jiang', 'lu jiang')<br/>('1803714', 'Deyu Meng', 'deyu meng')</td><td>{junweil, lujiang, alex}@cs.cmu.edu, dymeng@mail.xjtu.edu.cn.
+</td></tr><tr><td>d0471d5907d6557cf081edf4c7c2296c3c221a38</td><td>A Constrained Deep Neural Network for Ordinal Regression
+<br/><b>Nanyang Technological University</b><br/>Rolls-Royce Advanced Technology Centre
+<br/>50 Nanyang Avenue, Singapore, 639798
+<br/>6 Seletar Aerospace Rise, Singapore, 797575
+</td><td>('47908585', 'Yanzhu Liu', 'yanzhu liu')<br/>('1799918', 'Chi Keong Goh', 'chi keong goh')</td><td>liuy0109@e.ntu.edu.sg, adamskong@ntu.edu.sg
+<br/>ChiKeong.Goh@Rolls-Royce.com
+</td></tr><tr><td>d0eb3fd1b1750242f3bb39ce9ac27fc8cc7c5af0</td><td></td><td></td><td></td></tr><tr><td>d00c335fbb542bc628642c1db36791eae24e02b7</td><td>Article
+<br/>Deep Learning-Based Gaze Detection System for
+<br/>Automobile Drivers Using a NIR Camera Sensor
+<br/><b>Division of Electronics and Electrical Engineering, Dongguk University, 30 Pildong-ro, 1-gil, Jung-gu</b><br/>Received: 5 January 2018; Accepted: 1 February 2018; Published: 3 February 2018
+</td><td>('8683310', 'Rizwan Ali Naqvi', 'rizwan ali naqvi')<br/>('15668895', 'Muhammad Arsalan', 'muhammad arsalan')<br/>('3407484', 'Ganbayar Batchuluun', 'ganbayar batchuluun')<br/>('40376380', 'Hyo Sik Yoon', 'hyo sik yoon')<br/>('4634733', 'Kang Ryoung Park', 'kang ryoung park')</td><td>Seoul 100-715, Korea; rizwanali@dongguk.edu (R.A.N.); arsal@dongguk.edu (M.A.);
+<br/>ganabata87@gmail.com (G.B.); yoonhs@dongguk.edu (H.S.Y.)
+<br/>* Correspondence: parkgr@dongguk.edu; Tel.: +82-10-3111-7022; Fax: +82-2-2277-8735
+</td></tr><tr><td>d06c8e3c266fbae4026d122ec9bd6c911fcdf51d</td><td>Role for 2D image generated 3D face models in the rehabilitation of facial palsy
+<br/><b>Northumbria University, Newcastle Upon-Tyne NE21XE, UK</b><br/>Published in Healthcare Technology Letters; Received on 4th April 2017; Revised on 7th June 2017; Accepted on 7th June 2017
+<br/>The outcome for patients diagnosed with facial palsy has been shown to be linked to rehabilitation. Dense 3D morphable models have been
+<br/>shown within the computer vision to create accurate representations of human faces even from single 2D images. This has the potential
+<br/>to provide feedback to both the patient and medical expert dealing with the rehabilitation plan. It is proposed that a framework for the
+<br/>creation and measuring of patient facial movement consisting of a hybrid 2D facial landmark fitting technique which shows better
+<br/>accuracy in testing than current methods and 3D model fitting.
+<br/>1. Introduction: Recent medical studies [1–3] have highlighted
+<br/>that patients diagnosed and treated with specific types of facial
+<br/>paralysis such as Bell’s palsy have outcomes that are directly
+<br/>linked to the rehabilitation provided. While various treatment and
+<br/>rehabilitation paths exist dependant on the specifics of the facial
+<br/>palsy diagnosis, the aim is to restore a degree of facial muscle
+<br/>movement
+<br/>[4] completed a
+<br/>comprehensive study over 5 years of the rehabilitation process
+<br/>and outcomes for 303 facial paralysis patients, the key finding
+<br/>was the need for specialised therapy plans tailored via feedback
+<br/>for the best patient outcomes. While Banks et al [5] have shown
+<br/>that quality qualitative feedback to a clinician is required for the
+<br/>best development of rehabilitation plans.
+<br/>to the patient. Lindsay et al
+<br/>Tracking and providing qualitative feedback on the progress
+<br/>of rehabilitation for a patient is an area where the application of
+<br/>computer vision and machine learning techniques could prove to
+<br/>be highly beneficial. Computer vision methods can provide the
+<br/>capability of capturing accurate 3D models of the human face
+<br/>these in turn can be leveraged to analyse and measure changes in
+<br/>face shape and levels of motion [6].
+<br/>Applying 3D face modelling techniques in an automated
+<br/>framework for
+<br/>tracking facial palsy rehabilitation progression
+<br/>has a number of potential benefits. 3D face models generated
+<br/>from a 2D face image can provide a detailed topography of an
+<br/>individual human face which can be qualitatively measured for
+<br/>change over time by a computer system. Potential benefits of
+<br/>such an automated system include providing the clinician
+<br/>dealing with a patients rehabilitation to gather regular objective
+<br/>feedback on the condition and tailor therapy without always
+<br/>needing to physically see the patient or providing continuity of
+<br/>care if for instance the clinician changes during the rehabilitation
+<br/>period. Patients will have a visual evidence in which to see the
+<br/>progress that has been made. It has been indicated that patients
+<br/>suffering from facial palsy can also be affected by psychol-
+<br/>ogical and social problems the capacity to track rehabilitation pri-
+<br/>vately within a comfortable setting like their own home may be
+<br/>of benefit.
+<br/>Some previous studies [7] have looked at the process of aiding
+<br/>diagnosis through the application of computer vision techniques
+<br/>these have been limited to 2D imaging which measure on a spare
+<br/>set of landmarks. The hypothesis is that 3D face modelling consist-
+<br/>ing of thousands of landmarks provides a far richer model of the
+<br/>face which in turn can present a more accurate measurement
+<br/>system for facial motion.
+<br/>In this Letter we propose a framework applicable for accurate
+<br/>generation of 3D face models of facial palsy patients from 2D
+<br/>images applying state-of-the-art methods and a proposed method
+<br/>Healthcare Technology Letters, 2017, Vol. 4, Iss. 4, pp. 145–148
+<br/>doi: 10.1049/htl.2017.0023
+<br/>Fig. 1 2D face alignment of 68 landmarks on a facial image which displays
+<br/>asymmetric movement, like that of a patient suffering from facial palsy
+<br/>of using geometrical features to track rehabilitation and present
+<br/>our conclusions.
+<br/>2. Proposed system overview: The accuracy of
+<br/>the facial
+<br/>representation is a key components of any computer-based system
+<br/>which aims to measure facial motion. We suggest that the more
+<br/>complex a depiction of the individuals patient facial topography
+<br/>the greater the potential
+<br/>is for the desired level of accuracy.
+<br/>Developing such a system requires a framework of methods to
+<br/>build and measure such a model.
+<br/>As camera systems which perceive depth within an image are not
+<br/>currently common place or require specialist and expensive hard-
+<br/>ware initially we require a method for face detection and 2D face
+<br/>145
+<br/>This is an open access article published by the IET under the
+<br/>Creative Commons Attribution License (http://creativecommons.
+<br/>org/licenses/by/3.0/)
+</td><td>('12667800', 'Gary Storey', 'gary storey')<br/>('40618413', 'Richard Jiang', 'richard jiang')<br/>('1690116', 'Ahmed Bouridane', 'ahmed bouridane')</td><td>✉ E-mail: gary.storey@northumbria.ac.uk
+</td></tr><tr><td>d074b33afd95074d90360095b6ecd8bc4e5bb6a2</td><td>December 11, 2007
+<br/>12:8 WSPC/INSTRUCTION FILE
+<br/>bauer-2007-ijhr
+<br/>International Journal of Humanoid Robotics
+<br/>c(cid:13) World Scientific Publishing Company
+<br/>Human-Robot Collaboration: A Survey
+<br/><b>Institute of Automatic Control Engineering (LSR</b><br/>Technische Universit¨at M¨unchen
+<br/>80290 Munich
+<br/>Germany
+<br/>Received 01.05.2007
+<br/>Revised 29.09.2007
+<br/>Accepted Day Month Year
+<br/>As robots are gradually leaving highly structured factory environments and moving into
+<br/>human populated environments, they need to possess more complex cognitive abilities.
+<br/>They do not only have to operate efficiently and safely in natural, populated environ-
+<br/>ments, but also be able to achieve higher levels of cooperation and communication with
+<br/>humans. Human-robot collaboration (HRC) is a research field with a wide range of ap-
+<br/>plications, future scenarios, and potentially a high economic impact. HRC is an interdis-
+<br/>ciplinary research area comprising classical robotics, cognitive sciences, and psychology.
+<br/>This article gives a survey of the state of the art of human-robot collaboration. Es-
+<br/>tablished methods for intention estimation, action planning, joint action, and machine
+<br/>learning are presented together with existing guidelines to hardware design. This article
+<br/>is meant to provide the reader with a good overview of technologies and methods for
+<br/>HRC.
+<br/>Keywords: Human-robot collaboration; intention estimation; action planning; machine
+<br/>learning.
+<br/>1. Introduction
+<br/>Human-robot Collaboration (HRC) is a wide research field with a high economic
+<br/>impact. Robots have already started moving out of laboratory and manufacturing
+<br/>environments into more complex human working environments such as homes, of-
+<br/>fices, hospitals and even outer space. HRC is already used in elderly care1, space
+<br/>applications2, and rescue robotics3. The design of robot behaviour, appearance,
+<br/>cognitive, and social skills is highly challenging, and requires interdisciplinary co-
+<br/>operation between classical robotics, cognitive sciences, and psychology. Humans as
+<br/>nondeterministic factors make cognitive sciences and artificial intelligence important
+<br/>research fields in HRC.
+<br/>This article refers to human-robot collaboration as opposed to human-robot in-
+<br/>teraction (HRI) as these two terms hold different meanings4. Interaction is a more
+<br/><b>general term, including collaboration. Interaction determines action on someone</b></td><td>('1749896', 'Dirk Wollherr', 'dirk wollherr')<br/>('1732126', 'Martin Buss', 'martin buss')</td><td>ab@tum.de; dw@tum.de; mb@tum.de
+</td></tr><tr><td>d04d5692461d208dd5f079b98082eda887b62323</td><td>Subspace learning with frequency regularizer: its application to face recognition
+<br/>Center for Biometrics and Security Research & National Laboratory of Pattern Recognition,
+<br/><b>Institute of Automation, Chinese Academy of Sciences</b><br/>95 Zhongguancun Donglu, Beijing 100190, China.
+</td><td>('1704114', 'Xiangsheng Huang', 'xiangsheng huang')<br/>('34679741', 'Stan Z. Li', 'stan z. li')<br/>('1718623', 'Zhen Lei', 'zhen lei')<br/>('1716143', 'Dong Yi', 'dong yi')</td><td>{zlei,dyi,szli}@cbsr.ia.ac.cn, xiangsheng.huang@ia.ac.cn
+</td></tr><tr><td>d05513c754966801f26e446db174b7f2595805ba</td><td>Everything is in the Face? Represent Faces with
+<br/>Object Bank
+<br/>1Key Lab of Intelligent Information Processing of Chinese Academy of Sciences
+<br/><b>CAS), Institute of Computing Technology, CAS, Beijing, 100190, China</b><br/><b>School of Computer Science, Carnegie Mellon University, PA 15213, USA</b><br/><b>University of Chinese Academy of Sciences, Beijing 100049, China</b></td><td>('1731144', 'Xin Liu', 'xin liu')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('1688086', 'Shaoxin Li', 'shaoxin li')<br/>('7661726', 'Alexander G. Hauptmann', 'alexander g. hauptmann')</td><td>{xin.liu, shiguang.shan, shaoxin.li}@vipl.ict.ac.cn, alex@cs.cmu.edu;
+</td></tr><tr><td>d0509afe9c2c26fe021889f8efae1d85b519452a</td><td>Visual Psychophysics for Making Face
+<br/>Recognition Algorithms More Explainable
+<br/><b>University of Notre Dame, Notre Dame, IN, 46556, USA</b><br/><b>Perceptive Automata, Inc</b><br/><b>Harvard University, Cambridge, MA 02138, USA</b></td><td>('3849184', 'Brandon RichardWebster', 'brandon richardwebster')<br/>('40901458', 'So Yon Kwon', 'so yon kwon')<br/>('40896426', 'Christopher Clarizio', 'christopher clarizio')<br/>('2503235', 'Samuel E. Anthony', 'samuel e. anthony')<br/>('2613438', 'Walter J. Scheirer', 'walter j. scheirer')</td><td></td></tr><tr><td>d03baf17dff5177d07d94f05f5791779adf3cd5f</td><td></td><td></td><td></td></tr><tr><td>d0144d76b8b926d22411d388e7a26506519372eb</td><td>Improving Regression Performance with Distributional Losses
+</td><td>('29905816', 'Ehsan Imani', 'ehsan imani')</td><td></td></tr><tr><td>d02e27e724f9b9592901ac1f45830341d37140fe</td><td>DA-GAN: Instance-level Image Translation by Deep Attention Generative
+<br/>Adversarial Networks
+<br/>The State Universtiy of New York at Buffalo
+<br/>The State Universtiy of New York at Buffalo
+<br/>Microsoft Research
+<br/>Microsoft Research
+</td><td>('2327045', 'Shuang Ma', 'shuang ma')<br/>('1735257', 'Chang Wen Chen', 'chang wen chen')<br/>('3247966', 'Jianlong Fu', 'jianlong fu')<br/>('1724211', 'Tao Mei', 'tao mei')</td><td>shuangma@buffalo.edu
+<br/>chencw@buffalo.edu
+<br/>jianf@microsoft.com
+<br/>tmei@microsoft.com
+</td></tr><tr><td>d02b32b012ffba2baeb80dca78e7857aaeececb0</td><td>Human Pose Estimation: Extension and Application
+<br/>Thesis submitted in partial fulfillment
+<br/>of the requirements for the degree of
+<br/>Master of Science (By Research)
+<br/>in
+<br/>Computer Science and Engineering
+<br/>by
+<br/>201002052
+<br/>Center for Visual Information Technology
+<br/><b>International Institute of Information Technology</b><br/>Hyderabad - 500 032, INDIA
+<br/>September 2016
+</td><td>('50226534', 'Digvijay Singh', 'digvijay singh')</td><td>digvijay.singh@research.iiit.ac.in
+</td></tr><tr><td>d0a21f94de312a0ff31657fd103d6b29db823caa</td><td>Facial Expression Analysis
+</td><td>('1707876', 'Fernando De la Torre', 'fernando de la torre')<br/>('1737918', 'Jeffrey F. Cohn', 'jeffrey f. cohn')</td><td></td></tr><tr><td>d03e4e938bcbc25aa0feb83d8a0830f9cd3eb3ea</td><td>Face Recognition with Patterns of Oriented
+<br/>Edge Magnitudes
+<br/>1 Vesalis Sarl, Clermont Ferrand, France
+<br/>2 Gipsa-lab, Grenoble INP, France
+</td><td>('35083213', 'Ngoc-Son Vu', 'ngoc-son vu')<br/>('1788869', 'Alice Caplier', 'alice caplier')</td><td></td></tr><tr><td>d0d7671c816ed7f37b16be86fa792a1b29ddd79b</td><td>Exploring Semantic Inter-Class Relationships (SIR)
+<br/>for Zero-Shot Action Recognition
+<br/><b>Institute for Interdisciplinary Information Sciences, Tsinghua University, Beijing, China</b><br/><b>Centre for Quantum Computation and Intelligent Systems, University of Technology Sydney, Sydney, Australia</b><br/><b>School of Computer Science, Carnegie Mellon University, Pittsburgh, USA</b><br/><b>College of Computer Science, Zhejiang University, Zhejiang, China</b></td><td>('2551285', 'Chuang Gan', 'chuang gan')<br/>('2735055', 'Ming Lin', 'ming lin')<br/>('39033919', 'Yi Yang', 'yi yang')<br/>('1755711', 'Yueting Zhuang', 'yueting zhuang')<br/>('7661726', 'Alexander G. Hauptmann', 'alexander g. hauptmann')</td><td>ganchuang1990@gmail.com, linming04@gmail.com,
+<br/>yiyang@cs.cmu.edu, yzhuang@zju.edu.cn, alex@cs.cmu.edu
+</td></tr><tr><td>d01303062b21cd9ff46d5e3ff78897b8499480de</td><td>Multi-task Learning by Maximizing Statistical Dependence
+<br/><b>University of Bath</b><br/><b>University of Bath</b><br/><b>University of Bath</b></td><td>('51013428', 'Youssef A. Mejjati', 'youssef a. mejjati')<br/>('1792288', 'Darren Cosker', 'darren cosker')<br/>('1808255', 'Kwang In Kim', 'kwang in kim')</td><td></td></tr><tr><td>d02c54192dbd0798b43231efe1159d6b4375ad36</td><td>3D Reconstruction and Face Recognition Using Kernel-Based
+<br/> ICA and Neural Networks
+<br/>Dept. of Electrical Dept. of CSIE Dept. of CSIE
+<br/><b>Engineering Chaoyang University Nankai Institute of</b><br/><b>National University of Technology Technology</b></td><td>('1734467', 'Cheng-Jian Lin', 'cheng-jian lin')<br/>('1759040', 'Chi-Yung Lee', 'chi-yung lee')</td><td> of Kaohsiung s9527618@cyut.edu.tw cylee@nkc.edu.tw
+<br/>cjlin@nuk.edu.tw
+</td></tr><tr><td>d00787e215bd74d32d80a6c115c4789214da5edb</td><td>Faster and Lighter Online
+<br/>Sparse Dictionary Learning
+<br/>Project report
+</td><td>('2714145', 'Jeremias Sulam', 'jeremias sulam')</td><td></td></tr><tr><td>d0f54b72e3a3fe7c0e65d7d5a3b30affb275f4c5</td><td>Towards Universal Representation for Unseen Action Recognition
+<br/><b>University of California, Merced</b><br/><b>Open Lab, School of Computing, Newcastle University, UK</b><br/><b>Inception Institute of Arti cial Intelligence (IIAI), Abu Dhabi, UAE</b></td><td>('1749901', 'Yi Zhu', 'yi zhu')<br/>('50363618', 'Yang Long', 'yang long')<br/>('1735787', 'Yu Guan', 'yu guan')<br/>('40799321', 'Ling Shao', 'ling shao')</td><td></td></tr><tr><td>be8c517406528edc47c4ec0222e2a603950c2762</td><td>Harrigan / The new handbook of methods in nonverbal behaviour research 02-harrigan-chap02 Page Proof page 7
+<br/>17.6.2005
+<br/>5:45pm
+<br/>B A S I C R E S E A RC H
+<br/>M E T H O D S A N D
+<br/>P RO C E D U R E S
+</td><td></td><td></td></tr><tr><td>beb3fd2da7f8f3b0c3ebceaa2150a0e65736d1a2</td><td>RESEARCH PAPER
+<br/>International Journal of Recent Trends in Engineering Vol 1, No. 1, May 2009,
+<br/>Adaptive Histogram Equalization and Logarithm
+<br/>Transform with Rescaled Low Frequency DCT
+<br/>Coefficients for Illumination Normalization
+<br/>Department of Computer Science and Engineering
+<br/>Amity School of Engineering Technology, 580, Bijwasan, New Delhi-110061, India
+<br/><b>Affiliated to Guru Gobind Singh Indraprastha University, Delhi, India</b><br/>illumination normalization. The
+<br/>lighting conditions. Most of the
+</td><td>('2650871', 'Virendra P. Vishwakarma', 'virendra p. vishwakarma')<br/>('2100294', 'Sujata Pandey', 'sujata pandey')</td><td>Email: vpvishwakarma@aset.amity.edu
+</td></tr><tr><td>be86d88ecb4192eaf512f29c461e684eb6c35257</td><td>Automatic Attribute Discovery and
+<br/>Characterization from Noisy Web Data
+<br/><b>Stony Brook University, Stony Brook NY 11794, USA</b><br/><b>Columbia University, New York NY 10027, USA</b><br/><b>University of California, Berkeley, Berkeley CA 94720, USA</b></td><td>('1685538', 'Tamara L. Berg', 'tamara l. berg')<br/>('39668247', 'Alexander C. Berg', 'alexander c. berg')<br/>('9676096', 'Jonathan Shih', 'jonathan shih')</td><td>tlberg@cs.sunysb.edu,
+<br/>aberg@cs.columbia.edu,
+<br/>jmshih@berkeley.edu.
+</td></tr><tr><td>be48b5dcd10ab834cd68d5b2a24187180e2b408f</td><td>FOR PERSONAL USE ONLY
+<br/>Constrained Low-rank Learning Using Least
+<br/>Squares Based Regularization
+</td><td>('2420746', 'Ping Li', 'ping li')<br/>('1720236', 'Jun Yu', 'jun yu')<br/>('48958393', 'Meng Wang', 'meng wang')<br/>('1763785', 'Luming Zhang', 'luming zhang')<br/>('1724421', 'Deng Cai', 'deng cai')<br/>('50080046', 'Xuelong Li', 'xuelong li')</td><td></td></tr><tr><td>beb49072f5ba79ed24750108c593e8982715498e</td><td>STUDENT, PROF, COLLABORATOR: BMVC AUTHOR GUIDELINES
+<br/>GeneGAN: Learning Object Transfiguration
+<br/>and Attribute Subspace from Unpaired Data
+<br/>1 Megvii Inc.
+<br/>Beijing, China
+<br/>2 Department of Information Science,
+<br/>School of Mathematical Sciences,
+<br/><b>Peking University</b><br/>Beijing, China
+</td><td>('35132667', 'Shuchang Zhou', 'shuchang zhou')<br/>('14002400', 'Taihong Xiao', 'taihong xiao')<br/>('1698559', 'Yi Yang', 'yi yang')<br/>('7841666', 'Dieqiao Feng', 'dieqiao feng')<br/>('8159691', 'Qinyao He', 'qinyao he')<br/>('2416953', 'Weiran He', 'weiran he')</td><td>shuchang.zhou@gmail.com
+<br/>xiaotaihong@pku.edu.cn
+<br/>yangyi@megvii.com
+<br/>fdq@megvii.com
+<br/>hqy@megvii.com
+<br/>hwr@megvii.com
+</td></tr><tr><td>be4a20113bc204019ea79c6557a0bece23da1121</td><td>DeepCache: Principled Cache for Mobile Deep Vision
+<br/>We present DeepCache, a principled cache design for deep learning
+<br/>inference in continuous mobile vision. DeepCache benefits model
+<br/>execution efficiency by exploiting temporal locality in input video
+<br/>streams. It addresses a key challenge raised by mobile vision: the
+<br/>cache must operate under video scene variation, while trading off
+<br/>among cacheability, overhead, and loss in model accuracy. At the
+<br/>input of a model, DeepCache discovers video temporal locality by ex-
+<br/>ploiting the video’s internal structure, for which it borrows proven
+<br/>heuristics from video compression; into the model, DeepCache prop-
+<br/>agates regions of reusable results by exploiting the model’s internal
+<br/>structure. Notably, DeepCache eschews applying video heuristics to
+<br/>model internals which are not pixels but high-dimensional, difficult-
+<br/>to-interpret data.
+<br/>Our implementation of DeepCache works with unmodified deep
+<br/>learning models, requires zero developer’s manual effort, and is
+<br/>therefore immediately deployable on off-the-shelf mobile devices.
+<br/>Our experiments show that DeepCache saves inference execution
+<br/>time by 18% on average and up to 47%. DeepCache reduces system
+<br/>energy consumption by 20% on average.
+<br/>CCS Concepts: • Human-centered computing → Ubiquitous
+<br/>and mobile computing; • Computing methodologies → Com-
+<br/>puter vision tasks;
+<br/>Additional Key Words and Phrases: Deep Learning; Mobile Vision;
+<br/>Cache
+<br/>INTRODUCTION
+<br/>With ubiquitous cameras on mobile and wearable devices,
+<br/>continuous mobile vision emerges to enable a variety of com-
+<br/><b>pelling applications, including cognitive assistance [29], life</b><br/>style monitoring [61], and street navigation [27]. To support
+<br/>continuous mobile vision, Convolutional Neural Network
+<br/>2018. XXXX-XXXX/2018/9-ART $15.00
+<br/>https://doi.org/10.1145/3241539.3241563
+<br/>Fig. 1. The overview of DeepCache.
+<br/>(CNN) is recognized as the state-of-the-art algorithm: a soft-
+<br/>ware runtime, called deep learning engine, ingests a continu-
+<br/>ous stream of video images1; for each input frame the engine
+<br/>executes a CNN model as a cascade of layers, produces in-
+<br/>termediate results called feature maps, and outputs inference
+<br/>results. Such CNN executions are known for their high time
+<br/>and space complexity, stressing resource-constrained mobile
+<br/>devices. Although CNN execution can be offloaded to the
+<br/>cloud [2, 34], it becomes increasingly compelling to execute
+<br/>CNNs on device [27, 44, 52], which ensures fast inference, pre-
+<br/>serves user privacy, and remains unaffected by poor Internet
+<br/>connectivity.
+<br/>To afford costly CNN on resource-constrained mobile/wear-
+<br/>able devices, we set to exploit a mobile video stream’s tempo-
+<br/>ral locality, i.e., rich information redundancy among consec-
+<br/>utive video frames [27, 51, 52]. Accordingly, a deep learning
+<br/>engine can cache results when it executes CNN over a mo-
+<br/>bile video, by using input frame contents as cache keys and
+<br/>inference results as cache values. Such caching is expected
+<br/>to reduce the engine’s resource demand significantly.
+<br/>Towards effective caching and result reusing, we face two
+<br/>major challenges. 1) Reusable results lookup: Classic caches,
+<br/>e.g., the web browser cache, look up cached values (e.g., web
+<br/>pages) based on key equivalence (e.g., identical URLs). This
+<br/>does not apply to a CNN cache: its keys, i.e., mobile video
+<br/>contents, often undergo moderate scene variation over time.
+<br/>The variation is caused by environmental changes such as
+<br/>1We refer to them as a mobile video stream in the remainder of the paper.
+<br/>, Vol. 1, No. 1, Article . Publication date: September 2018.
+</td><td>('2529558', 'Mengwei Xu', 'mengwei xu')<br/>('46694806', 'Mengze Zhu', 'mengze zhu')<br/>('3180228', 'Yunxin Liu', 'yunxin liu')<br/>('1774176', 'Felix Xiaozhu Lin', 'felix xiaozhu lin')<br/>('8016688', 'Xuanzhe Liu', 'xuanzhe liu')<br/>('8016688', 'Xuanzhe Liu', 'xuanzhe liu')<br/>('2529558', 'Mengwei Xu', 'mengwei xu')</td><td>xumengwei@pku.edu.cn; Mengze Zhu, Peking University, MoE, Beijing,
+<br/>China, zhumz@pku.edu.cn; Yunxin Liu, Microsoft Research, Beijing, China,
+<br/>yunxin.liu@microsoft.com; Felix Xiaozhu Lin, Purdue ECE, West Lafayette,
+<br/>Indiana, USA, xzl@purdue.edu; Xuanzhe Liu, Peking University, MoE, Bei-
+<br/>jing, China, xzl@pku.edu.cn.
+</td></tr><tr><td>becd5fd62f6301226b8e150e1a5ec3180f748ff8</td><td>Robust and Practical Face Recognition via
+<br/>Structured Sparsity
+<br/>1Advanced Digital Sciences Center, Singapore
+<br/>2 Microsoft Research Asia, Beijing, China
+<br/><b>University of Illinois at Urbana-Champaign</b></td><td>('2370507', 'Kui Jia', 'kui jia')<br/>('1926757', 'Tsung-Han Chan', 'tsung-han chan')<br/>('1700297', 'Yi Ma', 'yi ma')</td><td></td></tr><tr><td>be437b53a376085b01ebd0f4c7c6c9e40a4b1a75</td><td>ISSN (Online) 2321 – 2004
+<br/>ISSN (Print) 2321 – 5526
+<br/> INTERNATIONAL JOURNAL OF INNOVATIVE RESEARCH IN ELECTRICAL, ELECTRONICS, INSTRUMENTATION AND CONTROL ENGINEERING
+<br/> Vol. 4, Issue 5, May 2016
+<br/>IJIREEICE
+<br/>Face Recognition and Retrieval Using Cross
+<br/>Age Reference Coding
+<br/> BE, DSCE, Bangalore1
+<br/>Assistant Professor, DSCE, Bangalore2
+</td><td>('4427719', 'Chandrakala', 'chandrakala')</td><td></td></tr><tr><td>bebb8a97b2940a4e5f6e9d3caf6d71af21585eda</td><td>Mapping Emotional Status to Facial Expressions
+<br/><b>Tsinghua University</b><br/>Beijing 100084, P. R. China
+</td><td>('3165307', 'Yangzhou Du', 'yangzhou du')<br/>('2693354', 'Xueyin Lin', 'xueyin lin')</td><td>dyz99@mails.tsinghua.edu.cn; lxy-dcs@tsinghua.edu.cn
+</td></tr><tr><td>be07f2950771d318a78d2b64de340394f7d6b717</td><td>See discussions, stats, and author profiles for this publication at: https://www.researchgate.net/publication/290192867
+<br/>3D HMM-based Facial Expression Recognition
+<br/>using Histogram of Oriented Optical Flow
+<br/>ARTICLE in SYNTHESIS LECTURES ON ARTIFICIAL INTELLIGENCE AND MACHINE LEARNING · DECEMBER 2015
+<br/>DOI: 10.14738/tmlai.36.1661
+<br/>READS
+<br/>12
+<br/>3 AUTHORS, INCLUDING:
+<br/>Sheng Kung
+<br/><b>Oakland University</b><br/>Djamel Bouchaffra
+<br/><b>Institute of Electrical and Electronics Engineers</b><br/>1 PUBLICATION 0 CITATIONS
+<br/>57 PUBLICATIONS 402 CITATIONS
+<br/>SEE PROFILE
+<br/>SEE PROFILE
+<br/>All in-text references underlined in blue are linked to publications on ResearchGate,
+<br/>letting you access and read them immediately.
+<br/>Available from: Djamel Bouchaffra
+<br/>Retrieved on: 11 February 2016
+</td><td></td><td></td></tr><tr><td>be4f7679797777f2bc1fd6aad8af67cce5e5ce87</td><td>Interestingness Prediction
+<br/>by Robust Learning to Rank(cid:2)
+<br/><b>School of EECS, Queen Mary University of London, UK</b><br/><b>School of Mathematical Sciences, Peking University, China</b></td><td>('35782003', 'Yanwei Fu', 'yanwei fu')<br/>('1697755', 'Timothy M. Hospedales', 'timothy m. hospedales')<br/>('1700927', 'Tao Xiang', 'tao xiang')<br/>('2073354', 'Shaogang Gong', 'shaogang gong')<br/>('1746280', 'Yuan Yao', 'yuan yao')</td><td>{y.fu,t.hospedales,t.xiang,s.gong}@qmul.ac.uk, yuany@math.pku.edu.cn
+</td></tr><tr><td>beb4546ae95f79235c5f3c0e9cc301b5d6fc9374</td><td>A Modular Approach to Facial Expression Recognition
+<br/><b>Cognitive Arti cial Intelligence, Utrecht University, Heidelberglaan 6, 3584 CD, Utrecht</b><br/><b>Intelligent Systems Group, Utrecht University, Padualaan 14, 3508 TB, Utrecht</b></td><td>('31822812', 'Michal Sindlar', 'michal sindlar')<br/>('1727399', 'Marco Wiering', 'marco wiering')</td><td>sindlar@phil.uu.nl
+<br/>marco@cs.uu.nl
+</td></tr><tr><td>be28ed1be084385f5d389db25fd7f56cd2d7f7bf</td><td>Exploring Computation-Communication Tradeoffs
+<br/>in Camera Systems
+<br/><b>Paul G. Allen School of Computer Science and Engineering, University of Washington</b><br/><b>University of Washington</b></td><td>('19170117', 'Amrita Mazumdar', 'amrita mazumdar')<br/>('47108160', 'Thierry Moreau', 'thierry moreau')<br/>('37270394', 'Meghan Cowan', 'meghan cowan')<br/>('1698528', 'Armin Alaghi', 'armin alaghi')<br/>('1717411', 'Luis Ceze', 'luis ceze')<br/>('1723213', 'Mark Oskin', 'mark oskin')<br/>('46829693', 'Visvesh Sathe', 'visvesh sathe')</td><td>{amrita,moreau,cowanmeg}@cs.washington.edu, sungk9@uw.edu, {armin,luisceze,oskin}@cs.washington.edu, sathe@uw.edu
+</td></tr><tr><td>bebea83479a8e1988a7da32584e37bfc463d32d4</td><td>Discovery of Latent 3D Keypoints via
+<br/>End-to-end Geometric Reasoning
+<br/>Google AI
+</td><td>('37016781', 'Supasorn Suwajanakorn', 'supasorn suwajanakorn')<br/>('2704494', 'Jonathan Tompson', 'jonathan tompson')</td><td>{supasorn, snavely, tompson, mnorouzi}@google.com
+</td></tr><tr><td>bed06e7ff0b510b4a1762283640b4233de4c18e0</td><td>Bachelor Project
+<br/>Czech
+<br/>Technical
+<br/><b>University</b><br/>in Prague
+<br/>F3
+<br/>Faculty of Electrical Engineering
+<br/>Department of Cybernetics
+<br/>Face Interpretation Problems on Low
+<br/>Quality Images
+<br/>Supervisor: Ing. Jan Čech, Ph.D
+<br/>May 2018
+</td><td></td><td></td></tr><tr><td>bec31269632c17206deb90cd74367d1e6586f75f</td><td>Large-scale Datasets: Faces with Partial
+<br/>Occlusions and Pose Variations in the Wild
+<br/><b>Wayne State University</b><br/>Detroit, MI, USA 48120
+</td><td>('2489629', 'Zeyad Hailat', 'zeyad hailat')<br/>('35265528', 'Xuewen Chen', 'xuewen chen')</td><td>Email: ∗tarik alafif@wayne.edu, †zmhailat@wayne.edu, ‡melih.aslan@wayne.edu, §xuewen.chen@wayne.edu
+</td></tr><tr><td>be5276e9744c4445fe5b12b785650e8f173f56ff</td><td>Spatio-temporal VLAD Encoding for
+<br/>Human Action Recognition in Videos
+<br/><b>University of Trento, Italy</b><br/><b>University Politehnica of Bucharest, Romania</b><br/><b>University of Tokyo, Japan</b></td><td>('3429470', 'Ionut C. Duta', 'ionut c. duta')<br/>('1796198', 'Bogdan Ionescu', 'bogdan ionescu')<br/>('1712839', 'Kiyoharu Aizawa', 'kiyoharu aizawa')<br/>('1703601', 'Nicu Sebe', 'nicu sebe')</td><td>{ionutcosmin.duta, niculae.sebe}@unitn.it
+<br/>bionescu@imag.pub.ro
+<br/>aizawa@hal.t.u-tokyo.ac.jp
+</td></tr><tr><td>be57d2aaab615ec8bc1dd2dba8bee41a4d038b85</td><td>Automatic Analysis of Naturalistic Hand-Over-Face Gestures
+<br/><b>University of Cambridge</b><br/>One of the main factors that limit the accuracy of facial analysis systems is hand occlusion. As the face
+<br/>becomes occluded, facial features are lost, corrupted, or erroneously detected. Hand-over-face occlusions are
+<br/>considered not only very common but also very challenging to handle. However, there is empirical evidence
+<br/>that some of these hand-over-face gestures serve as cues for recognition of cognitive mental states. In this
+<br/>article, we present an analysis of automatic detection and classification of hand-over-face gestures. We detect
+<br/>hand-over-face occlusions and classify hand-over-face gesture descriptors in videos of natural expressions
+<br/>using multi-modal fusion of different state-of-the-art spatial and spatio-temporal features. We show experi-
+<br/>mentally that we can successfully detect face occlusions with an accuracy of 83%. We also demonstrate that
+<br/>we can classify gesture descriptors (hand shape, hand action, and facial region occluded) significantly better
+<br/>than a na¨ıve baseline. Our detailed quantitative analysis sheds some light on the challenges of automatic
+<br/>classification of hand-over-face gestures in natural expressions.
+<br/>Categories and Subject Descriptors: I.2.10 [Vision and Scene Understanding]: Video Analysis
+<br/>General Terms: Affective Computing, Body Expressions
+<br/>Additional Key Words and Phrases: Hand-over-face occlusions, face touches, hand gestures, facial landmarks,
+<br/>histograms of oriented gradient, space-time interest points
+<br/>ACM Reference Format:
+<br/>over-face gestures. ACM Trans. Interact. Intell. Syst. 6, 2, Article 19 (July 2016), 18 pages.
+<br/>DOI: http://dx.doi.org/10.1145/2946796
+<br/>1. INTRODUCTION
+<br/>Over the past few years, there has been an increasing interest in machine under-
+<br/>standing and recognition of people’s affective and cognitive mental states, especially
+<br/>based on facial expression analysis. One of the major factors that limits the accuracy
+<br/>of facial analysis systems is hand occlusion. People often hold their hands near their
+<br/>faces as a gesture in natural conversation. As many facial analysis systems are based
+<br/>on geometric or appearance based facial features, such features are lost, corrupted,
+<br/>or erroneously detected during occlusion. This results in an incorrect analysis of the
+<br/>person’s facial expression. Although face touches are very common, they are under-
+<br/>researched, mostly because segmenting of the hand on the face is very challenging,
+<br/>as face and hand usually have similar colour and texture. Detection of hand-over-face
+<br/>The research leading to these results received partial funding from the European Community’s Seventh
+<br/>Framework Programme (FP7/2007-2013) under Grant No. 289021 (ASC-Inclusion). We also thank Yousef
+<br/>Jameel and Qualcomm for providing funding as well.
+<br/>Authors’ address: The Computer Laboratory, 15 JJ Thomson Avenue, Cambridge CB3 0FD, United Kingdom;
+<br/>Permission to make digital or hard copies of part or all of this work for personal or classroom use is granted
+<br/>without fee provided that copies are not made or distributed for profit or commercial advantage and that
+<br/>copies show this notice on the first page or initial screen of a display along with the full citation. Copyrights for
+</td><td>('2022940', 'Marwa Mahmoud', 'marwa mahmoud')<br/>('39626495', 'Peter Robinson', 'peter robinson')<br/>('2022940', 'Marwa Mahmoud', 'marwa mahmoud')<br/>('39626495', 'Peter Robinson', 'peter robinson')</td><td>emails: {Marwa.Mahmoud, Tadas.Baltrusaitis, Peter.Robinson}@cl.cam.ac.uk.
+</td></tr><tr><td>be4f18e25b06f430e2de0cc8fddcac8585b00beb</td><td>STUDENT, PROF, COLLABORATOR: BMVC AUTHOR GUIDELINES
+<br/>A New Face Recognition Algorithm based on
+<br/>Dictionary Learning for a Single Training
+<br/>Sample per Person
+<br/>Ian Wassell
+<br/>Computer Laboratory,
+<br/><b>University of Cambridge</b></td><td>('1681842', 'Yang Liu', 'yang liu')</td><td>yl504@cam.ac.uk
+<br/>ijw24@cam.ac.uk
+</td></tr><tr><td>bef503cdfe38e7940141f70524ee8df4afd4f954</td><td></td><td></td><td></td></tr><tr><td>beab10d1bdb0c95b2f880a81a747f6dd17caa9c2</td><td>DeepDeblur: Fast one-step blurry face images restoration
+<br/>Tsinghua Unversity
+</td><td>('2766905', 'Lingxiao Wang', 'lingxiao wang')<br/>('2112160', 'Yali Li', 'yali li')<br/>('1678689', 'Shengjin Wang', 'shengjin wang')</td><td>wlx16@mails.tsinghua.edu.cn, liyali@ocrserv.ee.tsinghua.edu.cn, wgsgj@tsinghua.edu.cn
+</td></tr><tr><td>b331ca23aed90394c05f06701f90afd550131fe3</td><td>Zhou et al. EURASIP Journal on Image and Video Processing (2018) 2018:49
+<br/>https://doi.org/10.1186/s13640-018-0287-5
+<br/>EURASIP Journal on Image
+<br/>and Video Processing
+<br/>R ES EAR CH
+<br/>Double regularized matrix factorization for
+<br/>image classification and clustering
+<br/>Open Access
+</td><td>('39147685', 'Wei Zhou', 'wei zhou')<br/>('7513726', 'Chengdong Wu', 'chengdong wu')<br/>('46583983', 'Jianzhong Wang', 'jianzhong wang')<br/>('9305845', 'Xiaosheng Yu', 'xiaosheng yu')<br/>('50130800', 'Yugen Yi', 'yugen yi')</td><td></td></tr><tr><td>b3b532e8ea6304446b1623e83b0b9a96968f926c</td><td>Joint Network based Attention for Action Recognition
+<br/>1 National Engineering Laboratory for Video Technology, School of EE&CS,
+<br/><b>Peking University, Beijing, China</b><br/>2 Cooperative Medianet Innovation Center, China
+<br/>3 School of Information and Electronics,
+<br/><b>Beijing Institute of Technology, Beijing, China</b></td><td>('38179026', 'Yemin Shi', 'yemin shi')<br/>('1705972', 'Yonghong Tian', 'yonghong tian')<br/>('5765799', 'Yaowei Wang', 'yaowei wang')<br/>('34097174', 'Tiejun Huang', 'tiejun huang')</td><td></td></tr><tr><td>b37f57edab685dba5c23de00e4fa032a3a6e8841</td><td>Towards Social Interaction Detection in Egocentric Photo-streams
+<br/><b>University of Barcelona and Computer Vision Centre, Barcelona, Spain</b><br/>Recent advances in wearable camera technology have
+<br/>led to novel applications in the field of Preventive Medicine.
+<br/>For some of them, such as cognitive training of elderly peo-
+<br/>ple by digital memories and detection of unhealthy social
+<br/>trends associated to neuropsychological disorders, social in-
+<br/>teraction are of special interest. Our purpose is to address
+<br/>this problem in the domain of egocentric photo-streams cap-
+<br/>tured by a low temporal resolution wearable camera (2fpm).
+<br/>These cameras are suited for collecting visual information
+<br/>for long period of time, as required by the aforementioned
+<br/>applications. The major difficulties to be handled in this
+<br/>context are the sparsity of observations as well as the unpre-
+<br/>dictability of camera motion and attention orientation due
+<br/>to the fact that the camera is worn as part of clothing (see
+<br/>Fig. 1). Inspired by the theory of F-formation which is a
+<br/>pattern that people tend to follow when interacting [5], our
+<br/>proposed approach consists of three steps: multi-faces as-
+<br/>signment, social signals extraction and interaction detection
+<br/>of the individuals with the camera wearer (see Fig. 2).
+<br/>1. Multi-face Assignment
+<br/>While person detection and tracking in classical videos
+<br/>have been active research areas for a long time, the problem
+<br/>of people assignment in low temporal resolution egocen-
+<br/>tric photo-streams is still unexplored. To address such an
+<br/>issue, we proposed a novel method for multi-face assign-
+<br/>ment in egocentric photo-streams, we called extended-Bag-
+<br/>of-Tracklets (eBoT) [2]. This approach basically consists
+<br/>of 4 major sequential modules: seed and tracklet gener-
+<br/>ation, grouping tracklets into eBoT, prototypes extraction
+<br/>and occlusion treatment. Prior to any computation, first, a
+<br/>temporal segmentation algorithm [6] is applied to extract
+<br/>segments characterized by similar visual properties. Later
+<br/>on, a face detector is applied on all the frames of a seg-
+<br/>ment to detect visible faces on them [8]. Based on the ratio
+<br/>between the number of frames with detected faces and the
+<br/>total number of frames of the segment, we extract segments
+<br/>containing trackable persons. The next steps are applied on
+<br/>these extracted segments, hereafter referred to as sequences.
+<br/>Figure 1. Example of social interaction (first row) and non-social
+<br/>interaction (second row) in egocentric photo-streams.
+<br/>• Seed and tracklet generation: The set of collected
+<br/>bounding boxes that surround the face of each per-
+<br/>son throughout the sequence, are called seeds. For
+<br/>each seed, a set of correspondences to it is generated
+<br/>along the sequence by propagating the seed forward
+<br/>and backward employing the deep-matching technique
+<br/>[7] that lead to form a tracklet. To propagate a seed
+<br/>found in a frame, in all the frames of the sequence, the
+<br/>region of the frames most similar to the seed is found
+<br/>as the one having the highest deep-matching score.
+<br/>• Grouping tracklets into Bag-of-tracklets (eBoT):
+<br/>Assuming that tracklets generated by seeds belong-
+<br/>ing to the same person in a sequence, are likely to
+<br/>be similar to each other, we group them into a set of
+<br/>non-overlapping eBoTs. Since seeds corresponding to
+<br/>false positive detections generate unreliable tracklets
+<br/>and unreliable eBoTs, we defined a measure based on
+<br/>the density of the eBoTs to exclude unreliable eBoTs.
+<br/>• Prototypes extraction: A prototype extracted from an
+<br/>eBoT, should best represent all tracklets in the eBoT,
+<br/>and therefore, it should best localize a person’s face in
+<br/>each frame. As the prototype frame, the frame whose
+<br/>bounding box has the biggest intersection with the rest
+<br/>of the tracklets in that frame is chosen.
+<br/>• Occlusion treatment: Estimation of occluded frames
+<br/>is a very helpful feature since it allows us to exclude
+<br/>occluded frames which do not convey many informa-
+<br/>tion from final prototypes. To this goal, we define a
+<br/>frame confidence measure to assign a confidence value
+</td><td>('2084534', 'Maedeh Aghaei', 'maedeh aghaei')<br/>('2837527', 'Mariella Dimiccoli', 'mariella dimiccoli')<br/>('1724155', 'Petia Radeva', 'petia radeva')</td><td>aghaei.maya@gmail.com
+</td></tr><tr><td>b3154d981eca98416074538e091778cbc031ca29</td><td>Pedestrian Attribute Analysis
+<br/>Using a Top-View Camera in a Public Space
+<br/><b>The University of Tokyo</b><br/>7-3-1 Hongo, Bunkyo-ku, Tokyo 113-8656, Japan
+<br/><b>School of Electrical and Computer Engineering, Cornell University</b><br/>116 Ward Hall, Ithaca, NY 14853, USA
+<br/>3 JSPS Postdoctoral Fellow for Research Abroad
+</td><td>('2759239', 'Toshihiko Yamasaki', 'toshihiko yamasaki')<br/>('21152852', 'Tomoaki Matsunami', 'tomoaki matsunami')</td><td>{yamasaki,matsunami}@hal.t.u-tokyo.ac.jp
+</td></tr><tr><td>b3cb91a08be4117d6efe57251061b62417867de9</td><td>T. Swearingen and A. Ross. "A label propagation approach for predicting missing biographic labels in
+<br/>A Label Propagation Approach for
+<br/>Predicting Missing Biographic Labels
+<br/>in Face-Based Biometric Records
+</td><td>('3153117', 'Thomas Swearingen', 'thomas swearingen')<br/>('1698707', 'Arun Ross', 'arun ross')</td><td></td></tr><tr><td>b340f275518aa5dd2c3663eed951045a5b8b0ab1</td><td>Visual Inference of Human Emotion and Behaviour
+<br/>Dept of Computer Science
+<br/><b>Queen Mary College, London</b><br/>Dept of Computer Science
+<br/><b>Queen Mary College, London</b><br/>Dept of Computer Science
+<br/><b>Queen Mary College, London</b><br/>England, UK
+<br/>England, UK
+<br/>England, UK
+</td><td>('2073354', 'Shaogang Gong', 'shaogang gong')<br/>('10795229', 'Caifeng Shan', 'caifeng shan')<br/>('1700927', 'Tao Xiang', 'tao xiang')</td><td>sgg@dcs.qmul.ac.uk
+<br/>cfshan@dcs.qmul.ac.uk
+<br/>txiang@dcs.qmul.ac.uk
+</td></tr><tr><td>b3200539538eca54a85223bf0ec4f3ed132d0493</td><td>Action Anticipation with RBF Kernelized
+<br/>Feature Mapping RNN
+<br/>Hartley[0000−0002−5005−0191]
+<br/><b>The Australian National University, Australia</b></td><td>('11519650', 'Yuge Shi', 'yuge shi')</td><td></td></tr><tr><td>b3b467961ba66264bb73ffe00b1830d7874ae8ce</td><td>Finding Tiny Faces
+<br/><b>Robotics Institute</b><br/><b>Carnegie Mellon University</b><br/>Figure 1: We describe a detector that can find around 800 faces out of the reportedly 1000 present, by making use of novel
+<br/>characterizations of scale, resolution, and context to find small objects. Detector confidence is given by the colorbar on the
+<br/>right: can you confidently identify errors?
+</td><td>('2894848', 'Peiyun Hu', 'peiyun hu')<br/>('1770537', 'Deva Ramanan', 'deva ramanan')</td><td>{peiyunh,deva}@cs.cmu.edu
+</td></tr><tr><td>b3ba7ab6de023a0d58c741d6abfa3eae67227caf</td><td>Zero-Shot Activity Recognition with Verb Attribute Induction
+<br/>Paul G. Allen School of Computer Science & Engineering
+<br/><b>University of Washington</b><br/>Seattle, WA 98195, USA
+</td><td>('2545335', 'Rowan Zellers', 'rowan zellers')<br/>('1699545', 'Yejin Choi', 'yejin choi')</td><td>{rowanz,yejin}@cs.washington.edu
+</td></tr><tr><td>b375db63742f8a67c2a7d663f23774aedccc84e5</td><td>Brain-inspired Classroom Occupancy
+<br/>Monitoring on a Low-Power Mobile Platform
+<br/><b>Electronic and Information Engineering, University of Bologna, Italy</b><br/>†Integrated Systems Laboratory, ETH Zurich, Switzerland
+</td><td>('1721381', 'Francesco Conti', 'francesco conti')<br/>('1785226', 'Antonio Pullini', 'antonio pullini')<br/>('1710649', 'Luca Benini', 'luca benini')</td><td>f.conti@unibo.it,{pullinia,lbenini}@iis.ee.ethz.ch
+</td></tr><tr><td>b3330adb131fb4b6ebbfacce56f1aec2a61e0869</td><td>Emotion recognition using facial images
+<br/>School of Electrical and Electronics Engineering
+<br/>Department of Electronics and Communication Engineering
+<br/><b>SASTRA University, Thanjavur, Tamil Nadu, India</b></td><td>('9365696', 'Siva sankari', 'siva sankari')</td><td> ramya.ece.sk@gmail.com, siva.ece.ds@gmail.com, knr@ece.sastra.edu
+</td></tr><tr><td>b3c60b642a1c64699ed069e3740a0edeabf1922c</td><td>Max-Margin Object Detection
+</td><td>('29250541', 'Davis E. King', 'davis e. king')</td><td>davis@dlib.net
+</td></tr><tr><td>b3f3d6be11ace907c804c2d916830c85643e468d</td><td><b>University of Toulouse</b><br/><b>University of Toulouse II Le Mirail</b><br/>PhD in computer sciences / artificial intelligence
+<br/>A Logical Framework for
+<br/>Trust-Related Emotions:
+<br/>Formal and Behavioral Results
+<br/>by
+<br/>Co-supervisors:
+<br/>Toulouse, September 2010
+</td><td>('1759342', 'Manh Hung NGUYEN', 'manh hung nguyen')<br/>('3107309', 'Jean-François BONNEFON', 'jean-françois bonnefon')<br/>('1733042', 'Dominique LONGIN', 'dominique longin')</td><td></td></tr><tr><td>b3f7c772acc8bc42291e09f7a2b081024a172564</td><td> www.ijmer.com Vol. 3, Issue. 5, Sep - Oct. 2013 pp-3225-3230 ISSN: 2249-6645
+<br/>International Journal of Modern Engineering Research (IJMER)
+<br/>A novel approach for performance parameter estimation of face
+<br/>recognition based on clustering, shape and corner detection
+<br/><b></b><br/>
+</td><td>('1904292', 'Prashant Jain', 'prashant jain')</td><td></td></tr><tr><td>b3c398da38d529b907b0bac7ec586c81b851708f</td><td>Face Recognition under Varying Lighting Conditions Using Self Quotient
+<br/>Image
+<br/><b>Institute of Automation, Chinese Academy of</b><br/>Sciences, Beijing, 100080, China,
+</td><td>('29948255', 'Haitao Wang', 'haitao wang')<br/>('1744302', 'Yangsheng Wang', 'yangsheng wang')</td><td>Email: {htwang,wys}@nlpr.ia.ac.cn
+</td></tr><tr><td>b32cf547a764a4efa475e9c99a72a5db36eeced6</td><td>UvA-DARE (Digital Academic Repository)
+<br/>Mimicry of ingroup and outgroup emotional expressions
+<br/>Sachisthal, M.S.M.; Sauter, D.A.; Fischer, A.H.
+<br/>Published in:
+<br/>Comprehensive Results in Social Psychology
+<br/>DOI:
+<br/>10.1080/23743603.2017.1298355
+<br/>Link to publication
+<br/>Citation for published version (APA):
+<br/>Sachisthal, M. S. M., Sauter, D. A., & Fischer, A. H. (2016). Mimicry of ingroup and outgroup emotional
+<br/>expressions. Comprehensive Results in Social Psychology, 1(1-3), 86-105. DOI:
+<br/>10.1080/23743603.2017.1298355
+<br/>General rights
+<br/>It is not permitted to download or to forward/distribute the text or part of it without the consent of the author(s) and/or copyright holder(s),
+<br/>other than for strictly personal, individual use, unless the work is under an open content license (like Creative Commons).
+<br/>Disclaimer/Complaints regulations
+<br/>If you believe that digital publication of certain material infringes any of your rights or (privacy) interests, please let the Library know, stating
+<br/>your reasons. In case of a legitimate complaint, the Library will make the material inaccessible and/or remove it from the website. Please Ask
+<br/><b>the Library: http://uba.uva.nl/en/contact, or a letter to: Library of the University of Amsterdam, Secretariat, Singel 425, 1012 WP Amsterdam</b><br/>The Netherlands. You will be contacted as soon as possible.
+<br/>Download date: 08 Aug 2018
+<br/><b>UvA-DARE is a service provided by the library of the University of Amsterdam (http://dare.uva.nl</b></td><td></td><td></td></tr><tr><td>b3658514a0729694d86a8b89c875a66cde20480c</td><td>Improving the Robustness of Subspace Learning
+<br/>Techniques for Facial Expression Recognition
+<br/><b>Aristotle University of Thessaloniki</b><br/>Box 451, 54124 Thessaloniki, Greece
+</td><td>('2342345', 'Dimitris Bolis', 'dimitris bolis')<br/>('2447585', 'Anastasios Maronidis', 'anastasios maronidis')<br/>('1737071', 'Anastasios Tefas', 'anastasios tefas')<br/>('1698588', 'Ioannis Pitas', 'ioannis pitas')</td><td>email: {mpolis, amaronidis, tefas, pitas}@aiia.csd.auth.gr (cid:63)
+</td></tr><tr><td>b3b4a7e29b9186e00d2948a1d706ee1605fe5811</td><td>Paper
+<br/>Image Preprocessing
+<br/>for Illumination Invariant Face
+<br/>Verification
+<br/><b>Institute of Radioelectronics, Warsaw University of Technology, Warsaw, Poland</b></td><td>('3031283', 'Mariusz Leszczyński', 'mariusz leszczyński')</td><td></td></tr><tr><td>b32631f456397462b3530757f3a73a2ccc362342</td><td>Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+<br/>3069
+</td><td></td><td></td></tr><tr><td>b33e8db8ccabdfc49211e46d78d09b14557d4cba</td><td>Face Expression Recognition and Analysis:
+<br/>1
+<br/>The State of the Art
+<br/><b>College of Computing, Georgia Institute of Technology</b></td><td>('3115428', 'Vinay Bettadapura', 'vinay bettadapura')</td><td>vinay@gatech.edu
+</td></tr><tr><td>b3afa234996f44852317af382b98f5f557cab25a</td><td></td><td></td><td></td></tr><tr><td>df90850f1c153bfab691b985bfe536a5544e438b</td><td>FACE TRACKING ALGORITHM ROBUST TO POSE,
+<br/>ILLUMINATION AND FACE EXPRESSION CHANGES: A 3D
+<br/>PARAMETRIC MODEL APPROACH
+<br/><b></b><br/>via Bramante 65 - 26013, Crema (CR), Italy
+<br/>Luigi Arnone, Fabrizio Beverina
+<br/>STMicroelectronics - Advanced System Technology Group
+<br/>via Olivetti 5 - 20041, Agrate Brianza, Italy
+<br/>Keywords:
+<br/>Face tracking, expression changes, FACS, illumination changes.
+</td><td>('3330245', 'Marco Anisetti', 'marco anisetti')<br/>('2061298', 'Valerio Bellandi', 'valerio bellandi')</td><td></td></tr><tr><td>df8da144a695269e159fb0120bf5355a558f4b02</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>International Conference on Recent Trends in engineering & Technology - 2013(ICRTET'2013)
+<br/>Face Recognition using PCA and Eigen Face
+<br/>Approach
+<br/>ME EXTC [VLSI & Embedded System]
+<br/>Sinhgad Academy of Engineering
+<br/>EXTC Department
+<br/>Pune, India
+</td><td></td><td></td></tr><tr><td>dfd934ae448a1b8947d404b01303951b79b13801</td><td>Christopher A. Longmore
+<br/><b>University of Plymouth, UK</b><br/><b>Bournemouth University, UK</b><br/>Andrew W. Young
+<br/><b>University of York, UK</b><br/>The importance of internal facial features in learning new
+<br/>faces
+<br/>Running head: FACIAL FEATURES IN LEARNING NEW FACES
+<br/>Address of correspondence:
+<br/>Chris Longmore
+<br/>School of Psychology
+<br/>Faculty of Health and Human Sciences
+<br/><b>Plymouth University</b><br/>Drake Circus
+<br/>Plymouth
+<br/>PL4 8AA
+<br/>Tel: +44 (0)1752 584890
+<br/>Fax: +44 (0)1752 584808
+</td><td>('39557512', 'Chang Hong Liu', 'chang hong liu')</td><td>Email: chris.longmore@plymouth.ac.uk
+</td></tr><tr><td>df577a89830be69c1bfb196e925df3055cafc0ed</td><td>Shift: A Zero FLOP, Zero Parameter Alternative to Spatial Convolutions
+<br/>UC Berkeley
+</td><td>('3130257', 'Bichen Wu', 'bichen wu')<br/>('40417702', 'Alvin Wan', 'alvin wan')<br/>('27577617', 'Xiangyu Yue', 'xiangyu yue')<br/>('1755487', 'Sicheng Zhao', 'sicheng zhao')<br/>('30096597', 'Noah Golmant', 'noah golmant')<br/>('3647010', 'Amir Gholaminejad', 'amir gholaminejad')<br/>('30503077', 'Joseph Gonzalez', 'joseph gonzalez')<br/>('1732330', 'Kurt Keutzer', 'kurt keutzer')</td><td>{bichen,alvinwan,xyyue,phj,schzhao,noah.golmant,amirgh,jegonzal,keutzer}@berkeley.edu
+</td></tr><tr><td>df0e280cae018cebd5b16ad701ad101265c369fa</td><td>Deep Attributes from Context-Aware Regional Neural Codes
+<br/><b>Image Processing Center, Beihang University</b><br/>2 Intel Labs China
+<br/><b>Columbia University</b></td><td>('2780589', 'Jianwei Luo', 'jianwei luo')<br/>('35423937', 'Jianguo Li', 'jianguo li')<br/>('1715001', 'Jun Wang', 'jun wang')<br/>('1791565', 'Zhiguo Jiang', 'zhiguo jiang')<br/>('6060281', 'Yurong Chen', 'yurong chen')</td><td></td></tr><tr><td>dfabe7ef245ca68185f4fcc96a08602ee1afb3f7</td><td></td><td></td><td></td></tr><tr><td>df51dfe55912d30fc2f792561e9e0c2b43179089</td><td>Face Hallucination using Linear Models of Coupled
+<br/>Sparse Support
+<br/>grid and fuse them to suppress the aliasing caused by under-
+<br/>sampling [5], [6]. On the other hand, learning based meth-
+<br/>ods use coupled dictionaries to learn the mapping relations
+<br/>between low- and high- resolution image pairs to synthesize
+<br/>high-resolution images from low-resolution images [4], [7].
+<br/>The research community has lately focused on the latter
+<br/>category of super-resolution methods, since they can provide
+<br/>higher quality images and larger magnification factors.
+</td><td>('1805605', 'Reuben A. Farrugia', 'reuben a. farrugia')<br/>('1780587', 'Christine Guillemot', 'christine guillemot')</td><td></td></tr><tr><td>df2c685aa9c234783ab51c1aa1bf1cb5d71a3dbb</td><td>SREFI: Synthesis of Realistic Example Face Images
+<br/><b>University of Notre Dame, USA</b><br/><b>FaceTec, Inc</b></td><td>('40061203', 'Sandipan Banerjee', 'sandipan banerjee')<br/>('3365839', 'John S. Bernhard', 'john s. bernhard')<br/>('2613438', 'Walter J. Scheirer', 'walter j. scheirer')<br/>('1799014', 'Kevin W. Bowyer', 'kevin w. bowyer')<br/>('1704876', 'Patrick J. Flynn', 'patrick j. flynn')</td><td>{sbanerj1, wscheire, kwb, flynn}@nd.edu
+<br/>jsbernhardjr@gmail.com
+</td></tr><tr><td>df054fa8ee6bb7d2a50909939d90ef417c73604c</td><td>Image Quality-Aware Deep Networks Ensemble for Efficient
+<br/>Gender Recognition in the Wild
+<br/><b>Augmented Vision Lab, Technical University Kaiserslautern, Kaiserslautern, Germany</b><br/><b>German Research Center for Arti cial Intelligence (DFKI), Kaiserslautern, Germany</b><br/>Keywords:
+<br/>Gender, Face, Deep Neural Networks, Quality, In the Wild
+</td><td>('2585383', 'Mohamed Selim', 'mohamed selim')<br/>('40810260', 'Suraj Sundararajan', 'suraj sundararajan')<br/>('1771057', 'Alain Pagani', 'alain pagani')<br/>('1807169', 'Didier Stricker', 'didier stricker')</td><td>{mohamed.selim, alain.pagani, didier.stricker}@dfki.uni-kl.de, s lakshmin13@informatik.uni-kl.de
+</td></tr><tr><td>df80fed59ffdf751a20af317f265848fe6bfb9c9</td><td>1666
+<br/>Learning Deep Sharable and Structural
+<br/>Detectors for Face Alignment
+</td><td>('40387982', 'Hao Liu', 'hao liu')<br/>('1697700', 'Jiwen Lu', 'jiwen lu')<br/>('2632601', 'Jianjiang Feng', 'jianjiang feng')<br/>('25060740', 'Jie Zhou', 'jie zhou')</td><td></td></tr><tr><td>dfd8602820c0e94b624d02f2e10ce6c798193a25</td><td>STRUCTURED ANALYSIS DICTIONARY LEARNING FOR IMAGE CLASSIFICATION
+<br/>Department of Electrical and Computer Engineering
+<br/><b>North Carolina State University, Raleigh, NC, USA</b><br/>†Army Research Office, RTP, Raleigh, NC, USA
+</td><td>('49501811', 'Wen Tang', 'wen tang')<br/>('1733181', 'Ashkan Panahi', 'ashkan panahi')<br/>('1769928', 'Hamid Krim', 'hamid krim')<br/>('2622498', 'Liyi Dai', 'liyi dai')</td><td>{wtang6, apanahi, ahk}@ncsu.edu, liyi.dai@us.army.mil
+</td></tr><tr><td>dff838ba0567ef0a6c8fbfff9837ea484314efc6</td><td>Progress Report, MSc. Dissertation: On-line
+<br/>Random Forest for Face Detection
+<br/>School of Computer Science
+<br/><b>The University of Manchester</b><br/>May 9, 2014
+<br/>Contents
+<br/>1 Introduction
+<br/>2 Background
+<br/>3 Research Methods
+<br/>3.1 What the project involves . . . . . . . . . . . . . . . . . . . . . .
+<br/>3.2 The project plan and evaluation of the plan . . . . . . . . . . . .
+<br/>4 Progress
+<br/>4.1 Quality attributes
+<br/>4.2 Prototypes
+<br/>. . . . . . . . . . . . . . . . . . . . . . . . . .
+<br/>. . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+<br/>4.2.1 PGM Image . . . . . . . . . . . . . . . . . . . . . . . . . .
+<br/>4.2.2 Working with Haar-like features and Integral Image
+<br/>. . .
+<br/>4.2.3 Accesing the Webcam Driver . . . . . . . . . . . . . . . .
+<br/>4.2.4 The On-line Random Forest . . . . . . . . . . . . . . . . .
+<br/>4.2.5 The First version of the User Interface . . . . . . . . . . .
+<br/>4.3 Open discussion about the On-line Random Forest . . . . . . . .
+<br/>5 Next Steps and Conclusions
+<br/>6 References
+<br/>10
+<br/>10
+<br/>11
+<br/>11
+<br/>12
+<br/>13
+<br/>15
+<br/>15
+<br/>16
+<br/>17
+<br/>18
+</td><td></td><td></td></tr><tr><td>dfa80e52b0489bc2585339ad3351626dee1a8395</td><td>Human Action Forecasting by Learning Task Grammars
+</td><td>('22237490', 'Tengda Han', 'tengda han')<br/>('36541522', 'Jue Wang', 'jue wang')<br/>('2691929', 'Anoop Cherian', 'anoop cherian')<br/>('2377076', 'Stephen Gould', 'stephen gould')</td><td></td></tr><tr><td>df71a00071d5a949f9c31371c2e5ee8b478e7dc8</td><td>Using Opportunistic Face Logging
+<br/>from Smartphone to Infer Mental
+<br/>Health: Challenges and Future
+<br/>Directions
+<br/><b>Dartmouth College</b><br/><b>Dartmouth College</b><br/><b>Dartmouth College</b><br/>Permission to make digital or hard copies of all or part of this work for personal
+<br/>or classroom use is granted without fee provided that copies are not made or
+<br/>distributed for profit or commercial advantage and that copies bear this notice
+<br/>and the full citation on the first page. Copyrights for components of this work
+</td><td>('1698066', 'Rui Wang', 'rui wang')<br/>('1690035', 'Andrew T. Campbell', 'andrew t. campbell')<br/>('2253140', 'Xia Zhou', 'xia zhou')</td><td>rui.wang@cs.dartmouth.edu
+<br/>campbell@cs.dartmouth.edu
+<br/>xia@cs.dartmouth.edu
+</td></tr><tr><td>df9269657505fcdc1e10cf45bbb8e325678a40f5</td><td>INTERSPEECH 2016
+<br/>September 8–12, 2016, San Francisco, USA
+<br/>Open-Domain Audio-Visual Speech Recognition: A Deep Learning Approach
+<br/><b>Carnegie Mellon University</b></td><td>('37467623', 'Yajie Miao', 'yajie miao')<br/>('1740721', 'Florian Metze', 'florian metze')</td><td>{ymiao,fmetze}@cs.cmu.edu
+</td></tr><tr><td>dfb6aa168177d4685420fcb184def0aa7db7cddb</td><td>The Effect of Lighting Direction/Condition on the Performance
+<br/>of Face Recognition Algorithms
+<br/><b>West Virginia University, Morgantown, WV</b><br/><b>University of Miami, Coral Gables, FL</b></td><td>('1722978', 'Gamal Fahmy', 'gamal fahmy')<br/>('4562956', 'Ahmed El-Sherbeeny', 'ahmed el-sherbeeny')<br/>('9449390', 'Mohamed Abdel-Mottaleb', 'mohamed abdel-mottaleb')<br/>('16279046', 'Hany Ammar', 'hany ammar')</td><td></td></tr><tr><td>df2841a1d2a21a0fc6f14fe53b6124519f3812f9</td><td>Learning Image Attributes
+<br/>using the Indian Buffet Process
+<br/>Department of Computer Science
+<br/><b>Brown University</b><br/>Providence, RI 02912
+<br/>Department of Computer Science
+<br/><b>Brown University</b><br/>Providence, RI 02912
+</td><td>('2059199', 'Soravit Changpinyo', 'soravit changpinyo')<br/>('1799035', 'Erik B. Sudderth', 'erik b. sudderth')</td><td>schangpi@cs.brown.edu
+<br/>sudderth@cs.brown.edu
+</td></tr><tr><td>dfecaedeaf618041a5498cd3f0942c15302e75c3</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>A Recursive Framework for Expression Recognition: From
+<br/>Web Images to Deep Models to Game Dataset
+<br/>Received: date / Accepted: date
+</td><td>('48625314', 'Wei Li', 'wei li')</td><td></td></tr><tr><td>df5fe0c195eea34ddc8d80efedb25f1b9034d07d</td><td>Robust Modified Active Shape Model for Automatic Facial Landmark
+<br/>Annotation of Frontal Faces
+</td><td>('2363348', 'Keshav Seshadri', 'keshav seshadri')<br/>('1794486', 'Marios Savvides', 'marios savvides')</td><td></td></tr><tr><td>df2494da8efa44d70c27abf23f73387318cf1ca8</td><td>RESEARCH ARTICLE
+<br/>Supervised Filter Learning for Representation
+<br/>Based Face Recognition
+<br/><b>College of Computer Science and Information Technology, Northeast Normal University, Changchun</b><br/><b>China, 2 Changchun Institute of Optics, Fine Mechanics and Physics, CAS, Changchun, China, 3 School of</b><br/><b>Software, Jiangxi Normal University, Nanchang, China, 4 School of Statistics, Capital University of</b><br/>Economics and Business, Beijing, China
+<br/>a11111
+</td><td>('2498586', 'Chao Bi', 'chao bi')<br/>('1684635', 'Lei Zhang', 'lei zhang')<br/>('7009658', 'Miao Qi', 'miao qi')<br/>('5858971', 'Caixia Zheng', 'caixia zheng')<br/>('3042163', 'Yugen Yi', 'yugen yi')<br/>('1831935', 'Jianzhong Wang', 'jianzhong wang')<br/>('1751108', 'Baoxue Zhang', 'baoxue zhang')</td><td>* wangjz019@nenu.edu.cn (JW); zhangbaoxue@cueb.edu.cn (BZ)
+</td></tr><tr><td>df674dc0fc813c2a6d539e892bfc74f9a761fbc8</td><td>IOSR Journal of Computer Engineering (IOSR-JCE)
+<br/>e-ISSN: 2278-0661, p- ISSN: 2278-8727Volume 10, Issue 6 (May. - Jun. 2013), PP 21-29
+<br/>www.iosrjournals.org
+<br/>An Image Mining System for Gender Classification & Age
+<br/>Prediction Based on Facial Features
+<br/> 1.Ms.Dhanashri Shirkey , 2Prof.Dr.S.R.Gupta,
+<br/>M.E(Scholar),Department Computer Science & Engineering, PRMIT & R, Badnera
+<br/>Asstt.Prof. Department Computer Science & Engineering, PRMIT & R, Badnera
+</td><td></td><td></td></tr><tr><td>dad7b8be074d7ea6c3f970bd18884d496cbb0f91</td><td>Super-Sparse Regression for Fast Age
+<br/>Estimation From Faces at Test Time
+<br/><b>University of Cagliari</b><br/>Piazza d’Armi, 09123 Cagliari, Italy
+<br/>WWW home page: http://prag.diee.unica.it
+</td><td>('2272441', 'Ambra Demontis', 'ambra demontis')<br/>('1684175', 'Battista Biggio', 'battista biggio')<br/>('1716261', 'Giorgio Fumera', 'giorgio fumera')<br/>('1710171', 'Fabio Roli', 'fabio roli')</td><td>{ambra.demontis,battista.biggio,fumera,roli}@diee.unica.it
+</td></tr><tr><td>daf05febbe8406a480306683e46eb5676843c424</td><td>Robust Subspace Segmentation with Block-diagonal Prior
+<br/><b>National University of Singapore, Singapore</b><br/><b>Key Lab. of Machine Perception, School of EECS, Peking University, China</b><br/><b>National University of Singapore, Singapore</b></td><td>('33221685', 'Jiashi Feng', 'jiashi feng')<br/>('33383055', 'Zhouchen Lin', 'zhouchen lin')<br/>('1678675', 'Huan Xu', 'huan xu')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')</td><td>1{a0066331,eleyans}@nus.edu.sg, 2zlin@pku.edu.cn, 3mpexuh@nus.edu.sg
+</td></tr><tr><td>da4170c862d8ae39861aa193667bfdbdf0ecb363</td><td>Multi-task CNN Model for Attribute Prediction
+</td><td>('3282196', 'Abrar H. Abdulnabi', 'abrar h. abdulnabi')<br/>('22804340', 'Gang Wang', 'gang wang')<br/>('1697700', 'Jiwen Lu', 'jiwen lu')<br/>('2370507', 'Kui Jia', 'kui jia')</td><td></td></tr><tr><td>da15344a4c10b91d6ee2e9356a48cb3a0eac6a97</td><td></td><td></td><td></td></tr><tr><td>da5bfddcfe703ca60c930e79d6df302920ab9465</td><td></td><td></td><td></td></tr><tr><td>dac2103843adc40191e48ee7f35b6d86a02ef019</td><td>854
+<br/>Unsupervised Celebrity Face Naming in Web Videos
+</td><td>('2172810', 'Lei Pang', 'lei pang')<br/>('1751681', 'Chong-Wah Ngo', 'chong-wah ngo')</td><td></td></tr><tr><td>dae420b776957e6b8cf5fbbacd7bc0ec226b3e2e</td><td>RECOGNIZING EMOTIONS IN SPONTANEOUS FACIAL EXPRESSIONS
+<br/>Institut f¨ur Nachrichtentechnik
+<br/>Universit¨at Karlsruhe (TH), Germany
+</td><td>('2500636', 'Michael Grimm', 'michael grimm')<br/>('1787004', 'Kristian Kroschel', 'kristian kroschel')</td><td>grimm@int.uni-karlsruhe.de
+</td></tr><tr><td>daa02cf195818cbf651ef81941a233727f71591f</td><td>Face recognition system on Raspberry Pi
+<br/><b>Institute of Electronics and Computer Science</b><br/>14 Dzerbenes Street, Riga, LV 1006, Latvia
+</td><td>('2059963', 'Olegs Nikisins', 'olegs nikisins')<br/>('2337567', 'Rihards Fuksis', 'rihards fuksis')<br/>('3199162', 'Arturs Kadikis', 'arturs kadikis')<br/>('3310787', 'Modris Greitans', 'modris greitans')</td><td></td></tr><tr><td>daa52dd09b61ee94945655f0dde216cce0ebd505</td><td>Recognizing Micro-Actions and Reactions from Paired Egocentric Videos
+<br/><b>The University of Tokyo</b><br/><b>Carnegie Mellon University</b><br/><b>The University of Tokyo</b><br/>Tokyo, Japan
+<br/>Pittsburgh, PA, USA
+<br/>Tokyo, Japan
+</td><td>('1899753', 'Ryo Yonetani', 'ryo yonetani')<br/>('37991449', 'Kris M. Kitani', 'kris m. kitani')<br/>('9467266', 'Yoichi Sato', 'yoichi sato')</td><td>yonetani@iis.u-tokyo.ac.jp
+<br/>kkitani@cs.cmu.edu
+<br/>ysato@iis.u-tokyo.ac.jp
+</td></tr><tr><td>daba8f0717f3f47c272f018d0a466a205eba6395</td><td></td><td></td><td></td></tr><tr><td>daefac0610fdeff415c2a3f49b47968d84692e87</td><td>New Orleans, Louisiana, June 1 - 6, 2018. c(cid:13)2018 Association for Computational Linguistics
+<br/>Proceedings of NAACL-HLT 2018, pages 1481–1491
+<br/>1481
+</td><td></td><td></td></tr><tr><td>b49affdff167f5d170da18de3efa6fd6a50262a2</td><td>Author manuscript, published in "Workshop on Faces in 'Real-Life' Images: Detection, Alignment, and Recognition, Marseille : France
+<br/>(2008)"
+</td><td></td><td></td></tr><tr><td>b4d694961d3cde43ccef7d8fcf1061fe0d8f97f3</td><td>Rapid Face Recognition Using Hashing
+<br/><b>Australian National University, and NICTA</b><br/><b>Australian National University, and NICTA</b><br/>Canberra, Australia
+<br/>Canberra, Australia
+<br/><b>NICTA, and Australian National University</b><br/>Canberra, Australia
+</td><td>('3177281', 'Qinfeng Shi', 'qinfeng shi')<br/>('1711119', 'Hanxi Li', 'hanxi li')<br/>('1780381', 'Chunhua Shen', 'chunhua shen')</td><td></td></tr><tr><td>b4ee1b468bf7397caa7396cfee2ab5f5ed6f2807</td><td>A short review and primer on electromyography
+<br/>in human computer interaction applications
+<br/><b>Helsinki Collegium for Advanced Studies, University of Helsinki, Finland</b><br/><b>Helsinki Institute for Information Technology, Aalto University, Finland</b><br/><b>School of Business, Aalto University, Finland</b><br/><b>Quantitative Employee unit, Finnish Institute of Occupational Health</b><br/>POBox 40, Helsinki, 00250, Finland
+<br/><b>Cognitive Brain Research Unit, Institute of Behavioural Sciences, University of</b><br/>Helsinki, Finland
+</td><td>('1751008', 'Niklas Ravaja', 'niklas ravaja')<br/>('1713422', 'Jari Torniainen', 'jari torniainen')</td><td>benjamin.cowley@ttl.fi,
+</td></tr><tr><td>b446bcd7fb78adfe346cf7a01a38e4f43760f363</td><td>To appear in ICB 2018
+<br/>Longitudinal Study of Child Face Recognition
+<br/><b>Michigan State University</b><br/>East Lansing, MI, USA
+<br/><b>Malaviya National Institute of Technology</b><br/>Jaipur, India
+<br/><b>Michigan State University</b><br/>East Lansing, MI, USA
+</td><td>('32623642', 'Debayan Deb', 'debayan deb')<br/>('2117075', 'Neeta Nain', 'neeta nain')<br/>('1739705', 'Anil K. Jain', 'anil k. jain')</td><td>debdebay@msu.edu
+<br/>nnain.cse@mnit.ac.in
+<br/>jain@cse.msu.edu
+</td></tr><tr><td>b417b90fa0c288bbaab1aceb8ebc7ec1d3f33172</td><td>Face Aging with Contextual Generative Adversarial Nets
+<br/>SKLOIS, IIE, CAS
+<br/>SKLOIS, IIE, CAS
+<br/>School of Cyber Security, UCAS
+<br/>SKLOIS, IIE, CAS
+<br/><b>University of Trento, Italy</b><br/><b>Qihoo 360 AI Institute, Beijing, China</b><br/><b>National University of singapore</b><br/>SKLOIS, IIE, CAS
+<br/>School of Cyber Security, UCAS
+<br/><b>Nanjing University of Science and</b><br/>Technology
+</td><td>('38110120', 'Si Liu', 'si liu')<br/>('7760591', 'Renda Bao', 'renda bao')<br/>('39711014', 'Yao Sun', 'yao sun')<br/>('1699978', 'Wei Wang', 'wei wang')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')<br/>('4661961', 'Defa Zhu', 'defa zhu')<br/>('2287686', 'Xiangbo Shu', 'xiangbo shu')</td><td>liusi@iie.ac.cn
+<br/>roger bao@163.com
+<br/>sunyao@iie.ac.cn
+<br/>wangwei1990@gmail.com
+<br/>eleyans@nus.edu.sg
+<br/>18502408950@163.com
+<br/>shuxb@njust.edu.cn
+</td></tr><tr><td>b41374f4f31906cf1a73c7adda6c50a78b4eb498</td><td>This article has been accepted for inclusion in a future issue of this journal. Content is final as presented, with the exception of pagination.
+<br/>Iterative Gaussianization: From ICA to
+<br/>Random Rotations
+</td><td>('2732577', 'Valero Laparra', 'valero laparra')<br/>('1684246', 'Gustavo Camps-Valls', 'gustavo camps-valls')<br/>('2186866', 'Jesús Malo', 'jesús malo')</td><td></td></tr><tr><td>b42a97fb47bcd6bfa72e130c08960a77ee96f9ab</td><td>FACIAL EXPRESSION RECOGNITION BASED ON GRAPH-PRESERVING SPARSE
+<br/>NON-NEGATIVE MATRIX FACTORIZATION
+<br/><b>Institute of Information Science</b><br/><b>Beijing Jiaotong University</b><br/>Beijing 100044, P.R. China
+<br/>Qiuqi Ruan
+<br/>ACCESS Linnaeus Center
+<br/><b>KTH Royal Institute of Technology, Stockholm</b><br/>School of Electrical Engineering
+</td><td>('3247912', 'Ruicong Zhi', 'ruicong zhi')<br/>('1749334', 'Markus Flierl', 'markus flierl')</td><td>{05120370, qqruan}@bjtu.edu.cn
+<br/>{ruicong, mflierl, bastiaan}@kth.se
+</td></tr><tr><td>b4d209845e1c67870ef50a7c37abaf3770563f3e</td><td>GHODRATI, GAVVES, SNOEK: VIDEO TIME
+<br/>Video Time: Properties, Encoders and
+<br/>Evaluation
+<br/>Cees G. M. Snoek
+<br/>QUVA Lab
+<br/><b>University of Amsterdam</b><br/>Netherlands
+</td><td>('3060081', 'Amir Ghodrati', 'amir ghodrati')<br/>('2304222', 'Efstratios Gavves', 'efstratios gavves')</td><td>{a.ghodrati,egavves,cgmsnoek}@uva.nl
+</td></tr><tr><td>b4d7ca26deb83cec1922a6964c1193e8dd7270e7</td><td></td><td></td><td></td></tr><tr><td>b4ee64022cc3ccd14c7f9d4935c59b16456067d3</td><td>Unsupervised Cross-Domain Image Generation
+</td><td>('40084473', 'Davis Rempe', 'davis rempe')<br/>('9184695', 'Haotian Zhang', 'haotian zhang')</td><td></td></tr><tr><td>b40290a694075868e0daef77303f2c4ca1c43269</td><td>第 40 卷 第 4 期
+<br/>2014 年 4 月
+<br/>自 动 化 学 报
+<br/>ACTA AUTOMATICA SINICA
+<br/>Vol. 40, No. 4
+<br/>April, 2014
+<br/>融合局部与全局信息的头发形状模型
+<br/>王 楠 1 艾海舟 1
+<br/>摘 要 头发在人体表观中具有重要作用, 然而, 因为缺少有效的形状模型, 头发分割仍然是一个非常具有挑战性的问题. 本
+<br/>文提出了一种基于部件的模型, 它对头发形状以及环境变化更加鲁棒. 该模型将局部与全局信息相结合以描述头发的形状. 局
+<br/>部模型通过一系列算法构建, 包括全局形状词表生成, 词表分类器学习以及参数优化; 而全局模型刻画不同的发型, 采用支持
+<br/>向量机 (Support vector machine, SVM) 来学习, 它为所有潜在的发型配置部件并确定势函数. 在消费者图片上的实验证明
+<br/>了本文算法在头发形状多变和复杂环境等条件下的准确性与有效性.
+<br/>关键词 头发形状建模, 部件模型, 部件配置算法, 支持向量机
+<br/>引用格式 王楠, 艾海舟. 融合局部与全局信息的头发形状模型. 自动化学报, 2014, 40(4): 615−623
+<br/>DOI 10.3724/SP.J.1004.2014.00615
+<br/>Combining Local and Global Information for Hair Shape Modeling
+<br/>AI Hai-Zhou1
+</td><td>('3666771', 'WANG Nan', 'wang nan')</td><td></td></tr><tr><td>b4362cd87ad219790800127ddd366cc465606a78</td><td>Sensors 2015, 15, 26756-26768; doi:10.3390/s151026756
+<br/>OPEN ACCESS
+<br/>sensors
+<br/>ISSN 1424-8220
+<br/>www.mdpi.com/journal/sensors
+<br/>Article
+<br/>A Smartphone-Based Automatic Diagnosis System for Facial
+<br/>Nerve Palsy
+<br/><b>Interdisciplinary Program of Bioengineering, Seoul National University, Seoul 03080, Korea</b><br/><b>Head and Neck Surgery, Seoul National University</b><br/><b>College of Medicine, Seoul National University</b><br/>Seoul 03080, Korea
+<br/>Fax: +82-2-870-3863 (Y.H.K.); +82-2-3676-1175 (K.S.P.).
+<br/>Academic Editor: Ki H. Chon
+<br/>Received: 31 July 2015 / Accepted: 19 October 2015 / Published: 21 October 2015
+</td><td>('31812715', 'Hyun Seok Kim', 'hyun seok kim')<br/>('2189639', 'So Young Kim', 'so young kim')<br/>('40219387', 'Young Ho Kim', 'young ho kim')<br/>('1972762', 'Kwang Suk Park', 'kwang suk park')</td><td>E-Mail: khs0330kr@bmsil.snu.ac.kr
+<br/>Boramae Medical Center, Seoul 07061, Korea; E-Mail: sossi81@hanmail.net
+<br/>* Authors to whom correspondence should be addressed; E-Mails: yhkiment@gmail.com (Y.H.K.);
+<br/>pks@bmsil.snu.ac.kr (K.S.P.); Tel.: +82-2-870-2442 (Y.H.K.); +82-2-2072-3135 (K.S.P.);
+</td></tr><tr><td>b4f4b0d39fd10baec34d3412d53515f1a4605222</td><td>Every Picture Tells a Story:
+<br/>Generating Sentences from Images
+<br/>1 Computer Science Department
+<br/><b>University of Illinois at Urbana-Champaign</b><br/>2 Computer Vision Group, School of Mathematics
+<br/><b>Institute for studies in theoretical Physics and Mathematics(IPM</b></td><td>('2270286', 'Ali Farhadi', 'ali farhadi')<br/>('1888731', 'Mohsen Hejrati', 'mohsen hejrati')<br/>('21160985', 'Mohammad Amin Sadeghi', 'mohammad amin sadeghi')<br/>('35527128', 'Peter Young', 'peter young')<br/>('3125805', 'Cyrus Rashtchian', 'cyrus rashtchian')<br/>('3118681', 'Julia Hockenmaier', 'julia hockenmaier')</td><td>{afarhad2,pyoung2,crashtc2,juliahmr,daf}@illinois.edu
+<br/>{m.a.sadeghi,mhejrati}@gmail.com
+</td></tr><tr><td>b4b0bf0cbe1a2c114adde9fac64900b2f8f6fee4</td><td>Autonomous Learning Framework Based on Online Hybrid
+<br/>Classifier for Multi-view Object Detection in Video
+<br/><b>aSchool of Electronic Information and Mechanics, China University of Geosciences, Wuhan, Hubei 430074, China</b><br/><b>bSchool of Automation, China University of Geosciences, Wuhan, Hubei 430074, China</b><br/><b>cHuizhou School Affiliated to Beijing Normal University, Huizhou 516002, China</b><br/>dNational Key Laboratory of Science and Technology on Multispectral Information Processing, School of Automation, Huazhong
+<br/><b>University of Science and Technology, Wuhan, 430074, China</b></td><td>('2588731', 'Dapeng Luo', 'dapeng luo')</td><td></td></tr><tr><td>b43b6551ecc556557b63edb8b0dc39901ed0343b</td><td>ICA AND GABOR REPRESENTATION FOR FACIAL EXPRESSION RECOGNITION
+<br/>I. Buciu C. Kotropoulos
+<br/>and I. Pitas
+<br/><b>Aristotle University of Thessaloniki</b></td><td></td><td>GR-54124, Thessaloniki, Box 451, Greece, {nelu,costas,pitas}@zeus.csd.auth.gr
+</td></tr><tr><td>a255a54b8758050ea1632bf5a88a201cd72656e1</td><td>Nonparametric Facial Feature Localization
+<br/>J. K. Aggarwal
+<br/><b>Computer and Vision Research Center</b><br/><b>The University of Texas at Austin</b></td><td>('2622649', 'Birgi Tamersoy', 'birgi tamersoy')<br/>('1713065', 'Changbo Hu', 'changbo hu')</td><td>birgi@utexas.edu
+<br/>changbo.hu@gmail.com
+<br/>aggarwaljk@mail.utexas.edu
+</td></tr><tr><td>a2b9cee7a3866eb2db53a7d81afda72051fe9732</td><td>Reconstructing a Fragmented Face from an Attacked
+<br/>Secure Identification Protocol
+<br/>Department of Computer Science
+<br/><b>University of Texas at Austin</b><br/>May 6, 2011
+</td><td>('39573884', 'Andy Luong', 'andy luong')<br/>('1794409', 'Kristen Grauman', 'kristen grauman')</td><td>aluong@cs.utexas.edu
+</td></tr><tr><td>a285b6edd47f9b8966935878ad4539d270b406d1</td><td>Sensors 2011, 11, 9573-9588; doi:10.3390/s111009573
+<br/>OPEN ACCESS
+<br/>sensors
+<br/>ISSN 1424-8220
+<br/>www.mdpi.com/journal/sensors
+<br/>Article
+<br/>Facial Expression Recognition Based on Local Binary Patterns
+<br/>and Kernel Discriminant Isomap
+<br/><b>Taizhou University, Taizhou 317000, China</b><br/><b>School of Physics and Electronic Engineering, Taizhou University, Taizhou 318000, China</b><br/>Tel.: +86-576-8513-7178; Fax: ++86-576-8513-7178.
+<br/>Received: 31 August 2011; in revised form: 27 September 2011 / Accepted: 9 October 2011 /
+<br/>Published: 11 October 2011
+</td><td>('48551029', 'Xiaoming Zhao', 'xiaoming zhao')<br/>('1695589', 'Shiqing Zhang', 'shiqing zhang')</td><td>E-Mail: tzczsq@163.com
+<br/>* Author to whom correspondence should be addressed; E-Mail: tzxyzxm@163.com;
+</td></tr><tr><td>a2bd81be79edfa8dcfde79173b0a895682d62329</td><td>Multi-Objective Vehicle Routing Problem Applied to
+<br/>Large Scale Post Office Deliveries
+<br/>Zenia
+<br/><b>aSchool of Technology, University of Campinas</b><br/>Paschoal Marmo, 1888, Limeira, SP, Brazil
+</td><td>('1788152', 'Luis A. A. Meira', 'luis a. a. meira')<br/>('37279198', 'Paulo S. Martins', 'paulo s. martins')<br/>('7809605', 'Mauro Menzori', 'mauro menzori')</td><td></td></tr><tr><td>a2359c0f81a7eb032cff1fe45e3b80007facaa2a</td><td>Towards Structured Analysis of Broadcast Badminton Videos
+<br/>C.V.Jawahar
+<br/>CVIT, KCIS, IIIT Hyderabad
+</td><td>('2964097', 'Anurag Ghosh', 'anurag ghosh')<br/>('48039353', 'Suriya Singh', 'suriya singh')</td><td>{anurag.ghosh, suriya.singh}@research.iiit.ac.in, jawahar@iiit.ac.in
+</td></tr><tr><td>a2eb90e334575d9b435c01de4f4bf42d2464effc</td><td>A NEW SPARSE IMAGE REPRESENTATION
+<br/>ALGORITHM APPLIED TO FACIAL
+<br/>EXPRESSION RECOGNITION
+<br/>Ioan Buciu and Ioannis Pitas
+<br/>Department of Informatics
+<br/><b>Aristotle University of Thessaloniki, GR-541 24 Thessaloniki, Greece</b><br/>Phone: +30-231-099-6361
+<br/>Fax: +30-231-099-8453
+<br/>Web: http://poseidon.csd.auth.gr
+</td><td></td><td>E-mail: nelu,pitas@zeus.csd.auth.gr
+</td></tr><tr><td>a25106a76af723ba9b09308a7dcf4f76d9283589</td><td> Available Online at www.ijcsmc.com
+<br/>International Journal of Computer Science and Mobile Computing
+<br/> A Monthly Journal of Computer Science and Information Technology
+<br/>ISSN 2320–088X
+<br/> IJCSMC, Vol. 3, Issue. 4, April 2014, pg.139 – 146
+<br/> RESEARCH ARTICLE
+<br/>Local Octal Pattern: A Proficient Feature
+<br/>Extraction for Face Recognition
+<br/><b>Computer Science and Engineering, Easwari Engineering College, India</b><br/><b>Computer Science and Engineering, Anna University, India</b></td><td>('3263740', 'S Chitrakala', 's chitrakala')</td><td>1 nithya.jagan90@gamil.com
+<br/>2 suchitra.s@srmeaswari.ac.in
+<br/>3 ckgops@gmail.com
+</td></tr><tr><td>a2d9c9ed29bbc2619d5e03320e48b45c15155195</td><td></td><td></td><td></td></tr><tr><td>a29a22878e1881d6cbf6acff2d0b209c8d3f778b</td><td>Benchmarking Still-to-Video Face Recognition
+<br/>via Partial and Local Linear Discriminant
+<br/>Analysis on COX-S2V Dataset
+<br/><b>Key Lab of Intelligent Information Processing, Institute of Computing Technology</b><br/>Chinese Academy of Sciences, Beijing 100190, China
+<br/><b>University of Chinese Academy of Sciences, Beijing 100049, China</b><br/>3OMRON Social Solutions Co. Ltd, Kyoto, Japan
+<br/><b>College of Information Science and Engineering, Xinjiang University</b></td><td>('7945869', 'Zhiwu Huang', 'zhiwu huang')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('1705483', 'Haihong Zhang', 'haihong zhang')<br/>('1710195', 'Shihong Lao', 'shihong lao')<br/>('1710220', 'Xilin Chen', 'xilin chen')</td><td>{zhiwu.huang, shiguang.shan}@vipl.ict.ac.cn,
+<br/>angelazhang@ssb.kusatsu.omron.co.jp, lao@ari.ncl.omron.co.jp,
+<br/>ghalipk@xju.edu.cn, xilin.chen@vipl.ict.ac.cn
+</td></tr><tr><td>a2429cc2ccbabda891cc5ae340b24ad06fcdbed5</td><td>Discovering the Signatures of Joint Attention in Child-Caregiver Interaction
+<br/>Department of Computer Science
+<br/>Department of Psychology
+<br/><b>Stanford University</b><br/>Department of Psychology
+<br/><b>Stanford University</b><br/>Department of Computer Science
+<br/><b>Stanford University</b><br/>Department of Psychology
+<br/><b>Stanford University</b></td><td>('2536223', 'Michael C. Frank', 'michael c. frank')<br/>('7211962', 'Laura Soriano', 'laura soriano')<br/>('3147852', 'Guido Pusiol', 'guido pusiol')<br/>('3216322', 'Li Fei-Fei', 'li fei-fei')</td><td>guido@cs.stanford.edu
+<br/>lsoriano@stanford.edu
+<br/>feifeili@stanford.edu
+<br/>mcfrank@stanford.edu
+</td></tr><tr><td>a2b54f4d73bdb80854aa78f0c5aca3d8b56b571d</td><td></td><td></td><td></td></tr><tr><td>a27735e4cbb108db4a52ef9033e3a19f4dc0e5fa</td><td>Intention from Motion
+</td><td>('40063519', 'Andrea Zunino', 'andrea zunino')<br/>('3393678', 'Jacopo Cavazza', 'jacopo cavazza')<br/>('34465973', 'Atesh Koul', 'atesh koul')<br/>('37783905', 'Andrea Cavallo', 'andrea cavallo')<br/>('1834966', 'Cristina Becchio', 'cristina becchio')<br/>('1727204', 'Vittorio Murino', 'vittorio murino')</td><td></td></tr><tr><td>a2bcfba155c990f64ffb44c0a1bb53f994b68a15</td><td>The Photoface Database
+<br/><b>Imperial College London</b><br/>180 Queen’s Gate, London SW7 2AZ UK.
+<br/><b>Machine Vision Lab, Faculty of Environment and Technology, University of the West of England</b><br/><b>cid:63) Faculty of Computing, Information Systems and Mathematics, Kingston University London</b><br/>Frenchay Campus, Bristol BS16 1QY UK.
+<br/>Exhibition Road, South Kensington Campus, London SW7 2AZ UK.
+<br/>River House, 53-57 High Street, Kingston upon Thames, Surrey KT1 1LQ UK.
+<br/><b>Imperial College London</b><br/><b>Informatics and Telematics Institute, Centre of Research and Technology - Hellas</b><br/>6th km Xarilaou - Thermi, Thessaloniki 57001 Greece
+</td><td>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')<br/>('1689047', 'Vasileios Argyriou', 'vasileios argyriou')<br/>('2871609', 'Maria Petrou', 'maria petrou')</td><td>{s.zafeiriou,maria.petrou}@imperial.ac.uk, vasileios.argyriou@kinston.ac.uk
+<br/>{mark.hansen,gary.atkinson,melvyn.smith,lyndon.smith}@uwe.ac.uk. ∗
+</td></tr><tr><td>a2fbaa0b849ecc74f34ebb36d1442d63212b29d2</td><td> Volume 5, Issue 6, June 2015 ISSN: 2277 128X
+<br/>International Journal of Advanced Research in
+<br/> Computer Science and Software Engineering
+<br/> Research Paper
+<br/> Available online at: www.ijarcsse.com
+<br/>An Efficient Approach to Face Recognition of Surgically
+<br/>Altered Images
+<br/>Department of computer science and engineering
+<br/><b>SUS college of Engineering and Technology</b><br/>Tangori, District, Mohali, Punjab, India
+</td><td></td><td></td></tr><tr><td>a50b4d404576695be7cd4194a064f0602806f3c4</td><td>In Proceedings of BMVC, Edimburgh, UK, September 2006
+<br/>Efficiently estimating facial expression and
+<br/>illumination in appearance-based tracking
+<br/>†ESCET, U. Rey Juan Carlos
+<br/>C/ Tulip´an, s/n
+<br/>28933 M´ostoles, Spain
+<br/>‡Facultad Inform´atica, UPM
+<br/>Campus de Montegancedo s/n
+<br/>28660 Boadilla del Monte, Spain
+<br/>http://www.dia.fi.upm.es/~pcr
+</td><td>('1778998', 'Luis Baumela', 'luis baumela')</td><td></td></tr><tr><td>a59cdc49185689f3f9efdf7ee261c78f9c180789</td><td>JOURNAL OF INFORMATION SCIENCE AND ENGINEERING XX, XXX-XXX (2015)
+<br/>A New Approach for Learning Discriminative Dictionary
+<br/>for Pattern Classification
+<br/>THUY THI NGUYEN1, BINH THANH HUYNH2 AND SANG VIET DINH2
+<br/>1Faculty of Information Technology
+<br/><b>Vietnam National University of Agriculture</b><br/>Trau Quy town, Gialam, Hanoi, Vietnam
+<br/>2School of Information and Communication Technology
+<br/><b>Hanoi University of Science and Technology</b><br/>No 1, Dai Co Viet Street, Hanoi, Vietnam
+<br/>Dictionary learning (DL) for sparse coding based classification has been widely re-
+<br/>searched in pattern recognition in recent years. Most of the DL approaches focused on
+<br/>the reconstruction performance and the discriminative capability of the learned dictionary.
+<br/>This paper proposes a new method for learning discriminative dictionary for sparse rep-
+<br/>resentation based classification, called Incoherent Fisher Discrimination Dictionary
+<br/>Learning (IFDDL). IFDDL combines the Fisher Discrimination Dictionary Learning
+<br/>(FDDL) method, which learns a structured dictionary where the class labels and the dis-
+<br/>crimination criterion are exploited, and the Incoherent Dictionary Learning (IDL) method,
+<br/>which learns a dictionary where the mutual incoherence between pairs of atoms is ex-
+<br/>ploited. In the combination, instead of considering the incoherence between atoms in a
+<br/>single shared dictionary as in IDL, we propose to incorporate the incoherence between
+<br/>pairs of atoms within each sub-dictionary, which represent a specific object class. This
+<br/>aims to increase discrimination capacity of between basic atoms in sub-dictionaries. The
+<br/>combination allows one to exploit the advantages of both methods and the discrimination
+<br/>capacity of the entire dictionary. Extensive experiments have been conducted on bench-
+<br/>mark image data sets for Face recognition (ORL database, Extended Yale B database, AR
+<br/>database) and Digit recognition (the USPS database). The experimental results show that
+<br/>our proposed method outperforms most of state-of-the-art methods for sparse coding and
+<br/>DL based classification, meanwhile maintaining similar complexity.
+<br/>Keywords: dictionary learning, sparse coding, fisher criterion, pattern recognition, object
+<br/>classification
+<br/>1. INTRODUCTION
+<br/>Sparse representation (or sparse coding) has been widely used in many problems of
+<br/>image processing and computer vision [1, 2], audio processing [3, 4], as well as classifi-
+<br/>cation [5-9] and archived very impressive results. In this model, an input signal is de-
+<br/>composed by a sparse linear combination of a few atoms from an over-complete diction-
+<br/>ary. In general, the goal of sparse representation is to represent input signals by a linear
+<br/>combination of atoms (or words). This is done by minimizing the reconstruction error
+<br/>under a sparsity constraint:
+<br/>min
+<br/>D X
+<br/>||
+<br/>A DX
+<br/>||
+<br/>X
+<br/>||
+<br/>||
+<br/>Received February 15, 2015; revised June 18, 2015; accepted July 9, 2015.
+<br/>Communicated by Hsin-Min Wang.
+<br/>xxx
+<br/>(1)
+</td><td></td><td>E-mail: myngthuy@gmail.com
+<br/>E-mail: {binhht; sangdv}@soict.hust.edu.vn
+</td></tr><tr><td>a5e5094a1e052fa44f539b0d62b54ef03c78bf6a</td><td>Detection without Recognition for Redaction
+<br/><b>Rochester Institute of Technology - 83 Lomb Memorial Drive, Rochester, NY USA</b><br/>2Conduent, Conduent Labs - US, 800 Phillips Rd, MS128, Webster, NY USA, 14580
+</td><td>('3424086', 'Shagan Sah', 'shagan sah')<br/>('40492623', 'Ram Longman', 'ram longman')<br/>('29980978', 'Ameya Shringi', 'ameya shringi')<br/>('1736673', 'Robert Loce', 'robert loce')<br/>('39834006', 'Majid Rabbani', 'majid rabbani')<br/>('32847225', 'Raymond Ptucha', 'raymond ptucha')</td><td>Email: sxs4337@rit.edu
+</td></tr><tr><td>a5c8fc1ca4f06a344b53dc81ebc6d87f54896722</td><td>Learning to see people like people
+<br/><b>University of California, San Diego</b><br/>9500 Gilman Dr, La Jolla, CA 92093
+<br/><b>University of California, San Diego</b><br/>9500 Gilman Dr, La Jolla, CA 92093
+<br/><b>Purdue University</b><br/>610 Purdue Mall, West Lafayette, IN 47907
+<br/>Garrison Cottrell
+<br/><b>University of California, San Diego</b><br/>9500 Gilman Dr, La Jolla, CA 92093
+</td><td>('9409376', 'Amanda Song', 'amanda song')<br/>('13212680', 'Chad Atalla', 'chad atalla')<br/>('11157727', 'Linjie Li', 'linjie li')</td><td>feijuejuanling@gmail.com
+<br/>li2477@purdue.edu
+<br/>catalla@ucsd.edu
+<br/>gary@ucsd.edu
+</td></tr><tr><td>a5ade88747fa5769c9c92ffde9b7196ff085a9eb</td><td>Why is Facial Expression Analysis in the Wild
+<br/>Challenging?
+<br/><b>Institute for Anthropomatics</b><br/><b>Karlsruhe Institute of Technology, Germany</b><br/>Hazım Kemal Ekenel
+<br/>Faculty of Computer and Informatics
+<br/><b>Istanbul Technical University, Turkey</b><br/><b>Institute for Anthropomatics</b><br/><b>Karlsruhe Institute of Technology, Germany</b></td><td>('40303076', 'Tobias Gehrig', 'tobias gehrig')</td><td>tobias.gehrig@kit.edu
+<br/>ekenel@itu.edu.tr
+</td></tr><tr><td>a56c1331750bf3ac33ee07004e083310a1e63ddc</td><td>Vol. xx, pp. x
+<br/>c(cid:13) xxxx Society for Industrial and Applied Mathematics
+<br/>x–x
+<br/>Efficient Point-to-Subspace Query in (cid:96)1 with Application to Robust Object
+<br/>Instance Recognition
+</td><td>('1699024', 'Ju Sun', 'ju sun')<br/>('2580421', 'Yuqian Zhang', 'yuqian zhang')<br/>('1738310', 'John Wright', 'john wright')</td><td></td></tr><tr><td>a54e0f2983e0b5af6eaafd4d3467b655a3de52f4</td><td>Face Recognition Using Convolution Filters and
+<br/>Neural Networks
+<br/>Head, Dept. of E&E,PEC
+<br/>Sec-12, Chandigarh – 160012
+<br/>Department of CSE & IT, PEC
+<br/>Sec-12, Chandigarh – 160012
+<br/>C.P. Singh
+<br/>Physics Department, CFSL,
+<br/>Sec-36, Chandigarh - 160036
+<br/>a
+<br/>of
+<br/>to: (a)
+<br/>potential method
+</td><td>('1734714', 'V. Rihani', 'v. rihani')<br/>('2927010', 'Amit Bhandari', 'amit bhandari')</td><td>vrihani@yahoo.com
+<br/>amit.bhandari@yahoo.com
+<br/>cpureisingh@yahoo.com
+</td></tr><tr><td>a5625cfe16d72bd00e987857d68eb4d8fc3ce4fb</td><td>VFSC: A Very Fast Sparse Clustering to Cluster Faces
+<br/>from Videos
+<br/><b>University of Science, VNU-HCMC, Ho Chi Minh city, Vietnam</b></td><td>('2187730', 'Dinh-Luan Nguyen', 'dinh-luan nguyen')<br/>('1780348', 'Minh-Triet Tran', 'minh-triet tran')</td><td>1212223@student.hcmus.edu.vn
+<br/>tmtriet@fit.hcmus.edu.vn
+</td></tr><tr><td>a5f11c132eaab258a7cea2d681875af09cddba65</td><td>A spatiotemporal model with visual attention for
+<br/>video classification
+<br/>Department of Electrical and Computer Engineering
+<br/><b>University of California San Diego, La Jolla, California, USA</b><br/>paper proposes a spatiotemporal model in which CNN and
+<br/>RNN are concatenated, as shown in Fig. 1.
+</td><td>('2493180', 'Mo Shan', 'mo shan')<br/>('50365495', 'Nikolay Atanasov', 'nikolay atanasov')</td><td>Email: {moshan, natanasov}@eng.ucsd.edu
+</td></tr><tr><td>a546fd229f99d7fe3cf634234e04bae920a2ec33</td><td>RESEARCH ARTICLE
+<br/>Fast Fight Detection
+<br/>1 Department of Systems Engineering and Automation, E.T.S.I. Industriales, Ciudad Real, Castilla-La
+<br/><b>Mancha, Spain, Imperial College, London, UK</b></td><td>('5463808', 'Ismael Serrano Gracia', 'ismael serrano gracia')<br/>('8952654', 'Oscar Deniz Suarez', 'oscar deniz suarez')<br/>('8219927', 'Gloria Bueno Garcia', 'gloria bueno garcia')<br/>('1700968', 'Tae-Kyun Kim', 'tae-kyun kim')</td><td>* ismael.serrano@uclm.es (ISG); oscar.deniz@uclm.es (ODS); gloria.bueno@uclm.es (GBG)
+</td></tr><tr><td>a538b05ebb01a40323997629e171c91aa28b8e2f</td><td>Rectified Linear Units Improve Restricted Boltzmann Machines
+<br/>Geoffrey E. Hinton
+<br/><b>University of Toronto, Toronto, ON M5S 2G4, Canada</b></td><td>('4989209', 'Vinod Nair', 'vinod nair')</td><td>vnair@cs.toronto.edu
+<br/>hinton@cs.toronto.edu
+</td></tr><tr><td>a57ee5a8fb7618004dd1def8e14ef97aadaaeef5</td><td>Fringe Projection Techniques: Whither we are?
+<br/><b>Applied computing and mechanics laboratory, Swiss Federal Institute of Technology, 1015 Lausanne, Switzerland</b><br/>During recent years, the use of fringe projection techniques
+<br/>for generating three-dimensional (3D) surface information has
+<br/>become one of the most active research areas in optical metrol-
+<br/>ogy.
+<br/>Its applications range from measuring the 3D shape of
+<br/>MEMS components to the measurement of flatness of large
+<br/>panels (2.5 m × .45 m). The technique has found various ap-
+<br/>plications in diverse fields: biomedical applications such as
+<br/>3D intra-oral dental measurements [1], non-invasive 3D imag-
+<br/>ing and monitoring of vascular wall deformations [2], human
+<br/>body shape measurement for shape guided radiotherapy treat-
+<br/>ment [3, 4], lower back deformation measurement [5], detection
+<br/>and monitoring of scoliosis [6], inspection of wounds [7, 8]
+<br/>and skin topography measurement for use in cosmetology [9,
+<br/>10, 11];
+<br/>industrial and scientific applications such as char-
+<br/>acterization of MEMS components [12, 13], vibration analy-
+<br/>sis [14, 15], refractometry [16], global measurement of free
+<br/>surface deformations [17, 18], local wall thickness measure-
+<br/>ment of forced sheet metals [19], corrosion analysis [20, 21],
+<br/>measurement of surface roughness [22, 23], reverse engineer-
+<br/>ing [24, 25, 26], quality control of printed circuit board man-
+<br/>ufacturing [27, 28, 29] and heat-flow visualization [30]; kine-
+<br/>matics applications such as measuring the shape and position
+<br/>of a moving object/creature [31, 32] and the study of kinemat-
+<br/>ical parameters of dragonfly in free flight [33, 34]; biometric
+<br/>identification applications such as 3D face reconstruction for
+<br/>the development of robust face recognition systems [35, 36];
+<br/>cultural heritage and preservation [37, 38, 39] etc.
+<br/>One of the outstanding features of some of the fringe pro-
+<br/>jection techniques is their ability to provide high-resolution,
+<br/>whole-field 3D reconstruction of objects in a non-contact man-
+<br/>ner at video frame rates. This feature has backed the technique
+<br/>to pervade new areas of applications such as security systems,
+<br/>gaming and virtual reality. To gain insights into the series of
+<br/>contributions that have helped in unfolding the technique to ac-
+<br/>quire this feature, the reader is referred to the review articles in
+<br/>this special issue by Song Zhang, and Xianyu Su et al.
+<br/>A typical fringe projection profilometry system is shown in
+<br/>Fig 1.
+<br/>It consists of a projection unit, an image acquisition
+<br/>unit and a processing/analysis unit. Measurement of shape
+<br/>through fringe projection techniques involves (1) projecting a
+<br/>structured pattern (usually a sinusoidal fringe pattern) onto the
+<br/>object surface, (2) recording the image of the fringe pattern
+<br/>that is phase modulated by the object height distribution, (3)
+<br/>calculating the phase modulation by analyzing the image with
+<br/>one of the fringe analysis techniques (such as Fourier transform
+<br/>Figure 1: Fringe projection profilometry system
+<br/>method, phase stepping and spatial phase detection methods-
+<br/>most of them generate wrapped phase distribution) (4) using a
+<br/>suitable phase unwrapping algorithm to get continuous phase
+<br/>distribution which is proportional to the object height varia-
+<br/>tions, and finally (5) calibrating the system for mapping the
+<br/>unwrapped phase distribution to real world 3-D co-ordinates.
+<br/>Fig. 2 shows the flowchart that depicts different steps involved
+<br/>in the measurement of height distribution of an object using the
+<br/>fringe projection technique and the role of each step. A pic-
+<br/>torial representation of the same with more details is shown in
+<br/>Fig. 3.
+<br/>During the last three decades, fringe projection techniques
+<br/>have developed tremendously due to the contribution of large
+<br/>number of researchers and the developments can be broadly
+<br/>categorized as follows: design or structure of the pattern
+<br/>used for projection [40, 41, 42, 43, 44, 45, 46, 47, 48, 49],
+<br/>method of generating and projecting the patterns [50, 51, 52,
+<br/>53, 54, 55, 56, 57, 58, 59, 60, 61, 62], study of errors
+<br/>caused by the equipment used and proposing possible correc-
+<br/>tions [63, 64, 65, 66], developing new fringe analysis meth-
+<br/>ods to extract underlying phase distribution [67, 68, 69, 70,
+<br/>71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83], improv-
+<br/>ing existing fringe analysis methods [84, 85, 86, 87, 88, 89,
+<br/>90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100], phase unwrapping
+<br/>algorithms [101, 102, 103, 104, 105, 106, 107, 108, 109], cal-
+<br/>ibration techniques [110, 111, 112, 113, 114, 115, 116, 117,
+<br/>118, 119, 120, 121, 122, 123], scale of measurement (mi-
+<br/>Preprint submitted to Optics and Lasers in Engineering
+<br/>September 1, 2009
+</td><td>('1694155', 'Sai Siva Gorthi', 'sai siva gorthi')<br/>('32741407', 'Pramod Rastogi', 'pramod rastogi')</td><td></td></tr><tr><td>a5ae7fe2bb268adf0c1cd8e3377f478fca5e4529</td><td>Exemplar Hidden Markov Models for Classification of Facial Expressions in
+<br/>Videos
+<br/>Univ. of California San Diego
+<br/>Univ. of Canberra, Australian
+<br/>Univ. of California San Diego
+<br/>Marian Bartlett
+<br/>California, USA
+<br/><b>National University</b><br/>Australia
+<br/>California, USA
+</td><td>('1735697', 'Abhinav Dhall', 'abhinav dhall')<br/>('39707211', 'Karan Sikka', 'karan sikka')</td><td>ksikka@ucsd.edu
+<br/>mbartlett@ucsd.edu
+<br/>abhinav.dhall@anu.edu
+</td></tr><tr><td>a55efc4a6f273c5895b5e4c5009eabf8e5ed0d6a</td><td>818
+<br/>Continuous Head Movement Estimator for
+<br/>Driver Assistance: Issues, Algorithms,
+<br/>and On-Road Evaluations
+<br/>Mohan Manubhai Trivedi, Fellow, IEEE
+</td><td>('1947383', 'Ashish Tawari', 'ashish tawari')<br/>('1841835', 'Sujitha Martin', 'sujitha martin')</td><td></td></tr><tr><td>a51d5c2f8db48a42446cc4f1718c75ac9303cb7a</td><td>Cross-validating Image Description Datasets and Evaluation Metrics
+<br/>Department of Computer Science
+<br/><b>University of Shef eld, UK</b></td><td>('2635321', 'Josiah Wang', 'josiah wang')</td><td>{j.k.wang, r.gaizauskas}@sheffield.ac.uk
+</td></tr><tr><td>a52d9e9daf2cb26b31bf2902f78774bd31c0dd88</td><td>Understanding and Designing Convolutional Networks
+<br/>for Local Recognition Problems
+<br/>Electrical Engineering and Computer Sciences
+<br/><b>University of California at Berkeley</b><br/>Technical Report No. UCB/EECS-2016-97
+<br/>http://www.eecs.berkeley.edu/Pubs/TechRpts/2016/EECS-2016-97.html
+<br/>May 13, 2016
+</td><td>('34703740', 'Jonathan Long', 'jonathan long')</td><td></td></tr><tr><td>a51882cfd0706512bf50e12c0a7dd0775285030d</td><td>Cross-Modal Face Matching: Beyond Viewed
+<br/>Sketches
+<br/><b>Beijing University of Posts and Telecommunications, Beijing, China. 2School of</b><br/><b>Electronic Engineering and Computer Science Queen Mary University of London</b><br/>London E1 4NS, United Kingdom
+</td><td>('2961830', 'Shuxin Ouyang', 'shuxin ouyang')<br/>('1705408', 'Yi-Zhe Song', 'yi-zhe song')<br/>('7823169', 'Xueming Li', 'xueming li')</td><td></td></tr><tr><td>a5c04f2ad6a1f7c50b6aa5b1b71c36af76af06be</td><td></td><td></td><td></td></tr><tr><td>a503eb91c0bce3a83bf6f524545888524b29b166</td><td></td><td></td><td></td></tr><tr><td>a5a44a32a91474f00a3cda671a802e87c899fbb4</td><td>Moments in Time Dataset: one million
+<br/>videos for event understanding
+</td><td>('2526653', 'Mathew Monfort', 'mathew monfort')<br/>('1804424', 'Bolei Zhou', 'bolei zhou')<br/>('3298267', 'Sarah Adel Bargal', 'sarah adel bargal')<br/>('50112310', 'Alex Andonian', 'alex andonian')<br/>('12082007', 'Tom Yan', 'tom yan')<br/>('40544169', 'Kandan Ramakrishnan', 'kandan ramakrishnan')<br/>('33421444', 'Quanfu Fan', 'quanfu fan')<br/>('1856025', 'Carl Vondrick', 'carl vondrick')<br/>('31735139', 'Aude Oliva', 'aude oliva')</td><td></td></tr><tr><td>a52581a7b48138d7124afc7ccfcf8ec3b48359d0</td><td>http://www.jos.org.cn
+<br/>Tel/Fax: +86-10-62562563
+<br/>ISSN 1000-9825, CODEN RUXUEW
+<br/>Journal of Software, Vol.17, No.3, March 2006, pp.525−534
+<br/>DOI: 10.1360/jos170525
+<br/>© 2006 by Journal of Software. All rights reserved.
+<br/>基于 3D 人脸重建的光照、姿态不变人脸识别
+<br/>柴秀娟 1+, 山世光 2, 卿来云 2, 陈熙霖 2, 高 文 1,2
+<br/>1(哈尔滨工业大学 计算机学院,黑龙江 哈尔滨 150001)
+<br/>2(中国科学院 计算技术研究所 ICT-ISVISION 面像识别联合实验室,北京 100080)
+<br/>Pose and Illumination Invariant Face Recognition Based on 3D Face Reconstruction
+<br/><b>Harbin Institute of Technology, Harbin 150001, China</b><br/><b>ICT-ISVISION Joint RandD Laboratory for Face Recognition, Institute of Computer Technology, The Chinese Academy of Sciences</b><br/>Beijing 100080, China)
+<br/>Chai XJ, Shan SG, Qing LY, Chen XL, Gao W. Pose and illumination invariant face recognition based on 3D
+<br/>face reconstruction. Journal of Software, 2006,17(3):525−534. http://www.jos.org.cn/1000-9825/17/525.htm
+</td><td>('2100752', 'GAO Wen', 'gao wen')</td><td>E-mail: jos@iscas.ac.cn
+<br/>+ Corresponding author: Phn: +86-10-58858300 ext 314, Fax: +86-10-58858301, E-mail: xjchai@jdl.ac.cn, http://www.jdl.ac.cn/
+</td></tr><tr><td>bd0265ba7f391dc3df9059da3f487f7ef17144df</td><td>Data-Driven Sparse Sensor Placement
+<br/><b>University of Washington, Seattle, WA 98195, United States</b><br/><b>University of Washington, Seattle, WA 98195, United States</b><br/><b>University of Washington, Seattle, WA 98195, United States</b></td><td>('37119658', 'Krithika Manohar', 'krithika manohar')<br/>('1824880', 'Bingni W. Brunton', 'bingni w. brunton')<br/>('1937069', 'J. Nathan Kutz', 'j. nathan kutz')<br/>('3083169', 'Steven L. Brunton', 'steven l. brunton')</td><td></td></tr><tr><td>bd572e9cbec095bcf5700cb7cd73d1cdc2fe02f4</td><td>Hindawi
+<br/>Computational Intelligence and Neuroscience
+<br/>Volume 2018, Article ID 7068349, 13 pages
+<br/>https://doi.org/10.1155/2018/7068349
+<br/>Review Article
+<br/>Deep Learning for Computer Vision: A Brief Review
+<br/><b>Technological Educational Institute of Athens, 12210 Athens, Greece</b><br/><b>National Technical University of Athens, 15780 Athens, Greece</b><br/>Received 17 June 2017; Accepted 27 November 2017; Published 1 February 2018
+<br/>Academic Editor: Diego Andina
+<br/>License, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly
+<br/>cited.
+<br/>Over the last years deep learning methods have been shown to outperform previous state-of-the-art machine learning techniques
+<br/>in several fields, with computer vision being one of the most prominent cases. This review paper provides a brief overview of some
+<br/>of the most significant deep learning schemes used in computer vision problems, that is, Convolutional Neural Networks, Deep
+<br/>Boltzmann Machines and Deep Belief Networks, and Stacked Denoising Autoencoders. A brief account of their history, structure,
+<br/>advantages, and limitations is given, followed by a description of their applications in various computer vision tasks, such as object
+<br/>detection, face recognition, action and activity recognition, and human pose estimation. Finally, a brief overview is given of future
+<br/>directions in designing deep learning schemes for computer vision problems and the challenges involved therein.
+<br/>1. Introduction
+<br/>Deep learning allows computational models of multiple
+<br/>processing layers to learn and represent data with multiple
+</td><td>('3393001', 'Nikolaos Doulamis', 'nikolaos doulamis')<br/>('2594647', 'Athanasios Voulodimos', 'athanasios voulodimos')<br/>('3393144', 'Anastasios Doulamis', 'anastasios doulamis')<br/>('1806369', 'Eftychios Protopapadakis', 'eftychios protopapadakis')<br/>('2594647', 'Athanasios Voulodimos', 'athanasios voulodimos')</td><td>Correspondence should be addressed to Athanasios Voulodimos; thanosv@mail.ntua.gr
+</td></tr><tr><td>bd6099429bb7bf248b1fd6a1739e744512660d55</td><td>Submitted 11/09; Revised 5/10; Published 8/10
+<br/>Regularized Discriminant Analysis, Ridge Regression and Beyond
+<br/><b>College of Computer Science and Technology</b><br/><b>Zhejiang University</b><br/>Hangzhou, Zhejiang 310027, China
+<br/>Computer Science Division and Department of Statistics
+<br/><b>University of California</b><br/>Berkeley, CA 94720-1776, USA
+<br/>Editor: Inderjit Dhillon
+</td><td>('1739312', 'Zhihua Zhang', 'zhihua zhang')<br/>('1779165', 'Guang Dai', 'guang dai')<br/>('1682914', 'Congfu Xu', 'congfu xu')<br/>('1694621', 'Michael I. Jordan', 'michael i. jordan')</td><td>ZHZHANG@ZJU.EDU.CN
+<br/>GUANG.GDAI@GMAIL.COM
+<br/>XUCONGFU@ZJU.EDU.CN
+<br/>JORDAN@CS.BERKELEY.EDU
+</td></tr><tr><td>bd0e100a91ff179ee5c1d3383c75c85eddc81723</td><td>Okutama-Action: An Aerial View Video Dataset for Concurrent Human Action
+<br/>Detection∗
+<br/><b>Technical University of Munich, Munich, 2KTH Royal Institute of Technology, Stockholm</b><br/><b>Polytechnic University of Catalonia, Barcelona, 4National Taiwan University, Taipei, 5University of</b><br/><b>Tokyo, Tokyo, 6National Institute of Informatics, Tokyo</b></td><td>('39393520', 'Mohammadamin Barekatain', 'mohammadamin barekatain')<br/>('19185012', 'Hsueh-Fu Shih', 'hsueh-fu shih')<br/>('47427148', 'Samuel Murray', 'samuel murray')<br/>('1943224', 'Kotaro Nakayama', 'kotaro nakayama')<br/>('47972365', 'Yutaka Matsuo', 'yutaka matsuo')<br/>('2356111', 'Helmut Prendinger', 'helmut prendinger')</td><td>m.barekatain@tum.de, miquelmr@kth.se, r03945026@ntu.edu.tw, samuelmu@kth.se,
+<br/>nakayama@weblab.t.u-tokyo.ac.jp, matsuo@weblab.t.u-tokyo.ac.jp, helmut@nii.ac.jp
+</td></tr><tr><td>bd8f3fef958ebed5576792078f84c43999b1b207</td><td>BUAA-iCC at ImageCLEF 2015 Scalable
+<br/>Concept Image Annotation Challenge
+<br/><b>Intelligent Recognition and Image Processing Lab, Beihang University, Beijing</b><br/>100191, P.R.China
+<br/>http://irip.buaa.edu.cn/
+<br/><b>School of Information Technology and Management, University of International</b><br/>Business and Economics, Beijing 100029, P.R.China
+</td><td>('40013375', 'Yunhong Wang', 'yunhong wang')<br/>('2097309', 'Jiaxin Chen', 'jiaxin chen')<br/>('34288046', 'Ningning Liu', 'ningning liu')<br/>('1712838', 'Li Zhang', 'li zhang')</td><td>yhwang@buaa.edu.cn; chenjiaxinX@gmail.com.
+<br/>ningning.liu@uibe.edu.cn
+</td></tr><tr><td>bd9eb65d9f0df3379ef96e5491533326e9dde315</td><td></td><td></td><td></td></tr><tr><td>bd07d1f68486052b7e4429dccecdb8deab1924db</td><td></td><td></td><td></td></tr><tr><td>bd0201b32e7eca7818468f2b5cb1fb4374de75b9</td><td> International Research Journal of Engineering and Technology (IRJET) e-ISSN: 2395 -0056
+<br/> Volume: 02 Issue: 02 | May-2015 www.irjet.net p-ISSN: 2395-0072
+<br/>FACIAL EMOTION EXPRESSIONS RECOGNITION WITH BRAIN ACTIVITES
+<br/>USING KINECT SENSOR V2
+<br/>Ph.D student Hesham A. ALABBASI, Doctoral School of Automatic Control and Computers,
+<br/><b>University POLITEHNICA of Bucharest, Bucharest, Romania</b><br/>Bucharest, Bucharest, Romania.
+<br/><b>Alin Moldoveanu, Faculty of Automatic Control and Computers, University POLITEHNICA of Bucharest</b><br/>Bucharest, Romania.
+<br/><b>Ph.D student Zaid Shhedi, Doctoral School of Automatic Control and Computers, University</b><br/>POLITEHNICA of Bucharest, Bucharest, Romania.
+<br/>is emotional
+<br/>sensor, Face tracking SDK, Neural network, Brain
+<br/>activities.
+<br/>Key Words: Facial expressions, Facial features, Kinect
+<br/>visual Studio 2013 (C++) and Matlab 2015 to recognize
+<br/>eight expressions.
+<br/>---------------------------------------------------------------------***---------------------------------------------------------------------
+</td><td>('3124644', 'Florica Moldoveanu', 'florica moldoveanu')</td><td></td></tr><tr><td>bd8e2d27987be9e13af2aef378754f89ab20ce10</td><td></td><td></td><td></td></tr><tr><td>bd236913cfe07896e171ece9bda62c18b8c8197e</td><td>Deep Learning with Energy-efficient Binary Gradient Cameras
+<br/>∗NVIDIA,
+<br/><b>Carnegie Mellon University</b></td><td>('39131476', 'Suren Jayasuriya', 'suren jayasuriya')<br/>('39775678', 'Orazio Gallo', 'orazio gallo')<br/>('2931118', 'Jinwei Gu', 'jinwei gu')<br/>('1690538', 'Jan Kautz', 'jan kautz')</td><td></td></tr><tr><td>bd379f8e08f88729a9214260e05967f4ca66cd65</td><td>Learning Compositional Visual Concepts with Mutual Consistency
+<br/><b>School of Electrical and Computer Engineering, Cornell University, Ithaca NY</b><br/><b>Nancy E. and Peter C. Meinig School of Biomedical Engineering, Cornell University, Ithaca NY</b><br/>3Siemens Corporate Technology, Princeton NJ
+<br/>Figure 1: We propose ConceptGAN, a framework that can jointly learn, transfer and compose concepts to generate semantically meaningful
+<br/>images, even in subdomains with no training data (highlighted) while the state-of-the-art methods such as CycleGAN [49] fail to do so.
+</td><td>('3303727', 'Yunye Gong', 'yunye gong')<br/>('1976152', 'Srikrishna Karanam', 'srikrishna karanam')<br/>('3311781', 'Ziyan Wu', 'ziyan wu')<br/>('2692770', 'Kuan-Chuan Peng', 'kuan-chuan peng')<br/>('39497207', 'Jan Ernst', 'jan ernst')<br/>('1767099', 'Peter C. Doerschuk', 'peter c. doerschuk')</td><td>{yg326,pd83}@cornell.edu,{first.last}@siemens.com
+</td></tr><tr><td>bd13f50b8997d0733169ceba39b6eb1bda3eb1aa</td><td>Occlusion Coherence: Detecting and Localizing Occluded Faces
+<br/><b>University of California at Irvine, Irvine, CA</b></td><td>('1898210', 'Golnaz Ghiasi', 'golnaz ghiasi')<br/>('3157443', 'Charless C. Fowlkes', 'charless c. fowlkes')</td><td></td></tr><tr><td>bd21109e40c26af83c353a3271d0cd0b5c4b4ade</td><td>Attentive Sequence to Sequence Translation for Localizing Clips of Interest
+<br/>by Natural Language Descriptions
+<br/><b>Zhejiang University</b><br/><b>University of Technology Sydney</b><br/><b>Zhejiang University</b><br/><b>University of Technology Sydney</b><br/><b>Hikvision Research Institute</b></td><td>('1819984', 'Ke Ning', 'ke ning')<br/>('2948393', 'Linchao Zhu', 'linchao zhu')<br/>('50140409', 'Ming Cai', 'ming cai')<br/>('1698559', 'Yi Yang', 'yi yang')<br/>('2603725', 'Di Xie', 'di xie')</td><td>ningke@zju.edu.cn
+<br/>zhulinchao7@gmail.com
+<br/>Yi.Yang@uts.edu.au
+<br/>xiedi@hikvision.com
+</td></tr><tr><td>bd8b7599acf53e3053aa27cfd522764e28474e57</td><td>Learning Long Term Face Aging Patterns
+<br/>from Partially Dense Aging Databases
+<br/>Jinli Suo1,2,3
+<br/><b>Graduate University of Chinese Academy of Sciences(CAS), 100190, China</b><br/>2Key Lab of Intelligent Information Processing of CAS,
+<br/><b>Institute of Computing Technology, CAS, Beijing, 100190, China</b><br/><b>Lotus Hill Institute for Computer Vision and Information Science, 436000, China</b><br/><b>School of Electronic Engineering and Computer Science, Peking University, 100871, China</b></td><td>('1698902', 'Wen Gao', 'wen gao')<br/>('1710220', 'Xilin Chen', 'xilin chen')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')</td><td>wgao@pku.edu.cn
+<br/>jlsuo@jdl.ac.cn
+<br/>{xlchen,sgshan}@ict.ac.cn
+</td></tr><tr><td>bd8f77b7d3b9d272f7a68defc1412f73e5ac3135</td><td>SphereFace: Deep Hypersphere Embedding for Face Recognition
+<br/><b>Georgia Institute of Technology</b><br/><b>Carnegie Mellon University</b><br/><b>Sun Yat-Sen University</b></td><td>('36326884', 'Weiyang Liu', 'weiyang liu')<br/>('1751019', 'Zhiding Yu', 'zhiding yu')<br/>('1779453', 'Le Song', 'le song')</td><td>wyliu@gatech.edu, {yandongw,yzhiding}@andrew.cmu.edu, lsong@cc.gatech.edu
+</td></tr><tr><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td><td>SCUT-FBP: A Benchmark Dataset for
+<br/>Facial Beauty Perception
+<br/>School of Electronic and Information Engineering
+<br/><b>South China University of Technology, Guangzhou 510640, China</b></td><td>('2361818', 'Duorui Xie', 'duorui xie')<br/>('2521432', 'Lingyu Liang', 'lingyu liang')<br/>('1703322', 'Lianwen Jin', 'lianwen jin')<br/>('1720015', 'Jie Xu', 'jie xu')<br/>('4997446', 'Mengru Li', 'mengru li')</td><td>*Email: lianwen.jin@gmail.com
+</td></tr><tr><td>bd78a853df61d03b7133aea58e45cd27d464c3cf</td><td>A Sparse Representation Approach to Facial
+<br/>Expression Recognition Based on LBP plus LFDA
+<br/>Computer science and Engineering Department,
+<br/><b>Government College of Engineering, Aurangabad [Autonomous</b><br/>Station Road, Aurangabad, Maharashtra, India.
+</td><td></td><td></td></tr><tr><td>bd9c9729475ba7e3b255e24e7478a5acb393c8e9</td><td>Interpretable Partitioned Embedding for Customized Fashion Outfit
+<br/>Composition
+<br/><b>Zhejiang University, Hangzhou, China</b><br/><b>Arizona State University, Phoenix, Arizona</b><br/>♭Alibaba Group, Hangzhou, China
+</td><td>('7357719', 'Zunlei Feng', 'zunlei feng')<br/>('46218293', 'Zhenyun Yu', 'zhenyun yu')<br/>('7607499', 'Yezhou Yang', 'yezhou yang')<br/>('9633703', 'Yongcheng Jing', 'yongcheng jing')<br/>('46179768', 'Junxiao Jiang', 'junxiao jiang')<br/>('1727111', 'Mingli Song', 'mingli song')</td><td></td></tr><tr><td>bd2d7c7f0145028e85c102fe52655c2b6c26aeb5</td><td>Attribute-based People Search: Lessons Learnt from a
+<br/>Practical Surveillance System
+<br/>Rogerio Feris
+<br/>IBM Watson
+<br/>http://rogerioferis.com
+<br/>Russel Bobbitt
+<br/>IBM Watson
+<br/>Lisa Brown
+<br/>IBM Watson
+<br/>IBM Watson
+</td><td>('1767897', 'Sharath Pankanti', 'sharath pankanti')</td><td>bobbitt@us.ibm.com
+<br/>lisabr@us.ibm.com
+<br/>sharat@us.ibm.com
+</td></tr><tr><td>bd9157331104a0708aa4f8ae79b7651a5be797c6</td><td>SLAC: A Sparsely Labeled Dataset for Action Classification and Localization
+<br/><b>Massachusetts Institute of Technology, 2Facebook Applied Machine Learning, 3Dartmouth College</b></td><td>('1683002', 'Hang Zhao', 'hang zhao')<br/>('3305169', 'Zhicheng Yan', 'zhicheng yan')<br/>('1804138', 'Heng Wang', 'heng wang')<br/>('1732879', 'Lorenzo Torresani', 'lorenzo torresani')<br/>('1690178', 'Antonio Torralba', 'antonio torralba')</td><td>{hangzhao, torralba}@mit.edu, {zyan3, hengwang, torresani}@fb.com
+</td></tr><tr><td>bdbba95e5abc543981fb557f21e3e6551a563b45</td><td>International Journal of Computational Intelligence and Applications
+<br/>Vol. 17, No. 2 (2018) 1850008 (15 pages)
+<br/>#.c The Author(s)
+<br/>DOI: 10.1142/S1469026818500086
+<br/>Speeding up the Hyperparameter Optimization of Deep
+<br/>Convolutional Neural Networks
+<br/>Knowledge Technology, Department of Informatics
+<br/>Universit€at Hamburg
+<br/>Vogt-K€olln-Str. 30, Hamburg 22527, Germany
+<br/>Received 15 August 2017
+<br/>Accepted 23 March 2018
+<br/>Published 18 June 2018
+<br/>Most learning algorithms require the practitioner to manually set the values of many hyper-
+<br/>parameters before the learning process can begin. However, with modern algorithms, the
+<br/>evaluation of a given hyperparameter setting can take a considerable amount of time and the
+<br/>search space is often very high-dimensional. We suggest using a lower-dimensional represen-
+<br/>tation of the original data to quickly identify promising areas in the hyperparameter space. This
+<br/>information can then be used to initialize the optimization algorithm for the original, higher-
+<br/>dimensional data. We compare this approach with the standard procedure of optimizing the
+<br/>hyperparameters only on the original input.
+<br/>We perform experiments with various state-of-the-art hyperparameter optimization algo-
+<br/>rithms such as random search, the tree of parzen estimators (TPEs), sequential model-based
+<br/>algorithm con¯guration (SMAC), and a genetic algorithm (GA). Our experiments indicate that
+<br/>it is possible to speed up the optimization process by using lower-dimensional data repre-
+<br/>sentations at the beginning, while increasing the dimensionality of the input later in the opti-
+<br/>mization process. This is independent of the underlying optimization procedure, making the
+<br/>approach promising for many existing hyperparameter optimization algorithms.
+<br/>Keywords: Hyperparameter optimization; hyperparameter importance; convolutional neural
+<br/>networks; genetic algorithm; Bayesian optimization.
+<br/>1. Introduction
+<br/>The performance of many contemporary machine learning algorithms depends cru-
+<br/>cially on the speci¯c initialization of hyperparameters such as the general architec-
+<br/>ture, the learning rate, regularization parameters, and many others.1,2 Indeed,
+<br/>This is an Open Access article published by World Scienti¯c Publishing Company. It is distributed under
+<br/>the terms of the Creative Commons Attribution 4.0 (CC-BY) License. Further distribution of this work is
+<br/>permitted, provided the original work is properly cited.
+<br/>1850008-1
+<br/>Int. J. Comp. Intel. Appl. 2018.17. Downloaded from www.worldscientific.comby WSPC on 07/18/18. Re-use and distribution is strictly not permitted, except for Open Access articles. </td><td>('11634287', 'Tobias Hinz', 'tobias hinz')<br/>('2632932', 'Sven Magg', 'sven magg')<br/>('1736513', 'Stefan Wermter', 'stefan wermter')</td><td>*hinz@informatik.uni-hamburg.de
+<br/>†navarro@informatik.uni-hamburg.de
+<br/>‡magg@informatik.uni-hamburg.de
+<br/>wermter@informatik.uni-hamburg.de
+</td></tr><tr><td>bd70f832e133fb87bae82dfaa0ae9d1599e52e4b</td><td>Combining Classifier for Face Identification
+<br/><b>HCI Lab., Samsung Advanced Institute of Technology, Yongin, Korea</b><br/><b>Centre for Vision, Speech and Signal Processing, University of Surrey, Guildford, UK</b></td><td>('1700968', 'Tae-Kyun Kim', 'tae-kyun kim')<br/>('1748684', 'Josef Kittler', 'josef kittler')</td><td>taekyun@sait.samsung.co.kr
+<br/>J.Kittler@surrey.ac.uk
+</td></tr><tr><td>d1dfdc107fa5f2c4820570e369cda10ab1661b87</td><td>Super SloMo: High Quality Estimation of Multiple Intermediate Frames
+<br/>for Video Interpolation
+<br/>Erik Learned-Miller1
+<br/>1UMass Amherst
+<br/>2NVIDIA 3UC Merced
+</td><td>('40175280', 'Huaizu Jiang', 'huaizu jiang')<br/>('3232265', 'Deqing Sun', 'deqing sun')<br/>('2745026', 'Varun Jampani', 'varun jampani')<br/>('1715634', 'Ming-Hsuan Yang', 'ming-hsuan yang')<br/>('1690538', 'Jan Kautz', 'jan kautz')</td><td>{hzjiang,elm}@cs.umass.edu,{deqings,vjampani,jkautz}@nvidia.com, mhyang@ucmerced.edu
+</td></tr><tr><td>d185f4f05c587e23c0119f2cdfac8ea335197ac0</td><td> 33
+<br/>Chapter III
+<br/>Facial Expression Analysis,
+<br/>Modeling and Synthesis:
+<br/>Overcoming the Limitations of
+<br/>Artificial Intelligence with the Art
+<br/>of the Soluble
+<br/><b>Eindhoven University of Technology, The Netherlands</b><br/><b>Ritsumeikan University, Japan</b></td><td>('1728894', 'Christoph Bartneck', 'christoph bartneck')<br/>('1709339', 'Michael J. Lyons', 'michael j. lyons')</td><td></td></tr><tr><td>d140c5add2cddd4a572f07358d666fe00e8f4fe1</td><td>Statistically Learned Deformable Eye Models
+<br/><b>Imperial College London</b></td><td>('2575567', 'Joan Alabort-i-Medina', 'joan alabort-i-medina')<br/>('37539937', 'Bingqing Qu', 'bingqing qu')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')</td><td></td></tr><tr><td>d1dae2993bdbb2667d1439ff538ac928c0a593dc</td><td>International Journal of Computational Intelligence and Informatics, Vol. 3: No. 1, April - June 2013
+<br/>Gamma Correction Technique Based Feature Extraction
+<br/>for Face Recognition System
+<br/>P Kumar
+<br/>Electronics and Communication Engineering
+<br/><b>K S Rangasamy College of Technology</b><br/>Electronics and Communication Engineering
+<br/><b>K S Rangasamy College of Technology</b><br/>Tamilnadu, India
+<br/>Tamilnadu, India
+</td><td>('9316812', 'B Vinothkumar', 'b vinothkumar')</td><td>Vinoeee58@gmail.com
+<br/>kumar@ksrct.ac.in
+</td></tr><tr><td>d1f58798db460996501f224fff6cceada08f59f9</td><td>Transferrable Representations for Visual Recognition
+<br/>Jeffrey Donahue
+<br/>Electrical Engineering and Computer Sciences
+<br/><b>University of California at Berkeley</b><br/>Technical Report No. UCB/EECS-2017-106
+<br/>http://www2.eecs.berkeley.edu/Pubs/TechRpts/2017/EECS-2017-106.html
+<br/>May 14, 2017
+</td><td></td><td></td></tr><tr><td>d115c4a66d765fef596b0b171febca334cea15b5</td><td>Combining Stacked Denoising Autoencoders and
+<br/>Random Forests for Face Detection
+<br/><b>Swansea University</b><br/>Singleton Park, Swansea SA2 8PP, United Kingdom
+<br/>http://csvision.swan.ac.uk
+</td><td>('6248353', 'Jingjing Deng', 'jingjing deng')<br/>('2168049', 'Xianghua Xie', 'xianghua xie')<br/>('13154093', 'Michael Edwards', 'michael edwards')</td><td>*x.xie@swansea.ac.uk
+</td></tr><tr><td>d1a43737ca8be02d65684cf64ab2331f66947207</td><td>IJB–S: IARPA Janus Surveillance Video Benchmark (cid:3)
+<br/>Kevin O’Connor z
+</td><td>('1718102', 'Nathan D. Kalka', 'nathan d. kalka')<br/>('48889427', 'Stephen Elliott', 'stephen elliott')<br/>('8033275', 'Brianna Maze', 'brianna maze')<br/>('40205896', 'James A. Duncan', 'james a. duncan')<br/>('40577714', 'Julia Bryan', 'julia bryan')<br/>('6680444', 'Anil K. Jain', 'anil k. jain')</td><td></td></tr><tr><td>d122d66c51606a8157a461b9d7eb8b6af3d819b0</td><td>Vol-3 Issue-4 2017
+<br/>IJARIIE-ISSN(O)-2395-4396
+<br/>AUTOMATED RECOGNITION OF FACIAL
+<br/>EXPRESSIONS
+<br/><b>METs Institute of Engineering</b><br/>Adgoan,Nashik,Maharashtra.
+<br/>Adgoan, Nashik, Maharashtra.
+<br/>
+</td><td></td><td></td></tr><tr><td>d142e74c6a7457e77237cf2a3ded4e20f8894e1a</td><td>HUMAN EMOTION ESTIMATION FROM
+<br/>EEG AND FACE USING STATISTICAL
+<br/>FEATURES AND SVM
+<br/>1,3Department of Information Technologies,
+<br/><b>University of telecommunications and post, Sofia, Bulgaria</b><br/> 2,4Department of Telecommunications,
+<br/><b>University of telecommunications and post, Sofia, Bulgaria</b></td><td>('40110188', 'Strahil Sokolov', 'strahil sokolov')<br/>('3050423', 'Yuliyan Velchev', 'yuliyan velchev')<br/>('2283935', 'Svetla Radeva', 'svetla radeva')<br/>('2512835', 'Dimitar Radev', 'dimitar radev')</td><td></td></tr><tr><td>d1082eff91e8009bf2ce933ac87649c686205195</td><td>(will be inserted by the editor)
+<br/>Pruning of Error Correcting Output Codes by
+<br/>Optimization of Accuracy-Diversity Trade off
+<br/>S¨ureyya ¨Oz¨o˘g¨ur Aky¨uz · Terry
+<br/>Windeatt · Raymond Smith
+<br/>Received: date / Accepted: date
+</td><td></td><td></td></tr><tr><td>d1959ba4637739dcc6cc6995e10fd41fd6604713</td><td><b>Rochester Institute of Technology</b><br/>RIT Scholar Works
+<br/>Theses
+<br/>5-2017
+<br/>Thesis/Dissertation Collections
+<br/>Deep Learning for Semantic Video Understanding
+<br/>Follow this and additional works at: http://scholarworks.rit.edu/theses
+<br/>Recommended Citation
+<br/><b>Kulhare, Sourabh, "Deep Learning for Semantic Video Understanding" (2017). Thesis. Rochester Institute of Technology. Accessed</b><br/>from
+<br/>This Thesis is brought to you for free and open access by the Thesis/Dissertation Collections at RIT Scholar Works. It has been accepted for inclusion
+</td><td>('10376365', 'Sourabh Kulhare', 'sourabh kulhare')</td><td>sk1846@rit.edu
+<br/>in Theses by an authorized administrator of RIT Scholar Works. For more information, please contact ritscholarworks@rit.edu.
+</td></tr><tr><td>d1881993c446ea693bbf7f7d6e750798bf958900</td><td>Large-Scale YouTube-8M Video Understanding with Deep Neural Networks
+<br/><b>Institute for System Programming</b><br/><b>Institute for System Programming</b><br/>ispras.ru
+</td><td>('34125461', 'Manuk Akopyan', 'manuk akopyan')<br/>('19228325', 'Eshsou Khashba', 'eshsou khashba')</td><td>manuk@ispras.ru
+</td></tr><tr><td>d1d6f1d64a04af9c2e1bdd74e72bd3ffac329576</td><td>Neural Face Editing with Intrinsic Image Disentangling
+<br/><b>Stony Brook University 2Adobe Research 3 CentraleSup elec, Universit e Paris-Saclay</b></td><td>('2496409', 'Zhixin Shu', 'zhixin shu')</td><td>1{zhshu,samaras}@cs.stonybrook.edu
+<br/>2{yumer,hadap,sunkaval,elishe}@adobe.com
+</td></tr><tr><td>d69df51cff3d6b9b0625acdcbea27cd2bbf4b9c0</td><td></td><td></td><td></td></tr><tr><td>d61578468d267c2d50672077918c1cda9b91429b</td><td>Available Online at www.ijcsmc.com
+<br/>International Journal of Computer Science and Mobile Computing
+<br/> A Monthly Journal of Computer Science and Information Technology
+<br/>ISSN 2320–088X
+<br/>IJCSMC, Vol. 3, Issue. 9, September 2014, pg.314 – 323
+<br/> RESEARCH ARTICLE
+<br/>Face Image Retrieval Using Pose Specific
+<br/>Set Sparse Feature Representation
+<br/><b>Viswajyothi College of Engineering and Technology Kerala, India</b><br/><b>Viswajyothi College of Engineering and Technology Kerala, India</b></td><td>('3163376', 'Sebastian George', 'sebastian george')</td><td>afeefengg@gmail.com
+</td></tr><tr><td>d687fa99586a9ad229284229f20a157ba2d41aea</td><td>Journal of Intelligent Learning Systems and Applications, 2013, 5, 115-122
+<br/>http://dx.doi.org/10.4236/jilsa.2013.52013 Published Online May 2013 (http://www.scirp.org/journal/jilsa)
+<br/>115
+<br/>Face Recognition Based on Wavelet Packet Coefficients
+<br/>and Radial Basis Function Neural Networks
+<br/><b>Virudhunagar Hindu Nadars Senthikumara Nadar College, Virudhunagar</b><br/><b>Computer Applications, Ayya Nadar Janaki Ammal College, Sivakasi, India</b><br/>Received December 12th, 2012; revised April 19th, 2013; accepted April 26th, 2013
+<br/>tributed under the Creative Commons Attribution License, which permits unrestricted use, distribution, and reproduction in any me-
+<br/>dium, provided the original work is properly cited.
+</td><td></td><td>Email: *kathirvalavakumar@yahoo.com, jebaarul07@yahoo.com
+</td></tr><tr><td>d69719b42ee53b666e56ed476629a883c59ddf66</td><td>Learning Facial Action Units from Web Images with
+<br/>Scalable Weakly Supervised Clustering
+<br/>Aleix M. Martinez3
+<br/><b>School of Comm. and Info. Engineering, Beijing University of Posts and Telecom</b><br/><b>Robotics Institute, Carnegie Mellon University</b><br/><b>The Ohio State University</b></td><td>('2393320', 'Kaili Zhao', 'kaili zhao')</td><td></td></tr><tr><td>d647099e571f9af3a1762f895fd8c99760a3916e</td><td>Exploring Facial Expressions with Compositional Features
+<br/><b>Rutgers University</b><br/>110 Frelinghuysen Road, Piscataway, NJ 08854, USA
+</td><td>('39606160', 'Peng Yang', 'peng yang')<br/>('1734954', 'Qingshan Liu', 'qingshan liu')<br/>('1711560', 'Dimitris N. Metaxas', 'dimitris n. metaxas')</td><td>peyang@cs.rutgers.edu, qsliu@cs.rutgers.edu, dnm@cs.rutgers.edu
+</td></tr><tr><td>d69271c7b77bc3a06882884c21aa1b609b3f76cc</td><td>FaceBoxes: A CPU Real-time Face Detector with High Accuracy
+<br/><b>CBSR and NLPR, Institute of Automation, Chinese Academy of Sciences, Beijing, China</b><br/><b>University of Chinese Academy of Sciences, Beijing, China</b></td><td>('3220556', 'Shifeng Zhang', 'shifeng zhang')</td><td>{shifeng.zhang,xiangyu.zhu,zlei,hailin.shi,xiaobo.wang,szli}@nlpr.ia.ac.cn
+</td></tr><tr><td>d6a9ea9b40a7377c91c705f4c7f206a669a9eea2</td><td>Visual Representations for Fine-grained
+<br/>Categorization
+<br/>Electrical Engineering and Computer Sciences
+<br/><b>University of California at Berkeley</b><br/>Technical Report No. UCB/EECS-2015-244
+<br/>http://www.eecs.berkeley.edu/Pubs/TechRpts/2015/EECS-2015-244.html
+<br/>December 17, 2015
+</td><td>('40565777', 'Ning Zhang', 'ning zhang')</td><td></td></tr><tr><td>d6ca3dc01de060871839d5536e8112b551a7f9ff</td><td>Sleep-deprived Fatigue Pattern Analysis using Large-Scale Selfies from Social Media
+<br/>Computer Science Department
+<br/>Computer Science Department
+<br/><b>University of Rochester</b><br/><b>University of Rochester</b><br/>Rochester, USA
+<br/>Rochester, USA
+<br/>Department of Psychiatry
+<br/><b>University of Rochester</b><br/>Rochester, USA
+<br/>Computer Science Department
+<br/><b>University of Rochester</b><br/>Rochester, USA
+</td><td>('1901094', 'Xuefeng Peng', 'xuefeng peng')<br/>('33642939', 'Jiebo Luo', 'jiebo luo')<br/>('39226140', 'Catherine Glenn', 'catherine glenn')<br/>('35678395', 'Li-Kai Chi', 'li-kai chi')<br/>('13171221', 'Jingyao Zhan', 'jingyao zhan')</td><td>xpeng4@u.rochester.edu
+<br/>jiebo.luo@rochester.edu
+<br/>catherine.glenn@rochester.edu
+<br/>{lchi3, jzhan}@u.rochester.edu
+</td></tr><tr><td>d671a210990f67eba9b2d3dda8c2cb91575b4a7a</td><td>Journal of Machine Learning Research ()
+<br/>Submitted ; Published
+<br/>Social Environment Description from Data Collected with a
+<br/>Wearable Device
+<br/>Computer Vision Center
+<br/><b>Autonomous University of Barcelona</b><br/>Barcelona, Spain
+<br/>Editor: Radeva Petia, Pujol Oriol
+</td><td>('7629833', 'Pierluigi Casale', 'pierluigi casale')</td><td>pierluigi@cvc.uab.cat
+</td></tr><tr><td>d61e794ec22a4d4882181da17316438b5b24890f</td><td>Detecting Sensor Level Spoof Attacks Using Joint
+<br/>Encoding of Temporal and Spatial Features
+<br/><b>The Hong Kong Polytechnic University, Hong Kong</b></td><td>('1690410', 'Jun Liu', 'jun liu')<br/>('1684016', 'Ajay Kumar', 'ajay kumar')</td><td></td></tr><tr><td>d65b82b862cf1dbba3dee6541358f69849004f30</td><td>Contents lists available at ScienceDirect
+<br/>j o u r n a l h o m e p a g e : w w w . e l s e v i e r . c o m / l o c a t e / c v i u
+<br/>2.5D Elastic graph matching
+<br/><b>Imperial College, London, UK</b><br/>a r t i c l e
+<br/>i n f o
+<br/>a b s t r a c t
+<br/>Article history:
+<br/>Received 29 November 2009
+<br/>Accepted 1 December 2010
+<br/>Available online 17 March 2011
+<br/>Keywords:
+<br/>Elastic graph matching
+<br/>3D face recognition
+<br/>Multiscale mathematical morphology
+<br/>Geodesic distances
+<br/>In this paper, we propose novel elastic graph matching (EGM) algorithms for face recognition assisted by
+<br/>the availability of 3D facial geometry. More specifically, we conceptually extend the EGM algorithm in
+<br/>order to exploit the 3D nature of human facial geometry for face recognition/verification. In order to
+<br/>achieve that, first we extend the matching module of the EGM algorithm in order to capitalize on the
+<br/>2.5D facial data. Furthermore, we incorporate the 3D geometry into the multiscale analysis used and
+<br/>build a novel geodesic multiscale morphological pyramid of dilations/erosions in order to fill the graph
+<br/>jets. We show that the proposed advances significantly enhance the performance of EGM algorithms.
+<br/>We demonstrate the efficiency of the proposed advances in the face recognition/verification problem
+<br/>using photometric stereo.
+<br/>Ó 2011 Elsevier Inc. All rights reserved.
+<br/>1. Introduction
+</td><td>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')<br/>('2871609', 'Maria Petrou', 'maria petrou')</td><td></td></tr><tr><td>d6102a7ddb19a185019fd2112d2f29d9258f6dec</td><td>Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+<br/>3721
+</td><td></td><td></td></tr><tr><td>d6bfa9026a563ca109d088bdb0252ccf33b76bc6</td><td>Unsupervised Temporal Segmentation of Facial Behaviour
+<br/>Department of Computer Science and Engineering, IIT Kanpur
+</td><td>('2094658', 'Abhishek Kar', 'abhishek kar')<br/>('2676758', 'Prithwijit Guha', 'prithwijit guha')</td><td>{akar,amit}@iitk.ac.in, prithwijit.guha@tcs.com
+</td></tr><tr><td>d67dcaf6e44afd30c5602172c4eec1e484fc7fb7</td><td>Illumination Normalization for Robust Face Recognition
+<br/>Using Discrete Wavelet Transform
+<br/><b>Mahanakorn University of Technology</b><br/>51 Cheum-Sampan Rd., Nong Chok, Bangkok, THAILAND 10530
+</td><td>('2337544', 'Amnart Petpon', 'amnart petpon')<br/>('1805935', 'Sanun Srisuk', 'sanun srisuk')</td><td>ta tee473@hotmail.com, sanun@mut.ac.th
+</td></tr><tr><td>d6c7092111a8619ed7a6b01b00c5f75949f137bf</td><td>A Novel Feature Extraction Technique for Facial Expression
+<br/>Recognition
+<br/> 1 Department of Computer Science, School of Applied Statistics,
+<br/><b>National Institute of Development Administration</b><br/>Bangkok, 10240, Thailand
+<br/>
+<br/>2 Department of Computer Science, School of Applied Statistics,
+<br/><b>National Institute of Development Administration</b><br/>Bangkok, 10240, Thailand
+<br/>
+</td><td>('7484236', 'Mohammad Shahidul Islam', 'mohammad shahidul islam')<br/>('2291161', 'Surapong Auwatanamongkol', 'surapong auwatanamongkol')</td><td></td></tr><tr><td>d68dbb71b34dfe98dee0680198a23d3b53056394</td><td>VIVA Face-off Challenge: Dataset Creation and Balancing Privacy
+<br/><b>University of California, San Diego</b><br/>9500 Gilman Drive, La Jolla, CA 92093
+<br/>1. Introduction
+<br/>Vision for intelligent vehicles is a growing area of re-
+<br/>search [5] for many practical reasons including the rela-
+<br/>tively inexpensive nature of camera sensing units and even
+<br/>more the non-contact and non-intrusive manner of obser-
+<br/>vation. The latter is of critical importance when observing
+<br/>the driver inside the vehicle cockpit because no sensing unit
+<br/>should impede the driver’s primary task of driving. One
+<br/>of the key tasks in observing the driver is to estimate the
+<br/>driver’s gaze direction. From a vision sensing perspective,
+<br/>for driver gaze estimation, two of the fundamental building
+<br/>blocks are face detection and head pose estimation.
+<br/>Figure 1. A sample of challenging instances due to varying illumi-
+<br/>nation, occlusions and camera perspectives.
+<br/>In literature, vision based systems for face detection and
+<br/>head pose estimation have progressed significantly in the
+<br/>last decade. However, the limits of the state-of-the-art sys-
+<br/>tems have not been tested thoroughly on a common pool
+<br/>of challenging dataset as the one we propose in this work.
+<br/>Using our database, we want to benchmark existing algo-
+<br/>rithms to highlight problems and deficiencies in current
+<br/>approaches and, simultaneously, progress the development
+<br/>of future algorithms to tackle this problem. Furthermore,
+<br/>while introducing a new benchmarking database, we also
+<br/>raise awareness of privacy protection systems [4] necessary
+<br/>to protect the identity of driver’s in such databases.
+<br/>2. In-the Wild Dataset
+<br/>In recent years, literature has introduced a few in-the-
+<br/>wild datasets (e.g. Helen [2] and COFW [1]) but nothing
+<br/>like the challenges from real-world driving scenario are pre-
+<br/>sented in such databases. Therefore, we introduce a never
+<br/>before seen challenging database of driver’s faces under
+<br/>varying illumination (e.g. sunny and cloudy), in the pres-
+<br/>ence of typical partially occluding objects (e.g. eyewear and
+<br/>hats) or actions (e.g. hand movements),in blur from head
+<br/>motions, under different camera configurations and from
+<br/>different drivers. A small sample of these challenging in-
+<br/>stances are depicted in Figure 1.
+<br/>Three major efforts have been put forth in creating this
+<br/>challenging database. One is in the data collection itself
+<br/>which was done by instrumenting vehicles at UCSD-LISA
+<br/>and having multiple drivers drive the instrumented vehicle
+<br/>year around. Second is in extracting challenging instances
+<br/>from more than a hundred hours of video data. The final
+<br/>effort has been in ground truth annotations (e.g. face posi-
+<br/>tion and head pose). Preliminary evaluation of the state-of-
+<br/>the art head pose algorithms on a small validation part of
+<br/>this dataset is shown in Table 1. Here detection rate is the
+<br/>number of sample images where an algorithm produced an
+<br/>output over the total number of sample images. It is evident
+<br/>that no one algorithm is yet to reach high detection rate and
+<br/>low error values in head pose.
+<br/>3. Balancing Privacy
+<br/>In current literature, there is a lack of publicly available
+<br/>naturalistic driving data largely due to concerns over indi-
+<br/>vidual privacy. Camera sensors looking at a driver, which
+</td><td>('1841835', 'Sujitha Martin', 'sujitha martin')<br/>('1713989', 'Mohan M. Trivedi', 'mohan m. trivedi')</td><td>scmartin@ucsd.edu, mtrivedi@ucsd.edu
+</td></tr><tr><td>d666ce9d783a2d31550a8aa47da45128a67304a7</td><td>On Relating Visual Elements to City Statistics
+<br/><b>University of California, Berkeley</b><br/>Maneesh Agrawala†
+<br/><b>University of California, Berkeley</b><br/><b>University of California, Berkeley</b><br/>(c) Visual Elements for Thefts in San Francisco
+<br/>(a) Predicted High Theft Location in Oakland
+<br/>(b) Predicted Low Theft Location in Oakland
+<br/>(d) Predicted Theft Rate in Oakland
+<br/>Figure 1: Our system automatically computes a predictor from a set of Google StreetView images of areas where a statistic was observed. In this example
+<br/>we use a predictor generated from reports of theft in San Francisco to predict the probability of thefts occurring in Oakland. Our system can predict high
+<br/>theft rate areas (a) and low theft rates area (b) based solely on street-level images from the areas. Visually, the high theft area exhibits a marked quality of
+<br/>disrepair (bars on the windows, unkempt facades, etc), a visual cue that the probability of theft is likely higher. Our method automatically computes machine
+<br/>learning models that detect visual elements similar to these cues (c) from San Francisco. To compute predictions, we use the models to detect the presence of
+<br/>these visual elements in an image and combine all of the detections according to an automatically learned set of weights. Our resulting predictions are 63%
+<br/>accurate in this case and can be computed everywhere in Oakland (d) as they only rely on images as input.
+</td><td>('2288243', 'Sean M. Arietta', 'sean m. arietta')<br/>('1752236', 'Ravi Ramamoorthi', 'ravi ramamoorthi')</td><td></td></tr><tr><td>d6fb606e538763282e3942a5fb45c696ba38aee6</td><td></td><td></td><td></td></tr><tr><td>bcee40c25e8819955263b89a433c735f82755a03</td><td>Biologically inspired vision for human-robot
+<br/>interaction
+<br/>M. Saleiro, M. Farrajota, K. Terzi´c, S. Krishna, J.M.F. Rodrigues, and J.M.H.
+<br/>du Buf
+<br/><b>Vision Laboratory, LARSyS, University of the Algarve, 8005-139 Faro, Portugal</b></td><td></td><td>{masaleiro, mafarrajota, kterzic, jrodrig, dubuf}@ualg.pt,
+<br/>saikrishnap2003@gmail.com,
+</td></tr><tr><td>bc6de183cd8b2baeebafeefcf40be88468b04b74</td><td>Age Group Recognition using Human Facial Images
+<br/>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 126 – No.13, September 2015
+<br/>Dept. of Electronics and Telecommunication
+<br/><b>Government College of Engineering</b><br/>Aurangabad, Maharashtra, India
+</td><td>('31765215', 'Shailesh S. Kulkarni', 'shailesh s. kulkarni')</td><td></td></tr><tr><td>bcf19b964e7d1134d00332cf1acf1ee6184aff00</td><td>1922
+<br/>IEICE TRANS. INF. & SYST., VOL.E100–D, NO.8 AUGUST 2017
+<br/>LETTER
+<br/>Trajectory-Set Feature for Action Recognition
+<br/>SUMMARY We propose a feature for action recognition called
+<br/>Trajectory-Set (TS), on top of the improved Dense Trajectory (iDT).
+<br/>The TS feature encodes only trajectories around densely sampled inter-
+<br/>est points, without any appearance features. Experimental results on the
+<br/>UCF50 action dataset demonstrates that TS is comparable to state-of-the-
+<br/>arts, and outperforms iDT; the accuracy of 95.0%, compared to 91.7% by
+<br/>iDT.
+<br/>key words: action recognition, trajectory, improved Dense Trajectory
+<br/>the two-stream CNN [2] that uses a single frame and a opti-
+<br/>cal flow stack. In their paper stacking trajectories was also
+<br/>reported but did not perform well, probably the sparseness
+<br/>of trajectories does not fit to CNN architectures. In contrast,
+<br/>we take a hand-crafted approach that can be fused later with
+<br/>CNN outputs.
+<br/>1.
+<br/>Introduction
+<br/>Action recognition has been well studied in the computer
+<br/>vision literature [1] because it is an important and challeng-
+<br/>ing task. Deep learning approaches have been proposed
+<br/>recently [2]–[4], however still a hand-crafted feature, im-
+<br/>proved Dense Trajectory (iDT) [5], [6], is comparable in
+<br/>performance. Moreover, top performances of deep learn-
+<br/>ing approaches are obtained by combining the iDT fea-
+<br/>ture [3], [7], [8].
+<br/>In this paper, we propose a novel hand-crafted feature
+<br/>for action recognition, called Trajectory-Set (TS), that en-
+<br/>codes trajectories in a local region of a video. The con-
+<br/>tribution of this paper is summarized as follows. We pro-
+<br/>pose another hand-crafted feature that can be combined with
+<br/>deep learning approaches. Hand-crafted features are com-
+<br/>plement to deep learning approaches, however a little effort
+<br/>has been done in this direction after iDT. Second, the pro-
+<br/>posed TS feature focuses on the better handling of motions
+<br/>in the scene. The iDT feature uses trajectories of densely
+<br/>samples interest points in a simple way, while we explore
+<br/>here the way to extract a rich information from trajectories.
+<br/>The proposed TS feature is complement to appearance in-
+<br/>formation such as HOG and objects in the scene, which can
+<br/>be computed separately and combined afterward in a late
+<br/>fusion fashion.
+<br/>There are two relate works relevant to our work. One
+<br/>is trajectons [9] that uses a global dictionary of trajectories
+<br/>in a video to cluster representative trajectories as snippets.
+<br/>Our TS feature is computed locally, not globally, inspired
+<br/>by the success of local image descriptors [10]. The other is
+<br/>Manuscript received March 2, 2017.
+<br/>Manuscript revised April 27, 2017.
+<br/>Manuscript publicized May 10, 2017.
+<br/><b>The authors are with Hiroshima University, Higashihiroshima</b><br/>shi, 739–8527 Japan.
+<br/>DOI: 10.1587/transinf.2017EDL8049
+<br/>2. Dense Trajectory
+<br/>Here we briefly summarize the improved dense trajectory
+<br/>(iDT) [6] on which we base for the proposed method. First,
+<br/>the image pyramid for a particular frame at time t in a video
+<br/>is constructed, and interest points are densely sampled at
+<br/>each level of the pyramid. Next, interest points are tracked
+<br/>in the following L frames (L = 15 by default). Then, the
+<br/>iDT is computed by using local features such as HOG (His-
+<br/>togram of Oriented Gradient) [10], HOF (Histogram of Op-
+<br/>tical Flow), and MBH (Motion Boundary Histograms) [11]
+<br/>along the trajectory tube; a stack of patches centered at the
+<br/>trajectory in the frames.
+<br/>, pt1
+<br/>In fact, Tt0,tL
+<br/>For example, between two points in time t0 and tL, a
+<br/>, . . . , ptL in frames {t0, t1,
+<br/>trajectory Tt0,tL has points pt0
+<br/>. . . , tL}.
+<br/>is a vector of displacement be-
+<br/>tween frames rather than point coordinates, that is, Tt0,tL
+<br/>(v0, v1, . . . , vL−1) where vi = pi−1 − pi. Local features such as
+<br/>HOGti are computed with a patch centered at pti in frame at
+<br/>time ti.
+<br/>To improve the performance, the global motion is re-
+<br/>moved by computing homography, and background trajec-
+<br/>tories are removed by using a people detector. The Fisher
+<br/>vector encoding [12] is used to compute an iDT feature of a
+<br/>video.
+<br/>3. Proposed Trajectory-Set Feature
+<br/>We think that extracted trajectories might have rich informa-
+<br/>tion discriminative enough for classifying different actions,
+<br/>even although trajectories have no appearance information.
+<br/>As shown in Fig. 1, different actions are expected to have
+<br/>different trajectories, regardless of appearance, texture, or
+<br/>shape of the video frame contents. However a single trajec-
+<br/>tory Tt0,tL may be severely affected by inaccurate tracking
+<br/>results and an irregular motion in the frame.
+<br/>We instead propose to aggregate nearby trajectories to
+<br/>form a Trajectory-Set (TS) feature. First, a frame is divided
+<br/>into non-overlapping cells of M × M pixels as shown in
+<br/><b>Copyright c(cid:2) 2017 The Institute of Electronics, Information and Communication Engineers</b></td><td>('47916686', 'Kenji Matsui', 'kenji matsui')<br/>('1744862', 'Toru Tamaki', 'toru tamaki')<br/>('1688940', 'Bisser Raytchev', 'bisser raytchev')<br/>('1686272', 'Kazufumi Kaneda', 'kazufumi kaneda')</td><td>a) E-mail: tamaki@hiroshima-u.ac.jp
+</td></tr><tr><td>bc9003ad368cb79d8a8ac2ad025718da5ea36bc4</td><td>Technische Universit¨at M¨unchen
+<br/>Bildverstehen und Intelligente Autonome Systeme
+<br/>Facial Expression Recognition With A
+<br/>Three-Dimensional Face Model
+<br/>Vollst¨andiger Abdruck der von der Fakult¨at f¨ur Informatik der Technischen Uni-
+<br/>versit¨at M¨unchen zur Erlangung des akademischen Grades eines
+<br/>Doktors der Naturwissenschaften
+<br/>genehmigten Dissertation.
+<br/>Vorsitzender:
+<br/>Univ.-Prof. Dr. Johann Schlichter
+<br/>Pr¨ufer der Dissertation: 1. Univ.-Prof. Dr. Bernd Radig (i.R.)
+<br/>2. Univ.-Prof. Gudrun J. Klinker, Ph.D.
+<br/>Die Dissertation wurde am 04.07.2011 bei der Technischen Universit¨at M¨unchen
+<br/>eingereicht und durch die Fakult¨at f¨ur Informatik am 02.12.2011 angenommen.
+</td><td>('50565622', 'Christoph Mayer', 'christoph mayer')</td><td></td></tr><tr><td>bc15a2fd09df7046e7e8c7c5b054d7f06c3cefe9</td><td>Using Deep Autoencoders for Facial Expression
+<br/>Recognition
+<br/><b>COMSATS Institute of Information Technology, Islamabad</b><br/><b>Information Technology University (ITU), Punjab, Lahore, Pakistan</b><br/><b>National University of Sciences and Technology (NUST), Islamabad, Pakistan</b></td><td>('24040678', 'Siddique Latif', 'siddique latif')<br/>('1734917', 'Junaid Qadir', 'junaid qadir')</td><td>engr.ussman@gmail.com, slatif.msee15seecs@seecs.edu.pk, junaid.qadir@itu.edu.pk
+</td></tr><tr><td>bcc346f4a287d96d124e1163e4447bfc47073cd8</td><td></td><td></td><td></td></tr><tr><td>bc27434e376db89fe0e6ef2d2fabc100d2575ec6</td><td>Faceless Person Recognition;
+<br/>Privacy Implications in Social Media
+<br/><b>Max-Planck Institute for Informatics</b><br/>Person A training samples.
+<br/>Is this person A ?
+<br/>Fig. 1: An illustration of one of the scenarios considered: can a vision system
+<br/>recognise that the person in the right image is the same as the tagged person in
+<br/>the left images, even when the head is obfuscated?
+</td><td>('2390510', 'Seong Joon Oh', 'seong joon oh')<br/>('1798000', 'Rodrigo Benenson', 'rodrigo benenson')<br/>('1739548', 'Mario Fritz', 'mario fritz')<br/>('1697100', 'Bernt Schiele', 'bernt schiele')</td><td>{joon, benenson, mfritz, schiele}@mpi-inf.mpg.de
+</td></tr><tr><td>bcc172a1051be261afacdd5313619881cbe0f676</td><td>978-1-5090-4117-6/17/$31.00 ©2017 IEEE
+<br/>2197
+<br/>ICASSP 2017
+</td><td></td><td></td></tr><tr><td>bcfeac1e5c31d83f1ed92a0783501244dde5a471</td><td></td><td></td><td></td></tr><tr><td>bc12715a1ddf1a540dab06bf3ac4f3a32a26b135</td><td>An Analysis of the State of the Art in Multiple Object Tracking
+<br/>Tracking the Trackers:
+<br/><b>Technical University Munich, Germany</b><br/><b>University of Adelaide, Australia</b><br/>3Photogrammetry and Remote Sensing, ETH Z¨urich, Switzerland
+<br/>4TU Darmstadt, Germany
+</td><td>('34761498', 'Anton Milan', 'anton milan')<br/>('1803034', 'Konrad Schindler', 'konrad schindler')<br/>('34493380', 'Stefan Roth', 'stefan roth')</td><td></td></tr><tr><td>bc910ca355277359130da841a589a36446616262</td><td>Conditional High-order Boltzmann Machine:
+<br/>A Supervised Learning Model for Relation Learning
+<br/>1Center for Research on Intelligent Perception and Computing
+<br/>National Laboratory of Pattern Recognition
+<br/>2Center for Excellence in Brain Science and Intelligence Technology
+<br/><b>Institute of Automation, Chinese Academy of Sciences</b></td><td>('39937384', 'Yan Huang', 'yan huang')<br/>('40119691', 'Wei Wang', 'wei wang')<br/>('22985667', 'Liang Wang', 'liang wang')</td><td>{yhuang, wangwei, wangliang}@nlpr.ia.ac.cn
+</td></tr><tr><td>bc2852fa0a002e683aad3fb0db5523d1190d0ca5</td><td></td><td></td><td></td></tr><tr><td>bc866c2ced533252f29cf2111dd71a6d1724bd49</td><td>Sensors 2014, 14, 19561-19581; doi:10.3390/s141019561
+<br/>OPEN ACCESS
+<br/>sensors
+<br/>ISSN 1424-8220
+<br/>www.mdpi.com/journal/sensors
+<br/>Article
+<br/>A Multi-Modal Face Recognition Method Using Complete Local
+<br/>Derivative Patterns and Depth Maps
+<br/><b>Institute of Microelectronics, Tsinghua University, Beijing 100084, China</b><br/>Tel.: +86-10-6279-4398.
+<br/>External Editor: Vittorio M.N. Passaro
+<br/>Received: 8 August 2014; in revised form: 3 October 2014 / Accepted: 13 October 2014 /
+<br/>Published: 20 October 2014
+</td><td>('3817476', 'Shouyi Yin', 'shouyi yin')<br/>('34585208', 'Xu Dai', 'xu dai')<br/>('12263637', 'Peng Ouyang', 'peng ouyang')<br/>('1743798', 'Leibo Liu', 'leibo liu')<br/>('1803672', 'Shaojun Wei', 'shaojun wei')</td><td>E-Mails: daixu@gmail.com (X.D.); oyangpeng12@163.com (P.O.); liulb@tsinghua.edu.cn (L.L.);
+<br/>wsj@tsinghua.edu.cn (S.W.)
+<br/>* Author to whom correspondence should be addressed; E-Mail: yinsy@tsinghua.edu.cn;
+</td></tr><tr><td>bc8e11b8cdf0cfbedde798a53a0318e8d6f67e17</td><td>Proceedings of the Thirty-First AAAI Conference on Artificial Intelligence (AAAI-17)
+<br/>Deep Learning for Fixed Model Reuse∗
+<br/><b>National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, 210023, China</b><br/>Collaborative Innovation Center of Novel Software Technology and Industrialization, Nanjing, 210023, China
+</td><td>('1708973', 'Yang Yang', 'yang yang')<br/>('1721819', 'De-Chuan Zhan', 'de-chuan zhan')<br/>('3750883', 'Ying Fan', 'ying fan')<br/>('2192443', 'Yuan Jiang', 'yuan jiang')<br/>('1692625', 'Zhi-Hua Zhou', 'zhi-hua zhou')</td><td>{yangy, zhandc, fany, jiangy, zhouzh}@lamda.nju.edu.cn
+</td></tr><tr><td>bcb99d5150d792001a7d33031a3bd1b77bea706b</td><td></td><td></td><td></td></tr><tr><td>bc811a66855aae130ca78cd0016fd820db1603ec</td><td>Towards three-dimensional face recognition in the real
+<br/>To cite this version:
+<br/>HAL Id: tel-00998798
+<br/>https://tel.archives-ouvertes.fr/tel-00998798
+<br/>Submitted on 2 Jun 2014
+<br/>archive for the deposit and dissemination of sci-
+<br/>entific research documents, whether they are pub-
+<br/>teaching and research institutions in France or
+<br/>destin´ee au d´epˆot et `a la diffusion de documents
+<br/>recherche fran¸cais ou ´etrangers, des laboratoires
+</td><td>('47144044', 'Li', 'li')<br/>('47144044', 'Li', 'li')<br/>('47144044', 'Li', 'li')<br/>('47144044', 'Li', 'li')<br/>('47144044', 'Li', 'li')<br/>('47144044', 'Li', 'li')<br/>('47144044', 'Li', 'li')<br/>('47144044', 'Li', 'li')<br/>('47144044', 'Li', 'li')<br/>('47144044', 'Li', 'li')</td><td></td></tr><tr><td>bc98027b331c090448492eb9e0b9721e812fac84</td><td>Journal of Intelligent Learning Systems and Applications, 2012, 4, 266-273
+<br/>http://dx.doi.org/10.4236/jilsa.2012.44027 Published Online November 2012 (http://www.SciRP.org/journal/jilsa)
+<br/>Face Representation Using Combined Method of Gabor
+<br/>Filters, Wavelet Transformation and DCV and Recognition
+<br/>Using RBF
+<br/><b>VHNSN College, Virudhunagar, ANJA College</b><br/>Sivakasi, India.
+<br/>Received April 27th, 2012; revised July 19th, 2012; accepted July 26th, 2012
+</td><td>('39000426', 'Kathirvalavakumar Thangairulappan', 'kathirvalavakumar thangairulappan')<br/>('15392239', 'Jebakumari Beulah Vasanthi Jeyasingh', 'jebakumari beulah vasanthi jeyasingh')</td><td>Email: *kathirvalavakumar@yahoo.com, jebaarul07@yahoo.com
+</td></tr><tr><td>bc9af4c2c22a82d2c84ef7c7fcc69073c19b30ab</td><td>MoCoGAN: Decomposing Motion and Content for Video Generation
+<br/>Snap Research
+<br/>NVIDIA
+</td><td>('1715440', 'Sergey Tulyakov', 'sergey tulyakov')<br/>('9536217', 'Ming-Yu Liu', 'ming-yu liu')<br/>('50030951', 'Xiaodong Yang', 'xiaodong yang')<br/>('1690538', 'Jan Kautz', 'jan kautz')</td><td>stulyakov@snap.com
+<br/>{mingyul,xiaodongy,jkautz}@nvidia.com
+</td></tr><tr><td>bcac3a870501c5510df80c2a5631f371f2f6f74a</td><td>CVPR
+<br/>#1387
+<br/>000
+<br/>001
+<br/>002
+<br/>003
+<br/>004
+<br/>005
+<br/>006
+<br/>007
+<br/>008
+<br/>009
+<br/>010
+<br/>011
+<br/>012
+<br/>013
+<br/>014
+<br/>015
+<br/>016
+<br/>017
+<br/>018
+<br/>019
+<br/>020
+<br/>021
+<br/>022
+<br/>023
+<br/>024
+<br/>025
+<br/>026
+<br/>027
+<br/>028
+<br/>029
+<br/>030
+<br/>031
+<br/>032
+<br/>033
+<br/>034
+<br/>035
+<br/>036
+<br/>037
+<br/>038
+<br/>039
+<br/>040
+<br/>041
+<br/>042
+<br/>043
+<br/>044
+<br/>045
+<br/>046
+<br/>047
+<br/>048
+<br/>049
+<br/>050
+<br/>051
+<br/>052
+<br/>053
+<br/>CVPR 2013 Submission #1387. CONFIDENTIAL REVIEW COPY. DO NOT DISTRIBUTE.
+<br/>CVPR
+<br/>#1387
+<br/>Structured Face Hallucination
+<br/>Anonymous CVPR submission
+<br/>Paper ID 1387
+</td><td></td><td></td></tr><tr><td>ae8d5be3caea59a21221f02ef04d49a86cb80191</td><td>Published as a conference paper at ICLR 2018
+<br/>SKIP RNN: LEARNING TO SKIP STATE UPDATES IN
+<br/>RECURRENT NEURAL NETWORKS
+<br/>†Barcelona Supercomputing Center, ‡Google Inc,
+<br/><b>Universitat Polit`ecnica de Catalunya, Columbia University</b></td><td>('2447185', 'Brendan Jou', 'brendan jou')<br/>('1711068', 'Jordi Torres', 'jordi torres')<br/>('9546964', 'Shih-Fu Chang', 'shih-fu chang')</td><td>{victor.campos, jordi.torres}@bsc.es, bjou@google.com,
+<br/>xavier.giro@upc.edu, shih.fu.chang@columbia.edu
+</td></tr><tr><td>aed321909bb87c81121c841b21d31509d6c78f69</td><td></td><td></td><td></td></tr><tr><td>ae936628e78db4edb8e66853f59433b8cc83594f</td><td></td><td></td><td></td></tr><tr><td>ae0765ebdffffd6e6cc33c7705df33b7e8478627</td><td>Self-Reinforced Cascaded Regression for Face Alignment
+<br/><b>DUT-RU International School of Information Science and Engineering, Dalian University of Technology, Dalian, China</b><br/>2Key Laboratory for Ubiquitous Network and Service Software of Liaoning Province, Dalian, China
+<br/><b>School of Mathematical Science, Dalian University of Technology, Dalian, China</b></td><td>('1710408', 'Xin Fan', 'xin fan')<br/>('34469457', 'Risheng Liu', 'risheng liu')<br/>('3453975', 'Kang Huyan', 'kang huyan')<br/>('3013708', 'Yuyao Feng', 'yuyao feng')<br/>('7864960', 'Zhongxuan Luo', 'zhongxuan luo')</td><td>{xin.fan, rsliu, zxluo}@dlut.edu.cn, huyankang@hotmail.com yyaofeng@gmail.com
+</td></tr><tr><td>aefc7c708269b874182a5c877fb6dae06da210d4</td><td>Deep Learning of Invariant Features via Simulated
+<br/>Fixations in Video
+<br/><b>Stanford University, CA</b><br/><b>Stanford University, CA</b><br/><b>NEC Laboratories America, Inc., Cupertino, CA</b></td><td>('2860351', 'Will Y. Zou', 'will y. zou')<br/>('1682028', 'Shenghuo Zhu', 'shenghuo zhu')<br/>('1701538', 'Andrew Y. Ng', 'andrew y. ng')<br/>('38701713', 'Kai Yu', 'kai yu')</td><td>{wzou, ang}@cs.stanford.edu
+<br/>{zsh, kyu}@sv.nec-labs.com
+</td></tr><tr><td>ae2cf545565c157813798910401e1da5dc8a6199</td><td>Mahkonen et al. EURASIP Journal on Image and Video
+<br/>Processing (2018) 2018:61
+<br/>https://doi.org/10.1186/s13640-018-0303-9
+<br/>EURASIP Journal on Image
+<br/>and Video Processing
+<br/>RESEARCH
+<br/>Open Access
+<br/>Cascade of Boolean detector
+<br/>combinations
+</td><td>('3292563', 'Katariina Mahkonen', 'katariina mahkonen')</td><td></td></tr><tr><td>aebb9649bc38e878baef082b518fa68f5cda23a5</td><td>
+</td><td></td><td></td></tr><tr><td>aeaf5dbb3608922246c7cd8a619541ea9e4a7028</td><td>Weakly Supervised Facial Action Unit Recognition through Adversarial Training
+<br/><b>University of Science and Technology of China, Hefei, Anhui, China</b></td><td>('46217896', 'Guozhu Peng', 'guozhu peng')<br/>('1791319', 'Shangfei Wang', 'shangfei wang')</td><td>gzpeng@mail.ustc.edu.cn, sfwang@ustc.edu.cn
+</td></tr><tr><td>ae836e2be4bb784760e43de88a68c97f4f9e44a1</td><td>Semi-Supervised Dimensionality Reduction∗
+<br/>1National Laboratory for Novel Software Technology
+<br/><b>Nanjing University, Nanjing 210093, China</b><br/>2Department of Computer Science and Engineering
+<br/><b>Nanjing University of Aeronautics and Astronautics, Nanjing 210016, China</b></td><td>('51326748', 'Daoqiang Zhang', 'daoqiang zhang')<br/>('46228434', 'Zhi-Hua Zhou', 'zhi-hua zhou')<br/>('1680768', 'Songcan Chen', 'songcan chen')</td><td>dqzhang@nuaa.edu.cn
+<br/>zhouzh@nju.edu.cn
+<br/>s.chen@nuaa.edu.cn
+</td></tr><tr><td>ae5bb02599244d6d88c4fe466a7fdd80aeb91af4</td><td>Analysis of Recognition Algorithms using Linear, Generalized Linear, and
+<br/>Generalized Linear Mixed Models
+<br/>Dept. of Computer Science
+<br/><b>Colorado State University</b><br/>Fort Colllins, CO 80523
+<br/>Dept. of Statistics
+<br/><b>Colorado State University</b><br/>Fort Collins, CO 80523
+</td><td>('1757322', 'J. Ross Beveridge', 'j. ross beveridge')<br/>('1750370', 'Geof H. Givens', 'geof h. givens')</td><td></td></tr><tr><td>ae18ccb35a1a5d7b22f2a5760f706b1c11bf39a9</td><td>Sensing Highly Non-Rigid Objects with RGBD
+<br/>Sensors for Robotic Systems
+<br/>A Dissertation
+<br/>Presented to
+<br/>the Graduate School of
+<br/><b>Clemson University</b><br/>In Partial Fulfillment
+<br/>of the Requirements for the Degree
+<br/>Doctor of Philosophy
+<br/>Computer Engineering
+<br/>by
+<br/>May 2013
+<br/>Accepted by:
+<br/>Dr. Stanley T. Birchfield, Committee Chair
+</td><td>('2181472', 'Bryan Willimon', 'bryan willimon')<br/>('26607413', 'Ian D. Walker', 'ian d. walker')<br/>('1724942', 'Adam W. Hoover', 'adam w. hoover')<br/>('2171076', 'Damon L. Woodard', 'damon l. woodard')</td><td></td></tr><tr><td>aeeea6eec2f063c006c13be865cec0c350244e5b</td><td>Induced Disgust, Happiness and Surprise: an Addition to the MMI Facial
+<br/>Expression Database
+<br/><b>Imperial College London / Twente University</b><br/>Department of Computing / EEMCS
+<br/>180 Queen’s Gate / Drienerlolaan 5
+<br/>London / Twente
+</td><td>('1795528', 'Michel F. Valstar', 'michel f. valstar')<br/>('1694605', 'Maja Pantic', 'maja pantic')</td><td>Michel.Valstar@imperial.ac.uk, M.Pantic@imperial.ac.uk
+</td></tr><tr><td>ae9257f3be9f815db8d72819332372ac59c1316b</td><td>P SY CH O L O GIC AL SC I E NC E
+<br/>Research Article
+<br/>Deciphering the Enigmatic Face
+<br/>The Importance of Facial Dynamics in Interpreting Subtle
+<br/>Facial Expressions
+<br/><b>University of Pittsburgh and 2University of British Columbia, Vancouver, British Columbia, Canada</b></td><td>('2059653', 'Zara Ambadar', 'zara ambadar')</td><td></td></tr><tr><td>ae89b7748d25878c4dc17bdaa39dd63e9d442a0d</td><td>On evaluating face tracks in movies
+<br/>To cite this version:
+<br/>in movies. IEEE International Conference on Image Processing (ICIP 2013), Sep 2013, Melbourne,
+<br/>Australia. 2013. <hal-00870059>
+<br/>HAL Id: hal-00870059
+<br/>https://hal.inria.fr/hal-00870059
+<br/>Submitted on 4 Oct 2013
+<br/>HAL is a multi-disciplinary open access
+<br/>archive for the deposit and dissemination of sci-
+<br/>entific research documents, whether they are pub-
+<br/>lished or not. The documents may come from
+<br/>teaching and research institutions in France or
+<br/><b>abroad, or from public or private research centers</b><br/>L’archive ouverte pluridisciplinaire HAL, est
+<br/>destinée au dépôt et à la diffusion de documents
+<br/>scientifiques de niveau recherche, publiés ou non,
+<br/>émanant des établissements d’enseignement et de
+<br/>recherche français ou étrangers, des laboratoires
+<br/>publics ou privés.
+</td><td>('2889451', 'Alexey Ozerov', 'alexey ozerov')<br/>('2712091', 'Jean-Ronan Vigouroux', 'jean-ronan vigouroux')<br/>('39255836', 'Louis Chevallier', 'louis chevallier')<br/>('1799777', 'Patrick Pérez', 'patrick pérez')<br/>('2889451', 'Alexey Ozerov', 'alexey ozerov')<br/>('2712091', 'Jean-Ronan Vigouroux', 'jean-ronan vigouroux')<br/>('39255836', 'Louis Chevallier', 'louis chevallier')<br/>('1799777', 'Patrick Pérez', 'patrick pérez')</td><td></td></tr><tr><td>ae1de0359f4ed53918824271c888b7b36b8a5d41</td><td>Low-cost Automatic Inpainting for Artifact Suppression in Facial Images
+<br/>Thomaz4
+<br/><b>Scienti c Visualization and Computer Graphics, University of Groningen, Nijenborgh 9, Groningen, The Netherlands</b><br/>2Department of Computing, National Laboratory of Scientific Computation, Petr´opolis, Brazil
+<br/><b>Paran a Federal University, Curitiba, Brazil</b><br/><b>University Center of FEI, S ao Bernardo do Campo, Brazil</b><br/>Keywords:
+<br/>Image inpainting, Face reconstruction, Statistical Decision, Image Quality Index
+</td><td>('1686665', 'Alexandru Telea', 'alexandru telea')</td><td>{a.sobiecki, a.c.telea}@rug.nl, gilson@lncc.br, neves@ufpr.br, cet@fei.edu.br
+</td></tr><tr><td>ae4390873485c9432899977499c3bf17886fa149</td><td>FACIAL EXPRESSION RECOGNITION USING
+<br/>DIGITALISED FACIAL FEATURES BASED ON
+<br/>ACTIVE SHAPE MODEL
+<br/><b>Institute for Arts, Science and Technology</b><br/><b>Glyndwr University</b><br/>Wrexham, United Kingdom
+</td><td>('39048426', 'Nan Sun', 'nan sun')<br/>('11832393', 'Zheng Chen', 'zheng chen')<br/>('1818364', 'Richard Day', 'richard day')</td><td>bruce.n.sun@gmail.com1
+<br/>z.chen@glyndwr.ac.uk2
+<br/>r.day@glyndwr.ac.uk3
+</td></tr><tr><td>aeff403079022683b233decda556a6aee3225065</td><td>DeepFace: Face Generation using Deep Learning
+</td><td>('31560532', 'Hardie Cate', 'hardie cate')<br/>('6415321', 'Fahim Dalvi', 'fahim dalvi')<br/>('8815003', 'Zeshan Hussain', 'zeshan hussain')</td><td>ccate@stanford.edu
+<br/>fdalvi@cs.stanford.edu
+<br/>zeshanmh@stanford.edu
+</td></tr><tr><td>ae753fd46a744725424690d22d0d00fb05e53350</td><td>000
+<br/>001
+<br/>002
+<br/>003
+<br/>004
+<br/>005
+<br/>006
+<br/>007
+<br/>008
+<br/>009
+<br/>010
+<br/>011
+<br/>012
+<br/>013
+<br/>014
+<br/>015
+<br/>016
+<br/>017
+<br/>018
+<br/>019
+<br/>020
+<br/>021
+<br/>022
+<br/>023
+<br/>024
+<br/>025
+<br/>026
+<br/>027
+<br/>028
+<br/>029
+<br/>030
+<br/>031
+<br/>032
+<br/>033
+<br/>034
+<br/>035
+<br/>036
+<br/>037
+<br/>038
+<br/>039
+<br/>040
+<br/>041
+<br/>042
+<br/>043
+<br/>044
+<br/>Describing Clothing by Semantic Attributes
+<br/>Anonymous ECCV submission
+<br/>Paper ID 727
+</td><td></td><td></td></tr><tr><td>aea4128ba18689ff1af27b90c111bbd34013f8d5</td><td>Efficient k-Support Matrix Pursuit
+<br/><b>National University of Singapore</b><br/><b>School of Software, Sun Yat-sen University, China</b><br/><b>School of Information Science and Technology, Sun Yat-sen University, China</b><br/><b>School of Computer Science, South China Normal University, China</b></td><td>('2356867', 'Hanjiang Lai', 'hanjiang lai')<br/>('2493641', 'Yan Pan', 'yan pan')<br/>('33224509', 'Canyi Lu', 'canyi lu')<br/>('1704995', 'Yong Tang', 'yong tang')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')</td><td>{laihanj,canyilu}@gmail.com, panyan5@mail.sysu.edu.cn,
+<br/>ytang@scnu.edu.cn, eleyans@nus.edu.sg
+</td></tr><tr><td>ae2c71080b0e17dee4e5a019d87585f2987f0508</td><td>Research Paper: Emotional Face Recognition in Children
+<br/>With Attention Deficit/Hyperactivity Disorder: Evidence
+<br/>From Event Related Gamma Oscillation
+<br/>CrossMark
+<br/><b>School of Advanced Technologies in Medicine, Tehran University of Medical Sciences, Tehran, Iran</b><br/><b>School of Medicine, Tehran University of Medical Sciences, Tehran, Iran</b><br/><b>Research Center for Cognitive and Behavioral Sciences, Tehran University of Medical Sciences, Tehran, Iran</b><br/><b>Amirkabir University of Technology, Tehran, Iran</b><br/>Use your device to scan
+<br/>and read the article online
+<br/>Citation: Sarraf Razavi, M., Tehranidoost, M., Ghassemi, F., Purabassi, P., & Taymourtash, A. (2017). Emotional Face Rec-
+<br/>ognition in Children With Attention Deficit/Hyperactivity Disorder: Evidence From Event Related Gamma Oscillation. Basic
+<br/>and Clinical Neuroscience, 8(5):419-426. https://doi.org/10.18869/NIRP.BCN.8.5.419
+<br/> : : https://doi.org/10.18869/NIRP.BCN.8.5.419
+<br/>Article info:
+<br/>Received: 03 Feb. 2017
+<br/>First Revision: 29 Feb. 2017
+<br/>Accepted: 11 Jul. 2017
+<br/>Key Words:
+<br/>Emotional face
+<br/>recognition, Event-
+<br/>Related Oscillation
+<br/>(ERO), Gamma band
+<br/>activity, Attention Deficit
+<br/>Hyperactivity Disorder
+<br/>(ADHD)
+<br/>A B S T R A C T
+<br/>Introduction: Children with attention-deficit/hyperactivity disorder (ADHD) have some
+<br/>impairment in emotional relationship which can be due to problems in emotional processing.
+<br/>The present study investigated neural correlates of early stages of emotional face processing in
+<br/>this group compared with typically developing children using the Gamma Band Activity (GBA).
+<br/>Methods: A total of 19 children diagnosed with ADHD (Combined type) based on DSM-IV
+<br/>classification were compared with 19 typically developing children matched on age, gender, and
+<br/>IQ. The participants performed an emotional face recognition while their brain activities were
+<br/>recorded using an event-related oscillation procedure.
+<br/>Results: The results indicated that ADHD children compared to normal group showed a significant
+<br/>reduction in the gamma band activity, which is thought to reflect early perceptual emotion
+<br/>discrimination for happy and angry emotions (P<0.05).
+<br/>Conclusion: The present study supports the notion that individuals with ADHD have some
+<br/>impairments in early stage of emotion processing which can cause their misinterpretation of
+<br/>emotional faces.
+<br/>1. Introduction
+<br/>DHD is a common neurodevelopmental
+<br/>disorder characterized by inattentiveness
+<br/>and hyperactivity/impulsivity (American
+<br/>Psychiatric Association, 2013). Individu-
+<br/>als with ADHD also show problems in social and emo-
+<br/><b>tional functions, including the effective assessment of</b><br/>the emotional state of others. It is important to set the
+<br/>adaptive behavior of human facial expressions in social
+<br/>interactions (Cadesky, Mota, & Schachar, 2000; Corbett
+<br/>& Glidden, 2000). Based on the evidence, frontotem-
+<br/>poral-posterior and fronto striatal cerebellar systems
+<br/>are involved in emotional functions. These regions may
+<br/>contribute to impairments of emotional recognition in
+<br/>ADHD (Corbett & Glidden, 2000; Dickstein, Bannon,
+<br/>Xavier Castellanos, & Milham, 2006; Durston, Van
+<br/>Belle, & De Zeeuw, 2011).
+<br/>* Corresponding Author:
+<br/><b>Amirkabir University of Technology, Tehran, Iran</b><br/>Tel:+98 (912) 3260661
+<br/>419
+<br/>Basic and ClinicalSeptember, October 2017, Volume 8, Number 5 </td><td>('29928144', 'Mahdiyeh Sarraf Razavi', 'mahdiyeh sarraf razavi')<br/>('7171067', 'Mehdi Tehranidoost', 'mehdi tehranidoost')<br/>('34494047', 'Farnaz Ghassemi', 'farnaz ghassemi')<br/>('29839761', 'Parivash Purabassi', 'parivash purabassi')<br/>('29933673', 'Athena Taymourtash', 'athena taymourtash')<br/>('34494047', 'Farnaz Ghassemi', 'farnaz ghassemi')</td><td>E-mail: ghassemi@aut.ac.ir
+</td></tr><tr><td>ae4e2c81c8a8354c93c4b21442c26773352935dd</td><td></td><td></td><td></td></tr><tr><td>ae85c822c6aec8b0f67762c625a73a5d08f5060d</td><td>This is the author's version of an article that has been published in this journal. Changes were made to this version by the publisher prior to publication.
+<br/>The final version of record is available at http://dx.doi.org/10.1109/TPAMI.2014.2353624
+<br/>IEEE TRANSACTION ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. M, NO. N, MONTH YEAR
+<br/>Retrieving Similar Styles to Parse Clothing
+</td><td>('1721910', 'Kota Yamaguchi', 'kota yamaguchi')<br/>('1772294', 'M. Hadi Kiapour', 'm. hadi kiapour')<br/>('35258350', 'Luis E. Ortiz', 'luis e. ortiz')<br/>('1685538', 'Tamara L. Berg', 'tamara l. berg')</td><td></td></tr><tr><td>ae5f32e489c4d52e7311b66060c7381d932f4193</td><td>Appearance-and-Relation Networks for Video Classification
+<br/><b>State Key Laboratory for Novel Software Technology, Nanjing University, China</b><br/>2Computer Vision Laboratory, ETH Zurich, Switzerland
+<br/>3Google Research
+</td><td>('33345248', 'Limin Wang', 'limin wang')<br/>('47113208', 'Wei Li', 'wei li')<br/>('50135099', 'Wen Li', 'wen li')<br/>('1681236', 'Luc Van Gool', 'luc van gool')</td><td></td></tr><tr><td>ae71f69f1db840e0aa17f8c814316f0bd0f6fbbf</td><td>Contents lists available at ScienceDirect
+<br/>j o u r n a l h o m e p a g e : w w w . e l s e v i e r . c o m / l o c a t e / c o m p h u m b e h
+<br/>Full length article
+<br/>That personal profile image might jeopardize your rental opportunity!
+<br/>On the relative impact of the seller's facial expressions upon buying
+<br/>behavior on Airbnb™*
+<br/>a Faculty of Technology, Westerdals Oslo School of Arts, Communication and Technology, Oslo, Norway
+<br/><b>b School of Business, Reykjavik University, Reykjavik, Iceland</b><br/><b>c Cardiff Business School, Cardiff University, Cardiff, United Kingdom</b><br/>a r t i c l e i n f o
+<br/>a b s t r a c t
+<br/>Article history:
+<br/>Received 29 November 2016
+<br/>Received in revised form
+<br/>2 February 2017
+<br/>Accepted 9 February 2017
+<br/>Available online 10 February 2017
+<br/>Keywords:
+<br/>Sharing economy
+<br/>Peer-to-peer
+<br/>Facial expressions
+<br/>Evolutionary psychology
+<br/>Approach and avoidance
+<br/>Conjoint study
+<br/>Airbnb is an online marketplace for peer-to-peer accommodation rental services. In contrast to tradi-
+<br/>tional rental services, personal profile images, i.e. the sellers' facial images, are present along with the
+<br/>housing on offer. This study aims to investigate the impact of a seller's facial image and their expression
+<br/>upon buyers' behavior in this context. The impact of facial expressions was investigated together with
+<br/>other relevant variables (price and customer ratings). Findings from a conjoint study (n ¼ 139) show that
+<br/>the impact of a seller's facial expression on buying behavior in an online peer-to-peer context is sig-
+<br/>nificant. A negative facial expression and absence of facial image (head silhouette) abates approach and
+<br/>evokes avoidance tendencies to explore a specific web page on Airbnb, and, simultaneously decrease the
+<br/>likelihood to rent. The reverse effect was true for neutral and positive facial expressions. We found that a
+<br/>negative and positive facial expression had more impact on likelihood to rent, for women than for men.
+<br/>Further analysis shows that the absence of facial image and an angry facial expression cannot be
+<br/>compensated for by a low price and top customer ratings related to likelihood to rent. Practitioners
+<br/>should keep in mind that the presence/absence of facial images and their inherent expressions have a
+<br/>significant impact in the peer-to-peer accommodation rental services.
+<br/>© 2017 Elsevier Ltd. All rights reserved.
+<br/>1. Introduction
+<br/>The sharing economy, characterized by peer-to-peer trans-
+<br/>actions, has seen immense growth recently. These marketplaces are
+<br/>defined by direct transactions between individuals (buyers and
+<br/>sellers), while the marketplace itself is provided by a third party
+<br/>(Botsman & Rogers, 2011). According to a recent survey by Penn
+<br/>Schoen Berland (2016), 22% of American adults have already
+<br/>offered something to this market, and 42% had used the service to
+<br/>buy a product or a service. PricewaterhouseCoopers (PwC) (2014),
+<br/>has predicted that these sharing economy sectors will be worth
+<br/>* The authors express their thanks to Dr. R. G. Vishnu Menon for assistance with
+<br/>the conjoint analysis.
+<br/>* Corresponding author. Westerdals Oslo School of Arts, Communication and
+<br/>Technology, Faculty of Technology, Christian Kroghs Gate 32, 0186, Oslo, Norway.
+<br/>http://dx.doi.org/10.1016/j.chb.2017.02.029
+<br/>0747-5632/© 2017 Elsevier Ltd. All rights reserved.
+<br/>around $335 billion by 2025. Their research further indicates that
+<br/>the most important growth sectors are lending and crowd funding,
+<br/>online staffing, and peer-to-peer accommodation. Participants in
+<br/>the peer-to-peer market tend to be motivated by new economic,
+<br/>environmental, and social factors (Bucher, Fieseler, & Lutz, 2016;
+<br/>B€ocker & Meelen, 2016; Schor, 2014) as this marketplace has
+<br/>some additional attributes compared to more traditional forms of
+<br/>commerce. The behavior of buyers on the peer-to-peer marketplace
+<br/>is, however, not well understood.
+<br/>Airbnb is a peer-to-peer platform that facilitates accommoda-
+<br/>tion rental services. This marketplace offers intangible experienced
+<br/>goods (Levitt, 1981, pp. 94e102), which are typically produced and
+<br/>consumed simultaneously (Gr€onroos, 1978). The sellers are co-
+<br/>producers of the service experience. Thus, the quality of renting
+<br/>an apartment on Airbnb cannot be verified before the buyer has
+<br/>started using the service. The Sellers on Airbnb are, therefore, an
+<br/>integrated part of the service that is delivered, and are expected to
+<br/>fulfill the buyer's needs throughout their stay. Consequently,
+</td><td>('2372119', 'Asle Fagerstrøm', 'asle fagerstrøm')<br/>('10665177', 'Sanchit Pawar', 'sanchit pawar')<br/>('3617093', 'Valdimar Sigurdsson', 'valdimar sigurdsson')<br/>('3232722', 'Mirella Yani-De-Soriano', 'mirella yani-de-soriano')</td><td>E-mail address: asle.fagerstrom@westerdals.no (A. Fagerstrøm).
+</td></tr><tr><td>d893f75206b122973cdbf2532f506912ccd6fbe0</td><td>Facial Expressions with Some Mixed
+<br/>Expressions Recognition Using Neural
+<br/>Networks
+<br/>Dr.R.Parthasarathi, V.Lokeswar Reddy, K.Vishnuthej, G.Vishnu Vandan
+<br/>Department of Information Technology
+<br/><b>Pondicherry Engineering College</b><br/>Puducherry-605014, India
+</td><td></td><td></td></tr><tr><td>d861c658db2fd03558f44c265c328b53e492383a</td><td>Automated Face Extraction and Normalization of 3D Mesh Data
+</td><td>('10423763', 'Jia Wu', 'jia wu')<br/>('1905646', 'Raymond Tse', 'raymond tse')<br/>('1809809', 'Linda G. Shapiro', 'linda g. shapiro')</td><td></td></tr><tr><td>d84a48f7d242d73b32a9286f9b148f5575acf227</td><td>Global and Local Consistent Age Generative
+<br/>Adversarial Networks
+<br/>Center for Research on Intelligent Perception and Computing, CASIA, Beijing, China
+<br/>National Laboratory of Pattern Recognition, CASIA, Beijing, China
+<br/><b>University of Chinese Academy of Sciences, Beijing, China</b></td><td>('2112221', 'Peipei Li', 'peipei li')<br/>('33079499', 'Yibo Hu', 'yibo hu')<br/>('39763795', 'Qi Li', 'qi li')<br/>('1705643', 'Ran He', 'ran he')<br/>('1757186', 'Zhenan Sun', 'zhenan sun')</td><td>Email: peipei.li, yibo.hu@cripac.ia.ac.cn, qli,rhe,znsun@nlpr.ia.ac.cn
+</td></tr><tr><td>d8f0bda19a345fac81a1d560d7db73f2b4868836</td><td><b>UNIVERSITY OF CALIFORNIA</b><br/>RIVERSIDE
+<br/>Online Activity Understanding and Labeling in Natural Videos
+<br/>A Dissertation submitted in partial satisfaction
+<br/>of the requirements for the degree of
+<br/>Doctor of Philosophy
+<br/>in
+<br/>Computer Science
+<br/>by
+<br/>August 2016
+<br/>Dissertation Committee:
+<br/>Dr. Amit K. Roy-Chowdhury, Chairperson
+<br/>Dr. Eamonn Keogh
+<br/>Dr. Evangelos Christidis
+<br/>Dr. Christian Shelton
+</td><td>('38514801', 'Mahmudul Hasan', 'mahmudul hasan')</td><td></td></tr><tr><td>d82b93f848d5442f82154a6011d26df8a9cd00e7</td><td>NEURAL NETWORK BASED AGE CLASSIFICATION USING
+<br/>LINEAR WAVELET TRANSFORMS
+<br/>1Department of Computer Science & Engineering,
+<br/><b>Sathyabama University Old Mamallapuram Road, Chennai, India</b><br/><b>Electronics Engineering, National Institute of Technical Teachers</b><br/>Training & Research, Taramani, Chennai, India
+</td><td></td><td>E-mail : 1nithyaranjith2002@yahoo.co.in, 2gkvel@rediffmail.com
+</td></tr><tr><td>d8722ffbca906a685abe57f3b7b9c1b542adfa0c</td><td><b>University of Twente</b><br/>Faculty: Electrical Engineering, Mathematics and Computer Science
+<br/>Department: Computer Science
+<br/>Group: Human Media Interaction
+<br/>Facial Expression Analysis for Human
+<br/>Computer Interaction
+<br/>Recognizing emotions in an intelligent tutoring system by facial
+<br/>expression analysis from a video stream
+<br/>M. Ghijsen
+<br/>November 2004
+<br/>Examination committee:
+<br/>Dr. D.K.J. Heylen
+<br/>Prof.dr.ir. A Nijholt
+<br/>Dr.ir. H.J.A. op den Akker
+<br/>Dr. M. Poel
+<br/>Ir. R.J. Rienks
+</td><td></td><td></td></tr><tr><td>d8896861126b7fd5d2ceb6fed8505a6dff83414f</td><td>In-Plane Rotational Alignment of Faces by Eye and Eye-Pair Detection
+<br/>M.F. Karaaba1, O. Surinta1, L.R.B. Schomaker1 and M.A. Wiering1
+<br/><b>Institute of Arti cial Intelligence and Cognitive Engineering (ALICE), University of Groningen</b><br/>Nijenborgh 9, Groningen 9747AG, The Netherlands
+<br/>Keywords:
+<br/>Eye-pair Detection, Eye Detection, Face Alignment, Face Recognition, Support Vector Machine
+</td><td></td><td>{m.f.karaaba, o.surinta, l.r.b.schomaker, m.a.wiering}@rug.nl
+</td></tr><tr><td>d83d2fb5403c823287f5889b44c1971f049a1c93</td><td>Motiv Emot
+<br/>DOI 10.1007/s11031-013-9353-6
+<br/>O R I G I N A L P A P E R
+<br/>Introducing the sick face
+<br/>Ó Springer Science+Business Media New York 2013
+</td><td>('3947094', 'Sherri C. Widen', 'sherri c. widen')</td><td></td></tr><tr><td>d8b568392970b68794a55c090c4dd2d7f90909d2</td><td>PDA Face Recognition System
+<br/>Using Advanced Correlation
+<br/>Filters
+<br/>Chee Kiat Ng
+<br/>2005
+<br/>Advisor: Prof. Khosla/Reviere
+</td><td></td><td></td></tr><tr><td>d83ae5926b05894fcda0bc89bdc621e4f21272da</td><td>version of the following thesis:
+<br/>Frugal Forests: Learning a Dynamic and Cost Sensitive
+<br/>Feature Extraction Policy for Anytime Activity Classification
+</td><td>('1794409', 'Kristen Grauman', 'kristen grauman')<br/>('1728389', 'Peter Stone', 'peter stone')</td><td></td></tr><tr><td>d86fabd4498c8feaed80ec342d254fb877fb92f5</td><td>Y. GOUTSU: REGION-OBJECT RELEVANCE-GUIDED VRD
+<br/>Region-Object Relevance-Guided
+<br/>Visual Relationship Detection
+<br/><b>National Institute of Informatics</b><br/>Tokyo, Japan
+</td><td>('2897806', 'Yusuke Goutsu', 'yusuke goutsu')</td><td>goutsu@nii.ac.jp
+</td></tr><tr><td>d8bf148899f09a0aad18a196ce729384a4464e2b</td><td>FACIAL EXPRESSION RECOGNITION AND EXPRESSION
+<br/>INTENSITY ESTIMATION
+<br/>A dissertation submitted to the
+<br/>Graduate School—New Brunswick
+<br/><b>Rutgers, The State University of New Jersey</b><br/>in partial fulfillment of the requirements
+<br/>for the degree of
+<br/>Doctor of Philosophy
+<br/>Graduate Program in Computer Science
+<br/>Written under the direction of
+<br/>and approved by
+<br/>New Brunswick, New Jersey
+<br/>May, 2011
+</td><td>('1683829', 'PENG YANG', 'peng yang')<br/>('1711560', 'Dimitris N. Metaxas', 'dimitris n. metaxas')</td><td></td></tr><tr><td>d80a3d1f3a438e02a6685e66ee908446766fefa9</td><td>ZHANG ET AL.: QUANTIFYING FACIAL AGE BY POSTERIOR OF AGE COMPARISONS
+<br/>Quantifying Facial Age by Posterior of
+<br/>Age Comparisons
+<br/>1 SenseTime Group Limited
+<br/>2 Department of Information Engineering,
+<br/><b>The Chinese University of Hong Kong</b></td><td>('6693591', 'Yunxuan Zhang', 'yunxuan zhang')<br/>('46457827', 'Li Liu', 'li liu')<br/>('46651787', 'Cheng Li', 'cheng li')<br/>('1717179', 'Chen Change Loy', 'chen change loy')</td><td>zhangyunxuan@sensetime.com
+<br/>liuli@sensetime.com
+<br/>chengli@sensetime.com
+<br/>ccloy@ie.cuhk.edu.hk
+</td></tr><tr><td>d850aff9d10a01ad5f1d8a1b489fbb3998d0d80e</td><td><b>UNIVERSITY OF CALIFORNIA</b><br/>IRVINE
+<br/>Recognizing and Segmenting Objects in the Presence of Occlusion and Clutter
+<br/>DISSERTATION
+<br/>submitted in partial satisfaction of the requirements
+<br/>for the degree of
+<br/>DOCTOR OF PHILOSOPHY
+<br/>in Computer Science
+<br/>by
+<br/>Dissertation Committee:
+<br/>Professor Charless Fowlkes, Chair
+<br/>Professor Deva Ramanan
+<br/>Professor Alexander Ihler
+<br/>2016
+</td><td>('1898210', 'Golnaz Ghiasi', 'golnaz ghiasi')</td><td></td></tr><tr><td>d89cfed36ce8ffdb2097c2ba2dac3e2b2501100d</td><td>Robust Face Recognition via Multimodal Deep
+<br/>Face Representation
+</td><td>('37990555', 'Changxing Ding', 'changxing ding')<br/>('1692693', 'Dacheng Tao', 'dacheng tao')</td><td></td></tr><tr><td>ab8f9a6bd8f582501c6b41c0e7179546e21c5e91</td><td>Nonparametric Face Verification Using a Novel
+<br/>Face Representation
+</td><td>('3326805', 'Hae Jong Seo', 'hae jong seo')<br/>('1718280', 'Peyman Milanfar', 'peyman milanfar')</td><td></td></tr><tr><td>ab58a7db32683aea9281c188c756ddf969b4cdbd</td><td>Efficient Solvers for Sparse Subspace Clustering
+</td><td>('50333204', 'Stephen Becker', 'stephen becker')</td><td></td></tr><tr><td>ab734bac3994b00bf97ce22b9abc881ee8c12918</td><td>Log-Euclidean Metric Learning on Symmetric Positive Definite Manifold
+<br/>with Application to Image Set Classification
+<br/>†Key Laboratory of Intelligent Information Processing of Chinese Academy of Sciences (CAS),
+<br/><b>Institute of Computing Technology, CAS, Beijing, 100190, China</b><br/><b>University of Chinese Academy of Sciences, Beijing, 100049, China</b><br/>§Cooperative Medianet Innovation Center, China
+</td><td>('7945869', 'Zhiwu Huang', 'zhiwu huang')<br/>('3373117', 'Ruiping Wang', 'ruiping wang')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('3046528', 'Xianqiu Li', 'xianqiu li')<br/>('1710220', 'Xilin Chen', 'xilin chen')</td><td>ZHIWU.HUANG@VIPL.ICT.AC.CN
+<br/>WANGRUIPING@ICT.AC.CN
+<br/>SGSHAN@ICT.AC.CN
+<br/>XIANQIU.LI@VIPL.ICT.AC.CN
+<br/>XLCHEN@ICT.AC.CN
+</td></tr><tr><td>aba770a7c45e82b2f9de6ea2a12738722566a149</td><td>Face Recognition in the Scrambled Domain via Salience-Aware
+<br/>Ensembles of Many Kernels
+<br/>Jiang, R., Al-Maadeed, S., Bouridane, A., Crookes, D., & Celebi, M. E. (2016). Face Recognition in the
+<br/>Scrambled Domain via Salience-Aware Ensembles of Many Kernels. IEEE Transactions on Information
+<br/>Forensics and Security, 11(8), 1807-1817. DOI: 10.1109/TIFS.2016.2555792
+<br/>Published in:
+<br/>Document Version:
+<br/>Peer reviewed version
+<br/><b>Queen's University Belfast - Research Portal</b><br/><b>Link to publication record in Queen's University Belfast Research Portal</b><br/>Publisher rights
+<br/><b>c) 2016 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other users, including reprinting</b><br/>republishing this material for advertising or promotional purposes, creating new collective works for resale or redistribution to servers or lists,
+<br/>or reuse of any copyrighted components of this work in other works.
+<br/>General rights
+<br/><b>Copyright for the publications made accessible via the Queen's University Belfast Research Portal is retained by the author(s) and / or other</b><br/>copyright owners and it is a condition of accessing these publications that users recognise and abide by the legal requirements associated
+<br/>with these rights.
+<br/>Take down policy
+<br/>The Research Portal is Queen's institutional repository that provides access to Queen's research output. Every effort has been made to
+<br/>ensure that content in the Research Portal does not infringe any person's rights, or applicable UK laws. If you discover content in the
+<br/>Download date:05. Nov. 2018
+</td><td></td><td>Research Portal that you believe breaches copyright or violates any law, please contact openaccess@qub.ac.uk.
+</td></tr><tr><td>ab0f9bc35b777eaefff735cb0dd0663f0c34ad31</td><td>Semi-Supervised Learning of Geospatial Objects
+<br/>Through Multi-Modal Data Integration
+<br/>Electrical Engineering and Computer Science
+<br/><b>University of California, Merced, CA</b></td><td>('1698559', 'Yi Yang', 'yi yang')</td><td>Email: snewsam@ucmerced.edu
+</td></tr><tr><td>abb396490ba8b112f10fbb20a0a8ce69737cd492</td><td>Robust Face Recognition Using Color
+<br/>Information
+<br/><b>New Jersey Institute of Technology</b></td><td>('2047820', 'Zhiming Liu', 'zhiming liu')<br/>('39664966', 'Chengjun Liu', 'chengjun liu')</td><td>Newark, New Jersey 07102, USA. femail:zl9@njit.edug
+</td></tr><tr><td>ab989225a55a2ddcd3b60a99672e78e4373c0df1</td><td>Sample, Computation vs Storage Tradeoffs for
+<br/>Classification Using Tensor Subspace Models
+</td><td>('9039699', 'Mohammadhossein Chaghazardi', 'mohammadhossein chaghazardi')<br/>('1980683', 'Shuchin Aeron', 'shuchin aeron')</td><td></td></tr><tr><td>abac0fa75281c9a0690bf67586280ed145682422</td><td>Describable Visual Attributes for Face Images
+<br/>Submitted in partial fulfillment of the
+<br/>requirements for the degree
+<br/>of Doctor of Philosophy
+<br/>in the Graduate School of Arts and Sciences
+<br/><b>COLUMBIA UNIVERSITY</b><br/>2011
+</td><td>('40192613', 'Neeraj Kumar', 'neeraj kumar')</td><td></td></tr><tr><td>ab6776f500ed1ab23b7789599f3a6153cdac84f7</td><td>International Journal of Scientific & Engineering Research, Volume 6, Issue 4, April-2015 1212
+<br/>ISSN 2229-5518
+<br/>A Survey on Various Facial Expression
+<br/>Techniques
+</td><td>('2122870', 'Joy Bhattacharya', 'joy bhattacharya')</td><td></td></tr><tr><td>ab1719f573a6c121d7d7da5053fe5f12de0182e7</td><td>Combining Visual Recognition
+<br/>and Computational Linguistics
+<br/>Linguistic Knowledge for Visual Recognition
+<br/>and Natural Language Descriptions
+<br/>of Visual Content
+<br/>Thesis for obtaining the title of
+<br/>Doctor of Engineering Science
+<br/>(Dr.-Ing.)
+<br/>of the Faculty of Natural Science and Technology I
+<br/><b>of Saarland University</b><br/>by
+<br/>Saarbrücken
+<br/>March 2014
+</td><td>('34849128', 'Marcus Rohrbach', 'marcus rohrbach')</td><td></td></tr><tr><td>ab2b09b65fdc91a711e424524e666fc75aae7a51</td><td>Multi-modal Biomarkers to Discriminate Cognitive State*
+<br/>1MIT Lincoln Laboratory, Lexington, Massachusetts, USA
+<br/>2USARIEM, 3NSRDEC
+<br/>1. Introduction
+<br/>Multimodal biomarkers based on behavorial, neurophysiolgical, and cognitive measurements have
+<br/>recently obtained increasing popularity in the detection of cognitive stress- and neurological-based
+<br/>disorders. Such conditions are significantly and adversely affecting human performance and quality
+<br/>of life for a large fraction of the world’s population. Example modalities used in detection of these
+<br/>conditions include voice, facial expression, physiology, eye tracking, gait, and EEG analysis.
+<br/>Toward the goal of finding simple, noninvasive means to detect, predict and monitor cognitive
+<br/>stress and neurological conditions, MIT Lincoln Laboratory is developing biomarkers that satisfy
+<br/>three criteria. First, we seek biomarkers that reflect core components of cognitive status such as
+<br/>working memory capacity, processing speed, attention, and arousal. Second, and as importantly, we
+<br/>seek biomarkers that reflect timing and coordination relations both within components of each
+<br/>modality and across different modalities. This is based on the hypothesis that neural coordination
+<br/>across different parts of the brain is essential in cognition (Figure 1). An example of timing and
+<br/>coordination within a modality is the set of finely timed and synchronized physiological
+<br/>components of speech production, while an example of coordination across modalities is the timing
+<br/>and synchrony that occurs across speech and facial expression while speaking. Third, we seek
+<br/>multimodal biomarkers that contribute in a complementary fashion under various channel and
+<br/>background conditions. In this chapter, as an illustration of this biomarker approach we focus on
+<br/>cognitive stress and the particular case of detecting different cognitive load levels. We also briefly
+<br/>show how similar feature-extraction principles can be applied to a neurological condition through
+<br/>the example of major depression disorder (MDD). MDD is one of several neurological disorders
+<br/>where multi-modal biomarkers based on principles of timing and coordination are important for
+<br/>detection [11]-[22]. In our cognitive load experiments, we use two easily obtained noninvasive
+<br/>modalities, voice and face, and show how these two modalities can be fused to produce results on
+<br/>par with more invasive, “gold-standard” EEG measurements. Vocal and facial biomarkers will also
+<br/>be used in our MDD case study. In both application areas we focus on timing and coordination
+<br/>relations within the components of each modality.
+<br/>* Distribution A: public release.This work is sponsored by the Assistant Secretary of Defense for Research & Engineering under Air Force contract
+<br/>#FA8721-05-C-0002. Opinions,interpretations, conclusions, and recommendations are those of the authors and are not necessarily endorsed by the United States
+<br/>Government.
+</td><td>('1718470', 'Thomas F. Quatieri', 'thomas f. quatieri')<br/>('48628822', 'James R. Williamson', 'james r. williamson')<br/>('2794344', 'Christopher J. Smalt', 'christopher j. smalt')<br/>('38799981', 'Tejash Patel', 'tejash patel')<br/>('2894484', 'Brian S. Helfer', 'brian s. helfer')<br/>('3051832', 'Daryush D. Mehta', 'daryush d. mehta')<br/>('35718569', 'Kristin Heaton', 'kristin heaton')<br/>('47534051', 'Marianna Eddy', 'marianna eddy')<br/>('49739272', 'Joseph Moran', 'joseph moran')</td><td>[quatieri,jrw]@ll.mit.edu
+</td></tr><tr><td>ab87dfccb1818bdf0b41d732da1f9335b43b74ae</td><td>SUBMITTED TO IEEE TRANSACTIONS ON SIGNAL PROCESSING
+<br/>Structured Dictionary Learning for Classification
+</td><td>('36657778', 'Yuanming Suo', 'yuanming suo')<br/>('31507586', 'Minh Dao', 'minh dao')<br/>('35210356', 'Umamahesh Srinivas', 'umamahesh srinivas')<br/>('3346079', 'Vishal Monga', 'vishal monga')<br/>('1709073', 'Trac D. Tran', 'trac d. tran')</td><td></td></tr><tr><td>abc1ef570bb2d7ea92cbe69e101eefa9a53e1d72</td><td>Raisonnement abductif en logique de
+<br/>description exploitant les domaines concrets
+<br/>spatiaux pour l’interprétation d’images
+<br/>1. LTCI, Télécom ParisTech, Université Paris-Saclay, Paris, France
+<br/><b>Universit Paris-Dauphine, PSL Research University, CNRS, UMR</b><br/>LAMSADE, 75016 Paris, France
+<br/>RÉSUMÉ. L’interprétation d’images a pour objectif non seulement de détecter et reconnaître des
+<br/>objets dans une scène mais aussi de fournir une description sémantique tenant compte des in-
+<br/>formations contextuelles dans toute la scène. Le problème de l’interprétation d’images peut être
+<br/>formalisé comme un problème de raisonnement abductif, c’est-à-dire comme la recherche de la
+<br/>meilleure explication en utilisant une base de connaissances. Dans ce travail, nous présentons
+<br/>une nouvelle approche utilisant une méthode par tableau pour la génération et la sélection
+<br/>d’explications possibles d’une image donnée lorsque les connaissances, exprimées dans une
+<br/>logique de description, comportent des concepts décrivant les objets mais aussi les relations
+<br/>spatiales entre ces objets. La meilleure explication est sélectionnée en exploitant les domaines
+<br/>concrets pour évaluer le degré de satisfaction des relations spatiales entre les objets.
+</td><td>('4156317', 'Yifan Yang', 'yifan yang')<br/>('1773774', 'Jamal Atif', 'jamal atif')<br/>('1695917', 'Isabelle Bloch', 'isabelle bloch')</td><td>{yifan.yang,isabelle.bloch}@telecom-paristech.fr
+<br/>jamal.atif@dauphine.fr
+</td></tr><tr><td>abba1bf1348a6f1b70a26aac237338ee66764458</td><td>Facial Action Unit Detection Using Attention and Relation Learning
+<br/><b>Shanghai Jiao Tong University, China</b><br/><b>School of Computer Science and Technology, Tianjin University, China</b><br/><b>School of Computer Science and Engineering, Nanyang Technological University, Singapore</b><br/>4 Tencent YouTu, China
+<br/><b>School of Computer Science and Software Engineering, East China Normal University, China</b></td><td>('3403352', 'Zhiwen Shao', 'zhiwen shao')<br/>('1771215', 'Zhilei Liu', 'zhilei liu')<br/>('1688642', 'Jianfei Cai', 'jianfei cai')<br/>('10609538', 'Yunsheng Wu', 'yunsheng wu')<br/>('8452947', 'Lizhuang Ma', 'lizhuang ma')</td><td>shaozhiwen@sjtu.edu.cn, zhileiliu@tju.edu.cn, asjfcai@ntu.edu.sg
+<br/>simonwu@tencent.com, ma-lz@cs.sjtu.edu.cn
+</td></tr><tr><td>abdd17e411a7bfe043f280abd4e560a04ab6e992</td><td>Pose-Robust Face Recognition via Deep Residual Equivariant Mapping
+<br/><b>The Chinese University of Hong Kong</b><br/>2SenseTime Research
+</td><td>('9963152', 'Kaidi Cao', 'kaidi cao')<br/>('46651787', 'Cheng Li', 'cheng li')</td><td>{ry017, ccloy, xtang}@ie.cuhk.edu.hk
+<br/>{caokaidi, chengli}@sensetime.com
+</td></tr><tr><td>ab1dfcd96654af0bf6e805ffa2de0f55a73c025d</td><td></td><td></td><td></td></tr><tr><td>abeda55a7be0bbe25a25139fb9a3d823215d7536</td><td>UNIVERSITATPOLITÈCNICADECATALUNYAProgramadeDoctorat:AUTOMÀTICA,ROBÒTICAIVISIÓTesiDoctoralUnderstandingHuman-CentricImages:FromGeometrytoFashionEdgarSimoSerraDirectors:FrancescMorenoNoguerCarmeTorrasMay2015 </td><td></td><td></td></tr><tr><td>ab427f0c7d4b0eb22c045392107509451165b2ba</td><td>LEARNING SCALE RANGES FOR THE EXTRACTION OF REGIONS OF
+<br/>INTEREST
+<br/><b>Western Kentucky University</b><br/>Department of Mathematics and Computer Science
+<br/><b>College Heights Blvd, Bowling Green, KY</b></td><td>('1682467', 'Qi Li', 'qi li')<br/>('2446364', 'Zachary Bessinger', 'zachary bessinger')</td><td></td></tr><tr><td>ab1900b5d7cf3317d17193e9327d57b97e24d2fc</td><td></td><td></td><td></td></tr><tr><td>ab8fb278db4405f7db08fa59404d9dd22d38bc83</td><td>UNIVERSITÉ DE GENÈVE
+<br/>Département d'Informatique
+<br/>FACULTÉ DES SCIENCES
+<br/>Implicit and Automated Emotional
+<br/>Tagging of Videos
+<br/>THÈSE
+<br/>présenté à la Faculté des sciences de l'Université de Genève
+<br/>pour obtenir le grade de Docteur ès sciences, mention informatique
+<br/>par
+<br/>de
+<br/>Téhéran (IRAN)
+<br/>Thèse No 4368
+<br/>GENÈVE
+<br/>Repro-Mail - Université de Genève
+<br/>2011
+</td><td>('1809085', 'Thierry Pun', 'thierry pun')<br/>('2463695', 'Mohammad SOLEYMANI', 'mohammad soleymani')</td><td></td></tr><tr><td>e5e5f31b81ed6526c26d277056b6ab4909a56c6c</td><td>Revisit Multinomial Logistic Regression in Deep Learning:
+<br/>Data Dependent Model Initialization for Image Recognition
+<br/><b>University of Illinois at Urbana-Champaign</b><br/>2Ping An Property&Casualty Insurance Company of China,
+<br/>3Microsoft
+</td><td>('50563570', 'Bowen Cheng', 'bowen cheng')<br/>('1972288', 'Rong Xiao', 'rong xiao')<br/>('3133575', 'Yandong Guo', 'yandong guo')<br/>('1689532', 'Yuxiao Hu', 'yuxiao hu')<br/>('38504661', 'Jianfeng Wang', 'jianfeng wang')<br/>('48571185', 'Lei Zhang', 'lei zhang')</td><td>1bcheng9@illinois.edu
+<br/>2xiaorong283@pingan.com.cn
+<br/>3yandong.guo@live.com, yuxiaohu@msn.com, {jianfw, leizhang}@microsoft.com
+</td></tr><tr><td>e5737ffc4e74374b0c799b65afdbf0304ff344cb</td><td></td><td></td><td></td></tr><tr><td>e506cdb250eba5e70c5147eb477fbd069714765b</td><td>Heterogeneous Face Recognition
+<br/>By
+<br/>Brendan F. Klare
+<br/>A Dissertation
+<br/>Submitted to
+<br/><b>Michigan State University</b><br/>in partial fulfillment of the requirements
+<br/>for the degree of
+<br/>Doctor of Philosophy
+<br/>Computer Science and Engineering
+<br/>2012
+</td><td></td><td></td></tr><tr><td>e572c42d8ef2e0fadedbaae77c8dfe05c4933fbf</td><td>A Visual Historical Record of American High School Yearbooks
+<br/>A Century of Portraits:
+<br/><b>University of California Berkeley</b><br/><b>Brown University</b><br/><b>University of California Berkeley</b></td><td>('2361255', 'Shiry Ginosar', 'shiry ginosar')<br/>('2660664', 'Kate Rakelly', 'kate rakelly')<br/>('33385802', 'Sarah Sachs', 'sarah sachs')<br/>('2130100', 'Brian Yin', 'brian yin')<br/>('1763086', 'Alexei A. Efros', 'alexei a. efros')</td><td></td></tr><tr><td>e5823a9d3e5e33e119576a34cb8aed497af20eea</td><td>DocFace+: ID Document to Selfie* Matching
+</td><td>('9644181', 'Yichun Shi', 'yichun shi')<br/>('6680444', 'Anil K. Jain', 'anil k. jain')</td><td></td></tr><tr><td>e5dfd17dbfc9647ccc7323a5d62f65721b318ba9</td><td></td><td></td><td></td></tr><tr><td>e510f2412999399149d8635a83eca89c338a99a1</td><td>Journal of Advanced Computer Science and Technology, 1 (4) (2012) 266-283
+<br/>c(cid:13)Science Publishing Corporation
+<br/>www.sciencepubco.com/index.php/JACST
+<br/>Face Recognition using Block-Based
+<br/>DCT Feature Extraction
+<br/>1Department of Electronics and Communication Engineering,
+<br/><b>M S Ramaiah Institute of Technology, Bangalore, Karnataka, India</b><br/>2Department of Electronics and Communication Engineering,
+<br/><b>S J B Institute of Technology, Bangalore, Karnataka, India</b></td><td>('2472608', 'K Manikantan', 'k manikantan')<br/>('3389602', 'Vaishnavi Govindarajan', 'vaishnavi govindarajan')<br/>('35084871', 'V V S Sasi Kiran', 'v v s sasi kiran')<br/>('1687245', 'S Ramachandran', 's ramachandran')</td><td>E-mail: kmanikantan@msrit.edu
+<br/>E-mail: vaish.india@gmail.com
+<br/>E-mail: sasikiran.f4@gmail.com
+<br/>E-mail: ramachandr@gmail.com
+</td></tr><tr><td>e56c4c41bfa5ec2d86c7c9dd631a9a69cdc05e69</td><td>Human Activity Recognition Based on Wearable
+<br/>Sensor Data: A Standardization of the
+<br/>State-of-the-Art
+<br/>Smart Surveillance Interest Group, Computer Science Department
+<br/>Universidade Federal de Minas Gerais, Brazil
+</td><td>('2954974', 'Antonio C. Nazare', 'antonio c. nazare')<br/>('1679142', 'William Robson Schwartz', 'william robson schwartz')</td><td>Email: {arturjordao, antonio.nazare, jessicasena, william}@dcc.ufmg.br
+</td></tr><tr><td>e59813940c5c83b1ce63f3f451d03d34d2f68082</td><td>Faculty of Informatics - Papers (Archive)
+<br/>Faculty of Engineering and Information Sciences
+<br/><b>University of Wollongong</b><br/>Research Online
+<br/>2008
+<br/>A real-time facial expression recognition system for
+<br/>online games
+<br/>Publication Details
+<br/>Zhan, C., Li, W., Ogunbona, P. & Safaei, F. (2008). A real-time facial expression recognition system for online games. International
+<br/>Journal of Computer Games Technology, 2008 (Article No. 10), 1-7.
+<br/>Research Online is the open access institutional repository for the
+<br/><b>University of Wollongong. For further information contact the UOW</b></td><td>('3283367', 'Ce Zhan', 'ce zhan')<br/>('1685696', 'Wanqing Li', 'wanqing li')<br/>('1719314', 'Philip Ogunbona', 'philip ogunbona')<br/>('1803733', 'Farzad Safaei', 'farzad safaei')</td><td>University of Wollongong, czhan@uow.edu.au
+<br/>University of Wollongong, wanqing@uow.edu.au
+<br/>University of Wollongong, philipo@uow.edu.au
+<br/>University of Wollongong, farzad@uow.edu.au
+<br/>Library: research-pubs@uow.edu.au
+</td></tr><tr><td>e5b301ee349ba8e96ea6c71782295c4f06be6c31</td><td>The Case for Onloading Continuous High-Datarate Perception to the Phone
+<br/><b>University of Washington</b><br/>Microsoft Research
+</td><td>('1871038', 'Seungyeop Han', 'seungyeop han')<br/>('3041721', 'Matthai Philipose', 'matthai philipose')</td><td></td></tr><tr><td>e569f4bd41895028c4c009e5b46b935056188e91</td><td>SIMONYAN et al.: FISHER VECTOR FACES IN THE WILD
+<br/>Fisher Vector Faces in the Wild
+<br/>Visual Geometry Group
+<br/>Department of Engineering Science
+<br/><b>University of Oxford</b><br/>Omkar M. Parkhi
+<br/>Andrea Vedaldi
+<br/>Andrew Zisserman
+</td><td>('34838386', 'Karen Simonyan', 'karen simonyan')</td><td>karen@robots.ox.ac.uk
+<br/>omkar@robots.ox.ac.uk
+<br/>vedaldi@robots.ox.ac.uk
+<br/>az@robots.ox.ac.uk
+</td></tr><tr><td>e5fbffd3449a2bfe0acb4ec339a19f5b88fff783</td><td>WILES, KOEPKE, ZISSERMAN: SELF-SUP. FACIAL ATTRIBUTE FROM VIDEO
+<br/>Self-supervised learning of a facial attribute
+<br/>embedding from video
+<br/>Visual Geometry Group
+<br/><b>University of Oxford</b><br/>Oxford, UK
+</td><td>('8792285', 'Olivia Wiles', 'olivia wiles')<br/>('47104886', 'A. Sophia Koepke', 'a. sophia koepke')<br/>('1688869', 'Andrew Zisserman', 'andrew zisserman')</td><td>ow@robots.ox.ac.uk
+<br/>koepke@robots.ox.ac.uk
+<br/>az@robots.ox.ac.uk
+</td></tr><tr><td>e5342233141a1d3858ed99ccd8ca0fead519f58b</td><td>ISSN: 2277 – 9043
+<br/>International Journal of Advanced Research in Computer Science and Electronics Engineering (IJARCSEE)
+<br/>Volume 2, Issue 2, February 2013
+<br/>Finger print and Palm print based Multibiometric
+<br/>Authentication System with GUI Interface
+<br/><b>PG Scholar, Dr.Pauls Engineering College, Villupuram District, Tamilnadu, India</b><br/><b>Dr.Pauls Engineering College, Villupuram District, Tamilnadu, India</b></td><td></td><td></td></tr><tr><td>e52be9a083e621d9ed29c8e9914451a6a327ff59</td><td>UvA-DARE (Digital Academic Repository)
+<br/>Communication and Automatic Interpretation of Affect from Facial Expressions
+<br/>Salah, A.A.; Sebe, N.; Gevers, T.
+<br/>Published in:
+<br/>Affective computing and interaction: psychological, cognitive, and neuroscientific perspectives
+<br/>Link to publication
+<br/>Citation for published version (APA):
+<br/>Salah, A. A., Sebe, N., & Gevers, T. (2010). Communication and Automatic Interpretation of Affect from Facial
+<br/>Expressions. In D. Gökçay, & G. Yildirim (Eds.), Affective computing and interaction: psychological, cognitive,
+<br/>and neuroscientific perspectives (pp. 157-183). Hershey, PA: Information Science Reference.
+<br/>General rights
+<br/>It is not permitted to download or to forward/distribute the text or part of it without the consent of the author(s) and/or copyright holder(s),
+<br/>other than for strictly personal, individual use, unless the work is under an open content license (like Creative Commons).
+<br/>Disclaimer/Complaints regulations
+<br/>If you believe that digital publication of certain material infringes any of your rights or (privacy) interests, please let the Library know, stating
+<br/>your reasons. In case of a legitimate complaint, the Library will make the material inaccessible and/or remove it from the website. Please Ask
+<br/><b>the Library: http://uba.uva.nl/en/contact, or a letter to: Library of the University of Amsterdam, Secretariat, Singel 425, 1012 WP Amsterdam</b><br/>The Netherlands. You will be contacted as soon as possible.
+<br/>Download date: 12 Sep 2017
+<br/><b>UvA-DARE is a service provided by the library of the University of Amsterdam (http://dare.uva.nl</b></td><td></td><td></td></tr><tr><td>e5d53a335515107452a30b330352cad216f88fc3</td><td>Generalized Loss-Sensitive Adversarial Learning
+<br/>with Manifold Margins
+<br/>Laboratory for MAchine Perception and LEarning (MAPLE)
+<br/>http://maple.cs.ucf.edu/
+<br/><b>University of Central Florida, Orlando FL 32816, USA</b></td><td>('46232436', 'Marzieh Edraki', 'marzieh edraki')<br/>('2272096', 'Guo-Jun Qi', 'guo-jun qi')</td><td>m.edraki@knights.ucf.edu, guojun.qi@ucf.edu
+</td></tr><tr><td>e5799fd239531644ad9270f49a3961d7540ce358</td><td>KINSHIP CLASSIFICATION BY MODELING FACIAL FEATURE HEREDITY
+<br/><b>Cornell University 2Eastman Kodak Company</b></td><td>('2666471', 'Ruogu Fang', 'ruogu fang')<br/>('39460815', 'Andrew C. Gallagher', 'andrew c. gallagher')<br/>('1746230', 'Tsuhan Chen', 'tsuhan chen')</td><td></td></tr><tr><td>e5eb7fa8c9a812d402facfe8e4672670541ed108</td><td>Performance of PCA Based Semi-supervised
+<br/>Learning in Face Recognition Using MPEG-7
+<br/>Edge Histogram Descriptor
+<br/>Department of Computer Science and Engineering
+<br/><b>Bangladesh University of Engineering and Technology(BUET</b><br/>Dhaka-1000, Bangladesh
+</td><td>('3034202', 'Sheikh Motahar Naim', 'sheikh motahar naim')<br/>('9248625', 'Abdullah Al Farooq', 'abdullah al farooq')<br/>('1990532', 'Md. Monirul Islam', 'md. monirul islam')</td><td>Email: {shafin buet, naim sbh2007, saurav00001}@yahoo.com, mmislam@cse.buet.ac.bd
+</td></tr><tr><td>e22adcd2a6a7544f017ec875ce8f89d5c59e09c8</td><td>Published in Proc. of IEEE 9th International Conference on Biometrics: Theory, Applications and Systems (BTAS), (Los
+<br/>Angeles, CA), October 2018.
+<br/>Gender Privacy: An Ensemble of Semi Adversarial Networks for Confounding
+<br/>Arbitrary Gender Classifiers
+<br/><b>Computer Science and Engineering, Michigan State University, East Lansing, USA</b><br/><b>University of Wisconsin Madison, USA</b></td><td>('5456235', 'Vahid Mirjalili', 'vahid mirjalili')<br/>('2562040', 'Sebastian Raschka', 'sebastian raschka')<br/>('1698707', 'Arun Ross', 'arun ross')</td><td>mirjalil@cse.msu.edu
+<br/>mail@sebastianraschka.com
+<br/>rossarun@cse.msu.edu
+</td></tr><tr><td>e27c92255d7ccd1860b5fb71c5b1277c1648ed1e</td><td></td><td></td><td></td></tr><tr><td>e200c3f2849d56e08056484f3b6183aa43c0f13a</td><td></td><td></td><td></td></tr><tr><td>e2d265f606cd25f1fd72e5ee8b8f4c5127b764df</td><td>Real-Time End-to-End Action Detection
+<br/>with Two-Stream Networks
+<br/><b>School of Engineering, University of Guelph</b><br/><b>Vector Institute for Arti cial Intelligence</b><br/><b>Canadian Institute for Advanced Research</b></td><td>('35933395', 'Alaaeldin El-Nouby', 'alaaeldin el-nouby')<br/>('3861110', 'Graham W. Taylor', 'graham w. taylor')</td><td>{aelnouby,gwtaylor}@uoguelph.ca
+</td></tr><tr><td>e293a31260cf20996d12d14b8f29a9d4d99c4642</td><td>Published as a conference paper at ICLR 2017
+<br/>LR-GAN: LAYERED RECURSIVE GENERATIVE AD-
+<br/>VERSARIAL NETWORKS FOR IMAGE GENERATION
+<br/>Virginia Tech
+<br/>Blacksburg, VA
+<br/>Facebook AI Research
+<br/>Menlo Park, CA
+<br/><b>Georgia Institute of Technology</b><br/>Atlanta, GA
+</td><td>('2404941', 'Jianwei Yang', 'jianwei yang')<br/>('39248118', 'Anitha Kannan', 'anitha kannan')<br/>('1746610', 'Dhruv Batra', 'dhruv batra')</td><td>jw2yang@vt.edu
+<br/>akannan@fb.com
+<br/>{dbatra, parikh}@gatech.edu
+</td></tr><tr><td>e20e2db743e8db1ff61279f4fda32bf8cf381f8e</td><td>Deep Cross Polarimetric Thermal-to-visible Face Recognition
+<br/><b>West Virginia University</b></td><td>('6779960', 'Seyed Mehdi Iranmanesh', 'seyed mehdi iranmanesh')<br/>('35477977', 'Ali Dabouei', 'ali dabouei')<br/>('2700951', 'Hadi Kazemi', 'hadi kazemi')<br/>('8147588', 'Nasser M. Nasrabadi', 'nasser m. nasrabadi')</td><td>{seiranmanesh, ad0046, hakazemi}@mix.wvu.edu, {nasser.nasrabadi}@mail.wvu.edu
+</td></tr><tr><td>f437b3884a9e5fab66740ca2a6f1f3a5724385ea</td><td>Human Identification Technical Challenges
+<br/>DARPA
+<br/>3701 N. Fairfax Dr
+<br/>Arlington, VA 22203
+</td><td>('32028519', 'P. Jonathon Phillips', 'p. jonathon phillips')</td><td>jphillips@darpa.mil
+</td></tr><tr><td>f412d9d7bc7534e7daafa43f8f5eab811e7e4148</td><td>Durham Research Online
+<br/>Deposited in DRO:
+<br/>16 December 2014
+<br/>Version of attached le:
+<br/>Accepted Version
+<br/>Peer-review status of attached le:
+<br/>Peer-reviewed
+<br/>Citation for published item:
+<br/>Kirk, H. E. and Hocking, D. R. and Riby, D. M. and Cornish, K. M. (2013) 'Linking social behaviour and
+<br/>anxiety to attention to emotional faces in Williams syndrome.', Research in developmental disabilities., 34
+<br/>(12). pp. 4608-4616.
+<br/>Further information on publisher's website:
+<br/>http://dx.doi.org/10.1016/j.ridd.2013.09.042
+<br/>Publisher's copyright statement:
+<br/>NOTICE: this is the author's version of a work that was accepted for publication in Research in Developmental
+<br/>Disabilities. Changes resulting from the publishing process, such as peer review, editing, corrections, structural
+<br/>formatting, and other quality control mechanisms may not be reected in this document. Changes may have been made
+<br/>to this work since it was submitted for publication. A denitive version was subsequently published in Research in
+<br/>Developmental Disabilities, 34, 12, December 2013, 10.1016/j.ridd.2013.09.042.
+<br/>Additional information:
+<br/>Use policy
+<br/>The full-text may be used and/or reproduced, and given to third parties in any format or medium, without prior permission or charge, for
+<br/>personal research or study, educational, or not-for-prot purposes provided that:
+<br/>• a full bibliographic reference is made to the original source
+<br/>• a link is made to the metadata record in DRO
+<br/>• the full-text is not changed in any way
+<br/>The full-text must not be sold in any format or medium without the formal permission of the copyright holders.
+<br/>Please consult the full DRO policy for further details.
+<br/><b>Durham University Library, Stockton Road, Durham DH1 3LY, United Kingdom</b><br/>Tel : +44 (0)191 334 3042 | Fax : +44 (0)191 334 2971
+<br/>http://dro.dur.ac.uk
+</td><td></td><td></td></tr><tr><td>f43eeb578e0ca48abfd43397bbd15825f94302e4</td><td>Optical Computer Recognition of Facial Expressions
+<br/>Associated with Stress Induced by Performance
+<br/>Demands
+<br/>DINGES DF, RIDER RL, DORRIAN J, MCGLINCHEY EL, ROGERS NL,
+<br/>CIZMAN Z, GOLDENSTEIN SK, VOGLER C, VENKATARAMAN S, METAXAS
+<br/>DN. Optical computer recognition of facial expressions associated
+<br/>with stress induced by performance demands. Aviat Space Environ
+<br/>Med 2005; 76(6, Suppl.):B172– 82.
+<br/>Application of computer vision to track changes in human facial
+<br/>expressions during long-duration spaceflight may be a useful way to
+<br/>unobtrusively detect the presence of stress during critical operations. To
+<br/>develop such an approach, we applied optical computer recognition
+<br/>(OCR) algorithms for detecting facial changes during performance while
+<br/>people experienced both low- and high-stressor performance demands.
+<br/>Workload and social feedback were used to vary performance stress in
+<br/>60 healthy adults (29 men, 31 women; mean age 30 yr). High-stressor
+<br/>scenarios involved more difficult performance tasks, negative social
+<br/>feedback, and greater time pressure relative to low workload scenarios.
+<br/>Stress reactions were tracked using self-report ratings, salivary cortisol,
+<br/>and heart rate. Subjects also completed personality, mood, and alexi-
+<br/>thymia questionnaires. To bootstrap development of the OCR algorithm,
+<br/>we had a human observer, blind to stressor condition, identify the
+<br/>expressive elements of the face of people undergoing high- vs. low-
+<br/>stressor performance. Different sets of videos of subjects’ faces during
+<br/>performance conditions were used for OCR algorithm training. Subjec-
+<br/>tive ratings of stress, task difficulty, effort required, frustration, and
+<br/>negative mood were significantly increased during high-stressor perfor-
+<br/>mance bouts relative to low-stressor bouts (all p ⬍ 0.01). The OCR
+<br/>algorithm was refined to provide robust 3-d tracking of facial expres-
+<br/>sions during head movement. Movements of eyebrows and asymmetries
+<br/>in the mouth were extracted. These parameters are being used in a
+<br/>Hidden Markov model to identify high- and low-stressor conditions.
+<br/>Preliminary results suggest that an OCR algorithm using mouth and
+<br/>eyebrow regions has the potential
+<br/>to discriminate high- from low-
+<br/>stressor performance bouts in 75– 88% of subjects. The validity of the
+<br/>workload paradigm to induce differential levels of stress in facial ex-
+<br/>pressions was established. The paradigm also provided the basic stress-
+<br/>related facial expressions required to establish a prototypical OCR al-
+<br/>gorithm to detect such changes. Efforts are underway to further improve
+<br/>the OCR algorithm by adding facial touching and automating applica-
+<br/>tion of the deformable masks and OCR algorithms to video footage of the
+<br/>moving faces as a prelude to blind validation of the automated ap-
+<br/>proach.
+<br/>Keywords: optical computer recognition, computer vision, workload,
+<br/>performance, stress, human face, cortisol, heart rate, astronauts, Markov
+<br/>models.
+<br/>ASTRONAUTS ARE required to perform mission-
+<br/>critical tasks at a high level of functional capability
+<br/>throughout spaceflight. While they can be trained to
+<br/>cope with, and/or adapt to some stressors of space-
+<br/>flight, stressful reactions can and have occurred during
+<br/>long-duration missions, especially when operational
+<br/>performance demands become elevated when unex-
+<br/>pected and/or underestimated operational require-
+<br/>ments occurred while crews were already experiencing
+<br/>work-related stressors (13,28,42,43,52,57,66). In some of
+<br/>these instances, stressed flight crews have withdrawn
+<br/>from voice communications with ground controllers
+<br/>(7,66), or when pressed to continue performing, made
+<br/>errors that could have jeopardized the mission (13,28).
+<br/>Consequently, there is a need to identify when during
+<br/>operational demands astronauts are experiencing be-
+<br/>havioral stress associated with performance demands.
+<br/>This is especially important as mission durations in-
+<br/>crease in length and ultimately involve flight to other
+<br/>locations in the solar system.
+<br/>Facial Expressions of Stress
+<br/>Measurement of human emotional expressions via
+<br/><b>the face, including negative affect and distress, dates</b><br/>back to Darwin (14), but in recent years has been un-
+<br/>dergoing extensive scientific study (46). Although cul-
+<br/>tural differences can intensify facial expression of emo-
+<br/>tions (53), there is considerable scientific evidence that
+<br/>select emotions are communicated in distinct facial dis-
+<br/>plays across cultures, age, and gender (45). Because
+<br/>many techniques for monitoring stress reactions are
+<br/>impractical, unreliable, or obtrusive in spaceflight, we
+<br/>seek to develop a novel, objective, unobtrusive com-
+<br/>puter vision system to continuously track facial expres-
+<br/>sions during performance demands, to detect when
+<br/>From the Unit for Experimental Psychiatry, Department of Psychi-
+<br/><b>atry, University of Pennsylvania School of Medicine, Philadelphia, PA</b><br/>(D. F. Dinges, R. L. Rider, J. Dorrian, E. L. McGlinchey, N. L. Rogers,
+<br/>Z. Cizman); and the Center for Computational Biomedicine, Imaging
+<br/><b>and Modeling, Rutgers University</b><br/>New Brunswick, NJ (S. K. Goldstein, C. Vogler, S. Venkataraman,
+<br/>D. N. Metaxas).
+<br/>Address reprint requests to: David F. Dinges, Ph.D., Professor and
+<br/>Director, Unit for Experimental Psychiatry, Department of Psychiatry,
+<br/><b>University of Pennsylvania School of Medicine, 1013 Blockley Hall</b><br/>med.upenn.edu.
+<br/>Reprint & Copyright © by Aerospace Medical Association, Alexan-
+<br/>dria, VA.
+<br/>B172
+</td><td>('5515440', 'Jillian Dorrian', 'jillian dorrian')<br/>('4940404', 'Ziga Cizman', 'ziga cizman')<br/>('2467082', 'Christian Vogler', 'christian vogler')<br/>('2898034', 'Sundara Venkataraman', 'sundara venkataraman')</td><td>423 Guardian Drive, Philadelphia, PA 19104-6021; dinges@mail.
+</td></tr><tr><td>f442a2f2749f921849e22f37e0480ac04a3c3fec</td><td></td><td></td><td> Critical Features for Face Recognition in Humans and Machines Naphtali Abudarham1, Lior Shkiller1, Galit Yovel1,2 1School of Psychological Sciences, 2Sagol School of Neuroscience Tel Aviv University, Tel Aviv, Israel Correspondence regarding this manuscript should be addressed to: Galit Yovel School of Psychological Sciences & Sagol School of Neuroscience Tel Aviv University Tel Aviv, 69978, Israel Email: gality@post.tau.ac.il, </td></tr><tr><td>f4f9697f2519f1fe725ee7e3788119ed217dca34</td><td>Selfie-Presentation in Everyday Life: A Large-scale
+<br/>Characterization of Selfie Contexts on Instagram
+<br/><b>Georgia Institute of Technology</b><br/>North Ave NW
+<br/>Atlanta, GA 30332
+</td><td>('10799246', 'Julia Deeb-Swihart', 'julia deeb-swihart')<br/>('39723397', 'Christopher Polack', 'christopher polack')<br/>('1809407', 'Eric Gilbert', 'eric gilbert')</td><td>{jdeeb3, cfpolack,gilbert,irfan}@gatech.edu
+</td></tr><tr><td>f4f6fc473effb063b7a29aa221c65f64a791d7f4</td><td>Downloaded From: https://www.spiedigitallibrary.org/journals/Journal-of-Electronic-Imaging on 4/20/2018 Terms of Use: https://www.spiedigitallibrary.org/terms-of-use
+<br/>FacialexpressionrecognitioninthewildbasedonmultimodaltexturefeaturesBoSunLiandongLiGuoyanZhouJunHeBoSun,LiandongLi,GuoyanZhou,JunHe,“Facialexpressionrecognitioninthewildbasedonmultimodaltexturefeatures,”J.Electron.Imaging25(6),061407(2016),doi:10.1117/1.JEI.25.6.061407. </td><td></td><td></td></tr><tr><td>f4c01fc79c7ead67899f6fe7b79dd1ad249f71b0</td><td></td><td></td><td></td></tr><tr><td>f4373f5631329f77d85182ec2df6730cbd4686a9</td><td>Soft Computing manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Recognizing Gender from Human Facial Regions using
+<br/>Genetic Algorithm
+<br/>Received: date / Accepted: date
+</td><td>('24069279', 'Avirup Bhattacharyya', 'avirup bhattacharyya')<br/>('40813600', 'Partha Pratim Roy', 'partha pratim roy')<br/>('32614479', 'Samarjit Kar', 'samarjit kar')</td><td></td></tr><tr><td>f4210309f29d4bbfea9642ecadfb6cf9581ccec7</td><td>An Agreement and Sparseness-based Learning Instance Selection
+<br/>and its Application to Subjective Speech Phenomena
+<br/>1 Machine Intelligence & Signal Processing Group, MMK, Technische Universit¨at M¨unchen, Germany
+<br/><b>Imperial College London, United Kingdom</b></td><td>('30512170', 'Zixing Zhang', 'zixing zhang')<br/>('1751126', 'Florian Eyben', 'florian eyben')<br/>('39629517', 'Jun Deng', 'jun deng')</td><td>zixing.zhang@tum.de
+</td></tr><tr><td>f47404424270f6a20ba1ba8c2211adfba032f405</td><td>International Journal of Emerging Technology and Advanced Engineering
+<br/>Website: www.ijetae.com (ISSN 2250-2459, Volume 2, Issue 5, May 2012)
+<br/>Identification of Face Age range Group using Neural
+<br/>Network
+</td><td>('7530203', 'Sneha Thakur', 'sneha thakur')</td><td> 1sne_thakur@yahoo.co.in
+<br/> 2ligendra@rediffmail.com
+</td></tr><tr><td>f4d30896c5f808a622824a2d740b3130be50258e</td><td>DS++: A Flexible, Scalable and Provably Tight Relaxation for Matching Problems
+<br/><b>Weizmann Institute of Science</b></td><td>('3046344', 'Nadav Dym', 'nadav dym')<br/>('3416939', 'Haggai Maron', 'haggai maron')<br/>('3232072', 'Yaron Lipman', 'yaron lipman')</td><td></td></tr><tr><td>f4ebbeb77249d1136c355f5bae30f02961b9a359</td><td>Human Computation for Attribute and Attribute Value Acquisition
+<br/>School of Computer Science
+<br/><b>Carnegie Melon University</b></td><td>('2987829', 'Edith Law', 'edith law')<br/>('1717452', 'Burr Settles', 'burr settles')<br/>('2681926', 'Aaron Snook', 'aaron snook')<br/>('2762792', 'Harshit Surana', 'harshit surana')<br/>('3328108', 'Luis von Ahn', 'luis von ahn')<br/>('39182987', 'Tom Mitchell', 'tom mitchell')</td><td>edith@cmu.edu
+</td></tr><tr><td>f4aed1314b2d38fd8f1b9d2bc154295bbd45f523</td><td>Subspace Clustering using Ensembles of
+<br/>K-Subspaces
+<br/>Department of Electrical and Computer Engineering
+<br/><b>University of Michigan, Ann Arbor</b></td><td>('1782134', 'John Lipor', 'john lipor')<br/>('5250186', 'David Hong', 'david hong')<br/>('2358258', 'Dejiao Zhang', 'dejiao zhang')<br/>('1682385', 'Laura Balzano', 'laura balzano')</td><td>{lipor,dahong,dejiao,girasole}@umich.edu
+</td></tr><tr><td>f42dca4a4426e5873a981712102aa961be34539a</td><td>Next-Flow: Hybrid Multi-Tasking with Next-Frame Prediction to Boost
+<br/>Optical-Flow Estimation in the Wild
+<br/><b>University of Freiburg</b><br/>Germany
+</td><td>('31656404', 'Nima Sedaghat', 'nima sedaghat')</td><td>nima@cs.uni-freiburg.de
+</td></tr><tr><td>f3ca2c43e8773b7062a8606286529c5bc9b3ce25</td><td>Deep Clustering via Joint Convolutional Autoencoder Embedding and Relative
+<br/>Entropy Minimization
+<br/><b>Electrical and Computer Engineering, University of Pittsburgh, USA</b><br/><b>Computer Science and Engineering, University of Texas at Arlington, USA</b><br/><b>cid:93)School of Electronic Engineering, Xidian University, China</b><br/><b>cid:92)School of Information Technologies, University of Sydney, Australia</b></td><td>('2331771', 'Kamran Ghasedi Dizaji', 'kamran ghasedi dizaji')<br/>('10797930', 'Amirhossein Herandi', 'amirhossein herandi')<br/>('1748032', 'Heng Huang', 'heng huang')</td><td>kamran.ghasedi@gmail.com, amirhossein.herandi@uta.edu, chdeng@mail.xidian.edu.cn
+<br/>tom.cai@sydney.edu.au, heng.huang@pitt.edu
+</td></tr><tr><td>f3fcaae2ea3e998395a1443c87544f203890ae15</td><td></td><td></td><td></td></tr><tr><td>f3015be0f9dbc1a55b6f3dc388d97bb566ff94fe</td><td>A Study on the Effective Approach
+<br/>to Illumination-Invariant Face Recognition
+<br/>Based on a Single Image
+<br/><b>Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences, 518055, China</b><br/>2 Shenzhen Key Laboratory for Visual Computing and Analytics, Shenzhen, 518055, China
+</td><td>('31361063', 'Jiapei Zhang', 'jiapei zhang')<br/>('2002129', 'Xiaohua Xie', 'xiaohua xie')</td><td>{jp.zhang,xiaohua.xie}@siat.ac.cn,
+<br/>sysuxiexh@gmail.com
+</td></tr><tr><td>f3d9e347eadcf0d21cb0e92710bc906b22f2b3e7</td><td>NosePose: a competitive, landmark-free
+<br/>methodology for head pose estimation in the wild
+<br/>IMAGO Research Group - Universidade Federal do Paran´a
+</td><td>('37435823', 'Antonio C. P. Nascimento', 'antonio c. p. nascimento')<br/>('1800955', 'Olga R. P. Bellon', 'olga r. p. bellon')</td><td>{flavio,antonio.paes,olga,luciano}@ufpr.br
+</td></tr><tr><td>f3a59d85b7458394e3c043d8277aa1ffe3cdac91</td><td>Query-Free Attacks on Industry-Grade Face Recognition Systems under Resource
+<br/>Constraints
+<br/><b>Chinese University of Hong Kong</b><br/><b>Indiana University</b><br/><b>Chinese University of Hong Kong</b></td><td>('1807925', 'Di Tang', 'di tang')<br/>('47119002', 'XiaoFeng Wang', 'xiaofeng wang')<br/>('3297454', 'Kehuan Zhang', 'kehuan zhang')</td><td>td016@ie.cuhk.edu.hk
+<br/>xw7@indiana.edu
+<br/>khzhang@ie.cuhk.edu.hk
+</td></tr><tr><td>f3f77b803b375f0c63971b59d0906cb700ea24ed</td><td>Advances in Electrical and Computer Engineering Volume 9, Number 3, 2009
+<br/>Feature Extraction for Facial Expression
+<br/>Recognition based on Hybrid Face Regions
+<br/>Seyed M. LAJEVARDI, Zahir M. HUSSAIN
+<br/><b>RMIT University, Australia</b></td><td></td><td>seyed.lajevardi @ rmit.edu.au
+</td></tr><tr><td>f355e54ca94a2d8bbc598e06e414a876eb62ef99</td><td></td><td></td><td></td></tr><tr><td>f3df296de36b7c114451865778e211350d153727</td><td>Spatio-Temporal Facial Expression Recognition Using Convolutional
+<br/>Neural Networks and Conditional Random Fields
+<br/><b>University of Denver, Denver, CO</b></td><td>('3093835', 'Mohammad H. Mahoor', 'mohammad h. mahoor')</td><td>behzad.hasani@du.edu, and mmahoor@du.edu
+</td></tr><tr><td>f3ea181507db292b762aa798da30bc307be95344</td><td>Covariance Pooling for Facial Expression Recognition
+<br/>†Computer Vision Lab, ETH Zurich, Switzerland
+<br/>‡VISICS, KU Leuven, Belgium
+</td><td>('32610154', 'Dinesh Acharya', 'dinesh acharya')<br/>('7945869', 'Zhiwu Huang', 'zhiwu huang')<br/>('35268081', 'Danda Pani Paudel', 'danda pani paudel')<br/>('1681236', 'Luc Van Gool', 'luc van gool')</td><td>{acharyad, zhiwu.huang, paudel, vangool}@vision.ee.ethz.ch
+</td></tr><tr><td>f3fed71cc4fc49b02067b71c2df80e83084b2a82</td><td>Published as a conference paper at ICLR 2018
+<br/>LEARNING SPARSE LATENT REPRESENTATIONS WITH
+<br/>THE DEEP COPULA INFORMATION BOTTLENECK
+<br/><b>University of Basel, Switzerland</b></td><td>('30069186', 'Aleksander Wieczorek', 'aleksander wieczorek')<br/>('30537851', 'Mario Wieser', 'mario wieser')<br/>('2620254', 'Damian Murezzan', 'damian murezzan')<br/>('39891341', 'Volker Roth', 'volker roth')</td><td>{firstname.lastname}@unibas.ch
+</td></tr><tr><td>f3cf10c84c4665a0b28734f5233d423a65ef1f23</td><td>Title
+<br/>Temporal Exemplar-based Bayesian Networks for facial
+<br/>expression recognition
+<br/>Author(s)
+<br/>Shang, L; Chan, KP
+<br/>Citation
+<br/>Proceedings - 7Th International Conference On Machine
+<br/>Learning And Applications, Icmla 2008, 2008, p. 16-22
+<br/>Issued Date
+<br/>2008
+<br/>URL
+<br/>http://hdl.handle.net/10722/61208
+<br/>Rights
+<br/>This work is licensed under a Creative Commons Attribution-
+<br/>NonCommercial-NoDerivatives 4.0 International License.;
+<br/>International Conference on Machine Learning and Applications
+<br/>Proceedings. Copyright © IEEE.; ©2008 IEEE. Personal use of
+<br/>this material is permitted. However, permission to
+<br/>reprint/republish this material for advertising or promotional
+<br/>purposes or for creating new collective works for resale or
+<br/>redistribution to servers or lists, or to reuse any copyrighted
+<br/>component of this work in other works must be obtained from
+<br/>the IEEE.
+</td><td></td><td></td></tr><tr><td>f35a493afa78a671b9d2392c69642dcc3dd2cdc2</td><td>Automatic Attribute Discovery with Neural
+<br/>Activations
+<br/><b>University of North Carolina at Chapel Hill, USA</b><br/>2 NTT Media Intelligence Laboratories, Japan
+<br/><b>Tohoku University, Japan</b></td><td>('3302783', 'Sirion Vittayakorn', 'sirion vittayakorn')<br/>('1706592', 'Takayuki Umeda', 'takayuki umeda')<br/>('2023568', 'Kazuhiko Murasaki', 'kazuhiko murasaki')<br/>('1745497', 'Kyoko Sudo', 'kyoko sudo')<br/>('1718872', 'Takayuki Okatani', 'takayuki okatani')<br/>('1721910', 'Kota Yamaguchi', 'kota yamaguchi')</td><td></td></tr><tr><td>f3b7938de5f178e25a3cf477107c76286c0ad691</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, MARCH 2017
+<br/>Object Detection with Deep Learning: A Review
+</td><td>('33698309', 'Zhong-Qiu Zhao', 'zhong-qiu zhao')<br/>('36659418', 'Peng Zheng', 'peng zheng')<br/>('51132438', 'Shou-tao Xu', 'shou-tao xu')<br/>('1748808', 'Xindong Wu', 'xindong wu')</td><td></td></tr><tr><td>ebedc841a2c1b3a9ab7357de833101648281ff0e</td><td></td><td></td><td></td></tr><tr><td>eb526174fa071345ff7b1fad1fad240cd943a6d7</td><td>Deeply Vulnerable – A Study of the Robustness of Face Recognition to
+<br/>Presentation Attacks
+</td><td>('1990628', 'Amir Mohammadi', 'amir mohammadi')<br/>('1952348', 'Sushil Bhattacharjee', 'sushil bhattacharjee')</td><td></td></tr><tr><td>eb100638ed73b82e1cce8475bb8e180cb22a09a2</td><td>Temporal Action Detection with Structured Segment Networks
+<br/><b>The Chinese University of Hong Kong</b><br/>2Computer Vision Laboratory, ETH Zurich, Switzerland
+</td><td>('47827548', 'Yue Zhao', 'yue zhao')<br/>('3331521', 'Yuanjun Xiong', 'yuanjun xiong')<br/>('33345248', 'Limin Wang', 'limin wang')<br/>('2765994', 'Zhirong Wu', 'zhirong wu')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')<br/>('1807606', 'Dahua Lin', 'dahua lin')</td><td></td></tr><tr><td>eb6ee56e085ebf473da990d032a4249437a3e462</td><td>Age/Gender Classification with Whole-Component
+<br/>Convolutional Neural Networks (WC-CNN)
+<br/><b>University of Southern California, Los Angeles, CA 90089, USA</b></td><td>('39004239', 'Chun-Ting Huang', 'chun-ting huang')<br/>('7022231', 'Yueru Chen', 'yueru chen')<br/>('35521292', 'Ruiyuan Lin', 'ruiyuan lin')<br/>('9363144', 'C.-C. Jay Kuo', 'c.-c. jay kuo')</td><td>E-mail: {chuntinh, yueruche, ruiyuanl}@usc.edu, cckuo@sipi.usc.edu
+</td></tr><tr><td>eb8519cec0d7a781923f68fdca0891713cb81163</td><td>Temporal Non-Volume Preserving Approach to Facial Age-Progression and
+<br/>Age-Invariant Face Recognition
+<br/><b>Computer Science and Software Engineering, Concordia University, Montr eal, Qu ebec, Canada</b><br/>2 CyLab Biometrics Center and the Department of Electrical and Computer Engineering,
+<br/><b>Carnegie Mellon University, Pittsburgh, PA, USA</b></td><td>('1876581', 'Chi Nhan Duong', 'chi nhan duong')<br/>('2687827', 'Kha Gia Quach', 'kha gia quach')<br/>('1769788', 'Khoa Luu', 'khoa luu')<br/>('6131978', 'T. Hoang Ngan Le', 't. hoang ngan le')<br/>('1794486', 'Marios Savvides', 'marios savvides')</td><td>{chinhand, kquach, kluu, thihoanl}@andrew.cmu.edu, msavvid@ri.cmu.edu
+</td></tr><tr><td>ebb1c29145d31c4afa3c9be7f023155832776cd3</td><td>CASME II: An Improved Spontaneous Micro-Expression
+<br/>Database and the Baseline Evaluation
+<br/><b>State Key Laboratory of Brain and Cognitive Science, Institute of Psychology, Chinese Academy of Sciences, Beijing, China, 2 University of Chinese Academy of Sciences</b><br/><b>Beijing, China, 3 Center for Machine Vision Research, Department of Computer Science and Engineering, University of Oulu, Oulu, Finland, 4 TNList, Department of</b><br/><b>Computer Science and Technology, Tsinghua University, Beijing, China</b></td><td>('9185305', 'Wen-Jing Yan', 'wen-jing yan')<br/>('39522870', 'Xiaobai Li', 'xiaobai li')<br/>('2819642', 'Su-Jing Wang', 'su-jing wang')<br/>('1757287', 'Guoying Zhao', 'guoying zhao')<br/>('1715826', 'Yong-Jin Liu', 'yong-jin liu')<br/>('1838009', 'Yu-Hsin Chen', 'yu-hsin chen')<br/>('1684007', 'Xiaolan Fu', 'xiaolan fu')</td><td></td></tr><tr><td>eb566490cd1aa9338831de8161c6659984e923fd</td><td>From Lifestyle Vlogs to Everyday Interactions
+<br/>EECS Department, UC Berkeley
+</td><td>('1786435', 'David F. Fouhey', 'david f. fouhey')<br/>('1763086', 'Alexei A. Efros', 'alexei a. efros')<br/>('1689212', 'Jitendra Malik', 'jitendra malik')</td><td></td></tr><tr><td>eb9312458f84a366e98bd0a2265747aaed40b1a6</td><td>1-4244-1437-7/07/$20.00 ©2007 IEEE
+<br/>IV - 473
+<br/>ICIP 2007
+</td><td></td><td></td></tr><tr><td>eb716dd3dbd0f04e6d89f1703b9975cad62ffb09</td><td>Copyright
+<br/>by
+<br/>2012
+</td><td>('1883898', 'Yong Jae Lee', 'yong jae lee')</td><td></td></tr><tr><td>eb4d2ec77fae67141f6cf74b3ed773997c2c0cf6</td><td>Int. J. Information Technology and Management, Vol. 11, Nos. 1/2, 2012
+<br/>35
+<br/>A new soft biometric approach for keystroke
+<br/>dynamics based on gender recognition
+<br/><b>GREYC Research Lab</b><br/>ENSICAEN – Université de Caen Basse Normandie – CNRS,
+<br/>14000 Caen, France
+<br/>Fax: +33-231538110
+<br/>*Corresponding author
+</td><td>('2615638', 'Romain Giot', 'romain giot')<br/>('1793765', 'Christophe Rosenberger', 'christophe rosenberger')</td><td>E-mail: romain.giot@ensicaen.fr
+<br/>E-mail: christophe.rosenberger@ensicaen.fr
+</td></tr><tr><td>ebb7cc67df6d90f1c88817b20e7a3baad5dc29b9</td><td>Journal of Computational Mathematics
+<br/>Vol.xx, No.x, 200x, 1–25.
+<br/>http://www.global-sci.org/jcm
+<br/>doi:??
+<br/>Fast algorithms for Higher-order Singular Value Decomposition
+<br/>from incomplete data*
+<br/><b>University of Alabama, Tuscaloosa, AL</b></td><td>('40507939', 'Yangyang Xu', 'yangyang xu')</td><td>Email: yangyang.xu@ua.edu
+</td></tr><tr><td>ebabd1f7bc0274fec88a3dabaf115d3e226f198f</td><td>Driver drowsiness detection system based on feature
+<br/>representation learning using various deep networks
+<br/>School of Electrical Engineering, KAIST,
+<br/>Guseong-dong, Yuseong-gu, Dajeon, Rep. of Korea
+</td><td>('1989730', 'Sanghyuk Park', 'sanghyuk park')<br/>('1773194', 'Fei Pan', 'fei pan')<br/>('3315036', 'Sunghun Kang', 'sunghun kang')</td><td>{shine0624, feipan, sunghun.kang, cd yoo}@kaist.ac.kr
+</td></tr><tr><td>eb70c38a350d13ea6b54dc9ebae0b64171d813c9</td><td>On Graph-Structured Discrete
+<br/>Labelling Problems in Computer
+<br/>Vision: Learning, Inference and
+<br/>Applications
+<br/>Submitted in partial fulfillment of the requirements for
+<br/>the degree of
+<br/>Doctor of Philosophy
+<br/>in
+<br/>Electrical and Computer Engineering
+<br/><b>M.S., Electrical and Computer Engineering, Carnegie Mellon University</b><br/><b>B.Tech., Electronics Engineering, Institute of Technology, Banaras Hindu University</b><br/><b>Carnegie Mellon University</b><br/>August, 2010
+</td><td>('1746610', 'Dhruv Batra', 'dhruv batra')</td><td></td></tr><tr><td>ebb9d53668205c5797045ba130df18842e3eadef</td><td></td><td></td><td></td></tr><tr><td>eb027969f9310e0ae941e2adee2d42cdf07d938c</td><td>VGGFace2: A dataset for recognising faces across pose and age
+<br/><b>Visual Geometry Group, University of Oxford</b></td><td>('46632720', 'Qiong Cao', 'qiong cao')<br/>('46980108', 'Li Shen', 'li shen')<br/>('10096695', 'Weidi Xie', 'weidi xie')<br/>('3188342', 'Omkar M. Parkhi', 'omkar m. parkhi')<br/>('1688869', 'Andrew Zisserman', 'andrew zisserman')</td><td>{qiong,lishen,weidi,omkar,az}@robots.ox.ac.uk
+</td></tr><tr><td>eb48a58b873295d719827e746d51b110f5716d6c</td><td>Face Alignment Using K-cluster Regression Forests
+<br/>With Weighted Splitting
+</td><td>('2393538', 'Marek Kowalski', 'marek kowalski')<br/>('1930272', 'Jacek Naruniec', 'jacek naruniec')</td><td></td></tr><tr><td>eb7b387a3a006609b89ca5ed0e6b3a1d5ecb5e5a</td><td>Facial Expression Recognition using Neural
+<br/>Network
+<br/><b>National Cheng Kung University</b><br/>Tainan, Taiwan, R.O.C.
+<br/>
+</td><td>('1751725', 'Shen-Chuan Tai', 'shen-chuan tai')<br/>('2142418', 'Yu-Yi Liao', 'yu-yi liao')<br/>('1925097', 'Chien-Shiang Hong', 'chien-shiang hong')</td><td>sctai@mail.ncku.edu.tw hhf93d@lily.ee.ncku.edu.tw zgz@lily.ee.ncku.edu.tw
+<br/>lyy94d@lily.ee.ncku.edu.tw hcs95d@dcmc.ee.ncku.edu.tw
+</td></tr><tr><td>ebd5df2b4105ba04cef4ca334fcb9bfd6ea0430c</td><td>Fast Localization of Facial Landmark Points
+<br/><b>University of Zagreb, Faculty of Electrical Engineering and Computing, Unska 3, 10000 Zagreb, Croatia</b><br/><b>Link oping University, SE-581 83 Link oping, Sweden</b><br/>March 28, 2014
+</td><td>('3013350', 'Miroslav Frljak', 'miroslav frljak')<br/>('1767736', 'Robert Forchheimer', 'robert forchheimer')</td><td></td></tr><tr><td>ebf204e0a3e137b6c24e271b0d55fa49a6c52b41</td><td>Master of Science Thesis in Electrical Engineering
+<br/><b>Link ping University</b><br/>Visual Tracking Using
+<br/>Deep Motion Features
+</td><td>('8161428', 'Susanna Gladh', 'susanna gladh')</td><td></td></tr><tr><td>c71f36c9376d444075de15b1102b4974481be84d</td><td>3D Morphable Models: Data
+<br/>Pre-Processing, Statistical Analysis and
+<br/>Fitting
+<br/>Submitted for the degree of Doctor of Philosophy
+<br/>Department of Computer Science
+<br/><b>The University of York</b><br/>June, 2011
+</td><td>('37519514', 'Ankur Patel', 'ankur patel')</td><td></td></tr><tr><td>c7c53d75f6e963b403057d8ba5952e4974a779ad</td><td><b>Purdue University</b><br/>Purdue e-Pubs
+<br/>Open Access Theses
+<br/>8-2016
+<br/>Theses and Dissertations
+<br/>Aging effects in automated face recognition
+<br/><b>Purdue University</b><br/>Follow this and additional works at: http://docs.lib.purdue.edu/open_access_theses
+<br/>Recommended Citation
+<br/>Agamez, Miguel Cedeno, "Aging effects in automated face recognition" (2016). Open Access Theses. 930.
+<br/>http://docs.lib.purdue.edu/open_access_theses/930
+<br/>additional information.
+</td><td></td><td>This document has been made available through Purdue e-Pubs, a service of the Purdue University Libraries. Please contact epubs@purdue.edu for
+</td></tr><tr><td>c79cf7f61441195404472102114bcf079a72138a</td><td>Pose-Invariant 2D Face Recognition by Matching
+<br/>Using Graphical Models
+<br/>Submitted for the Degree of
+<br/>Doctor of Philosophy
+<br/>from the
+<br/><b>University of Surrey</b><br/>Center for Vision, Speech and Signal Processing
+<br/>Faculty of Engineering and Physical Sciences
+<br/><b>University of Surrey</b><br/>Guildford, Surrey GU2 7XH, U.K.
+<br/>September 2010
+</td><td>('1690611', 'Shervin Rahimzadeh Arashloo', 'shervin rahimzadeh arashloo')<br/>('1690611', 'Shervin Rahimzadeh Arashloo', 'shervin rahimzadeh arashloo')</td><td></td></tr><tr><td>c73dd452c20460f40becb1fd8146239c88347d87</td><td>Manifold Constrained Low-Rank Decomposition
+<br/>1State Key Laboratory of Satellite Navigation System and Equipment Technology, Shijiazhuang, China
+<br/><b>Center for Research in Computer Vision (CRCV), University of Central Florida (UCF</b><br/><b>School of Automation Science and Electrical Engineering, Beihang University, Beijing, China</b><br/>4 Istituto Italiano di Tecnologia, Genova, Italy
+</td><td>('9497155', 'Chen Chen', 'chen chen')<br/>('1740430', 'Baochang Zhang', 'baochang zhang')<br/>('1714730', 'Alessio Del Bue', 'alessio del bue')<br/>('1727204', 'Vittorio Murino', 'vittorio murino')</td><td>chenchen870713@gmail.com, alessio.delbue@iit.it, bczhang@buaa.edu.cn, vittorio.murino@iit.it ∗
+</td></tr><tr><td>c7e4c7be0d37013de07b6d829a3bf73e1b95ad4e</td><td>The International Journal of Multimedia & Its Applications (IJMA) Vol.5, No.5, October 2013
+<br/>DYNEMO: A VIDEO DATABASE OF NATURAL FACIAL
+<br/>EXPRESSIONS OF EMOTIONS
+<br/>1LIP, Univ. Grenoble Alpes, BP 47 - 38040 Grenoble Cedex 9, France
+<br/>2LIG, Univ. Grenoble Alpes, BP 53 - 38041 Grenoble Cedex 9, France
+</td><td>('3209946', 'Anna Tcherkassof', 'anna tcherkassof')<br/>('20944713', 'Damien Dupré', 'damien dupré')<br/>('2357225', 'Brigitte Meillon', 'brigitte meillon')<br/>('2872246', 'Nadine Mandran', 'nadine mandran')<br/>('1870899', 'Michel Dubois', 'michel dubois')<br/>('1828394', 'Jean-Michel Adam', 'jean-michel adam')</td><td></td></tr><tr><td>c72e6992f44ce75a40f44be4365dc4f264735cfb</td><td>Story Understanding in Video
+<br/>Advertisements
+<br/>Department of Computer Science
+<br/><b>University of Pittsburgh</b><br/>Pennsylvania, United States
+</td><td>('9085797', 'Keren Ye', 'keren ye')<br/>('51150048', 'Kyle Buettner', 'kyle buettner')<br/>('1770205', 'Adriana Kovashka', 'adriana kovashka')<br/>('9085797', 'Keren Ye', 'keren ye')<br/>('51150048', 'Kyle Buettner', 'kyle buettner')<br/>('1770205', 'Adriana Kovashka', 'adriana kovashka')</td><td>yekeren@cs.pitt.edu
+<br/>buettnerk@pitt.edu
+<br/>kovashka@cs.pitt.edu
+</td></tr><tr><td>c74aba9a096379b3dbe1ff95e7af5db45c0fd680</td><td>Neuro-Fuzzy Analysis of Facial Action Units
+<br/>and Expressions
+<br/>Digital Signal Processing Lab, Department of Computer Engineering
+<br/><b>Sharif University of Technology</b><br/>Tehran, Iran, Tel: +98 21 6616 4632
+</td><td>('1736464', 'Mahmoud Khademi', 'mahmoud khademi')<br/>('2936650', 'Mohammad Taghi Manzuri', 'mohammad taghi manzuri')<br/>('1702826', 'Mohammad Hadi Kiapour', 'mohammad hadi kiapour')</td><td>khademi@ce.sharif.edu, manzuri@sharif.edu, kiapour@ee.sharif.edu
+</td></tr><tr><td>c7de0c85432ad17a284b5b97c4f36c23f506d9d1</td><td>INTERSPEECH 2011
+<br/>RANSAC-based Training Data Selection for Speaker State Recognition
+<br/><b>Multimedia, Vision and Graphics Laboratory, Koc University, Istanbul, Turkey</b><br/><b>Bahc es ehir University, Istanbul, Turkey</b><br/><b>Ozye gin University, Istanbul, Turkey</b></td><td>('1777185', 'Elif Bozkurt', 'elif bozkurt')<br/>('1749677', 'Engin Erzin', 'engin erzin')</td><td>ebozkurt, eerzin@ku.edu.tr, cigdem.eroglu@bahcesehir.edu.tr, tanju.erdem@ozyegin.edu.tr
+</td></tr><tr><td>c7c5f0fe1fcaf3787c7f78f7dc62f3497dcfdf3c</td><td>THE IMPACT OF PRODUCT PHOTO ON ONLINE CONSUMER
+<br/>PURCHASE INTENTION: AN IMAGE-PROCESSING ENABLED
+<br/>EMPIRICAL STUDY
+</td><td>('39306563', 'Xin Li', 'xin li')<br/>('2762720', 'Mengyue Wang', 'mengyue wang')<br/>('39016300', 'Yubo Chen', 'yubo chen')</td><td>Xin.Li.PhD@gmail.com
+<br/>Kong, menwang-c@my.cityu.edu.hk
+<br/>chenyubo@sem.tsinghua.edu.cn
+</td></tr><tr><td>c7f752eea91bf5495a4f6e6a67f14800ec246d08</td><td>EXPLORING THE TRANSFER
+<br/>LEARNING ASPECT OF DEEP
+<br/>NEURAL NETWORKS IN FACIAL
+<br/>INFORMATION PROCESSING
+<br/><b>A DISSERTATION SUBMITTED TO THE UNIVERSITY OF MANCHESTER</b><br/>FOR THE DEGREE OF MASTER OF SCIENCE
+<br/>IN THE FACULTY OF ENGINEERING AND PHYSICAL SCIENCES
+<br/>2015
+<br/>By
+<br/>Crefeda Faviola Rodrigues
+<br/>School of Computer Science
+</td><td></td><td></td></tr><tr><td>c71217b2b111a51a31cf1107c71d250348d1ff68</td><td>One Network to Solve Them All — Solving Linear Inverse Problems
+<br/>using Deep Projection Models
+<br/><b>Carnegie Mellon University, Pittsburgh, PA</b></td><td>('2088535', 'Chun-Liang Li', 'chun-liang li')<br/>('1783087', 'B. V. K. Vijaya Kumar', 'b. v. k. vijaya kumar')<br/>('1745861', 'Aswin C. Sankaranarayanan', 'aswin c. sankaranarayanan')</td><td></td></tr><tr><td>c758b9c82b603904ba8806e6193c5fefa57e9613</td><td>Heterogeneous Face Recognition with CNNs
+<br/>INRIA Grenoble, Laboratoire Jean Kuntzmann
+</td><td>('2143851', 'Shreyas Saxena', 'shreyas saxena')<br/>('34602236', 'Jakob Verbeek', 'jakob verbeek')</td><td>{firstname.lastname}@inria.fr
+</td></tr><tr><td>c7c03324833ba262eeaada0349afa1b5990c1ea7</td><td>A Wearable Face Recognition System on Google
+<br/>Glass for Assisting Social Interactions
+<br/><b>Institute for Infocomm Research, Singapore</b></td><td>('1709001', 'Bappaditya Mandal', 'bappaditya mandal')<br/>('35718875', 'Liyuan Li', 'liyuan li')<br/>('1694051', 'Cheston Tan', 'cheston tan')</td><td>Email address: bmandal@i2r.a-star.edu.sg (∗Contact author: Bappaditya Mandal);
+<br/>{scchia, lyli, vijay, cheston-tan, joohwee}@i2r.a-star.edu.sg
+</td></tr><tr><td>c76f64e87f88475069f7707616ad9df1719a6099</td><td>T-RECS: Training for Rate-Invariant
+<br/>Embeddings by Controlling Speed for Action
+<br/>Recognition
+<br/><b>University of Michigan</b></td><td>('31646172', 'Madan Ravi Ganesh', 'madan ravi ganesh')<br/>('24337238', 'Eric Hofesmann', 'eric hofesmann')<br/>('40893359', 'Byungsu Min', 'byungsu min')<br/>('40893002', 'Nadha Gafoor', 'nadha gafoor')<br/>('3587688', 'Jason J. Corso', 'jason j. corso')</td><td></td></tr><tr><td>c7f0c0636d27a1d45b8fcef37e545b902195d937</td><td>Towards Around-Device Interaction using Corneal Imaging
+<br/><b>Coburg University</b><br/><b>Coburg University</b></td><td>('49770541', 'Daniel Schneider', 'daniel schneider')<br/>('2708269', 'Jens Grubert', 'jens grubert')</td><td>daniel.schneider@hs-coburg.de
+<br/>jg@jensgrubert.de
+</td></tr><tr><td>c7c8d150ece08b12e3abdb6224000c07a6ce7d47</td><td>DeMeshNet: Blind Face Inpainting for Deep MeshFace Verification
+<br/>National Laboratory of Pattern Recognition, CASIA
+<br/>Center for Research on Intelligent Perception and Computing, CASIA
+</td><td>('50202300', 'Shu Zhang', 'shu zhang')</td><td>{shu.zhang,rhe,tnt}@nlpr.ia.ac.cn
+</td></tr><tr><td>c78fdd080df01fff400a32fb4cc932621926021f</td><td>Robust Automatic Facial Expression Detection
+<br/>Method
+<br/><b>Institute for Pattern Recognition and Artificial Intelligence/ Huazhong University of Science and Technology, Wuhan</b><br/><b>Institute for Pattern Recognition and Artificial Intelligence/ Huazhong University of Science and Technology, Wuhan</b><br/>China
+<br/>China
+</td><td>('33024921', 'Yan Ouyang', 'yan ouyang')<br/>('1707161', 'Nong Sang', 'nong sang')</td><td>Email:oyy_01@163.com
+<br/>Email: nsang@hust.edu.cn
+</td></tr><tr><td>c74b1643a108939c6ba42ae4de55cb05b2191be5</td><td>NON-NEGATIVE MATRIX FACTORIZATION FOR FACE
+<br/>ILLUMINATION ANALYSIS
+<br/><b>CVSSP, University of Surrey</b><br/><b>CVSSP, University of Surrey</b><br/><b>CVSSP, University of Surrey</b><br/>Guildford, Surrey
+<br/>UK GU2 7XH
+<br/>Guildford, Surrey
+<br/>UK GU2 7XH
+<br/>Guildford, Surrey
+<br/>UK GU2 7XH
+</td><td>('38746097', 'Xuan Zou', 'xuan zou')<br/>('39685698', 'Wenwu Wang', 'wenwu wang')<br/>('1748684', 'Josef Kittler', 'josef kittler')</td><td>xuan.zou@surrey.ac.uk
+<br/>w.wang@surrey.ac.uk
+<br/>j.kittler@surrey.ac.uk
+</td></tr><tr><td>c75e6ce54caf17b2780b4b53f8d29086b391e839</td><td>ExpNet: Landmark-Free, Deep, 3D Facial Expressions
+<br/><b>Institute for Robotics and Intelligent Systems, USC, CA, USA</b><br/><b>Information Sciences Institute, USC, CA, USA</b><br/><b>The Open University of Israel, Israel</b></td><td>('1752756', 'Feng-Ju Chang', 'feng-ju chang')<br/>('46634688', 'Anh Tuan Tran', 'anh tuan tran')<br/>('1756099', 'Tal Hassner', 'tal hassner')<br/>('11269472', 'Iacopo Masi', 'iacopo masi')</td><td>{fengjuch,anhttran,iacopoma,nevatia,medioni}@usc.edu, hassner@openu.ac.il
+</td></tr><tr><td>c0723e0e154a33faa6ff959d084aebf07770ffaf</td><td>Interpolation Between Eigenspaces Using
+<br/>Rotation in Multiple Dimensions
+<br/><b>Graduate School of Information Science, Nagoya University, Japan</b><br/>2 No Japan Society for the Promotion of Science
+<br/><b>Japan</b></td><td>('1685524', 'Tomokazu Takahashi', 'tomokazu takahashi')<br/>('2833316', 'Lina', 'lina')<br/>('1679187', 'Ichiro Ide', 'ichiro ide')<br/>('1680642', 'Yoshito Mekada', 'yoshito mekada')<br/>('1725612', 'Hiroshi Murase', 'hiroshi murase')</td><td>ttakahashi@murase.m.is.nagoya-u.ac.jp
+</td></tr><tr><td>c03f48e211ac81c3867c0e787bea3192fcfe323e</td><td>INTERSPEECH 2016
+<br/>September 8–12, 2016, San Francisco, USA
+<br/>Mahalanobis Metric Scoring Learned from Weighted Pairwise Constraints in
+<br/>I-vector Speaker Recognition System
+<br/><b>School of Computer Information Engineering, Jiangxi Normal University, Nanchang, China</b></td><td>('3308432', 'Zhenchun Lei', 'zhenchun lei')<br/>('2947033', 'Yanhong Wan', 'yanhong wan')<br/>('1853437', 'Jian Luo', 'jian luo')<br/>('2956877', 'Yingen Yang', 'yingen yang')</td><td>zhenchun.lei@hotmail.com, wyanhhappy@126.com,
+<br/>luo.jian@hotmail.com, ygyang@jxnu.edu.cn
+</td></tr><tr><td>c038beaa228aeec174e5bd52460f0de75e9cccbe</td><td>Temporal Segment Networks for Action
+<br/>Recognition in Videos
+</td><td>('33345248', 'Limin Wang', 'limin wang')<br/>('3331521', 'Yuanjun Xiong', 'yuanjun xiong')<br/>('48708388', 'Zhe Wang', 'zhe wang')<br/>('40612284', 'Yu Qiao', 'yu qiao')<br/>('1807606', 'Dahua Lin', 'dahua lin')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')<br/>('1681236', 'Luc Van Gool', 'luc van gool')</td><td></td></tr><tr><td>c043f8924717a3023a869777d4c9bee33e607fb5</td><td>Emotion Separation Is Completed Early and It Depends
+<br/>on Visual Field Presentation
+<br/><b>Lab for Human Brain Dynamics, RIKEN Brain Science Institute, Wakoshi, Saitama, Japan, 2 Lab for Human Brain Dynamics, AAI Scientific Cultural Services Ltd., Nicosia</b><br/>Cyprus
+</td><td>('2259342', 'Lichan Liu', 'lichan liu')<br/>('2348276', 'Andreas A. Ioannides', 'andreas a. ioannides')</td><td></td></tr><tr><td>c05a7c72e679745deab9c9d7d481f7b5b9b36bdd</td><td>NPS-CS-11-005
+<br/>
+<br/>
+<br/>NAVAL
+<br/>POSTGRADUATE
+<br/>SCHOOL
+<br/>MONTEREY, CALIFORNIA
+<br/>by
+<br/>BIOMETRIC CHALLENGES FOR FUTURE DEPLOYMENTS:
+<br/>A STUDY OF THE IMPACT OF GEOGRAPHY, CLIMATE, CULTURE,
+<br/> AND SOCIAL CONDITIONS ON THE EFFECTIVE
+<br/>COLLECTION OF BIOMETRICS
+<br/>April 2011
+<br/>Approved for public release; distribution is unlimited
+</td><td>('3337733', 'Paul C. Clark', 'paul c. clark')</td><td></td></tr><tr><td>c03e01717b2d93f04cce9b5fd2dcfd1143bcc180</td><td>Locality-constrained Active Appearance Model
+<br/>1Key Lab of Intelligent Information Processing of Chinese Academy of Sciences
+<br/><b>CAS), Institute of Computing Technology, CAS, Beijing 100190, China</b><br/><b>University of Chinese Academy of Sciences, Beijing 100049, China</b></td><td>('1874505', 'Xiaowei Zhao', 'xiaowei zhao')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('1695600', 'Xiujuan Chai', 'xiujuan chai')<br/>('1710220', 'Xilin Chen', 'xilin chen')</td><td>mathzxw2002@gmail.com,{sgshan,chaixiujuan,xlchen}@ict.ac.cn
+</td></tr><tr><td>c0ff7dc0d575658bf402719c12b676a34271dfcd</td><td>A New Incremental Optimal Feature Extraction
+<br/>Method for On-line Applications
+<br/><b>K. N. Toosi University of</b><br/>Technology, Tehran, Iran
+<br/>21−Σ
+</td><td>('2784763', 'Youness Aliyari Ghassabeh', 'youness aliyari ghassabeh')<br/>('2060085', 'Hamid Abrishami Moghaddam', 'hamid abrishami moghaddam')</td><td>y_aliyari@ee.kntu.ac.ir, moghadam@saba.kntu.ac.ir
+</td></tr><tr><td>c02847a04a99a5a6e784ab580907278ee3c12653</td><td>Fine Grained Video Classification for
+<br/>Endangered Bird Species Protection
+<br/>Non-Thesis MS Final Report
+<br/>1. Introduction
+<br/>1.1 Background
+<br/>This project is about detecting eagles in videos. Eagles are endangered species at the brim of
+<br/>extinction since 1980s. With the bans of harmful pesticides, the number of eagles keep increasing.
+<br/>However, recent studies on golden eagles’ activities in the vicinity of wind turbines have shown
+<br/>significant number of turbine blade collisions with eagles as the major cause of eagles’ mortality. [1]
+<br/>This project is a part of a larger research project to build an eagle detection and deterrent system
+<br/>on wind turbine toward reducing eagles’ mortality. [2] The critical component of this study is a
+<br/>computer vision system for eagle detection in videos. The key requirement are that the system should
+<br/>work in real time and detect eagles at a far distance from the camera (i.e. in low resolution).
+<br/>There are three different bird species in my dataset - falcon, eagle and seagull. The reason for
+<br/>involving only these three species is based on the real world situation. Wind turbines are always
+<br/>installed near coast and mountain hill where falcons and seagulls will be the majority. So my model
+<br/>will classify the minority eagles out of other bird species during the immigration season and protecting
+<br/>them by using the deterrent system.
+<br/>1.2 Brief Approach
+<br/>Our approach represents a unified deep-learning architecture for eagle detection. Given videos,
+<br/>our goal is to detect eagle species at far distance from the camera, using both appearance and bird
+<br/>motion cues, so as to meet the recall-precision rates set by the user. Detecting eagle is a challenging
+<br/>task because of the following reasons. Frist, an eagle flies fast and high in the sky which means that
+<br/>we need a lens with wide angle such that captures their movement. However, a camera with wide
+<br/>angle produces a low resolution and low quality video and the detailed appearance of bird is
+<br/>compromised. Second, current neural network typically take as input low resolution images. This is
+<br/>because a higher resolution image will require larger filters and deeper networks which is turn hard to
+<br/>train [3]. So it is not clear whether the low resolution will cause challenge for fine-grained
+<br/>classification task. Last but not the least, there is not a large training database like PASCAL, MNIST
+</td><td>('2355840', 'Chenyu Wang', 'chenyu wang')</td><td></td></tr><tr><td>c0c8d720658374cc1ffd6116554a615e846c74b5</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+<br/>Modeling Multimodal Clues in a Hybrid Deep
+<br/>Learning Framework for Video Classification
+</td><td>('1717861', 'Yu-Gang Jiang', 'yu-gang jiang')<br/>('3099139', 'Zuxuan Wu', 'zuxuan wu')<br/>('8053308', 'Jinhui Tang', 'jinhui tang')<br/>('3233021', 'Zechao Li', 'zechao li')<br/>('1713721', 'Xiangyang Xue', 'xiangyang xue')<br/>('9546964', 'Shih-Fu Chang', 'shih-fu chang')</td><td></td></tr><tr><td>c035c193eed5d72c7f187f0bc880a17d217dada0</td><td>Local Gradient Gabor Pattern (LGGP) with Applications in
+<br/>Face Recognition, Cross-spectral Matching and Soft
+<br/>Biometrics
+<br/><b>West Virginia University</b><br/><b>Michigan State University</b><br/>Morgantown, WV, USA
+<br/>East Lansing, MI, USA
+</td><td>('1751335', 'Cunjian Chen', 'cunjian chen')<br/>('1698707', 'Arun Ross', 'arun ross')</td><td></td></tr><tr><td>c0cdaeccff78f49f4604a6d263dc6eb1bb8707d5</td><td>Int'l Conf. IP, Comp. Vision, and Pattern Recognition | IPCV'16 |
+<br/>263
+<br/>MLP Neural Network Based Approach for
+<br/>Facial Expression Analysis
+<br/><b>Kent State University, Kent, Ohio, USA</b><br/>2 Department of Robotic Engineering, AU-TNB, Tehran, Iran
+<br/>the efficiency of
+</td><td></td><td></td></tr><tr><td>c00f402b9cfc3f8dd2c74d6b3552acbd1f358301</td><td>LEARNING DEEP REPRESENTATION FROM COARSE TO FINE FOR FACE ALIGNMENT
+<br/><b>Shanghai Jiao Tong University, China</b></td><td>('3403352', 'Zhiwen Shao', 'zhiwen shao')<br/>('7406856', 'Shouhong Ding', 'shouhong ding')<br/>('3450479', 'Yiru Zhao', 'yiru zhao')<br/>('3451401', 'Qinchuan Zhang', 'qinchuan zhang')<br/>('8452947', 'Lizhuang Ma', 'lizhuang ma')</td><td>{shaozhiwen, feiben, yiru.zhao, qinchuan.zhang}@sjtu.edu.cn, ma-lz@cs.sjtu.edu.cn
+</td></tr><tr><td>c089c7d8d1413b54f59fc410d88e215902e51638</td><td>TVParser: An Automatic TV Video Parsing Method
+<br/><b>National Lab of Pattern Recognition, Institute of Automation</b><br/>Chinese Academy of Sciences, Beijing, China, 100190
+<br/><b>China-Singapore Institute of Digital Media, Singapore</b></td><td>('1690954', 'Chao Liang', 'chao liang')<br/>('1688633', 'Changsheng Xu', 'changsheng xu')<br/>('1709439', 'Jian Cheng', 'jian cheng')<br/>('1694235', 'Hanqing Lu', 'hanqing lu')</td><td>fcliang,csxu,jcheng,luhqg@nlpr.ia.ac.cn
+</td></tr><tr><td>c0ee89dc2dad76147780f96294de9e421348c1f4</td><td>Efficiently detecting outlying behavior in
+<br/>video-game players
+<br/><b>Interdisciplinary Program in Visual Information Processing, Korea University, Seoul, Korea</b><br/><b>School of Games, Hongik University, Seoul, Korea</b><br/><b>Korea University</b><br/>Seoul, Korea
+<br/>4 AI Lab, NCSOFT, Seongnam, Korea
+</td><td>('7652095', 'Young Bin Kim', 'young bin kim')<br/>('40267433', 'Shin Jin Kang', 'shin jin kang')<br/>('4972813', 'Sang Hyeok Lee', 'sang hyeok lee')<br/>('5702793', 'Jang Young Jung', 'jang young jung')<br/>('3000093', 'Hyeong Ryeol Kam', 'hyeong ryeol kam')<br/>('2013790', 'Jung Lee', 'jung lee')<br/>('2467280', 'Young Sun Kim', 'young sun kim')<br/>('3103240', 'Joonsoo Lee', 'joonsoo lee')<br/>('22232963', 'Chang Hun Kim', 'chang hun kim')</td><td></td></tr><tr><td>c0ca6b992cbe46ea3003f4e9b48f4ef57e5fb774</td><td>A Two-Layer Representation For Large-Scale Action Recognition
+<br/><b>Institute of Image Communication and Network Engineering, Shanghai Jiao Tong University</b><br/>2Shanghai Key Lab of Digital Media Processing and Transmission, 3Microsoft Research Asia
+<br/><b>University of California, San Diego</b></td><td>('1701941', 'Jun Zhu', 'jun zhu')<br/>('2450889', 'Baoyuan Wang', 'baoyuan wang')<br/>('1795291', 'Xiaokang Yang', 'xiaokang yang')<br/>('38790729', 'Wenjun Zhang', 'wenjun zhang')<br/>('1736745', 'Zhuowen Tu', 'zhuowen tu')</td><td>{zhujun.sjtu,zhuowen.tu}@gmail.com, baoyuanw@microsoft.com, {xkyang,zhangwenjun}@sjtu.edu.cn
+</td></tr><tr><td>c00df53bd46f78ae925c5768d46080159d4ef87d</td><td>Learning Bag-of-Features Pooling for Deep Convolutional Neural Networks
+<br/><b>Aristotle University of Thessaloniki</b><br/>Thessaloniki, Greece
+</td><td>('3200630', 'Nikolaos Passalis', 'nikolaos passalis')<br/>('1737071', 'Anastasios Tefas', 'anastasios tefas')</td><td>passalis@csd.auth.gr, tefas@aiia.csd.auth.gr
+</td></tr><tr><td>c0d5c3aab87d6e8dd3241db1d931470c15b9e39d</td><td></td><td></td><td></td></tr><tr><td>c05441dd1bc418fb912a6fafa84c0659a6850bf0</td><td>Received on 16th July 2014
+<br/>Revised on 11th September 2014
+<br/>Accepted on 23rd September 2014
+<br/>doi: 10.1049/iet-cvi.2014.0200
+<br/>www.ietdl.org
+<br/>ISSN 1751-9632
+<br/>Face recognition under varying illumination based on
+<br/>adaptive homomorphic eight local directional patterns
+<br/><b>Utah State University, Logan, UT 84322-4205, USA</b></td><td>('2147212', 'Mohammad Reza Faraji', 'mohammad reza faraji')<br/>('1725739', 'Xiaojun Qi', 'xiaojun qi')</td><td>E-mail: Mohammadreza.Faraji@aggiemail.usu.edu
+</td></tr><tr><td>eee8a37a12506ff5df72c402ccc3d59216321346</td><td>Uredniki:
+<br/>dr. Tomaž Erjavec
+<br/>Odsek za tehnologije znanja
+<br/>Institut »Jožef Stefan«, Ljubljana
+<br/>dr. Jerneja Žganec Gros
+<br/>Alpineon d.o.o, Ljubljana
+<br/>Založnik: Institut »Jožef Stefan«, Ljubljana
+<br/>Tisk: Birografika BORI d.o.o.
+<br/>Priprava zbornika: Mitja Lasič
+<br/>Oblikovanje naslovnice: dr. Damjan Demšar
+<br/>Tiskano iz predloga avtorjev
+<br/>Naklada: 50
+<br/>Ljubljana, oktober 2008
+<br/>Konferenco IS 2008 sofinancirata
+<br/>Ministrstvo za visoko šolstvo, znanost in tehnologijo
+<br/>Institut »Jožef Stefan«
+<br/>ISSN 1581-9973
+<br/>CIP - Kataložni zapis o publikaciji
+<br/>Narodna in univerzitetna knjižnica, Ljubljana
+<br/>004.934(082)
+<br/>81'25:004.6(082)
+<br/>004.8(063)
+<br/>oktober 2008, Ljubljana, Slovenia : zbornik 11. mednarodne
+<br/>Proceedings of the Sixth Language Technologies Conference, October
+<br/>16th-17th, 2008 : proceedings of the 11th International
+<br/>Multiconference Information Society - IS 2008, volume C / uredila,
+<br/>edited by Tomaž Erjavec, Jerneja Žganec Gros. - Ljubljana :
+<br/>1581-9973)
+<br/>ISBN 978-961-264-006-4
+<br/>družba 4. Information society 5. Erjavec, Tomaž, 1960- 6.
+<br/>Ljubljana)
+<br/>241520896
+</td><td></td><td></td></tr><tr><td>ee6b503ab512a293e3088fdd7a1c893a77902acb</td><td>Automatic Name-Face Alignment to Enable Cross-Media News Retrieval
+<br/>*School of Computer Science, Shanghai Key Laboratory of Intelligent Information Processing,
+<br/><b>The University of North Carolina at Charlotte, USA</b><br/><b>Fudan University, Shanghai, China</b></td><td>('7550713', 'Yuejie Zhang', 'yuejie zhang')<br/>('1721131', 'Wei Wu', 'wei wu')<br/>('1678662', 'Yang Li', 'yang li')<br/>('1751513', 'Cheng Jin', 'cheng jin')<br/>('1713721', 'Xiangyang Xue', 'xiangyang xue')<br/>('2344620', 'Jianping Fan', 'jianping fan')</td><td>*{yjzhang, 10210240122, 11210240052, jc, xyxue}@fudan.edu.cn, +jfan@uncc.edu
+</td></tr><tr><td>ee18e29a2b998eddb7f6663bb07891bfc7262248</td><td>1119
+<br/>Local Linear Discriminant Analysis Framework
+<br/>Using Sample Neighbors
+</td><td>('38162192', 'David Zhang', 'david zhang')</td><td></td></tr><tr><td>eeb6d084f9906c53ec8da8c34583105ab5ab8284</td><td>12
+<br/>Generation of Facial Expression Map using
+<br/>Supervised and Unsupervised Learning
+<br/><b>Akita Prefectural University</b><br/><b>Akita University</b><br/>Japan
+<br/>1. Introduction
+<br/>Recently, studies of human face recognition have been conducted vigorously (Fasel &
+<br/>Luettin, 2003; Yang et al., 2002; Pantic & Rothkrantz, 2000a; Zhao et al., 2000; Hasegawa et
+<br/>al., 1997; Akamatsu, 1997). Such studies are aimed at the implementation of an intelligent
+<br/>man-machine interface. Especially, studies of facial expression recognition for human-
+<br/>machine emotional communication are attracting attention (Fasel & Luettin, 2003; Pantic &
+<br/>Rothkrantz, 2000a; Tian et al., 2001; Pantic & Rothkrantz, 2000b; Lyons et al., 1999; Lyons et
+<br/>al., 1998; Zhang et al., 1998).
+<br/>The shape (static diversity) and motion (dynamic diversity) of facial components such as the
+<br/>eyebrows, eyes, nose, and mouth manifest expressions. Considering facial expressions from
+<br/>the perspective of static diversity because facial configurations differ among people, it is
+<br/>presumed that a facial expression pattern appearing on a face when facial expression is
+<br/>manifested includes person-specific features. In addition, from the viewpoint of dynamic
+<br/>diversity, because the dynamic change of facial expression originates in a person-specific
+<br/>facial expression pattern, it is presumed that the displacement vector of facial components
+<br/>has person-specific features. The properties of the human face described above reveal the
+<br/>following tasks.
+<br/>The first task is to generalize a facial expression recognition model. Numerous conventional
+<br/>approaches have attempted generalization of a facial expression recognition model. They
+<br/>use the distance of motion of feature points set on a face and the motion vectors of facial
+<br/>muscle movements in its arbitrary regions as feature values. Typically, such methods assign
+<br/>that information to so-called Action Units (AUs) of a Facial Action Coding System (FACS)
+<br/>(Ekman & Friesen, 1978). In fact, AUs are described qualitatively. Therefore, no objective
+<br/>criteria pertain to the setting positions of feature points and regions. They all depend on a
+<br/>particular researcher’s experience. However, features representing facial expressions are
+<br/>presumed to differ among subjects. Accordingly, a huge effort is necessary to link
+<br/>quantitative features with qualitative AUs for each subject and to derive universal features
+<br/>therefrom. It is also suspected that a generalized facial expression recognition model that is
+<br/>applicable to all subjects would disregard person-specific features of facial expressions that are
+<br/>borne originally by each subject. For all the reasons described above, it is an important task to
+<br/>establish a method to extract person-specific features using a common approach to every
+<br/>subject, and to build a facial expression recognition model that incorporates these features.
+<br/>Source: Machine Learning, Book edited by: Abdelhamid Mellouk and Abdennacer Chebira,
+<br/> ISBN 978-3-902613-56-1, pp. 450, February 2009, I-Tech, Vienna, Austria
+<br/>www.intechopen.com
+</td><td>('1932760', 'Masaki Ishii', 'masaki ishii')<br/>('2052920', 'Kazuhito Sato', 'kazuhito sato')<br/>('1738333', 'Hirokazu Madokoro', 'hirokazu madokoro')<br/>('21063785', 'Makoto Nishida', 'makoto nishida')</td><td></td></tr><tr><td>ee815f60dc4a090fa9fcfba0135f4707af21420d</td><td>EAC-Net: A Region-based Deep Enhancing and Cropping Approach for
+<br/>Facial Action Unit Detection
+<br/><b>Grove School of Engineering, CUNY City College, NY, USA</b><br/>2 Department of Computer Science, CUNY Graduate Center, NY, USA
+<br/><b>Engineering and Applied Science, SUNY Binghamton University, NY, USA</b></td><td>('48625314', 'Wei Li', 'wei li')</td><td></td></tr><tr><td>eed7920682789a9afd0de4efd726cd9a706940c8</td><td>Computers to Help with Conversations:
+<br/>Affective Framework to Enhance Human Nonverbal Skills
+<br/>by
+<br/>Mohammed Ehsan Hoque
+<br/><b>B.S., Pennsylvania State University</b><br/><b>M.S., University of Memphis</b><br/>Submitted to the Program in Media Arts and Sciences,
+<br/>School of Architecture and Planning,
+<br/>In partial fulfilment of the requirements for the degree of
+<br/>DOCTOR OF PHILOSOPHY
+<br/>at the
+<br/><b>MASSACHUSETTS INSTITUTE OF TECHNOLOGY</b><br/>September 2013
+<br/><b>Massachusetts Institute of Technology 2013. All rights reserved</b><br/>Author
+<br/>Certified by
+<br/>Accepted by
+<br/> Program in Media Arts and Sciences
+<br/>August 15, 2013
+<br/> Rosalind W. Picard
+<br/> Professor of Media Arts and Sciences
+<br/> Program in Media Arts and Sciences, MIT
+<br/> Thesis supervisor
+<br/>Pattie Maes
+<br/>Associate Academic Head
+<br/>Program in Media Arts and Sciences, MIT
+</td><td></td><td></td></tr><tr><td>ee7093e91466b81d13f4d6933bcee48e4ee63a16</td><td>Discovering Person Identity via
+<br/>Large-Scale Observations
+<br/><b>Interactive and Digital Media Institute, National University of Singapore, SG</b><br/><b>School of Computing, National University of Singapore, SG</b></td><td>('3026404', 'Yongkang Wong', 'yongkang wong')<br/>('1986874', 'Lekha Chaisorn', 'lekha chaisorn')<br/>('1744045', 'Mohan S. Kankanhalli', 'mohan s. kankanhalli')</td><td></td></tr><tr><td>ee461d060da58d6053d2f4988b54eff8655ecede</td><td></td><td></td><td></td></tr><tr><td>eefb8768f60c17d76fe156b55b8a00555eb40f4d</td><td>Subspace Scores for Feature Selection in Computer Vision
+</td><td>('2032038', 'Cameron Musco', 'cameron musco')<br/>('2767340', 'Christopher Musco', 'christopher musco')</td><td>cnmusco@mit.edu
+<br/>cpmusco@mit.edu
+</td></tr><tr><td>ee463f1f72a7e007bae274d2d42cd2e5d817e751</td><td>Automatically Extracting Qualia Relations for the Rich Event Ontology
+<br/><b>University of Colorado Boulder, 2U.S. Army Research Lab</b></td><td>('51203051', 'Ghazaleh Kazeminejad', 'ghazaleh kazeminejad')<br/>('3202888', 'Claire Bonial', 'claire bonial')<br/>('1783500', 'Susan Windisch Brown', 'susan windisch brown')<br/>('1728285', 'Martha Palmer', 'martha palmer')</td><td>{ghazaleh.kazeminejad, susan.brown, martha.palmer}@colorado.edu
+<br/>claire.n.bonial.civ@mail.mil
+</td></tr><tr><td>eed1dd2a5959647896e73d129272cb7c3a2e145c</td><td></td><td></td><td></td></tr><tr><td>ee92d36d72075048a7c8b2af5cc1720c7bace6dd</td><td>FACE RECOGNITION USING MIXTURES OF PRINCIPAL COMPONENTS
+<br/>Video and Display Processing
+<br/>Philips Research USA
+<br/>Briarcliff Manor, NY 10510
+</td><td>('1727257', 'Deepak S. Turaga', 'deepak s. turaga')<br/>('1746230', 'Tsuhan Chen', 'tsuhan chen')</td><td>deepak.turaga@philips.com
+</td></tr><tr><td>ee418372b0038bd3b8ae82bd1518d5c01a33a7ec</td><td>CSE 255 Winter 2015 Assignment 1: Eye Detection using Histogram
+<br/>of Oriented Gradients and Adaboost Classifier
+<br/>Electrical and Computer Engineering Department
+<br/><b>University of California, San Diego</b></td><td>('2812409', 'Kevan Yuen', 'kevan yuen')</td><td>kcyuen@eng.ucsd.edu
+</td></tr><tr><td>eee06d68497be8bf3a8aba4fde42a13aa090b301</td><td>CR-GAN: Learning Complete Representations for Multi-view Generation
+<br/><b>Rutgers University</b><br/><b>University of North Carolina at Charlotte</b></td><td>('6812347', 'Yu Tian', 'yu tian')<br/>('4340744', 'Xi Peng', 'xi peng')<br/>('33860220', 'Long Zhao', 'long zhao')<br/>('1753384', 'Shaoting Zhang', 'shaoting zhang')<br/>('1711560', 'Dimitris N. Metaxas', 'dimitris n. metaxas')</td><td>{yt219, px13, lz311, dnm}@cs.rutgers.edu, szhang16@uncc.edu
+</td></tr><tr><td>eee2d2ac461f46734c8e674ae14ed87bbc8d45c6</td><td>Generalized Rank Pooling for Activity Recognition
+<br/>1Australian Centre for Robotic Vision, 2Data61/CSIRO
+<br/><b>The Australian National University, Canberra, Australia</b></td><td>('2691929', 'Anoop Cherian', 'anoop cherian')<br/>('1688071', 'Basura Fernando', 'basura fernando')<br/>('23911916', 'Mehrtash Harandi', 'mehrtash harandi')<br/>('49384847', 'Stephen Gould', 'stephen gould')</td><td>firstname.lastname@{anu.edu.au, data61.csiro.au}
+</td></tr><tr><td>eed93d2e16b55142b3260d268c9e72099c53d5bc</td><td>ICFVR 2017: 3rd International Competition on Finger Vein Recognition
+<br/><b>Chittagong University of Engineering and Technology</b><br/>∗ These authors contributed equally to this work
+<br/><b>Peking University</b><br/>2Shenzhen Maidi Technology Co., LTD.
+<br/>3TigerIT
+</td><td>('46867002', 'Yi Zhang', 'yi zhang')<br/>('2560109', 'Houjun Huang', 'houjun huang')<br/>('38728899', 'Haifeng Zhang', 'haifeng zhang')<br/>('3142600', 'Liao Ni', 'liao ni')<br/>('47210488', 'Wei Xu', 'wei xu')<br/>('1694788', 'Nasir Uddin Ahmed', 'nasir uddin ahmed')<br/>('9336364', 'Md. Shakil Ahmed', 'md. shakil ahmed')<br/>('9372198', 'Yilun Jin', 'yilun jin')<br/>('23100665', 'Yingjie Chen', 'yingjie chen')<br/>('35273470', 'Jingxuan Wen', 'jingxuan wen')<br/>('39201759', 'Wenxin Li', 'wenxin li')</td><td></td></tr><tr><td>eedfb384a5e42511013b33104f4cd3149432bd9e</td><td>Multimodal Probabilistic Person
+<br/>Tracking and Identification
+<br/>in Smart Spaces
+<br/>zur Erlangung des akademischen Grades eines
+<br/>Doktors der Ingenieurwissenschaften
+<br/>der Fakultät für Informatik
+<br/>der Universität Fridericiana zu Karlsruhe (TH)
+<br/>genehmigte
+<br/>Dissertation
+<br/>von
+<br/>aus Karlsruhe
+<br/>Tag der mündlichen Prüfung: 20.11.2009
+<br/>Erster Gutachter:
+<br/>Zweiter Gutachter:
+<br/>Prof. Dr. A. Waibel
+<br/>Prof. Dr. R. Stiefelhagen
+</td><td>('1701229', 'Keni Bernardin', 'keni bernardin')</td><td></td></tr><tr><td>c94b3a05f6f41d015d524169972ae8fd52871b67</td><td>The Fastest Deformable Part Model for Object Detection
+<br/>Center for Biometrics and Security Research & National Laboratory of Pattern Recognition
+<br/><b>Institute of Automation, Chinese Academy of Sciences, China</b></td><td>('1721677', 'Junjie Yan', 'junjie yan')<br/>('1718623', 'Zhen Lei', 'zhen lei')<br/>('39774417', 'Longyin Wen', 'longyin wen')<br/>('34679741', 'Stan Z. Li', 'stan z. li')</td><td>{jjyan,zlei,lywen,szli}@nlpr.ia.ac.cn
+</td></tr><tr><td>c9424d64b12a4abe0af201e7b641409e182babab</td><td>Article
+<br/>Which, When, and How: Hierarchical Clustering with
+<br/>Human–Machine Cooperation
+<br/>Academic Editor: Tom Burr
+<br/>Received: 3 November 2016; Accepted: 14 December 2016; Published: 21 December 2016
+</td><td>('1751849', 'Huanyang Zheng', 'huanyang zheng')<br/>('1703691', 'Jie Wu', 'jie wu')</td><td>Computer and Information Sciences, Temple University, PA 19121, USA; jiewu@temple.edu
+<br/>* Correspondence: huanyang.zheng@temple.edu; Tel.: +1-215-204-8450
+</td></tr><tr><td>c91103e6612fa7e664ccbc3ed1b0b5deac865b02</td><td>Automatic facial expression recognition using
+<br/>statistical-like moments
+<br/><b>Integrated Research Center, Universit`a Campus Bio-Medico di Roma</b><br/>Via Alvaro del Portillo, 00128 Roma, Italy
+</td><td>('1679260', 'Giulio Iannello', 'giulio iannello')<br/>('1720099', 'Paolo Soda', 'paolo soda')</td><td>{r.dambrosio, g.iannello, p.soda}@unicampus.it
+</td></tr><tr><td>c903af0d69edacf8d1bff3bfd85b9470f6c4c243</td><td></td><td></td><td></td></tr><tr><td>c97a5f2241cc6cd99ef0c4527ea507a50841f60b</td><td>Person Search in Videos with One Portrait
+<br/>Through Visual and Temporal Links
+<br/><b>CUHK-SenseTime Joint Lab, The Chinese University of Hong Kong</b><br/><b>Tsinghua University</b><br/>3 SenseTime Research
+</td><td>('39360892', 'Qingqiu Huang', 'qingqiu huang')<br/>('40584026', 'Wentao Liu', 'wentao liu')<br/>('1807606', 'Dahua Lin', 'dahua lin')</td><td>{hq016,dhlin}@ie.cuhk.edu.hk
+<br/>liuwtwinter@gmail.com
+</td></tr><tr><td>c95cd36779fcbe45e3831ffcd3314e19c85defc5</td><td>FACE RECOGNITION USING MULTI-MODAL LOW-RANK DICTIONARY LEARNING
+<br/><b>University of Alberta, Edmonton, Canada</b></td><td>('1807674', 'Homa Foroughi', 'homa foroughi')<br/>('2627414', 'Moein Shakeri', 'moein shakeri')<br/>('1772846', 'Nilanjan Ray', 'nilanjan ray')<br/>('1734058', 'Hong Zhang', 'hong zhang')</td><td></td></tr><tr><td>c9e955cb9709f16faeb0c840f4dae92eb875450a</td><td>Proposal of Novel Histogram Features
+<br/>for Face Detection
+<br/><b>Harbin Institute of Technology, School of Computer Science and Technology</b><br/>P.O.Box 1071, Harbin, Heilongjiang 150001, China
+<br/><b>Heilongjiang University, College of Computer Science and Technology, China</b></td><td>('2607285', 'Haijing Wang', 'haijing wang')<br/>('40426020', 'Peihua Li', 'peihua li')<br/>('1821107', 'Tianwen Zhang', 'tianwen zhang')</td><td>ninhaijing@yahoo.com
+<br/>peihualj@hotmail.com
+</td></tr><tr><td>c92bb26238f6e30196b0c4a737d8847e61cfb7d4</td><td>BEYOND CONTEXT: EXPLORING SEMANTIC SIMILARITY FOR TINY FACE DETECTION
+<br/><b>School of Computer Science, Northwestern Polytechnical University, P.R.China</b><br/><b>Global Big Data Technologies Centre (GBDTC), University of Technology Sydney, Australia</b><br/><b>School of Data and Computer Science, Sun Yat-sen University, P.R.China</b></td><td>('24336288', 'Yue Xi', 'yue xi')<br/>('3104013', 'Jiangbin Zheng', 'jiangbin zheng')<br/>('1714410', 'Wenjing Jia', 'wenjing jia')<br/>('3031842', 'Hanhui Li', 'hanhui li')</td><td></td></tr><tr><td>c9bbd7828437e70cc3e6863b278aa56a7d545150</td><td>Unconstrained Fashion Landmark Detection via
+<br/>Hierarchical Recurrent Transformer Networks
+<br/><b>The Chinese University of Hong Kong</b><br/>2SenseTime Group Limited
+</td><td>('1979911', 'Sijie Yan', 'sijie yan')<br/>('3243969', 'Ziwei Liu', 'ziwei liu')<br/>('47571885', 'Ping Luo', 'ping luo')<br/>('1725421', 'Shi Qiu', 'shi qiu')</td><td>{ys016,lz013,pluo,xtang}@ie.cuhk.edu.hk,sqiu@sensetime.com,xgwang@ee.cuhk.edu.hk
+</td></tr><tr><td>c9f588d295437009994ddaabb64fd4e4c499b294</td><td>Predicting Professions through
+<br/>Probabilistic Model under Social Context
+<br/><b>Northeastern University</b><br/>Boston, MA, 02115
+</td><td>('2025056', 'Ming Shao', 'ming shao')<br/>('2897748', 'Liangyue Li', 'liangyue li')<br/>('1708679', 'Yun Fu', 'yun fu')</td><td>mingshao@ccs.neu.edu, {liangyue, yunfu}@ece.neu.edu
+</td></tr><tr><td>c92da368a6a886211dc759fe7b1b777a64d8b682</td><td>International Journal of Science and Advanced Technology (ISSN 2221-8386) Volume 1 No 2 April 2011
+<br/>http://www.ijsat.com
+<br/>Face Recognition System based on
+<br/>Face Pose Estimation and
+<br/>Frontal Face Pose Synthesis
+<br/>Department of Electrical Engineering
+<br/><b>National Chiao-Tung University</b><br/>Hsinchu, Taiwan, R.O.C
+<br/>Department of Electrical Engineering
+<br/><b>National Chiao-Tung University</b><br/>Hsinchu, Taiwan, R.O.C
+</td><td>('4525043', 'Kuo-Yu Chiu', 'kuo-yu chiu')<br/>('1707677', 'Sheng-Fuu Lin', 'sheng-fuu lin')</td><td>Alvin_cgr@hotmail.com
+</td></tr><tr><td>c98983592777952d1751103b4d397d3ace00852d</td><td>Face Synthesis from Facial Identity Features
+<br/>Google Research
+<br/>Google Research
+<br/><b>University of Massachusetts Amherst</b><br/>Google Research
+<br/>Google Research
+<br/>CSAIL, MIT and Google Research
+</td><td>('39578349', 'Forrester Cole', 'forrester cole')<br/>('8707513', 'Aaron Sarna', 'aaron sarna')<br/>('2636941', 'David Belanger', 'david belanger')<br/>('1707347', 'Dilip Krishnan', 'dilip krishnan')<br/>('2138834', 'Inbar Mosseri', 'inbar mosseri')<br/>('1768236', 'William T. Freeman', 'william t. freeman')</td><td>fcole@google.com
+<br/>sarna@google.com
+<br/>belanger@cs.umass.edu
+<br/>dilipkay@google.com
+<br/>inbarm@google.com
+<br/>wfreeman@google.com
+</td></tr><tr><td>c9367ed83156d4d682cefc59301b67f5460013e0</td><td>Geometry-Contrastive GAN for Facial Expression
+<br/>Transfer
+<br/><b>Institute of Software, Chinese Academy of Sciences</b></td><td>('35790820', 'Fengchun Qiao', 'fengchun qiao')<br/>('35996065', 'Zirui Jiao', 'zirui jiao')<br/>('3238696', 'Zhihao Li', 'zhihao li')<br/>('1804472', 'Hui Chen', 'hui chen')<br/>('7643981', 'Hongan Wang', 'hongan wang')</td><td></td></tr><tr><td>fc1e37fb16006b62848def92a51434fc74a2431a</td><td>DRAFT
+<br/>A Comprehensive Analysis of Deep Regression
+</td><td>('2793152', 'Pablo Mesejo', 'pablo mesejo')<br/>('1780201', 'Xavier Alameda-Pineda', 'xavier alameda-pineda')<br/>('1794229', 'Radu Horaud', 'radu horaud')</td><td></td></tr><tr><td>fc5bdb98ff97581d7c1e5eb2d24d3f10714aa192</td><td>Initialization Strategies of Spatio-Temporal
+<br/>Convolutional Neural Networks
+<br/><b>University of Toronto</b></td><td>('2711409', 'Elman Mansimov', 'elman mansimov')<br/>('2897313', 'Nitish Srivastava', 'nitish srivastava')<br/>('1776908', 'Ruslan Salakhutdinov', 'ruslan salakhutdinov')</td><td></td></tr><tr><td>fc20149dfdff5fdf020647b57e8a09c06e11434b</td><td>Submitted 8/06; Revised 1/07; Published 5/07
+<br/>Local Discriminant Wavelet Packet Coordinates for Face Recognition
+<br/>Center for Computer Vision and Department of Mathematics
+<br/><b>Sun Yat-Sen (Zhongshan) University</b><br/>Guangzhou, 510275 China
+<br/>Department of Electric Engineering
+<br/><b>City University of Hong Kong</b><br/>83 Tat Chee Avenue
+<br/>Kowloon, Hong Kong, China
+<br/>Editor: Donald Geman
+</td><td>('5692650', 'Chao-Chun Liu', 'chao-chun liu')<br/>('1726138', 'Dao-Qing Dai', 'dao-qing dai')<br/>('1718530', 'Hong Yan', 'hong yan')</td><td>STSDDQ@MAIL.SYSU.EDU.CN
+<br/>H.YAN@CITYU.EDU.HK
+</td></tr><tr><td>fc516a492cf09aaf1d319c8ff112c77cfb55a0e5</td><td></td><td></td><td></td></tr><tr><td>fc0f5859a111fb17e6dcf6ba63dd7b751721ca61</td><td>Design of an Automatic
+<br/>Facial Expression Detector
+<br/>An essay presented for the degree
+<br/>of
+<br/>M.Math
+<br/>Applied Mathematics
+<br/><b>University of Waterloo</b><br/>2018/01/26
+</td><td>('2662893', 'Jian Liang', 'jian liang')</td><td></td></tr><tr><td>fcbec158e6a4ace3d4311b26195482b8388f0ee9</td><td>Face Recognition from Still Images and Videos
+<br/>Center for Automation Research (CfAR) and
+<br/>Department of Electrical and Computer Engineering
+<br/><b>University of Maryland, College Park, MD</b><br/>I. INTRODUCTION
+<br/>In most situations, identifying humans using faces is an effortless task for humans. Is this true for computers?
+<br/>This very question defines the field of automatic face recognition [7], [31], [62], one of the most active research
+<br/>areas in computer vision, pattern recognition, and image understanding.
+<br/>Over the past decade, the problem of face recognition has attracted substantial attention from various disciplines
+<br/>and has witnessed a skyrocketing growth of the literature. Below, we mainly emphasize some key perspectives of
+<br/>the face recognition problem.
+<br/>A. Biometric perspective
+<br/>Face is a biometric. As a consequence, face recognition finds wide applications in authentication, security, and
+<br/>so on. One recent application is the US-VISIT system by the Department of Homeland Security (DHS), collecting
+<br/>foreign passengers’ fingerprints and face images.
+<br/>Biometric signatures of a person characterize the physiological or behavioral characteristics. Physiological bio-
+<br/>metrics are innate or naturally occuring, while behavioral biometrics arise from mannerisms or traits that are learned
+<br/>or acquired. Table I lists commonly used biometrics. Biometric technologies provide the foundation for an extensive
+<br/>array of highly secure identification and personal verification solutions. Compared to conventional identification and
+<br/>verification methods based on personal identification numbers (PINs) or passwords, biometric technologies offer
+<br/>many advantages. First, biometrics are individualized traits while passwords may be used or stolen by someone
+<br/>other than the authorized user. Also, biometric is very convenient since there is nothing to carry or remember. In
+<br/>addition, biometric technologies are becoming more accurate and less expensive.
+<br/>Among all biometrics listed in Table I, the face is a very unique one because it is the only biometric belonging
+<br/>to both physiological and behavioral categories. While the physiological part of the face has been widely exploited
+<br/>Partially supported by NSF ITR Grant 03-25119. Zhou is now with Integrated Data Systems Department, Siemens Corporate Research,
+<br/>November 5, 2004
+<br/>DRAFT
+</td><td>('1682187', 'Shaohua Kevin Zhou', 'shaohua kevin zhou')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td>Email: {shaohua, rama}@cfar.umd.edu
+<br/>Princeton, NJ 08540. His current email address is kzhou@scr.siemens.com.
+</td></tr><tr><td>fcd3d69b418d56ae6800a421c8b89ef363418665</td><td>Effects of Aging over Facial Feature Analysis and Face
+<br/>Recognition
+<br/>Bogaziçi Un. Electronics Eng. Dept. March 2010
+</td><td>('3398552', 'Bilgin Esme', 'bilgin esme')</td><td></td></tr><tr><td>fcd77f3ca6b40aad6edbd1dab9681d201f85f365</td><td>c(cid:13)Copyright 2014
+</td><td>('3299424', 'Miro Enev', 'miro enev')</td><td></td></tr><tr><td>fcf91995dc4d9b0cee84bda5b5b0ce5b757740ac</td><td>Proceedings of the Thirty-First AAAI Conference on Artificial Intelligence (AAAI-17)
+<br/>Asymmetric Discrete Graph Hashing
+<br/><b>University of Florida, Gainesville, FL, 32611, USA</b></td><td>('2766473', 'Xiaoshuang Shi', 'xiaoshuang shi')<br/>('2082604', 'Fuyong Xing', 'fuyong xing')<br/>('46321210', 'Kaidi Xu', 'kaidi xu')<br/>('2599018', 'Manish Sapkota', 'manish sapkota')<br/>('49576071', 'Lin Yang', 'lin yang')</td><td>xsshi2015@ufl.edu
+</td></tr><tr><td>fc798314994bf94d1cde8d615ba4d5e61b6268b6</td><td>Face Recognition: face in video, age invariance,
+<br/>and facial marks
+<br/>By
+<br/>A DISSERTATION
+<br/>Submitted to
+<br/><b>Michigan State University</b><br/>in partial fulfillment of the requirements
+<br/>for the degree of
+<br/>DOCTOR OF PHILOSOPHY
+<br/>Computer Science
+<br/>2009
+</td><td>('2222919', 'Unsang Park', 'unsang park')</td><td></td></tr><tr><td>fc23a386c2189f221b25dbd0bb34fcd26ccf60fa</td><td>A Discriminative Latent Model of Object
+<br/>Classes and Attributes
+<br/><b>School of Computing Science, Simon Fraser University, Canada</b></td><td>('40457160', 'Yang Wang', 'yang wang')<br/>('10771328', 'Greg Mori', 'greg mori')</td><td>{ywang12,mori}@cs.sfu.ca
+</td></tr><tr><td>fc68c5a3ab80d2d31e6fd4865a7ff2b4ab66ca9f</td><td>This is a preprint of the paper presented at the 11th International Conference Beyond Databases, Architectures and
+<br/>Structures (BDAS 2015), May 26-29 2015 in Ustroń, Poland and published in the Communications in Computer and
+<br/>Information Science Volume 521, 2015, pp 585-597. DOI: 10.1007/978-3-319-18422-7_52
+<br/>Evaluation Criteria for Affect-Annotated Databases
+<br/><b>Faculty of Electronics, Telecommunications and Informatics, Gdansk University of Technology, Poland</b></td><td>('2414357', 'Agnieszka Landowska', 'agnieszka landowska')<br/>('3271448', 'Mariusz Szwoch', 'mariusz szwoch')<br/>('3175073', 'Wioleta Szwoch', 'wioleta szwoch')</td><td>szwoch@eti.pg.gda.pl
+</td></tr><tr><td>fc2bad3544c7c8dc7cd182f54888baf99ed75e53</td><td>Efficient Retrieval for Large Scale Metric
+<br/>Learning
+<br/><b>Institute for Computer Graphics and Vision</b><br/><b>Graz University of Technology, Austria</b></td><td>('1791182', 'Peter M. Roth', 'peter m. roth')<br/>('3628150', 'Horst Bischof', 'horst bischof')</td><td>{koestinger,pmroth,bischof}@icg.tugraz.at
+</td></tr><tr><td>fcf8bb1bf2b7e3f71fb337ca3fcf3d9cf18daa46</td><td>MANUSCRIPT SUBMITTED TO IEEE TRANS. PATTERN ANAL. MACH. INTELL., JULY 2010
+<br/>Feature Selection via Sparse Approximation for
+<br/>Face Recognition
+</td><td>('1944073', 'Yixiong Liang', 'yixiong liang')<br/>('31685288', 'Lei Wang', 'lei wang')<br/>('2090968', 'Yao Xiang', 'yao xiang')<br/>('6609276', 'Beiji Zou', 'beiji zou')</td><td></td></tr><tr><td>fcbf808bdf140442cddf0710defb2766c2d25c30</td><td>IJCV manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Unsupervised Semantic Action Discovery from Video
+<br/>Collections
+<br/>Received: date / Accepted: date
+</td><td>('3114252', 'Ozan Sener', 'ozan sener')<br/>('1681995', 'Ashutosh Saxena', 'ashutosh saxena')</td><td></td></tr><tr><td>fdff2da5bdca66e0ab5874ef58ac2205fb088ed7</td><td>Continuous Supervised Descent Method for
+<br/>Facial Landmark Localisation
+<br/>1Universitat Oberta de Catalunya, 156 Rambla del Poblenou, Barcelona, Spain
+<br/>2Universitat de Barcelona, 585 Gran Via de les Corts Catalanes, Barcelona, Spain
+<br/><b>Robotics Institute, Carnegie Mellon University, Pittsburgh, PA, USA</b><br/>4Computer Vision Center, O Building, UAB Campus, Bellaterra, Spain
+<br/><b>University of Pittsburgh, Pittsburgh, PA, USA</b></td><td>('3305641', 'Marc Oliu', 'marc oliu')<br/>('1733113', 'Takeo Kanade', 'takeo kanade')<br/>('7855312', 'Sergio Escalera', 'sergio escalera')</td><td></td></tr><tr><td>fdfd57d4721174eba288e501c0c120ad076cdca8</td><td>An Analysis of Action Recognition Datasets for
+<br/>Language and Vision Tasks
+<br/><b>Institute for Language, Cognition and Computation</b><br/><b>School of Informatics, University of Edinburgh</b><br/>10 Crichton Street, Edinburgh EH8 9AB
+</td><td>('2921001', 'Spandana Gella', 'spandana gella')<br/>('48716849', 'Frank Keller', 'frank keller')</td><td>S.Gella@sms.ed.ac.uk, keller@inf.ed.ac.uk
+</td></tr><tr><td>fd4ac1da699885f71970588f84316589b7d8317b</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 6, NO. 1, JANUARY 2007
+<br/>Supervised Descent Method
+<br/>for Solving Nonlinear Least Squares
+<br/>Problems in Computer Vision
+</td><td>('3182065', 'Xuehan Xiong', 'xuehan xiong')<br/>('1707876', 'Fernando De la Torre', 'fernando de la torre')</td><td></td></tr><tr><td>fd33df02f970055d74fbe69b05d1a7a1b9b2219b</td><td>Single Shot Temporal Action Detection
+<br/><b>Shanghai Jiao Tong University, China. 2Columbia University, USA</b><br/><b>Cooperative Medianet Innovation Center (CMIC), Shanghai Jiao Tong University, China</b></td><td>('6873935', 'Tianwei Lin', 'tianwei lin')<br/>('1758267', 'Xu Zhao', 'xu zhao')<br/>('2195345', 'Zheng Shou', 'zheng shou')</td><td>{wzmsltw,zhaoxu}@sjtu.edu.cn,zs2262@columbia.edu
+</td></tr><tr><td>fdf533eeb1306ba418b09210387833bdf27bb756</td><td>951
+</td><td></td><td></td></tr><tr><td>fdda5852f2cffc871fd40b0cb1aa14cea54cd7e3</td><td>Im2Flow: Motion Hallucination from Static Images for Action Recognition
+<br/>UT Austin
+<br/>UT Austin
+<br/>UT Austin
+</td><td>('3387849', 'Ruohan Gao', 'ruohan gao')<br/>('50398746', 'Bo Xiong', 'bo xiong')<br/>('1794409', 'Kristen Grauman', 'kristen grauman')</td><td>rhgao@cs.utexas.edu
+<br/>bxiong@cs.utexas.edu
+<br/>grauman@cs.utexas.edu
+</td></tr><tr><td>fdfaf46910012c7cdf72bba12e802a318b5bef5a</td><td>Computerized Face Recognition in Renaissance
+<br/>Portrait Art
+</td><td>('18640672', 'Ramya Srinivasan', 'ramya srinivasan')<br/>('3007257', 'Conrad Rudolph', 'conrad rudolph')<br/>('1688416', 'Amit Roy-Chowdhury', 'amit roy-chowdhury')</td><td></td></tr><tr><td>fd15e397629e0241642329fc8ee0b8cd6c6ac807</td><td>Semi-Supervised Clustering with Neural Networks
+<br/>IIIT-Delhi, India
+</td><td>('2200208', 'Ankita Shukla', 'ankita shukla')<br/>('39866663', 'Gullal Singh Cheema', 'gullal singh cheema')<br/>('34817359', 'Saket Anand', 'saket anand')</td><td>{ankitas, gullal1408, anands}@iiitd.ac.in
+</td></tr><tr><td>fde41dc4ec6ac6474194b99e05b43dd6a6c4f06f</td><td>Multi-Expert Gender Classification on Age Group by Integrating Deep Neural
+<br/>Networks
+<br/><b>Yonsei University</b><br/>50 Yonsei-ro, Seodaemun-gu, Seoul 03722, Republic of Korea.
+</td><td>('51430701', 'Jun Beom Kho', 'jun beom kho')</td><td>kojb87@hanmail.net
+</td></tr><tr><td>fd9feb21b3d1fab470ff82e3f03efce6a0e67a1f</td><td><b>University of Twente</b><br/>Department of Services, Cybersecurity and Safety
+<br/>Master Thesis
+<br/>Deep Verification Learning
+<br/>Author:
+<br/>F.H.J. Hillerstr¨om
+<br/>Committee:
+<br/>Prof. Dr. Ir. R.N.J. Veldhuis
+<br/>Dr. Ir. L.J. Spreeuwers
+<br/>Dr. Ir. D. Hiemstra
+<br/>December 5, 2016
+</td><td></td><td></td></tr><tr><td>fdca08416bdadda91ae977db7d503e8610dd744f</td><td>
+<br/>ICT-2009.7.1
+<br/>KSERA Project
+<br/>2010-248085
+<br/>Deliverable D3.1
+<br/>Deliverable D3.1
+<br/>Human Robot Interaction
+<br/>Human Robot Interaction
+<br/>18 October 2010
+<br/>Public Document
+<br/>The KSERA project (http://www.ksera
+<br/>KSERA project (http://www.ksera-project.eu) has received funding from the European Commission
+<br/>project.eu) has received funding from the European Commission
+<br/>under the 7th Framework Programme (FP7) for Research and Technological Development under grant
+<br/>under the 7th Framework Programme (FP7) for Research and Technological Development under grant
+<br/>under the 7th Framework Programme (FP7) for Research and Technological Development under grant
+<br/>agreement n°2010-248085.
+</td><td></td><td></td></tr><tr><td>fd53be2e0a9f33080a9db4b5a5e416e24ae8e198</td><td>Apparent Age Estimation Using Ensemble of Deep Learning Models
+<br/>Refik Can Mallı∗
+<br/>Mehmet Ayg¨un∗
+<br/>Hazım Kemal Ekenel
+<br/><b>Istanbul Technical University</b><br/>Istanbul, Turkey
+</td><td></td><td>{mallir,aygunme,ekenel}@itu.edu.tr
+</td></tr><tr><td>fd71ae9599e8a51d8a61e31e6faaaf4a23a17d81</td><td>Action Detection from a Robot-Car Perspective
+<br/>Universit´a degli Studi Federico II
+<br/>Naples, Italy
+<br/><b>Oxford Brookes University</b><br/>Oxford, UK
+</td><td>('39078800', 'Valentina Fontana', 'valentina fontana')<br/>('51149466', 'Manuele Di Maio', 'manuele di maio')<br/>('51152717', 'Stephen Akrigg', 'stephen akrigg')<br/>('1931660', 'Gurkirt Singh', 'gurkirt singh')<br/>('49348905', 'Suman Saha', 'suman saha')<br/>('1754181', 'Fabio Cuzzolin', 'fabio cuzzolin')</td><td>vale.fontana@studenti.unina.it, man.dimaio@gmail.com
+<br/>15057204@brookes.ac.uk, gurkirt.singh-2015@brookes.ac.uk,
+<br/>suman.saha-2014@brookes.ac.uk, fabio.cuzzolin@brookes.ac.uk
+</td></tr><tr><td>fd96432675911a702b8a4ce857b7c8619498bf9f</td><td>Improved Face Detection and Alignment using Cascade
+<br/>Deep Convolutional Network
+<br/>†Beijing Key Laboratory of Intelligent Information Technology, School of
+<br/><b>Computer Science, Beijing Institute of Technology, Beijing 100081, P.R.China</b><br/><b>China Mobile Research Institute, Xuanwu Men West Street, Beijing</b></td><td>('22244104', 'Weilin Cong', 'weilin cong')<br/>('2901725', 'Sanyuan Zhao', 'sanyuan zhao')<br/>('1698061', 'Hui Tian', 'hui tian')<br/>('34926055', 'Jianbing Shen', 'jianbing shen')</td><td></td></tr><tr><td>fd10b0c771a2620c0db294cfb82b80d65f73900d</td><td>Identifying The Most Informative Features Using A Structurally Interacting Elastic Net
+<br/><b>Central University of Finance and Economics, Beijing, China</b><br/><b>Xiamen University, Xiamen, Fujian, China</b><br/><b>University of York, York, UK</b></td><td>('2290930', 'Lixin Cui', 'lixin cui')<br/>('1749518', 'Lu Bai', 'lu bai')<br/>('47295137', 'Zhihong Zhang', 'zhihong zhang')<br/>('49416727', 'Yue Wang', 'yue wang')<br/>('1679753', 'Edwin R. Hancock', 'edwin r. hancock')</td><td></td></tr><tr><td>fd7b6c77b46420c27725757553fcd1fb24ea29a8</td><td>MEXSVMs: Mid-level Features for Scalable Action Recognition
+<br/><b>Dartmouth College</b><br/>6211 Sudikoff Lab, Hanover, NH 03755
+<br/>Dartmouth Computer Science Technical Report TR2013-726
+</td><td>('1687325', 'Du Tran', 'du tran')<br/>('1732879', 'Lorenzo Torresani', 'lorenzo torresani')</td><td>{dutran,lorenzo}@cs.dartmouth.edu
+</td></tr><tr><td>fdb33141005ca1b208a725796732ab10a9c37d75</td><td>Int.J.Appl. Math. Comput.Sci.,2016,Vol. 26,No. 2,451–465
+<br/>DOI: 10.1515/amcs-2016-0032
+<br/>A CONNECTIONIST COMPUTATIONAL METHOD FOR FACE RECOGNITION
+<br/>, JOS ´E A. GIRONA-SELVA a
+<br/>aDepartment of Computer Technology
+<br/><b>University of Alicante, 03690, San Vicente del Raspeig, Alicante, Spain</b><br/>In this work, a modified version of the elastic bunch graph matching (EBGM) algorithm for face recognition is introduced.
+<br/>First, faces are detected by using a fuzzy skin detector based on the RGB color space. Then, the fiducial points for the facial
+<br/>graph are extracted automatically by adjusting a grid of points to the result of an edge detector. After that, the position of
+<br/>the nodes, their relation with their neighbors and their Gabor jets are calculated in order to obtain the feature vector defining
+<br/>each face. A self-organizing map (SOM) framework is shown afterwards. Thus, the calculation of the winning neuron and
+<br/>the recognition process are performed by using a similarity function that takes into account both the geometric and texture
+<br/>information of the facial graph. The set of experiments carried out for our SOM-EBGM method shows the accuracy of our
+<br/>proposal when compared with other state-of the-art methods.
+<br/>Keywords: pattern recognition, face recognition, neural networks, self-organizing maps.
+<br/>1.
+<br/>Introduction
+<br/>libraries,
+<br/>In recent years, there has been intensive research carried
+<br/>to develop complex security systems involving
+<br/>out
+<br/>biometric features.
+<br/>Automated biometric systems
+<br/>are being widely used in many applications such
+<br/>as surveillance, digital
+<br/>law
+<br/>enforcement, human computer intelligent interaction, and
+<br/>banking, among others. For applications requiring high
+<br/>levels of security, biometrics can be integrated with other
+<br/>authentication means such as smart cards and passwords.
+<br/>In relation to this, face recognition is an emerging research
+<br/>area and, in the next few years, it is supposed to be
+<br/>extensively used for automatic human recognition systems
+<br/>in many of the applications mentioned before.
+<br/>forensic work,
+<br/>One of the most popular methods for face recognition
+<br/>is elastic graph bunch matching (EBGM), proposed by
+<br/>Wiskott et al. (1997). This method is an evolution of the
+<br/>so-called dynamic link architecture (DLA) (Kotropoulos
+<br/>and Pitas, 1997). The main idea in elastic graph matching
+<br/>is to represent a face starting from a set of reference or
+<br/>fiducial points known as landmarks. These fiducial points
+<br/>have a spatial coherence, as they are connected using a
+<br/>graph structure. Therefore, EBGM represents faces as
+<br/>facial graphs with nodes at those facial landmarks (such
+<br/>Corresponding author
+<br/>as eyes, the tip of the nose, etc.). Considering these nodes,
+<br/>geometric information can be extracted, and both distance
+<br/>and angle metrics can be defined accordingly.
+<br/>This algorithm takes into account that facial images
+<br/>have many nonlinear features (variations in lighting,
+<br/>pose and expression) that are not generally considered
+<br/>in linear analysis methods, such as linear discriminant
+<br/>analysis (LDA) or principal component analysis (PCA)
+<br/>(Shin and Park, 2011). Moreover, it is particularly robust
+<br/>when out-of-plane rotations appear. However, the main
+<br/>drawback of this method is that it requires an accurate
+<br/>location of the fiducial points.
+<br/>Artificial neural networks (ANNs) are one of the
+<br/>most often used paradigms to address problems in
+<br/>artificial intelligence (Ba´nka et al., 2014; Kayarvizhy
+<br/>et al., 2014; Tran et al., 2014; Kumar and Kumar,
+<br/>2015). Among the different approaches of ANNs, the self
+<br/>organizing map (SOM) has special features for association
+<br/>and pattern classification (Kohonen, 2001), and it is one of
+<br/>the most popular neural network models. This technique
+<br/>is suitable in situations where there is an inaccuracy or a
+<br/>lack of formalization of the problem to be solved. In these
+<br/>cases, there is no precise mathematical formulation of
+<br/>the relationship between the input patterns (Azor´ın-L´opez
+<br/>et al., 2014).
+<br/>The SOM makes use of an unsupervised learning
+</td><td>('2274078', 'Francisco A. Pujol', 'francisco a. pujol')</td><td>e-mail: {fpujol,hmora}@dtic.ua.es,jags20@alu.ua.es
+</td></tr><tr><td>fdbacf2ff0fc21e021c830cdcff7d347f2fddd8e</td><td>ORIGINAL RESEARCH
+<br/>published: 17 August 2018
+<br/>doi: 10.3389/fnhum.2018.00327
+<br/>Recognizing Frustration of Drivers
+<br/>From Face Video Recordings and
+<br/>Brain Activation Measurements With
+<br/>Functional Near-Infrared
+<br/>Spectroscopy
+<br/><b>Institute of Transportation Systems, German Aerospace Center (DLR), Braunschweig</b><br/><b>Germany, University of Oldenburg, Oldenburg, Germany</b><br/>Experiencing frustration while driving can harm cognitive processing, result in aggressive
+<br/>behavior and hence negatively influence driving performance and traffic safety. Being
+<br/>able to automatically detect frustration would allow adaptive driver assistance and
+<br/>automation systems to adequately react to a driver’s frustration and mitigate potential
+<br/>negative consequences. To identify reliable and valid indicators of driver’s frustration,
+<br/>we conducted two driving simulator experiments. In the first experiment, we aimed to
+<br/>reveal facial expressions that indicate frustration in continuous video recordings of the
+<br/>driver’s face taken while driving highly realistic simulator scenarios in which frustrated
+<br/>or non-frustrated emotional states were experienced. An automated analysis of facial
+<br/>expressions combined with multivariate logistic regression classification revealed that
+<br/>frustrated time intervals can be discriminated from non-frustrated ones with accuracy
+<br/>of 62.0% (mean over 30 participants). A further analysis of the facial expressions
+<br/>revealed that frustrated drivers tend to activate muscles in the mouth region (chin
+<br/>raiser, lip pucker, lip pressor). In the second experiment, we measured cortical activation
+<br/>with almost whole-head functional near-infrared spectroscopy (fNIRS) while participants
+<br/>experienced frustrating and non-frustrating driving simulator scenarios. Multivariate
+<br/>logistic regression applied to the fNIRS measurements allowed us to discriminate
+<br/>between frustrated and non-frustrated driving intervals with higher accuracy of 78.1%
+<br/>(mean over 12 participants). Frustrated driving intervals were indicated by increased
+<br/>activation in the inferior frontal, putative premotor and occipito-temporal cortices.
+<br/>Our results show that facial and cortical markers of
+<br/>frustration can be informative
+<br/>for time resolved driver state identification in complex realistic driving situations. The
+<br/>markers derived here can potentially be used as an input for future adaptive driver
+<br/>assistance and automation systems that detect driver frustration and adaptively react
+<br/>to mitigate it.
+<br/>Keywords: frustration, driver state recognition, facial expressions, functional near-infrared spectroscopy, adaptive
+<br/>automation
+<br/>Edited by:
+<br/>Guido P. H. Band,
+<br/><b>Leiden University, Netherlands</b><br/>Reviewed by:
+<br/>Paola Pinti,
+<br/><b>University College London</b><br/>United Kingdom
+<br/>Edmund Wascher,
+<br/>Leibniz-Institut für Arbeitsforschung
+<br/>an der TU Dortmund (IfADo),
+<br/>Germany
+<br/>*Correspondence:
+<br/>Received: 17 April 2018
+<br/>Accepted: 25 July 2018
+<br/>Published: 17 August 2018
+<br/>Citation:
+<br/>Ihme K, Unni A, Zhang M, Rieger JW
+<br/>and Jipp M (2018) Recognizing
+<br/>Frustration of Drivers From Face
+<br/>Video Recordings and Brain
+<br/>Activation Measurements With
+<br/>Functional Near-Infrared
+<br/>Spectroscopy.
+<br/>Front. Hum. Neurosci. 12:327.
+<br/>doi: 10.3389/fnhum.2018.00327
+<br/>Frontiers in Human Neuroscience | www.frontiersin.org
+<br/>August 2018 | Volume 12 | Article 327
+</td><td>('2873465', 'Klas Ihme', 'klas ihme')<br/>('34722642', 'Anirudh Unni', 'anirudh unni')<br/>('48984951', 'Meng Zhang', 'meng zhang')<br/>('2743311', 'Jochem W. Rieger', 'jochem w. rieger')<br/>('50093361', 'Meike Jipp', 'meike jipp')<br/>('2873465', 'Klas Ihme', 'klas ihme')</td><td>klas.ihme@dlr.de
+</td></tr><tr><td>fd892e912149e3f5ddd82499e16f9ea0f0063fa3</td><td>GazeDirector: Fully Articulated Eye Gaze Redirection in Video
+<br/><b>University of Cambridge, UK 2Carnegie Mellon University, USA</b><br/><b>Max Planck Institute for Informatics, Germany</b><br/>4Microsoft
+</td><td>('34399452', 'Erroll Wood', 'erroll wood')<br/>('49933077', 'Louis-Philippe Morency', 'louis-philippe morency')</td><td></td></tr><tr><td>fde0180735699ea31f6c001c71eae507848b190f</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 76– No.3, August 2013
+<br/>Face Detection and Sex Identification from Color Images
+<br/>using AdaBoost with SVM based Component Classifier
+<br/>Lecturer, Department of EEE
+<br/><b>University of Information</b><br/>Technology and Sciences
+<br/>(UITS)
+<br/>Dhaka, Bangladesh
+<br/>B.Sc. in EEE
+<br/><b>International University of</b><br/>Business Agriculture and
+<br/>Technology (IUBAT)
+<br/>Dhaka-1230, Bangladesh
+<br/>Lecturer, Department of EEE
+<br/><b>International University of</b><br/>Business Agriculture and
+<br/>Technology (IUBAT)
+<br/>Dhaka-1230, Bangladesh
+</td><td>('1804849', 'Tonmoy Das', 'tonmoy das')<br/>('2832495', 'Md. Hafizur Rahman', 'md. hafizur rahman')</td><td></td></tr><tr><td>fdf8e293a7618f560e76bd83e3c40a0788104547</td><td>Interspecies Knowledge Transfer for Facial Keypoint Detection
+<br/><b>University of California, Davis</b><br/><b>Zhejiang University</b><br/><b>University of California, Davis</b></td><td>('35157022', 'Maheen Rashid', 'maheen rashid')<br/>('10734287', 'Xiuye Gu', 'xiuye gu')<br/>('1883898', 'Yong Jae Lee', 'yong jae lee')</td><td>mhnrashid@ucdavis.edu
+<br/>gxy0922@zju.edu.cn
+<br/>yongjaelee@ucdavis.edu
+</td></tr><tr><td>fd615118fb290a8e3883e1f75390de8a6c68bfde</td><td>Joint Face Alignment with Non-Parametric
+<br/>Shape Models
+<br/><b>University of Wisconsin Madison</b><br/>http://www.cs.wisc.edu/~lizhang/projects/joint-align/
+</td><td>('1893050', 'Brandon M. Smith', 'brandon m. smith')<br/>('40396555', 'Li Zhang', 'li zhang')</td><td></td></tr><tr><td>fdaf65b314faee97220162980e76dbc8f32db9d6</td><td>Accepted Manuscript
+<br/>Face recognition using both visible light image and near-infrared image and a deep
+<br/>network
+<br/>PII:
+<br/>DOI:
+<br/>Reference:
+<br/>S2468-2322(17)30014-8
+<br/>10.1016/j.trit.2017.03.001
+<br/>TRIT 41
+<br/>To appear in:
+<br/>CAAI Transactions on Intelligence Technology
+<br/>Received Date: 30 January 2017
+<br/>Accepted Date: 28 March 2017
+<br/>Please cite this article as: K. Guo, S. Wu, Y. Xu, Face recognition using both visible light image and
+<br/>near-infrared image and a deep network, CAAI Transactions on Intelligence Technology (2017), doi:
+<br/>10.1016/j.trit.2017.03.001.
+<br/>This is a PDF file of an unedited manuscript that has been accepted for publication. As a service to
+<br/>our customers we are providing this early version of the manuscript. The manuscript will undergo
+<br/>copyediting, typesetting, and review of the resulting proof before it is published in its final form. Please
+<br/>note that during the production process errors may be discovered which could affect the content, and all
+<br/>legal disclaimers that apply to the journal pertain.
+</td><td>('48477652', 'Kai Guo', 'kai guo')<br/>('40200363', 'Shuai Wu', 'shuai wu')</td><td></td></tr><tr><td>f22d6d59e413ee255e5e0f2104f1e03be1a6722e</td><td>Lattice Long Short-Term Memory for Human Action Recognition
+<br/><b>The Hong Kong University of Science and Technology</b><br/><b>Stanford University</b><br/><b>South China University of Technology</b></td><td>('41191188', 'Lin Sun', 'lin sun')<br/>('2370507', 'Kui Jia', 'kui jia')<br/>('1794604', 'Kevin Chen', 'kevin chen')<br/>('2131088', 'Bertram E. Shi', 'bertram e. shi')<br/>('1702137', 'Silvio Savarese', 'silvio savarese')</td><td></td></tr><tr><td>f24e379e942e134d41c4acec444ecf02b9d0d3a9</td><td>International Scholarly Research Network
+<br/>ISRN Machine Vision
+<br/>Volume 2012, Article ID 505974, 7 pages
+<br/>doi:10.5402/2012/505974
+<br/>Research Article
+<br/>Analysis of Facial Images across Age Progression by Humans
+<br/><b>Temple University, Philadelphia, PA 19122, USA</b><br/><b>Temple University, Philadelphia, PA 19122, USA</b><br/><b>West Virginia University, Morgantown, WV 26506, USA</b><br/>Received 25 July 2011; Accepted 25 August 2011
+<br/>Academic Editors: O. Ghita and R.-H. Park
+<br/>which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
+<br/>The appearance of human faces can undergo large variations over aging progress. Analysis of facial image taken over age
+<br/>progression recently attracts increasing attentions in computer-vision community. Human abilities for such analysis are, however,
+<br/>less studied. In this paper, we conduct a thorough study of human ability on two tasks, face verification and age estimation, for
+<br/>facial images taken at different ages. Detailed and rigorous experimental analysis is provided, which helps understanding roles of
+<br/>different factors including age group, age gap, race, and gender. In addition, our study also leads to an interesting observation: for
+<br/>age estimation, photos from adults are more challenging than that from young people. We expect the study to provide a reference
+<br/>for machine-based solutions.
+<br/>1. Introduction
+<br/>Human faces are important in revealing the personal char-
+<br/>acteristic and understanding visual data. The facial research
+<br/>has been studied over several decades in computer vision
+<br/>community [1, 2]. Analysis facial images across age pro-
+<br/>gression recently attracts increasing research attention [3]
+<br/>because of its important real-life applications. For example,
+<br/>facial appearance predictor of missing people and ID photo
+<br/>automatic update system are playing important roles in
+<br/>simulating face aging of human beings. Age estimation can
+<br/>also be applied to age-restricted vending machine [4]. Most
+<br/>recent studies (see Section 2) of age-related facial image
+<br/>analysis mainly focus on three tasks: face verification, age
+<br/>estimation, and age effect simulation. In comparison, it
+<br/>remains unclear how humans perform on these tasks.
+<br/>In this paper, we study human ability on face verification
+<br/>and age estimation for face photos taken at across age
+<br/>progression. Such studies are important in that it not only
+<br/>provides a reference for future machine-based solutions,
+<br/>but also provides insight on how different factors (e.g., age
+<br/>gaps, gender, etc.) affect facial analysis algorithms. There are
+<br/>previous works on human performance for face recognition
+<br/>and age estimation; however, most of them are either
+<br/>focusing on nonage related issues such as lighting [5] or
+<br/>limited by the scale of image datasets (e.g., [6]). Taking
+<br/>advantage of the recent available MORPH dataset [7], which
+<br/>to the best of our knowledge is the largest publicly available
+<br/>face aging dataset, we are able to conduct thorough human
+<br/>studies on facial analysis tasks.
+<br/>For face verification, the task is to let a human subject
+<br/>decide whether two photos come from the same person (at
+<br/>different ages). In addition to report the general performance
+<br/>on our human subjects’ performance, we also analyze the
+<br/><b>e ects of di erence factors, including age group, age gap</b><br/>race, and gender. In addition, we also compare human
+<br/>performance with previous reported baseline algorithm. For
+<br/>age estimation, similarly, we report and analyze human
+<br/>performance for general cases as well as for different factors.
+<br/>Compared to a previous study on the FGNet database [8],
+<br/>our study implies that age estimation are harder for photos
+<br/>from adults than those from young people.
+<br/>The rest of the paper is organized as follows. Section 2
+<br/>shows the related works on different databases. Section 3
+<br/>describes the details of human experiments of face-recog-
+<br/>nition and age-estimation problems. Then, in Section 4,
+</td><td>('38129124', 'Jingting Zeng', 'jingting zeng')<br/>('1805398', 'Haibin Ling', 'haibin ling')<br/>('1686678', 'Longin Jan Latecki', 'longin jan latecki')<br/>('1822413', 'Guodong Guo', 'guodong guo')<br/>('38129124', 'Jingting Zeng', 'jingting zeng')</td><td>Correspondence should be addressed to Haibin Ling, hbling@temple.edu
+</td></tr><tr><td>f2b13946d42a50fa36a2c6d20d28de2234aba3b4</td><td>Adaptive Facial Expression Recognition Using Inter-modal
+<br/>Top-down Context
+<br/>Ravi Kiran
+<br/>Sarvadevabhatla
+<br/><b>Honda Research Institute USA</b><br/>425 National Ave, Suite 100
+<br/>Mountain View 94043, USA
+<br/>Neural Prosthetics Lab
+<br/>Department of Electrical and
+<br/>Computer Engineering
+<br/><b>McGill University</b><br/>Montreal H3A 2A7, Canada
+<br/>Neural Prosthetics Lab
+<br/>Department of Electrical and
+<br/>Computer Engineering
+<br/><b>McGill University</b><br/>Montreal H3A 2A7, Canada
+<br/><b>Honda Research Institute USA</b><br/>425 National Ave, Suite 100
+<br/>Mountain View 94043, USA
+</td><td>('1708927', 'Mitchel Benovoy', 'mitchel benovoy')<br/>('2003327', 'Sam Musallam', 'sam musallam')<br/>('1692465', 'Victor Ng-Thow-Hing', 'victor ng-thow-hing')</td><td>RSarvadevabhatla@hra.com
+<br/>benovoym@mcgill.ca
+<br/>sam.musallam@mcgill.ca
+<br/>vngthowhing@hra.com
+</td></tr><tr><td>f2c30594d917ea915028668bc2a481371a72a14d</td><td>Scene Understanding Using Internet Photo Collections
+<br/>A dissertation submitted in partial fulfillment
+<br/>of the requirements for the degree of
+<br/>Doctor of Philosophy
+<br/><b>University of Washington</b><br/>2010
+<br/>Program Authorized to Offer Degree: Computer Science and Engineering
+</td><td>('35577716', 'Ian Simon', 'ian simon')</td><td></td></tr><tr><td>f2ad9b43bac8c2bae9dea694f6a4e44c760e63da</td><td>A Study on Illumination Invariant Face Recognition Methods
+<br/>Based on Multiple Eigenspaces
+<br/>1National Laboratory for Novel Software Technology
+<br/><b>Nanjing University, Nanjing 210093, P.R.China</b><br/>2Department of Computer Science
+<br/><b>North Dakota State University, Fargo, ND58105, USA</b></td><td>('7878359', 'Wu-Jun Li', 'wu-jun li')<br/>('2697799', 'Chong-Jun Wang', 'chong-jun wang')<br/>('1737124', 'Bin Luo', 'bin luo')</td><td>Email: {liwujun, chjwang}@ai.nju.edu.cn
+<br/>Email: Dianxiang.xu@ndsu.nodak.edu
+</td></tr><tr><td>f2e9494d0dca9fb6b274107032781d435a508de6</td><td></td><td></td><td></td></tr><tr><td>f2c568fe945e5743635c13fe5535af157b1903d1</td><td></td><td></td><td></td></tr><tr><td>f2a7f9bd040aa8ea87672d38606a84c31163e171</td><td>Human Action Recognition without Human
+<br/><b>National Institute of Advanced Industrial Science and Technology (AIST</b><br/>Tsukuba, Ibaraki, Japan
+</td><td>('1713046', 'Yun He', 'yun he')<br/>('3393640', 'Soma Shirakabe', 'soma shirakabe')<br/>('1732705', 'Yutaka Satoh', 'yutaka satoh')<br/>('1730200', 'Hirokatsu Kataoka', 'hirokatsu kataoka')</td><td>{yun.he, shirakabe-s, yu.satou, hirokatsu.kataoka}@aist.go.jp
+</td></tr><tr><td>f257300b2b4141aab73f93c146bf94846aef5fa1</td><td>Eigen Evolution Pooling for Human Action Recognition
+<br/><b>Stony Brook University, Stony Brook, NY 11794, USA</b></td><td>('2295608', 'Yang Wang', 'yang wang')<br/>('49701507', 'Vinh Tran', 'vinh tran')<br/>('2356016', 'Minh Hoai', 'minh hoai')</td><td>{wang33, tquangvinh, minhhoai}@cs.stonybrook.edu
+</td></tr><tr><td>f20e0eefd007bc310d2a753ba526d33a8aba812c</td><td>Lee et al.: RGB-D FACE RECOGNITION WITH A DEEP LEARNING APPROACH
+<br/>Accurate and robust face recognition from
+<br/>RGB-D images with a deep learning
+<br/>approach
+<br/>Yuancheng Lee
+<br/>http://cv.cs.nthu.edu.tw/php/people/profile.php?uid=150
+<br/>http://cv.cs.nthu.edu.tw/php/people/profile.php?uid=153
+<br/>Ching-Wei Tseng
+<br/>http://cv.cs.nthu.edu.tw/php/people/profile.php?uid=156
+<br/>Computer Vision Lab,
+<br/>Department of
+<br/>Computer Science,
+<br/>National Tsing Hua
+<br/><b>University</b><br/>Hsinchu, Taiwan
+<br/>http://www.cs.nthu.edu.tw/~lai/
+</td><td>('7557765', 'Jiancong Chen', 'jiancong chen')<br/>('1696527', 'Shang-Hong Lai', 'shang-hong lai')</td><td></td></tr><tr><td>f26097a1a479fb6f32b27a93f8f32609cfe30fdc</td><td></td><td></td><td></td></tr><tr><td>f231046d5f5d87e2ca5fae88f41e8d74964e8f4f</td><td>We are IntechOpen,
+<br/>the first native scientific
+<br/>publisher of Open Access books
+<br/>3,350
+<br/>108,000
+<br/>1.7 M
+<br/>Open access books available
+<br/>International authors and editors
+<br/>Downloads
+<br/>Our authors are among the
+<br/>151
+<br/>Countries delivered to
+<br/>TOP 1%
+<br/>12.2%
+<br/>most cited scientists
+<br/>Contributors from top 500 universities
+<br/>Selection of our books indexed in the Book Citation Index
+<br/>in Web of Science™ Core Collection (BKCI)
+<br/>Interested in publishing with us?
+<br/>Numbers displayed above are based on latest data collected.
+<br/>For more information visit www.intechopen.com
+</td><td></td><td>Contact book.department@intechopen.com
+</td></tr><tr><td>f28b7d62208fdaaa658716403106a2b0b527e763</td><td>Clustering-driven Deep Embedding with Pairwise Constraints
+<br/><b>JACOB GOLDBERGER, Bar-Ilan University</b><br/>Fig. 1. Employing deep embeddings for clustering 3D shapes. Above, we use PCA to visualize the output embedding of point clouds of chairs. We also highlight
+<br/>(in unique colors) a few random clusters and display a few representative chairs from these clusters.
+<br/>Recently, there has been increasing interest to leverage the competence
+<br/>of neural networks to analyze data. In particular, new clustering meth-
+<br/>ods that employ deep embeddings have been presented. In this paper, we
+<br/>depart from centroid-based models and suggest a new framework, called
+<br/>Clustering-driven deep embedding with PAirwise Constraints (CPAC), for
+<br/>non-parametric clustering using a neural network. We present a clustering-
+<br/>driven embedding based on a Siamese network that encourages pairs of data
+<br/>points to output similar representations in the latent space. Our pair-based
+<br/>model allows augmenting the information with labeled pairs to constitute a
+<br/>semi-supervised framework. Our approach is based on analyzing the losses
+<br/>associated with each pair to refine the set of constraints. We show that clus-
+<br/>tering performance increases when using this scheme, even with a limited
+<br/>amount of user queries. We demonstrate how our architecture is adapted
+<br/>for various types of data and present the first deep framework to cluster 3D
+<br/>shapes.
+<br/>INTRODUCTION
+<br/>Autoencoders provide means to analyze data without supervision.
+<br/>Autoencoders based on deep neural networks include non-linear
+<br/>neurons which significantly strengthen the power of the analysis.
+<br/>The key idea is that the encoders project the data into an embedding
+<br/>latent space, where the L2 proximity among the projected elements
+<br/>better expresses their similarity. To further enhance the data prox-
+<br/>imity in the embedding space, the encoder can be encouraged to
+<br/>form tight clusters in the embedding space. Xie et al. [2016] have
+<br/>presented an unsupervised embedding driven by a centroid-based
+<br/>clustering. They have shown that their deep embedding leads to
+<br/>better clustering of the data. More advanced clustering-driven em-
+<br/>bedding techniques have been recently presented [Dizaji et al. 2017;
+<br/>Yang et al. 2016]. These techniques are all centroid-based and para-
+<br/>metric, in the sense that the number of clusters is known a-priori.
+<br/>In this paper, we present a clustering-driven embedding technique
+<br/>that allows semi-supervision. The idea is to depart from centroid-
+<br/>based methods and use pairwise constraints to drive the clustering.
+<br/>Most, or all the constraints, can be learned with no supervision,
+<br/>while possibly a small portion of the data is supervised. More specifi-
+<br/>cally, we adopt robust continuous clustering (RCC) [Shah and Koltun
+<br/>2017] as a driving mechanism to encourage a tight clustering of the
+<br/>embedded data.
+<br/>The idea is to extract pairwise constraints using a mutual k-
+<br/>nearest neighbors analysis, and use these pairs as must-link con-
+<br/>straints. With no supervision, the set of constraints is imperfect
+<br/>and contains false positive pairs on one hand. Our technique allows
+<br/>removing false positive pairs and strengthening true positive pairs
+<br/>actively by a user. We present an approach that analyzes the losses
+<br/>associated with the pairs to form a set of false positive candidates.
+<br/>See Figure 2(b)-(c) for a visualization of the distribution of the data
+</td><td>('40901326', 'Sharon Fogel', 'sharon fogel')<br/>('1793313', 'Hadar Averbuch-Elor', 'hadar averbuch-elor')<br/>('1701009', 'Daniel Cohen-Or', 'daniel cohen-or')</td><td></td></tr><tr><td>f214bcc6ecc3309e2efefdc21062441328ff6081</td><td></td><td></td><td></td></tr><tr><td>f5149fb6b455a73734f1252a96a9ce5caa95ae02</td><td>Low-Rank-Sparse Subspace Representation for Robust Regression
+<br/><b>Harbin Institute of Technology</b><br/><b>Harbin Institute of Technology;Shenzhen University</b><br/>Harbin, China
+<br/>Harbin, China;Shenzhen, China
+<br/><b>The University of Sydney</b><br/><b>Harbin Institute of Technology</b><br/>Sydney, Australia
+<br/>Harbin, China
+</td><td>('1747644', 'Yongqiang Zhang', 'yongqiang zhang')<br/>('1887263', 'Daming Shi', 'daming shi')<br/>('1750488', 'Junbin Gao', 'junbin gao')<br/>('2862899', 'Dansong Cheng', 'dansong cheng')</td><td>seekever@foxmail.com
+<br/>d.m.shi@hotmail.com
+<br/>junbin.gao@sydney.edu.au
+<br/>cdsinhit@hit.edu.cn
+</td></tr><tr><td>f58d584c4ac93b4e7620ef6e5a8f20c6f6da295e</td><td>Proceedings of the Thirty-First AAAI Conference on Artificial Intelligence (AAAI-17)
+<br/>Feature Selection Guided Auto-Encoder
+<br/>1Department of Electrical & Computer Engineering,
+<br/><b>College of Computer and Information Science</b><br/><b>Northeastern University, Boston, MA, USA</b></td><td>('47673521', 'Shuyang Wang', 'shuyang wang')<br/>('2788685', 'Zhengming Ding', 'zhengming ding')<br/>('1708679', 'Yun Fu', 'yun fu')</td><td>{shuyangwang, allanding, yunfu}@ece.neu.edu
+</td></tr><tr><td>f5eb0cf9c57716618fab8e24e841f9536057a28a</td><td>Rethinking Feature Distribution for Loss Functions in Image Classification
+<br/><b>Tsinghua University, Beijing, China</b><br/><b>University of at Urbana-Champaign, Illinois, USA</b></td><td>('47718901', 'Weitao Wan', 'weitao wan')<br/>('1752427', 'Jiansheng Chen', 'jiansheng chen')<br/>('8802368', 'Yuanyi Zhong', 'yuanyi zhong')<br/>('2641581', 'Tianpeng Li', 'tianpeng li')</td><td>wwt16@mails.tsinghua.edu.cn
+<br/>yuanyiz2@illinois.edu
+<br/>ltp16@mails.tsinghua.edu.cn
+<br/>jschenthu@mail.tsinghua.edu.cn
+</td></tr><tr><td>f571fe3f753765cf695b75b1bd8bed37524a52d2</td><td>Submodular Attribute Selection for Action
+<br/>Recognition in Video
+<br/>Jinging Zheng
+<br/><b>UMIACS, University of Maryland</b><br/><b>College Park, MD, USA</b><br/>Noah’s Ark Lab
+<br/>Huawei Technologies
+<br/><b>UMIACS, University of Maryland</b><br/><b>National Institute of Standards and Technology</b><br/><b>College Park, MD, USA</b><br/>Gaithersburg, MD, USA
+</td><td>('34145947', 'Zhuolin Jiang', 'zhuolin jiang')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')<br/>('32028519', 'P. Jonathon Phillips', 'p. jonathon phillips')</td><td>zjngjng@umiacs.umd.edu
+<br/>zhuolin.jiang@huawei.com
+<br/>rama@umiacs.umd.edu
+<br/>jonathon.phillips@nist.gov
+</td></tr><tr><td>f5fae7810a33ed67852ad6a3e0144cb278b24b41</td><td>Multilingual Gender Classification with Multi-view
+<br/>Deep Learning
+<br/>Notebook for PAN at CLEF 2018
+<br/><b>Jo ef Stefan Institute, Jamova 39, 1000 Ljubljana, Slovenia</b><br/>2 Jožef Stefan International Postgraduate School, Jamova 39, 1000 Ljubljana, Slovenia
+<br/><b>USHER Institute, University of Edinburgh, United Kingdom</b></td><td>('22684661', 'Matej Martinc', 'matej martinc')<br/>('40235216', 'Senja Pollak', 'senja pollak')</td><td>{matej.martinc,blaz.skrlj,senja.pollak}@ijs.si
+</td></tr><tr><td>f5af4e9086b0c3aee942cb93ece5820bdc9c9748</td><td>ENHANCING PERSON ANNOTATION
+<br/>FOR PERSONAL PHOTO MANAGEMENT
+<br/>USING CONTENT AND CONTEXT
+<br/>BASED TECHNOLOGIES
+<br/>By
+<br/>THESIS DIRECTED BY: PROF. NOEL E. O’CONNOR
+<br/>A THESIS SUBMITTED IN PARTIAL FULFILLMENT OF THE REQUIREMENTS FOR THE
+<br/>DEGREE OF DOCTOR OF PHILOSOPHY
+<br/>September 2008
+<br/>SCHOOL OF ELECTRONIC ENGINEERING
+<br/><b>DUBLIN CITY UNIVERSITY</b></td><td>('2668569', 'Saman H. Cooray', 'saman h. cooray')</td><td></td></tr><tr><td>f5770dd225501ff3764f9023f19a76fad28127d4</td><td>Real Time Online Facial Expression Transfer
+<br/>with Single Video Camera
+</td><td></td><td></td></tr><tr><td>f5aee1529b98136194ef80961ba1a6de646645fe</td><td>Large-Scale Learning of
+<br/>Discriminative Image Representations
+<br/>D.Phil Thesis
+<br/>Robotics Research Group
+<br/>Department of Engineering Science
+<br/><b>University of Oxford</b><br/>Supervisors:
+<br/>Professor Andrew Zisserman
+<br/>Doctor Antonio Criminisi
+<br/><b>Mans eld College</b><br/>Trinity Term, 2013
+</td><td>('34838386', 'Karen Simonyan', 'karen simonyan')</td><td></td></tr><tr><td>f52efc206432a0cb860155c6d92c7bab962757de</td><td>MUGSHOT DATABASE ACQUISITION IN VIDEO SURVEILLANCE NETWORKS USING
+<br/>INCREMENTAL AUTO-CLUSTERING QUALITY MEASURES
+<br/>Computer Science Department
+<br/><b>University of Kentucky</b><br/>Lexington, KY, 40508
+</td><td>('3237043', 'Quanren Xiong', 'quanren xiong')</td><td></td></tr><tr><td>f519723238701849f1160d5a9cedebd31017da89</td><td>Impact of multi-focused images on recognition of soft biometric traits
+<br/>aEURECOM, Campus SophiaTech, 450 Route des Chappes, CS 50193 - 06904 Biot Sophia
+<br/>
+<br/>Antipolis cedex, FRANCE
+</td><td>('24362694', 'V. Chiesa', 'v. chiesa')</td><td></td></tr><tr><td>f5eb411217f729ad7ae84bfd4aeb3dedb850206a</td><td>Tackling Low Resolution for Better Scene Understanding
+<br/>Thesis submitted in partial fulfillment
+<br/>of the requirements for the degree of
+<br/>MS in Computer Science and Engineering
+<br/>By Research
+<br/>by
+<br/>201202172
+<br/><b>International Institute of Information Technology</b><br/>Hyderabad - 500 032, INDIA
+<br/>July 2018
+</td><td>('41033644', 'Harish Krishna', 'harish krishna')</td><td>harishkrishna.v@research.iiit.ac.in
+</td></tr><tr><td>f558af209dd4c48e4b2f551b01065a6435c3ef33</td><td>International Journal of Emerging Technology in Computer Science & Electronics (IJETCSE)
+<br/>ISSN: 0976-1353 Volume 23 Issue 1 –JUNE 2016.
+<br/>AN ENHANCED ATTRIBUTE
+<br/>RERANKING DESIGN FOR WEB IMAGE
+<br/>SEARCH
+<br/>#Student,Cse, CIET, Lam,Guntur, India
+<br/>* Assistant Professort,Cse, CIET, Lam,Guntur , India
+</td><td>('4384318', 'G K Kishore Babu', 'g k kishore babu')</td><td></td></tr><tr><td>e378ce25579f3676ca50c8f6454e92a886b9e4d7</td><td>Robust Video Super-Resolution with Learned Temporal Dynamics
+<br/><b>University of Illinois at Urbana-Champaign 2Adobe Research</b><br/><b>Facebook 4Texas AandM University 5IBM Research</b></td><td>('1771885', 'Ding Liu', 'ding liu')<br/>('2969311', 'Zhangyang Wang', 'zhangyang wang')</td><td></td></tr><tr><td>e393a038d520a073b9835df7a3ff104ad610c552</td><td>Automatic temporal segment
+<br/>detection via bilateral long short-
+<br/>term memory recurrent neural
+<br/>networks
+<br/>detection via bilateral long short-term memory recurrent neural networks,” J.
+<br/>Electron. Imaging 26(2), 020501 (2017), doi: 10.1117/1.JEI.26.2.020501.
+<br/>Downloaded From: http://electronicimaging.spiedigitallibrary.org/ on 03/03/2017 Terms of Use: http://spiedigitallibrary.org/ss/termsofuse.aspx </td><td>('49447269', 'Bo Sun', 'bo sun')<br/>('7886608', 'Siming Cao', 'siming cao')<br/>('49264106', 'Jun He', 'jun he')<br/>('8834504', 'Lejun Yu', 'lejun yu')<br/>('2089565', 'Liandong Li', 'liandong li')<br/>('49447269', 'Bo Sun', 'bo sun')<br/>('7886608', 'Siming Cao', 'siming cao')<br/>('49264106', 'Jun He', 'jun he')<br/>('8834504', 'Lejun Yu', 'lejun yu')<br/>('2089565', 'Liandong Li', 'liandong li')</td><td></td></tr><tr><td>e35b09879a7df814b2be14d9102c4508e4db458b</td><td>Optimal Sensor Placement and
+<br/>Enhanced Sparsity for Classification
+<br/><b>University of Washington, Seattle, WA 98195, United States</b><br/><b>University of Washington, Seattle, WA 98195, United States</b><br/><b>Institute for Disease Modeling, Intellectual Ventures Laboratory, Bellevue, WA 98004, United States</b></td><td>('1824880', 'Bingni W. Brunton', 'bingni w. brunton')<br/>('3083169', 'Steven L. Brunton', 'steven l. brunton')<br/>('2424683', 'Joshua L. Proctor', 'joshua l. proctor')<br/>('1937069', 'J. Nathan Kutz', 'j. nathan kutz')</td><td></td></tr><tr><td>e3b324101157daede3b4d16bdc9c2388e849c7d4</td><td>Robust Real-Time 3D Face Tracking from RGBD Videos under Extreme Pose,
+<br/>Depth, and Expression Variations
+<br/>Hai X. Pham
+<br/><b>Rutgers University, USA</b></td><td>('1736042', 'Vladimir Pavlovic', 'vladimir pavlovic')</td><td>{hxp1,vladimir}@cs.rutgers.edu
+</td></tr><tr><td>e3657ab4129a7570230ff25ae7fbaccb4ba9950c</td><td></td><td></td><td></td></tr><tr><td>e315959d6e806c8fbfc91f072c322fb26ce0862b</td><td>An Efficient Face Recognition System Based on Sub-Window
+<br/>International Journal of Soft Computing and Engineering (IJSCE)
+<br/>ISSN: 2231-2307, Volume-1, Issue-6, January 2012
+<br/>Extraction Algorithm
+</td><td>('1696227', 'Manish Gupta', 'manish gupta')<br/>('36776003', 'Govind sharma', 'govind sharma')</td><td></td></tr><tr><td>e3c011d08d04c934197b2a4804c90be55e21d572</td><td>How to Train Triplet Networks with 100K Identities?
+<br/>Orion Star
+<br/>Beijing, China
+<br/>Orion Star
+<br/>Beijing, China
+<br/>Orion Star
+<br/>Beijing, China
+</td><td>('1747751', 'Chong Wang', 'chong wang')<br/>('46447079', 'Xue Zhang', 'xue zhang')<br/>('26403761', 'Xipeng Lan', 'xipeng lan')</td><td>chongwang.nlpr@gmail.com
+<br/>yuannixue@126.com
+<br/>xipeng.lan@gmail.com
+</td></tr><tr><td>e39a0834122e08ba28e7b411db896d0fdbbad9ba</td><td>1368
+<br/>Maximum Likelihood Estimation of Depth Maps
+<br/>Using Photometric Stereo
+</td><td>('2964822', 'Adam P. Harrison', 'adam p. harrison')<br/>('39367958', 'Dileepan Joseph', 'dileepan joseph')</td><td></td></tr><tr><td>e3bb83684817c7815f5005561a85c23942b1f46b</td><td>Face Verification using Correlation Filters
+<br/>Electrical and Computer Eng. Dept,
+<br/>Electrical and Computer Eng. Dept,
+<br/>Electrical and Computer Eng. Dept,
+<br/><b>Carnegie Mellon University</b><br/>Pittsburgh, PA 15213, U.S.A.
+<br/><b>Carnegie Mellon University</b><br/>Pittsburgh, PA 15213, U.S.A.
+<br/><b>Carnegie Mellon University</b><br/>Pittsburgh, PA 15213, U.S.A.
+</td><td>('1794486', 'Marios Savvides', 'marios savvides')<br/>('36754879', 'Vijaya Kumar', 'vijaya kumar')<br/>('34607721', 'Pradeep Khosla', 'pradeep khosla')</td><td>msavvid@ri.cmu.edu
+<br/>kumar@ece.cmu.edu
+<br/>pkk@ece.cmu.edu
+</td></tr><tr><td>e30dc2abac4ecc48aa51863858f6f60c7afdf82a</td><td>Facial Signs and Psycho-physical Status Estimation for Well-being
+<br/>Assessment
+<br/>F. Chiarugi, G. Iatraki, E. Christinaki, D. Manousos, G. Giannakakis, M. Pediaditis,
+<br/>A. Pampouchidou, K. Marias and M. Tsiknakis
+<br/><b>Computational Medicine Laboratory, Institute of Computer Science, Foundation for Research and Technology - Hellas</b><br/>70013 Vasilika Vouton, Heraklion, Crete, Greece
+<br/>Keywords:
+<br/>Facial Expression, Stress, Anxiety, Feature Selection, Well-being Evaluation, FACS, FAPS, Classification.
+</td><td></td><td>{chiarugi, giatraki, echrist, mandim, ggian, mped, pampouch, kmarias, tsiknaki}@ics.forth.gr
+</td></tr><tr><td>e3e2c106ccbd668fb9fca851498c662add257036</td><td>Appearance, Context and Co-occurrence Ensembles for
+<br/>Identity Recognition in Personal Photo Collections
+<br/><b>University of Colorado at Colorado Springs</b><br/>T.E.Boult1
+<br/>2AT&T Labs-Research, Middletown, NJ
+</td><td>('27469806', 'Archana Sapkota', 'archana sapkota')<br/>('33692583', 'Raghuraman Gopalan', 'raghuraman gopalan')<br/>('2900213', 'Eric Zavesky', 'eric zavesky')</td><td>1 {asapkota,tboult}@vast.uccs.edu
+<br/>2{raghuram,ezavesky}@research.att.com
+</td></tr><tr><td>e379e73e11868abb1728c3acdc77e2c51673eb0d</td><td>In S.Li and A.Jain, (ed). Handbook of Face Recognition. Springer-Verlag, 2005
+<br/>Face Databases
+<br/><b>The Robotics Inistitute, Carnegie Mellon University</b><br/>5000 Forbes Avenue, Pittsburgh, PA 15213
+<br/>Because of its nonrigidity and complex three-dimensional (3D) structure, the appearance of a face is affected by a large
+<br/>number of factors including identity, face pose, illumination, facial expression, age, occlusion, and facial hair. The develop-
+<br/>ment of algorithms robust to these variations requires databases of sufficient size that include carefully controlled variations
+<br/>of these factors. Furthermore, common databases are necessary to comparatively evaluate algorithms. Collecting a high
+<br/>quality database is a resource-intensive task: but the availability of public face databases is important for the advancement of
+<br/>the field. In this chapter we review 27 publicly available databases for face recognition, face detection, and facial expression
+<br/>analysis.
+<br/>1 Databases for Face Recognition
+<br/>Face recognition continues to be one of the most popular research areas of computer vision and machine learning. Along
+<br/>with the development of face recognition algorithms, a comparatively large number of face databases have been collected.
+<br/>However, many of these databases are tailored to the specific needs of the algorithm under development. In this section
+<br/>we review publicly available databases that are of demonstrated use to others in the community. At the beginning of each
+<br/><b>subsection a table summarizing the key features of the database is provided, including (where available) the number of</b><br/>subjects, recording conditions, image resolution, and total number of images. Table 1 gives an overview of the recording
+<br/>conditions for all databases discussed in this section. Owing to space constraints not all databases are discussed at the same
+<br/>level of detail. Abbreviated descriptions of a number of mostly older databases are included in Section 1.13. The scope of
+<br/>this section is limited to databases containing full face imagery. Note, however, that there are databases of subface images
+<br/>available, such as the recently released CASIA Iris database [23].
+<br/>1.1 AR Database
+<br/>No. of subjects
+<br/>116
+<br/>Conditions
+<br/>Facial expressions
+<br/>Illumination
+<br/>Occlusion
+<br/>Time
+<br/>Image Resolution
+<br/>No. of Images
+<br/>768 × 576
+<br/>3288
+<br/>http://rvl1.ecn.purdue.edu/˜aleix/aleix face DB.html
+<br/>The AR database was collected at the Computer Vision Center in Barcelona, Spain in 1998 [25]. It contains images of
+<br/>116 individuals (63 men and 53 women). The imaging and recording conditions (camera parameters, illumination setting,
+<br/>camera distance) were carefully controlled and constantly recalibrated to ensure that settings are identical across subjects.
+<br/>The resulting RGB color images are 768 × 576 pixels in size. The subjects were recorded twice at a 2–week interval. During
+<br/>each session 13 conditions with varying facial expressions, illumination and occlusion were captured. Figure 1 shows an
+<br/>example for each condition. So far, more than 200 research groups have accessed the database.
+</td><td>('33731953', 'Ralph Gross', 'ralph gross')</td><td>Email: {rgross}@cs.cmu.edu
+</td></tr><tr><td>e39a66a6d1c5e753f8e6c33cd5d335f9bc9c07fa</td><td><b>University of Massachusetts - Amherst</b><br/>Dissertations
+<br/>5-1-2012
+<br/>Dissertations and Theses
+<br/>Weakly Supervised Learning for Unconstrained
+<br/>Face Processing
+<br/>Follow this and additional works at: http://scholarworks.umass.edu/open_access_dissertations
+<br/>Recommended Citation
+<br/>Huang, Gary B., "Weakly Supervised Learning for Unconstrained Face Processing" (2012). Dissertations. Paper 559.
+</td><td>('3219900', 'Gary B. Huang', 'gary b. huang')</td><td>ScholarWorks@UMass Amherst
+<br/>University of Massachusetts - Amherst, garybhuang@gmail.com
+<br/>This Open Access Dissertation is brought to you for free and open access by the Dissertations and Theses at ScholarWorks@UMass Amherst. It has
+<br/>been accepted for inclusion in Dissertations by an authorized administrator of ScholarWorks@UMass Amherst. For more information, please contact
+<br/>scholarworks@library.umass.edu.
+</td></tr><tr><td>e3a6e9ddbbfc4c5160082338d46808cea839848a</td><td>Vision-Based Classification of Developmental Disorders
+<br/>Using Eye-Movements
+<br/><b>Stanford University, USA</b><br/><b>Stanford University, USA</b><br/><b>Stanford University, USA</b><br/><b>Stanford University, USA</b><br/><b>Stanford University, USA</b></td><td>('3147852', 'Guido Pusiol', 'guido pusiol')<br/>('1811529', 'Andre Esteva', 'andre esteva')<br/>('3472674', 'Arnold Milstein', 'arnold milstein')<br/>('3216322', 'Li Fei-Fei', 'li fei-fei')</td><td></td></tr><tr><td>e3c8e49ffa7beceffca3f7f276c27ae6d29b35db</td><td>Families in the Wild (FIW): Large-Scale Kinship Image
+<br/>Database and Benchmarks
+<br/><b>Northeastern University, Boston, USA</b><br/><b>College of Computer and Information Science, Northeastern University, Boston, USA</b></td><td>('4056993', 'Joseph P. Robinson', 'joseph p. robinson')<br/>('49248003', 'Ming Shao', 'ming shao')<br/>('47096713', 'Yue Wu', 'yue wu')<br/>('1708679', 'Yun Fu', 'yun fu')</td><td>{jrobins1, mingshao, yuewu, yunfu}@ece.neu.edu
+</td></tr><tr><td>e38371b69be4f341baa95bc854584e99b67c6d3a</td><td>DYAN: A Dynamical Atoms-Based Network
+<br/>For Video Prediction(cid:63)
+<br/><b>Electrical and Computer Engineering, Northeastern University, Boston, MA</b><br/>http://robustsystems.coe.neu.edu
+</td><td>('40366599', 'WenQian Liu', 'wenqian liu')<br/>('1785252', 'Abhishek Sharma', 'abhishek sharma')<br/>('30929906', 'Octavia Camps', 'octavia camps')<br/>('1687866', 'Mario Sznaier', 'mario sznaier')</td><td>liu.wenqi,sharma.abhis@husky.neu.edu, camps,msznaier@northeastern.edu
+</td></tr><tr><td>e3917d6935586b90baae18d938295e5b089b5c62</td><td>152
+<br/>Face Localization and Authentication
+<br/>Using Color and Depth Images
+</td><td>('1807962', 'Filareti Tsalakanidou', 'filareti tsalakanidou')<br/>('1744180', 'Sotiris Malassiotis', 'sotiris malassiotis')<br/>('1721460', 'Michael G. Strintzis', 'michael g. strintzis')</td><td></td></tr><tr><td>e328d19027297ac796aae2470e438fe0bd334449</td><td>Automatic Micro-expression Recognition from
+<br/>Long Video using a Single Spotted Apex
+<br/>1 Faculty of Computer Science & Information Technology,
+<br/><b>University of Malaya, Kuala Lumpur, Malaysia</b><br/>2 Faculty of Computing & Informatics,
+<br/><b>Multimedia University, Cyberjaya, Malaysia</b><br/>3 Faculty of Engineering,
+<br/><b>Multimedia University, Cyberjaya, Malaysia</b></td><td>('39888137', 'Sze-Teng Liong', 'sze-teng liong')<br/>('2339975', 'John See', 'john see')<br/>('1713159', 'KokSheik Wong', 'koksheik wong')</td><td>szeteng1206@hotmail.com,koksheik@um.edu.my
+<br/>johnsee@mmu.edu.my
+<br/>raphael@mmu.edu.my
+</td></tr><tr><td>e3144f39f473e238374dd4005c8b83e19764ae9e</td><td>Next-Flow: Hybrid Multi-Tasking with Next-Frame Prediction to Boost
+<br/>Optical-Flow Estimation in the Wild
+<br/><b>University of Freiburg</b><br/>Germany
+</td><td>('31656404', 'Nima Sedaghat', 'nima sedaghat')</td><td>nima@cs.uni-freiburg.de
+</td></tr><tr><td>e3a6e5a573619a97bd6662b652ea7d088ec0b352</td><td>Compare and Contrast: Learning Prominent Visual Differences
+<br/><b>The University of Texas at Austin</b></td><td>('50357985', 'Steven Chen', 'steven chen')<br/>('1794409', 'Kristen Grauman', 'kristen grauman')</td><td></td></tr><tr><td>cfeb26245b57dd10de8f187506d4ed5ce1e2b7dd</td><td>CapsNet comparative performance evaluation for image
+<br/>classification
+<br/><b>University of Waterloo, ON, Canada</b></td><td>('30421594', 'Rinat Mukhometzianov', 'rinat mukhometzianov')<br/>('36957611', 'Juan Carrillo', 'juan carrillo')</td><td></td></tr><tr><td>cffebdf88e406c27b892857d1520cb2d7ccda573</td><td>LEARNING FROM LARGE-SCALE VISUAL DATA
+<br/>FOR ROBOTS
+<br/>A Dissertation
+<br/>Presented to the Faculty of the Graduate School
+<br/><b>of Cornell University</b><br/>in Partial Fulfillment of the Requirements for the Degree of
+<br/>Doctor of Philosophy
+<br/>by
+<br/>Ozan S¸ener
+<br/>August 2016
+</td><td></td><td></td></tr><tr><td>cfa572cd6ba8dfc2ee8ac3cc7be19b3abff1a8a2</td><td></td><td></td><td></td></tr><tr><td>cfffae38fe34e29d47e6deccfd259788176dc213</td><td>TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. X, NO. X, DECEMBER 2012
+<br/>Matrix Completion for Weakly-supervised
+<br/>Multi-label Image Classification
+</td><td>('1707876', 'Fernando De la Torre', 'fernando de la torre')<br/>('2884203', 'Alexandre Bernardino', 'alexandre bernardino')</td><td></td></tr><tr><td>cfd4004054399f3a5f536df71f9b9987f060f434</td><td>IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. ??, NO. ??, ?? 20??
+<br/>Person Recognition in Personal Photo Collections
+</td><td>('2390510', 'Seong Joon Oh', 'seong joon oh')<br/>('1798000', 'Rodrigo Benenson', 'rodrigo benenson')<br/>('1739548', 'Mario Fritz', 'mario fritz')<br/>('1697100', 'Bernt Schiele', 'bernt schiele')</td><td></td></tr><tr><td>cfd933f71f4a69625390819b7645598867900eab</td><td>INTERNATIONAL JOURNAL OF TECHNOLOGY ENHANCEMENTS AND EMERGING ENGINEERING RESEARCH, VOL 3, ISSUE 03 55
+<br/>ISSN 2347-4289
+<br/>Person Authentication Using Face And Palm Vein:
+<br/>A Survey Of Recognition And Fusion Techniques
+<br/><b>College of Engineering, Pune, India</b><br/>Image Processing & Machine Vision Section, Electronics & Instrumentation Services Division, BARC
+</td><td>('38561481', 'Dhanashree Vaidya', 'dhanashree vaidya')<br/>('2623250', 'Madhuri A. Joshi', 'madhuri a. joshi')</td><td>Email: preethimedu@gmail.com, dvaidya33@gmail.com, hod.extc@coep.ac.in, maj.extc@coep.ac.in, skar@barc.gov.in
+</td></tr><tr><td>cfb8bc66502fb5f941ecdb22aec1fdbfdb73adce</td><td></td><td></td><td></td></tr><tr><td>cf875336d5a196ce0981e2e2ae9602580f3f6243</td><td>7 What 1
+<br/>Rosalind W. Picard
+<br/>It Mean for a Computer to "Have" Emotions?
+<br/>There is a lot of talk about giving machines emotions, some of
+<br/>it fluff. Recently at a large technical meeting, a researcher stood up
+<br/>and talked of how a Bamey stuffed animal [the purple dinosaur for
+<br/>kids) "has emotions." He did not define what he meant by this, but
+<br/>after repeating it several times, it became apparent that children
+<br/>attributed emotions to Barney, and that Barney had deliberately
+<br/>expressive behaviors that would encourage the kids to think. Bar-
+<br/>ney had emotions. But kids have attributed emotions to dolls and
+<br/>stuffed animals for as long a s we know; and most of my technical
+<br/>colleagues would agree that such toys have never had and still do
+<br/>not have emotions. What is different now that prompts a researcher
+<br/>to make such a claim? Is the computational plush an example of a
+<br/>computer that really does have emotions?
+<br/>If not Barney, then what would be an example of a computa-
+<br/>tional system that has emotions? I am not a philosopher, and this
+<br/>paper will not be a discussion of the meaning of this question in
+<br/>any philosophical sense. However, as an engineer I am interested
+<br/>in what capabilities I would require a machine to have before I
+<br/>would say that it "has emotions," if that is even possible.
+<br/>Theorists still grappl~ with the problem of defining emotion,
+<br/>after many decades of discussion, and no clean definition looks
+<br/>likely to emerge. Even without a precise definition, one can still
+<br/>begin to say concrete things about certain components of emotion,
+<br/>at least based on what is known about human and animal emo-
+<br/>tions. Of course, much is still u d a o w n about human emotions, so
+<br/>we are nowhere near being able to model them, much less dupli-
+<br/>cate all their functions in machines.'~lso, all scientific findings are
+<br/>subject to revision-history has certainly taught us humility, that
+<br/>what scientists believed to be true at one point has often been
+<br/>changed at a later date.
+<br/>I wish to begin by mentioning four motivations for giving
+<br/>machines certain emotional abilities (and there are more). One goal
+<br/>is to build robots and synthetic characters that can emulate living
+<br/>humans and animals-for example, to build a humanoid robot. A
+<br/>I
+</td><td></td><td></td></tr><tr><td>cfd8c66e71e98410f564babeb1c5fd6f77182c55</td><td>Comparative Study of Coarse Head Pose Estimation
+<br/><b>IBM T.J. Watson Research Center</b><br/>Hawthorne, NY 10532
+</td><td>('34609371', 'Lisa M. Brown', 'lisa m. brown')<br/>('40383812', 'Ying-Li Tian', 'ying-li tian')</td><td>{lisabr,yltian}@us.ibm.com
+</td></tr><tr><td>cf54a133c89f730adc5ea12c3ac646971120781c</td><td></td><td></td><td></td></tr><tr><td>cfbb2d32586b58f5681e459afd236380acd86e28</td><td>Improving Alignment of Faces for Recognition
+<br/>Christopher J. Pal
+<br/>D´epartement de g´enie informatique et g´enie logiciel
+<br/>´Ecole Polytechnique de Montr´eal,
+<br/>D´epartement de g´enie informatique et g´enie logiciel
+<br/>´Ecole Polytechnique de Montr´eal,
+<br/>Qu´ebec, Canada
+<br/>Qu´ebec, Canada
+</td><td>('2811524', 'Md. Kamrul Hasan', 'md. kamrul hasan')</td><td>md-kamrul.hasan@polymtl.ca
+<br/>christopher.pal@polymtl.ca
+</td></tr><tr><td>cfa92e17809e8d20ebc73b4e531a1b106d02b38c</td><td>Advances in Data Analysis and Classification manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Parametric Classification with Soft Labels using the
+<br/>Evidential EM Algorithm
+<br/>Linear Discriminant Analysis vs. Logistic Regression
+<br/>Received: date / Accepted: date
+</td><td>('1772306', 'Benjamin Quost', 'benjamin quost')<br/>('2259794', 'Shoumei Li', 'shoumei li')</td><td></td></tr><tr><td>cf5c9b521c958b84bb63bea9d5cbb522845e4ba7</td><td>Towards Arbitrary-View Face Alignment by Recommendation Trees∗
+<br/><b>The Chinese University of Hong Kong</b><br/><b>Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences</b><br/>2SenseTime Group
+</td><td>('2226254', 'Shizhan Zhu', 'shizhan zhu')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td>zs014@ie.cuhk.edu.hk, chengli@sensetime.com, ccloy@ie.cuhk.edu.hk, xtang@ie.cuhk.edu.hk
+</td></tr><tr><td>cf5a0115d3f4dcf95bea4d549ec2b6bdd7c69150</td><td>Detection of emotions from video in non-controlled
+<br/>environment
+<br/>To cite this version:
+<br/>Processing. Universit´e Claude Bernard - Lyon I, 2013. English. <NNT : 2013LYO10227>.
+<br/><tel-01166539v2>
+<br/>HAL Id: tel-01166539
+<br/>https://tel.archives-ouvertes.fr/tel-01166539v2
+<br/>Submitted on 23 Jun 2015
+<br/>HAL is a multi-disciplinary open access
+<br/>archive for the deposit and dissemination of sci-
+<br/>entific research documents, whether they are pub-
+<br/>lished or not. The documents may come from
+<br/>teaching and research institutions in France or
+<br/><b>abroad, or from public or private research centers</b><br/>L’archive ouverte pluridisciplinaire HAL, est
+<br/>destin´ee au d´epˆot et `a la diffusion de documents
+<br/>scientifiques de niveau recherche, publi´es ou non,
+<br/>´emanant des ´etablissements d’enseignement et de
+<br/>recherche fran¸cais ou ´etrangers, des laboratoires
+<br/>publics ou priv´es.
+</td><td>('1943666', 'Rizwan Ahmed Khan', 'rizwan ahmed khan')<br/>('1943666', 'Rizwan Ahmed Khan', 'rizwan ahmed khan')</td><td></td></tr><tr><td>cfdc632adcb799dba14af6a8339ca761725abf0a</td><td>Probabilistic Formulations of Regression with Mixed
+<br/>Guidance
+</td><td>('38688704', 'Aubrey Gress', 'aubrey gress')<br/>('38673135', 'Ian Davidson', 'ian davidson')</td><td>adgress@ucdavis.edu, davidson@cs.ucdavis.edu
+</td></tr><tr><td>cfa931e6728a825caada65624ea22b840077f023</td><td>Deformable Generator Network: Unsupervised Disentanglement of
+<br/>Appearance and Geometry
+<br/><b>College of Automation, Harbin Engineering University, Heilongjiang, China</b><br/><b>University of California, Los Angeles, California, USA</b></td><td>('7306249', 'Xianglei Xing', 'xianglei xing')<br/>('9659905', 'Ruiqi Gao', 'ruiqi gao')<br/>('50495880', 'Tian Han', 'tian han')<br/>('3133970', 'Song-Chun Zhu', 'song-chun zhu')<br/>('39092098', 'Ying Nian Wu', 'ying nian wu')</td><td></td></tr><tr><td>cfc30ce53bfc204b8764ebb764a029a8d0ad01f4</td><td>Regularizing Deep Neural Networks by Noise:
+<br/>Its Interpretation and Optimization
+<br/>Dept. of Computer Science and Engineering, POSTECH, Korea
+</td><td>('2018393', 'Hyeonwoo Noh', 'hyeonwoo noh')<br/>('2205770', 'Tackgeun You', 'tackgeun you')<br/>('8511875', 'Jonghwan Mun', 'jonghwan mun')<br/>('40030651', 'Bohyung Han', 'bohyung han')</td><td>{shgusdngogo,tackgeun.you,choco1916,bhhan}@postech.ac.kr
+</td></tr><tr><td>cff911786b5ac884bb71788c5bc6acf6bf569eff</td><td>Multi-task Learning of Cascaded CNN for
+<br/>Facial Attribute Classification
+<br/><b>School of Information Science and Engineering, Xiamen University, Xiamen 361005, China</b><br/><b>School of Computer and Information Engineering, Xiamen University of Technology, Xiamen 361024, China</b></td><td>('41034942', 'Ni Zhuang', 'ni zhuang')<br/>('40461734', 'Yan Yan', 'yan yan')<br/>('47336404', 'Si Chen', 'si chen')<br/>('37414077', 'Hanzi Wang', 'hanzi wang')</td><td>Email: ni.zhuang@foxmail.com, {yanyan, hanzi.wang}@xmu.edu.cn, chensi@xmut.edu.cn
+</td></tr><tr><td>cf09e2cb82961128302b99a34bff91ec7d198c7c</td><td>OFFICE ENTRANCE CONTROL WITH FACE RECOGNITION
+<br/> Dept. of Computer Science and Information Engineering,
+<br/><b>National Taiwan University, Taiwan</b><br/> Dept. of Computer Science and Information Engineering,
+<br/><b>National Taiwan University, Taiwan</b></td><td>('1721106', 'Yun-Che Tsai', 'yun-che tsai')<br/>('1703041', 'Chiou-Shann Fuh', 'chiou-shann fuh')</td><td>E-mail: jpm9ie8c@gmail.com
+<br/>E-mail: fuh@csie.ntu.edu.tw
+</td></tr><tr><td>cfc4aa456d9da1a6fabd7c6ca199332f03e35b29</td><td><b>University of Amsterdam and Renmin University at TRECVID</b><br/>Searching Video, Detecting Events and Describing Video
+<br/><b>University of Amsterdam</b><br/><b>Zhejiang University</b><br/>Amsterdam, The Netherlands
+<br/>Hangzhou, China
+<br/><b>Renmin University of China</b><br/>Beijing, China
+</td><td>('46741353', 'Cees G. M. Snoek', 'cees g. m. snoek')<br/>('40240283', 'Jianfeng Dong', 'jianfeng dong')<br/>('9931285', 'Xirong Li', 'xirong li')<br/>('48631563', 'Xiaoxu Wang', 'xiaoxu wang')<br/>('24332496', 'Qijie Wei', 'qijie wei')<br/>('2896042', 'Weiyu Lan', 'weiyu lan')<br/>('2304222', 'Efstratios Gavves', 'efstratios gavves')<br/>('13142264', 'Noureldien Hussein', 'noureldien hussein')<br/>('1769315', 'Dennis C. Koelma', 'dennis c. koelma')<br/>('1705182', 'Arnold W. M. Smeulders', 'arnold w. m. smeulders')</td><td></td></tr><tr><td>cf805d478aeb53520c0ab4fcdc9307d093c21e52</td><td>Finding Tiny Faces in the Wild with Generative Adversarial Network
+<br/>Mingli Ding2
+<br/><b>Visual Computing Center, King Abdullah University of Science and Technology (KAUST</b><br/><b>School of Electrical Engineering and Automation, Harbin Institute of Technology (HIT</b><br/><b>Institute of Software, Chinese Academy of Sciences (CAS</b><br/>Figure1. The detection results of tiny faces in the wild. (a) is the original low-resolution blurry face, (b) is the result of
+<br/>re-sizing directly by a bi-linear kernel, (c) is the generated image by the super-resolution method, and our result (d) is learned
+<br/>by the super-resolution (×4 upscaling) and refinement network simultaneously. Best viewed in color and zoomed in.
+</td><td>('2860057', 'Yancheng Bai', 'yancheng bai')<br/>('48378890', 'Yongqiang Zhang', 'yongqiang zhang')<br/>('2931652', 'Bernard Ghanem', 'bernard ghanem')</td><td>baiyancheng20@gmail.com
+<br/>{zhangyongqiang, dingml}@hit.edu.cn
+<br/>bernard.ghanem@kaust.edu.sa
+</td></tr><tr><td>cfdc4d0f8e1b4b9ced35317d12b4229f2e3311ab</td><td>Quaero at TRECVID 2010: Semantic Indexing
+<br/>1UJF-Grenoble 1 / UPMF-Grenoble 2 / Grenoble INP / CNRS, LIG UMR 5217, Grenoble, F-38041, France
+<br/><b>Karlsruhe Institute of Technology, P.O. Box 3640, 76021 Karlsruhe, Germany</b></td><td>('2357942', 'Bahjat Safadi', 'bahjat safadi')<br/>('1921500', 'Yubing Tong', 'yubing tong')<br/>('1981024', 'Franck Thollard', 'franck thollard')<br/>('40303076', 'Tobias Gehrig', 'tobias gehrig')<br/>('3025777', 'Hazim Kemal Ekenel', 'hazim kemal ekenel')</td><td></td></tr><tr><td>cf86616b5a35d5ee777585196736dfafbb9853b5</td><td>This article has been accepted for inclusion in a future issue of this journal. Content is final as presented, with the exception of pagination.
+<br/>Learning Multiscale Active Facial Patches for
+<br/>Expression Analysis
+</td><td>('29803023', 'Lin Zhong', 'lin zhong')<br/>('1734954', 'Qingshan Liu', 'qingshan liu')<br/>('39606160', 'Peng Yang', 'peng yang')<br/>('1768190', 'Junzhou Huang', 'junzhou huang')<br/>('1711560', 'Dimitris N. Metaxas', 'dimitris n. metaxas')</td><td></td></tr><tr><td>cacd51221c592012bf2d9e4894178c1c1fa307ca</td><td>
+<br/>ISSN: 2277-3754
+<br/>ISO 9001:2008 Certified
+<br/>International Journal of Engineering and Innovative Technology (IJEIT)
+<br/>Volume 4, Issue 11, May 2015
+<br/>Face and Expression Recognition Techniques: A
+<br/>Review
+<br/>
+<br/>Advanced Communication & Signal Processing Laboratory, Department of Electronics & Communication
+<br/><b>engineering, Government College of Engineering Kannur, Kerala, India</b></td><td>('35135054', 'A. Ranjith Ram', 'a. ranjith ram')</td><td></td></tr><tr><td>ca0363d29e790f80f924cedaf93cb42308365b3d</td><td>Facial Expression Recognition in Image Sequences
+<br/>using Geometric Deformation Features and Support
+<br/>Vector Machines
+<br/><b>yAristotle University of Thessaloniki</b><br/>Department of Informatics
+<br/>Box 451
+<br/>54124 Thessaloniki, Greece
+</td><td>('1754270', 'Irene Kotsia', 'irene kotsia')<br/>('1698588', 'Ioannis Pitas', 'ioannis pitas')</td><td>email: fekotsia,pitasg@aiia.csd.auth.gr
+</td></tr><tr><td>cad52d74c1a21043f851ae14c924ac689e197d1f</td><td>From Ego to Nos-vision:
+<br/>Detecting Social Relationships in First-Person Views
+<br/>Universit`a degli Studi di Modena e Reggio Emilia
+<br/>Via Vignolese 905, 41125 Modena - Italy
+</td><td>('2452552', 'Stefano Alletto', 'stefano alletto')<br/>('2275344', 'Giuseppe Serra', 'giuseppe serra')<br/>('2175529', 'Simone Calderara', 'simone calderara')<br/>('2059900', 'Francesco Solera', 'francesco solera')<br/>('1741922', 'Rita Cucchiara', 'rita cucchiara')</td><td>{name.surname}@unimore.it
+</td></tr><tr><td>cac8bb0e393474b9fb3b810c61efdbc2e2c25c29</td><td></td><td></td><td></td></tr><tr><td>ca54d0a128b96b150baef392bf7e498793a6371f</td><td>Improve Pedestrian Attribute Classification by
+<br/>Weighted Interactions from Other Attributes
+<br/>Center for Biometrics and Security Research & National Laboratory of Pattern Recognition
+<br/><b>Institute of Automation, Chinese Academy of Sciences</b></td><td>('1739258', 'Jianqing Zhu', 'jianqing zhu')<br/>('40397682', 'Shengcai Liao', 'shengcai liao')<br/>('1718623', 'Zhen Lei', 'zhen lei')<br/>('34679741', 'Stan Z. Li', 'stan z. li')</td><td>jianqingzhu@foxmail.com, {scliao, zlei, szli}@cbsr.ia.ac.cn
+</td></tr><tr><td>cad24ba99c7b6834faf6f5be820dd65f1a755b29</td><td>Understanding hand-object
+<br/>manipulation by modeling the
+<br/>contextual relationship between actions,
+<br/>grasp types and object attributes
+<br/>Journal Title
+<br/>XX(X):1–14
+<br/>c(cid:13)The Author(s) 2016
+<br/>Reprints and permission:
+<br/>sagepub.co.uk/journalsPermissions.nav
+<br/>DOI: 10.1177/ToBeAssigned
+<br/>www.sagepub.com/
+</td><td>('3172280', 'Minjie Cai', 'minjie cai')<br/>('37991449', 'Kris M. Kitani', 'kris m. kitani')<br/>('9467266', 'Yoichi Sato', 'yoichi sato')</td><td></td></tr><tr><td>cadba72aa3e95d6dcf0acac828401ddda7ed8924</td><td>THÈSE PRÉSENTÉE À LA FACULTÉ DES SCIENCES
+<br/>POUR L’OBTENTION DU GRADE DE DOCTEUR ÈS SCIENCES
+<br/>Algorithms and VLSI Architectures
+<br/>for Low-Power Mobile Face Verification
+<br/>par
+<br/>Acceptée sur proposition du jury:
+<br/>Prof. F. Pellandini, directeur de thèse
+<br/>PD Dr. M. Ansorge, co-directeur de thèse
+<br/>Prof. P.-A. Farine, rapporteur
+<br/>Dr. C. Piguet, rapporteur
+<br/>Soutenue le 2 juin 2005
+<br/>INSTITUT DE MICROTECHNIQUE
+<br/>UNIVERSITÉ DE NEUCHÂTEL
+<br/>2006
+</td><td>('1844418', 'Jean-Luc Nagel', 'jean-luc nagel')</td><td></td></tr><tr><td>ca37eda56b9ee53610c66951ee7ca66a35d0a846</td><td>Semantic Concept Discovery for Large-Scale Zero-Shot Event Detection
+<br/><b>Centre for Quantum Computation and Intelligent Systems, University of Technology Sydney</b><br/><b>Language Technologies Institute, Carnegie Mellon University</b><br/><b>Carnegie Mellon University</b></td><td>('1729163', 'Xiaojun Chang', 'xiaojun chang')<br/>('39033919', 'Yi Yang', 'yi yang')<br/>('7661726', 'Alexander G. Hauptmann', 'alexander g. hauptmann')<br/>('1752601', 'Eric P. Xing', 'eric p. xing')</td><td>{cxj273, yee.i.yang}@gmail.com, {alex, epxing, yaoliang}@cs.cmu.edu
+</td></tr><tr><td>ca606186715e84d270fc9052af8500fe23befbda</td><td>Using Subclass Discriminant Analysis, Fuzzy Integral and Symlet Decomposition for
+<br/>Face Recognition
+<br/>Department of Electrical Engineering,
+<br/>Iran Univ. of Science and Technology,
+<br/>Narmak, Tehran, Iran
+<br/>Department of Electrical Engineering,
+<br/>Iran Univ. of Science and Technology,
+<br/>Department of Electrical Engineering,
+<br/>Iran Univ. of Science and Technology,
+<br/>Narmak, Tehran, Iran
+<br/>Narmak, Tehran, Iran
+</td><td>('9267982', 'Seyed Mohammad Seyedzade', 'seyed mohammad seyedzade')<br/>('2532375', 'Sattar Mirzakuchaki', 'sattar mirzakuchaki')<br/>('2535533', 'Amir Tahmasbi', 'amir tahmasbi')</td><td>Email: sm.seyedzade@ieee.org
+<br/>Email: m_kuchaki@iust.ac.ir
+<br/>Email: a.tahmasbi@ieee.org
+</td></tr><tr><td>e48fb3ee27eef1e503d7ba07df8eb1524c47f4a6</td><td>Illumination invariant face recognition and impostor rejection
+<br/>using different MINACE filter algorithms
+<br/><b>Carnegie Mellon University, Pittsburgh, PA</b></td><td>('8142777', 'Rohit Patnaik', 'rohit patnaik')<br/>('34925745', 'David Casasent', 'david casasent')</td><td></td></tr><tr><td>e4bf70e818e507b54f7d94856fecc42cc9e0f73d</td><td>IJRET: International Journal of Research in Engineering and Technology eISSN: 2319-1163 | pISSN: 2321-7308
+<br/>FACE RECOGNITION UNDER VARYING BLUR IN AN
+<br/>UNCONSTRAINED ENVIRONMENT
+<br/><b>M.Tech, Information Technology, Madras Institute of Technology, TamilNadu, India</b><br/><b>Information Technology, Madras Institute of Technology, TamilNadu, India, email</b></td><td></td><td>anubhapearl@gmail.com
+<br/>hemalatha.ch@gmail.com
+</td></tr><tr><td>e4bc529ced68fae154e125c72af5381b1185f34e</td><td>PERCEPTUAL GOAL SPECIFICATIONS FOR REINFORCEMENT LEARNING
+<br/>A Thesis Proposal
+<br/>Presented to
+<br/>The Academic Faculty
+<br/>by
+<br/>In Partial Fulfillment
+<br/>of the Requirements for the Degree
+<br/>Doctor of Philosophy in the
+<br/>School of Interactive Computing
+<br/><b>Georgia Institute of Technology</b><br/>November 2017
+</td><td>('12313871', 'Ashley D. Edwards', 'ashley d. edwards')</td><td></td></tr><tr><td>e465f596d73f3d2523dbf8334d29eb93a35f6da0</td><td></td><td></td><td></td></tr><tr><td>e4aeaf1af68a40907fda752559e45dc7afc2de67</td><td></td><td></td><td></td></tr><tr><td>e4c3d5d43cb62ac5b57d74d55925bdf76205e306</td><td></td><td></td><td></td></tr><tr><td>e42998bbebddeeb4b2bedf5da23fa5c4efc976fa</td><td>Generic Active Appearance Models Revisited
+<br/><b>Imperial College London, United Kingdom</b><br/><b>School of Computer Science, University of Lincoln, United Kingdom</b><br/><b>Faculty of Electrical Engineering, Mathematics and Computer Science, University</b><br/>of Twente, The Netherlands
+</td><td>('2610880', 'Georgios Tzimiropoulos', 'georgios tzimiropoulos')<br/>('2575567', 'Joan Alabort-i-Medina', 'joan alabort-i-medina')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')<br/>('1694605', 'Maja Pantic', 'maja pantic')</td><td>{gt204, ja310, s.zafeiriou, m.pantic}@imperial.ac.uk
+</td></tr><tr><td>e4a1b46b5c639d433d21b34b788df8d81b518729</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+<br/>Side Information for Face Completion: a Robust
+<br/>PCA Approach
+</td><td>('4091869', 'Niannan Xue', 'niannan xue')<br/>('3234063', 'Jiankang Deng', 'jiankang deng')<br/>('1902288', 'Shiyang Cheng', 'shiyang cheng')<br/>('1780393', 'Yannis Panagakis', 'yannis panagakis')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')</td><td></td></tr><tr><td>e4c81c56966a763e021938be392718686ba9135e</td><td></td><td></td><td>3,100+OPEN ACCESS BOOKS103,000+INTERNATIONALAUTHORS AND EDITORS106+ MILLIONDOWNLOADSBOOKSDELIVERED TO151 COUNTRIESAUTHORS AMONGTOP 1%MOST CITED SCIENTIST12.2%AUTHORS AND EDITORSFROM TOP 500 UNIVERSITIESSelection of our books indexed in theBook Citation Index in Web of Science™Core Collection (BKCI)Chapter from the book Visual Cortex - Current Status and PerspectivesDownloaded from: http://www.intechopen.com/books/visual-cortex-current-status-and-perspectivesPUBLISHED BYWorld's largest Science,Technology & Medicine Open Access book publisherInterested in publishing with InTechOpen?Contact us at book.department@intechopen.com </td></tr><tr><td>e4e95b8bca585a15f13ef1ab4f48a884cd6ecfcc</td><td>Face Recognition with Independent Component Based
+<br/>Super-resolution
+<br/>aFaculty of Engineering and Natural Sciences, Sabanci Univ., Istanbul, Turkiye, 34956
+<br/>bSchool of Elec. and Comp. Eng. , Georgia Inst. of Tech., Atlanta, GA, USA, 30332-0250
+</td><td>('1844879', 'Osman Gokhan Sezer', 'osman gokhan sezer')<br/>('3975060', 'Yucel Altunbasak', 'yucel altunbasak')<br/>('31849282', 'Aytul Ercil', 'aytul ercil')</td><td></td></tr><tr><td>e4df83b7424842ff5864c10fa55d38eae1c45fac</td><td>Hindawi Publishing Corporation
+<br/>Discrete Dynamics in Nature and Society
+<br/>Volume 2009, Article ID 916382, 8 pages
+<br/>doi:10.1155/2009/916382
+<br/>Research Article
+<br/>Locally Linear Discriminate Embedding for
+<br/>Face Recognition
+<br/><b>Faculty of Information Science and Technology, Multimedia University, 75450 Melaka, Malaysia</b><br/>Received 21 January 2009; Accepted 12 October 2009
+<br/>Recommended by B. Sagar
+<br/>A novel method based on the local nonlinear mapping is presented in this research. The method
+<br/>is called Locally Linear Discriminate Embedding (cid:2)LLDE(cid:3). LLDE preserves a local linear structure
+<br/>of a high-dimensional space and obtains a compact data representation as accurately as possible
+<br/>in embedding space (cid:2)low dimensional(cid:3) before recognition. For computational simplicity and fast
+<br/>processing, Radial Basis Function (cid:2)RBF(cid:3) classifier is integrated with the LLDE. RBF classifier
+<br/>is carried out onto low-dimensional embedding with reference to the variance of the data. To
+<br/>validate the proposed method, CMU-PIE database has been used and experiments conducted in
+<br/>this research revealed the efficiency of the proposed methods in face recognition, as compared to
+<br/>the linear and non-linear approaches.
+<br/>the Creative Commons Attribution License, which permits unrestricted use, distribution, and
+<br/>reproduction in any medium, provided the original work is properly cited.
+<br/>1. Introduction
+<br/>Linear subspace analysis has been extensively applied to face recognition. A successful face
+<br/>recognition methodology is largely dependent on the particular choice of features used by
+<br/>the classifier. Linear methods are easy to understand and are very simple to implement, but
+<br/>the linearity assumption does not hold in many real-world scenarios. Face appearance lies in
+<br/>a high-dimensional nonlinear manifold. A disadvantage of the linear techniques is that they
+<br/>fail to capture the characteristics of the nonlinear appearance manifold. This is due to the
+<br/>fact that the linear methods extract features only from the input space without considering
+<br/>the nonlinear information between the components of the input data. However, a globally
+<br/>nonlinear mapping can often be approximated using a linear mapping in a local region. This
+<br/>has motivated the design of the nonlinear mapping methods in this study.
+<br/>The history of the nonlinear mapping is long; it can be traced back to Sammon’s
+<br/>mapping in 1969 (cid:5)1(cid:6). Over time, different techniques have been proposed such as the
+<br/>projection pursuit (cid:5)2(cid:6), the projection pursuit regression (cid:5)3(cid:6), self-organizing maps or SOM
+</td><td>('2008201', 'Eimad E. Abusham', 'eimad e. abusham')<br/>('32191265', 'E. K. Wong', 'e. k. wong')<br/>('32191265', 'E. K. Wong', 'e. k. wong')</td><td>Correspondence should be addressed to Eimad E. Abusham, eimad.eldin@mmu.edu.my
+</td></tr><tr><td>e4e3faa47bb567491eaeaebb2213bf0e1db989e1</td><td>Proceedings of the Twenty-Fifth International Joint Conference on Artificial Intelligence (IJCAI-16)
+<br/>Empirical Risk Minimization for Metric
+<br/>Learning Using Privileged Information
+<br/><b>School of Computer and Information, Hefei University of Technology, China</b><br/><b>Centre for Quantum Computation and Intelligent Systems, FEIT, University of Technology Sydney, Australia</b></td><td>('2028727', 'Xun Yang', 'xun yang')<br/>('15970836', 'Meng Wang', 'meng wang')<br/>('1763785', 'Luming Zhang', 'luming zhang')<br/>('1692693', 'Dacheng Tao', 'dacheng tao')</td><td>{hfutyangxun, eric.mengwang, zglumg}@gmail.com;
+<br/>dacheng.tao@uts.edu.au;
+</td></tr><tr><td>e43ea078749d1f9b8254e0c3df4c51ba2f4eebd5</td><td>Facial Expression Recognition Based on Constrained
+<br/>Local Models and Support Vector Machines
+</td><td>('1901962', 'Nikolay Neshov', 'nikolay neshov')<br/>('34945173', 'Ivo Draganov', 'ivo draganov')<br/>('1750280', 'Agata Manolova', 'agata manolova')</td><td></td></tr><tr><td>e476cbcb7c1de73a7bcaeab5d0d59b8b3c4c1cbf</td><td></td><td></td><td></td></tr><tr><td>e4c2f8e4aace8cb851cb74478a63d9111ca550ae</td><td>DISTRIBUTED ONE-CLASS LEARNING
+<br/><b>cid:63)Queen Mary University of London, Imperial College London</b></td><td>('9920557', 'Ali Shahin Shamsabadi', 'ali shahin shamsabadi')<br/>('1763096', 'Hamed Haddadi', 'hamed haddadi')<br/>('1713138', 'Andrea Cavallaro', 'andrea cavallaro')</td><td></td></tr><tr><td>e475e857b2f5574eb626e7e01be47b416deff268</td><td>Facial Emotion Recognition Using Nonparametric
+<br/>Weighted Feature Extraction and Fuzzy Classifier
+</td><td>('2121174', 'Maryam Imani', 'maryam imani')<br/>('1801348', 'Gholam Ali Montazer', 'gholam ali montazer')</td><td></td></tr><tr><td>e4391993f5270bdbc621b8d01702f626fba36fc2</td><td>Author manuscript, published in "18th Scandinavian Conference on Image Analysis (2013)"
+<br/> DOI : 10.1007/978-3-642-38886-6_31
+</td><td></td><td></td></tr><tr><td>e43045a061421bd79713020bc36d2cf4653c044d</td><td>A New Representation of Skeleton Sequences for 3D Action Recognition
+<br/><b>The University of Western Australia</b><br/><b>Murdoch University</b></td><td>('2796959', 'Qiuhong Ke', 'qiuhong ke')<br/>('1698675', 'Mohammed Bennamoun', 'mohammed bennamoun')<br/>('1782428', 'Senjian An', 'senjian an')</td><td>qiuhong.ke@research.uwa.edu.au
+<br/>{mohammed.bennamoun,senjian.an,farid.boussaid}@uwa.edu.au
+<br/>f.sohel@murdoch.edu.au
+</td></tr><tr><td>e4d8ba577cabcb67b4e9e1260573aea708574886</td><td>UM SISTEMA DE RECOMENDAC¸ ˜AO INTELIGENTE BASEADO EM V´IDIO
+<br/>AULAS PARA EDUCAC¸ ˜AO A DIST ˆANCIA
+<br/>Gaspare Giuliano Elias Bruno
+<br/>Tese de Doutorado apresentada ao Programa
+<br/>de P´os-gradua¸c˜ao em Engenharia de Sistemas e
+<br/>Computa¸c˜ao, COPPE, da Universidade Federal
+<br/>do Rio de Janeiro, como parte dos requisitos
+<br/>necess´arios `a obten¸c˜ao do t´ıtulo de Doutor em
+<br/>Engenharia de Sistemas e Computa¸c˜ao.
+<br/>Orientadores: Edmundo Albuquerque de
+<br/>Souza e Silva
+<br/>Rosa Maria Meri Le˜ao
+<br/>Rio de Janeiro
+<br/>Janeiro de 2016
+</td><td></td><td></td></tr><tr><td>e475deadd1e284428b5e6efd8fe0e6a5b83b9dcd</td><td>Accepted in Pattern Recognition Letters
+<br/>Pattern Recognition Letters
+<br/>journal homepage: www.elsevier.com
+<br/>Are you eligible? Predicting adulthood from face images via class specific mean
+<br/>autoencoder
+<br/>IIIT-Delhi, New Delhi, 110020, India
+<br/>Article history:
+<br/>Received 15 March 2017
+</td><td>('2220719', 'Maneet Singh', 'maneet singh')<br/>('1925017', 'Shruti Nagpal', 'shruti nagpal')<br/>('2338122', 'Mayank Vatsa', 'mayank vatsa')<br/>('39129417', 'Richa Singh', 'richa singh')</td><td></td></tr><tr><td>e4abc40f79f86dbc06f5af1df314c67681dedc51</td><td>Head Detection with Depth Images in the Wild
+<br/>Department of Engineering ”Enzo Ferrari”
+<br/><b>University of Modena and Reggio Emilia, Italy</b><br/>Keywords:
+<br/>Head Detection, Head Localization, Depth Maps, Convolutional Neural Network
+</td><td>('6125279', 'Diego Ballotta', 'diego ballotta')<br/>('12010968', 'Guido Borghi', 'guido borghi')<br/>('1723285', 'Roberto Vezzani', 'roberto vezzani')<br/>('1741922', 'Rita Cucchiara', 'rita cucchiara')</td><td>{name.surname}@unimore.it
+</td></tr><tr><td>e4d0e87d0bd6ead4ccd39fc5b6c62287560bac5b</td><td>Implicit Video Multi-Emotion Tagging by Exploiting Multi-Expression
+<br/>Relations
+</td><td>('1771215', 'Zhilei Liu', 'zhilei liu')<br/>('1791319', 'Shangfei Wang', 'shangfei wang')<br/>('3558606', 'Zhaoyu Wang', 'zhaoyu wang')<br/>('1726583', 'Qiang Ji', 'qiang ji')</td><td></td></tr><tr><td>e48e94959c4ce799fc61f3f4aa8a209c00be8d7f</td><td>Hindawi Publishing Corporation
+<br/>The Scientific World Journal
+<br/>Volume 2013, Article ID 135614, 6 pages
+<br/>http://dx.doi.org/10.1155/2013/135614
+<br/>Research Article
+<br/>Design of an Efficient Real-Time Algorithm Using Reduced
+<br/>Feature Dimension for Recognition of Speed Limit Signs
+<br/><b>Sogang University, Seoul 121-742, Republic of Korea</b><br/>2 Samsung Techwin R&D Center, Security Solution Division, 701 Sampyeong-dong, Bundang-gu, Seongnam-si,
+<br/>Gyeonggi 463-400, Republic of Korea
+<br/>Received 28 August 2013; Accepted 1 October 2013
+<br/>Academic Editors: P. Daponte, M. Nappi, and N. Nishchal
+<br/>which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
+<br/>We propose a real-time algorithm for recognition of speed limit signs from a moving vehicle. Linear Discriminant Analysis (LDA)
+<br/>required for classification is performed by using Discrete Cosine Transform (DCT) coefficients. To reduce feature dimension in
+<br/>LDA, DCT coefficients are selected by a devised discriminant function derived from information obtained by training. Binarization
+<br/>and thinning are performed on a Region of Interest (ROI) obtained by preprocessing a detected ROI prior to DCT for further
+<br/>reduction of computation time in DCT. This process is performed on a sequence of image frames to increase the hit rate of
+<br/>recognition. Experimental results show that arithmetic operations are reduced by about 60%, while hit rates reach about 100%
+<br/>compared to previous works.
+<br/>1. Introduction
+<br/>Driver safety is the main concern of the advanced vehicle
+<br/>system which became implementable due to the develop-
+<br/>ment of the autonomous driving, automatic control, and
+<br/>imaging technology. An advanced vehicle system gives driver
+<br/>information related to safety by sensing the surroundings
+<br/>automatically [1]. Speed limit signs recognition is regarded
+<br/>to be helpful in safety for drivers using advanced vehicle
+<br/>system. The system needs to recognize the speed limit sign
+<br/>in the distance quickly and accurately in order to give
+<br/>the driver precaution in time since vehicle is moving fast.
+<br/>But existing algorithms perform recognition by using many
+<br/>features extracted from captured image, requiring a large
+<br/>amount of arithmetic operations for classification [2].
+<br/>Several classification algorithms have been proposed,
+<br/>which include Neural Networks [2, 3], Support Vector
+<br/>Machine (SVM) [2], and Linear Discriminant Analysis
+<br/>(LDA) [2, 4]. Among these, SVM has relatively higher recog-
+<br/>nition rate, and LDA is used in many classification applica-
+<br/>tions due to its low computational complexity. However, its
+<br/>computational complexity needs to be further reduced to be
+<br/>used in real-time application. It can be achieved by reducing
+<br/>the number of inputs of LDA.
+<br/>This paper proposes an efficient real-time algorithm for
+<br/>recognition of speed limit signs by using reduced feature
+<br/>dimension. In this research study, DCT is employed and parts
+<br/>of Discrete Cosine Transform (DCT) coefficients are used as
+<br/>inputs to LDA instead of features extracted from image. DCT
+<br/>coefficients are selected by a devised discriminant function.
+<br/>To further reduce DCT computation time, binarization and
+<br/>thinning are applied to the detected Region of Interest (ROI).
+<br/>Image of speed limit sign in the distance obtained from cam-
+<br/>era has a low resolution and it gives poor rate of recognition.
+<br/>To resolve this problem, this paper proposes a recognition
+<br/>system using classification results on a sequence of frames.
+<br/>It can enhance hit rate of recognition by accumulating the
+<br/>probability of single frame recognition.
+<br/>2. Background
+<br/>In this section, LDA is briefly described, which is popularly
+<br/>employed for classification. LDA is a classical statistical
+</td><td>('2012225', 'Hanmin Cho', 'hanmin cho')<br/>('5984008', 'Seungwha Han', 'seungwha han')<br/>('6348959', 'Sun-Young Hwang', 'sun-young hwang')<br/>('2012225', 'Hanmin Cho', 'hanmin cho')</td><td>Correspondence should be addressed to Sun-Young Hwang; hwang@sogang.ac.kr
+</td></tr><tr><td>e496d6be415038de1636bbe8202cac9c1cea9dbe</td><td>Facial Expression Recognition in Older Adults using
+<br/>Deep Machine Learning
+<br/><b>National Research Council of Italy, Institute for Microelectronics and Microsystems, Lecce</b><br/>Italy
+</td><td>('2886068', 'Andrea Caroppo', 'andrea caroppo')<br/>('1796761', 'Alessandro Leone', 'alessandro leone')<br/>('1737181', 'Pietro Siciliano', 'pietro siciliano')</td><td>{andrea.caroppo,alessandro.leone,pietro.siciliano}@le.imm.cnr.it
+</td></tr><tr><td>e43cc682453cf3874785584fca813665878adaa7</td><td>www.ijecs.in
+<br/>International Journal Of Engineering And Computer Science ISSN:2319-7242
+<br/>Volume 3 Issue 10 October, 2014 Page No.8830-8834
+<br/>Face Recognition using Local Derivative Pattern Face
+<br/>Descriptor
+<br/>Department of Electronics and Telecommunication
+<br/><b>Datta Meghe College of Engineering</b><br/>Airoli, Navi Mumbai, India 1,2
+<br/>Mob: 99206746061
+<br/>Mob: 99870353142
+</td><td></td><td>pranitachavan42@gmail.com 1
+<br/>djpethe@gmail.com 2
+</td></tr><tr><td>fec6648b4154fc7e0892c74f98898f0b51036dfe</td><td>A Generic Face Processing
+<br/>Framework: Technologies,
+<br/>Analyses and Applications
+<br/>A Thesis Submitted in Partial Ful(cid:12)lment
+<br/>of the Requirements for the Degree of
+<br/>Master of Philosophy
+<br/>in
+<br/>Computer Science and Engineering
+<br/>Supervised by
+<br/><b>c(cid:13)The Chinese University of Hong Kong</b><br/>July 2003
+<br/><b>The Chinese University of Hong Kong holds the copyright of this thesis. Any</b><br/>person(s) intending to use a part or whole of the materials in the thesis in
+<br/>a proposed publication must seek copyright release from the Dean of the
+<br/>Graduate School.
+</td><td>('1681775', 'Michael R. Lyu', 'michael r. lyu')</td><td></td></tr><tr><td>fea0a5ed1bc83dd1b545a5d75db2e37a69489ac9</td><td>Enhancing Recommender Systems for TV by Face Recognition
+<br/><b>iMinds - Ghent University, Technologiepark 15, B-9052 Ghent, Belgium</b><br/>Keywords:
+<br/>Recommender System, Face Recognition, Face Detection, TV, Emotion Detection.
+</td><td>('1738833', 'Toon De Pessemier', 'toon de pessemier')<br/>('3441798', 'Damien Verlee', 'damien verlee')<br/>('1698239', 'Luc Martens', 'luc martens')</td><td>{toon.depessemier, luc.martens}@intec.ugent.be
+</td></tr><tr><td>fe9c460d5ca625402aa4d6dd308d15a40e1010fa</td><td>Neural Architecture for Temporal Emotion
+<br/>Classification
+<br/>Universit¨at Ulm, Neuroinformatik, Germany
+</td><td>('1681327', 'Roland Schweiger', 'roland schweiger')<br/>('2331203', 'Pierre Bayerl', 'pierre bayerl')<br/>('1706025', 'Heiko Neumann', 'heiko neumann')</td><td>froland.schweiger,pierre.bayerl,heiko.neumanng@informatik.uni-ulm.de
+</td></tr><tr><td>fe7e3cc1f3412bbbf37d277eeb3b17b8b21d71d5</td><td>IOSR Journal of VLSI and Signal Processing (IOSR-JVSP)
+<br/>Volume 6, Issue 2, Ver. I (Mar. -Apr. 2016), PP 47-53
+<br/>e-ISSN: 2319 – 4200, p-ISSN No. : 2319 – 4197
+<br/>www.iosrjournals.org
+<br/>Performance Evaluation of Gabor Wavelet Features for Face
+<br/>Representation and Recognition
+<br/><b>Bapuji Institute of Engineering and Technology Davanagere, Karnataka, India</b><br/><b>University B.D.T.College of Engineering, Visvesvaraya</b><br/><b>Technological University, Davanagere, Karnataka, India</b></td><td>('2038371', 'M. E. Ashalatha', 'm. e. ashalatha')<br/>('3283067', 'Mallikarjun S. Holi', 'mallikarjun s. holi')</td><td></td></tr><tr><td>fe464b2b54154d231671750053861f5fd14454f5</td><td>Multi Joint Action in CoTeSys
+<br/>- Setup and Challenges -
+<br/>Technical report CoTeSys-TR-10-01
+<br/>D. Brˇsˇci´c, F. Rohrm¨uller, O. Kourakos, S. Sosnowski, D. Althoff, M. Lawitzky,
+<br/>{drazen, rohrm, omirosk, sosnowski, dalthoff, lawitzky, moertl, rambow, vicky,
+<br/>M. Eggers, C. Mayer, T. Kruse, A. Kirsch, M. Beetz and B. Radig 2
+<br/>T. Lorenz and A. Schub¨o 4
+<br/>P. Basili and S. Glasauer 5
+<br/>W. Maier and E. Steinbach 7
+<br/><b>Institute of Automatic Control</b><br/>4 Experimental Psychology Unit
+<br/>Engineering
+<br/>Department of Psychology
+<br/>Department of Electrical Engineering
+<br/>Ludwig-Maximilians-Universit¨at
+<br/>and Information Technology
+<br/>Technische Universit¨at M¨unchen
+<br/>Arcisstraße 21, 80333 M¨unchen
+<br/>2Intelligent Autonomous Systems
+<br/>Department of Informatics
+<br/>M¨unchen
+<br/>Leopoldstraße 13, 80802 M¨unchen
+<br/>5Center for Sensorimotor Research
+<br/>Clinical Neurosciences and
+<br/>Department of Neurology
+<br/>Technische Universit¨at M¨unchen
+<br/>Ludwig-Maximilians-Universit¨at
+<br/>Boltzmannstraße 3, 85748 Garching
+<br/>M¨unchen
+<br/>bei M¨unchen
+<br/>Marchionistraße 23, 81377 M¨unchen
+<br/><b>Institute for Human-Machine</b><br/>6Robotics and Embedded Systems
+<br/>Communication
+<br/>Department of Informatics
+<br/>Department of Electrical Engineering
+<br/>Technische Universit¨at M¨unchen
+<br/>and Information Technology
+<br/>Boltzmannstraße 3, 85748 Garching
+<br/>Technische Universit¨at M¨unchen
+<br/>Arcisstraße 21, 80333 M¨unchen
+<br/>bei M¨unchen
+<br/><b>Institute for Media Technology</b><br/>Department of Electrical Engineering
+<br/>and Information Technology
+<br/>Technische Universit¨at M¨unchen
+<br/>Arcisstraße 21, 80333 M¨unchen
+</td><td>('46953125', 'X. Zang', 'x. zang')<br/>('47824592', 'W. Wang', 'w. wang')<br/>('48172476', 'A. Bannat', 'a. bannat')<br/>('30849638', 'G. Panin', 'g. panin')</td><td>medina, xueliang zang, wangwei, dirk, kuehnlen, hirche, buss}@lsr.ei.tum.de
+<br/>{eggers, mayerc, kruset, kirsch, beetz, radig}@in.tum.de
+<br/>{blume, bannat, rehrl, wallhoff}@tum.de
+<br/>{lorenz, schuboe}@psy.lmu.de
+<br/>{p.basili,s.glasauer}@lrz.uni-muenchen.de
+<br/>{lenz,roeder,panin,knoll}@in.tum.de
+<br/>{werner.maier, eckehard.steinbach}@tum.de
+</td></tr><tr><td>fe7c0bafbd9a28087e0169259816fca46db1a837</td><td></td><td></td><td></td></tr><tr><td>fe5df5fe0e4745d224636a9ae196649176028990</td><td><b>University of Massachusetts - Amherst</b><br/>Dissertations
+<br/>9-1-2010
+<br/>Dissertations and Theses
+<br/>Using Context to Enhance the Understanding of
+<br/>Face Images
+<br/>Follow this and additional works at: http://scholarworks.umass.edu/open_access_dissertations
+<br/>Recommended Citation
+<br/>Jain, Vidit, "Using Context to Enhance the Understanding of Face Images" (2010). Dissertations. Paper 287.
+</td><td>('2246870', 'Vidit Jain', 'vidit jain')</td><td>ScholarWorks@UMass Amherst
+<br/>University of Massachusetts - Amherst, vidit.jain@gmail.com
+<br/>This Open Access Dissertation is brought to you for free and open access by the Dissertations and Theses at ScholarWorks@UMass Amherst. It has
+<br/>been accepted for inclusion in Dissertations by an authorized administrator of ScholarWorks@UMass Amherst. For more information, please contact
+<br/>scholarworks@library.umass.edu.
+</td></tr><tr><td>fe961cbe4be0a35becd2d722f9f364ec3c26bd34</td><td>Computer-based Tracking, Analysis, and Visualization of Linguistically
+<br/>Significant Nonmanual Events in American Sign Language (ASL)
+<br/><b>Boston University / **Rutgers University / ***Gallaudet University</b><br/><b>Boston University, Linguistics Program, 621 Commonwealth Avenue, Boston, MA</b><br/><b>Rutgers University, Computer and Information Sciences, 110 Frelinghuysen Road, Piscataway, NJ</b><br/><b>Gallaudet University, Technology Access Program, 800 Florida Ave NE, Washington, DC</b></td><td>('1732359', 'Carol Neidle', 'carol neidle')<br/>('38079056', 'Jingjing Liu', 'jingjing liu')<br/>('39132952', 'Bo Liu', 'bo liu')<br/>('4340744', 'Xi Peng', 'xi peng')<br/>('2467082', 'Christian Vogler', 'christian vogler')<br/>('1711560', 'Dimitris Metaxas', 'dimitris metaxas')</td><td>E-mail: carol@bu.edu, jl1322@cs.rutgers.edu, lb507@cs.rutgers.edu, px13@cs.rutgers.edu,
+<br/>christian.vogler@gallaudet.edu, dnm@ cs.rutgers.edu
+</td></tr><tr><td>feb6e267923868bff6e2108603d00fdfd65251ca</td><td>February 1, 2013 15:16 WSPC/INSTRUCTION FILE
+<br/>S0218213012500297
+<br/>International Journal on Artificial Intelligence Tools
+<br/>Vol. 22, No. 1 (2013) 1250029 (30 pages)
+<br/>c(cid:13) World Scientific Publishing Company
+<br/>DOI: 10.1142/S0218213012500297
+<br/>UNSUPERVISED DISCOVERY OF VISUAL FACE CATEGORIES
+<br/><b>Institute of Systems Engineering, Southeast University, Nanjing, China</b><br/><b>University of Nevada, Reno, USA</b><br/><b>College of Computer and Information Sciences</b><br/><b>King Saud University, Riyadh 11543, Saudi Arabia</b><br/><b>College of Computer and Information Sciences</b><br/><b>King Saud University, P.O. Box 51178, Riyadh 11543, Saudi Arabia</b><br/>GHULAM MUHAMMAD
+<br/><b>College of Computer and Information Sciences</b><br/><b>King Saud University, Riyadh 11543, Saudi Arabia</b><br/>Received 30 January 2012
+<br/>Accepted 10 May 2012
+<br/>Published
+<br/>Human faces can be arranged into different face categories using information from common visual
+<br/>cues such as gender, ethnicity, and age. It has been demonstrated that using face categorization as a
+<br/>precursor step to face recognition improves recognition rates and leads to more graceful errors.1
+<br/>Although face categorization using common visual cues yields meaningful face categories,
+<br/>developing accurate and robust gender, ethnicity, and age categorizers is a challenging issue.
+<br/>Moreover, it limits the overall number of possible face categories and, in practice, yields unbalanced
+<br/>face categories which can compromise recognition performance. This paper investigates ways to
+<br/>automatically discover a categorization of human faces from a collection of unlabeled face images
+<br/>without relying on predefined visual cues. Specifically, given a set of face images from a group of
+<br/>known individuals (i.e., gallery set), our goal is finding ways to robustly partition the gallery set
+<br/>(i.e., face categories). The objective is being able to assign novel images of the same individuals
+<br/>(i.e., query set) to the correct face category with high accuracy and robustness. To address the issue
+<br/>of face category discovery, we represent faces using local features and apply unsupervised learning
+<br/>(i.e., clustering). To categorize faces in novel images, we employ nearest-neighbor algorithms
+<br/>1250029-1
+</td><td>('2884262', 'Shicai Yang', 'shicai yang')<br/>('1808451', 'George Bebis', 'george bebis')<br/>('2363759', 'Muhammad Hussain', 'muhammad hussain')<br/>('39344692', 'Anwar M. Mirza', 'anwar m. mirza')</td><td>shicai.yang@gmail.com
+<br/>bebis@cse.unr.edu
+<br/>mhussain@ksu.edu.sa
+<br/>ghulam@ksu.edu.sa
+<br/>anwar.m.mirza@gmail.com
+</td></tr><tr><td>fe48f0e43dbdeeaf4a03b3837e27f6705783e576</td><td></td><td></td><td></td></tr><tr><td>fea83550a21f4b41057b031ac338170bacda8805</td><td>Learning a Metric Embedding
+<br/>for Face Recognition
+<br/>using the Multibatch Method
+<br/>Orcam Ltd., Jerusalem, Israel
+</td><td>('46273386', 'Oren Tadmor', 'oren tadmor')<br/>('1743988', 'Yonatan Wexler', 'yonatan wexler')<br/>('31601132', 'Tal Rosenwein', 'tal rosenwein')<br/>('2554670', 'Shai Shalev-Shwartz', 'shai shalev-shwartz')<br/>('3140335', 'Amnon Shashua', 'amnon shashua')</td><td>firstname.lastname@orcam.com
+</td></tr><tr><td>feeb0fd0e254f38b38fe5c1022e84aa43d63f7cc</td><td>EURECOM
+<br/>Multimedia Communications Department
+<br/>and
+<br/>Mobile Communications Department
+<br/>2229, route des Crˆetes
+<br/>B.P. 193
+<br/>06904 Sophia-Antipolis
+<br/>FRANCE
+<br/>Research Report RR-11-255
+<br/>Search Pruning with Soft Biometric Systems:
+<br/>Efficiency-Reliability Tradeoff
+<br/>June 1st, 2011
+<br/>Last update June 1st, 2011
+<br/>1EURECOM’s research is partially supported by its industrial members: BMW Group, Cisco,
+<br/>Monaco Telecom, Orange, SAP, SFR, Sharp, STEricsson, Swisscom, Symantec, Thales.
+</td><td>('3299530', 'Antitza Dantcheva', 'antitza dantcheva')<br/>('15758502', 'Arun Singh', 'arun singh')<br/>('1688531', 'Petros Elia', 'petros elia')<br/>('1709849', 'Jean-Luc Dugelay', 'jean-luc dugelay')</td><td></td></tr><tr><td>fe108803ee97badfa2a4abb80f27fa86afd9aad9</td><td></td><td></td><td></td></tr><tr><td>fe0c51fd41cb2d5afa1bc1900bbbadb38a0de139</td><td>Rahman et al. EURASIP Journal on Image and Video Processing (2015) 2015:35
+<br/>DOI 10.1186/s13640-015-0090-5
+<br/>RESEARCH
+<br/>Open Access
+<br/>Bayesian face recognition using 2D
+<br/>Gaussian-Hermite moments
+</td><td>('47081388', 'S. M. Mahbubur Rahman', 's. m. mahbubur rahman')<br/>('2021126', 'Tamanna Howlader', 'tamanna howlader')</td><td></td></tr><tr><td>c8db8764f9d8f5d44e739bbcb663fbfc0a40fb3d</td><td>Modeling for part-based visual object
+<br/>detection based on local features
+<br/>Von der Fakult¨at f¨ur Elektrotechnik und Informationstechnik
+<br/>der Rheinisch-Westf¨alischen Technischen Hochschule Aachen
+<br/>zur Erlangung des akademischen Grades eines Doktors
+<br/>der Ingenieurwissenschaften genehmigte Dissertation
+<br/>vorgelegt von
+<br/>Diplom-Ingenieur
+<br/>aus Neuss
+<br/>Berichter:
+<br/>Univ.-Prof. Dr.-Ing. Jens-Rainer Ohm
+<br/>Univ.-Prof. Dr.-Ing. Til Aach
+<br/>Tag der m¨undlichen Pr¨ufung: 28. September 2011
+<br/>Diese Dissertation ist auf den Internetseiten der
+<br/>Hochschulbibliothek online verf¨ugbar.
+</td><td>('2447988', 'Mark Asbach', 'mark asbach')</td><td></td></tr><tr><td>c86e6ed734d3aa967deae00df003557b6e937d3d</td><td>Generative Adversarial Networks with
+<br/>Decoder-Encoder Output Noise
+<br/>conditional distribution of their neighbors. In [32], Portilla and
+<br/>Simoncelli proposed a parametric texture model based on joint
+<br/>statistics, which uses a decomposition method that is called
+<br/>steerable pyramid decomposition to decompose the texture
+<br/>of images. An example-based super-resolution algorithm [11]
+<br/>was proposed in 2002, which uses a Markov network to model
+<br/>the spatial relationship between the pixels of an image. A
+<br/>scene completion algorithm [16] was proposed in 2007, which
+<br/>applied a semantic scene match technique. These traditional
+<br/>algorithms can be applied to particular image generation tasks,
+<br/>such as texture synthesis and super-resolution. Their common
+<br/>characteristic is that they predict the images pixel by pixel
+<br/>rather than generate an image as a whole, and the basic idea
+<br/>of them is to make an interpolation according to the existing
+<br/>part of the images. Here, the problem is, given a set of images,
+<br/>can we generate totally new images with the same distribution
+<br/>of the given ones?
+</td><td>('2421012', 'Guoqiang Zhong', 'guoqiang zhong')<br/>('46874300', 'Wei Gao', 'wei gao')<br/>('3142351', 'Yongbin Liu', 'yongbin liu')<br/>('47796538', 'Youzhao Yang', 'youzhao yang')</td><td></td></tr><tr><td>c8a4b4fe5ff2ace9ab9171a9a24064b5a91207a3</td><td>LOCATING FACIAL LANDMARKS WITH BINARY MAP CROSS-CORRELATIONS
+<br/>J´er´emie Nicolle
+<br/>K´evin Bailly
+<br/>Univ. Pierre & Marie Curie, ISIR - CNRS UMR 7222, F-75005, Paris - France
+</td><td>('3074790', 'Vincent Rapp', 'vincent rapp')<br/>('1680828', 'Mohamed Chetouani', 'mohamed chetouani')</td><td>{nicolle, bailly, rapp, chetouani}@isir.upmc.fr
+</td></tr><tr><td>c87f7ee391d6000aef2eadb49f03fc237f4d1170</td><td>1
+<br/>A real-time and unsupervised face Re-Identification system for Human-Robot
+<br/>Interaction
+<br/><b>Intelligent Behaviour Understanding Group, Imperial College London, London, UK</b><br/>A B S T R A C T
+<br/>In the context of Human-Robot Interaction (HRI), face Re-Identification (face Re-ID) aims to verify if certain detected faces have already been
+<br/>observed by robots. The ability of distinguishing between different users is crucial in social robots as it will enable the robot to tailor the interaction
+<br/>strategy toward the users’ individual preferences. So far face recognition research has achieved great success, however little attention has been paid
+<br/>to the realistic applications of Face Re-ID in social robots. In this paper, we present an effective and unsupervised face Re-ID system which
+<br/>simultaneously re-identifies multiple faces for HRI. This Re-ID system employs Deep Convolutional Neural Networks to extract features, and an
+<br/>online clustering algorithm to determine the face’s ID. Its performance is evaluated on two datasets: the TERESA video dataset collected by the
+<br/>TERESA robot, and the YouTube Face Dataset (YTF Dataset). We demonstrate that the optimised combination of techniques achieves an overall
+<br/>93.55% accuracy on TERESA dataset and an overall 90.41% accuracy on YTF dataset. We have implemented the proposed method into a software
+<br/>module in the HCI^2 Framework [1] for it to be further integrated into the TERESA robot [2], and has achieved real-time performance at 10~26
+<br/>Frames per second.
+<br/>Keywords: Real-Time Face Re-Identification, Open Set Re-ID, Multiple Re-ID, Human-Robot Interaction, CNN Descriptors, Online Clustering
+<br/>1. Introduction
+<br/>Face recognition problem is one of the oldest topics in
+<br/>Computer Vision [3]. Recently, the interest in this problem has
+<br/>been revamped, mostly due to the observation that standard face
+<br/>recognition approaches do not perform well in real-time
+<br/>scenarios where faces can be rotated, occluded, and under
+<br/>unconstrained illumination. Face recognition tasks are generally
+<br/>classified into two categories:
+<br/>1. Face Verification. Given two face images, the task of face
+<br/>verification is to determine if these two faces belong to the same
+<br/>person.
+<br/>2. Face Identification. This refers to the process of finding the
+<br/>identity of an unknown face image given a database of known
+<br/>faces.
+<br/>However, there are certain situations where a third type of
+<br/>face recognition is needed: face re-identification (face Re-ID). In
+<br/>the context of Human-Robot Interaction (HRI), the goal of face
+<br/>Re-ID is to determine if certain faces have been seen by the robot
+<br/>before, and if so, to determine their identity.
+<br/>Generally, a real-time and unsupervised face re-identification
+<br/>system is required to achieve effective interactions between
+<br/>humans and robots. In the realistic scenarios of HRI, the face re-
+<br/>identification task is confronted with the following challenges:
+<br/>a. The system needs to be able to build and update the run-
+<br/>time user gallery on the fly as there is usually no prior
+<br/>knowledge about the interaction targets in advance.
+<br/>b. The system should achieve high processing speed in
+<br/>order for the robot to maintain real-time interaction with
+<br/>the users.
+<br/>c. The method should be robust against high intra-class
+<br/>illumination changes, partial
+<br/>variance caused by
+<br/>
+<br/>occlusion, pose variation, and/or the display of facial
+<br/>expressions.
+<br/>d. The system should achieve high recognition accuracy on
+<br/>low-quality images resulted from motion blur (when the
+<br/>robot and / or the user is moving), out-of-focus blur,
+<br/>and/or over /under-exposure.
+<br/>Recently, deep-learning approaches, especially Convolutional
+<br/>Neural Networks (CNNs), have achieved great success in solving
+<br/>face recognition problems [4]–[8]. Comparing
+<br/>to classic
+<br/>approaches, deep-learning-based methods are characterised by
+<br/>their powerful feature extraction abilities. However, as existing
+<br/>works mostly focused on traditional face identification problems,
+<br/>the potential applications of deep-learning-based methods in
+<br/>solving face Re-ID problems is yet to be explored.
+<br/>that can work effectively
+<br/>In this paper, we present a real-time unsupervised face re-
+<br/>identification system
+<br/>in an
+<br/>unconstrained environment. Firstly, we employ a pre-trained
+<br/>CNN [7] as the feature extractor and try to improve its
+<br/>performance and processing speed in HRI context by utilising a
+<br/>variety of pre-processing techniques. In the Re-Identification step,
+<br/>we then use an online clustering algorithm to build and update a
+<br/>run-time face gallery and to output the probe faces’ ID.
+<br/>Experiments show that our system can achieve a Re-ID accuracy
+<br/>of 93.55% and 90.41% on the TERESA video dataset and the
+<br/>YTF Dataset respectively and is able to achieve a real-time
+<br/>processing speed of 10~26 FPS.
+<br/>2. Related Works
+<br/>Various methods [9]–[15] have been developed to solve the
+<br/>person Re-ID problem in surveillance context. However, most of
+<br/>them [9]–[13] are unsuitable to HRI applications as these
+<br/>approaches often rely on soft biometrics (i.e. clothing’s colours
+<br/>and textures) that are unavailable to the robot (which usually only
+<br/>sees the user’s face). Due to the unavailability of such soft
+<br/>biometrics, it is difficult to apply person re-identification
+</td><td>('2563750', 'Yujiang Wang', 'yujiang wang')<br/>('49927631', 'Jie Shen', 'jie shen')<br/>('2403354', 'Stavros Petridis', 'stavros petridis')<br/>('1694605', 'Maja Pantic', 'maja pantic')</td><td></td></tr><tr><td>c866a2afc871910e3282fd9498dce4ab20f6a332</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Surveillance Face Recognition Challenge
+<br/>Received: date / Accepted: date
+</td><td>('5314735', 'Zhiyi Cheng', 'zhiyi cheng')</td><td></td></tr><tr><td>c8ca6a2dc41516c16ea0747e9b3b7b1db788dbdd</td><td>1 Department of Computer Science
+<br/><b>Rutgers University</b><br/>New Jersey, USA
+<br/>2 Department of Computer Science
+<br/><b>The University of Texas at Arlington</b><br/>Texas, USA
+<br/>PENG, XI: TRACK FACIAL POINTS IN UNCONSTRAINED VIDEOS
+<br/>Track Facial Points in Unconstrained Videos
+</td><td>('4340744', 'Xi Peng', 'xi peng')<br/>('40420376', 'Qiong Hu', 'qiong hu')<br/>('1768190', 'Junzhou Huang', 'junzhou huang')<br/>('1711560', 'Dimitris N. Metaxas', 'dimitris n. metaxas')</td><td>xipeng.cs@rutgers.edu
+<br/>qionghu.cs@rutgers.edu
+<br/>jzhuang@uta.edu
+<br/>dnm@cs.rutgers.edu
+</td></tr><tr><td>c8292aa152a962763185e12fd7391a1d6df60d07</td><td>Camera Distance from Face Images
+<br/><b>University of California, San Diego</b><br/>9500 Gilman Drive, La Jolla, CA, USA
+</td><td>('25234832', 'Arturo Flores', 'arturo flores')</td><td>{aflores,echristiansen,kriegman,sjb}@cs.ucsd.edu
+</td></tr><tr><td>c82c147c4f13e79ad49ef7456473d86881428b89</td><td></td><td></td><td></td></tr><tr><td>c84233f854bbed17c22ba0df6048cbb1dd4d3248</td><td>Exploring Locally Rigid Discriminative
+<br/>Patches for Learning Relative Attributes
+<br/>http://researchweb.iiit.ac.in/~yashaswi.verma/
+<br/>http://www.iiit.ac.in/~jawahar/
+<br/>CVIT
+<br/>IIIT-Hyderabad, India
+<br/>http://cvit.iiit.ac.in
+</td><td>('1694502', 'C. V. Jawahar', 'c. v. jawahar')<br/>('2169614', 'Yashaswi Verma', 'yashaswi verma')<br/>('1694502', 'C. V. Jawahar', 'c. v. jawahar')</td><td></td></tr><tr><td>c829be73584966e3162f7ccae72d9284a2ebf358</td><td>shuttleNet: A biologically-inspired RNN with loop connection and parameter
+<br/>sharing
+<br/>1 National Engineering Laboratory for Video Technology, School of EE&CS,
+<br/><b>Peking University, Beijing, China</b><br/>2 Cooperative Medianet Innovation Center, China
+<br/>3 School of Information and Electronics,
+<br/><b>Beijing Institute of Technology, Beijing, China</b></td><td>('38179026', 'Yemin Shi', 'yemin shi')<br/>('1705972', 'Yonghong Tian', 'yonghong tian')<br/>('5765799', 'Yaowei Wang', 'yaowei wang')<br/>('34097174', 'Tiejun Huang', 'tiejun huang')</td><td></td></tr><tr><td>c87d5036d3a374c66ec4f5870df47df7176ce8b9</td><td>ORIGINAL RESEARCH
+<br/>published: 12 July 2018
+<br/>doi: 10.3389/fpsyg.2018.01190
+<br/>Temporal Dynamics of Natural Static
+<br/>Emotional Facial Expressions
+<br/>Decoding: A Study Using Event- and
+<br/>Eye Fixation-Related Potentials
+<br/><b>GIPSA-lab, Institute of Engineering, Universit Grenoble Alpes, Centre National de la Recherche Scienti que, Grenoble INP</b><br/>Grenoble, France, 2 Department of Conception and Control of Aeronautical and Spatial Vehicles, Institut Supérieur de
+<br/>l’Aéronautique et de l’Espace, Université Fédérale de Toulouse, Toulouse, France, 3 Laboratoire InterUniversitaire de
+<br/>Psychologie – Personnalité, Cognition, Changement Social, Université Grenoble Alpes, Université Savoie Mont Blanc,
+<br/>Grenoble, France, 4 Exploration Fonctionnelle du Système Nerveux, Pôle Psychiatrie, Neurologie et Rééducation
+<br/>Neurologique, CHU Grenoble Alpes, Grenoble, France, 5 Université Grenoble Alpes, Inserm, CHU Grenoble Alpes, Grenoble
+<br/>Institut des Neurosciences, Grenoble, France
+<br/>This study aims at examining the precise temporal dynamics of the emotional facial
+<br/>decoding as it unfolds in the brain, according to the emotions displayed. To characterize
+<br/>this processing as it occurs in ecological settings, we focused on unconstrained visual
+<br/>explorations of natural emotional faces (i.e., free eye movements). The General Linear
+<br/>Model (GLM; Smith and Kutas, 2015a,b; Kristensen et al., 2017a) enables such a
+<br/>depiction. It allows deconvolving adjacent overlapping responses of the eye fixation-
+<br/>related potentials (EFRPs) elicited by the subsequent fixations and the event-related
+<br/>potentials (ERPs) elicited at the stimuli onset. Nineteen participants were displayed
+<br/>with spontaneous static facial expressions of emotions (Neutral, Disgust, Surprise, and
+<br/>Happiness) from the DynEmo database (Tcherkassof et al., 2013). Behavioral results
+<br/>on participants’ eye movements show that the usual diagnostic features in emotional
+<br/>decoding (eyes for negative facial displays and mouth for positive ones) are consistent
+<br/>with the literature. The impact of emotional category on both the ERPs and the EFRPs
+<br/>elicited by the free exploration of the emotional faces is observed upon the temporal
+<br/>dynamics of the emotional facial expression processing. Regarding the ERP at stimulus
+<br/>onset, there is a significant emotion-dependent modulation of the P2–P3 complex
+<br/>and LPP components’ amplitude at the left frontal site for the ERPs computed by
+<br/>averaging. Yet, the GLM reveals the impact of subsequent fixations on the ERPs time-
+<br/>locked on stimulus onset. Results are also in line with the valence hypothesis. The
+<br/>observed differences between the two estimation methods (Average vs. GLM) suggest
+<br/>the predominance of the right hemisphere at the stimulus onset and the implication
+<br/>of the left hemisphere in the processing of the information encoded by subsequent
+<br/>fixations. Concerning the first EFRP, the Lambda response and the P2 component are
+<br/>modulated by the emotion of surprise compared to the neutral emotion, suggesting
+<br/>Edited by:
+<br/>Eva G. Krumhuber,
+<br/><b>University College London</b><br/>United Kingdom
+<br/>Reviewed by:
+<br/>Marie Arsalidou,
+<br/><b>National Research University Higher</b><br/>School of Economics, Russia
+<br/>Jaana Simola,
+<br/><b>University of Helsinki, Finland</b><br/>*Correspondence:
+<br/>Specialty section:
+<br/>This article was submitted to
+<br/>Emotion Science,
+<br/>a section of the journal
+<br/>Frontiers in Psychology
+<br/>Received: 07 March 2018
+<br/>Accepted: 20 June 2018
+<br/>Published: 12 July 2018
+<br/>Citation:
+<br/>Guérin-Dugué A, Roy RN,
+<br/>Kristensen E, Rivet B, Vercueil L and
+<br/>Tcherkassof A (2018) Temporal
+<br/>Dynamics of Natural Static Emotional
+<br/>Facial Expressions Decoding: A Study
+<br/>Using Event- and Eye Fixation-Related
+<br/>Potentials. Front. Psychol. 9:1190.
+<br/>doi: 10.3389/fpsyg.2018.01190
+<br/>Frontiers in Psychology | www.frontiersin.org
+<br/>July 2018 | Volume 9 | Article 1190
+</td><td>('7200702', 'Anne Guérin-Dugué', 'anne guérin-dugué')<br/>('20903548', 'Raphaëlle N. Roy', 'raphaëlle n. roy')<br/>('33987947', 'Emmanuelle Kristensen', 'emmanuelle kristensen')<br/>('48223466', 'Bertrand Rivet', 'bertrand rivet')<br/>('2544058', 'Laurent Vercueil', 'laurent vercueil')<br/>('3209946', 'Anna Tcherkassof', 'anna tcherkassof')<br/>('7200702', 'Anne Guérin-Dugué', 'anne guérin-dugué')</td><td>anne.guerin@gipsa-lab.grenoble-inp.fr
+</td></tr><tr><td>c8e84cdff569dd09f8d31e9f9ba3218dee65e961</td><td>Dictionaries for Image and Video-based Face Recognition
+<br/><b>Center for Automation Research, UMIACS, University of Maryland, College Park, MD 20742, USA</b><br/><b>National Institute of Standards and Technology, Gaithersburg, MD 20899, USA</b><br/>In recent years, sparse representation and dictionary learning-based methods have emerged as
+<br/>powerful tools for efficiently processing data in non-traditional ways. A particular area of promise
+<br/>for these theories is face recognition.
+<br/>In this paper, we review the role of sparse representation
+<br/>and dictionary learning for efficient face identification and verification. Recent face recognition
+<br/>algorithms from still images, videos, and ambiguously label imagery are reviewed. In particular,
+<br/>discriminative dictionary learning algorithms as well as methods based on weakly supervised learning
+<br/>and domain adaptation are summarized. Some of the compelling challenges and issues that confront
+<br/>research in face recognition using sparse representations and dictionary learning are outlined.
+<br/>OCIS codes: (150.0150) Machine vision; (100.5010) Pattern recognition; (150.1135) Algorithms;
+<br/>(100.0100) Image processing.
+<br/>I.
+<br/>INTRODUCTION
+<br/>Face recognition is a challenging problem that has been
+<br/>actively researched for over two decades [59]. Current
+<br/>systems work very well when the test image is captured
+<br/>under controlled conditions [35]. However, their perfor-
+<br/>mance degrades significantly when the test image con-
+<br/>tains variations that are not present in the training im-
+<br/>ages. Some of these variations include illumination, pose,
+<br/>expression, cosmetics, and aging.
+<br/>It has been observed that since human faces have sim-
+<br/>ilar overall configuration, face images can be described
+<br/>by a relatively low dimensional subspace. As a result,
+<br/>holistic dimensionality reduction subspace methods such
+<br/>as Principle Component Analysis (PCA) [51], Linear
+<br/>Discriminant Analysis (LDA) [3], [17] and Independent
+<br/>Component Analysis (ICA) [2] have been proposed for
+<br/>the task of face recognition. These approaches can be
+<br/>classified into either generative or discriminative meth-
+<br/>ods. An advantage of using generative approaches is their
+<br/>reduced sensitivity to noise [59], [55].
+<br/>In recent years, generative and discriminative ap-
+<br/>proaches based on sparse representations have been gain-
+<br/>ing a lot of traction in biometrics recognition [32].
+<br/>In
+<br/>sparse representation, given a signal and a redundant dic-
+<br/>tionary, the goal is to represent this signal as a sparse lin-
+<br/>ear combination of elements (also known as atoms) from
+<br/>this dictionary. Finding a sparse representation entails
+<br/>solving a convex optimization problem. Using sparse rep-
+<br/>resentation, one can extract semantic information from
+<br/>the signal. For instance, one can sparsely represent a test
+<br/>sample in an overcomplete dictionary whose elements are
+<br/>the training samples themselves, provided that sufficient
+<br/>training samples are available from each class [55]. An in-
+<br/>teresting property of sparse representations is that they
+<br/>are robust to noise and occlusion. For instance, good
+<br/>performance under partial occlusion, missing data and
+<br/>variations in background has been demonstrated in many
+<br/>sparsity-based methods [55], [38]. The ability of sparse
+<br/>representations to extract meaningful information is due
+<br/>in part to the fact that face images belonging to the same
+<br/>person lie on a low-dimensional manifold.
+<br/>In order to successfully apply sparse representation to
+<br/>face recognition problems, one needs to correctly choose
+<br/>an appropriate dictionary. Rather than using a pre-
+<br/>determined dictionary, e.g. wavelets, one can train an
+<br/>overcomplete data-driven dictionary. An appropriately
+<br/>trained data-driven dictionary can simultaneously span
+<br/>the subspace of all faces and support optimal discrimi-
+<br/>nation of the classes. These dictionaries tend to provide
+<br/>better classification accuracy than a predetermined dic-
+<br/>tionary [31].
+<br/>Data-driven dictionaries can produce state-of-the-art
+<br/>results in various face recognition tasks. However, when
+<br/>the target data has a different distribution than the
+<br/>source data, the learned sparse representation may not
+<br/>be optimal. As a result, one needs to adapt these learned
+<br/>representations from one domain to the other. The prob-
+<br/>lem of transferring a representation or classifier from one
+<br/>domain to the other is known as domain adaptation or
+<br/>domain transfer learning [22], [42].
+<br/>In this paper, we summarize some of the recent ad-
+<br/>vances in still- and video-based face recognition using
+<br/>sparse representation and dictionary learning. Discrimi-
+<br/>native dictionary learning algorithms as well as methods
+<br/>based on weakly supervised learning and domain adapta-
+<br/>tion are summarized. These examples show that sparsity
+<br/>and dictionary learning are powerful tools for face recog-
+<br/>nition. Understanding how well these algorithms work
+<br/>can greatly improve our insights into some of the most
+<br/>compelling challenges in still- and video-based face recog-
+<br/>nition.
+<br/>A. Organization of the paper
+<br/>This paper is organized as follows. In Section II, we
+<br/>briefly review the idea behind sparse representation and
+<br/>dictionary learning. Section III presents some recent
+</td><td>('1751078', 'Yi-Chen Chen', 'yi-chen chen')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td>∗ Corresponding author: pvishalm@umiacs.umd.edu
+</td></tr><tr><td>c8829013bbfb19ccb731bd54c1a885c245b6c7d7</td><td>Flexible Template and Model Matching Using Image Intensity
+<br/><b>University College London</b><br/>Department of Computer Science
+<br/>Gower Street, London, United Kingdom
+</td><td>('31557997', 'Bernard F. Buxton', 'bernard f. buxton')<br/>('1797883', 'Vasileios Zografos', 'vasileios zografos')</td><td>{B.Buxton, V.Zografos}@cs.ucl.ac.uk
+</td></tr><tr><td>c81ee278d27423fd16c1a114dcae486687ee27ff</td><td>Search Based Face Annotation Using Weakly
+<br/>Labeled Facial Images
+<br/><b>Savitribai Phule Pune University</b><br/><b>D.Y.Patil Institute of Engineering and Technology, Pimpri, Pune</b><br/>Mahatma Phulenagar, 120/2 Mahaganpati soc, Chinchwad, Pune-19, MH, India
+<br/><b>D.Y.Patil Institute of Engineering and Technology, Pimpri, Pune-18, Savitribai Phule Pune University</b><br/>DYPIET, Pimpri, Pune-18, MH, India
+</td><td>('15731441', 'Shital Shinde', 'shital shinde')<br/>('3392505', 'Archana Chaugule', 'archana chaugule')</td><td></td></tr><tr><td>c83a05de1b4b20f7cd7cd872863ba2e66ada4d3f</td><td>BREUER, KIMMEL: A DEEP LEARNING PERSPECTIVE ON FACIAL EXPRESSIONS
+<br/>A Deep Learning Perspective on the Origin
+<br/>of Facial Expressions
+<br/>Department of Computer Science
+<br/><b>Technion - Israel Institute of Technology</b><br/>Technion City, Haifa, Israel
+<br/>Figure 1: Demonstration of the filter visualization process.
+</td><td>('50484701', 'Ran Breuer', 'ran breuer')<br/>('1692832', 'Ron Kimmel', 'ron kimmel')</td><td>rbreuer@cs.technion.ac.il
+<br/>ron@cs.technion.ac.il
+</td></tr><tr><td>c88ce5ef33d5e544224ab50162d9883ff6429aa3</td><td>Face Match for Family Reunification:
+<br/>Real-world Face Image Retrieval
+<br/>U.S. National Library of Medicine, 8600 Rockville Pike, Bethesda, MD 20894, USA
+<br/><b>Central Washington University, 400 E. University Way, Ellensburg, WA 98926, USA</b></td><td>('1744255', 'Eugene Borovikov', 'eugene borovikov')<br/>('34928283', 'Michael Gill', 'michael gill')<br/>('35029039', 'Szilárd Vajda', 'szilárd vajda')</td><td>(FaceMatch@NIH.gov)
+<br/>(Szilard.Vajda@cwu.edu)
+</td></tr><tr><td>c822bd0a005efe4ec1fea74de534900a9aa6fb93</td><td>Face Recognition Committee Machines:
+<br/>Dynamic Vs. Static Structures
+<br/>Department of Computer Science and Engineering
+<br/><b>The Chinese University of Hong Kong</b><br/>Shatin, Hong Kong
+</td><td>('2899702', 'Ho-Man Tang', 'ho-man tang')<br/>('1681775', 'Michael R. Lyu', 'michael r. lyu')<br/>('1706259', 'Irwin King', 'irwin king')</td><td>fhmtang, lyu, kingg@cse.cuhk.edu.hk
+</td></tr><tr><td>c88c21eb9a8e08b66c981db35f6556f4974d27a8</td><td>Attribute Learning
+<br/>Using Joint Human and Machine Computation
+<br/>Edith Law
+<br/>April 2011
+<br/>Machine Learning Department
+<br/><b>Carnegie Mellon University</b><br/>Pittsburgh, PA 15213
+<br/>Thesis Committee:
+<br/>Luis von Ahn (co-Chair)
+<br/>Tom Mitchell (co-Chair)
+<br/>Jaime Carbonell
+<br/>Eric Horvitz, Microsoft Research
+<br/>Rob Miller, MIT
+<br/>Submitted in partial fulfillment of the requirements
+<br/>for the degree of Doctor of Philosophy.
+<br/>Copyright c(cid:13) 2011 Edith Law
+</td><td></td><td></td></tr><tr><td>c8adbe00b5661ab9b3726d01c6842c0d72c8d997</td><td>Deep Architectures for Face Attributes
+<br/>Computer Vision and Machine Learning Group, Flickr, Yahoo,
+</td><td>('3469274', 'Tobi Baumgartner', 'tobi baumgartner')<br/>('31922487', 'Jack Culpepper', 'jack culpepper')</td><td>{tobi, jackcul}@yahoo-inc.com
+</td></tr><tr><td>fb4545782d9df65d484009558e1824538030bbb1</td><td></td><td></td><td></td></tr><tr><td>fbf196d83a41d57dfe577b3a54b1b7fa06666e3b</td><td>Extreme Learning Machine for Large-Scale
+<br/>Action Recognition
+<br/><b>Bo gazi ci University, Turkey</b></td><td>('1764521', 'Albert Ali Salah', 'albert ali salah')</td><td></td></tr><tr><td>fb2cc3501fc89f92f5ee130d66e69854f8a9ddd1</td><td>Learning Discriminative Features via Label Consistent Neural Network
+<br/>†Raytheon BBN Technologies, Cambridge, MA, 02138
+<br/><b>University of Maryland, College Park, MD</b></td><td>('34145947', 'Zhuolin Jiang', 'zhuolin jiang')<br/>('1691470', 'Yaming Wang', 'yaming wang')<br/>('2502892', 'Viktor Rozgic', 'viktor rozgic')</td><td>{zjiang,wandrews,vrozgic}@bbn.com, {wym,lsd}@umiacs.umd.edu
+</td></tr><tr><td>fbb6ee4f736519f7231830a8e337b263e91f06fe</td><td>Illumination Robust Facial Feature Detection via
+<br/>Decoupled Illumination and Texture Features
+<br/><b>University of Waterloo, Waterloo ON N2L3G1, Canada</b><br/>WWW home page: http://vip.uwaterloo.ca/ (cid:63)
+</td><td>('2797326', 'Brendan Chwyl', 'brendan chwyl')<br/>('1685952', 'Alexander Wong', 'alexander wong')<br/>('1720258', 'David A. Clausi', 'david a. clausi')</td><td>{bchwyl,a28wong,dclausi}@uwaterloo.ca,
+</td></tr><tr><td>fb87045600da73b07f0757f345a937b1c8097463</td><td>JIA, YANG, ZHU, KUANG, NIU, CHAN: RCCR FOR LARGE POSE
+<br/>Reflective Regression of 2D-3D Face Shape
+<br/>Across Large Pose
+<br/><b>The University of Hong Kong</b><br/><b>National University of Defense</b><br/>Technology
+<br/>3 Tencent Inc.
+<br/>4 Sensetime Inc.
+</td><td>('34760532', 'Xuhui Jia', 'xuhui jia')<br/>('2966679', 'Heng Yang', 'heng yang')<br/>('35130187', 'Xiaolong Zhu', 'xiaolong zhu')<br/>('1874900', 'Zhanghui Kuang', 'zhanghui kuang')<br/>('1939702', 'Yifeng Niu', 'yifeng niu')<br/>('40392393', 'Kwok-Ping Chan', 'kwok-ping chan')</td><td>xhjia@cs.hku.hk
+<br/>yanghengnudt@gmail.com
+<br/>lucienzhu@gmail.com
+<br/>kuangzhanghui@sensetime.com
+<br/>niuyifeng@nudt.edu.cn
+<br/>kpchan@cs.hku.hk
+</td></tr><tr><td>fb85867c989b9ee6b7899134136f81d6372526a9</td><td>Learning to Align Images using Weak Geometric Supervision
+<br/><b>Georgia Institute of Technology</b><br/>2 Microsoft Research
+</td><td>('1703391', 'Jing Dong', 'jing dong')<br/>('3288815', 'Byron Boots', 'byron boots')<br/>('2038264', 'Frank Dellaert', 'frank dellaert')<br/>('1757937', 'Sudipta N. Sinha', 'sudipta n. sinha')</td><td></td></tr><tr><td>fb5280b80edcf088f9dd1da769463d48e7b08390</td><td></td><td></td><td></td></tr><tr><td>fb54d3c37dc82891ff9dc7dd8caf31de00c40d6a</td><td>Beauty and the Burst:
+<br/>Remote Identification of Encrypted Video Streams
+<br/><b>Tel Aviv University, Cornell Tech</b><br/>Cornell Tech
+<br/><b>Tel Aviv University, Columbia University</b></td><td>('39347554', 'Roei Schuster', 'roei schuster')<br/>('1723945', 'Vitaly Shmatikov', 'vitaly shmatikov')<br/>('2337345', 'Eran Tromer', 'eran tromer')</td><td>rs864@cornell.edu
+<br/>shmat@cs.cornell.edu
+<br/>tromer@cs.tau.ac.il
+</td></tr><tr><td>fba464cb8e3eff455fe80e8fb6d3547768efba2f</td><td>
+<br/>International Journal of Engineering and Applied Sciences (IJEAS)
+<br/> ISSN: 2394-3661, Volume-3, Issue-2, February 2016
+<br/>Survey Paper on Emotion Recognition
+<br/>
+</td><td>('40502287', 'Prachi Shukla', 'prachi shukla')<br/>('2229305', 'Sandeep Patil', 'sandeep patil')</td><td></td></tr><tr><td>fbb2f81fc00ee0f257d4aa79bbef8cad5000ac59</td><td>Reading Hidden Emotions: Spontaneous
+<br/>Micro-expression Spotting and Recognition
+</td><td>('50079101', 'Xiaobai Li', 'xiaobai li')<br/>('1836646', 'Xiaopeng Hong', 'xiaopeng hong')<br/>('39056318', 'Antti Moilanen', 'antti moilanen')<br/>('47932625', 'Xiaohua Huang', 'xiaohua huang')<br/>('1757287', 'Guoying Zhao', 'guoying zhao')</td><td></td></tr><tr><td>fb084b1fe52017b3898c871514cffcc2bdb40b73</td><td>RESEARCH ARTICLE
+<br/>Illumination Normalization of Face Image
+<br/>Based on Illuminant Direction Estimation and
+<br/>Improved Retinex
+<br/><b>School of Electronic and Information Engineering, Beihang University, Beijing, 100191, China</b><br/><b>Polytechnic University of Milan, Milan, 20156, Italy, 3 Applied Electronics</b><br/><b>University POLITEHNICA Timisoara, Timisoara, 300223, Romania</b></td><td>('1699804', 'Jizheng Yi', 'jizheng yi')<br/>('1724834', 'Xia Mao', 'xia mao')<br/>('35153304', 'Lijiang Chen', 'lijiang chen')<br/>('3399189', 'Yuli Xue', 'yuli xue')<br/>('1734732', 'Alberto Rovetta', 'alberto rovetta')<br/>('1860887', 'Catalin-Daniel Caleanu', 'catalin-daniel caleanu')</td><td>* clj@ee.buaa.edu.cn
+</td></tr><tr><td>fb9ad920809669c1b1455cc26dbd900d8e719e61</td><td>3D Gaze Estimation from Remote RGB-D Sensors
+<br/>THÈSE NO 6680 (2015)
+<br/>PRÉSENTÉE LE 9 OCTOBRE 2015
+<br/>À LA FACULTÉ DES SCIENCES ET TECHNIQUES DE L'INGÉNIEUR
+<br/>LABORATOIRE DE L'IDIAP
+<br/>PROGRAMME DOCTORAL EN GÉNIE ÉLECTRIQUE
+<br/>ÉCOLE POLYTECHNIQUE FÉDÉRALE DE LAUSANNE
+<br/>POUR L'OBTENTION DU GRADE DE DOCTEUR ÈS SCIENCES
+<br/>PAR
+<br/>acceptée sur proposition du jury:
+<br/>Prof. K. Aminian, président du jury
+<br/>Dr J.-M. Odobez, directeur de thèse
+<br/>Prof. L.-Ph. Morency, rapporteur
+<br/>Prof. D. Witzner Hansen, rapporteur
+<br/>Dr R. Boulic, rapporteur
+<br/>Suisse
+<br/>2015
+</td><td>('9206411', 'Kenneth Alberto Funes Mora', 'kenneth alberto funes mora')</td><td></td></tr><tr><td>ed28e8367fcb7df7e51963add9e2d85b46e2d5d6</td><td>International J. of Engg. Research & Indu. Appls. (IJERIA).
+<br/>ISSN 0974-1518, Vol.9, No. III (December 2016), pp.23-42
+<br/>A NOVEL APPROACH OF FACE RECOGNITION USING
+<br/>CONVOLUTIONAL NEURAL NETWORKS WITH AUTO
+<br/>ENCODER
+<br/>1 Research Scholar, Dept. of Electronics & Communication Engineering,
+<br/><b>Rayalaseema University Kurnool, Andhra Pradesh</b><br/> 2 Research Supervisor, Professor, Dept. of Electronics & Communication Engineering,
+<br/><b>Madanapalle Institute of Technology and Science, Madanapalle, Andhra Pradesh</b></td><td>('7006226', 'S. A. K JILANI', 's. a. k jilani')</td><td></td></tr><tr><td>ed0cf5f577f5030ac68ab62fee1cf065349484cc</td><td>Revisiting Data Normalization for
+<br/>Appearance-Based Gaze Estimation
+<br/><b>Max Planck Institute for Informatics</b><br/>Saarland Informatics Campus,
+<br/>Graduate School of Information
+<br/>Science and Technology, Osaka
+<br/><b>Max Planck Institute for Informatics</b><br/>Saarland Informatics Campus,
+<br/>Germany
+<br/><b>University, Japan</b><br/>Germany
+</td><td>('2520795', 'Xucong Zhang', 'xucong zhang')<br/>('1751242', 'Yusuke Sugano', 'yusuke sugano')<br/>('3194727', 'Andreas Bulling', 'andreas bulling')</td><td>xczhang@mpi-inf.mpg.de
+<br/>sugano@ist.osaka-u.ac.jp
+<br/>bulling@mpi-inf.mpg.de
+</td></tr><tr><td>edde81b2bdd61bd757b71a7b3839b6fef81f4be4</td><td>SHIH, MALLYA, SINGH, HOIEM: MULTI-PROPOSAL PART LOCALIZATION
+<br/>Part Localization using Multi-Proposal
+<br/>Consensus for Fine-Grained Categorization
+<br/><b>University of Illinois</b><br/>Urbana-Champaign
+<br/>IL, US
+</td><td>('2525469', 'Kevin J. Shih', 'kevin j. shih')<br/>('36508529', 'Arun Mallya', 'arun mallya')<br/>('37415643', 'Saurabh Singh', 'saurabh singh')<br/>('2433269', 'Derek Hoiem', 'derek hoiem')</td><td>kjshih2@illinois.edu
+<br/>amallya2@illinois.edu
+<br/>ss1@illinois.edu
+<br/>dhoiem@illinois.edu
+</td></tr><tr><td>edf98a925bb24e39a6e6094b0db839e780a77b08</td><td>Simplex Representation for Subspace Clustering
+<br/><b>The Hong Kong Polytechnic University, Hong Kong SAR, China</b><br/><b>School of Mathematics and Statistics, Xi an Jiaotong University, Xi an, China</b><br/>Spectral clustering based methods have achieved leading performance on subspace clustering problem. State-of-the-art subspace
+<br/>clustering methods follow a three-stage framework: compute a coefficient matrix from the data by solving an optimization problem;
+<br/>construct an affinity matrix from the coefficient matrix; and obtain the final segmentation by applying spectral clustering to the
+<br/>affinity matrix. To construct a feasible affinity matrix, these methods mostly employ the operations of exponentiation, absolutely
+<br/>symmetrization, or squaring, etc. However, all these operations will force the negative entries (which cannot be explicitly avoided)
+<br/>the data. In this paper, we introduce the simplex representation (SR) to remedy this problem of representation based subspace
+<br/>clustering. We propose an SR based least square regression (SRLSR) model to construct a physically more meaningful affinity matrix
+<br/>by integrating the nonnegative property of graph into the representation coefficient computation while maintaining the discrimination
+<br/>of original data. The SRLSR model is reformulated as a linear equality-constrained problem, which is solved efficiently under the
+<br/>alternating direction method of multipliers framework. Experiments on benchmark datasets demonstrate that the proposed SRLSR
+<br/>algorithm is very efficient and outperforms state-of-the-art subspace clustering methods on accuracy.
+<br/>Index Terms—Subspace clustering, simplex representation, spectral clustering.
+<br/>I. INTRODUCTION
+<br/>H IGH-dimensional data are commonly observed in var-
+<br/>ious computer vision and image processing prob-
+<br/>lems. Contrary to their high-dimensional appearance,
+<br/>the
+<br/>latent structure of those data usually lie in a union of
+<br/>low-dimensional subspaces [1]. Recovering the latent low-
+<br/>dimensional subspaces from the high-dimensional observation
+<br/>can not only reduce the computational cost and memory
+<br/>requirements of subsequent algorithms, but also reduce the
+<br/>learning and computer vision tasks, we need to find the clusters
+<br/>of high-dimensional data such that each cluster can be fitted
+<br/>by a subspace, which is referred to as the subspace clustering
+<br/>(SC) problem [1].
+<br/>SC has been extensively studied in the past decades [2]–
+<br/>[33]. Most of existing SC methods can be categorized into
+<br/>four categories: iterative based methods [2], [3], algebraic
+<br/>based methods [4]–[6], statistical based methods [7]–[10], and
+<br/>spectral clustering based methods [14]–[33]. Among these four
+<br/>categories, spectral clustering based methods have become the
+<br/>mainstream due to their theoretical guarantees and promising
+<br/>performance on real-world applications such as motion seg-
+<br/>mentation [16] and face clustering [18]. The spectral clustering
+<br/>based methods usually follow a three-step framework: Step
+<br/>1) obtain a coefficient matrix of the data points by solving
+<br/>an optimization problem, which usually incorporates sparse
+<br/>or low rank regularizations due to their good mathematical
+<br/>properties; Step 2) construct an affinity matrix from the
+<br/>coefficient matrix by employing exponentiation [14], abso-
+<br/>lutely symmetrization [15], [16], [20], [23]–[31], and squaring
+<br/>operations [17]–[19], [32], [33], etc.; Step 3) apply spectral
+<br/>analysis techniques [34] to the affinity matrix and obtain the
+<br/>final clusters of the data points.
+<br/>Most spectral clustering based methods [14]–[33] obtain
+<br/>the expected coefficient matrix under the self-expressiveness
+<br/>property [15], [16], which states that each data point in a union
+<br/>of multiple subspaces can be linearly represented by the other
+<br/>data points in the same subspace. However, in some real-world
+<br/>applications, the data points lie in a union of multiple affine
+<br/>subspaces rather than linear subspaces [16]. A trivial solution
+<br/>is to ignore the affine structure of the data points and directly
+<br/>perform clustering as in the subspaces of linear structures.
+<br/>A non-negligible drawback of this solution is the increasing
+<br/>dimension of the intersection of two subspaces, which can
+<br/>make the subspaces indistinguishable from each other [16]. To
+<br/>cluster data points lying in affine subspaces instead of linear
+<br/>subspaces, the affine constraint is introduced [15], [16], in
+<br/>which each data point can be written as an affine combination
+<br/>of other points with the sum of coefficients being one.
+<br/>Despite their high clustering accuracy, most of spectral
+<br/>clustering based methods [14]–[33] suffer from three major
+<br/>drawbacks. First, under the affine constraint, the coefficient
+<br/>vector is not flexible enough to handle real-world applications
+<br/>Second, negative coefficients cannot be fully avoided since
+<br/>the existing methods do not explicitly consider non-negative
+<br/>constraint
+<br/>in real-world applications,
+<br/>it is physically problematic to reconstruct a data point by
+<br/>allowing the others to “cancel each other out” with complex
+<br/>additions and subtractions [35]. Thus, most of these methods
+<br/>are limited by being stranded at this physical bottleneck. Third,
+<br/>the exponentiation, absolutely symmetrization, and squaring
+<br/>operations in Step 2 will force the negative coefficients to
+<br/>among the data points.
+<br/>in Step 1. However,
+<br/>To solve the three drawbacks mentioned above, we intro-
+<br/>duce the Simplex Representation (SR) for spectral clustering
+<br/>based SC. Specifically, the SR is introduced from two in-
+<br/>terdependent aspects. First, to broaden its adaptivity to real
+<br/>scenarios, we extend the affine constraint to the scaled affine
+<br/>constraint, in which the coefficient vector in the optimization
+</td><td>('47882783', 'Jun Xu', 'jun xu')<br/>('1803714', 'Deyu Meng', 'deyu meng')<br/>('48571185', 'Lei Zhang', 'lei zhang')</td><td></td></tr><tr><td>ed08ac6da6f8ead590b390b1d14e8a9b97370794</td><td>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/> ISSN(Online): 2320-9801
+<br/>
+<br/> ISSN (Print): 2320-9798
+<br/>International Journal of Innovative Research in Computer
+<br/>and Communication Engineering
+<br/>(An ISO 3297: 2007 Certified Organization)
+<br/>Vol. 3, Issue 9, September 2015
+<br/>An Efficient Approach for 3D Face
+<br/>Recognition Using ANN Based Classifiers
+<br/><b>Shri Shivaji College, Parbhani, M.S, India</b><br/><b>Arts, Commerce and Science College, Gangakhed, M.S, India</b><br/><b>Dnyanopasak College Parbhani, M.S, India</b></td><td>('34443070', 'Vaibhav M. Pathak', 'vaibhav m. pathak')</td><td></td></tr><tr><td>ed9d11e995baeec17c5d2847ec1a8d5449254525</td><td>Efficient Gender Classification Using a Deep LDA-Pruned Net
+<br/><b>McGill University</b><br/>845 Sherbrooke Street W, Montreal, QC H3A 0G4, Canada
+</td><td>('48087399', 'Qing Tian', 'qing tian')<br/>('1699104', 'Tal Arbel', 'tal arbel')<br/>('1713608', 'James J. Clark', 'james j. clark')</td><td>{qtian,arbel,clark}@cim.mcgill.ca
+</td></tr><tr><td>edef98d2b021464576d8d28690d29f5431fd5828</td><td>Pixel-Level Alignment of Facial Images
+<br/>for High Accuracy Recognition
+<br/>Using Ensemble of Patches
+</td><td>('1782221', 'Hoda Mohammadzade', 'hoda mohammadzade')<br/>('35809715', 'Amirhossein Sayyafan', 'amirhossein sayyafan')<br/>('24033665', 'Benyamin Ghojogh', 'benyamin ghojogh')</td><td></td></tr><tr><td>ed04e161c953d345bcf5b910991d7566f7c486f7</td><td>Combining facial expression analysis and synthesis on a
+<br/>Mirror my emotions!
+<br/>robot
+</td><td>('2185308', 'Stefan Sosnowski', 'stefan sosnowski')<br/>('39124596', 'Christoph Mayer', 'christoph mayer')<br/>('1699132', 'Bernd Radig', 'bernd radig')</td><td></td></tr><tr><td>ed07856461da6c7afa4f1782b5b607b45eebe9f6</td><td>3D Morphable Models as Spatial Transformer Networks
+<br/><b>University of York, UK</b><br/><b>Centre for Vision, Speech and Signal Processing, University of Surrey, UK</b></td><td>('39180407', 'Anil Bas', 'anil bas')<br/>('39976184', 'Patrik Huber', 'patrik huber')<br/>('1687021', 'William A. P. Smith', 'william a. p. smith')<br/>('46649582', 'Muhammad Awais', 'muhammad awais')<br/>('1748684', 'Josef Kittler', 'josef kittler')</td><td>{ab1792,william.smith}@york.ac.uk, {p.huber,m.a.rana,j.kittler}@surrey.ac.uk
+</td></tr><tr><td>ed1886e233c8ecef7f414811a61a83e44c8bbf50</td><td>Deep Alignment Network: A convolutional neural network for robust face
+<br/>alignment
+<br/><b>Warsaw University of Technology</b></td><td>('2393538', 'Marek Kowalski', 'marek kowalski')<br/>('1930272', 'Jacek Naruniec', 'jacek naruniec')<br/>('1760267', 'Tomasz Trzcinski', 'tomasz trzcinski')</td><td>m.kowalski@ire.pw.edu.pl, j.naruniec@ire.pw.edu.pl, t.trzcinski@ii.pw.edu.pl
+</td></tr><tr><td>edd7504be47ebc28b0d608502ca78c0aea6a65a2</td><td>Recurrent Residual Learning for Action
+<br/>Recognition
+<br/><b>University of Bonn, Germany</b></td><td>('3434584', 'Ahsan Iqbal', 'ahsan iqbal')<br/>('32774629', 'Alexander Richard', 'alexander richard')<br/>('2946643', 'Juergen Gall', 'juergen gall')</td><td>{iqbalm,richard,kuehne,gall}@iai.uni-bonn.de
+</td></tr><tr><td>ed388878151a3b841f95a62c42382e634d4ab82e</td><td>DenseImage Network: Video Spatial-Temporal Evolution
+<br/>Encoding and Understanding
+<br/><b>Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China</b><br/><b>University of Chinese Academy of Sciences, Beijing, China</b></td><td>('3162023', 'Xiaokai Chen', 'xiaokai chen')<br/>('2027479', 'Ke Gao', 'ke gao')</td><td>{chenxiaokai,kegao}@ict.ac.cn
+</td></tr><tr><td>edbb8cce0b813d3291cae4088914ad3199736aa0</td><td>Proceedings of the Twenty-Fifth AAAI Conference on Artificial Intelligence
+<br/>Efficient Subspace Segmentation via Quadratic Programming
+<br/><b>College of Computer Science and Technology, Zhejiang University, China</b><br/><b>National University of Singapore, Singapore</b><br/><b>School of Information Systems, Singapore Management University, Singapore</b></td><td>('35019367', 'Shusen Wang', 'shusen wang')<br/>('2026127', 'Tiansheng Yao', 'tiansheng yao')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')<br/>('38203359', 'Jialie Shen', 'jialie shen')</td><td>wssatzju@gmail.com, eleyuanx@nus.edu.sg, tsyaoo@gmail.com, eleyans@nus.edu.sg, jlshen@smu.edu.sg
+</td></tr><tr><td>edff76149ec44f6849d73f019ef9bded534a38c2</td><td>Privacy-Preserving Visual Learning Using
+<br/>Doubly Permuted Homomorphic Encryption
+<br/><b>The University of Tokyo</b><br/>Tokyo, Japan
+<br/><b>Michigan State University</b><br/>East Lansing, MI, USA
+<br/><b>The University of Tokyo</b><br/>Tokyo, Japan
+<br/><b>Carnegie Mellon University</b><br/>Pittsburgh, PA, USA
+</td><td>('1899753', 'Ryo Yonetani', 'ryo yonetani')<br/>('2232940', 'Vishnu Naresh Boddeti', 'vishnu naresh boddeti')<br/>('9467266', 'Yoichi Sato', 'yoichi sato')<br/>('37991449', 'Kris M. Kitani', 'kris m. kitani')</td><td>yonetani@iis.u-tokyo.ac.jp
+<br/>vishnu@msu.edu
+<br/>kkitani@cs.cmu.edu
+<br/>ysato@iis.u-tokyo.ac.jp
+</td></tr><tr><td>ed96f2eb1771f384df2349879970065a87975ca7</td><td>Adversarial Attacks on Face Detectors using Neural
+<br/>Net based Constrained Optimization
+<br/>Department of Electrical and
+<br/>Computer Engineering
+<br/><b>University of Toronto</b><br/>Department of Electrical and
+<br/>Computer Engineering
+<br/><b>University of Toronto</b></td><td>('26418299', 'Avishek Joey Bose', 'avishek joey bose')<br/>('3241876', 'Parham Aarabi', 'parham aarabi')</td><td>Email: joey.bose@mail.utoronto.ca
+<br/>Email: parham@ecf.utoronto.ca
+</td></tr><tr><td>c178a86f4c120eca3850a4915134fff44cbccb48</td><td></td><td></td><td></td></tr><tr><td>c1d2d12ade031d57f8d6a0333cbe8a772d752e01</td><td>Journal of Math-for-Industry, Vol.2(2010B-5), pp.147–156
+<br/>Convex optimization techniques for the efficient recovery of a sparsely
+<br/>corrupted low-rank matrix
+<br/>D 案
+<br/>Received on August 10, 2010 / Revised on August 31, 2010
+<br/>E 案
+</td><td>('2372029', 'Silvia Gandy', 'silvia gandy')<br/>('1685085', 'Isao Yamada', 'isao yamada')</td><td></td></tr><tr><td>c180f22a9af4a2f47a917fd8f15121412f2d0901</td><td>Facial Expression Recognition by ICA with
+<br/>Selective Prior
+<br/>Department of Information Processing, School of Information Science,
+<br/><b>Japan Advanced Institute of Science and Technology, Ishikawa-ken 923-1211, Japan</b></td><td>('1753878', 'Fan Chen', 'fan chen')<br/>('1791753', 'Kazunori Kotani', 'kazunori kotani')</td><td>{chen-fan, ikko}@jaist.ac.jp
+</td></tr><tr><td>c146aa6d56233ce700032f1cb179700778557601</td><td>3D Morphable Models as Spatial Transformer Networks
+<br/><b>University of York, UK</b><br/><b>Centre for Vision, Speech and Signal Processing, University of Surrey, UK</b></td><td>('39180407', 'Anil Bas', 'anil bas')<br/>('39976184', 'Patrik Huber', 'patrik huber')<br/>('1687021', 'William A. P. Smith', 'william a. p. smith')<br/>('9170545', 'Muhammad Awais', 'muhammad awais')<br/>('1748684', 'Josef Kittler', 'josef kittler')</td><td>{ab1792,william.smith}@york.ac.uk, {p.huber,m.a.rana,j.kittler}@surrey.ac.uk
+</td></tr><tr><td>c1f07ec629be1c6fe562af0e34b04c54e238dcd1</td><td>A Novel Facial Feature Localization Method Using Probabilistic-like Output*
+<br/>Microsoft Research Asia
+<br/>
+<br/>Other methods utilize the face structure information and
+<br/>heuristically search the facial features within the facial
+<br/>regions [12]. Though the method is fast in localizing feature
+<br/>points, it might be sensitive to some noises, such as eye
+<br/>glasses, and thus fail in localization.
+<br/>To address these problems, we proposed a learning-based
+<br/>facial feature localization method under probabilistic-like
+<br/>framework. We modified an object detection method [12] so
+<br/>that it could generate a unified probabilistic-like output for
+<br/>each point. We therefore proposed an algorithm to locate
+<br/>the facial features using this probabilistic-like output.
+<br/>Because this method is learning-based, it is robust to pose,
+<br/>illumination, expression and appearance variations. The
+<br/>localization speed of the proposed method is extremely fast.
+<br/>It takes only about 10ms on the computer with a P4 1.3G
+<br/>CPU to locate five feature points and the accuracy is
+<br/>comparable with hand labeled results.
+<br/>This paper is organized as follows. Section 2 first
+<br/>describes the algorithm to calculate probabilistic-like output,
+<br/>and then presents the proposed localization approach based
+<br/>on the probabilistic-like output. Experiments will be given
+<br/>at Section 3. Section 4 gives the conclusion remarks and
+<br/>discusses future works.
+<br/>2. FACIAL FEATURE POINT LOCALIZATION
+<br/>The framework of the proposed method is illustrated in
+<br/>Figure 1.
+<br/>Figure 1.Feature Point Localization Framework
+<br/><b>ECE dept, University of Miami</b><br/>1251 Memorial Drive, EB406
+<br/>Coral Gables, Florida, 33124, U.S.
+<br/>
+</td><td>('1684635', 'Lei Zhang', 'lei zhang')<br/>('9310930', 'Long', 'long')<br/>('8392859', 'Mingjing Li', 'mingjing li')<br/>('38188346', 'Hongjiang Zhang', 'hongjiang zhang')<br/>('1679242', 'Longbin Chen', 'longbin chen')</td><td>{leizhang, mjli,hjzhang}@microsoft.com
+<br/>longzhu@msrchina.research.microsoft.com
+<br/>l.chen6@umiami.edu
+</td></tr><tr><td>c1cc2a2a1ab66f6c9c6fabe28be45d1440a57c3d</td><td>Dual-Agent GANs for Photorealistic and Identity
+<br/>Preserving Profile Face Synthesis
+<br/><b>National University of Singapore</b><br/>3 Panasonic R&D Center Singapore
+<br/><b>National University of Defense Technology</b><br/><b>Franklin. W. Olin College of Engineering</b><br/><b>Qihoo 360 AI Institute</b></td><td>('46509484', 'Jian Zhao', 'jian zhao')<br/>('33419682', 'Lin Xiong', 'lin xiong')<br/>('2757639', 'Jianshu Li', 'jianshu li')<br/>('40345914', 'Fang Zhao', 'fang zhao')<br/>('2513111', 'Zhecan Wang', 'zhecan wang')<br/>('2668358', 'Sugiri Pranata', 'sugiri pranata')<br/>('3493398', 'Shengmei Shen', 'shengmei shen')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')<br/>('33221685', 'Jiashi Feng', 'jiashi feng')</td><td>{zhaojian90, jianshu}@u.nus.edu
+<br/>{lin.xiong, karlekar.jayashree, sugiri.pranata, shengmei.shen}@sg.panasonic.com
+<br/>zhecan.wang@students.olin.edu
+<br/>{elezhf, eleyans, elefjia}@u.nus.edu
+</td></tr><tr><td>c10a15e52c85654db9c9343ae1dd892a2ac4a279</td><td>Int J Comput Vis (2012) 100:134–153
+<br/>DOI 10.1007/s11263-011-0494-3
+<br/>Learning the Relative Importance of Objects from Tagged Images
+<br/>for Retrieval and Cross-Modal Search
+<br/>Received: 16 December 2010 / Accepted: 23 August 2011 / Published online: 18 October 2011
+<br/>© Springer Science+Business Media, LLC 2011
+</td><td>('35788904', 'Sung Ju Hwang', 'sung ju hwang')</td><td></td></tr><tr><td>c1fc70e0952f6a7587b84bf3366d2e57fc572fd7</td><td></td><td></td><td></td></tr><tr><td>c1dfabe36a4db26bf378417985a6aacb0f769735</td><td>Journal of Computer Vision and Image Processing, NWPJ-201109-50
+<br/>1
+<br/>Describing Visual Scene through EigenMaps
+<br/>
+</td><td>('2630005', 'Shizhi Chen', 'shizhi chen')<br/>('35484757', 'YingLi Tian', 'yingli tian')</td><td></td></tr><tr><td>c1482491f553726a8349337351692627a04d5dbe</td><td></td><td></td><td></td></tr><tr><td>c1ff88493721af1940df0d00bcfeefaa14f1711f</td><td>CVPR
+<br/>#1369
+<br/>000
+<br/>001
+<br/>002
+<br/>003
+<br/>004
+<br/>005
+<br/>006
+<br/>007
+<br/>008
+<br/>009
+<br/>010
+<br/>011
+<br/>012
+<br/>013
+<br/>014
+<br/>015
+<br/>016
+<br/>017
+<br/>018
+<br/>019
+<br/>020
+<br/>021
+<br/>022
+<br/>023
+<br/>024
+<br/>025
+<br/>026
+<br/>027
+<br/>028
+<br/>029
+<br/>030
+<br/>031
+<br/>032
+<br/>033
+<br/>034
+<br/>035
+<br/>036
+<br/>037
+<br/>038
+<br/>039
+<br/>040
+<br/>041
+<br/>042
+<br/>043
+<br/>044
+<br/>045
+<br/>046
+<br/>047
+<br/>048
+<br/>049
+<br/>050
+<br/>051
+<br/>052
+<br/>053
+<br/>CVPR 2010 Submission #1369. CONFIDENTIAL REVIEW COPY. DO NOT DISTRIBUTE.
+<br/>CVPR
+<br/>#1369
+<br/>Subspace Regression: Predicting a Subspace from one Sample
+<br/>Anonymous CVPR submission
+<br/>Paper ID 1369
+</td><td></td><td></td></tr><tr><td>c11eb653746afa8148dc9153780a4584ea529d28</td><td>Global and Local Consistent Wavelet-domain Age
+<br/>Synthesis
+</td><td>('2112221', 'Peipei Li', 'peipei li')<br/>('49995036', 'Yibo Hu', 'yibo hu')<br/>('1705643', 'Ran He', 'ran he')<br/>('1757186', 'Zhenan Sun', 'zhenan sun')</td><td></td></tr><tr><td>c1ebbdb47cb6a0ed49c4d1cf39d7565060e6a7ee</td><td>Robust Facial Landmark Localization Based on
+</td><td>('19254504', 'Yiyun Pan', 'yiyun pan')<br/>('7934466', 'Junwei Zhou', 'junwei zhou')<br/>('46636537', 'Yongsheng Gao', 'yongsheng gao')<br/>('2065968', 'Shengwu Xiong', 'shengwu xiong')</td><td></td></tr><tr><td>c17a332e59f03b77921942d487b4b102b1ee73b6</td><td>Learning an appearance-based gaze estimator
+<br/>from one million synthesised images
+<br/>Tadas Baltruˇsaitis2
+</td><td>('34399452', 'Erroll Wood', 'erroll wood')<br/>('1767184', 'Louis-Philippe Morency', 'louis-philippe morency')<br/>('39626495', 'Peter Robinson', 'peter robinson')<br/>('3194727', 'Andreas Bulling', 'andreas bulling')</td><td>1University of Cambridge, United Kingdom {erroll.wood,peter.robinson}@cam.ac.uk
+<br/>2Carnegie Mellon University, United States {tbaltrus,morency}@cs.cmu.edu
+<br/>3Max Planck Institute for Informatics, Germany bulling@mpi-inf.mpg.de
+</td></tr><tr><td>c1e76c6b643b287f621135ee0c27a9c481a99054</td><td></td><td></td><td></td></tr><tr><td>c10b0a6ba98aa95d740a0d60e150ffd77c7895ad</td><td>HANSELMANN, YAN, NEY: DEEP FISHER FACES
+<br/>Deep Fisher Faces
+<br/>Human Language Technology and
+<br/>Pattern Recognition Group
+<br/><b>RWTH Aachen University</b><br/>Aachen, Germany
+</td><td>('1804963', 'Harald Hanselmann', 'harald hanselmann')<br/>('35362682', 'Shen Yan', 'shen yan')<br/>('1685956', 'Hermann Ney', 'hermann ney')</td><td>hanselmann@cs.rwth-aachen.de
+<br/>shen.yan@rwth-aachen.de
+<br/>ney@cs.rwth-aachen.de
+</td></tr><tr><td>c1298120e9ab0d3764512cbd38b47cd3ff69327b</td><td>Disguised Faces in the Wild
+<br/>IIIT-Delhi, India
+<br/><b>IBM TJ Watson Research Center, USA</b><br/>Rama Chellappa
+<br/><b>University of Maryland, College Park, USA</b></td><td>('2573268', 'Vineet Kushwaha', 'vineet kushwaha')<br/>('2220719', 'Maneet Singh', 'maneet singh')<br/>('50631607', 'Richa Singh', 'richa singh')<br/>('2338122', 'Mayank Vatsa', 'mayank vatsa')<br/>('47733712', 'Nalini Ratha', 'nalini ratha')</td><td>{maneets, rsingh, mayank}@iiitd.ac.in
+<br/>ratha@us.ibm.com
+<br/>rama@umiacs.umd.ed
+</td></tr><tr><td>c1dd69df9dfbd7b526cc89a5749f7f7fabc1e290</td><td>Unconstrained face identification with multi-scale block-based
+<br/>correlation
+<br/>Gaston, J., MIng, J., & Crookes, D. (2016). Unconstrained face identification with multi-scale block-based
+<br/>correlation. In Proceedings of the 2017 IEEE International Conference on Acoustics, Speech and Signal
+<br/><b>Processing (pp. 1477-1481). [978-1-5090-4117-6/17] Institute of Electrical and Electronics Engineers (IEEE</b><br/>Published in:
+<br/>Proceedings of the 2017 IEEE International Conference on Acoustics, Speech and Signal Processing
+<br/>Document Version:
+<br/>Peer reviewed version
+<br/><b>Queen's University Belfast - Research Portal</b><br/><b>Link to publication record in Queen's University Belfast Research Portal</b><br/>Publisher rights
+<br/>© 2017 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future
+<br/><b>media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or</b><br/>redistribution to servers or lists, or reuse of any copyrighted component of this work in other works.
+<br/>General rights
+<br/><b>Copyright for the publications made accessible via the Queen's University Belfast Research Portal is retained by the author(s) and / or other</b><br/>copyright owners and it is a condition of accessing these publications that users recognise and abide by the legal requirements associated
+<br/>with these rights.
+<br/>Take down policy
+<br/>The Research Portal is Queen's institutional repository that provides access to Queen's research output. Every effort has been made to
+<br/>ensure that content in the Research Portal does not infringe any person's rights, or applicable UK laws. If you discover content in the
+<br/>Download date:29. Nov. 2017
+</td><td></td><td>Research Portal that you believe breaches copyright or violates any law, please contact openaccess@qub.ac.uk.
+</td></tr><tr><td>c68ec931585847b37cde9f910f40b2091a662e83</td><td>(IJACSA) International Journal of Advanced Computer Science and Applications,
+<br/>Vol. 9, No. 6, 2018
+<br/>A Comparative Evaluation of Dotted Raster-
+<br/>Stereography and Feature-Based Techniques for
+<br/>Automated Face Recognition
+<br/>S. Talha Ahsan
+<br/>Department of Computer Science
+<br/>Department of Electrical Engineering
+<br/><b>Usman Institute of Technology</b><br/><b>Usman Institute of Technology</b><br/>Karachi, Pakistan
+<br/>Karachi, Pakistan
+<br/>Department of Computer Science
+<br/><b>Usman Institute of Technology</b><br/>Karachi, Pakistan
+<br/>and
+<br/>feature-based
+<br/>system. The
+<br/>techniques
+<br/>two candidate
+</td><td>('49508503', 'Muhammad Wasim', 'muhammad wasim')<br/>('3251091', 'Lubaid Ahmed', 'lubaid ahmed')<br/>('33238128', 'Syed Faisal Ali', 'syed faisal ali')</td><td></td></tr><tr><td>c696c9bbe27434cb6279223a79b17535cd6e88c8</td><td>International Journal of Information Technology Vol.11 No.9 2005
+<br/>*
+<br/>Discriminant Analysis
+<br/>Facial Expression Recognition with Pyramid Gabor
+<br/>Features and Complete Kernel Fisher Linear
+<br/>1 School of Electronic and Information Engineering, South China
+<br/><b>University of Technology, Guangzhou, 510640, P.R.China</b><br/><b>Motorola China Research Center, Shanghai, 210000, P.R.China</b></td><td>('30193721', 'Duan-Duan Yang', 'duan-duan yang')<br/>('2949795', 'Lian-Wen Jin', 'lian-wen jin')<br/>('9215052', 'Jun-Xun Yin', 'jun-xun yin')<br/>('1751744', 'Li-Xin Zhen', 'li-xin zhen')<br/>('34824270', 'Jian-Cheng Huang', 'jian-cheng huang')</td><td>{ddyang, eelwjin,eejxyin}@scut.edu.cn
+<br/>{Li-Xin.Zhen, Jian-Cheng.Huang}@motorola.com
+</td></tr><tr><td>c65e4ffa2c07a37b0bb7781ca4ec2ed7542f18e3</td><td>Recurrent Neural Networks for Facial Action Unit
+<br/>Recognition from Image Sequences
+<br/>School of Computer Science
+<br/><b>University of Witwatersrand</b><br/>Private Bag 3, Wits 2050, South Africa
+<br/>Department of Computer Science
+<br/><b>University of the Western Cape</b><br/>Bellville, South Africa
+<br/><b>Middle East Technical University</b><br/>Northern Cyprus Campus
+<br/>Güzelyurt, Mersin10, Turkey
+</td><td>('1903882', 'H Nyongesa', 'h nyongesa')</td><td>Hima.vadapalli@wits.ac.za
+<br/>hnyongesa@uwc.ac.za
+<br/>Omlin@metu.edu.tr
+</td></tr><tr><td>c614450c9b1d89d5fda23a54dbf6a27a4b821ac0</td><td>Vol.60: e17160480, January-December 2017
+<br/>http://dx.doi.org/10.1590/1678-4324-2017160480
+<br/>ISSN 1678-4324 Online Edition
+<br/>1
+<br/>Engineering,Technology and Techniques
+<br/>BRAZILIAN ARCHIVES OF
+<br/>BIOLOGY AND TECHNOLOGY
+<br/>A N I N T E R N A T I O N A L J O U R N A L
+<br/>Face Image Retrieval of Efficient Sparse Code words and
+<br/>Multiple Attribute in Binning Image
+<br/><b>Srm Easwari Engineering College, Ramapuram, Bharathi Salai, Chennai, Tamil Nadu, India</b></td><td></td><td></td></tr><tr><td>c6096986b4d6c374ab2d20031e026b581e7bf7e9</td><td>A Framework for Using Context to
+<br/>Understand Images of People
+<br/>Submitted in partial fulfillment of the
+<br/>requirements for the
+<br/>degree of Doctor of Philosophy
+<br/>Department of Electrical and Computer Engineering
+<br/><b>Carnegie Mellon University</b><br/>Pittsburgh, PA 15213
+<br/>May 2009
+<br/>Thesis Committee:
+<br/>Tsuhan Chen, Chair
+</td><td>('39460815', 'Andrew C. Gallagher', 'andrew c. gallagher')<br/>('1763086', 'Alexei A. Efros', 'alexei a. efros')<br/>('1709305', 'Martial Hebert', 'martial hebert')<br/>('33642939', 'Jiebo Luo', 'jiebo luo')<br/>('1794486', 'Marios Savvides', 'marios savvides')<br/>('39460815', 'Andrew C. Gallagher', 'andrew c. gallagher')</td><td></td></tr><tr><td>c6608fdd919f2bc4f8d7412bab287527dcbcf505</td><td>Unsupervised Alignment of Natural
+<br/>Language with Video
+<br/>by
+<br/>Submitted in Partial Fulfillment
+<br/>of the
+<br/>Requirements for the Degree
+<br/>Doctor of Philosophy
+<br/>Supervised by
+<br/>Professor Daniel Gildea
+<br/>Department of Computer Science
+<br/>Arts, Sciences and Engineering
+<br/>Edmund A. Hajim School of Engineering and Applied Sciences
+<br/><b>University of Rochester</b><br/>Rochester, New York
+<br/>2015
+</td><td>('2296971', 'Iftekhar Naim', 'iftekhar naim')</td><td></td></tr><tr><td>c6f3399edb73cfba1248aec964630c8d54a9c534</td><td>A Comparison of CNN-based Face and Head Detectors for
+<br/>Real-Time Video Surveillance Applications
+<br/>1 ´Ecole de technologie sup´erieure, Universit´e du Qu´ebec, Montreal, Canada
+<br/>2 Genetec Inc., Montreal, Canada
+</td><td>('38993564', 'Le Thanh Nguyen-Meidine', 'le thanh nguyen-meidine')<br/>('1697195', 'Eric Granger', 'eric granger')<br/>('40185782', 'Madhu Kiran', 'madhu kiran')<br/>('38755219', 'Louis-Antoine Blais-Morin', 'louis-antoine blais-morin')</td><td>lethanh@livia.etsmtl.ca, eric.granger@etsmtl.ca, mkiran@livia.etsmtl.ca
+<br/>lablaismorin@genetec.com
+</td></tr><tr><td>c62c910264658709e9bf0e769e011e7944c45c90</td><td>Recent Progress of Face Image Synthesis
+<br/>National Laboratory of Pattern Recognition, CASIA
+<br/>Center for Research on Intelligent Perception and Computing, CASIA
+<br/>Center for Excellence in Brain Science and Intelligence Technology, CAS
+<br/><b>University of Chinese Academy of Sciences, Beijing, 100049, China</b></td><td>('9702077', 'Zhihe Lu', 'zhihe lu')<br/>('7719475', 'Zhihang Li', 'zhihang li')<br/>('1680853', 'Jie Cao', 'jie cao')<br/>('1705643', 'Ran He', 'ran he')<br/>('1757186', 'Zhenan Sun', 'zhenan sun')</td><td>{luzhihe2016, lizhihang2016, caojie2016}@ia.ac.cn, {rhe, znsun}@nlpr.ia.ac.cn
+</td></tr><tr><td>c678920facffd35853c9d185904f4aebcd2d8b49</td><td>Learning to Anonymize Faces for
+<br/>Privacy Preserving Action Detection
+<br/>1 EgoVid Inc., South Korea
+<br/><b>University of California, Davis</b></td><td>('10805888', 'Zhongzheng Ren', 'zhongzheng ren')<br/>('1883898', 'Yong Jae Lee', 'yong jae lee')<br/>('1766489', 'Michael S. Ryoo', 'michael s. ryoo')</td><td>{zzren,yongjaelee}@ucdavis.edu, mryoo@egovid.com
+</td></tr><tr><td>c660500b49f097e3af67bb14667de30d67db88e3</td><td>www.elsevier.com/locate/cviu
+<br/>Facial asymmetry quantification for
+<br/>expression invariant human identification
+<br/>and Sinjini Mitrac
+<br/><b>a The Robotics Institute, Carnegie Mellon University, 5000 Forbes Avenue, Pittsburgh, PA 15213, USA</b><br/><b>University of Pittsburgh, Pittsburgh, PA 15260, USA</b><br/><b>Carnegie Mellon University, Pittsburgh, PA 15213, USA</b><br/>Received 15 February 2002; accepted 24 March 2003
+</td><td>('1689241', 'Yanxi Liu', 'yanxi liu')<br/>('2185899', 'Karen L. Schmidt', 'karen l. schmidt')</td><td></td></tr><tr><td>c6241e6fc94192df2380d178c4c96cf071e7a3ac</td><td>Action Recognition with Trajectory-Pooled Deep-Convolutional Descriptors
+<br/><b>The Chinese University of Hong Kong</b><br/><b>Shenzhen key lab of Comp. Vis. and Pat. Rec., Shenzhen Institutes of Advanced Technology, CAS, China</b></td><td>('33345248', 'Limin Wang', 'limin wang')<br/>('33427555', 'Yu Qiao', 'yu qiao')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td>07wanglimin@gmail.com, yu.qiao@siat.ac.cn, xtang@ie.cuhk.edu.hk
+</td></tr><tr><td>c6ffa09c4a6cacbbd3c41c8ae7a728b0de6e10b6</td><td>This article appeared in a journal published by Elsevier. The attached
+<br/>copy is furnished to the author for internal non-commercial research
+<br/><b>and education use, including for instruction at the authors institution</b><br/>and sharing with colleagues.
+<br/><b>Other uses, including reproduction and distribution, or selling or</b><br/>licensing copies, or posting to personal, institutional or third party
+<br/>websites are prohibited.
+<br/>In most cases authors are permitted to post their version of the
+<br/>article (e.g. in Word or Tex form) to their personal website or
+<br/>institutional repository. Authors requiring further information
+<br/>regarding Elsevier’s archiving and manuscript policies are
+<br/>encouraged to visit:
+<br/>http://www.elsevier.com/copyright
+</td><td></td><td></td></tr><tr><td>c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8</td><td>OPEN
+<br/>Received: 22 December 2015
+<br/>Accepted: 04 April 2016
+<br/>Published: 21 April 2016
+<br/>Anxiety promotes memory for
+<br/>mood-congruent faces but does not
+<br/>alter loss aversion
+<br/><b>Pathological anxiety is associated with disrupted cognitive processing, including working memory and</b><br/>decision-making. In healthy individuals, experimentally-induced state anxiety or high trait anxiety
+<br/>often results in the deployment of adaptive harm-avoidant behaviours. However, how these processes
+<br/>affect cognition is largely unknown. To investigate this question, we implemented a translational
+<br/>within-subjects anxiety induction, threat of shock, in healthy participants reporting a wide range of
+<br/>trait anxiety scores. Participants completed a gambling task, embedded within an emotional working
+<br/>memory task, with some blocks under unpredictable threat and others safe from shock. Relative to the
+<br/>safe condition, threat of shock improved recall of threat-congruent (fearful) face location, especially in
+<br/>highly trait anxious participants. This suggests that threat boosts working memory for mood-congruent
+<br/>stimuli in vulnerable individuals, mirroring memory biases in clinical anxiety. By contrast, Bayesian
+<br/>analysis indicated that gambling decisions were better explained by models that did not include threat
+<br/>or treat anxiety, suggesting that: (i) higher-level executive functions are robust to these anxiety
+<br/>manipulations; and (ii) decreased risk-taking may be specific to pathological anxiety. These findings
+<br/>provide insight into the complex interactions between trait anxiety, acute state anxiety and cognition,
+<br/>and may help understand the cognitive mechanisms underlying adaptive anxiety.
+<br/>Anxiety disorders constitute a major global health burden1, and are characterized by negative emotional process-
+<br/>ing biases, as well as disrupted working memory and decision-making2,3. On the other hand, anxiety can also be
+<br/>an adaptive response to stress, stimulating individuals to engage in harm-avoidant behaviours. Influential the-
+<br/>ories of pathological anxiety propose that clinical anxiety emerges through dysregulation of adaptive anxiety4,5.
+<br/>Therefore, in order to understand how this dysregulation emerges in pathological anxiety, it is crucial to first
+<br/>understand the cognitive features associated with adaptive or ‘non-pathological’ anxiety, in other words anxiety
+<br/>levels that can vary within and between individuals but do not result in the development of clinical symptoms
+<br/>associated with anxiety disorders.
+<br/><b>Several methods exists to induce anxiety in healthy individuals, including threat of shock (ToS), the Trier</b><br/>social stressor test (TSST), and the cold pressor test (CPT). During the ToS paradigm, subjects typically perform
+<br/>a cognitive task while either at risk of or safe from rare, but unpleasant, electric shocks. Compared to the other
+<br/>methodologies, ToS has the advantage of allowing for within-subjects, within-sessions, designs (for a review
+<br/>on its effects on cognition, see Robinson et al.2), and ensures the task is performed while being anxious, rather
+<br/>than after being relieved from the stressor. In addition, ToS paradigms have good translational analogues6, are
+<br/>well-validated7, and are thus considered a reliable model for examining adaptive anxiety in healthy individuals.
+<br/>Because the engagement of adaptive anxiety processes may vary with individuals’ vulnerability to developing
+<br/>pathological anxiety8–10, we were also interested in examining how the effects of state anxiety induced by threat
+<br/>of shock interact with dispositional or trait anxiety, as reflected in self-report questionnaire scores such as the
+<br/>State-Trait Anxiety Inventory11 (STAI). High levels of self-reported trait anxiety are indeed considered a strong
+<br/>vulnerability factor in the development of pathological anxiety4,12.
+<br/>The extent to which induced state anxiety (elicited by the laboratory procedures discussed above) and
+<br/>trait anxiety interact to alter cognition has rarely been studied10. In particular, does induced anxiety have a
+<br/><b>Institute of Cognitive Neuroscience, University College London, London WC1N 3AR, UK. 2Affective Brain</b><br/><b>Lab, University College London, London WC1H 0AP, UK. 3Clinical</b><br/><b>Psychopharmacology Unit, Educational and Health Psychology, University College</b><br/>London, WC1E 7HB. *These authors contributed equally to this work. †These authors jointly supervised this work.
+</td><td>('4177273', 'Chandni Hindocha', 'chandni hindocha')</td><td>Correspondence and requests for materials should be addressed to C.J.C. (email: caroline.charpentier.11@ucl.ac.uk)
+</td></tr><tr><td>c62c07de196e95eaaf614fb150a4fa4ce49588b4</td><td>Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intelligence (IJCAI-18)
+<br/>1078
+</td><td></td><td></td></tr><tr><td>c65a394118d34beda5dd01ae0df163c3db88fceb</td><td>In press : Proceedings of the 30th European Conference On Information Retrieval
+<br/>Glasgow, March-April 2008
+<br/>Finding the Best Picture:
+<br/>Cross-Media Retrieval of Content
+<br/><b>Katholieke Universiteit Leuven</b><br/>Celestijnenlaan 200A, B-3001 Heverlee, Belgium
+<br/>http://www.cs.kuleuven.be/~liir/
+</td><td>('1797588', 'Koen Deschacht', 'koen deschacht')<br/>('1802161', 'Marie-Francine Moens', 'marie-francine moens')</td><td>{Koen.Deschacht,Marie-Francine.Moens}@cs.kuleuven.be
+</td></tr><tr><td>ec90d333588421764dff55658a73bbd3ea3016d2</td><td>Research Article
+<br/>Protocol for Systematic Literature Review of Face
+<br/>Recognition in Uncontrolled Environment
+<br/><b>Bacha Khan University, Charsadda, KPK, Pakistan</b></td><td>('12144785', 'Faizan Ullah', 'faizan ullah')<br/>('46463663', 'Sabir Shah', 'sabir shah')<br/>('49669073', 'Dilawar Shah', 'dilawar shah')<br/>('12579194', 'Shujaat Ali', 'shujaat ali')</td><td>faizanullah@bkuc.edu.pk
+</td></tr><tr><td>ec8ec2dfd73cf3667f33595fef84c95c42125945</td><td>Pose-Invariant Face Alignment with a Single CNN
+<br/><b>Michigan State University</b><br/>2Visualization Group, Bosch Research and Technology Center North America
+</td><td>('2357264', 'Amin Jourabloo', 'amin jourabloo')<br/>('3876303', 'Mao Ye', 'mao ye')<br/>('1759169', 'Xiaoming Liu', 'xiaoming liu')<br/>('3334600', 'Liu Ren', 'liu ren')</td><td>1,2 {jourablo, liuxm}@msu.edu, {mao.ye2, liu.ren}@us.bosch.com
+</td></tr><tr><td>ec1e03ec72186224b93b2611ff873656ed4d2f74</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+<br/>3D Reconstruction of “In-the-Wild” Faces in
+<br/>Images and Videos
+</td><td>('47456731', 'James Booth', 'james booth')<br/>('2931390', 'Anastasios Roussos', 'anastasios roussos')<br/>('31243357', 'Evangelos Ververas', 'evangelos ververas')<br/>('2015036', 'Stylianos Ploumpis', 'stylianos ploumpis')<br/>('1780393', 'Yannis Panagakis', 'yannis panagakis')</td><td></td></tr><tr><td>ec12f805a48004a90e0057c7b844d8119cb21b4a</td><td>Distance-Based Descriptors and Their
+<br/>Application in the Task of Object Detection
+<br/><b>Technical University of Ostrava, FEECS</b><br/>17. Listopadu 15, 708 33 Ostrava-Poruba, Czech Republic
+</td><td>('2467747', 'Radovan Fusek', 'radovan fusek')<br/>('2557877', 'Eduard Sojka', 'eduard sojka')</td><td>{radovan.fusek,eduard.sojka}@vsb.cz
+</td></tr><tr><td>ec22eaa00f41a7f8e45ed833812d1ac44ee1174e</td><td></td><td></td><td></td></tr><tr><td>ec54000c6c0e660dd99051bdbd7aed2988e27ab8</td><td>TWO IN ONE: JOINT POSE ESTIMATION AND FACE RECOGNITION WITH P2CA1
+<br/>*Dept. Teoria del Senyal i Comunicacions - Universitat Politècnica de Catalunya, Barcelona, Spain
+<br/>+Dipartimento di Elettronica e Informazione - Politecnico di Milano, Meiland, Italy
+</td><td>('2771575', 'Francesc Tarres', 'francesc tarres')<br/>('31936578', 'Antonio Rama', 'antonio rama')<br/>('2158932', 'Davide Onofrio', 'davide onofrio')<br/>('1729506', 'Stefano Tubaro', 'stefano tubaro')</td><td>{tarres, alrama}@gps.tsc.upc.edu
+<br/>{d.onofrio, tubaro}@elet.polimi.it
+</td></tr><tr><td>ec0104286c96707f57df26b4f0a4f49b774c486b</td><td>758
+<br/>An Ensemble CNN2ELM for Age Estimation
+</td><td>('40402919', 'Mingxing Duan', 'mingxing duan')<br/>('39893222', 'Kenli Li', 'kenli li')<br/>('34373985', 'Keqin Li', 'keqin li')</td><td></td></tr><tr><td>ec05078be14a11157ac0e1c6b430ac886124589b</td><td>Longitudinal Face Aging in the Wild - Recent Deep Learning Approaches
+<br/><b>Concordia University</b><br/>Montreal, Quebec, Canada
+<br/><b>Concordia University</b><br/>Montreal, Quebec, Canada
+<br/>CyLab Biometrics Center
+<br/>Dept. of Electrical and Computer Engineering
+<br/><b>Carnegie Mellon University Pittsburgh, PA, USA</b><br/><b>Concordia University</b><br/>Montreal, Quebec, Canada
+</td><td>('1876581', 'Chi Nhan Duong', 'chi nhan duong')<br/>('2687827', 'Kha Gia Quach', 'kha gia quach')<br/>('1769788', 'Khoa Luu', 'khoa luu')<br/>('1699922', 'Tien D. Bui', 'tien d. bui')</td><td>Email: c duon@encs.concordia.ca
+<br/>Email: k q@encs.concordia.ca
+<br/>Email: kluu@andrew.cmu.edu
+<br/>Email: bui@encs.concordia.ca
+</td></tr><tr><td>4e7ed13e541b8ed868480375785005d33530e06d</td><td>Face Recognition Using Deep Multi-Pose Representations
+<br/>Ram Nevatiab Gerard Medionib
+<br/>Prem Natarajana
+<br/><b>aInformation Sciences Institute</b><br/><b>University of Southern California</b><br/>Marina Del Rey, CA
+<br/><b>b Institute for Robotics and Intelligent Systems</b><br/><b>University of Southern California</b><br/>Los Angeles, California
+<br/><b>cThe Open University</b><br/>Raanana, Israel
+</td><td>('1746738', 'Yue Wu', 'yue wu')<br/>('38696444', 'Stephen Rawls', 'stephen rawls')<br/>('35840854', 'Shai Harel', 'shai harel')<br/>('11269472', 'Iacopo Masi', 'iacopo masi')<br/>('1689391', 'Jongmoo Choi', 'jongmoo choi')<br/>('2955822', 'Jatuporn Toy Leksut', 'jatuporn toy leksut')<br/>('5911467', 'Jungyeon Kim', 'jungyeon kim')<br/>('1756099', 'Tal Hassner', 'tal hassner')</td><td></td></tr><tr><td>4e30107ee6a2e087f14a7725e7fc5535ec2f5a5f</td><td>Представление новостных сюжетов с помощью
+<br/>событийных фотографий
+<br/>© М.М. Постников
+<br/> © Б.В. Добров
+<br/>Московский государственный университет имени М.В. Ломоносова
+<br/>факультет вычислительной математики и кибернетики,
+<br/>Москва, Россия
+<br/>Аннотация. Рассмотрена задача аннотирования новостного сюжета изображениями,
+<br/>ассоциированными с конкретными текстами сюжета. Введено понятие «событийной фотографии»,
+<br/>содержащей конкретную информацию, дополняющую текст сюжета. Для решения задачи применены
+<br/>нейронные сети с использованием переноса обучения (Inception v3) для специальной размеченной
+<br/>коллекции из 4114 изображений. Средняя точность полученных результатов составила более 94,7%.
+<br/>Ключевые слова: событийная фотография, новостные иллюстрации, перенос обучения.
+<br/>News Stories Representation Using Event Photos
+<br/>© M.M. Postnikov
+<br/> © B.V. Dobrov
+<br/><b>Lomonosov Moscow State University, Faculty of Computational Mathematics and Cybernetics</b><br/>Moscow, Russia
+</td><td></td><td>mihanlg@yandex.ru
+<br/> dobrov_bv@mail.ru
+<br/>mihanlg@yandex.ru
+<br/> dobrov_bv@mail.ru
+</td></tr><tr><td>4e5dc3b397484326a4348ccceb88acf309960e86</td><td>Hindawi Publishing Corporation
+<br/>e Scientific World Journal
+<br/>Volume 2014, Article ID 219732, 12 pages
+<br/>http://dx.doi.org/10.1155/2014/219732
+<br/>Research Article
+<br/>Secure Access Control and Large Scale Robust Representation
+<br/>for Online Multimedia Event Detection
+<br/><b>School of Computer Science and Engineering, South China University of Technology, Guangzhou 510006, China</b><br/><b>School of Computer Science, Carnegie Mellon University, Pittsburgh, PA 15213, USA</b><br/><b>School of Computer Science, Wuyi University, Jiangmen 529020, China</b><br/><b>State Key Laboratory of Pulp and Paper Engineering, South China University of Technology, Guangzhou 510640, China</b><br/>Received 2 April 2014; Accepted 30 June 2014; Published 22 July 2014
+<br/>Academic Editor: Vincenzo Eramo
+<br/>which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
+<br/>We developed an online multimedia event detection (MED) system. However, there are a secure access control issue and a large
+<br/>scale robust representation issue when we want to integrate traditional event detection algorithms into the online environment. For
+<br/>the first issue, we proposed a tree proxy-based and service-oriented access control (TPSAC) model based on the traditional role
+<br/>based access control model. Verification experiments were conducted on the CloudSim simulation platform, and the results showed
+<br/>that the TPSAC model is suitable for the access control of dynamic online environments. For the second issue, inspired by the
+<br/>object-bank scene descriptor, we proposed a 1000-object-bank (1000OBK) event descriptor. Feature vectors of the 1000OBK were
+<br/>extracted from response pyramids of 1000 generic object detectors which were trained on standard annotated image datasets, such
+<br/>as the ImageNet dataset. A spatial bag of words tiling approach was then adopted to encode these feature vectors for bridging the gap
+<br/>between the objects and events. Furthermore, we performed experiments in the context of event classification on the challenging
+<br/>TRECVID MED 2012 dataset, and the results showed that the robust 1000OBK event descriptor outperforms the state-of-the-art
+<br/>approaches.
+<br/>1. Introduction
+<br/>As one of the most interesting aspects of multimedia content
+<br/>analysis, the multimedia event detection (MED) is becoming
+<br/>an important research area for computer vision in recent
+<br/><b>years. According to the definition by the National Institute</b><br/>of Standards and Technology (NIST) [1], an event (1) is
+<br/>a complex activity occurring at a specific place and time,
+<br/>(2) involves people interacting with other people and/or
+<br/>objects, (3) consists of a number of human actions, processes,
+<br/>and activities that are loosely or tightly organized and that
+<br/>have significant temporal and semantic relationships to the
+<br/>overarching activity, and (4) is directly observable. A MED
+<br/>task is to indicate whether an event is occurred in a specified
+<br/>test clip based on a standard event kit [1], which includes an
+<br/>event name, a textual definition, a textual explication with an
+<br/>attribute list, an evidential description, and a set of illustrative
+<br/>video examples. Although there are many other definitions
+<br/>available, such as the MED definitions from the NIST, the
+<br/>research on the MED is still far from reaching its maturity.
+<br/>Most of the current researches are focused on specific areas,
+<br/>such as sports video [2], news video [3], and surveillance
+<br/>video [4]. These approaches do not perform well when used
+<br/>for the online or web based event detection due to two types
+<br/>of issues, which are the secure access control issue and the
+<br/>large scale robust representation issue. Thus, we developed an
+<br/>online multimedia event detection system, trying to provide
+<br/>general MED services.
+<br/>The first issue is about how we can obtain a secure access
+<br/>control for the online multimedia event detection system.
+<br/>Compared with that of traditional distributed systems, it is a
+<br/>kind of service relationships between access control subjects
+<br/>and objects in the online multimedia event detection system.
+<br/>The service could establish, recombine, destruct, and even
+<br/>inherit efficiently to requested parameters which cannot be
+<br/>satisfied well by traditional access control models, such as
+</td><td>('1706701', 'Changyu Liu', 'changyu liu')<br/>('40371462', 'Bin Lu', 'bin lu')<br/>('1780591', 'Huiling Li', 'huiling li')<br/>('1706701', 'Changyu Liu', 'changyu liu')</td><td>Correspondence should be addressed to Bin Lu; lbscut@gmail.com
+</td></tr><tr><td>4e6c17966efae956133bf8f22edeffc24a0470c1</td><td>Face Classification: A Specialized Benchmark
+<br/>Study
+<br/>1School of Electronic, Electrical and Communication Engineering
+<br/>2Center for Biometrics and Security Research & National Laboratory of Pattern Recognition
+<br/><b>University of Chinese Academy of Sciences</b><br/><b>Institute of Automation, Chinese Academy of Sciences</b><br/><b>Macau University of Science and Technology</b></td><td>('37614515', 'Jiali Duan', 'jiali duan')<br/>('40397682', 'Shengcai Liao', 'shengcai liao')<br/>('2950852', 'Shuai Zhou', 'shuai zhou')<br/>('34679741', 'Stan Z. Li', 'stan z. li')</td><td>{jli.duan,shuaizhou.palm}@gmail.com, {scliao,szli}@nlpr.ia.ac.cn
+</td></tr><tr><td>4e1836914bbcf94dc00e604b24b1b0d6d7b61e66</td><td>Dynamic Facial Expression Recognition Using Boosted
+<br/>Component-based Spatiotemporal Features and
+<br/>Multi-Classifier Fusion
+<br/>1. Machine Vision Group, Department of Electrical and Information Engineering,
+<br/><b>University of Oulu, Finland</b><br/><b>Research Center for Learning Science, Southeast University, China</b><br/>http://www.ee.oulu.fi/mvg
+</td><td>('18780812', 'Xiaohua Huang', 'xiaohua huang')<br/>('1757287', 'Guoying Zhao', 'guoying zhao')<br/>('40608983', 'Wenming Zheng', 'wenming zheng')</td><td>{huang.xiaohua,gyzhao,mkp}@ee.oulu.fi
+<br/>wenming_zheng@seu.edu.cn
+</td></tr><tr><td>4e4fa167d772f34dfffc374e021ab3044566afc3</td><td>Learning Low-Rank Representations with Classwise
+<br/>Block-Diagonal Structure for Robust Face Recognition
+<br/><b>National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences</b><br/><b>School of Computer Science, Nanjing University of Science and Technology</b><br/><b>University of Maryland, College Park</b></td><td>('1689181', 'Yong Li', 'yong li')<br/>('38188270', 'Jing Liu', 'jing liu')<br/>('3233021', 'Zechao Li', 'zechao li')<br/>('34868330', 'Yangmuzi Zhang', 'yangmuzi zhang')<br/>('1694235', 'Hanqing Lu', 'hanqing lu')<br/>('38450168', 'Songde Ma', 'songde ma')</td><td>{yong.li,jliu,luhq}@nlpr.ia.ac.cn, zechao.li@gmail.com, ymzhang@umiacs.umd.edu, masd@most.cn
+</td></tr><tr><td>4e32fbb58154e878dd2fd4b06398f85636fd0cf4</td><td>A Hierarchical Matcher using Local Classifier Chains
+<br/>L. Zhang and I.A. Kakadiaris
+<br/>Computational Biomedicine Lab, 4849 Calhoun Rd, Rm 373, Houston, TX 77204
+</td><td></td><td></td></tr><tr><td>4ed54d5093d240cc3644e4212f162a11ae7d1e3b</td><td>Learning Visual Compound Models from Parallel
+<br/>Image-Text Datasets
+<br/><b>Bielefeld University</b><br/><b>University of Toronto</b></td><td>('2872318', 'Jan Moringen', 'jan moringen')<br/>('1724954', 'Sven Wachsmuth', 'sven wachsmuth')<br/>('1792908', 'Suzanne Stevenson', 'suzanne stevenson')</td><td>{jmoringe,swachsmu}@techfak.uni-bielefeld.de
+<br/>{sven,suzanne}@cs.toronto.edu
+</td></tr><tr><td>4e8c608fc4b8198f13f8a68b9c1a0780f6f50105</td><td>How Related Exemplars Help Complex Event Detection in Web Videos?
+<br/><b>ITEE, The University of Queensland, Australia</b><br/><b>ECE, National University of Singapore, Singapore</b><br/>§†
+<br/><b>School of Computer Science, Carnegie Mellon University, USA</b></td><td>('39033919', 'Yi Yang', 'yi yang')<br/>('1727419', 'Zhigang Ma', 'zhigang ma')<br/>('7661726', 'Alexander G. Hauptmann', 'alexander g. hauptmann')<br/>('2351434', 'Zhongwen Xu', 'zhongwen xu')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')</td><td>{yiyang,kevinma,alex}@cs.cmu.edu z.xu3@uq.edu.au
+</td></tr><tr><td>4ea53e76246afae94758c1528002808374b75cfa</td><td>Lasbela, U. J.Sci. Techl., vol.IV , pp. 57-70, 2015
+<br/>Review ARTICLE
+<br/>A Review of Scholastic Examination and Models for Face Recognition
+<br/> ISSN 2306-8256
+<br/>and Retrieval in Video
+<br/>
+<br/><b>SBK Women s University, Quetta, Balochistan</b><br/><b>University of Balochistan, Quetta</b><br/><b>University of Balochistan, Quetta</b><br/><b>Institute of Biochemistry, University of Balochistan, Quetta</b></td><td>('35415301', 'Varsha Sachdeva', 'varsha sachdeva')<br/>('2139801', 'Junaid Baber', 'junaid baber')<br/>('3343681', 'Maheen Bakhtyar', 'maheen bakhtyar')<br/>('1903979', 'Muzamil Bokhari', 'muzamil bokhari')<br/>('1702753', 'Imran Ali', 'imran ali')</td><td></td></tr><tr><td>4ed2d7ecb34a13e12474f75d803547ad2ad811b2</td><td>Common Action Discovery and Localization in Unconstrained Videos
+<br/>School of Electrical and Electronic Engineering
+<br/><b>Nanyang Technological University, Singapore</b></td><td>('1691251', 'Jiong Yang', 'jiong yang')<br/>('34316743', 'Junsong Yuan', 'junsong yuan')</td><td>yang0374@e.ntu.edu.sg, jsyuan@ntu.edu.sg
+</td></tr><tr><td>4e97b53926d997f451139f74ec1601bbef125599</td><td>Discriminative Regularization for Generative Models
+<br/><b>Montreal Institute for Learning Algorithms, Universit e de Montr eal</b></td><td>('2059369', 'Alex Lamb', 'alex lamb')<br/>('3074927', 'Vincent Dumoulin', 'vincent dumoulin')</td><td>FIRST.LAST@UMONTREAL.CA
+</td></tr><tr><td>4e8168fbaa615009d1618a9d6552bfad809309e9</td><td>Deep Convolutional Neural Network Features and the Original Image
+<br/><b>School of Behavioral and Brain Sciences, The University of Texas at Dallas, USA</b><br/><b>University of Maryland, College Park, USA</b></td><td>('7493834', 'Connor J. Parde', 'connor j. parde')<br/>('3363752', 'Matthew Q. Hill', 'matthew q. hill')<br/>('15929465', 'Y. Ivette Colon', 'y. ivette colon')<br/>('2716670', 'Swami Sankaranarayanan', 'swami sankaranarayanan')<br/>('36407236', 'Jun-Cheng Chen', 'jun-cheng chen')</td><td></td></tr><tr><td>4e0636a1b92503469b44e2807f0bb35cc0d97652</td><td>Adversarial Localization Network
+<br/><b>Tsinghua University</b><br/><b>Stanford University</b><br/><b>Stanford University</b></td><td>('2548303', 'Lijie Fan', 'lijie fan')<br/>('3303970', 'Shengjia Zhao', 'shengjia zhao')<br/>('2490652', 'Stefano Ermon', 'stefano ermon')</td><td>flj14@mails.tsinghua.edu.cn
+<br/>sjzhao@stanford.edu
+<br/>ermon@stanford.edu
+</td></tr><tr><td>4e27fec1703408d524d6b7ed805cdb6cba6ca132</td><td>SSD-Sface: Single shot multibox detector for small faces
+<br/>C. Thuis
+</td><td></td><td></td></tr><tr><td>4e6c9be0b646d60390fe3f72ce5aeb0136222a10</td><td>Long-term Temporal Convolutions
+<br/>for Action Recognition
+</td><td>('1785596', 'Ivan Laptev', 'ivan laptev')<br/>('2462253', 'Cordelia Schmid', 'cordelia schmid')</td><td></td></tr><tr><td>4ea4116f57c5d5033569690871ba294dc3649ea5</td><td>Multi-View Face Alignment Using 3D Shape Model for
+<br/>View Estimation
+<br/><b>Tsinghua University</b><br/>2Core Technology Center, Omron Corporation
+</td><td>('1739678', 'Yanchao Su', 'yanchao su')<br/>('1679380', 'Haizhou Ai', 'haizhou ai')<br/>('1710195', 'Shihong Lao', 'shihong lao')</td><td>ahz@mail.tsinghua.edu.cn
+</td></tr><tr><td>4e444db884b5272f3a41e4b68dc0d453d4ec1f4c</td><td></td><td></td><td></td></tr><tr><td>4ef0a6817a7736c5641dc52cbc62737e2e063420</td><td>International Journal of Advanced Computer Research (ISSN (Print): 2249-7277 ISSN (Online): 2277-7970)
+<br/>Volume-4 Number-4 Issue-17 December-2014
+<br/>Study of Face Recognition Techniques
+<br/>Received: 10-November-2014; Revised: 18-December-2014; Accepted: 23-December-2014
+<br/>©2014 ACCENTS
+</td><td>('7874804', 'Sangeeta Kaushik', 'sangeeta kaushik')<br/>('33551600', 'R. B. Dubey', 'r. b. dubey')<br/>('1680807', 'Abhimanyu Madan', 'abhimanyu madan')</td><td></td></tr><tr><td>4e4d034caa72dce6fca115e77c74ace826884c66</td><td>RESEARCH ARTICLE
+<br/>Sex differences in facial emotion recognition
+<br/>across varying expression intensity levels
+<br/>from videos
+<br/><b>University of Bath, Bath, Somerset, United Kingdom</b><br/>☯ These authors contributed equally to this work.
+<br/>¤ Current address: Social and Affective Neuroscience Laboratory, Centre for Health and Biological Sciences,
+<br/><b>Mackenzie Presbyterian University, S o Paulo, S o Paulo, Brazil</b></td><td>('2708124', 'Chris Ashwin', 'chris ashwin')<br/>('39455300', 'Mark Brosnan', 'mark brosnan')</td><td>* tanja.wingenbach@bath.edu
+</td></tr><tr><td>4e7ebf3c4c0c4ecc48348a769dd6ae1ebac3bf1b</td><td></td><td></td><td></td></tr><tr><td>4e0e49c280acbff8ae394b2443fcff1afb9bdce6</td><td>Automatic learning of gait signatures for people identification
+<br/>F.M. Castro
+<br/>Univ. of Malaga
+<br/>fcastro<at>uma.es
+<br/>M.J. Mar´ın-Jim´enez
+<br/>Univ. of Cordoba
+<br/>mjmarin<at>uco.es
+<br/>N. Guil
+<br/>Univ. of Malaga
+<br/>nguil<at>uma.es
+<br/>N. P´erez de la Blanca
+<br/>Univ. of Granada
+<br/>nicolas<at>ugr.es
+</td><td></td><td></td></tr><tr><td>4e4e8fc9bbee816e5c751d13f0d9218380d74b8f</td><td></td><td></td><td></td></tr><tr><td>20a88cc454a03d62c3368aa1f5bdffa73523827b</td><td></td><td></td><td></td></tr><tr><td>20a432a065a06f088d96965f43d0055675f0a6c1</td><td>In: Proc. of the 25th Int. Conference on Artificial Neural Networks (ICANN)
+<br/>Part II, LNCS 9887, pp. 80-87, Barcelona, Spain, September 2016
+<br/>The final publication is available at Springer via
+<br/>http://dx.doi.org//10.1007/978-3-319-44781-0_10
+<br/>The Effects of Regularization on Learning Facial
+<br/>Expressions with Convolutional Neural Networks
+<br/><b></b><br/>Vogt-Koelln-Strasse 30, 22527 Hamburg, Germany
+<br/>http://www.informatik.uni-hamburg.de/WTM
+</td><td>('11634287', 'Tobias Hinz', 'tobias hinz')<br/>('1736513', 'Stefan Wermter', 'stefan wermter')</td><td>{4hinz,barros,wermter}@informatik.uni-hamburg.de
+</td></tr><tr><td>20a3ce81e7ddc1a121f4b13e439c4cbfb01adfba</td><td>Sparse-MVRVMs Tree for Fast and Accurate
+<br/>Head Pose Estimation in the Wild
+<br/>Augmented Vision Research Group,
+<br/><b>German Research Center for Arti cial Intelligence (DFKI</b><br/>Tripstaddterstr. 122, 67663 Kaiserslautern, Germany
+<br/><b>Technical University of Kaiserslautern</b><br/>http://www.av.dfki.de
+</td><td>('2585383', 'Mohamed Selim', 'mohamed selim')<br/>('1771057', 'Alain Pagani', 'alain pagani')<br/>('1807169', 'Didier Stricker', 'didier stricker')</td><td>{mohamed.selim,alain.pagani,didier.stricker}@dfki.uni-kl.de
+</td></tr><tr><td>20b994a78cd1db6ba86ea5aab7211574df5940b3</td><td>Enriched Long-term Recurrent Convolutional Network
+<br/>for Facial Micro-Expression Recognition
+<br/><b>Faculty of Computing and Informatics, Multimedia University, Malaysia</b><br/><b>Faculty of Engineering, Multimedia University, Malaysia</b><br/><b>Shanghai Jiao Tong University, China</b></td><td>('30470673', 'Huai-Qian Khor', 'huai-qian khor')<br/>('2339975', 'John See', 'john see')<br/>('8131625', 'Weiyao Lin', 'weiyao lin')</td><td>Emails: 1hqkhor95@gmail.com, 2johnsee@mmu.edu.my, 3raphael@mmu.edu.my, 4wylin@sjtu.edu.cn
+</td></tr><tr><td>2004afb2276a169cdb1f33b2610c5218a1e47332</td><td>Hindawi
+<br/>Computational Intelligence and Neuroscience
+<br/>Volume 2018, Article ID 3803627, 11 pages
+<br/>https://doi.org/10.1155/2018/3803627
+<br/>Research Article
+<br/>Deep Convolutional Neural Network Used in Single Sample per
+<br/>Person Face Recognition
+<br/><b>School of Information Engineering, Wuyi University, Jiangmen 529020, China</b><br/>Received 27 November 2017; Revised 23 May 2018; Accepted 26 July 2018; Published 23 August 2018
+<br/>Academic Editor: Jos´e Alfredo Hern´andez-P´erez
+<br/>which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
+<br/>Face recognition (FR) with single sample per person (SSPP) is a challenge in computer vision. Since there is only one sample to be
+<br/>trained, it makes facial variation such as pose, illumination, and disguise difficult to be predicted. To overcome this problem, this paper
+<br/>proposes a scheme combined traditional and deep learning (TDL) method to process the task. First, it proposes an expanding sample
+<br/>method based on traditional approach. Compared with other expanding sample methods, the method can be used easily and
+<br/>conveniently. Besides, it can generate samples such as disguise, expression, and mixed variation. Second, it uses transfer learning and
+<br/>introduces a well-trained deep convolutional neural network (DCNN) model and then selects some expanding samples to fine-tune the
+<br/>DCNN model. 0ird, the fine-tuned model is used to implement experiment. Experimental results on AR face database, Extend Yale B
+<br/>face database, FERET face database, and LFW database demonstrate that TDL achieves the state-of-the-art performance in SSPP FR.
+<br/>1. Introduction
+<br/>As artificial
+<br/>intelligence (AI) becomes more and more
+<br/>popular, computer vision (CV) also has been proved to be
+<br/>a very hot topic in academic such as face recognition [1],
+<br/>facial expression recognition [2], and object recognition [3].
+<br/>It is well known that the basic and important foundation in
+<br/>CV is that there are an amount of training samples. But in
+<br/>actual scenarios such as immigration management, fugitive
+<br/>tracing, and video surveillance, there may be only one
+<br/>sample, which leads to single sample per person (SSPP)
+<br/>problem such as gait recognition [4], face recognition (FR)
+<br/>[5, 6], and low-resolution face recognition [7] in CV.
+<br/>However, as the widely use of second-generation ID card
+<br/>which is convenient to be collected, SSPP FR becomes one of
+<br/>the most popular topics no matter in academic or in
+<br/>industry.
+<br/>Beymer and Poggio [8] proposed one example view
+<br/>problem in 1996. In [8], it was researched that how to
+<br/>perform face recognition (FR) using one example view.
+<br/>Firstly, it exploited prior knowledge to generate multiple
+<br/>virtual views. 0en, the example view and these multiple
+<br/>virtual views were used as example views in a view-based,
+<br/>pose-invariant
+<br/>face recognizer. Later, SSPP FR became
+<br/>a popular research topic at the beginning of the 21st century.
+<br/>Recently, many methods have been proposed. Generally
+<br/>speaking, these methods can be summarized in five basic
+<br/>methods: direct method, generic learning method, patch-
+<br/>based method, expanding sample method, and deep learning
+<br/>(DL) method. Direct method does experiment based on the
+<br/>SSPP directly by using an algorithm. Generic learning
+<br/>method is the way that using an auxiliary dataset to build
+<br/>a generic dataset from which some variation information
+<br/>can be learned by single sample. Patch-based method par-
+<br/>titions single sample into several patches first, then extracts
+<br/>features on these patches, respectively, and does classifica-
+<br/>tion finally. 0e expanding sample method is with some
+<br/>special means such as perturbation-based method [9, 10],
+<br/>photometric transforms, and geometric distortion [11] to
+<br/>increase sample so that abundant training samples can be
+<br/>used to process this task. 0e DL method uses the DL model
+<br/>to perform the research.
+<br/>Attracted by the good performance of DCNN, inspired
+<br/>by [12] and driven by AI, in this paper, a scheme combined
+</td><td>('9363278', 'Junying Zeng', 'junying zeng')<br/>('12054657', 'Xiaoxiao Zhao', 'xiaoxiao zhao')<br/>('2926767', 'Junying Gan', 'junying gan')<br/>('40552250', 'Chaoyun Mai', 'chaoyun mai')<br/>('1716453', 'Fan Wang', 'fan wang')<br/>('3003242', 'Yikui Zhai', 'yikui zhai')<br/>('9363278', 'Junying Zeng', 'junying zeng')</td><td>Correspondence should be addressed to Xiaoxiao Zhao; xiaoxiao-zhao@foxmail.com
+</td></tr><tr><td>20e504782951e0c2979d9aec88c76334f7505393</td><td>Robust LSTM-Autoencoders for Face De-Occlusion
+<br/>in the Wild
+</td><td>('37182704', 'Fang Zhao', 'fang zhao')<br/>('33221685', 'Jiashi Feng', 'jiashi feng')<br/>('39913117', 'Jian Zhao', 'jian zhao')<br/>('1898172', 'Wenhan Yang', 'wenhan yang')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')</td><td></td></tr><tr><td>209324c152fa8fab9f3553ccb62b693b5b10fb4d</td><td>CROWDSOURCED VISUAL KNOWLEDGE REPRESENTATIONS
+<br/>VISUAL GENOME
+<br/>A THESIS
+<br/>SUBMITTED TO THE DEPARTMENT OF COMPUTER SCIENCE
+<br/>AND THE COMMITTEE ON GRADUATE STUDIES
+<br/><b>OF STANFORD UNIVERSITY</b><br/>IN PARTIAL FULFILLMENT OF THE REQUIREMENTS
+<br/>FOR THE DEGREE OF
+<br/>MASTERS OF SCIENCE
+<br/>March 2016
+</td><td>('2580593', 'Ranjay Krishna', 'ranjay krishna')</td><td></td></tr><tr><td>2050847bc7a1a0453891f03aeeb4643e360fde7d</td><td>Accio: A Data Set for Face Track Retrieval
+<br/>in Movies Across Age
+<br/><b>Istanbul Technical University, Istanbul, Turkey</b><br/><b>Karlsruhe Institute of Technology, Karlsruhe, Germany</b></td><td>('2398366', 'Esam Ghaleb', 'esam ghaleb')<br/>('2103464', 'Makarand Tapaswi', 'makarand tapaswi')<br/>('2256981', 'Ziad Al-Halah', 'ziad al-halah')<br/>('1742325', 'Rainer Stiefelhagen', 'rainer stiefelhagen')</td><td>{ghalebe, ekenel}@itu.edu.tr, {tapaswi, ziad.al-halah, rainer.stiefelhagen}@kit.edu
+</td></tr><tr><td>20ade100a320cc761c23971d2734388bfe79f7c5</td><td>IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE
+<br/>Subspace Clustering via Good Neighbors
+</td><td>('1755872', 'Jufeng Yang', 'jufeng yang')<br/>('1780418', 'Jie Liang', 'jie liang')<br/>('39329211', 'Kai Wang', 'kai wang')<br/>('1715634', 'Ming-Hsuan Yang', 'ming-hsuan yang')</td><td></td></tr><tr><td>202d8d93b7b747cdbd6e24e5a919640f8d16298a</td><td>Face Classification via Sparse Approximation
+<br/><b>Bilgi University, Dolapdere, Istanbul, TR</b><br/><b>Bo gazici University, Istanbul, TR</b><br/><b>Y ld z Teknik University, Istanbul, TR</b></td><td>('2804969', 'Songul Albayrak', 'songul albayrak')</td><td></td></tr><tr><td>20767ca3b932cbc7b8112db21980d7b9b3ea43a3</td><td></td><td></td><td></td></tr><tr><td>20a16efb03c366fa4180659c2b2a0c5024c679da</td><td>SCREENING RULES FOR OVERLAPPING GROUP LASSO
+<br/><b>Carnegie Mellon University</b><br/>Recently, to solve large-scale lasso and group lasso problems,
+<br/>screening rules have been developed, the goal of which is to reduce
+<br/>the problem size by efficiently discarding zero coefficients using simple
+<br/>rules independently of the others. However, screening for overlapping
+<br/>group lasso remains an open challenge because the overlaps between
+<br/>groups make it infeasible to test each group independently. In this
+<br/>paper, we develop screening rules for overlapping group lasso. To ad-
+<br/>dress the challenge arising from groups with overlaps, we take into
+<br/>account overlapping groups only if they are inclusive of the group
+<br/>being tested, and then we derive screening rules, adopting the dual
+<br/>polytope projection approach. This strategy allows us to screen each
+<br/>group independently of each other. In our experiments, we demon-
+<br/>strate the efficiency of our screening rules on various datasets.
+<br/>1. Introduction. We propose efficient screening rules for regression
+<br/>with the overlapping group lasso penalty. Our goal is to develop simple
+<br/>rules to discard groups with zero coefficients in the optimization problem
+<br/>with the following form:
+<br/>(cid:13)(cid:13)βg
+<br/>(cid:13)(cid:13)2 ,
+<br/>ng
+<br/>(1.1)
+<br/>min
+<br/>(cid:107)y − Xβ(cid:107)2
+<br/>2 + λ
+<br/>(cid:88)
+<br/>g∈G
+<br/>where X ∈ RN×J is the input data for J inputs and N samples, y ∈ RN×1
+<br/>is the output vector, β ∈ RJ×1 is the vector of regression coefficients, ng
+<br/>is the size of group g, and λ is a regularization parameter that determines
+<br/>the sparsity of β. In this setting, G represents a set of groups of coefficients,
+<br/>defined a priori, and we allow arbitrary overlap between different groups,
+<br/>hence “overlapping” group lasso. Overlapping group lasso is a general model
+<br/>that subsumes lasso (Tibshirani, 1996), group lasso (Yuan and Lin, 2006),
+<br/>sparse group lasso (Simon et al., 2013), composite absolute penalties (Zhao,
+<br/>Rocha and Yu, 2009), and tree lasso (Zhao, Rocha and Yu, 2009; Kim et al.,
+<br/>2012) with (cid:96)1/(cid:96)2 penalty because they are a specific form of overlapping
+<br/>group lasso.
+<br/>In this paper, we do not consider the latent group lasso proposed by
+<br/>Jacob et al. (Jacob, Obozinski and Vert, 2009), where support is defined
+<br/>by the union of groups with nonzero coefficients. Instead, we consider the
+</td><td>('1918078', 'Seunghak Lee', 'seunghak lee')<br/>('1752601', 'Eric P. Xing', 'eric p. xing')</td><td></td></tr><tr><td>205b34b6035aa7b23d89f1aed2850b1d3780de35</td><td>504
+<br/>2014 IEEE International Conference on Acoustic, Speech and Signal Processing (ICASSP)
+<br/>978-1-4799-2893-4/14/$31.00 ©2014 IEEE
+<br/>†Shenzhen Key Lab. of Information Sci&Tech,
+<br/><b>Nagaoka University of Technology, Japan</b><br/>RECOGNITION
+<br/>1. INTRODUCTION
+</td><td></td><td></td></tr><tr><td>20c2a5166206e7ffbb11a23387b9c5edf42b5230</td><td></td><td></td><td></td></tr><tr><td>20e505cef6d40f896e9508e623bfc01aa1ec3120</td><td>Fast Online Incremental Attribute-based Object
+<br/>Classification using Stochastic Gradient Descent and Self-
+<br/>Organizing Incremental Neural Network
+<br/>Department of Computational Intelligence and Systems Science,
+<br/><b>Tokyo Institute of Technology</b><br/>4259 Nagatsuta, Midori-ku, Yokohama, 226-8503 JAPAN
+</td><td>('2641676', 'Sirinart Tangruamsub', 'sirinart tangruamsub')<br/>('1711160', 'Aram Kawewong', 'aram kawewong')<br/>('1727786', 'Osamu Hasegawa', 'osamu hasegawa')</td><td>(tangruamsub.s.aa, kawewong.a.aa, hasegawa.o.aa)@m.titech.ac.jp
+</td></tr><tr><td>205e4d6e0de81c7dd6c83b737ffdd4519f4f7ffa</td><td>A Model-Based Facial Expression Recognition
+<br/>Algorithm using Principal Components Analysis
+<br/>N. Vretos, N. Nikolaidis and I.Pitas
+<br/><b>Informatics and Telematics Institute</b><br/>Centre for Research and Technology Hellas, Greece
+<br/><b>Aristotle University of Thessaloniki</b><br/>Thessaloniki 54124, Greece Tel,Fax: +30-2310996304
+</td><td></td><td>e-mail: vretos,nikolaid,pitas@aiia.csd.auth.gr
+</td></tr><tr><td>2098983dd521e78746b3b3fa35a22eb2fa630299</td><td></td><td></td><td></td></tr><tr><td>20b437dc4fc44c17f131713ffcbb4a8bd672ef00</td><td>Head pose tracking from RGBD sensor based on
+<br/>direct motion estimation
+<br/><b>Warsaw University of Technology, Poland</b></td><td>('1899063', 'Adam Strupczewski', 'adam strupczewski')<br/>('2393538', 'Marek Kowalski', 'marek kowalski')<br/>('1930272', 'Jacek Naruniec', 'jacek naruniec')</td><td></td></tr><tr><td>206e24f7d4b3943b35b069ae2d028143fcbd0704</td><td>Learning Structure and Strength of CNN Filters for Small Sample Size Training
+<br/>IIIT-Delhi, India
+</td><td>('3390448', 'Rohit Keshari', 'rohit keshari')<br/>('2338122', 'Mayank Vatsa', 'mayank vatsa')<br/>('39129417', 'Richa Singh', 'richa singh')</td><td>{rohitk, mayank, rsingh}@iiitd.ac.in
+</td></tr><tr><td>208a2c50edb5271a050fa9f29d3870f891daa4dc</td><td>http://www.journalofvision.org/content/11/13/24
+<br/>The resolution of facial expressions of emotion
+<br/>Aleix M. Martinez
+<br/><b>The Ohio State University, Columbus, OH, USA</b><br/><b>The Ohio State University, Columbus, OH, USA</b><br/><b>Much is known on how facial expressions of emotion are produced, including which individual muscles are most active in</b><br/>each expression. Yet, little is known on how this information is interpreted by the human visual system. This paper presents
+<br/>a systematic study of the image dimensionality of facial expressions of emotion. In particular, we investigate how recognition
+<br/>degrades when the resolution of the image (i.e., number of pixels when seen as a 5.3 by 8 degree stimulus) is reduced. We
+<br/>show that recognition is only impaired in practice when the image resolution goes below 20  30 pixels. A study of the
+<br/>confusion tables demonstrates that each expression of emotion is consistently confused by a small set of alternatives and
+<br/>that the confusion is not symmetric, i.e., misclassifying emotion a as b does not imply we will mistake b for a. This
+<br/>asymmetric pattern is consistent over the different image resolutions and cannot be explained by the similarity of muscle
+<br/>activation. Furthermore, although women are generally better at recognizing expressions of emotion at all resolutions, the
+<br/>asymmetry patterns are the same. We discuss the implications of these results for current models of face perception.
+<br/>Keywords: resolution, facial expressions, emotion
+<br/>http://www.journalofvision.org/content/11/13/24, doi:10.1167/11.13.24.
+<br/>Introduction
+<br/>Emotions are fundamental in studies of cognitive science
+<br/>(Damassio, 1995), neuroscience (LeDoux, 2000), social
+<br/>psychology (Adolphs, 2003), sociology (Massey, 2002),
+<br/>economics (Connolly & Zeelenberg, 2002), human evo-
+<br/>lution (Schmidt & Cohn, 2001), and engineering and
+<br/>computer science (Pentland, 2000). Emotional states and
+<br/>emotional analysis are known to influence or mediate
+<br/>behavior and cognitive processing. Many of these emo-
+<br/>tional processes may be hidden to an outside observer,
+<br/>whereas others are visible through facial expressions of
+<br/>emotion.
+<br/>Facial expressions of emotion are a consequence of the
+<br/>movement of the muscles underneath the skin of our face
+<br/>(Duchenne, 1862/1990). The movement of these muscles
+<br/>causes the skin of the face to deform in ways that an
+<br/>external observer can use to interpret the emotion of that
+<br/>person. Each muscle employed to create these facial
+<br/>constructs is referred to as an Action Unit (AU). Ekman
+<br/>and Friesen (1978) identified those AUs responsible for
+<br/>generating the emotions most commonly seen in the
+<br/>majority of culturesVanger, sadness, fear, surprise,
+<br/>happiness, and disgust. For example, happiness generally
+<br/>involves an upper–backward movement of the mouth
+<br/>corners; while the mouth is upturned (to produce the
+<br/>smile), the cheeks lift and the upper corner of the eyes
+<br/>wrinkle. This is known as the Duchenne (1862/1990)
+<br/>smile. It requires the activation of two facial muscles:
+<br/>the zygomatic major (AU 12) to raise the corners of the
+<br/>mouth and the orbicularis oculi (AU 42) to uplift the
+<br/>cheeks and form the eye corner wrinkles. The muscles and
+<br/>mechanisms used to produce the abovementioned facial
+<br/>expressions of emotion are now quite well understood and
+<br/>it has been shown that the AUs used in each expression
+<br/>are relatively consistent from person to person and among
+<br/>distinct cultures (Burrows & Cohn, 2009).
+<br/>Yet, as much as we understand the generative process
+<br/>of facial expressions of emotion, much still needs to be
+<br/>learned about their interpretation by our cognitive system.
+<br/>Thus, an important open problem is to define the
+<br/>computational (cognitive) space of facial expressions of
+<br/>emotion of the human visual system. In the present paper,
+<br/>we study the limits of this visual processing of facial
+<br/>expressions of emotion and what it tells us about how
+<br/>emotions are represented and recognized by our visual
+<br/>system. Note that the term “computational space” is used
+<br/>here to specify the combination of features (dimensions)
+<br/>used by the cognitive system to determine (i.e., analyze
+<br/>and classify)
+<br/>for each facial
+<br/>expression of emotion.
+<br/>the appropriate label
+<br/>To properly address the problem stated in the preceding
+<br/>paragraph, it is worth recalling that some facial expressions
+<br/>of emotion may have evolved to enhance or reduce our
+<br/>sensory inputs (Susskind et al., 2008). For example, fear is
+<br/>associated with a facial expression with open mouth,
+<br/>nostrils, and eyes and an inhalation of air, as if to enhance
+<br/>the perception of our environment, while the expression of
+<br/>disgust closes these channels (Chapman, Kim, Susskind,
+<br/>& Anderson, 2009). Other emotions, though, may have
+<br/>evolved for communication purposes (Schmidt & Cohn,
+<br/>2001). Under this assumption,
+<br/>the evolution of this
+<br/>capacity to express emotions had to be accompanied by
+<br/>doi: 10.1167/11.13.24
+<br/>Received January 25, 2011; published November 30, 2011
+<br/>ISSN 1534-7362 * ARVO
+<br/>Downloaded From: http://jov.arvojournals.org/pdfaccess.ashx?url=/data/journals/jov/932792/ on 06/20/2017 </td><td>('2323717', 'Shichuan Du', 'shichuan du')</td><td></td></tr><tr><td>207798603e3089a1c807c93e5f36f7767055ec06</td><td>Modeling the Correlation between
+<br/>Modality Semantics and Facial Expressions
+<br/>* Key Laboratory of Pervasive Computing, Ministry of Education
+<br/>Tsinghua National Laboratory for Information Science and Technology (TNList)
+<br/><b>Tsinghua University, Beijing 100084, China</b><br/><b>Tsinghua-CUHK Joint Research Center for Media Sciences, Technologies and Systems</b><br/><b>Graduate School at Shenzhen, Tsinghua University, Shenzhen 518055, China</b><br/>† Human-Computer Communications Laboratory, Department of Systems Engineering and Engineering Management,
+<br/><b>The Chinese University of Hong Kong, Hong Kong SAR, China</b></td><td>('25714033', 'Jia Jia', 'jia jia')<br/>('37783013', 'Xiaohui Wang', 'xiaohui wang')<br/>('3860920', 'Zhiyong Wu', 'zhiyong wu')<br/>('7239047', 'Lianhong Cai', 'lianhong cai')</td><td>Contact E-mail: # zywu@sz.tsinghua.edu.cn, * jjia@tsinghua.edu.cn
+</td></tr><tr><td>20be15dac7d8a5ba4688bf206ad24cab57d532d6</td><td>Face Shape Recovery and Recognition Using a
+<br/>Surface Gradient Based Statistical Model
+<br/>1 Centro de Investigaci´on y Estudios Avanzados del I.P.N., Ramos Arizpe 25900,
+<br/>Coahuila, Mexico
+<br/><b>The University of York, Heslington, York YO10 5DD, United Kingdom</b></td><td>('1679753', 'Edwin R. Hancock', 'edwin r. hancock')</td><td>mario.castelan@cinvestav.edu.mx
+<br/>erh@cs.york.ac.uk
+</td></tr><tr><td>2059d2fecfa61ddc648be61c0cbc9bc1ad8a9f5b</td><td>TRANSACTIONS ON AUDIO, SPEECH, AND LANGUAGE PROCESSING, VOL. 23, NO. 4, APRIL 2015
+<br/>Co-Localization of Audio Sources in Images Using
+<br/>Binaural Features and Locally-Linear Regression
+<br/>∗ INRIA Grenoble Rhˆone-Alpes, Montbonnot Saint-Martin, France
+<br/>† Univ. Grenoble Alpes, GIPSA-Lab, France
+<br/>‡ Dept. Electrical Eng., Technion-Israel Inst. of Technology, Haifa, Israel
+</td><td>('3307172', 'Antoine Deleforge', 'antoine deleforge')</td><td></td></tr><tr><td>206fbe6ab6a83175a0ef6b44837743f8d5f9b7e8</td><td></td><td></td><td></td></tr><tr><td>2042aed660796b14925db17c0a8b9fbdd7f3ebac</td><td>Saliency in Crowd
+<br/>Department of Electrical and Computer Engineering
+<br/><b>National University of Singapore, Singapore</b></td><td>('40452812', 'Ming Jiang', 'ming jiang')<br/>('1946538', 'Juan Xu', 'juan xu')<br/>('3243515', 'Qi Zhao', 'qi zhao')</td><td>eleqiz@nus.edu.sg
+</td></tr><tr><td>202dc3c6fda654aeb39aee3e26a89340fb06802a</td><td>Spatio-Temporal Instance Learning:
+<br/>Action Tubes from Class Supervision
+<br/><b>University of Amsterdam</b></td><td>('2606260', 'Pascal Mettes', 'pascal mettes')</td><td></td></tr><tr><td>20111924fbf616a13d37823cd8712a9c6b458cd6</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 130 – No.11, November2015
+<br/>Linear Regression Line based Partial Face Recognition
+<br/>Naveena M.
+<br/>Department of Studies in
+<br/>Computer Science,
+<br/>Manasagagothri,
+<br/>Mysore.
+<br/>Department of Studies in
+<br/>Computer Science,
+<br/>Manasagagothri,
+<br/>Mysore.
+<br/>P. Nagabhushan
+<br/>Department of Studies in
+<br/>Computer Science,
+<br/>Manasagagothri,
+<br/>Mysore.
+<br/>images. In
+</td><td>('33377948', 'G. Hemantha Kumar', 'g. hemantha kumar')</td><td></td></tr><tr><td>20ebbcb6157efaacf7a1ceb99f2f3e2fdf1384e6</td><td>Appears in the Second International Conference on Audio- and Video-based Biometric Person Authentication, AVBPA’99, Washington D. C. USA, March 22-24, 1999.
+<br/>Comparative Assessment of Independent Component
+<br/>Analysis (ICA) for Face Recognition
+<br/><b>George Mason University</b><br/><b>University Drive, Fairfax, VA 22030-4444, USA</b><br/>
+</td><td>('39664966', 'Chengjun Liu', 'chengjun liu')<br/>('1781577', 'Harry Wechsler', 'harry wechsler')</td><td>@cs.gmu.edu
+</td></tr><tr><td>20532b1f80b509f2332b6cfc0126c0f80f438f10</td><td>A deep matrix factorization method for learning
+<br/>attribute representations
+<br/>Bj¨orn W. Schuller, Senior member, IEEE
+</td><td>('2814229', 'George Trigeorgis', 'george trigeorgis')<br/>('2732737', 'Konstantinos Bousmalis', 'konstantinos bousmalis')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')</td><td></td></tr><tr><td>205af28b4fcd6b569d0241bb6b255edb325965a4</td><td>Intel Serv Robotics (2008) 1:143–157
+<br/>DOI 10.1007/s11370-007-0014-z
+<br/>SPECIAL ISSUE
+<br/>Facial expression recognition and tracking for intelligent human-robot
+<br/>interaction
+<br/>Received: 27 June 2007 / Accepted: 6 December 2007 / Published online: 23 January 2008
+<br/>© Springer-Verlag 2008
+</td><td>('1716880', 'Y. Yang', 'y. yang')</td><td></td></tr><tr><td>20cfb4136c1a984a330a2a9664fcdadc2228b0bc</td><td>Sparse Coding Trees with Application to Emotion Classification
+<br/><b>Harvard University, Cambridge, MA</b></td><td>('3144257', 'Hsieh-Chung Chen', 'hsieh-chung chen')<br/>('2512314', 'Marcus Z. Comiter', 'marcus z. comiter')<br/>('1731308', 'H. T. Kung', 'h. t. kung')<br/>('1841852', 'Bradley McDanel', 'bradley mcdanel')</td><td></td></tr><tr><td>20c02e98602f6adf1cebaba075d45cef50de089f</td><td>Video Jigsaw: Unsupervised Learning of Spatiotemporal Context for Video
+<br/>Action Recognition
+<br/><b>Georgia Institute of Technology</b><br/><b>Carnegie Mellon University</b><br/>Irfan Essa
+<br/><b>Georgia Institute of Technology</b></td><td>('2308598', 'Unaiza Ahsan', 'unaiza ahsan')<br/>('37714701', 'Rishi Madhok', 'rishi madhok')</td><td>uahsan3@gatech.edu
+<br/>rmadhok@andrew.cmu.edu
+<br/>irfan@gatech.edu
+</td></tr><tr><td>2020e8c0be8fa00d773fd99b6da55029a6a83e3d</td><td>An Evaluation of the Invariance Properties
+<br/>of a Biologically-Inspired System
+<br/>for Unconstrained Face Recognition
+<br/><b>Massachusetts Institute of Technology, Cambridge, MA 02139, USA</b><br/><b>Rowland Institute at Harvard, Cambridge, MA 02142, USA</b></td><td>('30017846', 'Nicolas Pinto', 'nicolas pinto')</td><td>pinto@mit.edu
+<br/>cox@rowland.harvard.edu
+</td></tr><tr><td>20a0b23741824a17c577376fdd0cf40101af5880</td><td>Learning to track for spatio-temporal action localization
+<br/>Zaid Harchaouia,b
+<br/>b NYU
+<br/>a Inria∗
+</td><td>('2492127', 'Philippe Weinzaepfel', 'philippe weinzaepfel')<br/>('2462253', 'Cordelia Schmid', 'cordelia schmid')</td><td>firstname.lastname@inria.fr
+</td></tr><tr><td>18c72175ddbb7d5956d180b65a96005c100f6014</td><td>IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. 23, NO. 6,
+<br/>JUNE 2001
+<br/>643
+<br/>From Few to Many: Illumination Cone
+<br/>Models for Face Recognition under
+<br/>Variable Lighting and Pose
+</td><td>('3230391', 'Athinodoros S. Georghiades', 'athinodoros s. georghiades')<br/>('1767767', 'Peter N. Belhumeur', 'peter n. belhumeur')<br/>('1765887', 'David J. Kriegman', 'david j. kriegman')</td><td></td></tr><tr><td>18636347b8741d321980e8f91a44ee054b051574</td><td>978-1-4244-5654-3/09/$26.00 ©2009 IEEE
+<br/>37
+<br/>ICIP 2009
+</td><td></td><td></td></tr><tr><td>18206e1b988389eaab86ef8c852662accf3c3663</td><td></td><td></td><td></td></tr><tr><td>189b1859f77ddc08027e1e0f92275341e5c0fdc6</td><td>Sparse Representations and Distance Learning for
+<br/>Attribute based Category Recognition
+<br/>1 Center for Imaging Science, 2 Department of Computer Engineering
+<br/><b>Rochester Institute of Technology, Rochester, NY</b></td><td>('2272443', 'Grigorios Tsagkatakis', 'grigorios tsagkatakis')</td><td>{gxt6260, andreas.savakis}@rit.edu
+</td></tr><tr><td>18a9f3d855bd7728ed4f988675fa9405b5478845</td><td>ISSN: 0976-9102 (ONLINE)
+<br/>DOI: 10.21917/ijivp.2013.0103
+<br/> ICTACT JOURNAL ON IMAGE AND VIDEO PROCESSING, NOVEMBER 2013, VOLUME: 04, ISSUE: 02
+<br/>AN ILLUMINATION INVARIANT TEXTURE BASED FACE RECOGNITION
+<br/><b>J. P. College of Engineering, India</b><br/><b>Manonmaniam Sundaranar University, India</b><br/><b>St. Xavier s Catholic College of Engineering, India</b></td><td>('2792485', 'K. Meena', 'k. meena')<br/>('3311251', 'A. Suruliandi', 'a. suruliandi')<br/>('1998086', 'Reena Rose', 'reena rose')</td><td>E-mail: meen.nandhu@gmail.com
+<br/>E-mail: suruliandi@yahoo.com
+<br/>E-mail: mailtoreenarose@yahoo.in
+</td></tr><tr><td>181045164df86c72923906aed93d7f2f987bce6c</td><td>RHEINISCH-WESTFÄLISCHE TECHNISCHE
+<br/>HOCHSCHULE AACHEN
+<br/>KNOWLEDGE-BASED SYSTEMS GROUP
+<br/>Detection and Recognition of Human
+<br/>Faces using Random Forests for a
+<br/>Mobile Robot
+<br/>MASTER OF SCIENCE THESIS
+<br/>MATRICULATION NUMBER: 26 86 51
+<br/>SUPERVISOR:
+<br/>SECOND SUPERVISOR:
+<br/>PROF. ENRICO BLANZIERI, PH. D.
+<br/>ADVISERS:
+</td><td>('1779592', 'GERHARD LAKEMEYER', 'gerhard lakemeyer')<br/>('2181555', 'VAISHAK BELLE', 'vaishak belle')<br/>('1779592', 'GERHARD LAKEMEYER', 'gerhard lakemeyer')<br/>('1686596', 'STEFAN SCHIFFER', 'stefan schiffer')<br/>('1879646', 'THOMAS DESELAERS', 'thomas deselaers')</td><td></td></tr><tr><td>18166432309000d9a5873f989b39c72a682932f5</td><td>LEARNING A WARPED SUBSPACE MODEL OF FACES
+<br/>WITH IMAGES OF UNKNOWN POSE AND
+<br/>ILLUMINATION
+<br/><b>GRASP Laboratory, University of Pennsylvania, 3330 Walnut Street, Philadelphia, PA, USA</b><br/>Keywords:
+</td><td>('2720935', 'Jihun Ham', 'jihun ham')<br/>('1732066', 'Daniel D. Lee', 'daniel d. lee')</td><td>jhham@seas.upenn.edu, ddlee@seas.upenn.edu
+</td></tr><tr><td>18d5b0d421332c9321920b07e0e8ac4a240e5f1f</td><td>Collaborative Representation Classification
+<br/>Ensemble for Face Recognition
+</td><td>('2972883', 'Suah Kim', 'suah kim')<br/>('2434811', 'Run Cui', 'run cui')<br/>('1730037', 'Hyoung Joong Kim', 'hyoung joong kim')</td><td></td></tr><tr><td>18d51a366ce2b2068e061721f43cb798177b4bb7</td><td>Cognition and Emotion
+<br/>ISSN: 0269-9931 (Print) 1464-0600 (Online) Journal homepage: http://www.tandfonline.com/loi/pcem20
+<br/>Looking into your eyes: observed pupil size
+<br/>influences approach-avoidance responses
+<br/>eyes: observed pupil size influences approach-avoidance responses, Cognition and Emotion, DOI:
+<br/>10.1080/02699931.2018.1472554
+<br/>To link to this article: https://doi.org/10.1080/02699931.2018.1472554
+<br/>View supplementary material
+<br/>Published online: 11 May 2018.
+<br/>Submit your article to this journal
+<br/>View related articles
+<br/>View Crossmark data
+<br/>Full Terms & Conditions of access and use can be found at
+<br/>http://www.tandfonline.com/action/journalInformation?journalCode=pcem20
+</td><td>('47930228', 'Marco Brambilla', 'marco brambilla')<br/>('41074530', 'Marco Biella', 'marco biella')<br/>('47930228', 'Marco Brambilla', 'marco brambilla')<br/>('41074530', 'Marco Biella', 'marco biella')</td><td></td></tr><tr><td>18c6c92c39c8a5a2bb8b5673f339d3c26b8dcaae</td><td>Learning invariant representations and applications
+<br/>to face verification
+<br/>Center for Brains, Minds and Machines
+<br/><b>McGovern Institute for Brain Research</b><br/><b>Massachusetts Institute of Technology</b><br/>Cambridge MA 02139
+</td><td>('1694846', 'Qianli Liao', 'qianli liao')</td><td>lql@mit.edu, jzleibo@mit.edu, tp@ai.mit.edu
+</td></tr><tr><td>185263189a30986e31566394680d6d16b0089772</td><td>Efficient Annotation of Objects for Video Analysis
+<br/>Thesis submitted in partial fulfillment
+<br/>of the requirements for the degree of
+<br/>MS in Computer Science and Engineering
+<br/>by
+<br/>Research
+<br/>by
+<br/>Sirnam Swetha
+<br/>201303014
+<br/><b>International Institute of Information Technology</b><br/>Hyderabad - 500 032, INDIA
+<br/>June 2018
+</td><td></td><td>sirnam.swetha@research.iiit.ac.in
+</td></tr><tr><td>1885acea0d24e7b953485f78ec57b2f04e946eaf</td><td>Combining Local and Global Features for 3D Face Tracking
+<br/>Megvii (face++) Research
+</td><td>('40448951', 'Pengfei Xiong', 'pengfei xiong')<br/>('1775836', 'Guoqing Li', 'guoqing li')<br/>('3756559', 'Yuhang Sun', 'yuhang sun')</td><td>{xiongpengfei, liguoqing, sunyuhang}@megvii.com
+</td></tr><tr><td>184750382fe9b722e78d22a543e852a6290b3f70</td><td></td><td></td><td></td></tr><tr><td>18b9dc55e5221e704f90eea85a81b41dab51f7da</td><td>Attention-based Temporal Weighted
+<br/>Convolutional Neural Network for
+<br/>Action Recognition
+<br/><b>Xi an Jiaotong University, Xi an, Shannxi 710049, P.R.China</b><br/>2HERE Technologies, Chicago, IL 60606, USA
+<br/>3Alibaba Group, Hangzhou, Zhejiang 311121, P.R.China
+<br/>4Microsoft Research, Redmond, WA 98052, USA
+</td><td>('14800230', 'Jinliang Zang', 'jinliang zang')<br/>('40367806', 'Le Wang', 'le wang')<br/>('46324995', 'Qilin Zhang', 'qilin zhang')<br/>('1786361', 'Zhenxing Niu', 'zhenxing niu')<br/>('1745420', 'Gang Hua', 'gang hua')<br/>('1715389', 'Nanning Zheng', 'nanning zheng')</td><td></td></tr><tr><td>18a849b1f336e3c3b7c0ee311c9ccde582d7214f</td><td>Int J Comput Vis
+<br/>DOI 10.1007/s11263-012-0564-1
+<br/>Efficiently Scaling up Crowdsourced Video Annotation
+<br/>A Set of Best Practices for High Quality, Economical Video Labeling
+<br/>Received: 31 October 2011 / Accepted: 20 August 2012
+<br/>© Springer Science+Business Media, LLC 2012
+</td><td>('1856025', 'Carl Vondrick', 'carl vondrick')</td><td></td></tr><tr><td>18cd79f3c93b74d856bff6da92bfc87be1109f80</td><td>International Journal of Advances in Engineering & Technology, May 2012.
+<br/>©IJAET ISSN: 2231-1963
+<br/>AN APPLICATION TO HUMAN FACE PHOTO-SKETCH
+<br/>SYNTHESIS AND RECOGNITION
+<br/>1Student and 2Professor & Head,
+<br/><b>Bharti Vidyapeeth Deemed University, Pune, India</b></td><td>('35541779', 'Amit R. Sharma', 'amit r. sharma')<br/>('2731104', 'Prakash. R. Devale', 'prakash. r. devale')</td><td></td></tr><tr><td>182470fd0c18d0c5979dff75d089f1da176ceeeb</td><td>A Multimodal Annotation Schema for Non-Verbal Affective
+<br/>Analysis in the Health-Care Domain
+<br/>Federico M. Sukno
+<br/>Adrià Ruiz
+<br/>Department of Information and Communication Technologies
+<br/><b>Pompeu Fabra University, Spain</b><br/>Human-Centered Multimedia
+<br/><b>Augsburg University, Germany</b><br/>Louisa Praagst
+<br/><b>Institute of Communications Engineering</b><br/><b>Ulm University, Germany</b><br/><b>Information Technologies Institute</b><br/>Centre for Research & Technology Hellas, Greece
+</td><td>('33451278', 'Mónica Domínguez', 'mónica domínguez')<br/>('34326647', 'Dominik Schiller', 'dominik schiller')<br/>('2565410', 'Florian Lingenfelser', 'florian lingenfelser')<br/>('8632684', 'Ekeni Kamateri', 'ekeni kamateri')</td><td></td></tr><tr><td>1862cb5728990f189fa91c67028f6d77b5ac94f6</td><td>Speeding Up Tracking by Ignoring Features
+<br/>Hamdi Dibeklio˘glu
+<br/><b>Pattern Recognition and Bioinformatics Group, Delft University of Technology</b><br/>Mekelweg 4, 2628 CD Delft, The Netherlands
+</td><td>('2883723', 'Lu Zhang', 'lu zhang')<br/>('1803520', 'Laurens van der Maaten', 'laurens van der maaten')</td><td>{lu.zhang, h.dibeklioglu, l.j.p.vandermaaten}@tudelft.nl
+</td></tr><tr><td>1862bfca2f105fddfc79941c90baea7db45b8b16</td><td>Annotator Rationales for Visual Recognition
+<br/><b>University of Texas at Austin</b></td><td>('7408951', 'Jeff Donahue', 'jeff donahue')<br/>('1794409', 'Kristen Grauman', 'kristen grauman')</td><td>{jdd,grauman}@cs.utexas.edu
+</td></tr><tr><td>1886b6d9c303135c5fbdc33e5f401e7fc4da6da4</td><td>Knowledge Guided Disambiguation for Large-Scale
+<br/>Scene Classification with Multi-Resolution CNNs
+</td><td>('39709927', 'Limin Wang', 'limin wang')<br/>('2072196', 'Sheng Guo', 'sheng guo')<br/>('1739171', 'Weilin Huang', 'weilin huang')<br/>('3331521', 'Yuanjun Xiong', 'yuanjun xiong')<br/>('40285012', 'Yu Qiao', 'yu qiao')</td><td></td></tr><tr><td>1888bf50fd140767352158c0ad5748b501563833</td><td>PA R T 1
+<br/>THE BASICS
+</td><td></td><td></td></tr><tr><td>187d4d9ba8e10245a34f72be96dd9d0fb393b1aa</td><td>GAIDON et al.: MINING VISUAL ACTIONS FROM MOVIES
+<br/>Mining visual actions from movies
+<br/>http://lear.inrialpes.fr/people/gaidon/
+<br/>Marcin Marszałek2
+<br/>http://www.robots.ox.ac.uk/~marcin/
+<br/>http://lear.inrialpes.fr/people/schmid/
+<br/>1 LEAR
+<br/>INRIA, LJK
+<br/>Grenoble, France
+<br/>2 Visual Geometry Group
+<br/><b>University of Oxford</b><br/>Oxford, UK
+</td><td>('1799820', 'Adrien Gaidon', 'adrien gaidon')<br/>('2462253', 'Cordelia Schmid', 'cordelia schmid')</td><td></td></tr><tr><td>182f3aa4b02248ff9c0f9816432a56d3c8880706</td><td>Sparse Coding for Classification via Discrimination Ensemble∗
+<br/>1School of Computer Science & Engineering, South China Univ. of Tech., Guangzhou 510006, China
+<br/>2School of Automation Science & Engineering, South China Univ. of Tech., Guangzhou 510006, China
+<br/><b>National University of Singapore, Singapore</b></td><td>('2217653', 'Yuhui Quan', 'yuhui quan')<br/>('1725160', 'Yong Xu', 'yong xu')<br/>('2111796', 'Yuping Sun', 'yuping sun')<br/>('34881546', 'Yan Huang', 'yan huang')<br/>('39689301', 'Hui Ji', 'hui ji')</td><td>{csyhquan@scut.edu.cn, yxu@scut.edu.cn, ausyp@scut.edu.cn, matjh@nus.edu.sg}
+</td></tr><tr><td>18941b52527e6f15abfdf5b86a0086935706e83b</td><td>DeepGUM: Learning Deep Robust Regression with a
+<br/>Gaussian-Uniform Mixture Model
+<br/>1 Inria Grenoble Rhˆone-Alpes, Montbonnot-Saint-Martin, France,
+<br/><b>University of Granada, Granada, Spain</b><br/><b>University of Trento, Trento, Italy</b></td><td>('2793152', 'Pablo Mesejo', 'pablo mesejo')<br/>('1780201', 'Xavier Alameda-Pineda', 'xavier alameda-pineda')<br/>('1794229', 'Radu Horaud', 'radu horaud')</td><td>firstname.name@inria.fr
+</td></tr><tr><td>185360fe1d024a3313042805ee201a75eac50131</td><td>299
+<br/>Person De-Identification in Videos
+</td><td>('35624289', 'Prachi Agrawal', 'prachi agrawal')<br/>('1729020', 'P. J. Narayanan', 'p. j. narayanan')</td><td></td></tr><tr><td>1824b1ccace464ba275ccc86619feaa89018c0ad</td><td>One Millisecond Face Alignment with an Ensemble of Regression Trees
+<br/><b>KTH, Royal Institute of Technology</b><br/>Computer Vision and Active Perception Lab
+<br/>Teknikringen 14, Stockholm, Sweden
+</td><td>('2626422', 'Vahid Kazemi', 'vahid kazemi')<br/>('1736906', 'Josephine Sullivan', 'josephine sullivan')</td><td>{vahidk,sullivan}@csc.kth.se
+</td></tr><tr><td>18dfc2434a95f149a6cbb583cca69a98c9de9887</td><td></td><td></td><td></td></tr><tr><td>27a00f2490284bc0705349352d36e9749dde19ab</td><td>VoxCeleb2: Deep Speaker Recognition
+<br/>Visual Geometry Group, Department of Engineering Science,
+<br/><b>University of Oxford, UK</b></td><td>('2863890', 'Joon Son Chung', 'joon son chung')<br/>('19263506', 'Arsha Nagrani', 'arsha nagrani')<br/>('1688869', 'Andrew Zisserman', 'andrew zisserman')</td><td>{joon,arsha,az}@robots.ox.ac.uk
+</td></tr><tr><td>271e2856e332634eccc5e80ba6fa9bbccf61f1be</td><td>3D Spatio-Temporal Face Recognition Using Dynamic Range Model Sequences
+<br/>Department of Computer Science
+<br/><b>State University of New York at Binghamton, Binghamton, NY</b></td><td>('1681656', 'Yi Sun', 'yi sun')<br/>('8072251', 'Lijun Yin', 'lijun yin')</td><td></td></tr><tr><td>27846b464369095f4909f093d11ed481277c8bba</td><td>Journal of Signal and Information Processing, 2017, 8, 99-112
+<br/>http://www.scirp.org/journal/jsip
+<br/>ISSN Online: 2159-4481
+<br/>ISSN Print: 2159-4465
+<br/>Real-Time Face Detection and Recognition in
+<br/>Complex Background
+<br/><b>Illinois Institute of Technology, Chicago, Illinois, USA</b><br/>How to cite this paper: Zhang, X., Gon-
+<br/>not, T. and Saniie, J. (2017) Real-Time
+<br/>Face Detection and Recognition in Com-
+<br/>plex Background. Journal of Signal and
+<br/>Information Processing, 8, 99-112.
+<br/>https://doi.org/10.4236/jsip.2017.82007
+<br/>Received: March 25, 2017
+<br/>Accepted: May 16, 2017
+<br/>Published: May 19, 2017
+<br/>Copyright © 2017 by authors and
+<br/>Scientific Research Publishing Inc.
+<br/>This work is licensed under the Creative
+<br/>Commons Attribution International
+<br/>License (CC BY 4.0).
+<br/>http://creativecommons.org/licenses/by/4.0/
+<br/>
+<br/>Open Access
+</td><td>('1682913', 'Xin Zhang', 'xin zhang')<br/>('2324553', 'Thomas Gonnot', 'thomas gonnot')<br/>('1691321', 'Jafar Saniie', 'jafar saniie')</td><td></td></tr><tr><td>27eb7a6e1fb6b42516041def6fe64bd028b7614d</td><td>Joint Unsupervised Deformable Spatio-Temporal Alignment of Sequences
+<br/><b>Imperial College London, UK</b><br/><b>University of Twente, The Netherlands</b><br/><b>Center for Machine Vision and Signal Analysis, University of Oulu, Finland</b></td><td>('1786302', 'Lazaros Zafeiriou', 'lazaros zafeiriou')<br/>('2788012', 'Epameinondas Antonakos', 'epameinondas antonakos')<br/>('1694605', 'Maja Pantic', 'maja pantic')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')</td><td>⋆{l.zafeiriou12, e.antonakos, s.zafeiriou, m.pantic}@imperial.ac.uk, †PanticM@cs.utwente.nl
+</td></tr><tr><td>2717998d89d34f45a1cca8b663b26d8bf10608a9</td><td>Real-time Action Recognition with Enhanced Motion Vector CNNs
+<br/><b>Shenzhen key lab of Comp. Vis. and Pat. Rec., Shenzhen Institutes of Advanced Technology, CAS, China</b><br/><b>Key Laboratory of Embedded System and Service Computing, Ministry of Education, Tongji University, Shanghai, China</b><br/>3Computer Vision Lab, ETH Zurich, Switzerland
+</td><td>('3047890', 'Bowen Zhang', 'bowen zhang')<br/>('33345248', 'Limin Wang', 'limin wang')<br/>('1915826', 'Zhe Wang', 'zhe wang')<br/>('33427555', 'Yu Qiao', 'yu qiao')<br/>('2774427', 'Hanli Wang', 'hanli wang')</td><td></td></tr><tr><td>27c66b87e0fbb39f68ddb783d11b5b7e807c76e8</td><td>Fast Simplex-HMM for One-Shot Learning Activity Recognition
+<br/><b>Zaragoza University</b><br/>Zaragoza, Spain.
+<br/><b>Kingston University</b><br/>London,UK.
+</td><td>('1783769', 'Carlos Medrano', 'carlos medrano')<br/>('1687002', 'Dimitrios Makris', 'dimitrios makris')</td><td>[mrodrigo, corrite, ctmedra]@unizar.es
+<br/>D.Makris@kingston.ac.uk
+</td></tr><tr><td>27a0a7837f9114143717fc63294a6500565294c2</td><td>Face Recognition in Unconstrained Environments: A
+<br/>Comparative Study
+<br/>To cite this version:
+<br/>Environments: A Comparative Study: . Workshop on Faces in ’Real-Life’ Images: Detection,
+<br/>Alignment, and Recognition, Oct 2008, Marseille, France. 2008. <inria-00326730>
+<br/>HAL Id: inria-00326730
+<br/>https://hal.inria.fr/inria-00326730
+<br/>Submitted on 5 Oct 2008
+<br/>HAL is a multi-disciplinary open access
+<br/>archive for the deposit and dissemination of sci-
+<br/>entific research documents, whether they are pub-
+<br/>lished or not. The documents may come from
+<br/>teaching and research institutions in France or
+<br/><b>abroad, or from public or private research centers</b><br/>L’archive ouverte pluridisciplinaire HAL, est
+<br/>destin´ee au d´epˆot et `a la diffusion de documents
+<br/>scientifiques de niveau recherche, publi´es ou non,
+<br/>´emanant des ´etablissements d’enseignement et de
+<br/>recherche fran¸cais ou ´etrangers, des laboratoires
+<br/>publics ou priv´es.
+</td><td>('1689681', 'Rodrigo Verschae', 'rodrigo verschae')<br/>('1737300', 'Javier Ruiz-Del-Solar', 'javier ruiz-del-solar')<br/>('34047285', 'Mauricio Correa', 'mauricio correa')<br/>('1689681', 'Rodrigo Verschae', 'rodrigo verschae')<br/>('1737300', 'Javier Ruiz-Del-Solar', 'javier ruiz-del-solar')<br/>('34047285', 'Mauricio Correa', 'mauricio correa')</td><td></td></tr><tr><td>27d709f7b67204e1e5e05fe2cfac629afa21699d</td><td></td><td></td><td></td></tr><tr><td>271df16f789bd2122f0268c3e2fa46bc0cb5f195</td><td>Mining Discriminative Co-occurrence Patterns for Visual Recognition
+<br/>School of EEE
+<br/><b>Nanyang Technological University</b><br/>Singapore 639798
+<br/>Dept. of Media Analytics
+<br/>NEC Laboratories America
+<br/>Cupertino, CA, 95014 USA
+<br/>EECS Dept.
+<br/><b>Northwestern University</b><br/>Evanston, IL, 60208 USA
+</td><td>('34316743', 'Junsong Yuan', 'junsong yuan')<br/>('40634508', 'Ming Yang', 'ming yang')<br/>('39955137', 'Ying Wu', 'ying wu')</td><td>jsyuan@ntu.edu.sg
+<br/>myang@sv.nec-labs.com
+<br/>yingwu@eecs.northwestern.edu
+</td></tr><tr><td>275b5091c50509cc8861e792e084ce07aa906549</td><td>Institut für Informatik
+<br/>der Technischen
+<br/>Universität München
+<br/>Dissertation
+<br/>Leveraging the User’s Face as a Known Object
+<br/>in Handheld Augmented Reality
+<br/>Sebastian Bernhard Knorr
+</td><td></td><td></td></tr><tr><td>27218ff58c3f0e7d7779fba3bb465d746749ed7c</td><td>Active Learning for Image Ranking
+<br/>Over Relative Visual Attributes
+<br/>by
+<br/>Department of Computer Science
+<br/><b>University of Texas at Austin</b></td><td>('2548555', 'Lucy Liang', 'lucy liang')<br/>('1794409', 'Kristen Grauman', 'kristen grauman')</td><td></td></tr><tr><td>276dbb667a66c23545534caa80be483222db7769</td><td>3D Res. 2, 03(2011)4
+<br/>10.1007/3DRes.03(2011)4
+<br/>3DR REVIEW w
+<br/>An Introduction to Image-based 3D Surface Reconstruction and a
+<br/>Survey of Photometric Stereo Methods
+<br/>for
+<br/>introduction
+<br/>image-based 3D
+<br/>techniques. Then we describe
+<br/>Received: 21Feburary 2011 / Revised: 20 March 2011 / Accepted: 11 May 2011
+<br/><b>D Research Center, Kwangwoon University and Springer</b></td><td>('1908324', 'Steffen Herbort', 'steffen herbort')</td><td></td></tr><tr><td>270733d986a1eb72efda847b4b55bc6ba9686df4</td><td>We are IntechOpen,
+<br/>the first native scientific
+<br/>publisher of Open Access books
+<br/>3,350
+<br/>108,000
+<br/>1.7 M
+<br/>Open access books available
+<br/>International authors and editors
+<br/>Downloads
+<br/>Our authors are among the
+<br/>151
+<br/>Countries delivered to
+<br/>TOP 1%
+<br/>12.2%
+<br/>most cited scientists
+<br/>Contributors from top 500 universities
+<br/>Selection of our books indexed in the Book Citation Index
+<br/>in Web of Science™ Core Collection (BKCI)
+<br/>Interested in publishing with us?
+<br/>Numbers displayed above are based on latest data collected.
+<br/>For more information visit www.intechopen.com
+</td><td></td><td>Contact book.department@intechopen.com
+</td></tr><tr><td>27c6cd568d0623d549439edc98f6b92528d39bfe</td><td>Regressive Tree Structured Model for Facial Landmark Localization
+<br/>Artificial Vision Lab., Dept Mechanical Engineering
+<br/><b>National Taiwan University of Science and Technology</b></td><td>('2329565', 'Kai-Hsiang Chang', 'kai-hsiang chang')<br/>('2421405', 'Shih-Chieh Huang', 'shih-chieh huang')</td><td>jison@mail.ntust.edu.tw
+</td></tr><tr><td>273b0511588ab0a81809a9e75ab3bd93d6a0f1e3</td><td>The final publication is available at Springer via http://dx.doi.org/10.1007/s11042-016-3428-9
+<br/>Recognition of Facial Expressions Based on Salient
+<br/>Geometric Features and Support Vector Machines
+<br/><b>Korea Electronics Technology Institute, Jeonju-si, Jeollabuk-do 561-844, Rep</b><br/><b>Division of Computer Engineering, Chonbuk National University, Jeonju-si, Jeollabuk-do</b><br/><b>School of Computing Science, Simon Fraser University, Burnaby, B.C., Canada; E-Mail</b><br/>Tel.: +82-63-270-2406; Fax: +82-63-270-2394.
+</td><td>('32322842', 'Deepak Ghimire', 'deepak ghimire')<br/>('2034182', 'Joonwhoan Lee', 'joonwhoan lee')<br/>('1689656', 'Ze-Nian Li', 'ze-nian li')<br/>('31984909', 'SungHwan Jeong', 'sunghwan jeong')</td><td>of Korea; E-Mails: (deepak, shjeong)@keti.re.kr
+<br/>Rep. of Korea; E-Mail: chlee@jbnu.ac.kr
+<br/>li@cs.sfu.ca
+<br/>* Author to whom correspondence should be addressed; E-Mail: chlee@jbnu.ac.kr;
+</td></tr><tr><td>27169761aeab311a428a9dd964c7e34950a62a6b</td><td>International Journal of the Physical Sciences Vol. 5(13), pp. 2020 -2029, 18 October, 2010
+<br/>Available online at http://www.academicjournals.org/IJPS
+<br/>ISSN 1992 - 1950 ©2010 Academic Journals
+<br/>Full Length Research Paper
+<br/>Face recognition using 3D head scan data based on
+<br/>Procrustes distance
+<br/><b>Kongju National University, South Korea</b><br/><b>Korean Research Institute of Standards and Science (KRISS), Korea</b><br/>Accepted 6 July, 2010
+<br/>Recently, face recognition has attracted significant attention from the researchers and scientists in
+<br/>various fields of research, such as biomedical informatics, pattern recognition, vision, etc due its
+<br/>applications in commercially available systems, defense and security purpose. In this paper a practical
+<br/>method for face reorganization utilizing head cross section data based on Procrustes analysis is
+<br/>proposed. This proposed method relies on shape signatures of the contours extracted from face data.
+<br/>The shape signatures are created by calculating the centroid distance of the boundary points, which is
+<br/>a translation and rotation invariant signature. The shape signatures for a selected region of interest
+<br/>(ROI) are used as feature vectors and authentication is done using them. After extracting feature
+<br/>vectors a comparison analysis is performed utilizing Procrustes distance to differentiate their face
+<br/>pattern from each other. The proposed scheme attains an equal error rate (EER) of 4.563% for the 400
+<br/>head data for 100 subjects. The performance analysis of face recognition was analyzed based on K
+<br/>nearest neighbour classifier. The experimental results presented here verify that the proposed method
+<br/>is considerable effective.
+<br/>Key words: Face, biometrics, Procrustes distance, equal error rate, k nearest classifier.
+<br/>INTRODUCTION
+<br/>Perhaps face is the easiest means of identifying a person
+<br/>by another person. In general humans can identify
+<br/>themselves and others by faces in a scene without hard
+<br/>effort, but face recognition systems that implement these
+<br/>tasks are very challenging to design. The challenges are
+<br/>even extensive when there is a wide range of variation
+<br/>due to imaging situations. Both inter- and intra-subject
+<br/>variations are related with face images. Physical similarity
+<br/>among
+<br/>inter-subject
+<br/>variation whereas intra-subject variation is dependent on
+<br/>the following aspects such as age, head pose facial app-
+<br/>roach, presence of light and presence of other obje-
+<br/>cts/people etc. However, in face recognition, it has been
+<br/>observed that inter-person variations are available due to
+<br/>variations in local geometric features. Automatic face
+<br/>recognition has been widely studied during the last few
+<br/>decades. It is an active research area spanning many di-
+<br/>sciplines such as image processing, pattern recognition,
+<br/>responsible
+<br/>individuals
+<br/>for
+<br/>is
+<br/>computer vision, neural networks, artificial intelligence,
+<br/>and biometrics.
+<br/>Many researchers from these different disciplines work
+<br/>toward the goal of endowing machines or computers with
+<br/>the ability to recognize human faces as we human beings
+<br/>do, effortlessly, in our everyday life (Brunelli and Poggio,
+<br/>1993; Samaria, 1994; Wiskott et al., 1997; Turk and
+<br/>Pentland, 1991; Belhumeur et al., 1997; He et al., 2005;
+<br/>Wiskott et al., 1997; Lanitis et al., 1995; Cootes et al.,
+<br/>2001; Brunelli and Poggio, 1993; Turk, 1991; Bellhumer
+<br/>et al., 1997). Face recognition has a wide range of
+<br/>potential applications
+<br/>for commercial, security, and
+<br/>forensic purposes. These applications include automated
+<br/>crowd
+<br/>shot
+<br/>identification (e.g., for issuing driver licenses), credit card
+<br/>authorization, ATM machine access control, design of
+<br/>human computer interfaces, etc. The rapid evaluation in
+<br/>face recognition research can be found by the progress
+<br/>of systematic evaluation standards that includes the
+<br/>FERET, FRVT 2000, FRVT 2002, and XM2VTS
+<br/>protocols, and many existing software packages for
+<br/>example FaceIt, FaceVACS, FaceSnap Recorder,
+<br/>control, mug
+<br/>surveillance,
+<br/>access
+</td><td>('3222448', 'Sikyung Kim', 'sikyung kim')<br/>('2387342', 'Se Jin Park', 'se jin park')</td><td>*Corresponding author. E-mail: mynudding@yahoo.com.
+</td></tr><tr><td>27da432cf2b9129dce256e5bf7f2f18953eef5a5</td><td></td><td></td><td></td></tr><tr><td>27961bc8173ac84fdbecacd01e5ed6f7ed92d4bd</td><td>To Appear in The IEEE 6th International Conference on Biometrics: Theory, Applications and
+<br/>Systems (BTAS), Sept. 29-Oct. 2, 2013, Washington DC, USA
+<br/>Automatic Multi-view Face Recognition via 3D Model Based Pose Regularization
+<br/>Department of Computer Science and Engineering
+<br/><b>Michigan State University, East Lansing, MI, U.S.A</b></td><td>('1883998', 'Koichiro Niinuma', 'koichiro niinuma')<br/>('34393045', 'Hu Han', 'hu han')<br/>('6680444', 'Anil K. Jain', 'anil k. jain')</td><td>{niinumak, hhan, jain}@msu.edu
+</td></tr><tr><td>27173d0b9bb5ce3a75d05e4dbd8f063375f24bb5</td><td>ISSN : 2248-9622, Vol. 4, Issue 10( Part - 3), October 2014, pp.40-44
+<br/>RESEARCH ARTICLE
+<br/> OPEN ACCESS
+<br/>Effect of Different Occlusion on Facial Expressions Recognition
+<br/><b>RGPV University, Indore</b><br/><b>RGPV University, Indore</b><br/>
+</td><td>('2890210', 'Ramchand Hablani', 'ramchand hablani')</td><td></td></tr><tr><td>2784d9212dee2f8a660814f4b85ba564ec333720</td><td>Learning Class-Specific Image Transformations with Higher-Order Boltzmann
+<br/>Machines
+<br/>Erik Learned-Miller
+<br/><b>University of Massachusetts Amherst</b><br/>Amherst, MA
+</td><td>('3219900', 'Gary B. Huang', 'gary b. huang')</td><td>{gbhuang,elm}@cs.umass.edu
+</td></tr><tr><td>2717b044ae9933f9ab87f16d6c611352f66b2033</td><td>GNAS: A Greedy Neural Architecture Search Method for
+<br/>Multi-Attribute Learning
+<br/><b>Zhejiang University, 2Southwest Jiaotong University, 3Carnegie Mellon University</b></td><td>('2986516', 'Siyu Huang', 'siyu huang')<br/>('50079147', 'Xi Li', 'xi li')<br/>('1720488', 'Zhongfei Zhang', 'zhongfei zhang')</td><td>{siyuhuang,xilizju,zhongfei}@zju.edu.cn,zhiqicheng@gmail.com,alex@cs.cmu.edu
+</td></tr><tr><td>2770b095613d4395045942dc60e6c560e882f887</td><td>GridFace: Face Rectification via Learning Local
+<br/>Homography Transformations
+<br/>Face++, Megvii Inc.
+</td><td>('1848243', 'Erjin Zhou', 'erjin zhou')<br/>('2695115', 'Zhimin Cao', 'zhimin cao')<br/>('40055995', 'Jian Sun', 'jian sun')</td><td>{zej,czm,sunjian}@megvii.com
+</td></tr><tr><td>27cccf992f54966feb2ab4831fab628334c742d8</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 64– No.18, February 2013
+<br/>Facial Expression Recognition by Statistical, Spatial
+<br/>Features and using Decision Tree
+<br/>Assistant Professor
+<br/>CSIT Department
+<br/>GGV BIlaspur, Chhattisgarh
+<br/>India
+<br/>Assistant Professor
+<br/>Electronics (ECE) Department
+<br/>JECRC Jaipur, Rajasthan India
+<br/>IshanBhardwaj
+<br/>Student of Ph.D.
+<br/>Electrical Department
+<br/>NIT Raipur, Chhattisgarh India
+</td><td>('8836626', 'Nazil Perveen', 'nazil perveen')<br/>('2092589', 'Darshan Kumar', 'darshan kumar')</td><td></td></tr><tr><td>27883967d3dac734c207074eed966e83afccb8c3</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication.
+<br/>Two-dimensional Maximum Local Variation based on Image Euclidean Distance for Face
+<br/>Recognition
+<br/><b>State Key Laboratory of Integrated Services Networks, Xidian University, Xi an 710071 China</b><br/><b>State Key Laboratory of CAD and CG, ZHE JIANG University, HangZhou, 310058 China</b><br/><b>The Chinese University of Hong Kong, Hong Kong</b><br/>to
+<br/>improve
+<br/>in
+<br/>images and
+<br/>in estimating
+</td><td>('38469552', 'Quanxue Gao', 'quanxue gao')</td><td></td></tr><tr><td>270e5266a1f6e76954dedbc2caf6ff61a5fbf8d0</td><td>EmotioNet Challenge: Recognition of facial expressions of emotion in the wild
+<br/>Dept. Electrical and Computer Engineering
+<br/><b>The Ohio State University</b></td><td>('8038057', 'Ramprakash Srinivasan', 'ramprakash srinivasan')<br/>('9947018', 'Qianli Feng', 'qianli feng')<br/>('1678691', 'Yan Wang', 'yan wang')</td><td></td></tr><tr><td>27f8b01e628f20ebfcb58d14ea40573d351bbaad</td><td>DEPARTMENT OF INFORMATION ENGINEERING AND COMPUTER SCIENCE
+<br/>ICT International Doctoral School
+<br/>Events based Multimedia Indexing
+<br/>and Retrieval
+<br/>SUBMITTED TO THE DEPARTMENT OF
+<br/>INFORMATION ENGINEERING AND COMPUTER SCIENCE (DISI)
+<br/>IN THE PARTIAL FULFILMENT OF THE REQUIREMENTS FOR THE DEGREE
+<br/>OF
+<br/>DOCTOR OF PHILOSOPHY
+<br/>Advisor:
+<br/>Examiners: Prof. Marco Carli, Universit`a degli Studi di Roma Tre, Italy
+<br/>Prof. Nicola Conci, Universit`a degli Studi di Trento, Italy
+<br/>Prof. Pietro Zanuttigh, Universit`a degli Studi di Padova, Italy
+<br/>Prof. Giulia Boato, Universit`a degli Studi di Trento, Italy
+<br/>December 2017
+</td><td>('36296712', 'Kashif Ahmad', 'kashif ahmad')</td><td></td></tr><tr><td>2742a61d32053761bcc14bd6c32365bfcdbefe35</td><td>Submitted 9/13; Revised 6/14; Published 2/15
+<br/>Learning Transformations for Clustering and Classification
+<br/>Department of Electrical and Computer Engineering
+<br/><b>Duke University</b><br/>Durham, NC 27708, USA
+<br/>Department of Electrical and Computer Engineering
+<br/>Department of Computer Science
+<br/>Department of Biomedical Engineering
+<br/><b>Duke University</b><br/>Durham, NC 27708, USA
+<br/>Editor: Ben Recht
+</td><td>('2077648', 'Qiang Qiu', 'qiang qiu')<br/>('1699339', 'Guillermo Sapiro', 'guillermo sapiro')</td><td>qiang.qiu@duke.edu
+<br/>guillermo.sapiro@duke.edu
+</td></tr><tr><td>27dafedccd7b049e87efed72cabaa32ec00fdd45</td><td>Unsupervised Visual Alignment with Similarity Graphs
+<br/><b>Tampere University of Technology, Finland</b></td><td>('2416841', 'Fatemeh Shokrollahi Yancheshmeh', 'fatemeh shokrollahi yancheshmeh')<br/>('40394658', 'Ke Chen', 'ke chen')</td><td>{fatemeh.shokrollahiyancheshmeh, ke.chen, joni.kamarainen}@tut.fi
+</td></tr><tr><td>27a299b834a18e45d73e0bf784bbb5b304c197b3</td><td>Social Role Discovery in Human Events
+<br/><b>Stanford University</b><br/>br. maids
+<br/>bride
+<br/>groom
+<br/>gr. man
+<br/>Pairwise interaction features
+<br/>Social Role Model
+<br/>Σ𝛼
+<br/>Σ𝛽
+<br/>Introduction
+<br/>• Social Roles describe humans in an event
+<br/>•Social roles of humans are dependent on
+<br/>- their actions in a social setting
+<br/>- their interactions with other roles
+<br/>• Obtaining role annotations for training is expensive
+<br/>•Goal: Discover role clusters in a social event based on
+<br/>role-specific interactions
+<br/>1. Input: videos
+<br/>with human tracks
+<br/>2. Extract unary and
+<br/>interaction features
+<br/>3. Output: Cluster
+<br/>people into social roles
+<br/>Our Approach
+<br/>- Does not require
+<br/>role annotations
+<br/>- Clusters people
+<br/>into roles based
+<br/>on interactions as
+<br/>well as person-
+<br/>specific features
+<br/>Results: Clustering Accuracy
+<br/>• New YouTube dataset: ~40 videos with 160-240 people per event
+<br/>• Human tracks and ground-truth roles annotated
+<br/>Method
+<br/>prior
+<br/>K-means
+<br/>Only unary
+<br/>Interaction
+<br/>as context
+<br/>Birthday Wedding Award
+<br/>Function
+<br/>62.97%
+<br/>31.97%
+<br/>69.31%
+<br/>77.75%
+<br/>20.17%
+<br/>29.43%
+<br/>39.22%
+<br/>38.83%
+<br/>29.32%
+<br/>33.88%
+<br/>38.25%
+<br/>41.53%
+<br/>Physical
+<br/>Training
+<br/>65.93%
+<br/>57.67%
+<br/>76.69%
+<br/>77.91%
+<br/>No spatial
+<br/>43.72%
+<br/>No proxemic 43.72%
+<br/>44.81%
+<br/>Full Model
+<br/>36.41%
+<br/>39.32%
+<br/>42.72%
+<br/>79.54%
+<br/>79.80%
+<br/>83.12%
+<br/>82.82%
+<br/>77.91%
+<br/>82.82%
+<br/>• Only unary – No
+<br/>interaction feature
+<br/>Interaction as
+<br/>context – Average
+<br/>interaction as unary
+<br/>• No spatial – Only
+<br/>proxemic interaction
+<br/>• No proxemic – Only
+<br/>spatial interaction
+<br/>Ψ𝑃
+<br/>- Spatio-temporal trajectory features
+<br/>- Proxemic[2] interaction features
+<br/>Unary features
+<br/>- HOG3D and Trajectory to capture action
+<br/>- Gender and Color Histogram features
+<br/>- Object interaction features
+<br/>Ψ𝑢
+<br/>𝒔𝑖
+<br/>𝛼 - Unary feature weight
+<br/>𝒔𝑖
+<br/>- Social role assignment
+<br/>- Reference role assignment
+<br/>Interaction feature weight
+<br/>Jointly infer
+<br/>by variational
+<br/>inference
+<br/>Ψ𝑢
+<br/>Ψ𝑝
+<br/>Interaction restricted
+<br/>to reference role for
+<br/>tractable inference
+<br/>• Spatial relations in wedding. Cross-arrow is the position of the reference
+<br/>Results: Role Clusters
+<br/>role (groom)
+<br/>Bride
+<br/>Priest
+<br/>Brides maid
+<br/>Grooms man
+<br/>• Color of cross represents ground-truth role for wrong assignments
+<br/>bride
+<br/>groom
+<br/>priest
+<br/>grooms men
+<br/>brides maid
+<br/>b’day person
+<br/>parent
+<br/>friends
+<br/>guest
+<br/>presenter
+<br/>recipient
+<br/>host
+<br/>distributor
+<br/>[1] V. Ramanathan, B. Yao, L. Fei-Fei. Social Role Discovery in Human Events. In CVPR, 2013.
+<br/>[2] Y. Yang, S. Baker, A. Kannan, and D. Ramanan. Recognizing proxemics in personal photos. In CVPR, 2012.
+<br/>This work was supported in part by DARPA Minds Eye, NSF, Intel, Microsoft Research, Google Research and the Intelligence Advanced
+<br/>Research Projects Activity* (IARPA) via Department of Interior National Business Center contract number D11PC20069.
+<br/>* The U.S. Government is authorized to reproduce and distribute reprints for Governmental purposes notwithstanding any copyright thereon. Disclaimer: The views and conclusions contained herein are
+<br/>those of the authors and should not be interpreted as necessarily representing the official policies or endorsements, either expressed or implied, of IARPA, DOI/NBC, or the U.S. Government.
+<br/>instructor
+<br/>presenter
+</td><td>('34066479', 'Vignesh Ramanathan', 'vignesh ramanathan')<br/>('38916673', 'Bangpeng Yao', 'bangpeng yao')<br/>('3216322', 'Li Fei-Fei', 'li fei-fei')</td><td>{vigneshr, bangpeng, feifeili}@cs.stanford.edu
+</td></tr><tr><td>27b1670e1b91ab983b7b1ecfe9eb5e6ba951e0ba</td><td>Comparison between k-nn and svm method
+<br/>for speech emotion recognition
+<br/><b>Anjuman College of Engineering and Technology, Sadar, Nagpur, India</b></td><td>('27879696', 'Muzaffar Khan', 'muzaffar khan')</td><td></td></tr><tr><td>274f87ad659cd90382ef38f7c6fafc4fc7f0d74d</td><td></td><td></td><td></td></tr><tr><td>27ee8482c376ef282d5eb2e673ab042f5ded99d7</td><td>Scale Normalization for the Distance Maps AAM.
+<br/>Avenue de la boulaie, BP 81127,
+<br/>35 511 Cesson-S´evign´e, France
+<br/>Sup´elec, IETR-SCEE Team
+</td><td>('31491147', 'Denis Giri', 'denis giri')<br/>('2861129', 'Maxime Rosenwald', 'maxime rosenwald')<br/>('32420329', 'Benjamin Villeneuve', 'benjamin villeneuve')<br/>('3353560', 'Sylvain Le Gallou', 'sylvain le gallou')</td><td>Email: {denis.giri, maxime.rosenwald, benjamin.villeneuve, sylvain.legallou, renaud.seguier}@supelec.fr
+</td></tr><tr><td>4b4106614c1d553365bad75d7866bff0de6056ed</td><td>Unconstrained Facial Images: Database for Face
+<br/>Recognition under Real-world Conditions⋆
+<br/>1 Dept. of Computer Science & Engineering
+<br/><b>University of West Bohemia</b><br/>Plzeˇn, Czech Republic
+<br/>2 NTIS - New Technologies for the Information Society
+<br/><b>University of West Bohemia</b><br/>Plzeˇn, Czech Republic
+</td><td>('2628715', 'Ladislav Lenc', 'ladislav lenc')</td><td>{llenc,pkral}@kiv.zcu.cz
+</td></tr><tr><td>4bb03b27bc625e53d8d444c0ba3ee235d2f17e86</td><td>Reading Between The Lines: Object Localization
+<br/>Using Implicit Cues from Image Tags
+<br/>Department of Computer Science
+<br/><b>University of Texas at Austin</b></td><td>('35788904', 'Sung Ju Hwang', 'sung ju hwang')<br/>('1794409', 'Kristen Grauman', 'kristen grauman')</td><td>{sjhwang,grauman}@cs.utexas.edu
+</td></tr><tr><td>4b89cf7197922ee9418ae93896586c990e0d2867</td><td>LATEX Author Guidelines for CVPR Proceedings
+<br/>First Author
+<br/>Institution1
+<br/>Institution1 address
+</td><td></td><td>firstauthor@i1.org
+</td></tr><tr><td>4bc9a767d7e63c5b94614ebdc24a8775603b15c9</td><td><b>University of Trento</b><br/>Doctoral Thesis
+<br/>Understanding Visual Information:
+<br/>from Unsupervised Discovery to
+<br/>Minimal Effort Domain Adaptation
+<br/>Author:
+<br/>Supervisor:
+<br/>Dr. Nicu Sebe
+<br/>A thesis submitted in fulfilment of the requirements
+<br/>for the degree of Doctor of Philosophy
+<br/>in the
+<br/>International Doctorate School in Information and Communication Technologies
+<br/>Department of Information Engineering and Computer Science
+<br/>Multimedia and Human Understanding Group (MHUG)
+<br/>April 2015
+</td><td>('2933565', 'Gloria Zen', 'gloria zen')</td><td></td></tr><tr><td>4b519e2e88ccd45718b0fc65bfd82ebe103902f7</td><td>A Discriminative Model for Age Invariant Face
+<br/>Recognition
+<br/><b>Shenzhen Institutes of Advanced Technology, Chinese Academy of Science, China</b><br/><b>Michigan State University, E. Lansing, MI 48823, USA</b><br/><b>Korea University, Seoul 136-713, Korea</b></td><td>('1911510', 'Zhifeng Li', 'zhifeng li')<br/>('2222919', 'Unsang Park', 'unsang park')<br/>('6680444', 'Anil K. Jain', 'anil k. jain')</td><td></td></tr><tr><td>4b3f425274b0c2297d136f8833a31866db2f2aec</td><td>This is a pre-print of the original paper accepted for publication in the CVPR 2017 Biometrics Workshop.
+<br/>Toward Open-Set Face Recognition
+<br/>Manuel G¨unther
+<br/><b>Vision and Security Technology Lab, University of Colorado Colorado Springs</b></td><td>('39616991', 'Steve Cruz', 'steve cruz')<br/>('1760117', 'Terrance E. Boult', 'terrance e. boult')<br/>('39886114', 'Ethan M. Rudd', 'ethan m. rudd')</td><td>{mgunther,scruz,erudd,tboult}@vast.uccs.edu
+</td></tr><tr><td>4b7c110987c1d89109355b04f8597ce427a7cd72</td><td>ORIGINAL RESEARCH ARTICLE
+<br/>published: 16 October 2014
+<br/>doi: 10.3389/fnhum.2014.00804
+<br/>Feature- and Face-Exchange illusions: new insights and
+<br/>applications for the study of the binding problem
+<br/><b>American University, Washington, DC, USA</b><br/><b>University of Nevada, Reno, Reno, NV, USA</b><br/>Edited by:
+<br/><b>Baingio Pinna, University of</b><br/>Sassari, Italy
+<br/>Reviewed by:
+<br/>Stephen Louis Macknik, Barrow
+<br/><b>Neurological Institute, USA</b><br/>Susana Martinez-Conde, Barrow
+<br/><b>Neurological Institute, USA</b><br/>*Correspondence:
+<br/><b>Psychology, American University</b><br/>4400 Massachusetts Avenue NW,
+<br/>Washington, DC 20016, USA
+<br/>The binding problem is a longstanding issue in vision science: i.e., how are humans able to
+<br/>maintain a relatively stable representation of objects and features even though the visual
+<br/>system processes many aspects of the world separately and in parallel? We previously
+<br/>investigated this issue with a variant of the bounce-pass paradigm, which consists of two
+<br/>rectangular bars moving in opposite directions; if the bars are identical and never overlap,
+<br/>the motion could equally be interpreted as bouncing or passing. Although bars of different
+<br/>colors should be seen as passing each other (since the colors provide more information
+<br/>about the bars’ paths), we found “Feature Exchange”: observers reported the paradoxical
+<br/>perception that the bars appear to bounce off of each other and exchange colors. Here we
+<br/>extend our previous findings with three demonstrations. “Peripheral Feature-Exchange”
+<br/>consists of two colored bars that physically bounce (they continually meet in the middle
+<br/>of the monitor and return to the sides). When viewed in the periphery, the bars appear
+<br/>to stream past each other even though this percept relies on the exchange of features
+<br/>and contradicts the information provided by the color of the bars. In “Face-Exchange”
+<br/>two different faces physically pass each other. When fixating centrally, observers typically
+<br/>report the perception of bouncing faces that swap features, indicating that the Feature
+<br/>Exchange effect can occur even with complex objects. In “Face-Go-Round,” one face
+<br/>repeatedly moves from left to right on the top of the monitor, and the other from right
+<br/>to left at the bottom of the monitor. Observers typically perceive the faces moving in a
+<br/>circle—a percept that contradicts information provided by the identity of the faces. We
+<br/>suggest that Feature Exchange and the paradigms used to elicit it can be useful for the
+<br/>investigation of the binding problem as well as other contemporary issues of interest to
+<br/>vision science.
+<br/>Keywords: motion perception, object perception, binding problem, visual periphery, animation, bouncing
+<br/>streaming illusions, illusion of causality
+<br/>INTRODUCTION
+<br/>The “binding problem” refers to the observation that the brain
+<br/>processes many aspects of the visual world separately and in
+<br/>parallel, yet we perceive a unified world, populated by coherent
+<br/>objects (James, 1890; Treisman, 1996; Holcombe et al., 2009). The
+<br/>implication is that the visual system binds together the output of
+<br/>separate processes (which presumably compute features, textures,
+<br/>colors, motion gradients, etc.) prior to creating our object-centric
+<br/>perceptual world. Two fundamental questions of the binding
+<br/>problem can be summarized as follows: (1) How, and under
+<br/>what conditions, does the brain combine (or fail to combine) the
+<br/>outputs of these separate processes to construct an object rep-
+<br/>resentation? (2) How are object representations maintained over
+<br/>time and space?
+<br/>We recently examined the spatiotemporal conditions and the
+<br/>role feature-level processes play in representing and maintaining
+<br/>objects (Caplovitz et al., 2011) using a variant of the “bounce-
+<br/>pass paradigm” (Metzger, 1934; Michotte, 1946/1963; Kanizsa,
+<br/>1969). In a typical version of the bounce pass paradigm, the
+<br/>interpretation of motion direction and object correspondence
+<br/>direction is intrinsically ambiguous, and the degree to which
+<br/>observers report one or the other of the potential percepts has
+<br/>been used to study a range of perceptual and cognitive processes.
+<br/>For example, versions of this basic paradigm have been used to
+<br/>study properties of cross-modal interactions and motion per-
+<br/>ception as well as object representations (Bertenthal et al., 1993;
+<br/>Watanabe and Shimojo, 1998; Sekuler and Sekuler, 1999; Mitroff
+<br/>et al., 2005; Feldman and Tremoulet, 2006).
+<br/>The basic paradigm (illustrated in Figure 1A) consists of two
+<br/>rectangles; one that moves from right to left while the other moves
+<br/>from left to right. The display is ambiguous because the stimulus
+<br/>is wholly consistent with each rectangle passing from one side of
+<br/>the screen to the other (i.e., the perception of streaming) or as
+<br/>bouncing off of the other rectangle and returning to its point of
+<br/>origin (i.e., the perception of bouncing). If, at the point of inter-
+<br/>section, one rectangle overlaps with the other rectangle observers
+<br/>will commonly perceive streaming (Sekuler and Sekuler, 1999).
+<br/>In our experiments, this potential cue is removed: at the critical
+<br/>point of intersection, the rectangles exactly exchange places and
+<br/>thus never have an overlapping edge. When the two rectangles are
+<br/>Frontiers in Human Neuroscience
+<br/>www.frontiersin.org
+<br/>October 2014 | Volume 8 | Article 804 | 1
+<br/>HUMAN NEUROSCIENCE </td><td>('31981243', 'Arthur G. Shapiro', 'arthur g. shapiro')<br/>('8369036', 'Gideon P. Caplovitz', 'gideon p. caplovitz')<br/>('23863232', 'Erica L. Dixon', 'erica l. dixon')<br/>('31981243', 'Arthur G. Shapiro', 'arthur g. shapiro')</td><td>e-mail: arthur.shapiro@american.edu
+</td></tr><tr><td>4bd088ba3f42aa1e43ae33b1988264465a643a1f</td><td>Technical Report, IDE0852, May 2008
+<br/>Multiview Face Detection Using
+<br/>Gabor Filters and
+<br/>Support Vector Machine
+<br/>Bachelor’s Thesis in Computer Systems Engineering
+<br/>School of Information Science, Computer and Electrical Engineering
+<br/>
+<br/><b>Halmstad University</b></td><td></td><td></td></tr><tr><td>4bfce41cc72be315770861a15e467aa027d91641</td><td>Active Annotation Translation
+<br/>Caltech
+<br/>Kristj´an Eldj´arn Hj¨orleifsson
+<br/><b>University of Iceland</b><br/>Caltech
+</td><td>('3251767', 'Steve Branson', 'steve branson')<br/>('1690922', 'Pietro Perona', 'pietro perona')</td><td>sbranson@caltech.edu
+<br/>keh4@hi.is
+<br/>perona@caltech.edu
+</td></tr><tr><td>4b61d8490bf034a2ee8aa26601d13c83ad7f843a</td><td>A Modulation Module for Multi-task Learning with
+<br/>Applications in Image Retrieval
+<br/><b>Northwestern University</b><br/>2 AIBee
+<br/>3 Bytedance AI Lab
+<br/><b>Carnegie Mellon University</b></td><td>('8343585', 'Xiangyun Zhao', 'xiangyun zhao')</td><td></td></tr><tr><td>4bd3de97b256b96556d19a5db71dda519934fd53</td><td>Latent Factor Guided Convolutional Neural Networks for Age-Invariant Face
+<br/>Recognition
+<br/><b>School of Electronic and Information Engineering, South China University of Technology</b><br/><b>Shenzhen Key Lab of Comp. Vis. and Pat. Rec., Shenzhen Institutes of Advanced Technology, CAS, China</b></td><td>('2512949', 'Yandong Wen', 'yandong wen')<br/>('32787758', 'Zhifeng Li', 'zhifeng li')<br/>('33427555', 'Yu Qiao', 'yu qiao')</td><td>yd.wen@siat.ac.cn, zhifeng.li@siat.ac.cn, yu.qiao@siat.ac.cn
+</td></tr><tr><td>4b04247c7f22410681b6aab053d9655cf7f3f888</td><td>Robust Face Recognition by Constrained Part-based
+<br/>Alignment
+</td><td>('1692992', 'Yuting Zhang', 'yuting zhang')<br/>('2370507', 'Kui Jia', 'kui jia')<br/>('7135663', 'Yueming Wang', 'yueming wang')<br/>('1734380', 'Gang Pan', 'gang pan')<br/>('1926757', 'Tsung-Han Chan', 'tsung-han chan')<br/>('1700297', 'Yi Ma', 'yi ma')</td><td></td></tr><tr><td>4b60e45b6803e2e155f25a2270a28be9f8bec130</td><td>Attribute Based Object Identification
+</td><td>('1686318', 'Yuyin Sun', 'yuyin sun')<br/>('1766509', 'Liefeng Bo', 'liefeng bo')<br/>('1731079', 'Dieter Fox', 'dieter fox')</td><td></td></tr><tr><td>4b48e912a17c79ac95d6a60afed8238c9ab9e553</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+<br/>Minimum Margin Loss for Deep Face Recognition
+</td><td>('49141822', 'Xin Wei', 'xin wei')<br/>('3552546', 'Hui Wang', 'hui wang')<br/>('2986129', 'Huan Wan', 'huan wan')</td><td></td></tr><tr><td>4b5eeea5dd8bd69331bd4bd4c66098b125888dea</td><td>Human Activity Recognition Using Conditional
+<br/>Random Fields and Privileged Information
+<br/>submitted to
+<br/>the designated by the General Assembly Composition of the
+<br/>Department of Computer Science & Engineering Inquiry
+<br/>Committee
+<br/>by
+<br/>in partial fulfillment of the Requirements for the Degree of
+<br/>DOCTOR OF PHILOSOPHY
+<br/>February 2016
+</td><td>('2045915', 'Michalis Vrigkas', 'michalis vrigkas')</td><td></td></tr><tr><td>4bbbee93519a4254736167b31be69ee1e537f942</td><td></td><td></td><td></td></tr><tr><td>4b74f2d56cd0dda6f459319fec29559291c61bff</td><td>CHIACHIA ET AL.: PERSON-SPECIFIC SUBSPACES FOR FAMILIAR FACES
+<br/>Person-Specific Subspace Analysis for
+<br/>Unconstrained Familiar Face Identification
+<br/>David Cox2
+<br/><b>Institute of Computing</b><br/><b>University of Campinas</b><br/>Campinas, Brazil
+<br/><b>Rowland Institute</b><br/><b>Harvard University</b><br/>Cambridge, USA
+<br/><b>McGovern Institute</b><br/><b>Massachusetts Institute of Technology</b><br/>Cambridge, USA
+<br/>4 Department of Computer Science
+<br/>Universidade Federal de Minas Gerais
+<br/>Belo Horizonte, Brazil
+</td><td>('1761151', 'Giovani Chiachia', 'giovani chiachia')<br/>('30017846', 'Nicolas Pinto', 'nicolas pinto')<br/>('1679142', 'William Robson Schwartz', 'william robson schwartz')<br/>('2145405', 'Anderson Rocha', 'anderson rocha')<br/>('1716806', 'Alexandre X. Falcão', 'alexandre x. falcão')</td><td>giovanichiachia@gmail.com
+<br/>pinto@mit.edu
+<br/>william@dcc.ufmg.br
+<br/>anderson.rocha@ic.unicamp.br
+<br/>afalcao@ic.unicamp.br
+<br/>davidcox@fas.harvard.edu
+</td></tr><tr><td>4ba38262fe20fab3e4c80215147b498f83843b93</td><td>MAKIANDCIPOLLA:OBTAININGTHESHAPEOFAMOVINGOBJECT
+<br/>Obtaining the Shape of a Moving Object
+<br/>with a Specular Surface
+<br/>Toshiba Research Europe
+<br/><b>Cambridge Research Laboratory</b><br/>Department of Engineering
+<br/><b>University of Cambridge</b></td><td>('1801052', 'Atsuto Maki', 'atsuto maki')<br/>('1745672', 'Roberto Cipolla', 'roberto cipolla')</td><td>atsuto.maki@crl.toshiba.co.uk
+<br/>cipolla@cam.ac.uk
+</td></tr><tr><td>4bbe460ab1b279a55e3c9d9f488ff79884d01608</td><td>GAGAN: Geometry-Aware Generative Adversarial Networks
+<br/>Jean Kossaifi∗
+<br/><b>Middlesex University London</b><br/><b>Imperial College London</b></td><td>('47801605', 'Linh Tran', 'linh tran')<br/>('1780393', 'Yannis Panagakis', 'yannis panagakis')<br/>('1694605', 'Maja Pantic', 'maja pantic')</td><td>{jean.kossaifi;linh.tran;i.panagakis;m.pantic}@imperial.ac.uk
+</td></tr><tr><td>4b3eaedac75ac419c2609e131ea9377ba8c3d4b8</td><td>FAST NEWTON ACTIVE APPEARANCE MODELS
+<br/>Jean Kossaifi(cid:63)
+<br/><b>cid:63) Imperial College London, UK</b><br/><b>University of Lincoln, UK</b><br/><b>University of Twente, The Netherlands</b></td><td>('2610880', 'Georgios Tzimiropoulos', 'georgios tzimiropoulos')<br/>('1694605', 'Maja Pantic', 'maja pantic')</td><td></td></tr><tr><td>4b507a161af8a7dd41e909798b9230f4ac779315</td><td>A Theory of Multiplexed Illumination
+<br/>Dept. Electrical Engineering
+<br/>Technion - Israel Inst. Technology
+<br/>Haifa 32000, ISRAEL
+<br/>Dept. Computer Science
+<br/><b>Columbia University</b><br/>New York, NY 10027
+</td><td>('2159538', 'Yoav Y. Schechner', 'yoav y. schechner')<br/>('1750470', 'Shree K. Nayar', 'shree k. nayar')<br/>('1767767', 'Peter N. Belhumeur', 'peter n. belhumeur')</td><td>yoav@ee.technion.ac.il
+<br/>{nayar,belhumeur}@cs.columbia.edu
+</td></tr><tr><td>4b02387c2db968a70b69d98da3c443f139099e91</td><td>Detecting facial landmarks in the video based on a hybrid framework
+<br/><b>School of Information Engineering, Guangdong University of Technology, 510006 Guangzhou, China</b><br/><b>School of Electromechanical Engineering, Guangdong University of Technology, 510006 Guangzhou, China</b></td><td>('1850205', 'Nian Cai', 'nian cai')<br/>('3468993', 'Zhineng Lin', 'zhineng lin')<br/>('2686365', 'Fu Zhang', 'fu zhang')<br/>('39038751', 'Guandong Cen', 'guandong cen')<br/>('40465036', 'Han Wang', 'han wang')</td><td></td></tr><tr><td>4b6be933057d939ddfa665501568ec4704fabb39</td><td></td><td></td><td></td></tr><tr><td>4b71d1ff7e589b94e0f97271c052699157e6dc4a</td><td>Hindawi Publishing Corporation
+<br/>EURASIP Journal on Advances in Signal Processing
+<br/>Volume 2008, Article ID 748483, 18 pages
+<br/>doi:10.1155/2008/748483
+<br/>Research Article
+<br/>Pose-Encoded Spherical Harmonics for Face Recognition and
+<br/>Synthesis Using a Single Image
+<br/><b>Center for Automation Research, University of Maryland, College Park, MD 20742, USA</b><br/>2 Vision Technologies Lab, Sarnoff Corporation, Princeton, NJ 08873, USA
+<br/>Received 1 May 2007; Accepted 4 September 2007
+<br/>Recommended by Juwei Lu
+<br/>Face recognition under varying pose is a challenging problem, especially when illumination variations are also present. In this
+<br/>paper, we propose to address one of the most challenging scenarios in face recognition. That is, to identify a subject from a test
+<br/>image that is acquired under different pose and illumination condition from only one training sample (also known as a gallery
+<br/>image) of this subject in the database. For example, the test image could be semifrontal and illuminated by multiple lighting
+<br/>sources while the corresponding training image is frontal under a single lighting source. Under the assumption of Lambertian
+<br/>reflectance, the spherical harmonics representation has proved to be effective in modeling illumination variations for a fixed pose.
+<br/>In this paper, we extend the spherical harmonics representation to encode pose information. More specifically, we utilize the fact
+<br/>that 2D harmonic basis images at different poses are related by close-form linear transformations, and give a more convenient
+<br/>transformation matrix to be directly used for basis images. An immediate application is that we can easily synthesize a different
+<br/>view of a subject under arbitrary lighting conditions by changing the coefficients of the spherical harmonics representation. A
+<br/>more important result is an efficient face recognition method, based on the orthonormality of the linear transformations, for
+<br/>solving the above-mentioned challenging scenario. Thus, we directly project a nonfrontal view test image onto the space of frontal
+<br/>view harmonic basis images. The impact of some empirical factors due to the projection is embedded in a sparse warping matrix;
+<br/>for most cases, we show that the recognition performance does not deteriorate after warping the test image to the frontal view.
+<br/>Very good recognition results are obtained using this method for both synthetic and challenging real images.
+<br/>which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
+<br/>1.
+<br/>INTRODUCTION
+<br/>Face recognition is one of the most successful applications
+<br/>of image analysis and understanding [1]. Given a database of
+<br/>training images (sometimes called a gallery set, or gallery im-
+<br/>ages), the task of face recognition is to determine the facial ID
+<br/>of an incoming test image. Built upon the success of earlier
+<br/>efforts, recent research has focused on robust face recogni-
+<br/>tion to handle the issue of significant difference between a
+<br/>test image and its corresponding training images (i.e., they
+<br/>belong to the same subject). Despite significant progress, ro-
+<br/>bust face recognition under varying lighting and different
+<br/>pose conditions remains to be a challenging problem. The
+<br/>problem becomes even more difficult when only one train-
+<br/>ing image per subject is available. Recently, methods have
+<br/>been proposed to handle the combined pose and illumina-
+<br/>tion problem when only one training image is available, for
+<br/>example, the method based on morphable models [2] and its
+<br/>extension [3] that proposes to handle the complex illumina-
+<br/>tion problem by integrating spherical harmonics representa-
+<br/>tion [4, 5]. In these methods, either arbitrary illumination
+<br/>conditions cannot be handled [2] or the expensive computa-
+<br/>tion of harmonic basis images is required for each pose per
+<br/>subject [3].
+<br/>Under the assumption of Lambertian reflectance, the
+<br/>spherical harmonics representation has proved to be effec-
+<br/>tive in modelling illumination variations for a fixed pose. In
+<br/>this paper, we extend the harmonic representation to encode
+<br/>pose information. We utilize the fact that all the harmonic
+<br/>basis images of a subject at various poses are related to each
+<br/>other via close-form linear transformations [6, 7], and de-
+<br/>rive a more convenient transformation matrix to analytically
+<br/>synthesize basis images of a subject at various poses from
+<br/>just one set of basis images at a fixed pose, say, the frontal
+</td><td>('39265975', 'Zhanfeng Yue', 'zhanfeng yue')<br/>('38480590', 'Wenyi Zhao', 'wenyi zhao')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')<br/>('39265975', 'Zhanfeng Yue', 'zhanfeng yue')</td><td>Correspondence should be addressed to Zhanfeng Yue, zyue@cfar.umd.edu
+</td></tr><tr><td>4b0a2937f64df66cadee459a32ad7ae6e9fd7ed2</td><td>Quo Vadis, Action Recognition? A New Model and the Kinetics Dataset
+<br/>Jo˜ao Carreira†
+<br/>†DeepMind
+<br/><b>University of Oxford</b></td><td>('1688869', 'Andrew Zisserman', 'andrew zisserman')</td><td>joaoluis@google.com
+<br/>zisserman@google.com
+</td></tr><tr><td>4b4ecc1cb7f048235605975ab37bb694d69f63e5</td><td>Nonlinear Embedding Transform for
+<br/>Unsupervised Domain Adaptation
+<br/>Center for Cognitive Ubiquitous Computing
+<br/><b>Arizona State University, AZ, USA</b></td><td>('3151995', 'Hemanth Venkateswara', 'hemanth venkateswara')<br/>('2471253', 'Shayok Chakraborty', 'shayok chakraborty')<br/>('1743991', 'Sethuraman Panchanathan', 'sethuraman panchanathan')</td><td>{hemanthv,schakr10,panch}@asu.edu
+</td></tr><tr><td>4be03fd3a76b07125cd39777a6875ee59d9889bd</td><td>CONTENT-BASED ANALYSIS FOR ACCESSING AUDIOVISUAL ARCHIVES:
+<br/>ALTERNATIVES FOR CONCEPT-BASED INDEXING AND SEARCH
+<br/>ESAT/PSI - IBBT
+<br/>KU Leuven, Belgium
+</td><td>('1704728', 'Tinne Tuytelaars', 'tinne tuytelaars')</td><td>Tinne.Tuytelaars@esat.kuleuven.be
+</td></tr><tr><td>4be774af78f5bf55f7b7f654f9042b6e288b64bd</td><td>Variational methods for Conditional Multimodal Learning:
+<br/>Generating Human Faces from Attributes
+<br/><b>Indian Institute of Science</b><br/>Bangalore, India
+</td><td>('2686270', 'Gaurav Pandey', 'gaurav pandey')<br/>('2440174', 'Ambedkar Dukkipati', 'ambedkar dukkipati')</td><td>{gp88, ad}@csa.iisc.ernet.in
+</td></tr><tr><td>4b321065f6a45e55cb7f9d7b1055e8ac04713b41</td><td>Affective Computing Models for Character
+<br/>Animation
+<br/>School of Computing and Mathematical Sciences
+<br/><b>Liverpool John Moores University</b><br/>Byrom Street, L3 3AF, Liverpool, UK
+</td><td>('1794784', 'Abdennour El Rhalibi', 'abdennour el rhalibi')<br/>('36782007', 'Christopher Carter', 'christopher carter')<br/>('1768270', 'Madjid Merabti', 'madjid merabti')</td><td>R.L.Duarte@2010.ljmu.ac.uk;{A.Elrhalibi; C.J.Carter;M.Merabti}@ljmu.ac.uk
+</td></tr><tr><td>4b605e6a9362485bfe69950432fa1f896e7d19bf</td><td>To appear in the CVPR Workshop on Biometrics, June 2016
+<br/>A Comparison of Human and Automated Face Verification Accuracy on
+<br/>Unconstrained Image Sets∗
+<br/>Noblis
+<br/>Noblis
+<br/>Noblis
+<br/>Noblis
+<br/><b>Michigan State University</b></td><td>('1917247', 'Austin Blanton', 'austin blanton')<br/>('7996649', 'Kristen C. Allen', 'kristen c. allen')<br/>('15282121', 'Tim Miller', 'tim miller')<br/>('1718102', 'Nathan D. Kalka', 'nathan d. kalka')<br/>('6680444', 'Anil K. Jain', 'anil k. jain')</td><td>imaus10@gmail.com
+<br/>kristen.allen@noblis.org
+<br/>timothy.miller@noblis.org
+<br/>nathan.kalka@noblis.org
+<br/>jain@cse.msu.edu
+</td></tr><tr><td>4b3dd18882ff2738aa867b60febd2b35ab34dffc</td><td>FACIAL FEATURE ANALYSIS OF
+<br/>SPONTANEOUS FACIAL EXPRESSION
+<br/>Computer Laboratory
+<br/><b>University of Cambridge</b><br/>William Gates Building,
+<br/>Cambridge CB3 0FD UK
+<br/>Department of Computer Science
+<br/><b>The American University in Cairo</b><br/>113 Kasr Al Aini Street,
+<br/>P.O. Box 2511, Cairo, Egypt
+</td><td>('1754451', 'Rana El Kaliouby', 'rana el kaliouby')<br/>('3337337', 'Amr Goneid', 'amr goneid')</td><td>rana.el-kaliouby@cl.cam.ac.uk
+<br/>goneid@aucegypt.edu
+</td></tr><tr><td>11a2ef92b6238055cf3f6dcac0ff49b7b803aee3</td><td>TOWARDS REDUCTION OF THE TRAINING AND SEARCH RUNNING TIME
+<br/>COMPLEXITIES FOR NON-RIGID OBJECT SEGMENTATION
+<br/>Instituto de Sistemas e Rob´otica, Instituto Superior T´ecnico, 1049-001 Lisboa, Portugal(a)
+<br/><b>Australian Centre for Visual Technologies, The University of Adelaide, Australia (b</b></td><td>('3259175', 'Jacinto C. Nascimento', 'jacinto c. nascimento')<br/>('3265767', 'Gustavo Carneiro', 'gustavo carneiro')</td><td></td></tr><tr><td>11dc744736a30a189f88fa81be589be0b865c9fa</td><td>A Unified Multiplicative Framework for Attribute Learning
+<br/>1Key Lab of Intelligent Information Processing of Chinese Academy of Sciences (CAS),
+<br/><b>Institute of Computing Technology, CAS, Beijing, 100190, China</b><br/><b>University of Chinese Academy of Sciences, Beijing 100049, China</b></td><td>('2582309', 'Kongming Liang', 'kongming liang')<br/>('1783542', 'Hong Chang', 'hong chang')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('1710220', 'Xilin Chen', 'xilin chen')</td><td>{kongming.liang, hong.chang, shiguang.shan, xilin.chen}@vipl.ict.ac.cn
+</td></tr><tr><td>11a210835b87ccb4989e9ba31e7559bb7a9fd292</td><td>Hub
+<br/>ScienceDirect
+<br/>Scopus
+<br/>SciTopics
+<br/>Applications
+<br/>Register
+<br/>Login
+<br/>Go to SciVal Suite
+<br/>Search
+<br/>Sources
+<br/>Analytics
+<br/>My alerts
+<br/>My list
+<br/>My settings
+<br/>Quick Search
+<br/>View search history | Back to results | < Previous 4 of 11 Next >
+<br/>Help
+<br/>Download PDF
+<br/>Add to 2collab
+<br/>Export
+<br/>Print
+<br/>E-mail
+<br/>Create bibliography
+<br/>Add to My List
+<br/>Cited by since 1996
+<br/>Proceedings of the 2010 10th International Conference on Intelligent Systems Design and
+<br/>Applications, ISDA'10
+<br/>2010, Article number 5687029, Pages 1154-1158
+<br/>This article has been cited 0 times in Scopus.
+<br/>Inform me when this document is cited in Scopus:
+<br/>Set alert
+<br/>Set feed
+<br/>ISBN: 978-142448135-4
+<br/>DOI: 10.1109/ISDA.2010.5687029
+<br/>Document Type: Conference Paper
+<br/>Source Type: Conference Proceeding
+<br/><b>Sponsors: Machine Intelligence Research Labs (MIR Labs</b><br/>View references (23)
+<br/>My Applications
+<br/>Add
+<br/>More By These Authors
+<br/>The authors of this article have a total of 67 records in
+<br/>Scopus:
+<br/>(Showing 5 most recent)
+<br/>Shekofteh, S.K.,Maryam Baradaran, K.,Toosizadeh,
+<br/>S.,Akbarzadeh-T., M.-R.,Hashemi, M.
+<br/>Head pose estimation using fuzzy approximator
+<br/>augmented by redundant membership functions
+<br/>(2010)ICSTE 2010 - 2010 2nd International Conference on
+<br/>Software Technology and Engineering, Proceedings
+<br/>Kamkar, I.,Akbarzadeh-T, M.-R.,Yaghoobi, M.
+<br/>Intelligent water drops a new optimization algorithm
+<br/>Hide Applications
+<br/>Find related documents
+<br/>In Scopus based on
+<br/>References
+<br/>Authors
+<br/>Keywords
+<br/>Cairo; 29 November 2010 through 1 December 2010; Category number CFP10394-CDR; Code
+<br/>83753
+<br/>View at publisher |
+<br/>A fuzzy approximator with Gaussian membership functions
+<br/>to estimate a human's head pose
+<br/>Baradaran-K, M.a
+<br/>, Toosizadeh, S.a
+<br/><b>Islamic Azad University, Mashhad Branch, Mashhad, Iran</b><br/><b>Ferdowsi University of Mashhad, Mashhad, Iran</b><br/>, Akbarzadeh-T, M.-R.b
+<br/>, Shekofteh, S.K.b
+</td><td></td><td></td></tr><tr><td>118ca3b2e7c08094e2a50137b1548ada7935e505</td><td>Workshop track - ICLR 2018
+<br/>A DATASET TO EVALUATE THE REPRESENTATIONS
+<br/>LEARNED BY VIDEO PREDICTION MODELS
+<br/><b>Toyota Research Institute, Cambridge, MA 2 University of Michigan, Ann Arbor, MI</b></td><td>('34246012', 'Ryan Szeto', 'ryan szeto')<br/>('2307158', 'Simon Stent', 'simon stent')<br/>('3587688', 'Jason J. Corso', 'jason j. corso')</td><td>{szetor,jjcorso}@umich.edu
+<br/>{simon.stent,german.ros}@tri.global
+</td></tr><tr><td>11aa527c01e61ec3a7a67eef8d7ffe9d9ce63f1d</td><td>Automated measurement of mouse social behaviors
+<br/>using depth sensing, video tracking, and
+<br/>machine learning
+<br/>and David J. Andersona,1
+<br/><b>aDivision of Biology and Biological Engineering 156-29, Howard Hughes Medical Institute, California Institute of Technology, Pasadena, CA</b><br/><b>and bDivision of Engineering and Applied Sciences 136-93, California Institute of Technology, Pasadena, CA</b><br/>Contributed by David J. Anderson, August 16, 2015 (sent for review May 20, 2015)
+<br/>A lack of automated, quantitative, and accurate assessment of social
+<br/>behaviors in mammalian animal models has limited progress toward
+<br/>understanding mechanisms underlying social interactions and their
+<br/>disorders such as autism. Here we present a new integrated hard-
+<br/>ware and software system that combines video tracking, depth
+<br/>sensing, and machine learning for automatic detection and quanti-
+<br/>fication of social behaviors involving close and dynamic interactions
+<br/>between two mice of different coat colors in their home cage. We
+<br/>designed a hardware setup that integrates traditional video cameras
+<br/>with a depth camera, developed computer vision tools to extract the
+<br/>body “pose” of individual animals in a social context, and used a
+<br/>supervised learning algorithm to classify several well-described so-
+<br/>cial behaviors. We validated the robustness of the automated classi-
+<br/>fiers in various experimental settings and used them to examine how
+<br/>genetic background, such as that of Black and Tan Brachyury (BTBR)
+<br/>mice (a previously reported autism model), influences social behavior.
+<br/>Our integrated approach allows for rapid, automated measurement
+<br/>of social behaviors across diverse experimental designs and also af-
+<br/>fords the ability to develop new, objective behavioral metrics.
+<br/>social behavior | behavioral tracking | machine vision | depth sensing |
+<br/>supervised machine learning
+<br/>Social behaviors are critical for animals to survive and re-
+<br/>produce. Although many social behaviors are innate, they
+<br/>must also be dynamic and flexible to allow adaptation to a rap-
+<br/>idly changing environment. The study of social behaviors in model
+<br/>organisms requires accurate detection and quantification of such
+<br/>behaviors (1–3). Although automated systems for behavioral
+<br/>scoring in rodents are available (4–8), they are generally limited to
+<br/>single-animal assays, and their capabilities are restricted either to
+<br/>simple tracking or to specific behaviors that are measured using a
+<br/>dedicated apparatus (6–11) (e.g., elevated plus maze, light-dark
+<br/>box, etc.). By contrast, rodent social behaviors are typically scored
+<br/>manually. This is slow, highly labor-intensive, and subjective,
+<br/>resulting in analysis bottlenecks as well as inconsistencies between
+<br/>different human observers. These issues limit progress toward
+<br/>understanding the function of neural circuits and genes controlling
+<br/>social behaviors and their dysfunction in disorders such as autism
+<br/>(1, 12). In principle, these obstacles could be overcome through
+<br/>the development of automated systems for detecting and mea-
+<br/>suring social behaviors.
+<br/>Automating tracking and behavioral measurements during
+<br/>social interactions pose a number of challenges not encountered
+<br/>in single-animal assays, however, especially in the home cage
+<br/>environment (2). During many social behaviors, such as aggression
+<br/>or mating, two animals are in close proximity and often cross or
+<br/>touch each other, resulting in partial occlusion. This makes track-
+<br/>ing body positions, distinguishing each mouse, and detecting be-
+<br/>haviors particularly difficult. This is compounded by the fact that
+<br/>such social interactions are typically measured in the animals’
+<br/>home cage, where bedding, food pellets, and other moveable items
+<br/>can make tracking difficult. Nevertheless a home-cage environment
+<br/>is important for studying social behaviors, because it avoids the
+<br/>stress imposed by an unfamiliar testing environment.
+<br/>Recently several techniques have been developed to track
+<br/>social behaviors in animals with rigid exoskeletons, such as the
+<br/>fruit fly Drosophila, which have relatively few degrees of freedom
+<br/>in their movements (13–23). These techniques have had a trans-
+<br/>formative impact on the study of social behaviors in that species
+<br/>(2). Accordingly, the development of similar methods for mam-
+<br/>malian animal models, such as the mouse, could have a similar
+<br/>impact as well. However, endoskeletal animals exhibit diverse and
+<br/>flexible postures, and their actions during any one social behavior,
+<br/>such as aggression, are much less stereotyped than in flies. This
+<br/>presents a dual challenge to automated behavior classification:
+<br/>first, to accurately extract a representation of an animal’s posture
+<br/>from observed data, and second, to map that representation to the
+<br/>correct behavior (24–27). Current machine vision algorithms that
+<br/>track social interactions in mice mainly use the relative positions of
+<br/>two animals (25, 28–30); this approach generally cannot discrimi-
+<br/>nate social interactions that involve close proximity and vigorous
+<br/>physical activity, or identify specific behaviors such as aggression
+<br/>and mounting. In addition, existing algorithms that measure social
+<br/>interactions use a set of hardcoded, “hand-crafted” (i.e., pre-
+<br/>defined) parameters that make them difficult to adapt to new ex-
+<br/>perimental setups and conditions (25, 31).
+<br/>In this study, we combined 3D tracking and machine learning
+<br/>in an integrated system that can automatically detect, classify,
+<br/><b>and quantify distinct social behaviors, including those involving</b><br/>Significance
+<br/>Accurate, quantitative measurement of animal social behaviors
+<br/>is critical, not only for researchers in academic institutions
+<br/>studying social behavior and related mental disorders, but also for
+<br/>pharmaceutical companies developing drugs to treat disorders
+<br/>affecting social interactions, such as autism and schizophrenia.
+<br/>Here we describe an integrated hardware and software system
+<br/>that combines video tracking, depth-sensing technology, machine
+<br/>vision, and machine learning to automatically detect and score
+<br/>innate social behaviors, such as aggression, mating, and social
+<br/>investigation, between mice in a home-cage environment. This
+<br/>technology has the potential to have a transformative impact on
+<br/>the study of the neural mechanisms underlying social behavior
+<br/>and the development of new drug therapies for psychiatric dis-
+<br/>orders in humans.
+<br/>Author contributions: W.H., P.P., and D.J.A. designed research; W.H. performed research;
+<br/>W.H., X.P.B.-A., and S.G.N. contributed new reagents/analytic tools; W.H., A.K., M.Z., P.P.,
+<br/>and D.J.A. analyzed data; and W.H., A.K., M.Z., P.P., and D.J.A. wrote the paper.
+<br/>The authors declare no conflict of interest.
+<br/>This article contains supporting information online at www.pnas.org/lookup/suppl/doi:10.
+<br/>1073/pnas.1515982112/-/DCSupplemental.
+<br/>www.pnas.org/cgi/doi/10.1073/pnas.1515982112
+<br/>PNAS | Published online September 9, 2015 | E5351–E5360
+</td><td>('4502168', 'Weizhe Hong', 'weizhe hong')<br/>('6201086', 'Ann Kennedy', 'ann kennedy')<br/>('4195968', 'Moriel Zelikowsky', 'moriel zelikowsky')<br/>('1690922', 'Pietro Perona', 'pietro perona')</td><td>1To whom correspondence may be addressed. Email: whong@caltech.edu, perona@
+<br/>caltech.edu, or wuwei@caltech.edu.
+</td></tr><tr><td>113c22eed8383c74fe6b218743395532e2897e71</td><td>MODEC: Multimodal Decomposable Models for Human Pose Estimation
+<br/>Ben Sapp
+<br/><b>Google, Inc</b><br/><b>University of Washington</b></td><td>('1685978', 'Ben Taskar', 'ben taskar')</td><td>bensapp@google.com
+<br/>taskar@cs.washington.edu
+</td></tr><tr><td>11408af8861fb0a977412e58c1a23d61b8df458c</td><td>A Robust Learning Algorithm Based on
+<br/>SURF and PSM for Facial Expression Recognition
+<br/><b>Graduate School of Engineering, Kobe University, Kobe, 657-8501, Japan</b><br/><b>Graduate School of System Informatics, Kobe University, Kobe, 657-8501, Japan</b></td><td>('2866465', 'Jinhui Chen', 'jinhui chen')<br/>('39484328', 'Xiaoyan Lin', 'xiaoyan lin')<br/>('1744026', 'Tetsuya Takiguchi', 'tetsuya takiguchi')<br/>('1678564', 'Yasuo Ariki', 'yasuo ariki')</td><td>ianchen@me.cs.scitec.kobe-u.ac.jp, {takigu,ariki}@kobe-u.ac.jp
+</td></tr><tr><td>11cc0774365b0cc0d3fa1313bef3d32c345507b1</td><td>Face Recognition Using Active Near-IR
+<br/>Illumination
+<br/>Centre for Vision, Speech and Signal Processing
+<br/><b>University of Surrey, United Kingdom</b><br/>
+</td><td>('38746097', 'Xuan Zou', 'xuan zou')<br/>('1748684', 'Josef Kittler', 'josef kittler')<br/>('2173900', 'Kieron Messer', 'kieron messer')</td><td>@surrey.ac.uk
+</td></tr><tr><td>11f7f939b6fcce51bdd8f3e5ecbcf5b59a0108f5</td><td>Rolling Riemannian Manifolds to Solve the Multi-class Classification Problem
+<br/><b>Institute of Systems and Robotics - University of Coimbra, Portugal</b><br/><b>Portugal</b></td><td>('2117944', 'Rui Caseiro', 'rui caseiro')<br/>('39458914', 'Pedro Martins', 'pedro martins')<br/>('36478254', 'João F. Henriques', 'joão f. henriques')</td><td>{ruicaseiro, pedromartins, henriques, batista}@isr.uc.pt, fleite@mat.uc.pt
+</td></tr><tr><td>11269e98f072095ff94676d3dad34658f4876e0e</td><td>Facial Expression Recognition with Multithreaded
+<br/>Cascade of Rotation-invariant HOG
+<br/>Graduate School of System Informatics
+<br/>Graduate School of System Informatics
+<br/>Graduate School of System Informatics
+<br/><b>Kobe University</b><br/>Kobe, 657-8501, Japan
+<br/><b>Kobe University</b><br/>Kobe, 657-8501, Japan
+<br/><b>Kobe University</b><br/>Kobe, 657-8501, Japan
+<br/>In this paper, we propose a novel framework that adopts
+<br/>robust feature representation for training the multithreading
+<br/>boosting cascade. We adopt rotation-invariant HOG (Ri-HOG)
+<br/>as features, which is reminiscent of Dalal et al.’s HOG [9].
+<br/>However, in this paper, we noticeably enhance the conven-
+<br/>tional HOG in rotation-invariant ability and feature extraction
+<br/>speed. We carry out a detailed study of the effects of various
+<br/>implementation choices in descriptor performance. We subdi-
+<br/>vide the local patch into annular spatial bins to achieve spatial
+<br/>binning invariance. Besides, we apply radial gradient to attain
+<br/>gradient binning invariance, which is inspired by Takacs et
+<br/>al.’s RGT (Radial Gradient Transform) [10].
+</td><td>('2866465', 'Jinhui Chen', 'jinhui chen')<br/>('1744026', 'Tetsuya Takiguchi', 'tetsuya takiguchi')<br/>('1678564', 'Yasuo Ariki', 'yasuo ariki')</td><td>Email: ianchen@me.cs.scitec.kobe-u.ac.jp
+<br/>Email: takigu@kobe-u.ac.jp
+<br/>Email: ariki@kobe-u.ac.jp
+</td></tr><tr><td>113e5678ed8c0af2b100245057976baf82fcb907</td><td>Facing Imbalanced Data
+<br/>Recommendations for the Use of Performance Metrics
+</td><td>('1737918', 'Jeffrey F. Cohn', 'jeffrey f. cohn')<br/>('1707876', 'Fernando De la Torre', 'fernando de la torre')</td><td>1Carnegie Mellon University, Pittsburgh, PA, laszlo.jeni@ieee.org,ftorre@cs.cmu.edu
+<br/>2University of Pittsburgh, Pittsburgh, PA, jeffcohn@cs.cmu.edu
+</td></tr><tr><td>11691f1e7c9dbcbd6dfd256ba7ac710581552baa</td><td>SoccerNet: A Scalable Dataset for Action Spotting in Soccer Videos
+<br/><b>King Abdullah University of Science and Technology (KAUST), Saudi Arabia</b></td><td>('22314218', 'Silvio Giancola', 'silvio giancola')<br/>('41022271', 'Mohieddine Amine', 'mohieddine amine')<br/>('41015552', 'Tarek Dghaily', 'tarek dghaily')<br/>('2931652', 'Bernard Ghanem', 'bernard ghanem')</td><td>silvio.giancola@kaust.edu.sa, maa249@mail.aub.edu, tad05@mail.aub.edu, bernard.ghanem@kaust.edu.sa
+</td></tr><tr><td>11c04c4f0c234a72f94222efede9b38ba6b2306c</td><td>Real-Time Human Action Recognition by Luminance Field
+<br/>Trajectory Analysis
+<br/>Dept of Computing
+<br/>Kowloon, Hong Kong
+<br/>+852 2766-7316
+<br/><b>Hong Kong Polytechnic University</b><br/><b>University of Illinois at Urbana</b><br/><b>National University of Singapore</b><br/>Dept of ECE
+<br/>Champaign, USA
+<br/>+1 217-244-2960
+<br/>Dept of ECE
+<br/>Singapore
+<br/>+65 6516-2116
+</td><td>('2659956', 'Zhu Li', 'zhu li')<br/>('1708679', 'Yun Fu', 'yun fu')<br/>('1739208', 'Thomas S. Huang', 'thomas s. huang')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')</td><td>zhu.li@ieee.org
+<br/>{yunfu2,huang}@ifp.uiuc.edu
+<br/>elesyan@ece.nus.edu.sg
+</td></tr><tr><td>1128a4f57148cec96c0ef4ae3b5a0fbf07efbad9</td><td>Action Recognition by Learning Deep Multi-Granular
+<br/>Spatio-Temporal Video Representation∗
+<br/><b>University of Science and Technology of China, Hefei 230026, P. R. China</b><br/>2 Microsoft Research, Beijing 100080, P. R. China
+<br/><b>University of Rochester, NY 14627, USA</b></td><td>('35539590', 'Qing Li', 'qing li')<br/>('3430743', 'Zhaofan Qiu', 'zhaofan qiu')<br/>('2053452', 'Ting Yao', 'ting yao')<br/>('1724211', 'Tao Mei', 'tao mei')<br/>('3663422', 'Yong Rui', 'yong rui')<br/>('33642939', 'Jiebo Luo', 'jiebo luo')</td><td>{sealq, qiudavy}@mail.ustc.edu.cn; {tiyao, tmei, yongrui}@microsoft.com;
+<br/>jluo@cs.rochester.edu
+</td></tr><tr><td>1149c6ac37ae2310fe6be1feb6e7e18336552d95</td><td>Proc. Int. Conf. on Artificial Neural Networks (ICANN’05), Warsaw, LNCS 3696, vol. I, pp. 569-574, Springer Verlag 2005
+<br/>Classification of Face Images for Gender, Age,
+<br/>Facial Expression, and Identity1
+<br/>Department of Neuroinformatics and Cognitive Robotics
+<br/><b>Ilmenau Technical University, P.O.Box 100565, 98684 Ilmenau, Germany</b></td><td>('34420922', 'Torsten Wilhelm', 'torsten wilhelm')</td><td></td></tr><tr><td>11f17191bf74c80ad0b16b9f404df6d03f7c8814</td><td>Recognition of Visually Perceived Compositional
+<br/>Human Actions by Multiple Spatio-Temporal Scales
+<br/>Recurrent Neural Networks
+</td><td>('1754201', 'Minju Jung', 'minju jung')<br/>('1780524', 'Jun Tani', 'jun tani')</td><td></td></tr><tr><td>11367581c308f4ba6a32aac1b4a7cdb32cd63137</td><td></td><td></td><td></td></tr><tr><td>11a47a91471f40af5cf00449954474fd6e9f7694</td><td>Article
+<br/>NIRFaceNet: A Convolutional Neural Network for
+<br/>Near-Infrared Face Identification
+<br/><b>Southwest University, Chongqing 400715, China</b><br/>† These authors contribute equally to this work.
+<br/>Academic Editor: Willy Susilo
+<br/>Received: 16 July 2016; Accepted: 24 October 2016; Published: 27 October 2016
+</td><td>('34063916', 'Min Peng', 'min peng')<br/>('8206607', 'Chongyang Wang', 'chongyang wang')<br/>('34520676', 'Tong Chen', 'tong chen')<br/>('2373829', 'Guangyuan Liu', 'guangyuan liu')</td><td>peng2014m@email.swu.edu.cn (M.P.); mvrjustid520@email.swu.edu.cn (C.W.); liugy@swu.edu.cn (G.L.)
+<br/>* Correspondence: c_tong@swu.edu.cn; Tel.: +86-23-6825-4309
+</td></tr><tr><td>1198572784788a6d2c44c149886d4e42858d49e4</td><td>Learning Discriminative Features using Encoder/Decoder type Deep
+<br/>Neural Nets
+</td><td>('2162592', 'Vishwajeet Singh', 'vishwajeet singh')<br/>('40835709', 'Killamsetti Ravi Kumar', 'killamsetti ravi kumar')</td><td>1ALPES, Bolarum, Hyderabad 500010, vsthakur@gmail.com
+<br/>2ALPES, Bolarum, Hyderabad 500010, ravi.killamsetti@gmail.com
+<br/>3SNIST, Ghatkesar, Hyderabad 501301, kumar.e@gmail.com
+</td></tr><tr><td>11fe6d45aa2b33c2ec10d9786a71c15ec4d3dca8</td><td>970
+<br/>JUNE 2008
+<br/>Tied Factor Analysis for Face Recognition
+<br/>across Large Pose Differences
+</td><td>('1792404', 'James H. Elder', 'james h. elder')<br/>('1734784', 'Jonathan Warrell', 'jonathan warrell')<br/>('2338011', 'Fatima M. Felisberti', 'fatima m. felisberti')</td><td></td></tr><tr><td>1134a6be0f469ff2c8caab266bbdacf482f32179</td><td>IJRET: International Journal of Research in Engineering and Technology eISSN: 2319-1163 | pISSN: 2321-7308
+<br/>FACIAL EXPRESSION IDENTIFICATION USING FOUR-BIT CO-
+<br/>OCCURRENCE MATRIXFEATURES AND K-NN CLASSIFIER
+<br/><b>Aditya College of Engineering, Surampalem, East Godavari</b><br/>District, Andhra Pradesh, India
+</td><td>('8118823', 'Bala Shankar', 'bala shankar')<br/>('27686729', 'S R Kumar', 's r kumar')</td><td></td></tr><tr><td>11b3877df0213271676fa8aa347046fd4b1a99ad</td><td>Unsupervised Identification of Multiple Objects of
+<br/>Interest from Multiple Images: dISCOVER
+<br/><b>Carnegie Mellon University</b></td><td>('1713589', 'Devi Parikh', 'devi parikh')<br/>('1746230', 'Tsuhan Chen', 'tsuhan chen')</td><td>{dparikh,tsuhan}@cmu.edu
+</td></tr><tr><td>112780a7fe259dc7aff2170d5beda50b2bfa7bda</td><td></td><td></td><td></td></tr><tr><td>1130c38e88108cf68b92ecc61a9fc5aeee8557c9</td><td>Dynamically Encoded Actions based on Spacetime Saliency
+<br/><b>Institute of Electrical Measurement and Measurement Signal Processing, TU Graz, Austria</b><br/><b>York University, Toronto, Canada</b></td><td>('2322150', 'Christoph Feichtenhofer', 'christoph feichtenhofer')<br/>('1718587', 'Axel Pinz', 'axel pinz')<br/>('1709096', 'Richard P. Wildes', 'richard p. wildes')</td><td>{feichtenhofer, axel.pinz}@tugraz.at
+<br/>wildes@cse.yorku.ca
+</td></tr><tr><td>11b89011298e193d9e6a1d99302221c1d8645bda</td><td>Structured Feature Selection
+<br/><b>Rensselaer Polytechnic Institute, USA</b></td><td>('39965604', 'Tian Gao', 'tian gao')<br/>('2860279', 'Ziheng Wang', 'ziheng wang')<br/>('1726583', 'Qiang Ji', 'qiang ji')</td><td>{gaot, wangz10, jiq}@rpi.edu
+</td></tr><tr><td>111a9645ad0108ad472b2f3b243ed3d942e7ff16</td><td>Facial Expression Classification Using
+<br/>Combined Neural Networks
+<br/>DEE/PUC-Rio, Marquês de São Vicente 225, Rio de Janeiro – RJ - Brazil
+</td><td>('14032279', 'Rafael V. Santos', 'rafael v. santos')<br/>('1744578', 'Marley M.B.R. Vellasco', 'marley m.b.r. vellasco')<br/>('34686777', 'Raul Q. Feitosa', 'raul q. feitosa')<br/>('1687882', 'Ricardo Tanscheit', 'ricardo tanscheit')</td><td>marley@ele.puc-rio.br
+</td></tr><tr><td>1177977134f6663fff0137f11b81be9c64c1f424</td><td>Multi-Manifold Deep Metric Learning for Image Set Classification
+<br/>1Advanced Digital Sciences Center, Singapore
+<br/><b>School of Electrical and Electronic Engineering, Nanyang Technological University, Singapore</b><br/><b>School of ICE, Beijing University of Posts and Telecommunications, Beijing, China</b><br/><b>University of Illinois at Urbana-Champaign, Urbana, IL, USA</b><br/><b>Tsinghua University, Beijing, China</b></td><td>('1697700', 'Jiwen Lu', 'jiwen lu')<br/>('22804340', 'Gang Wang', 'gang wang')<br/>('1774956', 'Weihong Deng', 'weihong deng')<br/>('1742248', 'Pierre Moulin', 'pierre moulin')<br/>('39491387', 'Jie Zhou', 'jie zhou')</td><td>jiwen.lu@adsc.com.sg; wanggang@ntu.edu.sg; whdeng@bupt.edu.cn;
+<br/>moulin@ifp.uiuc.edu; jzhou@tsinghua.edu.cn
+</td></tr><tr><td>1190cba0cae3c8bb81bf80d6a0a83ae8c41240bc</td><td>Squared Earth Mover’s Distance Loss for Training
+<br/>Deep Neural Networks on Ordered-Classes
+<br/>Dept. of Computer Science
+<br/><b>Stony Brook University</b><br/>Chen-Ping Yu
+<br/><b>Phiar Technologies, Inc</b></td><td>('2321406', 'Le Hou', 'le hou')</td><td></td></tr><tr><td>111d0b588f3abbbea85d50a28c0506f74161e091</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 134 – No.10, January 2016
+<br/>Facial Expression Recognition from Visual Information
+<br/>using Curvelet Transform
+<br/>Surabhi Group of Institution Bhopal
+<br/>systems. Further applications
+</td><td>('6837599', 'Pratiksha Singh', 'pratiksha singh')</td><td></td></tr><tr><td>11ac88aebe0230e743c7ea2c2a76b5d4acbfecd0</td><td>Hybrid Cascade Model for Face Detection in the Wild
+<br/>Based on Normalized Pixel Difference and a Deep
+<br/>Convolutional Neural Network
+<br/>Darijan Marčetić[0000-0002-6556-665X], Martin Soldić[0000-0002-4031-0404]
+<br/>and Slobodan Ribarić[0000-0002-8708-8513]
+<br/><b>University of Zagreb, Faculty of Electrical Engineering and Computing, Croatia</b></td><td></td><td>{darijan.marcetic, martin.soldic, slobodan.ribaric}@fer.hr
+</td></tr><tr><td>117f164f416ea68e8b88a3005e55a39dbdf32ce4</td><td>Neuroaesthetics in Fashion: Modeling the Perception of Fashionability
+<br/>1Institut de Rob`otica i Inform`atica Industrial (CSIC-UPC),
+<br/><b>University of Toronto</b></td><td>('3114470', 'Edgar Simo-Serra', 'edgar simo-serra')<br/>('37895334', 'Sanja Fidler', 'sanja fidler')<br/>('1994318', 'Francesc Moreno-Noguer', 'francesc moreno-noguer')<br/>('2422559', 'Raquel Urtasun', 'raquel urtasun')</td><td></td></tr><tr><td>7dda2eb0054eb1aeda576ed2b27a84ddf09b07d4</td><td>2010 The 3rd International Conference on Machine Vision (ICMV 2010)
+<br/>Face Recognition and Representation by Tensor-based MPCA Approach
+<br/>Dept. of Control, Instrumentation, and Robot
+<br/>Engineering
+<br/><b>Chosun University</b><br/>Gwangju, Korea
+</td><td>('2806903', 'Yun-Hee Han', 'yun-hee han')</td><td>Yhhan1059@gmail.com
+</td></tr><tr><td>7d2556d674ad119cf39df1f65aedbe7493970256</td><td>Now You Shake Me: Towards Automatic 4D Cinema
+<br/><b>University of Toronto</b><br/><b>Vector Institute</b><br/>http://www.cs.toronto.edu/˜henryzhou/movie4d/
+</td><td>('2481662', 'Yuhao Zhou', 'yuhao zhou')<br/>('37895334', 'Sanja Fidler', 'sanja fidler')</td><td>{henryzhou, makarand, fidler}@cs.toronto.edu
+</td></tr><tr><td>7d94fd5b0ca25dd23b2e36a2efee93244648a27b</td><td>Convolutional Network for Attribute-driven and Identity-preserving Human Face
+<br/>Generation
+<br/><b>The Hong Kong Polytechnic University, Hong Kong</b><br/><b>bSchool of Computer Science and Technology, Harbin Institute of Technology, Harbin 150001, China</b></td><td>('1701799', 'Mu Li', 'mu li')<br/>('1724520', 'Wangmeng Zuo', 'wangmeng zuo')<br/>('1698371', 'David Zhang', 'david zhang')</td><td></td></tr><tr><td>7d8c2d29deb80ceed3c8568100376195ce0914cb</td><td>Identity-Aware Textual-Visual Matching with Latent Co-attention
+<br/><b>The Chinese University of Hong Kong</b></td><td>('1700248', 'Shuang Li', 'shuang li')<br/>('1721881', 'Tong Xiao', 'tong xiao')<br/>('1764548', 'Hongsheng Li', 'hongsheng li')<br/>('1742383', 'Wei Yang', 'wei yang')<br/>('31843833', 'Xiaogang Wang', 'xiaogang wang')</td><td>{sli,xiaotong,hsli,wyang,xgwang}@ee.cuhk.edu.hk
+</td></tr><tr><td>7d306512b545df98243f87cb8173df83b4672b18</td><td>Flag Manifolds for the Characterization of
+<br/>Geometric Structure in Large Data Sets
+<br/>T. Marrinan, J. R. Beveridge, B. Draper, M. Kirby, and C. Peterson
+<br/><b>Colorado State University, Fort Collins, Colorado, USA</b></td><td></td><td>kirby@math.colostate.edu
+</td></tr><tr><td>7d98dcd15e28bcc57c9c59b7401fa4a5fdaa632b</td><td>FACE APPEARANCE FACTORIZATION FOR EXPRESSION ANALYSIS AND SYNTHESIS
+<br/><b>Heudiasyc Laboratory, CNRS, University of Technology of Compi`egne</b><br/>BP 20529, 60205 COMPIEGNE Cedex, FRANCE.
+</td><td>('2371236', 'Bouchra Abboud', 'bouchra abboud')<br/>('1742818', 'Franck Davoine', 'franck davoine')</td><td>E-mail: Bouchra.Abboud@hds.utc.fr
+</td></tr><tr><td>7d41b67a641426cb8c0f659f0ba74cdb60e7159a</td><td>Soft Biometric Retrieval to Describe and Identify Surveillance Images
+<br/>School of Electronics and Computer Science,
+<br/><b>University of Southampton, United Kingdom</b></td><td>('3408521', 'Daniel Martinho-Corbishley', 'daniel martinho-corbishley')<br/>('1727698', 'Mark S. Nixon', 'mark s. nixon')<br/>('3000521', 'John N. Carter', 'john n. carter')</td><td>{dmc,msn,jnc}@ecs.soton.ac.uk
+</td></tr><tr><td>7d1688ce0b48096e05a66ead80e9270260cb8082</td><td>Real vs. Fake Emotion Challenge: Learning to Rank Authenticity From Facial
+<br/>Activity Descriptors
+<br/><b>Otto von Guericke University</b><br/>Magdeburg, Germany
+</td><td>('2441656', 'Frerk Saxen', 'frerk saxen')<br/>('1783606', 'Philipp Werner', 'philipp werner')<br/>('1741165', 'Ayoub Al-Hamadi', 'ayoub al-hamadi')</td><td>{Frerk.Saxen, Philipp.Werner, Ayoub.Al-Hamadi}@ovgu.de
+</td></tr><tr><td>7d53678ef6009a68009d62cd07c020706a2deac3</td><td>Facial Feature Point Extraction using
+<br/>the Adaptive Mean Shape in Active Shape Model
+<br/><b>Hanyang University</b><br/>Haengdang-dong, Seongdong-gu, Seoul, South Korea
+<br/>Giheung-eup, Yongin-si, Gyeonggi-do, Seoul, Korea
+<br/><b>Samsung Advanced Institute of Technology</b></td><td>('2771795', 'Hyoung-Joon Kim', 'hyoung-joon kim')<br/>('34600044', 'Wonjun Hwang', 'wonjun hwang')<br/>('2077154', 'Seok-Cheol Kee', 'seok-cheol kee')<br/>('2982904', 'Whoi-Yul Kim', 'whoi-yul kim')<br/>('40370422', 'Hyun-Chul Kim', 'hyun-chul kim')</td><td>{hckim, khjoon}@vision.hanyang.ac.kr, wykim@hanyang.ac.kr
+<br/>{wj.hwang, sckee}@samsung.com
+</td></tr><tr><td>7d7be6172fc2884e1da22d1e96d5899a29831ad2</td><td>L2GSCI: Local to Global Seam Cutting and Integrating for
+<br/>Accurate Face Contour Extraction
+<br/><b>South China University of China</b><br/><b>South China University of China</b><br/><b>Kitware, Inc</b><br/><b>The Education University of Hong Kong</b><br/><b>South China University of China</b></td><td>('37221211', 'Yongwei Nie', 'yongwei nie')<br/>('37579534', 'Xu Cao', 'xu cao')<br/>('2792312', 'Chengjiang Long', 'chengjiang long')<br/>('2420746', 'Ping Li', 'ping li')<br/>('4882057', 'Guiqing Li', 'guiqing li')</td><td>nieyongwei@scut.edu.cn
+</td></tr><tr><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td><td>Labeled Faces in the Wild: A Survey
+</td><td>('1714536', 'Erik Learned-Miller', 'erik learned-miller')<br/>('1799600', 'Gary Huang', 'gary huang')<br/>('2895705', 'Aruni RoyChowdhury', 'aruni roychowdhury')<br/>('3131569', 'Haoxiang Li', 'haoxiang li')<br/>('1745420', 'Gang Hua', 'gang hua')</td><td></td></tr><tr><td>7d73adcee255469aadc5e926066f71c93f51a1a5</td><td>978-1-4799-9988-0/16/$31.00 ©2016 IEEE
+<br/>1283
+<br/>ICASSP 2016
+</td><td></td><td></td></tr><tr><td>7df4f96138a4e23492ea96cf921794fc5287ba72</td><td>A Jointly Learned Deep Architecture for Facial Attribute Analysis and Face
+<br/>Detection in the Wild
+<br/><b>Fudan University</b></td><td>('37391748', 'Keke He', 'keke he')<br/>('35782003', 'Yanwei Fu', 'yanwei fu')<br/>('1713721', 'Xiangyang Xue', 'xiangyang xue')</td><td>{kkhe15, yanweifu, xyxue}@fudan.edu.cn
+</td></tr><tr><td>7d9fe410f24142d2057695ee1d6015fb1d347d4a</td><td>Facial Expression Feature Extraction Based on
+<br/>FastLBP
+<br/><b>Beijing, China</b><br/><b>Beijing, China</b><br/>facial expression
+</td><td>('1921151', 'Ya Zheng', 'ya zheng')<br/>('2780963', 'Xiuxin Chen', 'xiuxin chen')<br/>('2671173', 'Chongchong Yu', 'chongchong yu')<br/>('39681852', 'Cheng Gao', 'cheng gao')</td><td>Email: zy_lovedabao@163.com
+<br/>Email: chenxx1979@126.com, chongzhy@vip.sina.com, gcandgh@163.com
+</td></tr><tr><td>7dd578878e84337d6d0f5eb593f22cabeacbb94c</td><td>Classifiers for Driver Activity Monitoring
+<br/>Department of Computer Science and Engineering
+<br/><b>University of Minnesota</b></td><td>('3055503', 'Harini Veeraraghavan', 'harini veeraraghavan')<br/>('32975623', 'Nathaniel Bird', 'nathaniel bird')<br/>('1734862', 'Stefan Atev', 'stefan atev')<br/>('1696163', 'Nikolaos Papanikolopoulos', 'nikolaos papanikolopoulos')</td><td>harini@cs.umn.edu bird@cs.umn.edu atev@cs.umn.edu npapas@cs.umn.edu
+</td></tr><tr><td>7dffe7498c67e9451db2d04bb8408f376ae86992</td><td>LEAR-INRIA submission for the THUMOS workshop
+<br/>LEAR, INRIA, France
+</td><td>('40465030', 'Heng Wang', 'heng wang')</td><td>firstname.lastname@inria.fr
+</td></tr><tr><td>7df268a3f4da7d747b792882dfb0cbdb7cc431bc</td><td>Semi-supervised Adversarial Learning to Generate
+<br/>Photorealistic Face Images of New Identities from 3D
+<br/>Morphable Model
+<br/><b>Imperial College London, UK</b><br/><b>Centre for Vision, Speech and Signal Processing, University of Surrey, UK</b></td><td>('2151914', 'Baris Gecer', 'baris gecer')<br/>('48467774', 'Binod Bhattarai', 'binod bhattarai')<br/>('1748684', 'Josef Kittler', 'josef kittler')<br/>('1700968', 'Tae-Kyun Kim', 'tae-kyun kim')</td><td>{b.gecer,b.bhattarai,tk.kim}@imperial.ac.uk,
+<br/>j.kittler@surrey.ac.uk
+</td></tr><tr><td>7d3f6dd220bec883a44596ddec9b1f0ed4f6aca2</td><td>2106
+<br/>Linear Regression for Face Recognition
+</td><td>('2095953', 'Imran Naseem', 'imran naseem')<br/>('2444665', 'Roberto Togneri', 'roberto togneri')<br/>('1698675', 'Mohammed Bennamoun', 'mohammed bennamoun')</td><td></td></tr><tr><td>7de386bf2a1b2436c836c0cc1f1f23fccb24aad6</td><td>Finding What the Driver Does
+<br/>Final Report
+<br/>Prepared by:
+<br/>Artificial Intelligence, Robotics, and Vision Laboratory
+<br/>Department of Computer Science and Engineering
+<br/><b>University of Minnesota</b><br/>CTS 05-03
+<br/>HUMAN-CENTERED TECHNOLOGY TO ENHANCE SAFETY AND MOBILITY
+</td><td>('3055503', 'Harini Veeraraghavan', 'harini veeraraghavan')<br/>('1734862', 'Stefan Atev', 'stefan atev')<br/>('32975623', 'Nathaniel Bird', 'nathaniel bird')<br/>('31791248', 'Paul Schrater', 'paul schrater')<br/>('40654170', 'Nilolaos Papanikolopoulos', 'nilolaos papanikolopoulos')</td><td></td></tr><tr><td>29ce6b54a87432dc8371f3761a9568eb3c5593b0</td><td>Kent Academic Repository
+<br/>Full text document (pdf)
+<br/>Citation for published version
+<br/>Yassin, DK H. PHM and Hoque, Sanaul and Deravi, Farzin (2013) Age Sensitivity of Face Recognition
+<br/> pp. 12-15.
+<br/>DOI
+<br/>https://doi.org/10.1109/EST.2013.8
+<br/>Link to record in KAR
+<br/>http://kar.kent.ac.uk/43222/
+<br/>Document Version
+<br/>Author's Accepted Manuscript
+<br/>Copyright & reuse
+<br/>Content in the Kent Academic Repository is made available for research purposes. Unless otherwise stated all
+<br/>content is protected by copyright and in the absence of an open licence (eg Creative Commons), permissions
+<br/>for further reuse of content should be sought from the publisher, author or other copyright holder.
+<br/>Versions of research
+<br/>The version in the Kent Academic Repository may differ from the final published version.
+<br/>Users are advised to check http://kar.kent.ac.uk for the status of the paper. Users should always cite the
+<br/>published version of record.
+<br/>Enquiries
+<br/>For any further enquiries regarding the licence status of this document, please contact:
+<br/>If you believe this document infringes copyright then please contact the KAR admin team with the take-down
+<br/>information provided at http://kar.kent.ac.uk/contact.html
+</td><td></td><td>researchsupport@kent.ac.uk
+</td></tr><tr><td>2914e8c62f0432f598251fae060447f98141e935</td><td><b>University of Nebraska - Lincoln</b><br/>Computer Science and Engineering: Theses,
+<br/>Dissertations, and Student Research
+<br/>Computer Science and Engineering, Department of
+<br/>8-2016
+<br/>ACTIVITY ANALYSIS OF SPECTATOR
+<br/>PERFORMER VIDEOS USING MOTION
+<br/>TRAJECTORIES
+<br/>Follow this and additional works at: http://digitalcommons.unl.edu/computerscidiss
+<br/>Part of the Computer Engineering Commons
+<br/>Timsina, Anish, "ACTIVITY ANALYSIS OF SPECTATOR PERFORMER VIDEOS USING MOTION TRAJECTORIES" (2016).
+<br/>Computer Science and Engineering: Theses, Dissertations, and Student Research. Paper 107.
+<br/>http://digitalcommons.unl.edu/computerscidiss/107
+<br/>Nebraska - Lincoln. It has been accepted for inclusion in Computer Science and Engineering: Theses, Dissertations, and Student Research by an
+</td><td>('2404944', 'Anish Timsina', 'anish timsina')</td><td>DigitalCommons@University of Nebraska - Lincoln
+<br/>University of Nebraska-Lincoln, timsina.anish@gmail.com
+<br/>This Article is brought to you for free and open access by the Computer Science and Engineering, Department of at DigitalCommons@University of
+<br/>authorized administrator of DigitalCommons@University of Nebraska - Lincoln.
+</td></tr><tr><td>292eba47ef77495d2613373642b8372d03f7062b</td><td>Deep Secure Encoding: An Application to Face Recognition
+</td><td>('39192292', 'Rohit Pandey', 'rohit pandey')<br/>('34872128', 'Yingbo Zhou', 'yingbo zhou')<br/>('1723877', 'Venu Govindaraju', 'venu govindaraju')</td><td></td></tr><tr><td>29e96ec163cb12cd5bd33bdf3d32181c136abaf9</td><td>Report No. UIUCDCS-R-2006-2748
+<br/>UILU-ENG-2006-1788
+<br/>Regularized Locality Preserving Projections with Two-Dimensional
+<br/>Discretized Laplacian Smoothing
+<br/>by
+<br/>July 2006
+</td><td>('1724421', 'Deng Cai', 'deng cai')<br/>('3945955', 'Xiaofei He', 'xiaofei he')<br/>('39639296', 'Jiawei Han', 'jiawei han')</td><td></td></tr><tr><td>29e793271370c1f9f5ac03d7b1e70d1efa10577c</td><td>International Journal of Signal Processing, Image Processing and Pattern Recognition
+<br/>Vol.6, No.5 (2013), pp.423-436
+<br/>http://dx.doi.org/10.14257/ijsip.2013.6.5.37
+<br/>Face Recognition Based on Multi-classifierWeighted Optimization
+<br/>and Sparse Representation
+<br/><b>Institute of control science and engineering</b><br/><b>University of Science and Technology Beijing</b><br/>1,2,330 Xueyuan Road, Haidian District, Beijing 100083 P. R.China
+</td><td>('11241192', 'Deng Nan', 'deng nan')<br/>('7814565', 'Zhengguang Xu', 'zhengguang xu')</td><td>1dengnan666666@163.com, 2xzg_1@263.net, 3 xiaobian@ustb.edu.cn
+</td></tr><tr><td>2902f62457fdf7e8e8ee77a9155474107a2f423e</td><td>Non-rigid 3D Shape Registration using an
+<br/>Adaptive Template
+<br/><b>University of York, UK</b></td><td>('1694260', 'Hang Dai', 'hang dai')<br/>('1737428', 'Nick Pears', 'nick pears')<br/>('32131827', 'William Smith', 'william smith')</td><td>{hd816,nick.pears,william.smith}@york.ac.uk
+</td></tr><tr><td>29d3ed0537e9ef62fd9ccffeeb72c1beb049e1ea</td><td>Parametric Dictionaries and Feature Augmentation for
+<br/>Continuous Domain Adaptation∗
+<br/>Adobe Research
+<br/>Bangalore, India
+<br/>Light
+<br/>Paolo Alto, USA
+<br/><b>University of Maryland</b><br/><b>College Park, USA</b></td><td>('35223379', 'Sumit Shekhar', 'sumit shekhar')<br/>('34711525', 'Nitesh Shroff', 'nitesh shroff')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td>sshekha@umiacs.umd.edu
+<br/>nshroff@umiacs.umd.edu
+<br/>rama@umiacs.umd.edu
+</td></tr><tr><td>29c7dfbbba7a74e9aafb6a6919629b0a7f576530</td><td>Automatic Facial Expression Analysis and Emotional
+<br/>Classification
+<br/>by
+<br/>Submitted to the Department of Math and Natural Sciences
+<br/>in partial fulfillment of the requirements for the degree of a
+<br/>Diplomingenieur der Optotechnik und Bildverarbeitung (FH)
+<br/>(Diplom Engineer of Photonics and Image Processing)
+<br/>at the
+<br/><b>UNIVERSITY OF APPLIED SCIENCE DARMSTADT (FHD</b><br/>Accomplished and written at the
+<br/><b>MASSACHUSETTS INSTITUTE OF TECHNOLOGY (MIT</b><br/>October 2004
+<br/>Author . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+<br/>Department of Math and Natural Sciences
+<br/>October 30, 2004
+<br/>Certified by . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+<br/>Dr. Harald Scharfenberg
+<br/>Professor at FHD
+<br/>Thesis Supervisor
+<br/>Accepted by . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+<br/>visiting scientist at MIT
+</td><td>('40163324', 'Robert Fischer', 'robert fischer')<br/>('1684626', 'Bernd Heisele', 'bernd heisele')</td><td></td></tr><tr><td>292c6b743ff50757b8230395c4a001f210283a34</td><td>Fast Violence Detection in Video
+<br/>O. Deniz1, I. Serrano1, G. Bueno1 and T-K. Kim2
+<br/><b>VISILAB group, University of Castilla-La Mancha, E.T.S.I.Industriales, Avda. Camilo Jose Cela s.n, 13071 Spain</b><br/><b>Imperial College, South Kensington Campus, London SW7 2AZ, UK</b><br/>Keywords:
+<br/>action recognition, violence detection, fight detection
+</td><td></td><td>{oscar.deniz, ismael.serrano, gloria.bueno}@uclm.es, tk.kim@imperial.ac.uk
+</td></tr><tr><td>29fc4de6b680733e9447240b42db13d5832e408f</td><td>International Journal of Multimedia and Ubiquitous Engineering
+<br/>Vol. 10, No. 3 (2015), pp. 35-44
+<br/>http://dx.doi.org/10.14257/ijmue.2015.10.3.04
+<br/>Recognition of Facial Expressions Based on Tracking and
+<br/>Selection of Discriminative Geometric Features
+<br/><b>Korea Electronics Technology Institute, Jeonju-si, Jeollabuk-do 561-844, Rep. of</b><br/>Korea
+<br/><b>Chonbuk National University, Jeonju-si</b><br/>Jeollabuk-do 561-756, Rep. of Korea
+<br/><b>School of Computing Science, Simon Fraser University, Burnaby, B.C., Canada</b></td><td>('32322842', 'Deepak Ghimire', 'deepak ghimire')<br/>('2034182', 'Joonwhoan Lee', 'joonwhoan lee')<br/>('1689656', 'Ze-Nian Li', 'ze-nian li')<br/>('1682436', 'Sunghwan Jeong', 'sunghwan jeong')<br/>('1937680', 'Hyo Sub Choi', 'hyo sub choi')</td><td>deepak@keti.re.kr, chlee@jbnu.ac.kr, li@sfu.ca, shjeong@keti.re.kr,
+<br/>shpark@keti.re.kr, hschoi@keti.re.kr
+</td></tr><tr><td>29c1f733a80c1e07acfdd228b7bcfb136c1dff98</td><td></td><td></td><td></td></tr><tr><td>29f27448e8dd843e1c4d2a78e01caeaea3f46a2d</td><td></td><td></td><td></td></tr><tr><td>294d1fa4e1315e1cf7cc50be2370d24cc6363a41</td><td>2008 SPIE Digital Library -- Subscriber Archive Copy
+</td><td></td><td></td></tr><tr><td>29d414bfde0dfb1478b2bdf67617597dd2d57fc6</td><td>Multidim Syst Sign Process (2010) 21:213–229
+<br/>DOI 10.1007/s11045-009-0099-y
+<br/>Perfect histogram matching PCA for face recognition
+<br/>Received: 10 August 2009 / Revised: 21 November 2009 / Accepted: 29 December 2009 /
+<br/>Published online: 14 January 2010
+<br/>© Springer Science+Business Media, LLC 2010
+</td><td>('2413241', 'Ana-Maria Sevcenco', 'ana-maria sevcenco')</td><td></td></tr><tr><td>2912c3ea67678a1052d7d5cbe734a6ad90fc360e</td><td>Facial Feature Detection using a Virtual Structuring
+<br/>Element
+<br/>Intelligent Systems Lab Amsterdam,
+<br/><b>University of Amsterdam</b><br/>Kruislaan 403, 1098 SJ Amsterdam, The Netherlands
+<br/>Keywords: Feature Detection, Active Appearance Models
+</td><td>('9301018', 'Roberto Valenti', 'roberto valenti')<br/>('1703601', 'Nicu Sebe', 'nicu sebe')<br/>('1695527', 'Theo Gevers', 'theo gevers')</td><td>rvalenti@science.uva.nl
+<br/>nicu@science.uva.nl
+<br/>gevers@science.uva.nl
+</td></tr><tr><td>29f4ac49fbd6ddc82b1bb697820100f50fa98ab6</td><td>The Benefits and Challenges of Collecting Richer Object Annotations
+<br/>Department of Computer Science
+<br/><b>University of Illinois Urbana Champaign</b></td><td>('2831988', 'Ian Endres', 'ian endres')<br/>('2270286', 'Ali Farhadi', 'ali farhadi')<br/>('2433269', 'Derek Hoiem', 'derek hoiem')<br/>('1744452', 'David A. Forsyth', 'david a. forsyth')</td><td>{iendres2,afarhad2,dhoiem,daf}@uiuc.edu
+</td></tr><tr><td>2910fcd11fafee3f9339387929221f4fc1160973</td><td>Evaluating Open-Universe Face Identification on the Web
+<br/><b>Robotics Institute, Carnegie Mellon University, Pittsburgh, PA</b><br/><b>Center for Research in Computer Vision, University of Central Florida, Orlando, FL</b></td><td>('16131262', 'Enrique G. Ortiz', 'enrique g. ortiz')</td><td>brian@briancbecker.com and eortiz@cs.ucf.edu
+</td></tr><tr><td>29479bb4fe8c04695e6f5ae59901d15f8da6124b</td><td>Multiple Instance Learning for Labeling Faces in
+<br/>Broadcasting News Video
+<br/>School of Computer Science
+<br/><b>Carnegie Mellon University</b><br/>Pittsburgh, PA 15213
+<br/>School of Computer Science
+<br/><b>Carnegie Mellon University</b><br/>Pittsburgh, PA 15213
+<br/>School of Computer Science
+<br/><b>Carnegie Mellon University</b><br/>Pittsburgh, PA 15213
+</td><td>('38936351', 'Jun Yang', 'jun yang')<br/>('2005689', 'Rong Yan', 'rong yan')<br/>('7661726', 'Alexander G. Hauptmann', 'alexander g. hauptmann')</td><td>juny@cs.cmu.edu
+<br/>yanrong@cs.cmu.edu
+<br/>alex+@cs.cmu.edu
+</td></tr><tr><td>290136947fd44879d914085ee51d8a4f433765fa</td><td>On a Taxonomy of Facial Features
+</td><td>('1817623', 'Brendan Klare', 'brendan klare')<br/>('6680444', 'Anil K. Jain', 'anil k. jain')</td><td></td></tr><tr><td>2957715e96a18dbb5ed5c36b92050ec375214aa6</td><td>Improving Face Attribute Detection with Race and Gender Diversity
+<br/>InclusiveFaceNet:
+</td><td>('3766392', 'Hee Jung Ryu', 'hee jung ryu')</td><td></td></tr><tr><td>291f527598c589fb0519f890f1beb2749082ddfd</td><td>Seeing People in Social Context: Recognizing
+<br/>People and Social Relationships
+<br/><b>University of Illinois at Urbana-Champaign, Urbana, IL</b><br/><b>Kodak Research Laboratories, Rochester, NY</b></td><td>('22804340', 'Gang Wang', 'gang wang')<br/>('33642939', 'Jiebo Luo', 'jiebo luo')</td><td></td></tr><tr><td>291265db88023e92bb8c8e6390438e5da148e8f5</td><td>MS-Celeb-1M: A Dataset and Benchmark for
+<br/>Large-Scale Face Recognition
+<br/>Microsoft Research
+</td><td>('3133575', 'Yandong Guo', 'yandong guo')<br/>('1684635', 'Lei Zhang', 'lei zhang')<br/>('1689532', 'Yuxiao Hu', 'yuxiao hu')<br/>('1722627', 'Xiaodong He', 'xiaodong he')<br/>('1800422', 'Jianfeng Gao', 'jianfeng gao')</td><td>{yandong.guo,leizhang,yuxiao.hu,xiaohe,jfgao}@microsoft.com
+</td></tr><tr><td>29c340c83b3bbef9c43b0c50b4d571d5ed037cbd</td><td>Stacked Dense U-Nets with Dual
+<br/>Transformers for Robust Face Alignment
+<br/>https://github.com/deepinsight/insightface
+<br/>https://jiankangdeng.github.io/
+<br/>https://ibug.doc.ic.ac.uk/people/nxue
+<br/>Stefanos Zafeiriou2
+<br/>https://wp.doc.ic.ac.uk/szafeiri/
+<br/>1 InsightFace
+<br/>Shanghai, China
+<br/>2 IBUG
+<br/><b>Imperial College London</b><br/>London, UK
+</td><td>('3007274', 'Jia Guo', 'jia guo')<br/>('3234063', 'Jiankang Deng', 'jiankang deng')<br/>('3007274', 'Jia Guo', 'jia guo')<br/>('3234063', 'Jiankang Deng', 'jiankang deng')<br/>('4091869', 'Niannan Xue', 'niannan xue')</td><td></td></tr><tr><td>297d3df0cf84d24f7efea44f87c090c7d9be4bed</td><td>Appearance-based 3-D Face Recognition from
+<br/>Video
+<br/><b>University of Maryland, Center for Automation Research</b><br/>A.V. Williams Building
+<br/><b>College Park, MD</b><br/><b>The Robotics Institute, Carnegie Mellon University</b><br/>5000 Forbes Avenue, Pittsburgh, PA 15213
+</td><td>('33731953', 'Ralph Gross', 'ralph gross')<br/>('40039594', 'Simon Baker', 'simon baker')</td><td></td></tr><tr><td>29b86534d4b334b670914038c801987e18eb5532</td><td>Total Cluster: A person agnostic clustering method for
+<br/>broadcast videos
+<br/><b>Computer Vision for Human Computer Interaction, Karlsruhe Institute of Technology, Germany</b><br/><b>Visual Geometry Group, University of Oxford, UK</b><br/><b>Center for Machine Vision Research, University of Oulu, Finland</b></td><td>('2103464', 'Makarand Tapaswi', 'makarand tapaswi')<br/>('3188342', 'Omkar M. Parkhi', 'omkar m. parkhi')<br/>('2827962', 'Esa Rahtu', 'esa rahtu')<br/>('1741116', 'Eric Sommerlade', 'eric sommerlade')<br/>('1742325', 'Rainer Stiefelhagen', 'rainer stiefelhagen')<br/>('1688869', 'Andrew Zisserman', 'andrew zisserman')</td><td>tapaswi@kit.edu, omkar@robots.ox.ac.uk, erahtu@ee.oulu.fi
+<br/>eric@robots.ox.ac.uk, rainer.stiefelhagen@kit.edu, az@robots.ox.ac.uk
+</td></tr><tr><td>29631ca6cff21c9199c70bcdbbcd5f812d331a96</td><td>RESEARCH ARTICLE
+<br/>Error Rates in Users of Automatic Face
+<br/>Recognition Software
+<br/><b>School of Psychology, The University of New South Wales, Sydney, Australia, 2 School of Psychology</b><br/><b>The University of Sydney, Sydney, Australia</b></td><td>('40404556', 'David White', 'david white')<br/>('29329747', 'James D. Dunn', 'james d. dunn')<br/>('5016966', 'Alexandra C. Schmid', 'alexandra c. schmid')<br/>('3086646', 'Richard I. Kemp', 'richard i. kemp')</td><td>* david.white@unsw.edu.au
+</td></tr><tr><td>2965d092ed72822432c547830fa557794ae7e27b</td><td>Improving Representation and Classification of Image and
+<br/>Video Data for Surveillance Applications
+<br/>BSc(Biol), MSc(Biol), MSc(CompSc)
+<br/>A thesis submitted for the degree of Doctor of Philosophy at
+<br/><b>The University of Queensland in</b><br/>School of Information Technology and Electrical Engineering
+</td><td>('2706642', 'Andres Sanin', 'andres sanin')</td><td></td></tr><tr><td>2983efadb1f2980ab5ef20175f488f77b6f059d7</td><td>ch04_88815.QXP 12/23/08 3:36 PM Page 53
+<br/>◆ 4 ◆
+<br/>EMOTION IN HUMAN–COMPUTER INTERACTION
+<br/><b>Stanford University</b><br/>Understanding Emotion . . . . . . . . . . . . . . . . . . . . . . . . . . 54
+<br/>Distinguishing Emotion from Related Constructs . . . . 55
+<br/>Mood . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 55
+<br/>Sentiment . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 56
+<br/>Effects of Affect . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 56
+<br/>Attention . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 56
+<br/>Memory . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 57
+<br/>Performance . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 57
+<br/>Assessment . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 57
+<br/>Causes of Emotion . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 58
+<br/>Needs and Goals . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 58
+<br/>Appraisal Theories . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 59
+<br/>Contagion . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 59
+<br/>Moods and Sentiments . . . . . . . . . . . . . . . . . . . . . . . . . . . . 59
+<br/>Previous Emotional State . . . . . . . . . . . . . . . . . . . . . . . . . . 59
+<br/>Causes of Mood . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 60
+<br/>Contagion . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 60
+<br/>Color . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 60
+<br/>Other Effects . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 60
+<br/>Measuring Affect . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 60
+<br/>Neurological Responses . . . . . . . . . . . . . . . . . . . . . . . . . . 61
+<br/>Autonomic Activity . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 61
+<br/>Facial Expression . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 61
+<br/>Voice . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 62
+<br/>Self-Report Measures . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 62
+<br/>Affect Recognition by Users . . . . . . . . . . . . . . . . . . . . . . . 63
+<br/>Open Questions . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 64
+<br/>1. With which emotion should HCI designers
+<br/>be most concerned? . . . . . . . . . . . . . . . . . . . . . . . . . 64
+<br/>2. When and how should interfaces attempt to
+<br/>directly address users’ emotions and basic
+<br/>needs (vs. application-specific goals)? . . . . . . . . . . . . 64
+<br/>3. How accurate must emotion recognition be
+<br/>to be useful as an interface technique? . . . . . . . . . . . 64
+<br/>4. When and how should users be informed
+<br/>that their affective states are being monitored
+<br/>and adapted to? . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 64
+<br/>5. How does emotion play out in computer-
+<br/>mediated communication (CMC)? . . . . . . . . . . . . . . 64
+<br/>Conclusion . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 64
+<br/>Acknowledgments . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 64
+<br/>References . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 65
+<br/>53
+</td><td>('2739604', 'Scott Brave', 'scott brave')<br/>('2029850', 'Clifford Nass', 'clifford nass')</td><td></td></tr><tr><td>2911e7f0fb6803851b0eddf8067a6fc06e8eadd6</td><td>Joint Fine-Tuning in Deep Neural Networks
+<br/>for Facial Expression Recognition
+<br/>School of Electrical Engineering
+<br/><b>Korea Advanced Institute of Science and Technology</b></td><td>('1800903', 'Heechul Jung', 'heechul jung')<br/>('3249661', 'Junho Yim', 'junho yim')</td><td>{heechul, haeng, junho.yim, sunny0414, junmo.kim}@kaist.ac.kr
+</td></tr><tr><td>2921719b57544cfe5d0a1614d5ae81710ba804fa</td><td>Face Recognition Enhancement Based on Image
+<br/>File Formats and Wavelet De-noising
+<br/>
+</td><td>('4050987', 'Jieqing Tan', 'jieqing tan')<br/>('40160496', 'Zhengfeng Hou', 'zhengfeng hou')</td><td></td></tr><tr><td>29a013b2faace976f2c532533bd6ab4178ccd348</td><td>This article has been accepted for inclusion in a future issue of this journal. Content is final as presented, with the exception of pagination.
+<br/>Hierarchical Manifold Learning With Applications
+<br/>to Supervised Classification for High-Resolution
+<br/>Remotely Sensed Images
+</td><td>('7192623', 'Hong-Bing Huang', 'hong-bing huang')<br/>('3239427', 'Hong Huo', 'hong huo')<br/>('1680725', 'Tao Fang', 'tao fang')</td><td></td></tr><tr><td>29921072d8628544114f68bdf84deaf20a8c8f91</td><td>Multi-Task Curriculum Transfer Deep Learning of Clothing Attributes
+<br/><b>School of EECS, Queen Mary University of London, UK</b></td><td>('40204089', 'Qi Dong', 'qi dong')<br/>('2073354', 'Shaogang Gong', 'shaogang gong')<br/>('2171228', 'Xiatian Zhu', 'xiatian zhu')</td><td>{q.dong, s.gong, xiatian.zhu}@qmul.ac.uk
+</td></tr><tr><td>2969f822b118637af29d8a3a0811ede2751897b5</td><td>Cascaded Shape Space Pruning for Robust Facial Landmark Detection
+<br/>Key Lab of Intelligent Information Processing of Chinese Academy of Sciences (CAS),
+<br/><b>Institute of Computing Technology, CAS, Beijing 100190, China</b></td><td>('1874505', 'Xiaowei Zhao', 'xiaowei zhao')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('1695600', 'Xiujuan Chai', 'xiujuan chai')<br/>('1710220', 'Xilin Chen', 'xilin chen')</td><td>{xiaowei.zhao,shiguang.shan,xiujuan.chai,xilin.chen}@vipl.ict.ac.cn
+</td></tr><tr><td>29756b6b16d7b06ea211f21cdaeacad94533e8b4</td><td>Thresholding Approach based on GPU for Facial
+<br/>Expression Recognition
+<br/>1 Benemérita Universidad Autónoma de Puebla, Faculty of Computer Science, Puebla, México
+<br/>2Instituto Tecnológico de Puebla, Puebla, México
+</td><td>('4348305', 'Jesús García-Ramírez', 'jesús garcía-ramírez')<br/>('3430302', 'Adolfo Aguilar-Rico', 'adolfo aguilar-rico')</td><td>gr_jesus@outlook.com,{aolvera,iolmos}@cs.buap.mx
+<br/>{kremhilda,adolforico2}@gmail.com
+</td></tr><tr><td>293193d24d5c4d2975e836034bbb2329b71c4fe7</td><td>Building a Corpus of Facial Expressions
+<br/>for Learning-Centered Emotions
+<br/>Instituto Tecnológico de Culiacán, Culiacán, Sinaloa,
+<br/>Mexico
+</td><td>('1744658', 'María Lucía Barrón-Estrada', 'maría lucía barrón-estrada')<br/>('38814197', 'Bianca Giovanna Aispuro-Medina', 'bianca giovanna aispuro-medina')<br/>('38906263', 'Elvia Minerva Valencia-Rodríguez', 'elvia minerva valencia-rodríguez')<br/>('38797488', 'Ana Cecilia Lara-Barrera', 'ana cecilia lara-barrera')</td><td>{lbarron, rzatarain, m06170904, m95170906, m15171452} @itculiacan.edu.mx
+</td></tr><tr><td>294bd7eb5dc24052237669cdd7b4675144e22306</td><td>International Journal of Science and Research (IJSR)
+<br/>ISSN (Online): 2319-7064
+<br/>Index Copernicus Value (2013): 6.14 | Impact Factor (2013): 4.438
+<br/>Automatic Face Annotation
+<br/>
+<br/><b>M.Tech Student, Mount Zion College of Engineering, Pathanamthitta, Kerala, India</b></td><td></td><td></td></tr><tr><td>2988f24908e912259d7a34c84b0edaf7ea50e2b3</td><td>A Model of Brightness Variations Due to
+<br/>Illumination Changes and Non-rigid Motion
+<br/>Using Spherical Harmonics
+<br/>Jos´e M. Buenaposada
+<br/>Dep. Ciencias de la Computaci´on,
+<br/>U. Rey Juan Carlos, Spain
+<br/>http://www.dia.fi.upm.es/~pcr
+<br/>Inst. for Systems and Robotics
+<br/>Inst. Superior T´ecnico, Portugal
+<br/>http://www.isr.ist.utl.pt/~adb
+<br/>Enrique Mu˜noz
+<br/>Facultad de Inform´atica,
+<br/>U. Complutense de Madrid, Spain
+<br/>Dep. de Inteligencia Artificial,
+<br/>U. Polit´ecnica de Madrid, Spain
+<br/>http://www.dia.fi.upm.es/~pcr
+<br/>http://www.dia.fi.upm.es/~pcr
+</td><td>('1714730', 'Alessio Del Bue', 'alessio del bue')<br/>('1778998', 'Luis Baumela', 'luis baumela')</td><td></td></tr><tr><td>29156e4fe317b61cdcc87b0226e6f09e416909e0</td><td></td><td></td><td></td></tr><tr><td>29f0414c5d566716a229ab4c5794eaf9304d78b6</td><td>Hindawi Publishing Corporation
+<br/>EURASIP Journal on Advances in Signal Processing
+<br/>Volume 2008, Article ID 579416, 17 pages
+<br/>doi:10.1155/2008/579416
+<br/>Review Article
+<br/>Biometric Template Security
+<br/><b>Michigan State University, 3115 Engineering Building</b><br/>East Lansing, MI 48824, USA
+<br/>Received 2 July 2007; Revised 28 September 2007; Accepted 4 December 2007
+<br/>Recommended by Arun Ross
+<br/>Biometric recognition offers a reliable solution to the problem of user authentication in identity management systems. With the
+<br/>widespread deployment of biometric systems in various applications, there are increasing concerns about the security and privacy
+<br/>of biometric technology. Public acceptance of biometrics technology will depend on the ability of system designers to demonstrate
+<br/>that these systems are robust, have low error rates, and are tamper proof. We present a high-level categorization of the various
+<br/>vulnerabilities of a biometric system and discuss countermeasures that have been proposed to address these vulnerabilities. In par-
+<br/>ticular, we focus on biometric template security which is an important issue because, unlike passwords and tokens, compromised
+<br/>biometric templates cannot be revoked and reissued. Protecting the template is a challenging task due to intrauser variability in the
+<br/>acquired biometric traits. We present an overview of various biometric template protection schemes and discuss their advantages
+<br/>and limitations in terms of security, revocability, and impact on matching accuracy. A template protection scheme with provable
+<br/>security and acceptable recognition performance has thus far remained elusive. Development of such a scheme is crucial as bio-
+<br/>metric systems are beginning to proliferate into the core physical and information infrastructure of our society.
+<br/>which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
+<br/>1.
+<br/>INTRODUCTION
+<br/>A reliable identity management system is urgently needed in
+<br/>order to combat the epidemic growth in identity theft and to
+<br/>meet the increased security requirements in a variety of ap-
+<br/>plications ranging from international border crossings to se-
+<br/>curing information in databases. Establishing the identity of
+<br/>a person is a critical task in any identity management system.
+<br/>Surrogate representations of identity such as passwords and
+<br/>ID cards are not sufficient for reliable identity determination
+<br/>because they can be easily misplaced, shared, or stolen. Bio-
+<br/>metric recognition is the science of establishing the identity
+<br/>of a person using his/her anatomical and behavioral traits.
+<br/>Commonly used biometric traits include fingerprint, face,
+<br/>iris, hand geometry, voice, palmprint, handwritten signa-
+<br/>tures, and gait (see Figure 1). Biometric traits have a number
+<br/>of desirable properties with respect to their use as an authen-
+<br/>tication token, namely, reliability, convenience, universality,
+<br/>and so forth. These characteristics have led to the widespread
+<br/>deployment of biometric authentication systems. But there
+<br/>are still some issues concerning the security of biometric
+<br/>recognition systems that need to be addressed in order to en-
+<br/>sure the integrity and public acceptance of these systems.
+<br/>There are five major components in a generic biomet-
+<br/>ric authentication system, namely, sensor, feature extrac-
+<br/>tor, template database, matcher, and decision module (see
+<br/>Figure 2). Sensor is the interface between the user and the
+<br/>authentication system and its function is to scan the bio-
+<br/>metric trait of the user. Feature extraction module processes
+<br/>the scanned biometric data to extract the salient information
+<br/>(feature set) that is useful in distinguishing between differ-
+<br/>ent users. In some cases, the feature extractor is preceded
+<br/>by a quality assessment module which determines whether
+<br/>the scanned biometric trait is of sufficient quality for fur-
+<br/>ther processing. During enrollment, the extracted feature
+<br/>set is stored in a database as a template (XT) indexed by
+<br/>the user’s identity information. Since the template database
+<br/>could be geographically distributed and contain millions of
+<br/>records (e.g., in a national identification system), maintain-
+<br/>ing its security is not a trivial task. The matcher module is
+<br/>usually an executable program, which accepts two biomet-
+<br/>ric feature sets XT and XQ (from template and query, resp.)
+<br/>as inputs, and outputs a match score (S) indicating the sim-
+<br/>ilarity between the two sets. Finally, the decision module
+<br/>makes the identity decision and initiates a response to the
+<br/>query.
+</td><td>('6680444', 'Anil K. Jain', 'anil k. jain')<br/>('34633765', 'Karthik Nandakumar', 'karthik nandakumar')<br/>('2743820', 'Abhishek Nagar', 'abhishek nagar')<br/>('6680444', 'Anil K. Jain', 'anil k. jain')</td><td>Correspondence should be addressed to Karthik Nandakumar, nandakum@cse.msu.edu
+</td></tr><tr><td>293ade202109c7f23637589a637bdaed06dc37c9</td><td></td><td></td><td></td></tr><tr><td>7c61d21446679776f7bdc7afd13aedc96f9acac1</td><td>Hierarchical Label Inference for Video Classification
+<br/><b>Simon Fraser University</b><br/><b>Simon Fraser University</b><br/><b>Simon Fraser University</b></td><td>('3079079', 'Nelson Nauata', 'nelson nauata')<br/>('2847110', 'Jonathan Smith', 'jonathan smith')<br/>('10771328', 'Greg Mori', 'greg mori')</td><td>nnauata@sfu.ca
+<br/>jws4@sfu.ca
+<br/>mori@cs.sfu.ca
+</td></tr><tr><td>7cee802e083c5e1731ee50e731f23c9b12da7d36</td><td>2B3C: 2 Box 3 Crop of Facial Image for Gender Classification with Convolutional
+<br/>Networks
+<br/>Department of Electronics and Communication Engineering and
+<br/><b>Computer Vision Group, L. D. College of Engineering, Ahmedabad, India</b></td><td>('23922616', 'Vandit Gajjar', 'vandit gajjar')</td><td> gajjar.vandit.381@ldce.ac.in
+</td></tr><tr><td>7c47da191f935811f269f9ba3c59556c48282e80</td><td>Robust Eye Centers Localization
+<br/>with Zero–Crossing Encoded Image Projections
+<br/>Image Processing and Analysis Laboratory
+<br/><b>University Politehnica of Bucharest, Romania, Address Splaiul Independent ei</b><br/>Image Processing and Analysis Laboratory
+<br/><b>University Politehnica of Bucharest, Romania, Address Splaiul Independent ei</b><br/>Image Processing and Analysis Laboratory
+<br/><b>University Politehnica of Bucharest, Romania, Address Splaiul Independent ei</b></td><td>('2143956', 'Laura Florea', 'laura florea')<br/>('2760434', 'Corneliu Florea', 'corneliu florea')<br/>('2905899', 'Constantin Vertan', 'constantin vertan')</td><td>laura.florea@upb.ro
+<br/>corneliu.florea@upb.ro
+<br/>constantin.vertan@upb.ro
+</td></tr><tr><td>7c7ab59a82b766929defd7146fd039b89d67e984</td><td>Improving Multiview Face Detection with
+<br/>Multi-Task Deep Convolutional Neural Networks
+<br/>Microsoft Research
+<br/>One Microsoft Way, Redmond WA 98052
+</td><td>('1706673', 'Cha Zhang', 'cha zhang')<br/>('1809184', 'Zhengyou Zhang', 'zhengyou zhang')</td><td></td></tr><tr><td>7ca337735ec4c99284e7c98f8d61fb901dbc9015</td><td>Proceedings of the 8th International
+<br/>IEEE Conference on Intelligent Transportation Systems
+<br/>Vienna, Austria, September 13-16, 2005
+<br/>TC4.2
+<br/>Driver Activity Monitoring through Supervised and Unsupervised Learning
+<br/>Harini Veeraraghavan Stefan Atev Nathaniel Bird Paul Schrater Nikolaos Papanikolopoulos†
+<br/>Department of Computer Science and Engineering
+<br/><b>University of Minnesota</b></td><td></td><td>{harini,atev,bird,schrater,npapas}@cs.umn.edu
+</td></tr><tr><td>7c1cfab6b60466c13f07fe028e5085a949ec8b30</td><td>Deep Feature Consistent Variational Autoencoder
+<br/><b>University of Nottingham, Ningbo China</b><br/><b>Shenzhen University, Shenzhen China</b><br/><b>University of Nottingham, Ningbo China</b><br/><b>University of Nottingham, Ningbo China</b></td><td>('3468964', 'Xianxu Hou', 'xianxu hou')<br/>('1687690', 'Linlin Shen', 'linlin shen')<br/>('39508183', 'Ke Sun', 'ke sun')<br/>('1698461', 'Guoping Qiu', 'guoping qiu')</td><td>xianxu.hou@nottingham.edu.cn
+<br/>llshen@szu.edu.cn
+<br/>ke.sun@nottingham.edu.cn
+<br/>guoping.qiu@nottingham.edu.cn
+</td></tr><tr><td>7c45b5824645ba6d96beec17ca8ecfb22dfcdd7f</td><td>News image annotation on a large parallel text-image corpus
+<br/>Universit´e de Rennes 1/IRISA, CNRS/IRISA, INRIA Rennes-Bretagne Atlantique
+<br/>Campus de Beaulieu
+<br/>35042 Rennes Cedex, France
+</td><td>('1694537', 'Pierre Tirilly', 'pierre tirilly')<br/>('1735666', 'Vincent Claveau', 'vincent claveau')<br/>('2436627', 'Patrick Gros', 'patrick gros')</td><td>ptirilly@irisa.fr, vclaveau@irisa.fr, pgros@inria.fr
+</td></tr><tr><td>7c17280c9193da3e347416226b8713b99e7825b8</td><td>VideoCapsuleNet: A Simplified Network for Action
+<br/>Detection
+<br/>Kevin Duarte
+<br/>Yogesh S Rawat
+<br/>Center for Research in Computer Vision
+<br/><b>University of Central Florida</b><br/>Orlando, FL 32816
+</td><td>('1745480', 'Mubarak Shah', 'mubarak shah')</td><td>kevin_duarte@knights.ucf.edu
+<br/>yogesh@crcv.ucf.edu
+<br/>shah@crcv.ucf.edu
+</td></tr><tr><td>7cffcb4f24343a924a8317d560202ba9ed26cd0b</td><td>The Unconstrained Ear Recognition Challenge
+<br/><b>University of Ljubljana</b><br/>Ljubljana, Slovenia
+<br/>IIT Kharagpur
+<br/>Kharagpur, India
+<br/><b>University of Colorado Colorado Springs</b><br/>Colorado Springs, CO, USA
+<br/><b>Islamic Azad University</b><br/>Qazvin, Iran
+<br/><b>Imperial College London</b><br/>London, UK
+<br/>ITU Department of Computer Engineering
+<br/>Istanbul, Turkey
+</td><td>('34862665', 'Peter Peer', 'peter peer')<br/>('3110004', 'Anjith George', 'anjith george')<br/>('2173052', 'Adil Ahmad', 'adil ahmad')<br/>('39000630', 'Elshibani Omar', 'elshibani omar')<br/>('1760117', 'Terrance E. Boult', 'terrance e. boult')<br/>('3062107', 'Reza Safdari', 'reza safdari')<br/>('47943220', 'Yuxiang Zhou', 'yuxiang zhou')<br/>('23981209', 'Dogucan Yaman', 'dogucan yaman')</td><td>ziga.emersic@fri.uni-lj.si
+</td></tr><tr><td>7c0a6824b556696ad7bdc6623d742687655852db</td><td>18th Telecommunications forum TELFOR 2010
+<br/>Serbia, Belgrade, November 23-25, 2010.
+<br/>MPCA+DATER: A Novel Approach for Face
+<br/>Recognition Based on Tensor Objects
+<br/>Ali. A. Shams Baboli, Member, IEEE, G. Rezai-rad, Member, IEEE, Aref. Shams Baboli
+</td><td></td><td></td></tr><tr><td>7c95449a5712aac7e8c9a66d131f83a038bb7caa</td><td>This is an author produced version of Facial first impressions from another angle: How
+<br/>social judgements are influenced by changeable and invariant facial properties.
+<br/>White Rose Research Online URL for this paper:
+<br/>http://eprints.whiterose.ac.uk/102935/
+<br/>Article:
+<br/>Rhodes (2017) Facial first impressions from another angle: How social judgements are
+<br/>influenced by changeable and invariant facial properties. British journal of psychology. pp.
+<br/>397-415. ISSN 0007-1269
+<br/>https://doi.org/10.1111/bjop.12206
+<br/>promoting access to
+<br/>White Rose research papers
+<br/>http://eprints.whiterose.ac.uk/
+</td><td>('16854522', 'Clare', 'clare')<br/>('9384336', 'Young', 'young')</td><td>eprints@whiterose.ac.uk
+</td></tr><tr><td>7c4c442e9c04c6b98cd2aa221e9d7be15efd8663</td><td>Classifier Learning with Hidden Information
+<br/><b>ECSE, Rensselaer Polytechnic Institute, Troy, NY</b></td><td>('2860279', 'Ziheng Wang', 'ziheng wang')<br/>('1726583', 'Qiang Ji', 'qiang ji')</td><td>wangz10@rpi.edu
+<br/>jiq@rpi.edu
+</td></tr><tr><td>7c3e09e0bd992d3f4670ffacb4ec3a911141c51f</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Transferring Object-Scene Convolutional Neural Networks for
+<br/>Event Recognition in Still Images
+<br/>Received: date / Accepted: date
+</td><td>('33345248', 'Limin Wang', 'limin wang')</td><td></td></tr><tr><td>7c2ec6f4ab3eae86e0c1b4f586e9c158fb1d719d</td><td>Dissimilarity-Based Classifications in Eigenspaces(cid:63)
+<br/><b>Myongji University, Yongin, 449-728 South</b><br/><b>Faculty of Electrical Engineering, Mathematics and Computer Science, Delft University of</b></td><td>('34959719', 'Sang-Woon Kim', 'sang-woon kim')<br/>('1747298', 'Robert P. W. Duin', 'robert p. w. duin')</td><td>Korea. e-mail : kimsw@mju.ac.kr
+<br/>Technology, The Netherlands. e-mail : r.p.w.duin@tudelft.nl
+</td></tr><tr><td>7cf8a841aad5b7bdbea46a7bb820790e9ce12d0b</td><td>SUPERVISED HEAT KERNEL LPP
+<br/>METHOD FOR FACE RECOGNITION
+<br/><b>Utah State University, Logan UT</b></td><td>('1725739', 'Xiaojun Qi', 'xiaojun qi')</td><td>cryshan@cc.usu.edu and xqi@cc.usu.edu
+</td></tr><tr><td>7c9622ad1d8971cd74cc9e838753911fe27ccac4</td><td>Representation Learning with Smooth
+<br/>Autoencoder
+<br/>Key Lab of Intelligent Information Processing of Chinese Academy of Sciences (CAS),
+<br/><b>Institute of Computing Technology, CAS, Beijing, 100190, China</b></td><td>('2582309', 'Kongming Liang', 'kongming liang')<br/>('1783542', 'Hong Chang', 'hong chang')<br/>('10338111', 'Zhen Cui', 'zhen cui')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('1710220', 'Xilin Chen', 'xilin chen')</td><td>{kongming.liang, hong.chang, zhen.cui, shiguang.shan, xilin.chen}@vipl.ict.ac.cn
+</td></tr><tr><td>7c2c9b083817f7a779d819afee383599d2e97ed8</td><td>Disentangling Motion, Foreground and Background Features in Videos
+<br/><b>Beihang University</b><br/>Beijing, China
+<br/>V´ıctor Campos
+<br/>Xavier Giro-i-Nieto
+<br/>Barcelona Supercomputing Center
+<br/>Universitat Politecnica de Catalunya
+<br/>Barcelona, Catalonia/Spain
+<br/>Barcelona, Catalonia/Spain
+<br/>Barcelona Supercomputing Center
+<br/>Barcelona, Catalonia/Spain
+<br/>Cristian Canton Ferrer
+<br/>Facebook
+<br/>Seattle (WA), USA
+</td><td>('10668384', 'Xunyu Lin', 'xunyu lin')<br/>('1711068', 'Jordi Torres', 'jordi torres')</td><td>xunyulin2017@outlook.com
+<br/>victor.campos@bsc.es
+<br/>xavier.giro@upc.edu
+<br/>jordi.torres@bsc.es
+<br/>ccanton@fb.com
+</td></tr><tr><td>7c45339253841b6f0efb28c75f2c898c79dfd038</td><td>Unsupervised Joint Alignment of Complex Images
+<br/><b>University of Massachusetts Amherst</b><br/>Amherst, MA
+<br/>Erik Learned-Miller
+</td><td>('3219900', 'Gary B. Huang', 'gary b. huang')<br/>('2246870', 'Vidit Jain', 'vidit jain')</td><td>fgbhuang,vidit,elmg@cs.umass.edu
+</td></tr><tr><td>7c825562b3ff4683ed049a372cb6807abb09af2a</td><td>Finding Tiny Faces
+<br/>Supplementary Materials
+<br/><b>Robotics Institute</b><br/><b>Carnegie Mellon University</b><br/>1. Error analysis
+<br/>Quantitative analysis We plot the distribution of error modes among false positives in Fig. 1 and the impact of object
+<br/>characteristics on detection performance in Fig. 2 and Fig. 3.
+<br/>Qualitative analysis We show top 20 scoring false positives in Fig. 4.
+<br/>2. Experimental details
+<br/>Multi-scale features Inspired by the way [3] trains “FCN-8s at-once”, we scale the learning rate of predictor built on
+<br/>top of each layer by a fixed constant. Specifically, we use a scaling factor of 1 for res4, 0.1 for res3, and 0.01 for res2.
+<br/>One more difference between our model and [3] is that: instead of predicting at original resolution, our model predicts
+<br/>at the resolution of res3 feature (downsampled by 8X comparing to input resolution).
+<br/>Input sampling We first randomly re-scale the input image by 0.5X, 1X, or 2X. Then we randomly crop a 500x500
+<br/>image region out of the re-scaled input. We pad with average RGB value (prior to average subtraction) when cropping
+<br/>outside image boundary.
+<br/>Border cases Similar to [2], we ignore gradients coming from heatmap locations whose detection windows cross the
+<br/>image boundary. The only difference is, we treat padded average pixels (as described in Input sampling) as outside
+<br/>image boundary as well.
+<br/>Online hard mining and balanced sampling We apply hard mining on both positive and negative examples. Our
+<br/>implementation is simpler yet still effective comparing to [4]. We set a small threshold (0.03) on classification loss
+<br/>to filter out easy locations. Then we sample at most 128 locations for both positive and negative (respectively) from
+<br/>remaining ones whose losses are above the threshold. We compare training with and without hard mining on validation
+<br/>performance in Table 1.
+<br/>Loss function Our loss function is formulated in the same way as [2]. Note that we also use Huber loss as the loss
+<br/>function for bounding box regression.
+<br/>Bounding box regression Our bounding box regression is formulated as [2] and trained jointly with classification
+<br/>using stochastic gradient descent. We compare between testing with and without regression in terms of performance
+<br/>on WIDER FACE validation set.
+</td><td>('1770537', 'Deva Ramanan', 'deva ramanan')</td><td>{peiyunh,deva}@cs.cmu.edu
+</td></tr><tr><td>7c7b0550ec41e97fcfc635feffe2e53624471c59</td><td>1051-4651/14 $31.00 © 2014 IEEE
+<br/>DOI 10.1109/ICPR.2014.124
+<br/>660
+</td><td></td><td></td></tr><tr><td>7ce03597b703a3b6754d1adac5fbc98536994e8f</td><td></td><td></td><td></td></tr><tr><td>7c36afc9828379de97f226e131390af719dbc18d</td><td>Unsupervised Face-Name Association
+<br/>via Commute Distance
+<br/>1Zhejiang Provincial Key Laboratory of Service Robot
+<br/><b>College of Computer Science, Zhejiang University, Hangzhou, China</b><br/><b>State Key Lab of CADandCG, College of Computer Science, Zhejiang University, Hangzhou, China</b></td><td>('4140420', 'Jiajun Bu', 'jiajun bu')<br/>('40155478', 'Bin Xu', 'bin xu')<br/>('2484982', 'Chenxia Wu', 'chenxia wu')<br/>('2588203', 'Chun Chen', 'chun chen')<br/>('1704030', 'Jianke Zhu', 'jianke zhu')<br/>('1724421', 'Deng Cai', 'deng cai')<br/>('3945955', 'Xiaofei He', 'xiaofei he')</td><td>{bjj,xbzju,chenxiawu,chenc,jkzhu}@zju.edu.cn
+<br/>{dengcai,xiaofeihe}@cad.zju.edu.cn
+</td></tr><tr><td>7c119e6bdada2882baca232da76c35ae9b5277f8</td><td>Facial Expression Recognition Using Embedded
+<br/>Hidden Markov Model
+<br/><b>Intelligence Computing Research Center</b><br/>HIT Shenzhen Graduate School
+<br/>Shenzhen, China
+</td><td>('24233679', 'Languang He', 'languang he')<br/>('1747105', 'Xuan Wang', 'xuan wang')<br/>('10106946', 'Chenglong Yu', 'chenglong yu')<br/>('38700402', 'Kun Wu', 'kun wu')</td><td>{telent, wangxuan, ycl, wukun} @cs.hitsz.edu.cn
+</td></tr><tr><td>7ca7255c2e0c86e4adddbbff2ce74f36b1dc522d</td><td>Stereo Matching for Unconstrained Face Recognition
+<br/>Ph.D. Proposal
+<br/><b>University of Maryland</b><br/>Department of Computer Science
+<br/><b>College Park, MD</b><br/>May 10, 2009
+</td><td>('38171682', 'Carlos D. Castillo', 'carlos d. castillo')</td><td>carlos@cs.umd.edu
+</td></tr><tr><td>7c42371bae54050dbbf7ded1e7a9b4109a23a482</td><td>The International Arab Journal of Information Technology, Vol. 12, No. 2, March 2015 183
+<br/>Optimized Features Selection using Hybrid PSO-
+<br/>GA for Multi-View Gender Classification
+<br/><b>Foundation University Rawalpindi Campus, Pakistan</b><br/><b>University of Central Punjab, Pakistan</b><br/><b>University of Dammam, Saudi Arabia</b><br/>4Department of Computer Science, SZABIST, Pakistan
+</td><td>('1723986', 'Muhammad Nazir', 'muhammad nazir')<br/>('11616523', 'Muhammad Khan', 'muhammad khan')</td><td></td></tr><tr><td>7c953868cd51f596300c8231192d57c9c514ae17</td><td>Detecting and Aligning Faces by Image Retrieval
+<br/>Zhe Lin2
+<br/><b>Northwestern University</b><br/>2Adobe Research
+<br/>2145 Sheridan Road, Evanston, IL 60208
+<br/>345 Park Ave, San Jose, CA 95110
+</td><td>('1720987', 'Xiaohui Shen', 'xiaohui shen')<br/>('1721019', 'Jonathan Brandt', 'jonathan brandt')<br/>('1736695', 'Ying Wu', 'ying wu')</td><td>{xsh835, yingwu}@eecs.northwestern.edu
+<br/>{zlin, jbrandt}@adobe.com
+</td></tr><tr><td>7c6dbaebfe14878f3aee400d1378d90d61373921</td><td>A Novel Biometric Feature Extraction Algorithm using Two
+<br/>Dimensional Fisherface in 2DPCA subspace for Face Recognition
+<br/>School of Electrical, Electronic and Computer Engineering
+<br/><b>University of Newcastle</b><br/>Newcastle upon Tyne, NE1 7RU
+<br/>UNITED KINDOM
+</td><td>('3156162', 'R. M. MUTELO', 'r. m. mutelo')</td><td></td></tr><tr><td>7c9a65f18f7feb473e993077d087d4806578214e</td><td>SpringerLink - Zeitschriftenbeitrag
+<br/>http://www.springerlink.com/content/93hr862660nl1164/?p=abe5352...
+<br/>Deutsch
+<br/>Deutsch
+<br/>Go
+<br/>Vorherige Beitrag Nächste Beitrag
+<br/>Beitrag markieren
+<br/>In den Warenkorb legen
+<br/>Zu gespeicherten Artikeln
+<br/>hinzufügen
+<br/>Permissions & Reprints
+<br/>Diesen Artikel empfehlen
+<br/>Ergebnisse
+<br/>finden
+<br/>Erweiterte Suche
+<br/>Go
+<br/>im gesamten Inhalt
+<br/>in dieser Zeitschrift
+<br/>in diesem Heft
+<br/>Diesen Beitrag exportieren
+<br/>Diesen Beitrag exportieren als RIS
+<br/>| Text
+<br/>Text
+<br/>PDF
+<br/>PDF ist das gebräuchliche Format
+<br/>für Online Publikationen. Die Größe
+<br/>dieses Dokumentes beträgt 564
+<br/>Kilobyte. Je nach Art Ihrer
+<br/>Internetverbindung kann der
+<br/>Download einige Zeit in Anspruch
+<br/>nehmen.
+<br/>öffnen: Gesamtdokument
+<br/>Publikationsart Subject Collections
+<br/>Zurück zu: Journal Issue
+<br/>Athens Authentication Point
+<br/>Zeitschriftenbeitrag
+<br/>Willkommen!
+<br/>Um unsere personalisierten
+<br/>Angebote nutzen zu können,
+<br/>müssen Sie angemeldet sein.
+<br/>Login
+<br/>Jetzt registrieren
+<br/>Zugangsdaten vergessen?
+<br/>Hilfe.
+<br/>Mein Menü
+<br/>Markierte Beiträge
+<br/>Alerts
+<br/>Meine Bestellungen
+<br/>Private emotions versus social interaction: a data-driven approach towards
+<br/>analysing emotion in speech
+<br/>Zeitschrift
+<br/>Verlag
+<br/>ISSN
+<br/>Heft
+<br/>Kategorie
+<br/>DOI
+<br/>Seiten
+<br/>Subject Collection
+<br/>SpringerLink Date
+<br/>User Modeling and User-Adapted Interaction
+<br/>Springer Netherlands
+<br/>0924-1868 (Print) 1573-1391 (Online)
+<br/>Volume 18, Numbers 1-2 / Februar 2008
+<br/>Original Paper
+<br/>10.1007/s11257-007-9039-4
+<br/>175-206
+<br/>Informatik
+<br/>Freitag, 12. Oktober 2007
+<br/>Gespeicherte Beiträge
+<br/>Alle
+<br/>Favoriten
+<br/>(1) Lehrstuhl für Mustererkennung, FAU Erlangen – Nürnberg, Martensstr. 3, 91058 Erlangen,
+<br/>Germany
+<br/>Received: 3 July 2006 Accepted: 14 January 2007 Published online: 12 October 2007
+</td><td>('1745089', 'Anton Batliner', 'anton batliner')<br/>('1732747', 'Stefan Steidl', 'stefan steidl')<br/>('2596771', 'Christian Hacker', 'christian hacker')<br/>('1739326', 'Elmar Nöth', 'elmar nöth')</td><td></td></tr><tr><td>7c1e1c767f7911a390d49bed4f73952df8445936</td><td>NON-RIGID OBJECT DETECTION WITH LOCAL INTERLEAVED SEQUENTIAL ALIGNMENT (LISA)
+<br/>Non-Rigid Object Detection with Local
+<br/>Interleaved Sequential Alignment (LISA)
+<br/>and Tom´aˇs Svoboda, Member, IEEE
+</td><td>('35274952', 'Karel Zimmermann', 'karel zimmermann')<br/>('2687885', 'David Hurych', 'david hurych')</td><td></td></tr><tr><td>7cf579088e0456d04b531da385002825ca6314e2</td><td>Emotion Detection on TV Show Transcripts with
+<br/>Sequence-based Convolutional Neural Networks
+<br/>Mathematics and Computer Science
+<br/>Mathematics and Computer Science
+<br/><b>Emory University</b><br/>Atlanta, GA 30322, USA
+<br/><b>Emory University</b><br/>Atlanta, GA 30322, USA
+</td><td>('10669356', 'Sayyed M. Zahiri', 'sayyed m. zahiri')<br/>('4724587', 'Jinho D. Choi', 'jinho d. choi')</td><td>sayyed.zahiri@emory.edu
+<br/>jinho.choi@emory.edu
+</td></tr><tr><td>7c80d91db5977649487388588c0c823080c9f4b4</td><td>DocFace: Matching ID Document Photos to Selfies∗
+<br/><b>Michigan State University</b><br/>East Lansing, Michigan, USA
+</td><td>('9644181', 'Yichun Shi', 'yichun shi')<br/>('1739705', 'Anil K. Jain', 'anil k. jain')</td><td>shiyichu@msu.edu, jain@cse.msu.edu
+</td></tr><tr><td>7c349932a3d083466da58ab1674129600b12b81c</td><td></td><td></td><td></td></tr><tr><td>7c30ea47f5ae1c5abd6981d409740544ed16ed16</td><td>ROITBERG, AL-HALAH, STIEFELHAGEN: NOVELTY DETECTION FOR ACTION RECOGNITION
+<br/>Informed Democracy: Voting-based Novelty
+<br/>Detection for Action Recognition
+<br/><b>Karlsruhe Institute of Technology</b><br/>76131 Karlsruhe,
+<br/>Germany
+</td><td>('33390229', 'Alina Roitberg', 'alina roitberg')<br/>('2256981', 'Ziad Al-Halah', 'ziad al-halah')<br/>('1742325', 'Rainer Stiefelhagen', 'rainer stiefelhagen')</td><td>alina.roitberg@kit.edu
+<br/>ziad.al-halah@kit.edu
+<br/>rainer.stiefelhagen@kit.edu
+</td></tr><tr><td>1648cf24c042122af2f429641ba9599a2187d605</td><td>Boosting Cross-Age Face Verification via Generative Age Normalization
+<br/>(cid:2) Orange Labs, 4 rue Clos Courtel, 35512 Cesson-S´evign´e, France
+<br/>† Eurecom, 450 route des Chappes, 06410 Biot, France
+</td><td>('3116433', 'Grigory Antipov', 'grigory antipov')<br/>('1709849', 'Jean-Luc Dugelay', 'jean-luc dugelay')<br/>('2341854', 'Moez Baccouche', 'moez baccouche')</td><td>{grigory.antipov,moez.baccouche}@orange.com
+<br/>jean-luc.dugelay@eurecom.fr
+</td></tr><tr><td>162403e189d1b8463952fa4f18a291241275c354</td><td>Action Recognition with Spatio-Temporal
+<br/>Visual Attention on Skeleton Image Sequences
+<br/>With a strong ability of modeling sequential data, Recur-
+<br/>rent Neural Networks (RNN) with Long Short-Term Memory
+<br/>(LSTM) neurons outperform the previous hand-crafted feature
+<br/>based methods [9], [10]. Each skeleton frame is converted into
+<br/>a feature vector and the whole sequence is fed into the RNN.
+<br/>Despite the strong ability in modeling temporal sequences,
+<br/>RNN structures lack the ability to efficiently learn the spatial
+<br/>relations between the joints. To better use spatial information,
+<br/>a hierarchical structure is proposed in [11], [12] that feeds
+<br/>the joints into the network as several pre-defined body part
+<br/>groups. However,
+<br/>limit
+<br/>the effectiveness of representing spatial relations. A spatio-
+<br/>temporal 2D LSTM (ST-LSTM) network [13] is proposed
+<br/>to learn the spatial and temporal relations simultaneously.
+<br/>Furthermore, a two-stream RNN structure [14] is proposed to
+<br/>learn the spatio-temporal relations with two RNN branches.
+<br/>the pre-defined body regions still
+</td><td>('21518096', 'Zhengyuan Yang', 'zhengyuan yang')<br/>('3092578', 'Yuncheng Li', 'yuncheng li')<br/>('1706007', 'Jianchao Yang', 'jianchao yang')<br/>('33642939', 'Jiebo Luo', 'jiebo luo')</td><td></td></tr><tr><td>160259f98a6ec4ec3e3557de5e6ac5fa7f2e7f2b</td><td>Discriminant Multi-Label Manifold Embedding for Facial Action Unit
+<br/>Detection
+<br/>Signal Procesing Laboratory (LTS5), ´Ecole Polytechnique F´ed´erale de Lausanne, Switzerland
+</td><td>('1697965', 'Hua Gao', 'hua gao')<br/>('1710257', 'Jean-Philippe Thiran', 'jean-philippe thiran')</td><td>anil.yuce@epfl.ch, hua.gao@epfl.ch, jean-philippe.thiran@epfl.ch
+</td></tr><tr><td>16671b2dc89367ce4ed2a9c241246a0cec9ec10e</td><td>2006
+<br/>Detecting the Number of Clusters
+<br/>in n-Way Probabilistic Clustering
+</td><td>('1788526', 'Zhaoshui He', 'zhaoshui he')<br/>('1747156', 'Andrzej Cichocki', 'andrzej cichocki')<br/>('1795838', 'Shengli Xie', 'shengli xie')<br/>('1775180', 'Kyuwan Choi', 'kyuwan choi')</td><td></td></tr><tr><td>16fdd6d842475e6fbe58fc809beabbed95f0642e</td><td>Learning Temporal Embeddings for Complex Video Analysis
+<br/><b>Stanford University, 2Simon Fraser University</b></td><td>('34066479', 'Vignesh Ramanathan', 'vignesh ramanathan')<br/>('10771328', 'Greg Mori', 'greg mori')<br/>('3216322', 'Li Fei-Fei', 'li fei-fei')</td><td>{vigneshr, kdtang}@cs.stanford.edu, mori@cs.sfu.ca, feifeili@cs.stanford.edu
+</td></tr><tr><td>16bce9f940bb01aa5ec961892cc021d4664eb9e4</td><td>Mutual Component Analysis for Heterogeneous Face Recognition
+<br/>39
+<br/>Heterogeneous face recognition, also known as cross-modality face recognition or inter-modality face recogni-
+<br/>tion, refers to matching two face images from alternative image modalities. Since face images from different
+<br/>image modalities of the same person are associated with the same face object, there should be mutual com-
+<br/>ponents that reflect those intrinsic face characteristics that are invariant to the image modalities. Motivated
+<br/>by this rationality, we propose a novel approach called mutual component analysis (MCA) to infer the mu-
+<br/>tual components for robust heterogeneous face recognition. In the MCA approach, a generative model is first
+<br/>proposed to model the process of generating face images in different modalities, and then an Expectation
+<br/>Maximization (EM) algorithm is designed to iteratively learn the model parameters. The learned generative
+<br/>model is able to infer the mutual components (which we call the hidden factor, where hidden means the
+<br/>factor is unreachable and invisible, and can only be inferred from observations) that are associated with
+<br/>the person’s identity, thus enabling fast and effective matching for cross-modality face recognition. To en-
+<br/>hance recognition performance, we propose an MCA-based multi-classifier framework using multiple local
+<br/>features. Experimental results show that our new approach significantly outperforms the state-of-the-art
+<br/>results on two typical application scenarios, sketch-to-photo and infrared-to-visible face recognition.
+<br/>Categories and Subject Descriptors: I.5.1 [Pattern Recognition]: Models
+<br/>General Terms: Design, Algorithms, Performance
+<br/>Additional Key Words and Phrases: Face recognition, heterogeneous face recognition, mutual component
+<br/>analysis (MCA)
+<br/>ACM Reference Format:
+<br/>Heterogeneous Face Recognition ACM Trans. Intell. Syst. Technol. 9, 4, Article 39 (July 2015), 22 pages.
+<br/>DOI: http://dx.doi.org/10.1145/2807705
+<br/>This work was supported by grants from National Natural Science Foundation of China (61103164 and
+<br/>61125106), Natural Science Foundation of Guangdong Province (2014A030313688), Australian Research
+<br/>Council Projects (FT-130101457 and LP-140100569), Key Laboratory of Human-Machine Intelligence-
+<br/><b>Synergy Systems, Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences, Guangdong</b><br/>Innovative Research Team Program (No.201001D0104648280), the Key Research Program of the Chinese
+<br/><b>Academy of Sciences (Grant No. KGZD-EW-T03), and project MMT-8115038 of the Shun Hing Institute of</b><br/><b>Advanced Engineering, The Chinese University of Hong Kong</b><br/><b>Author s addresses: Z. Li and D. Gong, Shenzhen Institutes of Advanced Technology, Chinese Academy</b><br/><b>tum Computation and Intelligent Systems, Faculty of Engineering and Information Technology, University</b><br/><b>Key Laboratory of Transient Optics and Photonics, Xi an Institute of Optics and Precision Mechanics, Chi</b><br/>Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted
+<br/>without fee provided that copies are not made or distributed for profit or commercial advantage and that
+<br/>copies bear this notice and the full citation on the first page. Copyrights for components of this work owned
+</td><td>('1911510', 'Zhifeng Li', 'zhifeng li')<br/>('2856494', 'Dihong Gong', 'dihong gong')<br/>('20638185', 'Qiang Li', 'qiang li')<br/>('1692693', 'Dacheng Tao', 'dacheng tao')<br/>('1720243', 'Xuelong Li', 'xuelong li')<br/>('1911510', 'Zhifeng Li', 'zhifeng li')<br/>('20638185', 'Qiang Li', 'qiang li')<br/>('1692693', 'Dacheng Tao', 'dacheng tao')<br/>('1720243', 'Xuelong Li', 'xuelong li')</td><td>of Sciences, P. R. China; e-mail: {zhifeng.li, dh.gong}@siat.ac.cn; Q. Li and D. Tao, Centre for Quan-
+<br/>of Technology Sydney, 81 Broadway, Ultimo, NSW 2007, Australia; e-mail: qiang.li-2@student.uts.edu.au,
+<br/>dacheng.tao@uts.edu.au; X. Li, the Center for OPTical IMagery Analysis and Learning (OPTIMAL), State
+<br/>nese Academy of Sciences, Xi’an 710119, Shaanxi, China; e-mail: xuelong li@opt.ac.cn.
+</td></tr><tr><td>16de1324459fe8fdcdca80bba04c3c30bb789bdf</td><td></td><td></td><td></td></tr><tr><td>16892074764386b74b6040fe8d6946b67a246a0b</td><td></td><td></td><td></td></tr><tr><td>16395b40e19cbc6d5b82543039ffff2a06363845</td><td>Action Recognition in Video Using Sparse Coding and Relative Features
+<br/>Anal´ı Alfaro
+<br/>P. Universidad Catolica de Chile
+<br/>P. Universidad Catolica de Chile
+<br/>P. Universidad Catolica de Chile
+<br/>Santiago, Chile
+<br/>Santiago, Chile
+<br/>Santiago, Chile
+</td><td>('1797475', 'Domingo Mery', 'domingo mery')<br/>('7263603', 'Alvaro Soto', 'alvaro soto')</td><td>ajalfaro@uc.cl
+<br/>dmery@ing.puc.cl
+<br/>asoto@ing.uc.cl
+</td></tr><tr><td>1677d29a108a1c0f27a6a630e74856e7bddcb70d</td><td>Efficient Misalignment-Robust Representation
+<br/>for Real-Time Face Recognition
+<br/><b>The Hong Kong Polytechnic University, Hong Kong</b></td><td>('5828998', 'Meng Yang', 'meng yang')<br/>('36685537', 'Lei Zhang', 'lei zhang')<br/>('1698371', 'David Zhang', 'david zhang')</td><td>{csmyang,cslzhang}@comp.polyu.edu.hk
+</td></tr><tr><td>16b9d258547f1eccdb32111c9f45e2e4bbee79af</td><td>2006 Xiyuan Ave.
+<br/>Chengdu, Sichuan 611731
+<br/>2006 Xiyuan Ave.
+<br/>Chengdu, Sichuan 611731
+<br/><b>University of Electronic Science and Technology of China</b><br/><b>Johns Hopkins University</b><br/>3400 N. Charles St.
+<br/>Baltimore, Maryland 21218
+<br/><b>Johns Hopkins University</b><br/>3400 N. Charles St.
+<br/>Baltimore, Maryland 21218
+<br/>NormFace: L2 Hypersphere Embedding for Face Verification
+<br/><b>University of Electronic Science and Technology of China</b></td><td>('1709439', 'Jian Cheng', 'jian cheng')<br/>('40031188', 'Xiang Xiang', 'xiang xiang')<br/>('1746141', 'Alan L. Yuille', 'alan l. yuille')<br/>('39369840', 'Feng Wang', 'feng wang')</td><td>feng.w(cid:29)@gmail.com
+<br/>chengjian@uestc.edu.cn
+<br/>xxiang@cs.jhu.edu
+<br/>alan.yuille@jhu.edu
+</td></tr><tr><td>16c884be18016cc07aec0ef7e914622a1a9fb59d</td><td>UNIVERSITÉ DE GRENOBLE
+<br/>No attribué par la bibliothèque
+<br/>THÈSE
+<br/>pour obtenir le grade de
+<br/>DOCTEUR DE L’UNIVERSITÉ DE GRENOBLE
+<br/>Spécialité : Mathématiques et Informatique
+<br/>préparée au Laboratoire Jean Kuntzmann
+<br/>dans le cadre de l’École Doctorale Mathématiques,
+<br/>Sciences et Technologies de l’Information, Informatique
+<br/>présentée et soutenue publiquement
+<br/>par
+<br/>le 27 septembre 2010
+<br/>Exploiting Multimodal Data for Image Understanding
+<br/>Données multimodales pour l’analyse d’image
+<br/>Directeurs de thèse : Cordelia Schmid et Jakob Verbeek
+<br/>JURY
+<br/>M. Éric Gaussier
+<br/>M. Antonio Torralba
+<br/><b>Mme Tinne Tuytelaars Katholieke Universiteit Leuven</b><br/><b>M. Mark Everingham University of Leeds</b><br/>Mme Cordelia Schmid
+<br/>M. Jakob Verbeek
+<br/>Président
+<br/>Université Joseph Fourier
+<br/><b>Massachusetts Institute of Technology Rapporteur</b><br/>Rapporteur
+<br/>Examinateur
+<br/>Examinatrice
+<br/>Examinateur
+<br/>INRIA Grenoble
+<br/>INRIA Grenoble
+</td><td>('2737253', 'Matthieu Guillaumin', 'matthieu guillaumin')</td><td></td></tr><tr><td>162dfd0d2c9f3621d600e8a3790745395ab25ebc</td><td>Head Pose Estimation Based on Multivariate Label Distribution
+<br/>School of Computer Science and Engineering
+<br/><b>Southeast University, Nanjing, China</b></td><td>('1735299', 'Xin Geng', 'xin geng')<br/>('40228279', 'Yu Xia', 'yu xia')</td><td>{xgeng, xiayu}@seu.edu.cn
+</td></tr><tr><td>16f940b4b5da79072d64a77692a876627092d39c</td><td>A Framework for Automated Measurement of the Intensity of Non-Posed Facial
+<br/>Action Units
+<br/><b>University of Denver, Denver, CO</b><br/><b>University of Miami, Coral Gables, FL</b><br/><b>University of Miami, Coral Gables, FL</b><br/><b>University of Pittsburgh, Pittsburgh, PA</b><br/>Emails:
+</td><td>('3093835', 'Mohammad H. Mahoor', 'mohammad h. mahoor')<br/>('2897823', 'Steven Cadavid', 'steven cadavid')<br/>('1874236', 'Daniel S. Messinger', 'daniel s. messinger')<br/>('1737918', 'Jeffrey F. Cohn', 'jeffrey f. cohn')</td><td>mmahoor@du.edu, scadavid@umsis.miami.edu, dmessinger@miami.edu, and jeffcohn@pitt.edu
+</td></tr><tr><td>16572c545384174f8136d761d2b0866e968120a8</td><td>Sequential Max-Margin Event Detectors
+<br/><b>Robotics Institute, Carnegie Mellon University, Pittsburgh, PA, 15213. USA</b></td><td>('39792229', 'Dong Huang', 'dong huang')<br/>('2583890', 'Shitong Yao', 'shitong yao')<br/>('1734275', 'Yi Wang', 'yi wang')<br/>('1707876', 'Fernando De la Torre', 'fernando de la torre')</td><td></td></tr><tr><td>16820ccfb626dcdc893cc7735784aed9f63cbb70</td><td>Real-time Embedded Age and Gender Classification in Unconstrained Video
+<br/>School of Electrical Engineering and Computer Science
+<br/><b>University of Ottawa</b><br/>Ottawa, ON K1N 6N5 Canada
+<br/>CogniVue Corporation
+<br/>Gatineau, QC, Canada
+</td><td>('2014654', 'Ramin Azarmehr', 'ramin azarmehr')<br/>('1807494', 'Won-Sook Lee', 'won-sook lee')<br/>('2551825', 'Christina Xu', 'christina xu')<br/>('32944169', 'Daniel Laroche', 'daniel laroche')</td><td>{razar033,laganier,wslee}@uottawa.ca
+<br/>{cxu,dlaroche}@cognivue.com
+</td></tr><tr><td>1630e839bc23811e340bdadad3c55b6723db361d</td><td>SONG, TAN, CHEN: EXPLOITING RELATIONSHIP BETWEEN ATTRIBUTES
+<br/>Exploiting Relationship between Attributes for
+<br/>Improved Face Verification
+<br/>Department of Computer Science and
+<br/><b>Technology, Nanjing University of Aero</b><br/>nautics and Astronautics, Nanjing 210016,
+<br/>P.R. China
+</td><td>('3075941', 'Fengyi Song', 'fengyi song')<br/>('2248421', 'Xiaoyang Tan', 'xiaoyang tan')<br/>('1680768', 'Songcan Chen', 'songcan chen')</td><td>f.song@nuaa.edu.cn
+<br/>x.tan@nuaa.edu.cn
+<br/>s.chen@nuaa.edu.cn
+</td></tr><tr><td>164b0e2a03a5a402f66c497e6c327edf20f8827b</td><td>Proceedings of the Thirty-First AAAI Conference on Artificial Intelligence (AAAI-17)
+<br/>Sparse Deep Transfer Learning for
+<br/>Convolutional Neural Network
+<br/><b>The Chinese University of Hong Kong, Hong Kong</b><br/><b>Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences, China</b></td><td>('2335888', 'Jiaming Liu', 'jiaming liu')<br/>('47903936', 'Yali Wang', 'yali wang')<br/>('33427555', 'Yu Qiao', 'yu qiao')</td><td>jiaming.liu@email.ucr.edu, {yl.wang, yu.qiao}@siat.ac.cn
+</td></tr><tr><td>16286fb0f14f6a7a1acc10fcd28b3ac43f12f3eb</td><td>J Nonverbal Behav
+<br/>DOI 10.1007/s10919-008-0059-5
+<br/>O R I G I N A L P A P E R
+<br/>All Smiles are Not Created Equal: Morphology
+<br/>and Timing of Smiles Perceived as Amused, Polite,
+<br/>and Embarrassed/Nervous
+<br/>Ó Springer Science+Business Media, LLC 2008
+</td><td>('2059653', 'Zara Ambadar', 'zara ambadar')</td><td></td></tr><tr><td>1667a77db764e03a87a3fd167d88b060ef47bb56</td><td>Alternative Semantic Representations for
+<br/>Zero-Shot Human Action Recognition
+<br/><b>School of Computer Science, The University of Manchester</b><br/>Manchester, M13 9PL, UK
+</td><td>('1729612', 'Qian Wang', 'qian wang')<br/>('32811782', 'Ke Chen', 'ke chen')</td><td>{qian.wang,ke.chen}@manchester.ac.uk
+</td></tr><tr><td>169618b8dc9b348694a31c6e9e17b989735b4d39</td><td>Unsupervised Representation Learning by Sorting Sequences
+<br/><b>University of California, Merced</b><br/>Maneesh Singh3
+<br/>2Virginia Tech
+<br/>3Verisk Analytics
+<br/>http://vllab1.ucmerced.edu/˜hylee/OPN/
+</td><td>('2837591', 'Hsin-Ying Lee', 'hsin-ying lee')<br/>('3068086', 'Jia-Bin Huang', 'jia-bin huang')<br/>('1715634', 'Ming-Hsuan Yang', 'ming-hsuan yang')</td><td></td></tr><tr><td>16e95a907b016951da7c9327927bb039534151da</td><td>JOURNAL OF INFORMATION SCIENCE AND ENGINEERING 32, XXXX-XXXX (2016)
+<br/>3D Face Recognition Using Spherical Vector Norms Map *
+<br/>a Beijing Key Laboratory of Information Service Engineering,
+<br/><b>Beijing Union University, 100101, China</b><br/><b>b Computer Technology Institute, Beijing Union University, 100101, China</b><br/>c Beijing Advanced Innovation Center for Imaging Technology,
+<br/><b>Capital Normal University, 100048, China</b><br/>In this paper, we introduce a novel, automatic method for 3D face recognition. A
+<br/>new feature called a spherical vector norms map of a 3D face is created using the normal
+<br/>vector of each point. This feature contains more detailed information than the original
+<br/>depth image in regions such as the eyes and nose. For certain flat areas of 3D face, such
+<br/>as the forehead and cheeks, this map could increase the distinguishability of different
+<br/>points. In addition, this feature is robust to facial expression due to an adjustment that is
+<br/>made in the mouth region. Then, the facial representations, which are based on Histo-
+<br/>grams of Oriented Gradients, are extracted from the spherical vector norms map and the
+<br/>original depth image. A new partitioning strategy is proposed to produce the histogram
+<br/>of eight patches of a given image, in which all of the pixels are binned based on the
+<br/>magnitude and direction of their gradients. In this study, SVNs map and depth image are
+<br/>represented compactly with two histograms of oriented gradients; this approach is com-
+<br/>pleted by Linear Discriminant Analysis and a Nearest Neighbor classifier.
+<br/>Keywords: spherical vector norms map, Histograms of Oriented Gradients, 3D face
+<br/>recognition, Linear Discriminant Analysis, Face Recognition Grand Challenge database
+<br/>1. INTRODUCTION
+<br/>With the rapidly decreasing costs of 3D capturing devices, many researchers are in-
+<br/>vestigating 3D face recognition systems because it could overcome limitations illumina-
+<br/>tion and make-up, but still bear limitations mostly due to facial expression. We summa-
+<br/>rize a smaller subset of expressive-robust methods below:
+<br/>1. Deformable template-based approaches: Berretti et al. [1] proposed an approach
+<br/>that describes the geometric information of a 3D facial using a surface graph form, and
+<br/>the relevant information among the neighboring points could be encoded into a compact
+<br/>representation. 3DWWs (3D Weighted Walkthroughs) descriptors were proposed to
+<br/>demonstrate the mutual spatial displacement among pairwise arcs of points of the corre-
+<br/>sponding stripes. An 81.2% verification rate at a 0.1% FAR was achieved on the all vs.
+<br/>all experiment. The advantage of the method is the computational complexity is low.
+<br/>Kakadiaris et al. [2] mapped 3D geometry information onto a 2D regular grid using
+<br/>an elastically adapted deformable model. Then, advanced wavelet analysis was used for
+<br/>recognition and get good performance.
+<br/>Drira et al. [3] used radial curves emanating from the nose tips which were already
+<br/>provided, and used elastic shape analysis of these curves to develop a Riemannian
+<br/>framework. Finally, they analyze the shapes of full facial surfaces.
+<br/>1249
+</td><td>('3282147', 'Xue-Qiao Wang', 'xue-qiao wang')<br/>('2130097', 'Jia-Zheng Yuan', 'jia-zheng yuan')<br/>('1930238', 'Qing Li', 'qing li')</td><td>E-mail: {ldxueqiao; jiazheng; liqing10}@buu.edu.cn
+</td></tr><tr><td>166186e551b75c9b5adcc9218f0727b73f5de899</td><td>Volume 4, Issue 2, February 2016
+<br/>International Journal of Advance Research in
+<br/>Computer Science and Management Studies
+<br/>Research Article / Survey Paper / Case Study
+<br/>Available online at: www.ijarcsms.com
+<br/>ISSN: 2321-7782 (Online)
+<br/>Automatic Age and Gender Recognition in Human Face Image
+<br/>Dataset using Convolutional Neural Network System
+<br/>Subhani Shaik1
+<br/>Assoc. Prof & Head of the Department
+<br/>Department of CSE,
+<br/>Associate Professor
+<br/>Department of CSE,
+<br/>St.Mary’s Group of Institutions Guntur
+<br/>St.Mary’s Group of Institutions Guntur
+<br/>Chebrolu(V&M),Guntur(Dt),
+<br/>Andhra Pradesh - India
+<br/>Chebrolu(V&M),Guntur(Dt),
+<br/>Andhra Pradesh - India
+</td><td>('39885231', 'Anto A. Micheal', 'anto a. micheal')</td><td></td></tr><tr><td>16d6737b50f969247339a6860da2109a8664198a</td><td>Convolutional Neural Networks
+<br/>for Age and Gender Classification
+<br/><b>Stanford University</b></td><td>('22241470', 'Ari Ekmekji', 'ari ekmekji')</td><td>aekmekji@stanford.edu
+</td></tr><tr><td>16d9b983796ffcd151bdb8e75fc7eb2e31230809</td><td>EUROGRAPHICS 2018 / D. Gutierrez and A. Sheffer
+<br/>(Guest Editors)
+<br/>Volume 37 (2018), Number 2
+<br/>GazeDirector: Fully Articulated Eye Gaze Redirection in Video
+<br/>ID: paper1004
+</td><td></td><td></td></tr><tr><td>1679943d22d60639b4670eba86665371295f52c3</td><td></td><td></td><td></td></tr><tr><td>162c33a2ec8ece0dc96e42d5a86dc3fedcf8cd5e</td><td>Mygdalis, V., Iosifidis, A., Tefas, A., & Pitas, I. (2016). Large-Scale
+<br/>Classification by an Approximate Least Squares One-Class Support Vector
+<br/>of a meeting held 20-22 August 2015, Helsinki, Finland (Vol. 2, pp. 6-10).
+<br/><b>Institute of Electrical and Electronics Engineers (IEEE). DOI</b><br/>10.1109/Trustcom.2015.555
+<br/>Peer reviewed version
+<br/>Link to published version (if available):
+<br/>10.1109/Trustcom.2015.555
+<br/>Link to publication record in Explore Bristol Research
+<br/>PDF-document
+<br/><b>University of Bristol - Explore Bristol Research</b><br/>General rights
+<br/>This document is made available in accordance with publisher policies. Please cite only the published
+<br/>version using the reference above. Full terms of use are available:
+<br/>http://www.bristol.ac.uk/pure/about/ebr-terms
+<br/> </td><td></td><td></td></tr><tr><td>1610d2d4947c03a89c0fda506a74ba1ae2bc54c2</td><td>Robust Real-Time 3D Face Tracking from RGBD Videos under Extreme Pose,
+<br/>Depth, and Expression Variations
+<br/>Hai X. Pham
+<br/><b>Rutgers University, USA</b></td><td>('1736042', 'Vladimir Pavlovic', 'vladimir pavlovic')</td><td>{hxp1,vladimir}@cs.rutgers.edu
+</td></tr><tr><td>1659a8b91c3f428f1ba6aeba69660f2c9d0a85c6</td><td>Recent Developments in Social Signal Processing
+<br/><b>Institute of Informatics - ISLA</b><br/><b>University of Amsterdam, Amsterdam, The Netherlands</b><br/>†Department of Computing
+<br/><b>Imperial College London, London, UK</b><br/><b>EEMCS, University of Twente Enschede, The Netherlands</b><br/><b>University of Glasgow</b><br/>Glasgow, Scotland
+</td><td>('1764521', 'Albert Ali Salah', 'albert ali salah')<br/>('1694605', 'Maja Pantic', 'maja pantic')<br/>('1719436', 'Alessandro Vinciarelli', 'alessandro vinciarelli')</td><td>Email: a.a.salah@uva.nl
+<br/>Email: m.pantic@imperial.ac.uk
+<br/>Email: vincia@dcs.gla.ac.uk
+</td></tr><tr><td>169076ffe5e7a2310e98087ef7da25aceb12b62d</td><td></td><td></td><td></td></tr><tr><td>167736556bea7fd57cfabc692ec4ae40c445f144</td><td>METHODS
+<br/>published: 13 January 2016
+<br/>doi: 10.3389/fict.2015.00028
+<br/>Improved Motion Description for
+<br/>Action Classification
+<br/>Inria, Centre Rennes – Bretagne Atlantique, Rennes, France
+<br/>Even though the importance of explicitly integrating motion characteristics in video
+<br/>descriptions has been demonstrated by several recent papers on action classification, our
+<br/>current work concludes that adequately decomposing visual motion into dominant and
+<br/>residual motions, i.e., camera and scene motion, significantly improves action recognition
+<br/>algorithms. This holds true both for the extraction of the space-time trajectories and for
+<br/>computation of descriptors. We designed a new motion descriptor – the DCS descriptor –
+<br/>that captures additional information on local motion patterns enhancing results based on
+<br/>differential motion scalar quantities, divergence, curl, and shear features. Finally, applying
+<br/>the recent VLAD coding technique proposed in image retrieval provides a substantial
+<br/>improvement for action recognition. These findings are complementary to each other
+<br/>and they outperformed all previously reported results by a significant margin on three
+<br/>challenging datasets: Hollywood 2, HMDB51, and Olympic Sports as reported in Jain
+<br/>et al. (2013). These results were further improved by Oneata et al. (2013), Wang and
+<br/>Schmid (2013), and Zhu et al. (2013) through the use of the Fisher vector encoding. We
+<br/>therefore also employ Fisher vector in this paper, and we further enhance our approach by
+<br/>combining trajectories from both optical flow and compensated flow. We as well provide
+<br/><b>additional details of DCS descriptors, including visualization. For extending the evaluation</b><br/>a novel dataset with 101 action classes, UCF101, was added.
+<br/>Keywords: action classification, camera motion, optical flow, motion trajectories, motion descriptors
+<br/>1. INTRODUCTION
+<br/>The recognition of human actions in unconstrained videos remains a challenging problem in
+<br/>computer vision despite the fact that human actions are often attributed to essential meaningful
+<br/>content in such videos. The field receives sustained attention due to its potential applications,
+<br/>such as for designing video-surveillance systems, in providing automatic annotation of video
+<br/>archives, as well as for improving human–computer interaction. The solutions that were proposed
+<br/>to address the above problems were inherited from the techniques first designed for image search
+<br/>and classification.
+<br/>Successful local features were developed to describe image patches (Schmid and Mohr, 1997;
+<br/>Lowe, 2004) and translated in the 2D + t domain as spatio-temporal local descriptors (Laptev et al.,
+<br/>2008; Wang et al., 2009) and now include motion clues of Wang et al. (2011). These descriptors
+<br/>are often extracted from spatial–temporal interest points (Laptev and Lindeberg, 2003; Willems
+<br/>et al., 2008). Furthermore, several approaches assume underlying temporal motion model involving
+<br/>trajectories (Hervieu et al., 2008; Matikainen et al., 2009; Messing et al., 2009; Sun et al., 2009;
+<br/>Brox and Malik, 2010; Wang et al., 2011; Wu et al., 2011; Gaidon et al., 2012; Wang and Schmid,
+<br/>2013).
+<br/>Edited by:
+<br/>Jean-Marc Odobez,
+<br/><b>Idiap Research Institute, Switzerland</b><br/>Reviewed by:
+<br/>Thanh Duc Ngo,
+<br/><b>Ho Chi Minh City University of</b><br/>Information Technology, Vietnam
+<br/>Jean Martinet,
+<br/><b>Lille 1 University, France</b><br/>*Correspondence:
+<br/>Specialty section:
+<br/>This article was submitted to
+<br/>Computer Image Analysis, a section
+<br/>of the journal Frontiers in ICT
+<br/>Received: 16 April 2015
+<br/>Accepted: 22 December 2015
+<br/>Published: 13 January 2016
+<br/>Citation:
+<br/>Jain M, Jégou H and Bouthemy P
+<br/>(2016) Improved Motion Description
+<br/>for Action Classification.
+<br/>doi: 10.3389/fict.2015.00028
+<br/>Frontiers in ICT | www.frontiersin.org
+<br/>January 2016 | Volume 2 | Article 28
+</td><td>('40027484', 'Mihir Jain', 'mihir jain')<br/>('1681054', 'Hervé Jégou', 'hervé jégou')<br/>('1716733', 'Patrick Bouthemy', 'patrick bouthemy')<br/>('40027484', 'Mihir Jain', 'mihir jain')</td><td>m.jain@uva.nl
+</td></tr><tr><td>167ea1631476e8f9332cef98cf470cb3d4847bc6</td><td>Visual Search at Pinterest
+<br/>1Visual Discovery, Pinterest
+<br/><b>University of California, Berkeley</b></td><td>('39554931', 'Yushi Jing', 'yushi jing')<br/>('1911082', 'Dmitry Kislyuk', 'dmitry kislyuk')<br/>('39835325', 'Andrew Zhai', 'andrew zhai')<br/>('2560579', 'Jiajing Xu', 'jiajing xu')<br/>('7408951', 'Jeff Donahue', 'jeff donahue')<br/>('2608161', 'Sarah Tavel', 'sarah tavel')</td><td>{jing, dliu, dkislyuk, andrew, jiajing, jdonahue, sarah}@pinterest.com
+</td></tr><tr><td>161eb88031f382e6a1d630cd9a1b9c4bc6b47652</td><td>1
+<br/>Automatic Facial Expression Recognition
+<br/>Using Features of Salient Facial Patches
+</td><td>('2680543', 'Aurobinda Routray', 'aurobinda routray')</td><td></td></tr><tr><td>420782499f38c1d114aabde7b8a8104c9e40a974</td><td>Joint Ranking and Classification using Weak Data for Feature Extraction
+<br/>Fashion Style in 128 Floats:
+<br/>Department of Computer Science and Engineering
+<br/><b>Waseda University, Tokyo, Japan</b></td><td>('3114470', 'Edgar Simo-Serra', 'edgar simo-serra')<br/>('1692113', 'Hiroshi Ishikawa', 'hiroshi ishikawa')</td><td>esimo@aoni.waseda.jp
+<br/>hfs@waseda.jp
+</td></tr><tr><td>4209783b0cab1f22341f0600eed4512155b1dee6</td><td>Accurate and Efficient Similarity Search for Large Scale Face Recognition
+<br/>BUPT
+<br/>BUPT
+<br/>BUPT
+</td><td>('49712251', 'Ce Qi', 'ce qi')<br/>('35963823', 'Zhizhong Liu', 'zhizhong liu')<br/>('1684263', 'Fei Su', 'fei su')</td><td></td></tr><tr><td>42e3dac0df30d754c7c7dab9e1bb94990034a90d</td><td>PANDA: Pose Aligned Networks for Deep Attribute Modeling
+<br/>2EECS, UC Berkeley
+<br/>1Facebook AI Research
+</td><td>('40565777', 'Ning Zhang', 'ning zhang')<br/>('2210374', 'Manohar Paluri', 'manohar paluri')<br/>('1753210', 'Trevor Darrell', 'trevor darrell')</td><td>{mano, ranzato, lubomir}@fb.com
+<br/>{nzhang, trevor}@eecs.berkeley.edu
+</td></tr><tr><td>4217473596b978f13a211cdf47b7d3f6588c785f</td><td>An Efficient Approach for Clustering Face Images
+<br/><b>Michigan State University</b><br/>Noblis
+<br/>Anil Jain
+<br/>Michigan State Universtiy
+</td><td>('40653304', 'Charles Otto', 'charles otto')<br/>('1817623', 'Brendan Klare', 'brendan klare')</td><td>ottochar@msu.edu
+<br/>Brendan.Klare@noblis.org
+<br/>jain@msu.edu
+</td></tr><tr><td>4223666d1b0b1a60c74b14c2980069905088edc6</td><td>A Convergent Incoherent Dictionary Learning
+<br/>Algorithm for Sparse Coding
+<br/>Department of Mathematics
+<br/><b>National University of Singapore</b></td><td>('3183763', 'Chenglong Bao', 'chenglong bao')<br/>('2217653', 'Yuhui Quan', 'yuhui quan')<br/>('39689301', 'Hui Ji', 'hui ji')</td><td></td></tr><tr><td>42afe6d016e52c99e2c0d876052ade9c192d91e7</td><td>Spontaneous vs. Posed Facial Behavior:
+<br/>Automatic Analysis of Brow Actions
+<br/><b>Imperial College London, UK</b><br/><b>Faculty of EEMCS, University of Twente, The Netherlands</b><br/><b>Psychology and Psychiatry, University of Pittsburgh, USA</b></td><td>('1795528', 'Michel F. Valstar', 'michel f. valstar')<br/>('1694605', 'Maja Pantic', 'maja pantic')<br/>('2059653', 'Zara Ambadar', 'zara ambadar')<br/>('1737918', 'Jeffrey F. Cohn', 'jeffrey f. cohn')</td><td>{michel.valstar,m.pantic}@imperial.ac.uk, {ambadar,jeffcohn}@pitt.edu,
+</td></tr><tr><td>42765c170c14bd58e7200b09b2e1e17911eed42b</td><td>2
+<br/>Feature Extraction Based on Wavelet
+<br/>Moments and Moment Invariants in
+<br/>Machine Vision Systems
+<br/>G.A. Papakostas, D.E. Koulouriotis and V.D. Tourassis
+<br/><b>Democritus University of Thrace</b><br/>Department of Production Engineering and Management
+<br/>Greece
+<br/>1. Introduction
+<br/>Recently, there has been an increasing interest on modern machine vision systems for
+<br/>industrial and commercial purposes. More and more products are introduced in the market,
+<br/>which are making use of visual information captured by a camera in order to perform a
+<br/>specific task. Such machine vision systems are used for detecting and/or recognizing a face
+<br/>in an unconstrained environment for security purposes, for analysing the emotional states of
+<br/>a human by processing his facial expressions or for providing a vision based interface in the
+<br/>context of the human computer interaction (HCI) etc..
+<br/>In almost all the modern machine vision systems there is a common processing procedure
+<br/>called feature extraction, dealing with the appropriate representation of the visual information.
+<br/>This task has two main objectives simultaneously, the compact description of the useful
+<br/>information by a set of numbers (features), by keeping the dimension as low as possible.
+<br/>Image moments constitute an important feature extraction method (FEM) which generates
+<br/>high discriminative features, able to capture the particular characteristics of the described
+<br/>pattern, which distinguish it among similar or totally different objects. Their ability to fully
+<br/>describe an image by encoding its contents in a compact way makes them suitable for many
+<br/>disciplines of the engineering life, such as image analysis (Sim et al., 2004), image
+<br/>watermarking (Papakostas et al., 2010a) and pattern recognition (Papakostas et al., 2007,
+<br/>2009a, 2010b).
+<br/>Among the several moment families introduced in the past, the orthogonal moments are
+<br/>the most popular moments widely used in many applications, owing to their
+<br/>orthogonality property that comes from the nature of the polynomials used as kernel
+<br/>functions, which they constitute an orthogonal base. As a result, the orthogonal moments
+<br/>have minimum information redundancy meaning that different moment orders describe
+<br/>different parts of the image.
+<br/>In order to use the moments to classify visual objects, they have to ensure high recognition
+<br/>rates for all possible object’s orientations. This requirement constitutes a significant
+<br/>operational feature of each modern pattern recognition system and it can be satisfied during
+<br/>www.intechopen.com
+</td><td></td><td></td></tr><tr><td>429c3588ce54468090cc2cf56c9b328b549a86dc</td><td></td><td></td><td></td></tr><tr><td>42cc9ea3da1277b1f19dff3d8007c6cbc0bb9830</td><td>Coordinated Local Metric Learning
+<br/>Inria∗
+</td><td>('2143851', 'Shreyas Saxena', 'shreyas saxena')<br/>('34602236', 'Jakob Verbeek', 'jakob verbeek')</td><td></td></tr><tr><td>42350e28d11e33641775bef4c7b41a2c3437e4fd</td><td>212
+<br/>Multilinear Discriminant Analysis
+<br/>for Face Recognition
+</td><td>('1698982', 'Shuicheng Yan', 'shuicheng yan')<br/>('38188040', 'Dong Xu', 'dong xu')<br/>('1706370', 'Qiang Yang', 'qiang yang')<br/>('39089563', 'Lei Zhang', 'lei zhang')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td></td></tr><tr><td>42e155ea109eae773dadf74d713485be83fca105</td><td></td><td></td><td></td></tr><tr><td>4223917177405eaa6bdedca061eb28f7b440ed8e</td><td>B-spline Shape from Motion & Shading: An Automatic Free-form Surface
+<br/>Modeling for Face Reconstruction
+<br/><b>School of Computer Science, Tianjin University</b><br/><b>School of Computer Science, Tianjin University</b><br/><b>School of Software, Tianjin University</b></td><td>('1919846', 'Weilong Peng', 'weilong peng')<br/>('1683334', 'Zhiyong Feng', 'zhiyong feng')<br/>('29962190', 'Chao Xu', 'chao xu')</td><td>wlpeng@tju.edu.cn
+<br/>zyfeng@tju.edu.cn
+</td></tr><tr><td>42eda7c20db9dc0f42f72bb997dd191ed8499b10</td><td>Gaze Embeddings for Zero-Shot Image Classification
+<br/><b>Max Planck Institute for Informatics</b><br/>Saarland Informatics Campus
+<br/>2Amsterdam Machine Learning Lab
+<br/><b>University of Amsterdam</b></td><td>('7789181', 'Nour Karessli', 'nour karessli')<br/>('3194727', 'Andreas Bulling', 'andreas bulling')</td><td></td></tr><tr><td>42c9394ca1caaa36f535721fa9a64b2c8d4e0dee</td><td>Label Efficient Learning of Transferable
+<br/>Representations across Domains and Tasks
+<br/><b>Stanford University</b><br/>Virginia Tech
+<br/><b>University of California, Berkeley</b></td><td>('3378742', 'Zelun Luo', 'zelun luo')<br/>('8299168', 'Yuliang Zou', 'yuliang zou')<br/>('4742485', 'Judy Hoffman', 'judy hoffman')</td><td>zelunluo@stanford.edu
+<br/>ylzou@vt.edu
+<br/>jhoffman@eecs.berkeley.edu
+</td></tr><tr><td>4270460b8bc5299bd6eaf821d5685c6442ea179a</td><td>Int J Comput Vis (2009) 84: 163–183
+<br/>DOI 10.1007/s11263-008-0147-3
+<br/>Partial Similarity of Objects, or How to Compare a Centaur
+<br/>to a Horse
+<br/>Received: 30 September 2007 / Accepted: 3 June 2008 / Published online: 26 July 2008
+<br/>© Springer Science+Business Media, LLC 2008
+</td><td>('1731883', 'Alexander M. Bronstein', 'alexander m. bronstein')<br/>('1692832', 'Ron Kimmel', 'ron kimmel')</td><td></td></tr><tr><td>4205cb47ba4d3c0f21840633bcd49349d1dc02c1</td><td>ACTION RECOGNITION WITH GRADIENT BOUNDARY CONVOLUTIONAL NETWORK
+<br/><b>Research Institute of Shenzhen, Wuhan University, Shenzhen, China</b><br/><b>National Engineering Research Center for Multimedia Software, Wuhan University, Wuhan, China</b><br/><b>Center for Research in Computer Vision, University of Central Florida, Orlando, USA</b></td><td>('2559431', 'Huafeng Chen', 'huafeng chen')<br/>('1736897', 'Jun Chen', 'jun chen')<br/>('1732874', 'Chen Chen', 'chen chen')<br/>('37254976', 'Ruimin Hu', 'ruimin hu')</td><td></td></tr><tr><td>42ded74d4858bea1070dadb08b037115d9d15db5</td><td>Exigent: An Automatic Avatar Generation System
+<br/>Computer Science and Artificial Intelligence Laboratory
+<br/><b>Massachusetts Institute of Technology</b><br/>Cambridge, Massachusetts 02139, USA
+</td><td>('2852664', 'Dominic Kao', 'dominic kao')<br/>('1709421', 'D. Fox Harrell', 'd. fox harrell')</td><td>{dkao,fox.harrell}@mit.edu
+</td></tr><tr><td>42ea8a96eea023361721f0ea34264d3d0fc49ebd</td><td>Parameterized Principal Component Analysis
+<br/><b>Florida State University, USA</b></td><td>('2109527', 'Ajay Gupta', 'ajay gupta')<br/>('2455529', 'Adrian Barbu', 'adrian barbu')</td><td></td></tr><tr><td>42f6f5454dda99d8989f9814989efd50fe807ee8</td><td>Conditional generative adversarial nets for convolutional face generation
+<br/>Symbolic Systems Program, Natural Language Processing Group
+<br/><b>Stanford University</b></td><td>('24339276', 'Jon Gauthier', 'jon gauthier')</td><td>jgauthie@stanford.edu
+</td></tr><tr><td>429d4848d03d2243cc6a1b03695406a6de1a7abd</td><td>Face Recognition based on Logarithmic Fusion
+<br/>International Journal of Soft Computing and Engineering (IJSCE)
+<br/>ISSN: 2231-2307, Volume-2, Issue-3, July 2012
+<br/>of SVD and KT
+<br/>Ramachandra A C, Raja K B, Venugopal K R, L M Patnaik
+<br/>to
+<br/>
+</td><td></td><td></td></tr><tr><td>42dc36550912bc40f7faa195c60ff6ffc04e7cd6</td><td>Hindawi Publishing Corporation
+<br/>ISRN Machine Vision
+<br/>Volume 2013, Article ID 579126, 10 pages
+<br/>http://dx.doi.org/10.1155/2013/579126
+<br/>Research Article
+<br/>Visible and Infrared Face Identification via
+<br/>Sparse Representation
+<br/><b>LITIS EA 4108-QuantIF Team, University of Rouen, 22 Boulevard Gambetta, 76183 Rouen Cedex, France</b><br/><b>GREYC UMR CNRS 6072 ENSICAEN-Image Team, University of Caen Basse-Normandie, 6 Boulevard Mar echal Juin</b><br/>14050 Caen, France
+<br/>Received 4 April 2013; Accepted 27 April 2013
+<br/>Academic Editors: O. Ghita, D. Hernandez, Z. Hou, M. La Cascia, and J. M. Tavares
+<br/>Copyright © 2013 P. Buyssens and M. Revenu. This is an open access article distributed under the Creative Commons Attribution
+<br/>License, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly
+<br/>cited.
+<br/>We present a facial recognition technique based on facial sparse representation. A dictionary is learned from data, and patches
+<br/>extracted from a face are decomposed in a sparse manner onto this dictionary. We particularly focus on the design of dictionaries
+<br/>that play a crucial role in the final identification rates. Applied to various databases and modalities, we show that this approach
+<br/>gives interesting performances. We propose also a score fusion framework that allows quantifying the saliency classifiers outputs
+<br/>and merging them according to these saliencies.
+<br/>1. Introduction
+<br/>Face recognition is a topic which has been of increasing inter-
+<br/>est during the last two decades due to a vast number of pos-
+<br/>sible applications: biometrics, video surveillance, advanced
+<br/>HMI, or image/video indexation. Although considerable
+<br/>progress has been made in this domain, especially with the
+<br/>development of powerful methods (such as the Eigenfaces
+<br/>or the Elastic Bunch Graph Matching methods), automatic
+<br/>face recognition is not enough accurate in uncontrolled envi-
+<br/>ronments for a large use. Many factors can degrade the per-
+<br/>formances of facial biometric system: illumination variation
+<br/>creates artificial shadows, changing locally the appearance of
+<br/>the face; head poses modify the distance between localized
+<br/>features; facial expression introduces global changes; artefacts
+<br/>wearing, such as glasses or scarf, may hide parts of the face.
+<br/>For the particular case of illumination, a lot of work has
+<br/>been done on the preprocessing step of the images to reduce
+<br/>the effect of the illumination on the face. Another approach is
+<br/>to use other imagery such as infrared, which has been showed
+<br/>to be a promising alternative. An infrared capture of a face is
+<br/>nearly invariant to illumination changes and allows a system
+<br/><b>to process in all the illumination conditions, including total</b><br/>darkness like night.
+<br/>While visual cameras measure the electromagnetic
+<br/>energy in the visible spectrum (0.4–0.7 𝜇m), sensors in the
+<br/>IR respond to thermal radiation in the infrared spectrum
+<br/>(0.7–14.0 𝜇m). The infrared spectrum can mainly be divided
+<br/>into reflected IR (Figure 1(b)) and emissive IR (Figure 1(c)).
+<br/>Reflected IR contains near infrared (NIR) (0.7–0.9 𝜇m)
+<br/>and short-wave infrared (SWIR) (0.9–2.4 𝜇m). The ther-
+<br/>mal IR band is associated with thermal radiation emitted
+<br/>by the objects. It contains the midwave infrared (MWIR)
+<br/>(3.0–5.0 𝜇m) and long-wave infrared (LWIR) (8.0–14.0 𝜇m).
+<br/>Although the reflected IR is by far the most studied, we use
+<br/>thermal long-wave IR in this study.
+<br/>Despite the advantages of infrared modality, infrared im-
+<br/>agery has other limitations. Since a face captured under this
+<br/>modality renders its thermal patterns, a temperature screen
+<br/>placed in front of the face will totally occlude it. This phe-
+<br/>nomenon appears when a subject simply wears glasses. In this
+<br/>case, the captured face has two black holes, corresponding to
+<br/>the glasses, which is far more inconvenient than in the visible
+</td><td>('2825139', 'Pierre Buyssens', 'pierre buyssens')</td><td>Correspondence should be addressed to Pierre Buyssens; pierre.buyssens@gmail.com
+</td></tr><tr><td>424259e9e917c037208125ccc1a02f8276afb667</td><td></td><td></td><td></td></tr><tr><td>42ecfc3221c2e1377e6ff849afb705ecd056b6ff</td><td>Pose Invariant Face Recognition under Arbitrary
+<br/>Unknown Lighting using Spherical Harmonics
+<br/>Department of Computer Science,
+<br/>SUNY at Stony Brook, NY, 11790
+</td><td>('38323599', 'Lei Zhang', 'lei zhang')<br/>('1686020', 'Dimitris Samaras', 'dimitris samaras')</td><td>{lzhang, samaras}@cs.sunysb.edu
+</td></tr><tr><td>421955c6d2f7a5ffafaf154a329a525e21bbd6d3</td><td>570
+<br/>IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. 22, NO. 6,
+<br/>JUNE 2000
+<br/>Evolutionary Pursuit and Its
+<br/>Application to Face Recognition
+</td><td>('39664966', 'Chengjun Liu', 'chengjun liu')<br/>('1781577', 'Harry Wechsler', 'harry wechsler')</td><td></td></tr><tr><td>42e0127a3fd6a96048e0bc7aab6d0ae88ba00fb0</td><td></td><td></td><td></td></tr><tr><td>42df75080e14d32332b39ee5d91e83da8a914e34</td><td>4280
+<br/>Illumination Compensation Using Oriented
+<br/>Local Histogram Equalization and
+<br/>Its Application to Face Recognition
+</td><td>('1822733', 'Ping-Han Lee', 'ping-han lee')<br/>('2250469', 'Szu-Wei Wu', 'szu-wei wu')<br/>('1732064', 'Yi-Ping Hung', 'yi-ping hung')</td><td></td></tr><tr><td>4276eb27e2e4fc3e0ceb769eca75e3c73b7f2e99</td><td>Face Recognition From Video
+<br/>1Siemens Corporate Research
+<br/><b>College Road East, Princeton, NJ</b><br/>2Center for Automation Research (CfAR) and
+<br/>Department of Electrical and Computer Engineering
+<br/><b>University of Maryland, College Park, MD</b><br/>I. INTRODUCTION
+<br/>While face recognition (FR) from a single still image has been studied extensively [13], [57], FR based on a
+<br/>video sequence is an emerging topic, evidenced by the growing increase in the literature. It is predictable that with
+<br/>the ubiquity of video sequences, FR based on video sequences will become more and more popular. In this chapter,
+<br/>we also address FR based on a group of still images (also referred to as multiple still images). Multiple still images
+<br/>are not necessarily from a video sequence; they can come from multiple independent still captures.
+<br/>It is obvious that multiple still images or a video sequence can be regarded as a single still image in a degenerate
+<br/>manner. More specifically, suppose that we have a group of face images {y1, . . . , yT} and a single-still-image-based
+<br/>FR algorithm A (or the base algorithm), we can construct a recognition algorithm based on multiple still images
+<br/>or a video sequence by fusing multiple base algorithms denoted by Ai’s. Each Ai takes a different single image
+<br/>yi as input. The fusion rule can be additive, multiplicative, and so on.
+<br/>Even though the fusion algorithm might work well in practice, clearly, the overall recognition performance solely
+<br/>depends on the base algorithm and hence designing the base algorithm A (or the similarity function k) is of ultimate
+<br/>importance. However, the fused algorithms neglect additional properties manifested in multiple still images or video
+<br/>sequences. Generally speaking, algorithms that judiciously exploit these properties will perform better in terms of
+<br/>recognition accuracy, computational efficiency, etc.
+<br/>There are three additional properties available from multiple still images and/or video sequences:
+<br/>- [P 1: Set of observations]. This property is directly exploited by the fused algorithms. One main disadvantage
+<br/>may be the ad hoc nature of the combination rule. However, theoretical analysis based on a set of observations
+<br/>can be performed. For example, a set of observations can be summarized using quantities like matrix, probability
+<br/>density function, manifold, etc. Hence, corresponding knowledge can be utilized to match two sets.
+<br/>- [P 2: Temporal continuity/Dynamics]. Successive frames in the video sequences are continuous in the
+<br/>temporal dimension. Such continuity, coming from facial expression, geometric continuity related to head
+<br/>July 14, 2008
+<br/>DRAFT
+</td><td>('1682187', 'Shaohua Kevin Zhou', 'shaohua kevin zhou')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')<br/>('1867477', 'Gaurav Aggarwal', 'gaurav aggarwal')</td><td>Email: shaohua.zhou@siemens.com, rama@cfar.umd.edu, gaurav@cs.umd.edu
+</td></tr><tr><td>89945b7cd614310ebae05b8deed0533a9998d212</td><td>Divide-and-Conquer Method for L1 Norm Matrix
+<br/>Factorization in the Presence of Outliers and
+<br/>Missing Data
+</td><td>('1803714', 'Deyu Meng', 'deyu meng')</td><td></td></tr><tr><td>89de30a75d3258816c2d4d5a733d2bef894b66b9</td><td></td><td></td><td></td></tr><tr><td>89002a64e96a82486220b1d5c3f060654b24ef2a</td><td>PIEFA: Personalized Incremental and Ensemble Face Alignment
+<br/>Yang Yu⋆
+<br/><b>Rutgers University</b><br/>Piscataway, NJ, 08854
+<br/><b>The University of North Carolina at Charlotte</b><br/>Charlotte, NC, 28223
+</td><td>('4340744', 'Xi Peng', 'xi peng')<br/>('1753384', 'Shaoting Zhang', 'shaoting zhang')<br/>('1711560', 'Dimitris N. Metaxas', 'dimitris n. metaxas')</td><td>xpeng.nb,yyu,dnm@cs.rutgers.edu
+<br/>szhang16@uncc.edu
+</td></tr><tr><td>89c84628b6f63554eec13830851a5d03d740261a</td><td>Image Enhancement and Automated Target Recognition
+<br/>Techniques for Underwater Electro-Optic Imagery
+<br/><b>Metron, Inc</b><br/>11911 Freedom Dr., Suite 800
+<br/>Reston, VA 20190
+<br/>Contract Number N00014-07-C-0351
+<br/>http:www.metsci.com
+<br/>LONG TERM GOALS
+<br/>The long-term goal of this project is to provide a flexible, accurate and extensible automated target
+<br/>recognition (ATR) system for use with a variety of imaging and non-imaging sensors. Such an ATR
+<br/>system, once it achieves a high level of performance, can relieve human operators from the tedious
+<br/>business of pouring over vast quantities of mostly mundane data, calling the operator in only when the
+<br/>computer assessment involves an unacceptable level of ambiguity. The ATR system will provide most
+<br/>leading edge algorithms for detection, segmentation, and classification while incorporating many novel
+<br/>algorithms that we are developing at Metron. To address one of the most critical challenges in ATR
+<br/>technology, the system will also provide powerful feature extraction routines designed for specific
+<br/>applications of current interest.
+<br/>OBJECTIVES
+<br/>The main objective of this project is to develop a complete, flexible, and extensible modular automated
+<br/>target recognition (MATR) system for computer aided detection and classification (CAD/CAC) of
+<br/>target objects from within cluttered and possibly noisy image data. The MATR system framework is
+<br/>designed to be applicable to a wide range of situations, each with its own challenges, and so is
+<br/>organized in such a way that the constituent algorithms are interchangeable and can be selected based
+<br/>on their individual suitability to the particular task within the specific application. The ATR system
+<br/>designer can select combinations of algorithms, many of which are being developed at Metron, to
+<br/>produce a variety of systems, each tailored to specific needs. While the development of the system is
+<br/>still ongoing, results for mine countermeasures (MCM) applications using electro-optical (EO) image
+<br/>data have been encouraging. A brief description of the system framework, some of the novel
+<br/>algorithms, and preliminary test results are provided in this interim report.
+<br/>APPROACH
+<br/>The MATR system is composed of several modules, as depicted in Figure 1, reflecting the sequence of
+<br/>steps in the ATR process. The detection step is concerned with finding portions of an image that
+<br/>contain possible objects of interest, or targets, that merit further attention. During the localization and
+<br/>segmentation phase the position and approximate size and shape of the object is estimated and a
+<br/>portion of the image, or “snippet,” containing the object is extracted. At this stage, image processing
+<br/>may be performed on the snippet to reorient the target, mitigate noise, accentuate edge detail, etc.
+<br/>1
+</td><td>('2395986', 'Thomas Giddings', 'thomas giddings')<br/>('2386585', 'Cetin Savkli', 'cetin savkli')<br/>('2632462', 'Joseph Shirron', 'joseph shirron')</td><td>phone: (703) 437-2428 fax: (703) 787-3518 email: giddings@metsci.com
+</td></tr><tr><td>89c51f73ec5ebd1c2a9000123deaf628acf3cdd8</td><td>American Journal of Applied Sciences 5 (5): 574-580, 2008
+<br/>ISSN 1546-9239
+<br/>© 2008 Science Publications
+<br/>Face Recognition Based on Nonlinear Feature Approach
+<br/>1Eimad E.A. Abusham, 1Andrew T.B. Jin, 1Wong E. Kiong and 2G. Debashis
+<br/>1Faculty of Information Science and Technology,
+<br/><b>Faculty of Engineering and Technology, Multimedia University (Melaka Campus</b><br/>Jalan Ayer Keroh Lama, 75450 Bukit Beruang, Melaka, Malaysia
+</td><td></td><td></td></tr><tr><td>89c73b1e7c9b5e126a26ed5b7caccd7cd30ab199</td><td>Application of an Improved Mean Shift Algorithm
+<br/>in Real-time Facial Expression Recognition
+<br/><b>School of Computer and Communication, Hunan University of Technology, Hunan, Zhuzhou, 412008 china</b><br/><b>School of Electrical and Information Engineering, Hunan University of Technology, Hunan, Zhuzhou, 412008 china</b><br/><b>School of Computer and Communication, Hunan University of Technology, Hunan, Zhuzhou, 412008 china</b><br/>Yan-hui ZHU
+<br/><b>School of Computer and Communication, Hunan University of Technology, Hunan, Zhuzhou, 412008 china</b><br/>facial
+<br/>real-time
+<br/>expression
+</td><td>('1719090', 'Zhao-yi Peng', 'zhao-yi peng')<br/>('1696179', 'Yu Zhou', 'yu zhou')<br/>('2276926', 'Zhi-qiang Wen', 'zhi-qiang wen')</td><td>Email:pengzhaoyi@163.com
+<br/>Email:zypzy@163.com
+<br/>Email: swayhzhu@163.com
+<br/>Email: zhqwen20001@163.com
+</td></tr><tr><td>89e7d23e0c6a1d636f2da68aaef58efee36b718b</td><td>Lucas-Kanade Scale Invariant Feature Transform for
+<br/>Uncontrolled Viewpoint Face Recognition
+<br/>1Division of Computer Science and Engineering,
+<br/>2Center for Advanced Image and Information Technology
+<br/><b>Chonbuk National University, Jeonju 561-756, Korea</b></td><td>('2642847', 'Yongbin Gao', 'yongbin gao')<br/>('4292934', 'Hyo Jong Lee', 'hyo jong lee')</td><td></td></tr><tr><td>893239f17dc2d17183410d8a98b0440d98fa2679</td><td>UvA-DARE (Digital Academic Repository)
+<br/>Expression-Invariant Age Estimation
+<br/>Published in:
+<br/>Proceedings of the British Machine Vision Conference 2014
+<br/>DOI:
+<br/>10.5244/C.28.14
+<br/>Link to publication
+<br/>Citation for published version (APA):
+<br/>French, & T. Pridmore (Eds.), Proceedings of the British Machine Vision Conference 2014 (pp. 14.1-14.11).
+<br/>BMVA Press. DOI: 10.5244/C.28.14
+<br/>General rights
+<br/>It is not permitted to download or to forward/distribute the text or part of it without the consent of the author(s) and/or copyright holder(s),
+<br/>other than for strictly personal, individual use, unless the work is under an open content license (like Creative Commons).
+<br/>Disclaimer/Complaints regulations
+<br/>If you believe that digital publication of certain material infringes any of your rights or (privacy) interests, please let the Library know, stating
+<br/>your reasons. In case of a legitimate complaint, the Library will make the material inaccessible and/or remove it from the website. Please Ask
+<br/><b>the Library: http://uba.uva.nl/en/contact, or a letter to: Library of the University of Amsterdam, Secretariat, Singel 425, 1012 WP Amsterdam</b><br/>The Netherlands. You will be contacted as soon as possible.
+<br/>Download date: 04 Aug 2017
+<br/><b>UvA-DARE is a service provided by the library of the University of Amsterdam (http://dare.uva.nl</b></td><td>('49776777', 'Alvarez Lopez', 'alvarez lopez')</td><td></td></tr><tr><td>89f4bcbfeb29966ab969682eae235066a89fc151</td><td>A Comparison of Photometric Normalisation Algorithms for Face Verification
+<br/>Centre for Vision, Speech and Signal Processing
+<br/><b>University of Surrey</b><br/>Guildford, Surrey, GU2 7XH, UK
+</td><td>('39213687', 'James Short', 'james short')<br/>('1748684', 'Josef Kittler', 'josef kittler')<br/>('2173900', 'Kieron Messer', 'kieron messer')</td><td>(cid:0)j.short,j.kittler,k.messer(cid:1)@eim.surrey.ac.uk
+</td></tr><tr><td>892c911ca68f5b4bad59cde7eeb6c738ec6c4586</td><td>RESEARCH ARTICLE
+<br/>The Ryerson Audio-Visual Database of
+<br/>Emotional Speech and Song (RAVDESS): A
+<br/>dynamic, multimodal set of facial and vocal
+<br/>expressions in North American English
+<br/><b>Ryerson University, Toronto, Canada</b><br/><b>Information Systems, University of Wisconsin-River Falls, Wisconsin, WI, United States of America</b></td><td>('2940438', 'Frank A. Russo', 'frank a. russo')</td><td>* steven.livingstone@uwrf.edu
+</td></tr><tr><td>8913a5b7ed91c5f6dec95349fbc6919deee4fc75</td><td>BigBIRD: A Large-Scale 3D Database of Object Instances
+</td><td>('37248999', 'Arjun Singh', 'arjun singh')<br/>('1905626', 'James Sha', 'james sha')<br/>('39537097', 'Karthik S. Narayan', 'karthik s. narayan')<br/>('2461427', 'Tudor Achim', 'tudor achim')<br/>('1689992', 'Pieter Abbeel', 'pieter abbeel')</td><td></td></tr><tr><td>8986585975c0090e9ad97bec2ba6c4b437419dae</td><td>Unsupervised Hard Example Mining from
+<br/>Videos for Improved Object Detection
+<br/><b>College of Information and Computer Sciences, University of Massachusetts, Amherst</b><br/>{souyoungjin,arunirc,hzjiang,ashishsingh,
+</td><td>('24525313', 'SouYoung Jin', 'souyoung jin')<br/>('2895705', 'Aruni RoyChowdhury', 'aruni roychowdhury')<br/>('40175280', 'Huaizu Jiang', 'huaizu jiang')<br/>('1785936', 'Ashish Singh', 'ashish singh')<br/>('39087749', 'Aditya Prasad', 'aditya prasad')<br/>('32315404', 'Deep Chakraborty', 'deep chakraborty')</td><td>aprasad,dchakraborty,elm}@cs.umass.edu
+</td></tr><tr><td>89cabb60aa369486a1ebe586dbe09e3557615ef8</td><td>Bayesian Networks as Generative
+<br/>Models for Face Recognition
+<br/><b>IDIAP RESEARCH INSTITUTE</b><br/>´ECOLE POLYTECHNIQUE F´ED´ERALE DE LAUSANNE
+<br/>supervised by:
+<br/>Dr. S. Marcel
+<br/>Prof. H. Bourlard
+<br/>2009
+</td><td>('16602458', 'Guillaume Heusch', 'guillaume heusch')</td><td></td></tr><tr><td>89d3a57f663976a9ac5e9cdad01267c1fc1a7e06</td><td>Neural Class-Specific Regression for face
+<br/>verification
+</td><td>('38813382', 'Guanqun Cao', 'guanqun cao')<br/>('9219875', 'Moncef Gabbouj', 'moncef gabbouj')</td><td></td></tr><tr><td>8983485996d5d9d162e70d66399047c5d01ac451</td><td>Deep Feature-based Face Detection on Mobile Devices
+<br/><b>Center for Automation Research, University of Maryland, College Park, MD</b><br/><b>Rutgers University, Piscataway, NJ</b></td><td>('40599829', 'Sayantan Sarkar', 'sayantan sarkar')<br/>('1741177', 'Vishal M. Patel', 'vishal m. patel')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td>{ssarkar2, rama}@umiacs.umd.edu
+<br/>vishal.m.patel@rutgers.edu
+</td></tr><tr><td>89bc311df99ad0127383a9149d1684dfd8a5aa34</td><td>Towards ontology driven learning of
+<br/>visual concept detectors
+<br/><b>Dextro Robotics, Inc. 101 Avenue of the Americas, New York, USA</b></td><td>('3407640', 'Sanchit Arora', 'sanchit arora')<br/>('21781318', 'Chuck Cho', 'chuck cho')<br/>('1810102', 'Paul Fitzpatrick', 'paul fitzpatrick')</td><td></td></tr><tr><td>8981be3a69cd522b4e57e9914bf19f034d4b530c</td><td>Fast Automatic Video Retrieval using Web Images
+<br/><b>Center For Automation Research, University of Maryland, College Park</b></td><td>('2257769', 'Xintong Han', 'xintong han')<br/>('47679939', 'Bharat Singh', 'bharat singh')<br/>('2852035', 'Vlad I. Morariu', 'vlad i. morariu')<br/>('1693428', 'Larry S. Davis', 'larry s. davis')</td><td>{xintong,bharat,morariu,lsd}@umiacs.umd.edu
+</td></tr><tr><td>898a66979c7e8b53a10fd58ac51fbfdb6e6e6e7c</td><td>Dynamic vs. Static Recognition of Facial
+<br/>Expressions
+<br/>No Author Given
+<br/><b>No Institute Given</b></td><td></td><td></td></tr><tr><td>89d7cc9bbcd2fdc4f4434d153ecb83764242227b</td><td>(IJERA) ISSN: 2248-9622 www.ijera.com
+<br/>Vol. 3, Issue 2, March -April 2013, pp.351-355
+<br/>Face-Name Graph Matching For The Personalities In Movie
+<br/>Screen
+<br/><b>VelTech HighTech Dr. Rangarajan Dr.Sakunthala Engineering College</b><br/><b>Final Year Student, M.Tech IT, Vel Tech Dr. RR andDr. SR Technical University, Chennai</b><br/>Chennai.)
+</td><td></td><td></td></tr><tr><td>896f4d87257abd0f628c1ffbbfdac38c86a56f50</td><td>Action and Gesture Temporal Spotting with
+<br/>Super Vector Representation
+<br/><b>Southwest Jiaotong University, Chengdu, China</b><br/><b>The Chinese University of Hong Kong</b><br/><b>Shenzhen Key Lab of CVPR, Shenzhen Institutes of Advanced Technology, CAS</b></td><td>('1766837', 'Xiaojiang Peng', 'xiaojiang peng')<br/>('33345248', 'Limin Wang', 'limin wang')<br/>('2985266', 'Zhuowei Cai', 'zhuowei cai')<br/>('33427555', 'Yu Qiao', 'yu qiao')</td><td></td></tr><tr><td>891b10c4b3b92ca30c9b93170ec9abd71f6099c4</td><td>Facial landmark detection using structured output deep
+<br/>neural networks
+<br/>Soufiane Belharbi ∗1, Cl´ement Chatelain∗1, Romain H´erault∗1, and S´ebastien
+<br/>1LITIS EA 4108, INSA de Rouen, Saint ´Etienne du Rouvray 76800, France
+<br/>2LITIS EA 4108, UFR des Sciences, Universit´e de Rouen, France.
+<br/>September 24, 2015
+</td><td>('49529671', 'Adam', 'adam')</td><td></td></tr><tr><td>451b6409565a5ad18ea49b063561a2645fa4281b</td><td>Action Sets: Weakly Supervised Action Segmentation without Ordering
+<br/>Constraints
+<br/><b>University of Bonn, Germany</b></td><td>('32774629', 'Alexander Richard', 'alexander richard')<br/>('51267303', 'Hilde Kuehne', 'hilde kuehne')<br/>('2946643', 'Juergen Gall', 'juergen gall')</td><td>{richard,kuehne,gall}@iai.uni-bonn.de
+</td></tr><tr><td>45c340c8e79077a5340387cfff8ed7615efa20fd</td><td></td><td></td><td></td></tr><tr><td>455204fa201e9936b42756d362f62700597874c4</td><td>A REGION BASED METHODOLOGY FOR FACIAL
+<br/>EXPRESSION RECOGNITION
+<br/><b>Medical School, University of Ioannina, Ioannina, Greece</b><br/>Unit of Medical Technology and Intelligent Information Systems, Dept. of Computer Science
+<br/><b>University of Ioannina, Ioannina, Greece</b><br/>Keywords:
+<br/>Facial expression recognition, Gabor filters, filter bank, artificial neural networks, Japanese Female Facial
+<br/>Expression Database (JAFFE).
+</td><td>('2059518', 'Anastasios C. Koutlas', 'anastasios c. koutlas')<br/>('1692818', 'Dimitrios I. Fotiadis', 'dimitrios i. fotiadis')</td><td>me01697@cc.uoi.gr
+<br/>fotiadis@cs.uoi.gr
+</td></tr><tr><td>4541c9b4b7e6f7a232bdd62ae653ba5ec0f8bbf6</td><td>The role of structural facial asymmetry in asymmetry of
+<br/>peak facial expressions
+<br/>Karen L. Schmidt
+<br/><b>University of Pittsburgh, PA, USA</b><br/><b>Carnegie Mellon University, Pittsburgh, PA, USA</b><br/>Jeffrey F. Cohn
+<br/><b>University of Pittsburgh, PA, USA</b><br/>joy, anger, and disgust expressions,
+<br/>Asymmetric facial expression is generally attributed to asymmetry in movement,
+<br/>but structural asymmetry in the face may also affect asymmetry of expression.
+<br/>Asymmetry in posed expressions was measured using image-based approaches in
+<br/>digitised sequences of facial expression in 55 individuals, N/16 men, N/39
+<br/>women. Structural asymmetry (at neutral expression) was higher in men than
+<br/>women and accounted for .54, .62, and .66 of the variance in asymmetry at peak
+<br/>expression for
+<br/>respectively. Movement
+<br/>asymmetry (measured by change in pixel values over time) was found, but was
+<br/>unrelated to peak asymmetry in joy or anger expressions over the whole face and in
+<br/>facial subregions relevant to the expression. Movement asymmetry was negatively
+<br/>related to peak asymmetry in disgust expressions. Sidedness of movement
+<br/>asymmetry (defined as the ratio of summed movement on the left to movement
+<br/>on the right) was consistent across emotions within individuals. Sidedness was
+<br/>found only for joy expressions, which had significantly more movement on the left.
+<br/>The significant role of structural asymmetry in asymmetry of emotion expression
+<br/>and the exploration of facial expression asymmetry have important implications for
+<br/>evolutionary interpretations of facial signalling and facial expressions in general.
+<br/><b>Address correspondence to: Karen L. Schmidt, University of</b><br/>This study is part of a larger programme of research that is ongoing in the Department of
+<br/><b>Psychiatry at the University of Pittsburgh</b><br/><b>Science and the Robotics Institute at Carnegie Mellon University. This study was supported in part</b><br/><b>by grants from the National Institute of Mental Health (MH 15279 and MH067976 (K. Schmidt</b><br/>and MH51435 (J. Cohn). Additional support for this project was received from Office of Naval
+<br/>Research (HID 29-203). The authors acknowledge the contribution of Rebecca McNutt to this
+<br/>article. A preliminary version of these results was presented at the Tenth Annual Conference: Facial
+<br/>Measurement and Meaning in Rimini, Italy, September 2003.
+<br/># 2006 Psychology Press, an imprint of the Taylor & Francis Group, an informa business
+<br/>DOI: 10.1080/13576500600832758
+</td><td>('1689241', 'Yanxi Liu', 'yanxi liu')</td><td>Pittsburgh, 121 University Place, Pittsburgh PA 15217, USA. E-mail: kschmidt@pitt.edu
+</td></tr><tr><td>4552f4d46a2cc67ccc4dd8568e5c95aa2eedb4ec</td><td>Disentangling Features in 3D Face Shapes
+<br/>for Joint Face Reconstruction and Recognition∗
+<br/><b>College of Computer Science, Sichuan University</b><br/><b>Michigan State University</b></td><td>('1734409', 'Feng Liu', 'feng liu')<br/>('1778454', 'Ronghang Zhu', 'ronghang zhu')<br/>('39422721', 'Dan Zeng', 'dan zeng')<br/>('7345195', 'Qijun Zhao', 'qijun zhao')<br/>('38284381', 'Xiaoming Liu', 'xiaoming liu')</td><td></td></tr><tr><td>459960be65dd04317dd325af5b7cbb883d822ee4</td><td>The Meme Quiz: A Facial Expression Game Combining
+<br/>Human Agency and Machine Involvement
+<br/>Department of Computer Science and Engineering
+<br/><b>University of Washington</b></td><td>('3059933', 'Kathleen Tuite', 'kathleen tuite')</td><td>{ktuite,kemelmi}@cs.washington.edu
+</td></tr><tr><td>45f858f9e8d7713f60f52618e54089ba68dfcd6d</td><td>What Actions are Needed for Understanding Human Actions in Videos?
+<br/><b>Carnegie Mellon University</b><br/>github.com/gsig/actions-for-actions
+</td><td>('34280810', 'Gunnar A. Sigurdsson', 'gunnar a. sigurdsson')</td><td></td></tr><tr><td>45e7ddd5248977ba8ec61be111db912a4387d62f</td><td>CHEN ET AL.: ADVERSARIAL POSENET
+<br/>Adversarial Learning of Structure-Aware Fully
+<br/>Convolutional Networks for Landmark
+<br/>Localization
+</td><td>('50579509', 'Yu Chen', 'yu chen')<br/>('1780381', 'Chunhua Shen', 'chunhua shen')<br/>('2126047', 'Xiu-Shen Wei', 'xiu-shen wei')<br/>('2161037', 'Lingqiao Liu', 'lingqiao liu')<br/>('49499405', 'Jian Yang', 'jian yang')</td><td></td></tr><tr><td>45215e330a4251801877070c85c81f42c2da60fb</td><td>Domain Adaptive Dictionary Learning
+<br/><b>Center for Automation Research, UMIACS, University of Maryland, College Park</b><br/><b>Arts Media and Engineering, Arizona State University</b></td><td>('2077648', 'Qiang Qiu', 'qiang qiu')<br/>('1741177', 'Vishal M. Patel', 'vishal m. patel')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td>qiu@cs.umd.edu, {pvishalm, rama}@umiacs.umd.edu, pturaga@asu.edu
+</td></tr><tr><td>457cf73263d80a1a1338dc750ce9a50313745d1d</td><td>Published as a conference paper at ICLR 2017
+<br/>DECOMPOSING MOTION AND CONTENT FOR
+<br/>NATURAL VIDEO SEQUENCE PREDICTION
+<br/><b>University of Michigan, Ann Arbor, USA</b><br/>2Adobe Research, San Jose, CA 95110
+<br/>3POSTECH, Pohang, Korea
+<br/><b>Beihang University, Beijing, China</b><br/>5Google Brain, Mountain View, CA 94043
+</td><td>('2241528', 'Seunghoon Hong', 'seunghoon hong')<br/>('10668384', 'Xunyu Lin', 'xunyu lin')<br/>('1697141', 'Honglak Lee', 'honglak lee')<br/>('1768964', 'Jimei Yang', 'jimei yang')<br/>('1711926', 'Ruben Villegas', 'ruben villegas')</td><td></td></tr><tr><td>4526992d4de4da2c5fae7a5ceaad6b65441adf9d</td><td>System for Medical Mask Detection
+<br/>in the Operating Room Through
+<br/>Facial Attributes
+<br/>A. Nieto-Rodr´ıguez, M. Mucientes(B), and V.M. Brea
+<br/>Center for Research in Information Technologies (CiTIUS),
+<br/><b>University of Santiago de Compostela, Santiago de Compostela, Spain</b></td><td></td><td>{adrian.nietorodriguez,manuel.mucientes,victor.brea}@usc.es
+</td></tr><tr><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td><td>Fine-grained Evaluation on Face Detection in the Wild
+<br/>Center for Biometrics and Security Research & National Laboratory of Pattern Recognition
+<br/><b>Institute of Automation, Chinese Academy of Sciences, China</b></td><td>('1716231', 'Bin Yang', 'bin yang')<br/>('1721677', 'Junjie Yan', 'junjie yan')<br/>('1718623', 'Zhen Lei', 'zhen lei')<br/>('34679741', 'Stan Z. Li', 'stan z. li')</td><td>{yb.derek,yanjjie}@gmail.com
+<br/>{zlei,szli}@nlpr.ia.ac.cn
+</td></tr><tr><td>45efd6c2dd4ca19eed38ceeb7c2c5568231451e1</td><td>Comparative Analysis of Statistical Approach
+<br/>for Face Recognition
+<br/><b>CMR Institute of Technology, Hyderabad, (India</b></td><td>('39463904', 'M.Janga Reddy', 'm.janga reddy')</td><td></td></tr><tr><td>45f3bf505f1ce9cc600c867b1fb2aa5edd5feed8</td><td></td><td></td><td></td></tr><tr><td>4560491820e0ee49736aea9b81d57c3939a69e12</td><td>Investigating the Impact of Data Volume and
+<br/>Domain Similarity on Transfer Learning
+<br/>Applications
+<br/>State Farm Insurance, Bloomington IL 61710, USA,
+</td><td>('30492517', 'Michael Bernico', 'michael bernico')<br/>('50024782', 'Yuntao Li', 'yuntao li')<br/>('41092475', 'Dingchao Zhang', 'dingchao zhang')</td><td>michael.bernico.qepz@statefarm.com
+</td></tr><tr><td>4571626d4d71c0d11928eb99a3c8b10955a74afe</td><td>Geometry Guided Adversarial Facial Expression Synthesis
+<br/>1National Laboratory of Pattern Recognition, CASIA
+<br/>2Center for Research on Intelligent Perception and Computing, CASIA
+<br/>3Center for Excellence in Brain Science and Intelligence Technology, CAS
+</td><td>('3051419', 'Lingxiao Song', 'lingxiao song')<br/>('9702077', 'Zhihe Lu', 'zhihe lu')<br/>('1705643', 'Ran He', 'ran he')<br/>('1757186', 'Zhenan Sun', 'zhenan sun')<br/>('1688870', 'Tieniu Tan', 'tieniu tan')</td><td></td></tr><tr><td>4534d78f8beb8aad409f7bfcd857ec7f19247715</td><td>Under review as a conference paper at ICLR 2017
+<br/>TRANSFORMATION-BASED MODELS OF VIDEO
+<br/>SEQUENCES
+<br/>Facebook AI Research
+</td><td>('39248118', 'Anitha Kannan', 'anitha kannan')<br/>('3149531', 'Arthur Szlam', 'arthur szlam')<br/>('1687325', 'Du Tran', 'du tran')</td><td>joost@joo.st, {akannan, ranzato, aszlam, trandu, soumith}@fb.com
+</td></tr><tr><td>459e840ec58ef5ffcee60f49a94424eb503e8982</td><td>One-shot Face Recognition by Promoting Underrepresented Classes
+<br/>Microsoft
+<br/>One Microsoft Way, Redmond, Washington, United States
+</td><td>('3133575', 'Yandong Guo', 'yandong guo')<br/>('1684635', 'Lei Zhang', 'lei zhang')</td><td>{yandong.guo, leizhang}@microsoft.com
+</td></tr><tr><td>45fbeed124a8956477dbfc862c758a2ee2681278</td><td></td><td></td><td></td></tr><tr><td>451c42da244edcb1088e3c09d0f14c064ed9077e</td><td>1964
+<br/>© EURASIP, 2011 - ISSN 2076-1465
+<br/>19th European Signal Processing Conference (EUSIPCO 2011)
+<br/>INTRODUCTION
+</td><td></td><td></td></tr><tr><td>4568063b7efb66801e67856b3f572069e774ad33</td><td>Correspondence Driven Adaptation for Human Profile Recognition
+<br/><b>NEC Laboratories America, Inc</b><br/>2Huawei Technologies (USA)
+<br/>Cupertino, CA 95014
+<br/>Santa Clara, CA 95050
+</td><td>('2909406', 'Ming Yang', 'ming yang')<br/>('1682028', 'Shenghuo Zhu', 'shenghuo zhu')<br/>('39157653', 'Fengjun Lv', 'fengjun lv')<br/>('38701713', 'Kai Yu', 'kai yu')</td><td>{myang,zsh,kyu}@sv.nec-labs.com
+<br/>felix.Lv@huawei.com
+</td></tr><tr><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td><td>Coding Facial Expressions with Gabor Wavelets
+<br/><b>ATR Human Information Processing Research Laboratory</b><br/>2-2 Hikaridai, Seika-cho
+<br/>Soraku-gun, Kyoto 619-02, Japan
+<br/><b>Kyushu University</b></td><td>('34801422', 'Shigeru Akamatsu', 'shigeru akamatsu')<br/>('40533190', 'Miyuki Kamachi', 'miyuki kamachi')<br/>('8365437', 'Jiro Gyoba', 'jiro gyoba')</td><td>mlyons@hip.atr.co.jp
+</td></tr><tr><td>4542273a157bfd4740645a6129d1784d1df775d2</td><td>FaceRipper
+<br/>Automatic Face Indexer and Tagger for Personal
+<br/>Albums and Videos
+<br/>A PROJECT REPORT
+<br/>SUBMITTED IN PARTIAL FULFILMENT OF THE
+<br/>REQUIREMENTS FOR THE DEGREE OF
+<br/>Master of Engineering
+<br/>IN
+<br/>COMPUTER SCIENCE AND ENGINEERING
+<br/>by
+<br/>Computer Science and Automation
+<br/><b>Indian Institute of Science</b><br/>BANGALORE – 560 012
+<br/>July 2007
+</td><td>('2819449', 'Mehul Parsana', 'mehul parsana')</td><td></td></tr><tr><td>4511e09ee26044cb46073a8c2f6e1e0fbabe33e8</td><td></td><td></td><td></td></tr><tr><td>45513d0f2f5c0dac5b61f9ff76c7e46cce62f402</td><td>LEE,GRAUMAN:FACEDISCOVERYWITHSOCIALCONTEXT
+<br/>Face Discovery with Social Context
+<br/>https://webspace.utexas.edu/yl3663/~ylee/
+<br/>http://www.cs.utexas.edu/~grauman/
+<br/><b>University of Texas at Austin</b><br/>Austin, TX, USA
+</td><td>('1883898', 'Yong Jae Lee', 'yong jae lee')<br/>('1794409', 'Kristen Grauman', 'kristen grauman')</td><td></td></tr><tr><td>45e459462a80af03e1bb51a178648c10c4250925</td><td>LCrowdV: Generating Labeled Videos for
+<br/>Simulation-based Crowd Behavior Learning
+<br/><b>The University of North Carolina at Chapel Hill</b></td><td>('3422427', 'Ernest Cheung', 'ernest cheung')<br/>('3422442', 'Tsan Kwong Wong', 'tsan kwong wong')<br/>('2718563', 'Aniket Bera', 'aniket bera')<br/>('31843833', 'Xiaogang Wang', 'xiaogang wang')<br/>('1699159', 'Dinesh Manocha', 'dinesh manocha')</td><td></td></tr><tr><td>458677de7910a5455283a2be99f776a834449f61</td><td>Face Image Retrieval Using Facial Attributes By
+<br/>K-Means
+<br/>[1]I.Sudha, [2]V.Saradha, [3]M.Tamilselvi, [4]D.Vennila
+<br/>[1]AP, Department of CSE ,[2][3][4] B.Tech(CSE)
+<br/><b>Achariya college of Engineering Technology</b><br/>Puducherry
+</td><td></td><td></td></tr><tr><td>45a6333fc701d14aab19f9e2efd59fe7b0e89fec</td><td>HAND POSTURE DATASET CREATION FOR GESTURE
+<br/>RECOGNITION
+<br/>Luis Anton-Canalis
+<br/>Instituto de Sistemas Inteligentes y Aplicaciones Numericas en Ingenieria
+<br/>Campus Universitario de Tafira, 35017 Gran Canaria, Spain
+<br/>Elena Sanchez-Nielsen
+<br/>Departamento de E.I.O. y Computacion
+<br/>38271 Universidad de La Laguna, Spain
+<br/>Keywords:
+<br/>Image understanding, Gesture recognition, Hand dataset.
+</td><td></td><td></td></tr><tr><td>450c6a57f19f5aa45626bb08d7d5d6acdb863b4b</td><td>Towards Interpretable Face Recognition
+<br/><b>Michigan State University</b><br/>2 Adobe Inc.
+<br/>3 Aibee
+</td><td>('32032812', 'Bangjie Yin', 'bangjie yin')<br/>('1849929', 'Luan Tran', 'luan tran')<br/>('3131569', 'Haoxiang Li', 'haoxiang li')<br/>('1720987', 'Xiaohui Shen', 'xiaohui shen')<br/>('1759169', 'Xiaoming Liu', 'xiaoming liu')</td><td>{yinbangj, tranluan, liuxm}@msu.edu, xshen@adobe.com, lhxustcer@gmail.com
+</td></tr><tr><td>1f9b2f70c24a567207752989c5bd4907442a9d0f</td><td>Deep Representations to Model User ‘Likes’
+<br/><b>School of Computer Engineering, Nanyang Technological University, Singapore</b><br/><b>Institute for Infocomm Research, Singapore</b><br/><b>QCIS, University of Technology, Sydney</b></td><td>('2731733', 'Sharath Chandra Guntuku', 'sharath chandra guntuku')<br/>('10638646', 'Joey Tianyi Zhou', 'joey tianyi zhou')<br/>('1872875', 'Sujoy Roy', 'sujoy roy')<br/>('1807998', 'Ivor W. Tsang', 'ivor w. tsang')</td><td>sharathc001@e.ntu.edu.sg, tzhou1@ntu.edu.sg, wslin@ntu.edu.sg
+<br/>sujoy@i2r.a-star.edu.sg
+<br/>ivor.tsang@uts.edu.au
+</td></tr><tr><td>1fe1bd6b760e3059fff73d53a57ce3a6079adea1</td><td>SINGH ET AL.: SCALING BAG-OF-VISUAL-WORDS GENERATION
+<br/>Fast-BoW: Scaling Bag-of-Visual-Words
+<br/>Generation
+<br/>Visual Learning & Intelligence Group
+<br/>Department of Computer Science and
+<br/>Engineering
+<br/><b>Indian Institute of Technology</b><br/>Hyderabad
+<br/>Kandi, Sangareddy, Telangana, India
+</td><td>('40624178', 'Dinesh Singh', 'dinesh singh')<br/>('51292354', 'Abhijeet Bhure', 'abhijeet bhure')<br/>('51305895', 'Sumit Mamtani', 'sumit mamtani')<br/>('34358756', 'C. Krishna Mohan', 'c. krishna mohan')</td><td>cs14resch11003@iith.ac.in
+<br/>cs15btech11001@iith.ac.in
+<br/>cs15btech11022@iith.ac.in
+<br/>ckm@iith.ac.in
+</td></tr><tr><td>1f05473c587e2a3b587f51eb808695a1c10bc153</td><td>Towards Good Practices for Very Deep Two-Stream ConvNets
+<br/><b>The Chinese University of Hong Kong, Hong Kong</b><br/><b>Shenzhen key lab of Comp. Vis. and Pat. Rec., Shenzhen Institutes of Advanced Technology, CAS, China</b></td><td>('33345248', 'Limin Wang', 'limin wang')<br/>('3331521', 'Yuanjun Xiong', 'yuanjun xiong')<br/>('1915826', 'Zhe Wang', 'zhe wang')<br/>('33427555', 'Yu Qiao', 'yu qiao')</td><td>{07wanglimin,bitxiong,buptwangzhe2012}@gmail.com, yu.qiao@siat.ac.cn
+</td></tr><tr><td>1fa3948af1c338f9ae200038c45adadd2b39a3e4</td><td>Computational Explorations of Split Architecture in Modeling Face and Object
+<br/>Recognition
+<br/><b>University of California San Diego</b><br/>9500 Gilman Drive #0404, La Jolla, CA 92093, USA
+<br/><b>University of California San Diego</b><br/>9500 Gilman Drive #0515, La Jolla, CA 92093, USA
+</td><td></td><td>Janet Hui-wen Hsiao (jhsiao@cs.ucsd.edu)
+<br/>Garrison W. Cottrell (gary@ucsd.edu)
+<br/>Danke Shieh (danke@ucsd.edu)
+</td></tr><tr><td>1ffe20eb32dbc4fa85ac7844178937bba97f4bf0</td><td>Face Clustering: Representation and Pairwise
+<br/>Constraints
+</td><td>('9644181', 'Yichun Shi', 'yichun shi')<br/>('40653304', 'Charles Otto', 'charles otto')<br/>('6680444', 'Anil K. Jain', 'anil k. jain')</td><td></td></tr><tr><td>1f8304f4b51033d2671147b33bb4e51b9a1e16fe</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Beyond Trees:
+<br/>MAP Inference in MRFs via Outer-Planar Decomposition
+<br/>Received: date / Accepted: date
+</td><td>('1746610', 'Dhruv Batra', 'dhruv batra')</td><td></td></tr><tr><td>1f89439524e87a6514f4fbe7ed34bda4fd1ce286</td><td><b>Carnegie Mellon University</b><br/>Department of Statistics
+<br/><b>Dietrich College of Humanities and Social Sciences</b><br/>9-2005
+<br/>Devising Face Authentication System and
+<br/>Performance Evaluation Based on Statistical
+<br/>Models
+<br/><b>Carnegie Mellon University</b><br/>Follow this and additional works at: http://repository.cmu.edu/statistics
+<br/>Part of the Statistics and Probability Commons
+</td><td>('2046854', 'Sinjini Mitra', 'sinjini mitra')<br/>('1680307', 'Anthony Brockwell', 'anthony brockwell')<br/>('1794486', 'Marios Savvides', 'marios savvides')<br/>('1684961', 'Stephen E. Fienberg', 'stephen e. fienberg')</td><td>Research Showcase @ CMU
+<br/>Carnegie Mellon University, abrock@stat.cmu.edu
+<br/>Carnegie Mellon University, msavvid@cs.cmu.edu
+<br/>Carnegie Mellon University, fienberg@stat.cmu.edu
+<br/>This Technical Report is brought to you for free and open access by the Dietrich College of Humanities and Social Sciences at Research Showcase @
+<br/>CMU. It has been accepted for inclusion in Department of Statistics by an authorized administrator of Research Showcase @ CMU. For more
+<br/>information, please contact research-showcase@andrew.cmu.edu.
+</td></tr><tr><td>1f9ae272bb4151817866511bd970bffb22981a49</td><td>An Iterative Regression Approach for Face Pose Estima-
+<br/>tion from RGB Images
+<br/>This paper presents a iterative optimization method, explicit shape regression, for face pose
+<br/>detection and localization. The regression function is learnt to find out the entire facial shape
+<br/>and minimize the alignment errors. A cascaded learning framework is employed to enhance
+<br/>shape constraint during detection. A combination of a two-level boosted regression, shape
+<br/>performance. In this paper, we have explain the advantage of ESR for deformable object like
+<br/>face pose estimation and reveal its generic applications of the method. In the experiment,
+<br/>we compare the results with different work and demonstrate the accuracy and robustness in
+<br/>different scenarios.
+<br/>Introduction
+<br/>Pose estimation is an important problem in computer vision, and has enabled many practical ap-
+<br/>plication from face expression 1 to activity tracking 2. Researchers design a new algorithm called
+<br/>explicit shape regression (ESR) to find out face alignment from a picture 3. Figure 1 shows how
+<br/>the system uses ESR to learn a shape of a human face image. A simple way to identify a face is to
+<br/>find out facial landmarks like eyes, nose, mouth and chin. The researchers define a face shape S
+<br/>and S is composed of Nf p facial landmarks. Therefore, they get S = [x1, y1, ..., xNf p, yNf p]T . The
+<br/>objective of the researchers is to estimate a shape S of a face image. The way to know the accuracy
+</td><td>('3988780', 'Wenye He', 'wenye he')</td><td></td></tr><tr><td>1fd6004345245daf101c98935387e6ef651cbb55</td><td>Learning Symmetry Features for Face Detection
+<br/>Based on Sparse Group Lasso
+<br/>Center for Research on Intelligent Perception and Computing,
+<br/><b>National Laboratory of Pattern Recognition, Institute of Automation</b><br/>Chinese Academy of Sciences, Beijing, China
+</td><td>('39763795', 'Qi Li', 'qi li')<br/>('1757186', 'Zhenan Sun', 'zhenan sun')<br/>('1705643', 'Ran He', 'ran he')<br/>('1688870', 'Tieniu Tan', 'tieniu tan')</td><td>{qli,znsun,rhe,tnt}@nlpr.ia.ac.cn
+</td></tr><tr><td>1fc249ec69b3e23856b42a4e591c59ac60d77118</td><td>Evaluation of a 3D-aided Pose Invariant 2D Face Recognition System
+<br/>Computational Biomedicine Lab
+<br/>4800 Calhoun Rd. Houston, TX, USA
+</td><td>('5084124', 'Xiang Xu', 'xiang xu')<br/>('26401746', 'Ha A. Le', 'ha a. le')<br/>('39634395', 'Pengfei Dou', 'pengfei dou')<br/>('2461369', 'Yuhang Wu', 'yuhang wu')<br/>('1706204', 'Ioannis A. Kakadiaris', 'ioannis a. kakadiaris')</td><td>{xxu18, hale4, pdou, ywu35, ikakadia}@central.uh.edu
+</td></tr><tr><td>1fbde67e87890e5d45864e66edb86136fbdbe20e</td><td>The Action Similarity Labeling Challenge
+</td><td>('3294355', 'Orit Kliper-Gross', 'orit kliper-gross')<br/>('1756099', 'Tal Hassner', 'tal hassner')<br/>('1776343', 'Lior Wolf', 'lior wolf')</td><td></td></tr><tr><td>1f41a96589c5b5cee4a55fc7c2ce33e1854b09d6</td><td>Demographic Estimation from Face Images:
+<br/>Human vs. Machine Performance
+</td><td>('34393045', 'Hu Han', 'hu han')<br/>('40653304', 'Charles Otto', 'charles otto')<br/>('1759169', 'Xiaoming Liu', 'xiaoming liu')<br/>('6680444', 'Anil K. Jain', 'anil k. jain')</td><td></td></tr><tr><td>1fd2ed45fb3ba77f10c83f0eef3b66955645dfe0</td><td></td><td></td><td></td></tr><tr><td>1fe59275142844ce3ade9e2aed900378dd025880</td><td>Facial Landmark Detection via Progressive Initialization
+<br/><b>National University of Singapore</b><br/>Singapore 117576
+</td><td>('3124720', 'Shengtao Xiao', 'shengtao xiao')</td><td>xiao shengtao@u.nus.edu, eleyans@nus.edu.sg, ashraf@nus.edu.sg
+</td></tr><tr><td>1f2d12531a1421bafafe71b3ad53cb080917b1a7</td><td></td><td></td><td></td></tr><tr><td>1fe121925668743762ce9f6e157081e087171f4c</td><td>Unsupervised Learning of Overcomplete Face Descriptors
+<br/>Center for Machine Vision Research
+<br/><b>University of Oulu</b></td><td>('32683737', 'Juha Ylioinas', 'juha ylioinas')<br/>('1776374', 'Juho Kannala', 'juho kannala')<br/>('1751372', 'Abdenour Hadid', 'abdenour hadid')</td><td>firstname.lastname@ee.oulu.fi
+</td></tr><tr><td>1fefb2f8dd1efcdb57d5c2966d81f9ab22c1c58d</td><td>vExplorer: A Search Method to Find Relevant YouTube Videos for Health
+<br/>Researchers
+<br/>IBM Research, Cambridge, MA, USA
+</td><td>('1764750', 'Hillol Sarker', 'hillol sarker')<br/>('3456866', 'Murtaza Dhuliawala', 'murtaza dhuliawala')<br/>('31633051', 'Nicholas Fay', 'nicholas fay')<br/>('15793829', 'Amar Das', 'amar das')</td><td></td></tr><tr><td>1fdeba9c4064b449231eac95e610f3288801fd3e</td><td>Fine-Grained Head Pose Estimation Without Keypoints
+<br/><b>Georgia Institute of Technology</b></td><td>('31601235', 'Nataniel Ruiz', 'nataniel ruiz')<br/>('39832600', 'Eunji Chong', 'eunji chong')<br/>('1692956', 'James M. Rehg', 'james m. rehg')</td><td>{nataniel.ruiz, eunjichong, rehg}@gatech.edu
+</td></tr><tr><td>1f8e44593eb335c2253d0f22f7f9dc1025af8c0d</td><td>Fine-tuning regression forests votes for object alignment in the wild.
+<br/>Yang, H; Patras, I
+<br/>© 2017 IEEE. Personal use of this material is permitted. Permission from IEEE must be
+<br/><b>obtained for all other uses, in any current or future media, including reprinting/republishing</b><br/>this material for advertising or promotional purposes, creating new collective works, for resale
+<br/>or redistribution to servers or lists, or reuse of any copyrighted component of this work in
+<br/>other works.
+<br/>For additional information about this publication click this link.
+<br/>http://qmro.qmul.ac.uk/xmlui/handle/123456789/22607
+<br/>Information about this research object was correct at the time of download; we occasionally
+<br/>make corrections to records, please therefore check the published record when citing. For
+</td><td></td><td>more information contact scholarlycommunications@qmul.ac.uk
+</td></tr><tr><td>1f94734847c15fa1da68d4222973950d6b683c9e</td><td>Embedding Label Structures for Fine-Grained Feature Representation
+<br/>UNC Charlotte
+<br/>Charlotte, NC 28223
+<br/>NEC Lab America
+<br/>Cupertino, CA 95014
+<br/>NEC Lab America
+<br/>Cupertino, CA 95014
+<br/>UNC Charlotte
+<br/>Charlotte, NC 28223
+</td><td>('2739998', 'Xiaofan Zhang', 'xiaofan zhang')<br/>('1757386', 'Feng Zhou', 'feng zhou')<br/>('1695082', 'Yuanqing Lin', 'yuanqing lin')<br/>('1753384', 'Shaoting Zhang', 'shaoting zhang')</td><td>xzhang35@uncc.edu
+<br/>feng@nec-labs.com
+<br/>ylin@nec-labs.com
+<br/>szhang16@uncc.edu
+</td></tr><tr><td>1f745215cda3a9f00a65166bd744e4ec35644b02</td><td>Facial Cosmetics Database and Impact Analysis on
+<br/>Automatic Face Recognition
+<br/># Computer Science Department, TU Muenchen
+<br/>Boltzmannstr. 3, 85748 Garching b. Muenchen, Germany
+<br/>∗ Multimedia Communications Department, EURECOM
+<br/>450 Route des Chappes, 06410 Biot, France
+</td><td>('38996894', 'Marie-Lena Eckert', 'marie-lena eckert')<br/>('1862703', 'Neslihan Kose', 'neslihan kose')<br/>('1709849', 'Jean-Luc Dugelay', 'jean-luc dugelay')</td><td>1 marie-lena.eckert@mytum.de
+<br/>2 kose@eurecom.fr
+<br/>3 jld@eurecom.fr
+</td></tr><tr><td>1fff309330f85146134e49e0022ac61ac60506a9</td><td>Data-Driven Sparse Sensor Placement for Reconstruction
+</td><td>('37119658', 'Krithika Manohar', 'krithika manohar')<br/>('1824880', 'Bingni W. Brunton', 'bingni w. brunton')<br/>('1937069', 'J. Nathan Kutz', 'j. nathan kutz')<br/>('3083169', 'Steven L. Brunton', 'steven l. brunton')</td><td>∗Corresponding author: kmanohar@uw.edu
+</td></tr><tr><td>1fd3dbb6e910708fa85c8a86e17ba0b6fef5617c</td><td><b>ARISTOTLE UNIVERSITY OF THESSALONIKI</b><br/>FACULTY OF SCIENCES
+<br/>DEPARTMENT OF INFORMATICS
+<br/>POSTGRADUATE STUDIES PROGRAMME
+<br/>Age interval and gender prediction using PARAFAC2 on
+<br/>speech recordings and face images
+<br/>Supervisor: Professor Kotropoulos Constantine
+<br/>A thesis submitted in partial fulfillment of the requirements
+<br/>for the degree of Master of Science
+<br/>July 2016
+</td><td></td><td></td></tr><tr><td>1f24cef78d1de5aa1eefaf344244dcd1972797e8</td><td>Outlier-Robust Tensor PCA
+<br/><b>National University of Singapore, Singapore</b></td><td>('33481412', 'Pan Zhou', 'pan zhou')<br/>('33221685', 'Jiashi Feng', 'jiashi feng')</td><td>pzhou@u.nus.edu
+<br/>elefjia@nus.edu.sg
+</td></tr><tr><td>1fe990ca6df273de10583860933d106298655ec8</td><td><b>College of Information Science and Engineering</b><br/><b>Hunan University</b><br/>Changsha, 410082 P.R. China
+<br/>In this paper, we propose a wavelet-based illumination normalization method for
+<br/>face recognition against different directions and strength of light. Here, by one-level
+<br/>discrete wavelet transform, a given face image is first decomposed into low frequency
+<br/>and high frequency components, respectively, and then the two components are pro-
+<br/>cessed separately through contrast enhancement to eliminate the effect of illumination
+<br/>variations and enhance the detailed edge information. Finally the normalized image is
+<br/>obtained through the inverse discrete wavelet transform. Experimental results on the
+<br/>Yale B, the extended Yale B and CMU PIE face databases show that the proposed
+<br/>method can effectively reduce the effect of illumination variations on face recognition.
+<br/>Keywords: face recognition, illumination normalization, discrete wavelet transform, edge
+<br/>enhancement, face representation
+<br/>1. INTRODUCTION
+<br/>JOURNAL OF INFORMATION SCIENCE AND ENGINEERING 31, 1711-1731 (2015)
+<br/>A Wavelet-Based Image Preprocessing Method
+<br/>for Illumination Insensitive Face Recognition
+<br/>Face recognition plays an important role in pattern recognition and computer vision
+<br/>due to its wide applications in human computer interaction, information security and
+<br/>access control, law enforcement and entertainment [1]. Various methods have been pro-
+<br/>posed for face recognition, such as PCA [2], LDA [3], LFA [4], EBGM [5], probabilistic
+<br/>and Bayesian matching [6] and SVM [7]. These methods can yield good performance
+<br/>when face images are well frontally illuminated. Existing studies have proved that face
+<br/>recognition for the same face with different illumination conditions is more difficult than
+<br/>the perception of face identity [8, 9]. The reason is that an object's appearance largely
+<br/>depends on the way in which it is viewed. Illumination variations mainly consist of the
+<br/>lighting direction and the lighting intensity. Usually, slight changes in illumination pro-
+<br/>duce dramatical changes in the face appearance. So, the performance of face recognition
+<br/>is highly sensitive to the illumination condition. For example, the unsuitable lighting
+<br/>direction and intensity may lead to underexposed or overexposed regions over the face,
+<br/>and weaken the discrimination of face features such as skin texture, eye detail, etc.
+<br/>Therefore, illumination normalization is a very important task for face recognition under
+<br/>varying illumination.
+<br/>To make face recognition relatively insensitive to illumination variations, many
+<br/>methods have been proposed with the goal of illumination normalization, illumination-
+<br/>invariant feature extraction or illumination variation modeling [10]. Illumination-inva-
+<br/>riant approaches generally fall into three classes. The first class is to preprocess face
+<br/>images by using some simply techniques, such as logarithm transform and histogram
+<br/>Received March 26, 2014; revised May 26, 2014; accepted July 17, 2014.
+<br/>Communicated by Chung-Lin Huang.
+<br/>1711
+</td><td>('2078993', 'Xiaochao Zhao', 'xiaochao zhao')<br/>('2138422', 'Yaping Lin', 'yaping lin')<br/>('2431083', 'Bo Ou', 'bo ou')<br/>('1824216', 'Junfeng Yang', 'junfeng yang')</td><td>E-mail: {s12103017; yplin; oubo; B12100031}@hnu.edu.cn
+</td></tr><tr><td>1feeab271621128fe864e4c64bab9b2e2d0ed1f1</td><td>Article
+<br/>Perception-Link Behavior Model: Supporting
+<br/>a Novel Operator Interface for a Customizable
+<br/>Anthropomorphic Telepresence Robot
+<br/><b>BeingTogether Centre, Institute for Media Innovation, Singapore 637553, Singapore</b><br/><b>Robotic Research Centre, Nanyang Technological University, Singapore 639798, Singapore</b><br/>Received: 15 May 2017; Accepted: 15 July 2017; Published: 20 July 2017
+</td><td>('1768723', 'William Gu', 'william gu')<br/>('9216152', 'Gerald Seet', 'gerald seet')<br/>('1695679', 'Nadia Magnenat-Thalmann', 'nadia magnenat-thalmann')</td><td>mglseet@ntu.edu.sg (G.S.); NADIATHALMANN@ntu.edu.sg (N.M.-T.)
+<br/>* Correspondence: GUYU0007@e.ntu.edu.sg
+</td></tr><tr><td>73b90573d272887a6d835ace89bfaf717747c59b</td><td>Feature Disentangling Machine - A Novel
+<br/>Approach of Feature Selection and Disentangling
+<br/>in Facial Expression Analysis
+<br/><b>University of South Carolina, USA</b><br/><b>Center for Computational Intelligence, Nanyang Technology University, Singapore</b><br/>3 Center for Quantum Computation and Intelligent Systems,
+<br/><b>University of Technology, Australia</b></td><td>('40205868', 'Ping Liu', 'ping liu')<br/>('10638646', 'Joey Tianyi Zhou', 'joey tianyi zhou')<br/>('3091647', 'Zibo Meng', 'zibo meng')<br/>('49107074', 'Shizhong Han', 'shizhong han')<br/>('1686235', 'Yan Tong', 'yan tong')</td><td></td></tr><tr><td>73f467b4358ac1cafb57f58e902c1cab5b15c590</td><td> ISSN 0976 3724 47
+<br/>Combination of Dimensionality Reduction Techniques for Face
+<br/>Image Retrieval: A Review
+<br/><b>M.Tech Scholar, MES College of Engineering, Kuttippuram</b><br/>Kerala
+<br/><b>MES College of Engineering, Kuttippuram</b><br/>Kerala
+</td><td></td><td>fousisadath@gmail.com
+<br/>Jahfar.ali@gmail.com
+</td></tr><tr><td>7323b594d3a8508f809e276aa2d224c4e7ec5a80</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+<br/>An Experimental Evaluation of Covariates
+<br/>Effects on Unconstrained Face Verification
+</td><td>('2927406', 'Boyu Lu', 'boyu lu')<br/>('36407236', 'Jun-Cheng Chen', 'jun-cheng chen')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td></td></tr><tr><td>732e8d8f5717f8802426e1b9debc18a8361c1782</td><td>Unimodal Probability Distributions for Deep Ordinal Classification
+</td><td>('12757989', 'Christopher Beckham', 'christopher beckham')</td><td></td></tr><tr><td>73ed64803d6f2c49f01cffef8e6be8fc9b5273b8</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Cooking in the kitchen: Recognizing and Segmenting Human
+<br/>Activities in Videos
+<br/>Received: date / Accepted: date
+</td><td>('51267303', 'Hilde Kuehne', 'hilde kuehne')</td><td></td></tr><tr><td>7306d42ca158d40436cc5167e651d7ebfa6b89c1</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Transductive Zero-Shot Action Recognition by
+<br/>Word-Vector Embedding
+<br/>Received: date / Accepted: date
+</td><td>('47158489', 'Xun Xu', 'xun xu')</td><td></td></tr><tr><td>734cdda4a4de2a635404e4c6b61f1b2edb3f501d</td><td>Tie and Guan EURASIP Journal on Image and Video Processing 2013, 2013:8
+<br/>http://jivp.eurasipjournals.com/content/2013/1/8
+<br/>R ES EAR CH
+<br/>Open Access
+<br/>Automatic landmark point detection and tracking
+<br/>for human facial expressions
+</td><td>('1721867', 'Ling Guan', 'ling guan')</td><td></td></tr><tr><td>739d400cb6fb730b894182b29171faaae79e3f01</td><td>A New Regularized Orthogonal Local Fisher Discriminant Analysis for Image
+<br/>Feature Extraction
+<br/>dept. name of organization, name of organization, City, Country
+<br/><b>School of Management Engineering, Henan Institute of Engineering, Zhengzhou 451191, P.R. China</b><br/><b>Institute of Information Science, Beijing Jiaotong University, Beijing 100044, P.R. China</b></td><td>('2539310', 'ZHONGFENG WANG', 'zhongfeng wang')<br/>('2539310', 'ZHONGFENG WANG', 'zhongfeng wang')<br/>('1718667', 'Zhan WANG', 'zhan wang')</td><td></td></tr><tr><td>732e4016225280b485c557a119ec50cffb8fee98</td><td>Are all training examples equally valuable?
+<br/><b>Massachusetts Institute of Technology</b><br/>Universitat Oberta de Catalunya
+<br/>Agata Lapedriza
+<br/>Computer Vision Center
+<br/><b>Massachusetts Institute of Technology</b><br/><b>Massachusetts Institute of Technology</b><br/><b>Massachusetts Institute of Technology</b></td><td>('2367683', 'Hamed Pirsiavash', 'hamed pirsiavash')<br/>('3326347', 'Zoya Bylinskii', 'zoya bylinskii')<br/>('1690178', 'Antonio Torralba', 'antonio torralba')</td><td>hpirsiav@mit.edu
+<br/>agata@mit.edu
+<br/>zoya@mit.edu
+<br/>torralba@mit.edu
+</td></tr><tr><td>7373c4a23684e2613f441f2236ed02e3f9942dd4</td><td>This document is downloaded from DR-NTU, Nanyang Technological
+<br/><b>University Library, Singapore</b><br/>Title
+<br/>Feature extraction through binary pattern of phase
+<br/>congruency for facial expression recognition
+<br/>Author(s)
+<br/>Shojaeilangari, Seyedehsamaneh; Yau, Wei-Yun; Li, Jun;
+<br/>Teoh, Eam Khwang
+<br/>Citation
+<br/>Shojaeilangari, S., Yau, W. Y., Li, J., & Teoh, E. K.
+<br/>(2012). Feature extraction through binary pattern of
+<br/>phase congruency for facial expression recognition. 12th
+<br/>International Conference on Control Automation Robotics
+<br/>& Vision (ICARCV), 166-170.
+<br/>Date
+<br/>2012
+<br/>URL
+<br/>http://hdl.handle.net/10220/18012
+<br/>Rights
+<br/>© 2012 IEEE. Personal use of this material is permitted.
+<br/>Permission from IEEE must be obtained for all other
+<br/><b>uses, in any current or future media, including</b><br/>reprinting/republishing this material for advertising or
+<br/>promotional purposes, creating new collective works, for
+<br/>resale or redistribution to servers or lists, or reuse of any
+<br/>copyrighted component of this work in other works. The
+<br/>published version is available at:
+<br/>[http://dx.doi.org/10.1109/ICARCV.2012.6485152].
+</td><td></td><td></td></tr><tr><td>732686d799d760ccca8ad47b49a8308b1ab381fb</td><td>Running head: TEACHERS’ DIFFERING BEHAVIORS
+<br/>1
+<br/>Graduate School of Psychology
+<br/>RESEARCH MASTER’S PSYCHOLOGY THESıS REPORT
+<br/>
+<br/>Teachers’ differing classroom behaviors:
+<br/>The role of emotional sensitivity and cultural tolerance
+<br/>Research Master’s, Social Psychology
+<br/>Ethics Committee Reference Code: 2016-SP-7084
+</td><td>('7444483', 'Agneta Fischer', 'agneta fischer')<br/>('22253276', 'Disa Sauter', 'disa sauter')<br/>('2808612', 'Monique Volman', 'monique volman')</td><td></td></tr><tr><td>73fbdd57270b9f91f2e24989178e264f2d2eb7ae</td><td>978-1-4673-0046-9/12/$26.00 ©2012 IEEE
+<br/>1945
+<br/>ICASSP 2012
+</td><td></td><td></td></tr><tr><td>738a985fba44f9f5acd516e07d0d9578f2ffaa4e</td><td>MACHINE LEARNING TECHNIQUES FOR FACE ANALYSIS
+<br/>Man Machine Interaction Group
+<br/><b>Delft University of Technology</b><br/>Mekelweg 4, 2628 CD Delft
+<br/>The Netherlands
+<br/>from
+<br/>learning, pattern recognition, classifiers, face
+<br/>KEYWORDS
+<br/>Machine
+<br/>detection, facial expression recognition.
+</td><td>('2866326', 'D. Datcu', 'd. datcu')</td><td>E-mail: {D.Datcu, L.J.M.Rothkrantz}@ewi.tudelft.nl
+</td></tr><tr><td>73fd7e74457e0606704c5c3d3462549f1b2de1ad</td><td>Learning Predictable and Discriminative Attributes
+<br/>for Visual Recognition
+<br/><b>School of Software, Tsinghua University, Beijing 100084, China</b></td><td>('34811036', 'Yuchen Guo', 'yuchen guo')<br/>('38329336', 'Guiguang Ding', 'guiguang ding')<br/>('39665252', 'Xiaoming Jin', 'xiaoming jin')<br/>('1751179', 'Jianmin Wang', 'jianmin wang')</td><td>yuchen.w.guo@gmail.com, {dinggg,xmjin,jimwang}@tsinghua.edu.cn,
+</td></tr><tr><td>73c5bab5c664afa96b1c147ff21439135c7d968b</td><td>Whitened LDA for Face Recognition ∗
+<br/>Ubiquitous Computing Lab
+<br/><b>Kyung Hee University</b><br/>Suwon, Korea
+<br/>Ubiquitous Computing Lab
+<br/><b>Kyung Hee University</b><br/>Suwon, Korea
+<br/>Mobile Computing Lab
+<br/><b>SungKyunKwan University</b><br/>Suwon, Korea
+</td><td>('1687579', 'Vo Dinh Minh Nhat', 'vo dinh minh nhat')<br/>('1700806', 'Sungyoung Lee', 'sungyoung lee')<br/>('1718666', 'Hee Yong Youn', 'hee yong youn')</td><td>vdmnhat@oslab.khu.ac.kr
+<br/>sylee@oslab.khu.ac.kr
+<br/>youn@ece.skku.ac.kr
+</td></tr><tr><td>73c9cbbf3f9cea1bc7dce98fce429bf0616a1a8c</td><td></td><td></td><td></td></tr><tr><td>877100f430b72c5d60de199603ab5c65f611ce17</td><td>Within-person variability in men’s facial
+<br/>width-to-height ratio
+<br/><b>University of York, York, United Kingdom</b></td><td>('40598264', 'Robin S.S. Kramer', 'robin s.s. kramer')</td><td></td></tr><tr><td>870433ba89d8cab1656e57ac78f1c26f4998edfb</td><td>Regressing Robust and Discriminative 3D Morphable Models
+<br/>with a very Deep Neural Network
+<br/><b>Institute for Robotics and Intelligent Systems, USC, CA, USA</b><br/><b>Information Sciences Institute, USC, CA, USA</b><br/><b>The Open University of Israel, Israel</b></td><td>('1756099', 'Tal Hassner', 'tal hassner')<br/>('11269472', 'Iacopo Masi', 'iacopo masi')</td><td></td></tr><tr><td>872dfdeccf99bbbed7c8f1ea08afb2d713ebe085</td><td>L2-constrained Softmax Loss for Discriminative Face Verification
+<br/><b>Center for Automation Research, UMIACS, University of Maryland, College Park, MD</b></td><td>('48467498', 'Rajeev Ranjan', 'rajeev ranjan')<br/>('38171682', 'Carlos D. Castillo', 'carlos d. castillo')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td>{rranjan1,carlos,rama}@umiacs.umd.edu
+</td></tr><tr><td>87e6cb090aecfc6f03a3b00650a5c5f475dfebe1</td><td>KIM, BALTRUŠAITIS et al.: HOLISTICALLY CONSTRAINED LOCAL MODEL
+<br/>Holistically Constrained Local Model:
+<br/>Going Beyond Frontal Poses for Facial
+<br/>Landmark Detection
+<br/>Tadas Baltrušaitis2
+<br/>Amir Zadeh2
+<br/>Gérard Medioni1
+<br/><b>Institute for Robotics and Intelligent</b><br/>Systems
+<br/><b>University of Southern California</b><br/>Los Angeles, CA, USA
+<br/><b>Language Technologies Institute</b><br/><b>Carnegie Mellon University</b><br/>Pittsburgh, PA, USA
+</td><td>('2792633', 'KangGeon Kim', 'kanggeon kim')<br/>('1767184', 'Louis-Philippe Morency', 'louis-philippe morency')</td><td>kanggeon.kim@usc.edu
+<br/>tbaltrus@cs.cmu.edu
+<br/>abagherz@cs.cmu.edu
+<br/>morency@cs.cmu.edu
+<br/>medioni@usc.edu
+</td></tr><tr><td>8796f2d54afb0e5c924101f54d469a1d54d5775d</td><td>Journal of Signal and Information Processing, 2012, 3, 45-50
+<br/>http://dx.doi.org/10.4236/jsip.2012.31007 Published Online February 2012 (http://www.SciRP.org/journal/jsip)
+<br/>45
+<br/>Illumination Invariant Face Recognition Using Fuzzy LDA
+<br/>and FFNN
+<br/><b>School of Electrical Engineering, Iran University of Science and Technology, Tehran, Iran</b><br/>Received October 20th, 2011; revised November 24th, 2011; accepted December 10th, 2011
+</td><td>('1697559', 'Behzad Bozorgtabar', 'behzad bozorgtabar')<br/>('3280435', 'Hamed Azami', 'hamed azami')<br/>('3097307', 'Farzad Noorian', 'farzad noorian')</td><td>Email: b_bozorgtabar@elec.iust.ac.ir, hmdazami@gmail.com, fnoorian@ee.iust.ac.ir
+</td></tr><tr><td>87f285782d755eb85d8922840e67ed9602cfd6b9</td><td>INCORPORATING BOLTZMANN MACHINE PRIORS
+<br/>FOR SEMANTIC LABELING IN IMAGES AND VIDEOS
+<br/>A Dissertation Presented
+<br/>by
+<br/>ANDREW KAE
+<br/>Submitted to the Graduate School of the
+<br/><b>University of Massachusetts Amherst in partial ful llment</b><br/>of the requirements for the degree of
+<br/>DOCTOR OF PHILOSOPHY
+<br/>May 2014
+<br/>Computer Science
+</td><td></td><td></td></tr><tr><td>871f5f1114949e3ddb1bca0982086cc806ce84a8</td><td>Discriminative Learning of Apparel Features
+<br/>1 Computer Vision Laboratory, D-ITET, ETH Z¨urich, Switzerland
+<br/>2 ESAT - PSI / IBBT, K.U. Leuven, Belgium
+</td><td>('2173683', 'Rasmus Rothe', 'rasmus rothe')<br/>('2113583', 'Marko Ristin', 'marko ristin')<br/>('1727791', 'Matthias Dantone', 'matthias dantone')<br/>('1681236', 'Luc Van Gool', 'luc van gool')</td><td>{rrothe,ristin,mdantone,vangool}@vision.ee.ethz.ch
+<br/>luc.vangool@esat.kuleuven.be
+</td></tr><tr><td>8724fc4d6b91eebb79057a7ce3e9dfffd3b1426f</td><td>Ordered Pooling of Optical Flow Sequences for Action Recognition
+<br/>1Data61/CSIRO, 2 Australian Center for Robotic Vision
+<br/><b>Australian National University, Canberra, Australia</b><br/>Fatih Porikli1,2,3
+</td><td>('48094509', 'Jue Wang', 'jue wang')<br/>('2691929', 'Anoop Cherian', 'anoop cherian')</td><td>jue.wang@anu.edu.au
+<br/>anoop.cherian@anu.edu.au
+<br/>fatih.porikli@anu.edu.au
+</td></tr><tr><td>87bee0e68dfc86b714f0107860d600fffdaf7996</td><td>Automated 3D Face Reconstruction from Multiple Images
+<br/>using Quality Measures
+<br/><b>Institute for Vision and Graphics, University of Siegen, Germany</b></td><td>('2712313', 'Marcel Piotraschke', 'marcel piotraschke')<br/>('2880906', 'Volker Blanz', 'volker blanz')</td><td>piotraschke@nt.uni-siegen.de, blanz@informatik.uni-siegen.de
+</td></tr><tr><td>87309bdb2b9d1fb8916303e3866eca6e3452c27d</td><td>Kernel Coding: General Formulation and Special Cases
+<br/><b>Australian National University, Canberra, ACT 0200, Australia</b><br/>NICTA(cid:63), Locked Bag 8001, Canberra, ACT 2601, Australia
+</td><td>('2862871', 'Mathieu Salzmann', 'mathieu salzmann')</td><td></td></tr><tr><td>878169be6e2c87df2d8a1266e9e37de63b524ae7</td><td>CBMM Memo No. 089
+<br/>May 10, 2018
+<br/>Image interpretation above and below the object level
+</td><td>('2507298', 'Guy Ben-Yosef', 'guy ben-yosef')<br/>('1743045', 'Shimon Ullman', 'shimon ullman')</td><td></td></tr><tr><td>878301453e3d5cb1a1f7828002ea00f59cbeab06</td><td>Faceness-Net: Face Detection through
+<br/>Deep Facial Part Responses
+</td><td>('1692609', 'Shuo Yang', 'shuo yang')<br/>('47571885', 'Ping Luo', 'ping luo')<br/>('1717179', 'Chen Change Loy', 'chen change loy')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td></td></tr><tr><td>87e592ee1a7e2d34e6b115da08700a1ae02e9355</td><td>Deep Pictorial Gaze Estimation
+<br/>AIT Lab, Department of Computer Science, ETH Zurich
+</td><td>('20466488', 'Seonwook Park', 'seonwook park')<br/>('21195502', 'Adrian Spurr', 'adrian spurr')<br/>('2531379', 'Otmar Hilliges', 'otmar hilliges')</td><td>{firstname.lastname}@inf.ethz.ch
+</td></tr><tr><td>87147418f863e3d8ff8c97db0b42695a1c28195b</td><td>Attributes for Improved Attributes: A
+<br/>Multi-Task Network for Attribute Classification
+<br/><b>University of Maryland, College Park</b></td><td>('3351637', 'Emily M. Hand', 'emily m. hand')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td></td></tr><tr><td>87dd3fd36bccbe1d5f1484ac05f1848b51c6eab5</td><td>SPATIO-TEMPORAL MAXIMUM AVERAGE CORRELATION
+<br/>HEIGHT TEMPLATES IN ACTION RECOGNITION AND VIDEO
+<br/>SUMMARIZATION
+<br/>by
+<br/><b>B.A. Earlham College, Richmond Indiana</b><br/><b>M.S. University of Central Florida</b><br/>A dissertation submitted in partial fulfillment of the requirements
+<br/>for the degree of Doctor of Philosophy
+<br/>in the School of Electrical Engineering and Computer Science
+<br/><b>in the College of Engineering and Computer Science</b><br/><b>at the University of Central Florida</b><br/>Orlando, Florida
+<br/>Summer Term
+<br/>2010
+<br/>Major Professor: Mubarak Shah
+</td><td>('35188194', 'MIKEL RODRIGUEZ', 'mikel rodriguez')</td><td></td></tr><tr><td>87bb183d8be0c2b4cfceb9ee158fee4bbf3e19fd</td><td>Craniofacial Image Analysis
+</td><td>('1935115', 'Ezgi Mercan', 'ezgi mercan')<br/>('1771661', 'Indriyati Atmosukarto', 'indriyati atmosukarto')<br/>('10423763', 'Jia Wu', 'jia wu')<br/>('1744684', 'Shu Liang', 'shu liang')<br/>('1809809', 'Linda G. Shapiro', 'linda g. shapiro')</td><td></td></tr><tr><td>8006219efb6ab76754616b0e8b7778dcfb46603d</td><td>CONTRIBUTIONSTOLARGE-SCALELEARNINGFORIMAGECLASSIFICATIONZeynepAkataPhDThesisl’´EcoleDoctoraleMath´ematiques,SciencesetTechnologiesdel’Information,InformatiquedeGrenoble </td><td></td><td></td></tr><tr><td>80193dd633513c2d756c3f568ffa0ebc1bb5213e</td><td></td><td></td><td></td></tr><tr><td>808b685d09912cbef4a009e74e10476304b4cccf</td><td>From Understanding to Controlling Privacy
+<br/>against Automatic Person Recognition in Social Media
+<br/><b>Max Planck Institute for Informatics, Germany</b></td><td>('2390510', 'Seong Joon Oh', 'seong joon oh')<br/>('1697100', 'Bernt Schiele', 'bernt schiele')<br/>('1739548', 'Mario Fritz', 'mario fritz')</td><td>{joon,mfritz,schiele}@mpi-inf.mpg.de
+</td></tr><tr><td>804b4c1b553d9d7bae70d55bf8767c603c1a09e3</td><td>978-1-4799-9988-0/16/$31.00 ©2016 IEEE
+<br/>1831
+<br/>ICASSP 2016
+</td><td></td><td></td></tr><tr><td>800cbbe16be0f7cb921842d54967c9a94eaa2a65</td><td>MULTIMODAL RECOGNITION OF
+<br/>EMOTIONS
+</td><td></td><td></td></tr><tr><td>80135ed7e34ac1dcc7f858f880edc699a920bf53</td><td>EFFICIENT ACTION AND EVENT RECOGNITION IN VIDEOS USING
+<br/>EXTREME LEARNING MACHINES
+<br/>by
+<br/>G¨ul Varol
+<br/><b>B.S., Computer Engineering, Bo gazi ci University</b><br/><b>Submitted to the Institute for Graduate Studies in</b><br/>Science and Engineering in partial fulfillment of
+<br/>the requirements for the degree of
+<br/>Master of Science
+<br/>Graduate Program in Computer Engineering
+<br/><b>Bo gazi ci University</b><br/>2015
+</td><td></td><td></td></tr><tr><td>803c92a3f0815dbf97e30c4ee9450fd005586e1a</td><td>Max-Mahalanobis Linear Discriminant Analysis Networks
+</td><td>('19201674', 'Tianyu Pang', 'tianyu pang')</td><td></td></tr><tr><td>80277fb3a8a981933533cf478245f262652a33b5</td><td>Synergy-based Learning of Facial Identity
+<br/><b>Institute for Computer Graphics and Vision</b><br/><b>Graz University of Technology, Austria</b></td><td>('1791182', 'Peter M. Roth', 'peter m. roth')<br/>('3628150', 'Horst Bischof', 'horst bischof')</td><td>{koestinger,pmroth,bischof}@icg.tugraz.at
+</td></tr><tr><td>80840df0802399838fe5725cce829e1b417d7a2e</td><td>Fast Approximate L∞ Minimization: Speeding Up Robust Regression
+<br/><b>School of Computer Science and Technology, Nanjing University of Science and Technology, China</b><br/><b>School of Computer Science, The University of Adelaide, Australia</b></td><td>('2731972', 'Fumin Shen', 'fumin shen')<br/>('1780381', 'Chunhua Shen', 'chunhua shen')<br/>('26065407', 'Rhys Hill', 'rhys hill')<br/>('5546141', 'Anton van den Hengel', 'anton van den hengel')<br/>('3195119', 'Zhenmin Tang', 'zhenmin tang')</td><td></td></tr><tr><td>80c8d143e7f61761f39baec5b6dfb8faeb814be9</td><td>Local Directional Pattern based Fuzzy Co-
+<br/>occurrence Matrix Features for Face recognition
+<br/>Professor, CSE Dept.
+<br/><b>Gokaraju Rangaraju Institute of Engineering and Technology, Hyd</b></td><td>('39121253', 'P Chandra Sekhar Reddy', 'p chandra sekhar reddy')</td><td></td></tr><tr><td>809ea255d144cff780300440d0f22c96e98abd53</td><td>ArcFace: Additive Angular Margin Loss for Deep Face Recognition
+<br/><b>Imperial College London</b><br/>UK
+<br/>DeepInSight
+<br/>China
+<br/><b>Imperial College London</b><br/>UK
+</td><td>('3234063', 'Jiankang Deng', 'jiankang deng')<br/>('3007274', 'Jia Guo', 'jia guo')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')</td><td>j.deng16@imperial.ac.uk
+<br/>guojia@gmail.com
+<br/>s.zafeiriou@imperial.ac.uk
+</td></tr><tr><td>80345fbb6bb6bcc5ab1a7adcc7979a0262b8a923</td><td>Research Article
+<br/>Soft Biometrics for a Socially Assistive Robotic
+<br/>Platform
+<br/>Open Access
+</td><td>('2104853', 'Pierluigi Carcagnì', 'pierluigi carcagnì')<br/>('2417460', 'Dario Cazzato', 'dario cazzato')<br/>('33097940', 'Marco Del Coco', 'marco del coco')<br/>('35438199', 'Pier Luigi Mazzeo', 'pier luigi mazzeo')<br/>('4730472', 'Marco Leo', 'marco leo')<br/>('1741861', 'Cosimo Distante', 'cosimo distante')</td><td></td></tr><tr><td>80a6bb337b8fdc17bffb8038f3b1467d01204375</td><td>Proceedings of the International Conference on Computer and Information Science and Technology
+<br/>Ottawa, Ontario, Canada, May 11 – 12, 2015
+<br/>Paper No. 126
+<br/>Subspace LDA Methods for Solving the Small Sample Size
+<br/>Problem in Face Recognition
+<br/><b></b><br/>101 KwanFu Rd., Sec. 2, Hsinchu, Taiwan
+</td><td>('2018515', 'Ching-Ting Huang', 'ching-ting huang')<br/>('1830341', 'Chaur-Chin Chen', 'chaur-chin chen')</td><td>j60626j@gmail.com;cchen@cs.nthu.edu.tw
+</td></tr><tr><td>80be8624771104ff4838dcba9629bacfe6b3ea09</td><td>Simultaneous Feature and Dictionary Learning
+<br/>for Image Set Based Face Recognition
+<br/>1 Advanced Digital Sciences Center, Singapore
+<br/><b>Nanyang Technological University, Singapore</b><br/><b>Beijing University of Posts and Telecommunications, Beijing, China</b><br/><b>University of Illinois at Urbana-Champaign, IL USA</b></td><td>('1697700', 'Jiwen Lu', 'jiwen lu')<br/>('39209795', 'Gang Wang', 'gang wang')</td><td></td></tr><tr><td>8000c4f278e9af4d087c0d0895fff7012c5e3d78</td><td>Multi-Task Warped Gaussian Process for Personalized Age Estimation
+<br/><b>Hong Kong University of Science and Technology</b></td><td>('36233573', 'Yu Zhang', 'yu zhang')</td><td>{zhangyu,dyyeung}@cse.ust.hk
+</td></tr><tr><td>80097a879fceff2a9a955bf7613b0d3bfa68dc23</td><td>Active Self-Paced Learning for Cost-Effective and
+<br/>Progressive Face Identification
+</td><td>('1737218', 'Liang Lin', 'liang lin')<br/>('3170394', 'Keze Wang', 'keze wang')<br/>('1803714', 'Deyu Meng', 'deyu meng')<br/>('1724520', 'Wangmeng Zuo', 'wangmeng zuo')<br/>('36685537', 'Lei Zhang', 'lei zhang')</td><td></td></tr><tr><td>80bd795930837330e3ced199f5b9b75398336b87</td><td>Relative Forest for Attribute Prediction
+<br/>1Key Lab of Intelligent Information Processing of Chinese Academy of Sciences
+<br/><b>CAS), Institute of Computing Technology, CAS, Beijing, 100190, China</b><br/><b>Graduate University of Chinese Academy of Sciences, Beijing 100049, China</b></td><td>('1688086', 'Shaoxin Li', 'shaoxin li')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('1710220', 'Xilin Chen', 'xilin chen')</td><td>{shaoxin.li, shiguang.shan, xilin.chen}@vipl.ict.ac.cn
+</td></tr><tr><td>74de03923a069ffc0fb79e492ee447299401001f</td><td>On Film Character Retrieval in Feature-Length Films
+<br/>1 Introduction
+<br/>The problem of automatic face recognition (AFR) concerns matching a detected (roughly localized) face
+<br/>against a database of known faces with associated identities. This task, although very intuitive to humans
+<br/>and despite the vast amounts of research behind it, still poses a significant challenge to computer-based
+<br/>methods. For reviews of the literature and commercial state-of-the-art see [5, 31] and [22, 23]. Much AFR
+<br/>research has concentrated on the user authentication paradigm (e.g. [2, 8, 19]). In contrast, we consider the
+<br/>content-based multimedia retrieval setup: our aim is to retrieve, and rank by confidence, film shots based on
+<br/>the presence of specific actors. A query to the system consists of the user choosing the person of interest in
+<br/>one or more keyframes. Possible applications include:
+<br/>1. DVD browsing: Current DVD technology allows users to quickly jump to the chosen part of a film
+<br/>using an on-screen index. However, the available locations are predefined. AFR technology could allow
+<br/>the user to rapidly browse scenes by formulating queries based on the presence of specific actors.
+<br/>2. Content-based web search: Many web search engines have very popular image search features (e.g.
+<br/>http://www.google.co.uk/imghp). Currently, the search is performed based on the keywords
+<br/>that appear in picture filenames or in the surrounding web page content. Face recognition can make the
+<br/>retrieval much more accurate by focusing on the content of images.
+<br/>We proceed from the face detection stage, assuming localized faces. Face detection technology is fairly
+<br/>mature and a number of reliable face detectors have been built, see [17, 21, 25, 30]. We use a local imple-
+<br/>mentation of the method of Schneiderman and Kanade [25] and consider a face to be correctly detected if
+<br/>both eyes and the mouth are visible, see Figure 1. In a typical feature-length film, using every 10th frame,
+<br/>we obtain 2000-5000 face detections which result from a cast of 10-20 primary and secondary characters
+<br/>(see §3).
+<br/>Problem challenges.
+<br/>A number of factors other than identity influence the way a face appears in an image. Lighting conditions,
+<br/><b>and especially light angle, drastically change the appearance of a face [1]. Facial expressions, including</b><br/>closed or partially closed eyes, also complicate the problem, just as head pose does. Partial occlusions, be
+<br/>they artefacts in front of a face or resulting from hair style change, or growing a beard or moustache also
+</td><td>('1688869', 'Andrew Zisserman', 'andrew zisserman')</td><td>1 Department of Engineering, University of Cambridge, UK oa214@cam.ac.uk
+<br/>2 Department of Engineering, University of Oxford, UK az@robots.ox.ac.uk
+</td></tr><tr><td>74f643579949ccd566f2638b85374e7a6857a9fc</td><td>Monogenic Binary Pattern (MBP): A Novel Feature Extraction and
+<br/>Representation Model for Face Recognition
+<br/><b>Biometric Research Center, The Hong Kong Polytechnic University</b><br/>Different from other face recognition methods, LBP
+<br/>methods use local structural information and histogram
+<br/>of sub-regions to extract and describe facial features.
+<br/>Following LBP, LGBPHS [6] was proposed to use
+<br/>Gabor filtering to enhance the facial features and then
+<br/>extract the local Gabor binary pattern histogram
+<br/>sequence, which improves much LBP’s robustness to
+<br/>illumination changes. The Gabor phase was also used
+<br/>to improve the recognition rate [7-8], and a typical
+<br/>method of this class is the HGPP [8], which captures
+<br/>the Global Gabor phase and Local Gabor phase
+<br/>variation. Despite the high accuracy, the expense of
+<br/>the above mentioned Gabor
+<br/>face
+<br/>recognition methods is also very expensive: both the
+<br/>computational cost and the storage space are high
+<br/>because Gabor filtering is usually applied at five
+<br/>different scales and along eight different orientations,
+<br/>which limits the application of these methods.
+<br/>filter based
+<br/>is a
+<br/>signal
+<br/>(HMBP)
+<br/>the MBP
+<br/>to describe
+<br/>two-dimensional
+<br/>This paper presents a new local facial feature
+<br/>extraction method, namely monogenic binary pattern
+<br/>(MBP), based on the theory of monogenic signal
+<br/>analysis [9], and then proposes to use the histogram of
+<br/>features.
+<br/>MBP
+<br/>Monogenic
+<br/>(2D)
+<br/>generalization of the one-dimensional analytic signal,
+<br/>through which
+<br/>the multi-resolution magnitude,
+<br/>orientation and phase of a 2D signal can be estimated.
+<br/>The proposed MBP combines monogenic orientation
+<br/>and monogenic magnitude information for face feature
+<br/>extraction and description. The advantage of MBP
+<br/>over other Gabor based methods [4][6][8] is that it has
+<br/>much lower time and space complexity but with better
+<br/>or comparable performance. This is mainly because
+<br/>monogenic signal analysis
+<br/>itself a compact
+<br/>representation of features with little information loss.
+<br/>It does not use steerable filters to create multi-
+<br/>orientation features like Gabor filters do. HMBP is the
+<br/>sub-region spatial histogram sequence of MBP
+<br/>features, which is robust to face image variation of
+<br/>is
+</td><td>('5828998', 'Meng Yang', 'meng yang')<br/>('36685537', 'Lei Zhang', 'lei zhang')<br/>('40613710', 'Lin Zhang', 'lin zhang')<br/>('1698371', 'David Zhang', 'david zhang')</td><td>E-mail: {csmyang, cslzhang, cslinzhang, csdzhang}@comp.polyu.edu.hk
+</td></tr><tr><td>74ce7e5e677a4925489897665c152a352c49d0a2</td><td>SONG ET AL.: SEGMENTATION-GUIDED IMAGE INPAINTING
+<br/>SPG-Net: Segmentation Prediction and
+<br/>Guidance Network for Image Inpainting
+<br/><b>University of Southern California</b><br/>3740 McClintock Ave
+<br/>Los Angeles, USA
+<br/>2 Baidu Research
+<br/>1195 Bordeaux Dr.,
+<br/>Sunnyvale, USA
+</td><td>('3383051', 'Yuhang Song', 'yuhang song')<br/>('1683340', 'Chao Yang', 'chao yang')<br/>('8035191', 'Yeji Shen', 'yeji shen')<br/>('1722767', 'Peng Wang', 'peng wang')<br/>('38592052', 'Qin Huang', 'qin huang')<br/>('9363144', 'C.-C. Jay Kuo', 'c.-c. jay kuo')</td><td>yuhangso@usc.edu
+<br/>chaoy@usc.edu
+<br/>yejishen@usc.edu
+<br/>wangpeng54@baidu.com
+<br/>qinhuang@usc.edu
+<br/>cckuo@sipi.usc.edu
+</td></tr><tr><td>74408cfd748ad5553cba8ab64e5f83da14875ae8</td><td>Facial Expressions Tracking and Recognition: Database Protocols for Systems Validation
+<br/>and Evaluation
+</td><td></td><td></td></tr><tr><td>747d5fe667519acea1bee3df5cf94d9d6f874f20</td><td></td><td></td><td></td></tr><tr><td>74dbe6e0486e417a108923295c80551b6d759dbe</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 45– No.11, May 2012
+<br/>An HMM based Model for Prediction of Emotional
+<br/>Composition of a Facial Expression using both
+<br/>Significant and Insignificant Action Units and
+<br/>Associated Gender Differences
+<br/>Department of Management and Information
+<br/>Department of Management and Information
+<br/>Systems Science
+<br/>1603-1 Kamitomioka, Nagaoka
+<br/>Niigata, Japan
+<br/>Systems Science
+<br/>1603-1 Kamitomioka, Nagaoka
+<br/>Niigata, Japan
+</td><td>('2931637', 'Suvashis Das', 'suvashis das')<br/>('1808643', 'Koichi Yamada', 'koichi yamada')</td><td></td></tr><tr><td>740e095a65524d569244947f6eea3aefa3cca526</td><td>Towards Human-like Performance Face Detection: A
+<br/>Convolutional Neural Network Approach
+<br/><b>University of Twente</b><br/>P.O. Box 217, 7500AE Enschede
+<br/>The Netherlands
+</td><td>('2651432', 'Joshua van Kleef', 'joshua van kleef')</td><td>j.a.vankleef-1@student.utwente.nl
+</td></tr><tr><td>74e869bc7c99093a5ff9f8cfc3f533ccf1b135d8</td><td>Context and Subcategories for
+<br/>Sliding Window Object Recognition
+<br/>CMU-RI-TR-12-17
+<br/>Submitted in partial fulfillment of the
+<br/>requirements for the degree of
+<br/>Doctor of Philosophy in Robotics
+<br/><b>The Robotics Institute</b><br/>School of Computer Science
+<br/><b>Carnegie Mellon University</b><br/>Pittsburgh, Pennsylvania 15213
+<br/>August 2012
+<br/>Thesis Committee
+<br/>Martial Hebert, Co-Chair
+<br/>Alexei A. Efros, Co-Chair
+<br/>Takeo Kanade
+<br/><b>Deva Ramanan, University of California at Irvine</b></td><td>('2038685', 'Santosh K. Divvala', 'santosh k. divvala')<br/>('2038685', 'Santosh K. Divvala', 'santosh k. divvala')</td><td></td></tr><tr><td>747c25bff37b96def96dc039cc13f8a7f42dbbc7</td><td>EmoNets: Multimodal deep learning approaches for emotion
+<br/>recognition in video
+</td><td>('3127597', 'Samira Ebrahimi Kahou', 'samira ebrahimi kahou')<br/>('1748421', 'Vincent Michalski', 'vincent michalski')<br/>('2488222', 'Nicolas Boulanger-Lewandowski', 'nicolas boulanger-lewandowski')<br/>('1923596', 'David Warde-Farley', 'david warde-farley')<br/>('1751762', 'Yoshua Bengio', 'yoshua bengio')</td><td></td></tr><tr><td>741485741734a99e933dd0302f457158c6842adf</td><td> A Novel Automatic Facial Expression
+<br/>Recognition Method Based on AAM
+<br/><b>State Key Laboratory of Robotics and System, Harbin Institute of Technology, Harbin, China</b></td><td>('1703431', 'Li Wang', 'li wang')<br/>('2677485', 'Ruifeng Li', 'ruifeng li')<br/>('1751643', 'Ke Wang', 'ke wang')</td><td>Email: wangli-hb@163.com, lrf100@ hit.edu.cn, wangke@ hit.edu.cn
+</td></tr><tr><td>744fa8062d0ae1a11b79592f0cd3fef133807a03</td><td>Aalborg Universitet
+<br/>Deep Pain
+<br/>Rodriguez, Pau; Cucurull, Guillem; Gonzàlez, Jordi; M. Gonfaus, Josep ; Nasrollahi, Kamal;
+<br/>Moeslund, Thomas B.; Xavier Roca, F.
+<br/>Published in:
+<br/>I E E E Transactions on Cybernetics
+<br/>DOI (link to publication from Publisher):
+<br/>10.1109/TCYB.2017.2662199
+<br/>Publication date:
+<br/>2017
+<br/>Document Version
+<br/>Accepted author manuscript, peer reviewed version
+<br/><b>Link to publication from Aalborg University</b><br/>Citation for published version (APA):
+<br/>Rodriguez, P., Cucurull, G., Gonzàlez, J., M. Gonfaus, J., Nasrollahi, K., Moeslund, T. B., & Xavier Roca, F.
+<br/>(2017). Deep Pain: Exploiting Long Short-Term Memory Networks for Facial Expression Classification. I E E E
+<br/>Transactions on Cybernetics, 1-11. DOI: 10.1109/TCYB.2017.2662199
+<br/>General rights
+<br/>Copyright and moral rights for the publications made accessible in the public portal are retained by the authors and/or other copyright owners
+<br/>and it is a condition of accessing publications that users recognise and abide by the legal requirements associated with these rights.
+<br/> ? Users may download and print one copy of any publication from the public portal for the purpose of private study or research.
+<br/> ? You may not further distribute the material or use it for any profit-making activity or commercial gain
+<br/> ? You may freely distribute the URL identifying the publication in the public portal ?
+<br/>Take down policy
+<br/>the work immediately and investigate your claim.
+<br/>Downloaded from vbn.aau.dk on: marts 22, 2018
+<br/> </td><td></td><td>If you believe that this document breaches copyright please contact us at vbn@aub.aau.dk providing details, and we will remove access to
+</td></tr><tr><td>743e582c3e70c6ec07094887ce8dae7248b970ad</td><td>International Journal of Signal Processing, Image Processing and Pattern Recognition
+<br/>Vol.8, No.10 (2015), pp.29-38
+<br/>http://dx.doi.org/10.14257/ijsip.2015.8.10.04
+<br/>Face Recognition based on Deep Neural Network
+<br/><b>Shandong Women s University</b></td><td>('9094473', 'Li Xinhua', 'li xinhua')<br/>('29742002', 'Yu Qian', 'yu qian')</td><td>lixinhua@sdwu.edu.cn
+</td></tr><tr><td>74b0095944c6e29837c208307a67116ebe1231c8</td><td></td><td></td><td></td></tr><tr><td>74156a11c2997517061df5629be78428e1f09cbd</td><td>Cancún Center, Cancún, México, December 4-8, 2016
+<br/>978-1-5090-4846-5/16/$31.00 ©2016 IEEE
+<br/>2784
+</td><td></td><td></td></tr><tr><td>748e72af01ba4ee742df65e9c030cacec88ce506</td><td>Discriminative Regions Selection for Facial Expression
+<br/>Recognition
+<br/><b>MIRACL-FSEG, University of Sfax</b><br/>3018 Sfax, Tunisia
+<br/><b>MIRACL-FS, University of Sfax</b><br/>3018 Sfax, Tunisia
+</td><td>('2049116', 'Hazar Mliki', 'hazar mliki')<br/>('1749733', 'Mohamed Hammami', 'mohamed hammami')</td><td></td></tr><tr><td>745b42050a68a294e9300228e09b5748d2d20b81</td><td></td><td></td><td></td></tr><tr><td>749d605dd12a4af58de1fae6f5ef5e65eb06540e</td><td>Multi-Task Video Captioning with Video and Entailment Generation
+<br/>UNC Chapel Hill
+</td><td>('10721120', 'Ramakanth Pasunuru', 'ramakanth pasunuru')<br/>('7736730', 'Mohit Bansal', 'mohit bansal')</td><td>{ram, mbansal}@cs.unc.edu
+</td></tr><tr><td>749382d19bfe9fb8d0c5e94d0c9b0a63ab531cb7</td><td>A Modular Framework to Detect and Analyze Faces for
+<br/>Audience Measurement Systems
+<br/><b>Fraunhofer Institute for Integrated Circuits IIS</b><br/>Department Electronic Imaging
+<br/>Am Wolfsmantel 33, 91058 Erlangen, Germany
+</td><td>('33046373', 'Andreas Ernst', 'andreas ernst')<br/>('27421829', 'Tobias Ruf', 'tobias ruf')</td><td>{andreas.ernst, tobias.ruf, christian.kueblbeck}@iis.fraunhofer.de
+</td></tr><tr><td>74c19438c78a136677a7cb9004c53684a4ae56ff</td><td>RESOUND: Towards Action Recognition
+<br/>without Representation Bias
+<br/>UC San Diego
+</td><td>('48513320', 'Yingwei Li', 'yingwei li')<br/>('47002970', 'Yi Li', 'yi li')<br/>('1699559', 'Nuno Vasconcelos', 'nuno vasconcelos')</td><td>{yil325,yil898,nvasconcelos}@ucsd.edu
+</td></tr><tr><td>74618fb4ce8ce0209db85cc6069fe64b1f268ff4</td><td>Rendering and Animating Expressive
+<br/>Caricatures
+<br/>Mukundan
+<br/>*HITLab New Zealand,
+<br/><b>University</b><br/>of Canterbury,
+<br/>Christchurch,
+<br/>New Zealand
+<br/>tComputer
+<br/>Science
+<br/>and Software Engineering
+<br/>Email: {mohammad.obaid,
+<br/><b>University</b><br/>of Canterbury,
+<br/>New Zealand
+<br/>non­
+<br/>stylized
+<br/>and control
+<br/>on the generated caricature.
+<br/>A stroke-based
+<br/>of the caricature,
+<br/>of facial expressions.
+<br/>rendering of caricatures
+<br/>from a given face image, with
+<br/>the facial appearance
+<br/>using quadratic deformation
+<br/>rendering (NPR) engine is developed to generate
+<br/>that appears to be a sketch of the original
+</td><td>('1761180', 'Mohammad Obaid', 'mohammad obaid')<br/>('1684805', 'Mark Billinghurst', 'mark billinghurst')</td><td>mark.billinghurst}@hitlabnz.org,
+<br/>mukund@cosc.canterbury.ac.nz
+</td></tr><tr><td>74875368649f52f74bfc4355689b85a724c3db47</td><td>Object Detection by Labeling Superpixels
+<br/>1National Laboratory of Pattern Recognition, Chinese Academy of Sciences
+<br/><b>Institute of Data Science and Technology, Alibaba Group</b><br/><b>Institute of Deep Learning, Baidu Research</b></td><td>('1721677', 'Junjie Yan', 'junjie yan')<br/>('2278628', 'Yinan Yu', 'yinan yu')<br/>('8362374', 'Xiangyu Zhu', 'xiangyu zhu')<br/>('1718623', 'Zhen Lei', 'zhen lei')<br/>('34679741', 'Stan Z. Li', 'stan z. li')</td><td></td></tr><tr><td>7492c611b1df6bce895bee6ba33737e7fc7f60a6</td><td>The 3D Menpo Facial Landmark Tracking Challenge
+<br/><b>Imperial College London, UK</b><br/><b>Center for Machine Vision and Signal Analysis, University of Oulu, Finland</b><br/><b>University of Exeter, UK</b></td><td>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')<br/>('34586458', 'Grigorios G. Chrysos', 'grigorios g. chrysos')<br/>('2931390', 'Anastasios Roussos', 'anastasios roussos')<br/>('31243357', 'Evangelos Ververas', 'evangelos ververas')<br/>('3234063', 'Jiankang Deng', 'jiankang deng')<br/>('2814229', 'George Trigeorgis', 'george trigeorgis')</td><td>{s.zafeiriou, g.chrysos}@imperial.ac.uk
+</td></tr><tr><td>74eae724ef197f2822fb7f3029c63014625ce1ca</td><td>International Journal of Bio-Science and Bio-Technology
+<br/>Vol. 5, No. 2, April, 2013
+<br/>Feature Extraction based on Local Directional Pattern with SVM
+<br/>Decision-level Fusion for Facial Expression Recognition
+<br/>1Key Laboratory of Education Informalization for Nationalities, Ministry of
+<br/><b>Education, Yunnan Normal University, Kunming, China</b><br/><b>College of Information, Yunnan Normal University, Kunming, China</b></td><td>('2535958', 'Juxiang Zhou', 'juxiang zhou')<br/>('3305175', 'Tianwei Xu', 'tianwei xu')<br/>('2411704', 'Jianhou Gan', 'jianhou gan')</td><td>zjuxiang@126.com,xutianwei@ynnu.edu.cn,kmganjh@yahoo.com.cn
+</td></tr><tr><td>7480d8739eb7ab97c12c14e75658e5444b852e9f</td><td>NEGREL ET AL.: REVISITED MLBOOST FOR FACE RETRIEVAL
+<br/>MLBoost Revisited: A Faster Metric
+<br/>Learning Algorithm for Identity-Based Face
+<br/>Retrieval
+<br/>Frederic Jurie
+<br/>Normandie Univ, UNICAEN,
+<br/>ENSICAEN, CNRS
+<br/>France
+</td><td>('2838835', 'Romain Negrel', 'romain negrel')<br/>('2504258', 'Alexis Lechervy', 'alexis lechervy')</td><td>romain.negrel@unicaen.fr
+<br/>alexis.lechervy@unicaen.fr
+<br/>frederic.jurie@unicaen.fr
+</td></tr><tr><td>74ba4ab407b90592ffdf884a20e10006d2223015</td><td>Partial Face Detection in the Mobile Domain
+</td><td>('3152615', 'Upal Mahbub', 'upal mahbub')<br/>('40599829', 'Sayantan Sarkar', 'sayantan sarkar')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td></td></tr><tr><td>7405ed035d1a4b9787b78e5566340a98fe4b63a0</td><td>Self-Expressive Decompositions for
+<br/>Matrix Approximation and Clustering
+</td><td>('1746363', 'Eva L. Dyer', 'eva l. dyer')<br/>('3318961', 'Raajen Patel', 'raajen patel')<br/>('1746260', 'Richard G. Baraniuk', 'richard g. baraniuk')</td><td></td></tr><tr><td>744db9bd550bf5e109d44c2edabffec28c867b91</td><td>FX e-Makeup for Muscle Based Interaction
+<br/>1 Department of Informatics, PUC-Rio, Rio de Janeiro, Brazil
+<br/>2 Department of Mechanical Engineering, PUC-Rio, Rio de Janeiro, Brazil
+<br/>3 Department of Administration, PUC-Rio, Rio de Janeiro, Brazil
+</td><td>('21852164', 'Abel Arrieta', 'abel arrieta')<br/>('38047086', 'Felipe Esteves', 'felipe esteves')<br/>('1805792', 'Hugo Fuks', 'hugo fuks')</td><td>{kvega,hugo}@inf.puc-rio.br
+<br/>abel.arrieta@aluno.puc-rio.br
+<br/>felipeesteves@aluno.puc-rio.br
+</td></tr><tr><td>74325f3d9aea3a810fe4eab8863d1a48c099de11</td><td>Regression-Based Image Alignment
+<br/>for General Object Categories
+<br/><b>Queensland University of Technology (QUT</b><br/>Brisbane QLD 4000, Australia
+<br/><b>Carnegie Mellon University (CMU</b><br/>Pittsburgh PA 15289, USA
+</td><td>('2266155', 'Hilton Bristow', 'hilton bristow')<br/>('1820249', 'Simon Lucey', 'simon lucey')</td><td></td></tr><tr><td>744d23991a2c48d146781405e299e9b3cc14b731</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TIP.2016.2535284, IEEE
+<br/>Transactions on Image Processing
+<br/>Aging Face Recognition: A Hierarchical Learning
+<br/>Model Based on Local Patterns Selection
+</td><td>('1911510', 'Zhifeng Li', 'zhifeng li')<br/>('2856494', 'Dihong Gong', 'dihong gong')<br/>('1720243', 'Xuelong Li', 'xuelong li')<br/>('1692693', 'Dacheng Tao', 'dacheng tao')</td><td></td></tr><tr><td>1a45ddaf43bcd49d261abb4a27977a952b5fff12</td><td>LDOP: Local Directional Order Pattern for Robust
+<br/>Face Retrieval
+<br/>
+</td><td>('34992579', 'Shiv Ram Dubey', 'shiv ram dubey')<br/>('34356161', 'Snehasis Mukherjee', 'snehasis mukherjee')</td><td></td></tr><tr><td>1a41e5d93f1ef5b23b95b7163f5f9aedbe661394</td><td>Hindawi Publishing Corporation
+<br/>e Scientific World Journal
+<br/>Volume 2014, Article ID 903160, 9 pages
+<br/>http://dx.doi.org/10.1155/2014/903160
+<br/>Research Article
+<br/>Alignment-Free and High-Frequency Compensation in
+<br/>Face Hallucination
+<br/><b>College of Computer Science and Information Technology, Central South University of Forestry and Technology, Hunan 410004, China</b><br/><b>College of Information Science and Engineering, Ritsumeikan University, Shiga 525-8577, Japan</b><br/>Received 25 August 2013; Accepted 21 November 2013; Published 12 February 2014
+<br/>Academic Editors: S. Bourennane and J. Marot
+<br/>which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
+<br/>Face hallucination is one of learning-based super resolution techniques, which is focused on resolution enhancement of facial
+<br/>images. Though face hallucination is a powerful and useful technique, some detailed high-frequency components cannot be
+<br/>recovered. It also needs accurate alignment between training samples. In this paper, we propose a high-frequency compensation
+<br/>framework based on residual images for face hallucination method in order to improve the reconstruction performance. The basic
+<br/>idea of proposed framework is to reconstruct or estimate a residual image, which can be used to compensate the high-frequency
+<br/>components of the reconstructed high-resolution image. Three approaches based on our proposed framework are proposed. We
+<br/>also propose a patch-based alignment-free face hallucination. In the patch-based face hallucination, we first segment facial images
+<br/>into overlapping patches and construct training patch pairs. For an input low-resolution (LR) image, the overlapping patches
+<br/>are also used to obtain the corresponding high-resolution (HR) patches by face hallucination. The whole HR image can then be
+<br/>reconstructed by combining all of the HR patches. Experimental results show that the high-resolution images obtained using our
+<br/>proposed approaches can improve the quality of those obtained by conventional face hallucination method even if the training data
+<br/>set is unaligned.
+<br/>1. Introduction
+<br/>There is a high demand for high-resolution (HR) images such
+<br/>as video surveillance, remote sensing, and medical imaging
+<br/>because high-resolution images can reveal more information
+<br/>than low-resolution images. However, it is hard to improve
+<br/>the image resolution by replacing sensors because of the
+<br/>high cost, hardware physical limits. Super resolution image
+<br/>reconstruction (SR) is one promising technique to solve the
+<br/>problem [1, 2]. SR can be broadly classified into two families of
+<br/>methods: (1) the classical multiframe super resolution [2] and
+<br/>(2) the single-frame super resolution, which is also known as
+<br/>example-based or learning-based super resolution [3–5]. In
+<br/>the classical multiimage SR, the HR image is reconstructed
+<br/>by combining subpixel-aligned multiimages (LR images). In
+<br/>the learning-based SR, the HR image is reconstructed by
+<br/>learning correspondence between low and high-resolution
+<br/>image patches from a database.
+<br/>Face hallucination is one of learning-based SR techniques
+<br/>proposed by Baker and Kanade [1, 6], which is focused on
+<br/>resolution enhancement of facial images. To date, a lot of
+<br/>algorithms of face hallucination methods have been proposed
+<br/>[7–12]. Though face hallucination is a powerful and useful
+<br/>technique, some detailed high-frequency components cannot
+<br/>be recovered. In this paper, we propose a high-frequency
+<br/>compensation framework based on residual images for face
+<br/>hallucination method in order to improve the reconstruction
+<br/>performance. The basic idea of proposed framework is to
+<br/>reconstruct or estimate a residual image, which can be used
+<br/>to compensate the high-frequency components of the recon-
+<br/>structed high-resolution image. Three approaches based on
+<br/>our proposed framework are proposed. We also propose a
+<br/>patch-based alignment-free face hallucination method. In the
+<br/>patch-based face hallucination, we first segment facial images
+<br/>into overlapping patches and construct training patch pairs.
+<br/>For an input LR image, the overlapping patches are also used
+<br/>to obtain the corresponding HR patches by face hallucination.
+<br/>The whole HR image can then be reconstructed by combining
+<br/>all of the HR patches.
+</td><td>('1699766', 'Yen-Wei Chen', 'yen-wei chen')<br/>('2755407', 'So Sasatani', 'so sasatani')<br/>('1707360', 'Xian-Hua Han', 'xian-hua han')<br/>('1699766', 'Yen-Wei Chen', 'yen-wei chen')</td><td>Correspondence should be addressed to Yen-Wei Chen; chen@is.ritsumei.ac.jp
+</td></tr><tr><td>1a65cc5b2abde1754b8c9b1d932a68519bcb1ada</td><td>LU, LIAN, YUILLE: PARSING SEMANTIC PARTS OF CARS
+<br/>Parsing Semantic Parts of Cars Using
+<br/>Graphical Models and Segment Appearance
+<br/>Consistency
+<br/>Alan Yuille2
+<br/>1 Department of Electrical Engineering
+<br/><b>Tsinghua University</b><br/>2 Department of Statistics
+<br/><b>University of California, Los Angeles</b></td><td>('2282045', 'Wenhao Lu', 'wenhao lu')<br/>('5964529', 'Xiaochen Lian', 'xiaochen lian')</td><td>yourslewis@gmail.com
+<br/>lianxiaochen@gmail.com
+<br/>yuille@stat.ucla.edu
+</td></tr><tr><td>1aa766bbd49bac8484e2545c20788d0f86e73ec2</td><td>
+<br/>Baseline Face Detection, Head Pose Estimation, and Coarse
+<br/>Direction Detection for Facial Data in the SHRP2 Naturalistic
+<br/>Driving Study
+<br/>J. Paone, D. Bolme, R. Ferrell, Member, IEEE, D. Aykac, and
+<br/>T. Karnowski, Member, IEEE
+<br/>Oak Ridge National Laboratory, Oak Ridge, TN
+</td><td></td><td></td></tr><tr><td>1a849b694f2d68c3536ed849ed78c82e979d64d5</td><td>This is a repository copy of Symmetric Shape Morphing for 3D Face and Head Modelling.
+<br/>White Rose Research Online URL for this paper:
+<br/>http://eprints.whiterose.ac.uk/131760/
+<br/>Version: Accepted Version
+<br/>Proceedings Paper:
+<br/>Dai, Hang, Pears, Nicholas Edwin orcid.org/0000-0001-9513-5634, Smith, William Alfred
+<br/>Peter orcid.org/0000-0002-6047-0413 et al. (1 more author) (2018) Symmetric Shape
+<br/>Morphing for 3D Face and Head Modelling. In: The 13th IEEE Conference on Automatic
+<br/>Face and Gesture Recognition. IEEE .
+<br/>Reuse
+<br/>Items deposited in White Rose Research Online are protected by copyright, with all rights reserved unless
+<br/>indicated otherwise. They may be downloaded and/or printed for private study, or other acts as permitted by
+<br/>national copyright laws. The publisher or other rights holders may allow further reproduction and re-use of
+<br/>the full text version. This is indicated by the licence information on the White Rose Research Online record
+<br/>for the item.
+<br/>Takedown
+<br/>If you consider content in White Rose Research Online to be in breach of UK law, please notify us by
+<br/>https://eprints.whiterose.ac.uk/
+</td><td></td><td>emailing eprints@whiterose.ac.uk including the URL of the record and the reason for the withdrawal request.
+<br/>eprints@whiterose.ac.uk
+</td></tr><tr><td>1a46d3a9bc1e4aff0ccac6403b49a13c8a89fc1d</td><td>Online Robust Image Alignment via Iterative Convex Optimization
+<br/>Center for Data Analytics & Biomedical Informatics, Computer & Information Science Department,
+<br/><b>Temple University, Philadelphia, PA 19122, USA</b><br/><b>School of Information and Control Engineering, Nanjing University of Information Science and Technology, Nanjing, 210044, China</b><br/><b>Purdue University, West Lafayette, IN 47907, USA</b></td><td>('36578908', 'Yi Wu', 'yi wu')<br/>('39274045', 'Bin Shen', 'bin shen')<br/>('1805398', 'Haibin Ling', 'haibin ling')</td><td>fwuyi,hblingg@temple.edu, bshen@purdue.edu
+</td></tr><tr><td>1a878e4667fe55170252e3f41d38ddf85c87fcaf</td><td>Discriminative Machine Learning with Structure
+<br/>Electrical Engineering and Computer Sciences
+<br/><b>University of California at Berkeley</b><br/>Technical Report No. UCB/EECS-2010-4
+<br/>http://www.eecs.berkeley.edu/Pubs/TechRpts/2010/EECS-2010-4.html
+<br/>January 12, 2010
+</td><td>('1685481', 'Simon Lacoste-Julien', 'simon lacoste-julien')</td><td></td></tr><tr><td>1a41831a3d7b0e0df688fb6d4f861176cef97136</td><td><b>massachusetts institute of technology artificial intelligence laboratory</b><br/>A Biological Model of Object
+<br/>Recognition with Feature Learning
+<br/>AI Technical Report 2003-009
+<br/>CBCL Memo 227
+<br/>June 2003
+<br/>© 2 0 0 3 m a s s a c h u s e t t s i n s t i t u t e o f
+<br/>t e c h n o l o g y, c a m b r i d g e , m a 0 2 1 3 9 u s a — w w w. a i . m i t . e d u
+</td><td>('1848733', 'Jennifer Louie', 'jennifer louie')</td><td>@ MIT
+</td></tr><tr><td>1ac2882559a4ff552a1a9956ebeadb035cb6df5b</td><td>How much training data for facial action unit detection?
+<br/><b>University of Pittsburgh, Pittsburgh, PA, USA</b><br/><b>Robotics Institute, Carnegie Mellon University, Pittsburgh, PA, USA</b></td><td>('36185909', 'Jeffrey M. Girard', 'jeffrey m. girard')<br/>('1737918', 'Jeffrey F. Cohn', 'jeffrey f. cohn')<br/>('1820249', 'Simon Lucey', 'simon lucey')<br/>('1707876', 'Fernando De la Torre', 'fernando de la torre')</td><td></td></tr><tr><td>1a7a17c4f97c68d68fbeefee1751d349b83eb14a</td><td>Iterative Hessian sketch: Fast and accurate solution
+<br/>approximation for constrained least-squares
+<br/>1Department of Electrical Engineering and Computer Science
+<br/>2Department of Statistics
+<br/><b>University of California, Berkeley</b><br/>November 4, 2014
+</td><td>('3173667', 'Mert Pilanci', 'mert pilanci')<br/>('1721860', 'Martin J. Wainwright', 'martin j. wainwright')</td><td>{mert, wainwrig}@berkeley.edu
+</td></tr><tr><td>1aef6f7d2e3565f29125a4871cd60c4d86c48361</td><td>Natural Language Video Description using
+<br/>Deep Recurrent Neural Networks
+<br/><b>University of Texas at Austin</b><br/>Doctoral Dissertation Proposal
+</td><td>('1811430', 'Subhashini Venugopalan', 'subhashini venugopalan')<br/>('1797655', 'Raymond J. Mooney', 'raymond j. mooney')</td><td>vsub@cs.utexas.edu
+</td></tr><tr><td>1a6c3c37c2e62b21ebc0f3533686dde4d0103b3f</td><td>International Journal of Linguistics and Computational Applications (IJLCA) ISSN 2394-6385 (Print)
+<br/>Volume 4, Issue 1, January – March 2017 ISSN 2394-6393 (Online)
+<br/> Implementation of Partial Face Recognition
+<br/>using Directional Binary Code
+<br/>N.Pavithra #1, A.Sivapriya*2, K.Hemalatha*3 , D.Lakshmi*4
+<br/><b>Final Year, PanimalarInstitute of Technology</b><br/><b>PanimalarInstitute of Technology, Tamilnadu, India</b><br/>in
+<br/>faith
+<br/>is proposed. It
+<br/>face alignment and
+</td><td></td><td></td></tr><tr><td>1a167e10fe57f6d6eff0bb9e45c94924d9347a3e</td><td>Boosting VLAD with Double Assignment using
+<br/>Deep Features for Action Recognition in Videos
+<br/><b>University of Trento, Italy</b><br/>Tuan A. Nguyen
+<br/><b>University of Tokyo, Japan</b><br/><b>University of Tokyo, Japan</b><br/><b>University Politehnica of Bucharest, Romania</b><br/><b>University of Trento, Italy</b></td><td>('3429470', 'Ionut C. Duta', 'ionut c. duta')<br/>('1712839', 'Kiyoharu Aizawa', 'kiyoharu aizawa')<br/>('1796198', 'Bogdan Ionescu', 'bogdan ionescu')<br/>('1703601', 'Nicu Sebe', 'nicu sebe')</td><td>ionutcosmin.duta@unitn.it
+<br/>t nguyen@hal.t.u-tokyo.ac.jp
+<br/>aizawa@hal.t.u-tokyo.ac.jp
+<br/>bionescu@imag.pub.ro
+<br/>niculae.sebe@unitn.it
+</td></tr><tr><td>1a3eee980a2252bb092666cf15dd1301fa84860e</td><td>PCA GAUSSIANIZATION FOR IMAGE PROCESSING
+<br/>Image Processing Laboratory (IPL), Universitat de Val`encia
+<br/>Catedr´atico A. Escardino - 46980 Paterna, Val`encia, Spain
+</td><td>('2732577', 'Valero Laparra', 'valero laparra')<br/>('1684246', 'Gustavo Camps-Valls', 'gustavo camps-valls')</td><td>{lapeva,gcamps,jmalo}@uv.es
+</td></tr><tr><td>1a140d9265df8cf50a3cd69074db7e20dc060d14</td><td>Face Parts Localization Using
+<br/>Structured-Output Regression Forests
+<br/><b>School of EECS, Queen Mary University of London</b></td><td>('2966679', 'Heng Yang', 'heng yang')<br/>('1744405', 'Ioannis Patras', 'ioannis patras')</td><td>{heng.yang,i.patras}@eecs.qmul.ac.uk
+</td></tr><tr><td>1a85956154c170daf7f15f32f29281269028ff69</td><td>Active Pictorial Structures
+<br/><b>Imperial College London</b><br/>180 Queens Gate, SW7 2AZ, London, U.K.
+</td><td>('2788012', 'Epameinondas Antonakos', 'epameinondas antonakos')<br/>('2575567', 'Joan Alabort-i-Medina', 'joan alabort-i-medina')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')</td><td>{e.antonakos, ja310, s.zafeiriou}@imperial.ac.uk
+</td></tr><tr><td>1a031378cf1d2b9088a200d9715d87db8a1bf041</td><td>Workshop track - ICLR 2018
+<br/>DEEP DICTIONARY LEARNING: SYNERGIZING RE-
+<br/>CONSTRUCTION AND CLASSIFICATION
+</td><td>('3362896', 'Shahin Mahdizadehaghdam', 'shahin mahdizadehaghdam')<br/>('1733181', 'Ashkan Panahi', 'ashkan panahi')<br/>('1769928', 'Hamid Krim', 'hamid krim')</td><td>{smahdiz,apanahi,ahk}@ncsu.edu & liyi.dai.civ@mail.mil
+</td></tr><tr><td>1afd481036d57320bf52d784a22dcb07b1ca95e2</td><td>The Computer Journal Advance Access published December 6, 2012
+<br/><b>The Author 2012. Published by Oxford University Press on behalf of The British Computer Society. All rights reserved</b><br/>doi:10.1093/comjnl/bxs146
+<br/>Automated Content Metadata Extraction
+<br/>Services Based on MPEG Standards
+<br/>D.C. Gibbon∗, Z. Liu, A. Basso and B. Shahraray
+<br/>AT&T Labs Research, Middletown, NJ, USA
+<br/>This paper is concerned with the generation, acquisition, standardized representation and transport
+<br/>of video metadata. The use of MPEG standards in the design and development of interoperable
+<br/>media architectures and web services is discussed. A high-level discussion of several algorithms
+<br/>for metadata extraction is presented. Some architectural and algorithmic issues encountered when
+<br/>designing services for real-time processing of video streams, as opposed to traditional offline media
+<br/>processing, are addressed. A prototype real-time video analysis system for generating MPEG-7
+<br/>Audiovisual Description Profile from MPEG-2 transport stream encapsulated video is presented.
+<br/>Such a capability can enable a range of new services such as content-based personalization of live
+<br/>broadcasts given that the MPEG-7 based data models fit in well with specifications for advanced
+<br/>television services such as TV-Anytime andAlliance for Telecommunications Industry Solutions IPTV
+<br/>Interoperability Forum.
+<br/>Keywords: MPEG-7; MPEG-21; audiovisual description profile; video processing; automated metadata
+<br/>extraction; video metadata, real-time media processing
+<br/>Received 1 March 2012; revised 11 September 2012; accepted 9 October 2012
+<br/>Handling editor: Marios Angelides
+<br/>1.
+<br/>INTRODUCTION
+<br/>Content descriptors have gained considerable prominence
+<br/>in the content ecosystem in the last decade. This growing
+<br/>significance stems from the fact that rich metadata promotes
+<br/>user engagement, enables fine-grained access to content and
+<br/>allows more intelligent and targeted access to content.
+<br/>Effective utilization of content descriptors involves three
+<br/>basic steps, namely generation, representation and transport.
+<br/>In traditional broadcasting,
+<br/>the generation of the content
+<br/>descriptions has been a manual process in which individuals
+<br/>would access the content and would index it according to
+<br/>specific rules (i.e. annotation guides). While in the past this
+<br/>was a viable option due to the limited amount of available
+<br/>content, with the large volumes of content that are generated
+<br/>today (e.g. YouTube uploads have currently surpassed 1 h of
+<br/>video every second), manual indexing is no longer a viable
+<br/>option. Research in multimedia content analysis has generated a
+<br/>variety of algorithms for content feature extraction in the visual,
+<br/>text, music and speech domains. Such algorithms provide
+<br/>descriptions with different levels of confidence and are often
+<br/>combined to improve their accuracy and descriptive power.
+<br/>Despite the enormous progress that has been made in this area,
+<br/>content description generation is not yet sufficiently advanced
+<br/>to be fully automated for all applications and types of content.
+<br/>However, for a subset of content types and certain applications,
+<br/>the current state of the art in automated content processing has
+<br/>proven sufficient.
+<br/>Another important consideration in effective and widespread
+<br/>utilization of content metadata is the adoption of appropriate
+<br/>representations for the metadata. Historically, the represen-
+<br/>tation of content metadata has been specialized to specific
+<br/>representation and service needs (i.e. the asset distribution
+<br/>interface from CableLabs for traditional paid video on demand
+<br/>services). Recently, in the context of MPEG, a standardization
+<br/>effort has been undertaken to create more general represen-
+<br/>tations of content descriptors that are independent of any
+<br/>particular application and to enable interoperability among
+<br/>metadata generation systems and applications.
+<br/>Finally, for a certain class of applications and services,
+<br/>real-time delivery or transport of metadata is critical, but
+<br/>is an area that is still in its infancy. For example, today’s
+<br/>systems for delivering television electronic program guide
+<br/>(EPG) information make efficient use of multicast delivery,
+<br/>but the data are largely static (the data may only change
+<br/>The Computer Journal, 2012
+</td><td></td><td>For Permissions, please email: journals.permissions@oup.com
+<br/>Corresponding author: dcg@research.att.com
+</td></tr><tr><td>1a9337d70a87d0e30966ecd1d7a9b0bbc7be161f</td><td></td><td></td><td></td></tr><tr><td>1a4b6ee6cd846ef5e3030a6ae59f026e5f50eda6</td><td>Deep Learning for Video Classification and Captioning
+<br/><b>Fudan University, 2Microsoft Research Asia, 3University of Maryland</b><br/>1. Introduction
+<br/>Today’s digital contents are inherently multimedia: text, audio, image,
+<br/>video and etc. Video, in particular, becomes a new way of communication
+<br/>between Internet users with the proliferation of sensor-rich mobile devices.
+<br/>Accelerated by the tremendous increase in Internet bandwidth and storage
+<br/>space, video data has been generated, published and spread explosively, be-
+<br/>coming an indispensable part of today’s big data. This has encouraged the
+<br/>development of advanced techniques for a broad range of video understand-
+<br/>ing applications. A fundamental issue that underlies the success of these
+<br/>technological advances is the understanding of video contents. Recent ad-
+<br/>vances in deep learning in image [41, 68, 17, 50] and speech [21, 27] domain
+<br/>have encouraged techniques to learn robust video feature representations to
+<br/>effectively exploit abundant multimodal clues in video data.
+<br/>In this paper, we focus on reviewing two lines of research aiming to stimu-
+<br/>late the comprehension of videos with deep learning: video classification and
+<br/>video captioning. While video classification concentrates on automatically
+<br/>labeling video clips based on their semantic contents like human actions or
+<br/>complex events, video captioning attempts to generate a complete and nat-
+<br/>ural sentence, enriching the single label as in video classification, to capture
+<br/>the most informative dynamics in videos.
+<br/>There have been several efforts surveying literatures on video content
+<br/>understanding. Most of the approaches surveyed in these works adopted
+<br/>hand-crafted features coupled with typical machine learning pipelines for
+<br/>action recognition and event detection [1, 88, 61, 35]. In contrast, this paper
+<br/>focuses on discussing state-of-the-art deep learning techniques not only for
+<br/>video classification but also video captioning. As deep learning for video
+<br/>analysis is an emerging and vibrant field, we hope this paper could help
+<br/>stimulate future research along the line.
+</td><td>('3099139', 'Zuxuan Wu', 'zuxuan wu')<br/>('2053452', 'Ting Yao', 'ting yao')<br/>('35782003', 'Yanwei Fu', 'yanwei fu')<br/>('1717861', 'Yu-Gang Jiang', 'yu-gang jiang')</td><td>zxwu@cs.umd.edu, tiyao@microsoft.com, {ygj, yanweifu}@fudan.edu.cn
+</td></tr><tr><td>1a9a192b700c080c7887e5862c1ec578012f9ed1</td><td>IEEE TRANSACTIONS ON SYSTEM, MAN AND CYBERNETICS, PART B
+<br/>Discriminant Subspace Analysis for Face
+<br/>Recognition with Small Number of Training
+<br/>Samples
+</td><td>('1844328', 'Hui Kong', 'hui kong')<br/>('1786811', 'Xuchun Li', 'xuchun li')<br/>('1752714', 'Matthew Turk', 'matthew turk')<br/>('1708413', 'Chandra Kambhamettu', 'chandra kambhamettu')</td><td></td></tr><tr><td>1af52c853ff1d0ddb8265727c1d70d81b4f9b3a9</td><td>ARTICLE
+<br/>International Journal of Advanced Robotic Systems
+<br/>Face Recognition Under Illumination
+<br/>Variation Using Shadow Compensation
+<br/>and Pixel Selection
+<br/>Regular Paper
+<br/><b>Dankook University, 126 Jukjeon-dong, Suji-gu, Yongin-si, Gyeonggi-do, Korea</b><br/>Received 14 Jun 2012; Accepted 31 Aug 2012
+<br/>DOI: 10.5772/52939
+<br/>© 2012 Choi; licensee InTech. This is an open access article distributed under the terms of the Creative
+<br/>Commons Attribution License (http://creativecommons.org/licenses/by/3.0), which permits unrestricted use,
+<br/>distribution, and reproduction in any medium, provided the original work is properly cited.
+<br/>to  other 
+<br/>features 
+<br/>for 
+<br/>face 
+<br/>retinal  or 
+<br/>is  similar 
+<br/>to 
+<br/>the 
+<br/>fingerprint, 
+<br/>image 
+<br/>taken  with 
+<br/>it  widely  applicable 
+<br/>illumination  variation.  By  using 
+</td><td>('1737997', 'Sang-Il Choi', 'sang-il choi')</td><td>* Corresponding author E-mail: choisi@dankook.ac.kr
+</td></tr><tr><td>1a8ccc23ed73db64748e31c61c69fe23c48a2bb1</td><td>Extensive Facial Landmark Localization
+<br/>with Coarse-to-fine Convolutional Network Cascade
+<br/>Megvii Inc.
+</td><td>('1848243', 'Erjin Zhou', 'erjin zhou')</td><td>{zej,fhq,czm,jyn,yq}@megvii.com
+</td></tr><tr><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td><td>Sports Videos in the Wild (SVW): A Video Dataset for Sports Analysis
+<br/><b>Michigan State University, East Lansing, MI, USA</b><br/>2 TechSmith Corporation, Okemos, MI, USA
+</td><td>('2941187', 'Seyed Morteza Safdarnejad', 'seyed morteza safdarnejad')<br/>('1759169', 'Xiaoming Liu', 'xiaoming liu')<br/>('1938832', 'Lalita Udpa', 'lalita udpa')<br/>('40467330', 'Brooks Andrus', 'brooks andrus')<br/>('1678721', 'John Wood', 'john wood')<br/>('37008125', 'Dean Craven', 'dean craven')</td><td></td></tr><tr><td>1ad97cce5fa8e9c2e001f53f6f3202bddcefba22</td><td>Grassmann Averages for Scalable Robust PCA
+<br/>DIKU and MPIs T¨ubingen∗
+<br/>Denmark and Germany
+<br/>DTU Compute∗
+<br/>Lyngby, Denmark
+</td><td>('1808965', 'Aasa Feragen', 'aasa feragen')<br/>('2142792', 'Søren Hauberg', 'søren hauberg')</td><td>aasa@diku.dk
+<br/>sohau@dtu.dk
+</td></tr><tr><td>1a1118cd4339553ad0544a0a131512aee50cf7de</td><td></td><td></td><td></td></tr><tr><td>1a6c9ef99bf0ab9835a91fe5f1760d98a0606243</td><td>ConceptMap:
+<br/>Mining Noisy Web Data for Concept Learning
+<br/><b>Bilkent University, 06800 Cankaya, Turkey</b></td><td>('2540074', 'Eren Golge', 'eren golge')</td><td></td></tr><tr><td>1afdedba774f6689eb07e048056f7844c9083be9</td><td>Markov Random Field Structures for Facial Action Unit Intensity Estimation
+<br/>∗Department of Computing
+<br/><b>Imperial College London</b><br/>180 Queen’s Gate
+<br/>London, UK
+<br/>†EEMCS
+<br/><b>University of Twente</b><br/>7522 NB Enschede
+<br/>Netherlands
+</td><td>('3007548', 'Georgia Sandbach', 'georgia sandbach')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')<br/>('1694605', 'Maja Pantic', 'maja pantic')</td><td>{gls09,s.zafeiriou,m.pantic}@imperial.ac.uk
+</td></tr><tr><td>1a2b3fa1b933042687eb3d27ea0a3fcb67b66b43</td><td>WANG AND MORI: MAX-MARGIN LATENT DIRICHLET ALLOCATION
+<br/>Max-Margin Latent Dirichlet Allocation for
+<br/>Image Classification and Annotation
+<br/><b>University</b><br/>of Illinois at Urbana Champaign
+<br/>School of Computing Science, Simon
+<br/><b>Fraser University</b></td><td>('40457160', 'Yang Wang', 'yang wang')<br/>('10771328', 'Greg Mori', 'greg mori')</td><td>yangwang@uiuc.edu
+<br/>mori@cs.sfu.ca
+</td></tr><tr><td>1a7a2221fed183b6431e29a014539e45d95f0804</td><td>Person Identification Using Text and Image Data
+<br/>David S. Bolme, J. Ross Beveridge and Adele E. Howe
+<br/>Computer Science Department
+<br/>Colorado State Univeristy
+<br/>Fort Collins, Colorado 80523
+</td><td></td><td>[bolme,ross,howe]@cs.colostate.edu
+</td></tr><tr><td>1a5b39a4b29afc5d2a3cd49087ae23c6838eca2b</td><td>Competitive Game Designs for Improving the Cost
+<br/>Effectiveness of Crowdsourcing
+<br/><b>L3S Research Center, Hannover, Germany</b></td><td>('2993225', 'Markus Rokicki', 'markus rokicki')<br/>('3257370', 'Sergiu Chelaru', 'sergiu chelaru')<br/>('2553718', 'Sergej Zerr', 'sergej zerr')<br/>('1745880', 'Stefan Siersdorfer', 'stefan siersdorfer')</td><td>{rokicki,chelaru,siersdorfer,zerr}@L3S.de
+</td></tr><tr><td>2878b06f3c416c98496aad6fc2ddf68d2de5b8f6</td><td>Available online at www.sciencedirect.com
+<br/>Computer Vision and Image Understanding 110 (2008) 91–101
+<br/>www.elsevier.com/locate/cviu
+<br/>Two-stage optimal component analysis
+<br/><b>Florida State University, Tallahassee, FL 32306, USA</b><br/><b>Florida State University, Tallahassee, FL 32306, USA</b><br/><b>c School of Computational Science, Florida State University, Tallahassee, FL 32306, USA</b><br/>Received 26 September 2006; accepted 30 April 2007
+<br/>Available online 8 June 2007
+</td><td>('2207859', 'Yiming Wu', 'yiming wu')<br/>('1800002', 'Xiuwen Liu', 'xiuwen liu')<br/>('2436294', 'Washington Mio', 'washington mio')</td><td></td></tr><tr><td>287795991fad3c61d6058352879c7d7ae1fdd2b6</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 66– No.8, March 2013
+<br/>Biometrics Security: Facial Marks Detection from the
+<br/>Low Quality Images
+<br/>and facial marks are detected using LoG with morphological
+<br/>operator. This method though was not enough to detect the
+<br/>facial marks from the low quality images [7]. But, facial
+<br/>marks have been used to speed up the retrieval process in
+<br/>order to differentiate the human faces [15].
+<br/><b>B.S.Abdur Rahman University B.S.Abdur Rahman University</b><br/> Dept. Of Information Technology Dept. Of Computer Science & Engineering
+<br/> Chennai, India Chennai, India
+<br/>
+</td><td>('9401261', 'Ziaul Haque Choudhury', 'ziaul haque choudhury')</td><td></td></tr><tr><td>28a900a07c7cbce6b6297e4030be3229e094a950</td><td>382 The International Arab Journal of Information Technology, Vol. 9, No. 4, July 2012
+<br/>Local Directional Pattern Variance (LDPv): A
+<br/>Robust Feature Descriptor for Facial
+<br/>Expression Recognition
+<br/><b>Kyung Hee University, South Korea</b></td><td>('3182680', 'Taskeed Jabid', 'taskeed jabid')<br/>('1685505', 'Oksam Chae', 'oksam chae')</td><td></td></tr><tr><td>282503fa0285240ef42b5b4c74ae0590fe169211</td><td>Feeding Hand-Crafted Features for Enhancing the Performance of
+<br/>Convolutional Neural Networks
+<br/><b>Seoul National University</b><br/>Seoul Nat’l Univ.
+<br/><b>Seoul National University</b></td><td>('35453923', 'Sepidehsadat Hosseini', 'sepidehsadat hosseini')<br/>('32193683', 'Seok Hee Lee', 'seok hee lee')<br/>('1707645', 'Nam Ik Cho', 'nam ik cho')</td><td>sepid@ispl.snu.ac.kr
+<br/>seokheel@snu.ac.kr
+<br/>nicho@snu.ac.kr
+</td></tr><tr><td>28e0ed749ebe7eb778cb13853c1456cb6817a166</td><td></td><td></td><td></td></tr><tr><td>28b9d92baea72ec665c54d9d32743cf7bc0912a7</td><td></td><td></td><td></td></tr><tr><td>283d226e346ac3e7685dd9a4ba8ae55ee4f2fe43</td><td>BAYESIAN DATA ASSOCIATION FOR TEMPORAL SCENE
+<br/>UNDERSTANDING
+<br/>by
+<br/>A Dissertation Submitted to the Faculty of the
+<br/>DEPARTMENT OF COMPUTER SCIENCE
+<br/>In Partial Fulfillment of the Requirements
+<br/>For the Degree of
+<br/>DOCTOR OF PHILOSOHPY
+<br/><b>In the Graduate College</b><br/><b>THE UNIVERSITY OF ARIZONA</b><br/>2013
+</td><td>('10399726', 'Ernesto Brau Avila', 'ernesto brau avila')</td><td></td></tr><tr><td>28d7029cfb73bcb4ad1997f3779c183972a406b4</td><td>Discriminative Nonlinear Analysis Operator
+<br/>Learning: When Cosparse Model Meets Image
+<br/>Classification
+</td><td>('2833510', 'Zaidao Wen', 'zaidao wen')<br/>('1940528', 'Biao Hou', 'biao hou')<br/>('1734497', 'Licheng Jiao', 'licheng jiao')</td><td></td></tr><tr><td>280d59fa99ead5929ebcde85407bba34b1fcfb59</td><td>978-1-4799-9988-0/16/$31.00 ©2016 IEEE
+<br/>2662
+<br/>ICASSP 2016
+</td><td></td><td></td></tr><tr><td>28f5138d63e4acafca49a94ae1dc44f7e9d84827</td><td>Journal of Machine Learning Research xx (2012) xx-xx
+<br/>Submitted xx/xx; Published xx/xx
+<br/>MahNMF: Manhattan Non-negative Matrix Factorization
+<br/>Center for Quantum Computation and Intelligent Systems
+<br/>Faculty of Engineering and Information Technology
+<br/><b>University of Technology, Sydney</b><br/>Sydney, NSW 2007, Australia
+<br/>Center for Quantum Computation and Intelligent Systems
+<br/>Faculty of Engineering and Information Technology
+<br/><b>University of Technology, Sydney</b><br/>Sydney, NSW 2007, Australia
+<br/>School of Computer Science
+<br/><b>National University of Defense Technology</b><br/>Changsha, Hunan 410073, China
+<br/>Centre for Computational Statistics and Machine Learning (CSML)
+<br/>Department of Computer Science
+<br/><b>University College London</b><br/>Gower Street, London WC1E 6BT, United Kingdom
+<br/>Editor: xx
+</td><td>('2067095', 'Naiyang Guan', 'naiyang guan')<br/>('1692693', 'Dacheng Tao', 'dacheng tao')<br/>('1764542', 'Zhigang Luo', 'zhigang luo')<br/>('1792322', 'John Shawe-Taylor', 'john shawe-taylor')</td><td>Guan.Naiyang@uts.edu.au
+<br/>dacheng.tao@uts.edu.au
+<br/>zgluo@nudt.edu.cn
+<br/>J.Shawe-Taylor@cs.ucl.ac.uk
+</td></tr><tr><td>28e1668d7b61ce21bf306009a62b06593f1819e3</td><td>RESEARCH ARTICLE
+<br/>Validation of the Amsterdam Dynamic Facial
+<br/>Expression Set – Bath Intensity Variations
+<br/>(ADFES-BIV): A Set of Videos Expressing Low,
+<br/>Intermediate, and High Intensity Emotions
+<br/><b>University of Bath, Bath, United Kingdom</b><br/>☯ These authors contributed equally to this work.
+</td><td>('7249951', 'Tanja S. H. Wingenbach', 'tanja s. h. wingenbach')<br/>('2708124', 'Chris Ashwin', 'chris ashwin')<br/>('39455300', 'Mark Brosnan', 'mark brosnan')</td><td>* tshw20@bath.ac.uk
+</td></tr><tr><td>28cd46a078e8fad370b1aba34762a874374513a5</td><td>CVPAPER.CHALLENGE IN 2016, JULY 2017
+<br/>cvpaper.challenge in 2016: Futuristic Computer
+<br/>Vision through 1,600 Papers Survey
+</td><td>('1730200', 'Hirokatsu Kataoka', 'hirokatsu kataoka')<br/>('1713046', 'Yun He', 'yun he')<br/>('9935341', 'Shunya Ueta', 'shunya ueta')<br/>('5014206', 'Teppei Suzuki', 'teppei suzuki')<br/>('3408038', 'Kaori Abe', 'kaori abe')<br/>('2554424', 'Asako Kanezaki', 'asako kanezaki')<br/>('22219521', 'Toshiyuki Yabe', 'toshiyuki yabe')<br/>('10800402', 'Yoshihiro Kanehara', 'yoshihiro kanehara')<br/>('22174281', 'Hiroya Yatsuyanagi', 'hiroya yatsuyanagi')<br/>('1692565', 'Shinya Maruyama', 'shinya maruyama')<br/>('3217653', 'Masataka Fuchida', 'masataka fuchida')<br/>('2642022', 'Yudai Miyashita', 'yudai miyashita')<br/>('34935749', 'Kazushige Okayasu', 'kazushige okayasu')<br/>('20505300', 'Yuta Matsuzaki', 'yuta matsuzaki')</td><td></td></tr><tr><td>286adff6eff2f53e84fe5b4d4eb25837b46cae23</td><td>Single-Image Depth Perception in the Wild
+<br/><b>University of Michigan, Ann Arbor</b></td><td>('1732404', 'Weifeng Chen', 'weifeng chen')<br/>('8342699', 'Jia Deng', 'jia deng')<br/>('2097755', 'Zhao Fu', 'zhao fu')<br/>('2500067', 'Dawei Yang', 'dawei yang')</td><td>{wfchen,zhaofu,ydawei,jiadeng}@umich.edu
+</td></tr><tr><td>286812ade95e6f1543193918e14ba84e5f8e852e</td><td>DOU, WU, SHAH, KAKADIARIS: 3D FACE RECONSTRUCTION FROM 2D LANDMARKS
+<br/>Robust 3D Face Shape Reconstruction from
+<br/>Single Images via Two-Fold Coupled
+<br/>Structure Learning
+<br/>Computational Biomedicine Lab
+<br/>Department of Computer Science
+<br/><b>University of Houston</b><br/>Houston, TX, USA
+</td><td>('39634395', 'Pengfei Dou', 'pengfei dou')<br/>('2461369', 'Yuhang Wu', 'yuhang wu')<br/>('1706204', 'Ioannis A. Kakadiaris', 'ioannis a. kakadiaris')</td><td>bensondou@gmail.com
+<br/>yuhang@cbl.uh.edu
+<br/>sshah@central.uh.edu
+<br/>ioannisk@uh.edu
+</td></tr><tr><td>282a3ee79a08486f0619caf0ada210f5c3572367</td><td></td><td></td><td></td></tr><tr><td>288dbc40c027af002298b38954d648fddd4e2fd3</td><td></td><td></td><td></td></tr><tr><td>28f311b16e4fe4cc0ff6560aae3bbd0cb6782966</td><td>Learning Language from Perceptual Context
+<br/>Department of Computer Science
+<br/><b>University of Texas at Austin</b><br/>David L. Chen
+<br/>Austin, TX 78712
+<br/>Doctoral Dissertation Proposal
+</td><td>('1797655', 'Raymond J. Mooney', 'raymond j. mooney')</td><td>dlcc@cs.utexas.edu
+</td></tr><tr><td>28312c3a47c1be3a67365700744d3d6665b86f22</td><td></td><td></td><td></td></tr><tr><td>28d06fd508d6f14cd15f251518b36da17909b79e</td><td>What’s in a Name? First Names as Facial Attributes
+<br/><b>Stanford University</b><br/><b>Cornell University</b><br/><b>Stanford University</b></td><td>('2896700', 'Huizhong Chen', 'huizhong chen')<br/>('39460815', 'Andrew C. Gallagher', 'andrew c. gallagher')<br/>('1739786', 'Bernd Girod', 'bernd girod')</td><td>hchen2@stanford.edu
+<br/>andrew.c.gallagher@cornell.edu
+<br/>bgirod@stanford.edu
+</td></tr><tr><td>28b5b5f20ad584e560cd9fb4d81b0a22279b2e7b</td><td>A New Fuzzy Stacked Generalization Technique
+<br/>and Analysis of its Performance
+</td><td>('2159942', 'Mete Ozay', 'mete ozay')<br/>('7158165', 'Fatos T. Yarman Vural', 'fatos t. yarman vural')</td><td></td></tr><tr><td>281486d172cf0c78d348ce7d977a82ff763efccd</td><td>Mining a Deep And-OR Object Semantics from Web Images via Cost-Sensitive
+<br/>Question-Answer-Based Active Annotations
+<br/><b>Shanghai Jiao Tong University</b><br/><b>University of California, Los Angeles</b><br/><b>cid:107)Chongqing University of Posts and Telecommunications</b></td><td>('22063226', 'Quanshi Zhang', 'quanshi zhang')<br/>('39092098', 'Ying Nian Wu', 'ying nian wu')<br/>('3133970', 'Song-Chun Zhu', 'song-chun zhu')</td><td></td></tr><tr><td>288964068cd87d97a98b8bc927d6e0d2349458a2</td><td>Mean-Variance Loss for Deep Age Estimation from a Face
+<br/>1Key Laboratory of Intelligent Information Processing of Chinese Academy of Sciences (CAS),
+<br/><b>Institute of Computing Technology, CAS, Beijing, 100190, China</b><br/><b>University of Chinese Academy of Sciences, Beijing, 100049, China</b><br/>3CAS Center for Excellence in Brain Science and Intelligence Technology
+</td><td>('34393045', 'Hu Han', 'hu han')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('1710220', 'Xilin Chen', 'xilin chen')</td><td>hongyu.pan@vipl.ict.ac.cn, {hanhu,sgshan,xlchen}@ict.ac.cn
+</td></tr><tr><td>28bc378a6b76142df8762cd3f80f737ca2b79208</td><td>Understanding Objects in Detail with Fine-grained Attributes
+<br/>Ross Girshick5
+<br/>David Weiss7
+</td><td>('1687524', 'Andrea Vedaldi', 'andrea vedaldi')<br/>('2585200', 'Siddharth Mahendran', 'siddharth mahendran')<br/>('2381485', 'Stavros Tsogkas', 'stavros tsogkas')<br/>('35208858', 'Subhransu Maji', 'subhransu maji')<br/>('1776374', 'Juho Kannala', 'juho kannala')<br/>('2827962', 'Esa Rahtu', 'esa rahtu')<br/>('1758219', 'Matthew B. Blaschko', 'matthew b. blaschko')<br/>('1685978', 'Ben Taskar', 'ben taskar')<br/>('2362960', 'Naomi Saphra', 'naomi saphra')<br/>('2920190', 'Sammy Mohamed', 'sammy mohamed')<br/>('2010660', 'Iasonas Kokkinos', 'iasonas kokkinos')<br/>('34838386', 'Karen Simonyan', 'karen simonyan')</td><td></td></tr><tr><td>287900f41dd880802aa57f602e4094a8a9e5ae56</td><td></td><td></td><td></td></tr><tr><td>28c0cb56e7f97046d6f3463378d084e9ea90a89a</td><td>Automatic Face Recognition for Film Character Retrieval in Feature-Length
+<br/>Films
+<br/>Ognjen Arandjelovi´c
+<br/><b>University of Oxford, UK</b></td><td>('1688869', 'Andrew Zisserman', 'andrew zisserman')</td><td>E-mail: oa214@cam.ac.uk,az@robots.ox.ac.uk
+</td></tr><tr><td>28be652db01273289499bc6e56379ca0237506c0</td><td>FaLRR: A Fast Low Rank Representation Solver
+<br/><b>School of Computer Engineering, Nanyang Technological University, Singapore</b><br/><b>of Engineering and Information Technology, University of Technology, Sydney, Australia</b><br/>‡Centre for Quantum Computation & Intelligent Systems and the Faculty
+<br/>In this paper, we develop a fast solver of low rank representation (LRR) [3]
+<br/>called FaLRR, which achieves order-of-magnitude speedup over existing
+<br/>LRR solvers, and is theoretically guaranteed to obtain a global optimum.
+<br/>LRR [3] has shown promising performance for various computer vision
+<br/>applications such as face clustering. Let X = [x1, . . . ,xn] ∈ Rd×n be a set
+<br/>of data samples drawn from a union of several subspaces, where d is the
+<br/>feature dimension and n is the total number of data samples. LRR seeks
+<br/>a low-rank data representation matrix Z ∈ Rn×n such that X can be self-
+<br/>expressed (i.e., X = XZ) when the data is clean. Considering that input
+<br/>data may contain outliers (i.e., some columns of X are corrupted), the LRR
+<br/>problem can be formulated as,
+<br/>(cid:107)Z(cid:107)∗ + λ(cid:107)E(cid:107)2,1
+<br/>min
+<br/>Z,E
+<br/>s.t. X = XZ + E,
+<br/>(1)
+<br/>where λ is a tradeoff parameter and E ∈ Rd×n denotes the representation
+<br/>error. The nuclear norm based term (cid:107)Z(cid:107)∗ acts as an approximation of the
+<br/>rank regularizer, and the (cid:96)2,1 norm based term (cid:107)E(cid:107)2,1 encourages E to be
+<br/>column-sparse.
+<br/>Regarding optimization, several algorithms [2, 3, 4] were proposed to
+<br/>exactly solve LRR. Moreover, to efficiently obtain an approximated solution
+<br/>of LRR, a distributed framework [5] was developed. However, the existing
+<br/>algorithms are usually based on the original formulation in (1) or a similar
+<br/>variant [4], which are two-variable problems with regard to the original data
+<br/>matrix. In this paper, we develop a fast LRR solver named FaLRR, which
+<br/>is based on a new reformulation of LRR as an optimization problem with
+<br/>regard to factorized data (which is obtained by skinny SVD on the original
+<br/>data matrix).
+<br/>Reformulation. Specifically, we study a more general formulation of
+<br/>LRR as follows,
+<br/>min
+<br/>Z∈Rn×m,E∈Rd×m
+<br/>(cid:107)Z(cid:107)∗ + λ(cid:107)E(cid:107)2,1
+<br/>s.t. XD = XZ + E
+<br/>(2)
+<br/>rUr = V(cid:48)
+<br/>which includes (1) as a special case. Let r denote the rank of X. More-
+<br/>over, let us factorize X via the skinny singular value decomposition (SVD):
+<br/>X = UrSrV(cid:48)
+<br/>r, where Ur ∈ Rd×r and Vr ∈ Rn×r are two column-wise orthog-
+<br/>onal matrices that satisfy U(cid:48)
+<br/>rVr = Ir, Sr ∈ Rr×r is a diagonal matrix
+<br/>defined as Sr = diag([σ1, . . . ,σr](cid:48)), in which {σi}r
+<br/>i=1 are the r positive sin-
+<br/>gular values of X sorted in descending order. Based on the definitions above,
+<br/>we present the reformulation by the following theorem:
+<br/>Theorem 1 Let W∗ denote an optimal solution of the following problem,
+<br/>(3)
+<br/>Then, {Z∗,E∗}, defined as Z∗ = VrW∗ and E∗ = XD− XVrW∗, is an op-
+<br/>timal solution of the problem in (2). In particular, (cid:107)Z∗(cid:107)∗ = (cid:107)W∗(cid:107)∗ and
+<br/>(cid:107)E∗(cid:107)2,1 = (cid:107)Sr(V(cid:48)
+<br/>rD−W∗)(cid:107)2,1 always hold, implying that the two problems
+<br/>in (2) and (3) have equal optimal objective values.
+<br/>(cid:107)W(cid:107)∗ + λ(cid:107)Sr(V(cid:48)
+<br/>rD− W)(cid:107)2,1 .
+<br/>min
+<br/>W∈Rr×m
+<br/>Optimization. In terms of optimization, we rewrite the problem in (3)
+<br/>as follows by introducing another variable Q ∈ Rr×m:
+<br/>min
+<br/>W,Q∈Rr×m
+<br/>(cid:107)W(cid:107)∗ + λ(cid:107)SrQ(cid:107)2,1
+<br/>s.t. W + Q = V(cid:48)
+<br/>rD,
+<br/>(4)
+<br/>and develop an efficient algorithm based on the alternating direction method
+<br/>(ADM) [1, 2], in which both resultant subproblems can be solved exactly.
+<br/>The corresponding augmented Lagrangian [1] w.r.t. (4) is
+<br/>Lρ (W,Q,L)
+<br/>= (cid:107)W(cid:107)∗ + λ(cid:107)SrQ(cid:107)2,1 +(cid:10)L,V(cid:48)
+<br/>rD− W− Q(cid:11) +
+<br/>(cid:107)V(cid:48)
+<br/>rD− W− Q(cid:107)2
+<br/>F ,
+</td><td>('2518469', 'Shijie Xiao', 'shijie xiao')<br/>('12135788', 'Wen Li', 'wen li')<br/>('38188040', 'Dong Xu', 'dong xu')<br/>('1692693', 'Dacheng Tao', 'dacheng tao')</td><td></td></tr><tr><td>28bcf31f794dc27f73eb248e5a1b2c3294b3ec9d</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 96– No.13, June 2014
+<br/>Improved Combination of LBP plus LFDA for Facial
+<br/>Expression Recognition using SRC
+<br/>Research Scholar, CSE Department,
+<br/><b>Government College of Engineering, Aurangabad</b><br/>human
+<br/>facial
+<br/>expression
+<br/>recognition
+</td><td></td><td></td></tr><tr><td>2836d68c86f29bb87537ea6066d508fde838ad71</td><td>Personalized Age Progression with Aging Dictionary
+<br/><b>School of Computer Science and Engineering, Nanjing University of Science and Technology</b><br/><b>National University of Singapore</b><br/>Figure 1. A personalized aging face by the proposed method. The personalized aging face contains the aging layer (e.g.,
+<br/>wrinkles) and the personalized layer (e.g., mole). The former can be seen as the corresponding face in a linear combination
+<br/>of the aging patterns, while the latter is invariant in the aging process. For better view, please see ×3 original color PDF.
+</td><td>('2287686', 'Xiangbo Shu', 'xiangbo shu')<br/>('8053308', 'Jinhui Tang', 'jinhui tang')<br/>('2356867', 'Hanjiang Lai', 'hanjiang lai')<br/>('1776665', 'Luoqi Liu', 'luoqi liu')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')</td><td>{shuxb104,laihanj}@gmail.com, jinhuitang@njust.edu.cn, {liuluoqi, eleyans}@nus.edu.sg
+</td></tr><tr><td>28de411a5b3eb8411e7bcb0003c426aa91f33e97</td><td> Volume 4, Issue 4, April 2014 ISSN: 2277 128X
+<br/>International Journal of Advanced Research in
+<br/> Computer Science and Software Engineering
+<br/> Research Paper
+<br/> Available online at: www.ijarcsse.com
+<br/>Emotion Detection Using Facial Expressions -A Review
+<br/>
+<br/>Department of computer science and Application
+<br/> M Tech Student
+<br/> Department of computer science and Application
+<br/> Assistant professor
+<br/><b>Kurukshetra University, Kurukshetra</b><br/><b>Kurukshetra University, Kurukshetra</b><br/> Haryana (India)
+<br/>
+<br/>Haryana (India)
+</td><td>('2234813', 'Jyoti Rani', 'jyoti rani')<br/>('39608299', 'Kanwal Garg', 'kanwal garg')</td><td></td></tr><tr><td>28b26597a7237f9ea6a9255cde4e17ee18122904</td><td>Cerebral Cortex September 2015;25:2876–2882
+<br/>doi:10.1093/cercor/bhu083
+<br/>Advance Access publication April 25, 2014
+<br/>Network Interactions Explain Sensitivity to Dynamic Faces in the Superior Temporal Sulcus
+<br/>1MRC Cognition and Brain Sciences Unit, Cambridge CB2 7EF, UK and 2Wellcome Centre for Imaging Neuroscience,
+<br/><b>University College London, 12 Queen Square, London WC1N 3BG, UK</b><br/>The superior temporal sulcus (STS) in the human and monkey is sen-
+<br/>sitive to the motion of complex forms such as facial and bodily
+<br/>actions. We used functional magnetic resonance imaging (fMRI) to
+<br/>explore network-level explanations for how the form and motion
+<br/>information in dynamic facial expressions might be combined in the
+<br/>human STS. Ventral occipitotemporal areas selective for facial form
+<br/>were localized in occipital and fusiform face areas (OFA and FFA),
+<br/>and motion sensitivity was localized in the more dorsal temporal
+<br/>area V5. We then tested various connectivity models that modeled
+<br/>communication between the ventral form and dorsal motion path-
+<br/>ways. We show that facial form information modulated transmission
+<br/>of motion information from V5 to the STS, and that this face-
+<br/>selective modulation likely originated in OFA. This finding shows that
+<br/>form-selective motion sensitivity in the STS can be explained in
+<br/>terms of modulation of gain control on information flow in the motion
+<br/>pathway, and provides a substantial constraint for theories of the
+<br/>perception of faces and biological motion.
+<br/>Keywords: biological motion, dynamic causal modeling, face perception,
+<br/>functional magnetic resonance imaging, superior temporal sulcus
+<br/>Introduction
+<br/>Humans and other animals effortlessly recognize facial iden-
+<br/>tities and actions such as emotional expressions even when
+<br/>faces continuously move. Brain representations of dynamic
+<br/>faces may be manifested as greater responses in the superior
+<br/>temporal sulcus (STS) to facial motion than motion of nonface
+<br/>objects (Pitcher et al. 2011), suggesting localized representa-
+<br/>tions that combine information about motion and facial form.
+<br/>This finding relates to a considerable literature on “biological
+<br/>motion,” which studies how the complex forms of bodily actions
+<br/>are perceived from only the motion of light points fixed to limb
+<br/>joints, with form-related texture cues removed (Johansson 1973).
+<br/>Perception of such stimuli has been repeatedly associated with
+<br/>the human posterior STS (Vaina et al. 2001; Vaina and Gross
+<br/>2004; Giese and Poggio 2003; Hein and Knight 2008; Jastorff
+<br/>and Orban 2009) with similar results observed in potentially cor-
+<br/>responding areas of the macaque STS (Oram and Perrett 1994;
+<br/>Jastorff et al. 2012). The STS has been described as integrating
+<br/>form and motion information (Vaina et al. 2001; Giese and
+<br/>Poggio 2003), containing neurons that code for conjunctions of
+<br/>certain forms and movements (Oram and Perrett 1996). Never-
+<br/>theless, the mechanisms by which STS neurons come to be sensi-
+<br/>tive to the motion of some forms, but not others, remains a
+<br/>matter of speculation (Giese and Poggio 2003).
+<br/>We propose that network interactions can provide a mech-
+<br/>anistic explanation for STS sensitivity to motion that is selective
+<br/>to certain forms, in this case, faces. Specifically, STS responses
+<br/>to dynamic faces could result from communicative interactions
+<br/>between pathways sensitive to motion and facial form. Such in-
+<br/>teractions can occur when one pathway modulates or “gates”
+<br/>the ability of the other pathway to transmit information to the
+<br/>STS. Using functional magnetic resonance imaging (fMRI), we
+<br/>localized face-selective motion sensitivity in the STS of the
+<br/>human and then used causal connectivity analyses to model
+<br/>how these STS responses are influenced by areas sensitive to
+<br/>motion and areas selective to facial form. We localized ventral
+<br/>occipital and fusiform face areas (OFA and FFA) (Kanwisher
+<br/>et al. 1997), which selectively respond to facial form versus
+<br/>other objects (Calder and Young 2005; Calder 2011). We also
+<br/>localized motion sensitivity to faces and nonfaces in the more
+<br/>dorsal temporal hMT+/V5 complex (hereafter, V5). Together,
+<br/>these areas provide ventral and dorsal pathways to the STS.
+<br/>The ventral pathway transmits facial form information, via OFA
+<br/>and FFA, and the dorsal pathway transmits motion informa-
+<br/>tion, via V5. We then compared combinations of bilinear and
+<br/>nonlinear dynamic causal models (Friston et al. 2003) to iden-
+<br/>tify connectivity models that optimally explain how interac-
+<br/>tions between these form and motion pathways could generate
+<br/>STS responses to dynamic faces. We found that information
+<br/>about facial form, most likely originating in the OFA, gates the
+<br/>transmission of information about motion from V5 to the STS.
+<br/>Thus, integrated facial form and motion information in the STS
+<br/>can arise due to network interactions, where form and motion
+<br/>pathways play distinct roles.
+<br/>Materials and Methods
+<br/>Participants
+<br/>fMRI data were collected from 18 healthy, right-handed participants
+<br/>(over 18 years, 13 females) with normal or corrected-to-normal vision.
+<br/>Experimental procedures were approved by the Cambridge Psych-
+<br/>ology Research Ethics Committee.
+<br/>Imaging Acquisition
+<br/>A 3T Siemens Tim Trio MRI scanner with a 32-channel head coil was
+<br/>used for data acquisition. We collected a structural T1-weighted MPRAGE
+<br/>image (1-mm isotropic voxels). Functional data consisted of whole-brain
+<br/>T2*-weighted echo-planar imaging volumes with 32 oblique axial slices
+<br/>that were 3.5 mm thick, in-plane 64 × 64 matrix with resolution of 3 × 3
+<br/>mm, TR 2 s, TE 30 ms, flip angle 78°. We discarded the first 5 “dummy”
+<br/>volumes to ensure magnetic equilibration.
+<br/>Experimental Design
+<br/>The experiment used a block design with 2 runs (229 scans per run),
+<br/>which were collected as the localizer for another experiment (Furl,
+<br/>Henson, et al. 2013). Note that the dynamic causal modeling (DCM)
+<br/>analyses reported in Furl, Henson et al. (2013) used independent data
+<br/>(from separate runs using different stimuli) to address a different phe-
+<br/>nomenon than considered here. All blocks were 11 s, comprised
+<br/><b>The Author 2014. Published by Oxford University Press</b><br/>This is an Open Access article distributed under the terms of the Creative Commons Attribution License (http://creativecommons.org/licenses/by/3.0/), which permits unrestricted
+<br/>reuse, distribution, and reproduction in any medium, provided the original work is properly cited.
+</td><td>('3162581', 'Nicholas Furl', 'nicholas furl')<br/>('1690599', 'Richard N. Henson', 'richard n. henson')<br/>('1737497', 'Karl J. Friston', 'karl j. friston')<br/>('2825775', 'Andrew J. Calder', 'andrew j. calder')<br/>('3162581', 'Nicholas Furl', 'nicholas furl')</td><td>UK. E-mail: nick.furl@mrc-cbu.cam.ac.uk
+</td></tr><tr><td>28fe6e785b32afdcd2c366c9240a661091b850cf</td><td>International Journal of Applied Information Systems (IJAIS) – ISSN : 2249-0868
+<br/>Foundation of Computer Science FCS, New York, USA
+<br/>Volume 10 – No.7, March 2016 – www.ijais.org
+<br/>Facial Expression Recognition using Patch based Gabor
+<br/>Features
+<br/>Electronics & Telecommunication Engg
+<br/>Electronics & Telecommunication Engg
+<br/><b>St. Francis Institute of Technology</b><br/><b>St. Francis Institute of Technology</b><br/>Department
+<br/>Mumbai, India
+<br/>Department
+<br/>Mumbai, India
+</td><td>('40187425', 'Vaqar Ansari', 'vaqar ansari')<br/>('9390824', 'Anju Chandran', 'anju chandran')</td><td></td></tr><tr><td>28c9198d30447ffe9c96176805c1cd81615d98c8</td><td>rsos.royalsocietypublishing.org
+<br/>Research
+<br/>Cite this article: Saunders TJ, Taylor AH,
+<br/>Atkinson QD. 2016 No evidence that a range of
+<br/>artificial monitoring cues influence online
+<br/>donations to charity in an MTurk sample.
+<br/>R. Soc. open sci. 3: 150710.
+<br/>http://dx.doi.org/10.1098/rsos.150710
+<br/>Received: 22 December 2015
+<br/>Accepted: 13 September 2016
+<br/>Subject Category:
+<br/>Psychology and cognitive neuroscience
+<br/>Subject Areas:
+<br/>behaviour/psychology/evolution
+<br/>Keywords:
+<br/>prosociality, eye images, charity donation,
+<br/>reputation, online behaviour
+<br/>Author for correspondence:
+<br/>Quentin D. Atkinson
+<br/>No evidence that a range of
+<br/>artificial monitoring cues
+<br/>influence online donations
+<br/>to charity in an MTurk
+<br/>sample
+<br/>Timothy J. Saunders, Alex H. Taylor and
+<br/>Quentin D. Atkinson
+<br/><b>School of Psychology, University of Auckland, Auckland, New Zealand</b><br/>AHT, 0000-0003-3492-7667
+<br/>Monitoring cues, such as an image of a face or pair of
+<br/>eyes, have been found to increase prosocial behaviour in
+<br/>several studies. However, other studies have found little
+<br/>or no support for this effect. Here, we examined whether
+<br/>monitoring cues affect online donations to charity while
+<br/>manipulating the emotion displayed, the number of watchers
+<br/>and the cue type. We also include as statistical controls a
+<br/>range of likely covariates of prosocial behaviour. Using the
+<br/>crowdsourcing Internet marketplace, Amazon Mechanical Turk
+<br/>(MTurk), 1535 participants completed our survey and were
+<br/>given the opportunity to donate to charity while being shown
+<br/>an image prime. None of the monitoring primes we tested
+<br/>had a significant effect on charitable giving. By contrast, the
+<br/>control variables of culture, age, sex and previous charity
+<br/>giving frequency did predict donations. This work supports
+<br/>the importance of cultural differences and enduring individual
+<br/>differences in prosocial behaviour and shows that a range of
+<br/>artificial monitoring cues do not reliably boost online charity
+<br/>donation on MTurk.
+<br/>Introduction
+<br/>1.
+<br/>Humans care deeply about their reputations [1]. If we know
+<br/>our choices will be made public, we act more prosocially [2–6].
+<br/>Recent work has shown that simple but evolutionarily significant
+<br/>artificial monitoring cues, such as an image of a pair of eyes,
+<br/>can promote cooperation [7–22]. While an image alone cannot
+<br/>monitor behaviour, the evolutionary legacy hypothesis holds that
+<br/>humans possess an evolved proximate mechanism that causes us
+<br/>to react to monitoring cues as if our reputations are at stake [9].
+<br/>Work using a range of economic games has shown that people act
+<br/>2016 The Authors. Published by the Royal Society under the terms of the Creative Commons
+<br/>Attribution License http://creativecommons.org/licenses/by/4.0/, which permits unrestricted
+<br/>use, provided the original author and source are credited.
+</td><td></td><td>e-mail: q.atkinson@auckland.ac.nz
+</td></tr><tr><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td><td></td><td></td><td></td></tr><tr><td>2866cbeb25551257683cf28f33d829932be651fe</td><td>In Proceedings of the 2018 IEEE International Conference on Image Processing (ICIP)
+<br/>The final publication is available at: http://dx.doi.org/10.1109/ICIP.2018.8451026
+<br/>A TWO-STEP LEARNING METHOD FOR DETECTING LANDMARKS
+<br/>ON FACES FROM DIFFERENT DOMAINS
+<br/>Erickson R. Nascimento
+<br/>Universidade Federal de Minas Gerais (UFMG), Brazil
+</td><td>('2749017', 'Bruna Vieira Frade', 'bruna vieira frade')</td><td>{brunafrade, erickson}@dcc.ufmg.br
+</td></tr><tr><td>28d99dc2d673d62118658f8375b414e5192eac6f</td><td>Using Ranking-CNN for Age Estimation
+<br/>1Department of Computer Science
+<br/>2Department of Mathematics
+<br/>3Research & Innovation Center
+<br/><b>Wayne State University</b><br/><b>Wayne State University</b><br/>Ford Motor Company
+</td><td>('15841224', 'Shixing Chen', 'shixing chen')<br/>('28887876', 'Jialiang Le', 'jialiang le')</td><td>{schen, czhang, mdong}@wayne.edu
+<br/>{jle1, mrao}@ford.com
+</td></tr><tr><td>280bc9751593897091015aaf2cab39805768b463</td><td>U.U.Tariq et al. / Carpathian Journal of Electronic and Computer Engineering 6/1 (2013) 8-15 8
+<br/>________________________________________________________________________________________________________
+<br/>Gender Perception From Faces Using Boosted LBPH
+<br/>(Local Binary Patten Histograms)
+<br/><b>COMSATS Institute of Information Technology</b><br/>Department of Electrical Engineering
+<br/>Abbottabad, Pakistan
+<br/>
+</td><td></td><td>Umair_tariq29@yahoo.com
+</td></tr><tr><td>28aa89b2c827e5dd65969a5930a0520fdd4a3dc7</td><td></td><td></td><td></td></tr><tr><td>28b061b5c7f88f48ca5839bc8f1c1bdb1e6adc68</td><td>Predicting User Annoyance Using Visual Attributes
+<br/>Virginia Tech
+<br/>Goibibo
+<br/>Virginia Tech
+<br/>Virginia Tech
+</td><td>('1755657', 'Gordon Christie', 'gordon christie')<br/>('2076800', 'Amar Parkash', 'amar parkash')<br/>('3051209', 'Ujwal Krothapalli', 'ujwal krothapalli')<br/>('1713589', 'Devi Parikh', 'devi parikh')</td><td>gordonac@vt.edu
+<br/>amar08007@iiitd.ac.in
+<br/>ujjwal@vt.edu
+<br/>parikh@vt.edu
+</td></tr><tr><td>288d2704205d9ca68660b9f3a8fda17e18329c13</td><td>Studying Very Low Resolution Recognition Using Deep Networks
+<br/><b>Beckman Institute, University of Illinois at Urbana-Champaign, Urbana, IL 61801, USA</b></td><td>('2969311', 'Zhangyang Wang', 'zhangyang wang')<br/>('3307026', 'Shiyu Chang', 'shiyu chang')<br/>('2680237', 'Yingzhen Yang', 'yingzhen yang')<br/>('1771885', 'Ding Liu', 'ding liu')<br/>('1739208', 'Thomas S. Huang', 'thomas s. huang')</td><td>{zwang119, chang87, yyang58, dingliu2, t-huang1}@illinois.edu
+</td></tr><tr><td>17b46e2dad927836c689d6787ddb3387c6159ece</td><td>GeoFaceExplorer: Exploring the Geo-Dependence of
+<br/>Facial Attributes
+<br/><b>University of Kentucky</b><br/>UNC Charlotte
+<br/>UNC Charlotte
+<br/><b>University of Kentucky</b></td><td>('2121759', 'Connor Greenwell', 'connor greenwell')<br/>('1690110', 'Richard Souvenir', 'richard souvenir')<br/>('1715594', 'Scott Spurlock', 'scott spurlock')<br/>('1990750', 'Nathan Jacobs', 'nathan jacobs')</td><td>csgr222@uky.edu
+<br/>souvenir@uncc.edu
+<br/>sspurloc@uncc.edu
+<br/>jacobs@cs.uky.edu
+</td></tr><tr><td>17a85799c59c13f07d4b4d7cf9d7c7986475d01c</td><td>ADVERTIMENT. La consulta d’aquesta tesi queda condicionada a l’acceptació de les següents
+<br/>condicions d'ús: La difusió d’aquesta tesi per mitjà del servei TDX (www.tesisenxarxa.net) ha
+<br/>estat autoritzada pels titulars dels drets de propietat intel·lectual únicament per a usos privats
+<br/>emmarcats en activitats d’investigació i docència. No s’autoritza la seva reproducció amb finalitats
+<br/>de lucre ni la seva difusió i posada a disposició des d’un lloc aliè al servei TDX. No s’autoritza la
+<br/>presentació del seu contingut en una finestra o marc aliè a TDX (framing). Aquesta reserva de
+<br/>drets afecta tant al resum de presentació de la tesi com als seus continguts. En la utilització o cita
+<br/>de parts de la tesi és obligat indicar el nom de la persona autora.
+<br/>ADVERTENCIA. La consulta de esta tesis queda condicionada a la aceptación de las siguientes
+<br/>condiciones de uso: La difusión de esta tesis por medio del servicio TDR (www.tesisenred.net) ha
+<br/>sido autorizada por los titulares de los derechos de propiedad intelectual únicamente para usos
+<br/>privados enmarcados en actividades de investigación y docencia. No se autoriza su reproducción
+<br/>con finalidades de lucro ni su difusión y puesta a disposición desde un sitio ajeno al servicio TDR.
+<br/>No se autoriza la presentación de su contenido en una ventana o marco ajeno a TDR (framing).
+<br/>Esta reserva de derechos afecta tanto al resumen de presentación de la tesis como a sus
+<br/>contenidos. En la utilización o cita de partes de la tesis es obligado indicar el nombre de la
+<br/>persona autora.
+<br/>WARNING. On having consulted this thesis you’re accepting the following use conditions:
+<br/>Spreading this thesis by the TDX (www.tesisenxarxa.net) service has been authorized by the
+<br/>titular of the intellectual property rights only for private uses placed in investigation and teaching
+<br/>activities. Reproduction with lucrative aims is not authorized neither its spreading and availability
+<br/>from a site foreign to the TDX service. Introducing its content in a window or frame foreign to the
+<br/>TDX service is not authorized (framing). This rights affect to the presentation summary of the
+<br/>thesis as well as to its contents. In the using or citation of parts of the thesis it’s obliged to indicate
+<br/>the name of the author
+</td><td></td><td></td></tr><tr><td>1768909f779869c0e83d53f6c91764f41c338ab5</td><td>A Large-Scale Car Dataset for Fine-Grained Categorization and Verification
+<br/><b>The Chinese University of Hong Kong</b><br/><b>Shenzhen Key Lab of CVPR, Shenzhen Institutes of Advanced Technology</b><br/>Chinese Academy of Sciences, Shenzhen, China
+</td><td>('2889075', 'Linjie Yang', 'linjie yang')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')<br/>('1717179', 'Chen Change Loy', 'chen change loy')<br/>('1693209', 'Ping Luo', 'ping luo')</td><td>{yl012,pluo,ccloy,xtang}@ie.cuhk.edu.hk
+</td></tr><tr><td>171ca25bc2cdfc79cad63933bcdd420d35a541ab</td><td>Calibration-Free Gaze Estimation Using Human Gaze Patterns
+<br/><b>University of Amsterdam</b><br/>Amsterdam, The Netherlands
+</td><td>('1765602', 'Fares Alnajar', 'fares alnajar')<br/>('1695527', 'Theo Gevers', 'theo gevers')<br/>('9301018', 'Roberto Valenti', 'roberto valenti')<br/>('1682828', 'Sennay Ghebreab', 'sennay ghebreab')</td><td>{f.alnajar,th.gevers,r.valenti,s.ghebreab}@uva.nl
+</td></tr><tr><td>176bd61cc843d0ed6aa5af83c22e3feb13b89fe1</td><td>14
+<br/>Investigating Spontaneous Facial Action
+<br/>Recognition through
+<br/>AAM Representations of the Face
+<br/><b>Carnegie Mellon University</b><br/>USA
+<br/>1. Introduction
+<br/>The Facial Action Coding System (FACS) [Ekman et al., 2002] is the leading method for
+<br/>measuring facial movement in behavioral science. FACS has been successfully applied, but
+<br/>not limited to, identifying the differences between simulated and genuine pain, differences
+<br/>betweenwhen people are telling the truth versus lying, and differences between suicidal and
+<br/>non-suicidal patients [Ekman and Rosenberg, 2005]. Successfully recognizing facial actions
+<br/>is recognized as one of the “major” hurdles to overcome, for successful automated
+<br/>expression recognition.
+<br/>How one should represent the face for effective action unit recognition is the main topic of
+<br/>interest in this chapter. This interest is motivated by the plethora of work in existence in
+<br/>other areas of face analysis, such as face recognition [Zhao et al., 2003], that demonstrate the
+<br/>benefit of representation when performing recognition tasks. It is well understood in the
+<br/>field of statistical pattern recognition [Duda et al., 2001] given a fixed classifier and training
+<br/>set that how one represents a pattern can greatly effect recognition performance. The face
+<br/>can be represented in a myriad of ways. Much work in facial action recognition has centered
+<br/>solely on the appearance (i.e., pixel values) of the face given quite a basic alignment (e.g.,
+<br/>eyes and nose). In our work we investigate the employment of the Active Appearance
+<br/>Model (AAM) framework [Cootes et al., 2001, Matthews and Baker, 2004] in order to derive
+<br/>effective representations for facial action recognition. Some of the representations we will be
+<br/>employing can be seen in Figure 1.
+<br/>Experiments in this chapter are run across two action unit databases. The Cohn- Kanade
+<br/>FACS-Coded Facial Expression Database [Kanade et al., 2000] is employed to investigate the
+<br/>effect of face representation on posed facial action unit recognition. Posed facial actions are
+<br/>those that have been elicited by asking subjects to deliberately make specific facial actions or
+<br/>expressions. Facial actions are typically recorded under controlled circumstances that
+<br/>include full-face frontal view, good lighting, constrained head movement and selectivity in
+<br/>terms of the type and magnitude of facial actions. Almost all work in automatic facial
+<br/>expression analysis has used posed image data and the Cohn-Kanade database may be the
+<br/>database most widely used [Tian et al., 2005]. The RU-FACS Spontaneous Expression
+<br/>Database is employed to investigate how these same representations affect spontaneous facial
+<br/>action unit recognition. Spontaneous facial actions are representative of “real-world” facial
+<br/>Source: Face Recognition, Book edited by: Kresimir Delac and Mislav Grgic, ISBN 978-3-902613-03-5, pp.558, I-Tech, Vienna, Austria, June 2007
+</td><td>('1820249', 'Simon Lucey', 'simon lucey')<br/>('2640279', 'Ahmed Bilal Ashraf', 'ahmed bilal ashraf')<br/>('1737918', 'Jeffrey F. Cohn', 'jeffrey f. cohn')</td><td></td></tr><tr><td>17d01f34dfe2136b404e8d7f59cebfb467b72b26</td><td>Riemannian Similarity Learning
+<br/><b>Bioinformatics Institute, A*STAR, Singapore</b><br/><b>School of Computing, National University of Singapore, Singapore</b></td><td>('39466179', 'Li Cheng', 'li cheng')</td><td>chengli@bii.a-star.edu.sg
+</td></tr><tr><td>176f26a6a8e04567ea71677b99e9818f8a8819d0</td><td>MEG: Multi-Expert Gender classification from
+<br/>face images in a demographics-balanced dataset
+</td><td>('1763890', 'Maria De Marsico', 'maria de marsico')<br/>('1795333', 'Michele Nappi', 'michele nappi')<br/>('1772512', 'Daniel Riccio', 'daniel riccio')</td><td>1Universidad de Las Palmas de Gran Canaria, Spain. Email: mcastrillon@siani.es
+<br/>2Sapienza University of Rome, Italy. Email: demarsico@di.uniroma1.it
+<br/>3University of Salerno, Fisciano (SA), Italy. Email: mnappi@unisa.it
+<br/>4University of Naples Federico II, Italy, Email: daniel.riccio@unina.it
+</td></tr><tr><td>17cf838720f7892dbe567129dcf3f7a982e0b56e</td><td>Global-Local Face Upsampling Network
+<br/><b>Mitsubishi Electric Research Labs (MERL), Cambridge, MA, USA</b></td><td>('2577513', 'Oncel Tuzel', 'oncel tuzel')<br/>('2066068', 'Yuichi Taguchi', 'yuichi taguchi')<br/>('2387467', 'John R. Hershey', 'john r. hershey')</td><td></td></tr><tr><td>17035089959a14fe644ab1d3b160586c67327db2</td><td></td><td></td><td></td></tr><tr><td>17370f848801871deeed22af152489e39b6e1454</td><td>UNDERSAMPLED FACE RECOGNITION WITH ONE-PASS DICTIONARY LEARNING
+<br/><b>Research Center for Information Technology Innovation, Academia Sinica, Taipei, Taiwan</b></td><td>('2017922', 'Chia-Po Wei', 'chia-po wei')<br/>('2733735', 'Yu-Chiang Frank Wang', 'yu-chiang frank wang')</td><td>{cpwei, ycwang}@citi.sinica.edu.tw
+</td></tr><tr><td>178a82e3a0541fa75c6a11350be5bded133a59fd</td><td>Techset Composition Ltd, Salisbury
+<br/>Doc:
+<br/>{IEE}BMT/Articles/Pagination/BMT20140045.3d
+<br/>www.ietdl.org
+<br/>Received on 15th July 2014
+<br/>Revised on 17th September 2014
+<br/>Accepted on 23rd September 2014
+<br/>doi: 10.1049/iet-bmt.2014.0045
+<br/>ISSN 2047-4938
+<br/>BioHDD: a dataset for studying biometric
+<br/>identification on heavily degraded data
+<br/><b>IT Instituto de Telecomunica es, University of Beira Interior, Covilh , Portugal</b><br/><b>Remote Sensing Unit Optics, Optometry and Vision Sciences Group, University of Beira Interior</b><br/>Covilhã, Portugal
+</td><td>('1712429', 'Hugo Proença', 'hugo proença')</td><td>E-mail: gmelfe@ubi.pt
+</td></tr><tr><td>17479e015a2dcf15d40190e06419a135b66da4e0</td><td>Predicting First Impressions with Deep Learning
+<br/><b>University of Notre Dame</b><br/><b>Harvard University 3Perceptive Automata, Inc</b></td><td>('7215627', 'Mel McCurrie', 'mel mccurrie')<br/>('51174355', 'Fernando Beletti', 'fernando beletti')<br/>('51176594', 'Lucas Parzianello', 'lucas parzianello')<br/>('51176974', 'Allen Westendorp', 'allen westendorp')<br/>('2613438', 'Walter J. Scheirer', 'walter j. scheirer')</td><td></td></tr><tr><td>17fa1c2a24ba8f731c8b21f1244463bc4b465681</td><td>Published as a conference paper at ICLR 2016
+<br/>DEEP MULTI-SCALE VIDEO PREDICTION BEYOND
+<br/>MEAN SQUARE ERROR
+<br/><b>New York University</b><br/>2Facebook Artificial Intelligence Research
+</td><td>('2341378', 'Camille Couprie', 'camille couprie')</td><td>mathieu@cs.nyu.edu, {coupriec,yann}@fb.com
+</td></tr><tr><td>17579791ead67262fcfb62ed8765e115fb5eca6f</td><td>Real-Time Fashion-guided Clothing Semantic Parsing: a Lightweight Multi-Scale
+<br/>Inception Neural Network and Benchmark
+<br/>1School of Data and Computer Science
+<br/><b>Beijing University of Posts and Telecommunications, Beijing, P.R. China</b><br/><b>Sun Yat-Sen University, Guangzhou, P.R. China</b><br/>2 PRMCT Lab
+</td><td>('3079146', 'Yuhang He', 'yuhang he')</td><td></td></tr><tr><td>177d1e7bbea4318d379f46d8d17720ecef3086ac</td><td>JMLR: Workshop and Conference Proceedings 44 (2015) 60-71
+<br/>NIPS 2015
+<br/>The 1st International Workshop “Feature Extraction: Modern Questions and Challenges”
+<br/>Learning Multi-channel Deep Feature Representations for
+<br/>Face Recognition
+<br/><b>Wayne State University, Detroit, MI 48202, USA</b><br/><b>University of Illinois at Urbana Champaign, Urbana</b><br/>IL 61801, USA
+<br/>Editor: Afshin Rostamizadeh
+</td><td>('2410994', 'Xue-wen Chen', 'xue-wen chen')<br/>('2708905', 'Melih S. Aslan', 'melih s. aslan')<br/>('1982110', 'Kunlei Zhang', 'kunlei zhang')<br/>('1739208', 'Thomas S. Huang', 'thomas s. huang')</td><td>xuewen.chen@wayne.edu
+<br/>melih.aslan@wayne.edu
+<br/>kunlei.zhang@wayne.edu
+<br/>t-huang1@illinois.edu
+</td></tr><tr><td>17a995680482183f3463d2e01dd4c113ebb31608</td><td>IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. X, NO. Y, MONTH Z
+<br/>Structured Label Inference for
+<br/>Visual Understanding
+</td><td>('3079079', 'Nelson Nauata', 'nelson nauata')<br/>('2804000', 'Hexiang Hu', 'hexiang hu')<br/>('2057809', 'Guang-Tong Zhou', 'guang-tong zhou')<br/>('47640964', 'Zhiwei Deng', 'zhiwei deng')<br/>('2928799', 'Zicheng Liao', 'zicheng liao')<br/>('10771328', 'Greg Mori', 'greg mori')</td><td></td></tr><tr><td>17aa78bd4331ef490f24bdd4d4cd21d22a18c09c</td><td></td><td></td><td></td></tr><tr><td>170a5f5da9ac9187f1c88f21a88d35db38b4111a</td><td>Online Real-time Multiple Spatiotemporal Action Localisation and Prediction
+<br/>Philip Torr2
+<br/><b>Oxford Brookes University</b><br/><b>Oxford University</b><br/>Figure 1: Online spatiotemporal action localisation in a test ‘fencing’ video from UCF-101 [39]. (a) to (c): A 3D volumetric view of
+<br/>the video showing detection boxes and selected frames. At any given time, a certain portion (%) of the entire video is observed by the
+<br/>system, and the detection boxes are linked up to incrementally build online space-time action tubes in real-time. Note that the proposed
+<br/>method is able to detect multiple co-occurring action instances (3 action instances are shown in different colours). Note also that one of
+<br/>the fencers moves out of the image boundaries between frames 114 and 145, to which our model responds by trimming action tube 01
+<br/>at frame 114, and initiating a new tube (03) at frame 146.
+</td><td>('1931660', 'Gurkirt Singh', 'gurkirt singh')<br/>('3017538', 'Suman Saha', 'suman saha')<br/>('3019396', 'Michael Sapienza', 'michael sapienza')<br/>('1754181', 'Fabio Cuzzolin', 'fabio cuzzolin')</td><td>{gurkirt.singh-2015,suman.saha-2014,fabio.cuzzolin}@brookes.ac.uk
+<br/>{michael.sapienza,philip.torr}@eng.ox.ac.uk
+</td></tr><tr><td>17c0d99171efc957b88c31a465c59485ab033234</td><td></td><td></td><td></td></tr><tr><td>1742ffea0e1051b37f22773613f10f69d2e4ed2c</td><td></td><td></td><td></td></tr><tr><td>1791f790b99471fc48b7e9ec361dc505955ea8b1</td><td></td><td></td><td></td></tr><tr><td>17a8d1b1b4c23a630b051f35e47663fc04dcf043</td><td>Differential Angular Imaging for Material Recognition
+<br/><b>Rutgers University, Piscataway, NJ</b><br/><b>Drexel University, Philadelphia, PA</b></td><td>('48181328', 'Jia Xue', 'jia xue')</td><td>{jia.xue,zhang.hang}@rutgers.edu, kdana@ece.rutgers.edu, kon@drexel.edu
+</td></tr><tr><td>171d8a39b9e3d21231004f7008397d5056ff23af</td><td>Simultaneous Facial Landmark Detection, Pose and Deformation Estimation
+<br/>under Facial Occlusion
+<br/>ECSE Department
+<br/><b>Institute of Automation</b><br/>ECSE Department
+<br/><b>Rensselaer Polytechnic Institute</b><br/>Chinese Academy of Sciences
+<br/><b>Rensselaer Polytechnic Institute</b></td><td>('1746738', 'Yue Wu', 'yue wu')<br/>('2864523', 'Chao Gou', 'chao gou')<br/>('1726583', 'Qiang Ji', 'qiang ji')</td><td>wuyuesophia@gmail.com
+<br/>gouchao2012@ic.ac.cn
+<br/>jiq@rpi.edu
+</td></tr><tr><td>17045163860fc7c38a0f7d575f3e44aaa5fa40d7</td><td>Boosting VLAD with Supervised Dictionary
+<br/>Learning and High-Order Statistics
+<br/><b>Southwest Jiaotong University, Chengdu, China</b><br/><b>The Chinese University of Hong Kong</b><br/><b>Shenzhen Key Lab of CVPR, Shenzhen Institutes of Advanced Technology, CAS</b><br/>Hong Kong, China
+<br/><b>Hengyang Normal University, Hengyang, China</b><br/>Shenzhen, China
+</td><td>('1766837', 'Xiaojiang Peng', 'xiaojiang peng')<br/>('33345248', 'Limin Wang', 'limin wang')<br/>('33427555', 'Yu Qiao', 'yu qiao')<br/>('37040717', 'Qiang Peng', 'qiang peng')</td><td></td></tr><tr><td>174930cac7174257515a189cd3ecfdd80ee7dd54</td><td>Multi-view Face Detection Using Deep Convolutional
+<br/>Neural Networks
+<br/>Yahoo
+<br/>Mohammad Saberian
+<br/>inc.com
+<br/>Yahoo
+<br/>Yahoo
+</td><td>('2114438', 'Sachin Sudhakar Farfade', 'sachin sudhakar farfade')<br/>('33642044', 'Li-Jia Li', 'li-jia li')</td><td>fsachin@yahoo-inc.com
+<br/>saberian@yahoo-
+<br/>lijiali.vision@gmail.com
+</td></tr><tr><td>17fad2cc826d2223e882c9fda0715fcd5475acf3</td><td></td><td></td><td></td></tr><tr><td>17e563af203d469c456bb975f3f88a741e43fb71</td><td>Naming TV Characters by Watching and Analyzing Dialogs
+<br/><b>Karlsruhe Institute of Technology, 76131 Karlsruhe, Germany</b></td><td>('3408009', 'Monica-Laura Haurilet', 'monica-laura haurilet')<br/>('2103464', 'Makarand Tapaswi', 'makarand tapaswi')<br/>('2256981', 'Ziad Al-Halah', 'ziad al-halah')<br/>('1742325', 'Rainer Stiefelhagen', 'rainer stiefelhagen')</td><td>{haurilet, tapaswi, ziad.al-halah, rainer.stiefelhagen}@kit.edu
+</td></tr><tr><td>171389529df11cc5a8b1fbbe659813f8c3be024d</td><td>Manifold Estimation in View-based Feature
+<br/>Space for Face Synthesis across Poses
+<br/>Center for Visualization and Virtual Environments
+<br/><b>University of Kentucky, USA</b></td><td>('2257812', 'Xinyu Huang', 'xinyu huang')<br/>('2943451', 'Jizhou Gao', 'jizhou gao')<br/>('1772171', 'Sen-Ching S. Cheung', 'sen-ching s. cheung')<br/>('38958903', 'Ruigang Yang', 'ruigang yang')</td><td></td></tr><tr><td>17d5e5c9a9ee4cf85dfbb9d9322968a6329c3735</td><td>Study on Parameter Selection Using SampleBoost
+<br/>Computer Science and Engineering Department,
+<br/><b>University of North Texas, Denton, Texas, USA</b></td><td>('1898814', 'Mohamed Abouelenien', 'mohamed abouelenien')<br/>('1982703', 'Xiaohui Yuan', 'xiaohui yuan')</td><td>{mohamed, xiaohui.yuan}@unt.edu
+</td></tr><tr><td>1750db78b7394b8fb6f6f949d68f7c24d28d934f</td><td>Detecting Facial Retouching Using Supervised
+<br/>Deep Learning
+<br/>Bowyer, Fellow, IEEE
+</td><td>('5014060', 'Aparna Bharati', 'aparna bharati')<br/>('39129417', 'Richa Singh', 'richa singh')<br/>('2338122', 'Mayank Vatsa', 'mayank vatsa')</td><td></td></tr><tr><td>17cf6195fd2dfa42670dc7ada476e67b381b8f69</td><td>†Image Processing Laboratory, Department of Image Engineering
+<br/>Graduate School of Advanced Imaging Science, Multimedia, and Film
+<br/><b>Chung-Ang University, Seoul, Korea</b><br/><b>Korea Electronics Technology Institute, 203-103 B/D 192, Yakdae-Dong</b><br/>Wonmi-Gu Puchon-Si, Kyunggi-Do 420-140, Korea
+<br/>‡Imaging, Robotics, and Intelligent Systems Laboratory
+<br/>Department of Electrical and Computer Engineering
+<br/><b>The University of Tennessee, Knoxville</b><br/>AUTOMATIC FACE REGION TRACKING FOR HIGHLY ACCURATE FACE
+<br/>RECOGNITION IN UNCONSTRAINED ENVIRONMENTS
+</td><td>('2243148', 'Young-Ouk Kim', 'young-ouk kim')<br/>('1684329', 'Joonki Paik', 'joonki paik')<br/>('39533703', 'Jingu Heo', 'jingu heo')</td><td></td></tr><tr><td>173657da03e3249f4e47457d360ab83b3cefbe63</td><td>HKU-Face: A Large Scale Dataset for
+<br/>Deep Face Recognition
+<br/>Final Report
+<br/>3035140108
+<br/>COMP4801 Final Year Project
+<br/>Project Code: 17007
+</td><td>('3347561', 'Haicheng Wang', 'haicheng wang')</td><td></td></tr><tr><td>174f46eccb5852c1f979d8c386e3805f7942bace</td><td>The Shape-Time Random Field for Semantic Video Labeling
+<br/>School of Computer Science
+<br/><b>University of Massachusetts, Amherst MA, USA</b></td><td>('2177037', 'Andrew Kae', 'andrew kae')</td><td>{akae,marlin,elm}@cs.umass.edu
+</td></tr><tr><td>17670b60dcfb5cbf8fdae0b266e18cf995f6014c</td><td>Longitudinal Face Modeling via
+<br/>Temporal Deep Restricted Boltzmann Machines
+<br/><b>Computer Science and Software Engineering, Concordia University, Montr eal, Qu ebec, Canada</b><br/>2 CyLab Biometrics Center and the Department of Electrical and Computer Engineering,
+<br/><b>Carnegie Mellon University, Pittsburgh, PA, USA</b></td><td>('1876581', 'Chi Nhan Duong', 'chi nhan duong')<br/>('1769788', 'Khoa Luu', 'khoa luu')<br/>('2687827', 'Kha Gia Quach', 'kha gia quach')<br/>('1699922', 'Tien D. Bui', 'tien d. bui')</td><td>1{c duon, k q, bui}@encs.concordia.ca, 2kluu@andrew.cmu.edu
+</td></tr><tr><td>17027a05c1414c9a06a1c5046899abf382a1142d</td><td>Articulated Motion Discovery using Pairs of Trajectories
+<br/><b>University of Edinburgh</b><br/>2Google Research
+</td><td>('2059950', 'Luca Del Pero', 'luca del pero')<br/>('2262946', 'Susanna Ricco', 'susanna ricco')<br/>('1694199', 'Rahul Sukthankar', 'rahul sukthankar')<br/>('1749692', 'Vittorio Ferrari', 'vittorio ferrari')</td><td>ldelper@inf.ed.ac.uk
+<br/>ricco@google.com
+<br/>sukthankar@google.com
+<br/>ferrari@inf.ed.ac.uk
+</td></tr><tr><td>17ded725602b4329b1c494bfa41527482bf83a6f</td><td>Compact Convolutional Neural Network Cascade for Face Detection
+<br/>Kalinovskii I.A.
+<br/>Spitsyn V.G.
+<br/><b>Tomsk Polytechnic University</b><br/><b>Tomsk Polytechnic University</b><br/>Tomsk, Russia
+<br/>Tomsk, Russia
+</td><td></td><td>kua_21@mail.ru
+<br/>spvg@tpu.ru
+</td></tr><tr><td>177bc509dd0c7b8d388bb47403f28d6228c14b5c</td><td>Deep Learning Face Representation from Predicting 10,000 Classes
+<br/><b>The Chinese University of Hong Kong</b><br/><b>The Chinese University of Hong Kong</b><br/><b>Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences</b></td><td>('1681656', 'Yi Sun', 'yi sun')<br/>('31843833', 'Xiaogang Wang', 'xiaogang wang')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td>sy011@ie.cuhk.edu.hk
+<br/>xgwang@ee.cuhk.edu.hk
+<br/>xtang@ie.cuhk.edu.hk
+</td></tr><tr><td>7ba0bf9323c2d79300f1a433ff8b4fe0a00ad889</td><td></td><td></td><td></td></tr><tr><td>7bbaa09c9e318da4370a83b126bcdb214e7f8428</td><td>FaaSter, Better, Cheaper: The Prospect of
+<br/>Serverless Scientific Computing and HPC
+<br/><b>Zurich University of Applied Sciences, School of Engineering</b><br/>Service Prototyping Lab (blog.zhaw.ch/icclab/), 8401 Winterthur, Switzerland
+<br/><b>ISISTAN Research Institute - CONICET - UNICEN</b><br/>Campus Universitario, Paraje Arroyo Seco, Tandil (7000), Buenos Aires, Argentina
+<br/><b>ITIC Research Institute, National University of Cuyo</b><br/>Padre Jorge Contreras 1300, M5502JMA Mendoza, Argentina
+</td><td>('1765470', 'Josef Spillner', 'josef spillner')<br/>('2891834', 'Cristian Mateos', 'cristian mateos')<br/>('34889755', 'David A. Monge', 'david a. monge')</td><td>josef.spillner@zhaw.ch
+<br/>cristian.mateos@isistan.unicen.edu.ar
+<br/>dmonge@uncu.edu.ar
+</td></tr><tr><td>7b63ed54345d8c06523f6b03c41a09b5c8f227e2</td><td>Facial Expression Recognition Based on
+<br/>Combination of Spatio-temporal and Spectral
+<br/>Features in Local Facial Regions
+<br/>Department of Electrical Engineering,
+<br/><b>Najafabad Branch, Islamic Azad University</b><br/>Isfahan, Iran.
+</td><td>('9337964', 'Nakisa Abounasr', 'nakisa abounasr')</td><td>n_abounasr@sel.iaun.ac.ir
+</td></tr><tr><td>7bf0a1aa1d0228a51d24c0c3a83eceb937a6ae25</td><td><b>UNIVERSITY OF CALIFORNIA, SAN DIEGO</b><br/>Video-based Car Surveillance: License Plate, Make, and Model Recognition
+<br/>A thesis submitted in partial satisfaction of the
+<br/>requirements for the degree Masters of Science
+<br/>in Computer Science
+<br/>by
+<br/>Louka Dlagnekov
+<br/>Committee in charge:
+<br/>Professor Serge J. Belongie, Chairperson
+<br/>2005
+</td><td>('3520515', 'David A. Meyer', 'david a. meyer')<br/>('1765887', 'David J. Kriegman', 'david j. kriegman')</td><td></td></tr><tr><td>7b9961094d3e664fc76b12211f06e12c47a7e77d</td><td>Bridging Biometrics and Forensics
+<br/><b>EECS, Syracuse University, Syracuse, NY, USA</b></td><td>('38495931', 'Yanjun Yan', 'yanjun yan')<br/>('2598035', 'Lisa Ann Osadciw', 'lisa ann osadciw')</td><td>{yayan, laosadci}@syr.edu
+</td></tr><tr><td>7bfe085c10761f5b0cc7f907bdafe1ff577223e0</td><td></td><td></td><td></td></tr><tr><td>7b43326477795a772c08aee750d3e433f00f20be</td><td>Computational Methods for Behavior Analysis
+<br/>Thesis by
+<br/>In Partial Fulfillment of the Requirements for the
+<br/>degree of
+<br/>Doctor of Philosophy
+<br/><b>CALIFORNIA INSTITUTE OF TECHNOLOGY</b><br/>Pasadena, California
+<br/>2017
+<br/>Defended September 16, 2016
+</td><td>('2948199', 'Eyrun Eyjolfsdottir', 'eyrun eyjolfsdottir')</td><td></td></tr><tr><td>7b9b3794f79f87ca8a048d86954e0a72a5f97758</td><td>DOI 10.1515/jisys-2013-0016      Journal of Intelligent Systems 2013; 22(4): 365–415
+<br/>Passing an Enhanced Turing Test –
+<br/>Interacting with Lifelike Computer
+<br/>Representations of Specific Individuals 
+</td><td>('1708812', 'Avelino J. Gonzalez', 'avelino j. gonzalez')<br/>('1745342', 'Jason Leigh', 'jason leigh')<br/>('1727179', 'Ronald F. DeMara', 'ronald f. demara')<br/>('7777088', 'Steven Jones', 'steven jones')<br/>('1761244', 'Sangyoon Lee', 'sangyoon lee')<br/>('1917523', 'Carlos Leon-Barth', 'carlos leon-barth')<br/>('3191606', 'Miguel Elvir', 'miguel elvir')<br/>('33294824', 'James Hollister', 'james hollister')<br/>('2680448', 'Steven Kobosko', 'steven kobosko')</td><td></td></tr><tr><td>7bce4f4e85a3bfcd6bfb3b173b2769b064fce0ed</td><td>A Psychologically-Inspired Match-Score Fusion Model
+<br/>for Video-Based Facial Expression Recognition
+<br/><b>VISLab, EBUII-216, University of California Riverside</b><br/>Riverside, California, USA, 92521-0425
+</td><td>('1707159', 'Bir Bhanu', 'bir bhanu')<br/>('1803478', 'Songfan Yang', 'songfan yang')</td><td>{acruz, bhanu, syang}@ee.ucr.edu
+</td></tr><tr><td>7b0f1fc93fb24630eb598330e13f7b839fb46cce</td><td>Learning to Find Eye Region Landmarks for Remote Gaze
+<br/>Estimation in Unconstrained Settings
+<br/>ETH Zurich
+<br/>MPI for Informatics
+<br/>MPI for Informatics
+<br/>ETH Zurich
+</td><td>('20466488', 'Seonwook Park', 'seonwook park')<br/>('2520795', 'Xucong Zhang', 'xucong zhang')<br/>('3194727', 'Andreas Bulling', 'andreas bulling')<br/>('2531379', 'Otmar Hilliges', 'otmar hilliges')</td><td>spark@inf.ethz.ch
+<br/>xczhang@mpi-inf.mpg.de
+<br/>bulling@mpi-inf.mpg.de
+<br/>otmarh@inf.ethz.ch
+</td></tr><tr><td>7be60f8c34a16f30735518d240a01972f3530e00</td><td>Facial Expression Recognition with Temporal Modeling of Shapes
+<br/><b></b><br/><b>The University of Texas at Austin</b></td><td>('18692590', 'Suyog Jain', 'suyog jain')<br/>('1713065', 'Changbo Hu', 'changbo hu')</td><td>suyog@cs.utexas.edu, changbo.hu@gmail.com, aggarwaljk@mail.utexas.edu
+</td></tr><tr><td>7bdcd85efd1e3ce14b7934ff642b76f017419751</td><td>289
+<br/>Learning Discriminant Face Descriptor
+</td><td>('1718623', 'Zhen Lei', 'zhen lei')<br/>('34679741', 'Stan Z. Li', 'stan z. li')</td><td></td></tr><tr><td>7b3b7769c3ccbdf7c7e2c73db13a4d32bf93d21f</td><td>On the Design and Evaluation of Robust Head Pose for
+<br/>Visual User Interfaces: Algorithms, Databases, and
+<br/>Comparisons
+<br/>Laboratory of Intelligent and
+<br/>Safe Automobiles
+<br/>UCSD - La Jolla, CA, USA
+<br/>Laboratory of Intelligent and
+<br/>Safe Automobiles
+<br/>UCSD - La Jolla, CA, USA
+<br/>Laboratory of Intelligent and
+<br/>Safe Automobiles
+<br/>UCSD - La Jolla, CA, USA
+<br/>Laboratory of Intelligent and
+<br/>Safe Automobiles
+<br/>UCSD - La Jolla, CA, USA
+<br/>Mohan Trivedi
+<br/>Laboratory of Intelligent and
+<br/>Safe Automobiles
+<br/>UCSD - La Jolla, CA, USA
+</td><td>('1841835', 'Sujitha Martin', 'sujitha martin')<br/>('1947383', 'Ashish Tawari', 'ashish tawari')<br/>('1780529', 'Erik Murphy-Chutorian', 'erik murphy-chutorian')<br/>('3205274', 'Shinko Y. Cheng', 'shinko y. cheng')</td><td>scmartin@ucsd.edu
+<br/>atawari@ucsd.edu
+<br/>erikmc@google.com
+<br/>sycheng@hrl.com
+<br/>mtrivedi@ucsd.edu
+</td></tr><tr><td>8fe38962c24300129391f6d7ac24d7783e0fddd0</td><td><b>Center for Research in Computer Vision, University of Central Florida</b></td><td>('33209161', 'Amir Mazaheri', 'amir mazaheri')<br/>('1745480', 'Mubarak Shah', 'mubarak shah')</td><td>amirmazaheri@knights.ucf.edu
+<br/>shah@crcv.ucf.edu
+</td></tr><tr><td>8f6d05b8f9860c33c7b1a5d704694ed628db66c7</td><td>Non-linear dimensionality reduction and sparse
+<br/>representation models for facial analysis
+<br/>To cite this version:
+<br/>Medical Imaging. INSA de Lyon, 2014. English. <NNT : 2014ISAL0019>. <tel-01127217>
+<br/>HAL Id: tel-01127217
+<br/>https://tel.archives-ouvertes.fr/tel-01127217
+<br/>Submitted on 7 Mar 2015
+<br/>HAL is a multi-disciplinary open access
+<br/>archive for the deposit and dissemination of sci-
+<br/>entific research documents, whether they are pub-
+<br/>lished or not. The documents may come from
+<br/>teaching and research institutions in France or
+<br/><b>abroad, or from public or private research centers</b><br/>L’archive ouverte pluridisciplinaire HAL, est
+<br/>destinée au dépôt et à la diffusion de documents
+<br/>scientifiques de niveau recherche, publiés ou non,
+<br/>émanant des établissements d’enseignement et de
+<br/>recherche français ou étrangers, des laboratoires
+<br/>publics ou privés.
+</td><td>('35061362', 'Yuyao Zhang', 'yuyao zhang')<br/>('35061362', 'Yuyao Zhang', 'yuyao zhang')</td><td></td></tr><tr><td>8f772d9ce324b2ef5857d6e0b2a420bc93961196</td><td>MAHPOD et al.: CFDRNN
+<br/>Facial Landmark Point Localization using
+<br/>Coarse-to-Fine Deep Recurrent Neural Network
+</td><td>('2748312', 'Shahar Mahpod', 'shahar mahpod')<br/>('3001038', 'Rig Das', 'rig das')<br/>('1767715', 'Emanuele Maiorana', 'emanuele maiorana')<br/>('1926432', 'Yosi Keller', 'yosi keller')<br/>('1682433', 'Patrizio Campisi', 'patrizio campisi')</td><td></td></tr><tr><td>8f3e120b030e6c1d035cb7bd9c22f6cc75782025</td><td>Bayesian Networks and the Imprecise Dirichlet
+<br/>Model applied to Recognition Problems
+<br/><b>Dalle Molle Institute for Arti cial Intelligence</b><br/>Galleria 2, Manno-Lugano, Switzerland
+<br/><b>Rensselaer Polytechnic Institute</b><br/>110 Eighth St., Troy, NY, USA
+</td><td>('1726583', 'Qiang Ji', 'qiang ji')</td><td>cassio@idsia.ch, jiq@rpi.edu
+</td></tr><tr><td>8fb611aca3bd8a3a0527ac0f38561a5a9a5b8483</td><td></td><td></td><td></td></tr><tr><td>8fda2f6b85c7e34d3e23927e501a4b4f7fc15b2a</td><td>Feature Selection with Annealing for Big Data
+<br/>Learning
+</td><td>('2455529', 'Adrian Barbu', 'adrian barbu')<br/>('34680388', 'Yiyuan She', 'yiyuan she')<br/>('2139735', 'Liangjing Ding', 'liangjing ding')<br/>('3019469', 'Gary Gramajo', 'gary gramajo')</td><td></td></tr><tr><td>8fed5ea3b69ea441a8b02f61473eafee25fb2374</td><td>Proceedings of the Thirty-First AAAI Conference on Artificial Intelligence (AAAI-17)
+<br/>Two-Dimensional PCA with F-Norm Minimization
+<br/><b>State Key Laboratory of ISN, Xidian University</b><br/><b>State Key Laboratory of ISN, Xidian University</b><br/>Xi’an China
+<br/>Xi’an China
+</td><td>('38469552', 'Quanxue Gao', 'quanxue gao')<br/>('40326660', 'Qianqian Wang', 'qianqian wang')</td><td></td></tr><tr><td>8fa3478aaf8e1f94e849d7ffbd12146946badaba</td><td>Attributes for Classifier Feedback
+<br/><b>Indraprastha Institute of Information Technology (Delhi, India</b><br/><b>Toyota Technological Institute (Chicago, US</b></td><td>('2076800', 'Amar Parkash', 'amar parkash')<br/>('1713589', 'Devi Parikh', 'devi parikh')</td><td></td></tr><tr><td>8f3da45ff0c3e1777c3a7830f79c10f5896bcc21</td><td>Situation Recognition with Graph Neural Networks
+<br/><b>The Chinese University of Hong Kong, 2University of Toronto, 3Youtu Lab, Tencent</b><br/><b>Uber Advanced Technologies Group, 5Vector Institute</b></td><td>('8139953', 'Ruiyu Li', 'ruiyu li')<br/>('2103464', 'Makarand Tapaswi', 'makarand tapaswi')<br/>('2246396', 'Renjie Liao', 'renjie liao')<br/>('1729056', 'Jiaya Jia', 'jiaya jia')<br/>('2422559', 'Raquel Urtasun', 'raquel urtasun')<br/>('37895334', 'Sanja Fidler', 'sanja fidler')</td><td>ryli@cse.cuhk.edu.hk, {makarand,rjliao,urtasun,fidler}@cs.toronto.edu, leojia9@gmail.com
+</td></tr><tr><td>8ff8c64288a2f7e4e8bf8fda865820b04ab3dbe8</td><td>Age Estimation Using Expectation of Label Distribution Learning ∗
+<br/><b>National Key Laboratory for Novel Software Technology, Nanjing University, China</b><br/><b>MOE Key Laboratory of Computer Network and Information Integration, Southeast University, China</b></td><td>('2226422', 'Bin-Bin Gao', 'bin-bin gao')<br/>('7678704', 'Hong-Yu Zhou', 'hong-yu zhou')<br/>('1808816', 'Jianxin Wu', 'jianxin wu')<br/>('1735299', 'Xin Geng', 'xin geng')</td><td>{gaobb,zhouhy,wujx}@lamda.nju.edu.cn, xgeng@seu.edu.cn
+</td></tr><tr><td>8f9c37f351a91ed416baa8b6cdb4022b231b9085</td><td>Generative Adversarial Style Transfer Networks for Face Aging
+<br/>Sveinn Palsson
+<br/>D-ITET, ETH Zurich
+<br/>Eirikur Agustsson
+<br/>D-ITET, ETH Zurich
+</td><td></td><td>spalsson@ethz.ch
+<br/>aeirikur@ethz.ch
+</td></tr><tr><td>8f8c0243816f16a21dea1c20b5c81bc223088594</td><td></td><td></td><td></td></tr><tr><td>8f08b2101d43b1c0829678d6a824f0f045d57da5</td><td>Supplementary Material for: Active Pictorial Structures
+<br/><b>Imperial College London</b><br/>180 Queens Gate, SW7 2AZ, London, U.K.
+<br/>In the following sections, we provide additional material for the paper “Active Pictorial Structures”. Section 1 explains in
+<br/>more detail the differences between the proposed Active Pictorial Structures (APS) and Pictorial Structures (PS). Section 2
+<br/>presents the proofs about the structure of the precision matrices of the Gaussian Markov Random Filed (GMRF) (Eqs. 10
+<br/>and 12 of the main paper). Section 3 gives an analysis about the forward Gauss-Newton optimization of APS and shows that
+<br/>the inverse technique with fixed Jacobian and Hessian, which is used in the main paper, is much faster. Finally, Sec. 4 shows
+<br/>additional experimental results and conducts new experiments on different objects (human eyes and cars). An open-source
+<br/>implementation of APS is available within the Menpo Project [1] in http://www.menpo.org/.
+<br/>1. Differences between Active Pictorial Structures and Pictorial Structures
+<br/>As explained in the main paper, the proposed model is partially motivated by PS [4, 8]. In the original formulation of PS,
+<br/>the cost function to be optimized has the form
+<br/>(cid:88)
+<br/>n(cid:88)
+<br/>n(cid:88)
+<br/>i=1
+<br/>arg min
+<br/>= arg min
+<br/>i=1
+<br/>mi((cid:96)i) +
+<br/>dij((cid:96)i, (cid:96)j) =
+<br/>i,j:(vi,vj )∈E
+<br/>[A((cid:96)i) − µa
+<br/>i ]T (Σa
+<br/>i )−1[A((cid:96)i) − µa
+<br/>i ] +
+<br/>(cid:88)
+<br/>i,j:(vi,vj )∈E
+<br/>[(cid:96)i − (cid:96)j − µd
+<br/>ij]T (Σd
+<br/>ij)−1[(cid:96)i − (cid:96)j − µd
+<br/>ij]
+<br/>(1)
+<br/>1 , . . . , (cid:96)T
+<br/>n ]T is the vector of landmark coordinates ((cid:96)i = [xi, yi]T , ∀i = 1, . . . , n), A((cid:96)i) is a feature vector
+<br/>where s = [(cid:96)T
+<br/>ij} denote the mean
+<br/>extracted from the image location (cid:96)i and we have assumed a tree G = (V, E). {µa
+<br/>and covariances of the appearance and deformation respectively. In Eq. 1, mi((cid:96)i) is a function measuring the degree of
+<br/>mismatch when part vi is placed at location (cid:96)i in the image. Moreover, dij((cid:96)i, (cid:96)j) denotes a function measuring the degree
+<br/>of deformation of the model when part vi is placed at location (cid:96)i and part vj is placed at location (cid:96)j. The authors show
+<br/>an inference algorithm based on distance transform [3] that can find a global minimum of Eq. 1 without any initialization.
+<br/>However, this algorithm imposes two important restrictions: (1) appearance of each part is independent of the rest of them
+<br/>and (2) G must always be acyclic (a tree). Additionally, the computation of mi((cid:96)i) for all parts (i = 1, . . . , n) and all possible
+<br/>image locations (response maps) has a high computational cost, which makes the algorithm very slow. Finally, in [8], the
+<br/>authors only use a diagonal covariance for the relative locations (deformation) of each edge of the graph, which restricts the
+<br/>flexibility of the model.
+<br/>i } and {µd
+<br/>ij, Σd
+<br/>i , Σa
+<br/>In the proposed APS, we aim to minimize the cost function (Eq. 19 of the main paper)
+<br/>(cid:107)A(S(¯s, p)) − ¯a(cid:107)2
+<br/>[A(S(¯s, p)) − ¯a]T Qa[A(S(¯s, p)) − ¯a] + [S(¯s, p) − ¯s]T Qd[S(¯s, p) − ¯s]
+<br/>Qa + (cid:107)S(¯s, p) − ¯s(cid:107)2
+<br/>Qd =
+<br/>arg min
+<br/>= arg min
+<br/>(2)
+<br/>There are two main differences between APS and PS: (1) we employ a statistical shape model and optimize with respect
+<br/>to its parameters and (2) we use the efficient Gauss-Newton optimization technique. However, these differences introduce
+<br/>some important advantages, as also mentioned in the main paper. The proposed formulation allows to define a graph (not
+<br/>only tree) between the object’s parts. This means that we can assume dependencies between any pair of landmarks for both
+</td><td>('2788012', 'Epameinondas Antonakos', 'epameinondas antonakos')<br/>('2575567', 'Joan Alabort-i-Medina', 'joan alabort-i-medina')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')</td><td>{e.antonakos, ja310, s.zafeiriou}@imperial.ac.uk
+</td></tr><tr><td>8fbec9105d346cd23d48536eb20c80b7c2bbbe30</td><td>The Effectiveness of Face Detection Algorithms in Unconstrained Crowd Scenes
+<br/>Department of Computer Science and Engineering
+<br/><b>University of Notre Dame</b><br/>Notre Dame, IN 46656
+</td><td>('27937356', 'Jeremiah R. Barr', 'jeremiah r. barr')<br/>('1799014', 'Kevin W. Bowyer', 'kevin w. bowyer')<br/>('1704876', 'Patrick J. Flynn', 'patrick j. flynn')</td><td>jbarr1,kwb,flynn@nd.edu
+</td></tr><tr><td>8f3e3f0f97844d3bfd9e9ec566ac7a54f6931b09</td><td>Electronic Letters on Computer Vision and Image Analysis 14(2):24-44; 2015
+<br/>A Survey on Human Emotion Recognition Approaches,
+<br/>Databases and Applications
+<br/><b>Francis Xavier Engineering College, Tirunelveli, Tamilnadu, India</b><br/><b>P.S.R Engineering College, Sivakasi, Tamilnadu, India</b><br/>Received 7th Aug 2015; accepted 30th Nov 2015
+</td><td></td><td></td></tr><tr><td>8f8a5be9dc16d73664285a29993af7dc6a598c83</td><td>IJCSNS International Journal of Computer Science and Network Security, VOL.11 No.1, January 2011
+<br/>71
+<br/>Neural Network based Face Recognition with Gabor Filters
+<br/><b>Jahangirnagar University, Savar, Dhaka 1342, Bangladesh</b><br/>
+</td><td>('5463951', 'Amina Khatun', 'amina khatun')<br/>('38674112', 'Al-Amin Bhuiyan', 'al-amin bhuiyan')</td><td></td></tr><tr><td>8f5ce25e6e1047e1bf5b782d045e1dac29ca747e</td><td>A Novel Discriminant Non-negative Matrix
+<br/>Factorization Algorithm with Applications to
+<br/>Facial Image Characterization Problems
+<br/><b>yAristotle University of Thessaloniki</b><br/>Department of Informatics
+<br/>Box 451
+<br/>54124 Thessaloniki, Greece
+<br/>Address for correspondence:
+<br/><b>Aristotle University of Thessaloniki</b><br/>54124 Thessaloniki
+<br/>GREECE
+<br/>Tel. ++ 30 231 099 63 04
+<br/>Fax ++ 30 231 099 63 04
+<br/>April 18, 2007
+<br/>DRAFT
+</td><td>('1754270', 'Irene Kotsia', 'irene kotsia')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')<br/>('1698588', 'Ioannis Pitas', 'ioannis pitas')<br/>('1698588', 'Ioannis Pitas', 'ioannis pitas')</td><td>email: fekotsia, dralbert, pitasg@aiia.csd.auth.gr
+</td></tr><tr><td>8f89aed13cb3555b56fccd715753f9ea72f27f05</td><td>Attended End-to-end Architecture for Age
+<br/>Estimation from Facial Expression Videos
+</td><td>('1678473', 'Wenjie Pei', 'wenjie pei')</td><td></td></tr><tr><td>8f92cccacf2c84f5d69db3597a7c2670d93be781</td><td>FACIAL EXPRESSION SYNTHESIS THROUGH FACIAL EXPRESSIONS
+<br/>STATISTICAL ANALYSIS
+<br/><b>Aristotle University of Thessaloniki</b><br/>Department of Informatics
+<br/>Box 451, 54124 Thessaloniki, Greece
+</td><td>('2764130', 'Stelios Krinidis', 'stelios krinidis')<br/>('1698588', 'Ioannis Pitas', 'ioannis pitas')</td><td>email: pitas@zeus.csd.auth.gr, stelios.krinidis@mycosmos.gr
+</td></tr><tr><td>8f6263e4d3775757e804796e104631c7a2bb8679</td><td>Characterizing Visual Representations within Convolutional Neural Networks:
+<br/>Toward a Quantitative Approach
+<br/><b>Center for Brain Science, Harvard University, Cambridge, MA 02138 USA</b><br/><b>Center for Brain Science, Harvard University, Cambridge, MA 02138 USA</b></td><td>('1739108', 'Chuan-Yung Tsai', 'chuan-yung tsai')<br/>('2042941', 'David D. Cox', 'david d. cox')</td><td>CHUANYUNGTSAI@FAS.HARVARD.EDU
+<br/>DAVIDCOX@FAS.HARVARD.EDU
+</td></tr><tr><td>8f9f599c05a844206b1bd4947d0524234940803d</td><td></td><td></td><td></td></tr><tr><td>8f60c343f76913c509ce623467bf086935bcadac</td><td>Joint 3D Face Reconstruction and Dense
+<br/>Alignment with Position Map Regression
+<br/>Network
+<br/><b>Shanghai Jiao Tong University, CloudWalk Technology</b><br/><b>Research Center for Intelligent Security Technology, CIGIT</b></td><td>('9196752', 'Yao Feng', 'yao feng')<br/>('1917608', 'Fan Wu', 'fan wu')<br/>('3492237', 'Xiaohu Shao', 'xiaohu shao')<br/>('1706354', 'Yanfeng Wang', 'yanfeng wang')<br/>('39851640', 'Xi Zhou', 'xi zhou')</td><td>fengyao@sjtu.edu.cn, wufan@cloudwalk.cn, shaoxiaohu@cigit.ac.cn
+<br/>wangyanfeng@sjtu.edu.cn, zhouxi@cloudwalk.cn
+</td></tr><tr><td>8fd9c22b00bd8c0bcdbd182e17694046f245335f</td><td>  
+<br/>Recognizing Facial Expressions in Videos
+</td><td>('8502461', 'Lin Su', 'lin su')<br/>('14362431', 'Matthew Balazsi', 'matthew balazsi')</td><td></td></tr><tr><td>8f5facdc0a2a79283864aad03edc702e2a400346</td><td>
+<br/>
+<br/>ISSN: 2277-3754
+<br/>ISO 9001:2008 Certified
+<br/>International Journal of Engineering and Innovative Technology (IJEIT)
+<br/>Volume 4, Issue 7, January 2015
+<br/>Human Age Estimation Framework using
+<br/>Bio-Inspired Features for Facial Image
+<br/>Santhosh Kumar G, Dr. Suresh H. N.
+<br/>Research scholor, BIT, under VTU, Belgaum India
+<br/><b>Bangalore Institute of Technology</b><br/>Bangalore–04, Karnataka
+</td><td></td><td></td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>Facial Landmark Detection by
+<br/>Deep Multi-task Learning
+<br/><b>The Chinese University of Hong Kong</b></td><td>('3152448', 'Zhanpeng Zhang', 'zhanpeng zhang')<br/>('1693209', 'Ping Luo', 'ping luo')<br/>('1717179', 'Chen Change Loy', 'chen change loy')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td></td></tr><tr><td>8af411697e73f6cfe691fe502d4bfb42510b4835</td><td>Dynamic Local Ternary Pattern for Face Recognition and
+<br/>Verification
+<br/><b>Institute of Information Technology</b><br/><b>University of Dhaka, Bangladesh</b><br/>Department of Industrial and Management Engineering
+<br/><b>Hankuk University of Foreign Studies, South Korea</b><br/>M. Abdullah-Al-Wadud
+</td><td>('39036762', 'Mohammad Ibrahim', 'mohammad ibrahim')<br/>('31210416', 'Humayun Kayesh', 'humayun kayesh')<br/>('13193999', 'Shah', 'shah')<br/>('2233124', 'Mohammad Shoyaib', 'mohammad shoyaib')</td><td>ibrahim iit@yahoo.com, iftekhar.efat@gmail.com, hkayesh@gmail.com, khaled@univdhaka.edu, shoyaib@du.ac.bd
+<br/>wadud@hufs.ac.kr
+</td></tr><tr><td>8acdc4be8274e5d189fb67b841c25debf5223840</td><td>Gultepe and Makrehchi
+<br/>Hum. Cent. Comput. Inf. Sci. (2018) 8:25
+<br/>https://doi.org/10.1186/s13673-018-0148-3
+<br/>RESEARCH
+<br/>Improving clustering performance
+<br/>using independent component analysis
+<br/>and unsupervised feature learning
+<br/>Open Access
+<br/>*Correspondence:
+<br/>Department of Electrical
+<br/>and Computer Engineering,
+<br/><b>University of Ontario Institute</b><br/>of Technology, 2000 Simcoe
+<br/>St N, Oshawa, ON L1H 7K4,
+<br/>Canada
+</td><td>('2729102', 'Eren Gultepe', 'eren gultepe')<br/>('3183840', 'Masoud Makrehchi', 'masoud makrehchi')</td><td>eren.gultepe@uoit.net
+</td></tr><tr><td>8a1ed5e23231e86216c9bdd62419c3b05f1e0b4d</td><td>Facial Keypoint Detection
+<br/><b>Stanford University</b><br/>March 13, 2016
+</td><td>('29909347', 'Shayne Longpre', 'shayne longpre')<br/>('9928926', 'Ajay Sohmshetty', 'ajay sohmshetty')</td><td>slongpre@stanford.edu, ajay14@stanford.edu
+</td></tr><tr><td>8a54f8fcaeeede72641d4b3701bab1fe3c2f730a</td><td>What do you think of my picture? Investigating factors
+<br/>of influence in profile images context perception
+<br/>Heynderickx
+<br/>To cite this version:
+<br/>think of my picture? Investigating factors of influence in profile images context perception. Human
+<br/>Vision and Electronic Imaging XX, Mar 2015, San Francisco, United States. Proc. SPIE 9394, Hu-
+<br/>man Vision and Electronic Imaging XX, 9394, <http://spie.org/EI/conferencedetails/human-vision-
+<br/>electronic-imaging>. <10.1117/12.2082817>. <hal-01149535>
+<br/>HAL Id: hal-01149535
+<br/>https://hal.archives-ouvertes.fr/hal-01149535
+<br/>Submitted on 7 May 2015
+<br/>HAL is a multi-disciplinary open access
+<br/>archive for the deposit and dissemination of sci-
+<br/>entific research documents, whether they are pub-
+<br/>lished or not. The documents may come from
+<br/>teaching and research institutions in France or
+<br/><b>abroad, or from public or private research centers</b><br/>L’archive ouverte pluridisciplinaire HAL, est
+<br/>destinée au dépôt et à la diffusion de documents
+<br/>scientifiques de niveau recherche, publiés ou non,
+<br/>émanant des établissements d’enseignement et de
+<br/>recherche français ou étrangers, des laboratoires
+<br/>publics ou privés.
+</td><td>('34678433', 'Filippo Mazza', 'filippo mazza')<br/>('40130265', 'Matthieu Perreira Da Silva', 'matthieu perreira da silva')<br/>('7591543', 'Patrick Le Callet', 'patrick le callet')<br/>('34678433', 'Filippo Mazza', 'filippo mazza')<br/>('40130265', 'Matthieu Perreira Da Silva', 'matthieu perreira da silva')<br/>('7591543', 'Patrick Le Callet', 'patrick le callet')<br/>('1728396', 'Ingrid Heynderickx', 'ingrid heynderickx')</td><td></td></tr><tr><td>8a8861ad6caedc3993e31d46e7de6c251a8cda22</td><td>StreetStyle: Exploring world-wide clothing styles from millions of photos
+<br/><b>Cornell University</b><br/>Figure 1: Extracting and measuring clothing style from Internet photos at scale. (a) We apply deep learning methods to learn to extract
+<br/>fashion attributes from images and create a visual embedding of clothing style. We use this embedding to analyze millions of Instagram photos
+<br/>of people sampled worldwide, in order to study spatio-temporal trends in clothing around the globe. (b) Further, using our embedding, we
+<br/>can cluster images to produce a global set of representative styles, from which we can (c) use temporal and geo-spatial statistics to generate
+<br/>concise visual depictions of what makes clothing unique in each city versus the rest.
+</td><td>('40353974', 'Kevin Matzen', 'kevin matzen')<br/>('1791337', 'Kavita Bala', 'kavita bala')<br/>('1830653', 'Noah Snavely', 'noah snavely')</td><td></td></tr><tr><td>8aae23847e1beb4a6d51881750ce36822ca7ed0b</td><td>Comparison Between Geometry-Based and Gabor-Wavelets-Based
+<br/>Facial Expression Recognition Using Multi-Layer Perceptron
+<br/><b>ATR Human Information Processing Research Laboratories</b><br/><b>ATR Interpreting Telecommunications Research Laboratories</b><br/>2-2 Hikaridai, Seika-cho, Soraku-gun, Kyoto 619-02, Japan
+<br/>INRIA, 2004 route des Lucioles, BP 93, F-06902 Sophia-Antipolis Cedex, France
+</td><td>('1809184', 'Zhengyou Zhang', 'zhengyou zhang')<br/>('34801422', 'Shigeru Akamatsu', 'shigeru akamatsu')<br/>('36206997', 'Michael Schuster', 'michael schuster')</td><td>e-mail: zzhang@sophia.inria.fr, zzhang@hip.atr.co.jp
+</td></tr><tr><td>8a866bc0d925dfd8bb10769b8b87d7d0ff01774d</td><td>WikiArt Emotions: An Annotated Dataset of Emotions Evoked by Art
+<br/>National Research Council Canada
+</td><td>('2886725', 'Svetlana Kiritchenko', 'svetlana kiritchenko')</td><td>{saif.mohammad,svetlana.kiritchenko}@nrc-cnrc.gc.ca
+</td></tr><tr><td>8a40b6c75dd6392ee0d3af73cdfc46f59337efa9</td><td></td><td></td><td></td></tr><tr><td>8a3bb63925ac2cdf7f9ecf43f71d65e210416e17</td><td>ShearFace: Efficient Extraction of Anisotropic
+<br/>Features for Face Recognition
+<br/>1Research Groups on Intelligent Machines,
+<br/><b>University of Sfax</b><br/> Sfax 3038, Tunisia
+<br/>and anisotropic
+</td><td>('2791150', 'Mohamed Anouar Borgi', 'mohamed anouar borgi')<br/>('8847309', 'Demetrio Labate', 'demetrio labate')</td><td>{anoir.borgi@ieee.org; dlabate@math.uh.edu}
+</td></tr><tr><td>8a0159919ee4e1a9f4cbfb652a1be212bf0554fd</td><td><b>University of Surrey</b><br/>Faculty of Engineering and Physical Sciences
+<br/>Department of Computer Science
+<br/>PhD Thesis
+<br/>Application of Power Laws to
+<br/>Biometrics, Forensics and
+<br/>Network Traffic Analysis
+<br/>by
+<br/>Supervisor: Prof. A.T.S. Ho
+<br/>Co-supervisors: Dr. N. Poh, Dr. S. Li
+<br/>November, 2016
+</td><td>('2909991', 'Aamo Iorliam', 'aamo iorliam')</td><td></td></tr><tr><td>8ad0d8cf4bcb5c7eccf09f23c8b7d25439c4ae2b</td><td>Predicting the Future with Transformational
+<br/>States
+<br/><b>University of Pennsylvania, 2Ryerson University</b></td><td>('2689633', 'Andrew Jaegle', 'andrew jaegle')<br/>('40805511', 'Oleh Rybkin', 'oleh rybkin')<br/>('3150825', 'Konstantinos G. Derpanis', 'konstantinos g. derpanis')<br/>('1751586', 'Kostas Daniilidis', 'kostas daniilidis')</td><td>ajaegle@upenn.edu, oleh@cis.upenn.edu,
+<br/>kosta@scs.ryerson.ca, kostas@cis.upenn.edu
+</td></tr><tr><td>8adb2fcab20dab5232099becbd640e9c4b6a905a</td><td>Beyond Euclidean Eigenspaces:
+<br/>Bayesian Matching for Visual Recognition
+<br/><b>Mitsubishi Electric Research Laboratory</b><br/>MIT Media Laboratory
+<br/>
+<br/>
+<br/>Cambridge, MA
+<br/>Cambridge, MA
+</td><td>('1780935', 'Baback Moghaddam', 'baback moghaddam')<br/>('1682773', 'Alex Pentland', 'alex pentland')</td><td>baback@merl.com
+<br/>sandy@media.mit.edu
+</td></tr><tr><td>8a0d10a7909b252d0e11bf32a7f9edd0c9a8030b</td><td>Animals on the Web
+<br/><b>University of California, Berkeley</b><br/><b>University of Illinois, Urbana-Champaign</b><br/>Computer Science Division
+<br/>Department of Computer Science
+</td><td>('1685538', 'Tamara L. Berg', 'tamara l. berg')<br/>('1744452', 'David A. Forsyth', 'david a. forsyth')</td><td>millert@cs.berkeley.edu
+<br/>daf@cs.uiuc.edu
+</td></tr><tr><td>8a91ad8c46ca8f4310a442d99b98c80fb8f7625f</td><td>2592
+<br/>2D Segmentation Using a Robust Active
+<br/>Shape Model With the EM Algorithm
+</td><td>('38769654', 'Carlos Santiago', 'carlos santiago')<br/>('3259175', 'Jacinto C. Nascimento', 'jacinto c. nascimento')<br/>('1744810', 'Jorge S. Marques', 'jorge s. marques')</td><td></td></tr><tr><td>8aed6ec62cfccb4dba0c19ee000e6334ec585d70</td><td>Localizing and Visualizing Relative Attributes
+</td><td>('2299381', 'Fanyi Xiao', 'fanyi xiao')<br/>('1883898', 'Yong Jae Lee', 'yong jae lee')</td><td></td></tr><tr><td>8a336e9a4c42384d4c505c53fb8628a040f2468e</td><td>Wang and Luo EURASIP Journal on Bioinformatics
+<br/>and Systems Biology (2016) 2016:13
+<br/>DOI 10.1186/s13637-016-0048-7
+<br/>R ES EAR CH
+<br/>Detecting Visually Observable Disease
+<br/>Symptoms from Faces
+<br/>Open Access
+</td><td>('2207567', 'Kuan Wang', 'kuan wang')<br/>('33642939', 'Jiebo Luo', 'jiebo luo')</td><td></td></tr><tr><td>7e600faee0ba11467d3f7aed57258b0db0448a72</td><td></td><td></td><td></td></tr><tr><td>7ed3b79248d92b255450c7becd32b9e5c834a31e</td><td>L1-regularized Logistic Regression Stacking and Transductive CRF Smoothing
+<br/>for Action Recognition in Video
+<br/><b>University of Florence</b><br/>Lorenzo Seidenari
+<br/><b>University of Florence</b><br/>Andrew D. Bagdanov
+<br/><b>University of Florence</b><br/><b>University of Florence</b></td><td>('2602265', 'Svebor Karaman', 'svebor karaman')<br/>('8196487', 'Alberto Del Bimbo', 'alberto del bimbo')</td><td>svebor.karaman@unifi.it
+<br/>lorenzo.seidenari@unifi.it
+<br/>bagdanov@dsi.unifi.it
+<br/>alberto.delbimbo@unifi.it
+</td></tr><tr><td>7e8016bef2c180238f00eecc6a50eac473f3f138</td><td>TECHNISCHE UNIVERSIT ¨AT M ¨UNCHEN
+<br/>Lehrstuhl f¨ur Mensch-Maschine-Kommunikation
+<br/>Immersive Interactive Data Mining and Machine
+<br/>Learning Algorithms for Big Data Visualization
+<br/>Vollst¨andiger Abdruck der von der Fakult¨at f¨ur Elektrotechnik und Informationstechnik
+<br/>der Technischen Universit¨at M¨unchen zur Erlangung des akademischen Grades eines
+<br/>Doktor-Ingenieurs (Dr.-Ing.)
+<br/>genehmigten Dissertation.
+<br/>Vorsitzender:
+<br/>Univ.-Prof. Dr. sc.techn. Andreas Herkersdorf
+<br/>Pr¨ufer der Dissertation:
+<br/>1. Univ.-Prof. Dr.-Ing. habil. Gerhard Rigoll
+<br/>2. Univ.-Prof. Dr.-Ing. habil. Dirk Wollherr
+<br/>3. Prof. Dr. Mihai Datcu
+<br/>Die Dissertation wurde am 13.08.2015 bei der Technischen Universit¨at M¨unchen eingerei-
+<br/>cht und durch die Fakult¨at f¨ur Elektrotechnik und Informationstechnik am 16.02.2016
+<br/>angenommen.
+</td><td>('2133342', 'Mohammadreza Babaee', 'mohammadreza babaee')</td><td></td></tr><tr><td>7ed2c84fdfc7d658968221d78e745dfd1def6332</td><td>May 15, 2007 6:32
+<br/>World Scientific Review Volume - 9.75in x 6.5in
+<br/>ObjectRecognitionLCV2
+<br/>Chapter 1
+<br/>Evaluation of linear combination of views for object recognition
+<br/>on real and synthetic datasets
+<br/>Department of computer science,
+<br/><b>University College London</b><br/>Malet Place, London, WC1E 6BT
+<br/>In this work, we present a method for model-based recognition of 3d objects from
+<br/>a small number of 2d intensity images taken from nearby, but otherwise arbitrary
+<br/>viewpoints. Our method works by linearly combining images from two (or more)
+<br/>viewpoints of a 3d object to synthesise novel views of the object. The object is
+<br/>recognised in a target image by matching to such a synthesised, novel view. All
+<br/>that is required is the recovery of the linear combination parameters, and since
+<br/>we are working directly with pixel intensities, we suggest searching the parameter
+<br/>space using a global, evolutionary optimisation algorithm combined with a local
+<br/>search method in order efficiently to recover the optimal parameters and thus
+<br/>recognise the object in the scene. We have experimented with both synthetic
+<br/>data and real-image, public databases.
+<br/>1.1. Introduction
+<br/>Object recognition is one of the most important and basic problems in computer
+<br/>vision and, for this reason, it has been studied extensively resulting in a plethora
+<br/>of publications and a variety of different approachesa aiming to solve this problem.
+<br/>Nevertheless accurate, robust and efficient solutions remain elusive because of the
+<br/>inherent difficulties when dealing in particular with 3d objects that may be seen
+<br/>from a variety of viewpoints. Variations in geometry, photometry and viewing angle,
+<br/>noise, occlusions and incomplete data are some of the problems with which object
+<br/>recognition systems are faced.
+<br/>In this paper, we will address a particular kind of extrinsic variations: varia-
+<br/>tions of the image due to changes in the viewpoint from which the object is seen.
+<br/>Traditionally, methods that aimed to solve the recognition problem for objects with
+<br/>varying pose relied on an explicit 3d model of the object, generating 2d projections
+<br/>from that model and comparing them with the scene image. Such was the work
+<br/>aFor a comprehensive review of object recognition methods and deformable templates in particular,
+<br/>see Refs. 1–4.
+</td><td>('1797883', 'Vasileios Zografos', 'vasileios zografos')<br/>('31557997', 'Bernard F. Buxton', 'bernard f. buxton')</td><td>{v.zografos,b.buxton}@cs.ucl.ac.uk
+</td></tr><tr><td>7eaa97be59019f0d36aa7dac27407b004cad5e93</td><td>Sampling Generative Networks
+<br/>School of Design
+<br/><b>Victoria University of Wellington</b><br/>Wellington, New Zealand
+</td><td>('40603980', 'Tom White', 'tom white')</td><td>tom.white@vuw.ac.nz
+</td></tr><tr><td>7eb895e7de883d113b75eda54389460c61d63f67</td><td>Can you tell a face from a HEVC bitstream?
+<br/><b>School of Engineering Science, Simon Fraser University, Burnaby, BC, Canada</b></td><td>('3393216', 'Saeed Ranjbar Alvar', 'saeed ranjbar alvar')<br/>('3320198', 'Hyomin Choi', 'hyomin choi')</td><td>Email: {saeedr,chyomin, ibajic}@sfu.ca
+</td></tr><tr><td>7e467e686f9468b826133275484e0a1ec0f5bde6</td><td>Efficient On-the-fly Category Retrieval
+<br/>using ConvNets and GPUs
+<br/><b>Visual Geometry Group, University of Oxford</b></td><td>('34838386', 'Karen Simonyan', 'karen simonyan')<br/>('1688869', 'Andrew Zisserman', 'andrew zisserman')</td><td>{ken,karen,az}@robots.ox.ac.uk
+</td></tr><tr><td>7e3367b9b97f291835cfd0385f45c75ff84f4dc5</td><td>Improved Local Binary Pattern Based Action Unit Detection Using
+<br/>Morphological and Bilateral Filters
+<br/>1Signal Processing Laboratory (LTS5)
+<br/>´Ecole Polytechnique F´ed´erale de Lausanne,
+<br/>Switzerland
+<br/>2nViso SA
+<br/>Lausanne, Switzerland
+</td><td>('2916630', 'Matteo Sorci', 'matteo sorci')<br/>('1710257', 'Jean-Philippe Thiran', 'jean-philippe thiran')</td><td>{anil.yuce;jean-philippe.thiran}@epfl.ch
+<br/>matteo.sorci@nviso.ch
+</td></tr><tr><td>7ef0cc4f3f7566f96f168123bac1e07053a939b2</td><td>Triangular Similarity Metric Learning: a Siamese
+<br/>Architecture Approach
+<br/>To cite this version:
+<br/>puter Science [cs]. UNIVERSITE DE LYON, 2016. English. <NNT : 2016LYSEI045>. <tel-
+<br/>01314392>
+<br/>HAL Id: tel-01314392
+<br/>https://hal.archives-ouvertes.fr/tel-01314392
+<br/>Submitted on 11 May 2016
+<br/>HAL is a multi-disciplinary open access
+<br/>archive for the deposit and dissemination of sci-
+<br/>entific research documents, whether they are pub-
+<br/>lished or not. The documents may come from
+<br/>teaching and research institutions in France or
+<br/><b>abroad, or from public or private research centers</b><br/>L’archive ouverte pluridisciplinaire HAL, est
+<br/>destin´ee au d´epˆot et `a la diffusion de documents
+<br/>scientifiques de niveau recherche, publi´es ou non,
+<br/>´emanant des ´etablissements d’enseignement et de
+<br/>recherche fran¸cais ou ´etrangers, des laboratoires
+<br/>publics ou priv´es.
+</td><td>('37848497', 'Lilei Zheng', 'lilei zheng')<br/>('37848497', 'Lilei Zheng', 'lilei zheng')</td><td></td></tr><tr><td>7e00fb79576fe213853aeea39a6bc51df9fdca16</td><td>Online Multi-Face Detection and Tracking
+<br/>using Detector Confidence and Structured SVMs
+<br/><b>Eindhoven University of Technology, The Netherlands</b><br/>2TNO Embedded Systems Innovation, Eindhoven, The Netherlands
+</td><td>('3199035', 'Francesco Comaschi', 'francesco comaschi')<br/>('1679431', 'Sander Stuijk', 'sander stuijk')<br/>('1708289', 'Twan Basten', 'twan basten')<br/>('1684335', 'Henk Corporaal', 'henk corporaal')</td><td>{f.comaschi, s.stuijk, a.a.basten, h.corporaal}@tue.nl
+</td></tr><tr><td>7e2cfbfd43045fbd6aabd9a45090a5716fc4e179</td><td>Global Norm-Aware Pooling for Pose-Robust Face Recognition at Low False Positive Rate
+<br/>Global Norm-Aware Pooling for Pose-Robust Face Recognition at Low False
+<br/>Positive Rate
+<br/><b>a School of Computer and Information Technology, Beijing Jiaotong University, Beijing</b><br/>China
+<br/><b>b Research Institute, Watchdata Inc., Beijing, China</b><br/>c DeepInSight, China
+</td><td>('39326372', 'Sheng Chen', 'sheng chen')<br/>('3007274', 'Jia Guo', 'jia guo')<br/>('1681842', 'Yang Liu', 'yang liu')<br/>('46757550', 'Xiang Gao', 'xiang gao')<br/>('2765914', 'Zhen Han', 'zhen han')</td><td>{shengchen, zhan}@bjtu.edu.cn
+<br/>{yang.liu.yj, xiang.gao}@watchdata.com
+<br/>guojia@gmail.com
+</td></tr><tr><td>7ee53d931668fbed1021839db4210a06e4f33190</td><td>What if we do not have multiple videos of the same action? —
+<br/>Video Action Localization Using Web Images
+<br/><b>Center for Research in Computer Vision (CRCV), University of Central Florida (UCF</b></td><td>('3195774', 'Waqas Sultani', 'waqas sultani')<br/>('1745480', 'Mubarak Shah', 'mubarak shah')</td><td>waqassultani@knights.ucf.edu, shah@crcv.ucf.edu
+</td></tr><tr><td>7e18b5f5b678aebc8df6246716bf63ea5d8d714e</td><td>Original research
+<br/>published: 15 January 2018
+<br/>doi: 10.3389/fpsyt.2017.00309
+<br/>increased loss aversion in
+<br/>Unmedicated Patients with
+<br/>Obsessive–compulsive Disorder
+<br/>1 Department of Psychiatry, Icahn School of Medicine at Mount Sinai, New York, NY, United States, 2 Fishberg Department of
+<br/><b>Neuroscience, Icahn School of Medicine at Mount Sinai, Friedman Brain Institute, New York, NY, United States</b><br/><b>of Psychology, University of Michigan, Ann Arbor, MI, United States, University of Michigan, Ann</b><br/>Arbor, MI, United States
+<br/>introduction: Obsessive–compulsive disorder (OCD) patients show abnormalities in
+<br/>decision-making and, clinically, appear to show heightened sensitivity to potential nega-
+<br/>tive outcomes. Despite the importance of these cognitive processes in OCD, few studies
+<br/>have examined the disorder within an economic decision-making framework. Here, we
+<br/>investigated loss aversion, a key construct in the prospect theory that describes the
+<br/>tendency for individuals to be more sensitive to potential losses than gains when making
+<br/>decisions.
+<br/>Methods: Across two study sites, groups of unmedicated OCD patients (n = 14), medi-
+<br/>cated OCD patients (n = 29), and healthy controls (n = 34) accepted or rejected a series
+<br/>of 50/50 gambles containing varying loss/gain values. Loss aversion was calculated
+<br/>as the ratio of the likelihood of rejecting a gamble with increasing potential losses to
+<br/>the likelihood of accepting a gamble with increasing potential gains. Decision times to
+<br/>accept or reject were also examined and correlated with loss aversion.
+<br/>results: Unmedicated OCD patients exhibited significantly more loss aversion com-
+<br/>pared to medicated OCD or controls, an effect that was replicated across both sites
+<br/>and remained significant even after controlling for OCD symptom severity, trait anxiety,
+<br/>and sex. Post hoc analyses further indicated that unmedicated patients’ increased
+<br/>likelihood to reject a gamble as its loss value increased could not be explained solely by
+<br/>greater risk aversion among patients. Unmedicated patients were also slower to accept
+<br/>than reject gambles, effects that were not found in the other two groups. Loss aversion
+<br/>was correlated with decision times in unmedicated patients but not in the other two
+<br/>groups.
+<br/>Discussion: These data identify abnormalities of decision-making in a subgroup
+<br/>of OCD patients not taking psychotropic medication. The findings help elucidate
+<br/>the cognitive mechanisms of the disorder and suggest that future treatments could
+<br/>aim to target abnormalities of loss/gain processing during decision-making in this
+<br/>population.
+<br/>Keywords: decision-making, prospect theory, choice behavior, reward, obsessive–compulsive disorder
+<br/>Edited by:
+<br/>Qinghua He,
+<br/><b>Southwest University, China</b><br/>Reviewed by:
+<br/>Qiang Wang,
+<br/><b>Beijing Normal University, China</b><br/>Michael Grady Wheaton,
+<br/><b>Columbia University, United States</b><br/>*Correspondence:
+<br/>Specialty section:
+<br/>This article was submitted
+<br/>to Psychopathology,
+<br/>a section of the journal
+<br/>Frontiers in Psychiatry
+<br/>Received: 08 December 2017
+<br/>Accepted: 26 December 2017
+<br/>Published: 15 January 2018
+<br/>Citation:
+<br/>Sip KE, Gonzalez R, Taylor SF and
+<br/>Stern ER (2018) Increased Loss
+<br/>Aversion in Unmedicated Patients
+<br/>with Obsessive–Compulsive Disorder.
+<br/>Front. Psychiatry 8:309.
+<br/>doi: 10.3389/fpsyt.2017.00309
+<br/>Frontiers in Psychiatry | www.frontiersin.org
+<br/>January 2018 | Volume 8 | Article 309
+</td><td>('3592712', 'Kamila E. Sip', 'kamila e. sip')<br/>('31801083', 'Richard Gonzalez', 'richard gonzalez')<br/>('2085281', 'Stephan F. Taylor', 'stephan f. taylor')<br/>('2025121', 'Emily R. Stern', 'emily r. stern')<br/>('2025121', 'Emily R. Stern', 'emily r. stern')</td><td>emily.stern@mssm.edu,
+<br/>emily.stern@nyumc.org
+</td></tr><tr><td>7e9df45ece7843fe050033c81014cc30b3a8903a</td><td>AUDIO-VISUAL INTENT-TO-SPEAK DETECTION FOR HUMAN-COMPUTER
+<br/>INTERACTION
+<br/>Institut Eurecom
+<br/> , route des Cr^etes, BP  
+<br/>
+<br/><b>IBM T.J. Watson Research Center</b><br/>Yorktown Heights, NY 
+</td><td>('3163391', 'Philippe de Cuetos', 'philippe de cuetos')<br/>('2264160', 'Chalapathy Neti', 'chalapathy neti')<br/>('33666044', 'Andrew W. Senior', 'andrew w. senior')</td><td>decuetos@eurecom.fr
+<br/>cneti,aws@us.ibm.com
+</td></tr><tr><td>7ebd323ddfe3b6de8368c4682db6d0db7b70df62</td><td>Proceedings of the International Conference on Computer and Information Science and Technology
+<br/>Ottawa, Ontario, Canada, May 11 – 12, 2015
+<br/>Paper No. 111
+<br/>Location-based Face Recognition Using Smart Mobile Device
+<br/>Sensors
+<br/>Department of Computer Science
+<br/><b>University of Victoria, Victoria, Canada</b></td><td>('2019933', 'Nina Taherimakhsousi', 'nina taherimakhsousi')<br/>('1747880', 'Hausi A. Müller', 'hausi a. müller')</td><td>ninata@uvic.ca; hausi@uvic.ca
+</td></tr><tr><td>7eb85bcb372261bad707c05e496a09609e27fdb3</td><td>A Compute-efficient Algorithm for Robust Eyebrow Detection
+<br/><b>Nanyang Technological University, 2University of California San Diego</b></td><td>('36375772', 'Supriya Sathyanarayana', 'supriya sathyanarayana')<br/>('1710219', 'Ravi Kumar Satzoda', 'ravi kumar satzoda')<br/>('1924458', 'Suchitra Sathyanarayana', 'suchitra sathyanarayana')</td><td>supriya001@e.ntu.edu.sg, rsatzoda@eng.ucsd.edu, ssathyanarayana@ucsd.edu, astsrikan@ntu.edu.sg
+</td></tr><tr><td>7ed6ff077422f156932fde320e6b3bd66f8ffbcb</td><td>State of 3D Face Biometrics for Homeland Security Applications
+<br/>Chaudhari4
+</td><td>('2925401', 'Anshuman Razdan', 'anshuman razdan')<br/>('1693971', 'Gerald Farin', 'gerald farin')</td><td></td></tr><tr><td>7ebb153704706e457ab57b432793d2b6e5d12592</td><td>ZHONG, ARANDJELOVI ´C, ZISSERMAN: FACES IN PLACES
+<br/>Faces In Places: compound query retrieval
+<br/>Relja Arandjelovi´c2
+<br/>1 Visual Geometry Group
+<br/>Department of Engineering Science
+<br/><b>University of Oxford, UK</b><br/>2 WILLOW project
+<br/>Departement d’Informatique de l’École
+<br/>Normale Supérieure
+<br/>ENS/INRIA/CNRS UMR 8548
+</td><td>('6730372', 'Yujie Zhong', 'yujie zhong')<br/>('1688869', 'Andrew Zisserman', 'andrew zisserman')</td><td>yujie@robots.ox.ac.uk
+<br/>relja.arandjelovic@inria.fr
+<br/>az@robots.ox.ac.uk
+</td></tr><tr><td>7ec7163ec1bc237c4c2f2841c386f2dbfd0cc922</td><td>ORIGINAL RESEARCH
+<br/>published: 20 June 2018
+<br/>doi: 10.3389/fpsyg.2018.00971
+<br/>Skiing and Thinking About It:
+<br/>Moment-to-Moment and
+<br/>Retrospective Analysis of Emotions
+<br/>in an Extreme Sport
+<br/>and Tove Irene Dahl
+<br/><b>UiT The Arctic University of Norway, Troms , Norway</b><br/>Happiness is typically reported as an important reason for participating in challenging
+<br/>activities like extreme sport. While in the middle of the activity, however, participants
+<br/>do not seem particularly happy. So where does the happiness come from? The
+<br/>article proposes some answers from a study of facially expressed emotions measured
+<br/>moment-by-moment during a backcountry skiing event. Self-reported emotions were
+<br/>also assessed immediately after the skiing. Participants expressed lower levels of
+<br/>happiness while skiing, compared to when stopping for a break. Moment-to-moment
+<br/>and self-reported measures of emotions were largely unrelated. These findings are
+<br/>explained with reference to the Functional Wellbeing Approach (Vittersø, 2013), which
+<br/>argues that some moment-to-moment feelings are non-evaluative in the sense of being
+<br/>generated directly by the difficulty of an activity. By contrast, retrospective emotional
+<br/>feelings are more complex as they include an evaluation of the overall goals and values
+<br/>associated with the activity as a whole.
+<br/>Keywords: emotions, facial expression, moment-to-moment, functional wellbeing approach, extreme sport,
+<br/>backcountry skiing
+<br/>INTRODUCTION
+<br/>We engage in recreational activities in order to feel good. This pursuit is not restricted to
+<br/>leisure activities like sunbathing at the beach or enjoying a fine meal with friends and family.
+<br/>Mountaineers, BASE jumpers, and other extreme athletes also claim that the importance of their
+<br/>favorite activities is the experience of positive feelings (Brymer, 2005; Willig, 2008; Brown and
+<br/>Fraser, 2009; Hetland and Vittersø, 2012). But what exactly is it that feels so good about these
+<br/>vigorous and exhausting activities, often referred to as extreme sport? To explore this question,
+<br/>we developed a new way of measuring emotions in real time during the activity. We equipped
+<br/>the participants with a camera that captured their facially expressed emotion while skiing. These
+<br/>films were then analyzed with software for automatic coding of facial expressions and compared
+<br/>the participants self-reported emotions assessed in retrospect. This approach enabled us to explore
+<br/>long standing questions as to how such positive experiences are created. Are they a result of a series
+<br/>of online positive feelings? Or is it the impact of a few central features like intensity peaks, rapid
+<br/>emotional changes, and happy endings that create them? Is it the experience of flow? Or is it the
+<br/>feeling of mastery that kicks in only after the activity has been successfully accomplished?
+<br/>Edited by:
+<br/>Eric Brymer,
+<br/><b>Leeds Beckett University</b><br/>United Kingdom
+<br/>Reviewed by:
+<br/>Michael Banissy,
+<br/><b>Goldsmiths, University of London</b><br/>United Kingdom
+<br/>Ralf Christopher Buckley,
+<br/><b>Grif th University, Australia</b><br/>*Correspondence:
+<br/>Specialty section:
+<br/>This article was submitted to
+<br/>Movement Science and Sport
+<br/>Psychology,
+<br/>a section of the journal
+<br/>Frontiers in Psychology
+<br/>Received: 26 September 2017
+<br/>Accepted: 25 May 2018
+<br/>Published: 20 June 2018
+<br/>Citation:
+<br/>Hetland A, Vittersø J, Wie SOB,
+<br/>Kjelstrup E, Mittner M and Dahl TI
+<br/>(2018) Skiing and Thinking About It:
+<br/>Moment-to-Moment
+<br/>and Retrospective Analysis
+<br/>of Emotions in an Extreme Sport.
+<br/>Front. Psychol. 9:971.
+<br/>doi: 10.3389/fpsyg.2018.00971
+<br/>Frontiers in Psychology | www.frontiersin.org
+<br/>June 2018 | Volume 9 | Article 971
+</td><td>('50814786', 'Audun Hetland', 'audun hetland')<br/>('2956586', 'Joar Vittersø', 'joar vittersø')<br/>('50823709', 'Simen Oscar Bø Wie', 'simen oscar bø wie')<br/>('50829546', 'Eirik Kjelstrup', 'eirik kjelstrup')<br/>('4281140', 'Matthias Mittner', 'matthias mittner')<br/>('50814786', 'Audun Hetland', 'audun hetland')</td><td>audun.hetland@uit.no
+</td></tr><tr><td>7e0c75ce731131e613544e1a85ae0f2c28ee4c1f</td><td><b>Imperial College London</b><br/>Department of Computing
+<br/>Regression-based Estimation of Pain and
+<br/>Facial Expression Intensity
+<br/>June, 2015
+<br/>Submitted in part fulfilment of the requirements for the degree of PhD in Computing and
+<br/><b>the Diploma of Imperial College London. This thesis is entirely my own work, and, except</b><br/>where otherwise indicated, describes my own research.
+</td><td>('3291812', 'Sebastian Kaltwang', 'sebastian kaltwang')<br/>('1694605', 'Maja Pantic', 'maja pantic')</td><td></td></tr><tr><td>7ef44b7c2b5533d00001ae81f9293bdb592f1146</td><td>No d’ordre : 227-2013
+<br/>Anne 2013
+<br/>THESE DE L’UNIVERSITE DE LYON
+<br/>Dlivre par
+<br/>L’UNIVERSITE CLAUDE BERNARD - LYON 1
+<br/>Ecole Doctorale Informatique et Mathmatiques
+<br/>P H D T H E S I S
+<br/>D´etection des ´emotions `a partir de vid´eos dans un
+<br/>environnement non contrˆol´e
+<br/>Detection of emotions from video in non-controlled environment
+<br/>Soutenue publiquement (Public defense) le 14/11/2013
+<br/>Composition du jury (Dissertation committee):
+<br/>Rapporteurs
+<br/>Mr. Renaud SEGUIER
+<br/>Mr. Jean-Claude MARTIN
+<br/>Examinateurs
+<br/>Mr. Thomas MOESLUND
+<br/>Mr. Patrick LAMBERT
+<br/>Mr. Samir GARBAYA
+<br/>Directeur
+<br/>Mme. Saida BOUAKAZ
+<br/>Co-encadrant
+<br/>Mr. Alexandre MEYER
+<br/>Mr. Hubert KONIK
+<br/>Professor, Supelec, CNRS UMR 6164, Rennes, France
+<br/>Professor, LIMSI-CNRS, Universit´e Paris-Sud, France
+<br/>Professor, Department of Architecture, Design and Media Technology,
+<br/><b>Aalborg University, Denmark</b><br/>Professor, LISTIC - Polytech Annecy-Chambery, France
+<br/>Associate Professor, Le2i, ENSAM, Chalon sur Saone, France
+<br/>Professor, LIRIS-CNRS, Universit´e Claude Bernard Lyon 1, France
+<br/>Associate Professor, LIRIS, Universit´e Claude Bernard Lyon 1, France
+<br/>Associate Professor, LaHC, Universit´e Jean Monnet, Saint-Etienne, France
+</td><td>('1943666', 'Rizwan Ahmed Khan', 'rizwan ahmed khan')</td><td></td></tr><tr><td>7e1ea2679a110241ed0dd38ff45cd4dfeb7a8e83</td><td>Extensions of Hierarchical Slow Feature
+<br/>Analysis for Efficient Classification and
+<br/>Regression on High-Dimensional Data
+<br/>Dissertation
+<br/>Submitted to the Faculty of Electrical
+<br/>Engineering and Information Technology
+<br/>at the
+<br/><b>Ruhr University Bochum</b><br/>for the
+<br/>Degree of Doktor-Ingenieur
+<br/>Alberto Nicol´as Escalante Ba˜nuelos
+<br/>by
+<br/>Bochum, Germany, January, 2017
+</td><td></td><td></td></tr><tr><td>7e507370124a2ac66fb7a228d75be032ddd083cc</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TAFFC.2017.2708106, IEEE
+<br/>Transactions on Affective Computing
+<br/>Dynamic Pose-Robust Facial Expression
+<br/>Recognition by Multi-View Pairwise Conditional
+<br/>Random Forests
+<br/>1 Sorbonne Universit´es, UPMC Univ Paris 06
+<br/>CNRS, UMR 7222, F-75005, Paris, France
+</td><td>('3190846', 'Arnaud Dapogny', 'arnaud dapogny')<br/>('2521061', 'Kevin Bailly', 'kevin bailly')</td><td></td></tr><tr><td>1056347fc5e8cd86c875a2747b5f84fd570ba232</td><td></td><td></td><td></td></tr><tr><td>10550ee13855bd7403946032354b0cd92a10d0aa</td><td>Accelerating Neuromorphic Vision Algorithms
+<br/>for Recognition
+<br/>Ahmed Al Maashri
+<br/>Vijaykrishnan Narayanan
+<br/><b>Microsystems Design Lab, The Pennsylvania State University</b><br/>†IBM System and Technology Group
+<br/><b>School of Electrical, Computer and Energy Engineering, Arizona State University</b></td><td>('1723845', 'Michael DeBole', 'michael debole')<br/>('36156473', 'Matthew Cotter', 'matthew cotter')<br/>('2916636', 'Nandhini Chandramoorthy', 'nandhini chandramoorthy')<br/>('37095722', 'Yang Xiao', 'yang xiao')<br/>('1685028', 'Chaitali Chakrabarti', 'chaitali chakrabarti')</td><td>{maashri, mjcotter, nic5090, yux106, vijay}@cse.psu.edu
+<br/>mvdebole@us.ibm.com
+<br/>chaitali@asu.edu
+</td></tr><tr><td>10e12d11cb98ffa5ae82343f8904cfe321ae8004</td><td>A New Simplex Sparse Learning Model to Measure
+<br/>Data Similarity for Clustering
+<br/><b>University of Texas at Arlington</b><br/>Arlington, Texas 76019, USA
+</td><td>('39122448', 'Jin Huang', 'jin huang')<br/>('1688370', 'Feiping Nie', 'feiping nie')<br/>('1748032', 'Heng Huang', 'heng huang')</td><td>huangjinsuzhou@gmail.com, feipingnie@gmail.com, heng@uta.edu
+</td></tr><tr><td>10e7dd3bbbfbc25661213155e0de1a9f043461a2</td><td>Cross Euclidean-to-Riemannian Metric Learning
+<br/>with Application to Face Recognition from Video
+</td><td>('7945869', 'Zhiwu Huang', 'zhiwu huang')<br/>('3373117', 'Ruiping Wang', 'ruiping wang')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('1681236', 'Luc Van Gool', 'luc van gool')<br/>('1710220', 'Xilin Chen', 'xilin chen')</td><td></td></tr><tr><td>100105d6c97b23059f7aa70589ead2f61969fbc3</td><td>Frontal to Profile Face Verification in the Wild
+<br/><b>Center for Automation Research, University of Maryland, College Park, MD 20740, USA</b><br/><b>The State University of New Jersey</b><br/>Piscataway, NJ 08854, USA.
+</td><td>('2500202', 'Soumyadip Sengupta', 'soumyadip sengupta')<br/>('36407236', 'Jun-Cheng Chen', 'jun-cheng chen')<br/>('1741177', 'Vishal M. Patel', 'vishal m. patel')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')<br/>('34734622', 'David W. Jacobs', 'david w. jacobs')</td><td></td></tr><tr><td>100da509d4fa74afc6e86a49352751d365fceee5</td><td>Multiclass Recognition and Part Localization with Humans in the Loop
+<br/>†Department of Computer Science and Engineering
+<br/><b>University of California, San Diego</b><br/>Serge Belongie†
+<br/>‡Department of Electrical Engineering
+<br/><b>California Institute of Technology</b></td><td>('2367820', 'Catherine Wah', 'catherine wah')<br/>('3251767', 'Steve Branson', 'steve branson')<br/>('1690922', 'Pietro Perona', 'pietro perona')</td><td>{cwah,sbranson,sjb}@cs.ucsd.edu
+<br/>perona@caltech.edu
+</td></tr><tr><td>10ab1b48b2a55ec9e2920a5397febd84906a7769</td><td></td><td></td><td></td></tr><tr><td>10af69f11301679b6fbb23855bf10f6af1f3d2e6</td><td>Beyond Gaussian Pyramid: Multi-skip Feature Stacking for Action Recognition
+<br/><b>School of Computer Science, Carnegie Mellon University</b></td><td>('46329993', 'Ming Lin', 'ming lin')<br/>('2314980', 'Xuanchong Li', 'xuanchong li')<br/>('7661726', 'Alexander G. Hauptmann', 'alexander g. hauptmann')<br/>('1681921', 'Bhiksha Raj', 'bhiksha raj')</td><td>lanzhzh, minglin, xcli, alex, bhiksha@cs.cmu.edu
+</td></tr><tr><td>10ce3a4724557d47df8f768670bfdd5cd5738f95</td><td>Fihe igh Fie
+<br/>Ac e ad 
+<br/>Ra
+<br/>The Rbic i e Caegie e
+<br/>5000 Fbe Ave e ib gh A 15213
+<br/>Abac.  ay face ecgii ak he e ad i
+<br/>cdii f he be ad ga
+<br/>
+<br/>a di(cid:11)ee e ad de a di(cid:11)ee i
+<br/>ecgii a
+<br/> bjec ca ed a abiay e ad de abiay i
+<br/>ad ay  be f be iage agai ca ed a abiay e ad
+<br/> de abiay i
+<br/>Fihe
+<br/>iage. achig bewee he be ad ga
+<br/>he Fihe
+<br/>d ci
+<br/> ay face ecgii ceai he e f he be ad ga
+<br/>di(cid:11)ee. The ga
+<br/>The a
+<br/>ga
+<br/>view ca ed f a caea i he ce f he . The  be f ga
+<br/>ad be iage ca a
+<br/>iage f each  bjec a fa
+<br/>yica
+<br/>iage a ig
+<br/>Face ecgii ac e i.e. face ecgii whee he ga
+<br/>iage d  have he ae e ha eceived vey
+<br/>have bee ed which ca ecgize face [1]  e geea
+<br/>a a vaiey f e.
+<br/>a evey e. A
+<br/>f exa
+<br/>caiig a
+<br/>iai vaiai.  ca be ed wih abiay ga
+<br/>Afe e vaiai he ex  igi(cid:12)ca fac a(cid:11)ecig he aea
+<br/>ace f face i i
+<br/>face ecgii ac i
+<br/>face [4 5]. 
+<br/>i
+</td><td></td><td>fgiaiibg@c.c .ed
+</td></tr><tr><td>100428708e4884300e4c1ac1f84cbb16e7644ccf</td><td>REGULARIZED SHEARLET NETWORK FOR FACE RECOGNITION USING SINGLE
+<br/>SAMPLE PER PERSON
+<br/><b>Research Groups on Intelligent Machines, University of Sfax, Sfax 3038, Tunisia</b><br/><b>University of Houston, Houston, TX 77204, USA</b></td><td>('2791150', 'Mohamed Anouar Borgi', 'mohamed anouar borgi')<br/>('8847309', 'Demetrio Labate', 'demetrio labate')<br/>('3410172', 'Chokri Ben Amar', 'chokri ben amar')</td><td>{anoir.borgi@ieee.org; dlabate@math.uh.edu ; maher.elarbi@gmail.com ; chokri.benamar@ieee.org }
+</td></tr><tr><td>102e374347698fe5404e1d83f441630b1abf62d9</td><td>Facial Image Analysis for Fully-Automatic
+<br/>Prediction of Difficult Endotracheal Intubation
+</td><td>('40564153', 'Patrick Schoettker', 'patrick schoettker')<br/>('2916630', 'Matteo Sorci', 'matteo sorci')<br/>('1697965', 'Hua Gao', 'hua gao')<br/>('2612411', 'Christophe Perruchoud', 'christophe perruchoud')<br/>('1710257', 'Jean-Philippe Thiran', 'jean-philippe thiran')</td><td></td></tr><tr><td>10f17534dba06af1ddab96c4188a9c98a020a459</td><td>People-LDA: Anchoring Topics to People using Face Recognition
+<br/>Erik Learned-Miller
+<br/><b>University of Massachusetts Amherst</b><br/>Amherst MA 01003
+<br/>http://vis-www.cs.umass.edu/(cid:152)vidit/peopleLDA
+</td><td>('2246870', 'Vidit Jain', 'vidit jain')<br/>('1735747', 'Andrew McCallum', 'andrew mccallum')</td><td></td></tr><tr><td>10e0e6f1ec00b20bc78a5453a00c792f1334b016</td><td>Pose-Selective Max Pooling for Measuring Similarity
+<br/>1Dept. of Computer Science
+<br/>2Dept. of Electrical & Computer Engineering
+<br/><b>Johns Hopkins University, 3400 N. Charles St, Baltimore, MD 21218, USA</b></td><td>('40031188', 'Xiang Xiang', 'xiang xiang')</td><td>xxiang@cs.jhu.edu
+</td></tr><tr><td>102b968d836177f9c436141e382915a4f8549276</td><td>Affective Multimodal Human-Computer Interaction
+<br/><b>Faculty of EEMCS, Delft University of Technology, The Netherlands</b><br/><b>Faculty of Science, University of Amsterdam, The Netherlands</b><br/><b>Psychology and Psychiatry, University of Pittsburgh, USA</b><br/><b>Beckman Institute, University of Illinois at Urbana-Champaign, USA</b></td><td>('1694605', 'Maja Pantic', 'maja pantic')<br/>('1703601', 'Nicu Sebe', 'nicu sebe')<br/>('1737918', 'Jeffrey F. Cohn', 'jeffrey f. cohn')</td><td>mpantic@ieee.org, nicu@science.uva.nl, jeffcohn@pitt.edu, huang@ifp.uiuc.edu
+</td></tr><tr><td>100641ed8a5472536dde53c1f50fa2dd2d4e9be9</td><td>Visual Attributes for Enhanced Human-Machine Communication*
+</td><td>('1713589', 'Devi Parikh', 'devi parikh')</td><td></td></tr><tr><td>10195a163ab6348eef37213a46f60a3d87f289c5</td><td></td><td></td><td></td></tr><tr><td>10e704c82616fb5d9c48e0e68ee86d4f83789d96</td><td></td><td></td><td></td></tr><tr><td>101569eeef2cecc576578bd6500f1c2dcc0274e2</td><td>Multiaccuracy: Black-Box Post-Processing for Fairness in
+<br/>Classification
+<br/>James Zou
+</td><td>('40102677', 'Michael P. Kim', 'michael p. kim')<br/>('27316199', 'Amirata Ghorbani', 'amirata ghorbani')</td><td>mpk@cs.stanford.edu
+<br/>amiratag@stanford.edu
+<br/>jamesz@stanford.edu
+</td></tr><tr><td>106732a010b1baf13c61d0994552aee8336f8c85</td><td>Expanded Parts Model for Semantic Description
+<br/>of Humans in Still Images
+</td><td>('2515597', 'Gaurav Sharma', 'gaurav sharma')<br/>('2462253', 'Cordelia Schmid', 'cordelia schmid')</td><td></td></tr><tr><td>10e70a34d56258d10f468f8252a7762950830d2b</td><td></td><td></td><td></td></tr><tr><td>102b27922e9bd56667303f986404f0e1243b68ab</td><td>Wang et al. Appl Inform (2017) 4:13
+<br/>DOI 10.1186/s40535-017-0042-5
+<br/>RESEARCH
+<br/>Multiscale recurrent regression networks
+<br/>for face alignment
+<br/>Open Access
+<br/>*Correspondence:
+<br/>3 State Key Lab of Intelligent
+<br/>Technologies and Systems,
+<br/>Beijing 100084, People’s
+<br/>Republic of China
+<br/>Full list of author information
+<br/>is available at the end of the
+<br/>article
+</td><td>('27660491', 'Caixun Wang', 'caixun wang')<br/>('22192520', 'Haomiao Sun', 'haomiao sun')<br/>('1697700', 'Jiwen Lu', 'jiwen lu')<br/>('2632601', 'Jianjiang Feng', 'jianjiang feng')<br/>('25060740', 'Jie Zhou', 'jie zhou')</td><td>lujiwen@tsinghua.edu.cn
+</td></tr><tr><td>10fcbf30723033a5046db791fec2d3d286e34daa</td><td>On-Line Cursive Handwriting Recognition: A Survey of Methods
+<br/>and Performances
+<br/>*Faculty of Computer Science & Information Systems, Universiti Teknologi Malaysia (UTM) , 81310
+<br/>Skudai, Johor, Malaysia.
+</td><td>('1731121', 'Dzulkifli Mohamad', 'dzulkifli mohamad')<br/>('1921146', 'M. Othman', 'm. othman')</td><td> 1dzul@fsksm.utm.my, faisal@gmm.fsksm.utm.my, razib@fsksm.utm.my
+</td></tr><tr><td>101d4cfbd6f8a7a10bd33505e2b183183f1d8770</td><td>The 2013 SESAME Multimedia Event Detection and
+<br/>Recounting System
+<br/>SRI International (SRI)
+<br/><b>University of Amsterdam (UvA</b><br/><b>University of Southern California</b><br/>(USC)
+<br/>Cees G.M. Snoek
+<br/>Remi Trichet
+</td><td>('1764443', 'Robert C. Bolles', 'robert c. bolles')<br/>('40560201', 'J. Brian Burns', 'j. brian burns')<br/>('48804780', 'James A. Herson', 'james a. herson')<br/>('31693932', 'Gregory K. Myers', 'gregory k. myers')<br/>('2594026', 'Stephanie Pancoast', 'stephanie pancoast')<br/>('1746492', 'Julien van Hout', 'julien van hout')<br/>('49966591', 'Julie Wong', 'julie wong')<br/>('3000952', 'AmirHossein Habibian', 'amirhossein habibian')<br/>('1769315', 'Dennis C. Koelma', 'dennis c. koelma')<br/>('3245057', 'Zhenyang Li', 'zhenyang li')<br/>('2690389', 'Masoud Mazloom', 'masoud mazloom')<br/>('37806314', 'Silvia-Laura Pintea', 'silvia-laura pintea')<br/>('1964898', 'Sung Chun Lee', 'sung chun lee')<br/>('1858100', 'Pramod Sharma', 'pramod sharma')<br/>('40559421', 'Chen Sun', 'chen sun')</td><td></td></tr><tr><td>108b2581e07c6b7ca235717c749d45a1fa15bb24</td><td>Using Stereo Matching with General Epipolar
+<br/>Geometry for 2D Face Recognition
+<br/>across Pose
+</td><td>('38171682', 'Carlos D. Castillo', 'carlos d. castillo')<br/>('34734622', 'David W. Jacobs', 'david w. jacobs')</td><td></td></tr><tr><td>106092fafb53e36077eba88f06feecd07b9e78e7</td><td>Attend and Interact: Higher-Order Object Interactions for Video Understanding
+<br/><b>Georgia Institute of Technology, 2NEC Laboratories America, 3Georgia Tech Research Institute</b></td><td>('7437104', 'Chih-Yao Ma', 'chih-yao ma')<br/>('2293919', 'Asim Kadav', 'asim kadav')<br/>('50162780', 'Iain Melvin', 'iain melvin')<br/>('1746245', 'Zsolt Kira', 'zsolt kira')<br/>('1775043', 'Hans Peter Graf', 'hans peter graf')</td><td></td></tr><tr><td>103c8eaca2a2176babab2cc6e9b25d48870d6928</td><td>FINDING RELEVANT SEMANTIC CONTENT FOR GROUNDED LANGUAGE LEARNING
+<br/>PANNING FOR GOLD:
+<br/><b>The University of Texas at Austin</b><br/>Department of Computer Science
+<br/>Austin, TX 78712, USA
+</td><td>('47514115', 'David L. Chen', 'david l. chen')<br/>('1797655', 'Raymond J. Mooney', 'raymond j. mooney')</td><td>dlcc@cs.utexas.edu and mooney@cs.utexas.edu
+</td></tr><tr><td>10d334a98c1e2a9e96c6c3713aadd42a557abb8b</td><td>Scene Text Recognition using Part-based Tree-structured Character Detection
+<br/>State Key Laboratory of Management and Control for Complex Systems, CASIA, Beijing, China
+</td><td>('1959339', 'Cunzhao Shi', 'cunzhao shi')<br/>('1683416', 'Chunheng Wang', 'chunheng wang')<br/>('2658590', 'Baihua Xiao', 'baihua xiao')<br/>('1698138', 'Yang Zhang', 'yang zhang')<br/>('39001252', 'Song Gao', 'song gao')<br/>('34539206', 'Zhong Zhang', 'zhong zhang')</td><td>{cunzhao.shi,chunheng.wang,baihua.xiao,yang.zhang,song.gao,zhong.zhang}@ia.ac.cn
+</td></tr><tr><td>10f66f6550d74b817a3fdcef7fdeba13ccdba51c</td><td>Benchmarking Face Alignment
+<br/><b>Institute for Anthropomatics</b><br/><b>Karlsruhe Institute of Technology</b><br/>Karlsruhe, Germany
+</td><td>('1697965', 'Hua Gao', 'hua gao')</td><td>Email: {gao, ekenel}@kit.edu
+</td></tr><tr><td>107fc60a6c7d58a6e2d8572ad8c19cc321a9ef53</td><td>Hollywood in Homes: Crowdsourcing Data
+<br/>Collection for Activity Understanding
+<br/><b>Carnegie Mellon University</b><br/>2Inria
+<br/><b>University of Washington</b><br/><b>The Allen Institute for AI</b><br/>http://allenai.org/plato/charades/
+</td><td>('34280810', 'Gunnar A. Sigurdsson', 'gunnar a. sigurdsson')<br/>('39849136', 'Xiaolong Wang', 'xiaolong wang')<br/>('2270286', 'Ali Farhadi', 'ali farhadi')<br/>('1785596', 'Ivan Laptev', 'ivan laptev')<br/>('1737809', 'Abhinav Gupta', 'abhinav gupta')</td><td></td></tr><tr><td>1048c753e9488daa2441c50577fe5fdba5aa5d7c</td><td>Recognising faces in unseen modes: a tensor based approach
+<br/><b>Curtin University of Technology</b><br/>GPO Box U1987, Perth, WA 6845, Australia.
+</td><td>('2867032', 'Santu Rana', 'santu rana')<br/>('1713220', 'Wanquan Liu', 'wanquan liu')<br/>('1679953', 'Mihai Lazarescu', 'mihai lazarescu')<br/>('1679520', 'Svetha Venkatesh', 'svetha venkatesh')</td><td>{santu.rana, wanquan, m.lazarescu, svetha}@cs.curtin.edu.au
+</td></tr><tr><td>10ca2e03ff995023a701e6d8d128455c6e8db030</td><td>Modeling Stylized Character Expressions
+<br/>via Deep Learning
+<br/><b>University of Washington</b><br/>Seattle, WA, USA
+<br/>2 Zillow Group, Seattle, WA, USA
+<br/>3 Gage Academy of Art, Seattle, WA, USA
+</td><td>('2494850', 'Deepali Aneja', 'deepali aneja')<br/>('2952700', 'Alex Colburn', 'alex colburn')<br/>('9610752', 'Gary Faigin', 'gary faigin')<br/>('3349536', 'Barbara Mones', 'barbara mones')</td><td>{deepalia,shapiro,mones}@cs.washington.edu
+<br/>alexco@cs.washington.edu
+<br/>gary@gageacademy.org
+</td></tr><tr><td>1921e0a97904bdf61e17a165ab159443414308ed</td><td><b>Bielefeld University</b><br/>Faculty of Technology
+<br/>Applied Informatics
+<br/>Bachelor Thesis
+<br/>Retrieval of Web Images for
+<br/>Computer Vision Research
+<br/>September 28, 2009
+<br/>Author:
+<br/>malinke techfak.uni-bielefeld.de
+<br/>Supervisors:
+<br/>Dipl.-Inform. Marco Kortkamp
+<br/>PD Dr.-Ing. Sven Wachsmuth
+</td><td></td><td></td></tr><tr><td>19841b721bfe31899e238982a22257287b9be66a</td><td>Published as a conference paper at ICLR 2018
+<br/>SKIP RNN: LEARNING TO SKIP STATE UPDATES IN
+<br/>RECURRENT NEURAL NETWORKS
+<br/>†Barcelona Supercomputing Center, ‡Google Inc,
+<br/><b>Universitat Polit`ecnica de Catalunya, Columbia University</b></td><td>('2447185', 'Brendan Jou', 'brendan jou')<br/>('1711068', 'Jordi Torres', 'jordi torres')<br/>('9546964', 'Shih-Fu Chang', 'shih-fu chang')</td><td>{victor.campos, jordi.torres}@bsc.es, bjou@google.com,
+<br/>xavier.giro@upc.edu, shih.fu.chang@columbia.edu
+</td></tr><tr><td>1922ad4978ab92ce0d23acc4c7441a8812f157e5</td><td>Face Alignment by Coarse-to-Fine Shape Searching
+<br/><b>The Chinese University of Hong Kong</b><br/>2SenseTime Group
+<br/><b>Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences</b></td><td>('2226254', 'Shizhan Zhu', 'shizhan zhu')<br/>('40475617', 'Cheng Li', 'cheng li')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td>zs014@ie.cuhk.edu.hk, chengli@sensetime.com, ccloy@ie.cuhk.edu.hk, xtang@ie.cuhk.edu.hk
+</td></tr><tr><td>19e62a56b6772bbd37dfc6b8f948e260dbb474f5</td><td>Cross-Domain Metric Learning Based on Information Theory
+<br/>1. State Key Laboratory of Computer Science
+<br/>2. Science and Technology on Integrated Information System Laboratory
+<br/><b>Institute of Software, Chinese Academy of Sciences, Beijing 100190, China</b><br/><b>University of Science and Technology of China</b></td><td>('39483391', 'Hao Wang', 'hao wang')<br/>('40451597', 'Wei Wang', 'wei wang')<br/>('1783918', 'Chen Zhang', 'chen zhang')<br/>('34532334', 'Fanjiang Xu', 'fanjiang xu')</td><td>weiwangpenny@gmail.com
+</td></tr><tr><td>192723085945c1d44bdd47e516c716169c06b7c0</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation
+<br/>Vision and Attention Theory Based Sampling
+<br/>for Continuous Facial Emotion Recognition
+<br/>Ninad S. Thakoor, Member, IEEE
+<br/>10
+<br/>11
+<br/>12
+<br/>13
+<br/>14
+<br/>15
+<br/>16
+<br/>17
+<br/>18
+<br/>19
+<br/>20
+<br/>21
+<br/>22
+<br/>23
+<br/>24
+<br/>25
+<br/>26
+<br/>27
+<br/>28
+<br/>29
+<br/>30
+<br/>31
+<br/>32
+<br/>33
+<br/>34
+<br/>35
+<br/>36
+<br/>37
+</td><td>('1693314', 'Albert C. Cruz', 'albert c. cruz')<br/>('1707159', 'Bir Bhanu', 'bir bhanu')</td><td></td></tr><tr><td>19fb5e5207b4a964e5ab50d421e2549ce472baa8</td><td>International Conference on Computer Systems and Technologies - CompSysTech’14
+<br/>Online Emotional Facial Expression Dictionary
+<br/>Léon Rothkrantz
+</td><td></td><td></td></tr><tr><td>1989a1f9ce18d8c2a0cee3196fe6fa363aab80c2</td><td>ROBUST ONLINE FACE TRACKING-BY-DETECTION
+<br/>2TNO Embedded Systems Innovation, Eindhoven, The Netherlands
+<br/><b>Eindhoven University of Technology, The Netherlands</b></td><td>('3199035', 'Francesco Comaschi', 'francesco comaschi')<br/>('1679431', 'Sander Stuijk', 'sander stuijk')<br/>('1708289', 'Twan Basten', 'twan basten')<br/>('1684335', 'Henk Corporaal', 'henk corporaal')</td><td>{f.comaschi, s.stuijk, a.a.basten, h.corporaal}@tue.nl
+</td></tr><tr><td>1962e4c9f60864b96c49d85eb897141486e9f6d1</td><td>Neural Comput & Applic (2011) 20:565–573
+<br/>DOI 10.1007/s00521-011-0577-7
+<br/>O R I G I N A L A R T I C L E
+<br/>Locality preserving embedding for face and handwriting digital
+<br/>recognition
+<br/>Received: 3 December 2008 / Accepted: 11 March 2011 / Published online: 1 April 2011
+<br/>Ó Springer-Verlag London Limited 2011
+<br/>supervised manifold
+<br/>the local sub-manifolds.
+</td><td>('5383601', 'Zhihui Lai', 'zhihui lai')</td><td></td></tr><tr><td>193debca0be1c38dabc42dc772513e6653fd91d8</td><td>Mnemonic Descent Method:
+<br/>A recurrent process applied for end-to-end face alignment
+<br/><b>Imperial College London, UK</b><br/><b>Goldsmiths, University of London, UK</b><br/><b>Center for Machine Vision and Signal Analysis, University of Oulu, Finland</b></td><td>('2814229', 'George Trigeorgis', 'george trigeorgis')<br/>('2796644', 'Patrick Snape', 'patrick snape')<br/>('2788012', 'Epameinondas Antonakos', 'epameinondas antonakos')<br/>('1752913', 'Mihalis A. Nicolaou', 'mihalis a. nicolaou')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')</td><td>(cid:63){g.trigeorgis, p.snape, e.antonakos, s.zafeiriou}@imperial.ac.uk, †m.nicolaou@gold.ac.uk
+</td></tr><tr><td>191674c64f89c1b5cba19732869aa48c38698c84</td><td>International Journal of Advanced Technology in Engineering and Science www.ijates.com
+<br/>Volume No.03, Issue No. 03, March 2015 ISSN (online): 2348 – 7550
+<br/>FACE IMAGE RETRIEVAL USING ATTRIBUTE -
+<br/>ENHANCED SPARSE CODEWORDS
+<br/>E.Sakthivel1 , M.Ashok kumar2
+<br/><b>PG scholar, Communication Systems, Adhiyamaan College of Engineeing, Hosur, (India</b><br/><b>Electronics And Communication Engg., Adhiyamaan College of Engg., Hosur, (India</b></td><td></td><td></td></tr><tr><td>190d8bd39c50b37b27b17ac1213e6dde105b21b8</td><td>This document is downloaded from DR-NTU, Nanyang Technological
+<br/><b>University Library, Singapore</b><br/>Title
+<br/>Mining weakly labeled web facial images for search-
+<br/>based face annotation
+<br/>Author(s) Wang, Dayong; Hoi, Steven C. H.; He, Ying; Zhu, Jianke
+<br/>Citation
+<br/>Wang, D., Hoi, S. C. H., He, Y., & Zhu, J. (2014). Mining
+<br/>weakly labeled web facial images for search-based face
+<br/>annotation. IEEE Transactions on Knowledge and Data
+<br/>Engineering, 26(1), 166-179.
+<br/>Date
+<br/>2014
+<br/>URL
+<br/>http://hdl.handle.net/10220/18955
+<br/>Rights
+<br/>© 2014 IEEE. Personal use of this material is permitted.
+<br/>Permission from IEEE must be obtained for all other
+<br/><b>uses, in any current or future media, including</b><br/>reprinting/republishing this material for advertising or
+<br/>promotional purposes, creating new collective works, for
+<br/>resale or redistribution to servers or lists, or reuse of any
+<br/>copyrighted component of this work in other works.
+<br/>Published version of this article is available at [DOI:
+<br/>http://dx.doi.org/10.1109/TKDE.2012.240].
+</td><td></td><td></td></tr><tr><td>19af008599fb17bbd9b12288c44f310881df951c</td><td>Discriminative Local Sparse Representations for
+<br/>Robust Face Recognition
+</td><td>('1719561', 'Yi Chen', 'yi chen')<br/>('35210356', 'Umamahesh Srinivas', 'umamahesh srinivas')<br/>('1694440', 'Thong T. Do', 'thong t. do')<br/>('3346079', 'Vishal Monga', 'vishal monga')<br/>('1709073', 'Trac D. Tran', 'trac d. tran')</td><td></td></tr><tr><td>19296e129c70b332a8c0a67af8990f2f4d4f44d1</td><td>Metric Learning Approaches for Face Identification
+<br/>Is that you?
+<br/>M. Guillaumin, J. Verbeek and C. Schmid
+<br/>LEAR team, INRIA Rhˆone-Alpes, France
+<br/>Supplementary Material
+</td><td></td><td></td></tr><tr><td>19666b9eefcbf764df7c1f5b6938031bcf777191</td><td>Group Component Analysis for Multi-block Data:
+<br/>Common and Individual Feature Extraction
+</td><td>('1764724', 'Guoxu Zhou', 'guoxu zhou')<br/>('1747156', 'Andrzej Cichocki', 'andrzej cichocki')<br/>('38741479', 'Yu Zhang', 'yu zhang')</td><td></td></tr><tr><td>198b6beb53e0e61357825d57938719f614685f75</td><td>Vaulted Verification: A Scheme for Revocable Face
+<br/>Recognition
+<br/><b>University of Colorado, Colorado Springs</b></td><td>('3035230', 'Michael Wilber', 'michael wilber')</td><td>mwilber@uccs.edu
+</td></tr><tr><td>1921795408345751791b44b379f51b7dd54ebfa2</td><td>From Face Recognition to Models of Identity:
+<br/>A Bayesian Approach to Learning about
+<br/>Unknown Identities from Unsupervised Data
+<br/><b>Imperial College London, UK</b><br/>2 Microsoft Research, Cambridge, UK
+</td><td>('2388416', 'Sebastian Nowozin', 'sebastian nowozin')</td><td>dc315@imperial.ac.uk
+<br/>Sebastian.Nowozin@microsoft.com
+</td></tr><tr><td>190b3caa2e1a229aa68fd6b1a360afba6f50fde4</td><td></td><td></td><td></td></tr><tr><td>19e0cc41b9f89492b6b8c2a8a58d01b8242ce00b</td><td>W. ZHANG ET AL.: IMPROVING HFR WITH CGAN
+<br/>Improving Heterogeneous Face Recognition
+<br/>with Conditional Adversarial Networks
+<br/>1 Laboratory LIRIS
+<br/>Ecole Centrale de Lyon
+<br/>Ecully, France
+<br/>2 Computer Vision Lab
+<br/><b>Stony Brook University</b><br/>Stony Brook, NY, USA
+</td><td>('2553752', 'Wuming Zhang', 'wuming zhang')<br/>('2496409', 'Zhixin Shu', 'zhixin shu')<br/>('1686020', 'Dimitris Samaras', 'dimitris samaras')<br/>('34086868', 'Liming Chen', 'liming chen')</td><td>wuming.zhang@ec-lyon.fr
+<br/>zhshu@cs.stonybrook.edu
+<br/>samaras@cs.stonybrook.edu
+<br/>liming.chen@ec-lyon.fr
+</td></tr><tr><td>19e7bdf8310f9038e1a9cf412b8dd2c77ff64c54</td><td>Facial Action Coding Using Multiple Visual Cues and a Hierarchy of Particle
+<br/>Filters
+<br/><b>Computer Vision and Robotics Research Laboratory</b><br/><b>University of California, San Diego</b></td><td>('32049271', 'Joel C. McCall', 'joel c. mccall')<br/>('1713989', 'Mohan M. Trivedi', 'mohan m. trivedi')</td><td>jmccall@ucsd.edu mtrivedi@ucsd.edu
+</td></tr><tr><td>1938d85feafdaa8a65cb9c379c9a81a0b0dcd3c4</td><td>Monogenic Binary Coding: An Efficient Local Feature
+<br/>Extraction Approach to Face Recognition
+<br/><b>The Hong Kong Polytechnic University, Hong Kong, China</b></td><td>('5828998', 'Meng Yang', 'meng yang')<br/>('36685537', 'Lei Zhang', 'lei zhang')<br/>('1738911', 'Simon C. K. Shiu', 'simon c. k. shiu')<br/>('1698371', 'David Zhang', 'david zhang')</td><td></td></tr><tr><td>195d331c958f2da3431f37a344559f9bce09c0f7</td><td>Parsing Occluded People by Flexible Compositions
+<br/><b>University of California, Los Angeles</b><br/>Figure 1: An illustration of the flexible compositions. Each connected sub-
+<br/>tree of the full graph (include the full graph itself) is a flexible composition.
+<br/>The flexible compositions that do not have certain parts are suitable for the
+<br/>people with those parts occluded.
+<br/>Figure 2: The absence of body parts evidence can help to predict occlusion.
+<br/>However, absence of evidence is not evidence of absence.
+<br/>It can fail in
+<br/>some challenging scenes. The local image measurements near the occlusion
+<br/>boundary (i.e., around the right elbow and left shoulder) can reliably provide
+<br/>evidence of occlusion.
+<br/>This paper presents an approach to parsing humans when there is signifi-
+<br/>cant occlusion. We model humans using a graphical model which has a tree
+<br/>structure building on recent work [1, 6] and exploit the connectivity prior
+<br/>that, even in presence of occlusion, the visible nodes form a connected sub-
+<br/>tree of the graphical model. We call each connected subtree a flexible com-
+<br/>position of object parts. This involves a novel method for learning occlusion
+<br/>cues. During inference we need to search over a mixture of different flexible
+<br/>models. By exploiting part sharing, we show that this inference can be done
+<br/>extremely efficiently requiring only twice as many computations as search-
+<br/>ing for the entire object (i.e., not modeling occlusion). We evaluate our
+<br/>model on the standard benchmarked “We Are Family" Stickmen dataset [2]
+<br/>and obtain significant performance improvements over the best alternative
+<br/>algorithms.
+<br/>Parsing humans into parts is an important visual task with many applica-
+<br/>tions such as activity recognition. A common approach is to formulate this
+<br/>task in terms of graphical models where the graph nodes and edges repre-
+<br/>sent human parts and their spatial relationships respectively. This approach
+<br/>is becoming successful on benchmarked datasets [1, 6]. But in many real
+<br/>world situations many human parts are occluded. Standard methods are par-
+<br/>tially robust to occlusion by, for example, using a latent variable to indicate
+<br/>whether a part is present and paying a penalty if the part is not detected, but
+<br/>are not designed to deal with significant occlusion. One of these models [1]
+<br/>will be used in this paper as a base model, and we will compare to it.
+<br/>In this paper, we observe that part occlusions often occur in regular pat-
+<br/>terns. The visible parts of a human tend to consist of a subset of connected
+<br/>parts even when there is significant occlusion (see Figures 1 and 2). In the
+<br/>terminology of graphical models, the visible (non-occluded) nodes form a
+<br/>connected subtree of the full graphical model (following current models, for
+<br/>simplicity, we assume that the graphical model is treelike). This connectiv-
+<br/>ity prior is not always valid (i.e., the visible parts of humans may form two
+<br/>or more connected subsets), but our analysis suggests it’s often true. In any
+<br/>case, we will restrict ourselves to it in this paper, since verifying that some
+<br/>isolated pieces of body parts belong to the same person is still very difficult
+<br/>for vision methods, especially in challenging scenes where multiple people
+<br/>occlude one another (see Figure 2).
+<br/>To formulate our approach we build on the base model [1], which is the
+<br/>state of the art on several benchmarked datasets [3, 4, 5], but is not designed
+<br/>for dealing with significant occlusion. We explicitly model occlusions us-
+<br/>ing the connectivity prior above. This means that we have a mixture of
+<br/>models where the number of components equals the number of all the pos-
+<br/>sible connected subtrees of the graph, which we call flexible compositions,
+</td><td>('34420250', 'Xianjie Chen', 'xianjie chen')</td><td></td></tr><tr><td>199c2df5f2847f685796c2523221c6436f022464</td><td>Self Quotient Image for Face Recognition
+<br/><b>Institute of Automation, Chinese Academy of Sciences; 2Miscrosoft Research Asian; 3Media School</b><br/><b>Bournemouth University</b></td><td>('29948255', 'Haitao Wang', 'haitao wang')<br/>('34679741', 'Stan Z. Li', 'stan z. li')<br/>('1744302', 'Yangsheng Wang', 'yangsheng wang')</td><td></td></tr><tr><td>19c0069f075b5b2d8ac48ad28a7409179bd08b86</td><td>Modifying the Memorability of Face Photographs
+<br/><b>Massachusetts Institute of Technology</b><br/>Computer Science and Artificial Intelligence Laboratory
+</td><td>('2556428', 'Aditya Khosla', 'aditya khosla')<br/>('2553201', 'Wilma A. Bainbridge', 'wilma a. bainbridge')<br/>('1690178', 'Antonio Torralba', 'antonio torralba')<br/>('31735139', 'Aude Oliva', 'aude oliva')</td><td>{khosla, wilma, torralba, oliva}@csail.mit.edu
+</td></tr><tr><td>19c0c7835dba1a319b59359adaa738f0410263e8</td><td>228
+<br/>Natural Image Statistics and
+<br/>Low-Complexity Feature Selection
+</td><td>('30125215', 'Manuela Vasconcelos', 'manuela vasconcelos')<br/>('1699559', 'Nuno Vasconcelos', 'nuno vasconcelos')</td><td></td></tr><tr><td>19808134b780b342e21f54b60095b181dfc7a600</td><td></td><td></td><td></td></tr><tr><td>19d583bf8c5533d1261ccdc068fdc3ef53b9ffb9</td><td>FaceNet: A Unified Embedding for Face Recognition and Clustering
+<br/>Google Inc.
+<br/>Google Inc.
+<br/>Google Inc.
+</td><td>('3302320', 'Florian Schroff', 'florian schroff')<br/>('2741985', 'Dmitry Kalenichenko', 'dmitry kalenichenko')<br/>('2276542', 'James Philbin', 'james philbin')</td><td>fschroff@google.com
+<br/>dkalenichenko@google.com
+<br/>jphilbin@google.com
+</td></tr><tr><td>1910f5f7ac81d4fcc30284e88dee3537887acdf3</td><td> Volume 6, Issue 5, May 2016 ISSN: 2277 128X
+<br/>International Journal of Advanced Research in
+<br/> Computer Science and Software Engineering
+<br/> Research Paper
+<br/> Available online at: www.ijarcsse.com
+<br/>Semantic Based Hypergraph Reranking Model for Web
+<br/>Image Search
+<br/>1, 2, 3, 4 B. E. Dept of CSE, 5 Asst. Prof. Dept of CSE
+<br/><b>Dr.D.Y.Patil College of Engineering, Pune, Maharashtra, India</b></td><td></td><td></td></tr><tr><td>19a9f658ea14701502d169dc086651b1d9b2a8ea</td><td>Structural Models for Face Detection
+<br/>Center for Biometrics and Security Research & National Laboratory of Pattern Recognition
+<br/><b>Institute of Automation, Chinese Academy of Sciences, China</b></td><td>('1721677', 'Junjie Yan', 'junjie yan')<br/>('2520795', 'Xucong Zhang', 'xucong zhang')<br/>('1718623', 'Zhen Lei', 'zhen lei')<br/>('1716143', 'Dong Yi', 'dong yi')<br/>('34679741', 'Stan Z. Li', 'stan z. li')</td><td>{jjyan,xczhang,zlei,dyi,szli}@nlpr.ia.ac.cn
+</td></tr><tr><td>197c64c36e8a9d624a05ee98b740d87f94b4040c</td><td>Regularized Greedy Column Subset Selection
+<br/>aDepartment of Computer Systems, Universidad Polit´ecnica de Madrid
+<br/>bDepartment of Applied Mathematics, Universidad Polit´ecnica de Madrid
+</td><td>('1858768', 'Alberto Mozo', 'alberto mozo')</td><td>*bruno.ordozgoiti@upm.es
+</td></tr><tr><td>19d4855f064f0d53cb851e9342025bd8503922e2</td><td>Learning SURF Cascade for Fast and Accurate Object Detection
+<br/>Intel Labs China
+</td><td>('35423937', 'Jianguo Li', 'jianguo li')<br/>('2470865', 'Yimin Zhang', 'yimin zhang')</td><td></td></tr><tr><td>19d3b02185ad36fb0b792f2a15a027c58ac91e8e</td><td>Im2Text: Describing Images Using 1 Million
+<br/>Captioned Photographs
+<br/>Tamara L Berg
+<br/><b>Stony Brook University</b><br/>Stony Brook, NY 11794
+</td><td>('2004053', 'Vicente Ordonez', 'vicente ordonez')<br/>('2170826', 'Girish Kulkarni', 'girish kulkarni')</td><td>{vordonezroma or tlberg}@cs.stonybrook.edu
+</td></tr><tr><td>193ec7bb21321fcf43bbe42233aed06dbdecbc5c</td><td>UC Santa Barbara
+<br/>UC Santa Barbara Previously Published Works
+<br/>Title
+<br/>Automatic 3D facial expression analysis in videos
+<br/>Permalink
+<br/>https://escholarship.org/uc/item/3g44f7k8
+<br/>Authors
+<br/>Chang, Y
+<br/>Vieira, M
+<br/>Turk, M
+<br/>et al.
+<br/>Publication Date
+<br/>2005-01-01
+<br/>Peer reviewed
+<br/>eScholarship.org
+<br/>Powered by the California Digital Library
+<br/><b>University of California</b></td><td></td><td></td></tr><tr><td>19da9f3532c2e525bf92668198b8afec14f9efea</td><td>Challenge: Face verification across age progression using real-world data
+<br/>Video and Image Modeling and Synthesis Lab, Department of Computer Science,
+<br/><b>University of Delaware, Newark, DE. USA</b><br/>1. Overview
+<br/>Analysis of face images has been the topic of in-depth research with wide spread applications. Face recognition, verifi-
+<br/>cation, age progression studies are some of the topics under study. In order to facilitate comparison and benchmarking of
+<br/>different approaches, various datasets have been released. For the specific topics of face verification with age progression,
+<br/>aging pattern extraction and age estimation, only two public datasets are currently available. The FGNET and MORPH
+<br/>datasets contain a large number of subjects, but only a few images are available for each subject. We present a new dataset,
+<br/>VADANA, which complements them by providing a large number of high quality digital images for each subject within and
+<br/>across ages (depth vs. breadth). It provides the largest number of intra-personal pairs, essential for better training and testing.
+<br/>The images also offer a natural range of pose, expression and illumination variation. We demonstrate the difference and
+<br/>difficulty of VADANA by testing with state-of-the-art algorithms. Our findings from experiments show how VADANA can
+<br/>aid further research on different types of verification algorithms.
+<br/>The following sections provide details for the proposed challenge. The dataset details, the need and motivation for its
+<br/>creation, comparison to existing benchmarks and the experiments performed on the same have been provided in the attached
+<br/>paper.
+<br/>2. Problem definition and challenges
+<br/>There are various problems in facial image analysis, such as face detection (finding faces in a given image), face recogni-
+<br/>tion (matching new image to a known dataset), face verification (determine if a given unknown pair of face images belong to
+<br/>same person) and many others. In this work, we focus on face verification specifically in the case of age progression.
+<br/>Problem definition: The input is a pair of facial images. The images are such that at least region from top of forehead till
+<br/>the chin is covered. Though in general, the images cover from top of head region and part of neck region also. The identity
+<br/>of the person(s) in the images is not known a priori. The system must determine if the two images belong to the same person
+<br/>(intra-personal pair or intra-pair) or to different persons (extra-personal pair or extra-pair). The two images are taken across a
+<br/>time period such that the age gap between the pair may range from 0 to 9 years. Also, the pose, expression and illumination
+<br/>is uncontrolled for both images.
+<br/>Training setup: During the training phase, the system is provided with pair of images (both intra-pairs and extra-pairs).
+<br/>The age of the subject in a given image and thus the age gap between a pair is provided during training. A classifier is trained
+<br/>using the features from the images.
+<br/>Testing setup: During the testing phase, the input is a pair of images. The subjects in these pairs are different from those
+<br/>in the training, i.e, the training and testing subjects are non-overlapping. There is no explicit age (or age-gap) information
+<br/>provided at this stage. The system must classify the pair as either intra-personal or extra-personal.
+<br/>Applications: The above problem definition closely resembles various real-world application scenarios such as passport
+<br/>verification, security and surveillance matching in videos/image captured over a period of time, clustering of people in large
+<br/>datasets where identities are unknown and many others.
+<br/>Challenges: The challenges stem from various aspects of the above problem definition: (1) The subject identities are not
+<br/>known, the system must therefore only rely on the information from the pair of images to determine the final classification.
+<br/>(2) The images are taken at different times, ranging from a gap of few months to up to 9 years (as in the case of passport
+<br/>verification). The effects due to aging thus contribute to shape and appearance changes even for an intra-pair (same person).
+</td><td>('1692539', 'Gowri Somanath', 'gowri somanath')<br/>('1708413', 'Chandra Kambhamettu', 'chandra kambhamettu')</td><td>somanath,chandra@cis.udel.edu
+</td></tr><tr><td>19868a469dc25ee0db00947e06c804b88ea94fd0</td><td>SP-SVM: Large Margin Classifier for Data on Multiple Manifolds
+<br/><b>Purdue University, West Lafayette, IN. 47907, USA</b><br/><b>College of Information and Control Engineering, China University of Petroleum, Qingdao 266580, China</b><br/><b>Santa Clara University, Santa Clara, CA. 95053, USA</b><br/><b>cid:5)School of Electrical and Computer Engineering, Purdue University, West Lafayette, IN. 47907, USA</b></td><td>('39274045', 'Bin Shen', 'bin shen')<br/>('1678435', 'Bao-Di Liu', 'bao-di liu')<br/>('34913796', 'Qifan Wang', 'qifan wang')<br/>('3047254', 'Yi Fang', 'yi fang')<br/>('1741931', 'Jan P. Allebach', 'jan p. allebach')</td><td>bshen@purdue.edu, thu.liubaodi@gmail.com, wang868@purdue.edu, yfang@scu.edu, allebach@ecn.purdue.edu
+</td></tr><tr><td>192235f5a9e4c9d6a28ec0d333e36f294b32f764</td><td>Reconfiguring the Imaging Pipeline for Computer Vision
+<br/><b>Cornell University</b><br/><b>Carnegie Mellon University</b><br/><b>Cornell University</b></td><td>('2328520', 'Mark Buckler', 'mark buckler')<br/>('39131476', 'Suren Jayasuriya', 'suren jayasuriya')<br/>('2138184', 'Adrian Sampson', 'adrian sampson')</td><td></td></tr><tr><td>19878141fbb3117d411599b1a74a44fc3daf296d</td><td>Eye-State Action Unit Detection by Gabor Wavelets
+<br/><b>Robotics Institute, Carnegie Mellon University, Pittsburgh, PA</b><br/><b>University of Pittsburgh, Pittsburgh, PA</b><br/>http://www.cs.cmu.edu/face
+</td><td>('40383812', 'Ying-li Tian', 'ying-li tian')<br/>('1737918', 'Jeffrey F. Cohn', 'jeffrey f. cohn')</td><td>Email: fyltian, tkg@cs.cmu.edu
+<br/>jeffcohn@pitt.edu
+</td></tr><tr><td>19f076998ba757602c8fec04ce6a4ca674de0e25</td><td>Turk J Elec Eng & Comp Sci
+<br/>(2016) 24: 219 { 233
+<br/>c⃝ T (cid:127)UB_ITAK
+<br/>doi:10.3906/elk-1304-139
+<br/>Fast and de-noise support vector machine training method based on fuzzy
+<br/>clustering method for large real world datasets
+<br/>(cid:3)
+<br/><b>Islamic Azad University, Gonabad, Iran</b><br/>Received: 15.04.2013
+<br/>(cid:15)
+<br/>Accepted/Published Online: 30.10.2013
+<br/>(cid:15)
+<br/>Final Version: 01.01.2016
+</td><td>('9437627', 'Omid Naghash ALMASI', 'omid naghash almasi')<br/>('4945660', 'Modjtaba ROUHANI', 'modjtaba rouhani')</td><td></td></tr><tr><td>19eb486dcfa1963c6404a9f146c378fc7ae3a1df</td><td></td><td></td><td></td></tr><tr><td>4c6daffd092d02574efbf746d086e6dc0d3b1e91</td><td></td><td></td><td></td></tr><tr><td>4cb8a691a15e050756640c0a35880cdd418e2b87</td><td>Class-based matching of object parts
+<br/>Department of Computer Science and Applied Mathematics
+<br/><b>Weizmann Institute of Science</b><br/>Rehovot, ISRAEL 76100
+</td><td>('1938475', 'Evgeniy Bart', 'evgeniy bart')<br/>('1743045', 'Shimon Ullman', 'shimon ullman')</td><td>(cid:0)evgeniy.bart, shimon.ullman(cid:1)@weizmann.ac.il
+</td></tr><tr><td>4cc681239c8fda3fb04ba7ac6a1b9d85b68af31d</td><td>Mining Spatial and Spatio-Temporal ROIs for Action Recognition
+<br/>Jiang Wang2 Alan Yuille1,3
+<br/><b>University of California, Los Angeles</b><br/><b>Baidu Research, USA 3John Hopkins University</b></td><td>('5964529', 'Xiaochen Lian', 'xiaochen lian')</td><td>{lianxiaochen@,yuille@stat.}ucla.edu
+<br/>{chenzhuoyuan,yangyi05,wangjiang03}@baidu.com
+</td></tr><tr><td>4c6e1840451e1f86af3ef1cb551259cb259493ba</td><td>HAND POSTURE DATASET CREATION FOR GESTURE
+<br/>RECOGNITION
+<br/>Instituto de Sistemas Inteligentes y Aplicaciones Numericas en Ingenieria
+<br/>Campus Universitario de Tafira, 35017 Gran Canaria, Spain
+<br/>Departamento de E.I.O. y Computacion
+<br/>38271 Universidad de La Laguna, Spain
+<br/>Keywords:
+<br/>Image understanding, Gesture recognition, Hand dataset.
+</td><td>('1706692', 'Luis Anton-Canalis', 'luis anton-canalis')<br/>('1797958', 'Elena Sanchez-Nielsen', 'elena sanchez-nielsen')</td><td>lanton@iusiani.ulpgc.es
+<br/>enielsen@ull.es
+</td></tr><tr><td>4c87aafa779747828054cffee3125fcea332364d</td><td>View-Constrained Latent Variable Model
+<br/>for Multi-view Facial Expression Classification
+<br/><b>Imperial College London, UK</b><br/><b>EEMCS, University of Twente, The Netherlands</b></td><td>('2308430', 'Stefanos Eleftheriadis', 'stefanos eleftheriadis')<br/>('1729713', 'Ognjen Rudovic', 'ognjen rudovic')<br/>('1694605', 'Maja Pantic', 'maja pantic')</td><td>{s.eleftheriadis,o.rudovic,m.pantic}@imperial.ac.uk
+</td></tr><tr><td>4c29e1f31660ba33e46d7e4ffdebb9b8c6bd5adc</td><td></td><td></td><td></td></tr><tr><td>4cdae53cebaeeebc3d07cf6cd36fecb2946f3e56</td><td>Photorealistic Facial Texture Inference Using Deep Neural Networks
+<br/>*Pinscreen
+<br/><b>University of Southern California</b><br/><b>USC Institute for Creative Technologies</b><br/>Figure 1: We present an inference framework based on deep neural networks for synthesizing photorealistic facial texture
+<br/>along with 3D geometry from a single unconstrained image. We can successfully digitize historic figures that are no longer
+<br/>available for scanning and produce high-fidelity facial texture maps with mesoscopic skin details.
+</td><td>('2059597', 'Shunsuke Saito', 'shunsuke saito')<br/>('1792471', 'Lingyu Wei', 'lingyu wei')<br/>('1808579', 'Liwen Hu', 'liwen hu')<br/>('1897417', 'Koki Nagano', 'koki nagano')<br/>('1706574', 'Hao Li', 'hao li')</td><td></td></tr><tr><td>4c8e5fc0877d066516bb63e6c31eb1b8b5f967eb</td><td>MODI, KOVASHKA: CONFIDENCE AND DIVERSITY FOR ACTIVE SELECTION
+<br/>Confidence and Diversity for Active
+<br/>Selection of Feedback in Image Retrieval
+<br/>Department of Computer Science
+<br/><b>University of Pittsburgh</b><br/>Pittsburgh, PA, USA
+</td><td>('1770205', 'Adriana Kovashka', 'adriana kovashka')</td><td>bhavin_modi@hotmail.com
+<br/>kovashka@cs.pitt.edu
+</td></tr><tr><td>4c8ef4f98c6c8d340b011cfa0bb65a9377107970</td><td>Sentiment Recognition in Egocentric
+<br/>Photostreams
+<br/><b>Intelligent Systems Group, University of Groningen, The Netherlands</b><br/><b>University of Barcelona, Spain</b><br/>3 Computer Vision Center, Barcelona, Spain
+</td><td>('1742086', 'Nicola Strisciuglio', 'nicola strisciuglio')<br/>('1730388', 'Nicolai Petkov', 'nicolai petkov')<br/>('1724155', 'Petia Radeva', 'petia radeva')</td><td>e.talavera.martinez@rug.nl,
+</td></tr><tr><td>4c822785c29ceaf67a0de9c699716c94fefbd37d</td><td>A Key Volume Mining Deep Framework for Action Recognition
+<br/>2 SenseTime Group Limited
+<br/><b>Tsinghua University</b><br/><b>Shenzhen Institutes of Advanced Technology, CAS, China</b><br/>Figure 1. Key volumes detected by our key volume mining deep framework. A volume is a spatial-temporal video clip. The top row shows
+<br/>key volumes are very sparse among the whole video, and the second row shows that key volumes may come from different modalities
+<br/>(different motion patterns here). Note that frames are sampled with fixed time interval.
+</td><td>('2121584', 'Wangjiang Zhu', 'wangjiang zhu')<br/>('1748341', 'Jie Hu', 'jie hu')<br/>('1687740', 'Gang Sun', 'gang sun')<br/>('2032273', 'Xudong Cao', 'xudong cao')<br/>('40612284', 'Yu Qiao', 'yu qiao')</td><td></td></tr><tr><td>4c815f367213cc0fb8c61773cd04a5ca8be2c959</td><td>978-1-4244-4296-6/10/$25.00 ©2010 IEEE
+<br/>2470
+<br/>ICASSP 2010
+</td><td></td><td></td></tr><tr><td>4ccf64fc1c9ca71d6aefdf912caf8fea048fb211</td><td>Light-weight Head Pose Invariant Gaze Tracking
+<br/><b>University of Maryland</b><br/>NVIDIA
+<br/>NVIDIA
+</td><td>('48467498', 'Rajeev Ranjan', 'rajeev ranjan')<br/>('24817039', 'Shalini De Mello', 'shalini de mello')<br/>('1690538', 'Jan Kautz', 'jan kautz')</td><td>rranjan1@umiacs.umd.edu
+<br/>shalinig@nvidia.com
+<br/>jkautz@nvidia.com
+</td></tr><tr><td>4cdb6144d56098b819076a8572a664a2c2d27f72</td><td>Face Synthesis for Eyeglass-Robust Face
+<br/>Recognition
+<br/><b>CBSRandNLPR, Institute of Automation, Chinese Academy of Sciences, Beijing, China</b><br/><b>University of Chinese Academy of Sciences, Beijing, China</b></td><td>('46220439', 'Jianzhu Guo', 'jianzhu guo')<br/>('8362374', 'Xiangyu Zhu', 'xiangyu zhu')<br/>('1718623', 'Zhen Lei', 'zhen lei')<br/>('34679741', 'Stan Z. Li', 'stan z. li')</td><td>{jianzhu.guo,xiangyu.zhu,zlei,szli}@nlpr.ia.ac.cn
+</td></tr><tr><td>4c4e49033737467e28aa2bb32f6c21000deda2ef</td><td>Improving Landmark Localization with Semi-Supervised Learning
+<br/><b>MILA-University of Montreal, 2NVIDIA, 3Ecole Polytechnique of Montreal, 4CIFAR, 5Facebook AI Research</b></td><td>('25056820', 'Sina Honari', 'sina honari')<br/>('2824500', 'Pavlo Molchanov', 'pavlo molchanov')<br/>('2342481', 'Stephen Tyree', 'stephen tyree')<br/>('1707326', 'Pascal Vincent', 'pascal vincent')<br/>('1690538', 'Jan Kautz', 'jan kautz')</td><td>1{honaris, vincentp}@iro.umontreal.ca,
+<br/>2{pmolchanov, styree, jkautz}@nvidia.com, 3christopher.pal@polymtl.ca
+</td></tr><tr><td>4c6233765b5f83333f6c675d3389bbbf503805e3</td><td>Real-time High Performance Deformable Model for Face Detection in the Wild
+<br/>Center for Biometrics and Security Research & National Laboratory of Pattern Recognition
+<br/><b>Institute of Automation, Chinese Academy of Sciences, China</b></td><td>('1721677', 'Junjie Yan', 'junjie yan')<br/>('2520795', 'Xucong Zhang', 'xucong zhang')<br/>('1718623', 'Zhen Lei', 'zhen lei')<br/>('34679741', 'Stan Z. Li', 'stan z. li')</td><td>{jjyan,xczhang,zlei,szli}@nlpr.ia.ac.cn
+</td></tr><tr><td>4c078c2919c7bdc26ca2238fa1a79e0331898b56</td><td>Unconstrained Facial Landmark Localization with Backbone-Branches
+<br/>Fully-Convolutional Networks
+<br/><b>Sun Yat-Sen University</b><br/>Guangzhou Higher Education Mega Center, Guangzhou 510006, PR China
+</td><td>('1742286', 'Zhujin Liang', 'zhujin liang')<br/>('2442939', 'Shengyong Ding', 'shengyong ding')<br/>('1737218', 'Liang Lin', 'liang lin')</td><td>alfredtofu@gmail.com, marcding@163.com, linliang@ieee.org
+</td></tr><tr><td>4cfa8755fe23a8a0b19909fa4dec54ce6c1bd2f7</td><td>EFFICIENT LIKELIHOOD BAYESIAN CONSTRAINED LOCAL MODEL
+<br/><b>The Hong Kong Polytechnic University</b><br/><b>Hong Kong Applied Science and Technology Research Institute Company Limited, Hong Kong, China</b></td><td>('2116302', 'Hailiang Li', 'hailiang li')<br/>('1703078', 'Kin-Man Lam', 'kin-man lam')<br/>('3280193', 'Man-Yau Chiu', 'man-yau chiu')<br/>('2233216', 'Kangheng Wu', 'kangheng wu')<br/>('1982263', 'Zhibin Lei', 'zhibin lei')</td><td>harley.li@connect.polyu.hk,{harleyli, edmondchiu, khwu, lei}@astri.org, enkmlam@polyu.edu.hk
+</td></tr><tr><td>4cac9eda716a0addb73bd7ffea2a5fb0e6ec2367</td><td>Representing Videos based on Scene Layouts
+<br/>for Recognizing Agent-in-Place Actions
+<br/><b>University of Maryland, College Park</b><br/>2Comcast Applied AI Research
+<br/>3DeepMind
+<br/>4Adobe Research
+</td><td>('2180291', 'Ruichi Yu', 'ruichi yu')<br/>('3254319', 'Hongcheng Wang', 'hongcheng wang')<br/>('7674316', 'Jingxiao Zheng', 'jingxiao zheng')</td><td>{richyu, jxzheng, lsd}@umiacs.umd.edu
+<br/>anglili@google.com morariu@adobe.com
+</td></tr><tr><td>4c4236b62302957052f1bbfbd34dbf71ac1650ec</td><td>SEMI-SUPERVISED FACE RECOGNITION WITH LDA SELF-TRAINING
+<br/>Multimedia Communications Department, EURECOM
+<br/>2229 Route des Crêtes , BP 193, F-06560 Sophia-Antipolis Cedex, France
+</td><td>('37560971', 'Xuran Zhao', 'xuran zhao')<br/>('1709849', 'Jean-Luc Dugelay', 'jean-luc dugelay')</td><td>{zhaox, evans, dugelay}@eurecom.fr
+</td></tr><tr><td>4cd0da974af9356027a31b8485a34a24b57b8b90</td><td>Binarized Convolutional Landmark Localizers for Human Pose Estimation and
+<br/>Face Alignment with Limited Resources
+<br/><b>Computer Vision Laboratory, The University of Nottingham</b><br/>Nottingham, United Kingdom
+</td><td>('3458121', 'Adrian Bulat', 'adrian bulat')<br/>('2610880', 'Georgios Tzimiropoulos', 'georgios tzimiropoulos')</td><td>{adrian.bulat, yorgos.tzimiropoulos}@nottingham.ac.uk
+</td></tr><tr><td>4c170a0dcc8de75587dae21ca508dab2f9343974</td><td>FaceTracer: A Search Engine for
+<br/>Large Collections of Images with Faces
+<br/><b>Columbia University</b></td><td>('40631426', 'Neeraj Kumar', 'neeraj kumar')</td><td></td></tr><tr><td>4c81c76f799c48c33bb63b9369d013f51eaf5ada</td><td>Multi-modal Score Fusion and Decision Trees for Explainable Automatic Job
+<br/>Candidate Screening from Video CVs
+<br/><b>Nam k Kemal University, Tekirda g, Turkey</b><br/><b>Bo gazic i University, Istanbul, Turkey</b></td><td>('38007788', 'Heysem Kaya', 'heysem kaya')<br/>('1764521', 'Albert Ali Salah', 'albert ali salah')</td><td>hkaya@nku.edu.tr, furkan.gurpinar@boun.edu.tr,salah@boun.edu.tr
+</td></tr><tr><td>4c1ce6bced30f5114f135cacf1a37b69bb709ea1</td><td>Gaze Direction Estimation by Component Separation for
+<br/>Recognition of Eye Accessing Cues
+<br/>Ruxandra Vrˆanceanu
+<br/>Image Processing and Analysis Laboratory
+<br/><b>University Politehnica of Bucharest, Romania, Address Splaiul Independent ei</b><br/>Image Processing and Analysis Laboratory
+<br/><b>University Politehnica of Bucharest, Romania, Address Splaiul Independent ei</b><br/>Image Processing and Analysis Laboratory
+<br/><b>University Politehnica of Bucharest, Romania, Address Splaiul Independent ei</b><br/>Image Processing and Analysis Laboratory
+<br/><b>University Politehnica of Bucharest, Romania, Address Splaiul Independent ei</b></td><td>('2760434', 'Corneliu Florea', 'corneliu florea')<br/>('2143956', 'Laura Florea', 'laura florea')<br/>('2905899', 'Constantin Vertan', 'constantin vertan')</td><td>rvranceanu@imag.pub.ro
+<br/>corneliu.florea@upb.ro
+<br/>laura.florea@upb.ro
+<br/>constantin.vertan@upb.ro
+</td></tr><tr><td>4c5b38ac5d60ab0272145a5a4d50872c7b89fe1b</td><td>Facial Expression Recognition with Emotion-Based
+<br/>Feature Fusion
+<br/><b>The Hong Kong Polytechnic University, Hong Kong, SAR, 2University of Technology Sydney, Australia</b></td><td>('13671251', 'Cigdem Turan', 'cigdem turan')<br/>('1703078', 'Kin-Man Lam', 'kin-man lam')<br/>('1706670', 'Xiangjian He', 'xiangjian he')</td><td>E-mail: cigdem.turan@connect.polyu.hk, enkmlam@polyu.edu.hk, Xiangjian.He@uts.edu.au
+</td></tr><tr><td>4c523db33c56759255b2c58c024eb6112542014e</td><td>Patch-based Within-Object Classification∗
+<br/><b>University College London</b><br/><b>MRC Laboratory For Molecular Cell Biology, University College London</b></td><td>('1904148', 'Jania Aghajanian', 'jania aghajanian')<br/>('1734784', 'Jonathan Warrell', 'jonathan warrell')<br/>('1695363', 'Peng Li', 'peng li')<br/>('32948556', 'Jennifer L. Rohn', 'jennifer l. rohn')<br/>('31046411', 'Buzz Baum', 'buzz baum')</td><td>1{j.aghajanian, j.warrell, s.prince, p.li}@cs.ucl.ac.uk 2{j.rohn, b.baum}@ucl.ac.uk
+</td></tr><tr><td>261c3e30bae8b8bdc83541ffa9331b52fcf015e6</td><td>PATEL, SMITH: SFS+3DMM FOR FACE RECOGNITION
+<br/>Shape-from-shading driven 3D Morphable
+<br/>Models for Illumination Insensitive Face
+<br/>Recognition
+<br/>William A.P. Smith
+<br/>Department of Computer Science,
+<br/><b>The University of York</b></td><td>('37519514', 'Ankur Patel', 'ankur patel')</td><td>ankur@cs.york.ac.uk
+<br/>wsmith@cs.york.ac.uk
+</td></tr><tr><td>26f03693c50eb50a42c9117f107af488865f3dc1</td><td>Eigenhill vs. Eigenface and Eigenedge
+<br/><b>Istanbul Technical University</b><br/>Department of Computer Science
+</td><td>('1858702', 'Alper Yilmaz', 'alper yilmaz')<br/>('1766445', 'Muhittin Gökmen', 'muhittin gökmen')</td><td>yilmaz@cs.ucf.edu
+<br/>gokmen@cs.itu.edu.tr
+</td></tr><tr><td>2661f38aaa0ceb424c70a6258f7695c28b97238a</td><td>IEEE TRANSACTIONS ON SYSTEMS, MAN, AND CYBERNETICS—PART B: CYBERNETICS, VOL. 42, NO. 4, AUGUST 2012
+<br/>1027
+<br/>Multilayer Architectures for Facial
+<br/>Action Unit Recognition
+</td><td>('4072965', 'Tingfan Wu', 'tingfan wu')<br/>('2593137', 'Nicholas J. Butko', 'nicholas j. butko')<br/>('12114845', 'Paul Ruvolo', 'paul ruvolo')<br/>('1775637', 'Jacob Whitehill', 'jacob whitehill')<br/>('1741200', 'Javier R. Movellan', 'javier r. movellan')</td><td></td></tr><tr><td>2609079d682998da2bc4315b55a29bafe4df414e</td><td>ON RANK AGGREGATION FOR FACE RECOGNITION FROM VIDEOS
+<br/>IIIT-Delhi, India
+</td><td>('2559473', 'Himanshu S. Bhatt', 'himanshu s. bhatt')<br/>('39129417', 'Richa Singh', 'richa singh')<br/>('2338122', 'Mayank Vatsa', 'mayank vatsa')</td><td></td></tr><tr><td>264a84f4d27cd4bca94270620907cffcb889075c</td><td>Deep Motion Features for Visual Tracking
+<br/><b>Computer Vision Laboratory, Link oping University, Sweden</b></td><td>('8161428', 'Susanna Gladh', 'susanna gladh')<br/>('2488938', 'Martin Danelljan', 'martin danelljan')<br/>('2358803', 'Fahad Shahbaz Khan', 'fahad shahbaz khan')<br/>('2228323', 'Michael Felsberg', 'michael felsberg')</td><td></td></tr><tr><td>26d407b911d1234e8e3601e586b49316f0818c95</td><td>[POSTER] Feasibility of Corneal Imaging for Handheld Augmented Reality
+<br/><b>Coburg University</b></td><td>('37101400', 'Daniel Schneider', 'daniel schneider')<br/>('2708269', 'Jens Grubert', 'jens grubert')</td><td></td></tr><tr><td>26a44feb7a64db7986473ca801c251aa88748477</td><td>Journal of Machine Learning Research 1 ()
+<br/>Submitted ; Published
+<br/>Unsupervised Learning of Gaussian Mixture Models with a
+<br/>Uniform Background Component
+<br/>Department of Statistics
+<br/><b>Florida State University</b><br/>Tallahassee, FL 32306-4330, USA
+<br/>Department of Statistics
+<br/><b>Florida State University</b><br/>Tallahassee, FL 32306-4330, USA
+<br/>Editor:
+</td><td>('2761870', 'Sida Liu', 'sida liu')<br/>('2455529', 'Adrian Barbu', 'adrian barbu')</td><td>sida.liu@stat.fsu.edu
+<br/>abarbu@stat.fsu.edu
+</td></tr><tr><td>264f7ab36ff2e23a1514577a6404229d7fe1242b</td><td>Facial Expression Recognition by De-expression Residue Learning
+<br/>Department of Computer Science
+<br/><b>State University of New York at Binghamton, USA</b></td><td>('2671017', 'Huiyuan Yang', 'huiyuan yang')<br/>('8072251', 'Lijun Yin', 'lijun yin')</td><td>{hyang51, uciftci}@binghamton.edu; lijun@cs.binghamton.edu
+</td></tr><tr><td>26a72e9dd444d2861298d9df9df9f7d147186bcd</td><td>DOI 10.1007/s00138-016-0768-4
+<br/>ORIGINAL PAPER
+<br/>Collecting and annotating the large continuous action dataset
+<br/>Received: 18 June 2015 / Revised: 18 April 2016 / Accepted: 22 April 2016 / Published online: 21 May 2016
+<br/>© The Author(s) 2016. This article is published with open access at Springerlink.com
+</td><td>('2089428', 'Daniel Paul Barrett', 'daniel paul barrett')</td><td></td></tr><tr><td>266766818dbc5a4ca1161ae2bc14c9e269ddc490</td><td>Article
+<br/>Boosting a Low-Cost Smart Home Environment with
+<br/>Usage and Access Control Rules
+<br/><b>Institute of Information Science and Technologies of CNR (CNR-ISTI)-Italy, 56124 Pisa, Italy</b><br/>Received: 27 April 2018; Accepted: 31 May 2018; Published: 8 June 2018
+</td><td>('1773887', 'Paolo Barsocchi', 'paolo barsocchi')<br/>('38567341', 'Antonello Calabrò', 'antonello calabrò')<br/>('1693901', 'Erina Ferro', 'erina ferro')<br/>('2209975', 'Claudio Gennaro', 'claudio gennaro')<br/>('1709783', 'Eda Marchetti', 'eda marchetti')<br/>('2508924', 'Claudio Vairo', 'claudio vairo')</td><td>antonello.calabro@isti.cnr.it (A.C.); erina.ferro@isti.cnr.it (E.F.); claudio.gennaro@isti.cnr.it (C.G.);
+<br/>eda.marchetti@isti.cnr.it (E.M.); claudio.vairo@isti.cnr.it (C.V.)
+<br/>* Correspondence: paolo.barsocchi@isti.cnr.it; Tel.: +39-050-315-2965
+</td></tr><tr><td>265af79627a3d7ccf64e9fe51c10e5268fee2aae</td><td>1817
+<br/>A Mixture of Transformed Hidden Markov
+<br/>Models for Elastic Motion Estimation
+</td><td>('1932096', 'Huijun Di', 'huijun di')<br/>('3265275', 'Linmi Tao', 'linmi tao')<br/>('1797002', 'Guangyou Xu', 'guangyou xu')</td><td></td></tr><tr><td>267c6e8af71bab68547d17966adfaab3b4711e6b</td><td></td><td></td><td></td></tr><tr><td>26af867977f90342c9648ccf7e30f94470d40a73</td><td>IJIRST –International Journal for Innovative Research in Science & Technology| Volume 3 | Issue 04 | September 2016
+<br/>ISSN (online): 2349-6010
+<br/>Joint Gender and Face Recognition System for
+<br/>RGB-D Images with Texture and DCT Features
+<br/>PG Student
+<br/>Department of Computer Science & Information Systems
+<br/><b>Federal Institute of Science and Technology, Mookkannoor</b><br/>PO, Angamaly, Ernakulam, Kerala 683577, India
+<br/>Prasad J. C.
+<br/>Associate Professor
+<br/>Department of Computer Science & Engineering
+<br/><b>Federal Institute of Science and Technology, Mookkannoor</b><br/>PO, Angamaly, Ernakulam, Kerala 683577, India
+</td><td></td><td></td></tr><tr><td>26a89701f4d41806ce8dbc8ca00d901b68442d45</td><td></td><td></td><td></td></tr><tr><td>26c884829897b3035702800937d4d15fef7010e4</td><td>IEICE TRANS. INF. & SYST., VOL.Exx–??, NO.xx XXXX 200x
+<br/>PAPER
+<br/>Facial Expression Recognition by Supervised Independent
+<br/>Component Analysis using MAP Estimation
+<br/>, Member
+<br/>SUMMARY Permutation ambiguity of the classical Inde-
+<br/>pendent Component Analysis (ICA) may cause problems in fea-
+<br/>ture extraction for pattern classification. Especially when only a
+<br/>small subset of components is derived from data, these compo-
+<br/>nents may not be most distinctive for classification, because ICA
+<br/>is an unsupervised method. We include a selective prior for de-
+<br/>mixing coefficients into the classical ICA to alleviate the problem.
+<br/>Since the prior is constructed upon the classification information
+<br/>from the training data, we refer to the proposed ICA model with
+<br/>a selective prior as a supervised ICA (sICA). We formulated the
+<br/>learning rule for sICA by taking a Maximum a Posteriori (MAP)
+<br/>scheme and further derived a fixed point algorithm for learning
+<br/>the de-mixing matrix. We investigate the performance of sICA
+<br/>in facial expression recognition from the aspects of both correct
+<br/>rate of recognition and robustness even with few independent
+<br/>components.
+<br/>key words:
+<br/>dent component analysis, fixed-point algorithm
+<br/>facial expression recognition, supervised indepen-
+<br/>1.
+<br/>Introduction
+<br/>Various methods have been proposed for auto-
+<br/>matic recognition of facial expression in the past several
+<br/>decades, which could be roughly classified into three
+<br/>categories: 1) Appearance-based method, represented
+<br/>by eigenfaces, fisherfaces and other methods using
+<br/>machine-learning techniques, such as neural networks
+<br/>and Support Vector Machine (SVM); 2) Model-based
+<br/><b>methods, including graph matching, optical- ow-based</b><br/>method and others; and 3) Hybrids of appearance based
+<br/>and model-based methods, such as Active Appearance
+<br/>Model (AAM). Detailed review of these methods could
+<br/>be found in two surveys in Refs.[1][2]. Appearance-
+<br/>based methods are superior to model-based methods
+<br/>in system complexity and performance reproducibil-
+<br/>ity. Further, appearance-based methods allow efficient
+<br/>characterization of a low-dimensional subspace within
+<br/>the overall space of raw image measurement, which
+<br/>deepen our understanding of facial expressions from
+<br/>their manifolds in subspace, and provide a statistical
+<br/>framework for the theoretical analysis of system per-
+<br/>formance. ICA, a powerful technique for blind source
+<br/>separation, was applied to facial expression recognition
+<br/>by Bartlett et al. for feature extraction.[3] They argued
+<br/>that facial expression consists of those features standing
+<br/>for minor, non-rigid, local variations of faces[3]. Struc-
+<br/>Manuscript received January 1, 200x.
+<br/>Manuscript revised January 1, 200x.
+<br/>Final manuscript received January 1, 200x.
+<br/>The author is with the school of information science,
+<br/><b>Japan Advanced Institute of Science and Technology</b><br/>tural information for those local variations are related
+<br/>to higher-order statistics, which could be well extracted
+<br/>by ICA.[5] The efficiency of ICA in extracting features
+<br/>for facial expression recognition has been verified by
+<br/>many previous works.[4][6][7]
+<br/>The major purpose of the present work is to im-
+<br/>prove the performance of ICA in facial expression recog-
+<br/>nition. In the classical ICA, the derived independent
+<br/>components are in random order, i.e., permutation am-
+<br/>biguity, where the original order provides no informa-
+<br/>tion on the significance of components in discrimina-
+<br/>tion.[8] As a result, the derived independent compo-
+<br/>nents may not be most distinctive for the classification
+<br/>task, especially when only a small subset of compo-
+<br/>nents is derived. Feature selection must be performed
+<br/>along with the feature extraction. The selection can
+<br/>be applied before, during or after ICA. In Ref.[4], Best
+<br/>Individual Feature (BIF) selection was adopted, where
+<br/>features were chosen according to some defined criteria
+<br/>individually. Methods by means of Sequential Forward
+<br/>Selection (SFS) and Sequential Floating Forward Se-
+<br/>lection (SFFS) were also proposed. [9] Since the selec-
+<br/>tion is performed after ICA, the features are limited to
+<br/>those chosen from the set of independent components
+<br/>obtained. To create a candidate set with enough repre-
+<br/>sentative features for discrimination, a large number of
+<br/>independent components should be learned, which may
+<br/>be expensive in computational cost. It is meaningful
+<br/>to search for a way to affect the selection of features
+<br/>before or during ICA. GEMC [10] makes a selection
+<br/>before ICA by heuristically replacing PCA with a dis-
+<br/>criminant analysis as the pre-processing to ICA, which
+<br/>still lacks a mathematical explanation. ICA in a local
+<br/>facial residue space is also proposed for face recognition,
+<br/>which can be regarded as using the pre-specified residue
+<br/>space to limit the selection of independent components
+<br/>before applying ICA. [11]
+<br/>We propose an approach to implement the feature
+<br/>selection during the learning of independent compo-
+<br/>nents. A constraint ICA has been proposed for the
+<br/>analysis of EEG signals, where all components should
+<br/>be sparse and close to a supplied reference signal by
+<br/>including a correlation term. [12] In our case, we try to
+<br/>design a method to let those components with higher
+<br/>degrees of class separation emerge easier than others.
+<br/>The classical ICA in Ref.[13] was shown to be deriv-
+<br/>able under the scheme of Maximum Log-Likelihood
+</td><td>('1753878', 'Fan Chen', 'fan chen')<br/>('1791753', 'Kazunori Kotani', 'kazunori kotani')</td><td></td></tr><tr><td>266ed43dcea2e7db9f968b164ca08897539ca8dd</td><td>Beyond Principal Components: Deep Boltzmann Machines for Face Modeling
+<br/><b>Concordia University, Computer Science and Software Engineering, Montr eal, Qu ebec, Canada</b><br/><b>Carnegie Mellon University, CyLab Biometrics Center, Pittsburgh, PA, USA</b></td><td>('1876581', 'Chi Nhan Duong', 'chi nhan duong')<br/>('1769788', 'Khoa Luu', 'khoa luu')<br/>('2687827', 'Kha Gia Quach', 'kha gia quach')<br/>('1699922', 'Tien D. Bui', 'tien d. bui')</td><td>1 {c duon, k q, bui}@encs.concordia.ca, 2 kluu@andrew.cmu.edu
+</td></tr><tr><td>26ad6ceb07a1dc265d405e47a36570cb69b2ace6</td><td>RESEARCH AND EXPLOR ATORY
+<br/>DEVELOPMENT DEPARTMENT
+<br/>REDD-2015-384
+<br/>Neural Correlates of Cross-Cultural
+<br/>How to Improve the Training and Selection for
+<br/>Military Personnel Involved in Cross-Cultural
+<br/>Operating Under Grant #N00014-12-1-0629/113056
+<br/>Adaptation
+<br/>September, 2015
+<br/>Interactions
+<br/>Prepared for:
+<br/>Office of Naval Research
+</td><td>('20444535', 'Jonathon Kopecky', 'jonathon kopecky')<br/>('29125372', 'Alice Jackson', 'alice jackson')</td><td></td></tr><tr><td>2642810e6c74d900f653f9a800c0e6a14ca2e1c7</td><td>Projection Bank: From High-dimensional Data to Medium-length Binary Codes
+<br/>Department of Computer Science and Digital Technologies
+<br/><b>Northumbria University, Newcastle upon Tyne, NE1 8ST, UK</b></td><td>('40017778', 'Li Liu', 'li liu')<br/>('9452165', 'Mengyang Yu', 'mengyang yu')<br/>('40799321', 'Ling Shao', 'ling shao')</td><td>li2.liu@northumbria.ac.uk, m.y.yu@ieee.org, ling.shao@ieee.org
+</td></tr><tr><td>26437fb289cd7caeb3834361f0cc933a02267766</td><td>2012 International Conference on Management and Education Innovation
+<br/>IPEDR vol.37 (2012) © (2012) IACSIT Press, Singapore
+<br/>Innovative Assessment Technologies: Comparing ‘Face-to-Face’ and
+<br/>Game-Based Development of Thinking Skills in Classroom Settings
+<br/><b>University of Szeged, 2 E tv s Lor nd University</b></td><td>('39201903', 'Gyöngyvér Molnár', 'gyöngyvér molnár')<br/>('32197908', 'András Lőrincz', 'andrás lőrincz')</td><td></td></tr><tr><td>26e570049aaedcfa420fc8c7b761bc70a195657c</td><td>J Sign Process Syst
+<br/>DOI 10.1007/s11265-017-1276-0
+<br/>Hybrid Facial Regions Extraction for Micro-expression
+<br/>Recognition System
+<br/>Received: 2 February 2016 / Revised: 20 October 2016 / Accepted: 10 August 2017
+<br/>© Springer Science+Business Media, LLC 2017
+</td><td>('39888137', 'Sze-Teng Liong', 'sze-teng liong')<br/>('2339975', 'John See', 'john see')<br/>('37809010', 'Su-Wei Tan', 'su-wei tan')</td><td></td></tr><tr><td>2654ef92491cebeef0997fd4b599ac903e48d07a</td><td>Facial Expression Recognition from Near-Infrared Video Sequences
+<br/>1. Machine Vision Group, Infotech Oulu and Department of Electrical and Information
+<br/>Engineering,
+<br/><b>P. O. Box 4500 FI-90014 University of Oulu, Finland</b><br/><b>Institute of Automation, Chinese Academy of Sciences</b><br/>P. O. Box 95 Zhongguancun Donglu, Beijing 100080, China
+</td><td>('2021982', 'Matti Taini', 'matti taini')<br/>('1757287', 'Guoying Zhao', 'guoying zhao')<br/>('34679741', 'Stan Z. Li', 'stan z. li')<br/>('1714724', 'Matti Pietikäinen', 'matti pietikäinen')</td><td>E-mail: {matti.taini,gyzhao,mkp}@ee.oulu.fi
+<br/>E-mail: szli@cbsr.ia.ac.cn
+</td></tr><tr><td>2679e4f84c5e773cae31cef158eb358af475e22f</td><td>Adaptive Deep Metric Learning for Identity-Aware Facial Expression Recognition
+<br/><b>Carnegie Mellon University, Pittsburgh, PA</b><br/><b>Changchun Institute of Optics, Fine Mechanics and Physics, Chinese Academy of Science</b><br/><b>The Hong Kong Polytechnic University, Hong Kong, China</b><br/><b>University of Chinese Academy of Sciences, Beijing, China</b></td><td>('1790207', 'Xiaofeng Liu', 'xiaofeng liu')<br/>('1748883', 'Jane You', 'jane you')<br/>('37774211', 'Ping Jia', 'ping jia')</td><td>liuxiaofeng@cmu.edu, kumar@ece.cmu.edu, csyjia@comp.polyu.edu.hk, jiap@ciomp.ac.cn
+</td></tr><tr><td>21ef129c063bad970b309a24a6a18cbcdfb3aff5</td><td>POUR L'OBTENTION DU GRADE DE DOCTEUR ÈS SCIENCESacceptée sur proposition du jury:Dr J.-M. Vesin, président du juryProf. J.-Ph. Thiran, Prof. D. Sander, directeurs de thèseProf. M. F. Valstar, rapporteurProf. H. K. Ekenel, rapporteurDr S. Marcel, rapporteurIndividual and Inter-related Action Unit Detection in Videos for Affect RecognitionTHÈSE NO 6837 (2016)ÉCOLE POLYTECHNIQUE FÉDÉRALE DE LAUSANNEPRÉSENTÉE LE 19 FÉVRIER 2016À LA FACULTÉ DES SCIENCES ET TECHNIQUES DE L'INGÉNIEURLABORATOIRE DE TRAITEMENT DES SIGNAUX 5PROGRAMME DOCTORAL EN GÉNIE ÉLECTRIQUE Suisse2016PARAnıl YÜCE </td><td></td><td></td></tr><tr><td>218b2c5c9d011eb4432be4728b54e39f366354c1</td><td>Enhancing Training Collections for Image
+<br/>Annotation: An Instance-Weighted Mixture
+<br/>Modeling Approach
+</td><td>('1793498', 'Neela Sawant', 'neela sawant')<br/>('40116905', 'Jia Li', 'jia li')</td><td></td></tr><tr><td>217a21d60bb777d15cd9328970cab563d70b5d23</td><td>Hidden Factor Analysis for Age Invariant Face Recognition
+<br/>1Shenzhen Key Lab of Computer Vision and Pattern Recognition
+<br/><b>Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences, China</b><br/><b>Toyota Technological Institute at Chicago</b><br/><b>The Chinese University of Hong Kong</b><br/>4Media Lab, Huawei Technologies Co. Ltd., China
+</td><td>('2856494', 'Dihong Gong', 'dihong gong')<br/>('1911510', 'Zhifeng Li', 'zhifeng li')<br/>('1807606', 'Dahua Lin', 'dahua lin')<br/>('7137861', 'Jianzhuang Liu', 'jianzhuang liu')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td>dh.gong@siat.ac.cn
+<br/>zhifeng.li@siat.ac.cn
+<br/>dhlin@ttic.edu
+<br/>liu.jianzhuang@huawei.com
+<br/>xtang@ie.cuhk.edu.hk
+</td></tr><tr><td>21e828071249d25e2edaca0596e27dcd63237346</td><td></td><td></td><td></td></tr><tr><td>21a2f67b21905ff6e0afa762937427e92dc5aa0b</td><td>Hindawi
+<br/>Computational Intelligence and Neuroscience
+<br/>Volume 2017, Article ID 8710492, 13 pages
+<br/>https://doi.org/10.1155/2017/8710492
+<br/>Research Article
+<br/>Extra Facial Landmark Localization via
+<br/>Global Shape Reconstruction
+<br/><b>School of Automation Engineering, University of Electronic Science and Technology of China, No. 2006, Xiyuan Ave</b><br/>West Hi-Tech Zone, Chengdu 611731, China
+<br/>Received 4 January 2017; Revised 26 March 2017; Accepted 4 April 2017; Published 23 April 2017
+<br/>Academic Editor: Elio Masciari
+<br/>which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
+<br/>Localizing facial landmarks is a popular topic in the field of face analysis. However, problems arose in practical applications such
+<br/>as handling pose variations and partial occlusions while maintaining moderate training model size and computational efficiency
+<br/>still challenges current solutions. In this paper, we present a global shape reconstruction method for locating extra facial landmarks
+<br/>comparing to facial landmarks used in the training phase. In the proposed method, the reduced configuration of facial landmarks
+<br/>is first decomposed into corresponding sparse coefficients. Then explicit face shape correlations are exploited to regress between
+<br/>sparse coefficients of different facial landmark configurations. Finally extra facial landmarks are reconstructed by combining the
+<br/>pretrained shape dictionary and the approximation of sparse coefficients. By applying the proposed method, both the training
+<br/>time and the model size of a class of methods which stack local evidences as an appearance descriptor can be scaled down with
+<br/>only a minor compromise in detection accuracy. Extensive experiments prove that the proposed method is feasible and is able to
+<br/>reconstruct extra facial landmarks even under very asymmetrical face poses.
+<br/>1. Introduction
+<br/>Facial landmark localization is the first and a crucial step for
+<br/>many face analysis tasks such as face recognition [1], cartoon
+<br/>facial animation [2, 3], and facial expression understanding
+<br/>[4, 5]. Most facial landmarks are located along the dominant
+<br/>contours around facial features like eyebrows, nose, and
+<br/>mouth. Therefore facial landmarks on a face image jointly
+<br/>describe a face shape which lies in the shape space [6].
+<br/>For the last ten years remarkable progress has been
+<br/>made in the field of facial
+<br/>landmark localization [7, 8].
+<br/>Among a large number of proposed methods, the most
+<br/>popular solution is to treat the facial landmark localiza-
+<br/>tion problem as a holistic shape regression process and
+<br/>to learn a general regression model from labeled training
+<br/>images [9, 10]. Following this shape regression idea, various
+<br/>methods try to model a regression function that directly
+<br/>maps the appearance of images to landmark coordinates
+<br/>without the need of computing a parametric model. All
+<br/>facial landmarks in a shape are iterated collectively and the
+<br/>relationship between facial landmarks is flexibly embedded
+<br/>into the iteration process. On the other hand, to generate
+<br/>enough description of face images, multiscale local feature
+<br/>descriptors are typically adopted in most shape regression
+<br/>methods. For example, cascaded pose regression (CPR) [7]
+<br/>was first proposed to estimate general object poses with pose-
+<br/>indexed features and then extended for the problem of face
+<br/>alignment in explicit shape regression (ESR) [11] method.
+<br/>ESR combines two-level boosting regression, shape-indexed
+<br/>features, and correlation-based feature selection. As another
+<br/>example, supervised descent method (SDM) [12] and its
+<br/>extensions also have shown an impressive performance in the
+<br/>field of facial landmark localization. These kinds of methods
+<br/>stack shape-indexed high dimension feature descriptors and
+<br/>train regression functions from a supervised gradient descent
+<br/>view.
+<br/>However, facial landmark localization still meets great
+<br/>challenges in practical applications, such as handling pose
+<br/>variations and partial occlusion while maintaining moderate
+<br/>training model size and computational efficiency. In SDM
+<br/>and its improved methods, the dimension of regression
+</td><td>('9684590', 'Shuqiu Tan', 'shuqiu tan')<br/>('2915473', 'Dongyi Chen', 'dongyi chen')<br/>('9486108', 'Chenggang Guo', 'chenggang guo')<br/>('2122143', 'Zhiqi Huang', 'zhiqi huang')<br/>('9684590', 'Shuqiu Tan', 'shuqiu tan')</td><td>Correspondence should be addressed to Dongyi Chen; dychen@uestc.edu.cn
+</td></tr><tr><td>2162654cb02bcd10794ae7e7d610c011ce0fb51b</td><td>4697
+<br/>978-1-4799-5751-4/14/$31.00 ©2014 IEEE
+<br/>1http://www.skype.com/
+<br/>2http://www.google.com/hangouts/
+<br/>tification, sparse coding
+</td><td></td><td></td></tr><tr><td>21258aa3c48437a2831191b71cd069c05fb84cf7</td><td>A Robust and Efficient Doubly Regularized
+<br/>Metric Learning Approach
+<br/>Siemens Corporate Research, Princeton, NJ, 08540
+<br/><b>CISE, University of Florida, Gainesville, FL</b></td><td>('35582088', 'Meizhu Liu', 'meizhu liu')<br/>('1733005', 'Baba C. Vemuri', 'baba c. vemuri')</td><td></td></tr><tr><td>21f3c5b173503185c1e02a3eb4e76e13d7e9c5bc</td><td>m a s s a c h u s e t t s i n s t i t u t e o f
+<br/>t e c h n o l o g y — a r t i f i c i a l i n t e l l i g e n c e l a b o r a t o r y
+<br/>Rotation Invariant Real-time
+<br/>Face Detection and
+<br/>Recognition System
+<br/>AI Memo 2001-010
+<br/>CBCL Memo 197
+<br/>May 31, 2001
+<br/>© 2 0 0 1 m a s s a c h u s e t t s i n s t i t u t e o f
+<br/>t e c h n o l o g y, c a m b r i d g e , m a 0 2 1 3 9 u s a — w w w. a i . m i t . e d u
+</td><td>('35541734', 'Purdy Ho', 'purdy ho')</td><td></td></tr><tr><td>21bd9374c211749104232db33f0f71eab4df35d5</td><td>Integrating Facial Makeup Detection Into
+<br/>Multimodal Biometric User Verification System
+<br/>CuteSafe Technology Inc.
+<br/>Gebze, Kocaeli, Turkey
+<br/>Eurecom Digital Security Department
+<br/>06410 Biot, France
+</td><td>('39935459', 'Ekberjan Derman', 'ekberjan derman')<br/>('3179061', 'Chiara Galdi', 'chiara galdi')<br/>('1709849', 'Jean-Luc Dugelay', 'jean-luc dugelay')</td><td>ekberjan.derman@cutesafe.com
+<br/>{chiara.galdi, jean-luc.dugelay}@eurecom.fr
+</td></tr><tr><td>214db8a5872f7be48cdb8876e0233efecdcb6061</td><td>Semantic-aware Co-indexing for Image Retrieval
+<br/><b>NEC Laboratories America, Inc</b><br/>2Dept. of CS, Univ. of Texas at San Antonio
+<br/>Cupertino, CA 95014
+<br/>San Antonio, TX 78249
+</td><td>('1776581', 'Shiliang Zhang', 'shiliang zhang')<br/>('2909406', 'Ming Yang', 'ming yang')<br/>('3991189', 'Xiaoyu Wang', 'xiaoyu wang')<br/>('1695082', 'Yuanqing Lin', 'yuanqing lin')<br/>('1713616', 'Qi Tian', 'qi tian')</td><td>{myang,xwang,ylin}@nec-labs.com
+<br/>slzhang.jdl@gmail.com qitian@cs.utsa.edu
+</td></tr><tr><td>21104bcf07ef0269ab133471a3200b9bf94b2948</td><td>Beyond Comparing Image Pairs: Setwise Active Learning for Relative Attributes
+<br/><b>University of Texas at Austin</b></td><td>('2548555', 'Lucy Liang', 'lucy liang')<br/>('1794409', 'Kristen Grauman', 'kristen grauman')</td><td></td></tr><tr><td>214ac8196d8061981bef271b37a279526aab5024</td><td>Face Recognition Using Smoothed High-Dimensional
+<br/>Representation
+<br/>Center for Machine Vision Research, PO Box 4500,
+<br/><b>FI-90014 University of Oulu, Finland</b></td><td>('32683737', 'Juha Ylioinas', 'juha ylioinas')<br/>('1776374', 'Juho Kannala', 'juho kannala')<br/>('1751372', 'Abdenour Hadid', 'abdenour hadid')</td><td></td></tr><tr><td>213a579af9e4f57f071b884aa872651372b661fd</td><td>Int J Comput Vis
+<br/>DOI 10.1007/s11263-013-0672-6
+<br/>Automatic and Efficient Human Pose Estimation for Sign
+<br/>Language Videos
+<br/>Received: 4 February 2013 / Accepted: 29 October 2013
+<br/>© Springer Science+Business Media New York 2013
+</td><td>('36326860', 'James Charles', 'james charles')<br/>('1688869', 'Andrew Zisserman', 'andrew zisserman')</td><td></td></tr><tr><td>21626caa46cbf2ae9e43dbc0c8e789b3dbb420f1</td><td>978-1-4673-2533-2/12/$26.00 ©2012 IEEE
+<br/>1437
+<br/>ICIP 2012
+</td><td></td><td></td></tr><tr><td>217de4ff802d4904d3f90d2e24a29371307942fe</td><td>POOF: Part-Based One-vs-One Features for Fine-Grained Categorization, Face
+<br/>Verification, and Attribute Estimation
+<br/><b>Columbia University</b><br/><b>Columbia University</b></td><td>('1778562', 'Thomas Berg', 'thomas berg')<br/>('1767767', 'Peter N. Belhumeur', 'peter n. belhumeur')</td><td>tberg@cs.columbia.edu
+<br/>belhumeur@cs.columbia.edu
+</td></tr><tr><td>2135a3d9f4b8f5771fa5fc7c7794abf8c2840c44</td><td>Lessons from Collecting a Million Biometric Samples
+<br/><b>University of Notre Dame</b><br/>Notre Dame, IN 46556, USA
+<br/><b>National Institute of Standards and Technology</b><br/>Gaithersburg, MD 20899, USA
+</td><td>('1704876', 'Patrick J. Flynn', 'patrick j. flynn')<br/>('1799014', 'Kevin W. Bowyer', 'kevin w. bowyer')<br/>('32028519', 'P. Jonathon Phillips', 'p. jonathon phillips')</td><td>flynn@cse.nd.edu
+<br/>kwb@cse.nd.edu
+<br/>jonathon@nist.gov
+</td></tr><tr><td>210b98394c3be96e7fd75d3eb11a391da1b3a6ca</td><td>Spatiotemporal Derivative Pattern: A Dynamic
+<br/>Texture Descriptor for Video Matching
+<br/>Saeed Mian3
+<br/><b>Tafresh University, Tafresh, Iran</b><br/><b>Electrical Eng. Dep., Central Tehran Branch, Islamic Azad University, Tehran, Iran</b><br/><b>Computer Science and Software Engineering, The University of Western Australia</b><br/>WA 6009, Australia
+</td><td>('3046235', 'Farshid Hajati', 'farshid hajati')<br/>('2014145', 'Mohammad Tavakolian', 'mohammad tavakolian')<br/>('2997971', 'Soheila Gheisari', 'soheila gheisari')</td><td>{hajati,m_tavakolian}@tafreshu.ac.ir
+<br/>s.gheisari@iauctb.ac.ir
+<br/>ajmal.mian@uwa.edu.au
+</td></tr><tr><td>21765df4c0224afcc25eb780bef654cbe6f0bc3a</td><td>Multi-Channel Correlation Filters
+<br/><b>National University of Singapore</b><br/><b>National University of Singapore</b><br/>Singapore
+<br/>Singapore
+<br/>CSIRO
+<br/>Australia
+</td><td>('2860592', 'Hamed Kiani Galoogahi', 'hamed kiani galoogahi')<br/>('1715286', 'Terence Sim', 'terence sim')<br/>('1820249', 'Simon Lucey', 'simon lucey')</td><td>hkiani@comp.nus.edu.sg
+<br/>tsim@comp.nus.edu.sg
+<br/>simon.lucey@csiro.au
+</td></tr><tr><td>21b16df93f0fab4864816f35ccb3207778a51952</td><td>Recognition of Static Gestures applied to Brazilian Sign Language (Libras)
+<br/><b>Math Institute</b><br/>Department of Technology, Department of Exact Sciences
+<br/><b>Federal University of Bahia (UFBA</b><br/><b>State University of Feira de Santana (UEFS</b><br/>Salvador, Brazil
+<br/>Feira de Santana, Brazil
+</td><td>('2009399', 'Igor L. O. Bastos', 'igor l. o. bastos')<br/>('3057269', 'Michele F. Angelo', 'michele f. angelo')<br/>('2563043', 'Angelo C. Loula', 'angelo c. loula')</td><td>igorcrexito@gmail.com
+<br/>mfangelo@uefs.ecomp.br, angelocl@gmail.com
+</td></tr><tr><td>212608e00fc1e8912ff845ee7a4a67f88ba938fc</td><td>Coupled Deep Learning for Heterogeneous Face Recognition
+<br/>Center for Research on Intelligent Perception and Computing (CRIPAC),
+<br/>National Laboratory of Pattern Recognition (NLPR),
+<br/><b>Institute of Automation, Chinese Academy of Sciences, Beijing, P. R. China</b></td><td>('2225749', 'Xiang Wu', 'xiang wu')<br/>('3051419', 'Lingxiao Song', 'lingxiao song')<br/>('1705643', 'Ran He', 'ran he')<br/>('1688870', 'Tieniu Tan', 'tieniu tan')</td><td>alfredxiangwu@gmail.com, {lingxiao.song, rhe, tnt}@nlpr.ia.ac.cn
+</td></tr><tr><td>4d49c6cff198cccb21f4fa35fd75cbe99cfcbf27</td><td>Topological Principal Component Analysis for
+<br/>face encoding and recognition
+<br/>Juan J. Villanueva
+<br/>Computer Vision Center and Departament d’Informàtica, Edifici O, Universitat
+<br/>Autònoma de Barcelona
+</td><td>('38034605', 'Albert Pujol', 'albert pujol')<br/>('2997661', 'Felipe Lumbreras', 'felipe lumbreras')</td><td></td></tr><tr><td>4d625677469be99e0a765a750f88cfb85c522cce</td><td>Understanding Hand-Object Manipulation
+<br/>with Grasp Types and Object Attributes
+<br/><b>Institute of Industrial Science</b><br/><b>The University of Tokyo, Japan</b><br/><b>Robotics Institute</b><br/><b>Carnegie Mellon University, USA</b><br/><b>Institute of Industrial Science</b><br/><b>The University of Tokyo, Japan</b></td><td>('3172280', 'Minjie Cai', 'minjie cai')<br/>('37991449', 'Kris M. Kitani', 'kris m. kitani')<br/>('9467266', 'Yoichi Sato', 'yoichi sato')</td><td>cai-mj@iis.u-tokyo.ac.jp
+<br/>kkitani@cs.cmu.edu
+<br/>ysato@iis.u-tokyo.ac.jp
+</td></tr><tr><td>4da735d2ed0deeb0cae4a9d4394449275e316df2</td><td>Gothenburg, Sweden, June 19-22, 2016
+<br/>978-1-5090-1820-8/16/$31.00 ©2016 IEEE
+<br/>1410
+</td><td></td><td></td></tr><tr><td>4d15254f6f31356963cc70319ce416d28d8924a3</td><td>Quo vadis Face Recognition?
+<br/><b>Robotics Institute</b><br/><b>Carnegie Mellon University</b><br/>Pittsburgh, PA 15213
+<br/>Department of Psychology
+<br/><b>University of Pittsburgh</b><br/>Pittsburgh, PA 15260
+</td><td>('33731953', 'Ralph Gross', 'ralph gross')<br/>('1838212', 'Jianbo Shi', 'jianbo shi')<br/>('1737918', 'Jeffrey F. Cohn', 'jeffrey f. cohn')</td><td>{rgross,jshi}@cs.cmu.edu
+<br/>jeffcohn@pitt.edu
+</td></tr><tr><td>4d530a4629671939d9ded1f294b0183b56a513ef</td><td>International Journal of Machine Learning and Computing, Vol. 2, No. 4, August 2012
+<br/>Facial Expression Classification Method Based on Pseudo
+<br/>Zernike Moment and Radial Basis Function Network
+<br/>
+</td><td>('2009230', 'Tran Binh Long', 'tran binh long')<br/>('2710459', 'Le Hoang Thai', 'le hoang thai')<br/>('1971778', 'Tran Hanh', 'tran hanh')</td><td></td></tr><tr><td>4d2975445007405f8cdcd74b7fd1dd547066f9b8</td><td>Image and Video Processing
+<br/>for Affective Applications
+</td><td>('1694605', 'Maja Pantic', 'maja pantic')</td><td></td></tr><tr><td>4df889b10a13021928007ef32dc3f38548e5ee56</td><td></td><td></td><td></td></tr><tr><td>4d6462fb78db88afff44561d06dd52227190689c</td><td>Face-to-Face Social Activity Detection Using
+<br/>Data Collected with a Wearable Device
+<br/>1 Computer Vision Center, Campus UAB, Edifici O, Bellaterra, Barcelona, Spain
+<br/><b>Dep. of Applied Mathematics and Analysis, University of Barcelona, Spain</b><br/>http://www.cvc.uab.es, http://www.maia.ub.es
+</td><td>('7629833', 'Pierluigi Casale', 'pierluigi casale')<br/>('9783922', 'Oriol Pujol', 'oriol pujol')<br/>('1724155', 'Petia Radeva', 'petia radeva')</td><td>pierluigi@cvc.uab.es
+</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td></td><td></td><td></td></tr><tr><td>4db9e5f19366fe5d6a98ca43c1d113dac823a14d</td><td>Combining Crowdsourcing and Face Recognition to Identify Civil War Soldiers
+<br/>Are 1,000 Features Worth A Picture?
+<br/>Department of Computer Science and Center for Human-Computer Interaction
+<br/>Virginia Tech, Arlington, VA, USA
+</td><td>('32698591', 'Vikram Mohanty', 'vikram mohanty')<br/>('51219402', 'David Thames', 'david thames')<br/>('2427623', 'Kurt Luther', 'kurt luther')</td><td></td></tr><tr><td>4dd6d511a8bbc4d9965d22d79ae6714ba48c8e41</td><td></td><td></td><td></td></tr><tr><td>4de757faa69c1632066391158648f8611889d862</td><td>International Journal of Advanced Engineering Research and Science (IJAERS) Vol-3, Issue-3 , March- 2016]
+<br/>ISSN: 2349-6495
+<br/>Review of Face Recognition Technology Using
+<br/>Feature Fusion Vector
+<br/><b>S.R.C.E.M, Banmore, RGPV, University, Bhopal, Madhya Pradesh, India</b><br/>
+</td><td></td><td></td></tr><tr><td>4dd71a097e6b3cd379d8c802460667ee0cbc8463</td><td>Real-time Multi-view Facial Landmark Detector
+<br/>Learned by the Structured Output SVM
+<br/>1 Center for Machine Perception, Department of Cybernetics, Faculty of Electrical Engineering, Czech
+<br/><b>Technical University in Prague, 166 27 Prague 6, Technická 2 Czech Republic</b><br/><b>National Institute of Informatics, Tokyo, Japan</b></td><td>('39492787', 'Diego Thomas', 'diego thomas')<br/>('1691286', 'Akihiro Sugimoto', 'akihiro sugimoto')</td><td></td></tr><tr><td>4db0968270f4e7b3fa73e41c50d13d48e20687be</td><td>Fashion Forward: Forecasting Visual Style in Fashion
+<br/><b>Karlsruhe Institute of Technology, 76131 Karlsruhe, Germany</b><br/><b>The University of Texas at Austin, 78701 Austin, USA</b></td><td>('2256981', 'Ziad Al-Halah', 'ziad al-halah')<br/>('1742325', 'Rainer Stiefelhagen', 'rainer stiefelhagen')<br/>('1794409', 'Kristen Grauman', 'kristen grauman')</td><td>{ziad.al-halah, rainer.stiefelhagen}@kit.edu, grauman@cs.utexas.edu
+</td></tr><tr><td>4d9c02567e7b9e065108eb83ea3f03fcff880462</td><td>Towards Facial Expression Recognition in the Wild: A New Database and Deep
+<br/>Recognition System
+<br/><b>School of Electronics and Information, Northwestern Polytechnical University, China</b></td><td>('3411701', 'Xianlin Peng', 'xianlin peng')<br/>('1917901', 'Zhaoqiang Xia', 'zhaoqiang xia')<br/>('2871379', 'Lei Li', 'lei li')<br/>('4729239', 'Xiaoyi Feng', 'xiaoyi feng')</td><td>pengxl515@163.com, zxia@nwpu.edu.cn, li lei 08@163.com, fengxiao@nwpu.edu.cn
+</td></tr><tr><td>4d7e1eb5d1afecb4e238ba05d4f7f487dff96c11</td><td>978-1-5090-4117-6/17/$31.00 ©2017 IEEE
+<br/>2352
+<br/>ICASSP 2017
+</td><td></td><td></td></tr><tr><td>4d90bab42806d082e3d8729067122a35bbc15e8d</td><td></td><td></td><td></td></tr><tr><td>4d3c4c3fe8742821242368e87cd72da0bd7d3783</td><td>Hybrid Deep Learning for Face Verification
+<br/><b>The Chinese University of Hong Kong</b><br/><b>The Chinese University of Hong Kong</b><br/><b>Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences</b></td><td>('1681656', 'Yi Sun', 'yi sun')<br/>('31843833', 'Xiaogang Wang', 'xiaogang wang')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td>sy011@ie.cuhk.edu.hk
+<br/>xgwang@ee.cuhk.edu.hk
+<br/>xtang@ie.cuhk.edu.hk
+</td></tr><tr><td>4d01d78544ae0de3075304ff0efa51a077c903b7</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 77– No.13, September 2013
+<br/>ART Network based Face Recognition with Gabor Filters
+<br/>Dept. of Computer Science & Engineering
+<br/>Dept. of Computer Science & Engineering
+<br/><b>Jahangirnagar University</b><br/>Savar, Dhaka – 1342, Bangladesh.
+</td><td>('5380965', 'Md. Mozammel Haque', 'md. mozammel haque')<br/>('39604645', 'Md. Al-amin Bhuiyan', 'md. al-amin bhuiyan')</td><td></td></tr><tr><td>4dd2be07b4f0393995b57196f8fc79d666b3aec5</td><td>3572
+<br/>2014 IEEE International Conference on Acoustic, Speech and Signal Processing (ICASSP)
+<br/>978-1-4799-2893-4/14/$31.00 ©2014 IEEE
+<br/>EXPRESSION RECOGNITION
+<br/>Dept. of Electronic Engineering
+<br/><b>Yeungnam University</b><br/>Gyeongsan, Korea
+<br/>1. INTRODUCTION
+</td><td>('9215658', 'Rama Chellappa', 'rama chellappa')<br/>('1685841', 'Chan-Su Lee', 'chan-su lee')</td><td></td></tr><tr><td>4d8ce7669d0346f63b20393ffaa438493e7adfec</td><td>Similarity Features for Facial Event Analysis
+<br/><b>Rutgers University, Piscataway NJ 08854, USA</b><br/>2 National Laboratory of Pattern Recognition, Chinese Academy of Sciences
+<br/>Beijing, 100080, China
+</td><td>('39606160', 'Peng Yang', 'peng yang')<br/>('1734954', 'Qingshan Liu', 'qingshan liu')</td><td>peyang@cs.rutgers.edu
+</td></tr><tr><td>4d6ad0c7b3cf74adb0507dc886993e603c863e8c</td><td>Human Activity Recognition Based on Wearable
+<br/>Sensor Data: A Standardization of the
+<br/>State-of-the-Art
+<br/>Smart Surveillance Interest Group, Computer Science Department
+<br/>Universidade Federal de Minas Gerais, Brazil
+</td><td>('2954974', 'Antonio C. Nazare', 'antonio c. nazare')</td><td>Email: {arturjordao, antonio.nazare, jessicasena, william}@dcc.ufmg.br
+</td></tr><tr><td>4d16337cc0431cd43043dfef839ce5f0717c3483</td><td>A Scalable and Privacy-Aware IoT Service for Live Video Analytics
+<br/><b>Carnegie Mellon University</b><br/><b>Carnegie Mellon University</b><br/>Intel Labs
+<br/>Norman Sadeh
+<br/><b>Carnegie Mellon University</b><br/><b>Carnegie Mellon University</b><br/><b>Carnegie Mellon University</b></td><td>('3196473', 'Junjue Wang', 'junjue wang')<br/>('1773498', 'Brandon Amos', 'brandon amos')<br/>('1802347', 'Padmanabhan Pillai', 'padmanabhan pillai')<br/>('1732721', 'Anupam Das', 'anupam das')<br/>('1747303', 'Mahadev Satyanarayanan', 'mahadev satyanarayanan')</td><td>junjuew@cs.cmu.edu
+<br/>bamos@cs.cmu.edu
+<br/>padmanabhan.s.pillai@intel.com
+<br/>sadeh@cs.cmu.edu
+<br/>anupamd@cs.cmu.edu
+<br/>satya@cs.cmu.edu
+</td></tr><tr><td>4d0b3921345ae373a4e04f068867181647d57d7d</td><td>Learning attributes from human gaze
+<br/>Department of Computer Science
+<br/><b>University of Pittsburgh</b><br/>IEEE 2017 Winter
+<br/>Conference on
+<br/>Applications of
+<br/>Computer Vision
+</td><td>('1916866', 'Nils Murrugarra-Llerena', 'nils murrugarra-llerena')<br/>('1770205', 'Adriana Kovashka', 'adriana kovashka')</td><td></td></tr><tr><td>4dca3d6341e1d991c902492952e726dc2a443d1c</td><td>Learning towards Minimum Hyperspherical Energy
+<br/><b>Georgia Institute of Technology 2Emory University</b><br/><b>South China University of Technology 4NVIDIA 5Google Brain 6Ant Financial</b></td><td>('36326884', 'Weiyang Liu', 'weiyang liu')<br/>('10035476', 'Rongmei Lin', 'rongmei lin')<br/>('46270580', 'Zhen Liu', 'zhen liu')<br/>('47968201', 'Lixin Liu', 'lixin liu')<br/>('1751019', 'Zhiding Yu', 'zhiding yu')<br/>('47175326', 'Bo Dai', 'bo dai')<br/>('1779453', 'Le Song', 'le song')</td><td></td></tr><tr><td>4d0ef449de476631a8d107c8ec225628a67c87f9</td><td>© 2010 IEEE. Personal use of this material is permitted. Permission from IEEE
+<br/>must be obtained for all other uses, in any current or future media, including
+<br/>reprinting/republishing this material for advertising or promotional purposes,
+<br/>creating new collective works, for resale or redistribution to servers or lists, or
+<br/>reuse of any copyrighted component of this work in other works.
+<br/>Pre-print of article that appeared at BTAS 2010.
+<br/>The published article can be accessed from:
+<br/>http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=5634517
+</td><td></td><td></td></tr><tr><td>4d47261b2f52c361c09f7ab96fcb3f5c22cafb9f</td><td>Deep multi-frame face super-resolution
+<br/>Evgeniya Ustinova, Victor Lempitsky
+<br/>October 17, 2017
+</td><td></td><td></td></tr><tr><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td><td>Behav Res (2015) 47:1122–1135
+<br/>DOI 10.3758/s13428-014-0532-5
+<br/>The Chicago face database: A free stimulus set of faces
+<br/>and norming data
+<br/>Published online: 13 January 2015
+<br/><b>Psychonomic Society, Inc</b></td><td>('2428798', 'Joshua Correll', 'joshua correll')</td><td></td></tr><tr><td>75879ab7a77318bbe506cb9df309d99205862f6c</td><td>Analysis Of Emotion Recognition From Facial
+<br/>Expressions Using Spatial And Transform Domain
+<br/>Methods
+</td><td>('2855399', 'P. Suja', 'p. suja')<br/>('2510426', 'Shikha Tripathi', 'shikha tripathi')</td><td></td></tr><tr><td>7574f999d2325803f88c4915ba8f304cccc232d1</td><td>Transfer Learning For Cross-Dataset Recognition: A Survey
+<br/>This paper summarises and analyses the cross-dataset recognition transfer learning techniques with the
+<br/>emphasis on what kinds of methods can be used when the available source and target data are presented
+<br/>in different forms for boosting the target task. This paper for the first time summarises several transferring
+<br/>criteria in details from the concept level, which are the key bases to guide what kind of knowledge to transfer
+<br/>between datasets. In addition, a taxonomy of cross-dataset scenarios and problems is proposed according the
+<br/>properties of data that define how different datasets are diverged, thereby review the recent advances on
+<br/>each specific problem under different scenarios. Moreover, some real world applications and corresponding
+<br/>commonly used benchmarks of cross-dataset recognition are reviewed. Lastly, several future directions are
+<br/>identified.
+<br/>Additional Key Words and Phrases: Cross-dataset, transfer learning, domain adaptation
+<br/>1. INTRODUCTION
+<br/>It has been explored how human would transfer learning in one context to another
+<br/>similar context [Woodworth and Thorndike 1901; Perkins et al. 1992] in the field of
+<br/>Psychology and Education. For example, learning to drive a car helps a person later
+<br/>to learn more quickly to drive a truck, and learning mathematics prepares students to
+<br/>study physics. The machine learning algorithms are mostly inspired by human brains.
+<br/>However, most of them require a huge amount of training examples to learn a new
+<br/>model from scratch and fail to apply knowledge learned from previous domains or
+<br/>tasks. This may be due to that a basic assumption of statistical learning theory is
+<br/>that the training and test data are drawn from the same distribution and belong to
+<br/>the same task. Intuitively, learning from scratch is not realistic and practical, because
+<br/>it violates how human learn things. In addition, manually labelling a large amount
+<br/>of data for new domain or task is labour extensive, especially for the modern “data-
+<br/>hungry” and “data-driven” learning techniques (i.e. deep learning). However, the big
+<br/>data era provides a huge amount available data collected for other domains and tasks.
+<br/>Hence, how to use the previously available data smartly for the current task with
+<br/>scarce data will be beneficial for real world applications.
+<br/>To reuse the previous knowledge for current tasks, the differences between old data
+<br/>and new data need to be taken into account. Take the object recognition as an ex-
+<br/>ample. As claimed by Torralba and Efros [2011], despite the great efforts of object
+<br/>datasets creators, the datasets appear to have strong build-in bias caused by various
+<br/>factors, such as selection bias, capture bias, category or label bias, and negative set
+<br/>bias. This suggests that no matter how big the dataset is, it is impossible to cover
+<br/>the complexity of the real visual world. Hence, the dataset bias needs to be consid-
+<br/>ered before reusing data from previous datasets. Pan and Yang [2010] summarise that
+<br/>the differences between different datasets can be caused by domain divergence (i.e.
+<br/>distribution shift or feature space difference) or task divergence (i.e. conditional dis-
+<br/>tribution shift or label space difference), or both. For example, in visual recognition,
+<br/>the distributions between the previous and current data can be discrepant due to the
+<br/>different environments, lighting, background, sensor types, resolutions, view angles,
+<br/>and post-processing. Those external factors may cause the distribution divergence or
+<br/>even feature space divergence between different domains. On the other hand, the task
+<br/>divergence between current and previous data is also ubiquitous. For example, it is
+<br/>highly possible that an animal species that we want to recognize have not been seen
+<br/>ACM Journal Name, Vol. V, No. N, Article A, Publication date: January YYYY.
+</td><td>('47539715', 'Jing Zhang', 'jing zhang')<br/>('40508657', 'Wanqing Li', 'wanqing li')<br/>('1719314', 'Philip Ogunbona', 'philip ogunbona')</td><td></td></tr><tr><td>75fcbb01bc7e53e9de89cb1857a527f97ea532ce</td><td>Detection of Facial Landmarks from Neutral, Happy,
+<br/>and Disgust Facial Images
+<br/>Research Group for Emotions, Sociality, and Computing
+<br/>Tampere Unit for Computer-Human Interaction
+<br/>Department of Computer Sciences
+<br/><b>University of Tampere</b><br/>FIN-33014 Tampere, Finland
+</td><td>('2396729', 'Ioulia Guizatdinova', 'ioulia guizatdinova')<br/>('1718377', 'Veikko Surakka', 'veikko surakka')</td><td>ig74400@cs.uta.fi
+<br/>Veikko.Surakka@uta.fi
+</td></tr><tr><td>757e4cb981e807d83539d9982ad325331cb59b16</td><td>Demographics versus Biometric Automatic
+<br/>Interoperability
+<br/><b>Sapienza University of Rome, Italy</b><br/><b>Biometric and Image Processing Lab, University of Salerno, Italy</b><br/><b>George Mason University, Fairfax Virginia, USA</b></td><td>('1763890', 'Maria De Marsico', 'maria de marsico')<br/>('1795333', 'Michele Nappi', 'michele nappi')<br/>('1772512', 'Daniel Riccio', 'daniel riccio')<br/>('1781577', 'Harry Wechsler', 'harry wechsler')</td><td>demarsico@di.uniroma1.it
+<br/>{mnappi,driccio}@unisa.it
+<br/>wechsler@cs.gmu.edu
+</td></tr><tr><td>75e9a141b85d902224f849ea61ab135ae98e7bfb</td><td></td><td></td><td></td></tr><tr><td>75503aff70a61ff4810e85838a214be484a674ba</td><td>Improved Facial Expression Recognition via Uni-Hyperplane Classification
+<br/>S.W. Chew∗, S. Lucey†, P. Lucey‡, S. Sridharan∗, and J.F. Cohn‡
+</td><td></td><td></td></tr><tr><td>75fd9acf5e5b7ed17c658cc84090c4659e5de01d</td><td>Project-Out Cascaded Regression with an application to Face Alignment
+<br/><b>School of Computer Science, University of Nottingham</b><br/>Contributions. Cascaded regression approaches [2] have been recently
+<br/>shown to achieve state-of-the-art performance for many computer vision
+<br/>tasks. Beyond its connection to boosting, cascaded regression has been in-
+<br/>terpreted as a learning-based approach to iterative optimization methods like
+<br/>the Newton’s method. However, in prior work [1],[4], the connection to op-
+<br/>timization theory is limited only in learning a mapping from image features
+<br/>to problem parameters.
+<br/>In this paper, we consider the problem of facial deformable model fit-
+<br/>ting using cascaded regression and make the following contributions: (a) We
+<br/>propose regression to learn a sequence of averaged Jacobian and Hessian
+<br/>matrices from data, and from them descent directions in a fashion inspired
+<br/>by Gauss-Newton optimization. (b) We show that the optimization problem
+<br/>in hand has structure and devise a learning strategy for a cascaded regres-
+<br/>sion approach that takes the problem structure into account. By doing so, the
+<br/>proposed method learns and employs a sequence of averaged Jacobians and
+<br/>descent directions in a subspace orthogonal to the facial appearance varia-
+<br/>tion; hence, we call it Project-Out Cascaded Regression (PO-CR). (c) Based
+<br/>on the principles of PO-CR, we built a face alignment system that produces
+<br/>remarkably accurate results on the challenging iBUG data set outperform-
+<br/>ing previously proposed systems by a large margin. Code for our system is
+<br/>available from http://www.cs.nott.ac.uk/~yzt/.
+<br/>Shape and appearance models. We use parametric shape and appearance
+<br/>models. An instance of the shape model is given by s(p) = s0 + Sp. An
+<br/>instance of the appearance model is given by A(c) = A0 + Ac.
+<br/>Face alignment via Gauss-Newton optimization. In this section, we for-
+<br/>mulate and solve the non-linear least squares optimization problem for face
+<br/>alignment using Gauss-Newton optimization. This will provide the basis for
+<br/>learning and fitting in PO-CR in the next section. In particular, to localize
+<br/>the landmarks in a new image, we would like to find p and c such that [3]
+<br/>||I(s(p))− A(c)||2.
+<br/>argmin
+<br/>p,c
+<br/>An update for p and c can be found by solving the following problem
+<br/>arg min
+<br/>∆p,∆c
+<br/>||I(s(p)) + JI∆p− A0 − Ac− A∆c||2.
+<br/>(1)
+<br/>(2)
+<br/>By exploiting the problem structure, the calculation for the optimal ∆c at
+<br/>each iteration is not necessary. We end up with the following problem [3]
+<br/>||I(s(p)) + JI∆p− A0||2
+<br/>P,
+<br/>argmin
+<br/>∆p
+<br/>(3)
+<br/>where P = E − AAT is a projection operator that projects out the facial
+<br/>appearance variation from the image Jacobian JI. The solution to the above
+<br/>problem is readily given by
+<br/>∆p = −H−1
+<br/>P JT
+<br/>P (I(s(p))− A0).
+<br/>(4)
+<br/>Face alignment via Project-Out Cascaded Regression. Based on Eqs. (3)
+<br/>and (4), the key idea in PO-CR is to compute from a set of training examples
+<br/>a sequence of averaged Jacobians(cid:98)J(k) from which the facial appearance
+<br/>variation is projected-out and from them and descent directions:
+<br/>Step I. Starting from the ground truth shape parameters p∗
+<br/>i for each
+<br/>training image Ii, i = 1, . . . ,H, we generate a set of K perturbed shape pa-
+<br/>rameters for iteration 1 pi, j(1), j = 1, . . . ,K that capture the statistics of the
+<br/>PO-CR learns the averaged projected-out Jacobian(cid:98)JP(1) = P(cid:98)J(1) for itera-
+<br/>face detection initialization process. Using the set ∆pi, j(1) = p∗
+<br/>i − pi, j(1),
+<br/>tion 1 by solving the following weighted least squares problem
+<br/>||I(s(pi, j(1))) + J(1)∆pi, j(1)− A0||2
+<br/>P,
+<br/>arg min(cid:98)JP(1)
+<br/>i=1
+<br/>j=1
+<br/>Step II. Having computed(cid:98)JP(1), we compute(cid:98)HP(1) =(cid:98)JP(1)T(cid:98)JP(1) .
+<br/>Step III. The descent directions R(1) for iteration 1 are given by
+<br/>R(1) =(cid:98)HP(1)−1(cid:98)JP(1)T .
+<br/>(6)
+<br/>Step IV. For each training sample, a new estimate for its shape parame-
+<br/>ters (to be used at the next iteration) is obtained from
+<br/>pi, j(2) = pi, j(1) + R(1)(I(s(pi, j(1)))− A0).
+<br/>(7)
+<br/>Finally, Steps I-IV are sequentially repeated until convergence and the whole
+<br/>process produces a set of L regressor matrices R(l), l = 1, . . . ,L.
+<br/>During testing, we extract image features I(s(p(k))) and then compute
+<br/>an update for the shape parameters from
+<br/>∆p(k) = R(k)(I(s(p(k)))− A0).
+<br/>(8)
+<br/>Results. We conducted a large number of experiments on LFPW, Helen,
+<br/>AFW and iBUG data sets. In the following figure, we show fitting results
+<br/>from the challenging iBUG data set.
+<br/>Figure 1: Application of PO-CR to the alignment of the iBUG data set.
+<br/>[1] T.F. Cootes, G.J. Edwards, and C.J. Taylor. Active appearance models.
+<br/>TPAMI, 23(6):681–685, 2001.
+<br/>[2] Piotr Dollár, Peter Welinder, and Pietro Perona. Cascaded pose regres-
+<br/>sion. In CVPR, 2010.
+</td><td>('2610880', 'Georgios Tzimiropoulos', 'georgios tzimiropoulos')</td><td></td></tr><tr><td>75308067ddd3c53721430d7984295838c81d4106</td><td>Article
+<br/>Rapid Facial Reactions
+<br/>in Response to Facial
+<br/>Expressions of Emotion
+<br/>Displayed by Real Versus
+<br/>Virtual Faces
+<br/>i-Perception
+<br/>2018 Vol. 9(4), 1–18
+<br/>! The Author(s) 2018
+<br/>DOI: 10.1177/2041669518786527
+<br/>journals.sagepub.com/home/ipe
+<br/><b>LIMSI, CNRS, University of Paris-Sud, Orsay, France</b></td><td>('28174013', 'Jean-Claude Martin', 'jean-claude martin')</td><td></td></tr><tr><td>75cd81d2513b7e41ac971be08bbb25c63c37029a</td><td></td><td></td><td></td></tr><tr><td>75bf3b6109d7a685236c8589f8ead7d769ea863f</td><td>Model Selection with Nonlinear Embedding for Unsupervised Domain Adaptation
+<br/><b>Center for Cognitive Ubiquitous Computing, Arizona State University, Tempe, AZ, USA</b></td><td>('3151995', 'Hemanth Venkateswara', 'hemanth venkateswara')<br/>('2471253', 'Shayok Chakraborty', 'shayok chakraborty')<br/>('1743991', 'Sethuraman Panchanathan', 'sethuraman panchanathan')</td><td>{hemanthv, shayok.chakraborty, troy.mcdaniel, panch}@asu.edu
+</td></tr><tr><td>759cf57215fcfdd8f59c97d14e7f3f62fafa2b30</td><td>Real-time Distracted Driver Posture Classification
+<br/>Department of Computer Science and Engineering, School of Sciences and Engineering
+<br/><b>The American University in Cairo, New Cairo 11835, Egypt</b></td><td>('3434212', 'Yehya Abouelnaga', 'yehya abouelnaga')<br/>('2150605', 'Hesham M. Eraqi', 'hesham m. eraqi')<br/>('2233511', 'Mohamed N. Moustafa', 'mohamed n. moustafa')</td><td>{devyhia,heraqi,m.moustafa}@aucegypt.edu
+</td></tr><tr><td>751970d4fb6f61d1b94ca82682984fd03c74f127</td><td>Array-based Electromyographic Silent Speech Interface
+<br/><b>Cognitive Systems Lab, Karlsruhe Institute of Technology, Karlsruhe, Germany</b><br/>Keywords:
+<br/>EMG, EMG-based Speech Recognition, Silent Speech Interface, Electrode Array
+</td><td>('1723149', 'Michael Wand', 'michael wand')<br/>('2289793', 'Christopher Schulte', 'christopher schulte')<br/>('1684236', 'Matthias Janke', 'matthias janke')<br/>('1713194', 'Tanja Schultz', 'tanja schultz')</td><td>{michael.wand, matthias.janke, tanja.schultz}@kit.edu, christopher.schulte@student.kit.edu
+</td></tr><tr><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td><td>Attribute and Simile Classifiers for Face Verification
+<br/><b>Columbia University</b></td><td>('40631426', 'Neeraj Kumar', 'neeraj kumar')<br/>('39668247', 'Alexander C. Berg', 'alexander c. berg')<br/>('1767767', 'Peter N. Belhumeur', 'peter n. belhumeur')<br/>('1750470', 'Shree K. Nayar', 'shree k. nayar')</td><td></td></tr><tr><td>75ebe1e0ae9d42732e31948e2e9c03d680235c39</td><td>“Hello! My name is... Buffy” – Automatic
+<br/>Naming of Characters in TV Video
+<br/><b>University of Oxford</b></td><td>('3056091', 'Mark Everingham', 'mark everingham')<br/>('1782755', 'Josef Sivic', 'josef sivic')<br/>('1688869', 'Andrew Zisserman', 'andrew zisserman')</td><td>{me,josef,az}@robots.ox.ac.uk
+</td></tr><tr><td>75e5ba7621935b57b2be7bf4a10cad66a9c445b9</td><td></td><td></td><td></td></tr><tr><td>75859ac30f5444f0d9acfeff618444ae280d661d</td><td>Multibiometric Cryptosystems based on Feature
+<br/>Level Fusion
+</td><td>('2743820', 'Abhishek Nagar', 'abhishek nagar')<br/>('34633765', 'Karthik Nandakumar', 'karthik nandakumar')<br/>('6680444', 'Anil K. Jain', 'anil k. jain')</td><td></td></tr><tr><td>758d7e1be64cc668c59ef33ba8882c8597406e53</td><td>IEEE TRANSACTIONS ON AFFECTIVE COMPUTING
+<br/>AffectNet: A Database for Facial Expression,
+<br/>Valence, and Arousal Computing in the Wild
+</td><td>('2314025', 'Ali Mollahosseini', 'ali mollahosseini')<br/>('3093835', 'Mohammad H. Mahoor', 'mohammad h. mahoor')</td><td></td></tr><tr><td>7553fba5c7f73098524fbb58ca534a65f08e91e7</td><td>Available Online at www.ijcsmc.com
+<br/>International Journal of Computer Science and Mobile Computing
+<br/>A Monthly Journal of Computer Science and Information Technology
+<br/>ISSN 2320–088X
+<br/>IJCSMC, Vol. 3, Issue. 6, June 2014, pg.816 – 824
+<br/> RESEARCH ARTICLE
+<br/>A Practical Approach for Determination
+<br/>of Human Gender & Age
+<br/>
+<br/><b>India</b><br/><b>India</b></td><td>('1802780', 'Harpreet Kaur', 'harpreet kaur')<br/>('1802780', 'Harpreet Kaur', 'harpreet kaur')<br/>('38968310', 'Ahsan Hussain', 'ahsan hussain')</td><td>1 hkaur_bhatia23@yahoo.com, 2 ahsanhbaba@gmail.com
+</td></tr><tr><td>751b26e7791b29e4e53ab915bfd263f96f531f56</td><td>Mood Meter: Counting Smiles in the Wild
+<br/>Mohammed (Ehsan) Hoque *
+<br/>Media Lab
+<br/><b>Massachusetts Institute of Technology</b><br/>Cambridge, MA, USA
+</td><td>('2806721', 'Will Drevo', 'will drevo')<br/>('1719389', 'Rosalind W. Picard', 'rosalind w. picard')<br/>('15977480', 'Javier Hernandez', 'javier hernandez')</td><td>{javierhr, mehoque, drevo, picard}@mit.edu
+</td></tr><tr><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td><td>FDDB: A Benchmark for Face Detection in Unconstrained Settings
+<br/><b>University of Massachusetts Amherst</b><br/><b>University of Massachusetts Amherst</b><br/>Amherst MA 01003
+<br/>Amherst MA 01003
+</td><td>('1714536', 'Erik Learned-Miller', 'erik learned-miller')<br/>('2246870', 'Vidit Jain', 'vidit jain')</td><td>elm@cs.umass.edu
+<br/>vidit@cs.umass.edu
+</td></tr><tr><td>75259a613285bdb339556ae30897cb7e628209fa</td><td>Unsupervised Domain Adaptation for Zero-Shot Learning
+<br/><b>Queen Mary University of London, London E1 4NS, UK</b></td><td>('2999293', 'Elyor Kodirov', 'elyor kodirov')<br/>('1700927', 'Tao Xiang', 'tao xiang')<br/>('2073354', 'Shaogang Gong', 'shaogang gong')</td><td>{e.kodirov, t.xiang, z.fu, s.gong}@qmul.ac.uk
+</td></tr><tr><td>754f7f3e9a44506b814bf9dc06e44fecde599878</td><td>Quantized Densely Connected U-Nets for
+<br/>Efficient Landmark Localization
+</td><td>('2986505', 'Zhiqiang Tang', 'zhiqiang tang')<br/>('4340744', 'Xi Peng', 'xi peng')<br/>('1947101', 'Shijie Geng', 'shijie geng')<br/>('3008832', 'Lingfei Wu', 'lingfei wu')<br/>('1753384', 'Shaoting Zhang', 'shaoting zhang')</td><td>1Rutgers University, {zt53, sg1309, dnm}@rutgers.edu
+<br/>2Binghamton University, xpeng@binghamton.edu
+<br/>3IBM T. J. Watson, lwu@email.wm.edu
+<br/>4SenseTime, zhangshaoting@sensetime.com
+</td></tr><tr><td>75249ebb85b74e8932496272f38af274fbcfd696</td><td>Face Identification in Large Galleries
+<br/>Smart Surveillance Interest Group, Department of Computer Science
+<br/>Universidade Federal de Minas Gerais, Belo Horizonte, Brazil
+</td><td>('1679142', 'William Robson Schwartz', 'william robson schwartz')</td><td>rafaelvareto@dcc.ufmg.br, filipe.oc87@gmail.com, william@dcc.ufmg.br
+</td></tr><tr><td>75d2ecbbcc934563dff6b39821605dc6f2d5ffcc</td><td>Capturing Subtle Facial Motions in 3D Face Tracking
+<br/><b>Beckman Institute</b><br/><b>University of Illinois at Urbana-Champaign</b><br/>Urbana, IL 61801
+</td><td>('1735018', 'Zhen Wen', 'zhen wen')<br/>('1739208', 'Thomas S. Huang', 'thomas s. huang')</td><td>{zhenwen, huang}@ifp.uiuc.edu
+</td></tr><tr><td>81a142c751bf0b23315fb6717bc467aa4fdfbc92</td><td>978-1-5090-4117-6/17/$31.00 ©2017 IEEE
+<br/>1767
+<br/>ICASSP 2017
+</td><td></td><td></td></tr><tr><td>81bfe562e42f2eab3ae117c46c2e07b3d142dade</td><td>A Hajj And Umrah Location Classification System For Video
+<br/>Crowded Scenes
+<br/>Adnan A. Gutub†
+<br/><b>Center of Research Excellence in Hajj and Umrah, Umm Al-Qura University, Makkah, KSA</b><br/><b>College of Computers and Information Systems, Umm Al-Qura University, Makkah, KSA</b></td><td>('2872536', 'Hossam M. Zawbaa', 'hossam m. zawbaa')<br/>('1977955', 'Salah A. Aly', 'salah a. aly')</td><td></td></tr><tr><td>81695fbbbea2972d7ab1bfb1f3a6a0dbd3475c0f</td><td><b>UNIVERSITY OF TARTU</b><br/>FACULTY OF SCIENCE AND TECHNOLOGY 
+<br/><b>Institute of Computer Science</b><br/>Computer Science
+<br/>Comparison of Face Recognition
+<br/>Neural Networks 
+<br/>Bachelor's thesis (6 ECST)
+<br/>Supervisor: Tambet Matiisen
+<br/>Tartu 2016
+</td><td></td><td></td></tr><tr><td>8147ee02ec5ff3a585dddcd000974896cb2edc53</td><td>Angular Embedding:
+<br/>A Robust Quadratic Criterion
+<br/>Stella X. Yu, Member,
+<br/>IEEE
+</td><td></td><td></td></tr><tr><td>8199803f476c12c7f6c0124d55d156b5d91314b6</td><td>The iNaturalist Species Classification and Detection Dataset
+<br/>1Caltech
+<br/>2Google
+<br/>3Cornell Tech
+<br/>4iNaturalist
+</td><td>('2996914', 'Grant Van Horn', 'grant van horn')<br/>('13412044', 'Alex Shepard', 'alex shepard')<br/>('1690922', 'Pietro Perona', 'pietro perona')<br/>('50172592', 'Serge Belongie', 'serge belongie')</td><td></td></tr><tr><td>816bd8a7f91824097f098e4f3e0f4b69f481689d</td><td>Latent Semantic Analysis of Facial Action Codes
+<br/>for Automatic Facial Expression Recognition
+<br/>D-ITET/BIWI
+<br/>ETH Zurich
+<br/>Zurich, Switzerland
+<br/><b>IDIAP Research Institute</b><br/>Martigny, Switzerland
+<br/><b>IDIAP Research Institute</b><br/>Martigny, Switzerland
+</td><td>('8745904', 'Beat Fasel', 'beat fasel')<br/>('1824057', 'Florent Monay', 'florent monay')<br/>('1698682', 'Daniel Gatica-Perez', 'daniel gatica-perez')</td><td>bfasel@vision.ee.ethz.ch
+<br/>monay@idiap.ch
+<br/>gatica@idiap.ch
+</td></tr><tr><td>81706277ed180a92d2eeb94ac0560f7dc591ee13</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 55– No.15, October 2012
+<br/>Emotion based Contextual Semantic Relevance
+<br/>Feedback in Multimedia Information Retrieval
+<br/>Department of Computer Engineering, Indian
+<br/><b>Institute of Technology, Banaras Hindu</b><br/><b>University, Varanasi, 221005, India</b><br/>Anil K. Tripathi
+<br/>Department of Computer Engineering, Indian
+<br/><b>Institute of Technology, Banaras Hindu</b><br/><b>University, Varanasi, 221005, India</b><br/>to
+<br/>find some
+<br/>issued by a user
+</td><td>('41132883', 'Karm Veer Singh', 'karm veer singh')</td><td></td></tr><tr><td>81831ed8e5b304e9d28d2d8524d952b12b4cbf55</td><td></td><td></td><td></td></tr><tr><td>81b2a541d6c42679e946a5281b4b9dc603bc171c</td><td>Universit¨at Ulm | 89069 Ulm | Deutschland
+<br/>Fakult¨at f¨ur Ingenieurwissenschaften und Informatik
+<br/>Institut f¨ur Neuroinformatik
+<br/>Direktor: Prof. Dr. G¨unther Palm
+<br/>Semi-Supervised Learning with Committees:
+<br/>Exploiting Unlabeled Data Using Ensemble
+<br/>Learning Algorithms
+<br/>Dissertation zur Erlangung des Doktorgrades
+<br/>Doktor der Naturwissenschaften (Dr. rer. nat.)
+<br/>der Fakult¨at f¨ur Ingenieurwissenschaften und Informatik
+<br/>der Universit¨at Ulm
+<br/>vorgelegt von
+<br/>aus Kairo, ¨Agypten
+<br/>Ulm, Deutschland
+<br/>2010
+</td><td>('1799097', 'Mohamed Farouk Abdel Hady', 'mohamed farouk abdel hady')</td><td></td></tr><tr><td>81e11e33fc5785090e2d459da3ac3d3db5e43f65</td><td>International Journal of Advances in Engineering & Technology, March 2012.
+<br/>©IJAET ISSN: 2231-1963
+<br/>A NOVEL FACE RECOGNITION APPROACH USING A
+<br/>MULTIMODAL FEATURE VECTOR
+<br/><b>Central Mechanical Engineering Research Institute, Durgapur, West Bengal, India</b><br/><b>National Institute of Technology, Durgapur, West Bengal, India</b></td><td>('9155672', 'Jhilik Bhattacharya', 'jhilik bhattacharya')<br/>('40301536', 'Nattami Sekhar', 'nattami sekhar')<br/>('1872045', 'Somajyoti Majumder', 'somajyoti majumder')<br/>('33606010', 'Gautam Sanyal', 'gautam sanyal')</td><td></td></tr><tr><td>81e366ed1834a8d01c4457eccae4d57d169cb932</td><td>Pose-Configurable Generic Tracking of Elongated Objects
+<br/>Multimedia Systems Department
+<br/><b>Gdansk University of Technology</b><br/>Departement Electronique et Physique
+<br/>Institut Mines-Telecom / Telecom SudParis
+</td><td>('2120042', 'Daniel Wesierski', 'daniel wesierski')<br/>('2603633', 'Patrick Horain', 'patrick horain')</td><td>daniel.wesierski@pg.gda.pl
+<br/>patrick.horain@telecom-sudaris.eu
+</td></tr><tr><td>8164ebc07f51c9e0db4902980b5ac3f5a8d8d48c</td><td>Shuffle-Then-Assemble: Learning
+<br/>Object-Agnostic Visual Relationship Features
+<br/>School of Computer Science and Engineering,
+<br/><b>Nanyang Technological University</b></td><td>('47008946', 'Xu Yang', 'xu yang')<br/>('5462268', 'Hanwang Zhang', 'hanwang zhang')<br/>('1688642', 'Jianfei Cai', 'jianfei cai')</td><td>s170018@e.ntu.edu.sg,{hanwangzhang,asjfcai}@ntu.edu.sg
+</td></tr><tr><td>81fc86e86980a32c47410f0ba7b17665048141ec</td><td>Segment-based Methods for Facial Attribute
+<br/>Detection from Partial Faces
+<br/>Department of Electrical and Computer Engineering and the Center for Automation Research,
+<br/><b>UMIACS, University of Maryland, College Park, MD</b></td><td>('3152615', 'Upal Mahbub', 'upal mahbub')</td><td>{umahbub, ssarkar2, rama}@umiacs.umd.edu
+</td></tr><tr><td>8160b3b5f07deaa104769a2abb7017e9c031f1c1</td><td>683
+<br/>Exploiting Discriminant Information in Nonnegative
+<br/>Matrix Factorization With Application
+<br/>to Frontal Face Verification
+</td><td>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')<br/>('1737071', 'Anastasios Tefas', 'anastasios tefas')<br/>('2336758', 'Ioan Buciu', 'ioan buciu')<br/>('1698588', 'Ioannis Pitas', 'ioannis pitas')</td><td></td></tr><tr><td>814d091c973ff6033a83d4e44ab3b6a88cc1cb66</td><td>Behav Res (2016) 48:567–576
+<br/>DOI 10.3758/s13428-015-0601-4
+<br/>The EU-Emotion Stimulus Set: A validation study
+<br/>Published online: 30 September 2015
+<br/><b>Psychonomic Society, Inc</b></td><td>('2625704', 'Delia Pigat', 'delia pigat')<br/>('2391819', 'Shahar Tal', 'shahar tal')<br/>('2100443', 'Ofer Golan', 'ofer golan')<br/>('1884685', 'Simon Baron-Cohen', 'simon baron-cohen')<br/>('3343472', 'Daniel Lundqvist', 'daniel lundqvist')</td><td></td></tr><tr><td>816eff5e92a6326a8ab50c4c50450a6d02047b5e</td><td>fLRR: Fast Low-Rank Representation Using
+<br/>Frobenius Norm
+<br/>Low Rank Representation (LRR) intends to find the representation
+<br/>with lowest-rank of a given data set, which can be formulated as a
+<br/>rank minimization problem. Since the rank operator is non-convex and
+<br/>discontinuous, most of the recent works use the nuclear norm as a convex
+<br/>relaxation. This letter theoretically shows that under some conditions,
+<br/>Frobenius-norm-based optimization problem has a unique solution that
+<br/>is also a solution of the original LRR optimization problem. In other
+<br/>words, it is feasible to apply Frobenius-norm as a surrogate of the
+<br/>nonconvex matrix rank function. This replacement will largely reduce the
+<br/>time-costs for obtaining the lowest-rank solution. Experimental results
+<br/>show that our method (i.e., fast Low Rank Representation, fLRR),
+<br/>performs well in terms of accuracy and computation speed in image
+<br/>clustering and motion segmentation compared with nuclear-norm-based
+<br/>LRR algorithm.
+<br/>Introduction: Given a data set X ∈ Rm×n(m < n) composed of column
+<br/>vectors, let A be a data set composed of vectors with the same dimension
+<br/>as those in X. Both X and A can be considered as matrices. A linear
+<br/>representation of X with respect to A is a matrix Z that satisfies the
+<br/>equation X = AZ. The data set A is called a dictionary. In general, this
+<br/>linear matrix equation will have infinite solutions, and any solution can be
+<br/>considered to be a representation of X associated with the dictionary A. To
+<br/>obtain a unique Z and explore the latent structure of the given data set,
+<br/>various assumptions could be enforced over Z.
+<br/>Liu et al. recently proposed Low Rank Representation (LRR) [1] by
+<br/>assuming that data are approximately sampled from a union of low-rank
+<br/>subspaces. Mathematically, LRR aims at solving
+<br/>min rank(Z)
+<br/>s.t. X = AZ,
+<br/>(1)
+<br/>where rank(Z) could be defined as the number of nonzero eigenvalues of
+<br/>the matrix Z. Clearly, (1) is non-convex and discontinuous, whose convex
+<br/>relaxation is as follows,
+<br/>min kZk∗
+<br/>s.t. X = AZ,
+<br/>(2)
+<br/>where kZk∗ is the nuclear norm, which is a convex and continuous
+<br/>optimization problem.
+<br/>Considering the possible corruptions, the objective function of LRR is
+<br/>min kZk∗ + λkEkp
+<br/>s.t. X = AZ + E,
+<br/>(3)
+<br/>where k · kp could be ℓ1-norm for describing sparse corruption or ℓ2,1-
+<br/>norm for characterizing sample-specified corruption.
+<br/>The above nuclear-norm-based optimization problems are generally
+<br/>solved using Augmented Lagrange Multiplier algorithm (ALM) [2] which
+<br/>requires repeatedly performing Single Value Decomposition (SVD) over
+<br/>Z. Hence, this optimization program is inefficient.
+<br/>Beyond the nuclear-norm, do other norms exist that can be used as
+<br/>a surrogate for the rank-minimization problem in LRR? Can we develop
+<br/>a fast algorithm to calculate LRR? This letter addresses these problems
+<br/>by theoretically showing the equivalence between the solutions of a
+<br/>Frobenius-norm-based problem and the original LRR problem. And we
+<br/>further develop fast Low Rank Representation (fLRR) based on the
+<br/>theoretical results.
+<br/>Theoretical Analysis: In the following analyses, Theorem 1 and
+<br/>Theorem 3 prove that Frobenius-norm-based problem is a surrogate of
+<br/>the rank-minimization problem of LRR in the case of clean data and
+<br/>corrupted ones, respectively. Theorem 2 shows that our Frobenius-norm-
+<br/>based method could produce a block-diagonal Z under some conditions.
+<br/>This property is helpful to subspace clustering.
+<br/>Let A ∈ Rm×n be a matrix with rank r. The full SVD and skinny
+<br/>SVD of A are A = U ΣV T and A = UrΣrV T
+<br/>r , where U and V are two
+<br/>orthogonal matrices with the size of m × m and n × n, respectively. In
+<br/>addition, Σ is an m × n rectangular diagonal matrix, its diagonal elements
+<br/>are nonnegative real numbers. Σr is a r × r diagonal matrix with singular
+<br/>values located on the diagonal in decreasing order, Ur and Vr consist of the
+<br/>first r columns of U and V , respectively. Clearly, Ur and Vr are column
+<br/>orthogonal matrices, i.e., U T
+<br/>r Vr = Ir, where Ir denotes the
+<br/>r Ur = Ir, V T
+<br/>identity matrix with the size of r × r. The pseudoinverse of A is defined
+<br/>by A† = VrΣ−1
+<br/>r U T
+<br/>r .
+<br/>Given a matrix M ∈ Rm×n, the Frobenius norm of M is defined by
+<br/>kM kF =ptrace (M T M ) =qPmin{m,n}
+<br/>value of M . Clearly, kM kF = 0 if and only if M = 0.
+<br/>i=1
+<br/>σ2
+<br/>i , where σi is a singular
+<br/>Lemma 1: Suppose P is a column orthogonal matrix, i.e., P T P = I. Then,
+<br/>kP M kF = kM kF .
+<br/>Lemma 2: For the matrices M and N with same number of columns, it
+<br/>holds that
+<br/>= kM k2
+<br/>F + kN k2
+<br/>F .
+<br/>(4)
+<br/>N (cid:21)(cid:13)(cid:13)(cid:13)(cid:13)
+<br/>(cid:13)(cid:13)(cid:13)(cid:13)
+<br/>(cid:20) M
+<br/>The proofs of the above two lemmas are trivial.
+<br/>Theorem 1:
+<br/>minimization problem
+<br/>Suppose
+<br/>that X ∈ span{A},
+<br/>the Frobenius norm
+<br/>min kZkF
+<br/>s.t. X = AZ,
+<br/>(5)
+<br/>has an unique solution Z ∗ = A†X which is also the lowest-rank solution
+<br/>of LRR in terms of (1).
+<br/>Proof: Let the full and skinny SVDs of A be A = U ΣV T and A =
+<br/>r U T
+<br/>UrΣrV T
+<br/>r .
+<br/>r , respectively. Then, the pseudoinverse of A is A† = VrΣ−1
+<br/>Defining Vc by V T =(cid:20) V T
+<br/>V T
+<br/>(cid:21) and V T
+<br/>c Vr = 0. Moreover, it can be easily
+<br/>checked that Z ∗ satisfies X = AZ ∗ owing to X ∈ span{A}.
+<br/>To prove that Z ∗ is the unique solution of the optimization problem
+<br/>(5), two steps are required. First, we will prove that, for any solution Z of
+<br/>X = AZ, it must hold that kZkF ≥ kZ ∗kF . Using Lemma 1, we have
+<br/>kZkF = (cid:13)(cid:13)(cid:13)(cid:13)
+<br/>= (cid:13)(cid:13)(cid:13)(cid:13)
+<br/>V T
+<br/>(cid:20) V T
+<br/>(cid:20) V T
+<br/>(cid:21) [Z ∗ + (Z − Z ∗)](cid:13)(cid:13)(cid:13)(cid:13)F
+<br/>c (Z − Z ∗) (cid:21)(cid:13)(cid:13)(cid:13)(cid:13)F
+<br/>r (Z − Z ∗)
+<br/>r Z ∗ + V T
+<br/>c Z ∗ + V T
+<br/>V T
+<br/>As A (Z − Z ∗) = 0,
+<br/>r (Z − Z ∗) = 0. Denote B = Σ−1
+<br/>V T
+<br/>V T
+<br/>c Vr = 0, we have V T
+<br/>i.e., UrΣrV T
+<br/>r U T
+<br/>c VrB = 0. Then,
+<br/>r (Z − Z ∗) = 0,
+<br/>r X,
+<br/>follows that
+<br/>then Z ∗ = VrB. Because
+<br/>it
+<br/>c Z ∗ = V T
+<br/>(cid:20)
+<br/>kZkF =(cid:13)(cid:13)(cid:13)(cid:13)
+<br/>V T
+<br/>c (Z − Z ∗) (cid:21)(cid:13)(cid:13)(cid:13)(cid:13)F
+<br/>By Lemma 2,
+<br/>kZk2
+<br/>F = kBk2
+<br/>F + kV T
+<br/>c (Z − Z ∗)k2
+<br/>F ,
+<br/>then, kZkF ≥ kBkF .
+<br/>By Lemma 1,
+<br/>kBkF = kVrBkF = kZ ∗kF ,
+<br/>(6)
+<br/>(7)
+<br/>(8)
+<br/>thus, kZkF ≥ kZ ∗kF for any solution Z of X = AZ.
+<br/>In the second step, we will prove that if there exists another solution Z
+<br/>of (5), Z = Z ∗ must hold. Clearly, Z is a solution of (5) which implies that
+<br/>X = AZ and kZkF = kZ ∗kF . From (7) and (8),
+<br/>kZk2
+<br/>F + kV T
+<br/>F = kZ ∗k2
+<br/>Since kZkF = kZ ∗kF ,
+<br/>c (Z − Z ∗) k2
+<br/>F .
+<br/>c (Z − Z ∗) kF = 0,
+<br/>r (Z − Z ∗) = 0, this gives
+<br/>and so V T
+<br/>V T (Z − Z ∗) = 0. Because V is an orthogonal matrix, it must hold
+<br/>that Z = Z ∗. The above proves that Z ∗ is the unique solution of the
+<br/>optimization problem (5).
+<br/>c (Z − Z ∗) = 0. Together with V T
+<br/>it must hold that kV T
+<br/>(9)
+<br/>Next, we prove that Z ∗ is also a solution of the LRR optimization
+<br/>problem (1). Clearly, for any solution Z of X = AZ,
+<br/>it holds that
+<br/>rank(Z) ≥ rank(AZ) = rank(X). On the other hand, rank(Z ∗) =
+<br/>rank(A†X) ≤ rank(X). Thus, rank(Z ∗) = rank(X). This shows that
+<br/>Z ∗ is the lowest-rank solution of the LRR optimization problem (1). The
+<br/>proof is complete.
+<br/>(cid:4)
+<br/>In the following, Theorem 2 will show that the optimal Z of (5) will
+<br/>be block-diagonal if the data are sampled from a set of independent
+<br/>subspaces {S1, S2, · · · , Sk}, where the dimensionality of Si is ri and
+<br/>i = {1, 2, · · · , k}. Note that, {S1, S2, · · · , Sk} are independent if and
+<br/>only if SiTPj6=i Sj = {0}. Suppose that X = [X1, X2, · · · , Xk] and
+<br/>A = [A1, A2, · · · , Ak], where Ai and Xi contain mi and ni data points
+<br/>ELECTRONICS LETTERS 12th December 2011 Vol. 00 No. 00
+</td><td>('2235162', 'Haixian Zhang', 'haixian zhang')<br/>('4340744', 'Xi Peng', 'xi peng')</td><td></td></tr><tr><td>8149c30a86e1a7db4b11965fe209fe0b75446a8c</td><td>Semi-Supervised Multiple Instance Learning based
+<br/>Domain Adaptation for Object Detection
+<br/>Siemens Corporate Research
+<br/>Siemens Corporate Research
+<br/>Siemens Corporate Research
+<br/>Amit Kale
+<br/>Bangalore
+<br/>Bangalore
+<br/>{chhaya.methani,
+<br/>Bangalore
+<br/>rahul.thota,
+</td><td>('2970569', 'Chhaya Methani', 'chhaya methani')<br/>('31516659', 'Rahul Thota', 'rahul thota')</td><td>kale.amit}@siemens.com
+</td></tr><tr><td>81da427270c100241c07143885ba3051ec4a2ecb</td><td>Learning the Synthesizability of Dynamic Texture Samples∗
+<br/><b>State Key Lab. LIESMARS, Wuhan University, China</b><br/>2Computer Vision Lab., ETH Zurich, Switzerland
+<br/>February 6, 2018
+</td><td>('1706687', 'Feng Yang', 'feng yang')<br/>('39943835', 'Gui-Song Xia', 'gui-song xia')<br/>('1778526', 'Dengxin Dai', 'dengxin dai')<br/>('1733213', 'Liangpei Zhang', 'liangpei zhang')</td><td>{guisong.xia, fengyang, zlp62}@whu.edu.cn
+<br/>dai@vision.ee.ethz.ch
+</td></tr><tr><td>861c650f403834163a2c27467a50713ceca37a3e</td><td>Probabilistic Elastic Part Model for Unsupervised Face Detector Adaptation
+<br/><b>Stevens Institute of Technology</b><br/>Hoboken, NJ 07030
+<br/>Adobe Systems Inc.
+<br/>San Jose, CA 95110
+</td><td>('3131569', 'Haoxiang Li', 'haoxiang li')<br/>('1745420', 'Gang Hua', 'gang hua')<br/>('1721019', 'Jonathan Brandt', 'jonathan brandt')<br/>('1706007', 'Jianchao Yang', 'jianchao yang')</td><td>{hli18, ghua}@stevens.edu
+<br/>{zlin, jbrandt, jiayang}@adobe.com
+</td></tr><tr><td>86614c2d2f6ebcb9c600d4aef85fd6bf6eab6663</td><td>Benchmarks for Cloud Robotics
+<br/>Arjun Singh
+<br/>Electrical Engineering and Computer Sciences
+<br/><b>University of California at Berkeley</b><br/>Technical Report No. UCB/EECS-2016-142
+<br/>http://www.eecs.berkeley.edu/Pubs/TechRpts/2016/EECS-2016-142.html
+<br/>August 12, 2016
+</td><td></td><td></td></tr><tr><td>86b69b3718b9350c9d2008880ce88cd035828432</td><td>Improving Face Image Extraction by Using Deep Learning Technique
+<br/>National Library of Medicine, NIH, Bethesda, MD
+</td><td>('1726787', 'Zhiyun Xue', 'zhiyun xue')<br/>('1721328', 'Sameer Antani', 'sameer antani')<br/>('1691151', 'L. Rodney Long', 'l. rodney long')<br/>('1705831', 'Dina Demner-Fushman', 'dina demner-fushman')<br/>('1692057', 'George R. Thoma', 'george r. thoma')</td><td></td></tr><tr><td>86904aee566716d9bef508aa9f0255dc18be3960</td><td>Learning Anonymized Representations with
+<br/>Adversarial Neural Networks
+</td><td>('1743922', 'Pablo Piantanida', 'pablo piantanida')<br/>('1751762', 'Yoshua Bengio', 'yoshua bengio')<br/>('1694313', 'Pierre Duhamel', 'pierre duhamel')</td><td></td></tr><tr><td>86f191616423efab8c0d352d986126a964983219</td><td>Visual to Sound: Generating Natural Sound for Videos in the Wild
+<br/><b>University of North Carolina at Chapel Hill, 2Adobe Research</b></td><td>('49455017', 'Yipin Zhou', 'yipin zhou')<br/>('8056043', 'Zhaowen Wang', 'zhaowen wang')<br/>('2442612', 'Chen Fang', 'chen fang')<br/>('30190128', 'Trung Bui', 'trung bui')<br/>('1685538', 'Tamara L. Berg', 'tamara l. berg')</td><td></td></tr><tr><td>867e709a298024a3c9777145e037e239385c0129</td><td> INTERNATIONAL JOURNAL
+<br/> OF PROFESSIONAL ENGINEERING STUDIES Volume VIII /Issue 2 / FEB 2017
+<br/>ANALYTICAL REPRESENTATION OF UNDERSAMPLED FACE
+<br/>RECOGNITION APPROACH BASED ON DICTIONARY LEARNING
+<br/>AND SPARSE REPRESENTATION
+<br/>(M.Tech)1, Assistant Professor2, Assistant Professor3, HOD of CSE Department4
+</td><td>('32628937', 'Murala Sandeep', 'murala sandeep')<br/>('1702980', 'Ranga Reddy', 'ranga reddy')</td><td></td></tr><tr><td>869a2fbe42d3fdf40ed8b768edbf54137be7ac71</td><td>Relative Attributes for Enhanced Human-Machine Communication
+<br/><b>Toyota Technological Institute, Chicago</b><br/><b>Indraprastha Institute of Information Technology, Delhi</b><br/><b>University of Texas, Austin</b></td><td>('1713589', 'Devi Parikh', 'devi parikh')<br/>('1770205', 'Adriana Kovashka', 'adriana kovashka')<br/>('2076800', 'Amar Parkash', 'amar parkash')<br/>('1794409', 'Kristen Grauman', 'kristen grauman')</td><td></td></tr><tr><td>86c5478f21c4a9f9de71b5ffa90f2a483ba5c497</td><td>Kernel Selection using Multiple Kernel Learning and Domain
+<br/>Adaptation in Reproducing Kernel Hilbert Space, for Face
+<br/>Recognition under Surveillance Scenario
+<br/><b>Indian Institute of Technology, Madras, Chennai 600036, INDIA</b><br/>Face Recognition (FR) has been the interest to several researchers over the past few decades due to its passive nature of biometric
+<br/>authentication. Despite high accuracy achieved by face recognition algorithms under controlled conditions, achieving the same
+<br/>performance for face images obtained in surveillance scenarios, is a major hurdle. Some attempts have been made to super-resolve
+<br/>the low-resolution face images and improve the contrast, without considerable degree of success. The proposed technique in this
+<br/>paper tries to cope with the very low resolution and low contrast face images obtained from surveillance cameras, for FR under
+<br/>surveillance conditions. For Support Vector Machine classification, the selection of appropriate kernel has been a widely discussed
+<br/>issue in the research community. In this paper, we propose a novel kernel selection technique termed as MFKL (Multi-Feature
+<br/>Kernel Learning) to obtain the best feature-kernel pairing. Our proposed technique employs a effective kernel selection by Multiple
+<br/>Kernel Learning (MKL) method, to choose the optimal kernel to be used along with unsupervised domain adaptation method in the
+<br/>Reproducing Kernel Hilbert Space (RKHS), for a solution to the problem. Rigorous experimentation has been performed on three
+<br/>real-world surveillance face datasets : FR SURV [33], SCface [20] and ChokePoint [44]. Results have been shown using Rank-1
+<br/>Recognition Accuracy, ROC and CMC measures. Our proposed method outperforms all other recent state-of-the-art techniques by
+<br/>a considerable margin.
+<br/>Index Terms—Kernel Selection, Surveillance, Multiple Kernel Learning, Domain Adaptation, RKHS, Hallucination
+<br/>I. INTRODUCTION
+<br/>Face Recognition (FR) is a classical problem which is far
+<br/>from being solved. Face Recognition has a clear advantage
+<br/>of being natural and passive over other biometric techniques
+<br/>requiring co-operative subjects. Most face recognition algo-
+<br/>rithms perform well under a controlled environment. A face
+<br/>recognition system trained at a certain resolution, illumination
+<br/>and pose, recognizes faces under similar conditions with very
+<br/>high accuracy. In contrary, if the face of the same subject is
+<br/>presented with considerable change in environmental condi-
+<br/>tions, then such a face recognition system fails to achieve a
+<br/>desired level of accuracy. So, we aim to find a solution to the
+<br/>face recognition under unconstrained environment.
+<br/>Face images obtained by an outdoor panoramic surveillance
+<br/>camera, are often confronted with severe degradations (e.g.,
+<br/>low-resolution, blur, low-contrast, interlacing and noise). This
+<br/>significantly limits the performance of face recognition sys-
+<br/>tems used for binding “security with surveillance” applica-
+<br/>tions. Here, images used for training are usually available be-
+<br/>forehand which are taken under a well controlled environment
+<br/>in an indoor setup (laboratory, control room), whereas the
+<br/>images used for testing are captured when a subject comes
+<br/>under a surveillance scene. With ever increasing demands
+<br/>to combine “security with surveillance” in an integrated and
+<br/>automated framework, it is necessary to analyze samples of
+<br/>face images of subjects acquired by a surveillance camera
+<br/>from a long distance. Hence the subject must be accurately
+<br/>recognized from a low resolution, blurred and degraded (low
+<br/>contrast, aliasing, noise) face image, as obtained from the
+<br/>surveillance camera. These face images are difficult to match
+<br/>because they are often captured under non-ideal conditions.
+<br/>Thus, face recognition in surveillance scenario is an impor-
+<br/>tant and emerging research area which motivates the work
+<br/>presented in this paper.
+<br/>Performance of most classifiers degrade when both the
+<br/>resolution and contrast of face templates used for recognition
+<br/>are low. There have been many advancement in this area
+<br/>during the past decade, where attempts have been made to
+<br/>deal with this problem under an unconstrained environment.
+<br/>For surveillance applications, a face recognition system must
+<br/>recognize a face in an unconstrained environment without the
+<br/>notice of the subject. Degradation of faces is quite evident in
+<br/>the surveillance scenario due to low-resolution and camera-
+<br/>blur. Variations in the illuminating conditions of the faces
+<br/>not only reduces the recognition accuracy but occasionally
+<br/>degrades the performance of face detection which is the first
+<br/>step of face recognition. The work presented in this paper deals
+<br/>with such issues involved in FR under surveillance conditions.
+<br/>In the work presented in this paper, the face samples from
+<br/>both gallery and probe are initially passed through a robust
+<br/>face detector, the Chehra face tracker, to find a tightly cropped
+<br/>face image. A domain adaptation (DA) based algorithm,
+<br/>formulated using eigen-domain transformation is designed to
+<br/>bridge the gap between the features obtained from the gallery
+<br/>and the probe samples. A novel Multiple kernel Learning
+<br/>(MKL) based learning method, termed MFKL (Multi-Feature
+<br/>Kernel Learning), is then used to obtain an optimal combi-
+<br/>nation (pairing) of the feature and the kernel for FR. The
+<br/>novelty of the work presented in this paper is the optimal
+<br/>pairing of feature and kernel to provide best performance with
+<br/>DA based learning for FR. Results of performance analysis on
+</td><td>('2643208', 'Samik Banerjee', 'samik banerjee')<br/>('1680398', 'Sukhendu Das', 'sukhendu das')</td><td></td></tr><tr><td>86c053c162c08bc3fe093cc10398b9e64367a100</td><td>Cascade of Forests for Face Alignment
+</td><td>('2966679', 'Heng Yang', 'heng yang')<br/>('2876552', 'Changqing Zou', 'changqing zou')<br/>('1744405', 'Ioannis Patras', 'ioannis patras')</td><td></td></tr><tr><td>86b985b285c0982046650e8d9cf09565a939e4f9</td><td></td><td></td><td></td></tr><tr><td>861802ac19653a7831b314cd751fd8e89494ab12</td><td>Time-of-Flight and Depth Imaging. Sensors, Algorithms
+<br/>and Applications: Dagstuhl Seminar 2012 and GCPR
+<br/>Workshop on Imaging New Modalities (Lecture ... Vision,
+<br/>Pattern Recognition, and Graphics)
+<br/>Publisher: Springer; 2013 edition
+<br/>(November 8, 2013)
+<br/>Language: English
+<br/>Pages: 320
+<br/>ISBN: 978-3642449635
+<br/>Size: 20.46 MB
+<br/>Format: PDF / ePub / Kindle
+<br/>Cameras for 3D depth imaging, using
+<br/>either time-of-flight (ToF) or
+<br/>structured light sensors, have received
+<br/>a lot of attention recently and have
+<br/>been improved considerably over the
+<br/>last few years. The present
+<br/>techniques...
+</td><td>('1727057', 'Marcin Grzegorzek', 'marcin grzegorzek')<br/>('1680185', 'Christian Theobalt', 'christian theobalt')<br/>('39897382', 'Reinhard Koch', 'reinhard koch')<br/>('1758212', 'Andreas Kolb', 'andreas kolb')</td><td></td></tr><tr><td>86ed5b9121c02bcf26900913f2b5ea58ba23508f</td><td>Actions ⇠ Transformations
+<br/><b>Carnegie Mellon University</b><br/><b>University of Washington</b><br/><b>The Allen Institute for AI</b></td><td>('39849136', 'Xiaolong Wang', 'xiaolong wang')<br/>('2270286', 'Ali Farhadi', 'ali farhadi')<br/>('1737809', 'Abhinav Gupta', 'abhinav gupta')</td><td></td></tr><tr><td>861b12f405c464b3ffa2af7408bff0698c6c9bf0</td><td>International Journal on Recent and Innovation Trends in Computing and Communication ISSN: 2321-8169
+<br/>Volume: 3 Issue: 5
+<br/> 3337 - 3342
+<br/>_______________________________________________________________________________________________
+<br/>An Effective Technique for Removal of Facial Dupilcation by SBFA
+<br/>Computer Department,
+<br/>GHRCEM,
+<br/>Pune, India
+<br/>Computer Department,
+<br/>GHRCEM,
+<br/> Pune, India
+</td><td>('2947776', 'Ayesha Butalia', 'ayesha butalia')</td><td>deepikapatil941@gmail.com
+<br/>ayeshabutalia@gmail.com
+</td></tr><tr><td>86b6afc667bb14ff4d69e7a5e8bb2454a6bbd2cd</td><td>YUE et al.: ATTENTIONAL ALIGNMENT NETWORK
+<br/>Attentional Alignment Network
+<br/><b>Beihang University, Beijing, China</b><br/>2 The Key Laboratory of Advanced
+<br/>Technologies for Near Space
+<br/>Information Systems
+<br/>Ministry of
+<br/>Technology of China
+<br/><b>University of Texas at Arlington</b><br/>TX, USA
+<br/><b>Shanghai Jiao Tong University</b><br/>Shanghai, China
+<br/>Industry and Information
+</td><td>('35310815', 'Lei Yue', 'lei yue')<br/>('6050999', 'Xin Miao', 'xin miao')<br/>('3127895', 'Pengbo Wang', 'pengbo wang')<br/>('1740430', 'Baochang Zhang', 'baochang zhang')<br/>('34798935', 'Xiantong Zhen', 'xiantong zhen')<br/>('40916581', 'Xianbin Cao', 'xianbin cao')</td><td>yuelei@buaa.edu.cn
+<br/>xin.miao@mavs.uta.edu
+<br/>wangpengbo_vincent@sjtu.edu.cn
+<br/>bczhang@buaa.edu.cn
+<br/>zhenxt@buaa.edu.cn
+<br/>xbcao@buaa.edu.cn
+</td></tr><tr><td>862d17895fe822f7111e737cbcdd042ba04377e8</td><td>Semi-Latent GAN: Learning to generate and modify facial images from
+<br/>attributes
+<br/><b>The school of Data Science, Fudan University</b><br/>† Disney Research,
+</td><td>('11740128', 'Weidong Yin', 'weidong yin')<br/>('35782003', 'Yanwei Fu', 'yanwei fu')<br/>('14517812', 'Leonid Sigal', 'leonid sigal')<br/>('1713721', 'Xiangyang Xue', 'xiangyang xue')</td><td>yanweifu@fudan.edu.cn
+</td></tr><tr><td>86d0127e1fd04c3d8ea78401c838af621647dc95</td><td>Facial Attribute Prediction
+<br/><b>College of Information and Engineering, Hunan University, Changsha, China</b><br/><b>School of Computer Science, National University of Defense Technology, Changsha, China</b><br/><b>University of Texas at San Antonio, USA</b></td><td>('48664471', 'Mingxing Duan', 'mingxing duan')<br/>('50842217', 'Qi Tian', 'qi tian')</td><td>duanmingxing16@nudt.edu.cn, lkl@hnu.edu.cn, qi.tian@utsa.edu
+</td></tr><tr><td>86e1bdbfd13b9ed137e4c4b8b459a3980eb257f6</td><td>The Kinetics Human Action Video Dataset
+<br/>Jo˜ao Carreira
+<br/>Paul Natsev
+</td><td>('21028601', 'Will Kay', 'will kay')<br/>('34838386', 'Karen Simonyan', 'karen simonyan')<br/>('11809518', 'Brian Zhang', 'brian zhang')<br/>('38961760', 'Chloe Hillier', 'chloe hillier')<br/>('2259154', 'Sudheendra Vijayanarasimhan', 'sudheendra vijayanarasimhan')<br/>('39045746', 'Fabio Viola', 'fabio viola')<br/>('1691808', 'Tim Green', 'tim green')<br/>('2830305', 'Trevor Back', 'trevor back')<br/>('2573615', 'Mustafa Suleyman', 'mustafa suleyman')<br/>('1688869', 'Andrew Zisserman', 'andrew zisserman')</td><td>wkay@google.com
+<br/>joaoluis@google.com
+<br/>simonyan@google.com
+<br/>brianzhang@google.com
+<br/>chillier@google.com
+<br/>svnaras@google.com
+<br/>fviola@google.com
+<br/>tfgg@google.com
+<br/>back@google.com
+<br/>natsev@google.com
+<br/>mustafasul@google.com
+<br/>zisserman@google.com
+</td></tr><tr><td>86b6de59f17187f6c238853810e01596d37f63cd</td><td>(IJACSA) International Journal of Advanced Computer Science and Applications,
+<br/>Vol. 7, No. 3, 2016
+<br/>Competitive Representation Based Classification
+<br/>Using Facial Noise Detection
+<br/>Chongqing Key Laboratory of Computational Intelligence
+<br/><b>College of Computer Science and Technology, Chongqing</b><br/>Chongqing Key Laboratory of Computational Intelligence
+<br/><b>College of Computer Science and Technology, Chongqing</b><br/><b>University of Posts and Telecommunications</b><br/><b>University of Posts and Telecommunications</b><br/>Chongqing, China
+<br/>Chongqing, China
+<br/>Chongqing Key Laboratory of Computational Intelligence
+<br/><b>College of Computer Science and Technology, Chongqing</b><br/>Chongqing Key Laboratory of Computational Intelligence
+<br/><b>College of Computer Science and Technology, Chongqing</b><br/><b>University of Posts and Telecommunications</b><br/><b>University of Posts and Telecommunications</b><br/>Chongqing, China
+<br/>Chongqing, China
+</td><td>('1779859', 'Tao Liu', 'tao liu')<br/>('32611393', 'Ying Liu', 'ying liu')<br/>('38837555', 'Cong Li', 'cong li')<br/>('40032263', 'Chao Li', 'chao li')</td><td></td></tr><tr><td>86b105c3619a433b6f9632adcf9b253ff98aee87</td><td>1­4244­0367­7/06/$20.00 ©2006 IEEE
+<br/>1013
+<br/>ICME 2006
+</td><td></td><td></td></tr><tr><td>86f3552b822f6af56cb5079cc31616b4035ccc4e</td><td>Towards Miss Universe Automatic Prediction: The Evening Gown Competition
+<br/><b>University of Queensland, Brisbane, Australia</b><br/>(cid:5) Data61, CSIRO, Australia
+</td><td>('1850202', 'Johanna Carvajal', 'johanna carvajal')<br/>('2331880', 'Arnold Wiliem', 'arnold wiliem')<br/>('1781182', 'Conrad Sanderson', 'conrad sanderson')</td><td></td></tr><tr><td>86a8b3d0f753cb49ac3250fa14d277983e30a4b7</td><td>Exploiting Unlabeled Ages for Aging Pattern Analysis on A Large Database
+<br/><b>West Virginia University, Morgantown, WV</b></td><td>('1720735', 'Chao Zhang', 'chao zhang')<br/>('1822413', 'Guodong Guo', 'guodong guo')</td><td>cazhang@mix.wvu.edu, guodong.guo@mail.wvu.edu
+</td></tr><tr><td>860588fafcc80c823e66429fadd7e816721da42a</td><td>Unsupervised Discovery of Object Landmarks as Structural Representations
+<br/><b>University of Michigan, Ann Arbor</b><br/>2Google Brain
+</td><td>('1692992', 'Yuting Zhang', 'yuting zhang')<br/>('1857914', 'Yijie Guo', 'yijie guo')<br/>('50442731', 'Yixin Jin', 'yixin jin')<br/>('49513553', 'Yijun Luo', 'yijun luo')<br/>('46915665', 'Zhiyuan He', 'zhiyuan he')<br/>('1697141', 'Honglak Lee', 'honglak lee')</td><td>{yutingzh, guoyijie, jinyixin, lyjtour, zhiyuan, honglak}@umich.edu
+<br/>honglak@google.com
+</td></tr><tr><td>86b51bd0c80eecd6acce9fc538f284b2ded5bcdd</td><td></td><td></td><td></td></tr><tr><td>8699268ee81a7472a0807c1d3b1db0d0ab05f40d</td><td></td><td></td><td></td></tr><tr><td>86374bb8d309ad4dbde65c21c6fda6586ae4147a</td><td>Detect-and-Track: Efficient Pose Estimation in Videos
+<br/><b>The Robotics Institute, Carnegie Mellon University</b><br/><b>Dartmouth College</b><br/>2Facebook
+<br/>https://rohitgirdhar.github.io/DetectAndTrack
+</td><td>('3102850', 'Rohit Girdhar', 'rohit girdhar')<br/>('2082991', 'Georgia Gkioxari', 'georgia gkioxari')<br/>('1732879', 'Lorenzo Torresani', 'lorenzo torresani')<br/>('2210374', 'Manohar Paluri', 'manohar paluri')</td><td></td></tr><tr><td>869583b700ecf33a9987447aee9444abfe23f343</td><td></td><td></td><td></td></tr><tr><td>72282287f25c5419dc6fd9e89ec9d86d660dc0b5</td><td>A Rotation Invariant Latent Factor Model for
+<br/>Moveme Discovery from Static Poses
+<br/><b>California Institute of Technology, Pasadena, CA, USA</b></td><td>('3339867', 'Matteo Ruggero Ronchi', 'matteo ruggero ronchi')<br/>('14834454', 'Joon Sik Kim', 'joon sik kim')<br/>('1740159', 'Yisong Yue', 'yisong yue')</td><td>{mronchi, jkim5, yyue}@caltech.edu
+</td></tr><tr><td>72a87f509817b3369f2accd7024b2e4b30a1f588</td><td>Fault diagnosis of a railway device using semi-supervised
+<br/>independent factor analysis with mixing constraints
+<br/>To cite this version:
+<br/>using semi-supervised independent factor analysis with mixing constraints. Pattern Analysis and
+<br/>Applications, Springer Verlag, 2012, 15 (3), pp.313-326. <hal-00750589>
+<br/>HAL Id: hal-00750589
+<br/>https://hal.archives-ouvertes.fr/hal-00750589
+<br/>Submitted on 11 Nov 2012
+<br/>HAL is a multi-disciplinary open access
+<br/>archive for the deposit and dissemination of sci-
+<br/>entific research documents, whether they are pub-
+<br/>lished or not. The documents may come from
+<br/>teaching and research institutions in France or
+<br/><b>abroad, or from public or private research centers</b><br/>L’archive ouverte pluridisciplinaire HAL, est
+<br/>destinée au dépôt et à la diffusion de documents
+<br/>scientifiques de niveau recherche, publiés ou non,
+<br/>émanant des établissements d’enseignement et de
+<br/>recherche français ou étrangers, des laboratoires
+<br/>publics ou privés.
+</td><td>('3202810', 'Etienne Côme', 'etienne côme')<br/>('1707103', 'Latifa Oukhellou', 'latifa oukhellou')<br/>('1710347', 'Thierry Denoeux', 'thierry denoeux')<br/>('2688359', 'Patrice Aknin', 'patrice aknin')<br/>('3202810', 'Etienne Côme', 'etienne côme')<br/>('1707103', 'Latifa Oukhellou', 'latifa oukhellou')<br/>('1710347', 'Thierry Denoeux', 'thierry denoeux')<br/>('2688359', 'Patrice Aknin', 'patrice aknin')</td><td></td></tr><tr><td>72a00953f3f60a792de019a948174bf680cd6c9f</td><td>Stat Comput (2007) 17:57–70
+<br/>DOI 10.1007/s11222-006-9004-9
+<br/>Understanding the role of facial asymmetry in human face
+<br/>identification
+<br/>Received: May 2005 / Accepted: September 2006 / Published online: 30 January 2007
+<br/>C(cid:1) Springer Science + Business Media, LLC 2007
+</td><td>('2046854', 'Sinjini Mitra', 'sinjini mitra')</td><td></td></tr><tr><td>726b8aba2095eef076922351e9d3a724bb71cb51</td><td></td><td></td><td></td></tr><tr><td>721b109970bf5f1862767a1bec3f9a79e815f79a</td><td></td><td></td><td></td></tr><tr><td>727ecf8c839c9b5f7b6c7afffe219e8b270e7e15</td><td>LEVERAGING GEO-REFERENCED DIGITAL PHOTOGRAPHS
+<br/>A DISSERTATION
+<br/>SUBMITTED TO THE DEPARTMENT OF COMPUTER SCIENCE
+<br/>AND THE COMMITTEE ON GRADUATE STUDIES
+<br/><b>OF STANFORD UNIVERSITY</b><br/>IN PARTIAL FULFILLMENT OF THE REQUIREMENTS
+<br/>FOR THE DEGREE OF
+<br/>DOCTOR OF PHILOSOPHY
+<br/>July 2005
+</td><td>('1687465', 'Mor Naaman', 'mor naaman')</td><td></td></tr><tr><td>72a5e181ee8f71b0b153369963ff9bfec1c6b5b0</td><td>Expression recognition in videos using a weighted
+<br/>component-based feature descriptor
+<br/>1. Machine Vision Group, Department of Electrical and Information Engineering,
+<br/><b>University of Oulu, Finland</b><br/><b>Research Center for Learning Science, Southeast University, China</b><br/>http://www.ee.oulu.fi/mvg
+</td><td>('18780812', 'Xiaohua Huang', 'xiaohua huang')<br/>('1757287', 'Guoying Zhao', 'guoying zhao')<br/>('40608983', 'Wenming Zheng', 'wenming zheng')</td><td>{huang.xiaohua,gyzhao,mkp}@ee.oulu.fi
+<br/>wenming_zheng@seu.edu.cn
+</td></tr><tr><td>72ecaff8b57023f9fbf8b5b2588f3c7019010ca7</td><td>Facial Keypoints Detection
+</td><td>('27744156', 'Shenghao Shi', 'shenghao shi')</td><td></td></tr><tr><td>72591a75469321074b072daff80477d8911c3af3</td><td>Group Component Analysis for Multi-block Data:
+<br/>Common and Individual Feature Extraction
+</td><td>('1764724', 'Guoxu Zhou', 'guoxu zhou')<br/>('1747156', 'Andrzej Cichocki', 'andrzej cichocki')<br/>('38741479', 'Yu Zhang', 'yu zhang')</td><td></td></tr><tr><td>7224d58a7e1f02b84994b60dc3b84d9fe6941ff5</td><td>When Face Recognition Meets with Deep Learning: an Evaluation of
+<br/>Convolutional Neural Networks for Face Recognition
+<br/><b>Centre for Vision, Speech and Signal Processing, University of Surrey, UK</b><br/><b>Electronic Engineering and Computer Science, Queen Mary University of London, UK</b><br/>Center for Biometrics and Security Research & National Laboratory of Pattern Recognition, Chinese Academy of Sciences, China♠
+</td><td>('38819702', 'Guosheng Hu', 'guosheng hu')<br/>('2653152', 'Yongxin Yang', 'yongxin yang')<br/>('1716143', 'Dong Yi', 'dong yi')<br/>('1748684', 'Josef Kittler', 'josef kittler')<br/>('34679741', 'Stan Z. Li', 'stan z. li')</td><td>{g.hu,j.kittler,w.christmas}@surrey.ac.uk,{yongxin.yang,t.hospedales}@qmul.ac.uk, {szli,dyi}@cbsr.ia.ac.cn
+</td></tr><tr><td>729dbe38538fbf2664bc79847601f00593474b05</td><td></td><td></td><td></td></tr><tr><td>729a9d35bc291cc7117b924219bef89a864ce62c</td><td>Recognizing Material Properties from Images
+</td><td>('40116153', 'Gabriel Schwartz', 'gabriel schwartz')<br/>('1708819', 'Ko Nishino', 'ko nishino')</td><td></td></tr><tr><td>72e10a2a7a65db7ecdc7d9bd3b95a4160fab4114</td><td>Face Alignment using Cascade Gaussian Process Regression Trees
+<br/><b>Korea Advanced institute of Science and Technology</b><br/>Face alignment is a task to locate fiducial facial landmark points, such as eye
+<br/>corners, nose tip, mouth corners, and chin, in a face image. Shape regression
+<br/>has become an accurate, robust, and fast framework for face alignment [2,
+<br/>In shape regression, face shape s = (x1,y1,··· ,xp,yp)(cid:62), that is a
+<br/>4, 5].
+<br/>concatenation of p facial landmark coordinates {(xi,yi)}p
+<br/>i=1, is initialized
+<br/>and iteratively updated through a cascade regression trees (CRT) as shown
+<br/>in Figure 1. Each tree estimates the shape increment from the current shape
+<br/>estimate, and the final shape estimate is given by a cumulated sum of the
+<br/>outputs of the trees to the initial estimate as follows:
+<br/>ˆsT = ˆs0 +
+<br/>t=1
+<br/>f t (xt;θ t ),
+<br/>(1)
+<br/>where T is the number of stages, t is an index that denotes the stage, ˆst is a
+<br/>shape estimate, xt is a feature vector that is extracted from an input image
+<br/>I, and f t (·;·) is a tree that is parameterized by θ t. Starting from the rough
+<br/>initial shape estimate ˆs0, each stage iteratively updates the shape estimate
+<br/>by ˆst = ˆst−1 + f t (xt;θ t ).
+<br/>The two key elements of CRT-based shape regression that impact to the
+<br/>prediction performance are gradient boosting [3] for learning the CRT and
+<br/>the shape-indexed features [2] which the trees are based. In gradient boost-
+<br/>ing, each stage iteratively fits training data in a greedy stage-wise manner by
+<br/>reducing the regression residuals that are defined as the differences between
+<br/>the ground truth shapes and shape estimates. The shape-indexed features
+<br/>are extracted from the pixel coordinates referenced by the shape estimate.
+<br/>The shape-indexed features are extremely cheap to compute and are robust
+<br/>against geometric variations.
+<br/>Instead of using gradient boosting, we propose cascade Gaussian pro-
+<br/>cess regression trees (cGPRT) that can be incorporated as a learning method
+<br/>for a CRT prediction framework. The cGPRT is constructed by combining
+<br/>Gaussian process regression trees (GPRT) in a cascade stage-wise manner.
+<br/>Given training samples S = (s1,··· ,sN )(cid:62) and Xt = (x1,··· ,xN )(cid:62), GPRT
+<br/>models the relationship between inputs and outputs by a regression function
+<br/>f (x) drawn from a Gaussian process with independent additive noise εi,
+<br/>i = 1,··· ,N,
+<br/>si = f (xi) + εi,
+<br/>f (x) ∼ GP(0,k(x,x(cid:48))),
+<br/>εi ∼ N (0,σ 2
+<br/>n ).
+<br/>A kernel k(x,x(cid:48)) in GPRT is defined by a set of M number of trees:
+<br/>k(x,x(cid:48)) = σ 2
+<br/>κm(x,x(cid:48)) =
+<br/>m=1
+<br/>(cid:26) 1
+<br/>κm(x,x(cid:48)),
+<br/>if τm(x) = τm(x(cid:48))
+<br/>otherwise,
+<br/>(2)
+<br/>(3)
+<br/>(4)
+<br/>(5)
+<br/>(6)
+<br/>where σ 2
+<br/>k is the scaling parameter that represents the kernel power, and τ is
+<br/>a split function takes an input x and computes the leaf index b ∈ {1,··· ,B}.
+<br/>Given an input x∗, distribution over its predictive variable f∗ is given as
+<br/>¯f∗ =
+<br/>i=1
+<br/>αik(xi,x∗),
+<br/>(7)
+<br/>where α = (α1,··· ,αN )(cid:62) is given by K−1
+<br/>n IN,
+<br/>and K is a covariance matrix of which K(i, j) is computed from the i-th and
+<br/>j-th row vector of X. Computation of Equation (7) is in O(N); however, this
+<br/>can be more efficient as follows:
+<br/>s S. Here, Ks is given by K+σ 2
+<br/>¯f∗ =
+<br/>m=1
+<br/>¯αm,τm(x∗),
+<br/>(8)
+</td><td>('2350325', 'Donghoon Lee', 'donghoon lee')<br/>('2857402', 'Hyunsin Park', 'hyunsin park')</td><td></td></tr><tr><td>72160aae43cd9b2c3aae5574acc0d00ea0993b9e</td><td>Boosting Facial Expression Recognition in a Noisy Environment
+<br/>Using LDSP-Local Distinctive Star Pattern
+<br/> 1 Department of Computer Science and Engineering
+<br/><b>Stamford University Bangladesh, Dhaka-1209, Bangladesh</b><br/>2 Department of Computer Science and Engineering
+<br/><b>Stamford University Bangladesh, Dhaka-1209, Bangladesh</b><br/>3 Department of Computer Science and Engineering
+<br/><b>Stamford University Bangladesh, Dhaka-1209, Bangladesh</b></td><td>('7484236', 'Mohammad Shahidul Islam', 'mohammad shahidul islam')<br/>('7497618', 'Tarin Kazi', 'tarin kazi')</td><td></td></tr><tr><td>72cbbdee4f6eeee8b7dd22cea6092c532271009f</td><td>Adversarial Occlusion-aware Face Detection
+<br/>1National Laboratory of Pattern Recognition, CASIA
+<br/>2Center for Research on Intelligent Perception and Computing, CASIA
+<br/><b>University of Chinese Academy of Sciences, Beijing 100190, China</b></td><td>('3065234', 'Yujia Chen', 'yujia chen')<br/>('3051419', 'Lingxiao Song', 'lingxiao song')<br/>('1705643', 'Ran He', 'ran he')</td><td></td></tr><tr><td>721d9c387ed382988fce6fa864446fed5fb23173</td><td></td><td></td><td></td></tr><tr><td>72c0c8deb9ea6f59fde4f5043bff67366b86bd66</td><td>Age progression in Human Faces : A Survey
+</td><td>('34713849', 'Narayanan Ramanathan', 'narayanan ramanathan')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td></td></tr><tr><td>721e5ba3383b05a78ef1dfe85bf38efa7e2d611d</td><td>BULAT, TZIMIROPOULOS: CONVOLUTIONAL AGGREGATION OF LOCAL EVIDENCE
+<br/>Convolutional aggregation of local evidence
+<br/>for large pose face alignment
+<br/>Computer Vision Laboratory
+<br/><b>University of Nottingham</b><br/>Nottingham, UK
+</td><td>('3458121', 'Adrian Bulat', 'adrian bulat')<br/>('2610880', 'Georgios Tzimiropoulos', 'georgios tzimiropoulos')</td><td>adrian.bulat@nottingham.ac.uk
+<br/>yorgos.tzimiropoulos@nottingham.ac.uk
+</td></tr><tr><td>72f4aaf7e2e3f215cd8762ce283988220f182a5b</td><td>Turk J Elec Eng & Comp Sci, Vol.18, No.4, 2010, c(cid:2) T ¨UB˙ITAK
+<br/>doi:10.3906/elk-0906-48
+<br/>Active illumination and appearance model for face
+<br/>alignment
+<br/><b>Institute of Informatics, Istanbul Technical University, Istanbul, 34469, TURKEY</b><br/><b>Istanbul Technical University, Istanbul, 34469, TURKEY</b><br/><b>DTU Informatics, Technical University of Denmark, DK-2800 Kgs. Lyngby, DENMARK</b></td><td>('2061450', 'Fatih KAHRAMAN', 'fatih kahraman')<br/>('1762901', 'Sune DARKNER', 'sune darkner')<br/>('2134834', 'Rasmus LARSEN', 'rasmus larsen')</td><td>e-mail: kahraman@be.itu.edu.tr
+<br/>e-mail: gokmen@itu.edu.tr
+<br/>e-mail: {sda, rl}@imm.dtu.dk
+</td></tr><tr><td>72a55554b816b66a865a1ec1b4a5b17b5d3ba784</td><td>Real-Time Face Identification
+<br/>via CNN
+<br/>and Boosted Hashing Forest
+<br/><b>State Research Institute of Aviation Systems (GosNIIAS), Moscow, Russia</b><br/>IEEE Computer Society Workshop on Biometrics
+<br/>In conjunction with CVPR 2016, June 26, 2016
+</td><td>('2966131', 'Yury Vizilter', 'yury vizilter')<br/>('5669812', 'Vladimir Gorbatsevich', 'vladimir gorbatsevich')<br/>('34296728', 'Andrey Vorotnikov', 'andrey vorotnikov')<br/>('7729536', 'Nikita Kostromov', 'nikita kostromov')</td><td>viz@gosniias.ru, gvs@gosniias.ru, vorotnikov@gosniias.ru, nikita-kostromov@yandex.ru
+</td></tr><tr><td>72450d7e5cbe79b05839c30a4f0284af5aa80053</td><td>Natural Facial Expression Recognition Using Dynamic
+<br/>and Static Schemes
+<br/>1 Computer Vision Center, 08193 Bellaterra, Barcelona, Spain
+<br/>2 IKERBASQUE, Basque Foundation for Science
+<br/><b>University of the Basque Country, San Sebastian, Spain</b></td><td>('3262395', 'Bogdan Raducanu', 'bogdan raducanu')<br/>('1803584', 'Fadi Dornaika', 'fadi dornaika')</td><td>bogdan@cvc.uab.es
+<br/>fadi dornaika@ehu.es
+</td></tr><tr><td>72bf9c5787d7ff56a1697a3389f11d14654b4fcf</td><td>RobustFaceRecognitionUsing
+<br/>SymmetricShape-from-Shading
+<br/>W.Zhao
+<br/>RamaChellappa
+<br/>CenterforAutomationResearchand
+<br/>ElectricalandComputerEngineeringDepartment
+<br/><b>UniversityofMaryland</b><br/><b>CollegePark, MD</b><br/>ThesupportoftheO(cid:14)ceofNavalResearchunderGrantN
+</td><td></td><td>Email:fwyzhao,ramag@cfar.umd.edu
+</td></tr><tr><td>725c3605c2d26d113637097358cd4c08c19ff9e1</td><td>Deep Reasoning with Knowledge Graph for Social Relationship Understanding
+<br/><b>School of Data and Computer Science, Sun Yat-sen University, China</b><br/>2 SenseTime Research, China
+</td><td>('29988001', 'Zhouxia Wang', 'zhouxia wang')<br/>('1765674', 'Tianshui Chen', 'tianshui chen')<br/>('12254824', 'Weihao Yu', 'weihao yu')<br/>('47413456', 'Hui Cheng', 'hui cheng')<br/>('1737218', 'Liang Lin', 'liang lin')</td><td>zhouzi1212,tianshuichen,jimmy.sj.ren,weihaoyu6@gmail.com,
+<br/>chengh9@mail.sysu.edu.cn, linliang@ieee.org
+</td></tr><tr><td>445461a34adc4bcdccac2e3c374f5921c93750f8</td><td>Emotional Expression Classification using Time-Series Kernels∗
+</td><td>('1737918', 'Jeffrey F. Cohn', 'jeffrey f. cohn')<br/>('1733113', 'Takeo Kanade', 'takeo kanade')</td><td>1E¨otv¨os Lor´and University, Budapest, Hungary, {andras.lorincz,szzoli}@elte.hu
+<br/>2Carnegie Mellon University, Pittsburgh, PA, laszlo.jeni@ieee.org,tk@cs.cmu.edu
+<br/>3University of Pittsburgh, Pittsburgh, PA, jeffcohn@cs.cmu.edu
+</td></tr><tr><td>4414a328466db1e8ab9651bf4e0f9f1fe1a163e4</td><td>1164
+<br/>© EURASIP, 2010 ISSN 2076-1465
+<br/>18th European Signal Processing Conference (EUSIPCO-2010)
+<br/>INTRODUCTION
+</td><td></td><td></td></tr><tr><td>442f09ddb5bb7ba4e824c0795e37cad754967208</td><td></td><td></td><td></td></tr><tr><td>443acd268126c777bc7194e185bec0984c3d1ae7</td><td>Retrieving Relative Soft Biometrics
+<br/>for Semantic Identification
+<br/>School of Electronics and Computer Science,
+<br/><b>University of Southampton, United Kingdom</b></td><td>('3408521', 'Daniel Martinho-Corbishley', 'daniel martinho-corbishley')<br/>('1727698', 'Mark S. Nixon', 'mark s. nixon')<br/>('3000521', 'John N. Carter', 'john n. carter')</td><td>{dmc,msn,jnc}@ecs.soton.ac.uk
+</td></tr><tr><td>44f23600671473c3ddb65a308ca97657bc92e527</td><td>Convolutional Two-Stream Network Fusion for Video Action Recognition
+<br/><b>Graz University of Technology</b><br/><b>Graz University of Technology</b><br/><b>University of Oxford</b></td><td>('2322150', 'Christoph Feichtenhofer', 'christoph feichtenhofer')<br/>('1718587', 'Axel Pinz', 'axel pinz')<br/>('1688869', 'Andrew Zisserman', 'andrew zisserman')</td><td>feichtenhofer@tugraz.at
+<br/>axel.pinz@tugraz.at
+<br/>az@robots.ox.ac.uk
+</td></tr><tr><td>4439746eeb7c7328beba3f3ef47dc67fbb52bcb3</td><td>An Efficient Face Detection Method Using Adaboost and Facial Parts
+<br/>Computer, IT and Electronic department
+<br/><b>Azad University of Qazvin</b><br/>Tehran, Iran
+</td><td>('2514753', 'Yasaman Heydarzadeh', 'yasaman heydarzadeh')<br/>('2514753', 'Yasaman Heydarzadeh', 'yasaman heydarzadeh')<br/>('1681854', 'Abolfazl Toroghi Haghighat', 'abolfazl toroghi haghighat')</td><td>heydarzadeh@ qiau.ac.ir , haghighat@qiau.ac.ir
+</td></tr><tr><td>446a99fdedd5bb32d4970842b3ce0fc4f5e5fa03</td><td>A Pose-Adaptive Constrained Local Model For
+<br/>Accurate Head Pose Tracking
+<br/>Eikeo
+<br/>11 rue Leon Jouhaux,
+<br/>F-75010, Paris, France
+<br/>Sorbonne Universit´es
+<br/>UPMC Univ Paris 06
+<br/>CNRS UMR 7222, ISIR
+<br/>F-75005, Paris, France
+<br/>Eikeo
+<br/>11 rue Leon Jouhaux,
+<br/>F-75010, Paris, France
+</td><td>('2416620', 'Lucas Zamuner', 'lucas zamuner')<br/>('2521061', 'Kevin Bailly', 'kevin bailly')<br/>('2254216', 'Erwan Bigorgne', 'erwan bigorgne')</td><td>lucas.zamuner@eikeo.com
+<br/>kevin.bailly@upmc.fr
+<br/>erwan.bigorgne@eikeo.com
+</td></tr><tr><td>4467a1ae8ddf0bc0e970c18a0cdd67eb83c8fd6f</td><td>Learning features from Improved Dense Trajectories using deep convolutional
+<br/>networks for Human Activity Recognition
+<br/><b>University Drive</b><br/>Burnaby, BC
+<br/>Canada V5A 1S6
+<br/>Sportlogiq Inc.
+<br/>780 Avenue Brewster,
+<br/>Montreal QC,
+<br/>Canada H4C 1A8
+<br/><b>University Drive</b><br/>Burnaby, BC
+<br/>Canada V5A 1S6
+</td><td>('2716937', 'Srikanth Muralidharan', 'srikanth muralidharan')<br/>('2190580', 'Simon Fraser', 'simon fraser')<br/>('15695326', 'Mehrsan Javan', 'mehrsan javan')<br/>('10771328', 'Greg Mori', 'greg mori')<br/>('2190580', 'Simon Fraser', 'simon fraser')</td><td>smuralid@sfu.ca
+<br/>mehrsan@sportlogiq.com
+<br/>mori@cs.sfu.ca
+</td></tr><tr><td>44b1399e8569a29eed0d22d88767b1891dbcf987</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication.
+<br/>IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE
+<br/>Learning Multi-modal Latent Attributes
+</td><td>('1697755', 'Timothy M. Hospedales', 'timothy m. hospedales')<br/>('1700927', 'Tao Xiang', 'tao xiang')<br/>('2073354', 'Shaogang Gong', 'shaogang gong')</td><td></td></tr><tr><td>44f48a4b1ef94a9104d063e53bf88a69ff0f55f3</td><td>Automatically Building Face Datasets of New Domains
+<br/>from Weakly Labeled Data with Pretrained Models
+<br/><b>Sun Yat-sen University</b></td><td>('2442939', 'Shengyong Ding', 'shengyong ding')<br/>('4080607', 'Junyu Wu', 'junyu wu')<br/>('1723992', 'Wei Xu', 'wei xu')<br/>('38255852', 'Hongyang Chao', 'hongyang chao')</td><td></td></tr><tr><td>446dc1413e1cfaee0030dc74a3cee49a47386355</td><td>Recent Advances in Zero-shot Recognition
+</td><td>('35782003', 'Yanwei Fu', 'yanwei fu')<br/>('1700927', 'Tao Xiang', 'tao xiang')<br/>('1717861', 'Yu-Gang Jiang', 'yu-gang jiang')<br/>('1713721', 'Xiangyang Xue', 'xiangyang xue')<br/>('14517812', 'Leonid Sigal', 'leonid sigal')<br/>('2073354', 'Shaogang Gong', 'shaogang gong')</td><td></td></tr><tr><td>44a3ec27f92c344a15deb8e5dc3a5b3797505c06</td><td>A Taxonomy of Part and Attribute Discovery
+<br/>Techniques
+</td><td>('35208858', 'Subhransu Maji', 'subhransu maji')</td><td></td></tr><tr><td>44aeda8493ad0d44ca1304756cc0126a2720f07b</td><td>Face Alive Icons
+</td><td>('1685323', 'Xin Li', 'xin li')<br/>('2304980', 'Chieh-Chih Chang', 'chieh-chih chang')<br/>('1679040', 'Shi-Kuo Chang', 'shi-kuo chang')</td><td>1University of Pittsburgh, USA,{flying, chang}@cs.pitt.edu
+<br/>2Industrial Technology Research Institute, Taiwan, chieh@itri.org.tw
+</td></tr><tr><td>449b1b91029e84dab14b80852e35387a9275870e</td><td></td><td></td><td></td></tr><tr><td>44078d0daed8b13114cffb15b368acc467f96351</td><td></td><td></td><td></td></tr><tr><td>44d23df380af207f5ac5b41459c722c87283e1eb</td><td>Human Attribute Recognition by Deep
+<br/>Hierarchical Contexts
+<br/><b>The Chinese University of Hong Kong</b></td><td>('47002704', 'Yining Li', 'yining li')<br/>('2000034', 'Chen Huang', 'chen huang')<br/>('1717179', 'Chen Change Loy', 'chen change loy')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td>{ly015,chuang,ccloy,xtang}@ie.cuhk.edu.hk
+</td></tr><tr><td>44c9b5c55ca27a4313daf3760a3f24a440ce17ad</td><td>Revisiting hand-crafted feature for action recognition:
+<br/>a set of improved dense trajectories
+<br/><b>Hiroshima University, Japan</b><br/>ENSICAEN, France
+<br/><b>Hiroshima University, Japan</b></td><td>('2223849', 'Kenji Matsui', 'kenji matsui')<br/>('1744862', 'Toru Tamaki', 'toru tamaki')<br/>('30171131', 'Gwladys Auffret', 'gwladys auffret')<br/>('1688940', 'Bisser Raytchev', 'bisser raytchev')<br/>('1686272', 'Kazufumi Kaneda', 'kazufumi kaneda')</td><td></td></tr><tr><td>44dd150b9020b2253107b4a4af3644f0a51718a3</td><td>An Analysis of the Sensitivity of Active Shape
+<br/>Models to Initialization when Applied to Automatic
+<br/>Facial Landmarking
+</td><td>('2363348', 'Keshav Seshadri', 'keshav seshadri')<br/>('1794486', 'Marios Savvides', 'marios savvides')</td><td></td></tr><tr><td>447d8893a4bdc29fa1214e53499ffe67b28a6db5</td><td></td><td>('35734434', 'Maxime BERTHE', 'maxime berthe')</td><td></td></tr><tr><td>44f65e3304bdde4be04823fd7ca770c1c05c2cef</td><td>SIViP
+<br/>DOI 10.1007/s11760-009-0125-4
+<br/>ORIGINAL PAPER
+<br/>On the use of phase of the Fourier transform for face recognition
+<br/>under variations in illumination
+<br/>Received: 17 November 2008 / Revised: 20 February 2009 / Accepted: 7 July 2009
+<br/>© Springer-Verlag London Limited 2009
+</td><td>('2627097', 'Anil Kumar Sao', 'anil kumar sao')</td><td></td></tr><tr><td>44fbbaea6271e47ace47c27701ed05e15da8f7cf</td><td>588306 PSSXXX10.1177/0956797615588306Kret et al.Effect of Pupil Size on Trust
+<br/>research-article2015
+<br/>Research Article
+<br/>Pupil Mimicry Correlates With Trust in
+<br/>In-Group Partners With Dilating Pupils
+<br/> 1 –10
+<br/>© The Author(s) 2015
+<br/>Reprints and permissions:
+<br/>sagepub.com/journalsPermissions.nav
+<br/>DOI: 10.1177/0956797615588306
+<br/>pss.sagepub.com
+<br/>M. E. Kret1,2, A. H. Fischer1,2, and C. K. W. De Dreu1,2,3
+<br/><b>University of Amsterdam; 2Amsterdam Brain and Cognition Center, University of</b><br/><b>Amsterdam; and 3Center for Experimental Economics and Political Decision Making, University of Amsterdam</b></td><td></td><td></td></tr><tr><td>44eb4d128b60485377e74ffb5facc0bf4ddeb022</td><td></td><td></td><td></td></tr><tr><td>448ed201f6fceaa6533d88b0b29da3f36235e131</td><td></td><td></td><td></td></tr><tr><td>441bf5f7fe7d1a3939d8b200eca9b4bb619449a9</td><td>Head Pose Estimation in the Wild using Approximate View Manifolds
+<br/><b>University of Florida</b><br/>Gainesville, FL, USA
+<br/><b>University of Florida</b><br/>Gainesville, FL, USA
+</td><td>('30900274', 'Kalaivani Sundararajan', 'kalaivani sundararajan')<br/>('2171076', 'Damon L. Woodard', 'damon l. woodard')</td><td>kalaivani.s@ufl.edu
+<br/>dwoodard@ufl.edu
+</td></tr><tr><td>447a5e1caf847952d2bb526ab2fb75898466d1bc</td><td>Under review as a conference paper at ICLR 2018
+<br/>LEARNING NON-LINEAR TRANSFORM WITH DISCRIM-
+<br/>INATIVE AND MINIMUM INFORMATION LOSS PRIORS
+<br/>Anonymous authors
+<br/>Paper under double-blind review
+</td><td></td><td></td></tr><tr><td>449808b7aa9ee6b13ad1a21d9f058efaa400639a</td><td>Recovering 3D Facial Shape via Coupled 2D/3D Space Learning
+<br/>1Key Lab of Intelligent Information Processing of CAS,
+<br/><b>Institute of Computing Technology, CAS, Beijing 100190, China</b><br/><b>Graduate University of CAS, 100190, Beijing, China</b><br/><b>System Research Center, NOKIA Research Center, Beijing, 100176, China</b><br/><b>Institute of Digital Media, Peking University, Beijing, 100871, China</b></td><td>('3079475', 'Annan Li', 'annan li')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('1710220', 'Xilin Chen', 'xilin chen')<br/>('1695600', 'Xiujuan Chai', 'xiujuan chai')<br/>('1698902', 'Wen Gao', 'wen gao')</td><td>{anli,sgshan,xlchen,wgao}@jdl.ac.cn
+<br/>ext-xiujuan.chai@nokia.com
+</td></tr><tr><td>2a7bca56e2539c8cf1ae4e9da521879b7951872d</td><td>Exploiting Unrelated Tasks in Multi-Task Learning
+<br/>Anonymous Author 1
+<br/>Unknown Institution 1
+<br/>Anonymous Author 2
+<br/>Unknown Institution 2
+<br/>Anonymous Author 3
+<br/>Unknown Institution 3
+</td><td></td><td></td></tr><tr><td>2a65d7d5336b377b7f5a98855767dd48fa516c0f</td><td>Fast Supervised LDA for Discovering Micro-Events in
+<br/>Large-Scale Video Datasets
+<br/>Multimedia Understanding Group
+<br/><b>Aristotle University of Thessaloniki, Greece</b></td><td>('3493855', 'Angelos Katharopoulos', 'angelos katharopoulos')<br/>('3493472', 'Despoina Paschalidou', 'despoina paschalidou')<br/>('1789830', 'Christos Diou', 'christos diou')<br/>('1708199', 'Anastasios Delopoulos', 'anastasios delopoulos')</td><td>{katharas, pdespoin}@auth.gr; diou@mug.ee.auth.gr; adelo@eng.auth.gr
+</td></tr><tr><td>2af2b74c3462ccff3a6881ff7cf4f321b3242fa9</td><td>Chen ZN, Ngo CW, Zhang W et al. Name-face association in Web videos: A large-scale dataset, baselines, and open issues.
+<br/>1468-z
+<br/>Name-Face Association in Web Videos: A Large-Scale Dataset,
+<br/>Baselines, and Open Issues
+<br/><b>Institute of Automation, Chinese Academy of Sciences, Beijing 100190, China</b><br/><b>City University of Hong Kong, Hong Kong, China</b><br/><b>Institute of Computing Technology, Chinese Academy of Sciences, Beijing 100190, China</b><br/><b>School of Computer Science, Fudan University, Shanghai 200433, China</b><br/>Received February 24, 2014; revised July 3, 2014.
+</td><td>('1751681', 'Chong-Wah Ngo', 'chong-wah ngo')<br/>('40538946', 'Wei Zhang', 'wei zhang')<br/>('1778024', 'Juan Cao', 'juan cao')<br/>('1717861', 'Yu-Gang Jiang', 'yu-gang jiang')</td><td>E-mail: zhineng.chen@ia.ac.cn; cscwngo@cityu.edu.hk; wzhang34-c@my.cityu.edu.hk; caojuan@ict.ac.cn; ygj@fudan.edu.cn
+</td></tr><tr><td>2aaa6969c03f435b3ea8431574a91a0843bd320b</td><td></td><td></td><td></td></tr><tr><td>2af620e17d0ed67d9ccbca624250989ce372e255</td><td>Meta-Class Features for Large-Scale Object Categorization on a Budget
+<br/><b>Dartmouth College</b><br/>Hanover, NH, U.S.A.
+</td><td>('34338883', 'Alessandro Bergamo', 'alessandro bergamo')<br/>('1732879', 'Lorenzo Torresani', 'lorenzo torresani')</td><td>{aleb, lorenzo}@cs.dartmouth.edu
+</td></tr><tr><td>2a35d20b2c0a045ea84723f328321c18be6f555c</td><td>on Converting Supervised Classification to Semi-supervised Classification
+<br/>Boost Picking: A Universal Method
+<br/><b>Beijing Institute of Technology, Beijing 100081 CHINA</b><br/><b>North China University of Technology, Beijing 100144 CHINA</b><br/><b>Beijing Institute of Technology, Beijing 100081 CHINA</b><br/><b>Beijing Institute of Technology, Beijing 100081 CHINA</b></td><td>('1742846', 'Fuqiang Liu', 'fuqiang liu')<br/>('33179404', 'Fukun Bi', 'fukun bi')<br/>('3148439', 'Yiding Yang', 'yiding yang')<br/>('36522003', 'Liang Chen', 'liang chen')</td><td></td></tr><tr><td>2ad7cef781f98fd66101fa4a78e012369d064830</td><td></td><td></td><td></td></tr><tr><td>2ad29b2921aba7738c51d9025b342a0ec770c6ea</td><td></td><td></td><td></td></tr><tr><td>2a9b398d358cf04dc608a298d36d305659e8f607</td><td>Facial Action Unit Recognition with Sparse Representation
+<br/><b>University of Denver, Denver, CO</b><br/><b>University of Pittsburgh, Pittsburgh, PA</b><br/>facial
+<br/>image exhibiting
+</td><td>('3093835', 'Mohammad H. Mahoor', 'mohammad h. mahoor')<br/>('5510802', 'Mu Zhou', 'mu zhou')<br/>('1837267', 'Kevin L. Veon', 'kevin l. veon')<br/>('1737918', 'Jeffrey F. Cohn', 'jeffrey f. cohn')</td><td>Emails: mmahoor@du.edu, mu.zhou09fall@gmail.com, kevin.veon@du.edu, seyedmohammad.mavadati@du.edu, and jeffcohn@pitt.edu
+</td></tr><tr><td>2a0efb1c17fbe78470acf01e4601a75735a805cc</td><td>Illumination-InsensitiveFaceRecognitionUsing
+<br/>SymmetricShape-from-Shading
+<br/>WenYiZhao
+<br/>RamaChellappa
+<br/>CenterforAutomationResearch
+<br/><b>UniversityofMaryland, CollegePark, MD</b></td><td></td><td>Email:fwyzhao,ramag@cfar.umd.edu
+</td></tr><tr><td>2a6bba2e81d5fb3c0fd0e6b757cf50ba7bf8e924</td><td></td><td></td><td></td></tr><tr><td>2ac21d663c25d11cda48381fb204a37a47d2a574</td><td>Interpreting Hand-Over-Face Gestures
+<br/><b>University of Cambridge</b></td><td>('2022940', 'Marwa Mahmoud', 'marwa mahmoud')<br/>('39626495', 'Peter Robinson', 'peter robinson')</td><td></td></tr><tr><td>2a4153655ad1169d482e22c468d67f3bc2c49f12</td><td>Face Alignment Across Large Poses: A 3D Solution
+<br/>1 Center for Biometrics and Security Research & National Laboratory of Pattern Recognition,
+<br/><b>Institute of Automation, Chinese Academy of Sciences</b><br/><b>Michigan State University</b></td><td>('8362374', 'Xiangyu Zhu', 'xiangyu zhu')<br/>('1718623', 'Zhen Lei', 'zhen lei')<br/>('1759169', 'Xiaoming Liu', 'xiaoming liu')<br/>('1704812', 'Hailin Shi', 'hailin shi')<br/>('34679741', 'Stan Z. Li', 'stan z. li')</td><td>{xiangyu.zhu,zlei,hailin.shi,szli}@nlpr.ia.ac.cn
+<br/>liuxm@msu.edu
+</td></tr><tr><td>2aa2b312da1554a7f3e48f71f2fce7ade6d5bf40</td><td>Estimating Sheep Pain Level Using Facial Action Unit Detection
+<br/><b>Computer Laboratory, University of Cambridge, Cambridge, UK</b></td><td>('9871228', 'Yiting Lu', 'yiting lu')<br/>('2022940', 'Marwa Mahmoud', 'marwa mahmoud')<br/>('39626495', 'Peter Robinson', 'peter robinson')</td><td></td></tr><tr><td>2aec012bb6dcaacd9d7a1e45bc5204fac7b63b3c</td><td>Robust Registration and Geometry Estimation from Unstructured
+<br/>Facial Scans
+</td><td>('19214361', 'Maxim Bazik', 'maxim bazik')</td><td></td></tr><tr><td>2ae139b247057c02cda352f6661f46f7feb38e45</td><td>Combining Modality Specific Deep Neural Networks for
+<br/>Emotion Recognition in Video
+<br/>1École Polytechique de Montréal, Université de Montréal, Montréal, Canada
+<br/>2Laboratoire d’Informatique des Systèmes Adaptatifs, Université de Montréal, Montréal, Canada
+</td><td>('3127597', 'Samira Ebrahimi Kahou', 'samira ebrahimi kahou')<br/>('2900675', 'Xavier Bouthillier', 'xavier bouthillier')<br/>('2558801', 'Pierre Froumenty', 'pierre froumenty')<br/>('1710604', 'Roland Memisevic', 'roland memisevic')<br/>('1724875', 'Pascal Vincent', 'pascal vincent')<br/>('1751762', 'Yoshua Bengio', 'yoshua bengio')</td><td>{samira.ebrahimi-kahou, christopher.pal, pierre.froumenty}@polymtl.ca
+<br/>{bouthilx, gulcehrc, memisevr, vincentp, courvila, bengioy}@iro.umontreal.ca
+</td></tr><tr><td>2a3e19d7c54cba3805115497c69069dd5a91da65</td><td>Looking at Hands in Autonomous Vehicles:
+<br/>A ConvNet Approach using Part Affinity Fields
+<br/>LISA: Laboratory for Intelligent & Safe Automobiles
+<br/><b>University of California San Diego</b></td><td>('2812409', 'Kevan Yuen', 'kevan yuen')<br/>('1713989', 'Mohan M. Trivedi', 'mohan m. trivedi')</td><td>kcyuen@eng.ucsd.edu, mtrivedi@eng.ucsd.edu
+</td></tr><tr><td>2af19b5ff2ca428fa42ef4b85ddbb576b5d9a5cc</td><td>Multi-Region Probabilistic Histograms
+<br/>for Robust and Scalable Identity Inference (cid:63)
+<br/>NICTA, PO Box 6020, St Lucia, QLD 4067, Australia
+<br/><b>University of Queensland, School of ITEE, QLD 4072, Australia</b></td><td>('1781182', 'Conrad Sanderson', 'conrad sanderson')<br/>('2270092', 'Brian C. Lovell', 'brian c. lovell')</td><td></td></tr><tr><td>2a14b6d9f688714dc60876816c4b7cf763c029a9</td><td>Combining Multiple Sources of Knowledge in Deep CNNs for Action Recognition
+<br/><b>University of North Carolina at Chapel Hill</b></td><td>('2155311', 'Eunbyung Park', 'eunbyung park')<br/>('1682965', 'Xufeng Han', 'xufeng han')<br/>('1685538', 'Tamara L. Berg', 'tamara l. berg')<br/>('39668247', 'Alexander C. Berg', 'alexander c. berg')</td><td>{eunbyung,xufeng,tlberg,aberg}@cs.unc.edu
+</td></tr><tr><td>2a88541448be2eb1b953ac2c0c54da240b47dd8a</td><td>Discrete Graph Hashing
+<br/><b>IBM T. J. Watson Research Center</b><br/><b>Columbia University</b><br/>(cid:2)Google Research
+</td><td>('39059457', 'Wei Liu', 'wei liu')<br/>('2794322', 'Sanjiv Kumar', 'sanjiv kumar')<br/>('9546964', 'Shih-Fu Chang', 'shih-fu chang')</td><td>weiliu@us.ibm.com
+<br/>cm3052@columbia.edu
+<br/>sfchang@ee.columbia.edu
+<br/>sanjivk@google.com
+</td></tr><tr><td>2a5903bdb3fdfb4d51f70b77f16852df3b8e5f83</td><td>121
+<br/>The Effect of Computer-Generated Descriptions
+<br/>on Photo-Sharing Experiences of People With
+<br/>Visual Impairments
+<br/>Like sighted people, visually impaired people want to share photographs on social networking services, but
+<br/>find it difficult to identify and select photos from their albums. We aimed to address this problem by
+<br/>incorporating state-of-the-art computer-generated descriptions into Facebook’s photo-sharing feature. We
+<br/>interviewed 12 visually impaired participants to understand their photo-sharing experiences and designed a
+<br/>photo description feature for the Facebook mobile application. We evaluated this feature with six
+<br/>participants in a seven-day diary study. We found that participants used the descriptions to recall and
+<br/>organize their photos, but they hesitated to upload photos without a sighted person’s input. In addition to
+<br/>basic information about photo content, participants wanted to know more details about salient objects and
+<br/>people, and whether the photos reflected their personal aesthetic. We discuss these findings from the lens of
+<br/>self-disclosure and self-presentation theories and propose new computer vision research directions that will
+<br/>better support visual content sharing by visually impaired people.
+<br/>CCS Concepts: • Information interfaces and presentations → Multimedia and information systems; •
+<br/>Social and professional topics → People with disabilities
+<br/>KEYWORDS
+<br/>Visual impairments; computer-generated descriptions; SNSs; photo sharing; self-disclosure; self-presentation
+<br/>ACM Reference format:
+<br/>The Effect of Computer-Generated Descriptions On Photo-Sharing Experiences of People With Visual
+<br/>Impairments. Proc. ACM Hum.-Comput. Interact. 1, CSCW. 121 (November 2017), 22 pages.
+<br/>DOI: 10.1145/3134756
+<br/>1 INTRODUCTION
+<br/>Sharing memories and experiences via photos is a common way to engage with others on social networking
+<br/>services (SNSs) [39,46,51]. For instance, Facebook users uploaded more than 350 million photos a day [24]
+<br/>and Twitter, which initially supported only text in tweets, now has more than 28.4% of tweets containing
+<br/>images [39]. Visually impaired people (both blind and low vision) have a strong presence on SNS and are
+<br/>interested in sharing photos [50]. They take photos for the same reasons that sighted people do: sharing
+<br/>daily moments with their sighted friends and family [30,32]. A prior study showed that visually impaired
+<br/>people shared a relatively large number of photos on Facebook—only slightly less than their sighted
+<br/>counterparts [50].
+<br/>
+<br/> PACM on Human-Computer Interaction, Vol. 1, No. 2, Article 121. Publication date: November 2017
+</td><td>('2582568', 'Yuhang Zhao', 'yuhang zhao')<br/>('1968133', 'Shaomei Wu', 'shaomei wu')<br/>('39685591', 'Lindsay Reynolds', 'lindsay reynolds')<br/>('3283573', 'Shiri Azenkot', 'shiri azenkot')</td><td></td></tr><tr><td>2a02355c1155f2d2e0cf7a8e197e0d0075437b19</td><td></td><td></td><td></td></tr><tr><td>2a171f8d14b6b8735001a11c217af9587d095848</td><td>Learning Social Relation Traits from Face Images
+<br/><b>The Chinese University of Hong Kong</b></td><td>('3152448', 'Zhanpeng Zhang', 'zhanpeng zhang')<br/>('1693209', 'Ping Luo', 'ping luo')<br/>('1717179', 'Chen Change Loy', 'chen change loy')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td>zz013@ie.cuhk.edu.hk, pluo@ie.cuhk.edu.hk, ccloy@ie.cuhk.edu.hk, xtang@ie.cuhk.edu.hk
+</td></tr><tr><td>2aea27352406a2066ddae5fad6f3f13afdc90be9</td><td></td><td></td><td></td></tr><tr><td>2a0623ae989f2236f5e1fe3db25ab708f5d02955</td><td>3D Face Modelling for 2D+3D Face Recognition
+<br/>J.R. Tena Rodr´ıguez
+<br/>Submitted for the Degree of
+<br/>Doctor of Philosophy
+<br/>from the
+<br/><b>University of Surrey</b><br/>Centre for Vision, Speech and Signal Processing
+<br/>School of Electronics and Physical Sciences
+<br/><b>University of Surrey</b><br/>Guildford, Surrey GU2 7XH, U.K.
+<br/>November 2007
+<br/>c(cid:13) J.R. Tena Rodr´ıguez 2007
+</td><td></td><td></td></tr><tr><td>2ad0ee93d029e790ebb50574f403a09854b65b7e</td><td>Acquiring Linear Subspaces for Face
+<br/>Recognition under Variable Lighting
+<br/>David Kriegman, Senior Member, IEEE
+</td><td>('2457452', 'Kuang-chih Lee', 'kuang-chih lee')<br/>('1788818', 'Jeffrey Ho', 'jeffrey ho')</td><td></td></tr><tr><td>2afdda6fb85732d830cea242c1ff84497cd5f3cb</td><td>Face Image Retrieval by Using Haar Features
+<br/><b>Institute ofInformation Science, Academia Sinica, Taipei, Taiwan</b><br/><b>Graduate Institute ofNetworking and Multimedia, National Taiwan University, Taipei, Taiwan</b><br/><b>Tamkang University, Taipei, Taiwan</b></td><td>('2609751', 'Bau-Cheng Shen', 'bau-cheng shen')<br/>('1720473', 'Chu-Song Chen', 'chu-song chen')<br/>('1679560', 'Hui-Huang Hsu', 'hui-huang hsu')</td><td>{bcshen, song} @ iis.sinica. edu. tw, h_hsu@mail. tku. edu. tw
+</td></tr><tr><td>2ab034e1f54c37bfc8ae93f7320160748310dc73</td><td>Siamese Capsule Networks
+<br/>James O’ Neill
+<br/>Department of Computer Science
+<br/><b>University of Liverpool</b><br/>Liverpool, L69 3BX
+</td><td></td><td>james.o-neill@liverpool.ac.uk
+</td></tr><tr><td>2ff9618ea521df3c916abc88e7c85220d9f0ff06</td><td>Facial Tic Detection Using Computer Vision
+<br/>Christopher D. Leveille
+<br/>March 20, 2014
+</td><td>('40579411', 'Aaron Cass', 'aaron cass')</td><td></td></tr><tr><td>2fda461869f84a9298a0e93ef280f79b9fb76f94</td><td>OpenFace: an open source facial behavior analysis toolkit
+<br/>Tadas Baltruˇsaitis
+</td><td>('39626495', 'Peter Robinson', 'peter robinson')<br/>('1767184', 'Louis-Philippe Morency', 'louis-philippe morency')</td><td>Tadas.Baltrusaitis@cl.cam.ac.uk
+<br/>Peter.Robinson@cl.cam.ac.uk
+<br/>morency@cs.cmu.edu
+</td></tr><tr><td>2ff9ffedfc59422a8c7dac418a02d1415eec92f1</td><td>Face Verification Using Boosted Cross-Image Features
+<br/><b>University of Central Florida</b><br/><b>University of California, Berkeley</b><br/>Orlando, FL
+<br/>Berkeley, CA
+<br/><b>University of Central Florida</b><br/>Orlando, FL
+</td><td>('1720307', 'Dong Zhang', 'dong zhang')<br/>('2405613', 'Omar Oreifej', 'omar oreifej')<br/>('1745480', 'Mubarak Shah', 'mubarak shah')</td><td>dzhang@cs.ucf.edu
+<br/>oreifej@eecs.berkeley.edu
+<br/>shah@crcv.ucf.edu
+</td></tr><tr><td>2fdce3228d384456ea9faff108b9c6d0cf39e7c7</td><td></td><td></td><td></td></tr><tr><td>2ffcd35d9b8867a42be23978079f5f24be8d3e35</td><td>
+<br/>ISSN XXXX XXXX © 2018 IJESC
+<br/>
+<br/>
+<br/>Research Article Volume 8 Issue No.6
+<br/>Satellite based Image Processing using Data mining
+<br/>E.Malleshwari1, S.Nirmal Kumar2, J.Dhinesh3
+<br/>Professor1, Assistant Professor2, PG Scholar3
+<br/>Department of Information Technology1, 2, Master of Computer Applications3
+<br/><b>Vel Tech High Tech Dr Rangarajan Dr Sakunthala Engineering College, Avadi, Chennai, India</b></td><td></td><td></td></tr><tr><td>2f7e9b45255c9029d2ae97bbb004d6072e70fa79</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>cvpaper.challenge in 2015
+<br/>A review of CVPR2015 and DeepSurvey
+<br/>Nakamura
+<br/>Received: date / Accepted: date
+</td><td>('1730200', 'Hirokatsu Kataoka', 'hirokatsu kataoka')<br/>('29998543', 'Hironori Hoshino', 'hironori hoshino')<br/>('3407486', 'Takaaki Imanari', 'takaaki imanari')</td><td></td></tr><tr><td>2f53b97f0de2194d588bc7fb920b89cd7bcf7663</td><td>Facial Expression Recognition Using Sparse
+<br/>Gaussian Conditional Random Field
+<br/>School of Electrical and Computer Engineering
+<br/>School of Electrical and Computer Engineering
+<br/><b>Shiraz University</b><br/>Shiraz, Iran
+<br/><b>Shiraz University</b><br/>Shiraz, Iran
+</td><td>('37514045', 'Mohammadamin Abbasnejad', 'mohammadamin abbasnejad')<br/>('2229932', 'Mohammad Ali Masnadi-Shirazi', 'mohammad ali masnadi-shirazi')</td><td>Email: amin.abbasnejad@gmail.com
+<br/>Email: mmasnadi@shirazu.ac.ir
+</td></tr><tr><td>2f16baddac6af536451b3216b02d3480fc361ef4</td><td>Web-Scale Training for Face
+<br/>Identification
+<br/>1 Facebook AI Research
+<br/><b>Tel Aviv University</b></td><td>('2909406', 'Ming Yang', 'ming yang')<br/>('2188620', 'Yaniv Taigman', 'yaniv taigman')</td><td></td></tr><tr><td>2f489bd9bfb61a7d7165a2f05c03377a00072477</td><td>JIA, YANG: STRUCTURED SEMI-SUPERVISED FOREST
+<br/>Structured Semi-supervised Forest for
+<br/>Facial Landmarks Localization with Face
+<br/>Mask Reasoning
+<br/>1 Department of Computer Science
+<br/>The Univ. of Hong Kong, HK
+<br/>2 School of EECS
+<br/>Queen Mary Univ. of London, UK
+<br/>Angran Lin1
+</td><td>('34760532', 'Xuhui Jia', 'xuhui jia')<br/>('2966679', 'Heng Yang', 'heng yang')<br/>('40392393', 'Kwok-Ping Chan', 'kwok-ping chan')<br/>('1744405', 'Ioannis Patras', 'ioannis patras')</td><td>xhjia@cs.hku.hk
+<br/>heng.yang@qmul.ac.uk
+<br/>arlin@cs.hku.hk
+<br/>kpchan@cs.hku.hk
+<br/>i.patras@qmul.ac.uk
+</td></tr><tr><td>2f2aa67c5d6dbfaf218c104184a8c807e8b29286</td><td>Video Analytics for Surveillance Camera Networks
+<br/>(Invited Paper)
+<br/><b>Interactive and Digital Media Institute</b><br/><b>National University of Singapore, Singapore</b></td><td>('1986874', 'Lekha Chaisorn', 'lekha chaisorn')<br/>('3026404', 'Yongkang Wong', 'yongkang wong')</td><td></td></tr><tr><td>2f16459e2e24dc91b3b4cac7c6294387d4a0eacf</td><td></td><td></td><td></td></tr><tr><td>2f59f28a1ca3130d413e8e8b59fb30d50ac020e2</td><td>Children Gender Recognition Under Unconstrained
+<br/>Conditions Based on Contextual Information
+<br/>Joint Research Centre, European Commission, Ispra, Italy
+</td><td>('3309307', 'Riccardo Satta', 'riccardo satta')<br/>('1907426', 'Javier Galbally', 'javier galbally')<br/>('2730666', 'Laurent Beslay', 'laurent beslay')</td><td>Email: {riccardo.satta,javier.galbally,laurent.beslay}@jrc.ec.europa.eu
+</td></tr><tr><td>2f78e471d2ec66057b7b718fab8bfd8e5183d8f4</td><td>SOFTWARE ENGINEERING
+<br/>VOLUME: 14 | NUMBER: 5 | 2016 | DECEMBER
+<br/>An Investigation of a New Social Networks
+<br/>Contact Suggestion Based on Face Recognition
+<br/>Algorithm
+<br/>1Modeling Evolutionary Algorithms Simulation and Artificial Intelligence, Faculty of Electrical & Electronics
+<br/><b>Engineering, Ton Duc Thang University, 19 Nguyen Huu Tho Street, Ho Chi Minh City, Vietman</b><br/>2Department of Computer Science, Faculty of Electrical Engineering and Computer Science,
+<br/><b>VSB Technical University of Ostrava, 17. listopadu 15, 708 33 Ostrava, Czech Republic</b><br/>DOI: 10.15598/aeee.v14i5.1116
+</td><td>('1681072', 'Ivan ZELINKA', 'ivan zelinka')<br/>('1856530', 'Petr SALOUN', 'petr saloun')<br/>('2053234', 'Jakub STONAWSKI', 'jakub stonawski')<br/>('2356663', 'Adam ONDREJKA', 'adam ondrejka')</td><td>ivan.zelinka@tdt.edu.vn, petr.saloun@vsb.cz, stonawski.jakub@gmail.com, adam.ondrejka@gmail.com
+</td></tr><tr><td>2fc43c2c3f7ad1ca7a1ce32c5a9a98432725fb9a</td><td>Hierarchical Video Generation from Orthogonal
+<br/>Information: Optical Flow and Texture
+<br/><b>The University of Tokyo</b><br/><b>The University of Tokyo</b><br/><b>The University of Tokyo</b><br/><b>The University of Tokyo / RIKEN</b></td><td>('8197937', 'Katsunori Ohnishi', 'katsunori ohnishi')<br/>('48333400', 'Shohei Yamamoto', 'shohei yamamoto')<br/>('3250559', 'Yoshitaka Ushiku', 'yoshitaka ushiku')<br/>('1790553', 'Tatsuya Harada', 'tatsuya harada')</td><td>ohnishi@mi.t.u-tokyo.ac.jp
+<br/>yamamoto@mi.t.u-tokyo.ac.jp
+<br/>ushiku@mi.t.u-tokyo.ac.jp
+<br/>harada@mi.t.u-tokyo.ac.jp
+</td></tr><tr><td>2f88d3189723669f957d83ad542ac5c2341c37a5</td><td>Downloaded From: https://www.spiedigitallibrary.org/journals/Journal-of-Electronic-Imaging on 9/13/2018
+<br/>Terms of Use: https://www.spiedigitallibrary.org/terms-of-use
+<br/>Attribute-correlatedlocalregionsfordeeprelativeattributeslearningFenZhangXiangweiKongZeJiaFenZhang,XiangweiKong,ZeJia,“Attribute-correlatedlocalregionsfordeeprelativeattributeslearning,”J.Electron.Imaging27(4),043021(2018),doi:10.1117/1.JEI.27.4.043021. </td><td></td><td></td></tr><tr><td>2fda164863a06a92d3a910b96eef927269aeb730</td><td>Names and Faces in the News
+<br/>Computer Science Division
+<br/>U.C. Berkeley
+<br/>Berkeley, CA 94720
+</td><td>('1685538', 'Tamara L. Berg', 'tamara l. berg')<br/>('39668247', 'Alexander C. Berg', 'alexander c. berg')<br/>('34497462', 'Jaety Edwards', 'jaety edwards')<br/>('1965929', 'Michael Maire', 'michael maire')<br/>('6714943', 'Ryan White', 'ryan white')</td><td>daf@cs.berkeley.edu
+</td></tr><tr><td>2fa057a20a2b4a4f344988fee0a49fce85b0dc33</td><td></td><td></td><td></td></tr><tr><td>2f8ef26bfecaaa102a55b752860dbb92f1a11dc6</td><td>A Graph Based Approach to Speaker Retrieval in Talk
+<br/>Show Videos with Transcript-Based Supervision
+</td><td>('1859487', 'Yina Han', 'yina han')<br/>('1774346', 'Guizhong Liu', 'guizhong liu')<br/>('1692389', 'Hichem Sahbi', 'hichem sahbi')<br/>('1693574', 'Gérard Chollet', 'gérard chollet')</td><td></td></tr><tr><td>2f17f6c460e02bd105dcbf14c9b73f34c5fb59bd</td><td>Article
+<br/>Robust Face Recognition Using the Deep C2D-CNN
+<br/>Model Based on Decision-Level Fusion
+<br/><b>School of Electronic and Information, Yangtze University, Jingzhou 434023, China</b><br/><b>National Demonstration Center for Experimental Electrical and Electronic Education, Yangtze University</b><br/>Jingzhou 434023, China
+<br/>† These authors contributed equally to this work.
+<br/>Received: 20 May 2018; Accepted: 25 June 2018; Published: 28 June 2018
+</td><td>('1723081', 'Jing Li', 'jing li')<br/>('48216473', 'Tao Qiu', 'tao qiu')<br/>('41208300', 'Chang Wen', 'chang wen')<br/>('36203475', 'Kai Xie', 'kai xie')</td><td>201501479@yangtzeu.edu.cn (J.L.); 500646@yangtzeu.edu.cn (K.X.); wenfangqing@yangtzeu.edu.cn (F-Q.W.)
+<br/>School of Computer Science, Yangtze University, Jingzhou 434023, China; 201603441@yangtzeu.edu.cn
+<br/>* Correspondence: 400100@yangtzeu.edu.cn; Tel.: +86-136-9731-5482
+</td></tr><tr><td>2f184c6e2c31d23ef083c881de36b9b9b6997ce9</td><td>Polichotomies on Imbalanced Domains
+<br/>by One-per-Class Compensated Reconstruction Rule
+<br/>Integrated Research Centre, Universit´a Campus Bio-Medico of Rome, Rome, Italy
+</td><td>('1720099', 'Paolo Soda', 'paolo soda')</td><td>{r.dambrosio,p.soda}@unicampus.it
+</td></tr><tr><td>2f9c173ccd8c1e6b88d7fb95d6679838bc9ca51d</td><td></td><td></td><td></td></tr><tr><td>2f8183b549ec51b67f7dad717f0db6bf342c9d02</td><td></td><td></td><td></td></tr><tr><td>2f13dd8c82f8efb25057de1517746373e05b04c4</td><td>EVALUATION OF STATE-OF-THE-ART ALGORITHMS FOR REMOTE FACE
+<br/>RECOGNITION
+<br/><b>University</b><br/><b>of Maryland, College Park, MD 20742, USA</b></td><td>('38811046', 'Jie Ni', 'jie ni')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td></td></tr><tr><td>2fa1fc116731b2b5bb97f06d2ac494cb2b2fe475</td><td>A novel approach to personal photo album representation
+<br/>and management
+<br/>Universit`a di Palermo - Dipartimento di Ingegneria Informatica
+<br/>Viale delle Scienze, 90128, Palermo, Italy
+</td><td>('1762753', 'Edoardo Ardizzone', 'edoardo ardizzone')<br/>('9127836', 'Marco La Cascia', 'marco la cascia')<br/>('1698741', 'Filippo Vella', 'filippo vella')</td><td></td></tr><tr><td>2f2406551c693d616a840719ae1e6ea448e2f5d3</td><td>Age Estimation from Face Images:
+<br/>Human vs. Machine Performance
+<br/>Pattern Recognition & Image Processing Laboratory
+<br/><b>Michigan State University</b></td><td>('34393045', 'Hu Han', 'hu han')<br/>('40653304', 'Charles Otto', 'charles otto')<br/>('6680444', 'Anil K. Jain', 'anil k. jain')</td><td></td></tr><tr><td>2f882ceaaf110046e63123b495212d7d4e99f33d</td><td>High Frequency Component Compensation based Super-resolution
+<br/>Algorithm for Face Video Enhancement
+<br/>CVRR Lab, UC San Diego, La Jolla, CA 92093, USA
+</td><td>('1807917', 'Junwen Wu', 'junwen wu')</td><td></td></tr><tr><td>2f95340b01cfa48b867f336185e89acfedfa4d92</td><td>Face Expression Recognition with a 2-Channel
+<br/>Convolutional Neural Network
+<br/><b></b><br/>Vogt-K¨olln-Straße 30, 22527 Hamburg, Germany
+<br/>http://www.informatik.uni-hamburg.de/WTM/
+</td><td>('2283866', 'Dennis Hamester', 'dennis hamester')<br/>('1736513', 'Stefan Wermter', 'stefan wermter')</td><td>{hamester,barros,wermter}@informatik.uni-hamburg.de
+</td></tr><tr><td>2f7fc778e3dec2300b4081ba2a1e52f669094fcd</td><td>Action Representation Using Classifier Decision Boundaries
+<br/>3 Fatih Porikli1
+<br/>1Data61/CSIRO,
+<br/>2Australian Centre for Robotic Vision
+<br/><b>The Australian National University, Canberra, Australia</b></td><td>('36541522', 'Jue Wang', 'jue wang')<br/>('2691929', 'Anoop Cherian', 'anoop cherian')<br/>('2377076', 'Stephen Gould', 'stephen gould')</td><td>firstname.lastname@anu.edu.au
+</td></tr><tr><td>2fea258320c50f36408032c05c54ba455d575809</td><td></td><td></td><td></td></tr><tr><td>2f0e5a4b0ef89dd2cf55a4ef65b5c78101c8bfa1</td><td>Facial Expression Recognition Using a Hybrid CNN–SIFT Aggregator
+<br/>Mundher Ahmed Al-Shabi
+<br/>Tee Connie
+<br/>Faculty of Information Science and Technology (FIST)
+<br/><b>Multimedia University</b><br/>Melaka, Malaysia
+</td><td>('1700590', 'Wooi Ping Cheah', 'wooi ping cheah')</td><td></td></tr><tr><td>2faa09413162b0a7629db93fbb27eda5aeac54ca</td><td>NISTIR 7674
+<br/>Quantifying How Lighting and Focus
+<br/>Affect Face Recognition Performance
+<br/>Phillips, P. J.
+<br/>Beveridge, J. R.
+<br/>Draper, B.
+<br/>Bolme, D.
+<br/>Givens, G. H.
+<br/>Lui, Y. M.
+<br/>1
+</td><td></td><td></td></tr><tr><td>2f5e057e35a97278a9d824545d7196c301072ebf</td><td>Capturing long-tail distributions of object subcategories
+<br/><b>University of California, Irvine</b><br/>Google Inc.
+<br/><b>University of California, Irvine</b></td><td>('32542103', 'Xiangxin Zhu', 'xiangxin zhu')<br/>('1838674', 'Dragomir Anguelov', 'dragomir anguelov')<br/>('1770537', 'Deva Ramanan', 'deva ramanan')</td><td>xzhu@ics.uci.edu
+<br/>dragomir@google.com
+<br/>dramanan@ics.uci.edu
+</td></tr><tr><td>2f04ba0f74df046b0080ca78e56898bd4847898b</td><td>Aggregate Channel Features for Multi-view Face Detection
+<br/>Center for Biometrics and Security Research & National Laboratory of Pattern Recognition
+<br/><b>Institute of Automation, Chinese Academy of Sciences, China</b></td><td>('1716231', 'Bin Yang', 'bin yang')<br/>('1721677', 'Junjie Yan', 'junjie yan')<br/>('1718623', 'Zhen Lei', 'zhen lei')<br/>('34679741', 'Stan Z. Li', 'stan z. li')</td><td>{jjyan,zlei,szli}@nlpr.ia.ac.cn
+<br/>yb.derek@gmail.com
+</td></tr><tr><td>433bb1eaa3751519c2e5f17f47f8532322abbe6d</td><td></td><td></td><td></td></tr><tr><td>4300fa1221beb9dc81a496cd2f645c990a7ede53</td><td></td><td></td><td></td></tr><tr><td>43010792bf5cdb536a95fba16b8841c534ded316</td><td>Towards General Motion-Based Face Recognition
+<br/><b>School of Computing, National University of Singapore, Singapore</b></td><td>('2268503', 'Ning Ye', 'ning ye')<br/>('1715286', 'Terence Sim', 'terence sim')</td><td>{yening,tsim}@comp.nus.edu.sg
+</td></tr><tr><td>43bb20ccfda7b111850743a80a5929792cb031f0</td><td>PhD Dissertation
+<br/>International Doctorate School in Information and
+<br/>Communication Technologies
+<br/><b>DISI - University of Trento</b><br/>Discrimination of Computer Generated
+<br/>versus Natural Human Faces
+<br/>Advisor:
+<br/>Prof. Giulia Boato
+<br/>Universit`a degli Studi di Trento
+<br/>Co-Advisor:
+<br/>Prof. Francesco G. B. De Natale
+<br/>Universit`a degli Studi di Trento
+<br/>February 2014
+</td><td>('2598811', 'Duc-Tien Dang-Nguyen', 'duc-tien dang-nguyen')</td><td></td></tr><tr><td>438c4b320b9a94a939af21061b4502f4a86960e3</td><td>Reconstruction-Based Disentanglement for Pose-invariant Face Recognition
+<br/><b>Rutgers, The State University of New Jersey</b><br/><b>University of California, San Diego</b><br/>‡ NEC Laboratories America
+</td><td>('4340744', 'Xi Peng', 'xi peng')<br/>('39960064', 'Xiang Yu', 'xiang yu')<br/>('1729571', 'Kihyuk Sohn', 'kihyuk sohn')<br/>('1711560', 'Dimitris N. Metaxas', 'dimitris n. metaxas')</td><td>{xipeng.cs, dnm}@rutgers.edu, {xiangyu,ksohn,manu}@nec-labs.com
+</td></tr><tr><td>439ac8edfa1e7cbc65474cab544a5b8c4c65d5db</td><td>SIViP (2011) 5:401–413
+<br/>DOI 10.1007/s11760-011-0244-6
+<br/>ORIGINAL PAPER
+<br/>Face authentication with undercontrolled pose and illumination
+<br/>Received: 15 September 2010 / Revised: 14 December 2010 / Accepted: 17 February 2011 / Published online: 7 August 2011
+<br/>© Springer-Verlag London Limited 2011
+</td><td>('1763890', 'Maria De Marsico', 'maria de marsico')</td><td></td></tr><tr><td>43f6953804964037ff91a4f45d5b5d2f8edfe4d5</td><td>Multi-Feature Fusion in Advanced Robotics Applications
+<br/>Institut für Informatik
+<br/>Technische Universität München
+<br/>D-85748 Garching, Germany
+</td><td>('1725709', 'Zahid Riaz', 'zahid riaz')<br/>('1685773', 'Christoph Mayer', 'christoph mayer')<br/>('1746229', 'Michael Beetz', 'michael beetz')<br/>('1699132', 'Bernd Radig', 'bernd radig')</td><td>{riaz,mayerc,beetz,radig}@in.tum.de
+</td></tr><tr><td>439ec47725ae4a3660e509d32828599a495559bf</td><td>Facial Expressions Tracking and Recognition: Database Protocols for Systems Validation
+<br/>and Evaluation
+</td><td></td><td></td></tr><tr><td>43e99b76ca8e31765d4571d609679a689afdc99e</td><td>Learning Dense Facial Correspondences in Unconstrained Images
+<br/><b>University of Southern California</b><br/>2Adobe Research
+<br/>3Pinscreen
+<br/><b>USC Institute for Creative Technologies</b></td><td>('9965153', 'Ronald Yu', 'ronald yu')<br/>('2059597', 'Shunsuke Saito', 'shunsuke saito')<br/>('3131569', 'Haoxiang Li', 'haoxiang li')<br/>('39686979', 'Duygu Ceylan', 'duygu ceylan')<br/>('1706574', 'Hao Li', 'hao li')</td><td></td></tr><tr><td>4377b03bbee1f2cf99950019a8d4111f8de9c34a</td><td>Selective Encoding for Recognizing Unreliably Localized Faces
+<br/><b>Institute for Advanced Computer Studies</b><br/><b>University of Maryland, College Park, MD</b></td><td>('40592297', 'Ang Li', 'ang li')<br/>('2852035', 'Vlad I. Morariu', 'vlad i. morariu')<br/>('1693428', 'Larry S. Davis', 'larry s. davis')</td><td>{angli, morariu, lsd}@umiacs.umd.edu
+</td></tr><tr><td>43a03cbe8b704f31046a5aba05153eb3d6de4142</td><td>Towards Robust Face Recognition from Video
+<br/>Image Science and Machine Vision Group
+<br/>Oak Ridge National Laboratory
+<br/>Oak Ridge, TN 37831-6010
+</td><td>('3211433', 'Jeffery R. Price', 'jeffery r. price')<br/>('2743462', 'Timothy F. Gee', 'timothy f. gee')</td><td>{pricejr, geetf}@ornl.gov
+</td></tr><tr><td>434bf475addfb580707208618f99c8be0c55cf95</td><td>UNDER CONSIDERATION FOR PUBLICATION IN PATTERN RECOGNITION LETTERS
+<br/>DeXpression: Deep Convolutional Neural
+<br/>Network for Expression Recognition
+<br/><b>German Research Center for Arti cial Intelligence (DFKI), Kaiserslautern, Germany</b><br/><b>University of Kaiserslautern, Gottlieb-Daimler-Str., Kaiserslautern 67663, Germany</b></td><td>('20651567', 'Peter Burkert', 'peter burkert')<br/>('3026604', 'Felix Trier', 'felix trier')<br/>('6149779', 'Muhammad Zeshan Afzal', 'muhammad zeshan afzal')<br/>('1703343', 'Andreas Dengel', 'andreas dengel')<br/>('1743758', 'Marcus Liwicki', 'marcus liwicki')</td><td>p burkert11@cs.uni-kl.de, f
+<br/>trier10@cs.uni-kl.de, afzal@iupr.com, andreas.dengel@dfki.de,
+<br/>liwicki@dfki.uni-kl.de
+</td></tr><tr><td>43836d69f00275ba2f3d135f0ca9cf88d1209a87</td><td>Ozaki et al. IPSJ Transactions on Computer Vision and
+<br/>Applications (2017) 9:20
+<br/>DOI 10.1186/s41074-017-0030-7
+<br/>IPSJ Transactions on Computer
+<br/>Vision and Applications
+<br/>RESEARCH PAPER
+<br/>Open Access
+<br/>Effective hyperparameter optimization
+<br/>using Nelder-Mead method in deep learning
+</td><td>('2167404', 'Yoshihiko Ozaki', 'yoshihiko ozaki')<br/>('30735847', 'Masaki Yano', 'masaki yano')<br/>('1703823', 'Masaki Onishi', 'masaki onishi')</td><td></td></tr><tr><td>4307e8f33f9e6c07c8fc2aeafc30b22836649d8c</td><td>Supervised Earth Mover’s Distance Learning
+<br/>and its Computer Vision Applications
+<br/><b>Stanford University, CA, United States</b></td><td>('1716453', 'Fan Wang', 'fan wang')<br/>('1744254', 'Leonidas J. Guibas', 'leonidas j. guibas')</td><td></td></tr><tr><td>435642641312364e45f4989fac0901b205c49d53</td><td>Face Model Compression
+<br/>by Distilling Knowledge from Neurons
+<br/><b>The Chinese University of Hong Kong</b><br/><b>The Chinese University of Hong Kong</b><br/><b>Shenzhen Key Lab of Comp. Vis. and Pat. Rec., Shenzhen Institutes of Advanced Technology, CAS, China</b></td><td>('1693209', 'Ping Luo', 'ping luo')<br/>('2042558', 'Zhenyao Zhu', 'zhenyao zhu')<br/>('3243969', 'Ziwei Liu', 'ziwei liu')<br/>('31843833', 'Xiaogang Wang', 'xiaogang wang')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td>{pluo,zz012,lz013,xtang}@ie.cuhk.edu.hk, {xgwang}@ee.cuhk.edu.hk
+</td></tr><tr><td>43aa40eaa59244c233f83d81f86e12eba8d74b59</td><td></td><td></td><td></td></tr><tr><td>4362368dae29cc66a47114d5ffeaf0534bf0159c</td><td>UACEE International Journal of Artificial Intelligence and Neural Networks ISSN:- 2250-3749 (online)
+<br/>Performance Analysis of FDA Based Face
+<br/>Recognition Using Correlation, ANN and SVM
+<br/>Department of Computer Engineering
+<br/>Department of Computer Engineering
+<br/>Department of Computer Engineering
+<br/>Anand, INDIA
+<br/>Anand, INDIA
+<br/>Anand, INDIA
+</td><td>('9318822', 'Mahesh Goyani', 'mahesh goyani')<br/>('40632096', 'Ronak Paun', 'ronak paun')<br/>('40803051', 'Sardar Patel', 'sardar patel')<br/>('40803051', 'Sardar Patel', 'sardar patel')<br/>('40803051', 'Sardar Patel', 'sardar patel')</td><td>e- mail : mgoyani@gmail.com
+<br/>e- mail : akashdhorajiya@gmail.com
+<br/>e- mail : ronak_paun@yahoo.com
+</td></tr><tr><td>43e268c118ac25f1f0e984b57bc54f0119ded520</td><td></td><td></td><td></td></tr><tr><td>4350bb360797a4ade4faf616ed2ac8e27315968e</td><td><b>MITSUBISHI ELECTRIC RESEARCH LABORATORIES</b><br/>http://www.merl.com
+<br/>Edge Suppression by Gradient Field
+<br/>Transformation using Cross-Projection
+<br/>Tensors
+<br/>TR2006-058
+<br/>June 2006
+</td><td>('1717566', 'Ramesh Raskar', 'ramesh raskar')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td></td></tr><tr><td>43476cbf2a109f8381b398e7a1ddd794b29a9a16</td><td>A Practical Transfer Learning Algorithm for Face Verification
+<br/>David Wipf
+</td><td>('2032273', 'Xudong Cao', 'xudong cao')<br/>('1716835', 'Fang Wen', 'fang wen')<br/>('3168114', 'Genquan Duan', 'genquan duan')<br/>('40055995', 'Jian Sun', 'jian sun')</td><td>{xudongca,davidwip,fangwen,genduan,jiansun}@microsoft.com
+</td></tr><tr><td>4353d0dcaf450743e9eddd2aeedee4d01a1be78b</td><td>Learning Discriminative LBP-Histogram Bins
+<br/>for Facial Expression Recognition
+<br/>Philips Research, High Tech Campus 36, Eindhoven 5656 AE, The Netherlands
+</td><td>('10795229', 'Caifeng Shan', 'caifeng shan')<br/>('3006670', 'Tommaso Gritti', 'tommaso gritti')</td><td>{caifeng.shan, tommaso.gritti}@philips.com
+</td></tr><tr><td>437a720c6f6fc1959ba95e48e487eb3767b4e508</td><td></td><td></td><td></td></tr><tr><td>436d80cc1b52365ed7b2477c0b385b6fbbb51d3b</td><td></td><td></td><td></td></tr><tr><td>434d6726229c0f556841fad20391c18316806f73</td><td>Detecting Visual Relationships with Deep Relational Networks
+<br/><b>The Chinese University of Hong Kong</b></td><td>('38222190', 'Bo Dai', 'bo dai')<br/>('2617419', 'Yuqi Zhang', 'yuqi zhang')<br/>('1807606', 'Dahua Lin', 'dahua lin')</td><td>db014@ie.cuhk.edu.hk
+<br/>zy016@ie.cuhk.edu.hk
+<br/>dhlin@ie.cuhk.edu.hk
+</td></tr><tr><td>43b8b5eeb4869372ef896ca2d1e6010552cdc4d4</td><td>Large-scale Supervised Hierarchical Feature Learning for Face Recognition
+<br/>Intel Labs China
+</td><td>('35423937', 'Jianguo Li', 'jianguo li')<br/>('6060281', 'Yurong Chen', 'yurong chen')</td><td></td></tr><tr><td>43ae4867d058453e9abce760ff0f9427789bab3a</td><td>951
+<br/>Graph Embedded Nonparametric Mutual
+<br/>Information For Supervised
+<br/>Dimensionality Reduction
+</td><td>('2784463', 'Dimitrios Bouzas', 'dimitrios bouzas')<br/>('2965236', 'Nikolaos Arvanitopoulos', 'nikolaos arvanitopoulos')<br/>('1737071', 'Anastasios Tefas', 'anastasios tefas')</td><td></td></tr><tr><td>435dc062d565ce87c6c20a5f49430eb9a4b573c4</td><td>to appear.
+<br/>Lighting Condition Adaptation
+<br/>for Perceived Age Estimation
+<br/>NEC Soft, Ltd., Japan
+<br/><b>Tokyo Institute of Technology, Japan</b><br/>NEC Soft, Ltd., Japan
+</td><td>('2163491', 'Kazuya Ueki', 'kazuya ueki')<br/>('1719221', 'Masashi Sugiyama', 'masashi sugiyama')<br/>('1853974', 'Yasuyuki Ihara', 'yasuyuki ihara')</td><td></td></tr><tr><td>430c4d7ad76e51d83bbd7ec9d3f856043f054915</td><td></td><td></td><td></td></tr><tr><td>438b88fe40a6f9b5dcf08e64e27b2719940995e0</td><td>Building a Classi(cid:2)cation Cascade for Visual Identi(cid:2)cation from One Example
+<br/>Computer Science, U.C. Berkeley
+<br/>Computer Science, UMass Amherst
+<br/>Computer Science, U.C. Berkeley
+</td><td>('3236352', 'Andras Ferencz', 'andras ferencz')<br/>('1714536', 'Erik G. Learned-Miller', 'erik g. learned-miller')<br/>('1689212', 'Jitendra Malik', 'jitendra malik')</td><td>ferencz@cs.berkeley.edu
+<br/>elm@cs.umass.edu
+<br/>malik@cs.berkeley.edu
+</td></tr><tr><td>433a6d6d2a3ed8a6502982dccc992f91d665b9b3</td><td>Transferring Landmark Annotations for
+<br/>Cross-Dataset Face Alignment
+<br/><b>The Chinese University of Hong Kong</b><br/><b>Tsinghua University</b></td><td>('2226254', 'Shizhan Zhu', 'shizhan zhu')<br/>('40475617', 'Cheng Li', 'cheng li')<br/>('1717179', 'Chen Change Loy', 'chen change loy')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td></td></tr><tr><td>438e7999c937b94f0f6384dbeaa3febff6d283b6</td><td>Face Detection, Bounding Box Aggregation and Pose Estimation for Robust
+<br/>Facial Landmark Localisation in the Wild
+<br/><b>Centre for Vision, Speech and Signal Processing, University of Surrey, Guildford GU2 7XH, UK</b><br/><b>School of Internet of Things Engineering, Jiangnan University, Wuxi 214122, China</b></td><td>('2976854', 'Zhen-Hua Feng', 'zhen-hua feng')<br/>('1748684', 'Josef Kittler', 'josef kittler')</td><td>{z.feng, j.kittler, m.a.rana, p.huber}@surrey.ac.uk, wu xiaojun@jiangnan.edu.cn
+</td></tr><tr><td>43776d1bfa531e66d5e9826ff5529345b792def7</td><td>Automatic Critical Event Extraction
+<br/>and Semantic Interpretation
+<br/>by Looking-Inside
+<br/>Laboratory for Intelligent and Safe Automobiles
+<br/><b>University of California, San Diego</b><br/>Sept 17th, 2015
+</td><td>('1841835', 'Sujitha Martin', 'sujitha martin')<br/>('1802326', 'Eshed Ohn-Bar', 'eshed ohn-bar')<br/>('1713989', 'Mohan M. Trivedi', 'mohan m. trivedi')</td><td></td></tr><tr><td>43fb9efa79178cb6f481387b7c6e9b0ca3761da8</td><td>Mixture of Parts Revisited: Expressive Part Interactions for Pose Estimation
+<br/>Anoop R Katti
+<br/>IIT Madras
+<br/>Chennai, India
+<br/>IIT Madras
+<br/>Chennai, India
+</td><td>('1717115', 'Anurag Mittal', 'anurag mittal')</td><td>akatti@cse.iitm.ac.in
+<br/>amittal@cse.iitm.ac.in
+</td></tr><tr><td>432d8cba544bf7b09b0455561fea098177a85db1</td><td>Published as a conference paper at ICLR 2017
+<br/>TOWARDS A NEURAL STATISTICIAN
+<br/>Harrison Edwards
+<br/>School of Informatics
+<br/><b>University of Edinburgh</b><br/>Edinburgh, UK
+<br/>Amos Storkey
+<br/>School of Informatics
+<br/><b>University of Edinburgh</b><br/>Edinburgh, UK
+</td><td></td><td>H.L.Edwards@sms.ed.ac.uk
+<br/>A.Storkey@ed.ac.uk
+</td></tr><tr><td>43ed518e466ff13118385f4e5d039ae4d1c000fb</td><td>Classification of Occluded Objects using Fast Recurrent
+<br/>Processing
+<br/>Ozgur Yilmaza,∗
+<br/><b>aTurgut Ozal University, Ankara Turkey</b></td><td></td><td></td></tr><tr><td>439647914236431c858535a2354988dde042ef4d</td><td>Face Illumination Normalization on Large and Small Scale Features
+<br/><b>School of Mathematics and Computational Science, Sun Yat-sen University, China</b><br/><b>School of Information Science and Technology, Sun Yat-sen University, China</b><br/>3 Guangdong Province Key Laboratory of Information Security, China,
+<br/><b>Hong Kong Baptist University</b></td><td>('2002129', 'Xiaohua Xie', 'xiaohua xie')<br/>('3333315', 'Wei-Shi Zheng', 'wei-shi zheng')<br/>('1768574', 'Pong C. Yuen', 'pong c. yuen')</td><td>Email: sysuxiexh@gmail.com, wszheng@ieee.org, stsljh@mail.sysu.edu.cn, pcyuen@comp.hkbu.edu.hk
+</td></tr><tr><td>43d7d0d0d0e2d6cf5355e60c4fe5b715f0a1101a</td><td>Pobrane z czasopisma Annales AI- Informatica http://ai.annales.umcs.pl
+<br/>Data: 04/05/2018 16:53:32
+<br/>U M CS
+</td><td></td><td></td></tr><tr><td>439ca6ded75dffa5ddea203dde5e621dc4a88c3e</td><td>Robust Real-time Performance-driven 3D Face Tracking
+<br/><b>School of Computer Science and Engineering, Nanyang Technological University, Singapore</b><br/><b>Rutgers University, USA</b></td><td>('1736042', 'Vladimir Pavlovic', 'vladimir pavlovic')<br/>('1688642', 'Jianfei Cai', 'jianfei cai')<br/>('1775268', 'Tat-Jen Cham', 'tat-jen cham')</td><td>{hxp1,vladimir}@cs.rutgers.edu
+<br/>{asjfcai,astfcham}@ntu.edu.sg
+</td></tr><tr><td>88e090ffc1f75eed720b5afb167523eb2e316f7f</td><td>Attribute-Based Transfer Learning for Object
+<br/>Categorization with Zero/One Training Example
+<br/><b>University of Maryland, College Park, MD, USA</b></td><td>('3099583', 'Xiaodong Yu', 'xiaodong yu')<br/>('1697493', 'Yiannis Aloimonos', 'yiannis aloimonos')</td><td>xdyu@umiacs.umd.edu, yiannis@cs.umd.edu
+</td></tr><tr><td>8877e0b2dc3d2e8538c0cfee86b4e8657499a7c4</td><td>AUTOMATIC FACIAL EXPRESSION RECOGNITION FOR AFFECTIVE COMPUTING
+<br/>BASED ON BAG OF DISTANCES
+<br/><b>National Chung Cheng University, Chiayi, Taiwan, R.O.C</b><br/>E-mail: {hfs95p,wylin}cs.ccu.edu.tw
+<br/><b>National Taichung University of Science and Technology, Taichung, Taiwan, R.O.C</b></td><td>('2240934', 'Fu-Song Hsu', 'fu-song hsu')<br/>('1682393', 'Wei-Yang Lin', 'wei-yang lin')<br/>('2080026', 'Tzu-Wei Tsai', 'tzu-wei tsai')</td><td>E-mail: wei@nutc.edu.tw
+</td></tr><tr><td>88c6d4b73bd36e7b5a72f3c61536c8c93f8d2320</td><td>Image patch modeling in a light field
+<br/>Electrical Engineering and Computer Sciences
+<br/><b>University of California at Berkeley</b><br/>Technical Report No. UCB/EECS-2014-81
+<br/>http://www.eecs.berkeley.edu/Pubs/TechRpts/2014/EECS-2014-81.html
+<br/>May 15, 2014
+</td><td>('2040369', 'Zeyu Li', 'zeyu li')</td><td></td></tr><tr><td>889bc64c7da8e2a85ae6af320ae10e05c4cd6ce7</td><td>174
+<br/>Using Support Vector Machines to Enhance the
+<br/>Performance of Bayesian Face Recognition
+</td><td>('1911510', 'Zhifeng Li', 'zhifeng li')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td></td></tr><tr><td>88a898592b4c1dfd707f04f09ca58ec769a257de</td><td>MobileFace: 3D Face Reconstruction
+<br/>with Efficient CNN Regression
+<br/>1 VisionLabs, Amsterdam, The Netherlands
+<br/>2 Inria, WILLOW, Departement d’Informatique de l’Ecole Normale Superieure, PSL
+<br/><b>Research University, ENS/INRIA/CNRS UMR 8548, Paris, France</b></td><td>('51318557', 'Nikolai Chinaev', 'nikolai chinaev')<br/>('2564281', 'Alexander Chigorin', 'alexander chigorin')<br/>('1785596', 'Ivan Laptev', 'ivan laptev')</td><td>{n.chinaev, a.chigorin}@visionlabs.ru
+<br/>ivan.laptev@inria.fr
+</td></tr><tr><td>88f7a3d6f0521803ca59fde45601e94c3a34a403</td><td>Semantic Aware Video Transcription
+<br/>Using Random Forest Classifiers
+<br/><b>University of Southern California, Institute for Robotics and Intelligent Systems</b><br/>Los Angeles, CA 90089, USA
+</td><td>('1726241', 'Chen Sun', 'chen sun')</td><td></td></tr><tr><td>8812aef6bdac056b00525f0642702ecf8d57790b</td><td>A Unified Features Approach to Human Face Image
+<br/>Analysis and Interpretation
+<br/>Department of Informatics,
+<br/>Technische Universit¨at M¨unchen
+<br/>85748 Garching, Germany
+</td><td>('1725709', 'Zahid Riaz', 'zahid riaz')<br/>('2110952', 'Suat Gedikli', 'suat gedikli')<br/>('1699132', 'Bernd Radig', 'bernd radig')</td><td>{riaz|gedikli|beetz|radig}@in.tum.de
+</td></tr><tr><td>881066ec43bcf7476479a4146568414e419da804</td><td>From Traditional to Modern : Domain Adaptation for
+<br/>Action Classification in Short Social Video Clips
+<br/>Center for Visual Information Technology, IIIT Hyderabad, India
+</td><td>('2461059', 'Aditya Singh', 'aditya singh')<br/>('3448416', 'Saurabh Saini', 'saurabh saini')<br/>('1962817', 'Rajvi Shah', 'rajvi shah')</td><td></td></tr><tr><td>8813368c6c14552539137aba2b6f8c55f561b75f</td><td>Trunk-Branch Ensemble Convolutional Neural
+<br/>Networks for Video-based Face Recognition
+</td><td>('37990555', 'Changxing Ding', 'changxing ding')<br/>('1692693', 'Dacheng Tao', 'dacheng tao')</td><td></td></tr><tr><td>88e2574af83db7281c2064e5194c7d5dfa649846</td><td>Hindawi Publishing Corporation
+<br/>Computational Intelligence and Neuroscience
+<br/>Volume 2017, Article ID 4579398, 11 pages
+<br/>http://dx.doi.org/10.1155/2017/4579398
+<br/>Research Article
+<br/>A Robust Shape Reconstruction Method for Facial Feature
+<br/>Point Detection
+<br/><b>School of Automation Engineering, University of Electronic Science and Technology of China, No. 2006, Xiyuan Ave</b><br/>West Hi-Tech Zone, Chengdu 611731, China
+<br/>Received 24 October 2016; Revised 18 January 2017; Accepted 30 January 2017; Published 19 February 2017
+<br/>Academic Editor: Ezequiel L´opez-Rubio
+<br/>which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
+<br/>Facial feature point detection has been receiving great research advances in recent years. Numerous methods have been developed
+<br/>and applied in practical face analysis systems. However, it is still a quite challenging task because of the large variability in expression
+<br/>and gestures and the existence of occlusions in real-world photo shoot. In this paper, we present a robust sparse reconstruction
+<br/>method for the face alignment problems. Instead of a direct regression between the feature space and the shape space, the concept
+<br/>of shape increment reconstruction is introduced. Moreover, a set of coupled overcomplete dictionaries termed the shape increment
+<br/>dictionary and the local appearance dictionary are learned in a regressive manner to select robust features and fit shape increments.
+<br/>Additionally, to make the learned model more generalized, we select the best matched parameter set through extensive validation
+<br/>tests. Experimental results on three public datasets demonstrate that the proposed method achieves a better robustness over the
+<br/>state-of-the-art methods.
+<br/>1. Introduction
+<br/>In most literatures, facial feature points are also referred to
+<br/>facial landmarks or facial fiducial points. These points mainly
+<br/>locate around edges or corners of facial components such as
+<br/>eyebrows, eyes, mouth, nose, and jaw (see Figure 1). Existing
+<br/>databases for method comparison are labeled with different
+<br/>number of feature points, varying from the minimum 5-point
+<br/>configuration [1] to the maximal 194-point configuration
+<br/>[2]. Generally facial feature point detection is a supervised
+<br/>or semisupervised learning process that trains model on
+<br/>a large number of labeled facial images. It starts from a
+<br/>face detection process and then predicts facial landmarks
+<br/>inside the detected face bounding box. The localized facial
+<br/>feature points can be utilized for various face analysis
+<br/>tasks, for example, face recognition [3], facial animation
+<br/>[4], facial expression detection [5], and head pose tracking
+<br/>[6].
+<br/>In recent years, regression-based methods have gained
+<br/>increasing attention for robust facial feature point detection.
+<br/>Among these methods, a cascade framework is adopted to
+<br/>recursively estimate the face shape 𝑆 of an input image,
+<br/>which is the concatenation of facial feature point coordinates.
+<br/>Beginning with an initial shape 𝑆(1), 𝑆 is updated by inferring
+<br/>a shape increment Δ𝑆 from the previous shape:
+<br/>Δ𝑆(𝑡) = 𝑊(𝑡)Φ(𝑡) (𝐼, 𝑆(𝑡)) ,
+<br/>(1)
+<br/>where Δ𝑆(𝑡) and 𝑊(𝑡) are the shape increment and linear
+<br/>regression matrix after 𝑡 iterations, respectively. As the input
+<br/>variable of the mapping function Φ(𝑡), 𝐼 denotes the image
+<br/>appearance and 𝑆(𝑡) denotes the corresponding face shape.
+<br/>The regression goes to the next iteration by the additive
+<br/>formula:
+<br/>𝑆(𝑡) = 𝑆(𝑡−1) + Δ𝑆(𝑡−1).
+<br/>(2)
+<br/>In this paper, we propose a sparse reconstruction method
+<br/>that embeds sparse coding in the reconstruction of shape
+<br/>increment. As a very popular signal coding algorithm, sparse
+<br/>coding has been recently successfully applied to the fields
+<br/>of computer vision and machine learning, such as feature
+<br/>selection and clustering analysis, image classification, and
+<br/>face recognition [7–11]. In our method, sparse overcomplete
+<br/>dictionaries are learned to encode various facial poses and
+<br/>local textures considering the complex nature of imaging
+</td><td>('9684590', 'Shuqiu Tan', 'shuqiu tan')<br/>('2915473', 'Dongyi Chen', 'dongyi chen')<br/>('9486108', 'Chenggang Guo', 'chenggang guo')<br/>('2122143', 'Zhiqi Huang', 'zhiqi huang')<br/>('9684590', 'Shuqiu Tan', 'shuqiu tan')</td><td>Correspondence should be addressed to Shuqiu Tan; tanshuqiu123136@hotmail.com and Dongyi Chen; dychen@uestc.edu.cn
+</td></tr><tr><td>88bef50410cea3c749c61ed68808fcff84840c37</td><td>Sparse Representations of Image Gradient Orientations for Visual Recognition
+<br/>and Tracking
+<br/><b>Imperial College London</b><br/><b>EEMCS, University of Twente</b><br/>180 Queen’s Gate, London SW7 2AZ, U.K.
+<br/>Drienerlolaan 5, 7522 NB Enschede,
+<br/>The Netherlands
+</td><td>('2610880', 'Georgios Tzimiropoulos', 'georgios tzimiropoulos')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')<br/>('1694605', 'Maja Pantic', 'maja pantic')</td><td>{gt204,s.zafeiriou,m.pantic}@imperial.ac.uk
+<br/>PanticM@cs.utwente.nl
+</td></tr><tr><td>883006c0f76cf348a5f8339bfcb649a3e46e2690</td><td>Weakly Supervised Pain Localization using Multiple Instance Learning
+</td><td>('39707211', 'Karan Sikka', 'karan sikka')<br/>('1735697', 'Abhinav Dhall', 'abhinav dhall')</td><td></td></tr><tr><td>88850b73449973a34fefe491f8836293fc208580</td><td>www.ijaret.org Vol. 2, Issue I, Jan. 2014
+<br/> ISSN 2320-6802
+<br/>INTERNATIONAL JOURNAL FOR ADVANCE RESEARCH IN
+<br/>ENGINEERING AND TECHNOLOGY
+<br/>WINGS TO YOUR THOUGHTS…..
+<br/>XBeats-An Emotion Based Music Player
+<br/>1U.G. Student, Dept. of Computer Engineering,
+<br/><b>D.J. Sanghvi College of Engineering</b><br/>Vile Parle (W), Mumbai-400056.
+<br/>2 U.G. Student, Dept. of Computer Engineering,
+<br/><b>D.J. Sanghvi College of Engineering</b><br/>Vile Parle (W), Mumbai-400056.
+<br/>3 U.G. Student, Dept. of Computer Engineering,
+<br/><b>D.J. Sanghvi College of Engineering</b><br/>Vile Parle (W), Mumbai-400056.
+<br/>4 Assistant Professor, Dept. of Computer Engineering,
+<br/><b>D.J. Sanghvi College of Engineering</b><br/>Vile Parle (W), Mumbai-400056.
+</td><td>('40770722', 'Sayali Chavan', 'sayali chavan')<br/>('2122358', 'Dipali Bhatt', 'dipali bhatt')</td><td>sayalichavan17@gmail.com
+<br/>ekta.malkan27@yahoo.in
+<br/>dipupb1392@gmail.com
+<br/>prakashparanjape2012@gmail.com
+</td></tr><tr><td>8820d1d3fa73cde623662d92ecf2e3faf1e3f328</td><td>Continuous Video to Simple Signals for Swimming Stroke Detection with
+<br/>Convolutional Neural Networks
+<br/><b>La Trobe University, Australia</b><br/><b>Australian Institute of Sport</b></td><td>('38689120', 'Brandon Victor', 'brandon victor')<br/>('1787185', 'Zhen He', 'zhen he')<br/>('31548192', 'Stuart Morgan', 'stuart morgan')<br/>('2874225', 'Dino Miniutti', 'dino miniutti')</td><td>{b.victor,z.he,s.morgan}@latrobe.edu.au
+<br/>Dino.Miniutti@ausport.gov.au
+</td></tr><tr><td>88f2952535df5859c8f60026f08b71976f8e19ec</td><td>A neural network framework for face
+<br/>recognition by elastic bunch graph matching
+</td><td>('37048377', 'Francisco A. Pujol López', 'francisco a. pujol lópez')<br/>('3144590', 'Higinio Mora Mora', 'higinio mora mora')<br/>('2260459', 'José A. Girona Selva', 'josé a. girona selva')</td><td></td></tr><tr><td>8818b12aa0ff3bf0b20f9caa250395cbea0e8769</td><td>Fashion Conversation Data on Instagram
+<br/>∗Graduate School of Culture Technology, KAIST, South Korea
+<br/>†Department of Communication Studies, UCLA, USA
+</td><td>('3459091', 'Yu-i Ha', 'yu-i ha')<br/>('2399803', 'Sejeong Kwon', 'sejeong kwon')<br/>('1775511', 'Meeyoung Cha', 'meeyoung cha')<br/>('1834047', 'Jungseock Joo', 'jungseock joo')</td><td></td></tr><tr><td>8862a573a42bbaedd392e9e634c1ccbfd177a01d</td><td>3D Face Tracking and Texture Fusion in the Wild
+<br/>Centre for Vision, Speech and Signal Processing
+<br/>Image Understanding and Interactive Robotics
+<br/><b>University of Surrey</b><br/>Guildford, GU2 7XH, United Kingdom
+<br/>Contact: http://www.patrikhuber.ch
+<br/><b>Reutlingen University</b><br/>D-72762 Reutlingen, Germany
+</td><td>('39976184', 'Patrik Huber', 'patrik huber')<br/>('1748684', 'Josef Kittler', 'josef kittler')<br/>('16764402', 'Philipp Kopp', 'philipp kopp')</td><td></td></tr><tr><td>887b7676a4efde616d13f38fcbfe322a791d1413</td><td>Deep Temporal Appearance-Geometry Network
+<br/>for Facial Expression Recognition
+<br/><b>Korea Advanced Institute of Science and Technology</b><br/><b>Electronics and Telecommunications Research Institute</b></td><td>('8271137', 'Injae Lee', 'injae lee')<br/>('1769295', 'Junmo Kim', 'junmo kim')<br/>('1800903', 'Heechul Jung', 'heechul jung')</td><td>{heechul, haeng, sunny0414, junmo.kim}@kaist.ac.kr†, {ninja, hyun}@etri.re.kr‡
+</td></tr><tr><td>8878871ec2763f912102eeaff4b5a2febfc22fbe</td><td>3781
+<br/>Human Action Recognition in Unconstrained
+<br/>Videos by Explicit Motion Modeling
+</td><td>('1717861', 'Yu-Gang Jiang', 'yu-gang jiang')<br/>('9227981', 'Qi Dai', 'qi dai')<br/>('39059457', 'Wei Liu', 'wei liu')<br/>('1713721', 'Xiangyang Xue', 'xiangyang xue')<br/>('1751681', 'Chong-Wah Ngo', 'chong-wah ngo')</td><td></td></tr><tr><td>8855d6161d7e5b35f6c59e15b94db9fa5bbf2912</td><td>COGNITION IN PREGNANCY AND THE POSTPARTUM PERIOD
+</td><td></td><td></td></tr><tr><td>8895d6ae9f095a8413f663cc83f5b7634b3dc805</td><td>BEHL ET AL: INCREMENTAL TUBE CONSTRUCTION FOR HUMAN ACTION DETECTION 1
+<br/>Incremental Tube Construction for Human
+<br/>Action Detection
+<br/>Harkirat Singh Behl1
+<br/>1 Department of Engineering Science
+<br/><b>University of Oxford</b><br/>Oxford, UK
+<br/>2 Think Tank Team
+<br/>Samsung Research America
+<br/>Mountain View, CA
+<br/>3 Dept. of Computing and
+<br/>Communication Technologies
+<br/><b>Oxford Brookes University</b><br/>Oxford, UK
+<br/>(a) Illustrative results on a video sequence from the LIRIS-HARL dataset [23]. Two people enter a room
+<br/>Figure 1:
+<br/>and put/take an object from a box (frame 150). They then shake hands (frame 175) and start having a discussion
+<br/>(frame 350). In frame 450, another person enters the room, shakes hands, and then joins the discussion. Each
+<br/>action tube instance is numbered and coloured according to its action category. We selected this video to show that
+<br/>our tube construction algorithm can handle very complex situations in which multiple distinct action categories
+<br/>occur in sequence and at concurrent times. (b) Action tubes drawn as viewed from above, compared to (c) the
+<br/>ground truth action tubes.
+</td><td>('3019396', 'Michael Sapienza', 'michael sapienza')<br/>('1931660', 'Gurkirt Singh', 'gurkirt singh')<br/>('49348905', 'Suman Saha', 'suman saha')<br/>('1754181', 'Fabio Cuzzolin', 'fabio cuzzolin')<br/>('1730268', 'Philip H. S. Torr', 'philip h. s. torr')</td><td>harkirat@robots.ox.ac.uk
+<br/>m.sapienza@samsung.com
+<br/>gurkirt.singh-2015@brookes.ac.uk
+<br/>suman.saha-2014@brookes.ac.uk
+<br/>fabio.cuzzolin@brookes.ac.uk
+<br/>phst@robots.ox.ac.uk
+</td></tr><tr><td>88bee9733e96958444dc9e6bef191baba4fa6efa</td><td>Extending Face Identification to
+<br/>Open-Set Face Recognition
+<br/>Department of Computer Science
+<br/>Universidade Federal de Minas Gerais
+<br/>Belo Horizonte, Brazil
+</td><td>('2823797', 'Cassio E. dos Santos', 'cassio e. dos santos')<br/>('1679142', 'William Robson Schwartz', 'william robson schwartz')</td><td>{cass,william}@dcc.ufmg.br
+</td></tr><tr><td>88fd4d1d0f4014f2b2e343c83d8c7e46d198cc79</td><td>978-1-4799-9988-0/16/$31.00 ©2016 IEEE
+<br/>2697
+<br/>ICASSP 2016
+</td><td></td><td></td></tr><tr><td>887745c282edf9af40d38425d5fdc9b3fe139c08</td><td>FAME:
+<br/>Face Association through Model Evolution
+<br/><b>Bilkent University</b><br/>06800 Ankara/Turkey
+<br/>Pinar Duygulu
+<br/><b>Bilkent University</b><br/>06800 Ankara/Turkey
+</td><td>('2540074', 'Eren Golge', 'eren golge')</td><td>eren.golge@bilkent.edu.tr
+<br/>pinar.duygulu@gmail.com
+</td></tr><tr><td>9f6d04ce617d24c8001a9a31f11a594bd6fe3510</td><td>Personality and Individual Differences 52 (2012) 61–66
+<br/>Contents lists available at SciVerse ScienceDirect
+<br/>Personality and Individual Differences
+<br/>j o u r n a l h o m e p a g e : w w w . e l s e v i e r . c o m / l o c a t e / p a i d
+<br/>Attentional bias towards angry faces in trait-reappraisal
+<br/><b>1E1 WC Mackenzie Health Sciences Centre, University of Alberta, Edmonton, AB, Canada T6G 2R</b><br/>a r t i c l e
+<br/>i n f o
+<br/>a b s t r a c t
+<br/>Article history:
+<br/>Received 31 May 2011
+<br/>Received in revised form 26 August 2011
+<br/>Accepted 31 August 2011
+<br/>Available online 2 October 2011
+<br/>Keywords:
+<br/>Trait emotion regulation
+<br/>Reappraisal
+<br/>Attention
+<br/>Individual differences
+<br/>Dot-probe
+<br/>Emotion regulation (ER) strategies differ in when and how they influence emotion experience, expres-
+<br/>sion, and concomitant cognition. However, no study to date has directly compared cognition in individ-
+<br/>uals who have a clear disposition for either cognitive or behavioural ER strategies. The present study
+<br/>compared selective attention to angry faces in groups of high trait-suppressors (people who are hiding
+<br/>emotional reactions in response to emotional challenge) and high trait-reappraisers (people who cogni-
+<br/>tively reinterpret emotional events). Since reappraisers are also low trait-anxious and suppressors are
+<br/>high trait-anxious, high and low anxious control groups, both being low in trait-ER, were also included.
+<br/>Attention to angry faces was assessed using an emotional dot-probe task. Trait-reappraisers and high-
+<br/>anxious individuals both showed attentional biases towards angry faces. Trait-reappraisers’ vigilance
+<br/>for angry faces was significantly more pronounced compared to both trait-suppressors and low anxious
+<br/>controls. We suggest that threat prioritization in high trait-reappraisal may allow deeper cognitive pro-
+<br/>cessing of threat information without being associated with psychological maladjustment.
+<br/>Ó 2011 Elsevier Ltd. All rights reserved.
+<br/>1. Introduction
+<br/>An extensive literature suggests that cognition is influenced by
+<br/>the emotional connotation of to-be-processed information. Emo-
+<br/>tional events, especially negative emotional events, orient, attract
+<br/>and/or capture attention more so than neutral events. Evidence
+<br/>comes from studies using the emotional dot-probe paradigm
+<br/>(MacLeod & Mathews, 1988). This task measures selective atten-
+<br/>tion biases towards or away from emotional relative to neutral
+<br/>stimuli (see Methods for details). Several person variables influ-
+<br/>ence such biases. For example, high trait anxious individuals are
+<br/>more likely than low trait anxious individuals to show an atten-
+<br/>tional bias towards threatening stimuli (Frewen, Dozois, Joanisse,
+<br/>& Neufeld, 2008). Interestingly, trait anxiety seems to modify the
+<br/>ability to disengage attentional resources from the location of a
+<br/>threatening stimulus more so than the speed of orienting attention
+<br/>toward the stimulus location. For example, Fox, Russo, Bowles, and
+<br/>Dutton (2001) found that high anxious, but not low anxious indi-
+<br/>viduals responded slower to a dot-probe when an angry face, as
+<br/>opposed to a happy or a neutral face, appeared in a different screen
+<br/>location just prior. However, high anxious participants were not
+<br/>faster to respond to the dot-probe when it followed in the same
+<br/>location as the angry faces compared to happy or neutral faces
+<br/>(attentional orienting). Hence, trait anxiety seems associated with
+<br/><b>University of</b><br/><b>Calgary, 2500 University Dr., N.W. Calgary, AB, Canada T2N 1N4. Tel</b><br/>4667; fax: +1 403 282 8249.
+<br/>0191-8869/$ - see front matter Ó 2011 Elsevier Ltd. All rights reserved.
+<br/>doi:10.1016/j.paid.2011.08.030
+<br/>a tendency to dwell on (i.e., difficulty in disengaging attention),
+<br/>rather than to quickly orient toward, threatening stimuli such as
+<br/>angry facial expressions.
+<br/>information,
+<br/>Although it is relatively well-established that individual differ-
+<br/>ences in trait emotionality (i.e., high versus low trait anxiety) influ-
+<br/>ence attentional processing of emotional
+<br/>little is
+<br/>known about how attentional biases may interact with a person’s
+<br/>attempt to modulate their emotional responses. Recent findings
+<br/>in emotion regulation (ER) suggest that emotion regulative strate-
+<br/>gies differ in their consequences for the emotional response and
+<br/>concomitant cognition. To date, most studies of ER have compared
+<br/>cognitive and behavioural forms of ER, with the two most com-
+<br/>monly studied ER strategies being cognitive reappraisal and
+<br/>expressive suppression (Gross, 1998; Richards & Gross, 2000).
+<br/>According to Gross (1998), reappraisal involves cognitively chang-
+<br/>ing our appraisal of the emotional meaning of a stimulus in order
+<br/>to render it less emotional, and in so doing, down-regulating our
+<br/>own emotional response. In contrast, suppression involves the
+<br/>behavioural inhibition of overt reactions to emotional experiences
+<br/>(e.g., frowning) without changing the evaluation of the emotional
+<br/>stimulus itself.
+<br/>1.1. Instructed emotion regulation
+<br/>To examine the consequences of ER, researchers have tradition-
+<br/>ally exposed participants to an emotion-eliciting stimulus with an
+<br/>instruction to use a specific ER strategy to down-regulate (or more
+<br/>rarely, up-regulate) the resulting emotion. Because participants are
+</td><td>('6027810', 'Jody E. Arndt', 'jody e. arndt')<br/>('2726268', 'Esther Fujiwara', 'esther fujiwara')</td><td>E-mail address: jearndt@ucalgary.ca (J.E. Arndt).
+</td></tr><tr><td>9f499948121abb47b31ca904030243e924585d5f</td><td>Hierarchical Attention Network for Action
+<br/>Recognition in Videos
+<br/><b>Arizona State University</b><br/><b>Arizona State University</b><br/>Yahoo Research
+<br/>Neil O’Hare
+<br/>Yahoo Research
+<br/>Yahoo Research
+<br/><b>Arizona State University</b></td><td>('33513248', 'Yilin Wang', 'yilin wang')<br/>('2893721', 'Suhang Wang', 'suhang wang')<br/>('1736632', 'Jiliang Tang', 'jiliang tang')<br/>('1787097', 'Yi Chang', 'yi chang')<br/>('2913552', 'Baoxin Li', 'baoxin li')</td><td>ywang370@asu.edu
+<br/>suhang.wang@asu.edu
+<br/>jlt@yahoo-inc.com
+<br/>nohare@yahoo-inc.com
+<br/>yichang@yahoo-inc.com
+<br/>baoxin.li@asu.edu
+</td></tr><tr><td>9fc04a13eef99851136eadff52e98eb9caac919d</td><td>Rethinking the Camera Pipeline for Computer Vision
+<br/><b>Cornell University</b><br/><b>Carnegie Mellon University</b><br/><b>Cornell University</b></td><td>('2328520', 'Mark Buckler', 'mark buckler')<br/>('39131476', 'Suren Jayasuriya', 'suren jayasuriya')<br/>('2138184', 'Adrian Sampson', 'adrian sampson')</td><td>mab598@cornell.edu
+<br/>sjayasur@andrew.cmu.edu
+<br/>asampson@cs.cornell.edu
+</td></tr><tr><td>9f4078773c8ea3f37951bf617dbce1d4b3795839</td><td>Leveraging Inexpensive Supervision Signals
+<br/>for Visual Learning
+<br/>Technical Report Number:
+<br/>CMU-RI-TR-17-13
+<br/>a dissertation presented
+<br/>by
+<br/>to
+<br/><b>The Robotics Institute</b><br/>in partial fulfillment of the requirements
+<br/>for the degree of
+<br/>Master of Science
+<br/>in the subject of
+<br/>Robotics
+<br/><b>Carnegie Mellon University</b><br/>Pittsburgh, Pennsylvania
+<br/>May 2017
+<br/>All rights reserved.
+</td><td>('3234247', 'Senthil Purushwalkam', 'senthil purushwalkam')<br/>('3234247', 'Senthil Purushwalkam', 'senthil purushwalkam')</td><td></td></tr><tr><td>9f65319b8a33c8ec11da2f034731d928bf92e29d</td><td>TAKING ROLL: A PIPELINE FOR FACE RECOGNITION
+<br/>Dip. di Scienze Teoriche e Applicate
+<br/><b>University of Insubria</b><br/>21100, Varese, Italy
+<br/><b>Louisiana State University</b><br/>2222 Business Education Complex South,
+<br/>LA, 70803, USA
+</td><td>('39149650', 'I. Gallo', 'i. gallo')<br/>('1876793', 'S. Nawaz', 's. nawaz')<br/>('3457883', 'A. Calefati', 'a. calefati')<br/>('2398301', 'G. Piccoli', 'g. piccoli')</td><td></td></tr><tr><td>9fa1be81d31fba07a1bde0275b9d35c528f4d0b8</td><td>Identifying Persons by Pictorial and
+<br/>Contextual Cues
+<br/>Nicholas Leonard Pi¨el
+<br/>Thesis submitted for the degree of Master of Science
+<br/>Supervisor:
+<br/>April 2009
+</td><td>('1695527', 'Theo Gevers', 'theo gevers')</td><td></td></tr><tr><td>9f094341bea610a10346f072bf865cb550a1f1c1</td><td>Recognition and Volume Estimation of Food Intake using a Mobile Device
+<br/>Sarnoff Corporation
+<br/>201 Washington Rd,
+<br/>Princeton, NJ, 08540
+</td><td>('1981308', 'Manika Puri', 'manika puri')</td><td>{mpuri, zzhu, qyu, adivakaran, hsawhney}@sarnoff.com
+</td></tr><tr><td>9fdfe1695adac2380f99d3d5cb6879f0ac7f2bfd</td><td>EURASIP Journal on Applied Signal Processing 2005:13, 2091–2100
+<br/>c(cid:1) 2005 Hindawi Publishing Corporation
+<br/>Spatio-Temporal Graphical-Model-Based
+<br/>Multiple Facial Feature Tracking
+<br/>Congyong Su
+<br/><b>College of Computer Science, Zhejiang University, Hangzhou 310027, China</b><br/>Li Huang
+<br/><b>College of Computer Science, Zhejiang University, Hangzhou 310027, China</b><br/>Received 1 January 2004; Revised 20 February 2005
+<br/>It is challenging to track multiple facial features simultaneously when rich expressions are presented on a face. We propose a two-
+<br/>step solution. In the first step, several independent condensation-style particle filters are utilized to track each facial feature in the
+<br/>temporal domain. Particle filters are very effective for visual tracking problems; however multiple independent trackers ignore
+<br/>the spatial constraints and the natural relationships among facial features. In the second step, we use Bayesian inference—belief
+<br/>propagation—to infer each facial feature’s contour in the spatial domain, in which we learn the relationships among contours of
+<br/>facial features beforehand with the help of a large facial expression database. The experimental results show that our algorithm
+<br/>can robustly track multiple facial features simultaneously, while there are large interframe motions with expression changes.
+<br/>Keywords and phrases: facial feature tracking, particle filter, belief propagation, graphical model.
+<br/>1.
+<br/>INTRODUCTION
+<br/>Multiple facial feature tracking is very important in the com-
+<br/>puter vision field: it needs to be carried out before video-
+<br/>based facial expression analysis and expression cloning. Mul-
+<br/>tiple facial feature tracking is also very challenging be-
+<br/>cause there are plentiful nonrigid motions in facial fea-
+<br/>tures besides rigid motions in faces. Nonrigid facial fea-
+<br/>ture motions are usually very rapid and often form dense
+<br/>clutter by facial features themselves. Only using traditional
+<br/>Kalman filter is inadequate because it is based on Gaus-
+<br/>sian density, and works relatively poorly in clutter, which
+<br/>causes the density for facial feature’s contour to be multi-
+<br/>modal and therefore non-Gaussian. Isard and Blake [1] firstly
+<br/>proposed a face tracker by particle filters—condensation—
+<br/>which is more effective in clutter than comparable Kalman
+<br/>filter.
+<br/>Although particle filters are often very effective for visual
+<br/>tracking problems, they are specialized to temporal problems
+<br/>whose corresponding graphs are simple Markov chains (see
+<br/>Figure 1). There is often structure within each time instant
+<br/>that is ignored by particle filters. For example, in multiple
+<br/>facial feature tracking, the expressions of each facial feature
+<br/>(such as eyes, brows, lips) are closely related; therefore a more
+<br/>complex graph should be formulated.
+<br/>The contribution of this paper is extending particle filters
+<br/>to track multiple facial features simultaneously. The straight-
+<br/>forward approach of tracking each facial feature by one in-
+<br/>dependent particle filter is questionable, because influences
+<br/>and actions among facial features are not taken into account.
+<br/>In this paper, we propose a spatio-temporal graphical
+<br/>model for multiple facial feature tracking (see Figure 2). Here
+<br/>the graphical model is not a 2D or a 3D facial mesh model.
+<br/>In the spatial domain, the model is shown in Figure 3, where
+<br/>xi is a hidden random variable and yi is a noisy local ob-
+<br/>servation. Nonparametric belief propagation is used to infer
+<br/>facial feature’s interrelationships in a part-based face model,
+<br/>allowing positions and states of some features in clutter to
+<br/>be recovered. Facial structure is also taken into account, be-
+<br/>cause facial features have spatial position constraints [2]. In
+<br/>the temporal domain, every facial feature forms a Markov
+<br/>chain (see Figure 1).
+<br/>After briefly reviewing related work in Section 2, we
+<br/>introduce the details of our algorithm in Sections 3 and
+<br/>4. Many convincing experimental results are shown in
+<br/>Section 5. Conclusions are given in Section 6.
+<br/>2. RELATED WORK
+<br/>After the pioneering work of Isard and Blake [1] who
+<br/>creatively used particle filters for visual tracking, many
+</td><td></td><td>Email: su@cs.zju.edu.cn
+<br/>Email: lihuang@cs.zju.edu.cn
+</td></tr><tr><td>6b333b2c6311e36c2bde920ab5813f8cfcf2b67b</td><td></td><td></td><td></td></tr><tr><td>6b3e360b80268fda4e37ff39b7f303e3684e8719</td><td>FACE RECOGNITION FROM SKETCHES USING ADVANCED
+<br/>CORRELATION FILTERS USING HYBRID EIGENANALYSIS
+<br/>FOR FACE SYNTHESIS
+<br/><b>Language Technology Institute, Carnegie Mellon Universty</b><br/><b>Carnegie Mellon University</b><br/>Keywords:
+<br/>Face from sketch synthesis, face recognition, eigenface, advanced correlation filters, OTSDF.
+</td><td>('3036546', 'Yung-hui Li', 'yung-hui li')<br/>('1794486', 'Marios Savvides', 'marios savvides')</td><td></td></tr><tr><td>6b9aa288ce7740ec5ce9826c66d059ddcfd8dba9</td><td></td><td></td><td></td></tr><tr><td>6bcfcc4a0af2bf2729b5bc38f500cfaab2e653f0</td><td>Facial expression recognition in the wild using improved dense trajectories and
+<br/>Fisher vector encoding
+<br/><b>Computational Science and Engineering Program, Bo gazic i University, Istanbul, Turkey</b><br/><b>Bo gazic i University, Istanbul, Turkey</b></td><td>('2471932', 'Sadaf Afshar', 'sadaf afshar')<br/>('1764521', 'Albert Ali Salah', 'albert ali salah')</td><td>{sadaf.afshar, salah}@boun.edu.tr
+</td></tr><tr><td>6bca0d1f46b0f7546ad4846e89b6b842d538ee4e</td><td>FACE RECOGNITION FROM SURVEILLANCE-QUALITY VIDEO
+<br/>A Dissertation
+<br/>Submitted to the Graduate School
+<br/><b>of the University of Notre Dame</b><br/>in Partial Fulfillment of the Requirements
+<br/>for the Degree of
+<br/>Doctor of Philosophy
+<br/>by
+<br/>Patrick J. Flynn, Co-Director
+<br/>Graduate Program in Computer Science and Engineering
+<br/>Notre Dame, Indiana
+<br/>July 2010
+</td><td>('30042752', 'Deborah Thomas', 'deborah thomas')<br/>('1799014', 'Kevin W. Bowyer', 'kevin w. bowyer')</td><td></td></tr><tr><td>6b089627a4ea24bff193611e68390d1a4c3b3644</td><td>CROSS-POLLINATION OF NORMALISATION
+<br/>TECHNIQUES FROM SPEAKER TO FACE
+<br/>AUTHENTICATION USING GAUSSIAN
+<br/>MIXTURE MODELS
+<br/>Idiap-RR-03-2012
+<br/>JANUARY 2012
+<br/>Centre du Parc, Rue Marconi 19, P.O. Box 592, CH - 1920 Martigny
+</td><td>('1843477', 'Roy Wallace', 'roy wallace')<br/>('1698382', 'Sébastien Marcel', 'sébastien marcel')</td><td>T +41 27 721 77 11 F +41 27 721 77 12 info@idiap.ch www.idiap.ch
+</td></tr><tr><td>6b8d0569fffce5cc221560d459d6aa10c4db2f03</td><td>Interlinked Convolutional Neural Networks for
+<br/>Face Parsing
+<br/>State Key Laboratory of Intelligent Technology and Systems
+<br/>Tsinghua National Laboratory for Information Science and Technology (TNList)
+<br/>Department of Computer Science and Technology
+<br/><b>Tsinghua University, Beijing 100084, China</b></td><td>('1879713', 'Yisu Zhou', 'yisu zhou')<br/>('1705418', 'Xiaolin Hu', 'xiaolin hu')<br/>('49846744', 'Bo Zhang', 'bo zhang')</td><td></td></tr><tr><td>6be0ab66c31023762e26d309a4a9d0096f72a7f0</td><td>Enhance Visual Recognition under Adverse
+<br/>Conditions via Deep Networks
+</td><td>('1771885', 'Ding Liu', 'ding liu')<br/>('2392101', 'Bowen Cheng', 'bowen cheng')<br/>('2969311', 'Zhangyang Wang', 'zhangyang wang')<br/>('40479011', 'Haichao Zhang', 'haichao zhang')<br/>('1739208', 'Thomas S. Huang', 'thomas s. huang')</td><td></td></tr><tr><td>6bcee7dba5ed67b3f9926d2ae49f9a54dee64643</td><td>Assessment of Time Dependency in Face Recognition:
+<br/>An Initial Study
+<br/>IDept of Computer Science and Engineering
+<br/><b>University of Notre Dame. Notre Dame, IN 46556.USA</b><br/><b>Nqtional Institute of Standards and Technology</b><br/>100 Bureau Dr.• Stop 8940, Gaithersburg, MD 20899 USA
+<br/>Performance
+<br/>and products
+<br/>research matures
+<br/>factors of strong practical
+<br/>the performance of such syslemsis
+<br/>are
+</td><td>('1704876', 'Patrick J. Flynn', 'patrick j. flynn')<br/>('1799014', 'Kevin W. Bowyer', 'kevin w. bowyer')<br/>('32028519', 'P. Jonathon Phillips', 'p. jonathon phillips')</td><td>{flynn,kwb}@nd.edu
+<br/>jonathon@nist.gov
+</td></tr><tr><td>6b18628cc8829c3bf851ea3ee3bcff8543391819</td><td>Face recognition based on subset selection via metric learning on manifold.
+<br/>1058. [doi:10.1631/FITEE.1500085]
+<br/>Face recognition based on subset
+<br/>selection via metric learning on manifold
+<br/>Key words: Face recognition, Sparse representation, Manifold structure,
+<br/>Metric learning, Subset selection
+<br/> ORCID: http://orcid.org/0000-0001-7441-4749
+<br/>Front Inform Technol & Electron Eng </td><td>('2684160', 'Hong Shao', 'hong shao')<br/>('1752664', 'Shuang Chen', 'shuang chen')<br/>('1941366', 'Wen-cheng Cui', 'wen-cheng cui')<br/>('1752664', 'Shuang Chen', 'shuang chen')</td><td>E-mail: chenshuang19891129@gmail.com
+</td></tr><tr><td>6b7f7817b2e5a7e7d409af2254a903fc0d6e02b6</td><td>April 13, 2009
+<br/>14:13 WSPC/INSTRUCTION FILE
+<br/>International Journal of Pattern Recognition and Artificial Intelligence
+<br/>c(cid:13) World Scientific Publishing Company
+<br/>Feature extraction through cross - phase congruency for facial
+<br/>expression analysis
+<br/>Electronics Department
+<br/>Faculty of Electrical Engineering and Information Technology
+<br/><b>University of Oradea</b><br/>410087, Universitatii 1,
+<br/>Romania
+<br/>http://webhost.uoradea.ro/ibuciu
+<br/>Electronics and Communications Faculty
+<br/><b>Politehnica University of Timisoara</b><br/>Bd. Vasile Parvan, no.2
+<br/>300223 Timisoara
+<br/>Romania
+<br/>http://hermes.etc.upt.ro
+<br/>Human face analysis has attracted a large number of researchers from various fields,
+<br/>such as computer vision, image processing, neurophysiology or psychology. One of the
+<br/>particular aspects of human face analysis is encompassed by facial expression recognition
+<br/>task. A novel method based on phase congruency for extracting the facial features used
+<br/>in the facial expression classification procedure is developed. Considering a set of image
+<br/>samples comprising humans expressing various expressions, this new approach computes
+<br/>the phase congruency map between the samples. The analysis is performed in the fre-
+<br/>quency space where the similarity (or dissimilarity) between sample phases is measured
+<br/>to form discriminant features. The experiments were run using samples from two facial
+<br/>expression databases. To assess the method’s performance, the technique is compared to
+<br/>the state-of-the art techniques utilized for classifying facial expressions, such as Principal
+<br/>Component Analysis (PCA), Independent Component Analysis (ICA), Linear Discrim-
+<br/>inant Analysis (LDA), and Gabor jets. The features extracted by the aforementioned
+<br/>techniques are further classified using two classifiers: a distance-based classifier and a
+<br/>Support Vector Machine - based classifier. Experiments reveal superior facial expression
+<br/>recognition performance for the proposed approach with respect to other techniques.
+<br/>Keywords: feature extraction; phase congruency; facial expression analysis.
+<br/>1. Feature Extraction for Facial Expression Recognition
+<br/>Facial expression analysis is a concern of several disciplinary scientific fields, such
+<br/>as computer vision, image processing, neurophysiology and psychology. The large
+<br/>interest for this analysis is motivated by an impressive area of applications. These
+</td><td>('2336758', 'Ioan Buciu', 'ioan buciu')<br/>('2526319', 'Ioan Nafornita', 'ioan nafornita')</td><td>ibuciu@uoradea.ro
+<br/>ioan.nafornita@etc.upt.ro
+</td></tr><tr><td>6b1b43d58faed7b457b1d4e8c16f5f7e7d819239</td><td></td><td></td><td></td></tr><tr><td>6bb0425baac448297fbd29a00e9c9b9926ce8870</td><td>INTERNATIONAL CONFERENCE ON COMMUNICATION, COMPUTER AND POWER (ICCCP’09)
+<br/>MUSCAT, FEBRUARY 15-18, 2009
+<br/>Facial Expression Recognition Using Log-Gabor
+<br/>Filters and Local Binary Pattern Operators
+<br/><b>School of Electrical and Computer Engineering, RMIT University, Melbourne, Australia</b></td><td>('1857490', 'Seyed Mehdi Lajevardi', 'seyed mehdi lajevardi')<br/>('1749220', 'Zahir M. Hussain', 'zahir m. hussain')</td><td>seyed.lajevardi@rmit.edu.au, zmhussain@ieee.org
+</td></tr><tr><td>6b35b15ceba2f26cf949f23347ec95bbbf7bed64</td><td></td><td></td><td></td></tr><tr><td>6b6493551017819a3d1f12bbf922a8a8c8cc2a03</td><td>Pose Normalization for Local Appearance-Based
+<br/>Face Recognition
+<br/>Computer Science Department, Universit¨at Karlsruhe (TH)
+<br/>Am Fasanengarten 5, Karlsruhe 76131, Germany
+<br/>http://isl.ira.uka.de/cvhci
+</td><td>('1697965', 'Hua Gao', 'hua gao')<br/>('1742325', 'Rainer Stiefelhagen', 'rainer stiefelhagen')</td><td>{hua.gao,ekenel,stiefel}@ira.uka.de
+</td></tr><tr><td>6b17b219bd1a718b5cd63427032d93c603fcf24f</td><td><b>Carnegie Mellon University</b><br/><b>Language Technologies Institute</b><br/>School of Computer Science
+<br/>10-1-2016
+<br/>Videos from the 2013 Boston Marathon: An Event
+<br/>Reconstruction Dataset for Synchronization and
+<br/>Localization
+<br/><b>Carnegie Mellon University</b><br/><b>Carnegie Mellon University</b><br/><b>Carnegie Mellon University</b><br/><b>Carnegie Mellon University</b><br/>Follow this and additional works at: http://repository.cmu.edu/lti
+<br/>Part of the Computer Sciences Commons
+</td><td>('3175807', 'Jia Chen', 'jia chen')<br/>('1915796', 'Junwei Liang', 'junwei liang')<br/>('2075232', 'Han Lu', 'han lu')<br/>('2927024', 'Shoou-I Yu', 'shoou-i yu')<br/>('7661726', 'Alexander Hauptmann', 'alexander hauptmann')</td><td>Research Showcase @ CMU
+<br/>Carnegie Mellon University, alex@cs.cmu.edu
+<br/>This Technical Report is brought to you for free and open access by the School of Computer Science at Research Showcase @ CMU. It has been
+<br/>accepted for inclusion in Language Technologies Institute by an authorized administrator of Research Showcase @ CMU. For more information, please
+<br/>contact research-showcase@andrew.cmu.edu.
+</td></tr><tr><td>6bb630dfa797168e6627d972560c3d438f71ea99</td><td></td><td></td><td></td></tr><tr><td>6b6ff9d55e1df06f8b3e6f257e23557a73b2df96</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 61– No.17, January 2013
+<br/>Survey of Threats to the Biometric Authentication
+<br/>Systems and Solutions
+<br/>Research Scholor,Mewar
+<br/><b>University, Chitorgarh. (INDIA</b><br/>P.C.Gupta
+<br/><b>Kota University, Kota(INDIA</b><br/>Khushboo Mantri
+<br/><b>M.tech.student, Arya College of</b><br/>engineering ,Jaipur(INDIA)
+</td><td>('2875951', 'Sarika Khandelwal', 'sarika khandelwal')</td><td></td></tr><tr><td>07377c375ac76a34331c660fe87ebd7f9b3d74c4</td><td>Detailed Human Avatars from Monocular Video
+<br/>1Computer Graphics Lab, TU Braunschweig, Germany
+<br/><b>Max Planck Institute for Informatics, Saarland Informatics Campus, Germany</b><br/>Figure 1: Our method creates a detailed avatar from a monocular video of a person turning around. Based on the SMPL
+<br/>model, we first compute a medium-level avatar, then add subject-specific details and finally generate a seamless texture.
+</td><td>('1914886', 'Thiemo Alldieck', 'thiemo alldieck')<br/>('9765909', 'Weipeng Xu', 'weipeng xu')</td><td>{alldieck,magnor}@cg.cs.tu-bs.de {wxu,theobalt,gpons}@mpi-inf.mpg.de
+</td></tr><tr><td>0729628db4bb99f1f70dd6cb2353d7b76a9fce47</td><td>Separating Pose and Expression in Face Images:
+<br/>A Manifold Learning Approach
+<br/><b>University of Pennsylvania</b><br/>Moore Bldg, 200 South 33rd St, Philadelphia, PA 19104, USA
+<br/>(Submitted on December 27, 2006)
+</td><td>('1732066', 'Daniel D. Lee', 'daniel d. lee')</td><td>E-mail: {jhham,ddlee}@seas.upenn.edu
+</td></tr><tr><td>0728f788107122d76dfafa4fb0c45c20dcf523ca</td><td>The Best of Both Worlds: Combining Data-independent and Data-driven
+<br/>Approaches for Action Recognition
+</td><td>('1711953', 'Dezhong Yao', 'dezhong yao')<br/>('2735055', 'Ming Lin', 'ming lin')<br/>('2927024', 'Shoou-I Yu', 'shoou-i yu')</td><td>{lanzhzh, minglin, iyu, alex@cs.cmu.edu}, dyao@hust.edu.cn
+</td></tr><tr><td>07c90e85ac0f74b977babe245dea0f0abcf177e3</td><td>Appeared in the 4th International Conference on Audio- and Video-Based
+<br/>Biometric Person Authentication, pp 10{18, June 9 - 11, 2003, Guildford, UK
+<br/>An Image Preprocessing Algorithm for
+<br/>Illumination Invariant Face Recognition
+<br/><b>The Robotics Institute, Carnegie Mellon University</b><br/>5000 Forbes Avenue, Pittsburgh, PA 15213
+</td><td>('33731953', 'Ralph Gross', 'ralph gross')<br/>('2407094', 'Vladimir Brajovic', 'vladimir brajovic')</td><td>frgross,brajovicg@cs.cmu.edu
+</td></tr><tr><td>07ea3dd22d1ecc013b6649c9846d67f2bf697008</td><td>HUMAN-CENTRIC VIDEO UNDERSTANDING WITH WEAK
+<br/>SUPERVISION
+<br/>A DISSERTATION
+<br/>SUBMITTED TO THE DEPARTMENT OF COMPUTER SCIENCE
+<br/>AND THE COMMITTEE ON GRADUATE STUDIES
+<br/><b>OF STANFORD UNIVERSITY</b><br/>IN PARTIAL FULFILLMENT OF THE REQUIREMENTS
+<br/>FOR THE DEGREE OF
+<br/>DOCTOR OF PHILOSOPHY
+<br/>June 2016
+</td><td>('34066479', 'Vignesh Ramanathan', 'vignesh ramanathan')</td><td></td></tr><tr><td>071099a4c3eed464388c8d1bff7b0538c7322422</td><td>FACIAL EXPRESSION RECOGNITION IN THE WILD USING RICH DEEP FEATURES
+<br/>Microsoft Advanced Technology labs, Microsoft Technology and Research, Cairo, Egypt
+<br/>
+</td><td>('34828041', 'Abubakrelsedik Karali', 'abubakrelsedik karali')<br/>('2376438', 'Ahmad Bassiouny', 'ahmad bassiouny')<br/>('3144122', 'Motaz El-Saban', 'motaz el-saban')</td><td></td></tr><tr><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td><td>Large Scale Unconstrained Open Set Face Database
+<br/><b>University of Colorado at Colorado Springs</b><br/>2Securics Inc, Colorado Springs
+</td><td>('27469806', 'Archana Sapkota', 'archana sapkota')<br/>('1760117', 'Terrance E. Boult', 'terrance e. boult')</td><td>asapkota@vast.uccs.edu
+<br/>tboult@vast.uccs.edu
+</td></tr><tr><td>076d3fc800d882445c11b9af466c3af7d2afc64f</td><td>FACE ATTRIBUTE CLASSIFICATION USING ATTRIBUTE-AWARE CORRELATION MAP
+<br/>AND GATED CONVOLUTIONAL NEURAL NETWORKS
+<br/><b>Korea Advanced institute of Science and Technology</b><br/>Department of Electrical Engineering
+<br/>291 Daehak-ro, Yuseong-gu, Daejeon, Korea
+</td><td>('3315036', 'Sunghun Kang', 'sunghun kang')<br/>('2350325', 'Donghoon Lee', 'donghoon lee')</td><td></td></tr><tr><td>071af21377cc76d5c05100a745fb13cb2e40500f</td><td></td><td></td><td></td></tr><tr><td>070ab604c3ced2c23cce2259043446c5ee342fd6</td><td>AnActiveIlluminationandAppearance(AIA)ModelforFaceAlignment
+<br/>FatihKahraman,MuhittinGokmen
+<br/><b>IstanbulTechnicalUniversity</b><br/>ComputerScienceDept.,Turkey
+<br/>InformaticsandMathematicalModelling,Denmark
+<br/>SuneDarkner,RasmusLarsen
+<br/><b>TechnicalUniversityofDenmark</b></td><td></td><td>{fkahraman, gokmen}@itu.edu.tr
+<br/>{sda,rl}@imm.dtu.dk
+</td></tr><tr><td>071135dfb342bff884ddb9a4d8af0e70055c22a1</td><td>New Architecture and Transfer Learning for Video Classification
+<br/>Temporal 3D ConvNets:
+<br/><b>ESAT-PSI, KU Leuven, 2University of Bonn, 3CV:HCI, KIT, Karlsruhe, 4Sensifai</b></td><td>('3310120', 'Ali Diba', 'ali diba')<br/>('3169187', 'Mohsen Fayyaz', 'mohsen fayyaz')<br/>('38035876', 'Vivek Sharma', 'vivek sharma')<br/>('31493847', 'Amir Hossein Karami', 'amir hossein karami')<br/>('2713759', 'Mohammad Mahdi Arzani', 'mohammad mahdi arzani')<br/>('9456273', 'Rahman Yousefzadeh', 'rahman yousefzadeh')<br/>('1681236', 'Luc Van Gool', 'luc van gool')</td><td>{firstname.lastname}@esat.kuleuven.be, {lastname}@sensifai.com,
+<br/>fayyaz@iai.uni-bonn.de, vivek.sharma@kit.edu
+</td></tr><tr><td>0754e769eb613fd3968b6e267a301728f52358be</td><td>Towards a Watson That Sees: Language-Guided Action Recognition for
+<br/>Robots
+</td><td>('7607499', 'Yezhou Yang', 'yezhou yang')<br/>('1697493', 'Yiannis Aloimonos', 'yiannis aloimonos')</td><td></td></tr><tr><td>07c83f544d0604e6bab5d741b0bf9a3621d133da</td><td>Learning Spatio-Temporal Features with 3D Residual Networks
+<br/>for Action Recognition
+<br/><b>National Institute of Advanced Industrial Science and Technology (AIST</b><br/>Tsukuba, Ibaraki, Japan
+</td><td>('2199251', 'Kensho Hara', 'kensho hara')<br/>('1730200', 'Hirokatsu Kataoka', 'hirokatsu kataoka')<br/>('1732705', 'Yutaka Satoh', 'yutaka satoh')</td><td>{kensho.hara, hirokatsu.kataoka, yu.satou}@aist.go.jp
+</td></tr><tr><td>0773c320713dae62848fceac5a0ac346ba224eca</td><td>Digital Facial Augmentation for Interactive
+<br/>Entertainment
+<br/>Centre for Intelligent Machines
+<br/><b>McGill University</b><br/>Montreal, Quebec, Canada
+</td><td>('2726121', 'Naoto Hieda', 'naoto hieda')<br/>('2242019', 'Jeremy R. Cooperstock', 'jeremy r. cooperstock')</td><td>Email: {nhieda, jer}@cim.mcgill.ca
+</td></tr><tr><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td><td>A FACS Valid 3D Dynamic Action Unit Database with Applications to 3D
+<br/>Dynamic Morphable Facial Modeling
+<br/>Department of Computer Science
+<br/>School of Humanities and Social Sciences
+<br/><b>University of Bath</b><br/><b>Jacobs University</b><br/>Centre for Vision, Speech and Signal Processing
+<br/><b>University of Surrey</b></td><td>('1792288', 'Darren Cosker', 'darren cosker')<br/>('2035177', 'Eva Krumhuber', 'eva krumhuber')<br/>('1695085', 'Adrian Hilton', 'adrian hilton')</td><td>dpc@cs.bath.ac.uk
+<br/>e.krumhuber@jacobs-university.de
+<br/>a.hilton@surrey.ac.uk
+</td></tr><tr><td>07a472ea4b5a28b93678a2dcf89028b086e481a2</td><td>Head Dynamic Analysis: A Multi-view
+<br/>Framework
+<br/><b>University of California, San Diego, USA</b></td><td>('1947383', 'Ashish Tawari', 'ashish tawari')</td><td>{atawari,mtrivedi}@ucsd.edu
+</td></tr><tr><td>0717b47ab84b848de37dbefd81cf8bf512b544ac</td><td>International Journal of Engineering Research and Applications (IJERA) ISSN: 2248-9622
+<br/>International Conference on Humming Bird ( 01st March 2014)
+<br/>RESEARCH ARTICLE
+<br/> OPEN ACCESS
+<br/>Robust Face Recognition and Tagging in Visual Surveillance
+<br/>System
+</td><td>('21008397', 'Kavitha MS', 'kavitha ms')<br/>('39546266', 'Siva Pradeepa', 'siva pradeepa')<br/>('21008397', 'Kavitha MS', 'kavitha ms')<br/>('39546266', 'Siva Pradeepa', 'siva pradeepa')</td><td>e-mail:kavithams999@gmail.com
+</td></tr><tr><td>07fa153b8e6196ee6ef6efd8b743de8485a07453</td><td>Action Prediction from Videos via Memorizing Hard-to-Predict Samples
+<br/><b>Northeastern University, Boston, MA, USA</b><br/><b>College of Engineering, Northeastern University, Boston, MA, USA</b><br/><b>College of Computer and Information Science, Northeastern University, Boston, MA, USA</b></td><td>('48901920', 'Yu Kong', 'yu kong')<br/>('9355577', 'Shangqian Gao', 'shangqian gao')<br/>('47935056', 'Bin Sun', 'bin sun')<br/>('1708679', 'Yun Fu', 'yun fu')</td><td>{yukong,yunfu}@ece.neu.edu, {gao.sh,sun.bi}@husky.neu.edu
+</td></tr><tr><td>0708059e3bedbea1cbfae1c8cd6b7259d4b56b5b</td><td>Graph-regularized Multi-class Support Vector
+<br/>Machines for Face and Action Recognition
+<br/><b>Tampere University of Technology, Tampere, Finland</b></td><td>('9219875', 'Moncef Gabbouj', 'moncef gabbouj')</td><td>Email: {alexandros.iosifidis,moncef.gabbouj}@tut.fi
+</td></tr><tr><td>074af31bd9caa61fea3c4216731420bd7c08b96a</td><td>Face Verification Using Sparse Representations
+<br/><b>Institute for Advanced Computer Studies, University of Maryland, College Park, MD</b><br/><b>TNLIST, Tsinghua University, Beijing, 100084, China</b></td><td>('2723427', 'Huimin Guo', 'huimin guo')<br/>('3373117', 'Ruiping Wang', 'ruiping wang')<br/>('3826759', 'Jonghyun Choi', 'jonghyun choi')<br/>('1693428', 'Larry S. Davis', 'larry s. davis')</td><td>{hmguo, jhchoi, lsd}@umiacs.umd.edu, rpwang@tsinghua.edu.cn
+</td></tr><tr><td>0750a816858b601c0dbf4cfb68066ae7e788f05d</td><td>CosFace: Large Margin Cosine Loss for Deep Face Recognition
+<br/>Tencent AI Lab
+</td><td>('39049654', 'Hao Wang', 'hao wang')<br/>('1996677', 'Yitong Wang', 'yitong wang')<br/>('48741267', 'Zheng Zhou', 'zheng zhou')<br/>('3478009', 'Xing Ji', 'xing ji')<br/>('2856494', 'Dihong Gong', 'dihong gong')<br/>('2263912', 'Jingchao Zhou', 'jingchao zhou')<br/>('1911510', 'Zhifeng Li', 'zhifeng li')<br/>('46641573', 'Wei Liu', 'wei liu')</td><td>{hawelwang,yitongwang,encorezhou,denisji,sagazhou,michaelzfli}@tencent.com
+<br/>gongdihong@gmail.com wliu@ee.columbia.edu
+</td></tr><tr><td>078d507703fc0ac4bf8ca758be101e75ea286c80</td><td> ISSN: 2321-8169
+<br/>International Journal on Recent and Innovation Trends in Computing and Communication
+<br/>Volume: 3 Issue: 8
+<br/> 5287 - 5296
+<br/> ________________________________________________________________________________________________________________________________
+<br/>Large- Scale Content Based Face Image Retrieval using Attribute Enhanced
+<br/>Sparse Codewords.
+<br/>Chaitra R,
+<br/>Mtech Digital Coomunication Engineering
+<br/><b>Acharya Institute Of Technology</b><br/>Bangalore
+</td><td></td><td></td></tr><tr><td>0716e1ad868f5f446b1c367721418ffadfcf0519</td><td>Interactively Guiding Semi-Supervised
+<br/>Clustering via Attribute-Based Explanations
+<br/>Virginia Tech, Blacksburg, VA, USA
+</td><td>('9276834', 'Shrenik Lad', 'shrenik lad')<br/>('1713589', 'Devi Parikh', 'devi parikh')</td><td></td></tr><tr><td>073eaa49ccde15b62425cda1d9feab0fea03a842</td><td></td><td></td><td></td></tr><tr><td>07f31bef7a7035792e3791473b3c58d03928abbf</td><td>Lessons from Collecting a Million
+<br/>Biometric Samples
+<br/><b>University of Notre Dame</b><br/><b>National Institute of Standards and Technology</b></td><td>('1704876', 'Patrick J. Flynn', 'patrick j. flynn')<br/>('1799014', 'Kevin W. Bowyer', 'kevin w. bowyer')<br/>('32028519', 'P. Jonathon Phillips', 'p. jonathon phillips')</td><td></td></tr><tr><td>0726a45eb129eed88915aa5a86df2af16a09bcc1</td><td>Introspective Perception: Learning to Predict Failures in Vision Systems
+</td><td>('2739544', 'Shreyansh Daftry', 'shreyansh daftry')<br/>('3308210', 'Sam Zeng', 'sam zeng')<br/>('1756566', 'J. Andrew Bagnell', 'j. andrew bagnell')<br/>('1709305', 'Martial Hebert', 'martial hebert')</td><td></td></tr><tr><td>07de8371ad4901356145722aa29abaeafd0986b9</td><td>April 13, 2017
+<br/>DRAFT
+<br/>Towards Usable Multimedia Event Detection
+<br/>February, 2017
+<br/>School of Computer Science
+<br/><b>Carnegie Mellon University</b><br/>Pittsburgh, PA 15213
+<br/>Thesis Committee:
+<br/>Alexander G. Hauptmann (Chair)
+<br/>Submitted in partial fulfillment of the requirements
+<br/>for the degree of Doctor of Philosophy.
+</td><td>('34692532', 'Zhenzhong Lan', 'zhenzhong lan')<br/>('1880336', 'Bhiksha Raj Ramakrishnan', 'bhiksha raj ramakrishnan')<br/>('1767184', 'Louis-Philippe Morency', 'louis-philippe morency')<br/>('14517812', 'Leonid Sigal', 'leonid sigal')<br/>('34692532', 'Zhenzhong Lan', 'zhenzhong lan')</td><td></td></tr><tr><td>07e639abf1621ceff27c9e3f548fadfa2052c912</td><td>RESEARCH ARTICLE
+<br/>5-HTTLPR Expression Outside the Skin: An
+<br/>Experimental Test of the Emotional
+<br/>Reactivity Hypothesis in Children
+<br/><b>Utrecht Centre for Child and Adolescent Studies, Utrecht University, Utrecht, The Netherlands</b><br/><b>Research Institute of Child Development and Education, University of Amsterdam, Utrecht, The</b><br/><b>Netherlands, Utrecht University, Utrecht, The Netherlands</b><br/><b>Current Address: Research Institute of Child Development and Education, University of Amsterdam</b><br/>Amsterdam,The Netherlands
+</td><td>('4594074', 'Joyce Weeland', 'joyce weeland')<br/>('6811600', 'Meike Slagt', 'meike slagt')<br/>('5859538', 'Eddie Brummelman', 'eddie brummelman')<br/>('3935697', 'Walter Matthys', 'walter matthys')<br/>('4441681', 'Geertjan Overbeek', 'geertjan overbeek')</td><td>* j.weeland@uva.nl
+</td></tr><tr><td>07da958db2e561cc7c24e334b543d49084dd1809</td><td>Dictionary Learning Based Dimensionality
+<br/>Reduction for Classification
+<br/>Karin Schnass and Pierre Vandergheynst
+<br/><b>Signal Processing Institute</b><br/><b>Swiss Federal Institute of Technology</b><br/>Lausanne, Switzerland
+<br/>EPFL-STI-ITS-LTS2
+<br/>CH-1015 Lausanne
+<br/>Tel: +41 21 693 2657
+<br/>Fax: +41 21 693 7600
+<br/>EDICS: SPC-CODC
+</td><td></td><td>{karin.schnass, pierre.vandergheynst}@epfl.ch
+</td></tr><tr><td>0742d051caebf8a5d452c03c5d55dfb02f84baab</td><td>Real-Time Geometric Motion Blur for a Deforming Polygonal Mesh
+<br/>Nathan Jones
+<br/><b>Formerly: Texas AandM University</b><br/>Currently: The Software Group
+</td><td></td><td>nathan.jones@tylertechnologies.com
+</td></tr><tr><td>07d986b1005593eda1aeb3b1d24078db864f8f6a</td><td>International Journal of Industrial Electronics and Electrical Engineering, ISSN: 2347-6982
+<br/>Volume-3, Issue-11, Nov.-2015
+<br/>FACIAL EXPRESSION RECOGNITION USING LOCAL FACIAL
+<br/>FEATURES
+<br/><b>National University of Kaohsiung, 811 Kaohsiung, Taiwan</b><br/><b>National University of Kaohsiung, 811 Kaohsiung, Taiwan</b><br/><b>National Sun Yat Sen University, 804 Kaohsiung, Taiwan</b><br/>followed by
+<br/>communications
+<br/>[1]. Automatic
+</td><td></td><td>E-mail: abc3329797@gmail.com, {cclai, johnw, stpan}@nuk.edu.tw, leesj@mail.ee.nsysu.edu.tw
+</td></tr><tr><td>38d56ddcea01ce99902dd75ad162213cbe4eaab7</td><td>Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+<br/>2648
+</td><td></td><td></td></tr><tr><td>389334e9a0d84bc54bcd5b94b4ce4c5d9d6a2f26</td><td>FACIAL PARAMETER EXTRACTION SYSTEM BASED ON ACTIVE CONTOURS
+<br/>Universitat Politècnica de Catalunya, Barcelona, Spain
+</td><td>('1767549', 'Montse Pardàs', 'montse pardàs')<br/>('1820469', 'Marcos Losada', 'marcos losada')</td><td></td></tr><tr><td>38f7f3c72e582e116f6f079ec9ae738894785b96</td><td>IJARCCE
+<br/>ISSN (Online) 2278-1021
+<br/>ISSN (Print) 2319 5940
+<br/>International Journal of Advanced Research in Computer and Communication Engineering
+<br/>Vol. 4, Issue 11, November 2015
+<br/>A New Technique for Face Matching after
+<br/>Plastic Surgery in Forensics
+<br/><b>Student, Amal Jyothi College of Engineering, Kanjirappally, India</b><br/><b>Amal Jyothi College of Engineering, Kanjirappally, India</b><br/>I. INTRODUCTION
+<br/>Facial recognition is one of the most important task that
+<br/>forensic examiners execute
+<br/>their
+<br/>investigation. This work focuses on analysing the effect of
+<br/>plastic surgery in face recognition algorithms. It is
+<br/>imperative for the subsequent facial recognition systems to
+<br/>be capable of addressing this significant issue and
+<br/>accordingly there is a need for more research in this
+<br/>important area.
+</td><td>('32764403', 'Anju Joseph', 'anju joseph')<br/>('16501589', 'Nilu Tressa Thomas', 'nilu tressa thomas')<br/>('40864737', 'Neethu C. Sekhar', 'neethu c. sekhar')</td><td></td></tr><tr><td>380dd0ddd5d69adc52defc095570d1c22952f5cc</td><td></td><td></td><td></td></tr><tr><td>38679355d4cfea3a791005f211aa16e76b2eaa8d</td><td>Title
+<br/>Evolutionary cross-domain discriminative Hessian Eigenmaps
+<br/>Author(s)
+<br/>Si, S; Tao, D; Chan, KP
+<br/>Citation
+<br/>1086
+<br/>Issued Date
+<br/>2010
+<br/>URL
+<br/>http://hdl.handle.net/10722/127357
+<br/>Rights
+<br/>This work is licensed under a Creative Commons Attribution-
+<br/>NonCommercial-NoDerivatives 4.0 International License.; ©2010
+<br/>IEEE. Personal use of this material is permitted. However,
+<br/>permission to reprint/republish this material for advertising or
+<br/>promotional purposes or for creating new collective works for
+<br/>resale or redistribution to servers or lists, or to reuse any
+<br/>copyrighted component of this work in other works must be
+<br/>obtained from the IEEE.
+</td><td></td><td></td></tr><tr><td>3802c97f925cb03bac91d9db13d8b777dfd29dcc</td><td>Non-Parametric Bayesian Constrained Local Models
+<br/><b>Institute of Systems and Robotics, University of Coimbra, Portugal</b></td><td>('39458914', 'Pedro Martins', 'pedro martins')<br/>('2117944', 'Rui Caseiro', 'rui caseiro')<br/>('1678231', 'Jorge Batista', 'jorge batista')</td><td>{pedromartins,ruicaseiro,batista}@isr.uc.pt
+</td></tr><tr><td>38a2661b6b995a3c4d69e7d5160b7596f89ce0e6</td><td>Randomized Intraclass-Distance Minimizing Binary Codes for Face Recognition
+<br/><b>Colorado State University</b><br/>Fort Collins, CO 80523
+<br/><b>National Institute of Standards and Technology</b></td><td>('40370804', 'Hao Zhang', 'hao zhang')<br/>('1757322', 'J. Ross Beveridge', 'j. ross beveridge')<br/>('32028519', 'P. Jonathon Phillips', 'p. jonathon phillips')</td><td>{zhangh, ross, qmo, draper}@cs.colostate.edu
+<br/>jonathon.phillips@nist.gov
+</td></tr><tr><td>38682c7b19831e5d4f58e9bce9716f9c2c29c4e7</td><td>International Journal of Computer Trends and Technology (IJCTT) – Volume 18 Number 5 – Dec 2014
+<br/>Movie Character Identification Using Graph Matching
+<br/>Algorithm
+<br/>M.Tech Scholar, Dept of CSE, QISCET, ONGOLE, Dist: Prakasam, AP, India.
+<br/>Associate Professor, Department of CSE, QISCET, ONGOLE, Dist: Prakasam, AP, India
+</td><td></td><td></td></tr><tr><td>38787338ba659f0bfbeba11ec5b7748ffdbb1c3d</td><td>EVALUATION OF THE DISCRIMINATION POWER OF FEATURES EXTRACTED
+<br/>FROM 2-D AND 3-D FACIAL IMAGES FOR FACIAL EXPRESSION ANALYSIS
+<br/><b>University of Piraeus</b><br/>Karaoli & Dimitriou 80, Piraeus 185 34
+<br/>GREECE
+</td><td>('2828175', 'Ioanna-Ourania Stathopoulou', 'ioanna-ourania stathopoulou')<br/>('1802584', 'George A. Tsihrintzis', 'george a. tsihrintzis')</td><td>phone: + 30 210 4142322, fax: + 30 210 4142264, email: {iostath, geoatsi}@unipi.gr
+</td></tr><tr><td>3803b91e784922a2dacd6a18f61b3100629df932</td><td>Temporal Multimodal Fusion
+<br/>for Video Emotion Classification in the Wild
+<br/>Orange Labs
+<br/>Cesson-Sévigné, France
+<br/>Orange Labs
+<br/>Cesson-Sévigné, France
+<br/>Normandie Univ., UNICAEN,
+<br/>ENSICAEN, CNRS
+<br/>Caen, France
+</td><td>('26339425', 'Valentin Vielzeuf', 'valentin vielzeuf')<br/>('2642628', 'Stéphane Pateux', 'stéphane pateux')<br/>('1801809', 'Frédéric Jurie', 'frédéric jurie')</td><td>valentin.vielzeuf@orange.com
+<br/>stephane.pateux@orange.com
+<br/>frederic.jurie@unicaen.fr
+</td></tr><tr><td>38eea307445a39ee7902c1ecf8cea7e3dcb7c0e7</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Multi-distance Support Matrix Machine
+<br/>Received: date / Accepted: date
+</td><td>('34679353', 'Yunfei Ye', 'yunfei ye')<br/>('49405675', 'Dong Han', 'dong han')</td><td></td></tr><tr><td>38c901a58244be9a2644d486f9a1284dc0edbf8a</td><td>Multi-Camera Action Dataset for Cross-Camera Action Recognition
+<br/>Benchmarking
+<br/><b>School of Electronic Information Engineering, Tianjin University, China</b><br/><b>Interactive and Digital Media Institute, National University of Singapore, Singapore</b><br/><b>School of Computing, National University of Singapore, Singapore</b></td><td>('1803305', 'Wenhui Li', 'wenhui li')<br/>('3026404', 'Yongkang Wong', 'yongkang wong')<br/>('1678662', 'Yang Li', 'yang li')</td><td></td></tr><tr><td>385750bcf95036c808d63db0e0b14768463ff4c6</td><td></td><td></td><td></td></tr><tr><td>3852968082a16db8be19b4cb04fb44820ae823d4</td><td>Unsupervised Learning of Long-Term Motion Dynamics for Videos
+<br/><b>Stanford University</b></td><td>('3378742', 'Zelun Luo', 'zelun luo')<br/>('3378457', 'Boya Peng', 'boya peng')<br/>('38485317', 'De-An Huang', 'de-an huang')<br/>('3304525', 'Alexandre Alahi', 'alexandre alahi')<br/>('3216322', 'Li Fei-Fei', 'li fei-fei')</td><td>{zelunluo,boya,dahuang,alahi,feifeili}@cs.stanford.edu
+</td></tr><tr><td>38cc2f1c13420170c7adac30f9dfac69b297fb76</td><td><b>Rochester Institute of Technology</b><br/>RIT Scholar Works
+<br/>Theses
+<br/>7-1-2009
+<br/>Thesis/Dissertation Collections
+<br/>Recognition of human activities and expressions in
+<br/>video sequences using shape context descriptor
+<br/>Follow this and additional works at: http://scholarworks.rit.edu/theses
+<br/>Recommended Citation
+<br/>Kholgade, Natasha Prashant, "Recognition of human activities and expressions in video sequences using shape context descriptor"
+<br/><b>Thesis. Rochester Institute of Technology. Accessed from</b><br/>This Thesis is brought to you for free and open access by the Thesis/Dissertation Collections at RIT Scholar Works. It has been accepted for inclusion
+</td><td>('2201569', 'Natasha Prashant Kholgade', 'natasha prashant kholgade')</td><td>in Theses by an authorized administrator of RIT Scholar Works. For more information, please contact ritscholarworks@rit.edu.
+</td></tr><tr><td>38cbb500823057613494bacd0078aa0e57b30af8</td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops
+<br/>Deep Face Deblurring
+<br/><b>Imperial College London</b><br/><b>Imperial College London</b></td><td>('34586458', 'Grigorios G. Chrysos', 'grigorios g. chrysos')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')</td><td>g.chrysos@imperial.ac.uk
+<br/>s.zafeiriou@imperial.ac.uk
+</td></tr><tr><td>384f972c81c52fe36849600728865ea50a0c4670</td><td>1
+<br/>Multi-Fold Gabor, PCA and ICA Filter
+<br/>Convolution Descriptor for Face Recognition
+<br/>
+</td><td>('1801904', 'Andrew Beng Jin Teoh', 'andrew beng jin teoh')<br/>('3326176', 'Cong Jie Ng', 'cong jie ng')</td><td></td></tr><tr><td>38f1fac3ed0fd054e009515e7bbc72cdd4cf801a</td><td>Finding Person Relations in Image Data of the
+<br/>Internet Archive
+<br/>Eric M¨uller-Budack1,2[0000−0002−6802−1241],
+<br/>1 Leibniz Information Centre for Science and Technology (TIB), Hannover, Germany
+<br/><b>L3S Research Center, Leibniz Universit at Hannover, Germany</b></td><td>('51008013', 'Kader Pustu-Iren', 'kader pustu-iren')<br/>('50983345', 'Sebastian Diering', 'sebastian diering')<br/>('1738703', 'Ralph Ewerth', 'ralph ewerth')</td><td></td></tr><tr><td>38f06a75eb0519ae1d4582a86ef4730cc8fb8d7f</td><td>Shrinkage Expansion Adaptive Metric Learning
+<br/>1 School of Information and Communications Engineering,
+<br/><b>Dalian University of Technology, China</b><br/><b>School of Computer Science and Technology, Harbin Institute of Technology, China</b><br/><b>Hong Kong Polytechnic University, Hong Kong</b></td><td>('2769011', 'Qilong Wang', 'qilong wang')<br/>('1724520', 'Wangmeng Zuo', 'wangmeng zuo')<br/>('36685537', 'Lei Zhang', 'lei zhang')<br/>('40426020', 'Peihua Li', 'peihua li')</td><td>{csqlwang,cswmzuo}@gmail.com, cslzhang@comp.polyu.edu.hk,
+<br/>peihuali@dlut.edu.cn
+</td></tr><tr><td>380d5138cadccc9b5b91c707ba0a9220b0f39271</td><td>Deep Imbalanced Learning for Face Recognition
+<br/>and Attribute Prediction
+</td><td>('2000034', 'Chen Huang', 'chen huang')<br/>('47002704', 'Yining Li', 'yining li')<br/>('1717179', 'Chen Change Loy', 'chen change loy')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td></td></tr><tr><td>384945abd53f6a6af51faf254ba8ef0f0fb3f338</td><td>Visual Recognition with Humans in the Loop
+<br/><b>University of California, San Diego</b><br/><b>California Institute of Technology</b></td><td>('3251767', 'Steve Branson', 'steve branson')<br/>('2367820', 'Catherine Wah', 'catherine wah')<br/>('2490700', 'Boris Babenko', 'boris babenko')<br/>('1690922', 'Pietro Perona', 'pietro perona')</td><td>{sbranson,cwah,gschroff,bbabenko,sjb}@cs.ucsd.edu
+<br/>{welinder,perona}@caltech.edu
+</td></tr><tr><td>38215c283ce4bf2c8edd597ab21410f99dc9b094</td><td>The SEMAINE Database: Annotated Multimodal Records of
+<br/>Emotionally Colored Conversations between a Person and a Limited
+<br/>Agent
+<br/>McKeown, G., Valstar, M., Cowie, R., Pantic, M., & Schröder, M. (2012). The SEMAINE Database: Annotated
+<br/>Multimodal Records of Emotionally Colored Conversations between a Person and a Limited Agent. IEEE
+<br/>Transactions on Affective Computing, 3(1), 5-17. DOI: 10.1109/T-AFFC.2011.20
+<br/>Published in:
+<br/>Document Version:
+<br/>Peer reviewed version
+<br/><b>Queen's University Belfast - Research Portal</b><br/><b>Link to publication record in Queen's University Belfast Research Portal</b><br/>General rights
+<br/><b>Copyright for the publications made accessible via the Queen's University Belfast Research Portal is retained by the author(s) and / or other</b><br/>copyright owners and it is a condition of accessing these publications that users recognise and abide by the legal requirements associated
+<br/>with these rights.
+<br/>Take down policy
+<br/>The Research Portal is Queen's institutional repository that provides access to Queen's research output. Every effort has been made to
+<br/>ensure that content in the Research Portal does not infringe any person's rights, or applicable UK laws. If you discover content in the
+<br/>Download date:05. Nov. 2018
+</td><td></td><td>Research Portal that you believe breaches copyright or violates any law, please contact openaccess@qub.ac.uk.
+</td></tr><tr><td>38861d0d3a0292c1f54153b303b0d791cbba1d50</td><td></td><td></td><td></td></tr><tr><td>38d8ff137ff753f04689e6b76119a44588e143f3</td><td>When 3D-Aided 2D Face Recognition Meets Deep Learning:
+<br/>An extended UR2D for Pose-Invariant Face Recognition
+<br/>Computational Biomedicine Lab
+<br/><b>University of Houston</b><br/>4800 Calhoun Rd. Houston, TX, USA
+</td><td>('5084124', 'Xiang Xu', 'xiang xu')<br/>('39634395', 'Pengfei Dou', 'pengfei dou')<br/>('26401746', 'Ha A. Le', 'ha a. le')<br/>('1706204', 'Ioannis A. Kakadiaris', 'ioannis a. kakadiaris')</td><td></td></tr><tr><td>3896c62af5b65d7ba9e52f87505841341bb3e8df</td><td>Face Recognition from Still Images and Video
+<br/>Department of Electrical and Computer Engineering
+<br/>Center for Automation Research
+<br/><b>University of Maryland, College Park</b><br/>Related concepts Biometric identification, verification.
+<br/>Definition Face recognition is concerned with identifying or verifying one or more persons from still
+<br/>images or video sequences using a stored database of faces.
+<br/>Background The earliest work on face recognition started as early as 1950’s in psychology and in the
+<br/>1960’s in engineering, but research on automatic face recognition practically started in the 1970’s after the
+<br/>seminal work of Kanade [1] and Kelly [2].
+<br/>Application Face recognition has wide range of applications in many different areas ranging from
+<br/>law enforcement and surveillance, information security to human-computer interaction, virtual reality and
+<br/>computer entertainment.
+<br/>1 Introduction
+<br/>Face recognition with its wide range of commercial and law enforcement applications has been one of the
+<br/>most active areas of research in the field of computer vision and pattern recognition. Personal identification
+<br/>systems based on faces have the advantage that facial images can be obtained from a distance without requir-
+<br/>ing cooperation of the subject, as compared to other biometrics such as fingerprint, iris, etc. Face recognition
+<br/>is concerned with identifying or verifying one or more persons from still images or video sequences using
+<br/>a stored database of faces. Depending on the particular application, there can be different scenarios, rang-
+<br/>ing from controlled still images to uncontrolled videos. Since face recognition is essentially the problem of
+<br/>recognizing a 3D object from its 2D image or a video sequence, it has to deal with significant appearance
+<br/>changes due to illumination and pose variations. Current algorithms perform well in controlled scenarios,
+<br/>but their performance is far from satisfactory in uncontrolled scenarios. Most of the current research in this
+<br/>area is focused toward recognizing faces in uncontrolled scenarios. This chapter is broadly divided into two
+<br/>sections. The first section discusses the approaches proposed for recognizing faces from still images and the
+<br/>second section deals with face recognition from video sequences.
+<br/>2 Still image face recognition
+<br/>This section discusses some of the early subspace and feature-based approaches, followed by those which
+<br/>address the problem of appearance change due to illumination variations and approaches that can handle both
+<br/>illumination and pose variations.
+<br/>2.1 Early approaches
+<br/>Among the early subspace-based holistic approaches, eigenfaces [3] and Fisherfaces [4][5] have proved to be
+<br/>very effective for the task of face recognition. Since human faces have similar overall configuration, the facial
+<br/>images can be described by a relatively low-dimensional subspace. Principal Component Analysis (PCA) [3]
+<br/>has been used for finding those vectors which can best account for the distribution of facial images within the
+<br/>whole image space. These vectors are eigenvectors of the covariance matrix computed from the aligned face
+<br/>images in the training set and are thus known as ’eigenfaces’. Given the eigenfaces, every face in the gallery
+<br/>database is represented as a vector of weights obtained by projecting the image onto the eigenfaces using
+</td><td>('2642508', 'Soma Biswas', 'soma biswas')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td></td></tr><tr><td>38192a0f9261d9727b119e294a65f2e25f72d7e6</td><td></td><td></td><td></td></tr><tr><td>38bbca5f94d4494494860c5fe8ca8862dcf9676e</td><td>Probabilistic, Features-based Object Recognition
+<br/>Thesis by
+<br/>In Partial Ful(cid:2)llment of the Requirements
+<br/>for the Degree of
+<br/>Doctor of Philosophy
+<br/><b>California Institute of Technology</b><br/>Pasadena, California
+<br/>2008
+<br/>(Defended October 12, 2007)
+</td><td>('2462051', 'Pierre Moreels', 'pierre moreels')</td><td></td></tr><tr><td>38183fe28add21693729ddeaf3c8a90a2d5caea3</td><td>Scale-Aware Face Detection
+<br/><b>SenseTime, 2Tsinghua University</b></td><td>('19235216', 'Zekun Hao', 'zekun hao')<br/>('1715752', 'Yu Liu', 'yu liu')<br/>('2137185', 'Hongwei Qin', 'hongwei qin')<br/>('1721677', 'Junjie Yan', 'junjie yan')<br/>('2693308', 'Xiu Li', 'xiu li')<br/>('1705418', 'Xiaolin Hu', 'xiaolin hu')</td><td>{haozekun, yanjunjie}@outlook.com, liuyuisanai@gmail.com,
+<br/>{qhw12@mails., xlhu@, li.xiu@sz.}tsinghua.edu.cn
+</td></tr><tr><td>38a9ca2c49a77b540be52377784b9f734e0417e4</td><td>Face Verification using Large Feature Sets and One Shot Similarity
+<br/>1Department of Computer Science
+<br/><b>University of Maryland</b><br/><b>College Park, MD, 20740, USA</b><br/><b>Institute of Computing</b><br/><b>University of Campinas</b><br/>Campinas, SP, 13084-971, Brazil
+</td><td>('2723427', 'Huimin Guo', 'huimin guo')<br/>('1679142', 'William Robson Schwartz', 'william robson schwartz')<br/>('1693428', 'Larry S. Davis', 'larry s. davis')</td><td>hmguo@cs.umd.edu
+<br/>schwartz@ic.unicamp.br
+<br/>lsd@umiacs.umd.edu
+</td></tr><tr><td>3802da31c6d33d71b839e260f4022ec4fbd88e2d</td><td>Deep Attributes for One-Shot Face Recognition
+<br/><b>Xerox Research Center India</b><br/>3Department of Electrical Engineering, IIT Kanpur
+</td><td>('5060928', 'Aishwarya Jadhav', 'aishwarya jadhav')<br/>('1744135', 'Vinay P. Namboodiri', 'vinay p. namboodiri')<br/>('1797662', 'K. S. Venkatesh', 'k. s. venkatesh')</td><td>aishwaryauj@gmail.com, vinaypn@iitk.ac.in, venkats@iitk.ac.in
+</td></tr><tr><td>00fb2836068042c19b5197d0999e8e93b920eb9c</td><td></td><td></td><td></td></tr><tr><td>00f7f7b72a92939c36e2ef9be97397d8796ee07c</td><td>3D ConvNets with Optical Flow Based Regularization
+<br/><b>Stanford University</b><br/>Stanford, CA
+</td><td>('35627656', 'Kevin Chavez', 'kevin chavez')</td><td>kjchavez@stanford.edu
+</td></tr><tr><td>0021f46bda27ea105d722d19690f5564f2b8869e</td><td>Deep Region and Multi-label Learning for Facial Action Unit Detection
+<br/><b>School of Comm. and Info. Engineering, Beijing University of Posts and Telecom., Beijing China</b><br/><b>Robotics Institute, Carnegie Mellon University, USA</b></td><td>('2393320', 'Kaili Zhao', 'kaili zhao')</td><td></td></tr><tr><td>0081e2188c8f34fcea3e23c49fb3e17883b33551</td><td>Training Deep Face Recognition Systems
+<br/>with Synthetic Data
+<br/>Department of Mathematics and Computer Science
+<br/><b>University of Basel</b></td><td>('2780587', 'Adam Kortylewski', 'adam kortylewski')<br/>('1801001', 'Andreas Schneider', 'andreas schneider')<br/>('3277377', 'Thomas Gerig', 'thomas gerig')<br/>('34460642', 'Bernhard Egger', 'bernhard egger')<br/>('31540387', 'Andreas Morel-Forster', 'andreas morel-forster')<br/>('1687079', 'Thomas Vetter', 'thomas vetter')</td><td></td></tr><tr><td>00dc942f23f2d52ab8c8b76b6016d9deed8c468d</td><td>Advanced Correlation-Based Character Recognition Applied to
+<br/>the Archimedes Palimpsest
+<br/>by
+<br/><b>B. S. Rochester Institute of Technology</b><br/>A dissertation submitted in partial fulfillment of the
+<br/>requirements for the degree of Doctor of Philosophy
+<br/>in the Chester F. Carlson Center for Imaging Science
+<br/><b>Rochester Institute of Technology</b><br/>May 2008
+<br/>Signature of the Author
+<br/>Accepted by
+<br/>Coordinator, Ph.D. Degree Program
+<br/>Date
+</td><td>('31960835', 'Derek J. Walvoord', 'derek j. walvoord')</td><td></td></tr><tr><td>0077cd8f97cafd2b389783858a6e4ab7887b0b6b</td><td>MAI et al.: ON THE RECONSTRUCTION OF DEEP FACE TEMPLATES
+<br/>On the Reconstruction of Deep Face Templates
+</td><td>('3391550', 'Guangcan Mai', 'guangcan mai')<br/>('1684684', 'Kai Cao', 'kai cao')<br/>('1768574', 'Pong C. Yuen', 'pong c. yuen')<br/>('6680444', 'Anil K. Jain', 'anil k. jain')</td><td></td></tr><tr><td>0055c7f32fa6d4b1ad586d5211a7afb030ca08cc</td><td>SAHAet al.: DEEPLEARNINGFORDETECTINGSPACE-TIMEACTIONTUBES
+<br/>Deep Learning for Detecting Multiple
+<br/>Space-Time Action Tubes in Videos
+<br/>1 Dept. of Computing and
+<br/>Communication Technologies
+<br/><b>Oxford Brookes University</b><br/>Oxford, UK
+<br/>2 Department of Engineering Science
+<br/><b>University of Oxford</b><br/>Oxford, UK
+</td><td>('3017538', 'Suman Saha', 'suman saha')<br/>('1931660', 'Gurkirt Singh', 'gurkirt singh')<br/>('3019396', 'Michael Sapienza', 'michael sapienza')<br/>('1730268', 'Philip H. S. Torr', 'philip h. s. torr')<br/>('1754181', 'Fabio Cuzzolin', 'fabio cuzzolin')</td><td>suman.saha-2014@brookes.ac.uk
+<br/>gurkirt.singh-2015@brookes.ac.uk
+<br/>michael.sapienza@eng.ox.ac.uk
+<br/>philip.torr@eng.ox.ac.uk
+<br/>fabio.cuzzolin@brookes.ac.uk
+</td></tr><tr><td>009cd18ff06ff91c8c9a08a91d2516b264eee48e</td><td>8
+<br/>Face and Automatic Target Recognition Based
+<br/>on Super-Resolved Discriminant Subspace
+<br/><b>Chulalongkorn University, Bangkok</b><br/>Thailand
+<br/>1. Introduction
+<br/>Recently, super-resolution reconstruction (SRR) method of low-dimensional face subspaces
+<br/>has been proposed for face recognition. This face subspace, also known as eigenface, is
+<br/>extracted using principal component analysis (PCA). One of the disadvantages of the
+<br/>reconstructed features obtained from the super-resolution face subspace is that no class
+<br/>information is included. To remedy the mentioned problem, at first, this chapter will be
+<br/>discussed about two novel methods for super-resolution reconstruction of discriminative
+<br/>features, i.e., class-specific and discriminant analysis of principal components; that aims on
+<br/>improving the discriminant power of the recognition systems. Next, we discuss about two-
+<br/>dimensional principal component analysis (2DPCA), also refered to as image PCA. We suggest
+<br/>new reconstruction algorithm based on the replacement of PCA with 2DPCA in extracting
+<br/>super-resolution subspace for face and automatic target recognition. Our experimental
+<br/>results on Yale and ORL face databases are very encouraging. Furthermore, the performance
+<br/>of our proposed approach on the MSTAR database is also tested.
+<br/>In general, the fidelity of data, feature extraction, discriminant analysis, and classification
+<br/>rule are four basic elements in face and target recognition systems. One of the efficacies of
+<br/>recognition systems could be improved by enhancing the fidelity of the noisy, blurred, and
+<br/>undersampled images that are captured by the surveillance imagers. Regarding to the
+<br/>fidelity of data, when the resolution of the captured image is too small, the quality of the
+<br/>detail information becomes too limited, leading to severely poor decisions in most of the
+<br/>existing recognition systems. Having used super-resolution reconstruction algorithms (Park
+<br/>et al., 2003), it is fortunately to learn that a high-resolution (HR) image can be reconstructed
+<br/>from an undersampled image sequence obtained from the original scene with pixel
+<br/>displacements among images. This HR image is then used to input to the recognition system
+<br/>in order to improve the recognition performance. In fact, super-resolution can be considered
+<br/>as the numerical and regularization study of the ill-conditioned large scale problem given to
+<br/>describe the relationship between low-resolution (LR) and HR pixels (Nguyen et al., 2001).
+<br/>On the one hand, feature extraction aims at reducing the dimensionality of face or target
+<br/>image so that the extracted feature is as representative as possible. On the other hand,
+<br/>super-resolution aims at visually increasing the dimensionality of face or target image.
+<br/>Having applied super-resolution methods at pixel domain (Lin et al., 2005; Wagner et al.,
+<br/>2004), the performance of face and target recognition applicably increases. However, with
+<br/>the emphases on improving computational complexity and robustness to registration error
+<br/>www.intechopen.com
+</td><td>('2874330', 'Widhyakorn Asdornwised', 'widhyakorn asdornwised')</td><td></td></tr><tr><td>00214fe1319113e6649435cae386019235474789</td><td>Bachelorarbeit im Fach Informatik
+<br/>Face Recognition using
+<br/>Distortion Models
+<br/>Mathematik, Informatik und Naturwissenschaften der
+<br/>RHEINISCH-WESTFÄLISCHEN TECHNISCHEN HOCHSCHULE AACHEN
+<br/>Der Fakultät für
+<br/>Lehrstuhl für Informatik VI
+<br/>Prof. Dr.-Ing. H. Ney
+<br/>vorgelegt von:
+<br/>Matrikelnummer 252400
+<br/>Gutachter:
+<br/>Prof. Dr.-Ing. H. Ney
+<br/>Prof. Dr. B. Leibe
+<br/>Betreuer:
+<br/>September 2009
+</td><td>('1804963', 'Harald Hanselmann', 'harald hanselmann')<br/>('1967060', 'Philippe Dreuw', 'philippe dreuw')</td><td></td></tr><tr><td>004e3292885463f97a70e1f511dc476289451ed5</td><td>Quadruplet-wise Image Similarity Learning
+<br/>Marc T. Law
+<br/><b>LIP6, UPMC - Sorbonne University, Paris, France</b></td><td>('1728523', 'Nicolas Thome', 'nicolas thome')<br/>('1702233', 'Matthieu Cord', 'matthieu cord')</td><td>{Marc.Law, Nicolas.Thome, Matthieu.Cord}@lip6.fr
+</td></tr><tr><td>0004f72a00096fa410b179ad12aa3a0d10fc853c</td><td></td><td></td><td></td></tr><tr><td>00b08d22abc85361e1c781d969a1b09b97bc7010</td><td>Who is the Hero? − Semi-Supervised Person Re-Identification in Videos
+<br/><b>Tampere University of Technology, Tampere, Finland</b><br/><b>Nokia Research Center, Tampere, Finland</b><br/>Keywords:
+<br/>Semi-supervised person re-identification, Important person detection, Face tracks, Clustering
+</td><td>('13413642', 'Umar Iqbal', 'umar iqbal')<br/>('9219875', 'Moncef Gabbouj', 'moncef gabbouj')</td><td>{umar.iqbal, moncef.gabbouj}@tut.fi, igor.curcio@nokia.com
+</td></tr><tr><td>007250c2dce81dd839a55f9108677b4f13f2640a</td><td>Advances in Component Based Face Detection
+<br/>S. M. Bileschi
+<br/>B. Heisele
+<br/>Center for Biological And Computational Learning
+<br/><b>Massachusetts Institute of Technology</b><br/>Cambridge, MA.
+<br/>Honda Research and Development
+<br/>Boston, MA.
+</td><td></td><td></td></tr><tr><td>00e3957212517a252258baef833833921dd308d4</td><td>Adaptively Weighted Multi-task Deep Network for Person
+<br/>A￿ribute Classification
+<br/><b>Shanghai Key Lab of Intelligent Information Processing, School of Computer Science, Fudan University, China</b><br/><b>School of Data Science, Fudan University, China</b></td><td>('37391748', 'Keke He', 'keke he')<br/>('11032846', 'Zhanxiong Wang', 'zhanxiong wang')<br/>('35782003', 'Yanwei Fu', 'yanwei fu')<br/>('6260277', 'Rui Feng', 'rui feng')<br/>('1717861', 'Yu-Gang Jiang', 'yu-gang jiang')<br/>('1713721', 'Xiangyang Xue', 'xiangyang xue')</td><td>{kkhe15,15210240046,yanweifu,fengrui,ygj,xyxue}@fudan.edu.cn
+</td></tr><tr><td>00f0ed04defec19b4843b5b16557d8d0ccc5bb42</td><td></td><td></td><td></td></tr><tr><td>0037bff7be6d463785d4e5b2671da664cd7ef746</td><td>Author manuscript, published in "European Conference on Computer Vision (ECCV '10) 6311 (2010) 634--647"
+<br/> DOI : 10.1007/978-3-642-15549-9_46
+</td><td></td><td></td></tr><tr><td>009a18d04a5e3ec23f8ffcfc940402fd8ec9488f</td><td>BOYRAZ ET AL. : WEAKLY-SUPERVISED ACTION RECOGNITION BY LOCALIZATION
+<br/>Action Recognition by Weakly-Supervised
+<br/>Discriminative Region Localization
+<br/>Marshall Tappen12
+<br/>1 Department of EECS
+<br/><b>University of Central Florida</b><br/>Orlando, FL USA
+<br/><b>Amazon, Inc</b><br/>Seattle, WA USA
+<br/><b>Sighthound, Inc</b><br/>Orlando, FL USA
+</td><td>('3174233', 'Hakan Boyraz', 'hakan boyraz')<br/>('2234898', 'Syed Zain Masood', 'syed zain masood')<br/>('6312216', 'Baoyuan Liu', 'baoyuan liu')<br/>('1691260', 'Hassan Foroosh', 'hassan foroosh')</td><td>hakanb@amazon.com
+<br/>zainmasood@sighthound.com
+<br/>bliu@cs.ucf.edu
+<br/>tappenm@amazon.com
+<br/>foroosh@cs.ucf.edu
+</td></tr><tr><td>0066caed1238de95a431d836d8e6e551b3cde391</td><td>Filtered Component Analysis to Increase Robustness
+<br/>to Local Minima in Appearance Models
+<br/><b>Robotics Institute, Carnegie Mellon University, Pittsburgh, Pennsylvania</b><br/><b>Pennsylvania</b></td><td>('1707876', 'Fernando De la Torre', 'fernando de la torre')</td><td>ftorre@cs.cmu.edu acollet@cs.cmu.edu mquero@andrew.cmu.edu
+<br/>tk@cs.cmu.edu
+<br/>jeffcohn@pitt.edu
+</td></tr><tr><td>00075519a794ea546b2ca3ca105e2f65e2f5f471</td><td>Generating a Large, Freely-Available Dataset for
+<br/>Face-Related Algorithms
+<br/><b>Amherst College</b></td><td>('40175953', 'Benjamin Mears', 'benjamin mears')</td><td></td></tr><tr><td>0019925779bff96448f0c75492717e4473f88377</td><td>Deep Heterogeneous Face Recognition Networks based on Cross-modal
+<br/>Distillation and an Equitable Distance Metric
+<br/><b>U.S. Army Research Laboratory</b><br/><b>University of Maryland, College Park</b><br/>3Booz Allen Hamilton Inc.
+</td><td>('39412489', 'Christopher Reale', 'christopher reale')<br/>('2445131', 'Hyungtae Lee', 'hyungtae lee')<br/>('1688527', 'Heesung Kwon', 'heesung kwon')</td><td>reale@umiacs.umd.edu
+<br/>lee hyungtae@bah.com
+<br/>heesung.kwon.civ@mail.mil
+</td></tr><tr><td>00e9011f58a561500a2910a4013e6334627dee60</td><td>FACIAL EXPRESSION RECOGNITION USING ANGLE-RELATED INFORMATION
+<br/>FROM FACIAL MESHES
+<br/>1Computer Science Department, Aristotle
+<br/><b>University of Thessaloniki</b><br/><b>University Campus, 54124, Thessaloniki, Greece</b><br/>phone: (+30) 2310 996361, fax: (+30) 2310 996304,
+<br/>web: www.aiia.csd.auth.gr
+</td><td>('1738865', 'Nicholas Vretos', 'nicholas vretos')<br/>('1681629', 'Vassilios Solachidis', 'vassilios solachidis')<br/>('3176394', 'Petr Somol', 'petr somol')<br/>('1698588', 'Ioannis Pitas', 'ioannis pitas')</td><td>email: vretos,vasilis,pitas@aiia.csd.auth.gr
+</td></tr><tr><td>00d9d88bb1bdca35663946a76d807fff3dc1c15f</td><td>Subjects and Their Objects: Localizing Interactees for a
+<br/>Person-Centric View of Importance
+</td><td>('3197570', 'Chao-Yeh Chen', 'chao-yeh chen')</td><td></td></tr><tr><td>00a967cb2d18e1394226ad37930524a31351f6cf</td><td>Fully-adaptive Feature Sharing in Multi-Task Networks with Applications in
+<br/>Person Attribute Classification
+<br/>UC San Diego
+<br/>IBM Research
+<br/>IBM Research
+<br/>Binghamton Univeristy, SUNY
+<br/>UC San Diego
+<br/>Rogerio Feris
+<br/>IBM Research
+</td><td>('2325498', 'Yongxi Lu', 'yongxi lu')<br/>('8991006', 'Yu Cheng', 'yu cheng')<br/>('40632040', 'Abhishek Kumar', 'abhishek kumar')<br/>('2443456', 'Shuangfei Zhai', 'shuangfei zhai')<br/>('1737723', 'Tara Javidi', 'tara javidi')</td><td>yol070@ucsd.edu
+<br/>abhishk@us.ibm.com
+<br/>szhai2@binghamton.edu
+<br/>chengyu@us.ibm.com
+<br/>tjavidi@eng.ucsd.edu
+<br/>rsferis@us.ibm.com
+</td></tr><tr><td>00f1e5e954f9eb7ffde3ca74009a8c3c27358b58</td><td>Unsupervised Clustering for Google Searches of Celebrity Images
+<br/><b>California Institute of Technology, Pasadena, CA</b><br/>* These authors contributed equally in this work
+</td><td>('3075121', 'Alex Holub', 'alex holub')<br/>('2462051', 'Pierre Moreels', 'pierre moreels')<br/>('1690922', 'Pietro Perona', 'pietro perona')</td><td>holub@vision.caltech.edu, pmoreels@vision.caltech.edu, perona@vision.caltech.edu
+</td></tr><tr><td>00a3cfe3ce35a7ffb8214f6db15366f4e79761e3</td><td>Kinect for real-time emotion recognition via facial expressions. Frontiers of
+<br/>Information Technology & Electronic Engineering, 16(4):272-282.
+<br/>[doi:10.1631/FITEE.1400209]
+<br/>Using Kinect for real-time emotion
+<br/>recognition via facial expressions
+<br/>Key words: Kinect, Emotion recognition, Facial expression, Real-time
+<br/>classification, Fusion algorithm, Support vector machine (SVM)
+<br/> ORCID: http://orcid.org/0000-0002-5021-9057
+<br/>Front Inform Technol & Electron Eng </td><td>('2566775', 'Qi-rong Mao', 'qi-rong mao')<br/>('2016065', 'Xin-yu Pan', 'xin-yu pan')<br/>('20342486', 'Yong-zhao Zhan', 'yong-zhao zhan')<br/>('2800876', 'Xiang-jun Shen', 'xiang-jun shen')<br/>('2566775', 'Qi-rong Mao', 'qi-rong mao')</td><td>E-mail: mao_qr@ujs.edu.cn
+</td></tr><tr><td>0058cbe110933f73c21fa6cc9ae0cd23e974a9c7</td><td>BISWAS, JACOBS: AN EFFICIENT ALGORITHM FOR LEARNING DISTANCES
+<br/>An Efficient Algorithm for Learning
+<br/>Distances that Obey the Triangle Inequality
+<br/>http://www.xrci.xerox.com/profile-main/67
+<br/>http://www.cs.umd.edu/~djacobs/
+<br/>Xerox Research Centre India
+<br/>Bangalore, India
+<br/>Computer Science Department
+<br/><b>University of Maryland</b><br/><b>College Park, USA</b></td><td>('2221075', 'Arijit Biswas', 'arijit biswas')<br/>('1682573', 'David Jacobs', 'david jacobs')</td><td></td></tr><tr><td>004a1bb1a2c93b4f379468cca6b6cfc6d8746cc4</td><td>Balanced k-Means and Min-Cut Clustering
+</td><td>('1729163', 'Xiaojun Chang', 'xiaojun chang')<br/>('1688370', 'Feiping Nie', 'feiping nie')<br/>('1727419', 'Zhigang Ma', 'zhigang ma')<br/>('39033919', 'Yi Yang', 'yi yang')</td><td></td></tr><tr><td>00d94b35ffd6cabfb70b9a1d220b6823ae9154ee</td><td>Discriminative Bayesian Dictionary Learning
+<br/>for Classification
+</td><td>('2941543', 'Naveed Akhtar', 'naveed akhtar')<br/>('1688013', 'Faisal Shafait', 'faisal shafait')</td><td></td></tr><tr><td>00ebc3fa871933265711558fa9486057937c416e</td><td>Collaborative Representation based Classification
+<br/>for Face Recognition
+<br/><b>The Hong Kong Polytechnic University, Hong Kong, China</b><br/><b>b School of Applied Mathematics, Xidian University, Xi an, China</b><br/>c Principal Researcher, Microsoft Research Asia, Beijing, China
+</td><td>('36685537', 'Lei Zhang', 'lei zhang')<br/>('5828998', 'Meng Yang', 'meng yang')<br/>('2340559', 'Xiangchu Feng', 'xiangchu feng')<br/>('1700297', 'Yi Ma', 'yi ma')<br/>('1698371', 'David Zhang', 'david zhang')</td><td></td></tr><tr><td>006f283a50d325840433f4cf6d15876d475bba77</td><td>756
+<br/>Preserving Structure in Model-Free Tracking
+</td><td>('2883723', 'Lu Zhang', 'lu zhang')<br/>('1803520', 'Laurens van der Maaten', 'laurens van der maaten')</td><td></td></tr><tr><td>00b29e319ff8b3a521b1320cb8ab5e39d7f42281</td><td>Towards Transparent Systems: Semantic
+<br/>Characterization of Failure Modes
+<br/><b>Carnegie Mellon University, Pittsburgh, USA</b><br/><b>University of Washington, Seattle, USA</b><br/>3 Virginia Tech, Blacksburg, USA
+</td><td>('3294630', 'Aayush Bansal', 'aayush bansal')<br/>('2270286', 'Ali Farhadi', 'ali farhadi')<br/>('1713589', 'Devi Parikh', 'devi parikh')</td><td></td></tr><tr><td>00d931eccab929be33caea207547989ae7c1ef39</td><td>The Natural Input Memory Model
+<br/><b>IKAT, Universiteit Maastricht, St. Jacobsstraat 6, 6211 LB Maastricht, The Netherlands</b><br/><b>Universiteit van Amsterdam, Roeterstraat 15, 1018 WB Amsterdam, The Netherlands</b><br/><b>IKAT, Universiteit Maastricht, St. Jacobsstraat 6, 6211 LB Maastricht, The Netherlands</b></td><td></td><td>Joyca P.W. Lacroix (j.lacroix@cs.unimaas.nl)
+<br/>Jaap M.J. Murre (jaap@murre.com)
+<br/>Eric O. Postma (postma@cs.unimaas.nl)
+<br/>H. Jaap van den Herik (herik@cs.unimaas.nl)
+</td></tr><tr><td>0059b3dfc7056f26de1eabaafd1ad542e34c2c2e</td><td></td><td></td><td></td></tr><tr><td>0052de4885916cf6949a6904d02336e59d98544c</td><td><b>Springer Science + Business Media, Inc. Manufactured in The Netherlands</b><br/>DOI: 10.1007/s10994-005-3561-6
+<br/>Generalized Low Rank Approximations of Matrices
+<br/><b>University of Minnesota-Twin Cities, Minneapolis</b><br/>MN 55455, USA
+<br/>Editor:
+<br/>Peter Flach
+<br/>Published online: 12 August 2005
+</td><td>('37513601', 'Jieping Ye', 'jieping ye')</td><td>jieping@cs.umn.edu
+</td></tr><tr><td>6e60536c847ac25dba4c1c071e0355e5537fe061</td><td>Computer Vision and Natural Language Processing: Recent
+<br/>Approaches in Multimedia and Robotics
+<br/>71
+<br/>Integrating computer vision and natural language processing is a novel interdisciplinary field that has
+<br/>received a lot of attention recently. In this survey, we provide a comprehensive introduction of the integration
+<br/>of computer vision and natural language processing in multimedia and robotics applications with more than
+<br/>200 key references. The tasks that we survey include visual attributes, image captioning, video captioning,
+<br/>visual question answering, visual retrieval, human-robot interaction, robotic actions, and robot navigation.
+<br/>We also emphasize strategies to integrate computer vision and natural language processing models as a
+<br/>unified theme of distributional semantics. We make an analog of distributional semantics in computer vision
+<br/>and natural language processing as image embedding and word embedding, respectively. We also present a
+<br/>unified view for the field and propose possible future directions.
+<br/>Categories and Subject Descriptors: I.2.0 [Artificial Intelligence]: General; I.2.7 [Artificial Intelligence]:
+<br/>Natural Language Processing; I.2.9 [Artificial Intelligence]: Robotics; I.2.10 [Artificial Intelligence]:
+<br/>Vision and Scene Understanding; I.4.9 [Image Processing and Computer Vision]: Applications; I.5.4
+<br/>[Pattern Recognition]: Applications
+<br/>General Terms: Computer Vision, Natural Language Processing, Robotics
+<br/>Additional Key Words and Phrases: Language and vision, survey, multimedia, robotics, symbol grounding,
+<br/>distributional semantics, computer vision, natural language processing, visual attribute, image captioning,
+<br/>imitation learning, word2vec, word embedding, image embedding, semantic parsing, lexical semantics
+<br/>ACM Reference Format:
+<br/>Computer vision and natural language processing: Recent approaches in multimedia and robotics. ACM
+<br/>Comput. Surv. 49, 4, Article 71 (December 2016), 44 pages.
+<br/>DOI: http://dx.doi.org/10.1145/3009906
+<br/>1. INTRODUCTION
+<br/>We have many ways to describe the world for communication between people: texts,
+<br/>gestures, sign languages, and face expressions are all ways of sharing meaning. Lan-
+<br/>guage is unique among communication systems in that its compositionality through
+<br/>syntax allows a limitless number of meanings to be expressed. Such meaning ulti-
+<br/>mately must be tied to perception of the world. This is usually referred to as the symbol
+<br/>An earlier version of this article appeared as “Computer Vision and Natural Language Processing: Re-
+<br/>cent Approaches in Multimedia and Robotics,” Scholarly Paper Archive, Department of Computer Science,
+<br/><b>University of Maryland, College Park, MD</b><br/>Authors’ addresses: P. Wiriyathammabhum, C. Ferm ¨uller, and Y. Aloimonos, Computer Vision Lab, Uni-
+<br/>Permission to make digital or hard copies of part or all of this work for personal or classroom use is granted
+<br/>without fee provided that copies are not made or distributed for profit or commercial advantage and that
+<br/>copies show this notice on the first page or initial screen of a display along with the full citation. Copyrights for
+</td><td>('2862582', 'Peratham Wiriyathammabhum', 'peratham wiriyathammabhum')<br/>('1937719', 'Douglas Summers-Stay', 'douglas summers-stay')<br/>('1697493', 'Yiannis Aloimonos', 'yiannis aloimonos')<br/>('2862582', 'Peratham Wiriyathammabhum', 'peratham wiriyathammabhum')<br/>('1937719', 'Douglas Summers-Stay', 'douglas summers-stay')<br/>('1697493', 'Yiannis Aloimonos', 'yiannis aloimonos')</td><td>versity of Maryland College Park, MD 20742-3275; email: {peratham@cs.umd.edu, fer@umiacs.umd.edu,
+<br/>yiannis@cs.umd.edu}. D. Summers-Stay, U.S. Army Research Laboratory, Adelphi, MD 20783; email:
+<br/>{douglas.a.summers-stay.civ@mail.mil}.
+</td></tr><tr><td>6e198f6cc4199e1c4173944e3df6f39a302cf787</td><td>MORPH-II: Inconsistencies and Cleaning Whitepaper
+<br/>NSF-REU Site at UNC Wilmington, Summer 2017
+</td><td>('39845059', 'G. Bingham', 'g. bingham')<br/>('1693470', 'B. Yip', 'b. yip')<br/>('1833570', 'M. Ferguson', 'm. ferguson')<br/>('1693283', 'C. Chen', 'c. chen')<br/>('11134292', 'Y. Wang', 'y. wang')<br/>('3369885', 'T. Kling', 't. kling')</td><td></td></tr><tr><td>6eaf446dec00536858548fe7cc66025b70ce20eb</td><td></td><td></td><td></td></tr><tr><td>6e173ad91b288418c290aa8891193873933423b3</td><td>Are you from North or South India? A hard race classification task reveals
+<br/>systematic representational differences between humans and machines
+<br/><b>aCentre for Neuroscience, Indian Institute of Science, Bangalore, India</b></td><td>('2478739', 'Harish Katti', 'harish katti')</td><td></td></tr><tr><td>6e91be2ad74cf7c5969314b2327b513532b1be09</td><td>Dimensionality Reduction with Subspace Structure
+<br/>Preservation
+<br/>Department of Computer Science
+<br/>SUNY Buffalo
+<br/>Buffalo, NY 14260
+</td><td>('2309967', 'Devansh Arpit', 'devansh arpit')<br/>('1841118', 'Ifeoma Nwogu', 'ifeoma nwogu')<br/>('1723877', 'Venu Govindaraju', 'venu govindaraju')</td><td>{devansh,inwogua,govind}@buffalo.edu
+</td></tr><tr><td>6eba25166fe461dc388805cc2452d49f5d1cdadd</td><td>Pages 122.1-122.12
+<br/>DOI: https://dx.doi.org/10.5244/C.30.122
+</td><td></td><td></td></tr><tr><td>6e8a81d452a91f5231443ac83e4c0a0db4579974</td><td>Illumination robust face representation based on intrinsic geometrical
+<br/>information
+<br/>Soyel, H; Ozmen, B; McOwan, PW
+<br/>This is a pre-copyedited, author-produced PDF of an article accepted for publication in IET
+<br/>Conference on Image Processing (IPR 2012). The version of record is available
+<br/>http://ieeexplore.ieee.org/document/6290632/?arnumber=6290632&tag=1
+<br/>For additional information about this publication click this link.
+<br/>http://qmro.qmul.ac.uk/xmlui/handle/123456789/16147
+<br/>Information about this research object was correct at the time of download; we occasionally
+<br/>make corrections to records, please therefore check the published record when citing. For
+</td><td></td><td>more information contact scholarlycommunications@qmul.ac.uk
+</td></tr><tr><td>6ed738ff03fd9042965abdfaa3ed8322de15c116</td><td>This document is downloaded from DR-NTU, Nanyang Technological
+<br/><b>University Library, Singapore</b><br/>Title
+<br/>K-MEAP: Generating Specified K Clusters with Multiple
+<br/>Exemplars by Efficient Affinity Propagation
+<br/>Author(s) Wang, Yangtao; Chen, Lihui
+<br/>Citation
+<br/>Wang, Y & Chen, L. (2014). K-MEAP: Generating
+<br/>Specified K Clusters with Multiple Exemplars by Efficient
+<br/>Affinity Propagation. 2014 IEEE International Conference
+<br/>on Data Mining (ICDM), 1091-1096.
+<br/>Date
+<br/>2014
+<br/>URL
+<br/>http://hdl.handle.net/10220/39690
+<br/>Rights
+<br/>© 2014 IEEE. Personal use of this material is permitted.
+<br/>Permission from IEEE must be obtained for all other
+<br/><b>uses, in any current or future media, including</b><br/>reprinting/republishing this material for advertising or
+<br/>promotional purposes, creating new collective works, for
+<br/>resale or redistribution to servers or lists, or reuse of any
+<br/>copyrighted component of this work in other works. The
+<br/>published version is available at:
+<br/>[http://dx.doi.org/10.1109/ICDM.2014.54].
+</td><td></td><td></td></tr><tr><td>6ecd4025b7b5f4894c990614a9a65e3a1ac347b2</td><td>International Journal on Recent and Innovation Trends in Computing and Communication
+<br/>
+<br/> ISSN: 2321-8169
+<br/>Volume: 2 Issue: 5
+<br/> 1275– 1281
+<br/>_______________________________________________________________________________________________
+<br/>Automatic Naming of Character using Video Streaming for Face
+<br/>Recognition with Graph Matching
+<br/>Nivedita.R.Pandey
+<br/>Ranjan.P.Dahake
+<br/>PG Student at MET’s IOE Bhujbal Knowledge City,
+<br/>PG Student at MET’s IOE Bhujbal Knowledge City,
+<br/>Nasik, Maharashtra, India,
+<br/>Nasik, Maharashtra, India,
+</td><td></td><td>pandeynivedita7@gmail.com
+<br/>dahakeranjan@gmail.com
+</td></tr><tr><td>6eddea1d991e81c1c3024a6cea422bc59b10a1dc</td><td>Towards automatic analysis of gestures and body
+<br/>expressions in depression
+<br/><b>University of Cambridge</b><br/>Computer Laboratory
+<br/>Cambridge, UK
+<br/><b>University of Cambridge</b><br/>Computer Laboratory
+<br/>Cambridge, UK
+</td><td>('2022940', 'Marwa Mahmoud', 'marwa mahmoud')<br/>('39840677', 'Peter Robinson', 'peter robinson')</td><td>marwa.mahmoud@cl.cam.ac.uk
+<br/>peter.robinson@cl.cam.ac.uk
+</td></tr><tr><td>6eaeac9ae2a1697fa0aa8e394edc64f32762f578</td><td></td><td></td><td></td></tr><tr><td>6ee2ea416382d659a0dddc7a88fc093accc2f8ee</td><td></td><td></td><td></td></tr><tr><td>6e97a99b2879634ecae962ddb8af7c1a0a653a82</td><td>Towards Context-aware Interaction Recognition∗
+<br/><b>School of Computer Science, University of Adelaide, Australia</b><br/>Contents
+<br/>1. Introduction
+<br/>2. Related work
+<br/>3. Methods
+<br/>3.1. Context-aware interaction classification
+<br/>framework . . . . . . . . . . . . . . . . .
+<br/>3.2. Feature representations for interactions
+<br/>recognition . . . . . . . . . . . . . . . .
+<br/>3.2.1
+<br/>Spatial feature representation . .
+<br/>3.2.2 Appearance feature representation
+<br/>Improving appearance representation
+<br/>with attention and context-aware atten-
+<br/>tion . . . . . . . . . . . . . . . . . . . .
+<br/>3.4. Implementation details . . . . . . . . . .
+<br/>3.3.
+<br/>4. Experiments
+<br/>4.1. Evaluation on the Visual Relationship
+<br/>dataset . . . . . . . . . . . . . . . . . . .
+<br/>4.1.1 Detection results comparison . .
+<br/>Zero-shot learning performance
+<br/>4.1.2
+<br/>evaluation . . . . . . . . . . . . .
+<br/>4.1.3 Extensions and comparison with
+<br/>the state-of-the-art methods . . .
+<br/>4.2. Evaluation on the Visual Phrase dataset
+<br/>5. Conclusion
+</td><td>('3194022', 'Bohan Zhuang', 'bohan zhuang')<br/>('2161037', 'Lingqiao Liu', 'lingqiao liu')<br/>('1780381', 'Chunhua Shen', 'chunhua shen')</td><td></td></tr><tr><td>6e9a8a34ab5b7cdc12ea52d94e3462225af2c32c</td><td>Fusing Aligned and Non-Aligned Face Information
+<br/>for Automatic Affect Recognition in the Wild: A Deep Learning Approach
+<br/>Computational NeuroSystems Laboratory (CNSL)
+<br/><b>Korea Advanced Institute of Science and Technology (KAIST</b></td><td>('3918690', 'Bo-Kyeong Kim', 'bo-kyeong kim')<br/>('2527421', 'Suh-Yeon Dong', 'suh-yeon dong')<br/>('3294960', 'Jihyeon Roh', 'jihyeon roh')<br/>('34577016', 'Soo-Young Lee', 'soo-young lee')</td><td>{bokyeong1015, suhyeon.dong}@gmail.com, {rohleejh, gmkim90, sylee}@kaist.ac.kr
+</td></tr><tr><td>6e3a181bf388dd503c83dc324561701b19d37df1</td><td>Finding a low-rank basis in a matrix subspace
+<br/>Andr´e Uschmajew
+</td><td>('2391697', 'Yuji Nakatsukasa', 'yuji nakatsukasa')</td><td></td></tr><tr><td>6ef1996563835b4dfb7fda1d14abe01c8bd24a05</td><td>Nonparametric Part Transfer for Fine-grained Recognition
+<br/><b>Computer Vision Group, Friedrich Schiller University Jena</b><br/>www.inf-cv.uni-jena.de
+</td><td>('1679449', 'Erik Rodner', 'erik rodner')<br/>('1720839', 'Alexander Freytag', 'alexander freytag')<br/>('1728382', 'Joachim Denzler', 'joachim denzler')</td><td></td></tr><tr><td>6e8c3b7d25e6530a631ea01fbbb93ac1e8b69d2f</td><td>Deep Episodic Memory: Encoding, Recalling, and Predicting
+<br/>Episodic Experiences for Robot Action Execution
+</td><td>('35309584', 'Jonas Rothfuss', 'jonas rothfuss')<br/>('2128564', 'Fabio Ferreira', 'fabio ferreira')<br/>('34876449', 'Eren Erdal Aksoy', 'eren erdal aksoy')<br/>('46432716', 'You Zhou', 'you zhou')<br/>('1722677', 'Tamim Asfour', 'tamim asfour')</td><td></td></tr><tr><td>6e911227e893d0eecb363015754824bf4366bdb7</td><td>Wasserstein Divergence for GANs
+<br/>1 Computer Vision Lab, ETH Zurich, Switzerland
+<br/>2 VISICS, KU Leuven, Belgium
+</td><td>('1839268', 'Jiqing Wu', 'jiqing wu')<br/>('7945869', 'Zhiwu Huang', 'zhiwu huang')<br/>('30691454', 'Janine Thoma', 'janine thoma')<br/>('32610154', 'Dinesh Acharya', 'dinesh acharya')<br/>('1681236', 'Luc Van Gool', 'luc van gool')</td><td>{jwu,zhiwu.huang,jthoma,vangool}@vision.ee.ethz.ch,
+<br/>acharyad@student.ethz.ch
+</td></tr><tr><td>6ee8a94ccba10062172e5b31ee097c846821a822</td><td>Submitted 3/13; Revised 10/13; Published 12/13
+<br/>How to Solve Classification and Regression Problems on
+<br/>High-Dimensional Data with a Supervised
+<br/>Extension of Slow Feature Analysis
+<br/>Institut f¨ur Neuroinformatik
+<br/>Ruhr-Universit¨at Bochum
+<br/>Bochum D-44801, Germany
+<br/>Editor: David Dunson
+</td><td>('2366497', 'Alberto N. Escalante', 'alberto n. escalante')<br/>('1736245', 'Laurenz Wiskott', 'laurenz wiskott')</td><td>ALBERTO.ESCALANTE@INI.RUB.DE
+<br/>LAURENZ.WISKOTT@INI.RUB.DE
+</td></tr><tr><td>6ee64c19efa89f955011531cde03822c2d1787b8</td><td>Table S1: Review of existing facial expression databases that are often used in social
+<br/>psycholgy.
+<br/>Author
+<br/>Face
+<br/>name
+<br/>database
+<br/>Expressions1
+<br/>Format
+<br/>Short summary
+<br/>[1]
+<br/>GEMEP Corpus
+<br/>Mind Reading: the
+<br/>interactive
+<br/>guide
+<br/>to emotions
+<br/>audio
+<br/>and
+<br/>video
+<br/>record-
+<br/>ings
+<br/>Videos
+<br/>anger,
+<br/>amuse-
+<br/>admiration,
+<br/>ment,
+<br/>tender-
+<br/>ness, disgust, despair,
+<br/>pride,
+<br/>shame, anxiety
+<br/>(worry),
+<br/>interest,
+<br/>irritation, joy (elation),
+<br/>contempt, panic fear,
+<br/>pleasure
+<br/>(sensual),
+<br/>relief, surprise, sadness
+<br/>expressions
+<br/>groups
+<br/>:afraid, angry, bored,
+<br/>bothered, disbelieving,
+<br/>disgust, excited,
+<br/>fond,
+<br/>happy, hurt, interested,
+<br/>kind,
+<br/>romantic,
+<br/>sad, sneaky, sorry, sure,
+<br/>thinking,
+<br/>surprised,
+<br/>touched,
+<br/>unfriendly,
+<br/>unsure, wanting
+<br/>liked,
+<br/>RU-FACS Sponta-
+<br/>neous Expression
+<br/>Database
+<br/>spontaneous facial ac-
+<br/>tions
+<br/>Videos
+<br/>This database contains more than 7000 clips of the six basic
+<br/>emotions as well as subtle emotions. For the recordings 10
+<br/>professional actors (5 female) were coached by a professional
+<br/>director. The actors received a list of the emotions together
+<br/>with short definitions and brief scenarios. The recordings are
+<br/>available in different intensity levels and part of the database
+<br/>has been validated.
+<br/>The database contains over 400 videos of facial expressions
+<br/>that are summarized in 24 groups. Each group consists of dif-
+<br/>ferent subordinate expressions. Each expression is displayed
+<br/>by 6 models ranging in age.
+<br/>100 participants were asked for recording the database. There-
+<br/>fore, a false option paradigm was used which is though to elicit
+<br/>spontaneous facial expressions. Here, participants fill out a
+<br/>questionnaire regarding their opinions about particular social
+<br/>or political issues. Participants are then asked about their
+<br/>answer by an interviewer. Either participants are asked to
+<br/>tell the truth or to fool the interviewer. Moreover, partici-
+<br/>pants were financially rewarded. The expressions were video
+<br/>captured by four synchronized cameras and clips of 33 partic-
+<br/>ipants have been FACS coded (onset, apex, and offset of the
+<br/>face action).
+<br/>Comprises 1008 short videos of expressions produced by 8 Ital-
+<br/>ian professional actors. Each expression was recorded in three
+<br/>intensities (low, medium, and high) and in two different condi-
+<br/>tions: (1) Utterance condition in which actors spoke additional
+<br/>sentences and (2) Non-Utterance condition. Here, actors were
+<br/>additionally given scenarios according to the expressions to be
+<br/>produced.
+<br/>The expressions are taken from 12 participants (European,
+<br/>Asian and African). Each expression was created using a di-
+<br/>rect facial action task and all expressions were FACS coded.
+<br/>Moreover, the expressions have been morphed into 5 different
+<br/>levels of intensity.
+<br/>It contains 165 greyscale images of 15 individuals one per
+<br/>different facial expression or configuration (with or without
+<br/>glasses, different camera perspectives).
+<br/>This database contains two sets of facial expressions: (1) The
+<br/>laboratory set, that includes 40 participants (varied in culture,
+<br/>race, and appearance) displaying their own choice of expres-
+<br/>sions. Participants were allowed to move their head without
+<br/>going into profile view. Moreover, they were asked to avoid
+<br/>speech. Each video sequence contains 1-3 expressions.
+<br/>(2)
+<br/>video recordings from TV that also contained speech.
+<br/>The database contains videos of one actor performing approx-
+<br/>imately 45 action units which were recorded from six different
+<br/>viewpoints simultaneously.
+<br/>For this database, between 19 and 97 different action units
+<br/>were recorded form 10 participants. Action unit sequences
+<br/>contain single and combined action units. The peak of each ex-
+<br/>pression has been manually coded by certified FACS experts.
+<br/>Moreover, a framework is proposed that allows to build dy-
+<br/>namic 3D morphable models for the first time.
+<br/>[2]
+<br/>[3]
+<br/>[5]
+<br/>[6]
+<br/>[7]
+<br/>Breidt2
+<br/>[8]
+<br/>Chen3 , 2007
+<br/>[4]
+<br/>DaFEx
+<br/>happiness,
+<br/>fear,
+<br/>and disgust
+<br/>sadness,
+<br/>surprise,
+<br/>anger
+<br/>Videos
+<br/>Images
+<br/>Images
+<br/>Videos
+<br/>happiness,
+<br/>anger,
+<br/>and embarrassment
+<br/>fear,
+<br/>sadness,
+<br/>disgust,
+<br/>happiness,
+<br/>sleepy,
+<br/>wink
+<br/>surprise,
+<br/>sadness,
+<br/>and
+<br/>happiness,
+<br/>fear,
+<br/>and disgust
+<br/>sadness,
+<br/>surprise,
+<br/>anger
+<br/>Facial action units
+<br/>Videos
+<br/>Facial action units
+<br/>Videos
+<br/>Montreal Set
+<br/>of
+<br/>facial displays of
+<br/>emotion (MSFDE)
+<br/>The Yale
+<br/>Database
+<br/>Face
+<br/><b>University</b><br/>of
+<br/>Database
+<br/>Maryland
+<br/>Face
+<br/>Database
+<br/>MPI
+<br/>Video
+<br/>the
+<br/>of
+<br/>Dynamic
+<br/>FACS
+<br/>(D3DFACS)
+<br/>data
+<br/>3D
+<br/>set
+<br/>Fa-
+<br/>Taiwanese
+<br/>cial
+<br/>Expression
+<br/>Database (TFEID)
+<br/>anger, contempt, dis-
+<br/>gust,
+<br/>fear, happiness,
+<br/>sadness and surprise
+<br/>Images
+<br/>The database consists of 7200 images captured from 40 indi-
+<br/>viduals. The expressions are displayed in two (high and low)
+<br/>intensities and two viewing angles (0◦ and 45◦ ) simultane-
+<br/>ously.
+<br/>[9]
+<br/>CAFE Database
+<br/>anger, disgust, happy,
+<br/>maudlin (for sad), fear,
+<br/>surprise
+<br/>Images
+<br/>The database consists of two normalized versions (one gamma
+<br/>corrected and the other histogram equalized) of the faces.
+<br/>1Neutral expression is not included.
+<br/>2Please see http://vdb.kyb.tuebingen.mpg.de/.
+<br/>3Please see http://bml.ym.edu.tw/ download/html/news.htm.
+</td><td></td><td></td></tr><tr><td>6e00a406edb508312108f683effe6d3c1db020fb</td><td>Faces as Lighting Probes via Unsupervised Deep
+<br/>Highlight Extraction
+<br/><b>Simon Fraser University, Burnaby, Canada</b><br/><b>National University of Defense Technology, Changsha, China</b><br/>3 Microsoft Research, Beijing, China
+</td><td>('2693616', 'Renjiao Yi', 'renjiao yi')<br/>('2041096', 'Chenyang Zhu', 'chenyang zhu')<br/>('37291674', 'Ping Tan', 'ping tan')<br/>('1686911', 'Stephen Lin', 'stephen lin')</td><td>{renjiaoy, cza68, pingtan}@sfu.ca
+<br/>stevelin@microsoft.com
+</td></tr><tr><td>6e94c579097922f4bc659dd5d6c6238a428c4d22</td><td>Graph Based Multi-class Semi-supervised
+<br/>Learning Using Gaussian Process
+<br/>State Key Laboratory of Intelligent Technology and Systems,
+<br/><b>Tsinghua University, Beijing, China</b></td><td>('1809614', 'Yangqiu Song', 'yangqiu song')<br/>('1700883', 'Changshui Zhang', 'changshui zhang')<br/>('1760678', 'Jianguo Lee', 'jianguo lee')</td><td>{songyq99, lijg01}@mails.tsinghua.edu.cn, zcs@mail.tsinghua.edu.cn
+</td></tr><tr><td>6e379f2d34e14efd85ae51875a4fa7d7ae63a662</td><td>A NEW MULTI-MODAL BIOMETRIC SYSTEM
+<br/>BASED ON FINGERPRINT AND FINGER
+<br/>VEIN RECOGNITION
+<br/>Master's Thesis
+<br/>Department of Software Engineering
+<br/>JULY-2014
+<br/>I
+</td><td>('37171106', 'Naveed AHMED', 'naveed ahmed')<br/>('1987743', 'Asaf VAROL', 'asaf varol')</td><td></td></tr><tr><td>6eb1e006b7758b636a569ca9e15aafd038d2c1b1</td><td>Human Capabilities on Video-based Facial
+<br/>Expression Recognition
+<br/><b>Faculty of Science and Engineering, Waseda University, Tokyo, Japan</b><br/>2 Institut f¨ur Informatik, Technische Universit¨at M¨unchen, Germany
+</td><td>('32131501', 'Matthias Wimmer', 'matthias wimmer')<br/>('1989987', 'Ursula Zucker', 'ursula zucker')<br/>('1699132', 'Bernd Radig', 'bernd radig')</td><td></td></tr><tr><td>6eece104e430829741677cadc1dfacd0e058d60f</td><td>Automated Facial Image Analysis 1
+<br/>To appear in J. A. Coan & J. B. Allen (Eds.), The handbook of emotion elicitation and assess-
+<br/><b>ment. Oxford University Press Series in Affective Science. New York: Oxford</b><br/>Use of Automated Facial Image Analysis for Measurement of Emotion Expression
+<br/>Department of Psychology
+<br/><b>University of Pittsburgh</b><br/>Takeo Kanade
+<br/><b>Robotics Institute</b><br/><b>Carnegie Mellon University</b><br/>Facial expressions are a key index of emotion. They have consistent correlation with
+<br/>self-reported emotion (Keltner, 1995; Rosenberg & Ekman, 1994; Ekman & Rosenberg, in press)
+<br/>and emotion-related central and peripheral physiology (Davidson, Ekman, Saron, Senulis, &
+<br/>Friesen, 1990; Fox & Davidson, 1988; Levenson, Ekman, & Friesen, 1990). They putatively
+<br/>share similar underlying dimensions with self-reported emotion (e.g., positive and negative
+<br/>affect) (Bullock & Russell, 1984; Gross & John, 1997; Watson & Tellegen, 1985). Facial
+<br/>expressions serve interpersonal functions of emotion by conveying communicative intent,
+<br/>signaling affective information in social referencing (Campos, Bertenthal, & Kermoian, 1992),
+<br/>and more generally contributing to the regulation of social interaction (Cohn & Elmore, 1988;
+<br/>Fridlund, 1994; Schmidt & Cohn, 2001). As a measure of trait affect, stability in facial
+<br/>expression emerges early in life (Cohn & Campbell, 1992; Malatesta, Culver, Tesman, &
+<br/>Shephard, 1989). By adulthood, stability is moderately strong, comparable to that for self-
+<br/>reported emotion (Cohn, Schmidt, Gross, & Ekman, 2002), and predictive of favorable outcomes
+<br/>in emotion-related domains including marriage and personal well-being over periods as long as
+<br/>30 years (Harker & Keltner, 2001). Expressive changes in the face are a rich source of cues
+<br/>about intra- and interpersonal functions of emotion (cf. Keltner & Haitd, 1999).
+<br/>clinical practice, reliable, valid, and efficient methods of measurement are critical. Until recently,
+<br/>selecting a measurement method meant choosing among one or another human-observer-based
+<br/>coding system (e.g., Ekman & Friesen, 1978 and Izard, 1983) or facial electromyography
+<br/>(EMG). While each of these approaches has advantages, they are not without costs. Human-
+<br/>observer-based methods are time consuming to learn and use, and they are difficult to
+<br/>standardize, especially across laboratories and over time (Bakeman & Gottman, 1986; Martin &
+<br/>Bateson, 1986). Facial EMG requires placement of sensors on the face, which may inhibit facial
+<br/>action and which rules out its use for naturalistic observation.
+<br/>computer vision. Computer vision is the science of extracting and representing meaningful
+<br/>information from digitized video and recognizing perceptually meaningful patterns. An early
+<br/>focus in automated face image analysis by computer vision was face recognition (Kanade, 1973,
+<br/>To make use of the information afforded by facial expression for emotion science and
+<br/>An emerging alternative to these methods is automated facial image analysis using
+</td><td>('1737918', 'Jeffrey F. Cohn', 'jeffrey f. cohn')</td><td></td></tr><tr><td>6e0a05d87b3cc7e16b4b2870ca24cf5e806c0a94</td><td>RANDOM GRAPHS FOR STRUCTURE
+<br/>DISCOVERY IN HIGH-DIMENSIONAL DATA
+<br/>by
+<br/>Jos¶e Ant¶onio O. Costa
+<br/>A dissertation submitted in partial fulflllment
+<br/>of the requirements for the degree of
+<br/>Doctor of Philosophy
+<br/>(Electrical Engineering: Systems)
+<br/><b>in The University of Michigan</b><br/>2005
+<br/>Doctoral Committee:
+<br/>Professor Alfred O. Hero III, Chair
+<br/>Professor Jefirey A. Fessler
+<br/>Professor David L. Neuhofi
+</td><td>('1703616', 'Susan A. Murphy', 'susan a. murphy')</td><td></td></tr><tr><td>6e1802874ead801a7e1072aa870681aa2f555f35</td><td>1­4244­0728­1/07/$20.00 ©2007 IEEE
+<br/>I ­ 629
+<br/>ICASSP 2007
+</td><td></td><td>-:241/.-)674-,-5+412645.4.)+-4-+/16115DKE?DAC;=20K=9=C2:E=K6=C=@16D=I0K=C1-+-,AF=HJAJ7ELAHIEJOB1EEI=J7H>==+D=F=EC75)2,AF=HJAJB1BH=JE-CEAAHEC+DEAIA7ELAHIEJOB0CC0CC)*564)+60MJA?@A=B=?AEI=ME@AOIJK@EA@FH>AE>JDF=JJAHHA?CEJE=@FIO?DCOEJAH=JKHAI=OBA=JKHA@AI?HEFJHI/=>HBA=JKHA?=*E=HO2=JJAH*2=@-@CAHEAJ=JE0EIJCH=D=LA>AAFHFIA@1JDEIF=FAHMACELA=?FHADAIELAIJK@OBJDAIA@AI?HEFJHIK@AHJDABH=AMHB2HE?EF=+FAJ)=OIEI2+)BMA@>OEA=H,EI?HEE=J)=OIEI,)?F=HA@JDHAA@EBBAHAJFFK=HIEE=HEJOA=IKHAI=@JM@EBBAHAJBA=JKHA?HHAIF@A?AIJH=JACEAIDEIJE?=@?=HALAHMAFHAIAJ=AMBA=JKHA@AI?HEFJH=A@KJE4=@EKI*2=@=IFHFIA=?>E=JEI?DAABHJDA*2=@/=>H@AI?HEFJH6DAANFAHEAJIJDA2KH@KA=@+721-@=J=>=IAI@AIJH=JAJD=J=>LEKIHA?CEJE>IJB*2EI=?DEALA@K@AH2+),)BH=AMH?F=HA@JJDA@EHA?J?=IIE?=JE JDA*2=@/=>HBA=JKHAI=HA?F=H=>A=IMA=IKJK=O?FAAJ=HO=@JDA?>E=JEBJDAIAJM@AI?HEFJHI>HECI=IECE?=JEFHLAAJE?=IIE?=JE?=F=>EEJOLAHIECAAI=@!JDAKJE4=@EKI*2IDMIJKJFAHBH=JDAIJ=JABJDA=HJBA=JKHA@AI?HEFJHI1@AN6AHIa.A=JKHA,AI?HEFJH5EE=HEJOA=IKHA164,7+616DAIK??AIIB=B=?AHA?CEJE=CHEJDCHA=JOHAEAIDMJANJH=?JABBA?JELABA=JKHAIJ@AI?HE>A=E=CA=@ DMJEBAHJDAIEE=HEJOBJMB=?AI>=IA@JDAANJH=?JA@BA=JKHAIIJFHALEKIIJK@EAIB?KIJDA=JJAHF=HJIK?D=I-ECAB=?A .EIDAHB=?A =@LEAM>=IA@HA?CEJE=FFH=?DAI!6DAO=HA=>=IA@JDAHECE=CH=OALAL=KAI=@FHAIAJ@EBBAHAJIEE=HEJOA=IKHAI>=IA@IK>IF=?AJA?DEGKAI6DAHECE=CH=OALABA=JKHABJAIKBBAHIBHJDAEKE=JE=@ANFHAIIEL=HE=JEI=@=O=FFH=?DAID=LA>AAFHFIA@JANJH=?JHAH>KIJHIA=JE?BA=JKHAIBHE=CAI)CJDA/=>HBA=JKHAEIJDAIJFFK=HA=@JDA-=IJE?*K?D/H=FD=J?DEC-*/"AJD@D=IIK??AIIBKOEJACH=JA@/=>HBA=JKHAI=@JDA?=?HHAIF@A?AIJH=JACOBHJDAKJELEAMB=?AHA?CEJEFH>A+>EA@MEJDKEA@IK>IF=?A==OIEI 
EJD=I=I>AAKIA@JEFHLAE@H=@KJ@HB=?AHA?CEJE4A?AJO?=*E=HO2=JJAH*2HECE=OEJH@K?A@BHJANJKHAHAFHAIAJ=JED=IFHLA@J>A=FMAHBK@AI?HEFJHBHB=?AHA?CEJE6DA-0@AI?HEFJH?FKJAIJDADEIJCH=BJDAA@CAHEAJ=JE@EIJHE>KJEMEJDEJDAAECD>HD@B=FEJ=@JDAMHE#IDMIJD=J-0BA=JKHA?=IECE?=JOEFHLAJDAB=?A@AJA?JEFAHBH=?AE?F=HEIJJDAHECE=CH=OALABA=JKHA1JDEIF=FAHMA=FFOJDA-0@AI?HEFJHBHB=?AHA?CEJE6DAMHEIDMIJD=J*2>=IA@@EHA?JA=HAIJAECD>H+=IIEAH@AIJ=M=OIFH@K?AI=JEIBOECHAIKJI1JDEIMHMAFHFIAJKJEEAJDABH=AMHB2HE?EF=+FAJ)=OIEI2+)BMA@>OEA=H,EI?HEE=J)=OIEI,)BH*2>=IA@B=?AHA?CEJEEMDE?D=FIIE>A2+)=@,)@EAIE?>E=JEI=HAANFHA@)@H==JE?HA?CEJE>IJEI>IAHLA@>=IA@JDEIAMBH=AMH)IMAFHFIAJDAKJE4=@EKI*2HAFHAIAJ=JEBHB=?AHA?CEJE=@CELA=?FHADAIELAIJK@OMEJDJDABA=JKHA@AI?HEFJHI/=>H*2-0=IMA=IJDAHECE=CH=OALABA=JKHAI6DAIABA=JKHA@AI?HEFJHI=HA?F=HA@MEJDJM@EBBAHAJBA=JKHA?HHAIF@A?AAJD@IEADEIJE?=@?=6DABHAHA?IJHK?JIJDABA=JKHA?HHAIF@A?A@EHA?JO>=IA@JDAE=CA?H@E=JAIMDEAJDA=JJAHAEI>=IA@=IAJBAOBA=JKHAFEJIIK?D=IJDAKJD=@AOA?HAHIIA=@B=?A?JKHFEJIHALAHJDHAA@EBBAHAJIEE=HEJOA=IKHAI =@+IEA=HA=FFEA@JANJAIELAOAL=K=JAJDAABBA?JELAAIIBJDAIA@EBBAHAJ@AI?HEFJHI/=>HBA=JKHA=@*2?D=H=?JAHEAJDAFHFAHJOB?=JANJKHA@EIJHE>KJEIE@EIJE?JM=OI1JDEIMHE=@@EJEJJDA?FHADAIELA?F=HEIMAAL=K=JAJDAEH?FAAJ=HOFHFAHJO=@JDAANFAHEAJ=HAIKJIJDA+721-=@2KH@KA@=J=>=IAIIDMJD=JJDA?>E=JEBJDA>HECIIECE?=JFAHBH=?AEFHLAAJI 4-81-9.60-.-)674-,-5+4126451JDEIIA?JEMACELA=LAHLEAMBJDAIJ=JABJDA=HJBA=JKHA@AI?HEFJHIBHJDAB=?AHA?CEJEFH>A </td></tr><tr><td>6ed22b934e382c6f72402747d51aa50994cfd97b</td><td>Customized Expression Recognition for Performance-Driven
+<br/>Cutout Character Animation
+<br/>†NEC Laboratories America
+<br/>‡Snapchat
+</td><td>('39960064', 'Xiang Yu', 'xiang yu')<br/>('1706007', 'Jianchao Yang', 'jianchao yang')</td><td></td></tr><tr><td>6e93fd7400585f5df57b5343699cb7cda20cfcc2</td><td>http://journalofvision.org/9/2/22/
+<br/>Comparing a novel model based on the transferable
+<br/>belief model with humans during the recognition of
+<br/>partially occluded facial expressions
+<br/>Département de Psychologie, Université de Montréal,
+<br/>Canada
+<br/>Département de Psychologie, Université de Montréal,
+<br/>Canada
+<br/>Département de Psychologie, Université de Montréal,
+<br/>Canada
+<br/>Humans recognize basic facial expressions effortlessly. Yet, despite a considerable amount of research, this task remains
+<br/>elusive for computer vision systems. Here, we compared the behavior of one of the best computer models of facial
+<br/>expression recognition (Z. Hammal, L. Couvreur, A. Caplier, & M. Rombaut, 2007) with the behavior of human observers
+<br/>during the M. Smith, G. Cottrell, F. Gosselin, and P. G. Schyns (2005) facial expression recognition task performed on
+<br/>stimuli randomly sampled using Gaussian apertures. The modelVwhich we had to significantly modify in order to give the
+<br/>ability to deal with partially occluded stimuliVclassifies the six basic facial expressions (Happiness, Fear, Sadness,
+<br/>Surprise, Anger, and Disgust) plus Neutral from static images based on the permanent facial feature deformations and the
+<br/>Transferable Belief Model (TBM). Three simulations demonstrated the suitability of the TBM-based model to deal with
+<br/>partially occluded facial parts and revealed the differences between the facial information used by humans and by the
+<br/>model. This opens promising perspectives for the future development of the model.
+<br/>Keywords: facial features behavior, facial expressions classification, Transferable Belief Model, Bubbles
+<br/>Citation: Hammal, Z., Arguin, M., & Gosselin, F. (2009). Comparing a novel model based on the transferable belief
+<br/>http://journalofvision.org/9/2/22/, doi:10.1167/9.2.22.
+<br/>Introduction
+<br/>Facial expressions communicate information from
+<br/>which we can quickly infer the state of mind of our peers
+<br/>and adjust our behavior accordingly (Darwin, 1872). To
+<br/>illustrate, take a person like patient SM with complete
+<br/>bilateral damage to the amygdala nuclei that prevents her
+<br/>from recognizing facial expressions of fear. SM would be
+<br/>incapable of interpreting the fearful expression on the face
+<br/>of a bystander, who has encountered a furious Grizzly
+<br/>bear, as a sign of potential
+<br/>threat (Adolphs, Tranel,
+<br/>Damasio, & Damasio, 1994).
+<br/>Facial expressions are typically arranged into six
+<br/>universally recognized basic categories Happiness, Sur-
+<br/>prise, Disgust, Anger, Sadness, and Fear that are similarly
+<br/>expressed across different backgrounds and cultures
+<br/>(Cohn, 2006; Ekman, 1999; Izard, 1971, 1994). Facial
+<br/>expressions result
+<br/>from the precisely choreographed
+<br/>deformation of facial features, which are often described
+<br/>using the 46 Action Units (AUs; Ekman & Friesen,
+<br/>1978).
+<br/>Facial expression recognition and computer
+<br/>vision
+<br/>The study of human facial expressions has an impact in
+<br/>several areas of life such as art, social interaction, cognitive
+<br/>science, medicine, security, affective computing, and
+<br/>human-computer interaction (HCI). An automatic facial
+<br/>expressions classification system may contribute signifi-
+<br/>cantly to the development of all these disciplines. However,
+<br/>the development of such a system constitutes a significant
+<br/>challenge because of the many constraints that are imposed
+<br/>by its application in a real-world context (Pantic & Bartlett,
+<br/>2007; Pantic & Patras, 2006). In particular, such systems
+<br/>need to provide great accuracy and robustness without
+<br/>demanding too many interventions from the user.
+<br/>There have been major advances in computer vision
+<br/>over the past 15 years for the recognition of the six basic
+<br/>facial expressions (for reviews, see Fasel & Luettin, 2003;
+<br/>Pantic & Rothkrantz, 2000b). The main approaches can be
+<br/>divided in two classes: Model-based and fiducial points
+<br/>approaches. The model-based approach requires the
+<br/>design of a deterministic physical model that can represent
+<br/>doi: 10.1167/9.2.22
+<br/>Received January 28, 2008; published February 26, 2009
+<br/>ISSN 1534-7362 * ARVO
+</td><td>('1785007', 'Zakia Hammal', 'zakia hammal')<br/>('3005969', 'Martin Arguin', 'martin arguin')<br/>('2074568', 'Frédéric Gosselin', 'frédéric gosselin')</td><td></td></tr><tr><td>6eb1b5935b0613a41b72fd9e7e53a3c0b32651e9</td><td>LEGO Pictorial Scales for Assessing Affective Responses
+<br/><b>t2i Lab, Chalmers University of Technology, Gothenburg, Sweden</b><br/>2Digital Productivity, CSIRO, Australia
+<br/><b>University of Canterbury, New Zealand</b><br/><b>Texas AandM University, College Station, TX, USA</b><br/><b>Human Centered Multimedia, Augsburg University, Germany</b><br/><b>Human Interface Technology Lab New Zealand, University of Canterbury, New Zealand</b></td><td>('1761180', 'Mohammad Obaid', 'mohammad obaid')<br/>('39191121', 'Andreas Dünser', 'andreas dünser')<br/>('1719307', 'Elena Moltchanova', 'elena moltchanova')<br/>('33096182', 'Danielle Cummings', 'danielle cummings')<br/>('1728894', 'Christoph Bartneck', 'christoph bartneck')</td><td>mobaid@chalmers.se
+</td></tr><tr><td>6e12ba518816cbc2d987200c461dc907fd19f533</td><td></td><td></td><td></td></tr><tr><td>6e782073a013ce3dbc5b9b56087fd0300c510f67</td><td>IOSR Journal of Computer Engineering (IOSR-JCE)
+<br/>e-ISSN: 2278-0661,p-ISSN: 2278-8727, Volume 17, Issue 3, Ver. II (May – Jun. 2015), PP 61-68
+<br/>www.iosrjournals.org
+<br/>Real Time Facial Emotion Recognition using Kinect V2 Sensor
+<br/><b>Doctoral School of Automatic Control and Computers, University POLITEHNICA of Bucharest, Romania</b><br/><b>Ministry of Higher Education and Scientific Research / The University of Mustsnsiriyah/Baghdad IRAQ</b><br/>2(Department of Computers/Faculty of Automatic Control and ComputersPOLITEHNICA of Bucharest
+<br/>3(Department of Computers/Faculty of Automatic Control and ComputersPOLITEHNICA of Bucharest
+<br/>ROMANIA)
+<br/>ROMANIA)
+</td><td>('9384437', 'Hesham A. Alabbasi', 'hesham a. alabbasi')<br/>('3088730', 'Alin Moldoveanu', 'alin moldoveanu')</td><td></td></tr><tr><td>9ab463d117219ed51f602ff0ddbd3414217e3166</td><td>Weighted Transmedia
+<br/>Relevance Feedback for
+<br/>Image Retrieval and
+<br/>Auto-annotation
+<br/>TECHNICAL
+<br/>REPORT
+<br/>N° 0415
+<br/>December 2011
+<br/>Project-Teams LEAR - INRIA
+<br/>and TVPA - XRCE
+</td><td>('1722052', 'Thomas Mensink', 'thomas mensink')<br/>('34602236', 'Jakob Verbeek', 'jakob verbeek')<br/>('1808423', 'Gabriela Csurka', 'gabriela csurka')</td><td></td></tr><tr><td>9ac82909d76b4c902e5dde5838130de6ce838c16</td><td>Recognizing Facial Expressions Automatically
+<br/>from Video
+<br/>1 Introduction
+<br/>Facial expressions, resulting from movements of the facial muscles, are the face
+<br/>changes in response to a person’s internal emotional states, intentions, or social
+<br/>communications. There is a considerable history associated with the study on fa-
+<br/>cial expressions. Darwin (1872) was the first to describe in details the specific fa-
+<br/>cial expressions associated with emotions in animals and humans, who argued that
+<br/>all mammals show emotions reliably in their faces. Since that, facial expression
+<br/>analysis has been a area of great research interest for behavioral scientists (Ekman,
+<br/>Friesen, and Hager, 2002). Psychological studies (Mehrabian, 1968; Ambady and
+<br/>Rosenthal, 1992) suggest that facial expressions, as the main mode for non-verbal
+<br/>communication, play a vital role in human face-to-face communication. For illus-
+<br/>tration, we show some examples of facial expressions in Fig. 1.
+<br/>Computer recognition of facial expressions has many important applications in
+<br/>intelligent human-computer interaction, computer animation, surveillance and se-
+<br/>curity, medical diagnosis, law enforcement, and awareness systems (Shan, 2007).
+<br/>Therefore, it has been an active research topic in multiple disciplines such as psy-
+<br/>chology, cognitive science, human-computer interaction, and pattern recognition.
+<br/>Meanwhile, as a promising unobtrusive solution, automatic facial expression analy-
+<br/>sis from video or images has received much attention in last two decades (Pantic and
+<br/>Rothkrantz, 2000a; Fasel and Luettin, 2003; Tian, Kanade, and Cohn, 2005; Pantic
+<br/>and Bartlett, 2007).
+<br/>This chapter introduces recent advances in computer recognition of facial expres-
+<br/>sions. Firstly, we describe the problem space, which includes multiple dimensions:
+<br/>level of description, static versus dynamic expression, facial feature extraction and
+</td><td>('10795229', 'Caifeng Shan', 'caifeng shan')<br/>('3297850', 'Ralph Braspenning', 'ralph braspenning')<br/>('10795229', 'Caifeng Shan', 'caifeng shan')<br/>('3297850', 'Ralph Braspenning', 'ralph braspenning')</td><td>Philips Research, Eindhoven, The Netherlands, e-mail: caifeng.shan@philips.com
+<br/>Philips Research, Eindhoven, The Netherlands, e-mail: ralph.braspenning@philips.com
+</td></tr><tr><td>9a0c7a4652c49a177460b5d2fbbe1b2e6535e50a</td><td>Automatic and Quantitative evaluation of attribute discovery methods
+<br/><b>The University of Queensland, School of ITEE</b><br/>QLD 4072, Australia
+</td><td>('2499431', 'Liangchen Liu', 'liangchen liu')<br/>('2331880', 'Arnold Wiliem', 'arnold wiliem')<br/>('3104113', 'Shaokang Chen', 'shaokang chen')<br/>('2270092', 'Brian C. Lovell', 'brian c. lovell')</td><td>l.liu9@uq.edu.au
+<br/>a.wiliem@uq.edu.au
+<br/>shaokangchenuq@gmail.com
+<br/>lovell@itee.uq.edu.au
+</td></tr><tr><td>9ac15845defcd0d6b611ecd609c740d41f0c341d</td><td>Copyright
+<br/>by
+<br/>2011
+</td><td>('1926834', 'Juhyun Lee', 'juhyun lee')</td><td></td></tr><tr><td>9ac43a98fe6fde668afb4fcc115e4ee353a6732d</td><td>Survey of Face Detection on Low-quality Images
+<br/><b>Beckmann Institute, University of Illinois at Urbana-Champaign, USA</b></td><td>('1698743', 'Yuqian Zhou', 'yuqian zhou')<br/>('1771885', 'Ding Liu', 'ding liu')</td><td>{yuqian2, dingliu2}@illinois.edu
+<br/>huang@ifp.uiuc.edu
+</td></tr><tr><td>9af1cf562377b307580ca214ecd2c556e20df000</td><td>Feb. 28
+<br/> International Journal of Advanced Studies in Computer Science and Engineering
+<br/>IJASCSE, Volume 4, Issue 2, 2015
+<br/> Video-Based Facial Expression Recognition
+<br/>Using Local Directional Binary Pattern
+<br/>Electrical Engineering Dept., AmirKabir Univarsity of Technology
+<br/>Tehran, Iran
+</td><td>('38519671', 'Sahar Hooshmand', 'sahar hooshmand')<br/>('3232144', 'Ali Jamali Avilaq', 'ali jamali avilaq')<br/>('3293075', 'Amir Hossein Rezaie', 'amir hossein rezaie')</td><td></td></tr><tr><td>9a23a0402ae68cc6ea2fe0092b6ec2d40f667adb</td><td>High-Resolution Image Synthesis and Semantic Manipulation with Conditional GANs
+<br/>1NVIDIA Corporation
+<br/>2UC Berkeley
+<br/>Figure 1: We propose a generative adversarial framework for synthesizing 2048 × 1024 images from semantic label maps
+<br/>(lower left corner in (a)). Compared to previous work [5], our results express more natural textures and details. (b) We can
+<br/>change labels in the original label map to create new scenes, like replacing trees with buildings. (c) Our framework also
+<br/>allows a user to edit the appearance of individual objects in the scene, e.g. changing the color of a car or the texture of a road.
+<br/>Please visit our website for more side-by-side comparisons as well as interactive editing demos.
+</td><td>('2195314', 'Ting-Chun Wang', 'ting-chun wang')<br/>('2436356', 'Jun-Yan Zhu', 'jun-yan zhu')<br/>('1690538', 'Jan Kautz', 'jan kautz')</td><td></td></tr><tr><td>9a4c45e5c6e4f616771a7325629d167a38508691</td><td>A Facial Features Detector Integrating Holistic Facial Information and
+<br/>Part-based Model
+<br/>Eslam Mostafa1,2
+<br/>Aly Farag1
+<br/><b>CVIP Lab, University of Louisville, Louisville, KY 40292, USA</b><br/><b>Alexandria University, Alexandria, Egypt</b><br/><b>Assiut University, Assiut 71515, Egypt</b><br/>4Kentucky Imaging Technology (KIT), Louisville, KY 40245, USA.
+</td><td>('28453046', 'Asem A. Ali', 'asem a. ali')<br/>('2239392', 'Ahmed Shalaby', 'ahmed shalaby')</td><td></td></tr><tr><td>9af9a88c60d9e4b53e759823c439fc590a4b5bc5</td><td>Learning Deep Convolutional Embeddings for Face Representation Using Joint
+<br/>Sample- and Set-based Supervision
+<br/>Department of Electrical and Electronic Engineering,
+<br/><b>Imperial College London</b></td><td>('2151914', 'Baris Gecer', 'baris gecer')<br/>('3288623', 'Vassileios Balntas', 'vassileios balntas')<br/>('1700968', 'Tae-Kyun Kim', 'tae-kyun kim')</td><td>{b.gecer,v.balntas15,tk.kim}@imperial.ac.uk
+</td></tr><tr><td>9a7858eda9b40b16002c6003b6db19828f94a6c6</td><td>MOONEY FACE CLASSIFICATION AND PREDICTION BY LEARNING ACROSS TONE
+<br/>(cid:63) UC Berkeley / †ICSI
+</td><td>('2301765', 'Tsung-Wei Ke', 'tsung-wei ke')<br/>('2251428', 'Stella X. Yu', 'stella x. yu')<br/>('1821337', 'David Whitney', 'david whitney')</td><td></td></tr><tr><td>9a3535cabf5d0f662bff1d897fb5b777a412d82e</td><td><b>University of Kentucky</b><br/>UKnowledge
+<br/>Computer Science
+<br/>Computer Science Faculty Publications
+<br/>6-10-2015
+<br/>Large-Scale Geo-Facial Image Analysis
+<br/>Mohammed T. Islam
+<br/><b>University of Kentucky</b><br/><b>University of North Carolina at Charlotte</b><br/>Click here to let us know how access to this document benefits you.
+<br/>Follow this and additional works at: https://uknowledge.uky.edu/cs_facpub
+<br/>Part of the Computer Sciences Commons
+<br/>Repository Citation
+<br/>Islam, Mohammed T.; Greenwell, Connor; Souvenir, Richard; and Jacobs, Nathan, "Large-Scale Geo-Facial Image Analysis" (2015).
+<br/>Computer Science Faculty Publications. 7.
+<br/>https://uknowledge.uky.edu/cs_facpub/7
+<br/>This Article is brought to you for free and open access by the Computer Science at UKnowledge. It has been accepted for inclusion in Computer
+</td><td>('2121759', 'Connor Greenwell', 'connor greenwell')<br/>('1690110', 'Richard Souvenir', 'richard souvenir')<br/>('1990750', 'Nathan Jacobs', 'nathan jacobs')</td><td>University of Kentucky, connor.greenwell@uky.edu
+<br/>University of Kentucky, nathan.jacobs@uky.edu
+<br/>Science Faculty Publications by an authorized administrator of UKnowledge. For more information, please contact UKnowledge@lsv.uky.edu.
+</td></tr><tr><td>9abd35b37a49ee1295e8197aac59bde802a934f3</td><td>Depth2Action: Exploring Embedded Depth for
+<br/>Large-Scale Action Recognition
+<br/><b>University of California, Merced</b></td><td>('1749901', 'Yi Zhu', 'yi zhu')</td><td>{yzhu25,snewsam}@ucmerced.edu
+</td></tr><tr><td>9a276c72acdb83660557489114a494b86a39f6ff</td><td>Emotion Classification through Lower Facial Expressions using Adaptive
+<br/>Support Vector Machines
+<br/>Department of Information Technology, Faculty of Industrial Technology and Management,
+</td><td>('2621463', 'Porawat Visutsak', 'porawat visutsak')</td><td>King Mongkut’s University of Technology North Bangkok, porawatv@kmutnb.ac.th
+</td></tr><tr><td>9a1a9dd3c471bba17e5ce80a53e52fcaaad4373e</td><td>Automatic Recognition of Spontaneous Facial
+<br/>Actions
+<br/><b>Institute for Neural Computation, University of California, San Diego</b><br/><b>University at Buffalo, State University of New York</b></td><td>('2218905', 'Marian Stewart Bartlett', 'marian stewart bartlett')<br/>('21751782', 'Gwen C. Littlewort', 'gwen c. littlewort')<br/>('2639526', 'Mark G. Frank', 'mark g. frank')<br/>('2767464', 'Claudia Lainscsek', 'claudia lainscsek')<br/>('2039025', 'Ian R. Fasel', 'ian r. fasel')<br/>('1741200', 'Javier R. Movellan', 'javier r. movellan')</td><td>mbartlet@ucsd.edu, gwen@mplab.ucsd.edu, clainscsek@ucsd.edu, ianfasel@cogsci.ucsd.edu,
+<br/>movellan@mplab.ucsd.edu
+<br/>mfrank83@buffalo.edu
+</td></tr><tr><td>9a42c519f0aaa68debbe9df00b090ca446d25bc4</td><td>Face Recognition via Centralized Coordinate
+<br/>Learning
+</td><td>('2689287', 'Xianbiao Qi', 'xianbiao qi')<br/>('1684635', 'Lei Zhang', 'lei zhang')</td><td></td></tr><tr><td>9aad8e52aff12bd822f0011e6ef85dfc22fe8466</td><td>Temporal-Spatial Mapping for Action Recognition
+</td><td>('3865974', 'Xiaolin Song', 'xiaolin song')<br/>('40093162', 'Cuiling Lan', 'cuiling lan')<br/>('8434337', 'Wenjun Zeng', 'wenjun zeng')<br/>('1757173', 'Junliang Xing', 'junliang xing')<br/>('1759461', 'Jingyu Yang', 'jingyu yang')<br/>('1692735', 'Xiaoyan Sun', 'xiaoyan sun')</td><td></td></tr><tr><td>36b40c75a3e53c633c4afb5a9309d10e12c292c7</td><td></td><td></td><td></td></tr><tr><td>363ca0a3f908859b1b55c2ff77cc900957653748</td><td>International Journal of Computer Trends and Technology (IJCTT) – volume 1 Issue 3 Number 4 – Aug 2011
+<br/> Local Binary Patterns and Linear Programming using
+<br/>Facial Expression
+<br/>Ms.P.Jennifer
+<br/><b>Bharath Institute of Science and Technology</b><br/><b>B.Tech (C.S.E), Bharath University, Chennai</b><br/>Dr. A. Muthu kumaravel
+<br/><b>Bharath Institute of Science and Technology</b><br/><b>B.Tech (C.S.E), Bharath University, Chennai</b><br/>
+</td><td></td><td></td></tr><tr><td>36939e6a365e9db904d81325212177c9e9e76c54</td><td>Assessing the Accuracy of Four Popular Face Recognition Tools for
+<br/>Inferring Gender, Age, and Race
+<br/><b>Qatar Computing Research Institute, HBKU</b><br/>HBKU Research Complex, Doha, P.O. Box 34110, Qatar
+</td><td>('1861541', 'Soon-Gyo Jung', 'soon-gyo jung')<br/>('40660541', 'Jisun An', 'jisun an')<br/>('2592694', 'Haewoon Kwak', 'haewoon kwak')<br/>('2734912', 'Joni Salminen', 'joni salminen')</td><td>{sjung,jan,hkwak,jsalminen,bjansen}@hbku.edu.qa
+</td></tr><tr><td>3646b42511a6a0df5470408bc9a7a69bb3c5d742</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Applications of Computers and Electronics for the Welfare of Rural Masses (ACEWRM) 2015
+<br/>Detection of Facial Parts based on ABLATA
+<br/>Technical Campus, Bhilai
+<br/>Vikas Singh
+<br/>Technical Campus, Bhilai
+<br/>Abha Choubey
+<br/>Technical Campus, Bhilai
+</td><td>('9173769', 'Siddhartha Choubey', 'siddhartha choubey')</td><td></td></tr><tr><td>365f67fe670bf55dc9ccdcd6888115264b2a2c56</td><td></td><td></td><td></td></tr><tr><td>36fe39ed69a5c7ff9650fd5f4fe950b5880760b0</td><td>Tracking von Gesichtsmimik
+<br/>mit Hilfe von Gitterstrukturen
+<br/>zur Klassifikation von schmerzrelevanten Action
+<br/>Units
+<br/>1Fraunhofer-Institut f¨ur Integrierte Schaltungen IIS, Erlangen,
+<br/>2Otto-Friedrich-Universit¨at Bamberg, 3Universit¨atsklinkum Erlangen
+<br/>Kurzfassung. In der Schmerzforschung werden schmerzrelevante Mi-
+<br/>mikbewegungen von Probanden mittels des Facial Action Coding System
+<br/>klassifiziert. Die manuelle Klassifikation hierbei ist aufw¨andig und eine
+<br/>automatische (Vor-)klassifikation k¨onnte den diagnostischen Wert dieser
+<br/>Analysen erh¨ohen sowie den klinischen Workflow unterst¨utzen. Der hier
+<br/>vorgestellte regelbasierte Ansatz erm¨oglicht eine automatische Klassifika-
+<br/>tion ohne große Trainingsmengen vorklassifizierter Daten. Das Verfahren
+<br/>erkennt und verfolgt Mimikbewegungen, unterst¨utzt durch ein Gitter,
+<br/>und ordnet diese Bewegungen bestimmten Gesichtsarealen zu. Mit die-
+<br/>sem Wissen kann aus den Bewegungen auf die zugeh¨origen Action Units
+<br/>geschlossen werden.
+<br/>1 Einleitung
+<br/>Menschliche Empfindungen wie Emotionen oder Schmerz l¨osen spezifische Mu-
+<br/>ster von Kontraktionen der Gesichtsmuskulatur aus, die Grundlage dessen sind,
+<br/>was wir Mimik nennen. Aus der Beobachtung der Mimik kann wiederum auf
+<br/>menschliche Empfindungen r¨uckgeschlossen werden. Im Rahmen der Schmerz-
+<br/>forschung werden Videoaufnahmen von Probanden hinsichtlich des mimischen
+<br/>Schmerzausdrucks analysiert. Zur Beschreibung des mimischen Ausdrucks und
+<br/>dessen Ver¨anderungen wird das Facial Action Coding System (FACS) [1] verwen-
+<br/>det, das anatomisch begr¨undet, kleinste sichtbare Muskelbewegungen im Gesicht
+<br/>beschreibt und als einzelne Action Units (AUs) kategorisiert. Eine Vielzahl von
+<br/>Untersuchungen hat gezeigt, dass spezifische Muster von Action Units auftre-
+<br/>ten, wenn Probanden Schmerzen angeben [2]. Die manuelle Klassifikation und
+<br/>Markierung der Action Units von Probanden in Videosequenzen bedarf einer
+<br/>langwierigen Beobachtung durch ausgebildete FACS-Coder. Eine automatische
+<br/>(Vor-)klassifikation kann hierbei den klinischen Workflow unterst¨utzen und dieses
+<br/>Verfahren zum brauchbaren diagnostischen Instrument machen. Bisher realisier-
+<br/>te Ans¨atze zum Erkennen von Gesichtsausdr¨ucken basieren auf der Klassifikation
+</td><td>('31431972', 'Christine Barthold', 'christine barthold')<br/>('2009811', 'Anton Papst', 'anton papst')<br/>('1773752', 'Thomas Wittenberg', 'thomas wittenberg')<br/>('1793798', 'Stefan Lautenbacher', 'stefan lautenbacher')<br/>('1727734', 'Ute Schmid', 'ute schmid')<br/>('2500903', 'Sven Friedl', 'sven friedl')</td><td>sven.friedl@iis.fraunhofer.de
+</td></tr><tr><td>36a3a96ef54000a0cd63de867a5eb7e84396de09</td><td>Automatic Photo Orientation Detection with Convolutional Neural Networks
+<br/>Dept. of Computer Science
+<br/><b>University of Toronto</b><br/>Toronto, Ontario, Canada
+</td><td>('40121109', 'Ujash Joshi', 'ujash joshi')<br/>('1959343', 'Michael Guerzhoy', 'michael guerzhoy')</td><td>ujash.joshi@utoronto.ca, guerzhoy@cs.toronto.edu
+</td></tr><tr><td>36fc4120fc0638b97c23f97b53e2184107c52233</td><td>National Conference on Innovative Paradigms in Engineering & Technology (NCIPET-2013)
+<br/>Proceedings published by International Journal of Computer Applications® (IJCA)
+<br/>Introducing Celebrities in an Images using HAAR
+<br/>Cascade algorithm
+<br/>Asst. Professor
+<br/><b>PES Modern College of Engg</b><br/><b>PES Modern College of Engg</b><br/><b>PES Modern College of Engg</b><br/>Shivaji Nagar, Pune
+<br/>Shivaji Nagar, Pune
+<br/>Shivaji Nagar, Pune
+</td><td>('12682677', 'Deipali V. Gore', 'deipali v. gore')</td><td></td></tr><tr><td>36ce0b68a01b4c96af6ad8c26e55e5a30446f360</td><td>Multimed Tools Appl
+<br/>DOI 10.1007/s11042-014-2322-6
+<br/>Facial expression recognition based on a mlp neural
+<br/>network using constructive training algorithm
+<br/>Received: 5 February 2014 / Revised: 22 August 2014 / Accepted: 13 October 2014
+<br/>© Springer Science+Business Media New York 2014
+</td><td>('1746834', 'Hayet Boughrara', 'hayet boughrara')<br/>('3410172', 'Chokri Ben Amar', 'chokri ben amar')</td><td></td></tr><tr><td>3674f3597bbca3ce05e4423611d871d09882043b</td><td>ISSN 1796-2048
+<br/>Volume 7, Number 4, August 2012
+<br/>Contents
+<br/>Special Issue: Multimedia Contents Security in Social Networks Applications
+<br/>Guest Editors: Zhiyong Zhang and Muthucumaru Maheswaran
+<br/>Guest Editorial
+<br/>Zhiyong Zhang and Muthucumaru Maheswaran
+<br/>SPECIAL ISSUE PAPERS
+<br/>DRTEMBB: Dynamic Recommendation Trust Evaluation Model Based on Bidding
+<br/>Gang Wang and Xiao-lin Gui
+<br/>Block-Based Parallel Intra Prediction Scheme for HEVC
+<br/>Jie Jiang, Baolong, Wei Mo, and Kefeng Fan
+<br/>Optimized LSB Matching Steganography Based on Fisher Information
+<br/>Yi-feng Sun, Dan-mei Niu, Guang-ming Tang, and Zhan-zhan Gao
+<br/>A Novel Robust Zero-Watermarking Scheme Based on Discrete Wavelet Transform
+<br/>Yu Yang, Min Lei, Huaqun Liu, Yajian Zhou, and Qun Luo
+<br/>Stego Key Estimation in LSB Steganography
+<br/>Jing Liu and Guangming Tang
+<br/>REGULAR PAPERS
+<br/>Facial Expression Spacial Charts for Describing Dynamic Diversity of Facial Expressions
+<br/>277
+<br/>279
+<br/>289
+<br/>295
+<br/>303
+<br/>309
+<br/>314
+</td><td>('46575279', 'H. Madokoro', 'h. madokoro')</td><td></td></tr><tr><td>362bfeb28adac5f45b6ef46c07c59744b4ed6a52</td><td>INCORPORATING SCALABILITY IN UNSUPERVISED SPATIO-TEMPORAL FEATURE
+<br/>LEARNING
+<br/><b>University of California, Riverside, CA</b></td><td>('49616225', 'Sujoy Paul', 'sujoy paul')<br/>('2177805', 'Sourya Roy', 'sourya roy')<br/>('1688416', 'Amit K. Roy-Chowdhury', 'amit k. roy-chowdhury')</td><td></td></tr><tr><td>360d66e210f7011423364327b7eccdf758b5fdd2</td><td>17th European Signal Processing Conference (EUSIPCO 2009)
+<br/>Glasgow, Scotland, August 24-28, 2009
+<br/>LOCAL FEATURE EXTRACTION METHODS FOR FACIAL EXPRESSION
+<br/>RECOGNITION
+<br/><b>School of Electrical and Computer Engineering, RMIT University</b><br/>City Campus, Swanston St., Melbourne, Australia
+<br/>http://www.rmit.edu.au
+</td><td>('1857490', 'Seyed Mehdi Lajevardi', 'seyed mehdi lajevardi')<br/>('1749220', 'Zahir M. Hussain', 'zahir m. hussain')</td><td>seyed.lajevardi@rmit.edu.au, zmhussain@ieee.org
+</td></tr><tr><td>365866dc937529c3079a962408bffaa9b87c1f06</td><td> IJISET - International Journal of Innovative Science, Engineering & Technology, Vol. 1 Issue 3, May 2014.
+<br/>www.ijiset.com
+<br/>ISSN 2348 – 7968
+<br/>Facial Feature Expression Based Approach for Human Face
+<br/>Recognition: A Review
+<br/><b>SSESA, Science College, Congress Nagar, Nagpur, (MS)-India</b><br/><b>RTM Nagpur University, Campus Nagpur, (MS)-India</b><br/>for
+<br/>face
+<br/>task
+<br/>required
+<br/>extraction of
+</td><td></td><td></td></tr><tr><td>361c9ba853c7d69058ddc0f32cdbe94fbc2166d5</td><td>Deep Reinforcement Learning of
+<br/>Video Games
+<br/>s2098407
+<br/>September 29, 2017
+<br/>MSc. Project
+<br/>Arti(cid:12)cial Intelligence
+<br/><b>University of Groningen, The Netherlands</b><br/>Supervisors
+<br/>Dr. M.A. (Marco) Wiering
+<br/>Prof. dr. L.R.B. (Lambert) Schomaker
+<br/><b>ALICE Institute</b><br/><b>University of Groningen</b><br/>Nijenborgh 9, 9747 AG, Groningen, The Netherlands
+</td><td>('3405120', 'Jos van de Wolfshaar', 'jos van de wolfshaar')</td><td></td></tr><tr><td>368e99f669ea5fd395b3193cd75b301a76150f9d</td><td>One-to-many face recognition with bilinear CNNs
+<br/>Aruni RoyChowdhury
+<br/><b>University of Massachusetts, Amherst</b><br/>Erik Learned-Miller
+</td><td>('2144284', 'Tsung-Yu Lin', 'tsung-yu lin')<br/>('35208858', 'Subhransu Maji', 'subhransu maji')</td><td>{arunirc,tsungyulin,smaji,elm}@cs.umass.edu
+</td></tr><tr><td>362a70b6e7d55a777feb7b9fc8bc4d40a57cde8c</td><td>978-1-4799-9988-0/16/$31.00 ©2016 IEEE
+<br/>2792
+<br/>ICASSP 2016
+</td><td></td><td></td></tr><tr><td>36df81e82ea5c1e5edac40b60b374979a43668a5</td><td>ON-THE-FLY SPECIFIC PERSON RETRIEVAL
+<br/><b>University of Oxford, United Kingdom</b></td><td>('3188342', 'Omkar M. Parkhi', 'omkar m. parkhi')<br/>('1687524', 'Andrea Vedaldi', 'andrea vedaldi')<br/>('1688869', 'Andrew Zisserman', 'andrew zisserman')</td><td>{omkar,vedaldi,az}@robots.ox.ac.uk
+</td></tr><tr><td>3619a9b46ad4779d0a63b20f7a6a8d3d49530339</td><td>SIMONYAN et al.: FISHER VECTOR FACES IN THE WILD
+<br/>Fisher Vector Faces in the Wild
+<br/>Visual Geometry Group
+<br/>Department of Engineering Science
+<br/><b>University of Oxford</b></td><td>('34838386', 'Karen Simonyan', 'karen simonyan')<br/>('3188342', 'Omkar M. Parkhi', 'omkar m. parkhi')<br/>('1687524', 'Andrea Vedaldi', 'andrea vedaldi')<br/>('1688869', 'Andrew Zisserman', 'andrew zisserman')</td><td>karen@robots.ox.ac.uk
+<br/>omkar@robots.ox.ac.uk
+<br/>vedaldi@robots.ox.ac.uk
+<br/>az@robots.ox.ac.uk
+</td></tr><tr><td>366d20f8fd25b4fe4f7dc95068abc6c6cabe1194</td><td></td><td></td><td></td></tr><tr><td>3630324c2af04fd90f8668f9ee9709604fe980fd</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TCSVT.2016.2607345, IEEE
+<br/>Transactions on Circuits and Systems for Video Technology
+<br/>Image Classification with Tailored Fine-Grained
+<br/>Dictionaries
+</td><td>('2287686', 'Xiangbo Shu', 'xiangbo shu')<br/>('8053308', 'Jinhui Tang', 'jinhui tang')<br/>('2272096', 'Guo-Jun Qi', 'guo-jun qi')<br/>('3233021', 'Zechao Li', 'zechao li')<br/>('1717861', 'Yu-Gang Jiang', 'yu-gang jiang')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')</td><td></td></tr><tr><td>362ba8317aba71c78dafca023be60fb71320381d</td><td></td><td></td><td></td></tr><tr><td>36cf96fe11a2c1ea4d999a7f86ffef6eea7b5958</td><td>RGB-D Face Recognition with Texture and
+<br/>Attribute Features
+<br/>Member, IEEE
+</td><td>('1931069', 'Gaurav Goswami', 'gaurav goswami')<br/>('2338122', 'Mayank Vatsa', 'mayank vatsa')<br/>('39129417', 'Richa Singh', 'richa singh')</td><td></td></tr><tr><td>36e8ef2e5d52a78dddf0002e03918b101dcdb326</td><td>Multiview Active Shape Models with SIFT Descriptors
+<br/>for the 300-W Face Landmark Challenge
+<br/><b>University of Cape Town</b><br/>Anthropics Technology Ltd.
+<br/><b>University of Cape Town</b></td><td>('2822258', 'Stephen Milborrow', 'stephen milborrow')<br/>('1823550', 'Tom E. Bishop', 'tom e. bishop')<br/>('2537623', 'Fred Nicolls', 'fred nicolls')</td><td>milbo@sonic.net
+<br/>t.e.bishop@gmail.com
+<br/>fred.nicolls@uct.ac.za
+</td></tr><tr><td>36018404263b9bb44d1fddaddd9ee9af9d46e560</td><td>OCCLUDED FACE RECOGNITION BY USING GABOR
+<br/>FEATURES
+<br/>1 Department of Electrical And Electronics Engineering, METU, Ankara, Turkey
+<br/>2 7h%ł7$.(cid:3)%ł/7(1(cid:15)(cid:3)$QNDUD(cid:15)(cid:3)7XUNH\
+</td><td>('2920043', 'Burcu Kepenekci', 'burcu kepenekci')<br/>('3110567', 'F. Boray Tek', 'f. boray tek')<br/>('1929001', 'Gozde Bozdagi Akar', 'gozde bozdagi akar')</td><td></td></tr><tr><td>367f2668b215e32aff9d5122ce1f1207c20336c8</td><td>Proceedings of the Pakistan Academy of Sciences 52 (1): 15–25 (2015)
+<br/>Copyright © Pakistan Academy of Sciences
+<br/>ISSN: 0377 - 2969 (print), 2306 - 1448 (online)
+<br/> Pakistan Academy of Sciences
+<br/>Research Article
+<br/>Speaker-Dependent Human Emotion Recognition in
+<br/>Unimodal and Bimodal Scenarios
+<br/><b>University of Peshawar, Pakistan</b><br/><b>University of Engineering and Technology</b><br/><b>Sarhad University of Science and Information Technology</b><br/><b>University of Peshawar, Peshawar, Pakistan</b><br/>Peshawar, Pakistan
+<br/>Peshawar, Pakistan
+</td><td>('34267835', 'Sanaul Haq', 'sanaul haq')<br/>('3124216', 'Tariqullah Jan', 'tariqullah jan')<br/>('1766329', 'Muhammad Asif', 'muhammad asif')<br/>('1710701', 'Amjad Ali', 'amjad ali')<br/>('40332145', 'Naveed Ahmad', 'naveed ahmad')</td><td></td></tr><tr><td>36c2db5ff76864d289781f93cbb3e6351f11984c</td><td>17th European Signal Processing Conference (EUSIPCO 2009)
+<br/>Glasgow, Scotland, August 24-28, 2009
+<br/>ONE COLORED IMAGE BASED 2.5D HUMAN FACE RECONSTRUCTION
+<br/>School of Electrical, Electronic and Computer Engineering
+<br/><b>Newcastle University, Newcastle upon Tyne</b><br/>England, United Kingdom
+</td><td>('1687577', 'Peng Liu', 'peng liu')</td><td>Email: peng.liu2@ncl.ac.uk, w.l.woo@ncl.ac.uk, s.s.dlay@ncl.ac.uk
+</td></tr><tr><td>3661a34f302883c759b9fa2ce03de0c7173d2bb2</td><td>Peak-Piloted Deep Network for Facial Expression
+<br/>Recognition
+<br/><b>University of California, San Diego 2 Carnegie Mellon University</b><br/><b>AI Institute</b><br/><b>National University of Singapore</b><br/><b>Institute of Automation, Chinese Academy of Sciences</b></td><td>('8343585', 'Xiangyun Zhao', 'xiangyun zhao')<br/>('1776665', 'Luoqi Liu', 'luoqi liu')<br/>('1699559', 'Nuno Vasconcelos', 'nuno vasconcelos')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')<br/>('1743598', 'Teng Li', 'teng li')</td><td>xiz019@ucsd.edu xdliang328@gmail.com liuluoqi@360.cn
+<br/>tenglwy@gmail.com nvasconcelos@ucsd.edu eleyans@nus.edu.sg
+</td></tr><tr><td>36c473fc0bf3cee5fdd49a13cf122de8be736977</td><td>Temporal Segment Networks: Towards Good
+<br/>Practices for Deep Action Recognition
+<br/>1Computer Vision Lab, ETH Zurich, Switzerland
+<br/><b>The Chinese University of Hong Kong</b><br/><b>Shenzhen Institutes of Advanced Technology, CAS, China</b></td><td>('33345248', 'Limin Wang', 'limin wang')<br/>('3331521', 'Yuanjun Xiong', 'yuanjun xiong')<br/>('1915826', 'Zhe Wang', 'zhe wang')<br/>('33427555', 'Yu Qiao', 'yu qiao')<br/>('1807606', 'Dahua Lin', 'dahua lin')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')<br/>('1681236', 'Luc Van Gool', 'luc van gool')</td><td></td></tr><tr><td>368d59cf1733af511ed8abbcbeb4fb47afd4da1c</td><td>To Frontalize or Not To Frontalize: A Study of Face Pre-Processing Techniques
+<br/>and Their Impact on Recognition
+<br/>RichardWebster1, Vitomir ˇStruc2, Patrick J. Flynn1 and Walter J. Scheirer1
+<br/><b>University of Notre Dame, USA</b><br/><b>Faculty of Electrical Engineering, University of Ljubljana, Slovenia</b></td><td>('40061203', 'Sandipan Banerjee', 'sandipan banerjee')<br/>('6846673', 'Joel Brogan', 'joel brogan')<br/>('5014060', 'Aparna Bharati', 'aparna bharati')</td><td>{janez.krizaj, vitomir.struc}@fe.uni-lj.si
+<br/>{sbanerj1, jbrogan4, abharati, brichar1, flynn, wscheire}@nd.edu
+</td></tr><tr><td>366595171c9f4696ec5eef7c3686114fd3f116ad</td><td>Algorithms and Representations for Visual
+<br/>Recognition
+<br/>Electrical Engineering and Computer Sciences
+<br/><b>University of California at Berkeley</b><br/>Technical Report No. UCB/EECS-2012-53
+<br/>http://www.eecs.berkeley.edu/Pubs/TechRpts/2012/EECS-2012-53.html
+<br/>May 1, 2012
+</td><td>('35208858', 'Subhransu Maji', 'subhransu maji')</td><td></td></tr><tr><td>36b9f46c12240898bafa10b0026a3fb5239f72f3</td><td>Collaborative Deep Reinforcement Learning for Joint Object Search
+<br/><b>Peking University</b><br/>Microsoft Research
+<br/><b>Peking University</b><br/>Microsoft Research
+</td><td>('2045334', 'Xiangyu Kong', 'xiangyu kong')<br/>('1894653', 'Bo Xin', 'bo xin')<br/>('36637369', 'Yizhou Wang', 'yizhou wang')<br/>('1745420', 'Gang Hua', 'gang hua')</td><td>kong@pku.edu.cn
+<br/>boxin@microsoft.com
+<br/>yizhou.wang@pku.edu.cn
+<br/>ganghua@microsoft.com
+</td></tr><tr><td>361d6345919c2edc5c3ce49bb4915ed2b4ee49be</td><td><b>Delft University of Technology</b><br/>Models for supervised learning in sequence data
+<br/>Pei, Wenjie
+<br/>DOI
+<br/>10.4233/uuid:fff15717-71ec-402d-96e6-773884659f2c
+<br/>Publication date
+<br/>2018
+<br/>Document Version
+<br/>Publisher's PDF, also known as Version of record
+<br/>Citation (APA)
+<br/>Pei, W. (2018). Models for supervised learning in sequence data DOI: 10.4233/uuid:fff15717-71ec-402d-
+<br/>96e6-773884659f2c
+<br/>Important note
+<br/>To cite this publication, please use the final published version (if applicable).
+<br/>Please check the document version above.
+<br/>Copyright
+<br/>Other than for strictly personal use, it is not permitted to download, forward or distribute the text or part of it, without the consent
+<br/>of the author(s) and/or copyright holder(s), unless the work is under an open content license such as Creative Commons.
+<br/>Takedown policy
+<br/>Please contact us and provide details if you believe this document breaches copyrights.
+<br/>We will remove access to the work immediately and investigate your claim.
+<br/><b>This work is downloaded from Delft University of Technology</b><br/>For technical reasons the number of authors shown on this cover page is limited to a maximum of 10.
+<br/> </td><td></td><td></td></tr><tr><td>3634b4dd263c0f330245c086ce646c9bb748cd6b</td><td>Temporal Localization of Fine-Grained Actions in Videos
+<br/>by Domain Transfer from Web Images
+<br/><b>University of Southern California</b><br/><b>Google, Inc</b></td><td>('1726241', 'Chen Sun', 'chen sun')</td><td>{chensun,nevatia}@usc.edu
+<br/>{sanketh,sukthankar}@google.com
+</td></tr><tr><td>367a786cfe930455cd3f6bd2492c304d38f6f488</td><td>A Training Assistant Tool for the Automated Visual
+<br/>Inspection System
+<br/>A Thesis
+<br/>Presented to
+<br/>the Graduate School of
+<br/><b>Clemson University</b><br/>In Partial Fulfillment
+<br/>of the Requirements for the Degree
+<br/>Master of Science
+<br/>Electrical Engineering
+<br/>by
+<br/>December 2015
+<br/>Accepted by:
+<br/>Dr. Adam W. Hoover, Committee Chair
+<br/>Dr. Richard E. Groff
+<br/>Dr. Yongqiang Wang
+</td><td>('4154752', 'Mohan Karthik Ramaraj', 'mohan karthik ramaraj')</td><td></td></tr><tr><td>5c4ce36063dd3496a5926afd301e562899ff53ea</td><td></td><td></td><td></td></tr><tr><td>5c6de2d9f93b90034f07860ae485a2accf529285</td><td>Int. J. Biometrics, Vol. X, No. Y, xxxx
+<br/>Compensating for pose and illumination in
+<br/>unconstrained periocular biometrics
+<br/>Department of Computer Science,
+<br/>IT – Instituto de Telecomunicações,
+<br/><b>University of Beira Interior</b><br/>6200-Covilhã, Portugal
+<br/>Fax: +351-275-319899
+<br/>*Corresponding author
+</td><td>('1678263', 'Chandrashekhar N. Padole', 'chandrashekhar n. padole')<br/>('1712429', 'Hugo Proença', 'hugo proença')</td><td>E-mail: chandupadole@ubi.pt
+<br/>E-mail: hugomcp@di.ubi.pt
+</td></tr><tr><td>5cbe1445d683d605b31377881ac8540e1d17adf0</td><td>On 3D Face Reconstruction via Cascaded Regression in Shape Space
+<br/><b>College of Computer Science, Sichuan University, Chengdu, China</b></td><td>('50207647', 'Feng Liu', 'feng liu')<br/>('39422721', 'Dan Zeng', 'dan zeng')<br/>('1723081', 'Jing Li', 'jing li')<br/>('7345195', 'Qijun Zhao', 'qijun zhao')</td><td>qjzhao@scu.edu.cn
+</td></tr><tr><td>5ca23ceb0636dfc34c114d4af7276a588e0e8dac</td><td>Texture Representation in AAM using Gabor Wavelet
+<br/>and Local Binary Patterns
+<br/>School of Electronic Engineering,
+<br/><b>Xidian University</b><br/>Xi’an 710071, China
+<br/>School of Computer Science and Information Systems,
+<br/><b>Birkbeck College, University of London</b><br/>London WC1E 7HX, U.K.
+<br/>School of Computer Engineering,
+<br/><b>Nanyang Technological University</b><br/>50 Nanyang Avenue, Singapore 639798
+<br/>School of Electronic Engineering,
+<br/><b>Xidian University</b><br/>Xi’an 710071, China
+</td><td>('5452186', 'Ya Su', 'ya su')<br/>('1720243', 'Xuelong Li', 'xuelong li')<br/>('1692693', 'Dacheng Tao', 'dacheng tao')<br/>('10699750', 'Xinbo Gao', 'xinbo gao')</td><td>su1981ya@gmail.com
+<br/>xuelong@dcs.bbk.ac.uk
+<br/>dacheng.tao@gmail.com
+<br/>xbgao@mail.xidian.edu.cn
+</td></tr><tr><td>5c2a7518fb26a37139cebff76753d83e4da25159</td><td></td><td></td><td></td></tr><tr><td>5c493c42bfd93e4d08517438983e3af65e023a87</td><td>The Thirty-Second AAAI Conference
+<br/>on Artificial Intelligence (AAAI-18)
+<br/>Multimodal Keyless Attention
+<br/>Fusion for Video Classification
+<br/><b>Tsinghua University, 2Rutgers University, 3Baidu IDL</b></td><td>('1716690', 'Xiang Long', 'xiang long')<br/>('2551285', 'Chuang Gan', 'chuang gan')<br/>('1732213', 'Gerard de Melo', 'gerard de melo')<br/>('48033101', 'Xiao Liu', 'xiao liu')<br/>('48515099', 'Yandong Li', 'yandong li')<br/>('9921390', 'Fu Li', 'fu li')<br/>('35247507', 'Shilei Wen', 'shilei wen')</td><td>{longx13, ganc13}@mails.tsinghua.edu.cn, gdm@demelo.org, {liuxiao12, liyandong, lifu, wenshilei}@baidu.com
+</td></tr><tr><td>5cb83eba8d265afd4eac49eb6b91cdae47def26d</td><td>Face Recognition with Local Line Binary Pattern
+<br/><b>Mahanakorn University of Technology</b><br/>51 Cheum-Sampan Rd., Nong Chok, Bangkok, THAILAND 10530
+</td><td>('2337544', 'Amnart Petpon', 'amnart petpon')<br/>('1805935', 'Sanun Srisuk', 'sanun srisuk')</td><td>ta tee473@hotmail.com, sanun@mut.ac.th
+</td></tr><tr><td>5c8672c0d2f28fd5d2d2c4b9818fcff43fb01a48</td><td>Robust Face Detection by Simple Means
+<br/><b>Institute for Computer Graphics and Vision</b><br/><b>Graz University of Technology, Austria</b><br/>1 Motivation
+<br/>Face detection is still one of the core problems in computer vision, especially in
+<br/>unconstrained real-world situations where variations in face pose or bad imaging
+<br/>conditions have to be handled. These problems are covered by recent benchmarks
+<br/>such as Face Detection Dataset and Benchmark (FDDB) [2], which reveals that
+<br/>established methods, e.g, Viola and Jones [8] suffer a drop in performance. More
+<br/>effective approaches exist, but are closed source and not publicly available. Thus,
+<br/>we propose a simple but effective detector that is available to the public. It
+<br/>combines Histograms of Orientated Gradient (HOG) [1] features with linear
+<br/>Support Vector Machine (SVM) classification.
+<br/>2 Technical Details
+<br/>One important aspect in the training of our face detector is bootstrapping. Thus,
+<br/>we rely on iterative training. In particular, each iteration consists of first describ-
+<br/>ing the face patches by HOGs [1] and then learning a linear SVM. At the end
+<br/>of each iteration we bootstrap with the preliminary detector hard examples to
+<br/>enrich the training set. We perform several bootstrapping rounds to improve the
+<br/>detector until the desired false positive per window rate is reached. Interestingly,
+<br/>we found out that picking up false positives at multiple scales in a sliding win-
+<br/>dow fashion yields better results than just at a single scale. Testing several patch
+<br/>sizes and HOG layouts revealed that a patch size of 36 by 36 delivers the best
+<br/>results. For the HOG descriptor we ended up with a block size of 12x12, 4x4 for
+<br/>the cells. Prior to the actual training we gathered face crops of the Annotated
+<br/>facial landmarks in the wild (AFLW) dataset [4]. As AFLW includes the coarse
+<br/>face pose we are able to retrieve about 28k frontal faces by limiting the yaw angle
+<br/>between ± π
+<br/>6 and mirroring them. For each face we crop a square region between
+<br/>forehead and chin. The non-face patches are obtained by randomly sampling at
+<br/>multiple scales of the PASCAL VOC 2007 dataset, excluding the persons subset.
+<br/>3 Results
+<br/>In Figure 1 we report the performance of our final detector on the challenging
+<br/>FDDB benchmark compared to state-of-the-art methods. Despite the simplicity
+<br/>of our detector it is able to improve considerably over the boosted classifier cas-
+<br/>cade of Viola and Jones [8] and even outperforms the recent work of Jain and
+</td><td>('3202367', 'Paul Wohlhart', 'paul wohlhart')<br/>('1791182', 'Peter M. Roth', 'peter m. roth')<br/>('3628150', 'Horst Bischof', 'horst bischof')</td><td>{koestinger,wohlhart,pmroth,bischof}@icg.tugraz.at
+</td></tr><tr><td>5c3dce55c61ee86073575ac75cc882a215cb49e6</td><td>Neural Codes for Image Retrieval
+<br/>Alexandr Chigorin1, and Victor Lempitsky2
+<br/>1 Yandex, Russia
+<br/><b>Skolkovo Institute of Science and Technology (Skoltech), Russia</b><br/><b>Moscow Institute of Physics and Technology, Russia</b></td><td>('2412441', 'Artem Babenko', 'artem babenko')<br/>('32829387', 'Anton Slesarev', 'anton slesarev')</td><td></td></tr><tr><td>5c2e264d6ac253693469bd190f323622c457ca05</td><td>978-1-4799-2341-0/13/$31.00 ©2013 IEEE
+<br/>4367
+<br/>ICIP 2013
+</td><td></td><td></td></tr><tr><td>5c473cfda1d7c384724fbb139dfe8cb39f79f626</td><td></td><td></td><td></td></tr><tr><td>5c820e47981d21c9dddde8d2f8020146e600368f</td><td>Extended Supervised Descent Method for
+<br/>Robust Face Alignment
+<br/><b>Beijing University of Posts and Telecommunications, Beijing, China</b></td><td>('9120475', 'Liu Liu', 'liu liu')<br/>('23224233', 'Jiani Hu', 'jiani hu')<br/>('1678529', 'Shuo Zhang', 'shuo zhang')<br/>('1774956', 'Weihong Deng', 'weihong deng')</td><td></td></tr><tr><td>5c5e1f367e8768a9fb0f1b2f9dbfa060a22e75c0</td><td>2132
+<br/>Reference Face Graph for Face Recognition
+</td><td>('1784929', 'Mehran Kafai', 'mehran kafai')<br/>('39776603', 'Le An', 'le an')<br/>('1707159', 'Bir Bhanu', 'bir bhanu')</td><td></td></tr><tr><td>5c35ac04260e281141b3aaa7bbb147032c887f0c</td><td>Face Detection and Tracking Control with Omni Car
+<br/>CS 231A Final Report
+<br/>June 31, 2016
+</td><td>('2645488', 'Tung-Yu Wu', 'tung-yu wu')</td><td></td></tr><tr><td>5c435c4bc9c9667f968f891e207d241c3e45757a</td><td>RUIZ-HERNANDEZ, CROWLEY, LUX: HOW OLD ARE YOU?
+<br/>"How old are you?" : Age Estimation with
+<br/>Tensors of Binary Gaussian Receptive Maps
+<br/>INRIA Grenoble Rhones-Alpes
+<br/><b>Research Center and Laboratoire</b><br/>d’Informatique de Grenoble (LIG)
+<br/>655 avenue de l’Europe
+<br/>38 334 Saint Ismier Cedex, France
+</td><td>('2291512', 'John A. Ruiz-Hernandez', 'john a. ruiz-hernandez')<br/>('34740185', 'James L. Crowley', 'james l. crowley')<br/>('2599357', 'Augustin Lux', 'augustin lux')</td><td>john-alexander.ruiz-hernandez@inrialpes.fr
+<br/>james.crowley@inrialpes.fr
+<br/>augustin.lux@inrialpes.fr
+</td></tr><tr><td>5c7adde982efb24c3786fa2d1f65f40a64e2afbf</td><td>Ranking Domain-Specific Highlights
+<br/>by Analyzing Edited Videos
+<br/><b>University of Washington, Seattle, WA, USA</b></td><td>('1711801', 'Min Sun', 'min sun')<br/>('2270286', 'Ali Farhadi', 'ali farhadi')</td><td></td></tr><tr><td>5c36d8bb0815fd4ff5daa8351df4a7e2d1b32934</td><td>GeePS: Scalable deep learning on distributed GPUs
+<br/>with a GPU-specialized parameter server
+<br/><b>Carnegie Mellon University</b></td><td>('1874200', 'Henggang Cui', 'henggang cui')<br/>('1682058', 'Hao Zhang', 'hao zhang')<br/>('1707164', 'Gregory R. Ganger', 'gregory r. ganger')<br/>('1974678', 'Phillip B. Gibbons', 'phillip b. gibbons')<br/>('1752601', 'Eric P. Xing', 'eric p. xing')</td><td></td></tr><tr><td>5cfbeae360398de9e20e4165485837bd42b93217</td><td>Cengil Emine, Cınars Ahmet, International Journal of Advance Research, Ideas and Innovations in Technology.
+<br/>ISSN: 2454-132X
+<br/>Impact factor: 4.295
+<br/>(Volume3, Issue5)
+<br/>Available online at www.ijariit.com
+<br/>Comparison Of Hog (Histogram of Oriented Gradients) and
+<br/>Haar Cascade Algorithms with a Convolutional Neural Network
+<br/>Based Face Detection Approaches
+<br/>Computer Engineering Department
+<br/><b>Firat University</b><br/>Computer Engineering Department
+<br/><b>Firat University</b></td><td>('27758959', 'Emine Cengil', 'emine cengil')</td><td>ecengil@firat.edu.tr
+<br/>acinar@firat.edu.tr
+</td></tr><tr><td>5ca14fa73da37855bfa880b549483ee2aba26669</td><td>ISSN (e): 2250 – 3005 || Volume, 07 || Issue, 07|| June – 2017 ||
+<br/>International Journal of Computational Engineering Research (IJCER)
+<br/>Face Recognition under Varying Illuminations Using Local
+<br/>Binary Pattern And Local Ternary Pattern Fusion
+<br/><b>Punjabi University Patiala</b><br/><b>Punjabi University Patiala</b></td><td>('2029759', 'Reecha Sharma', 'reecha sharma')</td><td></td></tr><tr><td>5c02bd53c0a6eb361972e8a4df60cdb30c6e3930</td><td>Multimedia stimuli databases usage patterns: a
+<br/>survey report
+<br/>M. Horvat1, S. Popović1 and K. Ćosić1
+<br/><b>University of Zagreb, Faculty of Electrical Engineering and Computing</b><br/>Department of Electric Machines, Drives and Automation
+<br/>Zagreb, Croatia
+</td><td></td><td>marko.horvat2@fer.hr
+</td></tr><tr><td>5c8ae37d532c7bb8d7f00dfde84df4ba63f46297</td><td>DiscrimNet: Semi-Supervised Action Recognition from Videos using Generative
+<br/>Adversarial Networks
+<br/><b>Georgia Institute of Technology</b><br/>Google
+<br/>Irfan Essa
+<br/><b>Georgia Institute of Technology</b></td><td>('2308598', 'Unaiza Ahsan', 'unaiza ahsan')<br/>('1726241', 'Chen Sun', 'chen sun')</td><td>uahsan3@gatech.edu
+<br/>chensun@google.com
+<br/>irfan@gatech.edu
+</td></tr><tr><td>5c717afc5a9a8ccb1767d87b79851de8d3016294</td><td>978-1-4673-0046-9/12/$26.00 ©2012 IEEE
+<br/>1845
+<br/>ICASSP 2012
+</td><td></td><td></td></tr><tr><td>5ce2cb4c76b0cdffe135cf24b9cda7ae841c8d49</td><td>Facial Expression Intensity Estimation Using Ordinal Information
+<br/><b>Computer and Systems Engineering, Rensselaer Polytechnic Institute</b><br/><b>School of Computer Science and Technology, University of Science and Technology of China</b></td><td>('1746803', 'Rui Zhao', 'rui zhao')<br/>('2316359', 'Quan Gan', 'quan gan')<br/>('1791319', 'Shangfei Wang', 'shangfei wang')<br/>('1726583', 'Qiang Ji', 'qiang ji')</td><td>1{zhaor,jiq}@rpi.edu, 2{gqquan@mail.,sfwang@}ustc.edu.cn
+</td></tr><tr><td>5c4d4fd37e8c80ae95c00973531f34a6d810ea3a</td><td>The Open World of Micro-Videos
+<br/><b>UC Irvine1, INRIA2, Carnegie Mellon University</b></td><td>('1879100', 'Phuc Xuan Nguyen', 'phuc xuan nguyen')<br/>('1770537', 'Deva Ramanan', 'deva ramanan')</td><td></td></tr><tr><td>09b80d8eea809529b08a8b0ff3417950c048d474</td><td>Adding Unlabeled Samples to Categories by Learned Attributes
+<br/><b>University of Maryland, College Park</b><br/><b>University of Washington</b></td><td>('3826759', 'Jonghyun Choi', 'jonghyun choi')<br/>('2270286', 'Ali Farhadi', 'ali farhadi')<br/>('1693428', 'Larry S. Davis', 'larry s. davis')</td><td>{jhchoi,mrastega,lsd}@umiacs.umd.edu
+<br/>ali@cs.uw.edu
+</td></tr><tr><td>09f58353e48780c707cf24a0074e4d353da18934</td><td>To appear in Proc. IEEE IJCB, 2014
+<br/>Unconstrained Face Recognition: Establishing Baseline
+<br/>Human Performance via Crowdsourcing
+<br/><b>Michigan State University, East Lansing, MI, U.S.A</b><br/><b>Cornell University, Ithaca, NY, U.S.A</b><br/>3Noblis, Falls Church, VA, U.S.A.
+</td><td>('2180413', 'Lacey Best-Rowden', 'lacey best-rowden')<br/>('2339748', 'Shiwani Bisht', 'shiwani bisht')<br/>('2619953', 'Joshua C. Klontz', 'joshua c. klontz')<br/>('6680444', 'Anil K. Jain', 'anil k. jain')</td><td>bestrow1@cse.msu.edu;sb854@cornell.edu;joshua.klontz@noblis.org;jain@cse.msu.edu
+</td></tr><tr><td>096eb8b4b977aaf274c271058feff14c99d46af3</td><td>REPORT DOCUMENTATION PAGE
+<br/>Form Approved OMB NO. 0704-0188
+<br/>including
+<br/>the
+<br/>time
+<br/>for reviewing
+<br/>for
+<br/>information,
+<br/>for
+<br/>this collection of
+<br/>information
+<br/>is estimated
+<br/>to average 1 hour per response,
+<br/>the data needed, and completing and reviewing
+<br/>this collection of
+<br/>instructions,
+<br/>The public reporting burden
+<br/> Send comments
+<br/>searching existing data sources, gathering and maintaining
+<br/>to Washington
+<br/>regarding
+<br/>this burden estimate or any other aspect of
+<br/>Information Operations and Reports, 1215 Jefferson Davis Highway, Suite 1204, Arlington VA, 22202-4302.
+<br/>Headquarters Services, Directorate
+<br/>Respondents should be aware that notwithstanding any other provision of law, no person shall be subject to any oenalty for failing to comply with a collection of
+<br/>information if it does not display a currently valid OMB control number.
+<br/>PLEASE DO NOT RETURN YOUR FORM TO THE ABOVE ADDRESS.
+<br/>1. REPORT DATE (DD-MM-YYYY)
+<br/>05-10-2012
+<br/>4. TITLE AND SUBTITLE
+<br/>Multi-observation visual recognition via joint dynamic sparse
+<br/>representation
+<br/>5a. CONTRACT NUMBER
+<br/>W911NF-09-1-0383
+<br/>5b. GRANT NUMBER
+<br/>2. REPORT TYPE
+<br/>Conference Proceeding
+<br/>3. DATES COVERED (From - To)
+<br/>the collection of
+<br/>reducing
+<br/>for
+<br/>information.
+<br/>this burden,
+<br/>including suggesstions
+<br/>6. AUTHORS
+<br/>Huang
+<br/>7. PERFORMING ORGANIZATION NAMES AND ADDRESSES
+<br/><b>William Marsh Rice University</b><br/>Office of Sponsored Research
+<br/><b>William Marsh Rice University</b><br/>Houston, TX
+<br/>9. SPONSORING/MONITORING AGENCY NAME(S) AND
+<br/>ADDRESS(ES)
+<br/>77005 -
+<br/>U.S. Army Research Office
+<br/> P.O. Box 12211
+<br/> Research Triangle Park, NC 27709-2211
+<br/>5c. PROGRAM ELEMENT NUMBER
+<br/>611103
+<br/>5d. PROJECT NUMBER
+<br/>5e. TASK NUMBER
+<br/>5f. WORK UNIT NUMBER
+<br/>8. PERFORMING ORGANIZATION REPORT
+<br/>NUMBER
+<br/>10. SPONSOR/MONITOR'S ACRONYM(S)
+<br/> ARO
+<br/>11. SPONSOR/MONITOR'S REPORT
+<br/>NUMBER(S)
+<br/>56177-CS-MUR.84
+<br/>12. DISTRIBUTION AVAILIBILITY STATEMENT
+<br/>Approved for public release; distribution is unlimited.
+<br/>13. SUPPLEMENTARY NOTES
+<br/>The views, opinions and/or findings contained in this report are those of the author(s) and should not contrued as an official Department
+<br/>of the Army position, policy or decision, unless so designated by other documentation.
+</td><td>('40479011', 'Haichao Zhang', 'haichao zhang')<br/>('8147588', 'Nasser M. Nasrabadi', 'nasser m. nasrabadi')<br/>('1801395', 'Yanning Zhang', 'yanning zhang')</td><td></td></tr><tr><td>0952ac6ce94c98049d518d29c18d136b1f04b0c0</td><td></td><td></td><td></td></tr><tr><td>0969e0dc05fca21ff572ada75cb4b703c8212e80</td><td>Article
+<br/>Semi-Supervised Classification Based on
+<br/>Low Rank Representation
+<br/><b>College of Computer and Information Science, Southwest University, Chongqing 400715, China</b><br/>Academic Editor: Javier Del Ser Lorente
+<br/>Received: 1 June 2016; Accepted: 20 July 2016; Published: 22 July 2016
+</td><td>('40290479', 'Xuan Hou', 'xuan hou')<br/>('3439025', 'Guangjun Yao', 'guangjun yao')<br/>('40362316', 'Jun Wang', 'jun wang')</td><td>hx1995@email.swu.edu.cn (X.H.); guangjunyao@email.swu.edu.cn (G.Y.)
+<br/>* Correspondence: kingjun@swu.edu.cn; Tel.: +86-23-6825-4396
+</td></tr><tr><td>09137e3c267a3414314d1e7e4b0e3a4cae801f45</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Two Birds with One Stone: Transforming and Generating
+<br/>Facial Images with Iterative GAN
+<br/>Received: date / Accepted: date
+</td><td>('49626434', 'Dan Ma', 'dan ma')</td><td></td></tr><tr><td>09dd01e19b247a33162d71f07491781bdf4bfd00</td><td>Efficiently Scaling Up Video Annotation
+<br/>with Crowdsourced Marketplaces
+<br/>Department of Computer Science
+<br/><b>University of California, Irvine, USA</b></td><td>('1856025', 'Carl Vondrick', 'carl vondrick')<br/>('1770537', 'Deva Ramanan', 'deva ramanan')</td><td>{cvondric,dramanan,djp3}@ics.uci.edu
+</td></tr><tr><td>09cf3f1764ab1029f3a7d57b70ae5d5954486d69</td><td>Comparison of ICA approaches for facial
+<br/>expression recognition
+<br/>I. Buciu 1,2 C. Kotropoulos 1
+<br/>I. Pitas 1
+<br/><b>Aristotle University of Thessaloniki</b><br/>GR-541 24, Thessaloniki, Box 451, Greece
+<br/>2 Electronics Department
+<br/>Faculty of Electrical Engineering and Information Technology
+<br/><b>University of Oradea 410087, Universitatii 1, Romania</b><br/>August 18, 2008
+<br/>DRAFT
+</td><td></td><td>costas,pitas@aiia.csd.auth.gr
+<br/>ibuciu@uoradea.ro
+</td></tr><tr><td>09fa54f1ab7aaa83124d2415bfc6eb51e4b1f081</td><td>Where to Buy It: Matching Street Clothing Photos in Online Shops
+<br/><b>University of North Carolina at Chapel Hill</b><br/><b>University of Illinois at Urbana-Champaign</b></td><td>('1772294', 'M. Hadi Kiapour', 'm. hadi kiapour')<br/>('1682965', 'Xufeng Han', 'xufeng han')<br/>('1749609', 'Svetlana Lazebnik', 'svetlana lazebnik')<br/>('39668247', 'Alexander C. Berg', 'alexander c. berg')<br/>('1685538', 'Tamara L. Berg', 'tamara l. berg')</td><td>{hadi,xufeng,tlberg,aberg}@cs.unc.edu
+<br/>slazebni@illinois.edu
+</td></tr><tr><td>09926ed62511c340f4540b5bc53cf2480e8063f8</td><td>Action Tubelet Detector for Spatio-Temporal Action Localization
+</td><td>('1881509', 'Vicky Kalogeiton', 'vicky kalogeiton')<br/>('2492127', 'Philippe Weinzaepfel', 'philippe weinzaepfel')<br/>('1749692', 'Vittorio Ferrari', 'vittorio ferrari')<br/>('2462253', 'Cordelia Schmid', 'cordelia schmid')</td><td></td></tr><tr><td>0951f42abbf649bb564a21d4ff5dddf9a5ea54d9</td><td>Joint Estimation of Age and Gender from Unconstrained Face Images
+<br/>using Lightweight Multi-task CNN for Mobile Applications
+<br/><b>Institute of Information Science, Academia Sinica, Taipei</b></td><td>('1781429', 'Jia-Hong Lee', 'jia-hong lee')<br/>('2679814', 'Yi-Ming Chan', 'yi-ming chan')<br/>('2329177', 'Ting-Yen Chen', 'ting-yen chen')<br/>('1720473', 'Chu-Song Chen', 'chu-song chen')</td><td>{honghenry.lee, yiming, timh20022002, song}@iis.sinica.edu.tw
+</td></tr><tr><td>09628e9116e7890bc65ebeabaaa5f607c9847bae</td><td>Semantically Consistent Regularization for Zero-Shot Recognition
+<br/>Department of Electrical and Computer Engineering
+<br/><b>University of California, San Diego</b></td><td>('1797523', 'Pedro Morgado', 'pedro morgado')<br/>('1699559', 'Nuno Vasconcelos', 'nuno vasconcelos')</td><td>{pmaravil,nuno}@ucsd.edu
+</td></tr><tr><td>09733129161ca7d65cf56a7ad63c17f493386027</td><td>Face Recognition under Varying Illumination
+<br/><b>Vienna University of Technology</b><br/>Inst. of Computer Graphics and
+<br/>Algorithms
+<br/>Vienna, Austria
+<br/><b>Istanbul Technical University</b><br/>Department of Computer
+<br/>Engineering
+<br/>Istanbul, Turkey
+<br/><b>Vienna University of Technology</b><br/>Inst. of Computer Graphics and
+<br/>Algorithms
+<br/>Vienna, Austria
+</td><td>('1968256', 'Erald VUÇINI', 'erald vuçini')<br/>('1766445', 'Muhittin GÖKMEN', 'muhittin gökmen')<br/>('1725803', 'Eduard GRÖLLER', 'eduard gröller')</td><td>vucini@cg.tuwien.ac.at
+<br/> gokmen@cs.itu.edu.tr
+<br/>groeller@cg.tuwien.ac.at
+</td></tr><tr><td>097340d3ac939ce181c829afb6b6faff946cdce0</td><td>Adding New Tasks to a Single Network with
+<br/>Weight Transformations using Binary Masks
+<br/><b>Sapienza University of Rome, 2Fondazione Bruno Kessler, 3University of Trento</b><br/><b>Italian Institute of Technology, 5Mapillary Research</b></td><td>('38286801', 'Massimiliano Mancini', 'massimiliano mancini')<br/>('40811261', 'Elisa Ricci', 'elisa ricci')<br/>('3033284', 'Barbara Caputo', 'barbara caputo')</td><td>{mancini,caputo}@diag.uniroma1.it,eliricci@fbk.eu,samuel@mapillary.com
+</td></tr><tr><td>09507f1f1253101d04a975fc5600952eac868602</td><td>Motion Feature Network: Fixed Motion Filter
+<br/>for Action Recognition
+<br/><b>Seoul National University, Seoul, South Korea</b><br/>2 V.DO Inc., Suwon, Korea
+</td><td>('2647624', 'Myunggi Lee', 'myunggi lee')<br/>('51151436', 'Seungeui Lee', 'seungeui lee')<br/>('51136389', 'Gyutae Park', 'gyutae park')<br/>('3160425', 'Nojun Kwak', 'nojun kwak')</td><td>{myunggi89, dehlix, sjson, pgt4861, nojunk}@snu.ac.kr
+</td></tr><tr><td>09718bf335b926907ded5cb4c94784fd20e5ccd8</td><td>875
+<br/>Recognizing Partially Occluded, Expression Variant
+<br/>Faces From Single Training Image per Person
+<br/>With SOM and Soft k-NN Ensemble
+</td><td>('2248421', 'Xiaoyang Tan', 'xiaoyang tan')<br/>('1680768', 'Songcan Chen', 'songcan chen')<br/>('1692625', 'Zhi-Hua Zhou', 'zhi-hua zhou')<br/>('2375371', 'Fuyan Zhang', 'fuyan zhang')</td><td></td></tr><tr><td>098a1ccc13b8d6409aa333c8a1079b2c9824705b</td><td>Attribute Pivots for Guiding Relevance Feedback in Image Search
+<br/><b>The University of Texas at Austin</b></td><td>('1770205', 'Adriana Kovashka', 'adriana kovashka')<br/>('1794409', 'Kristen Grauman', 'kristen grauman')</td><td>{adriana, grauman}@cs.utexas.edu
+</td></tr><tr><td>0903bb001c263e3c9a40f430116d1e629eaa616f</td><td>CVPR
+<br/>#987
+<br/>000
+<br/>001
+<br/>002
+<br/>003
+<br/>004
+<br/>005
+<br/>006
+<br/>007
+<br/>008
+<br/>009
+<br/>010
+<br/>011
+<br/>012
+<br/>013
+<br/>014
+<br/>015
+<br/>016
+<br/>017
+<br/>018
+<br/>019
+<br/>020
+<br/>021
+<br/>022
+<br/>023
+<br/>024
+<br/>025
+<br/>026
+<br/>027
+<br/>028
+<br/>029
+<br/>030
+<br/>031
+<br/>032
+<br/>033
+<br/>034
+<br/>035
+<br/>036
+<br/>037
+<br/>038
+<br/>039
+<br/>040
+<br/>041
+<br/>042
+<br/>043
+<br/>044
+<br/>045
+<br/>046
+<br/>047
+<br/>048
+<br/>049
+<br/>050
+<br/>051
+<br/>052
+<br/>053
+<br/>CVPR 2009 Submission #987. CONFIDENTIAL REVIEW COPY. DO NOT DISTRIBUTE.
+<br/>An Empirical Study of Context in Object Detection
+<br/>Anonymous CVPR submission
+<br/>Paper ID 987
+</td><td></td><td></td></tr><tr><td>090ff8f992dc71a1125636c1adffc0634155b450</td><td>Topic-aware Deep Auto-encoders (TDA)
+<br/>for Face Alignment
+<br/>1Key Lab of Intelligent Information Processing of Chinese Academy of Sciences
+<br/><b>CAS), Institute of Computing Technology, CAS, Beijing 100190, China</b><br/><b>University of Chinese Academy of Sciences, Beijing 100049, China</b><br/><b>Imperial College London, London, UK</b></td><td>('1698586', 'Jie Zhang', 'jie zhang')<br/>('1693589', 'Meina Kan', 'meina kan')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('1874505', 'Xiaowei Zhao', 'xiaowei zhao')<br/>('1710220', 'Xilin Chen', 'xilin chen')</td><td></td></tr><tr><td>09b43b59879d59493df2a93c216746f2cf50f4ac</td><td>Deep Transfer Metric Learning
+<br/><b>School of Electrical and Electronic Engineering, Nanyang Technological University, Singapore. 2Advanced Digital Sciences Center, Singapore</b><br/>How to design a good similarity function plays an important role in many
+<br/>visual recognition tasks. Recent advances have shown that learning a dis-
+<br/>tance metric directly from a set of training examples can usually achieve
+<br/>proposing performance than hand-crafted distance metrics [2, 3]. While
+<br/>many metric learning algorithms have been presented in recent years, there
+<br/>are still two shortcomings: 1) most of them usually seek a single linear dis-
+<br/>tance to transform sample into a linear feature space, so that the nonlinear
+<br/>relationship of samples cannot be well exploited. Even if the kernel trick
+<br/>can be employed to addressed the nonlinearity issue, these methods still
+<br/>suffer from the scalability problem because they cannot obtain the explicit
+<br/>nonlinear mapping functions; 2) most of them assume that the training and
+<br/>test samples are captured in similar scenarios so that their distributions are
+<br/>assumed to be the same. This assumption doesn’t hold in many real visual
+<br/>recognition applications, when samples are captured across datasets.
+<br/>We propose a deep transfer metric learning (DTML) method for cross-
+<br/>dataset visual recognition. Our method learns a set of hierarchical nonlinear
+<br/>transformations by transferring discriminative knowledge from the labeled
+<br/>source domain to the unlabeled target domain, under which the inter-class
+<br/>variations are maximized and the intra-class variations are minimized, and
+<br/>the distribution divergence between the source domain and the target do-
+<br/>main at the top layer of the network is minimized, simultaneously. Figure 1
+<br/>illustrates the basic idea of the proposed method.
+<br/>Figure 1: The basic idea of the proposed DTML method. For each sample
+<br/>in the training sets from the source domain and the target domain, we pass
+<br/>it to the developed deep neural network. We enforce two constraints on
+<br/>the outputs of all training samples at the top of the network: 1) the inter-
+<br/>class variations are maximized and the intra-class variations are minimized,
+<br/>and 2) the distribution divergence between the source domain and the target
+<br/>domain at the top layer of the network is minimized.
+<br/>Deep Metric Learning. We construct a deep neural network to compute
+<br/>the representations of each sample x. Assume there are M + 1 layers of the
+<br/>network and p(m) units in the mth layer, where m = 1,2,··· ,M. The output
+<br/>of x at the mth layer is computed as:
+<br/>(cid:16)
+<br/>W(m)h(m−1) + b(m)(cid:17) ∈ Rp(m)
+<br/>(1)
+<br/>f (m)(x) = h(m) = ϕ
+<br/>where W(m) ∈ Rp(m)×p(m−1) and b(m) ∈ Rp(m) are the weight matrix and bias
+<br/>of the parameters in this layer; and ϕ is a nonlinear activation function which
+<br/>operates component-wisely, e.g., tanh or sigmoid functions. The nonlinear
+<br/>mapping f (m) : Rd (cid:55)→ Rp(m) is a function parameterized by {W(i)}m
+<br/>i=1 and
+<br/>{b(i)}m
+<br/>i=1. For the first layer, we assume h(0) = x.
+<br/>For each pair of samples xi and x j, they can be finally represented as
+<br/>f (m)(xi) and f (m)(x j) at the mth layer of our designed network, and their
+<br/>distance metric can be measured by computing the squared Euclidean dis-
+<br/>tance between f (m)(xi) and f (m)(x j) at the mth layer:
+<br/>where Pi j is set as one if x j is one of k1-intra-class nearest neighbors of xi,
+<br/>and zero otherwise; and Qi j is set as one if x j is one of k2-interclass nearest
+<br/>neighbors of xi, and zero otherwise.
+<br/>Deep Transfer Metric Learning. Given target domain data Xt and source
+<br/>domain data Xs, their probability distributions are usually different in the o-
+<br/>riginal feature space when they are captured from different datasets. To
+<br/>reduce the distribution difference, we apply the Maximum Mean Discrep-
+<br/>ancy (MMD) criterion [1] to measure their distribution difference at the mth
+<br/>layer, which is defined as as follows:
+<br/>ts (Xt ,Xs) =
+<br/>D(m)
+<br/>Nt ∑Nt
+<br/>i=1 f (m)(xti)− 1
+<br/>Ns ∑Ns
+<br/>i=1 f (m)(xsi)
+<br/>(6)
+<br/>By combining (3) and (6), we formulate DTML as the following opti-
+<br/>mization problem:
+<br/>(cid:13)(cid:13)(cid:13)(cid:13) 1
+<br/>(cid:13)(cid:13)(cid:13)(cid:13)2
+<br/>d2
+<br/>f (m) (xi,x j) =
+<br/>(2)
+<br/>min
+<br/>f (M)
+<br/>(cid:13)(cid:13)(cid:13) f (m)(xi)− f (m)(x j)
+<br/>(cid:13)(cid:13)(cid:13)2
+<br/>(cid:16)(cid:13)(cid:13)W(m)(cid:13)(cid:13)2
+<br/>Following the graph embedding framework, we enforce the marginal
+<br/>fisher analysis criterion [4] on the output of all training samples at the top
+<br/>layer and formulate a strongly-supervised deep metric learning method:
+<br/>F +(cid:13)(cid:13)b(m)(cid:13)(cid:13)2
+<br/>(cid:17)
+<br/>(3)
+<br/>J = S(M)
+<br/>c − α S(M)
+<br/>b + γ ∑M
+<br/>m=1
+<br/>min
+<br/>f (M)
+<br/>where α (α > 0) is a free parameter which balances the important between
+<br/>intra-class compactness and interclass separability; (cid:107)Z(cid:107)F denotes the Frobe-
+<br/>nius norm of the matrix Z; γ (γ > 0) is a tunable positive regularization pa-
+<br/>rameter; S(m)
+<br/>define the intra-class compactness and the interclass
+<br/>separability, which are defined as follows:
+<br/>and S(m)
+<br/>S(m)
+<br/>c =
+<br/>S(m)
+<br/>b =
+<br/>Nk1
+<br/>Nk2
+<br/>i=1∑N
+<br/>∑N
+<br/>i=1∑N
+<br/>∑N
+<br/>j=1 Pi j d2
+<br/>f (m) (xi,x j),
+<br/>j=1 Qi j d2
+<br/>f (m) (xi,x j),
+<br/>(4)
+<br/>(5)
+</td><td>('34651153', 'Junlin Hu', 'junlin hu')<br/>('1697700', 'Jiwen Lu', 'jiwen lu')<br/>('1689805', 'Yap-Peng Tan', 'yap-peng tan')</td><td></td></tr><tr><td>09df62fd17d3d833ea6b5a52a232fc052d4da3f5</td><td>ISSN: 1405-5546
+<br/>Instituto Politécnico Nacional
+<br/>México
+<br/>
+<br/>Rivas Araiza, Edgar A.; Mendiola Santibañez, Jorge D.; Herrera Ruiz, Gilberto; González Gutiérrez,
+<br/>Carlos A.; Trejo Perea, Mario; Ríos Moreno, G. J.
+<br/>Mejora de Contraste y Compensación en Cambios de la Iluminación
+<br/>Instituto Politécnico Nacional
+<br/>Distrito Federal, México
+<br/>Disponible en: http://www.redalyc.org/articulo.oa?id=61509703
+<br/> Cómo citar el artículo
+<br/> Número completo
+<br/> Más información del artículo
+<br/> Página de la revista en redalyc.org
+<br/>Sistema de Información Científica
+<br/>Red de Revistas Científicas de América Latina, el Caribe, España y Portugal
+<br/>Proyecto académico sin fines de lucro, desarrollado bajo la iniciativa de acceso abierto
+</td><td></td><td>computacion-y-sistemas@cic.ipn.mx
+</td></tr><tr><td>09b0ef3248ff8f1a05b8704a1b4cf64951575be9</td><td>Recognizing Activities of Daily Living with a Wrist-mounted Camera
+<br/><b>Graduate School of Information Science and Technology, The University of Tokyo</b></td><td>('8197937', 'Katsunori Ohnishi', 'katsunori ohnishi')<br/>('2551640', 'Atsushi Kanehira', 'atsushi kanehira')<br/>('2554424', 'Asako Kanezaki', 'asako kanezaki')<br/>('1790553', 'Tatsuya Harada', 'tatsuya harada')</td><td>{ohnishi, kanehira, kanezaki, harada}@mi.t.u-tokyo.ac.jp
+</td></tr><tr><td>097104fc731a15fad07479f4f2c4be2e071054a2</td><td></td><td></td><td></td></tr><tr><td>094357c1a2ba3fda22aa6dd9e496530d784e1721</td><td>A Unified Probabilistic Approach Modeling Relationships
+<br/>between Attributes and Objects
+<br/><b>Rensselaer Polytechnic Institute</b><br/>110 Eighth Street, Troy, NY USA 12180
+</td><td>('40066738', 'Xiaoyang Wang', 'xiaoyang wang')<br/>('1726583', 'Qiang Ji', 'qiang ji')</td><td>{wangx16,jiq}@rpi.edu
+</td></tr><tr><td>09f853ce12f7361c4b50c494df7ce3b9fad1d221</td><td>myjournal manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Random forests for real time 3D face analysis
+<br/>Received: date / Accepted: date
+</td><td>('3092828', 'Gabriele Fanelli', 'gabriele fanelli')<br/>('1681236', 'Luc Van Gool', 'luc van gool')</td><td></td></tr><tr><td>09111da0aedb231c8484601444296c50ca0b5388</td><td></td><td></td><td></td></tr><tr><td>09750c9bbb074bbc4eb66586b20822d1812cdb20</td><td>978-1-4673-0046-9/12/$26.00 ©2012 IEEE
+<br/>1385
+<br/>ICASSP 2012
+</td><td></td><td></td></tr><tr><td>09ce14b84af2dc2f76ae1cf227356fa0ba337d07</td><td>Face Reconstruction in the Wild
+<br/><b>University of Washington</b><br/><b>University of Washington and Google Inc</b></td><td>('2419955', 'Ira Kemelmacher-Shlizerman', 'ira kemelmacher-shlizerman')<br/>('1679223', 'Steven M. Seitz', 'steven m. seitz')</td><td>kemelmi@cs.washington.edu
+<br/>seitz@cs.washington.edu
+</td></tr><tr><td>090e4713bcccff52dcd0c01169591affd2af7e76</td><td>What Do You Do? Occupation Recognition
+<br/>in a Photo via Social Context
+<br/><b>College of Computer and Information Science, Northeastern University, MA, USA</b><br/><b>Northeastern University, MA, USA</b></td><td>('2025056', 'Ming Shao', 'ming shao')<br/>('2897748', 'Liangyue Li', 'liangyue li')</td><td>mingshao@ccs.neu.edu, {liangyue, yunfu}@ece.neu.edu
+</td></tr><tr><td>097f674aa9e91135151c480734dda54af5bc4240</td><td>Proc. VIIth Digital Image Computing: Techniques and Applications, Sun C., Talbot H., Ourselin S. and Adriaansen T. (Eds.), 10-12 Dec. 2003, Sydney
+<br/>Face Recognition Based on Multiple Region Features
+<br/>CSIRO Telecommunications & Industrial Physics
+<br/>Australia
+<br/>Tel: 612 9372 4104, Fax: 612 9372 4411, Email:
+</td><td>('40833472', 'Jiaming Li', 'jiaming li')<br/>('1751724', 'Ying Guo', 'ying guo')<br/>('39877973', 'Rong-yu Qiao', 'rong-yu qiao')</td><td>jiaming.li@csiro.au
+</td></tr><tr><td>5d485501f9c2030ab33f97972aa7585d3a0d59a7</td><td></td><td></td><td></td></tr><tr><td>5da740682f080a70a30dc46b0fc66616884463ec</td><td>Real-Time Head Pose Estimation Using
+<br/>Multi-Variate RVM on Faces in the Wild
+<br/>Augmented Vision Research Group,
+<br/><b>German Research Center for Arti cial Intelligence (DFKI</b><br/>Tripstaddterstr. 122, 67663 Kaiserslautern, Germany
+<br/><b>Technical University of Kaiserslautern</b><br/>http://www.av.dfki.de
+</td><td>('2585383', 'Mohamed Selim', 'mohamed selim')<br/>('1771057', 'Alain Pagani', 'alain pagani')<br/>('1807169', 'Didier Stricker', 'didier stricker')</td><td>{mohamed.selim,alain.pagani,didier.stricker}@dfki.de
+</td></tr><tr><td>5de5848dc3fc35e40420ffec70a407e4770e3a8d</td><td>WebVision Database: Visual Learning and Understanding from Web Data
+<br/>1 Computer Vision Laboratory, ETH Zurich
+<br/>2 Google Switzerland
+</td><td>('1702619', 'Wen Li', 'wen li')<br/>('33345248', 'Limin Wang', 'limin wang')<br/>('1688012', 'Wei Li', 'wei li')<br/>('2794259', 'Eirikur Agustsson', 'eirikur agustsson')<br/>('1681236', 'Luc Van Gool', 'luc van gool')</td><td></td></tr><tr><td>5da139fc43216c86d779938d1c219b950dd82a4c</td><td>1-4244-1437-7/07/$20.00 ©2007 IEEE
+<br/>II - 205
+<br/>ICIP 2007
+</td><td></td><td></td></tr><tr><td>5dc056fe911a3e34a932513abe637076250d96da</td><td></td><td></td><td></td></tr><tr><td>5d185d82832acd430981ffed3de055db34e3c653</td><td>A Fuzzy Reasoning Model for Recognition
+<br/>of Facial Expressions
+<br/><b>Research Center CENTIA, Electronics and Mechatronics</b><br/>Universidad de las Américas, 72820, Puebla, Mexico
+<br/>{oleg.starostenko; renan.contrerasgz; vicente.alarcon; leticia.florespo;
+<br/><b>Engineering Institute, Autonomous University of Baja California, Blvd. Benito Ju rez</b><br/>Insurgentes Este, 21280, Mexicali, Baja California, Mexico
+<br/>3 Universidad Politécnica de Baja California, Mexicali, Baja California, Mexico
+</td><td>('1956337', 'Oleg Starostenko', 'oleg starostenko')<br/>('20083621', 'Renan Contreras', 'renan contreras')<br/>('1690236', 'Vicente Alarcón Aquino', 'vicente alarcón aquino')<br/>('2069473', 'Oleg Sergiyenko', 'oleg sergiyenko')</td><td>jorge.rodriguez}@udlap.mx
+<br/>srgnk@iing.mxl.uabc.mx
+<br/>vera-tyrsa@yandex.ru
+</td></tr><tr><td>5d233e6f23b1c306cf62af49ce66faac2078f967</td><td>RESEARCH ARTICLE
+<br/>Optimal Geometrical Set for Automated
+<br/>Marker Placement to Virtualized Real-Time
+<br/>Facial Emotions
+<br/>School of Mechatronic Engineering, Universiti Malaysia Perlis, 02600, Ulu Pauh, Arau, Perlis, West Malaysia
+</td><td>('6962924', 'Vasanthan Maruthapillai', 'vasanthan maruthapillai')<br/>('32588646', 'Murugappan Murugappan', 'murugappan murugappan')</td><td>* murugappan@unimap.edu.my
+</td></tr><tr><td>5dd496e58cfedfc11b4b43c4ffe44ac72493bf55</td><td>Discriminative convolutional Fisher vector network for action recognition
+<br/>School of Electrical Engineering and Computer Science
+<br/><b>Queen Mary University of London</b><br/>London E1 4NS, United Kingdom
+</td><td>('2685285', 'Petar Palasek', 'petar palasek')<br/>('1744405', 'Ioannis Patras', 'ioannis patras')</td><td>p.palasek@qmul.ac.uk, i.patras@qmul.ac.uk
+</td></tr><tr><td>5db075a308350c083c3fa6722af4c9765c4b8fef</td><td>The Novel Method of Moving Target Tracking Eyes
+<br/>Location based on SIFT Feature Matching and Gabor
+<br/>Wavelet Algorithm
+<br/><b>College of Computer and Information Engineering, Nanyang Institute of Technology</b><br/>Henan Nanyang, 473004, China
+<br/>* Tel.: 0086+13838972861
+<br/>Sensors & Transducers, Vol. 154, Issue 7, July 2013, pp. 129-137
+<br/>
+<br/>SSSeeennnsssooorrrsss &&& TTTrrraaannnsssddduuuccceeerrrsss
+<br/>© 2013 by IFSA
+<br/>http://www.sensorsportal.com
+<br/>Received: 28 April 2013 /Accepted: 19 July 2013 /Published: 31 July 2013
+</td><td>('2266189', 'Jing Zhang', 'jing zhang')<br/>('2732767', 'Caixia Yang', 'caixia yang')<br/>('1809507', 'Kecheng Liu', 'kecheng liu')</td><td>* E-mail: eduzhangjing@163.com
+</td></tr><tr><td>5d7f8eb73b6a84eb1d27d1138965eb7aef7ba5cf</td><td>Robust Registration of Dynamic Facial Sequences
+</td><td>('2046537', 'Evangelos Sariyanidi', 'evangelos sariyanidi')<br/>('1781916', 'Hatice Gunes', 'hatice gunes')<br/>('1713138', 'Andrea Cavallaro', 'andrea cavallaro')</td><td></td></tr><tr><td>5dcf78de4d3d867d0fd4a3105f0defae2234b9cb</td><td></td><td></td><td></td></tr><tr><td>5db4fe0ce9e9227042144758cf6c4c2de2042435</td><td>INTERNATIONAL JOURNAL OF ELECTRICAL AND ELECTRONIC SYSTEMS RESEARCH, VOL.3, JUNE 2010
+<br/>Recognition of Facial Expression Using Haar
+<br/>Wavelet Transform
+<br/>for
+<br/>paper
+<br/>features
+<br/>investigates
+<br/>
+</td><td>('2254697', 'M. Satiyan', 'm. satiyan')</td><td></td></tr><tr><td>5d88702cdc879396b8b2cc674e233895de99666b</td><td>Exploiting Feature Hierarchies with Convolutional Neural Networks
+<br/>for Cultural Event Recognition
+<br/>1Key Lab of Intelligent Information Processing of Chinese Academy of Sciences (CAS),
+<br/><b>Institute of Computing Technology, CAS, Beijing, 100190, China</b><br/><b>School of Computer Science, Carnegie Mellon University, 15213, USA</b></td><td>('1730228', 'Mengyi Liu', 'mengyi liu')<br/>('1731144', 'Xin Liu', 'xin liu')<br/>('38751558', 'Yan Li', 'yan li')<br/>('1710220', 'Xilin Chen', 'xilin chen')<br/>('7661726', 'Alexander G. Hauptmann', 'alexander g. hauptmann')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')</td><td>{mengyi.liu, xin.liu, yan.li}@vipl.ict.ac.cn, {xlchen, sgshan}@ict.ac.cn, alex@cs.cmu.edu
+</td></tr><tr><td>5d5cd6fa5c41eb9d3d2bab3359b3e5eb60ae194e</td><td>Face Recognition Algorithms
+<br/>June 16, 2010
+<br/>Ion Marqu´es
+<br/>Supervisor:
+<br/>Manuel Gra˜na
+</td><td></td><td></td></tr><tr><td>5d09d5257139b563bd3149cfd5e6f9eae3c34776</td><td>Optics Communications 338 (2015) 77–89
+<br/>Contents lists available at ScienceDirect
+<br/>Optics Communications
+<br/>journal homepage: www.elsevier.com/locate/optcom
+<br/>Pattern recognition with composite correlation filters designed with
+<br/>multi-objective combinatorial optimization
+<br/>a Instituto Politécnico Nacional – CITEDI, Ave. del Parque 1310, Mesade Otay, Tijuana B.C. 22510, México
+<br/>b Department of Computer Science, CICESE, Carretera Ensenada-Tijuana 3918, Ensenada B.C. 22860, México
+<br/>c Instituto Tecnológico de Tijuana, Blvd. Industrial y Ave. ITR TijuanaS/N, Mesa de Otay, Tijuana B.C. 22500, México
+<br/>d National Ignition Facility, Lawrence Livermore National Laboratory, Livermore, CA 94551, USA
+<br/>a r t i c l e i n f o
+<br/>a b s t r a c t
+<br/>Article history:
+<br/>Received 12 July 2014
+<br/>Accepted 16 November 2014
+<br/>Available online 23 October 2014
+<br/>Keywords:
+<br/>Object recognition
+<br/>Composite correlation filters
+<br/>Multi-objective evolutionary algorithm
+<br/>Combinatorial optimization
+<br/>Composite correlation filters are used for solving a wide variety of pattern recognition problems. These
+<br/>filters are given by a combination of several training templates chosen by a designer in an ad hoc manner.
+<br/>In this work, we present a new approach for the design of composite filters based on multi-objective
+<br/>combinatorial optimization. Given a vast search space of training templates, an iterative algorithm is used
+<br/>to synthesize a filter with an optimized performance in terms of several competing criteria. Moreover, by
+<br/>employing a suggested binary-search procedure a filter bank with a minimum number of filters can be
+<br/>constructed, for a prespecified trade-off of performance metrics. Computer simulation results obtained
+<br/>with the proposed method in recognizing geometrically distorted versions of a target in cluttered and
+<br/>noisy scenes are discussed and compared in terms of recognition performance and complexity with
+<br/>existing state-of-the-art filters.
+<br/>& Elsevier B.V. All rights reserved.
+<br/>1.
+<br/>Introduction
+<br/>Nowadays, object recognition receives much research interest
+<br/>due to its high impact in real-life activities, such as robotics, bio-
+<br/>metrics, and target tracking [1,2]. Object recognition consists in
+<br/>solving two essential tasks: detection of a target within an ob-
+<br/>served scene and determination of the exact position of the de-
+<br/>tected object. Different approaches can be utilized to address these
+<br/>tasks, that is feature-based methods [3–6] and template matching
+<br/>algorithms [7,8]. In feature-based methods the observed scene is
+<br/>processed to extract relevant features of potential targets within
+<br/>the scene. Next, the extracted features are processed and analyzed
+<br/>to make decisions. Feature-based methods yield good results in
+<br/>many applications. However, they depend on several subjective
+<br/>decisions which often require optimization [9,10]. On the other
+<br/>hand, correlation filtering is a template matching processing. In
+<br/>this approach, the coordinates of the maximum of the filter output
+<br/>are taken as estimates of the target coordinates in the observed
+<br/>scene. Correlation filters possess a good mathematical basis and
+<br/>they can be implemented by exploiting massive parallelism either
+<br/>in hybrid opto-digital correlators [11,12] or in high-performance
+<br/>n Corresponding author. Tel.: þ52 664 623 1344x82856.
+<br/>http://dx.doi.org/10.1016/j.optcom.2014.10.038
+<br/>0030-4018/& Elsevier B.V. All rights reserved.
+<br/>hardware such as graphics processing units (GPUs) [13] or field
+<br/>programmable gate arrays (FPGAs) [14] at high rate. Additionally,
+<br/>these filters are capable to reliably recognize a target in highly
+<br/>cluttered and noisy environments [8,15,16]. Moreover, they are
+<br/>able to estimate very accurately the position of the target within
+<br/>the scene [17]. Correlation filters are usually designed by a opti-
+<br/>mization of various criteria [18,19]. The filters can be broadly
+<br/>classified in to two main categories: analytical and composite fil-
+<br/>ters. Analytical filters optimize a performance criterion using
+<br/>mathematical models of signals and noise [20,21]. Composite fil-
+<br/>ters are constructed by combination of several training templates,
+<br/>each of them representing an expected target view in the observed
+<br/>scene [22,21]. In practice, composite filters are effective for real-
+<br/>life degradations of targets such as rotations and scaling. Compo-
+<br/>site filters are synthesized by means of a supervised training
+<br/>process. Thus, the performance of the filters highly depends on a
+<br/>proper selection of image templates used for training [20,23].
+<br/>Normally, the training templates are chosen by a designer in an ad
+<br/>hoc manner. Such a subjective procedure is not optimal. In addi-
+<br/>tion, Kumar and Pochavsky [24] showed that the signal to noise
+<br/>ratio of a composite filter gradually reduces when the number of
+<br/>training templates increases. In order to synthesize composite
+<br/>filters with improved performance in terms of several competing
+<br/>metrics, a search and optimization strategy is required to auto-
+<br/>matically choose the set of training templates.
+</td><td>('1908859', 'Victor H. Diaz-Ramirez', 'victor h. diaz-ramirez')<br/>('14245397', 'Andres Cuevas', 'andres cuevas')<br/>('1684262', 'Vitaly Kober', 'vitaly kober')<br/>('2166904', 'Leonardo Trujillo', 'leonardo trujillo')<br/>('37615801', 'Abdul Awwal', 'abdul awwal')</td><td>E-mail address: vdiazr@ipn.mx (V.H. Diaz-Ramirez).
+</td></tr><tr><td>5d479f77ecccfac9f47d91544fd67df642dfab3c</td><td>Linking People in Videos with “Their” Names
+<br/>Using Coreference Resolution
+<br/><b>Stanford University, USA</b><br/><b>Stanford University, USA</b></td><td>('34066479', 'Vignesh Ramanathan', 'vignesh ramanathan')<br/>('2319608', 'Armand Joulin', 'armand joulin')<br/>('40085065', 'Percy Liang', 'percy liang')<br/>('3216322', 'Li Fei-Fei', 'li fei-fei')</td><td>{vigneshr,ajoulin,pliang,feifeili}@cs.stanford.edu
+</td></tr><tr><td>5d01283474b73a46d80745ad0cc0c4da14aae194</td><td></td><td></td><td></td></tr><tr><td>5d197c8cd34473eb6cde6b65ced1be82a3a1ed14</td><td><b>AFaceImageDatabaseforEvaluatingOut-of-FocusBlurQiHan,QiongLiandXiamuNiuHarbinInstituteofTechnologyChina1.IntroductionFacerecognitionisoneofthemostpopularresearchfieldsofcomputervisionandmachinelearning(Tores(2004);Zhaoetal.(2003)).Alongwithinvestigationoffacerecognitionalgorithmsandsystems,manyfaceimagedatabaseshavebeencollected(Gross(2005)).Facedatabasesareimportantfortheadvancementoftheresearchfield.Becauseofthenonrigidityandcomplex3Dstructureofface,manyfactorsinfluencetheperformanceoffacedetectionandrecognitionalgorithmssuchaspose,expression,age,brightness,contrast,noise,blurandetc.Someearlyfacedatabasesgatheredunderstrictlycontrolledenvironment(Belhumeuretal.(1997);Samaria&Harter(1994);Turk&Pentland(1991))onlyallowslightexpressionvariation.Toinvestigatetherelationshipsbetweenalgorithms’performanceandtheabovefactors,morefacedatabaseswithlargerscaleandvariouscharacterswerebuiltinthepastyears(Bailly-Bailliereetal.(2003);Flynnetal.(2003);Gaoetal.(2008);Georghiadesetal.(2001);Hallinan(1995);Phillipsetal.(2000);Simetal.(2003)).Forinstance,The"CAS-PEAL","FERET","CMUPIE",and"YaleB"databasesincludevariousposes(Gaoetal.(2008);Georghiadesetal.(2001);Phillipsetal.(2000);Simetal.(2003));The"HarvardRL","CMUPIE"and"YaleB"databasesinvolvemorethan40differentconditionsinillumination(Georghiadesetal.(2001);Hallinan(1995);Simetal.(2003));Andthe"BANCA",and"NDHID"databasescontainover10timesgathering(Bailly-Bailliereetal.(2003);Flynnetal.(2003)).Thesedatabaseshelpresearcherstoevaluateandimprovetheiralgorithmsaboutfacedetection,recognition,andotherpurposes.Blurisnotthemostimportantbutstillanotablefactoraffectingtheperformanceofabiometricsystem(Fronthaleretal.(2006);Zamanietal.(2007)).Themainreasonsleadingblurconsistinout-of-focusofcameraandmotionofobject,andtheout-of-focusblurismoresignificantintheapplicationenvironmentoffacerecogni
tion(Eskicioglu&Fisher(1995);Kimetal.(1998);Tanakaetal.(2007);Yitzhaky&Kopeika(1996)).Toinvestigatetheinfluenceofbluronafacerecognitionsystem,afaceimagedatabasewithdifferentconditionsofclarityandefficientblurevaluatingalgorithmsareneeded.Thischapterintroducesanewfacedatabasebuiltforthepurposeofblurevaluation.Theapplicationenvironmentsoffacerecognitionareanalyzedfirstly,thenaimagegatheringschemeisdesigned.Twotypicalgatheringfacilitiesareusedandthefocusstatusaredividedinto11steps.Further,theblurassessmentalgorithmsaresummarizedandthecomparisonbetweenthemisraisedonthevarious-claritydatabase.The7www.intechopen.com</b></td><td></td><td></td></tr><tr><td>5da2ae30e5ee22d00f87ebba8cd44a6d55c6855e</td><td><b>This is an Open Access document downloaded from ORCA, Cardiff University's institutional</b><br/>repository: http://orca.cf.ac.uk/111659/
+<br/>This is the author’s version of a work that was submitted to / accepted for publication.
+<br/>Citation for final published version:
+<br/>Krumhuber, Eva G, Lai, Yukun, Rosin, Paul and Hugenberg, Kurt 2018. When facial expressions
+<br/>Publishers page:
+<br/>Please note:
+<br/>Changes made as a result of publishing processes such as copy-editing, formatting and page
+<br/>numbers may not be reflected in this version. For the definitive version of this publication, please
+<br/>refer to the published source. You are advised to consult the publisher’s version if you wish to cite
+<br/>this paper.
+<br/>This version is being made available in accordance with publisher policies. See
+<br/>http://orca.cf.ac.uk/policies.html for usage policies. Copyright and moral rights for publications
+<br/>made available in ORCA are retained by the copyright holders.
+</td><td></td><td></td></tr><tr><td>5df376748fe5ccd87a724ef31d4fdb579dab693f</td><td>A Dashboard for Affective E-learning:
+<br/>Data Visualization for Monitoring Online Learner Emotions
+<br/>School of Computer Science
+<br/><b>Carleton University</b><br/>Canada
+</td><td>('2625368', 'Reza GhasemAghaei', 'reza ghasemaghaei')<br/>('40230630', 'Ali Arya', 'ali arya')<br/>('8547603', 'Robert Biddle', 'robert biddle')</td><td>Reza.GhasemAghaei@carleton.ca
+</td></tr><tr><td>31aa20911cc7a2b556e7d273f0bdd5a2f0671e0a</td><td></td><td></td><td></td></tr><tr><td>31b05f65405534a696a847dd19c621b7b8588263</td><td></td><td></td><td></td></tr><tr><td>31625522950e82ad4dffef7ed0df00fdd2401436</td><td>Motion Representation with Acceleration Images
+<br/><b>National Institute of Advanced Industrial Science and Technology (AIST</b><br/>Tsukuba, Ibaraki, Japan
+</td><td>('1730200', 'Hirokatsu Kataoka', 'hirokatsu kataoka')<br/>('1713046', 'Yun He', 'yun he')<br/>('3393640', 'Soma Shirakabe', 'soma shirakabe')<br/>('1732705', 'Yutaka Satoh', 'yutaka satoh')</td><td>{hirokatsu.kataoka, yun.he, shirakabe-s, yu.satou}@aist.go.jp
+</td></tr><tr><td>3167f415a861f19747ab5e749e78000179d685bc</td><td>RankBoost with l1 regularization for Facial Expression Recognition and
+<br/>Intensity Estimation
+<br/><b>Rutgers University, Piscataway NJ 08854, USA</b><br/>2National Laboratory of Pattern Recognition, Chinese Academy of Sciences Beijing, 100080, China
+</td><td>('39606160', 'Peng Yang', 'peng yang')<br/>('1734954', 'Qingshan Liu', 'qingshan liu')<br/>('1711560', 'Dimitris N. Metaxas', 'dimitris n. metaxas')</td><td></td></tr><tr><td>3107316f243233d45e3c7e5972517d1ed4991f91</td><td>CVAE-GAN: Fine-Grained Image Generation through Asymmetric Training
+<br/><b>University of Science and Technology of China</b><br/>2Microsoft Research Asia,
+</td><td>('3093568', 'Jianmin Bao', 'jianmin bao')<br/>('39447786', 'Dong Chen', 'dong chen')<br/>('1716835', 'Fang Wen', 'fang wen')<br/>('7179232', 'Houqiang Li', 'houqiang li')<br/>('1745420', 'Gang Hua', 'gang hua')</td><td>jmbao@mail.ustc.edu.cn, lihq@ustc.edu.cn
+<br/>{doch,fangwen,ganghua}@microsoft.com
+</td></tr><tr><td>318e7e6daa0a799c83a9fdf7dd6bc0b3e89ab24a</td><td>Sparsity in Dynamics of Spontaneous
+<br/>Subtle Emotions: Analysis & Application
+</td><td>('35256518', 'Anh Cat Le Ngo', 'anh cat le ngo')<br/>('2339975', 'John See', 'john see')<br/>('6633183', 'Raphael C.-W. Phan', 'raphael c.-w. phan')</td><td></td></tr><tr><td>31c0968fb5f587918f1c49bf7fa51453b3e89cf7</td><td>Deep Transfer Learning for Person Re-identification
+</td><td>('3447059', 'Mengyue Geng', 'mengyue geng')<br/>('5765799', 'Yaowei Wang', 'yaowei wang')<br/>('1700927', 'Tao Xiang', 'tao xiang')<br/>('1705972', 'Yonghong Tian', 'yonghong tian')</td><td></td></tr><tr><td>313d5eba97fe064bdc1f00b7587a4b3543ef712a</td><td>Compact Deep Aggregation for Set Retrieval
+<br/><b>Visual Geometry Group, University of Oxford, UK</b><br/>2 DeepMind
+</td><td>('6730372', 'Yujie Zhong', 'yujie zhong')<br/>('1688869', 'Andrew Zisserman', 'andrew zisserman')</td><td>{yujie,az}@robots.ox.ac.uk
+<br/>relja@google.com
+</td></tr><tr><td>31e57fa83ac60c03d884774d2b515813493977b9</td><td></td><td></td><td></td></tr><tr><td>3137a3fedf23717c411483c7b4bd2ed646258401</td><td>Joint Learning of Discriminative Prototypes
+<br/>and Large Margin Nearest Neighbor Classifiers
+<br/><b>Institute for Computer Graphics and Vision, Graz University of Technology</b></td><td>('3202367', 'Paul Wohlhart', 'paul wohlhart')<br/>('1791182', 'Peter M. Roth', 'peter m. roth')<br/>('3628150', 'Horst Bischof', 'horst bischof')</td><td>{koestinger,wohlhart,pmroth,bischof}@icg.tugraz.at
+</td></tr><tr><td>31c34a5b42a640b824fa4e3d6187e3675226143e</td><td>Shape and Texture based Facial Action and Emotion
+<br/>Recognition
+<br/>(Demonstration)
+<br/>Department of Computer Science and Digital Technologies
+<br/><b>Northumbria University</b><br/>Newcastle, NE1 8ST, UK
+</td><td>('1712838', 'Li Zhang', 'li zhang')<br/>('2801063', 'Kamlesh Mistry', 'kamlesh mistry')</td><td>{li.zhang, kamlesh.mistry, alamgir.hossain}@northumbria.ac.uk
+</td></tr><tr><td>316e67550fbf0ba54f103b5924e6537712f06bee</td><td>Multimodal semi-supervised learning
+<br/>for image classification
+<br/>LEAR team, INRIA Grenoble, France
+</td><td>('2737253', 'Matthieu Guillaumin', 'matthieu guillaumin')<br/>('2462253', 'Cordelia Schmid', 'cordelia schmid')</td><td></td></tr><tr><td>31ef5419e026ef57ff20de537d82fe3cfa9ee741</td><td>Facial Expression Analysis Based on
+<br/>High Dimensional Binary Features
+<br/>´Ecole Polytechique de Montr´eal, Universit´e de Montr´eal, Montr´eal, Canada
+</td><td>('3127597', 'Samira Ebrahimi Kahou', 'samira ebrahimi kahou')<br/>('2558801', 'Pierre Froumenty', 'pierre froumenty')</td><td>{samira.ebrahimi-kahou, pierre.froumenty, christopher.pal}@polymtl.ca
+</td></tr><tr><td>31ea88f29e7f01a9801648d808f90862e066f9ea</td><td>Published as a conference paper at ICLR 2017
+<br/>DEEP MULTI-TASK REPRESENTATION LEARNING:
+<br/>A TENSOR FACTORISATION APPROACH
+<br/><b>Queen Mary, University of London</b></td><td>('2653152', 'Yongxin Yang', 'yongxin yang')<br/>('1697755', 'Timothy M. Hospedales', 'timothy m. hospedales')</td><td>{yongxin.yang, t.hospedales}@qmul.ac.uk
+</td></tr><tr><td>3176ee88d1bb137d0b561ee63edf10876f805cf0</td><td>Recombinator Networks: Learning Coarse-to-Fine Feature Aggregation
+<br/><b>University of Montreal, 2Cornell University, 3Ecole Polytechnique of Montreal, 4CIFAR</b></td><td>('25056820', 'Sina Honari', 'sina honari')<br/>('2965424', 'Jason Yosinski', 'jason yosinski')<br/>('1707326', 'Pascal Vincent', 'pascal vincent')</td><td>1{honaris, vincentp}@iro.umontreal.ca, 2yosinski@cs.cornell.edu, 3christopher.pal@polymtl.ca
+</td></tr><tr><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td><td></td><td></td><td></td></tr><tr><td>31835472821c7e3090abb42e57c38f7043dc3636</td><td>Flow Counting Using Realboosted
+<br/>Multi-sized Window Detectors
+<br/><b>Lund University, Cognimatics AB</b></td><td>('38481779', 'Mikael Nilsson', 'mikael nilsson')<br/>('3181258', 'Rikard Berthilsson', 'rikard berthilsson')</td><td></td></tr><tr><td>312b2566e315dd6e65bd42cfcbe4d919159de8a1</td><td>An Accurate Algorithm for Generating a Music Playlist
+<br/>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 100– No.9, August 2014
+<br/>based on Facial Expressions
+<br/>Computer Science and Engineering Department
+<br/>Amity School of Engineering & Technology,
+<br/><b>Amity University, Noida, India</b></td><td></td><td></td></tr><tr><td>3152e89963b8a4028c4abf6e1dc19e91c4c5a8f4</td><td>Exploring Stereotypes and Biased Data with the Crowd
+<br/>Department of Computer Science
+<br/><b>The University of Texas at Austin</b><br/>Department of Computer Science
+<br/><b>The University of Texas at Austin</b><br/>Introduction
+<br/>In 2016, Baidu and Google spent somewhere between
+<br/>twenty and thirty billion dollars developing and acquir-
+<br/>ing artificial intelligence and machine learning technolo-
+<br/><b>gies (Bughin et al. 2017). A range of other sectors, includ</b><br/>ing health care, education, and manufacturing, are also pre-
+<br/>dicted to adopt these technologies at increasing rates. Ma-
+<br/>chine learning and AI are proven to have the capacity to
+<br/>greatly improve lives and spur innovation. However, as soci-
+<br/>ety becomes increasingly dependent on these technologies,
+<br/>it is crucial that we acknowledge some of the dangers, in-
+<br/>cluding the capacity for these algorithms to absorb and am-
+<br/>plify harmful cultural biases.
+<br/>Algorithms are often praised for their objectivity, but ma-
+<br/>chine learning algorithms have increasingly made news for a
+<br/>number of problematic outcomes, ranging from Google Pho-
+<br/>the judicial system using algorithms that are biased against
+<br/>African Americans (Dougherty 2015; Angwin et al. 2016).
+<br/>These harmful outcomes can be traced back to the data that
+<br/>was used to train the models.
+<br/>Machine learning applications put a heavy premium on
+<br/>data quantity. Research communities generally believe that
+<br/>the more training data there is, the better the learning out-
+<br/>come of the models will be (Halevy, Norvig, and Pereira
+<br/>2009). This has led to large scale data collection. How-
+<br/>ever, unless extra care is taken by the researchers, these
+<br/>large data sets will often contain bias that can profoundly
+<br/>change the learning outcome. Even minimal bias within
+<br/>a data set can end up being amplified by machine learn-
+<br/>ing models, leading to skewed results. Researchers have
+<br/>found that widely used image data sets imSitu and MS-
+<br/>COCO, along with textual data sets mined from Google
+<br/>News, contain significant gender bias (Zhao et al. 2017;
+<br/>Bolukbasi et al. 2016). This research also found that train-
+<br/>ing models with this data amplified the bias in the final out-
+<br/>comes.
+<br/>Once these algorithms have been improperly trained they
+<br/>can then be implemented into feedback loops where systems
+<br/>“define their own reality and use it to justify their results” as
+<br/>Copyright c(cid:13) 2018 is held by the authors. Copies may be freely
+<br/>made and distributed by others. Presented at the 2016 AAAI Con-
+<br/>ference on Human Computation and Crowdsourcing (HCOMP).
+<br/>Cathy O’Neil describes in her book Weapons of Math De-
+<br/>struction. O’Neil discusses problematic systems like Pred-
+<br/>Pol, a program that predicts where crimes are most likely to
+<br/>occur based on past crime reports, which may unfairly target
+<br/>poor communities.
+<br/>It therefore becomes necessary to consider the bias that
+<br/>may be introduced as a data set is being collected and to
+<br/>attempt to prevent that bias from being absorbed by an al-
+<br/>gorithm. We propose using the crowd to help uncover what
+<br/>bias may reside in a specific data set.
+<br/>The crowd has potential to be useful for this task. One
+<br/>of the key difficulties in preventing bias is knowing what
+<br/>to look for. The varied demographics of crowd workers pro-
+<br/>vide an extended range of perspectives that can help uncover
+<br/>stereotypes that may go unnoticed by a small group of re-
+<br/>searchers. Some work has already been conducted in this
+<br/>area, and Bolukbasi et al. (2016) found that the crowd was
+<br/>useful in determining the level of stereotype associated with
+<br/>ased words by asking the crowd to rate analogies such as
+<br/>“she is to sewing as he is to carpentry”. We want to extend
+<br/><b>our analysis to stereotypes beyond gender, including those</b><br/>surrounding race and class.
+<br/>The goal of our research is to contribute information about
+<br/>how useful the crowd is at anticipating stereotypes that may
+<br/>be biasing a data set without a researcher’s knowledge. The
+<br/>results of the crowd’s prediction can potentially be used dur-
+<br/>ing data collection to help prevent the suspected stereotypes
+<br/>from introducing bias to the dataset. We conduct our re-
+<br/>search by asking the crowd on Amazon’s Mechanical Turk
+<br/>(AMT) to complete two similar Human Intelligence Tasks
+<br/>(HITs) by suggesting stereotypes relating to their personal
+<br/>experience. Our analysis of these responses focuses on de-
+<br/>termining the level of diversity in the workers’ suggestions
+<br/>and their demographics. Through this process we begin a
+<br/>discussion on how useful the crowd can be in tackling this
+<br/>difficult problem within machine learning data collection.
+<br/>2 Related Work
+<br/>2.1 Work on bias in data sets and amplification
+<br/>As biased data sets get more coverage in the news, an in-
+<br/>creasing amount of research has been conducted around de-
+<br/>termining if data sets are biased and trying to mitigate the
+</td><td>('32193161', 'Zeyuan Hu', 'zeyuan hu')<br/>('40410119', 'Julia Strout', 'julia strout')</td><td>iamzeyuanhu@utexas.edu
+<br/>jstrout@utexas.edu
+</td></tr><tr><td>31ace8c9d0e4550a233b904a0e2aabefcc90b0e3</td><td>Learning Deep Face Representation
+<br/>Megvii Inc.
+<br/>Megvii Inc.
+<br/>Megvii Inc.
+<br/>Megvii Inc.
+<br/>Megvii Inc.
+</td><td>('1934546', 'Haoqiang Fan', 'haoqiang fan')<br/>('2695115', 'Zhimin Cao', 'zhimin cao')<br/>('1691963', 'Yuning Jiang', 'yuning jiang')<br/>('2274228', 'Qi Yin', 'qi yin')<br/>('2479859', 'Chinchilla Doudou', 'chinchilla doudou')</td><td>fhq@megvii.com
+<br/>czm@megvii.com
+<br/>jyn@megvii.com
+<br/>yq@megvii.com
+<br/>doudou@megvii.com
+</td></tr><tr><td>316d51aaa37891d730ffded7b9d42946abea837f</td><td>CBMM Memo No. 23
+<br/>April 27, 2015
+<br/>Unsupervised learning of clutter-resistant visual
+<br/>representations from natural videos
+<br/>by
+<br/><b>MIT, McGovern Institute, Center for Brains, Minds and Machines</b></td><td>('1694846', 'Qianli Liao', 'qianli liao')</td><td></td></tr><tr><td>31afdb6fa95ded37e5871587df38976fdb8c0d67</td><td>QUANTIZED FUZZY LBP FOR FACE RECOGNITION
+<br/>Jianfeng
+<br/>Ren
+<br/>Junsong
+<br/>Yuan
+<br/>BeingThere
+<br/>Centre
+<br/><b>Institute</b><br/>of Media Innovation
+<br/>Nanyang
+<br/>50 Nanyang
+<br/>Technological
+<br/>Singapore
+<br/>Drive,
+<br/>637553.
+<br/><b>University</b><br/>School of Electrical
+<br/>& Electronics
+<br/>Engineering
+<br/>Nanyang
+<br/>50 Nanyang
+<br/>Technological
+<br/>Singapore
+<br/>Avenue,
+<br/>639798
+<br/><b>University</b></td><td>('3307580', 'Xudong Jiang', 'xudong jiang')</td><td></td></tr><tr><td>31d60b2af2c0e172c1a6a124718e99075818c408</td><td>Robust Facial Expression Recognition using Near Infrared Cameras
+<br/>Paper: jc*-**-**-****
+<br/>Robust Facial Expression Recognition using Near Infrared
+<br/>Cameras
+<br/><b>The University of Tokyo</b><br/><b>Electronics and Communication Engineering, Chuo University</b><br/>[Received 00/00/00; accepted 00/00/00]
+</td><td>('34415055', 'Hideki Hashimoto', 'hideki hashimoto')<br/>('9181040', 'Takashi Kubota', 'takashi kubota')</td><td></td></tr><tr><td>31f1e711fcf82c855f27396f181bf5e565a2f58d</td><td>Unconstrained Age Estimation with Deep Convolutional Neural Networks
+<br/>Jun Cheng Chen1
+<br/><b>University of Maryland</b><br/>2Montgomery Blair High School
+<br/><b>Rutgers University</b></td><td>('26988560', 'Rajeev Ranjan', 'rajeev ranjan')<br/>('2349530', 'Sabrina Zhou', 'sabrina zhou')<br/>('40080979', 'Amit Kumar', 'amit kumar')<br/>('2943431', 'Azadeh Alavi', 'azadeh alavi')<br/>('1741177', 'Vishal M. Patel', 'vishal m. patel')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td>rranjan1@.umiacs.umd.edu, sabrina.zhou.m@gmail.com, {pullpull,akumar14,azadeh}@umiacs.umd.edu,
+<br/>vishal.m.patel@rutgers.edu, Rama@umiacs.umd.edu
+</td></tr><tr><td>312afff739d1e0fcd3410adf78be1c66b3480396</td><td></td><td></td><td></td></tr><tr><td>3107085973617bbfc434c6cb82c87f2a952021b7</td><td>Spatio-temporal Human Action Localisation and
+<br/>Instance Segmentation in Temporally Untrimmed Videos
+<br/><b>Oxford Brookes University</b><br/><b>University of Oxford</b><br/>Figure 1: A video sequence taken from the LIRIS-HARL dataset plotted in space-and time. (a) A top down view of the
+<br/>video plotted with the detected action tubes of class ‘handshaking’ in green, and ‘person leaves baggage unattended’ in
+<br/>red. Each action is located to be within a space-time tube. (b) A side view of the same space-time detections. Note that
+<br/>no action is detected at the beginning of the video when there is human motion present in the video. (c) The detection
+<br/>and instance segmentation result of two actions occurring simultaneously in a single frame.
+</td><td>('3017538', 'Suman Saha', 'suman saha')<br/>('1931660', 'Gurkirt Singh', 'gurkirt singh')<br/>('3019396', 'Michael Sapienza', 'michael sapienza')<br/>('1730268', 'Philip H. S. Torr', 'philip h. s. torr')<br/>('1754181', 'Fabio Cuzzolin', 'fabio cuzzolin')</td><td>{suman.saha-2014, gurkirt.singh-2015, fabio.cuzzolin}@brookes.ac.uk
+<br/>{michael.sapienza, philip.torr}@eng.ox.ac.uk
+</td></tr><tr><td>31182c5ffc8c5d8772b6db01ec98144cd6e4e897</td><td>3D Face Reconstruction with Region Based Best Fit Blending Using
+<br/>Mobile Phone for Virtual Reality Based Social Media
+<br/>VALGMA 1∗
+<br/><b>iCV Research Group, Institute of Technology, University of Tartu, Tartu 50411, Estonia</b><br/><b>Hasan Kalyoncu University, Gaziantep, Turkey</b></td><td>('3087532', 'Gholamreza Anbarjafari', 'gholamreza anbarjafari')<br/>('35447268', 'Rain Eric Haamer', 'rain eric haamer')<br/>('7296001', 'Iiris Lüsi', 'iiris lüsi')<br/>('12602781', 'Toomas Tikk', 'toomas tikk')</td><td></td></tr><tr><td>31bb49ba7df94b88add9e3c2db72a4a98927bb05</td><td></td><td></td><td></td></tr><tr><td>3146fabd5631a7d1387327918b184103d06c2211</td><td>Person-independent 3D Gaze Estimation using Face Frontalization
+<br/>L´aszl´o A. Jeni
+<br/><b>Carnegie Mellon University</b><br/><b>University of Pittsburgh</b><br/>Pittsburgh, PA, USA
+<br/>Pittsburgh, PA, USA
+<br/>Figure 1: From a 2D image of a person’s face (a) a dense, part-based 3D deformable model is aligned (b) to reconstruct a partial frontal
+<br/>view of the face (c). Binary features are extracted around eye and pupil markers (d) for the 3D gaze calculation (e).
+</td><td>('1737918', 'Jeffrey F. Cohn', 'jeffrey f. cohn')</td><td>laszlojeni@cmu.edu
+<br/>jeffcohn@pitt.edu
+</td></tr><tr><td>91811203c2511e919b047ebc86edad87d985a4fa</td><td>Expression Subspace Projection for Face
+<br/>Recognition from Single Sample per Person
+</td><td>('1782221', 'Hoda Mohammadzade', 'hoda mohammadzade')</td><td></td></tr><tr><td>91495c689e6e614247495c3f322d400d8098de43</td><td>A Deep-Learning Approach to Facial Expression Recognition
+<br/>with Candid Images
+<br/>Wei Li
+<br/><b>CUNY City College</b><br/>Min Li
+<br/>Alibaba. Inc
+<br/>Zhong Su
+<br/><b>IBM China Research Lab</b><br/>Zhigang Zhu
+<br/><b>CUNY Graduate Center and City College</b></td><td></td><td>lwei000@citymail.cuny.edu
+<br/>mushi.lm@alibaba.inc
+<br/>suzhong@cn.ibm.com
+<br/>zhu@cs.ccny.cuny.edu
+</td></tr><tr><td>910524c0d0fe062bf806bb545627bf2c9a236a03</td><td>Master Thesis
+<br/>Improvement of Facial Expression Recognition through the
+<br/>Evaluation of Dynamic and Static Features in Video Sequences
+<br/>Submitted by:
+<br/>Dated:
+<br/> 24th June, 2008
+<br/>Supervisors:
+<br/><b>Otto-von-Guericke University Magdeburg</b><br/>Faculty of Computer Science
+<br/>Department of Simulation und Graphics
+<br/><b>Otto-von-Guericke University Magdeburg</b><br/>Faculty of Electrical Engineering and Information Technology
+<br/><b>Institute for Electronics, Signal Processing and Communications</b><br/><b></b></td><td>('1692049', 'Klaus Toennies', 'klaus toennies')<br/>('1741165', 'Ayoub Al-Hamadi', 'ayoub al-hamadi')</td><td></td></tr><tr><td>9117fd5695582961a456bd72b157d4386ca6a174</td><td>Facial Expression
+<br/>n Recognition Using Dee
+<br/>ep Neural
+<br/>Networks
+<br/>Departm
+<br/>ment of Electrical and Electronic Engineering
+<br/><b>he University of Hong Kong, Pokfulam</b><br/>Hong Kong
+</td><td>('8550244', 'Junnan Li', 'junnan li')<br/>('1725389', 'Edmund Y. Lam', 'edmund y. lam')</td><td></td></tr><tr><td>91df860368cbcebebd83d59ae1670c0f47de171d</td><td>COCO Attributes:
+<br/>Attributes for People, Animals, and Objects
+<br/>Microsoft Research
+<br/><b>Georgia Institute of Technology</b></td><td>('40541456', 'Genevieve Patterson', 'genevieve patterson')<br/>('12532254', 'James Hays', 'james hays')</td><td>gen@microsoft.com
+<br/>hays@gatech.edu
+</td></tr><tr><td>91067f298e1ece33c47df65236853704f6700a0b</td><td>IJSTE - International Journal of Science Technology & Engineering | Volume 2 | Issue 11 | May 2016
+<br/>ISSN (online): 2349-784X
+<br/>Local Binary Pattern and Local Linear
+<br/>Regression for Pose Invariant Face Recognition
+<br/>M. Tech Student
+<br/>
+<br/>Shreekumar T
+<br/>Associate Professor
+<br/>Department of Computer Science & Engineering
+<br/>Department of Computer Science & Engineering
+<br/><b>Mangalore Institute of Engineering and Technology, Badaga</b><br/><b>Mangalore Institute of Engineering and Technology, Badaga</b><br/>Mijar, Moodbidri, Mangalore
+<br/>Mijar, Moodbidri, Mangalore
+<br/>Karunakara K
+<br/>Professor & Head of Dept.
+<br/>Department of Information Science & Engineering
+<br/><b>Sri SidarthaInstitute of Technology, Tumkur</b></td><td></td><td></td></tr><tr><td>919d3067bce76009ce07b070a13728f549ebba49</td><td>International Journal of Scientific and Research Publications, Volume 4, Issue 6, June 2014
+<br/>ISSN 2250-3153
+<br/>1
+<br/>Time Based Re-ranking for Web Image Search
+<br/>Ms. A.Udhayabharadhi *, Mr. R.Ramachandran **
+<br/><b>MCA Student, Sri Manakula Vinayagar Engineering College, Pondicherry</b><br/><b>Sri Manakula Vinayagar Engineering College, Pondicherry</b></td><td></td><td></td></tr><tr><td>9110c589c6e78daf4affd8e318d843dc750fb71a</td><td>Chapter 6
+<br/>Facial Expression Synthesis Based on Emotion
+<br/>Dimensions for Affective Talking Avatar
+<br/>1 Key Laboratory of Pervasive Computing, Ministry of Education
+<br/>Tsinghua National Laboratory for Information Science and Technology
+<br/>Department of Computer Science and Technology,
+<br/><b>Tsinghua University, Beijing 100084, China</b><br/><b>Tsinghua-CUHK Joint Research Center for Media Sciences</b><br/>Technologies and Systems,
+<br/><b>Graduate School at Shenzhen, Tsinghua University, Shenzhen</b><br/>3 Department of Systems Engineering and Engineering Management
+<br/><b>The Chinese University of Hong Kong, HKSAR, China</b></td><td>('2180849', 'Shen Zhang', 'shen zhang')<br/>('3860920', 'Zhiyong Wu', 'zhiyong wu')<br/>('1702243', 'Helen M. Meng', 'helen m. meng')<br/>('7239047', 'Lianhong Cai', 'lianhong cai')</td><td>zhangshen05@mails.tsinghua.edu.cn, john.zy.wu@gmail.com,
+<br/>hmmeng@se.cuhk.edu.hk, clh-dcs@tsinghua.edu.cn
+</td></tr><tr><td>91e57667b6fad7a996b24367119f4b22b6892eca</td><td>Probabilistic Corner Detection for Facial Feature
+<br/>Extraction
+<br/>Article
+<br/>Accepted version
+<br/>E. Ardizzone, M. La Cascia, M. Morana
+<br/>In Lecture Notes in Computer Science Volume 5716, 2009
+<br/>It is advisable to refer to the publisher's version if you intend to cite
+<br/>from the work.
+<br/>Publisher: Springer
+<br/>http://link.springer.com/content/pdf/10.1007%2F978-3-
+<br/>642-04146-4_50.pdf
+</td><td></td><td></td></tr><tr><td>91883dabc11245e393786d85941fb99a6248c1fb</td><td></td><td></td><td></td></tr><tr><td>917bea27af1846b649e2bced624e8df1d9b79d6f</td><td>Ultra Power-Efficient CNN Domain Specific Accelerator with 9.3TOPS/Watt for
+<br/>Mobile and Embedded Applications
+<br/>Gyrfalcon Technology Inc.
+<br/>1900 McCarthy Blvd. Milpitas, CA 95035
+</td><td>('47935028', 'Baohua Sun', 'baohua sun')<br/>('49576071', 'Lin Yang', 'lin yang')<br/>('46195424', 'Patrick Dong', 'patrick dong')<br/>('49039276', 'Wenhan Zhang', 'wenhan zhang')<br/>('35287113', 'Jason Dong', 'jason dong')<br/>('48990565', 'Charles Young', 'charles young')</td><td>{baohua.sun,lin.yang,patrick.dong,wenhan.zhang,jason.dong,charles.yang}@gyrfalcontech.com
+</td></tr><tr><td>91b1a59b9e0e7f4db0828bf36654b84ba53b0557</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI
+<br/>> REPLACE THIS LINE WITH YOUR PAPER IDENTIFICATION NUMBER (DOUBLE-CLICK HERE TO EDIT) <
+<br/>
+<br/>Simultaneous Hallucination and Recognition of
+<br/>Low-Resolution Faces Based on Singular Value
+<br/>Decomposition
+<br/>(SVD)
+<br/>for performing both
+</td><td>('1783889', 'Muwei Jian', 'muwei jian')<br/>('1703078', 'Kin-Man Lam', 'kin-man lam')</td><td></td></tr><tr><td>911bef7465665d8b194b6b0370b2b2389dfda1a1</td><td>RANJAN, ROMERO, BLACK: LEARNING HUMAN OPTICAL FLOW
+<br/>Learning Human Optical Flow
+<br/>1 MPI for Intelligent Systems
+<br/>Tübingen, Germany
+<br/>2 Amazon Inc.
+</td><td>('1952002', 'Anurag Ranjan', 'anurag ranjan')<br/>('39040964', 'Javier Romero', 'javier romero')<br/>('2105795', 'Michael J. Black', 'michael j. black')</td><td>aranjan@tuebingen.mpg.de
+<br/>javier@amazon.com
+<br/>black@tuebingen.mpg.de
+</td></tr><tr><td>91ead35d1d2ff2ea7cf35d15b14996471404f68d</td><td>Combining and Steganography of 3D Face Textures
+</td><td>('38478675', 'Mohsen Moradi', 'mohsen moradi')</td><td></td></tr><tr><td>919d0e681c4ef687bf0b89fe7c0615221e9a1d30</td><td></td><td></td><td></td></tr><tr><td>912a6a97af390d009773452814a401e258b77640</td><td></td><td></td><td></td></tr><tr><td>91d513af1f667f64c9afc55ea1f45b0be7ba08d4</td><td>Automatic Face Image Quality Prediction
+</td><td>('2180413', 'Lacey Best-Rowden', 'lacey best-rowden')<br/>('6680444', 'Anil K. Jain', 'anil k. jain')</td><td></td></tr><tr><td>91e507d2d8375bf474f6ffa87788aa3e742333ce</td><td>Robust Face Recognition Using Probabilistic
+<br/>Facial Trait Code
+<br/>†Department of Computer Science and Information Engineering, National Taiwan
+<br/><b>Graduate Institute of Networking and Multimedia, National Taiwan University</b><br/><b>National Taiwan University of Science and</b><br/><b>University</b><br/>Technology
+</td><td>('1822733', 'Ping-Han Lee', 'ping-han lee')<br/>('38801529', 'Gee-Sern Hsu', 'gee-sern hsu')<br/>('2250469', 'Szu-Wei Wu', 'szu-wei wu')<br/>('1732064', 'Yi-Ping Hung', 'yi-ping hung')</td><td></td></tr><tr><td>918b72a47b7f378bde0ba29c908babf6dab6f833</td><td></td><td></td><td></td></tr><tr><td>91e58c39608c6eb97b314b0c581ddaf7daac075e</td><td>Pixel-wise Ear Detection with Convolutional
+<br/>Encoder-Decoder Networks
+</td><td>('31834768', 'Luka Lan Gabriel', 'luka lan gabriel')<br/>('34862665', 'Peter Peer', 'peter peer')</td><td></td></tr><tr><td>91d2fe6fdf180e8427c65ffb3d895bf9f0ec4fa0</td><td></td><td></td><td></td></tr><tr><td>9103148dd87e6ff9fba28509f3b265e1873166c9</td><td>Face Analysis using 3D Morphable Models
+<br/>Submitted for the Degree of
+<br/>Doctor of Philosophy
+<br/>from the
+<br/><b>University of Surrey</b><br/>Centre for Vision, Speech and Signal Processing
+<br/>Faculty of Engineering and Physical Sciences
+<br/><b>University of Surrey</b><br/>Guildford, Surrey GU2 7XH, U.K.
+<br/>April 2015
+</td><td>('38819702', 'Guosheng Hu', 'guosheng hu')<br/>('38819702', 'Guosheng Hu', 'guosheng hu')</td><td></td></tr><tr><td>9131c990fad219726eb38384976868b968ee9d9c</td><td>Deep Facial Expression Recognition: A Survey
+</td><td>('39433609', 'Shan Li', 'shan li')<br/>('1774956', 'Weihong Deng', 'weihong deng')</td><td></td></tr><tr><td>911505a4242da555c6828509d1b47ba7854abb7a</td><td>IMPROVED ACTIVE SHAPE MODEL FOR FACIAL FEATURE LOCALIZATION
+<br/><b>National Formosa University, Taiwan</b></td><td>('1711364', 'Hui-Yu Huang', 'hui-yu huang')<br/>('2782376', 'Shih-Hang Hsu', 'shih-hang hsu')</td><td>Email: hyhuang@nfu.edu.tw
+</td></tr><tr><td>915d4a0fb523249ecbc88eb62cb150a60cf60fa0</td><td>Comparison of Feature Extraction Techniques in Automatic
+<br/>Face Recognition Systems for Security Applications
+<br/>S . Cruz-Llanas, J. Ortega-Garcia, E. Martinez-Torrico, J. Gonzalez-Rodriguez
+<br/>Dpto. Ingenieria Audiovisual y Comunicaciones, EUIT Telecomunicacion, Univ. PolitCcnica de Madrid, Spain
+<br/>http://www.atvs.diac.upm.es
+</td><td></td><td>{cruzll, jortega, etorrico, jgonzalz}@atvs.diac.upm.es.
+</td></tr><tr><td>65126e0b1161fc8212643b8ff39c1d71d262fbc1</td><td>Occlusion Coherence: Localizing Occluded Faces with a
+<br/>Hierarchical Deformable Part Model
+<br/><b>University of California, Irvine</b></td><td>('1898210', 'Golnaz Ghiasi', 'golnaz ghiasi')</td><td>{gghiasi,fowlkes}@ics.uci.edu
+</td></tr><tr><td>65b737e5cc4a565011a895c460ed8fd07b333600</td><td>Transfer Learning For Cross-Dataset Recognition: A Survey
+<br/>This paper summarises and analyses the cross-dataset recognition transfer learning techniques with the
+<br/>emphasis on what kinds of methods can be used when the available source and target data are presented
+<br/>in different forms for boosting the target task. This paper for the first time summarises several transferring
+<br/>criteria in details from the concept level, which are the key bases to guide what kind of knowledge to transfer
+<br/>between datasets. In addition, a taxonomy of cross-dataset scenarios and problems is proposed according the
+<br/>properties of data that define how different datasets are diverged, thereby review the recent advances on
+<br/>each specific problem under different scenarios. Moreover, some real world applications and corresponding
+<br/>commonly used benchmarks of cross-dataset recognition are reviewed. Lastly, several future directions are
+<br/>identified.
+<br/>Additional Key Words and Phrases: Cross-dataset, transfer learning, domain adaptation
+<br/>1. INTRODUCTION
+<br/>It has been explored how human would transfer learning in one context to another
+<br/>similar context [Woodworth and Thorndike 1901; Perkins et al. 1992] in the field of
+<br/>Psychology and Education. For example, learning to drive a car helps a person later
+<br/>to learn more quickly to drive a truck, and learning mathematics prepares students to
+<br/>study physics. The machine learning algorithms are mostly inspired by human brains.
+<br/>However, most of them require a huge amount of training examples to learn a new
+<br/>model from scratch and fail to apply knowledge learned from previous domains or
+<br/>tasks. This may be due to that a basic assumption of statistical learning theory is
+<br/>that the training and test data are drawn from the same distribution and belong to
+<br/>the same task. Intuitively, learning from scratch is not realistic and practical, because
+<br/>it violates how human learn things. In addition, manually labelling a large amount
+<br/>of data for new domain or task is labour extensive, especially for the modern “data-
+<br/>hungry” and “data-driven” learning techniques (i.e. deep learning). However, the big
+<br/>data era provides a huge amount available data collected for other domains and tasks.
+<br/>Hence, how to use the previously available data smartly for the current task with
+<br/>scarce data will be beneficial for real world applications.
+<br/>To reuse the previous knowledge for current tasks, the differences between old data
+<br/>and new data need to be taken into account. Take the object recognition as an ex-
+<br/>ample. As claimed by Torralba and Efros [2011], despite the great efforts of object
+<br/>datasets creators, the datasets appear to have strong build-in bias caused by various
+<br/>factors, such as selection bias, capture bias, category or label bias, and negative set
+<br/>bias. This suggests that no matter how big the dataset is, it is impossible to cover
+<br/>the complexity of the real visual world. Hence, the dataset bias needs to be consid-
+<br/>ered before reusing data from previous datasets. Pan and Yang [2010] summarise that
+<br/>the differences between different datasets can be caused by domain divergence (i.e.
+<br/>distribution shift or feature space difference) or task divergence (i.e. conditional dis-
+<br/>tribution shift or label space difference), or both. For example, in visual recognition,
+<br/>the distributions between the previous and current data can be discrepant due to the
+<br/>different environments, lighting, background, sensor types, resolutions, view angles,
+<br/>and post-processing. Those external factors may cause the distribution divergence or
+<br/>even feature space divergence between different domains. On the other hand, the task
+<br/>divergence between current and previous data is also ubiquitous. For example, it is
+<br/>highly possible that an animal species that we want to recognize have not been seen
+<br/>ACM Journal Name, Vol. V, No. N, Article A, Publication date: January YYYY.
+</td><td>('38791459', 'Jing Zhang', 'jing zhang')<br/>('1685696', 'Wanqing Li', 'wanqing li')<br/>('1719314', 'Philip Ogunbona', 'philip ogunbona')</td><td></td></tr><tr><td>6582f4ec2815d2106957215ca2fa298396dde274</td><td>JUNE 2007
+<br/>1005
+<br/>Discriminative Learning and Recognition
+<br/>of Image Set Classes Using
+<br/>Canonical Correlations
+</td><td>('1700968', 'Tae-Kyun Kim', 'tae-kyun kim')<br/>('1748684', 'Josef Kittler', 'josef kittler')<br/>('1745672', 'Roberto Cipolla', 'roberto cipolla')</td><td></td></tr><tr><td>65b1760d9b1541241c6c0222cc4ee9df078b593a</td><td>Enhanced Pictorial Structures for Precise Eye Localization
+<br/>Under Uncontrolled Conditions
+<br/>1Department of Computer Science and Engineering
+<br/><b>Nanjing University of Aeronautics and Astronautics, Nanjing 210016, China</b><br/>2National Key Laboratory for Novel Software Technology
+<br/><b>Nanjing University, Nanjing 210093, China</b></td><td>('2248421', 'Xiaoyang Tan', 'xiaoyang tan')<br/>('3075941', 'Fengyi Song', 'fengyi song')<br/>('1692625', 'Zhi-Hua Zhou', 'zhi-hua zhou')<br/>('1680768', 'Songcan Chen', 'songcan chen')</td><td>{x.tan, f.song, s.chen}@nuaa.edu.cn
+<br/>zhouzh@lamda.nju.edu.cn
+</td></tr><tr><td>65d7f95fcbabcc3cdafc0ad38e81d1f473bb6220</td><td>Face Recognition for the Visually Impaired
+<br/><b>King Saud University, Riyadh, Saudi Arabia</b><br/>2ISM-TEC LLC, Wilmington, Delaware, U.S.A
+<br/><b>University of Georgia, Athens, GA, U.S.A</b></td><td>('2278811', 'Rabia Jafri', 'rabia jafri')<br/>('2227653', 'Syed Abid Ali', 'syed abid ali')<br/>('1712033', 'Hamid R. Arabnia', 'hamid r. arabnia')</td><td></td></tr><tr><td>65bba9fba03e420c96ec432a2a82521ddd848c09</td><td>Connectionist Temporal Modeling for Weakly
+<br/>Supervised Action Labeling
+<br/><b>Stanford University</b></td><td>('38485317', 'De-An Huang', 'de-an huang')<br/>('3216322', 'Li Fei-Fei', 'li fei-fei')<br/>('9200530', 'Juan Carlos Niebles', 'juan carlos niebles')</td><td>{dahuang,feifeili,jniebles}@cs.stanford.edu
+</td></tr><tr><td>656531036cee6b2c2c71954bb6540ef6b2e016d0</td><td>W. LIU ET AL.: JOINTLY LEARNING NON-NEGATIVE PROJECTION AND DICTIONARY 1
+<br/>Jointly Learning Non-negative Projection
+<br/>and Dictionary with Discriminative Graph
+<br/>Constraints for Classification
+<br/>Yandong Wen3
+<br/>Rongmei Lin4
+<br/>Meng Yang*1
+<br/><b>College of Computer Science</b><br/>Software Engineering,
+<br/><b>Shenzhen University, China</b><br/>2 School of ECE,
+<br/><b>Peking University, China</b><br/>3 Dept. of ECE,
+<br/><b>Carnegie Mellon University, USA</b><br/>4 Dept. of Math & Computer Science,
+<br/><b>Emory University, USA</b></td><td>('36326884', 'Weiyang Liu', 'weiyang liu')<br/>('1751019', 'Zhiding Yu', 'zhiding yu')</td><td>wyliu@pku.edu.cn
+<br/>yzhiding@andrew.cmu.edu
+<br/>yandongw@andrew.cmu.edu
+<br/>rongmei.lin@emory.edu
+<br/>yang.meng@szu.edu.cn
+</td></tr><tr><td>65b1209d38c259fe9ca17b537f3fb4d1857580ae</td><td>Information Constraints on Auto-Encoding Variational Bayes
+<br/><b>University of California, Berkeley</b><br/><b>University of California, Berkeley</b><br/><b>Ragon Institute of MGH, MIT and Harvard</b><br/>4Chan-Zuckerberg Biohub
+</td><td>('39848341', 'Romain Lopez', 'romain lopez')<br/>('39967607', 'Jeffrey Regier', 'jeffrey regier')<br/>('1694621', 'Michael I. Jordan', 'michael i. jordan')<br/>('2163873', 'Nir Yosef', 'nir yosef')</td><td>{romain_lopez, regier, niryosef}@berkeley.edu
+<br/>jordan@cs.berkeley.edu
+</td></tr><tr><td>655d9ba828eeff47c600240e0327c3102b9aba7c</td><td>IEEE TRANSACTIONS ON SYSTEMS, MAN, AND CYBERNETICS—PART B: CYBERNETICS, VOL. 35, NO. 3, JUNE 2005
+<br/>489
+<br/>Kernel Pooled Local Subspaces for Classification
+</td><td>('40409453', 'Peng Zhang', 'peng zhang')<br/>('1708023', 'Jing Peng', 'jing peng')<br/>('1741392', 'Carlotta Domeniconi', 'carlotta domeniconi')</td><td></td></tr><tr><td>656a59954de3c9fcf82ffcef926af6ade2f3fdb5</td><td>Convolutional Network Representation
+<br/>for Visual Recognition
+<br/>Doctoral Thesis
+<br/>Stockholm, Sweden, 2017
+</td><td>('2835963', 'Ali Sharif Razavian', 'ali sharif razavian')</td><td></td></tr><tr><td>652aac54a3caf6570b1c10c993a5af7fa2ef31ff</td><td><b>CARNEGIE MELLON UNIVERSITY</b><br/>STATISTICAL MODELING FOR NETWORKED VIDEO:
+<br/>CODING OPTIMIZATION, ERROR CONCEALMENT AND
+<br/>TRAFFIC ANALYSIS
+<br/>A DISSERTATION
+<br/>SUBMITTED TO THE GRADUATE SCHOOL
+<br/>IN PARTIAL FULFILLMENT OF THE REQUIREMENTS
+<br/>for the degree
+<br/>DOCTOR OF PHILOSOPHY
+<br/>in
+<br/>ELECTRICAL AND COMPUTER ENGINEERING
+<br/>by
+<br/>Pittsburgh, Pennsylvania
+<br/>July, 2001
+</td><td>('1727257', 'Deepak Srinivas Turaga', 'deepak srinivas turaga')</td><td></td></tr><tr><td>656ef752b363a24f84cc1aeba91e4fa3d5dd66ba</td><td>Robust Open-Set Face Recognition for
+<br/>Small-Scale Convenience Applications
+<br/><b>Institute for Anthropomatics</b><br/><b>Karlsruhe Institute of Technology</b><br/>Karlsruhe, Germany
+</td><td>('1697965', 'Hua Gao', 'hua gao')<br/>('1742325', 'Rainer Stiefelhagen', 'rainer stiefelhagen')</td><td>Email: {hua.gao, ekenel, rainer.stiefelhagen}@kit.edu
+</td></tr><tr><td>656aeb92e4f0e280576cbac57d4abbfe6f9439ea</td><td>Journal of Engineering Science and Technology
+<br/>Vol. 12, No. 1 (2017) 155 - 167
+<br/><b>School of Engineering, Taylor s University</b><br/>USE OF IMAGE ENHANCEMENT TECHNIQUES
+<br/>FOR IMPROVING REAL TIME FACE RECOGNITION EFFICIENCY
+<br/>ON WEARABLE GADGETS
+<br/><b>Asia Pacific University of Technology and Innovation, Kuala Lumpur 57000, Malaysia</b><br/><b>Staffordshire University, Beaconside Stafford ST18 0AB, United Kingdom</b></td><td>('22422404', 'MUHAMMAD EHSAN RANA', 'muhammad ehsan rana')</td><td>*Corresponding Author: muhd_ehsanrana@apu.edu.my
+</td></tr><tr><td>656f05741c402ba43bb1b9a58bcc5f7ce2403d9a</td><td></td><td>('2319574', 'Danila Potapov', 'danila potapov')</td><td></td></tr><tr><td>6577c76395896dd4d352f7b1ee8b705b1a45fa90</td><td>TOWARDS COMPUTATIONAL MODELS OF KINSHIP VERIFICATION
+<br/><b>Cornell University</b><br/><b>Cornell University</b></td><td>('2666471', 'Ruogu Fang', 'ruogu fang')<br/>('1830653', 'Noah Snavely', 'noah snavely')<br/>('1746230', 'Tsuhan Chen', 'tsuhan chen')</td><td></td></tr><tr><td>650bfe7acc3f03eb4ba91d9f93da8ef0ae8ba772</td><td>A Deep Learning Approach for Subject Independent Emotion
+<br/>Recognition from Facial Expressions
+<br/>*Faculty of Electronics, Telecommunications & Information Technology
+<br/><b>Polytechnic University of Bucharest</b><br/>Splaiul Independentei No. 313, Sector 6, Bucharest,
+<br/>ROMANIA
+<br/>**Department of Information Engineering and Computer Science
+<br/><b>University of Trento</b><br/>ITALY
+</td><td>('3178525', 'VICTOR-EMIL NEAGOE', 'victor-emil neagoe')</td><td>victoremil@gmail.com, andreibarar@gmail.com, robitupaul@gmail.com
+<br/>sebe@disi.unitn.it
+</td></tr><tr><td>65293ecf6a4c5ab037a2afb4a9a1def95e194e5f</td><td>Face, Age and Gender Recognition
+<br/>using Local Descriptors
+<br/>by
+<br/>Thesis submitted to the
+<br/>Faculty of Graduate and Postdoctoral Studies
+<br/>In partial fulfillment of the requirements
+<br/>For the M.A.Sc. degree in
+<br/>Electrical and Computer Engineering
+<br/>School of Electrical Engineering and Computer Science
+<br/>Faculty of Engineering
+<br/><b>University of Ottawa</b></td><td>('15604275', 'Mohammad Esmaeel Mousa Pasandi', 'mohammad esmaeel mousa pasandi')<br/>('15604275', 'Mohammad Esmaeel Mousa Pasandi', 'mohammad esmaeel mousa pasandi')</td><td></td></tr><tr><td>65817963194702f059bae07eadbf6486f18f4a0a</td><td>http://dx.doi.org/10.1007/s11263-015-0814-0
+<br/>WhittleSearch: Interactive Image Search with Relative Attribute
+<br/>Feedback
+<br/>Received: date / Accepted: date
+</td><td>('1770205', 'Adriana Kovashka', 'adriana kovashka')</td><td></td></tr><tr><td>6581c5b17db7006f4cc3575d04bfc6546854a785</td><td>Contextual Person Identification
+<br/>in Multimedia Data
+<br/>zur Erlangung des akademischen Grades eines
+<br/>Doktors der Ingenieurwissenschaften
+<br/>der Fakultät für Informatik
+<br/>des Karlsruher Instituts für Technologie (KIT)
+<br/>genehmigte
+<br/>Dissertation
+<br/>von
+<br/>aus Erlangen
+<br/>Tag der mündlichen Prüfung:
+<br/>18. November 2014
+<br/>Hauptreferent:
+<br/>Korreferent:
+<br/>Prof. Dr. Rainer Stiefelhagen
+<br/>Karlsruher Institut für Technologie
+<br/>Prof. Dr. Gerhard Rigoll
+<br/>Technische Universität München
+<br/>KIT – Universität des Landes Baden-Württemberg und nationales Forschungszentrum in der Helmholtz-Gemeinschaft
+<br/>www.kit.edu
+</td><td>('1931707', 'Martin Bäuml', 'martin bäuml')</td><td></td></tr><tr><td>6515fe829d0b31a5e1f4dc2970a78684237f6edb</td><td>Constrained Maximum Likelihood Learning of
+<br/>Bayesian Networks for Facial Action Recognition
+<br/>1 Electrical, Computer and Systems Eng. Dept.
+<br/><b>Rensselaer Polytechnic Institute</b><br/>Troy, NY, USA
+<br/>2 Visualization and Computer Vision Lab
+<br/><b>GE Global Research Center</b><br/>Niskayuna, NY, USA
+</td><td>('1686235', 'Yan Tong', 'yan tong')<br/>('1726583', 'Qiang Ji', 'qiang ji')</td><td></td></tr><tr><td>653d19e64bd75648cdb149f755d59e583b8367e3</td><td>Decoupling “when to update” from “how to
+<br/>update”
+<br/><b>School of Computer Science, The Hebrew University, Israel</b></td><td>('19201820', 'Eran Malach', 'eran malach')<br/>('2554670', 'Shai Shalev-Shwartz', 'shai shalev-shwartz')</td><td></td></tr><tr><td>65babb10e727382b31ca5479b452ee725917c739</td><td>Label Distribution Learning
+</td><td>('1735299', 'Xin Geng', 'xin geng')</td><td></td></tr><tr><td>62dccab9ab715f33761a5315746ed02e48eed2a0</td><td>A Short Note about Kinetics-600
+<br/>Jo˜ao Carreira
+</td><td>('51210148', 'Eric Noland', 'eric noland')<br/>('51215438', 'Andras Banki-Horvath', 'andras banki-horvath')<br/>('38961760', 'Chloe Hillier', 'chloe hillier')<br/>('1688869', 'Andrew Zisserman', 'andrew zisserman')</td><td>joaoluis@google.com
+<br/>enoland@google.com
+<br/>bhandras@google.com
+<br/>chillier@google.com
+<br/>zisserman@google.com
+</td></tr><tr><td>62d1a31b8acd2141d3a994f2d2ec7a3baf0e6dc4</td><td>Ding et al. EURASIP Journal on Image and Video Processing (2017) 2017:43
+<br/>DOI 10.1186/s13640-017-0188-z
+<br/>EURASIP Journal on Image
+<br/>and Video Processing
+<br/>R ES EAR CH
+<br/>Noise-resistant network: a deep-learning
+<br/>method for face recognition under noise
+<br/>Open Access
+</td><td>('3012331', 'Yuanyuan Ding', 'yuanyuan ding')<br/>('1976669', 'Yongbo Cheng', 'yongbo cheng')<br/>('1847689', 'Xiaoliu Cheng', 'xiaoliu cheng')<br/>('4869582', 'Baoqing Li', 'baoqing li')<br/>('2757480', 'Xing You', 'xing you')<br/>('38334864', 'Xiaobing Yuan', 'xiaobing yuan')</td><td></td></tr><tr><td>62694828c716af44c300f9ec0c3236e98770d7cf</td><td>Padrón-Rivera, G., Rebolledo-Mendez, G., Parra, P. P., & Huerta-Pacheco, N. S. (2016). Identification of Action Units Related to
+<br/>Identification of Action Units Related to Affective States in a Tutoring System
+<br/>1Facultad de Estadística e Informática, Universidad Veracruzana, Mexico // 2Universidad Juárez Autónoma de
+<br/>for Mathematics
+<br/>Huerta-Pacheco1
+<br/>*Corresponding author
+</td><td>('2221778', 'Gustavo Padrón-Rivera', 'gustavo padrón-rivera')<br/>('1731562', 'Genaro Rebolledo-Mendez', 'genaro rebolledo-mendez')</td><td>Tabasco, Mexico // zS12020111@estudiantes.uv.mx // grebolledo@uv.mx // pilar.pozos@ujat.mx //
+<br/>nehuerta@uv.mx
+</td></tr><tr><td>6261eb75066f779e75b02209fbd3d0f02d3e1e45</td><td>Fudan-Huawei at MediaEval 2015: Detecting Violent
+<br/>Scenes and Affective Impact in Movies with Deep Learning
+<br/><b>School of Computer Science, Fudan University, Shanghai, China</b><br/>2Media Lab, Huawei Technologies Co. Ltd., China
+</td><td>('9227981', 'Qi Dai', 'qi dai')<br/>('3066866', 'Rui-Wei Zhao', 'rui-wei zhao')<br/>('3099139', 'Zuxuan Wu', 'zuxuan wu')<br/>('31825486', 'Xi Wang', 'xi wang')<br/>('2650085', 'Zichen Gu', 'zichen gu')<br/>('2273062', 'Wenhai Wu', 'wenhai wu')<br/>('1717861', 'Yu-Gang Jiang', 'yu-gang jiang')</td><td></td></tr><tr><td>622daa25b5e6af69f0dac3a3eaf4050aa0860396</td><td>Greedy Feature Selection for Subspace Clustering
+<br/>Greedy Feature Selection for Subspace Clustering
+<br/>Department of Electrical & Computer Engineering
+<br/><b>Rice University, Houston, TX, 77005, USA</b><br/>Department of Electrical & Computer Engineering
+<br/><b>Carnegie Mellon University, Pittsburgh, PA, 15213, USA</b><br/>Department of Electrical & Computer Engineering
+<br/><b>Rice University, Houston, TX, 77005, USA</b><br/>Editor:
+</td><td>('1746363', 'Eva L. Dyer', 'eva l. dyer')<br/>('1745861', 'Aswin C. Sankaranarayanan', 'aswin c. sankaranarayanan')<br/>('1746260', 'Richard G. Baraniuk', 'richard g. baraniuk')</td><td>e.dyer@rice.edu
+<br/>saswin@ece.cmu.edu
+<br/>richb@rice.edu
+</td></tr><tr><td>620339aef06aed07a78f9ed1a057a25433faa58b</td><td></td><td></td><td></td></tr><tr><td>62b3598b401c807288a113796f424612cc5833ca</td><td></td><td></td><td></td></tr><tr><td>62f0d8446adee6a5e8102053a63a61af07ac4098</td><td>FACIAL POINT DETECTION USING CONVOLUTIONAL NEURAL NETWORK
+<br/>TRANSFERRED FROM A HETEROGENEOUS TASK
+<br/>**Tome R&D
+<br/><b>Chubu University</b><br/>1200, Matsumoto-cho, Kasugai, AICHI
+</td><td>('1687819', 'Takayoshi Yamashita', 'takayoshi yamashita')</td><td></td></tr><tr><td>628a3f027b7646f398c68a680add48c7969ab1d9</td><td>Plan for Final Year Project:
+<br/>HKU-Face: A Large Scale Dataset for Deep Face
+<br/>Recognition
+<br/>3035140108
+<br/>3035141841
+<br/>Introduction
+<br/>Face recognition has been one of the most successful techniques in the field of artificial intelligence
+<br/>because of its surpassing human-level performance in academic experiments and broad application in
+<br/>the industrial world. Gaussian-face[1] and Facenet[2] hold state-of-the-art record using statistical
+<br/>method and deep-learning method respectively. What’s more, face recognition has been applied
+<br/>in various areas like authority checking and recording, fostering a large number of start-ups like
+<br/>Face++.
+<br/>Our final year project will deal with the face recognition task by building a large-scaled and carefully-
+<br/>filtered dataset. Our project plan specifies our roadmap and current research process. This plan first
+<br/>illustrates the significance and potential enhancement in constructing large-scale face dataset for
+<br/>both academics and companies. Then objectives to accomplish and related literature review will be
+<br/>expressed in detail. Next, methodologies used, scope of our project and challenges faced by us are
+<br/>described. The detailed timeline for this project follows as well as a small summary.
+<br/>2 Motivation
+<br/>Nowadays most of the face recognition tasks are supervised learning tasks which use dataset annotated
+<br/>by human beings. This contains mainly two drawbacks: (1) limited size of dataset due to limited
+<br/>human effort; (2) accuracy problem resulted from human perceptual bias.
+<br/>Parkhi et al.[3] discuss the first problem, showing that giant companies hold private face databases
+<br/>with larger size of data (See the comparison in Table 1). Other research institution could only get
+<br/>access to public but smaller databases like LFW[4, 5], which acts like a barricade to even higher
+<br/>performance.
+<br/>Dataset
+<br/>IJB-A [6]
+<br/>LFW [4, 5]
+<br/>YFD [7]
+<br/>CelebFaces [8]
+<br/>CASIA-WebFace [9]
+<br/>MS-Celeb-1M [10]
+<br/>Facebook
+<br/>Google
+<br/>Availability
+<br/>public
+<br/>public
+<br/>public
+<br/>public
+<br/>public
+<br/>public
+<br/>private
+<br/>private
+<br/>identities
+<br/>500
+<br/>5K
+<br/>1595
+<br/>10K
+<br/>10K
+<br/>100K
+<br/>4K
+<br/>8M
+<br/>images
+<br/>5712
+<br/>13K
+<br/>3425 videos
+<br/>202K
+<br/>500K
+<br/>about 10M
+<br/>4400K
+<br/>100-200M
+<br/>Table 1: Face recognition datasets
+</td><td>('3347561', 'Haicheng Wang', 'haicheng wang')<br/>('40456402', 'Haoyu Li', 'haoyu li')</td><td></td></tr><tr><td>626913b8fcbbaee8932997d6c4a78fe1ce646127</td><td>Learning from Millions of 3D Scans for Large-scale 3D Face Recognition
+<br/>(This the preprint of the paper published in CVPR 2018)
+<br/>School of Computer Science and Software Engineering,
+<br/><b>The University of Western Australia</b></td><td>('1746166', 'Syed Zulqarnain Gilani', 'syed zulqarnain gilani')<br/>('46332747', 'Ajmal Mian', 'ajmal mian')</td><td>{zulqarnain.gilani,ajmal.mian}@uwa.edu.au
+</td></tr><tr><td>62374b9e0e814e672db75c2c00f0023f58ef442c</td><td>Frontalfaceauthenticationusingdiscriminatinggridswith
+<br/>morphologicalfeaturevectors
+<br/>A.Tefas
+<br/>C.Kotropoulos
+<br/>I.Pitas
+<br/><b>AristotleUniversityofThessaloniki</b><br/>Box,Thessaloniki
+<br/>EDICSnumbers:-KNOWContentRecognitionandUnderstanding
+<br/>-MODAMultimodalandMultimediaEnvironments
+<br/>Anovelelasticgraphmatchingprocedurebasedonmultiscalemorphologicaloperations,thesocalled
+<br/>morphologicaldynamiclinkarchitecture,isdevelopedforfrontalfaceauthentication.Fastalgorithms
+<br/>forimplementingmathematicalmorphologyoperationsarepresented.Featureselectionbyemploying
+<br/>linearprojectionalgorithmsisproposed.Discriminatorypowercoe(cid:14)cientsthatweighthematching
+<br/>errorateachgridnodearederived.Theperformanceofmorphologicaldynamiclinkarchitecturein
+<br/>frontalfaceauthenticationisevaluatedintermsofthereceiveroperatingcharacteristicontheMVTS
+<br/>faceimagedatabase.Preliminaryresultsforfacerecognitionusingtheproposedtechniquearealso
+<br/>presented.
+<br/>Correspondingauthor:I.Pitas
+<br/>DRAFT
+<br/>September
+</td><td></td><td>E-mail:fcostas,tefas,pitasg@zeus.csd.auth.gr
+</td></tr><tr><td>6257a622ed6bd1b8759ae837b50580657e676192</td><td></td><td></td><td></td></tr><tr><td>6226f2ea345f5f4716ac4ddca6715a47162d5b92</td><td>PERSPECTIVE
+<br/>published: 19 November 2015
+<br/>doi: 10.3389/frobt.2015.00029
+<br/>Object Detection: Current and
+<br/>Future Directions
+<br/>1 Advanced Mining Technology Center, Universidad de Chile, Santiago, Chile, 2 Department of Electrical Engineering,
+<br/>Universidad de Chile, Santiago, Chile
+<br/>Object detection is a key ability required by most computer and robot vision systems.
+<br/>The latest research on this area has been making great progress in many directions. In
+<br/>the current manuscript, we give an overview of past research on object detection, outline
+<br/>the current main research directions, and discuss open problems and possible future
+<br/>directions.
+<br/>Keywords: object detection, perspective, mini review, current directions, open problems
+<br/>1. INTRODUCTION
+<br/>During the last years, there has been a rapid and successful expansion on computer vision research.
+<br/>Parts of this success have come from adopting and adapting machine learning methods, while others
+<br/>from the development of new representations and models for specific computer vision problems
+<br/>or from the development of efficient solutions. One area that has attained great progress is object
+<br/>detection. The present works gives a perspective on object detection research.
+<br/>Given a set of object classes, object detection consists in determining the location and scale of all
+<br/>object instances, if any, that are present in an image. Thus, the objective of an object detector is to find
+<br/>all object instances of one or more given object classes regardless of scale, location, pose, view with
+<br/>respect to the camera, partial occlusions, and illumination conditions.
+<br/>In many computer vision systems, object detection is the first task being performed as it allows
+<br/>to obtain further information regarding the detected object and about the scene. Once an object
+<br/><b>instance has been detected (e.g., a face), it is be possible to obtain further information, including: (i</b><br/>to recognize the specific instance (e.g., to identify the subject’s face), (ii) to track the object over an
+<br/>image sequence (e.g., to track the face in a video), and (iii) to extract further information about the
+<br/>object (e.g., to determine the subject’s gender), while it is also possible to (a) infer the presence or
+<br/>location of other objects in the scene (e.g., a hand may be near a face and at a similar scale) and (b) to
+<br/>better estimate further information about the scene (e.g., the type of scene, indoor versus outdoor,
+<br/>etc.), among other contextual information.
+<br/>Object detection has been used in many applications, with the most popular ones being: (i)
+<br/>human-computer interaction (HCI), (ii) robotics (e.g., service robots), (iii) consumer electronics
+<br/>(e.g., smart-phones), (iv) security (e.g., recognition, tracking), (v) retrieval (e.g., search engines,
+<br/>photo management), and (vi) transportation (e.g., autonomous and assisted driving). Each of these
+<br/><b>applications has different requirements, including: processing time (off-line, on-line, or real-time</b><br/>robustness to occlusions, invariance to rotations (e.g., in-plane rotations), and detection under pose
+<br/>changes. While many applications consider the detection of a single object class (e.g., faces) and from
+<br/>a single view (e.g., frontal faces), others require the detection of multiple object classes (humans,
+<br/>vehicles, etc.), or of a single class from multiple views (e.g., side and frontal view of vehicles).
+<br/>In general, most systems can detect only a single object class from a restricted set of views and
+<br/>poses.
+<br/>Edited by:
+<br/>Venkatesh Babu Radhakrishnan,
+<br/><b>Indian Institute of Science Bangalore</b><br/>India
+<br/>Reviewed by:
+<br/>Juxi Leitner,
+<br/><b>Queensland University of Technology</b><br/>Australia
+<br/>George Azzopardi,
+<br/><b>University of Groningen, Netherlands</b><br/>Soma Biswas,
+<br/><b>Indian Institute of Science Bangalore</b><br/>India
+<br/>*Correspondence:
+<br/>†Present address:
+<br/>Graduate School of Informatics,
+<br/><b>Kyoto University, Kyoto, Japan</b><br/>Specialty section:
+<br/>This article was submitted to Vision
+<br/>Systems Theory, Tools and
+<br/>Applications, a section of the
+<br/>journal Frontiers in Robotics and AI
+<br/>Received: 20 July 2015
+<br/>Accepted: 04 November 2015
+<br/>Published: 19 November 2015
+<br/>Citation:
+<br/>Verschae R and Ruiz-del-Solar J
+<br/>(2015) Object Detection: Current and
+<br/>Future Directions.
+<br/>Front. Robot. AI 2:29.
+<br/>doi: 10.3389/frobt.2015.00029
+<br/>Frontiers in Robotics and AI | www.frontiersin.org
+<br/>November 2015 | Volume 2 | Article 29
+</td><td>('1689681', 'Rodrigo Verschae', 'rodrigo verschae')<br/>('1737300', 'Javier Ruiz-del-Solar', 'javier ruiz-del-solar')<br/>('1689681', 'Rodrigo Verschae', 'rodrigo verschae')<br/>('1689681', 'Rodrigo Verschae', 'rodrigo verschae')</td><td>rodrigo@verschae.org
+</td></tr><tr><td>62e913431bcef5983955e9ca160b91bb19d9de42</td><td>Facial Landmark Detection with Tweaked Convolutional Neural Networks
+<br/><b>USC Information Sciences Institute</b><br/><b>The Open University of Israel</b></td><td>('1746738', 'Yue Wu', 'yue wu')<br/>('1756099', 'Tal Hassner', 'tal hassner')</td><td></td></tr><tr><td>626859fe8cafd25da13b19d44d8d9eb6f0918647</td><td>Activity Recognition based on a
+<br/>Magnitude-Orientation Stream Network
+<br/>Smart Surveillance Interest Group, Department of Computer Science
+<br/>Universidade Federal de Minas Gerais, Belo Horizonte, Brazil
+</td><td>('2119408', 'Carlos Caetano', 'carlos caetano')<br/>('1679142', 'William Robson Schwartz', 'william robson schwartz')</td><td>{carlos.caetano,victorhcmelo,jefersson,william}@dcc.ufmg.br
+</td></tr><tr><td>624e9d9d3d941bab6aaccdd93432fc45cac28d4b</td><td>Object-Scene Convolutional Neural Networks for Event Recognition in Images
+<br/><b>The Chinese University of Hong Kong</b><br/><b>Shenzhen key lab of Comp. Vis. and Pat. Rec., Shenzhen Institutes of Advanced Technology, CAS, China</b></td><td>('33345248', 'Limin Wang', 'limin wang')<br/>('1915826', 'Zhe Wang', 'zhe wang')<br/>('35031371', 'Wenbin Du', 'wenbin du')<br/>('33427555', 'Yu Qiao', 'yu qiao')</td><td>07wanglimin@gmail.com, buptwangzhe2012@gmail.com, wb.du@siat.ac.cn, yu.qiao@siat.ac.cn
+</td></tr><tr><td>620e1dbf88069408b008347cd563e16aeeebeb83</td><td></td><td></td><td></td></tr><tr><td>624496296af19243d5f05e7505fd927db02fd0ce</td><td>Gauss-Newton Deformable Part Models for Face Alignment in-the-Wild
+<br/>1. School of Computer Science
+<br/><b>University of Lincoln, U.K</b><br/>2. Department of Computing
+<br/><b>Imperial College London, U.K</b></td><td>('2610880', 'Georgios Tzimiropoulos', 'georgios tzimiropoulos')</td><td>gtzimiropoulos@lincoln.ac.uk
+</td></tr><tr><td>62fd622b3ca97eb5577fd423fb9efde9a849cbef</td><td>Turning a Blind Eye: Explicit Removal of Biases and
+<br/>Variation from Deep Neural Network Embeddings
+<br/><b>Visual Geometry Group, University of Oxford</b><br/><b>University of Oxford</b><br/><b>Big Data Institute, University of Oxford</b></td><td>('1688869', 'Andrew Zisserman', 'andrew zisserman')</td><td></td></tr><tr><td>621ff353960d5d9320242f39f85921f72be69dc8</td><td>Explicit Occlusion Detection based Deformable Fitting for
+<br/>Facial Landmark Localization
+<br/>1Department of Computer Science
+<br/><b>Rutgers University</b><br/>617 Bowser Road, Piscataway, N.J, USA
+</td><td>('39960064', 'Xiang Yu', 'xiang yu')<br/>('1684164', 'Fei Yang', 'fei yang')<br/>('1768190', 'Junzhou Huang', 'junzhou huang')<br/>('1711560', 'Dimitris N. Metaxas', 'dimitris n. metaxas')</td><td>{xiangyu,feiyang,dnm}@cs.rutgers.edu
+</td></tr><tr><td>62007c30f148334fb4d8975f80afe76e5aef8c7f</td><td>Eye In-Painting with Exemplar Generative Adversarial Networks
+<br/>Facebook Inc.
+<br/>1 Hacker Way, Menlo Park (CA), USA
+</td><td>('8277405', 'Brian Dolhansky', 'brian dolhansky')</td><td>{bdol, ccanton}@fb.com
+</td></tr><tr><td>62a30f1b149843860938de6dd6d1874954de24b7</td><td>418
+<br/>Fast Algorithm for Updating the Discriminant Vectors
+<br/>of Dual-Space LDA
+</td><td>('40608983', 'Wenming Zheng', 'wenming zheng')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td></td></tr><tr><td>621e8882c41cdaf03a2c4a986a6404f0272ba511</td><td>On Robust Biometric Identity Verification via
+<br/>Sparse Encoding of Faces: Holistic vs Local Approaches
+<br/><b>The University of Queensland, School of ITEE, QLD 4072, Australia</b></td><td>('3026404', 'Yongkang Wong', 'yongkang wong')<br/>('1781182', 'Conrad Sanderson', 'conrad sanderson')<br/>('2270092', 'Brian C. Lovell', 'brian c. lovell')</td><td></td></tr><tr><td>62e0380a86e92709fe2c64e6a71ed94d152c6643</td><td>Facial Emotion Recognition With Expression Energy
+<br/>Albert Cruz
+<br/>Center for Research in
+<br/>Intelligent Systems
+<br/>216 Winston Chung Hall
+<br/>Center for Research in
+<br/>Intelligent Systems
+<br/>216 Winston Chung Hall
+<br/>Center for Research in
+<br/>Intelligent Systems
+<br/>216 Winston Chung Hall
+<br/>Riverside, CA, 92521-0425,
+<br/>Riverside, CA, 92521-0425,
+<br/>Riverside, CA, 92521-0425,
+<br/>USA
+<br/>USA
+<br/>USA
+</td><td>('1707159', 'Bir Bhanu', 'bir bhanu')<br/>('3254753', 'Ninad Thakoor', 'ninad thakoor')</td><td>acruz006@student.ucr.edu
+<br/>bhanu@ee.ucr.edu
+<br/>ninadt@ee.ucr.edu
+</td></tr><tr><td>621f656fedda378ceaa9c0096ebb1556a42e5e0f</td><td>Single Sample Face Recognition from Video via
+<br/>Stacked Supervised Auto-encoder
+<br/><b>Ponti cal Catholic University of Rio de Janeiro, Brazil</b><br/><b>Rio de Janeiro State University, Brazil</b></td><td>('8730918', 'Pedro J. Soto Vega', 'pedro j. soto vega')<br/>('2017816', 'Raul Queiroz Feitosa', 'raul queiroz feitosa')<br/>('2222679', 'Patrick Nigri Happ', 'patrick nigri happ')</td><td>{psoto, raul, vhaymaq, patrick}@ele.puc-rio.br
+</td></tr><tr><td>965f8bb9a467ce9538dec6bef57438964976d6d9</td><td>Recognizing Human Faces under Disguise and Makeup
+<br/><b>The Hong Kong Polytechnic University</b><br/>Hung Hom, Kowloon, Hong Kong
+</td><td>('17671202', 'Tsung Ying Wang', 'tsung ying wang')<br/>('35680604', 'Ajay Kumar', 'ajay kumar')</td><td>cstywang@comp.polyu.edu.hk, csajaykr@comp.polyu.edu.hk
+</td></tr><tr><td>961a5d5750f18e91e28a767b3cb234a77aac8305</td><td>Face Detection without Bells and Whistles
+<br/>1 ESAT-PSI/VISICS, iMinds, KU Leuven, Belgium
+<br/>2 MPI Informatics, Saarbrücken, Germany
+<br/>3 D-ITET/CVL, ETH Zürich, Switzerland
+</td><td>('11983029', 'Markus Mathias', 'markus mathias')<br/>('1798000', 'Rodrigo Benenson', 'rodrigo benenson')<br/>('3048367', 'Marco Pedersoli', 'marco pedersoli')<br/>('1681236', 'Luc Van Gool', 'luc van gool')</td><td></td></tr><tr><td>96f0e7416994035c91f4e0dfa40fd45090debfc5</td><td>Unsupervised Learning of Face Representations
+<br/><b>Georgia Institute of Technology, CVIT, IIIT Hyderabad, IIT Kanpur</b></td><td>('19200118', 'Samyak Datta', 'samyak datta')<br/>('39396475', 'Gaurav Sharma', 'gaurav sharma')</td><td></td></tr><tr><td>9626bcb3fc7c7df2c5a423ae8d0a046b2f69180c</td><td>UPTEC STS 17033
+<br/>Examensarbete 30 hp
+<br/>November 2017
+<br/>A deep learning approach for
+<br/>action classification in American
+<br/>football video sequences
+</td><td>('5845058', 'Jacob Westerberg', 'jacob westerberg')</td><td></td></tr><tr><td>963d0d40de8780161b70d28d2b125b5222e75596</td><td>Convolutional Experts Network for Facial Landmark Detection
+<br/><b>Carnegie Mellon University</b><br/>Tadas Baltruˇsaitis∗
+<br/><b>Carnegie Mellon University</b><br/>5000 Forbes Ave, Pittsburgh, PA 15213, USA
+<br/>5000 Forbes Ave, Pittsburgh, PA 15213, USA
+<br/><b>Carnegie Mellon University</b><br/>5000 Forbes Ave, Pittsburgh, PA 15213, USA
+</td><td>('1783029', 'Amir Zadeh', 'amir zadeh')<br/>('1767184', 'Louis-Philippe Morency', 'louis-philippe morency')</td><td>abagherz@cs.cmu.edu
+<br/>tbaltrus@cs.cmu.edu
+<br/>morency@cs.cmu.edu
+</td></tr><tr><td>968b983fa9967ff82e0798a5967920188a3590a8</td><td>2013, Vol. 139, No. 2, 271–299
+<br/>© 2013 American Psychological Association
+<br/>0033-2909/13/$12.00 DOI: 10.1037/a0031640
+<br/>Children’s Recognition of Disgust in Others
+<br/>Sherri C. Widen and James A. Russell
+<br/><b>Boston College</b><br/>Disgust has been theorized to be a basic emotion with a facial signal that is easily, universally,
+<br/>automatically, and perhaps innately recognized by observers from an early age. This article questions one
+<br/>key part of that theory: the hypothesis that children recognize disgust from its purported facial signal.
+<br/>Over the first 5 years, children experience disgust, produce facial expressions of disgust, develop a
+<br/>concept of disgust, understand and produce the word disgust or a synonym, know about disgust’s causes
+<br/>and consequences, and infer disgust in others from a situation or a behavior. Yet, only gradually do these
+<br/>children come to “recognize” disgust specifically from the “disgust face” found in standardized sets of
+<br/>the facial expressions of basic emotions. Improvement is gradual, with more than half of children
+<br/>matching the standard disgust face to disgust only at around 9 years of age and with subsequent
+<br/>improvement continuing gradually until the late teens or early adulthood. Up to age 8, a majority of
+<br/>children studied believe that the standard disgust face indicates anger. Rather than relying on an already
+<br/>known signal value, children may be actively learning to interpret the expression.
+<br/>Keywords: facial expression, disgust, anger, emotion recognition, disgust face
+<br/>Disgust has been theorized to be important for many reasons: its
+<br/>status as one of only a handful of basic human emotions and hence
+<br/>as a building block of other emotions (Rozin, Haidt, & McCauley,
+<br/>2008); its role in avoidance of poisons, parasites, disease, and
+<br/>contaminants (Curtis, De Barra, & Aunger, 2011; Hart, 1990;
+<br/>Oaten, Stevenson, & Case, 2009; Schaller & Park, 2011); its role
+<br/>in determining food preferences (Rozin & Fallon, 1987); its rela-
+<br/>tion to psychiatric disorders, especially obsessive-compulsive dis-
+<br/>order, phobias, and other anxiety disorders (Olatunji & McKay,
+<br/>2007; Phillips, Fahy, David, & Senior, 1998); its diagnostic role in
+<br/>neurological disorders such as Huntington’s disease (Spren-
+<br/><b>gelmeyer et al., 1996); and, increasingly, its role in reactions to</b><br/>cheating and other social and moral infractions (Haidt, 2003;
+<br/>Prinz, 2007). According to Giner-Sorolla, Bosson, Caswell, and
+<br/>Hettinger (2012), disgust plays “a powerful role in shaping cultural
+<br/>attitudes, policy, and law” (p. 1). Articles, books, and conferences
+<br/>demonstrate a surge of vigorous scientific theorizing and research
+<br/>on disgust. One result of this surge of research is that the idea of
+<br/>disgust as a simple reaction is giving way to a more complex story.
+<br/>As Herz (2012) summarized, “Our age, our personality, our cul-
+<br/>ture, our thoughts and beliefs, our mood, our morals, whom we’re
+<br/>with, where we are, and which of our senses is giving us the
+<br/>Sherri C. Widen and James A. Russell, Department of Psychology,
+<br/><b>Boston College</b><br/>This article was funded by Grant 1025563 from the National Science
+<br/>Foundation.
+<br/>We thank Nicole Nelson, Mary Kayyal, Joe Pochedly, Alyssa McCarthy,
+<br/>Nicole Trauffer, Cara D’Arcy, Marissa DiGirolamo, Anne Yoder, and Erin
+<br/>Heitzman for their comments on a draft of this article.
+<br/>Correspondence concerning this article should be addressed to Sherri C.
+<br/>Widen, Department of Psychology, McGuinn Hall, 140 Commonwealth
+<br/>bc.edu
+<br/>271
+<br/>feeling, all shape whether and how strongly we are able to feel
+<br/>disgusted” (p. 57).
+<br/>Much of the theorizing and research on disgust to date have
+<br/>been guided, explicitly or implicitly, by a research program cen-
+<br/>tered on the concept of basic emotions—indeed, that research
+<br/>program has provided the standard account of disgust. Theories
+<br/>within this research program (Ekman & Cordaro, 2011; Izard,
+<br/>1971, 1994; Tomkins, 1962) place facial expressions at the center
+<br/>of emotion. In this article, we question one key part of the standard
+<br/>account of disgust: the hypothesis that, from an early age, a child
+<br/>recognizes disgust in others from their facial expressions. Our
+<br/>review finds evidence that is inconsistent with this hypothesis, and
+<br/>we suggest that the field examine alternative accounts. To place
+<br/>this evidence in a broader context, we also review evidence on
+<br/>closely related topics, such as children’s disgust reactions, their
+<br/>acquisition of a word for disgust, their inference of disgust from
+<br/>nonfacial cues, and adults’ recognition of disgust from facial
+<br/>expressions.
+<br/>The Standard Account
+<br/>The widely assumed standard account of disgust stems from the
+<br/>classic work of Allport (1924) and Tomkins (1962) and those they
+<br/>influenced (Ekman & Cordaro, 2011; Izard, 2011; Levenson,
+<br/>2011). In this simple, elegant, and plausible account, so-called
+<br/>basic emotions—including disgust— have dedicated neural cir-
+<br/>cuitry, are triggered by specific releasing stimuli, and produce a
+<br/>coordinated response pattern that includes specific autonomic ner-
+<br/>vous system activation, a behavioral tendency, and a facial expres-
+<br/>sion. Ekman, Friesen, and Ellsworth (1972) described this last
+<br/>aspect of their theory as follows:
+<br/>Regardless of the language, of whether the culture is Western or
+<br/>Eastern, industrialized or preliterate, [certain] facial expressions are
+<br/>labeled with the same emotion terms . . . Our neuro-cultural theory
+<br/>postulates a facial affect program, located within the nervous system
+</td><td></td><td>Avenue, Boston College, Chestnut Hill, MA 02467. E-mail: widensh@
+</td></tr><tr><td>969fd48e1a668ab5d3c6a80a3d2aeab77067c6ce</td><td>End-to-End Spatial Transform Face Detection and Recognition
+<br/><b>Zhejiang University</b><br/><b>Zhejiang University</b><br/>Rokid.inc
+</td><td>('39106061', 'Liying Chi', 'liying chi')<br/>('35028106', 'Hongxin Zhang', 'hongxin zhang')<br/>('9932177', 'Mingxiu Chen', 'mingxiu chen')</td><td>charrin0531@gmail.com
+<br/>zhx@cad.zju.edu.cn
+<br/>cmxnono@rokid.com
+</td></tr><tr><td>96a9ca7a8366ae0efe6b58a515d15b44776faf6e</td><td>Grid Loss: Detecting Occluded Faces
+<br/><b>Institute for Computer Graphics and Vision</b><br/><b>Graz University of Technology</b></td><td>('34847524', 'Michael Opitz', 'michael opitz')<br/>('1903921', 'Georg Waltner', 'georg waltner')<br/>('1762885', 'Georg Poier', 'georg poier')<br/>('1720811', 'Horst Possegger', 'horst possegger')<br/>('3628150', 'Horst Bischof', 'horst bischof')</td><td>{michael.opitz,waltner,poier,possegger,bischof}@icg.tugraz.at
+</td></tr><tr><td>9696b172d66e402a2e9d0a8d2b3f204ad8b98cc4</td><td>J Inf Process Syst, Vol.9, No.1, March 2013
+<br/>pISSN 1976-913X
+<br/>eISSN 2092-805X
+<br/>Region-Based Facial Expression Recognition in
+<br/>Still Images
+</td><td>('2648759', 'Gawed M. Nagi', 'gawed m. nagi')<br/>('2057896', 'Fatimah Khalid', 'fatimah khalid')</td><td></td></tr><tr><td>964a3196d44f0fefa7de3403849d22bbafa73886</td><td></td><td></td><td></td></tr><tr><td>96e1ccfe96566e3c96d7b86e134fa698c01f2289</td><td>Published in Proc. of 11th IAPR International Conference on Biometrics (ICB 2018). Gold Coast, Australia, Feb. 2018
+<br/>Semi-Adversarial Networks: Convolutional Autoencoders for Imparting Privacy
+<br/>to Face Images
+<br/>Anoop Namboodiri 2
+<br/><b>Michigan State University, East Lansing, USA</b><br/><b>International Institute of Information Technology, Hyderabad, India</b></td><td>('5456235', 'Vahid Mirjalili', 'vahid mirjalili')<br/>('2562040', 'Sebastian Raschka', 'sebastian raschka')<br/>('1698707', 'Arun Ross', 'arun ross')</td><td>mirjalil@msu.edu
+<br/>raschkas@msu.edu
+<br/>anoop@iiit.ac.in
+<br/>rossarun@cse.msu.edu
+</td></tr><tr><td>96f4a1dd1146064d1586ebe86293d02e8480d181</td><td>COMPARATIVE ANALYSIS OF RERANKING
+<br/>TECHNIQUES FOR WEB IMAGE SEARCH
+<br/><b>Pune Institute of Computer Technology, Pune, ( India</b></td><td></td><td></td></tr><tr><td>9606b1c88b891d433927b1f841dce44b8d3af066</td><td>Principal Component Analysis with Tensor Train
+<br/>Subspace
+</td><td>('2329741', 'Wenqi Wang', 'wenqi wang')<br/>('1732805', 'Vaneet Aggarwal', 'vaneet aggarwal')<br/>('1980683', 'Shuchin Aeron', 'shuchin aeron')</td><td></td></tr><tr><td>9627f28ea5f4c389350572b15968386d7ce3fe49</td><td>Load Balanced GANs for Multi-view Face Image Synthesis
+<br/>1National Laboratory of Pattern Recognition, CASIA
+<br/>2Center for Research on Intelligent Perception and Computing, CASIA
+<br/>3Center for Excellence in Brain Science and Intelligence Technology, CAS
+<br/><b>University of Chinese Academy of Sciences, Beijing, 100049, China</b><br/>5Noah’s Ark Lab of Huawei Technologies
+</td><td>('1680853', 'Jie Cao', 'jie cao')<br/>('49995036', 'Yibo Hu', 'yibo hu')<br/>('49828394', 'Bing Yu', 'bing yu')<br/>('1705643', 'Ran He', 'ran he')<br/>('1757186', 'Zhenan Sun', 'zhenan sun')</td><td>{jie.cao,yibo.hu}@cripac.ia.ac.cn, yubing5@huawei.com, {rhe, znsun}@nlpr.ia.ac.cn
+</td></tr><tr><td>966e36f15b05ef8436afecf57a97b73d6dcada94</td><td>Dimensionality Reduction using Relative
+<br/>Attributes
+<br/><b>Institute for Human-Machine Communication, Technische Universit at M unchen</b><br/><b>Iran</b><br/><b>The Remote Sensing Technology Institute (IMF), German Aerospace Center</b><br/>1 Introduction
+<br/>Visual attributes are high-level semantic description of visual data that are close
+<br/>to the language of human. They have been intensively used in various appli-
+<br/>cations such as image classification [1,2], active learning [3,4], and interactive
+<br/>search [5]. However, the usage of attributes in dimensionality reduction has not
+<br/>been considered yet. In this work, we propose to utilize relative attributes as
+<br/>semantic cues in dimensionality reduction. To this end, we employ Non-negative
+<br/>Matrix Factorization (NMF) [6] constrained by embedded relative attributes to
+<br/>come up with a new algorithm for dimensionality reduction, namely attribute
+<br/>regularized NMF (ANMF).
+<br/>2 Approach
+<br/>We assume that X ∈ RD×N denotes N data points (e.g., images) represented by
+<br/>D dimensional low-level feature vectors. The NMF decomposes the non-negative
+<br/>matrix X into two non-negative matrices U ∈ RD×K and V ∈ RN×K such that
+<br/>the multiplication of U and V approximates the original matrix X. Here, U
+<br/>represents the bases and V contains the coefficients, which are considered as
+<br/>new representation of the original data. The NMF objective function is:
+<br/>F =(cid:13)(cid:13)X − U V T(cid:13)(cid:13)2
+<br/>s.t. U = [uik] ≥ 0
+<br/>V = [vjk] ≥ 0.
+<br/>(1)
+<br/>Additionally, we assume that M semantic attributes have been predefined
+<br/>for the data and the relative attributes of each image are available. Precisely,
+<br/>the matrix of relative attributes, Q ∈ RM×N , has been learned by some ranking
+<br/>function (e,.g, rankSVM). Intuitively, those images which own similar relative
+<br/>attributes have similar semantic contents and therefore belong to the same se-
+<br/>mantic class. This concept can be formulated as a regularizer to be added to the
+</td><td>('2133342', 'Mohammadreza Babaee', 'mohammadreza babaee')<br/>('2165157', 'Stefanos Tsoukalas', 'stefanos tsoukalas')<br/>('3281049', 'Maryam Babaee', 'maryam babaee')<br/>('1705843', 'Gerhard Rigoll', 'gerhard rigoll')<br/>('1777167', 'Mihai Datcu', 'mihai datcu')</td><td>{reza.babaee,rigoll}@tum.de, s.tsoukalas@mytum.de
+<br/>babaee@eng.ui.ac.ir
+<br/>mihai.datcu@dlr.de
+</td></tr><tr><td>96b1000031c53cd4c1c154013bb722ffd87fa7da</td><td>ContextVP: Fully Context-Aware Video
+<br/>Prediction
+<br/>1 NVIDIA, Santa Clara, CA, USA
+<br/>2 ETH Zurich, Zurich, Switzerland
+<br/>3 The Swiss AI Lab IDSIA, Manno, Switzerland
+<br/>4 NNAISENSE, Lugano, Switzerland
+</td><td>('2387035', 'Wonmin Byeon', 'wonmin byeon')<br/>('1794816', 'Qin Wang', 'qin wang')<br/>('2100612', 'Rupesh Kumar Srivastava', 'rupesh kumar srivastava')<br/>('1802604', 'Petros Koumoutsakos', 'petros koumoutsakos')</td><td>wbyeon@nvidia.com
+</td></tr><tr><td>96578785836d7416bf2e9c154f687eed8f93b1e4</td><td>Automated video-based facial expression analysis
+<br/>of neuropsychiatric disorders
+<br/><b>a Section of Biomedical Image Analysis, University of Pennsylvania, 3600 Market, Suite 380, Philadelphia, PA 19104, USA</b><br/><b>b Brain Behavior Center, University of Pennsylvania Medical Center, Hospital of the University of Pennsylvania</b><br/>3400 Spruce Street, 10th Floor Gates Building Philadelphia, PA 19104, USA
+<br/><b>c School of Arts and Sciences, University of Pennsylvania Medical Center, Hospital of the University of Pennsylvania</b><br/><b>University of Pennsylvania Medical Center, Hospital of the University of Pennsylvania</b><br/>3400 Spruce Street, 10th Floor Gates Building Philadelphia, PA 19104, USA
+<br/>3400 Spruce Street, 10th Floor Gates Building Philadelphia, PA 19104, USA
+<br/><b>University of Pennsylvania Medical Center, Hospital of the University of Pennsylvania</b><br/><b>f Neuropsychiatry Section, University of Pennsylvania Medical Center, Hospital of the University of Pennsylvania</b><br/>3400 Spruce Street, 10th Floor Gates Building Philadelphia, PA 19104, USA
+<br/>3400 Spruce Street, 10th Floor Gates Building Philadelphia, PA 19104, USA
+<br/>Received 16 July 2007; received in revised form 20 September 2007; accepted 20 September 2007
+</td><td>('37761073', 'Peng Wang', 'peng wang')<br/>('28501509', 'Frederick Barrett', 'frederick barrett')<br/>('2953329', 'Elizabeth Martin', 'elizabeth martin')<br/>('5747394', 'Marina Milonova', 'marina milonova')<br/>('1826037', 'Christian Kohler', 'christian kohler')<br/>('7467718', 'Ragini Verma', 'ragini verma')</td><td></td></tr><tr><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td><td>The MegaFace Benchmark: 1 Million Faces for Recognition at Scale
+<br/>Department of Computer Science and Engineering
+<br/><b>University of Washington</b><br/>(a) FaceScrub + MegaFace
+<br/>(b) FGNET + MegaFace
+<br/>Figure 1. The MegaFace challenge evaluates identification and verification as a function of increasing number of gallery distractors (going
+<br/>from 10 to 1 Million). We use two different probe sets (a) FaceScrub–photos of celebrities, (b) FGNET–photos with a large variation in
+<br/>age per person. We present rank-1 identification of state of the art algorithms that participated in our challenge. On the left side of each
+<br/>plot is current major benchmark LFW scale (i.e., 10 distractors, see how all the top algorithms are clustered above 95%). On the right is
+<br/>mega-scale (with a million distractors). Observe that rates drop with increasing numbers of distractors, even though the probe set is fixed,
+<br/>and that algorithms trained on larger sets (dashed lines) generally perform better. Participate at: http://megaface.cs.washington.edu.
+</td><td>('2419955', 'Ira Kemelmacher-Shlizerman', 'ira kemelmacher-shlizerman')<br/>('1679223', 'Steven M. Seitz', 'steven m. seitz')<br/>('2721528', 'Evan Brossard', 'evan brossard')</td><td></td></tr><tr><td>968f472477a8afbadb5d92ff1b9c7fdc89f0c009</td><td>Firefly-based Facial Expression Recognition
+</td><td></td><td></td></tr><tr><td>96c6f50ce8e1b9e8215b8791dabd78b2bbd5f28d</td><td>Dynamic Attention-controlled Cascaded Shape Regression Exploiting Training
+<br/>Data Augmentation and Fuzzy-set Sample Weighting
+<br/><b>Centre for Vision, Speech and Signal Processing, University of Surrey, Guildford GU2 7XH, UK</b><br/><b>School of IoT Engineering, Jiangnan University, Wuxi 214122, China</b></td><td>('2976854', 'Zhen-Hua Feng', 'zhen-hua feng')<br/>('1748684', 'Josef Kittler', 'josef kittler')</td><td>{z.feng, j.kittler, w.christmas, p.huber}@surrey.ac.uk, wu xiaojun@jiangnan.edu.cn
+</td></tr><tr><td>96e731e82b817c95d4ce48b9e6b08d2394937cf8</td><td>Unconstrained Face Verification using Deep CNN Features
+<br/><b>University of Maryland, College Park</b><br/><b>Rutgers, The State University of New Jersey</b></td><td>('36407236', 'Jun-Cheng Chen', 'jun-cheng chen')<br/>('1741177', 'Vishal M. Patel', 'vishal m. patel')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td>pullpull@cs.umd.edu, vishal.m.patel@rutgers.edu, rama@umiacs.umd.edu
+</td></tr><tr><td>9686dcf40e6fdc4152f38bd12b929bcd4f3bbbcc</td><td>International Journal of Engineering Research and General Science Volume 3, Issue 1, January-February, 2015
+<br/>ISSN 2091-2730
+<br/>Emotion Based Music Player
+<br/>1Department of Computer Science and Engineering
+<br/>2Department of Computer Science and Engineering
+<br/>3Department of Computer Science and Engineering
+<br/>4Asst. Professor, Department of Computer Science and Engineering
+<br/><b>M.H Saboo Siddik College of Engineering, University of Mumbai, India</b></td><td>('9928295', 'Sharik Khan', 'sharik khan')<br/>('1762886', 'Omar Khan', 'omar khan')<br/>('16079307', 'Shabana Tadvi', 'shabana tadvi')</td><td>Email:-kabani152@gmail.com
+</td></tr><tr><td>9636c7d3643fc598dacb83d71f199f1d2cc34415</td><td></td><td></td><td></td></tr><tr><td>3a27d164e931c422d16481916a2fa6401b74bcef</td><td>Anti-Makeup: Learning A Bi-Level Adversarial Network for Makeup-Invariant
+<br/>Face Verification
+<br/>National Laboratory of Pattern Recognition, CASIA
+<br/>Center for Research on Intelligent Perception and Computing, CASIA
+<br/>Center for Excellence in Brain Science and Intelligence Technology, CAS
+<br/><b>University of Chinese Academy of Sciences, Beijing 100190, China</b></td><td>('2496686', 'Yi Li', 'yi li')<br/>('3051419', 'Lingxiao Song', 'lingxiao song')<br/>('2225749', 'Xiang Wu', 'xiang wu')<br/>('1705643', 'Ran He', 'ran he')<br/>('1688870', 'Tieniu Tan', 'tieniu tan')</td><td>yi.li@cripac.ia.ac.cn, {lingxiao.song, rhe, tnt}@nlpr.ia.ac.cn, alfredxiangwu@gmail.com
+</td></tr><tr><td>3af8d38469fb21368ee947d53746ea68cd64eeae</td><td>Multimodal Intelligent Affect Detection with Kinect
+<br/>(Doctoral Consortium)
+<br/><b>Northumbria University</b><br/>United Kingdom
+<br/><b>Northumbria University</b><br/>United Kingdom
+<br/><b>Northumbria University</b><br/>United Kingdom
+</td><td>('1886853', 'Li Zhang', 'li zhang')<br/>('2004913', 'Alamgir Hossain', 'alamgir hossain')<br/>('39617655', 'Yang Zhang', 'yang zhang')</td><td>li.zhang@northumbria.ac.uk
+<br/>Yang4.zhang@northumbria.ac.uk
+</td></tr><tr><td>3a2fc58222870d8bed62442c00341e8c0a39ec87</td><td>Probabilistic Local Variation
+<br/>Segmentation
+<br/>Technion - Computer Science Department - M.Sc. Thesis MSC-2014-02 - 2014 </td><td>('3139600', 'Michael Baltaxe', 'michael baltaxe')</td><td></td></tr><tr><td>3a3f75e0ffdc0eef07c42b470593827fcd4020b4</td><td>NORMAL SIMILARITY NETWORK FOR GENERATIVE MODELLING
+<br/><b>School of Computing, National University of Singapore</b></td><td>('40456486', 'Jay Nandy', 'jay nandy')<br/>('1725063', 'Wynne Hsu', 'wynne hsu')</td><td></td></tr><tr><td>3a76e9fc2e89bdd10a9818f7249fbf61d216efc4</td><td>Face Sketch Matching via Coupled Deep Transform Learning
+<br/><b>IIIT-Delhi, India, 2West Virginia University</b></td><td>('1925017', 'Shruti Nagpal', 'shruti nagpal')<br/>('2220719', 'Maneet Singh', 'maneet singh')<br/>('39129417', 'Richa Singh', 'richa singh')<br/>('2338122', 'Mayank Vatsa', 'mayank vatsa')<br/>('2487227', 'Afzel Noore', 'afzel noore')<br/>('2641605', 'Angshul Majumdar', 'angshul majumdar')</td><td>{shrutin, maneets, rsingh, mayank, angshul}@iiitd.ac.in, afzel.noore@mail.wvu.edu
+</td></tr><tr><td>3a2c90e0963bfb07fc7cd1b5061383e9a99c39d2</td><td>End-to-End Deep Learning for Steering Autonomous
+<br/>Vehicles Considering Temporal Dependencies
+<br/><b>The American University in Cairo, Egypt</b><br/>2Valeo Schalter und Sensoren GmbH, Germany
+</td><td>('2150605', 'Hesham M. Eraqi', 'hesham m. eraqi')<br/>('2233511', 'Mohamed N. Moustafa', 'mohamed n. moustafa')<br/>('11300101', 'Jens Honer', 'jens honer')</td><td></td></tr><tr><td>3a0ea368d7606030a94eb5527a12e6789f727994</td><td>Categorization by Learning
+<br/>and Combining Object Parts
+<br/>
+<br/>Tomaso Poggio
+<br/><b>Honda RandD Americas, Inc., Boston, MA, USA</b><br/><b>University of Siena, Siena, Italy</b><br/><b>Computer Graphics Research Group, University of Freiburg, Freiburg, Germany</b><br/> heisele,serre,tp
+</td><td>('1684626', 'Bernd Heisele', 'bernd heisele')</td><td>@ai.mit.edu pontil@ing.unisi.it
+<br/>vetter@informatik.uni-freiburg.de
+</td></tr><tr><td>3a804cbf004f6d4e0b041873290ac8e07082b61f</td><td>Language-Action Tools for Cognitive Artificial Agents: Papers from the 2011 AAAI Workshop (WS-11-14)
+<br/>A Corpus-Guided Framework for Robotic Visual Perception
+<br/><b>University of Maryland Institute for Advanced Computer Studies, College Park, MD</b></td><td>('7607499', 'Yezhou Yang', 'yezhou yang')<br/>('1697493', 'Yiannis Aloimonos', 'yiannis aloimonos')</td><td>{cteo, yzyang, hal, fer, yiannis}@umiacs.umd.edu
+</td></tr><tr><td>3a04eb72aa64760dccd73e68a3b2301822e4cdc3</td><td>Scalable Sparse Subspace Clustering
+<br/><b>Machine Intelligence Laboratory, College of Computer Science, Sichuan University</b><br/>Chengdu, 610065, China.
+</td><td>('8249791', 'Xi Peng', 'xi peng')<br/>('36794849', 'Lei Zhang', 'lei zhang')<br/>('9276020', 'Zhang Yi', 'zhang yi')</td><td>pangsaai@gmail.com, {leizhang, zhangyi}@scu.edu.cn
+</td></tr><tr><td>3af130e2fd41143d5fc49503830bbd7bafd01f8b</td><td>How Do We Evaluate the Quality of Computational Editing Systems?
+<br/>1 Inria, Univ. Grenoble Alpes & CNRS (LJK), Grenoble, France
+<br/><b>University of Wisconsin-Madison, Madison, WI, USA</b></td><td>('2869929', 'Christophe Lino', 'christophe lino')<br/>('1810286', 'Quentin Galvane', 'quentin galvane')<br/>('1776507', 'Michael Gleicher', 'michael gleicher')</td><td></td></tr><tr><td>3a2cf589f5e11ca886417b72c2592975ff1d8472</td><td>Spontaneously Emerging Object Part Segmentation
+<br/>Machine Learning Department
+<br/><b>Carnegie Mellon University</b><br/>Machine Learning Department
+<br/><b>Carnegie Mellon University</b></td><td>('1696365', 'Yijie Wang', 'yijie wang')<br/>('1705557', 'Katerina Fragkiadaki', 'katerina fragkiadaki')</td><td>yijiewang@cmu.edu
+<br/>katef@cs.cmu.edu
+</td></tr><tr><td>3ada7640b1c525056e6fcd37eea26cd638815cd6</td><td>Abnormal Object Recognition:
+<br/>A Comprehensive Study
+<br/><b>Rutgers University</b><br/><b>University of Washington</b></td><td>('3139794', 'Babak Saleh', 'babak saleh')<br/>('2270286', 'Ali Farhadi', 'ali farhadi')</td><td></td></tr><tr><td>3abc833f4d689f37cc8a28f47fb42e32deaa4b17</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Large Scale Retrieval and Generation of Image Descriptions
+<br/>Received: date / Accepted: date
+</td><td>('2004053', 'Vicente Ordonez', 'vicente ordonez')<br/>('38390487', 'Margaret Mitchell', 'margaret mitchell')<br/>('34176020', 'Jesse Dodge', 'jesse dodge')<br/>('1699545', 'Yejin Choi', 'yejin choi')</td><td></td></tr><tr><td>3acb6b3e3f09f528c88d5dd765fee6131de931ea</td><td>(cid:49)(cid:50)(cid:57)(cid:40)(cid:47)(cid:3)(cid:53)(cid:40)(cid:51)(cid:53)(cid:40)(cid:54)(cid:40)(cid:49)(cid:55)(cid:36)(cid:55)(cid:44)(cid:50)(cid:49)(cid:3)(cid:41)(cid:50)(cid:53)(cid:3)(cid:39)(cid:53)(cid:44)(cid:57)(cid:40)(cid:53)(cid:3)(cid:40)(cid:48)(cid:50)(cid:55)(cid:44)(cid:50)(cid:49)(cid:3)(cid:53)(cid:40)(cid:38)(cid:50)(cid:42)(cid:49)(cid:44)(cid:55)(cid:44)(cid:50)(cid:49)(cid:3)(cid:3)
+<br/>(cid:44)(cid:49)(cid:3)(cid:48)(cid:50)(cid:55)(cid:50)(cid:53)(cid:3)(cid:57)(cid:40)(cid:43)(cid:44)(cid:38)(cid:47)(cid:40)(cid:3)(cid:57)(cid:44)(cid:39)(cid:40)(cid:50)(cid:54)(cid:3)
+<br/>(cid:53)(cid:68)(cid:77)(cid:78)(cid:88)(cid:80)(cid:68)(cid:85)(cid:3)(cid:55)(cid:75)(cid:72)(cid:68)(cid:74)(cid:68)(cid:85)(cid:68)(cid:77)(cid:68)(cid:81)(cid:13)(cid:15)(cid:3)(cid:37)(cid:76)(cid:85)(cid:3)(cid:37)(cid:75)(cid:68)(cid:81)(cid:88)(cid:13)(cid:15)(cid:3)(cid:36)(cid:79)(cid:69)(cid:72)(cid:85)(cid:87)(cid:3)(cid:38)(cid:85)(cid:88)(cid:93)(cid:130)(cid:15)(cid:3)(cid:37)(cid:72)(cid:79)(cid:76)(cid:81)(cid:71)(cid:68)(cid:3)(cid:47)(cid:72)(cid:13)(cid:15)(cid:3)(cid:36)(cid:86)(cid:82)(cid:81)(cid:74)(cid:88)(cid:3)(cid:55)(cid:68)(cid:80)(cid:69)(cid:82)(cid:13)(cid:3)
+<br/>(cid:3)
+<br/><b>Center for Research in Intelligent Systems, University of California, Riverside, CA 92521, USA</b><br/><b>cid:130) Computer Perception Lab, California State University, Bakersfield, CA 93311, USA</b><br/>(cid:36)(cid:37)(cid:54)(cid:55)(cid:53)(cid:36)(cid:38)(cid:55)(cid:3)
+<br/>the background
+<br/>(cid:3)
+<br/>A novel feature representation of human facial expressions
+<br/>for emotion recognition is developed. The representation
+<br/>leveraged
+<br/>texture removal ability of
+<br/>Anisotropic Inhibited Gabor Filtering (AIGF) with the
+<br/>compact representation of spatiotemporal
+<br/>local binary
+<br/>patterns. The emotion recognition system incorporated face
+<br/>detection and registration followed by the proposed feature
+<br/>representation: Local Anisotropic Inhibited Binary Patterns
+<br/>in Three Orthogonal
+<br/>and
+<br/>classification. The system is evaluated on videos from Motor
+<br/>(cid:55)(cid:85)(cid:72)(cid:81)(cid:71)(cid:3)(cid:48)(cid:68)(cid:74)(cid:68)(cid:93)(cid:76)(cid:81)(cid:72)(cid:182)(cid:86)(cid:3)(cid:37)(cid:72)(cid:86)(cid:87)(cid:3)(cid:39)(cid:85)(cid:76)(cid:89)(cid:72)(cid:85)(cid:3)(cid:38)(cid:68)(cid:85)(cid:3)(cid:82)(cid:73)(cid:3)(cid:87)(cid:75)(cid:72)(cid:3)(cid:60)(cid:72)(cid:68)(cid:85) 2014-2016.
+<br/>The results showed improved performance compared to
+<br/>other state-of-the-art feature representations.(cid:3)
+<br/>(LAIBP-TOP)
+<br/>Index(cid:3)Terms(cid:178)(cid:3)Facial expression, emotion recognition,
+<br/>feature extraction, background texture, anisotropic Gabor
+<br/>filter.(cid:3)
+<br/>(cid:3)
+<br/>Planes
+<br/>(cid:20)(cid:17)(cid:3)(cid:44)(cid:49)(cid:55)(cid:53)(cid:50)(cid:39)(cid:56)(cid:38)(cid:55)(cid:44)(cid:50)(cid:49)(cid:3)
+<br/>Facial expressions are crucial to non-verbal communication
+<br/>of emotion. Automatic facial emotion recognition software
+<br/>has applications in lie detection, human behavior analysis,
+<br/>medical applications, and human-computer interfaces. We
+<br/>develop a system to detect stress and inattention of a motor
+<br/>vehicle operator from a single camera. Previous work in
+<br/>observation of motor vehicle operators employed multiple
+<br/>cameras for 3-D reconstruction [1], but multi-camera
+<br/>systems may introduce too much complexity and too many
+<br/>constraints in the design of a system. Another possible
+<br/>solution is gaze, but as of yet there is no consensus on how
+<br/>to detect inattention from gaze [2]. The goal of our work is a
+<br/>system that can extrapolate high stress and inattention from
+<br/>valence and arousal measurements on a low-cost platform so
+<br/>as to prevent motor vehicle accidents.
+<br/> To this end, we present a novel dynamic local appearance
+<br/>feature that can compactly describe the spatiotemporal
+<br/>behavior of a local neighborhood in the video. The method
+<br/>is based on Local Binary Patterns in Three Orthogonal
+<br/>Planes (LBP-TOP) [3] and background suppressing Gabor
+<br/>Energy filtering [4], but it is significantly different. We
+<br/>demonstrate that the background suppression concept can be
+<br/>applied to LBP-TOP to improve performance. The system is
+<br/>tested on three data sets collected from the Motor Trend
+<br/>(cid:48)(cid:68)(cid:74)(cid:68)(cid:93)(cid:76)(cid:81)(cid:72)(cid:182)(cid:86)(cid:3) (cid:37)(cid:72)(cid:86)(cid:87)(cid:3) (cid:39)(cid:85)(cid:76)(cid:89)(cid:72)(cid:85)(cid:3) (cid:38)(cid:68)r of the Year 2014, 2015 and
+<br/>2016.
+<br/>(cid:21)(cid:17)(cid:3)(cid:53)(cid:40)(cid:47)(cid:36)(cid:55)(cid:40)(cid:39)(cid:3)(cid:58)(cid:50)(cid:53)(cid:46)(cid:3)(cid:36)(cid:49)(cid:39)(cid:3)(cid:38)(cid:50)(cid:49)(cid:55)(cid:53)(cid:44)(cid:37)(cid:56)(cid:55)(cid:44)(cid:50)(cid:49)(cid:54)(cid:3)
+<br/>(cid:3)
+<br/>The current challenge to dynamic facial emotion recognition
+<br/>is the detection of emotion despite the various extrinsic and
+<br/>intrinsic imaging conditions, and intra-personnel differences
+<br/>in expression. While deep learning has been a growing trend
+<br/>in image processing and computer vision, the effects of
+<br/>transfer learning (cid:178) using expression data from other
+<br/>datasets [5] (cid:178) are diminished possibly because of various
+<br/>factors [6]. Thus, hand-crafted features, not learned from the
+<br/>neural networks, are still of great interest to unconstrained
+<br/>facial emotion recognition. This work focuses on the
+<br/>development of a novel hand-crafted feature representation.
+<br/> Local Binary Pattern (LBP) is the most commonly used
+<br/>appearance-based feature extraction method [7]. LBP is a
+<br/>static texture descriptor and is not suitable for dynamic
+<br/>facial expressions in videos.
+<br/> A variation of LBP, Volume Local Binary Patterns
+<br/>(VLBP), was developed to capture dynamic textures [8].
+<br/>VLBP uses 3 parallel planes in the spatiotemporal domain
+<br/>where the center pixel is on the center plane, and it records
+<br/>the dynamic patterns in the neighborhood of each pixel into
+<br/>a (3(cid:81)+2) dimensional histogram, where (cid:81) is the number of
+<br/>neighboring pixels.
+<br/> The high dimensionality of VLBP is 23(cid:81)+2, makes it
+<br/>impractical to use due to the rapid increase in dimensionality
+<br/>as the size of the neighborhood increases. An alternate
+<br/>solution to VLBP is the Local Binary Patterns in Three
+<br/>Orthogonal Planes (LBP-TOP). The dimensionality of LBP-
+<br/>TOP (3*2(cid:81)) is significantly lower than VLBP. The working
+<br/>of LBP-TOP is described in section 3.
+<br/> The other major type of appearance feature is the Gabor
+<br/>filter. Traditional Gabor filters are
+<br/>in
+<br/>unconstrained settings; it captures all edges within an image,
+<br/>noise included. Cruz (cid:72)(cid:87)(cid:3) (cid:68)(cid:79)(cid:17)(cid:3) [4] proposed Anisotropic
+<br/>Inhibited Gabor Filter (AIGF) that is robust to background
+<br/>noise and computationally efficient. Almaev (cid:72)(cid:87)(cid:3) (cid:68)(cid:79)(cid:17)(cid:3) [9]
+<br/>too sensitive
+<br/>(cid:28)(cid:26)(cid:27)(cid:16)(cid:20)(cid:16)(cid:24)(cid:19)(cid:28)(cid:19)(cid:16)(cid:21)(cid:20)(cid:26)(cid:24)(cid:16)(cid:27)(cid:18)(cid:20)(cid:26)(cid:18)(cid:7)(cid:22)(cid:20)(cid:17)(cid:19)(cid:19)(cid:3)(cid:139)(cid:21)(cid:19)(cid:20)(cid:26)(cid:3)(cid:44)(cid:40)(cid:40)(cid:40)
+<br/>(cid:27)(cid:20)(cid:19)
+<br/>(cid:44)(cid:38)(cid:44)(cid:51)(cid:3)(cid:21)(cid:19)(cid:20)(cid:26)
+</td><td></td><td></td></tr><tr><td>3a60678ad2b862fa7c27b11f04c93c010cc6c430</td><td>JANUARY-MARCH 2012
+<br/>A Multimodal Database for
+<br/>Affect Recognition and Implicit Tagging
+</td><td>('2463695', 'Mohammad Soleymani', 'mohammad soleymani')<br/>('2796371', 'Jeroen Lichtenauer', 'jeroen lichtenauer')<br/>('1809085', 'Thierry Pun', 'thierry pun')<br/>('1694605', 'Maja Pantic', 'maja pantic')</td><td></td></tr><tr><td>3a591a9b5c6d4c62963d7374d58c1ae79e3a4039</td><td>Driver Cell Phone Usage Detection From HOV/HOT NIR Images
+<br/><b>Xerox Research Center Webster</b><br/>800 Phillips Rd. Webster NY 14580
+</td><td>('1762503', 'Yusuf Artan', 'yusuf artan')<br/>('2415287', 'Orhan Bulan', 'orhan bulan')<br/>('1736673', 'Robert P. Loce', 'robert p. loce')<br/>('5942563', 'Peter Paul', 'peter paul')</td><td>yusuf.artan,orhan.bulan,robert.loce,peter.paul@xerox.com
+</td></tr><tr><td>3aa9c8c65ce63eb41580ba27d47babb1100df8a3</td><td>Annals of the  
+<br/><b>University of North Carolina Wilmington</b><br/>Master of Science in  
+<br/>Computer Science and Information Systems 
+</td><td></td><td></td></tr><tr><td>3a0a839012575ba455f2b84c2d043a35133285f9</td><td>444
+<br/>Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing, pages 444–454,
+<br/>Edinburgh, Scotland, UK, July 27–31, 2011. c(cid:13)2011 Association for Computational Linguistics
+</td><td></td><td></td></tr><tr><td>3af1a375c7c1decbcf5c3a29774e165cafce390c</td><td>Quantifying Facial Expression Abnormality in Schizophrenia by Combining
+<br/>2D and 3D Features
+<br/>1 Department of Radiology
+<br/><b>University of Pennsylvania</b><br/>2 Department of Psychiatry
+<br/><b>University of Pennsylvania</b><br/>Philadelphia, PA 19104
+<br/>Philadelphia, PA 19104
+</td><td>('1722767', 'Peng Wang', 'peng wang')<br/>('15741672', 'Fred Barrett', 'fred barrett')<br/>('7467718', 'Ragini Verma', 'ragini verma')</td><td>{wpeng@ieee.org, ragini.verma@uphs.upenn.edu }
+<br/>{kohler, fbarrett, raquel, gur}@bbl.med.upenn.edu
+</td></tr><tr><td>3a9681e2e07be7b40b59c32a49a6ff4c40c962a2</td><td>Biometrics & Biostatistics International Journal
+<br/>Comparing treatment means: overlapping standard
+<br/>errors, overlapping confidence intervals, and tests of
+<br/>hypothesis
+</td><td></td><td></td></tr><tr><td>3a846704ef4792dd329a5c7a2cb8b330ab6b8b4e</td><td>in any current or
+<br/>future media,
+<br/>for all other uses,
+<br/>© 2010 IEEE. Personal use of this material is permitted. Permission from IEEE must be
+<br/>obtained
+<br/>including
+<br/>reprinting/republishing this material for advertising or promotional purposes, creating
+<br/>new collective works, for resale or redistribution to servers or lists, or reuse of any
+<br/>copyrighted component of this work in other works.
+<br/>Pre-print of article that appeared at the IEEE Computer Society Workshop on Biometrics
+<br/>2010.
+<br/>The published article can be accessed from:
+<br/>http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=5544597
+</td><td></td><td></td></tr><tr><td>3a2a37ca2bdc82bba4c8e80b45d9f038fe697c7d</td><td>Handling Uncertain Tags in Visual Recognition
+<br/><b>School of Computing Science, Simon Fraser University, Canada</b></td><td>('3214848', 'Arash Vahdat', 'arash vahdat')<br/>('10771328', 'Greg Mori', 'greg mori')</td><td>{avahdat, mori}@cs.sfu.ca
+</td></tr><tr><td>3a95eea0543cf05670e9ae28092a114e3dc3ab5c</td><td>Constructing the L2-Graph for Robust Subspace
+<br/>Learning and Subspace Clustering
+</td><td>('8249791', 'Xi Peng', 'xi peng')<br/>('1751019', 'Zhiding Yu', 'zhiding yu')<br/>('3134548', 'Huajin Tang', 'huajin tang')<br/>('9276020', 'Zhang Yi', 'zhang yi')</td><td></td></tr><tr><td>3a4f522fa9d2c37aeaed232b39fcbe1b64495134</td><td>ISSN (Online) 2321 – 2004
+<br/>ISSN (Print) 2321 – 5526
+<br/> INTERNATIONAL JOURNAL OF INNOVATIVE RESEARCH IN ELECTRICAL, ELECTRONICS, INSTRUMENTATION AND CONTROL ENGINEERING
+<br/> Vol. 4, Issue 5, May 2016
+<br/>IJIREEICE
+<br/>Face Recognition and Retrieval Using Cross
+<br/>Age Reference Coding
+<br/>Sricharan H S1, Srinidhi K S1, Rajath D N1, Tejas J N1, Chandrakala B M2
+<br/> BE, DSCE, Bangalore1
+<br/>Assistant Professor, DSCE, Bangalore2
+</td><td></td><td></td></tr><tr><td>54948ee407b5d32da4b2eee377cc44f20c3a7e0c</td><td>Right for the Right Reason: Training Agnostic
+<br/>Networks
+<br/><b>Intelligent Systems Laboratory, University of Bristol, Bristol BS8 1UB, UK</b><br/>use of classifiers in “out of domain” situations, a problem that
+<br/>leads to research questions in domain adaptation [6], [18].
+<br/>Other concerns are also created around issues of bias, e.g.
+<br/>classifiers incorporating biases that are present in the data
+<br/>and are not intended to be used [2], which run the risk of
+<br/>reinforcing or amplifying cultural (and other) biases [20].
+<br/>Therefore, both predictive accuracy and fairness are heavily
+<br/>influenced by the choices made when developing black-box
+<br/>machine-learning models.
+</td><td>('1805367', 'Sen Jia', 'sen jia')<br/>('2031978', 'Thomas Lansdall-Welfare', 'thomas lansdall-welfare')<br/>('1685083', 'Nello Cristianini', 'nello cristianini')</td><td>{sen.jia, thomas.lansdall-welfare, nello.cristianini}@bris.ac.uk
+</td></tr><tr><td>540b39ba1b8ef06293ed793f130e0483e777e278</td><td>ORIGINAL RESEARCH
+<br/>published: 13 July 2018
+<br/>doi: 10.3389/fpsyg.2018.01191
+<br/>Biologically Inspired Emotional
+<br/>Expressions for Artificial Agents
+<br/><b>Optics and Engineering Informatics, Budapest University of Technology and Economics</b><br/><b>Budapest, Hungary, E tv s Lor nd University, Budapest, Hungary, 3 Institute for Computer Science</b><br/><b>and Control, Hungarian Academy of Sciences, Budapest, Hungary, Chuo University</b><br/>Tokyo, Japan, 5 MTA-ELTE Comparative Ethology Research Group, Budapest, Hungary, 6 Department of Telecommunications
+<br/><b>and Media Informatics, Budapest University of Technology and Economics, Budapest, Hungary</b><br/>A special area of human-machine interaction,
+<br/>the expression of emotions gains
+<br/>importance with the continuous development of artificial agents such as social robots or
+</td><td>('31575111', 'Beáta Korcsok', 'beáta korcsok')<br/>('3410664', 'Veronika Konok', 'veronika konok')<br/>('10791722', 'György Persa', 'györgy persa')<br/>('2725581', 'Tamás Faragó', 'tamás faragó')<br/>('1701851', 'Mihoko Niitsuma', 'mihoko niitsuma')<br/>('1769570', 'Péter Baranyi', 'péter baranyi')<br/>('3131165', 'Márta Gácsi', 'márta gácsi')</td><td></td></tr><tr><td>54bb25a213944b08298e4e2de54f2ddea890954a</td><td>AgeDB: the first manually collected, in-the-wild age database
+<br/><b>Imperial College London</b><br/><b>Imperial College London</b><br/><b>Imperial College London, On do</b><br/><b>Imperial College London</b><br/><b>Middlesex University London</b><br/><b>Imperial College London</b></td><td>('24278037', 'Stylianos Moschoglou', 'stylianos moschoglou')<br/>('40598566', 'Athanasios Papaioannou', 'athanasios papaioannou')<br/>('3320415', 'Christos Sagonas', 'christos sagonas')<br/>('3234063', 'Jiankang Deng', 'jiankang deng')<br/>('1754270', 'Irene Kotsia', 'irene kotsia')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')</td><td>s.moschoglou@imperial.ac.uk
+<br/>a.papaioannou11@imperial.ac.uk
+<br/>c.sagonas@imperial.ac.uk
+<br/>j.deng16@imperial.ac.uk
+<br/>i.kotsia@mdx.ac.uk
+<br/>s.zafeiriou@imperial.ac.uk
+</td></tr><tr><td>54bae57ed37ce50e859cbc4d94d70cc3a84189d5</td><td>FACE RECOGNITION COMMITTEE MACHINE
+<br/>Department of Computer Science and Engineering
+<br/><b>The Chinese University of Hong Kong</b><br/>Shatin, Hong Kong
+</td><td>('2899702', 'Ho-Man Tang', 'ho-man tang')<br/>('1681775', 'Michael R. Lyu', 'michael r. lyu')<br/>('1706259', 'Irwin King', 'irwin king')</td><td>
+</td></tr><tr><td>54f442c7fa4603f1814ebd8eba912a00dceb5cb2</td><td>The Indian Buffet Process:
+<br/>Scalable Inference and Extensions
+<br/>A Thesis
+<br/>Presented to the Fellowship of
+<br/><b>The University of Cambridge</b><br/>in Candidacy for the Degree of
+<br/>Master of Science
+<br/>Department of Engineering
+<br/>Zoubin Ghahramani, supervisor
+<br/>August 2009
+</td><td>('2292194', 'Finale Doshi-Velez', 'finale doshi-velez')</td><td></td></tr><tr><td>543f21d81bbea89f901dfcc01f4e332a9af6682d</td><td>Published as a conference paper at ICLR 2016
+<br/>UNSUPERVISED AND SEMI-SUPERVISED LEARNING
+<br/>WITH CATEGORICAL GENERATIVE ADVERSARIAL
+<br/>NETWORKS
+<br/><b>University of Freiburg</b><br/>79110 Freiburg, Germany
+</td><td>('2060551', 'Jost Tobias Springenberg', 'jost tobias springenberg')</td><td>springj@cs.uni-freiburg.de
+</td></tr><tr><td>54969bcd728b0f2d3285866c86ef0b4797c2a74d</td><td>IEEE TRANSACTION SUBMISSION
+<br/>Learning for Video Compression
+</td><td>('31482866', 'Zhibo Chen', 'zhibo chen')<br/>('50258851', 'Tianyu He', 'tianyu he')<br/>('50562569', 'Xin Jin', 'xin jin')<br/>('1697194', 'Feng Wu', 'feng wu')</td><td></td></tr><tr><td>5456166e3bfe78a353df988897ec0bd66cee937f</td><td>Improved Boosting Performance by Exclusion
+<br/>of Ambiguous Positive Examples
+<br/>Computer Vision and Active Perception, KTH, Stockholm 10800, Sweden
+<br/>Keywords:
+<br/>Boosting, Image Classification, Algorithm Evaluation, Dataset Pruning, VOC2007.
+</td><td>('1750517', 'Miroslav Kobetski', 'miroslav kobetski')<br/>('1736906', 'Josephine Sullivan', 'josephine sullivan')</td><td>{kobetski, sullivan}@kth.se
+</td></tr><tr><td>54a9ed950458f4b7e348fa78a718657c8d3d0e05</td><td>Learning Neural Models for End-to-End
+<br/>Clustering
+<br/>1 ZHAW Datalab & School of Engineering, Winterthur, Switzerland
+<br/>2 ARGUS DATA INSIGHTS Schweiz AG, Zurich, Switzerland
+<br/><b>Ca Foscari University of Venice, Venice, Italy</b><br/><b>Institute of Neural Information Processing, Ulm University, Germany</b><br/><b>Institute for Optical Systems, HTWG Konstanz, Germany</b></td><td>('50415299', 'Benjamin Bruno Meier', 'benjamin bruno meier')<br/>('3469013', 'Ismail Elezi', 'ismail elezi')<br/>('1985672', 'Mohammadreza Amirian', 'mohammadreza amirian')<br/>('3238279', 'Oliver Dürr', 'oliver dürr')<br/>('2793787', 'Thilo Stadelmann', 'thilo stadelmann')</td><td></td></tr><tr><td>541f1436c8ffef1118a0121088584ddbfd3a0a8a</td><td>A Spatio-Temporal Feature based on Triangulation of Dense SURF
+<br/><b>The University of Electro-Communications, Tokyo</b><br/>1-5-1 Chofu, Tokyo 182-0021 JAPAN
+</td><td>('2274625', 'Do Hang Nga', 'do hang nga')<br/>('1681659', 'Keiji Yanai', 'keiji yanai')</td><td>dohang@mm.cs.uec.ac.jp, yanai@cs.uec.ac.jp
+</td></tr><tr><td>54aacc196ffe49b3450059fccdf7cd3bb6f6f3c3</td><td>A Joint Learning Framework for Attribute Models and Object Descriptions
+<br/>Dhruv Mahajan
+<br/>Yahoo! Labs, Bangalore, India
+</td><td>('1779926', 'Sundararajan Sellamanickam', 'sundararajan sellamanickam')<br/>('4989209', 'Vinod Nair', 'vinod nair')</td><td>{dkm,ssrajan,vnair}@yahoo-inc.com
+</td></tr><tr><td>54ce3ff2ab6e4465c2f94eb4d636183fa7878ab7</td><td>Proceedings of the Thirty-First AAAI Conference on Artificial Intelligence (AAAI-17)
+<br/>Local Centroids Structured Non-Negative Matrix Factorization
+<br/><b>University of Texas at Arlington, Texas, USA</b><br/><b>School of Computer Science, OPTIMAL, Northwestern Polytechnical University, Xian 710072, Shaanxi, P. R. China</b></td><td>('2141896', 'Hongchang Gao', 'hongchang gao')<br/>('1688370', 'Feiping Nie', 'feiping nie')</td><td>{hongchanggao, feipingnie}@gmail.com, heng@uta.edu
+</td></tr><tr><td>541bccf19086755f8b5f57fd15177dc49e77d675</td><td></td><td>('2154872', 'Lijin Aryananda', 'lijin aryananda')</td><td></td></tr><tr><td>5495e224ac7b45b9edc5cfeabbb754d8a40a879b</td><td>Feature Reconstruction Disentangling for Pose-invariant Face Recognition
+<br/>Supplementary Material
+<br/><b>Rutgers, The State University of New Jersey</b><br/><b>University of California, San Diego</b><br/>‡ NEC Laboratories America
+<br/>1. Summary of The Supplementary
+<br/>This supplementary file includes two parts: (a) Addi-
+<br/>tional implementation details are presented to improve the
+<br/>reproducibility; (b) More experimental results are presented
+<br/>to validate our approach in different aspects, which are not
+<br/>shown in the main submission due to the space limitation.
+<br/>2. Additional Implementation Details
+<br/>Pose-variant face generation We designed a network to
+<br/>predict 3DMM parameters from a single face image. The
+<br/>design is mainly based on VGG16 [4]. We use the same num-
+<br/>ber of convolutional layers as VGG16 but replacing all max
+<br/>pooling layers with stride-2 convolutional operations. The
+<br/>fully connected (fc) layers are also different: we first use two
+<br/>fc layers, each of which has 1024 neurons, to connect with
+<br/>the convolutional modules; then, a fc layer of 30 neurons is
+<br/>used for identity parameters, a fc layer of 29 neurons is used
+<br/>for expression parameters, and a fc layer of 7 neurons is used
+<br/>for pose parameters. Different from [8] uses 199 parameters
+<br/>to represent the identity coefficients, we truncate the num-
+<br/>ber of identity eigenvectors to 30 which preserves 90% of
+<br/>variations. This truncation leads to fast convergence and less
+<br/>overfitting. For texture, we only generate non-frontal faces
+<br/>from frontal ones, which significantly mitigate the halluci-
+<br/>nating texture issue caused by self occlusion and guarantee
+<br/>high-fidelity reconstruction. We apply the Z-Buffer algo-
+<br/>rithm used in [8] to prevent ambiguous pixel intensities due
+<br/>to same image plane position but different depths.
+<br/>Rich feature embedding The design of the rich em-
+<br/>bedding network is mainly based on the architecture of
+<br/>CASIA-net [6] since it is wildly used in former approach
+<br/>and achieves strong performance in face recognition. During
+<br/>training, CASIA+MultiPIE or CASIA+300WLP are used.
+<br/>As shown in Figure 3 of the main submission, after the con-
+<br/>volutional layers of CASIA-net, we use a 512-d FC for the
+<br/>rich feature embedding, which is further branched into a
+<br/>256-d identity feature and a 128-d non-identity feature. The
+<br/>128-d non-identity feature is further connected with a 136-d
+<br/>landmark prediction and a 7-d pose prediction. Notice that
+<br/>in the face generation network, the number of pose parame-
+<br/>ters is 7 instead of 3 because we need to uniquely depict the
+<br/>projection matrix from the 3D model and the 2D face shape
+<br/>in image domain, which includes scale, pitch, yaw, roll, x
+<br/>translation, y translation, and z translations.
+<br/>Disentanglement by feature reconstruction Once the
+<br/>rich embedding network is trained, we feed genius pair that
+<br/>share the same identity but different viewpoints into the
+<br/>network to obtain the corresponding rich embedding, identity
+<br/>and non-identity features. To disentangle the identity and
+<br/>pose factors, we concatenate the identity and non-identity
+<br/>features and roll though two 512-d fully connected layers
+<br/>to output a reconstructed rich embedding depicted by 512
+<br/>neurons. Both self and cross reconstruction loss are designed
+<br/>to eventually push the two identity features close to each
+<br/>other. At the same time, a cross-entropy loss is applied on the
+<br/>near-frontal identity feature to maintain the discriminative
+<br/>power of the learned representation. The disentanglement
+<br/>of the identity and pose is finally achieved by the proposed
+<br/>feature reconstruction based metric learning.
+<br/>3. Additional Experimental Results
+<br/>In addition to the main submission, we present more
+<br/>experimental results in this section to further validate our
+<br/>approach in different aspects.
+<br/>3.1. P1 and P2 protocol on MultiPIE
+<br/>In the main submission, due to space considerations, we
+<br/>only report the mean accuracy over 10 random training and
+<br/>testing splits, on MultiPIE and 300WLP separately. In Ta-
+<br/>ble 1, we report the standard deviation of our method as a
+<br/>more complete comparison. From the results, the standard
+<br/>deviation of our method is also very small, which suggests
+<br/>that the performance is consistent across all the trials. We
+</td><td>('4340744', 'Xi Peng', 'xi peng')<br/>('15644381', 'Xiang Yu', 'xiang yu')<br/>('1729571', 'Kihyuk Sohn', 'kihyuk sohn')<br/>('1711560', 'Dimitris N. Metaxas', 'dimitris n. metaxas')<br/>('2099305', 'Manmohan Chandraker', 'manmohan chandraker')</td><td>{xipeng.cs, dnm}@rutgers.edu, {xiangyu,ksohn,manu}@nec-labs.com
+</td></tr><tr><td>54756f824befa3f0c2af404db0122f5b5bbf16e0</td><td>Research Statement
+<br/>Computer Vision — Visual Recognition
+<br/>Computational visual recognition concerns identifying what is in an image, video, or other visual data, enabling
+<br/>applications such as measuring location, pose, size, activity, and identity as well as indexing for search by content.
+<br/>Recent progress in making economical sensors and improvements in network, storage, and computational power
+<br/>make visual recognition practical and relevant in almost all experimental sciences and commercial applications
+<br/>such as image search. My work in visual recognition brings together machine learning, insights from psychology
+<br/>and physiology, computer graphics, algorithms, and a great deal of computation.
+<br/>While I am best known for my work on general object category detection – creating techniques and building
+<br/>systems for some of the best performing approaches to categorizing and localizing objects in images, recognizing
+<br/>action in video, and searching large collections of video and images – my research extends widely across visual
+<br/>recognition including:
+<br/>• Creating low-level image descriptors – procedures for converting pixel values to features that can be used
+<br/>to model appearance for recognition. These include widely used descriptors for category recognition in
+<br/>images [4, 2], object detection in images and video [11, 10, 2], and optical flow based descriptors for action
+<br/>recognition in video [8].
+<br/>• Developing models for recognition – ranging from what is becoming seminal work in recognizing human
+<br/>actions in video [8], to formulating object localization as approximate subgraph isomorphism [2], to models
+<br/>for parsing architectural images [3], to a novel approach for face recognition based on high level describable
+<br/>visual attributes [9].
+<br/>• Deriving machine learning techniques – this includes both techniques for increasing the accuracy of clas-
+<br/>sification [15] and techniques that provide improvements in the trade-off between accuracy and efficiency
+<br/>of classification for detection and categorization [11, 10] – making some approaches exponentially faster
+<br/>and therefore useful for a new range of applications.
+<br/>• Applications to web scale visual data – introducing novel techniques to automatically extract useful in-
+<br/>formation from web-scale data. Successful applications include extracting models of face appearance [7]
+<br/>and representative iconic images [5]. Some of my work on machine learning techniques for visual recogni-
+<br/><b>tion [11, 10] is making possible very large scale visual recognition both in my own ongoing work, including</b><br/>a collaboration with the ImageNet (10 million images in 10 thousand categories) team at Princeton and
+<br/>Stanford, and efforts by other researchers in industry (Google and Yahoo!) and academia.
+<br/>• Applications to analyzing imagery of people – probably the most important type of content in images and
+<br/>video. Several of my projects address analyzing imagery of people, from detection [10], to identification by
+<br/>face recognition [9, 7, 6], to localizing limbs (pose estimation) [14], and recognizing actions [8].
+<br/>All of this work is part of an attempt to understand the structure of visual data and build better systems
+<br/>for extracting information from visual signals. Such systems are useful in practice because, although for many
+<br/>application areas human perceptual abilities far outstrip the ability of computational systems, automated systems
+<br/>already have the upper hand in running constantly over vast amounts of data, e.g. surveillance systems and process
+<br/>monitoring, and in making metric decisions about specific quantities such as size, distance, or orientation, where
+<br/>humans have difficulty. Surveillance illustrates the need for recognition in order to increase performance. From
+<br/>watching cells under a microscope to observing research mice in habitats to guarding national borders, surveillance
+<br/>systems are limited by false detections produced due to spurious and unimportant activity. This cost can be reduced
+<br/>by visual recognition algorithms that identify either activities of interest or the commonly occurring unimportant
+<br/>activity.
+<br/>Part of my work at Yahoo! Research emphasized another key application area for visual recognition, extracting
+<br/>useful information from the vast and ever changing image and video data available on the world wide web. For
+<br/>some of this data people provide partial annotation in the form of tags, captions, and freeform text on web pages.
+<br/>One major challenge is to combine results from computational visual recognition with these partial annotations to
+</td><td>('39668247', 'Alexander C. Berg', 'alexander c. berg')</td><td></td></tr><tr><td>54204e28af73c7aca073835a14afcc5d8f52a515</td><td>Fine-Pruning: Defending Against Backdooring Attacks
+<br/>on Deep Neural Networks
+<br/><b>New York University, Brooklyn, NY, USA</b></td><td>('48087922', 'Kang Liu', 'kang liu')<br/>('3337066', 'Brendan Dolan-Gavitt', 'brendan dolan-gavitt')<br/>('1696125', 'Siddharth Garg', 'siddharth garg')</td><td>{kang.liu,brendandg,siddharth.garg}@nyu.edu
+</td></tr><tr><td>549c719c4429812dff4d02753d2db11dd490b2ae</td><td>YouTube-BoundingBoxes: A Large High-Precision
+<br/>Human-Annotated Data Set for Object Detection in Video
+<br/>Google Brain
+<br/>Google Brain
+<br/>Google Research
+<br/>Google Brain
+<br/>Google Brain
+</td><td>('2892780', 'Esteban Real', 'esteban real')<br/>('1789737', 'Jonathon Shlens', 'jonathon shlens')<br/>('30554825', 'Stefano Mazzocchi', 'stefano mazzocchi')<br/>('3165011', 'Xin Pan', 'xin pan')<br/>('2657155', 'Vincent Vanhoucke', 'vincent vanhoucke')</td><td>ereal@google.com
+<br/>shlens@google.com
+<br/>stefanom@google.com
+<br/>xpan@google.com
+<br/>vanhoucke@google.com
+</td></tr><tr><td>98b2f21db344b8b9f7747feaf86f92558595990c</td><td></td><td></td><td></td></tr><tr><td>98142103c311b67eeca12127aad9229d56b4a9ff</td><td>GazeDirector: Fully Articulated Eye Gaze Redirection in Video
+<br/><b>University of Cambridge, UK 2Carnegie Mellon University, USA</b><br/><b>Max Planck Institute for Informatics, Germany</b><br/>4Microsoft
+</td><td>('34399452', 'Erroll Wood', 'erroll wood')<br/>('1767184', 'Louis-Philippe Morency', 'louis-philippe morency')</td><td></td></tr><tr><td>9820920d4544173e97228cb4ab8b71ecf4548475</td><td>ORIGINAL RESEARCH
+<br/>published: 11 September 2015
+<br/>doi: 10.3389/fpsyg.2015.01386
+<br/>Automated facial coding software
+<br/>outperforms people in recognizing
+<br/>neutral faces as neutral from
+<br/>standardized datasets
+<br/><b>The Amsterdam School of Communication Research, University of Amsterdam</b><br/>Amsterdam, Netherlands
+<br/>Little is known about people’s accuracy of recognizing neutral faces as neutral. In this
+<br/>paper, I demonstrate the importance of knowing how well people recognize neutral
+<br/>faces. I contrasted human recognition scores of 100 typical, neutral front-up facial
+<br/>images with scores of an arguably objective judge – automated facial coding (AFC)
+<br/>software. I hypothesized that the software would outperform humans in recognizing
+<br/>neutral faces because of the inherently objective nature of computer algorithms. Results
+<br/>confirmed this hypothesis. I provided the first-ever evidence that computer software
+<br/>(90%) was more accurate in recognizing neutral faces than people were (59%). I posited
+<br/>two theoretical mechanisms, i.e., smile-as-a-baseline and false recognition of emotion,
+<br/>as possible explanations for my findings.
+<br/>Keywords: non-verbal communication, facial expression, face recognition, neutral face, automated facial coding
+<br/>Introduction
+<br/>lack of anger,
+<br/>face should indicate lack of emotion, e.g.,
+<br/>Recognizing a neutral face as neutral is vital in social interactions. By virtue of “expressing”
+<br/>“nothing” (for a separate discussion on faces “expressing” something, see Russell and Fernández-
+<br/>Dols, 1997), a neutral
+<br/>fear, or
+<br/>disgust. This article’s inspiration was the interesting observation that in the literature on
+<br/>facial recognition, little attention has been paid to neutral face recognition scores of human
+<br/>raters. Russell (1994) and Nelson and Russell (2013), who provided the two most important
+<br/>overviews on the topic, did not include or discuss recognition rates of lack of emotion
+<br/>(neutral) in neutral faces. They provided overviews of matching scores (i.e., accuracy) for
+<br/>six basic emotions, but they were silent on the issue of recognition accuracy of neutral
+<br/>faces.
+<br/>A distinct lack of articles that explicitly report accuracy scores for recognition of neutral face
+<br/>could explain the silence of researchers in this field. One notable exception is the Amsterdam
+<br/>Dynamic Facial Expression Set (ADFES; van der Schalk et al., 2011), where the authors provide
+<br/>an average matching score of 0.67 for their neutral faces. This score is considerably low when one
+<br/>considers that an average for six basic emotions is also in this range ( 0.67, see Nelson and Russell,
+<br/>2013, Table A1 for datasets between pre-1994 and 2010).
+<br/>Edited by:
+<br/>Paola Ricciardelli,
+<br/><b>University of Milano-Bicocca, Italy</b><br/>Reviewed by:
+<br/>Luis J. Fuentes,
+<br/>Universidad de Murcia, Spain
+<br/>Francesca Gasparini,
+<br/><b>University of Milano-Bicocca, Italy</b><br/>*Correspondence:
+<br/>The Amsterdam School
+<br/>of Communication Research,
+<br/>Department of Communication
+<br/><b>Science, University of Amsterdam</b><br/>Postbus 15793,
+<br/>1001 NG Amsterdam, Netherlands
+<br/>Specialty section:
+<br/>This article was submitted to
+<br/>Cognition,
+<br/>a section of the journal
+<br/>Frontiers in Psychology
+<br/>Received: 22 April 2015
+<br/>Accepted: 31 August 2015
+<br/>Published: 11 September 2015
+<br/>Citation:
+<br/>Lewinski P (2015) Automated facial
+<br/>coding software outperforms people
+<br/>in recognizing neutral faces as neutral
+<br/>from standardized datasets.
+<br/>Front. Psychol. 6:1386.
+<br/>doi: 10.3389/fpsyg.2015.01386
+<br/>Frontiers in Psychology | www.frontiersin.org
+<br/>September 2015 | Volume 6 | Article 1386
+</td><td>('6402753', 'Peter Lewinski', 'peter lewinski')<br/>('6402753', 'Peter Lewinski', 'peter lewinski')</td><td>p.lewinski@uva.nl
+</td></tr><tr><td>9853136dbd7d5f6a9c57dc66060cab44a86cd662</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 34– No.2, November 2011
+<br/>Improving the Neural Network Training for Face
+<br/>Recognition using Adaptive Learning Rate, Resilient
+<br/>Back Propagation and Conjugate Gradient Algorithm
+<br/>M.Sc. Student
+<br/>Department of Electrical
+<br/><b>Engineering, Iran University</b><br/>of Science and Technology,
+<br/>Tehran, Iran
+<br/>Saeid Sanei
+<br/>Associate Professor
+<br/>Department of Computing,
+<br/>Faculty of Engineering and
+<br/><b>Physical Sciences, University</b><br/>of Surrey, UK
+<br/>Karim Mohammadi
+<br/>Professor
+<br/>Department of Electrical
+<br/><b>Engineering, Iran University</b><br/>of Science and Technology,
+<br/>Tehran, Iran
+</td><td>('47250218', 'Hamed Azami', 'hamed azami')</td><td></td></tr><tr><td>989332c5f1b22604d6bb1f78e606cb6b1f694e1a</td><td>Recurrent Face Aging
+<br/><b>University of Trento, Italy</b><br/><b>National University of Singapore</b><br/><b>Research Center for Learning Science, Southeast University, Nanjing, China</b><br/><b>Arti cial Intelligence Institute, China</b></td><td>('39792736', 'Wei Wang', 'wei wang')<br/>('10338111', 'Zhen Cui', 'zhen cui')<br/>('32059677', 'Yan Yan', 'yan yan')<br/>('33221685', 'Jiashi Feng', 'jiashi feng')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')<br/>('2287686', 'Xiangbo Shu', 'xiangbo shu')<br/>('1703601', 'Nicu Sebe', 'nicu sebe')</td><td>{wei.wang,yan.yan,niculae.sebe}@unitn.it {elefjia,eleyans}@nus.edu.sg
+<br/>zhen.cui@seu.edu.cn shuxb104@gmail.com
+</td></tr><tr><td>982f5c625d6ad0dac25d7acbce4dabfb35dd7f23</td><td>Facial Expression Recognition by SVM-based Two-stage Classifier on
+<br/>Gabor Features
+<br/>School of Information Science
+<br/><b>Japan Advanced Institute of Science and Technology</b><br/>Ashahi-dai 1-8, Nomi, Ishikawa 923-1292, Japan
+</td><td>('1753878', 'Fan Chen', 'fan chen')<br/>('1791753', 'Kazunori Kotani', 'kazunori kotani')</td><td>chen-fan@jaist.ac.jp
+<br/>ikko@jaist.ac.jp
+</td></tr><tr><td>98af221afd64a23e82c40fd28d25210c352e41b7</td><td>ISCA Archive
+<br/>http://www.isca-speech.org/archive
+<br/>AVSP 2010 -- International Conference
+<br/>on Audio-Visual Speech Processing
+<br/>Hakone, Kanagawa, Japan
+<br/>September 30--October 3, 2010
+<br/>Exploring Visual Features Through Gabor Representations for Facial
+<br/>Expression Detection
+<br/><b>Image and Video Research Laboratory, Queensland University of Technology</b><br/>GPO Box 2424, Brisbane 4001, Australia
+<br/><b>Robotics Institute, Carnegie Mellon University</b><br/><b>University of Pittsburgh, Pittsburgh, USA</b></td><td>('2739248', 'Sien W. Chew', 'sien w. chew')<br/>('1713496', 'Patrick Lucey', 'patrick lucey')<br/>('1729760', 'Sridha Sridharan', 'sridha sridharan')<br/>('3140440', 'Clinton Fookes', 'clinton fookes')</td><td>s4.chew@student.qut.edu.au, patlucey@andrew.cmu.edu, {s.sridharan;c.fookes}@qut.edu.au
+</td></tr><tr><td>9893865afdb1de55fdd21e5d86bbdb5daa5fa3d5</td><td>Illumination Normalization Using Logarithm Transforms
+<br/>for Face Authentication
+<br/><b>Carnegie Mellon University</b><br/>5000 Forbes Ave, Pittsburgh, USA
+</td><td>('1794486', 'Marios Savvides', 'marios savvides')</td><td>msavvid@ri.cmu.edu
+<br/>kumar@ece.cmu.edu
+</td></tr><tr><td>988d1295ec32ce41d06e7cf928f14a3ee079a11e</td><td>Semantic Deep Learning
+<br/>September 29, 2015
+</td><td>('36097730', 'Hao Wang', 'hao wang')</td><td></td></tr><tr><td>98c548a4be0d3b62971e75259d7514feab14f884</td><td>Deep generative-contrastive networks for facial expression recognition
+<br/><b>Samsung Advanced Institute of Technology (SAIT), KAIST</b></td><td>('2310577', 'Youngsung Kim', 'youngsung kim')<br/>('1757573', 'ByungIn Yoo', 'byungin yoo')<br/>('9942811', 'Youngjun Kwak', 'youngjun kwak')<br/>('36995891', 'Changkyu Choi', 'changkyu choi')<br/>('1769295', 'Junmo Kim', 'junmo kim')</td><td>yo.s.ung.kim@samsung.com, byungin.yoo@kaist.ac.kr, yjk.kwak@samsung.com, changkyu choi@samsung.com,
+<br/>junmo.kim@ee.kaist.ac.kr
+</td></tr><tr><td>9887ab220254859ffc7354d5189083a87c9bca6e</td><td>Generic Image Classification Approaches Excel on Face Recognition
+<br/><b>Nanjing University of Science and Technology, China</b><br/><b>The University of Adelaide, Australia</b></td><td>('2731972', 'Fumin Shen', 'fumin shen')<br/>('1780381', 'Chunhua Shen', 'chunhua shen')</td><td></td></tr><tr><td>985cd420c00d2f53965faf63358e8c13d1951fa8</td><td>Pixel-Level Hand Detection with Shape-aware
+<br/>Structured Forests
+<br/>Department of Computer Science
+<br/><b>The University of Hong Kong</b><br/>Pokfulam Road, Hong Kong
+</td><td>('35130187', 'Xiaolong Zhu', 'xiaolong zhu')<br/>('34760532', 'Xuhui Jia', 'xuhui jia')</td><td>{xlzhu,xhjia,kykwong}@cs.hku.hk
+</td></tr><tr><td>981449cdd5b820268c0876477419cba50d5d1316</td><td>Learning Deep Features for One-Class
+<br/>Classification
+</td><td>('15206897', 'Pramuditha Perera', 'pramuditha perera')<br/>('1741177', 'Vishal M. Patel', 'vishal m. patel')</td><td></td></tr><tr><td>9821669a989a3df9d598c1b4332d17ae8e35e294</td><td>Minimal Correlation Classification
+<br/><b>The Blavatnik School of Computer Science, Tel Aviv University, Israel</b></td><td>('21494706', 'Noga Levy', 'noga levy')<br/>('1776343', 'Lior Wolf', 'lior wolf')</td><td></td></tr><tr><td>9854145f2f64d52aac23c0301f4bb6657e32e562</td><td>An Improved Face Verification Approach based on
+<br/>Speedup Robust Features and Pairwise Matching
+<br/>Center for Electrical Engineering and Informatics (CEEI)
+<br/><b>Federal University of Campina Grande (UFCG</b><br/>Campina Grande, Para´ıba, Brazil
+</td><td>('2092178', 'Herman Martins Gomes', 'herman martins gomes')</td><td>Email: {edumoura,hmg}@dsc.ufcg.edu.br, carvalho@dee.ufcg.edu.br
+</td></tr><tr><td>9865fe20df8fe11717d92b5ea63469f59cf1635a</td><td>YUCEL ET AL.: WILDEST FACES
+<br/>Wildest Faces: Face Detection and
+<br/>Recognition in Violent Settings
+<br/>Pinar Duygulu1
+<br/>1 Department of Computer Science
+<br/><b>Hacettepe University</b><br/>Ankara, Turkey
+<br/>2 Department of Computer Engineering
+<br/><b>Middle East Technical University</b><br/>Ankara, Turkey
+<br/>* indicates equal contribution.
+</td><td>('46234524', 'Mehmet Kerim Yucel', 'mehmet kerim yucel')<br/>('39032755', 'Yunus Can Bilge', 'yunus can bilge')<br/>('46437368', 'Oguzhan Oguz', 'oguzhan oguz')<br/>('2011587', 'Nazli Ikizler-Cinbis', 'nazli ikizler-cinbis')<br/>('1939006', 'Ramazan Gokberk Cinbis', 'ramazan gokberk cinbis')</td><td>mkerimyucel@hacettepe.edu.tr
+<br/>yunuscan.bilge@hacettepe.edu.tr
+<br/>oguzhan.oguz@hacettepe.edu.tr
+<br/>nazli@cs.hacettepe.edu.tr
+<br/>pinar@cs.hacettepe.edu.tr
+<br/>gcinbis@ceng.metu.edu.tr
+</td></tr><tr><td>98c2053e0c31fab5bcb9ce5386335b647160cc09</td><td>A Distributed Framework for Spatio-temporal Analysis on Large-scale Camera
+<br/>Networks
+<br/><b>Georgia Institute of Technology</b><br/><b>University of Stuttgart</b><br/>†SUNY Buffalo
+</td><td>('5540701', 'Kirak Hong', 'kirak hong')<br/>('1723877', 'Venu Govindaraju', 'venu govindaraju')<br/>('1752885', 'Bharat Jayaraman', 'bharat jayaraman')<br/>('1751741', 'Umakishore Ramachandran', 'umakishore ramachandran')</td><td>{khong9, rama}@cc.gatech.edu
+<br/>marco.voelz@ipvs.uni-stuttgart.de
+<br/>{govind, bharat}@buffalo.edu
+</td></tr><tr><td>98127346920bdce9773aba6a2ffc8590b9558a4a</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Efficient Human Action Recognition using
+<br/>Histograms of Motion Gradients and
+<br/>VLAD with Descriptor Shape Information
+<br/>Received: date / Accepted: date
+</td><td>('3429470', 'Ionut C. Duta', 'ionut c. duta')<br/>('1796198', 'Bogdan Ionescu', 'bogdan ionescu')<br/>('7661726', 'Alexander G. Hauptmann', 'alexander g. hauptmann')</td><td></td></tr><tr><td>98a660c15c821ea6d49a61c5061cd88e26c18c65</td><td>IOSR Journal of Engineering (IOSRJEN)
+<br/>e-ISSN: 2250-3021, p-ISSN: 2278-8719
+<br/>Vol. 3, Issue 4 (April. 2013), ||V1 || PP 43-48
+<br/>Face Databases for 2D and 3D Facial Recognition: A Survey
+<br/>R.Senthilkumar1, Dr.R.K.Gnanamurthy2
+<br/><b>Institute of Road and</b><br/><b>Odaiyappa College of</b><br/>Transport Technology,Erode-638 316.
+<br/>Engineering and Technology,Theni-625 531.
+</td><td></td><td></td></tr><tr><td>982fed5c11e76dfef766ad9ff081bfa25e62415a</td><td></td><td></td><td></td></tr><tr><td>98fb3890c565f1d32049a524ec425ceda1da5c24</td><td>A Robust Learning Framework Using PSM and
+<br/>Ameliorated SVMs for Emotional Recognition
+<br/><b>Graduate School of System Informatics, Kobe University, Kobe, 657-8501, Japan</b></td><td>('2866465', 'Jinhui Chen', 'jinhui chen')<br/>('21172382', 'Yosuke Kitano', 'yosuke kitano')<br/>('3207738', 'Yiting Li', 'yiting li')<br/>('1744026', 'Tetsuya Takiguchi', 'tetsuya takiguchi')<br/>('1678564', 'Yasuo Ariki', 'yasuo ariki')</td><td>{ianchen, kitano, liyiting }@me.cs.scitec.kobe-u.ac.jp
+<br/>{takigu, ariki}@kobe-u.ac.jp
+</td></tr><tr><td>98519f3f615e7900578bc064a8fb4e5f429f3689</td><td>Dictionary-based Domain Adaptation Methods
+<br/>for the Re-identification of Faces
+</td><td>('2077648', 'Qiang Qiu', 'qiang qiu')<br/>('38811046', 'Jie Ni', 'jie ni')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td></td></tr><tr><td>9825aa96f204c335ec23c2b872855ce0c98f9046</td><td>International Journal of Ethics in Engineering & Management Education
+<br/>Website: www.ijeee.in (ISSN: 2348-4748, Volume 1, Issue 5, May2014)
+<br/>FACE AND FACIAL EXPRESSION
+<br/>RECOGNITION IN 3-D USING MASKED
+<br/>PROJECTION UNDER OCCLUSION
+<br/>Jyoti patil *
+<br/>M.Tech (CSE)
+<br/>GNDEC Bidar-585401
+<br/>BIDAR, INDIA
+<br/> M.Tech (CSE)
+<br/> GNDEC Bidar- 585401
+<br/> BIDAR, INDIA
+<br/> M.Tech (CSE)
+<br/> VKIT, Bangalore- 560040
+<br/>BANGALORE, INDIA
+</td><td>('39365176', 'Gouri Patil', 'gouri patil')<br/>('4787347', 'Snehalata Patil', 'snehalata patil')</td><td>Email-jyoti.spatil35@gmail.com Email-greatgouri@gmail.com
+<br/> Email-snehasharad09@gmail.com
+</td></tr><tr><td>9825c4dddeb2ed7eaab668b55403aa2c38bc3320</td><td>Aerial Imagery for Roof Segmentation: A Large-Scale Dataset
+<br/>towards Automatic Mapping of Buildings
+<br/><b>aCenter for Spatial Information Science, University of Tokyo, Kashiwa 277-8568, Japan</b><br/><b>University of Waterloo, Waterloo, ON N2L 3G1, Canada</b><br/><b>cFaculty of Information Engineering, China University of Geosciences (Wuhan), Wuhan 430074, China</b><br/>dAtlasAI Inc., Waterloo, ON N2L 3G1, Canada
+</td><td>('1783637', 'Qi Chen', 'qi chen')<br/>('48169641', 'Lei Wang', 'lei wang')<br/>('50117915', 'Yifan Wu', 'yifan wu')<br/>('3043983', 'Guangming Wu', 'guangming wu')<br/>('40477085', 'Zhiling Guo', 'zhiling guo')</td><td></td></tr><tr><td>980266ad6807531fea94252e8f2b771c20e173b3</td><td>Continuous Regression for
+<br/>Non-Rigid Image Alignment
+<br/>Enrique S´anchez-Lozano1
+<br/>Daniel Gonz´alez-Jim´enez1
+<br/>1Multimodal Information Area, Gradiant, Vigo, Pontevedra, 36310. Spain.
+<br/><b>Robotics Institute, Carnegie Mellon University, Pittsburgh, PA, 15213. USA</b></td><td>('1707876', 'Fernando De la Torre', 'fernando de la torre')</td><td>{esanchez,dgonzalez}@gradiant.org
+<br/>ftorre@cs.cmu.edu
+</td></tr><tr><td>53d78c8dbac7c9be8eb148c6a9e1d672f1dd72f9</td><td>Discriminative vs. Generative Object Recognition:
+<br/>Objects, Faces, and the Web
+<br/>Thesis by
+<br/>In Partial Fulfillment of the Requirements
+<br/>for the Degree of
+<br/>Doctor of Philosophy
+<br/><b>California Institute of Technology</b><br/>Pasadena, California
+<br/>2007
+<br/>(Defended April 30, 2007)
+</td><td>('3075121', 'Alex Holub', 'alex holub')</td><td></td></tr><tr><td>533d14e539ae5cdca0ece392487a2b19106d468a</td><td>Bidirectional Multirate Reconstruction for Temporal Modeling in Videos
+<br/><b>University of Technology Sydney</b></td><td>('2948393', 'Linchao Zhu', 'linchao zhu')<br/>('2351434', 'Zhongwen Xu', 'zhongwen xu')<br/>('1698559', 'Yi Yang', 'yi yang')</td><td>{zhulinchao7, zhongwen.s.xu, yee.i.yang}@gmail.com
+</td></tr><tr><td>5334ac0a6438483890d5eef64f6db93f44aacdf4</td><td></td><td></td><td></td></tr><tr><td>53dd25350d3b3aaf19beb2104f1e389e3442df61</td><td></td><td></td><td></td></tr><tr><td>53e081f5af505374c3b8491e9c4470fe77fe7934</td><td>Unconstrained Realtime Facial Performance Capture
+<br/><b>University of Southern California</b><br/>† Industrial Light & Magic
+<br/>Figure 1: Calibration-free realtime facial performance capture on highly occluded subjects using an RGB-D sensor.
+</td><td>('2519072', 'Pei-Lun Hsieh', 'pei-lun hsieh')<br/>('1797422', 'Chongyang Ma', 'chongyang ma')<br/>('2977637', 'Jihun Yu', 'jihun yu')<br/>('1706574', 'Hao Li', 'hao li')</td><td></td></tr><tr><td>53698b91709112e5bb71eeeae94607db2aefc57c</td><td>Two-Stream Convolutional Networks
+<br/>for Action Recognition in Videos
+<br/><b>Visual Geometry Group, University of Oxford</b></td><td>('34838386', 'Karen Simonyan', 'karen simonyan')<br/>('1688869', 'Andrew Zisserman', 'andrew zisserman')</td><td>{karen,az}@robots.ox.ac.uk
+</td></tr><tr><td>5394d42fd27b7e14bd875ec71f31fdd2fcc8f923</td><td>Visual Recognition Using Directional Distribution Distance
+<br/>National Key Laboratory for Novel Software Technology
+<br/><b>Nanjing University, China</b><br/>Minieye, Youjia Innovation LLC
+</td><td>('1808816', 'Jianxin Wu', 'jianxin wu')<br/>('2226422', 'Bin-Bin Gao', 'bin-bin gao')<br/>('15527784', 'Guoqing Liu', 'guoqing liu')</td><td>guoqing@minieye.cc
+<br/>wujx2001@nju.edu.cn, gaobb@lamda.nju.edu.cn
+</td></tr><tr><td>530243b61fa5aea19b454b7dbcac9f463ed0460e</td><td></td><td></td><td></td></tr><tr><td>5397c34a5e396658fa57e3ca0065a2878c3cced7</td><td>Lighting Normalization with Generic Intrinsic Illumination Subspace for Face
+<br/>Recognition
+<br/><b>Institute of Information Science, Academia Sinica, Taipei, Taiwan</b></td><td>('1686057', 'Chia-Ping Chen', 'chia-ping chen')<br/>('1720473', 'Chu-Song Chen', 'chu-song chen')</td><td>{cpchen, song}@iis.sinica.edu.tw
+</td></tr><tr><td>539ca9db570b5e43be0576bb250e1ba7a727d640</td><td></td><td></td><td></td></tr><tr><td>539287d8967cdeb3ef60d60157ee93e8724efcac</td><td>Learning Deep (cid:96)0 Encoders
+<br/><b>Beckman Institute, University of Illinois at Urbana-Champaign, Urbana, IL 61801, USA</b><br/><b>University of Science and Technology of China, Hefei, 230027, China</b></td><td>('2969311', 'Zhangyang Wang', 'zhangyang wang')<br/>('1682497', 'Qing Ling', 'qing ling')<br/>('1739208', 'Thomas S. Huang', 'thomas s. huang')</td><td></td></tr><tr><td>532f7ec8e0c8f7331417dd4a45dc2e8930874066</td><td>6060
+<br/>2014 IEEE International Conference on Acoustic, Speech and Signal Processing (ICASSP)
+<br/>978-1-4799-2893-4/14/$31.00 ©2014 IEEE
+<br/>Box 451, Thessaloniki 54124, GREECE
+<br/><b>Aristotle University of Thessaloniki</b><br/>Department of Informatics
+<br/>tel: +30 2310 996361
+<br/>1. INTRODUCTION
+</td><td>('1905139', 'Olga Zoidi', 'olga zoidi')<br/>('1718330', 'Nikos Nikolaidis', 'nikos nikolaidis')<br/>('1698588', 'Ioannis Pitas', 'ioannis pitas')</td><td>{ozoidi, nikolaid, pitas}@aiia.csd.auth.gr
+</td></tr><tr><td>53c8cbc4a3a3752a74f79b74370ed8aeed97db85</td><td></td><td></td><td></td></tr><tr><td>53c36186bf0ffbe2f39165a1824c965c6394fe0d</td><td>I Know How You Feel: Emotion Recognition with Facial Landmarks
+<br/><b>Tooploox 2Polish-Japanese Academy of Information Technology 3Warsaw University of Technology</b></td><td>('22188614', 'Ivona Tautkute', 'ivona tautkute')<br/>('1760267', 'Tomasz Trzcinski', 'tomasz trzcinski')<br/>('48657002', 'Adam Bielski', 'adam bielski')</td><td>{firstname.lastname}@tooploox.com
+</td></tr><tr><td>5366573e96a1dadfcd4fd592f83017e378a0e185</td><td>Böhlen, Chandola and Salunkhe
+<br/>Server, server in the cloud.
+<br/>Who is the fairest in the crowd?
+</td><td></td><td></td></tr><tr><td>53a41c711b40e7fe3dc2b12e0790933d9c99a6e0</td><td>Recurrent Memory Addressing for describing videos
+<br/><b>Indian Institute of Technology Kharagpur</b></td><td>('7284555', 'Arnav Kumar Jain', 'arnav kumar jain')<br/>('6565766', 'Kumar Krishna Agrawal', 'kumar krishna agrawal')<br/>('1781070', 'Pabitra Mitra', 'pabitra mitra')</td><td>{arnavkj95, abhinavagarawalla, kumarkrishna, pabitra}@iitkgp.ac.in
+</td></tr><tr><td>53bfe2ab770e74d064303f3bd2867e5bf7b86379</td><td>Learning to Synthesize and Manipulate Natural Images
+<br/>By
+<br/>A dissertation submitted in partial satisfaction of the
+<br/>requirements for the degree of
+<br/>Doctor of Philosophy
+<br/>in
+<br/>Engineering - Electrical Engineering and Computer Science
+<br/>in the
+<br/>Graduate Division
+<br/>of the
+<br/><b>University of California, Berkeley</b><br/>Committee in charge:
+<br/>Professor Alexei A. Efros, Chair
+<br/>Professor Jitendra Malik
+<br/>Professor Ren Ng
+<br/>Professor Michael DeWeese
+<br/>Fall 2017
+</td><td>('3132726', 'Junyan Zhu', 'junyan zhu')</td><td></td></tr><tr><td>533bfb82c54f261e6a2b7ed7d31a2fd679c56d18</td><td>Technical Report MSU-CSE-14-1
+<br/>Unconstrained Face Recognition: Identifying a
+<br/>Person of Interest from a Media Collection
+</td><td>('2180413', 'Lacey Best-Rowden', 'lacey best-rowden')<br/>('34393045', 'Hu Han', 'hu han')<br/>('40653304', 'Charles Otto', 'charles otto')<br/>('1817623', 'Brendan Klare', 'brendan klare')<br/>('6680444', 'Anil K. Jain', 'anil k. jain')</td><td></td></tr><tr><td>537d8c4c53604fd419918ec90d6ef28d045311d0</td><td>Active Collaborative Ensemble Tracking
+<br/><b>Graduate School of Informatics, Kyoto University</b><br/>Yoshida-Honmachi, Sakyo Ward, Kyoto 606–8501, Japan
+</td><td>('2146623', 'Kourosh Meshgi', 'kourosh meshgi')<br/>('31095396', 'Maryam Sadat Mirzaei', 'maryam sadat mirzaei')<br/>('38809507', 'Shigeyuki Oba', 'shigeyuki oba')<br/>('2851612', 'Shin Ishii', 'shin ishii')</td><td>meshgi-k@sys.i.kyoto-u.ac.jp
+</td></tr><tr><td>530ce1097d0681a0f9d3ce877c5ba31617b1d709</td><td></td><td></td><td></td></tr><tr><td>53ce84598052308b86ba79d873082853022aa7e9</td><td>Optimized Method for Real-Time Face Recognition System Based
+<br/>on PCA and Multiclass Support Vector Machine
+<br/><b>IEEE Member, Shahid Rajaee Teacher training University</b><br/>Tehran, Iran
+<br/><b>Institute of Computer science, Shahid Bahonar University</b><br/>Shiraz, Iran
+<br/><b>Islamic Azad University, Science and Research Campus</b><br/>Hamedan, Iran
+</td><td>('1763181', 'Reza Azad', 'reza azad')<br/>('39864738', 'Babak Azad', 'babak azad')<br/>('2904132', 'Iman Tavakoli Kazerooni', 'iman tavakoli kazerooni')</td><td>rezazad68@gmail.com
+<br/>babak.babi72@gmail.com
+<br/>iman_tavakoli2008@yahoo.com
+</td></tr><tr><td>3fbd68d1268922ee50c92b28bd23ca6669ff87e5</td><td>598
+<br/>IEEE TRANSACTIONS ON IMAGE PROCESSING, VOL. 10, NO. 4, APRIL 2001
+<br/>A Shape- and Texture-Based Enhanced Fisher
+<br/>Classifier for Face Recognition
+</td><td>('39664966', 'Chengjun Liu', 'chengjun liu')<br/>('1781577', 'Harry Wechsler', 'harry wechsler')</td><td></td></tr><tr><td>3fe4109ded039ac9d58eb9f5baa5327af30ad8b6</td><td>Spatio-Temporal GrabCut Human Segmentation for Face and Pose Recovery
+<br/>Antonio Hern´andez1
+<br/><b>University of Barcelona, Gran Via de les Corts Catalanes 585, 08007 Barcelona, Spain</b><br/>1 Computer Vision Center, Campus UAB, 08193 Bellaterra, Barcelona, Spain.
+</td><td>('3276130', 'Miguel Reyes', 'miguel reyes')<br/>('7855312', 'Sergio Escalera', 'sergio escalera')<br/>('1724155', 'Petia Radeva', 'petia radeva')</td><td>ahernandez@cvc.uab.es
+<br/>mreyese@gmail.com
+<br/>sergio@maia.ub.es
+<br/>petia@cvc.uab.es
+</td></tr><tr><td>3f22a4383c55ceaafe7d3cfed1b9ef910559d639</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+<br/>Robust Kronecker Component Analysis
+</td><td>('11352680', 'Mehdi Bahri', 'mehdi bahri')<br/>('1780393', 'Yannis Panagakis', 'yannis panagakis')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')</td><td></td></tr><tr><td>3fefc856a47726d19a9f1441168480cee6e9f5bb</td><td><b>Carnegie Mellon University</b><br/>Dissertations
+<br/>Summer 8-2014
+<br/>Theses and Dissertations
+<br/>Perceptually Valid Dynamics for Smiles and Blinks
+<br/><b>Carnegie Mellon University</b><br/>Follow this and additional works at: http://repository.cmu.edu/dissertations
+<br/>Recommended Citation
+<br/>Trutoiu, Laura, "Perceptually Valid Dynamics for Smiles and Blinks" (2014). Dissertations. Paper 428.
+</td><td>('2048839', 'Laura Trutoiu', 'laura trutoiu')</td><td>Research Showcase @ CMU
+<br/>This Dissertation is brought to you for free and open access by the Theses and Dissertations at Research Showcase @ CMU. It has been accepted for
+<br/>inclusion in Dissertations by an authorized administrator of Research Showcase @ CMU. For more information, please contact research-
+<br/>showcase@andrew.cmu.edu.
+</td></tr><tr><td>3fdcc1e2ebcf236e8bb4a6ce7baf2db817f30001</td><td>A top-down approach for a synthetic
+<br/>autobiographical memory system
+<br/>1Sheffield Centre for Robotics (SCentRo), Univ. of Sheffield, Sheffield, S10 2TN, UK
+<br/>2Dept. of Computer Science, Univ. of Sheffield, Sheffield, S1 4DP, UK
+<br/>3 CVAP Lab, KTH, Stockholm, Sweden
+</td><td>('2484138', 'Carl Henrik Ek', 'carl henrik ek')<br/>('1739851', 'Neil D. Lawrence', 'neil d. lawrence')<br/>('1750570', 'Tony J. Prescott', 'tony j. prescott')</td><td>andreas.damianou@shef.ac.uk
+</td></tr><tr><td>3f7cf52fb5bf7b622dce17bb9dfe747ce4a65b96</td><td>Person identity label propagation in stereo videos
+<br/>Department of Informatics
+<br/><b>Aristotle University of Thessaloniki</b><br/>Box 451, Thessaloniki 54124, GREECE
+<br/>tel: +30 2310 996361
+</td><td>('1905139', 'Olga Zoidi', 'olga zoidi')<br/>('1737071', 'Anastasios Tefas', 'anastasios tefas')<br/>('1718330', 'Nikos Nikolaidis', 'nikos nikolaidis')<br/>('1698588', 'Ioannis Pitas', 'ioannis pitas')</td><td>{tefas, nikolaid, pitas}@aiia.csd.auth.gr
+</td></tr><tr><td>3f0c51989c516a7c5dee7dec4d7fb474ae6c28d9</td><td>Not Afraid of the Dark: NIR-VIS Face Recognition via Cross-spectral
+<br/>Hallucination and Low-rank Embedding
+<br/><b>IIE, Universidad de la Rep ublica, Uruguay. 2ECE, Duke University, USA</b></td><td>('2077648', 'Qiang Qiu', 'qiang qiu')<br/>('1699339', 'Guillermo Sapiro', 'guillermo sapiro')</td><td></td></tr><tr><td>3f848d6424f3d666a1b6dd405a48a35a797dd147</td><td>GHODRATI et al.: IS 2D INFORMATION ENOUGH FOR VIEWPOINT ESTIMATION?
+<br/>Is 2D Information Enough For Viewpoint
+<br/>Estimation?
+<br/>KU Leuven, ESAT - PSI, iMinds
+<br/>Leuven, Belgium
+</td><td>('3060081', 'Amir Ghodrati', 'amir ghodrati')<br/>('3048367', 'Marco Pedersoli', 'marco pedersoli')<br/>('1704728', 'Tinne Tuytelaars', 'tinne tuytelaars')</td><td>amir.ghodrati@esat.kuleuven.be
+<br/>marco.pedersoli@esat.kuleuven.be
+<br/>tinne.tuytelaars@esat.kuleuven.be
+</td></tr><tr><td>3fa738ab3c79eacdbfafa4c9950ef74f115a3d84</td><td>DaMN – Discriminative and Mutually Nearest:
+<br/>Exploiting Pairwise Category Proximity
+<br/>for Video Action Recognition
+<br/>1 Center for Research in Computer Vision at UCF, Orlando, USA
+<br/>2 Google Research, Mountain View, USA
+<br/>http://crcv.ucf.edu/projects/DaMN/
+</td><td>('2099254', 'Rui Hou', 'rui hou')<br/>('40029556', 'Amir Roshan Zamir', 'amir roshan zamir')<br/>('1694199', 'Rahul Sukthankar', 'rahul sukthankar')<br/>('1745480', 'Mubarak Shah', 'mubarak shah')</td><td></td></tr><tr><td>3fb26f3abcf0d287243646426cd5ddeee33624d4</td><td>Joint Training of Cascaded CNN for Face Detection
+<br/><b>Grad. School at Shenzhen, Tsinghua University</b><br/><b>Tsinghua University 4SenseTime</b></td><td>('2137185', 'Hongwei Qin', 'hongwei qin')<br/>('1721677', 'Junjie Yan', 'junjie yan')<br/>('2693308', 'Xiu Li', 'xiu li')<br/>('1705418', 'Xiaolin Hu', 'xiaolin hu')</td><td>{qhw12@mails., li.xiu@sz., xlhu@}tsinghua.edu.cn yanjunjie@outlook.com
+</td></tr><tr><td>3f9ca2526013e358cd8caeb66a3d7161f5507cbc</td><td>Improving Sparse Representation-Based Classification
+<br/>Using Local Principal Component Analysis
+<br/>Department of Mathematics
+<br/><b>University of California, Davis</b><br/>One Shields Avenue
+<br/>Davis, California, 95616, United States
+</td><td>('32898818', 'Chelsea Weaver', 'chelsea weaver')<br/>('3493752', 'Naoki Saito', 'naoki saito')</td><td></td></tr><tr><td>3f57c3fc2d9d4a230ccb57eed1d4f0b56062d4d5</td><td>Face Recognition Across Poses Using A Single 3D Reference Model
+<br/><b>National Taiwan University of Science and Technology</b><br/>No. 43, Sec.4, Keelung Rd., Taipei, 106, Taiwan
+</td><td>('38801529', 'Gee-Sern Hsu', 'gee-sern hsu')<br/>('3329222', 'Hsiao-Chia Peng', 'hsiao-chia peng')</td><td>∗jison@mail.ntust.edu.tw
+</td></tr><tr><td>3feb69531653e83d0986a0643e4a6210a088e3e5</td><td>Using Group Prior to Identify People in Consumer Images
+<br/><b>Carnegie Mellon University</b><br/>Pittsburgh, Pennsylvania
+<br/><b>Carnegie Mellon University</b><br/>Pittsburgh, Pennsylvania
+</td><td>('39460815', 'Andrew C. Gallagher', 'andrew c. gallagher')<br/>('1746230', 'Tsuhan Chen', 'tsuhan chen')</td><td>agallagh@cmu.edu
+<br/>tsuhan@cmu.edu
+</td></tr><tr><td>3f12701449a82a5e01845001afab3580b92da858</td><td>Joint Object Class Sequencing and Trajectory
+<br/>Triangulation (JOST)
+<br/><b>The University of North Carolina, Chapel Hill</b></td><td>('2873326', 'Enliang Zheng', 'enliang zheng')<br/>('1751643', 'Ke Wang', 'ke wang')<br/>('29274093', 'Enrique Dunn', 'enrique dunn')<br/>('40454588', 'Jan-Michael Frahm', 'jan-michael frahm')</td><td></td></tr><tr><td>3fb98e76ffd8ba79e1c22eda4d640da0c037e98a</td><td>Convolutional Neural Networks for Crop Yield Prediction using Satellite Images
+<br/>H. Russello
+</td><td></td><td></td></tr><tr><td>3fde656343d3fd4223e08e0bc835552bff4bda40</td><td>Available Online at www.ijcsmc.com
+<br/>International Journal of Computer Science and Mobile Computing
+<br/>A Monthly Journal of Computer Science and Information Technology
+<br/>ISSN 2320–088X
+<br/> IJCSMC, Vol. 2, Issue. 4, April 2013, pg.232 – 237
+<br/>RESEARCH ARTICLE
+<br/>Character Identification Using Graph
+<br/>Matching Algorithm
+<br/><b>Anna University Chennai, India</b><br/>5Assistant Professor, Department Of Computer Science and Engineering,
+<br/><b>K.S.R. College Of Engineering, Tiruchengode, India</b></td><td>('1795761', 'S. Bharathi', 's. bharathi')<br/>('36510121', 'Ranjith Kumar', 'ranjith kumar')</td><td> 1 rathiranya@gmail.com ; 2 manirathnam60@gmail.com ; 3 ramya1736@yahoo.com ; 4 ranjith.rhl@gmail.com
+</td></tr><tr><td>3f957142ef66f2921e7c8c7eadc8e548dccc1327</td><td>Merging SVMs with Linear Discriminant Analysis: A Combined Model
+<br/><b>Imperial College London, United Kingdom</b><br/><b>EEMCS, University of Twente, Netherlands</b></td><td>('1793625', 'Symeon Nikitidis', 'symeon nikitidis')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')<br/>('1694605', 'Maja Pantic', 'maja pantic')</td><td>{s.nikitidis,s.zafeiriou,m.pantic}@imperial.ac.uk
+</td></tr><tr><td>3fdfd6fa7a1cc9142de1f53e4ac7c2a7ac64c2e3</td><td>Intensity-Depth Face Alignment Using Cascade
+<br/>Shape Regression
+<br/>1 Center for Brain-like Computing and Machine Intelligence
+<br/>Department of Computer Science and Engineering
+<br/><b>Shanghai Jiao Tong University, 800 Dongchuan Road, Shanghai 200240, China</b><br/>2 Key Laboratory of Shanghai Education Commission for
+<br/>Intelligent Interaction and Cognitive Engineering
+<br/><b>Shanghai Jiao Tong University, 800 Dongchuan Road, Shanghai 200240, China</b></td><td>('1740511', 'Yang Cao', 'yang cao')<br/>('1715839', 'Bao-Liang Lu', 'bao-liang lu')</td><td></td></tr><tr><td>3f540faf85e1f8de6ce04fb37e556700b67e4ad3</td><td>Article
+<br/>Face Verification with Multi-Task and Multi-Scale
+<br/>Feature Fusion
+<br/><b>College of Sciences, Northeastern University, Shenyang 110819, China</b><br/><b>New York University Shanghai, 1555 Century Ave, Pudong</b><br/>Academic Editor: Maxim Raginsky
+<br/>Received: 18 March 2017; Accepted: 13 May 2017; Published: 17 May 2017
+</td><td>('26337951', 'Xiaojun Lu', 'xiaojun lu')<br/>('1983143', 'Yue Yang', 'yue yang')<br/>('8030754', 'Weilin Zhang', 'weilin zhang')<br/>('40435166', 'Qi Wang', 'qi wang')<br/>('2295608', 'Yang Wang', 'yang wang')</td><td>luxiaojun@mail.neu.edu.cn (X.L.); YangY1503@163.com (Y.Y.); wangy_neu@163.com (Y.W.)
+<br/>Shanghai 200122, China; wz723@nyu.edu
+<br/>* Correspondence: wangqi@mail.neu.edu.cn; Tel.: +86-24-8368-7680
+</td></tr><tr><td>3f4bfa4e3655ef392eb5ad609d31c05f29826b45</td><td>ROBUST MULTI-CAMERA VIEW FACE RECOGNITION
+<br/>Department of Computer Science and Engineering
+<br/><b>Dr. B. C. Roy Engineering College</b><br/>Durgapur - 713206
+<br/>India
+<br/>Department of Computer Science and Engineering
+<br/><b>National Institute of Technology Rourkela</b><br/>Rourkela – 769008
+<br/>India
+<br/>Department of Computer Science and Engineering
+<br/><b>Indian Institute of Technology Kanpur</b><br/>Kanpur – 208016
+<br/>India
+<br/>Department of Computer Science and Engineering
+<br/><b>Jadavpur University</b><br/>Kolkata – 700032,
+<br/>India
+<br/>face
+<br/>recognition
+<br/>to face
+<br/>filter banks
+<br/>system uses Gabor
+<br/>images produces Gabor
+</td><td>('1810015', 'Dakshina Ranjan Kisku', 'dakshina ranjan kisku')<br/>('1868921', 'Hunny Mehrotra', 'hunny mehrotra')<br/>('1687389', 'Phalguni Gupta', 'phalguni gupta')<br/>('1786127', 'Jamuna Kanta Sing', 'jamuna kanta sing')</td><td>drkisku@ieee.org; hunny04@gmail.com; pg@cse.iitk.ac.in; , jksing@ieee.org
+</td></tr><tr><td>3f5cf3771446da44d48f1d5ca2121c52975bb3d3</td><td></td><td></td><td></td></tr><tr><td>3fb4bf38d34f7f7e5b3df36de2413d34da3e174a</td><td>THOMAS AND KOVASHKA: PERSUASIVE FACES: GENERATING FACES IN ADS
+<br/>Persuasive Faces: Generating Faces in
+<br/>Advertisements
+<br/>Department of Computer Science
+<br/><b>University of Pittsburgh</b><br/>Pittsburgh, PA USA
+</td><td>('40540691', 'Christopher Thomas', 'christopher thomas')<br/>('1770205', 'Adriana Kovashka', 'adriana kovashka')</td><td>chris@cs.pitt.edu
+<br/>kovashka@cs.pitt.edu
+</td></tr><tr><td>3f14b504c2b37a0e8119fbda0eff52efb2eb2461</td><td>5727
+<br/>Joint Facial Action Unit Detection and Feature
+<br/>Fusion: A Multi-Conditional Learning Approach
+</td><td>('2308430', 'Stefanos Eleftheriadis', 'stefanos eleftheriadis')<br/>('1729713', 'Ognjen Rudovic', 'ognjen rudovic')<br/>('1694605', 'Maja Pantic', 'maja pantic')</td><td></td></tr><tr><td>3fac7c60136a67b320fc1c132fde45205cd2ac66</td><td>Remarks on Computational Facial Expression
+<br/>Recognition from HOG Features Using
+<br/>Quaternion Multi-layer Neural Network
+<br/><b>Information Systems Design, Doshisha University, Kyoto, Japan</b><br/><b>Graduate School of Doshisha University, Kyoto, Japan</b><br/><b>Intelligent Information Engineering and Science, Doshisha University, Kyoto, Japan</b></td><td>('39452921', 'Kazuhiko Takahashi', 'kazuhiko takahashi')<br/>('10728256', 'Sae Takahashi', 'sae takahashi')<br/>('1824476', 'Yunduan Cui', 'yunduan cui')<br/>('2565962', 'Masafumi Hashimoto', 'masafumi hashimoto')</td><td>{katakaha@mail,buj1078@mail4}.doshisha.ac.jp
+<br/>dum3101@mail4.doshisha.ac.jp
+<br/>mhashimo@mail.doshisha.ac.jp
+</td></tr><tr><td>3f9a7d690db82cf5c3940fbb06b827ced59ec01e</td><td>VIP: Finding Important People in Images
+<br/>Virginia Tech
+<br/>Google Inc.
+<br/>Virginia Tech
+<br/>Project: https://computing.ece.vt.edu/~mclint/vip/
+<br/>Demo: http://cloudcv.org/vip/
+</td><td>('3085140', 'Clint Solomon Mathialagan', 'clint solomon mathialagan')<br/>('39460815', 'Andrew C. Gallagher', 'andrew c. gallagher')<br/>('1746610', 'Dhruv Batra', 'dhruv batra')</td><td></td></tr><tr><td>3fd90098551bf88c7509521adf1c0ba9b5dfeb57</td><td>Page 1 of 21
+<br/>*****For Peer Review Only*****
+<br/>10
+<br/>11
+<br/>12
+<br/>13
+<br/>14
+<br/>15
+<br/>16
+<br/>17
+<br/>18
+<br/>19
+<br/>20
+<br/>21
+<br/>22
+<br/>23
+<br/>24
+<br/>25
+<br/>26
+<br/>27
+<br/>28
+<br/>29
+<br/>30
+<br/>31
+<br/>32
+<br/>33
+<br/>34
+<br/>35
+<br/>36
+<br/>37
+<br/>38
+<br/>39
+<br/>40
+<br/>41
+<br/>42
+<br/>43
+<br/>44
+<br/>45
+<br/>46
+<br/>47
+<br/>48
+<br/>49
+<br/>50
+<br/>51
+<br/>52
+<br/>53
+<br/>54
+<br/>55
+<br/>56
+<br/>57
+<br/>58
+<br/>59
+<br/>60
+<br/>Attribute-Based Classification for Zero-Shot
+<br/>Visual Object Categorization
+</td><td>('1787591', 'Christoph H. Lampert', 'christoph h. lampert')<br/>('1748758', 'Hannes Nickisch', 'hannes nickisch')<br/>('1734990', 'Stefan Harmeling', 'stefan harmeling')</td><td></td></tr><tr><td>3f623bb0c9c766a5ac612df248f4a59288e4d29f</td><td>Genetic Programming for Region Detection,
+<br/>Feature Extraction, Feature Construction and
+<br/>Classification in Image Data
+<br/>School of Engineering and Computer Science,
+<br/><b>Victoria University of Wellington, PO Box 600, Wellington 6140, New Zealand</b></td><td>('39251110', 'Andrew Lensen', 'andrew lensen')<br/>('2480750', 'Harith Al-Sahaf', 'harith al-sahaf')<br/>('1679067', 'Mengjie Zhang', 'mengjie zhang')<br/>('1712740', 'Bing Xue', 'bing xue')</td><td>{Andrew.Lensen,Harith.Al-Sahaf,Mengjie.Zhang,Bing.Xue}@ecs.vuw.ac.nz
+</td></tr><tr><td>3f4798c7701da044bdb7feb61ebdbd1d53df5cfe</td><td>VECTOR QUANTIZATION WITH CONSTRAINED LIKELIHOOD FOR FACE
+<br/>RECOGNITION
+<br/><b>University of Geneva</b><br/>Computer Science Department, Stochastic Information Processing Group
+<br/>7 Route de Drize, Geneva, Switzerland
+</td><td>('36133844', 'Dimche Kostadinov', 'dimche kostadinov')<br/>('8995309', 'Sviatoslav Voloshynovskiy', 'sviatoslav voloshynovskiy')<br/>('2771643', 'Maurits Diephuis', 'maurits diephuis')<br/>('1682792', 'Sohrab Ferdowsi', 'sohrab ferdowsi')</td><td></td></tr><tr><td>3f4c262d836b2867a53eefb959057350bf7219c9</td><td><b>Eastern Mediterranean University</b><br/>Gazimağusa, Mersin 10, TURKEY.
+<br/>
+<br/>Occlusions
+<br/>Recognizing Faces under Facial Expression Variations and Partial
+</td><td>('2108310', 'TIWUYA H. FAAYA', 'tiwuya h. faaya')</td><td></td></tr><tr><td>3f7723ab51417b85aa909e739fc4c43c64bf3e84</td><td>Improved Performance in Facial Expression
+<br/>Recognition Using 32 Geometric Features
+<br/><b>University of Bari, Bari, Italy</b><br/><b>National Institute of Optics, National Research Council, Arnesano, LE, Italy</b></td><td>('2235498', 'Giuseppe Palestra', 'giuseppe palestra')<br/>('39814343', 'Adriana Pettinicchio', 'adriana pettinicchio')<br/>('33097940', 'Marco Del Coco', 'marco del coco')<br/>('4730472', 'Marco Leo', 'marco leo')<br/>('1741861', 'Cosimo Distante', 'cosimo distante')</td><td>giuseppe.palestra@gmail.com
+</td></tr><tr><td>3f5e8f884e71310d7d5571bd98e5a049b8175075</td><td>Making a Science of Model Search: Hyperparameter Optimization
+<br/>in Hundreds of Dimensions for Vision Architectures
+<br/>J. Bergstra
+<br/><b>Rowland Institute at Harvard</b><br/>100 Edwin H. Land Boulevard
+<br/>Cambridge, MA 02142, USA
+<br/>D. Yamins
+<br/>Department of Brain and Cognitive Sciences
+<br/><b>Massachusetts Institute of Technology</b><br/>Cambridge, MA 02139, USA
+<br/>D. D. Cox
+<br/><b>Rowland Institute at Harvard</b><br/>100 Edwin H. Land Boulevard
+<br/>Cambridge, MA 02142, USA
+</td><td></td><td></td></tr><tr><td>3f63f9aaec8ba1fa801d131e3680900680f14139</td><td>Facial Expression Recognition using Local Binary
+<br/>Patterns and Kullback Leibler Divergence
+<br/>AnushaVupputuri, SukadevMeher
+<br/>
+<br/>divergence.
+<br/>role
+</td><td></td><td></td></tr><tr><td>3f0e0739677eb53a9d16feafc2d9a881b9677b63</td><td>Efficient Two-Stream Motion and Appearance 3D CNNs for
+<br/>Video Classification
+<br/>ESAT-KU Leuven
+<br/>Ali Pazandeh
+<br/>Sharif UTech
+<br/>ESAT-KU Leuven, ETH Zurich
+</td><td>('3310120', 'Ali Diba', 'ali diba')<br/>('1681236', 'Luc Van Gool', 'luc van gool')</td><td>ali.diba@esat.kuleuven.be
+<br/>pazandeh@ee.sharif.ir
+<br/>luc.vangool@esat.kuleuven.be
+</td></tr><tr><td>3f5693584d7dab13ffc12122d6ddbf862783028b</td><td>Ranking CGANs: Subjective Control over Semantic Image
+<br/>Attributes
+<br/><b>University of Bath</b></td><td>('41020280', 'Yassir Saquil', 'yassir saquil')<br/>('1808255', 'Kwang In Kim', 'kwang in kim')</td><td></td></tr><tr><td>30b15cdb72760f20f80e04157b57be9029d8a1ab</td><td>Face Aging with Identity-Preserved
+<br/>Conditional Generative Adversarial Networks
+<br/><b>Shanghaitech University</b><br/>Baidu
+<br/><b>Shanghaitech University</b></td><td>('50219041', 'Zongwei Wang', 'zongwei wang')<br/>('48785141', 'Xu Tang', 'xu tang')<br/>('2074878', 'Weixin Luo', 'weixin luo')<br/>('1702868', 'Shenghua Gao', 'shenghua gao')</td><td>wangzw@shanghaitech.edu.cn
+<br/>tangxu02@baidu.com
+<br/>{luowx, gaoshh}@shanghaitech.edu.cn
+</td></tr><tr><td>3039627fa612c184228b0bed0a8c03c7f754748c</td><td>Robust Regression on Image Manifolds for Ordered Label Denoising
+<br/><b>University of North Carolina at Charlotte</b></td><td>('1873911', 'Hui Wu', 'hui wu')<br/>('1690110', 'Richard Souvenir', 'richard souvenir')</td><td>{hwu13,souvenir}@uncc.edu
+</td></tr><tr><td>30870ef75aa57e41f54310283c0057451c8c822b</td><td>Overcoming Catastrophic Forgetting with Hard Attention to the Task
+</td><td>('50101040', 'Marius Miron', 'marius miron')</td><td></td></tr><tr><td>303065c44cf847849d04da16b8b1d9a120cef73a</td><td></td><td></td><td></td></tr><tr><td>305346d01298edeb5c6dc8b55679e8f60ba97efb</td><td>Article
+<br/>Fine-Grained Face Annotation Using Deep
+<br/>Multi-Task CNN
+<br/><b>Systems and Communication, University of Milano-Bicocca</b><br/>Received: 3 July 2018; Accepted: 13 August 2018; Published: 14 August 2018
+</td><td>('3390122', 'Luigi Celona', 'luigi celona')<br/>('2217051', 'Simone Bianco', 'simone bianco')<br/>('1743714', 'Raimondo Schettini', 'raimondo schettini')</td><td>viale Sarca, 336 Milano, Italy; bianco@disco.unimib.it (S.B.); schettini@disco.unimib.it (R.S.)
+<br/>* Correspondence: luigi.celona@disco.unimib.it
+</td></tr><tr><td>303a7099c01530fa0beb197eb1305b574168b653</td><td>Occlusion-free Face Alignment: Deep Regression Networks Coupled with
+<br/>De-corrupt AutoEncoders
+<br/>1Key Lab of Intelligent Information Processing of Chinese Academy of Sciences (CAS),
+<br/><b>Institute of Computing Technology, CAS, Beijing 100190, China</b><br/><b>University of Chinese Academy of Sciences, Beijing 100049, China</b><br/>3CAS Center for Excellence in Brain Science and Intelligence Technology
+</td><td>('1698586', 'Jie Zhang', 'jie zhang')<br/>('1693589', 'Meina Kan', 'meina kan')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('1710220', 'Xilin Chen', 'xilin chen')</td><td>{jie.zhang,meina.kan,shiguang.shan,xilin.chen}@vipl.ict.ac.cn
+</td></tr><tr><td>30cd39388b5c1aae7d8153c0ab9d54b61b474ffe</td><td>Deep Cascaded Regression for Face Alignment
+<br/><b>School of Data and Computer Science, Sun Yat-Sen University, China</b><br/><b>National University of Singapore, Singapore</b><br/>algorithm refines the shape by estimating a shape increment
+<br/>∆S. In particular, a shape increment at stage k is calculated
+<br/>as:
+</td><td>('3124720', 'Shengtao Xiao', 'shengtao xiao')<br/>('10338111', 'Zhen Cui', 'zhen cui')<br/>('40080379', 'Yan Pan', 'yan pan')<br/>('3029624', 'Chunyan Xu', 'chunyan xu')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')</td><td></td></tr><tr><td>303517dfc327c3004ae866a6a340f16bab2ee3e3</td><td>Inte rnational Journal of Engineering Technology, Manage ment and Applied Sciences
+<br/>www.ijetmas.com August 2014, Volume 2 Issue 3, ISSN 2349-4476
+<br/>
+<br/>Using Locality Preserving Projections in
+<br/>Face Recognition
+<br/>Galaxy Global Imperial Technical Campus
+<br/>Galaxy Global Imperial Technical Campus
+<br/><b>DIT UNIVERSITY, DEHRADUN</b></td><td>('34272062', 'PRACHI BANSAL', 'prachi bansal')</td><td></td></tr><tr><td>30fd1363fa14965e3ab48a7d6235e4b3516c1da1</td><td>A Deep Semi-NMF Model for Learning Hidden Representations
+<br/>Stefanos Zafeiriou
+<br/>Bj¨orn W. Schuller
+<br/><b>Imperial College London, United Kingdom</b></td><td>('2814229', 'George Trigeorgis', 'george trigeorgis')<br/>('2732737', 'Konstantinos Bousmalis', 'konstantinos bousmalis')</td><td>GEORGE.TRIGEORGIS08@IMPERIAL.AC.UK
+<br/>K.BOUSMALIS@IMPERIAL.AC.UK
+<br/>S.ZAFEIRIOU@IMPERIAL.AC.UK
+<br/>BJOERN.SCHULLER@IMPERIAL.AC.UK
+</td></tr><tr><td>309e17e6223e13b1f76b5b0eaa123b96ef22f51b</td><td>Face Recognition based on a 3D Morphable Model
+<br/><b>University of Siegen</b><br/>H¤olderlinstr. 3
+<br/>57068 Siegen, Germany
+</td><td>('2880906', 'Volker Blanz', 'volker blanz')</td><td>blanz@informatik.uni-siegen.de
+</td></tr><tr><td>3046baea53360a8c5653f09f0a31581da384202e</td><td>Deformable Face Alignment via Local
+<br/>Measurements and Global Constraints
+</td><td>('2398245', 'Jason M. Saragih', 'jason m. saragih')</td><td></td></tr><tr><td>3026722b4cbe9223eda6ff2822140172e44ed4b1</td><td>Jointly Estimating Demographics and Height with a Calibrated Camera
+<br/>Eastman Kodak Company
+<br/>Eastman Kodak Company
+<br/><b>Cornell University</b></td><td>('39460815', 'Andrew C. Gallagher', 'andrew c. gallagher')<br/>('2224373', 'Andrew C. Blose', 'andrew c. blose')<br/>('1746230', 'Tsuhan Chen', 'tsuhan chen')</td><td>andrew.gallagher@kodak.com
+<br/>andrew.blose@kodak.com
+<br/>tsuhan@ece.cornell.edu
+</td></tr><tr><td>3028690d00bd95f20842d4aec84dc96de1db6e59</td><td>Leveraging Union of Subspace Structure to Improve Constrained Clustering
+</td><td>('1782134', 'John Lipor', 'john lipor')</td><td></td></tr><tr><td>30c96cc041bafa4f480b7b1eb5c45999701fe066</td><td>1090
+<br/>Discrete Cosine Transform Locality-Sensitive
+<br/>Hashes for Face Retrieval
+</td><td>('1784929', 'Mehran Kafai', 'mehran kafai')<br/>('1745657', 'Kave Eshghi', 'kave eshghi')<br/>('1707159', 'Bir Bhanu', 'bir bhanu')</td><td></td></tr><tr><td>306957285fea4ce11a14641c3497d01b46095989</td><td>FACE RECOGNITION UNDER VARYING LIGHTING BASED ON
+<br/>DERIVATES OF LOG IMAGE
+<br/>2ICT-ISVISION Joint R&D Laboratory for Face Recognition, CAS, Beijing 100080, China
+<br/>1Graduate School, CAS, Beijing, 100039, China
+</td><td>('2343895', 'Laiyun Qing', 'laiyun qing')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('1698902', 'Wen Gao', 'wen gao')</td><td></td></tr><tr><td>304b1f14ca6a37552dbfac443f3d5b36dbe1a451</td><td>Collaborative Low-Rank Subspace Clustering
+<br/><b>aSchool of Computing and Mathematics, Charles Sturt University, Bathurst, NSW</b><br/><b>bDiscipline of Business Analytics, The University of Sydney Business School</b><br/><b>The University of Sydney, NSW 2006, Australia</b><br/>cCentre for Research in Mathematics, School of Computing, Engineering and Mathematics,
+<br/><b>Western Sydney University, Parramatta, NSW 2150, Australia</b><br/>Australia
+</td><td>('40635684', 'Stephen Tierney', 'stephen tierney')<br/>('1767638', 'Yi Guo', 'yi guo')<br/>('1750488', 'Junbin Gao', 'junbin gao')</td><td></td></tr><tr><td>306127c3197eb5544ab1e1bf8279a01e0df26120</td><td>Sparse Coding and Dictionary Learning with Linear Dynamical Systems∗
+<br/><b>Tsinghua University, State Key Lab. of Intelligent</b><br/>Technology and Systems, Tsinghua National Lab. for Information Science and Technology (TNList);
+<br/><b>Australian National University and NICTA, Australia</b></td><td>('36823190', 'Fuchun Sun', 'fuchun sun')<br/>('1678783', 'Deli Zhao', 'deli zhao')<br/>('2641547', 'Huaping Liu', 'huaping liu')<br/>('23911916', 'Mehrtash Harandi', 'mehrtash harandi')</td><td>1{huangwb12@mails, fcsun@mail, caoll12@mails, hpliu@mail}.tsinghua.edu.cn,
+<br/>2zhaodeli@gmail.com, 3Mehrtash.Harandi@nicta.com.au,
+</td></tr><tr><td>307a810d1bf6f747b1bd697a8a642afbd649613d</td><td>An affordable contactless security system access
+<br/> for restricted area
+<br/>Laboratory Le2i
+<br/><b>University Bourgogne Franche-Comt , France</b><br/>2 Odalid compagny, France
+<br/>Keywords – Smart Camera, Real-time Image Processing, Biometrics, Face Detection, Face Verifica-
+<br/>tion, EigenFaces, Support Vector Machine,
+<br/>We present in this paper a security system based on
+<br/>identity verification process and a low-cost smart cam-
+<br/>era, intended to avoid unauthorized access to restricted
+<br/>area. The Le2i laboratory has a longstanding experi-
+<br/>ence in smart cameras implementation and design [1],
+<br/>for example in the case of real-time classical face de-
+<br/>tection [2] or human fall detection [3].
+<br/>The principle of the system, fully thought and designed
+<br/>in our laboratory, is as follows: the allowed user pre-
+<br/>sents a RFID card to the reader based on Odalid system
+<br/>[4]. The card ID, time and date of authorized access are
+<br/>checked using connection to an online server. In the
+<br/>same time, multi-modality identity verification is per-
+<br/>formed using the camera.
+<br/>There are many ways to perform face recognition and
+<br/>face verification. As a first approach, we implemented a
+<br/>standard face localization using Haar cascade [5] and
+<br/>verification process based on Eigenfaces (feature ex-
+<br/>traction), with the ORL face data base (or AT&T) [6], and
+<br/>SVM (verification) [7].
+<br/>On the one hand, the training step has been performed
+<br/>with 10-folds cross validation using the 3000 first faces
+<br/>from LFW face database [8] as unauthorized class and
+<br/>20 known faces were used for the authorized class. On
+<br/>the other hand, the testing step has been performed us-
+<br/>ing the rest of the LFW data base and 40 other faces
+<br/>from the same known person. The false positive and
+<br/>false negative rates are respectively 0,004% and 1,39%
+<br/>with a standard deviation of respectively 0,006% and
+<br/>2,08%, considering a precision of 98,9% and a recall of
+<br/>98,6%.
+<br/>The current PC based implementation has been de-
+<br/>signed to be easily deployed on a Raspberry Pi3 or sim-
+<br/>ilar based target. A combination of Eigenfaces [9], Fish-
+<br/>erfaces [9] , Local Binary Patterns [9] and Generalized
+<br/>Fourier Descriptors [10] will be also studied.
+<br/>However, it is known that the use of single modality such
+<br/>as standard face luminosity for identity control leads of-
+<br/>ten to ergonomics problems due to the high intra-varia-
+<br/>bility of human faces [11]. Recent work published in the
+<br/>literature and developed in our laboratory showed that
+<br/>it is possible to extract precise multispectral body infor-
+<br/>mation from standard camera.
+<br/>The next step and originality of our system will resides
+<br/>in the fact that we will consider Near Infrared or multi-
+<br/>spectral approach in order to improve the security level
+<br/>(decrease false positive rate) as well as ergonomics
+<br/>(decrease false negative rate).
+<br/>The proposed platform enables security access to be
+<br/>improved and original solutions based on specific illumi-
+<br/>nation to be investigated.
+<br/>ACKNOWLEDGMENT
+<br/>This research was supported by the Conseil Regional
+<br/>de Bourgogne Franche-Comte, and institut Carnot
+<br/>ARTS
+<br/>REFERENCES
+<br/>[1] R. Mosqueron, J. Dubois, M. Mattavelli, D. Mauvilet, Smart camera
+<br/>based on embedded HW/SW coprocessor, EURASIP Journal on Em-
+<br/>bedded Systems, p.3:1-3:13, Hindawi Publishing Corp, 2008.
+<br/>[2] K. Khattab, J. Dubois, J. Miteran, Cascade Boosting Based Object
+<br/>Detection from High-Level Description to Hardware Implementation,
+<br/>EURASIP Journal on Embedded System, August 2009
+<br/>[3] B. Senouci, I. Charfi, B. Barthelemy, J. Dubois, J. Miteran, Fast
+<br/>prototyping of a SoC-based smart-camera: a real-time fall detection
+<br/>case study, Journal of Real-Time Image Processing, p.1-14, 2014.
+<br/>[4] http://odalid.com/
+<br/>[5] P. Viola, M.J. Jones, Robust Real-Time Face Detection, Interna-
+<br/>tional Journal of Computer Vision, p137-154, May 2004
+<br/>[6] www.cl.cam.ac.uk/research/dtg/attarchive/facedatabase.html
+<br/>[7] K. Jonsson, J. Kittler, Y.P. Li, J. Matas, Support Vector Machines
+<br/>for Face Authentication, Image and Vision Computing, p543-553,
+<br/>1999
+<br/>[8] G.B. Huang, M. Ramesh, T. Berg, E. Learned-Miller, Labeled
+<br/>Faces in the Wild: A Database for Studying Face Recognition in Un-
+<br/>constrained Environments, Tehcnical Report p07-49, October 2007
+<br/>[9] R. Jafri, H.R. Arabnia, A Survey of Face Recognition Techniques,
+<br/>Journal of Information Processing Systems, p41-68, June 2009
+<br/>[10] F. Smach, C. Lemaitre, J-P. Gauthier, J. Miteran, M. Atri, Gener-
+<br/>alized Fourier Descriptors With Applications to Objects Recognition in
+<br/>SVM Context, Journal of Mathematical Imaging and Vision, p43-47,
+<br/>2007
+<br/>[11] T. Bourlai, B. Cukic, Multi-Spectral Face Recognition: Identifica-
+<br/>tion of People in Difficult Environments, p196-201, June 2012
+</td><td>('2787483', 'Johel Mitéran', 'johel mitéran')<br/>('2274333', 'Barthélémy Heyrman', 'barthélémy heyrman')<br/>('1873153', 'Dominique Ginhac', 'dominique ginhac')<br/>('33359945', 'Julien Dubois', 'julien dubois')</td><td>Contact julien.dubois@u-bourgogne.fr
+</td></tr><tr><td>30180f66d5b4b7c0367e4b43e2b55367b72d6d2a</td><td>Template Adaptation for Face Verification and Identification
+<br/>1 Systems and Technology Research, Woburn MA USA
+<br/>2 Visionary Systems and Research, Framingham, MA USA
+<br/><b>Visual Geometry Group, University of Oxford, Oxford UK</b></td><td>('3390731', 'Nate Crosswhite', 'nate crosswhite')<br/>('36067742', 'Jeffrey Byrne', 'jeffrey byrne')<br/>('34712076', 'Chris Stauffer', 'chris stauffer')<br/>('1954340', 'Qiong Cao', 'qiong cao')<br/>('1688869', 'Andrew Zisserman', 'andrew zisserman')</td><td></td></tr><tr><td>3083d2c6d4f456e01cbb72930dc2207af98a6244</td><td>16
+<br/>Perceived Age Estimation from Face Images
+<br/>1NEC Soft, Ltd.
+<br/><b>Tokyo Institute of Technology</b><br/>Japan
+<br/>1. Introduction
+<br/>In recent years, demographic analysis in public places such as shopping malls and stations
+<br/>is attracting a great deal of attention. Such demographic information is useful for various
+<br/>purposes, e.g., designing effective marketing strategies and targeted advertisement based
+<br/>on customers’ gender and age.
+<br/>For this reason, a number of approaches have been
+<br/>explored for age estimation from face images (Fu et al., 2007; Geng et al., 2006; Guo et al.,
+<br/>2009), and several databases became publicly available recently (FG-Net Aging Database,
+<br/>n.d.; Phillips et al., 2005; Ricanek & Tesafaye, 2006).
+<br/>It has been reported that age can be
+<br/>accurately estimated under controlled environment such as frontal faces, no expression, and
+<br/>static lighting conditions. However, it is not straightforward to achieve the same accuracy
+<br/>level in a real-world environment due to considerable variations in camera settings, facial
+<br/>poses, and illumination conditions. The recognition performance of age prediction systems is
+<br/>significantly influenced by such factors as the type of camera, camera calibration, and lighting
+<br/>variations. On the other hand, the publicly available databases were mainly collected in
+<br/>semi-controlled environments. For this reason, existing age prediction systems built upon
+<br/>such databases tend to perform poorly in a real-world environment.
+<br/>In this chapter, we address the problem of perceived age estimation from face images, and
+<br/>describe our new approaches proposed in Ueki et al. (2010) and Ueki et al. (2011), which
+<br/>involve three novel aspects.
+<br/>The first novelty of our proposed approaches is to take the heterogeneous characteristics of
+<br/>human age perception into account.
+<br/>It is rare to misjudge the age of a 5-year-old child as
+<br/>15 years old, but the age of a 35-year-old person is often misjudged as 45 years old. Thus,
+<br/>magnitude of the error is different depending on subjects’ age. We carried out a large-scale
+<br/>questionnaire survey for quantifying human age perception characteristics, and propose to
+<br/>utilize the quantified characteristics in the framework of weighted regression.
+<br/>The second is an efficient active learning strategy for reducing the cost of labeling face
+<br/>samples. Given a large number of unlabeled face samples, we reveal the cluster structure
+<br/>of the data and propose to label cluster-representative samples for covering as many
+<br/>clusters as possible. This simple sampling strategy allows us to boost the performance of
+<br/>a manifold-based semi-supervised learning method only with a relatively small number of
+<br/>labeled samples.
+<br/>The third contribution is to apply a recently proposed machine learning technique called
+<br/>covariate shift adaptation (Shimodaira, 2000; Sugiyama & Kawanabe, 2011; Sugiyama et al.,
+</td><td>('2163491', 'Kazuya Ueki', 'kazuya ueki')<br/>('1853974', 'Yasuyuki Ihara', 'yasuyuki ihara')<br/>('1719221', 'Masashi Sugiyama', 'masashi sugiyama')</td><td></td></tr><tr><td>30cbd41e997445745b6edd31f2ebcc7533453b61</td><td>What Makes a Video a Video: Analyzing Temporal Information in Video
+<br/>Understanding Models and Datasets
+<br/><b>Stanford University, 2Facebook, 3Dartmouth College</b></td><td>('38485317', 'De-An Huang', 'de-an huang')<br/>('34066479', 'Vignesh Ramanathan', 'vignesh ramanathan')<br/>('49274550', 'Dhruv Mahajan', 'dhruv mahajan')<br/>('1732879', 'Lorenzo Torresani', 'lorenzo torresani')<br/>('2210374', 'Manohar Paluri', 'manohar paluri')<br/>('3216322', 'Li Fei-Fei', 'li fei-fei')<br/>('9200530', 'Juan Carlos Niebles', 'juan carlos niebles')</td><td></td></tr><tr><td>302c9c105d49c1348b8f1d8cc47bead70e2acf08</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TCSVT.2017.2710120, IEEE
+<br/>Transactions on Circuits and Systems for Video Technology
+<br/>IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY
+<br/>Unconstrained Face Recognition Using A Set-to-Set
+<br/>Distance Measure
+</td><td>('4712803', 'Jiaojiao Zhao', 'jiaojiao zhao')<br/>('1783847', 'Jungong Han', 'jungong han')</td><td></td></tr><tr><td>304a306d2a55ea41c2355bd9310e332fa76b3cb0</td><td></td><td></td><td></td></tr><tr><td>301b0da87027d6472b98361729faecf6e1d5e5f6</td><td>HEAD POSE ESTIMATION IN FACE RECOGNITION ACROSS
+<br/>POSE SCENARIOS
+<br/><b>Computer vision and Remote Sensing, Berlin university of Technology</b><br/>Sekr. FR-3-1, Franklinstr. 28/29, D-10587, Berlin, Germany.
+<br/>Keywords:
+<br/>Pose estimation, facial pose, face recognition, local energy models, shape description, local features, head
+<br/>pose classification.
+</td><td>('4241648', 'M. Saquib Sarfraz', 'm. saquib sarfraz')<br/>('2962236', 'Olaf Hellwich', 'olaf hellwich')</td><td>{saquib;hellwich}@fpk.tu-berlin.de
+</td></tr><tr><td>30b103d59f8460d80bb9eac0aa09aaa56c98494f</td><td>Enhancing Human Action Recognition with Region Proposals
+<br/>Australian Centre for Robotic Vision(ACRV), School of Electrical Engineering and Computer Science
+<br/><b>Queensland University of Technology(QUT</b></td><td>('2256817', 'Fahimeh Rezazadegan', 'fahimeh rezazadegan')<br/>('34686772', 'Sareh Shirazi', 'sareh shirazi')<br/>('1771913', 'Niko Sünderhauf', 'niko sünderhauf')<br/>('1809144', 'Michael Milford', 'michael milford')<br/>('1803115', 'Ben Upcroft', 'ben upcroft')</td><td>fahimeh.rezazadegan@qut.edu.au
+</td></tr><tr><td>5e59193a0fc22a0c37301fb05b198dd96df94266</td><td>Example-Based Modeling of Facial Texture from Deficient Data
+<br/>1 IMB / LaBRI, Universit´e de Bordeaux, France
+<br/><b>University of York, UK</b></td><td>('34895713', 'Arnaud Dessein', 'arnaud dessein')<br/>('1679753', 'Edwin R. Hancock', 'edwin r. hancock')<br/>('1687021', 'William A. P. Smith', 'william a. p. smith')<br/>('1718243', 'Richard C. Wilson', 'richard c. wilson')</td><td></td></tr><tr><td>5e6f546a50ed97658be9310d5e0a67891fe8a102</td><td>Can Spatiotemporal 3D CNNs Retrace the History of 2D CNNs and ImageNet?
+<br/><b>National Institute of Advanced Industrial Science and Technology (AIST</b><br/>Tsukuba, Ibaraki, Japan
+</td><td>('2199251', 'Kensho Hara', 'kensho hara')<br/>('1730200', 'Hirokatsu Kataoka', 'hirokatsu kataoka')<br/>('1732705', 'Yutaka Satoh', 'yutaka satoh')</td><td>{kensho.hara, hirokatsu.kataoka, yu.satou}@aist.go.jp
+</td></tr><tr><td>5e0eb34aeb2b58000726540336771053ecd335fc</td><td>Low-Quality Video Face Recognition with Deep
+<br/>Networks and Polygonal Chain Distance
+<br/><b>Vision and Fusion Lab, Karlsruhe Institute of Technology KIT, Karlsruhe, Germany</b><br/>†Fraunhofer IOSB, Karlsruhe, Germany
+</td><td>('37646107', 'Christian Herrmann', 'christian herrmann')<br/>('1783486', 'Dieter Willersinn', 'dieter willersinn')</td><td>{christian.herrmann|dieter.willersinn|juergen.beyerer}@iosb.fraunhofer.de
+</td></tr><tr><td>5e7e055ef9ba6e8566a400a8b1c6d8f827099553</td><td></td><td></td><td>Accepted manuscripts are peer-reviewed but have not been through the copyediting, formatting, or proofreadingprocess.Copyright © 2018 the authorsThis Accepted Manuscript has not been copyedited and formatted. The final version may differ from this version.Research Articles: Behavioral/CognitiveOn the role of cortex-basal ganglia interactions for category learning: Aneuro-computational approachFrancesc Villagrasa1, Javier Baladron1, Julien Vitay1, Henning Schroll1, Evan G. Antzoulatos2, Earl K.Miller3 and Fred H. Hamker11Chemnitz University of Technology, Department of Computer Science, 09107 Chemnitz, Germany2UC Davis Center for Neuroscience and Department of Neurobiology, Physiology and Behavior, Davis, CA95616, United States3The Picower Institute for Learning and Memory and Department of Brain and Cognitive Sciences,Massachusetts Institute of Technology, Cambridge, MA 02139, United StatesDOI: 10.1523/JNEUROSCI.0874-18.2018Received: 5 April 2018Revised: 7 August 2018Accepted: 28 August 2018Published: 18 September 2018Author contributions: F.V., J.V., E.G.A., and F.H.H. performed research; F.V., J.B., J.V., H.S., E.G.A., andE.K.M. analyzed data; F.V. wrote the first draft of the paper; J.B. and F.H.H. designed research; J.B., J.V., H.S.,E.G.A., E.K.M., and F.H.H. edited the paper; F.H.H. wrote the paper.Conflict of Interest: The authors declare no competing financial interests.This work has been supported by the German Research Foundation (DFG, grant agreements no. HA2630/4-2and HA2630/8-1), the European Social Fund and the Free State of Saxony (ESF, grant agreement no.ESF-100269974), the NIMH R01MH065252, and the MIT Picower Institute Innovation Fund.Corresponding author: Fred H. Hamker, fred.hamker@informatik.tu-chemnitz.de, 09107 Chemnitz, GermanyCite as: J. 
Neurosci ; 10.1523/JNEUROSCI.0874-18.2018Alerts: Sign up at www.jneurosci.org/cgi/alerts to receive customized email alerts when the fully formattedversion of this article is published. </td></tr><tr><td>5e28673a930131b1ee50d11f69573c17db8fff3e</td><td>Author manuscript, published in "Workshop on Faces in 'Real-Life' Images: Detection, Alignment, and Recognition, Marseille : France
+<br/>(2008)"
+</td><td></td><td></td></tr><tr><td>5ea9063b44b56d9c1942b8484572790dff82731e</td><td>MULTICLASS SUPPORT VECTOR MACHINES AND METRIC MULTIDIMENSIONAL
+<br/>SCALING FOR FACIAL EXPRESSION RECOGNITION
+<br/>Irene Kotsiay, Stefanos Zafeiriouy, Nikolaos Nikolaidisy and Ioannis Pitasy
+<br/><b>yAristotle University of Thessaloniki</b><br/>Thessaloniki, Greece
+</td><td></td><td>email: fekotsia, dralbert, nikolaid, pitasg@aiia.csd.auth.gr
+</td></tr><tr><td>5e16f10f2d667d17c029622b9278b6b0a206d394</td><td>Learning to Rank Binary Codes
+<br/><b>Columbia University</b><br/><b>IBM T. J. Watson Research Center</b><br/><b>Columbia University</b></td><td>('1710567', 'Jie Feng', 'jie feng')<br/>('1722649', 'Wei Liu', 'wei liu')<br/>('1678691', 'Yan Wang', 'yan wang')</td><td></td></tr><tr><td>5ef3e7a2c8d2876f3c77c5df2bbaea8a777051a7</td><td>Rendering or normalization?
+<br/>An analysis of the 3D-aided pose-invariant face recognition
+<br/>Computational Biomedicine Lab
+<br/><b>University of Houston, Houston, TX, USA</b></td><td>('2461369', 'Yuhang Wu', 'yuhang wu')<br/>('2700399', 'Shishir K. Shah', 'shishir k. shah')<br/>('1706204', 'Ioannis A. Kakadiaris', 'ioannis a. kakadiaris')</td><td>ywu36@uh.edu {sshah,ikakadia}@central.uh.edu
+</td></tr><tr><td>5ea165d2bbd305dc125415487ef061bce75dac7d</td><td>Efficient Human Action Recognition by Luminance Field Trajectory and Geometry Information
+<br/><b>Hong Kong Polytechnic University, Hong Kong, China</b><br/>2BBN Technologies, Cambridge, MA 02138, USA
+</td><td>('3079962', 'Haomian Zheng', 'haomian zheng')<br/>('2659956', 'Zhu Li', 'zhu li')<br/>('1708679', 'Yun Fu', 'yun fu')</td><td>{cshmzheng,cszli}@comp.polyu.edu.hk, yfu@bbn.com
+</td></tr><tr><td>5e6ba16cddd1797853d8898de52c1f1f44a73279</td><td>Face Identification with Second-Order Pooling
+</td><td>('2731972', 'Fumin Shen', 'fumin shen')<br/>('1780381', 'Chunhua Shen', 'chunhua shen')<br/>('1724393', 'Heng Tao Shen', 'heng tao shen')</td><td></td></tr><tr><td>5ea9cba00f74d2e113a10c484ebe4b5780493964</td><td>Automated Drowsiness Detection For Improved
+<br/>Driving Safety
+<br/><b>Sabanci University</b><br/>Faculty of
+<br/>Engineering and Natural Sciences
+<br/>Orhanli, Istanbul
+<br/><b>University of California San Diego</b><br/><b>Institute of</b><br/>Neural Computation
+<br/>La Jolla, San Diego
+</td><td>('40322754', 'Esra Vural', 'esra vural')<br/>('21691177', 'Mujdat Cetin', 'mujdat cetin')<br/>('2724380', 'Gwen Littlewort', 'gwen littlewort')<br/>('1858421', 'Marian Bartlett', 'marian bartlett')<br/>('29794862', 'Javier Movellan', 'javier movellan')</td><td></td></tr><tr><td>5e80e2ffb264b89d1e2c468fbc1b9174f0e27f43</td><td>Naming Every Individual in News Video Monologues
+<br/>School of Computer Science
+<br/><b>Carnegie Mellon University</b><br/>5000 Forbes Ave., Pittsburgh, PA 15213, USA
+<br/>1-412-268-{9747, 1448}
+</td><td>('38936351', 'Jun Yang', 'jun yang')<br/>('7661726', 'Alexander G. Hauptmann', 'alexander g. hauptmann')</td><td>{juny, alex}@cs.cmu.edu
+</td></tr><tr><td>5ec94adc9e0f282597f943ea9f4502a2a34ecfc2</td><td>Leveraging the Power of Gabor Phase for Face
+<br/>Identification: A Block Matching Approach
+<br/><b>KTH, Royal Institute of Technology</b></td><td>('39750744', 'Yang Zhong', 'yang zhong')<br/>('40565290', 'Haibo Li', 'haibo li')</td><td></td></tr><tr><td>5e0e516226413ea1e973f1a24e2fdedde98e7ec0</td><td>The Invariance Hypothesis and the Ventral Stream
+<br/>by
+<br/><b>B.S./M.S. Brandeis University</b><br/>Submitted to the Department of Brain and Cognitive Sciences
+<br/>in partial fulfillment of the requirements for the degree of
+<br/>Doctor of Philosophy
+<br/>at the
+<br/><b>MASSACHUSETTS INSTITUTE OF TECHNOLOGY</b><br/>February 2014
+<br/><b>Massachusetts Institute of Technology 2014. All rights reserved</b><br/>Author . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+<br/>Department of Brain and Cognitive Sciences
+<br/>September 5, 2013
+<br/>Certified by . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+<br/>Thesis Supervisor
+<br/>Accepted by . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+<br/>Sherman Fairchild Professor of Neuroscience and Picower Scholar
+<br/>Director of Graduate Education for Brain and Cognitive Sciences
+</td><td>('1700356', 'Joel Zaidspiner Leibo', 'joel zaidspiner leibo')<br/>('5856191', 'Tomaso Poggio', 'tomaso poggio')<br/>('1724891', 'Eugene McDermott', 'eugene mcdermott')<br/>('3034182', 'Matthew Wilson', 'matthew wilson')</td><td></td></tr><tr><td>5e821cb036010bef259046a96fe26e681f20266e</td><td></td><td></td><td></td></tr><tr><td>5e7cb894307f36651bdd055a85fdf1e182b7db30</td><td>A Comparison of Multi-class Support Vector Machine Methods for
+<br/>Face Recognition
+<br/>Department of Electrical and Computer Engineering
+<br/><b>The University of Maryland</b><br/>December 6, 2007
+</td><td></td><td>Naotoshi Seo, sonots@umd.edu
+</td></tr><tr><td>5b693cb3bedaa2f1e84161a4261df9b3f8e77353</td><td>Proc. VIIth Digital Image Computing: Techniques and Applications, Sun C., Talbot H., Ourselin S. and Adriaansen T. (Eds.), 10-12 Dec. 2003, Sydney
+<br/>Robust Face Localisation Using Motion, Colour
+<br/>& Fusion
+<br/>Speech, Audio, Image and Video Technologies Program
+<br/>Faculty of Built Environment and Engineering
+<br/><b>Queensland University of Technology</b><br/>GPO Box 2434, Brisbane QLD 4001, Australia
+<br/>http://www.bee.qut.edu.au/research/prog_saivt.shtml
+</td><td>('1763662', 'Chris McCool', 'chris mccool')<br/>('33258846', 'Matthew McKay', 'matthew mckay')<br/>('40453073', 'Scott Lowther', 'scott lowther')<br/>('1729760', 'Sridha Sridharan', 'sridha sridharan')</td><td></td></tr><tr><td>5b73b7b335f33cda2d0662a8e9520f357b65f3ac</td><td>Intensity Rank Estimation of Facial Expressions
+<br/>Based on A Single Image
+<br/><b>Institute of Information Science, Academia Sinica, Taipei, Taiwan</b><br/><b>National Taiwan University, Taipei, Taiwan</b><br/><b>Research Center for Information Technology Innovation, Academia Sinica, Taipei, Taiwan</b><br/><b>Graduate Institute of Networking and Multimedia, National Taiwan University, Taipei, Taiwan</b></td><td>('34692779', 'Kuang-Yu Chang', 'kuang-yu chang')<br/>('1720473', 'Chu-Song Chen', 'chu-song chen')<br/>('1732064', 'Yi-Ping Hung', 'yi-ping hung')</td><td>Email: song@iis.sinica.edu.tw
+</td></tr><tr><td>5b6d05ce368e69485cb08dd97903075e7f517aed</td><td>Robust Active Shape Model for
+<br/>Landmarking Frontal Faces
+<br/>Department of Electrical and Computer Engineering
+<br/><b>Carnegie Mellon University Pittsburgh, PA - 15213, USA</b><br/>June 15, 2009
+</td><td>('2363348', 'Keshav Seshadri', 'keshav seshadri')<br/>('1794486', 'Marios Savvides', 'marios savvides')</td><td>kseshadr@andrew.cmu.edu, msavvid@cs.cmu.edu
+</td></tr><tr><td>5b0bf1063b694e4b1575bb428edb4f3451d9bf04</td><td>Facial shape tracking via spatio-temporal cascade shape regression
+<br/><b>Nanjing University of Information Science and Technology</b><br/>Nanjing, China
+</td><td>('37953909', 'Jing Yang', 'jing yang')<br/>('3234063', 'Jiankang Deng', 'jiankang deng')<br/>('3198263', 'Kaihua Zhang', 'kaihua zhang')<br/>('1734954', 'Qingshan Liu', 'qingshan liu')</td><td>nuist yj@126.com
+<br/>jiankangdeng@gmail.com
+<br/>zhkhua@gmail.com
+<br/>qsliu@nuist.edu.cn
+</td></tr><tr><td>5b59e6b980d2447b2f3042bd811906694e4b0843</td><td>Two-stage Cascade Model for Unconstrained
+<br/>Face Detection
+<br/>Darijan Marčetić, Tomislav Hrkać, Slobodan Ribarić
+<br/><b>University of Zagreb, Faculty of Electrical Engineering and Computing, Croatia</b></td><td></td><td>{darijan.marcetic, tomislav.hrkac, slobodan.ribaric}@fer.hr
+</td></tr><tr><td>5bb53fb36a47b355e9a6962257dd465cd7ad6827</td><td>Mask-off: Synthesizing Face Images in the Presence of Head-mounted Displays
+<br/><b>University of Kentucky</b><br/><b>North Carolina Central University</b><br/>Figure 1: Our system automatically reconstruct photo-realistic face videos for users wearing HMD. (Left) Input NIR eye images. (Middle)
+<br/>Input face image with upper face blocked by HMD device. (Right) The output of our system.
+</td><td>('2613340', 'Yajie Zhao', 'yajie zhao')<br/>('8285167', 'Qingguo Xu', 'qingguo xu')<br/>('2257812', 'Xinyu Huang', 'xinyu huang')<br/>('38958903', 'Ruigang Yang', 'ruigang yang')</td><td></td></tr><tr><td>5b89744d2ac9021f468b3ffd32edf9c00ed7fed7</td><td>Beyond Mahalanobis Metric: Cayley-Klein Metric Learning
+<br/><b>Institute of Automation, Chinese Academy of Sciences</b><br/>Beijing, 100190, China
+</td><td>('2495602', 'Yanhong Bi', 'yanhong bi')<br/>('1684958', 'Bin Fan', 'bin fan')<br/>('3104867', 'Fuchao Wu', 'fuchao wu')</td><td>{yanhong.bi, bfan, fcwu}@nlpr.ia.ac.cn
+</td></tr><tr><td>5bfc32d9457f43d2488583167af4f3175fdcdc03</td><td>International Journal of Science and Research (IJSR), India Online ISSN: 2319-7064
+<br/>Local Gray Code Pattern (LGCP): A Robust
+<br/>Feature Descriptor for Facial Expression
+<br/>Recognition
+</td><td>('7484236', 'Mohammad Shahidul Islam', 'mohammad shahidul islam')</td><td></td></tr><tr><td>5b7cb9b97c425b52b2e6f41ba8028836029c4432</td><td>Smooth Representation Clustering
+<br/>1State Key Laboratory on Intelligent Technology and Systems, TNList
+<br/><b>Tsinghua University</b><br/><b>Key Lab. of Machine Perception, School of EECS, Peking University</b></td><td>('40234323', 'Han Hu', 'han hu')<br/>('33383055', 'Zhouchen Lin', 'zhouchen lin')<br/>('2632601', 'Jianjiang Feng', 'jianjiang feng')<br/>('39491387', 'Jie Zhou', 'jie zhou')</td><td>huh04@mails.thu.edu.cn, zlin@pku.edu.cn, {jfeng,jzhou}@tsinghua.edu.cn
+</td></tr><tr><td>5ba7882700718e996d576b58528f1838e5559225</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TAFFC.2016.2628787, IEEE
+<br/>Transactions on Affective Computing
+<br/>IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. X, NO. X, OCTOBER 2016
+<br/>Predicting Personalized Image Emotion
+<br/>Perceptions in Social Networks
+</td><td>('1755487', 'Sicheng Zhao', 'sicheng zhao')<br/>('1720100', 'Hongxun Yao', 'hongxun yao')<br/>('33375873', 'Yue Gao', 'yue gao')<br/>('38329336', 'Guiguang Ding', 'guiguang ding')<br/>('1684968', 'Tat-Seng Chua', 'tat-seng chua')</td><td></td></tr><tr><td>5b6f0a508c1f4097dd8dced751df46230450b01a</td><td>Finding Lost Children
+<br/>Ashley Michelle Eden
+<br/>Electrical Engineering and Computer Sciences
+<br/><b>University of California at Berkeley</b><br/>Technical Report No. UCB/EECS-2010-174
+<br/>http://www.eecs.berkeley.edu/Pubs/TechRpts/2010/EECS-2010-174.html
+<br/>December 20, 2010
+</td><td></td><td></td></tr><tr><td>5b9d41e2985fa815c0f38a2563cca4311ce82954</td><td>Exploitation of 3D Images for Face Authentication Under Pose and Illumination
+<br/>Variations
+<br/>1Information Processing Laboratory, Electrical and Computer Engineering Department,
+<br/><b>Aristotle University of Thessaloniki, Thessaloniki 541 24, Greece</b><br/><b>Informatics and Telematics Institute, Centre for Research and Technology Hellas</b><br/>1st Km Thermi-Panorama Rd, Thessaloniki 57001, Greece
+</td><td>('1807962', 'Filareti Tsalakanidou', 'filareti tsalakanidou')<br/>('1744180', 'Sotiris Malassiotis', 'sotiris malassiotis')<br/>('1721460', 'Michael G. Strintzis', 'michael g. strintzis')</td><td>Email: filareti@iti.gr, malasiot@iti.gr, strintzi@eng.auth.gr
+</td></tr><tr><td>5b6593a6497868a0d19312952d2b753232414c23</td><td>Face Recognition by 3D Registration for the
+<br/>Visually Impaired Using a RGB-D Sensor
+<br/><b>The City College of New York, New York, NY 10031, USA</b><br/><b>Beihang University, Beijing 100191, China</b><br/>3 The CUNY Graduate Center, New York, NY 10016, USA
+</td><td>('40617554', 'Wei Li', 'wei li')<br/>('3042950', 'Xudong Li', 'xudong li')<br/>('40152663', 'Martin Goldberg', 'martin goldberg')<br/>('4697712', 'Zhigang Zhu', 'zhigang zhu')</td><td>lwei000@citymail.cuny.edu, xdli@buaa.edu.cn,
+<br/>mgoldberg@gc.cuny.edu, zhu@cs.ccny.cuny.edu
+</td></tr><tr><td>5bb684dfe64171b77df06ba68997fd1e8daffbe1</td><td></td><td></td><td></td></tr><tr><td>5b719410e7829c98c074bc2947697fac3b505b64</td><td>ACTIVE APPEARANCE MODELS FOR AFFECT RECOGNITION USING FACIAL
+<br/>EXPRESSIONS
+<br/>Matthew Stephen Ratliff
+<br/><b>University of North Carolina Wilmington in Partial Ful llment</b><br/>A Thesis Submitted to the
+<br/>of the Requirements for the Degree of
+<br/>Master of Science
+<br/>Department of Computer Science
+<br/>Department of Information Systems and Operations Management
+<br/><b>University of North Carolina Wilmington</b><br/>2010
+<br/>Approved by
+<br/>Advisory Committee
+<br/>Curry Guinn
+<br/>Thomas Janicki
+<br/>Eric Patterson
+<br/>Chair
+<br/>Accepted by
+<br/>Dean, Graduate School
+</td><td></td><td></td></tr><tr><td>5bae9822d703c585a61575dced83fa2f4dea1c6d</td><td>MOTChallenge 2015:
+<br/>Towards a Benchmark for Multi-Target Tracking
+</td><td>('34761498', 'Anton Milan', 'anton milan')<br/>('34493380', 'Stefan Roth', 'stefan roth')<br/>('1803034', 'Konrad Schindler', 'konrad schindler')</td><td></td></tr><tr><td>5b0008ba87667085912ea474025d2323a14bfc90</td><td>SoS-RSC: A Sum-of-Squares Polynomial Approach to Robustifying Subspace
+<br/>Clustering Algorithms∗
+<br/>Electrical and Computer Engineering
+<br/><b>Northeastern University, Boston, MA</b></td><td>('1687866', 'Mario Sznaier', 'mario sznaier')</td><td>{msznaier,camps}@coe.neu.edu
+</td></tr><tr><td>5b97e997b9b654373bd129b3baf5b82c2def13d1</td><td>3D Face Tracking and Texture Fusion in the Wild
+<br/>Centre for Vision, Speech and Signal Processing
+<br/>Image Understanding and Interactive Robotics
+<br/><b>University of Surrey</b><br/>Guildford, GU2 7XH, United Kingdom
+<br/>Contact: http://www.patrikhuber.ch
+<br/><b>Reutlingen University</b><br/>D-72762 Reutlingen, Germany
+</td><td>('39976184', 'Patrik Huber', 'patrik huber')<br/>('1748684', 'Josef Kittler', 'josef kittler')<br/>('49330989', 'Philipp Kopp', 'philipp kopp')</td><td></td></tr><tr><td>5bd3d08335bb4e444a86200c5e9f57fd9d719e14</td><td>3D Face Morphable Models “In-the-Wild”
+<br/>,∗
+<br/>Stefanos Zafeiriou1
+<br/><b>Imperial College London, UK</b><br/>2Amazon, Berlin, Germany
+<br/><b>University of Oulu, Finland</b></td><td>('47456731', 'James Booth', 'james booth')<br/>('2788012', 'Epameinondas Antonakos', 'epameinondas antonakos')<br/>('2015036', 'Stylianos Ploumpis', 'stylianos ploumpis')<br/>('2814229', 'George Trigeorgis', 'george trigeorgis')<br/>('1780393', 'Yannis Panagakis', 'yannis panagakis')</td><td>1{james.booth,s.ploumpis,g.trigeorgis,i.panagakis,s.zafeiriou}@imperial.ac.uk
+<br/>2antonak@amazon.com
+</td></tr><tr><td>5babbad3daac5c26503088782fd5b62067b94fa5</td><td>Are You Sure You Want To Do That?
+<br/>Classification with Verification
+</td><td>('31920847', 'Harris Chan', 'harris chan')<br/>('36964031', 'Atef Chaudhury', 'atef chaudhury')<br/>('50715871', 'Kevin Shen', 'kevin shen')</td><td>hchan@cs.toronto.edu
+<br/>atef@cs.toronto.edu
+<br/>shenkev@cs.toronto.edu
+</td></tr><tr><td>5bb87c7462c6c1ec5d60bde169c3a785ba5ea48f</td><td>Targeting Ultimate Accuracy: Face Recognition via Deep Embedding
+<br/><b>Baidu Research Institute of Deep Learning</b></td><td>('2272123', 'Jingtuo Liu', 'jingtuo liu')</td><td></td></tr><tr><td>5b9d9f5a59c48bc8dd409a1bd5abf1d642463d65</td><td>Evolving Systems. manuscript No.
+<br/>(will be inserted by the editor)
+<br/>An evolving spatio-temporal approach for gender and age
+<br/>group classification with Spiking Neural Networks
+<br/>Received: date / Accepted: date
+</td><td>('39323169', 'Fahad Bashir Alvi', 'fahad bashir alvi')<br/>('2662466', 'Russel Pears', 'russel pears')<br/>('1686744', 'Nikola Kasabov', 'nikola kasabov')</td><td></td></tr><tr><td>5bf70c1afdf4c16fd88687b4cf15580fd2f26102</td><td>Accepted in Pattern Recognition Letters
+<br/>Pattern Recognition Letters
+<br/>journal homepage: www.elsevier.com
+<br/>Residual Codean Autoencoder for Facial Attribute Analysis
+<br/>IIIT-Delhi, New Delhi, India
+<br/>Article history:
+<br/>Received 29 March 2017
+</td><td>('40639989', 'Akshay Sethi', 'akshay sethi')<br/>('2220719', 'Maneet Singh', 'maneet singh')<br/>('39129417', 'Richa Singh', 'richa singh')<br/>('2338122', 'Mayank Vatsa', 'mayank vatsa')</td><td></td></tr><tr><td>5b2cfee6e81ef36507ebf3c305e84e9e0473575a</td><td></td><td></td><td></td></tr><tr><td>5b01d4338734aefb16ee82c4c59763d3abc008e6</td><td>A Robust Face Recognition Algorithm Based on Kernel Regularized
+<br/>Relevance-Weighted Discriminant Analysis
+<br/>
+<br/><b>Hunan Provincial Key Laboratory of Wind Generator and Its Control, Hunan Institute of Engineering, Xiangtan, China</b><br/><b>College of Electrical and Information Engineering</b><br/>or
+<br/>In
+<br/>I. INTRODUCTION
+<br/>interface and security
+<br/>recognition
+<br/>their
+<br/>from
+<br/>this paper, we propose an effective
+</td><td>('38296532', 'Di WU', 'di wu')<br/>('38296532', 'Di WU', 'di wu')</td><td> [e-mail: wudi6152007@163.com]
+</td></tr><tr><td>5b721f86f4a394f05350641e639a9d6cb2046c45</td><td>A short version of this paper is accepted to ACM Asia Conference on Computer and Communications Security (ASIACCS) 2018
+<br/>Detection under Privileged Information (Full Paper)∗
+<br/><b>Pennsylvania State University</b><br/>Patrick McDaniel
+<br/><b>Pennsylvania State University</b><br/>Vencore Labs
+<br/><b>Pennsylvania State University</b><br/><b>Army Research Laboratory</b></td><td>('2950892', 'Z. Berkay Celik', 'z. berkay celik')<br/>('1804289', 'Rauf Izmailov', 'rauf izmailov')<br/>('1967156', 'Nicolas Papernot', 'nicolas papernot')<br/>('9541640', 'Ryan Sheatsley', 'ryan sheatsley')<br/>('30792942', 'Raquel Alvarez', 'raquel alvarez')<br/>('1703726', 'Ananthram Swami', 'ananthram swami')</td><td>zbc102@cse.psu.edu
+<br/>mcdaniel@cse.psu.edu
+<br/>rizmailov@appcomsci.com
+<br/>{ngp5056,rms5643,rva5120}@cse.psu.edu
+<br/>ananthram.swami.civ@mail.mil
+</td></tr><tr><td>5b4b84ce3518c8a14f57f5f95a1d07fb60e58223</td><td>Diagnosing Error in Object Detectors
+<br/>Department of Computer Science
+<br/><b>University of Illinois at Urbana-Champaign</b></td><td>('2433269', 'Derek Hoiem', 'derek hoiem')<br/>('2918391', 'Yodsawalai Chodpathumwan', 'yodsawalai chodpathumwan')<br/>('2279233', 'Qieyun Dai', 'qieyun dai')</td><td></td></tr><tr><td>5b6ecbf5f1eecfe1a9074d31fe2fb030d75d9a79</td><td>Improving 3D Face Details based on Normal Map of Hetero-source Images
+<br/><b>Tsinghua University</b><br/>Beijing, 100084, China
+</td><td>('8100333', 'Chang Yang', 'chang yang')<br/>('1752427', 'Jiansheng Chen', 'jiansheng chen')<br/>('1949216', 'Nan Su', 'nan su')<br/>('7284296', 'Guangda Su', 'guangda su')</td><td>yangchang11@mails.tsinghua.edu.cn, jschenthu@tsinghua.edu.cn
+<br/>v377026@sina.com, susu@tsinghua.edu.cn
+</td></tr><tr><td>5b86c36e3eb59c347b81125d5dd57dd2a2c377a9</td><td>Name Identification of People in News Video
+<br/>by Face Matching
+<br/><b>Graduate School of Information Science, Nagoya University; Furo-cho, Chikusa-ku, Nagoya, 464-8601, Japan</b><br/>Japan Society for the Promotion of Science
+<br/><b>Nagoya University</b><br/>School of Information Science,
+<br/><b>Nagoya University</b></td><td>('1679187', 'Ichiro IDE', 'ichiro ide')<br/>('8027540', 'Takashi OGASAWARA', 'takashi ogasawara')<br/>('1685524', 'Tomokazu TAKAHASHI', 'tomokazu takahashi')<br/>('1725612', 'Hiroshi MURASE', 'hiroshi murase')</td><td>ide@is.nagoya-u.ac.jp, ide@nii.ac.jp
+<br/>toga@murase.m.is.nagoya-u.ac.jp
+<br/>ttakahashi@murase.m.is.nagoya-u.ac.jp
+<br/>murase@is.nagoya-u.ac.jp Graduate
+</td></tr><tr><td>5be3cc1650c918da1c38690812f74573e66b1d32</td><td>Relative Parts: Distinctive Parts for Learning Relative Attributes
+<br/>Center for Visual Information Technology, IIIT Hyderabad, India - 500032
+</td><td>('32337248', 'Ramachandruni N. Sandeep', 'ramachandruni n. sandeep')<br/>('2169614', 'Yashaswi Verma', 'yashaswi verma')<br/>('1694502', 'C. V. Jawahar', 'c. v. jawahar')</td><td></td></tr><tr><td>5bc0a89f4f73523967050374ed34d7bc89e4d9e1</td><td><b></b><br/>On: 12 August 2015, At: 08:38
+<br/>Publisher: Routledge
+<br/>Informa Ltd Registered in England and Wales Registered Number: 1072954 Registered office: 5
+<br/>Howick Place, London, SW1P 1WG
+<br/>Cognition and Emotion
+<br/><b>Publication details, including instructions for authors and subscription</b><br/>information:
+<br/>http://www.tandfonline.com/loi/pcem20
+<br/>The role of emotion transition for the
+<br/>perception of social dominance and
+<br/>affiliation
+<br/><b>University of Haifa, Haifa, Israel</b><br/><b>b The Interdisciplinary Center for Research on Emotions, University of</b><br/>Haifa, Haifa, Israel
+<br/><b>Humboldt-University, Berlin, Germany</b><br/>Published online: 11 Aug 2015.
+<br/>Click for updates
+<br/>perception of social dominance and affiliation, Cognition and Emotion, DOI: 10.1080/02699931.2015.1056107
+<br/>To link to this article: http://dx.doi.org/10.1080/02699931.2015.1056107
+<br/>PLEASE SCROLL DOWN FOR ARTICLE
+<br/>Taylor & Francis makes every effort to ensure the accuracy of all the information (the “Content”)
+<br/>contained in the publications on our platform. However, Taylor & Francis, our agents, and our
+<br/>licensors make no representations or warranties whatsoever as to the accuracy, completeness, or
+<br/>suitability for any purpose of the Content. Any opinions and views expressed in this publication
+<br/>are the opinions and views of the authors, and are not the views of or endorsed by Taylor &
+<br/>Francis. The accuracy of the Content should not be relied upon and should be independently
+<br/>verified with primary sources of information. Taylor and Francis shall not be liable for any
+<br/>losses, actions, claims, proceedings, demands, costs, expenses, damages, and other liabilities
+<br/>whatsoever or howsoever caused arising directly or indirectly in connection with, in relation to or
+<br/>arising out of the use of the Content.
+<br/>This article may be used for research, teaching, and private study purposes. Any substantial
+<br/>or systematic reproduction, redistribution, reselling, loan, sub-licensing, systematic supply, or
+<br/>distribution in any form to anyone is expressly forbidden. Terms & Conditions of access and use
+<br/>can be found at http://www.tandfonline.com/page/terms-and-conditions
+</td><td>('3141618', 'Shlomo Hareli', 'shlomo hareli')<br/>('6885116', 'Shlomo David', 'shlomo david')<br/>('3141618', 'Shlomo Hareli', 'shlomo hareli')<br/>('6885116', 'Shlomo David', 'shlomo david')</td><td></td></tr><tr><td>5b6bed112e722c0629bcce778770d1b28e42fc96</td><td>FLOREA ET AL.:CANYOUREYESTELLMEHOWYOUTHINK?
+<br/>Can Your Eyes Tell Me How You Think? A
+<br/>Gaze Directed Estimation of the Mental
+<br/>Activity
+<br/>http://alpha.imag.pub.ro/common/staff/lflorea
+<br/>http://alpha.imag.pub.ro/common/staff/cflorea
+<br/>http://alpha.imag.pub.ro/common/staff/vertan
+<br/>Image Processing and Analysis
+<br/>Laboratory, LAPI
+<br/><b>University Politehnica of Bucharest</b><br/>Bucharest, Romania
+</td><td>('2143956', 'Laura Florea', 'laura florea')<br/>('2760434', 'Corneliu Florea', 'corneliu florea')<br/>('29723670', 'Ruxandra Vrânceanu', 'ruxandra vrânceanu')<br/>('2905899', 'Constantin Vertan', 'constantin vertan')</td><td>rvranceanu@alpha.imag.pub.ro
+</td></tr><tr><td>5bde1718253ec28a753a892b0ba82d8e553b6bf3</td><td>JMLR: Workshop and Conference Proceedings 13: 79-94
+<br/>2nd Asian Conference on Machine Learning (ACML2010), Tokyo, Japan, Nov. 8{10, 2010.
+<br/>Variational Relevance Vector Machine for Tabular Data
+<br/>Dorodnicyn Computing Centre of the Russian Academy of Sciences
+<br/>119333, Russia, Moscow, Vavilov str., 40
+<br/>Dmitry Vetrov
+<br/><b>Lomonosov Moscow State University</b><br/>119992, Russia, Moscow, Leninskie Gory, 1, 2nd ed. bld., CMC department
+<br/><b>The Blavatnik School of Computer Science, The Tel-Aviv University</b><br/><b>Schreiber Building, room 103, Tel Aviv University, P.O.B. 39040, Ramat Aviv, Tel Aviv</b><br/><b>Computer Science Division, The Open University of Israel</b><br/>108 Ravutski Str. P.O.B. 808, Raanana 43107, Israel
+<br/>Editor: Masashi Sugiyama and Qiang Yang
+</td><td>('3160602', 'Dmitry Kropotov', 'dmitry kropotov')<br/>('1776343', 'Lior Wolf', 'lior wolf')<br/>('1756099', 'Tal Hassner', 'tal hassner')</td><td>dmitry.kropotov@gmail.com
+<br/>hassner@openu.ac.il
+<br/>vetrovd@yandex.ru
+<br/>wolf@cs.tau.ac.il
+</td></tr><tr><td>5b0ebb8430a04d9259b321fc3c1cc1090b8e600e</td><td></td><td></td><td></td></tr><tr><td>37c8514df89337f34421dc27b86d0eb45b660a5e</td><td>Facial Landmark Tracking by Tree-based Deformable Part Model
+<br/>Based Detector
+<br/>Michal Uˇriˇc´aˇr, Vojtˇech Franc, and V´aclav Hlav´aˇc
+<br/>Center for Machine Perception, Department of Cybernetics
+<br/><b>Faculty of Electrical Engineering, Czech Technical University in Prague</b><br/>166 27 Prague 6, Technick´a 2, Czech Republic
+</td><td></td><td>{uricamic, xfrancv, hlavac}@cmp.felk.cvut.cz
+</td></tr><tr><td>371f40f6d32ece05cc879b6954db408b3d4edaf3</td><td>Mining Semantic Affordances of Visual Object Categories
+<br/><b>Computer Science and Engineering, University of Michigan, Ann Arbor</b><br/> accelerate
+<br/> intervie w
+<br/> race
+<br/> h urt
+<br/> h u nt
+<br/> fe e d
+<br/> m a n ufacture
+<br/> o p erate
+<br/> drive
+<br/> rid e
+<br/> b o ard
+<br/>bicycle
+<br/>bird
+<br/>boat
+<br/>bottle
+<br/>car
+<br/>cat
+<br/>cow
+<br/>dining table
+<br/>horse
+<br/>person
+<br/>train
+<br/>tv
+<br/>15
+<br/>10
+<br/>−5
+<br/>−10
+<br/>−15
+<br/>−20
+<br/>−15
+<br/>airplane
+<br/>boat
+<br/>car
+<br/>train
+<br/>bus
+<br/>motorcycle
+<br/>bicycle
+<br/>chair
+<br/>tv
+<br/>couch
+<br/>dining table
+<br/>bottle
+<br/>potted plant
+<br/>person
+<br/>horse
+<br/>dog
+<br/>cow
+<br/>sheep
+<br/>cat
+<br/>bird
+<br/>−10
+<br/>−5
+<br/>10
+<br/>15
+<br/>20
+<br/>25
+<br/>30
+<br/>(a)
+<br/>(b)
+<br/>Figure 1: (a) “Affordance matrix” encoding the plausibility of each action-
+<br/>object pair. (b) 20 PASCAL VOC object classes in the semantic affordance
+<br/>space.
+<br/>Affordances are fundamental attributes of objects. Affordances reveal the
+<br/>functionalities of objects and the possible actions that can be performed on
+<br/>them. We can “hug” a dog, but not an ant. We can “turn on” a tv, but not a
+<br/>bottle. Acquiring such knowledge is crucial for recognizing human activities
+<br/>in visual data and for robots to interact with the world. The key question is:
+<br/>given an object, can an action be performed on it? While this might seem
+<br/>obvious to a human, there is no automated system that can readily answer
+<br/>this question and there is no knowledge base that provides comprehensive
+<br/>knowledge of object affordances.
+<br/>In this paper, we introduce the problem of mining the knowledge of
+<br/>semantic affordance: given an action and an object, determine whether the
+<br/>action can be applied to the object. For example, the action of “carry” form a
+<br/>valid combination with “bag”, but not with “skyscraper”. This is equivalent
+<br/>to establishing connections between action concepts and object concepts,
+<br/>or filling an “affordance matrix” encoding the plausibility of each action-
+<br/>object pair (Fig. 1). The key scientific question is: “how can we collect
+<br/>affordance knowledge”? We first introduce a new benchmark with crowd-
+<br/>sourced ground truth affordances on 20 PASCAL VOC object classes and
+<br/>957 action classes. We then study a variety of approaches including 1) text
+<br/>mining, 2) visual mining, and 3) collaborative filtering. We quantitatively
+<br/>evaluate all approaches using ground truth affordances collected through
+<br/>crowdsourcing.
+<br/>For our crowdsourcing study, we ask human annotators to label whether
+<br/>an action-object pair is a valid combination. We use the 20 object categories
+<br/>in PASCAL VOC [2]. We design experiments to obtain a list of action
+<br/>categories that are both common and “visual”. Our list contains 957 ac-
+<br/>tion categories extracted from the verb synsets on Wordnet [6] that has 1) a
+<br/>member verb that frequently occurs in text corpora, and 2) high “visualness
+<br/>score” determined by human labelers. Given the list of actions and objects,
+<br/>we set up a crowdsourcing task on Amazon Mechanical Turk (AMT). We
+<br/>ask crowd workers whether it is possible (for a human) to perform a given
+<br/>action on a given object. For instance,
+<br/>Is it possible to hunt (pursue for food or sport, as of wild animals) a car?
+<br/>For every possible action-object pair formed by the 20 PASCAL VOC ob-
+<br/>jects and the 957 visual verb synsets, we ask 5 workers to determine its
+<br/>plausibility. This gives a total of 19K action-object questions and 96K an-
+<br/>swers
+<br/>What is the distribution of 20 PASCAL object classes in their affordance
+<br/>space? We answer this by analyzing the human annotated affordances. Each
+<br/>object has a 957 dimensional“affordance vector“, where each dimension
+<br/>is the plausibility score with an action. We use PCA to project the affor-
+<br/>dance vectors to a 2-dimensional space and plot the coordinates of the object
+</td><td>('2820136', 'Yu-Wei Chao', 'yu-wei chao')<br/>('1718667', 'Zhan Wang', 'zhan wang')<br/>('1738516', 'Rada Mihalcea', 'rada mihalcea')<br/>('8342699', 'Jia Deng', 'jia deng')</td><td></td></tr><tr><td>374c7a2898180723f3f3980cbcb31c8e8eb5d7af</td><td>FACIAL EXPRESSION RECOGNITION IN VIDEOS USING A NOVEL MULTI-CLASS
+<br/>SUPPORT VECTOR MACHINES VARIANT
+<br/><b>yAristotle University of Thessaloniki</b><br/>Department of Informatics
+<br/>Box 451, 54124 Thessaloniki, Greece
+</td><td>('1754270', 'Irene Kotsia', 'irene kotsia')<br/>('1698588', 'Ioannis Pitas', 'ioannis pitas')</td><td></td></tr><tr><td>37007af698b990a3ea8592b11d264b14d39c843f</td><td>DCMSVM: Distributed Parallel Training For Single-Machine Multiclass
+<br/>Classifiers
+<br/>Computer Science Department
+<br/><b>Stony Brook University</b></td><td>('1682965', 'Xufeng Han', 'xufeng han')<br/>('39668247', 'Alexander C. Berg', 'alexander c. berg')</td><td></td></tr><tr><td>374a0df2aa63b26737ee89b6c7df01e59b4d8531</td><td>Temporal Action Localization with Pyramid of Score Distribution Features
+<br/><b>National University of Singapore, 2Shanghai Jiao Tong University</b></td><td>('1746449', 'Jun Yuan', 'jun yuan')<br/>('5796401', 'Bingbing Ni', 'bingbing ni')<br/>('1795291', 'Xiaokang Yang', 'xiaokang yang')</td><td>yuanjun@nus.edu.sg, nibingbing@sjtu.edu.cn, xkyang@sjtu.edu.cn, ashraf@nus.edu.sg
+</td></tr><tr><td>378ae5ca649f023003021f5a63e393da3a4e47f0</td><td>Multi-Class Object Localization by Combining Local Contextual Interactions
+<br/>Serge Belongie†
+<br/>Gert Lanckriet‡
+<br/>†Computer Science and Engineering Department
+<br/>‡Electrical and Computer Engineering Department
+<br/><b>University of California, San Diego</b></td><td>('1954793', 'Carolina Galleguillos', 'carolina galleguillos')</td><td>{cgallegu,bmcfee,sjb}@cs.ucsd.edu, gert@ece.ucsd.edu
+</td></tr><tr><td>37619564574856c6184005830deda4310d3ca580</td><td>A Deep Pyramid Deformable Part Model for Face Detection
+<br/>Center for Automation Research
+<br/><b>University of Maryland, College Park, MD</b></td><td>('26988560', 'Rajeev Ranjan', 'rajeev ranjan')<br/>('1741177', 'Vishal M. Patel', 'vishal m. patel')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td>{rranjan1, pvishalm, rama}@umiacs.umd.edu
+</td></tr><tr><td>372fb32569ced35eaf3740a29890bec2be1869fa</td><td>Running head: MU RHYTHM MODULATION BY CLASSIFICATION OF EMOTION 1
+<br/>Mu rhythm suppression is associated with the classification of emotion in faces
+<br/><b>University of Otago, Dunedin, New Zealand</b><br/>Corresponding authors:
+<br/>Phone: +64 (3) 479 5269; Fax: +64 (3) 479 8335
+<br/>Department of Psychology
+<br/><b>University of Otago</b><br/>PO Box 56
+<br/>Dunedin, New Zealand
+</td><td>('2187036', 'Elizabeth A. Franz', 'elizabeth a. franz')</td><td>Matthew Moore (matthew.moore@otago.ac.nz) & Liz Franz (lfranz@psy.otago.ac.nz)
+</td></tr><tr><td>37ce1d3a6415d6fc1760964e2a04174c24208173</td><td>Pose-Invariant 3D Face Alignment
+<br/>Department of Computer Science and Engineering
+<br/><b>Michigan State University, East Lansing MI</b></td><td>('2357264', 'Amin Jourabloo', 'amin jourabloo')<br/>('1759169', 'Xiaoming Liu', 'xiaoming liu')</td><td>{jourablo, liuxm}@msu.edu
+</td></tr><tr><td>3765c26362ad1095dfe6744c6d52494ea106a42c</td><td></td><td></td><td></td></tr><tr><td>3727ac3d50e31a394b200029b2c350073c1b69e3</td><td></td><td></td><td></td></tr><tr><td>37f2e03c7cbec9ffc35eac51578e7e8fdfee3d4e</td><td>WACV
+<br/>#394
+<br/>000
+<br/>001
+<br/>002
+<br/>003
+<br/>004
+<br/>005
+<br/>006
+<br/>007
+<br/>008
+<br/>009
+<br/>010
+<br/>011
+<br/>012
+<br/>013
+<br/>014
+<br/>015
+<br/>016
+<br/>017
+<br/>018
+<br/>019
+<br/>020
+<br/>021
+<br/>022
+<br/>023
+<br/>024
+<br/>025
+<br/>026
+<br/>027
+<br/>028
+<br/>029
+<br/>030
+<br/>031
+<br/>032
+<br/>033
+<br/>034
+<br/>035
+<br/>036
+<br/>037
+<br/>038
+<br/>039
+<br/>040
+<br/>041
+<br/>042
+<br/>043
+<br/>044
+<br/>045
+<br/>046
+<br/>047
+<br/>048
+<br/>049
+<br/>050
+<br/>051
+<br/>052
+<br/>053
+<br/>WACV 2015 Submission #394. CONFIDENTIAL REVIEW COPY. DO NOT DISTRIBUTE.
+<br/>Co-operative Pedestrians Group Tracking in Crowded Scenes using an MST
+<br/>Approach
+<br/>Anonymous WACV submission
+<br/>Paper ID 394
+</td><td></td><td></td></tr><tr><td>3795974e24296185d9b64454cde6f796ca235387</td><td>Finding your Lookalike:
+<br/>Measuring Face Similarity Rather than Face Identity
+<br/><b>Lafayette College</b><br/>Easton, PA
+<br/>Andrew Gallagher
+<br/>Google Research
+<br/>Mountain View, CA
+</td><td>('1803066', 'Amir Sadovnik', 'amir sadovnik')<br/>('50977255', 'Wassim Gharbi', 'wassim gharbi')<br/>('2197717', 'Thanh Vu', 'thanh vu')</td><td>{sadovnia,gharbiw,vut}@lafayette.edu
+<br/>agallagher@google.com
+</td></tr><tr><td>37278ffce3a0fe2c2bbf6232e805dd3f5267eba3</td><td>Can we still avoid automatic face detection?
+<br/>Serge Belongie1,2
+<br/><b>Cornell University 2 Cornell Tech</b></td><td>('3035230', 'Michael J. Wilber', 'michael j. wilber')<br/>('1723945', 'Vitaly Shmatikov', 'vitaly shmatikov')</td><td></td></tr><tr><td>377a1be5113f38297716c4bb951ebef7a93f949a</td><td>Dear Faculty, IGERT Fellows, IGERT Associates and Students,
+<br/>You are cordially invited to attend a Seminar presented by Albert Cruz. Please
+<br/>plan to attend.
+<br/> Albert Cruz
+<br/>IGERT Fellow
+<br/>Electrical Engineering
+<br/>
+<br/>Date: Friday, October 11, 2013
+<br/>Location: Bourns A265
+<br/>Time: 11:00am
+<br/>Facial emotion recognition with anisotropic
+<br/>inhibited gabor energy histograms
+</td><td></td><td></td></tr><tr><td>377c6563f97e76a4dc836a0bd23d7673492b1aae</td><td></td><td></td><td></td></tr><tr><td>370e0d9b89518a6b317a9f54f18d5398895a7046</td><td>IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY, VOL. X, NO. X, XXXXXXX 20XX
+<br/>Cross-pollination of normalisation techniques
+<br/>from speaker to face authentication
+<br/>using Gaussian mixture models
+<br/>and S´ebastien Marcel, Member, IEEE
+</td><td>('1843477', 'Roy Wallace', 'roy wallace')</td><td></td></tr><tr><td>37ba12271d09d219dd1a8283bc0b4659faf3a6c6</td><td>Domain Transfer for Person Re-identification
+<br/><b>Queen Mary University of London</b><br/>London, England
+</td><td>('3264124', 'Ryan Layne', 'ryan layne')<br/>('1697755', 'Timothy M. Hospedales', 'timothy m. hospedales')<br/>('2073354', 'Shaogang Gong', 'shaogang gong')</td><td>{rlayne, tmh, sgg}@eecs.qmul.ac.uk
+</td></tr><tr><td>3773e5d195f796b0b7df1fca6e0d1466ad84b5e7</td><td><b>UNIVERSITY OF CALIFORNIA</b><br/>RIVERSIDE
+<br/>Learning from Time Series in the Presence of Noise: Unsupervised and Semi-Supervised
+<br/>Approaches
+<br/>A Dissertation submitted in partial satisfaction
+<br/>of the requirements for the degree of
+<br/>Doctor of Philosophy
+<br/>in
+<br/>Computer Science
+<br/>by
+<br/>March 2008
+<br/>Dissertation Committee:
+<br/>Dr. Eamonn Keogh, Chairperson
+<br/>Dr. Vassilis Tsotras
+</td><td>('40564016', 'Dragomir Dimitrov', 'dragomir dimitrov')<br/>('1736011', 'Stefano Lonardi', 'stefano lonardi')</td><td></td></tr><tr><td>37eb666b7eb225ffdafc6f318639bea7f0ba9a24</td><td>MSU Technical Report (2014): MSU-CSE-14-5
+<br/>Age, Gender and Race Estimation from
+<br/>Unconstrained Face Images
+</td><td>('34393045', 'Hu Han', 'hu han')<br/>('40437942', 'Anil K. Jain', 'anil k. jain')</td><td></td></tr><tr><td>377f2b65e6a9300448bdccf678cde59449ecd337</td><td>Pushing the Limits of Unconstrained Face Detection:
+<br/>a Challenge Dataset and Baseline Results
+<br/>1Fujitsu Laboratories Ltd., Kanagawa, Japan
+<br/><b>Johns Hopkins University, 3400 N. Charles St, Baltimore, MD 21218, USA</b><br/><b>Rutgers University, 94 Brett Rd, Piscataway Township, NJ 08854, USA</b></td><td>('41018586', 'Hajime Nada', 'hajime nada')<br/>('2577847', 'Vishwanath A. Sindagi', 'vishwanath a. sindagi')<br/>('46197381', 'He Zhang', 'he zhang')<br/>('1741177', 'Vishal M. Patel', 'vishal m. patel')</td><td>nada.hajime@jp.fujitsu.com, vishwanath.sindagi@gmail.com, he.zhang92@rutgers.edu,
+<br/>vpatel36@jhu.edu
+</td></tr><tr><td>375435fb0da220a65ac9e82275a880e1b9f0a557</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI
+<br/>From Pixels to Response Maps: Discriminative Image
+<br/>Filtering for Face Alignment in the Wild
+</td><td>('3183108', 'Akshay Asthana', 'akshay asthana')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')<br/>('1902288', 'Shiyang Cheng', 'shiyang cheng')<br/>('1694605', 'Maja Pantic', 'maja pantic')</td><td></td></tr><tr><td>370b6b83c7512419188f5373a962dd3175a56a9b</td><td>Face Alignment Refinement via Exploiting
+<br/>Low-Rank property and Temporal Stability
+<br/>Shuang LIU
+<br/><b>Bournemouth University</b><br/><b>Bournemouth University</b><br/>Wenyu HU
+<br/><b>Gannan Normal University</b><br/>Xiaosong YANG
+<br/>Ruofeng TONG
+<br/><b>Zhejiang University</b><br/>Jian J. ZHANG
+<br/><b>Bournemouth University</b><br/><b>Bournemouth University</b><br/>face
+<br/>and
+<br/>alignment
+</td><td>('48708691', 'Zhao Wang', 'zhao wang')</td><td>zwang@bournemouth.ac.uk
+<br/>sliu@bournemouth.ac.uk
+<br/>wenyu.huu@gmail.com
+<br/>trf@zju.edu.cn
+<br/>xyang@bournemouth.ac.uk
+<br/>jzhang@bournemouth.ac.uk
+</td></tr><tr><td>37b6d6577541ed991435eaf899a2f82fdd72c790</td><td>Vision-based Human Gender Recognition: A Survey
+<br/>Universiti Tunku Abdul Rahman, Kuala Lumpur, Malaysia.
+</td><td>('32877936', 'Choon Boon Ng', 'choon boon ng')<br/>('9201065', 'Yong Haur Tay', 'yong haur tay')</td><td>{ngcb,tayyh,goibm}@utar.edu.my
+</td></tr><tr><td>372a8bf0ef757c08551d41e40cb7a485527b6cd7</td><td>Unsupervised Video Hashing by Exploiting
+<br/>Spatio-Temporal Feature
+<br/><b>Institute of Image Processing and Pattern Recognition, Shanghai Jiao Tong</b><br/><b>University, Shanghai, China</b></td><td>('46194894', 'Chao Ma', 'chao ma')<br/>('46964428', 'Yun Gu', 'yun gu')<br/>('46641573', 'Wei Liu', 'wei liu')<br/>('39264954', 'Jie Yang', 'jie yang')</td><td>{sjtu_machao,geron762,liuwei.1989,jieyang}@sjtu.edu.cn
+</td></tr><tr><td>37ef18d71c1ca71c0a33fc625ef439391926bfbb</td><td>Extraction of Subject-Specific Facial Expression
+<br/>Categories and Generation of Facial Expression
+<br/>Feature Space using Self-Mapping
+<br/>Department of Machine Intelligence and Systems Engineering, Faculty of Systems Science and Technology,
+<br/><b>Akita Prefectural University, Yurihonjo, Japan</b><br/>Department of Computer Science and Engineering, Faculty of Engineering and Resource Science,
+<br/><b>Akita University, Akita, Japan</b></td><td>('1932760', 'Masaki Ishii', 'masaki ishii')<br/>('2052920', 'Kazuhito Sato', 'kazuhito sato')<br/>('1738333', 'Hirokazu Madokoro', 'hirokazu madokoro')<br/>('21063785', 'Makoto Nishida', 'makoto nishida')</td><td>Email: {ishii, ksato, madokoro}@akita-pu.ac.jp
+<br/>Email: nishida@ie.akita-u.ac.jp
+</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>Labeled Faces in the Wild: A Database for Studying
+<br/>Face Recognition in Unconstrained Environments
+</td><td>('3219900', 'Gary B. Huang', 'gary b. huang')<br/>('1685538', 'Tamara Berg', 'tamara berg')<br/>('1714536', 'Erik Learned-Miller', 'erik learned-miller')</td><td></td></tr><tr><td>081189493ca339ca49b1913a12122af8bb431984</td><td>Photorealistic Facial Texture Inference Using Deep Neural Networks
+<br/>Supplemental Material for
+<br/>*Pinscreen
+<br/><b>University of Southern California</b><br/><b>USC Institute for Creative Technologies</b><br/>Appendix I. Additional Results
+<br/>Our main results in the paper demonstrate successful in-
+<br/>ference of high-fidelity texture maps from unconstrained
+<br/>images. The input images have mostly low resolutions, non-
+<br/>frontal faces, and the subjects are often captured in chal-
+<br/>lenging lighting conditions. We provide additional results
+<br/>with pictures from the annotated faces-in-the-wild (AFW)
+<br/>dataset [10] to further demonstrate how photorealistic pore-
+<br/>level details can be synthesized using our deep learning ap-
+<br/>proach. We visualize in Figure 9 the input, the intermedi-
+<br/>ate low-frequency albedo map obtained using a linear PCA
+<br/>model, and the synthesized high-frequency albedo texture
+<br/>map. We also show several views of the final renderings us-
+<br/>ing the Arnold renderer [13]. We refer to the accompanying
+<br/>video for additional rotating views of the resulting textured
+<br/>3D face models.
+<br/>Figure 2: Even for largely downsized image resolutions, our
+<br/>algorithm can produce fine-scale details while preserving
+<br/>the person’s similarity.
+<br/>We also evaluate the robustness of our inference frame-
+<br/>work for downsized image resolutions in Figure 2. We crop
+<br/>a diffuse lit face from a Light Stage capture [5]. The re-
+<br/>sulting image has 435 × 652 pixels and we decrease its res-
+<br/>olution to 108 × 162 pixels. In addition to complex skin
+<br/>pigmentations, even the tiny mole on the lower left cheek is
+<br/>properly reconstructed from the reduced input image using
+<br/>our synthesis approach.
+<br/>Figure 1: Comparison between different convolutional neu-
+<br/>ral network architectures.
+<br/>Evaluation. As Figure 1 indicates, other deep convolu-
+<br/>tional neural networks can be used to extract mid-layer fea-
+<br/>ture correlations to characterize multi-scale details, but it
+<br/>seems that deeper architectures produce fewer artifacts and
+<br/>higher quality textures. All three convolutional neural net-
+<br/>works are pre-trained for classification tasks using images
+<br/>from the ImageNet object recognition dataset [4]. The re-
+<br/>sults of the 8 layer CaffeNet [2] show noticeable blocky ar-
+<br/>tifacts in the synthesized textures and the ones from the 16
+<br/>layer VGG [12] are slightly noisy around boundaries, while
+<br/>the 19 layer VGG network performs the best.
+<br/>§- indicates equal contribution
+<br/>Comparison. We provide in Figure 3 additional visual-
+<br/>izations of our method when using the closest feature corre-
+<br/>lation, unconstrained linear combinations, and convex com-
+<br/>binations. We also compare against a PCA-based model
+<br/>fitting [3] approach and the state-of-the-art visio-lization
+<br/>framework [9]. We notice that only our proposed tech-
+<br/>nique using convex combinations is effective in generating
+<br/>mesoscopic-scale texture details. Both visio-lization and
+<br/>the PCA-based model result in lower frequency textures and
+<br/>less similar faces than the ground truth. Since our inference
+<br/>also fills holes, we compare our synthesis technique with
+<br/>a general inpainting solution for predicting unseen face re-
+<br/>gions. We test with the widely used PatchMatch [1] tech-
+<br/>nique as illustrated in Figure 4. Unsurprisingly, we observe
+<br/>unwanted repeating structures and semantically wrong fill-
+<br/>ings since this method is based on low-level vision cues.
+<br/>CaffeNetVGG-16VGG-19albedo mapinputrendering input (magnified) </td><td>('2059597', 'Shunsuke Saito', 'shunsuke saito')<br/>('1792471', 'Lingyu Wei', 'lingyu wei')<br/>('1808579', 'Liwen Hu', 'liwen hu')<br/>('1897417', 'Koki Nagano', 'koki nagano')<br/>('40348249', 'Hao Li', 'hao li')</td><td></td></tr><tr><td>08ee541925e4f7f376538bc289503dd80399536f</td><td>Runtime Neural Pruning
+<br/>Department of Automation
+<br/><b>Tsinghua University</b><br/>Department of Automation
+<br/><b>Tsinghua University</b><br/>Department of Automation
+<br/><b>Tsinghua University</b><br/>Department of Automation
+<br/><b>Tsinghua University</b></td><td>('2772283', 'Ji Lin', 'ji lin')<br/>('39358728', 'Yongming Rao', 'yongming rao')<br/>('1697700', 'Jiwen Lu', 'jiwen lu')<br/>('39491387', 'Jie Zhou', 'jie zhou')</td><td>lin-j14@mails.tsinghua.edu.cn
+<br/>raoyongming95@gmail.com
+<br/>lujiwen@tsinghua.edu.cn
+<br/>jzhou@tsinghua.edu.cn
+</td></tr><tr><td>08d2f655361335bdd6c1c901642981e650dff5ec</td><td>This is the published version:  
+<br/> Arandjelovic, Ognjen and Cipolla, R. 2006, Automatic cast listing in feature‐length films with
+<br/>Anisotropic Manifold Space, in CVPR 2006 : Proceedings of the Computer Vision and Pattern
+<br/>Recognition Conference 2006, IEEE, Piscataway, New Jersey, pp. 1513‐1520.
+<br/>
+<br/> http://hdl.handle.net/10536/DRO/DU:30058435
+<br/> Reproduced with the kind permission of the copyright owner.
+<br/>Copyright : 2006, IEEE
+<br/>Available from Deakin Research Online: 
+</td><td></td><td></td></tr><tr><td>08fbe3187f31b828a38811cc8dc7ca17933b91e9</td><td><b>MITSUBISHI ELECTRIC RESEARCH LABORATORIES</b><br/>http://www.merl.com
+<br/>Statistical Computations on Grassmann and
+<br/>Stiefel Manifolds for Image and Video-Based
+<br/>Recognition
+<br/>Turaga, P.; Veeraraghavan, A.; Srivastava, A.; Chellappa, R.
+<br/>TR2011-084 April 2011
+</td><td></td><td></td></tr><tr><td>08ae100805d7406bf56226e9c3c218d3f9774d19</td><td>Gavrilescu and Vizireanu EURASIP Journal on Image and Video Processing (2017) 2017:59
+<br/>DOI 10.1186/s13640-017-0211-4
+<br/>EURASIP Journal on Image
+<br/>and Video Processing
+<br/>R ES EAR CH
+<br/>Predicting the Sixteen Personality Factors
+<br/>(16PF) of an individual by analyzing facial
+<br/>features
+<br/>Open Access
+</td><td>('2132188', 'Mihai Gavrilescu', 'mihai gavrilescu')<br/>('1929703', 'Nicolae Vizireanu', 'nicolae vizireanu')</td><td></td></tr><tr><td>08c18b2f57c8e6a3bfe462e599a6e1ce03005876</td><td>A Least-Squares Framework
+<br/>for Component Analysis
+</td><td>('1707876', 'Fernando De la Torre', 'fernando de la torre')</td><td></td></tr><tr><td>08f6ad0a3e75b715852f825d12b6f28883f5ca05</td><td>To appear in the 9th IEEE Int'l Conference on Automatic Face and Gesture Recognition, Santa Barbara, CA, March, 2011.
+<br/>Face Recognition: Some Challenges in Forensics
+<br/><b>Michigan State University</b><br/>East Lansing, MI, U.S.A
+</td><td>('6680444', 'Anil K. Jain', 'anil k. jain')<br/>('1817623', 'Brendan Klare', 'brendan klare')<br/>('2222919', 'Unsang Park', 'unsang park')</td><td>{jain, klarebre, parkunsa}@cse.msu.edu
+</td></tr><tr><td>08ff81f3f00f8f68b8abd910248b25a126a4dfa4</td><td>Papachristou, K., Tefas, A., & Pitas, I. (2014). Symmetric Subspace Learning
+<br/>5697. DOI: 10.1109/TIP.2014.2367321
+<br/>Peer reviewed version
+<br/>Link to published version (if available):
+<br/>10.1109/TIP.2014.2367321
+<br/>Link to publication record in Explore Bristol Research
+<br/>PDF-document
+<br/>This is the author accepted manuscript (AAM). The final published version (version of record) is available online
+<br/><b>via Institute of Electrical and Electronic Engineers at http://dx.doi.org/10.1109/TIP.2014.2367321. Please refer to</b><br/>any applicable terms of use of the publisher.
+<br/><b>University of Bristol - Explore Bristol Research</b><br/>General rights
+<br/>This document is made available in accordance with publisher policies. Please cite only the published
+<br/>version using the reference above. Full terms of use are available:
+<br/>http://www.bristol.ac.uk/pure/about/ebr-terms
+<br/> </td><td></td><td></td></tr><tr><td>081a431107eb38812b74a8cd036ca5e97235b499</td><td></td><td></td><td></td></tr><tr><td>084bd02d171e36458f108f07265386f22b34a1ae</td><td>Face Alignment at 3000 FPS via Regressing Local Binary Features
+<br/><b>University of Science and Technology of China</b><br/>Microsoft Research
+</td><td>('2032273', 'Xudong Cao', 'xudong cao')<br/>('3080683', 'Shaoqing Ren', 'shaoqing ren')<br/>('1732264', 'Yichen Wei', 'yichen wei')<br/>('40055995', 'Jian Sun', 'jian sun')</td><td>sqren@mail.ustc.edu.cn
+<br/>{xudongca,yichenw,jiansun}@microsoft.com
+</td></tr><tr><td>081cb09791e7ff33c5d86fd39db00b2f29653fa8</td><td>Square Loss based Regularized LDA for Face Recognition Using Image Sets
+<br/><b>Center for Information Science, Peking University, Beijing 100871, China</b><br/>2Philips Research, High Tech Campus 36, 5656 AE Eindhoven, The Netherlands
+<br/><b>Queen Mary, University of London, London E1 4NS, UK</b></td><td>('37536447', 'Yanlin Geng', 'yanlin geng')<br/>('10795229', 'Caifeng Shan', 'caifeng shan')<br/>('1685266', 'Pengwei Hao', 'pengwei hao')</td><td>gengyanlin@cis.pku.edu.cn, caifeng.shan@philips.com, phao@dcs.qmul.ac.uk
+</td></tr><tr><td>086131159999d79adf6b31c1e604b18809e70ba8</td><td>Deep Action Unit Classification using a Binned
+<br/>Intensity Loss and Semantic Context Model
+<br/>Department of Computing Sciences
+<br/><b>Villanova University</b><br/>Villanova, Pennsylvania 19085
+<br/>Department of Computing Sciences
+<br/><b>Villanova University</b><br/>Villanova, Pennsylvania 19085
+</td><td>('1904114', 'Edward Kim', 'edward kim')<br/>('35266734', 'Shruthika Vangala', 'shruthika vangala')</td><td>Email: edward.kim@villanova.edu
+<br/>Email: svagal1@villanova.edu
+</td></tr><tr><td>0831a511435fd7d21e0cceddb4a532c35700a622</td><td></td><td></td><td></td></tr><tr><td>0861f86fb65aa915fbfbe918b28aabf31ffba364</td><td>International Journal of Computer Trends and Technology (IJCTT) – volume 22 Number 3–April 2015
+<br/> An Efficient Facial Annotation with Machine Learning Approach
+<br/>1A.Anusha,2R.Srinivas
+<br/>1Final M.Tech Student, 2Associate Professor
+<br/><b>Aditya Institute of Technology And Management, Tekkali, Srikakulam, Andhra Pradesh</b></td><td></td><td></td></tr><tr><td>089513ca240c6d672c79a46fa94a92cde28bd567</td><td>RNN Fisher Vectors for Action Recognition and Image Annotation
+<br/><b>The Blavatnik School of Computer Science, Tel Aviv University, Israel</b><br/>2IBM Research, Haifa, Israel
+</td><td>('3004979', 'Guy Lev', 'guy lev')<br/>('2251827', 'Gil Sadeh', 'gil sadeh')<br/>('2205955', 'Benjamin Klein', 'benjamin klein')<br/>('1776343', 'Lior Wolf', 'lior wolf')</td><td></td></tr><tr><td>089b5e8eb549723020b908e8eb19479ba39812f5</td><td>A Cross Benchmark Assessment of A Deep Convolutional Neural
+<br/>Network for Face Recognition
+<br/><b>National Institute of Standards and Technology</b><br/>Gaithersburg, MD 20899 USA
+</td><td>('32028519', 'P. Jonathon Phillips', 'p. jonathon phillips')</td><td></td></tr><tr><td>080c204edff49bf85b335d3d416c5e734a861151</td><td>CLAD: A Complex and Long Activities
+<br/>Dataset with Rich Crowdsourced
+<br/>Annotations
+<br/>Journal Title
+<br/>XX(X):1–6
+<br/>c(cid:13)The Author(s) 2016
+<br/>Reprints and permission:
+<br/>sagepub.co.uk/journalsPermissions.nav
+<br/>DOI: 10.1177/ToBeAssigned
+<br/>www.sagepub.com/
+</td><td>('3280554', 'Jawad Tayyub', 'jawad tayyub')<br/>('2762811', 'Majd Hawasly', 'majd hawasly')<br/>('1967104', 'David C. Hogg', 'david c. hogg')<br/>('1703235', 'Anthony G. Cohn', 'anthony g. cohn')</td><td></td></tr><tr><td>08f4832507259ded9700de81f5fd462caf0d5be8</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 118 – No.14, May 2015
+<br/>Geometric Approach for Human Emotion
+<br/>Recognition using Facial Expression
+<br/>S. S. Bavkar
+<br/>Assistant Professor
+<br/>J. S. Rangole
+<br/>Assistant Professor
+<br/>V. U. Deshmukh
+<br/>Assistant Professor
+</td><td></td><td></td></tr><tr><td>08a1fc55d03e4a73cad447e5c9ec79a6630f3e2d</td><td>BERG, BELHUMEUR: TOM-VS-PETE CLASSIFIERS AND IDENTITY-PRESERVING ALIGNMENT
+<br/>Tom-vs-Pete Classifiers and Identity-Preserving
+<br/>Alignment for Face Verification
+<br/><b>Columbia University</b><br/>New York, NY
+</td><td>('1778562', 'Thomas Berg', 'thomas berg')<br/>('1767767', 'Peter N. Belhumeur', 'peter n. belhumeur')</td><td>tberg@cs.columbia.edu
+<br/>belhumeur@cs.columbia.edu
+</td></tr><tr><td>08d40ee6e1c0060d3b706b6b627e03d4b123377a</td><td>Human Action Localization
+<br/>with Sparse Spatial Supervision
+</td><td>('2492127', 'Philippe Weinzaepfel', 'philippe weinzaepfel')<br/>('3269403', 'Xavier Martin', 'xavier martin')<br/>('2462253', 'Cordelia Schmid', 'cordelia schmid')</td><td></td></tr><tr><td>08c1f8f0e69c0e2692a2d51040ef6364fb263a40</td><td></td><td></td><td></td></tr><tr><td>088aabe3da627432fdccf5077969e3f6402f0a80</td><td>Under review as a conference paper at ICLR 2018
+<br/>CLASSIFIER-TO-GENERATOR ATTACK: ESTIMATION
+<br/>OF TRAINING DATA DISTRIBUTION FROM CLASSIFIER
+<br/>Anonymous authors
+<br/>Paper under double-blind review
+</td><td></td><td></td></tr><tr><td>087002ab569e35432cdeb8e63b2c94f1abc53ea9</td><td>Looking at People
+<br/>CVPRW 2015
+<br/>Spatio-temporal Analysis of RGB-D-T Facial
+<br/>Images for Multimodal Pain Level
+<br/>Recognition
+<br/><b>Visual Analysis of People Lab, Aalborg University, Denmark</b><br/>Computer Vision Center, UAB, Barcelona, Spain
+<br/><b>Aalborg University, Denmark</b></td><td>('37541412', 'Ramin Irani', 'ramin irani')<br/>('1803459', 'Kamal Nasrollahi', 'kamal nasrollahi')<br/>('3321700', 'Ciprian A. Corneanu', 'ciprian a. corneanu')<br/>('7855312', 'Sergio Escalera', 'sergio escalera')<br/>('40526933', 'Tanja L. Pedersen', 'tanja l. pedersen')<br/>('31627926', 'Maria-Louise Klitgaard', 'maria-louise klitgaard')<br/>('35675498', 'Laura Petrini', 'laura petrini')</td><td></td></tr><tr><td>08903bf161a1e8dec29250a752ce9e2a508a711c</td><td>Joint Dimensionality Reduction and Metric Learning: A Geometric Take
+</td><td>('2862871', 'Mathieu Salzmann', 'mathieu salzmann')</td><td></td></tr><tr><td>08cb294a08365e36dd7ed4167b1fd04f847651a9</td><td>EXAMINING VISIBLE ARTICULATORY FEATURES IN CLEAR AND
+<br/>CONVERSATIONAL SPEECH
+<br/><b>Medical Image Analysis Lab, School of Computing Science, Simon Fraser University, Canada</b><br/><b>Language and Brain Lab, Simon Fraser University, Canada</b><br/><b>KU Phonetics and Psycholinguistics Lab, University of Kansas</b></td><td>('2664514', 'Lisa Tang', 'lisa tang')<br/>('26839551', 'Beverly Hannah', 'beverly hannah')<br/>('3200950', 'Allard Jongman', 'allard jongman')<br/>('1723309', 'Yue Wang', 'yue wang')<br/>('3049056', 'Ghassan Hamarneh', 'ghassan hamarneh')</td><td> lisat@sfu.ca, beverlyw@sfu.ca, jongman@ku.edu, sereno@ku.edu, yuew@sfu.ca, hamarneh@sfu.ca
+</td></tr><tr><td>081286ede247c5789081502a700b378b6223f94b</td><td>ORIGINAL RESEARCH
+<br/>published: 06 February 2018
+<br/>doi: 10.3389/fpsyg.2018.00052
+<br/>Neural Correlates of Facial Mimicry:
+<br/>Simultaneous Measurements of EMG
+<br/>and BOLD Responses during
+<br/>Perception of Dynamic Compared to
+<br/>Static Facial Expressions
+<br/><b>Institute of Cognitive and Behavioural Neuroscience, SWPS University of Social</b><br/>Sciences and Humanities, Warsaw, Poland, 2 Laboratory of Psychophysiology, Department of Neurophysiology, Nencki
+<br/><b>Institute of Experimental Biology of Polish Academy of Sciences, Warsaw, Poland</b><br/>Facial mimicry (FM) is an automatic response to imitate the facial expressions of others.
+<br/>However, neural correlates of the phenomenon are as yet not well established. We
+<br/>investigated this issue using simultaneously recorded EMG and BOLD signals during
+<br/>perception of dynamic and static emotional facial expressions of happiness and anger.
+<br/>During display presentations, BOLD signals and zygomaticus major (ZM), corrugator
+<br/>supercilii (CS) and orbicularis oculi (OO) EMG responses were recorded simultaneously
+<br/>from 46 healthy individuals. Subjects reacted spontaneously to happy facial expressions
+<br/>with increased EMG activity in ZM and OO muscles and decreased CS activity, which
+<br/>was interpreted as FM. Facial muscle responses correlated with BOLD activity in regions
+<br/>associated with motor simulation of facial expressions [i.e., inferior frontal gyrus, a
+<br/>classical Mirror Neuron System (MNS)]. Further, we also found correlations for regions
+<br/>associated with emotional processing (i.e., insula, part of the extended MNS). It is
+<br/>concluded that FM involves both motor and emotional brain structures, especially during
+<br/>perception of natural emotional expressions.
+<br/>Keywords: facial mimicry, EMG, fMRI, mirror neuron system, emotional expressions, dynamic, happiness, anger
+<br/>INTRODUCTION
+<br/>Facial mimicry (FM) is an unconscious and unintentional automatic response to the facial
+<br/>expressions of others. Numerous studies have shown that observing the emotional states of others
+<br/>leads to congruent facial muscle activity. For example, observing angry facial expressions can result
+<br/>in enhanced activity in the viewer’s muscle responsible for frowning (CS), while viewing happy
+<br/>images leads to Increased activity in the facial muscle involved in smiling (ZM), and decreased
+<br/>activity of the CS (Hess et al., 1998; Dimberg and Petterson, 2000). However, it has recently been
+<br/>suggested that FM may not be an exclusive automatic reaction but rather a multifactorial response
+<br/>dependent on properties such as stimulus modality (e.g., static or dynamic) or interpersonal
+<br/>characteristics (e.g., emotional contagion susceptibility) (for review see Seibt et al., 2015).
+<br/>There are two main psychological approaches trying to explain the mechanisms of
+<br/>FM. One of
+<br/>these is the perception-behavior link model which assumes perception and
+<br/>execution of a specific action show a certain overlap (Chartrand and Bargh, 1999).
+<br/>Edited by:
+<br/>Alessio Avenanti,
+<br/>Università di Bologna, Italy
+<br/>Reviewed by:
+<br/>Sebastian Korb,
+<br/><b>University of Vienna, Austria</b><br/>Frank A. Russo,
+<br/><b>Ryerson University, Canada</b><br/>*Correspondence:
+<br/>Łukasz ˙Zurawski
+<br/>Specialty section:
+<br/>This article was submitted to
+<br/>Emotion Science,
+<br/>a section of the journal
+<br/>Frontiers in Psychology
+<br/>Received: 20 July 2017
+<br/>Accepted: 12 January 2018
+<br/>Published: 06 February 2018
+<br/>Citation:
+<br/>Rymarczyk K, ˙Zurawski Ł,
+<br/>Jankowiak-Siuda K and Szatkowska I
+<br/>(2018) Neural Correlates of Facial
+<br/>Mimicry: Simultaneous Measurements
+<br/>of EMG and BOLD Responses during
+<br/>Perception of Dynamic Compared to
+<br/>Static Facial Expressions.
+<br/>Front. Psychol. 9:52.
+<br/>doi: 10.3389/fpsyg.2018.00052
+<br/>Frontiers in Psychology | www.frontiersin.org
+<br/>February 2018 | Volume 9 | Article 52
+</td><td>('4079953', 'Krystyna Rymarczyk', 'krystyna rymarczyk')<br/>('4022705', 'Kamila Jankowiak-Siuda', 'kamila jankowiak-siuda')<br/>('4970569', 'Iwona Szatkowska', 'iwona szatkowska')<br/>('4079953', 'Krystyna Rymarczyk', 'krystyna rymarczyk')</td><td>krymarczyk@swps.edu.pl
+<br/>l.zurawski@nencki.gov.pl
+</td></tr><tr><td>08e995c080a566fe59884a527b72e13844b6f176</td><td>A New KSVM + KFD Model for Improved
+<br/>Classification and Face Recognition
+<br/><b>School of Computer Science, University of Windsor, Windsor, ON, Canada N9B 3P</b></td><td>('1687000', 'Riadh Ksantini', 'riadh ksantini')</td><td>Email: {ksantini, boufama, imran}@uwindsor.ca
+</td></tr><tr><td>08e24f9df3d55364290d626b23f3d42b4772efb6</td><td>ENHANCING FACIAL EXPRESSION CLASSIFICATION BY INFORMATION
+<br/>FUSION
+<br/>I. Buciu1, Z. Hammal 2, A. Caplier2, N. Nikolaidis 1, and I. Pitas 1
+<br/><b></b><br/>GR-54124, Thessaloniki, Box 451, Greece
+<br/>2 Laboratoire des Images et des Signaux / Institut National Polytechnique de Grenoble
+<br/>web: http://www.aiia.csd.auth.gr
+<br/>38031 Grenoble, France
+<br/>web: http://www.lis.inpg.fr
+</td><td></td><td>phone: + 30(2310)99.6361, fax: + 30(2310)99.8453, email: {nelu,nikolaid,pitas}@aiia.csd.auth.gr
+<br/>phone: + 33(0476)574363, fax: + 33(0476)57 47 90, email: alice.caplier@inpg.fr
+</td></tr><tr><td>085ceda1c65caf11762b3452f87660703f914782</td><td>Large-pose Face Alignment via CNN-based Dense 3D Model Fitting
+<br/>Department of Computer Science and Engineering
+<br/><b>Michigan State University, East Lansing MI</b></td><td>('2357264', 'Amin Jourabloo', 'amin jourabloo')<br/>('1759169', 'Xiaoming Liu', 'xiaoming liu')</td><td>{jourablo, liuxm}@msu.edu
+</td></tr><tr><td>0830c9b9f207007d5e07f5269ffba003235e4eff</td><td></td><td></td><td></td></tr><tr><td>08d55271589f989d90a7edce3345f78f2468a7e0</td><td>Quality Aware Network for Set to Set Recognition
+<br/>SenseTime Group Limited
+<br/>SenseTime Group Limited
+<br/><b>University of Sydney</b></td><td>('1715752', 'Yu Liu', 'yu liu')<br/>('1721677', 'Junjie Yan', 'junjie yan')<br/>('3001348', 'Wanli Ouyang', 'wanli ouyang')</td><td>liuyuisanai@gmail.com
+<br/>yanjunjie@sensetime.com
+<br/>wanli.ouyang@gmail.com
+</td></tr><tr><td>081fb4e97d6bb357506d1b125153111b673cc128</td><td></td><td></td><td></td></tr><tr><td>08a98822739bb8e6b1388c266938e10eaa01d903</td><td>SensorSift: Balancing Sensor Data Privacy and Utility in
+<br/>Automated Face Understanding
+<br/><b>University of Washington</b><br/>**Microsoft Research, Redmond WA
+</td><td>('3299424', 'Miro Enev', 'miro enev')<br/>('33481800', 'Jaeyeon Jung', 'jaeyeon jung')<br/>('1766509', 'Liefeng Bo', 'liefeng bo')<br/>('1728501', 'Xiaofeng Ren', 'xiaofeng ren')<br/>('1769675', 'Tadayoshi Kohno', 'tadayoshi kohno')</td><td></td></tr><tr><td>084bebc5c98872e9307cd8e7f571d39ef9c1b81e</td><td>A Discriminative Feature Learning Approach
+<br/>for Deep Face Recognition
+<br/>1 Shenzhen Key Lab of Computer Vision and Pattern Recognition,
+<br/><b>Shenzhen Institutes of Advanced Technology, CAS, Shenzhen, China</b><br/><b>The Chinese University of Hong Kong, Sha Tin, Hong Kong</b></td><td>('2512949', 'Yandong Wen', 'yandong wen')<br/>('3393556', 'Kaipeng Zhang', 'kaipeng zhang')<br/>('1911510', 'Zhifeng Li', 'zhifeng li')<br/>('33427555', 'Yu Qiao', 'yu qiao')</td><td>yandongw@andrew.cmu.edu, {kp.zhang,zhifeng.li,yu.qiao}@siat.ac.cn
+</td></tr><tr><td>0857281a3b6a5faba1405e2c11f4e17191d3824d</td><td>Chude-Olisah et al. EURASIP Journal on Advances in Signal Processing 2014, 2014:102
+<br/>http://asp.eurasipjournals.com/content/2014/1/102
+<br/>R ES EAR CH
+<br/>Face recognition via edge-based Gabor feature
+<br/>representation for plastic surgery-altered images
+<br/>Open Access
+</td><td>('2529988', 'Ghazali Sulong', 'ghazali sulong')</td><td></td></tr><tr><td>08f1e9e14775757298afd9039f46ec56e80677f9</td><td>Attentional Push: Augmenting Salience with
+<br/>Shared Attention Modeling
+<br/>Centre for Intelligent Machines, Department of Electrical and Computer Engineering,
+<br/><b>McGill University</b><br/>Montreal, Quebec, Canada
+</td><td>('38111179', 'Siavash Gorji', 'siavash gorji')<br/>('1713608', 'James J. Clark', 'james j. clark')</td><td>siagorji@cim.mcgill.ca clark@cim.mcgill.ca
+</td></tr><tr><td>08d41d2f68a2bf0091dc373573ca379de9b16385</td><td>Recursive Chaining of Reversible Image-to-Image
+<br/>Translators for Face Aging
+<br/><b>Aalto University, Espoo, Finland</b><br/>1 GenMind Ltd, Finland
+<br/>{ari.heljakka,arno.solin,juho.kannala}aalto.fi
+</td><td>('2622083', 'Ari Heljakka', 'ari heljakka')<br/>('1768402', 'Arno Solin', 'arno solin')<br/>('1776374', 'Juho Kannala', 'juho kannala')</td><td></td></tr><tr><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td><td>Understanding Kin Relationships in a Photo
+</td><td>('2025056', 'Ming Shao', 'ming shao')<br/>('33642939', 'Jiebo Luo', 'jiebo luo')<br/>('1708679', 'Yun Fu', 'yun fu')</td><td></td></tr><tr><td>082ad50ac59fc694ba4369d0f9b87430553b11db</td><td></td><td></td><td></td></tr><tr><td>6d0fe30444c6f4e4db3ad8b02fb2c87e2b33c58d</td><td>Robust Deep Appearance Models
+<br/><b>Concordia University, Montreal, Quebec, Canada</b><br/>2 CyLab Biometrics Center and the Department of Electrical and Computer Engineering,
+<br/><b>Carnegie Mellon University, Pittsburgh, PA, USA</b><br/>face images. In this approach,
+</td><td>('2687827', 'Kha Gia Quach', 'kha gia quach')<br/>('1876581', 'Chi Nhan Duong', 'chi nhan duong')<br/>('1769788', 'Khoa Luu', 'khoa luu')<br/>('1699922', 'Tien D. Bui', 'tien d. bui')</td><td>Email: {k q, c duon, bui}@encs.concordia.ca
+<br/>Email: kluu@andrew.cmu.edu
+</td></tr><tr><td>6dbdb07ce2991db0f64c785ad31196dfd4dae721</td><td>Seeing Small Faces from Robust Anchor’s Perspective
+<br/><b>Carnegie Mellon University</b><br/>5000 Forbes Avenue, Pittsburgh, PA 15213, USA
+</td><td>('47894545', 'Chenchen Zhu', 'chenchen zhu')<br/>('1794486', 'Marios Savvides', 'marios savvides')<br/>('47599820', 'Ran Tao', 'ran tao')<br/>('1769788', 'Khoa Luu', 'khoa luu')</td><td>{chenchez, rant, kluu, marioss}@andrew.cmu.edu
+</td></tr><tr><td>6dd052df6b0e89d394192f7f2af4a3e3b8f89875</td><td>International Journal of Engineering and Advanced Technology (IJEAT)
+<br/>ISSN: 2249 – 8958, Volume-2, Issue-4, April 2013
+<br/>A literature survey on Facial Expression
+<br/>Recognition using Global Features
+<br/>
+</td><td>('9318822', 'Mahesh M. Goyani', 'mahesh m. goyani')</td><td></td></tr><tr><td>6d7a32f594d46f4087b71e2a2bb66a4b25da5e30</td><td>Towards Person Authentication by Fusing Visual and Thermal Face
+<br/>Biometrics
+<br/>1 Department of Engineering
+<br/><b>University of Cambridge</b><br/>Cambridge, CB2 1TQ
+<br/>UK
+<br/>2 Delphi Corporation
+<br/>Delphi Electronics and Safety
+<br/>Kokomo, IN 46901-9005
+<br/>USA
+</td><td>('2214319', 'Riad Hammoud', 'riad hammoud')<br/>('1745672', 'Roberto Cipolla', 'roberto cipolla')</td><td>{oa214,cipolla}@eng.cam.ac.uk
+<br/>riad.hammoud@delphi.com
+</td></tr><tr><td>6dd5dbb6735846b214be72983e323726ef77c7a9</td><td>Josai Mathematical Monographs
+<br/>vol. 7 (2014), pp. 25-40
+<br/>A Survey on Newer Prospective
+<br/>Biometric Authentication Modalities
+</td><td>('3322335', 'Narishige Abe', 'narishige abe')<br/>('2395689', 'Takashi Shinzaki', 'takashi shinzaki')</td><td></td></tr><tr><td>6d10beb027fd7213dd4bccf2427e223662e20b7d</td><td></td><td></td><td>ResearchArticleUserAdaptiveandContext-AwareSmartHomeUsingPervasiveandSemanticTechnologiesAggelikiVlachostergiou,1GeorgiosStratogiannis,1GeorgeCaridakis,1,2GeorgeSiolas,1andPhivosMylonas1,31IntelligentSystemsContentandInteractionLaboratory,NationalTechnicalUniversityofAthens,IroonPolytexneiou9,15780Zografou,Greece2DepartmentofCulturalTechnologyandCommunication,UniversityoftheAegean,Mytilene,Lesvos,Greece3DepartmentofInformatics,IonianUniversity,Corfu,GreeceCorrespondenceshouldbeaddressedtoAggelikiVlachostergiou;aggelikivl@image.ntua.grReceived17January2016;Revised6July2016;Accepted17July2016AcademicEditor:JohnN.SahalosCopyright©2016AggelikiVlachostergiouetal.ThisisanopenaccessarticledistributedundertheCreativeCommonsAttributionLicense,whichpermitsunrestricteduse,distribution,andreproductioninanymedium,providedtheoriginalworkisproperlycited.UbiquitousComputingismovingtheinteractionawayfromthehuman-computerparadigmandtowardsthecreationofsmartenvironmentsthatusersandthings,fromtheIoTperspective,interactwith.Usermodelingandadaptationisconsistentlypresenthavingthehumanuserasaconstantbutpervasiveinteractionintroducestheneedforcontextincorporationtowardscontext-awaresmartenvironments.Thecurrentarticlediscussesbothaspectsoftheusermodelingandadaptationaswellascontextawarenessandincorporationintothesmarthomedomain.Usersaremodeledasfuzzypersonasandthesemodelsaresemanticallyrelated.Contextinformationiscollectedviasensorsandcorrespondstovariousaspectsofthepervasiveinteractionsuchastemperatureandhumidity,butalsosmartcitysensorsandservices.Thiscontextinformationenhancesthesmarthomeenvironmentviatheincorporationofuserdefinedhomerules.SemanticWebtechnologiessupporttheknowledgerepresentationofthisecosystemwhiletheoverallarchitecturehasbeenexperimentallyverifiedusinginputfromtheSmartS
antandersmartcityandapplyingittotheSandSsmarthomewithinFIREandFIWAREframeworks.1.IntroductionAlthoughintheirinitialdefinitionanddevelopmentstagespervasivecomputingpracticesdidnotnecessarilyrelyontheuseoftheInternet,currenttrendsshowtheemergenceofmanyconvergencepointswiththeInternetofThings(IoT)paradigm,whereobjectsareidentifiedasInternetresourcesandcanbeaccessedandutilizedassuch.Inthesametime,theHuman-ComputerInteraction(HCI)paradigminthedomainofdomoticshaswideneditsscopeconsiderably,placingthehumaninhabitantinapervasiveenvironmentandinacontinuousinteractionwithsmartobjectsandappliances.SmarthomesthatadditionallyadheretotheIoTapproachconsiderthatthisdatacontinuouslyproducedbyappliances,sensors,andhumanscanbeprocessedandassessedcollaboratively,remotely,andevensocially.Inthepresentpaper,wetrytobuildanewknowledgerepresentationframeworkwherewefirstplacethehumanuserinthecenterofthisinteraction.Wethenproposetobreakdownthemultitudeofpossibleuserbehaviorstoafewprototypicalusermodelsandthentoresynthesizethemusingfuzzyreasoning.Then,wediscusstheubiquityofcontextinformationinrelationtotheuserandthedifficultyofproposingauniversalformalizationframeworkfortheopenworld.Weshowthat,byrestrictinguser-relatedcontexttothesmarthomeenvironment,wecanreliablydefinesimplerulestructuresthatcorrelatespecificsensorinputdataanduseractionsthatcanbeusedtotriggerarbitrarysmarthomeevents.ThisrationaleisthenevolvedtoahigherlevelsemanticrepresentationofthedomoticecosysteminwhichcomplexhomerulescanbedefinedusingSemanticWebtechnologies.Itisthusobservedthatasmarthomeusingpervasiveandsemantictechnologiesinwhichthehumanuserisinthecenteroftheinteractionhastobeadaptive(itsbehaviorcanchangeinresponsetoaperson’sactionsandenvironment)andpersonalized(itsbehaviorcanbetailoredtotheuser’sHindawi Publishing CorporationJournal of Electrical and Computer EngineeringVolume 2016, Article ID 4789803, 20 pageshttp://dx.doi.org/10.1155/2016/4789803 </td></tr><tr><td>6d2ca1ddacccc8c865112bd1fbf8b931c2ee8e75</td><td>ROC 
Speak: Semi-Automated Personalized Feedback on
+<br/>Nonverbal Behavior from Recorded Videos
+<br/><b>Rochester Human-Computer Interaction (ROC HCI), University of Rochester, NY</b><br/>Figure 1. An overview of our system. Once the user finishes recording, the video is analyzed on the server for objective feedback
+<br/>and sent to Mechanical Turk for subjective feedback. The objective feedback is then combined with subjective feedback that is
+<br/>scored based on helpfulness, under which the sentiment is then classified.
+</td><td>('1825866', 'Michelle Fung', 'michelle fung')<br/>('2961433', 'Yina Jin', 'yina jin')<br/>('2171034', 'RuJie Zhao', 'rujie zhao')</td><td>{mfung, yjin18, rzhao2, mehoque}@cs.rochester.edu
+</td></tr><tr><td>6dddf1440617bf7acda40d4d75c7fb4bf9517dbb</td><td>JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, MM YY
+<br/>Beyond Counting: Comparisons of Density Maps for Crowd
+<br/>Analysis Tasks - Counting, Detection, and Tracking
+</td><td>('41201301', 'Di Kang', 'di kang')<br/>('1730232', 'Zheng Ma', 'zheng ma')<br/>('3651407', 'Antoni B. Chan', 'antoni b. chan')</td><td></td></tr><tr><td>6de18708218988b0558f6c2f27050bb4659155e4</td><td></td><td></td><td></td></tr><tr><td>6d97e69bbba5d1f5c353f9a514d62aff63bc0fb1</td><td>Semi-Supervised Learning for Facial Expression
+<br/>Recognition
+<br/>1HP Labs, Palo Alto, CA, USA
+<br/><b>Faculty of Science, University of Amsterdam, The Netherlands</b><br/>3Escola Polit´ecnica, Universidade de S˜ao Paulo, Brazil
+<br/><b>Beckman Institute, University of Illinois at Urbana-Champaign, IL, USA</b></td><td>('1774778', 'Ira Cohen', 'ira cohen')<br/>('1703601', 'Nicu Sebe', 'nicu sebe')<br/>('1739208', 'Thomas S. Huang', 'thomas s. huang')</td><td>Ira.cohen@hp.com
+<br/>nicu@science.uva.nl
+<br/>fgcozman@usp.br
+<br/>huang@ifp.uiuc.edu
+</td></tr><tr><td>6d91da37627c05150cb40cac323ca12a91965759</td><td></td><td></td><td></td></tr><tr><td>6d07e176c754ac42773690d4b4919a39df85d7ec</td><td>Face Attribute Prediction Using Off-The-Shelf Deep
+<br/>Learning Networks
+<br/>Computer Science and Communication
+<br/><b>KTH Royal Institute of Technology</b><br/>100 44 Stockholm, Sweden
+</td><td>('50262049', 'Yang Zhong', 'yang zhong')<br/>('1736906', 'Josephine Sullivan', 'josephine sullivan')<br/>('40565290', 'Haibo Li', 'haibo li')</td><td>{yzhong, sullivan, haiboli}@kth.se
+</td></tr><tr><td>6dd2a0f9ca8a5fee12edec1485c0699770b4cfdf</td><td>Webly-supervised Video Recognition by Mutually
+<br/>Voting for Relevant Web Images and Web Video Frames
+<br/><b>IIIS, Tsinghua University</b><br/>2Google Research
+<br/>3Amazon
+<br/><b>CRCV, University of Central Florida</b></td><td>('2551285', 'Chuang Gan', 'chuang gan')<br/>('1726241', 'Chen Sun', 'chen sun')<br/>('2055900', 'Lixin Duan', 'lixin duan')<br/>('40206014', 'Boqing Gong', 'boqing gong')</td><td></td></tr><tr><td>6d4b5444c45880517213a2fdcdb6f17064b3fa91</td><td>Journal of Information Engineering and Applications
+<br/>ISSN 2224-5782 (print) ISSN 2225-0506 (online)
+<br/>Vol 2, No.3, 2012
+<br/>www.iiste.org
+<br/>Harvesting Image Databases from The Web
+<br/><b>G.H.Raisoni College of Engg. and Mgmt., Pune, India</b><br/><b>G.H.Raisoni College of Engg. and Mgmt., Pune, India</b><br/><b>G.H.Raisoni College of Engg. and Mgmt., Pune, India</b></td><td>('2671016', 'Snehal M. Gaikwad', 'snehal m. gaikwad')<br/>('40050646', 'Snehal S. Pathare', 'snehal s. pathare')</td><td>*gaikwad.snehal99@gmail.com
+<br/>*snehalpathare4@gmail.com
+<br/>*truptijachak311991@gmail.com
+</td></tr><tr><td>6d8c9a1759e7204eacb4eeb06567ad0ef4229f93</td><td>Face Alignment Robust to Pose, Expressions and
+<br/>Occlusions
+</td><td>('2232940', 'Vishnu Naresh Boddeti', 'vishnu naresh boddeti')<br/>('1767616', 'Myung-Cheol Roh', 'myung-cheol roh')<br/>('2526145', 'Jongju Shin', 'jongju shin')<br/>('3149566', 'Takaharu Oguri', 'takaharu oguri')<br/>('1733113', 'Takeo Kanade', 'takeo kanade')</td><td></td></tr><tr><td>6dc1f94b852538d572e4919238ddb10e2ee449a4</td><td>Objects as context for detecting their semantic parts
+<br/><b>University of Edinburgh</b></td><td>('20758701', 'Abel Gonzalez-Garcia', 'abel gonzalez-garcia')<br/>('1996209', 'Davide Modolo', 'davide modolo')<br/>('1749692', 'Vittorio Ferrari', 'vittorio ferrari')</td><td>a.gonzalez-garcia@sms.ed.ac.uk
+<br/>davide.modolo@gmail.com
+<br/>vferrari@staffmail.ed.ac.uk
+</td></tr><tr><td>6d4e3616d0b27957c4107ae877dc0dd4504b69ab</td><td>Shuffle and Learn: Unsupervised Learning using
+<br/>Temporal Order Verification
+<br/><b>The Robotics Institute, Carnegie Mellon University</b><br/>2 Facebook AI Research
+</td><td>('1806773', 'Ishan Misra', 'ishan misra')<br/>('1699161', 'C. Lawrence Zitnick', 'c. lawrence zitnick')<br/>('1709305', 'Martial Hebert', 'martial hebert')</td><td>{imisra, hebert}@cs.cmu.edu, zitnick@fb.com
+</td></tr><tr><td>6d5125c9407c7762620eeea7570af1a8ee7d76f3</td><td>Video Frame Interpolation by Plug-and-Play
+<br/>Deep Locally Linear Embedding
+<br/><b>Yonsei University</b></td><td>('1886286', 'Anh-Duc Nguyen', 'anh-duc nguyen')<br/>('47902684', 'Woojae Kim', 'woojae kim')<br/>('2078790', 'Jongyoo Kim', 'jongyoo kim')<br/>('39200200', 'Sanghoon Lee', 'sanghoon lee')</td><td></td></tr><tr><td>6d8e3f3a83514381f890ab7cd2a1f1c5be597b69</td><td><b>University of Massachusetts - Amherst</b><br/>Doctoral Dissertations 2014-current
+<br/>Dissertations and Theses
+<br/>2014
+<br/>Improving Text Recognition in Images of Natural
+<br/>Scenes
+<br/>Jacqueline Feild
+<br/>Follow this and additional works at: http://scholarworks.umass.edu/dissertations_2
+<br/>Recommended Citation
+<br/>Feild, Jacqueline, "Improving Text Recognition in Images of Natural Scenes" (2014). Doctoral Dissertations 2014-current. Paper 37.
+</td><td></td><td>ScholarWorks@UMass Amherst
+<br/>University of Massachusetts - Amherst, jacqueline.feild@gmail.com
+<br/>This Open Access Dissertation is brought to you for free and open access by the Dissertations and Theses at ScholarWorks@UMass Amherst. It has
+<br/>been accepted for inclusion in Doctoral Dissertations 2014-current by an authorized administrator of ScholarWorks@UMass Amherst. For more
+<br/>information, please contact scholarworks@library.umass.edu.
+</td></tr><tr><td>6d8eef8f8d6cd8436c55018e6ca5c5907b31ac19</td><td>Understanding Representations and Reducing
+<br/>their Redundancy in Deep Networks
+<br/>Thesis submitted to the Faculty of
+<br/><b>Virginia Polytechnic Institute and State University</b><br/>in partial fulfillment of the requirements for the degree of
+<br/>Master of Science
+<br/>in
+<br/>Computer Science and Applications
+<br/>Chair
+<br/>Co-chair
+<br/>February 18, 2016
+<br/>Blacksburg, Virginia
+<br/>Keywords: Computer Vision, Machine Learning, Object Recognition, Overfitting
+</td><td>('3358085', 'Micheal Cogswell', 'micheal cogswell')<br/>('40486307', 'Bert Huang', 'bert huang')<br/>('1746610', 'Dhruv Batra', 'dhruv batra')<br/>('38013066', 'B. Aditya Prakash', 'b. aditya prakash')</td><td>Copyright @ 2016 Michael Cogswell
+</td></tr><tr><td>6d618657fa5a584d805b562302fe1090957194ba</td><td>Full Paper
+<br/>NNGT Int. J. of Artificial Intelligence , Vol. 1, July 2014
+<br/>Human Facial Expression Recognition based
+<br/>on Principal Component Analysis and
+<br/>Artificial Neural Network
+<br/>Laboratory of Automatic and Signals Annaba (LASA) , Department of electronics, Faculty of Engineering,
+<br/>Zermi.Narima, Ramdani.M, Saaidia.M
+<br/><b>Badji-Mokhtar University, P.O.Box 12, Annaba-23000, Algeria</b></td><td></td><td>E-Mail : naili.narima@gmail.com, messaoud.ramdani@univ-annaba.org
+</td></tr><tr><td>6d66c98009018ac1512047e6bdfb525c35683b16</td><td>IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. 25, NO. 9, SEPTEMBER 2003
+<br/>1063
+<br/>Face Recognition Based on
+<br/>Fitting a 3D Morphable Model
+</td><td>('2880906', 'Volker Blanz', 'volker blanz')<br/>('1687079', 'Thomas Vetter', 'thomas vetter')</td><td></td></tr><tr><td>016cbf0878db5c40566c1fbc237686fbad666a33</td><td></td><td></td><td></td></tr><tr><td>016800413ebd1a87730a5cf828e197f43a08f4b3</td><td>Learning Attributes Equals
+<br/>Multi-Source Domain Generalization
+<br/><b>IIIS, Tsinghua University</b><br/><b>University of Iowa</b><br/>CRCV, U. of Central Florida
+</td><td>('2551285', 'Chuang Gan', 'chuang gan')<br/>('40381920', 'Tianbao Yang', 'tianbao yang')<br/>('40206014', 'Boqing Gong', 'boqing gong')</td><td>ganchuang1990@gmail.com
+<br/>tianbao-yang@uiowa.edu
+<br/>bgong@crcv.ucf.edu
+</td></tr><tr><td>0172867f4c712b33168d9da79c6d3859b198ed4c</td><td>Technique for Face Recognition
+<br/><b>Faculty of Engineering, Ain Shams University, Cairo, Egypt</b><br/>Expression and Illumination Invariant Preprocessing
+</td><td>('1726416', 'A. Abbas', 'a. abbas')<br/>('9159923', 'S. Abdel-Hay', 's. abdel-hay')</td><td></td></tr><tr><td>0145dc4505041bf39efa70ea6d95cf392cfe7f19</td><td>Human Action Segmentation with Hierarchical Supervoxel Consistency
+<br/><b>University of Michigan</b><br/>Detailed analysis of human action, such as classification, detection and lo-
+<br/>calization has received increasing attention from the community; datasets
+<br/>like J-HMDB [1] have made it plausible to conduct studies analyzing the
+<br/>impact that such deeper information has on the greater action understanding
+<br/>problem. However, detailed automatic segmentation of human action has
+<br/>comparatively been unexplored. In this paper, we introduce a hierarchical
+<br/>MRF model to automatically segment human action boundaries in videos
+<br/>“in-the-wild” (see Fig. 1).
+<br/>We first propose a human motion saliency representation which incor-
+<br/>porates two parts: foreground motion and human appearance information.
+<br/>For foreground motion estimation, we propose a new motion saliency fea-
+<br/>ture by using long-term trajectories to build a camera motion model, and
+<br/>then measure the motion saliency via the deviation from the camera model.
+<br/>For human appearance information, we use a DPM person detector trained
+<br/>on PASCAL VOC 2007 and construct a saliency map by averaging the nor-
+<br/>malized detection score of all the scale and all components.
+<br/>Then, to segment the human action, we start by applying hierarchical
+<br/>graph-based video segmentation [2] to form a hierarchy of supervoxels. On
+<br/>this hierarchy, we define an MRF model, using our novel human motion
+<br/>saliency as the unary term. We consider the joint information of temporal
+<br/>connections in the direction of optical flow and human appearance-aware
+<br/>spatial neighbors as pairwise potential. We design an innovative high-order
+<br/>potential between different supervoxels on different levels of the hierar-
+<br/>chy to alleviate leaks and sustain better semantic information. Given the
+<br/>graph structure G = (X ,E) induced by the supervoxel hierarchy (E is the
+<br/>set of edges in the graph hiearchy). We introduce an energy function over
+<br/>G = (X ,E) that enforces hierarchical supervoxel consistency through higher
+<br/>order potentials derived from supervoxel V.
+<br/>E(Y ) = ∑
+<br/>i∈X
+<br/>Φi(yi) + ∑
+<br/>(i, j)∈E
+<br/>Φi, j(yi,y j) + ∑
+<br/>v∈V
+<br/>Φv(yv)
+<br/>(1)
+<br/>where Φi(yi) denotes unary potential for a supervoxel with index i, Φi, j(yi,y j)
+<br/>denotes pairwise potential between two supervoxels with edge, and Φv(yv)
+<br/>denotes high order potential of supervoxels between two layers. Unary po-
+<br/>tential: We encode the motion saliency and human saliency feature into
+<br/>supervoxels to get the unary potential components:
+<br/>Φi(yi) = γMMi(yi) + γPPi(yi) + γSSi(yi)
+<br/>(2)
+<br/>where γM, γP and γS are weights for the unary terms. Mi(yi) reflects the
+<br/>motion evidence, Pi(yi) and Si(yi) reflect the human evidence. Pairwise
+<br/>potential: we constrain the edge space with only two types of neighbors:
+<br/>temporal supervoxel neighbors and human-aware spatial neighbors, so we
+<br/>define the pairwise potential as:
+<br/>Φi, j(yi,y j) = γIIi, j(yi,y j) + γKKi, j(yi,y j))
+<br/>(3)
+<br/>where γI and γK are pairwise potential weights. Ii, j(yi,y j) is the cost be-
+<br/>tween supervoxel i and supervoxel j with human detection constraints, which
+<br/>ensures the smoothness spatially. Note that i and j could be determined as
+<br/>neighbors without pixel-level connection. Ki, j(yi,y j) is the virtual dissim-
+<br/>ilarity which ensures the smoothness temporally. Higher order potential:
+<br/>We define the hierarchical supervoxel label consistency potential. We utilize
+<br/>the connection between different supervoxel hierarchical levels. In practice,
+<br/>we adopt the Robust Pn model [3] to define the potentials,
+<br/>if N(yv) (cid:54) Q
+<br/>otherwise
+<br/>(cid:26) N(yv) 1
+<br/>Φv(yv) =
+<br/>Q γmax(v)
+<br/>γmax(v)
+</td><td>('8553015', 'Jiasen Lu', 'jiasen lu')<br/>('1856629', 'Ran Xu', 'ran xu')</td><td></td></tr><tr><td>01bef320b83ac4405b3fc5b1cff788c124109fb9</td><td>de Lausanne
+<br/>RLC D1 740, CH-1015
+<br/>Lausanne
+<br/>de Lausanne
+<br/>RLC D1 740, CH-1015
+<br/>Lausanne
+<br/>de Lausanne
+<br/>RLC D1 740, CH-1015
+<br/>Lausanne
+<br/>Translating Head Motion into Attention - Towards
+<br/>Processing of Student’s Body-Language
+<br/>CHILI Laboratory
+<br/>Łukasz Kidzi´nski
+<br/>CHILI Laboratory
+<br/>CHILI Laboratory
+<br/>École polytechnique fédérale
+<br/>École polytechnique fédérale
+<br/>École polytechnique fédérale
+</td><td>('1850245', 'Mirko Raca', 'mirko raca')<br/>('1799133', 'Pierre Dillenbourg', 'pierre dillenbourg')</td><td>mirko.raca@epfl.ch
+<br/>lukasz.kidzinski@epfl.ch
+<br/>pierre.dillenbourg@epfl.ch
+</td></tr><tr><td>01c9dc5c677aaa980f92c4680229db482d5860db</td><td>Temporal Action Detection using a Statistical Language Model
+<br/><b>University of Bonn, Germany</b></td><td>('32774629', 'Alexander Richard', 'alexander richard')<br/>('2946643', 'Juergen Gall', 'juergen gall')</td><td>{richard,gall}@iai.uni-bonn.de
+</td></tr><tr><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td><td>A semi-automatic methodology for facial landmark annotation
+<br/><b>Imperial College London, UK</b><br/><b>School of Computer Science, University of Lincoln, U.K</b><br/><b>EEMCS, University of Twente, The Netherlands</b></td><td>('3320415', 'Christos Sagonas', 'christos sagonas')<br/>('2610880', 'Georgios Tzimiropoulos', 'georgios tzimiropoulos')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')<br/>('1694605', 'Maja Pantic', 'maja pantic')</td><td>{c.sagonas, gt204, s.zafeiriou, m.pantic}@imperial.ac.uk
+</td></tr><tr><td>01c7a778cde86ad1b89909ea809d55230e569390</td><td>A Supervised Low-rank Method for Learning Invariant Subspaces
+<br/><b>West Virginia University</b><br/>Morgantown, WV 26508
+</td><td>('1803400', 'Farzad Siyahjani', 'farzad siyahjani')<br/>('3360490', 'Ranya Almohsen', 'ranya almohsen')<br/>('36911226', 'Sinan Sabri', 'sinan sabri')<br/>('1736352', 'Gianfranco Doretto', 'gianfranco doretto')</td><td>{fsiyahja, ralmohse, sisabri, gidoretto}@mix.wvu.edu
+</td></tr><tr><td>01c8d7a3460422412fba04e7ee14c4f6cdff9ad7</td><td>(IJACSA) International Journal of Advanced Computer Science and Applications,
+<br/>Vol. 4, No. 7, 2013
+<br/>Rule Based System for Recognizing Emotions Using
+<br/>Multimodal Approach
+<br/>Information System
+<br/>SBM, SVKM’s NMIMS
+<br/>Mumbai, India
+<br/>
+</td><td>('9575671', 'Preeti Khanna', 'preeti khanna')</td><td></td></tr><tr><td>0115f260069e2e501850a14845feb400142e2443</td><td>An On-Line Handwriting Recognizer
+<br/>with Fisher Matching, Hypotheses
+<br/>Propagation Network and Context
+<br/>Constraint Models
+<br/>By
+<br/>A dissertation submitted in partial fulfillment of
+<br/>the requirements for the degree of
+<br/>Doctor of Philosophy
+<br/>Department of Computer Science
+<br/><b>New York University</b><br/>May 2001
+<br/>_____________________
+<br/>Davi Geiger
+</td><td>('2034318', 'Jong Oh', 'jong oh')</td><td></td></tr><tr><td>01cc8a712e67384f9ef9f30580b7415bfd71e980</td><td>14750 • The Journal of Neuroscience, November 3, 2010 • 30(44):14750 –14758
+<br/>Behavioral/Systems/Cognitive
+<br/>Failing to Ignore: Paradoxical Neural Effects of Perceptual
+<br/>Load on Early Attentional Selection in Normal Aging
+<br/><b>2Program in Neuroscience, and 3Rotman Research Institute, University of Toronto, Toronto, Ontario M5S 3G3, Canada</b><br/>We examined visual selective attention under perceptual load—simultaneous presentation of task-relevant and -irrelevant informa-
+<br/>tion—in healthy young and older adult human participants to determine whether age differences are observable at early stages of
+<br/>selection in the visual cortices. Participants viewed 50/50 superimposed face/place images and judged whether the faces were male or
+<br/>female, rendering places perceptible but task-irrelevant. Each stimulus was repeated, allowing us to index dynamic stimulus-driven
+<br/>competition from places. Consistent with intact early selection in young adults, we observed no adaptation to unattended places in
+<br/>parahippocampal place area (PPA) and significant adaptation to attended faces in fusiform face area (FFA). Older adults, however,
+<br/>exhibited both PPA adaptation to places and weak FFA adaptation to faces. We also probed participants’ associative recognition for
+<br/>face-place pairs post-task. Older adults with better place recognition memory scores were found to exhibit both the largest magnitudes of
+<br/>PPA adaptation and the smallest magnitudes of FFA adaptation on the attention task. In a control study, we removed the competing
+<br/>perceptual information to decrease perceptual load. These data revealed that the initial age-related impairments in selective attention
+<br/>were not due to a general decline in visual cortical selectivity; both young and older adults exhibited robust FFA adaptation and neither
+<br/>group exhibited PPA adaptation to repeated faces. Accordingly, distracting information does not merely interfere with attended input in
+<br/>older adults, but is co-encoded along with the contents of attended input, to the extent that this information can subsequently be
+<br/>recovered from recognition memory.
+<br/>Introduction
+<br/>Age-related changes in selective attention have traditionally been
+<br/>examined using manipulations of executive attention, e.g., the
+<br/>capacity to selectively maintain targets and suppress distractors
+<br/>in working memory (WM) (Hasher and Zacks, 1988; Gazzaley et
+<br/>al., 2005, 2008; Healey et al., 2008). Under cognitive load from
+<br/>WM, older adults appear more susceptible to interference from
+<br/>distracting stimuli compared with young controls.
+<br/>At the neural level, executive attention appears to reconcile
+<br/>interference from unattended distractors at stages of processing
+<br/>after encoding in the perceptual cortices, i.e., late selection, and
+<br/>relies on prefrontal control mechanisms (de Fockert et al., 2001;
+<br/>Gehring and Knight, 2002). Experimental tasks that manipulate
+<br/>executive attention, such as distractor exclusion (de Fockert et al.,
+<br/>2001; Yi et al., 2004) and attentional blink (Luck et al., 1996;
+<br/>Marois et al., 2000) have routinely demonstrated late selection of
+<br/>unattended information.
+<br/>However, the focus of aging research on executive attention
+<br/>and distractor interference has left several questions unexplored.
+<br/>Executive attention appears to be dissociable from the type of
+<br/>perceptual attention used for reconciling distractor competition
+<br/>Received May 26, 2010; revised Aug. 28, 2010; accepted Sept. 11, 2010.
+<br/><b>This work was supported by Grant MOP102637 from the Canadian Institutes of Health Research to E.D.R. and the</b><br/>Vanier National Science and Engineering Research Council Scholarship to T.W.S. We also thank Adam K. Anderson
+<br/>and Daniel H. Lee for helpful editorial input on the manuscript.
+<br/>DOI:10.1523/JNEUROSCI.2687-10.2010
+<br/>Copyright © 2010 the authors
+<br/>0270-6474/10/3014750-09$15.00/0
+<br/>within the visual field, which is thought to be embedded in pos-
+<br/>terior cortical subsystems (Treisman, 1969; Desimone and Dun-
+<br/>can, 1995; Lavie et al., 2004). For instance, Yi et al. (2004)
+<br/>observed that under perceptual load but not WM load, unat-
+<br/>tended distractors were suppressed at stages of visual processing
+<br/>before extrastriate encoding. These finding indicate that percep-
+<br/>tual attention relies on a distinct early selection mechanism.
+<br/>In the present study, we therefore explored with functional
+<br/>magnetic resonance imaging (fMRI) whether perceptual at-
+<br/>tention is also susceptible to an age-related impairment. We
+<br/>hypothesized that under perceptually demanding conditions,
+<br/>when task-relevant and -irrelevant stimuli were simultaneously
+<br/>presented in the visual field, early competitive perceptual inter-
+<br/>actions from task-irrelevant sensory information would be suc-
+<br/>cessfully filtered in younger adults before encoding (Lavie, 1995;
+<br/>Yi et al., 2004). By contrast, if older adults do exhibit impaired
+<br/>perceptual attention, then age-differences in distractor encoding
+<br/>should be observable in extrastriate cortex sensitive to the
+<br/>unattended stream of input. We were also interested in eluci-
+<br/>dating the precise neural fate of this unattended information
+<br/>in older adults. Specifically, do distractors merely interfere
+<br/>with attended input, or are distractors co-encoded along with
+<br/>the content of attended input to the extent that they can sub-
+<br/>sequently be recognized?
+<br/>To interrogate these hypotheses, we acquired fMRI while a
+<br/>group of healthy young (mean age ⫽ 22.2 years) and older (mean
+<br/>age ⫽ 77.4 years) adults viewed 50/50 threshold superimposed
+<br/>face and place images (O’Craven et al., 1999; Yi et al., 2006) (Fig.
+<br/>1a). Participants decided whether faces were male or female, ren-
+</td><td>('4258285', 'Eve De Rosa', 'eve de rosa')<br/>('4258285', 'Eve De Rosa', 'eve de rosa')</td><td>George Street, Toronto, ON M5S 3G3, Canada. E-mail: taylor@aclab.ca or derosa@psych.utoronto.ca.
+</td></tr><tr><td>01e12be4097fa8c94cabeef0ad61498c8e7762f2</td><td></td><td></td><td></td></tr><tr><td>0163d847307fae508d8f40ad193ee542c1e051b4</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 6, NO. 1, JANUARY 2007
+<br/>Classemes and Other Classifier-based
+<br/>Features for Efficient Object Categorization
+<br/>- Supplementary material -
+<br/>1 LOW-LEVEL FEATURES
+<br/>We extract the SIFT [1] features for our descriptor
+<br/>according to the following pipeline. We first convert
+<br/>each image to gray-scale, then we normalize the con-
+<br/>trast by forcing the 0.01% of lightest and darkest pixels
+<br/>to be mapped to white and black respectively, and
+<br/>linearly rescaling the values in between. All images
+<br/>exceeding 786,432 pixels of resolution are downsized
+<br/>to this maximum value while keeping the aspect ratio.
+<br/>The 128-dimensional SIFT descriptors are computed
+<br/>from the interest points returned by a DoG detec-
+<br/>tor [2]. We finally compute a Bag-Of-Word histogram
+<br/>of these descriptors, using a K-means vocabulary of
+<br/>500 words.
+<br/>2 CLASSEMES
+<br/>The LSCOM categories were developed specifically
+<br/>for multimedia annotation and retrieval, and have
+<br/>been used in the TRECVID video retrieval series.
+<br/>We took the LSCOM CYC ontology dated 2006-06-30,
+<br/>which contains 2832 unique categories. We removed
+</td><td>('34338883', 'Alessandro Bergamo', 'alessandro bergamo')<br/>('1732879', 'Lorenzo Torresani', 'lorenzo torresani')</td><td></td></tr><tr><td>01dc1e03f39901e212bdf291209b7686266aeb13</td><td>Actionness Estimation Using Hybrid Fully Convolutional Networks
+<br/><b>Shenzhen key lab of Comp. Vis. and Pat. Rec., Shenzhen Institutes of Advanced Technology, CAS, China</b><br/><b>The Chinese University of Hong Kong, Hong Kong</b><br/>3Computer Vision Lab, ETH Zurich, Switzerland
+</td><td>('33345248', 'Limin Wang', 'limin wang')<br/>('33427555', 'Yu Qiao', 'yu qiao')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')<br/>('1681236', 'Luc Van Gool', 'luc van gool')</td><td></td></tr><tr><td>016f49a54b79ec787e701cc8c7d0280273f9b1ef</td><td>SELF ORGANIZING MAPS FOR REDUCING THE NUMBER OF CLUSTERS BY ONE ON
+<br/>SIMPLEX SUBSPACES
+<br/><b>Aristotle University of Thessaloniki</b><br/>Box 451, Thessaloniki 541 24, Greece
+</td><td>('1736143', 'Constantine Kotropoulos', 'constantine kotropoulos')<br/>('1762248', 'Vassiliki Moschou', 'vassiliki moschou')</td><td>E-mail: {costas, vmoshou}@aiia.csd.auth.gr
+</td></tr><tr><td>01c4cf9c7c08f0ad3f386d88725da564f3c54679</td><td>Interpretability Beyond Feature Attribution:
+<br/>Quantitative Testing with Concept Activation Vectors (TCAV)
+</td><td>('3351164', 'Been Kim', 'been kim')<br/>('2217654', 'Rory Sayres', 'rory sayres')</td><td></td></tr><tr><td>017ce398e1eb9f2eed82d0b22fb1c21d3bcf9637</td><td>FACE RECOGNITION WITH HARMONIC DE-LIGHTING
+<br/>2ICT-ISVISION Joint R&D Laboratory for Face Recognition, CAS, Beijing, China, 100080
+<br/>1Graduate School, CAS, Beijing, China, 100080
+<br/>Emails: {lyqing, sgshan, wgao}jdl.ac.cn
+</td><td>('2343895', 'Laiyun Qing', 'laiyun qing')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('40049005', 'Wen Gao', 'wen gao')</td><td></td></tr><tr><td>014e3d0fa5248e6f4634dc237e2398160294edce</td><td>Int J Comput Vis manuscript No.
+<br/>(will be inserted by the editor)
+<br/>What does 2D geometric information really tell us about
+<br/>3D face shape?
+<br/>Received: date / Accepted: date
+</td><td>('39180407', 'Anil Bas', 'anil bas')</td><td></td></tr><tr><td>01125e3c68edb420b8d884ff53fb38d9fbe4f2b8</td><td>Large Pose 3D Face Reconstruction from a Single Image via Direct Volumetric
+<br/>CNN Regression
+<br/><b>The University of Nottingham, UK</b><br/><b>Kingston University, UK</b><br/><b>Figure 1: A few results from our VRN - Guided method, on a full range of pose, including large expressions</b></td><td>('34596685', 'Aaron S. Jackson', 'aaron s. jackson')<br/>('3458121', 'Adrian Bulat', 'adrian bulat')<br/>('1689047', 'Vasileios Argyriou', 'vasileios argyriou')<br/>('2610880', 'Georgios Tzimiropoulos', 'georgios tzimiropoulos')</td><td>1{aaron.jackson, adrian.bulat, yorgos.tzimiropoulos}@nottingham.ac.uk
+<br/>2 vasileios.argyriou@kingston.ac.uk
+</td></tr><tr><td>01c09acf0c046296643de4c8b55a9330e9c8a419</td><td>MANIFOLD LEARNING USING EUCLIDEAN
+<br/>-NEAREST NEIGHBOR GRAPHS
+<br/>Department of Electrical Engineering and Computer Science
+<br/><b>University of Michigan, Ann Arbor, MI</b></td><td>('1759109', 'Jose A. Costa', 'jose a. costa')<br/>('35806564', 'Alfred O. Hero', 'alfred o. hero')</td><td>Email: jcosta@umich.edu, hero@eecs.umich.edu
+</td></tr><tr><td>01d23cbac762b0e46251f5dbde08f49f2d13b9f8</td><td>Combining Face Verification Experts
+<br/>+Telecommunication laboratory, Universit´e catholique de Louvain, B-1348 Belgium
+<br/>⁄Center for Vision, Speech and Signal Processing,
+<br/><b>University of Surrey, Guildford, Surrey GU2 7XH, UK</b></td><td>('34964585', 'Jacek Czyz', 'jacek czyz')<br/>('1748684', 'Josef Kittler', 'josef kittler')<br/>('1698047', 'Luc Vandendorpe', 'luc vandendorpe')</td><td>czyz@tele.ucl.ac.be
+</td></tr><tr><td>014143aa16604ec3f334c1407ceaa496d2ed726e</td><td>Large-Scale Manifold Learning
+<br/><b>Courant Institute</b><br/>New York, NY
+<br/>Google Research
+<br/>New York, NY
+<br/>Henry Rowley
+<br/>Google Research
+<br/>Mountain View, CA
+</td><td>('8395559', 'Ameet Talwalkar', 'ameet talwalkar')<br/>('2794322', 'Sanjiv Kumar', 'sanjiv kumar')</td><td>ameet@cs.nyu.edu
+<br/>sanjivk@google.com
+<br/>har@google.com
+</td></tr><tr><td>011e6146995d5d63c852bd776f782cc6f6e11b7b</td><td>Fast Training of Triplet-based Deep Binary Embedding Networks
+<br/><b>The University of Adelaide; and Australian Centre for Robotic Vision</b></td><td>('3194022', 'Bohan Zhuang', 'bohan zhuang')<br/>('2604251', 'Guosheng Lin', 'guosheng lin')<br/>('1780381', 'Chunhua Shen', 'chunhua shen')</td><td></td></tr><tr><td>0182d090478be67241392df90212d6cd0fb659e6</td><td>Discovering Localized Attributes for Fine-grained Recognition
+<br/><b>Indiana University</b><br/>Bloomington, IN
+<br/>TTI-Chicago
+<br/>Chicago, IL
+<br/>David Crandall
+<br/><b>Indiana University</b><br/>Bloomington, IN
+<br/><b>University of Texas</b><br/>Austin, TX
+</td><td>('2481141', 'Kun Duan', 'kun duan')<br/>('1713589', 'Devi Parikh', 'devi parikh')<br/>('1794409', 'Kristen Grauman', 'kristen grauman')</td><td>kduan@indiana.edu
+<br/>dparikh@ttic.edu
+<br/>djcran@indiana.edu
+<br/>grauman@cs.utexas.edu
+</td></tr><tr><td>016a8ed8f6ba49bc669dbd44de4ff31a79963078</td><td>1Graduate School, CAS, Beijing, 100039, China,
+<br/>2ICT-ISVISION Joint R&D Laboratory for Face Recognition, CAS, Beijing, China, 100080
+<br/><b>Harbin Institute of Technology, Harbin, China</b><br/>FACE RELIGHTING FOR FACE RECOGNTION UNDER GENERIC ILLUMINATION
+</td><td>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('1710220', 'Xilin Chen', 'xilin chen')</td><td></td></tr><tr><td>01beab8f8293a30cf48f52caea6ca0fb721c8489</td><td></td><td></td><td></td></tr><tr><td>0178929595f505ef7655272cc2c339d7ed0b9507</td><td></td><td></td><td></td></tr><tr><td>0181fec8e42d82bfb03dc8b82381bb329de00631</td><td>Discriminative Subspace Clustering
+<br/><b>CVL, Link oping University, Link oping, Sweden</b><br/><b>VSI Lab, Goethe University, Frankfurt, Germany</b></td><td>('1797883', 'Vasileios Zografos', 'vasileios zografos')<br/>('34824636', 'Rudolf Mester', 'rudolf mester')</td><td></td></tr><tr><td>01b4b32c5ef945426b0396d32d2a12c69c282e29</td><td></td><td></td><td></td></tr><tr><td>0113b302a49de15a1d41ca4750191979ad756d2f</td><td>1­4244­0367­7/06/$20.00 ©2006 IEEE
+<br/>537
+<br/>ICME 2006
+</td><td></td><td></td></tr><tr><td>019e471667c72b5b3728b4a9ba9fe301a7426fb2</td><td>Cross-Age Face Verification by Coordinating with Cross-Face Age Verification
+<br/><b>Temple University, Philadelphia, USA</b></td><td>('38909760', 'Liang Du', 'liang du')<br/>('1805398', 'Haibin Ling', 'haibin ling')</td><td>{liang.du, hbling}@temple.edu
+</td></tr><tr><td>0601416ade6707c689b44a5bb67dab58d5c27814</td><td>Feature Selection in Face Recognition: A Sparse
+<br/>Representation Perspective
+<br/>Electrical Engineering and Computer Sciences
+<br/><b>University of California at Berkeley</b><br/>Technical Report No. UCB/EECS-2007-99
+<br/>http://www.eecs.berkeley.edu/Pubs/TechRpts/2007/EECS-2007-99.html
+<br/>August 14, 2007
+</td><td>('2223304', 'Allan Y. Yang', 'allan y. yang')<br/>('1738310', 'John Wright', 'john wright')<br/>('7777470', 'Yi Ma', 'yi ma')<br/>('1717598', 'S. Shankar Sastry', 's. shankar sastry')</td><td></td></tr><tr><td>064b797aa1da2000640e437cacb97256444dee82</td><td>Coarse-to-fine Face Alignment with Multi-Scale Local Patch Regression
+<br/>Megvii Inc.
+<br/>Megvii Inc.
+<br/>Megvii Inc.
+</td><td>('18036051', 'Zhiao Huang', 'zhiao huang')<br/>('1848243', 'Erjin Zhou', 'erjin zhou')<br/>('2695115', 'Zhimin Cao', 'zhimin cao')</td><td>hza@megvii.com
+<br/>zej@megvii.com
+<br/>czm@megvii.com
+</td></tr><tr><td>06f146dfcde10915d6284981b6b84b85da75acd4</td><td>Scalable Face Image Retrieval using
+<br/>Attribute-Enhanced Sparse Codewords
+</td><td>('33970300', 'Bor-Chun Chen', 'bor-chun chen')<br/>('35081710', 'Yan-Ying Chen', 'yan-ying chen')<br/>('1692811', 'Yin-Hsi Kuo', 'yin-hsi kuo')<br/>('1716836', 'Winston H. Hsu', 'winston h. hsu')</td><td></td></tr><tr><td>067126ce1f1a205f98e33db7a3b77b7aec7fb45a</td><td>On Improving Dissimilarity-Based Classifications Using
+<br/>a Statistical Similarity Measure(cid:2)
+<br/><b>Myongji University</b><br/>Yongin, 449-728 South Korea
+<br/>2 Faculty of Electrical Engineering, Mathematics and Computer Science,
+<br/><b>Delft University of Technology, The Netherlands</b></td><td>('34959719', 'Sang-Woon Kim', 'sang-woon kim')</td><td>kimsw@mju.ac.kr
+<br/>r.p.w.duin@tudelft.nl
+</td></tr><tr><td>06466276c4955257b15eff78ebc576662100f740</td><td>Where Is Who: Large-Scale Photo Retrieval by Facial
+<br/>Attributes and Canvas Layout
+<br/><b>National Taiwan University, Taipei, Taiwan</b></td><td>('2476032', 'Yu-Heng Lei', 'yu-heng lei')<br/>('35081710', 'Yan-Ying Chen', 'yan-ying chen')<br/>('33970300', 'Bor-Chun Chen', 'bor-chun chen')<br/>('2817570', 'Lime Iida', 'lime iida')<br/>('1716836', 'Winston H. Hsu', 'winston h. hsu')</td><td>{siriushpa, limeiida}@gmail.com, winston@csie.ntu.edu.tw
+<br/>{ryanlei, yanying}@cmlab.csie.ntu.edu.tw,
+</td></tr><tr><td>0697bd81844d54064d992d3229162fe8afcd82cb</td><td>User-driven mobile robot storyboarding: Learning image interest and
+<br/>saliency from pairwise image comparisons
+</td><td>('1699287', 'Michael Burke', 'michael burke')</td><td></td></tr><tr><td>06262d6beeccf2784e4e36a995d5ee2ff73c8d11</td><td>Recognize Actions by Disentangling Components of Dynamics
+<br/><b>CUHK - SenseTime Joint Lab, The Chinese University of Hong Kong 2Amazon Rekognition</b></td><td>('47827548', 'Yue Zhao', 'yue zhao')<br/>('3331521', 'Yuanjun Xiong', 'yuanjun xiong')<br/>('1807606', 'Dahua Lin', 'dahua lin')</td><td>{zy317,dhlin}@ie.cuhk.edu.hk {yuanjx}@amazon.com
+</td></tr><tr><td>06f585a3a05dd3371cd600a40dc35500e2f82f9b</td><td>Better and Faster: Knowledge Transfer from Multiple Self-supervised Learning
+<br/>Tasks via Graph Distillation for Video Classification
+<br/><b>Institute of Computer Science and Technology, Peking University</b><br/>Beijing 100871, China
+</td><td>('2439211', 'Chenrui Zhang', 'chenrui zhang')<br/>('1704081', 'Yuxin Peng', 'yuxin peng')</td><td>pengyuxin@pku.edu.cn
+</td></tr><tr><td>06f8aa1f436a33014e9883153b93581eea8c5c70</td><td>Leaving Some Stones Unturned:
+<br/>Dynamic Feature Prioritization
+<br/>for Activity Detection in Streaming Video
+<br/><b>The University of Texas at Austin</b><br/>Current approaches for activity recognition often ignore con-
+<br/>straints on computational resources: 1) they rely on extensive
+<br/>feature computation to obtain rich descriptors on all frames,
+<br/>and 2) they assume batch-mode access to the entire test video at
+<br/>once. We propose a new active approach to activity recognition
+<br/>that prioritizes “what to compute when” in order to make timely
+<br/>predictions. The main idea is to learn a policy that dynamically
+<br/>schedules the sequence of features to compute on selected frames
+<br/>of a given test video. In contrast to traditional static feature
+<br/>selection, our approach continually re-prioritizes computation
+<br/>based on the accumulated history of observations and accounts
+<br/>for the transience of those observations in ongoing video. We
+<br/>develop variants to handle both the batch and streaming settings.
+<br/>On two challenging datasets, our method provides significantly
+<br/>better accuracy than alternative techniques for a wide range of
+<br/>computational budgets.
+<br/>I. INTRODUCTION
+<br/>Activity recognition in video is a core vision challenge. It
+<br/>has applications in surveillance, autonomous driving, human-
+<br/>robot interaction, and automatic tagging for large-scale video
+<br/>retrieval. In any such setting, a system that can both categorize
+<br/>and temporally localize activities would be of great value.
+<br/>Activity recognition has attracted a steady stream of in-
+<br/>teresting research [1]. Recent methods are largely learning-
+<br/>based, and tackle realistic everyday activities (e.g., making
+<br/>tea, riding a bike). Due to the complexity of the problem,
+<br/>as well as the density of raw data comprising even short
+<br/>videos, useful video representations are often computationally
+<br/>intensive—whether dense trajectories, interest points, object
+<br/>detectors, or convolutional neural network (CNN) features run
+<br/>on each frame [2]–[8]. In fact, the expectation is that the more
+<br/>features one extracts from the video, the better for accuracy.
+<br/>For a practitioner wanting reliable activity recognition, then,
+<br/>the message is to “leave no stone unturned”, ideally extracting
+<br/>complementary descriptors from all video frames.
+<br/>However, the “no stone unturned” strategy is problematic.
+<br/>Not only does it assume virtually unbounded computational
+<br/>resources, it also assumes that an entire video is available
+<br/>at once for batch processing. In reality, a recognition system
+<br/>will have some computational budget. Further, it may need
+<br/>to perform in a streaming manner, with access to only a short
+<br/>buffer of recent frames. Together, these considerations suggest
+<br/>some form of feature triage is needed.
+<br/>Yet prioritizing features for activity in video is challenging,
+<br/>for two key reasons. First,
+<br/>informative features
+<br/>may depend critically on what has been observed so far in
+<br/>the most
+<br/>the specific test video, making traditional fixed/static feature
+<br/>selection methods inadequate. In other words, the recognition
+<br/>system’s belief state must evolve over time, and its priorities of
+<br/>which features to extract next must evolve too. Second, when
+<br/>processing streaming video, the entire video is never available
+<br/>to the algorithm at once. This puts limits on what features can
+<br/>even be considered each time step, and requires accounting
+<br/>for the feature extractors’ framerates when allocating compu-
+<br/>tation.
+<br/>In light of these challenges, we propose a dynamic approach
+<br/>to prioritize which features to compute when for activity
+<br/>recognition. We formulate the problem as policy learning in a
+<br/>Markov decision process. In particular, we learn a non-myopic
+<br/>policy that maps the accumulated feature history (state) to the
+<br/>subsequent feature and space-time location (action) that, once
+<br/>extracted, is most expected to improve recognition accuracy
+<br/>(reward) over a sequence of such actions. We develop two
+<br/>variants of our approach: one for batch processing, where
+<br/>we are free to “jump” around the video to get
+<br/>the next
+<br/>desired feature, and one for streaming video, where we are
+<br/>confined to a buffer of newly received frames. By dynamically
+<br/>allocating feature extraction effort, our method wisely leaves
+<br/>some stones unturned—that is, some features unextracted—in
+<br/>order to meet real computational budget constraints.
+<br/>To our knowledge, our work is the first to actively triage
+<br/>feature computation for streaming activity recognition.1 While
+<br/>recent work explores ways to intelligently order feature com-
+<br/>putation in a static image for the sake of object or scene
+<br/>recognition [10]–[17] or offline batch activity detection [18],
+<br/>streaming video presents unique challenges, as we explain in
+<br/>detail below. While methods for “early” detection can fire on
+<br/>an action prior to its completion [19]–[21], they nonetheless
+<br/>passively extract all features in each incoming frame.
+<br/>We validate our approach on two public datasets consist-
+<br/>ing of third- and first-person video from over 120 activity
+<br/>categories. We show its impact in both the streaming and
+<br/>batch settings, and we further consider scenarios where the test
+<br/>video is “untrimmed”. Comparisons with status quo passive
+<br/>feature extraction, traditional feature selection approaches, and
+<br/>a state-of-the-art early event detector demonstrate the clear
+<br/>advantages of our approach.
+<br/>1This paper extends our earlier technical report [9].
+</td><td>('39523296', 'Yu-Chuan Su', 'yu-chuan su')<br/>('1794409', 'Kristen Grauman', 'kristen grauman')</td><td></td></tr><tr><td>061c84a4143e859a7caf6e6d283dfb30c23ee56e</td><td>DEEP-CARVING : Discovering Visual Attributes by Carving Deep Neural Nets
+<br/><b>Machine Intelligence Lab (MIL), Cambridge University</b><br/>∗Computer Science & Artificial Intelligence Lab (CSAIL), MIT
+<br/>Most of the approaches for discovering visual attributes in images de-
+<br/>mand significant supervision, which is cumbersome to obtain. In this paper,
+<br/>we aim to discover visual attributes in a weakly supervised setting that is
+<br/>commonly encountered with contemporary image search engines.
+<br/>For instance, given a noun (say forest) and its associated attributes (say
+<br/>dense, sunlit, autumn), search engines can now generate many valid im-
+<br/>ages for any attribute-noun pair (dense forests, autumn forests, etc). How-
+<br/>ever, images for an attribute-noun pair do not contain any information about
+<br/>other attributes (like which forests in the autumn are dense too). Thus, a
+<br/>weakly supervised scenario occurs. Let A = {a1, . . . ,aM} be the set of
+<br/>M attributes under consideration. We have a weakly supervised training
+<br/>set, S = {(x1,y1), . . . , (xN,yN )} of N images x1, . . . ,xN ∈ X having labels
+<br/>y1, . . . ,yN ∈ A respectively. Equivalently, segregating the training images
+<br/>based on their label, we obtain M sets Sm = Xm × am, where Xm = {x ∈
+<br/>X|(x,am) ∈ S} denotes the set of Nm = |Xm| images each having the (sin-
+<br/>gle) positive training label am,m ∈ {1, . . . ,M}. For a test image xt, the task
+<br/>is to predict yt ⊆ A, i.e. all the attributes present in xt. The aforemen-
+<br/>tioned weakly supervised problem setting is more challenging for attributes
+<br/>as compared to object and scene detection, because attributes can highly co-
+</td><td>('1808862', 'Sukrit Shankar', 'sukrit shankar')<br/>('3307138', 'Vikas K. Garg', 'vikas k. garg')<br/>('1745672', 'Roberto Cipolla', 'roberto cipolla')</td><td></td></tr><tr><td>06d93a40365da90f30a624f15bf22a90d9cfe6bb</td><td>Learning from Candidate Labeling Sets
+<br/><b>Idiap Research Institute and EPF Lausanne</b><br/>Luo Jie
+<br/>DSI, Universit`a degli Studi di Milano
+</td><td>('1721068', 'Francesco Orabona', 'francesco orabona')</td><td>jluo@idiap.ch
+<br/>orabona@dsi.unimi.it
+</td></tr><tr><td>061e29eae705f318eee703b9e17dc0989547ba0c</td><td>Enhancing Expression Recognition in the Wild
+<br/>with Unlabeled Reference Data
+<br/>1Key Lab of Intelligent Information Processing of Chinese Academy of Sciences
+<br/><b>CAS), Institute of Computing Technology, CAS, Beijing 100190, China</b><br/><b>Graduate University of Chinese Academy of Sciences, Beijing 100049, China</b></td><td>('1730228', 'Mengyi Liu', 'mengyi liu')<br/>('1688086', 'Shaoxin Li', 'shaoxin li')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('1710220', 'Xilin Chen', 'xilin chen')</td><td>{mengyi.liu, shaoxin.li, shiguang.shan, xilin.chen}@vipl.ict.ac.cn;
+</td></tr><tr><td>06850b60e33baa4ea9473811d58c0d5015da079e</td><td>A SURVEY OF THE TRENDS IN FACIAL AND
+<br/>EXPRESSION RECOGNITION DATABASES AND
+<br/>METHODS
+<br/><b>University of Washington, Bothell</b><br/><b>University of Washington, Bothell</b></td><td>('2971095', 'Sohini Roychowdhury', 'sohini roychowdhury')<br/>('31448697', 'Michelle Emmons', 'michelle emmons')</td><td>roych@uw.edu
+<br/>memmons1442@gmail.com
+</td></tr><tr><td>06e7e99c1fdb1da60bc3ec0e2a5563d05b63fe32</td><td>WhittleSearch: Image Search with Relative Attribute Feedback
+<br/>(Supplementary Material)
+<br/>1 Comparative Qualitative Search Results
+<br/>We present three qualitative search results for human-generated feedback, in addition to those
+<br/>shown in the paper. Each example shows one search iteration, where the 20 reference images are
+<br/>randomly selected (rather than ones that match a keyword search, as the image examples in the
+<br/>main paper illustrate). For each result, the first figure shows our method and the second figure
+<br/>shows the binary feedback result for the corresponding target image. Note that for our method,
+<br/>“more/less X” (where X is an attribute) means that the target image is more/less X than the
+<br/>reference image which is shown.
+<br/>Figures 1 and 2 show results for human-generated relative attribute and binary feedback, re-
+<br/>spectively, when both methods are used to target the same “mental image” of a shoe shown in the
+<br/>top left bubble. The top right grid of 20 images are the reference images displayed to the user, and
+<br/>those outlined and annotated with constraints are the ones chosen by the user to give feedback.
+<br/>The bottom row of images in either figure shows the top-ranked images after integrating the user’s
+<br/>feedback into the scoring function, revealing the two methods’ respective performance. We see that
+<br/>while both methods retrieve high-heeled shoes, only our method retrieves images that are as “open”
+<br/>as the target image. This is because using the proposed approach, the user was able to comment
+<br/>explicitly on the desired openness property.
+</td><td>('1770205', 'Adriana Kovashka', 'adriana kovashka')<br/>('1713589', 'Devi Parikh', 'devi parikh')<br/>('1794409', 'Kristen Grauman', 'kristen grauman')</td><td></td></tr><tr><td>06a6347ac14fd0c6bb3ad8190cbe9cdfa5d59efc</td><td>Active Image Clustering: Seeking Constraints from Humans to Complement
+<br/>Algorithms
+<br/>Computer Science Department
+<br/><b>University of Maryland, College Park</b></td><td>('2221075', 'Arijit Biswas', 'arijit biswas')</td><td>arijitbiswas87@gmail.com, djacobs@umiacs.umd.edu
+</td></tr><tr><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td><td>(cid:1)(cid:2)(cid:3)(cid:4)(cid:5)(cid:3)(cid:4)(cid:6)(cid:7)(cid:3)(cid:8)(cid:9)(cid:6)(cid:10)(cid:3)(cid:11)(cid:3)(cid:12)(cid:3)(cid:13)(cid:9)
+<br/>(cid:3)(cid:4)(cid:14)(cid:6)(cid:15)(cid:16)(cid:3)(cid:17)(cid:18)(cid:3)(cid:11)(cid:5)(cid:19)(cid:4) (cid:20)(cid:5)(cid:11)(cid:21)(cid:6)(cid:3)(cid:6)(cid:22)(cid:9)(cid:20)(cid:6)(cid:10)(cid:9)(cid:11)(cid:9)(cid:8)(cid:11)(cid:5)(cid:19)(cid:4)(cid:6)(cid:23)(cid:17)(cid:24)(cid:19)(cid:2)(cid:5)(cid:11)(cid:21)(cid:25)
+<br/>(cid:26)(cid:11)(cid:5)(cid:8)(cid:17)(cid:6)(cid:27)(cid:1)(cid:9)(cid:22)(cid:8)(cid:18)(cid:1)(cid:28)(cid:12)(cid:6)(cid:29)(cid:4)(cid:20)(cid:11)(cid:6)(cid:24)(cid:30)(cid:1)(cid:15)(cid:25)(cid:1)(cid:31)(cid:8)(cid:20)(cid:8) (cid:14)(cid:1)!(cid:8) (cid:8)(cid:6)(cid:4)(cid:1)"(cid:16)(cid:8)(cid:16)(cid:20)(cid:14)(cid:1)(cid:3)(cid:15)(cid:8)(cid:22)(cid:4)(cid:12)(cid:1)(cid:23)(cid:5)(cid:29)(cid:18)(cid:14)(cid:1)(cid:31)(cid:8)(cid:20)(cid:8) (cid:14)(cid:1)(cid:26)!(cid:9)(cid:13)(cid:14)(cid:1)#(cid:17)(cid:8)(cid:6)(cid:5)$(cid:1)(cid:17)(cid:4)(cid:5)%(cid:8)(cid:10)(cid:8)(cid:11)(cid:6)(cid:8)(cid:12)&(cid:30)(cid:8)(cid:16)(cid:15)(cid:15)(cid:21)(cid:27)(cid:15)(cid:17)
+<br/>(cid:3)(cid:4)(cid:5)(cid:6)(cid:7)(cid:8)(cid:1)(cid:9)(cid:10)(cid:10)(cid:8)(cid:11)(cid:6)(cid:8)(cid:12)(cid:1)(cid:13)(cid:6)(cid:7)(cid:14) (cid:3)(cid:15)(cid:16)(cid:8)(cid:17)(cid:17)(cid:8)(cid:18)(cid:1)(cid:3)(cid:8)(cid:16)(cid:18)(cid:6)(cid:1)(cid:19)(cid:4)(cid:16)(cid:11)(cid:16)(cid:6)(cid:10)(cid:6)(cid:14)(cid:1)(cid:19)(cid:20)(cid:21)(cid:1)(cid:9)(cid:22)(cid:8)(cid:17)(cid:1)(cid:23)(cid:8)(cid:11)(cid:24)(cid:8)(cid:12)(cid:25)(cid:8)(cid:20)(cid:18)
+<br/>(cid:23)(cid:12)(cid:13)(cid:11)(cid:2)(cid:3)(cid:8)(cid:11)$(cid:1)’(cid:16)(cid:6)(cid:11) ((cid:8)((cid:4)(cid:20)(cid:1)(cid:6)(cid:12)(cid:24)(cid:20)(cid:15)(cid:18))(cid:27)(cid:4)(cid:11)(cid:1)(cid:8)(cid:1)(cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1)(cid:15)(cid:25)(cid:1)(cid:15)(cid:29)(cid:4)(cid:20)(cid:1)*(cid:14)+,,(cid:1)(cid:27)(cid:15)(cid:5)(cid:15)(cid:20)(cid:1)(cid:6)(cid:17)(cid:8)-(cid:4)(cid:11)(cid:1).(cid:4)(cid:1)(cid:27)(cid:15)(cid:5)(cid:5)(cid:4)(cid:27)(cid:24)(cid:4)(cid:18)(cid:1)(cid:25)(cid:20)(cid:15)(cid:17)(cid:1)+(cid:2)+(cid:1)(cid:18)(cid:6)(cid:25)(cid:25)(cid:4)(cid:20)(cid:4)(cid:12)(cid:24)(cid:1)(cid:16))(cid:17)(cid:8)(cid:12)(cid:1)(cid:25)(cid:8)(cid:27)(cid:4)(cid:11) (cid:6)(cid:12)(cid:1)(cid:8)-(cid:4)(cid:11)(cid:1)(cid:10)(cid:4)(cid:24).(cid:4)(cid:4)(cid:12)(cid:1)/
+<br/>(cid:8)(cid:12)(cid:18) 01(cid:21)(cid:1)2(cid:4)(cid:1)(cid:12)(cid:8)(cid:17)(cid:4)(cid:18)(cid:1)(cid:24)(cid:16)(cid:6)(cid:11)(cid:1)(cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1)(cid:24)(cid:16)(cid:4)(cid:1)(cid:26)(cid:20)(cid:8)(cid:12)(cid:6)(cid:8)(cid:12)(cid:1)3(cid:8)(cid:27)(cid:4)(cid:1)(cid:19)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1)4(cid:26)3(cid:19)(cid:23)5(cid:21)(cid:1)’(cid:15)(cid:1)(cid:4)(cid:29)(cid:8)(cid:5))(cid:8)(cid:24)(cid:4)(cid:1)(cid:24)(cid:16)(cid:4)(cid:1)(cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1)(cid:24)(cid:16)(cid:4)(cid:1)(cid:4)6((cid:4)(cid:20)(cid:6)(cid:17)(cid:4)(cid:12)(cid:24)(cid:8)(cid:5)(cid:1)(cid:20)(cid:4)(cid:11))(cid:5)(cid:24)(cid:1)(cid:15)(cid:25)(cid:1)(cid:8)(cid:1)(cid:12)(cid:4).(cid:1)(cid:25)(cid:8)(cid:27)(cid:6)(cid:8)(cid:5)(cid:1)
+<br/>(cid:25)(cid:4)(cid:8)(cid:24))(cid:20)(cid:4)(cid:1)(cid:18)(cid:4)(cid:24)(cid:4)(cid:27)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1)(cid:8)(cid:5)-(cid:15)(cid:20)(cid:6)(cid:24)(cid:16)(cid:17)(cid:1)(cid:6)(cid:11)(cid:1)(cid:20)(cid:4)((cid:15)(cid:20)(cid:24)(cid:4)(cid:18)(cid:21)
+<br/>(cid:26)(cid:9)(cid:27) (cid:28)(cid:19)(cid:2)(cid:14)(cid:13)$(cid:1)3(cid:8)(cid:27)(cid:4)(cid:1)(cid:26)(cid:17)(cid:8)-(cid:4)(cid:1)(cid:19)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:14)(cid:1)3(cid:8)(cid:27)(cid:6)(cid:8)(cid:5)(cid:1)3(cid:4)(cid:8)(cid:24))(cid:20)(cid:4)(cid:1)(cid:19)(cid:4)(cid:24)(cid:4)(cid:27)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1)(cid:9)(cid:5)-(cid:15)(cid:20)(cid:6)(cid:24)(cid:16)(cid:17)(cid:11)(cid:14)(cid:1)(cid:9)-(cid:4)(cid:1)7(cid:5)(cid:8)(cid:11)(cid:11)(cid:6)(cid:25)(cid:6)(cid:27)(cid:8)(cid:24)(cid:6)(cid:15)(cid:12)(cid:21)
+<br/>(cid:29) (cid:1)(cid:4)(cid:11)(cid:2)(cid:19)(cid:14)(cid:18)(cid:8)(cid:11)(cid:5)(cid:19)(cid:4)
+<br/>8)(cid:17)(cid:8)(cid:12)(cid:1) (cid:25)(cid:8)(cid:27)(cid:4)(cid:1) (cid:6)(cid:11)(cid:1) (cid:24)(cid:16)(cid:4)(cid:1) (cid:17)(cid:15)(cid:11)(cid:24)(cid:1) (cid:27)(cid:15)(cid:17)(cid:17)(cid:15)(cid:12)(cid:1) (cid:8)(cid:12)(cid:18)(cid:1) )(cid:11)(cid:4)(cid:25))(cid:5)(cid:1) (cid:7)(cid:4)(cid:30)(cid:1) (cid:24)(cid:15)(cid:1) (cid:8)(cid:1)
+<br/>((cid:4)(cid:20)(cid:11)(cid:15)(cid:12)9(cid:11)(cid:1) (cid:6)(cid:18)(cid:4)(cid:12)(cid:24)(cid:6)(cid:24)(cid:30)(cid:21)(cid:1) (cid:9)(cid:11)(cid:1) (cid:16))(cid:17)(cid:8)(cid:12)(cid:11)(cid:14)(cid:1) .(cid:4)(cid:1) (cid:8)(cid:20)(cid:4)(cid:1) (cid:8)(cid:10)(cid:5)(cid:4)(cid:1) (cid:24)(cid:15)(cid:1) (cid:27)(cid:8)(cid:24)(cid:4)-(cid:15)(cid:20)(cid:6)(cid:22)(cid:4)(cid:1) (cid:8)(cid:1)
+<br/>((cid:4)(cid:20)(cid:11)(cid:15)(cid:12):(cid:11)(cid:1)(cid:8)-(cid:4)(cid:1)-(cid:20)(cid:15))((cid:1)(cid:25)(cid:20)(cid:15)(cid:17)(cid:1)(cid:8)(cid:1)((cid:4)(cid:20)(cid:11)(cid:15)(cid:12):(cid:11)(cid:1)(cid:25)(cid:8)(cid:27)(cid:4)(cid:1)(cid:6)(cid:17)(cid:8)-(cid:4)(cid:1)(cid:8)(cid:12)(cid:18)(cid:1)(cid:8)(cid:20)(cid:4)(cid:1)(cid:15)(cid:25)(cid:24)(cid:4)(cid:12)(cid:1)
+<br/>(cid:8)(cid:10)(cid:5)(cid:4)(cid:1)(cid:24)(cid:15)(cid:1)(cid:10)(cid:4)(cid:1);)(cid:6)(cid:24)(cid:4)(cid:1)((cid:20)(cid:4)(cid:27)(cid:6)(cid:11)(cid:4)(cid:1)(cid:6)(cid:12)(cid:1)(cid:24)(cid:16)(cid:6)(cid:11)(cid:1)(cid:4)(cid:11)(cid:24)(cid:6)(cid:17)(cid:8)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1)<(cid:2)=(cid:21)(cid:1)(cid:26)(cid:12)(cid:1)(cid:20)(cid:4)(cid:27)(cid:4)(cid:12)(cid:24)(cid:1)(cid:30)(cid:4)(cid:8)(cid:20)(cid:11)(cid:14)(cid:1)
+<br/>(cid:25)(cid:8)(cid:27)(cid:4)(cid:1) (cid:20)(cid:4)(cid:27)(cid:15)-(cid:12)(cid:6)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1) (cid:8)(cid:12)(cid:18)(cid:1) (cid:20)(cid:4)(cid:5)(cid:8)(cid:24)(cid:4)(cid:18)(cid:1) .(cid:15)(cid:20)(cid:7)(cid:11)(cid:1) (cid:16)(cid:8)(cid:29)(cid:4)(cid:1) (cid:20)(cid:4)(cid:27)(cid:4)(cid:6)(cid:29)(cid:4)(cid:18)(cid:1) (cid:11))(cid:10)(cid:11)(cid:24)(cid:8)(cid:12)(cid:24)(cid:6)(cid:8)(cid:5)(cid:1)
+<br/>(cid:8)(cid:24)(cid:24)(cid:4)(cid:12)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1) (cid:25)(cid:20)(cid:15)(cid:17)(cid:1) (cid:20)(cid:4)(cid:11)(cid:4)(cid:8)(cid:20)(cid:27)(cid:16)(cid:4)(cid:20)(cid:11)(cid:1) (cid:6)(cid:12)(cid:1) (cid:10)(cid:6)(cid:15)(cid:17)(cid:4)(cid:24)(cid:20)(cid:6)(cid:27)(cid:11)(cid:14)(cid:1) ((cid:8)(cid:24)(cid:24)(cid:4)(cid:20)(cid:12)(cid:1) (cid:20)(cid:4)(cid:27)(cid:15)-(cid:12)(cid:6)(cid:24)(cid:6)(cid:15)(cid:12)(cid:14)(cid:1)
+<br/>(cid:8)(cid:12)(cid:18)(cid:1) (cid:27)(cid:15)(cid:17)()(cid:24)(cid:4)(cid:20) (cid:29)(cid:6)(cid:11)(cid:6)(cid:15)(cid:12)(cid:1) (cid:27)(cid:15)(cid:17)(cid:17))(cid:12)(cid:6)(cid:24)(cid:6)(cid:4)(cid:11)(cid:1) </(cid:14)(cid:1) *(cid:14)(cid:1) > (cid:8)(cid:12)(cid:18) 1=(cid:21)(cid:1) ’(cid:16)(cid:4)(cid:11)(cid:4)(cid:1)
+<br/>(cid:27)(cid:15)(cid:17)(cid:17)(cid:15)(cid:12)(cid:1)(cid:6)(cid:12)(cid:24)(cid:4)(cid:20)(cid:4)(cid:11)(cid:24)(cid:11)(cid:1)(cid:8)(cid:17)(cid:15)(cid:12)-(cid:1)(cid:20)(cid:4)(cid:11)(cid:4)(cid:8)(cid:20)(cid:27)(cid:16)(cid:4)(cid:20)(cid:11)(cid:1)(cid:17)(cid:15)(cid:24)(cid:6)(cid:29)(cid:8)(cid:24)(cid:4)(cid:18)(cid:1))(cid:11)(cid:1)(cid:24)(cid:15)(cid:1)(cid:27)(cid:15)(cid:5)(cid:5)(cid:4)(cid:27)(cid:24)(cid:1)(cid:8)(cid:1)
+<br/>(cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1) (cid:15)(cid:25)(cid:1) (cid:25)(cid:8)(cid:27)(cid:6)(cid:8)(cid:5)(cid:1) (cid:6)(cid:17)(cid:8)-(cid:4)(cid:11)(cid:1) (cid:25)(cid:20)(cid:15)(cid:17)(cid:1) ((cid:4)(cid:15)((cid:5)(cid:4)(cid:1) (cid:6)(cid:12)(cid:1) (cid:18)(cid:6)(cid:25)(cid:25)(cid:4)(cid:20)(cid:4)(cid:12)(cid:24)(cid:1) (cid:8)-(cid:4)(cid:11)(cid:21) ’(cid:16)(cid:4)(cid:1)
+<br/>(cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1)(cid:6)(cid:11)(cid:1)(cid:6)(cid:12)(cid:24)(cid:4)(cid:12)(cid:18)(cid:4)(cid:18)(cid:1)(cid:25)(cid:15)(cid:20)(cid:1)(cid:18)(cid:6)(cid:11)(cid:24)(cid:20)(cid:6)(cid:10))(cid:24)(cid:6)(cid:15)(cid:12)(cid:1)(cid:24)(cid:15)(cid:1)(cid:20)(cid:4)(cid:11)(cid:4)(cid:8)(cid:20)(cid:27)(cid:16)(cid:4)(cid:20)(cid:11)(cid:21)
+<br/>’(cid:16)(cid:4)(cid:20)(cid:4)(cid:1) (cid:8)(cid:20)(cid:4)(cid:1) (cid:17)(cid:8)(cid:12)(cid:30)(cid:1) ()(cid:10)(cid:5)(cid:6)(cid:27)(cid:8)(cid:5)(cid:5)(cid:30)(cid:1) (cid:8)(cid:29)(cid:8)(cid:6)(cid:5)(cid:8)(cid:10)(cid:5)(cid:4)(cid:1) (cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:11)(cid:1) (cid:25)(cid:15)(cid:20)(cid:1) (cid:25)(cid:8)(cid:27)(cid:4)(cid:1)
+<br/>(cid:20)(cid:4)(cid:27)(cid:15)-(cid:12)(cid:6)(cid:24)(cid:6)(cid:15)(cid:12)(cid:14)(cid:1) (cid:8)(cid:12)(cid:18)(cid:1) (cid:25)(cid:8)(cid:27)(cid:6)(cid:8)(cid:5)(cid:1) (cid:4)6((cid:20)(cid:4)(cid:11)(cid:11)(cid:6)(cid:15)(cid:12)(cid:1) (cid:8)(cid:12)(cid:8)(cid:5)(cid:30)(cid:11)(cid:6)(cid:11)(cid:21)(cid:1) (cid:23)(cid:4)(cid:11)(cid:6)(cid:18)(cid:4)(cid:1) (cid:8)(cid:10)(cid:15)(cid:29)(cid:4)(cid:1)
+<br/>(cid:8)(((cid:5)(cid:6)(cid:27)(cid:8)(cid:24)(cid:6)(cid:15)(cid:12)(cid:11)(cid:14)(cid:1)(cid:26)(cid:20)(cid:8)(cid:12)(cid:6)(cid:8)(cid:12)(cid:1)3(cid:8)(cid:27)(cid:4)(cid:1)(cid:19)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1)4(cid:26)3(cid:19)(cid:23)5(cid:1)(cid:27)(cid:8)(cid:12)(cid:1)(cid:10)(cid:4)(cid:1))(cid:11)(cid:4)(cid:18)(cid:1)(cid:25)(cid:15)(cid:20)(cid:1)(cid:8)-(cid:4)(cid:1)
+<br/>(cid:27)(cid:5)(cid:8)(cid:11)(cid:11)(cid:6)(cid:25)(cid:6)(cid:27)(cid:8)(cid:24)(cid:6)(cid:15)(cid:12)(cid:14)(cid:1) (cid:25)(cid:8)(cid:27)(cid:6)(cid:8)(cid:5)(cid:1) (cid:11))(cid:20)-(cid:4)(cid:20)(cid:30)(cid:14)(cid:1) (cid:20)(cid:8)(cid:27)(cid:4)(cid:1) (cid:18)(cid:4)(cid:24)(cid:4)(cid:27)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1) 4(cid:10)(cid:4)(cid:11)(cid:6)(cid:18)(cid:4)(cid:1) (cid:15)(cid:24)(cid:16)(cid:4)(cid:20)(cid:1)
+<br/>(cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:11)5(cid:14)(cid:1) (cid:11)(cid:24))(cid:18)(cid:30)(cid:6)(cid:12)-(cid:1) (cid:6)(cid:12)(cid:25)(cid:5))(cid:4)(cid:12)(cid:27)(cid:4)(cid:1) (cid:15)(cid:25)(cid:1) (cid:27)(cid:8)(cid:20)(cid:4)(cid:4)(cid:20)(cid:1) (cid:8)(cid:12)(cid:18)(cid:1) (cid:7)(cid:6)(cid:12)(cid:18)(cid:1) (cid:15)(cid:25)(cid:1) (cid:11)(cid:7)(cid:6)(cid:12)(cid:1) (cid:15)(cid:12)(cid:1)
+<br/>(cid:8)-(cid:6)(cid:12)-(cid:14)(cid:1)(cid:8)(cid:12)(cid:18)(cid:1)(cid:15)(cid:24)(cid:16)(cid:4)(cid:20)(cid:1)(cid:11)(cid:6)(cid:17)(cid:6)(cid:5)(cid:8)(cid:20)(cid:1)(cid:20)(cid:4)(cid:11)(cid:4)(cid:8)(cid:20)(cid:27)(cid:16)(cid:4)(cid:11)(cid:21)
+<br/>(cid:26)(cid:12)(cid:1) (cid:24)(cid:16)(cid:4)(cid:1) (cid:20)(cid:4)(cid:17)(cid:8)(cid:6)(cid:12)(cid:6)(cid:12)-(cid:1) ((cid:8)(cid:20)(cid:24)(cid:11) (cid:18)(cid:4)(cid:24)(cid:8)(cid:6)(cid:5)(cid:11)(cid:1) (cid:15)(cid:25)(cid:1) (cid:24)(cid:16)(cid:4)(cid:1) (cid:4)6(cid:6)(cid:11)(cid:24)(cid:6)(cid:12)-(cid:1) (cid:25)(cid:8)(cid:27)(cid:4)(cid:1) (cid:6)(cid:17)(cid:8)-(cid:4)(cid:1)
+<br/>(cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:11) (cid:8)(cid:12)(cid:18) (cid:24)(cid:16)(cid:4)(cid:1)(cid:26)(cid:20)(cid:8)(cid:12)(cid:6)(cid:8)(cid:12)(cid:1)3(cid:8)(cid:27)(cid:4)(cid:1)(cid:26)(cid:17)(cid:8)-(cid:4)(cid:1)(cid:19)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1)(cid:6)(cid:11)(cid:1)-(cid:6)(cid:29)(cid:4)(cid:12)(cid:21) (cid:9)(cid:5)(cid:11)(cid:15)(cid:1)
+<br/>(cid:24)(cid:16)(cid:4)(cid:1) (cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1) (cid:6)(cid:11)(cid:1) (cid:4)(cid:29)(cid:8)(cid:5))(cid:8)(cid:24)(cid:4)(cid:18)(cid:1) (cid:10)(cid:30)(cid:1) (cid:8)(((cid:5)(cid:30)(cid:6)(cid:12)- (cid:8)(cid:1) (cid:12)(cid:4).(cid:1) (cid:25)(cid:8)(cid:27)(cid:6)(cid:8)(cid:5)(cid:1) (cid:25)(cid:4)(cid:8)(cid:24))(cid:20)(cid:4)(cid:1)
+<br/>(cid:18)(cid:4)(cid:24)(cid:4)(cid:27)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1)(cid:8)(cid:5)-(cid:15)(cid:20)(cid:6)(cid:24)(cid:16)(cid:17)(cid:21)(cid:1)
+<br/>(cid:30) (cid:15)(cid:31)(cid:5)(cid:13)(cid:11)(cid:5)(cid:4)(cid:24)(cid:6)(cid:7)(cid:3)(cid:8)(cid:9)(cid:6)(cid:1)(cid:25)(cid:3)(cid:24)(cid:9)(cid:6)(cid:10)(cid:3)(cid:11)(cid:3)(cid:12)(cid:3)(cid:13)(cid:9)(cid:13)
+<br/>(cid:3)(cid:8)(cid:12)(cid:30)(cid:1) (cid:25)(cid:8)(cid:27)(cid:4)(cid:1) (cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:11)(cid:1) (cid:8)(cid:20)(cid:4)(cid:1) (cid:20)(cid:4)(cid:27)(cid:15)(cid:20)(cid:18)(cid:4)(cid:18)(cid:1) )(cid:12)(cid:18)(cid:4)(cid:20)(cid:1) (cid:8)(cid:1) (cid:29)(cid:8)(cid:20)(cid:6)(cid:4)(cid:24)(cid:30)(cid:1) (cid:15)(cid:25)(cid:1)
+<br/>(cid:27)(cid:15)(cid:12)(cid:18)(cid:6)(cid:24)(cid:6)(cid:15)(cid:12)(cid:11)(cid:1)(cid:8)(cid:12)(cid:18)(cid:1).(cid:6)(cid:24)(cid:16)(cid:1)(cid:29)(cid:8)(cid:20)(cid:6)(cid:15))(cid:11)(cid:1)(cid:8)(((cid:5)(cid:6)(cid:27)(cid:8)(cid:24)(cid:6)(cid:15)(cid:12)(cid:11)(cid:1)(cid:6)(cid:12)(cid:1)(cid:17)(cid:6)(cid:12)(cid:18)(cid:21)(cid:1)(cid:9)(cid:5)(cid:15)(cid:12)-(cid:1).(cid:6)(cid:24)(cid:16)(cid:1)
+<br/>(cid:24)(cid:16)(cid:4)(cid:1) (cid:18)(cid:4)(cid:29)(cid:4)(cid:5)(cid:15)((cid:17)(cid:4)(cid:12)(cid:24)(cid:1) (cid:15)(cid:25)(cid:1) (cid:25)(cid:8)(cid:27)(cid:4)(cid:1) (cid:20)(cid:4)(cid:27)(cid:15)-(cid:12)(cid:6)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1) (cid:8)(cid:12)(cid:18)(cid:1) (cid:25)(cid:8)(cid:27)(cid:6)(cid:8)(cid:5)(cid:1) (cid:4)6((cid:20)(cid:4)(cid:11)(cid:11)(cid:6)(cid:15)(cid:12)(cid:1)
+<br/>(cid:8)(cid:12)(cid:8)(cid:5)(cid:30)(cid:11)(cid:6)(cid:11)(cid:1) (cid:8)(cid:5)-(cid:15)(cid:20)(cid:6)(cid:24)(cid:16)(cid:17)(cid:11)(cid:14)(cid:1) (cid:8)(cid:1) (cid:27)(cid:15)(cid:17)((cid:8)(cid:20)(cid:8)(cid:24)(cid:6)(cid:29)(cid:4)(cid:5)(cid:30)(cid:1) (cid:5)(cid:8)(cid:20)-(cid:4)(cid:1) (cid:12))(cid:17)(cid:10)(cid:4)(cid:20)(cid:1) (cid:15)(cid:25)(cid:1) (cid:25)(cid:8)(cid:27)(cid:4)(cid:1)
+<br/>A(cid:8)(cid:5)(cid:4)(cid:1)<0=(cid:14)(cid:1)(cid:3)(cid:26)’(cid:1)<B=(cid:14)(cid:1)C(cid:9)33#(cid:1)<(cid:2),=(cid:14)(cid:1)(cid:8)(cid:12)(cid:18)(cid:1)(cid:17)(cid:8)(cid:12)(cid:30)(cid:1)(cid:15)(cid:24)(cid:16)(cid:4)(cid:20)(cid:1)(cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:11)(cid:1)<(cid:2)(cid:2)(cid:14)(cid:1)
+<br/>(cid:2)/=(cid:21)(cid:1)8(cid:4)(cid:20)(cid:4)(cid:1)3#!#’(cid:1)<(cid:2)*= (cid:8)(cid:12)(cid:18)(cid:1)3DE(cid:13)#’(cid:1)<(cid:2)>=(cid:1)(cid:8)(cid:20)(cid:4)(cid:1)(cid:20)(cid:4)(cid:29)(cid:6)(cid:4).(cid:4)(cid:18)(cid:21)
+<br/>(cid:30) (cid:29) (cid:7)(cid:15)!(cid:15)"(cid:6)(cid:10)(cid:3)(cid:11)(cid:3)(cid:12)(cid:3)(cid:13)(cid:9)
+<br/>’(cid:16)(cid:4)(cid:1) 3(cid:8)(cid:27)(cid:6)(cid:8)(cid:5)(cid:1) !(cid:4)(cid:27)(cid:15)-(cid:12)(cid:6)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1) ’(cid:4)(cid:27)(cid:16)(cid:12)(cid:15)(cid:5)(cid:15)-(cid:30)(cid:1) 43#!#’5(cid:1) (cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1)
+<br/>.(cid:8)(cid:11)(cid:1)(cid:27)(cid:15)(cid:5)(cid:5)(cid:4)(cid:27)(cid:24)(cid:4)(cid:18)(cid:1) (cid:8)(cid:24)(cid:1) D(cid:4)(cid:15)(cid:20)-(cid:4)(cid:1)(cid:3)(cid:8)(cid:11)(cid:15)(cid:12)(cid:1) (cid:28)(cid:12)(cid:6)(cid:29)(cid:4)(cid:20)(cid:11)(cid:6)(cid:24)(cid:30)(cid:1) (cid:8)(cid:12)(cid:18)(cid:1)(cid:24)(cid:16)(cid:4)(cid:1) (cid:28)"(cid:1) (cid:9)(cid:20)(cid:17)(cid:30)(cid:1)
+<br/>!(cid:4)(cid:11)(cid:4)(cid:8)(cid:20)(cid:27)(cid:16)(cid:1)F(cid:8)(cid:10)(cid:15)(cid:20)(cid:8)(cid:24)(cid:15)(cid:20)(cid:30)(cid:1)(cid:25)(cid:8)(cid:27)(cid:6)(cid:5)(cid:6)(cid:24)(cid:6)(cid:4)(cid:11)(cid:1) (cid:8)(cid:11)(cid:1)((cid:8)(cid:20)(cid:24)(cid:1)(cid:15)(cid:25)(cid:1) (cid:24)(cid:16)(cid:4)(cid:1)3#!#’(cid:1) ((cid:20)(cid:15)-(cid:20)(cid:8)(cid:17)(cid:1)
+<br/><(cid:2)*=(cid:21)(cid:1)(cid:26)(cid:12)(cid:1)3#!#’(cid:1)(cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1)(cid:6)(cid:17)(cid:8)-(cid:4)(cid:11)(cid:1)(cid:15)(cid:25)(cid:1)(cid:2)(cid:2)BB(cid:1)(cid:11))(cid:10) (cid:4)(cid:27)(cid:24)(cid:1)(cid:4)6(cid:6)(cid:11)(cid:24)(cid:1)(cid:6)(cid:12)(cid:1)BE/,
+<br/>(cid:18)(cid:6)(cid:25)(cid:25)(cid:4)(cid:20)(cid:4)(cid:12)(cid:24)(cid:1) ((cid:15)(cid:11)(cid:4)(cid:11)(cid:14)(cid:1) /(cid:1)
+<br/>(cid:25)(cid:8)(cid:27)(cid:6)(cid:8)(cid:5)(cid:1) (cid:4)6((cid:20)(cid:4)(cid:11)(cid:11)(cid:6)(cid:15)(cid:12)(cid:11)(cid:1) (cid:8)(cid:12)(cid:18)(cid:1) /(cid:1) (cid:18)(cid:6)(cid:25)(cid:25)(cid:4)(cid:20)(cid:4)(cid:12)(cid:24)(cid:1)
+<br/>(cid:6)(cid:5)(cid:5))(cid:17)(cid:6)(cid:12)(cid:8)(cid:24)(cid:6)(cid:15)(cid:12)(cid:11)(cid:1)(cid:6)(cid:12)(cid:1)/(cid:1)(cid:18)(cid:6)(cid:25)(cid:25)(cid:4)(cid:20)(cid:4)(cid:12)(cid:24)(cid:1)(cid:24)(cid:6)(cid:17)(cid:4)(cid:11)(cid:21)(cid:1)(cid:1)’(cid:16)(cid:4)(cid:20)(cid:4)(cid:1)(cid:8)(cid:20)(cid:4) (cid:2)>(cid:14),1(cid:2)(cid:1)(cid:6)(cid:17)(cid:8)-(cid:4)(cid:11)(cid:1)(cid:6)(cid:12)(cid:1)
+<br/>/1+G*0>(cid:1)((cid:6)6(cid:4)(cid:5)(cid:11)(cid:1)(cid:6)(cid:12)(cid:1)(cid:11)(cid:6)(cid:22)(cid:4)(cid:21)(cid:1)(cid:26)(cid:17)(cid:8)-(cid:4)(cid:11)(cid:1).(cid:4)(cid:20)(cid:4)(cid:1)(cid:27)(cid:15)(cid:5)(cid:5)(cid:4)(cid:27)(cid:24)(cid:4)(cid:18)(cid:1)(cid:8)(cid:24)(cid:1)(cid:24)(cid:16)(cid:4)(cid:1)(cid:25)(cid:15)(cid:5)(cid:5)(cid:15).(cid:6)(cid:12)-(cid:1)
+<br/>((cid:15)(cid:11)(cid:4)(cid:11)$(cid:1)(cid:20)(cid:6)-(cid:16)(cid:24)(cid:1)(cid:8)(cid:12)(cid:18)(cid:1)(cid:5)(cid:4)(cid:25)(cid:24)(cid:1)((cid:20)(cid:15)(cid:25)(cid:6)(cid:5)(cid:4)(cid:14)(cid:1)(cid:20)(cid:6)-(cid:16)(cid:24)(cid:1)(cid:8)(cid:12)(cid:18)(cid:1)(cid:5)(cid:4)(cid:25)(cid:24)(cid:1);)(cid:8)(cid:20)(cid:24)(cid:4)(cid:20)(cid:1)((cid:20)(cid:15)(cid:25)(cid:6)(cid:5)(cid:4)(cid:1)(cid:8)(cid:12)(cid:18)(cid:1)
+<br/>(cid:20)(cid:6)-(cid:16)(cid:24)(cid:1) (cid:8)(cid:12)(cid:18)(cid:1) (cid:5)(cid:4)(cid:25)(cid:24)(cid:1) (cid:16)(cid:8)(cid:5)(cid:25)(cid:1) ((cid:20)(cid:15)(cid:25)(cid:6)(cid:5)(cid:4)(cid:21)(cid:1) (cid:26)(cid:12)(cid:1) (cid:24)(cid:16)(cid:4)(cid:11)(cid:4)(cid:1) (cid:27)(cid:8)(cid:24)(cid:4)-(cid:15)(cid:20)(cid:6)(cid:4)(cid:11)(cid:1) (cid:6)(cid:17)(cid:8)-(cid:4)(cid:11)(cid:1) .(cid:4)(cid:20)(cid:4)(cid:1)
+<br/>(cid:20)(cid:4)(cid:27)(cid:15)(cid:20)(cid:18)(cid:4)(cid:18)(cid:1)(cid:25)(cid:15)(cid:20)(cid:1)1,0(cid:1)(cid:24)(cid:15)(cid:1)B0,(cid:1)(cid:11))(cid:10) (cid:4)(cid:27)(cid:24)(cid:11)(cid:21)
+<br/>(cid:30) (cid:30)(cid:6)(cid:7)#$(cid:22)(cid:15)"(cid:6)(cid:23)(cid:24)(cid:5)(cid:4)(cid:24)(cid:6)(cid:10)(cid:3)(cid:11)(cid:3)(cid:12)(cid:3)(cid:13)(cid:9)
+<br/>(cid:2)
+<br/>’(cid:16)(cid:4)(cid:1)3DE(cid:13)#’(cid:1)(cid:9)-(cid:6)(cid:12)-(cid:1)(cid:19)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1).(cid:8)(cid:11)(cid:1)-(cid:4)(cid:12)(cid:4)(cid:20)(cid:8)(cid:24)(cid:4)(cid:18)(cid:1)(cid:8)(cid:11)(cid:1)((cid:8)(cid:20)(cid:24)(cid:1)(cid:15)(cid:25)(cid:1)(cid:24)(cid:16)(cid:4)(cid:1)
+<br/>#)(cid:20)(cid:15)((cid:4)(cid:8)(cid:12)(cid:1) (cid:28)(cid:12)(cid:6)(cid:15)(cid:12)(cid:1) ((cid:20)(cid:15) (cid:4)(cid:27)(cid:24)(cid:1) 3DE(cid:13)#’(cid:1)
+<br/>43(cid:8)(cid:27)(cid:4)(cid:1) (cid:8)(cid:12)(cid:18)(cid:1) D(cid:4)(cid:11)(cid:24))(cid:20)(cid:4)(cid:1)
+<br/>!(cid:4)(cid:27)(cid:15)-(cid:12)(cid:6)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1) !(cid:4)(cid:11)(cid:4)(cid:8)(cid:20)(cid:27)(cid:16)(cid:1) (cid:13)(cid:4)(cid:24).(cid:15)(cid:20)(cid:7)5(cid:21)’(cid:16)(cid:6)(cid:11)(cid:1) (cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1) (cid:6)(cid:11)(cid:1) (cid:27)(cid:15)(cid:12)(cid:24)(cid:8)(cid:6)(cid:12)(cid:6)(cid:12)-(cid:1)
+<br/>(cid:2),,/(cid:1) (cid:11)(cid:27)(cid:8)(cid:12)(cid:12)(cid:4)(cid:18)(cid:1) (cid:25)(cid:8)(cid:27)(cid:4)(cid:1) (cid:6)(cid:17)(cid:8)-(cid:4)(cid:11)(cid:1) (cid:11)(cid:16)(cid:15).(cid:6)(cid:12)-(cid:1) 0/(cid:1) (cid:11))(cid:10) (cid:4)(cid:27)(cid:24)(cid:11)(cid:1) (cid:8)(cid:24)(cid:1) (cid:18)(cid:6)(cid:25)(cid:25)(cid:4)(cid:20)(cid:4)(cid:12)(cid:24)(cid:1)
+<br/>(cid:8)-(cid:4)(cid:11)(cid:21)(cid:1)(cid:26)(cid:17)(cid:8)-(cid:4)(cid:11)(cid:1)(cid:16)(cid:8)(cid:29)(cid:4)(cid:1)(cid:29)(cid:8)(cid:20)(cid:30)(cid:6)(cid:12)-(cid:1)(cid:20)(cid:4)(cid:11)(cid:15)(cid:5))(cid:24)(cid:6)(cid:15)(cid:12)?(cid:1)(cid:8)(((cid:20)(cid:15)6(cid:6)(cid:17)(cid:8)(cid:24)(cid:4)(cid:5)(cid:30)(cid:1)>,,G1,,
+<br/>((cid:6)6(cid:4)(cid:5)(cid:11)(cid:21)(cid:1) ’(cid:16)(cid:4)(cid:1) (cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1) .(cid:8)(cid:11)(cid:1) (cid:18)(cid:4)(cid:29)(cid:4)(cid:5)(cid:15)((cid:4)(cid:18)(cid:1) (cid:6)(cid:12)(cid:1) (cid:8)(cid:12)(cid:1) (cid:8)(cid:24)(cid:24)(cid:4)(cid:17)((cid:24)(cid:1) (cid:24)(cid:15)(cid:1) (cid:8)(cid:11)(cid:11)(cid:6)(cid:11)(cid:24)(cid:1)
+<br/>(cid:20)(cid:4)(cid:11)(cid:4)(cid:8)(cid:20)(cid:27)(cid:16)(cid:4)(cid:20)(cid:11)(cid:1) .(cid:16)(cid:15)(cid:1) (cid:6)(cid:12)(cid:29)(cid:4)(cid:11)(cid:24)(cid:6)-(cid:8)(cid:24)(cid:4)(cid:1) (cid:24)(cid:16)(cid:4)(cid:1) (cid:4)(cid:25)(cid:25)(cid:4)(cid:27)(cid:24)(cid:11)(cid:1) (cid:15)(cid:25)(cid:1) (cid:8)-(cid:6)(cid:12)-(cid:1) (cid:15)(cid:12)(cid:1) (cid:25)(cid:8)(cid:27)(cid:6)(cid:8)(cid:5)(cid:1)
+<br/>(cid:8)(((cid:4)(cid:8)(cid:20)(cid:8)(cid:12)(cid:27)(cid:4)(cid:1)<(cid:2)> =(cid:21)
+<br/>(cid:30) % (cid:22)(cid:9)(cid:9)(cid:14)(cid:6)(cid:7)(cid:19)(cid:2)(cid:6)(cid:23)(cid:6)(cid:22)(cid:9)(cid:20)(cid:6)(cid:10)(cid:3)(cid:11)(cid:3)(cid:12)(cid:3)(cid:13)(cid:9)
+<br/>(cid:26)(cid:12)(cid:1)(cid:15)(cid:20)(cid:18)(cid:4)(cid:20)(cid:1)(cid:24)(cid:15)(cid:1) (cid:10))(cid:6)(cid:5)(cid:18)(cid:14)(cid:1) (cid:24)(cid:20)(cid:8)(cid:6)(cid:12)(cid:1) (cid:8)(cid:12)(cid:18)(cid:1) (cid:20)(cid:4)(cid:5)(cid:6)(cid:8)(cid:10)(cid:5)(cid:30)(cid:1) (cid:24)(cid:4)(cid:11)(cid:24)(cid:1) (cid:8)-(cid:4)(cid:1) (cid:27)(cid:5)(cid:8)(cid:11)(cid:11)(cid:6)(cid:25)(cid:6)(cid:27)(cid:8)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1)
+<br/>(cid:8)(cid:5)-(cid:15)(cid:20)(cid:6)(cid:24)(cid:16)(cid:17)(cid:11)(cid:14)(cid:1)(cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:11)(cid:1).(cid:6)(cid:24)(cid:16)(cid:1)(cid:27)(cid:15)(cid:12)(cid:24)(cid:20)(cid:15)(cid:5)(cid:5)(cid:4)(cid:18)(cid:1)(cid:29)(cid:8)(cid:20)(cid:6)(cid:8)(cid:24)(cid:6)(cid:15)(cid:12)(cid:11)(cid:1)(cid:15)(cid:25)(cid:1)(cid:25)(cid:8)(cid:27)(cid:24)(cid:15)(cid:20)(cid:11)(cid:1)(cid:11))(cid:27)(cid:16)(cid:1)
+<br/>(cid:8)(cid:11)(cid:1)(cid:8)-(cid:4)(cid:14)(cid:1)(cid:25)(cid:8)(cid:27)(cid:4)(cid:1)((cid:15)(cid:11)(cid:4)(cid:14)(cid:1)(cid:25)(cid:8)(cid:27)(cid:6)(cid:8)(cid:5)(cid:1)(cid:4)6((cid:20)(cid:4)(cid:11)(cid:11)(cid:6)(cid:15)(cid:12)(cid:14)(cid:1)(cid:15)(cid:27)(cid:27)(cid:5))(cid:11)(cid:6)(cid:15)(cid:12)(cid:1)(cid:25)(cid:8)(cid:27)(cid:6)(cid:8)(cid:5)(cid:1)(cid:16)(cid:8)(cid:6)(cid:20)(cid:14)(cid:1)(cid:8)(cid:12)(cid:18)(cid:1)
+<br/>(cid:6)(cid:5)(cid:5))(cid:17)(cid:6)(cid:12)(cid:8)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1) (cid:6)(cid:11)(cid:1) (cid:12)(cid:4)(cid:4)(cid:18)(cid:4)(cid:18)(cid:21)(cid:1) (cid:26)(cid:12)(cid:1) (cid:11)((cid:6)(cid:24)(cid:4)(cid:1) (cid:15)(cid:25)(cid:1) (cid:29)(cid:8)(cid:20)(cid:6)(cid:15))(cid:11)(cid:1) (cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:11)(cid:14)(cid:1) (cid:24)(cid:16)(cid:4)(cid:20)(cid:4)(cid:1) (cid:6)(cid:11)(cid:1)
+<br/>(cid:12)(cid:15)(cid:24)(cid:1)(cid:8)(cid:12)(cid:1)(cid:8)(((cid:20)(cid:15)((cid:20)(cid:6)(cid:8)(cid:24)(cid:4)(cid:1)(cid:15)(cid:12)(cid:4)(cid:1)(cid:25)(cid:15)(cid:20)(cid:1)(cid:8)-(cid:4)(cid:1)(cid:27)(cid:5)(cid:8)(cid:11)(cid:11)(cid:6)(cid:25)(cid:6)(cid:27)(cid:8)(cid:24)(cid:6)(cid:15)(cid:12)(cid:21)(cid:1)(cid:3)(cid:15)(cid:11)(cid:24)(cid:1)(cid:27))(cid:20)(cid:20)(cid:4)(cid:12)(cid:24)(cid:1)(cid:18)(cid:8)(cid:24)(cid:8)(cid:1)
+<br/>(cid:10)(cid:8)(cid:11)(cid:4)(cid:11)(cid:1)(cid:18)(cid:15)(cid:12):(cid:24)(cid:1)(cid:16)(cid:8)(cid:29)(cid:4)(cid:1)(cid:6)(cid:17)(cid:8)-(cid:4)(cid:11)(cid:1)(cid:15)(cid:25)(cid:1)((cid:4)(cid:15)((cid:5)(cid:4)(cid:1)(cid:6)(cid:12)(cid:1)(cid:18)(cid:6)(cid:25)(cid:25)(cid:4)(cid:20)(cid:4)(cid:12)(cid:24)(cid:1)(cid:8)-(cid:4)(cid:11)(cid:14)(cid:1)(cid:8)(cid:12)(cid:18)(cid:1)(cid:6)(cid:25)(cid:1)(cid:24)(cid:16)(cid:4)(cid:30)(cid:1)
+<br/>(cid:16)(cid:8)(cid:29)(cid:4)(cid:14)(cid:1) (cid:24)(cid:16)(cid:4)(cid:30)(cid:1) (cid:18)(cid:15)(cid:1) (cid:12)(cid:15)(cid:24)(cid:1) (cid:17)(cid:4)(cid:12)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1) (cid:24)(cid:16)(cid:4)(cid:6)(cid:20)(cid:1) (cid:8)-(cid:4)(cid:11)(cid:21)(cid:1) 3DE(cid:13)#’(cid:1) (cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1)
+<br/>(cid:27)(cid:15)(cid:12)(cid:24)(cid:8)(cid:6)(cid:12)(cid:11)(cid:1) (cid:11)(cid:27)(cid:8)(cid:12)(cid:12)(cid:4)(cid:18)(cid:1) (cid:6)(cid:17)(cid:8)-(cid:4)(cid:11)(cid:1) (cid:15)(cid:25)(cid:1) ((cid:4)(cid:20)(cid:11)(cid:15)(cid:12)(cid:11)(cid:1) .(cid:6)(cid:24)(cid:16)(cid:1) (cid:17)(cid:4)(cid:12)(cid:24)(cid:6)(cid:15)(cid:12)(cid:6)(cid:12)-(cid:1) (cid:24)(cid:16)(cid:4)(cid:6)(cid:20)(cid:1)
+<br/>(cid:8)-(cid:4)(cid:11)?(cid:1)(cid:10))(cid:24)(cid:1)(cid:18)(cid:6)(cid:25)(cid:25)(cid:4)(cid:20)(cid:4)(cid:12)(cid:24)(cid:1)(cid:5)(cid:6)-(cid:16)(cid:24)(cid:6)(cid:12)-(cid:1)(cid:27)(cid:15)(cid:12)(cid:18)(cid:6)(cid:24)(cid:6)(cid:15)(cid:12)(cid:11)(cid:14)(cid:1)(cid:10)(cid:8)(cid:27)(cid:7)-(cid:20)(cid:15))(cid:12)(cid:18)(cid:14)(cid:1)((cid:15)(cid:11)(cid:4)(cid:11)(cid:1)(cid:8)(cid:12)(cid:18)(cid:1)
+<br/>(cid:4)6((cid:20)(cid:4)(cid:11)(cid:11)(cid:6)(cid:15)(cid:12)(cid:11)(cid:21)(cid:1)(cid:23)(cid:30)(cid:1)(cid:11)(cid:24))(cid:18)(cid:30)(cid:6)(cid:12)-(cid:1)(cid:15)(cid:24)(cid:16)(cid:4)(cid:20)(cid:1)(cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:11)(cid:1)(cid:6)(cid:24)(cid:1) .(cid:8)(cid:11)(cid:1) (cid:27)(cid:15)(cid:12)(cid:27)(cid:5))(cid:18)(cid:4)(cid:18)(cid:1)(cid:24)(cid:15)(cid:1)
+<br/>((cid:20)(cid:15)(cid:29)(cid:6)(cid:18)(cid:4)(cid:1) (cid:8)(cid:1) (cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1) .(cid:6)(cid:24)(cid:16)(cid:1) (cid:27)(cid:15)(cid:12)(cid:18)(cid:6)(cid:24)(cid:6)(cid:15)(cid:12)(cid:11)(cid:1) (cid:15)(cid:25)(cid:1) (cid:8)(cid:12)(cid:1) (cid:8)-(cid:4)(cid:1) (cid:27)(cid:5)(cid:8)(cid:11)(cid:11)(cid:6)(cid:25)(cid:6)(cid:27)(cid:8)(cid:24)(cid:6)(cid:15)(cid:12)(cid:11)(cid:1)
+<br/>((cid:20)(cid:15) (cid:4)(cid:27)(cid:24)(cid:21)(cid:1) (cid:9)-(cid:4)(cid:14)(cid:1) (cid:4)(cid:12)(cid:15))-(cid:16)(cid:1) (cid:20)(cid:4)(cid:11)(cid:15)(cid:5))(cid:24)(cid:6)(cid:15)(cid:12)(cid:1) (cid:25)(cid:15)(cid:20)(cid:1) .(cid:20)(cid:6)(cid:12)(cid:7)(cid:5)(cid:4)(cid:1) (cid:8)(cid:12)(cid:8)(cid:5)(cid:30)(cid:11)(cid:6)(cid:11)(cid:1) (cid:8)(cid:12)(cid:18)(cid:1)
+<br/>(cid:25)(cid:20)(cid:15)(cid:12)(cid:24)(cid:8)(cid:5)(cid:1)((cid:15)(cid:11)(cid:4)(cid:11)(cid:1)(cid:8)(cid:20)(cid:4)(cid:1)(cid:10)(cid:8)(cid:11)(cid:6)(cid:27)(cid:1)(cid:12)(cid:4)(cid:4)(cid:18)(cid:11)(cid:1)(cid:25)(cid:15)(cid:20)(cid:1)(cid:24)(cid:16)(cid:6)(cid:11)(cid:1)(cid:25)(cid:6)(cid:4)(cid:5)(cid:18)(cid:21)(cid:1)
+<br/>% (cid:10)(cid:9)(cid:13)(cid:8)(cid:2)(cid:5)&(cid:11)(cid:5)(cid:19)(cid:4)(cid:6) ’((cid:6)
+<br/>(cid:10)(cid:3)(cid:11)(cid:3)(cid:12)(cid:3)(cid:13)(cid:9)
+<br/>(cid:1)(cid:2)(cid:3)(cid:4)(cid:5)(cid:3)(cid:4)(cid:6) (cid:7)(cid:3)(cid:8)(cid:9)(cid:6)
+<br/>’(cid:16)(cid:4)(cid:1) (cid:26)(cid:20)(cid:8)(cid:12)(cid:6)(cid:8)(cid:12)(cid:1) 3(cid:8)(cid:27)(cid:4)(cid:1) (cid:19)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:14)(cid:1) (cid:24)(cid:16)(cid:4)(cid:1) (cid:25)(cid:6)(cid:20)(cid:11)(cid:24)(cid:1) (cid:6)(cid:17)(cid:8)-(cid:4)(cid:1) (cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1) (cid:6)(cid:12)(cid:1)
+<br/>(cid:17)(cid:6)(cid:18)(cid:18)(cid:5)(cid:4)E(cid:4)(cid:8)(cid:11)(cid:24)(cid:14)(cid:1)(cid:27)(cid:15)(cid:12)(cid:24)(cid:8)(cid:6)(cid:12)(cid:11)(cid:1)(cid:27)(cid:15)(cid:5)(cid:15)(cid:20)(cid:1)(cid:25)(cid:8)(cid:27)(cid:6)(cid:8)(cid:5)(cid:1)(cid:6)(cid:17)(cid:8)-(cid:4)(cid:20)(cid:30)(cid:1)(cid:15)(cid:25)(cid:1)(cid:8)(cid:1)(cid:5)(cid:8)(cid:20)-(cid:4)(cid:1)(cid:12))(cid:17)(cid:10)(cid:4)(cid:20)(cid:1)(cid:15)(cid:25)(cid:1)
+<br/>(cid:26)(cid:20)(cid:8)(cid:12)(cid:6)(cid:8)(cid:12)(cid:1)(cid:11))(cid:10) (cid:4)(cid:27)(cid:24)(cid:11) (cid:10)(cid:4)(cid:24).(cid:4)(cid:4)(cid:12)(cid:1)/(cid:1)(cid:8)(cid:12)(cid:18)(cid:1)01(cid:1)(cid:30)(cid:4)(cid:8)(cid:20)(cid:11)(cid:1)(cid:15)(cid:5)(cid:18)(cid:21)
+<br/>(cid:26)3(cid:19)(cid:23)(cid:1)(cid:6)(cid:11)(cid:1)(cid:8)(cid:1)(cid:5)(cid:8)(cid:20)-(cid:4)(cid:1)(cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1)(cid:24)(cid:16)(cid:8)(cid:24)(cid:1)(cid:27)(cid:8)(cid:12)(cid:1)(cid:11))(((cid:15)(cid:20)(cid:24)(cid:1)(cid:11)(cid:24))(cid:18)(cid:6)(cid:4)(cid:11)(cid:1)(cid:15)(cid:25)(cid:1)(cid:24)(cid:16)(cid:4)(cid:1)(cid:8)-(cid:4)(cid:1)
+<br/>(cid:27)(cid:5)(cid:8)(cid:11)(cid:11)(cid:6)(cid:25)(cid:6)(cid:27)(cid:8)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1) (cid:11)(cid:30)(cid:11)(cid:24)(cid:4)(cid:17)(cid:11)(cid:21)(cid:1) (cid:26)(cid:24)(cid:1) (cid:27)(cid:15)(cid:12)(cid:24)(cid:8)(cid:6)(cid:12)(cid:11)(cid:1) (cid:15)(cid:29)(cid:4)(cid:20)(cid:1) *(cid:14)+,,(cid:1) (cid:27)(cid:15)(cid:5)(cid:15)(cid:20)(cid:1) (cid:6)(cid:17)(cid:8)-(cid:4)(cid:11)(cid:1)
+<br/>(cid:13)(cid:15)(cid:1)(cid:20)(cid:4)(cid:11)(cid:24)(cid:20)(cid:6)(cid:27)(cid:24)(cid:6)(cid:15)(cid:12)(cid:11)(cid:1)(cid:15)(cid:12)(cid:1).(cid:4)(cid:8)(cid:20)(cid:1)4(cid:27)(cid:5)(cid:15)(cid:24)(cid:16)(cid:4)(cid:11)(cid:14)(cid:1)-(cid:5)(cid:8)(cid:11)(cid:11)(cid:4)(cid:11)(cid:14)(cid:1)(cid:4)(cid:24)(cid:27)(cid:21)5(cid:14)(cid:1) (cid:17)(cid:8)(cid:7)(cid:4)E)((cid:14)(cid:1)(cid:16)(cid:8)(cid:6)(cid:20)(cid:1)
+<br/>(cid:11)(cid:24)(cid:30)(cid:5)(cid:4)(cid:14)(cid:1) (cid:25)(cid:8)(cid:27)(cid:6)(cid:8)(cid:5)(cid:1) (cid:16)(cid:8)(cid:6)(cid:20)(cid:1) .(cid:4)(cid:20)(cid:4)(cid:1) (cid:6)(cid:17)((cid:15)(cid:11)(cid:4)(cid:18)(cid:1) (cid:24)(cid:15)(cid:1) ((cid:8)(cid:20)(cid:24)(cid:6)(cid:27)(cid:6)((cid:8)(cid:12)(cid:24)(cid:11)(cid:21)(cid:1) D(cid:20)(cid:15))(cid:12)(cid:18)E(cid:24)(cid:20))(cid:24)(cid:16)(cid:1)
+<br/>(cid:6)(cid:12)(cid:25)(cid:15)(cid:20)(cid:17)(cid:8)(cid:24)(cid:6)(cid:15)(cid:12)(cid:14)(cid:1)(cid:6)(cid:12)(cid:27)(cid:5))(cid:18)(cid:6)(cid:12)-(cid:1)(cid:26)(cid:19)(cid:14)(cid:1)(cid:8)-(cid:4)(cid:14)(cid:1)(cid:7)(cid:6)(cid:12)(cid:18)(cid:1)(cid:15)(cid:25) ((cid:15)(cid:11)(cid:4)(cid:1)(cid:15)(cid:20)(cid:1)(cid:4)6((cid:20)(cid:4)(cid:11)(cid:11)(cid:6)(cid:15)(cid:12)(cid:1)(cid:8)(cid:12)(cid:18)(cid:1)
+<br/>(cid:6)(cid:25)(cid:1) (cid:24)(cid:16)(cid:4)(cid:1) (cid:11))(cid:10) (cid:4)(cid:27)(cid:24)(cid:1) (cid:16)(cid:8)(cid:11)(cid:1) -(cid:5)(cid:8)(cid:11)(cid:11)(cid:4)(cid:11)(cid:1) (cid:6)(cid:11)(cid:1) ((cid:20)(cid:15)(cid:29)(cid:6)(cid:18)(cid:4)(cid:18)(cid:21)(cid:1) #6((cid:4)(cid:20)(cid:6)(cid:17)(cid:4)(cid:12)(cid:24)(cid:8)(cid:5)(cid:1) (cid:11))(cid:10) (cid:4)(cid:27)(cid:24)(cid:11)(cid:1)
+<br/>.(cid:4)(cid:20)(cid:4)(cid:1)((cid:16)(cid:15)(cid:24)(cid:15)-(cid:20)(cid:8)((cid:16)(cid:4)(cid:18)(cid:1).(cid:6)(cid:24)(cid:16)(cid:1)(cid:8)(cid:1)(cid:25)(cid:6)(cid:12)(cid:4)E(cid:20)(cid:4)(cid:11)(cid:15)(cid:5))(cid:24)(cid:6)(cid:15)(cid:12)(cid:1)(cid:27)(cid:15)(cid:5)(cid:15)(cid:20)(cid:1)(cid:18)(cid:6)-(cid:6)(cid:24)(cid:8)(cid:5)(cid:1)(cid:27)(cid:8)(cid:17)(cid:4)(cid:20)(cid:8)(cid:1)
+<br/>(cid:6)(cid:12)(cid:1)(cid:18)(cid:8)(cid:30)(cid:5)(cid:6)-(cid:16)(cid:24)(cid:21)(cid:1)’(cid:16)(cid:4)(cid:1)(cid:11))(cid:10) (cid:4)(cid:27)(cid:24)(cid:11)(cid:1).(cid:4)(cid:20)(cid:4)(cid:1)(cid:11)(cid:4)(cid:8)(cid:24)(cid:4)(cid:18)(cid:1)(cid:15)(cid:12)(cid:1)(cid:8)(cid:1)(cid:11)(cid:24)(cid:15)(cid:15)(cid:5)(cid:1)(cid:8)(cid:12)(cid:18)(cid:1)(cid:6)(cid:12)(cid:11)(cid:24)(cid:20))(cid:27)(cid:24)(cid:4)(cid:18)(cid:1)
+<br/>(cid:24)(cid:15)(cid:1) (cid:17)(cid:8)(cid:6)(cid:12)(cid:24)(cid:8)(cid:6)(cid:12)(cid:1) (cid:8)(cid:1) (cid:27)(cid:15)(cid:12)(cid:11)(cid:24)(cid:8)(cid:12)(cid:24)(cid:1) (cid:16)(cid:4)(cid:8)(cid:18)(cid:1) ((cid:15)(cid:11)(cid:6)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1) 4(cid:8)(cid:5)(cid:24)(cid:16)(cid:15))-(cid:16)(cid:1) (cid:11)(cid:5)(cid:6)-(cid:16)(cid:24)(cid:1)
+<br/>(cid:17)(cid:15)(cid:29)(cid:4)(cid:17)(cid:4)(cid:12)(cid:24)(cid:11)(cid:1).(cid:4)(cid:20)(cid:4)(cid:1))(cid:12)(cid:8)(cid:29)(cid:15)(cid:6)(cid:18)(cid:8)(cid:10)(cid:5)(cid:4)5(cid:21)
+<br/>’(cid:16)(cid:4)(cid:1)(cid:6)(cid:17)(cid:8)-(cid:4)(cid:11)(cid:1)(cid:8)(cid:20)(cid:4)(cid:1)(cid:6)(cid:12)(cid:1)>0,G+>,(cid:1)((cid:6)6(cid:4)(cid:5)(cid:11)(cid:1)(cid:20)(cid:4)(cid:11)(cid:15)(cid:5))(cid:24)(cid:6)(cid:15)(cid:12)(cid:14)(cid:1)/>(cid:1)(cid:10)(cid:6)(cid:24)(cid:1)(cid:18)(cid:4)((cid:24)(cid:16) (cid:14)(cid:1)
+<br/>(cid:8)(cid:10)(cid:15))(cid:24)(cid:1)>,(cid:1)(cid:31)(cid:10)(cid:30)(cid:24)(cid:4)(cid:11)(cid:1)(cid:11)(cid:6)(cid:22)(cid:4)(cid:1)(cid:8)(cid:12)(cid:18)(cid:1)CHD(cid:1)(cid:25)(cid:15)(cid:20)(cid:17)(cid:8)(cid:24) (cid:21)(cid:1)
+<br/>#(cid:12)(cid:15))-(cid:16)(cid:1) (cid:5))(cid:17)(cid:6)(cid:12)(cid:15)(cid:11)(cid:6)(cid:24)(cid:30)(cid:1) (cid:25)(cid:15)(cid:20)(cid:1) .(cid:20)(cid:6)(cid:12)(cid:7)(cid:5)(cid:4)(cid:1) ((cid:20)(cid:15)(cid:27)(cid:4)(cid:11)(cid:11)(cid:6)(cid:12)-(cid:1) (cid:8)(cid:12)(cid:18)(cid:1) (cid:25)(cid:8)(cid:27)(cid:6)(cid:8)(cid:5)(cid:1)
+<br/>(cid:25)(cid:4)(cid:8)(cid:24))(cid:20)(cid:4)(cid:11)(cid:1) .(cid:6)(cid:24)(cid:16)(cid:15))(cid:24)(cid:1) (cid:11)(cid:16)(cid:8)(cid:18)(cid:15).(cid:11)(cid:1) (cid:6)(cid:11)(cid:1) (cid:12)(cid:4)(cid:4)(cid:18)(cid:4)(cid:18)(cid:1) 4(cid:6)(cid:12)(cid:1) (cid:8)-(cid:4)(cid:1) (cid:27)(cid:5)(cid:8)(cid:11)(cid:11)(cid:6)(cid:25)(cid:6)(cid:27)(cid:8)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1)
+<br/>.(cid:20)(cid:6)(cid:12)(cid:7)(cid:5)(cid:4)(cid:1) (cid:18)(cid:4)(cid:24)(cid:4)(cid:27)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1) (cid:8)(cid:12)(cid:18)(cid:1) (cid:8)(cid:12)(cid:8)(cid:5)(cid:30)(cid:11)(cid:6)(cid:11)(cid:1)
+<br/>(cid:24)(cid:16)(cid:4)(cid:1)
+<br/>(cid:18)(cid:6)(cid:11)(cid:24)(cid:6)(cid:12)-)(cid:6)(cid:11)(cid:16)(cid:6)(cid:12)-(cid:1)(cid:15)(cid:25)(cid:1)(cid:11)(cid:4)(cid:12)(cid:6)(cid:15)(cid:20)(cid:11)(cid:1)(cid:25)(cid:20)(cid:15)(cid:17)(cid:1)(cid:24)(cid:16)(cid:15)(cid:11)(cid:4)(cid:1)(cid:6)(cid:12)(cid:1)(cid:24)(cid:16)(cid:4)(cid:1)(cid:30)(cid:15))(cid:12)-(cid:4)(cid:20)(cid:1)(cid:27)(cid:8)(cid:24)(cid:4)-(cid:15)(cid:20)(cid:6)(cid:4)(cid:11)(cid:1)
+<br/><(cid:2)=5(cid:21)(cid:1) ")(cid:10) (cid:4)(cid:27)(cid:24)(cid:11)(cid:1) .(cid:4)(cid:20)(cid:4)(cid:1) ((cid:16)(cid:15)(cid:24)(cid:15)-(cid:20)(cid:8)((cid:16)(cid:4)(cid:18)(cid:1) .(cid:6)(cid:24)(cid:16)(cid:15))(cid:24)(cid:1) (cid:8)(cid:12)(cid:30)(cid:1) ((cid:20)(cid:15) (cid:4)(cid:27)(cid:24)(cid:15)(cid:20)(cid:11)(cid:1) (cid:15)(cid:20)(cid:1)
+<br/>(cid:6)(cid:17)((cid:15)(cid:20)(cid:24)(cid:8)(cid:12)(cid:24)(cid:1)
+<br/>(cid:25)(cid:15)(cid:20)(cid:1)
+<br/>(cid:6)(cid:11)(cid:1)
+</td><td></td><td>(cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:11)(cid:1) (cid:16)(cid:8)(cid:29)(cid:4)(cid:1) (cid:10)(cid:4)(cid:4)(cid:12)(cid:1) (cid:27)(cid:15)(cid:5)(cid:5)(cid:4)(cid:27)(cid:24)(cid:4)(cid:18)?(cid:1) (cid:11))(cid:27)(cid:16)(cid:1) (cid:8)(cid:11)(cid:1) (cid:9)!(cid:1) <+=(cid:14)(cid:1) (cid:23)(cid:9)(cid:13)7(cid:9)(cid:1) <@=(cid:14)(cid:1)
+<br/>(cid:27)(cid:15)(cid:20)(cid:20)(cid:4)(cid:11)((cid:15)(cid:12)(cid:18)(cid:6)(cid:12)-(cid:1) (cid:24)(cid:15)(cid:1) +(cid:2)+(cid:1) ((cid:4)(cid:15)((cid:5)(cid:4)9(cid:11)(cid:1) (cid:25)(cid:8)(cid:27)(cid:4)(cid:11)(cid:1) 4>0@(cid:1) (cid:17)(cid:4)(cid:12)(cid:14)(cid:1) (cid:2)/B(cid:1) .(cid:15)(cid:17)(cid:4)(cid:12)5(cid:21)(cid:1)
+</td></tr><tr><td>06560d5721ecc487a4d70905a485e22c9542a522</td><td>SUN, YU: DEEP FACIAL ATTRIBUTE DETECTION IN THE WILD
+<br/>Deep Facial Attribute Detection in the Wild:
+<br/>From General to Specific
+<br/>Department of Automation
+<br/><b>University of Science and Technology</b><br/>of China
+<br/>Hefei, China
+</td><td>('4364455', 'Yuechuan Sun', 'yuechuan sun')<br/>('1720236', 'Jun Yu', 'jun yu')</td><td>ycsun@mail.ustc.edu.cn
+<br/>harryjun@ustc.edu.cn
+</td></tr><tr><td>06526c52a999fdb0a9fd76e84f9795a69480cecf</td><td></td><td></td><td></td></tr><tr><td>06bad0cdda63e3fd054e7b334a5d8a46d8542817</td><td>Sharing Features Between Objects and Their Attributes
+<br/>1Department of Computer Science
+<br/><b>University of Texas at Austin</b><br/>2Computer Science Department
+<br/><b>University of Southern California</b></td><td>('35788904', 'Sung Ju Hwang', 'sung ju hwang')<br/>('1693054', 'Fei Sha', 'fei sha')<br/>('1794409', 'Kristen Grauman', 'kristen grauman')</td><td>{sjhwang,grauman}@cs.utexas.edu
+<br/>feisha@usc.edu
+</td></tr><tr><td>06fe63b34fcc8ff68b72b5835c4245d3f9b8a016</td><td>Mach Learn
+<br/>DOI 10.1007/s10994-013-5336-9
+<br/>Learning semantic representations of objects
+<br/>and their parts
+<br/>Received: 24 May 2012 / Accepted: 26 February 2013
+<br/>© The Author(s) 2013
+</td><td>('1935910', 'Grégoire Mesnil', 'grégoire mesnil')<br/>('1732280', 'Gal Chechik', 'gal chechik')</td><td></td></tr><tr><td>06aab105d55c88bd2baa058dc51fa54580746424</td><td>Image Set based Collaborative Representation for
+<br/>Face Recognition
+</td><td>('2873638', 'Pengfei Zhu', 'pengfei zhu')<br/>('1724520', 'Wangmeng Zuo', 'wangmeng zuo')<br/>('36685537', 'Lei Zhang', 'lei zhang')<br/>('1698371', 'David Zhang', 'david zhang')</td><td></td></tr><tr><td>0641dbee7202d07b6c78a39eecd312c17607412e</td><td>283
+<br/>978-1-4799-5751-4/14/$31.00 ©2014 IEEE
+<br/>ICIP 2014
+<br/>WITH APPLICATIONS TO MOTION SEGMENTATION AND FACE CLUSTERING
+<br/>NULL SPACE CLUSTERING
+<br/><b>Australian National University, Canberra</b><br/>2NICTA, Canberra
+</td><td>('2744345', 'Pan Ji', 'pan ji')<br/>('2015152', 'Yiran Zhong', 'yiran zhong')<br/>('40124570', 'Hongdong Li', 'hongdong li')<br/>('2862871', 'Mathieu Salzmann', 'mathieu salzmann')</td><td>fpan.ji,hongdong.lig@anu.edu.au,mathieu.salzmann@nicta.com.au
+</td></tr><tr><td>06262d14323f9e499b7c6e2a3dec76ad9877ba04</td><td>Real-Time Pose Estimation Piggybacked on Object Detection
+<br/>Brno, Czech Republic
+</td><td>('1785162', 'Adam Herout', 'adam herout')</td><td>Graph@FIT, Brno University of Technology
+<br/>ijuranek,herout,idubska,zemcik@fit.vutbr.cz
+</td></tr><tr><td>062c41dad67bb68fefd9ff0c5c4d296e796004dc</td><td>Temporal Generative Adversarial Nets with Singular Value Clipping
+<br/>Preferred Networks inc., Japan
+</td><td>('49160719', 'Masaki Saito', 'masaki saito')<br/>('8252749', 'Eiichi Matsumoto', 'eiichi matsumoto')<br/>('3083107', 'Shunta Saito', 'shunta saito')</td><td>{msaito, matsumoto, shunta}@preferred.jp
+</td></tr><tr><td>06400a24526dd9d131dfc1459fce5e5189b7baec</td><td>Event Recognition in Photo Collections with a Stopwatch HMM
+<br/>1Computer Vision Lab
+<br/>ETH Z¨urich, Switzerland
+<br/>2ESAT, PSI-VISICS
+<br/>K.U. Leuven, Belgium
+</td><td>('1696393', 'Lukas Bossard', 'lukas bossard')<br/>('2737253', 'Matthieu Guillaumin', 'matthieu guillaumin')<br/>('1681236', 'Luc Van Gool', 'luc van gool')</td><td>lastname@vision.ee.ethz.ch
+<br/>vangool@esat.kuleuven.be
+</td></tr><tr><td>062d67af7677db086ef35186dc936b4511f155d7</td><td>They Are Not Equally Reliable: Semantic Event Search
+<br/>using Differentiated Concept Classifiers
+<br/><b>Centre for Quantum Computation and Intelligent Systems, University of Technology Sydney</b><br/><b>Carnegie Mellon University</b></td><td>('1729163', 'Xiaojun Chang', 'xiaojun chang')<br/>('1698559', 'Yi Yang', 'yi yang')<br/>('1752601', 'Eric P. Xing', 'eric p. xing')</td><td>cxj273@gmail.com, yaoliang@cs.cmu.edu, yi.yang@uts.edu.au, epxing@cs.cmu.edu
+</td></tr><tr><td>06c2086f7f72536bf970ca629151b16927104df3</td><td>PALMERO ET AL.: MULTI-MODAL RECURRENT CNN FOR 3D GAZE ESTIMATION
+<br/>Recurrent CNN for 3D Gaze Estimation
+<br/>using Appearance and Shape Cues
+<br/>1 Dept. Mathematics and Informatics
+<br/>Universitat de Barcelona, Spain
+<br/>2 Computer Vision Center
+<br/>Campus UAB, Bellaterra, Spain
+<br/>3 Dept. Electrical and Computer Eng.
+<br/><b>University of Calgary, Canada</b><br/>4 Dept. Engineering
+<br/><b>University of Larestan, Iran</b></td><td>('3413560', 'Cristina Palmero', 'cristina palmero')<br/>('38081877', 'Javier Selva', 'javier selva')<br/>('1921285', 'Mohammad Ali Bagheri', 'mohammad ali bagheri')<br/>('7855312', 'Sergio Escalera', 'sergio escalera')</td><td>crpalmec7@alumnes.ub.edu
+<br/>javier.selva.castello@est.fib.upc.edu
+<br/>mohammadali.bagheri@ucalgary.ca
+<br/>sergio@maia.ub.es
+</td></tr><tr><td>0694b05cbc3ef5d1c5069a4bfb932a5a7b4d5ff0</td><td>Iosifidis, A., Tefas, A., & Pitas, I. (2014). Exploiting Local Class Information
+<br/>in Extreme Learning Machine. Paper presented at International Joint
+<br/>Conference on Computational Intelligence (IJCCI), Rome, Italy.
+<br/>Peer reviewed version
+<br/>Link to publication record in Explore Bristol Research
+<br/>PDF-document
+<br/><b>University of Bristol - Explore Bristol Research</b><br/>General rights
+<br/>This document is made available in accordance with publisher policies. Please cite only the published
+<br/>version using the reference above. Full terms of use are available:
+<br/>http://www.bristol.ac.uk/pure/about/ebr-terms
+<br/> </td><td></td><td></td></tr><tr><td>060034b59275c13746413ca9c67d6304cba50da6</td><td>Ordered Trajectories for Large Scale Human Action Recognition
+<br/>1Vision & Sensing, HCC Lab,
+<br/><b>ESTeM, University of Canberra</b><br/>2IHCC, RSCS, CECS,
+<br/><b>Australian National University</b></td><td>('1793720', 'O. V. Ramana Murthy', 'o. v. ramana murthy')<br/>('1717204', 'Roland Goecke', 'roland goecke')</td><td>O.V.RamanaMurthy@ieee.org
+<br/>roland.goecke@ieee.org
+</td></tr><tr><td>060820f110a72cbf02c14a6d1085bd6e1d994f6a</td><td>Fine-Grained Classification of Pedestrians in Video: Benchmark and State of the Art
+<br/><b>California Institute of Technology</b><br/>The dataset was labelled with bounding boxes, tracks, pose and fine-
+<br/>grained labels. To achieve this, crowdsourcing, using workers from Ama-
+<br/>zon’s Mechanical Turk (MTURK) was used. A summary of the dataset’s
+<br/>statistics can be found in Table 1.
+<br/>Number of Frames Sent to MTURK
+<br/>Number of Frames with at least 1 Pedestrian
+<br/>Number of Bounding Box Labels
+<br/>Number of Pose Labels
+<br/>Number of Tracks
+<br/>38,708
+<br/>20,994
+<br/>32,457
+<br/>27,454
+<br/>4,222
+<br/>Table 1: Dataset Statistics
+<br/>A state-of-the-art algorithm for fine-grained classification was tested us-
+<br/>ing the dataset. The results are reported as a useful performance baseline.
+<br/>The dataset is split into a training/validation set containing 4 videos, with
+<br/>the remaining 3 videos forming the test set. Since each video was collected
+<br/>on a unique day, different images of the same person do not appear in both
+<br/>the training and testing sets.
+<br/>The fine-grained categorisation benchmark uses ’pose normalised deep
+<br/>convolutional nets’ as proposed by Branson et al. [1]. In this framework,
+<br/>features are extracted by applying deep convolutional nets to image re-
+<br/>gions that are normalised by pose. It has state-of the-art performance on
+<br/>bird species categorisation and we believe that it will generalise to the CRP
+<br/>dataset. Results can be found in Figure 2
+<br/>Figure 2: Fine-grained classification results. We report the mean average
+<br/>accuracy across 10 different train/test splits, for each of the subcategories
+<br/>in CRP, using the method of [1]. Average accuracy is computed assuming
+<br/>that there is a uniform prior across the classes. The reference value for
+<br/>each subcategory corresponds to chance. The results suggest that CRP is a
+<br/>challenging dataset.
+<br/>A novel feature of our dataset is the occlusion labelling of the keypoints.
+<br/>Exploiting this information may be the first step towards improving perfor-
+<br/>mance for fine-grained classification. Using temporal information is another
+<br/>alternative. Most pedestrians in CRP appear multiple times over large inter-
+<br/>vals of time. We are planning on adding an identity label for each individ-
+<br/>ual, to make our dataset useful for studying individual re-identification from
+<br/>a moving camera.
+<br/>Improved Bird Species Recognition Using Pose Normalized Deep Con-
+<br/>volutional Nets. In BMVC, 2014.
+<br/>Figure 1: Three examples from the CRP dataset. Annotations include a
+<br/>bounding box, tracks, parts, occlusion, sex, age, weight and clothing style.
+<br/>People are an important component of a machine’s environment. De-
+<br/>tecting, tracking, and recognising people, interpreting their behaviour and
+<br/>interacting with them is a valuable capability for machines. Using vision to
+<br/>estimate human attributes such as: age, sex, activity, social status, health,
+<br/>pose and motion patterns is useful for interpreting and predicting behaviour.
+<br/>This motivates our interest in fine-grained categorisation of people.
+<br/>In this work, we introduce a public video dataset—Caltech Roadside
+<br/>Pedestrians (CRP)—to further advance the state-of-the-art in fine-grained
+<br/>categorisation of people using the entire human body. This dataset is also
+<br/>useful for benchmarking tracking, detection and pose estimation of pedes-
+<br/>trians.
+<br/>Its novel and distinctive features are:
+<br/>1. Size (27,454 bounding box and pose labels) – making it suitable for
+<br/>training deep-networks.
+<br/>2. Natural behaviour – subjects are recorded “in-the-wild” so are un-
+<br/>aware, and behave naturally.
+<br/>3. Viewpoint – Pedestrians are viewed from front, profile, back and ev-
+<br/>erything in between.
+<br/>4. Moving camera – More general and challenging than surveillance
+<br/>video with static background.
+<br/>5. Realism – There is a variety of outdoor background and lighting con-
+<br/>ditions
+<br/>6. Multi-class subcategories – age, clothing style and body shape.
+<br/>7. Detailed annotation – bounding boxes, tracks and 14 keypoints with
+<br/>occlusion information; examples can be found in Figure 1. Each
+<br/>bounding box is also labelled with the fine-grained categories of age
+<br/>(5 classes), sex (2 classes), weight (3 classes) and clothing type (4
+<br/>classes).
+<br/>8. Availability – All videos and annotations are publicly available
+<br/>CRP contains seven, twenty-one minute videos. Each video is captured
+<br/>by mounting a rightwards-pointing, GoPro Hero3 camera to the roof of a
+<br/>car. The car then completes three laps of a ring road within a park where
+<br/>there are many walkers and joggers. Each video was recorded on a different
+<br/>day.
+</td><td>('1990633', 'David Hall', 'david hall')<br/>('1690922', 'Pietro Perona', 'pietro perona')<br/>('1690922', 'Pietro Perona', 'pietro perona')</td><td></td></tr><tr><td>0653dcdff992ad980cd5ea5bc557efb6e2a53ba1</td><td></td><td></td><td></td></tr><tr><td>063a3be18cc27ba825bdfb821772f9f59038c207</td><td>This is a repository copy of The development of spontaneous facial responses to others’
+<br/>emotions in infancy. An EMG study.
+<br/>White Rose Research Online URL for this paper:
+<br/>http://eprints.whiterose.ac.uk/125231/
+<br/>Version: Published Version
+<br/>Article:
+<br/>Kaiser, Jakob, Crespo-Llado, Maria Magdalena, Turati, Chiara et al. (1 more author)
+<br/>(2017) The development of spontaneous facial responses to others’ emotions in infancy.
+<br/>An EMG study. Scientific Reports. ISSN 2045-2322
+<br/>https://doi.org/10.1038/s41598-017-17556-y
+<br/>Reuse
+<br/>This article is distributed under the terms of the Creative Commons Attribution (CC BY) licence. This licence
+<br/>allows you to distribute, remix, tweak, and build upon the work, even commercially, as long as you credit the
+<br/>authors for the original work. More information and the full terms of the licence here:
+<br/>https://creativecommons.org/licenses/
+<br/>Takedown
+<br/>If you consider content in White Rose Research Online to be in breach of UK law, please notify us by
+<br/>https://eprints.whiterose.ac.uk/
+</td><td></td><td>emailing eprints@whiterose.ac.uk including the URL of the record and the reason for the withdrawal request.
+<br/>eprints@whiterose.ac.uk
+</td></tr><tr><td>064cd41d323441209ce1484a9bba02a22b625088</td><td>Selective Transfer Machine for Personalized Facial Action Unit Detection
+<br/><b>Robotics Institute, Carnegie Mellon University, Pittsburgh, PA</b><br/><b>University of Pittsburgh, Pittsburgh, PA</b></td><td>('39336289', 'Wen-Sheng Chu', 'wen-sheng chu')</td><td></td></tr><tr><td>06c2dfe1568266ad99368fc75edf79585e29095f</td><td>Bayesian Active Appearance Models
+<br/><b>Imperial College London, United Kingdom</b></td><td>('2575567', 'Joan Alabort-i-Medina', 'joan alabort-i-medina')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')</td><td>{ja310,s.zafeiriou}@imperial.ac.uk
+</td></tr><tr><td>06f39834e870278243dda826658319be2d5d8ded</td><td>RECOGNIZING UNSEEN ACTIONS IN A DOMAIN-ADAPTED EMBEDDING SPACE
+<br/><b>Arizona State University</b></td><td>('2180892', 'Yikang Li', 'yikang li')<br/>('8060096', 'Sheng-hung Hu', 'sheng-hung hu')<br/>('2913552', 'Baoxin Li', 'baoxin li')</td><td></td></tr><tr><td>06d7ef72fae1be206070b9119fb6b61ce4699587</td><td>On One-Shot Similarity Kernels: explicit feature maps and properties
+<br/>†Department of Computing
+<br/><b>Imperial College London</b><br/>,†,(cid:2)
+<br/>∗Electronics Laboratory, Department of Physics,
+<br/><b>University of Patras, Greece</b><br/>(cid:2)School of Science and Technology,
+<br/><b>Middlesex University, London</b></td><td>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')<br/>('1754270', 'Irene Kotsia', 'irene kotsia')</td><td>s.zafeiriou@imperial.ac.uk
+</td></tr><tr><td>062d0813815c2b9864cd9bb4f5a1dc2c580e0d90</td><td>Encouraging LSTMs to Anticipate Actions Very Early
+<br/><b>Australian National University, 2CVLab, EPFL, Switzerland, 3Smart Vision Systems, CSIRO</b></td><td>('2862871', 'Mathieu Salzmann', 'mathieu salzmann')<br/>('1688071', 'Basura Fernando', 'basura fernando')<br/>('2370776', 'Lars Petersson', 'lars petersson')<br/>('34234277', 'Lars Andersson', 'lars andersson')</td><td>firstname.lastname@data61.csiro.au, mathieu.salzmann@epfl.ch, basura.fernando@anu.edu.au
+</td></tr><tr><td>06a9ed612c8da85cb0ebb17fbe87f5a137541603</td><td>Deep Learning of Player Trajectory Representations for Team
+<br/>Activity Analysis
+</td><td>('10386960', 'Nazanin Mehrasa', 'nazanin mehrasa')<br/>('19198359', 'Yatao Zhong', 'yatao zhong')<br/>('2123865', 'Frederick Tung', 'frederick tung')<br/>('3004771', 'Luke Bornn', 'luke bornn')<br/>('10771328', 'Greg Mori', 'greg mori')<br/>('2190580', 'Simon Fraser', 'simon fraser')</td><td>{nmehrasa, yataoz, ftung, lbornn}@sfu.ca, mori@cs.sfu.ca
+</td></tr><tr><td>06ad99f19cf9cb4a40741a789e4acbf4433c19ae</td><td>SenTion: A framework for Sensing Facial
+<br/>Expressions
+</td><td>('31623038', 'Rahul Islam', 'rahul islam')<br/>('3451315', 'Karan Ahuja', 'karan ahuja')<br/>('1784438', 'Sandip Karmakar', 'sandip karmakar')</td><td>{rahul.islam, karan.ahuja, sandip, ferdous}@iiitg.ac.in
+</td></tr><tr><td>6c27eccf8c4b22510395baf9f0d0acc3ee547862</td><td>Using CMU PIE Human Face Database to a
+<br/>Convolutional Neural Network - Neocognitron
+<br/><b></b><br/>Rodovia Washington Luis, Km 235, São Carlos – SP - Brazil
+<br/><b>Systems and Telematics - Neurolab</b><br/>Via Opera Pia, 13 – I-16145 – Genoa - Italy
+</td><td>('2231336', 'José Hiroki Saito', 'josé hiroki saito')<br/>('3261775', 'Marcelo Hirakuri', 'marcelo hirakuri')<br/>('2558289', 'André Saunite', 'andré saunite')<br/>('36243877', 'Alessandro Noriaki Ide', 'alessandro noriaki ide')<br/>('40209065', 'Sandra Abib', 'sandra abib')</td><td>{saito,hirakuri,sabib}@dc.ufscar.br, tiagocarvalho@uol.com.br, saunite@fai.com.br
+<br/>noriaki@dist.unige.it
+</td></tr><tr><td>6c66ae815e7e508e852ecb122fb796abbcda16a8</td><td>International Journal of Computer Science & Engineering Survey (IJCSES) Vol.6, No.5, October 2015
+<br/>A SURVEY OF THE TRENDS IN FACIAL AND
+<br/>EXPRESSION RECOGNITION DATABASES AND
+<br/>METHODS
+<br/><b>University of Washington, Bothell, USA</b></td><td>('2971095', 'Sohini Roychowdhury', 'sohini roychowdhury')<br/>('33073434', 'Michelle Emmons', 'michelle emmons')</td><td></td></tr><tr><td>6ca2c5ff41e91c34696f84291a458d1312d15bf2</td><td>LIPNET: SENTENCE-LEVEL LIPREADING
+<br/><b>University of Oxford, Oxford, UK</b><br/>Google DeepMind, London, UK 2
+<br/>CIFAR, Canada 3
+<br/>{yannis.assael,brendan.shillingford,
+</td><td>('3365565', 'Yannis M. Assael', 'yannis m. assael')<br/>('3144580', 'Brendan Shillingford', 'brendan shillingford')<br/>('1766767', 'Shimon Whiteson', 'shimon whiteson')</td><td>shimon.whiteson,nando.de.freitas}@cs.ox.ac.uk
+</td></tr><tr><td>6cefb70f4668ee6c0bf0c18ea36fd49dd60e8365</td><td>Privacy-Preserving Deep Inference for Rich User
+<br/>Data on The Cloud
+<br/><b>Sharif University of Technology</b><br/><b>Queen Mary University of London</b><br/><b>Nokia Bell Labs and University of Oxford</b></td><td>('9920557', 'Ali Shahin Shamsabadi', 'ali shahin shamsabadi')<br/>('2251846', 'Ali Taheri', 'ali taheri')<br/>('2226725', 'Kleomenis Katevas', 'kleomenis katevas')<br/>('1688652', 'Hamid R. Rabiee', 'hamid r. rabiee')<br/>('2772904', 'Nicholas D. Lane', 'nicholas d. lane')<br/>('1763096', 'Hamed Haddadi', 'hamed haddadi')</td><td></td></tr><tr><td>6c690af9701f35cd3c2f6c8d160b8891ad85822a</td><td>Multi-Task Learning with Low Rank Attribute Embedding for Person
+<br/>Re-identification
+<br/><b>Peking University</b><br/><b>University of Maryland College Park</b><br/><b>University of Texas at San Antonio</b></td><td>('20798990', 'Chi Su', 'chi su')<br/>('1752128', 'Fan Yang', 'fan yang')<br/>('1776581', 'Shiliang Zhang', 'shiliang zhang')<br/>('1693428', 'Larry S. Davis', 'larry s. davis')</td><td></td></tr><tr><td>6c5fbf156ef9fc782be0089309074cc52617b868</td><td>Controllable Video Generation with Sparse Trajectories
+<br/><b>Cornell University</b></td><td>('19235216', 'Zekun Hao', 'zekun hao')<br/>('47932904', 'Xun Huang', 'xun huang')<br/>('50172592', 'Serge Belongie', 'serge belongie')</td><td>{hz472,xh258,sjb344}@cornell.edu
+</td></tr><tr><td>6c304f3b9c3a711a0cca5c62ce221fb098dccff0</td><td>Attentive Semantic Video Generation using Captions
+<br/>IIT Hyderabad
+<br/>IIT Hyderabad
+</td><td>('8268761', 'Tanya Marwah', 'tanya marwah')<br/>('47351893', 'Gaurav Mittal', 'gaurav mittal')<br/>('1699429', 'Vineeth N. Balasubramanian', 'vineeth n. balasubramanian')</td><td>ee13b1044@iith.ac.in
+<br/>gaurav.mittal.191013@gmail.com
+<br/>vineethnb@iith.ac.in
+</td></tr><tr><td>6ce23cf4f440021b7b05aa3c1c2700cc7560b557</td><td>Learning Local Convolutional Features for Face
+<br/>Recognition with 2D-Warping
+<br/>Human Language Technology and Pattern Recognition Group,
+<br/><b>RWTH Aachen University</b></td><td>('1804963', 'Harald Hanselmann', 'harald hanselmann')<br/>('1685956', 'Hermann Ney', 'hermann ney')</td><td>surname@cs.rwth-aachen.de
+</td></tr><tr><td>6c80c834d426f0bc4acd6355b1946b71b50cbc0b</td><td>Pose-Based Two-Stream Relational Networks for
+<br/>Action Recognition in Videos
+<br/>1Center for Research on Intelligent Perception and Computing (CRIPAC),
+<br/>National Laboratory of Pattern Recognition (NLPR)
+<br/>2Center for Excellence in Brain Science and Intelligence Technology (CEBSIT),
+<br/><b>Institute of Automation, Chinese Academy of Sciences (CASIA</b><br/><b>University of Chinese Academy of Sciences (UCAS</b></td><td>('47824598', 'Wei Wang', 'wei wang')<br/>('47539600', 'Jinjin Zhang', 'jinjin zhang')<br/>('39927579', 'Chenyang Si', 'chenyang si')<br/>('1693997', 'Liang Wang', 'liang wang')</td><td>{wangwei, wangliang}@nlpr.ia.ac.cn, {jinjin.zhang,
+<br/>chenyang.si}@cripac.ia.ac.cn
+</td></tr><tr><td>6cb7648465ba7757ecc9c222ac1ab6402933d983</td><td>Visual Forecasting by Imitating Dynamics in Natural Sequences
+<br/><b>Stanford University National Tsing Hua University</b></td><td>('32970572', 'Kuo-Hao Zeng', 'kuo-hao zeng')</td><td>{khzeng, bshen88, dahuang, jniebles}@cs.stanford.edu sunmin@ee.nthu.edu.tw
+</td></tr><tr><td>6c2b392b32b2fd0fe364b20c496fcf869eac0a98</td><td>DOI 10.1007/s00138-012-0423-7
+<br/>ORIGINAL PAPER
+<br/>Fully automatic face recognition framework based
+<br/>on local and global features
+<br/>Received: 30 May 2011 / Revised: 21 February 2012 / Accepted: 29 February 2012 / Published online: 22 March 2012
+<br/>© Springer-Verlag 2012
+</td><td>('36048866', 'Cong Geng', 'cong geng')</td><td></td></tr><tr><td>6c6bb85a08b0bdc50cf8f98408d790ccdb418798</td><td>Recognition of facial expressions in presence of
+<br/>partial occlusion
+<br/>AIIA Laboratory
+<br/>Computer Vision and Image Processing Group
+<br/>Department of Informatics
+<br/><b>Aristotle University of Thessaloniki, GR-54124 Thessaloniki, Greece</b><br/>Phone: +30 2310 996361
+<br/>Fax: +30 2310 998453
+<br/>Web: http://poseidon.csd.auth.gr
+</td><td>('2336758', 'Ioan Buciu', 'ioan buciu')<br/>('1698588', 'Ioannis Pitas', 'ioannis pitas')</td><td>E-mail: {nelu,ekotsia,pitas}@zeus.csd.auth.gr
+</td></tr><tr><td>6c705285c554985ecfe1117e854e1fe1323f8c21</td><td>DIY Human Action Data Set Generation
+<br/>Illya Zharkov
+<br/><b>Simon Fraser University</b><br/>Microsoft
+<br/>Microsoft
+<br/>Microsoft
+</td><td>('1916516', 'Mehran Khodabandeh', 'mehran khodabandeh')<br/>('3227254', 'Hamid Reza Vaezi Joze', 'hamid reza vaezi joze')<br/>('3811436', 'Vivek Pradeep', 'vivek pradeep')</td><td>mkhodaba@sfu.ca
+<br/>hava@microsoft.com
+<br/>zharkov@microsoft.com
+<br/>vpradeep@microsoft.com
+</td></tr><tr><td>6cddc7e24c0581c50adef92d01bb3c73d8b80b41</td><td>Face Verification Using the LARK
+<br/>Representation
+</td><td>('3326805', 'Hae Jong Seo', 'hae jong seo')<br/>('1718280', 'Peyman Milanfar', 'peyman milanfar')</td><td></td></tr><tr><td>6cfc337069868568148f65732c52cbcef963f79d</td><td>Audio-Visual Speaker Localization via Weighted
+<br/>Clustering
+<br/>To cite this version:
+<br/>Localization via Weighted Clustering. IEEE Workshop on Machine Learning for Signal Processing,
+<br/>Sep 2014, Reims, France. pp.1-6, 2014, <10.1109/MLSP.2014.6958874>. <hal-01053732>
+<br/>HAL Id: hal-01053732
+<br/>https://hal.archives-ouvertes.fr/hal-01053732
+<br/>Submitted on 11 Aug 2014
+<br/>HAL is a multi-disciplinary open access
+<br/>archive for the deposit and dissemination of sci-
+<br/>entific research documents, whether they are pub-
+<br/>lished or not. The documents may come from
+<br/>teaching and research institutions in France or
+<br/><b>abroad, or from public or private research centers</b><br/>L’archive ouverte pluridisciplinaire HAL, est
+<br/>destinée au dépôt et à la diffusion de documents
+<br/>scientifiques de niveau recherche, publiés ou non,
+<br/>émanant des établissements d’enseignement et de
+<br/>recherche français ou étrangers, des laboratoires
+<br/>publics ou privés.
+</td><td>('1780201', 'Xavier Alameda-Pineda', 'xavier alameda-pineda')<br/>('1794229', 'Radu Horaud', 'radu horaud')<br/>('1785817', 'Florence Forbes', 'florence forbes')<br/>('1780201', 'Xavier Alameda-Pineda', 'xavier alameda-pineda')<br/>('1794229', 'Radu Horaud', 'radu horaud')<br/>('1785817', 'Florence Forbes', 'florence forbes')</td><td></td></tr><tr><td>6cd96f2b63c6b6f33f15c0ea366e6003f512a951</td><td>A New Approach in Solving Illumination and Facial Expression Problems
+<br/>for Face Recognition
+<br/><b>a The University of Nottingham Malaysia Campus</b><br/>Tel : 03-89248358, Fax : 03-89248017
+<br/>Jalan Broga
+<br/>43500 Semenyih, Selangor
+</td><td>('1968167', 'Yee Wan Wong', 'yee wan wong')<br/>('9273662', 'Kah Phooi Seng', 'kah phooi seng')<br/>('2808528', 'Li-Minn Ang', 'li-minn ang')</td><td>E-mail : yeewan.wong@nottingham.edu.my
+</td></tr><tr><td>6c8c7065d1041146a3604cbe15c6207f486021ba</td><td>Attention Modeling for Face Recognition via Deep Learning
+<br/>Department of Computing, Hung Hom, Kowloon
+<br/>Hong Kong, 999077 CHINA
+<br/>Department of Computing, Hung Hom, Kowloon
+<br/>Hong Kong, 99907 CHINA
+<br/>Department of Computing, Hung Hom, Kowloon
+<br/>Hong Kong, 99907 CHINA
+<br/>Department of Computing, Hung Hom, Kowloon
+<br/>Hong Kong, 99907 CHINA
+</td><td></td><td>Sheng-hua Zhong (csshzhong@comp.polyu.edu.hk)
+<br/>Yan Liu (csyliu@comp.polyu.edu.hk)
+<br/>Yao Zhang (csyaozhang@comp.polyu.edu.hk)
+<br/>Fu-lai Chung (cskchung@comp.polyu.edu.hk)
+</td></tr><tr><td>390f3d7cdf1ce127ecca65afa2e24c563e9db93b</td><td>Learning Deep Representation for Face
+<br/>Alignment with Auxiliary Attributes
+</td><td>('3152448', 'Zhanpeng Zhang', 'zhanpeng zhang')<br/>('1693209', 'Ping Luo', 'ping luo')<br/>('1717179', 'Chen Change Loy', 'chen change loy')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td></td></tr><tr><td>39ed31ced75e6151dde41944a47b4bdf324f922b</td><td>Pose-Guided Photorealistic Face Rotation
+<br/><b>CRIPAC and NLPR and CEBSIT, CASIA 2University of Chinese Academy of Sciences</b><br/>3Noah’s Ark Laboratory, Huawei Technologies Co., Ltd.
+</td><td>('49995036', 'Yibo Hu', 'yibo hu')<br/>('47150161', 'Xiang Wu', 'xiang wu')<br/>('46806278', 'Bing Yu', 'bing yu')<br/>('50361927', 'Ran He', 'ran he')<br/>('1757186', 'Zhenan Sun', 'zhenan sun')</td><td>{yibo.hu, xiang.wu}@cripac.ia.ac.cn, yubing5@huawei.com, {rhe, znsun}@nlpr.ia.ac.cn
+</td></tr><tr><td>3918b425bb9259ddff9eca33e5d47bde46bd40aa</td><td>Copyright
+<br/>by
+<br/>David Lieh-Chiang Chen
+<br/>2012
+</td><td></td><td></td></tr><tr><td>39ce143238ea1066edf0389d284208431b53b802</td><td></td><td></td><td></td></tr><tr><td>39ce2232452c0cd459e32a19c1abe2a2648d0c3f</td><td></td><td></td><td></td></tr><tr><td>3998c5aa6be58cce8cb65a64cb168864093a9a3e</td><td></td><td></td><td></td></tr><tr><td>39dc2ce4cce737e78010642048b6ed1b71e8ac2f</td><td>Recognition of Six Basic Facial Expressions by Feature-Points Tracking using
+<br/>RBF Neural Network and Fuzzy Inference System
+<br/><b>Islamic Azad University of AHAR</b><br/><b>Elect. Eng. Faculty, Tabriz University, Tabriz, Iran</b><br/>
+</td><td>('3210269', 'Hadi Seyedarabi', 'hadi seyedarabi')<br/>('2488201', 'Ali Aghagolzadeh', 'ali aghagolzadeh')<br/>('1766050', 'Sohrab Khanmohammadi', 'sohrab khanmohammadi')</td><td>seyedarabi@tabrizu.ac.ir , aghagol@tabrizu.ac.ir , khan@tabrizu.ac.ir
+</td></tr><tr><td>397aeaea61ecdaa005b09198942381a7a11cd129</td><td></td><td></td><td></td></tr><tr><td>3991223b1dc3b87883cec7af97cf56534178f74a</td><td>A Unified Framework for Context Assisted Face Clustering
+<br/>Department of Computer Science
+<br/><b>University of California, Irvine</b></td><td>('3338094', 'Liyan Zhang', 'liyan zhang')<br/>('1818681', 'Dmitri V. Kalashnikov', 'dmitri v. kalashnikov')<br/>('1686199', 'Sharad Mehrotra', 'sharad mehrotra')</td><td></td></tr><tr><td>39b22bcbd452d5fea02a9ee63a56c16400af2b83</td><td></td><td></td><td></td></tr><tr><td>399a2c23bd2592ebe20aa35a8ea37d07c14199da</td><td></td><td></td><td></td></tr><tr><td>396a19e29853f31736ca171a3f40c506ef418a9f</td><td>Real World Real-time Automatic Recognition of Facial Expressions
+<br/><b>Exploratory Computer Vision Group, IBM T. J. Watson Research Center</b><br/>PO Box 704, Yorktown Heights, NY 10598
+</td><td>('8193125', 'Ying-li Tian', 'ying-li tian')<br/>('1773140', 'Ruud Bolle', 'ruud bolle')</td><td>{yltian,lisabr,arunh,sharat,aws,bolle}@us.ibm.com
+</td></tr><tr><td>392d35bb359a3b61cca1360272a65690a97a2b3f</td><td>YAN, YAP, MORI: ONE-SHOT MULTI-TASK LEARNING FOR VIDEO EVENT DETECTION 1
+<br/>Multi-Task Transfer Methods to Improve
+<br/>One-Shot Learning for Multimedia Event
+<br/>Detection
+<br/>School of Computing Science
+<br/><b>Simon Fraser University</b><br/>Burnaby, BC, CANADA
+</td><td>('34289418', 'Wang Yan', 'wang yan')<br/>('32874186', 'Jordan Yap', 'jordan yap')<br/>('10771328', 'Greg Mori', 'greg mori')</td><td>wyan@sfu.ca
+<br/>jjyap@sfu.ca
+<br/>mori@cs.sfu.ca
+</td></tr><tr><td>397085122a5cade71ef6c19f657c609f0a4f7473</td><td>GHIASI, FOWLKES: USING SEGMENTATION TO DETECT OCCLUSION
+<br/>Using Segmentation to Predict the Absence
+<br/>of Occluded Parts
+<br/>Dept. of Computer Science
+<br/><b>University of California</b><br/>Irvine, CA
+</td><td>('1898210', 'Golnaz Ghiasi', 'golnaz ghiasi')<br/>('3157443', 'Charless C. Fowlkes', 'charless c. fowlkes')</td><td>gghiasi@ics.uci.edu
+<br/>fowlkes@ics.uci.edu
+</td></tr><tr><td>39c48309b930396a5a8903fdfe781d3e40d415d0</td><td>Learning Spatial and Temporal Cues for Multi-label Facial Action Unit Detection
+<br/><b>Robotics Institute, Carnegie Mellon University, Pittsburgh PA</b><br/><b>University of Pittsburgh, Pittsburgh PA</b></td><td>('39336289', 'Wen-Sheng Chu', 'wen-sheng chu')<br/>('1737918', 'Jeffrey F. Cohn', 'jeffrey f. cohn')</td><td></td></tr><tr><td>39c8b34c1b678235b60b648d0b11d241a34c8e32</td><td>Learning to Deblur Images with Exemplars
+</td><td>('9416825', 'Jinshan Pan', 'jinshan pan')<br/>('2776845', 'Wenqi Ren', 'wenqi ren')<br/>('1786024', 'Zhe Hu', 'zhe hu')<br/>('1715634', 'Ming-Hsuan Yang', 'ming-hsuan yang')</td><td></td></tr><tr><td>3986161c20c08fb4b9b791b57198b012519ea58b</td><td>International Journal of Soft Computing and Engineering (IJSCE)
+<br/>ISSN: 2231-2307, Volume-4 Issue-4, September 2014
+<br/>An Efficient Method for Face Recognition based on
+<br/>Fusion of Global and Local Feature Extraction
+</td><td>('9218646', 'E. Gomathi', 'e. gomathi')<br/>('1873007', 'K. Baskaran', 'k. baskaran')</td><td></td></tr><tr><td>392425be1c9d9c2ee6da45de9df7bef0d278e85f</td><td></td><td></td><td></td></tr><tr><td>392c3cabe516c0108b478152902a9eee94f4c81e</td><td>Computer Science and Artificial Intelligence Laboratory
+<br/>Technical Report
+<br/>MIT-CSAIL-TR-2007-024
+<br/>April 23, 2007
+<br/>Tiny images
+<br/>m a s s a c h u s e t t s i n s t i t u t e o f t e c h n o l o g y, c a m b r i d g e , m a 0 213 9 u s a — w w w. c s a i l . m i t . e d u
+</td><td>('34943293', 'Antonio Torralba', 'antonio torralba')<br/>('2276554', 'Rob Fergus', 'rob fergus')<br/>('1768236', 'William T. Freeman', 'william t. freeman')</td><td></td></tr><tr><td>39f525f3a0475e6bbfbe781ae3a74aca5b401125</td><td>Deep Joint Face Hallucination and Recognition
+<br/><b>Sun Yat-sen University</b><br/><b>Sun Yat-sen University</b><br/><b>Sun Yat-sen University</b><br/><b>Sun Yat-sen University</b><br/>November 28, 2016
+</td><td>('4080607', 'Junyu Wu', 'junyu wu')<br/>('2442939', 'Shengyong Ding', 'shengyong ding')<br/>('1723992', 'Wei Xu', 'wei xu')<br/>('38255852', 'Hongyang Chao', 'hongyang chao')</td><td>wujunyu2@mail2.sysu.edu.cn
+<br/>1633615231@qq.com
+<br/>xuwei1993@qq.com
+<br/>isschhy@mail.sysu.edu.cn
+</td></tr><tr><td>3946b8f862ecae64582ef0912ca2aa6d3f6f84dc</td><td>Who and Where: People and Location Co-Clustering
+<br/>Electrical Engineering
+<br/><b>Stanford University</b></td><td>('8491578', 'Zixuan Wang', 'zixuan wang')</td><td>zxwang@stanford.edu
+</td></tr><tr><td>3933416f88c36023a0cba63940eb92f5cef8001a</td><td>Learning Robust Subspace Clustering
+<br/>Department of Electrical and Computer Engineering
+<br/><b>Duke University</b><br/>Durham, NC, 27708
+<br/>May 11, 2014
+</td><td>('2077648', 'Qiang Qiu', 'qiang qiu')<br/>('1699339', 'Guillermo Sapiro', 'guillermo sapiro')</td><td>{qiang.qiu, guillermo.sapiro}@duke.edu
+</td></tr><tr><td>39150acac6ce7fba56d54248f9c0badbfaeef0ea</td><td>Proceedings, Digital Signal Processing for in-Vehicle and mobile systems, Istanbul, Turkey, June 2007.
+<br/><b>Sabanci University</b><br/>Faculty of
+<br/>Engineering and Natural Sciences
+<br/>Orhanli, Istanbul
+</td><td>('40322754', 'Esra Vural', 'esra vural')<br/>('21691177', 'Mujdat Cetin', 'mujdat cetin')<br/>('31849282', 'Aytul Ercil', 'aytul ercil')<br/>('2724380', 'Gwen Littlewort', 'gwen littlewort')<br/>('1858421', 'Marian Bartlett', 'marian bartlett')<br/>('29794862', 'Javier Movellan', 'javier movellan')</td><td></td></tr><tr><td>3947b64dcac5bcc1d3c8e9dcb50558efbb8770f1</td><td></td><td></td><td></td></tr><tr><td>3965d61c4f3b72044f43609c808f8760af8781a2</td><td></td><td></td><td></td></tr><tr><td>39f03d1dfd94e6f06c1565d7d1bb14ab0eee03bc</td><td>Simultaneous Local Binary Feature Learning and Encoding for Face Recognition
+<br/><b>Tsinghua University, Beijing, China</b><br/>2Rapid-Rich Object Search (ROSE) Lab, Interdisciplinary Graduate School,
+<br/><b>Nanyang Technological University, Singapore</b></td><td>('1697700', 'Jiwen Lu', 'jiwen lu')<br/>('1754854', 'Venice Erin Liong', 'venice erin liong')<br/>('39491387', 'Jie Zhou', 'jie zhou')</td><td>elujiwen@gmail.com; veniceer001@e.ntu.edu.sg; jzhou@tsinghua.edu.cn
+</td></tr><tr><td>395bf182983e0917f33b9701e385290b64e22f9a</td><td></td><td></td><td></td></tr><tr><td>3983637022992a329f1d721bed246ae76bc934f7</td><td>Wide-Baseline Stereo for Face Recognition with Large Pose Variation
+<br/>Computer Science Department
+<br/><b>University of Maryland, College Park</b></td><td>('38171682', 'Carlos D. Castillo', 'carlos d. castillo')<br/>('34734622', 'David W. Jacobs', 'david w. jacobs')</td><td>{carlos,djacobs}@cs.umd.edu
+</td></tr><tr><td>3933e323653ff27e68c3458d245b47e3e37f52fd</td><td>Evaluation of a 3D-aided Pose Invariant 2D Face Recognition System
+<br/>Computational Biomedicine Lab
+<br/>4800 Calhoun Rd. Houston, TX, USA
+</td><td>('26401746', 'Ha A. Le', 'ha a. le')<br/>('39634395', 'Pengfei Dou', 'pengfei dou')<br/>('2461369', 'Yuhang Wu', 'yuhang wu')<br/>('1706204', 'Ioannis A. Kakadiaris', 'ioannis a. kakadiaris')</td><td>{xxu18, hale4, pdou, ywu35, ikakadia}@central.uh.edu
+</td></tr><tr><td>39b452453bea9ce398613d8dd627984fd3a0d53c</td><td></td><td></td><td></td></tr><tr><td>3958db5769c927cfc2a9e4d1ee33ecfba86fe054</td><td>Describable Visual Attributes for
+<br/>Face Verification and Image Search
+</td><td>('40631426', 'Neeraj Kumar', 'neeraj kumar')<br/>('39668247', 'Alexander C. Berg', 'alexander c. berg')<br/>('1767767', 'Peter N. Belhumeur', 'peter n. belhumeur')<br/>('1750470', 'Shree K. Nayar', 'shree k. nayar')</td><td></td></tr><tr><td>39ecdbad173e45964ffe589b9ced9f1ebfe2d44e</td><td>Automatic Recognition of Lower Facial Action Units
+<br/>Joint Research Group on Audio Visual Signal Processing (AVSP),
+<br/><b>Vrije Universiteit Brussel</b><br/>Pleinlaan 2, 1050 Brussels
+<br/>lower
+<br/>recognize
+</td><td>('1802474', 'Werner Verhelst', 'werner verhelst')<br/>('34068333', 'Isabel Gonzalez', 'isabel gonzalez')<br/>('1970907', 'Hichem Sahli', 'hichem sahli')</td><td>igonzale@etro.vub.ac.be
+<br/>hichem.sahli@etro.vub.ac.be
+<br/>wverhels@etro.vub.ac.be
+</td></tr><tr><td>39b5f6d6f8d8127b2b97ea1a4987732c0db6f9df</td><td></td><td></td><td></td></tr><tr><td>99ced8f36d66dce20d121f3a29f52d8b27a1da6c</td><td>Organizing Multimedia Data in Video
+<br/>Surveillance Systems Based on Face Verification
+<br/>with Convolutional Neural Networks
+<br/><b>National Research University Higher School of Economics, Nizhny Novgorod, Russian</b><br/>Federation
+</td><td>('26376584', 'Anastasiia D. Sokolova', 'anastasiia d. sokolova')<br/>('26427828', 'Angelina S. Kharchevnikova', 'angelina s. kharchevnikova')<br/>('35153729', 'Andrey V. Savchenko', 'andrey v. savchenko')</td><td>adsokolova96@mail.ru
+</td></tr><tr><td>994f7c469219ccce59c89badf93c0661aae34264</td><td>1
+<br/>Model Based Face Recognition Across Facial
+<br/>Expressions
+<br/>
+<br/>screens, embedded into mobiles and installed into everyday
+<br/>living and working environments they become valuable tools
+<br/>for human system interaction. A particular important aspect of
+<br/>this interaction is detection and recognition of faces and
+<br/>interpretation of facial expressions. These capabilities are
+<br/>deeply rooted in the human visual system and a crucial
+<br/>building block for social interaction. Consequently, these
+<br/>capabilities are an important step towards the acceptance of
+<br/>many technical systems.
+<br/>trees as a classifier
+<br/>lies not only
+</td><td>('1725709', 'Zahid Riaz', 'zahid riaz')<br/>('50565622', 'Christoph Mayer', 'christoph mayer')<br/>('32131501', 'Matthias Wimmer', 'matthias wimmer')<br/>('1699132', 'Bernd Radig', 'bernd radig')<br/>('31311898', 'Senior Member', 'senior member')</td><td></td></tr><tr><td>9949ac42f39aeb7534b3478a21a31bc37fe2ffe3</td><td>Parametric Stereo for Multi-Pose Face Recognition and
+<br/>3D-Face Modeling
+<br/>PSI ESAT-KUL
+<br/>Leuven, Belgium
+</td><td>('2733505', 'Rik Fransens', 'rik fransens')<br/>('2404667', 'Christoph Strecha', 'christoph strecha')<br/>('1681236', 'Luc Van Gool', 'luc van gool')</td><td></td></tr><tr><td>999289b0ef76c4c6daa16a4f42df056bf3d68377</td><td>The Role of Color and Contrast in Facial Age Estimation
+<br/><b>Intelligent Systems Lab Amsterdam, University of Amsterdam, The Netherlands</b><br/><b>Pattern Recognition and Bioinformatics Group, Delft University of Technology, The Netherlands</b><br/><b>Bo gazic i University, Istanbul, Turkey</b></td><td>('1695527', 'Theo Gevers', 'theo gevers')<br/>('1764521', 'Albert Ali Salah', 'albert ali salah')</td><td>{h.dibeklioglu,th.gevers,m.p.Lucassen}@uva.nl
+<br/>salah@boun.edu.tr
+</td></tr><tr><td>9958942a0b7832e0774708a832d8b7d1a5d287ae</td><td>The Sparse Matrix Transform for Covariance
+<br/>Estimation and Analysis of High Dimensional
+<br/>Signals
+</td><td>('1696925', 'Guangzhi Cao', 'guangzhi cao')<br/>('1709256', 'Leonardo R. Bachega', 'leonardo r. bachega')<br/>('1745655', 'Charles A. Bouman', 'charles a. bouman')</td><td></td></tr><tr><td>995d55fdf5b6fe7fb630c93a424700d4bc566104</td><td>The One Triangle Three Parallelograms Sampling Strategy and Its Application
+<br/>in Shape Regression
+<br/>Centre of Mathematical Sciences
+<br/><b>Lund University, Lund, Sweden</b></td><td>('38481779', 'Mikael Nilsson', 'mikael nilsson')</td><td>mikael.nilsson@math.lth.se
+</td></tr><tr><td>99726ad232cef837f37914b63de70d8c5101f4e2</td><td>International Journal of Scientific & Engineering Research, Volume 5, Issue 5, May-2014 570
+<br/>ISSN 2229-5518
+<br/>Facial Expression Recognition Using PCA & Distance Classifier
+<br/>Dept. of Electronics & Telecomm. Engg.
+<br/> Ph.D Scholar,VSSUT
+<br/>BURLA, ODISHA, INDIA
+<br/>Nilamani Bhoi
+<br/>Reader in Dept. of Electronics & Telecomm. Engg.
+<br/><b>VEER SURENDRA SAI UNIVERSITY OF</b><br/>TECHNOLOGY
+<br/>BURLA, ODISHA, INDIA
+</td><td></td><td>alpesh.d123@gmail.com
+<br/>nilamanib@gmail.com
+</td></tr><tr><td>993d189548e8702b1cb0b02603ef02656802c92b</td><td>Highly-Economized Multi-View Binary
+<br/>Compression for Scalable Image Clustering
+<br/><b>Harbin Institute of Technology (Shenzhen), China</b><br/><b>The University of Queensland, Australia</b><br/><b>Inception Institute of Arti cial Intelligence, UAE</b><br/>4 Computer Vision Laboratory, ETH Zurich, Switzerland
+<br/><b>University of Electronic Science and Technology of China, China</b></td><td>('38448016', 'Zheng Zhang', 'zheng zhang')<br/>('40241836', 'Li Liu', 'li liu')<br/>('1747229', 'Jie Qin', 'jie qin')<br/>('39986542', 'Fan Zhu', 'fan zhu')<br/>('2731972', 'Fumin Shen', 'fumin shen')<br/>('1725160', 'Yong Xu', 'yong xu')<br/>('40799321', 'Ling Shao', 'ling shao')<br/>('1724393', 'Heng Tao Shen', 'heng tao shen')</td><td></td></tr><tr><td>9931c6b050e723f5b2a189dd38c81322ac0511de</td><td></td><td></td><td></td></tr><tr><td>994b52bf884c71a28b4f5be4eda6baaacad1beee</td><td>Categorizing Big Video Data on the Web:
+<br/>Challenges and Opportunities
+<br/>School of Computer Science
+<br/><b>Fudan University</b><br/>Shanghai, China
+<br/>http://www.yugangjiang.info
+</td><td>('1717861', 'Yu-Gang Jiang', 'yu-gang jiang')</td><td></td></tr><tr><td>99001ac9fdaf7649c0d0bd8d2078719bafd216d9</td><td>> TPAMI-0571-1005<
+<br/>General Tensor Discriminant Analysis and
+<br/>Gabor Features for Gait Recognition
+<br/><b>School of Computer Science and Information Systems, Birkbeck College, University of London</b><br/><b>University of Vermont, 33 Colchester Avenue, Burlington</b><br/>Malet Street, London WC1E 7HX, United Kingdom.
+<br/>Vermont 05405, United States of America.
+</td><td>('1692693', 'Dacheng Tao', 'dacheng tao')<br/>('1720243', 'Xuelong Li', 'xuelong li')<br/>('1748808', 'Xindong Wu', 'xindong wu')<br/>('1740503', 'Stephen J. Maybank', 'stephen j. maybank')</td><td>{dacheng, xuelong, sjmaybank}@dcs.bbk.ac.uk; xwu@cs.uvm.edu.
+</td></tr><tr><td>9993f1a7cfb5b0078f339b9a6bfa341da76a3168</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+<br/>A Simple, Fast and Highly-Accurate Algorithm to
+<br/>Recover 3D Shape from 2D Landmarks on a Single
+<br/>Image
+</td><td>('39071836', 'Ruiqi Zhao', 'ruiqi zhao')<br/>('1678691', 'Yan Wang', 'yan wang')</td><td></td></tr><tr><td>9901f473aeea177a55e58bac8fd4f1b086e575a4</td><td>Human and Sheep Facial Landmarks Localisation
+<br/>by Triplet Interpolated Features
+<br/><b>University of Cambridge</b></td><td>('2966679', 'Heng Yang', 'heng yang')<br/>('2271111', 'Renqiao Zhang', 'renqiao zhang')<br/>('39626495', 'Peter Robinson', 'peter robinson')</td><td>hy306, rz264, pr10@cam.ac.uk
+</td></tr><tr><td>992ebd81eb448d1eef846bfc416fc929beb7d28b</td><td>Exemplar-Based Face Parsing
+<br/>Supplementary Material
+<br/><b>University of Wisconsin Madison</b><br/>Adobe Research
+<br/>http://www.cs.wisc.edu/~lizhang/projects/face-parsing/
+<br/>1. Additional Selected Results
+<br/>Figures 1 and 2 supplement Figure 4 in our paper. In all cases, the input images come from our Helen [1] test set. We note
+<br/>that our algorithm generally produces accurate results, as shown in Figures 1. However, our algorithm is not perfect and makes
+<br/>mistakes on especially challenging input images, as shown in Figure 2.
+<br/>In our view, the mouth is the most challenging region of the face to segment: the shape and appearance of the lips vary
+<br/>widely from subject to subject, mouths deform significantly, and the overall appearance of the mouth region changes depending
+<br/>on whether the inside of the mouth is visible or not. Unusual mouth expressions, like those shown in Figure 2, are not repre-
+<br/>sented well in the exemplar images, which results in poor label transfer from the top exemplars to the test image. Despite these
+<br/>challenges, our algorithm generally performs well on the mouth, with large segmentation errors occurring infrequently.
+<br/>2. Comparisons with Liu et al. [2]
+<br/>The scene parsing approach by Liu et al. [2] shares sevaral similarities with our work. Like our approach, they propose a
+<br/>nonparametric system that transfers labels from exemplars in a database to annotate a test image. This begs the question, Why
+<br/>not simply apply the approach from Liu et al. to face images?
+<br/>To help answer this question, we used the code provided by Liu et al. on our Helen [1] images; our exemplar set is used for
+<br/>training their system, and our test set is used for testing. Please see Section 4.3 in our paper for more details. Figure 3 shows
+<br/>several selected results for qualitative comparison. In general, our algorithm performs much better than Liu et al.’s algorithm.
+<br/>References
+<br/>[1] V. Le, J. Brandt, Z. Lin, L. Bourdev, and T. S. Huang. Interactive facial feature localization. In ECCV, 2012.
+<br/>[2] C. Liu, J. Yuen, and A. Torralba. Nonparametric scene parsing via label transfer. In PAMI, December 2011.
+</td><td>('2721523', 'Brandon M. Smith', 'brandon m. smith')<br/>('1721019', 'Jonathan Brandt', 'jonathan brandt')</td><td></td></tr><tr><td>99c20eb5433ed27e70881d026d1dbe378a12b342</td><td>ISCA Archive
+<br/>http://www.isca-speech.org/archive
+<br/>First Workshop on Speech, Language
+<br/>and Audio in Multimedia
+<br/>Marseille, France
+<br/>August 22-23, 2013
+<br/>Proceedings of the First Workshop on Speech, Language and Audio in Multimedia (SLAM), Marseille, France, August 22-23, 2013.
+<br/>78
+</td><td></td><td></td></tr><tr><td>99facca6fc50cc30f13b7b6dd49ace24bc94f702</td><td>Front.Comput.Sci.
+<br/>DOI
+<br/>RESEARCH ARTICLE
+<br/>VIPLFaceNet: An Open Source Deep Face Recognition SDK
+<br/>1 Key Lab of Intelligent Information Processing of Chinese Academy of Sciences (CAS),
+<br/><b>Institute of Computing Technology, CAS, Beijing, 100190, China</b><br/><b>University of Chinese Academy of Sciences, Beijing 100049, China</b><br/>c(cid:13) Higher Education Press and Springer-Verlag Berlin Heidelberg 2016
+</td><td>('46522348', 'Xin Liu', 'xin liu')<br/>('1693589', 'Meina Kan', 'meina kan')<br/>('3468240', 'Wanglong Wu', 'wanglong wu')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('1710220', 'Xilin Chen', 'xilin chen')</td><td></td></tr><tr><td>9990e0b05f34b586ffccdc89de2f8b0e5d427067</td><td>International Journal of Modeling and Optimization, Vol. 3, No. 2, April 2013
+<br/>Auto-Optimized Multimodal Expression Recognition
+<br/>Framework Using 3D Kinect Data for ASD Therapeutic
+<br/>Aid
+<br/>
+<br/>regarding
+<br/>emotion
+<br/>and
+<br/>to
+<br/>recognize
+</td><td>('25833279', 'Amira E. Youssef', 'amira e. youssef')<br/>('1720250', 'Ahmed S. Ibrahim', 'ahmed s. ibrahim')<br/>('1731164', 'A. Lynn Abbott', 'a. lynn abbott')</td><td></td></tr><tr><td>99d7678039ad96ee29ab520ff114bb8021222a91</td><td>Political image analysis with deep neural
+<br/>networks
+<br/>November 28, 2017
+</td><td>('41096358', 'L. Jason Anastasopoulos', 'l. jason anastasopoulos')<br/>('2361255', 'Shiry Ginosar', 'shiry ginosar')<br/>('2007721', 'Dhruvil Badani', 'dhruvil badani')<br/>('2459453', 'Jake Ryland Williams', 'jake ryland williams')<br/>('50521070', 'Crystal Lee', 'crystal lee')</td><td></td></tr><tr><td>52012b4ecb78f6b4b9ea496be98bcfe0944353cd</td><td>
+<br/> JOURNAL OF COMPUTATION IN BIOSCIENCES AND ENGINEERING
+<br/>
+<br/> Journal homepage: http://scienceq.org/Journals/JCLS.php
+<br/>
+<br/>Research Article
+<br/>Using Support Vector Machine and Local Binary Pattern for Facial Expression
+<br/>Recognition
+<br/>Open Access
+<br/><b>Federal University Technology Akure, PMB 704, Akure, Nigeria</b><br/>2. Department of computer science, Kwara state polytechnic Ilorin, Kwara-State, Nigeria.
+<br/> Received: September 22, 2015, Accepted: December 14, 2015, Published: December 14, 2015.
+</td><td>('10698338', 'Alese Boniface Kayode', 'alese boniface kayode')</td><td>. *Corresponding author: Ayeni Olaniyi Abiodun Mail Id: oaayeni@futa.edu.ng
+</td></tr><tr><td>523854a7d8755e944bd50217c14481fe1329a969</td><td>A Differentially Private Kernel Two-Sample Test
+<br/>MPI-IS
+<br/><b>University Of Oxford</b><br/><b>University Of Oxford</b><br/>MPI-IS
+<br/>April 17, 2018
+</td><td>('39565862', 'Anant Raj', 'anant raj')<br/>('35142231', 'Ho Chung Leon Law', 'ho chung leon law')<br/>('1698032', 'Dino Sejdinovic', 'dino sejdinovic')<br/>('37292171', 'Mijung Park', 'mijung park')</td><td>anant.raj@tuebingen.mpg.de
+<br/>ho.law@stats.ox.ac.uk
+<br/>dino.sejdinovic@stats.ox.ac.uk
+<br/>mijung.park@tuebingen.mpg.de
+</td></tr><tr><td>521cfbc1949289a7ffc3ff90af7c55adeb43db2a</td><td>Action Recognition with Coarse-to-Fine Deep Feature Integration and
+<br/>Asynchronous Fusion
+<br/><b>Shanghai Jiao Tong University, China</b><br/><b>National Key Laboratory for Novel Software Technology, Nanjing University, China</b><br/><b>University of Chinese Academy of Sciences, China</b></td><td>('8131625', 'Weiyao Lin', 'weiyao lin')<br/>('1926641', 'Yang Mi', 'yang mi')<br/>('1808816', 'Jianxin Wu', 'jianxin wu')<br/>('1875882', 'Ke Lu', 'ke lu')<br/>('37028145', 'Hongkai Xiong', 'hongkai xiong')</td><td>{wylin, deyangmiyang, xionghongkai}@sjtu.edu.cn, wujx2001@nju.edu.cn, luk@ucas.ac.cn
+</td></tr><tr><td>529e2ce6fb362bfce02d6d9a9e5de635bde81191</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication.
+<br/>> TIP-05732-2009<
+<br/>1
+<br/>Normalization of Face Illumination Based
+<br/>on Large- and Small- Scale Features
+</td><td>('2002129', 'Xiaohua Xie', 'xiaohua xie')<br/>('3333315', 'Wei-Shi Zheng', 'wei-shi zheng')<br/>('1768574', 'Pong C. Yuen', 'pong c. yuen')<br/>('1713795', 'Ching Y. Suen', 'ching y. suen')</td><td></td></tr><tr><td>52887969107956d59e1218abb84a1f834a314578</td><td>1283
+<br/>Travel Recommendation by Mining People
+<br/>Attributes and Travel Group Types From
+<br/>Community-Contributed Photos
+</td><td>('35081710', 'Yan-Ying Chen', 'yan-ying chen')<br/>('2363522', 'An-Jung Cheng', 'an-jung cheng')<br/>('1716836', 'Winston H. Hsu', 'winston h. hsu')</td><td></td></tr><tr><td>521482c2089c62a59996425603d8264832998403</td><td></td><td></td><td></td></tr><tr><td>521b625eebea73b5deb171a350e3709a4910eebf</td><td></td><td></td><td></td></tr><tr><td>52258ec5ec73ce30ca8bc215539c017d279517cf</td><td>Recognizing Faces with Expressions: Within-class Space and Between-class Space
+<br/><b>Zhejang University, Hangzhou 310027, P.R.China</b><br/>Yu Bing Chen Ping Jin Lianfu
+</td><td></td><td>Email: BingbingYu@21cn.com Pchen@mail.hz.zj.cn Lfjin@mail.hz.zj.cn
+</td></tr><tr><td>5253c94f955146ba7d3566196e49fe2edea1c8f4</td><td>Internet-based Morphable Model
+<br/><b>University of Washington</b><br/>  
+<br/> 
+<br/>
+<br/>
+<br/>
+<br/>  
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/> 
+<br/>
+<br/> 
+<br/> 
+<br/> 
+<br/>
+<br/>
+<br/>  
+<br/>
+<br/> 
+<br/> 
+<br/> 
+<br/>
+<br/> 
+<br/> 
+<br/>!
+<br/>!
+<br/>
+<br/>
+<br/>Figure 1. Overview of the method. We construct a morphable
+<br/>model directly from Internet photos, the model is then used for
+<br/>single view reconstruction from any new input image (Face An-
+<br/>alyzer) and further for shape modification (Face Modifier), e.g.,
+<br/>from neutral to smile in 3D.
+</td><td>('2419955', 'Ira Kemelmacher-Shlizerman', 'ira kemelmacher-shlizerman')</td><td>kemelmi@cs.washington.edu
+</td></tr><tr><td>527dda77a3864d88b35e017d542cb612f275a4ec</td><td></td><td></td><td></td></tr><tr><td>529b1f33aed49dbe025a99ac1d211c777ad881ec</td><td>FAST AND EXACT BI-DIRECTIONAL FITTING OF ACTIVE APPEARANCE MODELS
+<br/>Jean Kossaifi(cid:63)
+<br/><b>cid:63) Imperial College London, UK</b><br/><b>University of Nottingham, UK, School of Computer Science</b><br/><b>University of Twente, The Netherlands</b></td><td>('2610880', 'Georgios Tzimiropoulos', 'georgios tzimiropoulos')<br/>('1694605', 'Maja Pantic', 'maja pantic')</td><td></td></tr><tr><td>523b2cbc48decfabffb66ecaeced4fe6a6f2ac78</td><td>Photorealistic Facial Expression Synthesis by the Conditional Difference Adversarial
+<br/>Autoencoder
+<br/>Department of Electronic and Computer Engineering
+<br/><b>The Hong Kong University of Science and Technology</b><br/>HKSAR, China
+</td><td>('1698743', 'Yuqian Zhou', 'yuqian zhou')</td><td>yzhouas@ust.hk, eebert@ust.hk
+</td></tr><tr><td>52472ec859131844f38fc7d57944778f01d109ac</td><td>Improving speaker turn embedding by
+<br/>crossmodal transfer learning from face embedding
+<br/><b>Idiap Research Institute, Martigny, Switzerland</b><br/>2 ´Ecole Polytechnique F´ed´eral de Lausanne, Switzerland
+</td><td>('39560344', 'Nam Le', 'nam le')<br/>('1719610', 'Jean-Marc Odobez', 'jean-marc odobez')</td><td>{nle, odobez}@idiap.ch
+</td></tr><tr><td>5287d8fef49b80b8d500583c07e935c7f9798933</td><td>Generative Adversarial Text to Image Synthesis
+<br/><b>University of Michigan, Ann Arbor, MI, USA (UMICH.EDU</b><br/><b>Max Planck Institute for Informatics, Saarbr ucken, Germany (MPI-INF.MPG.DE</b><br/>REEDSCOT1, AKATA2, XCYAN1, LLAJAN1
+<br/>HONGLAK1, SCHIELE2
+</td><td>('2893664', 'Zeynep Akata', 'zeynep akata')<br/>('3084614', 'Xinchen Yan', 'xinchen yan')<br/>('2876316', 'Lajanugen Logeswaran', 'lajanugen logeswaran')<br/>('1697141', 'Honglak Lee', 'honglak lee')<br/>('1697100', 'Bernt Schiele', 'bernt schiele')</td><td></td></tr><tr><td>52c59f9f4993c8248dd3d2d28a4946f1068bcbbe</td><td>Structural Similarity and Distance in Learning
+<br/>Dept. of Electrical and
+<br/>Computer Engineering
+<br/><b>Boston University</b><br/>Boston, MA 02215
+<br/>Dept. of Electrical and
+<br/>Computer Engineering
+<br/><b>Boston University</b><br/>Boston, MA 02215
+<br/>David A. Casta˜n´on
+<br/>Dept. of Electrical and
+<br/>Computer Engineering
+<br/><b>Boston University</b><br/>Boston, MA 02215
+<br/>information,
+</td><td>('1928419', 'Joseph Wang', 'joseph wang')<br/>('1699322', 'Venkatesh Saligrama', 'venkatesh saligrama')</td><td>Email: joewang@bu.edu
+<br/>Email: srv@bu.edu
+<br/>Email: dac@bu.edu
+</td></tr><tr><td>52bf00df3b970e017e4e2f8079202460f1c0e1bd</td><td>Learning High-level Prior with Convolutional Neural Networks
+<br/>for Semantic Segmentation
+<br/><b>University of Science and Technology of China</b><br/>Hefei, China
+<br/><b>Tsinghua University</b><br/>Beijing, China
+<br/><b>The Hong Kong University of Science and Technology</b><br/>HongKong, China
+</td><td>('2743695', 'Haitian Zheng', 'haitian zheng')<br/>('1697194', 'Feng Wu', 'feng wu')<br/>('39987643', 'Lu Fang', 'lu fang')<br/>('1680777', 'Yebin Liu', 'yebin liu')<br/>('1916870', 'Mengqi Ji', 'mengqi ji')</td><td>{zhenght,fengwu,fanglu}@mail.ustc.edu.cn
+<br/>liuyebin@mail.tsinghua.edu.cn
+<br/>mji@ust.hk
+</td></tr><tr><td>52c91fcf996af72d191520d659af44e310f86ef9</td><td>Interactive Image Search with Attribute-based Guidance and Personalization
+<br/><b>The University of Texas at Austin</b></td><td>('1770205', 'Adriana Kovashka', 'adriana kovashka')<br/>('1794409', 'Kristen Grauman', 'kristen grauman')</td><td>{adriana, grauman}@cs.utexas.edu
+</td></tr><tr><td>52885fa403efbab5ef21274282edd98b9ca70cbf</td><td>Discriminant Graph Structures for Facial
+<br/>Expression Recognition
+<br/><b>Aristotle University of Thessaloniki</b><br/>Department of Informatics
+<br/>Box 451
+<br/>54124 Thessaloniki, Greece
+<br/>Address for correspondence :
+<br/><b>Aristotle University of Thessaloniki</b><br/>54124 Thessaloniki
+<br/>GREECE
+<br/>Tel. ++ 30 231 099 63 04
+<br/>Fax ++ 30 231 099 63 04
+<br/>April 2, 2008
+<br/>DRAFT
+</td><td>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')<br/>('1698588', 'Ioannis Pitas', 'ioannis pitas')<br/>('1698588', 'Ioannis Pitas', 'ioannis pitas')</td><td>email: pitas@zeus.csd.auth.gr
+</td></tr><tr><td>52f23e1a386c87b0dab8bfdf9694c781cd0a3984</td><td></td><td></td><td></td></tr><tr><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td><td>WIDER FACE: A Face Detection Benchmark
+<br/><b>The Chinese University of Hong Kong</b></td><td>('1692609', 'Shuo Yang', 'shuo yang')<br/>('47571885', 'Ping Luo', 'ping luo')<br/>('1717179', 'Chen Change Loy', 'chen change loy')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td>{ys014, pluo, ccloy, xtang}@ie.cuhk,edu.hk
+</td></tr><tr><td>528069963f0bd0861f380f53270c96c269a3ea1c</td><td><b>Cardi University</b><br/>School of Computer Science and Informatics
+<br/>Visual Computing Group
+<br/>4D (3D Dynamic) Statistical Models of
+<br/>Conversational Expressions and the
+<br/>Synthesis of Highly-Realistic 4D Facial
+<br/>Expression Sequences
+<br/>Submitted in part fulfilment of the requirements for the degree of
+<br/><b>Doctor of Philosophy in Computer Science at Cardi University, July 24th</b></td><td>('1812779', 'Jason Vandeventer', 'jason vandeventer')</td><td></td></tr><tr><td>529baf1a79cca813f8c9966ceaa9b3e42748c058</td><td>Triangle Wise Mapping Technique to Transform one Face Image into Another Face Image
+<br/>
+<br/>{tag} {/tag}
+<br/>
+<br/> International Journal of Computer Applications
+<br/>
+<br/> © 2014 by IJCA Journal
+<br/> Volume 87 - Number 6
+<br/>
+<br/> Year of Publication: 2014
+<br/>
+<br/>
+<br/>
+<br/> Authors:
+<br/>
+<br/>Bhogeswar Borah
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/> 10.5120/15209-3714
+<br/> {bibtex}pxc3893714.bib{/bibtex}
+</td><td></td><td></td></tr><tr><td>5239001571bc64de3e61be0be8985860f08d7e7e</td><td>SUBMITTED TO IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, JUNE 2016
+<br/>Deep Appearance Models: A Deep Boltzmann
+<br/>Machine Approach for Face Modeling
+</td><td>('1876581', 'Chi Nhan Duong', 'chi nhan duong')<br/>('1769788', 'Khoa Luu', 'khoa luu')<br/>('2687827', 'Kha Gia Quach', 'kha gia quach')<br/>('1699922', 'Tien D. Bui', 'tien d. bui')</td><td></td></tr><tr><td>556b9aaf1bc15c928718bc46322d70c691111158</td><td>Exploiting Qualitative Domain Knowledge for Learning Bayesian
+<br/>Network Parameters with Incomplete Data
+<br/>Thomson-Reuters Corporation
+<br/><b>Rensselaer Polytechnic Institute</b></td><td>('2460793', 'Wenhui Liao', 'wenhui liao')<br/>('1726583', 'Qiang Ji', 'qiang ji')</td><td>wenhui.liao@thomsonreuters.com
+<br/>qji@ecse.rpi.edu
+</td></tr><tr><td>55ea0c775b25d9d04b5886e322db852e86a556cd</td><td>DOCK: Detecting Objects
+<br/>by transferring Common-sense Knowledge
+<br/><b>University of California, Davis 2University of Washington 3Allen Institute for AI</b><br/>https://dock-project.github.io
+</td><td>('2270286', 'Ali Farhadi', 'ali farhadi')<br/>('19553871', 'Krishna Kumar Singh', 'krishna kumar singh')<br/>('1883898', 'Yong Jae Lee', 'yong jae lee')</td><td></td></tr><tr><td>550858b7f5efaca2ebed8f3969cb89017bdb739f</td><td></td><td></td><td></td></tr><tr><td>554b9478fd285f2317214396e0ccd81309963efd</td><td>Spatio-Temporal Action Localization For Human Action
+<br/>Recognition in Large Dataset
+<br/>1L2TI, Institut Galil´ee, Universit´e Paris 13, France;
+<br/>2SERCOM, Ecole Polytechnique de Tunisie
+</td><td>('3240115', 'Sameh MEGRHI', 'sameh megrhi')<br/>('2504338', 'Marwa JMAL', 'marwa jmal')<br/>('1731553', 'Azeddine BEGHDADI', 'azeddine beghdadi')<br/>('14521102', 'Wided Mseddi', 'wided mseddi')</td><td></td></tr><tr><td>55c68c1237166679d2cb65f266f496d1ecd4bec6</td><td>Learning to Score Figure Skating Sport Videos
+</td><td>('2708397', 'Chengming Xu', 'chengming xu')<br/>('35782003', 'Yanwei Fu', 'yanwei fu')<br/>('10110775', 'Zitian Chen', 'zitian chen')<br/>('40379722', 'Bing Zhang', 'bing zhang')<br/>('1717861', 'Yu-Gang Jiang', 'yu-gang jiang')<br/>('1713721', 'Xiangyang Xue', 'xiangyang xue')</td><td></td></tr><tr><td>558fc9a2bce3d3993a9c1f41b6c7f290cefcf92f</td><td>DEPARTMENT OF INFORMATION ENGINEERING AND COMPUTER SCIENCE
+<br/>ICT International Doctoral School
+<br/>Efficient and Effective Solutions
+<br/>for Video Classification
+<br/>Advisor:
+<br/>Prof. Nicu Sebe
+<br/><b>University of Trento</b><br/>Co-Advisor:
+<br/>Prof. Bogdan Ionescu
+<br/><b>University Politehnica of Bucharest</b><br/>November 2017
+</td><td>('28957796', 'Ionut Cosmin Duta', 'ionut cosmin duta')</td><td></td></tr><tr><td>55138c2b127ebdcc508503112bf1d1eeb5395604</td><td>Ensemble Nystr¨om Method
+<br/>Google Research
+<br/>New York, NY
+<br/><b>Courant Institute and Google Research</b><br/>New York, NY
+<br/><b>Courant Institute of Mathematical Sciences</b><br/>New York, NY
+</td><td>('2794322', 'Sanjiv Kumar', 'sanjiv kumar')<br/>('1709415', 'Mehryar Mohri', 'mehryar mohri')<br/>('8395559', 'Ameet Talwalkar', 'ameet talwalkar')</td><td>sanjivk@google.com
+<br/>mohri@cs.nyu.edu
+<br/>ameet@cs.nyu.edu
+</td></tr><tr><td>5502dfe47ac26e60e0fb25fc0f810cae6f5173c0</td><td>Affordance Prediction via Learned Object Attributes
+</td><td>('2749326', 'Tucker Hermans', 'tucker hermans')<br/>('1692956', 'James M. Rehg', 'james m. rehg')<br/>('1688328', 'Aaron Bobick', 'aaron bobick')</td><td></td></tr><tr><td>55e18e0dde592258882134d2dceeb86122b366ab</td><td>Journal of Artificial Intelligence Research 37 (2010) 397-435
+<br/>Submitted 11/09; published 03/10
+<br/>Training a Multilingual Sportscaster:
+<br/>Using Perceptual Context to Learn Language
+<br/>Department of Computer Science
+<br/><b>The University of Texas at Austin</b><br/><b>University Station C0500, Austin TX 78712, USA</b></td><td>('39230960', 'David L. Chen', 'david l. chen')<br/>('1765656', 'Joohyun Kim', 'joohyun kim')<br/>('1797655', 'Raymond J. Mooney', 'raymond j. mooney')</td><td>DLCC@CS.UTEXAS.EDU
+<br/>SCIMITAR@CS.UTEXAS.EDU
+<br/>MOONEY@CS.UTEXAS.EDU
+</td></tr><tr><td>55a158f4e7c38fe281d06ae45eb456e05516af50</td><td>The 22nd International Conference on Computer Graphics and Vision
+<br/>108
+<br/>GraphiCon’2012
+</td><td></td><td></td></tr><tr><td>5506a1a1e1255353fde05d9188cb2adc20553af5</td><td></td><td></td><td></td></tr><tr><td>55966926e7c28b1eee1c7eb7a0b11b10605a1af0</td><td>Surpassing Human-Level Face Verification Performance on LFW with
+<br/>GaussianFace
+<br/><b>The Chinese University of Hong Kong</b></td><td>('2312486', 'Chaochao Lu', 'chaochao lu')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td>{lc013, xtang}@ie.cuhk.edu.hk
+</td></tr><tr><td>552c55c71bccfc6de7ce1343a1cd12208e9a63b3</td><td>Accurate Eye Center Location and Tracking Using Isophote Curvature
+<br/>Intelligent Systems Lab Amsterdam
+<br/><b>University of Amsterdam, The Netherlands</b></td><td>('9301018', 'Roberto Valenti', 'roberto valenti')<br/>('1695527', 'Theo Gevers', 'theo gevers')</td><td>{rvalenti,gevers}@science.uva.nl
+</td></tr><tr><td>5517b28795d7a68777c9f3b2b46845dcdb425b2c</td><td>Deep video gesture recognition using illumination invariants
+<br/><b>Massachusetts Institute of Technology</b><br/>Figure 1: Automated facial gesture recognition is a fundamental problem in human computer interaction. While tackling real world tasks of
+<br/>expression recognition sudden changes in illumination from multiple sources can be expected. We show how to build a robust system to detect
+<br/>human emotions while showing invariance to illumination.
+</td><td>('37381309', 'Otkrist Gupta', 'otkrist gupta')<br/>('2283049', 'Dan Raviv', 'dan raviv')<br/>('1717566', 'Ramesh Raskar', 'ramesh raskar')</td><td></td></tr><tr><td>55c81f15c89dc8f6eedab124ba4ccab18cf38327</td><td></td><td></td><td></td></tr><tr><td>5550a6df1b118a80c00a2459bae216a7e8e3966c</td><td>ISSN: 0974-2115
+<br/>www.jchps.com Journal of Chemical and Pharmaceutical Sciences
+<br/>A perusal on Facial Emotion Recognition System (FERS)
+<br/><b>School of Information Technology and Engineering, VIT University, Vellore, 632014, India</b></td><td></td><td>*Corresponding author: E-Mail: krithika.lb@vit.ac.in
+</td></tr><tr><td>55e87050b998eb0a8f0b16163ef5a28f984b01fa</td><td>CAN YOU FIND A FACE IN A HEVC BITSTREAM?
+<br/><b>School of Engineering Science, Simon Fraser University, Burnaby, BC, Canada</b></td><td>('3393216', 'Saeed Ranjbar Alvar', 'saeed ranjbar alvar')<br/>('3320198', 'Hyomin Choi', 'hyomin choi')</td><td></td></tr><tr><td>55bc7abcef8266d76667896bbc652d081d00f797</td><td>Impact of Facial Cosmetics on Automatic Gender and Age Estimation
+<br/>Algorithms
+<br/><b>Computer Science and Electrical Engineering, West Virginia University, Morgantown, USA</b><br/><b>Computer Science and Engineering, Michigan State University, East Lansing, USA</b><br/>Keywords:
+<br/>Biometrics, Face Recognition, Facial Cosmetics, Makeup, Gender Spoofing, Age Alteration, Automatic
+<br/>Gender Estimation, Automatic Age Estimation
+</td><td>('1751335', 'Cunjian Chen', 'cunjian chen')<br/>('3299530', 'Antitza Dantcheva', 'antitza dantcheva')<br/>('1698707', 'Arun Ross', 'arun ross')</td><td>cchen10@csee.wvu.edu, {antitza, rossarun}@msu.edu
+</td></tr><tr><td>55b4b1168c734eeb42882082bd131206dbfedd5b</td><td>Learning to Align from Scratch
+<br/><b>University of Massachusetts, Amherst, MA</b><br/><b>University of Michigan, Ann Arbor, MI</b></td><td>('3219900', 'Gary B. Huang', 'gary b. huang')</td><td>{gbhuang,mmattar,elm}@cs.umass.edu
+<br/>honglak@eecs.umich.edu
+</td></tr><tr><td>55079a93b7d1eb789193d7fcdcf614e6829fad0f</td><td>Efficient and Robust Inverse Lighting of a Single Face Image using Compressive
+<br/>Sensing
+<br/><b>Center for Sensor Systems (ZESS) and Institute for Vision and Graphics#, University of Siegen</b><br/>57076 Siegen, Germany
+</td><td>('1747804', 'Miguel Heredia Conde', 'miguel heredia conde')<br/>('1967283', 'Davoud Shahlaei', 'davoud shahlaei')<br/>('2880906', 'Volker Blanz', 'volker blanz')<br/>('1698728', 'Otmar Loffeld', 'otmar loffeld')</td><td>heredia@zess.uni-siegen.de
+</td></tr><tr><td>55804f85613b8584d5002a5b0ddfe86b0d0e3325</td><td>Data Complexity in Machine Learning
+<br/><b>Learning Systems Group, California Institute of Technology</b></td><td>('37715538', 'Ling Li', 'ling li')<br/>('1817975', 'Yaser S. Abu-Mostafa', 'yaser s. abu-mostafa')</td><td></td></tr><tr><td>551fa37e8d6d03b89d195a5c00c74cc52ff1c67a</td><td>GeThR-Net: A Generalized Temporally Hybrid
+<br/>Recurrent Neural Network for Multimodal
+<br/>Information Fusion
+<br/>1 Xerox Research Centre India; 2 Amazon Development Center India
+</td><td>('2757149', 'Ankit Gandhi', 'ankit gandhi')<br/>('34751361', 'Arjun Sharma', 'arjun sharma')<br/>('2221075', 'Arijit Biswas', 'arijit biswas')<br/>('2116262', 'Om Deshmukh', 'om deshmukh')</td><td>{ankit.g1290,arjunsharma.iitg,arijitbiswas87}@gmail.com;
+<br/>om.deshmukh@xerox.com (*-equal contribution)
+</td></tr><tr><td>55eb7ec9b9740f6c69d6e62062a24bfa091bbb0c</td><td>CAS(ME)2: A Database of Spontaneous
+<br/>Macro-expressions and Micro-expressions
+<br/><b>State Key Laboratory of Brain and Cognitive Science, Institute of Psychology</b><br/>Chinese Academy of Sciences, Beijing, China
+<br/><b>University of Chinese Academy of Sciences, Beijing, China</b><br/><b>Key Laboratory of Behavior Sciences, Institute of Psychology</b><br/>Chinese Academy of Sciences, Beijing, China
+<br/><b>Institute of Psychology and Behavioral Sciences</b><br/><b>Wenzhou University, Wenzhou, China</b></td><td>('34495371', 'Fangbing Qu', 'fangbing qu')<br/>('9185305', 'Wen-Jing Yan', 'wen-jing yan')<br/>('1684007', 'Xiaolan Fu', 'xiaolan fu')</td><td>{qufb,fuxl}@psych.ac.cn
+<br/>wangsujing@psych.ac.cn
+<br/>yanwj@wzu.edu.cn
+</td></tr><tr><td>55b9b1c1c5487f5f62b44340104a9c4cc2ed7c96</td><td>1 Million Full-Sentences Visual Question Answering (FSVQA)
+<br/>The Color of the Cat is Gray:
+<br/><b>The University of Tokyo</b><br/>7 Chome-3-1 Hongo, Bunkyo
+<br/>Tokyo 113-8654, Japan
+</td><td>('2518695', 'Andrew Shin', 'andrew shin')<br/>('3250559', 'Yoshitaka Ushiku', 'yoshitaka ushiku')<br/>('1790553', 'Tatsuya Harada', 'tatsuya harada')</td><td></td></tr><tr><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td><td>Indian Face Age Database: A Database for Face Recognition with Age Variation
+<br/>{tag} {/tag}
+<br/> International Journal of Computer Applications
+<br/>
+<br/> Foundation of Computer Science (FCS), NY, USA
+<br/>
+<br/>
+<br/>Volume 126
+<br/>-
+<br/>Number 5
+<br/>
+<br/>
+<br/> Year of Publication: 2015
+<br/>
+<br/>
+<br/>
+<br/>
+<br/> Authors:
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/> 10.5120/ijca2015906055
+<br/> {bibtex}2015906055.bib{/bibtex}
+</td><td>('2029759', 'Reecha Sharma', 'reecha sharma')</td><td></td></tr><tr><td>9788b491ddc188941dadf441fc143a4075bff764</td><td>LOGAN: Membership Inference Attacks Against Generative Models∗
+<br/><b>University College London</b></td><td>('9200194', 'Jamie Hayes', 'jamie hayes')<br/>('2008164', 'Luca Melis', 'luca melis')<br/>('1722262', 'George Danezis', 'george danezis')<br/>('1728207', 'Emiliano De Cristofaro', 'emiliano de cristofaro')</td><td>{j.hayes, l.melis, g.danezis, e.decristofaro}@cs.ucl.ac.uk
+</td></tr><tr><td>973e3d9bc0879210c9fad145a902afca07370b86</td><td>(IJACSA) International Journal of Advanced Computer Science and Applications,
+<br/>Vol. 7, No. 7, 2016
+<br/>From Emotion Recognition to Website
+<br/>Customizations
+<br/>O.B. Efremides
+<br/>School of Web Media
+<br/>Bahrain Polytechnic
+<br/>Isa Town, Kingdom of Bahrain
+</td><td></td><td></td></tr><tr><td>970c0d6c0fd2ebe7c5921a45bc70f6345c844ff3</td><td>Proceedings of the Twenty-Fifth International Joint Conference on Artificial Intelligence (IJCAI-16)
+<br/>Discriminative Log-Euclidean Feature Learning for Sparse
+<br/>Representation-Based Recognition of Faces from Videos
+<br/><b>Center for Automation Research, University of Maryland</b><br/><b>College Park, MD</b><br/>{mefathy, azadeh, rama} (at) umiacs.umd.edu
+</td><td>('4570075', 'Mohammed E. Fathy', 'mohammed e. fathy')<br/>('2943431', 'Azadeh Alavi', 'azadeh alavi')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td></td></tr><tr><td>97b8249914e6b4f8757d22da51e8347995a40637</td><td>28
+<br/>Large-Scale Vehicle Detection, Indexing,
+<br/>and Search in Urban Surveillance Videos
+</td><td>('1832513', 'Behjat Siddiquie', 'behjat siddiquie')<br/>('3151405', 'James Petterson', 'james petterson')<br/>('2029646', 'Yun Zhai', 'yun zhai')<br/>('3233207', 'Ankur Datta', 'ankur datta')<br/>('34609371', 'Lisa M. Brown', 'lisa m. brown')<br/>('1767897', 'Sharath Pankanti', 'sharath pankanti')</td><td></td></tr><tr><td>972ef9ddd9059079bdec17abc8b33039ed25c99c</td><td>International Journal of Innovations in Engineering and Technology (IJIET)
+<br/>A Novel on understanding How IRIS
+<br/>Recognition works
+<br/>Dept. of Comp. Science
+<br/><b>M.P.M. College, Bhopal, India</b><br/>Asst. Professor CSE
+<br/><b>M.P.M. College, Bhopal, India</b></td><td>('37930830', 'Vijay Shinde', 'vijay shinde')<br/>('9345591', 'Prakash Tanwar', 'prakash tanwar')</td><td></td></tr><tr><td>97032b13f1371c8a813802ade7558e816d25c73f</td><td>Total Recall Final Report
+<br/>Supervisor: Professor Duncan Gillies
+<br/>January 11, 2006
+</td><td>('2561350', 'Peter Collingbourne', 'peter collingbourne')<br/>('3036326', 'Khilan Gudka', 'khilan gudka')<br/>('15490561', 'Steve Lovegrove', 'steve lovegrove')<br/>('35260800', 'Jiefei Ma', 'jiefei ma')</td><td></td></tr><tr><td>97137d5154a9f22a5d9ecc32e8e2b95d07a5a571</td><td>The final publication is available at Springer via http://dx.doi.org/10.1007/s11042-016-3418-y
+<br/>Facial Expression Recognition based on Local Region
+<br/>Specific Features and Support Vector Machines
+<br/>Park1
+<br/><b>Korea Electronics Technology Institute, Jeonju-si, Jeollabuk-do 561-844, Rep. of Korea; E</b><br/><b>Division of Computer Engineering, Jeonbuk National University, Jeonju-si, Jeollabuk-do</b><br/>Tel.: +82-63-270-2406; Fax: +82-63-270-2394.
+</td><td>('32322842', 'Deepak Ghimire', 'deepak ghimire')<br/>('31984909', 'SungHwan Jeong', 'sunghwan jeong')<br/>('2034182', 'Joonwhoan Lee', 'joonwhoan lee')</td><td>Mails: (deepak, shjeong, shpark)@keti.re.kr
+<br/>756, Rep. of Korea; E-Mail: chlee@jbnu.ac.kr
+<br/>♣ Corresponding Author; E-Mail: chlee@jbnu.ac.kr;
+</td></tr><tr><td>9730b9cd998c0a549601c554221a596deda8af5b</td><td>Spatio-temporal Person Retrieval via Natural Language Queries
+<br/><b>Graduate School of Information Science and Technology, The University of Tokyo</b></td><td>('3369734', 'Masataka Yamaguchi', 'masataka yamaguchi')<br/>('8915348', 'Kuniaki Saito', 'kuniaki saito')<br/>('3250559', 'Yoshitaka Ushiku', 'yoshitaka ushiku')<br/>('1790553', 'Tatsuya Harada', 'tatsuya harada')</td><td>{yamaguchi, ksaito, ushiku, harada}@mi.t.u-tokyo.ac.jp
+</td></tr><tr><td>978a219e07daa046244821b341631c41f91daccd</td><td>Emotional Intelligence: Giving Computers
+<br/>Effective Emotional Skills to Aid Interaction
+<br/><b>School of Computer Science, University of Birmingham, UK</b><br/>1 Introduction
+<br/>Why do computers need emotional intelligence? Science fiction often por-
+<br/>trays emotional computers as dangerous and frightening, and as a serious
+<br/>threat to human life. One of the most famous examples is HAL, the supercom-
+<br/>puter onboard the spaceship Discovery, in the movie 2001: A Space Odyssey.
+<br/>HAL could express, recognize and respond to human emotion, and generally
+<br/>had strong emotional skills – the consequences of which were catastrophic.
+<br/>However, since the movie’s release almost 40 years ago, the traditional view
+<br/>of emotions as contributing to irrational and unpredictable behaviour has
+<br/>changed. Recent research has suggested that emotions play an essential role
+<br/>in important areas such as learning, memory, motivation, attention, creativ-
+<br/>ity, and decision making. These findings have prompted a large number of
+<br/>research groups around the world to start examining the role of emotions and
+<br/>emotional intelligence in human-computer interaction (HCI).
+<br/>For almost half a century, computer scientists have been attempting to build
+<br/>machines that can interact intelligently with us, and despite initial optimism,
+<br/>they are still struggling to do so. For much of this time, the role of emotion in
+<br/>developing intelligent computers was largely overlooked, and it is only recently
+<br/>that interest in this area has risen dramatically. This increased interest can
+<br/>largely be attributed to the work of [6] and [85] who were amongst the first to
+<br/>bring emotion to the attention of computer scientists. The former highlighted
+<br/>emotion as a fundamental component required in building believable agents,
+<br/>while the latter further raised the awareness of emotion and its potential
+<br/>importance in HCI. Since these publications, the literature on emotions and
+<br/>computing has grown considerably with progress being made on a number of
+<br/>different fronts.
+<br/>The concept of designing computers to have emotional intelligence may seem
+<br/>strange, but equipping computers with this type of intelligence may provide
+<br/>a number of important advantages. For example, in spite of a computer’s
+</td><td>('3134697', 'Chris Creed', 'chris creed')<br/>('2282865', 'Russell Beale', 'russell beale')</td><td>cpc@cs.bham.ac.uk
+<br/>r.beale@cs.bham.ac.uk
+</td></tr><tr><td>976e0264bb57786952a987d4456850e274714fb8</td><td>Improving Semantic Concept Detection through the
+<br/>Dictionary of Visually-distinct Elements
+<br/><b>Center for Research in Computer Vision, University of Central Florida</b></td><td>('1707795', 'Afshin Dehghan', 'afshin dehghan')<br/>('1803711', 'Haroon Idrees', 'haroon idrees')<br/>('1745480', 'Mubarak Shah', 'mubarak shah')</td><td>{adehghan, haroon, shah}@cs.ucf.edu
+</td></tr><tr><td>9758f3fd94239a8d974217fe12599f88fb413f3d</td><td>UC-HCC Submission to Thumos 2014
+<br/><b>Vision and Sensing, HCC, ESTeM, University of Canberra</b></td><td>('1793720', 'O. V. Ramana Murthy', 'o. v. ramana murthy')<br/>('1717204', 'Roland Goecke', 'roland goecke')</td><td></td></tr><tr><td>97f9c3bdb4668f3e140ded2da33fe704fc81f3ea</td><td>AnExperimentalComparisonofAppearance
+<br/>andGeometricModelBasedRecognition
+<br/>J.Mundy,A.Liu,N.Pillow,A.Zisserman,S.Abdallah,S.Utcke,
+<br/>S.NayarandC.Rothwell
+<br/>GeneralElectricCorporateResearchandDevelopment,Schenectady,NY,USA
+<br/><b>RoboticsResearchGroup, UniversityofOxford, Oxford, UK</b><br/><b>ColumbiaUniversity, NY, USA</b><br/>INRIA,SophiaAntipolis,France
+</td><td></td><td></td></tr><tr><td>97e569159d5658760eb00ca9cb662e6882d2ab0e</td><td>Correlation Filters for Object Alignment
+<br/><b>Carnegie Mellon University</b><br/><b>Carnegie Mellon University</b><br/>B.V.K. Vijaya Kumar
+<br/><b>Carnegie Mellon University</b></td><td>('2232940', 'Vishnu Naresh Boddeti', 'vishnu naresh boddeti')<br/>('1733113', 'Takeo Kanade', 'takeo kanade')</td><td>naresh@cmu.edu
+<br/>tk@cs.cmu.edu
+<br/>kumar@ece.cmu.edu
+</td></tr><tr><td>97cf04eaf1fc0ac4de0f5ad4a510d57ce12544f5</td><td>manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Deep Affect Prediction in-the-wild: Aff-Wild Database and Challenge,
+<br/>Deep Architectures, and Beyond
+<br/>Zafeiriou4
+</td><td>('1811396', 'Dimitrios Kollias', 'dimitrios kollias')<br/>('1757287', 'Guoying Zhao', 'guoying zhao')</td><td></td></tr><tr><td>97d1d561362a8b6beb0fdbee28f3862fb48f1380</td><td>1955
+<br/>Age Synthesis and Estimation via Faces:
+<br/>A Survey
+</td><td>('1708679', 'Yun Fu', 'yun fu')<br/>('1822413', 'Guodong Guo', 'guodong guo')<br/>('1739208', 'Thomas S. Huang', 'thomas s. huang')</td><td></td></tr><tr><td>97540905e4a9fdf425989a794f024776f28a3fa9</td><td></td><td></td><td></td></tr><tr><td>97865d31b5e771cf4162bc9eae7de6991ceb8bbf</td><td>Face and Gender Classification in Crowd Video
+<br/>IIIT-D-MTech-CS-GEN-13-100
+<br/>July 16, 2015
+<br/><b>Indraprastha Institute of Information Technology</b><br/>New Delhi
+<br/>Thesis Advisors
+<br/>Dr. Richa Singh
+<br/>Submitted in partial fulfillment of the requirements
+<br/>for the Degree of M.Tech. in Computer Science
+<br/>c(cid:13) Verma, 2015
+<br/>Keywords : Face Recognition, Gender Classification, Crowd database
+</td><td>('2578160', 'Priyanka Verma', 'priyanka verma')<br/>('2338122', 'Mayank Vatsa', 'mayank vatsa')</td><td></td></tr><tr><td>975978ee6a32383d6f4f026b944099e7739e5890</td><td>Privacy-Preserving Age Estimation
+<br/>for Content Rating
+<br/>Binglin Li∗
+<br/><b>University of Manitoba</b><br/><b>Simon Fraser University</b><br/>Winnipeg, Canada
+<br/>Burnaby, Canada
+<br/>Noman Mohammed
+<br/><b>University of Manitoba</b><br/>Winnipeg, Canada
+<br/>Yang Wang
+<br/>Jie Liang
+<br/><b>University of Manitoba</b><br/><b>Simon Fraser University</b><br/>Winnipeg, Canada
+<br/>Burnaby, Canada
+</td><td>('2373631', 'Linwei Ye', 'linwei ye')</td><td>yel3@cs.umanitoba.ca
+<br/>binglinl@sfu.ca
+<br/>noman@cs.umanitoba.ca
+<br/>ywang@cs.umanitoba.ca
+<br/>jiel@sfu.ca
+</td></tr><tr><td>9755554b13103df634f9b1ef50a147dd02eab02f</td><td>How Transferable are CNN-based Features for
+<br/>Age and Gender Classification?
+<br/> 1
+</td><td>('2850086', 'Gökhan Özbulak', 'gökhan özbulak')<br/>('3152281', 'Yusuf Aytar', 'yusuf aytar')</td><td></td></tr><tr><td>635158d2da146e9de559d2742a2fa234e06b52db</td><td></td><td></td><td></td></tr><tr><td>63d8110ac76f57b3ba8a5947bc6bdbb86f25a342</td><td>On Modeling Variations for Face Authentication
+<br/><b>Carnegie Mellon University, Pittsburgh, PA</b></td><td>('1759169', 'Xiaoming Liu', 'xiaoming liu')<br/>('1746230', 'Tsuhan Chen', 'tsuhan chen')</td><td>xiaoming@andrew.cmu.edu tsuhan@cmu.edu kumar@ece.cmu.edu
+</td></tr><tr><td>63cf5fc2ee05eb9c6613043f585dba48c5561192</td><td>Prototype Selection for
+<br/>Classification in Standard
+<br/>and Generalized
+<br/>Dissimilarity Spaces
+</td><td></td><td></td></tr><tr><td>632b24ddd42fda4aebc5a8af3ec44f7fd3ecdc6c</td><td>Real-Time Facial Segmentation
+<br/>and Performance Capture from RGB Input
+<br/>Pinscreen
+<br/><b>University of Southern California</b></td><td>('2059597', 'Shunsuke Saito', 'shunsuke saito')<br/>('50290121', 'Tianye Li', 'tianye li')<br/>('1706574', 'Hao Li', 'hao li')</td><td></td></tr><tr><td>6324fada2fb00bd55e7ff594cf1c41c918813030</td><td>Uncertainty Reduction For Active Image Clustering
+<br/>via a Hybrid Global-Local Uncertainty Model
+<br/><b>State University of New York at Buffalo</b><br/>Department of Computer Science and Engineering
+<br/>338 Davis Hall, Buffalo, NY, 14260-2500
+</td><td>('2228109', 'Caiming Xiong', 'caiming xiong')<br/>('34187462', 'David M. Johnson', 'david m. johnson')<br/>('3587688', 'Jason J. Corso', 'jason j. corso')</td><td>{cxiong,davidjoh,jcorso}@buffalo.edu
+</td></tr><tr><td>6308e9c991125ee6734baa3ec93c697211237df8</td><td>LEARNING THE SPARSE REPRESENTATION FOR CLASSIFICATION
+<br/><b>Beckman Institute, University of Illinois at Urbana-Champaign, USA</b></td><td>('1706007', 'Jianchao Yang', 'jianchao yang')<br/>('7898154', 'Jiangping Wang', 'jiangping wang')</td><td>{jyang29, jwang63, huang}@ifp.illinois.edu
+</td></tr><tr><td>6342a4c54835c1e14159495373ab18b4233d2d9b</td><td>TOWARDS POSE-ROBUST
+<br/>FACE RECOGNITION ON VIDEO
+<br/>Submitted as a requirement of the degree
+<br/>of doctor of philosophy
+<br/>at the
+<br/>Science and Engineering Faculty
+<br/><b>Queensland University of Technology</b><br/>September, 2014
+</td><td>('23168868', 'Moh Edi Wibowo', 'moh edi wibowo')</td><td></td></tr><tr><td>63d8d69e90e79806a062cb8654ad78327c8957bb</td><td></td><td></td><td></td></tr><tr><td>63c109946ffd401ee1195ed28f2fb87c2159e63d</td><td>14-1
+<br/>MVA2011 IAPR Conference on Machine Vision Applications, June 13-15, 2011, Nara, JAPAN
+<br/>Robust Facial Feature Localization using Improved Active Shape
+<br/>Model and Gabor Filter
+<br/><b>Engineering, National Formosa University</b><br/>Taiwan
+</td><td>('1711364', 'Hui-Yu Huang', 'hui-yu huang')</td><td>E-mail: hyhuang@nfu.edu.tw
+</td></tr><tr><td>63b29886577a37032c7e32d8899a6f69b11a90de</td><td>Image-set based Face Recognition Using Boosted Global
+<br/>and Local Principal Angles
+<br/><b>Xi an Jiaotong University, China</b><br/><b>University of Tsukuba, Japan</b></td><td>('6916241', 'Xi Li', 'xi li')<br/>('1770128', 'Kazuhiro Fukui', 'kazuhiro fukui')<br/>('1715389', 'Nanning Zheng', 'nanning zheng')</td><td>lxaccv09@yahoo.com,
+<br/>znn@xjtu.edu.cn
+<br/>kf@cs.tsukuba.ac.jp
+</td></tr><tr><td>631483c15641c3652377f66c8380ff684f3e365c</td><td>Sync-DRAW: Automatic Video Generation using Deep Recurrent
+<br/>A(cid:130)entive Architectures
+<br/>Gaurav Mi(cid:138)al∗
+<br/>IIT Hyderabad
+<br/>Vineeth N Balasubramanian
+<br/>IIT Hyderabad
+</td><td>('8268761', 'Tanya Marwah', 'tanya marwah')</td><td>gaurav.mi(cid:138)al.191013@gmail.com
+<br/>ee13b1044@iith.ac.in
+<br/>vineethnb@iith.ac.in
+</td></tr><tr><td>63a6c256ec2cf2e0e0c9a43a085f5bc94af84265</td><td>Complexity of Multiverse Networks and
+<br/>their Multilayer Generalization
+<br/>The Blavatnik School of Computer Science
+<br/><b>Tel Aviv University</b></td><td>('1762320', 'Etai Littwin', 'etai littwin')<br/>('1776343', 'Lior Wolf', 'lior wolf')</td><td></td></tr><tr><td>63213d080a43660ac59ea12e3c35e6953f6d7ce8</td><td>ActionVLAD: Learning spatio-temporal aggregation for action classification
+<br/><b>Robotics Institute, Carnegie Mellon University</b><br/>2Adobe Research
+<br/>3INRIA
+<br/>http://rohitgirdhar.github.io/ActionVLAD
+</td><td>('3102850', 'Rohit Girdhar', 'rohit girdhar')<br/>('1770537', 'Deva Ramanan', 'deva ramanan')<br/>('1782755', 'Josef Sivic', 'josef sivic')<br/>('2015670', 'Bryan Russell', 'bryan russell')</td><td></td></tr><tr><td>630d1728435a529d0b0bfecb0e7e335f8ea2596d</td><td>Facial Action Unit Detection by Cascade of Tasks
+<br/><b>School of Information Science and Engineering, Southeast University, Nanjing, China</b><br/><b>Robotics Institute, Carnegie Mellon University, Pittsburgh, PA</b><br/><b>University of Pittsburgh, Pittsburgh, PA</b></td><td>('2499751', 'Xiaoyu Ding', 'xiaoyu ding')<br/>('18870591', 'Qiao Wang', 'qiao wang')</td><td></td></tr><tr><td>63eefc775bcd8ccad343433fc7a1dd8e1e5ee796</td><td></td><td></td><td></td></tr><tr><td>632fa986bed53862d83918c2b71ab953fd70d6cc</td><td>GÜNEL ET AL.: WHAT FACE AND BODY SHAPES CAN TELL ABOUT HEIGHT
+<br/>What Face and Body Shapes Can Tell
+<br/>About Height
+<br/>CVLab
+<br/>EPFL,
+<br/>Lausanne, Switzerland
+</td><td>('46211822', 'Semih Günel', 'semih günel')<br/>('2933543', 'Helge Rhodin', 'helge rhodin')<br/>('1717736', 'Pascal Fua', 'pascal fua')</td><td>semih.gunel@epfl.ch
+<br/>helge.rhodin@epfl.ch
+<br/>pascal.fua@epfl.ch
+</td></tr><tr><td>63340c00896d76f4b728dbef85674d7ea8d5ab26</td><td>1732
+<br/>Discriminant Subspace Analysis:
+<br/>A Fukunaga-Koontz Approach
+</td><td>('40404906', 'Sheng Zhang', 'sheng zhang')<br/>('1715286', 'Terence Sim', 'terence sim')</td><td></td></tr><tr><td>633101e794d7b80f55f466fd2941ea24595e10e6</td><td>In submission to IEEE conference
+<br/>Face Attribute Prediction with classification CNN
+<br/>FACE ATTRIBUTE PREDICTION WITH
+<br/>CLASSIFICATION CNN
+<br/>Computer Science and Communication
+<br/><b>KTH Royal Institute of Technology</b><br/>100 44 Stockholm, Sweden
+</td><td>('50262049', 'Yang Zhong', 'yang zhong')<br/>('1736906', 'Josephine Sullivan', 'josephine sullivan')<br/>('40565290', 'Haibo Li', 'haibo li')</td><td>{yzhong, sullivan, haiboli}@kth.se
+</td></tr><tr><td>63a2e2155193dc2da9764ae7380cdbd044ff2b94</td><td>A Dense SURF and Triangulation based
+<br/>Spatio-Temporal Feature for Action Recognition
+<br/><b>The University of Electro-Communications</b><br/>Chofu, Tokyo 182-8585 JAPAN
+</td><td>('2274625', 'Do Hang Nga', 'do hang nga')<br/>('1681659', 'Keiji Yanai', 'keiji yanai')</td><td>fdohang,yanaig@mm.cs.uec.ac.jp
+</td></tr><tr><td>63d865c66faaba68018defee0daf201db8ca79ed</td><td>Deep Regression for Face Alignment
+<br/>1Dept. of Electronics and Information Engineering, Huazhong Univ. of Science and Technology, China
+<br/>2Microsoft Research, Beijing, China
+</td><td>('2276155', 'Baoguang Shi', 'baoguang shi')<br/>('1688516', 'Jingdong Wang', 'jingdong wang')</td><td>shibaoguang@gmail.com,{xbai,liuwy}@hust.edu.cn,jingdw@microsoft.com
+</td></tr><tr><td>63cff99eff0c38b633c8a3a2fec8269869f81850</td><td>Feature Correlation Filter for Face Recognition
+<br/>Center for Biometrics and Security Research & National Laboratory of Pattern
+<br/>Recognition,
+<br/><b>Institute of Automation, Chinese Academy of Sciences</b><br/>95 Zhongguancun East Road, 100080 Beijing, China
+<br/>http://www.cbsr.ia.ac.cn
+</td><td>('32015491', 'XiangXin Zhu', 'xiangxin zhu')<br/>('40397682', 'Shengcai Liao', 'shengcai liao')<br/>('1718623', 'Zhen Lei', 'zhen lei')<br/>('3168566', 'Rong Liu', 'rong liu')<br/>('34679741', 'Stan Z. Li', 'stan z. li')</td><td>{xxzhu,scliao,zlei,rliu,szli}@nlpr.ia.ac.cn
+</td></tr><tr><td>634541661d976c4b82d590ef6d1f3457d2857b19</td><td>AAllmmaa MMaatteerr SSttuuddiioorruumm –– UUnniivveerrssiittàà ddii BBoollooggnnaa
+<br/>in cotutela con Università di Sassari
+<br/>DOTTORATO DI RICERCA IN
+<br/>INGEGNERIA ELETTRONICA, INFORMATICA E DELLE
+<br/>TELECOMUNICAZIONI
+<br/>Ciclo XXVI
+<br/>Settore Concorsuale di afferenza: 09/H1
+<br/>Settore Scientifico disciplinare: ING-INF/05
+<br/>ADVANCED TECHNIQUES FOR FACE RECOGNITION
+<br/>UNDER CHALLENGING ENVIRONMENTS
+<br/>TITOLO TESI
+<br/>Presentata da:
+<br/>Coordinatore Dottorato
+<br/>ALESSANDRO VANELLI-CORALLI
+<br/>
+<br/>Relatore
+<br/> DAVIDE MALTONI
+<br/>Relatore
+<br/> MASSIMO TISTARELLI
+<br/>Esame finale anno 2014
+</td><td>('2384894', 'Yunlian Sun', 'yunlian sun')</td><td></td></tr><tr><td>6332a99e1680db72ae1145d65fa0cccb37256828</td><td>MASTER IN COMPUTER VISION AND ARTIFICIAL INTELLIGENCE
+<br/>REPORT OF THE RESEARCH PROJECT
+<br/>OPTION: COMPUTER VISION
+<br/>Pose and Face Recovery via
+<br/>Spatio-temporal GrabCut Human
+<br/>Segmentation
+<br/>Date: 13/07/2010
+</td><td>('4765407', 'Antonio Hernández Vela', 'antonio hernández vela')<br/>('10722928', 'Sergio Escalera Guerrero', 'sergio escalera guerrero')</td><td></td></tr><tr><td>63488398f397b55552f484409b86d812dacde99a</td><td>Learning Universal Multi-view Age Estimator by Video Contexts
+<br/><b>2 School of Computing, National University of Singapore</b><br/>3 Advanced Digital Sciences Center, Singapore; 4 Facebook
+</td><td>('1964516', 'Zheng Song', 'zheng song')<br/>('5796401', 'Bingbing Ni', 'bingbing ni')<br/>('39034731', 'Dong Guo', 'dong guo')<br/>('1715286', 'Terence Sim', 'terence sim')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')</td><td>{zheng.s, eleyans}@nus.edu.sg, bingbing.ni@adsc.com.sg, dnguo@fb.com, tsim@comp.nus.edu.sg
+</td></tr><tr><td>6341274aca0c2977c3e1575378f4f2126aa9b050</td><td>A Multi-Scale Cascade Fully Convolutional
+<br/>Network Face Detector
+<br/><b>Institute for Robotics and Intelligent Systems</b><br/><b>University of Southern California</b><br/>Los Angeles, California 90089
+</td><td>('3469030', 'Zhenheng Yang', 'zhenheng yang')<br/>('1694832', 'Ramakant Nevatia', 'ramakant nevatia')</td><td>Email:(cid:8)zhenheny,nevatia(cid:9)@usc.edu
+</td></tr><tr><td>63c022198cf9f084fe4a94aa6b240687f21d8b41</td><td>425
+</td><td></td><td></td></tr><tr><td>632441c9324cd29489cee3da773a9064a46ae26b</td><td>Video-based Cardiac Physiological Measurements Using
+<br/>Joint Blind Source Separation Approaches
+<br/>by
+<br/><b>B. Eng., Zhejiang University</b><br/>A THESIS SUBMITTED IN PARTIAL FULFILLMENT
+<br/>OF THE REQUIREMENTS FOR THE DEGREE OF
+<br/>Master of Applied Science
+<br/>in
+<br/>THE FACULTY OF GRADUATE AND POSTDOCTORAL
+<br/>STUDIES
+<br/>(Electrical and Computer Engineering)
+<br/><b>The University of British Columbia</b><br/>(Vancouver)
+<br/>July 2015
+</td><td>('33064881', 'Huan Qi', 'huan qi')<br/>('33064881', 'Huan Qi', 'huan qi')</td><td></td></tr><tr><td>0f65c91d0ed218eaa7137a0f6ad2f2d731cf8dab</td><td>Multi-Directional Multi-Level Dual-Cross
+<br/>Patterns for Robust Face Recognition
+</td><td>('37990555', 'Changxing Ding', 'changxing ding')<br/>('3826759', 'Jonghyun Choi', 'jonghyun choi')<br/>('1692693', 'Dacheng Tao', 'dacheng tao')<br/>('1693428', 'Larry S. Davis', 'larry s. davis')</td><td></td></tr><tr><td>0f112e49240f67a2bd5aaf46f74a924129f03912</td><td>947
+<br/>Age-Invariant Face Recognition
+</td><td>('2222919', 'Unsang Park', 'unsang park')<br/>('3225345', 'Yiying Tong', 'yiying tong')<br/>('6680444', 'Anil K. Jain', 'anil k. jain')</td><td></td></tr><tr><td>0fc254272db096a9305c760164520ad9914f4c9e</td><td>UNSUPERVISED CONVOLUTIONAL NEURAL NETWORKS FOR MOTION ESTIMATION
+<br/>School of Electronic Engineering and Computer Science
+<br/><b>Queen Mary University of London</b><br/>Mile End road, E1 4NS, London, UK
+</td><td>('29946980', 'Aria Ahmadi', 'aria ahmadi')<br/>('1744405', 'Ioannis Patras', 'ioannis patras')</td><td></td></tr><tr><td>0fae5d9d2764a8d6ea691b9835d497dd680bbccd</td><td>Face Recognition using Canonical Correlation Analysis
+<br/>Department of Electrical Engineering
+<br/><b>Indian Institute of Technology, Madras</b><br/>Department of Electrical Engineering
+<br/><b>Indian Institute of Technology, Madras</b></td><td>('37274547', 'Amit C. Kale', 'amit c. kale')<br/>('4436239', 'R. Aravind', 'r. aravind')</td><td>ee04s043@ee.iitm.ac.in
+<br/>aravind@tenet.res.in
+</td></tr><tr><td>0f4cfcaca8d61b1f895aa8c508d34ad89456948e</td><td>LOCAL APPEARANCE BASED FACE RECOGNITION USING
+<br/>DISCRETE COSINE TRANSFORM (WedPmPO4)
+<br/>Author(s) :
+</td><td></td><td></td></tr><tr><td>0fdcfb4197136ced766d538b9f505729a15f0daf</td><td>Multiple Pattern Classification by Sparse Subspace Decomposition
+<br/><b>Institute of Media and Information Technology, Chiba University</b><br/>1-33 Yayoi, Inage, Chiba, Japan
+</td><td>('1688743', 'Tomoya Sakai', 'tomoya sakai')</td><td>tsakai@faculty.chiba-u.jp
+</td></tr><tr><td>0fad544edfc2cd2a127436a2126bab7ad31ec333</td><td>Decorrelating Semantic Visual Attributes by Resisting the Urge to Share
+<br/>UT Austin
+<br/>USC
+<br/>UT Austin
+</td><td>('2228235', 'Dinesh Jayaraman', 'dinesh jayaraman')<br/>('1693054', 'Fei Sha', 'fei sha')<br/>('1794409', 'Kristen Grauman', 'kristen grauman')</td><td>dineshj@cs.utexas.edu
+<br/>feisha@usc.edu
+<br/>grauman@cs.utexas.edu
+</td></tr><tr><td>0f32df6ae76402b98b0823339bd115d33d3ec0a0</td><td>Emotion recognition from embedded bodily
+<br/>expressions and speech during dyadic interactions
+</td><td>('40404576', 'Sikandar Amin', 'sikandar amin')<br/>('2766593', 'Prateek Verma', 'prateek verma')<br/>('1906895', 'Mykhaylo Andriluka', 'mykhaylo andriluka')<br/>('3194727', 'Andreas Bulling', 'andreas bulling')</td><td>∗Max Planck Institute for Informatics, Germany, {pmueller,andriluk,bulling}@mpi-inf.mpg.de
+<br/>†Stanford University, USA, prateekv@stanford.edu
+<br/>‡Technical University of Munich, Germany, sikandar.amin@in.tum.de
+</td></tr><tr><td>0fd1715da386d454b3d6571cf6d06477479f54fc</td><td>J Intell Robot Syst (2016) 82:101–133
+<br/>DOI 10.1007/s10846-015-0259-2
+<br/>A Survey of Autonomous Human Affect Detection Methods
+<br/>for Social Robots Engaged in Natural HRI
+<br/>Received: 10 December 2014 / Accepted: 11 August 2015 / Published online: 23 August 2015
+<br/>© Springer Science+Business Media Dordrecht 2015
+</td><td>('2929516', 'Derek McColl', 'derek mccoll')<br/>('31839336', 'Naoaki Hatakeyama', 'naoaki hatakeyama')<br/>('1719617', 'Beno Benhabib', 'beno benhabib')</td><td></td></tr><tr><td>0f9bf5d8f9087fcba419379600b86ae9e9940013</td><td></td><td></td><td></td></tr><tr><td>0f829fee12e86f980a581480a9e0cefccb59e2c5</td><td>Bird Part Localization Using Exemplar-Based Models with Enforced
+<br/>Pose and Subcategory Consistency
+<br/><b>Columbia University</b><br/>Problem
+<br/>The goal of our work is to localize the parts au-
+<br/>tomatically and accurately for fine-grained cate-
+<br/>gories. We evaluate our method on bird images in
+<br/>the CUB-200-2011 [1] dataset.
+<br/>Pipeline
+<br/>Approach
+<br/>Subcategory Detectors
+<br/>Localization Examples
+<br/>(1) Sliding-window detection. (2) Matching and ranking exemplars. (3) Predicting the final part configuration.
+<br/>Does Xk,t match the image I? ⇐⇒ P (Xk,t|I) =?
+<br/>k, si
+<br/>k,t|di
+<br/>k,t])}
+<br/>P (Xk,t|I) = P (Xk,t|Dp)αP (Xk,t|Ds)1−α
+<br/>P (Xk,t|Dp) = Gavg{P (xi
+<br/>P (Xk,t|Ds) = max
+<br/>P (Xk,t|l, Ds) = Gavg{P (xi
+<br/>(1)
+<br/>(2)
+<br/>(3)
+<br/>k,t])} (4)
+<br/>We use the most likely models M to predict the
+<br/>part locations of the testing sample:
+<br/>k,t)P (xi|di
+<br/>p[ci
+<br/>P (Xk,t|l, Ds)
+<br/>s[l, si
+<br/>ˆxi = arg max
+<br/>(cid:88)
+<br/>P ((cid:52)xi
+<br/>k,t]) (5)
+<br/>k,t|di
+<br/>p[ci
+<br/>k, si
+<br/>k,t, θi
+<br/>xi
+<br/>k,t∈M
+<br/>Species 1
+<br/>Species 2
+<br/>Species 3
+<br/>Subcategory clusters of Back
+<br/>For each species l of part i, we build a detector after
+<br/>aligning the samples. Assuming the detector scans
+<br/>the image over scales and orientations, then the re-
+<br/>sponse map of this detector at a particular scale si
+<br/>and orientation θi is denoted as di
+<br/>Enforcing Consistency
+<br/>P (xi
+<br/>P (xi
+<br/>s[l, si, θi].
+<br/>s[l, si
+<br/>k,t, θi
+<br/>k,t|di
+<br/>k,t|di
+<br/>p[ci
+<br/>k, si
+<br/>k,t])
+<br/>k,t])
+<br/>Pose Detectors
+<br/>Pose 1
+<br/>Pose 2
+<br/>Pose 3
+<br/>Poses clusters of Back
+<br/>For each pose cluster ci of part i, we build a de-
+<br/>tector. The detector scans the image over scales,
+<br/>and the response map of this detector at a particu-
+<br/>lar scale si is denoted as di
+<br/>p[ci, si].
+<br/>References
+<br/>[1] C. Wah, S. Branson, P. Welinder, P. Perona, S. Belongie. The
+<br/>Caltech-UCSD Birds-200-2011 Dataset. Computation & Neu-
+<br/>ral Systems Technical Report, CNS-TR-2011-001, 2011
+<br/>[2] P. N. Belhumeur, D. W. Jacobs, D. J. Kriegman, N. Kumar.
+<br/>Localizing Parts of Faces Using a Consensus of Exemplars.
+<br/>In CVPR ’11
+<br/>Comparisons
+<br/>PCP
+<br/>Back
+<br/>Beak
+<br/>Belly
+<br/>Breast
+<br/>Crown
+<br/>Forehead
+<br/>Left Eye
+<br/>Left Leg
+<br/>Left Wing
+<br/>Nape
+<br/>Right Eye
+<br/>Right Leg
+<br/>Right Wing
+<br/>Tail
+<br/>Throat
+<br/>Average
+<br/>CoE [2] Ours
+<br/>62.08
+<br/>46.29
+<br/>49.02
+<br/>43.08
+<br/>69.02
+<br/>54.44
+<br/>66.98
+<br/>54.19
+<br/>72.85
+<br/>64.69
+<br/>58.46
+<br/>51.48
+<br/>55.78
+<br/>47.53
+<br/>40.94
+<br/>29.67
+<br/>71.57
+<br/>59.58
+<br/>70.78
+<br/>58.91
+<br/>55.51
+<br/>46.50
+<br/>40.52
+<br/>29.03
+<br/>71.56
+<br/>58.47
+<br/>40.16
+<br/>27.77
+<br/>70.83
+<br/>58.89
+<br/>59.74
+<br/>48.70
+<br/>mAP
+<br/>Birdlets
+<br/>Template bagging
+<br/>Pose pooling
+<br/>Ours
+<br/>200 species
+<br/>14 species
+<br/>28.18
+<br/>44.13
+<br/>40.25
+<br/>44.73
+<br/>57.44
+<br/>62.42
+</td><td>('2454675', 'Jiongxin Liu', 'jiongxin liu')<br/>('1767767', 'Peter N. Belhumeur', 'peter n. belhumeur')</td><td>{liujx09, belhumeur}@cs.columbia.edu
+</td></tr><tr><td>0faee699eccb2da6cf4307ded67ba8434368257b</td><td>TAIGMAN: MULTIPLE ONE-SHOTS FOR UTILIZING CLASS LABEL INFORMATION
+<br/>Multiple One-Shots for Utilizing Class Label
+<br/>Information
+<br/>1 The Blavatnik School of Computer
+<br/>Science,
+<br/><b>Tel-Aviv University, Israel</b><br/>2 Computer Science Division,
+<br/><b>The Open University of Israel</b><br/>3 face.com
+<br/>Tel-Aviv, Israel
+</td><td>('2188620', 'Yaniv Taigman', 'yaniv taigman')<br/>('1776343', 'Lior Wolf', 'lior wolf')<br/>('1756099', 'Tal Hassner', 'tal hassner')</td><td>yaniv@face.com
+<br/>wolf@cs.tau.ac.il
+<br/>hassner@openu.ac.il
+</td></tr><tr><td>0fabb4a40f2e3a2502cd935e54e090a304006c1c</td><td>Regularized Robust Coding for Face Recognition
+<br/><b>The Hong Kong Polytechnic University, Hong Kong, China</b><br/>bSchool of Computer Science and Technology, Nanjing Univ. of Science and Technology, Nanjing, China
+</td><td>('5828998', 'Meng Yang', 'meng yang')<br/>('36685537', 'Lei Zhang', 'lei zhang')<br/>('37081450', 'Jian Yang', 'jian yang')<br/>('1698371', 'David Zhang', 'david zhang')</td><td></td></tr><tr><td>0f92e9121e9c0addc35eedbbd25d0a1faf3ab529</td><td>MORPH-II: A Proposed Subsetting Scheme
+<br/>NSF-REU Site at UNC Wilmington, Summer 2017
+</td><td>('1940145', 'K. Park', 'k. park')<br/>('11134292', 'Y. Wang', 'y. wang')<br/>('1693283', 'C. Chen', 'c. chen')<br/>('3369885', 'T. Kling', 't. kling')</td><td></td></tr><tr><td>0f0366070b46972fcb2976775b45681e62a94a26</td><td>Reliable Posterior Probability Estimation for Streaming Face Recognition
+<br/><b>University of Colorado at Colorado Springs</b><br/>Terrance Boult
+<br/><b>University of Colorado at Colorado Springs</b></td><td>('3274223', 'Abhijit Bendale', 'abhijit bendale')</td><td>abendale@vast.uccs.edu
+<br/>tboult@vast.uccs.edu
+</td></tr><tr><td>0ff23392e1cb62a600d10bb462d7a1f171f579d0</td><td>Toward Sparse Coding on Cosine
+<br/>Distance
+<br/>Jonghyun Choi, Hyunjong Cho, Jungsuk Kwak#,
+<br/>Larry S. Davis
+<br/><b>UMIACS | University of Maryland, College Park</b><br/><b>Stanford University</b></td><td></td><td></td></tr><tr><td>0fd3a7ee228bbc3dd4a111dae04952a1ee58a8cd</td><td>Hair Style Retrieval by Semantic Mapping on
+<br/>Informative Patches
+<br/><b>Tsinghua University, Beijing, China</b></td><td>('38081719', 'Nan Wang', 'nan wang')<br/>('1679380', 'Haizhou Ai', 'haizhou ai')</td><td>wang-n04@mails.tsinghua.edu.cn, ahz@mail.tsinghua.edu.cn
+</td></tr><tr><td>0f533bc9fdfb75a3680d71c84f906bbd59ee48f1</td><td>Illumination Invariant Feature Extraction Based on Natural Images Statistics –
+<br/>Taking Face Images as An Example
+<br/><b>Research Center for Information Technology Innovation, Academia Sinica, Taipei, Taiwan</b><br/><b>National Taiwan University, Taipei, Taiwan</b></td><td>('2314709', 'Lu-Hung Chen', 'lu-hung chen')<br/>('1934873', 'Yao-Hsiang Yang', 'yao-hsiang yang')<br/>('1720473', 'Chu-Song Chen', 'chu-song chen')<br/>('2809590', 'Ming-Yen Cheng', 'ming-yen cheng')</td><td>luhung.chen,yhyang@statistics.twbbs.org song@iis.sincia.edu.tw
+<br/>cheng@math.ntu.edu.tw
+</td></tr><tr><td>0f4eb63402a4f3bae8f396e12133684fb760def1</td><td>LONG, LIU, SHAO: ATTRIBUTE EMBEDDING WITH VSAR FOR ZERO-SHOT LEARNING 1
+<br/>Attribute Embedding with Visual-Semantic
+<br/>Ambiguity Removal for Zero-shot Learning
+<br/>1 Department of Electronic and Electrical
+<br/>Engineering
+<br/><b>The University of Shef eld</b><br/>Sheffield , UK
+<br/>2 Department of Computer and
+<br/>Information Sciences
+<br/><b>Northumbria University</b><br/>Newcastle upon Tyne, UK
+</td><td>('39650869', 'Yang Long', 'yang long')<br/>('40017778', 'Li Liu', 'li liu')<br/>('40799321', 'Ling Shao', 'ling shao')</td><td>ylong2@sheffield.ac.uk
+<br/>li2.liu@northumbria.ac.uk
+<br/>ling.shao@ieee.org
+</td></tr><tr><td>0fba39bf12486c7684fd3d51322e3f0577d3e4e8</td><td>Task Specific Local Region Matching
+<br/>Department of Computer Science and Engineering
+<br/><b>University of California, San Diego</b></td><td>('2490700', 'Boris Babenko', 'boris babenko')</td><td>{bbabenko,pdollar,sjb}@cs.ucsd.edu
+</td></tr><tr><td>0f395a49ff6cbc7e796656040dbf446a40e300aa</td><td>ORIGINAL RESEARCH
+<br/>published: 22 December 2015
+<br/>doi: 10.3389/fpsyg.2015.01937
+<br/>The Change of Expression
+<br/>Configuration Affects
+<br/>Identity-Dependent Expression
+<br/>Aftereffect but Not
+<br/>Identity-Independent Expression
+<br/>Aftereffect
+<br/><b>College of Information Engineering, Shanghai Maritime University, Shanghai, China, 2 School of Information, Kochi University</b><br/><b>of Technology, Kochi, Japan, 3 Yunnan Key Laboratory of Computer Technology Applications, Kunming University of Science</b><br/>and Technology, Kunming, China
+<br/>The present study examined the influence of expression configuration on cross-identity
+<br/>expression aftereffect. The expression configuration refers to the spatial arrangement
+<br/>of facial features in a face for conveying an emotion, e.g., an open-mouth smile vs.
+<br/>a closed-mouth smile. In the first of two experiments, the expression aftereffect is
+<br/>measured using a cross-identity/cross-expression configuration factorial design. The
+<br/>facial
+<br/>identities of test faces were the same or different from the adaptor, while
+<br/>orthogonally, the expression configurations of those facial identities were also the same
+<br/>or different. The results show that the change of expression configuration impaired
+<br/>the expression aftereffect when the facial
+<br/>identities of adaptor and tests were the
+<br/>same; however, the impairment effect disappears when facial identities were different,
+<br/>indicating the identity-independent expression representation is more robust to the
+<br/>change of the expression configuration in comparison with the identity-dependent
+<br/>expression representation. In the second experiment, we used schematic line faces
+<br/>as adaptors and real faces as tests to minimize the similarity between the adaptor
+<br/>and tests, which is expected to exclude the contribution from the identity-dependent
+<br/>expression representation to expression aftereffect. The second experiment yields a
+<br/>similar result as the identity-independent expression aftereffect observed in Experiment 1.
+<br/>The findings indicate the different neural sensitivities to expression configuration for
+<br/>identity-dependent and identity-independent expression systems.
+<br/>Keywords: facial expression, adaptation, aftereffect, visual representation, vision
+<br/>INTRODUCTION
+<br/>One key issue in face study is to understand how emotional expression is represented in the
+<br/>human visual system. According to the classical cognitive model (Bruce and Young, 1986) and
+<br/>neural model (Haxby et al., 2000), emotional expression is consider to be represented and
+<br/>processed independent of facial identity. This view is supported by several lines of evidence.
+<br/>Edited by:
+<br/>Wenfeng Chen,
+<br/><b>Institute of Psychology, Chinese</b><br/>Academy of Sciences, China
+<br/>Reviewed by:
+<br/>Marianne Latinus,
+<br/>Aix Marseille Université, France
+<br/>Jan Van den Stock,
+<br/>KU Leuven, Belgium
+<br/>*Correspondence:
+<br/>Specialty section:
+<br/>This article was submitted to
+<br/>Emotion Science,
+<br/>a section of the journal
+<br/>Frontiers in Psychology
+<br/>Received: 03 January 2015
+<br/>Accepted: 02 December 2015
+<br/>Published: 22 December 2015
+<br/>Citation:
+<br/>Song M, Shinomori K, Qian Q, Yin J
+<br/>and Zeng W (2015) The Change of
+<br/>Expression Configuration Affects
+<br/>Identity-Dependent Expression
+<br/>Aftereffect but Not
+<br/>Identity-Independent Expression
+<br/>Aftereffect. Front. Psychol. 6:1937.
+<br/>doi: 10.3389/fpsyg.2015.01937
+<br/>Frontiers in Psychology | www.frontiersin.org
+<br/>December 2015 | Volume 6 | Article 1937
+</td><td>('1692572', 'Miao Song', 'miao song')<br/>('1970678', 'Keizo Shinomori', 'keizo shinomori')<br/>('2431558', 'Qian Qian', 'qian qian')<br/>('40596849', 'Jun Yin', 'jun yin')<br/>('2161630', 'Weiming Zeng', 'weiming zeng')<br/>('1692572', 'Miao Song', 'miao song')</td><td>songmiaolm@gmail.com
+</td></tr><tr><td>0fb8317a8bf5feaf297af8e9b94c50c5ed0e8277</td><td>Detecting Hands in Egocentric Videos: Towards
+<br/>Action Recognition
+<br/>Gran Via de les Corts Catalanes, 585, 08007 Barcelona, Spain
+<br/><b>University of Barcelona</b><br/>2 Computer Vision Centre,
+<br/>Campus UAB, 08193 Cerdanyola del Valls, Barcelona, Spain
+</td><td>('1901010', 'Alejandro Cartas', 'alejandro cartas')<br/>('2837527', 'Mariella Dimiccoli', 'mariella dimiccoli')<br/>('1724155', 'Petia Radeva', 'petia radeva')</td><td>alejandro.cartas@ub.edu
+</td></tr><tr><td>0fe96806c009e8d095205e8f954d41b2b9fd5dcf</td><td>On-the-Job Learning with Bayesian Decision Theory
+<br/>Department of Computer Science
+<br/><b>Stanford University</b><br/>Arun Chaganty
+<br/>Department of Computer Science
+<br/><b>Stanford University</b><br/>Department of Computer Science
+<br/><b>Stanford University</b><br/>Department of Computer Science
+<br/><b>Stanford University</b></td><td>('2795219', 'Keenon Werling', 'keenon werling')<br/>('40085065', 'Percy Liang', 'percy liang')<br/>('1812612', 'Christopher D. Manning', 'christopher d. manning')</td><td>keenon@cs.stanford.edu
+<br/>chaganty@cs.stanford.edu
+<br/>pliang@cs.stanford.edu
+<br/>manning@cs.stanford.edu
+</td></tr><tr><td>0f940d2cdfefc78c92ec6e533a6098985f47a377</td><td>A Hierarchical Framework for Simultaneous Facial Activity Tracking
+<br/>Department of Electrical,Computer and System Engineering
+<br/><b>Rensselaer Polytechnic Institute</b><br/>Troy, NY 12180
+</td><td>('1713712', 'Jixu Chen', 'jixu chen')<br/>('1726583', 'Qiang Ji', 'qiang ji')</td><td>chenj4@rpi.edu
+<br/>qji@ecse.rpi.edu
+</td></tr><tr><td>0f21a39fa4c0a19c4a5b4733579e393cb1d04f71</td><td>Evaluation of optimization
+<br/>components of a 3D to 2D
+<br/>landmark fitting algorithm for
+<br/>head pose estimation
+<br/>11029668
+<br/>Bachelor thesis
+<br/>Credits: 18 EC
+<br/>Bachelor Opleiding Kunstmatige Intelligentie
+<br/><b>University of Amsterdam</b><br/>Faculty of Science
+<br/>Science Park 904
+<br/>1098 XH Amsterdam
+<br/>Supervisors
+<br/>dr. Sezer Karaoglu
+<br/>MSc. Minh Ngo
+<br/><b>Informatics Institute</b><br/>Faculty of Science
+<br/><b>University of Amsterdam</b><br/>Science Park 904
+<br/>1090 GH Amsterdam
+<br/>June 29th, 2018
+</td><td></td><td></td></tr><tr><td>0fd1bffb171699a968c700f206665b2f8837d953</td><td>Weakly Supervised Object Localization with
+<br/>Multi-fold Multiple Instance Learning
+</td><td>('1939006', 'Ramazan Gokberk Cinbis', 'ramazan gokberk cinbis')<br/>('34602236', 'Jakob Verbeek', 'jakob verbeek')<br/>('2462253', 'Cordelia Schmid', 'cordelia schmid')</td><td></td></tr><tr><td>0faeec0d1c51623a511adb779dabb1e721a6309b</td><td>Seeing is Worse than Believing: Reading
+<br/>People’s Minds Better than Computer-Vision
+<br/>Methods Recognize Actions
+<br/>1 MIT, Cambridge, MA, USA
+<br/><b>Purdue University, West Lafayette, IN, USA</b><br/>3 SUNY Buffalo, Buffalo, NY, USA
+<br/><b>Stanford University, Stanford, CA, USA</b><br/><b>University of California at Los Angeles, Los Angeles, CA, USA</b><br/><b>University of Michigan, Ann Arbor, MI, USA</b><br/><b>Princeton University, Princeton, NJ, USA</b><br/><b>Rutgers University, Newark, NJ, USA</b><br/><b>University of Texas at Arlington, Arlington, TX, USA</b><br/><b>National University of Ireland Maynooth, Co. Kildare, Ireland</b></td><td>('21570451', 'Andrei Barbu', 'andrei barbu')<br/>('1728624', 'Wei Chen', 'wei chen')<br/>('2228109', 'Caiming Xiong', 'caiming xiong')<br/>('3587688', 'Jason J. Corso', 'jason j. corso')<br/>('2663295', 'Christiane D. Fellbaum', 'christiane d. fellbaum')<br/>('32218165', 'Catherine Hanson', 'catherine hanson')<br/>('20009336', 'Evguenia Malaia', 'evguenia malaia')<br/>('1700974', 'Barak A. Pearlmutter', 'barak a. pearlmutter')<br/>('2465833', 'Ronnie B. Wilbur', 'ronnie b. wilbur')</td><td>andrei@0xab.com
+<br/>{dpbarret,shelie,qobi,tmt,wilbur}@purdue.edu
+<br/>wchen23@buffalo.edu
+<br/>nsid@stanford.edu
+<br/>caimingxiong@ucla.edu
+<br/>jjcorso@eecs.umich.edu
+<br/>fellbaum@princeton.edu
+<br/>{cat,jose}@psychology.rutgers.edu
+<br/>malaia@uta.edu
+<br/>barak@cs.nuim.ie
+</td></tr><tr><td>0f81b0fa8df5bf3fcfa10f20120540342a0c92e5</td><td>Mirror, mirror on the wall, tell me, is the error small?
+<br/><b>Queen Mary University of London</b><br/><b>Queen Mary University of London</b></td><td>('2966679', 'Heng Yang', 'heng yang')<br/>('1744405', 'Ioannis Patras', 'ioannis patras')</td><td>heng.yang@qmul.ac.uk
+<br/>i.patras@qmul.ac.uk
+</td></tr><tr><td>0f0241124d6092a0bb56259ac091467c2c6938ca</td><td>Associating Faces and Names in Japanese Photo News Articles on the Web
+<br/><b>The University of Electro-Communications, JAPAN</b></td><td>('32572703', 'Akio Kitahara', 'akio kitahara')<br/>('2558848', 'Taichi Joutou', 'taichi joutou')<br/>('1681659', 'Keiji Yanai', 'keiji yanai')</td><td></td></tr><tr><td>0a6d344112b5af7d1abbd712f83c0d70105211d0</td><td>Constrained Local Neural Fields for robust facial landmark detection in the wild
+<br/>Tadas Baltruˇsaitis
+<br/><b>University of Cambridge Computer Laboratory</b><br/><b>USC Institute for Creative Technologies</b><br/>15 JJ Thomson Avenue
+<br/>12015 Waterfront Drive
+</td><td>('40609287', 'Peter Robinson', 'peter robinson')<br/>('1767184', 'Louis-Philippe Morency', 'louis-philippe morency')</td><td>tb346@cl.cam.ac.uk
+<br/>pr10@cl.cam.ac.uk
+<br/>morency@ict.usc.edu
+</td></tr><tr><td>0a64f4fec592662316764283575d05913eb2135b</td><td>Joint Pixel and Feature-level Domain Adaptation in the Wild
+<br/><b>Michigan State University</b><br/>2NEC Labs America
+<br/>3UC San Diego
+</td><td>('1849929', 'Luan Tran', 'luan tran')</td><td></td></tr><tr><td>0a0321785c8beac1cbaaec4d8ad0cfd4a0d6d457</td><td>Proceedings of the Thirty-First AAAI Conference on Artificial Intelligence (AAAI-17)
+<br/>Learning Invariant Deep Representation
+<br/>for NIR-VIS Face Recognition
+<br/>National Laboratory of Pattern Recognition, CASIA
+<br/>Center for Research on Intelligent Perception and Computing, CASIA
+<br/>Center for Excellence in Brain Science and Intelligence Technology, CAS
+<br/><b>University of Chinese Academy of Sciences, Beijing 100190, China</b></td><td>('1705643', 'Ran He', 'ran he')<br/>('2225749', 'Xiang Wu', 'xiang wu')<br/>('1757186', 'Zhenan Sun', 'zhenan sun')<br/>('1688870', 'Tieniu Tan', 'tieniu tan')</td><td>{rhe,znsun,tnt}@nlpr.ia.ac.cn, alfredxiangwu@gmail.com
+</td></tr><tr><td>0a2ddf88bd1a6c093aad87a8c7f4150bfcf27112</td><td>Patch-based Models For Visual Object Classes
+<br/>A dissertation submitted in partial fulfilment
+<br/>of the requirements for the degree of
+<br/>Doctor of Philosophy
+<br/>at
+<br/><b>University College London</b><br/>Department of Computer Science
+<br/><b>University College London</b><br/>February 24, 2011
+</td><td>('1904148', 'Jania Aghajanian', 'jania aghajanian')</td><td></td></tr><tr><td>0a5ffc55b584da7918c2650f9d8602675d256023</td><td>Efficient Face Alignment via Locality-constrained Representation for Robust
+<br/>Recognition
+<br/><b>School of Electronic and Information Engineering, South China University of Technology</b><br/><b>School of Electronic and Computer Engineering, Peking University</b><br/><b>School of Computer Science and Software Engineering, Shenzhen University</b><br/>4SIAT, Chinese Academy of Sciences
+</td><td>('36326884', 'Weiyang Liu', 'weiyang liu')</td><td></td></tr><tr><td>0aeb5020003e0c89219031b51bd30ff1bceea363</td><td>Sparsifying Neural Network Connections for Face Recognition
+<br/>1SenseTime Group
+<br/><b>The Chinese University of Hong Kong</b><br/><b>The Chinese University of Hong Kong</b><br/><b>Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences</b></td><td>('1681656', 'Yi Sun', 'yi sun')<br/>('31843833', 'Xiaogang Wang', 'xiaogang wang')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td>sunyi@sensetime.com
+<br/>xgwang@ee.cuhk.edu.hk
+<br/>xtang@ie.cuhk.edu.hk
+</td></tr><tr><td>0a511058edae582e8327e8b9d469588c25152dc6</td><td></td><td></td><td></td></tr><tr><td>0a4f3a423a37588fde9a2db71f114b293fc09c50</td><td></td><td></td><td></td></tr><tr><td>0aa74ad36064906e165ac4b79dec298911a7a4db</td><td>Variational Inference for the Indian Buffet Process
+<br/>Engineering Department
+<br/><b>Cambridge University</b><br/>Cambridge, UK
+<br/>Engineering Department
+<br/><b>Cambridge University</b><br/>Cambridge, UK
+<br/>Gatsby Unit
+<br/><b>University College London</b><br/>London, UK
+<br/>Kurt T. Miller∗
+<br/>Computer Science Division
+<br/><b>University of California, Berkeley</b><br/>Berkeley, CA
+</td><td>('2292194', 'Finale Doshi-Velez', 'finale doshi-velez')<br/>('1689857', 'Jurgen Van Gael', 'jurgen van gael')<br/>('1725303', 'Yee Whye Teh', 'yee whye teh')</td><td></td></tr><tr><td>0abf67e7bd470d9eb656ea2508beae13ca173198</td><td>Going Deeper into First-Person Activity Recognition
+<br/><b>Carnegie Mellon University</b><br/>Pittsburgh, PA 15213, USA
+</td><td>('2238622', 'Minghuang Ma', 'minghuang ma')<br/>('2681569', 'Haoqi Fan', 'haoqi fan')<br/>('37991449', 'Kris M. Kitani', 'kris m. kitani')</td><td>minghuam@andrew.cmu.edu haoqif@andrew.cmu.edu kkitani@cs.cmu.edu
+</td></tr><tr><td>0af33f6b5fcbc5e718f24591b030250c6eec027a</td><td>Text Analysis for Automatic Image Annotation
+<br/>Interdisciplinary Centre for Law & IT
+<br/>Department of Computer Science
+<br/><b>Katholieke Universiteit Leuven</b><br/>Tiensestraat 41, 3000 Leuven, Belgium
+</td><td>('1797588', 'Koen Deschacht', 'koen deschacht')<br/>('1802161', 'Marie-Francine Moens', 'marie-francine moens')</td><td>{koen.deschacht,marie-france.moens}@law.kuleuven.ac.be
+</td></tr><tr><td>0a3863a0915256082aee613ba6dab6ede962cdcd</td><td>Early and Reliable Event Detection Using Proximity Space Representation
+<br/>LTCI, CNRS, T´el´ecom ParisTech, Universit´e Paris-Saclay, 75013, Paris, France
+<br/>J´erˆome Gauthier
+<br/>LADIS, CEA, LIST, 91191, Gif-sur-Yvette, France
+<br/>Normandie Universit´e, UR, LITIS EA 4108, Avenue de l’universit´e, 76801, Saint-Etienne-du-Rouvray, France
+</td><td>('2527457', 'Maxime Sangnier', 'maxime sangnier')<br/>('1792962', 'Alain Rakotomamonjy', 'alain rakotomamonjy')</td><td>MAXIME.SANGNIER@TELECOM-PARISTECH.FR
+<br/>JEROME.GAUTHIER@CEA.FR
+<br/>ALAIN.RAKOTO@INSA-ROUEN.FR
+</td></tr><tr><td>0a60d9d62620e4f9bb3596ab7bb37afef0a90a4f</td><td>Chimpanzee Faces in the Wild: Log-Euclidean CNNs for Predicting Identities and Attributes of Primates. GCPR 2016
+<br/>c(cid:13) Copyright by Springer. The final publication will be available at link.springer.com
+<br/>A. Freytag, E. Rodner, M. Simon, A. Loos, H. K¨uhl and J. Denzler
+<br/>Chimpanzee Faces in the Wild:
+<br/>Log-Euclidean CNNs for Predicting Identities
+<br/>and Attributes of Primates
+<br/><b>Computer Vision Group, Friedrich Schiller University Jena, Germany</b><br/>2Michael Stifel Center Jena, Germany
+<br/><b>Fraunhofer Institute for Digital Media Technology, Germany</b><br/><b>Max Planck Institute for Evolutionary Anthropology, Germany</b><br/>5German Centre for Integrative Biodiversity Research (iDiv), Germany
+</td><td>('1720839', 'Alexander Freytag', 'alexander freytag')<br/>('1679449', 'Erik Rodner', 'erik rodner')<br/>('49675890', 'Marcel Simon', 'marcel simon')<br/>('4572597', 'Alexander Loos', 'alexander loos')<br/>('1728382', 'Joachim Denzler', 'joachim denzler')</td><td></td></tr><tr><td>0a34fe39e9938ae8c813a81ae6d2d3a325600e5c</td><td>FacePoseNet: Making a Case for Landmark-Free Face Alignment
+<br/><b>Institute for Robotics and Intelligent Systems, USC, CA, USA</b><br/><b>Information Sciences Institute, USC, CA, USA</b><br/><b>The Open University of Israel, Israel</b></td><td>('1752756', 'Feng-Ju Chang', 'feng-ju chang')<br/>('46634688', 'Anh Tuan Tran', 'anh tuan tran')<br/>('1756099', 'Tal Hassner', 'tal hassner')<br/>('11269472', 'Iacopo Masi', 'iacopo masi')</td><td>{fengjuch,anhttran,iacopoma,nevatia,medioni}@usc.edu, hassner@isi.edu
+</td></tr><tr><td>0ad8149318912b5449085187eb3521786a37bc78</td><td>CP-mtML: Coupled Projection multi-task Metric Learning
+<br/>for Large Scale Face Retrieval
+<br/>Frederic Jurie1,∗
+<br/><b>University of Caen, France</b><br/>2MPI for Informatics, Germany
+<br/>3IIT Kanpur, India
+</td><td>('2078892', 'Binod Bhattarai', 'binod bhattarai')<br/>('2515597', 'Gaurav Sharma', 'gaurav sharma')</td><td></td></tr><tr><td>0a9d204db13d395f024067cf70ac19c2eeb5f942</td><td>Viewpoint-aware Video Summarization
+<br/><b>The University of Tokyo, 2RIKEN, 3ETH Z urich, 4KU Leuven</b></td><td>('2551640', 'Atsushi Kanehira', 'atsushi kanehira')<br/>('1681236', 'Luc Van Gool', 'luc van gool')<br/>('3250559', 'Yoshitaka Ushiku', 'yoshitaka ushiku')<br/>('1790553', 'Tatsuya Harada', 'tatsuya harada')</td><td></td></tr><tr><td>0aa9872daf2876db8d8e5d6197c1ce0f8efee4b7</td><td><b>Imperial College of Science, Technology and Medicine</b><br/>Department of Computing
+<br/>Timing is everything
+<br/>A spatio-temporal approach to the analysis of facial
+<br/>actions
+<br/>Michel Fran¸cois Valstar
+<br/>Submitted in part fulfilment of the requirements for the degree of
+<br/><b>Doctor of Philosophy in Computing of Imperial College, February</b></td><td></td><td></td></tr><tr><td>0aae88cf63090ea5b2c80cd014ef4837bcbaadd8</td><td>3D Face Structure Extraction from Images at Arbitrary Poses and under
+<br/>Arbitrary Illumination Conditions
+<br/>A Thesis
+<br/>Submitted to the Faculty
+<br/>Of
+<br/><b>Drexel University</b><br/>By
+<br/>In partial fulfillment of the
+<br/>Requirements for the degree
+<br/>Of
+<br/>Doctor of Philosophy
+<br/>October 2006
+</td><td>('40531119', 'Cuiping Zhang', 'cuiping zhang')</td><td></td></tr><tr><td>0a87d781fe2ae2e700237ddd00314dbc10b1429c</td><td>Distribution Statement A: Approved for public release; distribution unlimited.
+<br/>Multi-scale HOG Prescreening Algorithm for Detection of Buried
+<br/>Explosive Hazards in FL-IR and FL-GPR Data
+<br/><b>University of Missouri, Columbia, MO</b></td><td>('2741325', 'K. Stone', 'k. stone')<br/>('9187168', 'J. M. Keller', 'j. m. keller')</td><td></td></tr><tr><td>0ad90118b4c91637ee165f53d557da7141c3fde0</td><td></td><td></td><td></td></tr><tr><td>0a82860d11fcbf12628724333f1e7ada8f3cd255</td><td>Action Temporal Localization in Untrimmed Videos via Multi-stage CNNs
+<br/><b>Columbia University</b><br/>New York, NY, USA
+</td><td>('2195345', 'Zheng Shou', 'zheng shou')<br/>('2704179', 'Dongang Wang', 'dongang wang')<br/>('9546964', 'Shih-Fu Chang', 'shih-fu chang')</td><td>{zs2262,dw2648,sc250}@columbia.edu
+</td></tr><tr><td>0a4fc9016aacae9cdf40663a75045b71e64a70c9</td><td>JOURNAL OF INFORMATION SCIENCE AND ENGINEERING XX, XXX-XXX (201X)
+<br/> Illumination Normalization Based on
+<br/>Homomorphic Wavelet Filtering for Face Recognition
+<br/>1School of Electronic and Information Engineering
+<br/><b>Beijing Jiaotong University</b><br/>No.3 Shang Yuan Cun,Hai Dian District
+<br/>Beijing 100044,China
+<br/>2School of Physics Electrical Information Engineering
+<br/><b>Ningxia University</b><br/>Yinchuan Ningxia 750021,China
+<br/>Phone number: 086-010-51688165
+<br/>The performance of face recognition techniques is greatly challenged by the pose,
+<br/>expression and illumination of the image. For most existing systems, the recognition rate
+<br/>will decrease due to changes in environmental illumination. In this paper, a
+<br/>Homomorphic Wavelet-based Illumination Normalization (HWIN) method is proposed.
+<br/>The purpose of this method is to normalize the uneven illumination of the facial image.
+<br/>The image is analyzed in the logarithm domain with wavelet transform, the
+<br/>approximation coefficients of the image are mapped according to the reference
+<br/>illumination map in order to normalize the distribution of illumination energy resulting
+<br/>from different lighting effects, and the detail components are enhanced to achieve detail
+<br/>information emphasis. Then, a Difference of Gaussian (DoG) filter is also applied to
+<br/>reduce the noise resulting from different lighting effects, which exists on detail
+<br/>components. The proposed methods are implemented on Yale B and Extended Yale B
+<br/>facial databases. The experimental results show that the methods described in this study
+<br/>are capable of effectively eliminating the effects of uneven illumination and of greatly
+<br/>improving the recognition rate, and are therefore more effective than other popular
+<br/>methods.
+<br/>Keywords: face recognition; homomorphic filtering; wavelet transfer; illumination
+<br/>mapping
+<br/>1. INTRODUCTION
+<br/>Automatic face recognition has received significant attention over the past several
+<br/>decades due to its numerous potential applications, such as human-computer interfaces,
+<br/>access control, security and surveillance, e-commerce, entertainment, and so on. Related
+<br/>research performed in recent years has made great progress, and a number of face
+<br/>recognition systems have achieved strong results, as shown in the latest report of Face
+<br/>Recognition Vendor Test (FRVT, 2006). Despite this remarkable progress, face
+<br/>recognition still faces a challenging problem, which is its sensitivity to the dramatic
+<br/>variations among images of the same face. For example, facial expression, pose, ageing,
+<br/>make-up, background and illumination variations are all factors which may result in
+<br/>significant variations [1-26].
+<br/>Illumination variation is one of the most significant factors limiting the performance
+<br/>of face recognition. Since several images of the same person appear to be dramatically
+<br/>1
+</td><td>('2613621', 'Xue Yuan', 'xue yuan')<br/>('47884608', 'Yifei Meng', 'yifei meng')</td><td>E-mail: 10111045@bjtu.edu.cn
+</td></tr><tr><td>0a85afebaa19c80fddb660110a4352fd22eb2801</td><td>Neural Animation and Reenactment of Human Actor Videos
+<br/>Fig. 1. We propose a novel learning-based approach for the animation and reenactment of human actor videos. The top row shows some frames of the video
+<br/>We propose a method for generating (near) video-realistic animations of
+<br/>real humans under user control. In contrast to conventional human char-
+<br/>acter rendering, we do not require the availability of a production-quality
+<br/>photo-realistic 3D model of the human, but instead rely on a video sequence
+<br/>in conjunction with a (medium-quality) controllable 3D template model
+<br/>of the person. With that, our approach significantly reduces production
+<br/>cost compared to conventional rendering approaches based on production-
+<br/>quality 3D models, and can also be used to realistically edit existing videos.
+<br/>Technically, this is achieved by training a neural network that translates
+<br/>simple synthetic images of a human character into realistic imagery. For
+<br/>training our networks, we first track the 3D motion of the person in the
+<br/>video using the template model, and subsequently generate a synthetically
+<br/><b>mpg.de, Max Planck Institute for Informatics</b><br/>Permission to make digital or hard copies of part or all of this work for personal or
+<br/>classroom use is granted without fee provided that copies are not made or distributed
+<br/>for profit or commercial advantage and that copies bear this notice and the full citation
+<br/>on the first page. Copyrights for third-party components of this work must be honored.
+<br/>For all other uses, contact the owner/author(s).
+<br/>© 2018 Copyright held by the owner/author(s).
+<br/>XXXX-XXXX/2018/9-ART282
+<br/>https://doi.org/10.475/123_4
+<br/>rendered version of the video. These images are then used to train a con-
+<br/>ditional generative adversarial network that translates synthetic images of
+<br/>the 3D model into realistic imagery of the human. We evaluate our method
+<br/>for the reenactment of another person that is tracked in order to obtain the
+<br/>motion data, and show video results generated from artist-designed skeleton
+<br/>motion. Our results outperform the state-of-the-art in learning-based human
+<br/>image synthesis.
+<br/>CCS Concepts: • Computing methodologies → Computer graphics;
+<br/>Neural networks; Appearance and texture representations; Animation; Ren-
+<br/>dering;
+<br/>Additional Key Words and Phrases: Video-based Characters, Deep Learning,
+<br/>Conditional GAN, Rendering-to-Video Translation
+<br/>ACM Reference Format:
+<br/>Animation and Reenactment of Human Actor Videos. 1, 1, Article 282
+<br/>(September 2018), 13 pages. https://doi.org/10.475/123_4
+<br/>INTRODUCTION
+<br/>The creation of realistically rendered and controllable animations
+<br/>of human characters is a crucial task in many computer graphics
+<br/>applications. Virtual actors play a key role in games and visual ef-
+<br/>fects, in telepresence, or in virtual and augmented reality. Today, the
+<br/>plausible rendition of video-realistic characters—i.e., animations in-
+<br/>distinguishable from a video of a human—under user control is also
+<br/>Submission ID: 282. 2018-09-12 00:32. Page 1 of 1–13.
+<br/>, Vol. 1, No. 1, Article 282. Publication date: September 2018.
+</td><td>('46458089', 'Lingjie Liu', 'lingjie liu')<br/>('9765909', 'Weipeng Xu', 'weipeng xu')<br/>('1699058', 'Michael Zollhöfer', 'michael zollhöfer')<br/>('3022958', 'Hyeongwoo Kim', 'hyeongwoo kim')<br/>('39600032', 'Florian Bernard', 'florian bernard')<br/>('14210288', 'Marc Habermann', 'marc habermann')<br/>('1698520', 'Wenping Wang', 'wenping wang')<br/>('1680185', 'Christian Theobalt', 'christian theobalt')<br/>('3022958', 'Hyeongwoo Kim', 'hyeongwoo kim')<br/>('46458089', 'Lingjie Liu', 'lingjie liu')<br/>('9765909', 'Weipeng Xu', 'weipeng xu')<br/>('1699058', 'Michael Zollhöfer', 'michael zollhöfer')<br/>('3022958', 'Hyeongwoo Kim', 'hyeongwoo kim')<br/>('39600032', 'Florian Bernard', 'florian bernard')<br/>('14210288', 'Marc Habermann', 'marc habermann')<br/>('1698520', 'Wenping Wang', 'wenping wang')<br/>('1680185', 'Christian Theobalt', 'christian theobalt')</td><td>Authors’ addresses: Lingjie Liu, liulingjie0206@gmail.com, University of Hong Kong,
+<br/>Max Planck Institute for Informatics; Weipeng Xu, wxu@mpi-inf.mpg.de, Max Planck
+<br/>Institute for Informatics; Michael Zollhöfer, zollhoefer@cs.stanford.edu, Stanford
+<br/>kim@mpi-inf.mpg.de; Florian Bernard, fbernard@mpi-inf.mpg.de; Marc Habermann,
+<br/>mhaberma@mpi-inf.mpg.de, Max Planck Institute for Informatics; Wenping Wang,
+<br/>wenping@cs.hku.hk, University of Hong Kong; Christian Theobalt, theobalt@mpi-inf.
+</td></tr><tr><td>0ac442bb570b086d04c4d51a8410fcbfd0b1779d</td><td>WarpNet: Weakly Supervised Matching for Single-view Reconstruction
+<br/><b>University of Maryland, College Park</b><br/>Manmohan Chandraker
+<br/>NEC Labs America
+</td><td>('20615377', 'Angjoo Kanazawa', 'angjoo kanazawa')<br/>('34734622', 'David W. Jacobs', 'david w. jacobs')</td><td></td></tr><tr><td>0af48a45e723f99b712a8ce97d7826002fe4d5a5</td><td>2982
+<br/>Toward Wide-Angle Microvision Sensors
+<br/>Todd Zickler, Member, IEEE
+</td><td>('2724462', 'Sanjeev J. Koppal', 'sanjeev j. koppal')<br/>('2407724', 'Ioannis Gkioulekas', 'ioannis gkioulekas')<br/>('3091204', 'Travis Young', 'travis young')<br/>('2070262', 'Hyunsung Park', 'hyunsung park')<br/>('2140759', 'Kenneth B. Crozier', 'kenneth b. crozier')<br/>('40431923', 'Geoffrey L. Barrows', 'geoffrey l. barrows')</td><td></td></tr><tr><td>0aa8a0203e5f406feb1815f9b3dd49907f5fd05b</td><td>Mixture subclass discriminant analysis
+</td><td>('1827419', 'Nikolaos Gkalelis', 'nikolaos gkalelis')<br/>('1737436', 'Vasileios Mezaris', 'vasileios mezaris')</td><td></td></tr><tr><td>0ac664519b2b8abfb8966dafe60d093037275573</td><td>Facial Action Unit Detection Using Kernel Partial Least Squares -
+<br/>Supplemental Material
+<br/><b>Facial Image Processing and Analysis Group, Institute for Anthropomatics</b><br/><b>Karlsruhe Institute of Technology</b><br/>D-76131 Karlsruhe, P.O. Box 6980 Germany
+<br/>1. Introduction
+<br/>In this document we present additional results corre-
+<br/>sponding to the experiments shown in [1].
+<br/>A. ROC Curves
+<br/>The ROC curves for the AU estimates are shown in this
+<br/>section.
+<br/>A.1. Evaluation on a Single Dataset
+<br/>A.1.1 Experiment on the CK+ Dataset with Eye Labels
+<br/>See Figure 1.
+<br/>A.1.2 Experiment on the CK+ Dataset with Automatic
+<br/>Eye Detection
+<br/>See Figure 2.
+<br/>A.1.3 Experiment on the GEMEP-FERA Dataset
+<br/>See Figure 3.
+<br/>A.2. Evaluation across Datasets
+<br/>A.2.1 Generalization from Constrained to less Con-
+<br/>strained Condition
+<br/>See Figure 4.
+<br/>A.2.2 Generalization from less Constrained to Con-
+<br/>strained Condition
+<br/>See Figure 5.
+<br/>B. F1-Score
+<br/>The F1-Scores for the AU estimates are shown in this
+<br/>section. If no threshold optimization is performed then the
+<br/>thresholds are set to 0.5 for the PLS-based approaches and
+<br/>Table 1. F1 scores in % on CK+ using eye labels. AVG is the
+<br/>weighted average over the individual results, depending on the
+<br/>number of positive samples given by in the column N.
+<br/>linear PLS
+<br/>RBF PLS
+<br/>linear SVM RBF SVM
+<br/>176
+<br/>117
+<br/>193
+<br/>102
+<br/>123
+<br/>120
+<br/>75
+<br/>34
+<br/>131
+<br/>94
+<br/>201
+<br/>79
+<br/>60
+<br/>58
+<br/>324
+<br/>50
+<br/>81
+<br/>AU
+<br/>11
+<br/>12
+<br/>15
+<br/>17
+<br/>20
+<br/>23
+<br/>24
+<br/>25
+<br/>26
+<br/>27
+<br/>AVG
+<br/>78.1
+<br/>80.4
+<br/>74.2
+<br/>77.5
+<br/>72.8
+<br/>64.0
+<br/>84.3
+<br/>15.0
+<br/>84.7
+<br/>60.3
+<br/>77.4
+<br/>64.8
+<br/>35.2
+<br/>38.2
+<br/>85.4
+<br/>15.6
+<br/>85.9
+<br/>72.3
+<br/>77.5
+<br/>76.2
+<br/>75.9
+<br/>76.2
+<br/>68.2
+<br/>51.0
+<br/>84.2
+<br/>5.7
+<br/>81.9
+<br/>51.5
+<br/>78.3
+<br/>57.1
+<br/>28.6
+<br/>26.7
+<br/>86.5
+<br/>7.4
+<br/>83.0
+<br/>69.5
+<br/>69.6
+<br/>78.9
+<br/>72.8
+<br/>74.3
+<br/>67.0
+<br/>51.9
+<br/>84.5
+<br/>14.6
+<br/>78.3
+<br/>52.6
+<br/>73.6
+<br/>49.6
+<br/>28.9
+<br/>14.1
+<br/>86.5
+<br/>5.9
+<br/>84.6
+<br/>67.4
+<br/>71.5
+<br/>76.7
+<br/>68.0
+<br/>73.8
+<br/>65.7
+<br/>42.3
+<br/>83.0
+<br/>0.0
+<br/>80.0
+<br/>49.6
+<br/>76.8
+<br/>28.0
+<br/>14.3
+<br/>9.0
+<br/>86.1
+<br/>0.0
+<br/>77.7
+<br/>64.4
+<br/>0.0 for the SVM-based approaches. Otherwise thresholds
+<br/>are optimized using equal error rate (EER) or F1 score as
+<br/>metrics [2] on either the training folds of the LOSO scheme
+<br/>or the whole training data in case of the cross-dataset tests.
+<br/>B.1. Evaluation on a Single Dataset
+<br/>B.1.1 Experiment on the CK+ Dataset with Eye Labels
+<br/>See Table 1 for F1 scores without threshold optimization,
+<br/>Table 2 for F1 scores using threshold optimization based on
+<br/>EER and Table 3 for F1 scores using threshold optimization
+<br/>based on F1 score.
+<br/>B.1.2 Experiment on the CK+ Dataset with Automatic
+<br/>Eye Detection
+<br/>See Table 4 for F1 scores without threshold optimization,
+<br/>Table 5 for F1 scores using threshold optimization based on
+<br/>EER and Table 6 for F1 scores using threshold optimization
+<br/>based on F1 score.
+</td><td>('40303076', 'Tobias Gehrig', 'tobias gehrig')</td><td>{tobias.gehrig, ekenel}@kit.edu
+</td></tr><tr><td>0a9345ea6e488fb936e26a9ba70b0640d3730ba7</td><td>Deep Bi-directional Cross-triplet Embedding for
+<br/>Cross-Domain Clothing Retrieval
+<br/><b>Northeastern University, Boston, USA</b><br/><b>College of Computer and Information Science, Northeastern University, Boston, USA</b></td><td>('3343578', 'Shuhui Jiang', 'shuhui jiang')<br/>('1746738', 'Yue Wu', 'yue wu')<br/>('37771688', 'Yun Fu', 'yun fu')</td><td>{shjiang, yuewu, yunfu}@ece.neu.edu
+</td></tr><tr><td>0a79d0ba1a4876086e64fc0041ece5f0de90fbea</td><td>FACE ILLUMINATION NORMALIZATION
+<br/>WITH SHADOW CONSIDERATION
+<br/>By
+<br/>SUBMITTED IN PARTIAL FULFILLMENT OF THE
+<br/>REQUIREMENTS FOR THE DEGREE OF
+<br/>MASTER OF SCIENCE
+<br/>AT
+<br/><b>CARNEGIE MELLON UNIVERSITY</b><br/>5000 FORBES AVENUE PITTSBURGH PA 15213-3890
+<br/>MAY 2004
+</td><td>('3039721', 'Avinash B. Baliga', 'avinash b. baliga')<br/>('3039721', 'Avinash B. Baliga', 'avinash b. baliga')</td><td></td></tr><tr><td>0a7309147d777c2f20f780a696efe743520aa2db</td><td>Stories for Images-in-Sequence by using Visual
+<br/>and Narrative Components (cid:63)
+<br/><b>Ss. Cyril and Methodius University, Skopje, Macedonia</b><br/>2 Pendulibrium, Skopje, Macedonia
+<br/>3 Elevate Global, Skopje, Macedonia
+</td><td>('46205557', 'Marko Smilevski', 'marko smilevski')<br/>('46242132', 'Ilija Lalkovski', 'ilija lalkovski')</td><td>{marko.smilevski,ilija}@webfactory.mk, gjorgji.madjarov@finki.ukim.mk
+</td></tr><tr><td>0a11b82aa207d43d1b4c0452007e9388a786be12</td><td>Feature Level Multiple Model Fusion Using Multilinear
+<br/>Subspace Analysis with Incomplete Training Set
+<br/>and Its Application to Face Image Analysis
+<br/><b>School of IoT Engineering, Jiangnan University, Wuxi, 214122, China</b><br/><b>Centre for Vision, Speech and Signal Processing, University of Surrey, Guildford, GU2 7XH</b><br/>United Kingdom
+</td><td>('2976854', 'Zhen-Hua Feng', 'zhen-hua feng')<br/>('1748684', 'Josef Kittler', 'josef kittler')</td><td>xiaojun wu jnu@163.com
+<br/>{Z.Feng,J.Kittler,W.Christmas}@surrey.ac.uk
+</td></tr><tr><td>0a1138276c52c734b67b30de0bf3f76b0351f097</td><td>This is the author's version of an article that has been published in this journal. Changes were made to this version by the publisher prior to publication.
+<br/>The final version of record is available at
+<br/> http://dx.doi.org/10.1109/TIP.2016.2539502
+<br/>Discriminant Incoherent Component Analysis
+</td><td>('2812961', 'Christos Georgakis', 'christos georgakis')<br/>('1780393', 'Yannis Panagakis', 'yannis panagakis')<br/>('1694605', 'Maja Pantic', 'maja pantic')</td><td></td></tr><tr><td>0a6a25ee84fc0bf7284f41eaa6fefaa58b5b329a</td><td></td><td>('1802883', 'Soufiane Belharbi', 'soufiane belharbi')</td><td></td></tr><tr><td>0ae9cc6a06cfd03d95eee4eca9ed77b818b59cb7</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Multi-task, multi-label and multi-domain learning with
+<br/>residual convolutional networks for emotion recognition
+<br/>Received: date / Accepted: date
+</td><td>('10157512', 'Gerard Pons', 'gerard pons')</td><td></td></tr><tr><td>0acf23485ded5cb9cd249d1e4972119239227ddb</td><td>Dual coordinate solvers for large-scale structural SVMs
+<br/>UC Irvine
+<br/>This manuscript describes a method for training linear SVMs (including binary SVMs, SVM regression,
+<br/>and structural SVMs) from large, out-of-core training datasets. Current strategies for large-scale learning fall
+<br/>into one of two camps; batch algorithms which solve the learning problem given a finite datasets, and online
+<br/>algorithms which can process out-of-core datasets. The former typically requires datasets small enough to fit
+<br/>in memory. The latter is often phrased as a stochastic optimization problem [4, 15]; such algorithms enjoy
+<br/>strong theoretical properties but often require manual tuned annealing schedules, and may converge slowly
+<br/>for problems with large output spaces (e.g., structural SVMs). We discuss an algorithm for an “intermediate”
+<br/>regime in which the data is too large to fit in memory, but the active constraints (support vectors) are small
+<br/>enough to remain in memory.
+<br/>In this case, one can design rather efficient learning algorithms that are
+<br/>as stable as batch algorithms, but capable of processing out-of-core datasets. We have developed such a
+<br/>MATLAB-based solver and used it to train a series of recognition systems [19, 7, 21, 12] for articulated pose
+<br/>estimation, facial analysis, 3D object recognition, and action classification, all with publicly-available code.
+<br/>This writeup describes the solver in detail.
+<br/>Approach: Our approach is closely based on data-subsampling algorithms for collecting hard exam-
+<br/>ples [9, 10, 6], combined with the dual coordinate quadratic programming (QP) solver described in liblinear
+<br/>[8]. The latter appears to be current fastest method for learning linear SVMs. We make two extensions (1)
+<br/>We show how to generalize the solver to other types of SVM problems such as (latent) structural SVMs (2)
+<br/>We show how to modify it to behave as a partially-online algorithm, which only requires access to small
+<br/>amounts of data at a time.
+<br/>Overview: Sec. 1 describes a general formulation of an SVM problem that encompasses many standard
+<br/>tasks such as multi-class classification and (latent) structural prediction. Sec. 2 derives its dual QP, and Sec. 3
+<br/>describes a dual coordinate descent optimization algorithm. Sec. 4 describes modifications for optimizing
+<br/>in an online fashion, allowing one to learn near-optimal models with a single pass over large, out-of-core
+<br/>datasets. Sec. 5 briefly touches on some theoretical issues that are necessary to ensure convergence. Finally,
+<br/>Sec. 6 and Sec. 7 describe modifications to our basic formulation to accommodate non-negativity constraints
+<br/>and flexible regularization schemes during learning.
+<br/>1 Generalized SVMs
+<br/>We first describe a general formulation of a SVM which encompasses various common problems such as
+<br/>binary classification, regression, and structured prediction. Assume we are given training data where the ith
+<br/>example is described by a set of Ni vectors {xij} and a set of Ni scalars {lij}, where j varies from 1 to Ni.
+<br/>We wish to solve the following optimization problem:
+<br/>(0, lij − wT xij)
+<br/>max
+<br/>j∈Ni
+<br/>(1)
+<br/>(cid:88)
+<br/>argmin
+<br/>L(w) =
+<br/>||w||2 +
+</td><td>('1770537', 'Deva Ramanan', 'deva ramanan')</td><td></td></tr><tr><td>0ad4a814b30e096ad0e027e458981f812c835aa0</td><td></td><td></td><td></td></tr><tr><td>6448d23f317babb8d5a327f92e199aaa45f0efdc</td><td></td><td></td><td></td></tr><tr><td>6412d8bbcc01f595a2982d6141e4b93e7e982d0f</td><td>Deep Convolutional Neural Network using Triplets of Faces, Deep Ensemble, and
+<br/>Score-level Fusion for Face Recognition
+<br/>1Department of Creative IT Engineering, POSTECH, Korea
+<br/>2Department of Computer Science and Engineering, POSTECH, Korea
+</td><td>('2794366', 'Bong-Nam Kang', 'bong-nam kang')<br/>('1804861', 'Yonghyun Kim', 'yonghyun kim')<br/>('1695669', 'Daijin Kim', 'daijin kim')</td><td>{bnkang, gkyh0805, dkim}@postech.ac.kr
+</td></tr><tr><td>641f0989b87bf7db67a64900dcc9568767b7b50f</td><td>Reconstructing Faces from their Signatures using RBF
+<br/>Regression
+<br/>To cite this version:
+<br/>sion. British Machine Vision Conference 2013, Sep 2013, Bristol, United Kingdom. pp.103.1–
+<br/>103.12, 2013, <10.5244/C.27.103>. <hal-00943426>
+<br/>HAL Id: hal-00943426
+<br/>https://hal.archives-ouvertes.fr/hal-00943426
+<br/>Submitted on 13 Feb 2014
+<br/>HAL is a multi-disciplinary open access
+<br/>archive for the deposit and dissemination of sci-
+<br/>entific research documents, whether they are pub-
+<br/>lished or not. The documents may come from
+<br/>teaching and research institutions in France or
+<br/><b>abroad, or from public or private research centers</b><br/>L’archive ouverte pluridisciplinaire HAL, est
+<br/>destin´ee au d´epˆot et `a la diffusion de documents
+<br/>scientifiques de niveau recherche, publi´es ou non,
+<br/>´emanant des ´etablissements d’enseignement et de
+<br/>recherche fran¸cais ou ´etrangers, des laboratoires
+<br/>publics ou priv´es.
+</td><td>('34723309', 'Alexis Mignon', 'alexis mignon')<br/>('34723309', 'Alexis Mignon', 'alexis mignon')</td><td></td></tr><tr><td>6409b8879c7e61acf3ca17bcc62f49edca627d4c</td><td>Learning Finite Beta-Liouville Mixture Models via
+<br/>Variational Bayes for Proportional Data Clustering
+<br/>Electrical and Computer Engineering
+<br/><b>Institute for Information Systems Engineering</b><br/><b>Concordia University, Canada</b><br/><b>Concordia University, Canada</b></td><td>('2038786', 'Wentao Fan', 'wentao fan')<br/>('1729109', 'Nizar Bouguila', 'nizar bouguila')</td><td>wenta fa@encs.concordia.ca
+<br/>nizar.bouguila@concordia.ca
+</td></tr><tr><td>64153df77fe137b7c6f820a58f0bdb4b3b1a879b</td><td>Shape Invariant Recognition of Segmented Human
+<br/>Faces using Eigenfaces
+<br/>Department of Informatics
+<br/><b>Technical University of Munich, Germany</b></td><td>('1725709', 'Zahid Riaz', 'zahid riaz')<br/>('1746229', 'Michael Beetz', 'michael beetz')<br/>('1699132', 'Bernd Radig', 'bernd radig')</td><td>{riaz,beetz,radig}@in.tum.de
+</td></tr><tr><td>649eb674fc963ce25e4e8ce53ac7ee20500fb0e3</td><td></td><td></td><td></td></tr><tr><td>64ec0c53dd1aa51eb15e8c2a577701e165b8517b</td><td>Online Regression with Feature Selection in
+<br/>Stochastic Data Streams
+<br/><b>Florida State University</b><br/><b>Florida State University</b></td><td>('5517409', 'Lizhe Sun', 'lizhe sun')<br/>('2455529', 'Adrian Barbu', 'adrian barbu')</td><td>lizhe.sun@stat.fsu.edu
+<br/>abarbu@stat.fsu.edu
+</td></tr><tr><td>642c66df8d0085d97dc5179f735eed82abf110d0</td><td></td><td></td><td></td></tr><tr><td>6459f1e67e1ea701b8f96177214583b0349ed964</td><td>GENERALIZED SUBSPACE BASED HIGH DIMENSIONAL DENSITY ESTIMATION
+<br/><b>University of California Santa Barbara</b><br/><b>University of California Santa Barbara</b></td><td>('3231876', 'Karthikeyan Shanmuga Vadivel', 'karthikeyan shanmuga vadivel')</td><td>(cid:63){karthikeyan,msargin,sjoshi,manj}@ece.ucsb.edu
+<br/>†grafton@psych.ucsb.edu
+</td></tr><tr><td>64cf86ba3b23d3074961b485c16ecb99584401de</td><td>Single Image 3D Interpreter Network
+<br/><b>Massachusetts Institute of Technology</b><br/><b>Stanford University</b><br/>3Facebook AI Research
+<br/>4Google Research
+</td><td>('3045089', 'Jiajun Wu', 'jiajun wu')<br/>('3222730', 'Tianfan Xue', 'tianfan xue')<br/>('35198686', 'Joseph J. Lim', 'joseph j. lim')<br/>('39402399', 'Yuandong Tian', 'yuandong tian')<br/>('1763295', 'Joshua B. Tenenbaum', 'joshua b. tenenbaum')<br/>('1690178', 'Antonio Torralba', 'antonio torralba')<br/>('1768236', 'William T. Freeman', 'william t. freeman')</td><td></td></tr><tr><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td><td>Deep Learning Face Attributes in the Wild∗
+<br/><b>The Chinese University of Hong Kong</b><br/><b>The Chinese University of Hong Kong</b></td><td>('3243969', 'Ziwei Liu', 'ziwei liu')<br/>('1693209', 'Ping Luo', 'ping luo')</td><td>{lz013,pluo,xtang}@ie.cuhk.edu.hk, xgwang@ee.cuhk.edu.hk
+</td></tr><tr><td>6479b61ea89e9d474ffdefa71f068fbcde22cc44</td><td><b>University of Exeter</b><br/>Department of Computer Science
+<br/>Some Topics on Similarity Metric Learning
+<br/>June 2015
+<br/>Supervised by Dr. Yiming Ying
+<br/>Philosophy in Computer Science , June 2015.
+<br/>This thesis is available for Library use on the understanding that it is copyright material
+<br/>and that no quotation from the thesis may be published without proper acknowledgement.
+<br/>I certify that all material in this thesis which is not my own work has been identified and
+<br/>that no material has previously been submitted and approved for the award of a degree by this or
+<br/><b>any other University</b><br/>(signature) .................................................................................................
+</td><td>('1954340', 'Qiong Cao', 'qiong cao')<br/>('1954340', 'Qiong Cao', 'qiong cao')</td><td></td></tr><tr><td>64e75f53ff3991099c3fb72ceca55b76544374e5</td><td>Simultaneous Feature Selection and Classifier Training via Linear
+<br/>Programming: A Case Study for Face Expression Recognition
+<br/>Computer Sciences Department
+<br/><b>University of Wisconsin-Madison</b><br/>Madison, WI 53706
+</td><td>('1822413', 'Guodong Guo', 'guodong guo')<br/>('1724754', 'Charles R. Dyer', 'charles r. dyer')</td><td>fgdguo, dyerg@cs.wisc.edu
+</td></tr><tr><td>64f9519f20acdf703984f02e05fd23f5e2451977</td><td>Learning Temporal Alignment Uncertainty for
+<br/>Efficient Event Detection
+<br/><b>Image and Video Laboratory, Queensland University of Technology (QUT), Brisbane, QLD, Australia</b><br/><b>The Robotics Institute, Carnegie Mellon University, 5000 Forbes Ave, PA, USA</b></td><td>('2838646', 'Iman Abbasnejad', 'iman abbasnejad')<br/>('1729760', 'Sridha Sridharan', 'sridha sridharan')<br/>('1980700', 'Simon Denman', 'simon denman')<br/>('3140440', 'Clinton Fookes', 'clinton fookes')<br/>('1820249', 'Simon Lucey', 'simon lucey')</td><td>Email:{i.abbasnejad, s.sridharan, s.denman, c.fookes}@qut.edu.au, slucey@cs.cmu.edu
+</td></tr><tr><td>641f34deb3bdd123c6b6e7b917519c3e56010cb7</td><td></td><td></td><td></td></tr><tr><td>64782a2bc5da11b1b18ca20cecf7bdc26a538d68</td><td>JOURNAL OF INFORMATION SCIENCE AND ENGINEERING XX, XXX-XXX (2011)
+<br/>Facial Expression Recognition using
+<br/>Spectral Supervised Canonical Correlation Analysis*
+<br/><b>Institute of Information Science</b><br/><b>Beijing Jiaotong University</b><br/>Beijing, 100044 P.R. China
+<br/>Feature extraction plays an important role in facial expression recognition. Canoni-
+<br/>cal correlation analysis (CCA), which studies the correlation between two random vec-
+<br/>tors, is a major linear feature extraction method based on feature fusion. Recent studies
+<br/>have shown that facial expression images often reside on a latent nonlinear manifold.
+<br/>However, either CCA or its kernel version KCCA, which is globally linear or nonlinear,
+<br/>cannot effectively utilize the local structure information to discover the low-dimensional
+<br/>manifold embedded in the original data. Inspired by the successful application of spectral
+<br/>graph theory in classification, we proposed spectral supervised canonical correlation
+<br/>analysis (SSCCA) to overcome the shortcomings of CCA and KCCA. In SSCCA, we
+<br/>construct an affinity matrix, which incorporates both the class information and local
+<br/>structure information of the data points, as the supervised matrix. The spectral feature of
+<br/>covariance matrices is used to extract a new combined feature with more discriminative
+<br/>information, and it can reveal the nonlinear manifold structure of the data. Furthermore,
+<br/>we proposed a unified framework for CCA to offer an effective methodology for
+<br/>non-empirical structural comparison of different forms of CCA as well as providing a
+<br/>way to extend the CCA algorithm. The correlation feature extraction power is then pro-
+<br/>posed to evaluate the effectiveness of our method. Experimental results on two facial ex-
+<br/>pression databases validate the effectiveness of our method.
+<br/>Keywords: spectral supervised canonical correlation analysis, spectral classification, fea-
+<br/>ture fusion, feature extraction, facial expression recognition
+<br/>1. INTRODUCTION
+<br/>Facial expression conveys visual human emotions, which makes the facial expres-
+<br/>sion recognition (FER) plays an important role in human–computer interaction, image
+<br/>retrieval, synthetic face animation, video conferencing, human emotion analysis [1, 2].
+<br/>Due to its wide range of applications, FER has attracted much attention in recent years.
+<br/>Generally speaking, a FER system consists of three major components: face detection,
+<br/>facial expression feature extraction and facial expression classification [1, 2]. Since ap-
+<br/>propriate facial expression representation can effectively alleviate the complexity of the
+<br/>design of classification and improve the performance of the FER system, most researches
+<br/>currently concentrate on how to extract effective facial expression features.
+<br/>A variety of methods have been proposed for facial expression feature extraction
+<br/>[3-7], and there are generally two common approaches: single feature extraction and
+<br/>feature fusion. Single feature extraction is based on a particular method, i.e. principal
+<br/>component analysis (PCA) [3], fisher’s linear discriminant (FLD) [4], locality preserving
+<br/>*This paper was supported by the National Natural Science Foundation of China (Grant No.60973060), Spe-
+<br/>cialized Research Fund for the Doctoral Program of Higher Education (Grant No. 200800040008), Beijing
+<br/>Program (Grant No. YB20081000401) and the Fundamental Research Funds for the Central Universities
+<br/>(Grant No. 2011JBM022).
+<br/>1
+</td><td>('1701978', 'Song Guo', 'song guo')<br/>('1738408', 'Qiuqi Ruan', 'qiuqi ruan')<br/>('1718667', 'Zhan Wang', 'zhan wang')<br/>('1702894', 'Shuai Liu', 'shuai liu')</td><td></td></tr><tr><td>645de797f936cb19c1b8dba3b862543645510544</td><td>Deep Temporal Linear Encoding Networks
+<br/>1ESAT-PSI, KU Leuven, 2CVL, ETH Z¨urich
+</td><td>('3310120', 'Ali Diba', 'ali diba')<br/>('50633941', 'Vivek Sharma', 'vivek sharma')<br/>('1681236', 'Luc Van Gool', 'luc van gool')</td><td>{firstname.lastname}@esat.kuleuven.be
+</td></tr><tr><td>6462ef39ca88f538405616239471a8ea17d76259</td><td></td><td></td><td></td></tr><tr><td>64d5772f44efe32eb24c9968a3085bc0786bfca7</td><td>Morphable Displacement Field Based Image
+<br/>Matching for Face Recognition across Pose
+<br/>1 Key Lab of Intelligent Information Processing of Chinese Academy of Sciences
+<br/><b>CAS), Institute of Computing Technology, CAS, Beijing, 100190, China</b><br/><b>Graduate University of Chinese Academy of Sciences, Beijing 100049, China</b><br/>3 Omron Social Solutions Co., LTD., Kyoto, Japan
+</td><td>('1688086', 'Shaoxin Li', 'shaoxin li')<br/>('1731144', 'Xin Liu', 'xin liu')<br/>('1695600', 'Xiujuan Chai', 'xiujuan chai')<br/>('1705483', 'Haihong Zhang', 'haihong zhang')<br/>('1710195', 'Shihong Lao', 'shihong lao')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')</td><td>{shaoxin.li,xiujuan.chai,xin.liu,shiguang.shan}@vipl.ict.ac.cn,
+<br/>lao@ari.ncl.omron.co.jp, angelazhang@ssb.kusatsu.omron.co.jp
+</td></tr><tr><td>64d7e62f46813b5ad08289aed5dc4825d7ec5cff</td><td>YAMAGUCHI et al.: MIX AND MATCH
+<br/>Mix and Match: Joint Model for Clothing and
+<br/>Attribute Recognition
+<br/>http://vision.is.tohoku.ac.jp/~kyamagu
+<br/><b>Tohoku University</b><br/>Sendai, Japan
+<br/>2 NTT
+<br/>Yokosuka, Japan
+<br/><b>Tokyo University of Science</b><br/>Tokyo, Japan
+</td><td>('1721910', 'Kota Yamaguchi', 'kota yamaguchi')<br/>('1718872', 'Takayuki Okatani', 'takayuki okatani')<br/>('1745497', 'Kyoko Sudo', 'kyoko sudo')<br/>('2023568', 'Kazuhiko Murasaki', 'kazuhiko murasaki')<br/>('2113938', 'Yukinobu Taniguchi', 'yukinobu taniguchi')</td><td>okatani@vision.is.tohoku.ac.jp
+<br/>sudo.kyoko@lab.ntt.co.jp
+<br/>murasaki.kazuhiko@lab.ntt.co.jp
+<br/>ytaniguti@ms.kagu.tus.ac.jp
+</td></tr><tr><td>90ac0f32c0c29aa4545ed3d5070af17f195d015f</td><td></td><td></td><td></td></tr><tr><td>90d735cffd84e8f2ae4d0c9493590f3a7d99daf1</td><td>Original Research Paper
+<br/>American Journal of Engineering and Applied Sciences
+<br/>Recognition of Faces using Efficient Multiscale Local Binary
+<br/>Pattern and Kernel Discriminant Analysis in Varying
+<br/>Environment
+<br/>V.H. Mankar
+<br/><b>Priyadarshini College of Engg, Nagpur, India</b><br/>2Department of Electronics Engg, Government Polytechnic, Nagpur, India
+<br/>Article history
+<br/>Received: 20-06-2017
+<br/>Revised: 18-07-2017
+<br/>Accepted: 21-08-2017
+<br/>Corresponding Author:
+<br/>Department of Electronics
+<br/><b>Engg, Priyadarshini College of</b><br/>Engg, Nagpur, India
+<br/>face
+</td><td>('9128944', 'Sujata G. Bhele', 'sujata g. bhele')<br/>('9128944', 'Sujata G. Bhele', 'sujata g. bhele')</td><td>Email: sujata_bhele@yahoo.co.in
+</td></tr><tr><td>90298f9f80ebe03cb8b158fd724551ad711d4e71</td><td>A Pursuit of Temporal Accuracy in General Activity Detection
+<br/><b>The Chinese University of Hong Kong</b><br/>2Computer Vision Laboratory, ETH Zurich, Switzerland
+</td><td>('3331521', 'Yuanjun Xiong', 'yuanjun xiong')<br/>('1695765', 'Yue Zhao', 'yue zhao')<br/>('33345248', 'Limin Wang', 'limin wang')<br/>('1807606', 'Dahua Lin', 'dahua lin')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td></td></tr><tr><td>900207b3bc3a4e5244cae9838643a9685a84fee0</td><td>Reconstructing Geometry from Its Latent Structures
+<br/>A Thesis
+<br/>Submitted to the Faculty
+<br/>of
+<br/><b>Drexel University</b><br/>by
+<br/>Geoffrey Oxholm
+<br/>in partial fulfillment of the
+<br/>requirements for the degree
+<br/>of
+<br/>Doctor of Philosophy
+<br/>June 2014
+</td><td></td><td></td></tr><tr><td>90498b95fe8b299ce65d5cafaef942aa58bd68b7</td><td>Face Recognition: Primates in the Wild∗
+<br/><b>Michigan State University, East Lansing, MI, USA</b><br/><b>University of Chester, UK, 3Conservation Biologist</b></td><td>('32623642', 'Debayan Deb', 'debayan deb')<br/>('46516859', 'Susan Wiper', 'susan wiper')<br/>('9658130', 'Sixue Gong', 'sixue gong')<br/>('9644181', 'Yichun Shi', 'yichun shi')<br/>('41022894', 'Cori Tymoszek', 'cori tymoszek')</td><td>E-mail: 1{debdebay, gongsixu, shiyichu, tymoszek, jain}@cse.msu.edu,
+<br/>2s.wiper@chester.ac.uk, 3alexandra.h.russo@gmail.com
+</td></tr><tr><td>90cc2f08a6c2f0c41a9dd1786bae097f9292105e</td><td>Top-down Attention Recurrent VLAD Encoding
+<br/>for Action Recognition in Videos
+<br/>1 Fondazione Bruno Kessler, Trento, Italy
+<br/><b>University of Trento, Trento, Italy</b></td><td>('1756362', 'Swathikiran Sudhakaran', 'swathikiran sudhakaran')<br/>('1717522', 'Oswald Lanz', 'oswald lanz')</td><td>{sudhakaran,lanz}@fbk.eu
+</td></tr><tr><td>90fb58eeb32f15f795030c112f5a9b1655ba3624</td><td>INTERNATIONAL JOURNAL OF RESEARCH IN COMPUTER APPLICATIONS AND ROBOTICS
+<br/> www.ijrcar.com
+<br/>Vol.4 Issue 6, Pg.: 12-27
+<br/>June 2016
+<br/>INTERNATIONAL JOURNAL OF
+<br/>RESEARCH IN COMPUTER
+<br/>APPLICATIONS AND ROBOTICS
+<br/>ISSN 2320-7345
+<br/>FACE AND IRIS RECOGNITION IN A
+<br/>VIDEO SEQUENCE USING DBPNN AND
+<br/>ADAPTIVE HAMMING DISTANCE
+<br/><b>PG Scholar, Hindusthan College of Engineering and Technology, Coimbatore, India</b><br/><b>Hindusthan College of Engineering and Technology, Coimbatore, India</b></td><td>('3406423', 'S. Revathy', 's. revathy')</td><td>Email id: revathysreeni14@gmail.com
+</td></tr><tr><td>90c4f15f1203a3a8a5bf307f8641ba54172ead30</td><td>A 2D Morphable Model of Craniofacial Profile
+<br/>and Its Application to Craniosynostosis
+<br/><b>University of York, York, UK</b><br/>2 Alder Hey Craniofacial Unit, Liverpool, UK
+<br/>https://www-users.cs.york.ac.uk/~nep/research/LYHM/
+</td><td>('1694260', 'Hang Dai', 'hang dai')<br/>('1737428', 'Nick Pears', 'nick pears')<br/>('14154312', 'Christian Duncan', 'christian duncan')</td><td>{hd816,nick.pears}@york.ac.uk
+<br/>Christian.Duncan@alderhey.nhs.uk
+</td></tr><tr><td>902114feaf33deac209225c210bbdecbd9ef33b1</td><td>KAN et al.: SIDE-INFORMATION BASED LDA FOR FACE RECOGNITION
+<br/>Side-Information based Linear
+<br/>Discriminant Analysis for Face
+<br/>Recognition
+<br/><b>Digital Media Research Center</b><br/><b>Institute of Computing</b><br/>Technology, CAS, Beijing, China
+<br/>2 Key Laboratory of Intelligent
+<br/>Information Processing, Chinese
+<br/>Academy of Sciences, Beijing,
+<br/>China
+<br/>3 School of Computer Engineering,
+<br/>Nanyang Technological
+<br/><b>University, Singapore</b></td><td>('1693589', 'Meina Kan', 'meina kan')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('1714390', 'Dong Xu', 'dong xu')<br/>('1710220', 'Xilin Chen', 'xilin chen')</td><td>mnkan@jdl.ac.cn
+<br/>sgshan@jdl.ac.cn
+<br/>dongxu@ntu.edu.sg
+<br/>xlchen@jdl.ac.cn
+</td></tr><tr><td>90ad0daa279c3e30b360f9fe9371293d68f4cebf</td><td>SPATIO-TEMPORAL FRAMEWORK AND
+<br/>ALGORITHMS FOR VIDEO-BASED FACE
+<br/>RECOGNITION
+<br/>DOCTOR OF PHILOSOPHY
+<br/><b>MULTIMEDIA UNIVERSITY</b><br/>MAY 2014
+</td><td>('2339975', 'JOHN SEE', 'john see')</td><td></td></tr><tr><td>90a754f597958a2717862fbaa313f67b25083bf9</td><td>REVIEW
+<br/>published: 16 November 2015
+<br/>doi: 10.3389/frobt.2015.00028
+<br/>A Review of Human Activity
+<br/>Recognition Methods
+<br/><b>University of Ioannina, Ioannina, Greece, 2 Computational Biomedicine</b><br/><b>Laboratory, University of Houston, Houston, TX, USA</b><br/>Recognizing human activities from video sequences or still images is a challenging task
+<br/>due to problems, such as background clutter, partial occlusion, changes in scale, view-
+<br/><b>point, lighting, and appearance. Many applications, including video surveillance systems</b><br/>human-computer interaction, and robotics for human behavior characterization, require
+<br/>a multiple activity recognition system. In this work, we provide a detailed review of recent
+<br/>and state-of-the-art research advances in the field of human activity classification. We
+<br/>propose a categorization of human activity methodologies and discuss their advantages
+<br/>and limitations. In particular, we divide human activity classification methods into two large
+<br/>categories according to whether they use data from different modalities or not. Then, each
+<br/>of these categories is further analyzed into sub-categories, which reflect how they model
+<br/>human activities and what type of activities they are interested in. Moreover, we provide
+<br/>a comprehensive analysis of the existing, publicly available human activity classification
+<br/>datasets and examine the requirements for an ideal human activity recognition dataset.
+<br/>Finally, we report the characteristics of future research directions and present some open
+<br/>issues on human activity recognition.
+<br/>Keywords: human activity recognition, activity categorization, activity datasets, action representation,
+<br/>review, survey
+<br/>1. INTRODUCTION
+<br/>Human activity recognition plays a significant role in human-to-human interaction and interper-
+<br/>sonal relations. Because it provides information about the identity of a person, their personality,
+<br/>and psychological state, it is difficult to extract. The human ability to recognize another person’s
+<br/>activities is one of the main subjects of study of the scientific areas of computer vision and machine
+<br/><b>learning. As a result of this research, many applications, including video surveillance systems</b><br/>human-computer interaction, and robotics for human behavior characterization, require a multiple
+<br/>activity recognition system.
+<br/>Among various classification techniques two main questions arise: “What action?” (i.e., the
+<br/>recognition problem) and “Where in the video?” (i.e., the localization problem). When attempting to
+<br/>recognize human activities, one must determine the kinetic states of a person, so that the computer
+<br/>can efficiently recognize this activity. Human activities, such as “walking” and “running,” arise very
+<br/>naturally in daily life and are relatively easy to recognize. On the other hand, more complex activities,
+<br/>such as “peeling an apple,” are more difficult to identify. Complex activities may be decomposed into
+<br/>other simpler activities, which are generally easier to recognize. Usually, the detection of objects in
+<br/>a scene may help to better understand human activities as it may provide useful information about
+<br/>the ongoing event (Gupta and Davis, 2007).
+<br/>Edited by:
+<br/>Venkatesh Babu Radhakrishnan,
+<br/><b>Indian Institute of Science, India</b><br/>Reviewed by:
+<br/>Stefano Berretti,
+<br/><b>University of Florence, Italy</b><br/>Xinlei Chen,
+<br/><b>Carnegie Mellon University, USA</b><br/>*Correspondence:
+<br/>Specialty section:
+<br/>This article was submitted to Vision
+<br/>Systems Theory, Tools and
+<br/>Applications, a section of the
+<br/>journal Frontiers in Robotics and AI
+<br/>Received: 09 July 2015
+<br/>Accepted: 29 October 2015
+<br/>Published: 16 November 2015
+<br/>Citation:
+<br/>Vrigkas M, Nikou C and Kakadiaris IA
+<br/>(2015) A Review of Human Activity
+<br/>Recognition Methods.
+<br/>Front. Robot. AI 2:28.
+<br/>doi: 10.3389/frobt.2015.00028
+<br/>Frontiers in Robotics and AI | www.frontiersin.org
+<br/>November 2015 | Volume 2 | Article 28
+</td><td>('2045915', 'Michalis Vrigkas', 'michalis vrigkas')<br/>('1727495', 'Christophoros Nikou', 'christophoros nikou')<br/>('1706204', 'Ioannis A. Kakadiaris', 'ioannis a. kakadiaris')<br/>('1727495', 'Christophoros Nikou', 'christophoros nikou')</td><td>cnikou@cs.uoi.gr
+</td></tr><tr><td>90d9209d5dd679b159051a8315423a7f796d704d</td><td>Temporal Sequence Distillation: Towards Few-Frame Action
+<br/>Recognition in Videos
+<br/><b>Wuhan University</b><br/>SenseTime Research
+<br/>SenseTime Research
+<br/><b>The Chinese University of Hong Kong</b><br/>SenseTime Research
+<br/>SenseTime Research
+</td><td>('40192003', 'Zhaoyang Zhang', 'zhaoyang zhang')<br/>('1874900', 'Zhanghui Kuang', 'zhanghui kuang')<br/>('47571885', 'Ping Luo', 'ping luo')<br/>('1739512', 'Litong Feng', 'litong feng')<br/>('1726357', 'Wei Zhang', 'wei zhang')</td><td>zhangzhaoyang@whu.edu.cn
+<br/>kuangzhanghui@sensetime.com
+<br/>pluo@ie.cuhk.edu.hk
+<br/>fenglitong@sensetime.com
+<br/>wayne.zhang@sensetime.com
+</td></tr><tr><td>90dd2a53236b058c79763459b9d8a7ba5e58c4f1</td><td>Capturing Correlations Among Facial Parts for
+<br/>Facial Expression Analysis
+<br/>Department of Computer Science
+<br/><b>Queen Mary, University of London</b><br/>Mile End Road, London E1 4NS, UK
+</td><td>('10795229', 'Caifeng Shan', 'caifeng shan')<br/>('2073354', 'Shaogang Gong', 'shaogang gong')<br/>('2803283', 'Peter W. McOwan', 'peter w. mcowan')</td><td>{cfshan, sgg, pmco}@dcs.qmul.ac.uk
+</td></tr><tr><td>90cb074a19c5e7d92a1c0d328a1ade1295f4f311</td><td>MIT. Media Laboratory Affective Computing Technical Report #571
+<br/>Appears in IEEE International Workshop on Analysis and Modeling of Faces and Gestures , Oct 2003
+<br/>Fully Automatic Upper Facial Action Recognition
+<br/>MIT Media Laboratory
+<br/>Cambridge, MA 02139
+</td><td>('2189118', 'Ashish Kapoor', 'ashish kapoor')</td><td></td></tr><tr><td>90b11e095c807a23f517d94523a4da6ae6b12c76</td><td></td><td></td><td></td></tr><tr><td>90c2d4d9569866a0b930e91713ad1da01c2a6846</td><td>528
+<br/>The Open Automation and Control Systems Journal, 2014, 6, 528-534
+<br/>Dimensionality Reduction Based on Low Rank Representation
+<br/>Open Access
+<br/><b>School of Electronic and Information Engineering, Tongji University, Shanghai, China</b></td><td>('40328872', 'Cheng Luo', 'cheng luo')<br/>('40174994', 'Yang Xiang', 'yang xiang')</td><td>Send Orders for Reprints to reprints@benthamscience.ae
+</td></tr><tr><td>907475a4febf3f1d4089a3e775ea018fbec895fe</td><td>STATISTICAL MODELING FOR FACIAL EXPRESSION ANALYSIS AND SYNTHESIS
+<br/><b>Heudiasyc Laboratory, CNRS, University of Technology of Compi`egne</b><br/>BP 20529, 60205 COMPIEGNE Cedex, FRANCE.
+</td><td>('2371236', 'Bouchra Abboud', 'bouchra abboud')<br/>('1742818', 'Franck Davoine', 'franck davoine')</td><td>E-mail: Franck.Davoine@hds.utc.fr
+</td></tr><tr><td>9028fbbd1727215010a5e09bc5758492211dec19</td><td>Solving the Uncalibrated Photometric Stereo
+<br/>Problem using Total Variation
+<br/>1 IRIT, UMR CNRS 5505, Toulouse, France
+<br/>2 Dept. of Computer Science, Univ. of Copenhagen, Denmark
+</td><td>('2233590', 'Jean-Denis Durou', 'jean-denis durou')</td><td>yvain.queau@enseeiht.fr
+<br/>durou@irit.fr
+<br/>francois@diku.dk
+</td></tr><tr><td>bff77a3b80f40cefe79550bf9e220fb82a74c084</td><td>Facial Expression Recognition Based on Local Binary Patterns and
+<br/>Local Fisher Discriminant Analysis
+<br/>1School of Physics and Electronic Engineering
+<br/><b>Taizhou University</b><br/>Taizhou 318000
+<br/>CHINA
+<br/> 2Department of Computer Science
+<br/><b>Taizhou University</b><br/>Taizhou 318000
+<br/>CHINA
+</td><td>('1695589', 'SHIQING ZHANG', 'shiqing zhang')<br/>('1730594', 'XIAOMING ZHAO', 'xiaoming zhao')<br/>('38909691', 'BICHENG LEI', 'bicheng lei')</td><td>tzczsq@163.com, leibicheng@163.com
+<br/>tzxyzxm@163.com
+</td></tr><tr><td>bf03f0fe8f3ba5b118bdcbb935bacb62989ecb11</td><td>EFFECT OF FACIAL EXPRESSIONS ON FEATURE-BASED
+<br/>LANDMARK LOCALIZATION IN STATIC GREY SCALE
+<br/>IMAGES
+<br/>Research Group for Emotions, Sociality, and Computing, Tampere Unit for Computer-Human Interaction (TAUCHI)
+<br/><b>University of Tampere, Kanslerinnrinne 1, 33014, Tampere, Finland</b><br/>Keywords:
+<br/>Image processing and computer vision, segmentation, edge detection, facial landmark localization, facial
+<br/>expressions, action units.
+</td><td>('2935367', 'Yulia Gizatdinova', 'yulia gizatdinova')<br/>('1718377', 'Veikko Surakka', 'veikko surakka')</td><td>{yulia.gizatdinova, veikko.surakka}@cs.uta.fi
+</td></tr><tr><td>bf961e4a57a8f7e9d792e6c2513ee1fb293658e9</td><td>EURASIP Journal on Applied Signal Processing 2004:16, 2533–2543
+<br/>c(cid:1) 2004 Hindawi Publishing Corporation
+<br/>Robust Face Image Matching under
+<br/>Illumination Variations
+<br/><b>National Tsing Hua University, 101 Kuang Fu Road, Section 2, Hsinchu 300, Taiwan</b><br/><b>National Tsing Hua University, 101 Kuang Fu Road, Section 2, Hsinchu 300, Taiwan</b><br/><b>National Tsing Hua University, 101 Kuang Fu Road, Section 2, Hsinchu 300, Taiwan</b><br/>Received 1 September 2003; Revised 21 September 2004
+<br/>Face image matching is an essential step for face recognition and face verification. It is difficult to achieve robust face matching
+<br/>under various image acquisition conditions. In this paper, a novel face image matching algorithm robust against illumination
+<br/>variations is proposed. The proposed image matching algorithm is motivated by the characteristics of high image gradient along
+<br/>the face contours. We define a new consistency measure as the inner product between two normalized gradient vectors at the
+<br/>corresponding locations in two images. The normalized gradient is obtained by dividing the computed gradient vector by the
+<br/>corresponding locally maximal gradient magnitude. Then we compute the average consistency measures for all pairs of the corre-
+<br/>sponding face contour pixels to be the robust matching measure between two face images. To alleviate the problem due to shadow
+<br/>and intensity saturation, we introduce an intensity weighting function for each individual consistency measure to form a weighted
+<br/>average of the consistency measure. This robust consistency measure is further extended to integrate multiple face images of the
+<br/>same person captured under different illumination conditions, thus making our robust face matching algorithm. Experimental
+<br/>results of applying the proposed face image matching algorithm on some well-known face datasets are given in comparison with
+<br/>some existing face recognition methods. The results show that the proposed algorithm consistently outperforms other methods
+<br/>and achieves higher than 93% recognition rate with three reference images for different datasets under different lighting condi-
+<br/>tions.
+<br/>Keywords and phrases: robust image matching, face recognition, illumination variations, normalized gradient.
+<br/>INTRODUCTION
+<br/>1.
+<br/>Face recognition has attracted the attention of a number
+<br/>of researchers from academia and industry because of its
+<br/>challenges and related applications, such as security access
+<br/>control, personal ID verification, e-commerce, video surveil-
+<br/>lance, and so forth. The details of these applications are re-
+<br/>ferred to in the surveys [1, 2, 3]. Face matching is the most
+<br/>important and crucial component in face recognition. Al-
+<br/>though there have been many efforts in previous works to
+<br/>achieve robust face matching under a wide variety of dif-
+<br/>ferent image capturing conditions, such as lighting changes,
+<br/>head pose or view angle variations, expression variations,
+<br/>and so forth, these problems are still difficult to overcome.
+<br/>It is a great challenge to achieve robust face matching under
+<br/>all kinds of different face imaging variations. A practical face
+<br/>recognition system needs to work under different imaging
+<br/>conditions, such as different face poses, or different illumi-
+<br/>nation conditions. Therefore, a robust face matching method
+<br/>is essential to the development of an illumination-insensitive
+<br/>face recognition system. In this paper, we particularly focus
+<br/>on robust face matching under different illumination condi-
+<br/>tions.
+<br/>Many researchers have proposed face recognition meth-
+<br/>ods or face verification systems under different illumination
+<br/>conditions. Some of these methods extracted representative
+<br/>features from face images to compute the distance between
+<br/>these features. In general, these methods can be categorized
+<br/>into the feature-based approach [4, 5, 6, 7, 8, 9, 10, 11], the
+<br/>appearance-based approach [12, 13, 14, 15, 16, 17, 18, 19, 20,
+<br/>21, 22, 23], and the hybrid approach [22, 24].
+</td><td>('2393568', 'Chyuan-Huei Thomas Yang', 'chyuan-huei thomas yang')<br/>('1696527', 'Shang-Hong Lai', 'shang-hong lai')<br/>('39505245', 'Long-Wen Chang', 'long-wen chang')</td><td>Email: chyang@cs.nthu.edu.tw
+<br/>Email: lai@cs.nthu.edu.tw
+<br/>Email: lchang@cs.nthu.edu.tw
+</td></tr><tr><td>bf54b5586cdb0b32f6eed35798ff91592b03fbc4</td><td>Journal of Signal and Information Processing, 2017, 8, 78-98
+<br/>http://www.scirp.org/journal/jsip
+<br/>ISSN Online: 2159-4481
+<br/>ISSN Print: 2159-4465
+<br/>Methodical Analysis of Western-Caucasian and
+<br/>East-Asian Basic Facial Expressions of Emotions
+<br/>Based on Specific Facial Regions
+<br/><b>The University of Electro-Communications, Tokyo, Japan</b><br/>How to cite this paper: Benitez-Garcia, G.,
+<br/>Nakamura, T. and Kaneko, M. (2017) Me-
+<br/>thodical Analysis of Western-Caucasian and
+<br/>East-Asian Basic Facial Expressions of Emo-
+<br/>tions Based on Specific Facial Regions. Jour-
+<br/>nal of Signal and Information Processing, 8,
+<br/>78-98.
+<br/>https://doi.org/10.4236/jsip.2017.82006
+<br/>Received: March 30, 2017
+<br/>Accepted: May 15, 2017
+<br/>Published: May 18, 2017
+<br/>Copyright © 2017 by authors and
+<br/>Scientific Research Publishing Inc.
+<br/>This work is licensed under the Creative
+<br/>Commons Attribution International
+<br/>License (CC BY 4.0).
+<br/>http://creativecommons.org/licenses/by/4.0/
+<br/>
+<br/>Open Access
+</td><td>('2567776', 'Gibran Benitez-Garcia', 'gibran benitez-garcia')<br/>('1693821', 'Tomoaki Nakamura', 'tomoaki nakamura')<br/>('49061848', 'Masahide Kaneko', 'masahide kaneko')</td><td></td></tr><tr><td>bf1e0279a13903e1d43f8562aaf41444afca4fdc</td><td> International Research Journal of Engineering and Technology (IRJET) e-ISSN: 2395-0056
+<br/> Volume: 04 Issue: 10 | Oct -2017 www.irjet.net p-ISSN: 2395-0072
+<br/>Different Viewpoints of Recognizing Fleeting Facial Expressions with
+<br/>DWT
+<br/>information
+<br/>to get desired
+<br/>information
+<br/>Introduction
+<br/>---------------------------------------------------------------------***---------------------------------------------------------------------
+</td><td>('1848141', 'SANJEEV SHRIVASTAVA', 'sanjeev shrivastava')<br/>('34417227', 'MOHIT GANGWAR', 'mohit gangwar')</td><td></td></tr><tr><td>bf0f0eb0fb31ee498da4ae2ca9b467f730ea9103</td><td>Brain Sci. 2015, 5, 369-386; doi:10.3390/brainsci5030369
+<br/>OPEN ACCESS
+<br/>brain sciences
+<br/>ISSN 2076-3425
+<br/>www.mdpi.com/journal/brainsci/
+<br/>Article
+<br/>Emotion Regulation in Adolescent Males with Attention-Deficit
+<br/>Hyperactivity Disorder: Testing the Effects of Comorbid
+<br/>Conduct Disorder
+<br/><b>School of Psychology, Cardiff University, Cardiff, CF10 3AT, UK</b><br/><b>MRC Centre for Neuropsychiatric Genetics and Genomics, Cardiff University, Cardiff</b><br/>Tel.: +44-2920-874630; Fax: +44-2920-874545.
+<br/>Received: 17 July 2015 / Accepted: 25 August 2015 / Published: 7 September 2015
+</td><td>('5383377', 'Clare Northover', 'clare northover')<br/>('4094135', 'Anita Thapar', 'anita thapar')<br/>('39373878', 'Kate Langley', 'kate langley')<br/>('4552820', 'Stephanie van Goozen', 'stephanie van goozen')<br/>('2928107', 'Derek G.V. Mitchell', 'derek g.v. mitchell')</td><td>E-Mails: NorthoverC@cardiff.ac.uk (C.N.); LangleyK@cardiff.ac.uk (K.L.)
+<br/>CF24 4HQ, UK; E-Mail: Thapar@cardiff.ac.uk
+<br/>* Author to whom correspondence should be addressed; E-Mail: vangoozens@cardiff.ac.uk;
+</td></tr><tr><td>bf3f8726f2121f58b99b9e7287f7fbbb7ab6b5f5</td><td>Visual face scanning and emotion
+<br/>perception analysis between Autistic
+<br/>and Typically Developing children
+<br/><b>University of Dhaka</b><br/><b>University of Dhaka</b><br/>Dhaka, Bangladesh
+<br/>Dhaka, Bangladesh
+</td><td>('24613724', 'Uzma Haque Syeda', 'uzma haque syeda')<br/>('24572640', 'Syed Mahir Tazwar', 'syed mahir tazwar')</td><td></td></tr><tr><td>bf4825474673246ae855979034c8ffdb12c80a98</td><td><b>UNIVERSITY OF CALIFORNIA</b><br/>RIVERSIDE
+<br/>Active Learning in Multi-Camera Networks, With Applications in Person
+<br/>Re-Identification
+<br/>A Dissertation submitted in partial satisfaction
+<br/>of the requirements for the degree of
+<br/>Doctor of Philosophy
+<br/>in
+<br/>Electrical Engineering
+<br/>by
+<br/>December 2015
+<br/>Dissertation Committee:
+</td><td>('40521893', 'Abir Das', 'abir das')<br/>('1688416', 'Amit K. Roy-Chowdhury', 'amit k. roy-chowdhury')<br/>('1751869', 'Anastasios Mourikis', 'anastasios mourikis')<br/>('1778860', 'Walid Najjar', 'walid najjar')</td><td></td></tr><tr><td>bf8a520533f401347e2f55da17383a3e567ef6d8</td><td>Bounded-Distortion Metric Learning
+<br/><b>The Chinese University of Hong Kong</b><br/><b>University of Chinese Academy of Sciences</b><br/><b>Tsinghua University</b><br/><b>The Chinese University of Hong Kong</b></td><td>('2246396', 'Renjie Liao', 'renjie liao')<br/>('1788070', 'Jianping Shi', 'jianping shi')<br/>('2376789', 'Ziyang Ma', 'ziyang ma')<br/>('37670465', 'Jun Zhu', 'jun zhu')<br/>('1729056', 'Jiaya Jia', 'jiaya jia')</td><td>rjliao,jpshi@cse.cuhk.edu.hk
+<br/>maziyang08@gmail.com
+<br/>dcszj@mail.tsinghua.edu.cn
+<br/>leojia@cse.cuhk.edu.hk
+</td></tr><tr><td>bf5940d57f97ed20c50278a81e901ae4656f0f2c</td><td>Query-free Clothing Retrieval via Implicit
+<br/>Relevance Feedback
+</td><td>('26331884', 'Zhuoxiang Chen', 'zhuoxiang chen')<br/>('1691461', 'Zhe Xu', 'zhe xu')<br/>('48380192', 'Ya Zhang', 'ya zhang')<br/>('48531192', 'Xiao Gu', 'xiao gu')</td><td></td></tr><tr><td>bff567c58db554858c7f39870cff7c306523dfee</td><td>Neural Task Graphs: Generalizing to Unseen
+<br/>Tasks from a Single Video Demonstration
+<br/><b>Stanford University</b></td><td>('38485317', 'De-An Huang', 'de-an huang')<br/>('4734949', 'Suraj Nair', 'suraj nair')<br/>('2068265', 'Danfei Xu', 'danfei xu')<br/>('2117748', 'Yuke Zhu', 'yuke zhu')<br/>('1873736', 'Animesh Garg', 'animesh garg')<br/>('3216322', 'Li Fei-Fei', 'li fei-fei')<br/>('1702137', 'Silvio Savarese', 'silvio savarese')<br/>('9200530', 'Juan Carlos Niebles', 'juan carlos niebles')</td><td></td></tr><tr><td>bfb98423941e51e3cd067cb085ebfa3087f3bfbe</td><td>Sparseness helps: Sparsity Augmented
+<br/>Collaborative Representation for Classification
+</td><td>('2941543', 'Naveed Akhtar', 'naveed akhtar')<br/>('1688013', 'Faisal Shafait', 'faisal shafait')</td><td></td></tr><tr><td>bffbd04ee5c837cd919b946fecf01897b2d2d432</td><td><b>Boston University Computer Science Technical Report No</b><br/>Facial Feature Tracking and Occlusion
+<br/>Recovery in American Sign Language
+<br/>1 Department of Computer Science, 2 Department of Modern Foreign Languages
+<br/><b>Boston University</b><br/>Facial features play an important role in expressing grammatical information
+<br/><b>in signed languages, including American Sign Language (ASL). Gestures such</b><br/>as raising or furrowing the eyebrows are key indicators of constructions such
+<br/>as yes-no questions. Periodic head movements (nods and shakes) are also an
+<br/>essential part of the expression of syntactic information, such as negation
+<br/>(associated with a side-to-side headshake). Therefore, identification of these
+<br/>facial gestures is essential to sign language recognition. One problem with
+<br/>detection of such grammatical indicators is occlusion recovery. If the signer’s
+<br/>hand blocks his/her eyebrows during production of a sign, it becomes difficult
+<br/>to track the eyebrows. We have developed a system to detect such grammatical
+<br/>markers in ASL that recovers promptly from occlusion.
+<br/>Our system detects and tracks evolving templates of facial features, which
+<br/>are based on an anthropometric face model, and interprets the geometric
+<br/>relationships of these templates to identify grammatical markers. It was tested
+<br/>on a variety of ASL sentences signed by various Deaf 1native signers and
+<br/>detected facial gestures used to express grammatical information, such as
+<br/>raised and furrowed eyebrows as well as headshakes.
+<br/>1 Introduction
+<br/>A computer-based translator of American Sign Language (ASL) would be
+<br/>useful in enabling people who do not know ASL to communicate with Deaf1
+<br/>individuals. Facial gesture interpretation would be an essential part of an in-
+<br/>terface that eliminates the language barrier between Deaf and hearing people.
+<br/>Our work focuses on facial feature detection and tracking in ASL, specifically
+<br/>in occlusion processing and recovery.
+<br/>1 The word “Deaf” is capitalized to designate those individuals who are linguisti-
+<br/>cally and culturally deaf and who use ASL as their primary language, whereas
+<br/>“deaf” refers to the status of those who cannot hear [25].
+</td><td>('2313369', 'Thomas J. Castelli', 'thomas j. castelli')<br/>('1723703', 'Margrit Betke', 'margrit betke')<br/>('1732359', 'Carol Neidle', 'carol neidle')</td><td></td></tr><tr><td>d35534f3f59631951011539da2fe83f2844ca245</td><td>Published as a conference paper at ICLR 2018
+<br/>SEMANTICALLY DECOMPOSING THE LATENT SPACES
+<br/>OF GENERATIVE ADVERSARIAL NETWORKS
+<br/>Department of Music
+<br/><b>University of California, San Diego</b><br/>Department of Genetics
+<br/><b>Stanford University</b><br/>Zachary C. Lipton
+<br/><b>Carnegie Mellon University</b><br/>Amazon AI
+<br/>Department of Computer Science
+<br/><b>University of California, San Diego</b></td><td>('1872307', 'Chris Donahue', 'chris donahue')<br/>('1693411', 'Akshay Balsubramani', 'akshay balsubramani')<br/>('1814008', 'Julian McAuley', 'julian mcauley')</td><td>cdonahue@ucsd.edu
+<br/>abalsubr@stanford.edu
+<br/>zlipton@cmu.edu
+<br/>jmcauley@eng.ucsd.edu
+</td></tr><tr><td>d3edbfe18610ce63f83db83f7fbc7634dde1eb40</td><td>Proceedings of the Thirty-First AAAI Conference on Artificial Intelligence (AAAI-17)
+<br/>Large Graph Hashing with Spectral Rotation
+<br/>School of Computer Science and Center for OPTical IMagery Analysis and Learning (OPTIMAL),
+<br/><b>Northwestern Polytechnical University</b><br/>Xi’an 710072, Shaanxi, P. R. China
+</td><td>('1720243', 'Xuelong Li', 'xuelong li')<br/>('48080389', 'Di Hu', 'di hu')<br/>('1688370', 'Feiping Nie', 'feiping nie')</td><td>xuelong li@opt.ac.cn, hdui831@mail.nwpu.edu.cn, feipingnie@gmail.com
+</td></tr><tr><td>d3424761e06a8f5f3c1f042f1f1163a469872129</td><td>Pose-invariant, model-based object
+<br/>recognition, using linear combination of views
+<br/>and Bayesian statistics.
+<br/>A dissertation submitted in partial fulfillment
+<br/>of the requirements for the degree of
+<br/>Doctor of Philosophy
+<br/>of the
+<br/><b>University of London</b><br/>Department of Computer Science
+<br/><b>University College London</b><br/>2009
+</td><td>('1797883', 'Vasileios Zografos', 'vasileios zografos')</td><td></td></tr><tr><td>d33b26794ea6d744bba7110d2d4365b752d7246f</td><td>Transfer Feature Representation via Multiple Kernel Learning
+<br/>1. Science and Technology on Integrated Information System Laboratory
+<br/>2. State Key Laboratory of Computer Science
+<br/><b>Institute of Software, Chinese Academy of Sciences, Beijing 100190, China</b></td><td>('40451597', 'Wei Wang', 'wei wang')<br/>('39483391', 'Hao Wang', 'hao wang')<br/>('1783918', 'Chen Zhang', 'chen zhang')<br/>('34532334', 'Fanjiang Xu', 'fanjiang xu')</td><td>weiwangpenny@gmail.com
+</td></tr><tr><td>d3b73e06d19da6b457924269bb208878160059da</td><td>Proceedings of the 5th International Conference on Computing and Informatics, ICOCI 2015
+<br/>11-13 August, 2015 Istanbul, Turkey. Universiti Utara Malaysia (http://www.uum.edu.my )
+<br/>Paper No.
+<br/>065
+<br/>IMPLEMENTATION OF AN AUTOMATED SMART HOME
+<br/>CONTROL FOR DETECTING HUMAN EMOTIONS VIA FACIAL
+<br/>DETECTION
+<br/>Osman4
+</td><td>('9164797', 'Lim Teck Boon', 'lim teck boon')<br/>('2229534', 'Mohd Heikal Husin', 'mohd heikal husin')<br/>('1881455', 'Zarul Fitri Zaaba', 'zarul fitri zaaba')</td><td>1Universiti Sains Malaysia, Malaysia, ltboon.ucom10@student.usm.my
+<br/>2Universiti Sains Malaysia, Malaysia, heikal@usm.my
+<br/>3Universiti Sains Malaysia, Malaysia, zarulfitri@usm.my
+<br/>4Universiti Sains Malaysia, Malaysia, azam@usm.my
+</td></tr><tr><td>d3d5d86afec84c0713ec868cf5ed41661fc96edc</td><td>A Comprehensive Analysis of Deep Learning Based Representation
+<br/>for Face Recognition
+<br/>Mostafa Mehdipour Ghazi
+<br/>Faculty of Engineering and Natural Sciences
+<br/><b>Sabanci University, Istanbul, Turkey</b><br/>Hazım Kemal Ekenel
+<br/>Department of Computer Engineering
+<br/><b>Istanbul Technical University, Istanbul, Turkey</b></td><td></td><td>mehdipour@sabanciuniv.edu
+<br/>ekenel@itu.edu.tr
+</td></tr><tr><td>d3e04963ff42284c721f2bc6a90b7a9e20f0242f</td><td>On Forensic Use of Biometrics
+<br/><b>University of Southampton, UK, 2University of Warwick, UK</b><br/>This chapter discusses the use of biometrics techniques within forensic science. It outlines the
+<br/>historic connections between the subjects and then examines face and ear biometrics as two
+<br/>case studies to demonstrate the application, the challenges and the acceptability of biometric
+<br/>features and techniques in forensics. The detailed examination starts with one of the most
+<br/>common and familiar biometric features, face, and then examines an emerging biometric
+<br/>feature, ear.
+<br/>1.1 Introduction
+<br/>Forensic science largely concerns the analysis of crime: its existence, the perpetrator(s) and
+<br/>the modus operandi. The science of biometrics has been developing approaches that can
+<br/>be used to automatically identify individuals by personal characteristics. The relationship
+<br/>of biometrics and forensics centers primarily on identifying people: the central question is
+<br/>whether a perpetrator can reliably be identified from scene-of-crime data or can reliably
+<br/>be excluded, wherein the reliability concerns reasonable doubt. The personal characteristics
+<br/>which can be used as biometrics include face, finger, iris, gait, ear, electroencephalogram
+<br/>(EEG), handwriting, voice and palm. Those which are suited to forensic use concern traces
+<br/>left at a scene-of-crime, such as latent fingerprints, palmprints or earprints, or traces which
+<br/>have been recorded, such as face, gait or ear in surveillance video.
+<br/>Biometrics is generally concerned with the recognition of individuals based on their
+<br/>physical or behavioral attributes. So far, biometric techniques have primarily been used to
+<br/>assure identity (in immigration and commerce etc.). These techniques are largely automatic
+<br/>or semi-automatic approaches steeped in pattern recognition and computer vision. The main
+<br/>steps of a biometric recognition approach are: (1) acquisition of the biometric data; (2)
+<br/>localization and alignment of the data; (3) feature extraction; and (4) matching. Feature
+<br/>This is a Book Title Name of the Author/Editor
+<br/>c(cid:13) XXXX John Wiley & Sons, Ltd
+</td><td>('2804800', 'Banafshe Arbab-Zavar', 'banafshe arbab-zavar')<br/>('40655450', 'Xingjie Wei', 'xingjie wei')<br/>('2365596', 'John D. Bustard', 'john d. bustard')<br/>('1727698', 'Mark S. Nixon', 'mark s. nixon')<br/>('1799504', 'Chang-Tsun Li', 'chang-tsun li')</td><td>1{baz10v,jdb,msn}@ecs.soton.ac.uk, 2{x.wei, c-t.li}@warwick.ac.uk
+</td></tr><tr><td>d3d71a110f26872c69cf25df70043f7615edcf92</td><td>2736
+<br/>Learning Compact Feature Descriptor and Adaptive
+<br/>Matching Framework for Face Recognition
+<br/>improvements
+</td><td>('1911510', 'Zhifeng Li', 'zhifeng li')<br/>('2856494', 'Dihong Gong', 'dihong gong')<br/>('1720243', 'Xuelong Li', 'xuelong li')<br/>('1692693', 'Dacheng Tao', 'dacheng tao')</td><td></td></tr><tr><td>d35c82588645b94ce3f629a0b98f6a531e4022a3</td><td>Scalable Online Annotation &
+<br/>Object Localisation
+<br/>For Broadcast Media Production
+<br/>Submitted for the Degree of
+<br/>Master of Philosophy
+<br/>from the
+<br/><b>University of Surrey</b><br/>Centre for Vision, Speech and Signal Processing
+<br/>Faculty of Engineering and Physical Sciences
+<br/><b>University of Surrey</b><br/>Guildford, Surrey GU2 7XH, U.K.
+<br/>August 2016
+</td><td>('39222045', 'Charles Gray', 'charles gray')<br/>('39222045', 'Charles Gray', 'charles gray')</td><td></td></tr><tr><td>d3b18ba0d9b247bfa2fb95543d172ef888dfff95</td><td>Learning and Using the Arrow of Time
+<br/><b>Harvard University 2University of Southern California</b><br/><b>University of Oxford 4Massachusetts Institute of Technology 5Google Research</b><br/>(a)
+<br/>(c)
+<br/>(b)
+<br/>(d)
+<br/>Figure 1: Seeing these ordered frames from videos, can you tell whether each video is playing forward or backward? (answer
+<br/>below1). Depending on the video, solving the task may require (a) low-level understanding (e.g. physics), (b) high-level
+<br/>reasoning (e.g. semantics), or (c) familiarity with very subtle effects or with (d) camera conventions. In this work, we learn
+<br/>and exploit several types of knowledge to predict the arrow of time automatically with neural network models trained on
+<br/>large-scale video datasets.
+</td><td>('1766333', 'Donglai Wei', 'donglai wei')<br/>('1688869', 'Andrew Zisserman', 'andrew zisserman')<br/>('1768236', 'William T. Freeman', 'william t. freeman')</td><td>donglai@seas.harvard.edu, limjj@usc.edu, az@robots.ox.ac.uk, billf@mit.edu
+</td></tr><tr><td>d309e414f0d6e56e7ba45736d28ee58ae2bad478</td><td>Efficient Two-Stream Motion and Appearance 3D CNNs for
+<br/>Video Classification
+<br/>Ali Diba
+<br/>ESAT-KU Leuven
+<br/>Ali Pazandeh
+<br/>Sharif UTech
+<br/>Luc Van Gool
+<br/>ESAT-KU Leuven, ETH Zurich
+</td><td></td><td>ali.diba@esat.kuleuven.be
+<br/>pazandeh@ee.sharif.ir
+<br/>luc.vangool@esat.kuleuven.be
+</td></tr><tr><td>d394bd9fbaad1f421df8a49347d4b3fca307db83</td><td>Recognizing Facial Expressions at Low Resolution
+<br/><b>Deparment of Computer Science, Queen Mary, University of London, London, E1 4NS, UK</b></td><td>('10795229', 'Caifeng Shan', 'caifeng shan')<br/>('2073354', 'Shaogang Gong', 'shaogang gong')<br/>('2803283', 'Peter W. McOwan', 'peter w. mcowan')</td><td>{cfshan, sgg, pmco}@dcs.qmul.ac.uk
+</td></tr><tr><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td><td></td><td></td><td></td></tr><tr><td>d3b550e587379c481392fb07f2cbbe11728cf7a6</td><td>Small Sample Size Face Recognition using Random Quad-Tree based
+<br/>Ensemble Algorithm
+<br/><b>Graduate School of Informatics, Kyoto University, Kyoto 606-8501, Japan</b></td><td>('7923772', 'Cuicui Zhang', 'cuicui zhang')<br/>('2735528', 'Xuefeng Liang', 'xuefeng liang')<br/>('1731351', 'Takashi Matsuyama', 'takashi matsuyama')</td><td>zhang@vision.kuee.kyoto-u.ac.jp, fxliang, tmg@i.kyoto-u.ac.jp
+</td></tr><tr><td>d307a766cc9c728a24422313d4c3dcfdb0d16dd5</td><td>Deep Keyframe Detection in Human Action Videos
+<br/><b>School of Physics and Optoelectronic Engineering, Xidian University, China</b><br/><b>School of Computer Science and Software Engineering, University of Western Australia</b><br/><b>College of Electrical and Information Engineering, Hunan University, China</b><br/><b>School of Software, Xidian University, China</b></td><td>('46580760', 'Xiang Yan', 'xiang yan')<br/>('1746166', 'Syed Zulqarnain Gilani', 'syed zulqarnain gilani')<br/>('2404621', 'Hanlin Qin', 'hanlin qin')<br/>('3446916', 'Mingtao Feng', 'mingtao feng')<br/>('48570713', 'Liang Zhang', 'liang zhang')<br/>('46332747', 'Ajmal Mian', 'ajmal mian')</td><td>xyan@stu.xidian.edu.cn, hlqin@mail.xidian.edu.cn
+<br/>{zulqarnain.gilani, ajmal.mian}@uwa.edu.au
+<br/>mintfeng@hnu.edu.cn
+<br/>liangzhang@xidian.edu.cn
+</td></tr><tr><td>d31af74425719a3840b496b7932e0887b35e9e0d</td><td>Article
+<br/>A Multimodal Deep Log-Based User Experience (UX)
+<br/>Platform for UX Evaluation
+<br/><b>Ubiquitous Computing Lab, Kyung Hee University</b><br/><b>College of Electronics and Information Engineering, Sejong University</b><br/>Received: 16 March 2018; Accepted: 15 May 2018; Published: 18 May 2018
+</td><td>('33081617', 'Jamil Hussain', 'jamil hussain')<br/>('2794241', 'Wajahat Ali Khan', 'wajahat ali khan')<br/>('27531310', 'Anees Ul Hassan', 'anees ul hassan')<br/>('1765947', 'Muhammad Afzal', 'muhammad afzal')<br/>('1700806', 'Sungyoung Lee', 'sungyoung lee')</td><td>Giheung-gu, Yongin-si, Gyeonggi-do, Seoul 446-701, Korea; jamil@oslab.khu.ac.kr (J.H.);
+<br/>wajahat.alikhan@oslab.khu.ac.kr (W.A.K.); hth@oslab.khu.ac.kr (T.H.); bilalrizvi@oslab.khu.ac.kr (H.S.M.B.);
+<br/>jhb@oslab.khu.ac.kr (J.B.); anees@oslab.khu.ac.kr (A.U.H.)
+<br/>Seoul 05006, Korea; mafzal@sejong.ac.kr
+<br/>* Correspondence: sylee@oslab.khu.ac.kr; Tel.: +82-31-201-2514
+</td></tr><tr><td>d3b0839324d0091e70ce34f44c979b9366547327</td><td>Precise Box Score: Extract More Information from Datasets to Improve the
+<br/>Performance of Face Detection
+<br/>1School of Information and Communication Engineering
+<br/>2Beijing Key Laboratory of Network System and Network Culture
+<br/><b>Beijing University of Posts and Telecommunications, Beijing, China</b></td><td>('49712251', 'Ce Qi', 'ce qi')<br/>('1684263', 'Fei Su', 'fei su')<br/>('8120542', 'Pingyu Wang', 'pingyu wang')</td><td></td></tr><tr><td>d30050cfd16b29e43ed2024ae74787ac0bbcf2f7</td><td>Facial Expression Classification Using
+<br/>Convolutional Neural Network and Support Vector
+<br/>Machine
+<br/>Graduate Program in Electrical and Computer Engineering
+<br/><b>Federal University of Technology - Paran a</b><br/>Department of Electrical and Computer Engineering
+<br/><b>Opus College of Engineering</b><br/><b>Marquette University</b></td><td>('11857183', 'Cristian Bortolini', 'cristian bortolini')<br/>('2357308', 'Humberto R. Gamba', 'humberto r. gamba')<br/>('2432946', 'Gustavo Benvenutti Borba', 'gustavo benvenutti borba')<br/>('2767912', 'Henry Medeiros', 'henry medeiros')</td><td>Email: vpillajr@mail.com
+</td></tr><tr><td>d3faed04712b4634b47e1de0340070653546deb2</td><td>Neural Best-Buddies: Sparse Cross-Domain Correspondence
+<br/>Fig. 1. Top 5 Neural Best-Buddies for two cross-domain image pairs. Using deep features of a pre-trained neural network, our coarse-to-fine sparse
+<br/>correspondence algorithm first finds high-level, low resolution, semantically matching areas (indicated by the large blue circles), then narrows down the search
+<br/>area to intermediate levels (middle green circles), until precise localization on well-defined edges in the pixel space (colored in corresponding unique colors).
+<br/>Correspondence between images is a fundamental problem in computer
+<br/>vision, with a variety of graphics applications. This paper presents a novel
+<br/>method for sparse cross-domain correspondence. Our method is designed for
+<br/>pairs of images where the main objects of interest may belong to different
+<br/>semantic categories and differ drastically in shape and appearance, yet still
+<br/>contain semantically related or geometrically similar parts. Our approach
+<br/>operates on hierarchies of deep features, extracted from the input images
+<br/>by a pre-trained CNN. Specifically, starting from the coarsest layer in both
+<br/>hierarchies, we search for Neural Best Buddies (NBB): pairs of neurons
+<br/>that are mutual nearest neighbors. The key idea is then to percolate NBBs
+<br/>through the hierarchy, while narrowing down the search regions at each
+<br/>level and retaining only NBBs with significant activations. Furthermore, in
+<br/>order to overcome differences in appearance, each pair of search regions is
+<br/>transformed into a common appearance.
+<br/>We evaluate our method via a user study, in addition to comparisons
+<br/>with alternative correspondence approaches. The usefulness of our method
+<br/><b>is demonstrated using a variety of graphics applications, including cross</b><br/>domain image alignment, creation of hybrid images, automatic image mor-
+<br/>phing, and more.
+<br/>CCS Concepts: • Computing methodologies → Interest point and salient
+<br/>region detections; Matching; Image manipulation;
+<br/><b>University</b><br/>© 2018 Association for Computing Machinery.
+<br/>This is the author’s version of the work. It is posted here for your personal use. Not for
+<br/>redistribution. The definitive Version of Record was published in ACM Transactions on
+<br/>Graphics, https://doi.org/10.1145/3197517.3201332.
+<br/>Additional Key Words and Phrases: cross-domain correspondence, image
+<br/>hybrids, image morphing
+<br/>ACM Reference Format:
+<br/>Cohen-Or. 2018. Neural Best-Buddies: Sparse Cross-Domain Correspon-
+<br/>//doi.org/10.1145/3197517.3201332
+<br/>INTRODUCTION
+<br/>Finding correspondences between a pair of images has been a long
+<br/>standing problem, with a multitude of applications in computer
+<br/>vision and graphics. In particular, sparse sets of corresponding point
+<br/>pairs may be used for tasks such as template matching, image align-
+<br/>ment, and image morphing, to name a few. Over the years, a variety
+<br/>of dense and sparse correspondence methods have been developed,
+<br/>most of which assume that the input images depict the same scene
+<br/>or object (with differences in viewpoint, lighting, object pose, etc.),
+<br/>or a pair of objects from the same class.
+<br/>In this work, we are concerned with sparse cross-domain corre-
+<br/>spondence: a more general and challenging version of the sparse
+<br/>correspondence problem, where the object of interest in the two
+<br/>input images can differ more drastically in their shape and appear-
+<br/>ance, such as objects belonging to different semantic categories
+<br/>(domains). It is, however, assumed that the objects contain at least
+<br/>some semantically related parts or geometrically similar regions, oth-
+<br/>erwise the correspondence task cannot be considered well-defined.
+<br/>Two examples of cross-domain scenarios and the results of our ap-
+<br/>proach are shown in Figure 1. We focus on sparse correspondence,
+<br/>since in many cross-domain image pairs, dense correspondence
+<br/>ACM Transactions on Graphics, Vol. 37, No. 4, Article 69. Publication date: August 2018.
+</td><td>('3451442', 'Kfir Aberman', 'kfir aberman')<br/>('39768043', 'Jing Liao', 'jing liao')<br/>('5807605', 'Mingyi Shi', 'mingyi shi')<br/>('1684384', 'Dani Lischinski', 'dani lischinski')<br/>('1748939', 'Baoquan Chen', 'baoquan chen')<br/>('1701009', 'Daniel Cohen-Or', 'daniel cohen-or')<br/>('3451442', 'Kfir Aberman', 'kfir aberman')<br/>('39768043', 'Jing Liao', 'jing liao')<br/>('5807605', 'Mingyi Shi', 'mingyi shi')<br/>('1684384', 'Dani Lischinski', 'dani lischinski')<br/>('1748939', 'Baoquan Chen', 'baoquan chen')<br/>('1701009', 'Daniel Cohen-Or', 'daniel cohen-or')<br/>('3451442', 'Kfir Aberman', 'kfir aberman')<br/>('39768043', 'Jing Liao', 'jing liao')<br/>('5807605', 'Mingyi Shi', 'mingyi shi')<br/>('1684384', 'Dani Lischinski', 'dani lischinski')<br/>('1748939', 'Baoquan Chen', 'baoquan chen')</td><td></td></tr><tr><td>d3c004125c71942846a9b32ae565c5216c068d1e</td><td>RESEARCH ARTICLE
+<br/>Recognizing Age-Separated Face Images:
+<br/>Humans and Machines
+<br/><b>West Virginia University, Morgantown, West Virginia, United States of America, 2. IIIT Delhi, New Delhi</b><br/>Delhi, India
+</td><td>('3017294', 'Daksha Yadav', 'daksha yadav')<br/>('39129417', 'Richa Singh', 'richa singh')<br/>('2338122', 'Mayank Vatsa', 'mayank vatsa')<br/>('2487227', 'Afzel Noore', 'afzel noore')</td><td>*mayank@iiitd.ac.in
+</td></tr><tr><td>d350a9390f0818703f886138da27bf8967fe8f51</td><td>LIGHTING DESIGN FOR PORTRAITS WITH A VIRTUAL LIGHT STAGE
+<br/><b>Institute for Vision and Graphics, University of Siegen, Germany</b></td><td>('1967283', 'Davoud Shahlaei', 'davoud shahlaei')<br/>('2712313', 'Marcel Piotraschke', 'marcel piotraschke')<br/>('2880906', 'Volker Blanz', 'volker blanz')</td><td></td></tr><tr><td>d33fcdaf2c0bd0100ec94b2c437dccdacec66476</td><td>Neurons with Paraboloid Decision Boundaries for
+<br/>Improved Neural Network Classification
+<br/>Performance
+</td><td>('2320550', 'Nikolaos Tsapanos', 'nikolaos tsapanos')<br/>('1737071', 'Anastasios Tefas', 'anastasios tefas')<br/>('1698588', 'Ioannis Pitas', 'ioannis pitas')</td><td></td></tr><tr><td>d4a5eaf2e9f2fd3e264940039e2cbbf08880a090</td><td>An Occluded Stacked Hourglass Approach to Facial
+<br/>Landmark Localization and Occlusion Estimation
+<br/><b>University of California San Diego</b></td><td>('2812409', 'Kevan Yuen', 'kevan yuen')</td><td>kcyuen@eng.ucsd.edu, mtrivedi@eng.ucsd.edu
+</td></tr><tr><td>d46b790d22cb59df87f9486da28386b0f99339d3</td><td>Learning Face Deblurring Fast and Wide
+<br/><b>University of Bern</b><br/>Switzerland
+<br/>Amazon Research
+<br/>Germany
+<br/><b>University of Bern</b><br/>Switzerland
+</td><td>('39866194', 'Meiguang Jin', 'meiguang jin')<br/>('36266446', 'Michael Hirsch', 'michael hirsch')<br/>('1739080', 'Paolo Favaro', 'paolo favaro')</td><td>jin@inf.unibe.ch
+<br/>hirsch@amazon.com
+<br/>favaro@inf.unibe.ch
+</td></tr><tr><td>d41c11ebcb06c82b7055e2964914b9af417abfb2</td><td>CDI-Type I: Unsupervised and Weakly-Supervised
+<br/>1 Introduction
+<br/>Discovery of Facial Events
+<br/>The face is one of the most powerful channels of nonverbal communication. Facial expression has been a
+<br/>focus of emotion research for over a hundred years [12]. It is central to several leading theories of emotion
+<br/>[18, 31, 54] and has been the focus of at times heated debate about issues in emotion science [19, 24, 50].
+<br/><b>Facial expression gures prominently in research on almost every aspect of emotion, including psychophys</b><br/>iology [40], neural correlates [20], development [11], perception [4], addiction [26], social processes [30],
+<br/>depression [49] and other emotion disorders [55], to name a few. In general, facial expression provides cues
+<br/>about emotional response, regulates interpersonal behavior, and communicates aspects of psychopathology.
+<br/>Because of its importance to behavioral science and the emerging fields of computational behavior
+<br/>science, perceptual computing, and human-robot interaction, significant efforts have been applied toward
+<br/>developing algorithms that automatically detect facial expression. With few exceptions, previous work on
+<br/>facial expression relies on supervised approaches to learning (i.e. event categories are defined in advance
+<br/>in labeled training data). While supervised learning has important advantages, two critical limitations may
+<br/>be noted. One, because labeling facial expression is highly labor intensive, progress in automated facial
+<br/>expression recognition and analysis is slowed. For the most detailed and comprehensive labeling or coding
+<br/>systems, such as Facial Action Coding System (FACS), three to four months is typically required to train
+<br/>a coder (’coding’ refers to the labeling of video using behavioral descriptors). Once trained, each minute
+<br/>of video may require 1 hour or more to code [9]. No wonder relatively few databases are yet available,
+<br/>especially those of real-world rather than posed behavior [61]. Second, research has been limited to the
+<br/>perceptual categories used by human observers. Those categories were operationalized in large part based
+<br/>on technology available in the past [36]. While a worthy goal of computer vision and machine learning
+<br/>is to efficiently replicate human-based measurement, should that be our only goal? New measurement
+<br/>approaches make possible new scientific discoveries. Two in particular, unsupervised and weakly-supervised
+<br/>learning have the potential to inform new ways of perceiving and modeling human behavior, to impact the
+<br/>infrastructure of science, and contribute to the design of perceptual computing applications.
+<br/>We propose that unsupervised and weakly-supervised approaches to automatic facial expression analysis
+<br/>can increase the efficiency of current measurement approaches in behavioral science, demonstrate conver-
+<br/>gent validity with supervised approaches, and lead to new knowledge in clinical and developmental science.
+<br/>Specifically, we will:
+<br/>• Develop two novel non-parametric algorithms for unsupervised and weakly-supervised time-series
+<br/>analysis. The proposed approaches are general and can be applied to a myriad of problems in behav-
+<br/>ioral science and computer vision (e.g., gesture or activity recognition).
+<br/>• Exploit the potential of these algorithms in four applications:
+<br/>1) New tools to improve the reliability and utility of human FACS coding. Using unsupervised learn-
+<br/>ing, we will develop and validate a computer-assisted approach to FACS coding that doubles the
+<br/>efficiency of human FACS coding.
+<br/>2) At present, taxonomies of facial expression are based on FACS or other observer-based schemes.
+<br/>Consequently, approaches to automatic facial expression recognition are dependent on access to cor-
+<br/>puses of FACS or similarly labeled video. In the proposed work we raise the question of whether
+</td><td></td><td></td></tr><tr><td>d444e010049944c1b3438c9a25ae09b292b17371</td><td>Structure Preserving Video Prediction
+<br/><b>Shanghai Institute for Advanced Communication and Data Science</b><br/>Shanghai Key Laboratory of Digital Media Processing and Transmission
+<br/><b>Shanghai Jiao Tong University, Shanghai 200240, China</b></td><td>('47882735', 'Jingwei Xu', 'jingwei xu')<br/>('47889348', 'Shuo Cheng', 'shuo cheng')</td><td>{xjwxjw,nibingbing,Leezf,xkyang}@sjtu.edu.cn, acccheng94@gmail.com
+</td></tr><tr><td>d46fda4b49bbc219e37ef6191053d4327e66c74b</td><td>Facial Expression Recognition Based on Complexity Perception Classification
+<br/>Algorithm
+<br/><b>School of Computer Science and Engineering, South China University of Technology, Guangzhou, China</b></td><td>('36047279', 'Tianyuan Chang', 'tianyuan chang')<br/>('9725901', 'Guihua Wen', 'guihua wen')<br/>('39946628', 'Yang Hu', 'yang hu')<br/>('35847383', 'JiaJiong Ma', 'jiajiong ma')</td><td>tianyuan_chang@163.com, crghwen@scut.edu.cn
+</td></tr><tr><td>d448d67c6371f9abf533ea0f894ef2f022b12503</td><td>Weakly Supervised Collective Feature Learning from Curated Media
+<br/>1. NTT Communication Science Laboratories, Japan.
+<br/><b>University of Cambridge, United Kingdom</b><br/><b>The University of Tokyo, Japan</b><br/><b>Technical University of Munich, Germany</b><br/>5. Uber AI Labs, USA.
+</td><td>('2374364', 'Yusuke Mukuta', 'yusuke mukuta')<br/>('34454585', 'Akisato Kimura', 'akisato kimura')<br/>('2584289', 'David B. Adrian', 'david b. adrian')<br/>('1983575', 'Zoubin Ghahramani', 'zoubin ghahramani')</td><td>mukuta@mi.t.u-tokyo.ac.jp, akisato@ieee.org, david.adrian@tum.de, zoubin@eng.cam.ac.uk
+</td></tr><tr><td>d492dbfaa42b4f8b8a74786d7343b3be6a3e9a1d</td><td>Deep Cost-Sensitive and Order-Preserving Feature Learning for
+<br/>Cross-Population Age Estimation
+<br/><b>National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences</b><br/><b>University of Chinese Academy of Sciences</b><br/>3 KingSoft Ltd.
+<br/>4 CAS Center for Excellence in Brain Science and Intelligence Technology
+<br/>5 Vimicro AI Chip Technology Corporation
+<br/><b>Birkbeck University of London</b></td><td>('2168945', 'Kai Li', 'kai li')<br/>('1757173', 'Junliang Xing', 'junliang xing')<br/>('49734675', 'Chi Su', 'chi su')<br/>('40506509', 'Weiming Hu', 'weiming hu')<br/>('2373307', 'Yundong Zhang', 'yundong zhang')</td><td>{kai.li,jlxing,wmhu}@nlpr.ia.ac.cn suchi@kingsoft.com raymond@vimicro.com sjmaybank@dcs.bbk.ac.uk
+</td></tr><tr><td>d444368421f456baf8c3cb089244e017f8d32c41</td><td>CNN for IMU Assisted Odometry Estimation using Velodyne LiDAR
+</td><td>('3414588', 'Martin Velas', 'martin velas')<br/>('2131298', 'Michal Spanel', 'michal spanel')<br/>('1700956', 'Michal Hradis', 'michal hradis')<br/>('1785162', 'Adam Herout', 'adam herout')</td><td></td></tr><tr><td>d4885ca24189b4414031ca048a8b7eb2c9ac646c</td><td>Efficient Facial Representations for Age, Gender
+<br/>and Identity Recognition in Organizing Photo
+<br/>Albums using Multi-output CNN
+<br/><b>Samsung-PDMI Joint AI Center</b><br/>Mathematics
+<br/><b>National Research University Higher School of Economics</b><br/>Nizhny Novgorod, Russia
+</td><td>('35153729', 'Andrey V. Savchenko', 'andrey v. savchenko')</td><td></td></tr><tr><td>d4c7d1a7a03adb2338704d2be7467495f2eb6c7b</td><td></td><td></td><td></td></tr><tr><td>d4001826cc6171c821281e2771af3a36dd01ffc0</td><td>Modélisation de contextes pour l’annotation sémantique
+<br/>de vidéos
+<br/>To cite this version:
+<br/>Ecole Nationale Supérieure des Mines de Paris, 2013. Français. <NNT : 2013ENMP0051>. <pastel-
+<br/>00958135>
+<br/>HAL Id: pastel-00958135
+<br/>https://pastel.archives-ouvertes.fr/pastel-00958135
+<br/>Submitted on 11 Mar 2014
+<br/>HAL is a multi-disciplinary open access
+<br/>archive for the deposit and dissemination of sci-
+<br/>entific research documents, whether they are pub-
+<br/>lished or not. The documents may come from
+<br/>teaching and research institutions in France or
+<br/><b>abroad, or from public or private research centers</b><br/>L’archive ouverte pluridisciplinaire HAL, est
+<br/>destinée au dépôt et à la diffusion de documents
+<br/>scientifiques de niveau recherche, publiés ou non,
+<br/>émanant des établissements d’enseignement et de
+<br/>recherche français ou étrangers, des laboratoires
+<br/>publics ou privés.
+</td><td>('2482072', 'Nicolas Ballas', 'nicolas ballas')<br/>('2482072', 'Nicolas Ballas', 'nicolas ballas')</td><td></td></tr><tr><td>d46b4e6871fc9974542215f001e92e3035aa08d9</td><td>A Gabor Quotient Image for Face Recognition
+<br/>under Varying Illumination
+<br/><b>Mahanakorn University of Technology</b><br/>51 Cheum-Sampan Rd., Nong Chok, Bangkok, THAILAND 10530
+</td><td>('1805935', 'Sanun Srisuk', 'sanun srisuk')<br/>('2337544', 'Amnart Petpon', 'amnart petpon')</td><td>sanun@mut.ac.th, amnartpe@dtac.co.th
+</td></tr><tr><td>d458c49a5e34263c95b3393386b5d76ba770e497</td><td>Middle-East Journal of Scientific Research 20 (1): 01-13, 2014
+<br/>ISSN 1990-9233
+<br/>© IDOSI Publications, 2014
+<br/>DOI: 10.5829/idosi.mejsr.2014.20.01.11434
+<br/>A Comparative Analysis of Gender Classification Techniques
+<br/><b>Shaheed Zulfikar Ali Bhutto Institute of Science and Technology, Islamabad, Pakistan</b></td><td>('46883468', 'Sajid Ali Khan', 'sajid ali khan')<br/>('48767110', 'Maqsood Ahmad', 'maqsood ahmad')<br/>('2521631', 'Naveed Riaz', 'naveed riaz')</td><td></td></tr><tr><td>d454ad60b061c1a1450810a0f335fafbfeceeccc</td><td>Deep Regression Forests for Age Estimation
+<br/>1 Key Laboratory of Specialty Fiber Optics and Optical Access Networks,
+<br/><b>Shanghai Institute for Advanced Communication and Data Science</b><br/><b>School of Communication and Information Engineering, Shanghai University</b><br/><b>Johns Hopkins University</b><br/><b>College of Computer and Control Engineering, Nankai University 4 Hikvision Research</b></td><td>('41187410', 'Wei Shen', 'wei shen')<br/>('9544564', 'Yilu Guo', 'yilu guo')<br/>('47906413', 'Yan Wang', 'yan wang')<br/>('1681247', 'Kai Zhao', 'kai zhao')<br/>('49292319', 'Bo Wang', 'bo wang')</td><td>{shenwei1231,gyl.luan0,wyanny.9,zhaok1206,wangbo.yunze,alan.l.yuille}@gmail.com
+</td></tr><tr><td>d40cd10f0f3e64fd9b0c2728089e10e72bea9616</td><td>Article
+<br/>Enhancing Face Identification Using Local Binary
+<br/>Patterns and K-Nearest Neighbors
+<br/><b>School of Communication Engineering, Hangzhou Dianzi University, Xiasha Higher Education Zone</b><br/>Received: 21 March 2017; Accepted: 29 August 2017; Published: 5 September 2017
+</td><td>('11249315', 'Idelette Laure Kambi Beli', 'idelette laure kambi beli')<br/>('2826297', 'Chunsheng Guo', 'chunsheng guo')</td><td>Hangzhou 310018, China; guo.chsh@gmail.com
+<br/>* Correspondence: kblaure@yahoo.fr
+</td></tr><tr><td>d4ebf0a4f48275ecd8dbc2840b2a31cc07bd676d</td><td></td><td></td><td></td></tr><tr><td>d4e669d5d35fa0ca9f8d9a193c82d4153f5ffc4e</td><td>A Lightened CNN for Deep Face Representation
+<br/>School of Computer and Communication Engineering
+<br/><b>University of Science and Technology Beijing, Beijing, China</b><br/>National Laboratory of Pattern Recognition
+<br/><b>Institute of Automation Chinese Academy of Sciences, Beijing, China</b></td><td>('2225749', 'Xiang Wu', 'xiang wu')<br/>('1705643', 'Ran He', 'ran he')<br/>('1757186', 'Zhenan Sun', 'zhenan sun')</td><td>aflredxiangwu@gmail.com
+<br/>{rhe, znsun}@nlpr.ia.ac.cn
+</td></tr><tr><td>d46e793b945c4f391031656357625e902c4405e8</td><td>Face-off: Automatic Alteration of Facial Features
+<br/>Department of Information Management
+<br/><b>National Taiwan University of Science and Technology</b><br/>No. 43, Sec. 4, Keelung Road
+<br/>Taipei, 106, Taiwan, ROC
+</td><td>('40119465', 'Jia-Kai Chou', 'jia-kai chou')<br/>('2241272', 'Chuan-Kai Yang', 'chuan-kai yang')<br/>('2553196', 'Sing-Dong Gong', 'sing-dong gong')</td><td>A9409004@mail.ntust.edu.tw,ckyang@cs.ntust.edu.tw,hgznrn@uj.com.tw
+</td></tr><tr><td>d44a93027208816b9e871101693b05adab576d89</td><td></td><td></td><td></td></tr><tr><td>d4c2d26523f577e2d72fc80109e2540c887255c8</td><td>Face-space Action Recognition by Face-Object Interactions
+<br/><b>Weizmann Institute of Science</b><br/>Rehovot, 7610001, Israel
+</td><td>('32928116', 'Amir Rosenfeld', 'amir rosenfeld')<br/>('1743045', 'Shimon Ullman', 'shimon ullman')</td><td>{amir.rosenfeld,shimon.ullman}@weizmann.ac.il
+</td></tr><tr><td>d4b88be6ce77164f5eea1ed2b16b985c0670463a</td><td>TECHNICAL REPORT JAN.15.2016
+<br/>A Survey of Different 3D Face Reconstruction
+<br/>Methods
+<br/>Department of Computer Science and Engineering
+</td><td>('2357264', 'Amin Jourabloo', 'amin jourabloo')</td><td>jourablo@msu.edu
+</td></tr><tr><td>d44ca9e7690b88e813021e67b855d871cdb5022f</td><td>QUT Digital Repository:
+<br/>http://eprints.qut.edu.au/
+<br/>Zhang, Ligang and Tjondronegoro, Dian W. (2009) Selecting, optimizing and
+<br/>fusing ‘salient’ Gabor features for facial expression recognition. In: Neural
+<br/>Information Processing (Lecture Notes in Computer Science), 1-5 December
+<br/>2009, Hotel Windsor Suites Bangkok, Bangkok.
+<br/>
+<br/> © Copyright 2009 Springer-Verlag GmbH Berlin Heidelberg
+<br/>
+</td><td></td><td></td></tr><tr><td>baaaf73ec28226d60d923bc639f3c7d507345635</td><td><b>Stanford University</b><br/>CS229 : Machine Learning techniques
+<br/>Project report
+<br/>Emotion Classification on face images
+<br/>Authors:
+<br/>Instructor
+<br/>December 12, 2015
+</td><td>('40503018', 'Mikael Jorda', 'mikael jorda')<br/>('2765850', 'Nina Miolane', 'nina miolane')<br/>('34699434', 'Andrew Ng', 'andrew ng')</td><td></td></tr><tr><td>ba2bbef34f05551291410103e3de9e82fdf9dddd</td><td>A Study on Cross-Population Age Estimation
+<br/><b>LCSEE, West Virginia University</b><br/><b>LCSEE, West Virginia University</b></td><td>('1822413', 'Guodong Guo', 'guodong guo')<br/>('1720735', 'Chao Zhang', 'chao zhang')</td><td>guodong.guo@mai1.wvu.edu
+<br/>cazhang@mix.wvu.edu
+</td></tr><tr><td>bafb8812817db7445fe0e1362410a372578ec1fc</td><td>805
+<br/>Image-Quality-Based Adaptive Face Recognition
+</td><td>('2284264', 'Harin Sellahewa', 'harin sellahewa')</td><td></td></tr><tr><td>baa0fe4d0ac0c7b664d4c4dd00b318b6d4e09143</td><td>International Journal of Signal Processing, Image Processing and Pattern Recognition
+<br/>Vol. 8, No. 1 (2015), pp. 9-22
+<br/>http://dx.doi.org/10.14257/ijsip.2015.8.1.02
+<br/>Facial Expression Analysis using Active Shape Model
+<br/><b>School of Engineering, University of Portsmouth, United Kingdom</b></td><td>('2226048', 'Reda Shbib', 'reda shbib')<br/>('32991189', 'Shikun Zhou', 'shikun zhou')</td><td>reda.shbib@port.ac.uk, Shikun.zhou@port.ac.uk
+</td></tr><tr><td>ba99c37a9220e08e1186f21cab11956d3f4fccc2</td><td>A Fast Factorization-based Approach to Robust PCA
+<br/><b>Southern Illinois University, Carbondale, IL 62901 USA</b></td><td>('33048613', 'Chong Peng', 'chong peng')<br/>('1686710', 'Zhao Kang', 'zhao kang')<br/>('39951979', 'Qiang Cheng', 'qiang cheng')</td><td>Email: {pchong,zhao.kang,qcheng}@siu.edu
+</td></tr><tr><td>ba816806adad2030e1939450226c8647105e101c</td><td>MindLAB at the THUMOS Challenge
+<br/>Fabi´an P´aez
+<br/>Fabio A. Gonz´alez
+<br/>MindLAB Research Group
+<br/>MindLAB Research Group
+<br/>MindLAB Research Group
+<br/>Bogot´a, Colombia
+<br/>Bogot´a, Colombia
+<br/>Bogot´a, Colombia
+</td><td>('1939861', 'Jorge A. Vanegas', 'jorge a. vanegas')</td><td>fmpaezri@unal.edu.co
+<br/>javanegasr@unal.edu.co
+<br/>fagonzalezo@unal.edu.co
+</td></tr><tr><td>badcd992266c6813063c153c41b87babc0ba36a3</td><td>Recent Advances in Object Detection in the Age
+<br/>of Deep Convolutional Neural Networks
+<br/>,1,2), Fr´ed´eric Jurie(1)
+<br/>(∗) equal contribution
+<br/>(1)Normandie Univ, UNICAEN, ENSICAEN, CNRS
+<br/>(2)Safran Electronics and Defense
+<br/>September 11, 2018
+</td><td>('51443250', 'Shivang Agarwal', 'shivang agarwal')<br/>('35527701', 'Jean Ogier Du Terrail', 'jean ogier du terrail')</td><td></td></tr><tr><td>ba788365d70fa6c907b71a01d846532ba3110e31</td><td></td><td></td><td></td></tr><tr><td>badcfb7d4e2ef0d3e332a19a3f93d59b4f85668e</td><td>The Application of Extended Geodesic Distance
+<br/>in Head Poses Estimation
+<br/><b>Institute of Computing Technology</b><br/>Chinese Academy of Sciences, Beijing 100080, China
+<br/>2 Department of Computer Science and Engineering,
+<br/><b>Harbin Institute of Technology, Harbin, China</b><br/>3 Graduate School of the Chinese Academy of Sciences, Beijing 100039, China
+</td><td>('1798982', 'Bingpeng Ma', 'bingpeng ma')<br/>('1684164', 'Fei Yang', 'fei yang')<br/>('1698902', 'Wen Gao', 'wen gao')<br/>('1740430', 'Baochang Zhang', 'baochang zhang')</td><td></td></tr><tr><td>ba8a99d35aee2c4e5e8a40abfdd37813bfdd0906</td><td>ELEKTROTEHNI ˇSKI VESTNIK 78(1-2): 12–17, 2011
+<br/>EXISTING SEPARATE ENGLISH EDITION
+<br/>Uporaba emotivno pogojenega raˇcunalniˇstva v
+<br/>priporoˇcilnih sistemih
+<br/>Marko Tkalˇciˇc, Andrej Koˇsir, Jurij Tasiˇc
+<br/>1Univerza v Ljubljani, Fakulteta za elektrotehniko, Trˇzaˇska 25, 1000 Ljubljana, Slovenija
+<br/>2Univerza v Ljubljani, Fakulteta za raˇcunalniˇstvo in informatiko, Trˇzaˇska 25, 1000 Ljubljana, Slovenija
+<br/>Povzetek. V ˇclanku predstavljamo rezultate treh raziskav, vezanih na izboljˇsanje delovanja multimedijskih
+<br/>priporoˇcilnih sistemov s pomoˇcjo metod emotivno pogojenega raˇcunalniˇstva (ang. affective computing).
+<br/>Vsebinski priporoˇcilni sistem smo izboljˇsali s pomoˇcjo metapodatkov, ki opisujejo emotivne odzive uporabnikov.
+<br/>Pri skupinskem priporoˇcilnem sistemu smo dosegli znaˇcilno izboljˇsanje v obmoˇcju hladnega zagona z uvedbo
+<br/>nove mere podobnosti, ki temelji na osebnostnem modelu velikih pet (ang. five factor model). Razvili smo tudi
+<br/>sistem za neinvazivno oznaˇcevanje vsebin z emotivnimi parametri, ki pa ˇse ni zrel za uporabo v priporoˇcilnih
+<br/>sistemih.
+<br/>Kljuˇcne besede: priporoˇcilni sistemi, emotivno pogojeno raˇcunalniˇstvo, strojno uˇcenje, uporabniˇski profil,
+<br/>emocije
+<br/>Uporaba emotivnega raˇcunalniˇstva v priporoˇcilnih
+<br/>sistemih
+<br/>In this paper we present the results of three investigations of
+<br/>our broad research on the usage of affect and personality in
+<br/>recommender systems. We improved the accuracy of content-
+<br/>based recommender system with the inclusion of affective
+<br/>parameters of user and item modeling. We improved the
+<br/>accuracy of a content filtering recommender system under the
+<br/>cold start conditions with the introduction of a personality
+<br/>based user similarity measure. Furthermore we developed a
+<br/>system for implicit tagging of content with affective metadata.
+<br/>1 UVOD
+<br/>Uporabniki (porabniki) multimedijskih (MM) vsebin so
+<br/>v ˇcedalje teˇzjem poloˇzaju, saj v veliki koliˇcini vse-
+<br/>bin teˇzko najdejo zanje primerne. Pomagajo si s pri-
+<br/>poroˇcilnimi sistemi, ki na podlagi osebnih preferenc
+<br/>uporabnikov izberejo manjˇso koliˇcino relevantnih MM
+<br/>vsebin, med katerimi uporabnik laˇze izbira. Noben danes
+<br/>znan priporoˇcilni sistem ne zadoˇsˇca v celoti potrebam
+<br/>uporabnikov, saj je izbor priporoˇcenih vsebin obiˇcajno
+<br/>nezadovoljive kakovosti [10]. Cilj tega ˇclanka je pred-
+<br/>staviti metode emotivno pogojenega raˇcunalniˇstva (ang.
+<br/>affective computing - glej [12]) za izboljˇsanje kakovosti
+<br/>priporoˇcilnih sistemov in utrditi za slovenski prostor
+<br/>novo terminologijo.
+<br/>1.1 Opis problema
+<br/>Za izboljˇsanje kakovosti priporoˇcilnih sistemov sta
+<br/>na voljo dve poti: (i) optimizacija algoritmov ali (ii)
+<br/>uporaba boljˇsih znaˇcilk, ki bolje razloˇzijo neznano
+<br/>Prejet 13. oktober, 2010
+<br/>Odobren 1. februar, 2011
+<br/>varianco [8]. V tem ˇclanku predstavljamo izboljˇsanje
+<br/>priporoˇcilnih sistemov z uporabo novih znaˇcilk, ki te-
+<br/>meljijo na emotivnih odzivih uporabnikov in na njiho-
+<br/>vih osebnostnih lastnostih. Te znaˇcilke razloˇzijo velik
+<br/>del uporabnikovih preferenc, ki se izraˇzajo v obliki
+<br/>ocen posameznih vsebin (npr. Likertova lestvica, binarne
+<br/>ocene itd.). Ocene vsebin se pri priporoˇcilnih sistemih
+<br/>zajemajo eksplicitno (ocena) ali implicitno, pri ˇcemer o
+<br/>oceni sklepamo na podlagi opazovanj (npr. ˇcas gledanja
+<br/>kot indikator vˇseˇcnosti [7].
+<br/>Izboljˇsanja uˇcinkovitosti priporoˇcilnih sistemov smo
+<br/>se lotili na treh podroˇcjih: (i) uporaba emotivnega
+<br/>modeliranja uporabnikov v vsebinskem priporoˇcilnem
+<br/>sistemu, (ii) neinvazivna (implicitna) detekcija emocij za
+<br/>emotivno modeliranje in (iii) uporaba osebnostne mere
+<br/>podobnosti v skupinskem priporoˇcilnem sistemu. Slika 1
+<br/>prikazuje arhitekturo emotivnega priporoˇcilnega sistema
+<br/>in mesta, kjer smo vnesli opisane izboljˇsave.
+<br/>Preostanek ˇclanka je strukturiran tako: v razdelku
+<br/>2 je predstavljen zajem podatkov. V razdelku 3 je
+<br/>predstavljen vsebinski priporoˇcilni sistem z emotivnimi
+<br/>metapodatki. V razdelku 4 je predstavljen skupinski
+<br/>priporoˇcilni sistem, ki uporablja mero podobnosti na
+<br/>podlagi osebnosti, v razdelku 5 pa algoritem za razpo-
+<br/>znavo emocij. Vsak od teh razdelov je sestavljen iz opisa
+<br/>eksperimenta in predstavitve rezultatov. V razdelku 6 so
+<br/>predstavljeni sklepi.
+<br/>1.2 Sorodno delo
+<br/>Najbolj groba delitev priporoˇcilnih sistemov je na vse-
+<br/>binske, skupinske ter hibridne sisteme [1]. Z izjemo vse-
+<br/>binskih priporoˇcilnih sistemov, ki sta ga razvila Arapakis
+<br/>[2] in Tkalˇciˇc [14], sorodnega dela na podroˇcju emotivno
+<br/>pogojenih priporoˇcilnih sistemov takorekoˇc ni. Panti´c in
+</td><td></td><td>E-poˇsta: avtor@naslov.com
+</td></tr><tr><td>bac11ce0fb3e12c466f7ebfb6d036a9fe62628ea</td><td>Weakly Supervised Learning of Heterogeneous
+<br/>Concepts in Videos
+<br/>Larry Davis1
+<br/><b>University of Maryland, College Park; 2Arizona State University; 3Xerox Research Centre</b><br/>India
+</td><td>('36861219', 'Sohil Shah', 'sohil shah')<br/>('40222634', 'Kuldeep Kulkarni', 'kuldeep kulkarni')<br/>('2221075', 'Arijit Biswas', 'arijit biswas')<br/>('2757149', 'Ankit Gandhi', 'ankit gandhi')<br/>('2116262', 'Om Deshmukh', 'om deshmukh')</td><td></td></tr><tr><td>ba29ba8ec180690fca702ad5d516c3e43a7f0bb8</td><td></td><td></td><td></td></tr><tr><td>ba7b12c8e2ff3c5e4e0f70b58215b41b18ff8feb</td><td>Natural and Effective Obfuscation by Head Inpainting
+<br/><b>Max Planck Institute for Informatics, Saarland Informatics Campus</b><br/>2KU-Leuven/PSI, Toyota Motor Europe (TRACE)
+<br/>3ETH Zurich
+</td><td>('32222907', 'Qianru Sun', 'qianru sun')<br/>('1681236', 'Luc Van Gool', 'luc van gool')<br/>('1697100', 'Bernt Schiele', 'bernt schiele')</td><td>{qsun, joon, schiele, mfritz}@mpi-inf.mpg.de
+<br/>{liqian.ma, luc.vangool}@esat.kuleuven.be
+<br/>vangool@vision.ee.ethz.ch
+</td></tr><tr><td>bab88235a30e179a6804f506004468aa8c28ce4f</td><td></td><td></td><td></td></tr><tr><td>badd371a49d2c4126df95120902a34f4bee01b00</td><td>GONDA, WEI, PARAG, PFISTER: PARALLEL SEPARABLE 3D CONVOLUTION
+<br/>Parallel Separable 3D Convolution for Video
+<br/>and Volumetric Data Understanding
+<br/>Harvard John A. Paulson School of
+<br/>Engineering and Applied Sciences
+<br/>Camabridge MA, USA
+<br/>Toufiq Parag
+<br/>Hanspeter Pfister
+</td><td>('49147616', 'Felix Gonda', 'felix gonda')<br/>('1766333', 'Donglai Wei', 'donglai wei')</td><td>fgonda@g.harvard.edu
+<br/>donglai@seas.harvard.edu
+<br/>paragt@seas.harvard.edu
+<br/>pfister@g.harvard.edu
+</td></tr><tr><td>a065080353d18809b2597246bb0b48316234c29a</td><td>FHEDN: A based on context modeling Feature Hierarchy
+<br/>Encoder-Decoder Network for face detection
+<br/><b>College of Computer Science, Chongqing University, Chongqing, China</b><br/><b>College of Medical Informatics, Chongqing Medical University, Chongqing, China</b><br/><b>Sichuan Fine Arts Institute, Chongqing, China</b></td><td>('6030130', 'Zexun Zhou', 'zexun zhou')<br/>('7686690', 'Zhongshi He', 'zhongshi he')<br/>('2685579', 'Ziyu Chen', 'ziyu chen')<br/>('33458882', 'Yuanyuan Jia', 'yuanyuan jia')<br/>('1768826', 'Haiyan Wang', 'haiyan wang')<br/>('8784203', 'Jinglong Du', 'jinglong du')<br/>('2961485', 'Dingding Chen', 'dingding chen')</td><td>{zexunzhou,zshe,chenziyu,yyjia,jldu,dingding}@cqu.edu.cn;{why}@scfai.edu.cn
+</td></tr><tr><td>a0f94e9400938cbd05c4b60b06d9ed58c3458303</td><td>1118
+<br/>Value-Directed Human Behavior Analysis
+<br/>from Video Using Partially Observable
+<br/>Markov Decision Processes
+</td><td>('1773895', 'Jesse Hoey', 'jesse hoey')<br/>('1710980', 'James J. Little', 'james j. little')</td><td></td></tr><tr><td>a022eff5470c3446aca683eae9c18319fd2406d5</td><td>2017-ENST-0071
+<br/>EDITE - ED 130
+<br/>Doctorat ParisTech
+<br/>T H È S E
+<br/>pour obtenir le grade de docteur délivré par
+<br/>TÉLÉCOM ParisTech
+<br/>Spécialité « SIGNAL et IMAGES »
+<br/>présentée et soutenue publiquement par
+<br/>le 15 décembre 2017
+<br/>Apprentissage Profond pour la Description Sémantique des Traits
+<br/>Visuels Humains
+<br/>Directeur de thèse : Jean-Luc DUGELAY
+<br/>Co-encadrement de la thèse : Moez BACCOUCHE
+<br/>Jury
+<br/>Mme Bernadette DORIZZI, PRU, Télécom SudParis
+<br/>Mme Jenny BENOIS-PINEAU, PRU, Université de Bordeaux
+<br/>M. Christian WOLF, MC/HDR, INSA de Lyon
+<br/>M. Patrick PEREZ, Chercheur/HDR, Technicolor Rennes
+<br/>M. Moez BACCOUCHE, Chercheur/Docteur, Orange Labs Rennes
+<br/>M. Jean-Luc DUGELAY, PRU, Eurecom Sophia Antipolis
+<br/>M. Sid-Ahmed BERRANI, Directeur de l’Innovation/HDR, Algérie Télécom
+<br/>Présidente
+<br/>Rapporteur
+<br/>Rapporteur
+<br/>Examinateur
+<br/>Encadrant
+<br/>Directeur de Thèse
+<br/>Invité
+<br/>TÉLÉCOM ParisTech
+<br/>école de l’Institut Télécom - membre de ParisTech
+<br/>N°: 2009 ENAM XXXX T H È S E </td><td>('3116433', 'Grigory Antipov', 'grigory antipov')</td><td></td></tr><tr><td>a0f193c86e3dd7e0020c0de3ec1e24eaff343ce4</td><td>JOURNAL OF INFORMATION SCIENCE AND ENGINEERING 21, 819-828 (2005)
+<br/>Short Paper_________________________________________________
+<br/>A New Classification Approach using
+<br/>Discriminant Functions
+<br/>Department of Computer Engineering
+<br/>+Department of Electrical and Electronic Engineering
+<br/><b>Sakarya University</b><br/>54187 Sakarya, Turkey
+<br/>In this study, an approach involving new types of cost functions is given for the
+<br/>construction of discriminant functions. Centers of mass, not specified a priori, around
+<br/>feature vectors are clustered using cost function. Thus, the algorithms yield both the
+<br/>centers of mass and the distinct classes.
+<br/>Keywords: classification, feature vectors, linear discriminant function, Fisher’s LDF,
+<br/>dimension reduction
+<br/>1. INTRODUCTION
+<br/>There are many algorithms for, and many applications of classification and dis-
+<br/>crimination (grouping of a set of objects into subsets of similar objects where the objects
+<br/>in different subsets are different) in several diverse fields [2-15, 23, 24], ranging from
+<br/>engineering to medicine, to econometrics, etc. Some examples are automatic target rec-
+<br/>ognition (ATR), fault and maintenance-time recognition, optical character recognition
+<br/>(OCR), speech and speaker recognition, etc.
+<br/>In this study, a new approach and algorithm to the classification problem are de-
+<br/>scribed with the goal of finding a single (possibly vector-valued) linear discriminant
+<br/>function. This approach is in terms of some optimal centers of mass for the transformed
+<br/>feature vectors of each class, the transforms being performed via the discriminant func-
+<br/>tions. As such, it follows the same philosophy which is behind the approaches such as
+<br/>principal component analysis (PCA), Fisher’s linear discriminant functions (LDF), and
+<br/>minimum total covariance (MTC) [1-16, 22, 25-28], providing alternatives which extend
+<br/>this work.
+<br/>Linear discriminant functions (LDF) are often used in pattern recognition to classify
+<br/>a given object or pattern, based on its features, into one of several given classes. For sim-
+<br/>plicity, consider the discrimination problem for two classes. Let x = [x1, x2, …, xm] be the
+<br/>Received April 28, 2003; revised March 1 and March 29, 2004; accepted May 3, 2004.
+<br/>Communicated by H. Y. Mark Liao.
+<br/>819
+</td><td>('7605725', 'Zafer Demir', 'zafer demir')<br/>('2279264', 'Erol Emre', 'erol emre')</td><td>E-mail: {askind, zdemir, eemre}@sakarya.edu.tr
+</td></tr><tr><td>a0c37f07710184597befaa7e6cf2f0893ff440e9</td><td></td><td></td><td></td></tr><tr><td>a0dc68c546e0fc72eb0d9ca822cf0c9ccb4b4c4f</td><td>Fusing with Context: a Bayesian Approach to Combining Descriptive Attributes
+<br/><b>University of Colorado at Colorado Springs and Securics, Inc., Colorado Springs, CO, USA</b><br/><b>Columbia University, New York, NY, USA</b><br/><b>University of North Carolina Wilmington, Wilmington, NC, USA</b></td><td>('2613438', 'Walter J. Scheirer', 'walter j. scheirer')<br/>('1767767', 'Peter N. Belhumeur', 'peter n. belhumeur')<br/>('1760117', 'Terrance E. Boult', 'terrance e. boult')</td><td></td></tr><tr><td>a0021e3bbf942a88e13b67d83db7cf52e013abfd</td><td>Human concerned object detecting in video
+<br/><b>School of Computer Science and Technology, Shandong Institute of Business and Technology</b><br/>Yantai, Shandong, 264005, China
+<br/><b>School of Computer Science and Technology, Shandong University</b><br/>Jinan, Shandong, 250101, China
+<br/>Received 11 December 2014
+</td><td>('2525711', 'Jinjiang LI', 'jinjiang li')<br/>('1733582', 'Jie GUO', 'jie guo')<br/>('9242942', 'Hui FAN', 'hui fan')</td><td>E-mail: lijinjiang@gmail.com
+</td></tr><tr><td>a0d6390dd28d802152f207940c7716fe5fae8760</td><td>Bayesian Face Revisited: A Joint Formulation
+<br/><b>University of Science and Technology of China</b><br/><b>The Chinese University of Hong Kong</b><br/>3 Microsoft Research Asia, Beijing, China
+</td><td>('39447786', 'Dong Chen', 'dong chen')<br/>('2032273', 'Xudong Cao', 'xudong cao')<br/>('34508239', 'Liwei Wang', 'liwei wang')<br/>('1716835', 'Fang Wen', 'fang wen')<br/>('40055995', 'Jian Sun', 'jian sun')</td><td>chendong@mail.ustc.edu.cn
+<br/>lwwang@cse.cuhk.edu.hk
+<br/>{xudongca,fangwen,jiansun}@microsoft.com
+</td></tr><tr><td>a0fb5b079dd1ee5ac6ac575fe29f4418fdb0e670</td><td></td><td></td><td></td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>Learning Deep Representation for Face
+<br/>Alignment with Auxiliary Attributes
+</td><td>('3152448', 'Zhanpeng Zhang', 'zhanpeng zhang')<br/>('47571885', 'Ping Luo', 'ping luo')<br/>('1717179', 'Chen Change Loy', 'chen change loy')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td></td></tr><tr><td>a0dfb8aae58bd757b801e2dcb717a094013bc178</td><td>Reconocimiento de expresiones faciales con base
+<br/>en la din´amica de puntos de referencia faciales
+<br/>Instituto Nacional de Astrof´ısica ´Optica y Electr´onica,
+<br/>Divisi´on de Ciencias Computacionales, Tonantzintla, Puebla,
+<br/>M´exico
+<br/>Resumen. Las expresiones faciales permiten a las personas comunicar
+<br/>emociones, y es pr´acticamente lo primero que observamos al interactuar
+<br/>con alguien. En el ´area de computaci´on, el reconocimiento de expresiones
+<br/>faciales es importante debido a que su an´alisis tiene aplicaci´on directa en
+<br/>´areas como psicolog´ıa, medicina, educaci´on, entre otras. En este articulo
+<br/>se presenta el proceso de dise˜no de un sistema para el reconocimiento de
+<br/>expresiones faciales utilizando la din´amica de puntos de referencia ubi-
+<br/>cados en el rostro, su implementaci´on, experimentos realizados y algunos
+<br/>de los resultados obtenidos hasta el momento.
+<br/>Palabras clave: Expresiones faciales, clasificaci´on, m´aquinas de soporte
+<br/>vectorial,modelos activos de apariencia.
+<br/>Facial Expressions Recognition Based on Facial
+<br/>Landmarks Dynamics
+</td><td>('40452660', 'E. Morales-Vargas', 'e. morales-vargas')<br/>('2737777', 'Hayde Peregrina-Barreto', 'hayde peregrina-barreto')</td><td>emoralesv@inaoep.mx, kargaxxi@inaoep.mx, hperegrina@inaoep.mx
+</td></tr><tr><td>a0aa32bb7f406693217fba6dcd4aeb6c4d5a479b</td><td>Cascaded Regressor based 3D Face Reconstruction
+<br/>from a Single Arbitrary View Image
+<br/><b>College of Computer Science, Sichuan University, Chengdu, China</b></td><td>('50207647', 'Feng Liu', 'feng liu')<br/>('39422721', 'Dan Zeng', 'dan zeng')<br/>('1723081', 'Jing Li', 'jing li')<br/>('7345195', 'Qijun Zhao', 'qijun zhao')</td><td>qjzhao@scu.edu.cn
+</td></tr><tr><td>a03cfd5c0059825c87d51f5dbf12f8a76fe9ff60</td><td>Simultaneous Learning and Alignment:
+<br/>Multi-Instance and Multi-Pose Learning?
+<br/>1 Comp. Science & Eng.
+<br/>Univ. of CA, San Diego
+<br/>2 Electrical Engineering
+<br/>California Inst. of Tech.
+<br/>3 Lab of Neuro Imaging
+<br/>Univ. of CA, Los Angeles
+</td><td>('2490700', 'Boris Babenko', 'boris babenko')<br/>('1736745', 'Zhuowen Tu', 'zhuowen tu')<br/>('1769406', 'Serge Belongie', 'serge belongie')</td><td>{bbabenko,sjb}@cs.ucsd.edu
+<br/>pdollar@caltech.edu
+<br/>zhuowen.tu@loni.ucla.edu
+</td></tr><tr><td>a06b6d30e2b31dc600f622ab15afe5e2929581a7</td><td>Robust Joint and Individual Variance Explained
+<br/><b>Imperial College London, UK</b><br/>2Onfido, UK
+<br/><b>Middlesex University London, UK</b></td><td>('3320415', 'Christos Sagonas', 'christos sagonas')<br/>('1780393', 'Yannis Panagakis', 'yannis panagakis')<br/>('28943361', 'Alina Leidinger', 'alina leidinger')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')</td><td>christos.sagonas@onfido.com, {i.panagakis, s.zafeiriou}@imperial.ac.uk
+</td></tr><tr><td>a0b1990dd2b4cd87e4fd60912cc1552c34792770</td><td>Deep Constrained Local Models for Facial Landmark Detection
+<br/><b>Carnegie Mellon University</b><br/>Tadas Baltruaitis
+<br/><b>Carnegie Mellon University</b><br/>5000 Forbes Ave, Pittsburgh, PA 15213, USA
+<br/>5000 Forbes Ave, Pittsburgh, PA 15213, USA
+<br/><b>Carnegie Mellon University</b><br/>5000 Forbes Ave, Pittsburgh, PA 15213, USA
+</td><td>('1783029', 'Amir Zadeh', 'amir zadeh')<br/>('1767184', 'Louis-Philippe Morency', 'louis-philippe morency')</td><td>abagherz@cs.cmu.edu
+<br/>tbaltrus@cs.cmu.edu
+<br/>morency@cs.cmu.edu
+</td></tr><tr><td>a090d61bfb2c3f380c01c0774ea17929998e0c96</td><td>On the Dimensionality of Video Bricks under Varying Illumination
+<br/>Beijing Lab of Intelligent Information Technology, School of Computer Science,
+<br/><b>Beijing Institute of Technology, Beijing 100081, PR China</b></td><td>('2852150', 'Youdong Zhao', 'youdong zhao')<br/>('38150687', 'Xi Song', 'xi song')<br/>('7415267', 'Yunde Jia', 'yunde jia')</td><td>{zyd458, songxi, jiayunde}@bit.edu.cn
+</td></tr><tr><td>a0e7f8771c7d83e502d52c276748a33bae3d5f81</td><td>Ensemble Nystr¨om
+<br/>A common problem in many areas of large-scale machine learning involves ma-
+<br/>nipulation of a large matrix. This matrix may be a kernel matrix arising in Support
+<br/>Vector Machines [9, 15], Kernel Principal Component Analysis [47] or manifold
+<br/>learning [43,51]. Large matrices also naturally arise in other applications, e.g., clus-
+<br/>tering, collaborative filtering, matrix completion, and robust PCA. For these large-
+<br/>scale problems, the number of matrix entries can easily be in the order of billions
+<br/>or more, making them hard to process or even store. An attractive solution to this
+<br/>problem involves the Nystr¨om method, in which one samples a small number of
+<br/>columns from the original matrix and generates its low-rank approximation using
+<br/>the sampled columns [53]. The accuracy of the Nystr¨om method depends on the
+<br/>number columns sampled from the original matrix. Larger the number of samples,
+<br/>higher the accuracy but slower the method.
+<br/>In the Nystr¨om method, one needs to perform SVD on a l × l matrix where l is
+<br/>the number of columns sampled from the original matrix. This SVD operation is
+<br/>typically carried out on a single machine. Thus, the maximum value of l used for an
+<br/>application is limited by the capacity of the machine. That is why in practice, one
+<br/>restricts l to be less than 20K or 30K, even when the size of matrix is in millions.
+<br/>This restricts the accuracy of the Nystr¨om method in very large-scale settings.
+<br/>This chapter describes a family of algorithms based on mixtures of Nystr¨om
+<br/>approximations called, Ensemble Nystr¨om algorithms, which yields more accurate
+<br/>low-rank approximations than the standard Nystr¨om method. The core idea of En-
+<br/>semble Nystr¨om is to sample many subsets of columns from the original matrix,
+<br/>each containing a relatively small number of columns. Then, Nystr¨om method is
+<br/><b>Division of Computer Science, University of California, Berkeley, CA, USA e-mail</b></td><td>('2794322', 'Sanjiv Kumar', 'sanjiv kumar')<br/>('1709415', 'Mehryar Mohri', 'mehryar mohri')<br/>('8395559', 'Ameet Talwalkar', 'ameet talwalkar')<br/>('2794322', 'Sanjiv Kumar', 'sanjiv kumar')<br/>('1709415', 'Mehryar Mohri', 'mehryar mohri')<br/>('8395559', 'Ameet Talwalkar', 'ameet talwalkar')</td><td>Google Research, New York, NY, USA e-mail: sanjivk@google.com
+<br/>Courant Institute, New York, NY, USA e-mail: mohri@cs.nyu.edu
+<br/>ameet@eecs.berkeley.edu
+</td></tr><tr><td>a0061dae94d916f60a5a5373088f665a1b54f673</td><td>Research Article
+<br/>Lensless computational imaging through deep
+<br/>learning
+<br/><b>Massachusetts Institute of Technology, 77 Massachusetts Avenue, Cambridge, MA</b><br/><b>Institute for Medical Engineering Science, Massachusetts Institute of Technology, 77 Massachusetts Avenue, Cambridge, MA</b><br/>3Singapore-MIT Alliance for Research and Technology (SMART) Centre, One Create Way, Singapore 117543, Singapore
+<br/>†These authors contributed equally
+<br/>Compiled March 1, 2017
+<br/>Deep learning has been proven to yield reliably generalizable answers to numerous classification and
+<br/>decision tasks. Here, we demonstrate for the first time, to our knowledge, that deep neural networks
+<br/>(DNNs) can be trained to solve inverse problems in computational imaging. We experimentally demon-
+<br/>strate a lens-less imaging system where a DNN was trained to recover a phase object given a raw
+<br/>intensity image recorded some distance away. ©
+<br/>OCIS codes:
+<br/>(110.1758) Computational imaging.
+<br/>(100.3190) Inverse problems; (100.4996) Pattern recognition, neural networks; (100.5070) Phase retrieval;
+<br/>http://dx.doi.org/10.1364/optica.XX.XXXXXX
+<br/>1. INTRODUCTION
+<br/>Neural network training can be thought of as generic function approxi-
+<br/>mation, as follows: given a training set (i.e., examples of matched input
+<br/>and output data obtained from a hitherto-unknown model), generate
+<br/>the computational architecture that most accurately maps all inputs in
+<br/>In this paper, we propose that deep neural networks may “learn” to
+<br/>approximate solutions to inverse problems in computational imaging.
+<br/>A general computational imaging system consists of a physical part
+<br/>where light propagates through one or more objects of interest as well
+<br/>as optical elements such as lenses, prisms, etc. finally producing a
+<br/>raw intensity image on a digital camera. The raw intensity image is
+<br/>then computationally processed to yield object attributes, e.g. a spatial
+<br/>map of light attenuation and/or phase delay through the object—what
+<br/>we call traditionally “intensity image” and “quantitative phase image,”
+<br/>respectively. The computational part of the system is then said to solve
+<br/>the inverse problem.
+<br/>The study of inverse problems is traced back at least a century ago
+<br/>to Tikhonov [1] and Wiener [2]. A good introductory book with rigor-
+<br/>ous but not overwhelming discussion of the underlying mathematical
+<br/>concepts, especially regularization, is [3]. During the past decade, the
+<br/>field experienced a renaissance due to the almost simultaneous matura-
+<br/>tion of two related mathematics disciplines: convex optimization and
+<br/>harmonic analysis, especially sparse representations. A light technical
+<br/>introduction to these fascinating developments is in [4].
+<br/>Neural networks have their own history of legendary ups-and-downs
+<br/>[5] culminating with an even more recent renaissance. This was driven
+<br/>by Hinton’s insight that multi-layer architectures with numerous layers,
+<br/>dubbed as “deep networks,” DNNs, can generalize better than had been
+<br/>previously thought after some simple but ingenious changes in the
+<br/>nonlinearity and training algorithms [6]. Even more recently developed
+<br/>architectures [7–9] have enabled neural networks to “learn deeper;”
+<br/>and modern DNNs have shown spectacular success at solving “hard”
+<br/>computational problems, such as: playing complex games like Atari
+<br/>[17] and Go [18], object detection [19], and image restoration (e.g.,
+<br/>colorization [20], deblurring [21–23], in-painting [24]).
+<br/>The idea of using neural networks to clean up images isn’t exactly
+<br/>new: for example, Hopfield’s associative memory network [25] was
+<br/>capable of retrieving entire faces from partially obscured inputs, and
+<br/>was implemented in an all-optical architecture [26] when computers
+<br/>weren’t nearly as powerful as they are now. Recently, Horisaki et al.
+<br/>[27] used support-vector machines, a form of bi-layer neural network
+<br/>with nonlinear discriminant functions, also to recover face images
+<br/>when the obscuration is caused by scattering media.
+<br/>The hypothesis that we set out to test in this paper is whether
+<br/>a neural network can be trained by being presented pairs of known
+<br/>objects and their raw intensity image representations on the digital
+<br/>camera of a computational imaging system; and then be used to produce
+<br/>object estimates given raw intensity images from hitherto unknown
+<br/>test objects, thus solving the inverse problem. This is a rather general
+<br/>question and may take several flavors, depending on the nature of the
+<br/>object, the physical design of the imaging system, etc. We chose to
+</td><td>('3365480', 'Ayan Sinha', 'ayan sinha')<br/>('2371140', 'Justin Lee', 'justin lee')<br/>('1804684', 'Shuai Li', 'shuai li')<br/>('2455899', 'George Barbastathis', 'george barbastathis')</td><td></td></tr><tr><td>a0848d7b1bb43f4b4f1b4016e58c830f40944817</td><td>Face matching for post-disaster family reunification
+<br/><b>Lister Hill National Center for Biomedical Communications, National Library of Medicine, National Institutes of Health</b><br/>8600 Rockville Pike, Bethesda, MD USA
+</td><td>('1744255', 'Eugene Borovikov', 'eugene borovikov')<br/>('2075836', 'Girish Lingappa', 'girish lingappa')</td><td>FaceMatch@NIH.gov
+</td></tr><tr><td>a000149e83b09d17e18ed9184155be140ae1266e</td><td>Chapter 9
+<br/>Action Recognition in Realistic
+<br/>Sports Videos
+</td><td>('1799979', 'Khurram Soomro', 'khurram soomro')<br/>('40029556', 'Amir R. Zamir', 'amir r. zamir')</td><td></td></tr><tr><td>a01f9461bc8cf8fe40c26d223ab1abea5d8e2812</td><td>Facial Age Estimation Through the Fusion of Texture
+<br/>and local appearance Descriptors
+<br/><b>DPDCE, University IUAV, Santa Croce 1957, 30135 Venice, Italy</b><br/>2 Herta Security, Pau Claris 165 4-B, 08037 Barcelona, Spain
+</td><td>('1733945', 'Andrea Prati', 'andrea prati')</td><td>huertacasado@iuav.it, aprati@iuav.it
+<br/>carles.fernandez@hertasecurity.com
+</td></tr><tr><td>a702fc36f0644a958c08de169b763b9927c175eb</td><td>FACIAL EXPRESSION RECOGNITION USING HOUGH FOREST
+<br/><b>National Tsing-Hua University, Hsin-Chu, Taiwan</b><br/><b>Asia University, Taichung, Taiwan</b></td><td>('2867389', 'Chi-Ting Hsu', 'chi-ting hsu')<br/>('2790846', 'Shih-Chung Hsu', 'shih-chung hsu')<br/>('1793389', 'Chung-Lin Huang', 'chung-lin huang')</td><td>Email: s9961601@m99.nthu.edu.tw, d9761817@oz.nthu.edu.tw, clhuang@ee.nthu.edu.tw
+</td></tr><tr><td>a7267bc781a4e3e79213bb9c4925dd551ea1f5c4</td><td>Proceedings of eNTERFACE’15
+<br/>The 11th Summer Workshop
+<br/>on Multimodal Interfaces
+<br/>August 10th - September 4th, 2015
+<br/><b>Numediart Institute, University of Mons</b><br/>Mons, Belgium
+<br/>
+</td><td></td><td></td></tr><tr><td>a784a0d1cea26f18626682ab108ce2c9221d1e53</td><td>Anchored Regression Networks applied to Age Estimation and Super Resolution
+<br/>D-ITET, ETH Zurich
+<br/>Switzerland
+<br/>D-ITET, ETH Zurich
+<br/>Merantix GmbH
+<br/>D-ITET, ETH Zurich
+<br/>ESAT, KU Leuven
+</td><td>('2794259', 'Eirikur Agustsson', 'eirikur agustsson')<br/>('1732855', 'Radu Timofte', 'radu timofte')<br/>('1681236', 'Luc Van Gool', 'luc van gool')</td><td>aeirikur@vision.ee.ethz.ch
+<br/>timofter@vision.ee.ethz.ch
+<br/>vangool@vision.ee.ethz.ch
+</td></tr><tr><td>a77e9f0bd205a7733431a6d1028f09f57f9f73b0</td><td>Multimodal feature fusion for CNN-based gait recognition: an
+<br/>empirical comparison
+<br/>F.M. Castroa,, M.J. Mar´ın-Jim´enezb, N. Guila, N. P´erez de la Blancac
+<br/><b>University of Malaga, Spain</b><br/><b>University of Cordoba, Spain</b><br/><b>University of Granada, Spain</b></td><td></td><td></td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td></td><td></td><td></td></tr><tr><td>a7d23c699a5ae4ad9b8a5cbb8c38e5c3b5f5fb51</td><td>Postgraduate Annual Research Seminar 2007 (3-4 July 2007)
+<br/>A Summary of literature review : Face Recognition
+<br/>Faculty of Computer Science & Information System,
+<br/><b>University Technology of Malaysia, 81310 Skudai, Johor, Malaysia</b></td><td></td><td>kittmee@yahoo.com; dzulkifli@fsksm.utm.my
+</td></tr><tr><td>a70e36daf934092f40a338d61e0fe27be633f577</td><td>Enhanced Facial Feature Tracking of Spontaneous and Continuous Expressions
+<br/>A.Goneid and R. El Kaliouby
+<br/><b>The American University in Cairo, Egypt</b></td><td></td><td>goneid@aucegypt.edu, ranak@aucegypt.edu
+</td></tr><tr><td>a7664247a37a89c74d0e1a1606a99119cffc41d4</td><td>Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+<br/>3287
+</td><td></td><td></td></tr><tr><td>a7191958e806fce2505a057196ccb01ea763b6ea</td><td>Convolutional Neural Network based
+<br/>Age Estimation from Facial Image and
+<br/>Depth Prediction from Single Image
+<br/>B. Eng. (Honours)
+<br/><b>Australian National University</b><br/>January 2016
+<br/>A thesis submitted for the degree of Master of Philosophy
+<br/><b>at The Australian National University</b><br/>Computer Vision Group
+<br/>Research School of Engineering
+<br/><b>College of Engineering and Computer Science</b><br/><b>The Australian National University</b></td><td>('2124180', 'Jiayan Qiu', 'jiayan qiu')</td><td></td></tr><tr><td>a7e1327bd76945a315f2869bfae1ce55bb94d165</td><td>Kernel Fisher Discriminant Analysis with Locality Preserving for Feature Extraction and
+<br/>Recognition
+<br/><b>School of Information Engineering, Guangdong Medical College, Song Shan Hu</b><br/>Dongguan, Guangdong, China
+<br/><b>Shaoguan University, Da Tang Lu</b><br/>Shaoguan, Guangdong, China
+<br/><b>School of Information Engineering, Guangdong Medical College, Song Shan Hu</b><br/>Dongguan, Guangdong, China
+</td><td>('2588058', 'Di Zhang', 'di zhang')<br/>('2007270', 'Jiazhong He', 'jiazhong he')<br/>('20374749', 'Yun Zhao', 'yun zhao')</td><td>E-mail: changnuode@163.com
+<br/>E-mail: hejiazhong@126.com
+<br/>E-mail: zyun@gdmc.edu.cn
+</td></tr><tr><td>a7a6eb53bee5e2224f2ecd56a14e3a5a717e55b9</td><td>11th International Symposium of Robotics Research (ISRR2003), pp.192-201, 2003
+<br/>Face Recognition Using Multi-viewpoint Patterns for
+<br/>Robot Vision
+<br/>Corporate Research and Development Center, TOSHIBA Corporation
+<br/>1, KomukaiToshiba-cho, Saiwai-ku, Kawasaki 212-8582 Japan
+</td><td>('1770128', 'Kazuhiro Fukui', 'kazuhiro fukui')<br/>('1708862', 'Osamu Yamaguchi', 'osamu yamaguchi')</td><td>kazuhiro.fukui@toshiba.co.jp / osamu1.yamaguchi@toshiba.co.jp
+</td></tr><tr><td>a758b744a6d6962f1ddce6f0d04292a0b5cf8e07</td><td>
+<br/>ISSN XXXX XXXX © 2017 IJESC
+<br/>
+<br/>
+<br/>Research Article Volume 7 Issue No.4
+<br/>Study on Human Face Recognition under Invariant Pose, Illumination
+<br/>and Expression using LBP, LoG and SVM
+<br/>Amrutha
+<br/>Depart ment of Co mputer Science & Engineering
+<br/><b>Mangalore Institute of Technology and Engineering, Moodabidri, Mangalore, India</b><br/>INTRODUCTION
+<br/>RELATED WORK
+<br/>Abstrac t:
+<br/>Face recognition system uses human face for the identification of the user. Face recognition is a difficu lt task there is no unique
+<br/>method that provide accurate an accurate and effic ient solution in all the situations like the face image with differen t pose ,
+<br/>illu mination and exp ression. Local Binary Pattern (LBP) and Laplac ian of Gaussian (Lo G) operators. Support Vector Machine
+<br/>classifier is used to recognize the human face. The Lo G algorith m is used to preprocess the image to detect the edges of the face
+<br/>image to get the image information. The LBP operator divides the face image into several blocks to generate the features informat ion
+<br/>on pixe l level by creating LBP labels for all the blocks of image is obtained by concatenating all the individual local histo grams.
+<br/>Support Vector Machine classifier (SVM ) is used to classify t he image. The a lgorith m performances is verified under the constraints
+<br/>like illu mination, e xp ression and pose variation
+<br/>Ke ywor ds: Face Recognition, Local Binary Pattern, Laplac ian of Gaussian, histogram, illu mination, pose angle, exp ression
+<br/>variations, SVM .
+<br/>1.
+<br/>The technology used for recognizing the face under security
+<br/>systems works on the biometric principles. There are many
+<br/>human characteristics which can be used
+<br/>for biometric
+<br/>identification such as palm, fingerprint, face, and iris etc. One
+<br/>of these biometric methods, face recognition, is advantageous
+<br/>because it can be detected from much more distance without
+<br/>need of scanning devices; this provides easy observation to
+<br/>identify individuals in a group of persons. Most of the military
+<br/>application security systems, attendance systems, authentication,
+<br/>criminal identity etc. are performed using this technology. The
+<br/>computer uses this recognition technology to identify or to
+<br/>compare the person with the same person or with some other person.
+<br/>The human faces are a very important factor to identify who the
+<br/>person is and how the people will make out his/her face. The
+<br/>images of faces are taken from a distance without having
+<br/>contact with a person, capturing the face images. Verification
+<br/>and Identification steps are used for comparison. The first
+<br/>method is verification which compares the face image with
+<br/>his/her image which is already stored in the database. It is one-to-one
+<br/>matching because it tries to match an individual against the same
+<br/>person's image stored in the database. The second method is
+<br/>called one-to-n matching because it matches an individual person's
+<br/>face image with every person's face images. If the face images
+<br/>are affected by lighting condition, different posing angle or
+<br/>different expression then it is difficult to identify the human
+<br/>face. Many algorithms are used to extract features of the face and to
+<br/>match the face images such as Principal Component Analysis
+<br/>(PCA) and Independent Component Analysis (ICA) [1], Elastic
+<br/>Bunch Graph Matching (EBGM) [2], K-nearest neighbor
+<br/>algorithm classifier and Linear Discriminant Analysis (LDA)
+<br/>[3]. This paper is organized as follows: Section II reviews the
+<br/>related works done on data security in cloud. Section III
+<br/>describes the proposed system and assumptions. Section IV
+<br/>provides the conclusion of the paper.
+<br/>2.
+<br/>the most biometric
+<br/>Face Recognition becomes one of
+<br/>authentication
+<br/>the past few years. Face
+<br/>recognition is an interesting and successful application of Pattern
+<br/>recognition and Image analysis. It compares a query face image
+<br/>against all image templates in a face database. Face recognition
+<br/>is very important due to its wide range of commercial and law
+<br/>enforcement applications, which include forensic identification,
+<br/>access control, border surveillance and human interactions and
+<br/>availability of low cost recording devices. Principal Component
+<br/>Analysis and Independent Component Analysis [1], Elastic
+<br/>Bunch Graph Matching [2], K-nearest neighbor algorithm
+<br/>classifier and Linear Discriminant Analysis [3], Local Derivative
+<br/>Pattern and Local Binary Pattern [4]. These algorithms are still
+<br/>having some problems
+<br/>the
+<br/>constraints like variations in pose, expression and illumination.
+<br/>This variation in the image degrades the performance of
+<br/>recognition rate. Local Binary Pattern (LBP) and Laplacian of
+<br/>Gaussian (LoG) is used to reduce the illumination effects by
+<br/>increasing the contrast of the image which does not affect the
+<br/>original
+<br/>image and differential excitation pixel used for
+<br/>preprocessing which is to make the algorithm invariant to the
+<br/>illumination changes
+<br/>[4]. The Local Directional Pattern
+<br/>descriptor (LDP) uses the edge values of surrounding pixels of
+<br/>the center pixel and Two Dimensional Principal Analysis (2D-
+<br/>PCA) is used for feature extraction which uses Euclidean
+<br/>distance to measure the similarity between training database
+<br/>images and test image features. The nearest neighbor classifier is
+<br/>used to classify the images [5]. To reduce the influence of
+<br/>illumination from an input image an adaptive homomorphic
+<br/>filtering is used in adaptive homomorphic eight local directional
+<br/>to recognize
+<br/>the face under
+<br/>techniques from
+<br/>International Journal of Engineering Science and Computing, April 2017 10081 http://ije sc.org/
+</td><td></td><td></td></tr><tr><td>a7c39a4e9977a85673892b714fc9441c959bf078</td><td>Automated Individualization of Deformable Eye Region Model and Its
+<br/>Application to Eye Motion Analysis
+<br/>Dept. of Media and Image Technology,
+<br/><b>Tokyo Polytechnic University</b><br/>1583 Iiyama, Atsugi,
+<br/>Kanagawa 243-0297, Japan
+<br/><b>Robotics Institute</b><br/><b>Carnegie Mellon University</b><br/>5000 Forbes Avenue Pittsburgh,
+<br/>PA 15213-3891, USA
+</td><td>('1683262', 'Tsuyoshi Moriyama', 'tsuyoshi moriyama')<br/>('1733113', 'Takeo Kanade', 'takeo kanade')</td><td>moriyama@mega.t-kougei.ac.jp
+<br/>tk@cs.cmu.edu
+</td></tr><tr><td>a75edf8124f5b52690c08ff35b0c7eb8355fe950</td><td>Authentic Emotion Detection in Real-Time Video
+<br/><b>School of Computer Science and Engineering, Sichuan University, China</b><br/><b>Faculty of Science, University of Amsterdam, The Netherlands</b><br/><b>LIACS Media Lab, Leiden University, The Netherlands</b></td><td>('1840164', 'Yafei Sun', 'yafei sun')<br/>('1703601', 'Nicu Sebe', 'nicu sebe')<br/>('1731570', 'Michael S. Lew', 'michael s. lew')<br/>('1695527', 'Theo Gevers', 'theo gevers')</td><td></td></tr><tr><td>a75ee7f4c4130ef36d21582d5758f953dba03a01</td><td>DD2427 Final Project Report
+<br/>DD2427 Final Project Report
+<br/>Human face attributes prediction with Deep
+<br/>Learning
+</td><td></td><td>moaah@kth.se
+</td></tr><tr><td>a775da3e6e6ea64bffab7f9baf665528644c7ed3</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 142 – No.9, May 2016
+<br/>Human Face Pose Estimation based on Feature
+<br/>Extraction Points
+<br/>Research scholar,
+<br/> Department of ECE
+<br/>SBSSTC, Moga Road,
+<br/> Ferozepur, Punjab, India
+</td><td></td><td></td></tr><tr><td>a703d51c200724517f099ee10885286ddbd8b587</td><td>Fuzzy Neural Networks(FNN)-based Approach for
+<br/>Personalized Facial Expression Recognition with
+<br/>Novel Feature Selection Method
+<br/>Div. of EE, Dept. of EECS, KAIST
+<br/>373-1 Guseong-dong, Yuseong-gu, Daejeon 305-701, Korea
+<br/><b>Human-friendly Welfare Robotic System Engineering Research Center, KAIST</b><br/>373-1 Guseong-dong, Yuseong-gu, Daejeon 305-701, Korea
+</td><td>('1793114', 'Dae-Jin Kim', 'dae-jin kim')<br/>('5960489', 'Kwang-Hyun Park', 'kwang-hyun park')</td><td>djkim@mail.kaist.ac.kr, zbien@ee.kaist.ac.kr
+<br/>akaii@robotian.net
+</td></tr><tr><td>a75dfb5a839f0eb4b613d150f54a418b7812aa90</td><td>MULTIBIOMETRIC SECURE SYSTEM BASED ON DEEP LEARNING
+<br/><b>West Virginia University, Morgantown, USA</b></td><td>('23980155', 'Veeru Talreja', 'veeru talreja')<br/>('1709360', 'Matthew C. Valenti', 'matthew c. valenti')<br/>('8147588', 'Nasser M. Nasrabadi', 'nasser m. nasrabadi')</td><td></td></tr><tr><td>b88ceded6467e9b286f048bb1b17be5998a077bd</td><td>Sparse Subspace Clustering via Diffusion Process
+<br/><b>Curtin University, Perth, Australia</b></td><td>('2191968', 'Qilin Li', 'qilin li')<br/>('1919769', 'Ling Li', 'ling li')<br/>('1713220', 'Wanquan Liu', 'wanquan liu')</td><td>kylinlovesummer@gmail.com
+</td></tr><tr><td>b871d1b8495025ff8a6255514ed39f7765415935</td><td>Application of Completed Local Binary Pattern for Facial Expression
+<br/>Recognition on Gabor Filtered Facial Images
+<br/><b>University of Ulsan, Ulsan, Republic of Korea</b></td><td>('2288674', 'Tanveer Ahsan', 'tanveer ahsan')</td><td>1tanveerahsan@gmail.com, 2rsbdce@yahoo.com, *3upchong@ulsan.ac.kr
+</td></tr><tr><td>b8375ff50b8a6f1a10dd809129a18df96888ac8b</td><td>Published as a conference paper at ICLR 2017
+<br/>DECOMPOSING MOTION AND CONTENT FOR
+<br/>NATURAL VIDEO SEQUENCE PREDICTION
+<br/><b>University of Michigan, Ann Arbor, USA</b><br/>2Adobe Research, San Jose, CA 95110
+<br/>3POSTECH, Pohang, Korea
+<br/><b>Beihang University, Beijing, China</b><br/>5Google Brain, Mountain View, CA 94043
+</td><td>('2241528', 'Seunghoon Hong', 'seunghoon hong')<br/>('10668384', 'Xunyu Lin', 'xunyu lin')<br/>('1697141', 'Honglak Lee', 'honglak lee')<br/>('1768964', 'Jimei Yang', 'jimei yang')<br/>('1711926', 'Ruben Villegas', 'ruben villegas')</td><td></td></tr><tr><td>b88d5e12089f6f598b8c72ebeffefc102cad1fc0</td><td>Robust 2DPCA and Its Application
+<br/><b>Xidian University</b><br/>Xi’an China
+<br/><b>Xidian University</b><br/>Xi’an China
+</td><td>('40326660', 'Qianqian Wang', 'qianqian wang')<br/>('38469552', 'Quanxue Gao', 'quanxue gao')</td><td>610887187@qq.com
+<br/>xd ste pr@163.com
+</td></tr><tr><td>b84b7b035c574727e4c30889e973423fe15560d7</td><td>Human Age Estimation Using Ranking SVM
+<br/><b>HoHai University</b><br/>2Center for Biometrics and Security Research & National Laboratory of Pattern
+<br/><b>Recognition, Institute of Automation, Chinese Academy of Sciences</b><br/>3China Research and Development Center for Internet of Thing
+</td><td>('40478348', 'Dong Cao', 'dong cao')<br/>('1718623', 'Zhen Lei', 'zhen lei')<br/>('1959072', 'Zhiwei Zhang', 'zhiwei zhang')<br/>('39189280', 'Jun Feng', 'jun feng')<br/>('34679741', 'Stan Z. Li', 'stan z. li')</td><td>fdcao,zlei,zwzhang,szlig@cbsr.ia.ac.cn, fengjun@hhu.edu.cn
+</td></tr><tr><td>b8dba0504d6b4b557d51a6cf4de5507141db60cf</td><td>Comparing Performances of Big Data Stream
+<br/>Processing Platforms with RAM3S
+</td><td></td><td></td></tr><tr><td>b89862f38fff416d2fcda389f5c59daba56241db</td><td>A Web Survey for Facial Expressions Evaluation
+<br/>Ecole Polytechnique Federale de Lausanne
+<br/><b>Signal Processing Institute</b><br/>Ecublens, 1015 Lausanne, Switzerland
+<br/>Ecole Polytechnique Federale de Lausanne, Operation Research Group
+<br/>Ecublens, 1015 Lausanne, Switzerland
+<br/>June 9, 2008
+</td><td>('2916630', 'Matteo Sorci', 'matteo sorci')<br/>('1794461', 'Gianluca Antonini', 'gianluca antonini')<br/>('1710257', 'Jean-Philippe Thiran', 'jean-philippe thiran')<br/>('1690395', 'Michel Bierlaire', 'michel bierlaire')</td><td>{Matteo.Sorci,Gianluca.Antonini,JP.Thiran}@epfl.ch
+<br/>Michel.Bierlaire@epfl.ch
+</td></tr><tr><td>b8caf1b1bc3d7a26a91574b493c502d2128791f6</td><td>RESEARCH ARTICLE
+<br/>As Far as the Eye Can See: Relationship
+<br/>between Psychopathic Traits and Pupil
+<br/>Response to Affective Stimuli
+<br/>Daniel T. Burley1*, Nicola S. Gray2,3, Robert J. Snowden1*
+<br/><b>School of Psychology, Cardiff University, Cardiff, United Kingdom, College of</b><br/><b>Human and Health Sciences, Swansea University, Swansea, United Kingdom, 3 Abertawe Bro-Morgannwg</b><br/><b>University Health Board, Swansea, United Kingdom</b></td><td></td><td>* BurleyD2@Cardiff.ac.uk (DTB); Snowden@Cardiff.ac.uk (RJS)
+</td></tr><tr><td>b8084d5e193633462e56f897f3d81b2832b72dff</td><td>DeepID3: Face Recognition with Very Deep Neural Networks
+<br/><b>The Chinese University of Hong Kong</b><br/><b>The Chinese University of Hong Kong</b><br/><b>Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences</b><br/>2SenseTime Group
+</td><td>('1681656', 'Yi Sun', 'yi sun')<br/>('1865674', 'Ding Liang', 'ding liang')<br/>('31843833', 'Xiaogang Wang', 'xiaogang wang')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td>sy011@ie.cuhk.edu.hk
+<br/>xgwang@ee.cuhk.edu.hk
+<br/>liangding@sensetime.com
+<br/>xtang@ie.cuhk.edu.hk
+</td></tr><tr><td>b8378ab83bc165bc0e3692f2ce593dcc713df34a</td><td></td><td></td><td></td></tr><tr><td>b8f3f6d8f188f65ca8ea2725b248397c7d1e662d</td><td>Selfie Detection by Synergy-Constriant Based
+<br/>Convolutional Neural Network
+<br/>Electrical and Electronics Engineering, NITK-Surathkal, India.
+</td><td>('7245071', 'Yashas Annadani', 'yashas annadani')<br/>('8341302', 'Akshay Kumar Jagadish', 'akshay kumar jagadish')<br/>('2139966', 'Krishnan Chemmangat', 'krishnan chemmangat')</td><td></td></tr><tr><td>b8ebda42e272d3617375118542d4675a0c0e501d</td><td>Deep Hashing Network for Unsupervised Domain Adaptation
+<br/><b>Center for Cognitive Ubiquitous Computing, Arizona State University, Tempe, AZ, USA</b></td><td>('3151995', 'Hemanth Venkateswara', 'hemanth venkateswara')<br/>('30443430', 'Jose Eusebio', 'jose eusebio')<br/>('2471253', 'Shayok Chakraborty', 'shayok chakraborty')<br/>('1743991', 'Sethuraman Panchanathan', 'sethuraman panchanathan')</td><td>{hemanthv, jeusebio, shayok.chakraborty, panch}@asu.edu
+</td></tr><tr><td>b85580ff2d8d8be0a2c40863f04269df4cd766d9</td><td>HCMUS team at the Multimodal Person Discovery in
+<br/>Broadcast TV Task of MediaEval 2016
+<br/>Faculty of Information Technology
+<br/><b>University of Science, Vietnam National University-Ho Chi Minh city</b></td><td>('34453615', 'Vinh-Tiep Nguyen', 'vinh-tiep nguyen')<br/>('30097677', 'Manh-Tien H. Nguyen', 'manh-tien h. nguyen')<br/>('8176737', 'Quoc-Huu Che', 'quoc-huu che')<br/>('7736164', 'Van-Tu Ninh', 'van-tu ninh')<br/>('38994364', 'Tu-Khiem Le', 'tu-khiem le')<br/>('7213584', 'Thanh-An Nguyen', 'thanh-an nguyen')<br/>('1780348', 'Minh-Triet Tran', 'minh-triet tran')</td><td>nvtiep@fit.hcmus.edu.vn, {nhmtien, cqhuu, nvtu, ltkhiem}@apcs.vn,
+<br/>1312016@student.hcmus.edu.vn, tmtriet@fit.hcmus.edu.vn
+</td></tr><tr><td>b87b0fa1ac0aad0ca563844daecaeecb2df8debf</td><td>Computational Aesthetics in Graphics, Visualization, and Imaging
+<br/>EXPRESSIVE 2015
+<br/>Non-Photorealistic Rendering of Portraits
+<br/><b>Cardiff University, UK</b><br/>Figure 1: President Obama re-rendered in “puppet” style and in the style of Julian Opie.
+</td><td>('1734823', 'Paul L. Rosin', 'paul l. rosin')<br/>('1734823', 'Paul L. Rosin', 'paul l. rosin')<br/>('7827503', 'Yu-Kun Lai', 'yu-kun lai')</td><td></td></tr><tr><td>b87db5ac17312db60e26394f9e3e1a51647cca66</td><td>Semi-definite Manifold Alignment
+<br/><b>Tsinghua University</b><br/>Beijing, China
+</td><td>('2066355', 'Liang Xiong', 'liang xiong')<br/>('34410258', 'Fei Wang', 'fei wang')<br/>('1700883', 'Changshui Zhang', 'changshui zhang')</td><td>{xiongl,feiwang03}@mails.tsinghua.edu.cn, zcs@mail.tsinghua.edu.cn
+</td></tr><tr><td>b81cae2927598253da37954fb36a2549c5405cdb</td><td>Experiments on Visual Information Extraction with the Faces of Wikipedia
+<br/>D´epartement de g´enie informatique et g´enie logiciel, Polytechnique Montr´eal
+<br/>2500, Chemin de Polytechnique, Universit´e de Montr´eal, Montr`eal, Qu´ebec, Canada
+</td><td>('2811524', 'Md. Kamrul Hasan', 'md. kamrul hasan')</td><td></td></tr><tr><td>b8a829b30381106b806066d40dd372045d49178d</td><td>1872
+<br/>A Probabilistic Framework for Joint Pedestrian Head
+<br/>and Body Orientation Estimation
+</td><td>('2869660', 'Fabian Flohr', 'fabian flohr')<br/>('1898318', 'Madalin Dumitru-Guzu', 'madalin dumitru-guzu')<br/>('34846285', 'Julian F. P. Kooij', 'julian f. p. kooij')</td><td></td></tr><tr><td>b1d89015f9b16515735d4140c84b0bacbbef19ac</td><td>Too Far to See? Not Really!
+<br/>— Pedestrian Detection with Scale-aware
+<br/>Localization Policy
+</td><td>('47957574', 'Xiaowei Zhang', 'xiaowei zhang')<br/>('50791064', 'Li Cheng', 'li cheng')<br/>('49729740', 'Bo Li', 'bo li')<br/>('2938403', 'Hai-Miao Hu', 'hai-miao hu')</td><td></td></tr><tr><td>b191aa2c5b8ece06c221c3a4a0914e8157a16129</td><td>: DEEP SPATIO-TEMPORAL MANIFOLD NETWORK FOR ACTION RECOGNITION
+<br/>Deep Spatio-temporal Manifold Network for
+<br/>Action Recognition
+<br/>Department of Computer Science
+<br/><b>China University of Mining and Technol</b><br/>ogy, Beijing, China
+<br/>Center for Research in Computer
+<br/>Vision (CRCV)
+<br/><b>University of Central Florida, Orlando</b><br/>FL, USA
+<br/>School of Automation Science and
+<br/>electrical engineering
+<br/><b>Beihang University, Beijing, China</b><br/><b>University of Chinese Academy of</b><br/>Sciences
+<br/>Beijing, China
+<br/>Northumbria University
+<br/>Newcastle, UK
+<br/>Xiamen University
+<br/>Xiamen, China
+</td><td>('2606761', 'Ce Li', 'ce li')<br/>('9497155', 'Chen Chen', 'chen chen')<br/>('1740430', 'Baochang Zhang', 'baochang zhang')<br/>('1694936', 'Qixiang Ye', 'qixiang ye')<br/>('1783847', 'Jungong Han', 'jungong han')<br/>('1725599', 'Rongrong Ji', 'rongrong ji')</td><td>celi@cumtb.edu.cn
+<br/>chenchen870713@gmail.com
+<br/>bczhang@139.com
+<br/>qxye@ucas.ac.cn
+<br/>jungonghan77@gmail.com
+<br/>rrji@xmu.edu.cn
+</td></tr><tr><td>b13bf657ca6d34d0df90e7ae739c94a7efc30dc3</td><td>Attribute and Simile Classifiers for Face Verification (In submission please do
+<br/>not distribute.)
+<br/><b>Columbia University</b><br/>New York, NY
+<br/><b>Columbia University</b><br/>New York, NY
+<br/><b>Columbia University</b><br/><b>Columbia University</b><br/>New York, NY
+</td><td>('3586464', 'Neeraj Kumar', 'neeraj kumar')<br/>('39668247', 'Alexander C. Berg', 'alexander c. berg')<br/>('1767767', 'Peter N. Belhumeur', 'peter n. belhumeur')<br/>('1750470', 'Shree K. Nayar', 'shree k. nayar')</td><td>belhumeur@cs.columbia.edu
+<br/>neeraj@cs.columbia.edu
+<br/>aberg@cs.columbia.edu
+<br/>nayar@cs.columbia.edu
+</td></tr><tr><td>b13a882e6168afc4058fe14cc075c7e41434f43e</td><td>Recognition of Humans and Their Activities Using Video
+<br/>Center for Automation Research
+<br/><b>University of Maryland</b><br/><b>College Park, MD</b><br/>Dept. of Electrical Engineering
+<br/><b>University of California</b><br/>Riverside, CA 92521
+<br/>Shaohua K. Zhou
+<br/>Siemens Research
+<br/>Princeton, NJ 08540
+</td><td>('9215658', 'Rama Chellappa', 'rama chellappa')<br/>('1688416', 'Amit K. Roy-Chowdhury', 'amit k. roy-chowdhury')</td><td></td></tr><tr><td>b14b672e09b5b2d984295dfafb05604492bfaec5</td><td>LearningImageClassificationandRetrievalModelsThomasMensink </td><td></td><td></td></tr><tr><td>b1665e1ddf9253dcaebecb48ac09a7ab4095a83e</td><td>EMOTION RECOGNITION USING FACIAL EXPRESSIONS WITH ACTIVE
+<br/>APPEARANCE MODELS
+<br/>Department of Computer Science
+<br/><b>University of North Carolina Wilmington</b><br/><b>South College Road</b><br/>Wilmington, NC, USA
+<br/>Department of Computer Science
+<br/><b>University of North Carolina Wilmington</b><br/><b>South College Road</b><br/>Wilmington, NC, USA
+</td><td>('12675740', 'Matthew S. Ratliff', 'matthew s. ratliff')<br/>('37804931', 'Eric Patterson', 'eric patterson')</td><td>msr3520@uncw.edu
+<br/>pattersone@uncw.edu
+</td></tr><tr><td>b16580d27bbf4e17053f2f91bc1d0be12045e00b</td><td>Pose-invariant Face Recognition with a
+<br/>Two-Level Dynamic Programming Algorithm
+<br/>1 Human Language Technology and Pattern Recognition Group
+<br/><b>RWTH Aachen University, Aachen, Germany</b><br/>2 Robert Bosch GmbH, Hildesheim, Germany
+</td><td>('1804963', 'Harald Hanselmann', 'harald hanselmann')<br/>('1685956', 'Hermann Ney', 'hermann ney')<br/>('1967060', 'Philippe Dreuw', 'philippe dreuw')</td><td><surname>@cs.rwth-aachen.de
+<br/>philippe.dreuw@de.bosch.com
+</td></tr><tr><td>b1b993a1fbcc827bcb99c4cc1ba64ae2c5dcc000</td><td>Deep Variation-structured Reinforcement Learning for Visual Relationship and
+<br/>Attribute Detection
+<br/><b>School of Computer Science, Carnegie Mellon University</b></td><td>('40250403', 'Xiaodan Liang', 'xiaodan liang')<br/>('1752601', 'Eric P. Xing', 'eric p. xing')<br/>('49441821', 'Lisa Lee', 'lisa lee')</td><td>{xiaodan1,lslee,epxing}@cs.cmu.edu
+</td></tr><tr><td>b11bb6bd63ee6f246d278dd4edccfbe470263803</td><td>Joint Voxel and Coordinate Regression for Accurate
+<br/>3D Facial Landmark Localization
+<br/>†Center for Research on Intelligent Perception and Computing (CRIPAC)
+<br/><b>Institute of Automation, Chinese Academy of Sciences (CASIA</b><br/>†National Laboratory of Pattern Recognition (NLPR)
+<br/><b>University of Chinese Academy of Sciences (UCAS</b><br/>§Center for Excellence in Brain Science and Intelligence Technology (CEBSIT), CAS
+</td><td>('37536613', 'Hongwen Zhang', 'hongwen zhang')<br/>('39763795', 'Qi Li', 'qi li')<br/>('1757186', 'Zhenan Sun', 'zhenan sun')</td><td>Email: hongwen.zhang@cripac.ia.ac.cn, {qli, znsun}@nlpr.ia.ac.cn
+</td></tr><tr><td>b171f9e4245b52ff96790cf4f8d23e822c260780</td><td></td><td></td><td></td></tr><tr><td>b1a3b19700b8738b4510eecf78a35ff38406df22</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TAFFC.2017.2731763, IEEE
+<br/>Transactions on Affective Computing
+<br/>JOURNAL OF LATEX CLASS FILES, VOL. 13, NO. 9, SEPTEMBER 2014
+<br/>Automatic Analysis of Facial Actions: A Survey
+<br/>and Maja Pantic, Fellow, IEEE
+</td><td>('1680608', 'Brais Martinez', 'brais martinez')<br/>('1795528', 'Michel F. Valstar', 'michel f. valstar')<br/>('39532631', 'Bihan Jiang', 'bihan jiang')</td><td></td></tr><tr><td>b166ce267ddb705e6ed855c6b679ec699d62e9cb</td><td>Turk J Elec Eng & Comp Sci
+<br/>(2017) 25: 4421 { 4430
+<br/>© TÜBİTAK
+<br/>doi:10.3906/elk-1702-49
+<br/>Sample group and misplaced atom dictionary learning for face recognition
+<br/><b>Faculty of Electronics and Communication, Yanshan University</b><br/><b>Faculty of Electronics and Communication, Taishan University</b><br/>Qinhuangdao, P.R. China
+<br/>Tai’an, P.R. China
+<br/>Received: 04.02.2017
+<br/>(cid:15)
+<br/>Accepted/Published Online: 01.06.2017
+<br/>(cid:15)
+<br/>Final Version: 05.10.2017
+</td><td>('39980529', 'Meng Wang', 'meng wang')<br/>('49576759', 'Zhe Sun', 'zhe sun')<br/>('6410069', 'Mei Zhu', 'mei zhu')<br/>('49632877', 'Mei Sun', 'mei sun')</td><td></td></tr><tr><td>b13e2e43672e66ba45d1b852a34737e4ce04226b</td><td>CROWLEY, PARKHI, ZISSERMAN: FACE PAINTING
+<br/>Face Painting: querying art with photos
+<br/>Elliot J. Crowley
+<br/>Visual Geometry Group
+<br/>Department of Engineering Science
+<br/><b>University of Oxford</b></td><td>('3188342', 'Omkar M. Parkhi', 'omkar m. parkhi')<br/>('1688869', 'Andrew Zisserman', 'andrew zisserman')</td><td>elliot@robots.ox.ac.uk
+<br/>omkar@robots.ox.ac.uk
+<br/>az@robots.ox.ac.uk
+</td></tr><tr><td>b1e4f8c15ff30cc7d35ab25ff3eddaf854e0a87c</td><td>RESEARCH ARTICLE
+<br/>Conveying facial expressions to blind and
+<br/>visually impaired persons through a wearable
+<br/>vibrotactile device
+<br/><b>MIRA Institute, University of Twente, Enschede, The</b><br/><b>Netherlands, Donders Institute, Radboud University, Nijmegen, The</b><br/>Netherlands, 3 VicarVision, Amsterdam, The Netherlands, 4 Department of Media, Communication, &
+<br/><b>Organization, University of Twente, Enschede, The Netherlands, HAN</b><br/><b>University of Applied Sciences, Arnhem, The Netherlands</b></td><td>('1950480', 'Hendrik P. Buimer', 'hendrik p. buimer')<br/>('25188062', 'Marian Bittner', 'marian bittner')<br/>('3427220', 'Tjerk Kostelijk', 'tjerk kostelijk')<br/>('49432294', 'Abdellatif Nemri', 'abdellatif nemri')<br/>('2968885', 'Richard J. A. van Wezel', 'richard j. a. van wezel')</td><td>* h.buimer@donders.ru.nl
+</td></tr><tr><td>b1301c722886b6028d11e4c2084ee96466218be4</td><td></td><td></td><td></td></tr><tr><td>b15a06d701f0a7f508e3355a09d0016de3d92a6d</td><td>Running head: FACIAL CONTRAST LOOKS HEALTHY
+<br/>1
+<br/>Facial contrast is a cue for perceiving health from the face
+<br/>Mauger2, Frederique Morizot2
+<br/><b>Gettysburg College, Gettysburg, PA, USA</b><br/>2 CHANEL Recherche et Technologie, Chanel PB
+<br/>3 Université Grenoble Alpes
+<br/>Author Note
+<br/>Psychologie et NeuroCognition, Université Grenoble Alpes.
+<br/>This is a prepublication copy. This article may not exactly replicate the authoritative document
+<br/>published in the APA journal. It is not the copy of record. The authoritative document can be
+<br/>found through this DOI: http://psycnet.apa.org/doi/10.1037/xhp0000219
+</td><td>('40482411', 'Richard Russell', 'richard russell')<br/>('4556101', 'Aurélie Porcheron', 'aurélie porcheron')<br/>('40482411', 'Richard Russell', 'richard russell')<br/>('4556101', 'Aurélie Porcheron', 'aurélie porcheron')<br/>('6258499', 'Emmanuelle Mauger', 'emmanuelle mauger')<br/>('4556101', 'Aurélie Porcheron', 'aurélie porcheron')<br/>('40482411', 'Richard Russell', 'richard russell')</td><td>College, Gettysburg, PA 17325, USA. Email: rrussell@gettysburg.edu
+</td></tr><tr><td>b1c5581f631dba78927aae4f86a839f43646220c</td><td></td><td></td><td></td></tr><tr><td>b18858ad6ec88d8b443dffd3e944e653178bc28b</td><td><b>Purdue University</b><br/>Purdue e-Pubs
+<br/>Department of Computer Science Technical
+<br/>Reports
+<br/>Department of Computer Science
+<br/>2017
+<br/>Trojaning Attack on Neural Networks
+<br/>See next page for additional authors
+<br/>Report Number:
+<br/>17-002
+<br/>Liu, Yingqi; Ma, Shiqing; Aafer, Yousra; Lee, Wen-Chuan; Zhai, Juan; Wang, Weihang; and Zhang, Xiangyu, "Trojaning Attack on
+<br/>Neural Networks" (2017). Department of Computer Science Technical Reports. Paper 1781.
+<br/>https://docs.lib.purdue.edu/cstech/1781
+<br/>additional information.
+</td><td>('3347155', 'Yingqi Liu', 'yingqi liu')<br/>('40306181', 'Shiqing Ma', 'shiqing ma')<br/>('3216258', 'Yousra Aafer', 'yousra aafer')<br/>('2547748', 'Wen-Chuan Lee', 'wen-chuan lee')<br/>('3293342', 'Juan Zhai', 'juan zhai')</td><td>Purdue University, liu1751@purdue.edu
+<br/>Purdue University, ma229@purdue.edu
+<br/>Purdue University, yaafer@purdue.edu
+<br/>Purdue University, lee1938@purdue.edu
+<br/>Nanjing University, China, zhaijuan@nju.edu.cn
+<br/>This document has been made available through Purdue e-Pubs, a service of the Purdue University Libraries. Please contact epubs@purdue.edu for
+</td></tr><tr><td>b1444b3bf15eec84f6d9a2ade7989bb980ea7bd1</td><td>LOCAL DIRECTIONAL RELATION PATTERN
+<br/>Local Directional Relation Pattern for
+<br/>Unconstrained and Robust Face Retrieval
+</td><td>('34992579', 'Shiv Ram Dubey', 'shiv ram dubey')</td><td></td></tr><tr><td>b133b2d7df9b848253b9d75e2ca5c68e21eba008</td><td><b>Kobe University, NICT and University of Siegen</b><br/>at TRECVID 2017 AVS Task
+<br/><b>Graduate School of System Informatics, Kobe University</b><br/><b>Center for Information and Neural Networks, National Institute of Information and Communications Technology (NICT</b><br/><b>Pattern Recognition Group, University of Siegen</b></td><td>('2240008', 'Zhenying He', 'zhenying he')<br/>('8183718', 'Takashi Shinozaki', 'takashi shinozaki')<br/>('1707938', 'Kimiaki Shirahama', 'kimiaki shirahama')<br/>('1727057', 'Marcin Grzegorzek', 'marcin grzegorzek')<br/>('1711781', 'Kuniaki Uehara', 'kuniaki uehara')</td><td>jennyhe@ai.cs.kobe-u.ac.jp, uehara@kobe-u.ac.jp
+<br/>tshino@nict.go.jp
+<br/>kimiaki.shirahama@uni-siegen.de, marcin.grzegorzek@uni-siegen.de
+</td></tr><tr><td>b1451721864e836069fa299a64595d1655793757</td><td>Criteria Sliders: Learning Continuous
+<br/>Database Criteria via Interactive Ranking
+<br/><b>Brown University 2University of Bath</b><br/><b>Harvard University 4Max Planck Institute for Informatics</b></td><td>('1854493', 'James Tompkin', 'james tompkin')<br/>('1808255', 'Kwang In Kim', 'kwang in kim')<br/>('1680185', 'Christian Theobalt', 'christian theobalt')</td><td></td></tr><tr><td>b1df214e0f1c5065f53054195cd15012e660490a</td><td>Supplementary Material to Sparse Coding and Dictionary Learning with Linear
+<br/>Dynamical Systems∗
+<br/><b>Tsinghua University, State Key Lab. of Intelligent</b><br/>Technology and Systems, Tsinghua National Lab. for Information Science and Technology (TNList);
+<br/><b>Australian National University and NICTA, Australia</b><br/>In this supplementary material, we present the proofs of Theorems (1-3), the algorithm for learning the transition matrix
+<br/>of LDSST, and the reconstruction error approach for classification in LDS-SC, LDSST-SC and covLDSST-SC. In addition,
+<br/>we describe the details of the benchmark datasets that are applied in our experiments. Our dictionary learning algorithm for
+<br/>anormaly detection is also explored in this supplementary material.
+<br/>1. Proofs
+<br/>Theorem 1. Suppose V1, V2, · · · , VM ∈ S(n, ∞), and y1, y2, · · · , yM ∈ R, we have
+<br/>Xi=1
+<br/>yiΠ(Vi) k2
+<br/>F =
+<br/>Xi,j=1
+<br/>yiyj k VT
+<br/>i Vj k2
+<br/>F ,
+<br/>i Oj can be computed with the Lyapunov equation defined in Equation (2), Li and Lj
+<br/>i Vj = L−1
+<br/>where VT
+<br/>are Cholesky decomposition matrices for OT
+<br/>i Oj L−T
+<br/>i OT
+<br/>. OT
+<br/>i Oi and OT
+<br/>j Oj , respectively.
+<br/>i)]T by
+<br/>Proof. We denote the sub-matrix of the extended observability matrix Oi as Oi(t) = [CT
+<br/>taking the first t rows. We suppose that the Cholesky decomposition matrix for Oi is Li and denote that Vi(t) = Oi(t)L−T
+<br/>Then, we derive
+<br/>i , (CiAi)T, · · · , (CiAt
+<br/>Xi=1
+<br/>yiΠ(Vi) k2
+<br/>F = lim
+<br/>t→∞
+<br/>= lim
+<br/>t→∞
+<br/>= lim
+<br/>t→∞
+<br/>yiVi(t)Vi(t)T
+<br/>yiVi(t)Vi(t)T k2
+<br/>Xi=1
+<br/>Tr
+<br/>Xi=1
+<br/>Xi,j=1
+<br/>yiyjTr(cid:0)Vi(t)TVj(t)Vj(t)TVi(t)(cid:1)
+<br/>yj Vj(t)Vj(t)T
+<br/>Xj=1
+<br/>yiyj lim
+<br/>t→∞
+<br/>k Vi(t)TVj(t) k2
+<br/>yiyj lim
+<br/>t→∞
+<br/>k L−1
+<br/>(Oi(t)TOj(t))L−T
+<br/>k2
+<br/>yiyj k L−1
+<br/>i Oij L−T
+<br/>k2
+<br/>F ,
+<br/>(13)
+<br/>Xi,j=1
+<br/>Xi,j=1
+<br/>Xi,j=1
+<br/>∗This work is jointly supported by National Natural Science Foundation of China under Grant No. 61327809, 61210013, 91420302 and 91520201.
+</td><td>('8984539', 'Wenbing Huang', 'wenbing huang')<br/>('40203750', 'Fuchun Sun', 'fuchun sun')<br/>('2507718', 'Lele Cao', 'lele cao')<br/>('1678783', 'Deli Zhao', 'deli zhao')<br/>('31833173', 'Huaping Liu', 'huaping liu')<br/>('23911916', 'Mehrtash Harandi', 'mehrtash harandi')</td><td>1{huangwb12@mails, fcsun@mail, caoll12@mails, hpliu@mail}.tsinghua.edu.cn,
+<br/>zhaodeli@gmail.com,
+<br/>Mehrtash.Harandi@nicta.com.au,
+</td></tr><tr><td>b185f0a39384ceb3c4923196aeed6d68830a069f</td><td>Describing Clothing by Semantic Attributes
+<br/><b>Stanford University, Stanford, California</b><br/><b>Kodak Research Laboratories, Rochester, New York</b><br/><b>Cornell University, Ithaca, New York</b></td><td>('2896700', 'Huizhong Chen', 'huizhong chen')<br/>('1739786', 'Bernd Girod', 'bernd girod')</td><td></td></tr><tr><td>b19e83eda4a602abc5a8ef57467c5f47f493848d</td><td>JOURNAL OF LATEX CLASS FILES
+<br/>Heat Kernel Based Local Binary Pattern for
+<br/>Face Representation
+</td><td>('38979129', 'Xi Li', 'xi li')<br/>('40506509', 'Weiming Hu', 'weiming hu')<br/>('1720488', 'Zhongfei Zhang', 'zhongfei zhang')<br/>('37414077', 'Hanzi Wang', 'hanzi wang')</td><td></td></tr><tr><td>b1429e4d3dd3412e92a37d2f9e0721ea719a9b9e</td><td>Person Re-identification Using Multiple First-Person-Views on Wearable Devices
+<br/><b>Nanyang Technological University, Singapore</b><br/><b>Institute for Infocomm Research (I2R), A*STAR, Singapore</b><br/>Istituto Italiano di Tecnologia (IIT), Genova, 16163, Italy
+</td><td>('37287044', 'Anirban Chakraborty', 'anirban chakraborty')<br/>('1709001', 'Bappaditya Mandal', 'bappaditya mandal')<br/>('2860592', 'Hamed Kiani Galoogahi', 'hamed kiani galoogahi')</td><td>a.chakraborty@ntu.edu.sg
+<br/>bmandal@i2r.a-star.edu.sg
+<br/>kiani.galoogahi@iit.it
+</td></tr><tr><td>b1fdd4ae17d82612cefd4e78b690847b071379d3</td><td>Supervised Descent Method
+<br/>CMU-RI-TR-15-28
+<br/>September 2015
+<br/><b>The Robotics Institute</b><br/><b>Carnegie Mellon University</b><br/>Pittsburgh, PA 15213
+<br/>Thesis Committee:
+<br/>Fernando De la Torre, Chair
+<br/>Srinivasa Narasimhan
+<br/>Kris Kitani
+<br/>Aleix Martinez
+<br/>Submitted in partial fulfillment of the requirements
+<br/>for the degree of Doctor of Philosophy in Robotics.
+</td><td>('3182065', 'Xuehan Xiong', 'xuehan xiong')<br/>('3182065', 'Xuehan Xiong', 'xuehan xiong')</td><td></td></tr><tr><td>dde5125baefa1141f1ed50479a3fd67c528a965f</td><td>Synthesizing Normalized Faces from Facial Identity Features
+<br/><b>Google, Inc. 2University of Massachusetts Amherst 3MIT CSAIL</b></td><td>('39578349', 'Forrester Cole', 'forrester cole')<br/>('1707347', 'Dilip Krishnan', 'dilip krishnan')</td><td>{fcole, dbelanger, dilipkay, sarna, inbarm, wfreeman}@google.com
+</td></tr><tr><td>dd8084b2878ca95d8f14bae73e1072922f0cc5da</td><td>Model Distillation with Knowledge Transfer from
+<br/>Face Classification to Alignment and Verification
+<br/>Beijing Orion Star Technology Co., Ltd. Beijing, China
+</td><td>('1747751', 'Chong Wang', 'chong wang')<br/>('26403761', 'Xipeng Lan', 'xipeng lan')</td><td>{chongwang.nlpr, xipeng.lan, caveman1984}@gmail.com
+</td></tr><tr><td>ddf55fc9cf57dabf4eccbf9daab52108df5b69aa</td><td>International Journal of Grid and Distributed Computing
+<br/>Vol. 4, No. 3, September, 2011
+<br/>Methodology and Performance Analysis of 3-D Facial Expression
+<br/>Recognition Using Statistical Shape Representation
+<br/><b>ADSIP Research Centre, University of Central Lancashire</b><br/><b>School of Psychology, University of Central Lancashire</b></td><td>('2343120', 'Wei Quan', 'wei quan')<br/>('2647218', 'Bogdan J. Matuszewski', 'bogdan j. matuszewski')<br/>('2550166', 'Lik-Kwan Shark', 'lik-kwan shark')<br/>('2942330', 'Charlie Frowd', 'charlie frowd')</td><td>{WQuan, BMatuszewski1, LShark}@uclan.ac.uk
+<br/>CFrowd@uclan.ac.uk
+</td></tr><tr><td>dd85b6fdc45bf61f2b3d3d92ce5056c47bd8d335</td><td>Unsupervised Learning and Segmentation of Complex Activities from Video
+<br/><b>University of Bonn, Germany</b></td><td>('34678431', 'Fadime Sener', 'fadime sener')<br/>('2569989', 'Angela Yao', 'angela yao')</td><td>{sener,yao}@cs.uni-bonn.de
+</td></tr><tr><td>dda35768681f74dafd02a667dac2e6101926a279</td><td>MULTI-LAYER TEMPORAL GRAPHICAL MODEL
+<br/>FOR HEAD POSE ESTIMATION IN REAL-WORLD VIDEOS
+<br/><b>McGill University</b><br/>Centre for Intelligent Machines,
+</td><td>('2515930', 'Meltem Demirkus', 'meltem demirkus')<br/>('1724729', 'Doina Precup', 'doina precup')<br/>('1713608', 'James J. Clark', 'james j. clark')<br/>('1699104', 'Tal Arbel', 'tal arbel')</td><td></td></tr><tr><td>dd0760bda44d4e222c0a54d41681f97b3270122b</td><td></td><td></td><td></td></tr><tr><td>ddea3c352f5041fb34433b635399711a90fde0e8</td><td>Facial Expression Classification using Visual Cues and Language
+<br/>Department of Computer Science and Engineering, IIT Kanpur
+</td><td>('2094658', 'Abhishek Kar', 'abhishek kar')<br/>('1803835', 'Amitabha Mukerjee', 'amitabha mukerjee')</td><td>{akar,amit}@iitk.ac.in
+</td></tr><tr><td>dd033d4886f2e687b82d893a2c14dae02962ea70</td><td>Electronic Letters on Computer Vision and Image Analysis 11(1):41-54; 2012
+<br/>Facial Expression Recognition Using New Feature Extraction
+<br/>Algorithm
+<br/><b>National Cheng Kung University, Tainan, Taiwan</b><br/> Received 10th Oct. 2011; accepted 5th Sep. 2012
+</td><td>('2499819', 'Hung-Fu Huang', 'hung-fu huang')<br/>('1751725', 'Shen-Chuan Tai', 'shen-chuan tai')</td><td></td></tr><tr><td>ddbd24a73ba3d74028596f393bb07a6b87a469c0</td><td>Multi-region two-stream R-CNN
+<br/>for action detection
+<br/>Inria(cid:63)
+</td><td>('1766837', 'Xiaojiang Peng', 'xiaojiang peng')<br/>('2462253', 'Cordelia Schmid', 'cordelia schmid')</td><td>{xiaojiang.peng,cordelia.schmid}@inria.fr
+</td></tr><tr><td>ddf099f0e0631da4a6396a17829160301796151c</td><td>IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY
+<br/>Learning Face Image Quality from
+<br/>Human Assessments
+</td><td>('2180413', 'Lacey Best-Rowden', 'lacey best-rowden')<br/>('40217643', 'Anil K. Jain', 'anil k. jain')</td><td></td></tr><tr><td>dd0a334b767e0065c730873a95312a89ef7d1c03</td><td>Eigenexpressions: Emotion Recognition using Multiple
+<br/>Eigenspaces
+<br/>Luis Marco-Gim´enez1, Miguel Arevalillo-Herr´aez1, and Cristina Cuhna-P´erez2
+<br/><b></b><br/>Burjassot. Valencia 46100, Spain,
+<br/>2 Universidad Cat´olica San Vicente M´artir de Valencia (UCV),
+<br/>Burjassot. Valencia. Spain
+</td><td></td><td>margi4@alumni.uv.es
+</td></tr><tr><td>dd2f6a1ba3650075245a422319d86002e1e87808</td><td></td><td></td><td></td></tr><tr><td>ddaa8add8528857712424fd57179e5db6885df7c</td><td>METTES, SNOEK, CHANG: ACTION LOCALIZATION WITH PSEUDO-ANNOTATIONS
+<br/>Localizing Actions from Video Labels
+<br/>and Pseudo-Annotations
+<br/>Cees G.M. Snoek1
+<br/><b>University of Amsterdam</b><br/>Amsterdam, NL
+<br/><b>Columbia University</b><br/>New York, USA
+</td><td>('2606260', 'Pascal Mettes', 'pascal mettes')<br/>('9546964', 'Shih-Fu Chang', 'shih-fu chang')</td><td></td></tr><tr><td>dd8d53e67668067fd290eb500d7dfab5b6f730dd</td><td>69
+<br/>A Parameter-Free Framework for General
+<br/>Supervised Subspace Learning
+</td><td>('1698982', 'Shuicheng Yan', 'shuicheng yan')<br/>('7137861', 'Jianzhuang Liu', 'jianzhuang liu')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')<br/>('1739208', 'Thomas S. Huang', 'thomas s. huang')</td><td></td></tr><tr><td>ddbb6e0913ac127004be73e2d4097513a8f02d37</td><td>264
+<br/>IEEE TRANSACTIONS ON MULTIMEDIA, VOL. 1, NO. 3, SEPTEMBER 1999
+<br/>Face Detection Using Quantized Skin Color
+<br/>Regions Merging and Wavelet Packet Analysis
+</td><td>('34798028', 'Christophe Garcia', 'christophe garcia')<br/>('2441655', 'Georgios Tziritas', 'georgios tziritas')</td><td></td></tr><tr><td>dd600e7d6e4443ebe87ab864d62e2f4316431293</td><td></td><td></td><td></td></tr><tr><td>dc550f361ae82ec6e1a0cf67edf6a0138163382e</td><td>
+<br/>ISSN XXXX XXXX © 2018 IJESC
+<br/>
+<br/>
+<br/>Research Article Volume 8 Issue No.3
+<br/>Emotion Based Music Player
+<br/>Professor1, UG Student2, 3, 4, 5, 6
+<br/>Department of Electronics Engineering
+<br/><b>K.D.K. College of Engineering Nagpur, India</b></td><td>('9217928', 'Vijay Chakole', 'vijay chakole')<br/>('48228560', 'Kalyani Trivedi', 'kalyani trivedi')</td><td></td></tr><tr><td>dcf71245addaf66a868221041aabe23c0a074312</td><td>S3FD: Single Shot Scale-invariant Face Detector
+<br/><b>CBSR and NLPR, Institute of Automation, Chinese Academy of Sciences, Beijing, China</b><br/><b>University of Chinese Academy of Sciences, Beijing, China</b></td><td>('3220556', 'Shifeng Zhang', 'shifeng zhang')</td><td>{shifeng.zhang,xiangyu.zhu,zlei,hailin.shi,xiaobo.wang,szli}@nlpr.ia.ac.cn
+</td></tr><tr><td>dcb44fc19c1949b1eda9abe998935d567498467d</td><td>Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+<br/>1916
+</td><td></td><td></td></tr><tr><td>dcc38db6c885444694f515d683bbb50521ff3990</td><td>Learning to hallucinate face images via Component Generation and Enhancement
+<br/><b>City University of Hong Kong</b><br/><b>South China University of Technology</b><br/>3Tencent AI Lab
+<br/><b>University of Science and Technology of China</b></td><td>('2255687', 'Yibing Song', 'yibing song')<br/>('1718428', 'Jiawei Zhang', 'jiawei zhang')<br/>('2548483', 'Shengfeng He', 'shengfeng he')<br/>('2780029', 'Linchao Bao', 'linchao bao')<br/>('1777434', 'Qingxiong Yang', 'qingxiong yang')</td><td></td></tr><tr><td>dc5cde7e4554db012d39fc41ac8580f4f6774045</td><td>FAKTOR, IRANI: VIDEO SEGMENTATION BY NON-LOCAL CONSENSUS VOTING
+<br/>Video Segmentation by Non-Local
+<br/>Consensus Voting
+<br/>http://www.wisdom.weizmann.ac.il/~alonf/
+<br/>http://www.wisdom.weizmann.ac.il/~irani/
+<br/>Dept. of Computer Science and
+<br/>Applied Math
+<br/><b>The Weizmann Institute of Science</b><br/>ISRAEL
+</td><td>('2859022', 'Alon Faktor', 'alon faktor')<br/>('1696887', 'Michal Irani', 'michal irani')</td><td></td></tr><tr><td>dc7df544d7c186723d754e2e7b7217d38a12fcf7</td><td>Facial expression recognition using salient facial patches
+<br/>MIRACL-ENET’COM
+<br/><b>University of Sfax</b><br/>Tunisia (3018), Sfax
+<br/>MIRACL-FSS
+<br/><b>University of Sfax</b><br/>Tunisia (3018), Sfax
+</td><td>('2049116', 'Hazar Mliki', 'hazar mliki')<br/>('1749733', 'Mohamed Hammami', 'mohamed hammami')</td><td>mliki.hazar@gmail.com
+<br/>mohamed.hammami@fss.rnu.tn
+</td></tr><tr><td>dc77287bb1fcf64358767dc5b5a8a79ed9abaa53</td><td>Fashion Conversation Data on Instagram
+<br/>∗Graduate School of Culture Technology, KAIST, South Korea
+<br/>†Department of Communication Studies, UCLA, USA
+</td><td>('3459091', 'Yu-i Ha', 'yu-i ha')<br/>('2399803', 'Sejeong Kwon', 'sejeong kwon')<br/>('1775511', 'Meeyoung Cha', 'meeyoung cha')<br/>('1834047', 'Jungseock Joo', 'jungseock joo')</td><td></td></tr><tr><td>dc2e805d0038f9d1b3d1bc79192f1d90f6091ecb</td><td></td><td></td><td></td></tr><tr><td>dced05d28f353be971ea2c14517e85bc457405f3</td><td>Multimodal Priority Verification of Face and Speech
+<br/>Using Momentum Back-Propagation Neural Network
+<br/>1 Image Processing and Intelligent Systems Laboratory, Department of Image Engineering,
+<br/><b>Graduate School of Advanced Imaging Science, Multimedia, and Film, Chung-Ang University</b><br/>221 Huksuk-dong, Tongjak-Ku, Seoul 156-756, Korea,
+<br/>2 Broadcasting Media Research Group, Digital Broadcasting Research Division, ETRI, 161
+<br/>Gajeong-dong, Yuseong-Gu, Daejeon 305-700, Korea,
+<br/>3 Intelligent Image Communication Laboratory, Department of Computer Engineering,
+<br/><b>Kwangwoon University, 447-1 Wolge-dong, Nowon-Gu, Seoul 139-701, Korea</b></td><td>('1727735', 'Changhan Park', 'changhan park')<br/>('1722181', 'Myungseok Ki', 'myungseok ki')<br/>('1723542', 'Jaechan Namkung', 'jaechan namkung')<br/>('1684329', 'Joonki Paik', 'joonki paik')</td><td>initialchp@wm.cau.ac.kr, http://ipis.cau.ac.kr,
+<br/>kkim@etri.re.kr, http://www.etri.re.kr,
+<br/>namjc@daisy.kw.ac.kr, http://vision.kw.ac.kr.
+</td></tr><tr><td>dce5e0a1f2cdc3d4e0e7ca0507592860599b0454</td><td>Facelet-Bank for Fast Portrait Manipulation
+<br/><b>The Chinese University of Hong Kong</b><br/>2Tencent Youtu Lab
+<br/><b>Johns Hopkins University</b></td><td>('2070527', 'Ying-Cong Chen', 'ying-cong chen')<br/>('40898180', 'Yangang Ye', 'yangang ye')<br/>('1729056', 'Jiaya Jia', 'jiaya jia')</td><td>{ycchen, linhj, ryli, xtao}@cse.cuhk.edu.hk
+<br/>goodshenxy@gmail.com
+<br/>Mshu1@jhu.edu
+<br/>yangangye@tecent.com
+<br/>leojia9@gmail.com
+</td></tr><tr><td>dc9d62087ff93a821e6bb8a15a8ae2da3e39dcdd</td><td>Learning with Confident Examples:
+<br/>Rank Pruning for Robust Classification with Noisy Labels
+<br/><b>Massachusetts Institute of Technology</b><br/>Cambridge, MA 02139
+</td><td>('39972987', 'Curtis G. Northcutt', 'curtis g. northcutt')<br/>('3716141', 'Tailin Wu', 'tailin wu')<br/>('1706040', 'Isaac L. Chuang', 'isaac l. chuang')</td><td>{cgn, tailin, ichuang}@mit.edu
+</td></tr><tr><td>dcce3d7e8d59041e84fcdf4418702fb0f8e35043</td><td>Probabilistic Identity Characterization for Face Recognition∗
+<br/>Center for Automation Research (CfAR) and
+<br/>Department of Electrical and Computer Engineering
+<br/><b>University of Maryland, College Park, MD</b></td><td>('1682187', 'Shaohua Kevin Zhou', 'shaohua kevin zhou')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td>{shaohua, rama}@cfar.umd.edu
+</td></tr><tr><td>dce3dff9216d63c4a77a2fcb0ec1adf6d2489394</td><td>Manifold Learning for Gender Classification
+<br/>from Face Sequences
+<br/><b>Machine Vision Group, P.O. Box 4500, FI-90014, University of Oulu, Finland</b></td><td>('1751372', 'Abdenour Hadid', 'abdenour hadid')</td><td></td></tr><tr><td>dc974c31201b6da32f48ef81ae5a9042512705fe</td><td>Am I done? Predicting Action Progress in Video
+<br/>1 Media Integration and Communication Center, Univ. of Florence, Italy
+<br/>2 Department of Mathematics “Tullio Levi-Civita”, Univ. of Padova, Italy
+</td><td>('41172759', 'Federico Becattini', 'federico becattini')<br/>('1789269', 'Tiberio Uricchio', 'tiberio uricchio')<br/>('2831602', 'Lorenzo Seidenari', 'lorenzo seidenari')<br/>('8196487', 'Alberto Del Bimbo', 'alberto del bimbo')<br/>('1795847', 'Lamberto Ballan', 'lamberto ballan')</td><td></td></tr><tr><td>b6f758be954d34817d4ebaa22b30c63a4b8ddb35</td><td>A Proximity-Aware Hierarchical Clustering of Faces
+<br/><b>University of Maryland, College Park</b></td><td>('3329881', 'Wei-An Lin', 'wei-an lin')<br/>('36407236', 'Jun-Cheng Chen', 'jun-cheng chen')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td>walin@terpmail.umd.edu, pullpull@cs.umd.edu, rama@umiacs.umd.edu
+</td></tr><tr><td>b62571691a23836b35719fc457e093b0db187956</td><td> Volume 3, Issue 5, May 2013 ISSN: 2277 128X
+<br/>International Journal of Advanced Research in
+<br/> Computer Science and Software Engineering
+<br/> Research Paper
+<br/> Available online at: www.ijarcsse.com
+<br/>A Novel approach for securing biometric template
+<br/>Dr.Chander Kant
+<br/> Department of computer Science & applications Department of computer Science & applications
+<br/><b>Kurukshetra University, Kurukshetra, India</b><br/><b>Kurukshetra University, Kurukshetra, India</b><br/>
+</td><td>('3384880', 'Shweta Malhotra', 'shweta malhotra')</td><td></td></tr><tr><td>b69b239217d4e9a20fe4fe1417bf26c94ded9af9</td><td>A Temporally-Aware Interpolation Network for
+<br/>Video Frame Inpainting
+<br/><b>University of Michigan, Ann Arbor, USA</b></td><td>('2582303', 'Ximeng Sun', 'ximeng sun')<br/>('34246012', 'Ryan Szeto', 'ryan szeto')<br/>('3587688', 'Jason J. Corso', 'jason j. corso')</td><td>{sunxm,szetor,jjcorso}@umich.edu
+</td></tr><tr><td>b6c047ab10dd86b1443b088029ffe05d79bbe257</td><td></td><td></td><td></td></tr><tr><td>b6052dc718c72f2506cfd9d29422642ecf3992ef</td><td>A Survey on Human Motion Analysis from
+<br/>Depth Data
+<br/><b>University of Kentucky, 329 Rose St., Lexington, KY, 40508, U.S.A</b><br/>2 Microsoft, One Microsoft Way, Redmond, WA, 98052, U.S.A
+<br/>3 SRI International Sarnoff, 201 Washington Rd, Princeton, NJ, 08540, U.S.A
+<br/><b>University of Bonn, Roemerstrasse 164, 53117 Bonn, Germany</b></td><td>('3876303', 'Mao Ye', 'mao ye')<br/>('1681771', 'Qing Zhang', 'qing zhang')<br/>('40476140', 'Liang Wang', 'liang wang')<br/>('2446676', 'Jiejie Zhu', 'jiejie zhu')<br/>('38958903', 'Ruigang Yang', 'ruigang yang')<br/>('2946643', 'Juergen Gall', 'juergen gall')</td><td>mao.ye@uky.edu, qing.zhang@uky.edu, ryang@cs.uky.edu
+<br/>liangwan@microsoft.com
+<br/>jiejie.zhu@sri.com
+<br/>gall@iai.uni-bonn.de
+</td></tr><tr><td>b6145d3268032da70edc9cfececa1f9ffa4e3f11</td><td>c(cid:2) 2001 Kluwer Academic Publishers. Manufactured in The Netherlands.
+<br/>Face Recognition Using the Discrete Cosine Transform
+<br/><b>Center for Intelligent Machines, McGill University, 3480 University Street, Montreal, Canada H3A 2A</b></td><td>('1693521', 'Ziad M. Hafed', 'ziad m. hafed')<br/>('3631473', 'Martin D. Levine', 'martin d. levine')</td><td>zhafed@cim.mcgill.ca
+<br/>levine@cim.mcgill.ca
+</td></tr><tr><td>b6c53891dff24caa1f2e690552a1a5921554f994</td><td></td><td></td><td></td></tr><tr><td>b6ef158d95042f39765df04373c01546524c9ccd</td><td>Im2vid: Future Video Prediction for Static Image Action
+<br/>Recognition
+<br/>Badour Ahmad AlBahar
+<br/>Thesis submitted to the Faculty of the
+<br/><b>Virginia Polytechnic Institute and State University</b><br/>in partial fulfillment of the requirements for the degree of
+<br/>Master of Science
+<br/>in
+<br/>Computer Engineering
+<br/>Jia-Bin Huang, Chair
+<br/>A. Lynn Abbott
+<br/>Pratap Tokekar
+<br/>May 9, 2018
+<br/>Blacksburg, Virginia
+<br/>Keywords: Human Action Recognition, Static Image Action Recognition, Video Action
+<br/>Recognition, Future Video Prediction.
+<br/>Copyright 2018, Badour Ahmad AlBahar
+</td><td></td><td></td></tr><tr><td>b68150bfdec373ed8e025f448b7a3485c16e3201</td><td>Adversarial Image Perturbation for Privacy Protection
+<br/>A Game Theory Perspective
+<br/><b>Max Planck Institute for Informatics, Saarland Informatics Campus, Saarbr cken, Germany</b></td><td>('2390510', 'Seong Joon Oh', 'seong joon oh')<br/>('1739548', 'Mario Fritz', 'mario fritz')<br/>('1697100', 'Bernt Schiele', 'bernt schiele')</td><td>{joon,mfritz,schiele}@mpi-inf.mpg.de
+</td></tr><tr><td>b613b30a7cbe76700855479a8d25164fa7b6b9f1</td><td>1
+<br/>Identifying User-Specific Facial Affects from
+<br/>Spontaneous Expressions with Minimal Annotation
+</td><td>('23417737', 'Michael Xuelin Huang', 'michael xuelin huang')<br/>('1706729', 'Grace Ngai', 'grace ngai')<br/>('1730455', 'Kien A. Hua', 'kien a. hua')<br/>('1714454', 'Hong Va Leong', 'hong va leong')</td><td></td></tr><tr><td>b64cfb39840969b1c769e336a05a30e7f9efcd61</td><td>ORIGINAL RESEARCH
+<br/>published: 15 June 2016
+<br/>doi: 10.3389/fict.2016.00009
+<br/>CRF-Based Context Modeling for
+<br/>Person Identification in Broadcast
+<br/>Videos
+<br/><b>LIUM Laboratory, Le Mans, France, 2 Idiap Research Institute, Martigny, Switzerland</b><br/>We are investigating the problem of speaker and face identification in broadcast videos.
+<br/>Identification is performed by associating automatically extracted names from overlaid
+<br/>texts with speaker and face clusters. We aimed at exploiting the structure of news
+<br/>videos to solve name/cluster association ambiguities and clustering errors. The proposed
+<br/>approach combines iteratively two conditional random fields (CRF). The first CRF performs
+<br/>the person diarization (joint temporal segmentation, clustering, and association of voices
+<br/>jointly over the speech segments and the face tracks. It benefits from
+<br/>and faces)
+<br/>contextual
+<br/>information being extracted from the image backgrounds and the overlaid
+<br/>texts. The second CRF associates names with person clusters, thanks to co-occurrence
+<br/>statistics. Experiments conducted on a recent and substantial public dataset containing
+<br/>reports and debates demonstrate the interest and complementarity of the different
+<br/>modeling steps and information sources: the use of these elements enables us to obtain
+<br/>better performances in clustering and identification, especially in studio scenes.
+<br/>Keywords: face identification, speaker identification, broadcast videos, conditional random field, face clustering,
+<br/>speaker diarization
+<br/>1. INTRODUCTION
+<br/>For the last two decades, researchers have been trying to create indexing and fast search and
+<br/>browsing tools capable of handling the growing amount of available video collections. Among the
+<br/>associated possibilities, person identification is an important one. Indeed, video contents can often
+<br/>be browsed through the appearances of their different actors. Moreover, the availability of each
+<br/>person intervention allows easier access to video structure elements, such as the scene segmentation.
+<br/>Both motivations are especially verified in the case of news collections. The focus of this paper is,
+<br/>therefore, to develop a program able to identify persons in broadcast videos. That is, the program
+<br/>must be able to provide all temporal segments corresponding to each face and speaker.
+<br/>Person identification can be supervised. A face and/or a speaker model of the queried person is
+<br/>then learned over manually labeled training data. However, this raises the problem of annotation
+<br/>cost. An unsupervised and complementary approach consists of using the naming information
+<br/>already present in the documents. Such resources include overlaid texts, speech transcripts, and
+<br/>metadata. Motivated by this opportunity, unsupervised identification has been investigated for
+<br/>15 years from the early work of Satoh et al. (1999) to the development of more complex news-
+<br/>browsing systems exploiting this paradigm (Jou et al., 2013), or thanks to sponsored competitions
+<br/>(Giraudel et al., 2012). Whatever the source of naming information, it must tackle two main
+<br/>obstacles: associate the names to co-occurring speech and face segments and propagate this naming
+<br/>information from the co-occurring segments to the other segments of this person.
+<br/>Edited by:
+<br/>Shin’Ichi Satoh,
+<br/><b>National Institute of Informatics, Japan</b><br/>Reviewed by:
+<br/>Thanh Duc Ngo,
+<br/><b>Vietnam National University Ho Chi</b><br/>Minh City, Vietnam
+<br/>Ichiro Ide,
+<br/><b>Nagoya University, Japan</b><br/>*Correspondence:
+<br/>Specialty section:
+<br/>This article was submitted to
+<br/>Computer Image Analysis, a section
+<br/>of the journal Frontiers in ICT
+<br/>Received: 16 October 2015
+<br/>Accepted: 12 May 2016
+<br/>Published: 15 June 2016
+<br/>Citation:
+<br/>Gay P, Meignier S, Deléglise P and
+<br/>Odobez J-M (2016) CRF-Based
+<br/>Context Modeling for Person
+<br/>Identification in Broadcast Videos.
+<br/>doi: 10.3389/fict.2016.00009
+<br/>Frontiers in ICT | www.frontiersin.org
+<br/>June 2016 | Volume 3 | Article 9
+</td><td>('14556501', 'Paul Gay', 'paul gay')<br/>('2446815', 'Sylvain Meignier', 'sylvain meignier')<br/>('1682046', 'Paul Deléglise', 'paul deléglise')<br/>('1719610', 'Jean-Marc Odobez', 'jean-marc odobez')<br/>('1719610', 'Jean-Marc Odobez', 'jean-marc odobez')</td><td>odobez@idiap.ch
+</td></tr><tr><td>b6f682648418422e992e3ef78a6965773550d36b</td><td>February 8, 2017
+</td><td></td><td></td></tr><tr><td>b689d344502419f656d482bd186a5ee6b0140891</td><td>2009, Vol. 9, No. 2, 260 –264
+<br/>© 2009 American Psychological Association
+<br/>1528-3542/09/$12.00 DOI: 10.1037/a0014681
+<br/>CORRECTED JULY 1, 2009; SEE LAST PAGE
+<br/>BRIEF REPORTS
+<br/>Christopher P. Said
+<br/><b>Princeton University</b><br/><b>University of Amsterdam, University of Trento, Italy</b><br/><b>Princeton University</b><br/>People make trait inferences based on facial appearance despite little evidence that these inferences
+<br/>accurately reflect personality. The authors tested the hypothesis that these inferences are driven in part
+<br/>neutral faces on a set of trait dimensions. The authors then submitted the face images to a Bayesian
+<br/>expression. In general, neutral faces that are perceived to have positive valence resemble happiness, faces
+<br/>that are perceived to have negative valence resemble disgust and fear, and faces that are perceived to be
+<br/>threatening resemble anger. These results support the idea that trait inferences are in part the result of an
+<br/>then be misattributed as traits.
+<br/>People evaluate neutral faces on multiple trait dimensions and
+<br/>these evaluations have social consequences (Hassin & Trope,
+<br/>2000). For instance, political candidates whose faces are perceived
+<br/>as more competent are more likely to win elections (Ballew &
+<br/>Todorov, 2007; Todorov, Mandisodza, Goren, & Hall, 2005), and
+<br/>cadets whose faces are perceived as more dominant are more likely
+<br/>to be promoted to higher military ranks (Mazur, Mazur, & Keating,
+<br/>1984).
+<br/>Although inferences about traits based on facial appearance are
+<br/>made reliably across observers, there is little evidence that these
+<br/>inferences accurately reflect the personality of the observed face.
+<br/>Most correlations between perceived traits and actual traits are
+<br/>weak though positive (Bond, Berry, & Omar, 1994), some are
+<br/>inconsistent for men and women (Zebrowitz, Voinescu, & Collins,
+<br/>1996), and some are negative (Zebrowitz, Andreoletti, Collins,
+<br/>ogy and the Center for the Study of Brain, Mind and Behavior at
+<br/><b>versity of Amsterdam, Amsterdam and University of Trento</b><br/>We thank Valerie Loehr for her assistance with the acquisition of trait
+<br/>ratings, and Nick Oosterhof for helpful discussions. This research was
+<br/>supported by National Science Foundation Grant BCS-0446846.
+<br/>Correspondence should be addressed to Christopher P. Said, Department
+<br/><b>of Psychology, Princeton University, Princeton, NJ 08540. E-mail</b><br/>260
+<br/>Lee, & Blumenthal, 1998). It is therefore puzzling that people
+<br/>make reliable and rapid trait inferences from faces (Willis &
+<br/>Todorov, 2006) when only little accurate information, at best, is
+<br/>provided about personality. One intriguing explanation is that
+<br/>neutral faces may contain structural properties that cause them to
+<br/>resemble faces with more accurate and ecologically relevant in-
+<br/>son, 1996; Montepare & Dobish, 2003).
+<br/>Under this hypothesis, the adaptive ability to recognize emo-
+<br/>tions overgeneralizes to neutral faces that merely bear a subtle
+<br/>faces vary on trait dimensions such as trustworthiness (Engell,
+<br/>Haxby, & Todorov, 2007). One possibility is that the source of
+<br/>consensus in judging faces on social dimensions is the similarity of
+<br/>the face to expressions corresponding to the dimension of trait
+<br/>judgment (e.g., aggressiveness and anger). When given the task of
+<br/>could base their judgments on this similarity. Evidence for this
+<br/>hypothesis comes from research showing that the more a neutral
+<br/>face is rated as happy by one group of participants the higher it is
+<br/>rated on dominance and affiliation by another group of partici-
+<br/>pants, and the more a face is rated as angry the higher it is rated on
+<br/>dominance and the lower on affiliation (Montepare & Dobish,
+<br/>2003). One interpretation of these findings is that people misat-
+</td><td>('1703601', 'Nicu Sebe', 'nicu sebe')<br/>('2913698', 'Alexander Todorov', 'alexander todorov')<br/>('2913698', 'Alexander Todorov', 'alexander todorov')<br/>('1703601', 'Nicu Sebe', 'nicu sebe')</td><td>csaid@princeton.edu
+</td></tr><tr><td>b6d3caccdcb3fbce45ce1a68bb5643f7e68dadb3</td><td>Learning Spatio-Temporal Representation with Pseudo-3D Residual Networks ∗
+<br/><b>University of Science and Technology of China, Hefei, China</b><br/>‡ Microsoft Research, Beijing, China
+</td><td>('3430743', 'Zhaofan Qiu', 'zhaofan qiu')<br/>('2053452', 'Ting Yao', 'ting yao')<br/>('1724211', 'Tao Mei', 'tao mei')</td><td>zhaofanqiu@gmail.com, {tiyao, tmei}@microsoft.com
+</td></tr><tr><td>b6d0e461535116a675a0354e7da65b2c1d2958d4</td><td>Deep Directional Statistics:
+<br/>Pose Estimation with
+<br/>Uncertainty Quantification
+<br/><b>Max Planck Institute for Intelligent Systems, T ubingen, Germany</b><br/>2 Amazon, T¨ubingen, Germany
+<br/>3 Microsoft Research, Cambridge, UK
+</td><td>('15968671', 'Sergey Prokudin', 'sergey prokudin')<br/>('2388416', 'Sebastian Nowozin', 'sebastian nowozin')</td><td>sergey.prokudin@tuebingen.mpg.de
+</td></tr><tr><td>b656abc4d1e9c8dc699906b70d6fcd609fae8182</td><td></td><td></td><td></td></tr><tr><td>b6a01cd4572b5f2f3a82732ef07d7296ab0161d3</td><td>Kernel-Based Supervised Discrete Hashing for
+<br/>Image Retrieval
+<br/><b>University of Florida, Gainesville, FL, 32611, USA</b></td><td>('2766473', 'Xiaoshuang Shi', 'xiaoshuang shi')<br/>('2082604', 'Fuyong Xing', 'fuyong xing')<br/>('3457945', 'Jinzheng Cai', 'jinzheng cai')<br/>('2476328', 'Zizhao Zhang', 'zizhao zhang')<br/>('1877955', 'Yuanpu Xie', 'yuanpu xie')<br/>('1705066', 'Lin Yang', 'lin yang')</td><td>xsshi2015@ufl.edu
+</td></tr><tr><td>a9791544baa14520379d47afd02e2e7353df87e5</td><td>Technical Note
+<br/>The Need for Careful Data Collection for Pattern Recognition in
+<br/>Digital Pathology
+<br/><b>Montefiore Institute, University of Li ge, 4000 Li ge, Belgium</b><br/>Received: 08 December 2016
+<br/>Accepted: 15 March 2017
+<br/> Published: 10 April 2017
+</td><td>('1689882', 'Raphaël Marée', 'raphaël marée')</td><td></td></tr><tr><td>a9eb6e436cfcbded5a9f4b82f6b914c7f390adbd</td><td>(IJARAI) International Journal of Advanced Research in Artificial Intelligence,
+<br/>Vol. 5, No.6, 2016
+<br/>A Model for Facial Emotion Inference Based on
+<br/>Planar Dynamic Emotional Surfaces
+<br/>Ruivo, J. P. P.
+<br/>Escola Polit´ecnica
+<br/>Negreiros, T.
+<br/>Escola Polit´ecnica
+<br/>Barretto, M. R. P.
+<br/>Escola Polit´ecnica
+<br/>Tinen, B.
+<br/>Escola Polit´ecnica
+<br/>Universidade de S˜ao Paulo
+<br/>Universidade de S˜ao Paulo
+<br/>Universidade de S˜ao Paulo
+<br/>Universidade de S˜ao Paulo
+<br/>S˜ao Paulo, Brazil
+<br/>S˜ao Paulo, Brazil
+<br/>S˜ao Paulo, Brazil
+<br/>S˜ao Paulo, Brazil
+</td><td></td><td></td></tr><tr><td>a955033ca6716bf9957b362b77092592461664b4</td><td> ISSN(Online): 2320-9801
+<br/> ISSN (Print): 2320-9798
+<br/>International Journal of Innovative Research in Computer
+<br/>and Communication Engineering
+<br/>(An ISO 3297: 2007 Certified Organization)
+<br/>Video Based Face Recognition Using Artificial
+<br/>Vol. 3, Issue 6, June 2015
+<br/>Neural Network
+<br/><b>Pursuing M.Tech, Caarmel Engineering College, MG University, Kerala, India</b><br/><b>Caarmel Engineering College, MG University, Kerala, India</b></td><td></td><td></td></tr><tr><td>a956ff50ca958a3619b476d16525c6c3d17ca264</td><td>A Novel Bidirectional Neural Network for Face Recognition
+<br/>JalilMazloum, Ali Jalali and Javad Amiryan
+<br/>Electrical and Computer Engineering Department
+<br/><b>ShahidBeheshti University</b><br/>Tehran, Iran
+</td><td></td><td>J_Mazloum@sbu.ac.ir, A_Jalali@sbu.ac.ir, Amiryan.j@robocyrus.ir
+</td></tr><tr><td>a92adfdd8996ab2bd7cdc910ea1d3db03c66d34f</td><td></td><td></td><td></td></tr><tr><td>a98316980b126f90514f33214dde51813693fe0d</td><td>Collaborations on YouTube: From Unsupervised Detection to the
+<br/>Impact on Video and Channel Popularity
+<br/>Multimedia Communications Lab (KOM), Technische Universität Darmstadt, Germany
+</td><td>('49495293', 'Christian Koch', 'christian koch')<br/>('46203604', 'Moritz Lode', 'moritz lode')<br/>('2214486', 'Denny Stohr', 'denny stohr')<br/>('2869441', 'Amr Rizk', 'amr rizk')<br/>('1725298', 'Ralf Steinmetz', 'ralf steinmetz')</td><td>E-Mail: {Christian.Koch | Denny.Stohr | Amr.Rizk | Ralf.Steinmetz}@kom.tu-darmstadt.de
+</td></tr><tr><td>a93781e6db8c03668f277676d901905ef44ae49f</td><td>Recent Datasets on Object Manipulation: A Survey
+</td><td>('3112203', 'Yongqiang Huang', 'yongqiang huang')<br/>('39545911', 'Matteo Bianchi', 'matteo bianchi')<br/>('2646612', 'Minas Liarokapis', 'minas liarokapis')<br/>('1681376', 'Yu Sun', 'yu sun')</td><td></td></tr><tr><td>a9fc23d612e848250d5b675e064dba98f05ad0d9</td><td>(IJACSA) International Journal of Advanced Computer Science and Applications,
+<br/>Vol. 9, No. 2, 2018
+<br/>Face Age Estimation Approach based on Deep
+<br/>Learning and Principle Component Analysis
+<br/>Faculty of Computers and
+<br/>Informatics,
+<br/><b>Benha University, Egypt</b><br/> Faculty of Computers and
+<br/>Information,
+<br/><b>Minia University, Egypt</b><br/>Faculty of Computers and
+<br/>Informatics,
+<br/><b>Benha University, Egypt</b></td><td>('3488856', 'Essam H. Houssein', 'essam h. houssein')<br/>('33680569', 'Hala H. Zayed', 'hala h. zayed')</td><td></td></tr><tr><td>a9adb6dcccab2d45828e11a6f152530ba8066de6</td><td>Aydınlanma Alt-uzaylarına dayalı Gürbüz Yüz Tanıma
+<br/>Illumination Subspaces based Robust Face Recognition
+<br/>Interactive Systems Labs, Universität Karlsruhe (TH)
+<br/>76131 Karlsruhe, Almanya
+<br/>web: http://isl.ira.uka.de/face_recognition
+<br/>Özetçe
+<br/>yönlerine
+<br/>aydınlanma
+<br/>kaynaklanan
+<br/>sonra, yüz uzayı
+<br/>Bu çalışmada aydınlanma alt-uzaylarına dayalı bir yüz tanıma
+<br/>sistemi sunulmuştur. Bu sistemde,
+<br/>ilk olarak, baskın
+<br/>aydınlanma yönleri, bir topaklandırma algoritması kullanılarak
+<br/>öğrenilmiştir. Topaklandırma algoritması sonucu önden, sağ
+<br/>ve sol yanlardan olmak üzere üç baskın aydınlanma yönü
+<br/>gözlemlenmiştir. Baskın
+<br/>karar
+<br/>-yüzün görünümündeki
+<br/>kılındıktan
+<br/>aydınlanmadan
+<br/>kişi
+<br/>kimliklerinden kaynaklanan değişimlerden ayırmak için- bu üç
+<br/>aydınlanma uzayına bölünmüştür. Daha sonra, ek aydınlanma
+<br/>yönü bilgisinden faydalanmak için aydınlanma alt-uzaylarına
+<br/>dayalı yüz
+<br/>tanıma algoritması kullanılmıştır. Önerilen
+<br/>yaklaşım, CMU PIE veritabanında, “illumination” ve
+<br/>“lighting” kümelerinde yer alan yüz
+<br/>imgeleri üzerinde
+<br/>sınanmıştır. Elde edilen deneysel sonuçlar, aydınlanma
+<br/>yönünden yararlanmanın ve aydınlanma alt-uzaylarına dayalı
+<br/>yüz tanıma algoritmasının yüz tanıma başarımını önemli
+<br/>ölçüde arttırdığını göstermiştir.
+<br/>değişimleri,
+<br/>farklı
+</td><td>('1770336', 'D. Kern', 'd. kern')<br/>('1742325', 'R. Stiefelhagen', 'r. stiefelhagen')</td><td>ekenel@ira.uka.de
+</td></tr><tr><td>a967426ec9b761a989997d6a213d890fc34c5fe3</td><td>Relative Ranking of Facial Attractiveness
+<br/>Department of Computer Science and Engineering
+<br/><b>University of California, San Diego</b></td><td>('3079766', 'Hani Altwaijry', 'hani altwaijry')</td><td>{haltwaij,sjb}@cs.ucsd.edu
+</td></tr><tr><td>a95dc0c4a9d882a903ce8c70e80399f38d2dcc89</td><td> TR-IIS-14-003
+<br/>Review and Implementation of
+<br/>High-Dimensional Local Binary
+<br/>Patterns and Its Application to
+<br/>Face Recognition
+<br/>July. 24, 2014 || Technical Report No. TR-IIS-14-003
+<br/>http://www.iis.sinica.edu.tw/page/library/TechReport/tr2014/tr14.html
+</td><td>('33970300', 'Bor-Chun Chen', 'bor-chun chen')<br/>('1720473', 'Chu-Song Chen', 'chu-song chen')</td><td></td></tr><tr><td>a9286519e12675302b1d7d2fe0ca3cc4dc7d17f6</td><td>Learning to Succeed while Teaching to Fail:
+<br/>Privacy in Closed Machine Learning Systems
+</td><td>('2077648', 'Qiang Qiu', 'qiang qiu')<br/>('4838771', 'Miguel R. D. Rodrigues', 'miguel r. d. rodrigues')<br/>('1699339', 'Guillermo Sapiro', 'guillermo sapiro')</td><td></td></tr><tr><td>a949b8700ca6ba96ee40f75dfee1410c5bbdb3db</td><td>Instance-weighted Transfer Learning of Active Appearance Models
+<br/><b>Computer Vision Group, Friedrich Schiller University of Jena, Germany</b><br/>Ernst-Abbe-Platz 2-4, 07743 Jena, Germany
+</td><td>('1708249', 'Daniel Haase', 'daniel haase')<br/>('1679449', 'Erik Rodner', 'erik rodner')<br/>('1728382', 'Joachim Denzler', 'joachim denzler')</td><td>{daniel.haase,erik.rodner,joachim.denzler}@uni-jena.de
+</td></tr><tr><td>a92b5234b8b73e06709dd48ec5f0ec357c1aabed</td><td></td><td></td><td></td></tr><tr><td>a9be20954e9177d8b2bc39747acdea4f5496f394</td><td>Event-specific Image Importance
+<br/><b>University of California, San Diego</b><br/>2Adobe Research
+</td><td>('35259685', 'Yufei Wang', 'yufei wang')</td><td>{yuw176, gary}@ucsd.edu
+<br/>{zlin, xshen, rmech, gmiller}@adobe.com
+</td></tr><tr><td>d5afd7b76f1391321a1340a19ba63eec9e0f9833</td><td>Journal of Information Hiding and Multimedia Signal Processing
+<br/>Ubiquitous International
+<br/>c⃝2010 ISSN 2073-4212
+<br/>Volume 1, Number 3, July 2010
+<br/>Statistical Analysis of Human Facial Expressions
+<br/>Department of Informatics
+<br/><b>Aristotle University of Thessaloniki</b><br/>Box 451, 54124 Thessaloniki, Greece
+<br/>Department of Informatics
+<br/><b>Aristotle University of Thessaloniki</b><br/>Box 451, 54124 Thessaloniki, Greece
+<br/><b>Informatics and Telematics Institute</b><br/>CERTH, Greece
+<br/>Received March 2010; revised June 2010
+</td><td>('2764130', 'Stelios Krinidis', 'stelios krinidis')<br/>('1698588', 'Ioannis Pitas', 'ioannis pitas')</td><td>stelios.krinidis@mycosmos.gr
+<br/>pitas@aiia.csd.auth.gr
+</td></tr><tr><td>d5375f51eeb0c6eff71d6c6ad73e11e9353c1f12</td><td>Manifold Ranking-Based Locality Preserving Projections
+<br/><b>School of Computer Science and Engineering, South China University of Technology</b><br/>Guangzhou 510006, Guangdong, China
+</td><td>('2132230', 'Jia Wei', 'jia wei')<br/>('3231018', 'Zewei Chen', 'zewei chen')<br/>('1837988', 'Pingyang Niu', 'pingyang niu')<br/>('2524825', 'Yishun Chen', 'yishun chen')<br/>('7307608', 'Wenhui Chen', 'wenhui chen')</td><td>csjwei@scut.edu.cn
+</td></tr><tr><td>d5d7e89e6210fcbaa52dc277c1e307632cd91dab</td><td>DOTA: A Large-scale Dataset for Object Detection in Aerial Images∗
+<br/><b>State Key Lab. LIESMARS, Wuhan University, China</b><br/>2EIS, Huazhong Univ. Sci. and Tech., China
+<br/><b>Computer Science Depart., Cornell University, USA</b><br/><b>Computer Science Depart., Rochester University, USA</b><br/>5German Aerospace Center (DLR), Germany
+<br/><b>DAIS, University of Venice, Italy</b><br/>January 30, 2018
+</td><td>('39943835', 'Gui-Song Xia', 'gui-song xia')<br/>('1686737', 'Xiang Bai', 'xiang bai')<br/>('1749386', 'Jian Ding', 'jian ding')<br/>('48148046', 'Zhen Zhu', 'zhen zhu')<br/>('33642939', 'Jiebo Luo', 'jiebo luo')<br/>('1777167', 'Mihai Datcu', 'mihai datcu')<br/>('8111020', 'Marcello Pelillo', 'marcello pelillo')<br/>('1733213', 'Liangpei Zhang', 'liangpei zhang')</td><td>{guisong.xia, jding, zlp62}@whu.edu.cn
+<br/>{xbai, zzhu}@hust.edu.cn
+<br/>sjb344@cornell.edu
+<br/>jiebo.luo@gmail.com
+<br/>mihai.datcu@dlr.de
+<br/>pelillo@dsi.unive.it
+</td></tr><tr><td>d50c6d22449cc9170ab868b42f8c72f8d31f9b6c</td><td>Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+<br/>1668
+</td><td></td><td></td></tr><tr><td>d522c162bd03e935b1417f2e564d1357e98826d2</td><td>He et al. EURASIP Journal on Advances in Signal Processing 2013, 2013:19
+<br/>http://asp.eurasipjournals.com/content/2013/1/19
+<br/>RESEARCH
+<br/>Open Access
+<br/>Weakly supervised object extraction with
+<br/>iterative contour prior for remote sensing
+<br/>images
+</td><td>('2456383', 'Chu He', 'chu he')<br/>('40382947', 'Yu Zhang', 'yu zhang')<br/>('1813780', 'Bo Shi', 'bo shi')<br/>('1727252', 'Xin Su', 'xin su')<br/>('32514309', 'Xin Xu', 'xin xu')<br/>('2048631', 'Mingsheng Liao', 'mingsheng liao')</td><td></td></tr><tr><td>d59f18fcb07648381aa5232842eabba1db52383e</td><td>International Conference on Systemics, Cybernetics and Informatics, February 12–15, 2004
+<br/>ROBUST FACIAL EXPRESSION RECOGNITION USING SPATIALLY
+<br/>LOCALIZED GEOMETRIC MODEL
+<br/>Department of Electrical Engineering
+<br/>Dept. of Computer Sc. and Engg.
+<br/>IIT Kanpur
+<br/> Kanpur 208016, India
+<br/>Kanpur 208016, India
+<br/> IIT Kanpur
+<br/>Dept. of Computer Sc. and Engg.
+<br/> IIT Kanpur
+<br/>Kanpur 208016, India
+<br/>While approaches based on 3D deformable facial model have
+<br/>achieved expression recognition rates of as high as 98% [2], they
+<br/>are computationally inefficient and require considerable apriori
+<br/>training based on 3D information, which is often unavailable.
+<br/>Recognition from 2D images remains a difficult yet important
+<br/>problem for areas such as
+<br/>image database querying and
+<br/>classification. The accuracy rates achieved for 2D images are
+<br/>around 90% [3,4,5,11]. In a recent review of expression
+<br/>recognition, Fasel [1] considers the problem along several
+<br/>dimensions: whether features such as lips or eyebrows are first
+<br/>identified in the face (local [4] vs holistic [11]), or whether the
+<br/>image model used is 2D or 3D. Methods proposed for expression
+<br/>recognition from 2D images include the Gabor-Wavelet [5] or
+<br/>Holistic Optical flow [11] approach.
+<br/>This paper describes a more robust system for facial expression
+<br/>recognition from image sequences using 2D appearance-based
+<br/>local approach for the extraction of intransient facial features, i.e.
+<br/>features such as eyebrows, lips, or mouth, which are always
+<br/>present in the image, but may be deformed [1] (in contrast,
+<br/>transient features are wrinkles or bulges that disappear at other
+<br/>times). The main advantages of such an approach is low
+<br/>computational requirements, ability to work with both colored and
+<br/>grayscale images and robustness in handling partial occlusions
+<br/>[3].
+<br/>Edge projection analysis which is used here for feature extraction
+<br/>(eyebrows and lips) is well known [6]. Unlike [6] which describes
+<br/>a template based matching as an essential starting point, we use
+<br/>contours analysis. Our system computes a feature vector based on
+<br/>geometrical model of the face and then classifies it into four
+<br/>expression classes using a feed-forward basis function net. The
+<br/>system detects open and closed state of the mouth as well. The
+<br/>algorithm presented here works on both color and grayscale image
+<br/>sequences. An important aspect of our work is the use of color
+<br/>information for robust and more accurate segmentation of lip
+<br/>region in case of color images. The novel lip-enhancement
+<br/>transform is based on Gaussian modeling of skin and lip color.
+<br/>To place the work in a larger context of face analysis and
+<br/>recognition, the overall task requires that the part of the image
+<br/>involving the face be detected and segmented. We assume that a
+<br/>near-frontal view of the face is available. Tests on a grayscale
+<br/>and two color face image databases ([8] and [9,10]) demonstrate a
+<br/>superior recognition rate for four facial expressions (smile,
+<br/>surprise, disgust and sad against neutral).
+<br/>image sequences
+</td><td>('1681995', 'Ashutosh Saxena', 'ashutosh saxena')<br/>('40101676', 'Ankit Anand', 'ankit anand')<br/>('1803835', 'Amitabha Mukerjee', 'amitabha mukerjee')</td><td>ashutosh.saxena@ieee.org
+<br/>ankanand@cse.iitk.ac.in
+<br/>amit@cse.iitk.ac.in
+</td></tr><tr><td>d5fa9d98c8da54a57abf353767a927d662b7f026</td><td> VOL. 1, NO. 2, Oct 2010 E-ISSN 2218-6301
+<br/>Journal of Emerging Trends in Computing and Information Sciences
+<br/>©2009-2010 CIS Journal. All rights reserved.
+<br/>http://www.cisjournal.org
+<br/>Age Estimation based on Neural Networks using Face Features
+<br/>
+<br/>Corresponding Author: Faculty of Information Technology
+<br/><b>Islamic University of Gaza - Palestine</b><br/>Email
+<br/>:
+<br/>edu.
+<br/> ps.
+</td><td>('1714298', 'Nabil Hewahi', 'nabil hewahi')</td><td>nhewahi@iugaza
+</td></tr><tr><td>d588dd4f305cdea37add2e9bb3d769df98efe880</td><td>
+<br/>Audio-Visual Authentication System over the
+<br/>Internet Protocol
+<br/>abandoned.
+<br/>in
+<br/>illumination based
+<br/>is developed with the objective to
+</td><td>('1968167', 'Yee Wan Wong', 'yee wan wong')</td><td></td></tr><tr><td>d5444f9475253bbcfef85c351ea9dab56793b9ea</td><td>IEEE TRANSACTIONS ON INTELLIGENT TRANSPORTATION SYSTEMS
+<br/>BoxCars: Improving Fine-Grained Recognition
+<br/>of Vehicles using 3D Bounding Boxes
+<br/>in Traffic Surveillance
+<br/>in contrast
+</td><td>('34891870', 'Jakub Sochor', 'jakub sochor')<br/>('1785162', 'Adam Herout', 'adam herout')</td><td></td></tr><tr><td>d5ab6aa15dad26a6ace5ab83ce62b7467a18a88e</td><td>World Journal of Computer Application and Technology 2(7): 133-138, 2014
+<br/>DOI: 10.13189/wjcat.2014.020701
+<br/>http://www.hrpub.org
+<br/>Optimized Structure for Facial Action Unit Relationship
+<br/>Using Bayesian Network
+<br/>Intelligent Biometric Group, School of Electrical and Electronic Engineering, Engineering Campus, Universiti Sains Malaysia, Pulau
+<br/>Pinang, Malaysia
+<br/>Copyright © 2014 Horizon Research Publishing All rights reserved.
+</td><td>('9115930', 'Yee Koon Loh', 'yee koon loh')<br/>('3120408', 'Shahrel A. Suandi', 'shahrel a. suandi')</td><td>*Corresponding Author: lyk10_eee045@student.usm.my
+</td></tr><tr><td>d5b0e73b584be507198b6665bcddeba92b62e1e5</td><td>CHEN ET AL.: MULTI-REGION ENSEMBLE CNNS FOR AGE ESTIMATION
+<br/>Multi-Region Ensemble Convolutional Neural
+<br/>Networks for High-Accuracy Age Estimation
+<br/>1 Faculty of Information Technology
+<br/><b>Macau University of Science and</b><br/>Technology, Macau SAR
+<br/>2 National Laboratory of Pattern
+<br/><b>Recognition, Institute of Automation</b><br/>Chinese Academy of Sciences
+<br/><b>University of Chinese Academy of</b><br/>Sciences
+<br/>4 Computing, School of Science and
+<br/><b>Engineering, University of Dundee</b></td><td>('38141486', 'Yiliang Chen', 'yiliang chen')<br/>('9645431', 'Zichang Tan', 'zichang tan')<br/>('1916793', 'Alex Po Leung', 'alex po leung')<br/>('1756538', 'Jun Wan', 'jun wan')<br/>('40539612', 'Jianguo Zhang', 'jianguo zhang')</td><td>elichan5168@gmail.com
+<br/>tanzichang2016@ia.ac.cn
+<br/>pleung@must.edu.mo
+<br/>jun.wan@ia.ac.cn
+<br/>jnzhang@dundee.ac.uk
+</td></tr><tr><td>d56fe69cbfd08525f20679ffc50707b738b88031</td><td>Training of multiple classifier systems utilizing
+<br/>partially labelled sequences
+<br/><b></b><br/>89069 Ulm - Germany
+</td><td>('3037635', 'Martin Schels', 'martin schels')<br/>('2307794', 'Patrick Schillinger', 'patrick schillinger')<br/>('1685857', 'Friedhelm Schwenker', 'friedhelm schwenker')</td><td></td></tr><tr><td>d5de42d37ee84c86b8f9a054f90ddb4566990ec0</td><td>Asynchronous Temporal Fields for Action Recognition
+<br/><b>Carnegie Mellon University 2University of Washington 3Allen Institute for Arti cial Intelligence</b><br/>github.com/gsig/temporal-fields/
+</td><td>('34280810', 'Gunnar A. Sigurdsson', 'gunnar a. sigurdsson')<br/>('2270286', 'Ali Farhadi', 'ali farhadi')<br/>('1737809', 'Abhinav Gupta', 'abhinav gupta')</td><td></td></tr><tr><td>d50751da2997e7ebc89244c88a4d0d18405e8507</td><td></td><td></td><td></td></tr><tr><td>d511e903a882658c9f6f930d6dd183007f508eda</td><td></td><td></td><td></td></tr><tr><td>d50a40f2d24363809a9ac57cf7fbb630644af0e5</td><td>END-TO-END TRAINED CNN ENCODER-DECODER NETWORKS FOR IMAGE
+<br/>STEGANOGRAPHY
+<br/><b>National University of Computer and Emerging Sciences (NUCES-FAST), Islamabad, Pakistan</b><br/>Reveal.ai (Recognition, Vision & Learning) Lab
+</td><td>('9205693', 'Atique ur Rehman', 'atique ur rehman')<br/>('2695106', 'Sibt ul Hussain', 'sibt ul hussain')</td><td></td></tr><tr><td>d5b5c63c5611d7b911bc1f7e161a0863a34d44ea</td><td>Extracting Scene-dependent Discriminant
+<br/>Features for Enhancing Face Recognition
+<br/>under Severe Conditions
+<br/><b>Information and Media Processing Research Laboratories, NEC Corporation</b><br/>1753, Shimonumabe, Nakahara-Ku, Kawasaki 211-8666 Japan
+</td><td>('1709089', 'Rui Ishiyama', 'rui ishiyama')<br/>('35577655', 'Nobuyuki Yasukawa', 'nobuyuki yasukawa')</td><td></td></tr><tr><td>d59404354f84ad98fa809fd1295608bf3d658bdc</td><td>International Journal of Computer Vision manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Face Synthesis from Visual Attributes via Sketch using
+<br/>Conditional VAEs and GANs
+<br/>Received: date / Accepted: date
+</td><td>('29673017', 'Xing Di', 'xing di')</td><td></td></tr><tr><td>d5e1173dcb2a51b483f86694889b015d55094634</td><td></td><td></td><td></td></tr><tr><td>d28d32af7ef9889ef9cb877345a90ea85e70f7f1</td><td>2017 IEEE 12th International Conference on Automatic Face & Gesture Recognition
+<br/>2017 IEEE 12th International Conference on Automatic Face & Gesture Recognition
+<br/>2017 IEEE 12th International Conference on Automatic Face & Gesture Recognition
+<br/>2017 IEEE 12th International Conference on Automatic Face & Gesture Recognition
+<br/>2017 IEEE 12th International Conference on Automatic Face & Gesture Recognition
+<br/>Local-Global Landmark Confidences for Face Recognition
+<br/><b>Institute for Robotics and Intelligent Systems, University of Southern California, CA, USA</b><br/><b>Language Technologies Institute, Carnegie Mellon University, PA, USA</b></td><td>('2792633', 'KangGeon Kim', 'kanggeon kim')<br/>('1752756', 'Feng-Ju Chang', 'feng-ju chang')<br/>('1689391', 'Jongmoo Choi', 'jongmoo choi')<br/>('1767184', 'Louis-Philippe Morency', 'louis-philippe morency')<br/>('1694832', 'Ramakant Nevatia', 'ramakant nevatia')</td><td></td></tr><tr><td>d28d697b578867500632b35b1b19d3d76698f4a9</td><td>Appears in the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, CVPR’99, Fort Collins, Colorado, USA, June 23-25, 1999.
+<br/>Face Recognition Using Shape and Texture
+<br/>Department of Computer Science
+<br/><b>George Mason University</b><br/>Fairfax, VA 22030-4444
+<br/>
+</td><td>('39664966', 'Chengjun Liu', 'chengjun liu')<br/>('1781577', 'Harry Wechsler', 'harry wechsler')</td><td>@cs.gmu.edu
+</td></tr><tr><td>d231a81b38fde73bdbf13cfec57d6652f8546c3c</td><td>SUPERRESOLUTION TECHNIQUES
+<br/> FOR FACE RECOGNITION FROM VIDEO
+<br/>by
+<br/><b>B.S., E.E., Bo azi i University</b><br/>Submitted to the Graduate School of Engineering
+<br/> and Natural Sciences in partially fulfillment of
+<br/>the requirement for the degree of
+<br/>Master of Science
+<br/>Graduate Program in Electronics Engineering and Computer Science
+<br/><b>Sabanc University</b><br/>Spring 2005
+</td><td>('2258053', 'Osman Gökhan Sezer', 'osman gökhan sezer')</td><td></td></tr><tr><td>d22785eae6b7503cb16402514fd5bd9571511654</td><td>Evaluating Facial Expressions with Different
+<br/>Occlusion around Image Sequence
+<br/>
+<br/>Department of Computer Science
+<br/><b>Sanghvi Institute of Management and Science</b><br/>Indore (MP), India
+<br/>I.
+<br/>local
+<br/>INTRODUCTION
+<br/>
+</td><td>('2890210', 'Ramchand Hablani', 'ramchand hablani')</td><td></td></tr><tr><td>d2eb1079552fb736e3ba5e494543e67620832c52</td><td>ANNUNZIATA, SAGONAS, CALÌ: DENSELY FUSED SPATIAL TRANSFORMER NETWORKS1
+<br/>DeSTNet: Densely Fused Spatial
+<br/>Transformer Networks1
+<br/>Onfido Research
+<br/>3 Finsbury Avenue
+<br/>London, UK
+</td><td>('31336510', 'Roberto Annunziata', 'roberto annunziata')<br/>('3320415', 'Christos Sagonas', 'christos sagonas')<br/>('1997807', 'Jacques Calì', 'jacques calì')</td><td>roberto.annunziata@onfido.com
+<br/>christos.sagonas@onfido.com
+<br/>jacques.cali@onfido.com
+</td></tr><tr><td>d24dafe10ec43ac8fb98715b0e0bd8e479985260</td><td>J Nonverbal Behav (2018) 42:81–99
+<br/>https://doi.org/10.1007/s10919-017-0266-z
+<br/>O R I G I N A L P A P E R
+<br/>Effects of Social Anxiety on Emotional Mimicry
+<br/>and Contagion: Feeling Negative, but Smiling Politely
+<br/>• Gerben A. van Kleef2
+<br/>• Agneta H. Fischer2
+<br/>Published online: 25 September 2017
+<br/>Ó The Author(s) 2017. This article is an open access publication
+</td><td>('4041392', 'Corine Dijk', 'corine dijk')<br/>('35427440', 'Charlotte van Eeuwijk', 'charlotte van eeuwijk')<br/>('1878851', 'Nexhmedin Morina', 'nexhmedin morina')</td><td></td></tr><tr><td>d29eec5e047560627c16803029d2eb8a4e61da75</td><td>Feature Transfer Learning for Deep Face
+<br/>Recognition with Long-Tail Data
+<br/><b>Michigan State University, NEC Laboratories America</b></td><td>('39708770', 'Xi Yin', 'xi yin')<br/>('15644381', 'Xiang Yu', 'xiang yu')<br/>('1729571', 'Kihyuk Sohn', 'kihyuk sohn')<br/>('40022363', 'Xiaoming Liu', 'xiaoming liu')<br/>('2099305', 'Manmohan Chandraker', 'manmohan chandraker')</td><td>{yinxi1,liuxm}@cse.msu.edu,{xiangyu,ksohn,manu}@nec-labs.com
+</td></tr><tr><td>d280bcbb387b1d548173917ae82cb6944e3ceca6</td><td>FACIAL GRID TRANSFORMATION: A NOVEL FACE REGISTRATION APPROACH FOR
+<br/>IMPROVING FACIAL ACTION UNIT RECOGNITION
+<br/><b>University of South Carolina, Columbia, USA</b></td><td>('3225915', 'Shizhong Han', 'shizhong han')<br/>('3091647', 'Zibo Meng', 'zibo meng')<br/>('40205868', 'Ping Liu', 'ping liu')<br/>('1686235', 'Yan Tong', 'yan tong')</td><td></td></tr><tr><td>d278e020be85a1ccd90aa366b70c43884dd3f798</td><td>Learning From Less Data: Diversified Subset Selection and
+<br/>Active Learning in Image Classification Tasks
+<br/>IIT Bombay
+<br/>Mumbai, Maharashtra, India
+<br/>AITOE Labs
+<br/>Mumbai, Maharashtra, India
+<br/>AITOE Labs
+<br/>Mumbai, Maharashtra, India
+<br/>Rishabh Iyer
+<br/>AITOE Labs
+<br/>Seattle, Washington, USA
+<br/>AITOE Labs
+<br/>Seattle, Washington, USA
+<br/>Narsimha Raju
+<br/>IIT Bombay
+<br/>Mumbai, Maharashtra, India
+<br/>IIT Bombay
+<br/>Mumbai, Maharashtra, India
+<br/>IIT Bombay
+<br/>Mumbai, Maharashtra, India
+<br/>May 30, 2018
+</td><td>('3333118', 'Vishal Kaushal', 'vishal kaushal')<br/>('40224337', 'Khoshrav Doctor', 'khoshrav doctor')<br/>('33911191', 'Suyash Shetty', 'suyash shetty')<br/>('10710354', 'Anurag Sahoo', 'anurag sahoo')<br/>('49613683', 'Pankaj Singh', 'pankaj singh')<br/>('1697088', 'Ganesh Ramakrishnan', 'ganesh ramakrishnan')</td><td>vkaushal@cse.iitb.ac.in
+<br/>khoshrav@gmail.com
+<br/>suyashshetty29@gmail.com
+<br/>rishabh@aitoelabs.com
+<br/>anurag@aitoelabs.com
+<br/>uavnraju@cse.iitb.ac.in
+<br/>pr.pankajsingh@gmail.com
+<br/>ganesh@cse.iitb.ac.in
+</td></tr><tr><td>d26b443f87df76034ff0fa9c5de9779152753f0c</td><td>A GPU-Oriented Algorithm Design for
+<br/>Secant-Based Dimensionality Reduction
+<br/>Department of Mathematics
+<br/><b>Colorado State University</b><br/>Fort Collins, CO 80523-1874
+<br/>tool
+<br/>for extracting useful
+</td><td>('51042250', 'Henry Kvinge', 'henry kvinge')<br/>('51121534', 'Elin Farnell', 'elin farnell')<br/>('41211081', 'Michael Kirby', 'michael kirby')<br/>('30383278', 'Chris Peterson', 'chris peterson')</td><td></td></tr><tr><td>d2cd9a7f19600370bce3ea29aba97d949fe0ceb9</td><td>Separability Oriented Preprocessing for
+<br/>Illumination-Insensitive Face Recognition
+<br/>1 Key Lab of Intelligent Information Processing
+<br/>of Chinese Academy of Sciences (CAS),
+<br/><b>Institute of Computing Technology, CAS, Beijing 100190, China</b><br/>2 Department of Computer Science and Engineering,
+<br/><b>Michigan State University, East Lansing, MI 48824, U.S.A</b><br/>3 Omron Social Solutions Co., LTD., Kyoto, Japan
+<br/><b>Institute of Digital Media, Peking University, Beijing 100871, China</b><br/>some
+<br/>last decade,
+</td><td>('34393045', 'Hu Han', 'hu han')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('1710220', 'Xilin Chen', 'xilin chen')<br/>('1710195', 'Shihong Lao', 'shihong lao')<br/>('1698902', 'Wen Gao', 'wen gao')</td><td>{hhan,sgshan,xlchen}@jdl.ac.cn, lao@ari.ncl.omron.co.jp, wgao@pku.edu.cn
+</td></tr><tr><td>d22b378fb4ef241d8d210202893518d08e0bb213</td><td>Random Faces Guided Sparse Many-to-One Encoder
+<br/>for Pose-Invariant Face Recognition
+<br/><b>Polytechnic Institute of NYU, NY, USA</b><br/><b>College of Computer and Information Science, Northeastern University, MA, USA</b><br/><b>Northeastern University, MA, USA</b></td><td>('3272356', 'Yizhe Zhang', 'yizhe zhang')</td><td>zhangyizhe1987@gmail.com, mingshao@ccs.neu.edu, wong@poly.edu, yunfu@ece.neu.edu
+</td></tr><tr><td>aac39ca161dfc52aade063901f02f56d01a1693c</td><td>The Analysis of Parameters t and k of LPP on
+<br/>Several Famous Face Databases
+<br/><b>College of Computer Science and Technology</b><br/><b>Jilin University, Changchun 130012, China</b></td><td>('7489436', 'Sujing Wang', 'sujing wang')<br/>('1758249', 'Na Zhang', 'na zhang')<br/>('3028807', 'Mingfang Sun', 'mingfang sun')<br/>('8239114', 'Chunguang Zhou', 'chunguang zhou')</td><td>{wangsj08, nazhang08}@mails.jlu.edu.cn; cgzhou@jlu.edu.cn
+</td></tr><tr><td>aadf4b077880ae5eee5dd298ab9e79a1b0114555</td><td>Dynamics-based Facial Emotion Recognition and Pain Detection
+<br/>Using Hankel Matrices for
+<br/><b>DICGIM - University of Palermo</b><br/>V.le delle Scienze, Ed. 6, 90128 Palermo (Italy)
+</td><td>('1711610', 'Liliana Lo Presti', 'liliana lo presti')<br/>('9127836', 'Marco La Cascia', 'marco la cascia')</td><td>liliana.lopresti@unipa.it
+</td></tr><tr><td>aa127e6b2dc0aaccfb85e93e8b557f83ebee816b</td><td>Advancing Human Pose and
+<br/>Gesture Recognition
+<br/>DPhil Thesis
+<br/>Supervisor: Professor Andrew Zisserman
+<br/>Tomas Pfister
+<br/>Visual Geometry Group
+<br/>Department of Engineering Science
+<br/><b>University of Oxford</b><br/><b>Wolfson College</b><br/>April 2015
+</td><td></td><td></td></tr><tr><td>aafb271684a52a0b23debb3a5793eb618940c5dd</td><td></td><td></td><td></td></tr><tr><td>aae742779e8b754da7973949992d258d6ca26216</td><td>Robust Facial Expression Classification Using Shape
+<br/>and Appearance Features
+<br/>Department of Electrical Engineering,
+<br/><b>Indian Institute of Technology Kharagpur, India</b></td><td>('2680543', 'Aurobinda Routray', 'aurobinda routray')</td><td></td></tr><tr><td>aa8ef6ba6587c8a771ec4f91a0dd9099e96f6d52</td><td>Improved Face Tracking Thanks to Local Features
+<br/>Correspondence
+<br/>Department of Information Engineering
+<br/><b>University of Brescia</b></td><td>('3134795', 'Alberto Piacenza', 'alberto piacenza')<br/>('1806359', 'Fabrizio Guerrini', 'fabrizio guerrini')<br/>('1741369', 'Riccardo Leonardi', 'riccardo leonardi')</td><td></td></tr><tr><td>aab3561acbd19f7397cbae39dd34b3be33220309</td><td>Quantization Mimic: Towards Very Tiny CNN
+<br/>for Object Detection
+<br/><b>Tsinghua University, Beijing, China</b><br/><b>The Chinese University of Hong Kong, Hong Kong, China</b><br/>3SenseTime, Beijing, China
+<br/><b>The University of Sydney, SenseTime Computer Vision Research Group, Sydney</b><br/>New South Wales, Australia
+</td><td>('49019561', 'Yi Wei', 'yi wei')<br/>('7418754', 'Xinyu Pan', 'xinyu pan')<br/>('46636770', 'Hongwei Qin', 'hongwei qin')<br/>('1721677', 'Junjie Yan', 'junjie yan')</td><td>wei-y15@mails.tsinghua.edu.cn,THUSEpxy@gmail.com
+<br/>qinhongwei@sensetime.com,wanli.ouyang@sydney.edu.au
+<br/>yanjunjie@sensetime.com
+</td></tr><tr><td>aa912375eaf50439bec23de615aa8a31a3395ad3</td><td>International Journal on Cryptography and Information Security(IJCIS),Vol.2, No.2, June 2012
+<br/>Implementation of a New Methodology to Reduce
+<br/>the Effects of Changes of Illumination in Face
+<br/>Recognition-based Authentication
+<br/><b>Howard University, Washington DC</b><br/><b>Howard University, Washington DC</b></td><td>('3437323', 'Andres Alarcon-Ramirez', 'andres alarcon-ramirez')<br/>('2522254', 'Mohamed F. Chouikha', 'mohamed f. chouikha')</td><td>alarconramirezandr@bison.howard.edu
+<br/>mchouikha@howard.edu
+</td></tr><tr><td>aa52910c8f95e91e9fc96a1aefd406ffa66d797d</td><td>FACE RECOGNITION SYSTEM BASED
+<br/>ON 2DFLD AND PCA
+<br/>E&TC Department
+<br/>Sinhgad Academy of Engineering
+<br/>Pune, India
+<br/>Mr. Hulle Rohit Rajiv
+<br/>ME E&TC [Digital System]
+<br/>Sinhgad Academy of Engineering
+<br/>Pune, India
+</td><td>('2985198', 'Sachin D. Ruikar', 'sachin d. ruikar')</td><td>ruikarsachin@gmail.com
+<br/>rohithulle@gmail.com
+</td></tr><tr><td>aaeb8b634bb96a372b972f63ec1dc4db62e7b62a</td><td>ISSN (e): 2250 – 3005 || Vol, 04 || Issue, 12 || December – 2014 ||
+<br/>International Journal of Computational Engineering Research (IJCER)
+<br/>Facial Expression Recognition System: A Digital Printing
+<br/>Application
+<br/><b>Jadavpur University, India</b><br/><b>Jadavpur University, India</b></td><td>('2226316', 'Somnath Banerjee', 'somnath banerjee')</td><td></td></tr><tr><td>aafb8dc8fda3b13a64ec3f1ca7911df01707c453</td><td>Excitation Backprop for RNNs
+<br/><b>Boston University 2Pattern Analysis and Computer Vision (PAVIS</b><br/>Istituto Italiano di Tecnologia 3Adobe Research 4Computer Science Department, Universit`a di Verona
+<br/>Figure 1: Our proposed framework spatiotemporally highlights/grounds the evidence that an RNN model used in producing a class label
+<br/>or caption for a given input video. In this example, by using our proposed back-propagation method, the evidence for the activity class
+<br/>CliffDiving is highlighted in a video that contains CliffDiving and HorseRiding. Our model employs a single backward pass to produce
+<br/>saliency maps that highlight the evidence that a given RNN used in generating its outputs.
+</td><td>('3298267', 'Sarah Adel Bargal', 'sarah adel bargal')<br/>('40063519', 'Andrea Zunino', 'andrea zunino')<br/>('40622560', 'Donghyun Kim', 'donghyun kim')<br/>('1701293', 'Jianming Zhang', 'jianming zhang')<br/>('1727204', 'Vittorio Murino', 'vittorio murino')<br/>('1749590', 'Stan Sclaroff', 'stan sclaroff')</td><td>{sbargal,donhk,sclaroff}@bu.edu, {andrea.zunino,vittorio.murino}@iit.it, jianmzha@adobe.com
+</td></tr><tr><td>aa0c30bd923774add6e2f27ac74acd197b9110f2</td><td>DYNAMIC PROBABILISTIC LINEAR DISCRIMINANT ANALYSIS FOR VIDEO
+<br/>CLASSIFICATION
+<br/><b>Deparment of Computing, Imperial College London, UK</b><br/><b>Deparment of Computing, Goldsmiths, University of London, UK</b><br/><b>Middlesex University London, 4International Hellenic University</b><br/><b>Center for Machine Vision and Signal Analysis, University of Oulu, Finland</b></td><td>('35340264', 'Alessandro Fabris', 'alessandro fabris')<br/>('1752913', 'Mihalis A. Nicolaou', 'mihalis a. nicolaou')<br/>('1754270', 'Irene Kotsia', 'irene kotsia')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')</td><td></td></tr><tr><td>aadfcaf601630bdc2af11c00eb34220da59b7559</td><td>Multi-view Hybrid Embedding:
+<br/>A Divide-and-Conquer Approach
+</td><td>('30443690', 'Jiamiao Xu', 'jiamiao xu')<br/>('2462771', 'Shujian Yu', 'shujian yu')<br/>('1744228', 'Xinge You', 'xinge you')<br/>('3381421', 'Mengjun Leng', 'mengjun leng')<br/>('15132338', 'Xiao-Yuan Jing', 'xiao-yuan jing')<br/>('1697202', 'C. L. Philip Chen', 'c. l. philip chen')</td><td></td></tr><tr><td>aaa4c625f5f9b65c7f3df5c7bfe8a6595d0195a5</td><td>Biometrics in Ambient Intelligence
+</td><td>('1725688', 'Massimo Tistarelli', 'massimo tistarelli')</td><td></td></tr><tr><td>aac934f2eed758d4a27562dae4e9c5415ff4cdb7</td><td>TS-LSTM and Temporal-Inception:
+<br/>Exploiting Spatiotemporal Dynamics for Activity Recognition
+<br/><b>Georgia Institute of Technology</b><br/>2Georgia Tech Research Institution
+</td><td>('7437104', 'Chih-Yao Ma', 'chih-yao ma')<br/>('1960668', 'Min-Hung Chen', 'min-hung chen')<br/>('1746245', 'Zsolt Kira', 'zsolt kira')</td><td>{cyma, cmhungsteve, zkira, alregib}@gatech.edu
+</td></tr><tr><td>aa331fe378056b6d6031bb8fe6676e035ed60d6d</td><td></td><td></td><td></td></tr><tr><td>aae0e417bbfba701a1183d3d92cc7ad550ee59c3</td><td>844
+<br/>A Statistical Method for 2-D Facial Landmarking
+</td><td>('1764521', 'Albert Ali Salah', 'albert ali salah')<br/>('1695527', 'Theo Gevers', 'theo gevers')</td><td></td></tr><tr><td>aa577652ce4dad3ca3dde44f881972ae6e1acce7</td><td>Deep Attribute Networks
+<br/>Department of EE, KAIST
+<br/>Daejeon, South Korea
+<br/>Department of EE, KAIST
+<br/>Daejeon, South Korea
+<br/>Department of EE, KAIST
+<br/>Daejeon, South Korea
+<br/>Department of EE, KAIST
+<br/>Daejeon, South Korea
+</td><td>('8270717', 'Junyoung Chung', 'junyoung chung')<br/>('2350325', 'Donghoon Lee', 'donghoon lee')<br/>('2397884', 'Youngjoo Seo', 'youngjoo seo')<br/>('5578091', 'Chang D. Yoo', 'chang d. yoo')</td><td>jych@kaist.ac.kr
+<br/>iamdh@kaist.ac.kr
+<br/>minerrba@kaist.ac.kr
+<br/>cdyoo@ee.kaist.ac.kr
+</td></tr><tr><td>aa3c9de34ef140ec812be85bb8844922c35eba47</td><td>Reducing Gender Bias Amplification using Corpus-level Constraints
+<br/>Men Also Like Shopping:
+<br/><b>University of Virginia</b><br/><b>University of Washington</b></td><td>('3456473', 'Tianlu Wang', 'tianlu wang')<br/>('2064210', 'Mark Yatskar', 'mark yatskar')<br/>('33524946', 'Jieyu Zhao', 'jieyu zhao')<br/>('2782886', 'Kai-Wei Chang', 'kai-wei chang')<br/>('2004053', 'Vicente Ordonez', 'vicente ordonez')</td><td>{jz4fu, tw8cb, vicente, kc2wc}@virginia.edu
+<br/>my89@cs.washington.edu
+</td></tr><tr><td>aa94f214bb3e14842e4056fdef834a51aecef39c</td><td>Reconhecimento de padrões faciais: Um estudo
+<br/>Universidade Federal
+<br/>Rural do Semi-Árido
+<br/>Departamento de Ciências Naturais
+<br/>Mossoró, RN - 59625-900
+<br/>Resumo—O reconhecimento facial tem sido utilizado em di-
+<br/>versas áreas para identificação e autenticação de usuários. Um
+<br/>dos principais mercados está relacionado a segurança, porém há
+<br/>uma grande variedade de aplicações relacionadas ao uso pessoal,
+<br/>conveniência, aumento de produtividade, etc. O rosto humano
+<br/>possui um conjunto de padrões complexos e mutáveis. Para
+<br/>reconhecer esses padrões, são necessárias técnicas avançadas de
+<br/>reconhecimento de padrões capazes, não apenas de reconhecer,
+<br/>mas de se adaptar às mudanças constantes das faces das pessoas.
+<br/>Este documento apresenta um método de reconhecimento facial
+<br/>proposto a partir da análise comparativa de trabalhos encontra-
+<br/>dos na literatura.
+<br/>biométrica é o uso da biometria para reconhecimento, identi-
+<br/>ficação ou verificação, de um ou mais traços biométricos de
+<br/>um indivíduo com o objetivo de autenticar sua identidade. Os
+<br/>traços biométricos são os atributos analisados pelas técnicas
+<br/>de reconhecimento biométrico.
+<br/>A tarefa de reconhecimento facial é composta por três
+<br/>processos distintos: Registro, verificação e identificação bio-
+<br/>métrica. Os processos se diferenciam pela forma de determinar
+<br/>a identidade de um indivíduo. Na Figura 1 são descritos os
+<br/>processos de registro, verificação e identificação biométrica.
+<br/>I. INTRODUÇÃO
+<br/>Biometria é a ciência que estabelece a identidade de um
+<br/>indivíduo baseada em seus atributos físicos, químicos ou
+<br/>comportamentais [1]. Possui inúmeras aplicações em diver-
+<br/>sas áreas, se destacando mais na área de segurança, como
+<br/>por exemplo sistemas de gerenciamento de identidade, cuja
+<br/>funcionalidade é autenticar a identidade de um indivíduo no
+<br/>contexto de uma aplicação.
+<br/>O reconhecimento facial é uma técnica biométrica que
+<br/>consiste em identificar padrões em características faciais como
+<br/>formato da boca, do rosto, distância dos olhos, entre outros.
+<br/>Um humano é capaz de reconhecer uma pessoa familiar
+<br/>mesmo com muitos obstáculos com distância, sombras ou
+<br/>apenas a visão parcial do rosto. Uma máquina, no entanto,
+<br/>precisa realizar inúmeros processos para detectar e reconhecer
+<br/>um conjunto de padrões específicos para rotular uma face
+<br/>como conhecida ou desconhecida. Para isso, exitem métodos
+<br/>capazes de detectar, extrair e classificar as características
+<br/>faciais, fornecendo um reconhecimento automático de pessoas.
+<br/>II. RECONHECIMENTO FACIAL
+<br/>A tecnologia biométrica oferece vantagens em relação a
+<br/>outros métodos tradicionais de identificação como senhas,
+<br/>documentos e tokens. Entre elas estão o fato de que os
+<br/>traços biométricos não podem ser perdidos ou esquecidos, são
+<br/>difíceis de serem copiados, compartilhados ou distribuídos. Os
+<br/>métodos requerem que a pessoa autenticada esteja presente
+<br/>na hora e lugar da autenticação, evitando que pessoas má
+<br/>intencionadas tenham acesso sem autorização.
+<br/>A autenticação é o ato de estabelecer ou confirmar alguém,
+<br/>ou alguma coisa, como autêntico, isto é, que as alegações
+<br/>feitas por ou sobre a coisa é verdadeira [2]. Autenticação
+<br/>(a)
+<br/>(b)
+<br/>(c)
+<br/>Figura 1: Registro biométrico (a), identificação biométrica (b)
+<br/>e verificação biométrica (c)
+<br/>A Figura 1a descreve o processo de registro de dados
+</td><td>('2545499', 'Marcos Evandro Cintra', 'marcos evandro cintra')</td><td>Email: alexdemise@gmail.com, mecintra@gmail.com
+</td></tr><tr><td>aac101dd321e6d2199d8c0b48c543b541c181b66</td><td>USING CONTEXT TO ENHANCE THE
+<br/>UNDERSTANDING OF FACE IMAGES
+<br/>A Dissertation Presented
+<br/>by
+<br/>VIDIT JAIN
+<br/>Submitted to the Graduate School of the
+<br/><b>University of Massachusetts Amherst in partial ful llment</b><br/>of the requirements for the degree of
+<br/>DOCTOR OF PHILOSOPHY
+<br/>September 2010
+<br/>Department of Computer Science
+</td><td></td><td></td></tr><tr><td>af8fe1b602452cf7fc9ecea0fd4508ed4149834e</td><td></td><td></td><td></td></tr><tr><td>af6e351d58dba0962d6eb1baf4c9a776eb73533f</td><td>How to Train Your Deep Neural Network with
+<br/>Dictionary Learning
+<br/>*IIIT Delhi
+<br/>Okhla Phase 3
+<br/>Delhi, 110020, India
+<br/>+IIIT Delhi
+<br/>Okhla Phase 3
+<br/>#IIIT Delhi
+<br/>Okhla Phase 3
+<br/>Delhi, 110020, India
+<br/>Delhi, 110020, India
+</td><td>('30255052', 'Vanika Singhal', 'vanika singhal')<br/>('38608015', 'Shikha Singh', 'shikha singh')<br/>('2641605', 'Angshul Majumdar', 'angshul majumdar')</td><td>vanikas@iiitd.ac.in
+<br/>shikhas@iiitd.ac.in
+<br/>angshul@iiitd.ac.in
+</td></tr><tr><td>aff92784567095ee526a705e21be4f42226bbaab</td><td>Face Recognition in Uncontrolled
+<br/>Environments
+<br/>A dissertation submitted in partial fulfillment
+<br/>of the requirements for the degree of
+<br/>Doctor of Philosophy
+<br/>at
+<br/><b>University College London</b><br/>Department of Computer Science
+<br/><b>University College London</b><br/>May 26, 2015
+</td><td>('38098063', 'Yun Fu', 'yun fu')</td><td></td></tr><tr><td>aff8705fb2f2ae460cb3980b47f2e85c2e6dd41a</td><td>Attributes in Multiple Facial Images
+<br/><b>West Virginia University, Morgantown</b><br/>WV 26506, USA
+</td><td>('1767347', 'Xudong Liu', 'xudong liu')<br/>('1822413', 'Guodong Guo', 'guodong guo')</td><td>xdliu@mix.wvu.edu, guodong.guo@mail.wvu.edu
+</td></tr><tr><td>af13c355a2a14bb74847aedeafe990db3fc9cbd4</td><td>Happy and Agreeable? Multi-Label Classification of
+<br/>Impressions in Social Video
+<br/><b>Idiap Research Institute</b><br/>Switzerland
+<br/>Instituto Potosino de
+<br/>Investigación Científica y
+<br/>Tecnológica
+<br/>Mexico
+<br/><b>Idiap Research Institute</b><br/>École Polytechnique Fédérale
+<br/>de Lausanne
+<br/>Switzerland
+</td><td>('2389354', 'Gilberto Chávez-Martínez', 'gilberto chávez-martínez')<br/>('1934619', 'Salvador Ruiz-Correa', 'salvador ruiz-correa')<br/>('1698682', 'Daniel Gatica-Perez', 'daniel gatica-perez')</td><td>gchavez@idiap.ch
+<br/>src@cmls.pw
+<br/>gatica@idiap.ch
+</td></tr><tr><td>af6cae71f24ea8f457e581bfe1240d5fa63faaf7</td><td></td><td></td><td></td></tr><tr><td>af62621816fbbe7582a7d237ebae1a4d68fcf97d</td><td>International Journal of Engineering Research and Applications (IJERA) ISSN: 2248-9622
+<br/>International Conference on Humming Bird ( 01st March 2014)
+<br/>RESEARCH ARTICLE
+<br/> OPEN ACCESS
+<br/>Active Shape Model Based Recognition Of Facial Expression
+<br/>AncyRija V , Gayathri. S2
+<br/><b>AncyRijaV, Author is currently pursuing M.E (Software Engineering) in Vins Christian College of</b><br/>Engineering,
+<br/><b>Gayathri.S, M.E., Vins Christian college of Engineering</b></td><td></td><td>e-mail: ancyrija@gmail.com.
+</td></tr><tr><td>afdf9a3464c3b015f040982750f6b41c048706f5</td><td>A Recurrent Encoder-Decoder Network for Sequential Face Alignment
+<br/><b>Rutgers University</b><br/>Rogerio Feris
+<br/>IBM T. J. Watson
+<br/>Snapchat Research
+<br/>Dimitris Metaxas
+<br/><b>Rutgers University</b></td><td>('4340744', 'Xi Peng', 'xi peng')<br/>('48631738', 'Xiaoyu Wang', 'xiaoyu wang')</td><td>xipeng.cs@rutgers.edu
+<br/>rsferis@us.ibm.com
+<br/>fanghuaxue@gmail.com
+<br/>dnm@cs.rutgers.edu
+</td></tr><tr><td>af54dd5da722e104740f9b6f261df9d4688a9712</td><td></td><td></td><td></td></tr><tr><td>afa57e50570a6599508ee2d50a7b8ca6be04834a</td><td>Motion in action : optical flow estimation and action
+<br/>localization in videos
+<br/>To cite this version:
+<br/>Computer Vision and Pattern Recognition [cs.CV]. Université Grenoble Alpes, 2016. English. <NNT :
+<br/>2016GREAM013>. <tel-01407258>
+<br/>HAL Id: tel-01407258
+<br/>https://tel.archives-ouvertes.fr/tel-01407258
+<br/>Submitted on 1 Dec 2016
+<br/>HAL is a multi-disciplinary open access
+<br/>archive for the deposit and dissemination of sci-
+<br/>entific research documents, whether they are pub-
+<br/>lished or not. The documents may come from
+<br/>teaching and research institutions in France or
+<br/><b>abroad, or from public or private research centers</b><br/>L’archive ouverte pluridisciplinaire HAL, est
+<br/>destinée au dépôt et à la diffusion de documents
+<br/>scientifiques de niveau recherche, publiés ou non,
+<br/>émanant des établissements d’enseignement et de
+<br/>recherche français ou étrangers, des laboratoires
+<br/>publics ou privés.
+</td><td>('2492127', 'Philippe Weinzaepfel', 'philippe weinzaepfel')<br/>('2492127', 'Philippe Weinzaepfel', 'philippe weinzaepfel')</td><td></td></tr><tr><td>afe9cfba90d4b1dbd7db1cf60faf91f24d12b286</td><td>Principal Directions of Synthetic Exact Filters
+<br/>for Robust Real-Time Eye Localization
+<br/>Vitomir ˇStruc1;2, Jerneja ˇZganec Gros1, and Nikola Paveˇsi´c2
+<br/>1 Alpineon Ltd, Ulica Iga Grudna 15, SI-1000 Ljubljana, Slovenia,
+<br/><b>Faculty of Electrical Engineering, University of Ljubljana, Tr za ska cesta</b><br/>SI-1000 Ljubljana, Slovenia,
+</td><td></td><td>fvitomir.struc, jerneja.grosg@alpineon.com,
+<br/>fvitomir.struc, nikola.pavesicg@fe.uni-lj.si
+</td></tr><tr><td>afa84ff62c9f5b5c280de2996b69ad9fa48b7bc3</td><td>Two-stream Flow-guided Convolutional Attention Networks for Action
+<br/>Recognition
+<br/><b>National University of Singapore</b><br/>Loong-Fah Cheong
+</td><td>('25205026', 'An Tran', 'an tran')</td><td>an.tran@u.nus.edu
+<br/>eleclf@nus.edu.sg
+</td></tr><tr><td>af278274e4bda66f38fd296cfa5c07804fbc26ee</td><td>RESEARCH ARTICLE
+<br/>A Novel Maximum Entropy Markov Model for
+<br/>Human Facial Expression Recognition
+<br/><b>College of Information and Communication Engineering, Sungkyunkwan University, Suwon-si, Gyeonggi</b><br/><b>do, Rep. of Korea, Kyung Hee University, Suwon, Rep. of Korea</b><br/><b>Innopolis University, Kazan, Russia</b><br/>a11111
+<br/>☯ These authors contributed equally to this work.
+</td><td>('1711083', 'Muhammad Hameed Siddiqi', 'muhammad hameed siddiqi')<br/>('2401685', 'Md. Golam Rabiul Alam', 'md. golam rabiul alam')<br/>('1683244', 'Choong Seon Hong', 'choong seon hong')<br/>('1734679', 'Hyunseung Choo', 'hyunseung choo')</td><td>* choo@skku.edu
+</td></tr><tr><td>af654a7ec15168b16382bd604889ea07a967dac6</td><td>FACE RECOGNITION COMMITTEE MACHINE
+<br/>Department of Computer Science and Engineering
+<br/><b>The Chinese University of Hong Kong</b><br/>Shatin, Hong Kong
+</td><td>('2899702', 'Ho-Man Tang', 'ho-man tang')<br/>('1681775', 'Michael R. Lyu', 'michael r. lyu')<br/>('1706259', 'Irwin King', 'irwin king')</td><td>
+</td></tr><tr><td>afc7092987f0d05f5685e9332d83c4b27612f964</td><td>Person-Independent Facial Expression Detection using Constrained
+<br/>Local Models
+</td><td>('1713496', 'Patrick Lucey', 'patrick lucey')<br/>('1820249', 'Simon Lucey', 'simon lucey')<br/>('1737918', 'Jeffrey F. Cohn', 'jeffrey f. cohn')<br/>('1729760', 'Sridha Sridharan', 'sridha sridharan')</td><td></td></tr><tr><td>b730908bc1f80b711c031f3ea459e4de09a3d324</td><td>2024
+<br/>Active Orientation Models for Face
+<br/>Alignment In-the-Wild
+</td><td>('2610880', 'Georgios Tzimiropoulos', 'georgios tzimiropoulos')<br/>('2575567', 'Joan Alabort-i-Medina', 'joan alabort-i-medina')<br/>('1694605', 'Maja Pantic', 'maja pantic')</td><td></td></tr><tr><td>b7426836ca364603ccab0e533891d8ac54cf2429</td><td>Hindawi
+<br/>Journal of Healthcare Engineering
+<br/>Volume 2017, Article ID 3090343, 31 pages
+<br/>https://doi.org/10.1155/2017/3090343
+<br/>Review Article
+<br/>A Review on Human Activity Recognition Using
+<br/>Vision-Based Method
+<br/><b>College of Information Science and Engineering, Ocean University of China, Qingdao, China</b><br/><b>Tsinghua University, Beijing, China</b><br/>Received 22 February 2017; Accepted 11 June 2017; Published 20 July 2017
+<br/>Academic Editor: Dong S. Park
+<br/>which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
+<br/>Human activity recognition (HAR) aims to recognize activities from a series of observations on the actions of subjects and the
+<br/>environmental conditions. The vision-based HAR research is the basis of many applications including video surveillance, health
+<br/>care, and human-computer interaction (HCI). This review highlights the advances of state-of-the-art activity recognition
+<br/>approaches, especially for the activity representation and classification methods. For the representation methods, we sort out a
+<br/>chronological research trajectory from global representations to local representations, and recent depth-based representations.
+<br/>For the classification methods, we conform to the categorization of template-based methods, discriminative models, and
+<br/>generative models and review several prevalent methods. Next, representative and available datasets are introduced. Aiming to
+<br/>provide an overview of those methods and a convenient way of comparing them, we classify existing literatures with a detailed
+<br/>taxonomy including representation and classification methods, as well as the datasets they used. Finally, we investigate the
+<br/>directions for future research.
+<br/>1. Introduction
+<br/>Human activity recognition (HAR) is a widely studied com-
+<br/>puter vision problem. Applications of HAR include video
+<br/>surveillance, health care, and human-computer interaction.
+<br/>As the imaging technique advances and the camera device
+<br/>upgrades, novel approaches for HAR constantly emerge. This
+<br/>review aims to provide a comprehensive introduction to the
+<br/>video-based human activity recognition, giving an overview
+<br/>of various approaches as well as their evolutions by covering
+<br/>both the representative classical literatures and the state-of-
+<br/>the-art approaches.
+<br/>Human activities have an inherent hierarchical structure
+<br/>that indicates the different levels of it, which can be consid-
+<br/>ered as a three-level categorization. First, for the bottom level,
+<br/>there is an atomic element and these action primitives consti-
+<br/>tute more complex human activities. After the action primi-
+<br/>tive level, the action/activity comes as the second level.
+<br/>Finally, the complex interactions form the top level, which
+<br/>refers to the human activities that involve more than two
+<br/>persons and objects. In this paper, we follow this three-level
+<br/>categorization namely action primitives, actions/activities,
+<br/>and interactions. This three-level categorization varies a little
+<br/>from previous surveys [1–4] and maintains a consistent
+<br/>theme. Action primitives are those atomic actions at the limb
+<br/>level, such as “stretching the left arm,” and “raising the right
+<br/>leg.” Atomic actions are performed by a specific part of the
+<br/>human body, such as the hands, arms, or upper body part
+<br/>[4]. Actions and activities are used interchangeably in this
+<br/>review, referring to the whole-body movements composed
+<br/>of several action primitives in temporal sequential order
+<br/>and performed by a single person with no more person or
+<br/>additional objects. Specifically, we refer the terminology
+<br/>human activities as all movements of the three layers and
+<br/>the activities/actions as the middle level of human activities.
+<br/>Human activities like walking, running, and waving hands
+<br/>are categorized in the actions/activities level. Finally, similar
+<br/>to Aggarwal et al.’s review [2], interactions are human activ-
+<br/>ities that involve two or more persons and objects. The
+<br/>additional person or object is an important characteristic of
+</td><td>('7671146', 'Shugang Zhang', 'shugang zhang')<br/>('39868595', 'Zhiqiang Wei', 'zhiqiang wei')<br/>('2896895', 'Jie Nie', 'jie nie')<br/>('40284611', 'Lei Huang', 'lei huang')<br/>('40658604', 'Shuang Wang', 'shuang wang')<br/>('40166799', 'Zhen Li', 'zhen li')<br/>('7671146', 'Shugang Zhang', 'shugang zhang')</td><td>Correspondence should be addressed to Zhen Li; lizhen0130@gmail.com
+</td></tr><tr><td>b73795963dc623a634d218d29e4a5b74dfbc79f1</td><td>ZHAO, YANG: IDENTITY PRESERVING FACE COMPLETION FOR LARGE OCULAR RO
+<br/>Identity Preserving Face Completion for
+<br/>Large Ocular Region Occlusion
+<br/>1 Computer Science Department
+<br/><b>University of Kentucky</b><br/>Lexington, KY, USA
+<br/><b>Institute for Creative Technologies</b><br/><b>University of Southern California</b><br/>Playa Vista, California, USA
+<br/>3 School of Computer Science and
+<br/>Technology
+<br/><b>Harbin Institute of Technology</b><br/>Harbin, China
+<br/><b>Hangzhou Institute of Service</b><br/>Engineering
+<br/><b>Hangzhou Normal University</b><br/>Hangzhou, China
+</td><td>('2613340', 'Yajie Zhao', 'yajie zhao')<br/>('47483055', 'Weikai Chen', 'weikai chen')<br/>('1780032', 'Jun Xing', 'jun xing')<br/>('21515518', 'Xiaoming Li', 'xiaoming li')<br/>('3408065', 'Zach Bessinger', 'zach bessinger')<br/>('1752129', 'Fuchang Liu', 'fuchang liu')<br/>('1724520', 'Wangmeng Zuo', 'wangmeng zuo')<br/>('38958903', 'Ruigang Yang', 'ruigang yang')</td><td>yajie.zhao@uky.edu
+<br/>wechen@ict.usc.edu
+<br/>junxnui@gmail.com
+<br/>hit.xmshr@gmail.com
+<br/>zach.bessinger@gmail.com
+<br/>20140022@hznu.edu.cn
+<br/>cswmzuo@gmail.com
+<br/>ryang@cs.uky.edu
+</td></tr><tr><td>b7cf7bb574b2369f4d7ebc3866b461634147041a</td><td>Neural Comput & Applic (2012) 21:1575–1583
+<br/>DOI 10.1007/s00521-011-0728-x
+<br/>O R I G I N A L A R T I C L E
+<br/>From NLDA to LDA/GSVD: a modified NLDA algorithm
+<br/>Received: 2 August 2010 / Accepted: 3 August 2011 / Published online: 19 August 2011
+<br/>Ó Springer-Verlag London Limited 2011
+</td><td>('1692984', 'Jun Yin', 'jun yin')</td><td></td></tr><tr><td>b750b3d8c34d4e57ecdafcd5ae8a15d7fa50bc24</td><td>Unified Solution to Nonnegative Data Factorization Problems
+<br/><b>Huazhong University of Science and Technology, Wuhan, China</b><br/><b>National University of Singapore, Singapore</b></td><td>('1817910', 'Xiaobai Liu', 'xiaobai liu')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')<br/>('2156156', 'Hai Jin', 'hai jin')</td><td></td></tr><tr><td>b7894c1f805ffd90ab4ab06002c70de68d6982ab</td><td>Biomedical Research 2017; Special Issue: S610-S618
+<br/>ISSN 0970-938X
+<br/>www.biomedres.info
+<br/>A comprehensive age estimation on face images using hybrid filter based
+<br/>feature extraction.
+<br/>Karthikeyan D1*, Balakrishnan G2
+<br/><b>Srinivasan Engineering College, Perambalur, India</b><br/><b>Indra Ganesan College of Engineering, Trichy, India</b></td><td></td><td></td></tr><tr><td>b7eead8586ffe069edd190956bd338d82c69f880</td><td>A VIDEO DATABASE FOR FACIAL
+<br/>BEHAVIOR UNDERSTANDING
+<br/>D. Freire-Obreg´on and M. Castrill´on-Santana.
+<br/>SIANI, Universidad de Las Palmas de Gran Canaria, Spain
+</td><td></td><td>dfreire@iusiani.ulpgc.es, mcastrillon@iusiani.ulpgc.es
+</td></tr><tr><td>b75cee96293c11fe77ab733fc1147950abbe16f9</td><td></td><td></td><td></td></tr><tr><td>b7774c096dc18bb0be2acef07ff5887a22c2a848</td><td>Distance metric learning for image and webpage
+<br/>comparison
+<br/>To cite this version:
+<br/>versité Pierre et Marie Curie - Paris VI, 2015. English. <NNT : 2015PA066019>. <tel-01135698v2>
+<br/>HAL Id: tel-01135698
+<br/>https://tel.archives-ouvertes.fr/tel-01135698v2
+<br/>Submitted on 18 Mar 2015
+<br/>HAL is a multi-disciplinary open access
+<br/>archive for the deposit and dissemination of sci-
+<br/>entific research documents, whether they are pub-
+<br/>lished or not. The documents may come from
+<br/>teaching and research institutions in France or
+<br/><b>abroad, or from public or private research centers</b><br/>L’archive ouverte pluridisciplinaire HAL, est
+<br/>destinée au dépôt et à la diffusion de documents
+<br/>scientifiques de niveau recherche, publiés ou non,
+<br/>émanant des établissements d’enseignement et de
+<br/>recherche français ou étrangers, des laboratoires
+<br/>publics ou privés.
+</td><td>('32868306', 'Marc Teva Law', 'marc teva law')<br/>('32868306', 'Marc Teva Law', 'marc teva law')</td><td></td></tr><tr><td>b7f05d0771da64192f73bdb2535925b0e238d233</td><td> MVA2005 IAPR Conference on Machine VIsion Applications, May 16-18, 2005 Tsukuba Science City, Japan
+<br/>4-3
+<br/>Robust Active Shape Model using AdaBoosted Histogram Classifiers
+<br/>W ataru Ito
+<br/>Imaging Software Technology Center
+<br/>Imaging Software Technology Center
+<br/>FUJI PHOTO FILM CO., LTD.
+<br/>FUJI PHOTO FILM CO., LTD.
+</td><td>('1724928', 'Yuanzhong Li', 'yuanzhong li')</td><td>li_yuanzhong@ fujifilm.co.jp
+<br/>wataru_ito@ fujifilm.co.jp
+</td></tr><tr><td>b755505bdd5af078e06427d34b6ac2530ba69b12</td><td>To appear in the International Joint Conf. Biometrics, Washington D.C., October, 2011
+<br/>NFRAD: Near-Infrared Face Recognition at a Distance
+<br/>aDept. of Brain and Cognitive Eng. Korea Univ., Seoul, Korea
+<br/>bDept. of Comp. Sci. & Eng. Michigan State Univ., E. Lansing, MI, USA 48824
+</td><td>('2429013', 'Hyunju Maeng', 'hyunju maeng')<br/>('2131755', 'Hyun-Cheol Choi', 'hyun-cheol choi')<br/>('2222919', 'Unsang Park', 'unsang park')<br/>('1703007', 'Seong-Whan Lee', 'seong-whan lee')<br/>('6680444', 'Anil K. Jain', 'anil k. jain')</td><td>{hjmaeng, hcchoi}@korea.ac.kr, parkunsa@cse.msu.edu, swlee@image.korea.ac.kr , jain@cse.msu.edu
+</td></tr><tr><td>b7820f3d0f43c2ce613ebb6c3d16eb893c84cf89</td><td>Visual Data Synthesis via GAN for Zero-Shot Video Classification
+<br/><b>Institute of Computer Science and Technology, Peking University</b><br/>Beijing 100871, China
+</td><td>('2439211', 'Chenrui Zhang', 'chenrui zhang')<br/>('1704081', 'Yuxin Peng', 'yuxin peng')</td><td>pengyuxin@pku.edu.cn
+</td></tr><tr><td>b7b461f82c911f2596b310e2b18dd0da1d5d4491</td><td>2961
+<br/>2014 IEEE International Conference on Acoustic, Speech and Signal Processing (ICASSP)
+<br/>978-1-4799-2893-4/14/$31.00 ©2014 IEEE
+<br/>K-MAPPINGS AND REGRESSION TREES
+<br/><b>SAMSI and Duke University</b><br/>1. INTRODUCTION
+<br/>argminM1,...,MK
+<br/>P1,...PK
+<br/>2.1. Partitioning Y
+<br/>K(cid:2)
+<br/>(cid:2)
+<br/>(cid:3)
+<br/>(cid:4)
+</td><td>('3149531', 'Arthur Szlam', 'arthur szlam')</td><td></td></tr><tr><td>b73fdae232270404f96754329a1a18768974d3f6</td><td></td><td></td><td></td></tr><tr><td>b76af8fcf9a3ebc421b075b689defb6dc4282670</td><td>Face Mask Extraction in Video Sequence
+</td><td>('2563750', 'Yujiang Wang', 'yujiang wang')</td><td></td></tr><tr><td>b7c5f885114186284c51e863b58292583047a8b4</td><td>GAdaBoost: Accelerating Adaboost Feature Selection with Genetic
+<br/>Algorithms
+<br/><b>The American University In Cairo, Road 90, New Cairo, Cairo, Egypt</b><br/>Keywords:
+<br/>Object Detection, Genetic Algorithms, Haar Features, Adaboost, Face Detection.
+</td><td>('3468033', 'Mai F. Tolba', 'mai f. tolba')<br/>('27045559', 'Mohamed Moustafa', 'mohamed moustafa')</td><td>maitolba@aucegypt.edu, m.moustafa@aucegypt.edu
+</td></tr><tr><td>b73d9e1af36aabb81353f29c40ecdcbdf731dbed</td><td>Sensors 2015, 15, 20945-20966; doi:10.3390/s150920945
+<br/>OPEN ACCESS
+<br/>sensors
+<br/>ISSN 1424-8220
+<br/>www.mdpi.com/journal/sensors
+<br/>Article
+<br/>Head Pose Estimation on Top of Haar-Like Face Detection:
+<br/>A Study Using the Kinect Sensor
+<br/><b>Institute for Information Technology and Communications (IIKT), Otto-von-Guericke-University</b><br/><b>College of Computer Science and Information Sciences</b><br/><b>College of Science, Menou a University, Menou a 32721, Egypt</b><br/>Tel.: +49-391-67-11033; Fax: +49-391-67-11231.
+<br/>Academic Editor: Vittorio M. N. Passaro
+<br/>Received: 3 July 2015 / Accepted: 6 August 2015 / Published: 26 August 2015
+</td><td>('2712124', 'Anwar Saeed', 'anwar saeed')<br/>('1741165', 'Ayoub Al-Hamadi', 'ayoub al-hamadi')<br/>('1889194', 'Ahmed Ghoneim', 'ahmed ghoneim')</td><td>Magdeburg, Magdeburg D-39016, Germany; E-Mail: Ayoub.Al-Hamadi@ovgu.de
+<br/>King Saud University, Riyadh 11451, Saudi Arabia; E-Mail: ghoneim@KSU.EDU.SA
+<br/>* Author to whom correspondence should be addressed; E-Mail: anwar.saeed@ovgu.de;
+</td></tr><tr><td>b747fcad32484dfbe29530a15776d0df5688a7db</td><td></td><td></td><td></td></tr><tr><td>b7f7a4df251ff26aca83d66d6b479f1dc6cd1085</td><td>Bouges et al. EURASIP Journal on Image and Video Processing 2013, 2013:55
+<br/>http://jivp.eurasipjournals.com/content/2013/1/55
+<br/>RESEARCH
+<br/>Open Access
+<br/>Handling missing weak classifiers in boosted
+<br/>cascade: application to multiview and
+<br/>occluded face detection
+</td><td>('3212236', 'Pierre Bouges', 'pierre bouges')<br/>('1865978', 'Thierry Chateau', 'thierry chateau')<br/>('32323470', 'Christophe Blanc', 'christophe blanc')<br/>('1685767', 'Gaëlle Loosli', 'gaëlle loosli')</td><td></td></tr><tr><td>db848c3c32464d12da33b2f4c3a29fe293fc35d1</td><td>Pose Guided Human Video Generation
+<br/>1 CUHK-SenseTime Joint Lab, CUHK, Hong Kong S.A.R.
+<br/>2 SenseTime Research, Beijing, China
+<br/><b>Carnegie Mellon University</b></td><td>('49984891', 'Ceyuan Yang', 'ceyuan yang')<br/>('1915826', 'Zhe Wang', 'zhe wang')<br/>('22689408', 'Xinge Zhu', 'xinge zhu')<br/>('2000034', 'Chen Huang', 'chen huang')<br/>('1788070', 'Jianping Shi', 'jianping shi')<br/>('1807606', 'Dahua Lin', 'dahua lin')</td><td>yangceyuan@sensetime.com
+</td></tr><tr><td>db1f48a7e11174d4a724a4edb3a0f1571d649670</td><td>Joint Constrained Clustering and Feature
+<br/>Learning based on Deep Neural Networks
+<br/>by
+<br/><b>B.Sc., University of Science and Technology of China</b><br/>Thesis Submitted in Partial Fulfillment of the
+<br/>Requirements for the Degree of
+<br/>Master of Science
+<br/>in the
+<br/>School of Computing Science
+<br/>Faculty of Applied Sciences
+<br/><b>SIMON FRASER UNIVERSITY</b><br/>Summer 2017
+<br/>However, in accordance with the Copyright Act of Canada, this work may be
+<br/>reproduced without authorization under the conditions for “Fair Dealing.”
+<br/>Therefore, limited reproduction of this work for the purposes of private study,
+<br/>research, education, satire, parody, criticism, review and news reporting is likely
+<br/>All rights reserved.
+<br/>to be in accordance with the law, particularly if cited appropriately.
+</td><td>('1707706', 'Xiaoyu Liu', 'xiaoyu liu')<br/>('1707706', 'Xiaoyu Liu', 'xiaoyu liu')</td><td></td></tr><tr><td>db227f72bb13a5acca549fab0dc76bce1fb3b948</td><td>International Refereed Journal of Engineering and Science (IRJES)
+<br/>ISSN (Online) 2319-183X, (Print) 2319-1821
+<br/>Volume 4, Issue 6 (June 2015), PP.169-169-174
+<br/>Characteristic Based Image Search using Re-Ranking method
+<br/>1Chitti Babu, 2Yasmeen Jaweed, 3G.Vijay Kumar
+<br/><b></b></td><td></td><td></td></tr><tr><td>dbb16032dd8f19bdfd045a1fc0fc51f29c70f70a</td><td>PARKHI et al.: DEEP FACE RECOGNITION
+<br/>Deep Face Recognition
+<br/>Visual Geometry Group
+<br/>Department of Engineering Science
+<br/><b>University of Oxford</b></td><td>('3188342', 'Omkar M. Parkhi', 'omkar m. parkhi')<br/>('1687524', 'Andrea Vedaldi', 'andrea vedaldi')<br/>('1688869', 'Andrew Zisserman', 'andrew zisserman')</td><td>omkar@robots.ox.ac.uk
+<br/>vedaldi@robots.ox.ac.uk
+<br/>az@robots.ox.ac.uk
+</td></tr><tr><td>dbaf89ca98dda2c99157c46abd136ace5bdc33b3</td><td>Nonlinear Cross-View Sample Enrichment for
+<br/>Action Recognition
+<br/>Institut Mines-T´el´ecom; T´el´ecom ParisTech; CNRS LTCI
+</td><td>('1695223', 'Ling Wang', 'ling wang')<br/>('1692389', 'Hichem Sahbi', 'hichem sahbi')</td><td></td></tr><tr><td>dbab6ac1a9516c360cdbfd5f3239a351a64adde7</td><td></td><td></td><td></td></tr><tr><td>dbe255d3d2a5d960daaaba71cb0da292e0af36a7</td><td>Evolutionary Cost-sensitive Extreme Learning
+<br/>Machine
+<br/>1
+</td><td>('36904370', 'Lei Zhang', 'lei zhang')</td><td></td></tr><tr><td>dbb0a527612c828d43bcb9a9c41f1bf7110b1dc8</td><td>Chapter 7
+<br/>Machine Learning Techniques
+<br/>for Face Analysis
+</td><td>('9301018', 'Roberto Valenti', 'roberto valenti')<br/>('1703601', 'Nicu Sebe', 'nicu sebe')<br/>('1695527', 'Theo Gevers', 'theo gevers')<br/>('1774778', 'Ira Cohen', 'ira cohen')</td><td></td></tr><tr><td>db5a00984fa54b9d2a1caad0067a9ff0d0489517</td><td>Multi-Task Adversarial Network for Disentangled Feature Learning
+<br/>Ian Wassell1
+<br/><b>University of Cambridge</b><br/>2Adobe Research
+</td><td>('49421489', 'Yang Liu', 'yang liu')<br/>('48707577', 'Zhaowen Wang', 'zhaowen wang')</td><td>1{yl504,ijw24}@cam.ac.uk
+<br/>2{zhawang,hljin}@adobe.com
+</td></tr><tr><td>dbd958ffedc3eae8032be67599ec281310c05630</td><td>Automated Restyling of Human Portrait Based on Facial Expression Recognition
+<br/>and 3D Reconstruction
+<br/><b>Stanford University</b><br/>350 Serra Mall, Stanford, CA 94305, USA
+</td><td>('46740443', 'Cheng-Han Wu', 'cheng-han wu')</td><td>1chw0208@stanford.edu
+<br/>2hsinc@stanford.edu
+</td></tr><tr><td>dbed26cc6d818b3679e46677abc9fa8e04e8c6a6</td><td>A Hierarchical Generative Model for Eye Image Synthesis and Eye Gaze
+<br/>Estimation
+<br/><b>ECSE, Rensselaer Polytechnic Institute, Troy, NY, USA</b></td><td>('1771700', 'Kang Wang', 'kang wang')<br/>('49832825', 'Rui Zhao', 'rui zhao')<br/>('1726583', 'Qiang Ji', 'qiang ji')</td><td>{wangk10, zhaor, jiq}@rpi.edu
+</td></tr><tr><td>db3545a983ffd24c97c18bf7f068783102548ad7</td><td>Enriching the Student Model in an
+<br/>Intelligent Tutoring System
+<br/>Submitted in partial fulfillment of the requirements for the degree
+<br/>of Doctor of Philosophy
+<br/>of the
+<br/><b>Indian Institute of Technology, Bombay, India</b><br/>and
+<br/><b>Monash University, Australia</b><br/>by
+<br/>Supervisors:
+<br/>The course of study for this award was developed jointly by
+<br/><b>the Indian Institute of Technology, Bombay and Monash University, Australia</b><br/>and given academic recognition by each of them.
+<br/>The programme was administered by The IITB-Monash Research Academy.
+<br/>2014
+</td><td>('2844237', 'Ramkumar Rajendran', 'ramkumar rajendran')<br/>('1946438', 'Sridhar Iyer', 'sridhar iyer')<br/>('1791910', 'Sahana Murthy', 'sahana murthy')<br/>('38751653', 'Campbell Wilson', 'campbell wilson')<br/>('1727078', 'Judithe Sheard', 'judithe sheard')</td><td></td></tr><tr><td>dba493caf6647214c8c58967a8251641c2bda4c2</td><td>Automatic 3D Facial Expression Editing in Videos
+<br/><b>University of California, Santa Barbara</b><br/>2IMPA – Instituto de Matematica Pura e Aplicada
+</td><td>('13303219', 'Ya Chang', 'ya chang')<br/>('2428542', 'Marcelo Vieira', 'marcelo vieira')<br/>('1752714', 'Matthew Turk', 'matthew turk')<br/>('1705620', 'Luiz Velho', 'luiz velho')</td><td></td></tr><tr><td>dbb7f37fb9b41d1aa862aaf2d2e721a470fd2c57</td><td>Face Image Analysis With
+<br/>Convolutional Neural Networks
+<br/>Dissertation
+<br/>Zur Erlangung des Doktorgrades
+<br/>der Fakult¨at f¨ur Angewandte Wissenschaften
+<br/>an der Albert-Ludwigs-Universit¨at Freiburg im Breisgau
+<br/>von
+<br/>Stefan Duffner
+<br/>2007
+</td><td></td><td></td></tr><tr><td>db36e682501582d1c7b903422993cf8d70bb0b42</td><td>Deep Trans-layer Unsupervised Networks for
+<br/>Representation Learning
+<br/>aKey Lab of Intelligent Information Processing of Chinese Academy of Sciences (CAS),
+<br/><b>Institute of Computing Technology, CAS, Beijing 100190, China</b><br/><b>bSchool of Computer and Control Engineering, University of Chinese Academy of Sciences</b><br/>Beijing 100049, China
+</td><td>('1778018', 'Wentao Zhu', 'wentao zhu')<br/>('35048816', 'Jun Miao', 'jun miao')<br/>('2343895', 'Laiyun Qing', 'laiyun qing')<br/>('1710220', 'Xilin Chen', 'xilin chen')</td><td></td></tr><tr><td>dbe0e533d715f8543bcf197f3b8e5cffa969dfc0</td><td>International Journal of Advanced Research in Electrical,
+<br/>Electronics and Instrumentation Engineering
+<br/> ISSN (Print) : 2320 – 3765
+<br/> ISSN (Online): 2278 – 8875
+<br/>(An ISO 3297: 2007 Certified Organization)
+<br/>Vol. 3, Issue 5, May 2014
+<br/>A Comprehensive Comparative Performance
+<br/>Analysis of Eigenfaces, Laplacianfaces and
+<br/>Orthogonal Laplacianfaces for Face Recognition
+<br/><b>UG student, Amity school of Engineering and Technology, Amity University, Haryana, India</b><br/><b>Lecturer, Amity school of Engineering and Technology, Amity University, Haryana, India</b></td><td></td><td></td></tr><tr><td>dbd5e9691cab2c515b50dda3d0832bea6eef79f2</td><td>Image-basedFaceRecognition:IssuesandMethods
+<br/>WenYiZhao
+<br/>RamaChellappa
+<br/>Sarno(cid:11)Corporation
+<br/>CenterforAutomationResearch
+<br/>
+<br/><b>UniversityofMaryland</b><br/>Princeton,NJ
+<br/><b>CollegePark, MD</b></td><td></td><td>Email:wzhao@sarno(cid:11).com
+<br/>Email:rama@cfar.umd.edu
+</td></tr><tr><td>db67edbaeb78e1dd734784cfaaa720ba86ceb6d2</td><td>SPECFACE - A Dataset of Human Faces Wearing Spectacles
+<br/><b>Indian Institute of Technology Kharagpur</b><br/>India
+</td><td>('30654921', 'Anirban Dasgupta', 'anirban dasgupta')<br/>('30572870', 'Shubhobrata Bhattacharya', 'shubhobrata bhattacharya')<br/>('2680543', 'Aurobinda Routray', 'aurobinda routray')</td><td></td></tr><tr><td>db82f9101f64d396a86fc2bd05b352e433d88d02</td><td>A Spatio-Temporal Probabilistic Framework for
+<br/>Dividing and Predicting Facial Action Units
+<br/><b>Electrical and Computer Engineering, The University of Memphis</b></td><td>('2497319', 'Md. Iftekhar Tanveer', 'md. iftekhar tanveer')<br/>('1828610', 'Mohammed Yeasin', 'mohammed yeasin')</td><td></td></tr><tr><td>db428d03e3dfd98624c23e0462817ad17ef14493</td><td>Oxford TRECVID 2006 – Notebook paper
+<br/>Department of Engineering Science
+<br/><b>University of Oxford</b><br/>United Kingdom
+</td><td>('2276542', 'James Philbin', 'james philbin')<br/>('8873555', 'Anna Bosch', 'anna bosch')<br/>('1720149', 'Jan-Mark Geusebroek', 'jan-mark geusebroek')<br/>('1782755', 'Josef Sivic', 'josef sivic')<br/>('1688869', 'Andrew Zisserman', 'andrew zisserman')</td><td></td></tr><tr><td>a83fc450c124b7e640adc762e95e3bb6b423b310</td><td>Deep Face Feature for Face Alignment
+</td><td>('15679675', 'Boyi Jiang', 'boyi jiang')<br/>('2938279', 'Juyong Zhang', 'juyong zhang')<br/>('2964129', 'Bailin Deng', 'bailin deng')<br/>('8280113', 'Yudong Guo', 'yudong guo')<br/>('1724542', 'Ligang Liu', 'ligang liu')</td><td></td></tr><tr><td>a85e9e11db5665c89b057a124547377d3e1c27ef</td><td>Dynamics of Driver’s Gaze: Explorations in
+<br/>Behavior Modeling & Maneuver Prediction
+</td><td>('1841835', 'Sujitha Martin', 'sujitha martin')<br/>('22254044', 'Sourabh Vora', 'sourabh vora')<br/>('2812409', 'Kevan Yuen', 'kevan yuen')</td><td></td></tr><tr><td>a8117a4733cce9148c35fb6888962f665ae65b1e</td><td>IEEE TRANSACTIONS ON XXXX, VOL. XX, NO. XX, XX 201X
+<br/>A Good Practice Towards Top Performance of Face
+<br/>Recognition: Transferred Deep Feature Fusion
+</td><td>('33419682', 'Lin Xiong', 'lin xiong')<br/>('1785111', 'Jayashree Karlekar', 'jayashree karlekar')<br/>('2052311', 'Jian Zhao', 'jian zhao')<br/>('33221685', 'Jiashi Feng', 'jiashi feng')<br/>('2668358', 'Sugiri Pranata', 'sugiri pranata')<br/>('3493398', 'Shengmei Shen', 'shengmei shen')</td><td></td></tr><tr><td>a87ab836771164adb95d6744027e62e05f47fd96</td><td>Understanding human-human interactions: a survey
+<br/><b>Utrecht University, Buys Ballotgebouw, Princetonplein 5, Utrecht, 3584CC, Netherlands</b><br/><b>Utrecht University, Buys Ballotgebouw, Princetonplein 5, Utrecht, 3584CC, Netherlands</b></td><td>('26936326', 'Alexandros Stergiou', 'alexandros stergiou')<br/>('1754666', 'Ronald Poppe', 'ronald poppe')</td><td></td></tr><tr><td>a896ddeb0d253739c9aaef7fc1f170a2ba8407d3</td><td>SSH: Single Stage Headless Face Detector
+<br/><b>University of Maryland</b></td><td>('40465379', 'Mahyar Najibi', 'mahyar najibi')<br/>('3383048', 'Pouya Samangouei', 'pouya samangouei')<br/>('1693428', 'Larry S. Davis', 'larry s. davis')</td><td>{pouya,rama,lsd}@umiacs.umd.edu
+<br/>najibi@cs.umd.edu
+</td></tr><tr><td>a820941eaf03077d68536732a4d5f28d94b5864a</td><td>Leveraging Datasets with Varying Annotations for Face Alignment
+<br/>via Deep Regression Network
+<br/>1Key Lab of Intelligent Information Processing of Chinese Academy of Sciences (CAS),
+<br/><b>Institute of Computing Technology, CAS, Beijing 100190, China</b><br/><b>University of Chinese Academy of Sciences, Beijing 100049, China</b><br/>3CAS Center for Excellence in Brain Science and Intelligence Technology
+</td><td>('1698586', 'Jie Zhang', 'jie zhang')<br/>('1693589', 'Meina Kan', 'meina kan')<br/>('1710220', 'Xilin Chen', 'xilin chen')</td><td>{jie.zhang,meina.kan,shiguang.shan,xilin.chen}@vipl.ict.ac.cn
+</td></tr><tr><td>a8035ca71af8cc68b3e0ac9190a89fed50c92332</td><td>000
+<br/>001
+<br/>002
+<br/>003
+<br/>004
+<br/>005
+<br/>006
+<br/>007
+<br/>008
+<br/>009
+<br/>010
+<br/>011
+<br/>012
+<br/>013
+<br/>014
+<br/>015
+<br/>016
+<br/>017
+<br/>018
+<br/>019
+<br/>020
+<br/>021
+<br/>022
+<br/>023
+<br/>024
+<br/>025
+<br/>026
+<br/>027
+<br/>028
+<br/>029
+<br/>030
+<br/>031
+<br/>032
+<br/>033
+<br/>034
+<br/>035
+<br/>036
+<br/>037
+<br/>038
+<br/>039
+<br/>040
+<br/>041
+<br/>042
+<br/>043
+<br/>044
+<br/>IIIT-CFW: A Benchmark Database of
+<br/>Cartoon Faces in the Wild
+<br/>1 IIIT Chittoor, Sri City, India
+<br/>2 CVIT, KCIS, IIIT Hyderabad, India
+</td><td>('2154430', 'Ashutosh Mishra', 'ashutosh mishra')<br/>('31821293', 'Shyam Nandan Rai', 'shyam nandan rai')<br/>('39719398', 'Anand Mishra', 'anand mishra')<br/>('1694502', 'C. V. Jawahar', 'c. v. jawahar')</td><td></td></tr><tr><td>a88640045d13fc0207ac816b0bb532e42bcccf36</td><td>ARXIV VERSION
+<br/>Simultaneously Learning Neighborship and
+<br/>Projection Matrix for Supervised
+<br/>Dimensionality Reduction
+</td><td>('34116743', 'Yanwei Pang', 'yanwei pang')<br/>('2521321', 'Bo Zhou', 'bo zhou')<br/>('1688370', 'Feiping Nie', 'feiping nie')</td><td></td></tr><tr><td>a803453edd2b4a85b29da74dcc551b3c53ff17f9</td><td>Pose Invariant Face Recognition Under Arbitrary
+<br/>Illumination Based on 3D Face Reconstruction
+<br/><b>School of Computer Science and Technology, Harbin Institute of Technology</b><br/>150001 Harbin, China
+<br/>2 ICT-ISVISION Joint R&D Lab for Face Recognition, ICT, CAS, 100080 Beijing, China
+</td><td>('1695600', 'Xiujuan Chai', 'xiujuan chai')<br/>('2343895', 'Laiyun Qing', 'laiyun qing')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('1710220', 'Xilin Chen', 'xilin chen')<br/>('1698902', 'Wen Gao', 'wen gao')</td><td>{xjchai,xlchen,wgao}@jdl.ac.cn
+<br/>{lyqing,sgshan}@jdl.ac.cn
+</td></tr><tr><td>a8a30a8c50d9c4bb8e6d2dd84bc5b8b7f2c84dd8</td><td>This is a repository copy of Modelling of Orthogonal Craniofacial Profiles.
+<br/>White Rose Research Online URL for this paper:
+<br/>http://eprints.whiterose.ac.uk/131767/
+<br/>Version: Published Version
+<br/>Article:
+<br/>Dai, Hang, Pears, Nicholas Edwin orcid.org/0000-0001-9513-5634 and Duncan, Christian
+<br/>(2017) Modelling of Orthogonal Craniofacial Profiles. Journal of Imaging. ISSN 2313-433X
+<br/>https://doi.org/10.3390/jimaging3040055
+<br/>Reuse
+<br/>This article is distributed under the terms of the Creative Commons Attribution (CC BY) licence. This licence
+<br/>allows you to distribute, remix, tweak, and build upon the work, even commercially, as long as you credit the
+<br/>authors for the original work. More information and the full terms of the licence here:
+<br/>https://creativecommons.org/licenses/
+<br/>Takedown
+<br/>If you consider content in White Rose Research Online to be in breach of UK law, please notify us by
+<br/>https://eprints.whiterose.ac.uk/
+</td><td></td><td>emailing eprints@whiterose.ac.uk including the URL of the record and the reason for the withdrawal request.
+<br/>eprints@whiterose.ac.uk
+</td></tr><tr><td>a8638a07465fe388ae5da0e8a68e62a4ee322d68</td><td>How to predict the global instantaneous feeling induced
+<br/>by a facial picture?
+<br/>To cite this version:
+<br/>feeling induced by a facial picture?. Signal Processing: Image Communication, Elsevier, 2015,
+<br/>pp.1-30. .
+<br/>HAL Id: hal-01198718
+<br/>https://hal.archives-ouvertes.fr/hal-01198718
+<br/>Submitted on 14 Sep 2015
+<br/>HAL is a multi-disciplinary open access
+<br/>archive for the deposit and dissemination of sci-
+<br/>entific research documents, whether they are pub-
+<br/>lished or not. The documents may come from
+<br/>teaching and research institutions in France or
+<br/><b>abroad, or from public or private research centers</b><br/>L’archive ouverte pluridisciplinaire HAL, est
+<br/>destin´ee au d´epˆot et `a la diffusion de documents
+<br/>scientifiques de niveau recherche, publi´es ou non,
+<br/>´emanant des ´etablissements d’enseignement et de
+<br/>recherche fran¸cais ou ´etrangers, des laboratoires
+<br/>publics ou priv´es.
+</td><td>('25030249', 'Arnaud Lienhard', 'arnaud lienhard')<br/>('2216412', 'Patricia Ladret', 'patricia ladret')<br/>('1788869', 'Alice Caplier', 'alice caplier')<br/>('25030249', 'Arnaud Lienhard', 'arnaud lienhard')<br/>('2216412', 'Patricia Ladret', 'patricia ladret')<br/>('1788869', 'Alice Caplier', 'alice caplier')</td><td></td></tr><tr><td>a8e75978a5335fd3deb04572bb6ca43dbfad4738</td><td>Sparse Graphical Representation based Discriminant
+<br/>Analysis for Heterogeneous Face Recognition
+</td><td>('2299758', 'Chunlei Peng', 'chunlei peng')<br/>('10699750', 'Xinbo Gao', 'xinbo gao')<br/>('2870173', 'Nannan Wang', 'nannan wang')<br/>('38158055', 'Jie Li', 'jie li')</td><td></td></tr><tr><td>a8d52265649c16f95af71d6f548c15afc85ac905</td><td>Situation Recognition with Graph Neural Networks
+<br/><b>The Chinese University of Hong Kong, 2University of Toronto, 3Youtu Lab, Tencent</b><br/><b>Uber Advanced Technologies Group, 5Vector Institute</b></td><td>('8139953', 'Ruiyu Li', 'ruiyu li')<br/>('2103464', 'Makarand Tapaswi', 'makarand tapaswi')<br/>('2246396', 'Renjie Liao', 'renjie liao')<br/>('1729056', 'Jiaya Jia', 'jiaya jia')<br/>('2422559', 'Raquel Urtasun', 'raquel urtasun')<br/>('37895334', 'Sanja Fidler', 'sanja fidler')</td><td>ryli@cse.cuhk.edu.hk, {makarand,rjliao,urtasun,fidler}@cs.toronto.edu, leojia9@gmail.com
+</td></tr><tr><td>a8583e80a455507a0f146143abeb35e769d25e4e</td><td>A DISTANCE-ACCURACY HYBRID WEIGHTED VOTING SCHEME
+<br/>FOR PARTIAL FACE RECOGNITION
+<br/>1Dept. of Information Engineering and Computer Science,
+<br/><b>Feng Chia University, Taichung, Taiwan</b><br/>2Department of Photonics,
+<br/><b>National Chiao Tung University, Taiwan</b></td><td>('40609876', 'Yung-Hui Li', 'yung-hui li')<br/>('3072232', 'Bo-Ren Zheng', 'bo-ren zheng')<br/>('2532474', 'Wei-Cheng Huang', 'wei-cheng huang')</td><td>ayunghui@gmail.com, bzawdcx@gmail.com, cs75757775@gmail.com, dchtien@mail.nctu.edu.tw
+</td></tr><tr><td>a87e37d43d4c47bef8992ace408de0f872739efc</td><td>Review
+<br/>A Comprehensive Review on Handcrafted and
+<br/>Learning-Based Action Representation Approaches
+<br/>for Human Activity Recognition
+<br/><b>School of Computing and Communications Infolab21, Lancaster University, Lancaster LA1 4WA, UK</b><br/><b>COMSATS Institute of Information Technology, Lahore 54000, Pakistan</b><br/>Academic Editor: José Santamaria
+<br/>Received: 5 September 2016; Accepted: 13 January 2017; Published: 23 January 2017
+</td><td>('2145942', 'Allah Bux Sargano', 'allah bux sargano')<br/>('5736243', 'Plamen Angelov', 'plamen angelov')</td><td>p.angelov@lancaster.ac.uk
+<br/>drzhabib@ciitlahore.edu.pk
+<br/>* Correspondence: a.bux@lancaster.ac.uk; Tel.: +44-152-451-0525
+</td></tr><tr><td>a8c8a96b78e7b8e0d4a4a422fcb083e53ad06531</td><td>(IJACSA) International Journal of Advanced Computer Science and Applications,
+<br/>Vol. 8, No. 4, 2017
+<br/>3D Human Action Recognition using Hu Moment
+<br/>Invariants and Euclidean Distance Classifier
+<br/>System Engineering Department
+<br/>System Engineering Department
+<br/>Computer Science Department
+<br/><b>University of Arkansas at Little Rock</b><br/><b>University of Arkansas at Little Rock</b><br/><b>University of Arkansas at Little Rock</b><br/>Arkansas, USA
+<br/>Arkansas, USA
+<br/>Arkansas, USA
+</td><td>('19305764', 'Fadwa Al-Azzo', 'fadwa al-azzo')<br/>('22768683', 'Arwa Mohammed Taqi', 'arwa mohammed taqi')<br/>('1795699', 'Mariofanna Milanova', 'mariofanna milanova')</td><td></td></tr><tr><td>a8748a79e8d37e395354ba7a8b3038468cb37e1f</td><td>Seeing the Forest from the Trees: A Holistic Approach to Near-infrared
+<br/>Heterogeneous Face Recognition
+<br/><b>U.S. Army Research Laboratory</b><br/><b>University of Maryland, College Park</b><br/><b>West Virginia University</b></td><td>('39412489', 'Christopher Reale', 'christopher reale')<br/>('8147588', 'Nasser M. Nasrabadi', 'nasser m. nasrabadi')<br/>('1688527', 'Heesung Kwon', 'heesung kwon')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td>reale@umiacs.umd.edu
+<br/>heesung.kwon.civ@mail.mil
+<br/>nasser.nasrabadi@mail.wvu.edu
+<br/>rama@umiacs.umd.edu
+</td></tr><tr><td>a8a61badec9b8bc01f002a06e1426a623456d121</td><td>JOINT SPATIO-TEMPORAL ACTION LOCALIZATION
+<br/>IN UNTRIMMED VIDEOS WITH PER-FRAME SEGMENTATION
+<br/><b>Xi an Jiaotong University</b><br/>2HERE Technologies
+<br/>3Alibaba Group
+<br/>4Microsoft Research
+</td><td>('46809347', 'Xuhuan Duan', 'xuhuan duan')<br/>('40367806', 'Le Wang', 'le wang')<br/>('51262903', 'Changbo Zhai', 'changbo zhai')<br/>('46324995', 'Qilin Zhang', 'qilin zhang')<br/>('1786361', 'Zhenxing Niu', 'zhenxing niu')<br/>('1715389', 'Nanning Zheng', 'nanning zheng')<br/>('1745420', 'Gang Hua', 'gang hua')</td><td></td></tr><tr><td>a8154d043f187c6640cb6aedeaa8385a323e46cf</td><td>MURRUGARRA, KOVASHKA: IMAGE RETRIEVAL WITH MIXED INITIATIVE
+<br/>Image Retrieval with Mixed Initiative and
+<br/>Multimodal Feedback
+<br/>Department of Computer Science
+<br/><b>University of Pittsburgh</b><br/>Pittsburgh, PA, USA
+</td><td>('1916866', 'Nils Murrugarra-Llerena', 'nils murrugarra-llerena')<br/>('1770205', 'Adriana Kovashka', 'adriana kovashka')</td><td>nineil@cs.pitt.edu
+<br/>kovashka@cs.pitt.edu
+</td></tr><tr><td>a812368fe1d4a186322bf72a6d07e1cf60067234</td><td><b>Imperial College London</b><br/>Department of Computing
+<br/>Gaussian Processes
+<br/>for Modeling of Facial Expressions
+<br/>September, 2016
+<br/>Supervised by Prof. Maja Pantic
+<br/>Submitted in part fulfilment of the requirements for the degree of PhD in Computing and
+<br/><b>the Diploma of Imperial College London. This thesis is entirely my own work, and, except</b><br/>where otherwise indicated, describes my own research.
+</td><td>('2308430', 'Stefanos Eleftheriadis', 'stefanos eleftheriadis')</td><td></td></tr><tr><td>de7f5e4ccc2f38e0c8f3f72a930ae1c43e0fdcf0</td><td>Merge or Not? Learning to Group Faces via Imitation Learning
+<br/>SenseTime
+<br/>SenseTime
+<br/>SenseTime
+<br/>Chen Chang Loy
+<br/><b>The Chinese University of Hong Kong</b></td><td>('49990550', 'Yue He', 'yue he')<br/>('9963152', 'Kaidi Cao', 'kaidi cao')<br/>('46651787', 'Cheng Li', 'cheng li')</td><td>heyue@sensetime.com
+<br/>caokaidi@sensetime.com
+<br/>chengli@sensetime.com
+<br/>ccloy@ie.cuhk.edu.hk
+</td></tr><tr><td>de8381903c579a4fed609dff3e52a1dc51154951</td><td><b>Graz University of Technology</b><br/><b>Institute for Computer Graphics and Vision</b><br/>Dissertation
+<br/>Shape and Appearance Based Analysis
+<br/>of Facial Images for Assessing ICAO
+<br/>Compliance
+<br/>Graz, Austria, December 2010
+<br/>Thesis supervisors
+<br/>Prof. Dr. Horst Bischof
+<br/>Prof. Dr. Fernando De la Torre
+</td><td>('3464430', 'Markus Storer', 'markus storer')</td><td></td></tr><tr><td>ded968b97bd59465d5ccda4f1e441f24bac7ede5</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Large scale 3D Morphable Models
+<br/>Zafeiriou
+<br/>Received: date / Accepted: date
+</td><td>('47456731', 'James Booth', 'james booth')</td><td></td></tr><tr><td>de0eb358b890d92e8f67592c6e23f0e3b2ba3f66</td><td>ACCEPTED BY IEEE TRANS. PATTERN ANAL. AND MACH. INTELL.
+<br/>Inference-Based Similarity Search in
+<br/>Randomized Montgomery Domains for
+<br/>Privacy-Preserving Biometric Identification
+</td><td>('46393453', 'Yi Wang', 'yi wang')<br/>('2087574', 'Jianwu Wan', 'jianwu wan')<br/>('39954962', 'Jun Guo', 'jun guo')<br/>('32840387', 'Yiu-ming Cheung', 'yiu-ming cheung')</td><td></td></tr><tr><td>def569db592ed1715ae509644444c3feda06a536</td><td>Discovery and usage of joint attention in images
+<br/><b>Weizmann Institute of Science, Rehovot, Israel</b><br/><b>The Center for Brains, Minds and Machines, Massachusetts Institute of Technology, Cambridge, MA USA</b><br/><b>Massachusetts Institute of Technology, Cambridge, MA USA</b><br/><b>Weizmann Institute of Science, Rehovot, Israel</b></td><td></td><td>Daniel Harari (hararid@weizmann.ac.il)
+<br/>Joshua B. Tenenbaum (jbt@mit.edu)
+<br/>Shimon Ullman (shimon.ullman@weizmann.ac.il)
+</td></tr><tr><td>dee406a7aaa0f4c9d64b7550e633d81bc66ff451</td><td>Content-Adaptive Sketch Portrait Generation by
+<br/>Decompositional Representation Learning
+</td><td>('8335563', 'Dongyu Zhang', 'dongyu zhang')<br/>('1737218', 'Liang Lin', 'liang lin')<br/>('1765674', 'Tianshui Chen', 'tianshui chen')<br/>('1738906', 'Xian Wu', 'xian wu')<br/>('1989769', 'Wenwei Tan', 'wenwei tan')<br/>('1732655', 'Ebroul Izquierdo', 'ebroul izquierdo')</td><td></td></tr><tr><td>de15af84b1257211a11889b6c2adf0a2bcf59b42</td><td>Anomaly Detection in Non-Stationary and
+<br/>Distributed Environments
+<br/>Colin O’Reilly
+<br/>Submitted for the Degree of
+<br/>Doctor of Philosophy
+<br/>from the
+<br/><b>University of Surrey</b><br/><b>Institute for Communication Systems</b><br/>Faculty of Engineering and Physical Sciences
+<br/><b>University of Surrey</b><br/>Guildford, Surrey GU2 7XH, U.K.
+<br/>November 2014
+<br/>© Colin O’Reilly 2014
+</td><td></td><td></td></tr><tr><td>de3285da34df0262a4548574c2383c51387a24bf</td><td>Two-Stream Convolutional Networks for Dynamic Texture Synthesis
+<br/>Department of Electrical Engineering and Computer Science
+<br/><b>York University, Toronto</b></td><td>('19251410', 'Matthew Tesfaldet', 'matthew tesfaldet')</td><td>{mtesfald,mab}@eecs.yorku.ca
+</td></tr><tr><td>dedabf9afe2ae4a1ace1279150e5f1d495e565da</td><td>3294
+<br/>Robust Face Recognition With Structurally
+<br/>Incoherent Low-Rank Matrix Decomposition
+</td><td>('2017922', 'Chia-Po Wei', 'chia-po wei')<br/>('2624492', 'Chih-Fan Chen', 'chih-fan chen')<br/>('2733735', 'Yu-Chiang Frank Wang', 'yu-chiang frank wang')</td><td></td></tr><tr><td>dec0c26855da90876c405e9fd42830c3051c2f5f</td><td>Supplementary Material: Learning Compositional Visual Concepts with Mutual
+<br/>Consistency
+<br/><b>School of Electrical and Computer Engineering, Cornell University, Ithaca NY</b><br/>3Siemens Corporate Technology, Princeton NJ
+<br/>Contents
+<br/>1. Objective functions
+<br/>1.1. Adversarial loss
+<br/>1.2. Extended cycle-consistency loss .
+<br/>1.3. Commutative loss
+<br/>. . .
+<br/>. . .
+<br/>. . .
+<br/>2. Additional implementation details
+<br/>3. Additional results
+<br/>4. Discussion
+<br/>5. Generalizing ConceptGAN
+<br/>5.1. Assumption: Concepts have distinct states . .
+<br/>5.2. Assumption: Concepts are mutually compatible
+<br/>5.3. Generalization .
+<br/>. . .
+<br/>1. Objective functions
+<br/>In this section, we provide complete mathematical
+<br/>expressions for each of the three terms in our loss func-
+<br/>tion, following the notation defined in Section 3 of the main
+<br/>paper and the assumption that no training data is available
+<br/>in subdomain Σ11.
+<br/>1.1. Adversarial loss
+<br/>For generator G1 and discriminator D10, for example,
+<br/>the adversarial loss is expressed as:
+<br/>Ladv(G1, D10, Σ00, Σ10) = Eσ10∼P10 [log D10(σ10)]
+<br/>+Eσ00∼P00[log(1 − D10(G1(σ00)))]
+<br/>(1)
+<br/>where the generator G1 and discriminator D10 are
+<br/>learned to optimize a minimax objective such that
+<br/>G∗
+<br/>1 = arg min
+<br/>G1
+<br/>max
+<br/>D10
+<br/>Ladv(G1, D10, Σ00, Σ10)
+<br/>(2)
+<br/>For generator G2 and discriminator D01, the adversarial
+<br/>loss is expressed as:
+<br/>Ladv(G2, D01, Σ00, Σ01) = Eσ01∼P01 [log D01(σ01)]
+<br/>+Eσ00∼P00[log(1 − D01(G2(σ00)))]
+<br/>For generator F1 and discriminator D00, the adversarial
+<br/>loss is expressed as:
+<br/>Ladv(F1, D00, Σ10, Σ00) = Eσ00∼P00 [log D00(σ00)]
+<br/>+Eσ10∼P10 [log(1 − D00(F1(σ10)))]
+<br/>For generator F2 and discriminator D00, the adversarial
+<br/>loss is expressed as:
+<br/>Ladv(F2, D00, Σ01, Σ00) = Eσ00∼P00 [log D00(σ00)]
+<br/>+Eσ01∼P01 [log(1 − D00(F2(σ01)))]
+<br/>(5)
+<br/>The overall adversarial loss LADV is the sum of these four
+<br/>terms.
+<br/>(3)
+<br/>(4)
+<br/>(6)
+<br/>LADV =Ladv(G1, D10, Σ00, Σ10)
+<br/>+ Ladv(G2, D01, Σ00, Σ01)
+<br/>+ Ladv(F1, D00, Σ10, Σ00)
+<br/>+ Ladv(F2, D00, Σ01, Σ00)
+<br/>1.2. Extended cycle-consistency loss
+<br/>Following our discussion in Section 3.2 of the main
+<br/>paper, for any data sample σ00 in subdomain Σ00, a
+<br/>distance-4 cycle consistency constraint is defined in the
+<br/>clockwise direction (F2 ◦ F1 ◦ G2 ◦ G1)(σ00) ≈ σ00 and in
+<br/>the counterclockwise direction (F1 ◦ F2 ◦ G1 ◦ G2)(σ00) ≈
+<br/>σ00. Such constraints are implemented by the penalty func-
+<br/>tion:
+<br/>Lcyc4(G, F, Σ00)
+<br/>= Eσ00∼P00[(cid:107)(F2 ◦ F1 ◦ G2 ◦ G1)(σ00) − σ00(cid:107)1]
+<br/>+ Eσ00∼P00[(cid:107)(F1 ◦ F2 ◦ G1 ◦ G2)(σ00) − σ00(cid:107)1].
+<br/>(7)
+</td><td>('3303727', 'Yunye Gong', 'yunye gong')<br/>('1976152', 'Srikrishna Karanam', 'srikrishna karanam')<br/>('3311781', 'Ziyan Wu', 'ziyan wu')<br/>('2692770', 'Kuan-Chuan Peng', 'kuan-chuan peng')<br/>('39497207', 'Jan Ernst', 'jan ernst')<br/>('1767099', 'Peter C. Doerschuk', 'peter c. doerschuk')</td><td>{yg326,pd83}@cornell.edu,{first.last}@siemens.com
+</td></tr><tr><td>de398bd8b7b57a3362c0c677ba8bf9f1d8ade583</td><td>Hierarchical Bayesian Theme Models for
+<br/>Multi-pose Facial Expression Recognition
+</td><td>('3069077', 'Qirong Mao', 'qirong mao')<br/>('1851510', 'Qiyu Rao', 'qiyu rao')<br/>('1770550', 'Yongbin Yu', 'yongbin yu')<br/>('1710341', 'Ming Dong', 'ming dong')</td><td></td></tr><tr><td>ded41c9b027c8a7f4800e61b7cfb793edaeb2817</td><td></td><td></td><td></td></tr><tr><td>defa8774d3c6ad46d4db4959d8510b44751361d8</td><td>FEBEI - Face Expression Based Emoticon Identification
+<br/>CS - B657 Computer Vision
+<br/>Robert J Henderson - rojahend
+</td><td>('1854614', 'Nethra Chandrasekaran', 'nethra chandrasekaran')<br/>('1830695', 'Prashanth Kumar Murali', 'prashanth kumar murali')</td><td></td></tr><tr><td>b0c512fcfb7bd6c500429cbda963e28850f2e948</td><td></td><td></td><td></td></tr><tr><td>b08203fca1af7b95fda8aa3d29dcacd182375385</td><td>OBJECT AND TEXT-GUIDED SEMANTICS FOR CNN-BASED ACTIVITY RECOGNITION
+<br/><b>U.S. Army Research Laboratory, Adelphi, MD, USA</b><br/>§Booz Allen Hamilton Inc., McLean, VA, USA
+</td><td>('3090299', 'Sungmin Eum', 'sungmin eum')<br/>('39412489', 'Christopher Reale', 'christopher reale')<br/>('1688527', 'Heesung Kwon', 'heesung kwon')<br/>('3202888', 'Claire Bonial', 'claire bonial')</td><td></td></tr><tr><td>b03b4d8b4190361ed2de66fcbb6fda0c9a0a7d89</td><td>Deep Alternative Neural Network: Exploring
+<br/>Contexts as Early as Possible for Action Recognition
+<br/><b>School of Electronics Engineering and Computer Science, Peking University</b><br/><b>School of Electronics and Computer Engineering, Peking University</b></td><td>('3258842', 'Jinzhuo Wang', 'jinzhuo wang')<br/>('1788029', 'Wenmin Wang', 'wenmin wang')<br/>('8082703', 'Xiongtao Chen', 'xiongtao chen')<br/>('1702330', 'Ronggang Wang', 'ronggang wang')<br/>('1698902', 'Wen Gao', 'wen gao')</td><td>jzwang@pku.edu.cn, wangwm@ece.pku.edu.cn
+<br/>cxt@pku.edu.cn, rgwang@ece.pku.edu.cn, wgao@pku.edu.cn
+</td></tr><tr><td>b09b693708f412823053508578df289b8403100a</td><td>WANG et al.: TWO-STREAM SR-CNNS FOR ACTION RECOGNITION IN VIDEOS
+<br/>Two-Stream SR-CNNs for Action
+<br/>Recognition in Videos
+<br/>1 Advanced Interactive Technologies Lab
+<br/>ETH Zurich
+<br/>Zurich, Switzerland
+<br/>2 Computer Vision Lab
+<br/>ETH Zurich
+<br/>Zurich, Switzerland
+</td><td>('46394691', 'Yifan Wang', 'yifan wang')<br/>('40403685', 'Jie Song', 'jie song')<br/>('33345248', 'Limin Wang', 'limin wang')<br/>('1681236', 'Luc Van Gool', 'luc van gool')<br/>('2531379', 'Otmar Hilliges', 'otmar hilliges')</td><td>yifan.wang@student.ethz.ch
+<br/>jsong@inf.ethz.ch
+<br/>07wanglimin@gmail.com
+<br/>vangool@vision.ee.ethz.ch
+<br/>otmar.hilliges@inf.ethz.ch
+</td></tr><tr><td>b013cce42dd769db754a57351d49b7410b8e82ad</td><td>Automatic Point-based Facial Trait Judgments Evaluation
+<br/>1Computer Vision Center, Edifici O, Campus UAB, Spain
+<br/>2Universitat Oberta de Catalunya, Rambla del Poblenou 156, 08018, Barcelona, Spain
+<br/><b>Princeton University, Princeton, New Jersey, USA</b><br/>4Department de Matematica Aplicada i Analisi, Universitat de Barcelona, Spain
+</td><td>('1863902', 'David Masip', 'david masip')<br/>('2913698', 'Alexander Todorov', 'alexander todorov')</td><td>mrojas@cvc.uab.es, dmasipr@uoc.edu, atodorov@princeton.edu, jordi.vitria@ub.edu
+</td></tr><tr><td>b07582d1a59a9c6f029d0d8328414c7bef64dca0</td><td>Employing Fusion of Learned and Handcrafted
+<br/>Features for Unconstrained Ear Recognition
+<br/>Maur´ıcio Pamplona Segundo∗†
+<br/>October 24, 2017
+</td><td>('26977067', 'Earnest E. Hansley', 'earnest e. hansley')<br/>('1715991', 'Sudeep Sarkar', 'sudeep sarkar')</td><td></td></tr><tr><td>b017963d83b3edf71e1673d7ffdec13a6d350a87</td><td>View Independent Face Detection Based on
+<br/>Combination of Local and Global Kernels
+<br/><b>The University of Electro-Communications</b><br/>1-5-1 Chofugaoka, Chofu-shi, Tokyo 182-8585, JAPAN
+</td><td>('2510362', 'Kazuhiro HOTTA', 'kazuhiro hotta')</td><td>hotta@ice.uec.ac.jp,
+</td></tr><tr><td>b03d6e268cde7380e090ddaea889c75f64560891</td><td></td><td></td><td></td></tr><tr><td>b084683e5bab9b2bc327788e7b9a8e049d5fff8f</td><td>Using LIP to Gloss Over Faces in Single-Stage Face Detection
+<br/>Networks
+<br/><b>The University of Queensland, School of ITEE, QLD 4072, Australia</b></td><td>('1973322', 'Siqi Yang', 'siqi yang')<br/>('2331880', 'Arnold Wiliem', 'arnold wiliem')<br/>('3104113', 'Shaokang Chen', 'shaokang chen')<br/>('2270092', 'Brian C. Lovell', 'brian c. lovell')</td><td>{siqi.yang, a.wiliem, s.chen2}@uq.edu.au, lovell@itee.uq.edu.au
+</td></tr><tr><td>b0c1615ebcad516b5a26d45be58068673e2ff217</td><td>How Image Degradations Affect Deep CNN-based Face
+<br/>Recognition?
+<br/>S¸amil Karahan1 Merve Kılınc¸ Yıldırım1 Kadir Kırtac¸1 Ferhat S¸ ¨ukr¨u Rende1
+<br/>G¨ultekin B¨ut¨un1Hazım Kemal Ekenel2
+</td><td></td><td></td></tr><tr><td>b03446a2de01126e6a06eb5d526df277fa36099f</td><td>A Torch Library for Action Recognition and Detection Using CNNs and LSTMs
+<br/><b>Stanford University</b></td><td>('4910251', 'Helen Jiang', 'helen jiang')</td><td>{gthung, helennn}@stanford.edu
+</td></tr><tr><td>b0de0892d2092c8c70aa22500fed31aa7eb4dd3f</td><td>(will be inserted by the editor)
+<br/>A robust and efficient video representation for action recognition
+<br/>Received: date / Accepted: date
+</td><td>('1804138', 'Heng Wang', 'heng wang')</td><td></td></tr><tr><td>b018fa5cb9793e260b8844ae155bd06380988584</td><td>Project STAR IST-2000-28764
+<br/>Deliverable D6.3 Enhanced face and arm/hand
+<br/>detector
+<br/>Date: August 29th, 2003
+<br/><b>Katholieke Universiteit Leuven, ESAT/VISICS</b><br/>Kasteelpark Arenberg 10, 3001 Heverlee, Belgium
+<br/>Tel. +32-16-32.10.61 and Fax. +32-16-32.17.23
+<br/>http://www.esat.kuleuven.ac.be/ knummiar/star/star.html
+<br/>To: STAR project partners
+<br/>Siemens CT PP6,
+<br/>Otto-Hahn-Ring 6, 81730 Munich, Germany
+<br/>Tel. +49-89-636.49.851, Fax. +49-89-636.481.00
+<br/>Introduction
+<br/>KU Leuven is responsible for the work package number 6, Automated view selection and
+<br/>camera hand-over. The main goal is to build an intelligent virtual editor that produces as
+<br/>an output a single video stream from multiple input streams. The selection should be made
+<br/>in such a way that the resulting stream is pleasant to watch and informative about what is
+<br/>going on in the scene. Face detection and object tracking is needed to select the best camera
+<br/>view from the multi-camera system.
+<br/>KUL has delivered the STAR deliverables D6.1 Initial face detection software and D6.2 Initial
+<br/>arm/hand tracking software from work package 6, July 2002 (month 12). The integration of
+<br/>the detection and tracking has been needed to successfully provide this deliverable D6.3
+<br/>Enhanced face and arm/hand detector.
+<br/>We explain (cid:12)rst the enhanced face detection, followed by the enhanced tracking software and
+<br/>(cid:12)nally the integration. Also the hand tracking results with simple histogram-based detection
+<br/>is presented. The results will be shown using the common STAR data sequencies, from
+<br/>di(cid:11)erent Siemens factories, in Germany.
+</td><td>('2381884', 'Katja Nummiaro', 'katja nummiaro')<br/>('2733505', 'Rik Fransens', 'rik fransens')<br/>('1681236', 'Luc Van Gool', 'luc van gool')</td><td>fknummiar, fransen, vangoolg@esat.kuleuven.ac.be
+<br/>artur.raczynski@mchp.siemens.de
+</td></tr><tr><td>b073313325b6482e22032e259d7311fb9615356c</td><td>Robust and Accurate Cancer Classification with Gene Expression Profiling
+<br/>Dept. of Computer Science
+<br/><b>Human Interaction Research Lab</b><br/>Dept. of Computer Science
+<br/><b>University of California</b><br/>Riverside, CA 92521
+<br/><b>Motorola, Inc</b><br/>Tempe, AZ 85282
+<br/><b>University of California</b><br/>Riverside, CA 92521
+</td><td>('31947043', 'Haifeng Li', 'haifeng li')<br/>('1749400', 'Keshu Zhang', 'keshu zhang')<br/>('6820989', 'Tao Jiang', 'tao jiang')</td><td>hli@cs.ucr.edu
+<br/>keshu.zhang@motorola.com
+<br/>jiang@cs.ucr.edu
+</td></tr><tr><td>a6f81619158d9caeaa0863738ab400b9ba2d77c2</td><td>Face Recognition using Convolutional Neural Network
+<br/>and Simple Logistic Classifier
+<br/>Intelligent Systems Laboratory (ISLAB),
+<br/>Faculty of Electrical & Computer Engineering
+<br/><b>K.N. Toosi University of Technology, Tehran, Iran</b></td><td>('2040276', 'Hurieh Khalajzadeh', 'hurieh khalajzadeh')<br/>('10694774', 'Mohammad Mansouri', 'mohammad mansouri')<br/>('1709359', 'Mohammad Teshnehlab', 'mohammad teshnehlab')</td><td>hurieh.khalajzadeh@gmail.com,
+<br/>mohammad.mansouri@ee.kntu.ac.ir,
+<br/>teshnehlab@eetd.kntu.ac.ir
+</td></tr><tr><td>a66d89357ada66d98d242c124e1e8d96ac9b37a0</td><td>Failure Detection for Facial Landmark Detectors
+<br/>Computer Vision Lab, D-ITET, ETH Zurich, Switzerland
+</td><td>('33028242', 'Andreas Steger', 'andreas steger')<br/>('1732855', 'Radu Timofte', 'radu timofte')</td><td>stegeran@ethz.ch, {radu.timofte, vangool}@vision.ee.ethz.ch
+</td></tr><tr><td>a6d7cf29f333ea3d2aeac67cde39a73898e270b7</td><td>Gender Classification from Facial Images Using Texture Descriptors
+<br/>801
+<br/>Gender Classification from Facial Images Using Texture Descriptors
+<br/><b>King Saud University, KSA</b><br/><b>King Saud University, KSA</b><br/><b>King Saud University, KSA</b><br/><b>University of Nevada at Reno, USA</b></td><td>('1758125', 'Ihsan Ullah', 'ihsan ullah')<br/>('1966959', 'Hatim Aboalsamh', 'hatim aboalsamh')<br/>('2363759', 'Muhammad Hussain', 'muhammad hussain')<br/>('1758305', 'Ghulam Muhammad', 'ghulam muhammad')<br/>('1808451', 'George Bebis', 'george bebis')</td><td>{ihsanullah, hatim, mhussain, ghulam}@ksu.edu.sa, bebis@cse.unr.edu
+</td></tr><tr><td>a611c978e05d7feab01fb8a37737996ad6e88bd9</td><td>Benchmarking 3D pose estimation for
+<br/>face recognition
+<br/><b>Computational Biomedicine Lab, University of Houston, TX, USA</b></td><td>('39634395', 'Pengfei Dou', 'pengfei dou')<br/>('2461369', 'Yuhang Wu', 'yuhang wu')<br/>('2700399', 'Shishir K. Shah', 'shishir k. shah')<br/>('1706204', 'Ioannis A. Kakadiaris', 'ioannis a. kakadiaris')</td><td>{pengfei,yuhang}@cbl.uh.edu, {sshah,IKakadia}@central.uh.edu
+</td></tr><tr><td>a608c5f8fd42af6e9bd332ab516c8c2af7063c61</td><td>2408
+<br/>Age Estimation via Grouping and Decision Fusion
+</td><td>('3006921', 'Kuan-Hsien Liu', 'kuan-hsien liu')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')<br/>('9363144', 'C.-C. Jay Kuo', 'c.-c. jay kuo')</td><td></td></tr><tr><td>a6e8a8bb99e30a9e80dbf80c46495cf798066105</td><td>Ranking Generative Adversarial Networks:
+<br/>Subjective Control over Semantic Image Attributes
+<br/><b>University of Bath</b></td><td>('41020280', 'Yassir Saquil', 'yassir saquil')<br/>('1808255', 'Kwang In Kim', 'kwang in kim')</td><td></td></tr><tr><td>a6eb6ad9142130406fb4ffd4d60e8348c2442c29</td><td>Video Description: A Survey of Methods,
+<br/>Datasets and Evaluation Metrics
+</td><td>('50978260', 'Nayyer Aafaq', 'nayyer aafaq')<br/>('1746166', 'Syed Zulqarnain Gilani', 'syed zulqarnain gilani')<br/>('46641573', 'Wei Liu', 'wei liu')<br/>('46332747', 'Ajmal Mian', 'ajmal mian')</td><td></td></tr><tr><td>a6ffe238eaf8632b4a8a6f718c8917e7f3261546</td><td> Australasian Medical Journal [AMJ 2011, 4, 10, 555-562]
+<br/>Dynamic Facial Prosthetics for Sufferers of Facial Paralysis
+<br/><b>Nottingham Trent University, Nottingham, UK</b><br/><b>Nottingham University Hospital, Nottingham, UK</b><br/> RESEARCH
+<br/>
+<br/>Please cite this paper as: Coulter F, Breedon P, Vloeberghs
+<br/>M. Dynamic facial prosthetics for sufferers of facial
+<br/>paralysis.
+<br/>AMJ 2011, 4, 10, 555-562
+<br/>http//dx.doi.org/10.4066/AMJ.2011.921
+<br/>Corresponding Author:
+<br/><b>Nottingham Trent University</b><br/>
+<br/>United Kingdom
+</td><td>('6930559', 'Fergal Coulter', 'fergal coulter')<br/>('3214667', 'Philip Breedon', 'philip breedon')<br/>('40436855', 'Michael Vloeberghs', 'michael vloeberghs')<br/>('3214667', 'Philip Breedon', 'philip breedon')</td><td>philip.breedon@ntu.ac.uk
+</td></tr><tr><td>a6583c8daa7927eedb3e892a60fc88bdfe89a486</td><td></td><td></td><td></td></tr><tr><td>a660390654498dff2470667b64ea656668c98ecc</td><td>FACIAL EXPRESSION RECOGNITION BASED ON GRAPH-PRESERVING SPARSE
+<br/>NON-NEGATIVE MATRIX FACTORIZATION
+<br/><b>Institute of Information Science</b><br/><b>Beijing Jiaotong University</b><br/>Beijing 100044, P.R. China
+<br/>, Bastiaan Kleijn
+<br/>ACCESS Linnaeus Center
+<br/><b>KTH Royal Institute of Technology, Stockholm</b><br/>School of Electrical Engineering
+</td><td>('3247912', 'Ruicong Zhi', 'ruicong zhi')<br/>('1749334', 'Markus Flierl', 'markus flierl')<br/>('1738408', 'Qiuqi Ruan', 'qiuqi ruan')</td><td>{05120370, qqruan}@bjtu.edu.cn
+<br/>{ruicong, mflierl, bastiaan}@kth.se
+</td></tr><tr><td>a60907b7ee346b567972074e3e03c82f64d7ea30</td><td>Head Motion Signatures from Egocentric Videos
+<br/><b>The Hebrew University of Jerusalem, Israel</b><br/>2 IIIT Delhi, India
+</td><td>('2926663', 'Yair Poleg', 'yair poleg')<br/>('1897733', 'Chetan Arora', 'chetan arora')<br/>('1796055', 'Shmuel Peleg', 'shmuel peleg')</td><td></td></tr><tr><td>a6e43b73f9f87588783988333997a81b4487e2d5</td><td>Facial Age Estimation by Total Ordering
+<br/>Preserving Projection
+<br/>National Key Laboratory for Novel Software Technology
+<br/><b>Nanjing University, Nanjing 210023, China</b></td><td>('39527177', 'Xiao-Dong Wang', 'xiao-dong wang')<br/>('1692625', 'Zhi-Hua Zhou', 'zhi-hua zhou')</td><td>{wangxd,zhouzh}@lamda.nju.edu.cn
+</td></tr><tr><td>a6496553fb9ab9ca5d69eb45af1bdf0b60ed86dc</td><td>Semi-supervised Neighborhood Preserving
+<br/>Discriminant Embedding:
+<br/>A Semi-supervised Subspace Learning
+<br/>Algorithm
+<br/>1 Department of Computer Science and Software Engineering,
+<br/><b></b><br/><b>University of Western Australia</b></td><td>('2067346', 'Maryam Mehdizadeh', 'maryam mehdizadeh')<br/>('1766400', 'Cara MacNish', 'cara macnish')<br/>('39128433', 'R. Nazim Khan', 'r. nazim khan')<br/>('1698675', 'Mohammed Bennamoun', 'mohammed bennamoun')</td><td></td></tr><tr><td>a6b5ffb5b406abfda2509cae66cdcf56b4bb3837</td><td>One Shot Similarity Metric Learning
+<br/>for Action Recognition
+<br/><b>The Weizmann Institute of</b><br/><b>The Open University</b><br/>Science, Rehovot, Israel.
+<br/>Raanana, Israel.
+<br/><b>The Blavatnik School of Computer Science, Tel-Aviv University, Tel-Aviv, Israel</b></td><td>('3294355', 'Orit Kliper-Gross', 'orit kliper-gross')<br/>('1756099', 'Tal Hassner', 'tal hassner')<br/>('1776343', 'Lior Wolf', 'lior wolf')</td><td>orit.kliper@weizmann.ac.il
+<br/>hassner@openu.ac.il
+<br/>wolf@cs.tau.ac.il
+</td></tr><tr><td>a6590c49e44aa4975b2b0152ee21ac8af3097d80</td><td>https://doi.org/10.1007/s11263-018-1074-6
+<br/>3D Interpreter Networks for Viewer-Centered Wireframe Modeling
+<br/>Received: date / Accepted: date
+</td><td>('3045089', 'Jiajun Wu', 'jiajun wu')<br/>('1763295', 'Joshua B. Tenenbaum', 'joshua b. tenenbaum')</td><td></td></tr><tr><td>a694180a683f7f4361042c61648aa97d222602db</td><td>Face Recognition using Scattering Wavelet under Illicit Drug Abuse Variations
+<br/>IIIT-Delhi India
+</td><td>('2503967', 'Prateekshit Pandey', 'prateekshit pandey')<br/>('39129417', 'Richa Singh', 'richa singh')<br/>('2338122', 'Mayank Vatsa', 'mayank vatsa')</td><td>fprateekshit12078, rsingh, mayankg@iiitd.ac.in
+</td></tr><tr><td>a6db73f10084ce6a4186363ea9d7475a9a658a11</td><td></td><td></td><td></td></tr><tr><td>a6e25cab2251a8ded43c44b28a87f4c62e3a548a</td><td>Let’s Dance: Learning From Online Dance Videos
+<br/><b>Georgia Institute of Technology</b><br/>Irfan Essa
+</td><td>('40333356', 'Daniel Castro', 'daniel castro')<br/>('2935619', 'Steven Hickson', 'steven hickson')<br/>('3430745', 'Patsorn Sangkloy', 'patsorn sangkloy')<br/>('40506496', 'Bhavishya Mittal', 'bhavishya mittal')<br/>('35459529', 'Sean Dai', 'sean dai')<br/>('1945508', 'James Hays', 'james hays')</td><td>shickson@gatech.edu
+<br/>patsorn sangkloy@gatech.edu
+<br/>dcastro9@gatech.edu
+<br/>bmittal6@gatech.edu
+<br/>sdai@gatech.edu
+<br/>hays@gatech.edu
+<br/>irfan@gatech.edu
+</td></tr><tr><td>a6634ff2f9c480e94ed8c01d64c9eb70e0d98487</td><td></td><td></td><td></td></tr><tr><td>a6270914cf5f60627a1332bcc3f5951c9eea3be0</td><td>Joint Attention in Driver-Pedestrian Interaction: from
+<br/>Theory to Practice
+<br/>Department of Electrical Engineering and Computer Science
+<br/><b>York University, Toronto, ON, Canada</b><br/>March 28, 2018
+</td><td>('26902477', 'Amir Rasouli', 'amir rasouli')<br/>('1727853', 'John K. Tsotsos', 'john k. tsotsos')</td><td>{aras,tsotsos}@eecs.yorku.ca
+</td></tr><tr><td>a6ce2f0795839d9c2543d64a08e043695887e0eb</td><td>Driver Gaze Region Estimation
+<br/>Without Using Eye Movement
+<br/><b>Massachusetts Institute of Technology (MIT</b></td><td>('49925254', 'Philipp Langhans', 'philipp langhans')<br/>('7137846', 'Joonbum Lee', 'joonbum lee')<br/>('1901227', 'Bryan Reimer', 'bryan reimer')</td><td></td></tr><tr><td>a6b1d79bc334c74cde199e26a7ef4c189e9acd46</td><td>bioRxiv preprint first posted online Aug. 17, 2017;
+<br/>doi:
+<br/>http://dx.doi.org/10.1101/177196
+<br/>.
+<br/>The copyright holder for this preprint (which was
+<br/>not peer-reviewed) is the author/funder. It is made available under a
+<br/>CC-BY-NC 4.0 International license
+<br/>Deep Recurrent Neural Network Reveals a Hierarchy of
+<br/>Process Memory during Dynamic Natural Vision
+<br/>1Weldon School of Biomedical Engineering
+<br/>2School of Electrical and Computer Engineering
+<br/><b>Purdue Institute for Integrative Neuroscience</b><br/><b>Purdue University, West Lafayette, Indiana, 47906, USA</b><br/>*Correspondence
+<br/>Assistant Professor of Biomedical Engineering
+<br/>Assistant Professor of Electrical and Computer Engineering
+<br/><b>College of Engineering, Purdue University</b><br/>206 S. Martin Jischke Dr.
+<br/>West Lafayette, IN 47907, USA
+<br/>Phone: +1 765 496 1872
+<br/>Fax: +1 765 496 1459
+</td><td>('4416237', 'Junxing Shi', 'junxing shi')<br/>('4431043', 'Haiguang Wen', 'haiguang wen')<br/>('3334748', 'Yizhen Zhang', 'yizhen zhang')<br/>('3418794', 'Kuan Han', 'kuan han')<br/>('1799110', 'Zhongming Liu', 'zhongming liu')<br/>('1799110', 'Zhongming Liu', 'zhongming liu')</td><td>Email: zmliu@purdue.edu
+</td></tr><tr><td>a6ebe013b639f0f79def4c219f585b8a012be04f</td><td>Facial Expression Recognition Based on Hybrid
+<br/>Approach
+<br/><b>Graduate School of Science and Engineering, Saitama University</b><br/>255 Shimo-Okubo, Sakura-ku, Saitama-shi, Saitama 338-8570, Japan
+<br/>E-mail
+</td><td>('13403748', 'Md. Abdul Mannan', 'md. abdul mannan')<br/>('34949901', 'Antony Lam', 'antony lam')<br/>('2367471', 'Yoshinori Kobayashi', 'yoshinori kobayashi')<br/>('1737913', 'Yoshinori Kuno', 'yoshinori kuno')</td><td></td></tr><tr><td>a6e21438695dbc3a184d33b6cf5064ddf655a9ba</td><td>PKU-MMD: A Large Scale Benchmark for Continuous Multi-Modal Human
+<br/>Action Understanding
+<br/><b>Institiude of Computer Science and Technology, Peking University</b></td><td>('2994549', 'Jiaying Liu', 'jiaying liu')<br/>('1708754', 'Chunhui Liu', 'chunhui liu')</td><td>{liuchunhui, huyy, lyttonhao, ssj940929, liujiaying}@pku.edu.cn
+</td></tr><tr><td>b9081856963ceb78dcb44ac410c6fca0533676a3</td><td>UntrimmedNets for Weakly Supervised Action Recognition and Detection
+<br/>1Computer Vision Laboratory, ETH Zurich, Switzerland
+<br/><b>The Chinese University of Hong Kong, Hong Kong</b></td><td>('33345248', 'Limin Wang', 'limin wang')<br/>('3331521', 'Yuanjun Xiong', 'yuanjun xiong')<br/>('1807606', 'Dahua Lin', 'dahua lin')<br/>('1681236', 'Luc Van Gool', 'luc van gool')</td><td></td></tr><tr><td>b97f694c2a111b5b1724eefd63c8d64c8e19f6c9</td><td>Group Affect Prediction Using Multimodal Distributions
+<br/>Aspiring Minds
+<br/>Univeristy of Massachusetts, Amherst
+<br/><b>Johns Hopkins University</b></td><td>('40997180', 'Saqib Nizam Shamsi', 'saqib nizam shamsi')<br/>('47679973', 'Bhanu Pratap Singh', 'bhanu pratap singh')<br/>('7341605', 'Manya Wadhwa', 'manya wadhwa')</td><td>shamsi.saqib@gmail.com
+<br/>bhanupratap.mnit@gmail.com
+<br/>mwadhwa1@jhu.edu
+</td></tr><tr><td>b9d0774b0321a5cfc75471b62c8c5ef6c15527f5</td><td>Fishy Faces: Crafting Adversarial Images to Poison Face Authentication
+<br/>imec-DistriNet, KU Leuven
+<br/>imec-DistriNet, KU Leuven
+<br/>imec-DistriNet, KU Leuven
+<br/>imec-DistriNet, KU Leuven
+<br/>imec-DistriNet, KU Leuven
+</td><td>('4412412', 'Giuseppe Garofalo', 'giuseppe garofalo')<br/>('23974422', 'Vera Rimmer', 'vera rimmer')<br/>('19243432', 'Tim Van hamme', 'tim van hamme')<br/>('1722184', 'Davy Preuveneers', 'davy preuveneers')<br/>('1752104', 'Wouter Joosen', 'wouter joosen')</td><td></td></tr><tr><td>b9cad920a00fc0e997fc24396872e03f13c0bb9c</td><td>FACE LIVENESS DETECTION UNDER BAD ILLUMINATION CONDITIONS
+<br/><b>University of Campinas (Unicamp</b><br/>Campinas, SP, Brazil
+</td><td>('2826093', 'Bruno Peixoto', 'bruno peixoto')<br/>('34629204', 'Carolina Michelassi', 'carolina michelassi')<br/>('2145405', 'Anderson Rocha', 'anderson rocha')</td><td></td></tr><tr><td>b908edadad58c604a1e4b431f69ac8ded350589a</td><td>Deep Face Feature for Face Alignment
+</td><td>('15679675', 'Boyi Jiang', 'boyi jiang')<br/>('2938279', 'Juyong Zhang', 'juyong zhang')<br/>('2964129', 'Bailin Deng', 'bailin deng')<br/>('8280113', 'Yudong Guo', 'yudong guo')<br/>('47968194', 'Ligang Liu', 'ligang liu')</td><td></td></tr><tr><td>b93bf0a7e449cfd0db91a83284d9eba25a6094d8</td><td>Supplementary Material for: Active Pictorial Structures
+<br/>Epameinondas Antonakos
+<br/>Joan Alabort-i-Medina
+<br/>Stefanos Zafeiriou
+<br/><b>Imperial College London</b><br/>180 Queens Gate, SW7 2AZ, London, U.K.
+<br/>In the following sections, we provide additional material for the paper “Active Pictorial Structures”. Section 1 explains in
+<br/>more detail the differences between the proposed Active Pictorial Structures (APS) and Pictorial Structures (PS). Section 2
+<br/>presents the proofs about the structure of the precision matrices of the Gaussian Markov Random Filed (GMRF) (Eqs. 10
+<br/>and 12 of the main paper). Section 3 gives an analysis about the forward Gauss-Newton optimization of APS and shows that
+<br/>the inverse technique with fixed Jacobian and Hessian, which is used in the main paper, is much faster. Finally, Sec. 4 shows
+<br/>additional experimental results and conducts new experiments on different objects (human eyes and cars). An open-source
+<br/>implementation of APS is available within the Menpo Project [1] in http://www.menpo.org/.
+<br/>1. Differences between Active Pictorial Structures and Pictorial Structures
+<br/>As explained in the main paper, the proposed model is partially motivated by PS [4, 8]. In the original formulation of PS,
+<br/>the cost function to be optimized has the form
+<br/>(cid:88)
+<br/>n(cid:88)
+<br/>n(cid:88)
+<br/>i=1
+<br/>arg min
+<br/>= arg min
+<br/>i=1
+<br/>mi((cid:96)i) +
+<br/>dij((cid:96)i, (cid:96)j) =
+<br/>i,j:(vi,vj )∈E
+<br/>[A((cid:96)i) − µa
+<br/>i ]T (Σa
+<br/>i )−1[A((cid:96)i) − µa
+<br/>i ] +
+<br/>(cid:88)
+<br/>i,j:(vi,vj )∈E
+<br/>[(cid:96)i − (cid:96)j − µd
+<br/>ij]T (Σd
+<br/>ij)−1[(cid:96)i − (cid:96)j − µd
+<br/>ij]
+<br/>(1)
+<br/>1 , . . . , (cid:96)T
+<br/>n ]T is the vector of landmark coordinates ((cid:96)i = [xi, yi]T , ∀i = 1, . . . , n), A((cid:96)i) is a feature vector
+<br/>where s = [(cid:96)T
+<br/>ij} denote the mean
+<br/>extracted from the image location (cid:96)i and we have assumed a tree G = (V, E). {µa
+<br/>and covariances of the appearance and deformation respectively. In Eq. 1, mi((cid:96)i) is a function measuring the degree of
+<br/>mismatch when part vi is placed at location (cid:96)i in the image. Moreover, dij((cid:96)i, (cid:96)j) denotes a function measuring the degree
+<br/>of deformation of the model when part vi is placed at location (cid:96)i and part vj is placed at location (cid:96)j. The authors show
+<br/>an inference algorithm based on distance transform [3] that can find a global minimum of Eq. 1 without any initialization.
+<br/>However, this algorithm imposes two important restrictions: (1) appearance of each part is independent of the rest of them
+<br/>and (2) G must always be acyclic (a tree). Additionally, the computation of mi((cid:96)i) for all parts (i = 1, . . . , n) and all possible
+<br/>image locations (response maps) has a high computational cost, which makes the algorithm very slow. Finally, in [8], the
+<br/>authors only use a diagonal covariance for the relative locations (deformation) of each edge of the graph, which restricts the
+<br/>flexibility of the model.
+<br/>i } and {µd
+<br/>ij, Σd
+<br/>i , Σa
+<br/>In the proposed APS, we aim to minimize the cost function (Eq. 19 of the main paper)
+<br/>(cid:107)A(S(¯s, p)) − ¯a(cid:107)2
+<br/>[A(S(¯s, p)) − ¯a]T Qa[A(S(¯s, p)) − ¯a] + [S(¯s, p) − ¯s]T Qd[S(¯s, p) − ¯s]
+<br/>Qa + (cid:107)S(¯s, p) − ¯s(cid:107)2
+<br/>Qd =
+<br/>arg min
+<br/>= arg min
+<br/>(2)
+<br/>There are two main differences between APS and PS: (1) we employ a statistical shape model and optimize with respect
+<br/>to its parameters and (2) we use the efficient Gauss-Newton optimization technique. However, these differences introduce
+<br/>some important advantages, as also mentioned in the main paper. The proposed formulation allows to define a graph (not
+<br/>only tree) between the object’s parts. This means that we can assume dependencies between any pair of landmarks for both
+</td><td></td><td>{e.antonakos, ja310, s.zafeiriou}@imperial.ac.uk
+</td></tr><tr><td>b9c9c7ef82f31614c4b9226e92ab45de4394c5f6</td><td>11
+<br/>Face Recognition under Varying Illumination
+<br/><b>Nanyang Technological University</b><br/>Singapore
+<br/>1. Introduction
+<br/>Face Recognition by a robot or machine is one of the challenging research topics in the
+<br/>recent years. It has become an active research area which crosscuts several disciplines such
+<br/>as image processing, pattern recognition, computer vision, neural networks and robotics.
+<br/>For many applications, the performances of face recognition systems in controlled
+<br/>environments have achieved a satisfactory level. However, there are still some challenging
+<br/>issues to address in face recognition under uncontrolled conditions. The variation in
+<br/>illumination is one of the main challenging problems that a practical face recognition system
+<br/>needs to deal with. It has been proven that in face recognition, differences caused by
+<br/>illumination variations are more significant than differences between individuals (Adini et
+<br/>al., 1997). Various methods have been proposed to solve the problem. These methods can be
+<br/>classified into three categories, named face and illumination modeling, illumination
+<br/>invariant feature extraction and preprocessing and normalization. In this chapter, an
+<br/>extensive and state-of-the-art study of existing approaches to handle illumination variations
+<br/>is presented. Several latest and representative approaches of each category are presented in
+<br/>detail, as well as the comparisons between them. Moreover, to deal with complex
+<br/>environment where illumination variations are coupled with other problems such as pose
+<br/>and expression variations, a good feature representation of human face should not only be
+<br/>illumination invariant, but also robust enough against pose and expression variations. Local
+<br/>binary pattern (LBP) is such a local texture descriptor. In this chapter, a detailed study of the
+<br/>LBP and its several important extensions is carried out, as well as its various combinations
+<br/>with other techniques to handle illumination invariant face recognition under a complex
+<br/>environment. By generalizing different strategies in handling illumination variations and
+<br/>evaluating their performances, several promising directions for future research have been
+<br/>suggested.
+<br/>This chapter is organized as follows. Several famous methods of face and illumination
+<br/>modeling are introduced in Section 2. In Section 3, latest and representative approaches of
+<br/>illumination invariant feature extraction are presented in detail. More attentions are paid on
+<br/>quotient-image-based methods. In Section 4, the normalization methods on discarding low
+<br/>frequency coefficients in various transformed domains are introduced with details. In
+<br/>Section 5, a detailed introduction of the LBP and its several important extensions is
+<br/>presented, as well as its various combinations with other face recognition techniques. In
+<br/>Section 6, comparisons between different methods and discussion of their advantages and
+<br/>disadvantages are presented. Finally, several promising directions as the conclusions are
+<br/>drawn in Section 7.
+<br/>www.intechopen.com
+</td><td>('9244425', 'Lian Zhichao', 'lian zhichao')<br/>('9224769', 'Er Meng Joo', 'er meng joo')</td><td></td></tr><tr><td>b9f2a755940353549e55690437eb7e13ea226bbf</td><td>Unsupervised Feature Learning from Videos for Discovering and Recognizing Actions
+</td><td>('3296857', 'Carolina Redondo-Cabrera', 'carolina redondo-cabrera')<br/>('2941882', 'Roberto J. López-Sastre', 'roberto j. lópez-sastre')</td><td>carolina.redondoc@edu.uah.es
+<br/>robertoj.lopez@uah.es
+</td></tr><tr><td>b9cedd1960d5c025be55ade0a0aa81b75a6efa61</td><td>INEXACT KRYLOV SUBSPACE ALGORITHMS FOR LARGE
+<br/>MATRIX EXPONENTIAL EIGENPROBLEM FROM
+<br/>DIMENSIONALITY REDUCTION
+</td><td>('1685951', 'Gang Wu', 'gang wu')<br/>('7139289', 'Ting-ting Feng', 'ting-ting feng')<br/>('9472022', 'Li-jia Zhang', 'li-jia zhang')<br/>('5828998', 'Meng Yang', 'meng yang')</td><td></td></tr><tr><td>b971266b29fcecf1d5efe1c4dcdc2355cb188ab0</td><td>MAI et al.: ON THE RECONSTRUCTION OF FACE IMAGES FROM DEEP FACE TEMPLATES
+<br/>On the Reconstruction of Face Images from
+<br/>Deep Face Templates
+</td><td>('3391550', 'Guangcan Mai', 'guangcan mai')<br/>('1684684', 'Kai Cao', 'kai cao')<br/>('1768574', 'Pong C. Yuen', 'pong c. yuen')<br/>('6680444', 'Anil K. Jain', 'anil k. jain')</td><td></td></tr><tr><td>a1af7ec84472afba0451b431dfdb59be323e35b7</td><td>LikeNet: A Siamese Motion Estimation
+<br/>Network Trained in an Unsupervised Way
+<br/>Multimedia and Vision Research Group
+<br/><b>Queen Mary University of London</b><br/>London, UK
+</td><td>('49505678', 'Aria Ahmadi', 'aria ahmadi')<br/>('2000297', 'Ioannis Marras', 'ioannis marras')<br/>('1744405', 'Ioannis Patras', 'ioannis patras')<br/>('49505678', 'Aria Ahmadi', 'aria ahmadi')<br/>('2000297', 'Ioannis Marras', 'ioannis marras')<br/>('1744405', 'Ioannis Patras', 'ioannis patras')</td><td>a.ahmadi@qmul.ac.uk
+<br/>i.marras@qmul.ac.uk
+<br/>i.patras@qmul.ac.uk
+</td></tr><tr><td>a1dd806b8f4f418d01960e22fb950fe7a56c18f1</td><td>Interactively Building a Discriminative Vocabulary of Nameable Attributes
+<br/><b>Toyota Technological Institute, Chicago (TTIC</b><br/><b>University of Texas at Austin</b></td><td>('1713589', 'Devi Parikh', 'devi parikh')<br/>('1794409', 'Kristen Grauman', 'kristen grauman')</td><td>dparikh@ttic.edu
+<br/>grauman@cs.utexas.edu
+</td></tr><tr><td>a158c1e2993ac90a90326881dd5cb0996c20d4f3</td><td>OPEN ACCESS
+<br/>ISSN 2073-8994
+<br/>Article
+<br/>1 DMA, Università degli Studi di Palermo, via Archirafi 34, 90123 Palermo, Italy
+<br/>2 CITC, Università degli Studi di Palermo, via Archirafi 34, 90123 Palermo, Itlay
+<br/>3 Istituto Nazionale di Ricerche Demopolis, via Col. Romey 7, 91100 Trapani, Italy
+<br/>† Deceased on 15 March 2009.
+<br/>Received: 4 March 2010; in revised form: 23 March 2010 / Accepted: 29 March 2010 /
+<br/>Published: 1 April 2010
+</td><td>('1716744', 'Bertrand Zavidovique', 'bertrand zavidovique')</td><td>4 IEF, Université Paris IX–Orsay, Paris, France; E-Mail: bertrand.zavidovique@u-psud.fr (B.Z.)
+<br/>* Author to whom correspondence should be addressed; E-Mail: metabacchi@demopolis.it.
+</td></tr><tr><td>a15d9d2ed035f21e13b688a78412cb7b5a04c469</td><td>Object Detection Using
+<br/>Strongly-Supervised Deformable Part Models
+<br/>1Computer Vision and Active Perception Laboratory (CVAP), KTH, Sweden
+<br/>2INRIA, WILLOW, Laboratoire d’Informatique de l’Ecole Normale Superieure
+</td><td>('2622491', 'Hossein Azizpour', 'hossein azizpour')<br/>('1785596', 'Ivan Laptev', 'ivan laptev')</td><td>azizpour@kth.se,ivan.laptev@inria.fr
+</td></tr><tr><td>a1b1442198f29072e907ed8cb02a064493737158</td><td>456
+<br/>Crowdsourcing Facial Responses
+<br/>to Online Videos
+</td><td>('1801452', 'Daniel McDuff', 'daniel mcduff')<br/>('1754451', 'Rana El Kaliouby', 'rana el kaliouby')<br/>('1719389', 'Rosalind W. Picard', 'rosalind w. picard')</td><td></td></tr><tr><td>a14db48785d41cd57d4eac75949a6b79fc684e70</td><td>Fast High Dimensional Vector Multiplication Face Recognition
+<br/><b>Tel Aviv University</b><br/><b>Tel Aviv University</b><br/><b>Tel Aviv University</b><br/>IBM Research
+</td><td>('2109324', 'Oren Barkan', 'oren barkan')<br/>('40389676', 'Jonathan Weill', 'jonathan weill')<br/>('1776343', 'Lior Wolf', 'lior wolf')<br/>('2580470', 'Hagai Aronowitz', 'hagai aronowitz')</td><td>orenbarkan@post.tau.ac.il
+<br/>yonathanw@post.tau.ac.il
+<br/>wolf@cs.tau.ac.il
+<br/>hagaia@il.ibm.com
+</td></tr><tr><td>a15c728d008801f5ffc7898568097bbeac8270a4</td><td>Concise Preservation by Combining Managed Forgetting
+<br/>and Contextualized Remembering
+<br/>Grant Agreement No. 600826
+<br/>Deliverable D4.4
+<br/>Work-package
+<br/>Deliverable
+<br/>Deliverable Leader
+<br/>Quality Assessor
+<br/>Dissemination level
+<br/>Delivery date in Annex I
+<br/>Actual delivery date
+<br/>Revisions
+<br/>Status
+<br/>Keywords
+<br/>Information Consolidation and Con-
+<br/>WP4:
+<br/>centration
+<br/>D4.4:
+<br/>Information analysis, consolidation
+<br/>and concentration techniques, and evalua-
+<br/>tion - Final release.
+<br/>Vasileios Mezaris (CERTH)
+<br/>Walter Allasia (EURIX)
+<br/>PU
+<br/>31-01-2016 (M36)
+<br/>31-01-2016
+<br/>Final
+<br/>multidocument summarization, semantic en-
+<br/>richment,
+<br/>feature extraction, concept de-
+<br/>tection, event detection, image/video qual-
+<br/>ity, image/video aesthetic quality, face de-
+<br/>tection/clustering,
+<br/>im-
+<br/>age/video summarization, image/video near
+<br/>duplicate detection, data deduplication, con-
+<br/>densation, consolidation
+<br/>image clustering,
+</td><td></td><td></td></tr><tr><td>a1b7bb2a4970b7c479aff3324cc7773c1daf3fc1</td><td>Longitudinal Study of Child Face Recognition
+<br/><b>Michigan State University</b><br/>East Lansing, MI, USA
+<br/><b>Malaviya National Institute of Technology</b><br/>Jaipur, India
+<br/><b>Michigan State University</b><br/>East Lansing, MI, USA
+</td><td>('32623642', 'Debayan Deb', 'debayan deb')<br/>('2117075', 'Neeta Nain', 'neeta nain')<br/>('6680444', 'Anil K. Jain', 'anil k. jain')</td><td>debdebay@msu.edu
+<br/>nnain.cse@mnit.ac.in
+<br/>jain@cse.msu.edu
+</td></tr><tr><td>a14ed872503a2f03d2b59e049fd6b4d61ab4d6ca</td><td>Attentional Pooling for Action Recognition
+<br/><b>The Robotics Institute, Carnegie Mellon University</b><br/>http://rohitgirdhar.github.io/AttentionalPoolingAction
+</td><td>('3102850', 'Rohit Girdhar', 'rohit girdhar')<br/>('1770537', 'Deva Ramanan', 'deva ramanan')</td><td></td></tr><tr><td>a1132e2638a8abd08bdf7fc4884804dd6654fa63</td><td>6
+<br/>Real-Time Video Face Recognition
+<br/>for Embedded Devices
+<br/>Tessera, Galway,
+<br/>Ireland
+<br/>1. Introduction
+<br/>This chapter will address the challenges of real-time video face recognition systems
+<br/>implemented in embedded devices. Topics to be covered include: the importance and
+<br/>challenges of video face recognition in real life scenarios, describing a general architecture of
+<br/>a generic video face recognition system and a working solution suitable for recognizing
+<br/>faces in real-time using low complexity devices. Each component of the system will be
+<br/>described together with the system’s performance on a database of video samples that
+<br/>resembles real life conditions.
+<br/>2. Video face recognition
+<br/>Face recognition remains a very active topic in computer vision and receives attention from
+<br/>a large community of researchers in that discipline. Many reasons feed this interest; the
+<br/>main being the wide range of commercial, law enforcement and security applications that
+<br/>require authentication. The progress made in recent years on the methods and algorithms
+<br/>for data processing as well as the availability of new technologies makes it easier to study
+<br/>these algorithms and turn them into commercially viable product. Biometric based security
+<br/>systems are becoming more popular due to their non-invasive nature and their increasing
+<br/>reliability. Surveillance applications based on face recognition are gaining increasing
+<br/>attention after the United States’ 9/11 events and with the ongoing security threats. The
+<br/>Face Recognition Vendor Test (FRVT) (Phillips et al., 2003) includes video face recognition
+<br/>testing starting with the 2002 series of tests.
+<br/>Recently, face recognition technology was deployed in consumer applications such as
+<br/>organizing a collection of images using the faces present in the images (Picassa; Corcoran &
+<br/>Costache, 2005), prioritizing family members for best capturing conditions when taking
+<br/>pictures, or directly annotating the images as they are captured (Costache et al., 2006).
+<br/>Video face recognition, compared with more traditional still face recognition, has the main
+<br/>advantage of using multiple instances of the same individual in sequential frames for
+<br/>recognition to occur. In still recognition case, the system has only one input image to make
+<br/>the decision if the person is or is not in the database. If the image is not suitable for
+<br/>recognition (due to face orientation, expression, quality or facial occlusions) the recognition
+<br/>result will most likely be incorrect. In the video image there are multiple frames which can
+<br/>www.intechopen.com
+</td><td>('1706790', 'Petronel Bigioi', 'petronel bigioi')<br/>('1734172', 'Peter Corcoran', 'peter corcoran')</td><td></td></tr><tr><td>a125bc55bdf4bec7484111eea9ae537be314ec62</td><td>Real-time Facial Expression Recognition in Image
+<br/>Sequences Using an AdaBoost-based Multi-classifier
+<br/><b>National Taiwan University of Science and Technology, Taipei 10607, Taiwan</b><br/><b>National Taiwan University of Science and Technology, Taipei 10607, Taiwan</b><br/><b>National Taiwan University of Science and Technology, Taipei 10607, Taiwan</b><br/> To surmount the shortcomings as stated above, we
+<br/>attempt to develop an automatic facial expression recognition
+<br/>system that detects human faces and extracts facial features
+<br/>from an image sequence. This system is employed for
+<br/>recognizing six kinds of facial expressions: joy, anger,
+<br/>surprise, fear, sadness, and neutral of a computer user. In the
+<br/>expression classification procedure, we mainly compare the
+<br/>performance of different classifiers using multi-layer
+<br/>perceptions (MLPs), SVMs, and AdaBoost algorithms
+<br/>(ABAs). Through evaluating experimental
+<br/>the
+<br/>performance of ABAs is superior to that of the other two.
+<br/>According to this, we develop an AdaBoost-based multi-
+<br/>classifier used in our facial expression recognition system.
+<br/>results,
+<br/>II. FACE AND FACIAL FEATURE DETECTION
+<br/>In our system design philosophy, the skin color cue is an
+<br/>obvious characteristic to detect human faces. To begin with,
+<br/>we will execute skin color detection, then the morphological
+<br/>dilation operation, and facial feature detection. Subsequently,
+<br/>a filtering operation based on geometrical properties is
+<br/>applied to eliminate the skin color regions that do not pertain
+<br/>to human faces.
+<br/>A. Color Space Transformation
+<br/>Face detection is dependent on skin color detection
+<br/>techniques which work in one of frequently used color spaces.
+<br/>In the past, three color spaces YCbCr, HSI, and RGB have
+<br/>been extensively applied for skin color detection. Accordingly,
+<br/>we extract the common attribute from skin color regions to
+<br/>perform face detection.
+<br/>The color model of an image captured from the
+<br/>experimental camera is composed of RGB values, but it’s
+<br/>easy to be influenced by lighting. Herein, we adopt the HSI
+<br/>color space to replace the traditional RGB color space for skin
+<br/>color detection. We distinguish skin color regions from non-
+<br/>skin color ones by means of lower and upper bound
+<br/>thresholds. Via many experiments of detecting human faces,
+<br/>we choose the H value between 3 and 38 as the range of skin
+<br/>colors.
+<br/>B. Connected Component Labeling
+<br/>After the processing of skin color detection, we employ
+<br/>linear-time connected-component
+<br/>technique
+<br/>labeling
+<br/>the
+</td><td>('2574621', 'Chin-Shyurng Fahn', 'chin-shyurng fahn')<br/>('2604646', 'Ming-Hui Wu', 'ming-hui wu')<br/>('2309647', 'Chang-Yi Kao', 'chang-yi kao')</td><td>E-mail: csfahn@mail.ntust.edu.tw Tel: +886-02-2730-1215
+<br/>E-mail: M9415054@mail.ntust.edu.tw Tel: +886-02-2733-3141 ext.7425
+<br/>E-mail: D9515011@mail.ntust.edu.tw Tel: +886-02-2733-3141 ext.7425
+</td></tr><tr><td>a14ae81609d09fed217aa12a4df9466553db4859</td><td>REVISED VERSION, JUNE 2011
+<br/>Face Identification Using Large Feature Sets
+</td><td>('1679142', 'William Robson Schwartz', 'william robson schwartz')<br/>('2723427', 'Huimin Guo', 'huimin guo')<br/>('3826759', 'Jonghyun Choi', 'jonghyun choi')<br/>('1693428', 'Larry S. Davis', 'larry s. davis')</td><td></td></tr><tr><td>a1f1120653bb1bd8bd4bc9616f85fdc97f8ce892</td><td>Latent Embeddings for Zero-shot Classification
+<br/>1MPI for Informatics
+<br/>2IIT Kanpur
+<br/><b>Saarland University</b></td><td>('3370667', 'Yongqin Xian', 'yongqin xian')<br/>('2893664', 'Zeynep Akata', 'zeynep akata')<br/>('2515597', 'Gaurav Sharma', 'gaurav sharma')<br/>('33460941', 'Matthias Hein', 'matthias hein')<br/>('1697100', 'Bernt Schiele', 'bernt schiele')</td><td></td></tr><tr><td>a1ee0176a9c71863d812fe012b5c6b9c15f9aa8a</td><td>Affective recommender systems: the role of emotions in
+<br/>recommender systems
+<br/>Jurij Tasiˇc
+<br/><b>University of Ljubljana Faculty</b><br/><b>University of Ljubljana Faculty</b><br/><b>University of Ljubljana Faculty</b><br/>of electrical engineering
+<br/>Tržaška 25, Ljubljana,
+<br/>Slovenia
+<br/>of electrical engineering
+<br/>Tržaška 25, Ljubljana,
+<br/>Slovenia
+<br/>of electrical engineering
+<br/>Tržaška 25, Ljubljana,
+<br/>Slovenia
+</td><td>('1717186', 'Andrej Košir', 'andrej košir')</td><td>marko.tkalcic@fe.uni-lj.si
+<br/>andrej.kosir@fe.uni-lj.si
+<br/>jurij.tasic@fe.uni-lj.si
+</td></tr><tr><td>a1dd9038b1e1e59c9d564e252d3e14705872fdec</td><td>Attributes as Operators:
+<br/>Factorizing Unseen Attribute-Object Compositions
+<br/><b>The University of Texas at Austin</b><br/>2 Facebook AI Research
+</td><td>('38661780', 'Tushar Nagarajan', 'tushar nagarajan')<br/>('1794409', 'Kristen Grauman', 'kristen grauman')</td><td>tushar@cs.utexas.edu, grauman@fb.com∗
+</td></tr><tr><td>a1e97c4043d5cc9896dc60ae7ca135782d89e5fc</td><td>IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE
+<br/>Re-identification of Humans in Crowds using
+<br/>Personal, Social and Environmental Constraints
+</td><td>('2963501', 'Shayan Modiri Assari', 'shayan modiri assari')<br/>('1803711', 'Haroon Idrees', 'haroon idrees')<br/>('1745480', 'Mubarak Shah', 'mubarak shah')</td><td></td></tr><tr><td>a16fb74ea66025d1f346045fda00bd287c20af0e</td><td>A Coupled Evolutionary Network for Age Estimation
+<br/>National Laboratory of Pattern Recognition, CASIA, Beijing, China 100190
+<br/>Center for Research on Intelligent Perception and Computing, CASIA, Beijing, China 100190
+<br/><b>University of Chinese Academy of Sciences, Beijing, China</b></td><td>('2112221', 'Peipei Li', 'peipei li')<br/>('49995036', 'Yibo Hu', 'yibo hu')<br/>('1705643', 'Ran He', 'ran he')<br/>('1757186', 'Zhenan Sun', 'zhenan sun')</td><td>Email: {peipei.li, yibo.hu}@cripac.ia.ac.cn, {rhe, znsun}@nlpr.ia.ac.cn
+</td></tr><tr><td>ef940b76e40e18f329c43a3f545dc41080f68748</td><td>
+<br/>
+<br/>Research Article Volume 7 Issue No.3
+<br/>ISSN XXXX XXXX © 2017 IJESC
+<br/>
+<br/>A Face Recognition and Spoofing Detection Adapted to Visually-
+<br/>Impaired People
+<br/><b>K.K Wagh Institute of Engineering and Education Research, Nashik, India</b><br/>Depart ment of Co mputer Engineering
+<br/>Abstrac t:
+<br/>According to estimates by the world Health organization, about 285 million people suffer fro m so me kind of v isual disabilit ies of
+<br/>which 39 million are blind, resulting in 0.7 of the word population. As many v isual impaired peoples in the word they are unable
+<br/>to recognize the people who is standing in front of them and some peoples who have problem to re me mbe r na me of the person.
+<br/>They can easily recognize the person using this system. A co mputer vision technique and image ana lysis can help v isually
+<br/>the home using face identification and spoofing detection system. This system also provide feature to add newly known people
+<br/>and keep records of all peoples visiting their ho me.
+<br/>Ke ywor ds: face-recognition, spoofing detection, visually-impaired, system architecture.
+<br/>I.
+<br/> INTRODUCTION
+<br/>The facia l ana lysis can be used to e xtract very useful and
+<br/>relevant information in order to help people with visual
+<br/>impairment in several of its tasks daily providing them with a
+<br/>greater degree of autonomy and security. Facia l recognition
+<br/>has received many improve ments recent years and today is
+<br/>approaching perfection. The advances in facia l recognition
+<br/>have not been outside the People with disab ilities. For
+<br/>e xa mple , recently it has an intelligent walking stick for the
+<br/>blind that uses facial recognition [5]. The cane co mes
+<br/>equipped with a fac ial recognition system, GPS and Bluetooth.
+<br/>at the sight the face of any acquaintance or friend whose
+<br/>picture is stored on the SD card stick, this will v ibrate and give
+<br/>to Bluetooth headset through a necessary instructions to reach
+<br/>this person. The system works with anyone who is at 10
+<br/>meters or less. And thanks to the GPS, the user will rece ive
+<br/>instructions for reach wherever, as with any GPS navigator.
+<br/>However, in addition to the task of recognition today have
+<br/>biometric systems to deal with other problems, such as
+<br/>spoofing. In network security terms, this term re fers to Using
+<br/>techniques through which an attacker, usually with malic ious
+<br/>use, it is passed by a other than through the falsification of
+<br/>data entity in a co mmun ication. Motivation of the p roject is to
+<br/>propose, build and validate an architecture based on face
+<br/>recognition and anti-spoofing system that both can be
+<br/>integrated in a video entry as a mobile app. In this way, we
+<br/>want to give the blind and visually impaired an instrument or
+<br/>tool to allo w an ult imate goal to improve the quality of life
+<br/>and increase both safety and the feel of it in your ho me or
+<br/>when you
+<br/>interact with other people. The p roposed
+<br/>architecture has been validated with rea l users and a real
+<br/>environment simulating the same conditions as could give
+<br/>both the images captured by a video portero as images taken
+<br/>by a person visually impa ired through their mobile device.
+<br/>Contributions are d iscussed below: First an algorith m is
+<br/>proposed for the normalization face robust user as to rotations
+<br/>and misalignments in the face detection algorith m. It is shown
+<br/>that a robust norma lizat ion algorithm you can significantly
+<br/>increase the rate of success in a face detection algorithm
+<br/>The organizat ion of this document is as follo ws. In Section 2
+<br/>gives literature survey, Section 3 gives details of system
+<br/>architecture. In Section 4 gives imp le mentation details.
+<br/>Section 5 presents research findings and your analysis of those
+<br/>findings. Section 6 concludes the paper.
+<br/>II. LITERATURE S URVEY
+<br/>A. Facial Rec ognition oriente d visual i mpair ment
+<br/>The proble m of face recognition adapted to visually impaired
+<br/>people has been investigated in their d ifferent ways. Belo w are
+<br/>summarized the work impo rtant, indicating for each the most
+<br/>important features that have been motivating development of
+<br/>the architecture proposed here. In [6] fac ia l recognition system
+<br/>is presented in mobile devices for the visually impaired, but
+<br/>meet ings main ly focused on what aspects as visual fie ld
+<br/>captured by the mobile focus much of the subject. In [7]
+<br/>system developed facial recognition based on Local Binary
+<br/>Pattern (LBP) [8]. They co mpared this with other a lternatives
+<br/>descriptor (Local Te rnary Pattern [9] or Histogram of
+<br/>Gradients [10]) and arrived It concluded that the performance
+<br/>is slightly LBP superior, its computational cost is lower and
+<br/>representation information is more co mpact. As has been
+<br/>mentioned above, in [5] it has developed a system fac ial
+<br/>recognition integrated into a cane. In none of these methods is
+<br/>carried out detection spoofing, making the system has a
+<br/>vulnerability high against such attacks. We believe it is a point
+<br/>very important especially in people with visual d isabilities.
+<br/>Moreover, none of the alternatives above mentioned is video
+<br/>porters oriented.
+<br/>B. De tection S poofing
+<br/>As none of the above has been studied spoofing detection to
+<br/>help people with visual impairment, we will discuss the
+<br/>results more significant as
+<br/>refers. There are many different methods
+<br/>for detecting
+<br/>spoofing. However, one o f the key factors in an application
+<br/>that must run in rea l time and in a device Embedded is what
+<br/>the method be co mputationally lightweight. Most algorith ms
+<br/>or proposed are very comple x and are therefo re unfit for rea l,
+<br/>far as detecting spoofing
+<br/>International Journal of Engineering Science and Computing, March 2017 6051 http://ijesc.org/
+</td><td></td><td></td></tr><tr><td>efd308393b573e5410455960fe551160e1525f49</td><td>Tracking Persons-of-Interest via
+<br/>Unsupervised Representation Adaptation
+</td><td>('2481388', 'Shun Zhang', 'shun zhang')<br/>('3068086', 'Jia-Bin Huang', 'jia-bin huang')<br/>('33047058', 'Jongwoo Lim', 'jongwoo lim')<br/>('1698965', 'Yihong Gong', 'yihong gong')<br/>('32014778', 'Jinjun Wang', 'jinjun wang')<br/>('1752333', 'Narendra Ahuja', 'narendra ahuja')<br/>('1715634', 'Ming-Hsuan Yang', 'ming-hsuan yang')</td><td></td></tr><tr><td>ef230e3df720abf2983ba6b347c9d46283e4b690</td><td>Page 1 of 20
+<br/>QUIS-CAMPI: An Annotated Multi-biometrics Data Feed From
+<br/>Surveillance Scenarios
+<br/><b>IT - Instituto de Telecomunica es, University of Beira Interior</b><br/><b>University of Beira Interior</b><br/><b>IT - Instituto de Telecomunica es, University of Beira Interior</b></td><td>('1712429', 'Hugo Proença', 'hugo proença')</td><td>*jcneves@ubi.pt
+</td></tr><tr><td>ef4ecb76413a05c96eac4c743d2c2a3886f2ae07</td><td>Modeling the Importance of Faces in Natural Images
+<br/>Jin B.a, Yildirim G.a, Lau C.a, Shaji A.a, Ortiz Segovia M.b and S¨usstrunk S.a
+<br/>aEPFL, Lausanne, Switzerland;
+<br/>bOc´e, Paris, France
+</td><td></td><td></td></tr><tr><td>efd28eabebb9815e34031316624e7f095c7dfcfe</td><td>A. Uhl and P. Wild. Combining Face with Face-Part Detectors under Gaussian Assumption. In A. Campilho and M. Kamel,
+<br/>editors, Proceedings of the 9th International Conference on Image Analysis and Recognition (ICIAR’12), volume 7325 of
+<br/>LNCS, pages 80{89, Aveiro, Portugal, June 25{27, 2012. c⃝ Springer. doi: 10.1007/978-3-642-31298-4 10. The original
+<br/>publication is available at www.springerlink.com.
+<br/>Combining Face with Face-part Detectors
+<br/>under Gaussian Assumption⋆
+<br/>Multimedia Signal Processing and Security Lab
+<br/><b>University of Salzburg, Austria</b></td><td>('1689850', 'Andreas Uhl', 'andreas uhl')<br/>('2242291', 'Peter Wild', 'peter wild')</td><td>fuhl,pwildg@cosy.sbg.ac.at
+</td></tr><tr><td>eff87ecafed67cc6fc4f661cb077fed5440994bb</td><td>Evaluation of Expression Recognition
+<br/>Techniques
+<br/><b>Beckman Institute, University of Illinois at Urbana-Champaign, USA</b><br/><b>Faculty of Science, University of Amsterdam, The Netherlands</b><br/><b>Leiden Institute of Advanced Computer Science, Leiden University, The Netherlands</b></td><td>('1774778', 'Ira Cohen', 'ira cohen')<br/>('1703601', 'Nicu Sebe', 'nicu sebe')<br/>('1840164', 'Yafei Sun', 'yafei sun')<br/>('1731570', 'Michael S. Lew', 'michael s. lew')<br/>('1739208', 'Thomas S. Huang', 'thomas s. huang')</td><td></td></tr><tr><td>ef458499c3856a6e9cd4738b3e97bef010786adb</td><td>Learning Type-Aware Embeddings for Fashion
+<br/>Compatibility
+<br/>Department of Computer Science,
+<br/><b>University of Illinois at Urbana-Champaign</b></td><td>('47087718', 'Mariya I. Vasileva', 'mariya i. vasileva')<br/>('2856622', 'Bryan A. Plummer', 'bryan a. plummer')<br/>('40895028', 'Krishna Dusad', 'krishna dusad')<br/>('9560882', 'Shreya Rajpal', 'shreya rajpal')<br/>('40439276', 'Ranjitha Kumar', 'ranjitha kumar')</td><td>{mvasile2,bplumme2,dusad2,srajpal2,ranjitha,daf}@illnois.edu
+</td></tr><tr><td>ef032afa4bdb18b328ffcc60e2dc5229cc1939bc</td><td>Fang and Yuan EURASIP Journal on Image and Video
+<br/>Processing (2018) 2018:44
+<br/>https://doi.org/10.1186/s13640-018-0282-x
+<br/>EURASIP Journal on Image
+<br/>and Video Processing
+<br/>RESEARCH
+<br/>Open Access
+<br/>Attribute-enhanced metric learning for
+<br/>face retrieval
+</td><td>('8589942', 'Yuchun Fang', 'yuchun fang')<br/>('30438417', 'Qiulong Yuan', 'qiulong yuan')</td><td></td></tr><tr><td>ef2a5a26448636570986d5cda8376da83d96ef87</td><td>Recurrent Neural Networks and Transfer Learning for Action Recognition
+<br/><b>Stanford University</b><br/><b>Stanford University</b></td><td>('11647121', 'Andrew Giel', 'andrew giel')<br/>('32426361', 'Ryan Diaz', 'ryan diaz')</td><td>agiel@stanford.edu
+<br/>ryandiaz@stanford.edu
+</td></tr><tr><td>ef5531711a69ed687637c48930261769465457f0</td><td>Studio2Shop: from studio photo shoots to fashion articles
+<br/>Zalando Research, Muehlenstr. 25, 10243 Berlin, Germany
+<br/>Keywords:
+<br/>computer vision, deep learning, fashion, item recognition, street-to-shop
+</td><td>('46928510', 'Julia Lasserre', 'julia lasserre')<br/>('1724791', 'Katharina Rasch', 'katharina rasch')<br/>('2742129', 'Roland Vollgraf', 'roland vollgraf')</td><td>julia.lasserre@zalando.de
+</td></tr><tr><td>ef559d5f02e43534168fbec86707915a70cd73a0</td><td>DING, HUO, HU, LU: DEEPINSIGHT
+<br/>DeepInsight: Multi-Task Multi-Scale Deep
+<br/>Learning for Mental Disorder Diagnosis
+<br/>1 School of Information
+<br/><b>Renmin University of China</b><br/>Beijing, 100872, China
+<br/>2 Beijing Key Laboratory
+<br/>of Big Data Management
+<br/>and Analysis Methods
+<br/>Beijing, 100872, China
+</td><td>('5535865', 'Mingyu Ding', 'mingyu ding')<br/>('4140493', 'Yuqi Huo', 'yuqi huo')<br/>('1745787', 'Jun Hu', 'jun hu')<br/>('1776220', 'Zhiwu Lu', 'zhiwu lu')</td><td>d130143597@163.com
+<br/>bnhony@163.com
+<br/>junhu@ruc.edu.cn
+<br/>luzhiwu@ruc.edu.cn
+</td></tr><tr><td>efa08283656714911acff2d5022f26904e451113</td><td>Active Object Localization in Visual Situations
+</td><td>('3438473', 'Max H. Quinn', 'max h. quinn')<br/>('13739397', 'Anthony D. Rhodes', 'anthony d. rhodes')<br/>('4421478', 'Melanie Mitchell', 'melanie mitchell')</td><td></td></tr><tr><td>ef8de1bd92e9ee9d0d2dee73095d4d348dc54a98</td><td>Fine-grained Activity Recognition
+<br/>with Holistic and Pose based Features
+<br/><b>Max Planck Institute for Informatics, Germany</b><br/><b>Stanford University, USA</b></td><td>('2299109', 'Leonid Pishchulin', 'leonid pishchulin')<br/>('1906895', 'Mykhaylo Andriluka', 'mykhaylo andriluka')<br/>('1697100', 'Bernt Schiele', 'bernt schiele')</td><td></td></tr><tr><td>ef999ab2f7b37f46445a3457bf6c0f5fd7b5689d</td><td>Calhoun: The NPS Institutional Archive
+<br/>DSpace Repository
+<br/>Theses and Dissertations
+<br/>1. Thesis and Dissertation Collection, all items
+<br/>2017-12
+<br/>Improving face verification in photo albums by
+<br/>combining facial recognition and metadata
+<br/>with cross-matching
+<br/>Monterey, California: Naval Postgraduate School
+<br/>http://hdl.handle.net/10945/56868
+<br/>Downloaded from NPS Archive: Calhoun
+</td><td></td><td></td></tr><tr><td>c32fb755856c21a238857b77d7548f18e05f482d</td><td>Multimodal Emotion Recognition for Human-
+<br/>Computer Interaction: A Survey
+<br/><b>School of Computer and Communication Engineering, University of Science and Technology Beijing, 100083 Beijing, China</b></td><td>('10692633', 'Michele Mukeshimana', 'michele mukeshimana')<br/>('1714904', 'Xiaojuan Ban', 'xiaojuan ban')<br/>('17056027', 'Nelson Karani', 'nelson karani')<br/>('7247643', 'Ruoyi Liu', 'ruoyi liu')</td><td></td></tr><tr><td>c3beae515f38daf4bd8053a7d72f6d2ed3b05d88</td><td></td><td></td><td></td></tr><tr><td>c3dc4f414f5233df96a9661609557e341b71670d</td><td>Tao et al. EURASIP Journal on Advances in Signal Processing 2011, 2011:4
+<br/>http://asp.eurasipjournals.com/content/2011/1/4
+<br/>RESEARCH
+<br/>Utterance independent bimodal emotion
+<br/>recognition in spontaneous communication
+<br/>Open Access
+</td><td>('37670752', 'Jianhua Tao', 'jianhua tao')<br/>('48027528', 'Shifeng Pan', 'shifeng pan')<br/>('2740129', 'Minghao Yang', 'minghao yang')<br/>('3295988', 'Kaihui Mu', 'kaihui mu')<br/>('2253805', 'Jianfeng Che', 'jianfeng che')</td><td></td></tr><tr><td>c3b3636080b9931ac802e2dd28b7b684d6cf4f8b</td><td>International Journal of Security and Its Applications
+<br/>Vol. 7, No. 2, March, 2013
+<br/>Face Recognition via Local Directional Pattern
+<br/><b>Division of IT Convergence, Daegu Gyeongbuk Institute of Science and Technology</b><br/>50-1, Sang-ri, Hyeonpung-myeon, Dalseong-gun, Daegu, Korea.
+</td><td>('2437301', 'Dong-Ju Kim', 'dong-ju kim')<br/>('38107412', 'Sang-Heon Lee', 'sang-heon lee')<br/>('2735120', 'Myoung-Kyu Sohn', 'myoung-kyu sohn')</td><td>*radioguy@dgist.ac.kr
+</td></tr><tr><td>c398684270543e97e3194674d9cce20acaef3db3</td><td>Chapter 2
+<br/>Comparative Face Soft Biometrics for
+<br/>Human Identification
+</td><td>('19249411', 'Nawaf Yousef Almudhahka', 'nawaf yousef almudhahka')<br/>('1727698', 'Mark S. Nixon', 'mark s. nixon')<br/>('31534955', 'Jonathon S. Hare', 'jonathon s. hare')</td><td></td></tr><tr><td>c3285a1d6ec6972156fea9e6dc9a8d88cd001617</td><td></td><td></td><td></td></tr><tr><td>c3418f866a86dfd947c2b548cbdeac8ca5783c15</td><td></td><td></td><td></td></tr><tr><td>c3bcc4ee9e81ce9c5c0845f34e9992872a8defc0</td><td>MVA2005 IAPR Conference on Machine VIsion Applications, May 16-18, 2005 Tsukuba Science City, Japan
+<br/>8-10
+<br/>A New Scheme for Image Recognition Using Higher-Order Local
+<br/>Autocorrelation and Factor Analysis
+<br/><b>yThe University of Tokyo</b><br/>Tokyo, Japan
+<br/>yyyAIST
+<br/>Tukuba, Japan
+</td><td>('29737626', 'Naoyuki Nomoto', 'naoyuki nomoto')<br/>('2163494', 'Yusuke Shinohara', 'yusuke shinohara')<br/>('2981587', 'Takayoshi Shiraki', 'takayoshi shiraki')<br/>('1800592', 'Takumi Kobayashi', 'takumi kobayashi')<br/>('1809629', 'Nobuyuki Otsu', 'nobuyuki otsu')</td><td>f shiraki, takumi, otsug @isi.imi.i.u-tokyo.ac.jp
+</td></tr><tr><td>c34532fe6bfbd1e6df477c9ffdbb043b77e7804d</td><td>A 3D Morphable Eye Region Model
+<br/>for Gaze Estimation
+<br/><b>University of Cambridge, Cambridge, UK</b><br/><b>Carnegie Mellon University, Pittsburgh, USA</b><br/><b>Max Planck Institute for Informatics, Saarbr ucken, Germany</b></td><td>('34399452', 'Erroll Wood', 'erroll wood')<br/>('1767184', 'Louis-Philippe Morency', 'louis-philippe morency')<br/>('39626495', 'Peter Robinson', 'peter robinson')<br/>('3194727', 'Andreas Bulling', 'andreas bulling')</td><td>{eww23,pr10}@cl.cam.ac.uk
+<br/>{tbaltrus,morency}@cs.cmu.edu
+<br/>bulling@mpi-inf.mpg.de
+</td></tr><tr><td>c394a5dfe5bea5fbab4c2b6b90d2d03e01fb29c0</td><td>Person Reidentification and Recognition in
+<br/>Video
+<br/>Computer Science and Engineering,
+<br/><b>University of South Florida, Tampa, Florida, USA</b><br/>http://figment.csee.usf.edu/
+</td><td>('3110392', 'Rangachar Kasturi', 'rangachar kasturi')</td><td>R1K@cse.usf.edu,rajmadhan@mail.usf.edu
+</td></tr><tr><td>c32383330df27625592134edd72d69bb6b5cff5c</td><td>422
+<br/>IEEE TRANSACTIONS ON SYSTEMS, MAN, AND CYBERNETICS—PART B: CYBERNETICS, VOL. 42, NO. 2, APRIL 2012
+<br/>Intrinsic Illumination Subspace for Lighting
+<br/>Insensitive Face Recognition
+</td><td>('1686057', 'Chia-Ping Chen', 'chia-ping chen')<br/>('1720473', 'Chu-Song Chen', 'chu-song chen')</td><td></td></tr><tr><td>c3a3f7758bccbead7c9713cb8517889ea6d04687</td><td></td><td></td><td></td></tr><tr><td>c32f04ccde4f11f8717189f056209eb091075254</td><td>Analysis and Synthesis of Behavioural Specific
+<br/>Facial Motion
+<br/><b>A dissertation submitted to the University of Bristol in accordance with the requirements</b><br/>for the degree of Doctor of Philosophy in the Faculty of Engineering, Department of
+<br/>Computer Science.
+<br/>February 2007
+<br/>71657 words
+</td><td>('2903159', 'Lisa Nanette Gralewski', 'lisa nanette gralewski')</td><td></td></tr><tr><td>c30982d6d9bbe470a760c168002ed9d66e1718a2</td><td>Multi-Camera Head Pose Estimation
+<br/>Using an Ensemble of Exemplars
+<br/><b>University City Blvd., Charlotte, NC</b><br/>Department of Computer Science
+<br/><b>University of North Carolina at Charlotte</b></td><td>('1715594', 'Scott Spurlock', 'scott spurlock')<br/>('2549750', 'Peter Malmgren', 'peter malmgren')<br/>('1873911', 'Hui Wu', 'hui wu')<br/>('1690110', 'Richard Souvenir', 'richard souvenir')</td><td>{sspurloc, ptmalmyr, hwu13, souvenir}@uncc.edu
+</td></tr><tr><td>c39ffc56a41d436748b9b57bdabd8248b2d28a32</td><td>Residual Attention Network for Image Classification
+<br/><b>SenseTime Group Limited, 2Tsinghua University</b><br/><b>The Chinese University of Hong Kong, 4Beijing University of Posts and Telecommunications</b></td><td>('1682816', 'Fei Wang', 'fei wang')<br/>('9563639', 'Mengqing Jiang', 'mengqing jiang')<br/>('40110742', 'Chen Qian', 'chen qian')<br/>('1692609', 'Shuo Yang', 'shuo yang')<br/>('49672774', 'Cheng Li', 'cheng li')<br/>('1720776', 'Honggang Zhang', 'honggang zhang')<br/>('31843833', 'Xiaogang Wang', 'xiaogang wang')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td>1{wangfei, qianchen, chengli}@sensetime.com, 2jmq14@mails.tsinghua.edu.cn
+<br/>3{ys014, xtang}@ie.cuhk.edu.hk, xgwang@ee.cuhk.edu.hk, 4zhhg@bupt.edu.cn
+</td></tr><tr><td>c32cd207855e301e6d1d9ddd3633c949630c793a</td><td>On the Effect of Illumination and Face Recognition
+<br/>Jeffrey Ho
+<br/>Department of CISE
+<br/><b>University of Florida</b><br/>Gainesville, FL 32611
+<br/>Department of Computer Science
+<br/><b>University of California at San Diego</b><br/>La Jolla, CA 92093
+</td><td>('38998440', 'David Kriegman', 'david kriegman')</td><td>Email: jho@cise.ufl.edu
+<br/>Email: kriegman@cs.ucsd.edu
+</td></tr><tr><td>c317181fa1de2260e956f05cd655642607520a4f</td><td>Research Article
+<br/>Research
+<br/>Article for submission to journal
+<br/>Subject Areas:
+<br/>computer vision, pattern recognition,
+<br/>feature descriptor
+<br/>Keywords:
+<br/>micro-facial expression, expression
+<br/>recognition, action unit
+<br/>Objective Classes for
+<br/>Micro-Facial Expression
+<br/>Recognition
+<br/><b>Centre for Imaging Sciences, University of</b><br/>Manchester, Manchester, United Kingdom
+<br/><b>Sudan University of Science and Technology</b><br/>Khartoum, Sudan
+<br/>3School of Computing, Mathematics and Digital
+<br/><b>Technology, Manchester Metropolitan University</b><br/>Manchester, United Kingdom
+<br/>instead of predicted emotion,
+<br/>Micro-expressions are brief spontaneous facial expressions
+<br/>that appear on a face when a person conceals an emotion,
+<br/>making them different
+<br/>to normal facial expressions in
+<br/>subtlety and duration. Currently, emotion classes within
+<br/>the CASME II dataset are based on Action Units and
+<br/>self-reports, creating conflicts during machine learning
+<br/>training. We will show that classifying expressions using
+<br/>Action Units,
+<br/>removes
+<br/>the potential bias of human reporting. The proposed
+<br/>classes are tested using LBP-TOP, HOOF and HOG 3D
+<br/>feature descriptors. The experiments are evaluated on
+<br/>two benchmark FACS coded datasets: CASME II and
+<br/>SAMM. The best result achieves 86.35% accuracy when
+<br/>classifying the proposed 5 classes on CASME II using
+<br/>HOG 3D, outperforming the result of the state-of-the-
+<br/>art 5-class emotional-based classification in CASME II.
+<br/>Results indicate that classification based on Action Units
+<br/>provides an objective method to improve micro-expression
+<br/>recognition.
+<br/>1. Introduction
+<br/>A micro-facial expression is revealed when someone attempts
+<br/>to conceal their true emotion [1,2]. When they consciously
+<br/>realise that a facial expression is occurring, the person may try
+<br/>to suppress the facial expression because showing the emotion
+<br/>may not be appropriate [3]. Once the suppression has occurred,
+<br/>the person may mask over the original facial expression and
+<br/>cause a micro-facial expression. In a high-stakes environment,
+<br/>these expressions tend to become more likely as there is more
+<br/>risk to showing the emotion.
+</td><td>('3125772', 'Moi Hoon Yap', 'moi hoon yap')<br/>('36059631', 'Adrian K. Davison', 'adrian k. davison')<br/>('23986818', 'Walied Merghani', 'walied merghani')<br/>('3125772', 'Moi Hoon Yap', 'moi hoon yap')</td><td>e-mail: M.Yap@mmu.ac.uk
+</td></tr><tr><td>c30e4e4994b76605dcb2071954eaaea471307d80</td><td></td><td></td><td></td></tr><tr><td>c37a971f7a57f7345fdc479fa329d9b425ee02be</td><td>A Novice Guide towards Human Motion Analysis and Understanding
+</td><td>('40360970', 'Ahmed Nabil Mohamed', 'ahmed nabil mohamed')</td><td>dr.ahmed.mohamed@ieee.org
+</td></tr><tr><td>c3638b026c7f80a2199b5ae89c8fcbedfc0bd8af</td><td></td><td></td><td></td></tr><tr><td>c32c8bfadda8f44d40c6cd9058a4016ab1c27499</td><td>Unconstrained Face Recognition From a Single
+<br/>Image
+<br/><b>Siemens Corporate Research, 755 College Road East, Princeton, NJ</b><br/><b>Center for Automation Research (CfAR), University of Maryland, College Park, MD</b><br/>I. INTRODUCTION
+<br/>In most situations, identifying humans using faces is an effortless task for humans. Is this true for computers?
+<br/>This very question defines the field of automatic face recognition [10], [38], [79], one of the most active research
+<br/>areas in computer vision, pattern recognition, and image understanding. Over the past decade, the problem of face
+<br/>recognition has attracted substantial attention from various disciplines and has witnessed a skyrocketing growth of
+<br/>the literature. Below, we mainly emphasize some key perspectives of the face recognition problem.
+<br/>A. Biometric perspective
+<br/>Face is a biometric. As a consequence, face recognition finds wide applications in authentication, security, and
+<br/>so on. One recent application is the US-VISIT system by the Department of Homeland Security (DHS), collecting
+<br/>foreign passengers’ fingerprints and face images.
+<br/>Biometric signatures of a person characterize the physiological or behavioral characteristics. Physiological bio-
+<br/>metrics are innate or naturally occuring, while behavioral biometrics arise from mannerisms or traits that are learned
+<br/>or acquired. Table I lists commonly used biometrics. Biometric technologies provide the foundation for an extensive
+<br/>array of highly secure identification and personal verification solutions. Compared to conventional identification and
+<br/>verification methods based on personal identification numbers (PINs) or passwords, biometric technologies offer
+<br/>many advantages. First, biometrics are individualized traits while passwords may be used or stolen by someone
+<br/>other than the authorized user. Also, biometric is very convenient since there is nothing to carry or remember. In
+<br/>addition, biometric technologies are becoming more accurate and less expensive.
+<br/>Among all biometrics listed in Table I, face is a very unique one because it is the only biometric belonging to
+<br/>both physiological and behavioral categories. While the physiological part of the face has been widely exploited
+<br/>for face recognition, the behavioral part has not yet been fully investigated. In addition, as reported in [23], [51],
+<br/>face enjoys many advantages over other biometrics because it is a natural, non-intrusive, and easy-to-use biometric.
+<br/>For example [23], among six biometrics of face, finger, hand, voice, eye, and signature, face biometric ranks the
+<br/>June 10, 2008
+<br/>DRAFT
+</td><td>('1682187', 'Shaohua Kevin Zhou', 'shaohua kevin zhou')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')<br/>('34713849', 'Narayanan Ramanathan', 'narayanan ramanathan')</td><td>Email: {shaohua.zhou}@siemens.com, {rama, ramanath}@cfar.umd.edu
+</td></tr><tr><td>c3fb2399eb4bcec22723715556e31c44d086e054</td><td>499
+<br/>2014 IEEE International Conference on Acoustic, Speech and Signal Processing (ICASSP)
+<br/>978-1-4799-2893-4/14/$31.00 ©2014 IEEE
+<br/>1. INTRODUCTION
+</td><td></td><td></td></tr><tr><td>c37de914c6e9b743d90e2566723d0062bedc9e6a</td><td>©2016 Society for Imaging Science and Technology
+<br/>DOI: 10.2352/ISSN.2470-1173.2016.11.IMAWM-455
+<br/>Joint and Discriminative Dictionary Learning
+<br/>Expression Recognition
+<br/>for Facial
+</td><td>('38611433', 'Sriram Kumar', 'sriram kumar')<br/>('3168309', 'Behnaz Ghoraani', 'behnaz ghoraani')<br/>('32219349', 'Andreas Savakis', 'andreas savakis')</td><td></td></tr><tr><td>c418a3441f992fea523926f837f4bfb742548c16</td><td>A Computer Approach for Face Aging Problems
+<br/>Centre for Pattern Recognition and Machine Intelligence,
+<br/><b>Concordia University, Canada</b></td><td>('1769788', 'Khoa Luu', 'khoa luu')</td><td>kh_lu@cenparmi.concordia.ca
+</td></tr><tr><td>c4fb2de4a5dc28710d9880aece321acf68338fde</td><td>Interactive Generative Adversarial Networks for Facial Expression Generation
+<br/>in Dyadic Interactions
+<br/><b>University of Central Florida</b><br/>Educational Testing Service
+<br/>Saad Khan
+<br/>Educational Testing Service
+</td><td>('2974242', 'Behnaz Nojavanasghari', 'behnaz nojavanasghari')<br/>('2224875', 'Yuchi Huang', 'yuchi huang')</td><td>behnaz@eecs.ucf.edu
+<br/>yhuang001@ets.org
+<br/>skhan002@ets.org
+</td></tr><tr><td>c44c84540db1c38ace232ef34b03bda1c81ba039</td><td>Cross-Age Reference Coding for Age-Invariant
+<br/>Face Recognition and Retrieval
+<br/><b>Institute of Information Science, Academia Sinica, Taipei, Taiwan</b><br/><b>National Taiwan University, Taipei, Taiwan</b></td><td>('33970300', 'Bor-Chun Chen', 'bor-chun chen')<br/>('1720473', 'Chu-Song Chen', 'chu-song chen')<br/>('1716836', 'Winston H. Hsu', 'winston h. hsu')</td><td></td></tr><tr><td>c4f1fcd0a5cdaad8b920ee8188a8557b6086c1a4</td><td>Int J Comput Vis (2014) 108:3–29
+<br/>DOI 10.1007/s11263-014-0698-4
+<br/>The Ignorant Led by the Blind: A Hybrid Human–Machine Vision
+<br/>System for Fine-Grained Categorization
+<br/>Received: 7 March 2013 / Accepted: 8 January 2014 / Published online: 20 February 2014
+<br/>© Springer Science+Business Media New York 2014
+</td><td>('3251767', 'Steve Branson', 'steve branson')<br/>('1690922', 'Pietro Perona', 'pietro perona')</td><td></td></tr><tr><td>c46a4db7247d26aceafed3e4f38ce52d54361817</td><td>A CNN Cascade for Landmark Guided Semantic
+<br/>Part Segmentation
+<br/><b>School of Computer Science, The University of Nottingham, Nottingham, UK</b></td><td>('34596685', 'Aaron S. Jackson', 'aaron s. jackson')<br/>('2610880', 'Georgios Tzimiropoulos', 'georgios tzimiropoulos')</td><td>{aaron.jackson, michel.valstar, yorgos.tzimiropoulos}@nottingham.ac.uk
+</td></tr><tr><td>c43862db5eb7e43e3ef45b5eac4ab30e318f2002</td><td>Provable Self-Representation Based Outlier Detection in a Union of Subspaces
+<br/><b>Johns Hopkins University, Baltimore, MD, 21218, USA</b></td><td>('1878841', 'Chong You', 'chong you')<br/>('1780452', 'Daniel P. Robinson', 'daniel p. robinson')</td><td></td></tr><tr><td>c4dcf41506c23aa45c33a0a5e51b5b9f8990e8ad</td><td> Understanding Activity: Learning the Language of Action
+<br/> Univ. of Rochester and Maryland
+<br/>1.1 Overview
+<br/>Understanding observed activity is an important
+<br/>problem, both from the standpoint of practical applications,
+<br/>and as a central issue in attempting to describe the
+<br/>phenomenon of intelligence. On the practical side, there are a
+<br/>large number of applications that would benefit from
+<br/>improved machine ability to analyze activity. The most
+<br/>prominent are various surveillance scenarios. The current
+<br/>emphasis on homeland security has brought this issue to the
+<br/>forefront, and resulted in considerable work on mostly low-
+<br/>level detection schemes. There are also applications in
+<br/>medical diagnosis and household assistants that, in the long
+<br/>run, may be even more important. In addition, there are
+<br/>numerous scientific projects, ranging from monitoring of
+<br/>weather conditions to observation of animal behavior that
+<br/>would be facilitated by automatic understanding of activity.
+<br/>From a scientific standpoint, understanding activity
+<br/>understanding is central to understanding intelligence.
+<br/>Analyzing what is happening in the environment, and acting
+<br/>on the results of that analysis is, to a large extent, what
+<br/>natural intelligent systems do, whether they are human or
+<br/>animal. Artificial intelligences, if we want them to work with
+<br/>people in the natural world, will need commensurate abilities.
+<br/>The importance of the problem has not gone unrecognized.
+<br/>There is a substantial body of work on various components of
+<br/>the problem, most especially on change detection, motion
+<br/>analysis, and tracking. More recently, in the context of
+<br/>surveillance applications, there have been some preliminary
+<br/>efforts to come up with a general ontology of human activity.
+<br/>These efforts have largely been top-down in the classic AI
+<br/>tradition, and, as with earlier analogous effort in areas such
+<br/>as object recognition and scene understanding, have seen
+<br/>limited practical application because of the difficulty in
+<br/>robustly extracting the putative primitives on which the top-
+<br/>down formalism is based. We propose a novel alternative
+<br/>approach, where understanding activity is centered on
+</td><td>('34344092', 'Randal Nelson', 'randal nelson')<br/>('1697493', 'Yiannis Aloimonos', 'yiannis aloimonos')</td><td></td></tr><tr><td>c42a8969cd76e9f54d43f7f4dd8f9b08da566c5f</td><td>Towards Unconstrained Face Recognition
+<br/>Using 3D Face Model
+<br/><b>Intelligent Autonomous Systems (IAS), Technical University of Munich, Garching</b><br/><b>Computer Vision Research Group, COMSATS Institute of Information</b><br/>Technology, Lahore
+<br/>1Germany
+<br/>2Pakistan
+<br/>1. Introduction
+<br/>Over the last couple of decades, many commercial systems are available to identify human
+<br/>faces. However, face recognition is still an outstanding challenge against different kinds of
+<br/>real world variations especially facial poses, non-uniform lightings and facial expressions.
+<br/>Meanwhile the face recognition technology has extended its role from biometrics and security
+<br/>applications to human robot interaction (HRI). Person identity is one of the key tasks while
+<br/>interacting with intelligent machines/robots, exploiting the non intrusive system security
+<br/>and authentication of the human interacting with the system. This capability further helps
+<br/>machines to learn person dependent traits and interaction behavior to utilize this knowledge
+<br/>for tasks manipulation. In such scenarios acquired face images contain large variations which
+<br/>demands an unconstrained face recognition system.
+<br/>Fig. 1. Biometric analysis of past few years has been shown in figure showing the
+<br/>contribution of revenue generated by various biometrics. Although AFIS are getting popular
+<br/>in current biometric industry but faces are still considered as one of the widely used
+<br/>biometrics.
+<br/>www.intechopen.com
+</td><td>('1725709', 'Zahid Riaz', 'zahid riaz')<br/>('4241648', 'M. Saquib Sarfraz', 'm. saquib sarfraz')<br/>('1746229', 'Michael Beetz', 'michael beetz')</td><td></td></tr><tr><td>c41de506423e301ef2a10ea6f984e9e19ba091b4</td><td>Modeling Attributes from Category-Attribute Proportions
+<br/><b>Columbia University</b><br/>2IBM Research
+</td><td>('1815972', 'Felix X. Yu', 'felix x. yu')<br/>('29889388', 'Tao Chen', 'tao chen')</td><td>{yuxinnan, taochen, sfchang}@ee.columbia.edu
+<br/>{liangliang.cao, mimerler, nccodell, jsmith}@us.ibm.com
+</td></tr><tr><td>c4934d9f9c41dbc46f4173aad2775432fe02e0e6</td><td>Workshop track - ICLR 2017
+<br/>GENERALIZATION TO NEW COMPOSITIONS OF KNOWN
+<br/>ENTITIES IN IMAGE UNDERSTANDING
+<br/><b>Bar Ilan University, Israel</b><br/>Jonathan Berant &
+<br/>Amir Globerson
+<br/><b>Tel Aviv University</b><br/>Israel
+<br/>Vahid Kazemi &
+<br/>Gal Chechik
+<br/>Google Research,
+<br/>Mountain View CA, USA
+</td><td>('34815079', 'Yuval Atzmon', 'yuval atzmon')</td><td>yuval.atzmon@biu.ac.il
+</td></tr><tr><td>c40c23e4afc81c8b119ea361e5582aa3adecb157</td><td>Coupled Marginal Fisher Analysis for
+<br/>Low-resolution Face Recognition
+<br/><b>Carnegie Mellon University, Electrical and Computer Engineering</b><br/>5000 Forbes Avenue, Pittsburgh, Pennsylvania, USA 15213
+</td><td>('2883809', 'Stephen Siena', 'stephen siena')<br/>('2232940', 'Vishnu Naresh Boddeti', 'vishnu naresh boddeti')</td><td>ssiena@andrew.cmu.edu
+<br/>naresh@cmu.edu
+<br/>kumar@ece.cmu.edu
+</td></tr><tr><td>c49aed65fcf9ded15c44f9cbb4b161f851c6fa88</td><td>Multiscale Facial Expression Recognition using Convolutional Neural Networks
+<br/>IDIAP, Martigny, Switzerland
+</td><td>('8745904', 'Beat Fasel', 'beat fasel')</td><td>Beat.Fasel@idiap.ch
+</td></tr><tr><td>c466ad258d6262c8ce7796681f564fec9c2b143d</td><td>14-21
+<br/>MVA2013 IAPR International Conference on Machine Vision Applications, May 20-23, 2013, Kyoto, JAPAN
+<br/>Pose-Invariant Face Recognition
+<br/>Using A Single 3D Reference Model
+<br/><b>National Taiwan University of Science and Technology</b><br/>No. 43, Sec.4, Keelung Rd., Taipei, 106, Taiwan
+</td><td>('38801529', 'Gee-Sern Hsu', 'gee-sern hsu')<br/>('3329222', 'Hsiao-Chia Peng', 'hsiao-chia peng')</td><td>*jison@mail.ntust.edu.tw
+</td></tr><tr><td>ea46951b070f37ad95ea4ed08c7c2a71be2daedc</td><td>Using phase instead of optical flow
+<br/>for action recognition
+<br/><b>Computer Vision Lab, Delft University of Technology, Netherlands</b><br/><b>Intelligent Sensory Interactive Systems, University of Amsterdam, Netherlands</b></td><td>('9179750', 'Omar Hommos', 'omar hommos')<br/>('37041694', 'Silvia L. Pintea', 'silvia l. pintea')<br/>('1738975', 'Jan C. van Gemert', 'jan c. van gemert')</td><td></td></tr><tr><td>eac6aee477446a67d491ef7c95abb21867cf71fc</td><td>JOURNAL
+<br/>A survey of sparse representation: algorithms and
+<br/>applications
+</td><td>('38448016', 'Zheng Zhang', 'zheng zhang')<br/>('38649019', 'Yong Xu', 'yong xu')<br/>('37081450', 'Jian Yang', 'jian yang')<br/>('1720243', 'Xuelong Li', 'xuelong li')<br/>('1698371', 'David Zhang', 'david zhang')</td><td></td></tr><tr><td>ea079334121a0ba89452036e5d7f8e18f6851519</td><td>UNSUPERVISED INCREMENTAL LEARNING OF DEEP DESCRIPTORS
+<br/>FROM VIDEO STREAMS
+<br/><b>MICC University of Florence</b></td><td>('2619131', 'Federico Pernici', 'federico pernici')<br/>('8196487', 'Alberto Del Bimbo', 'alberto del bimbo')</td><td>federico.pernici@unifi.it, alberto.delbimbo@unifi.it
+</td></tr><tr><td>eac1b644492c10546a50f3e125a1f790ec46365f</td><td>Chained Multi-stream Networks Exploiting Pose, Motion, and Appearance for
+<br/>Action Classification and Detection
+<br/><b>University of Freiburg</b><br/>Freiburg im Breisgau, Germany
+</td><td>('2890820', 'Mohammadreza Zolfaghari', 'mohammadreza zolfaghari')<br/>('2371771', 'Gabriel L. Oliveira', 'gabriel l. oliveira')<br/>('31656404', 'Nima Sedaghat', 'nima sedaghat')<br/>('1710872', 'Thomas Brox', 'thomas brox')</td><td>{zolfagha,oliveira,nima,brox}@cs.uni-freiburg.de
+</td></tr><tr><td>ea80a050d20c0e24e0625a92e5c03e5c8db3e786</td><td>Face Verification and Face Image Synthesis
+<br/>under Illumination Changes
+<br/>using Neural Networks
+<br/>by
+<br/>Under the supervision of
+<br/>Prof. Daphna Weinshall
+<br/>School of Computer Science and Engineering
+<br/><b>The Hebrew University of Jerusalem</b><br/>Israel
+<br/>Submitted in partial fulfillment of the
+<br/>requirements of the degree of
+<br/>Master of Science
+<br/>December, 2017
+</td><td></td><td></td></tr><tr><td>eacba5e8fbafb1302866c0860fc260a2bdfff232</td><td>VOS-GAN: Adversarial Learning of Visual-Temporal
+<br/>Dynamics for Unsupervised Dense Prediction in Videos
+<br/>∗ Pattern Recognition and Computer Vision (PeRCeiVe) Lab
+<br/><b>University of Catania, Italy</b><br/>www.perceivelab.com
+<br/>§ Center for Research in Computer Vision
+<br/><b>University of Central Florida, USA</b><br/>http://crcv.ucf.edu
+</td><td>('31411067', 'C. Spampinato', 'c. spampinato')<br/>('35323264', 'S. Palazzo', 's. palazzo')<br/>('2004177', 'F. Murabito', 'f. murabito')<br/>('1690194', 'D. Giordano', 'd. giordano')<br/>('1797029', 'M. Shah', 'm. shah')</td><td></td></tr><tr><td>ea482bf1e2b5b44c520fc77eab288caf8b3f367a</td><td>Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+<br/>2592
+</td><td></td><td></td></tr><tr><td>ea6f5c8e12513dbaca6bbdff495ef2975b8001bd</td><td>Applying a Set of Gabor Filter to 2D-Retinal Fundus Image
+<br/>to Detect the Optic Nerve Head (ONH)
+<br/>1Higher National School of engineering of Tunis, ENSIT, Laboratory LATICE (Information Technology and Communication and
+<br/><b>Electrical Engineering LR11ESO4), University of Tunis EL Manar. Adress: ENSIT 5, Avenue Taha Hussein, B. P. : 56, Bab</b><br/><b>Menara, 1008 Tunis; 2University of Tunis El-Manar, Tunis with expertise in Mechanic, Optics, Biophysics, Conference Master</b><br/><b>ISTMT, Laboratory of Research in Biophysics and Medical Technologies LRBTM Higher Institute of Medical Technologies of Tunis</b><br/><b>ISTMT, University of Tunis El Manar Address: 9, Rue Docteur Zouhe r Safi 1006; 3Faculty of Medicine of Tunis; Address</b><br/>Rue Djebel Lakhdhar. La Rabta. 1007, Tunis - Tunisia
+<br/>Corresponding author:
+<br/><b>High Institute of Medical Technologies</b><br/>of Tunis, ISTMT, and High National
+<br/>School Engineering of Tunis,
+<br/>Information Technology and
+<br/>Communication Technology and
+<br/><b>Electrical Engineering, University of</b><br/>Tunis El-Manar, ENSIT 5, Avenue Taha
+<br/>Hussein, B. P.: 56, Bab Menara, 1008
+<br/>Tunis, Tunisia,
+<br/>Tel: 9419010363;
+</td><td>('9304667', 'Hédi Trabelsi', 'hédi trabelsi')<br/>('2281259', 'Ines Malek', 'ines malek')<br/>('31649078', 'Imed Jabri', 'imed jabri')</td><td>E-mail: rabelg@live.fr
+</td></tr><tr><td>eafda8a94e410f1ad53b3e193ec124e80d57d095</td><td>Jeffrey F. Cohn
+<br/>13
+<br/>Observer-Based Measurement of Facial Expression
+<br/>With the Facial Action Coding System
+<br/>Facial expression has been a focus of emotion research for over
+<br/>a hundred years (Darwin, 1872/1998). It is central to several
+<br/>leading theories of emotion (Ekman, 1992; Izard, 1977;
+<br/>Tomkins, 1962) and has been the focus of at times heated
+<br/>debate about issues in emotion science (Ekman, 1973, 1993;
+<br/>Fridlund, 1992; Russell, 1994). Facial expression figures
+<br/>prominently in research on almost every aspect of emotion,
+<br/>including psychophysiology (Levenson, Ekman, & Friesen,
+<br/>1990), neural bases (Calder et al., 1996; Davidson, Ekman,
+<br/>Saron, Senulis, & Friesen, 1990), development (Malatesta,
+<br/>Culver, Tesman, & Shephard, 1989; Matias & Cohn, 1993),
+<br/>perception (Ambadar, Schooler, & Cohn, 2005), social pro-
+<br/>cesses (Hatfield, Cacioppo, & Rapson, 1992; Hess & Kirouac,
+<br/>2000), and emotion disorder (Kaiser, 2002; Sloan, Straussa,
+<br/>Quirka, & Sajatovic, 1997), to name a few.
+<br/>Because of its importance to the study of emotion, a num-
+<br/>ber of observer-based systems of facial expression measure-
+<br/>ment have been developed (Ekman & Friesen, 1978, 1982;
+<br/>Ekman, Friesen, & Tomkins, 1971; Izard, 1979, 1983; Izard
+<br/>& Dougherty, 1981; Kring & Sloan, 1991; Tronick, Als, &
+<br/>Brazelton, 1980). Of these various systems for describing
+<br/>facial expression, the Facial Action Coding System (FACS;
+<br/>Ekman & Friesen, 1978; Ekman, Friesen, & Hager, 2002) is
+<br/>the most comprehensive, psychometrically rigorous, and
+<br/>widely used (Cohn & Ekman, 2005; Ekman & Rosenberg,
+<br/>2005). Using FACS and viewing video-recorded facial behav-
+<br/>ior at frame rate and slow motion, coders can manually code
+<br/>nearly all possible facial expressions, which are decomposed
+<br/>into action units (AUs). Action units, with some qualifica-
+<br/>tions, are the smallest visually discriminable facial move-
+<br/>ments. By comparison, other systems are less thorough
+<br/>(Malatesta et al., 1989), fail to differentiate between some
+<br/>anatomically distinct movements (Oster, Hegley, & Nagel,
+<br/>1992), consider movements that are not anatomically dis-
+<br/>tinct as separable (Oster et al., 1992), and often assume a one-
+<br/>to-one mapping between facial expression and emotion (for
+<br/>a review of these systems, see Cohn & Ekman, in press).
+<br/>Unlike systems that use emotion labels to describe ex-
+<br/>pression, FACS explicitly distinguishes between facial actions
+<br/>and inferences about what they mean. FACS itself is descrip-
+<br/>tive and includes no emotion-specified descriptors. Hypoth-
+<br/>eses and inferences about the emotional meaning of facial
+<br/>actions are extrinsic to FACS. If one wishes to make emo-
+<br/>tion-based inferences from FACS codes, a variety of related
+<br/>resources exist. These include the FACS Investigators’ Guide
+<br/>(Ekman et al., 2002), the FACS interpretive database (Ekman,
+<br/>Rosenberg, & Hager, 1998), and a large body of empirical
+<br/>research.(Ekman & Rosenberg, 2005). These resources sug-
+<br/>gest combination rules for defining emotion-specified expres-
+<br/>sions from FACS action units, but this inferential step remains
+<br/>extrinsic to FACS. Because of its descriptive power, FACS
+<br/>is regarded by many as the standard measure for facial be-
+<br/>havior and is used widely in diverse fields. Beyond emo-
+<br/>tion science, these include facial neuromuscular disorders
+<br/>(Van Swearingen & Cohn, 2005), neuroscience (Bruce &
+<br/>Young, 1998; Rinn, 1984, 1991), computer vision (Bartlett,
+<br/>203
+<br/>UNPROOFED PAGES </td><td>('2059653', 'Zara Ambadar', 'zara ambadar')<br/>('21451088', 'Paul Ekman', 'paul ekman')</td><td></td></tr><tr><td>ea85378a6549bb9eb9bcc13e31aa6a61b655a9af</td><td>Diplomarbeit
+<br/>Template Protection for PCA-LDA-based 3D
+<br/>Face Recognition System
+<br/>von
+<br/>Technische Universität Darmstadt
+<br/>Fachbereich Informatik
+<br/>Fachgebiet Graphisch-Interaktive Systeme
+<br/>Fraunhoferstraße 5
+<br/>64283 Darmstadt
+</td><td>('1788102', 'Daniel Hartung', 'daniel hartung')<br/>('35069235', 'Xuebing Zhou', 'xuebing zhou')<br/>('1734569', 'Dieter W. Fellner', 'dieter w. fellner')</td><td></td></tr><tr><td>ea2ee5c53747878f30f6d9c576fd09d388ab0e2b</td><td>Viola-Jones based Detectors: How much affects
+<br/>the Training Set?
+<br/>SIANI
+<br/>Edif. Central del Parque Cient´ıfico Tecnol´ogico
+<br/>Universidad de Las Palmas de Gran Canaria
+<br/>35017 - Spain
+</td><td>('4643134', 'Javier Lorenzo-Navarro', 'javier lorenzo-navarro')</td><td></td></tr><tr><td>ea890846912f16a0f3a860fce289596a7dac575f</td><td>ORIGINAL RESEARCH ARTICLE
+<br/>published: 09 October 2014
+<br/>doi: 10.3389/fpsyg.2014.01154
+<br/>Benefits of social vs. non-social feedback on learning and
+<br/>generosity. Results from theTipping Game
+<br/><b>Tilburg Center for Logic, General Ethics, and Philosophy of Science, Tilburg University, Tilburg, Netherlands</b><br/><b>Institute for Adaptive and Neural Computation, University of Edinburgh, Edinburgh, UK</b><br/>Edited by:
+<br/><b>Giulia Andrighetto, Institute of</b><br/>Cognitive Science and Technologies –
+<br/>National Research Council, Italy
+<br/>Reviewed by:
+<br/><b>David R. Simmons, University of</b><br/>Glasgow, UK
+<br/><b>Aron Szekely, University of Oxford, UK</b><br/>*Correspondence:
+<br/>Logic, General Ethics, and Philosophy
+<br/><b>of Science, Tilburg University</b><br/>P. O. Box 90153, 5000 LE
+<br/>Tilburg, Netherlands
+<br/>Stankevicius have contributed equally
+<br/>to this work.
+<br/>Although much work has recently been directed at understanding social decision-making,
+<br/>relatively little is known about how different types of feedback impact adaptive changes
+<br/>in social behavior. To address this issue quantitatively, we designed a novel associative
+<br/>learning task called the “Tipping Game,” in which participants had to learn a social norm
+<br/>of tipping in restaurants. Participants were found to make more generous decisions
+<br/>from feedback in the form of facial expressions,
+<br/>in comparison to feedback in the
+<br/>form of symbols such as ticks and crosses. Furthermore, more participants displayed
+<br/>learning in the condition where they received social feedback than participants in the non-
+<br/>social condition. Modeling results showed that the pattern of performance displayed by
+<br/>participants receiving social feedback could be explained by a lower sensitivity to economic
+<br/>costs.
+<br/>Keywords: social/non-social feedback, facial expressions, social norms, tipping behavior, associative learning
+<br/>INTRODUCTION
+<br/>Several behavioral, neurobiological and theoretical studies have
+<br/>shown that social norm compliance, and more generally adap-
+<br/>tive changes in social behavior, often require the effective use and
+<br/><b>weighing of different types of information, including expected</b><br/>economic costs and benefits, the potential impact of our behavior
+<br/>on the welfare of others and our own reputation, as well as feed-
+<br/>back information (Bicchieri, 2006; Adolphs, 2009; Frith and Frith,
+<br/>2012). Relatively little attention has been paid to how different
+<br/>types of feedback (or reward) may impact the way social norms
+<br/>are learned. The present study addresses this issue with behavioral
+<br/>and modeling results from a novel associative learning task called
+<br/>the “Tipping Game.” We take the example of tipping and ask: how
+<br/>do social feedback in the form of facial expressions, as opposed
+<br/>to non-social feedback in the form of such conventional signs as
+<br/>ticks and crosses, affect the way participants learn a social norm
+<br/>of tipping?
+<br/>Recent findings indicate that people’s decision-making is often
+<br/>biased by social stimuli. For example, images of a pair of eyes can
+<br/>significantly increase pro-social behavior in laboratory conditions
+<br/>as well as in real-world contexts (Haley and Fessler, 2005; Bateson
+<br/>et al., 2006; Rigdon et al., 2009; Ernest-Jones et al., 2011). Fur-
+<br/>thermore, decision-making can be systematically biased by facial
+<br/>emotional expressions used as predictors of monetary reward
+<br/>(Averbeck and Duchaine, 2009; Evans et al., 2011; Shore and
+<br/>Heerey, 2011). Facial expressions of happiness elicit approach-
+<br/>ing behavior, whereas angry faces elicit avoidance (Seidel et al.,
+<br/>2010; for a review seeBlair, 2003). Because they can function as
+<br/>signals to others, eliciting specific behavioral responses, emotional
+<br/>facial expressions play a major role in socialization practices that
+<br/>help individuals to adapt to the norms and values of their culture
+<br/>(Keltner and Haidt, 1999; Frith, 2009).
+<br/>Despite this body of findings, the literature does not pro-
+<br/>vide an unambiguous answer to the question of how learning
+<br/>performance is affected by social stimuli in comparison to differ-
+<br/>ent types of non-social stimuli used as feedback about previous
+<br/>decisions in a learning task (Ruff and Fehr, 2014). Consistent
+<br/>with the view that social reinforcement is a powerful facili-
+<br/>tator of human learning (Zajonc, 1965; Bandura, 1977), one
+<br/>recent study using a feedback-guided item-category association
+<br/>task found that learning performance in control groups was
+<br/>improved when social (smiling or angry faces) instead of non-
+<br/>social (green or red lights) reinforcement was used (Hurlemann
+<br/>et al., 2010).
+<br/>However, the paradigm used in this study did not distin-
+<br/>guish between two conditions in which social-facilitative effects
+<br/>on learning performance have been observed: first, a condition
+<br/>characterized by the mere presence of others (Allport, 1920); and
+<br/>second, a condition where others provide reinforcing feedback
+<br/>(Zajonc, 1965). In the task used by Hurlemann et al. (2010), faces
+<br/>were present onscreen throughout each trial, changing from a
+<br/>neutral to a happy expression for correct responses or angry for
+<br/>incorrect responses. So, this study could not identify the specific
+<br/>effect of social feedback on learning.
+<br/>Consistent with the assumption oft made in economics and
+<br/>psychology that optimal decisions and learning are based on an
+<br/>assessment of the evidence that is unbiased by the social or non-
+<br/>social nature of the evidence itself (Becker, 1976; Oaksford and
+<br/>Chater, 2007), Lin et al. (2012a) found that, instead of boosting
+<br/>learning performance, social reward (smiling or angry faces) made
+<br/>www.frontiersin.org
+<br/>October 2014 | Volume 5 | Article 1154 | 1
+</td><td>('37157064', 'Matteo Colombo', 'matteo colombo')<br/>('25749361', 'Aistis Stankevicius', 'aistis stankevicius')<br/>('2771872', 'Peggy Seriès', 'peggy seriès')<br/>('37157064', 'Matteo Colombo', 'matteo colombo')<br/>('37157064', 'Matteo Colombo', 'matteo colombo')</td><td>e-mail: m.colombo@uvt.nl
+</td></tr><tr><td>eaaed082762337e7c3f8a1b1dfea9c0d3ca281bf</td><td><b>VICTORIA UNIVERSITY OF WELLINGTON</b><br/>Te Whare Wananga o te Upoko o te Ika a Maui
+<br/>School of Mathematics, Statistics and Computer Science
+<br/>Computer Science
+<br/>Algebraic Simplification of Genetic
+<br/>Programs during Evolution
+<br/>Technical Report CS-TR-06/7
+<br/>February 2006
+<br/>School of Mathematics, Statistics and Computer Science
+<br/><b>Victoria University</b><br/>PO Box 600, Wellington
+<br/>New Zealand
+<br/>Tel: +64 4 463 5341
+<br/>Fax: +64 4 463 5045
+<br/>http://www.mcs.vuw.ac.nz/research
+</td><td>('1679067', 'Mengjie Zhang', 'mengjie zhang')</td><td>Email: Tech.Reports@mcs.vuw.ac.nz
+</td></tr><tr><td>ea218cebea2228b360680cb85ca133e8c2972e56</td><td>Recover Canonical-View Faces in the 明Tild with Deep
+<br/>Neural Networks
+<br/><b>Departm nt of Information Engin ering Th Chines University of Hong Kong</b><br/><b>The Chinese University ofHong Kong</b><br/><b>Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences</b><br/>zz 012 日 ie . cuh k. edu . h k
+</td><td>('2042558', 'Zhenyao Zhu', 'zhenyao zhu')<br/>('1693209', 'Ping Luo', 'ping luo')<br/>('31843833', 'Xiaogang Wang', 'xiaogang wang')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td>xgwang@ee . cuhk . edu . hk
+<br/>p 1 uo .1 h 工 @gm a i l . com
+<br/>xtang@ i e . cuhk. edu . hk
+</td></tr><tr><td>ea96bc017fb56593a59149e10d5f14011a3744a0</td><td></td><td></td><td></td></tr><tr><td>e1630014a5ae3d2fb7ff6618f1470a567f4d90f5</td><td>Look, Listen and Learn - A Multimodal LSTM for Speaker Identification
+<br/>SenseTime Group Limited1
+<br/><b>The University of Hong Kong</b><br/>Project page: http://www.deeplearning.cc/mmlstm
+</td><td>('46972608', 'Yongtao Hu', 'yongtao hu')</td><td>{rensijie, yuwing, xuli, sunwenxiu, yanqiong}@sensetime.com
+<br/>{herohuyongtao, wangchuan2400}@gmail.com
+</td></tr><tr><td>e19fb22b35c352f57f520f593d748096b41a4a7b</td><td>Modeling Context for Image
+<br/>Understanding:
+<br/>When, For What, and How?
+<br/>Department of Electrical and Computer Engineering,
+<br/><b>Carnegie Mellon University</b><br/>A thesis submitted for the degree of
+<br/>Doctor of Philosophy
+<br/>April 3, 2009
+</td><td>('1713589', 'Devi Parikh', 'devi parikh')</td><td></td></tr><tr><td>e10a257f1daf279e55f17f273a1b557141953ce2</td><td></td><td></td><td></td></tr><tr><td>e171fba00d88710e78e181c3e807c2fdffc6798a</td><td></td><td></td><td></td></tr><tr><td>e1c59e00458b4dee3f0e683ed265735f33187f77</td><td>Spectral Rotation versus K-Means in Spectral Clustering
+<br/>Computer Science and Engineering Department
+<br/><b>University of Texas at Arlington</b><br/>Arlington,TX,76019
+</td><td>('39122448', 'Jin Huang', 'jin huang')<br/>('1688370', 'Feiping Nie', 'feiping nie')<br/>('1748032', 'Heng Huang', 'heng huang')</td><td>huangjinsuzhou@gmail.com, feipingnie@gmail.com, heng@uta.edu
+</td></tr><tr><td>e1f790bbedcba3134277f545e56946bc6ffce48d</td><td>
+<br/>International Journal of Innovative Research in Science,
+<br/>Engineering and Technology
+<br/>(An ISO 3297: 2007 Certified Organization)
+<br/> Vol. 3, Issue 5, May 2014
+<br/>Sparse Code Words
+<br/>
+<br/>
+<br/> ISSN: 2319-8753
+<br/>Image Retrieval Using Attribute Enhanced
+<br/><b>SRV Engineering College, sembodai, india</b><br/><b>P.G. Student, SRV Engineering College, sembodai, India</b></td><td>('5768860', 'M.Balaganesh', 'm.balaganesh')<br/>('14176059', 'N.Arthi', 'n.arthi')</td><td></td></tr><tr><td>e1ab3b9dee2da20078464f4ad8deb523b5b1792e</td><td>Pre-Training CNNs Using Convolutional
+<br/>Autoencoders
+<br/>TU Berlin
+<br/>TU Berlin
+<br/>Sabbir Ahmmed
+<br/>TU Berlin
+<br/>TU Berlin
+</td><td>('16258861', 'Maximilian Kohlbrenner', 'maximilian kohlbrenner')<br/>('40805229', 'Russell Hofmann', 'russell hofmann')<br/>('3196053', 'Youssef Kashef', 'youssef kashef')</td><td>m.kohlbrenner@campus.tu-berlin.de
+<br/>r.hofmann@campus.tu-berlin.de
+<br/>ahmmed@campus.tu-berlin.de
+<br/>kashefy@ni.tu-berlin.de
+</td></tr><tr><td>e16efd2ae73a325b7571a456618bfa682b51aef8</td><td></td><td></td><td></td></tr><tr><td>e19ebad4739d59f999d192bac7d596b20b887f78</td><td>Learning Gating ConvNet for Two-Stream based Methods in Action
+<br/>Recognition
+</td><td>('1696573', 'Jiagang Zhu', 'jiagang zhu')<br/>('1726367', 'Wei Zou', 'wei zou')<br/>('48147901', 'Zheng Zhu', 'zheng zhu')</td><td></td></tr><tr><td>e13360cda1ebd6fa5c3f3386c0862f292e4dbee4</td><td></td><td></td><td></td></tr><tr><td>e1f6e2651b7294951b5eab5d2322336af1f676dc</td><td>Appl. Math. Inf. Sci. 9, No. 2L, 461-469 (2015)
+<br/>461
+<br/>Applied Mathematics & Information Sciences
+<br/>An International Journal
+<br/>http://dx.doi.org/10.12785/amis/092L21
+<br/>Emotional Avatars: Appearance Augmentation and
+<br/>Animation based on Facial Expression Analysis
+<br/><b>Sejong University, 98 Gunja, Gwangjin, Seoul 143-747, Korea</b><br/>Received: 22 May 2014, Revised: 23 Jul. 2014, Accepted: 24 Jul. 2014
+<br/>Published online: 1 Apr. 2015
+</td><td>('2137943', 'Taehoon Cho', 'taehoon cho')<br/>('4027010', 'Jin-Ho Choi', 'jin-ho choi')<br/>('2849238', 'Hyeon-Joong Kim', 'hyeon-joong kim')<br/>('7236280', 'Soo-Mi Choi', 'soo-mi choi')</td><td></td></tr><tr><td>e1d726d812554f2b2b92cac3a4d2bec678969368</td><td>J Electr Eng Technol.2015; 10(?): 30-40
+<br/>http://dx.doi.org/10.5370/JEET.2015.10.2.030
+<br/>ISSN(Print)
+<br/>1975-0102
+<br/>ISSN(Online) 2093-7423
+<br/>Human Action Recognition Bases on Local Action Attributes
+<br/>and Mohan S Kankanhalli**
+</td><td>('3132751', 'Weizhi Nie', 'weizhi nie')<br/>('3026404', 'Yongkang Wong', 'yongkang wong')</td><td></td></tr><tr><td>e1256ff535bf4c024dd62faeb2418d48674ddfa2</td><td>Towards Open-Set Identity Preserving Face Synthesis
+<br/><b>University of Science and Technology of China</b><br/>2Microsoft Research
+</td><td>('3093568', 'Jianmin Bao', 'jianmin bao')<br/>('39447786', 'Dong Chen', 'dong chen')<br/>('1716835', 'Fang Wen', 'fang wen')<br/>('7179232', 'Houqiang Li', 'houqiang li')<br/>('1745420', 'Gang Hua', 'gang hua')</td><td>{doch, fangwen, ganghua}@microsoft.com
+<br/>lihq@ustc.edu.cn
+<br/>jmbao@mail.ustc.edu.cn
+</td></tr><tr><td>e1e6e6792e92f7110e26e27e80e0c30ec36ac9c2</td><td>TSINGHUA SCIENCE AND TECHNOLOGY
+<br/>ISSNll1007-0214
+<br/>0?/?? pp???–???
+<br/>DOI: 10.26599/TST.2018.9010000
+<br/>Volume 1, Number 1, Septembelr 2018
+<br/>Ranking with Adaptive Neighbors
+</td><td>('39021559', 'Muge Li', 'muge li')<br/>('2897748', 'Liangyue Li', 'liangyue li')<br/>('1688370', 'Feiping Nie', 'feiping nie')</td><td></td></tr><tr><td>cd9666858f6c211e13aa80589d75373fd06f6246</td><td>A Novel Time Series Kernel for
+<br/>Sequences Generated by LTI Systems
+<br/>V.le delle Scienze Ed.6, DIID, Universit´a degli studi di Palermo, Italy
+</td><td>('1711610', 'Liliana Lo Presti', 'liliana lo presti')<br/>('9127836', 'Marco La Cascia', 'marco la cascia')</td><td></td></tr><tr><td>cdc7bd87a2c9983dab728dbc8aac74d8c9ed7e66</td><td>What Makes a Video a Video: Analyzing Temporal Information in Video
+<br/>Understanding Models and Datasets
+<br/><b>Stanford University, 2Facebook, 3Dartmouth College</b></td><td>('38485317', 'De-An Huang', 'de-an huang')<br/>('34066479', 'Vignesh Ramanathan', 'vignesh ramanathan')<br/>('49274550', 'Dhruv Mahajan', 'dhruv mahajan')<br/>('1732879', 'Lorenzo Torresani', 'lorenzo torresani')<br/>('2210374', 'Manohar Paluri', 'manohar paluri')<br/>('9200530', 'Juan Carlos Niebles', 'juan carlos niebles')</td><td></td></tr><tr><td>cd4941cbef1e27d7afdc41b48c1aff5338aacf06</td><td>MovieGraphs: Towards Understanding Human-Centric Situations from Videos
+<br/><b>University of Toronto</b><br/><b>Vector Institute</b><br/>Lluís Castrejón3
+<br/><b>Montreal Institute for Learning Algorithms</b><br/>http://moviegraphs.cs.toronto.edu
+<br/>Figure 1: An example from the MovieGraphs dataset. Each of the 7637 video clips is annotated with: 1) a graph that captures the characters
+<br/>in the scene and their attributes, interactions (with topics and reasons), relationships, and time stamps; 2) a situation label that captures the
+<br/>overarching theme of the interactions; 3) a scene label showing where the action takes place; and 4) a natural language description of the
+<br/>clip. The graphs at the bottom show situations that occur before and after the one depicted in the main panel.
+</td><td>('2039154', 'Paul Vicol', 'paul vicol')<br/>('2103464', 'Makarand Tapaswi', 'makarand tapaswi')<br/>('37895334', 'Sanja Fidler', 'sanja fidler')</td><td>{pvicol, makarand, fidler}@cs.toronto.edu, lluis.enric.castrejon.subira@umontreal.ca
+</td></tr><tr><td>cd4c047f4d4df7937aff8fc76f4bae7718004f40</td><td></td><td></td><td></td></tr><tr><td>cdef0eaff4a3c168290d238999fc066ebc3a93e8</td><td>CONTRASTIVE-CENTER LOSS FOR DEEP NEURAL NETWORKS
+<br/>1School of Information and Communication Engineering
+<br/>2Beijing Key Laboratory of Network System and Network Culture
+<br/><b>Beijing University of Posts and Telecommunications, Beijing, China</b></td><td>('49712251', 'Ce Qi', 'ce qi')<br/>('1684263', 'Fei Su', 'fei su')</td><td></td></tr><tr><td>cd444ee7f165032b97ee76b21b9ff58c10750570</td><td><b>UNIVERSITY OF CALIFORNIA</b><br/>IRVINE
+<br/>Relational Models for Human-Object Interactions and Object Affordances
+<br/>DISSERTATION
+<br/>submitted in partial satisfaction of the requirements
+<br/>for the degree of
+<br/>DOCTOR OF PHILOSOPHY
+<br/>in Computer Science
+<br/>by
+<br/>Dissertation Committee:
+<br/>Professor Deva Ramanan, Chair
+<br/>Professor Charless Fowlkes
+<br/>Professor Padhraic Smyth
+<br/>Professor Serge Belongie
+<br/>2013
+</td><td>('40277674', 'Chaitanya Desai', 'chaitanya desai')</td><td></td></tr><tr><td>cd23dc3227ee2a3ab0f4de1817d03ca771267aeb</td><td>WU, KAMATA, BRECKON: FACE RECOGNITION VIA DSGNN
+<br/>Face Recognition via Deep Sparse Graph
+<br/>Neural Networks
+<br/>Renjie WU1
+<br/>Toby Breckon2
+<br/>1 Graduate School of Information,
+<br/>Production and Systems
+<br/><b>Waseda University</b><br/>Kitakyushu-shi, Japan
+<br/>2 Engineering and Computing Sciences
+<br/><b>Durham University, Durham, UK</b></td><td>('35222422', 'Sei-ichiro Kamata', 'sei-ichiro kamata')</td><td>wurj-sjtu-waseda@ruri.waseda.jp
+<br/>kam@waseda.jp
+<br/>toby.breckon@durham.ac.uk
+</td></tr><tr><td>cd596a2682d74bdfa7b7160dd070b598975e89d9</td><td>Mood Detection: Implementing a facial
+<br/>expression recognition system
+<br/>1. Introduction
+<br/>Facial expressions play a significant role in human dialogue. As a result, there has been
+<br/>considerable work done on the recognition of emotional expressions and the application of this
+<br/>research will be beneficial in improving human-machine dialogue. One can imagine the
+<br/>improvements to computer interfaces, automated clinical (psychological) research or even
+<br/>interactions between humans and autonomous robots.
+<br/>Unfortunately, a lot of the literature does not focus on trying to achieve high recognition rates
+<br/>across multiple databases. In this project we develop our own mood detection system that
+<br/>addresses this challenge. The system involves pre-processing image data by normalizing and
+<br/>applying a simple mask, extracting certain (facial) features using PCA and Gabor filters and then
+<br/>using SVMs for classification and recognition of expressions. Eigenfaces for each class are used
+<br/>to determine class-specific masks which are then applied to the image data and used to train
+<br/>multiple, one against the rest, SVMs. We find that simply using normalized pixel intensities
+<br/>works well with such an approach.
+<br/>Figure 1 – Overview of our system design
+<br/>2. Image pre-processing
+<br/>We performed pre-processing on the images used to train and test our algorithms as follows:
+<br/>1. The location of the eyes is first selected manually
+<br/>2. Images are scaled and cropped to a fixed size (170 x 130) keeping the eyes in all images
+<br/>aligned
+<br/>3. The image is histogram equalized using the mean histogram of all the training images to
+<br/>make it invariant to lighting, skin color etc.
+<br/>4. A fixed oval mask is applied to the image to extract face region. This serves to eliminate
+<br/>the background, hair, ears and other extraneous features in the image which provide no
+<br/>information about facial expression.
+<br/>This approach works reasonably well in capturing expression-relevant facial information across
+<br/>all databases. Examples of pre-processed images from the various datasets are shown in Figure-
+<br/>2a below.
+</td><td>('1906123', 'Neeraj Agrawal', 'neeraj agrawal')<br/>('2929557', 'Rob Cosgriff', 'rob cosgriff')<br/>('2594170', 'Ritvik Mudur', 'ritvik mudur')</td><td></td></tr><tr><td>cdb1d32bc5c1a9bb0d9a5b9c9222401eab3e9ca0</td><td>Functional Faces: Groupwise Dense Correspondence using Functional Maps
+<br/><b>The University of York, UK</b><br/>2IMB/LaBRI, Universit´e de Bordeaux, France
+</td><td>('1720735', 'Chao Zhang', 'chao zhang')<br/>('34895713', 'Arnaud Dessein', 'arnaud dessein')<br/>('1737428', 'Nick Pears', 'nick pears')<br/>('1694260', 'Hang Dai', 'hang dai')</td><td>{cz679, william.smith, nick.pears, hd816}@york.ac.uk
+<br/>arnaud.dessein@u-bordeaux.fr
+</td></tr><tr><td>cda4fb9df653b5721ad4fe8b4a88468a410e55ec</td><td>Gabor wavelet transform and its application
+</td><td>('38784892', 'Wei-lun Chao', 'wei-lun chao')</td><td></td></tr><tr><td>cd3005753012409361aba17f3f766e33e3a7320d</td><td>Multilinear Biased Discriminant Analysis: A Novel Method for Facial
+<br/>Action Unit Representation
+</td><td>('1736464', 'Mahmoud Khademi', 'mahmoud khademi')<br/>('2179339', 'Mehran Safayani', 'mehran safayani')</td><td>†: Sharif University of Tech., DSP Lab, {khademi@ce, safayani@ce, manzuri@}.sharif.edu
+</td></tr><tr><td>cd687ddbd89a832f51d5510c478942800a3e6854</td><td>A Game to Crowdsource Data for Affective Computing
+<br/><b>Games Studio, Faculty of Engineering and IT, University of Technology, Sydney</b></td><td>('1733360', 'Chek Tien Tan', 'chek tien tan')<br/>('2117735', 'Hemanta Sapkota', 'hemanta sapkota')<br/>('2823535', 'Daniel Rosser', 'daniel rosser')<br/>('3141633', 'Yusuf Pisan', 'yusuf pisan')</td><td>chek@gamesstudio.org
+<br/>hemanta.sapkota@student.uts.edu.au
+<br/>daniel.j.rosser@gmail.com
+<br/>yusuf.pisan@gamesstudio.org
+</td></tr><tr><td>cd436f05fb4aeeda5d1085f2fe0384526571a46e</td><td>Information Bottleneck Domain Adaptation with
+<br/>Privileged Information for Visual Recognition
+<br/>Lane Department of Computer Science and Electrical Engineering
+<br/><b>West Virginia University</b></td><td>('2897426', 'Saeid Motiian', 'saeid motiian')<br/>('1736352', 'Gianfranco Doretto', 'gianfranco doretto')</td><td>{samotiian,gidoretto}@mix.wvu.edu
+</td></tr><tr><td>cd2c54705c455a4379f45eefdf32d8d10087e521</td><td>A Hybrid Model for Identity Obfuscation by
+<br/>Face Replacement
+<br/><b>Max Planck Institute for Informatics, Saarland Informatics Campus</b></td><td>('32222907', 'Qianru Sun', 'qianru sun')<br/>('1739548', 'Mario Fritz', 'mario fritz')</td><td>{qsun, atewari, wxu, mfritz, theobalt, schiele}@mpi-inf.mpg.de
+</td></tr><tr><td>cd7a7be3804fd217e9f10682e0c0bfd9583a08db</td><td>Women also Snowboard:
+<br/>Overcoming Bias in Captioning Models
+</td><td>('40895688', 'Kaylee Burns', 'kaylee burns')</td><td></td></tr><tr><td>cd023d2d067365c83d8e27431e83e7e66082f718</td><td>Real-Time Rotation-Invariant Face Detection with
+<br/>Progressive Calibration Networks
+<br/>1 Key Lab of Intelligent Information Processing of Chinese Academy of Sciences (CAS),
+<br/><b>Institute of Computing Technology, CAS, Beijing 100190, China</b><br/><b>University of Chinese Academy of Sciences, Beijing 100049, China</b><br/>3 CAS Center for Excellence in Brain Science and Intelligence Technology
+</td><td>('41017549', 'Xuepeng Shi', 'xuepeng shi')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('1693589', 'Meina Kan', 'meina kan')<br/>('3126238', 'Shuzhe Wu', 'shuzhe wu')<br/>('1710220', 'Xilin Chen', 'xilin chen')</td><td>{xuepeng.shi, shiguang.shan, meina.kan, shuzhe.wu, xilin.chen}@vipl.ict.ac.cn
+</td></tr><tr><td>cca9ae621e8228cfa787ec7954bb375536160e0d</td><td>Learning to Collaborate for User-Controlled Privacy
+<br/>Martin Bertran 1†
+<br/>Natalia Martinez 1†*
+<br/>Afroditi Papadaki 2
+<br/>Miguel Rodrigues 2
+<br/><b>Duke University, Durham, NC, USA</b><br/><b>University College London, London, UK</b><br/>†These authors contributed equally to this work.
+<br/>Privacy is a human right. Tim Cook, Apple CEO.
+</td><td>('2077648', 'Qiang Qiu', 'qiang qiu')<br/>('1699339', 'Guillermo Sapiro', 'guillermo sapiro')</td><td>martin.bertran@duke.edu
+<br/>natalia.martinez@duke.edu
+<br/>a.papadaki.17@ucl.ac.uk
+<br/>qiuqiang@gmail.com
+<br/>m.rodrigues@ucl.ac.uk
+<br/>guillermo.sapiro@duke.edu
+</td></tr><tr><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td><td>A 3D Facial Expression Database For Facial Behavior Research
+<br/><b>State University of New York at Binghamton</b></td><td>('8072251', 'Lijun Yin', 'lijun yin')</td><td></td></tr><tr><td>ccfcbf0eda6df876f0170bdb4d7b4ab4e7676f18</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 6, NO. 1, JUNE 2011
+<br/>A Dynamic Appearance Descriptor Approach to
+<br/>Facial Actions Temporal Modelling
+</td><td>('39532631', 'Bihan Jiang', 'bihan jiang')<br/>('1694605', 'Maja Pantic', 'maja pantic')</td><td></td></tr><tr><td>cc2eaa182f33defbb33d69e9547630aab7ed9c9c</td><td>Surpassing Humans and Computers with JELLYBEAN:
+<br/>Crowd-Vision-Hybrid Counting Algorithms
+<br/><b>Stanford University</b><br/><b>University of Illinois</b><br/><b>The Ohio State University</b><br/>Aditya Parameswaran
+<br/><b>University of Illinois</b></td><td>('32953042', 'Akash Das Sarma', 'akash das sarma')<br/>('2636295', 'Ayush Jain', 'ayush jain')<br/>('39393264', 'Arnab Nandi', 'arnab nandi')</td><td>akashds@stanford.edu
+<br/>ajain42@illinois.edu
+<br/>arnab@cse.osu.edu
+<br/>adityagp@illinois.edu
+</td></tr><tr><td>ccbfc004e29b3aceea091056b0ec536e8ea7c47e</td><td></td><td></td><td></td></tr><tr><td>ccdea57234d38c7831f1e9231efcb6352c801c55</td><td>Illumination Processing in Face Recognition
+<br/>187
+<br/>11
+<br/>X
+<br/>Illumination Processing in Face Recognition
+<br/>Yongping Li, Chao Wang and Xinyu Ao
+<br/><b>Shanghai Institute of Applied Physics, Chinese Academy of Sciences</b><br/>China
+<br/>1. Introduction
+<br/>Driven by the demanding of public security, face recognition has emerged as a viable
+<br/>solution and achieved comparable accuracies to fingerprint system under controlled
+<br/>lightning environment. In recent years, with wide installing of camera in open area, the
+<br/>automatic face recognition in watch-list application is facing a serious problem. Under the
+<br/>open environment, lightning changes is unpredictable, and the performance of face
+<br/>recognition degrades seriously.
+<br/>Illumination processing is a necessary step for face recognition to be useful in the
+<br/>uncontrolled environment. NIST has started a test called FRGC to boost the research in
+<br/>improving the performance under changing illumination. In this chapter, we will focus on
+<br/>the research effort made in this direction and the influence on face recognition caused by
+<br/>illumination.
+<br/>First of all, we will discuss the quest on the image formation mechanism under various
+<br/>illumination situations, and the corresponding mathematical modelling. The Lambertian
+<br/>lighting model, bilinear illuminating model and some recent model are reviewed. Secondly,
+<br/>under different state of face, like various head pose and different facial expression, how
+<br/>illumination influences the recognition result, where the different pose and illuminating will
+<br/>be examined carefully. Thirdly, the current methods researcher employ to counter the change
+<br/>of illumination to maintain good performance on face recognition are assessed briefly. The
+<br/>processing technique in video and how it will improve face recognition on video, where
+<br/>Wang’s (Wang & Li, 2009) work will be discussed to give an example on the related
+<br/>advancement in the fourth part. And finally, the current state-of-art of illumination
+<br/>processing and its future trends will be discussed.
+<br/>2. The formation of camera imaging and its difference from the human visual
+<br/>system
+<br/>With the camera invented in 1814 by Joseph N, recording of human face began its new era.
+<br/>Since we do not need to hire a painter to draw our figures, as the nobles did in the middle
+<br/>age. And the machine recorded our image as it is, if the camera is in good condition.
+<br/>Currently, the imaging system is mostly to be digital format. The central part is CCD
+<br/>(charge-coupled device) or CMOS (complimentary metal-oxide semiconductor). The
+<br/>CCD/CMOS operates just like the human eyes. Both CCD and CMOS image sensors operate
+<br/>www.intechopen.com
+</td><td></td><td></td></tr><tr><td>cc38942825d3a2c9ee8583c153d2c56c607e61a7</td><td>Database Cross Matching: A Novel Source of
+<br/>Fictitious Forensic Cases
+<br/>Signals and Systems Group, EEMCS,
+<br/><b>University of Twente, Netherlands</b></td><td>('34214663', 'Abhishek Dutta', 'abhishek dutta')<br/>('39128850', 'Raymond Veldhuis', 'raymond veldhuis')<br/>('1745742', 'Luuk Spreeuwers', 'luuk spreeuwers')</td><td>{a.dutta,r.n.j.veldhuis,l.j.spreeuwers}@utwente.nl
+</td></tr><tr><td>cc3c273bb213240515147e8be68c50f7ea22777c</td><td>Gaining Insight Into Films
+<br/>Via Topic Modeling & Visualization
+<br/>KEYWORDS Collaboration, computer vision, cultural
+<br/>analytics, economy of abundance, interactive data
+<br/>visualization
+<br/>We moved beyond misuse when the software actually
+<br/>became useful for film analysis with the addition of audio
+<br/>analysis, subtitle analysis, facial recognition, and topic
+<br/>modeling. Using multiple types of visualizations and
+<br/>a back-and-fourth workflow between people and AI
+<br/>we arrived at an approach for cultural analytics that
+<br/>can be used to review and develop film criticism. Finally,
+<br/>we present ways to apply these techniques to Database
+<br/>Cinema and other aspects of film and video creation.
+<br/>PROJECT DATE 2014
+<br/>URL http://misharabinovich.com/soyummy.html
+</td><td>('40462877', 'MISHA RABINOVICH', 'misha rabinovich')<br/>('1679896', 'Yogesh Girdhar', 'yogesh girdhar')</td><td></td></tr><tr><td>cc8e378fd05152a81c2810f682a78c5057c8a735</td><td>International Journal of Computer Sciences and Engineering Open Access
+<br/> Research Paper Volume-5, Issue-12 E-ISSN: 2347-2693
+<br/>Expression Invariant Face Recognition System based on Topographic
+<br/>Independent Component Analysis and Inner Product Classifier
+<br/>
+<br/>Department of Electrical Engineering, IIT Delhi, New Delhi, India
+<br/>Available online at: www.ijcseonline.org
+<br/>Received: 07/Nov/2017, Revised: 22/Nov/2017, Accepted: 14/Dec/2017, Published: 31/Dec/2017
+</td><td>('40258123', 'Aruna Bhat', 'aruna bhat')</td><td>*Corresponding Author: abigit06@yahoo.com
+</td></tr><tr><td>ccf43c62e4bf76b6a48ff588ef7ed51e87ddf50b</td><td>American Journal of Food Science and Health
+<br/>Vol. 2, No. 2, 2016, pp. 7-17
+<br/>http://www.aiscience.org/journal/ajfsh
+<br/>ISSN: 2381-7216 (Print); ISSN: 2381-7224 (Online)
+<br/>Nutraceuticals and Cosmeceuticals for Human
+<br/>Beings–An Overview
+<br/><b>Narayana Pharmacy College, Nellore, India</b></td><td>('40179150', 'R. Ramasubramania Raja', 'r. ramasubramania raja')</td><td></td></tr><tr><td>cc31db984282bb70946f6881bab741aa841d3a7c</td><td>ALBANIE, VEDALDI: LEARNING GRIMACES BY WATCHING TV
+<br/>Learning Grimaces by Watching TV
+<br/>http://www.robots.ox.ac.uk/~albanie
+<br/>http://www.robots.ox.ac.uk/~vedaldi
+<br/>Engineering Science Department
+<br/>Univeristy of Oxford
+<br/>Oxford, UK
+</td><td>('7641268', 'Samuel Albanie', 'samuel albanie')<br/>('1687524', 'Andrea Vedaldi', 'andrea vedaldi')</td><td></td></tr><tr><td>cc8bf03b3f5800ac23e1a833447c421440d92197</td><td></td><td></td><td></td></tr><tr><td>cc91001f9d299ad70deb6453d55b2c0b967f8c0d</td><td>OPEN ACCESS
+<br/>ISSN 2073-8994
+<br/>Article
+<br/>Performance Enhancement of Face Recognition in Smart TV
+<br/>Using Symmetrical Fuzzy-Based Quality Assessment
+<br/><b>Division of Electronics and Electrical Engineering, Dongguk University, 26 Pil-dong 3-ga, Jung-gu</b><br/>Tel.: +82-10-3111-7022; Fax: +82-2-2277-8735.
+<br/>Academic Editor: Christopher Tyler
+<br/>Received: 31 March 2015 / Accepted: 21 August 2015 / Published: 25 August 2015
+</td><td>('3021526', 'Yeong Gon Kim', 'yeong gon kim')<br/>('2026806', 'Won Oh Lee', 'won oh lee')<br/>('1922686', 'Hyung Gil Hong', 'hyung gil hong')<br/>('4634733', 'Kang Ryoung Park', 'kang ryoung park')</td><td>Seoul 100-715, Korea; E-Mails: csokyg@dongguk.edu (Y.G.K.); 215p8@hanmail.net (W.O.L.);
+<br/>yawara18@hotmail.com (K.W.K.); hell@dongguk.edu (H.G.H.)
+<br/>* Author to whom correspondence should be addressed; E-Mail: parkgr@dgu.edu;
+</td></tr><tr><td>cc96eab1e55e771e417b758119ce5d7ef1722b43</td><td>An Empirical Study of Recent
+<br/>Face Alignment Methods
+</td><td>('2966679', 'Heng Yang', 'heng yang')<br/>('34760532', 'Xuhui Jia', 'xuhui jia')<br/>('1717179', 'Chen Change Loy', 'chen change loy')<br/>('39626495', 'Peter Robinson', 'peter robinson')</td><td></td></tr><tr><td>cc7e66f2ba9ac0c639c80c65534ce6031997acd7</td><td>Facial Descriptors for Identity-Preserving
+<br/>Multiple People Tracking
+<br/>CVLab, School of Computer and Communication Sciences
+<br/><b>Swiss Federal Institute of Technology, Lausanne (EPFL</b><br/>EPFL-REPORT-187534
+<br/>July 2013
+</td><td></td><td>Michalis Zervos1 (michail.zervos@epfl.ch)
+<br/>Horesh Ben Shitrit1 (horesh.benshitrit@epfl.ch)
+<br/>Franc¸ois Fleuret(cid:63) (francois.fleuret@idiap.ch)
+<br/>Pascal Fua (pascal.fua@epfl.ch)
+</td></tr><tr><td>cc9057d2762e077c53e381f90884595677eceafa</td><td>On the Exploration of Joint Attribute Learning
+<br/>for Person Re-identification
+<br/><b>Michigan State University</b></td><td>('38993748', 'Joseph Roth', 'joseph roth')<br/>('1759169', 'Xiaoming Liu', 'xiaoming liu')</td><td>{rothjos1,liuxm}@cse.msu.edu
+</td></tr><tr><td>ccf16bcf458e4d7a37643b8364594656287f5bfc</td><td>A CNN Cascade for Landmark Guided Semantic
+<br/>Part Segmentation
+<br/><b>School of Computer Science, The University of Nottingham, Nottingham, UK</b></td><td>('34596685', 'Aaron S. Jackson', 'aaron s. jackson')<br/>('46637307', 'Michel Valstar', 'michel valstar')<br/>('2610880', 'Georgios Tzimiropoulos', 'georgios tzimiropoulos')</td><td>{aaron.jackson, michel.valstar, yorgos.tzimiropoulos}@nottingham.ac.uk
+</td></tr><tr><td>e64b683e32525643a9ddb6b6af8b0472ef5b6a37</td><td>Face Recognition and Retrieval in Video
+</td><td>('10795229', 'Caifeng Shan', 'caifeng shan')</td><td></td></tr><tr><td>e69ac130e3c7267cce5e1e3d9508ff76eb0e0eef</td><td>Research Article
+<br/>Addressing the illumination challenge in two-
+<br/>dimensional face recognition: a survey
+<br/>ISSN 1751-9632
+<br/>Received on 31st March 2014
+<br/>Revised on 7th January 2015
+<br/>Accepted on 9th April 2015
+<br/>doi: 10.1049/iet-cvi.2014.0086
+<br/>www.ietdl.org
+<br/><b>Computational Biomedicine Laboratory, University of Houston, Houston, Texas 77204, USA</b><br/>2Department of Computer Science, Cybersecurity Laboratory, Instituto Tecnológico y de Estudios Superiores de Monterrey, Monterrey,
+<br/>NL 64840, Mexico
+</td><td>('2899018', 'Miguel A. Ochoa-Villegas', 'miguel a. ochoa-villegas')<br/>('1905427', 'Olivia Barron-Cano', 'olivia barron-cano')<br/>('1706204', 'Ioannis A. Kakadiaris', 'ioannis a. kakadiaris')</td><td>✉ E-mail: ioannisk@uh.edu
+</td></tr><tr><td>e6b45d5a86092bbfdcd6c3c54cda3d6c3ac6b227</td><td>Pairwise Relational Networks for Face
+<br/>Recognition
+<br/>1 Department of Creative IT Engineering, POSTECH, Korea
+<br/>2 Department of Computer Science and Engineering, POSTECH, Korea
+</td><td>('2794366', 'Bong-Nam Kang', 'bong-nam kang')<br/>('50682377', 'Yonghyun Kim', 'yonghyun kim')<br/>('1695669', 'Daijin Kim', 'daijin kim')</td><td>{bnkang,gkyh0805,dkim}@postech.ac.kr
+</td></tr><tr><td>e6865b000cf4d4e84c3fe895b7ddfc65a9c4aaec</td><td>Chapter 15. The critical role of the
+<br/>cold-start problem and incentive systems
+<br/>in emotional Web 2.0 services
+</td><td>('2443050', 'Tobias Siebenlist', 'tobias siebenlist')<br/>('2153585', 'Kathrin Knautz', 'kathrin knautz')</td><td></td></tr><tr><td>e6d689054e87ad3b8fbbb70714d48712ad84dc1c</td><td>Robust Facial Feature Tracking
+<br/><b>School of Computing, Staffordshire University</b><br/>Stafford ST18 0DG
+</td><td>('2155770', 'Fabrice Bourel', 'fabrice bourel')<br/>('1919849', 'Claude C. Chibelushi', 'claude c. chibelushi')<br/>('32890308', 'Adrian A. Low', 'adrian a. low')</td><td>F.Bourel@staffs.ac.uk
+<br/>C.C.Chibelushi@staffs.ac.uk
+<br/>A.A.Low@staffs.ac.uk
+</td></tr><tr><td>e6dc1200a31defda100b2e5ddb27fb7ecbbd4acd</td><td>1921
+<br/>Flexible Manifold Embedding: A Framework
+<br/>for Semi-Supervised and Unsupervised
+<br/>Dimension Reduction
+<br/>0 =
+<br/>, the linear regression function (
+</td><td>('1688370', 'Feiping Nie', 'feiping nie')<br/>('1714390', 'Dong Xu', 'dong xu')<br/>('1700883', 'Changshui Zhang', 'changshui zhang')</td><td></td></tr><tr><td>e6f20e7431172c68f7fce0d4595100445a06c117</td><td>Searching Action Proposals via Spatial
+<br/>Actionness Estimation and Temporal Path
+<br/>Inference and Tracking
+<br/><b>cid:93)Peking University Shenzhen Graduate School, Shenzhen, P.R.China</b><br/><b>DISI, University of Trento, Trento, Italy</b></td><td>('40147776', 'Dan Xu', 'dan xu')<br/>('3238696', 'Zhihao Li', 'zhihao li')<br/>('1684933', 'Ge Li', 'ge li')</td><td></td></tr><tr><td>e6e5a6090016810fb902b51d5baa2469ae28b8a1</td><td>Title
+<br/>Energy-Efficient Deep In-memory Architecture for NAND
+<br/>Flash Memories
+<br/>Archived version
+<br/>Accepted manuscript: the content is same as the published
+<br/>paper but without the final typesetting by the publisher
+<br/>Published version
+<br/>DOI
+<br/>Published paper
+<br/>URL
+<br/>Authors (contact)
+<br/>10.1109/ISCAS.2018.8351458
+</td><td></td><td></td></tr><tr><td>e6540d70e5ffeed9f447602ea3455c7f0b38113e</td><td></td><td></td><td></td></tr><tr><td>e6ee36444038de5885473693fb206f49c1369138</td><td></td><td></td><td></td></tr><tr><td>e6178de1ef15a6a973aad2791ce5fbabc2cb8ae5</td><td>Improving Facial Landmark Detection via a
+<br/>Super-Resolution Inception Network
+<br/><b>Institute for Human-Machine Communication</b><br/><b>Technical University of Munich, Germany</b></td><td>('38746426', 'Martin Knoche', 'martin knoche')<br/>('3044182', 'Daniel Merget', 'daniel merget')<br/>('1705843', 'Gerhard Rigoll', 'gerhard rigoll')</td><td></td></tr><tr><td>f913bb65b62b0a6391ffa8f59b1d5527b7eba948</td><td></td><td></td><td></td></tr><tr><td>f9784db8ff805439f0a6b6e15aeaf892dba47ca0</td><td>Comparing the performance of Emotion-Recognition Implementations
+<br/>in OpenCV, Cognitive Services, and Google Vision APIs
+<br/>Department of Informatics and Artificial Intelligence
+<br/><b>Tomas Bata University in Zl n</b><br/>Nad Stráněmi 4511, 76005, Zlín
+<br/>CZECH REPUBLIC
+</td><td></td><td>beltran_prieto@fai.utb.cz
+</td></tr><tr><td>f935225e7811858fe9ef6b5fd3fdd59aec9abd1a</td><td>www.elsevier.com/locate/ynimg
+<br/>Spatiotemporal dynamics and connectivity pattern differences
+<br/>between centrally and peripherally presented faces
+<br/><b>Laboratory for Human Brain Dynamics, RIKEN Brain Science Institute (BSI), 2-1 Hirosawa, Wakoshi, Saitama, 351-0198, Japan</b><br/>Received 4 May 2005; revised 26 January 2006; accepted 6 February 2006
+<br/>Available online 24 March 2006
+<br/>Most neuroimaging studies on face processing used centrally presented
+<br/>images with a relatively large visual field. Images presented in this way
+<br/>activate widespread striate and extrastriate areas and make it difficult
+<br/>to study spatiotemporal dynamics and connectivity pattern differences
+<br/>from various parts of the visual field. Here we studied magneto-
+<br/>encephalographic responses in humans to centrally and peripherally
+<br/>presented faces for testing the hypothesis that processing of visual
+<br/>stimuli with facial expressions of emotions depends on where the
+<br/>stimuli are presented in the visual field. Using our tomographic and
+<br/>statistical parametric mapping analyses, we identified occipitotemporal
+<br/>areas activated by face stimuli more than by control conditions. V1/V2
+<br/>activity was significantly stronger for lower than central and upper
+<br/>visual field presentation. Fusiform activity, however, was significantly
+<br/>stronger for central than for peripheral presentation. Both the V1/V2
+<br/>and fusiform areas activated earlier for peripheral than for central
+<br/>presentation. Fast responses in the fusiform were found at 70 – 80 ms
+<br/>after image onset, as well as a response at 130 – 160 ms. For peripheral
+<br/>presentation, contralateral V1/V2 and fusiform activated earlier (10 ms
+<br/>and 23 ms, respectively) and significantly stronger than their ipsilateral
+<br/>counterparts. Mutual
+<br/>information analysis further showed linked
+<br/>activity from bilateral V1/V2 to fusiform for central presentation and
+<br/>from contralateral V1/V2 to fusiform for lower visual field presenta-
+<br/>tion. In the upper visual field, the linkage was from fusiform to V1/V2.
+<br/>Our results showed that face stimuli are processed predominantly in
+<br/>the hemisphere contralateral to the stimulation and demonstrated for
+<br/>the first time early fusiform activation leading V1/V2 activation for
+<br/>upper visual field stimulation.
+<br/>D 2006 Elsevier Inc. All rights reserved.
+<br/>Keywords: Magnetoencephalography (MEG); Striate cortex; Extrastriate
+<br/>cortex; Fusiform gyrus; Face perception; Connectivity
+<br/>Introduction
+<br/>It is well established that visual stimuli presented in one part of
+<br/>the visual field are projected to the contralateral part of the visual
+<br/>cortex such that images presented in the right visual field are
+<br/>* Corresponding author. Fax: +81 48 467 9731.
+<br/>Available online on ScienceDirect (www.sciencedirect.com).
+<br/>1053-8119/$ - see front matter D 2006 Elsevier Inc. All rights reserved.
+<br/>projected to the left visual cortex. It is, however, unclear whether
+<br/>stimuli presented in different parts of the visual field are processed
+<br/>differently in extrastriate areas that specialize for processing
+<br/>complex properties of stimuli and whether different connectivity
+<br/>patterns are produced between striate and extrastriate cortices when
+<br/>such complex stimuli are presented to different quadrants. To
+<br/>address these questions, one needs to incorporate three ingredients
+<br/>in the experimental design and analysis. First, one must use stimuli
+<br/>that are known to excite at least one specific extrastriate area well.
+<br/>Second, one must present stimuli at positions in the visual field
+<br/>known to project to specific parts of the visual cortex so that the
+<br/>early entry into the visual system via V1 can be reliably extracted
+<br/>for connectivity analysis. Third, one must use a technique that can
+<br/>provide refined spatial and temporal
+<br/>information about brain
+<br/>activity. The information can then be used in follow-up analysis of
+<br/>spatiotemporal dynamics and connectivity patterns in the brain.
+<br/>The choice of faces is obvious because many studies have
+<br/>shown that faces are effective stimuli for exciting extrastriate areas.
+<br/>The posterior fusiform gyrus was first associated with cortical face
+<br/>processing from lesion studies on patients with specific recognition
+<br/>deficits of familiar faces (Meadows, 1974; Damasio et al., 1990;
+<br/>Sergent and Poncet, 1990). Neuroimaging studies have shown that
+<br/>extrastriate areas are involved in face processing in normal subjects
+<br/>using techniques such as positron emission tomography (PET)
+<br/>(Sergent et al., 1992; Haxby et al., 1994), functional magnetic
+<br/>resonance imaging (fMRI) (Puce et al., 1995; McCarthy et al.,
+<br/>1997; Kanwisher et al., 1997; Halgren et al., 1999), electroen-
+<br/>cephalography (EEG) (Allison et al., 1994; Bentin et al., 1996;
+<br/>George et al., 1996) and magnetoencephalography (MEG) (Link-
+<br/>enkaer-Hansen et al., 1998; Halgren et al., 2000). In the present
+<br/>study, we chose the same face stimuli from our earlier MEG study
+<br/>on complex object and face affect recognition that were shown to
+<br/>activate extrastriate areas well (Liu et al., 1999; Ioannides et al.,
+<br/>2000).
+<br/><b>Most of the earlier studies mentioned above, including ours</b><br/>have presented facial images centrally with a relatively large visual
+<br/>field covering both the fovea and parafovea. Central presentation
+<br/>of images activates widespread striate and extrastriate areas. Low
+<br/>order visual areas (V1/V2) corresponding to left – right – upper –
+<br/>lower visual field stimulation are therefore activated by the same
+</td><td>('2259342', 'Lichan Liu', 'lichan liu')</td><td>E-mail address: ioannides@postman.riken.jp (A.A. Ioannides).
+</td></tr><tr><td>f963967e52a5fd97fa3ebd679fd098c3cb70340e</td><td>Analysis, Interpretation, and Recognition of Facial
+<br/>Action Units and Expressions Using Neuro-Fuzzy
+<br/>Modeling
+<br/>and Ali A. Kiaei1
+<br/><b>DSP Lab, Sharif University of Technology, Tehran, Iran</b><br/><b>Institute for Studies in Fundamental Sciences (IPM), Tehran, Iran</b></td><td>('1736464', 'Mahmoud Khademi', 'mahmoud khademi')<br/>('1702826', 'Mohammad Hadi Kiapour', 'mohammad hadi kiapour')</td><td>{khademi@ce.,kiapour@ee.,manzuri@,kiaei@ce.}sharif.edu
+</td></tr><tr><td>f9e0209dc9e72d64b290d0622c1c1662aa2cc771</td><td>CONTRIBUTIONS TO BIOMETRIC RECOGNITION:
+<br/>MATCHING IDENTICAL TWINS AND LATENT FINGERPRINTS
+<br/>By
+<br/>A DISSERTATION
+<br/>Submitted
+<br/><b>to Michigan State University</b><br/>in partial fulfillment of the requirements
+<br/>for the degree of
+<br/>Computer Science– Doctor of Philosophy
+<br/>2013
+</td><td>('31508481', 'Alessandra Aparecida Paulino', 'alessandra aparecida paulino')</td><td></td></tr><tr><td>f92ade569cbe54344ffd3bb25efd366dcd8ad659</td><td>EFFECT OF SUPER RESOLUTION ON HIGH DIMENSIONAL FEATURES FOR
+<br/>UNSUPERVISED FACE RECOGNITION IN THE WILD
+<br/><b>University of Bridgeport, Bridgeport, CT 06604, USA</b></td><td>('40373065', 'Ahmed ElSayed', 'ahmed elsayed')<br/>('37374395', 'Ausif Mahmood', 'ausif mahmood')</td><td>Emails: aelsayed@my.bridgeport.edu, {mahmood,sobh}@bridgeport.edu
+</td></tr><tr><td>f96bdd1e2a940030fb0a89abbe6c69b8d7f6f0c1</td><td></td><td></td><td></td></tr><tr><td>f93606d362fcbe62550d0bf1b3edeb7be684b000</td><td>The Computer Journal Advance Access published February 1, 2012
+<br/><b>The Author 2012. Published by Oxford University Press on behalf of The British Computer Society. All rights reserved</b><br/>doi:10.1093/comjnl/bxs001
+<br/>Nearest Neighbor Classifier Based
+<br/>on Nearest Feature Decisions
+<br/><b>Machine Intelligence Group, School of Computer Science, Indian Institute of Information Technology and</b><br/><b>Queensland Micro- and Nanotechnology Centre and Grif th School of Engineering, Grif th University</b><br/>Management, Kerala, India
+<br/>Nathan, Australia
+<br/>High feature dimensionality of realistic datasets adversely affects the recognition accuracy of nearest
+<br/>neighbor (NN) classifiers. To address this issue, we introduce a nearest feature classifier that shifts
+<br/>the NN concept from the global-decision level to the level of individual features. Performance
+<br/><b>comparisons with 12 instance-based classi ers on 13 benchmark University of California Irvine</b><br/>classification datasets show average improvements of 6 and 3.5% in recognition accuracy and
+<br/>area under curve performance measures, respectively. The statistical significance of the observed
+<br/>performance improvements is verified by the Friedman test and by the post hoc Bonferroni–Dunn
+<br/>test. In addition, the application of the classifier is demonstrated on face recognition databases, a
+<br/>character recognition database and medical diagnosis problems for binary and multi-class diagnosis
+<br/>on databases including morphological and gene expression features.
+<br/>Keywords: nearest neighbors; classification; local features; local ranking
+<br/>Received 2 September 2011; revised 3 December 2011
+<br/>Handling editor: Ethem Alpaydin
+<br/>1.
+<br/>INTRODUCTION
+<br/>Automatic classification of patterns has been continuously and
+<br/>rigorously investigated for the last 30 years. Simple classifiers,
+<br/>based on the nearest neighbor (NN) principle, have been used
+<br/>to solve a wide range of classification problems [1–5]. The NN
+<br/>classification works on the idea of calculating global distances
+<br/>between patterns, followed by ranking to determine the NNs
+<br/>that best represent the class of a test pattern. Usually, distance
+<br/>metric measures are used to compute the distances between
+<br/>feature vectors. The accuracy of the calculated distances is
+<br/>affected by the quality of the features, which can be degraded by
+<br/>natural variability and measurement noise. Furthermore, some
+<br/>distance calculations are affected by falsely assumed correlation
+<br/>between different features. For example, Mahalanobis distance
+<br/>will include the comparison between poorly or uncorrelated
+<br/>features. This problem is more pronounced when the number
+<br/>of features in a pattern is very large because the irrelevant
+<br/>distance calculations can accumulate to a large value (for
+<br/>example,
+<br/>there will be many false correlations in gene
+<br/>expressions data that can have dimensionality higher than 104
+<br/>features). In addition to this problem, a considerable increase
+<br/>in dimensionality complicates the classifier implementations
+<br/>resulting in ‘curse of dimensionality’, where a possible
+<br/>convergence to a classification solution becomes very slow
+<br/>and inaccurate [6, 7]. The conventional solution to address
+<br/>these problems is to rely on feature extraction and feature
+<br/>selection methods [8–10]. However, unpredictability of natural
+<br/>variability in patterns makes processing a specific feature
+<br/>inapplicable to diverse pattern-recognition problems. Another
+<br/>approach to improve the classifier performance is by using
+<br/>machine learning techniques to learn the distance metrics
+<br/>[11–13]. These methods attempt to reduce the inaccuracies
+<br/>that occur with distance calculations. However, this solution
+<br/>tends to include optimization problems that suffer from
+<br/>high computational complexity and require reduced feature
+<br/>dimensionality, resulting in low accuracies when the feature
+<br/>vectors are highly dimensional and the number of intra-class
+<br/>gallery objects is low. Learning distance metrics can completely
+<br/>fail in high- and ultra-high-dimensional databases when the
+<br/>relevance and redundancy of features often become impossible
+<br/>to trace even with feature weighting or selection schemes.
+<br/>Owing to these reasons, performance improvement of the NN
+<br/>The Computer Journal, 2012
+</td><td>('1744784', 'Alex Pappachen James', 'alex pappachen james')<br/>('1697594', 'Sima Dimitrijev', 'sima dimitrijev')</td><td>For Permissions, please email: journals.permissions@oup.com
+<br/>Corresponding author: apj@ieee.org
+</td></tr><tr><td>f94f366ce14555cf0d5d34248f9467c18241c3ee</td><td>Deep Convolutional Neural Network in
+<br/>Deformable Part Models for Face Detection
+<br/><b>University of Science, Vietnam National University, HCMC</b><br/><b>School of Information Science, Japan Advanced Institute of Science and Technology</b></td><td>('2187730', 'Dinh-Luan Nguyen', 'dinh-luan nguyen')<br/>('34453615', 'Vinh-Tiep Nguyen', 'vinh-tiep nguyen')<br/>('1780348', 'Minh-Triet Tran', 'minh-triet tran')<br/>('2854896', 'Atsuo Yoshitaka', 'atsuo yoshitaka')</td><td>1212223@student.hcmus.edu.vn
+<br/>{nvtiep,tmtriet}@fit.hcmus.edu.vn
+<br/>ayoshi@jaist.ac.jp
+</td></tr><tr><td>f997a71f1e54d044184240b38d9dc680b3bbbbc0</td><td>Deep Cross Modal Learning for Caricature Verification and
+<br/>Identification(CaVINet)
+<br/>https://lsaiml.github.io/CaVINet/
+<br/><b>Indian Institute of Technology Ropar</b><br/><b>Indian Institute of Technology Ropar</b><br/><b>Indian Institute of Technology Ropar</b><br/>Narayanan C Krishnan
+<br/><b>Indian Institute of Technology Ropar</b></td><td>('6220011', 'Jatin Garg', 'jatin garg')<br/>('51152207', 'Himanshu Tolani', 'himanshu tolani')<br/>('41021778', 'Skand Vishwanath Peri', 'skand vishwanath peri')</td><td>2014csb1017@iitrpr.ac.in
+<br/>2014csb1015@iitrpr.ac.in
+<br/>pvskand@gmail.com
+<br/>ckn@iitrpr.ac.in
+</td></tr><tr><td>f909d04c809013b930bafca12c0f9a8192df9d92</td><td>Single Image Subspace for Face Recognition
+<br/><b>Nanjing University of Aeronautics and Astronautics, China</b><br/>1 Department of Computer Science and Engineering,
+<br/>2 National Key Laboratory for Novel Software Technology,
+<br/><b>Nanjing University, China</b></td><td>('39497343', 'Jun Liu', 'jun liu')<br/>('1680768', 'Songcan Chen', 'songcan chen')<br/>('1692625', 'Zhi-Hua Zhou', 'zhi-hua zhou')<br/>('2248421', 'Xiaoyang Tan', 'xiaoyang tan')</td><td>{j.liu, s.chen, x.tan}@nuaa.edu.cn
+<br/>zhouzh@nju.edu.cn
+</td></tr><tr><td>f9d1f12070e5267afc60828002137af949ff1544</td><td>Maximum Entropy Binary Encoding for Face Template Protection
+<br/>Rohit Kumar Pandey
+<br/><b>University at Buffalo, SUNY</b></td><td>('34872128', 'Yingbo Zhou', 'yingbo zhou')<br/>('3352136', 'Bhargava Urala Kota', 'bhargava urala kota')<br/>('1723877', 'Venu Govindaraju', 'venu govindaraju')</td><td>{rpandey, yingbozh, buralako, govind}@buffalo.edu
+</td></tr><tr><td>f9ccfe000092121a2016639732cdb368378256d5</td><td>Cognitive behaviour analysis based on facial
+<br/>information using depth sensors
+<br/><b>Kingston University London, University of Westminster London</b><br/><b>Imperial College London</b></td><td>('1686887', 'Juan Manuel Fernandez Montenegro', 'juan manuel fernandez montenegro')<br/>('2866802', 'Barbara Villarini', 'barbara villarini')<br/>('2140622', 'Athanasios Gkelias', 'athanasios gkelias')<br/>('1689047', 'Vasileios Argyriou', 'vasileios argyriou')</td><td>Juan.Fernandez@kingston.ac.uk,B.Villarini@westminster.ac.uk,A.Gkelias@
+<br/>imperial.ac.uk,Vasileios.Argyriou@kingston.ac.uk
+</td></tr><tr><td>f08e425c2fce277aedb51d93757839900d591008</td><td>Neural Motifs: Scene Graph Parsing with Global Context
+<br/><b>Paul G. Allen School of Computer Science and Engineering, University of Washington</b><br/><b>Allen Institute for Arti cial Intelligence</b><br/><b>School of Computer Science, Carnegie Mellon University</b><br/>https://rowanzellers.com/neuralmotifs
+</td><td>('2545335', 'Rowan Zellers', 'rowan zellers')<br/>('38094552', 'Sam Thomson', 'sam thomson')</td><td>{rowanz, my89, yejin}@cs.washington.edu, sthomson@cs.cmu.edu
+</td></tr><tr><td>f02f0f6fcd56a9b1407045de6634df15c60a85cd</td><td>Learning Low-shot facial representations via 2D warping
+<br/><b>RWTH Aachen University</b></td><td>('35362682', 'Shen Yan', 'shen yan')</td><td>shen.yan@rwth-aachen.de
+</td></tr><tr><td>f0cee87e9ecedeb927664b8da44b8649050e1c86</td><td></td><td></td><td></td></tr><tr><td>f0f4f16d5b5f9efe304369120651fa688a03d495</td><td>Temporal Generative Adversarial Nets
+<br/>Preferred Networks inc., Japan
+</td><td>('49160719', 'Masaki Saito', 'masaki saito')<br/>('8252749', 'Eiichi Matsumoto', 'eiichi matsumoto')</td><td>{msaito, matsumoto}@preferred.jp
+</td></tr><tr><td>f0ca31fd5cad07e84b47d50dc07db9fc53482a46</td><td>Advances in Pure Mathematics, 2012, 2, 226-242
+<br/>http://dx.doi.org/10.4236/apm.2012.24033 Published Online July 2012 (http://www.SciRP.org/journal/apm)
+<br/>Feature Patch Illumination Spaces and Karcher
+<br/>Compression for Face Recognition via
+<br/>Grassmannians
+<br/><b>California State University, Long Beach, USA</b><br/><b>Colorado State University, Fort Collins, USA</b><br/>Received January 7, 2012; revised February 20, 2012; accepted February 27, 2012
+</td><td>('2640182', 'Jen-Mei Chang', 'jen-mei chang')<br/>('30383278', 'Chris Peterson', 'chris peterson')<br/>('41211081', 'Michael Kirby', 'michael kirby')</td><td>Email: jen-mei.chang@csulb.edu, {peterson, Kirby}@math.colostate.edu
+</td></tr><tr><td>f0ae807627f81acb63eb5837c75a1e895a92c376</td><td>International Journal of Emerging Engineering Research and Technology
+<br/>Volume 3, Issue 12, December 2015, PP 128-133
+<br/>ISSN 2349-4395 (Print) & ISSN 2349-4409 (Online)
+<br/>Facial Landmark Detection using Ensemble of Cascaded
+<br/>Regressions
+<br/><b>Faculty of Telecommunications, Technical University, Sofia, Bulgaria</b><br/><b>Faculty of Telecommunications, Technical University, Sofia, Bulgaria</b></td><td>('6203133', 'Martin Penev', 'martin penev')<br/>('1734848', 'Ognian Boumbarov', 'ognian boumbarov')</td><td></td></tr><tr><td>f074e86e003d5b7a3b6e1780d9c323598d93f3bc</td><td>OPEN ACCESS
+<br/>ISSN 2075-1680
+<br/>Article
+<br/>Characteristic Number: Theory and Its Application to
+<br/>Shape Analysis
+<br/><b>School of Software, Dalian University of Technology, Tuqiang St. 321, Dalian 116620, China</b><br/><b>School of Mathematical Sciences, Dalian University of Technology, Linggong Rd. 2, Dalian</b><br/>Tel.: +86-411-87571777; Fax: +86-411-87571567.
+<br/>Received: 27 March 2014; in revised form: 28 April 2014 / Accepted: 28 April 2014 /
+<br/>Published: 15 May 2014
+</td><td>('1710408', 'Xin Fan', 'xin fan')<br/>('7864960', 'Zhongxuan Luo', 'zhongxuan luo')<br/>('1732068', 'Jielin Zhang', 'jielin zhang')<br/>('2758604', 'Xinchen Zhou', 'xinchen zhou')<br/>('2235253', 'Qi Jia', 'qi jia')<br/>('3136305', 'Daiyun Luo', 'daiyun luo')</td><td>E-Mails: xin.fan@ieee.org (X.F.); jiaqi7166@gmail.com (Q.J.)
+<br/>China; E-Mails: jielinzh@dlut.edu.cn (J.Z.); dasazxc@gmail.com (X.Z.); 419524597@qq.com (D.L.)
+<br/>* Author to whom correspondence should be addressed; E-Mail: zxluo@dlut.edu.cn;
+</td></tr><tr><td>f0a4a3fb6997334511d7b8fc090f9ce894679faf</td><td>Generative Face Completion
+<br/><b>University of California, Merced</b><br/>2Adobe Research
+</td><td>('1754382', 'Yijun Li', 'yijun li')<br/>('2391885', 'Sifei Liu', 'sifei liu')<br/>('1768964', 'Jimei Yang', 'jimei yang')<br/>('1715634', 'Ming-Hsuan Yang', 'ming-hsuan yang')</td><td>{yli62,sliu32,mhyang}@ucmerced.edu
+<br/>jimyang@adobe.com
+</td></tr><tr><td>f0681fc08f4d7198dcde803d69ca62f09f3db6c5</td><td>Spatiotemporal Features for Effective Facial
+<br/>Expression Recognition
+<br/>Hatice C¸ ınar Akakın and B¨ulent Sankur
+<br/><b>Bogazici University, Bebek</b><br/>Istanbul
+<br/>http://www.ee.boun.edu.tr
+</td><td></td><td>{hatice.cinar,bulent.sankur}@boun.edu.tr
+</td></tr><tr><td>f0f501e1e8726148d18e70c8e9f6feea9360d119</td><td>OULU 2015
+<br/>C 537
+<br/>U N I V E R S I TAT I S O U L U E N S I S
+<br/>U N I V E R S I TAT I S O U L U E N S I S
+<br/>CTECHNICA
+<br/>CTECHNICA
+<br/>C537etukansi.kesken.fm Page 1 Thursday, June 18, 2015 3:57 PM
+<br/><b>UNIVERSITY OF OULU P.O. Box 8000 FI-90014 UNIVERSITY OF OULU FINLAND</b><br/>A C T A U N I V E R S I T A T I S O U L U E N S I S
+<br/>ACTA
+<br/>ACTA
+<br/>Professor Esa Hohtola
+<br/><b>University Lecturer Veli-Matti Ulvinen</b><br/><b>University Lecturer Anu Soikkeli</b><br/>Publications Editor Kirsti Nurkkala
+<br/>ISBN 978-952-62-0872-5 (Paperback)
+<br/>ISBN 978-952-62-0873-2 (PDF)
+<br/>ISSN 0355-3213 (Print)
+<br/>ISSN 1796-2226 (Online)
+<br/>SOFTWARE-BASED
+<br/>COUNTERMEASURES TO 2D
+<br/>FACIAL SPOOFING ATTACKS
+<br/><b>UNIVERSITY OF OULU GRADUATE SCHOOL</b><br/><b>UNIVERSITY OF OULU</b><br/>FACULTY OF INFORMATION TECHNOLOGY AND ELECTRICAL ENGINEERING,
+<br/>DEPARTMENT OF COMPUTER SCIENCE AND ENGINEERING;
+<br/>INFOTECH OULU
+</td><td>('6433503', 'Santeri Palviainen', 'santeri palviainen')<br/>('3797304', 'Sanna Taskila', 'sanna taskila')<br/>('5451992', 'Olli Vuolteenaho', 'olli vuolteenaho')<br/>('6238085', 'Sinikka Eskelinen', 'sinikka eskelinen')<br/>('2165962', 'Jari Juga', 'jari juga')<br/>('5451992', 'Olli Vuolteenaho', 'olli vuolteenaho')<br/>('35709493', 'Jukka Komulainen', 'jukka komulainen')</td><td></td></tr><tr><td>f0398ee5291b153b716411c146a17d4af9cb0edc</td><td>LEARNING OPTICAL FLOW VIA DILATED NETWORKS AND OCCLUSION REASONING
+<br/><b>University of California, Merced</b><br/>5200 N Lake Rd, Merced, CA, US
+</td><td>('1749901', 'Yi Zhu', 'yi zhu')</td><td>{yzhu25, snewsam}@ucmerced.edu
+</td></tr><tr><td>f0f0e94d333b4923ae42ee195df17c0df62ea0b1</td><td>Scaling Manifold Ranking Based Image Retrieval
+<br/>†NTT Software Innovation Center, 3-9-11 Midori-cho Musashino-shi, Tokyo, Japan
+<br/>‡NTT Service Evolution Laboratories, 1-1 Hikarinooka Yokosuka-shi, Kanagawa, Japan
+<br/><b>California Institute of Technology, 1200 East California Boulevard Pasadena, California, USA</b><br/><b>Osaka University, 1-5 Yamadaoka, Suita-shi, Osaka, Japan</b></td><td>('32130106', 'Yasuhiro Fujiwara', 'yasuhiro fujiwara')<br/>('32285163', 'Go Irie', 'go irie')<br/>('46593534', 'Shari Kuroyama', 'shari kuroyama')<br/>('48075831', 'Makoto Onizuka', 'makoto onizuka')</td><td>{fujiwara.yasuhiro, irie.go}@lab.ntt.co.jp, kuroyama@caltech.edu, oni@acm.org
+</td></tr><tr><td>f06b015bb19bd3c39ac5b1e4320566f8d83a0c84</td><td></td><td></td><td></td></tr><tr><td>f0a3f12469fa55ad0d40c21212d18c02be0d1264</td><td>Sparsity Sharing Embedding for Face
+<br/>Verification
+<br/>Department of Electrical Engineering, KAIST, Daejeon, Korea
+</td><td>('2350325', 'Donghoon Lee', 'donghoon lee')<br/>('2857402', 'Hyunsin Park', 'hyunsin park')<br/>('8270717', 'Junyoung Chung', 'junyoung chung')<br/>('2126465', 'Youngook Song', 'youngook song')</td><td></td></tr><tr><td>f05ad40246656a977cf321c8299158435e3f3b61</td><td>Face Recognition Using Face Patch Networks
+<br/><b>The Chinese University of Hong Kong</b></td><td>('2312486', 'Chaochao Lu', 'chaochao lu')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')<br/>('1678783', 'Deli Zhao', 'deli zhao')</td><td>{cclu,dlzhao,xtang}@ie.cuhk.edu.hk
+</td></tr><tr><td>f02a6bccdaee14ab55ad94263539f4f33f1b15bb</td><td>Article
+<br/>Segment-Tube: Spatio-Temporal Action Localization
+<br/>in Untrimmed Videos with Per-Frame Segmentation
+<br/><b>Institute of Arti cial Intelligence and Robotics, Xi an Jiaotong University, Xi an, Shannxi 710049, China</b><br/>Received: 23 April 2018; Accepted: 16 May 2018; Published: 22 May 2018
+</td><td>('40367806', 'Le Wang', 'le wang')<br/>('46809347', 'Xuhuan Duan', 'xuhuan duan')<br/>('46324995', 'Qilin Zhang', 'qilin zhang')<br/>('1786361', 'Zhenxing Niu', 'zhenxing niu')<br/>('1745420', 'Gang Hua', 'gang hua')<br/>('1715389', 'Nanning Zheng', 'nanning zheng')</td><td>duanxuhuan0123@stu.xjtu.edu.cn (X.D.); nnzheng@xjtu.edu.cn (N.Z.)
+<br/>2 HERE Technologies, Chicago, IL 60606, USA; qilin.zhang@here.com
+<br/>3 Alibaba Group, Hangzhou 311121, China; zhenxing.nzx@alibaba-inc.com
+<br/>4 Microsoft Research, Redmond, WA 98052, USA; ganghua@microsoft.com
+<br/>* Correspondence: lewang@xjtu.edu.cn; Tel.: +86-29-8266-8672
+</td></tr><tr><td>f7dea4454c2de0b96ab5cf95008ce7144292e52a</td><td></td><td></td><td></td></tr><tr><td>f781e50caa43be13c5ceb13f4ccc2abc7d1507c5</td><td>MVA2005 IAPR Conference on Machine VIsion Applications, May 16-18, 2005 Tsukuba Science City, Japan
+<br/>12-1
+<br/>Towards Flexible and Intelligent Vision Systems
+<br/>– From Thresholding to CHLAC –
+<br/><b>University of Tokyo</b><br/>AISTy
+<br/><b>y National Institute of Advanced Industrial Science and Technology</b><br/>Umezono 1-1-1, Tsukuba-shi, Ibaraki-ken, 305-8568 Japan
+</td><td>('1809629', 'Nobuyuki Otsu', 'nobuyuki otsu')</td><td>Email: otsu.n@aist.go.jp
+</td></tr><tr><td>f7b4bc4ef14349a6e66829a0101d5b21129dcf55</td><td>LONG ET AL.: TOWARDS LIGHT-WEIGHT ANNOTATIONS: FIR FOR ZSL
+<br/>Towards Light-weight Annotations: Fuzzy
+<br/>Interpolative Reasoning for Zero-shot Image
+<br/>Classification
+<br/>1 Open Lab, School of Computing
+<br/><b>Newcastle University, UK</b><br/>2 Department of Computer Science and
+<br/>Digital Technologies, Northumbria Uni-
+<br/>versity, UK
+<br/><b>Inception Institute of Arti cial</b><br/>gence, UAE
+<br/>Intelli-
+</td><td>('50363618', 'Yang Long', 'yang long')<br/>('48272923', 'Yao Tan', 'yao tan')<br/>('34975328', 'Daniel Organisciak', 'daniel organisciak')<br/>('1706028', 'Longzhi Yang', 'longzhi yang')<br/>('40799321', 'Ling Shao', 'ling shao')</td><td>yang.long@ieee.org
+<br/>yao.tan@northumbria.ac.uk
+<br/>d.organisciak@gmail.com
+<br/>longzhi.yang@northumbria.ac.uk
+<br/>ling.shao@ieee.org
+</td></tr><tr><td>f7b422df567ce9813926461251517761e3e6cda0</td><td>FACE AGING WITH CONDITIONAL GENERATIVE ADVERSARIAL NETWORKS
+<br/>(cid:63) Orange Labs, 4 rue Clos Courtel, 35512 Cesson-S´evign´e, France
+<br/>† Eurecom, 450 route des Chappes, 06410 Biot, France
+</td><td>('3116433', 'Grigory Antipov', 'grigory antipov')<br/>('2341854', 'Moez Baccouche', 'moez baccouche')<br/>('1709849', 'Jean-Luc Dugelay', 'jean-luc dugelay')</td><td></td></tr><tr><td>f7824758800a7b1a386db5bd35f84c81454d017a</td><td>KEPLER: Keypoint and Pose Estimation of Unconstrained Faces by
+<br/>Learning Efficient H-CNN Regressors
+<br/>Department of Electrical and Computer Engineering, CFAR and UMIACS
+<br/><b>University of Maryland-College Park, USA</b></td><td>('50333013', 'Amit Kumar', 'amit kumar')<br/>('2943431', 'Azadeh Alavi', 'azadeh alavi')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td>{akumar14,azadeh,rama}@umiacs.umd.edu
+</td></tr><tr><td>f74917fc0e55f4f5682909dcf6929abd19d33e2e</td><td>Workshop track - ICLR 2018
+<br/>GAN QUALITY INDEX (GQI) BY GAN-INDUCED
+<br/>CLASSIFIER
+<br/><b>The City College and the Graduate Center</b><br/><b>The City University of New York</b><br/>Department of Electrical & Computer Engineering
+<br/><b>Northeastern University</b><br/>Microsoft Research
+</td><td>('3105254', 'Yuancheng Ye', 'yuancheng ye')<br/>('39092100', 'Yue Wu', 'yue wu')<br/>('1689145', 'Lijuan Wang', 'lijuan wang')<br/>('2249952', 'Yinpeng Chen', 'yinpeng chen')<br/>('3419208', 'Zicheng Liu', 'zicheng liu')</td><td>yye@gradcenter.cuny.edu
+<br/>ytian@ccny.cuny.edu
+<br/>yuewu@ece.neu.edu
+<br/>{lijuanw, yiche, zliu, zhang}@microsoft.com
+</td></tr><tr><td>f740bac1484f2f2c70777db6d2a11cf4280081d6</td><td>Soft Locality Preserving Map (SLPM) for Facial Expression
+<br/>Recognition
+<br/>a Centre for Signal Processing, Department of Electronic and Information Engineering, The Hong
+<br/><b>Kong Polytechnic University, Kowloon, Hong Kong</b><br/><b>b Computer Science, School of Electrical and Data Engineering, University of Technology, Sydney</b><br/>Australia
+</td><td>('13671251', 'Cigdem Turan', 'cigdem turan')<br/>('1703078', 'Kin-Man Lam', 'kin-man lam')<br/>('1706670', 'Xiangjian He', 'xiangjian he')</td><td>E-mail addresses: cigdem.turan@connect.polyu.hk (C. Turan), enkmlam@polyu.edu.hk (K.-M. Lam),
+<br/>xiangjian.he@uts.edu.au (X. He)
+</td></tr><tr><td>f78fe101b21be36e98cd3da010051bb9b9829a1e</td><td>Hindawi
+<br/>Computational Intelligence and Neuroscience
+<br/>Volume 2018, Article ID 7208794, 10 pages
+<br/>https://doi.org/10.1155/2018/7208794
+<br/>Research Article
+<br/>Unsupervised Domain Adaptation for Facial Expression
+<br/>Recognition Using Generative Adversarial Networks
+<br/>1,2
+<br/><b>State Key Laboratory of Precision Measuring Technology and Instruments, Tianjin University, 300072, China</b><br/><b>Key Laboratory of MOEMS of the Ministry of Education, Tianjin University, 300072, China</b><br/>Received 14 April 2018; Accepted 19 June 2018; Published 9 July 2018
+<br/>Academic Editor: Ant´onio D. P. Correia
+<br/>which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
+<br/>In the facial expression recognition task, a good-performing convolutional neural network (CNN) model trained on one dataset
+<br/>(source dataset) usually performs poorly on another dataset (target dataset). This is because the feature distribution of the same
+<br/>emotion varies in different datasets. To improve the cross-dataset accuracy of the CNN model, we introduce an unsupervised
+<br/>domain adaptation method, which is especially suitable for unlabelled small target dataset. In order to solve the problem of lack of
+<br/>samples from the target dataset, we train a generative adversarial network (GAN) on the target dataset and use the GAN generated
+<br/>samples to fine-tune the model pretrained on the source dataset. In the process of fine-tuning, we give the unlabelled GAN generated
+<br/>samples distributed pseudolabels dynamically according to the current prediction probabilities. Our method can be easily applied
+<br/>to any existing convolutional neural networks (CNN). We demonstrate the effectiveness of our method on four facial expression
+<br/>recognition datasets with two CNN structures and obtain inspiring results.
+<br/>1. Introduction
+<br/>Facial expressions recognition (FER) has a wide spectrum of
+<br/>application potentials in human-computer interaction, cog-
+<br/>nitive psychology, computational neuroscience, and medical
+<br/>healthcare. In recent years, convolutional neural networks
+<br/>(CNN) have achieved many exciting results in artificial
+<br/>intelligent and pattern recognition and have been successfully
+<br/>used in facial expression recognition [1]. Jaiswal et al. [2]
+<br/>present a novel approach to facial action unit detection
+<br/>using a combination of Convolutional and Bidirectional
+<br/>Long Short-Term Memory Neural Networks (CNN-BLSTM),
+<br/>which jointly learns shape, appearance, and dynamics in a
+<br/>deep learning manner. You et al. [3] introduce a new data
+<br/>set, which contains more than 3 million weakly labelled
+<br/>images of different emotions. Esser et al. [4] develop a model
+<br/>for efficient neuromorphic computing using the Deep CNN
+<br/>technique. H-W.Ng et al. [5] develop a cascading fine-tuning
+<br/>approach for emotion recognition. Neagoe et al. [6] propose
+<br/>a model for subject independent emotion recognition from
+<br/>facial expressions using combined CNN and DBN. However,
+<br/>these CNN models are often trained and tested on the
+<br/>same dataset, whereas the cross-dataset performance is less
+<br/>concerned. Although the basic emotions defined by Ekman
+<br/>and Friesen [7], anger, disgust, fear, happy, sadness, and
+<br/>surprise, are believed to be universal, the way of expressing
+<br/>these emotions can be quite diverse across different cultures,
+<br/>ages, and genders [8]. As a result, a well-trained CNN model,
+<br/>having high recognition accuracy on the training dataset,
+<br/>usually performs poorly on other datasets. In order to make
+<br/>the facial expression recognition system more practical, it
+<br/>is necessary to improve the generalization ability of the
+<br/>recognition model.
+<br/>In this paper, we aim at improving the cross-dataset
+<br/>accuracy of a CNN model on facial expression recognition.
+<br/>One way to solve this problem is to rebuild models from
+<br/>scratch using large-scale newly collected samples. Large
+<br/>amounts of training samples, such as the dataset ImageNet [9]
+<br/>containing over 15 million images, can reduce the overfitting
+<br/>problem and help to train a reliable model. However, for
+<br/>facial expression recognition,
+<br/>it is expensive and some-
+<br/>times even impossible to get enough labelled training data.
+<br/>Therefore, we proposed an unsupervised domain adaptation
+<br/>method, which is especially suitable for unlabelled small
+</td><td>('47119020', 'Xiaoqing Wang', 'xiaoqing wang')<br/>('36142058', 'Xiangjun Wang', 'xiangjun wang')<br/>('3332231', 'Yubo Ni', 'yubo ni')<br/>('47119020', 'Xiaoqing Wang', 'xiaoqing wang')</td><td>Correspondence should be addressed to Xiangjun Wang; tjuxjw@126.com
+</td></tr><tr><td>f79c97e7c3f9a98cf6f4a5d2431f149ffacae48f</td><td>Provided by the author(s) and NUI Galway in accordance with publisher policies. Please cite the published
+<br/>version when available.
+<br/>Title
+<br/>On color texture normalization for active appearance models
+<br/>Author(s)
+<br/>Ionita, Mircea C.; Corcoran, Peter M.; Buzuloiu, Vasile
+<br/>Publication
+<br/>Date
+<br/>2009-05-12
+<br/>Publication
+<br/>Information
+<br/>Ionita, M. C., Corcoran, P., & Buzuloiu, V. (2009). On Color
+<br/>Texture Normalization for Active Appearance Models. Image
+<br/>Processing, IEEE Transactions on, 18(6), 1372-1378.
+<br/>Publisher
+<br/>IEEE
+<br/>Link to
+<br/>publisher's
+<br/>version
+<br/>http://dx.doi.org/10.1109/TIP.2009.2017163
+<br/>Item record
+<br/>http://hdl.handle.net/10379/1350
+<br/>Some rights reserved. For more information, please see the item record link above.
+<br/>Downloaded 2017-06-17T22:38:27Z
+</td><td></td><td></td></tr><tr><td>f7452a12f9bd927398e036ea6ede02da79097e6e</td><td></td><td></td><td></td></tr><tr><td>f7a271acccf9ec66c9b114d36eec284fbb89c7ef</td><td>Open Access
+<br/>Research
+<br/>Does attractiveness influence condom
+<br/>use intentions in heterosexual men?
+<br/>An experimental study
+<br/>To cite: Eleftheriou A,
+<br/>Bullock S, Graham CA, et al.
+<br/>Does attractiveness influence
+<br/>condom use intentions in
+<br/>heterosexual men?
+<br/>An experimental study. BMJ
+<br/>Open 2016;6:e010883.
+<br/>doi:10.1136/bmjopen-2015-
+<br/>010883
+<br/>▸ Prepublication history for
+<br/>this paper is available online.
+<br/>To view these files please
+<br/>visit the journal online
+<br/>(http://dx.doi.org/10.1136/
+<br/>bmjopen-2015-010883).
+<br/>Received 17 December 2015
+<br/>Revised 1 March 2016
+<br/>Accepted 7 April 2016
+<br/>1Department of Electronics
+<br/>and Computer Science,
+<br/><b>University of Southampton</b><br/>Southampton, UK
+<br/><b>Institute for Complex</b><br/>Systems Simulation,
+<br/><b>University of Southampton</b><br/>Southampton, UK
+<br/>3Department of Computer
+<br/><b>Science, University of Bristol</b><br/>Bristol, UK
+<br/>4Centre for Sexual Health
+<br/>Research, Department of
+<br/><b>Psychology, University of</b><br/>Southampton, Southampton,
+<br/>UK
+<br/>Correspondence to
+</td><td>('6093065', 'Anastasia Eleftheriou', 'anastasia eleftheriou')<br/>('1733871', 'Seth Bullock', 'seth bullock')<br/>('4712904', 'Cynthia A Graham', 'cynthia a graham')<br/>('48479171', 'Nicole Stone', 'nicole stone')<br/>('50227141', 'Roger Ingham', 'roger ingham')<br/>('6093065', 'Anastasia Eleftheriou', 'anastasia eleftheriou')</td><td>ae2n12@soton.ac.uk
+</td></tr><tr><td>f7093b138fd31956e30d411a7043741dcb8ca4aa</td><td>Hierarchical Clustering in Face Similarity Score
+<br/>Space
+<br/>Jason Grant and Patrick Flynn
+<br/>Department of Computer Science and Engineering
+<br/><b>University of Notre Dame</b><br/>Notre Dame, IN 46556
+</td><td></td><td></td></tr><tr><td>f7dcadc5288653ec6764600c7c1e2b49c305dfaa</td><td>Copyright
+<br/>by
+<br/>Adriana Ivanova Kovashka
+<br/>2014
+</td><td></td><td></td></tr><tr><td>f7de943aa75406fe5568fdbb08133ce0f9a765d4</td><td>Project 1.5: Human Identification at a Distance - Hornak, Adjeroh, Cukic, Gautum, & Ross
+<br/>Project 1.5
+<br/>Biometric Identification and Surveillance1
+<br/>Year 5 Deliverable 
+<br/>Technical Report: 
+<br/>and
+<br/>Research Challenges in Biometrics
+<br/>Indexed biography of relevant biometric research literature
+<br/>Donald Adjeroh, Bojan Cukic, Arun Ross 
+<br/>April, 2014  
+<br/>                                                            
+<br/>1 "This research was supported by the United States Department of Homeland Security through the National Center for Border Security
+<br/>and Immigration (BORDERS) under grant number 2008-ST-061-BS0002. However, any opinions, findings, and conclusions or
+<br/>recommendations in this document are those of the authors and do not necessarily reflect views of the United States Department of
+<br/>Homeland Security."
+</td><td>('4800511', 'Don Adjeroh', 'don adjeroh')<br/>('1702603', 'Bojan Cukic', 'bojan cukic')<br/>('1698707', 'Arun Ross', 'arun ross')</td><td>donald.adjeroh@mail.wvu.edu; bojan.cukic@mail.wvu.edu; arun.ross@mail.wvu.edu
+</td></tr><tr><td>f75852386e563ca580a48b18420e446be45fcf8d</td><td>ILLUMINATION INVARIANT FACE RECOGNITION
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>ENEE 631: Digital Image and Video Processing
+<br/>Instructor: Dr. K. J. Ray Liu
+<br/>Term Project - Spring 2006
+<br/>1.
+<br/>INTRODUCTION
+<br/>
+<br/>
+<br/>The performance of the Face Recognition algorithms is severely affected by two
+<br/>important factors: the change in Pose and Illumination conditions of the subjects. The
+<br/>changes in Illumination conditions of the subjects can be so drastic that, the variation in
+<br/>lighting will be of the similar order as that of the variation due to the change in subjects
+<br/>[1] and this can result in misclassification.
+<br/>
+<br/> For example, in the acquisition of the face of a person from a real time video, the
+<br/>ambient conditions will cause different lighting variations on the tracked face. Some
+<br/>examples of images with different illumination conditions are shown in Fig. 1. In this
+<br/>project, we study some algorithms that are capable of performing Illumination Invariant
+<br/>Face Recognition. The performances of these algorithms were compared on the CMU-
+<br/>Illumination dataset [13], by using the entire face as the input to the algorithms. Then, a
+<br/>model of dividing the face into four regions is proposed and the performance of the
+<br/>algorithms on these new features is analyzed.
+<br/>
+<br/>
+</td><td>('33692583', 'Raghuraman Gopalan', 'raghuraman gopalan')</td><td>raghuram@umd.edu
+</td></tr><tr><td>f7c50d2be9fba0e4527fd9fbe3095e9d9a94fdd3</td><td>Large Margin Multi-Metric Learning for Face
+<br/>and Kinship Verification in the Wild
+<br/><b>School of EEE, Nanyang Technological University, Singapore</b><br/>2Advanced Digital Sciences Center, Singapore
+</td><td>('34651153', 'Junlin Hu', 'junlin hu')<br/>('1697700', 'Jiwen Lu', 'jiwen lu')<br/>('34316743', 'Junsong Yuan', 'junsong yuan')<br/>('1689805', 'Yap-Peng Tan', 'yap-peng tan')</td><td></td></tr><tr><td>f78863f4e7c4c57744715abe524ae4256be884a9</td><td></td><td></td><td></td></tr><tr><td>f77c9bf5beec7c975584e8087aae8d679664a1eb</td><td>Local Deep Neural Networks for Age and Gender Classification
+<br/>March 27, 2017
+</td><td>('9949538', 'Zukang Liao', 'zukang liao')<br/>('2403354', 'Stavros Petridis', 'stavros petridis')<br/>('1694605', 'Maja Pantic', 'maja pantic')</td><td></td></tr><tr><td>f7ba77d23a0eea5a3034a1833b2d2552cb42fb7a</td><td>This is a pre-print of the original paper accepted at the International Joint Conference on Biometrics (IJCB) 2017.
+<br/>LOTS about Attacking Deep Features
+<br/>Vision and Security Technology (VAST) Lab
+<br/><b>University of Colorado, Colorado Springs, USA</b></td><td>('2974221', 'Andras Rozsa', 'andras rozsa')<br/>('1760117', 'Terrance E. Boult', 'terrance e. boult')</td><td>{arozsa,mgunther,tboult}@vast.uccs.edu
+</td></tr><tr><td>e8686663aec64f4414eba6a0f821ab9eb9f93e38</td><td>IMPROVING SHAPE-BASED FACE RECOGNITION BY MEANS OF A SUPERVISED
+<br/>DISCRIMINANT HAUSDORFF DISTANCE
+<br/>J.L. Alba
+<br/>, A. Pujol
+<br/>††
+<br/>, A. L´opez
+<br/>†††
+<br/>and J.J. Villanueva
+<br/>†††
+<br/><b>University of Vigo, Spain</b><br/>†††Centre de Visio per Computador, Universitat Autonoma de Barcelona, Spain
+<br/>††Digital Pointer MVT
+</td><td></td><td></td></tr><tr><td>e82360682c4da11f136f3fccb73a31d7fd195694</td><td><b>AALTO UNIVERSITY</b><br/>SCHOOL OF SCIENCE AND TECHNOLOGY
+<br/>Faculty of Information and Natural Science
+<br/>Department of Information and Computer Science
+<br/>Online Face Recognition with
+<br/>Application to Proactive Augmented
+<br/>Reality
+<br/>Master’s Thesis submitted in partial fulfillment of the requirements for the
+<br/>degree of Master of Science in Technology.
+<br/>Espoo, May 25, 2010
+<br/>Supervisor:
+<br/>Instructor:
+<br/>Professor Erkki Oja
+</td><td>('1700492', 'Jing Wu', 'jing wu')<br/>('1758971', 'Markus Koskela', 'markus koskela')</td><td></td></tr><tr><td>e8410c4cd1689829c15bd1f34995eb3bd4321069</td><td></td><td></td><td></td></tr><tr><td>e8fdacbd708feb60fd6e7843b048bf3c4387c6db</td><td>Deep Learning
+<br/>Hinnerup Net A/S
+<br/>www.hinnerup.net
+<br/>July 4, 2014
+<br/>Introduction
+<br/>Deep learning is a topic in the field of artificial intelligence (AI) and is a relatively
+<br/>new research area although based on the popular artificial neural networks (supposedly
+<br/>mirroring brain function). With the development of the perceptron in the 1950s and
+<br/>1960s by Frank RosenBlatt, research began on artificial neural networks. To further
+<br/>mimic the architectural depth of the brain, researchers wanted to train a deep multi-
+<br/>layer neural network – this, however, did not happen until Geoffrey Hinton in 2006
+<br/>introduced Deep Belief Networks [1].
+<br/>Recently, the topic of deep learning has gained public interest. Large web companies such
+<br/>as Google and Facebook have a focused research on AI and an ever increasing amount
+<br/>of compute power, which has led to researchers finally being able to produce results
+<br/>that are of interest to the general public. In July 2012 Google trained a deep learning
+<br/>network on YouTube videos with the remarkable result that the network learned to
+<br/>recognize humans as well as cats [6], and in January this year Google successfully used
+<br/>deep learning on Street View images to automatically recognize house numbers with
+<br/>an accuracy comparable to that of a human operator [5]. In March this year Facebook
+<br/>announced their DeepFace algorithm that is able to match faces in photos with Facebook
+<br/>users almost as accurately as a human can do [9].
+<br/>Deep learning and other AI are here to stay and will become more and more present in
+<br/>our daily lives, so we had better make ourselves acquainted with the technology. Let’s
+<br/>dive into the deep water and try not to drown!
+<br/>Data Representations
+<br/>Before presenting data to an AI algorithm, we would normally prepare the data to make
+<br/>it feasible to work with. For instance, if the data consists of images, we would take each
+</td><td></td><td></td></tr><tr><td>e8f0f9b74db6794830baa2cab48d99d8724e8cb6</td><td>Active Image Labeling and Its Application to
+<br/>Facial Action Labeling
+<br/><b>Electrical, Computer, Rensselaer Polytechnic Institute</b><br/><b>Visualization and Computer Vision Lab, GE Global Research Center</b></td><td>('40396543', 'Lei Zhang', 'lei zhang')<br/>('1686235', 'Yan Tong', 'yan tong')<br/>('1726583', 'Qiang Ji', 'qiang ji')</td><td>zhangl2@rpi.edu,tongyan@research.ge.com,qji@ecse.rpi.edu
+</td></tr><tr><td>e8b2a98f87b7b2593b4a046464c1ec63bfd13b51</td><td>CMS-RCNN: Contextual Multi-Scale
+<br/>Region-based CNN for Unconstrained Face
+<br/>Detection
+</td><td>('3117715', 'Chenchen Zhu', 'chenchen zhu')<br/>('3049981', 'Yutong Zheng', 'yutong zheng')<br/>('1769788', 'Khoa Luu', 'khoa luu')<br/>('1794486', 'Marios Savvides', 'marios savvides')</td><td></td></tr><tr><td>e87d6c284cdd6828dfe7c092087fbd9ff5091ee4</td><td>Unsupervised Creation of Parameterized Avatars
+<br/>1Facebook AI Research
+<br/><b>School of Computer Science, Tel Aviv University</b></td><td>('1776343', 'Lior Wolf', 'lior wolf')<br/>('2188620', 'Yaniv Taigman', 'yaniv taigman')<br/>('33964593', 'Adam Polyak', 'adam polyak')</td><td></td></tr><tr><td>e8523c4ac9d7aa21f3eb4062e09f2a3bc1eedcf7</td><td>Towards End-to-End Face Recognition through Alignment Learning
+<br/><b>Tsinghua University</b><br/>Beijing, China, 100084
+</td><td>('8802368', 'Yuanyi Zhong', 'yuanyi zhong')<br/>('1752427', 'Jiansheng Chen', 'jiansheng chen')<br/>('39071060', 'Bo Huang', 'bo huang')</td><td>zhongyy13@mails.tsinghua.edu.cn, jschenthu@mail.tsinghua.edu.cn, huangb14@mails.tsinghua.edu.cn
+</td></tr><tr><td>e85a255a970ee4c1eecc3e3d110e157f3e0a4629</td><td>Fusing Hierarchical Convolutional Features for Human Body Segmentation and
+<br/>Clothing Fashion Classification
+<br/><b>School of Computer Science, Wuhan University, P.R. China</b></td><td>('47294008', 'Zheng Zhang', 'zheng zhang')<br/>('3127916', 'Chengfang Song', 'chengfang song')<br/>('4793870', 'Qin Zou', 'qin zou')</td><td>E-mails: {zhangzheng, songchf, qzou}@whu.edu.cn
+</td></tr><tr><td>e8c9dcbf56714db53063b9c367e3e44300141ff6</td><td>Automated FACS face analysis benefits from the addition of velocity
+<br/>Get The FACS Fast:
+<br/>Timothy R. Brick
+<br/><b>University of Virginia</b><br/>Charlottesville, VA 22904
+<br/>Michael D. Hunter
+<br/><b>University of Virginia</b><br/>Charlottesville, VA 22904
+<br/>Jeffrey F. Cohn
+<br/><b>University of Pittsburgh</b><br/>Pittsburgh, PA 15260
+</td><td></td><td>tbrick@virginia.edu
+<br/>mhunter@virginia.edu
+<br/>jeffcohn@cs.cmu.edu
+</td></tr><tr><td>e8d1b134d48eb0928bc999923a4e092537e106f6</td><td>WEIGHTED MULTI-REGION CONVOLUTIONAL NEURAL NETWORK FOR ACTION
+<br/>RECOGNITION WITH LOW-LATENCY ONLINE PREDICTION
+<br/><b>cid:63)University of Science and Technology of China, Hefei, Anhui, China</b><br/>†HERE Technologies, Chicago, Illinois, USA
+</td><td>('49417387', 'Yunfeng Wang', 'yunfeng wang')<br/>('38272296', 'Wengang Zhou', 'wengang zhou')<br/>('46324995', 'Qilin Zhang', 'qilin zhang')<br/>('49897466', 'Xiaotian Zhu', 'xiaotian zhu')<br/>('7179232', 'Houqiang Li', 'houqiang li')</td><td></td></tr><tr><td>e8c6c3fc9b52dffb15fe115702c6f159d955d308</td><td>13
+<br/>Linear Subspace Learning for
+<br/>Facial Expression Analysis
+<br/>Philips Research
+<br/>The Netherlands
+<br/>1. Introduction
+<br/>Facial expression, resulting from movements of the facial muscles, is one of the most
+<br/>powerful, natural, and immediate means for human beings to communicate their emotions
+<br/>and intentions. Some examples of facial expressions are shown in Fig. 1. Darwin (1872) was
+<br/>the first to describe in detail the specific facial expressions associated with emotions in
+<br/>animals and humans; he argued that all mammals show emotions reliably in their faces.
+<br/>Psychological studies (Mehrabian, 1968; Ambady & Rosenthal, 1992) indicate that facial
+<br/>expressions, with other non-verbal cues, play a major and fundamental role in face-to-face
+<br/>communication.
+<br/>Fig. 1. Facial expressions of George W. Bush.
+<br/>Machine analysis of facial expressions, enabling computers to analyze and interpret facial
+<br/>expressions as humans do, has many important applications including intelligent human-
+<br/>computer interaction, computer animation, surveillance and security, medical diagnosis,
+<br/>law enforcement, and awareness system (Shan, 2007). Driven by its potential applications
+<br/>and theoretical interests of cognitive and psychological scientists, automatic facial
+<br/>expression analysis has attracted much attention in last two decades (Pantic & Rothkrantz,
+<br/>2000a; Fasel & Luettin, 2003; Tian et al, 2005; Pantic & Bartlett, 2007). It has been studied in
+<br/>multiple disciplines such as psychology, cognitive science, computer vision, pattern
+<br/>Source: Machine Learning, Book edited by: Abdelhamid Mellouk and Abdennacer Chebira,
+<br/> ISBN 978-3-902613-56-1, pp. 450, February 2009, I-Tech, Vienna, Austria
+<br/>www.intechopen.com
+</td><td>('10795229', 'Caifeng Shan', 'caifeng shan')</td><td></td></tr><tr><td>e8b3a257a0a44d2859862cdec91c8841dc69144d</td><td>Liquid Pouring Monitoring via
+<br/>Rich Sensory Inputs
+<br/><b>National Tsing Hua University, Taiwan</b><br/><b>Stanford University, USA</b></td><td>('27555915', 'Tz-Ying Wu', 'tz-ying wu')<br/>('9618379', 'Juan-Ting Lin', 'juan-ting lin')<br/>('27538483', 'Chan-Wei Hu', 'chan-wei hu')<br/>('9200530', 'Juan Carlos Niebles', 'juan carlos niebles')<br/>('46611107', 'Min Sun', 'min sun')</td><td>{gina9726, brade31919, johnsonwang0810, huchanwei1204}@gmail.com,
+<br/>sunmin@ee.nthu.edu.tw
+<br/>jniebles@cs.stanford.edu
+</td></tr><tr><td>fa90b825346a51562d42f6b59a343b98ea2e501a</td><td>Modeling Naive Psychology of Characters in Simple Commonsense Stories
+<br/><b>Paul G. Allen School of Computer Science and Engineering, University of Washington</b><br/><b>Allen Institute for Arti cial Intelligence</b><br/><b>Information Sciences Institute and Computer Science, University of Southern California</b></td><td>('2516777', 'Hannah Rashkin', 'hannah rashkin')<br/>('2691021', 'Antoine Bosselut', 'antoine bosselut')<br/>('2729164', 'Maarten Sap', 'maarten sap')<br/>('1710034', 'Kevin Knight', 'kevin knight')<br/>('1699545', 'Yejin Choi', 'yejin choi')</td><td>{hrashkin,msap,antoineb,yejin}@cs.washington.edu
+<br/>knight@isi.edu
+</td></tr><tr><td>fab83bf8d7cab8fe069796b33d2a6bd70c8cefc6</td><td>Draft: Evaluation Guidelines for Gender
+<br/>Classification and Age Estimation
+<br/>July 1, 2011
+<br/>Introduction
+<br/>In previous research on gender classification and age estimation did not use a
+<br/>standardised evaluation procedure. This makes comparison the different ap-
+<br/>proaches difficult.
+<br/>Thus we propose here a benchmarking and evaluation protocol for gender
+<br/>classification as well as age estimation to set a common ground for future re-
+<br/>search in these two areas.
+<br/>The evaluations are designed such that there is one scenario under controlled
+<br/>labratory conditions and one under uncontrolled real life conditions.
+<br/>The datasets were selected with the criteria of being publicly available for
+<br/>research purposes.
+<br/>File lists for the folds corresponding to the individual benchmarking proto-
+<br/>cols will be provided over our website at http://face.cs.kit.edu/befit. We
+<br/>will provide two kinds of folds for each of the tasks and conditions: one set of
+<br/>folds using the whole dataset and one set of folds using a reduced dataset, which
+<br/>is approximately balanced in terms of age, gender and ethnicity.
+<br/>2 Gender Classification
+<br/>In this task the goal is to determine the gender of the persons depicted in the
+<br/>individual images.
+<br/>2.1 Data
+<br/>In previous works one of the most commonly used databases is the Feret database [1,
+<br/>2]. We decided here not to take this database, because of its low number of im-
+<br/>ages.
+</td><td>('40303076', 'Tobias Gehrig', 'tobias gehrig')<br/>('39504159', 'Matthias Steiner', 'matthias steiner')</td><td>{tobias.gehrig, ekenel}@kit.edu
+</td></tr><tr><td>faeefc5da67421ecd71d400f1505cfacb990119c</td><td>Original research
+<br/>published: 20 November 2017
+<br/>doi: 10.3389/frobt.2017.00061
+<br/>PastVision+: Thermovisual inference
+<br/>of recent Medicine intake by
+<br/>Detecting heated Objects and
+<br/>cooled lips
+<br/><b>Intelligent Systems Laboratory, Halmstad University, Halmstad, Sweden</b><br/>This article addresses the problem of how a robot can infer what a person has done
+<br/>recently, with a focus on checking oral medicine intake in dementia patients. We present
+<br/>PastVision+, an approach showing how thermovisual cues in objects and humans can
+<br/>be leveraged to infer recent unobserved human–object interactions. Our expectation
+<br/>is that this approach can provide enhanced speed and robustness compared to exist-
+<br/>ing methods, because our approach can draw inferences from single images without
+<br/>needing to wait to observe ongoing actions and can deal with short-lasting occlusions;
+<br/>when combined, we expect a potential improvement in accuracy due to the extra infor-
+<br/>mation from knowing what a person has recently done. To evaluate our approach, we
+<br/>obtained some data in which an experimenter touched medicine packages and a glass
+<br/>of water to simulate intake of oral medicine, for a challenging scenario in which some
+<br/>touches were conducted in front of a warm background. Results were promising, with
+<br/>a detection accuracy of touched objects of 50% at the 15 s mark and 0% at the 60 s
+<br/>mark, and a detection accuracy of cooled lips of about 100 and 60% at the 15 s mark
+<br/>for cold and tepid water, respectively. Furthermore, we conducted a follow-up check for
+<br/>another challenging scenario in which some participants pretended to take medicine or
+<br/>otherwise touched a medicine package: accuracies of inferring object touches, mouth
+<br/>touches, and actions were 72.2, 80.3, and 58.3% initially, and 50.0, 81.7, and 50.0%
+<br/>at the 15 s mark, with a rate of 89.0% for person identification. The results suggested
+<br/>some areas in which further improvements would be possible, toward facilitating robot
+<br/>inference of human actions, in the context of medicine intake monitoring.
+<br/>Keywords: thermovisual inference, touch detection, medicine intake, action recognition, monitoring, near past
+<br/>inference
+<br/>1. inTrODUcTiOn
+<br/>This article addresses the problem of how a robot can detect what a person has touched recently,
+<br/>with a focus on checking oral medicine intake in dementia patients.
+<br/>Detecting recent touches would be useful because touch is a typical component of many human–
+<br/>object interactions; moreover, knowing which objects have been touched allows inference into
+<br/>what actions have been conducted, which is an important requirement for robots to collaborate
+<br/>effectively with people (Vernon et al., 2016). For example, touches to a stove, door handle, or pill
+<br/>bottle can occur as a result of cooking, leaving one’s house, or taking medicine, all of which could
+<br/>potentially be dangerous for a person with dementia, if they forget to turn off the heat, lose their
+<br/>way, or make a mistake. Here, we focus on the latter problem of medicine adherence—whose
+<br/>Edited by:
+<br/>Alberto Montebelli,
+<br/><b>University of Sk vde, Sweden</b><br/>Reviewed by:
+<br/>Sam Neymotin,
+<br/><b>Brown University, United States</b><br/>Per Backlund,
+<br/><b>University of Sk vde, Sweden</b><br/>Fernando Bevilacqua,
+<br/><b>University of Sk vde, Sweden</b><br/>(in collaboration with Per Backlund)
+<br/>*Correspondence:
+<br/>Specialty section:
+<br/>This article was submitted to
+<br/>Computational Intelligence,
+<br/>a section of the journal
+<br/>Frontiers in Robotics and AI
+<br/>Received: 15 May 2017
+<br/>Accepted: 02 November 2017
+<br/>Published: 20 November 2017
+<br/>Citation:
+<br/>Cooney M and Bigun J (2017)
+<br/>PastVision+: Thermovisual Inference
+<br/>of Recent Medicine Intake by
+<br/>Detecting Heated Objects
+<br/>and Cooled Lips.
+<br/>Front. Robot. AI 4:61.
+<br/>doi: 10.3389/frobt.2017.00061
+<br/>Frontiers in Robotics and AI | www.frontiersin.org
+<br/>November 2017 | Volume 4 | Article 61
+</td><td>('7149684', 'Martin Cooney', 'martin cooney')<br/>('5058247', 'Josef Bigun', 'josef bigun')<br/>('7149684', 'Martin Cooney', 'martin cooney')</td><td>martin.daniel.cooney@gmail.com
+</td></tr><tr><td>fa4f59397f964a23e3c10335c67d9a24ef532d5c</td><td>DAP3D-Net: Where, What and How Actions Occur in Videos?
+<br/>Department of Computer Science and Digital Technologies
+<br/><b>Northumbria University, Newcastle upon Tyne, NE1 8ST, UK</b></td><td>('40241836', 'Li Liu', 'li liu')<br/>('47942896', 'Yi Zhou', 'yi zhou')<br/>('40799321', 'Ling Shao', 'ling shao')</td><td>li2.liu@northumbria.ac.uk, m.y.yu@ieee.org, ling.shao@ieee.org
+</td></tr><tr><td>fa08a4da5f2fa39632d90ce3a2e1688d147ece61</td><td>Supplementary material for
+<br/>“Unsupervised Creation of Parameterized Avatars”
+<br/>1 Summary of Notations
+<br/>Tab. 1 itemizes the symbols used in the submission. Fig. 2,3,4 of the main text illustrate many of these
+<br/>symbols.
+<br/>2 DANN results
+<br/>Fig. 1 shows side by side samples of the original image and the emoji generated by the method of [1].
+<br/>As can be seen, these results do not preserve the identity very well, despite considerable effort invested in
+<br/>finding suitable architectures.
+<br/>3 Multiple Images Per Person
+<br/>Following [4], we evaluate the visual quality that is obtained per person and not just per image, by testing
+<br/>TOS on the Facescrub dataset [3]. For each person p, we considered the set of their images Xp, and selected
+<br/>the emoji that was most similar to their source image, i.e., the one for which:
+<br/>||f (x) − f (e(c(G(x))))||.
+<br/>argmin
+<br/>x∈Xp
+<br/>(1)
+<br/>Fig. 2 depicts the results obtained by this selection method on sample images form the Facescrub dataset
+<br/>(it is an extension of Fig. 7 of the main text). The figure also shows, for comparison, the DTN [4] result for
+<br/>the same image.
+<br/>4 Detailed Architecture of the Various Networks
+<br/>In this section we describe the architectures of the networks used in for the emoji and avatar experiments.
+<br/>4.1 TOS
+<br/>Network g maps DeepFace’s 256-dimensional representation [5] into 64 × 64 RGB emoji images. Follow-
+<br/>ing [4], this is done through a network with 9 blocks, each consisting of a convolution, batch-normalization
+<br/>and ReLU, except the last layer which employs Tanh activation. The odd blocks 1,3,5,7,9 perform upscaling
+<br/>convolutions with 512-256-128-64-3 filters respectively of spatial size 4 × 4. The even ones perform 1 × 1
+<br/>convolutions [2]. The odd blocks use a stride of 2 and padding of 1, excluding the first one which does not
+<br/>use stride or padding.
+<br/>Network e maps emoji parameterization into the matching 64× 64 RGB emoji. The parameterization is
+<br/>given as binary vectors in R813 for emojis; Avatar parameterization is in R354. While there are dependencies
+<br/>among the various dimensions (an emoji cannot have two hairstyles at once), the binary representation is
+<br/>chosen for its simplicity and generality. e is trained in a fully supervised way, using pairs of matching
+<br/>parameterization vectors and images in a supervised manner.
+<br/>The architecture of e employs five upscaling convolutions with 512-256-128-64-3 filters respectively,
+<br/>each of spatial size 4×4. All layers except the last one are batch normalized followed by a ReLU activation.
+<br/>The last layer is followed by Tanh activation, generating an RGB image with values in range [−1, 1]. All
+<br/>the layers use a stride of 2 and padding of 1, excluding the first one which does not use stride or padding.
+</td><td></td><td></td></tr><tr><td>fab2fc6882872746498b362825184c0fb7d810e4</td><td>RESEARCH ARTICLE
+<br/>Right wing authoritarianism is associated with
+<br/>race bias in face detection
+<br/>1 Univ. Grenoble Alpes, LPNC, Grenoble, France, 2 CNRS, LPNC UMR 5105, Grenoble, France, 3 IPSY,
+<br/><b>Universite Catholique de Louvain, Louvain-la-Neuve, Belgium, 4 The Queensland Brain Institute, The</b><br/><b>University of Queensland, St Lucia QLD Australia, 5 Institut Universitaire de France, Paris, France</b></td><td>('3128194', 'Brice Beffara', 'brice beffara')<br/>('2066203', 'Jessica McFadyen', 'jessica mcfadyen')<br/>('2634712', 'Martial Mermillod', 'martial mermillod')</td><td>* amelie.bret@univ-grenoble-alpes.fr
+</td></tr><tr><td>faead8f2eb54c7bc33bc7d0569adc7a4c2ec4c3b</td><td></td><td></td><td></td></tr><tr><td>fa24bf887d3b3f6f58f8305dcd076f0ccc30272a</td><td>JMLR: Workshop and Conference Proceedings 39:189–204, 2014
+<br/>ACML 2014
+<br/>Interval Insensitive Loss for Ordinal Classification
+<br/>Vojtˇech Franc
+<br/>V´aclav Hlav´aˇc
+<br/>Center for Machine Perception, Department of Cybernetics, Faculty of Electrical Engineering, Czech
+<br/><b>Technical University in Prague, Technick a 2, 166 27 Prague 6 Czech Republic</b><br/>Editor: Dinh Phung and Hang Li
+</td><td>('2742026', 'Kostiantyn Antoniuk', 'kostiantyn antoniuk')</td><td>antonkos@cmp.felk.cvut.cz
+<br/>xfrancv@cmp.felk.cvut.cz
+<br/>hlavac@fel.cvut.cz
+</td></tr><tr><td>fac8cff9052fc5fab7d5ef114d1342daba5e4b82</td><td>(CV last updated Oct. 5th, 2009.)
+<br/>www.stat.cmu.edu/~abrock
+<br/>1-412-478-3609
+<br/>Citizenship: U.S., Australia (dual)
+<br/>Education
+<br/>1994-1998
+<br/>: Ph.D., Department of Statistics and Department of of Electrical Engineering at
+<br/><b>Melbourne University, Advisors: K. Borovkov, R. Evans</b><br/>1993
+<br/>: Honours Science Degree (in the Department of Statistics) completed at Melbourne
+<br/><b>University (H</b><br/>1988-92
+<br/>: Bachelor of Science and Bachelor of Engineering with Honours completed at Mel-
+<br/><b>bourne University</b><br/>Employment
+<br/>2007+
+<br/><b>Carnegie Mellon University</b><br/>2007-2009
+<br/>: Senior Analyst, Horton Point LLC (Hedge Fund Management Company)
+<br/>2006-2007
+<br/>: Associate Professor, Department of Statistics, Carnegie Mellon Uniuversity
+<br/>2005-2007
+<br/>: Affiliated faculty member, Machine Learning Department (formerly known as the
+<br/><b>Center for Automated Learning and Discovery), Carnegie Mellon University</b><br/>2003-2007
+<br/><b>Faculty member, Parallel Data Lab (PDL), Carnegie Mellon University</b><br/>2002-2005
+<br/><b>Carnegie Mellon University</b><br/>1999-2002
+<br/><b>Carnegie Mellon University</b><br/>1998-1999
+<br/>: Research Fellow, Department of Electrical and Electronic Engineering, The Univer-
+<br/>sity of Melbourne
+<br/>1993-1995
+<br/><b>Sessional Tutor, The University of Melbourne</b></td><td>('1680307', 'Anthony Brockwell', 'anthony brockwell')</td><td>anthony.brockwell@gmail.com
+</td></tr><tr><td>faa29975169ba3bbb954e518bc9814a5819876f6</td><td>Evolution-Preserving Dense Trajectory Descriptors
+<br/><b>Stony Brook University, Stony Brook, NY 11794, USA</b></td><td>('2295608', 'Yang Wang', 'yang wang')<br/>('3482497', 'Vinh Tran', 'vinh tran')<br/>('2356016', 'Minh Hoai', 'minh hoai')</td><td>{wang33, tquangvinh, minhhoai}@cs.stonybrook.edu
+</td></tr><tr><td>fafe69a00565895c7d57ad09ef44ce9ddd5a6caa</td><td>Applied Mathematics, 2012, 3, 2071-2079
+<br/>http://dx.doi.org/10.4236/am.2012.312A286 Published Online December 2012 (http://www.SciRP.org/journal/am)
+<br/>Gaussian Mixture Models for Human Face Recognition
+<br/>under Illumination Variations
+<br/><b>Mihaylo College of Business and Economics</b><br/><b>California State University, Fullerton, USA</b><br/>Received August 18, 2012; revised September 18, 2012; accepted September 25, 2012
+</td><td>('2046854', 'Sinjini Mitra', 'sinjini mitra')</td><td>Email: smitra@fullerton.edu
+</td></tr><tr><td>faf5583063682e70dedc4466ac0f74eeb63169e7</td><td></td><td></td><td>HolisticPersonProcessing:FacesWithBodiesTelltheWholeStoryHillelAviezerPrincetonUniversityandNewYorkUniversityYaacovTropeNewYorkUniversityAlexanderTodorovPrincetonUniversityFacesandbodiesaretypicallyencounteredsimultaneously,yetlittleresearchhasexploredthevisualprocessingofthefullperson.Specifically,itisunknownwhetherthefaceandbodyareperceivedasdistinctcomponentsorasanintegrated,gestalt-likeunit.Toexaminethisquestion,weinvestigatedwhetheremotionalface–bodycompositesareprocessedinaholistic-likemannerbyusingavariantofthecompositefacetask,ameasureofholisticprocessing.Participantsjudgedfacialexpressionscombinedwithemotionallycongruentorincongruentbodiesthathavebeenshowntoinfluencetherecognitionofemotionfromtheface.Critically,thefaceswereeitheralignedwiththebodyinanaturalpositionormisalignedinamannerthatbreakstheecologicalpersonform.Convergingdatafrom3experimentsconfirmthatbreakingthepersonformreducesthefacilitatinginfluenceofcongruentbodycontextaswellastheimpedinginfluenceofincongruentbodycontextontherecognitionofemotionfromtheface.Theseresultsshowthatfacesandbodiesareprocessedasasingleunitandsupportthenotionofacompositepersoneffectanalogoustotheclassiceffectdescribedforfaces.Keywords:emotionperception,contexteffects,facialandbodyexpressions,holisticperception,com-positeeffectAglanceisusuallysufficientforextractingagreatdealofsocialinformationfromotherpeople(Adolphs,2002).Perceptualcuestocharacteristicssuchasgender,sexualorientation,emotionalex-pression,attractiveness,andpersonalitytraitscanbefoundinboththefaceandthebody(e.g.,facecues,Adolphs,2003;Calder&Young,2005;Ekman,1993;Elfenbein&Ambady,2002;Haxby,Hoffman,&Gobbini,2000;Rule,Ambady,&Hallett,2009;Thornhill&Gangestad,1999;Todorov&Duchaine,2008;Todo-rov,Pakrashi,&Oosterhof,2009;Willis&Todorov,2006;Ze-browitz,Hall,Murphy,&Rhodes,2002;Zebrowitz&Montepare,2008;bodycues,deGelderetal.,2006;Johnson,Gill,Reichman,&Tassinary,2007;Peelen&Downi
ng,2005;Stevenage,Nixon,&Vince,1999;Wallbott,1998).Todate,mostresearchershaveinvestigatedthefaceandthebodyasdiscreteperceptualunits,focusingontheprocessingofeachsourceinisolation.Althoughthisapproachhasprovedex-tremelyfruitfulforcharacterizingtheuniqueperceptualcontribu-tionsofthefaceandbody,surprisinglylittleisknownabouttheprocessingofbothsourcescombined.Theaimofthecurrentstudywastoshedlightontheperceptualprocessingofthefullpersonbyexaminingwhetherthefaceandbodyinconjunctionareprocessedasaholistic“personunit.”Onthebasisofpreviousaccounts,onemaypredictthatfacesandbodiesareprocessedastwovisualcomponentsofsocialinformation(Wallbott,1998).Theseviewsarguethatfacesandbodiesmaydifferinvalue,intensity,andclarity,andconsequentlytheinformationfromeachmustbeweightedandcombinedbythecognitivesysteminordertoreachaconclusionaboutthetarget(Ekman,Friesen,&Ellsworth,1982;Ellison&Massaro,1997;Trope,1986;Wallbott,1998).Accordingtothisapproach,thefaceandbodymayinfluenceeachother.However,theinfluenceisnotsynergistic,andtheperceptionofthefaceandbodyisequaltotheweightedsumoftheirparts(Wallbott,1998).Bycontrast,thehypothesisofferedhereisthatthefaceandbodyaresubcomponentsofalargerperceptualpersonunit.Fromanecologicalperspectivethisseemslikelybecauseundernaturalconditions,thevisualsystemrarelyencountersisolatedfacesandbodies(McArthur&Baron,1983;Russell,1997).Accordingtothisview,thefaceandbodyformaunitaryperceptthatmayencompassdifferentpropertiesthanthetwosourcesofinformationseparately.Inotherwords,theinformationreadoutfromthefullpersonmaybemorethanthesumofthefaceandbodyalone.HolisticProcessingandtheCompositeEffectPastresearchonsocialperceptionexaminingunitizedgestaltprocessinghasfocusedprimarilyontheface.Indeed,ahallmarkoffaceperceptionisholisticprocessingbywhichindividualfacialcomponentsbecomeintegratedintoawhole-faceunit(Farah,Wilson,Drain,&Tanaka,1995;Tanaka&Farah,1993).Althoughisolatedfacialcomponentsdobearspecificinformation(Smith,Cottrell,Gosselin,&Schyns,2005;Whalenetal.,2004),theirar
rangementinthenaturalfaceconfigurationresultsinaninte-ThisarticlewaspublishedOnlineFirstFebruary20,2012.HillelAviezer,DepartmentofPsychology,PrincetonUniversity,andDepartmentofPsychology,NewYorkUniversity;YaacovTrope,Depart-mentofPsychology,NewYorkUniversity;AlexanderTodorov,Depart-mentofPsychology,PrincetonUniversity.CorrespondenceconcerningthisarticleshouldbeaddressedtoHillelAviezer,DepartmentofPsychology,PrincetonUniversity,Princeton,NJ08540-1010.E-mail:haviezer@princeton.eduJournalofPersonalityandSocialPsychology©2012AmericanPsychologicalAssociation2012,Vol.103,No.1,20–370022-3514/12/$12.00DOI:10.1037/a002741120 </td></tr><tr><td>faca1c97ac2df9d972c0766a296efcf101aaf969</td><td>Sympathy for the Details: Dense Trajectories and Hybrid
+<br/>Classification Architectures for Action Recognition
+<br/><b>Computer Vision Group, Xerox Research Center Europe, Meylan, France</b><br/>2Centre de Visi´o per Computador, Universitat Aut`onoma de Barcelona, Bellaterra, Spain
+<br/>3German Aerospace Center, Wessling, Germany
+</td><td>('1799820', 'Adrien Gaidon', 'adrien gaidon')<br/>('2286630', 'Eleonora Vig', 'eleonora vig')</td><td>{cesar.desouza, adrien.gaidon}@xrce.xerox.com,
+<br/>eleonora.vig@dlr.de, antonio@cvc.uab.es
+</td></tr><tr><td>fab60b3db164327be8588bce6ce5e45d5b882db6</td><td>Maximum A Posteriori Estimation of Distances
+<br/>Between Deep Features in Still-to-Video Face
+<br/>Recognition
+<br/><b>National Research University Higher School of Economics</b><br/>Laboratory of Algorithms and Technologies for Network Analysis,
+<br/>36 Rodionova St., Nizhny Novgorod, Russia
+<br/><b>National Research University Higher School of Economics</b><br/>20 Myasnitskaya St., Moscow, Russia
+<br/>September 2, 2018
+</td><td>('35153729', 'Andrey V. Savchenko', 'andrey v. savchenko')<br/>('2080292', 'Natalya S. Belova', 'natalya s. belova')</td><td>avsavchenko@hse.ru
+<br/>nbelova@hse.ru
+</td></tr><tr><td>fad895771260048f58d12158a4d4d6d0623f4158</td><td>Audio-Visual Emotion
+<br/>Recognition For Natural
+<br/>Human-Robot Interaction
+<br/>Dissertation zur Erlangung des akademischen Grades
+<br/>Doktor der Ingenieurwissenschaften (Dr.-Ing.)
+<br/>vorgelegt von
+<br/>an der Technischen Fakultät der Universität Bielefeld
+<br/>15. März 2010
+</td><td>('32382494', 'Ahmad Rabie', 'ahmad rabie')</td><td></td></tr><tr><td>fae83b145e5eeda8327de9f19df286edfaf5e60c</td><td>Readings in Technology and Education: Proceedings of ICICTE 2010
+<br/>367
+<br/>TOWARDS AN INTERACTIVE E-LEARNING SYSTEM BASED ON
+<br/>EMOTIONS AND AFFECTIVE COGNITION
+<br/>Department of Informatics
+<br/>Department of Audiovisual Arts
+<br/>Department of Informatics
+<br/>Konstantinos Ch. Drossos
+<br/>Department of Audiovisual Arts
+<br/><b>Ionian University</b><br/>Greece
+</td><td>('25189167', 'Panagiotis Vlamos', 'panagiotis vlamos')<br/>('2284118', 'Andreas Floros', 'andreas floros')<br/>('1761403', 'Michail N. Giannakos', 'michail n. giannakos')</td><td></td></tr><tr><td>ffea8775fc9c32f573d1251e177cd283b4fe09c9</td><td>Accepted to be Published in Proceedings of the IEEE International Conference on Multimedia and Expo (ICME) 2018, San Diego, USA
+<br/>TRANSFORMATION ON COMPUTER–GENERATED FACIAL IMAGE TO AVOID DETECTION
+<br/>BY SPOOFING DETECTOR
+<br/><b>Graduate University for Advanced Studies, Kanagawa, Japan</b><br/><b>National Institute of Informatics, Tokyo, Japan</b><br/><b>The University of Edinburgh, Edinburgh, UK</b></td><td>('47321045', 'Huy H. Nguyen', 'huy h. nguyen')<br/>('9328269', 'Ngoc-Dung T. Tieu', 'ngoc-dung t. tieu')<br/>('2912817', 'Hoang-Quoc Nguyen-Son', 'hoang-quoc nguyen-son')<br/>('1716857', 'Junichi Yamagishi', 'junichi yamagishi')<br/>('1678602', 'Isao Echizen', 'isao echizen')</td><td>{nhhuy, dungtieu, nshquoc, jyamagishi, iechizen}@nii.ac.jp
+</td></tr><tr><td>ff8315c1a0587563510195356c9153729b533c5b</td><td>432
+<br/>Zapping Index:Using Smile to Measure
+<br/>Advertisement Zapping Likelihood
+</td><td>('1803478', 'Songfan Yang', 'songfan yang')<br/>('1784929', 'Mehran Kafai', 'mehran kafai')<br/>('39776603', 'Le An', 'le an')<br/>('1707159', 'Bir Bhanu', 'bir bhanu')</td><td></td></tr><tr><td>ff44d8938c52cfdca48c80f8e1618bbcbf91cb2a</td><td>Towards Video Captioning with Naming: a
+<br/>Novel Dataset and a Multi-Modal Approach
+<br/>Dipartimento di Ingegneria “Enzo Ferrari”
+<br/>Universit`a degli Studi di Modena e Reggio Emilia
+</td><td>('2035969', 'Stefano Pini', 'stefano pini')<br/>('3468983', 'Marcella Cornia', 'marcella cornia')<br/>('1843795', 'Lorenzo Baraldi', 'lorenzo baraldi')<br/>('1741922', 'Rita Cucchiara', 'rita cucchiara')</td><td>{name.surname}@unimore.it
+</td></tr><tr><td>fffefc1fb840da63e17428fd5de6e79feb726894</td><td>Fine-Grained Age Estimation in the wild with
+<br/>Attention LSTM Networks
+</td><td>('47969038', 'Ke Zhang', 'ke zhang')<br/>('49229283', 'Na Liu', 'na liu')<br/>('3451660', 'Xingfang Yuan', 'xingfang yuan')<br/>('46910049', 'Xinyao Guo', 'xinyao guo')<br/>('35038034', 'Ce Gao', 'ce gao')<br/>('2626320', 'Zhenbing Zhao', 'zhenbing zhao')</td><td></td></tr><tr><td>ff398e7b6584d9a692e70c2170b4eecaddd78357</td><td></td><td></td><td></td></tr><tr><td>ffc5a9610df0341369aa75c0331ef021de0a02a9</td><td>Transferred Dimensionality Reduction
+<br/>State Key Laboratory on Intelligent Technology and Systems
+<br/>Tsinghua National Laboratory for Information Science and Technology (TNList)
+<br/><b>Tsinghua University, Beijing 100084, China</b></td><td>('39747687', 'Zheng Wang', 'zheng wang')<br/>('1809614', 'Yangqiu Song', 'yangqiu song')<br/>('1700883', 'Changshui Zhang', 'changshui zhang')</td><td></td></tr><tr><td>ffd81d784549ee51a9b0b7b8aaf20d5581031b74</td><td>Performance Analysis of Retina and DoG
+<br/>Filtering Applied to Face Images for Training
+<br/>Correlation Filters
+<br/>Everardo Santiago Ram(cid:19)(cid:16)rez1, Jos(cid:19)e (cid:19)Angel Gonz(cid:19)alez Fraga1, Omar (cid:19)Alvarez
+<br/>1 Facultad de Ciencias, Universidad Aut(cid:19)onoma de Baja California,
+<br/>Carretera Transpeninsular Tijuana-Ensenada, N(cid:19)um. 3917, Colonia Playitas,
+<br/>Ensenada, Baja California, C.P. 22860
+<br/>{everardo.santiagoramirez,angel_fraga,
+<br/>2 Facultad de Ingenier(cid:19)(cid:16)a, Arquitectura y Dise~no, Universidad Aut(cid:19)onoma de Baja
+<br/>California, Carretera Transpeninsular Tijuana-Ensenada, N(cid:19)um. 3917, Colonia
+<br/>Playitas, Ensenada, Baja California, C.P. 22860
+</td><td>('2973536', 'Sergio Omar Infante Prieto', 'sergio omar infante prieto')</td><td>aomar,everardo.gutierrez}@uabc.edu.mx
+<br/>sinfante@uabc.edu.mx
+</td></tr><tr><td>ff01bc3f49130d436fca24b987b7e3beedfa404d</td><td>Article
+<br/>Fuzzy System-Based Face Detection Robust to
+<br/>In-Plane Rotation Based on Symmetrical
+<br/>Characteristics of a Face
+<br/><b>Division of Electronics and Electrical Engineering, Dongguk University, 30 Pildong-ro 1-gil, Jung-gu</b><br/>Academic Editor: Angel Garrido
+<br/>Received: 15 June 2016; Accepted: 29 July 2016; Published: 3 August 2016
+</td><td>('1922686', 'Hyung Gil Hong', 'hyung gil hong')<br/>('2026806', 'Won Oh Lee', 'won oh lee')<br/>('3021526', 'Yeong Gon Kim', 'yeong gon kim')<br/>('4634733', 'Kang Ryoung Park', 'kang ryoung park')</td><td>Seoul 100-715, Korea; hell@dongguk.edu (H.G.H.); 215p8@hanmail.net (W.O.L.); csokyg@dongguk.edu (Y.G.K.);
+<br/>yawara18@hotmail.com (K.W.K.); nguyentiendat@dongguk.edu (D.T.N.)
+<br/>* Correspondence: parkgr@dongguk.edu; Tel.: +82-10-3111-7022; Fax: +82-2-2277-8735
+</td></tr><tr><td>ff061f7e46a6213d15ac2eb2c49d9d3003612e49</td><td>Morphable Human Face Modelling
+<br/>by
+<br/>Thesis
+<br/>for fulfillment of the Requirements for the Degree of
+<br/>Doctor of Philosophy (0190)
+<br/>Clayton School of Information Technology
+<br/><b>Monash University</b><br/>February, 2008
+</td><td>('1695402', 'Nathan Faggian', 'nathan faggian')<br/>('1695402', 'Nathan Faggian', 'nathan faggian')<br/>('1728337', 'Andrew Paplinski', 'andrew paplinski')<br/>('2696169', 'Jamie Sherrah', 'jamie sherrah')</td><td></td></tr><tr><td>ff1f45bdad41d8b35435098041e009627e60d208</td><td>NAGRANI, ZISSERMAN: FROM BENEDICT CUMBERBATCH TO SHERLOCK HOLMES
+<br/>From Benedict Cumberbatch to Sherlock
+<br/>Holmes: Character Identification in TV
+<br/>series without a Script
+<br/>Visual Geometry Group,
+<br/>Department of Engineering Science,
+<br/><b>University of Oxford, UK</b></td><td>('19263506', 'Arsha Nagrani', 'arsha nagrani')<br/>('1688869', 'Andrew Zisserman', 'andrew zisserman')</td><td>arsha@robots.ox.ac.uk/
+<br/>az@robots.ox.ac.uk/
+</td></tr><tr><td>ff60d4601adabe04214c67e12253ea3359f4e082</td><td></td><td></td><td></td></tr><tr><td>ffe4bb47ec15f768e1744bdf530d5796ba56cfc1</td><td>AFIF4: Deep Gender Classification based on
+<br/>AdaBoost-based Fusion of Isolated Facial Features and
+<br/>Foggy Faces
+<br/>aDepartment of Electrical Engineering and Computer Science, Lassonde School of
+<br/><b>Engineering, York University, Canada</b><br/><b>bFaculty of Computers and Information, Assiut University, Egypt</b></td><td>('40239027', 'Abdelrahman Abdelhamed', 'abdelrahman abdelhamed')</td><td></td></tr><tr><td>ffc9d6a5f353e5aec3116a10cf685294979c63d9</td><td>Eigenphase-based face recognition: a comparison of phase-
+<br/>information extraction methods
+<br/>Faculty of Electrical Engineering and Computing,
+<br/><b>University of Zagreb, Unska 3, 10 000 Zagreb</b></td><td>('35675021', 'Slobodan Ribarić', 'slobodan ribarić')<br/>('3069572', 'Marijo Maračić', 'marijo maračić')</td><td>E-mail: slobodan.ribaric@fer.hr
+</td></tr><tr><td>ff8ef43168b9c8dd467208a0b1b02e223b731254</td><td>BreakingNews: Article Annotation by
+<br/>Image and Text Processing
+</td><td>('1780343', 'Arnau Ramisa', 'arnau ramisa')<br/>('47242882', 'Fei Yan', 'fei yan')<br/>('1994318', 'Francesc Moreno-Noguer', 'francesc moreno-noguer')<br/>('1712041', 'Krystian Mikolajczyk', 'krystian mikolajczyk')</td><td></td></tr><tr><td>ff9195f99a1a28ced431362f5363c9a5da47a37b</td><td>Journal of Vision (2016) 16(15):28, 1–8
+<br/>Serial dependence in the perception of attractiveness
+<br/><b>University of California</b><br/>Berkeley, CA, USA
+<br/><b>University of California</b><br/>Berkeley, CA, USA
+<br/>David Whitney
+<br/><b>University of California</b><br/>Berkeley, CA, USA
+<br/><b>Helen Wills Neuroscience Institute, University of</b><br/>California, Berkeley, CA, USA
+<br/><b>Vision Science Group, University of California</b><br/>Berkeley, CA, USA
+<br/>The perception of attractiveness is essential for choices
+<br/>of food, object, and mate preference. Like perception of
+<br/>other visual features, perception of attractiveness is
+<br/>stable despite constant changes of image properties due
+<br/>to factors like occlusion, visual noise, and eye
+<br/>movements. Recent results demonstrate that perception
+<br/>of low-level stimulus features and even more complex
+<br/>attributes like human identity are biased towards recent
+<br/>percepts. This effect is often called serial dependence.
+<br/>Some recent studies have suggested that serial
+<br/>dependence also exists for perceived facial
+<br/>attractiveness, though there is also concern that the
+<br/>reported effects are due to response bias. Here we used
+<br/>an attractiveness-rating task to test the existence of
+<br/>serial dependence in perceived facial attractiveness. Our
+<br/>results demonstrate that perceived face attractiveness
+<br/>was pulled by the attractiveness level of facial images
+<br/>encountered up to 6 s prior. This effect was not due to
+<br/>response bias and did not rely on the previous motor
+<br/>response. This perceptual pull increased as the difference
+<br/>in attractiveness between previous and current stimuli
+<br/>increased. Our results reconcile previously conflicting
+<br/>findings and extend previous work, demonstrating that
+<br/>sequential dependence in perception operates across
+<br/>different levels of visual analysis, even at the highest
+<br/>levels of perceptual interpretation.
+<br/>Introduction
+<br/>Humans make aesthetic judgments all the time about
+<br/>the attractiveness or desirability of objects and scenes.
+<br/>Aesthetic judgments are not merely about judging
+<br/>works of art; they are constantly involved in our daily
+<br/>activity, influencing or determining our choices of food,
+<br/>object (Creusen & Schoormans, 2005), and mate
+<br/>preference (Rhodes, Simmons, & Peters, 2005).
+<br/>Aesthetic judgments are based on perceptual pro-
+<br/>cessing (Arnheim, 1954; Livingstone & Hubel, 2002;
+<br/>Solso, 1996). These judgments, like other perceptual
+<br/>experiences, are thought to be relatively stable in spite
+<br/>of fluctuations in the raw visual input we receive due to
+<br/>factors like occlusion, visual noise, and eye movements.
+<br/>One mechanism that allows the visual system to achieve
+<br/>this stability is serial dependence. Recent results have
+<br/>revealed that the perception of visual features such as
+<br/>orientation (Fischer & Whitney, 2014), numerosity
+<br/>(Cicchini, Anobile, & Burr, 2014), and facial identity
+<br/>(Liberman, Fischer, & Whitney, 2014) are systemati-
+<br/>cally assimilated toward visual input from the recent
+<br/>past. This perceptual pull has been distinguished from
+<br/>hysteresis in motor responses or decision processes, and
+<br/>has been shown to be tuned by the magnitude of the
+<br/>difference between previous and current visual inputs
+<br/>(Fischer & Whitney, 2014; Liberman, Fischer, &
+<br/>Whitney, 2014).
+<br/>Is aesthetics perception similarly stable like feature
+<br/>perception? Some previous studies have suggested that
+<br/>the answer is yes. It has been shown that there is a
+<br/>positive correlation between observers’ successive
+<br/>attractiveness ratings of facial images (Kondo, Taka-
+<br/>hashi, & Watanabe, 2012; Taubert, Van der Burg, &
+<br/>Alais, 2016). This suggests that there is an assimilative
+<br/>sequential dependence in attractiveness judgments.
+<br/>Citation: Xia, Y., Leib, A. Y., & Whitney, D. (2016). Serial dependence in the perception of attractiveness. Journal of Vision,
+<br/>16(15):28, 1–8, doi:10.1167/16.15.28.
+<br/>doi: 10 .116 7 /1 6. 15 . 28
+<br/>Received July 13, 2016; published December 22, 2016
+<br/>ISSN 1534-7362
+<br/>This work is licensed under a Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License.
+</td><td>('27678837', 'Ye Xia', 'ye xia')<br/>('6931574', 'Allison Yamanashi Leib', 'allison yamanashi leib')</td><td></td></tr><tr><td>ffaad0204f4af763e3390a2f6053c0e9875376be</td><td>Article
+<br/>Non-Convex Sparse and Low-Rank Based Robust
+<br/>Subspace Segmentation for Data Mining
+<br/><b>School of Information Science and Technology, Donghua University, Shanghai 200051, China</b><br/><b>City University of Hong Kong, Kowloon 999077, Hong Kong, China</b><br/><b>School of Mathematics and Computer Science, Northeastern State University, Tahlequah, OK 74464, USA</b><br/>Received: 16 June 2017; Accepted: 10 July 2017; Published: 15 July 2017
+</td><td>('1743434', 'Wenlong Cheng', 'wenlong cheng')<br/>('2482149', 'Mingbo Zhao', 'mingbo zhao')<br/>('1691742', 'Naixue Xiong', 'naixue xiong')<br/>('1977592', 'Kwok Tai Chui', 'kwok tai chui')</td><td>cheng.python@gmail.com
+<br/>ktchui3-c@my.cityu.edu.hk
+<br/>xiongnaixue@gmail.com
+<br/>* Correspondence: mbzhao4@gmail.com; Tel.: +86-131-0684-8616
+</td></tr><tr><td>ffcbedb92e76fbab083bb2c57d846a2a96b5ae30</td><td></td><td></td><td></td></tr><tr><td>ff7bc7a6d493e01ec8fa2b889bcaf6349101676e</td><td>Facial expression recognition with spatiotemporal local
+<br/>descriptors
+<br/>Machine Vision Group, Infotech Oulu and Department of Electrical and
+<br/><b>Information Engineering, P. O. Box 4500 FI-90014 University of Oulu, Finland</b></td><td>('1757287', 'Guoying Zhao', 'guoying zhao')<br/>('1714724', 'Matti Pietikäinen', 'matti pietikäinen')</td><td>{gyzhao, mkp}@ee.oulu.fi
+</td></tr><tr><td>fffa2943808509fdbd2fc817cc5366752e57664a</td><td>Combined Ordered and Improved Trajectories for Large Scale Human Action
+<br/>Recognition
+<br/>1Vision & Sensing, HCC Lab,
+<br/><b>ESTeM, University of Canberra</b><br/>2IHCC, RSCS, CECS,
+<br/><b>Australian National University</b></td><td>('1793720', 'O. V. Ramana Murthy', 'o. v. ramana murthy')<br/>('1717204', 'Roland Goecke', 'roland goecke')</td><td>O.V.RamanaMurthy@ieee.org
+<br/>roland.goecke@ieee.org
+</td></tr><tr><td>ff46c41e9ea139d499dd349e78d7cc8be19f936c</td><td>International Journal of Modern Engineering Research (IJMER)
+<br/>www.ijmer.com Vol.3, Issue.3, May-June. 2013 pp-1339-1342 ISSN: 2249-6645
+<br/>A Novel Method for Movie Character Identification and its
+<br/>Facial Expression Recognition
+<br/><b>M.Tech, Sri Sunflower College of Engineering and Technology, Lankapalli</b><br/><b>Sri Sunflower College of Engineering and Technology, Lankapalli</b></td><td>('6339174', 'N. Praveen', 'n. praveen')</td><td></td></tr><tr><td>ff5dd6f96e108d8233220cc262bc282229c1a582</td><td>Applications (IJERA) ISSN: 2248-9622 www.ijera.com
+<br/>Vol. 2, Issue 6, November- December 2012, pp.708-715
+<br/>Robust Facial Marks Detection Method Using AAM And SURF
+<br/><b>B.S. Abdur Rahman University, Chennai-48, India</b><br/><b>B.S. Abdur Rahman University, Chennai-48, India</b><br/>
+</td><td>('9401261', 'Ziaul Haque Choudhury', 'ziaul haque choudhury')<br/>('9401261', 'Ziaul Haque Choudhury', 'ziaul haque choudhury')</td><td></td></tr><tr><td>c5468665d98ce7349d38afb620adbf51757ab86f</td><td>Pose-Encoded Spherical Harmonics for Robust Face
+<br/>Recognition Using a Single Image
+<br/><b>Center for Automation Research, University of Maryland, College Park, MD 20742, USA</b><br/>2 Vision Technologies Lab, Sarnoff Corporation, Princeton, NJ 08873, USA
+</td><td>('39265975', 'Zhanfeng Yue', 'zhanfeng yue')<br/>('38480590', 'Wenyi Zhao', 'wenyi zhao')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td></td></tr><tr><td>c588c89a72f89eed29d42f34bfa5d4cffa530732</td><td>Attributes2Classname: A discriminative model for attribute-based
+<br/>unsupervised zero-shot learning
+<br/><b>HAVELSAN Inc., 2Bilkent University, 3Hacettepe University</b></td><td>('9424554', 'Berkan Demirel', 'berkan demirel')<br/>('1939006', 'Ramazan Gokberk Cinbis', 'ramazan gokberk cinbis')<br/>('2011587', 'Nazli Ikizler-Cinbis', 'nazli ikizler-cinbis')</td><td>bdemirel@havelsan.com.tr, gcinbis@cs.bilkent.edu.tr, nazli@cs.hacettepe.edu.tr
+</td></tr><tr><td>c5d13e42071813a0a9dd809d54268712eba7883f</td><td>Face Recognition Robust to Head Pose Changes Based on the RGB-D Sensor
+<br/><b>West Virginia University, Morgantown, WV</b></td><td>('2997432', 'Cesare Ciaccio', 'cesare ciaccio')<br/>('2671284', 'Lingyun Wen', 'lingyun wen')<br/>('1822413', 'Guodong Guo', 'guodong guo')</td><td>cciaccio@mix.wvu.edu, lwen@mix.wvu.edu, guodong.guo@mail.wvu.edu
+</td></tr><tr><td>c50d73557be96907f88b59cfbd1ab1b2fd696d41</td><td>JournalofElectronicImaging13(3),474–485(July2004).
+<br/>Semiconductor sidewall shape estimation
+<br/>Oak Ridge National Laboratory
+<br/>Oak Ridge, Tennessee 37831-6010
+</td><td>('3078522', 'Philip R. Bingham', 'philip r. bingham')<br/>('3211433', 'Jeffery R. Price', 'jeffery r. price')<br/>('2019731', 'Kenneth W. Tobin', 'kenneth w. tobin')<br/>('1970334', 'Thomas P. Karnowski', 'thomas p. karnowski')</td><td>E-mail: binghampr@ornl.gov
+</td></tr><tr><td>c54f9f33382f9f656ec0e97d3004df614ec56434</td><td></td><td></td><td></td></tr><tr><td>c574c72b5ef1759b7fd41cf19a9dcd67e5473739</td><td>Zlatintsi et al. EURASIP Journal on Image and Video Processing (2017) 2017:54
+<br/>DOI 10.1186/s13640-017-0194-1
+<br/>EURASIP Journal on Image
+<br/>and Video Processing
+<br/>RESEARCH
+<br/>Open Access
+<br/>COGNIMUSE: a multimodal video
+<br/>database annotated with saliency, events,
+<br/>semantics and emotion with application to
+<br/>summarization
+</td><td>('2641229', 'Athanasia Zlatintsi', 'athanasia zlatintsi')<br/>('27687205', 'Niki Efthymiou', 'niki efthymiou')<br/>('2861393', 'Katerina Pastra', 'katerina pastra')<br/>('1791187', 'Alexandros Potamianos', 'alexandros potamianos')<br/>('1750686', 'Petros Maragos', 'petros maragos')<br/>('2539459', 'Petros Koutras', 'petros koutras')<br/>('1710606', 'Georgios Evangelopoulos', 'georgios evangelopoulos')</td><td></td></tr><tr><td>c5a561c662fc2b195ff80d2655cc5a13a44ffd2d</td><td>Using Language to Learn Structured Appearance
+<br/>Models for Image Annotation
+</td><td>('37894231', 'Michael Jamieson', 'michael jamieson')<br/>('1775745', 'Afsaneh Fazly', 'afsaneh fazly')<br/>('1792908', 'Suzanne Stevenson', 'suzanne stevenson')<br/>('1724954', 'Sven Wachsmuth', 'sven wachsmuth')</td><td></td></tr><tr><td>c5fe40875358a286594b77fa23285fcfb7bda68e</td><td></td><td></td><td></td></tr><tr><td>c5c379a807e02cab2e57de45699ababe8d13fb6d</td><td> Facial Expression Recognition Using Sparse Representation
+<br/>1School of Physics and Electronic Engineering
+<br/><b>Taizhou University</b><br/>Taizhou 318000
+<br/>CHINA
+<br/> 2Department of Computer Science
+<br/><b>Taizhou University</b><br/>Taizhou 318000
+<br/>CHINA
+</td><td>('1695589', 'SHIQING ZHANG', 'shiqing zhang')<br/>('1730594', 'XIAOMING ZHAO', 'xiaoming zhao')<br/>('38909691', 'BICHENG LEI', 'bicheng lei')</td><td>tzczsq@163.com, leibicheng@163.com
+<br/>tzxyzxm@163.com
+</td></tr><tr><td>c5ea084531212284ce3f1ca86a6209f0001de9d1</td><td>Audio-Visual Speech Processing for
+<br/>Multimedia Localisation
+<br/>by
+<br/>Matthew Aaron Benatan
+<br/>Submitted in accordance with the requirements
+<br/>for the degree of Doctor of Philosophy
+<br/><b>The University of Leeds</b><br/>School of Computing
+<br/>September 2016
+</td><td></td><td></td></tr><tr><td>c5935b92bd23fd25cae20222c7c2abc9f4caa770</td><td>Spatiotemporal Multiplier Networks for Video Action Recognition
+<br/><b>Graz University of Technology</b><br/><b>Graz University of Technology</b><br/><b>York University, Toronto</b></td><td>('2322150', 'Christoph Feichtenhofer', 'christoph feichtenhofer')<br/>('1718587', 'Axel Pinz', 'axel pinz')<br/>('1709096', 'Richard P. Wildes', 'richard p. wildes')</td><td>feichtenhofer@tugraz.at
+<br/>axel.pinz@tugraz.at
+<br/>wildes@cse.yorku.ca
+</td></tr><tr><td>c5421a18583f629b49ca20577022f201692c4f5d</td><td>Facial Age Classification using Subpattern-based
+<br/>Approaches
+<br/><b>Eastern Mediterranean University, Gazima usa, Northern Cyprus</b><br/>Mersin 10, Turkey
+<br/>
+<br/>are
+<br/>(mPCA)
+<br/>examined
+</td><td>('3437942', 'Fatemeh Mirzaei', 'fatemeh mirzaei')<br/>('2907423', 'Önsen Toygar', 'önsen toygar')</td><td>{fatemeh.mirzaei, onsen.toygar}@emu.edu.tr
+</td></tr><tr><td>c5be0feacec2860982fbbb4404cf98c654142489</td><td>Semi-Qualitative Probabilistic Networks in Computer
+<br/>Vision Problems
+<br/>Troy, NY 12180, USA.
+<br/>Troy, NY 12180, USA.
+<br/>Troy, NY 12180, USA.
+<br/>Troy, NY 12180, USA.
+<br/>Received: ***
+<br/>Revised: ***
+</td><td>('1680860', 'Cassio P. de Campos', 'cassio p. de campos')<br/>('1684635', 'Lei Zhang', 'lei zhang')<br/>('1686235', 'Yan Tong', 'yan tong')<br/>('1726583', 'Qiang Ji', 'qiang ji')</td><td>Email: decamc@rpi.edu
+<br/>Email: zhangl2@rpi.edu
+<br/>Email: tongy2@rpi.edu
+<br/>Email: jiq@rpi.edu
+</td></tr><tr><td>c5844de3fdf5e0069d08e235514863c8ef900eb7</td><td>Lam S K et al. / (IJCSE) International Journal on Computer Science and Engineering
+<br/>Vol. 02, No. 08, 2010, 2659-2665
+<br/>A Study on Similarity Computations in Template
+<br/>Matching Technique for Identity Verification
+<br/>Lam, S. K., Yeong, C. Y., Yew, C. T., Chai, W. S., Suandi, S. A.
+<br/>Intelligent Biometric Group, School of Electrical and Electronic Engineering
+<br/>Engineering Campus, Universiti Sains Malaysia
+<br/>14300 Nibong Tebal, Pulau Pinang, MALAYSIA
+</td><td></td><td>Email: shahrel@eng.usm.my
+</td></tr><tr><td>c58b7466f2855ffdcff1bebfad6b6a027b8c5ee1</td><td>Ultra-Resolving Face Images by Discriminative
+<br/>Generative Networks(cid:63)
+<br/><b>Australian National University</b></td><td>('4092561', 'Xin Yu', 'xin yu')</td><td>{xin.yu, fatih.porikli}@anu.edu.au
+</td></tr><tr><td>c590c6c171392e9f66aab1bce337470c43b48f39</td><td>Emotion Recognition by Machine Learning Algorithms using
+<br/>Psychophysiological Signals
+<br/>1, 2, 3 BT Convergence Technology Research Department, Electronics and Telecommunications
+<br/><b>Research Institute, 138 Gajeongno, Yuseong-gu, Daejeon, 305-700, Republic of Korea</b><br/><b>Chungnam National University</b></td><td>('2329242', 'Eun-Hye Jang', 'eun-hye jang')<br/>('1696731', 'Byoung-Jun Park', 'byoung-jun park')<br/>('2030031', 'Sang-Hyeob Kim', 'sang-hyeob kim')<br/>('2615387', 'Jin-Hun Sohn', 'jin-hun sohn')</td><td>cleta4u@etri.re.kr, bj_park@etri.re.kr, shk1028@etri.re.kr
+<br/>Gung-dong, Yuseong-gu, Daejeon, 305-765, Republic of Korea, jhsohn@cnu.ac.kr
+</td></tr><tr><td>c5f1ae9f46dc44624591db3d5e9f90a6a8391111</td><td>Application of non-negative and local non negative matrix factorization to facial
+<br/>expression recognition
+<br/>Dept. of Informatics
+<br/><b>Aristotle University of Thessaloniki</b><br/>GR-541 24, Thessaloniki, Box 451, Greece
+</td><td>('2336758', 'Ioan Buciu', 'ioan buciu')<br/>('1698588', 'Ioannis Pitas', 'ioannis pitas')</td><td>{nelu,pitas}@zeus.csd.auth.gr
+</td></tr><tr><td>c53352a4239568cc915ad968aff51c49924a3072</td><td>Transfer Representation-Learning for Anomaly Detection
+<br/>Lewis D. Griffin†
+<br/><b>University College London, UK</b><br/><b>University College London, UK</b><br/>(cid:63)Rapiscan Systems Ltd, USA
+</td><td>('3451382', 'Thomas Tanay', 'thomas tanay')<br/>('13736095', 'Edward J. Morton', 'edward j. morton')</td><td>JERONE.ANDREWS@CS.UCL.AC.UK
+<br/>THOMAS.TANAY.13@UCL.AC.UK
+<br/>EMORTON@RAPISCANSYSTEMS.COM
+<br/>L.GRIFFIN@CS.UCL.AC.UK
+</td></tr><tr><td>c2c5206f6a539b02f5d5a19bdb3a90584f7e6ba4</td><td>Affective Computing: A Review
+<br/><b>National Laboratory of Pattern Recognition (NLPR), Institute of Automation</b><br/>Chinese Academy of Sciences, P.O.X. 2728, Beijing 100080
+</td><td>('37670752', 'Jianhua Tao', 'jianhua tao')<br/>('1688870', 'Tieniu Tan', 'tieniu tan')</td><td>{jhtao, tnt}@nlpr.ia.ac.cn
+</td></tr><tr><td>c2fa83e8a428c03c74148d91f60468089b80c328</td><td>Optimal Mean Robust Principal Component Analysis
+<br/><b>University of Texas at Arlington, Arlington, TX</b></td><td>('1688370', 'Feiping Nie', 'feiping nie')<br/>('40034801', 'Jianjun Yuan', 'jianjun yuan')<br/>('1748032', 'Heng Huang', 'heng huang')</td><td>FEIPINGNIE@GMAIL.COM
+<br/>WRIYJJ@GMAIL.COM
+<br/>HENG@UTA.EDU
+</td></tr><tr><td>c2c3ff1778ed9c33c6e613417832505d33513c55</td><td>Multimodal Biometric Person Authentication
+<br/>Using Fingerprint, Face Features
+<br/><b>University of Lac Hong 10 Huynh Van Nghe</b><br/>DongNai 71000, Viet Nam
+<br/><b>Ho Chi Minh City University of Science</b><br/>227 Nguyen Van Cu, HoChiMinh 70000, Viet Nam
+</td><td>('2009230', 'Tran Binh Long', 'tran binh long')<br/>('2710459', 'Le Hoang Thai', 'le hoang thai')<br/>('1971778', 'Tran Hanh', 'tran hanh')</td><td>tblong@lhu.edu.vn
+<br/>lhthai@fit.hcmus.edu.vn
+</td></tr><tr><td>c27f64eaf48e88758f650e38fa4e043c16580d26</td><td>Title of the proposed research project: Subspace analysis using Locality Preserving
+<br/>Projection and its applications for image recognition
+<br/>Research area: Data manifold learning for pattern recognition
+<br/>Contact Details:
+<br/><b>University: Dhirubhai Ambani Institute of Information and Communication Technology</b><br/>(DA-IICT), Gandhinagar.
+<br/>
+</td><td>('2050838', 'Gitam C Shikkenawis', 'gitam c shikkenawis')</td><td>Email Address: 201221004@daiict.ac.in
+</td></tr><tr><td>c23153aade9be0c941390909c5d1aad8924821db</td><td>Efficient and Accurate Tracking
+<br/>for Face Diarization via Periodical Detection
+<br/>∗Ecole Polytechnique Federal de Lausanne, Switzerland
+<br/><b>Idiap Research Institute, Martigny, Switzerland</b></td><td>('39560344', 'Nam Le', 'nam le')<br/>('30790014', 'Alexander Heili', 'alexander heili')<br/>('1719610', 'Jean-Marc Odobez', 'jean-marc odobez')</td><td>Email: { nle, aheili, dwu, odobez }@idiap.ch
+</td></tr><tr><td>c207fd762728f3da4cddcfcf8bf19669809ab284</td><td>Face Alignment Using Boosting and Evolutionary
+<br/>Search
+<br/><b>College of Software Engineering, Southeast University, Nanjing 210096, China</b><br/><b>Lab of Science and Technology, Southeast University, Nanjing 210096, China</b><br/><b>Human Media Interaction, University of Twente, P.O. Box</b><br/>7500 AE Enschede, The Netherlands
+</td><td>('39063774', 'Hua Zhang', 'hua zhang')<br/>('2779570', 'Duanduan Liu', 'duanduan liu')<br/>('1688157', 'Mannes Poel', 'mannes poel')<br/>('1745198', 'Anton Nijholt', 'anton nijholt')</td><td>reynzhang@sina.com
+<br/>liuduanduan@seu.edu.cn
+<br/>{anijholt,mpoel}@cs.utwente.nl
+</td></tr><tr><td>c220f457ad0b28886f8b3ef41f012dd0236cd91a</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+<br/>Crystal Loss and Quality Pooling for
+<br/>Unconstrained Face Verification and Recognition
+</td><td>('40497884', 'Rajeev Ranjan', 'rajeev ranjan')<br/>('2068427', 'Ankan Bansal', 'ankan bansal')<br/>('2680836', 'Hongyu Xu', 'hongyu xu')<br/>('2716670', 'Swami Sankaranarayanan', 'swami sankaranarayanan')<br/>('36407236', 'Jun-Cheng Chen', 'jun-cheng chen')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td></td></tr><tr><td>c254b4c0f6d5a5a45680eb3742907ec93c3a222b</td><td>A Fusion-based Gender Recognition Method
+<br/>Using Facial Images
+</td><td>('24033665', 'Benyamin Ghojogh', 'benyamin ghojogh')<br/>('1779028', 'Saeed Bagheri Shouraki', 'saeed bagheri shouraki')<br/>('1782221', 'Hoda Mohammadzade', 'hoda mohammadzade')<br/>('22395643', 'Ensieh Iranmehr', 'ensieh iranmehr')</td><td></td></tr><tr><td>c2e03efd8c5217188ab685e73cc2e52c54835d1a</td><td>Deep Tree-structured Face: A Unified Representation for Multi-task Facial
+<br/>Biometrics
+<br/>Department of Electrical Engineering and Computer Science
+<br/><b>University of Tennessee, Knoxville</b></td><td>('1691576', 'Rui Guo', 'rui guo')<br/>('9120475', 'Liu Liu', 'liu liu')<br/>('40560485', 'Wei Wang', 'wei wang')<br/>('2885826', 'Ali Taalimi', 'ali taalimi')<br/>('1690083', 'Chi Zhang', 'chi zhang')<br/>('1698645', 'Hairong Qi', 'hairong qi')</td><td>{rguo1, lliu25, wwang34, ataalimi, czhang24, hqi} @utk.edu
+</td></tr><tr><td>c28461e266fe0f03c0f9a9525a266aa3050229f0</td><td>Automatic Detection of Facial Feature Points via
+<br/>HOGs and Geometric Prior Models
+<br/>1 Computer Vision Center , Universitat Aut`onoma de Barcelona
+<br/>2 Universitat Oberta de Catalunya
+<br/>3 Dept. de Matem`atica Aplicada i An`alisi
+<br/>Universitat de Barcelona
+</td><td>('1863902', 'David Masip', 'david masip')</td><td>mrojas@cvc.uab.es, dmasipr@uoc.edu, jordi.vitria@ub.edu
+</td></tr><tr><td>c29e33fbd078d9a8ab7adbc74b03d4f830714cd0</td><td></td><td></td><td></td></tr><tr><td>c2e6daebb95c9dfc741af67464c98f1039127627</td><td>5-1
+<br/>MVA2013 IAPR International Conference on Machine Vision Applications, May 20-23, 2013, Kyoto, JAPAN
+<br/>Efficient Measuring of Facial Action Unit Activation Intensities
+<br/>using Active Appearance Models
+<br/><b>Computer Vision Group, Friedrich Schiller University of Jena, Germany</b><br/><b>University Hospital Jena, Germany</b></td><td>('1708249', 'Daniel Haase', 'daniel haase')<br/>('8993584', 'Michael Kemmler', 'michael kemmler')<br/>('1814631', 'Orlando Guntinas-Lichius', 'orlando guntinas-lichius')<br/>('1728382', 'Joachim Denzler', 'joachim denzler')</td><td></td></tr><tr><td>f60a85bd35fa85739d712f4c93ea80d31aa7de07</td><td>VisDA: The Visual Domain Adaptation Challenge
+<br/><b>Boston University</b><br/><b>EECS, University of California Berkeley</b></td><td>('2960713', 'Xingchao Peng', 'xingchao peng')<br/>('39058756', 'Ben Usman', 'ben usman')<br/>('34836903', 'Neela Kaushik', 'neela kaushik')<br/>('50196944', 'Judy Hoffman', 'judy hoffman')<br/>('2774612', 'Dequan Wang', 'dequan wang')<br/>('2903226', 'Kate Saenko', 'kate saenko')</td><td>xpeng,usmn,nkaushik,saenko@bu.edu, jhoffman,dqwang@eecs.berkeley.edu
+</td></tr><tr><td>f6f06be05981689b94809130e251f9e4bf932660</td><td>An Approach to Illumination and Expression Invariant
+<br/>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 91 – No.15, April 2014
+<br/>Multiple Classifier Face Recognition
+<br/>Dalton Meitei Thounaojam
+<br/><b>National Institute of Technology</b><br/>Silchar
+<br/>Assam: 788010
+<br/>India
+<br/><b>National Institute of Technology</b><br/>Silchar
+<br/>Assam: 788010
+<br/>India
+<br/>Romesh Laishram
+<br/><b>Manipur Institute of Technology</b><br/>Imphal West: 795001
+<br/>India
+</td><td></td><td></td></tr><tr><td>f68ed499e9d41f9c3d16d843db75dc12833d988d</td><td></td><td></td><td></td></tr><tr><td>f6742010372210d06e531e7df7df9c01a185e241</td><td>Dimensional Affect and Expression in
+<br/>Natural and Mediated Interaction
+<br/><b>Ritsumeikan, University</b><br/>Kyoto, Japan
+<br/>October, 2007
+</td><td>('1709339', 'Michael J. Lyons', 'michael j. lyons')</td><td>lyons@im.ritsumei.ac.jp
+</td></tr><tr><td>f69de2b6770f0a8de6d3ec1a65cb7996b3c99317</td><td>Research Journal of Applied Sciences, Engineering and Technology 8(22): 2265-2271, 2014
+<br/>ISSN: 2040-7459; e-ISSN: 2040-7467
+<br/>© Maxwell Scientific Organization, 2014
+<br/>Submitted: September ‎13, ‎2014
+<br/>Accepted: ‎September ‎20, ‎2014
+<br/>Published: December 15, 2014
+<br/>Face Recognition System Based on Sparse Codeword Analysis
+<br/><b>St.Joseph s College of Engineering, Old Mamallapuram Road, Kamaraj Nagar, Semmencherry, Chennai</b><br/><b>Anna University, Chennai</b><br/>Tamil Nadu 600119, India
+</td><td>('2508896', 'P. Geetha', 'p. geetha')<br/>('40574934', 'Vasumathi Narayanan', 'vasumathi narayanan')</td><td></td></tr><tr><td>f6ca29516cce3fa346673a2aec550d8e671929a6</td><td>International Journal of Engineering and Advanced Technology (IJEAT)
+<br/>ISSN: 2249 – 8958, Volume-2, Issue-4, April 2013
+<br/>Algorithm for Face Matching Using Normalized
+<br/>Cross-Correlation
+<br/>
+</td><td>('2426695', 'C. Saravanan', 'c. saravanan')<br/>('14289238', 'M. Surender', 'm. surender')</td><td></td></tr><tr><td>f67a73c9dd1e05bfc51219e70536dbb49158f7bc</td><td>Journal of Computer Science 10 (11): 2292-2298, 2014
+<br/>ISSN: 1549-3636
+<br/>© 2014 Nithyashri and Kulanthaivel, This open access article is distributed under a Creative Commons Attribution
+<br/>(CC-BY) 3.0 license
+<br/>A GAUSSIAN MIXTURE MODEL FOR CLASSIFYING THE
+<br/>HUMAN AGE USING DWT AND SAMMON MAP
+<br/><b>Sathyabama University, Chennai, India</b><br/>2Department of Electronics Engineering, NITTTR, Chennai, India
+<br/>Received 2014-05-08; Revised 2014-05-23; Accepted 2014-11-28
+</td><td>('9513864', 'J. Nithyashri', 'j. nithyashri')<br/>('5014650', 'G. Kulanthaivel', 'g. kulanthaivel')</td><td></td></tr><tr><td>f6c70635241968a6d5fd5e03cde6907022091d64</td><td></td><td></td><td></td></tr><tr><td>f6149fc5b39fa6b33220ccee32a8ee3f6bbcaf4a</td><td>Syn2Real: A New Benchmark for
+<br/>Synthetic-to-Real Visual Domain Adaptation
+<br/><b>Boston University1, University of Tokyo</b><br/><b>University of California Berkeley</b></td><td>('2960713', 'Xingchao Peng', 'xingchao peng')<br/>('39058756', 'Ben Usman', 'ben usman')<br/>('8915348', 'Kuniaki Saito', 'kuniaki saito')<br/>('34836903', 'Neela Kaushik', 'neela kaushik')<br/>('2903226', 'Kate Saenko', 'kate saenko')</td><td></td></tr><tr><td>f66f3d1e6e33cb9e9b3315d3374cd5f121144213</td><td>The Journal of Neuroscience, October 30, 2013 • 33(44):17435–17443 • 17435
+<br/>Behavioral/Cognitive
+<br/>Top-Down Control of Visual Responses to Fear by the
+<br/>Amygdala
+<br/>1Medical Research Council Cognition and Brain Sciences Unit, Cambridge CB2 7EF, United Kingdom, and 2Wellcome Centre for Imaging Neuroscience,
+<br/><b>University College London, London WC1N 3BG, United Kingdom</b><br/>The visual cortex is sensitive to emotional stimuli. This sensitivity is typically assumed to arise when amygdala modulates visual cortex
+<br/>via backwards connections. Using human fMRI, we compared dynamic causal connectivity models of sensitivity with fearful faces. This
+<br/>model comparison tested whether amygdala modulates distinct cortical areas, depending on dynamic or static face presentation. The
+<br/>ventral temporal fusiform face area showed sensitivity to fearful expressions in static faces. However, for dynamic faces, we found fear
+<br/>sensitivity in dorsal motion-sensitive areas within hMT⫹/V5 and superior temporal sulcus. The model with the greatest evidence
+<br/>included connections modulated by dynamic and static fear from amygdala to dorsal and ventral temporal areas, respectively. According
+<br/>to this functional architecture, amygdala could enhance encoding of fearful expression movements from video and the form of fearful
+<br/>expressions from static images. The amygdala may therefore optimize visual encoding of socially charged and salient information.
+<br/>Introduction
+<br/>Emotional images enhance responses in visual areas, an effect
+<br/>typically observed in the fusiform gyrus for static fearful faces and
+<br/>ascribed to backwards connections from amygdala (Morris et al.,
+<br/>1998; Vuilleumier and Pourtois, 2007). Although support for
+<br/>amygdala influence comes from structural connectivity (Amaral
+<br/>and Price, 1984; Catani et al., 2003), functional connectivity
+<br/>(Morris et al., 1998; Foley et al., 2012), and path analysis (Lim et
+<br/>al., 2009), directed connectivity measures and formal model
+<br/>comparison are still needed to show that backwards connections
+<br/>from amygdala are more likely than other architectures to gener-
+<br/>ate cortical emotion sensitivity.
+<br/>Moreover, it is surprising that the putative amygdala feedback
+<br/>would enhance fusiform cortex responses. According to the pre-
+<br/>vailing view, a face-selective area in fusiform cortex, the fusiform
+<br/>face area (FFA), is associated with processing facial identity,
+<br/>whereas dorsal temporal regions, particularly in the superior
+<br/>temporal sulcus (STS), are associated with processing facial ex-
+<br/>pression (Haxby et al., 2000). An alternative position is that fusi-
+<br/>form and STS areas both contribute to facial expression
+<br/>processing but contribute to encoding structural forms and dy-
+<br/>namic features, respectively (Calder and Young, 2005; Calder,
+<br/>2011). In this case, static fearful expressions may enhance FFA
+<br/>Received July 11, 2013; revised Sept. 7, 2013; accepted Sept. 12, 2013.
+<br/>Author contributions: N.F., R.N.H., K.J.F., and A.J.C. designed research; N.F. performed research; N.F. analyzed
+<br/>data; N.F., R.N.H., K.J.F., and A.J.C. wrote the paper.
+<br/>This work was supported by the United Kingdom Economic and Social Research Council Grant RES-062-23-2925
+<br/>to N.F. and the Medical Research Council Grant MC_US_A060_5PQ50 to A.J.C. and Grant MC_US_A060_0046 to
+<br/>R.N.H. We thank Christopher Fox for supplying the dynamic object stimuli and James Rowe and Francesca Carota for
+<br/>contributing useful comments.
+<br/>The authors declare no competing financial interests.
+<br/>DOI:10.1523/JNEUROSCI.2992-13.2013
+<br/>Copyright © 2013 the authors
+<br/>0270-6474/13/3317435-09$15.00/0
+<br/>encoding of structural cues associated with emotional expres-
+<br/>sion. We therefore characterized the conditions under which
+<br/>amygdala mediates fear sensitivity in fusiform cortex, compared
+<br/>with dorsal temporal areas (Sabatinelli et al., 2011).
+<br/>We asked whether dynamic and static fearful expressions en-
+<br/>hance responses in dorsal temporal and ventral fusiform areas, re-
+<br/>spectively. One dorsal temporal area, hMT⫹/V5, is sensitive to low
+<br/>level and facial motion and may be homologous to the middle tem-
+<br/>poral (MT), medial superior temporal (MST), and fundus of the
+<br/>super temporal (FST) areas in the macaque (Kolster et al., 2010).
+<br/>Another dorsal area, the posterior STS, is responsive generally to
+<br/>biological motion (Giese and Poggio, 2003). Compared with dorsal
+<br/>areas, the fusiform gyrus shows less sensitivity to facial motion
+<br/>(Schultz and Pilz, 2009; Trautmann et al., 2009; Pitcher et al., 2011;
+<br/>Foley et al., 2012; Schultz et al., 2012). Despite its association with
+<br/>facial identity processing, many studies have shown that FFA con-
+<br/>tributes to processing facial expressions (Ganel et al., 2005; Fox et al.,
+<br/>2009b; Cohen Kadosh et al., 2010; Harris et al., 2012) and may have
+<br/>a general role in processing facial form (O’Toole et al., 2002; Calder,
+<br/>2011). Sensitivity to static fearful expressions in the FFA may reflect
+<br/>this role in processing static form. If so, then dynamic fearful expres-
+<br/>sions may evoke fear sensitivity in dorsal temporal areas instead,
+<br/>reflecting the role of these areas to processing motion.
+<br/>Our fMRI results confirmed our hypothesis that dorsal
+<br/>motion-sensitive areas showed fear sensitivity for dynamic facial
+<br/>expressions, whereas the FFA showed fear sensitivity for static
+<br/>expressions. To explore connectivity mechanisms that mediate
+<br/>fear sensitivity, we used dynamic causal modeling (DCM) to ex-
+<br/>plore 508 plausible connectivity architectures. Our Bayesian
+<br/>model comparison identified the most likely model, which
+<br/>showed that dynamic and static fear modulated connections
+<br/>from amygdala to dorsal or ventral areas, respectively. Amygdala
+<br/>therefore may control how behaviorally relevant information is
+<br/>visually coded in a context-sensitive fashion.
+</td><td>('3162581', 'Nicholas Furl', 'nicholas furl')<br/>('3162581', 'Nicholas Furl', 'nicholas furl')</td><td>Unit, 15 Chaucer Road, Cambridge, CB2 7EF, United Kingdom. E-mail: nick.furl@mrc-cbu.cam.ac.uk.
+</td></tr><tr><td>f6ce34d6e4e445cc2c8a9b8ba624e971dd4144ca</td><td>Cross-label Suppression: A Discriminative and Fast
+<br/>Dictionary Learning with Group Regularization
+<br/>April 24, 2017
+</td><td>('9293691', 'Xiudong Wang', 'xiudong wang')<br/>('2080215', 'Yuantao Gu', 'yuantao gu')</td><td></td></tr><tr><td>f6abecc1f48f6ec6eede4143af33cc936f14d0d0</td><td></td><td></td><td></td></tr><tr><td>f61d5f2a082c65d5330f21b6f36312cc4fab8a3b</td><td>Multi-Level Variational Autoencoder:
+<br/>Learning Disentangled Representations from
+<br/>Grouped Observations
+<br/>OVAL Group
+<br/><b>University of Oxford</b><br/>Machine Intelligence and Perception Group
+<br/>Microsoft Research
+<br/>Cambridge, UK
+</td><td>('3365029', 'Diane Bouchacourt', 'diane bouchacourt')<br/>('2870603', 'Ryota Tomioka', 'ryota tomioka')<br/>('2388416', 'Sebastian Nowozin', 'sebastian nowozin')</td><td>diane@robots.ox.ac.uk
+<br/>{ryoto,Sebastian.Nowozin}@microsoft.com
+</td></tr><tr><td>f6fa97fbfa07691bc9ff28caf93d0998a767a5c1</td><td>k2-means for fast and accurate large scale clustering
+<br/>Computer Vision Lab
+<br/>D-ITET
+<br/>ETH Zurich
+<br/>Computer Vision Lab
+<br/>D-ITET
+<br/>ETH Zurich
+<br/>ESAT, KU Leuven
+<br/>D-ITET, ETH Zurich
+</td><td>('2794259', 'Eirikur Agustsson', 'eirikur agustsson')<br/>('1732855', 'Radu Timofte', 'radu timofte')<br/>('1681236', 'Luc Van Gool', 'luc van gool')</td><td>aeirikur@vision.ee.ethz.ch
+<br/>timofter@vision.ee.ethz.ch
+<br/>vangool@vision.ee.ethz.ch
+</td></tr><tr><td>f6cf2108ec9d0f59124454d88045173aa328bd2e</td><td>Robust user identification based on facial action units
+<br/>unaffected by users’ emotions
+<br/><b>Aalen University, Germany</b></td><td>('3114281', 'Ricardo Buettner', 'ricardo buettner')</td><td>ricardo.buettner@hs-aalen.de
+</td></tr><tr><td>f68f20868a6c46c2150ca70f412dc4b53e6a03c2</td><td>157
+<br/>Differential Evolution to Optimize
+<br/>Hidden Markov Models Training:
+<br/>Application to Facial Expression
+<br/>Recognition
+<br/>Ars`ene Simbabawe
+<br/><b>MISC Laboratory, Constantine 2 University, Constantine, Algeria</b><br/>The base system in this paper uses Hidden Markov
+<br/>Models (HMMs) to model dynamic relationships among
+<br/>facial features in facial behavior interpretation and un-
+<br/>derstanding field. The input of HMMs is a new set
+<br/>of derived features from geometrical distances obtained
+<br/>from detected and automatically tracked facial points.
+<br/>Numerical data representation which is in the form of
+<br/>multi-time series is transformed to a symbolic repre-
+<br/>sentation in order to reduce dimensionality, extract the
+<br/>most pertinent information and give a meaningful repre-
+<br/>sentation to humans. The main problem of the use of
+<br/>HMMs is that the training is generally trapped in local
+<br/>minima, so we used the Differential Evolution (DE)
+<br/>algorithm to offer more diversity and so limit as much as
+<br/>possible the occurrence of stagnation. For this reason,
+<br/>this paper proposes to enhance HMM learning abilities
+<br/>by the use of DE as an optimization tool, instead of the
+<br/>classical Baum and Welch algorithm. Obtained results
+<br/>are compared against the traditional learning approach
+<br/>and significant improvements have been obtained.
+<br/>Keywords: facial expressions, occurrence order, Hidden
+<br/>Markov Model, Baum-Welch, optimization, differential
+<br/>evolution
+<br/>1. Introduction
+<br/>Analyzing the dynamics of facial features and
+<br/>(or) the changes in the appearance of facial fea-
+<br/>tures (eyes, eyebrows and mouth) is a very im-
+<br/>portant step in facial expression understanding
+<br/>and interpretation. Many researchers attempt to
+<br/>study the dynamic facial behavior. Timing, du-
+<br/>ration, speed and occurrence order of face/body
+<br/>actions are crucial parameters related to dy-
+<br/>namic behavior (Ekman, & Rosenberg, 2005).
+<br/>For instance, facial expression temporal dynam-
+<br/>ics are essential for recognition of either full ex-
+<br/>pressions (Kotsia & Pitas, 2007; Littlewort &
+<br/>al, 2006), or components of expressions such
+<br/>as facial Action Units (AUs) (Pantic & Patras,
+<br/>2006; Valstar & Pantic, 2007). They are essen-
+<br/>tial for categorization of complex psychologi-
+<br/>cal states like various types of pain and mood
+<br/>(Williams, 2002) and are highly important cues
+<br/>for distinguishing posed from spontaneous fa-
+<br/>cial expressions (Cohn & Schmidt, 2004; Val-
+<br/>star & al, 2006). Timing, duration and speed
+<br/>have been analyzed in several studies (Cohn &
+<br/>Schmidt, 2004; Valstar & al, 2006; Valstar & al
+<br/>2007). However, little attention has been given
+<br/>to occurrence order (Valstar & al, 2006; Valstar
+<br/>& al 2007).
+<br/>Several efforts have been recently reported on
+<br/>automatic analysis of facial expression data
+<br/>(Zeng & al, 2009; Sandbach & al, 2012; Gunes
+<br/>that most recent methods employ probabilistic
+<br/>(Hidden Markov Models, Dynamic Bayesian
+<br/>Network), statistical (Support Vector Machine),
+<br/>and ensemble learning techniques (Gentle-
+<br/>-Boost), which seem to be particularly suitable
+<br/>for automatic facial expression recognition from
+<br/>face image sequences. Because we want to ex-
+<br/>HMM (Koelstra & al, 2010; Cohen & al, 2003)
+<br/>and DBN (Tong & al, 2007; Tong & al, 2010)
+<br/>can be used.
+<br/>The presented work in this paper is a part of
+<br/>a project which aims to construct “An Optimal
+</td><td>('2654160', 'Khadoudja Ghanem', 'khadoudja ghanem')<br/>('1749675', 'Amer Draa', 'amer draa')<br/>('2483552', 'Elvis Vyumvuhore', 'elvis vyumvuhore')</td><td></td></tr><tr><td>f6e00d6430cbbaa64789d826d093f7f3e323b082</td><td>Visual Object Recognition
+<br/><b>University of Texas at Austin</b><br/><b>RWTH Aachen University</b><br/>SYNTHESIS LECTURES ON COMPUTER
+<br/>VISION # 1
+</td><td>('1794409', 'Kristen Grauman', 'kristen grauman')<br/>('1789756', 'Bastian Leibe', 'bastian leibe')</td><td></td></tr><tr><td>e9a5a38e7da3f0aa5d21499149536199f2e0e1f7</td><td>Article
+<br/>A Bayesian Scene-Prior-Based Deep Network Model
+<br/>for Face Verification
+<br/><b>North China University of Technology</b><br/><b>Curtin University, Perth, WA 6102, Australia</b><br/>† These authors contributed equally to this work.
+<br/>Received: 12 May 2018; Accepted: 8 June 2018 ; Published: 11 June 2018
+</td><td>('2104779', 'Huafeng Wang', 'huafeng wang')<br/>('2239474', 'Haixia Pan', 'haixia pan')<br/>('3229158', 'Wenfeng Song', 'wenfeng song')<br/>('1713220', 'Wanquan Liu', 'wanquan liu')<br/>('47311804', 'Ning Song', 'ning song')<br/>('2361868', 'Yuehai Wang', 'yuehai wang')</td><td>Beijing 100144, China; wangyuehai@ncut.edu.cn
+<br/>2 Department of Software, Beihang University, Beijing 100191, China; swfbuaa@163.com
+<br/>* Correspondence: wanghuafeng@ncut.edu.cn (H.W.); W.Liu@curtin.edu.au (W.L.); zy1621125@buaa.edu.cn
+<br/>(N.S.); haixiapan@buaa.edu.cn (H.P.); Tel.: +86-189-1192-4121 (H.W.)
+</td></tr><tr><td>e9ed17fd8bf1f3d343198e206a4a7e0561ad7e66</td><td>International Journal of Enhanced Research in Science Technology & Engineering, ISSN: 2319-7463
+<br/>Vol. 3 Issue 1, January-2014, pp: (362-365), Impact Factor: 1.252, Available online at: www.erpublications.com
+<br/>Cognitive Learning for Social Robot through
+<br/>Facial Expression from Video Input
+<br/>1Department of Automation & Robotics, 2Department of Computer Science & Engg.
+</td><td>('26944751', 'Neeraj Rai', 'neeraj rai')<br/>('2586264', 'Deepak Rai', 'deepak rai')<br/>('26477055', 'Ajay Kumar Garg', 'ajay kumar garg')</td><td></td></tr><tr><td>e988be047b28ba3b2f1e4cdba3e8c94026139fcf</td><td>Multi-Task Convolutional Neural Network for
+<br/>Pose-Invariant Face Recognition
+</td><td>('2399004', 'Xi Yin', 'xi yin')<br/>('1759169', 'Xiaoming Liu', 'xiaoming liu')</td><td></td></tr><tr><td>e9d43231a403b4409633594fa6ccc518f035a135</td><td>Deformable Part Models with CNN Features
+<br/>Kokkinos1,2
+<br/>1 Ecole Centrale Paris,2 INRIA, 3TTI-Chicago (cid:63)
+</td><td>('2381485', 'Stavros Tsogkas', 'stavros tsogkas')<br/>('2776496', 'George Papandreou', 'george papandreou')</td><td></td></tr><tr><td>e90e12e77cab78ba8f8f657db2bf4ae3dabd5166</td><td>Nonconvex Sparse Spectral Clustering by Alternating Direction Method of
+<br/>Multipliers and Its Convergence Analysis
+<br/><b>National University of Singapore</b><br/><b>Key Laboratory of Machine Perception (MOE), School of EECS, Peking University</b><br/><b>Cooperative Medianet Innovation Center, Shanghai Jiao Tong University</b><br/><b>AI Institute</b></td><td>('33224509', 'Canyi Lu', 'canyi lu')<br/>('33221685', 'Jiashi Feng', 'jiashi feng')<br/>('33383055', 'Zhouchen Lin', 'zhouchen lin')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')</td><td>canyilu@gmail.com, elefjia@nus.edu.sg, zlin@pku.edu.cn, eleyans@nus.edu.sg
+</td></tr><tr><td>e9c008d31da38d9eef67a28d2c77cb7daec941fb</td><td>Noisy Softmax: Improving the Generalization Ability of DCNN via Postponing
+<br/>the Early Softmax Saturation
+<br/><b>School of Information and Communication Engineering, Beijing University of Posts and Telecommunications</b><br/><b>School of Computer Science, Beijing University of Posts and Telecommunications, Beijing China</b></td><td>('3450321', 'Binghui Chen', 'binghui chen')<br/>('1774956', 'Weihong Deng', 'weihong deng')<br/>('8491162', 'Junping Du', 'junping du')</td><td>chenbinghui@bupt.edu.cn, whdeng@bupt.edu.cn, junpingd@bupt.edu.cn
+</td></tr><tr><td>e9e40e588f8e6510fa5537e0c9e083ceed5d07ad</td><td>Fast Face Detection Using Graphics Processor
+<br/><b>National Institute of Technology Karnataka</b><br/>Surathkal, India
+</td><td>('36598334', 'K.Vinay Kumar', 'k.vinay kumar')</td><td></td></tr><tr><td>e9bb045e702ee38e566ce46cc1312ed25cb59ea7</td><td>Integrating Geometric and Textural Features for
+<br/>Facial Emotion Classification using SVM
+<br/>Frameworks
+<br/>1 Department of Computer Science and Engineering,
+<br/><b>Indian Institute of Technology, Roorkee</b><br/>2 Department of Electronics and Electrical Communication Engineering,
+<br/><b>Indian Institute of Technology, Kharagpur</b></td><td>('19200118', 'Samyak Datta', 'samyak datta')<br/>('3165117', 'Debashis Sen', 'debashis sen')<br/>('1726184', 'R. Balasubramanian', 'r. balasubramanian')</td><td></td></tr><tr><td>e9fcd15bcb0f65565138dda292e0c71ef25ea8bb</td><td>Repositorio Institucional de la Universidad Autónoma de Madrid
+<br/>https://repositorio.uam.es
+<br/>Esta es la versión de autor de la comunicación de congreso publicada en:
+<br/>This is an author produced version of a paper published in:
+<br/>Highlights on Practical Applications of Agents and Multi-Agent Systems:
+<br/>International Workshops of PAAMS. Communications in Computer and
+<br/>Information Science, Volumen 365. Springer, 2013. 223-230
+<br/>DOI: http://dx.doi.org/10.1007/978-3-642-38061-7_22
+<br/>Copyright: © 2013 Springer-Verlag
+<br/>El acceso a la versión del editor puede requerir la suscripción del recurso
+<br/>Access to the published version may require subscription
+</td><td></td><td></td></tr><tr><td>e9f1cdd9ea95810efed306a338de9e0de25990a0</td><td>FEPS: An Easy-to-Learn Sensory Substitution System to
+<br/>Perceive Facial Expressions
+<br/>Electrical and Computer Engineering
+<br/><b>University of Memphis</b><br/>Memphis, TN 38152, USA
+</td><td>('2497319', 'M. Iftekhar Tanveer', 'm. iftekhar tanveer')<br/>('2464507', 'Sreya Ghosh', 'sreya ghosh')<br/>('33019079', 'A.K.M. Mahbubur Rahman', 'a.k.m. mahbubur rahman')<br/>('1828610', 'Mohammed Yeasin', 'mohammed yeasin')</td><td>{mtanveer,aanam,sghosh,arahman,myeasin}@memphis.edu
+</td></tr><tr><td>e9363f4368b04aeaa6d6617db0a574844fc59338</td><td>BENCHIP: Benchmarking Intelligence
+<br/>Processors
+<br/>1ICT CAS,2Cambricon,3Alibaba Infrastructure Service, Alibaba Group
+<br/>4IFLYTEK,5JD,6RDA Microelectronics,7AMD
+</td><td>('2631042', 'Jinhua Tao', 'jinhua tao')<br/>('1678776', 'Zidong Du', 'zidong du')<br/>('50770616', 'Qi Guo', 'qi guo')<br/>('4304175', 'Huiying Lan', 'huiying lan')<br/>('48571185', 'Lei Zhang', 'lei zhang')<br/>('7523063', 'Shengyuan Zhou', 'shengyuan zhou')<br/>('49046597', 'Cong Liu', 'cong liu')<br/>('49343896', 'Shan Tang', 'shan tang')<br/>('38253244', 'Allen Rush', 'allen rush')<br/>('47482936', 'Willian Chen', 'willian chen')<br/>('39419985', 'Shaoli Liu', 'shaoli liu')<br/>('7377735', 'Yunji Chen', 'yunji chen')<br/>('7934735', 'Tianshi Chen', 'tianshi chen')</td><td></td></tr><tr><td>f1250900074689061196d876f551ba590fc0a064</td><td>Learning to Recognize Actions from Limited Training
+<br/>Examples Using a Recurrent Spiking Neural Model
+<br/><b>School of Electrical and Computer Engineering, Purdue University, West Lafayette, IN, USA</b><br/>2Intel Labs, Hillsboro, OR, USA 97124
+</td><td>('9352814', 'Priyadarshini Panda', 'priyadarshini panda')<br/>('1753812', 'Narayan Srinivasa', 'narayan srinivasa')</td><td>*Correspondence: narayan.srinivasa@intel.com
+</td></tr><tr><td>f1b4583c576d6d8c661b4b2c82bdebf3ba3d7e53</td><td>Faster Than Real-time Facial Alignment: A 3D Spatial Transformer Network
+<br/>Approach in Unconstrained Poses
+<br/><b>Carnegie Mellon University</b><br/>Pittsburgh, PA
+</td><td>('47894545', 'Chenchen Zhu', 'chenchen zhu')<br/>('1769788', 'Khoa Luu', 'khoa luu')<br/>('1794486', 'Marios Savvides', 'marios savvides')</td><td>cbhagava@andrew.cmu.edu, zcckernel@cmu.edu, kluu@andrew.cmu.edu, msavvid@ri.cmu.edu
+</td></tr><tr><td>f16a605abb5857c39a10709bd9f9d14cdaa7918f</td><td>Fast greyscale road sign model matching
+<br/>and recognition
+<br/>Centre de Visió per Computador
+<br/>Edifici O – Campus UAB, 08193 Bellaterra, Barcelona, Catalonia, Spain
+</td><td>('7855312', 'Sergio Escalera', 'sergio escalera')<br/>('1724155', 'Petia Radeva', 'petia radeva')</td><td>{sescalera,petia}@cvc.uab.es
+</td></tr><tr><td>f1aa120fb720f6cfaab13aea4b8379275e6d40a2</td><td>InverseFaceNet: Deep Single-Shot Inverse Face Rendering From A Single Image
+<br/><b>Max-Planck-Institute for Informatics</b><br/><b>University of Erlangen-Nuremberg 3 University of Bath</b><br/>Figure 1. Our single-shot deep inverse face renderer InverseFaceNet obtains a high-quality geometry, reflectance and illumination estimate
+<br/>from just a single input image. We jointly recover the face pose, shape, expression, reflectance and incident scene illumination. From left to
+<br/>right: input photo, our estimated face model, its geometry, and the pointwise Euclidean error compared to Garrido et al. [14].
+</td><td>('3022958', 'Hyeongwoo Kim', 'hyeongwoo kim')<br/>('34105638', 'Justus Thies', 'justus thies')<br/>('1699058', 'Michael Zollhöfer', 'michael zollhöfer')<br/>('1819028', 'Christian Richardt', 'christian richardt')<br/>('1680185', 'Christian Theobalt', 'christian theobalt')<br/>('9102722', 'Ayush Tewari', 'ayush tewari')</td><td></td></tr><tr><td>f1748303cc02424704b3a35595610890229567f9</td><td></td><td></td><td></td></tr><tr><td>f1ba2fe3491c715ded9677862fea966b32ca81f0</td><td>ISSN: 2321-7782 (Online)
+<br/>Volume 1, Issue 7, December 2013
+<br/>International Journal of Advance Research in
+<br/>Computer Science and Management Studies
+<br/>Research Paper
+<br/>Available online at: www.ijarcsms.com
+<br/>Face Tracking and Recognition in Videos:
+<br/>HMM Vs KNN
+<br/>Assistant Professor
+<br/>Department of Computer Engineering
+<br/><b>MIT College of Engineering (Pune University</b><br/>Pune - India
+</td><td></td><td></td></tr><tr><td>f1d090fcea63d9f9e835c49352a3cd576ec899c1</td><td>Iosifidis, A., Tefas, A., & Pitas, I. (2015). Single-Hidden Layer Feedforward
+<br/>Neual Network Training Using Class Geometric Information. In . J. J.
+<br/>Computational Intelligence: International Joint Conference, IJCCI 2014
+<br/>Rome, Italy, October 22-24, 2014 Revised Selected Papers. (Vol. III, pp.
+<br/>351-364). (Studies in Computational Intelligence; Vol. 620). Springer. DOI:
+<br/>10.1007/978-3-319-26393-9_21
+<br/>Peer reviewed version
+<br/>Link to published version (if available):
+<br/>10.1007/978-3-319-26393-9_21
+<br/>Link to publication record in Explore Bristol Research
+<br/>PDF-document
+<br/><b>University of Bristol - Explore Bristol Research</b><br/>General rights
+<br/>This document is made available in accordance with publisher policies. Please cite only the published
+<br/>version using the reference above. Full terms of use are available:
+<br/>http://www.bristol.ac.uk/pure/about/ebr-terms.html
+<br/> </td><td>('1685469', 'A. Rosa', 'a. rosa')<br/>('9246794', 'J. M. Cadenas', 'j. m. cadenas')<br/>('2092535', 'A. Dourado', 'a. dourado')<br/>('39545211', 'K. Madani', 'k. madani')</td><td></td></tr><tr><td>f113aed343bcac1021dc3e57ba6cc0647a8f5ce1</td><td>International Journal of Science and Research (IJSR)
+<br/>ISSN (Online): 2319-7064
+<br/>Index Copernicus Value (2013): 6.14 | Impact Factor (2014): 5.611
+<br/>A Survey on Mining of Weakly Labeled Web Facial
+<br/>Images and Annotation
+<br/><b>Pune Institute of Computer Technology, Pune, India</b><br/><b>Pune Institute of Computer Technology, Pune, India</b><br/>the
+<br/>the proposed system which
+</td><td></td><td></td></tr><tr><td>f19777e37321f79e34462fc4c416bd56772031bf</td><td>International Journal of Scientific & Engineering Research, Volume 3, Issue 6, June-2012 1
+<br/>ISSN 2229-5518
+<br/>Literature Review of Image Compression Algorithm
+<br/> Dr. B. Chandrasekhar
+<br/>Padmaja.V.K
+<br/><b>Jawaharlal Technological University, Anantapur</b></td><td></td><td>email: padmaja_vk@yahoo.co.in email:: drchandrasekhar@gmail.com
+</td></tr><tr><td>f19ab817dd1ef64ee94e94689b0daae0f686e849</td><td>TECHNISCHE UNIVERSIT¨AT M ¨UNCHEN
+<br/>Lehrstuhl f¨ur Mensch-Maschine-Kommunikation
+<br/>Blickrichtungsunabh¨angige Erkennung von
+<br/>Personen in Bild- und Tiefendaten
+<br/>Andre St¨ormer
+<br/>Vollst¨andiger Abdruck der von der Fakult¨at f¨ur Elektrotechnik und Informationstechnik
+<br/>der Technischen Universit¨at M¨unchen zur Erlangung des akademischen Grades eines
+<br/>Doktor-Ingenieurs (Dr.-Ing.)
+<br/>genehmigten Dissertation.
+<br/>Vorsitzender:
+<br/>Univ.-Prof. Dr.-Ing. Thomas Eibert
+<br/>Pr¨ufer der Dissertation:
+<br/>1. Univ.-Prof. Dr.-Ing. habil. Gerhard Rigoll
+<br/>2. Univ.-Prof. Dr.-Ing. Horst-Michael Groß,
+<br/>Technische Universit¨at Ilmenau
+<br/>Die Dissertation wurde am 16.06.2009 bei der Technischen Universit¨at M¨unchen einge-
+<br/>reicht und durch die Fakult¨at f¨ur Elektrotechnik und Informationstechnik am 30.10.2009
+<br/>angenommen.
+</td><td></td><td></td></tr><tr><td>e76798bddd0f12ae03de26b7c7743c008d505215</td><td></td><td></td><td></td></tr><tr><td>e7cac91da51b78eb4a28e194d3f599f95742e2a2</td><td>RESEARCH ARTICLE
+<br/>Positive Feeling, Negative Meaning:
+<br/>Visualizing the Mental Representations of In-
+<br/>Group and Out-Group Smiles
+<br/><b>Saarland University, Saarbr cken, Germany, 2 Utrecht University, Utrecht, the Netherlands</b><br/><b>Behavioural Science Institute, Radboud University, Nijmegen, the Netherlands</b><br/>☯ These authors contributed equally to this work.
+</td><td>('34533048', 'Andrea Paulus', 'andrea paulus')<br/>('40358273', 'Michaela Rohr', 'michaela rohr')<br/>('2365875', 'Ron Dotsch', 'ron dotsch')<br/>('3905267', 'Dirk Wentura', 'dirk wentura')</td><td>* a.paulus@mx.uni-saarland.de
+</td></tr><tr><td>e793f8644c94b81b7a0f89395937a7f8ad428a89</td><td>LPM for Action Recognition in Temporally
+<br/>Untrimmed Videos
+<br/>School of Electrical Engineering and Computer Scinece
+<br/><b>University of Ottawa, Ottawa, On, Canada</b></td><td>('36047295', 'Feng Shi', 'feng shi')<br/>('1745632', 'Emil Petriu', 'emil petriu')</td><td>{fshi098, laganier, petriu}@site.uottawa.ca
+</td></tr><tr><td>e726174d516605f80ff359e71f68b6e8e6ec6d5d</td><td><b>Institute of Information Science</b><br/><b>Beijing Jiaotong University</b><br/>Beijing, 100044 P.R. China
+<br/>A novel Patched Locality Preserving Projections for 3D face recognition was pre-
+<br/>sented in this paper. In this paper, we firstly patched each image to get the spatial infor-
+<br/>mation, and then Gabor filter was used extract intrinsic discriminative information em-
+<br/>bedded in each patch. Finally Locality Preserving Projections, which was improved by
+<br/>Principle Components Analysis, was utilized to the corresponding patches to obtain lo-
+<br/>cality preserving information. The feature was constructed by connecting all these pro-
+<br/>jections. Recognition was achieved by using a Nearest Neighbor classifier finally. The
+<br/>novelty of this paper came from: (1) The method was robust to changes in facial expres-
+<br/>sions and poses, because Gabor filters promoted their useful properties, such as invari-
+<br/>ance to rotation, scale and translations, in feature extraction; (2) The method not only
+<br/>preserved spatial information, but also preserved locality information of the correspond-
+<br/>ing patches. Experiments demonstrated the efficiency and effectiveness of the new
+<br/>method. The experimental results showed that the new algorithm outperformed the other
+<br/>popular approaches reported in the literature and achieved a much higher accurate recog-
+<br/>nition rate.
+<br/>Keywords: 3D face recognition, Gabor filters, locality preserving projections, principle
+<br/>components analysis, nearest neighbor
+<br/>1. INTRODUCTION
+<br/>JOURNAL OF INFORMATION SCIENCE AND ENGINEERING 26, 2297-2307 (2010)
+<br/>Short Paper__________________________________________________
+<br/>3D Face Recognition Using Patched
+<br/>Locality Preserving Projections*
+<br/>Face recognition is a very challenging subject. So far, studies in 2D face recognition
+<br/>have gained significant development, such as Principal Component Analysis (PCA) [1],
+<br/>Linear Discriminant Analysis (LDA) [2], and Independent Component Analysis (ICA) [3]
+<br/>and so on. But it still bears limitations mostly due to pose variation, illumination, and
+<br/>facial expression. 3D face recognition stood out due to the use of face depth information
+<br/>which can overcome such limitations. Recently with the development of 3D acquisition
+<br/>system, 3D face recognition has attracted more and more interest and a great deal of re-
+<br/>search effort has been devoted to this topic [4-7].
+<br/>Many methods have been proposed for 3D face recognition over the last two dec-
+<br/>ades. Beumier et al. [8] proposed two methods of surface matching. Central and lateral
+<br/>profiles were compared in the curvature space to achieve recognition. However, the me-
+<br/>Received October 19, 2009; revised January 8, 2010; accepted March 5, 2010.
+<br/>Communicated by Tyng-Luh Liu.
+<br/>* This work was also partially supported by the National Natural Science Foundation of China under Grant No.
+<br/>60973060 and the Doctorial Foundation of Ministry of Education of China under Grant No. 200800040008.
+<br/>2297
+</td><td>('3282147', 'Xue-Qiao Wang', 'xue-qiao wang')<br/>('2383779', 'Qiu-Qi Ruan', 'qiu-qi ruan')</td><td></td></tr><tr><td>e78394213ae07b682ce40dc600352f674aa4cb05</td><td>Expression-invariant three-dimensional face recognition
+<br/>Computer Science Department,
+<br/><b>Technion Israel Institute of Technology</b><br/>Haifa 32000, Israel
+<br/>One of the hardest problems in face recognition is dealing with facial expressions. Finding an
+<br/>expression-invariant representation of the face could be a remedy for this problem. We suggest
+<br/>treating faces as deformable surfaces in the context of Riemannian geometry, and propose to ap-
+<br/>proximate facial expressions as isometries of the facial surface. This way, we can define geometric
+<br/>invariants of a given face under different expressions. One such invariant is constructed by iso-
+<br/>metrically embedding the facial surface structure into a low-dimensional flat space. Based on this
+<br/>approach, we built an accurate three-dimensional face recognition system that is able to distinguish
+<br/>between identical twins under various facial expressions. In this chapter we show how under the
+<br/>near-isometric model assumption, the difficult problem of face recognition in the presence of facial
+<br/>expressions can be solved in a relatively simple way.
+<br/>0.1 Introduction
+<br/>It is well-known that some characteristics or behavior patterns of the human body are strictly
+<br/>individual and can be observed in two different people with a very low probability – a few such
+<br/>examples include the DNA code, fingerprints, structure of retinal veins and iris, individual’s written
+<br/>signature or face. The term biometrics refers to a variety of methods that attempt to uniquely
+<br/>identify a person according to a set of such features.
+<br/>While many of today’s biometric technologies are based on the discoveries of the last century (like
+<br/>the DNA, for example), some of them have been exploited from the dawn of the human civilization
+<br/>[17]. One of the oldest written testimonies of a biometric technology and the first identity theft
+<br/>dates back to biblical times, when Jacob fraudulently used the identity of his twin brother Esau to
+<br/>benefit from their father’s blessing. The Genesis book describes a combination of hand scan and
+<br/>voice recognition that Isaac used to attempt to verify his son’s identity, without knowing that the
+<br/>smooth-skinned Jacob had wrapped his hands in kidskin:
+<br/>“And Jacob went near unto Isaac his father; and he felt him, and said, ’The voice is Jacob’s
+<br/>voice, but the hands are the hands of Esau’. And he recognized him not, because his hands
+<br/>were hairy, as his brother Esau’s hands.”
+<br/>The false acceptance which resulted from this very inaccurate biometric test had historical conse-
+<br/>quences of unmatched proportions.
+<br/>Face recognition is probably the most natural biometric method. The remarkable ability of the
+<br/>human vision to recognize faces is widely used for biometric authentication from prehistoric times.
+<br/>These days, almost every identification document contains a photograph of its bearer, which allows
+<br/>the respective officials to verify a person’s identity by comparing his actual face with the one on the
+<br/>photo.
+<br/>Unlike many other biometrics, face recognition does not require physical contact with the individ-
+<br/>ual (like fingerprint recognition) or taking samples of the body (like DNA-based identification) or the
+<br/>individual’s behavior (like signature recognition). For these reasons, face recognition is considered a
+<br/>natural, less intimidating, and widely accepted biometric identification method [4, 47], and as such,
+<br/>has the potential of becoming the leading biometric technology. The great technological challenge is
+<br/>to perform face recognition automatically, by means of computer algorithms that work without any
+</td><td>('1731883', 'Alexander M. Bronstein', 'alexander m. bronstein')<br/>('1732570', 'Michael M. Bronstein', 'michael m. bronstein')<br/>('1692832', 'Ron Kimmel', 'ron kimmel')</td><td>Email: alexbron@ieee.org
+<br/>bronstein@ieee.org
+<br/>ron@cs.technion.ac.il
+</td></tr><tr><td>e7b2b0538731adaacb2255235e0a07d5ccf09189</td><td>Learning Deep Representations with
+<br/>Probabilistic Knowledge Transfer
+<br/><b>Aristotle University of Thessaloniki, Thessaloniki 541 24, Greece</b></td><td>('3200630', 'Nikolaos Passalis', 'nikolaos passalis')<br/>('1737071', 'Anastasios Tefas', 'anastasios tefas')</td><td>passalis@csd.auth.gr, tefas@aiia.csd.auth.gr
+</td></tr><tr><td>e726acda15d41b992b5a41feabd43617fab6dc23</td><td></td><td></td><td></td></tr><tr><td>e74816bc0803460e20edbd30a44ab857b06e288e</td><td>Semi-Automated Annotation of Discrete States
+<br/>in Large Video Datasets
+<br/>Lex Fridman
+<br/><b>Massachusetts Institute of Technology</b><br/><b>Massachusetts Institute of Technology</b></td><td>('1901227', 'Bryan Reimer', 'bryan reimer')</td><td>fridman@mit.edu
+<br/>reimer@mit.edu
+</td></tr><tr><td>e7b6887cd06d0c1aa4902335f7893d7640aef823</td><td>Modelling of Facial Aging and Kinship: A Survey
+</td><td>('34291068', 'Markos Georgopoulos', 'markos georgopoulos')<br/>('1780393', 'Yannis Panagakis', 'yannis panagakis')<br/>('1694605', 'Maja Pantic', 'maja pantic')</td><td></td></tr><tr><td>e73b9b16adcf4339ff4d6723e61502489c50c2d9</td><td>Informatics Engineering, an International Journal (IEIJ) ,Vol.2, No.1, March 2014
+<br/>AN EFFICIENT FEATURE EXTRACTION METHOD WITH
+<br/>LOCAL REGION ZERNIKE MOMENT FOR FACIAL
+<br/>RECOGNITION OF IDENTICAL TWINS
+<br/>1Department of Electrical,Computer and Biomedical Engineering, Qazvin branch, Islamic
+<br/><b>Amirkabir University of Technology, Tehran</b><br/><b>Azad University, Qazvin, Iran</b><br/>Iran
+</td><td>('1692435', 'Karim Faez', 'karim faez')</td><td></td></tr><tr><td>cbca355c5467f501d37b919d8b2a17dcb39d3ef9</td><td>CANSIZOGLU, JONES: SUPER-RESOLUTION OF VERY LR FACES FROM VIDEOS
+<br/>Super-resolution of Very Low-Resolution
+<br/>Faces from Videos
+<br/>Esra Ataer-Cansizoglu
+<br/><b>Mitsubishi Electric Research Labs</b><br/>(MERL)
+<br/>Cambridge, MA, USA
+</td><td>('1961683', 'Michael Jones', 'michael jones')</td><td>cansizoglu@merl.com
+<br/>mjones@merl.com
+</td></tr><tr><td>cbbd13c29d042743f0139f1e044b6bca731886d0</td><td>Not-So-CLEVR: learning same–different relations strains
+<br/>feedforward neural networks
+<br/>†equal contributions
+<br/>Department of Cognitive, Linguistic & Psychological Sciences
+<br/><b>Carney Institute for Brain Science</b><br/><b>Brown University, Providence, RI 02912, USA</b></td><td>('5546699', 'Junkyung Kim', 'junkyung kim')</td><td></td></tr><tr><td>cbcf5da9f09b12f53d656446fd43bc6df4b2fa48</td><td>ISSN: 2277-3754
+<br/>ISO 9001:2008 Certified
+<br/>International Journal of Engineering and Innovative Technology (IJEIT)
+<br/>Volume 2, Issue 6, December 2012
+<br/> Face Recognition using Gray level Co-occurrence
+<br/>Matrix and Snap Shot Method of the Eigen Face
+<br/><b>Sri Chandrasekharendra Saraswathi Viswa Mahavidyalaya University, Kanchipuram, India</b><br/>M. Madhu, R. Amutha
+<br/><b>SSN College of Engineering, Chennai, India</b></td><td></td><td></td></tr><tr><td>cba45a87fc6cf12b3b0b6f57ba1a5282ef7fee7a</td><td>Emotion AI, Real-Time Emotion Detection using CNN
+<br/>M.S. Computer Science
+<br/><b>Stanford University</b><br/>B.S. Computer Science
+<br/><b>Stanford University</b></td><td></td><td>tanner12@stanford.edu
+<br/>bakis@stanford.edu
+</td></tr><tr><td>cb004e9706f12d1de83b88c209ac948b137caae0</td><td>Face Aging Effect Simulation using Hidden Factor
+<br/>Analysis Joint Sparse Representation
+</td><td>('1787137', 'Hongyu Yang', 'hongyu yang')<br/>('31454775', 'Di Huang', 'di huang')<br/>('40013375', 'Yunhong Wang', 'yunhong wang')<br/>('46506697', 'Heng Wang', 'heng wang')<br/>('2289713', 'Yuanyan Tang', 'yuanyan tang')</td><td></td></tr><tr><td>cb2917413c9b36c3bb9739bce6c03a1a6eb619b3</td><td>MiCT: Mixed 3D/2D Convolutional Tube for Human Action Recognition
+<br/><b>University of Science and Technology of China</b><br/>2Microsoft Research Asia
+</td><td>('49455479', 'Yizhou Zhou', 'yizhou zhou')<br/>('48305246', 'Xiaoyan Sun', 'xiaoyan sun')<br/>('2057216', 'Zheng-Jun Zha', 'zheng-jun zha')<br/>('8434337', 'Wenjun Zeng', 'wenjun zeng')</td><td>zyz0205@mail.ustc.edu.cn, zhazj@ustc.edu.cn
+<br/>{xysun,wezeng}@microsoft.com
+</td></tr><tr><td>cb9092fe74ea6a5b2bb56e9226f1c88f96094388</td><td></td><td></td><td></td></tr><tr><td>cb13e29fb8af6cfca568c6dc523da04d1db1fff5</td><td>Paper accepted to Frontiers in Psychology
+<br/>Received: 02 Dec 2017
+<br/>Accepted: 12 June 2018
+<br/>DOI: 10.3389/fpsyg.2018.01128
+<br/>A Survey of Automatic Facial
+<br/>Micro-expression Analysis:
+<br/>Databases, Methods and Challenges
+<br/><b>Multimedia University, Faculty of Engineering, Cyberjaya, 63100 Selangor, Malaysia</b><br/><b>Multimedia University, Faculty of Computing and Informatics, Cyberjaya</b><br/>Selangor, Malaysia
+<br/><b>University of Nottingham, School of Psychology, University Park, Nottingham NG</b><br/>2RD, United Kingdom
+<br/><b>Multimedia University, Research Institute for Digital Security, Cyberjaya</b><br/>Selangor, Malaysia
+<br/><b>Monash University Malaysia, School of Information Technology, Sunway</b><br/>Selangor, Malaysia
+<br/>Correspondence*:
+</td><td>('2154760', 'Yee-Hui Oh', 'yee-hui oh')<br/>('2339975', 'John See', 'john see')<br/>('35256518', 'Anh Cat Le Ngo', 'anh cat le ngo')<br/>('6633183', 'Raphael C.-W. Phan', 'raphael c.-w. phan')<br/>('34287833', 'Vishnu Monn Baskaran', 'vishnu monn baskaran')<br/>('2339975', 'John See', 'john see')</td><td>johnsee@mmu.edu.my
+</td></tr><tr><td>cb08f679f2cb29c7aa972d66fe9e9996c8dfae00</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 13, NO. 9, SEPTEMBER 2014
+<br/>Action Understanding
+<br/>with Multiple Classes of Actors
+</td><td>('2026123', 'Chenliang Xu', 'chenliang xu')<br/>('2228109', 'Caiming Xiong', 'caiming xiong')<br/>('3587688', 'Jason J. Corso', 'jason j. corso')</td><td></td></tr><tr><td>cb84229e005645e8623a866d3d7956c197f85e11</td><td>IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. X, NO. X, MONTH 201X
+<br/>Disambiguating Visual Verbs
+</td><td>('2921001', 'Spandana Gella', 'spandana gella')<br/>('2505673', 'Frank Keller', 'frank keller')<br/>('1747893', 'Mirella Lapata', 'mirella lapata')</td><td></td></tr><tr><td>cb1b5e8b35609e470ce519303915236b907b13b6</td><td>On the Vulnerability of ECG Verification to Online Presentation Attacks
+<br/><b>University of Connecticut</b><br/>Electrical & Computer Engineering
+<br/><b>University of Florida</b><br/>Electrical & Computer Engineering
+</td><td>('3445153', 'Nima Karimian', 'nima karimian')<br/>('2171076', 'Damon L. Woodard', 'damon l. woodard')<br/>('2925373', 'Domenic Forte', 'domenic forte')</td><td>nima@engr.uconn.edu
+<br/>dwoodard, dforte@ece.ufl.edu
+</td></tr><tr><td>cbb27980eb04f68d9f10067d3d3c114efa9d0054</td><td>An Attention Model for group-level emotion recognition
+<br/><b>Indian Institute of Technology</b><br/>Roorkee
+<br/>Roorkee, India
+<br/><b>Indian Institute of Technology</b><br/>Roorkee
+<br/>Roorkee, India
+<br/><b>Indian Institute of Technology</b><br/>Roorkee
+<br/>Roorkee, India
+<br/>École de Technologie Supérieure
+<br/>Montreal, Canada
+<br/>École de Technologie Supérieure
+<br/>Montreal, Canada
+</td><td>('51127375', 'Aarush Gupta', 'aarush gupta')<br/>('51134535', 'Dakshit Agrawal', 'dakshit agrawal')<br/>('51118849', 'Hardik Chauhan', 'hardik chauhan')<br/>('3055538', 'Jose Dolz', 'jose dolz')<br/>('3048367', 'Marco Pedersoli', 'marco pedersoli')</td><td>agupta1@cs.iitr.ac.in
+<br/>dagrawal@cs.iitr.ac.in
+<br/>haroi.uee2014@iitr.ac.in
+<br/>jose.dolz@livia.etsmtl.ca
+<br/>Marco.Pedersoli@etsmtl.ca
+</td></tr><tr><td>cbe859d151466315a050a6925d54a8d3dbad591f</td><td>GAZE SHIFTS AS DYNAMICAL RANDOM SAMPLING
+<br/>Dipartimento di Scienze dell’Informazione
+<br/>Universit´a di Milano
+<br/>Via Comelico 39/41
+<br/>20135 Milano, Italy
+</td><td>('1715361', 'Giuseppe Boccignone', 'giuseppe boccignone')<br/>('3241931', 'Mario Ferraro', 'mario ferraro')</td><td>boccignone@dsi.unimi.it
+</td></tr><tr><td>f86ddd6561f522d115614c93520faad122eb3b56</td><td>PACS2016
+<br/>Beyond AlphaGo
+<br/>October 27-28, 2016
+<br/>Visual Imagination from Texts
+<br/>School of Computer Science and Engineering
+<br/><b>Seoul National University</b><br/>Seoul 151-744, Korea
+</td><td>('3434480', 'Hanock Kwak', 'hanock kwak')<br/>('1692756', 'Byoung-Tak Zhang', 'byoung-tak zhang')</td><td>Email: (hnkwak, btzhang)@bi.snu.ac.kr
+</td></tr><tr><td>f8015e31d1421f6aee5e17fc3907070b8e0a5e59</td><td>April 19, 2016
+<br/>DRAFT
+<br/>Towards Usable Multimedia Event Detection
+<br/>from Web Videos
+<br/>April, 2016
+<br/>School of Computer Science
+<br/><b>Carnegie Mellon University</b><br/>Pittsburgh, PA 15213
+<br/>Thesis Committee:
+<br/>Alexander G. Hauptmann, Chair
+<br/>Submitted in partial fulfillment of the requirements
+<br/>for the degree of Doctor of Philosophy.
+</td><td>('34692532', 'Zhenzhong Lan', 'zhenzhong lan')<br/>('1767184', 'Louis-Philippe Morency', 'louis-philippe morency')<br/>('14517812', 'Leonid Sigal', 'leonid sigal')<br/>('34692532', 'Zhenzhong Lan', 'zhenzhong lan')</td><td></td></tr><tr><td>f842b13bd494be1bbc1161dc6df244340b28a47f</td><td>An Improved Face Recognition Technique Based
+<br/>on Modular Multi-directional Two-dimensional
+<br/>Principle Component Analysis Approach
+<br/><b>Hanshan Normal University, Chaozhou, 521041, China</b><br/><b>Hanshan Normal University, Chaozhou, 521041, China</b></td><td>('48477766', 'Xiaoqing Dong', 'xiaoqing dong')<br/>('2747115', 'Hongcai Chen', 'hongcai chen')</td><td>Email: dxqzq110@163.com
+<br/>Email: czhschc@126.com
+</td></tr><tr><td>f83dd9ff002a40228bbe3427419b272ab9d5c9e4</td><td>Facial Features Matching using a Virtual Structuring Element
+<br/>Intelligent Systems Lab Amsterdam,
+<br/><b>University of Amsterdam</b><br/>Kruislaan 403, 1098 SJ Amsterdam, The Netherlands
+</td><td>('9301018', 'Roberto Valenti', 'roberto valenti')<br/>('1703601', 'Nicu Sebe', 'nicu sebe')<br/>('1695527', 'Theo Gevers', 'theo gevers')</td><td></td></tr><tr><td>f8c94afd478821681a1565d463fc305337b02779</td><td>
+<br/>www.semargroup.org,
+<br/>www.ijsetr.com
+<br/>
+<br/>ISSN 2319-8885
+<br/>Vol.03,Issue.25
+<br/>September-2014,
+<br/>Pages:5079-5085
+<br/>Design and Implementation of Robust Face Recognition System for
+<br/>Uncontrolled Pose and Illumination Changes
+<br/>2
+</td><td></td><td>1PG Scholar, Dept of ECE, LITAM, JNTUK, Andhrapradesh, India, Email: bhaskar.t60@gmail.com.
+<br/>2Assistant Professor, Dept of ECE, LITAM, JNTUK, Andhrapradesh, India, Email: venky999v@gmail.com.
+</td></tr><tr><td>f8f2d2910ce8b81cb4bbf84239f9229888158b34</td><td>Proceedings of the Twenty-Fifth International Joint Conference on Artificial Intelligence (IJCAI-16)
+<br/>A Generative Model for Recognizing
+<br/>Mixed Group Activities in Still Images
+<br/><b>School of Computer, Beijing Institute of Technology, Beijing, China</b><br/><b>School of Computing and Communications, University of Technology Sydney, Sydney, Australia</b></td><td>('32056779', 'Zheng Zhou', 'zheng zhou')<br/>('1780081', 'Kan Li', 'kan li')<br/>('1706670', 'Xiangjian He', 'xiangjian he')<br/>('3225703', 'Mengmeng Li', 'mengmeng li')</td><td>{zz24, likan}@bit.edu.cn, xiangjian.he@uts.edu.au, limengmeng93@163.com
+</td></tr><tr><td>f8ec92f6d009b588ddfbb47a518dd5e73855547d</td><td>J Inf Process Syst, Vol.10, No.3, pp.443~458, September 2014
+<br/>
+<br/>ISSN 1976-913X (Print)
+<br/>ISSN 2092-805X (Electronic)
+<br/>Extreme Learning Machine Ensemble Using
+<br/>Bagging for Facial Expression Recognition
+</td><td>('32322842', 'Deepak Ghimire', 'deepak ghimire')<br/>('2034182', 'Joonwhoan Lee', 'joonwhoan lee')</td><td></td></tr><tr><td>f869601ae682e6116daebefb77d92e7c5dd2cb15</td><td></td><td></td><td></td></tr><tr><td>f8ddb2cac276812c25021b5b79bf720e97063b1e</td><td>A Comprehensive Empirical Study on Linear Subspace Methods for Facial
+<br/>Expression Analysis
+<br/><b>Queen Mary, University of London</b><br/>Mile End Road, London E1 4NS
+</td><td>('10795229', 'Caifeng Shan', 'caifeng shan')<br/>('2073354', 'Shaogang Gong', 'shaogang gong')<br/>('2803283', 'Peter W. McOwan', 'peter w. mcowan')</td><td>{cfshan, sgg, pmco}@dcs.qmul.ac.uk
+</td></tr><tr><td>f8ed5f2c71e1a647a82677df24e70cc46d2f12a8</td><td>International Journal of Scientific & Engineering Research, Volume 2, Issue 12, December-2011 1
+<br/>ISSN 2229-5518
+<br/>Artificial Neural Network Design and Parameter
+<br/>Optimization for Facial Expressions Recognition
+</td><td></td><td></td></tr><tr><td>f8f872044be2918de442ba26a30336d80d200c42</td><td>IJSRD - International Journal for Scientific Research & Development| Vol. 3, Issue 03, 2015 | ISSN (online): 2321-0613
+<br/>Facial Emotion Recognition Techniques: A Survey
+<br/>1,2Department of Computer Science and Engineering
+<br/><b>Dr C V Raman Institute of Science and Technology</b><br/>defense
+<br/>systems,
+<br/>surveillance
+</td><td></td><td></td></tr><tr><td>f8a5bc2bd26790d474a1f6cc246b2ba0bcde9464</td><td>ORIGINAL RESEARCH
+<br/>published: 19 December 2017
+<br/>doi: 10.3389/fpsyg.2017.02181
+<br/>KDEF-PT: Valence, Emotional
+<br/>Intensity, Familiarity and
+<br/>Attractiveness Ratings of Angry,
+<br/>Neutral, and Happy Faces
+<br/>Instituto Universitário de Lisboa (ISCTE-IUL), CIS – IUL, Lisboa, Portugal
+<br/>The Karolinska Directed Emotional Faces (KDEF)
+<br/>is one of the most widely used
+<br/>human facial expressions database. Almost a decade after the original validation study
+<br/>(Goeleven et al., 2008), we present subjective rating norms for a sub-set of 210 pictures
+<br/>which depict 70 models (half female) each displaying an angry, happy and neutral facial
+<br/>expressions. Our main goals were to provide an additional and updated validation
+<br/>to this database, using a sample from a different nationality (N = 155 Portuguese
+<br/>students, M = 23.73 years old, SD = 7.24) and to extend the number of subjective
+<br/>dimensions used to evaluate each image. Specifically, participants reported emotional
+<br/>labeling (forced-choice task) and evaluated the emotional intensity and valence of the
+<br/>expression, as well as the attractiveness and familiarity of the model (7-points rating
+<br/>scales). Overall, results show that happy faces obtained the highest ratings across
+<br/>evaluative dimensions and emotion labeling accuracy. Female (vs. male) models were
+<br/>perceived as more attractive, familiar and positive. The sex of the model also moderated
+<br/>the accuracy of emotional
+<br/>labeling and ratings of different facial expressions. Each
+<br/>picture of the set was categorized as low, moderate, or high for each dimension.
+<br/>Normative data for each stimulus (hits proportion, means, standard deviations, and
+<br/>confidence intervals per evaluative dimension) is available as supplementary material
+<br/>(available at https://osf.io/fvc4m/).
+<br/>Keywords: facial expressions, normative data, subjective ratings, emotion labeling, sex differences
+<br/>INTRODUCTION
+<br/>The human face conveys important information for social interaction. For example, it is a major
+<br/>source for forming first impressions, and to make fast and automatic personality trait inferences
+<br/>(for a review, see Zebrowitz, 2017). Indeed, facial expressions have been the most studied non-
+<br/>verbal emotional cue (for a review, see Schirmer and Adolphs, 2017). In addition to their physical
+<br/>component (i.e., morphological changes in the face such as frowning or opening the mouth),
+<br/>emotional facial expressions also have an affective component that conveys information about the
+<br/>internal feelings of the person expressing it (for a review, see Calvo and Nummenmaa, 2016).
+<br/>Moreover, facial expressions communicate a social message that informs about the behavioral
+<br/>intentions of the expresser, which in turn prompt responses in the perceiver such approach and
+<br/>avoidance reactions (for a review, see Paulus and Wentura, 2016).
+<br/>Edited by:
+<br/>Sergio Machado,
+<br/><b>Salgado de Oliveira University, Brazil</b><br/>Reviewed by:
+<br/>Pietro De Carli,
+<br/>Dipartimento di Psicologia dello
+<br/>Sviluppo e della Socializzazione,
+<br/>Università degli Studi di Padova, Italy
+<br/>Sylvie Berthoz,
+<br/>Institut National de la Santé et de la
+<br/>Recherche Médicale, France
+<br/>*Correspondence:
+<br/>Specialty section:
+<br/>This article was submitted to
+<br/>Quantitative Psychology
+<br/>and Measurement,
+<br/>a section of the journal
+<br/>Frontiers in Psychology
+<br/>Received: 18 July 2017
+<br/>Accepted: 30 November 2017
+<br/>Published: 19 December 2017
+<br/>Citation:
+<br/>Garrido MV and Prada M (2017)
+<br/>KDEF-PT: Valence, Emotional
+<br/>Intensity, Familiarity
+<br/>and Attractiveness Ratings of Angry,
+<br/>Neutral, and Happy Faces.
+<br/>Front. Psychol. 8:2181.
+<br/>doi: 10.3389/fpsyg.2017.02181
+<br/>Frontiers in Psychology | www.frontiersin.org
+<br/>December 2017 | Volume 8 | Article 2181
+</td><td>('28239829', 'Margarida V. Garrido', 'margarida v. garrido')<br/>('38831356', 'Marília Prada', 'marília prada')<br/>('28239829', 'Margarida V. Garrido', 'margarida v. garrido')</td><td>margarida.garrido@iscte-iul.pt
+</td></tr><tr><td>f87b22e7f0c66225824a99cada71f9b3e66b5742</td><td>Robust Emotion Recognition from Low Quality and Low Bit Rate Video:
+<br/>A Deep Learning Approach
+<br/><b>Beckman Institute, University of Illinois at Urbana-Champaign</b><br/><b>Texas AandM University</b><br/><b>University of Missouri, Kansas City</b><br/>§ Snap Inc, USA
+<br/><b>University of Washington</b></td><td>('50563570', 'Bowen Cheng', 'bowen cheng')<br/>('2969311', 'Zhangyang Wang', 'zhangyang wang')<br/>('4622305', 'Zhaobin Zhang', 'zhaobin zhang')<br/>('49970050', 'Zhu Li', 'zhu li')<br/>('1771885', 'Ding Liu', 'ding liu')<br/>('1706007', 'Jianchao Yang', 'jianchao yang')<br/>('47156875', 'Shuai Huang', 'shuai huang')<br/>('1739208', 'Thomas S. Huang', 'thomas s. huang')</td><td>{bcheng9, dingliu2, t-huang1}@illinois.edu
+<br/>atlaswang@tamu.edu
+<br/>{zzktb@mail., lizhu@}umkc.edu
+<br/>jianchao.yang@snap.com
+<br/>shuaih@uw.edu
+</td></tr><tr><td>cef841f27535c0865278ee9a4bc8ee113b4fb9f3</td><td></td><td></td><td></td></tr><tr><td>ce6d60b69eb95477596535227958109e07c61e1e</td><td>Unconstrained Face Verification Using Fisher Vectors
+<br/>Computed From Frontalized Faces
+<br/>Center for Automation Research
+<br/><b>University of Maryland, College Park, MD</b></td><td>('36407236', 'Jun-Cheng Chen', 'jun-cheng chen')<br/>('2716670', 'Swami Sankaranarayanan', 'swami sankaranarayanan')<br/>('1741177', 'Vishal M. Patel', 'vishal m. patel')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td>{pullpull, swamiviv, pvishalm, rama}@umiacs.umd.edu
+</td></tr><tr><td>ceb763d6657a07b47e48e8a2956bcfdf2cf10818</td><td>International Journal of Computational Science and Information Technology (IJCSITY) Vol.2, No.1, February 2014
+<br/>AN EFFICIENT FEATURE EXTRACTION METHOD
+<br/>WITH PSEUDO-ZERNIKE MOMENT FOR FACIAL
+<br/>RECOGNITION OF IDENTICAL TWINS
+<br/>1Department of Electrical, Computer and Biomedical Engineering, Qazvin branch,
+<br/><b>Amirkabir University of Technology, Tehran</b><br/><b>Islamic Azad University, Qazvin, Iran</b><br/>Iran
+</td><td>('13302047', 'Hoda Marouf', 'hoda marouf')<br/>('1692435', 'Karim Faez', 'karim faez')</td><td></td></tr><tr><td>cefd9936e91885ba7af9364d50470f6cb54315a4</td><td>The Journal of Neuroscience, December 8, 2010 • 30(49):16601–16608 • 16601
+<br/>Behavioral/Systems/Cognitive
+<br/>Expectation and Surprise Determine Neural Population
+<br/>Responses in the Ventral Visual Stream
+<br/><b>and 2Center for Cognitive Neuroscience, Duke University, Durham, North Carolina 27708</b><br/><b>Psychology, University of Illinois, Beckman Institute, Urbana-Champaign, Illinois 61801, University of</b><br/>Oxford, Oxford OX1 3UD, United Kingdom
+<br/>Visual cortex is traditionally viewed as a hierarchy of neural feature detectors, with neural population responses being driven by
+<br/>bottom-up stimulus features. Conversely, “predictive coding” models propose that each stage of the visual hierarchy harbors two
+<br/>computationally distinct classes of processing unit: representational units that encode the conditional probability of a stimulus and
+<br/>provide predictions to the next lower level; and error units that encode the mismatch between predictions and bottom-up evidence, and
+<br/>forward prediction error to the next higher level. Predictive coding therefore suggests that neural population responses in category-
+<br/>selective visual regions, like the fusiform face area (FFA), reflect a summation of activity related to prediction (“face expectation”) and
+<br/>prediction error (“face surprise”), rather than a homogenous feature detection response. We tested the rival hypotheses of the feature
+<br/>detection and predictive coding models by collecting functional magnetic resonance imaging data from the FFA while independently
+<br/>varying both stimulus features (faces vs houses) and subjects’ perceptual expectations regarding those features (low vs medium vs high
+<br/>face expectation). The effects of stimulus and expectation factors interacted, whereby FFA activity elicited by face and house stimuli was
+<br/>indistinguishable under high face expectation and maximally differentiated under low face expectation. Using computational modeling,
+<br/>we show that these data can be explained by predictive coding but not by feature detection models, even when the latter are augmented
+<br/>with attentional mechanisms. Thus, population responses in the ventral visual stream appear to be determined by feature expectation
+<br/>and surprise rather than by stimulus features per se.
+<br/>Introduction
+<br/>“Predictive coding” models of visual cognition propose that per-
+<br/>ceptual inference proceeds as an iterative matching process of
+<br/>top-down predictions against bottom-up evidence along the vi-
+<br/>sual cortical hierarchy (Mumford, 1992; Rao and Ballard, 1999;
+<br/>Lee and Mumford, 2003; Friston, 2005; Spratling, 2008). Specif-
+<br/>ically, each stage of the visual cortical hierarchy is thought to
+<br/>harbor two computationally distinct classes of processing unit:
+<br/>representational units that encode the conditional probability of
+<br/>a stimulus (“expectation”) and provide predictions regarding ex-
+<br/>pected inputs to the next lower level; and error units that encode
+<br/>the mismatch between predictions and bottom-up evidence
+<br/>(“surprise”), and forward this prediction error to the next higher
+<br/>level, where representations are adjusted to eliminate prediction
+<br/>error (Friston, 2005). These assumptions contrast sharply with
+<br/>more traditional views that cast visual neurons primarily as fea-
+<br/>ture detectors (Hubel and Wiesel, 1965; Riesenhuber and Poggio,
+<br/>2000), but explicit empirical tests adjudicating between these ri-
+<br/>val conceptions are lacking.
+<br/>Received June 1, 2010; revised Sept. 21, 2010; accepted Sept. 28, 2010.
+<br/>This work was supported by funds granted by the Cognitive Neurology and Alzheimer’s Disease Center
+<br/><b>Northwestern University) to T.E. We thank Vincent De Gardelle for helpful comments on an earlier version of</b><br/>this manuscript.
+<br/>DOI:10.1523/JNEUROSCI.2770-10.2010
+<br/>Copyright © 2010 the authors
+<br/>0270-6474/10/3016601-08$15.00/0
+<br/>Here, we exploited the fact that the two models make diver-
+<br/>gent predictions regarding determinants of neural population
+<br/>responses in category-selective visual regions, like the fusiform
+<br/>face area (FFA) (Kanwisher et al., 1997). Predictive coding sug-
+<br/>gests that FFA population responses should reflect a summation
+<br/>of activity related to representational units (“face expectation”)
+<br/>and error units (“face surprise”), whereas feature detection mod-
+<br/>els suppose the population response to be driven by physical
+<br/>stimulus characteristics (“face features”) alone. We adjudicated
+<br/>between these hypotheses by acquiring functional magnetic res-
+<br/>onance imaging (fMRI) data from the FFA while independently
+<br/>varying both stimulus features (faces vs houses) and subjects’
+<br/>perceptual expectations regarding those features (low vs medium
+<br/>vs high face expectation) (Fig. 1A,C). Of note, both the feature
+<br/>detection and predictive coding views also allow for visual neural
+<br/>responses to be scaled by attention. Therefore, the above manip-
+<br/>ulations were orthogonal to the task demands (the detection of
+<br/>occasional inverted “target” stimuli) (Fig. 1B) to control for po-
+<br/>tential differences in attention across the conditions of interest.
+<br/>According to predictive coding, FFA activity in this experi-
+<br/>ment should vary as an additive function of face expectation
+<br/>(high ⬎ low) (Fig. 2A, left) and face surprise (unexpected ⬎
+<br/>expected faces) (Fig. 2A, middle). This would result in an inter-
+<br/>action between stimulus and expectation factors (Fig. 2A right
+<br/>panel), whereby FFA responses to face and house stimuli should
+<br/>be similar under high face expectation, because both of these
+<br/>conditions would be associated with activity related to face ex-
+</td><td>('1900710', 'Tobias Egner', 'tobias egner')<br/>('2372244', 'Christopher Summerfield', 'christopher summerfield')<br/>('1900710', 'Tobias Egner', 'tobias egner')</td><td>Box 90999, Durham, NC 27708. E-mail: tobias.egner@duke.edu.
+</td></tr><tr><td>ce85d953086294d989c09ae5c41af795d098d5b2</td><td>This article has been accepted for inclusion in a future issue of this journal. Content is final as presented, with the exception of pagination.
+<br/>Bilinear Analysis for Kernel Selection and
+<br/>Nonlinear Feature Extraction
+</td><td>('1718245', 'Shu Yang', 'shu yang')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')<br/>('1720735', 'Chao Zhang', 'chao zhang')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td></td></tr><tr><td>ce5eac297174c17311ee28bda534faaa1d559bae</td><td>Automatic analysis of malaria infected red
+<br/>blood cell digitized microscope images
+<br/>A dissertation submitted in partial fulfilment
+<br/>of the requirements for the degree of
+<br/>Doctor of Philosophy
+<br/>of
+<br/><b>University College London</b><br/>Department of Computer Science
+<br/><b>University College London</b><br/>Supervisor: Prof. Bernard F. Buxton
+<br/>February 2016
+</td><td>('2768033', 'Houari Abdallahi', 'houari abdallahi')</td><td></td></tr><tr><td>ce691a37060944c136d2795e10ed7ba751cd8394</td><td></td><td></td><td></td></tr><tr><td>ce3f3088d0c0bf236638014a299a28e492069753</td><td></td><td></td><td></td></tr><tr><td>ceaa5eb51f761b5f84bd88b58c8f484fcd2a22d6</td><td>UC San Diego
+<br/>UC San Diego Electronic Theses and Dissertations
+<br/>Title
+<br/>Inhibitions of ascorbate fatty acid derivatives on three rabbit muscle glycolytic enzymes
+<br/>Permalink
+<br/>https://escholarship.org/uc/item/8x33n1gj
+<br/>Author
+<br/>Pham, Duyen-Anh
+<br/>Publication Date
+<br/>2011-01-01
+<br/>Peer reviewed|Thesis/dissertation
+<br/>eScholarship.org
+<br/>Powered by the California Digital Library
+<br/><b>University of California</b></td><td></td><td></td></tr><tr><td>ce450e4849490924488664b44769b4ca57f1bc1a</td><td>Procedural Generation of Videos to Train Deep Action Recognition Networks
+<br/>1Computer Vision Group, NAVER LABS Europe, Meylan, France
+<br/>2Centre de Visi´o per Computador, Universitat Aut`onoma de Barcelona, Bellaterra, Spain
+<br/><b>Toyota Research Institute, Los Altos, CA, USA</b></td><td>('1799820', 'Adrien Gaidon', 'adrien gaidon')<br/>('3407519', 'Yohann Cabon', 'yohann cabon')</td><td>{cesar.desouza, yohann.cabon}@europe.naverlabs.com, adrien.gaidon@tri.global, antonio@cvc.uab.es
+</td></tr><tr><td>ceeb67bf53ffab1395c36f1141b516f893bada27</td><td>Face Alignment by Local Deep Descriptor Regression
+<br/><b>University of Maryland</b><br/><b>College Park, MD</b><br/><b>University of Maryland</b><br/><b>College Park, MD</b><br/><b>University of Maryland</b><br/><b>College Park, MD</b><br/><b>Rutgers University</b><br/>New Brunswick, NJ 08901
+</td><td>('40080979', 'Amit Kumar', 'amit kumar')<br/>('26988560', 'Rajeev Ranjan', 'rajeev ranjan')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')<br/>('1741177', 'Vishal M. Patel', 'vishal m. patel')</td><td>akumar14@umd.edu
+<br/>rranjan1@umd.edu
+<br/>rama@umiacs.umd.edu
+<br/>vishal.m.patel@rutgers.edu
+</td></tr><tr><td>ce032dae834f383125cdd852e7c1bc793d4c3ba3</td><td>Motion Interchange Patterns for Action
+<br/>Recognition in Unconstrained Videos
+<br/><b>The Weizmann Institute of Science, Israel</b><br/><b>Tel-Aviv University, Israel</b><br/><b>The Open University, Israel</b></td><td>('3294355', 'Orit Kliper-Gross', 'orit kliper-gross')<br/>('2916582', 'Yaron Gurovich', 'yaron gurovich')<br/>('1756099', 'Tal Hassner', 'tal hassner')<br/>('1776343', 'Lior Wolf', 'lior wolf')</td><td></td></tr><tr><td>ce9e1dfa7705623bb67df3a91052062a0a0ca456</td><td>Deep Feature Interpolation for Image Content Changes
+<br/>Kilian Weinberger1
+<br/><b>Cornell University</b><br/><b>George Washington University</b><br/>*Authors contributed equally
+</td><td>('3222840', 'Paul Upchurch', 'paul upchurch')<br/>('1791337', 'Kavita Bala', 'kavita bala')</td><td></td></tr><tr><td>ce9a61bcba6decba72f91497085807bface02daf</td><td>Eigen-Harmonics Faces: Face Recognition under Generic Lighting
+<br/>1Graduate School, CAS, Beijing, China, 100080
+<br/>2ICT-ISVISION Joint R&D Laboratory for Face Recognition, CAS, Beijing, China, 100080
+<br/>Emails: {lyqing, sgshan, wgao}jdl.ac.cn
+</td><td>('2343895', 'Laiyun Qing', 'laiyun qing')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('1698902', 'Wen Gao', 'wen gao')</td><td></td></tr><tr><td>cef6cffd7ad15e7fa5632269ef154d32eaf057af</td><td>Emotion Detection Through Facial Feature
+<br/>Recognition
+<br/>through consistent
+</td><td>('4959365', 'James Pao', 'james pao')</td><td>jpao@stanford.edu
+</td></tr><tr><td>cebfafea92ed51b74a8d27c730efdacd65572c40</td><td>JANUARY 2006
+<br/>31
+<br/>Matching 2.5D Face Scans to 3D Models
+</td><td>('2637547', 'Xiaoguang Lu', 'xiaoguang lu')<br/>('6680444', 'Anil K. Jain', 'anil k. jain')<br/>('2205218', 'Dirk Colbry', 'dirk colbry')</td><td></td></tr><tr><td>ce56be1acffda599dec6cc2af2b35600488846c9</td><td>Inferring Sentiment from Web Images with Joint Inference on Visual and Social
+<br/>Cues: A Regulated Matrix Factorization Approach
+<br/><b>Arizona State University, Tempe AZ</b><br/><b>IBM Almaden Research Center, San Jose CA</b></td><td>('33513248', 'Yilin Wang', 'yilin wang')</td><td>{ywang370,rao,baoxin.li}@asu.edu yuhenghu@us.ibm.com
+</td></tr><tr><td>ce54e891e956d5b502a834ad131616786897dc91</td><td>International Journal of Science and Research (IJSR)
+<br/>ISSN (Online): 2319-7064
+<br/>Index Copernicus Value (2013): 6.14 | Impact Factor (2014): 5.611
+<br/>Face Recognition Using LTP Algorithm
+<br/>1ECE & KUK
+<br/>2Assistant Professor (ECE)
+<br/>Volume 4 Issue 12, December 2015
+<br/>Licensed Under Creative Commons Attribution CC BY
+<br/>www.ijsr.net
+<br/> Variation in luminance: Third main challenge that
+<br/>appears in face recognition process is the luminance. Due
+<br/>to variation in the luminance the representation get varied
+<br/>from the original image. The person with same poses
+<br/>expression and seen from same viewpoint can be appear
+<br/>very different due to variation in lightening.
+</td><td>('1781253', 'Richa Sharma', 'richa sharma')<br/>('1887206', 'Rohit Arora', 'rohit arora')</td><td></td></tr><tr><td>ce6f459462ea9419ca5adcc549d1d10e616c0213</td><td>A Survey on Face Identification Methodologies in
+<br/>Videos
+<br/>Student, M.Tech CSE ,Department of Computer Science
+<br/><b>Engineering, G.H.Raisoni College of Engineering</b><br/>Technology for Women, Nagpur, Maharashtra, India.
+</td><td>('2776196', 'Deepti Yadav', 'deepti yadav')</td><td></td></tr><tr><td>ce933821661a0139a329e6c8243e335bfa1022b1</td><td>Temporal Modeling Approaches for Large-scale
+<br/>Youtube-8M Video Understanding
+<br/><b>Baidu IDL and Tsinghua University</b></td><td>('9921390', 'Fu Li', 'fu li')<br/>('2551285', 'Chuang Gan', 'chuang gan')<br/>('3025977', 'Xiao Liu', 'xiao liu')<br/>('38812373', 'Yunlong Bian', 'yunlong bian')<br/>('1716690', 'Xiang Long', 'xiang long')<br/>('2653177', 'Yandong Li', 'yandong li')<br/>('2027571', 'Zhichao Li', 'zhichao li')<br/>('1743129', 'Jie Zhou', 'jie zhou')<br/>('35247507', 'Shilei Wen', 'shilei wen')</td><td></td></tr><tr><td>e03bda45248b4169e2a20cb9124ae60440cad2de</td><td>Learning a Dictionary of Shape-Components in Visual Cortex:
+<br/>Comparison with Neurons, Humans and Machines
+<br/>by
+<br/>Ing´enieur de l’Ecole Nationale Sup´erieure
+<br/>des T´el´ecommunications de Bretagne, 2000
+<br/>and
+<br/>MS, Universit´e de Rennes, 2000
+<br/>Submitted to the Department of Brain and Cognitive Sciences
+<br/>in partial fulfillment of the requirements for the degree of
+<br/>Doctor of Philosophy
+<br/>at the
+<br/><b>MASSACHUSETTS INSTITUTE OF TECHNOLOGY</b><br/>June 2006
+<br/><b>c(cid:13) Massachusetts Institute of Technology 2006. All rights reserved</b><br/>Author . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+<br/>Department of Brain and Cognitive Sciences
+<br/>April 24, 2006
+<br/>Certified by . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+<br/>Tomaso Poggio
+<br/>Eugene McDermott Professor in the Brain Sciences and Human Behavior
+<br/>Thesis Supervisor
+<br/>Accepted by . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+<br/>Matt Wilson
+<br/>Professor of Neurobiology and
+<br/>Chairman, Department Graduate Committee
+</td><td>('1981539', 'Thomas Serre', 'thomas serre')</td><td></td></tr><tr><td>e03e86ac61cfac9148b371d75ce81a55e8b332ca</td><td>Unsupervised Learning using Sequential
+<br/>Verification for Action Recognition
+<br/><b>cid:63)The Robotics Institute, Carnegie Mellon University</b><br/>†Facebook AI Research
+</td><td>('1806773', 'Ishan Misra', 'ishan misra')<br/>('1709305', 'Martial Hebert', 'martial hebert')<br/>('1699161', 'C. Lawrence Zitnick', 'c. lawrence zitnick')</td><td></td></tr><tr><td>e0dedb6fc4d370f4399bf7d67e234dc44deb4333</td><td>Supplementary Material: Multi-Task Video Captioning with Video and
+<br/>Entailment Generation
+<br/>UNC Chapel Hill
+<br/>1 Experimental Setup
+<br/>1.1 Datasets
+<br/>1.1.1 Video Captioning Datasets
+<br/>YouTube2Text or MSVD The Microsoft Re-
+<br/>search Video Description Corpus (MSVD) or
+<br/>YouTube2Text (Chen and Dolan, 2011) is used
+<br/>for our primary video captioning experiments. It
+<br/>has 1970 YouTube videos in the wild with many
+<br/>diverse captions in multiple languages for each
+<br/>video. Caption annotations to these videos are
+<br/>collected using Amazon Mechanical Turk (AMT).
+<br/>All our experiments use only English captions. On
+<br/>average, each video has 40 captions, and the over-
+<br/>all dataset has about 80, 000 unique video-caption
+<br/>pairs. The average clip duration is roughly 10 sec-
+<br/>onds. We used the standard split as stated in Venu-
+<br/>gopalan et al. (2015), i.e., 1200 videos for training,
+<br/>100 videos for validation, and 670 for testing.
+<br/>MSR-VTT MSR-VTT is a recent collection of
+<br/>10, 000 video clips of 41.2 hours duration (i.e.,
+<br/>average duration of 15 seconds), which are an-
+<br/>notated by AMT workers. It has 200, 000 video
+<br/>clip-sentence pairs covering diverse content from
+<br/>a commercial video search engine. On average,
+<br/>each clip is annotated with 20 natural language
+<br/>captions. We used the standard split as provided
+<br/>in (Xu et al., 2016), i.e., 6, 513 video clips for
+<br/>training, 497 for validation, and 2, 990 for testing.
+<br/>M-VAD M-VAD is a movie description dataset
+<br/>with 49, 000 video clips collected from 92 movies,
+<br/>with the average clip duration being 6 seconds.
+<br/>Alignment of descriptions to video clips is done
+<br/>through an automatic procedure using Descrip-
+<br/>tive Video Service (DVS) provided for the movies.
+<br/>Each video clip description has only 1 or 2 sen-
+<br/>tences, making most evaluation metrics (except
+<br/>paraphrase-based METEOR) infeasible. Again,
+<br/>we used the standard train/val/test split as pro-
+<br/>vided in Torabi et al. (2015).
+<br/>1.1.2 Video Prediction Dataset
+<br/>For our unsupervised video representation learn-
+<br/>ing task, we use the UCF-101 action videos
+<br/>dataset (Soomro et al., 2012), which contains
+<br/>13, 320 video clips of 101 action categories and
+<br/>with an average clip length of 7.21 seconds each.
+<br/>This dataset suits our video captioning task well
+<br/>because both contain short video clips of a sin-
+<br/>gle action or few actions, and hence using future
+<br/>frame prediction on UCF-101 helps learn more ro-
+<br/>bust and context-aware video representations for
+<br/>our short clip video captioning task. We use the
+<br/>standard split of 9, 500 videos for training (we
+<br/>don’t need any validation set in our setup because
+<br/>we directly tune on the validation set of the video
+<br/>captioning task).
+<br/>the
+<br/>three
+<br/>video
+<br/>captioning
+<br/>1.2 Pre-trained Visual Frame Features
+<br/>For
+<br/>datasets
+<br/>(Youtube2Text, MSR-VTT, M-VAD) and the
+<br/>unsupervised video prediction dataset (UCF-101),
+<br/>we fix our sampling rate to 3f ps to bring uni-
+<br/>formity in the temporal representation of actions
+<br/>across all videos. These sampled frames are then
+<br/>converted into features using several state-of-the-
+<br/>art pre-trained models on ImageNet (Deng et al.,
+<br/>2009) – VGGNet
+<br/>(Simonyan and Zisserman,
+<br/>2015), GoogLeNet (Szegedy et al., 2015; Ioffe
+<br/>and Szegedy, 2015), and Inception-v4 (Szegedy
+<br/>et al., 2016). For VGGNet, we use its f c7 layer
+<br/>features with dimension 4096. For GoogLeNet
+<br/>and Inception-v4, we use the layer before the fully
+<br/>connected layer with dimensions 1024 and 1536,
+<br/>respectively. We follow standard preprocessing
+<br/>and convert all the natural language descriptions
+<br/>to lower case and tokenize the sentences and
+<br/>remove punctuations.
+</td><td>('10721120', 'Ramakanth Pasunuru', 'ramakanth pasunuru')<br/>('7736730', 'Mohit Bansal', 'mohit bansal')</td><td>{ram, mbansal}@cs.unc.edu
+</td></tr><tr><td>e096b11b3988441c0995c13742ad188a80f2b461</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>DeepProposals: Hunting Objects and Actions by Cascading
+<br/>Deep Convolutional Layers
+<br/>Van Gool
+<br/>Received: date / Accepted: date
+</td><td>('3060081', 'Amir Ghodrati', 'amir ghodrati')</td><td></td></tr><tr><td>e0638e0628021712ac76e3472663ccc17bd8838c</td><td> VOL. 9, NO. 2, FEBRUARY 2014 ISSN 1819-6608
+<br/>ARPN Journal of Engineering and Applied Sciences
+<br/>©2006-2014 Asian Research Publishing Network (ARPN). All rights reserved.
+<br/>www.arpnjournals.com
+<br/>SIGN LANGUAGE RECOGNITION: STATE OF THE ART
+<br/><b>Sharda University, Greater Noida, India</b></td><td>('27105713', 'Ashok K Sahoo', 'ashok k sahoo')<br/>('40867787', 'Gouri Sankar Mishra', 'gouri sankar mishra')<br/>('3017041', 'Kiran Kumar Ravulakollu', 'kiran kumar ravulakollu')</td><td>E-Mail: ashoksahoo2000@yahoo.com
+</td></tr><tr><td>e0c081a007435e0c64e208e9918ca727e2c1c44e</td><td></td><td></td><td></td></tr><tr><td>e0d878cc095eaae220ad1f681b33d7d61eb5e425</td><td>Article
+<br/>Temporal and Fine-Grained Pedestrian Action
+<br/>Recognition on Driving Recorder Database
+<br/><b>National Institute of Advanced Industrial Science and Technology (AIST), Tsukuba 305-8560, Japan</b><br/><b>Keio University, Yokohama 223-8522, Japan</b><br/>Received: 5 January 2018; Accepted: 8 February 2018; Published: 20 February 2018
+</td><td>('1730200', 'Hirokatsu Kataoka', 'hirokatsu kataoka')<br/>('1732705', 'Yutaka Satoh', 'yutaka satoh')<br/>('1716469', 'Yoshimitsu Aoki', 'yoshimitsu aoki')<br/>('6881850', 'Shoko Oikawa', 'shoko oikawa')<br/>('1720770', 'Yasuhiro Matsui', 'yasuhiro matsui')</td><td>yu.satou@aist.go.jp
+<br/>aoki@elec.keio.ac.jp
+<br/>Tokyo Metropolitan University, Tokyo 192-0364, Japan; shoko_o@hotmail.com
+<br/>4 National Traffic Safety and Environment Laboratory, Tokyo 182-0012, Japan; ymatsui@ntsel.go.jp
+<br/>* Correspondence: hirokatsu.kataoka@aist.go.jp; Tel.: +81-29-861-2267
+</td></tr><tr><td>e00d4e4ba25fff3583b180db078ef962bf7d6824</td><td>Preprints (www.preprints.org) | NOT PEER-REVIEWED | Posted: 20 March 2017 doi:10.20944/preprints201703.0152.v1
+<br/>Article
+<br/>Face Verification with Multi-Task and Multi-Scale
+<br/>Features Fusion
+</td><td>('39198322', 'Xiaojun Lu', 'xiaojun lu')<br/>('39683642', 'Yue Yang', 'yue yang')<br/>('8030754', 'Weilin Zhang', 'weilin zhang')<br/>('36286794', 'Qi Wang', 'qi wang')<br/>('37622915', 'Yang Wang', 'yang wang')</td><td>1 College of Sciences, Northeastern University, Shenyang 110819, China; luxiaojun@mail.neu.edu.cn (X.L.);
+<br/>YangY1503@163.com (Y.Y.); wangy_neu@163.com (Y.W.)
+<br/>2 New York University Shanghai, 1555 Century Ave, Pudong, Shanghai 200122, China; wz723@nyu.edu
+<br/>* Correspondence: wangqimath@mail.neu.edu.cn; Tel.: +86-024-8368-7680
+</td></tr><tr><td>e01bb53b611c679141494f3ffe6f0b91953af658</td><td>FSRNet: End-to-End Learning Face Super-Resolution with Facial Priors
+<br/><b>Nanjing University of Science and Technology</b><br/>2Youtu Lab, Tencent
+<br/><b>Michigan State University</b><br/><b>University of Adelaide</b><br/>Figure 1: Visual results of different super-resolution methods on scale factor 8.
+</td><td>('50579509', 'Yu Chen', 'yu chen')<br/>('49499405', 'Jian Yang', 'jian yang')</td><td></td></tr><tr><td>e0bfcf965b402f3f209f26ae20ee88bc4d0002ab</td><td>AI Thinking for Cloud Education Platform with Personalized Learning
+<br/><b>University of Texas at San Antonio</b><br/><b>University of Texas at San Antonio</b><br/><b>University of Texas at San Antonio</b><br/><b>University of Texas at San Antonio</b><br/><b>University of Texas at San Antonio</b></td><td>('2055316', 'Paul Rad', 'paul rad')<br/>('2918902', 'Mehdi Roopaei', 'mehdi roopaei')<br/>('1716725', 'Nicole Beebe', 'nicole beebe')<br/>('9324267', 'Mehdi Shadaram', 'mehdi shadaram')<br/>('1839489', 'Yoris A. Au', 'yoris a. au')</td><td>Paul.rad@utsa.edu
+<br/> Mehdi.roopaei@utsa.edu
+<br/>Nicole.beebe@utsa.edu
+<br/>Mehdi.shadaram@utsa.edu
+<br/>Yoris.au@utsa.edu
+</td></tr><tr><td>e0939b4518a5ad649ba04194f74f3413c793f28e</td><td>Technical Report
+<br/>UCAM-CL-TR-636
+<br/>ISSN 1476-2986
+<br/>Number 636
+<br/>Computer Laboratory
+<br/>Mind-reading machines:
+<br/>automated inference
+<br/>of complex mental states
+<br/>July 2005
+<br/>15 JJ Thomson Avenue
+<br/>Cambridge CB3 0FD
+<br/>United Kingdom
+<br/>phone +44 1223 763500
+<br/>http://www.cl.cam.ac.uk/
+</td><td></td><td></td></tr><tr><td>e0ed0e2d189ff73701ec72e167d44df4eb6e864d</td><td>Recognition of static and dynamic facial expressions: a study review
+<br/>Estudos de Psicologia, 18(1), janeiro-março/2013, 125-130
+<br/><b>Federal University of Para ba</b></td><td>('39169435', 'Nelson Torro Alves', 'nelson torro alves')</td><td></td></tr><tr><td>e00d391d7943561f5c7b772ab68e2bb6a85e64c4</td><td>Robust continuous clustering
+<br/><b>University of Maryland, College Park, MD 20740; and bIntel Labs, Santa Clara, CA</b><br/><b>Edited by David L. Donoho, Stanford University, Stanford, CA, and approved August 7, 2017 (received for review January</b><br/>Clustering is a fundamental procedure in the analysis of scientific
+<br/>data. It is used ubiquitously across the sciences. Despite decades
+<br/>of research, existing clustering algorithms have limited effective-
+<br/>ness in high dimensions and often require tuning parameters for
+<br/>different domains and datasets. We present a clustering algo-
+<br/>rithm that achieves high accuracy across multiple domains and
+<br/>scales efficiently to high dimensions and large datasets. The pre-
+<br/>sented algorithm optimizes a smooth continuous objective, which
+<br/>is based on robust statistics and allows heavily mixed clusters to
+<br/>be untangled. The continuous nature of the objective also allows
+<br/>clustering to be integrated as a module in end-to-end feature
+<br/>learning pipelines. We demonstrate this by extending the algo-
+<br/>rithm to perform joint clustering and dimensionality reduction
+<br/>by efficiently optimizing a continuous global objective. The pre-
+<br/>sented approach is evaluated on large datasets of faces, hand-
+<br/>written digits, objects, newswire articles, sensor readings from
+<br/>the Space Shuttle, and protein expression levels. Our method
+<br/>achieves high accuracy across all datasets, outperforming the best
+<br/>prior algorithm by a factor of 3 in average rank.
+<br/>clustering | data analysis | unsupervised learning
+<br/>Clustering is one of the fundamental experimental procedures
+<br/>in data analysis. It is used in virtually all natural and social
+<br/>sciences and has played a central role in biology, astronomy,
+<br/>psychology, medicine, and chemistry. Data-clustering algorithms
+<br/>have been developed for more than half a century (1). Significant
+<br/>advances in the last two decades include spectral clustering (2–4),
+<br/>generalizations of classic center-based methods (5, 6), mixture
+<br/>models (7, 8), mean shift (9), affinity propagation (10), subspace
+<br/>clustering (11–13), nonparametric methods (14, 15), and feature
+<br/>selection (16–20).
+<br/>Despite these developments, no single algorithm has emerged
+<br/>to displace the k-means scheme and its variants (21). This
+<br/>is despite the known drawbacks of such center-based meth-
+<br/><b>ods, including sensitivity to initialization, limited effectiveness in</b><br/>high-dimensional spaces, and the requirement that the number
+<br/>of clusters be set in advance. The endurance of these methods
+<br/>is in part due to their simplicity and in part due to difficulties
+<br/>associated with some of the new techniques, such as additional
+<br/>hyperparameters that need to be tuned, high computational cost,
+<br/>and varying effectiveness across domains. Consequently, scien-
+<br/>tists who analyze large high-dimensional datasets with unknown
+<br/>distribution must maintain and apply multiple different cluster-
+<br/>ing algorithms in the hope that one will succeed. Books have
+<br/>been written to guide practitioners through the landscape of
+<br/>data-clustering techniques (22).
+<br/>We present a clustering algorithm that is fast, easy to use, and
+<br/>effective in high dimensions. The algorithm optimizes a clear
+<br/>continuous objective, using standard numerical methods that
+<br/>scale to massive datasets. The number of clusters need not be
+<br/>known in advance.
+<br/>The operation of the algorithm can be understood by contrast-
+<br/>ing it with other popular clustering techniques. In center-based
+<br/>algorithms such as k-means (1, 24), a small set of putative cluster
+<br/>centers is initialized from the data and then iteratively refined. In
+<br/>affinity propagation (10), data points communicate over a graph
+<br/>structure to elect a subset of the points as representatives. In the
+<br/>presented algorithm, each data point has a dedicated representa-
+<br/>tive, initially located at the data point. Over the course of the algo-
+<br/>rithm, the representatives move and coalesce into easily separable
+<br/>clusters. The progress of the algorithm is visualized in Fig. 1.
+<br/>Our formulation is based on recent convex relaxations for clus-
+<br/>tering (25, 26). However, our objective is deliberately not convex.
+<br/>We use redescending robust estimators that allow even heavily
+<br/>mixed clusters to be untangled by optimizing a single contin-
+<br/>uous objective. Despite the nonconvexity of the objective, the
+<br/>optimization can still be performed using standard linear least-
+<br/>squares solvers, which are highly efficient and scalable. Since the
+<br/>algorithm expresses clustering as optimization of a continuous
+<br/>objective based on robust estimation, we call it robust continu-
+<br/>ous clustering (RCC).
+<br/>One of the characteristics of the presented formulation is that
+<br/>clustering is reduced to optimization of a continuous objective.
+<br/>This enables the integration of clustering in end-to-end fea-
+<br/>ture learning pipelines. We demonstrate this by extending RCC
+<br/>to perform joint clustering and dimensionality reduction. The
+<br/>extended algorithm, called RCC-DR, learns an embedding of
+<br/>the data into a low-dimensional space in which it is clustered.
+<br/>Embedding and clustering are performed jointly, by an algorithm
+<br/>that optimizes a clear global objective.
+<br/>We evaluate RCC and RCC-DR on a large number of datasets
+<br/>from a variety of domains. These include image datasets, docu-
+<br/>ment datasets, a dataset of sensor readings from the Space Shut-
+<br/>tle, and a dataset of protein expression levels in mice. Exper-
+<br/>iments demonstrate that our method significantly outperforms
+<br/>prior state-of-the-art techniques. RCC-DR is particularly robust
+<br/>across datasets from different domains, outperforming the best
+<br/>prior algorithm by a factor of 3 in average rank.
+<br/>Formulation
+<br/>We consider the problem of clustering a set of n data points.
+<br/>The input is denoted by X = [x1, x2, . . . , xn ], where xi ∈ RD.
+<br/>Our approach operates on a set of representatives U =
+<br/>[u1, u2, . . . , un ], where ui ∈ RD. The representatives U are ini-
+<br/>tialized at the corresponding data points X. The optimization
+<br/>operates on the representation U, which coalesces to reveal the
+<br/>cluster structure latent in the data. Thus, the number of clusters
+<br/>Significance
+<br/>Clustering is a fundamental experimental procedure in data
+<br/>analysis. It is used in virtually all natural and social sciences
+<br/>and has played a central role in biology, astronomy, psychol-
+<br/>ogy, medicine, and chemistry. Despite the importance and
+<br/>ubiquity of clustering, existing algorithms suffer from a vari-
+<br/>ety of drawbacks and no universal solution has emerged. We
+<br/>present a clustering algorithm that reliably achieves high accu-
+<br/>racy across domains, handles high data dimensionality, and
+<br/>scales to large datasets. The algorithm optimizes a smooth
+<br/>global objective, using efficient numerical methods. Experi-
+<br/>ments demonstrate that our method outperforms state-of-
+<br/>the-art clustering algorithms by significant factors in multiple
+<br/>domains.
+<br/>Author contributions: S.A.S. and V.K. designed research, performed research, analyzed
+<br/>data, and wrote the paper.
+<br/>The authors declare no conflict of interest.
+<br/>This article is a PNAS Direct Submission.
+<br/>Freely available online through the PNAS open access option.
+<br/>This article contains supporting information online at www.pnas.org/lookup/suppl/doi:10.
+<br/>1073/pnas.1700770114/-/DCSupplemental.
+<br/>9814–9819 | PNAS | September 12, 2017 | vol. 114 | no. 37
+<br/>www.pnas.org/cgi/doi/10.1073/pnas.1700770114
+</td><td>('49485254', 'Sohil Atul Shah', 'sohil atul shah')<br/>('1770944', 'Vladlen Koltun', 'vladlen koltun')</td><td>1To whom correspondence should be addressed. Email: sohilas@umd.edu.
+</td></tr><tr><td>e0765de5cabe7e287582532456d7f4815acd74c1</td><td></td><td></td><td></td></tr><tr><td>e065a2cb4534492ccf46d0afc81b9ad8b420c5ec</td><td>SFace: An Efficient Network for Face Detection
+<br/>in Large Scale Variations
+<br/><b>College of Software, Beihang University</b><br/>Megvii Inc. (Face++)†
+</td><td>('38504661', 'Jianfeng Wang', 'jianfeng wang')<br/>('48009795', 'Ye Yuan', 'ye yuan')<br/>('2789329', 'Boxun Li', 'boxun li')<br/>('2352391', 'Gang Yu', 'gang yu')<br/>('2017810', 'Sun Jian', 'sun jian')</td><td>{wjfwzzc}@buaa.edu.cn, {yuanye,liboxun,yugang,sunjian}@megvii.com
+</td></tr><tr><td>e00241f00fb31c660df6c6f129ca38370e6eadb3</td><td>What have we learned from deep representations for action recognition?
+<br/>TU Graz
+<br/>TU Graz
+<br/><b>York University, Toronto</b><br/><b>University of Oxford</b></td><td>('2322150', 'Christoph Feichtenhofer', 'christoph feichtenhofer')<br/>('1718587', 'Axel Pinz', 'axel pinz')<br/>('1709096', 'Richard P. Wildes', 'richard p. wildes')<br/>('1688869', 'Andrew Zisserman', 'andrew zisserman')</td><td>feichtenhofer@tugraz.at
+<br/>axel.pinz@tugraz.at
+<br/>wildes@cse.yorku.ca
+<br/>az@robots.ox.ac.uk
+</td></tr><tr><td>e013c650c7c6b480a1b692bedb663947cd9d260f</td><td>860
+<br/>Robust Image Analysis With Sparse Representation
+<br/>on Quantized Visual Features
+</td><td>('8180253', 'Bing-Kun Bao', 'bing-kun bao')<br/>('36601906', 'Guangyu Zhu', 'guangyu zhu')<br/>('38203359', 'Jialie Shen', 'jialie shen')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')</td><td></td></tr><tr><td>e0244a8356b57a5721c101ead351924bcfb2eef4</td><td>Journal of Experimental Psychology: General
+<br/>2017, Vol. 146, No. 10, 1379 –1401
+<br/>0096-3445/17/$12.00
+<br/>© 2017 American Psychological Association
+<br/>http://dx.doi.org/10.1037/xge0000292
+<br/>Power as an Emotional Liability: Implications for Perceived Authenticity
+<br/>and Trust After a Transgression
+<br/><b>University of Southern California</b><br/><b>Webster University</b><br/><b>University of Haifa</b><br/>Alexandra Mislin
+<br/><b>American University</b><br/><b>University of Washington, Seattle</b><br/>Gerben A. van Kleef
+<br/><b>University of Amsterdam</b><br/>People may express a variety of emotions after committing a transgression. Through 6 empirical studies and
+<br/>a meta-analysis, we investigate how the perceived authenticity of such emotional displays and resulting levels
+<br/>of trust are shaped by the transgressor’s power. Past findings suggest that individuals with power tend to be
+<br/>more authentic because they have more freedom to act on the basis of their own personal inclinations. Yet,
+<br/>our findings reveal that (a) a transgressor’s display of emotion is perceived to be less authentic when that
+<br/>party’s power is high rather than low; (b) this perception of emotional authenticity, in turn, directly influences
+<br/>(and mediates) the level of trust in that party; and (c) perceivers ultimately exert less effort when asked to make
+<br/>a case for leniency toward high rather than low-power transgressors. This tendency to discount the emotional
+<br/>authenticity of the powerful was found to arise from power increasing the transgressor’s perceived level of
+<br/>emotional control and strategic motivation, rather than a host of alternative mechanisms. These results were
+<br/>also found across different types of emotions (sadness, anger, fear, happiness, and neutral), expressive
+<br/>modalities, operationalizations of the transgression, and participant populations. Altogether, our findings
+<br/>demonstrate that besides the wealth of benefits power can afford, it also comes with a notable downside. The
+<br/>findings, furthermore, extend past research on perceived emotional authenticity, which has focused on how
+<br/>and when specific emotions are expressed, by revealing how this perception can depend on considerations that
+<br/>have nothing to do with the expression itself.
+<br/>Keywords: trust, emotion, power, authenticity, perception
+<br/>Supplemental materials: http://dx.doi.org/10.1037/xge0000292.supp
+<br/>Research suggests that those who attain positions of power tend
+<br/>to be more emotionally skilled (Côté, Lopes, Salovey, & Miners,
+<br/>2010; George, 2000). Indeed, it is the very possession of such
+<br/>skills that has been suggested to help these parties attain and
+<br/>succeed in leadership positions (e.g., Lewis, 2000; Rubin, Munz,
+<br/><b>School of Business, University of Southern California; Alexandra Mislin</b><br/>Department of Management, Kogod School of Business, American Uni-
+<br/><b>chael G. Foster School of Business, University of Washington, Seattle</b><br/><b>A. van Kleef, University of Amsterdam</b><br/>This research was supported in part by a faculty research grant from
+<br/><b>Webster University</b><br/>Correspondence concerning this article should be addressed to Peter H.
+<br/>Kim, Marshall School of Business, Department of Management and Or-
+<br/><b>ganization, University of Southern California, Hoffman Hall 515, Los</b><br/>1379
+<br/>& Bommer, 2005). Yet, this tendency for the powerful to be
+<br/>emotionally skilled may not necessarily prove beneficial, to the
+<br/>extent that those evaluating such powerful individuals subscribe to
+<br/>this notion as well, and may even undermine the effectiveness of
+<br/>high-power parties’ emotional expressions when they might need
+<br/>them most. In particular, through six empirical studies and a
+<br/>meta-analysis, we investigate the possibility that perceivers’ gen-
+<br/>eral beliefs about the powerful as emotionally skilled would lead
+<br/>perceivers to discount the authenticity of the emotions the power-
+<br/>ful express, and that this would ultimately impair the effectiveness
+<br/>of those emotional displays for addressing a transgression.
+<br/>Theoretical Background
+<br/>Power, which has been defined as an individual’s capacity to
+<br/>modify others’ states by providing or withholding resources or
+<br/>administering punishments (Keltner, Gruenfeld, & Anderson,
+<br/>2003), has been widely recognized to offer numerous benefits to
+<br/><b>those who possess it, including the ability to act based on one s</b><br/>own inclinations, perceive greater choice, and obtain greater ben-
+<br/>efits from both work and nonwork interactions (e.g., Galinsky,
+</td><td>('34770901', 'Peter H. Kim', 'peter h. kim')<br/>('47847686', 'Ece Tuncel', 'ece tuncel')<br/>('3198839', 'Arik Cheshin', 'arik cheshin')<br/>('50222018', 'Ryan Fehr', 'ryan fehr')<br/>('34770901', 'Peter H. Kim', 'peter h. kim')<br/>('47847686', 'Ece Tuncel', 'ece tuncel')<br/>('50222018', 'Ryan Fehr', 'ryan fehr')<br/>('3198839', 'Arik Cheshin', 'arik cheshin')</td><td>Angeles, CA 90089-1421. E-mail: kimpeter@usc.edu
+</td></tr><tr><td>e0dc6f1b740479098c1d397a7bc0962991b5e294</td><td>快速人脸检测技术综述
+<br/>李月敏 1 陈杰 2 高文 1,2,3 尹宝才 1
+<br/>1(北京工业大学计算机学院多媒体与智能软件技术实验室 北京 100022)
+<br/>2(哈尔滨工业大学计算机科学与技术学院 哈尔滨 150001)
+<br/>3(中国科学院计算技术研究所先进人机通信技术联合实验室 北京 100080)
+<br/>摘 要 人脸检测问题研究具有很重要的意义,可以应用到人脸识别、新一代的人机界
+<br/>面、安全访问和视觉监控以及基于内容的检索等领域,近年来受到研究者的普遍重视。人脸
+<br/>检测要走向实际应用,精度和速度是亟需解决的两个关键问题。经过 20 世纪 90 年代以来十
+<br/>多年的发展,人脸检测的精度得到了大幅度的提高,但是速度却一直是阻挠人脸检测走向实
+<br/>用的绊脚石。为此研究者们也作了艰辛的努力。直到 21 世纪 Viola 基于 AdaBoost 算法的人
+<br/>脸检测器的发表,人脸检测的速度才得到了实质性的提高。该算法的发表也促进了人脸检测
+<br/>研究的进一步蓬勃发展,在这方面先后涌现出了一批优秀的文献。基于此,本文在系统地整
+<br/>理分析了人脸检测领域内的相关文献之后,从速度的角度将人脸检测的各种算法大致划分为
+<br/>初始期,发展期,转折点和综合期等四类,并在此基础上进行了全新的总结和论述,最后给
+<br/>出了人脸检测研究的一些可能的发展方向。
+<br/>关键词 人脸检测,速度,人脸识别,模式识别,Boosting
+<br/>图法分类号:TP391.4
+<br/>Face Detection: a Survey
+<br/>1(Multimedia and Intelligent Software Technology Laboratory
+<br/><b>Beijing University of Technology, Beijing 100022, China</b><br/><b>School of Computer Science and Technology, Harbin Institute of</b><br/>Technology, Harbin, 150001, China)
+<br/><b>Institute of Computing Technology, Chinese Academy of Sciences</b><br/>Beijing, 100080, China)
+</td><td>('7771395', 'Yuemin Li', 'yuemin li')<br/>('1714354', 'Baocai Yin', 'baocai yin')</td><td>ymli@jdl.ac.cn, chenjie@jdl.ac.cn,
+<br/>wgao@jdl.ac.cn, ybc@bjut.edu.cn
+</td></tr><tr><td>468c8f09d2ad8b558b65d11ec5ad49208c4da2f2</td><td>MSR-CNN: Applying Motion Salient Region Based
+<br/>Descriptors for Action Recognition
+<br/>School of Computing, Informatics,
+<br/>Decision System Engineering
+<br/><b>Arizona State University</b><br/>Tempe, USA
+<br/>Intel Corp.
+<br/>Tempe, USA
+<br/>School of Computing, Informatics,
+<br/>Decision System Engineering
+<br/><b>Arizona State University</b><br/>Tempe, USA
+</td><td>('3334478', 'Zhigang Tu', 'zhigang tu')<br/>('4244188', 'Jun Cao', 'jun cao')<br/>('2180892', 'Yikang Li', 'yikang li')<br/>('2913552', 'Baoxin Li', 'baoxin li')</td><td>Email: Zhigang.Tu@asu.edu
+<br/>Email: jun.cao@intel.com
+<br/>Email: YikangLi,Baoxin.Li@asu.edu
+</td></tr><tr><td>46a4551a6d53a3cd10474ef3945f546f45ef76ee</td><td>2014 IEEE Intelligent Vehicles Symposium (IV)
+<br/>June 8-11, 2014. Dearborn, Michigan, USA
+<br/>978-1-4799-3637-3/14/$31.00 ©2014 IEEE
+<br/>344
+</td><td></td><td></td></tr><tr><td>4686bdcee01520ed6a769943f112b2471e436208</td><td>Utsumi et al. IPSJ Transactions on Computer Vision and
+<br/>Applications (2017) 9:11
+<br/>DOI 10.1186/s41074-017-0024-5
+<br/>IPSJ Transactions on Computer
+<br/>Vision and Applications
+<br/>EXPRESS PAPER
+<br/>Open Access
+<br/>Fast search based on generalized
+<br/>similarity measure
+</td><td>('40142989', 'Yuzuko Utsumi', 'yuzuko utsumi')<br/>('4629425', 'Tomoya Mizuno', 'tomoya mizuno')<br/>('35613969', 'Masakazu Iwamura', 'masakazu iwamura')<br/>('3277321', 'Koichi Kise', 'koichi kise')</td><td></td></tr><tr><td>4688787d064e59023a304f7c9af950d192ddd33e</td><td>Investigating the Discriminative Power of Keystroke
+<br/>Sound
+<br/>and Dimitris Metaxas, Member, IEEE
+</td><td>('38993748', 'Joseph Roth', 'joseph roth')<br/>('1759169', 'Xiaoming Liu', 'xiaoming liu')<br/>('1698707', 'Arun Ross', 'arun ross')</td><td></td></tr><tr><td>466184b10fb7ce9857e6b5bd6b4e5003e09a0b16</td><td>Extended Grassmann Kernels for
+<br/>Subspace-Based Learning
+<br/>GRASP Laboratory
+<br/><b>University of Pennsylvania</b><br/>Philadelphia, PA 19104
+<br/>GRASP Laboratory
+<br/><b>University of Pennsylvania</b><br/>Philadelphia, PA 19104
+</td><td>('2720935', 'Jihun Ham', 'jihun ham')<br/>('1732066', 'Daniel D. Lee', 'daniel d. lee')</td><td>jhham@seas.upenn.edu
+<br/>ddlee@seas.upenn.edu
+</td></tr><tr><td>46e86cdb674440f61b6658ef3e84fea95ea51fb4</td><td></td><td></td><td></td></tr><tr><td>46f2611dc4a9302e0ac00a79456fa162461a8c80</td><td>for Action Classification
+<br/><b>ESAT-PSI, KU Leuven, 2CV:HCI, KIT, Karlsruhe, 3University of Bonn, 4Sensifai</b></td><td>('3310120', 'Ali Diba', 'ali diba')<br/>('3169187', 'Mohsen Fayyaz', 'mohsen fayyaz')<br/>('50633941', 'Vivek Sharma', 'vivek sharma')<br/>('2946643', 'Juergen Gall', 'juergen gall')<br/>('1681236', 'Luc Van Gool', 'luc van gool')</td><td>1{firstname.lastname}@kuleuven.be, 2{firstname.lastname}@kit.edu,
+<br/>3{lastname}@iai.uni-bonn.de, 4{firstname.lastname}@sensifai.com
+</td></tr><tr><td>46b7ee97d7dfbd61cc3745e8dfdd81a15ab5c1d4</td><td>3D FACIAL GEOMETRIC FEATURES FOR CONSTRAINED LOCAL MODEL
+<br/><b>cid:2) Imperial College London, United Kingdom</b><br/><b>University of Twente, EEMCS, Netherlands</b></td><td>('1694605', 'Maja Pantic', 'maja pantic')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')<br/>('3183108', 'Akshay Asthana', 'akshay asthana')<br/>('1902288', 'Shiyang Cheng', 'shiyang cheng')</td><td>{shiyang.cheng11, s.zafeiriou, a.asthana, m.pantic}@imperial.ac.uk
+</td></tr><tr><td>46ae4d593d89b72e1a479a91806c39095cd96615</td><td>A CONDITIONAL RANDOM FIELD APPROACH FOR FACE IDENTIFICATION IN
+<br/>BROADCAST NEWS USING OVERLAID TEXT
+<br/>(1,2)Gay Paul, 1Khoury Elie, 2Meignier Sylvain, 1Odobez Jean-Marc, 2Deleglise Paul
+<br/><b>Idiap Research Institute, Martigny, Switzerland, 2LIUM, University of Maine, Le Mans, France</b></td><td></td><td></td></tr><tr><td>467b602a67cfd7c347fe7ce74c02b38c4bb1f332</td><td>Large Margin Local Metric Learning
+<br/><b>University College London, London, UK</b><br/>2 Safran Morpho, Issy-les-Moulineaux, France
+<br/><b>University of Exceter, Exceter, UK</b></td><td>('38954213', 'Yiming Ying', 'yiming ying')<br/>('1704699', 'Massimiliano Pontil', 'massimiliano pontil')</td><td>m.pontil@cs.ucl.ac.uk
+<br/>{julien.bohne,stephane.gentric}@morpho.com
+<br/>y.ying@exeter.ac.uk
+</td></tr><tr><td>466f80b066215e85da63e6f30e276f1a9d7c843b</td><td>2017 IEEE 12th International Conference on Automatic Face & Gesture Recognition
+<br/>2017 IEEE 12th International Conference on Automatic Face & Gesture Recognition
+<br/>2017 IEEE 12th International Conference on Automatic Face & Gesture Recognition
+<br/>2017 IEEE 12th International Conference on Automatic Face & Gesture Recognition
+<br/>2017 IEEE 12th International Conference on Automatic Face & Gesture Recognition
+<br/>Joint Head Pose Estimation and Face Alignment Framework
+<br/>Using Global and Local CNN Features
+<br/>Computational Biomedicine Lab
+<br/><b>University of Houston, Houston, TX, USA</b></td><td>('5084124', 'Xiang Xu', 'xiang xu')<br/>('1706204', 'Ioannis A. Kakadiaris', 'ioannis a. kakadiaris')</td><td>{xxu18, ikakadia}@central.uh.edu
+</td></tr><tr><td>464de30d3310123644ab81a1f0adc51598586fd2</td><td></td><td></td><td></td></tr><tr><td>466a5add15bb5f91e0cfd29a55f5fb159a7980e5</td><td>Video Repeat Recognition and Mining by Visual
+<br/>Features
+</td><td>('4052001', 'Xianfeng Yang', 'xianfeng yang')</td><td></td></tr><tr><td>46f3b113838e4680caa5fc8bda6e9ae0d35a038c</td><td>Cancers 2010, 2, 262-273; doi:10.3390/cancers2020262
+<br/>OPEN ACCESS
+<br/>cancers
+<br/>ISSN 2072-6694
+<br/>www.mdpi.com/journal/cancers
+<br/>Review
+<br/>Automated Dermoscopy Image Analysis of Pigmented Skin
+<br/>Lesions
+<br/><b>Section of Pathology, Second University of Naples, Via L. Armanni</b><br/>5, 80138 Naples, Italy
+<br/>3 ACS, Advanced Computer Systems, Via della Bufalotta 378, 00139 Rome, Italy
+<br/>Fax: +390815569693.
+<br/>Received: 23 February 2010; in revised form: 15 March 2010 / Accepted: 25 March 2010 /
+<br/>Published: 26 March 2010
+</td><td>('32152948', 'Alfonso Baldi', 'alfonso baldi')<br/>('1705562', 'Marco Quartulli', 'marco quartulli')<br/>('3899127', 'Raffaele Murace', 'raffaele murace')<br/>('5703272', 'Emanuele Dragonetti', 'emanuele dragonetti')<br/>('38220535', 'Mario Manganaro', 'mario manganaro')<br/>('2237329', 'Oscar Guerra', 'oscar guerra')<br/>('4108084', 'Stefano Bizzi', 'stefano bizzi')</td><td>2 Futura-onlus, Via Pordenone 2, 00182 Rome, Italy; E-Mail: raffaele@murace.it
+<br/>* Author to whom correspondence should be addressed; E-Mail: alfonsobaldi@tiscali.it;
+</td></tr><tr><td>465d5bb11912005f0a4f0569c6524981df18a7de</td><td>IMOTION – Searching for Video Sequences
+<br/>using Multi-Shot Sketch Queries
+<br/>Metin Sezgin3, Ozan Can Altıok3, and Yusuf Sahillio˘glu3
+<br/>1 Databases and Information Systems Research Group,
+<br/><b>University of Basel, Switzerland</b><br/><b>Research Center in Information Technologies, Universit e de Mons, Belgium</b><br/><b>Intelligent User Interfaces Lab, Ko c University, Turkey</b></td><td>('27401642', 'Luca Rossetto', 'luca rossetto')<br/>('2155883', 'Ivan Giangreco', 'ivan giangreco')<br/>('34588610', 'Silvan Heller', 'silvan heller')<br/>('1806643', 'Heiko Schuldt', 'heiko schuldt')<br/>('3272087', 'Omar Seddati', 'omar seddati')</td><td>{luca.rossetto|ivan.giangreco|c.tanase|silvan.heller|heiko.schuldt}@unibas.ch
+<br/>{stephane.dupont|omar.seddati}@umons.ac.be
+<br/>{mtsezgin|oaltiok15|ysahillioglu}@ku.edu.tr
+</td></tr><tr><td>46c87fded035c97f35bb991fdec45634d15f9df2</td><td>Spatial-Aware Object Embeddings for Zero-Shot Localization
+<br/>and Classification of Actions
+<br/><b>University of Amsterdam</b></td><td>('2606260', 'Pascal Mettes', 'pascal mettes')</td><td></td></tr><tr><td>46e72046a9bb2d4982d60bcf5c63dbc622717f0f</td><td>Learning Discriminative Features with Class Encoder
+<br/>Center for Biometrics and Security Research & National Laboratory of Pattern Recognition
+<br/><b>Institute of Automation, Chinese Academy of Sciences</b><br/><b>University of Chinese Academy of Science</b></td><td>('1704812', 'Hailin Shi', 'hailin shi')<br/>('8362374', 'Xiangyu Zhu', 'xiangyu zhu')<br/>('1718623', 'Zhen Lei', 'zhen lei')<br/>('40397682', 'Shengcai Liao', 'shengcai liao')<br/>('34679741', 'Stan Z. Li', 'stan z. li')</td><td>{hailin.shi, xiangyu.zhu, zlei, scliao, szli}@nlpr.ia.ac.cn
+</td></tr><tr><td>46f32991ebb6235509a6d297928947a8c483f29e</td><td>In Proc. IEEE Computer Vision and Pattern Recognition (CVPR), Madison (WI), June 2003
+<br/>Recognizing Expression Variant Faces
+<br/>from a Single Sample Image per Class
+<br/>Aleix M. Mart(cid:19)(cid:16)nez
+<br/>Department of Electrical Engineering
+<br/><b>The Ohio State University, OH</b></td><td></td><td>aleix@ee.eng.ohio-state.edu
+</td></tr><tr><td>46538b0d841654a0934e4c75ccd659f6c5309b72</td><td>Signal & Image Processing : An International Journal (SIPIJ) Vol.5, No.1, February 2014
+<br/>A NOVEL APPROACH TO GENERATE FACE
+<br/>BIOMETRIC TEMPLATE USING BINARY
+<br/>DISCRIMINATING ANALYSIS
+<br/>1P.G. Student, Department of Computer Engineering, MCERC, Nashik (M.S.), India.
+<br/>2Associate Professor, Department of Computer Engineering,
+<br/>MCERC, Nashik (M.S.), India
+</td><td>('40075681', 'Shraddha S. Shinde', 'shraddha s. shinde')<br/>('2590072', 'Anagha P. Khedkar', 'anagha p. khedkar')</td><td></td></tr><tr><td>4641986af5fc8836b2c883ea1a65278d58fe4577</td><td>Scene Graph Generation by Iterative Message Passing
+<br/><b>Stanford University</b><br/><b>Stanford University</b></td><td>('2068265', 'Danfei Xu', 'danfei xu')</td><td>{danfei, yukez, chrischoy, feifeili}@cs.stanford.edu
+</td></tr><tr><td>464b3f0824fc1c3a9eaf721ce2db1b7dfe7cb05a</td><td>Deep Adaptive Temporal Pooling for Activity Recognition
+<br/><b>Singapore University of Technology and Design</b><br/><b>Singapore University of Technology and Design</b><br/>Singapore, Singapore
+<br/>Singapore, Singapore
+<br/><b>Institute for Infocomm Research</b><br/>Singapore, Singapore
+<br/><b>Keele University</b><br/>Keele, Staffordshire, United Kingdom
+</td><td>('1729827', 'Ngai-Man Cheung', 'ngai-man cheung')<br/>('2527741', 'Sibo Song', 'sibo song')<br/>('1802086', 'Vijay Chandrasekhar', 'vijay chandrasekhar')<br/>('1709001', 'Bappaditya Mandal', 'bappaditya mandal')</td><td>ngaiman_cheung@sutd.edu.sg
+<br/>sibo_song@mymail.sutd.edu.sg
+<br/>vijay@i2r.a-star.edu.sg
+<br/>b.mandal@keele.ac.uk
+</td></tr><tr><td>469ee1b00f7bbfe17c698ccded6f48be398f2a44</td><td>MIT International Journal of Computer Science and Information Technology, Vol. 4, No. 2, August 2014, pp. 82-88
+<br/>ISSN 2230-7621©MIT Publications
+<br/>82
+<br/>SURVEy: Techniques for
+<br/>Aging Problems in Face Recognition
+<br/>Aashmi
+<br/>Scholar, Computer Science Engg. Dept.
+<br/><b>Moradabad Institute of Technology</b><br/>Scholar, Computer Science Engg. Dept.
+<br/><b>Moradabad Institute of Technology</b><br/>Scholar, Computer Science Engg. Dept.
+<br/><b>Moradabad Institute of Technology</b><br/>Moradabad, U.P., INDIA
+<br/>Moradabad, U.P., INDIA
+<br/>Moradabad, U.P., INDIA
+</td><td>('40062749', 'Sakshi Sahni', 'sakshi sahni')<br/>('9186211', 'Sakshi Saxena', 'sakshi saxena')</td><td>E-mail: aashmichaudhary@gmail.com
+<br/>E-mail: sakshisahni92@gmail.com
+<br/>E-mail: saxena.sakshi2511992@gmail.com
+</td></tr><tr><td>46196735a201185db3a6d8f6e473baf05ba7b68f</td><td></td><td></td><td></td></tr><tr><td>4682fee7dc045aea7177d7f3bfe344aabf153bd5</td><td>Tabula Rasa: Model Transfer for
+<br/>Object Category Detection
+<br/>Department of Engineering Science
+<br/>Oxford
+<br/>(Presented by Elad Liebman)
+</td><td>('3152281', 'Yusuf Aytar', 'yusuf aytar')</td><td></td></tr><tr><td>4657d87aebd652a5920ed255dca993353575f441</td><td>Image Normalization for
+<br/>Illumination Compensation in Facial Images
+<br/>by
+<br/>Department of Electrical & Computer Engineering
+<br/>& Center for Intelligent Machines
+<br/><b>McGill University, Montreal, Canada</b><br/>August 2004
+</td><td>('3631473', 'Martin D. Levine', 'martin d. levine')<br/>('35712223', 'Jisnu Bhattacharyya', 'jisnu bhattacharyya')</td><td></td></tr><tr><td>4622b82a8aff4ac1e87b01d2708a333380b5913b</td><td>Multi-label CNN Based Pedestrian Attribute Learning for Soft Biometrics
+<br/>Center for Biometrics and Security Research,
+<br/><b>Institute of Automation, Chinese Academy of Sciences</b><br/>95 Zhongguancun Donglu, Beijing 100190, China
+</td><td>('1739258', 'Jianqing Zhu', 'jianqing zhu')<br/>('40397682', 'Shengcai Liao', 'shengcai liao')<br/>('1716143', 'Dong Yi', 'dong yi')<br/>('1718623', 'Zhen Lei', 'zhen lei')<br/>('34679741', 'Stan Z. Li', 'stan z. li')</td><td>jianqingzhu@foxmail.com, {scliao, dyi, zlei, szli}@nlpr.ia.ac.cn
+</td></tr><tr><td>46e866f58419ff4259c65e8256c1d4f14927b2c6</td><td>On the Generalization Power of Face and Gait Gender
+<br/>Recognition Methods
+<br/><b>University of Warwick</b><br/>Gibbet Hill Road, Coventry, CV4 7AL, UK
+</td><td>('1735787', 'Yu Guan', 'yu guan')<br/>('1799504', 'Chang-Tsun Li', 'chang-tsun li')</td><td>{g.yu, x.wei, c-t.li}@warwick.ac.uk
+</td></tr><tr><td>46072f872eee3413f9d05482be6446f6b96b6c09</td><td>Trace Quotient Problems Revisited
+<br/>1 Department of Information Engineering,
+<br/><b>The Chinese University of Hong Kong, Hong Kong</b><br/>2 Microsoft Research Asia, Beijing, China
+</td><td>('1698982', 'Shuicheng Yan', 'shuicheng yan')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td></td></tr><tr><td>4698a599425c3a6bae1c698456029519f8f2befe</td><td>Transferring Rich Deep Features
+<br/>for Facial Beauty Prediction
+<br/><b>College of Informatics</b><br/><b>College of Informatics</b><br/>Department of Computer Science and Engineering
+<br/><b>Huazhong Agricultural University</b><br/><b>Huazhong Agricultural University</b><br/>Wuhan, China
+<br/>Wuhan, China
+<br/><b>University of North Texas</b><br/>Denton, USA
+</td><td>('40557104', 'Lu Xu', 'lu xu')<br/>('2697879', 'Jinhai Xiang', 'jinhai xiang')<br/>('1982703', 'Xiaohui Yuan', 'xiaohui yuan')</td><td>Email: xulu coi@webmail.hzau.edu.cn
+<br/>Email: jimmy xiang@mail.hzau.edu.cn
+<br/>Email: Xiaohui.Yuan@unt.edu
+</td></tr><tr><td>2c424f21607ff6c92e640bfe3da9ff105c08fac4</td><td>Learning Structured Output Representation
+<br/>using Deep Conditional Generative Models
+<br/><b>NEC Laboratories America, Inc</b><br/><b>University of Michigan, Ann Arbor</b></td><td>('1729571', 'Kihyuk Sohn', 'kihyuk sohn')<br/>('3084614', 'Xinchen Yan', 'xinchen yan')<br/>('1697141', 'Honglak Lee', 'honglak lee')</td><td>ksohn@nec-labs.com, {xcyan,honglak}@umich.edu
+</td></tr><tr><td>2c258eec8e4da9e65018f116b237f7e2e0b2ad17</td><td>Deep Quantization: Encoding Convolutional Activations
+<br/>with Deep Generative Model ∗
+<br/><b>University of Science and Technology of China, Hefei, China</b><br/>Microsoft Research, Beijing, China
+</td><td>('3430743', 'Zhaofan Qiu', 'zhaofan qiu')<br/>('2053452', 'Ting Yao', 'ting yao')<br/>('1724211', 'Tao Mei', 'tao mei')</td><td>zhaofanqiu@gmail.com, {tiyao, tmei}@microsoft.com
+</td></tr><tr><td>2cbb4a2f8fd2ddac86f8804fd7ffacd830a66b58</td><td></td><td></td><td></td></tr><tr><td>2c8743089d9c7df04883405a31b5fbe494f175b4</td><td>Washington State Convention Center
+<br/>Seattle, Washington, May 26-30, 2015
+<br/>978-1-4799-6922-7/15/$31.00 ©2015 IEEE
+<br/>3039
+</td><td></td><td></td></tr><tr><td>2c61a9e26557dd0fe824909adeadf22a6a0d86b0</td><td></td><td></td><td></td></tr><tr><td>2c93c8da5dfe5c50119949881f90ac5a0a4f39fe</td><td>Advanced local motion patterns for macro and micro facial
+<br/>expression recognition
+<br/>B. Allaerta,∗, IM. Bilascoa, C. Djerabaa
+<br/>aUniv. Lille, CNRS, Centrale Lille, UMR 9189 - CRIStAL -
+<br/>Centre de Recherche en Informatique Signal et Automatique de Lille, F-59000 Lille, France
+</td><td></td><td></td></tr><tr><td>2c34bf897bad780e124d5539099405c28f3279ac</td><td>Robust Face Recognition via Block Sparse Bayesian Learning
+<br/><b>School of Financial Information Engineering, Southwestern University of Finance and Economics, Chengdu</b><br/>China
+<br/><b>Institute of Chinese Payment System, Southwestern University of Finance and Economics, Chengdu 610074, China</b><br/><b>University of California at San Diego, La Jolla, CA</b><br/>USA
+<br/><b>Samsung RandD Institute America - Dallas, 1301 East Lookout Drive, Richardson, TX 75082, USA</b></td><td>('2775350', 'Taiyong Li', 'taiyong li')<br/>('1791667', 'Zhilin Zhang', 'zhilin zhang')</td><td></td></tr><tr><td>2c203050a6cca0a0bff80e574bda16a8c46fe9c2</td><td>Discriminative Deep Hashing for Scalable Face Image Retrieval
+<br/><b>School of Computer Science and Engineering, Nanjing University of Science and Technology</b></td><td>('1699053', 'Jie Lin', 'jie lin')<br/>('3233021', 'Zechao Li', 'zechao li')<br/>('8053308', 'Jinhui Tang', 'jinhui tang')</td><td>jinhuitang@njust.edu.cn
+</td></tr><tr><td>2cc4ae2e864321cdab13c90144d4810464b24275</td><td>23
+<br/>Face Recognition Using Optimized 3D
+<br/>Information from Stereo Images
+<br/>1Advanced Technology R&D Center, Samsung Thales Co., Ltd., 2Graduate School of
+<br/><b>Advanced Imaging Science, Multimedia, and Film Chung-Ang University, Seoul</b><br/>Korea
+<br/>1. Introduction
+<br/>Human biometric characteristics are unique, so it can not be easily duplicated [1]. Such
+<br/>information
+<br/>includes; facial, hands, torso, fingerprints, etc. Potential applications,
+<br/>economical efficiency, and user convenience make the face detection and recognition
+<br/>technique an important commodity compared to other biometric features [2], [3]. It can also
+<br/>use a low-cost personal computer (PC) camera instead of expensive equipments, and require
+<br/>minimal user interface. Recently, extensive research using 3D face data has been carried out
+<br/>in order to overcome the limits of 2D face detection and feature extraction [2], which
+<br/>includes PCA [3], neural networks (NN) [4], support vector machines (SVM) [5], hidden
+<br/>markov models (HMM) [6], and linear discriminant analysis (LDA) [7]. Among them, PCA
+<br/>and LDA methods with self-learning method are most widely used [3]. The frontal face
+<br/>image database provides fairly high recognition rate. However, if the view data of facial
+<br/>rotation, illumination and pose change is not acquired, the correct recognition rate
+<br/>remarkably drops because of the entire face modeling. Such performance degradation
+<br/>problem can be solved by using a new recognition method based on the optimized 3D
+<br/>information in the stereo face images.
+<br/>This chapter presents a new face detection and recognition method using optimized 3D
+<br/>information from stereo images. The proposed method can significantly improve the
+<br/>recognition rate and is robust against object’s size, distance, motion, and depth using the
+<br/>PCA algorithm. By using the optimized 3D information, we estimate the position of the eyes
+<br/>in the stereo face images. As a result, we can accurately detect the facial size, depth, and
+<br/>rotation in the stereo face images. For efficient detection of face area, we adopt YCbCr color
+<br/>format. The biggest object can be chosen as a face candidate among the candidate areas
+<br/>which are extracted by the morphological opening for the Cb and Cr components [8]. In
+<br/>order to detect the face characteristics such as eyes, nose, and mouth, a pre-processing is
+<br/>performed, which utilizes brightness information in the estimated face area. For fast
+<br/>processing, we train the partial face region segmented by estimating the position of eyes,
+<br/>instead of the entire face region. Figure 1. shows the block diagram of proposed algorithm.
+<br/>This chapter is organized as follows: Section 2 and 3 describe proposed stereo vision system
+<br/>and pos estimation for face recognition, respectively. Section 4 presents experimental, and
+<br/>section 5 concludes the chapter.
+<br/>Source: Face Recognition, Book edited by: Kresimir Delac and Mislav Grgic, ISBN 978-3-902613-03-5, pp.558, I-Tech, Vienna, Austria, June 2007
+</td><td>('1727735', 'Changhan Park', 'changhan park')<br/>('1684329', 'Joonki Paik', 'joonki paik')</td><td></td></tr><tr><td>2c3430e0cbe6c8d7be3316a88a5c13a50e90021d</td><td>Multi-feature Spectral Clustering with Minimax Optimization
+<br/>School of Electrical and Electronic Engineering
+<br/><b>Nanyang Technological University, Singapore</b></td><td>('19172541', 'Hongxing Wang', 'hongxing wang')<br/>('1764228', 'Chaoqun Weng', 'chaoqun weng')<br/>('34316743', 'Junsong Yuan', 'junsong yuan')</td><td>{hwang8, weng0018}@e.ntu.edu.sg, jsyuan@ntu.edu.sg
+</td></tr><tr><td>2cac8ab4088e2bdd32dcb276b86459427355085c</td><td>A Face-to-Face Neural Conversation Model
+<br/>Hang Chu1
+<br/><b>University of Toronto 2Vector Institute</b></td><td>('46598920', 'Daiqing Li', 'daiqing li')</td><td>{chuhang1122, daiqing, fidler}@cs.toronto.edu
+</td></tr><tr><td>2cde051e04569496fb525d7f1b1e5ce6364c8b21</td><td>Sparse 3D convolutional neural networks
+<br/><b>University of Warwick</b><br/>August 26, 2015
+</td><td>('39294240', 'Ben Graham', 'ben graham')</td><td>b.graham@warwick.ac.uk
+</td></tr><tr><td>2c2786ea6386f2d611fc9dbf209362699b104f83</td><td></td><td>('31914125', 'Mohammad Shahidul Islam', 'mohammad shahidul islam')</td><td></td></tr><tr><td>2c92839418a64728438c351a42f6dc5ad0c6e686</td><td>Pose-Aware Face Recognition in the Wild
+<br/>Prem Natarajan2
+<br/><b>USC Institute for Robotics and Intelligent Systems (IRIS), Los Angeles, CA</b><br/>G´erard Medioni1
+<br/><b>USC Information Sciences Institute (ISI), Marina Del Rey, CA</b></td><td>('11269472', 'Iacopo Masi', 'iacopo masi')<br/>('38696444', 'Stephen Rawls', 'stephen rawls')</td><td>{srawls,pnataraj}@isi.edu
+<br/>{iacopo.masi,medioni}@usc.edu
+</td></tr><tr><td>2c848cc514293414d916c0e5931baf1e8583eabc</td><td>An automatic facial expression recognition system
+<br/>evaluated by different classifiers
+<br/>∗Programa de P´os-Graduac¸˜ao em Mecatrˆonica
+<br/>Universidade Federal da Bahia,
+<br/>†Department of Electrical Engineering - EESC/USP
+</td><td>('3797834', 'Caroline Silva', 'caroline silva')<br/>('2105008', 'Raissa Tavares Vieira', 'raissa tavares vieira')</td><td>Email: lolyne.pacheco@gmail.com
+<br/>Email: andrewssobral@gmail.com
+<br/>Email: raissa@ieee.org,
+</td></tr><tr><td>2c883977e4292806739041cf8409b2f6df171aee</td><td>Aalborg Universitet
+<br/>Are Haar-like Rectangular Features for Biometric Recognition Reducible?
+<br/>Nasrollahi, Kamal; Moeslund, Thomas B.
+<br/>Published in:
+<br/>Progress in Pattern Recognition, Image Analysis, Computer Vision, and Applications
+<br/>DOI (link to publication from Publisher):
+<br/>10.1007/978-3-642-41827-3_42
+<br/>Publication date:
+<br/>2013
+<br/>Document Version
+<br/>Early version, also known as pre-print
+<br/><b>Link to publication from Aalborg University</b><br/>Citation for published version (APA):
+<br/>Nasrollahi, K., & Moeslund, T. B. (2013). Are Haar-like Rectangular Features for Biometric Recognition
+<br/>Reducible? In J. Ruiz-Shulcloper, & G. Sanniti di Baja (Eds.), Progress in Pattern Recognition, Image Analysis,
+<br/>Computer Vision, and Applications (Vol. 8259, pp. 334-341). Springer Berlin Heidelberg: Springer Publishing
+<br/>Company. Lecture Notes in Computer Science, DOI: 10.1007/978-3-642-41827-3_42
+<br/>General rights
+<br/>Copyright and moral rights for the publications made accessible in the public portal are retained by the authors and/or other copyright owners
+<br/>and it is a condition of accessing publications that users recognise and abide by the legal requirements associated with these rights.
+<br/> ? Users may download and print one copy of any publication from the public portal for the purpose of private study or research.
+<br/> ? You may not further distribute the material or use it for any profit-making activity or commercial gain
+<br/> ? You may freely distribute the URL identifying the publication in the public portal ?
+<br/>Take down policy
+<br/>the work immediately and investigate your claim.
+<br/>Downloaded from vbn.aau.dk on: oktober 28, 2017
+<br/> </td><td></td><td>If you believe that this document breaches copyright please contact us at vbn@aub.aau.dk providing details, and we will remove access to
+</td></tr><tr><td>2cdd9e445e7259117b995516025fcfc02fa7eebb</td><td>Title
+<br/>Temporal Exemplar-based Bayesian Networks for facial
+<br/>expression recognition
+<br/>Author(s)
+<br/>Shang, L; Chan, KP
+<br/>Citation
+<br/>Proceedings - 7Th International Conference On Machine
+<br/>Learning And Applications, Icmla 2008, 2008, p. 16-22
+<br/>Issued Date
+<br/>2008
+<br/>URL
+<br/>http://hdl.handle.net/10722/61208
+<br/>Rights
+<br/>This work is licensed under a Creative Commons Attribution-
+<br/>NonCommercial-NoDerivatives 4.0 International License.;
+<br/>International Conference on Machine Learning and Applications
+<br/>Proceedings. Copyright © IEEE.; ©2008 IEEE. Personal use of
+<br/>this material is permitted. However, permission to
+<br/>reprint/republish this material for advertising or promotional
+<br/>purposes or for creating new collective works for resale or
+<br/>redistribution to servers or lists, or to reuse any copyrighted
+<br/>component of this work in other works must be obtained from
+<br/>the IEEE.
+</td><td></td><td></td></tr><tr><td>2c1ffb0feea5f707c890347d2c2882be0494a67a</td><td>Learning to learn high capacity generative models from few examples
+<br/>The Variational Homoencoder:
+<br/>Tommi Jaakkola1
+<br/><b>Massachusetts Institute of Technology</b><br/>2MIT-IBM Watson AI Lab
+</td><td>('51152627', 'Luke B. Hewitt', 'luke b. hewitt')<br/>('51150953', 'Maxwell I. Nye', 'maxwell i. nye')<br/>('3071104', 'Andreea Gane', 'andreea gane')<br/>('1763295', 'Joshua B. Tenenbaum', 'joshua b. tenenbaum')</td><td></td></tr><tr><td>2cdc40f20b70ca44d9fd8e7716080ee05ca7924a</td><td>Real-time Convolutional Neural Networks for
+<br/>Emotion and Gender Classification
+<br/>Hochschule Bonn-Rhein-Sieg
+<br/>Sankt Augustin Germany
+<br/>Paul G. Pl¨oger
+<br/>Hochschule Bonn-Rhein-Sieg
+<br/>Sankt Augustin Germany
+<br/>Matias Valdenegro
+<br/><b>Heriot-Watt University</b><br/>Edinburgh, UK
+</td><td>('27629437', 'Octavio Arriaga', 'octavio arriaga')</td><td>Email: octavio.arriaga@smail.inf.h-brs.de
+<br/>Email: paul.ploeger@h-brs.de
+<br/>Email: m.valdenegro@hw.ac.uk
+</td></tr><tr><td>2cac70f9c8140a12b6a55cef834a3d7504200b62</td><td>Reconstructing High Quality Face-Surfaces using Model Based Stereo
+<br/><b>University of Basel, Switzerland</b><br/>Contribution
+<br/>We present a method to fit a detailed 3D morphable
+<br/>model to multiple images. Our formulation allows
+<br/>the fitting of the model without determining the
+<br/>lighting conditions and albedo of the face, mak-
+<br/>ing the system robust against difficult lighting sit-
+<br/>uations and unmodelled albedo variations such as
+<br/>skin colour, moles, freckles and cast shadows.
+<br/>The cost function employs
+<br/>Microsoft Research, Cambridge‡
+<br/>Ambient Lighting
+<br/>Evaluation: Gold Standard
+<br/>Ambient Only Dataset (20 Subjects)
+<br/>Stereo: Landmarks + Silhouette + Colour
+<br/>Stereo: Landmarks + Silhouette
+<br/>Stereo: Landmarks
+<br/>Monocular
+<br/>The model shape prior
+<br/>A small number of landmarks for initialization
+<br/>A monocular silhouette distance cost
+<br/>A stereo colour cost
+<br/>The optimisation consists of multiple runs of a non-
+<br/>linear minimizer. During each run the visibility of
+<br/>all sample points is assumed to stay constant. After
+<br/>some iterations the minimizer is stopped and visi-
+<br/>bility is reevaluated.
+<br/>Model
+<br/>The linear morphable face model was created by
+<br/>registering 200 face scans and performing a PCA on
+<br/>the data matrix to fit a Gaussian probability to the
+<br/>data and reduce the dimensionality of the model.
+<br/>Input Images
+<br/>Multiview
+<br/>Landmarks
+<br/>Multiview
+<br/>L.+Silhouette
+<br/>Multiview
+<br/>L.+S.+Colour
+<br/>Ground Truth
+<br/>Monocular [1]
+<br/>Each cue increases the reconstruction accuracy, lead-
+<br/>ing to significantly better result than possible with
+<br/>the state of the art monocular system [1]. Recon-
+<br/>structions of the face surface are compared to ground
+<br/>truth data acquired with a structured light system.
+<br/>The point wise distance from the reconstruction to
+<br/>the ground truth is shown in the inset head render-
+<br/>ings. Here green is a perfect match, and red denotes
+<br/>a distance of 3mm or more.
+<br/>The best of the three monocular results is shown.
+<br/>Silhouette Cost
+<br/>Directed Lighting
+<br/>The silhouette cost measures
+<br/>the distance of the silhouette
+<br/>to image edges. An edge cost
+<br/>surface is created from the im-
+<br/>age, by combining the distance
+<br/>transforms of edge detections
+<br/>with different thresholds. The
+<br/>cost ist integrated over the pro-
+<br/>jection of 3D sample points at
+<br/>the silhouette of the hypotheses.
+<br/>Edge Cost Surface
+<br/>Colour Reprojection Cost
+<br/>The colour
+<br/>reprojection cost
+<br/>measures the image colour dif-
+<br/>ference between the projected
+<br/>positions of sample points in
+<br/>two images. The sample points
+<br/>are spaced out regularly in the
+<br/>projected images.
+<br/>Multiview Ground Truth Monocular
+<br/>Input Images
+<br/>The new stereo algorithm is robust under directed
+<br/>lighting and yields significantly more accurate sur-
+<br/>face reconstructions than the monocular algorithm.
+<br/>Again the distance to the ground truth is shown
+<br/>Funding
+<br/>This work was supported in part by Microsoft Research through
+<br/>the European PhD Scholarship Programme.
+<br/>Multiview Ground Truth Monocular
+<br/>Input Images
+<br/>for green=0mm and red=3mm in the insets. Future
+<br/>work will include a skin and lighting model, hope-
+<br/>fully improving speed and accuracy of the method.
+<br/>All cues were used.
+<br/>References
+<br/>[1] S. Romdhani and T. Vetter. Estimating 3D Shape and Texture
+<br/>Using Pixel Intensity, Edges, Specular Highlights, Texture
+<br/>Constraints and a Prior. In CVPR 2005
+<br/>Distance to Ground Truth (mm)
+<br/>Directed Light Dataset (5 Subjects)
+<br/>Stereo: Landmarks + Silhouette + Colour
+<br/>Stereo: Landmarks + Silhouette
+<br/>Stereo: Landmarks
+<br/>Monocular
+<br/>Distance to Ground Truth (mm)
+<br/>The use of multi-view information results in a
+<br/>much higher accuracy than achievable by the
+<br/>monocular method. A higher frequency of lower
+<br/>residuals is better.
+<br/>Evaluation: Face Recognition
+<br/>To test the method on a difficult dataset, a face
+<br/>recognition experiment on the PIE dataset was per-
+<br/>formed. The results show, that the extracted sur-
+<br/>faces are consistent over variations in viewpoint
+<br/>and that the reconstruction quality increases with
+<br/>an increasing number of images.
+<br/>View-
+<br/>points
+<br/>Landmark
+<br/>+ Silhouette
+<br/>+ Colour
+<br/>2nd
+<br/>2nd
+<br/>1st
+<br/>1st
+<br/>2nd
+<br/>1st
+<br/>68% 63% 82%
+<br/>10% 18% 50%
+<br/>74% 74% 85%
+<br/>7% 18% 62%
+<br/>82% 87% 94%
+<br/>19% 37% 76%
+<br/>The columns labelled “1st” show the frequency of
+<br/>correct results, “2nd” is the frequency with which
+<br/>the correct result was within the first two subjects
+<br/>returned. The angle between the shape coefficients
+<br/>was used as the distance measure.
+<br/>Texture information should be used to achieve state
+<br/>of the art recognition results.
+<br/>FaceCamera1Camera2SamplePoint </td><td>('1994157', 'Brian Amberg', 'brian amberg')<br/>('1745076', 'Andrew Blake', 'andrew blake')<br/>('3293655', 'Sami Romdhani', 'sami romdhani')<br/>('1687079', 'Thomas Vetter', 'thomas vetter')</td><td></td></tr><tr><td>2c5d1e0719f3ad7f66e1763685ae536806f0c23b</td><td>AENet: Learning Deep Audio Features for Video
+<br/>Analysis
+</td><td>('47893464', 'Naoya Takahashi', 'naoya takahashi')<br/>('3037160', 'Michael Gygli', 'michael gygli')<br/>('7329802', 'Luc van Gool', 'luc van gool')</td><td></td></tr><tr><td>2c8f24f859bbbc4193d4d83645ef467bcf25adc2</td><td>845
+<br/>Classification in the Presence of
+<br/>Label Noise: a Survey
+</td><td>('1786603', 'Benoît Frénay', 'benoît frénay')<br/>('1782629', 'Michel Verleysen', 'michel verleysen')</td><td></td></tr><tr><td>2c1f8ddbfbb224271253a27fed0c2425599dfe47</td><td>Understanding and Comparing Deep Neural Networks
+<br/>for Age and Gender Classification
+<br/><b>Fraunhofer Heinrich Hertz Institute</b><br/><b>Singapore University of Technology and Design</b><br/>10587 Berlin, Germany
+<br/>Klaus-Robert M¨uller
+<br/><b>Berlin Institute of Technology</b><br/>10623 Berlin, Germany
+<br/>Singapore 487372, Singapore
+<br/><b>Fraunhofer Heinrich Hertz Institute</b><br/>10587 Berlin, Germany
+</td><td>('3633358', 'Sebastian Lapuschkin', 'sebastian lapuschkin')<br/>('40344011', 'Alexander Binder', 'alexander binder')<br/>('1699054', 'Wojciech Samek', 'wojciech samek')</td><td>sebastian.lapuschkin@hhi.fraunhofer.de
+<br/>alexander binder@sutd.edu.sg
+<br/>klaus-robert.mueller@tu-berlin.de
+<br/>wojciech.samek@hhi.fraunhofer.de
+</td></tr><tr><td>2ca43325a5dbde91af90bf850b83b0984587b3cc</td><td>For Your Eyes Only – Biometric Protection of PDF Documents
+<br/><b>Faculty of ETI, Gdansk University of Technology, Gdansk, Poland</b></td><td>('2026734', 'J. Siciarek', 'j. siciarek')</td><td></td></tr><tr><td>2cfc28a96b57e0817cc9624a5d553b3aafba56f3</td><td>P2F2: Privacy-Preserving Face Finder
+<br/><b>New Jersey Institute of Technology</b></td><td>('9037517', 'Nora Almalki', 'nora almalki')<br/>('1692516', 'Reza Curtmola', 'reza curtmola')<br/>('34645435', 'Xiaoning Ding', 'xiaoning ding')<br/>('1690806', 'Cristian Borcea', 'cristian borcea')</td><td>Email: {naa34, crix, xiaoning.ding, narain.gehani, borcea}@njit.edu
+</td></tr><tr><td>2cdd5b50a67e4615cb0892beaac12664ec53b81f</td><td>To appear in ACM TOG 33(6).
+<br/>Mirror Mirror: Crowdsourcing Better Portraits
+<br/>Jun-Yan Zhu1
+<br/>Aseem Agarwala2
+<br/>Jue Wang2
+<br/><b>University of California, Berkeley1 Adobe</b><br/>Figure 1: We collect thousands of portraits by capturing video of a subject while they watch movie clips designed to elicit a range of positive
+<br/>emotions. We use crowdsourcing and machine learning to train models that can predict attractiveness scores of different expressions. These
+<br/>models can be used to select a subject’s best expressions across a range of emotions, from more serious professional portraits to big smiles.
+</td><td>('1763086', 'Alexei A. Efros', 'alexei a. efros')<br/>('2177801', 'Eli Shechtman', 'eli shechtman')</td><td></td></tr><tr><td>2cae619d0209c338dc94593892a787ee712d9db0</td><td>Selective Hidden Random Fields: Exploiting Domain-Specific Saliency for Event
+<br/>Classification
+<br/><b>University of Massachusetts Amherst</b><br/>Amherst MA USA
+</td><td>('2246870', 'Vidit Jain', 'vidit jain')</td><td>vidit@cs.umass.edu
+</td></tr><tr><td>2c0acaec54ab2585ff807e18b6b9550c44651eab</td><td>Face Quality Assessment for Face Verification in Video
+<br/><b>Lomonosov Moscow State University, 2Video Analysis Technologies, LLC</b><br/>fusion of
+<br/>facial
+</td><td>('38982797', 'M. Nikitin', 'm. nikitin')<br/>('2943115', 'V. Konushin', 'v. konushin')<br/>('1934937', 'A. Konushin', 'a. konushin')</td><td>mnikitin@graphics.cs.msu.ru, vadim@tevian.ru, ktosh@graphics.cs.msu.ru
+</td></tr><tr><td>2cdde47c27a8ecd391cbb6b2dea64b73282c7491</td><td>ORDER-AWARE CONVOLUTIONAL POOLING FOR VIDEO BASED ACTION RECOGNITION
+<br/>Order-aware Convolutional Pooling for Video Based
+<br/>Action Recognition
+</td><td>('1722767', 'Peng Wang', 'peng wang')<br/>('2161037', 'Lingqiao Liu', 'lingqiao liu')<br/>('1780381', 'Chunhua Shen', 'chunhua shen')<br/>('1724393', 'Heng Tao Shen', 'heng tao shen')</td><td></td></tr><tr><td>2c62b9e64aeddf12f9d399b43baaefbca8e11148</td><td>Evaluation of Dense 3D Reconstruction from 2D Face Images in the Wild
+<br/><b>Centre for Vision, Speech and Signal Processing, University of Surrey, Guildford GU2 7XH, UK</b><br/><b>Faculty of Natural Sciences, University of Stirling, Stirling FK9 4LA, UK</b><br/><b>School of Internet of Things Engineering, Jiangnan University, Wuxi 214122, China</b><br/><b>Biometrics Research Lab, College of Computer Science, Sichuan University, Chengdu 610065, China</b><br/><b>Image Understanding and Interactive Robotics, Reutlingen University, 72762 Reutlingen, Germany</b></td><td>('2976854', 'Zhen-Hua Feng', 'zhen-hua feng')<br/>('1748684', 'Josef Kittler', 'josef kittler')<br/>('7345195', 'Qijun Zhao', 'qijun zhao')</td><td>{z.feng, j.kittler, p.koppen}@surrey.ac.uk, patrikhuber@gmail.com,
+<br/>wu_xiaojun@jiangnan.edu.cn, p.j.b.hancock@stir.ac.uk, qjzhao@scu.edu.cn
+</td></tr><tr><td>2c7c3a74da960cc76c00965bd3e343958464da45</td><td></td><td></td><td></td></tr><tr><td>2cf5f2091f9c2d9ab97086756c47cd11522a6ef3</td><td>MPIIGaze: Real-World Dataset and Deep
+<br/>Appearance-Based Gaze Estimation
+</td><td>('2520795', 'Xucong Zhang', 'xucong zhang')<br/>('1751242', 'Yusuke Sugano', 'yusuke sugano')<br/>('1739548', 'Mario Fritz', 'mario fritz')<br/>('3194727', 'Andreas Bulling', 'andreas bulling')</td><td></td></tr><tr><td>2c19d3d35ef7062061b9e16d040cebd7e45f281d</td><td>End-to-end Video-level Representation Learning for Action Recognition
+<br/><b>Institute of Automation, Chinese Academy of Sciences (CASIA</b><br/><b>University of Chinese Academy of Sciences (UCAS</b></td><td>('1696573', 'Jiagang Zhu', 'jiagang zhu')<br/>('1726367', 'Wei Zou', 'wei zou')<br/>('48147901', 'Zheng Zhu', 'zheng zhu')</td><td>{zhujiagang2015, wei.zou}@ia.ac.cn, zhuzheng14@mails.ucas.ac.cn
+</td></tr><tr><td>2c17d36bab56083293456fe14ceff5497cc97d75</td><td>Unconstrained Face Alignment via Cascaded Compositional Learning
+<br/><b>The Chinese University of Hong Kong</b><br/><b>Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences</b><br/>2SenseTime Group Limited
+</td><td>('2226254', 'Shizhan Zhu', 'shizhan zhu')<br/>('40475617', 'Cheng Li', 'cheng li')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td>zs014@ie.cuhk.edu.hk, chengli@sensetime.com, ccloy@ie.cuhk.edu.hk, xtang@ie.cuhk.edu.hk
+</td></tr><tr><td>2c4b96f6c1a520e75eb37c6ee8b844332bc0435c</td><td>Automatic Emotion Recognition in Robot-Children Interaction for ASD
+<br/>Treatment
+<br/>ISASI UOS Lecce
+<br/>Campus Universitario via Monteroni sn, 73100 Lecce Italy
+<br/>ISASI UOS Messina
+<br/>Univerisita’ di Bari
+<br/><b>Marine Institute, via Torre Bianca, 98164 Messina Italy</b><br/>Via Orabona 4, 70126 Bari, Italy
+</td><td>('4730472', 'Marco Leo', 'marco leo')<br/>('33097940', 'Marco Del Coco', 'marco del coco')<br/>('1741861', 'Cosimo Distante', 'cosimo distante')<br/>('3049247', 'Giovanni Pioggia', 'giovanni pioggia')<br/>('2235498', 'Giuseppe Palestra', 'giuseppe palestra')</td><td>marco.leo@cnr.it
+</td></tr><tr><td>2cd7821fcf5fae53a185624f7eeda007434ae037</td><td>Exploring the Geo-Dependence of Human Face Appearance
+<br/>Computer Science
+<br/><b>University of Kentucky</b><br/>Computer Science
+<br/>UNC Charlotte
+<br/>Computer Science
+<br/><b>University of Kentucky</b></td><td>('2142962', 'Mohammad T. Islam', 'mohammad t. islam')<br/>('38792670', 'Scott Workman', 'scott workman')<br/>('1873911', 'Hui Wu', 'hui wu')<br/>('1690110', 'Richard Souvenir', 'richard souvenir')<br/>('1990750', 'Nathan Jacobs', 'nathan jacobs')</td><td>{tarik,scott}@cs.uky.edu
+<br/>{hwu13,souvenir}@uncc.edu
+<br/>jacobs@cs.uky.edu
+</td></tr><tr><td>79581c364cefe53bff6bdd224acd4f4bbc43d6d4</td><td></td><td></td><td></td></tr><tr><td>794ddb1f3b7598985d4d289b5b0664be736a50c4</td><td>Exploiting Competition Relationship for Robust Visual Recognition
+<br/>Center for Data Analytics and Biomedical Informatics
+<br/>Department of Computer and Information Science
+<br/><b>Temple University</b><br/>Philadelphia, PA, 19122, USA
+</td><td>('38909760', 'Liang Du', 'liang du')<br/>('1805398', 'Haibin Ling', 'haibin ling')</td><td>{liang.du, hbling}@temple.edu
+</td></tr><tr><td>790aa543151312aef3f7102d64ea699a1d15cb29</td><td>Confidence-Weighted Local Expression Predictions for
+<br/>Occlusion Handling in Expression Recognition and Action
+<br/>Unit detection
+<br/>1 Sorbonne Universités, UPMC Univ Paris 06, CNRS, ISIR UMR 7222
+<br/>4 place Jussieu 75005 Paris
+</td><td>('3190846', 'Arnaud Dapogny', 'arnaud dapogny')<br/>('2521061', 'Kevin Bailly', 'kevin bailly')<br/>('1701986', 'Séverine Dubuisson', 'séverine dubuisson')</td><td>arnaud.dapogny@isir.upmc.fr
+<br/>kevin.bailly@isir.upmc.fr
+<br/>severine.dubuisson@isir.upmc.fr
+</td></tr><tr><td>79f6a8f777a11fd626185ab549079236629431ac</td><td>Copyright
+<br/>by
+<br/>2013
+</td><td>('35788904', 'Sung Ju Hwang', 'sung ju hwang')</td><td></td></tr><tr><td>795ea140df2c3d29753f40ccc4952ef24f46576c</td><td></td><td></td><td></td></tr><tr><td>79dc84a3bf76f1cb983902e2591d913cee5bdb0e</td><td></td><td></td><td></td></tr><tr><td>79744fc71bea58d2e1918c9e254b10047472bd76</td><td>Disentangling 3D Pose in A Dendritic CNN
+<br/>for Unconstrained 2D Face Alignment
+<br/>Department of Electrical and Computer Engineering, CFAR and UMIACS
+<br/><b>University of Maryland-College Park, USA</b></td><td>('50333013', 'Amit Kumar', 'amit kumar')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td>akumar14@umiacs.umd.edu, rama@umiacs.umd.edu
+</td></tr><tr><td>79b669abf65c2ca323098cf3f19fa7bdd837ff31</td><td> Deakin Research Online
+<br/>This is the published version:
+<br/>Rana, Santu, Liu, Wanquan, Lazarescu, Mihai and Venkatesh, Svetha 2008, Efficient tensor
+<br/>based face recognition, in ICPR 2008 : Proceedings of the 19th International Conference on
+<br/>Pattern Recognition, IEEE, Washington, D. C., pp. 1-4.
+<br/>Available from Deakin Research Online:
+<br/>http://hdl.handle.net/10536/DRO/DU:30044585
+<br/>
+<br/>Reproduced with the kind permissions of the copyright owner.
+<br/>Personal use of this material is permitted. However, permission to reprint/republish this
+<br/>material for advertising or promotional purposes or for creating new collective works for
+<br/>resale or redistribution to servers or lists, or to reuse any copyrighted component of this work
+<br/>in other works must be obtained from the IEEE.
+<br/>Copyright : 2008, IEEE
+</td><td></td><td></td></tr><tr><td>794c0dc199f0bf778e2d40ce8e1969d4069ffa7b</td><td>Odd Leaf Out
+<br/>Improving visual recognition with games
+<br/>Preece
+<br/>School of Information
+<br/><b>University of Maryland</b><br/><b>College Park, United States</b></td><td>('6519022', 'Darcy Lewis', 'darcy lewis')<br/>('2662457', 'Dana Rotman', 'dana rotman')</td><td></td></tr><tr><td>79c3a7131c6c176b02b97d368cd0cd0bc713ff7e</td><td></td><td></td><td></td></tr><tr><td>79dd787b2877cf9ce08762d702589543bda373be</td><td>Face Detection Using SURF Cascade
+<br/>Intel Labs China
+</td><td>('35423937', 'Jianguo Li', 'jianguo li')<br/>('40279370', 'Tao Wang', 'tao wang')<br/>('2470865', 'Yimin Zhang', 'yimin zhang')</td><td></td></tr><tr><td>799c02a3cde2c0805ea728eb778161499017396b</td><td>PersonRank: Detecting Important People in Images
+<br/><b>School of Electronics and Information Technology, Sun Yat-Sen University, GuangZhou, China</b><br/><b>School of Data and Computer Science, Sun Yat-Sen University, GuangZhou, China</b></td><td>('9186191', 'Benchao Li', 'benchao li')<br/>('3333315', 'Wei-Shi Zheng', 'wei-shi zheng')</td><td></td></tr><tr><td>7966146d72f9953330556baa04be746d18702047</td><td>Harnessing Human Manipulation
+<br/>NSF/ARL Workshop on Cloud Robotics: Challenges and Opportunities
+<br/>February 27-28, 2013
+<br/><b>The Robotics Institute Carnegie Mellon University</b><br/><b>Georgia Institute of Technology</b></td><td>('1781040', 'Matthew T. Mason', 'matthew t. mason')<br/>('1735665', 'Nancy Pollard', 'nancy pollard')<br/>('1760708', 'Alberto Rodriguez', 'alberto rodriguez')<br/>('38637733', 'Ryan Kerwin', 'ryan kerwin')</td><td><matt.mason, nsp, albertor>@cs.cmu.edu
+<br/>ryankerwin@gatech.edu
+</td></tr><tr><td>79fa57dedafddd3f3720ca26eb41c82086bfb332</td><td>Modeling Facial Expression Space for Recognition *
+<br/>National Lab. on Machine Perception
+<br/><b>Peking University</b><br/>Beijing, China
+<br/>National Lab. on Machine Perception
+<br/><b>Peking University</b><br/>Beijing, China
+<br/>National Lab. on Machine Perception
+<br/><b>Peking University</b><br/>Beijing, China
+</td><td>('2086289', 'Hong Liu', 'hong liu')<br/>('1687248', 'Hongbin Zha', 'hongbin zha')<br/>('2976781', 'Yuwen Wu', 'yuwen wu')</td><td>wuyw@cis.pku.edu.cn
+<br/>liuhong@cis.pku.edu.cn
+<br/>zha@cis.pku.edu.cn
+</td></tr><tr><td>793e7f1ba18848908da30cbad14323b0389fd2a8</td><td></td><td></td><td></td></tr><tr><td>79db191ca1268dc88271abef3179c4fe4ee92aed</td><td>Facial Expression Based Automatic Album
+<br/>Creation
+<br/><b>School of Computer Science, CECS, Australian National University, Canberra</b><br/><b>School of Engineering, CECS, Australian National University, Canberra, Australia</b><br/>3 Vision & Sensing, Faculty of Information Sciences and Engineering,
+<br/>Australia
+<br/><b>University of Canberra, Australia</b></td><td>('1735697', 'Abhinav Dhall', 'abhinav dhall')<br/>('3183108', 'Akshay Asthana', 'akshay asthana')<br/>('1717204', 'Roland Goecke', 'roland goecke')</td><td>abhinav.dhall@anu.edu.au, aasthana@rsise.anu.edu.au,
+<br/>roland.goecke@ieee.org
+</td></tr><tr><td>2d990b04c2bd61d3b7b922b8eed33aeeeb7b9359</td><td>Discriminative Dictionary Learning with
+<br/>Pairwise Constraints
+<br/><b>University of Maryland, College Park, MD</b></td><td>('2723427', 'Huimin Guo', 'huimin guo')<br/>('34145947', 'Zhuolin Jiang', 'zhuolin jiang')<br/>('1693428', 'Larry S. Davis', 'larry s. davis')</td><td>{hmguo,zhuolin,lsd}@umiacs.umd.edu
+</td></tr><tr><td>2d25045ec63f9132371841c0beccd801d3733908</td><td>Sensors 2015, 15, 6719-6739; doi:10.3390/s150306719
+<br/>OPEN ACCESS
+<br/>sensors
+<br/>ISSN 1424-8220
+<br/>www.mdpi.com/journal/sensors
+<br/>Article
+<br/>Multi-Layer Sparse Representation for Weighted LBP-Patches
+<br/>Based Facial Expression Recognition
+<br/><b>School of Software, Dalian University of Technology, Dalian 116621, China</b><br/>Tel.: +86-411-8757-1516.
+<br/>Academic Editor: Vittorio M.N. Passaro
+<br/>Received: 15 December 2014 / Accepted: 10 March 2015 / Published: 19 March 2015
+</td><td>('2235253', 'Qi Jia', 'qi jia')<br/>('3459398', 'Xinkai Gao', 'xinkai gao')<br/>('2736880', 'He Guo', 'he guo')<br/>('7864960', 'Zhongxuan Luo', 'zhongxuan luo')<br/>('1734275', 'Yi Wang', 'yi wang')</td><td>E-Mails: jiaqi7166@gmail.com (Q.J.); gaoxinkai@mail.dlut.edu.cn (X.G.); zxluo@dlut.edu.cn (Z.L.);
+<br/>wangyi_dlut@126.com (Y.W.)
+<br/>* Author to whom correspondence should be addressed; E-Mail: guohe@dlut.edu.cn;
+</td></tr><tr><td>2dd6c988b279d89ab5fb5155baba65ce4ce53c1e</td><td></td><td></td><td></td></tr><tr><td>2d080662a1653f523321974a57518e7cb67ecb41</td><td>On Constrained Local Model Feature
+<br/>Normalization for Facial Expression Recognition
+<br/><b>School of Computing and Info. Sciences, Florida International University</b><br/>11200 SW 8th St, Miami, FL 33199, USA
+<br/>http://ascl.cis.fiu.edu/
+</td><td>('3489972', 'Zhenglin Pan', 'zhenglin pan')<br/>('2008564', 'Mihai Polceanu', 'mihai polceanu')</td><td>zpan004@fiu.edu,{mpolcean,lisetti}@cs.fiu.edu
+</td></tr><tr><td>2d4b9fe3854ccce24040074c461d0c516c46baf4</td><td>Temporal Action Localization by Structured Maximal Sums
+<br/><b>State Key Laboratory for Novel Software Technology, Nanjing University, China</b><br/><b>University of Michigan, Ann Arbor</b></td><td>('40188401', 'Jonathan C. Stroud', 'jonathan c. stroud')<br/>('2285916', 'Tong Lu', 'tong lu')<br/>('8342699', 'Jia Deng', 'jia deng')</td><td></td></tr><tr><td>2d294c58b2afb529b26c49d3c92293431f5f98d0</td><td>4413
+<br/>Maximum Margin Projection Subspace Learning
+<br/>for Visual Data Analysis
+</td><td>('1793625', 'Symeon Nikitidis', 'symeon nikitidis')<br/>('1737071', 'Anastasios Tefas', 'anastasios tefas')<br/>('1698588', 'Ioannis Pitas', 'ioannis pitas')</td><td></td></tr><tr><td>2d1f86e2c7ba81392c8914edbc079ac64d29b666</td><td></td><td></td><td></td></tr><tr><td>2d9e58ea582e054e9d690afca8b6a554c3687ce6</td><td>Learning Local Feature Aggregation Functions
+<br/>with Backpropagation
+<br/>Multimedia Understanding Group
+<br/><b>Aristotle University of Thessaloniki, Greece</b></td><td>('3493855', 'Angelos Katharopoulos', 'angelos katharopoulos')<br/>('3493472', 'Despoina Paschalidou', 'despoina paschalidou')<br/>('1789830', 'Christos Diou', 'christos diou')<br/>('1708199', 'Anastasios Delopoulos', 'anastasios delopoulos')</td><td>{katharas, pdespoin}@auth.gr; diou@mug.ee.auth.gr; adelo@eng.auth.gr
+</td></tr><tr><td>2d164f88a579ba53e06b601d39959aaaae9016b7</td><td>Dynamic Facial Expression Recognition Using
+<br/>A Bayesian Temporal Manifold Model
+<br/>Department of Computer Science
+<br/><b>Queen Mary University of London</b><br/>Mile End Road, London E1 4NS, UK
+</td><td>('10795229', 'Caifeng Shan', 'caifeng shan')<br/>('2073354', 'Shaogang Gong', 'shaogang gong')<br/>('2803283', 'Peter W. McOwan', 'peter w. mcowan')</td><td>{cfshan, sgg, pmco}@dcs.qmul.ac.uk
+</td></tr><tr><td>2d8001ffee6584b3f4d951d230dc00a06e8219f8</td><td>Feature Agglomeration Networks for Single Stage Face Detection
+<br/><b>School of Information Systems, Singapore Management University, Singapore</b><br/><b>College of Computer Science and Technology, Zhejiang University, Hangzhou, China</b><br/>§DeepIR Inc., Beijing, China
+</td><td>('1826176', 'Jialiang Zhang', 'jialiang zhang')<br/>('2791484', 'Xiongwei Wu', 'xiongwei wu')<br/>('1704030', 'Jianke Zhu', 'jianke zhu')</td><td>{chhoi,xwwu.2015@phdis}@smu.edu.sg;{zjialiang,jkzhu}@zju.edu.cn
+</td></tr><tr><td>2d23fa205acca9c21e3e1a04674f1e5a9528550e</td><td>The Fast and the Flexible:
+<br/>Extended Pseudo Two-Dimensional Warping for
+<br/>Face Recognition
+<br/>1Computer Vision and Multimodal Computing
+<br/>2 Computer Vision Laboratory
+<br/>MPI Informatics, Saarbruecken
+<br/>ETH Zurich
+<br/>3Human Language Technology and Pattern Recognition Group,
+<br/><b>RWTH Aachen University</b></td><td>('2299109', 'Leonid Pishchulin', 'leonid pishchulin')<br/>('1948162', 'Tobias Gass', 'tobias gass')<br/>('1967060', 'Philippe Dreuw', 'philippe dreuw')<br/>('1685956', 'Hermann Ney', 'hermann ney')</td><td>leonid@mpi-inf.mpg.de
+<br/>gasst@vision.ee.ethz.ch
+<br/><last name>@cs.rwth-aachen.de
+</td></tr><tr><td>2d244d70ed1a2ba03d152189f1f90ff2b4f16a79</td><td>An Analytical Mapping for LLE and Its
+<br/>Application in Multi-Pose Face Synthesis
+<br/>State Key Lab of Intelligent Technology and Systems
+<br/><b>Tsinghua University</b><br/>Beijing, 100084, China
+</td><td>('1715001', 'Jun Wang', 'jun wang')</td><td>wangjun00@mails.tsinghua.edu.cn
+<br/>zcs@mail.tsinghua.edu.cn
+<br/>kzb98@mails.tsinghua.edu.cn
+</td></tr><tr><td>2d88e7922d9f046ace0234f9f96f570ee848a5b5</td><td>Building Better Detection with Privileged Information
+<br/>Department of CSE
+<br/>The Pennsylvania State
+<br/><b>University</b><br/>Department of CSE
+<br/>The Pennsylvania State
+<br/><b>University</b><br/>Applied Communication
+<br/>Sciences
+<br/>Basking Ridge, NJ, US
+<br/>Department of CSE
+<br/>The Pennsylvania State
+<br/><b>University</b><br/>Army Research
+<br/>Laboratory
+<br/>Adelphi, MD, USA
+</td><td>('2950892', 'Z. Berkay Celik', 'z. berkay celik')<br/>('4108832', 'Patrick McDaniel', 'patrick mcdaniel')<br/>('1804289', 'Rauf Izmailov', 'rauf izmailov')<br/>('1967156', 'Nicolas Papernot', 'nicolas papernot')<br/>('1703726', 'Ananthram Swami', 'ananthram swami')</td><td>zbc102@cse.psu.edu
+<br/>mcdaniel@cse.psu.edu
+<br/>rizmailov@appcomsci.com
+<br/>npg5056@cse.psu.edu
+<br/>ananthram.swami.civ@mail.mil
+</td></tr><tr><td>2d31ab536b3c8a05de0d24e0257ca4433d5a7c75</td><td>Materials Discovery: Fine-Grained Classification of X-ray Scattering Images
+<br/>Kevin Yager†
+<br/><b>University of North Carolina at Chapel Hill, NC, USA</b><br/>†Brookhaven National Lab, NY, USA
+</td><td>('1772294', 'M. Hadi Kiapour', 'm. hadi kiapour')<br/>('39668247', 'Alexander C. Berg', 'alexander c. berg')<br/>('1685538', 'Tamara L. Berg', 'tamara l. berg')</td><td>{hadi,aberg,tlberg}@cs.unc.edu
+<br/>kyager@bnl.gov
+</td></tr><tr><td>2dbde64ca75e7986a0fa6181b6940263bcd70684</td><td>Pose Independent Face Recognition by Localizing
+<br/>Local Binary Patterns via Deformation Components
+<br/><b>MICC, University of Florence</b><br/>Italy
+<br/>http://www.micc.unifi.it/vim
+<br/>G´erard Medioni
+<br/><b>USC IRIS Lab, University of Southern California</b><br/>Los Angeles, USA
+<br/>http://iris.usc.edu/USC-Computer-Vision.html
+</td><td>('11269472', 'Iacopo Masi', 'iacopo masi')<br/>('35220006', 'Claudio Ferrari', 'claudio ferrari')<br/>('8196487', 'Alberto Del Bimbo', 'alberto del bimbo')</td><td></td></tr><tr><td>2d0363a3ebda56d91d704d5ff5458a527775b609</td><td>Attribute2Image: Conditional Image Generation from Visual Attributes
+<br/>1Computer Science and Engineering Division
+<br/>2Adobe Research
+<br/>3NEC Labs
+<br/><b>University of Michigan, Ann Arbor</b></td><td>('3084614', 'Xinchen Yan', 'xinchen yan')<br/>('1768964', 'Jimei Yang', 'jimei yang')<br/>('1729571', 'Kihyuk Sohn', 'kihyuk sohn')<br/>('1697141', 'Honglak Lee', 'honglak lee')</td><td>{xcyan,kihyuks,honglak}@umich.edu
+<br/>jimyang@adobe.com
+<br/>ksohn@nec-labs.com
+</td></tr><tr><td>2d93a9aa8bed51d0d1b940c73ac32c046ebf1eb8</td><td>Perceptual Reward Functions
+<br/><b>College of Computing, Georgia Institute of Technology, Atlanta, GA, USA</b><br/><b>Waseda University, Tokyo, Japan</b></td><td>('1737432', 'Atsuo Takanishi', 'atsuo takanishi')</td><td>aedwards8@gatech.edu, isbell@cc.gatech.edu
+<br/>takanisi@waseda.jp
+</td></tr><tr><td>2dd2c7602d7f4a0b78494ac23ee1e28ff489be88</td><td>Large Scale Metric Learning from Equivalence Constraints ∗
+<br/><b>Institute for Computer Graphics and Vision, Graz University of Technology</b></td><td>('2918450', 'Martin Hirzer', 'martin hirzer')<br/>('3202367', 'Paul Wohlhart', 'paul wohlhart')<br/>('1791182', 'Peter M. Roth', 'peter m. roth')<br/>('3628150', 'Horst Bischof', 'horst bischof')</td><td>{koestinger,hirzer,wohlhart,pmroth,bischof}@icg.tugraz.at
+</td></tr><tr><td>2d84e30c61281d3d7cdd11676683d6e66a68aea6</td><td>Automatic Construction of Action Datasets
+<br/>using Web videos with Density-based Cluster
+<br/>Analysis and Outlier Detection
+<br/><b>The University of Electro-Communications</b><br/>185-8585 , Japan Tokyo Chofu Chofugaoka 1-5-1
+</td><td>('1681659', 'Keiji Yanai', 'keiji yanai')</td><td></td></tr><tr><td>2d98a1cb0d1a37c79a7ebcb727066f9ccc781703</td><td>Coupled Support Vector Machines for Supervised
+<br/>Domain Adaptation
+<br/>∗Center for Cognitive Ubiquitous Computing, Arizona State Univeristy
+<br/>† Bosch Research and Technology Center, Palo Alto
+<br/><b>University of Michigan, Ann Arbor</b></td><td>('3151995', 'Hemanth Venkateswara', 'hemanth venkateswara')<br/>('2929090', 'Prasanth Lade', 'prasanth lade')<br/>('37513601', 'Jieping Ye', 'jieping ye')<br/>('1743991', 'Sethuraman Panchanathan', 'sethuraman panchanathan')</td><td>hemanthv@asu.edu, prasanth.lade@us.bosch.com, jpye@umich.edu,
+<br/>panch@asu.edu
+</td></tr><tr><td>2dced31a14401d465cd115902bf8f508d79de076</td><td>ORIGINAL RESEARCH
+<br/>published: 26 May 2015
+<br/>doi: 10.3389/fbioe.2015.00064
+<br/>Can a humanoid face be expressive?
+<br/>A psychophysiological investigation
+<br/><b>Research Center E. Piaggio , University of Pisa, Pisa, Italy, 2 Faculty of Psychology, University of Florence, Florence, Italy</b><br/><b>University of Pisa, Pisa, Italy</b><br/>Non-verbal signals expressed through body language play a crucial role in multi-modal
+<br/>human communication during social relations. Indeed, in all cultures, facial expressions
+<br/>are the most universal and direct signs to express innate emotional cues. A human face
+<br/>conveys important information in social interactions and helps us to better understand
+<br/>our social partners and establish empathic links. Latest researches show that humanoid
+<br/>and social robots are becoming increasingly similar to humans, both esthetically and
+<br/>expressively. However, their visual expressiveness is a crucial issue that must be improved
+<br/>to make these robots more realistic and intuitively perceivable by humans as not different
+<br/>from them. This study concerns the capability of a humanoid robot to exhibit emotions
+<br/>through facial expressions. More specifically, emotional signs performed by a humanoid
+<br/>robot have been compared with corresponding human facial expressions in terms of
+<br/>recognition rate and response time. The set of stimuli
+<br/>included standardized human
+<br/>expressions taken from an Ekman-based database and the same facial expressions
+<br/>performed by the robot. Furthermore, participants’ psychophysiological responses have
+<br/>been explored to investigate whether there could be differences induced by interpreting
+<br/>robot or human emotional stimuli. Preliminary results show a trend to better recognize
+<br/>expressions performed by the robot than 2D photos or 3D models. Moreover, no
+<br/>significant differences in the subjects’ psychophysiological state have been found during
+<br/>the discrimination of facial expressions performed by the robot in comparison with the
+<br/>same task performed with 2D photos and 3D models.
+<br/>Keywords: facial expressions, emotion perception, humanoid robot, expression recognition, social robots,
+<br/>psychophysiological signals, affective computing
+<br/>1. Introduction
+<br/>Human beings communicate in a rich and sophisticated way through many different channels,
+<br/>e.g., sound, vision, and touch. In human social relationships, visual information plays a crucial
+<br/>role. Human faces convey important information both from static features, such as identity, age,
+<br/>and gender, and from dynamic changes, such as expressions, eye blinking, and muscular micro-
+<br/>movements. The ability to recognize and understand facial expressions of the social partner allows
+<br/>us to establish and manage the empathic links that drive our social relationships.
+<br/>Charles Darwin was the first to observe that basic expressions, such as anger, disgust, contempt,
+<br/>fear, surprise, sadness, and happiness, are universal and innate (Darwin, 1872). Since the publication
+<br/>of his book “The Expression of the Emotions in Man and Animals” in 1872, a strong debate over the
+<br/>Edited by:
+<br/>Cecilia Laschi,
+<br/>Scuola Superiore Sant’Anna, Italy
+<br/>Reviewed by:
+<br/>John-John Cabibihan,
+<br/><b>Qatar University, Qatar</b><br/>Egidio Falotico,
+<br/>Scuola Superiore Sant’Anna, Italy
+<br/>*Correspondence:
+<br/><b>Research Center E. Piaggio</b><br/><b>University of Pisa, Largo Lucio</b><br/>Lazzarino 1, Pisa 56122, Italy
+<br/>Specialty section:
+<br/>This article was submitted to Bionics
+<br/>and Biomimetics, a section of the
+<br/>journal Frontiers in Bioengineering and
+<br/>Biotechnology
+<br/>Received: 24 November 2014
+<br/>Accepted: 27 April 2015
+<br/>Published: 26 May 2015
+<br/>Citation:
+<br/>Lazzeri N, Mazzei D, Greco A, Rotesi
+<br/>A, Lanatà A and De Rossi DE (2015)
+<br/>Can a humanoid face be expressive?
+<br/>A psychophysiological investigation.
+<br/>Front. Bioeng. Biotechnol. 3:64.
+<br/>doi: 10.3389/fbioe.2015.00064
+<br/>Frontiers in Bioengineering and Biotechnology | www.frontiersin.org
+<br/>May 2015 | Volume 3 | Article 64
+</td><td>('35440863', 'Nicole Lazzeri', 'nicole lazzeri')<br/>('34573296', 'Daniele Mazzei', 'daniele mazzei')<br/>('32070391', 'Alberto Greco', 'alberto greco')<br/>('6284325', 'Annalisa Rotesi', 'annalisa rotesi')<br/>('1730665', 'Antonio Lanatà', 'antonio lanatà')<br/>('20115987', 'Danilo Emilio De Rossi', 'danilo emilio de rossi')<br/>('34573296', 'Daniele Mazzei', 'daniele mazzei')</td><td>mazzei@di.unipi.it
+</td></tr><tr><td>2d05e768c64628c034db858b7154c6cbd580b2d5</td><td>Available Online at www.ijcsmc.com
+<br/>International Journal of Computer Science and Mobile Computing
+<br/> A Monthly Journal of Computer Science and Information Technology
+<br/> IJCSMC, Vol. 4, Issue. 8, August 2015, pg.431 – 446
+<br/> RESEARCH ARTICLE
+<br/>ISSN 2320–088X
+<br/>FACIAL EXPRESSION RECOGNITION:
+<br/>Machine Learning using C#
+</td><td></td><td>Author: Neda Firoz (nedafiroz1910@gmail.com)
+<br/>Advisor: Dr. Prashant Ankur Jain (prashant.jain@shiats.edu.in)
+</td></tr><tr><td>2dfe0e7e81f65716b09c590652a4dd8452c10294</td><td>ORIGINAL RESEARCH
+<br/>published: 06 June 2018
+<br/>doi: 10.3389/fpsyg.2018.00864
+<br/>Incongruence Between Observers’
+<br/>and Observed Facial Muscle
+<br/>Activation Reduces Recognition of
+<br/>Emotional Facial Expressions From
+<br/>Video Stimuli
+<br/><b>Centre for Applied Autism Research, University of Bath, Bath, United Kingdom, 2 Social and</b><br/><b>Cognitive Neuroscience Laboratory, Centre of Biology and Health Sciences, Mackenzie Presbyterian University, S o Paulo</b><br/><b>Brazil, University Hospital Zurich, Z rich</b><br/><b>Switzerland, Psychosomatic Medicine, and Psychotherapy, University Hospital Frankfurt</b><br/>Frankfurt, Germany
+<br/>According to embodied cognition accounts, viewing others’ facial emotion can elicit
+<br/>the respective emotion representation in observers which entails simulations of sensory,
+<br/>motor, and contextual experiences. In line with that, published research found viewing
+<br/>others’
+<br/>facial emotion to elicit automatic matched facial muscle activation, which
+<br/>was further found to facilitate emotion recognition. Perhaps making congruent facial
+<br/>muscle activity explicit produces an even greater recognition advantage. If there is
+<br/><b>con icting sensory information, i.e., incongruent facial muscle activity, this might impede</b><br/>recognition. The effects of actively manipulating facial muscle activity on facial emotion
+<br/>recognition from videos were investigated across three experimental conditions: (a)
+<br/>explicit imitation of viewed facial emotional expressions (stimulus-congruent condition),
+<br/>(b) pen-holding with the lips (stimulus-incongruent condition), and (c) passive viewing
+<br/>(control condition). It was hypothesised that (1) experimental condition (a) and (b) result
+<br/>in greater facial muscle activity than (c), (2) experimental condition (a) increases emotion
+<br/>recognition accuracy from others’ faces compared to (c), (3) experimental condition (b)
+<br/>lowers recognition accuracy for expressions with a salient facial feature in the lower,
+<br/>but not the upper face area, compared to (c). Participants (42 males, 42 females)
+<br/>underwent a facial emotion recognition experiment (ADFES-BIV) while electromyography
+<br/>(EMG) was recorded from five facial muscle sites. The experimental conditions’ order
+<br/>was counter-balanced. Pen-holding caused stimulus-incongruent facial muscle activity
+<br/>for expressions with facial feature saliency in the lower face region, which reduced
+<br/>recognition of lower face region emotions. Explicit imitation caused stimulus-congruent
+<br/>facial muscle activity without modulating recognition. Methodological
+<br/>implications are
+<br/>discussed.
+<br/>Keywords: facial emotion recognition, imitation, facial muscle activity, facial EMG, embodiment, videos, dynamic
+<br/>stimuli, facial expressions of emotion
+<br/>Edited by:
+<br/>Eva G. Krumhuber,
+<br/><b>University College London</b><br/>United Kingdom
+<br/>Reviewed by:
+<br/>Sebastian Korb,
+<br/>Universität Wien, Austria
+<br/>Michal Olszanowski,
+<br/><b>SWPS University of Social Sciences</b><br/>and Humanities, Poland
+<br/>*Correspondence:
+<br/>Tanja S. H. Wingenbach
+<br/>Specialty section:
+<br/>This article was submitted to
+<br/>Emotion Science,
+<br/>a section of the journal
+<br/>Frontiers in Psychology
+<br/>Received: 15 December 2017
+<br/>Accepted: 14 May 2018
+<br/>Published: 06 June 2018
+<br/>Citation:
+<br/>Wingenbach TSH, Brosnan M,
+<br/>Pfaltz MC, Plichta MM and Ashwin C
+<br/>(2018) Incongruence Between
+<br/>Observers’ and Observed Facial
+<br/>Muscle Activation Reduces
+<br/>Recognition of Emotional Facial
+<br/>Expressions From Video Stimuli.
+<br/>Front. Psychol. 9:864.
+<br/>doi: 10.3389/fpsyg.2018.00864
+<br/>Frontiers in Psychology | www.frontiersin.org
+<br/>June 2018 | Volume 9 | Article 864
+</td><td>('39455300', 'Mark Brosnan', 'mark brosnan')<br/>('34495803', 'Monique C. Pfaltz', 'monique c. pfaltz')<br/>('2976177', 'Michael M. Plichta', 'michael m. plichta')<br/>('2708124', 'Chris Ashwin', 'chris ashwin')</td><td>tanja.wingenbach@bath.edu
+</td></tr><tr><td>2d072cd43de8d17ce3198fae4469c498f97c6277</td><td>Random Cascaded-Regression Copse for Robust
+<br/>Facial Landmark Detection
+<br/>and Xiao-Jun Wu
+</td><td>('2976854', 'Zhen-Hua Feng', 'zhen-hua feng')<br/>('39976184', 'Patrik Huber', 'patrik huber')<br/>('1748684', 'Josef Kittler', 'josef kittler')</td><td></td></tr><tr><td>2dd5f1d69e0e8a95a10f3f07f2c0c7fa172994b3</td><td>20
+<br/>Machine Analysis of Facial Expressions
+<br/><b>Imperial College London</b><br/><b>Inst. Neural Computation, University of California</b><br/>1 UK, 2 USA
+<br/>1. Human Face and Its Expression
+<br/>The human face is the site for major sensory inputs and major communicative outputs. It
+<br/>houses the majority of our sensory apparatus as well as our speech production apparatus. It
+<br/>is used to identify other members of our species, to gather information about age, gender,
+<br/>attractiveness, and personality, and to regulate conversation by gazing or nodding.
+<br/>Moreover, the human face is our preeminent means of communicating and understanding
+<br/>somebody’s affective state and intentions on the basis of the shown facial expression
+<br/>(Keltner & Ekman, 2000). Thus, the human face
+<br/>input-output
+<br/>communicative system capable of tremendous flexibility and specificity (Ekman & Friesen,
+<br/>1975). In general, the human face conveys information via four kinds of signals.
+<br/>(a) Static facial signals represent relatively permanent features of the face, such as the bony
+<br/>structure, the soft tissue, and the overall proportions of the face. These signals
+<br/>contribute to an individual’s appearance and are usually exploited for person
+<br/>identification.
+<br/>is a multi-signal
+<br/>(b) Slow facial signals represent changes in the appearance of the face that occur gradually
+<br/>over time, such as development of permanent wrinkles and changes in skin texture.
+<br/>These signals can be used for assessing the age of an individual. Note that these signals
+<br/>might diminish the distinctness of the boundaries of the facial features and impede
+<br/>recognition of the rapid facial signals.
+<br/>(c) Artificial signals are exogenous features of the face such as glasses and cosmetics. These
+<br/>signals provide additional information that can be used for gender recognition. Note
+<br/>that these signals might obscure facial features or, conversely, might enhance them.
+<br/>(d) Rapid facial signals represent temporal changes in neuromuscular activity that may lead
+<br/><b>to visually detectable changes in facial appearance, including blushing and tears. These</b><br/>(atomic facial) signals underlie facial expressions.
+<br/>All four classes of signals contribute to person identification, gender recognition,
+<br/>attractiveness assessment, and personality prediction. In Aristotle’s time, a theory was
+<br/>proposed about mutual dependency between static facial signals (physiognomy) and
+<br/>personality: “soft hair reveals a coward, strong chin a stubborn person, and a smile a happy
+<br/>person”. Today, few psychologists share the belief about the meaning of soft hair and strong
+<br/>chin, but many believe that rapid facial signals (facial expressions) communicate emotions
+<br/>(Ekman & Friesen, 1975; Ambady & Rosenthal, 1992; Keltner & Ekman, 2000) and
+<br/>personality traits (Ambady & Rosenthal, 1992). More specifically, types of messages
+<br/>Source: Face Recognition, Book edited by: Kresimir Delac and Mislav Grgic, ISBN 978-3-902613-03-5, pp.558, I-Tech, Vienna, Austria, June 2007
+</td><td>('1694605', 'Maja Pantic', 'maja pantic')<br/>('2218905', 'Marian Stewart Bartlett', 'marian stewart bartlett')</td><td></td></tr><tr><td>2d71e0464a55ef2f424017ce91a6bcc6fd83f6c3</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>National Conference on Advancements in Computer & Information Technology (NCACIT-2016)
+<br/>A Survey on: Image Process using Two- Stage Crawler
+<br/>Assistant Professor
+<br/>SPPU, Pune
+<br/>Department of Computer Engg
+<br/>Department of Computer Engg
+<br/>Department of Computer Engg
+<br/>BE Student
+<br/>SPPU, Pune
+<br/>BE Student
+<br/>SPPU, Pune
+<br/>BE Student
+<br/>Department of Computer Engg
+<br/>SPPU, Pune
+<br/>additional
+<br/>analysis
+<br/>for
+<br/>information
+</td><td>('15156505', 'Nilesh Wani', 'nilesh wani')<br/>('1936852', 'Savita Gunjal', 'savita gunjal')</td><td></td></tr><tr><td>2d38fd1df95f5025e2cee5bc439ba92b369a93df</td><td>Scalable Object-Class Search
+<br/>via Sparse Retrieval Models and Approximate Ranking
+<br/>Dartmouth Computer Science Technical Report TR2011-700
+<br/>Computer Science Department
+<br/><b>Dartmouth College</b><br/>Hanover, NH 03755, U.S.A.
+<br/>July 5, 2011
+</td><td>('2563325', 'Mohammad Rastegari', 'mohammad rastegari')<br/>('2442612', 'Chen Fang', 'chen fang')<br/>('1732879', 'Lorenzo Torresani', 'lorenzo torresani')</td><td>{mrastegari, chenfang, lorenzo}@cs.dartmouth.edu
+</td></tr><tr><td>2d83ba2d43306e3c0587ef16f327d59bf4888dc3</td><td>Large-scale Video Classification with Convolutional Neural Networks
+<br/><b>Stanford University</b><br/>1Google Research
+<br/>http://cs.stanford.edu/people/karpathy/deepvideo
+</td><td>('2354728', 'Andrej Karpathy', 'andrej karpathy')<br/>('1805076', 'George Toderici', 'george toderici')<br/>('24792872', 'Sanketh Shetty', 'sanketh shetty')<br/>('1893833', 'Thomas Leung', 'thomas leung')<br/>('1694199', 'Rahul Sukthankar', 'rahul sukthankar')<br/>('3216322', 'Li Fei-Fei', 'li fei-fei')</td><td>karpathy@cs.stanford.edu
+<br/>gtoderici@google.com
+<br/>sanketh@google.com
+<br/>leungt@google.com
+<br/>sukthankar@google.com
+<br/>feifeili@cs.stanford.edu
+</td></tr><tr><td>2d84c0d96332bb4fbd8acced98e726aabbf15591</td><td><b>UNIVERSITY OF CALIFORNIA</b><br/>RIVERSIDE
+<br/>Investigating the Role of Saliency for Face Recognition
+<br/>A Dissertation submitted in partial satisfaction
+<br/>of the requirements for the degree of
+<br/>Doctor of Philosophy
+<br/>in
+<br/>Electrical Engineering
+<br/>by
+<br/>March 2015
+<br/>Dissertation Committee:
+<br/>Professor Conrad Rudolph
+</td><td>('11012197', 'Ramya Malur Srinivasan', 'ramya malur srinivasan')<br/>('1688416', 'Amit K Roy-Chowdhury', 'amit k roy-chowdhury')<br/>('1686303', 'Ertem Tuncel', 'ertem tuncel')<br/>('2357146', 'Tamar Shinar', 'tamar shinar')</td><td></td></tr><tr><td>2d8d089d368f2982748fde93a959cf5944873673</td><td>Proceedings of NAACL-HLT 2018, pages 788–794
+<br/>New Orleans, Louisiana, June 1 - 6, 2018. c(cid:13)2018 Association for Computational Linguistics
+<br/>788
+</td><td></td><td></td></tr><tr><td>2d79d338c114ece1d97cde1aa06ab4cf17d38254</td><td>iLab-20M: A large-scale controlled object dataset to investigate deep learning
+<br/><b>Center for Research in Computer Vision, University of Central Florida</b><br/><b>Amirkabir University of Technology, University of Southern California</b></td><td>('3177797', 'Ali Borji', 'ali borji')<br/>('2391309', 'Saeed Izadi', 'saeed izadi')<br/>('7326223', 'Laurent Itti', 'laurent itti')</td><td>aborji@crcv.ucf.edu, sizadi@aut.ac.ir, itti@usc.edu
+</td></tr><tr><td>2df4d05119fe3fbf1f8112b3ad901c33728b498a</td><td>Facial landmark detection using structured output deep
+<br/>neural networks
+<br/>Soufiane Belharbi ∗1, Cl´ement Chatelain∗1, Romain H´erault∗1, and S´ebastien
+<br/>Adam∗2
+<br/>1LITIS EA 4108, INSA de Rouen, Saint ´Etienne du Rouvray 76800, France
+<br/>2LITIS EA 4108, UFR des Sciences, Universit´e de Rouen, France.
+<br/>September 24, 2015
+</td><td></td><td></td></tr><tr><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td><td>Labeled Faces in the Wild: Updates and
+<br/>New Reporting Procedures
+<br/><b>University of Massachusetts, Amherst Technical Report UM-CS</b></td><td>('3219900', 'Gary B. Huang', 'gary b. huang')<br/>('1714536', 'Erik Learned-Miller', 'erik learned-miller')</td><td></td></tr><tr><td>2d4a3e9361505616fa4851674eb5c8dd18e0c3cf</td><td>Towards Privacy-Preserving Visual Recognition
+<br/>via Adversarial Training: A Pilot Study
+<br/><b>Texas AandM University, College Station TX 77843, USA</b><br/>2 Adobe Research, San Jose CA 95110, USA
+</td><td>('1733940', 'Zhenyu Wu', 'zhenyu wu')<br/>('2969311', 'Zhangyang Wang', 'zhangyang wang')<br/>('8056043', 'Zhaowen Wang', 'zhaowen wang')<br/>('39909162', 'Hailin Jin', 'hailin jin')</td><td>{wuzhenyu sjtu,atlaswang}@tamu.edu
+<br/>{zhawang,hljin}@adobe.com
+</td></tr><tr><td>2d748f8ee023a5b1fbd50294d176981ded4ad4ee</td><td>TRIPLET SIMILARITY EMBEDDING FOR FACE VERIFICATION
+<br/><b>Center for Automation Research, UMIACS, University of Maryland, College Park, MD</b><br/>1Department of Electrical and Computer Engineering,
+</td><td>('2716670', 'Swami Sankaranarayanan', 'swami sankaranarayanan')<br/>('2943431', 'Azadeh Alavi', 'azadeh alavi')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td>{swamiviv, azadeh, rama}@umiacs.umd.edu
+</td></tr><tr><td>2d3c17ced03e4b6c4b014490fe3d40c62d02e914</td><td>COMPUTER ANIMATION AND VIRTUAL WORLDS
+<br/>Comp.Anim.VirtualWorlds2012; 23:167–178
+<br/>Published online 30 May 2012 in Wiley Online Library (wileyonlinelibrary.com). DOI: 10.1002/cav.1455
+<br/>SPECIAL ISSUE PAPER
+<br/>Video-driven state-aware facial animation
+<br/><b>State Key Lab of CADandCG, Zhejiang University, Hangzhou, Zhejiang, China</b><br/>2 Microsoft Corporation, Seattle, WA, USA
+</td><td>('2894564', 'Ming Zeng', 'ming zeng')<br/>('1680293', 'Lin Liang', 'lin liang')<br/>('3227032', 'Xinguo Liu', 'xinguo liu')<br/>('1679542', 'Hujun Bao', 'hujun bao')</td><td></td></tr><tr><td>41f26101fed63a8d149744264dd5aa79f1928265</td><td>Spot On: Action Localization from
+<br/>Pointly-Supervised Proposals
+<br/><b>University of Amsterdam</b><br/><b>Delft University of Technology</b></td><td>('2606260', 'Pascal Mettes', 'pascal mettes')<br/>('1738975', 'Jan C. van Gemert', 'jan c. van gemert')</td><td></td></tr><tr><td>4188bd3ef976ea0dec24a2512b44d7673fd4ad26</td><td>1050
+<br/>Nonlinear Non-Negative Component
+<br/>Analysis Algorithms
+</td><td>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')<br/>('2871609', 'Maria Petrou', 'maria petrou')</td><td></td></tr><tr><td>416b559402d0f3e2b785074fcee989d44d82b8e5</td><td>Multi-View Super Vector for Action Recognition
+<br/>1Shenzhen Key Lab of Computer Vision and Pattern Recognition,
+<br/><b>Shenzhen Institutes of Advanced Technology, CAS, China</b><br/><b>The Chinese University of Hong Kong, Hong Kong</b></td><td>('2985266', 'Zhuowei Cai', 'zhuowei cai')<br/>('33345248', 'Limin Wang', 'limin wang')<br/>('1766837', 'Xiaojiang Peng', 'xiaojiang peng')<br/>('33427555', 'Yu Qiao', 'yu qiao')</td><td>{iamcaizhuowei, 07wanglimin, xiaojiangp}@gmail.com, yu.qiao@siat.ac.cn
+</td></tr><tr><td>416364cfdbc131d6544582e552daf25f585c557d</td><td>Synthesis and Recognition of Facial Expressions in Virtual 3D Views
+<br/><b>Queen Mary, University of London, E1 4NS, UK</b></td><td>('34780294', 'Lukasz Zalewski', 'lukasz zalewski')<br/>('2073354', 'Shaogang Gong', 'shaogang gong')</td><td>[lukas|sgg]@dcs.qmul.ac.uk
+</td></tr><tr><td>41000c3a3344676513ef4bfcd392d14c7a9a7599</td><td>A NOVEL APPROACH FOR GENERATING FACE
+<br/>TEMPLATE USING BDA
+<br/>1P.G. Student, Department of Computer Engineering, MCERC, Nashik (M.S.), India.
+<br/>2Associate Professor, Department of Computer Engineering, MCERC, Nashik (M.S.),
+<br/>India
+</td><td>('40075681', 'Shraddha S. Shinde', 'shraddha s. shinde')<br/>('2590072', 'Anagha P. Khedkar', 'anagha p. khedkar')</td><td>shraddhashinde@gmail.com
+<br/>anagha_p2@yahoo.com
+</td></tr><tr><td>411ee9236095f8f5ca3b9ef18fd3381c1c68c4b8</td><td>Vol.59: e16161057, January-December 2016
+<br/>http://dx.doi.org/10.1590/1678-4324-2016161057
+<br/>ISSN 1678-4324 Online Edition
+<br/>1
+<br/>Biological and Applied Sciences
+<br/>BRAZILIAN ARCHIVES OF
+<br/>BIOLOGY AND TECHNOLOGY
+<br/>A N I N T E R N A T I O N A L J O U R N A L
+<br/>An Empirical Evaluation of the Local Texture Description
+<br/>Framework-Based Modified Local Directional Number
+<br/>Pattern with Various Classifiers for Face Recognition
+<br/><b>St. Xavier's Catholic College of Engineering, Nagercoil, India</b><br/><b>VelTech Dr. R.R. and Dr. S.R. Technical University, Chennai</b><br/><b>Manonmaniam Sundaranar University, Tirunelveli</b><br/>India.
+</td><td>('9375880', 'R. Reena Rose', 'r. reena rose')</td><td></td></tr><tr><td>411318684bd2d42e4b663a37dcf0532a48f0146d</td><td>Improved Face Verification with Simple
+<br/>Weighted Feature Combination
+<br/><b>College of Electronics and Information Engineering, Tongji University</b><br/>4800 Cao’an Highway, Shanghai 201804, People’s Republic of China
+</td><td>('1775391', 'Xinyu Zhang', 'xinyu zhang')<br/>('48566761', 'Jiang Zhu', 'jiang zhu')<br/>('34647494', 'Mingyu You', 'mingyu you')</td><td>{1510464,zhujiang,myyou}@tongji.edu.cn
+</td></tr><tr><td>4140498e96a5ff3ba816d13daf148fffb9a2be3f</td><td>2017 IEEE 12th International Conference on Automatic Face & Gesture Recognition
+<br/>2017 IEEE 12th International Conference on Automatic Face & Gesture Recognition
+<br/>2017 IEEE 12th International Conference on Automatic Face & Gesture Recognition
+<br/>2017 IEEE 12th International Conference on Automatic Face & Gesture Recognition
+<br/>2017 IEEE 12th International Conference on Automatic Face & Gesture Recognition
+<br/>Constrained Ensemble Initialization for Facial Landmark
+<br/>Tracking in Video
+<br/><b>Language Technology Institute, Carnegie Mellon University, Pittsburgh, PA, USA</b></td><td>('1767184', 'Louis-Philippe Morency', 'louis-philippe morency')</td><td></td></tr><tr><td>41f8477a6be9cd992a674d84062108c68b7a9520</td><td>An Automated System for Visual Biometrics
+<br/>Dept. of Electrical Engineering and Computer Science
+<br/><b>Northwestern University</b><br/>Evanston, IL 60208-3118
+</td><td>('2563314', 'Derek J. Shiell', 'derek j. shiell')<br/>('3271105', 'Louis H. Terry', 'louis h. terry')<br/>('2691927', 'Petar S. Aleksic', 'petar s. aleksic')<br/>('1695338', 'Aggelos K. Katsaggelos', 'aggelos k. katsaggelos')</td><td>d-shiell@northwestern.edu, l-terry@northwestern.edu,
+<br/>apetar@eecs.northwestern.edu, aggk@eecs.northwestern.edu
+</td></tr><tr><td>414715421e01e8c8b5743c5330e6d2553a08c16d</td><td>PoTion: Pose MoTion Representation for Action Recognition
+<br/>1Inria∗
+<br/>2NAVER LABS Europe
+</td><td>('2492127', 'Philippe Weinzaepfel', 'philippe weinzaepfel')<br/>('2462253', 'Cordelia Schmid', 'cordelia schmid')</td><td></td></tr><tr><td>41aa8c1c90d74f2653ef4b3a2e02ac473af61e47</td><td>Compositional Structure Learning for Action Understanding
+<br/>1Department of Computer Science and Engineering, SUNY at Buffalo
+<br/>2Department of Statistics, UCLA
+<br/><b>University of Michigan</b><br/>October 23, 2014
+</td><td>('1856629', 'Ran Xu', 'ran xu')<br/>('1690235', 'Gang Chen', 'gang chen')<br/>('2228109', 'Caiming Xiong', 'caiming xiong')<br/>('1728624', 'Wei Chen', 'wei chen')<br/>('3587688', 'Jason J. Corso', 'jason j. corso')</td><td></td></tr><tr><td>41ab4939db641fa4d327071ae9bb0df4a612dc89</td><td>Interpreting Face Images by Fitting a Fast
+<br/>Illumination-Based 3D Active Appearance
+<br/>Model
+<br/>Instituto Nacional de Astrofísica, Óptica y Electrónica,
+<br/>Luis Enrique Erro #1, 72840 Sta Ma. Tonantzintla. Pue., México
+<br/>Coordinaci´on de Ciencias Computacionales
+</td><td>('2349309', 'Salvador E. Ayala-Raggi', 'salvador e. ayala-raggi')</td><td>{saraggi, robles, jcruze}@ccc.inaoep.mx
+</td></tr><tr><td>41971dfbf404abeb8cf73fea29dc37b9aae12439</td><td>Detection of Facial Feature Points Using
+<br/>Anthropometric Face Model
+<br/>
+<br/><b>Concordia University</b><br/>1455 de Maisonneuve Blvd. West, Montréal, Québec H3G 1M8, Canada
+</td><td>('8018736', 'Abu Sayeed', 'abu sayeed')<br/>('1715620', 'Prabir Bhattacharya', 'prabir bhattacharya')</td><td>E-mails: a_sohai@encs.concordia.ca, prabir@ciise.concordia.ca
+</td></tr><tr><td>4157e45f616233a0874f54a59c3df001b9646cd7</td><td>elifesciences.org
+<br/>RESEARCH ARTICLE
+<br/>Diagnostically relevant facial gestalt
+<br/>information from ordinary photos
+<br/><b>University of Oxford, Oxford, United Kingdom</b><br/>2Medical Research Council Functional Genomics Unit, Department of Physiology,
+<br/><b>Anatomy and Genetics, University of Oxford, Oxford, United Kingdom; 3The Wellcome</b><br/><b>Trust Centre for Human Genetics, University of Oxford, Oxford, United Kingdom</b><br/><b>Medical Research Council Human Genetics Unit, Institute of Genetics and Molecular</b><br/>Medicine, Edinburgh, United Kingdom
+</td><td>('4569459', 'Quentin Ferry', 'quentin ferry')<br/>('1985983', 'Julia Steinberg', 'julia steinberg')<br/>('39722750', 'Caleb Webber', 'caleb webber')<br/>('1880309', 'David R FitzPatrick', 'david r fitzpatrick')<br/>('2500371', 'Chris P Ponting', 'chris p ponting')<br/>('1688869', 'Andrew Zisserman', 'andrew zisserman')<br/>('2204967', 'Christoffer Nellåker', 'christoffer nellåker')</td><td></td></tr><tr><td>41a6196f88beced105d8bc48dd54d5494cc156fb</td><td>2015 International Conference on
+<br/>Communications, Signal
+<br/>Processing, and their Applications
+<br/>(ICCSPA 2015)
+<br/>Sharjah, United Arab Emirates
+<br/>17-19 February 2015
+<br/>IEEE Catalog Number:
+<br/>ISBN:
+<br/>CFP1574T-POD
+<br/>978-1-4799-6533-5
+</td><td></td><td></td></tr><tr><td>41de109bca9343691f1d5720df864cdbeeecd9d0</td><td>Article
+<br/>Facial Emotion Recognition: A Survey and
+<br/>Real-World User Experiences in Mixed Reality
+<br/>Received: 10 December 2017; Accepted: 26 January 2018; Published: 1 February 2018
+</td><td>('38085139', 'Dhwani Mehta', 'dhwani mehta')<br/>('3655354', 'Mohammad Faridul Haque Siddiqui', 'mohammad faridul haque siddiqui')<br/>('39803999', 'Ahmad Y. Javaid', 'ahmad y. javaid')</td><td>EECS Department, The University of Toledo, Toledo, OH 43606, USA; dhwani.mehta@utoledo.edu (D.M.);
+<br/>mohammadfaridulhaque.siddiqui@utoledo.edu (M.F.H.S.)
+<br/>* Correspondence: ahmad.javaid@utoledo.edu; Tel.: +1-419-530-8260
+</td></tr><tr><td>41d9a240b711ff76c5448d4bf4df840cc5dad5fc</td><td>JOURNAL DRAFT, VOL. X, NO. X, APR 2013
+<br/>Image Similarity Using Sparse Representation
+<br/>and Compression Distance
+</td><td>('1720741', 'Tanaya Guha', 'tanaya guha')</td><td></td></tr><tr><td>419a6fca4c8d73a1e43003edc3f6b610174c41d2</td><td>A Component Based Approach Improves Classification of Discrete
+<br/>Facial Expressions Over a Holistic Approach
+</td><td>('2370974', 'Kenny Hong', 'kenny hong')<br/>('1716539', 'Stephan K. Chalup', 'stephan k. chalup')</td><td></td></tr><tr><td>4136a4c4b24c9c386d00e5ef5dffdd31ca7aea2c</td><td>MULTI-MODAL PERSON-PROFILES FROM BROADCAST NEWS VIDEO
+<br/><b>Beckman Institute for Advanced Science and Technology</b><br/><b>University of Illinois at Urbana-Champaign</b><br/>Urbana, IL 61801
+</td><td>('1804874', 'Charlie K. Dagli', 'charlie k. dagli')<br/>('25639435', 'Sharad V. Rao', 'sharad v. rao')<br/>('1739208', 'Thomas S. Huang', 'thomas s. huang')</td><td>{dagli,svrao,huang}@ifp.uiuc.edu
+</td></tr><tr><td>4180978dbcd09162d166f7449136cb0b320adf1f</td><td>Real-time head pose classification in uncontrolled environments
+<br/>with Spatio-Temporal Active Appearance Models
+<br/>∗ Matematica Aplicada i Analisi ,Universitat de Barcelona, Barcelona, Spain
+<br/>+ Matematica Aplicada i Analisi, Universitat de Barcelona, Barcelona, Spain
+<br/>+ Matematica Aplicada i Analisi, Universitat de Barcelona, Barcelona, Spain
+</td><td>('3276130', 'Miguel Reyes', 'miguel reyes')<br/>('7855312', 'Sergio Escalera', 'sergio escalera')<br/>('1724155', 'Petia Radeva', 'petia radeva')</td><td>E-mail:mreyes@cvc.uab.es
+<br/>E-mail:sergio@maia.ub.es
+<br/>E-mail:petia@cvc.uab.es
+</td></tr><tr><td>41b997f6cec7a6a773cd09f174cb6d2f036b36cd</td><td></td><td></td><td></td></tr><tr><td>41aa209e9d294d370357434f310d49b2b0baebeb</td><td>BEYOND CAPTION TO NARRATIVE:
+<br/>VIDEO CAPTIONING WITH MULTIPLE SENTENCES
+<br/><b>Grad. School of Information Science and Technology, The University of Tokyo, Japan</b></td><td>('2518695', 'Andrew Shin', 'andrew shin')<br/>('8197937', 'Katsunori Ohnishi', 'katsunori ohnishi')<br/>('1790553', 'Tatsuya Harada', 'tatsuya harada')</td><td></td></tr><tr><td>413a184b584dc2b669fbe731ace1e48b22945443</td><td>Human Pose Co-Estimation and Applications
+</td><td>('31786895', 'Marcin Eichner', 'marcin eichner')<br/>('1749692', 'Vittorio Ferrari', 'vittorio ferrari')</td><td></td></tr><tr><td>83b7578e2d9fa60d33d9336be334f6f2cc4f218f</td><td>The S-HOCK Dataset: Analyzing Crowds at the Stadium
+<br/><b>University of Verona. 2Vienna Institute of Technology. 3ISTC CNR (Trento). 4University of Trento</b><br/>The topic of crowd modeling in computer vision usually assumes a sin-
+<br/>gle generic typology of crowd, which is very simplistic. In this paper we
+<br/>adopt a taxonomy that is widely accepted in sociology, focusing on a partic-
+<br/>ular category, the spectator crowd, which is formed by people “interested in
+<br/>watching something specific that they came to see” [1]. This can be found
+<br/>at the stadiums, amphitheaters, cinema, etc.
+<br/>In particular, we propose a
+<br/>novel dataset, the Spectators Hockey (S-HOCK), which deals with 4 hockey
+<br/>matches during an international tournament.
+<br/>The dataset is unique in the crowd literature, and in general in the
+<br/>surveillance realm. The dataset analyzes the crowd at different levels of
+<br/>detail. At the highest level, it models the network of social connections
+<br/>among the public (who knows whom in the neighborhood), what is the sup-
+<br/>ported team and what has been the best action in the match; all of this has
+<br/>been obtained by interviews at the stadium. At a medium level, spectators
+<br/>are localized, and information regarding the pose of their heads and body is
+<br/>given. Finally, at a lowest level, a fine grained specification of all the actions
+<br/>performed by each single person is available. This information is summa-
+<br/>rized by a large number of annotations collected over a year of work: more
+<br/>than 100 millions of double checked annotations. This permits potentially
+<br/>to deal with hundreds of tasks, some of which are documented in the full
+<br/>paper.
+<br/>Furthermore, the dataset is multidimensional, in the sense that offers
+<br/>not only the view of the crowd (at different resolutions, with 4 cameras) but
+<br/>also on the matches. This multiplies the number of possible applications that
+<br/>could be assessed, investigating the reactions of the crowd to the actions of
+<br/>the game, opening up to applications of summarization and content analysis.
+<br/>Besides these figures, S-HOCK is significantly different from all the other
+<br/>crowd datasets, since the crowd as a whole is mostly static and the motion
+<br/>of each spectator is constrained within a limited space in the surrounding of
+<br/>his position.
+<br/>Annotation
+<br/>People detection
+<br/>Head detection
+<br/>Head pose∗
+<br/>Body position
+<br/>Posture
+<br/>Locomotion
+<br/>Action / Interaction
+<br/>Supported team
+<br/>Best action
+<br/>Social relation
+<br/>Typical Values
+<br/>full body bounding box [x,y,width,height]
+<br/>head bounding box [x,y,width,height]
+<br/>left, frontal, right, away, down
+<br/>sitting, standing, (locomotion)
+<br/>crossed arms, hands in pocket, crossed legs . . .
+<br/>walking, jumping (each jump), rising pelvis slightly up
+<br/>waving arms, pointing toward game, applauding, . . .
+<br/>the team supported in this game
+<br/>the most exciting action of the game
+<br/>If he/she did know the person seated at his/her right
+<br/>Table 1: Some of the annotations provided for each person and each frame
+<br/>of the videos.
+<br/>Together with the annotations, in the paper we discuss issues related to
+<br/>low and high level detail of the crowd analysis, namely, people detection
+<br/>and head pose estimation for the low level analysis, and the spectator cate-
+<br/>gorization for the high level analysis. For all of these applications, we define
+<br/>the experimental protocols, promoting future comparisons.
+<br/>For people detection task we provide five different baselines, from the
+<br/>simplest algorithms to the state of the art method for object detection, show-
+<br/>ing how in this scenario the simplest method gets very high scores.
+<br/>Regarding head pose estimation, we tested two state of the art methods
+<br/>which work in a low resolution domain. Furthermore, we propose two novel
+<br/>approaches based on Deep Learning. In particular, we evaluate the perfor-
+<br/>mance of the Convolutional Neural Network and the Stacked Auto-encoder
+<br/>Neural Network architecture. Here the results are comparable with state of
+<br/>the art but are obtainable at a much higher speed.
+<br/>Spectator categorization is a kind of crowd segmentation, where the goal
+<br/>is to find the team supported by each spectator. This task is intuitively use-
+</td><td>('1843683', 'Davide Conigliaro', 'davide conigliaro')<br/>('39337007', 'Paolo Rota', 'paolo rota')<br/>('2793423', 'Francesco Setti', 'francesco setti')<br/>('1919464', 'Chiara Bassetti', 'chiara bassetti')<br/>('3058987', 'Nicola Conci', 'nicola conci')<br/>('1703601', 'Nicu Sebe', 'nicu sebe')<br/>('1723008', 'Marco Cristani', 'marco cristani')</td><td></td></tr><tr><td>839a2155995acc0a053a326e283be12068b35cb8</td><td>Under review as a conference paper at ICLR 2016
+<br/>HANDCRAFTED LOCAL FEATURES ARE CONVOLU-
+<br/>TIONAL NEURAL NETWORKS
+<br/>School of Computer Science
+<br/><b>Carnegie Mellon University</b><br/>Pittsburgh, PA 15213, USA
+</td><td>('2927024', 'Shoou-I Yu', 'shoou-i yu')<br/>('2735055', 'Ming Lin', 'ming lin')<br/>('1681921', 'Bhiksha Raj', 'bhiksha raj')<br/>('7661726', 'Alexander G. Hauptmann', 'alexander g. hauptmann')</td><td>{lanzhzh, iyu, minglin, bhiksha, alex}@cs.cmu.edu
+</td></tr><tr><td>83fd2d2d5ad6e4e153672c9b6d1a3785f754b60e</td><td>RESEARCH ARTICLE
+<br/>Neuropsychiatric Genetics
+<br/>Quantifying Naturalistic Social Gaze in Fragile X
+<br/>Syndrome Using a Novel Eye Tracking Paradigm
+<br/>and Allan L. Reiss1
+<br/>1Center for Interdisciplinary Brain Sciences Research, Department of Psychiatry and Behavioral Sciences, Stanford, California
+<br/><b>Stanford University, Stanford, California</b><br/>Manuscript Received: 7 November 2014; Manuscript Accepted: 22 May 2015
+<br/>A hallmark behavioral feature of fragile X syndrome (FXS) is
+<br/>the propensity for individuals with the syndrome to exhibit
+<br/>significant impairments in social gaze during interactions
+<br/>with others. However, previous studies employing eye tracking
+<br/>methodology to investigate this phenomenon have been limited
+<br/>to presenting static photographs or videos of social interactions
+<br/>rather than employing a real-life social partner. To improve
+<br/>upon previous studies, we used a customized eye tracking
+<br/>configuration to quantify the social gaze of 51 individuals
+<br/>with FXS and 19 controls, aged 14–28 years, while they engaged
+<br/>in a naturalistic face-to-face social interaction with a female
+<br/>experimenter. Importantly, our control group was matched to
+<br/>the FXS group on age, developmental functioning, and degree of
+<br/>autistic symptomatology. Results showed that participants with
+<br/>FXS spent significantly less time looking at the face and had
+<br/>shorter episodes (and longer inter-episodes) of social gaze than
+<br/>controls. Regression analyses indicated that communication
+<br/>ability predicted higher levels of social gaze in individuals
+<br/>with FXS, but not in controls. Conversely, degree of autistic
+<br/>symptoms predicted lower levels of social gaze in controls, but
+<br/>not in individuals with FXS. Taken together, these data indicate
+<br/>that naturalistic social gaze in FXS can be measured objectively
+<br/>using existing eye tracking technology during face-to-face social
+<br/>interactions. Given that impairments in social gaze were specific
+<br/>to FXS, this paradigm could be employed as an objective and
+<br/>ecologically valid outcome measure in ongoing Phase II/Phase
+<br/>III clinical trials of FXS-specific interventions.
+<br/><b>2015 Wiley Periodicals, Inc</b><br/>Key words: eye tracking; social gaze; autism;
+<br/>syndrome
+<br/>fragile X
+<br/>INTRODUCTION
+<br/>Children diagnosed with genetic syndromes associated with intel-
+<br/>lectual and developmental disability (e.g., fragile X syndrome,
+<br/>Williams syndrome) often engage in highly specific forms of aber-
+<br/>rant social behavior that can interfere with everyday functioning. For
+<br/>How to Cite this Article:
+<br/>Hall SS, Frank MC, Pusiol GT, Farzin F,
+<br/>Lightbody AA, Reiss AL. 2015. Quantifying
+<br/>Naturalistic Social Gaze in Fragile X
+<br/>Syndrome Using a Novel Eye Tracking
+<br/>Paradigm.
+<br/>Am J Med Genet Part B 9999:1–9.
+<br/>example, individuals diagnosed with Williams syndrome show a
+<br/>particular form of hypersociability in which they actively seek out
+<br/>social interactions with others [Jones et al., 2000; Frigerio et al.,
+<br/>2006]. Conversely, children with fragile X syndrome (FXS) com-
+<br/>monly show deficits in social gaze behavior in which interactions
+<br/>with others are actively avoided [Cohen et al., 1988; Cohen et al.,
+<br/>1989; Cohen et al., 1991; Hall et al., 2006; Hall et al., 2009]. These
+<br/>contrasting behavioral phenotypes have been considered useful
+<br/>and important models for investigations examining the interplay
+<br/>between genes and environment [Kennedy et al., 2001; Schroeder
+<br/>et al., 2001].
+<br/>FXS is a particularly interesting model of potential gene-envi-
+<br/>ronment interactions because it is a “single-gene” disorder. The
+<br/>disease affects approximately 1 in 3,000 individuals in the United
+<br/>States (approx. 100,000 people) and is the most common known
+<br/>form of inherited intellectual disability [Hagerman, 2008]. First
+<br/>described by Martin and Bell in 1943 as a “pedigree of mental defect
+<br/>showing sex linkage” [Martin and Bell, 1943], FXS is caused by
+<br/>mutations to the FMR1 gene at locus 27.3 on the long arm of the X
+<br/>chromosome [Verkerk et al., 1991]. Excessive methylation of the
+<br/>gene results in reduced or absent Fragile X Mental Retardation
+<br/>Protein (FMRP), a key protein involved in synaptic plasticity and
+<br/>Grant sponsor: NIH grants; Grant numbers: MH050047, MH081998.
+<br/>Correspondence to:
+<br/>Scott S. Hall, PhD, Department of Psychiatry and Behavioral Sciences,
+<br/><b>Rm 1365, Stanford University, 401 Quarry Road, Stanford, CA</b><br/>Article first published online in Wiley Online Library
+<br/>(wileyonlinelibrary.com): 00 Month 2015
+<br/>DOI 10.1002/ajmg.b.32331
+<br/><b>2015 Wiley Periodicals, Inc</b></td><td>('4708625', 'Faraz Farzin', 'faraz farzin')</td><td>E-mail: hallss@stanford.edu
+</td></tr><tr><td>83ca4cca9b28ae58f461b5a192e08dffdc1c76f3</td><td>DETECTING EMOTIONAL STRESS FROM FACIAL EXPRESSIONS FOR DRIVING SAFETY
+<br/>Signal Processing Laboratory (LTS5),
+<br/>École Polytechnique Fédérale de Lausanne, Switzerland
+</td><td>('1697965', 'Hua Gao', 'hua gao')<br/>('1710257', 'Jean-Philippe Thiran', 'jean-philippe thiran')</td><td></td></tr><tr><td>8356832f883207187437872742d6b7dc95b51fde</td><td>Adversarial Perturbations Against Real-Time Video
+<br/>Classification Systems
+<br/><b>University of California, Riverside</b><br/><b>University of California, Riverside</b><br/><b>University of California, Riverside</b><br/>Riverside, California
+<br/>Riverside, California
+<br/><b>University of California, Riverside</b><br/>Riverside, California
+<br/>Riverside, California
+<br/><b>University of California, Riverside</b><br/>Riverside, California
+<br/>Amit K. Roy Chowdhury
+<br/><b>University of California, Riverside</b><br/>Riverside, California
+<br/>United States Army Research
+<br/>Laboratory
+</td><td>('26576993', 'Shasha Li', 'shasha li')<br/>('2252367', 'Chengyu Song', 'chengyu song')<br/>('1718484', 'Ajaya Neupane', 'ajaya neupane')<br/>('49616225', 'Sujoy Paul', 'sujoy paul')<br/>('38774813', 'Srikanth V. Krishnamurthy', 'srikanth v. krishnamurthy')<br/>('1703726', 'Ananthram Swami', 'ananthram swami')</td><td>sli057@ucr.edu
+<br/>csong@cs.ucr.edu
+<br/>ajaya@ucr.edu
+<br/>spaul003@ucr.edu
+<br/>krish@cs.ucr.edu
+<br/>amitrc@ece.ucr.edu
+<br/>ananthram.swami.civ@mail.mil
+</td></tr><tr><td>831fbef657cc5e1bbf298ce6aad6b62f00a5b5d9</td><td></td><td></td><td></td></tr><tr><td>835e510fcf22b4b9097ef51b8d0bb4e7b806bdfd</td><td>Unsupervised Learning of Sequence Representations by
+<br/>Autoencoders
+<br/><b>aPattern Recognition Laboratory, Delft University of Technology</b></td><td>('1678473', 'Wenjie Pei', 'wenjie pei')</td><td></td></tr><tr><td>832e1d128059dd5ed5fa5a0b0f021a025903f9d5</td><td>Pairwise Conditional Random Forests for Facial Expression Recognition
+<br/>Séverine Dubuisson1
+<br/>1 Sorbonne Universit´es, UPMC Univ Paris 06, CNRS, ISIR UMR 7222, 4 place Jussieu 75005 Paris
+</td><td>('3190846', 'Arnaud Dapogny', 'arnaud dapogny')<br/>('2521061', 'Kevin Bailly', 'kevin bailly')</td><td>arnaud.dapogny@isir.upmc.fr
+<br/>kevin.bailly@isir.upmc.fr
+<br/>severine.dubuisson@isir.upmc.fr
+</td></tr><tr><td>83e093a07efcf795db5e3aa3576531d61557dd0d</td><td>Facial Landmark Localization using Robust
+<br/>Relationship Priors and Approximative Gibbs
+<br/>Sampling
+<br/>Institut für Informationsverarbeitung (tnt)
+<br/>Leibniz Universität Hannover, Germany
+</td><td>('35033145', 'Karsten Vogt', 'karsten vogt')</td><td>{vogt, omueller, ostermann}@tnt.uni-hannover.de
+</td></tr><tr><td>831d661d657d97a07894da8639a048c430c5536d</td><td>Weakly Supervised Facial Analysis with Dense Hyper-column Features
+<br/>CyLab Biometrics Center and the Department of Electrical and Computer Engineering,
+<br/><b>Carnegie Mellon University, Pittsburgh, PA, USA</b></td><td>('3117715', 'Chenchen Zhu', 'chenchen zhu')<br/>('3049981', 'Yutong Zheng', 'yutong zheng')<br/>('1769788', 'Khoa Luu', 'khoa luu')<br/>('6131978', 'T. Hoang Ngan Le', 't. hoang ngan le')<br/>('2043374', 'Chandrasekhar Bhagavatula', 'chandrasekhar bhagavatula')<br/>('1794486', 'Marios Savvides', 'marios savvides')</td><td>{chenchez, yutongzh, kluu, thihoanl, cbhagava}@andrew.cmu.edu, msavvid@ri.cmu.edu
+</td></tr><tr><td>83b4899d2899dd6a8d956eda3c4b89f27f1cd308</td><td>1-4244-1437-7/07/$20.00 ©2007 IEEE
+<br/>I - 377
+<br/>ICIP 2007
+</td><td></td><td></td></tr><tr><td>83295bce2340cb87901499cff492ae6ff3365475</td><td>Deep Multi-Center Learning for Face Alignment
+<br/><b>Shanghai Jiao Tong University, China</b><br/><b>School of Computer Science and Software Engineering, East China Normal University, China</b></td><td>('3403352', 'Zhiwen Shao', 'zhiwen shao')<br/>('7296339', 'Hengliang Zhu', 'hengliang zhu')<br/>('1767677', 'Xin Tan', 'xin tan')<br/>('2107352', 'Yangyang Hao', 'yangyang hao')<br/>('8452947', 'Lizhuang Ma', 'lizhuang ma')</td><td>{shaozhiwen, hengliang zhu, tanxin2017, haoyangyang2014}@sjtu.edu.cn, ma-lz@cs.sjtu.edu.cn
+</td></tr><tr><td>83e96ed8a4663edaa3a5ca90b7ce75a1bb595b05</td><td>ARANDJELOVI´C:RECOGNITIONFROMAPPEARANCESUBSPACESACROSSSCALE
+<br/>Recognition from Appearance Subspaces
+<br/>Across Image Sets of Variable Scale
+<br/>Ognjen Arandjelović
+<br/>http://mi.eng.cam.ac.uk/~oa214
+<br/><b>Trinity College</b><br/><b>University of Cambridge</b><br/>CB2 1TQ, UK
+</td><td></td><td></td></tr><tr><td>830e5b1043227fe189b3f93619ef4c58868758a7</td><td></td><td></td><td></td></tr><tr><td>8323af714efe9a3cadb31b309fcc2c36c8acba8f</td><td>Automatic Real-Time
+<br/>Facial Expression Recognition
+<br/>for Signed Language Translation
+<br/>A thesis submitted in partial fulfillment of the requirements for the de-
+<br/>gree of Magister Scientiae in the Department of Computer Science,
+<br/><b>University of the Western Cape</b><br/>May 2006
+</td><td>('1775637', 'Jacob Richard Whitehill', 'jacob richard whitehill')</td><td></td></tr><tr><td>831226405bb255527e9127b84e8eaedd7eb8e9f9</td><td>ORIGINAL RESEARCH
+<br/>published: 04 January 2017
+<br/>doi: 10.3389/fnins.2016.00594
+<br/>A Motion-Based Feature for
+<br/>Event-Based Pattern Recognition
+<br/>Centre National de la Recherche Scientifique, Institut National de la Santé Et de la Recherche Médicale, Institut de la Vision,
+<br/><b>Sorbonne Universités, UPMC University Paris 06, Paris, France</b><br/>This paper introduces an event-based luminance-free feature from the output of
+<br/>asynchronous event-based neuromorphic retinas. The feature consists in mapping the
+<br/>distribution of the optical flow along the contours of the moving objects in the visual
+<br/>scene into a matrix. Asynchronous event-based neuromorphic retinas are composed
+<br/>of autonomous pixels, each of them asynchronously generating “spiking” events that
+<br/>encode relative changes in pixels’ illumination at high temporal resolutions. The optical
+<br/>flow is computed at each event, and is integrated locally or globally in a speed and
+<br/>direction coordinate frame based grid, using speed-tuned temporal kernels. The latter
+<br/>ensures that the resulting feature equitably represents the distribution of the normal
+<br/>motion along the current moving edges, whatever their respective dynamics. The
+<br/>usefulness and the generality of the proposed feature are demonstrated in pattern
+<br/>recognition applications: local corner detection and global gesture recognition.
+<br/>Keywords: neuromorphic sensor, event-driven vision, pattern recognition, motion-based feature, speed-tuned
+<br/>integration time, histogram of oriented optical flow, corner detection, gesture recognition
+<br/>1. INTRODUCTION
+<br/>In computer vision, a feature is a more or less compact representation of visual information that is
+<br/>relevant to solve a task related to a given application (see Laptev, 2005; Mikolajczyk and Schmid,
+<br/>2005; Mokhtarian and Mohanna, 2006; Moreels and Perona, 2007; Gil et al., 2010; Dickscheid et al.,
+<br/>2011; Gauglitz et al., 2011). Building a feature consists in encoding information contained in the
+<br/>visual scene (global approach) or in a neighborhood of a point (local approach). It can represent
+<br/>static information (e.g., shape of an object, contour, etc.), dynamic information (e.g., speed and
+<br/>direction at the point, dynamic deformations, etc.) or both simultaneously.
+<br/>In this article, we propose a motion-based feature computed on visual information provided by
+<br/>asynchronous image sensors known as neuromorphic retinas (see Delbrück et al., 2010; Posch,
+<br/>2015). These cameras provide visual information as asynchronous event-based streams while
+<br/>conventional cameras output it as synchronous frame-based streams. The ATIS (“Asynchronous
+<br/>Time-based Image Sensor,” Posch et al., 2010; Posch, 2015), one of the neuromorphic visual
+<br/>sensors used in this work, is a time-domain encoding image sensor with QVGA resolution. It
+<br/>contains an array of fully autonomous pixels that combine an illuminance change detector circuit,
+<br/>associated to the PD1 photodiode, see Figure 1A and a conditional exposure measurement block,
+<br/>associated to the PD2 photodiode. The change detector individually and asynchronously initiates
+<br/>the measurement of an exposure/gray scale value only if a brightness change of a certain magnitude
+<br/>has been detected in the field-of-view of the respective pixel, as shown in the functional diagram
+<br/>of the ATIS pixel in Figures 1B, 2. The exposure measurement circuit encodes the absolute
+<br/>instantaneous pixel illuminance into the timing of asynchronous event pulses, more precisely
+<br/>Edited by:
+<br/>Tobi Delbruck,
+<br/>ETH Zurich, Switzerland
+<br/>Reviewed by:
+<br/>Dan Hammerstrom,
+<br/><b>Portland State University, USA</b><br/>Rodrigo Alvarez-Icaza,
+<br/>IBM, USA
+<br/>*Correspondence:
+<br/>Specialty section:
+<br/>This article was submitted to
+<br/>Neuromorphic Engineering,
+<br/>a section of the journal
+<br/>Frontiers in Neuroscience
+<br/>Received: 07 September 2016
+<br/>Accepted: 13 December 2016
+<br/>Published: 04 January 2017
+<br/>Citation:
+<br/>Clady X, Maro J-M, Barré S and
+<br/>Benosman RB (2017) A Motion-Based
+<br/>Feature for Event-Based Pattern
+<br/>Recognition. Front. Neurosci. 10:594.
+<br/>doi: 10.3389/fnins.2016.00594
+<br/>Frontiers in Neuroscience | www.frontiersin.org
+<br/>January 2017 | Volume 10 | Article 594
+</td><td>('1804748', 'Xavier Clady', 'xavier clady')<br/>('24337536', 'Jean-Matthieu Maro', 'jean-matthieu maro')<br/>('2133648', 'Sébastien Barré', 'sébastien barré')<br/>('1750848', 'Ryad B. Benosman', 'ryad b. benosman')<br/>('1804748', 'Xavier Clady', 'xavier clady')</td><td>xavier.clady@upmc.fr
+</td></tr><tr><td>83fd5c23204147844a0528c21e645b757edd7af9</td><td>USDOT Number Localization and Recognition From Vehicle Side-View NIR
+<br/>Images
+<br/><b>Palo Alto Research Center (PARC</b><br/>800 Phillips Rd. Webster NY 14580
+</td><td>('2415287', 'Orhan Bulan', 'orhan bulan')<br/>('1732789', 'Safwan Wshah', 'safwan wshah')<br/>('3195726', 'Ramesh Palghat', 'ramesh palghat')<br/>('2978081', 'Vladimir Kozitsky', 'vladimir kozitsky')<br/>('34801919', 'Aaron Burry', 'aaron burry')</td><td>orhan.bulan,safwan.wshah,ramesh.palghat,vladimir.kozitsky,aaron.burry@parc.com
+</td></tr><tr><td>8384e104796488fa2667c355dd15b65d6d5ff957</td><td>A Discriminative Latent Model of Image Region and
+<br/>Object Tag Correspondence
+<br/>Department of Computer Science
+<br/><b>University of Illinois at Urbana-Champaign</b><br/>School of Computing Science
+<br/><b>Simon Fraser University</b></td><td>('40457160', 'Yang Wang', 'yang wang')<br/>('10771328', 'Greg Mori', 'greg mori')</td><td>yangwang@uiuc.edu
+<br/>mori@cs.sfu.ca
+</td></tr><tr><td>8323529cf37f955fb3fc6674af6e708374006a28</td><td>Evaluation of Face Resolution for Expression Analysis
+<br/><b>IBM T. J. Watson Research Center</b><br/>PO Box 704, Yorktown Heights, NY 10598
+</td><td>('40383812', 'Ying-li Tian', 'ying-li tian')</td><td>Email: yltian@us.ibm.com
+</td></tr><tr><td>8395cf3535a6628c3bdc9b8d0171568d551f5ff0</td><td>Entropy Non-increasing Games for the
+<br/>Improvement of Dataflow Programming
+<br/>Norbert B´atfai, Ren´at´o Besenczi, Gerg˝o Bogacsovics,
+<br/>February 16, 2017
+</td><td>('9544536', 'Fanny Monori', 'fanny monori')</td><td></td></tr><tr><td>83ac942d71ba908c8d76fc68de6173151f012b38</td><td></td><td></td><td></td></tr><tr><td>834f5ab0cb374b13a6e19198d550e7a32901a4b2</td><td>Face Translation between Images and Videos using Identity-aware CycleGAN
+<br/>†Computer Vision Lab, ETH Zurich, Switzerland
+<br/>‡VISICS, KU Leuven, Belgium
+</td><td>('7945869', 'Zhiwu Huang', 'zhiwu huang')<br/>('2208488', 'Bernhard Kratzwald', 'bernhard kratzwald')<br/>('35268081', 'Danda Pani Paudel', 'danda pani paudel')<br/>('1839268', 'Jiqing Wu', 'jiqing wu')<br/>('1681236', 'Luc Van Gool', 'luc van gool')</td><td>{zhiwu.huang, paudel, jwu, vangool}@vision.ee.ethz.ch, bkratzwald@ethz.ch
+</td></tr><tr><td>8320dbdd3e4712cca813451cd94a909527652d63</td><td>EAR BIOMETRICS
+<br/>and Wilhelm Burger
+<br/><b>Johannes Kepler University(cid:1) Institute of Systems Science(cid:1) A(cid:2)
+</td><td>('12811570', 'Mark Burge', 'mark burge')</td><td></td></tr><tr><td>837e99301e00c2244023a8a48ff98d7b521c93ac</td><td>Local Feature Evaluation for a Constrained
+<br/>Local Model Framework
+<br/><b>Graduate School of Engineering, Tottori University</b><br/>101 Minami 4-chome, Koyama-cho, Tottori 680-8550, Japan
+</td><td>('1770332', 'Maiya Hori', 'maiya hori')<br/>('48532779', 'Shogo Kawai', 'shogo kawai')<br/>('2020088', 'Hiroki Yoshimura', 'hiroki yoshimura')<br/>('1679437', 'Yoshio Iwai', 'yoshio iwai')</td><td>hori@ike.tottori-u.ac.jp
+</td></tr><tr><td>834b15762f97b4da11a2d851840123dbeee51d33</td><td>Landmark-free smile intensity estimation
+<br/>IMAGO Research Group - Universidade Federal do Paran´a
+<br/>Fig. 1. Overview of our method for smile intensity estimation
+</td><td>('1800955', 'Olga R. P. Bellon', 'olga r. p. bellon')</td><td>{julio.batista,olga,luciano}@ufpr.br
+</td></tr><tr><td>833f6ab858f26b848f0d747de502127406f06417</td><td>978-1-4244-5654-3/09/$26.00 ©2009 IEEE
+<br/>61
+<br/>ICIP 2009
+</td><td></td><td></td></tr><tr><td>8334da483f1986aea87b62028672836cb3dc6205</td><td>Fully Associative Patch-based 1-to-N Matcher for Face Recognition
+<br/>Computational Biomedicine Lab
+<br/><b>University of Houston</b></td><td>('39089616', 'Lingfeng Zhang', 'lingfeng zhang')<br/>('1706204', 'Ioannis A. Kakadiaris', 'ioannis a. kakadiaris')</td><td>{lzhang34, ioannisk}@uh.edu
+</td></tr><tr><td>831b4d8b0c0173b0bac0e328e844a0fbafae6639</td><td>Consensus-Driven Propagation in
+<br/>Massive Unlabeled Data for Face Recognition
+<br/><b>CUHK - SenseTime Joint Lab, The Chinese University of Hong Kong</b><br/>2 SenseTime Group Limited
+<br/><b>Nanyang Technological University</b></td><td>('31818765', 'Xiaohang Zhan', 'xiaohang zhan')<br/>('3243969', 'Ziwei Liu', 'ziwei liu')<br/>('1721677', 'Junjie Yan', 'junjie yan')<br/>('1807606', 'Dahua Lin', 'dahua lin')<br/>('1717179', 'Chen Change Loy', 'chen change loy')</td><td>{zx017, zwliu, dhlin}@ie.cuhk.edu.hk
+<br/>yanjunjie@sensetime.com
+<br/>ccloy@ieee.org
+</td></tr><tr><td>8309e8f27f3fb6f2ac1b4343a4ad7db09fb8f0ff</td><td>Generic versus Salient Region-based Partitioning
+<br/>for Local Appearance Face Recognition
+<br/>Computer Science Depatment, Universit¨at Karlsruhe (TH)
+<br/>Am Fasanengarten 5, Karlsruhe 76131, Germany
+<br/>http://isl.ira.uka.de/cvhci
+</td><td>('1742325', 'Rainer Stiefelhagen', 'rainer stiefelhagen')</td><td>{ekenel,stiefel}@ira.uka.de
+</td></tr><tr><td>1b02b9413b730b96b91d16dcd61b2420aef97414</td><td>Détection de marqueurs affectifs et attentionnels de
+<br/>personnes âgées en interaction avec un robot
+<br/>To cite this version:
+<br/>avec un robot.
+<br/>Intelligence artificielle [cs.AI]. Université Paris-Saclay, 2015. Français. <NNT :
+<br/>2015SACLS081>. <tel-01280505>
+<br/>HAL Id: tel-01280505
+<br/>https://tel.archives-ouvertes.fr/tel-01280505
+<br/>Submitted on 29 Feb 2016
+<br/>HAL is a multi-disciplinary open access
+<br/>archive for the deposit and dissemination of sci-
+<br/>entific research documents, whether they are pub-
+<br/>lished or not. The documents may come from
+<br/>teaching and research institutions in France or
+<br/><b>abroad, or from public or private research centers</b><br/>L’archive ouverte pluridisciplinaire HAL, est
+<br/>destinée au dépôt et à la diffusion de documents
+<br/>scientifiques de niveau recherche, publiés ou non,
+<br/>émanant des établissements d’enseignement et de
+<br/>recherche français ou étrangers, des laboratoires
+<br/>publics ou privés.
+</td><td>('47829802', 'Fan Yang', 'fan yang')<br/>('47829802', 'Fan Yang', 'fan yang')</td><td></td></tr><tr><td>1b55c4e804d1298cbbb9c507497177014a923d22</td><td>Incremental Class Representation
+<br/>Learning for Face Recognition
+<br/>Degree’s Thesis
+<br/>Audiovisual Systems Engineering
+<br/>Author:
+<br/>Universitat Politècnica de Catalunya (UPC)
+<br/>2016 - 2017
+</td><td>('2470219', 'Elisa Sayrol', 'elisa sayrol')<br/>('2585946', 'Josep Ramon Morros', 'josep ramon morros')</td><td></td></tr><tr><td>1b635f494eff2e5501607ebe55eda7bdfa8263b8</td><td>USC at THUMOS 2014
+<br/><b>University of Southern California, Institute for Robotics and Intelligent Systems</b><br/>Los Angeles, CA 90089, USA
+</td><td>('1726241', 'Chen Sun', 'chen sun')<br/>('27735100', 'Ram Nevatia', 'ram nevatia')</td><td></td></tr><tr><td>1b6394178dbc31d0867f0b44686d224a19d61cf4</td><td>EPML: Expanded Parts based Metric Learning for
+<br/>Occlusion Robust Face Verification
+<br/>To cite this version:
+<br/>for Occlusion Robust Face Verification. Asian Conference on Computer Vision, Nov 2014, -,
+<br/>Singapore. pp.1-15, 2014. <hal-01070657>
+<br/>HAL Id: hal-01070657
+<br/>https://hal.archives-ouvertes.fr/hal-01070657
+<br/>Submitted on 2 Oct 2014
+<br/>HAL is a multi-disciplinary open access
+<br/>archive for the deposit and dissemination of sci-
+<br/>entific research documents, whether they are pub-
+<br/>lished or not. The documents may come from
+<br/>teaching and research institutions in France or
+<br/><b>abroad, or from public or private research centers</b><br/>L’archive ouverte pluridisciplinaire HAL, est
+<br/>destin´ee au d´epˆot et `a la diffusion de documents
+<br/>scientifiques de niveau recherche, publi´es ou non,
+<br/>´emanant des ´etablissements d’enseignement et de
+<br/>recherche fran¸cais ou ´etrangers, des laboratoires
+<br/>publics ou priv´es.
+</td><td>('2515597', 'Gaurav Sharma', 'gaurav sharma')<br/>('2515597', 'Gaurav Sharma', 'gaurav sharma')</td><td></td></tr><tr><td>1bd50926079e68a6e32dc4412e9d5abe331daefb</td><td></td><td></td><td></td></tr><tr><td>1bdef21f093c41df2682a07f05f3548717c7a3d1</td><td>Towards Automated Classification of Emotional Facial Expressions
+<br/>1Department of Mathematics and Computer Science, 2Department of Psychology
+<br/><b>Rutgers University Newark, 101 Warren St., Newark, NJ, 07102 USA</b></td><td></td><td>Lewis J. Baker (lewis.j.baker@rutgers.edu)1, Vanessa LoBue (vlobue@rutgers.edu)2,
+<br/>Elizabeth Bonawitz (elizabeth.bonawitz@rutgers.edu)2, & Patrick Shafto (patrick.shafto@gmail.com)1
+</td></tr><tr><td>1b150248d856f95da8316da868532a4286b9d58e</td><td>Analyzing 3D Objects in Cluttered Images
+<br/>UC Irvine
+<br/>UC Irvine
+</td><td>('1888731', 'Mohsen Hejrati', 'mohsen hejrati')<br/>('1770537', 'Deva Ramanan', 'deva ramanan')</td><td>shejrati@ics.uci.edu
+<br/>dramanan@ics.uci.edu
+</td></tr><tr><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td><td>Age and Gender Estimation of Unfiltered Faces
+</td><td>('2037829', 'Eran Eidinger', 'eran eidinger')<br/>('1792038', 'Roee Enbar', 'roee enbar')<br/>('1756099', 'Tal Hassner', 'tal hassner')</td><td></td></tr><tr><td>1bbec7190ac3ba34ca91d28f145e356a11418b67</td><td>Action Recognition with Dynamic Image Networks
+<br/>Citation for published version:
+<br/>Bilen, H, Fernando, B, Gravves, E & Vedaldi, A 2017, 'Action Recognition with Dynamic Image Networks'
+<br/>IEEE Transactions on Pattern Analysis and Machine Intelligence. DOI: 10.1109/TPAMI.2017.2769085
+<br/>Digital Object Identifier (DOI):
+<br/>10.1109/TPAMI.2017.2769085
+<br/>Link:
+<br/>Link to publication record in Edinburgh Research Explorer
+<br/>Document Version:
+<br/>Peer reviewed version
+<br/>Published In:
+<br/>IEEE Transactions on Pattern Analysis and Machine Intelligence
+<br/>General rights
+<br/>Copyright for the publications made accessible via the Edinburgh Research Explorer is retained by the author(s)
+<br/>and / or other copyright owners and it is a condition of accessing these publications that users recognise and
+<br/>abide by the legal requirements associated with these rights.
+<br/>Take down policy
+<br/><b>The University of Edinburgh has made every reasonable effort to ensure that Edinburgh Research Explorer</b><br/>content complies with UK legislation. If you believe that the public display of this file breaches copyright please
+<br/>investigate your claim.
+<br/>Download date: 25. Dec. 2017
+<br/> Edinburgh Research Explorer </td><td></td><td>contact openaccess@ed.ac.uk providing details, and we will remove access to the work immediately and
+</td></tr><tr><td>1b3587363d37dd197b6adbcfa79d49b5486f27d8</td><td>Multimodal Grounding for Language Processing
+<br/><b>Language Technology Lab, University of Duisburg-Essen</b><br/>(cid:52) Ubiquitous Knowledge Processing Lab (UKP) and Research Training Group AIPHES
+<br/>Department of Computer Science, Technische Universit¨at Darmstadt
+<br/>www.ukp.tu-darmstadt.de
+</td><td>('2752573', 'Lisa Beinborn', 'lisa beinborn')<br/>('25080314', 'Teresa Botschen', 'teresa botschen')<br/>('1730400', 'Iryna Gurevych', 'iryna gurevych')</td><td></td></tr><tr><td>1b5875dbebc76fec87e72cee7a5263d325a77376</td><td>Learnt Quasi-Transitive Similarity for Retrieval from Large Collections of Faces
+<br/>Ognjen Arandjelovi´c
+<br/><b>University of St Andrews, United Kingdom</b></td><td></td><td>ognjen.arandjelovic@gmail.com
+</td></tr><tr><td>1bdfb3deae6e6c0df6537efcd1d7edcb4d7a96e9</td><td>Groupwise Constrained Reconstruction for Subspace Clustering
+<br/>Ke Zhang†
+<br/><b>School of Computer Science, Fudan University, Shanghai, 200433, China</b><br/><b>QCIS Centre, FEIT, University of Technology, Sydney, NSW 2007, Australia</b></td><td>('1736607', 'Ruijiang Li', 'ruijiang li')<br/>('1713520', 'Bin Li', 'bin li')<br/>('1751513', 'Cheng Jin', 'cheng jin')<br/>('1713721', 'Xiangyang Xue', 'xiangyang xue')</td><td>rjli@fudan.edu.cn
+<br/>bin.li-1@uts.edu.au
+<br/>k_zhang@fudan.edu.cn
+<br/>jc@fudan.edu.cn
+<br/>xyxue@fudan.edu.cn
+</td></tr><tr><td>1b300a7858ab7870d36622a51b0549b1936572d4</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TIP.2016.2537215, IEEE
+<br/>Transactions on Image Processing
+<br/>Dynamic Facial Expression Recognition with Atlas
+<br/>Construction and Sparse Representation
+</td><td>('1734663', 'Yimo Guo', 'yimo guo')<br/>('1757287', 'Guoying Zhao', 'guoying zhao')</td><td></td></tr><tr><td>1b90507f02967ff143fce993a5abbfba173b1ed0</td><td>Image Processing Theory, Tools and Applications
+<br/>Gradient-DCT (G-DCT) Descriptors
+<br/><b>Technical University of Ostrava, FEECS</b><br/>17. listopadu 15, 708 33 Ostrava-Poruba, Czech Republic
+</td><td>('2467747', 'Radovan Fusek', 'radovan fusek')<br/>('2557877', 'Eduard Sojka', 'eduard sojka')</td><td>e-mail: radovan.fusek@vsb.cz, eduard.sojka@vsb.cz
+</td></tr><tr><td>1b794b944fd462a2742b6c2f8021fecc663004c9</td><td>A Hierarchical Probabilistic Model for Facial Feature Detection
+<br/><b>Rensselaer Polytechnic Institute</b></td><td>('1746738', 'Yue Wu', 'yue wu')<br/>('2860279', 'Ziheng Wang', 'ziheng wang')<br/>('1726583', 'Qiang Ji', 'qiang ji')</td><td>{wuy9,wangz10,jiq}@rpi.edu
+</td></tr><tr><td>1b7ae509c8637f3c123cf6151a3089e6b8a0d5b2</td><td>From Few to Many: Generative Models for Recognition
+<br/>Under Variable Pose and Illumination
+<br/>Departments of Electrical Engineering
+<br/><b>Beckman Institute</b><br/>and Computer Science
+<br/><b>Yale University</b><br/>New Haven, CT
+<br/><b>University of Illinois, Urbana-Champaign</b><br/>Urbana, IL 
+</td><td>('3230391', 'Athinodoros S. Georghiades', 'athinodoros s. georghiades')<br/>('1765887', 'David J. Kriegman', 'david j. kriegman')</td><td></td></tr><tr><td>1b41d4ffb601d48d7a07dbbae01343f4eb8cc38c</td><td>Exploiting Temporal Information for DCNN-based Fine-Grained Object Classification
+<br/>Australian Centre for Robotic Vision, Australia
+<br/><b>Queensland University of Technology, Australia</b><br/>Data61, CSIRO, Australia
+<br/><b>University of Queensland, Australia</b><br/><b>University of Adelaide, Australia</b></td><td>('1808390', 'ZongYuan Ge', 'zongyuan ge')<br/>('1763662', 'Chris McCool', 'chris mccool')<br/>('1781182', 'Conrad Sanderson', 'conrad sanderson')<br/>('1722767', 'Peng Wang', 'peng wang')<br/>('2161037', 'Lingqiao Liu', 'lingqiao liu')</td><td></td></tr><tr><td>1b1173a3fb33f9dfaf8d8cc36eb0bf35e364913d</td><td>DICTA
+<br/>#147
+<br/>000
+<br/>001
+<br/>002
+<br/>003
+<br/>004
+<br/>005
+<br/>006
+<br/>007
+<br/>008
+<br/>009
+<br/>010
+<br/>011
+<br/>012
+<br/>013
+<br/>014
+<br/>015
+<br/>016
+<br/>017
+<br/>018
+<br/>019
+<br/>020
+<br/>021
+<br/>022
+<br/>023
+<br/>024
+<br/>025
+<br/>026
+<br/>027
+<br/>028
+<br/>029
+<br/>030
+<br/>031
+<br/>032
+<br/>033
+<br/>034
+<br/>035
+<br/>036
+<br/>037
+<br/>038
+<br/>039
+<br/>040
+<br/>041
+<br/>042
+<br/>043
+<br/>044
+<br/>045
+<br/>046
+<br/>047
+<br/>048
+<br/>049
+<br/>050
+<br/>051
+<br/>052
+<br/>053
+<br/>DICTA 2010 Submission #147. CONFIDENTIAL REVIEW COPY. DO NOT DISTRIBUTE.
+<br/>Registration Invariant Representations for Expression Detection
+<br/>Anonymous DICTA submission
+<br/>Paper ID 147
+</td><td></td><td></td></tr><tr><td>1b0a071450c419138432c033f722027ec88846ea</td><td>Windsor Oceanico Hotel, Rio de Janeiro, Brazil, November 1-4, 2016
+<br/>978-1-5090-1889-5/16/$31.00 ©2016 IEEE
+<br/>649
+</td><td></td><td></td></tr><tr><td>1b60b8e70859d5c85ac90510b370b501c5728620</td><td>Using Detailed Independent 3D Sub-models to Improve
+<br/>Facial Feature Localisation and Pose Estimation
+<br/><b>Imaging Science and Biomedical Engineering, The University of Manchester, UK</b></td><td>('1753123', 'Angela Caunce', 'angela caunce')</td><td></td></tr><tr><td>1b3b01513f99d13973e631c87ffa43904cd8a821</td><td>HMM RECOGNITION OF EXPRESSIONS IN UNRESTRAINED VIDEO INTERVALS
+<br/>Universitat Politècnica de Catalunya, Barcelona, Spain
+</td><td>('3067467', 'José Luis Landabaso', 'josé luis landabaso')<br/>('1767549', 'Montse Pardàs', 'montse pardàs')<br/>('2868058', 'Antonio Bonafonte', 'antonio bonafonte')</td><td></td></tr><tr><td>1bc214c39536c940b12c3a2a6b78cafcbfddb59a</td><td></td><td></td><td></td></tr><tr><td>1bc9aaa41c08bbd0c01dd5d7d7ebf3e48ae78113</td><td>Article
+<br/>k-Same-Net: k-Anonymity with Generative Deep
+<br/>Neural Networks for Face Deidentification †
+<br/><b>Faculty of Computer and Information Science, University of Ljubljana, Ve cna pot 113, SI-1000 Ljubljana</b><br/><b>Faculty of Electrical Engineering, University of Ljubljana, Tr a ka cesta 25, SI-1000 Ljubljana, Slovenia</b><br/>† This paper is an extended version of our paper published in Meden B.; Emeršiˇc Ž.; Štruc V.; Peer P.
+<br/>k-Same-Net: Neural-Network-Based Face Deidentification. In the Proceedings of the International
+<br/>Conference and Workshop on Bioinspired Intelligence (IWOBI), Funchal Madeira, Portugal, 10–12 July 2017.
+<br/>Received: 1 December 2017 ; Accepted: 9 January 2018; Published: 13 January 2018
+</td><td>('34862665', 'Peter Peer', 'peter peer')</td><td>Slovenia; ziga.emersic@fri.uni-lj.si (Z.E.); peter.peer@fri.uni-lj.si (P.P.)
+<br/>vitomir.struc@fe.uni-lj.si
+<br/>* Correspondence: blaz.meden@fri.uni-lj.si; Tel.: +386-1-479-8245
+</td></tr><tr><td>1be18a701d5af2d8088db3e6aaa5b9b1d54b6fd3</td><td>ENHANCEMENT OF FAST FACE DETECTION ALGORITHM BASED ON A CASCADE OF
+<br/>DECISION TREES
+<br/>Commission II, WG II/5
+<br/>KEY WORDS: Face Detection, Cascade Algorithm, Decision Trees.
+</td><td>('40293010', 'V. V. Khryashchev', 'v. v. khryashchev')<br/>('32423989', 'A. A. Lebedev', 'a. a. lebedev')<br/>('3414890', 'A. L. Priorov', 'a. l. priorov')</td><td>a YSU, Yaroslavl, Russia - lebedevdes@gmail.com, (vhr, andcat)@yandex.ru
+</td></tr><tr><td>1b79628af96eb3ad64dbb859dae64f31a09027d5</td><td></td><td></td><td></td></tr><tr><td>1bcbf2a4500d27d036e0f9d36d7af71c72f8ab61</td><td>Computer Vision and Pattern Recognition 2005
+<br/>Recognizing Facial Expression: Machine Learning and Application to
+<br/>Spontaneous Behavior
+<br/><b>Institute for Neural Computation, University of California, San Diego</b><br/>Ian Fasel1, Javier Movellan1
+<br/><b>Rutgers University, New Brunswick, NJ</b></td><td>('2218905', 'Marian Stewart Bartlett', 'marian stewart bartlett')<br/>('2724380', 'Gwen Littlewort', 'gwen littlewort')<br/>('2767464', 'Claudia Lainscsek', 'claudia lainscsek')</td><td>mbartlett@ucsd.edu
+</td></tr><tr><td>1b70bbf7cdfc692873ce98dd3c0e191580a1b041</td><td> International Research Journal of Engineering and Technology (IRJET) e-ISSN: 2395 -0056
+<br/> Volume: 03 Issue: 10 | Oct -2016 www.irjet.net p-ISSN: 2395-0072
+<br/>Enhancing Performance of Face Recognition
+<br/>System Using Independent Component Analysis
+<br/><b>Student, Computer Science, Shah and Anchor Kuttchi Engineering College, Mumbai, India</b><br/><b>Guide, HOD, Computer Science, Shah and Anchor Kuttchi Engineering College, Mumbai, India</b><br/><b>Co-Guide, Computer Science, Shah and Anchor Kuttchi Engineering College, Mumbai, India</b><br/>---------------------------------------------------------------------***---------------------------------------------------------------------
+<br/>cards, tokens and keys. Biometric based methods examine
+</td><td>('32330340', 'Manimala Mahato', 'manimala mahato')</td><td></td></tr><tr><td>1b71d3f30238cb6621021a95543cce3aab96a21b</td><td>Fine-grained Video Classification and Captioning
+<br/><b>University of Toronto1, Twenty Billion Neurons</b></td><td>('2454800', 'Farzaneh Mahdisoltani', 'farzaneh mahdisoltani')<br/>('40586522', 'Guillaume Berger', 'guillaume berger')<br/>('3462264', 'Waseem Gharbieh', 'waseem gharbieh')<br/>('1710604', 'Roland Memisevic', 'roland memisevic')</td><td>1 {farzaneh, fleet}@cs.toronto.edu, {firstname.lastname}@twentybn.com
+</td></tr><tr><td>1b4f6f73c70353869026e5eec1dd903f9e26d43f</td><td>Robust Subjective Visual Property Prediction
+<br/>from Crowdsourced Pairwise Labels
+</td><td>('35782003', 'Yanwei Fu', 'yanwei fu')<br/>('1697755', 'Timothy M. Hospedales', 'timothy m. hospedales')<br/>('1700927', 'Tao Xiang', 'tao xiang')<br/>('3081531', 'Jiechao Xiong', 'jiechao xiong')<br/>('2073354', 'Shaogang Gong', 'shaogang gong')<br/>('1717863', 'Yizhou Wang', 'yizhou wang')<br/>('1746280', 'Yuan Yao', 'yuan yao')</td><td></td></tr><tr><td>1bc23c771688109bed9fd295ce82d7e702726327</td><td></td><td>('1706007', 'Jianchao Yang', 'jianchao yang')</td><td></td></tr><tr><td>1bad8a9640cdbc4fe7de12685651f44c4cff35ce</td><td>THETIS: THree Dimensional Tennis Shots
+<br/>A human action dataset
+<br/>Sofia Gourgari
+<br/>Konstantinos Karpouzis
+<br/>Stefanos Kollias
+<br/><b>National Technical University of Athens</b><br/>Image Video and Multimedia Systems Laboratory
+</td><td>('2123731', 'Georgios Goudelis', 'georgios goudelis')</td><td></td></tr><tr><td>1b589016fbabe607a1fb7ce0c265442be9caf3a9</td><td></td><td></td><td></td></tr><tr><td>1be0ce87bb5ba35fa2b45506ad997deef6d6a0a8</td><td>EXMOVES: Classifier-based Features for Scalable Action Recognition
+<br/><b>Dartmouth College, NH 03755 USA</b></td><td>('1687325', 'Du Tran', 'du tran')<br/>('1732879', 'Lorenzo Torresani', 'lorenzo torresani')</td><td>{DUTRAN,LORENZO}@CS.DARTMOUTH.EDU
+</td></tr><tr><td>1b4bc7447f500af2601c5233879afc057a5876d8</td><td>Facial Action Unit Classification with Hidden Knowledge
+<br/>under Incomplete Annotation
+<br/><b>University of Science and</b><br/>Technology of China
+<br/>Hefei, Anhui
+<br/><b>University of Science and</b><br/>Technology of China
+<br/>Hefei, Anhui
+<br/>Rensselaer Polytechnic
+<br/><b>Institute</b><br/>Troy, NY
+<br/>P.R.China, 230027
+<br/>P.R.China, 230027
+<br/>USA, 12180
+</td><td>('1715001', 'Jun Wang', 'jun wang')<br/>('1791319', 'Shangfei Wang', 'shangfei wang')<br/>('1726583', 'Qiang Ji', 'qiang ji')</td><td>junwong@mail.ustc.edu.cn
+<br/>sfwang@ustc.edu.cn
+<br/>qji@ecse.rpi.edu
+</td></tr><tr><td>1b27ca161d2e1d4dd7d22b1247acee5c53db5104</td><td></td><td></td><td></td></tr><tr><td>1badfeece64d1bf43aa55c141afe61c74d0bd25e</td><td>OL ´E: Orthogonal Low-rank Embedding,
+<br/>A Plug and Play Geometric Loss for Deep Learning
+<br/>1Universidad de la Rep´ublica
+<br/>Uruguay
+<br/><b>Duke University</b><br/>USA
+</td><td>('2077648', 'Qiang Qiu', 'qiang qiu')<br/>('1699339', 'Guillermo Sapiro', 'guillermo sapiro')</td><td></td></tr><tr><td>7711a7404f1f1ac3a0107203936e6332f50ac30c</td><td>Action Classification and Highlighting in Videos
+<br/>Disney Research Pittsburgh
+<br/>Disney Research Pittsburgh
+</td><td>('1730844', 'Atousa Torabi', 'atousa torabi')<br/>('14517812', 'Leonid Sigal', 'leonid sigal')</td><td>atousa.torabi@disneyresearch.com
+<br/>lsigal@disneyresearch.com
+</td></tr><tr><td>778c9f88839eb26129427e1b8633caa4bd4d275e</td><td>Pose Pooling Kernels for Sub-category Recognition
+<br/>ICSI & UC Berkeley
+<br/>ICSI & UC Berkeley
+<br/>Trever Darrell
+<br/>ICSI & UC Berkeley
+</td><td>('40565777', 'Ning Zhang', 'ning zhang')<br/>('2071606', 'Ryan Farrell', 'ryan farrell')</td><td>nzhang@eecs.berkeley.edu
+<br/>farrell@eecs.berkeley.edu
+<br/>trevor@eecs.berkeley.edu
+</td></tr><tr><td>7735f63e5790006cb3d989c8c19910e40200abfc</td><td>Multispectral Imaging For Face
+<br/>Recognition Over Varying
+<br/>Illumination
+<br/>A Dissertation
+<br/>Presented for the
+<br/>Doctor of Philosophy Degree
+<br/><b>The University of Tennessee, Knoxville</b><br/>December 2008
+</td><td>('21051127', 'Hong Chang', 'hong chang')</td><td></td></tr><tr><td>7789a5d87884f8bafec8a82085292e87d4e2866f</td><td>A Unified Tensor-based Active Appearance Face
+<br/>Model
+<br/>Member, IEEE
+</td><td>('2976854', 'Zhen-Hua Feng', 'zhen-hua feng')<br/>('1748684', 'Josef Kittler', 'josef kittler')</td><td></td></tr><tr><td>77b1db2281292372c38926cc4aca32ef056011dc</td><td>451492 EMR0010.1177/1754073912451492Widen Children’s Interpretation of Facial ExpressionsEmotion Review
+<br/>2012
+<br/>SPECIAL SECTION: FACIAL EXPRESSIONS
+<br/>Children’s Interpretation of Facial Expressions:
+<br/>The Long Path from Valence-Based to Specific
+<br/>Discrete Categories
+<br/>Emotion Review
+<br/>Vol. 0, No. 0 (2012) 1 –6
+<br/>© The Author(s) 2012
+<br/>ISSN 1754-0739
+<br/>DOI: 10.1177/1754073912451492
+<br/>er.sagepub.com
+<br/><b>Boston College, USA</b></td><td>('3947094', 'Sherri C. Widen', 'sherri c. widen')</td><td></td></tr><tr><td>776835eb176ed4655d6e6c308ab203126194c41e</td><td></td><td></td><td></td></tr><tr><td>77c53ec6ea448db4dad586e002a395c4a47ecf66</td><td>Research Journal of Applied Sciences, Engineering and Technology 4(17): 2879-2886, 2012
+<br/>ISSN: 2040-7467
+<br/>© Maxwell Scientific Organization, 2012
+<br/>Submitted: November 25, 2011
+<br/>Accepted: January 13, 2012
+<br/>Published: September 01, 2012
+<br/>Face Recognition Based on Facial Features
+<br/><b>COMSATS Institute of Information Technology Wah Cantt</b><br/>47040, Pakistan
+<br/><b>National University of Science and Technology</b><br/>Peshawar Road, Rawalpindi, 46000, Pakistan
+</td><td>('33088042', 'Muhammad Sharif', 'muhammad sharif')<br/>('3349608', 'Muhammad Younas Javed', 'muhammad younas javed')<br/>('32805529', 'Sajjad Mohsin', 'sajjad mohsin')</td><td></td></tr><tr><td>778bff335ae1b77fd7ec67404f71a1446624331b</td><td>Hough Forest-based Facial Expression Recognition from
+<br/>Video Sequences
+<br/>BIWI, ETH Zurich http://www.vision.ee.ethz.ch
+<br/>VISICS, K.U. Leuven http://www.esat.kuleuven.be/psi/visics
+</td><td>('3092828', 'Gabriele Fanelli', 'gabriele fanelli')<br/>('2569989', 'Angela Yao', 'angela yao')<br/>('40324831', 'Pierre-Luc Noel', 'pierre-luc noel')<br/>('2946643', 'Juergen Gall', 'juergen gall')<br/>('1681236', 'Luc Van Gool', 'luc van gool')</td><td>{gfanelli,yaoa,gall,vangool}@vision.ee.ethz.ch
+<br/>noelp@student.ethz.ch
+</td></tr><tr><td>7726a6ab26a1654d34ec04c0b7b3dd80c5f84e0d</td><td>CONTENT-AWARE COMPRESSION USING SALIENCY-DRIVEN IMAGE RETARGETING
+<br/>*Disney Research Zurich
+<br/>†ETH Zurich
+</td><td>('1782328', 'Yael Pritch', 'yael pritch')<br/>('2893744', 'Alexander Sorkine-Hornung', 'alexander sorkine-hornung')<br/>('1712877', 'Stefan Mangold', 'stefan mangold')</td><td></td></tr><tr><td>774cbb45968607a027ae4729077734db000a1ec5</td><td>I. KWAK ET AL.: VISUAL RECOGNITION OF URBAN TRIBES
+<br/>From Bikers to Surfers:
+<br/>Visual Recognition of Urban Tribes
+<br/>Ana C. Murillo2
+<br/>David Kriegman1
+<br/>Serge Belongie1
+<br/>1 Dept. of Computer Science and
+<br/>Engineering
+<br/><b>University of California, San Diego</b><br/>San Diego, CA, USA
+<br/>2 Dpt. Informática e Ing. Sistemas - Inst.
+<br/>Investigación en Ingeniería de Aragón.
+<br/><b>University of Zaragoza, Spain</b><br/>3 Department of Computer Science
+<br/><b>Columbia University, USA</b></td><td>('2064392', 'Iljung S. Kwak', 'iljung s. kwak')<br/>('1767767', 'Peter N. Belhumeur', 'peter n. belhumeur')</td><td>iskwak@cs.ucsd.edu
+<br/>acm@unizar.es
+<br/>belhumeur@cs.columbia.edu
+<br/>kriegman@cs.ucsd.edu
+<br/>sjb@cs.ucsd.edu
+</td></tr><tr><td>7754b708d6258fb8279aa5667ce805e9f925dfd0</td><td>Facial Action Unit Recognition by Exploiting
+<br/>Their Dynamic and Semantic Relationships
+</td><td>('1686235', 'Yan Tong', 'yan tong')<br/>('2460793', 'Wenhui Liao', 'wenhui liao')<br/>('1726583', 'Qiang Ji', 'qiang ji')</td><td></td></tr><tr><td>77db171a523fc3d08c91cea94c9562f3edce56e1</td><td>Poursaberi et al. EURASIP Journal on Image and Video Processing 2012, 2012:17
+<br/>http://jivp.eurasipjournals.com/content/2012/1/17
+<br/>R ES EAR CH
+<br/>Open Access
+<br/>Gauss–Laguerre wavelet textural feature fusion
+<br/>with geometrical information for facial expression
+<br/>identification
+</td><td>('1786383', 'Ahmad Poursaberi', 'ahmad poursaberi')<br/>('1870195', 'Hossein Ahmadi', 'hossein ahmadi')</td><td></td></tr><tr><td>77037a22c9b8169930d74d2ce6f50f1a999c1221</td><td>Robust Face Recognition With Kernelized
+<br/>Locality-Sensitive Group Sparsity Representation
+</td><td>('1907978', 'Shoubiao Tan', 'shoubiao tan')<br/>('2796142', 'Xi Sun', 'xi sun')<br/>('2710497', 'Wentao Chan', 'wentao chan')<br/>('33306018', 'Lei Qu', 'lei qu')</td><td></td></tr><tr><td>779ad364cae60ca57af593c83851360c0f52c7bf</td><td>Steerable Pyramids Feature Based Classification Using Fisher
+<br/>Linear Discriminant for Face Recognition
+<br/>EL HASSOUNI MOHAMMED12
+<br/><b>GSCM-LRIT, Faculty of Sciences, Mohammed V University-Agdal, Rabat, Morocco</b><br/><b>DESTEC, FLSHR Mohammed V University-Agdal, Rabat, Morocco</b><br/>PO.Box 1014, Rabat, Morocco
+</td><td>('37917405', 'ABOUTAJDINE DRISS', 'aboutajdine driss')</td><td>moha387@yahoo.fr
+</td></tr><tr><td>7792fbc59f3eafc709323cdb63852c5d3a4b23e9</td><td>Pose from Action: Unsupervised Learning of
+<br/>Pose Features based on Motion
+<br/><b>Robotics Institute</b><br/><b>Carnegie Mellon University</b></td><td>('3234247', 'Senthil Purushwalkam', 'senthil purushwalkam')<br/>('1737809', 'Abhinav Gupta', 'abhinav gupta')</td><td>{spurushw@andrew,abhinavg@cs}.cmu.edu
+</td></tr><tr><td>77fbbf0c5729f97fcdbfdc507deee3d388cd4889</td><td>SMITH & DYER: 3D FACIAL LANDMARK ESTIMATION
+<br/>Pose-Robust 3D Facial Landmark Estimation
+<br/>from a Single 2D Image
+<br/>http://www.cs.wisc.edu/~bmsmith
+<br/>http://www.cs.wisc.edu/~dyer
+<br/>Department of Computer Sciences
+<br/><b>University of Wisconsin-Madison</b><br/>Madison, WI USA
+</td><td>('2721523', 'Brandon M. Smith', 'brandon m. smith')<br/>('1724754', 'Charles R. Dyer', 'charles r. dyer')</td><td></td></tr><tr><td>776362314f1479f5319aaf989624ac604ba42c65</td><td>Attribute learning in large-scale datasets
+<br/><b>Stanford University</b></td><td>('2192178', 'Olga Russakovsky', 'olga russakovsky')<br/>('3216322', 'Li Fei-Fei', 'li fei-fei')</td><td>{olga,feifeili}@cs.stanford.edu
+</td></tr><tr><td>77d31d2ec25df44781d999d6ff980183093fb3de</td><td>The Multiverse Loss for Robust Transfer Learning
+<br/>Supplementary
+<br/>1. Omitted proofs
+<br/>for which the joint loss:
+<br/>m(cid:88)
+<br/>r=1
+<br/>L(F r, br, D, y)
+<br/>(2)
+<br/>J(F 1, b1...F m, bm, D, y) =
+<br/>is bounded by:
+<br/>mL∗(D, y) ≤ J(F 1, b1...F m, bm, D, y)
+<br/>m−1(cid:88)
+<br/>≤ mL∗(D, y) +
+<br/>Alλd−j+1
+<br/>(3)
+<br/>l=1
+<br/>where [A1 . . . Am−1] are bounded parameters.
+<br/>We provide proofs that were omitted from the paper for
+<br/>lack of space. We follow the same theorem numbering as in
+<br/>the paper.
+<br/>Lemma 1. The minimizers F ∗, b∗ of L are not unique, and
+<br/>it holds that for any vector v ∈ Rc and scalar s, the solu-
+<br/>tions F ∗ + v1(cid:62)
+<br/>Proof. denoting V = v1(cid:62)
+<br/>c , b∗ + s1c are also minimizers of L.
+<br/>c , s = s1c,
+<br/>i v+byi +s
+<br/>i v+bj +s
+<br/>i fyi +byi
+<br/>i v+sed(cid:62)
+<br/>i fj +bj
+<br/>i=1
+<br/>log(
+<br/>L(F ∗ + V, b∗ + s, D, y) =
+<br/>i fyi +d(cid:62)
+<br/>ed(cid:62)
+<br/>i fj +d(cid:62)
+<br/>j=1 ed(cid:62)
+<br/>i v+sed(cid:62)
+<br/>ed(cid:62)
+<br/>j=1 ed(cid:62)
+<br/>i v+sed(cid:62)
+<br/>ed(cid:62)
+<br/>(cid:80)c
+<br/>(cid:80)c
+<br/>i v+s(cid:80)c
+<br/>− n(cid:88)
+<br/>= − n(cid:88)
+<br/>= − n(cid:88)
+<br/>(cid:80)c
+<br/>= − n(cid:88)
+<br/>ed(cid:62)
+<br/>i fyi +byi
+<br/>j=1 ed(cid:62)
+<br/>i fj +bj
+<br/>ed(cid:62)
+<br/>log(
+<br/>log(
+<br/>log(
+<br/>i=1
+<br/>i=1
+<br/>i=1
+<br/>i fj +bj
+<br/>i fyi +byi
+<br/>j=1 ed(cid:62)
+<br/>) = L(F ∗, b∗, D, y)
+<br/>The following simple lemma was not part of the paper.
+<br/>However, it is the reasoning behind the statement at the end
+<br/>of the proof of Thm. 1. “Since ∀i, j pi(j) > 0 and since
+<br/>rank(D) is full,(cid:80)n
+<br/>Lemma 2. Let K =(cid:80)n
+<br/>such that ∀i qi > 0, the matrix ˆK =(cid:80)n
+<br/>i be a full rank d×d matrix,
+<br/>i.e., it is PD and not just PSD, then for all vector q ∈ Rn
+<br/>is also
+<br/>i pi(j)pi(j(cid:48)) is PD.”
+<br/>i=1 did(cid:62)
+<br/>i=1 did(cid:62)
+<br/>i=1 qidid(cid:62)
+<br/>full rank.
+<br/>Proof. For
+<br/>(miniqi)v(cid:62)Kv > 0.
+<br/>every vector v
+<br/>(cid:2)f 1
+<br/>(cid:3) , b1, F 2 = (cid:2)f 2
+<br/>Theorem 3. There exist a set of weights F 1 =
+<br/>j ⊥ f s
+<br/>C ] , bm which are orthogonal ∀jrs f r
+<br/>2 , ..., f 1
+<br/>2 , ..., f m
+<br/>1 , f 1
+<br/>1 , f m
+<br/>2 , ..., f 2
+<br/>1 , f 2
+<br/>[f m
+<br/>(cid:3) , b2...F m =
+<br/>Proof. We again prove the theorem by constructing such a
+<br/>solution. Denoting by vd−m+2...vd the eigenvectors of K
+<br/>corresponding to λd−m+2 . . . λd. Given F 1 = F ∗, b1 = b∗,
+<br/>we can construct each pair F r, br as follows:
+<br/>(1)
+<br/>∀j, r
+<br/>fj
+<br/>r = f1
+<br/>1 +
+<br/>m−1(cid:88)
+<br/>l=1
+<br/>αjlrvd−l+1
+<br/>br = b1
+<br/>(4)
+<br/>The tensor of parameters αjlr is constructed to insure the
+<br/>orthogonality condition. Formally, αjlr has to satisfy:
+<br/>Rd,
+<br/>v(cid:62) ˆKv
+<br/>∀j, r (cid:54)= s
+<br/>(f 1
+<br/>j +
+<br/>m−1(cid:88)
+<br/>l=1
+<br/>αjlrvd−l+1)(cid:62)f s
+<br/>j = 0
+<br/>(5)
+<br/>2 m(m− 1) equations, it
+<br/>Noticing that 5 constitutes a set of 1
+<br/>can be satisfied by the tensor αjlr which contains m(m −
+<br/>c ] = F r −
+<br/>1)c parameters. Defining Ψr = [ψr
+<br/>1, ψr
+<br/>2, . . . , ψr
+</td><td></td><td></td></tr><tr><td>77fb9e36196d7bb2b505340b6b94ba552a58b01b</td><td>Detecting the Moment of Completion:
+<br/>Temporal Models for Localising Action Completion
+<br/><b>University of Bristol, Bristol, BS8 1UB, UK</b></td><td>('10007321', 'Farnoosh Heidarivincheh', 'farnoosh heidarivincheh')<br/>('1728108', 'Majid Mirmehdi', 'majid mirmehdi')<br/>('1728459', 'Dima Damen', 'dima damen')</td><td>farnoosh.heidarivincheh@bristol.ac.uk
+</td></tr><tr><td>486840f4f524e97f692a7f6b42cd19019ee71533</td><td>DeepVisage: Making face recognition simple yet with powerful generalization
+<br/>skills
+<br/>1Laboratoire LIRIS, ´Ecole centrale de Lyon, 69134 Ecully, France.
+<br/>2Safran Identity & Security, 92130 Issy-les-Moulineaux, France.
+</td><td>('34767162', 'Jonathan Milgram', 'jonathan milgram')<br/>('34086868', 'Liming Chen', 'liming chen')</td><td>md-abul.hasnat@ec-lyon.fr, {julien.bohne, stephane.gentric, jonathan.milgram}@safrangroup.com,
+<br/>liming.chen@ec-lyon.fr
+</td></tr><tr><td>48463a119f67ff2c43b7c38f0a722a32f590dfeb</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 52– No.4, August 2012
+<br/>Intelligent Method for Face Recognition of Infant
+<br/>Department of Computer
+<br/>Engineering
+<br/><b>Indian Institute of Technology</b><br/><b>Banaras Hindu University</b><br/>Varanasi, India-221005
+<br/>Department of Computer
+<br/>Engineering
+<br/><b>Indian Institute of Technology</b><br/><b>Banaras Hindu University</b><br/>Varanasi, India-221005
+<br/>
+<br/>Department of Computer
+<br/>Engineering
+<br/><b>Indian Institute of Technology</b><br/><b>Banaras Hindu University</b><br/>Varanasi, India-221005
+</td><td>('2829597', 'Shrikant Tiwari', 'shrikant tiwari')<br/>('1920426', 'Aruni Singh', 'aruni singh')<br/>('32120516', 'Sanjay Kumar Singh', 'sanjay kumar singh')</td><td></td></tr><tr><td>488d3e32d046232680cc0ba80ce3879f92f35cac</td><td>Journal of Information Systems and Telecommunication, Vol. 2, No. 4, October-December 2014
+<br/>205
+<br/>Facial Expression Recognition Using Texture Description of
+<br/>Displacement Image
+<br/><b>Amirkabir University of Technology, Tehran. Iran</b><br/>Abolghasem-Asadollah Raie*
+<br/><b>Amirkabir University of Technology, Tehran. Iran</b><br/><b>Sharif University of Technology, Tehran. Iran</b><br/>Received: 14/Sep/2013 Revised: 15/Mar/2014 Accepted: 10/Aug/2014
+</td><td>('3295771', 'Hamid Sadeghi', 'hamid sadeghi')<br/>('1697809', 'Mohammad-Reza Mohammadi', 'mohammad-reza mohammadi')</td><td>hamid.sadeghi@aut.ac.ir
+<br/>raie@aut.ac.ir
+<br/>mrmohammadi@ee.sharif.edu
+</td></tr><tr><td>48186494fc7c0cc664edec16ce582b3fcb5249c0</td><td>P-CNN: Pose-based CNN Features for Action Recognition
+<br/>Guilhem Ch´eron∗ †
+<br/>INRIA
+</td><td>('1785596', 'Ivan Laptev', 'ivan laptev')<br/>('2462253', 'Cordelia Schmid', 'cordelia schmid')</td><td></td></tr><tr><td>48499deeaa1e31ac22c901d115b8b9867f89f952</td><td>Interim Report of Final Year Project
+<br/>HKU-Face: A Large Scale Dataset for
+<br/>Deep Face Recognition
+<br/>3035140108
+<br/>Haoyu Li
+<br/>3035141841
+<br/>COMP4801 Final Year Project
+<br/>Project Code: 17007
+</td><td>('3347561', 'Haicheng Wang', 'haicheng wang')</td><td></td></tr><tr><td>486a82f50835ea888fbc5c6babf3cf8e8b9807bc</td><td>MSU TECHNICAL REPORT MSU-CSE-15-11, JULY 24, 2015
+<br/>Face Search at Scale: 80 Million Gallery
+</td><td>('7496032', 'Dayong Wang', 'dayong wang')<br/>('40653304', 'Charles Otto', 'charles otto')<br/>('6680444', 'Anil K. Jain', 'anil k. jain')</td><td></td></tr><tr><td>48fea82b247641c79e1994f4ac24cad6b6275972</td><td>Mining Discriminative Components With Low-Rank And
+<br/>Sparsity Constraints for Face Recognition
+<br/>Computer Science and Engineering
+<br/><b>Arizona State University</b><br/>Tempe, AZ, 85281
+</td><td>('1689161', 'Qiang Zhang', 'qiang zhang')<br/>('2913552', 'Baoxin Li', 'baoxin li')</td><td>qzhang53, baoxin.li@asu.edu
+</td></tr><tr><td>48734cb558b271d5809286447ff105fd2e9a6850</td><td>Facial Expression Recognition Using Enhanced Deep 3D Convolutional Neural
+<br/>Networks
+<br/>Department of Electrical and Computer Engineering
+<br/><b>University of Denver, Denver, CO</b></td><td>('3093835', 'Mohammad H. Mahoor', 'mohammad h. mahoor')</td><td>behzad.hasani@du.edu and mmahoor@du.edu
+</td></tr><tr><td>48a417cfeba06feb4c7ab30f06c57ffbc288d0b5</td><td>Robust Dictionary Learning by Error Source Decomposition
+<br/><b>Northwestern University</b><br/>2145 Sheridan Road, Evanston, IL 60208
+</td><td>('2240134', 'Zhuoyuan Chen', 'zhuoyuan chen')<br/>('39955137', 'Ying Wu', 'ying wu')</td><td>zhuoyuanchen2014@u.northwestern.edu,yingwu@eecs.northwestern.edu
+</td></tr><tr><td>4850af6b54391fc33c8028a0b7fafe05855a96ff</td><td>Discovering Useful Parts for Pose Estimation in Sparsely Annotated Datasets
+<br/>1Department of Computer Science and 2Department of Biology
+<br/><b>Boston University and 2University of North Carolina</b></td><td>('2025025', 'Mikhail Breslav', 'mikhail breslav')<br/>('1711465', 'Tyson L. Hedrick', 'tyson l. hedrick')<br/>('1749590', 'Stan Sclaroff', 'stan sclaroff')<br/>('1723703', 'Margrit Betke', 'margrit betke')</td><td>breslav@bu.edu, thedrick@bio.unc.edu, sclaroff@bu.edu, betke@bu.edu
+</td></tr><tr><td>48c41ffab7ff19d24e8df3092f0b5812c1d3fb6e</td><td>Multi-Modal Embedding for Main Product Detection in Fashion
+<br/>1Institut de Robtica i Informtica Industrial (CSIC-UPC)
+<br/>2Wide Eyes Technologies
+<br/><b>Waseda University</b></td><td>('1737881', 'Antonio Rubio', 'antonio rubio')<br/>('9072783', 'LongLong Yu', 'longlong yu')<br/>('3114470', 'Edgar Simo-Serra', 'edgar simo-serra')<br/>('1994318', 'Francesc Moreno-Noguer', 'francesc moreno-noguer')</td><td>arubio@iri.upc.edu, longyu@wide-eyes.it, esimo@aoni.waseda.jp, fmoreno@iri.upc.edu
+</td></tr><tr><td>488a61e0a1c3768affdcd3c694706e5bb17ae548</td><td>FITTING A 3D MORPHABLE MODEL TO EDGES:
+<br/>A COMPARISON BETWEEN HARD AND SOFT CORRESPONDENCES
+<br/><b>Multimodal Computing and Interaction, Saarland University, Germany</b><br/><b>University of York, UK</b><br/>‡ Morpheo Team, INRIA Grenoble Rhˆone-Alpes, France
+</td><td>('39180407', 'Anil Bas', 'anil bas')<br/>('1687021', 'William A. P. Smith', 'william a. p. smith')<br/>('1780750', 'Timo Bolkart', 'timo bolkart')<br/>('1792200', 'Stefanie Wuhrer', 'stefanie wuhrer')</td><td></td></tr><tr><td>48910f9b6ccc40226cd4f105ed5291571271b39e</td><td>Learning Discriminative Fisher Kernels
+<br/><b>Pattern Recognition and Bio-informatics Laboratory, Delft University of Technology, THE NETHERLANDS</b></td><td>('1803520', 'Laurens van der Maaten', 'laurens van der maaten')</td><td>lvdmaaten@gmail.com
+</td></tr><tr><td>48a9241edda07252c1aadca09875fabcfee32871</td><td>Convolutional Experts Network for Facial Landmark Detection
+<br/><b>Carnegie Mellon University</b><br/>Tadas Baltruˇsaitis∗
+<br/><b>Carnegie Mellon University</b><br/>5000 Forbes Ave, Pittsburgh, PA 15213, USA
+<br/>5000 Forbes Ave, Pittsburgh, PA 15213, USA
+<br/><b>Carnegie Mellon University</b><br/>5000 Forbes Ave, Pittsburgh, PA 15213, USA
+</td><td>('1783029', 'Amir Zadeh', 'amir zadeh')<br/>('1767184', 'Louis-Philippe Morency', 'louis-philippe morency')</td><td>abagherz@cs.cmu.edu
+<br/>tbaltrus@cs.cmu.edu
+<br/>morency@cs.cmu.edu
+</td></tr><tr><td>48f0055295be7b175a06df5bc6fa5c6b69725785</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 96– No.19, June 2014
+<br/>Facial Action Unit Recognition from Video Streams
+<br/>with Recurrent Neural Networks
+<br/><b>University of the Witwatersrand</b><br/>Braamfontein, Johannesburg
+<br/>South Africa
+</td><td>('3122515', 'Hima Vadapalli', 'hima vadapalli')</td><td></td></tr><tr><td>48729e4de8aa478ee5eeeb08a72a446b0f5367d5</td><td>COMPRESSED FACE HALLUCINATION
+<br/>Electrical Engineering and Computer Science
+<br/><b>University of California, Merced, CA 95344, USA</b></td><td>('2391885', 'Sifei Liu', 'sifei liu')<br/>('1715634', 'Ming-Hsuan Yang', 'ming-hsuan yang')</td><td></td></tr><tr><td>48e6c6d981efe2c2fb0ae9287376fcae59da9878</td><td>Sidekick Policy Learning
+<br/>for Active Visual Exploration
+<br/><b>The University of Texas at Austin, Austin, TX</b><br/>2 Facebook AI Research, 300 W. Sixth St. Austin, TX 78701
+</td><td>('21810992', 'Santhosh K. Ramakrishnan', 'santhosh k. ramakrishnan')<br/>('1794409', 'Kristen Grauman', 'kristen grauman')</td><td>srama@cs.utexas.edu, grauman@fb.com(cid:63)
+</td></tr><tr><td>48174c414cfce7f1d71c4401d2b3d49ba91c5338</td><td>Robust Performance-driven 3D Face Tracking in Long Range Depth Scenes
+<br/><b>Rutgers University, USA</b><br/><b>Hong Kong Polytechnic University, Hong Kong</b><br/><b>School of Computer Engineering, Nanyang Technological University, Singapore</b></td><td>('1965812', 'Chongyu Chen', 'chongyu chen')<br/>('40643777', 'Luc N. Dao', 'luc n. dao')<br/>('1736042', 'Vladimir Pavlovic', 'vladimir pavlovic')<br/>('1688642', 'Jianfei Cai', 'jianfei cai')<br/>('1775268', 'Tat-Jen Cham', 'tat-jen cham')</td><td>{hxp1,vladimir}@cs.rutgers.edu
+<br/>{nldao,asjfcai,astfcham}@ntu.edu.sg
+<br/>cscychen@comp.polyu.edu.hk
+</td></tr><tr><td>48a5b6ee60475b18411a910c6084b3a32147b8cd</td><td>Pedestrian attribute recognition with part-based CNN
+<br/>and combined feature representations
+<br/>Baskurt
+<br/>To cite this version:
+<br/>recognition with part-based CNN and combined feature representations. VISAPP2018, Jan 2018,
+<br/>Funchal, Portugal. <hal-01625470>
+<br/>HAL Id: hal-01625470
+<br/>https://hal.archives-ouvertes.fr/hal-01625470
+<br/>Submitted on 21 Jun 2018
+<br/>HAL is a multi-disciplinary open access
+<br/>archive for the deposit and dissemination of sci-
+<br/>entific research documents, whether they are pub-
+<br/>lished or not. The documents may come from
+<br/>teaching and research institutions in France or
+<br/><b>abroad, or from public or private research centers</b><br/>L’archive ouverte pluridisciplinaire HAL, est
+<br/>destinée au dépôt et à la diffusion de documents
+<br/>scientifiques de niveau recherche, publiés ou non,
+<br/>émanant des établissements d’enseignement et de
+<br/>recherche français ou étrangers, des laboratoires
+<br/>publics ou privés.
+</td><td>('1705461', 'Yiqiang Chen', 'yiqiang chen')<br/>('1762557', 'Stefan Duffner', 'stefan duffner')<br/>('10469201', 'Andrei Stoian', 'andrei stoian')<br/>('1733569', 'Jean-Yves Dufour', 'jean-yves dufour')<br/>('1705461', 'Yiqiang Chen', 'yiqiang chen')<br/>('1762557', 'Stefan Duffner', 'stefan duffner')<br/>('10469201', 'Andrei Stoian', 'andrei stoian')<br/>('1733569', 'Jean-Yves Dufour', 'jean-yves dufour')<br/>('1739898', 'Atilla Baskurt', 'atilla baskurt')</td><td></td></tr><tr><td>488375ae857a424febed7c0347cc9590989f01f7</td><td>Convolutional neural networks for the analysis of broadcasted
+<br/>tennis games
+<br/><b>Institute of Computer Science, Foundation for Research and Technology - Hellas (FORTH), Crete, 73100, Greece</b><br/>(cid:63) NantVision Inc., Culver City, CA, 90230, USA.
+<br/><b>University of Crete, Crete, 73100, Greece</b></td><td>('2272443', 'Grigorios Tsagkatakis', 'grigorios tsagkatakis')<br/>('40495798', 'Mustafa Jaber', 'mustafa jaber')<br/>('1694755', 'Panagiotis Tsakalides', 'panagiotis tsakalides')</td><td></td></tr><tr><td>4836b084a583d2e794eb6a94982ea30d7990f663</td><td>Cascaded Face Alignment via Intimacy Definition Feature
+<br/><b>The Hong Kong Polytechnic University</b><br/><b>Hong Kong Applied Science and Technology Research Institute Company Limited</b><br/>Hong Kong, China
+<br/>
+</td><td>('2116302', 'Hailiang Li', 'hailiang li')<br/>('1703078', 'Kin-Man Lam', 'kin-man lam')<br/>('2233216', 'Kangheng Wu', 'kangheng wu')<br/>('1982263', 'Zhibin Lei', 'zhibin lei')</td><td>harley.li@connect.polyu.hk,{harleyli, edmondchiu, khwu, lei}@astri.org, enkmlam@polyu.edu.hk
+</td></tr><tr><td>4866a5d6d7a40a26f038fc743e16345c064e9842</td><td></td><td></td><td></td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation
+<br/>Center for Biometrics and Security Research & National Laboratory of Pattern Recognition
+<br/><b>Institute of Automation, Chinese Academy of Sciences (CASIA</b><br/>95 Zhongguancun East Road, 100190, Beijing, China
+</td><td>('1739258', 'Jianqing Zhu', 'jianqing zhu')<br/>('40397682', 'Shengcai Liao', 'shengcai liao')<br/>('1718623', 'Zhen Lei', 'zhen lei')<br/>('1716143', 'Dong Yi', 'dong yi')<br/>('34679741', 'Stan Z. Li', 'stan z. li')</td><td>{jqzhu, scliao, zlei, dyi, szli}@cbsr.ia.ac.cn
+</td></tr><tr><td>487df616e981557c8e1201829a1d0ec1ecb7d275</td><td>Acoustic Echo Cancellation Using a Vector-Space-Based
+<br/>Adaptive Filtering Algorithm
+</td><td>('1742704', 'Yu Tsao', 'yu tsao')<br/>('1757214', 'Shih-Hau Fang', 'shih-hau fang')<br/>('40466874', 'Yao Shiao', 'yao shiao')</td><td></td></tr><tr><td>48f211a9764f2bf6d6dda4a467008eda5680837a</td><td></td><td></td><td></td></tr><tr><td>4858d014bb5119a199448fcd36746c413e60f295</td><td></td><td></td><td></td></tr><tr><td>48319e611f0daaa758ed5dcf5a6496b4c6ef45f2</td><td>Non Binary Local Gradient Contours for Face Recognition
+<br/><b>P.A. College of Engnineering, Mangalore</b><br/>bSenior IEEE Member, Department of Electrical and Electronics Engineering, Aligarh Muslim
+<br/><b>P A College of Engineering, Nadupadavu</b><br/>As the features from the traditional Local Binary patterns (LBP) and Local Directional Patterns (LDP) are
+<br/>found to be ineffective for face recognition, we have proposed a new approach derived on the basis of Information
+<br/>sets whereby the loss of information that occurs during the binarization is eliminated. The information sets
+<br/>as a product. Since face is having smooth texture in a limited area, the extracted features must be highly
+<br/>discernible. To limit the number of features, we consider only the non overlapping windows. By the application
+<br/>of the information set theory we can reduce the number of feature of an image. The derived features are shown
+<br/>to work fairly well over eigenface, fisherface and LBP methods.
+<br/>Keywords: Local Binary Pattern, Local Directional Pattern, Information Sets, Gradient Contour, Support
+<br/>Vector Machine, KNN, Face Recognition.
+<br/>1. INTRODUCTION
+<br/>In face recognition, the major issue to be ad-
+<br/>dressed is the extraction of features which are
+<br/>discriminating in nature [1], [2]. The accuracy
+<br/>of classification depends upon which texture fea-
+<br/>ture of the face are extracted e.g., geometrical,
+<br/>statistical, local or global features in addition to
+<br/>representation of these features and the design
+<br/>extraction algorithm should produce little vari-
+<br/>ance of features within the class and large vari-
+<br/>ance between the classes. There are typically
+<br/>two common approaches to extract facial fea-
+<br/>tures: geometric-feature-based and appearance-
+<br/>based methods. The geometric-feature-based [[3],
+<br/>[4]] method encodes the shape and locations of
+<br/>different facial components, which are combined
+<br/>into a feature vector that represents the face.
+<br/>An illustration of this method is the graph-based
+<br/>method [5], that uses several facial components
+<br/>to create a representation of the face and pro-
+<br/>cess it. The Local-Global Graph algorithm [5] ap-
+<br/>proach makes use Voronoi tessellation and Delau-
+<br/>nay graphs to segment local features and builds
+<br/>a graph. These features are combined into a lo-
+<br/>cal graph, and then the skeleton (global graph)
+<br/>is created by interrelating the local graphs to
+<br/>represent the topology of the face. The major
+<br/>requirements of geometric-feature-based methods
+<br/>is accurate and reliable facial feature detection
+<br/>and tracking, which is difficult to accommodate
+<br/>in many situations.
+<br/>In the case of appearance
+<br/>based methods, there are many methods for the
+<br/>holistic classes such as, Eigenfaces [6] and Fisher-
+<br/>faces [7], which are built on Principal Component
+<br/>Analysis (PCA) [6], to the more recent 2D-PCA
+<br/>[8], and Linear Discriminant Analysis [9] are also
+<br/>examples of holistic methods. The [10] and [11]
+<br/>makes use of image filters, either on the whole
+<br/>face to create holistic features, or some specific
+<br/>face-region to create local features, to extract the
+</td><td>('1913846', 'Abdullah Gubbi', 'abdullah gubbi')<br/>('2093112', 'Mohammad Fazle Azeem', 'mohammad fazle azeem')</td><td>Nadupadavu, Mangalore, India, Contact: abdullahgubbi@yahoo.com
+<br/>University, India, Contact: mf.azeem@gmail.com
+<br/>Mangalore, India. Contact: sharmilabp@gmail.com
+</td></tr><tr><td>4896909796f9bd2f70a2cb24bf18daacd6a12128</td><td>Spatial Bag of Features Learning for Large Scale
+<br/>Face Image Retrieval
+<br/><b>Aristotle University of Thessaloniki, Thessaloniki, Greece</b></td><td>('3200630', 'Nikolaos Passalis', 'nikolaos passalis')<br/>('1737071', 'Anastasios Tefas', 'anastasios tefas')</td><td>passalis@csd.auth.gr, tefas@aiia.csd.auth.gr
+</td></tr><tr><td>48cfc5789c246c6ad88ff841701204fc9d6577ed</td><td>J Inf Process Syst, Vol.12, No.3, pp.392~409, September 2016
+<br/>
+<br/>
+<br/>ISSN 1976-913X (Print)
+<br/>ISSN 2092-805X (Electronic)
+<br/>Age Invariant Face Recognition Based on DCT
+<br/>Feature Extraction and Kernel Fisher Analysis
+</td><td>('17349931', 'Leila Boussaad', 'leila boussaad')<br/>('2411455', 'Mohamed Benmohammed', 'mohamed benmohammed')<br/>('2123013', 'Redha Benzid', 'redha benzid')</td><td></td></tr><tr><td>481fb0a74528fa7706669a5cce6a212ac46eaea3</td><td>Recognizing RGB Images by Learning from RGB-D Data
+<br/><b>Institute for Infocomm Research, Agency for Science, Technology and Research, Singapore</b><br/><b>School of Computer Engineering, Nanyang Technological University, Singapore</b></td><td>('39253009', 'Lin Chen', 'lin chen')<br/>('38188040', 'Dong Xu', 'dong xu')</td><td></td></tr><tr><td>70f189798c8b9f2b31c8b5566a5cf3107050b349</td><td>The Challenge of Face Recognition from Digital Point-and-Shoot Cameras
+<br/>David Bolme‡
+</td><td>('1757322', 'J. Ross Beveridge', 'j. ross beveridge')<br/>('1750370', 'Geof H. Givens', 'geof h. givens')<br/>('2067993', 'W. Todd Scruggs', 'w. todd scruggs')<br/>('32028519', 'P. Jonathon Phillips', 'p. jonathon phillips')<br/>('1733571', 'Yui Man Lui', 'yui man lui')<br/>('1799014', 'Kevin W. Bowyer', 'kevin w. bowyer')<br/>('36903861', 'Mohammad Nayeem Teli', 'mohammad nayeem teli')<br/>('1704876', 'Patrick J. Flynn', 'patrick j. flynn')<br/>('1694404', 'Bruce A. Draper', 'bruce a. draper')<br/>('40370804', 'Hao Zhang', 'hao zhang')<br/>('9099328', 'Su Cheng', 'su cheng')</td><td></td></tr><tr><td>70580ed8bc482cad66e059e838e4a779081d1648</td><td>Acta Polytechnica Hungarica
+<br/>Vol. 10, No. 4, 2013
+<br/>Gender Classification using Multi-Level
+<br/>Wavelets on Real World Face Images
+<br/><b>Shaheed Zulfikar Ali Bhutto Institute of</b><br/>Science and Technology, Plot # 67, Street # 9, H/8-4 Islamabad, 44000, Pakistan
+<br/>isb.edu.pk
+</td><td>('35332495', 'Sajid Ali Khan', 'sajid ali khan')<br/>('1723986', 'Muhammad Nazir', 'muhammad nazir')<br/>('2521631', 'Naveed Riaz', 'naveed riaz')</td><td>sajid.ali@szabist-isb.edu.pk, nazir@szabist-isb.edu.pk, n.r.ansari@szabist-
+</td></tr><tr><td>70109c670471db2e0ede3842cbb58ba6be804561</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Zero-Shot Visual Recognition via Bidirectional Latent Embedding
+<br/>Received: date / Accepted: date
+</td><td>('47599321', 'Qian Wang', 'qian wang')</td><td></td></tr><tr><td>703890b7a50d6535900a5883e8d2a6813ead3a03</td><td></td><td></td><td></td></tr><tr><td>703dc33736939f88625227e38367cfb2a65319fe</td><td>Labeling Temporal Bounds for Object Interactions in Egocentric Video
+<br/>Trespassing the Boundaries:
+<br/><b>University of Bristol, United Kingdom</b><br/>Walterio Mayol-Cuevas
+</td><td>('3420479', 'Davide Moltisanti', 'davide moltisanti')<br/>('2052236', 'Michael Wray', 'michael wray')<br/>('1728459', 'Dima Damen', 'dima damen')</td><td><FirstName>.<LastName>@bristol.ac.uk
+</td></tr><tr><td>70db3a0d2ca8a797153cc68506b8650908cb0ada</td><td>An Overview of Research Activities in Facial
+<br/>Age Estimation Using the FG-NET Aging
+<br/>Database
+<br/>Visual Media Computing Lab,
+<br/>Dept. of Multimedia and Graphic Arts,
+<br/><b>Cyprus University of Technology, Cyprus</b></td><td>('31950370', 'Gabriel Panis', 'gabriel panis')<br/>('1830709', 'Andreas Lanitis', 'andreas lanitis')</td><td>gpanis@gmail.com, andreas.lanitis@cut.ac.cy
+</td></tr><tr><td>706236308e1c8d8b8ba7749869c6b9c25fa9f957</td><td>Crowdsourced Data Collection of Facial Responses
+<br/>MIT Media Lab
+<br/>Cambridge
+<br/>02139, USA
+<br/>Rosalind Picard
+<br/>MIT Media Lab
+<br/>Cambridge
+<br/>02139, USA
+<br/>MIT Media Lab
+<br/>Cambridge
+<br/>02139, USA
+</td><td>('1801452', 'Daniel McDuff', 'daniel mcduff')<br/>('1754451', 'Rana El Kaliouby', 'rana el kaliouby')</td><td>djmcduff@media.mit.edu
+<br/>kaliouby@media.mit.edu
+<br/>picard@media.mit.edu
+</td></tr><tr><td>701f56f0eac9f88387de1f556acef78016b05d52</td><td>Direct Shape Regression Networks for End-to-End Face Alignment
+<br/>1 ∗
+<br/><b>University of Texas at Arlington, TX, USA, 2Beihang University, Beijing, China</b><br/><b>Xidian University, Xi an, China, 4 University of Pittsburgh, PA, USA</b></td><td>('6050999', 'Xin Miao', 'xin miao')<br/>('34798935', 'Xiantong Zhen', 'xiantong zhen')<br/>('1720747', 'Vassilis Athitsos', 'vassilis athitsos')<br/>('6820648', 'Xianglong Liu', 'xianglong liu')<br/>('1748032', 'Heng Huang', 'heng huang')<br/>('50542664', 'Cheng Deng', 'cheng deng')</td><td>xin.miao@mavs.uta.edu, zhenxt@gmail.com, xlliu@nlsde.edu.cn, chdeng.xd@gmail.com
+<br/>athitsos@uta.edu, heng.huang@pitt.edu
+</td></tr><tr><td>7002d6fc3e0453320da5c863a70dbb598415e7aa</td><td>Electrical Engineering
+<br/><b>University of California, Riverside</b><br/>Date: Friday, October 21, 2011
+<br/>Location: EBU2 Room 205/206
+<br/>Time: 12:10am
+<br/>Understanding Discrete Facial
+<br/>Expression in Video Using Emotion
+<br/>Avatar Image
+</td><td>('1803478', 'Songfan Yang', 'songfan yang')</td><td></td></tr><tr><td>7071cd1ee46db4bc1824c4fd62d36f6d13cad08a</td><td>Face Detection through Scale-Friendly Deep Convolutional Networks
+<br/><b>The Chinese University of Hong Kong</b></td><td>('1692609', 'Shuo Yang', 'shuo yang')<br/>('3331521', 'Yuanjun Xiong', 'yuanjun xiong')<br/>('1717179', 'Chen Change Loy', 'chen change loy')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td>{ys014, yjxiong, ccloy, xtang}@ie.cuhk,edu.hk
+</td></tr><tr><td>706b9767a444de4fe153b2f3bff29df7674c3161</td><td>Fast Metric Learning For Deep Neural Networks
+<br/><b>University of Waikato, Hamilton, New Zealand</b><br/><b>School of Engineering, University of Waikato, Hamilton, New Zealand</b></td><td>('2319565', 'Henry Gouk', 'henry gouk')<br/>('1737420', 'Bernhard Pfahringer', 'bernhard pfahringer')</td><td>hgrg1@students.waikato.ac.nz, bernhard@waikato.ac.nz
+<br/>cree@waikato.ac.nz
+</td></tr><tr><td>70c58700eb89368e66a8f0d3fc54f32f69d423e1</td><td>INCORPORATING SCALABILITY IN UNSUPERVISED SPATIO-TEMPORAL FEATURE
+<br/>LEARNING
+<br/><b>University of California, Riverside, CA</b></td><td>('49616225', 'Sujoy Paul', 'sujoy paul')<br/>('2177805', 'Sourya Roy', 'sourya roy')<br/>('1688416', 'Amit K. Roy-Chowdhury', 'amit k. roy-chowdhury')</td><td></td></tr><tr><td>707a542c580bcbf3a5a75cce2df80d75990853cc</td><td>Disentangled Variational Representation for Heterogeneous Face Recognition
+<br/>1 Center for Research on Intelligent Perception and Computing (CRIPAC), CASIA, Beijing, China
+<br/>2 National Laboratory of Pattern Recognition (NLPR), CASIA, Beijing, China
+<br/><b>School of Arti cial Intelligence, University of Chinese Academy of Sciences, Beijing, China</b><br/><b>Johns Hopkins University, 3400 N. Charles St, Baltimore, MD 21218, USA</b></td><td>('2225749', 'Xiang Wu', 'xiang wu')<br/>('32885778', 'Huaibo Huang', 'huaibo huang')<br/>('1741177', 'Vishal M. Patel', 'vishal m. patel')<br/>('1705643', 'Ran He', 'ran he')<br/>('1757186', 'Zhenan Sun', 'zhenan sun')</td><td>alfredxiangwu@gmail.com, huaibo.huang@cripac.ia.ac.cn,
+<br/>vpatel36@jhu.edu, {rhe, znsun}@nlpr.ia.ac.cn
+</td></tr><tr><td>70569810e46f476515fce80a602a210f8d9a2b95</td><td>Apparent Age Estimation from Face Images Combining General and
+<br/>Children-Specialized Deep Learning Models
+<br/>1Orange Labs – France Telecom, 4 rue Clos Courtel, 35512 Cesson-S´evign´e, France
+<br/>2Eurecom, 450 route des Chappes, 06410 Biot, France
+</td><td>('3116433', 'Grigory Antipov', 'grigory antipov')<br/>('2341854', 'Moez Baccouche', 'moez baccouche')<br/>('1708844', 'Sid-Ahmed Berrani', 'sid-ahmed berrani')<br/>('1709849', 'Jean-Luc Dugelay', 'jean-luc dugelay')</td><td>{grigory.antipov,moez.baccouche,sidahmed.berrani}@orange.com, jean-luc.dugelay@eurecom.fr
+</td></tr><tr><td>704d88168bdfabe31b6ff484507f4a2244b8c52b</td><td>MLtuner: System Support for Automatic Machine Learning Tuning
+<br/><b>Carnegie Mellon University</b></td><td>('1874200', 'Henggang Cui', 'henggang cui')<br/>('1707164', 'Gregory R. Ganger', 'gregory r. ganger')<br/>('1974678', 'Phillip B. Gibbons', 'phillip b. gibbons')</td><td></td></tr><tr><td>70e79d7b64f5540d309465620b0dab19d9520df1</td><td>International Journal of Scientific & Engineering Research, Volume 8, Issue 3, March-2017
+<br/>ISSN 2229-5518
+<br/>Facial Expression Recognition System
+<br/>Using Extreme Learning Machine
+</td><td>('3274320', 'Firoz Mahmud', 'firoz mahmud')<br/>('2376022', 'Md. Al Mamun', 'md. al mamun')</td><td></td></tr><tr><td>7003d903d5e88351d649b90d378f3fc5f211282b</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 68– No.23, April 2013
+<br/>Facial Expression Recognition using Gabor Wavelet
+<br/>ENTC SVERI’S COE (Poly),
+<br/>Pandharpur,
+<br/>Solapur, India
+<br/>ENTC SVERI’S COE,
+<br/>Pandharpur,
+<br/>Solapur, India
+<br/>ENTC SVERI’S COE (Poly),
+<br/>Pandharpur,
+<br/>Solapur, India
+</td><td>('2730988', 'Mahesh Kumbhar', 'mahesh kumbhar')<br/>('10845943', 'Manasi Patil', 'manasi patil')<br/>('2707920', 'Ashish Jadhav', 'ashish jadhav')</td><td></td></tr><tr><td>703c9c8f20860a1b1be63e6df1622b2021b003ca</td><td>Flip-Invariant Motion Representation
+<br/><b>National Institute of Advanced Industrial Science and Technology</b><br/>Umezono 1-1-1, Tsukuba, Japan
+</td><td>('1800592', 'Takumi Kobayashi', 'takumi kobayashi')</td><td>takumi.kobayashi@aist.go.jp
+</td></tr><tr><td>70a69569ba61f3585cd90c70ca5832e838fa1584</td><td>Friendly Faces:
+<br/>Weakly supervised character identification
+<br/><b>CVSSP, University of Surrey, UK</b></td><td>('2735914', 'Matthew Marter', 'matthew marter')<br/>('1695195', 'Richard Bowden', 'richard bowden')</td><td>{m.marter, s.hadfield, r.bowden} @surrey.ac.uk
+</td></tr><tr><td>70bf1769d2d5737fc82de72c24adbb7882d2effd</td><td>Face detection in intelligent ambiences with colored illumination
+<br/>Department of Intelligent Systems
+<br/>TU Delft
+<br/>Delft, The Netherlands
+</td><td>('3137870', 'Christina Katsimerou', 'christina katsimerou')<br/>('1728396', 'Ingrid Heynderickx', 'ingrid heynderickx')</td><td></td></tr><tr><td>70c9d11cad12dc1692a4507a97f50311f1689dbf</td><td>Video Frame Synthesis using Deep Voxel Flow
+<br/><b>The Chinese University of Hong Kong</b><br/>3Pony.AI Inc.
+<br/><b>University of Illinois at Urbana-Champaign</b><br/>4Google Inc.
+</td><td>('3243969', 'Ziwei Liu', 'ziwei liu')</td><td>{lz013,xtang}@ie.cuhk.edu.hk
+<br/>yiming@pony.ai
+<br/>yeh17@illinois.edu
+<br/>aseemaa@google.com
+</td></tr><tr><td>1e5ca4183929929a4e6f09b1e1d54823b8217b8e</td><td>Classification in the Presence of Heavy
+<br/>Label Noise: A Markov Chain Sampling
+<br/>Framework
+<br/>by
+<br/><b>B.Eng., Nankai University</b><br/>Thesis Submitted in Partial Fulfillment of the
+<br/>Requirements for the Degree of
+<br/>Master of Science
+<br/>in the
+<br/>School of Computing Science
+<br/>Faculty of Applied Sciences
+<br/><b>SIMON FRASER UNIVERSITY</b><br/>Summer 2017
+<br/>However, in accordance with the Copyright Act of Canada, this work may be
+<br/>reproduced without authorization under the conditions for “Fair Dealing.”
+<br/>Therefore, limited reproduction of this work for the purposes of private study,
+<br/>research, education, satire, parody, criticism, review and news reporting is likely
+<br/>All rights reserved.
+<br/>to be in accordance with the law, particularly if cited appropriately.
+</td><td>('3440173', 'Zijin Zhao', 'zijin zhao')<br/>('3440173', 'Zijin Zhao', 'zijin zhao')</td><td></td></tr><tr><td>1e058b3af90d475bf53b3f977bab6f4d9269e6e8</td><td>Manifold Relevance Determination
+<br/><b>University of Shef eld, UK</b><br/><b>KTH Royal Institute of Technology, CVAP Lab, Stockholm, Sweden</b><br/>Wellcome Trust Centre for Human Genetics, Roosevelt Drive, Oxford OX3 7BN, UK
+<br/><b>University of Shef eld, UK</b></td><td>('3106771', 'Andreas C. Damianou', 'andreas c. damianou')<br/>('2484138', 'Carl Henrik Ek', 'carl henrik ek')<br/>('1722732', 'Michalis K. Titsias', 'michalis k. titsias')<br/>('1739851', 'Neil D. Lawrence', 'neil d. lawrence')</td><td>ANDREAS.DAMIANOU@SHEFFIELD.AC.UK
+<br/>CHEK@CSC.KTH.SE
+<br/>MTITSIAS@WELL.OX.AC.UK
+<br/>N.LAWRENCE@SHEFFIELD.AC.UK
+</td></tr><tr><td>1e799047e294267087ec1e2c385fac67074ee5c8</td><td>IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. 21, NO. 12, DECEMBER 1999
+<br/>1357
+<br/>Short Papers___________________________________________________________________________________________________
+<br/>Automatic Classification of
+<br/>Single Facial Images
+</td><td>('1709339', 'Michael J. Lyons', 'michael j. lyons')<br/>('2240088', 'Julien Budynek', 'julien budynek')<br/>('34801422', 'Shigeru Akamatsu', 'shigeru akamatsu')</td><td></td></tr><tr><td>1ef4815f41fa3a9217a8a8af12cc385f6ed137e1</td><td>Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
+</td><td>('34399452', 'Erroll Wood', 'erroll wood')<br/>('2520795', 'Xucong Zhang', 'xucong zhang')<br/>('1751242', 'Yusuke Sugano', 'yusuke sugano')<br/>('39626495', 'Peter Robinson', 'peter robinson')<br/>('3194727', 'Andreas Bulling', 'andreas bulling')</td><td>University of Cambridge, United Kingdom {eww23,tb346,pr10}@cam.ac.uk
+<br/>Max Planck Institute for Informatics, Germany {xczhang,sugano,bulling}@mpi-inf.mpg.de
+</td></tr><tr><td>1eb4ea011a3122dc7ef3447e10c1dad5b69b0642</td><td>Contextual Visual Recognition from Images and Videos
+<br/>Jitendra Malik
+<br/>Electrical Engineering and Computer Sciences
+<br/><b>University of California at Berkeley</b><br/>Technical Report No. UCB/EECS-2016-132
+<br/>http://www.eecs.berkeley.edu/Pubs/TechRpts/2016/EECS-2016-132.html
+<br/>July 19, 2016
+</td><td>('2082991', 'Georgia Gkioxari', 'georgia gkioxari')</td><td></td></tr><tr><td>1e7ae86a78a9b4860aa720fb0fd0bdc199b092c3</td><td>Article
+<br/>A Brief Review of Facial Emotion Recognition Based
+<br/>on Visual Information
+<br/>Byoung Chul Ko ID
+<br/>Tel.: +82-10-3559-4564
+<br/>Received: 6 December 2017; Accepted: 25 January 2018; Published: 30 January 2018
+</td><td></td><td>Department of Computer Engineering, Keimyung University, Daegu 42601, Korea; niceko@kmu.ac.kr;
+</td></tr><tr><td>1e8eee51fd3bf7a9570d6ee6aa9a09454254689d</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TPAMI.2016.2582166, IEEE
+<br/>Transactions on Pattern Analysis and Machine Intelligence
+<br/>Face Search at Scale
+</td><td>('7496032', 'Dayong Wang', 'dayong wang')<br/>('40653304', 'Charles Otto', 'charles otto')<br/>('6680444', 'Anil K. Jain', 'anil k. jain')</td><td></td></tr><tr><td>1ea8085fe1c79d12adffb02bd157b54d799568e4</td><td></td><td></td><td></td></tr><tr><td>1ea74780d529a458123a08250d8fa6ef1da47a25</td><td>Videos from the 2013 Boston Marathon:
+<br/>An Event Reconstruction Dataset for
+<br/>Synchronization and Localization
+<br/>CMU-LTI-018
+<br/><b>Language Technologies Institute</b><br/>School of Computer Science
+<br/><b>Carnegie Mellon University</b><br/>5000 Forbes Ave., Pittsburgh, PA 15213
+<br/>www.lti.cs.cmu.edu
+<br/>© October 1, 2016
+</td><td>('49252656', 'Jia Chen', 'jia chen')<br/>('1915796', 'Junwei Liang', 'junwei liang')<br/>('47896638', 'Han Lu', 'han lu')<br/>('2927024', 'Shoou-I Yu', 'shoou-i yu')<br/>('7661726', 'Alexander G. Hauptmann', 'alexander g. hauptmann')</td><td></td></tr><tr><td>1ebdfceebad642299e573a8995bc5ed1fad173e3</td><td></td><td></td><td></td></tr><tr><td>1eec03527703114d15e98ef9e55bee5d6eeba736</td><td>UNIVERSITÄT KARLSRUHE (TH)
+<br/>FAKULTÄT FÜR INFORMATIK
+<br/>INTERACTIVE SYSTEMS LABS
+<br/>DIPLOMA THESIS
+<br/>Automatic identification
+<br/>of persons in TV series
+<br/>SUBMITTED BY
+<br/>MAY 2008
+<br/>ADVISORS
+</td><td>('12141635', 'A. Waibel', 'a. waibel')<br/>('2284204', 'Mika Fischer', 'mika fischer')<br/>('1742325', 'Rainer Stiefelhagen', 'rainer stiefelhagen')</td><td></td></tr><tr><td>1e07500b00fcd0f65cf30a11f9023f74fe8ce65c</td><td>WHOLE SPACE SUBCLASS DISCRIMINANT ANALYSIS FOR FACE RECOGNITION
+<br/><b>Institute for Infocomm Research, A*STAR, Singapore</b></td><td>('1709001', 'Bappaditya Mandal', 'bappaditya mandal')<br/>('35718875', 'Liyuan Li', 'liyuan li')<br/>('1802086', 'Vijay Chandrasekhar', 'vijay chandrasekhar')</td><td>Email: {bmandal, lyli, vijay, joohwee}@i2r.a-star.edu.sg
+</td></tr><tr><td>1e19ea6e7f1c04a18c952ce29386252485e4031e</td><td>International Association of Scientific Innovation and Research (IASIR)
+<br/>(An Association Unifying the Sciences, Engineering, and Applied Research)
+<br/>ISSN (Print): 2279-0047
+<br/>ISSN (Online): 2279-0055
+<br/> International Journal of Emerging Technologies in Computational
+<br/>and Applied Sciences (IJETCAS)
+<br/>www.iasir.net
+<br/>MATLAB Based Face Recognition System Using PCA and Neural Network
+<br/>1Faculty of Computer Science & Engineering, 2Research Scholar
+<br/><b>University Institute of Engineering and Technology</b><br/><b>Kurukshetra University, Kurukshetra-136 119, Haryana, INDIA</b></td><td>('1989126', 'Sanjeev Dhawan', 'sanjeev dhawan')<br/>('7940433', 'Himanshu Dogra', 'himanshu dogra')</td><td>E-mail (s): rsdhawan@rediffmail.com, himanshu.dogra.13@gmail.com
+</td></tr><tr><td>1ec98785ac91808455b753d4bc00441d8572c416</td><td>Curriculum Learning for Facial Expression Recognition
+<br/><b>Language Technologies Institute, School of Computer Science</b><br/><b>Carnegie Mellon University, USA</b><br/>few years,
+</td><td>('1970583', 'Liangke Gui', 'liangke gui')<br/>('1767184', 'Louis-Philippe Morency', 'louis-philippe morency')</td><td></td></tr><tr><td>1ed6c7e02b4b3ef76f74dd04b2b6050faa6e2177</td><td>Face Detection with a 3D Model
+<br/>Department of Statistics
+<br/><b>Florida State University</b><br/><b>National Institutes of Health</b></td><td>('2455529', 'Adrian Barbu', 'adrian barbu')<br/>('2230628', 'Nathan Lay', 'nathan lay')</td><td>abarbu@stat.fsu.edu
+<br/>nathan.lay@nih.gov
+</td></tr><tr><td>1efacaa0eaa7e16146c34cd20814d1411b35538e</td><td>HEIDARIVINCHEHET AL: ACTIONCOMPLETION:A TEMPORALMODEL..
+<br/>Action Completion:
+<br/>A Temporal Model for Moment Detection
+<br/>Department of Computer Science
+<br/><b>University of Bristol</b><br/>Bristol, UK
+</td><td>('10007321', 'Farnoosh Heidarivincheh', 'farnoosh heidarivincheh')<br/>('1728108', 'Majid Mirmehdi', 'majid mirmehdi')<br/>('1728459', 'Dima Damen', 'dima damen')</td><td>Farnoosh.Heidarivincheh@bristol.ac.uk
+<br/>M.Mirmehdi@bristol.ac.uk
+<br/>Dima.Damen@bristol.ac.uk
+</td></tr><tr><td>1eba6fc35a027134aa8997413647b49685f6fbd1</td><td>Superpower Glass: Delivering
+<br/>Unobtrusive Real-time Social Cues
+<br/>in Wearable Systems
+<br/>Dennis Wall
+<br/><b>Stanford University</b><br/>Stanford, CA 94305, USA
+<br/>Permission to make digital or hard copies of part or all of this work for
+<br/>personal or classroom use is granted without fee provided that copies are
+<br/>not made or distributed for profit or commercial advantage and that copies
+<br/>bear this notice and the full citation on the first page. Copyrights for third-
+<br/>party components of this work must be honored. For all other uses, contact
+<br/>the Owner/Author.
+<br/>Copyright is held by the owner/author(s).
+<br/>Ubicomp/ISWC'16 Adjunct , September 12-16, 2016, Heidelberg, Germany
+<br/>ACM 978-1-4503-4462-3/16/09.
+<br/>http://dx.doi.org/10.1145/2968219.2968310
+</td><td>('21701693', 'Catalin Voss', 'catalin voss')<br/>('40026202', 'Peter Washington', 'peter washington')<br/>('32551479', 'Nick Haber', 'nick haber')<br/>('40494635', 'Aaron Kline', 'aaron kline')<br/>('34240128', 'Jena Daniels', 'jena daniels')<br/>('3407835', 'Azar Fazel', 'azar fazel')<br/>('3457025', 'Titas De', 'titas de')<br/>('3456914', 'Beth McCarthy', 'beth mccarthy')<br/>('34925386', 'Carl Feinstein', 'carl feinstein')<br/>('1699245', 'Terry Winograd', 'terry winograd')</td><td>catalin@cs.stanford.edu
+<br/>peterwashington@stanford.edu
+<br/>nhaber@stanford.edu
+<br/>akline@stanford.edu
+<br/>danielsj@stanford.edu
+<br/>azarf@stanford.edu
+<br/>titasde@stanford.edu
+<br/>bethmac@stanford.edu
+<br/>carlf@stanford.edu
+<br/>winograd@cs.stanford.edu
+<br/>dpwall@stanford.edu
+</td></tr><tr><td>1e1d7cbbef67e9e042a3a0a9a1bcefcc4a9adacf</td><td>A Multi-Level Contextual Model For Person Recognition in Photo Albums
+<br/><b>Stevens Institute of Technology</b><br/>‡Adobe Research
+<br/>(cid:92)Microsoft Research
+</td><td>('3131569', 'Haoxiang Li', 'haoxiang li')<br/>('1721019', 'Jonathan Brandt', 'jonathan brandt')<br/>('1720987', 'Xiaohui Shen', 'xiaohui shen')<br/>('1745420', 'Gang Hua', 'gang hua')</td><td>†hli18@stevens.edu
+<br/>‡{jbrandt, zlin, xshen}@adobe.com
+<br/>(cid:92)ganghua@microsoft.com
+</td></tr><tr><td>1ef1f33c48bc159881c5c8536cbbd533d31b0e9a</td><td>Z. ZHANG ET AL.: ADVERSARIAL TRAINING FOR ACTION UNIT RECOGNITION
+<br/>Identity-based Adversarial Training of Deep
+<br/>CNNs for Facial Action Unit Recognition
+<br/>Department of Computer Science
+<br/><b>State University of New York at</b><br/>Binghamton
+<br/>NY, USA.
+</td><td>('47294008', 'Zheng Zhang', 'zheng zhang')<br/>('2443456', 'Shuangfei Zhai', 'shuangfei zhai')<br/>('8072251', 'Lijun Yin', 'lijun yin')</td><td>zzhang27@cs.binghamton.edu
+<br/>szhai2@cs.binghamton.edu
+<br/>lijun@cs.binghamton.edu
+</td></tr><tr><td>1ef5ce743a44d8a454dbfc2657e1e2e2d025e366</td><td>Global Journal of Computer Science & Technology
+<br/>Volume 11 Issue Version 1.0 April 2011
+<br/>Type: Double Blind Peer Reviewed International Research Journal
+<br/>Publisher: Global Journals Inc. (USA)
+<br/>Online ISSN: 0975-4172 & Print ISSN: 0975-4350
+<br/>
+<br/>Accurate Corner Detection Methods using Two Step Approach
+<br/><b>Thapar University</b></td><td>('1765523', 'Nitin Bhatia', 'nitin bhatia')<br/>('9180065', 'Megha Chhabra', 'megha chhabra')</td><td></td></tr><tr><td>1e58d7e5277288176456c66f6b1433c41ca77415</td><td>Bootstrapping Fine-Grained Classifiers:
+<br/>Active Learning with a Crowd in the Loop
+<br/><b>Brown University, 2University of California, San Diego, 3California Institute of Technology</b></td><td>('40541456', 'Genevieve Patterson', 'genevieve patterson')</td><td>{gen, hays}@cs.brown.edu gvanhorn@ucsd.edu sjb@cs.ucsd.edu
+<br/>perona@caltech.edu
+</td></tr><tr><td>1e5a1619fe5586e5ded2c7a845e73f22960bbf5a</td><td>Group Membership Prediction
+<br/><b>Boston University</b></td><td>('7969330', 'Ziming Zhang', 'ziming zhang')<br/>('9772059', 'Yuting Chen', 'yuting chen')<br/>('1699322', 'Venkatesh Saligrama', 'venkatesh saligrama')</td><td>{zzhang14, yutingch, srv}@bu.edu
+</td></tr><tr><td>1e9f1bbb751fe538dde9f612f60eb946747defaa</td><td>Journal of Systems Engineering and Electronics
+<br/>Vol. 28, No. 4, August 2017, pp.784 – 792
+<br/>Identity-aware convolutional neural networks for
+<br/>facial expression recognition
+<br/><b>The Big Data Research Center, Henan University, Kaifeng 475001, China</b><br/><b>Tampere University of Technology, Tampere 33720, Finland</b></td><td>('34461878', 'Chongsheng Zhang', 'chongsheng zhang')<br/>('39720477', 'Pengyou Wang', 'pengyou wang')<br/>('40611812', 'Ke Chen', 'ke chen')</td><td></td></tr><tr><td>1e917fe7462445996837934a7e46eeec14ebc65f</td><td>Expression Classification using
+<br/>Wavelet Packet Method
+<br/>on Asymmetry Faces
+<br/>CMU-RI-TR-06-03
+<br/>January 2006
+<br/><b>Robotics Institute</b><br/><b>Carnegie Mellon University</b><br/>Pittsburgh, Pennsylvania 15213
+<br/><b>Carnegie Mellon University</b></td><td>('1689241', 'Yanxi Liu', 'yanxi liu')</td><td></td></tr><tr><td>1e8394cc9fe7c2392aa36fb4878faf7e78bbf2de</td><td>TO APPEAR IN IEEE THMS
+<br/>Zero-Shot Object Recognition System
+<br/>based on Topic Model
+</td><td>('2800072', 'Wai Lam Hoo', 'wai lam hoo')<br/>('2863960', 'Chee Seng Chan', 'chee seng chan')</td><td></td></tr><tr><td>1ef4aac0ebc34e76123f848c256840d89ff728d0</td><td></td><td></td><td></td></tr><tr><td>1ecb56e7c06a380b3ce582af3a629f6ef0104457</td><td>List of Contents Vol.8
+<br/>Contents of
+<br/>Journal of Advanced Computational
+<br/> Intelligence and Intelligent Informatics
+<br/>Volume 8
+<br/>Vol.8 No.1, January 2004
+<br/>Editorial:
+<br/>o Special Issue on Selected Papers from Humanoid,
+<br/>Papers:
+<br/>o Dynamic Color Object Recognition Using Fuzzy
+<br/>Nano-technology, Information Technology,
+<br/>Communication and Control, Environment, and
+<br/>Management (HNICEM’03).
+<br/>. 1
+<br/>Elmer P. Dadios
+<br/>Papers:
+<br/>o A New Way of Discovery of Belief, Desire and
+<br/>Intention in the BDI Agent-Based Software
+<br/>Modeling .
+<br/>. 2
+<br/>o Integration of Distributed Robotic Systems
+<br/>. 7
+<br/>Fakhri Karray, Rogelio Soto, Federico Guedea,
+<br/>and Insop Song
+<br/>o A Searching and Tracking Framework for
+<br/>Multi-Robot Observation of Multiple Moving
+<br/>Targets .
+<br/>. 14
+<br/>Zheng Liu, Marcelo H. Ang Jr., and Winston
+<br/>Khoon Guan Seah
+<br/>Development Paper:
+<br/>o Possibilistic Uncertainty Propagation and
+<br/>Compromise Programming in the Life Cycle
+<br/>Analysis of Alternative Motor Vehicle Fuels
+<br/>Raymond R. Tan, Alvin B. Culaba, and
+<br/>Michael R. I. Purvis
+<br/>. 23
+<br/>Logic .
+<br/>Napoleon H. Reyes, and Elmer P. Dadios
+<br/>. 29
+<br/>o A Optical Coordinate Measuring Machine for
+<br/>Nanoscale Dimensional Metrology .
+<br/>. 39
+<br/>Eric Kirkland, Thomas R. Kurfess, and Steven
+<br/>Y. Liang
+<br/>o Humanoid Robot HanSaRam: Recent Progress
+<br/>and Developments .
+<br/>. 45
+<br/>Jong-Hwan Kim, Dong-Han Kim, Yong-Jae
+<br/>Kim, Kui-Hong Park, Jae-Ho Park,
+<br/>Choon-Kyoung Moon, Jee-Hwan Ryu, Kiam
+<br/>Tian Seow, and Kyoung-Chul Koh
+<br/>o Generalized Associative Memory Models: Their
+<br/>Memory Capacities and Potential Application
+<br/>. 56
+<br/>Teddy N. Yap, Jr., and Arnulfo P. Azcarraga
+<br/>o Hybrid Fuzzy Logic Strategy for Soccer Robot
+<br/>Game.
+<br/>. 65
+<br/>Elmer A. Maravillas , Napoleon H. Reyes, and
+<br/>Elmer P. Dadios
+<br/>o Image Compression and Reconstruction Based on
+<br/>Fuzzy Relation and Soft Computing
+<br/>Technology .
+<br/>. 72
+<br/>Kaoru Hirota, Hajime Nobuhara, Kazuhiko
+<br/>Kawamoto, and Shin’ichi Yoshida
+<br/>Vol.8 No.2, March 2004
+<br/>Editorial:
+<br/>o Special Issue on Pattern Recognition .
+<br/>. 83
+<br/>Papers:
+<br/>o Operation of Spatiotemporal Patterns Stored in
+<br/>Osamu Hasegawa
+<br/>Review:
+<br/>o Support Vector Machine and Generalization . 84
+<br/>Takio Kurita
+<br/>o Bayesian Network: Probabilistic Reasoning,
+<br/>Statistical Learning, and Applications .
+<br/>. 93
+<br/>Yoichi Motomura
+<br/>Living Neuronal Networks Cultured on a
+<br/>Microelectrode Array .
+<br/>Suguru N. Kudoh, and Takahisa Taguchi
+<br/>o Rapid Discriminative Learning .
+<br/>. 100
+<br/>. 108
+<br/>Jun Rokui
+<br/>o Robust Fuzzy Clustering Based on Similarity
+<br/>between Data .
+<br/>Kohei Inoue, and Kiichi Urahama
+<br/>Vol.8 No.6, 2004
+<br/>Journal of Advanced Computational Intelligence
+<br/>and Intelligent Informatics
+<br/>. 115
+<br/>I-1
+</td><td>('33358236', 'Chang-Hyun Jo', 'chang-hyun jo')</td><td></td></tr><tr><td>1e64b2d2f0a8a608d0d9d913c4baee6973995952</td><td>DOMINANT AND
+<br/>COMPLEMENTARY MULTI-
+<br/>EMOTIONAL FACIAL
+<br/>EXPRESSION RECOGNITION
+<br/>USING C-SUPPORT VECTOR
+<br/>CLASSIFICATION
+</td><td>('19172816', 'Christer Loob', 'christer loob')<br/>('2303909', 'Pejman Rasti', 'pejman rasti')<br/>('7855312', 'Sergio Escalera', 'sergio escalera')<br/>('2531522', 'Tomasz Sapinski', 'tomasz sapinski')<br/>('34969391', 'Dorota Kaminska', 'dorota kaminska')<br/>('3087532', 'Gholamreza Anbarjafari', 'gholamreza anbarjafari')</td><td></td></tr><tr><td>1e21b925b65303ef0299af65e018ec1e1b9b8d60</td><td>Under review as a conference paper at ICLR 2017
+<br/>UNSUPERVISED CROSS-DOMAIN IMAGE GENERATION
+<br/>Facebook AI Research
+<br/>Tel-Aviv, Israel
+</td><td>('2188620', 'Yaniv Taigman', 'yaniv taigman')<br/>('33964593', 'Adam Polyak', 'adam polyak')</td><td>{yaniv,adampolyak,wolf}@fb.com
+</td></tr><tr><td>1ee27c66fabde8ffe90bd2f4ccee5835f8dedbb9</td><td>Entropy Regularization
+<br/>The problem of semi-supervised induction consists in learning a decision rule from
+<br/>labeled and unlabeled data. This task can be undertaken by discriminative methods,
+<br/>provided that learning criteria are adapted consequently. In this chapter, we moti-
+<br/>vate the use of entropy regularization as a means to bene(cid:12)t from unlabeled data in
+<br/>the framework of maximum a posteriori estimation. The learning criterion is derived
+<br/>from clearly stated assumptions and can be applied to any smoothly parametrized
+<br/>model of posterior probabilities. The regularization scheme favors low density sep-
+<br/>aration, without any modeling of the density of input features. The contribution
+<br/>of unlabeled data to the learning criterion induces local optima, but this problem
+<br/>can be alleviated by deterministic annealing. For well-behaved models of posterior
+<br/>probabilities, deterministic annealing EM provides a decomposition of the learning
+<br/>problem in a series of concave subproblems. Other approaches to the semi-supervised
+<br/>problem are shown to be close relatives or limiting cases of entropy regularization.
+<br/>A series of experiments illustrates the good behavior of the algorithm in terms of
+<br/>performance and robustness with respect to the violation of the postulated low den-
+<br/>sity separation assumption. The minimum entropy solution bene(cid:12)ts from unlabeled
+<br/>data and is able to challenge mixture models and manifold learning in a number of
+<br/>situations.
+<br/>9.1 Introduction
+<br/>semi-supervised
+<br/>induction
+<br/>This chapter addresses semi-supervised induction, which refers to the learning of
+<br/>a decision rule, on the entire input domain X, from labeled and unlabeled data.
+<br/>The objective is identical to the one of supervised classi(cid:12)cation: generalize from
+<br/>examples. The problem di(cid:11)ers in the respect that the supervisor’s responses are
+<br/>missing for some training examples. This characteristic is shared with transduction,
+<br/>which has however a di(cid:11)erent goal, that is, of predicting labels on a set of prede(cid:12)ned
+</td><td>('1802711', 'Yves Grandvalet', 'yves grandvalet')<br/>('1751762', 'Yoshua Bengio', 'yoshua bengio')</td><td></td></tr><tr><td>1ee3b4ba04e54bfbacba94d54bf8d05fd202931d</td><td>Indonesian Journal of Electrical Engineering and Computer Science
+<br/>Vol. 12, No. 2, November 2018, pp. 476~481
+<br/>ISSN: 2502-4752, DOI: 10.11591/ijeecs.v12.i2.pp476-481
+<br/> 476
+<br/>Celebrity Face Recognition using Deep Learning
+<br/>1,2,3Faculty of Computer and Mathematical Sciences, UniversitiTeknologi MARA (UiTM),
+<br/>4Faculty of Computer and Mathematical Sciences, UniversitiTeknologi MARA (UiTM),
+<br/> Shah Alam, Selangor, Malaysia
+<br/>Campus Jasin, Melaka, Malaysia
+<br/>Article Info
+<br/>Article history:
+<br/>Received May 29, 2018
+<br/>Revised Jul 30, 2018
+<br/>Accepted Aug 3, 2018
+<br/>Keywords:
+<br/>AlexNet
+<br/>Convolutional neural network
+<br/>Deep learning
+<br/>Face recognition
+<br/>GoogLeNet
+</td><td>('2743254', 'Zaidah Ibrahim', 'zaidah ibrahim')</td><td></td></tr><tr><td>1e41a3fdaac9f306c0ef0a978ae050d884d77d2a</td><td>411
+<br/>Robust Object Recognition with
+<br/>Cortex-Like Mechanisms
+<br/>Tomaso Poggio, Member, IEEE
+</td><td>('1981539', 'Thomas Serre', 'thomas serre')<br/>('1776343', 'Lior Wolf', 'lior wolf')<br/>('1996960', 'Maximilian Riesenhuber', 'maximilian riesenhuber')</td><td></td></tr><tr><td>1e94cc91c5293c8fc89204d4b881552e5b2ce672</td><td>Unsupervised Alignment of Actions in Video with Text Descriptions
+<br/><b>University of Rochester, Rochester, NY, USA</b><br/><b>Indian Institute of Technology Delhi, New Delhi, India</b></td><td>('3193978', 'Young Chol Song', 'young chol song')<br/>('2296971', 'Iftekhar Naim', 'iftekhar naim')<br/>('1782355', 'Abdullah Al Mamun', 'abdullah al mamun')<br/>('38370357', 'Kaustubh Kulkarni', 'kaustubh kulkarni')<br/>('35108153', 'Parag Singla', 'parag singla')<br/>('33642939', 'Jiebo Luo', 'jiebo luo')<br/>('1793218', 'Daniel Gildea', 'daniel gildea')</td><td></td></tr><tr><td>1e1e66783f51a206509b0a427e68b3f6e40a27c8</td><td>SEMI-SUPERVISED ESTIMATION OF PERCEIVED AGE
+<br/>FROM FACE IMAGES
+<br/>VALWAY Technology Center, NEC Soft, Ltd., Tokyo, Japan
+<br/>Keywords:
+</td><td>('2163491', 'Kazuya Ueki', 'kazuya ueki')<br/>('1719221', 'Masashi Sugiyama', 'masashi sugiyama')</td><td>ueki@mxf.nes.nec.co.jp
+</td></tr><tr><td>1efaa128378f988965841eb3f49d1319a102dc36</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+<br/>Hierarchical binary CNNs for landmark
+<br/>localization with limited resources
+</td><td>('3458121', 'Adrian Bulat', 'adrian bulat')<br/>('2610880', 'Georgios Tzimiropoulos', 'georgios tzimiropoulos')</td><td></td></tr><tr><td>1e8eec6fc0e4538e21909ab6037c228547a678ba</td><td><b>IMPERIAL COLLEGE</b><br/><b>University of London</b><br/>enVisage : Face Recognition in
+<br/>Videos
+<br/>Supervisor : Dr. Stefan Rüeger
+<br/>June 14, 2006
+</td><td>('23558890', 'Ashwin Venkatraman', 'ashwin venkatraman')<br/>('35805861', 'Ian Harries', 'ian harries')</td><td>(av102@doc.ic.ac.uk)
+</td></tr><tr><td>1e6ed6ca8209340573a5e907a6e2e546a3bf2d28</td><td>Pooling Faces: Template based Face Recognition with Pooled Face Images
+<br/>Prem Natarajan1
+<br/>Gérard Medioni3
+<br/><b>Information Sciences Institute, USC, CA, USA</b><br/><b>The Open University of Israel, Israel</b><br/><b>Institute for Robotics and Intelligent Systems, USC, CA, USA</b></td><td>('1756099', 'Tal Hassner', 'tal hassner')<br/>('11269472', 'Iacopo Masi', 'iacopo masi')<br/>('5911467', 'Jungyeon Kim', 'jungyeon kim')<br/>('1689391', 'Jongmoo Choi', 'jongmoo choi')<br/>('35840854', 'Shai Harel', 'shai harel')</td><td></td></tr><tr><td>8451bf3dd6bcd946be14b1a75af8bbb65a42d4b2</td><td>Consensual and Privacy-Preserving Sharing of
+<br/>Multi-Subject and Interdependent Data
+<br/>EPFL, UNIL–HEC Lausanne
+<br/>K´evin Huguenin
+<br/>UNIL–HEC Lausanne
+<br/>EPFL
+<br/>EPFL
+</td><td>('1862343', 'Alexandra-Mihaela Olteanu', 'alexandra-mihaela olteanu')<br/>('2461431', 'Italo Dacosta', 'italo dacosta')<br/>('1757221', 'Jean-Pierre Hubaux', 'jean-pierre hubaux')</td><td>alexandramihaela.olteanu@epfl.ch
+<br/>kevin.huguenin@unil.ch
+<br/>italo.dacosta@epfl.ch
+<br/>jean-pierre.hubaux@epfl.ch
+</td></tr><tr><td>841855205818d3a6d6f85ec17a22515f4f062882</td><td>Low Resolution Face Recognition in the Wild
+<br/>Patrick Flynn1
+<br/>1Department of Computer Science and Engineering
+<br/><b>University of Notre Dame</b><br/>2Department of Computer Science
+<br/>Pontificia Universidad Cat´olica de Chile
+</td><td>('50492554', 'Pei Li', 'pei li')<br/>('47522390', 'Loreto Prieto', 'loreto prieto')<br/>('1797475', 'Domingo Mery', 'domingo mery')</td><td></td></tr><tr><td>84c0f814951b80c3b2e39caf3925b56a9b2e1733</td><td>Manifesto from Dagstuhl Perspectives Workshop 12382
+<br/>Computation and Palaeography: Potentials and Limits∗
+<br/>Edited by
+<br/><b>The Open University of</b><br/><b>University of Nebraska Lincoln, USA</b><br/><b>King s College London, UK</b><br/><b>The Blavatnik School of Computer Science, Tel Aviv University, IL</b></td><td>('1756099', 'Tal Hassner', 'tal hassner')<br/>('34564710', 'Malte Rehbein', 'malte rehbein')<br/>('34876976', 'Peter A. Stokes', 'peter a. stokes')<br/>('1776343', 'Lior Wolf', 'lior wolf')</td><td>Israel, IL, hassner@openu.ac.il
+<br/>malte.rehbein@unl.edu
+<br/>peter.stokes@kcl.ac.uk
+<br/>wolf@cs.tau.ac.il
+</td></tr><tr><td>84fe5b4ac805af63206012d29523a1e033bc827e</td><td></td><td></td><td></td></tr><tr><td>84e4b7469f9c4b6c9e73733fa28788730fd30379</td><td>Duong et al. EURASIP Journal on Advances in Signal Processing (2018) 2018:10
+<br/>DOI 10.1186/s13634-017-0521-9
+<br/>EURASIP Journal on Advances
+<br/>in Signal Processing
+<br/>R ES EAR CH
+<br/>Projective complex matrix factorization for
+<br/>facial expression recognition
+<br/>Open Access
+</td><td>('2345136', 'Viet-Hang Duong', 'viet-hang duong')<br/>('2033188', 'Yuan-Shan Lee', 'yuan-shan lee')<br/>('1782417', 'Jian-Jiun Ding', 'jian-jiun ding')<br/>('34759060', 'Bach-Tung Pham', 'bach-tung pham')<br/>('30065390', 'Manh-Quan Bui', 'manh-quan bui')<br/>('35196812', 'Pham The Bao', 'pham the bao')<br/>('3205648', 'Jia-Ching Wang', 'jia-ching wang')</td><td></td></tr><tr><td>84dcf04802743d9907b5b3ae28b19cbbacd97981</td><td></td><td></td><td></td></tr><tr><td>841bf196ee0086c805bd5d1d0bddfadc87e424ec</td><td>International Journal of Signal Processing, Image Processing and Pattern Recognition
+<br/>Vol. 5, No. 4, December, 2012
+<br/>Locally Kernel-based Nonlinear Regression for Face Recognition
+<br/>South Tehran Branch, Electrical Engineering Department, Tehran, Iran
+<br/><b>Islamic Azad University</b><br/><b>Amirkabir University of Technology</b><br/>Electrical Engineering Department,Tehran, Iran
+</td><td>('3345810', 'Yaser Arianpour', 'yaser arianpour')<br/>('2630546', 'Sedigheh Ghofrani', 'sedigheh ghofrani')<br/>('1685153', 'Hamidreza Amindavar', 'hamidreza amindavar')</td><td>st_y_arianpour@azad.ac.ir, s_ghofrani@azad.ac.ir and hamidami@aut.ac.ir
+</td></tr><tr><td>842d82081f4b27ca2d4bc05c6c7e389378f0c7b8</td><td>ELEKTROTEHNI ˇSKI VESTNIK 78(1-2): 12–17, 2011
+<br/>ENGLISH EDITION
+<br/>Usage of affective computing in recommender systems
+<br/>Marko Tkalˇciˇc, Andrej Koˇsir, Jurij Tasiˇc
+<br/><b>University of Ljubljana, Faculty of Electrical Engineering, Trzaska 25, 1000 Ljubljana, Slovenia</b></td><td></td><td>E-mail: marko.tkalcic@fe.uni-lj.si
+</td></tr><tr><td>84fa126cb19d569d2f0147bf6f9e26b54c9ad4f1</td><td>Improved Boosting Performance by Explicit
+<br/>Handling of Ambiguous Positive Examples
+</td><td>('1750517', 'Miroslav Kobetski', 'miroslav kobetski')<br/>('1736906', 'Josephine Sullivan', 'josephine sullivan')</td><td></td></tr><tr><td>84508e846af3ac509f7e1d74b37709107ba48bde</td><td>Use of the Septum as a Reference Point in a Neurophysiologic Approach to
+<br/>Facial Expression Recognition
+<br/>Department of Computer Engineering, Faculty of Engineering,
+<br/><b>Prince of Songkla University, Hat Yai, Songkhla, 90112 Thailand</b><br/>Telephone: (66)080-7045015, (66)074-287-357
+</td><td>('38928684', 'Igor Stankovic', 'igor stankovic')<br/>('2799130', 'Montri Karnjanadecha', 'montri karnjanadecha')</td><td>E-mail: bizmut@neobee.net, montri@coe.psu.ac.th
+</td></tr><tr><td>841a5de1d71a0b51957d9be9d9bebed33fb5d9fa</td><td>5017
+<br/>PCANet: A Simple Deep Learning Baseline for
+<br/>Image Classification?
+</td><td>('1926757', 'Tsung-Han Chan', 'tsung-han chan')<br/>('2370507', 'Kui Jia', 'kui jia')<br/>('1702868', 'Shenghua Gao', 'shenghua gao')<br/>('1697700', 'Jiwen Lu', 'jiwen lu')<br/>('1920683', 'Zinan Zeng', 'zinan zeng')<br/>('1700297', 'Yi Ma', 'yi ma')</td><td></td></tr><tr><td>84e6669b47670f9f4f49c0085311dce0e178b685</td><td>Face frontalization for Alignment and Recognition
+<br/>∗Department of Computing,
+<br/><b>Imperial College London</b><br/>180 Queens Gate,
+<br/>†EEMCS,
+<br/><b>University of Twente</b><br/>Drienerlolaan 5,
+<br/>London SW7 2AZ, U.K.
+<br/>7522 NB Enschede, The Netherlands
+</td><td>('3320415', 'Christos Sagonas', 'christos sagonas')<br/>('1780393', 'Yannis Panagakis', 'yannis panagakis')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')<br/>('1694605', 'Maja Pantic', 'maja pantic')</td><td>{c.sagonas, i.panagakis, s.zafeiriou, m.pantic}@imperial.ac.uk
+</td></tr><tr><td>847e07387142c1bcc65035109ccce681ef88362c</td><td>Feature Synthesis Using Genetic Programming for Face
+<br/>Expression Recognition
+<br/>Center for research in intelligent systems
+<br/><b>University of California, Riverside CA 92521-0425, USA</b></td><td>('1707159', 'Bir Bhanu', 'bir bhanu')<br/>('1723555', 'Jiangang Yu', 'jiangang yu')<br/>('1711543', 'Xuejun Tan', 'xuejun tan')<br/>('1742735', 'Yingqiang Lin', 'yingqiang lin')</td><td>{bhanu, jyu, xtan, yqlin}@cris.ucr.edu
+</td></tr><tr><td>8411fe1142935a86b819f065cd1f879f16e77401</td><td>International Journal of Artificial Intelligence & Applications (IJAIA), Vol. 4, No. 6, November 2013
+<br/>Facial Recognition using Modified Local Binary
+<br/>Pattern and Random Forest
+<br/>Department of Computer Science,
+<br/><b>North Carolina AandT State University</b><br/>Greensboro, NC 27411
+</td><td>('3536162', 'Brian O’Connor', 'brian o’connor')<br/>('34999544', 'Kaushik Roy', 'kaushik roy')</td><td></td></tr><tr><td>843e6f1e226480e8a6872d8fd7b7b2cd74b637a4</td><td>Research Journal of Applied Sciences, Engineering and Technology 4(22): 4724-4728, 2012
+<br/>ISSN: 2040-7467
+<br/>© Maxwell Scientific Organization, 2012
+<br/>Submitted: March 31, 2012
+<br/>Accepted: April 30, 2012
+<br/> Published: November 15, 2012
+<br/>Palmprint Recognition Using Directional Representation and
+<br/>Compresses Sensing
+<br/>1Shandong Provincial Key Laboratory of computer Network, Shandong Computer
+<br/>Science Center, Jinan 250014, China
+<br/><b>School of Mechanical Engineering, Southwest Jiaotong University, Chengdu 610031, China</b></td><td>('2112738', 'Hengjian Li', 'hengjian li')</td><td></td></tr><tr><td>84f904a71bee129a1cf00dc97f6cdbe1011657e6</td><td>Fashioning with Networks: Neural Style Transfer to Design
+<br/>Clothes
+<br/><b>University Of Maryland</b><br/>Baltimore County (UMBC),
+<br/><b>University Of Maryland</b><br/>Baltimore County (UMBC),
+<br/><b>University Of Maryland</b><br/>Baltimore County (UMBC),
+<br/>Baltimore, MD,
+<br/>USA
+<br/>Baltimore, MD,
+<br/>USA
+<br/>Baltimore, MD,
+<br/>USA
+</td><td>('30834050', 'Prutha Date', 'prutha date')<br/>('2116290', 'Ashwinkumar Ganesan', 'ashwinkumar ganesan')<br/>('1756624', 'Tim Oates', 'tim oates')</td><td>dprutha1@umbc.edu
+<br/>gashwin1@umbc.edu
+<br/>oates@cs.umbc.edu
+</td></tr><tr><td>849f891973ad2b6c6f70d7d43d9ac5805f1a1a5b</td><td>Detecting Faces Using Region-based Fully
+<br/>Convolutional Networks
+<br/>Tencent AI Lab, China
+</td><td>('1996677', 'Yitong Wang', 'yitong wang')</td><td>{yitongwang,denisji,encorezhou,hawelwang,michaelzfli}@tencent.com
+</td></tr><tr><td>846c028643e60fefc86bae13bebd27341b87c4d1</td><td>Face Recognition Under Varying Illumination
+<br/>Based on MAP Estimation Incorporating
+<br/>Correlation Between Surface Points
+<br/>1 Panasonic Tokyo (Matsushita Electric Industrial Co., Ltd.,)
+<br/>4–3–1 Tsunashima-higashi, Kohoku-ku, Yokohama City, Kanagawa 223–8639, Japan
+<br/><b>Institute of Industrial Science, The University of Tokyo</b><br/>4–6–1 Komaba, Meguro-ku Tokyo 153–8505, Japan
+<br/><b>National Institute of Informatics</b><br/>2–1–2 Hitotsubashi, Chiyoda-ku Tokyo 101–8430, Japan
+</td><td>('20877506', 'Mihoko Shimano', 'mihoko shimano')<br/>('1977815', 'Kenji Nagao', 'kenji nagao')<br/>('1706742', 'Takahiro Okabe', 'takahiro okabe')<br/>('1746794', 'Imari Sato', 'imari sato')<br/>('9467266', 'Yoichi Sato', 'yoichi sato')</td><td>shimano.mhk@jp.panasonic.com
+<br/>{takahiro, ysato}@iis.u-tokyo.ac.jp
+<br/>imarik@nii.ac.jp
+</td></tr><tr><td>4a14a321a9b5101b14ed5ad6aa7636e757909a7c</td><td>Learning Semi-Supervised Representation Towards a Unified Optimization
+<br/>Framework for Semi-Supervised Learning
+<br/><b>School of Info. and Commu. Engineering, Beijing University of Posts and Telecommunications</b><br/><b>Key Laboratory of Machine Perception (MOE), School of EECS, Peking University</b><br/><b>Cooperative Medianet Innovation Center, Shanghai Jiaotong University</b></td><td>('9171002', 'Chun-Guang Li', 'chun-guang li')<br/>('33383055', 'Zhouchen Lin', 'zhouchen lin')<br/>('1720776', 'Honggang Zhang', 'honggang zhang')<br/>('39954962', 'Jun Guo', 'jun guo')</td><td>{lichunguang, zhhg, guojun}@bupt.edu.cn; zlin@pku.edu.cn
+</td></tr><tr><td>4adca62f888226d3a16654ca499bf2a7d3d11b71</td><td>Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics, pages 572–582,
+<br/>Sofia, Bulgaria, August 4-9 2013. c(cid:13)2013 Association for Computational Linguistics
+<br/>572
+</td><td></td><td></td></tr><tr><td>4aa286914f17cd8cefa0320e41800a99c142a1cd</td><td>Leveraging Context to Support Automated Food Recognition in Restaurants
+<br/>School of Interactive Computing
+<br/><b>Georgia Institute of Technology, Atlanta, Georgia, USA</b><br/>http://www.vbettadapura.com/egocentric/food
+</td><td>('3115428', 'Vinay Bettadapura', 'vinay bettadapura')<br/>('39642711', 'Edison Thomaz', 'edison thomaz')<br/>('2943897', 'Aman Parnami', 'aman parnami')<br/>('9267108', 'Gregory D. Abowd', 'gregory d. abowd')</td><td></td></tr><tr><td>4a9d906935c9de019c61aedc10b77ee10e3aec63</td><td>Cross Modal Distillation for Supervision Transfer
+<br/><b>University of California, Berkeley</b></td><td>('3134457', 'Saurabh Gupta', 'saurabh gupta')<br/>('4742485', 'Judy Hoffman', 'judy hoffman')<br/>('1689212', 'Jitendra Malik', 'jitendra malik')</td><td>{sgupta, jhoffman, malik}@eecs.berkeley.edu
+</td></tr><tr><td>4a2d54ea1da851151d43b38652b7ea30cdb6dfb2</td><td>Direct Recognition of Motion Blurred Faces
+</td><td>('39487011', 'Kaushik Mitra', 'kaushik mitra')<br/>('2715270', 'Priyanka Vageeswaran', 'priyanka vageeswaran')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td></td></tr><tr><td>4ae59d2a28abd76e6d9fb53c9e7ece833dce7733</td><td>A Survey on Mobile Affective Computing
+<br/>Shengkai Zhang and Pan Hui
+<br/>Department of Computer Science and Engineering
+<br/><b>The Hong Kong University of Science and Technology</b></td><td></td><td>{szhangaj, panhui}@cse.ust.hk
+</td></tr><tr><td>4ab10174a4f98f7e2da7cf6ccfeb9bc64c8e7da8</td><td><b>Graz University of Technology</b><br/><b>Institute for Computer Graphics and Vision</b><br/>Dissertation
+<br/>Efficient Metric Learning for
+<br/>Real-World Face Recognition
+<br/>Graz, Austria, December 2013
+<br/>Thesis supervisors
+<br/>Prof. Dr. Horst Bischof
+<br/>Prof. Dr. Fernando De la Torre
+</td><td>('1993853', 'Martin Köstinger', 'martin köstinger')</td><td></td></tr><tr><td>4ab84f203b0e752be83f7f213d7495b04b1c4c79</td><td>CONCAVE LOSSES FOR ROBUST DICTIONARY LEARNING
+<br/><b>University of S ao Paulo</b><br/><b>Institute of Mathematics and Statistics</b><br/>Rua do Mat˜ao, 1010 – 05508-090 – S˜ao Paulo-SP, Brazil
+<br/>Universit´e de Rouen Normandie
+<br/>LITIS EA 4108
+<br/>76800 Saint- ´Etienne-du-Rouvray, France
+</td><td>('30146203', 'Rafael Will M. de Araujo', 'rafael will m. de araujo')<br/>('1792962', 'Alain Rakotomamonjy', 'alain rakotomamonjy')</td><td></td></tr><tr><td>4a484d97e402ed0365d6cf162f5a60a4d8000ea0</td><td>A Crowdsourcing Approach for Finding Misidentifications of Bibliographic Records
+<br/><b>University of Tsukuba</b><br/>2 National Diet Library
+<br/>3 Doshisha Univeristy
+</td><td>('34573158', 'Atsuyuki Morishima', 'atsuyuki morishima')<br/>('32857584', 'Takanori Kawashima', 'takanori kawashima')<br/>('23161591', 'Takashi Harada', 'takashi harada')<br/>('2406721', 'Sho Sato', 'sho sato')</td><td></td></tr><tr><td>4a3758f283b7c484d3f164528d73bc8667eb1591</td><td>Attribute Enhanced Face Aging with Wavelet-based Generative Adversarial
+<br/>Networks
+<br/>Center for Research on Intelligent Perception and Computing, CASIA
+<br/>National Laboratory of Pattern Recognition, CASIA
+</td><td>('1860829', 'Yunfan Liu', 'yunfan liu')<br/>('1682467', 'Qi Li', 'qi li')<br/>('1757186', 'Zhenan Sun', 'zhenan sun')</td><td>yunfan,liu@cripac.ia.ac.cn, {qli, znsun}@nlpr.ia.ac.cn
+</td></tr><tr><td>4a4da3d1bbf10f15b448577e75112bac4861620a</td><td>FACE, EXPRESSION, AND IRIS RECOGNITION
+<br/>USING LEARNING-BASED APPROACHES
+<br/>by
+<br/>A dissertation submitted in partial fulfillment of
+<br/>the requirements for the degree of
+<br/>Doctor of Philosophy
+<br/>(Computer Sciences)
+<br/>at the
+<br/><b>UNIVERSITY OF WISCONSIN MADISON</b><br/>2006
+</td><td>('1822413', 'Guodong Guo', 'guodong guo')</td><td></td></tr><tr><td>4abd49538d04ea5c7e6d31701b57ea17bc349412</td><td>Recognizing Fine-Grained and Composite Activities
+<br/>using Hand-Centric Features and Script Data
+</td><td>('34849128', 'Marcus Rohrbach', 'marcus rohrbach')<br/>('40404576', 'Sikandar Amin', 'sikandar amin')</td><td></td></tr><tr><td>4aa093d1986b4ad9b073ac9edfb903f62c00e0b0</td><td>Facial Recognition with
+<br/>Encoded Local Projections
+<br/>Mechanincal and Mechatronics Engineering
+<br/><b>University of Waterloo</b><br/>Waterloo, Canada
+<br/>Kimia Lab
+<br/><b>University of Waterloo</b><br/>Waterloo, Canada
+</td><td>('34139904', 'Dhruv Sharma', 'dhruv sharma')<br/>('7641396', 'Sarim Zafar', 'sarim zafar')<br/>('38685017', 'Morteza Babaie', 'morteza babaie')</td><td></td></tr><tr><td>4a0f98d7dbc31497106d4f652968c708f7da6692</td><td>Real-time Eye Gaze Direction Classification Using
+<br/>Convolutional Neural Network
+</td><td>('3110004', 'Anjith George', 'anjith george')<br/>('2680543', 'Aurobinda Routray', 'aurobinda routray')</td><td></td></tr><tr><td>4aabd6db4594212019c9af89b3e66f39f3108aac</td><td><b>University of Colorado, Boulder</b><br/>CU Scholar
+<br/>Undergraduate Honors Theses
+<br/>Honors Program
+<br/>Spring 2015
+<br/>The Mere Exposure Effect and Classical
+<br/>Conditioning
+<br/>Follow this and additional works at: http://scholar.colorado.edu/honr_theses
+<br/>Part of the Cognition and Perception Commons, and the Cognitive Psychology Commons
+<br/>Recommended Citation
+<br/>Wong, Rosalyn, "The Mere Exposure Effect and Classical Conditioning" (2015). Undergraduate Honors Theses. Paper 937.
+<br/>This Thesis is brought to you for free and open access by Honors Program at CU Scholar. It has been accepted for inclusion in Undergraduate Honors
+</td><td>('10191508', 'Rosalyn Wong', 'rosalyn wong')</td><td>University of Colorado Boulder, Rosalyn.Wong@Colorado.EDU
+<br/>Theses by an authorized administrator of CU Scholar. For more information, please contact cuscholaradmin@colorado.edu.
+</td></tr><tr><td>4adb97b096b700af9a58d00e45a2f980136fcbb5</td><td>Exploring Temporal Preservation Networks for Precise Temporal Action
+<br/>Localization
+<br/>National Laboratory for Parallel and Distributed Processing,
+<br/><b>National University of Defense Technology</b><br/>Changsha, China
+</td><td>('40520103', 'Ke Yang', 'ke yang')<br/>('2292038', 'Peng Qiao', 'peng qiao')<br/>('40252278', 'Dongsheng Li', 'dongsheng li')<br/>('1893776', 'Shaohe Lv', 'shaohe lv')<br/>('1791001', 'Yong Dou', 'yong dou')</td><td>{yangke13,pengqiao,dongshengli,yongdou,shaohelv}@nudt.edu.cn
+</td></tr><tr><td>4a5592ae1f5e9fa83d9fa17451c8ab49608421e4</td><td>Multi-modal Social Signal Analysis for Predicting
+<br/>Agreement in Conversation Settings
+<br/><b>IN3, Open University of</b><br/>Catalonia, Roc Boronat, 117,
+<br/>08018 Barcelona, Spain.
+<br/><b>University of</b><br/>Barcelona, Gran Via, 585,
+<br/>08007 Barcelona, Spain.
+<br/>Computer Vision Center, UAB,
+<br/>08193 Barcelona, Spain.
+<br/><b>University of</b><br/>Barcelona, Gran Via, 585,
+<br/>08007 Barcelona, Spain.
+<br/>Computer Vision Center, UAB,
+<br/>08193 Barcelona, Spain.
+<br/><b>EIMT, Open University of</b><br/>Catalonia, Rbla. Poblenou,
+<br/>156, 08018 Barcelona, Spain.
+<br/>Computer Vision Center, UAB,
+<br/>08193 Barcelona, Spain.
+</td><td>('1960768', 'Víctor Ponce-López', 'víctor ponce-lópez')<br/>('7855312', 'Sergio Escalera', 'sergio escalera')<br/>('1857280', 'Xavier Baró', 'xavier baró')</td><td>vponcel@uoc.edu
+<br/>sergio@maia.ub.es
+<br/>xbaro@uoc.edu
+</td></tr><tr><td>4a1a5316e85528f4ff7a5f76699dfa8c70f6cc5c</td><td> MVA2005 IAPR Conference on Machine VIsion Applications, May 16-18, 2005 Tsukuba Science City, Japan
+<br/>3-22
+<br/>Face Recognition using Local Features based on Two-layer Block M odel
+<br/>W onjun Hwang1 Ji-Yeun Kim Seokcheol Kee
+<br/>Computing Lab.,
+<br/><b>Samsung Advanced Institute of Technology</b><br/>combined by Yang and etc [7]. The sparsification of LFA
+<br/>helps the reduction of dimension of image in LDA scheme
+<br/>and local topological property is more useful than holistic
+<br/>property of PCA in recognition, but there is still structural
+<br/>problem because the method to select the features is
+<br/>designed for minimization of reconstruction error, not for
+<br/>increasing discriminability in face model.
+<br/>In this paper, we proposed the novel recognition
+<br/>algorithm to merge LFA and LDA method. We do not use
+<br/>the existing sparsification method for selecting features but
+<br/>adopt the two-layer block model to make several groups
+<br/>with topographic local features in similar position. Each
+<br/>local block, flocked local features, can represent its own
+<br/>local property and at
+<br/>time holistic face
+<br/>information. Flocks of local features can easily solve the
+<br/>small sample size problem in LDA without discarding
+<br/>unselected local features, and LDA scheme can extract the
+<br/>important information for recognition not in focus of
+<br/>representation. M oreover, we can extract lots of vectors on
+<br/>separated viewpoint from different layer model in one face
+<br/>image and they have the property robust to environmental
+<br/>changes and overfitting problem as compared with limited
+<br/>number of features vectors.
+<br/>the same
+<br/>The rest of this paper is organized as follows: the brief
+<br/>description on LFA and LDA is explained in Section 2.1
+<br/>and Section 2.2, respectively and proposed algorithm -
+<br/>local feature based on two-layer block model is given in
+<br/>Section 2.3. The experimental results are given in Section 3.
+<br/>Conclusion is summarized in Section 4.
+<br/>2 LFA and LDA M ethod based on Two-
+<br/>Layer Block M odel
+<br/>2.1 Theory of local feature analysis
+<br/>A topographic representation based on second-order
+<br/>image dependencies called local features analysis (LFA)
+<br/>was developed by Penev and Atick [4]. Local feature
+<br/>analysis can makes a set of topographic and local kernels
+<br/>that are optimally matched to the second-order statistics of
+<br/>the input ensemble. Local features are basically derived
+<br/>from principal component eigenvectors, and consist of
+<br/>sphering principal component eigenvalues to equalize their
+<br/>variance.
+<br/>Suppose that we are given a set of M training
+<br/>M , each represented by an
+<br/>images,
+<br/>i(cid:77) , =1,(cid:133) ,
+<br/>dimensional vector obtained by a raster scan. The mean
+</td><td></td><td></td></tr><tr><td>4ae291b070ad7940b3c9d3cb10e8c05955c9e269</td><td>Automatic Detection of Naturalistic Hand-over-Face
+<br/>Gesture Descriptors
+<br/><b>University of Cambridge, Computer Laboratory, UK</b></td><td>('2022940', 'Marwa Mahmoud', 'marwa mahmoud')<br/>('39626495', 'Peter Robinson', 'peter robinson')</td><td>{marwa.mahmoud, tadas.baltrusaitis, peter.robinson}@cl.cam.ac.uk
+</td></tr><tr><td>4aa8db1a3379f00db2403bba7dade5d6e258b9e9</td><td>Recognizing Combinations of Facial Action Units with
+<br/>Different Intensity Using a Mixture of Hidden Markov
+<br/>Models and Neural Network
+<br/><b>DSP Lab, Sharif University of Technology, Tehran, Iran</b></td><td>('1736464', 'Mahmoud Khademi', 'mahmoud khademi')<br/>('1702826', 'Mohammad Hadi Kiapour', 'mohammad hadi kiapour')<br/>('1707281', 'Ali Akbar Kiaei', 'ali akbar kiaei')</td><td>{khademi@ce.,manzuri@,kiapour@ee.,kiaei@ce.}sharif.edu
+</td></tr><tr><td>4a2062ba576ca9e9a73b6aa6e8aac07f4d9344b9</td><td>Fusing Deep Convolutional Networks for Large
+<br/>Scale Visual Concept Classification
+<br/>Department of Computer Engineering
+<br/><b>Bas kent University</b><br/>06810 Ankara, TURKEY
+</td><td>('2140386', 'Hilal Ergun', 'hilal ergun')<br/>('1700011', 'Mustafa Sert', 'mustafa sert')</td><td>21020005@mail.baskent.edu.tr, Bmsert@baskent.edu.tr
+</td></tr><tr><td>4ac4e8d17132f2d9812a0088594d262a9a0d339b</td><td>Rank Constrained Recognition under Unknown Illuminations
+<br/>Center for Automation Research (CfAR)
+<br/>Department of Electrical and Computer Engineering
+<br/><b>University of Maryland, College Park, MD</b></td><td>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td>{shaohua, rama}@cfar.umd.edu
+</td></tr><tr><td>4ac3cd8b6c50f7a26f27eefc64855134932b39be</td><td>Robust Facial Landmark Detection
+<br/>via a Fully-Convolutional Local-Global Context Network
+<br/><b>Technical University of Munich</b></td><td>('3044182', 'Daniel Merget', 'daniel merget')<br/>('28096417', 'Matthias Rock', 'matthias rock')<br/>('46343645', 'Gerhard Rigoll', 'gerhard rigoll')</td><td>daniel.merget@tum.de
+<br/>matthias.rock@tum.de
+<br/>mmk@ei.tum.de
+</td></tr><tr><td>4abaebe5137d40c9fcb72711cdefdf13d9fc3e62</td><td>Dimension Reduction for Regression
+<br/>with Bottleneck Neural Networks
+<br/><b>BECS, Aalto University School of Science and Technology, Finland</b></td><td>('2504988', 'Elina Parviainen', 'elina parviainen')</td><td></td></tr><tr><td>4acd683b5f91589002e6f50885df51f48bc985f4</td><td>BRIDGING COMPUTER VISION AND SOCIAL SCIENCE : A MULTI-CAMERA VISION
+<br/>SYSTEM FOR SOCIAL INTERACTION TRAINING ANALYSIS
+<br/>Peter Tu
+<br/>GE Global Research, Niskayuna NY USA
+</td><td>('1713712', 'Jixu Chen', 'jixu chen')<br/>('39643145', 'Ming-Ching Chang', 'ming-ching chang')<br/>('2095482', 'Tai-Peng Tian', 'tai-peng tian')<br/>('1689202', 'Ting Yu', 'ting yu')</td><td></td></tr><tr><td>4a1d640f5e25bb60bb2347d36009718249ce9230</td><td>Towards Multi-view and Partially-occluded Face Alignment
+<br/><b>National Laboratory of Pattern Recognition, Institute of Automation, CAS, Beijing 100190, P. R. China</b><br/><b>National University of Singapore, Singapore</b></td><td>('1757173', 'Junliang Xing', 'junliang xing')<br/>('1773437', 'Zhiheng Niu', 'zhiheng niu')<br/>('1753492', 'Junshi Huang', 'junshi huang')<br/>('40506509', 'Weiming Hu', 'weiming hu')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')</td><td>{jlxing,wmhu}@nlpr.ia.ac.cn
+<br/>{niuzhiheng,junshi.huang,eleyans}@nus.edu.sg
+</td></tr><tr><td>4aeb87c11fb3a8ad603311c4650040fd3c088832</td><td>Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+<br/>1816
+</td><td></td><td></td></tr><tr><td>4a3d96b2a53114da4be3880f652a6eef3f3cc035</td><td>2666
+<br/>A Dictionary Learning-Based
+<br/>3D Morphable Shape Model
+</td><td>('35220006', 'Claudio Ferrari', 'claudio ferrari')<br/>('2973738', 'Giuseppe Lisanti', 'giuseppe lisanti')<br/>('2507859', 'Stefano Berretti', 'stefano berretti')<br/>('8196487', 'Alberto Del Bimbo', 'alberto del bimbo')</td><td></td></tr><tr><td>4a6fcf714f663618657effc341ae5961784504c7</td><td>Scaling up Class-Specific Kernel Discriminant
+<br/>Analysis for large-scale Face Verification
+</td><td>('9219875', 'Moncef Gabbouj', 'moncef gabbouj')</td><td></td></tr><tr><td>24b37016fee57057cf403fe2fc3dda78476a8262</td><td>Automatic Recognition of Eye Blinking in Spontaneously Occurring Behavior
+<br/><b>Robotics Institute, Carnegie Mellon University, Pittsburgh, PA</b><br/><b>University of Pittsburgh, Pittsburgh</b></td><td>('1683262', 'Tsuyoshi Moriyama', 'tsuyoshi moriyama')<br/>('1724419', 'Jing Xiao', 'jing xiao')</td><td></td></tr><tr><td>24115d209e0733e319e39badc5411bbfd82c5133</td><td>Long-term Recurrent Convolutional Networks for
+<br/>Visual Recognition and Description
+</td><td>('7408951', 'Jeff Donahue', 'jeff donahue')<br/>('2234342', 'Lisa Anne Hendricks', 'lisa anne hendricks')<br/>('34849128', 'Marcus Rohrbach', 'marcus rohrbach')<br/>('1811430', 'Subhashini Venugopalan', 'subhashini venugopalan')<br/>('1687120', 'Sergio Guadarrama', 'sergio guadarrama')<br/>('2903226', 'Kate Saenko', 'kate saenko')<br/>('1753210', 'Trevor Darrell', 'trevor darrell')</td><td></td></tr><tr><td>24c442ac3f6802296d71b1a1914b5d44e48b4f29</td><td>Pose and expression-coherent face recovery in the wild
+<br/>Technicolor, Cesson-S´evign´e, France
+<br/>Franc¸ois Le Clerc
+<br/>Patrick P´erez
+</td><td>('2232848', 'Xavier P. Burgos-Artizzu', 'xavier p. burgos-artizzu')<br/>('2045531', 'Joaquin Zepeda', 'joaquin zepeda')</td><td>xavier.burgos,joaquin.zepeda,francois.leclerc,patrick.perez@technicolor.com
+</td></tr><tr><td>247cab87b133bd0f4f9e8ce5e7fc682be6340eac</td><td>RESEARCH ARTICLE
+<br/>Robust Eye Center Localization through Face
+<br/>Alignment and Invariant Isocentric Patterns
+<br/><b>School of Physics and Engineering, Sun Yat-Sen University, Guangzhou, China, 2 School of Information</b><br/><b>Science and Technology, Sun Yat-Sen University, Guangzhou, China, 3 SYSU-CMU Shunde International</b><br/><b>Joint Research Institute, Foshan, China</b><br/>☯ These authors contributed equally to this work.
+</td><td>('36721307', 'Zhiyong Pang', 'zhiyong pang')<br/>('2940388', 'Chuansheng Wei', 'chuansheng wei')<br/>('2127322', 'Dongdong Teng', 'dongdong teng')<br/>('2547930', 'Dihu Chen', 'dihu chen')<br/>('31912378', 'Hongzhou Tan', 'hongzhou tan')</td><td>* issthz@mail.sysu.edu.cn (HT); stspzy@mail.sysu.edu.cn (ZP)
+</td></tr><tr><td>245f8ec4373e0a6c1cae36cd6fed5a2babed1386</td><td>J. Appl. Environ. Biol. Sci., 7(3S)1-10, 2017
+<br/>© 2017, TextRoad Publication
+<br/>ISSN: 2090-4274
+<br/>Journal of Applied Environmental
+<br/>and Biological Sciences
+<br/>www.textroad.com
+<br/>Lucas Kanade Optical Flow Computation from Superpixel based Intensity
+<br/>Region for Facial Expression Feature Extraction
+<br/>1Intelligent Biometric Group, School of Electrical and Electronics Engineering, Universiti Sains Malaysia,
+<br/><b>Electrical, Electronics and Automation Section, Universiti Kuala Lumpur Malaysian Spanish Institute</b><br/>Engineering Campus, 14300 Nibong Tebal, Pulau Pinang, Malaysia
+<br/>Kulim Hi-Tech Park, Kedah, Malaysia
+<br/>Received: February 21, 2017
+<br/>Accepted: May 14, 2017
+</td><td>('9114862', 'Halina Hassan', 'halina hassan')<br/>('2583099', 'Abduljalil Radman', 'abduljalil radman')<br/>('2612367', 'Shahrel Azmin Suandi', 'shahrel azmin suandi')<br/>('1685966', 'Sazali Yaacob', 'sazali yaacob')</td><td></td></tr><tr><td>24cb375a998f4af278998f8dee1d33603057e525</td><td>Projection Metric Learning on Grassmann Manifold with Application to Video based Face Recognition
+<br/>1Key Laboratory of Intelligent Information Processing of Chinese Academy of Sciences (CAS),
+<br/><b>Institute of Computing Technology, CAS, Beijing, 100190, China</b><br/><b>University of Chinese Academy of Sciences, Beijing, 100049, China</b><br/>seek to learn a generic mapping f : G(q,D) → G(q,d) that is defined as
+<br/>f (YYY iYYY T
+<br/>i ) = WWW TYYY iYYY T
+<br/>i WWW = (WWW TYYY i)(WWW TYYY i)T .
+<br/>(1)
+<br/>where WWW ∈ RD×d (d ≤ D), is a transformation matrix of column full rank.
+<br/>With this mapping, the original Grassmann manifold G(q,D) can be trans-
+<br/>formed into a lower-dimensional Grassmann manifold G(q,d). However,
+<br/>except the case WWW is an orthogonal matrix, WWW TYYY i is not generally an or-
+<br/>thonormal basis matrix. Note that only the linear subspaces spanned by or-
+<br/>thonormal basis matrix can form a valid Grassmann manifold. To tackle this
+<br/>problem, we temporarily use the orthonormal components of WWW TYYY i defined
+<br/>(cid:48)
+<br/>by WWW TYYY
+<br/>i to represent an orthonormal basis matrix of the transformed pro-
+<br/>(cid:48)
+<br/>jection matrices. As for the approach to get the WWW TYYY
+<br/>i, we give more details
+<br/>in the original paper. Here, we briefly describe the formulation of the Pro-
+<br/>jection Metric on the new Grassmann manifold and the proposed objection
+<br/>function in the following.
+<br/>Learned Projection Metric. The Projection Metric of any pair of trans-
+<br/>formed projection operators WWW TYYY
+<br/>(cid:48)T
+<br/>j WWW is defined by:
+<br/>(cid:48)
+<br/>jYYY
+<br/>(cid:48)
+<br/>iYYY
+<br/>(cid:48)
+<br/>iYYY
+<br/>(cid:48)
+<br/>jYYY
+<br/>(cid:48)T
+<br/>i WWW ,WWW TYYY
+<br/>(cid:48)T
+<br/>i WWW , WWW TYYY
+<br/>(cid:48)T
+<br/>p(WWW TYYY
+<br/>d2
+<br/>j WWW )
+<br/>= 2−1/2(cid:107)WWW TYYY
+<br/>(cid:48)T
+<br/>(cid:48)
+<br/>i WWW −WWW TYYY
+<br/>iYYY
+<br/>= 2−1/2tr(PPPAAAi jAAAT
+<br/>i jPPP).
+<br/>i −YYY
+<br/>(cid:48)T
+<br/>(cid:48)
+<br/>jYYY
+<br/>(cid:48)T
+<br/>j WWW(cid:107)2
+<br/>(2)
+<br/>(cid:48)
+<br/>iYYY
+<br/>(cid:48)
+<br/>jYYY
+<br/>(cid:48)T
+<br/>j and PPP = WWWWWW T . Since WWW is required to be a
+<br/>where AAAi j = YYY
+<br/>matrix with column full rank, PPP is a rank-d symmetric positive semidefinite
+<br/>matrix of size D× D, which has a similar form as Mahalanobis matrix.
+<br/>Discriminant Function. The discriminant function is designed to minimize
+<br/>the projection distances of any within-class subspace pairs while to maxi-
+<br/>mize the projection distances of between-class subspace pairs. The matrix
+<br/>PPP is thus achieved by the objective function J(PPP) as:
+<br/>PPP∗ = argmin
+<br/>PPP
+<br/>J(PPP) = argmin
+<br/>PPP
+<br/>(Jw(PPP)− αJb(PPP)).
+<br/>(3)
+<br/>where α reflects the trade-off between the within-class compactness term
+<br/>Jw(PPP) and between-class dispersion term Jb(PPP), which are measured by av-
+<br/>erage within-class scatter and average between-class scatter respectively as:
+<br/>Jw(PPP) =
+<br/>Jb(PPP) =
+<br/>Nw
+<br/>i=1
+<br/>Nb
+<br/>i=1
+<br/>j:Ci=Cj
+<br/>j:Ci(cid:54)=Cj
+<br/>2−1/2tr(PPPAAAi jAAAT
+<br/>i jPPP).
+<br/>2−1/2tr(PPPAAAi jAAAT
+<br/>i jPPP).
+<br/>(4)
+<br/>(5)
+<br/>where Nw is the number of pairs of samples from the same class, Nb is the
+<br/>(cid:48)T
+<br/>number of pairs of samples from different classes, AAAi j = YYY
+<br/>j and
+<br/>PPP is the PSD matrix that needs to be learned.
+<br/>i −YYY
+<br/>(cid:48)T
+<br/>(cid:48)
+<br/>jYYY
+<br/>(cid:48)
+<br/>iYYY
+<br/>[1] J. Hamm and D. D. Lee. Grassmann discriminant analysis: a unifying
+<br/>view on subspace-based learning. In ICML, 2008.
+<br/>[2] Jihun Hamm and Daniel D Lee. Extended grassmann kernels for
+<br/>subspace-based learning. In NIPS, 2008.
+<br/>[3] Mehrtash Tafazzoli Harandi, C. Sanderson, S. Shirazi, and B. C. Lovell.
+<br/>Graph embedding discriminant analysis on grassmannian manifolds for
+<br/>improved image set matching. In CVPR, 2011.
+<br/>[4] Mehrtash Tafazzoli Harandi, Mathieu Salzmann, Sadeep Jayasumana,
+<br/>Richard Hartley, and Hongdong Li. Expanding the family of grassman-
+<br/>nian kernels: An embedding perspective. In ECCV. 2014.
+<br/>[5] R. Vemulapalli, J. Pillai, and R. Chellappa. Kernel learning for extrinsic
+<br/>classification of manifold features. In CVPR, 2013.
+<br/>Figure 1: Conceptual illustration of the proposed Projection Metric Learn-
+<br/>ing (PML) on the Grassmann Manifold. Traditional Grassmann discrimi-
+<br/>nant analysis methods take the away (a)-(b)-(d)-(e) to first embed the origi-
+<br/>nal Grassmann manifold G(q,D) (b) into high dimensional Hilbert space H
+<br/>(d) and then learn a map from the Hilbert space to a lower-dimensional, op-
+<br/>tionally more discriminative space Rd (e). In contrast, the newly proposed
+<br/>approach goes the way (a)-(b)-(c) to learn the metric/mapping from the orig-
+<br/>inal Grassmann manifold G(q,D) (b) to a new more discriminant Grssmann
+<br/>manifold G(q,d) (c).
+<br/>In video based face recognition, great success has been made by represent-
+<br/>ing videos as linear subspaces, which typically reside on Grassmann mani-
+<br/>fold endowed with the well-studied projection metric. Under the projection
+<br/>metric framework, most of recent studies [1, 2, 3, 4, 5] exploited a series of
+<br/>positive definite kernel functions on Grassmann manifold to first embed the
+<br/>manifold into a high dimensional Hilbert space, and then map the flattened
+<br/>manifold into a lower-dimensional Euclidean space (see Fig.1 (a)-(b)-(d)-
+<br/>(e)). Although these methods can be employed for supervised classification,
+<br/>they are limited to the Mercer kernels which yields implicit projection, and
+<br/>thus restricted to use only kernel-based classifiers. Moreover, the computa-
+<br/>tional complexity of these kernel-based methods increases with the number
+<br/>of training sample.
+<br/>To overcome the limitations of existing Grassmann discriminant anal-
+<br/>ysis methods, by endowing the well-studied Projection Metric with Grass-
+<br/>mann manifold, this paper attempt to learn a Mahalanobis-like matrix on the
+<br/>Grassmann manifold without resorting to kernel Hilbert space embedding.
+<br/>In contrast to the kernelization scheme, our approach directly works on the
+<br/>original manifold and exploits its geometry to learn a representation that stil-
+<br/>l benefits from useful properties of the Grassmann manifold. Furthermore,
+<br/>the learned Mahalanobis-like matrix can be decomposed into the transfor-
+<br/>mation for dimensionality reduction, which maps the original Grassmann
+<br/>manifold to a lower-dimensional, more discriminative Grassmann manifold
+<br/>(see Fig.1 (a)-(b)-(c)).
+<br/>Formally, assume m video sequences are given as {XXX 1,XXX 2, . . . ,XXX m},
+<br/>where XXX i ∈ RD×ni describes a data matrix of the i-th video containing ni
+<br/>frames, each frame being expressed as a D-dimensional feature vector. In
+<br/>these data, each video belongs to one of face classes denoted by Ci. The
+<br/>i-th video XXX i is represented by a q-dimensional linear subspace spanned by
+<br/>an orthonormal basis matrix YYY i ∈ RD×q, s.t. XXX iXXX T
+<br/>i , where ΛΛΛi,
+<br/>YYY i correspond to the matrices of the q largest eigenvalues and eigenvectors
+<br/>respectively.
+<br/>i (cid:39) YYY iΛΛΛiYYY T
+<br/>Given a linear subspace span(YYY i) on Grassmann manifold (as discussed
+<br/>i as the elements on the manifold), we
+<br/>in the original paper, we denote YYY iYYY T
+</td><td>('7945869', 'Zhiwu Huang', 'zhiwu huang')<br/>('3373117', 'Ruiping Wang', 'ruiping wang')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('1710220', 'Xilin Chen', 'xilin chen')</td><td></td></tr><tr><td>24aac045f1e1a4c13a58eab4c7618dccd4c0e671</td><td></td><td></td><td></td></tr><tr><td>240d5390af19bb43761f112b0209771f19bfb696</td><td></td><td></td><td></td></tr><tr><td>24f9248f01df3020351347c2a3f632e01de72090</td><td>Reconstructing a Fragmented Face from a
+<br/>Cryptographic Identification Protocol
+<br/><b>The University of Texas at Austin</b></td><td>('39573884', 'Andy Luong', 'andy luong')<br/>('2499821', 'Michael Gerbush', 'michael gerbush')<br/>('1715120', 'Brent Waters', 'brent waters')<br/>('1794409', 'Kristen Grauman', 'kristen grauman')</td><td>aluong,mgerbush,bwaters,grauman@cs.utexas.edu
+</td></tr><tr><td>24e099e77ae7bae3df2bebdc0ee4e00acca71250</td><td>Robust face alignment under occlusion via regional predictive power
+<br/>estimation.
+<br/>© 2015 IEEE
+<br/>For additional information about this publication click this link.
+<br/>http://qmro.qmul.ac.uk/xmlui/handle/123456789/22467
+<br/>Information about this research object was correct at the time of download; we occasionally
+<br/>make corrections to records, please therefore check the published record when citing. For
+</td><td>('2966679', 'Heng Yang', 'heng yang')</td><td>more information contact scholarlycommunications@qmul.ac.uk
+</td></tr><tr><td>24959d1a9c9faf29238163b6bcaf523e2b05a053</td><td>High accuracy head pose tracking survey
+<br/><b>Warsaw University of Technology, Poland</b></td><td>('1899063', 'Adam Strupczewski', 'adam strupczewski')</td><td></td></tr><tr><td>24f1febcdf56cd74cb19d08010b6eb5e7c81c362</td><td>Synergistic Methods for using Language in Robotics
+<br/>Ching L. Teo
+<br/><b>University of Maryland</b><br/>Dept of Computer Science
+<br/><b>College Park, Maryland</b><br/>+01 3014051762
+<br/><b>University of Maryland</b><br/>Dept of Computer Science
+<br/><b>College Park, Maryland</b><br/>+01 3014051762
+<br/><b>University of Maryland</b><br/><b>Institute for Advanced</b><br/>Computer Studies
+<br/><b>College Park, Maryland</b><br/>+01 3014051743
+<br/><b>University of Maryland</b><br/>Dept of Computer Science
+<br/><b>College Park, Maryland</b><br/>+01 3014051768
+</td><td>('7607499', 'Yezhou Yang', 'yezhou yang')<br/>('1759899', 'Cornelia Fermüller', 'cornelia fermüller')<br/>('1697493', 'Yiannis Aloimonos', 'yiannis aloimonos')</td><td>cteo@cs.umd.edu
+<br/>yzyang@cs.umd.edu
+<br/>fer@umiacs.umd.edu
+<br/>yiannis@cs.umd.edu
+</td></tr><tr><td>2450c618cca4cbd9b8cdbdb05bb57d67e63069b1</td><td>A Connexionist Approach for Robust and Precise Facial Feature Detection in
+<br/>Complex Scenes
+<br/>Stefan Duffner and Christophe Garcia
+<br/>France Telecom Research & Development
+<br/>4, rue du Clos Courtel
+<br/>35512 Cesson-S´evign´e, France
+</td><td></td><td>fstefan.duffner, christophe.garciag@francetelecom.com
+</td></tr><tr><td>244b57cc4a00076efd5f913cc2833138087e1258</td><td>Warped Convolutions: Efficient Invariance to Spatial Transformations
+</td><td>('1687524', 'Andrea Vedaldi', 'andrea vedaldi')</td><td></td></tr><tr><td>24cf9fe9045f50c732fc9c602358af89ae40a9f7</td><td>YANG et al.: ATTRIBUTE RECOGNITION FROM ADAPTIVE PARTS
+<br/>Attribute Recognition from Adaptive Parts
+<br/>Ligeng Zhu2
+<br/><b>Simon Fraser University</b><br/>Vancouver, Canada
+<br/><b>Zhejiang University</b><br/>Hangzhou, China
+<br/>3 Microsoft Research Asia
+<br/>Beijing, China
+<br/><b>Tongji University</b><br/>Shanghai, China
+</td><td>('3202074', 'Luwei Yang', 'luwei yang')<br/>('1732264', 'Yichen Wei', 'yichen wei')<br/>('1729017', 'Shuang Liang', 'shuang liang')<br/>('37291674', 'Ping Tan', 'ping tan')</td><td>luweiy@sfu.ca
+<br/>zhuligeng@zju.edu.cn
+<br/>yichenw@microsoft.com
+<br/>shuangliang@tongji.edu.cn
+<br/>pingtan@sfu.ca
+</td></tr><tr><td>24f022d807352abf071880877c38e53a98254dcd</td><td>Are screening methods useful in feature selection? An
+<br/>empirical study
+<br/><b>Florida State University, Tallahassee, Florida, U.S.A</b></td><td>('6693611', 'Mingyuan Wang', 'mingyuan wang')<br/>('2455529', 'Adrian Barbu', 'adrian barbu')</td><td>* abarbu@stat.fsu.edu
+</td></tr><tr><td>241d2c517dbc0e22d7b8698e06ace67de5f26fdf</td><td>Online, Real-Time Tracking
+<br/>Using a Category-to-Individual Detector(cid:2)
+<br/><b>California Institute of Technology, USA</b></td><td>('1990633', 'David Hall', 'david hall')<br/>('1690922', 'Pietro Perona', 'pietro perona')</td><td>{dhall,perona}@vision.caltech.edu
+</td></tr><tr><td>24869258fef8f47623b5ef43bd978a525f0af60e</td><td><b>UNIVERSITÉDEGRENOBLENoattribuéparlabibliothèqueTHÈSEpourobtenirlegradedeDOCTEURDEL’UNIVERSITÉDEGRENOBLESpécialité:MathématiquesetInformatiquepréparéeauLaboratoireJeanKuntzmanndanslecadredel’ÉcoleDoctoraleMathématiques,SciencesetTechnologiesdel’Information,InformatiqueprésentéeetsoutenuepubliquementparMatthieuGuillauminle27septembre2010ExploitingMultimodalDataforImageUnderstandingDonnéesmultimodalespourl’analysed’imageDirecteursdethèse:CordeliaSchmidetJakobVerbeekJURYM.ÉricGaussierUniversitéJosephFourierPrésidentM.AntonioTorralbaMassachusettsInstituteofTechnologyRapporteurMmeTinneTuytelaarsKatholiekeUniversiteitLeuvenRapporteurM.MarkEveringhamUniversityofLeedsExaminateurMmeCordeliaSchmidINRIAGrenobleExaminatriceM.JakobVerbeekINRIAGrenobleExaminateur</b></td><td></td><td></td></tr><tr><td>24e6a28c133b7539a57896393a79d43dba46e0f6</td><td>ROBUST BAYESIAN METHOD FOR SIMULTANEOUS BLOCK SPARSE SIGNAL
+<br/>RECOVERY WITH APPLICATIONS TO FACE RECOGNITION
+<br/>Department of Electrical and Computer Engineering
+<br/><b>University of California, San Diego</b></td><td>('32352411', 'Igor Fedorov', 'igor fedorov')<br/>('3291075', 'Ritwik Giri', 'ritwik giri')<br/>('1748319', 'Bhaskar D. Rao', 'bhaskar d. rao')<br/>('1690269', 'Truong Q. Nguyen', 'truong q. nguyen')</td><td></td></tr><tr><td>248db911e3a6a63ecd5ff6b7397a5d48ac15e77a</td><td>Enriching Texture Analysis with Semantic Data
+<br/>Communications, Signal Processing and Control Group
+<br/>School of Electronics and Computer Science
+<br/><b>University of Southampton</b></td><td>('28637223', 'Tim Matthews', 'tim matthews')<br/>('1727698', 'Mark S. Nixon', 'mark s. nixon')<br/>('1697360', 'Mahesan Niranjan', 'mahesan niranjan')</td><td>{tm1e10,msn,mn}@soton.ac.uk
+</td></tr><tr><td>24d376e4d580fb28fd66bc5e7681f1a8db3b6b78</td><td></td><td></td><td></td></tr><tr><td>24f1e2b7a48c2c88c9e44de27dc3eefd563f6d39</td><td>Recognition of Action Units in the Wild
+<br/>with Deep Nets and a New Global-Local Loss
+<br/>C. Fabian Benitez-Quiroz
+<br/>Aleix M. Martinez
+<br/>Dept. Electrical and Computer Engineering
+<br/><b>The Ohio State University</b></td><td>('1678691', 'Yan Wang', 'yan wang')</td><td>{benitez-quiroz.1,wang.9021,martinez.158}@osu.edu
+</td></tr><tr><td>243e9d490fe98d139003bb8dc95683b366866c57</td><td>Distinctive Parts for Relative attributes
+<br/>Thesis submitted in partial fulfillment
+<br/>of the requirements for the degree of
+<br/>Master of science( by research)
+<br/>in
+<br/>Computer Science Engineering
+<br/>by
+<br/>Ramachandruni Naga Sandeep
+<br/>201207582
+<br/>Center for Visual Information Technology
+<br/><b>International Institute of Information Technology</b><br/>Hyderabad - 500 032, INDIA
+<br/>December 2014
+</td><td></td><td>nsandeep.ramachandruni@research.iiit.ac.in
+</td></tr><tr><td>2465fc22e03faf030e5a319479a95ef1dfc46e14</td><td>______________________________________________________PROCEEDING OF THE 20TH CONFERENCE OF FRUCT ASSOCIATION
+<br/>Influence of Different Feature Selection Approaches
+<br/>on the Performance of Emotion Recognition
+<br/>Methods Based on SVM
+<br/><b>Ural Federal University (UrFU</b><br/>Yekaterinburg, Russia
+</td><td>('11063038', 'Daniil Belkov', 'daniil belkov')<br/>('3457868', 'Konstantin Purtov', 'konstantin purtov')</td><td>d.d.belkov, k.s.purtov@gmail.com, kublanov@mail.ru
+</td></tr><tr><td>24ff832171cb774087a614152c21f54589bf7523</td><td>Beat-Event Detection in Action Movie Franchises
+<br/>Jerome Revaud
+<br/>Zaid Harchaoui
+</td><td>('2319574', 'Danila Potapov', 'danila potapov')<br/>('3271933', 'Matthijs Douze', 'matthijs douze')<br/>('2462253', 'Cordelia Schmid', 'cordelia schmid')</td><td></td></tr><tr><td>247a6b0e97b9447850780fe8dbc4f94252251133</td><td>Facial Action Unit Detection: 3D versus 2D Modality
+<br/>Electrical and Electronics Engineering
+<br/><b>Bo gazic i University, Istanbul, Turkey</b><br/>B¨ulent Sankur
+<br/>Electrical and Electronics Engineering
+<br/><b>Bo gazic i University, Istanbul, Turkey</b><br/>Department of Psychology
+<br/><b>Bo gazic i University, Istanbul, Turkey</b></td><td>('1839621', 'Arman Savran', 'arman savran')<br/>('27414819', 'M. Taha Bilge', 'm. taha bilge')</td><td>arman.savran@boun.edu.tr
+<br/>bulent.sankur@boun.edu.tr
+<br/>taha.bilge@boun.edu.tr
+</td></tr><tr><td>24bf94f8090daf9bda56d54e42009067839b20df</td><td></td><td></td><td></td></tr><tr><td>240eb0b34872c431ecf9df504671281f59e7da37</td><td>Cutout-Search: Putting a Name to the Picture
+<br/><b>Carnegie Mellon University</b><br/><b>Cornell University</b></td><td>('1746610', 'Dhruv Batra', 'dhruv batra')<br/>('2371390', 'Adarsh Kowdle', 'adarsh kowdle')<br/>('1746230', 'Tsuhan Chen', 'tsuhan chen')<br/>('1713589', 'Devi Parikh', 'devi parikh')</td><td>batradhruv@cmu.edu
+<br/>apk64@cornell.edu dparikh@cmu.edu tsuhan@ece.cornell.edu
+</td></tr><tr><td>230527d37421c28b7387c54e203deda64564e1b7</td><td>Person Re-identification: System Design and
+<br/>Evaluation Overview
+</td><td>('31843833', 'Xiaogang Wang', 'xiaogang wang')<br/>('40156369', 'Rui Zhao', 'rui zhao')</td><td></td></tr><tr><td>23fdbef123bcda0f07d940c72f3b15704fd49a98</td><td></td><td></td><td></td></tr><tr><td>23ebbbba11c6ca785b0589543bf5675883283a57</td><td></td><td></td><td></td></tr><tr><td>23aef683f60cb8af239b0906c45d11dac352fb4e</td><td>Incorporating Context Information into Deep
+<br/>Neural Network Acoustic Models
+<br/>July 2016
+<br/>School of Computer Science
+<br/><b>Carnegie Mellon University</b><br/>Pittsburgh, PA 15213
+<br/>Thesis Committee:
+<br/><b>Florian Metze, Chair (Carnegie Mellon University</b><br/><b>Alan W Black (Carnegie Mellon University</b><br/><b>Alex Waibel (Carnegie Mellon University</b><br/>Jinyu Li (Microsoft)
+<br/>Submitted in partial fulfillment of the requirements
+<br/>for the degree of Doctor of Philosophy.
+</td><td>('37467623', 'Yajie Miao', 'yajie miao')<br/>('37467623', 'Yajie Miao', 'yajie miao')</td><td></td></tr><tr><td>235d5620d05bb7710f5c4fa6fceead0eb670dec5</td><td>Who’s Doing What: Joint Modeling of Names and
+<br/>Verbs for Simultaneous Face and Pose Annotation
+<br/>Luo Jie
+<br/>Idiap and EPF Lausanne
+<br/><b>Idiap Research Institute</b><br/>ETH Zurich
+</td><td>('3033284', 'Barbara Caputo', 'barbara caputo')<br/>('1749692', 'Vittorio Ferrari', 'vittorio ferrari')</td><td>jluo@idiap.ch
+<br/>bcaputo@idiap.ch
+<br/>ferrari@vision.ee.ethz.ch
+</td></tr><tr><td>23ce6f404c504592767b8bec7d844d87b462de71</td><td>A Deep Face Identification Network Enhanced by Facial Attributes Prediction
+<br/><b>West Virginia University</b></td><td>('34708406', 'Fariborz Taherkhani', 'fariborz taherkhani')<br/>('8147588', 'Nasser M. Nasrabadi', 'nasser m. nasrabadi')</td><td>ft0009@mix.wvu.edu, nasser.nasrabadi@mail.wvu.edu, Jeremy.Dawson@mail.wvu.edu
+</td></tr><tr><td>23fd653b094c7e4591a95506416a72aeb50a32b5</td><td>Emotion Recognition using Fuzzy Rule-based System
+<br/>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 93 – No.11, May 2014
+<br/>Department of Computer Science
+<br/><b>Amity University, Lucknow, India</b><br/>Faculty in Department Of Computer Science
+<br/><b>Amity University, Lucknow, India</b><br/>
+<br/>
+</td><td>('14559473', 'Akanksha Chaturvedi', 'akanksha chaturvedi')</td><td></td></tr><tr><td>23172f9a397f13ae1ecb5793efd81b6aba9b4537</td><td>Proceedings of the 2015 Workshop on Vision and Language (VL’15), pages 10–17,
+<br/>Lisbon, Portugal, 18 September 2015. c(cid:13)2015 Association for Computational Linguistics.
+<br/>10
+</td><td></td><td></td></tr><tr><td>231a6d2ee1cc76f7e0c5912a530912f766e0b459</td><td>Shape Primitive Histogram: A Novel Low-Level Face Representation for Face
+<br/>Recognition
+<br/><b>aCollege of Computer Science at Chongqing University, 400044, Chongqing, P.R.C</b><br/>bSchool of Software Engineering at Chongqing Univeristy,400044,Chongqing,P.R.C
+<br/><b>cSchool of Astronautics at Beihang University, 100191, Beijing, P.R.C</b><br/>dState Key Laboratory of Management and Control for Complex Systems
+<br/><b>Institute of Automation, Chinese Academy of Sciences, 100190, Beijing, P.R.C</b><br/>eMinistry of Education Key Laboratory of Dependable Service Computing in Cyber Physical Society, 400044, Chongqing, P.R.C
+</td><td>('1786011', 'Sheng Huang', 'sheng huang')<br/>('1698431', 'Dan Yang', 'dan yang')<br/>('1737368', 'Haopeng Zhang', 'haopeng zhang')</td><td></td></tr><tr><td>236a4f38f79a4dcc2183e99b568f472cf45d27f4</td><td>1632
+<br/>Randomized Clustering Forests
+<br/>for Image Classification
+<br/>Frederic Jurie, Member, IEEE Computer Society
+</td><td>('3128253', 'Frank Moosmann', 'frank moosmann')<br/>('1975110', 'Eric Nowak', 'eric nowak')</td><td></td></tr><tr><td>230c4a30f439700355b268e5f57d15851bcbf41f</td><td>EM Algorithms for Weighted-Data Clustering
+<br/>with Application to Audio-Visual Scene Analysis
+</td><td>('1780201', 'Xavier Alameda-Pineda', 'xavier alameda-pineda')<br/>('1785817', 'Florence Forbes', 'florence forbes')<br/>('1794229', 'Radu Horaud', 'radu horaud')</td><td></td></tr><tr><td>237fa91c8e8098a0d44f32ce259ff0487aec02cf</td><td>IEEE TRANSACTIONS ON SYSTEMS, MAN, AND CYBERNETICS—PART B: CYBERNETICS, VOL. 36, NO. 4, AUGUST 2006
+<br/>863
+<br/>Bidirectional PCA With Assembled Matrix
+<br/>Distance Metric for Image Recognition
+</td><td>('1724520', 'Wangmeng Zuo', 'wangmeng zuo')<br/>('1711542', 'Kuanquan Wang', 'kuanquan wang')</td><td></td></tr><tr><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td><td><b>The Robotics Institute</b><br/><b>Carnegie Mellon University</b><br/>Pittsburgh, PA, USA 15213
+<br/>http://www.cs.cmu.edu/~face
+<br/>Department of Psychology
+<br/><b>University of Pittsburgh</b><br/><b>The Robotics Institute</b><br/><b>Carnegie Mellon University</b><br/>4015 O'Hara Street
+<br/>Pittsburgh, PA, USA 15260
+<br/>Yingli Tian
+<br/><b>The Robotics Institute</b><br/><b>Carnegie Mellon University</b><br/>Pittsburgh, PA, USA 15213
+<br/>Proceedings of the Fourth IEEE International Conference on Automatic Face and Gesture Recognition
+<br/>(FG'00), pp. 484-490, Grenoble, France.
+<br/>Comprehensive Database for Facial Expression Analysis
+</td><td>('1733113', 'Takeo Kanade', 'takeo kanade')<br/>('1737918', 'Jeffrey F. Cohn', 'jeffrey f. cohn')</td><td>tk@cs.cmu.edu
+<br/>yltian@cs.cmu.edu
+<br/>jeffcohn+@pitt.edu
+</td></tr><tr><td>2331df8ca9f29320dd3a33ce68a539953fa87ff5</td><td>Extended Isomap for Pattern Classification
+<br/><b>Honda Fundamental Research Labs</b><br/>Mountain View, CA 94041
+</td><td>('1715634', 'Ming-Hsuan Yang', 'ming-hsuan yang')</td><td>myang@hra.com
+</td></tr><tr><td>232b6e2391c064d483546b9ee3aafe0ba48ca519</td><td>Optimization problems for fast AAM fitting in-the-wild
+<br/>1. School of Computer Science
+<br/><b>University of Lincoln, U.K</b><br/>2. Department of Computing
+<br/><b>Imperial College London, U.K</b></td><td>('2610880', 'Georgios Tzimiropoulos', 'georgios tzimiropoulos')</td><td>gtzimiropoulos@lincoln.ac.uk
+</td></tr><tr><td>23ba9e462151a4bf9dfc3be5d8b12dbcfb7fe4c3</td><td>CS 229 Project, Fall 2014
+<br/>Determining Mood from Facial Expressions
+<br/>Introduction
+<br/>I
+<br/>Facial expressions play an extremely important role in human communication. As
+<br/>society continues to make greater use of human-machine interactions, it is important for
+<br/>machines to be able to interpret facial expressions in order to improve their
+<br/>authenticity. If machines can be trained to determine mood to a better extent than
+<br/>humans can, especially for more subtle moods, then this could be useful in fields such as
+<br/>counseling. This could also be useful for gauging reactions of large audiences in various
+<br/>contexts, such as political talks.
+<br/>The results of this project could also be applied to recognizing other features of facial
+<br/>expressions, such as determining when people are purposefully suppressing emotions or
+<br/>lying. The ability to recognize different facial expressions could also improve technology
+<br/>that recognizes to whom specific faces belong. This could in turn be used to search a
+<br/>large number of pictures for a specific photo, which is becoming increasingly difficult, as
+<br/>storing photos digitally has been extremely common in the past decade. The possibilities
+<br/>are endless.
+<br/>II Data and Features
+<br/>2.1 Data
+<br/>Our data consists of 1166 frontal images of
+<br/>people’s faces from three databases, with each
+<br/>image labeled with one of eight emotions:
+<br/>anger, contempt, disgust, fear, happiness,
+<br/>neutral, sadness, and surprise. The TFEID [1],
+<br/>CK+ [2], and JAFFE [3] databases primarily
+<br/>consist of Taiwanese, Caucasian, and Japanese
+<br/>subjects, respectively. The TFEID and JAFFE
+<br/>images are both cropped with the faces
+<br/>centered. Each image has a subject posing with
+<br/>one of the emotions. The JAFFE database does
+<br/>not have any images for contempt.
+<br/>2.2 Features
+<br/>On each face, there are many different facial landmarks. While some of these landmarks
+<br/>(pupil position, nose tip, and face contour) are not as indicative of emotion, others
+<br/>(eyebrow, mouth, and eye shape) are. To extract landmark data from images, we used
+<br/>Happiness
+<br/>Figure 1
+<br/>Anger
+</td><td>('34482382', 'Matthew Wang', 'matthew wang')</td><td>mmwang@stanford.edu
+<br/>spencery@stanford.edu
+</td></tr><tr><td>237eba4822744a9eabb121fe7b50fd2057bf744c</td><td>Facial Expression Synthesis Using PAD Emotional
+<br/>Parameters for a Chinese Expressive Avatar
+<br/>1 Department of Computer Science and Technology
+<br/><b>Tsinghua University, 100084 Beijing, China</b><br/>2 Department of Systems Engineering and Engineering Management
+<br/><b>The Chinese University of Hong Kong, HKSAR, China</b></td><td>('2180849', 'Shen Zhang', 'shen zhang')<br/>('3860920', 'Zhiyong Wu', 'zhiyong wu')<br/>('1702243', 'Helen M. Meng', 'helen m. meng')<br/>('7239047', 'Lianhong Cai', 'lianhong cai')</td><td>zhangshen05@mails.tsinghua.edu.cn, john.zy.wu@gmail.com
+<br/>hmmeng@se.cuhk.edu.hk, clh-dcs@tsinghua.edu.cn
+</td></tr><tr><td>238fc68b2e0ef9f5ec043d081451902573992a03</td><td>2656
+<br/>Enhanced Local Gradient Order Features and
+<br/>Discriminant Analysis for Face Recognition
+<br/>role in robust face recognition [5]. Many algorithms have
+<br/>been proposed to deal with the effectiveness of feature design
+<br/>and extraction [6], [7]; however, the performance of many
+<br/>existing methods is still highly sensitive to variations of
+<br/>imaging conditions, such as outdoor illumination, exaggerated
+<br/>expression, and continuous occlusion. These complex varia-
+<br/>tions are significantly affecting the recognition accuracy in
+<br/>recent years [8]–[10].
+<br/>Appearance-based subspace learning is one of the sim-
+<br/>plest approach for feature extraction, and many methods
+<br/>are usually based on linear correlation of pixel intensities.
+<br/>For example, Eigenface [11] uses eigen system of pixel
+<br/>intensities to estimate the lower rank linear subspace of
+<br/>a set of training face images by minimizing the (cid:2)2 dis-
+<br/>tance metric. The solution enjoys optimality properties when
+<br/>noise is independent
+<br/>identically distributed Gaussian only.
+<br/>Fisherface [12] will suffer more due to the estimation of
+<br/>inverse within-class covariance matrix [13],
+<br/>thus the per-
+<br/>formance will degenerate rapidly in the cases of occlusion
+<br/>and small sample size. Laplacianfaces [14] refer to another
+<br/>appearance-based approach which learns a locality preserv-
+<br/>ing subspace and seeks to capture the intrinsic geometry
+<br/>and local structure of the data. Other methods such as those
+<br/>in [5] and [15] also provide valuable approaches to supervised
+<br/>or unsupervised dimension reduction tasks.
+<br/>A fundamental problem of appearance-based methods for
+<br/>face recognition, however, is that they are sensitive to imag-
+<br/>ing conditions [10]. As for data corrupted by illumination
+<br/>changes, occlusions, and inaccurate alignment, the estimated
+<br/>subspace will be biased, thus much of the efforts concentrate
+<br/>on removing/shrinking the noise components. In contrast, local
+<br/>feature descriptors [15]–[19] have certain advantages as they
+<br/>are more stable to local changes. In the view of image pro-
+<br/>cessing and vision, the basic imaging system can be simply
+<br/>formulated as
+<br/>(x, y) = A(x, y) × L(x, y)
+<br/>(1)
+</td><td>('1688667', 'Chuan-Xian Ren', 'chuan-xian ren')<br/>('1718623', 'Zhen Lei', 'zhen lei')<br/>('1726138', 'Dao-Qing Dai', 'dao-qing dai')<br/>('34679741', 'Stan Z. Li', 'stan z. li')</td><td></td></tr><tr><td>2322ec2f3571e0ddc593c4e2237a6a794c61251d</td><td>Jack, R. E. , Sun, W., Delis, I., Garrod, O. G. B. and Schyns, P. G. (2016)
+<br/>Four not six: revealing culturally common facial expressions of
+<br/>emotion.Journal of Experimental Psychology: General, 145(6), pp. 708-
+<br/>730. (doi:10.1037/xge0000162)
+<br/>This is the author’s final accepted version.
+<br/>There may be differences between this version and the published version.
+<br/>You are advised to consult the publisher’s version if you wish to cite from
+<br/>it.
+<br/>http://eprints.gla.ac.uk/116592/
+<br/>
+<br/>Deposited on: 20 April 2016
+<br/><b>Enlighten Research publications by members of the University of Glasgow</b><br/>http://eprints.gla.ac.uk
+</td><td></td><td></td></tr><tr><td>23e75f5ce7e73714b63f036d6247fa0172d97cb6</td><td>BioMed Central
+<br/>Research
+<br/>Facial expression (mood) recognition from facial images using
+<br/>committee neural networks
+<br/>Open Access
+<br/><b>University of Akron, Akron</b><br/><b>Engineering, University of Akron, Akron, OH 44325-3904, USA</b><br/>* Corresponding author
+<br/>Published: 5 August 2009
+<br/>doi:10.1186/1475-925X-8-16
+<br/>Received: 24 September 2008
+<br/>Accepted: 5 August 2009
+<br/>This article is available from: http://www.biomedical-engineering-online.com/content/8/1/16
+<br/>© 2009 Kulkarni et al; licensee BioMed Central Ltd.
+<br/>This is an Open Access article distributed under the terms of the Creative Commons Attribution License (http://creativecommons.org/licenses/by/2.0),
+<br/>which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
+</td><td>('39890387', 'Saket S Kulkarni', 'saket s kulkarni')<br/>('2484370', 'Narender P Reddy', 'narender p reddy')<br/>('32173165', 'SI Hariharan', 'si hariharan')</td><td>Email: Saket S Kulkarni - saketkulkarni@gmail.com; Narender P Reddy* - npreddy@uakron.edu; SI Hariharan - hari@uakron.edu
+</td></tr><tr><td>23429ef60e7a9c0e2f4d81ed1b4e47cc2616522f</td><td>A Domain Based Approach to Social Relation Recognition
+<br/><b>Max Planck Institute for Informatics, Saarland Informatics Campus</b><br/>Figure 1: We investigate the recognition of social relations in a domain-based approach. Our study is based on Bugental’s
+<br/>social psychology theory [1] that partitions social life into 5 domains from which we derive 16 social relations.
+</td><td>('32222907', 'Qianru Sun', 'qianru sun')<br/>('1697100', 'Bernt Schiele', 'bernt schiele')<br/>('1739548', 'Mario Fritz', 'mario fritz')</td><td>{qsun, schiele, mfritz}@mpi-inf.mpg.de
+</td></tr><tr><td>23aba7b878544004b5dfa64f649697d9f082b0cf</td><td>Locality-Constrained Discriminative Learning and Coding
+<br/>1Department of Electrical & Computer Engineering,
+<br/><b>College of Computer and Information Science</b><br/><b>Northeastern University, Boston, MA, USA</b></td><td>('7489165', 'Shuyang Wang', 'shuyang wang')<br/>('37771688', 'Yun Fu', 'yun fu')</td><td>{shuyangwang, yunfu}@ece.neu.edu
+</td></tr><tr><td>23120f9b39e59bbac4438bf4a8a7889431ae8adb</td><td>Aalborg Universitet
+<br/>Improved RGB-D-T based Face Recognition
+<br/>Nikisins, Olegs; Sun, Yunlian; Li, Haiqing; Sun, Zhenan; Moeslund, Thomas B.; Greitans,
+<br/>Modris
+<br/>Published in:
+<br/>DOI (link to publication from Publisher):
+<br/>10.1049/iet-bmt.2015.0057
+<br/>Publication date:
+<br/>2016
+<br/>Document Version
+<br/>Accepted manuscript, peer reviewed version
+<br/><b>Link to publication from Aalborg University</b><br/>Citation for published version (APA):
+<br/>Oliu Simon, M., Corneanu, C., Nasrollahi, K., Guerrero, S. E., Nikisins, O., Sun, Y., ... Greitans, M. (2016).
+<br/>General rights
+<br/>Copyright and moral rights for the publications made accessible in the public portal are retained by the authors and/or other copyright owners
+<br/>and it is a condition of accessing publications that users recognise and abide by the legal requirements associated with these rights.
+<br/> ? Users may download and print one copy of any publication from the public portal for the purpose of private study or research.
+<br/> ? You may not further distribute the material or use it for any profit-making activity or commercial gain
+<br/> ? You may freely distribute the URL identifying the publication in the public portal ?
+<br/>Take down policy
+<br/>the work immediately and investigate your claim.
+<br/>Downloaded from vbn.aau.dk on: October 11, 2016
+<br/> </td><td>('7855312', 'Sergio Escalera', 'sergio escalera')</td><td>If you believe that this document breaches copyright please contact us at vbn@aub.aau.dk providing details, and we will remove access to
+</td></tr><tr><td>2303d07d839e8b20f33d6e2ec78d1353cac256cf</td><td>Squeeze-and-Excitation on Spatial and Temporal
+<br/>Deep Feature Space for Action Recognition
+<br/><b>Institute of Information Science, Beijing Jiaotong University, Beijing 100044, China</b><br/>Beijing Key Laboratory of Advanced Information Science and Network Technology, Beijing 100044, China
+</td><td>('2896701', 'Gaoyun An', 'gaoyun an')<br/>('3027947', 'Wen Zhou', 'wen zhou')<br/>('47095962', 'Yuxuan Wu', 'yuxuan wu')<br/>('4464686', 'ZhenXing Zheng', 'zhenxing zheng')<br/>('46398737', 'Yongwen Liu', 'yongwen liu')</td><td>Email:{gyan, 16125155, 16120307, zhxzheng, 17120314}@bjtu.edu.cn
+</td></tr><tr><td>23d55061f7baf2ffa1c847d356d8f76d78ebc8c1</td><td>Solmaz et al. IPSJ Transactions on Computer Vision and
+<br/>Applications (2017) 9:22
+<br/>DOI 10.1186/s41074-017-0033-4
+<br/>IPSJ Transactions on Computer
+<br/>Vision and Applications
+<br/>RESEARCH PAPER
+<br/>Open Access
+<br/>Generic and attribute-specific deep
+<br/>representations for maritime vessels
+</td><td>('2827750', 'Berkan Solmaz', 'berkan solmaz')<br/>('2131286', 'Erhan Gundogdu', 'erhan gundogdu')<br/>('32499620', 'Aykut Koc', 'aykut koc')</td><td></td></tr><tr><td>23c3eb6ad8e5f18f672f187a6e9e9b0d94042970</td><td>Deep Domain Adaptation for Describing People Based on
+<br/>Fine-Grained Clothing Attributes
+<br/><b>IBM Research, Australia, 2 IBM T.J. Watson Research Center, 3 National University of Singapore</b><br/>Source domain
+<br/>RCNN
+<br/>body
+<br/>detection
+<br/>Alignment
+<br/>cost layer
+<br/>Multi-label
+<br/>attributes
+<br/>objective
+<br/>Target domain
+<br/>Alignment cost layer
+<br/>Extra Info
+<br/>(e.g. Labels)
+<br/>Figure 1: Our proposed Deep Domain Adaptation Network (DDAN).
+<br/>Source and target domains are modeled jointly with knowledge transfer oc-
+<br/>curring at multiple levels of the hierarchy through alignment cost layers.
+<br/>Describing people in detail is an important task for many applications.
+<br/>For instance, criminal investigation processes often involve searching for
+<br/>suspects based on detailed descriptions provided by eyewitnesses or com-
+<br/>piled from images captured by surveillance cameras. The FBI list of na-
+<br/>tionwide wanted bank robbers (https://bankrobbers.fbi.gov/) has clear exam-
+<br/><b>ples of such ne-grained descriptions, including attributes covering detailed</b><br/>color information (e.g., “light blue” “khaki”, “burgundy”), a variety of cloth-
+<br/>ing types (e.g., ‘leather jacket”, “polo-style shirt”, “zip-up windbreaker”)
+<br/>and also detailed clothing patterns (e.g., “narrow horizontal stripes”, “LA
+<br/>printed text”, “checkered”).
+<br/>Traditional computer vision methods for describing people, however,
+<br/>have only focused on a small set of coarse-grained attributes. As an exam-
+<br/>ple, the recent work of Zhang et al. [7] achieves impressive attribute predic-
+<br/>tion performance in unconstrained scenarios, but only considers nine human
+<br/>attributes. Existing systems for fashion analysis [1, 4, 6] and people search
+<br/>in surveillance videos [2, 5] also rely on a relatively small set of clothing
+<br/>attributes. Our work instead addresses the problem of describing people
+<br/>with very fine-grained clothing attributes. A natural question that arises in
+<br/>this setting is how to obtain a sufficient number of training samples for each
+<br/>attribute without significant annotation cost.
+<br/>Data collection: We observe that online shopping stores such as Ama-
+<br/>zon.com and TMALL.com have a large set of garment images with associ-
+<br/>ated descriptions. We created a huge dataset of clothing images with fine-
+<br/>grained attribute labels by crawling data from these shopping websites. Our
+<br/>dataset contains 1,108,013 clothing images with 25 different kinds attribute
+<br/>categories (e.g.
+<br/>type, color, pattern, season, occasion). The attribute la-
+<br/>bels are very fine-detailed. For instance, we can find thousands of different
+<br/>values for the “color” category. After data curation, we considered a subset
+<br/>of this data that is meaningful from our application perspective.
+<br/>Deep Domain Adaptation: Although we have collected a large-scale
+<br/>dataset with fine-grained attributes, these images are taken in ideal pose /
+<br/>lighting / background conditions, so it is unreliable to directly use them as
+<br/>training data for attribute prediction in the domain of unconstrained images
+<br/>captured, for example, by mobile phones or surveillance cameras. In or-
+<br/>der to bridge this gap, we design a specific double-path deep convolutional
+<br/>neural network for the domain adaptation problem. Each path receives one
+<br/>domain image as the input, i.e., the street domain and the shop domain im-
+<br/>ages. Each path consists of several convolutional layers which are stacked
+<br/>layer-by-layer and normally higher layers represent higher-level concept ab-
+<br/>stractions. Both of the two network paths share the same architecture, e.g.,
+<br/>the same number of convolutional filters and number of middle layers. This
+</td><td>('35370244', 'Qiang Chen', 'qiang chen')<br/>('1753492', 'Junshi Huang', 'junshi huang')<br/>('2106286', 'Jian Dong', 'jian dong')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')</td><td></td></tr><tr><td>23dd8d17ce09c22d367e4d62c1ccf507bcbc64da</td><td>Deep Density Clustering of Unconstrained Faces
+<br/>(Supplementary Material)
+<br/><b>University of Maryland, College Park</b><br/>A. Mathematical Details
+<br/>Let S = {i | 0 < αi < C}. We have the following results:
+<br/>nV(cid:88)
+<br/>nV(cid:88)
+<br/>i=1
+<br/>c∗ =
+<br/>w∗ =
+<br/>αiΨθ(xi),
+<br/>¯R∗ = (cid:107)Ψθ(xs) − c∗(cid:107)2 ,
+<br/>αiΨθ(xi),
+<br/>ρ∗ = w∗T Ψθ(xs),
+<br/>where s ∈ S. Substituting into (3) and (4), we obtain
+<br/>hSVDD(x) = 2 · hOC-SVM(x) = 2
+<br/>αiK(xi, x) − ρ∗
+<br/>(cid:34) nV(cid:88)
+<br/>i=1
+<br/>(1)
+<br/>(2)
+<br/>(5)
+<br/>(6)
+<br/>(cid:35)
+<br/>(7)
+<br/>A.2. Proof of Theorem 1
+<br/>Theorem 1. If 1/nV < ν ≤ 1 and c∗T Ψθ(xs) (cid:54)= 0 for
+<br/>some support vector xs, hSVDD(x) defined in (3) is asymp-
+<br/>totically a Parzen window density estimator in the feature
+<br/>space with Epanechnikov kernel.
+<br/>Proof. Given the condition, according to Lemma 1,
+<br/>hSVDD(x) is equivalent to hOC-SVM(x) with ρ∗ (cid:54)= 0. From
+<br/>the results in [10] and the fact that(cid:80) αi = 1, we obtain:
+<br/>(cid:21)
+<br/>(cid:20)
+<br/>hOC-SVM(x) =
+<br/>αi
+<br/>1 − 1
+<br/>(cid:107)Ψθ(x) − Ψθ(xi)(cid:107)2
+<br/>(cid:18)(cid:107)Ψθ(x) − Ψθ(xi)(cid:107)
+<br/>(cid:19)
+<br/>− ρ∗
+<br/>− ρ∗ − 1,
+<br/>αiKE
+<br/>nV(cid:88)
+<br/>nV(cid:88)
+<br/>i=1
+<br/>i=1
+<br/>4 (1 − u2), |u| ≤ 1 is the Epanechnikov
+<br/>where KE(u) = 3
+<br/>kernel. As a consequence of Proposition 4 in [10] and the
+<br/>proof of Proposition 1 in [11], as nV → ∞, the fraction
+<br/>of support vector is ν, and the fraction of points with 0 <
+<br/>αi < 1/(ν · nV ) vanishes. Therefore, either αi = 0 or
+<br/>αi = 1/(ν · nV ). We introduce the notation ¯S = {i | αi =
+<br/>ξ(z)
+<br/>i=1
+<br/>In this section, we first provide the two core mathe-
+<br/>matical formulations and then present detailed proofs for
+<br/>Lemma 1 and Theorem 1.
+<br/>SVDD formulation:
+<br/>(cid:88)
+<br/>z∈V (x)
+<br/>¯R +
+<br/>ν · nV
+<br/>min
+<br/>c, ¯R, ξ
+<br/>s.t.
+<br/>(cid:107)Ψθ(z) − c(cid:107)2 ≤ ¯R + ξ(z),
+<br/>ξ ≥ 0, ∀z ∈ V (x),
+<br/>OC-SVM formulation:
+<br/>(cid:88)
+<br/>min
+<br/>w, ρ, ξ
+<br/>s.t.
+<br/>(cid:107)w(cid:107)2 +
+<br/>ν · nV
+<br/>wT Ψθ(z) ≥ ρ − ξz,
+<br/>z∈V (x)
+<br/>ξz − ρ
+<br/>ξz ≥ 0, ∀z ∈ V (x).
+<br/>A.1. Proof of Lemma 1
+<br/>Lemma 1. If 1/nV < ν ≤ 1, the SVDD formulation in (1)
+<br/>is equivalent to the OC-SVM formulation in (2) when the
+<br/>evaluation functions for the two are given by
+<br/>hSVDD(x) = ¯R∗ − (cid:107)Ψθ(x) − c∗(cid:107)2 ,
+<br/>hOC-SVM(x) = w∗T Ψθ(x) − ρ∗,
+<br/>(3)
+<br/>(4)
+<br/>with the correspondence w∗ = c∗, and ρ∗ = c∗T Ψθ(xs),
+<br/>where xs is a support vector in (1) that lies on the learned
+<br/>enclosing sphere.
+<br/>Proof. The condition corresponds to the case 1/nV ≤ C <
+<br/>1 in [1] with C = 1/(ν · nV ). We introduce the kernel
+<br/>function K(xi, xj) = Ψθ(xi)T Ψθ(xj). Since K(xi, xi)
+<br/>is constant in our setting, the same dual formulation for (1)
+<br/>and (2) can be written as:
+<br/>(cid:88)
+<br/>min
+<br/>αiαjK(xi, xj)
+<br/>s.t.
+<br/>0 ≤ αi ≤ C,
+<br/>ij
+<br/>i=1
+<br/>nV(cid:88)
+<br/>αi = 1.
+</td><td>('3329881', 'Wei-An Lin', 'wei-an lin')<br/>('36407236', 'Jun-Cheng Chen', 'jun-cheng chen')</td><td>walin@umd.edu pullpull@cs.umd.edu carlos@cs.umd.edu rama@umiacs.umd.edu
+</td></tr><tr><td>23a8d02389805854cf41c9e5fa56c66ee4160ce3</td><td>Multimed Tools Appl
+<br/>DOI 10.1007/s11042-013-1568-8
+<br/>Influence of low resolution of images on reliability
+<br/>of face detection and recognition
+<br/>© The Author(s) 2013. This article is published with open access at SpringerLink.com
+</td><td>('2553748', 'Tomasz Marciniak', 'tomasz marciniak')<br/>('2009993', 'Radoslaw Weychan', 'radoslaw weychan')<br/>('40397247', 'Adam Dabrowski', 'adam dabrowski')</td><td></td></tr><tr><td>23b37c2f803a2d4b701e2f39c5f623b2f3e14d8e</td><td>Available Online at www.ijcsmc.com
+<br/>International Journal of Computer Science and Mobile Computing
+<br/>A Monthly Journal of Computer Science and Information Technology
+<br/>ISSN 2320–088X
+<br/> IJCSMC, Vol. 2, Issue. 4, April 2013, pg.646 – 649
+<br/>RESEARCH ARTICLE
+<br/>Modified Approaches on Face Recognition
+<br/>By using Multisensory Image
+<br/><b>Bharath University, India</b><br/><b>Bharath University, India</b></td><td></td><td></td></tr><tr><td>4f9e00aaf2736b79e415f5e7c8dfebda3043a97d</td><td>Machine Audition:
+<br/>Principles, Algorithms
+<br/>and Systems
+<br/><b>University of Surrey, UK</b><br/>InformatIon scIence reference
+<br/>Hershey • New York
+</td><td>('46314841', 'WenWu Wang', 'wenwu wang')</td><td></td></tr><tr><td>4fd29e5f4b7186e349ba34ea30738af7860cf21f</td><td></td><td></td><td></td></tr><tr><td>4f0d9200647042e41dea71c35eb59e598e6018a7</td><td><b></b><br/>Experiments of Image Retrieval Using Weak Attributes
+<br/><b>Columbia University, New York, NY</b></td><td>('1815972', 'Felix X. Yu', 'felix x. yu')<br/>('1725599', 'Rongrong Ji', 'rongrong ji')<br/>('3138710', 'Ming-Hen Tsai', 'ming-hen tsai')<br/>('35984288', 'Guangnan Ye', 'guangnan ye')<br/>('9546964', 'Shih-Fu Chang', 'shih-fu chang')</td><td>yfyuxinnan, rrji, yegng@ee.columbia.edu
+<br/>xfminghen, sfchangg@cs.columbia.edu
+</td></tr><tr><td>4f051022de100241e5a4ba8a7514db9167eabf6e</td><td>Face Parsing via a Fully-Convolutional Continuous
+<br/>CRF Neural Network
+</td><td>('48207414', 'Lei Zhou', 'lei zhou')<br/>('36300239', 'Zhi Liu', 'zhi liu')<br/>('1706670', 'Xiangjian He', 'xiangjian he')</td><td></td></tr><tr><td>4faded442b506ad0f200a608a69c039e92eaff11</td><td><b>STANBUL TECHNICAL UNIVERSITY INSTITUTE OF SCIENCE AND TECHNOLOGY</b><br/>FACE RECOGNITION UNDER VARYING
+<br/>ILLUMINATION
+<br/>Master Thesis by
+<br/>Department : Computer Engineering
+<br/>Programme: Computer Engineering
+<br/>JUNE 2006
+</td><td>('1968256', 'Erald VUÇINI', 'erald vuçini')<br/>('1766445', 'Muhittin GÖKMEN', 'muhittin gökmen')</td><td></td></tr><tr><td>4f7967158b257e86d66bdabfdc556c697d917d24</td><td>Guaranteed Parameter Estimation of Discrete Energy
+<br/>Minimization for 3D Scene Parsing
+<br/>CMU-RI-TR-16-49
+<br/>July 2016
+<br/>School of Computer Science
+<br/><b>Carnegie Mellon University</b><br/>Pittsburgh, PA 15213
+<br/>Thesis Committee:
+<br/>Daniel Huber, Advisor
+<br/>Submitted in partial fulfillment of the requirements
+<br/>for the degree of Master of Science in Robotics.
+</td><td>('3439037', 'Mengtian Li', 'mengtian li')<br/>('1691629', 'Alexander J. Smola', 'alexander j. smola')<br/>('1786435', 'David Fouhey', 'david fouhey')<br/>('3439037', 'Mengtian Li', 'mengtian li')</td><td></td></tr><tr><td>4fc936102e2b5247473ea2dd94c514e320375abb</td><td>Guess Where? Actor-Supervision for Spatiotemporal Action Localization
+<br/><b>KAUST1, University of Amsterdam2, Qualcomm Technologies, Inc</b></td><td>('2795139', 'Victor Escorcia', 'victor escorcia')<br/>('3409955', 'Cuong D. Dao', 'cuong d. dao')<br/>('40027484', 'Mihir Jain', 'mihir jain')<br/>('2931652', 'Bernard Ghanem', 'bernard ghanem')<br/>('1706203', 'Cees Snoek', 'cees snoek')</td><td></td></tr><tr><td>4f6adc53798d9da26369bea5a0d91ed5e1314df2</td><td>IEEE TRANSACTIONS ON SIGNAL PROCESSING, VOL. , NO. , 2016
+<br/>Online Nonnegative Matrix Factorization with
+<br/>General Divergences
+</td><td>('2345985', 'Renbo Zhao', 'renbo zhao')<br/>('1678675', 'Huan Xu', 'huan xu')</td><td></td></tr><tr><td>4fbef7ce1809d102215453c34bf22b5f9f9aab26</td><td></td><td></td><td></td></tr><tr><td>4fa0d73b8ba114578744c2ebaf610d2ca9694f45</td><td></td><td></td><td></td></tr><tr><td>4fcd19b0cc386215b8bd0c466e42934e5baaa4b7</td><td>Human Action Recognition using Factorized Spatio-Temporal
+<br/>Convolutional Networks
+<br/><b>Hong Kong University of Science and Technology</b><br/><b>Hong Kong University of Science and Technology</b><br/><b>cid:93) Faculty of Science and Technology, University of Macau</b><br/>§ Lenovo Corporate Research Hong Kong Branch
+</td><td>('1750501', 'Lin Sun', 'lin sun')<br/>('2370507', 'Kui Jia', 'kui jia')<br/>('1739816', 'Dit-Yan Yeung', 'dit-yan yeung')<br/>('2131088', 'Bertram E. Shi', 'bertram e. shi')</td><td>lsunece@ust.hk, kuijia@gmail.com, dyyeung@cse.ust.hk, eebert@ust.hk
+</td></tr><tr><td>4f591e243a8f38ee3152300bbf42899ac5aae0a5</td><td>SUBMITTED TO TPAMI
+<br/>Understanding Higher-Order Shape
+<br/>via 3D Shape Attributes
+</td><td>('1786435', 'David F. Fouhey', 'david f. fouhey')<br/>('1737809', 'Abhinav Gupta', 'abhinav gupta')<br/>('1688869', 'Andrew Zisserman', 'andrew zisserman')</td><td></td></tr><tr><td>4f9958946ad9fc71c2299847e9ff16741401c591</td><td>Facial Expression Recognition with Recurrent Neural Networks
+<br/>Robotics and Embedded Systems Lab, Department of Computer Science
+<br/>Image Understanding and Knowledge-Based Systems, Department of Computer Science
+<br/>Technische Universit¨at M¨unchen, Germany
+</td><td>('1753223', 'Alex Graves', 'alex graves')<br/>('1685773', 'Christoph Mayer', 'christoph mayer')<br/>('32131501', 'Matthias Wimmer', 'matthias wimmer')<br/>('1699132', 'Bernd Radig', 'bernd radig')</td><td>[graves,juergen.schmidhuber]@in.tum.de
+<br/>[mayerc,wimmerm,radig]@informatik.tu-muenchen.de
+</td></tr><tr><td>4f773c8e7ca98ece9894ba3a22823127a70c6e6c</td><td>A Real-Time System for Head Tracking
+<br/>and Pose Estimation
+<br/><b>Robotics Institute, Carnegie Mellon University</b><br/>2 Electrical & Controls Integration Lab, General Motors R&D
+</td><td>('29915644', 'Zengyin Zhang', 'zengyin zhang')<br/>('2918263', 'Minyoung Kim', 'minyoung kim')<br/>('1707876', 'Fernando De la Torre', 'fernando de la torre')<br/>('9399514', 'Wende Zhang', 'wende zhang')</td><td></td></tr><tr><td>4ff11512e4fde3d1a109546d9c61a963d4391add</td><td>Proceedings of the Twenty-Ninth International
+<br/>Florida Artificial Intelligence Research Society Conference
+<br/>Selecting Vantage Points for an Autonomous Quadcopter Videographer
+<br/>Google
+<br/>Mountain View, CA
+<br/>Gita Sukthankar
+<br/><b>University of Central Florida</b><br/>Orlando, FL
+<br/>Google
+<br/>Mountain View, CA
+</td><td>('3391381', 'Rey Coaguila', 'rey coaguila')<br/>('1694199', 'Rahul Sukthankar', 'rahul sukthankar')</td><td>reyc@google.com
+<br/>gitars@eecs.ucf.edu
+<br/>sukthankar@google.com
+</td></tr><tr><td>4f028efe6708fc252851eee4a14292b7ce79d378</td><td>An Integrated Shape and Intensity Coding Scheme for Face Recognition
+<br/>Department of Computer Science
+<br/><b>George Mason University</b><br/>Fairfax, VA 22030-4444
+</td><td>('39664966', 'Chengjun Liu', 'chengjun liu')<br/>('1781577', 'Harry Wechsler', 'harry wechsler')</td><td>fcliu, wechslerg@cs.gmu.edu
+</td></tr><tr><td>4f0bf2508ae801aee082b37f684085adf0d06d23</td><td></td><td></td><td></td></tr><tr><td>4ff4c27e47b0aa80d6383427642bb8ee9d01c0ac</td><td>Deep Convolutional Neural Networks and Support
+<br/>Vector Machines for Gender Recognition
+<br/><b>Institute of Arti cial Intelligence and Cognitive Engineering</b><br/>Faculty of Mathematics and Natural Sciences
+<br/><b>University of Groningen, The Netherlands</b></td><td>('3405120', 'Jos van de Wolfshaar', 'jos van de wolfshaar')</td><td></td></tr><tr><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td><td>Fashion Landmark Detection in the Wild
+<br/><b>The Chinese University of Hong Kong</b><br/><b>Shenzhen Key Lab of Comp. Vis. and Pat. Rec., Shenzhen Institutes of Advanced</b><br/>Technology, CAS, China
+</td><td>('3243969', 'Ziwei Liu', 'ziwei liu')<br/>('1979911', 'Sijie Yan', 'sijie yan')<br/>('1693209', 'Ping Luo', 'ping luo')<br/>('31843833', 'Xiaogang Wang', 'xiaogang wang')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td>{lz013,siyan,pluo,xtang}@ie.cuhk.edu.hk, xgwang@ee.cuhk.edu.hk
+</td></tr><tr><td>4f4f920eb43399d8d05b42808e45b56bdd36a929</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 123 – No.4, August 2015
+<br/>A Novel Method for 3D Image Segmentation with Fusion
+<br/>of Two Images using Color K-means Algorithm
+<br/>Neelam Kushwah
+<br/>Dept. of CSE
+<br/>ITM Universe
+<br/>Gwalior
+<br/>Priusha Narwariya
+<br/>Dept. of CSE
+<br/>ITM Universe
+<br/>Gwalior
+<br/>two
+</td><td></td><td></td></tr><tr><td>4f0b8f730273e9f11b2bfad2415485414b96299f</td><td>BDD100K: A Diverse Driving Video Database with
+<br/>Scalable Annotation Tooling
+<br/>1UC Berkeley
+<br/><b>Georgia Institute of Technology</b><br/><b>Peking University</b><br/>4Uber AI Labs
+</td><td>('1807197', 'Fisher Yu', 'fisher yu')<br/>('32324034', 'Fangchen Liu', 'fangchen liu')<br/>('8309711', 'Vashisht Madhavan', 'vashisht madhavan')<br/>('1753210', 'Trevor Darrell', 'trevor darrell')</td><td></td></tr><tr><td>4f77a37753c03886ca9c9349723ec3bbfe4ee967</td><td>Localizing Facial Keypoints with Global Descriptor Search,
+<br/>Neighbour Alignment and Locally Linear Models
+<br/>1 ´Ecole Polytechnique de Montr´eal, Universit´e de Montr´eal
+<br/><b>University of Toronto and Recognyz Systems Technologies</b><br/>also focused on emotion recognition in the wild [9].
+</td><td>('1972076', 'Christopher Pal', 'christopher pal')<br/>('9422894', 'Sharon Moalem', 'sharon moalem')</td><td>md-kamrul.hasan@polymtl.ca, christohper.pal@polymtl.ca, sharon@recognyz.com
+</td></tr><tr><td>4f7b92bd678772552b3c3edfc9a7c5c4a8c60a8e</td><td>Deep Density Clustering of Unconstrained Faces
+<br/><b>University of Maryland, College Park</b></td><td>('3329881', 'Wei-An Lin', 'wei-an lin')<br/>('36407236', 'Jun-Cheng Chen', 'jun-cheng chen')</td><td>walin@umd.edu pullpull@cs.umd.edu carlos@cs.umd.edu rama@umiacs.umd.edu
+</td></tr><tr><td>4f36c14d1453fc9d6481b09c5a09e91d8d9ee47a</td><td>DU,CHELLAPPA: VIDEO-BASED FACE RECOGNITION
+<br/>Video-Based Face Recognition Using the
+<br/>Intra/Extra-Personal Difference Dictionary
+<br/>Department of Electrical and Computer
+<br/>Engineering
+<br/><b>University of Maryland</b><br/><b>College Park, USA</b></td><td>('35554856', 'Ming Du', 'ming du')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td>mingdu@umd.edu
+<br/>rama@umiacs.umd.edu
+</td></tr><tr><td>8d71872d5877c575a52f71ad445c7e5124a4b174</td><td></td><td></td><td></td></tr><tr><td>8de06a584955f04f399c10f09f2eed77722f6b1c</td><td>Author manuscript, published in "International Conference on Computer Vision Theory and Applications (VISAPP 2013) (2013)"
+</td><td></td><td></td></tr><tr><td>8d4f0517eae232913bf27f516101a75da3249d15</td><td>ARXIV SUBMISSION, MARCH 2018
+<br/>Event-based Dynamic Face Detection and
+<br/>Tracking Based on Activity
+</td><td>('2500521', 'Gregor Lenz', 'gregor lenz')<br/>('1773138', 'Sio-Hoi Ieng', 'sio-hoi ieng')<br/>('1750848', 'Ryad Benosman', 'ryad benosman')</td><td></td></tr><tr><td>8de2dbe2b03be8a99628ffa000ac78f8b66a1028</td><td>´Ecole Nationale Sup´erieure dInformatique et de Math´ematiques Appliqu´ees de Grenoble
+<br/>INP Grenoble – ENSIMAG
+<br/>UFR Informatique et Math´ematiques Appliqu´ees de Grenoble
+<br/>Rapport de stage de Master 2 et de projet de fin d’´etudes
+<br/>Effectu´e au sein de l’´equipe LEAR, I.N.R.I.A., Grenoble
+<br/>Action Recognition in Videos
+<br/>3e ann´ee ENSIMAG – Option I.I.I.
+<br/>M2R Informatique – sp´ecialit´e I.A.
+<br/>04 f´evrier 2008 – 04 juillet 2008
+<br/>LEAR,
+<br/>I.N.R.I.A., Grenoble
+<br/>655 avenue de l’Europe
+<br/>38 334 Montbonnot
+<br/>France
+<br/>Responsable de stage
+<br/>Mme. Cordelia Schmid
+<br/>Tuteur ´ecole
+<br/>Jury
+</td><td>('16585941', 'Gaidon Adrien', 'gaidon adrien')<br/>('31899928', 'M. Augustin Lux', 'm. augustin lux')<br/>('12844736', 'Roger Mohr', 'roger mohr')<br/>('40419740', 'M. James Crowley', 'm. james crowley')</td><td></td></tr><tr><td>8d3fbdb9783716c1832a0b7ab1da6390c2869c14</td><td>12
+<br/>Discriminant Subspace Analysis for Uncertain
+<br/>Situation in Facial Recognition
+<br/><b>School of Computing and Communications University of Technology, Sydney</b><br/>Australia
+<br/>1. Introduction
+<br/>Facial analysis and recognition have received substential attention from researchers in
+<br/>biometrics, pattern recognition, and computer vision communities. They have a large
+<br/>number of applications, such as security, communication, and entertainment. Although a
+<br/>great deal of efforts has been devoted to automated face recognition systems, it still remains
+<br/>a challenging uncertainty problem. This is because human facial appearance has potentially
+<br/>of very large intra-subject variations of head pose, illumination, facial expression, occlusion
+<br/>due to other objects or accessories, facial hair and aging. These misleading variations may
+<br/>cause classifiers to degrade generalization performance.
+<br/>It is important for face recognition systems to employ an effective feature extraction scheme
+<br/>to enhance separability between pattern classes which should maintain and enhance
+<br/>features of the input data that make distinct pattern classes separable (Jan, 2004). In general,
+<br/>there exist a number of different feature extraction methods. The most common feature
+<br/>extraction methods are subspace analysis methods such as principle component analysis
+<br/>(PCA) (Kirby & Sirovich, 1990) (Jolliffe, 1986) (Turk & Pentland, 1991b), kernel principle
+<br/>component analysis (KPCA) (Schölkopf et al., 1998) (Kim et al., 2002) (all of which extract
+<br/>the most informative features and reduce the feature dimensionality), Fisher’s linear
+<br/>discriminant analysis (FLD) (Duda et al., 2000) (Belhumeur et al., 1997), and kernel Fisher’s
+<br/>discriminant analysis (KFLD) (Mika et al., 1999) (Scholkopf & Smola, 2002) (which
+<br/>discriminate different patterns; that is, they minimize the intra-class pattern compactness
+<br/>while enhancing the extra-class separability). The discriminant analysis is necessary because
+<br/>the patterns may overlap in decision space.
+<br/>Recently, Lu et al. (Lu et al., 2003) stated that PCA and LDA are the most widely used
+<br/>conventional tools for dimensionality reduction and feature extraction in the appearance-
+<br/>based face recognition. However, because facial features are naturally non-linear and the
+<br/>inherent linear nature of PCA and LDA, there are some limitations when applying these
+<br/>methods to the facial data distribution (Bichsel & Pentland, 1994) (Lu et al., 2003). To
+<br/>overcome such problems, nonlinear methods can be applied to better construct the most
+<br/>discriminative subspace.
+<br/>In real world applications, overlapping classes and various environmental variations can
+<br/>significantly impact face recognition accuracy and robustness. Such misleading information
+<br/>make Machine Learning difficult in modelling facial data. According to Adini et al. (Adini et
+<br/>al., 1997), it is desirable to have a recognition system which is able to recognize a face
+<br/>insensitive to these within-personal variations.
+</td><td>('3333820', 'Pohsiang Tsai', 'pohsiang tsai')<br/>('2184946', 'Tich Phuoc Tran', 'tich phuoc tran')<br/>('1801256', 'Tom Hintz', 'tom hintz')<br/>('2567343', 'Tony Jan', 'tony jan')</td><td></td></tr><tr><td>8d42a24d570ad8f1e869a665da855628fcb1378f</td><td>CVPR
+<br/>#987
+<br/>000
+<br/>001
+<br/>002
+<br/>003
+<br/>004
+<br/>005
+<br/>006
+<br/>007
+<br/>008
+<br/>009
+<br/>010
+<br/>011
+<br/>012
+<br/>013
+<br/>014
+<br/>015
+<br/>016
+<br/>017
+<br/>018
+<br/>019
+<br/>020
+<br/>021
+<br/>022
+<br/>023
+<br/>024
+<br/>025
+<br/>026
+<br/>027
+<br/>028
+<br/>029
+<br/>030
+<br/>031
+<br/>032
+<br/>033
+<br/>034
+<br/>035
+<br/>036
+<br/>037
+<br/>038
+<br/>039
+<br/>040
+<br/>041
+<br/>042
+<br/>043
+<br/>044
+<br/>045
+<br/>046
+<br/>047
+<br/>048
+<br/>049
+<br/>050
+<br/>051
+<br/>052
+<br/>053
+<br/>CVPR 2009 Submission #987. CONFIDENTIAL REVIEW COPY. DO NOT DISTRIBUTE.
+<br/>An Empirical Study of Context in Object Detection
+<br/>Anonymous CVPR submission
+<br/>Paper ID 987
+</td><td></td><td></td></tr><tr><td>8d8461ed57b81e05cc46be8e83260cd68a2ebb4d</td><td>Age identification of Facial Images using Neural
+<br/>Network
+<br/>CSE Department,CSVTU
+<br/>RIT, Raipur, Chhattisgarh , INDIA
+</td><td>('7530203', 'Sneha Thakur', 'sneha thakur')</td><td></td></tr><tr><td>8d4f12ed7b5a0eb3aa55c10154d9f1197a0d84f3</td><td>Cascaded Pose Regression
+<br/>Piotr Doll´ar
+<br/><b>California Institute of Technology</b></td><td>('2930640', 'Peter Welinder', 'peter welinder')<br/>('1690922', 'Pietro Perona', 'pietro perona')</td><td>{pdollar,welinder,perona}@caltech.edu
+</td></tr><tr><td>8de6deefb90fb9b3f7d451b9d8a1a3264b768482</td><td>Multibiometric Systems: Fusion Strategies and
+<br/>Template Security
+<br/>By
+<br/>A Dissertation
+<br/>Submitted to
+<br/><b>Michigan State University</b><br/>in partial fulfillment of the requirements
+<br/>for the degree of
+<br/>Doctor of Philosophy
+<br/>Department of Computer Science and Engineering
+<br/>2008
+</td><td>('34633765', 'Karthik Nandakumar', 'karthik nandakumar')</td><td></td></tr><tr><td>8d2c0c9155a1ed49ba576ac0446ec67725468d87</td><td>A Study of Two Image Representations for Head Pose Estimation
+<br/>Dept. of Computer Science and Technology,
+<br/><b>Tsinghua University, Beijing, China</b></td><td>('1968464', 'Ligeng Dong', 'ligeng dong')<br/>('3265275', 'Linmi Tao', 'linmi tao')<br/>('1797002', 'Guangyou Xu', 'guangyou xu')</td><td>dongligeng99@mails.thu.edu.cn,
+<br/>{linmi, xgy-dcs}@tsinghua.edu.cn
+</td></tr><tr><td>8d384e8c45a429f5c5f6628e8ba0d73c60a51a89</td><td>Temporal Dynamic Graph LSTM for Action-driven Video Object Detection
+<br/><b>The Hong Kong University of Science and Technology 2 Carneige Mellon University</b></td><td>('38937910', 'Yuan Yuan', 'yuan yuan')</td><td>yyuanad@ust.hk, xiaodan1@cs.cmu.edu, xiaolonw@cs.cmu.edu, dyyeung@cse.ust.hk, abhinavg@cs.cmu.edu
+</td></tr><tr><td>8d0243b8b663ca0ab7cbe613e3b886a5d1c8c152</td><td>Development of Optical Computer Recognition (OCR) for Monitoring Stress and Emotions in Space
+<br/><b>Center for Computational Biomedicine Imaging and Modeling Center, Rutgers University, New Brunswick, NJ</b><br/><b>USA, 2Unit for Experimental Psychiatry, University of Pennsylvania School of Medicine</b><br/>Philadelphia, PA, USA
+<br/>INTRODUCTION. While in space, astronauts are required to perform mission-critical tasks on very expensive
+<br/>equipment at a high level of functional capability. Stressors can compromise their ability to do so, thus it is very
+<br/>important to have a system that can unobtrusively and objectively detect neurobehavioral problems involving
+<br/>elevated levels of behavioral stress and negative emotions. Computerized approaches involving inexpensive cameras
+<br/>offer an unobtrusive way to detect distress and to monitor observable emotions of astronauts during critical
+<br/>operations in space, by tracking and analyzing facial expressions and body gestures in video streams. Such systems
+<br/>can have applications beyond space flight, e.g., surveillance, law enforcement and human computer interaction.
+<br/>TECHNOLOGY DEVELOPMENT. We developed a framework [1-9] that is capable of real time tracking of faces
+<br/>and skin blobs of heads and hands. Face tracking uses a group of deformable statistical models of facial shape
+<br/>variation and local texture distribution to robustly track facial landmarks (e.g., eyes, eyebrows, nose, mouth). The
+<br/>model tolerates partial occlusions, it automatically detects and recovers from lost track, and it handles head rotations
+<br/>up to full profile view. The skin blob tracker is initialized with a generic skin color model, dynamically learning the
+<br/>specific color distribution online for adaptive tracking. Detected facial landmarks and blobs are filtered online, both
+<br/>in terms of shape and motion, using eigenspace analysis and temporal dynamical models to prune false detections.
+<br/>We then extract geometric and appearance features to learn models that detect relevant gestures and facial
+<br/>expressions. In particular, our method utilizes the relative intensity ordering of facial expressions (i.e., neutral, onset,
+<br/>apex, offset) found in the training set to learn a ranking model (Rankboost) for their recognition and intensity
+<br/>estimation, which improves our average recognition rate (~87.5% on CMU benchmark database [4,10]). In relation
+<br/>to stress detection, we piloted an experiment to learn subject-specific models of deception detection using behavioral
+<br/>cues to discriminate stressed and relaxed behaviors. We video recorded 147 subjects in 12-question interviews after
+<br/>a mock crime scenario, tracking their facial expressions and body gestures using our algorithm. Using leave-one-out
+<br/>cross validation we acquired separate Nearest Neighbor models per subject, discriminating deceptive from truthful
+<br/>responses with an average accuracy of 81.6% [7, 9]. We are currently experimenting with structured sparsity [14]
+<br/>and super-resolution [11-13] techniques to obtain better quality image features to improve tracking and recognition
+</td><td>('11788023', 'N. Michael', 'n. michael')<br/>('1748881', 'F. Yang', 'f. yang')<br/>('29384491', 'D. Metaxas', 'd. metaxas')</td><td></td></tr><tr><td>8d6c4af9d4c01ff47fe0be48155174158a9a5e08</td><td>Labeling, Discovering, and Detecting Objects in
+<br/>Images
+<br/>by
+<br/>Bryan Christopher Russell
+<br/>A.B., Computer Science
+<br/><b>Dartmouth College</b><br/>S.M., Electrical Engineering and Computer Science
+<br/><b>Massachusetts Institute of Technology</b><br/>Submitted to the Department of Electrical Engineering and Computer
+<br/>in partial fulfillment of the requirements for the degree of
+<br/>Doctor of Philosophy in Electrical Engineering and Computer Science
+<br/>Science
+<br/>at the
+<br/><b>MASSACHUSETTS INSTITUTE OF TECHNOLOGY</b><br/>February 2008
+<br/>c(cid:13) Bryan Christopher Russell, MMVIII. All rights reserved.
+<br/>The author hereby grants to MIT permission to reproduce and
+<br/>distribute publicly paper and electronic copies of this thesis document
+<br/>in whole or in part.
+<br/>Author . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+<br/>Department of Electrical Engineering and Computer Science
+<br/>January 28, 2007
+<br/>Certified by . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+<br/>William T. Freeman
+<br/>Professor
+<br/>Thesis Supervisor
+<br/>Accepted by . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+<br/>Terry P. Orlando
+<br/>Chairman, Department Committee on Graduate Students
+</td><td></td><td></td></tr><tr><td>8dcc95debd07ebab1721c53fa50d846fef265022</td><td>MicroExpNet: An Extremely Small and Fast Model For Expression Recognition
+<br/>From Frontal Face Images
+<br/>˙Ilke C¸ u˘gu, Eren S¸ener, Emre Akbas¸
+<br/><b>Middle East Technical University</b><br/>06800 Ankara, Turkey
+</td><td></td><td>{cugu.ilke, sener.eren}@metu.edu.tr, emre@ceng.metu.edu.tr
+</td></tr><tr><td>8dbe79830713925affc48d0afa04ed567c54724b</td><td></td><td></td><td></td></tr><tr><td>8d1adf0ac74e901a94f05eca2f684528129a630a</td><td>Facial Expression Recognition Using Facial
+<br/>Movement Features
+</td><td></td><td></td></tr><tr><td>8d91f06af4ef65193f3943005922f25dbb483ee4</td><td>Facial Expression Classification Using Rotation
+<br/>Slepian-based Moment Invariants
+<br/><b>Faculty of Science and Technology, University of Macau</b><br/>Macao, China
+</td><td>('2888882', 'Cuiming Zou', 'cuiming zou')<br/>('3369665', 'Kit Ian Kou', 'kit ian kou')</td><td></td></tr><tr><td>8dc9de0c7324d098b537639c8214543f55392a6b</td><td>Pose-invariant 3d object recognition using linear combination of 2d views and
+<br/>evolutionary optimisation
+<br/>Department of Computer Science,
+<br/><b>University College London</b><br/>Malet Place, London, WC1E 6BT
+</td><td>('1797883', 'Vasileios Zografos', 'vasileios zografos')<br/>('31557997', 'Bernard F. Buxton', 'bernard f. buxton')</td><td>{v.zografos, b.buxton}@cs.ucl.ac.uk
+</td></tr><tr><td>8d712cef3a5a8a7b1619fb841a191bebc2a17f15</td><td></td><td></td><td></td></tr><tr><td>8d646ac6e5473398d668c1e35e3daa964d9eb0f6</td><td>MEMORY-EFFICIENT GLOBAL REFINEMENT OF DECISION-TREE ENSEMBLES AND
+<br/>ITS APPLICATION TO FACE ALIGNMENT
+<br/>Nenad Markuˇs†
+<br/>Ivan Gogi´c†
+<br/>Igor S. Pandˇzi´c†
+<br/>J¨orgen Ahlberg‡
+<br/><b>University of Zagreb, Faculty of Electrical Engineering and Computing, Unska 3, 10000 Zagreb, Croatia</b><br/><b>Computer Vision Laboratory, Link oping University, SE-581 83 Link oping, Sweden</b></td><td></td><td></td></tr><tr><td>8dffbb6d75877d7d9b4dcde7665888b5675deee1</td><td>Emotion Recognition with Deep-Belief
+<br/>Networks
+<br/>Introduction
+<br/>For our CS229 project, we studied the problem of
+<br/>reliable computerized emotion recognition in images of
+<br/>human
+<br/>faces. First, we performed a preliminary
+<br/>exploration using SVM classifiers, and then developed an
+<br/>approach based on Deep Belief Nets. Deep Belief Nets, or
+<br/>DBNs, are probabilistic generative models composed of
+<br/>multiple layers of stochastic latent variables, where each
+<br/>“building block” layer is a Restricted Boltzmann Machine
+<br/>(RBM). DBNs have a greedy layer-wise unsupervised
+<br/>learning algorithm as well as a discriminative fine-tuning
+<br/>procedure for optimizing performance on classification
+<br/>tasks. [1].
+<br/>We trained our classifier on three databases: the
+<br/>Cohn-Kanade Extended Database (CK+) [2], the Japanese
+<br/>Female Facial Expression Database (JAFFE) [3], and the
+<br/>Yale Face Database (YALE) [4]. We tested several
+<br/>different database configurations, image pre-processing
+<br/>settings, and DBN parameters, and obtained test errors as
+<br/>low as 20% on a limited subset of the emotion labels.
+<br/>Finally, we created a real-time system which takes
+<br/>images of a single subject using a computer webcam and
+<br/>classifies the emotion shown by the subject.
+<br/>Part 1: Exploration of SVM-based approaches
+<br/>To set a baseline for comparison, we applied an
+<br/>SVM classifier to the emotion images in the CK+
+<br/>database, using the LIBLINEAR library and its MATLAB
+<br/>interface [5]. This database contains 593 image sequences
+<br/>across 123 human subjects, beginning with a “neutral
+<br/>“expression and showing the progression to one of seven
+<br/>“peak” emotions. When given both a neutral and an
+<br/>expressive face to compare, the SVM obtained accuracy
+<br/>as high as 90%. This
+<br/>the
+<br/>implementation of the SVM classifier. For additional
+<br/>details on this stage of the project, please see our
+<br/>Milestone document.
+<br/>Part 1.1 Choice of labels (emotion numbers vs. FACS
+<br/>features)
+<br/>The CK+ database offers two sets of emotion
+<br/>features: “emotion numbers” and FACS features. Emotion
+<br/>numbers are integer values representing the main emotion
+<br/>shown in the “peak emotion” image. The emotions are
+<br/>coded as follows: 1=anger, 2=contempt, 3=disgust,
+<br/>4=fear, 5=happiness, 6=sadness, and 7=surprise.
+<br/>The other labeling option is called FACS, or the
+<br/>Facial Action Coding System. FACS decomposes every
+<br/>summarizes
+<br/>section
+<br/>facial emotion into a set of Action Units (AUs), which
+<br/>describe the specific muscle groups involved in forming
+<br/>the emotion. We chose not to use FACS because accurate
+<br/>labeling currently requires trained human experts [8], and
+<br/>we are interesting in creating an automated system.
+<br/>
+<br/>Part 1.2 Features
+<br/>Part 1.2.1 Norm of differences between neutral face
+<br/>and full emotion
+<br/>Each of the CK+ images has been hand-labeled with
+<br/>68 standard Active Appearance Models (AAM) face
+<br/>landmarks that describe the X and Y position of these
+<br/>landmarks on the image (Figure 1).
+<br/>Figure 1. AAM Facial Landmarks
+<br/>We initially trained the SVM on the norm of the
+<br/>vector differences in landmark positions between the
+<br/>neutral and peak expressions. With this approach, the
+<br/>training error was approximately 35% for hold out cross
+<br/>validation (see Figure 2).
+<br/>with
+<br/>Figure 3. Accuracy of
+<br/>SVM with separate X, Y
+<br/>displacement features.
+<br/>Figure 2. Accuracy of
+<br/>SVM
+<br/>norm-
+<br/>displacement features.
+<br/>Part 1.2.2 Separate X and Y differences between
+<br/>neutral face and full emotion
+<br/>Because the initial approach did not differentiate
+<br/>between displacements of
+<br/>in different
+<br/>directions, we also provided the differences in the X and
+<br/>Y components of each landmark separately. This doubled
+<br/>the size of our feature vector, and resulting in a significant
+<br/>(about 20%) improvement in accuracy (Figure 3).
+<br/>Part 1.2.3 Feature Selection
+<br/>landmarks
+<br/>Finally, we visualized which features were the most
+<br/>important for classifying each emotion; the results can be
+<br/>seen in Figure 4. The figure shows the X and Y
+</td><td>('39818775', 'Tom McLaughlin', 'tom mclaughlin')</td><td></td></tr><tr><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td><td>ChaLearn Looking at People:
+<br/>A Review of Events and Resources
+<br/>1 Dept. Mathematics and Computer Science, UB, Spain,
+<br/>2 Computer Vision Center, UAB, Barcelona, Spain,
+<br/><b>EIMT, Open University of Catalonia, Barcelona, Spain</b><br/>4 ChaLearn, California, USA, 5 INAOE, Puebla, Mexico,
+<br/>6 Universit´e Paris-Saclay, Paris, France,
+<br/>http://chalearnlap.cvc.uab.es
+</td><td>('7855312', 'Sergio Escalera', 'sergio escalera')<br/>('1742688', 'Hugo Jair Escalante', 'hugo jair escalante')<br/>('1743797', 'Isabelle Guyon', 'isabelle guyon')</td><td>sergio.escalera.guerrero@gmail.com
+</td></tr><tr><td>8dce38840e6cf5ab3e0d1b26e401f8143d2a6bff</td><td>Towards large scale multimedia indexing:
+<br/>A case study on person discovery in broadcast news
+<br/><b>Idiap Research Institute and EPFL, 2 LIMSI, CNRS, Univ. Paris-Sud, Universit Paris-Saclay</b><br/>3 CNRS, Irisa & Inria Rennes, 4 PUC de Minas Gerais, Belo Horizonte,
+<br/><b>Universitat Polit cnica de Catalunya, 6 University of Vigo, 7 LIUM, University of Maine</b></td><td>('39560344', 'Nam Le', 'nam le')<br/>('2578933', 'Hervé Bredin', 'hervé bredin')<br/>('2710421', 'Gabriel Sargent', 'gabriel sargent')<br/>('2613332', 'Miquel India', 'miquel india')<br/>('1794658', 'Paula Lopez-Otero', 'paula lopez-otero')<br/>('1802247', 'Claude Barras', 'claude barras')<br/>('1804407', 'Camille Guinaudeau', 'camille guinaudeau')<br/>('1708671', 'Guillaume Gravier', 'guillaume gravier')<br/>('23556030', 'Gabriel Barbosa da Fonseca', 'gabriel barbosa da fonseca')<br/>('32255257', 'Izabela Lyon Freire', 'izabela lyon freire')<br/>('37401316', 'Gerard Martí', 'gerard martí')<br/>('2585946', 'Josep Ramon Morros', 'josep ramon morros')<br/>('1726311', 'Javier Hernando', 'javier hernando')<br/>('2446815', 'Sylvain Meignier', 'sylvain meignier')<br/>('1719610', 'Jean-Marc Odobez', 'jean-marc odobez')</td><td>nle@idiap.ch,bredin@limsi.fr,gabriel.sargent@irisa.fr,miquel.india@tsc.upc.edu,plopez@gts.uvigo.es
+</td></tr><tr><td>153f5ad54dd101f7f9c2ae17e96c69fe84aa9de4</td><td>Overview of algorithms for face detection and
+<br/>tracking
+<br/>Nenad Markuˇs
+</td><td></td><td></td></tr><tr><td>155199d7f10218e29ddaee36ebe611c95cae68c4</td><td>Towards Scalable Visual Navigation of
+<br/>Micro Aerial Vehicles
+<br/><b>Robotics Institute</b><br/>School of Computer Science
+<br/><b>Carnegie Mellon University</b><br/>Pittsburgh, PA 15213
+<br/>April 2016
+<br/>Thesis Supervisors:
+<br/>Prof. Dr. Martial Hebert
+<br/>Prof. Dr. J. Andrew Bagnell
+<br/>Submitted in partial fulfillment of the requirements
+<br/>for the degree of Master of Science in Robotics.
+<br/>CMU-RI-TR-16-07
+</td><td>('2739544', 'Shreyansh Daftry', 'shreyansh daftry')<br/>('2739544', 'Shreyansh Daftry', 'shreyansh daftry')</td><td>daftry@cmu.edu
+</td></tr><tr><td>15cd05baa849ab058b99a966c54d2f0bf82e7885</td><td>Structured Sparse Subspace Clustering: A Unified Optimization Framework
+<br/><b>SICE, Beijing University of Posts and Telecommunications. 2Center for Imaging Science, Johns Hopkins University</b><br/>In many real-world applications, we need to deal with high-dimensional
+<br/>datasets, such as images, videos, text, and more. In practice, such high-
+<br/>dimensional datasets can be well approximated by multiple low-dimensional
+<br/>subspaces corresponding to multiple classes or categories. For example, the
+<br/>feature point trajectories associated with a rigidly moving object in a video
+<br/>lie in an affine subspace (of dimension up to 4), and face images of a subject
+<br/>under varying illumination lie in a linear subspace (of dimension up to 9).
+<br/>Therefore, the task, known in the literature as subspace clustering [6], is
+<br/>to segment the data into the corresponding subspaces and finds multiple
+<br/>applications in computer vision.
+<br/>State of the art approaches [1, 2, 3, 4, 5, 7] for solving this problem fol-
+<br/>low a two-stage approach: a) Construct an affinity matrix between points by
+<br/>exploiting the ‘self-expressiveness’ property of the data, which allows any
+<br/>data point to be represented as a linear (or affine) combination of the other
+<br/>data points; b) Apply spectral clustering on the affinity matrix to recover
+<br/>the data segmentation. Dividing the problem in two steps is, on the one
+<br/>hand, appealing because the first step can be solved using convex optimiza-
+<br/>tion techniques, while the second one can be solved using existing spectral
+<br/>clustering techniques. On the other hand, its major disadvantage is that the
+<br/>natural relationship between the affinity matrix and the segmentation of the
+<br/>data is not explicitly captured.
+<br/>In this paper, we attempt to integrate the two separate stages into one
+<br/>unified optimization framework. One important motivating observation is
+<br/>that a perfect subspace clustering can often be obtained from an imperfec-
+<br/>t affinity matrix. In other words, the spectral clustering step can clean up
+<br/>the disturbance in the affinity matrix – which can be viewed as a process of
+<br/>information gain by denoising. Because of this, if we feed back the infor-
+<br/>mation gain properly, it may help the self-expressiveness model to yield a
+<br/>better affinity matrix.
+<br/>To jointly estimate the clustering and affinity matrix, we define a sus-
+<br/>pace structured (cid:96)1 norm as follows:
+<br/>(cid:107)Z(cid:107)1,Q
+<br/>= (cid:107)(11(cid:62) + αΘ)(cid:12) Z(cid:107)1
+<br/>(1)
+<br/>where α > 0 is a tradeoff parameter, Θi j ∈ {0,1} indicates whether two data
+<br/>points belong to the same subspace in which Θi j = 0 if point i and j lie in
+<br/>the same subspace and otherwise Θi j = 1, and 1 is the vector of all ones of
+<br/>appropriate dimension.
+<br/>Equipped with the subspace structured (cid:96)1 norm of Z, we then define the
+<br/>unified optimization framework for subspace clustering as follows:
+<br/>min
+<br/>Z,E,Q
+<br/>(cid:107)Z(cid:107)1,Q + λ(cid:107)E(cid:107)(cid:96) s.t. X = XZ + E, diag(Z) = 0, Q ∈ Q,
+<br/>where Q is the set of all valid binary segmentation matrices defined as
+<br/>(2)
+<br/>Q = {Q ∈ {0,1}N×k : Q1 = 1 and rank(Q) = k},
+<br/>(3)
+<br/>and the norm (cid:107)·(cid:107)(cid:96) on the error term E depends upon the prior knowledge
+<br/>about the pattern of noise or corruptions. We call problem (2) Structured
+<br/>Sparse Subspace Clustering (SSSC or S3C).
+<br/>The solution to the optimization problem in (2) is based on solving the
+<br/>following two subproblems alternatively: a) Find Z and E given Q by solv-
+<br/>ing a weighted sparse representation problem; b) Find Q given Z and E by
+<br/>spectral clustering. We solve this problem efficiently via a combination of an
+<br/>alternating direction method of multipliers with spectral clustering. Experi-
+<br/>ments on a synthetic data, the Hopkins 155 motion segmentation database,
+<br/>and the Extended Yale B data set demonstrate its effectiveness.
+<br/>Some results are presented in Figure 1, Table 1 and 2. Figure 1 shows
+<br/>the improvement in both the affinity matrix and the subspace clustering us-
+<br/>ing S3C over SSC on a subset of face images of three subjects from the
+</td><td>('9171002', 'Chun-Guang Li', 'chun-guang li')<br/>('1745721', 'René Vidal', 'rené vidal')</td><td></td></tr><tr><td>15136c2f94fd29fc1cb6bedc8c1831b7002930a6</td><td>Deep Learning Architectures for Face
+<br/>Recognition in Video Surveillance
+</td><td>('2805645', 'Saman Bashbaghi', 'saman bashbaghi')<br/>('1697195', 'Eric Granger', 'eric granger')<br/>('1744351', 'Robert Sabourin', 'robert sabourin')<br/>('3046171', 'Mostafa Parchami', 'mostafa parchami')</td><td></td></tr><tr><td>15affdcef4bb9d78b2d3de23c9459ee5b7a43fcb</td><td>Semi-Supervised Classification Using Linear
+<br/>Neighborhood Propagation
+<br/><b>Tsinghua University, Beijing 100084, P.R.China</b><br/><b>The Hong Kong University of Science and Technology, Clear Water Bay, Hong Kong</b><br/>Semi-Supervised Classi(cid:12)cation
+<br/>A Toy Example
+<br/>Shape Ranking
+<br/>Digits Ranking
+<br/>(a)
+<br/>(b)
+<br/>Interactive Image Segmentation
+<br/>1.2
+<br/>0.8
+<br/>0.6
+<br/>0.4
+<br/>0.2
+<br/>−0.2
+<br/>−0.4
+<br/>−0.6
+<br/>−0.8
+<br/>−1.5
+<br/>1.2
+<br/>0.8
+<br/>0.6
+<br/>0.4
+<br/>0.2
+<br/>−0.2
+<br/>−0.4
+<br/>−0.6
+<br/>−0.8
+<br/>−1.5
+<br/>4−NN Connected Graph
+<br/>−1
+<br/>−0.5
+<br/>0.5
+<br/>(a)
+<br/>1.5
+<br/>2.5
+<br/>Classification Results By Nearst Neighbor
+<br/>class 1
+<br/>class 2
+<br/>−1
+<br/>−0.5
+<br/>0.5
+<br/>(c)
+<br/>1.5
+<br/>2.5
+<br/>1.2
+<br/>0.8
+<br/>0.6
+<br/>0.4
+<br/>0.2
+<br/>−0.2
+<br/>−0.4
+<br/>−0.6
+<br/>−0.8
+<br/>−1.5
+<br/>1.2
+<br/>0.8
+<br/>0.6
+<br/>0.4
+<br/>0.2
+<br/>−0.2
+<br/>−0.4
+<br/>−0.6
+<br/>−0.8
+<br/>−1.5
+<br/>Classification Results By LNP
+<br/>class 1
+<br/>class 2
+<br/>−1
+<br/>−0.5
+<br/>0.5
+<br/>(b)
+<br/>1.5
+<br/>2.5
+<br/>Classification Results By Transductive SVM
+<br/>class 1
+<br/>class 2
+<br/>−1
+<br/>−0.5
+<br/>0.5
+<br/>(d)
+<br/>1.5
+<br/>2.5
+<br/>Multi-Class Semi-Supervised Classi(cid:12)cation
+<br/>(cid:15) Label set: L = f1; 2; (cid:1) (cid:1) (cid:1) ; cg
+<br/>(cid:15) M be a set of n (cid:2) c matrices with non-negative real-value entries
+<br/>(cid:15) F = [f1; f2; (cid:1) (cid:1) (cid:1) ; fc] 2 M corresponds to a speci(cid:12)c classi(cid:12)cation on X
+<br/>(cid:15) The entry of Fij can be regarded as the likelihood that xi belongs to
+<br/>class j
+<br/>(cid:15) The label of xi can be computed by yi = arg maxj6c Fij
+<br/>Induction
+<br/>minimize (cid:17)?(xt) = (cid:13)(cid:13)(cid:13)(cid:13)
+<br/>ft (cid:0) Xxj2N (xt)
+<br/>(cid:15) plug a test example xt into the cost function
+<br/>(cid:15) keep the labels of all xi 2 X (cid:12)xed when inducing the label of xt
+<br/>w(xt; xj)fj(cid:13)(cid:13)(cid:13)(cid:13)
+<br/>(5)
+<br/>Learning from partially labeled data
+<br/>(cid:15) Face/object recognition
+<br/>(cid:15) Image / video retrieval
+<br/>(cid:15) Interactive image segmentation
+<br/>Graph-Based Semi-Supervised Classi(cid:12)cation
+<br/>Represent the dataset as an weighted undirected graph G =< V; E >
+<br/>(cid:15) V: the node set, corresponding to the dataset
+<br/>(cid:15) E: the edge set, corresponding to the pairwise relationships
+<br/>wij = expn(cid:0)2(cid:12)kxi (cid:0) xjk2o
+<br/>(1)
+<br/>Cluster Assumption
+<br/>(cid:15) nearby points are likely to have the same label
+<br/>(cid:15) points on the same structure (such as a cluster or a submanifold) are
+<br/>prone to have the same label
+<br/>=) Similar to manifold analysis (ISOMAP, LLE, Laplacian Eigen-
+<br/>map(cid:1) (cid:1) (cid:1) )
+<br/>=) Incorporate the neighborhood information into graph construction
+<br/>Linear Neighborhoods
+<br/>The data point can be linearly reconstructed from its k-nearest neigh-
+<br/>bors.
+<br/>minimize "i = (cid:13)(cid:13)(cid:13)(cid:13)
+<br/>s:t: Xj
+<br/>xi (cid:0) Xxj2N (xi)
+<br/>wij = 1; wij > 0
+<br/>wij xj(cid:13)(cid:13)(cid:13)(cid:13)
+<br/>(2)
+<br/>(cid:15) wij re(cid:13)ects the similarity between xj and xi
+<br/>(cid:15) How to solve it?=)Quadratic programming.
+<br/>Collaborative Label Prediction
+<br/>The label of an unlabeled point can be linearly reconstructed from its
+<br/>neighbors’ labels
+<br/>minimize (cid:17) = Xn
+<br/>i=1
+<br/>fi (cid:0) Xxj2N (xi)
+<br/>(cid:13)(cid:13)(cid:13)(cid:13)
+<br/>wijfj(cid:13)(cid:13)(cid:13)(cid:13)
+<br/>s:t:
+<br/>fi = li (f or all labeled point xi)
+<br/>(3)
+<br/>(cid:15) wij is calculated through solving Eq.(2).
+<br/>(cid:15) The neighborhood information are incorporated into label prediction.
+<br/>How to solve Eq.(3)?
+<br/>i=1
+<br/>fi (cid:0) Xxj2N (xi)
+<br/>(cid:17) = Xn
+<br/>=) minimize (cid:17) ()(I (cid:0) W)f = 0; s:t: fi = li
+<br/>(cid:15) Refer to the following paper
+<br/>wijfj(cid:13)(cid:13)(cid:13)(cid:13)
+<br/>(cid:13)(cid:13)(cid:13)(cid:13)
+<br/>= f T (I (cid:0) W)T (I (cid:0) W)f (4)
+<br/>Recognition
+<br/>Recognition accuracies on ORL database
+<br/>LNP
+<br/>Consistency
+<br/>Kernel Eigenface
+<br/>Fisherface
+<br/>Eigenface
+<br/>0.9
+<br/>0.8
+<br/>0.7
+<br/>0.6
+<br/>0.5
+<br/>0.4
+<br/>Recognition accuracies on COIL database
+<br/>LNP
+<br/>Consistency
+<br/>Kernel PCA
+<br/>PCA+LDA
+<br/>PCA
+<br/>10
+<br/>12
+<br/>14
+<br/>16
+<br/>18
+<br/>References
+<br/>(cid:15) S.T. Roweis and L.K. Saul, Noninear Dimensionality Reduction by
+<br/>Locally Linear Embedding. Science: vol. 290, 2323-2326. 2000.
+<br/>(cid:15) O. Chapelle, et al. (eds.): Semi-Supervised Learning. MIT Press:
+<br/>Cambridge, MA. 2006.
+<br/>(cid:15) A. Levin D. Lischinski and Y. Weiss. Colorization using Optimization.
+<br/>SIGGRAPH, ACM Transactions on Graphics, Aug 2004.
+<br/>Data Ranking
+<br/>Ranking Result by Euclidean Distance
+<br/>1.5
+<br/>0.5
+<br/>−0.5
+<br/>Ranking Result by LNP
+<br/>0.95
+<br/>0.9
+<br/>0.85
+<br/>0.8
+<br/>0.75
+<br/>0.7
+<br/>0.65
+<br/>1.5
+<br/>0.5
+<br/>−0.5
+<br/>Zhu, X., Ghahramani, Z., & La(cid:11)erty, J.(2003). Semi-Supervised Learn-
+<br/>ing Using Gaussian Fields and Harmonic Functions. In Proceedings of
+<br/>the 20th International Conference on Machine Learning
+<br/>−1
+<br/>−1.5
+<br/>−1
+<br/>−0.5
+<br/>0.5
+<br/>(a)
+<br/>1.5
+<br/>2.5
+<br/>−1
+<br/>−1.5
+<br/>−1
+<br/>−0.5
+<br/>0.5
+<br/>(b)
+<br/>1.5
+<br/>2.5
+</td><td>('34410258', 'Fei Wang', 'fei wang')<br/>('1688516', 'Jingdong Wang', 'jingdong wang')<br/>('1700883', 'Changshui Zhang', 'changshui zhang')<br/>('7969645', 'Helen C. Shen', 'helen c. shen')</td><td></td></tr><tr><td>15d653972d176963ef0ad2cc582d3b35ca542673</td><td>CSVideoNet: A Real-time End-to-end Learning Framework for
+<br/>High-frame-rate Video Compressive Sensing
+<br/>School of Computing, Informatics, and Decision Systems Engineering
+<br/><b>Arizona State University, Tempe AZ</b></td><td>('47831601', 'Kai Xu', 'kai xu')<br/>('40615963', 'Fengbo Ren', 'fengbo ren')</td><td>{kaixu, renfengbo}@asu.edu
+</td></tr><tr><td>159e792096756b1ec02ec7a980d5ef26b434ff78</td><td>Proceedings of the Twenty-Eighth AAAI Conference on Artificial Intelligence
+<br/>Signed Laplacian Embedding for Supervised Dimension Reduction
+<br/><b>Institute of Image Processing and Pattern Recognition, Shanghai Jiao Tong University</b><br/><b>Centre for Quantum Computation and Intelligent Systems, University of Technology Sydney</b></td><td>('1710691', 'Chen Gong', 'chen gong')<br/>('1692693', 'Dacheng Tao', 'dacheng tao')<br/>('39264954', 'Jie Yang', 'jie yang')<br/>('1847070', 'Keren Fu', 'keren fu')</td><td>{goodgongchen, jieyang, fkrsuper}@sjtu.edu.cn
+<br/>dacheng.tao@uts.edu.au
+</td></tr><tr><td>153e5cddb79ac31154737b3e025b4fb639b3c9e7</td><td>PREPRINT SUBMITTED TO IEEE TRANSACTIONS ON NEURAL NETWORKS AND LEARNING SYSTEMS
+<br/>Active Dictionary Learning in Sparse
+<br/>Representation Based Classification
+</td><td>('1935596', 'Jin Xu', 'jin xu')<br/>('2198278', 'Haibo He', 'haibo he')<br/>('1881104', 'Hong Man', 'hong man')</td><td></td></tr><tr><td>1586871a1ddfe031b885b94efdbff647cf03eff1</td><td>A Visual Historical Record of American High School Yearbooks
+<br/>A Century of Portraits:
+<br/><b>University of California Berkeley</b><br/><b>Brown University</b><br/><b>University of California Berkeley</b></td><td>('2361255', 'Shiry Ginosar', 'shiry ginosar')<br/>('2660664', 'Kate Rakelly', 'kate rakelly')<br/>('33385802', 'Sarah Sachs', 'sarah sachs')<br/>('2130100', 'Brian Yin', 'brian yin')<br/>('1763086', 'Alexei A. Efros', 'alexei a. efros')</td><td></td></tr><tr><td>15cf7bdc36ec901596c56d04c934596cf7b43115</td><td>(IJACSA) International Journal of Advanced Computer Science and Applications,
+<br/>Vol. 8, No. 9, 2017
+<br/>Face Extraction from Image based on K-Means
+<br/>Clustering Algorithms
+<br/><b>Faculty of Computer, Khoy Branch, Islamic Azad University, Khoy, Iran</b></td><td>('2062871', 'Yousef Farhang', 'yousef farhang')</td><td></td></tr><tr><td>1576ed0f3926c6ce65e0ca770475bca6adcfdbb4</td><td>Keep it Accurate and Diverse: Enhancing Action Recognition Performance by
+<br/>Ensemble Learning
+<br/><b>Faculty of Computer Science, Dalhousie University, Halifax, Canada</b><br/>Computer Vision Center, UAB
+<br/>Edificio O, Campus UAB, 08193, Bellaterra (Cerdanyola), Barcelona, Spain
+<br/><b>University of Barcelona</b><br/>Gran Via de les Corts Catalanes, 585, 08007, Barcelona
+<br/>Visual Analysis of People (VAP) Laboratory
+<br/>Rendsburggade 14, 9000 Aalborg, Denmark
+</td><td>('1921285', 'Mohammad Ali Bagheri', 'mohammad ali bagheri')<br/>('3212432', 'Qigang Gao', 'qigang gao')<br/>('7855312', 'Sergio Escalera', 'sergio escalera')<br/>('1803459', 'Kamal Nasrollahi', 'kamal nasrollahi')<br/>('1876184', 'Michael B. Holte', 'michael b. holte')<br/>('1700569', 'Thomas B. Moeslund', 'thomas b. moeslund')</td><td>bagheri@cs.dal.ca
+<br/>sergio@maia.ub.es, aclapes@cvc.uab.cat
+<br/>{kn, mbh, tbm}@create.aau.dk
+</td></tr><tr><td>156cd2a0e2c378e4c3649a1d046cd080d3338bca</td><td>Exemplar based approaches on Face Fiducial Detection and
+<br/>Frontalization
+<br/>Thesis submitted in partial fulfillment
+<br/>of the requirements for the degree of
+<br/>MS by Research
+<br/>in
+<br/>Computer Science & Engineering
+<br/>by
+<br/>Mallikarjun B R
+<br/>201307681
+<br/><b>International Institute of Information Technology</b><br/>Hyderabad - 500 032, India
+<br/>May 2017
+</td><td></td><td>mallikarjun.br@research.iiit.ac.in
+</td></tr><tr><td>157eb982da8fe1da4c9e07b4d89f2e806ae4ceb6</td><td><b>MITSUBISHI ELECTRIC RESEARCH LABORATORIES</b><br/>http://www.merl.com
+<br/>Connecting the Dots in Multi-Class Classification: From
+<br/>Nearest Subspace to Collaborative Representation
+<br/>Chi, Y.; Porikli, F.
+<br/>TR2012-043
+<br/>June 2012
+</td><td></td><td></td></tr><tr><td>15e0b9ba3389a7394c6a1d267b6e06f8758ab82b</td><td>Xu et al. IPSJ Transactions on Computer Vision and
+<br/>Applications (2017) 9:24
+<br/>DOI 10.1186/s41074-017-0035-2
+<br/>IPSJ Transactions on Computer
+<br/>Vision and Applications
+<br/>TECHNICAL NOTE
+<br/>Open Access
+<br/>The OU-ISIR Gait Database comprising the
+<br/>Large Population Dataset with Age and
+<br/>performance evaluation of age estimation
+</td><td>('7513255', 'Chi Xu', 'chi xu')<br/>('1689334', 'Yasushi Makihara', 'yasushi makihara')<br/>('12881056', 'Gakuto Ogi', 'gakuto ogi')<br/>('1737850', 'Xiang Li', 'xiang li')<br/>('1715071', 'Yasushi Yagi', 'yasushi yagi')<br/>('6120396', 'Jianfeng Lu', 'jianfeng lu')</td><td></td></tr><tr><td>151481703aa8352dc78e2577f0601782b8c41b34</td><td>Appearance Manifold of Facial Expression
+<br/><b>Queen Mary, University of London, London E1 4NS, UK</b><br/>Department of Computer Science
+</td><td>('10795229', 'Caifeng Shan', 'caifeng shan')<br/>('2073354', 'Shaogang Gong', 'shaogang gong')<br/>('2803283', 'Peter W. McOwan', 'peter w. mcowan')</td><td>{cfshan,sgg,pmco}@dcs.qmul.ac.uk
+</td></tr><tr><td>15aa6c457678e25f6bc0e818e5fc39e42dd8e533</td><td></td><td></td><td></td></tr><tr><td>15cf1f17aeba62cd834116b770f173b0aa614bf4</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 77 – No.5, September 2013
+<br/>Facial Expression Recognition using Neural Network with
+<br/>Regularized Back-propagation Algorithm
+<br/>Research Scholar
+<br/>Department of ECE,
+<br/><b></b><br/>Phagwara, India
+<br/>Assistant Professor
+<br/>Department of ECE,
+<br/><b></b><br/>Phagwara, India
+<br/>Research Scholar
+<br/>Department of ECE,
+<br/><b>Gyan Ganga Institute of</b><br/>Technology & Sciences,
+<br/>Jabalpur, India
+</td><td>('35358999', 'Ashish Kumar Dogra', 'ashish kumar dogra')<br/>('50227570', 'Nikesh Bajaj', 'nikesh bajaj')</td><td></td></tr><tr><td>1565721ebdbd2518224f54388ed4f6b21ebd26f3</td><td>Face and Landmark Detection by Using Cascade of Classifiers
+<br/><b>Eskisehir Osmangazi University</b><br/>Eskisehir, Turkey
+<br/>Laboratoire Jean Kuntzmann
+<br/>Grenoble Cedex 9, France
+<br/><b>Czech Technical University</b><br/>Praha, Czech Republic
+</td><td>('2277308', 'Hakan Cevikalp', 'hakan cevikalp')<br/>('1756114', 'Bill Triggs', 'bill triggs')<br/>('1778663', 'Vojtech Franc', 'vojtech franc')</td><td>hakan.cevikalp@gmail.com
+<br/>Bill.Triggs@imag.fr
+<br/>xfrancv@cmp.felk.cvut.cz
+</td></tr><tr><td>15f3d47b48a7bcbe877f596cb2cfa76e798c6452</td><td>Automatic face analysis tools for interactive digital games
+<br/>Anonymised for blind review
+<br/>Anonymous
+<br/>Anonymous
+<br/>Anonymous
+</td><td></td><td></td></tr><tr><td>15728d6fd5c9fc20b40364b733228caf63558c31</td><td></td><td>('2831988', 'IAN N. ENDRES', 'ian n. endres')</td><td></td></tr><tr><td>15252b7af081761bb00535aac6bd1987391f9b79</td><td>ESTIMATION OF EYE GAZE DIRECTION ANGLES BASED ON ACTIVE APPEARANCE
+<br/>MODELS
+<br/><b>School of E.C.E., National Technical University of Athens, 15773 Athens, Greece</b></td><td>('2539459', 'Petros Koutras', 'petros koutras')<br/>('1750686', 'Petros Maragos', 'petros maragos')</td><td>Email: {pkoutras, maragos}@cs.ntua.gr
+</td></tr><tr><td>1513949773e3a47e11ab87d9a429864716aba42d</td><td></td><td></td><td></td></tr><tr><td>15ee80e86e75bf1413dc38f521b9142b28fe02d1</td><td>Towards a Deep Learning Framework for
+<br/>Unconstrained Face Detection
+<br/>CyLab Biometrics Center and the Department of Electrical and Computer Engineering,
+<br/><b>Carnegie Mellon University, Pittsburgh, PA, USA</b></td><td>('3049981', 'Yutong Zheng', 'yutong zheng')<br/>('3117715', 'Chenchen Zhu', 'chenchen zhu')<br/>('6131978', 'T. Hoang Ngan Le', 't. hoang ngan le')<br/>('1769788', 'Khoa Luu', 'khoa luu')<br/>('2043374', 'Chandrasekhar Bhagavatula', 'chandrasekhar bhagavatula')<br/>('1794486', 'Marios Savvides', 'marios savvides')</td><td>{yutongzh, chenchez, kluu, cbhagava, thihoanl}@andrew.cmu.edu, msavvid@ri.cmu.edu
+</td></tr><tr><td>153c8715f491272b06dc93add038fae62846f498</td><td></td><td>('33047058', 'JONGWOO LIM', 'jongwoo lim')</td><td></td></tr><tr><td>15e27f968458bf99dd34e402b900ac7b34b1d575</td><td>8362
+<br/>2014 IEEE International Conference on Acoustic, Speech and Signal Processing (ICASSP)
+<br/>978-1-4799-2893-4/14/$31.00 ©2014 IEEE
+<br/><b>University of Toronto</b><br/>1. INTRODUCTION
+</td><td>('2030736', 'Mohammad Shahin Mahanta', 'mohammad shahin mahanta')<br/>('1705037', 'Konstantinos N. Plataniotis', 'konstantinos n. plataniotis')</td><td>Email: {mahanta, kostas} @ece.utoronto.ca
+</td></tr><tr><td>15f70a0ad8903017250927595ae2096d8b263090</td><td>Learning Robust Deep Face Representation
+<br/><b>University of Science and Technology Beijing</b><br/>Beijing, China
+</td><td>('2225749', 'Xiang Wu', 'xiang wu')</td><td>alfredxiangwu@gmail.com
+</td></tr><tr><td>1564bf0a268662df752b68bee5addc4b08868739</td><td>With Whom Do I Interact?
+<br/>Detecting Social Interactions in Egocentric
+<br/>Photo-streams
+<br/><b>University of Barcelona</b><br/>Barcelona, Spain
+<br/>Computer Vision Center and
+<br/><b>University of Barcelona</b><br/>Barcelona, Spain
+<br/>Computer Vision Center and
+<br/><b>University of Barcelona</b><br/>Barcelona, Spain
+</td><td>('2084534', 'Maedeh Aghaei', 'maedeh aghaei')<br/>('2837527', 'Mariella Dimiccoli', 'mariella dimiccoli')<br/>('1724155', 'Petia Radeva', 'petia radeva')</td><td>Email:maghaeigavari@ub.edu
+</td></tr><tr><td>158e32579e38c29b26dfd33bf93e772e6211e188</td><td>Automated Real Time Emotion Recognition using
+<br/>Facial Expression Analysis
+<br/>by
+<br/>A thesis submitted to the Faculty of Graduate and Postdoctoral
+<br/>Affairs in partial fulfillment of the requirements for the degree of
+<br/>Master
+<br/>of
+<br/>Computer Science
+<br/><b>Carleton University</b><br/>Ottawa, Ontario
+</td><td></td><td></td></tr><tr><td>122f51cee489ba4da5ab65064457fbe104713526</td><td>Long Short Term Memory Recurrent Neural Network based
+<br/>Multimodal Dimensional Emotion Recognition
+<br/>Recognition
+<br/>Recognition
+<br/>Recognition
+<br/>National Laboratory of Pattern
+<br/>National Laboratory of Pattern
+<br/>National Laboratory of Pattern
+<br/><b>Institute of Automation</b><br/>Chinese Academy of Sciences
+<br/><b>Institute of Automation</b><br/>Chinese Academy of Sciences
+<br/><b>Institute of Automation</b><br/>Chinese Academy of Sciences
+<br/>National Laboratory of Pattern Recognition
+<br/>National Laboratory of Pattern Recognition
+<br/><b>Institute of Automation</b><br/>Chinese Academy of Sciences
+<br/>
+</td><td>('1850313', 'Linlin Chao', 'linlin chao')<br/>('37670752', 'Jianhua Tao', 'jianhua tao')<br/>('2740129', 'Minghao Yang', 'minghao yang')<br/>('1704841', 'Ya Li', 'ya li')</td><td>linlin.chao@nlpr.ia.ac.cn
+<br/>jhtao@nlpr.ia.ac.cn
+<br/>mhyang@nlpr.ia.ac.cn
+<br/>yli@nlpr.ia.ac.cn
+</td></tr><tr><td>121503705689f46546cade78ff62963574b4750b</td><td>We don’t need no bounding-boxes:
+<br/>Training object class detectors using only human verification
+<br/><b>University of Edinburgh</b></td><td>('1749373', 'Dim P. Papadopoulos', 'dim p. papadopoulos')<br/>('1823362', 'Jasper R. R. Uijlings', 'jasper r. r. uijlings')<br/>('48716849', 'Frank Keller', 'frank keller')<br/>('1749692', 'Vittorio Ferrari', 'vittorio ferrari')</td><td>dim.papadopoulos@ed.ac.uk
+<br/>jrr.uijlings@ed.ac.uk
+<br/>keller@inf.ed.ac.uk
+<br/>vferrari@inf.ed.ac.uk
+</td></tr><tr><td>125d82fee1b9fbcc616622b0977f3d06771fc152</td><td>Hierarchical Face Parsing via Deep Learning
+<br/><b>The Chinese University of Hong Kong</b><br/><b>The Chinese University of Hong Kong</b><br/><b>Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences</b></td><td>('1693209', 'Ping Luo', 'ping luo')<br/>('31843833', 'Xiaogang Wang', 'xiaogang wang')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td>pluo.lhi@gmail.com
+<br/>xgwang@ee.cuhk.edu.hk
+<br/>xtang@ie.cuhk.edu.hk
+</td></tr><tr><td>1255afbf86423c171349e874b3ac297de19f00cd</td><td>Robust Face Recognition by Computing Distances
+<br/>from Multiple Histograms of Oriented Gradients
+<br/><b>Institute of Arti cial Intelligence and Cognitive Engineering (ALICE), University of Groningen</b><br/>Nijenborgh 9, Groningen, The Netherlands
+</td><td>('3351361', 'Mahir Faik Karaaba', 'mahir faik karaaba')<br/>('1728531', 'Olarik Surinta', 'olarik surinta')<br/>('1799278', 'Lambert Schomaker', 'lambert schomaker')</td><td>Email: {m.f.karaaba, o.surinta, l.r.b.schomaker, m.a.wiering}@rug.nl
+</td></tr><tr><td>1275d6a800f8cf93c092603175fdad362b69c191</td><td>Deep Face Recognition: A Survey
+<br/>School of Information and Communication Engineering,
+<br/><b>Beijing University of Posts and Telecommunications, Beijing, China</b><br/>still have an inevitable limitation on robustness against the
+<br/>complex nonlinear facial appearance variations.
+<br/>In general, traditional methods attempted to solve FR prob-
+<br/>lem by one or two layer representation, such as filtering
+<br/>responses or histogram of the feature codes. The research com-
+<br/>munity studied intensively to separately improve the prepro-
+<br/>cessing, local descriptors, and feature transformation, which
+<br/>improve face recognition accuracy slowly. By the continuous
+<br/>improvement of a decade, “shallow” methods only improve the
+<br/>accuracy of the LFW benchmark to about 95% [26], which
+<br/>indicates that “shallow” methods are insufficient to extract
+<br/>stable identity feature against unconstrained facial variations.
+<br/>Due to the technical insufficiency, facial recognition systems
+<br/>were often reported with unstable performance or failures with
+<br/>countless false alarms in real-world applications.
+</td><td>('2285767', 'Mei Wang', 'mei wang')<br/>('1774956', 'Weihong Deng', 'weihong deng')</td><td>wm0245@126.com, whdeng@bupt.edu.cn
+</td></tr><tr><td>126535430845361cd7a3a6f317797fe6e53f5a3b</td><td>Robust Photometric Stereo via Low-Rank Matrix
+<br/>Completion and Recovery (cid:63)
+<br/><b>School of Optics and Electronics, Beijing Institute of Technology, Beijing</b><br/><b>Coordinated Science Lab, University of Illinois at Urbana-Champaign</b><br/><b>Key Laboratory of Machine Perception, Peking University, Beijing</b><br/>§Visual Computing Group, Microsoft Research Asia, Beijing
+</td><td>('2417838', 'Lun Wu', 'lun wu')<br/>('1701028', 'Arvind Ganesh', 'arvind ganesh')<br/>('35580784', 'Boxin Shi', 'boxin shi')<br/>('1774618', 'Yasuyuki Matsushita', 'yasuyuki matsushita')<br/>('1692621', 'Yongtian Wang', 'yongtian wang')<br/>('1700297', 'Yi Ma', 'yi ma')</td><td>lun.wu@hotmail.com, abalasu2@illinois.edu, shiboxin@cis.pku.edu.cn,
+<br/>yasumat@microsoft.com, wyt@bit.edu.cn, mayi@microsoft.com
+</td></tr><tr><td>122ee00cc25c0137cab2c510494cee98bd504e9f</td><td>The Application of
+<br/>Active Appearance Models to
+<br/>Comprehensive Face Analysis
+<br/>Technical Report
+<br/>TU M¨unchen
+<br/>April 5, 2007
+</td><td>('2866162', 'Simon Kriegel', 'simon kriegel')</td><td>kriegel@mmer-systems.eu
+</td></tr><tr><td>1287bfe73e381cc8042ac0cc27868ae086e1ce3b</td><td></td><td></td><td></td></tr><tr><td>121fe33daf55758219e53249cf8bcb0eb2b4db4b</td><td>CHAKRABARTI et al.: EMPIRICAL CAMERA MODEL
+<br/>An Empirical Camera Model
+<br/>for Internet Color Vision
+<br/>http://www.eecs.harvard.edu/~ayanc/
+<br/>http://www.cs.middlebury.edu/~schar/
+<br/>Todd Zickler1
+<br/>http://www.eecs.harvard.edu/~zickler/
+<br/>1 Harvard School of Engineering and
+<br/>Applied Sciences
+<br/>Cambridge, MA, USA 02139
+<br/>2 Department of Computer Science
+<br/><b>Middlebury College</b><br/>Middlebury, VT, USA 05753
+</td><td>('38534744', 'Ayan Chakrabarti', 'ayan chakrabarti')<br/>('1709053', 'Daniel Scharstein', 'daniel scharstein')</td><td></td></tr><tr><td>12408baf69419409d228d96c6f88b6bcde303505</td><td>Temporal Tessellation: A Unified Approach for Video Analysis
+<br/><b>The Blavatnik School of Computer Science, Tel Aviv University, Israel</b><br/><b>Information Sciences Institute, USC, CA, USA</b><br/><b>The Open University of Israel, Israel</b><br/>4Facebook AI Research
+</td><td>('48842639', 'Dotan Kaufman', 'dotan kaufman')<br/>('36813724', 'Gil Levi', 'gil levi')<br/>('1756099', 'Tal Hassner', 'tal hassner')<br/>('1776343', 'Lior Wolf', 'lior wolf')</td><td></td></tr><tr><td>120bcc9879d953de7b2ecfbcd301f72f3a96fb87</td><td>Report on the FG 2015 Video Person Recognition Evaluation
+<br/>Zhenhua Feng
+<br/><b>Colorado State University</b><br/>Fort Collins, CO, USA
+<br/><b>University of Notre Dame</b><br/>Notre Dame, IN, USA
+<br/><b>University of Surrey</b><br/>United Kingdom
+<br/>1 Key Laboratory of Intelligent Information Processing of Chinese Academy of Sciences
+<br/><b>Institute of Computing Technology, CAS, Beijing, 100190, China</b><br/><b>University of Chinese Academy of Sciences, Beijing, 100049, China</b><br/><b>Stevens Institute of Technology</b><br/>Hoboken, NJ, USA
+<br/>Vitomir ˇStruc
+<br/>Janez Kriˇzaj
+<br/><b>University of Ljubljana</b><br/>Ljubljana, Slovenia
+<br/><b>University of Technology, Sydney</b><br/>Sydney, Australia
+<br/><b>National Institute of Standards and Technology</b><br/>Gaithersburg, MD, USA
+</td><td>('1757322', 'J. Ross Beveridge', 'j. ross beveridge')<br/>('1694404', 'Bruce A. Draper', 'bruce a. draper')<br/>('1704876', 'Patrick J. Flynn', 'patrick j. flynn')<br/>('39976184', 'Patrik Huber', 'patrik huber')<br/>('1748684', 'Josef Kittler', 'josef kittler')<br/>('7945869', 'Zhiwu Huang', 'zhiwu huang')<br/>('1688086', 'Shaoxin Li', 'shaoxin li')<br/>('38751558', 'Yan Li', 'yan li')<br/>('1693589', 'Meina Kan', 'meina kan')<br/>('3373117', 'Ruiping Wang', 'ruiping wang')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('3131569', 'Haoxiang Li', 'haoxiang li')<br/>('37990555', 'Changxing Ding', 'changxing ding')<br/>('32028519', 'P. Jonathon Phillips', 'p. jonathon phillips')</td><td>ross@cs.colostate.edu
+</td></tr><tr><td>12cb3bf6abf63d190f849880b1703ccc183692fe</td><td>Guess Who?: A game to crowdsource the labeling of affective facial
+<br/>expressions is comparable to expert ratings.
+<br/>Graduation research project, june 2012
+<br/>Supervised by: Dr. Joost Broekens
+<br/><b></b></td><td></td><td>mail@barryborsboom.nl
+</td></tr><tr><td>12095f9b35ee88272dd5abc2d942a4f55804b31e</td><td>DenseReg: Fully Convolutional Dense Shape Regression In-the-Wild
+<br/>Rıza Alp G¨uler1
+<br/>1INRIA-CentraleSup´elec, France
+<br/><b>Imperial College London, UK</b><br/>Stefanos Zafeiriou2
+<br/>3Amazon, Berlin, Germany
+<br/><b>University College London, UK</b></td><td>('2814229', 'George Trigeorgis', 'george trigeorgis')<br/>('2788012', 'Epameinondas Antonakos', 'epameinondas antonakos')<br/>('2796644', 'Patrick Snape', 'patrick snape')<br/>('48111527', 'Iasonas Kokkinos', 'iasonas kokkinos')</td><td>riza.guler@inria.fr
+<br/>2{g.trigeorgis, p.snape, s.zafeiriou}@imperial.ac.uk
+<br/>antonak@amazon.com
+<br/>i.kokkinos@cs.ucl.ac.uk
+</td></tr><tr><td>12cd96a419b1bd14cc40942b94d9c4dffe5094d2</td><td>29
+<br/>Proceedings of the 5th Workshop on Vision and Language, pages 29–38,
+<br/>Berlin, Germany, August 12 2016. c(cid:13)2016 Association for Computational Linguistics
+</td><td></td><td></td></tr><tr><td>1275852f2e78ed9afd189e8b845fdb5393413614</td><td>A Transfer Learning based Feature-Weak-Relevant Method for
+<br/>Image Clustering
+<br/><b>Dalian Maritime University</b><br/>Dalian, China
+</td><td>('3852923', 'Bo Dong', 'bo dong')<br/>('2860808', 'Xinnian Wang', 'xinnian wang')</td><td>{dukedong,wxn}@dlmu.edu.cn
+</td></tr><tr><td>1297ee7a41aa4e8499c7ddb3b1fed783eba19056</td><td><b>University of Nebraska - Lincoln</b><br/>US Army Research
+<br/>2015
+<br/>U.S. Department of Defense
+<br/>Effects of emotional expressions on persuasion
+<br/>Gale Lucas
+<br/><b>University of Southern California</b><br/><b>University of Southern California</b><br/><b>University of Southern California</b><br/><b>University of Southern California</b><br/>Follow this and additional works at: http://digitalcommons.unl.edu/usarmyresearch
+<br/>Wang, Yuqiong; Lucas, Gale; Khooshabeh, Peter; de Melo, Celso; and Gratch, Jonathan, "Effects of emotional expressions on
+<br/>persuasion" (2015). US Army Research. 340.
+<br/>http://digitalcommons.unl.edu/usarmyresearch/340
+</td><td>('49416640', 'Yuqiong Wang', 'yuqiong wang')<br/>('2635945', 'Peter Khooshabeh', 'peter khooshabeh')<br/>('1977901', 'Celso de Melo', 'celso de melo')<br/>('1730824', 'Jonathan Gratch', 'jonathan gratch')</td><td>DigitalCommons@University of Nebraska - Lincoln
+<br/>University of Southern California, wangyuqiong@ymail.com
+<br/>This Article is brought to you for free and open access by the U.S. Department of Defense at DigitalCommons@University of Nebraska - Lincoln. It has
+<br/>been accepted for inclusion in US Army Research by an authorized administrator of DigitalCommons@University of Nebraska - Lincoln.
+</td></tr><tr><td>12055b8f82d5411f9ad196b60698d76fbd07ac1e</td><td>1475
+<br/>Multiview Facial Landmark Localization in RGB-D
+<br/>Images via Hierarchical Regression
+<br/>With Binary Patterns
+</td><td>('3152448', 'Zhanpeng Zhang', 'zhanpeng zhang')<br/>('40647981', 'Wei Zhang', 'wei zhang')<br/>('7137861', 'Jianzhuang Liu', 'jianzhuang liu')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td></td></tr><tr><td>126214ef0dcef2b456cb413905fa13160c73ec8e</td><td>Modelling human perception of static facial expressions
+<br/>M.Sorci,J.Ph.Thiran
+<br/>J.Cruz,T.Robin,M.Bierlaire
+<br/><b>Electrical Engineering Institute, EPFL</b><br/>Transport and Mobility Laboratory,EPFL
+<br/>Station 11, CH-1015, Lausanne
+<br/>Station 11, CH-1015, Lausanne
+<br/>G.Antonini
+<br/>IBM Zurich Lab
+<br/>Saumerstrasse 4 ,Ruschlikon
+<br/>B.Cerretani
+<br/><b>University of Siena</b><br/>DII,Siena
+</td><td></td><td>{matteo.sorci,JP.Thiran}@epfl.ch
+<br/>{javier.cruz,thomas.robin,michel.bierlaire}@epfl.ch
+<br/>gan@zurich.ibm.com
+<br/>barbara.cerretani@gmail.com
+</td></tr><tr><td>120785f9b4952734818245cc305148676563a99b</td><td>Diagnostic automatique de l’état dépressif
+<br/>S. Cholet
+<br/>H. Paugam-Moisy
+<br/>Laboratoire de Mathématiques Informatique et Applications (LAMIA - EA 4540)
+<br/>Université des Antilles, Campus de Fouillole - Guadeloupe
+<br/>Résumé
+<br/>Les troubles psychosociaux sont un problème de santé pu-
+<br/>blique majeur, pouvant avoir des conséquences graves sur
+<br/>le court ou le long terme, tant sur le plan professionnel que
+<br/>personnel ou familial. Le diagnostic de ces troubles doit
+<br/>être établi par un professionnel. Toutefois, l’IA (l’Intelli-
+<br/>gence Artificielle) peut apporter une contribution en four-
+<br/>nissant au praticien une aide au diagnostic, et au patient
+<br/>un suivi permanent rapide et peu coûteux. Nous proposons
+<br/>une approche vers une méthode de diagnostic automatique
+<br/>de l’état dépressif à partir d’observations du visage en
+<br/>temps réel, au moyen d’une simple webcam. A partir de
+<br/>vidéos du challenge AVEC’2014, nous avons entraîné un
+<br/>classifieur neuronal à extraire des prototypes de visages
+<br/>selon différentes valeurs du score de dépression de Beck
+<br/>(BDI-II).
+</td><td></td><td>Stephane.Cholet@univ-antilles.fr
+</td></tr><tr><td>12692fbe915e6bb1c80733519371bbb90ae07539</td><td>Object Bank: A High-Level Image Representation for Scene
+<br/>Classification & Semantic Feature Sparsification
+<br/><b>Stanford University</b><br/><b>Carnegie Mellon University</b></td><td>('33642044', 'Li-Jia Li', 'li-jia li')<br/>('2888806', 'Hao Su', 'hao su')<br/>('1752601', 'Eric P. Xing', 'eric p. xing')<br/>('3216322', 'Li Fei-Fei', 'li fei-fei')</td><td></td></tr><tr><td>1251deae1b4a722a2155d932bdfb6fe4ae28dd22</td><td>A Large-scale Attribute Dataset for Zero-shot Learning
+<br/>1 National Engineering Laboratory for Video Technology,
+<br/>Key Laboratory of Machine Perception (MoE),
+<br/>Cooperative Medianet Innovation Center, Shanghai,
+<br/><b>School of EECS, Peking University, Beijing, 100871, China</b><br/><b>School of Data Science, Fudan University</b><br/>3 Sinovation Ventures
+</td><td>('49217762', 'Bo Zhao', 'bo zhao')<br/>('35782003', 'Yanwei Fu', 'yanwei fu')<br/>('1705512', 'Rui Liang', 'rui liang')<br/>('3165417', 'Jiahong Wu', 'jiahong wu')<br/>('47904050', 'Yonggang Wang', 'yonggang wang')<br/>('36637369', 'Yizhou Wang', 'yizhou wang')</td><td>bozhao, yizhou.wang@pku.edu.cn, yanweifu@fudan.edu.cn
+<br/>liangrui, wujiahong, wangyonggang@chuangxin.com
+</td></tr><tr><td>12ccfc188de0b40c84d6a427999239c6a379cd66</td><td>Sparse Adversarial Perturbations for Videos
+<br/>1 Tsinghua National Lab for Information Science and Technology
+<br/>1 State Key Lab of Intelligent Technology and Systems
+<br/><b>Tsinghua University</b><br/>1 Center for Bio-Inspired Computing Research
+</td><td>('2769710', 'Xingxing Wei', 'xingxing wei')<br/>('40062221', 'Jun Zhu', 'jun zhu')<br/>('37409747', 'Hang Su', 'hang su')</td><td>{xwei11, dcszj, suhangss}@mail.tsinghua.edu.cn
+</td></tr><tr><td>12c713166c46ac87f452e0ae383d04fb44fe4eb2</td><td></td><td></td><td></td></tr><tr><td>12ebeb2176a5043ad57bc5f3218e48a96254e3e9</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 120 – No.24, June 2015
+<br/>Traffic Road Sign Detection and Recognition for
+<br/>Automotive Vehicles
+<br/>Zakir Hyder
+<br/>Department of Electrical Engineering and
+<br/>Department of Electrical Engineering and
+<br/><b>Computer Science North South University, Dhaka</b><br/><b>Computer Science North South University, Dhaka</b><br/>Bangladesh
+<br/>Bangladesh
+</td><td></td><td></td></tr><tr><td>1270044a3fa1a469ec2f4f3bd364754f58a1cb56</td><td>Video-Based Face Recognition Using Probabilistic Appearance Manifolds
+<br/>yComputer Science
+<br/>Urbana, IL 61801
+<br/>zComputer Science & Engineering
+<br/><b>University of Illinois, Urbana-Champaign University of California, San Diego</b><br/>La Jolla, CA 92093
+<br/>David Kriegmanz
+<br/><b>Honda Research Institute</b><br/>800 California Street
+<br/>Mountain View, CA 94041
+</td><td>('2457452', 'Kuang-chih Lee', 'kuang-chih lee')<br/>('1788818', 'Jeffrey Ho', 'jeffrey ho')<br/>('1715634', 'Ming-Hsuan Yang', 'ming-hsuan yang')</td><td>klee10@uiuc.edu
+<br/>jho@cs.ucsd.edu myang@honda-ri.com kriegman@cs.ucsd.edu
+</td></tr><tr><td>12150d8b51a2158e574e006d4fbdd3f3d01edc93</td><td>Deep End2End Voxel2Voxel Prediction
+<br/>Presented by: Ahmed Osman
+<br/>Ahmed Osman
+</td><td>('1687325', 'Du Tran', 'du tran')<br/>('2276554', 'Rob Fergus', 'rob fergus')<br/>('2210374', 'Manohar Paluri', 'manohar paluri')</td><td></td></tr><tr><td>12003a7d65c4f98fb57587fd0e764b44d0d10125</td><td>Face Recognition in the Wild with the Probabilistic Gabor-Fisher
+<br/>Classifier
+<br/>Simon Dobriˇsek, Vitomir ˇStruc, Janez Kriˇzaj, France Miheliˇc
+<br/><b>Faculty of Electrical Engineering, University of Ljubljana, Tr za ska cesta 25, SI-1000 Ljubljana, Slovenia</b></td><td></td><td></td></tr><tr><td>124538b3db791e30e1b62f81d4101be435ee12ef</td><td>ORIGINAL RESEARCH ARTICLE
+<br/>published: 29 August 2013
+<br/>doi: 10.3389/fpsyg.2013.00506
+<br/>Basic level scene understanding: categories, attributes and
+<br/>structures
+<br/><b>Computer Science, Princeton University, Princeton, NJ, USA</b><br/><b>Computer Science, Brown University, Providence, RI, USA</b><br/><b>Computer Science and Engineering, University of Washington, Seattle, WA, USA</b><br/><b>Brain and Cognitive Sciences, Massachusetts Institute of Technology, Cambridge, MA, USA</b><br/><b>Computer Science and Arti cial Intelligence Laboratory, Massachusetts Institute of Technology, Cambridge, MA, USA</b><br/><b>Computer Science and Arti cial Intelligence Laboratory, Massachusetts Institute of Technology, Cambridge, MA, USA</b><br/>Edited by:
+<br/>Tamara Berg, Stony Brook
+<br/><b>University, USA</b><br/>Reviewed by:
+<br/>Andrew M. Haun, Harvard Medical
+<br/>School, USA
+<br/>Devi Parikh, Virginia Tech, USA
+<br/>*Correspondence:
+<br/><b>Brown University</b><br/>115 Waterman Street, Box 1910,
+<br/>Providence, RI 02912, USA
+<br/>A longstanding goal of computer vision is to build a system that can automatically
+<br/>understand a 3D scene from a single image. This requires extracting semantic concepts
+<br/>and 3D information from 2D images which can depict an enormous variety of
+<br/>environments that comprise our visual world. This paper summarizes our recent efforts
+<br/>toward these goals. First, we describe the richly annotated SUN database which is a
+<br/>collection of annotated images spanning 908 different scene categories with object,
+<br/>attribute, and geometric labels for many scenes. This database allows us to systematically
+<br/>study the space of scenes and to establish a benchmark for scene and object recognition.
+<br/>We augment the categorical SUN database with 102 scene attributes for every image and
+<br/>explore attribute recognition. Finally, we present an integrated system to extract the 3D
+<br/>structure of the scene and objects depicted in an image.
+<br/>Keywords: SUN database, basic level scene understanding, scene recognition, scene attributes, geometry
+<br/>recognition, 3D context
+<br/>1. INTRODUCTION
+<br/>The ability to understand a 3D scene depicted in a static 2D image
+<br/>goes to the very heart of the computer vision problem. By “scene”
+<br/>we mean a place in which a human can act within or navigate.
+<br/>What does it mean to understand a scene? There is no univer-
+<br/>sal answer as it heavily depends on the task involved, and this
+<br/>seemingly simple question hides a lot of complexity.
+<br/>The dominant view in the current computer vision literature
+<br/>is to name the scene and objects present in an image. However,
+<br/>this level of understanding is rather superficial. If we can reason
+<br/>about a larger variety of semantic properties and structures of
+<br/>scenes it will enable richer applications. Furthermore, working on
+<br/>an over-simplified task may distract us from exploiting the natu-
+<br/>ral structures of the problem (e.g., relationships between objects
+<br/>and 3d surfaces or the relationship between scene attributes and
+<br/>object presence), which may be critical for a complete scene
+<br/>understanding solution.
+<br/>What is the ultimate goal of computational scene under-
+<br/>standing? One goal might be to pass the Turing test for scene
+<br/>understanding: Given an image depicting a static scene, a human
+<br/>judge will ask a human or a machine questions about the picture.
+<br/>If the judge cannot reliably tell the machine from the human, the
+<br/>machine is said to have passed the test. This task is beyond the
+<br/>current state-of-the-art as humans could ask a huge variety of
+<br/>meaningful visual questions about an image, e.g., Is it safe to cross
+<br/>this road? Who ate the last cupcake? Is this a fun place to vacation?
+<br/>Are these people frustrated? Where can I set these groceries? etc.
+<br/>Therefore, we propose a set of goals that are suitable for the
+<br/>current state of research in computer vision that are not too
+<br/>simplistic nor challenging and lead to a natural representation of
+<br/>scenes. Based on these considerations, we define the task of scene
+<br/>understanding as predicting the scene category, scene attributes,
+<br/>the 3D enclosure of the space, and all the objects in the images.
+<br/>For each object, we want to know its category and 3D bound-
+<br/>ing box, as well as its 3D orientation relative to the scene. As an
+<br/>image is a viewer-centric observation of the space, we also want
+<br/>to recover the camera parameters, such as observer viewpoint
+<br/>and field of view. We call this taskbasic level scene understand-
+<br/>ing, with analogy to basic level in cognitive categorization (Rosch,
+<br/>1978). It has practical applications for providing sufficient infor-
+<br/>mation for simple interaction with the scene, such as navigation
+<br/>and object manipulation.
+<br/>1.1. OUTLINE
+<br/>In this paper we discuss several aspects of basic level scene under-
+<br/>standing. First, we quickly review our recent work on categorical
+<br/>(section 2) and attribute-based scene representations (section 3).
+<br/>Finally, we go into greater detail about novel work in 3d scene
+<br/>understanding using structured learning to simultaneously rea-
+<br/>son about many aspects of scenes (section 4).
+<br/>Supporting these research efforts is the Scene UNderstanding
+<br/>(SUN) database. By modern standards, the SUN database is not
+<br/>especially large, containing on the order of 100,000 scenes. But
+<br/>the SUN database is, instead, richly annotated with scene cat-
+<br/>egories, scene attributes, geometric properties, “memorability”
+<br/>measurements (Isola et al., 2011), and object segmentations.
+<br/>There are 326,582 manually segmented objects for the 5650
+<br/>object categories labeled (Barriuso and Torralba, 2012). Object
+<br/>www.frontiersin.org
+<br/>August 2013 | Volume 4 | Article 506 | 1
+</td><td>('40599257', 'Jianxiong Xiao', 'jianxiong xiao')<br/>('12532254', 'James Hays', 'james hays')<br/>('2537592', 'Bryan C. Russell', 'bryan c. russell')<br/>('40541456', 'Genevieve Patterson', 'genevieve patterson')<br/>('1865091', 'Krista A. Ehinger', 'krista a. ehinger')<br/>('38611723', 'Antonio Torralba', 'antonio torralba')<br/>('31735139', 'Aude Oliva', 'aude oliva')<br/>('12532254', 'James Hays', 'james hays')</td><td>e-mail: hays@cs.brown.edu
+</td></tr><tr><td>12d8730da5aab242795bdff17b30b6e0bac82998</td><td>Persistent Evidence of Local Image Properties in Generic ConvNets
+<br/><b>CVAP, KTH (Royal Institute of Technology), Stockholm, SE</b></td><td>('2835963', 'Ali Sharif Razavian', 'ali sharif razavian')<br/>('2622491', 'Hossein Azizpour', 'hossein azizpour')<br/>('1801052', 'Atsuto Maki', 'atsuto maki')<br/>('1736906', 'Josephine Sullivan', 'josephine sullivan')<br/>('2484138', 'Carl Henrik Ek', 'carl henrik ek')<br/>('1826607', 'Stefan Carlsson', 'stefan carlsson')</td><td>{razavian,azizpour,atsuto,sullivan,chek,stefanc}@csc.kth.se
+</td></tr><tr><td>8c13f2900264b5cf65591e65f11e3f4a35408b48</td><td>A GENERIC FACE REPRESENTATION APPROACH FOR
+<br/>LOCAL APPEARANCE BASED FACE VERIFICATION
+<br/>Interactive Systems Labs, Universität Karlsruhe (TH)
+<br/>76131 Karlsruhe, Germany
+<br/>web: http://isl.ira.uka.de/face_recognition/
+</td><td>('3025777', 'Hazim Kemal Ekenel', 'hazim kemal ekenel')<br/>('1742325', 'Rainer Stiefelhagen', 'rainer stiefelhagen')</td><td>{ekenel, stiefel}@ira.uka.de
+</td></tr><tr><td>8cb3f421b55c78e56c8a1c1d96f23335ebd4a5bf</td><td></td><td></td><td></td></tr><tr><td>8c955f3827a27e92b6858497284a9559d2d0623a</td><td>Buletinul Ştiinţific al Universităţii "Politehnica" din Timişoara
+<br/>Seria ELECTRONICĂ şi TELECOMUNICAŢII
+<br/>TRANSACTIONS on ELECTRONICS and COMMUNICATIONS
+<br/>Tom 53(67), Fascicola 1-2, 2008
+<br/>Facial Expression Recognition under Noisy Environment
+<br/>Using Gabor Filters
+</td><td>('2336758', 'Ioan Buciu', 'ioan buciu')<br/>('2526319', 'I. Nafornita', 'i. nafornita')<br/>('29835181', 'I. Pitas', 'i. pitas')</td><td></td></tr><tr><td>8c8525e626c8857a4c6c385de34ffea31e7e41d1</td><td>Cross-domain Image Retrieval with a Dual Attribute-aware Ranking Network
+<br/><b>National University of Singapore, Singapore</b><br/>2IBM Research
+</td><td>('1753492', 'Junshi Huang', 'junshi huang')<br/>('35370244', 'Qiang Chen', 'qiang chen')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')</td><td>{a0092558, eleyans}@nus.edu.sg
+<br/>rsferis@us.ibm.com
+<br/>qiangchen@au1.ibm.com
+</td></tr><tr><td>8c66378df977606d332fc3b0047989e890a6ac76</td><td>Hierarchical-PEP Model for Real-world Face Recognition
+<br/><b>Stevens Institute of Technology</b><br/>Pose variation remains one of the major factors adversely affect the accuracy
+<br/>of real-world face recognition systems. The same face in different poses
+<br/>can look drastically different to each other. Belhumeur et al. [1] empiri-
+<br/>cally demonstrate that frontal faces can be projected to a low-dimensional
+<br/>subspace invariant to variation in illumination and facial expressions. This
+<br/>observation highlights the importance of addressing pose variation because
+<br/>it can greatly help relieve the adverse effects of the other visual variations.
+<br/>A set of methods build pose-invariant face representations by locating
+<br/>the facial landmarks. For example, Chen et al. [2] concatenate dense fea-
+<br/>tures around the facial landmarks to build the face representation. The pose-
+<br/>invariance is achieved in this way, because it always extracts features from
+<br/>the face part surrounded around the facial landmarks regardless of their loca-
+<br/>tions in the image. The elastic matching methods [5] generalize this design
+<br/>with a probabilistic elastic part (PEP) model unsupervisedly learned from
+<br/>face image patches.
+<br/>While this procedure – locating the face parts and stacking the face part
+<br/>features to build face representation – is empirically demonstrated to be ef-
+<br/>fective by both Chen et al. [2] and Li et al. [5], we argue that directly de-
+<br/>scribing the face parts with naive dense extraction of low-level features may
+<br/>not be optimal.
+<br/>In this work, we propose to build a better face part model to construct
+<br/>an improved face representation. Inspired by the probabilistic elastic part
+<br/>(PEP) model and the success of the deep hierarchical architecture in a num-
+<br/>ber of visual tasks, we propose the Hierarchical-PEP model to approach the
+<br/>unconstrained face recognition problem.
+<br/>As shown in Figure 1, we apply the PEP model hierarchically to decom-
+<br/>pose a face image into face parts at different levels of details to build pose-
+<br/>invariant part-based face representations. Following the hierarchy from bottom-
+<br/>up, we stack the face part representations at each layer, discriminatively re-
+<br/>duce its dimensionality, and hence aggregate the face part representations
+<br/>layer-by-layer to build a compact and invariant face representation. The
+<br/>Hierarchical-PEP model exploits the fine-grained structures of the face parts
+<br/>at different levels of details to address the pose variations. It is also guided
+<br/>by supervised information in constructing the face part/face representations.
+<br/>We empirically verify the Hierarchical-PEP model on two public bench-
+<br/>marks and a face recognition challenge for image-based and video-based
+<br/>face verification. The state-of-the-art performance demonstrates the poten-
+<br/>tial of our method. We show the performance comparison on the YouTube
+<br/>faces dataset [9] in Table 1.
+<br/>Table 1: Performance comparison on YouTube Faces dataset under the re-
+<br/>stricted with no outside data protocol.
+<br/>Algorithm
+<br/>MBGS [9]
+<br/>MBGS+SVM- [8]
+<br/>STFRD+PMML [10]
+<br/>VF2 [7]
+<br/>DDML (combined) [3]
+<br/>Eigen-PEP [6]
+<br/>LM3L [4]
+<br/>Hierarchical-PEP (layers fusion)
+<br/>Accuracy ± Error(%)
+<br/>76.4± 1.8
+<br/>78.9± 1.9
+<br/>79.5± 2.5
+<br/>84.7± 1.4
+<br/>82.3± 1.5
+<br/>84.8± 1.4
+<br/>81.3± 1.2
+<br/>87.00± 1.50
+<br/>[1] Peter N. Belhumeur, Jo ˜ao P. Hespanha, and David J. Kriegman. Eigen-
+<br/>faces vs. Fisherfaces: Recognition using class specific linear projec-
+<br/>tion. PAMI, 1997.
+<br/>[2] Dong Chen, Xudong Cao, Fang Wen, and Jian Sun. Blessing of di-
+<br/>mensionality: High dimensional feature and its efficient compression
+<br/>for face verification. In CVPR, 2013.
+<br/>[3] Junlin Hu, Jiwen Lu, and Yap-Peng Tan. Discriminative deep metric
+<br/>learning for face verification in the wild. In CVPR, 2014.
+<br/>[4] Junlin Hu, Jiwen Lu, Junsong Yuan, and Yap-Peng Tan. Large margin
+<br/>multi-metric learning for face and kinship verification in the wild. In
+<br/>ACCV, 2014.
+<br/>Yang. Probabilistic elastic matching for pose variant face verification.
+<br/>In CVPR, 2013.
+<br/>Eigen-pep for video face recognition. In ACCV, 2014.
+<br/>[7] O. M. Parkhi, K. Simonyan, A. Vedaldi, and A. Zisserman. A compact
+<br/>and discriminative face track descriptor. In CVPR, 2014.
+<br/>[8] Lior Wolf and Noga Levy. The svm-minus similarity score for video
+<br/>face recognition. In CVPR, 2013.
+<br/>[9] Lior Wolf, Tal Hassner, and Itay Maoz. Face recognition in uncon-
+<br/>strained videos with matched background similarity. In CVPR, 2011.
+<br/>[10] Cui Zhen, Wen Li, Dong Xu, Shiguang Shan, and Xilin Chen. Fus-
+<br/>ing robust face region descriptors via multiple metric learning for face
+<br/>recognition in the wild. In CVPR, 2013.
+<br/>Figure 1: Construction of the face representation with an example 2-layer Hierarchical-PEP model: PCA at layer t keeps dt dimensions.
+</td><td>('3131569', 'Haoxiang Li', 'haoxiang li')<br/>('1745420', 'Gang Hua', 'gang hua')<br/>('3131569', 'Haoxiang Li', 'haoxiang li')<br/>('1745420', 'Gang Hua', 'gang hua')<br/>('3131569', 'Haoxiang Li', 'haoxiang li')<br/>('1745420', 'Gang Hua', 'gang hua')</td><td></td></tr><tr><td>8c9c8111e18f8798a612e7386e88536dfe26455e</td><td>COMPARING BAYESIAN NETWORKS TO CLASSIFY FACIAL
+<br/>EXPRESSIONS
+<br/><b>Institute of Systems and Robotics</b><br/><b>University of Coimbra, Portugal</b><br/><b>Institute Polythechnic of Leiria, Portugal</b><br/>Jorge Dias
+<br/><b>Institute of Systems and Robotics</b><br/><b>University of Coimbra, Portugal</b><br/><b>Institute of Systems and Robotics</b><br/><b>University of Coimbra, Portugal</b></td><td>('2700157', 'Carlos Simplício', 'carlos simplício')<br/>('40031257', 'José Prado', 'josé prado')</td><td>carlos.simplicio@ipleiria.pt
+<br/>jaugusto@isr.uc.pt
+<br/>jorge@isr.uc.pt
+</td></tr><tr><td>8c7f4c11b0c9e8edf62a0f5e6cf0dd9d2da431fa</td><td>Dataset Augmentation for Pose and Lighting
+<br/>Invariant Face Recognition
+<br/><b>Vision Systems, Inc</b><br/>†Systems and Technology Research
+</td><td>('2103732', 'Octavian Biris', 'octavian biris')<br/>('3390731', 'Nate Crosswhite', 'nate crosswhite')<br/>('36067742', 'Jeffrey Byrne', 'jeffrey byrne')<br/>('3453447', 'Joseph L. Mundy', 'joseph l. mundy')</td><td></td></tr><tr><td>8c81705e5e4a1e2068a5bd518adc6955d49ae434</td><td>3D Object Recognition with Enhanced
+<br/>Grassmann Discriminant Analysis
+<br/>Graduate School of Systems and Information Engineering,
+<br/><b>University of Tsukuba, Japan</b></td><td>('9641567', 'Lincon Sales de Souza', 'lincon sales de souza')<br/>('34581814', 'Hideitsu Hino', 'hideitsu hino')<br/>('1770128', 'Kazuhiro Fukui', 'kazuhiro fukui')</td><td>lincons@cvlab.cs.tsukuba.ac.jp, {hinohide, kfukui}@cs.tsukuba.ac.jp
+</td></tr><tr><td>8cb403c733a5f23aefa6f583a17cf9b972e35c90</td><td>Learning the semantic structure of objects
+<br/>from Web supervision
+<br/>David Novotny1
+<br/>1Visual Geometry Group
+<br/><b>University of Oxford</b><br/>2Computer Vision Group
+<br/>Xerox Research Centre Europe
+</td><td>('2295553', 'Diane Larlus', 'diane larlus')<br/>('1687524', 'Andrea Vedaldi', 'andrea vedaldi')</td><td>{david,andrea}@robots.ox.ac.uk
+<br/>diane.larlus@xrce.xerox.com
+</td></tr><tr><td>8ccde9d80706a59e606f6e6d48d4260b60ccc736</td><td>RotDCF: Decomposition of Convolutional Filters for
+<br/>Rotation-Equivariant Deep Networks
+<br/><b>Duke University</b><br/><b>Duke University</b></td><td>('1823644', 'Xiuyuan Cheng', 'xiuyuan cheng')<br/>('2077648', 'Qiang Qiu', 'qiang qiu')<br/>('1699339', 'Guillermo Sapiro', 'guillermo sapiro')</td><td></td></tr><tr><td>8c6b9c9c26ead75ce549a57c4fd0a12b46142848</td><td>Facial expression recognition using shape and
+<br/>texture information
+<br/>I. Kotsia1 and I. Pitas1
+<br/><b>Aristotle University of Thessaloniki</b><br/>Department of Informatics
+<br/>Box 451 54124
+<br/>Thessaloniki, Greece
+<br/>Summary. A novel method based on shape and texture information is proposed in
+<br/>this paper for facial expression recognition from video sequences. The Discriminant
+<br/>Non-negative Matrix Factorization (DNMF) algorithm is applied at the image cor-
+<br/>responding to the greatest intensity of the facial expression (last frame of the video
+<br/>sequence), extracting that way the texture information. A Support Vector Machines
+<br/>(SVMs) system is used for the classi(cid:12)cation of the shape information derived from
+<br/>tracking the Candide grid over the video sequence. The shape information consists
+<br/>of the di(cid:11)erences of the node coordinates between the (cid:12)rst (neutral) and last (fully
+<br/>expressed facial expression) video frame. Subsequently, fusion of texture and shape
+<br/>information obtained is performed using Radial Basis Function (RBF) Neural Net-
+<br/>works (NNs). The accuracy achieved is equal to 98,2% when recognizing the six
+<br/>basic facial expressions.
+<br/>1.1 Introduction
+<br/>During the past two decades, many studies regarding facial expression recog-
+<br/>nition, which plays a vital role in human centered interfaces, have been
+<br/>conducted. Psychologists have de(cid:12)ned the following basic facial expressions:
+<br/>anger, disgust, fear, happiness, sadness and surprise [?]. A set of muscle move-
+<br/>ments, known as Action Units, was created. These movements form the so
+<br/>called F acial Action Coding System (F ACS) [?]. A survey on auto-
+<br/>matic facial expression recognition can be found in [?].
+<br/>In the current paper, a novel method for video based facial expression
+<br/>recognition by fusing texture and shape information is proposed. The texture
+<br/>information is obtained by applying the DNMF algorithm [?] on the last
+<br/>frame of the video sequence, i.e. the one that corresponds to the greatest
+<br/>intensity of the facial expression depicted. The shape information is calculated
+<br/>as the di(cid:11)erence of Candide facial model grid node coordinates between the
+<br/>(cid:12)rst and the last frame of a video sequence [?]. The decision made regarding
+</td><td></td><td>pitas@aiia.csd.auth.gr
+</td></tr><tr><td>8ce9b7b52d05701d5ef4a573095db66ce60a7e1c</td><td>Structured Sparse Subspace Clustering: A Joint
+<br/>Affinity Learning and Subspace Clustering
+<br/>Framework
+</td><td>('9171002', 'Chun-Guang Li', 'chun-guang li')<br/>('1878841', 'Chong You', 'chong you')</td><td></td></tr><tr><td>8cb6daba2cb1e208e809633133adfee0183b8dd2</td><td>Know Before You Do: Anticipating Maneuvers
+<br/>via Learning Temporal Driving Models
+<br/><b>Cornell University and Stanford University</b></td><td>('1726066', 'Ashesh Jain', 'ashesh jain')<br/>('3282281', 'Bharad Raghavan', 'bharad raghavan')<br/>('1681995', 'Ashutosh Saxena', 'ashutosh saxena')</td><td>{ashesh,hema,asaxena}@cs.cornell.edu {bharad,shanesoh}@stanford.edu
+</td></tr><tr><td>8c4ea76e67a2a99339a8c4decd877fe0aa2d8e82</td><td>Article
+<br/>Gated Convolutional Neural Network for Semantic
+<br/>Segmentation in High-Resolution Images
+<br/><b>National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences</b><br/><b>University of Chinese Academy of Sciences, Beijing 101408, China</b><br/>Academic Editors: Qi Wang, Xiaofeng Li and Prasad S. Thenkabail
+<br/>Received: 2 April 2017; Accepted: 1 May 2017; Published: 5 May 2017
+</td><td>('2206625', 'Hongzhen Wang', 'hongzhen wang')<br/>('1738352', 'Ying Wang', 'ying wang')<br/>('1737486', 'Qian Zhang', 'qian zhang')<br/>('1683738', 'Shiming Xiang', 'shiming xiang')<br/>('3364363', 'Chunhong Pan', 'chunhong pan')</td><td>95 Zhongguancun East Road, Beijing 100190, China; hongzhen.wang@nlpr.ia.ac.cn (H.W.);
+<br/>ywang@nlpr.ia.ac.cn (Y.W.); chpan@nlpr.ia.ac.cn (C.P.)
+<br/>3 Alibaba Group, Beijing 100102, China; zhangqiancsuia@163.com
+<br/>* Correspondence: smxiang@nlpr.ia.ac.cn; Tel.: +86-136-7118-9070
+</td></tr><tr><td>8c6c0783d90e4591a407a239bf6684960b72f34e</td><td>SESSION
+<br/>KNOWLEDGE ENGINEERING AND
+<br/>MANAGEMENT + KNOWLEDGE ACQUISITION
+<br/>Chair(s)
+<br/>TBA
+<br/>Int'l Conf. Information and Knowledge Engineering | IKE'13 |1 </td><td></td><td></td></tr><tr><td>8cb55413f1c5b6bda943697bba1dc0f8fc880d28</td><td>Video-based Face Recognition on Real-World Data
+<br/>Hazım K. Ekenel
+<br/>Interactive System Labs
+<br/><b>University of Karlsruhe, Germany</b></td><td>('1842921', 'Johannes Stallkamp', 'johannes stallkamp')<br/>('1742325', 'Rainer Stiefelhagen', 'rainer stiefelhagen')</td><td>{jstallkamp,ekenel,stiefel}@ira.uka.de
+</td></tr><tr><td>8cc07ae9510854ec6e79190cc150f9f1fe98a238</td><td>Article
+<br/>Using Deep Learning to Challenge Safety Standard
+<br/>for Highly Autonomous Machines in Agriculture
+<br/><b>Aarhus University, Finlandsgade 22 8200 Aarhus N, Denmark</b><br/>† These authors contributed equally to this work.
+<br/>Academic Editors: Francisco Rovira-Más and Gonzalo Pajares Martinsanz
+<br/>Received: 18 December 2015; Accepted: 2 February 2016; Published: 15 February 2016
+</td><td>('32688812', 'Kim Arild Steen', 'kim arild steen')<br/>('2139204', 'Peter Christiansen', 'peter christiansen')<br/>('2550309', 'Henrik Karstoft', 'henrik karstoft')</td><td>pech@eng.au.dk (P.C.); hka@eng.au.dk (H.K.); rnj@eng.au.dk (R.N.J.)
+<br/>* Correspondence: kim.steen@eng.au.dk; Tel.: +45-3116-8628
+</td></tr><tr><td>8509abbde2f4b42dc26a45cafddcccb2d370712f</td><td>Improving precision and recall of face recognition in SIPP with combination of
+<br/>modified mean search and LSH
+<br/>Xihua.Li
+</td><td></td><td>lixihua9@126.com
+</td></tr><tr><td>855bfc17e90ec1b240efba9100fb760c068a8efa</td><td></td><td></td><td></td></tr><tr><td>858ddff549ae0a3094c747fb1f26aa72821374ec</td><td>Survey on RGB, 3D, Thermal, and Multimodal
+<br/>Approaches for Facial Expression Recognition:
+<br/>History, Trends, and Affect-related Applications
+</td><td>('1737918', 'Jeffrey F. Cohn', 'jeffrey f. cohn')<br/>('7855312', 'Sergio Escalera', 'sergio escalera')</td><td></td></tr><tr><td>85041e48b51a2c498f22850ce7228df4e2263372</td><td>Subspace Regression: Predicting
+<br/>a Subspace from One Sample
+<br/><b>Robotics Institute, Carnegie Mellon University</b><br/>‡ Electrical & Controls Integration Lab, General Motors R&D
+</td><td>('34299925', 'Minyoung Kim', 'minyoung kim')</td><td></td></tr><tr><td>85fd2bda5eb3afe68a5a78c30297064aec1361f6</td><td>702003 PSSXXX10.1177/0956797617702003Carr et al.Are You Smiling, or Have I Seen You Before?
+<br/>research-article2017
+<br/>Research Article
+<br/>Are You Smiling, or Have I Seen You
+<br/>Before? Familiarity Makes Faces Look
+<br/>Happier
+<br/>2017, Vol. 28(8) 1087 –1102
+<br/>© The Author(s) 2017
+<br/>Reprints and permissions:
+<br/>sagepub.com/journalsPermissions.nav
+<br/>DOI: 10.1177/0956797617702003
+<br/>https://doi.org/10.1177/0956797617702003
+<br/>www.psychologicalscience.org/PS
+<br/><b>Columbia Business School, University of California, San Diego</b><br/><b>Behavioural Science Group, Warwick Business School, University of Warwick; and 4Faculty of Psychology</b><br/><b>SWPS University of Social Sciences and Humanities</b></td><td>('5907729', 'Evan W. Carr', 'evan w. carr')<br/>('3122131', 'Piotr Winkielman', 'piotr winkielman')</td><td></td></tr><tr><td>857ad04fca2740b016f0066b152bd1fa1171483f</td><td>Sample Images can be Independently Restored from
+<br/> Face Recognition Templates
+<br/><b>School of Information Technology and Engineering, University of Ottawa, Ontario, Canada</b><br/>are being piloted or implemented at airports, for
+<br/>government identification systems such as passports
+<br/>and drivers licenses, and in surveillance applications.
+<br/>In this paper, we consider the identifiability of stored
+<br/>biometric
+<br/>information, and
+<br/>for
+<br/>biometric privacy and security.
+<br/>implications
+<br/>its
+<br/>Biometric authentication is typically performed by
+<br/>a sophisticated software application, which manages
+<br/>the user interface and database, and interacts with a
+<br/>vendor specific, proprietary biometric algorithm.
+<br/>Algorithms undertake the following processing steps:
+<br/>1) acquisition of a biometric sample image, 2)
+<br/>conversion of the sample image to a biometric
+<br/>template, 3) comparison of the new (or "live")
+<br/>template to previously stored templates, to calculate a
+<br/>match score. High match scores indicate a likelihood
+<br/>that the corresponding images are from the same
+<br/>individual. The biometric template is a (typically
+<br/>vendor specific) compact digital representation of the
+<br/>essential features of the sample image. Biometric
+<br/>algorithm vendors have uniformly claimed that it is
+<br/>impossible or infeasible to recreate the image from the
+<br/>template. [2, 3, 4, 7] These claims are supported by: 1)
+<br/>the template records features (such as fingerprint
+<br/>minutiae) and not image primitives, 2) templates are
+<br/>typically calculated using only a small portion of the
+<br/>image, 3) templates are small − a few hundred bytes −
+<br/>much smaller than the sample image, and 4) the
+<br/>proprietary nature of
+<br/>the storage format makes
+<br/>templates infeasible to "hack". For these reasons,
+<br/>biometric templates are considered to be effectively
+<br/>non-identifiable data, much like a password hash [7].
+<br/>In fact, these arguments are not valid: this paper
+<br/>demonstrates a simple algorithm to recreate sample
+<br/>images from templates using only match score results.
+<br/>2. METHODS
+<br/>A software application (figure 1) was designed with
+<br/>the goal of recreating a face image of a specific person
+<br/>in a face recognition database. The application has
+<br/>local access to a database of face images, and has
+<br/>network access to a Face Recognition Server (FRS)
+</td><td>('2478519', 'Andy Adler', 'andy adler')</td><td>aadler@uottawa.ca
+</td></tr><tr><td>858901405086056361f8f1839c2f3d65fc86a748</td><td>ON TENSOR TUCKER DECOMPOSITION: THE CASE FOR AN
+<br/>ADJUSTABLE CORE SIZE
+</td><td>('2424633', 'BILIAN CHEN', 'bilian chen')<br/>('1792785', 'ZHENING LI', 'zhening li')<br/>('1789588', 'SHUZHONG ZHANG', 'shuzhong zhang')</td><td></td></tr><tr><td>85188c77f3b2de3a45f7d4f709b6ea79e36bd0d9</td><td>Author manuscript, published in "Workshop on Faces in 'Real-Life' Images: Detection, Alignment, and Recognition, Marseille :
+<br/>France (2008)"
+</td><td></td><td></td></tr><tr><td>858b51a8a8aa082732e9c7fbbd1ea9df9c76b013</td><td>Can Computer Vision Problems Benefit from
+<br/>Structured Hierarchical Classification?
+<br/>Sandor Szedmak2
+<br/><b>INTELSIG, Monte ore Institute, University of Li`ege, Belgium</b><br/><b>Intelligent and Interactive Systems, Institute of Computer Science, University of</b><br/>Innsbruck, Austria
+</td><td>('3104165', 'Thomas Hoyoux', 'thomas hoyoux')<br/>('1772389', 'Justus H. Piater', 'justus h. piater')</td><td></td></tr><tr><td>856317f27248cdb20226eaae599e46de628fb696</td><td>A Method Based on Convex Cone Model for
+<br/>Image-Set Classification with CNN Features
+<br/><b>Graduate School of Systems and Information Engineering, University of Tsukuba</b><br/>1-1-1 Tennodai, Tsukuba, Ibaraki, 305-8573, Japan
+</td><td>('46230115', 'Naoya Sogi', 'naoya sogi')<br/>('2334316', 'Taku Nakayama', 'taku nakayama')<br/>('1770128', 'Kazuhiro Fukui', 'kazuhiro fukui')</td><td>Email: {sogi, nakayama}@cvlab.cs.tsukuba.ac.jp, kfukui@cs.tsukuba.ac.jp
+</td></tr><tr><td>8518b501425f2975ea6dcbf1e693d41e73d0b0af</td><td>Relative Hidden Markov Models for Evaluating Motion Skills
+<br/>Computer Science and Engineering
+<br/>Arizona State Univerisity, Tempe, AZ 85281
+</td><td>('1689161', 'Qiang Zhang', 'qiang zhang')<br/>('2913552', 'Baoxin Li', 'baoxin li')</td><td>qzhang53,baoxin.li@asu.edu
+</td></tr><tr><td>855184c789bca7a56bb223089516d1358823db0b</td><td>Automatic Procedure to Fix Closed-Eyes Image
+<br/><b>University of California, Berkeley</b><br/>Figure 1: Pipeline to Fix Closed-Eyes Image
+</td><td>('31781046', 'Hung Vu', 'hung vu')</td><td></td></tr><tr><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td><td>Learning Face Representation from Scratch
+<br/>Center for Biometrics and Security Research & National Laboratory of Pattern Recognition
+<br/><b>Institute of Automation, Chinese Academy of Sciences (CASIA</b></td><td>('1716143', 'Dong Yi', 'dong yi')<br/>('1718623', 'Zhen Lei', 'zhen lei')<br/>('40397682', 'Shengcai Liao', 'shengcai liao')<br/>('34679741', 'Stan Z. Li', 'stan z. li')</td><td>dong.yi, zlei, scliao, szli@nlpr.ia.ac.cn
+</td></tr><tr><td>85639cefb8f8deab7017ce92717674d6178d43cc</td><td>Automatic Analysis of Spontaneous Facial Behavior:
+<br/>A Final Project Report
+<br/>(UCSD MPLab TR 2001.08, October 31 2001)
+<br/><b>cid:1)Institute for Neural Computation</b><br/>(cid:2)Department of Cognitive Science
+<br/><b>University of California, San Diego</b><br/><b>cid:3)The Salk Institute and Howard Hughes Medical Institute</b></td><td>('2218905', 'Marian S. Bartlett', 'marian s. bartlett')<br/>('33937541', 'Bjorn Braathen', 'bjorn braathen')<br/>('2039025', 'Ian Fasel', 'ian fasel')<br/>('1714528', 'Terrence J. Sejnowski', 'terrence j. sejnowski')<br/>('1741200', 'Javier R. Movellan', 'javier r. movellan')</td><td></td></tr><tr><td>854dbb4a0048007a49df84e3f56124d387588d99</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 13, NO. 9, SEPTEMBER 2014
+<br/>Spatial-Temporal Recurrent Neural Network for
+<br/>Emotion Recognition
+</td><td>('38144094', 'Tong Zhang', 'tong zhang')<br/>('40608983', 'Wenming Zheng', 'wenming zheng')<br/>('10338111', 'Zhen Cui', 'zhen cui')<br/>('2378869', 'Yuan Zong', 'yuan zong')<br/>('1678662', 'Yang Li', 'yang li')</td><td></td></tr><tr><td>85674b1b6007634f362cbe9b921912b697c0a32c</td><td>Optimizing Facial Landmark Detection by
+<br/>Facial Attribute Learning
+<br/><b>The Chinese University of Hong Kong</b></td><td>('3152448', 'Zhanpeng Zhang', 'zhanpeng zhang')<br/>('1693209', 'Ping Luo', 'ping luo')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td></td></tr><tr><td>1d21e5beef23eecff6fff7d4edc16247f0fd984a</td><td>Face Recognition from Video using the Generic
+<br/>Shape-Illumination Manifold
+<br/>Department of Engineering
+<br/><b>University of Cambridge</b><br/>Cambridge, CB2 1PZ, UK
+</td><td>('1745672', 'Roberto Cipolla', 'roberto cipolla')</td><td>{oa214,cipolla}@eng.cam.ac.uk
+</td></tr><tr><td>1dbbec4ad8429788e16e9f3a79a80549a0d7ac7b</td><td></td><td></td><td></td></tr><tr><td>1d7ecdcb63b20efb68bcc6fd99b1c24aa6508de9</td><td>1860
+<br/>The Hidden Sides of Names—Face Modeling
+<br/>with First Name Attributes
+</td><td>('2896700', 'Huizhong Chen', 'huizhong chen')<br/>('39460815', 'Andrew C. Gallagher', 'andrew c. gallagher')<br/>('1739786', 'Bernd Girod', 'bernd girod')</td><td></td></tr><tr><td>1d846934503e2bd7b8ea63b2eafe00e29507f06a</td><td></td><td></td><td></td></tr><tr><td>1d19c6857e798943cd0ecd110a7a0d514c671fec</td><td>Do Deep Neural Networks Learn Facial Action Units
+<br/>When Doing Expression Recognition?
+<br/><b>Beckman Institute for Advanced Science and Technology</b><br/><b>University of Illinois at Urbana-Champaign</b></td><td>('1911177', 'Pooya Khorrami', 'pooya khorrami')<br/>('40470211', 'Tom Le Paine', 'tom le paine')<br/>('1739208', 'Thomas S. Huang', 'thomas s. huang')</td><td>{pkhorra2,paine1,t-huang1}@illinois.edu
+</td></tr><tr><td>1d1a7ef193b958f9074f4f236060a5f5e7642fc1</td><td>Int'l Conf. IP, Comp. Vision, and Pattern Recognition | IPCV'13 \
+<br/>675
+<br/>Ensemble of Patterns of Oriented Edge Magnitudes
+<br/>Descriptors For Face Recognition
+<br/><b>Computer Information Systems, Missouri State University, 901 S. National, Springfield, MO 65804, USA</b><br/>faces; and 3) face tagging, which is a particular case of face
+<br/>identification.
+</td><td>('1804258', 'Loris Nanni', 'loris nanni')<br/>('1707759', 'Alessandra Lumini', 'alessandra lumini')<br/>('2292370', 'Sheryl Brahnam', 'sheryl brahnam')</td><td>*DEI, University o f Padua, viale Gradenigo 6, Padua, Italy, {loris.nanni, mauro.migliardi}@unipd.it;
+<br/>2DEI, Universita di Bologna, Via Venezia 52, 47521 Cesena, Italy, alessandra.lumini@ unibo.it;
+<br/>sbrahnam@missouristate.edu
+</td></tr><tr><td>1d696a1beb42515ab16f3a9f6f72584a41492a03</td><td>Deeply learned face representations are sparse, selective, and robust
+<br/><b>The Chinese University of Hong Kong</b><br/><b>The Chinese University of Hong Kong</b><br/><b>Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences</b></td><td>('1681656', 'Yi Sun', 'yi sun')<br/>('31843833', 'Xiaogang Wang', 'xiaogang wang')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td>sy011@ie.cuhk.edu.hk
+<br/>xgwang@ee.cuhk.edu.hk
+<br/>xtang@ie.cuhk.edu.hk
+</td></tr><tr><td>1d1caaa2312390260f7d20ad5f1736099818d358</td><td>Resource-Allocating Codebook for Patch-based Face Recognition
+<br/>School of Electronics and Computer Science
+<br/><b>University of Southampton, SO17 1BJ, UK</b></td><td>('34672932', 'Amirthalingam Ramanan', 'amirthalingam ramanan')<br/>('1697360', 'Mahesan Niranjan', 'mahesan niranjan')</td><td>{ar07r,mn}@ecs.soton.ac.uk
+</td></tr><tr><td>1dc241ee162db246882f366644171c11f7aed96d</td><td>Deep Action- and Context-Aware Sequence Learning for Activity Recognition
+<br/>and Anticipation
+<br/><b>Australian National University, 2Smart Vision Systems, CSIRO, 3CVLab, EPFL</b></td><td>('35441838', 'Fatemehsadat Saleh', 'fatemehsadat saleh')<br/>('1688071', 'Basura Fernando', 'basura fernando')<br/>('2862871', 'Mathieu Salzmann', 'mathieu salzmann')<br/>('2370776', 'Lars Petersson', 'lars petersson')<br/>('34234277', 'Lars Andersson', 'lars andersson')</td><td>firstname.lastname@data61.csiro.au, basura.fernando@anu.edu.au, mathieu.salzmann@epfl.ch
+</td></tr><tr><td>1d0128b9f96f4c11c034d41581f23eb4b4dd7780</td><td>Automatic Construction Of Robust Spherical Harmonic Subspaces
+<br/><b>Imperial College London</b></td><td>('2796644', 'Patrick Snape', 'patrick snape')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')<br/>('1780393', 'Yannis Panagakis', 'yannis panagakis')</td><td>{p.snape,i.panagakis,s.zafeiriou}@imperial.ac.uk
+</td></tr><tr><td>1d3dd9aba79a53390317ec1e0b7cd742cba43132</td><td>A Maximum Entropy Feature Descriptor for Age Invariant Face Recognition
+<br/>(cid:31)
+<br/>1Shenzhen Key Lab of Computer Vision and Pattern Recognition
+<br/><b>Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences, China</b><br/><b>Centre for Quantum Computation and Intelligent Systems, Faculty of Engineering and IT, University of</b><br/>Technology, Sydney, NSW 2007, Australia
+<br/><b>the Chinese University of Hong Kong</b><br/>4Media Lab, Huawei Technologies Co. Ltd., China
+<br/><b>Xi'an Institute of Optics and Precision Mechanics, Chinese Academy of Sciences</b></td><td>('2856494', 'Dihong Gong', 'dihong gong')<br/>('7137861', 'Jianzhuang Liu', 'jianzhuang liu')<br/>('1911510', 'Zhifeng Li', 'zhifeng li')<br/>('1692693', 'Dacheng Tao', 'dacheng tao')<br/>('1720243', 'Xuelong Li', 'xuelong li')</td><td>dh.gong@siat.ac.cn
+<br/>zhifeng.li@siat.ac.cn
+<br/>dacheng.tao@uts.edu.au
+<br/>liu.jianzhuang@huawei.com
+<br/>xuelong_li@opt.ac.cn
+</td></tr><tr><td>1d0dd20b9220d5c2e697888e23a8d9163c7c814b</td><td>NEGREL ET AL.: BOOSTED METRIC LEARNING FOR FACE RETRIEVAL
+<br/>Boosted Metric Learning for Efficient
+<br/>Identity-Based Face Retrieval
+<br/>Frederic Jurie
+<br/>GREYC, CNRS UMR 6072, ENSICAEN
+<br/>Université de Caen Basse-Normandie
+<br/>France
+</td><td>('2838835', 'Romain Negrel', 'romain negrel')<br/>('2504258', 'Alexis Lechervy', 'alexis lechervy')</td><td>romain.negrel@unicaen.fr
+<br/>alexis.lechervy@unicaen.fr
+<br/>frederic.jurie@unicaen.fr
+</td></tr><tr><td>1d5aad4f7fae6d414ffb212cec1f7ac876de48bf</td><td>Face Retriever: Pre-filtering the Gallery via Deep Neural Net
+<br/>Department of Computer Science and Engineering
+<br/><b>Michigan State University, East Lansing, MI 48824, U.S.A</b></td><td>('7496032', 'Dayong Wang', 'dayong wang')<br/>('6680444', 'Anil K. Jain', 'anil k. jain')</td><td>{dywang, jain}@msu.edu
+</td></tr><tr><td>1db23a0547700ca233aef9cfae2081cd8c5a04d7</td><td>www.ijecs.in
+<br/>International Journal Of Engineering And Computer Science ISSN:2319-7242
+<br/>Volume 4 Issue 5 May 2015, Page No. 11945-11951
+<br/>Comparative study and evaluation of various data classification
+<br/>techniques in data mining
+<br/>1Research scholar
+<br/>Department of computer science
+<br/><b>Raipur institute of technology</b><br/>Raipur, India
+<br/> 2Asst. professor
+<br/> Department of computer science
+<br/><b>Raipur institute of technology</b><br/> Raipur, India
+</td><td>('1977125', 'Vivek Verma', 'vivek verma')</td><td>E-mail: vivekverma.exe@gmail.com
+</td></tr><tr><td>1d776bfe627f1a051099997114ba04678c45f0f5</td><td>Deployment of Customized Deep Learning based
+<br/>Video Analytics On Surveillance Cameras
+<br/>AitoeLabs (www.aitoelabs.com)
+</td><td>('46175439', 'Pratik Dubal', 'pratik dubal')<br/>('22549601', 'Rohan Mahadev', 'rohan mahadev')<br/>('9745898', 'Suraj Kothawade', 'suraj kothawade')<br/>('46208440', 'Kunal Dargan', 'kunal dargan')</td><td></td></tr><tr><td>1d97735bb0f0434dde552a96e1844b064af08f62</td><td>Weber Binary Pattern and Weber Ternary Pattern
+<br/>for Illumination-Robust Face Recognition
+<br/><b>Tsinghua University, China</b><br/>Shenzhen Key Laboratory of Information Science and Technology, Guangdong, China
+</td><td>('35160104', 'Zuodong Yang', 'zuodong yang')<br/>('2312541', 'Yinyan Jiang', 'yinyan jiang')<br/>('40398990', 'Yong Wu', 'yong wu')<br/>('2265693', 'Zongqing Lu', 'zongqing lu')<br/>('1718891', 'Weifeng Li', 'weifeng li')<br/>('2883861', 'Qingmin Liao', 'qingmin liao')</td><td>(cid:3) E-mail: yangzd13@mails.tsinghua.edu.cn
+<br/>y E-mail: Li.Weifeng@sz.tsinghua.edu.cn
+</td></tr><tr><td>1d3e01d5e2721dcfafe5a3b39c54ee1c980350bb</td><td></td><td></td><td></td></tr><tr><td>1dff919e51c262c22630955972968f38ba385d8a</td><td>Toward an Affect-Sensitive Multimodal
+<br/>Human–Computer Interaction
+<br/>Invited Paper
+<br/>The ability to recognize affective states of a person we are com-
+<br/>municating with is the core of emotional intelligence. Emotional
+<br/>intelligenceisa facet of human intelligence thathas been argued to be
+<br/>indispensable and perhaps the most important for successful inter-
+<br/>personal social interaction. This paper argues that next-generation
+<br/>human–computer interaction (HCI) designs need to include the
+<br/>essence of emotional intelligence—the ability to recognize a user’s
+<br/>affective states—in order to become more human-like, more effec-
+<br/>tive, and more efficient. Affective arousal modulates all nonverbal
+<br/>communicative cues (facial expressions, body movements, and vocal
+<br/>and physiological reactions). In a face-to-face interaction, humans
+<br/>detect and interpret those interactive signals of their communicator
+<br/>with little or no effort. Yet design and development of an automated
+<br/>system that accomplishes these tasks is rather difficult. This paper
+<br/>surveys the past work in solving these problems by a computer
+<br/>and provides a set of recommendations for developing the first
+<br/>part of an intelligent multimodal HCI—an automatic personalized
+<br/>analyzer of a user’s nonverbal affective feedback.
+<br/>Keywords—Affective computing, affective states, automatic
+<br/>analysis of nonverbal communicative cues, human–computer
+<br/>interaction (HCI), multimodal human–computer
+<br/>interaction,
+<br/>personalized human–computer interaction.
+<br/>I. INTRODUCTION
+<br/>The exploration of how we as human beings react to the
+<br/>world and interact with it and each other remains one of
+<br/>the greatest scientific challenges. Perceiving, learning, and
+<br/>adapting to the world around us are commonly labeled as
+<br/>“intelligent” behavior. But what does it mean being intelli-
+<br/>gent? Is IQ a good measure of human intelligence and the
+<br/>best predictor of somebody’s success in life? There is now
+<br/>growing research in the fields of neuroscience, psychology,
+<br/>and cognitive science which argues that our common view of
+<br/>intelligence is too narrow, ignoring a crucial range of abilities
+<br/>Manuscript received October 25, 2002; revised March 5, 2003. The work
+<br/>of M. Pantic was supported by the Netherlands Organization for Scientific
+<br/>Research (NWO) Grant EW-639.021.202.
+<br/><b>The authors are with the Delft University of Technology, Data and Knowl</b><br/>edge Systems Group, Mediamatics Department, 2600 AJ Delft, The Nether-
+<br/>Digital Object Identifier 10.1109/JPROC.2003.817122
+<br/>that matter immensely to how we do in life. This range
+<br/>of abilities is called emotional intelligence [44], [96] and
+<br/>includes the ability to have, express, and recognize affective
+<br/>states, coupled with the ability to regulate them, employ them
+<br/>for constructive purpose, and skillfully handle the affective
+<br/>arousal of others. The skills of emotional intelligence have
+<br/>been argued to be a better predictor than IQ for measuring
+<br/>aspects of success in life [44], especially in interpersonal
+<br/>communication, and learning and adapting to what
+<br/>is
+<br/>important [10], [96].
+<br/>When it comes to the world of computers, not all of them
+<br/>will need emotional skills and probably none will need all
+<br/>of the skills that humans need. Yet there are situations where
+<br/>the man–machine interaction could be improved by having
+<br/>machines capable of adapting to their users and where the in-
+<br/>formation about how, when, and how important it is to adapt
+<br/>involves information on the user’s affective state. In addition,
+<br/>it seems that people regard computers as social agents with
+<br/>whom “face-to-(inter)face” interaction may be most easy and
+<br/>serviceable [11], [75], [90], [101], [110]. Human–computer
+<br/>interaction (HCI) systems capable of sensing and responding
+<br/>appropriately to the user’s affective feedback are, therefore,
+<br/>likely to be perceived as more natural [73], more efficacious
+<br/>and persuasive [93], and more trustworthy [14], [78].
+<br/>These findings, together with recent advances in sensing,
+<br/>tracking, analyzing, and animating human nonverbal com-
+<br/>municative signals, have produced a surge of interest in
+<br/>affective computing by researchers of advanced HCI. This
+<br/>intriguing new field focuses on computational modeling of
+<br/>human perception of affective states, synthesis/animation of
+<br/>affective expressions, and design of affect-sensitive HCI.
+<br/>Indeed, the first step toward an intelligent HCI having the
+<br/>abilities to sense and respond appropriately to the user’s af-
+<br/>fective feedback is to detect and interpret affective states
+<br/>shown by the user in an automatic way. This paper focuses
+<br/>further on surveying the past work done on solving these
+<br/>problems and providing an advanced HCI with one of the
+<br/>key skills of emotional intelligence: the ability to recognize
+<br/>the user’s nonverbal affective feedback.
+<br/>0018-9219/03$17.00 © 2003 IEEE
+<br/>1370
+<br/>PROCEEDINGS OF THE IEEE, VOL. 91, NO. 9, SEPTEMBER 2003
+</td><td>('1694605', 'MAJA PANTIC', 'maja pantic')</td><td>lands (e-mail: M.Pantic@cs.tudelft.nl; L.J.M.Rothkrantz@cs.tudelft.nl).
+</td></tr><tr><td>1de8f38c35f14a27831130060810cf9471a62b45</td><td>Int J Comput Vis
+<br/>DOI 10.1007/s11263-017-0989-7
+<br/>A Branch-and-Bound Framework for Unsupervised Common
+<br/>Event Discovery
+<br/>Received: 3 June 2016 / Accepted: 12 January 2017
+<br/>© Springer Science+Business Media New York 2017
+</td><td>('39336289', 'Wen-Sheng Chu', 'wen-sheng chu')<br/>('1737918', 'Jeffrey F. Cohn', 'jeffrey f. cohn')<br/>('1874236', 'Daniel S. Messinger', 'daniel s. messinger')<br/>('1707876', 'Fernando De la Torre', 'fernando de la torre')</td><td></td></tr><tr><td>1da83903c8d476c64c14d6851c85060411830129</td><td>Iterated Support Vector Machines for Distance
+<br/>Metric Learning
+</td><td>('1724520', 'Wangmeng Zuo', 'wangmeng zuo')<br/>('6292353', 'Faqiang Wang', 'faqiang wang')<br/>('1698371', 'David Zhang', 'david zhang')<br/>('1737218', 'Liang Lin', 'liang lin')<br/>('2224875', 'Yuchi Huang', 'yuchi huang')<br/>('1803714', 'Deyu Meng', 'deyu meng')<br/>('36685537', 'Lei Zhang', 'lei zhang')</td><td></td></tr><tr><td>1d6068631a379adbcff5860ca2311b790df3a70f</td><td></td><td></td><td></td></tr><tr><td>1dacc2f4890431d867a038fd81c111d639cf4d7e</td><td>2016, Vol. 125, No. 2, 310 –321
+<br/>0021-843X/16/$12.00
+<br/>© 2016 American Psychological Association
+<br/>http://dx.doi.org/10.1037/abn0000139
+<br/>Using Social Outcomes to Inform Decision-Making in Schizophrenia:
+<br/>Relationships With Symptoms and Functioning
+<br/>Timothy R. Campellone, Aaron J. Fisher, and Ann M. Kring
+<br/><b>University of California, Berkeley</b><br/>The outcomes of the decisions we make can be used to inform subsequent choices and behavior. We
+<br/>investigated whether and how people with and without schizophrenia use positive and negative social
+<br/>outcomes and emotional displays to inform decisions to place trust in social partners. We also investi-
+<br/>gated the impact of reversals in social partners’ behavior on decisions to trust. Thirty-two people with
+<br/>schizophrenia and 29 control participants completed a task in which they decided how much trust to place
+<br/>in social partners’ showing either a dynamic emotional (smiling, scowling) or neutral display. Interac-
+<br/>tions were predetermined to result in positive (trust reciprocated) or negative (trust abused) outcomes,
+<br/>and we modeled changes in trust decisions over the course of repeated interactions. Compared to
+<br/>controls, people with schizophrenia were less sensitive to positive social outcomes in that they placed less
+<br/>trust in trustworthy social partners during initial interactions. By contrast, people with schizophrenia were
+<br/>more sensitive to negative social outcomes during initial interactions with untrustworthy social partners,
+<br/>placing less trust in these partners compared to controls. People with schizophrenia did not differ from
+<br/>controls in detecting social partner behavior reversals from trustworthy to untrustworthy; however, they
+<br/>had difficulties detecting reversals from untrustworthy to trustworthy. Importantly, decisions to trust
+<br/>were associated with real-world social functioning. We discuss the implications of these findings for
+<br/>understanding social engagement among people with schizophrenia and the development of psychosocial
+<br/>interventions for social functioning.
+<br/>General Scientific Summary
+<br/>People with schizophrenia can have difficulties using decision outcomes to guide subsequent
+<br/>decision-making and behavior. This study extends previous work by showing that people with
+<br/>schizophrenia also have difficulties using social interaction outcomes to guide subsequent social
+<br/>decision-making and behavior. These findings have implications for understanding decreased social
+<br/>networks common among people with schizophrenia.
+<br/>Keywords: schizophrenia, decision-making, social interactions, trust
+<br/>Decision-making is an important part of daily life, with the
+<br/>outcomes of decisions influencing subsequent choices and deci-
+<br/>sions. While prior research has shown that people with schizo-
+<br/>phrenia have difficulty using monetary outcomes to guide subse-
+<br/>quent decisions (Heerey & Gold, 2007; Barch & Dowd, 2010), we
+<br/>know considerably less about whether people with schizophrenia
+<br/>have difficulty using social outcomes to inform decision-making
+<br/>in the context of social interactions. We investigated the extent to
+<br/>Timothy R. Campellone, Aaron J. Fisher, and Ann M. Kring, Depart-
+<br/><b>ment of Psychology, University of California, Berkeley</b><br/><b>Funding was provided by the U.S. National Institutes of Mental</b><br/>Health (Grant 5T32MH089919 to Timothy R. Campellone and Grant
+<br/>1R01MH082890 to Ann M. Kring). We are grateful to Janelle Painter,
+<br/>Erin Moran, and Jasmine Mote for their help in collecting this data. We are
+<br/>also grateful to Stephen Hinshaw for reading a previous version of this
+<br/>article. We would also like to thank all the participants in this study.
+<br/>Correspondence concerning this article should be addressed to Timothy
+<br/><b>R. Campellone, 3210 Tolman Hall, University of California, Berkeley</b><br/>310
+<br/>which people with schizophrenia use social outcomes to inform
+<br/>decision-making, and how this is related to motivation/pleasure
+<br/>negative symptoms and psychosocial functioning. Because social
+<br/>interactions often involve emotion, we also examined whether and
+<br/>how people with schizophrenia use social partners’ emotional
+<br/>displays to guide learning from social outcomes and inform sub-
+<br/>sequent decision-making.
+<br/>Monetary Decision-Making and Reversal Learning
+<br/>in Schizophrenia
+<br/>Studies using reward-learning paradigms with monetary out-
+<br/>comes have consistently shown that compared to controls, people
+<br/>with schizophrenia have difficulty using positive outcomes to
+<br/>inform decision-making (Strauss et al., 2011; Gold et al., 2012).
+<br/>These difficulties are associated with poorer functioning (Somlai,
+<br/>Moustafa, Kéri, Myers, & Gluck, 2011) as well as greater moti-
+<br/>vation/pleasure negative symptoms (Strauss et al., 2011; Gold et
+<br/>al., 2012), which are part of the two-factor solution of negative
+<br/>symptoms and refer to diminished engagement in and/or pleasure
+<br/>derived from social, vocational, and recreational life domains
+<br/>(Kring, Gur, Blanchard, Horan, & Reise, 2013). By contrast,
+</td><td></td><td>Berkeley, CA 94720-1690. E-mail: tcampellone@berkeley.edu
+</td></tr><tr><td>1de690714f143a8eb0d6be35d98390257a3f4a47</td><td>Face Detection Using Spectral Histograms and SVMs
+<br/><b>The Florida State University</b><br/>Tallahassee, FL 32306
+</td><td>('3209925', 'Christopher A. Waring', 'christopher a. waring')<br/>('1800002', 'Xiuwen Liu', 'xiuwen liu')</td><td>chwaring@cs.fsu.edu liux@cs.fsu.edu
+</td></tr><tr><td>1d7df3df839a6aa8f5392310d46b2a89080a3c25</td><td>Large-Margin Softmax Loss for Convolutional Neural Networks
+<br/>Meng Yang4
+<br/><b>School of ECE, Peking University 2School of EIE, South China University of Technology</b><br/><b>Carnegie Mellon University 4College of CS and SE, Shenzhen University</b></td><td>('36326884', 'Weiyang Liu', 'weiyang liu')<br/>('2512949', 'Yandong Wen', 'yandong wen')<br/>('1751019', 'Zhiding Yu', 'zhiding yu')</td><td>WYLIU@PKU.EDU.CN
+<br/>WEN.YANDONG@MAIL.SCUT.EDU.CN
+<br/>YZHIDING@ANDREW.CMU.EDU
+<br/>YANG.MENG@SZU.EDU.CN
+</td></tr><tr><td>1d6c09019149be2dc84b0c067595f782a5d17316</td><td>Encoding Video and Label Priors for Multi-label Video Classification
+<br/>on YouTube-8M dataset
+<br/><b>Seoul National University</b><br/><b>Seoul National University</b><br/><b>Seoul National University</b><br/>SK Telecom Video Tech. Lab
+<br/><b>Seoul National University</b></td><td>('19255603', 'Seil Na', 'seil na')<br/>('7877122', 'Youngjae Yu', 'youngjae yu')<br/>('1693291', 'Sangho Lee', 'sangho lee')<br/>('2077253', 'Jisung Kim', 'jisung kim')<br/>('1743920', 'Gunhee Kim', 'gunhee kim')</td><td>seil.na@vision.snu.ac.kr
+<br/>yj.yu@vision.snu.ac.kr
+<br/>sangho.lee@vision.snu.ac.kr
+<br/>joyful.kim@sk.com
+<br/>gunhee@snu.ac.kr
+</td></tr><tr><td>1d58d83ee4f57351b6f3624ac7e727c944c0eb8d</td><td>Enhanced Local Texture
+<br/>Feature Sets for Face
+<br/>Recognition under Difficult
+<br/>Lighting Conditions
+<br/>INRIA & Laboratoire Jean
+<br/>Kuntzmann,
+<br/>655 avenue de l'Europe, Montbonnot 38330, France
+</td><td>('2248421', 'Xiaoyang Tan', 'xiaoyang tan')<br/>('1756114', 'Bill Triggs', 'bill triggs')</td><td></td></tr><tr><td>1d729693a888a460ee855040f62bdde39ae273af</td><td>Photorealistic Face de-Identification by Aggregating
+<br/>Donors’ Face Components
+<br/>To cite this version:
+<br/>gating Donors’ Face Components. Asian Conference on Computer Vision, Nov 2014, Singapore.
+<br/>pp.1-16, 2014. <hal-01070658>
+<br/>HAL Id: hal-01070658
+<br/>https://hal.archives-ouvertes.fr/hal-01070658
+<br/>Submitted on 2 Oct 2014
+<br/>HAL is a multi-disciplinary open access
+<br/>archive for the deposit and dissemination of sci-
+<br/>entific research documents, whether they are pub-
+<br/>lished or not. The documents may come from
+<br/>teaching and research institutions in France or
+<br/><b>abroad, or from public or private research centers</b><br/>L’archive ouverte pluridisciplinaire HAL, est
+<br/>destin´ee au d´epˆot et `a la diffusion de documents
+<br/>scientifiques de niveau recherche, publi´es ou non,
+<br/>´emanant des ´etablissements d’enseignement et de
+<br/>recherche fran¸cais ou ´etrangers, des laboratoires
+<br/>publics ou priv´es.
+</td><td>('3095534', 'Saleh Mosaddegh', 'saleh mosaddegh')<br/>('3095534', 'Saleh Mosaddegh', 'saleh mosaddegh')</td><td></td></tr><tr><td>1d4c25f9f8f08f5a756d6f472778ab54a7e6129d</td><td>International Journal of Science and Research (IJSR)
+<br/>ISSN (Online): 2319-7064
+<br/>Index Copernicus Value (2014): 6.14 | Impact Factor (2014): 4.438
+<br/>An Innovative Mean Approach for Plastic Surgery
+<br/>Face Recognition
+<br/>1 Student of M.E., Department of Electronics & Telecommunication Engineering,
+<br/><b>P. R. Patil College of Engineering, Amravati Maharashtra India</b><br/>2 Assistant Professor, Department of Electronics & Telecommunication Engineering,
+<br/><b>P. R. Patil College of Engineering, Amravati Maharashtra India</b></td><td>('2936550', 'Umesh W. Hore', 'umesh w. hore')</td><td></td></tr><tr><td>71b376dbfa43a62d19ae614c87dd0b5f1312c966</td><td>The Temporal Connection Between Smiles and Blinks
+</td><td>('2048839', 'Laura C. Trutoiu', 'laura c. trutoiu')<br/>('1788773', 'Jessica K. Hodgins', 'jessica k. hodgins')<br/>('1737918', 'Jeffrey F. Cohn', 'jeffrey f. cohn')</td><td></td></tr><tr><td>71b07c537a9e188b850192131bfe31ef206a39a0</td><td>Image and Vision Computing 47 (2016) 3–18
+<br/>Contents lists available at ScienceDirect
+<br/>Image and Vision Computing
+<br/>j o u r n a l h o m e p a g e : w w w . e l s e v i e r . c o m / l o c a t e / i m a v i s
+<br/>300 Faces In-The-Wild Challenge: database and results夽,夽夽
+<br/><b>aImperial College London, London, UK</b><br/><b>bUniversity of Nottingham, School of Computer Science, Nottingham, UK</b><br/><b>cFaculty of Electrical Engineering, Mathematics, and Computer Science, University of Twente, The Netherlands</b><br/>A R T I C L E
+<br/>I N F O
+<br/>A B S T R A C T
+<br/>Article history:
+<br/>Received 19 March 2015
+<br/>Received in revised form 2 October 2015
+<br/>Accepted 4 January 2016
+<br/>Available online 25 January 2016
+<br/>Keywords:
+<br/>Facial landmark localization
+<br/>Challenge
+<br/>Semi-automatic annotation tool
+<br/>Facial database
+<br/>Computer Vision has recently witnessed great research advance towards automatic facial points detection.
+<br/>Numerous methodologies have been proposed during the last few years that achieve accurate and efficient
+<br/>performance. However, fair comparison between these methodologies is infeasible mainly due to two issues.
+<br/>(a) Most existing databases, captured under both constrained and unconstrained (in-the-wild) conditions
+<br/>have been annotated using different mark-ups and, in most cases, the accuracy of the annotations is low. (b)
+<br/>Most published works report experimental results using different training/testing sets, different error met-
+<br/>rics and, of course, landmark points with semantically different locations. In this paper, we aim to overcome
+<br/>the aforementioned problems by (a) proposing a semi-automatic annotation technique that was employed
+<br/>to re-annotate most existing facial databases under a unified protocol, and (b) presenting the 300 Faces In-
+<br/>The-Wild Challenge (300-W), the first facial landmark localization challenge that was organized twice, in
+<br/>2013 and 2015. To the best of our knowledge, this is the first effort towards a unified annotation scheme
+<br/>of massive databases and a fair experimental comparison of existing facial landmark localization systems.
+<br/>The images and annotations of the new testing database that was used in the 300-W challenge are available
+<br/>from http://ibug.doc.ic.ac.uk/resources/300-W_IMAVIS/.
+<br/>© 2016 Elsevier B.V. All rights reserved.
+<br/>1. Introduction
+<br/>During the last decades we notice a wealth of scientific research
+<br/>in computer vision for the problem of facial landmark points localiza-
+<br/>tion using visual deformable models. The main reason behind this are
+<br/>the countless applications that the problem has in human-computer
+<br/>interaction and facial expression recognition. Numerous methodolo-
+<br/>gies have been proposed that are shown to achieve great accuracy
+<br/>and efficiency. They can be roughly divided into two categories:
+<br/>generative and discriminative. The generative techniques, which aim
+<br/>to find the parameters that maximize the probability of the test
+<br/><b>image being generated by the model, include Active Appearance</b><br/>Models (AAMs) [1,2], their improved extensions [3–10] and Pictorial
+<br/>夽 The contribution of the first two authors on writing this paper is equal, with
+<br/>various steps needed to run 300-W successfully including data annotation, annotation
+<br/>tool development, and running the experiments.
+<br/>夽夽 This paper has been recommended for acceptance by Richard Bowden, PhD.
+<br/>* Corresponding author.
+<br/>http://dx.doi.org/10.1016/j.imavis.2016.01.002
+<br/>0262-8856/© 2016 Elsevier B.V. All rights reserved.
+<br/>Structures [11–13]. The discriminative techniques can be further
+<br/>divided to those that use discriminative response map functions,
+<br/>such as Active Shape Models (ASMs) [14], Constrained Local Models
+<br/>(CLMs) [15–17] and Deformable Part Models (DPMs) [18], those that
+<br/>learn a cascade of regression functions, such as Supervised Descent
+<br/>Method (SDM) [19] and others [20–22], and, finally, those that
+<br/>employ random forests [23,24].
+<br/>Arguably, the main reason why many researchers of the field
+<br/>focus on the problem of face alignment is the plethora of publicly
+<br/>available annotated facial databases. These databases can be sepa-
+<br/>rated in two major categories: (a) those captured under controlled
+<br/>conditions, e.g. Multi-PIE [25], XM2VTS [26], FRGC-V2 [27], and
+<br/>AR [28], and (b) those captured under totally unconstrained condi-
+<br/>tions (in-the-wild), e.g. LFPW [29], HELEN [30], AFW[18], AFLW[31],
+<br/><b>and IBUG [32]. All of them cover large variations, including different</b><br/>subjects, poses, illumination conditions, expressions and occlusions.
+<br/>However, for most of them, the provided annotations appear to have
+<br/>several limitations. Specifically:
+<br/>• The majority of them provide annotations for a relatively small
+<br/>subset of images.
+</td><td>('3320415', 'Christos Sagonas', 'christos sagonas')<br/>('2788012', 'Epameinondas Antonakos', 'epameinondas antonakos')<br/>('2610880', 'Georgios Tzimiropoulos', 'georgios tzimiropoulos')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')<br/>('1694605', 'Maja Pantic', 'maja pantic')<br/>('3320415', 'Christos Sagonas', 'christos sagonas')</td><td>E-mail address: c.sagonas@imperial.ac.uk (C. Sagonas).
+</td></tr><tr><td>71fd29c2ae9cc9e4f959268674b6b563c06d9480</td><td>End-to-end 3D shape inverse rendering of different classes
+<br/>of objects from a single input image
+<br/>1Computer Science and Engineering and Information Technology, Shiraz
+<br/><b>university, Shiraz, Iran</b><br/>November 17, 2017
+</td><td>('34649340', 'Shima Kamyab', 'shima kamyab')<br/>('2014752', 'Zohreh Azimifar', 'zohreh azimifar')</td><td></td></tr><tr><td>7142ac9e4d5498037aeb0f459f278fd28dae8048</td><td>Semi-Supervised Learning for Optical Flow
+<br/>with Generative Adversarial Networks
+<br/><b>University of California, Merced</b><br/>2Virginia Tech
+<br/>3Nvidia Research
+</td><td>('2268189', 'Wei-Sheng Lai', 'wei-sheng lai')<br/>('3068086', 'Jia-Bin Huang', 'jia-bin huang')<br/>('1715634', 'Ming-Hsuan Yang', 'ming-hsuan yang')</td><td>1{wlai24|mhyang}@ucmerced.edu
+<br/>2jbhuang@vt.edu
+</td></tr><tr><td>71f36c8e17a5c080fab31fce1ffea9551fc49e47</td><td>Predicting Failures of Vision Systems
+<br/>1Virginia Tech
+<br/>2Univ. of Texas at Austin
+<br/>3Univ. of Washington
+<br/><b>Carnegie Mellon University</b></td><td>('40409467', 'Peng Zhang', 'peng zhang')<br/>('2537394', 'Jiuling Wang', 'jiuling wang')</td><td>1{zhangp, parikh}@vt.edu
+<br/>2jiuling@utexas.edu
+<br/>3ali@cs.uw.edu
+<br/>4hebert@ri.cmu.edu
+</td></tr><tr><td>7117ed0be436c0291bc6fb6ea6db18de74e2464a</td><td>Under review as a conference paper at ICLR 2017
+<br/>WARPED CONVOLUTIONS: EFFICIENT INVARIANCE TO
+<br/>SPATIAL TRANSFORMATIONS
+<br/>Visual Geometry Group
+<br/><b>University of Oxford</b></td><td>('36478254', 'João F. Henriques', 'joão f. henriques')</td><td>{joao,vedaldi}@robots.ox.ac.uk
+</td></tr><tr><td>71e6a46b32a8163c9eda69e1badcee6348f1f56a</td><td>Visually Interpreting Names as Demographic Attributes
+<br/>by Exploiting Click-Through Data
+<br/><b>National Taiwan University, Taipei, Taiwan</b><br/><b>FX Palo Alto Laboratory, Inc., California, USA</b></td><td>('35081710', 'Yan-Ying Chen', 'yan-ying chen')<br/>('1692811', 'Yin-Hsi Kuo', 'yin-hsi kuo')<br/>('2580465', 'Chun-Che Wu', 'chun-che wu')<br/>('1716836', 'Winston H. Hsu', 'winston h. hsu')</td><td>{yanying,kuonini,kenwu0721}@gmail.com, whsu@ntu.edu.tw
+</td></tr><tr><td>713594c18978b965be87651bb553c28f8501df0a</td><td>Fast Proximal Linearized Alternating Direction Method of Multiplier with
+<br/>Parallel Splitting
+<br/><b>National University of Singapore</b><br/><b>Key Laboratory of Machine Perception (MOE), School of EECS, Peking University</b><br/><b>Cooperative Medianet Innovation Center, Shanghai Jiaotong University</b></td><td>('33224509', 'Canyi Lu', 'canyi lu')<br/>('1775194', 'Huan Li', 'huan li')<br/>('33383055', 'Zhouchen Lin', 'zhouchen lin')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')</td><td>canyilu@gmail.com, lihuan ss@126.com, zlin@pku.edu.cn, eleyans@nus.edu.sg
+</td></tr><tr><td>718824256b4461d62d192ab9399cfc477d3660b4</td><td>Selecting Training Data for Cross-Corpus Speech Emotion Recognition:
+<br/>Prototypicality vs. Generalization
+<br/><b>Institute for Human-Machine Communication, Technische Universit at M unchen, Germany</b></td><td>('30512170', 'Zixing Zhang', 'zixing zhang')<br/>('1740602', 'Felix Weninger', 'felix weninger')<br/>('1705843', 'Gerhard Rigoll', 'gerhard rigoll')</td><td>{schuller|zixing.zhang|weninger|rigoll}@tum.de
+</td></tr><tr><td>718d3137adba9e3078fa1f698020b666449f3336</td><td>(IJACSA) International Journal of Advanced Computer Science and Applications,
+<br/>Vol. 8, No. 10, 2017
+<br/>Accuracy Based Feature Ranking Metric for
+<br/>Multi-Label Text Classification
+<br/><b>Al-Khwarizmi Institute of Computer Science</b><br/><b>University of Engineering and Technology</b><br/>Department of Computer
+<br/>Science,
+<br/>Department of Computer
+<br/>Science,
+<br/>Lahore, Pakistan
+<br/><b>University of Gujrat, Pakistan</b><br/><b>University of Gujrat, Pakistan</b></td><td>('35637737', 'Muhammad Nabeel Asim', 'muhammad nabeel asim')<br/>('3245405', 'Abdur Rehman', 'abdur rehman')<br/>('1981732', 'Umar Shoaib', 'umar shoaib')</td><td></td></tr><tr><td>714d487571ca0d676bad75c8fa622d6f50df953b</td><td>eBear: An Expressive Bear-Like Robot
+</td><td>('49470290', 'Xiao Zhang', 'xiao zhang')<br/>('2314025', 'Ali Mollahosseini', 'ali mollahosseini')<br/>('29764067', 'Evan Boucher', 'evan boucher')<br/>('1783240', 'Richard M. Voyles', 'richard m. voyles')</td><td></td></tr><tr><td>716d6c2eb8a0d8089baf2087ce9fcd668cd0d4c0</td><td>SMITH & DYER: 3D FACIAL LANDMARK ESTIMATION
+<br/>Pose-Robust 3D Facial Landmark Estimation
+<br/>from a Single 2D Image
+<br/>http://www.cs.wisc.edu/~bmsmith
+<br/>http://www.cs.wisc.edu/~dyer
+<br/>Department of Computer Sciences
+<br/><b>University of Wisconsin-Madison</b><br/>Madison, WI USA
+</td><td>('2721523', 'Brandon M. Smith', 'brandon m. smith')<br/>('1724754', 'Charles R. Dyer', 'charles r. dyer')</td><td></td></tr><tr><td>7143518f847b0ec57a0ff80e0304c89d7e924d9a</td><td>Speeding-up Age Estimation in Intelligent
+<br/>Demographics System via Network Optimization
+<br/><b>School of Computer and Information, Hefei University of Technology, Hefei, China</b><br/><b>School of Computer Science and Engineering, Nanyang Technological University, Singapore</b></td><td>('49941674', 'Zhenzhen Hu', 'zhenzhen hu')<br/>('7739626', 'Peng Sun', 'peng sun')<br/>('40096128', 'Yonggang Wen', 'yonggang wen')</td><td>huzhen.ice@gmail.com, {sunp0003, ygwen}@ntu.edu.sg
+</td></tr><tr><td>710011644006c18291ad512456b7580095d628a2</td><td>Learning Residual Images for Face Attribute Manipulation
+<br/>Fujitsu Research & Development Center, Beijing, China.
+</td><td>('48157627', 'Wei Shen', 'wei shen')<br/>('2113095', 'Rujie Liu', 'rujie liu')</td><td>{shenwei, rjliu}@cn.fujitsu.com
+</td></tr><tr><td>713db3874b77212492d75fb100a345949f3d3235</td><td>Deep Semantic Face Deblurring
+<br/><b>Beijing Institute of Technology</b><br/><b>University of California, Merced</b><br/>3Nvidia
+<br/>4Google Cloud
+<br/>https://sites.google.com/site/ziyishenmi/cvpr18_face_deblur
+</td><td>('2182388', 'Ziyi Shen', 'ziyi shen')<br/>('2268189', 'Wei-Sheng Lai', 'wei-sheng lai')<br/>('39001620', 'Tingfa Xu', 'tingfa xu')<br/>('1690538', 'Jan Kautz', 'jan kautz')<br/>('1715634', 'Ming-Hsuan Yang', 'ming-hsuan yang')</td><td></td></tr><tr><td>715b69575dadd7804b4f8ccb419a3ad8b7b7ca89</td><td>1
+<br/>Testing separability and independence of perceptual
+<br/>dimensions with general recognition theory: A tutorial and
+<br/>new R package (grtools)1
+<br/><b>Florida International University</b><br/><b>University of California, Santa Barbara</b><br/><b>Florida International University</b><br/><b>University of California, Santa Barbara</b></td><td>('2850756', 'Fabian A. Soto', 'fabian a. soto')<br/>('33897174', 'Johnny Fonseca', 'johnny fonseca')<br/>('5854837', 'F. Gregory Ashby', 'f. gregory ashby')</td><td></td></tr><tr><td>71e56f2aebeb3c4bb3687b104815e09bb4364102</td><td>Video Co-segmentation for Meaningful Action Extraction
+<br/><b>National University of Singapore, Singapore</b><br/><b>National University of Singapore Research Institute, Suzhou, China</b></td><td>('3036190', 'Jiaming Guo', 'jiaming guo')<br/>('3119455', 'Zhuwen Li', 'zhuwen li')<br/>('1809333', 'Steven Zhiying Zhou', 'steven zhiying zhou')</td><td>{guo.jiaming, lizhuwen, eleclf, elezzy}@nus.edu.sg
+</td></tr><tr><td>711bb5f63139ee7a9b9aef21533f959671a7d80e</td><td><b>Helsinki University of Technology Laboratory of Computational Engineering Publications</b><br/>Teknillisen korkeakoulun Laskennallisen tekniikan laboratorion julkaisuja
+<br/>Espoo 2007
+<br/>REPORT B68
+<br/>OBJECTS EXTRACTION AND RECOGNITION FOR
+<br/>CAMERA-BASED INTERACTION: HEURISTIC AND
+<br/>STATISTICAL APPROACHES
+<br/>TEKNILLINEN KORKEAKOULU
+<br/>TEKNILLINEN KORKEAKOULU
+<br/>TEKNISKA HÖGSKOLAN
+<br/>TEKNISKA HÖGSKOLAN
+<br/><b>HELSINKI UNIVERSITY OF TECHNOLOGY</b><br/><b>HELSINKI UNIVERSITY OF TECHNOLOGY</b><br/>TECHNISCHE UNIVERSITÄT HELSINKI
+<br/>TECHNISCHE UNIVERSITÄT HELSINKI
+<br/>UNIVERSITE DE TECHNOLOGIE D'HELSINKI
+<br/>UNIVERSITE DE TECHNOLOGIE D'HELSINKI
+</td><td>('37522511', 'Hao Wang', 'hao wang')</td><td></td></tr><tr><td>76fd801981fd69ff1b18319c450cb80c4bc78959</td><td>Proceedings of the 11th International Conference on Computational Semantics, pages 76–81,
+<br/>London, UK, April 15-17 2015. c(cid:13)2015 Association for Computational Linguistics
+<br/>76
+</td><td></td><td></td></tr><tr><td>76dc11b2f141314343d1601635f721fdeef86fdb</td><td>Weighted Decoding ECOC for Facial
+<br/>Action Unit Classification
+</td><td>('1732556', 'Terry Windeatt', 'terry windeatt')</td><td></td></tr><tr><td>76673de6d81bedd6b6be68953858c5f1aa467e61</td><td>Discovering a Lexicon of Parts and Attributes
+<br/><b>Toyota Technological Institute at Chicago</b><br/>Chicago, IL 60637, USA
+</td><td>('35208858', 'Subhransu Maji', 'subhransu maji')</td><td>smaji@ttic.edu
+</td></tr><tr><td>76cd5e43df44e389483f23cb578a9015d1483d70</td><td>BORGHI ET AL.: FACE VERIFICATION FROM DEPTH
+<br/>Face Verification from Depth using
+<br/>Privileged Information
+<br/>Department of Engineering
+<br/>"Enzo Ferrari"
+<br/><b>University of Modena and Reggio</b><br/>Emilia
+<br/>Modena, Italy
+</td><td>('12010968', 'Guido Borghi', 'guido borghi')<br/>('2035969', 'Stefano Pini', 'stefano pini')<br/>('32044032', 'Filippo Grazioli', 'filippo grazioli')<br/>('1723285', 'Roberto Vezzani', 'roberto vezzani')<br/>('1741922', 'Rita Cucchiara', 'rita cucchiara')</td><td>guido.borghi@unimore.it
+<br/>stefano.pini@unimore.it
+<br/>filippo.grazioli@unimore.it
+<br/>roberto.vezzani@unimore.it
+<br/>rita.cucchiara@unimore.it
+</td></tr><tr><td>7643861bb492bf303b25d0306462f8fb7dc29878</td><td>Speeding up 2D-Warping for Pose-Invariant Face Recognition
+<br/><b>Human Language Technology and Pattern Recognition Group, RWTH Aachen University, Germany</b></td><td>('1804963', 'Harald Hanselmann', 'harald hanselmann')<br/>('1685956', 'Hermann Ney', 'hermann ney')</td><td>surname@cs.rwth-aachen.de
+</td></tr><tr><td>760a712f570f7a618d9385c0cee7e4d0d6a78ed2</td><td></td><td></td><td></td></tr><tr><td>76b11c281ac47fe6d95e124673a408ee9eb568e3</td><td>International Journal of Latest Engineering and Management Research (IJLEMR)
+<br/>ISSN: 2455-4847
+<br/>www.ijlemr.com || Volume 02 - Issue 03 || March 2017 || PP. 59-71
+<br/>REAL-TIME MULTI VIEW FACE DETECTION AND POSE
+<br/>ESTIMATION
+<br/><b>U. G STUDENTS, DEPT OF CSE, ALPHA COLLEGE OF ENGINEERING, CHENNAI</b><br/><b>ALPHA COLLEGE OF ENGINEERING, CHENNAI</b></td><td></td><td></td></tr><tr><td>76ce3d35d9370f0e2e27cfd29ea0941f1462895f</td><td>Hindawi Publishing Corporation
+<br/>e Scientific World Journal
+<br/>Volume 2014, Article ID 528080, 13 pages
+<br/>http://dx.doi.org/10.1155/2014/528080
+<br/>Research Article
+<br/>Efficient Parallel Implementation of Active Appearance
+<br/>Model Fitting Algorithm on GPU
+<br/><b>School of Computer Science and Technology, Tianjin University, Tianjin 300072, China</b><br/><b>College of Computer and Information Engineering, Tianjin Normal University, Tianjin 300387, China</b><br/>Received 25 August 2013; Accepted 19 January 2014; Published 2 March 2014
+<br/>Academic Editors: I. Lanese and G. Wei
+<br/>which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
+<br/>The active appearance model (AAM) is one of the most powerful model-based object detecting and tracking methods which
+<br/>has been widely used in various situations. However, the high-dimensional texture representation causes very time-consuming
+<br/>computations, which makes the AAM difficult to apply to real-time systems. The emergence of modern graphics processing
+<br/>units (GPUs) that feature a many-core, fine-grained parallel architecture provides new and promising solutions to overcome the
+<br/>computational challenge. In this paper, we propose an efficient parallel implementation of the AAM fitting algorithm on GPUs.
+<br/>Our design idea is fine grain parallelism in which we distribute the texture data of the AAM, in pixels, to thousands of parallel GPU
+<br/>threads for processing, which makes the algorithm fit better into the GPU architecture. We implement our algorithm using the
+<br/>compute unified device architecture (CUDA) on the Nvidia’s GTX 650 GPU, which has the latest Kepler architecture. To compare
+<br/>the performance of our algorithm with different data sizes, we built sixteen face AAM models of different dimensional textures.
+<br/>The experiment results show that our parallel AAM fitting algorithm can achieve real-time performance for videos even on very
+<br/>high-dimensional textures.
+<br/>1. Introduction
+<br/>Detecting and tracking moving deformable objects in a video
+<br/>sequence is a complex and difficult task and has been a
+<br/>very important part of many applications, such as human
+<br/>computer interaction [1], automated surveillance [2], and
+<br/>emotion recognition [3]. This task allows us to determine the
+<br/>state of objects and helps us analyze their behaviors.
+<br/>The active appearance model (AAM) [4], first proposed
+<br/>by Cootes et al. [5], is one of the most powerful model-based
+<br/>object detecting and tracking algorithms. It is a nonlinear,
+<br/>generative, and parametric model and can be traced back
+<br/>to the active contour model (or “snakes,” [6]) and the active
+<br/>shape model (ASM) [7]. Particularly, the AAM decouples and
+<br/>models the shape and the texture of the deformable object
+<br/>to generate a variety of instant photos realistically. Therefore,
+<br/>the AAM has been widely used in various situations [8–10].
+<br/>The most frequent application of AAMs to date has been face
+<br/>modeling and tracking [11].
+<br/>Although the AAM possesses powerful modeling and
+<br/>efficient fitting ability, the high computational complexity
+<br/>caused by the high-dimensional texture representation limits
+<br/>its application in many conditions, for example, real-time
+<br/>systems. To make the AAM more applicable to practical
+<br/>applications, additional effort must be spent to accelerate the
+<br/>computation of the AAM. Therefore, several improvements
+<br/>are proposed to achieve this aim. Some methods are proposed
+<br/>to reduce the dimension of the texture, such as the Haar
+<br/>wavelet [12], the wedgelet-based regression tree [13], and
+<br/>the local sampling [14]. However, these methods improve
+<br/>efficiency at the expense of decreasing accuracy or losing
+<br/>detail information. From another perspective, researchers
+<br/>[15, 16] suggest reformulating the AAM in an analytic way to
+<br/>speed up the model fitting. A famous method is the inverse
+<br/>compositional image alignment (ICIA) [17] algorithm that
+<br/>avoids updating texture parameters every frame and is a very
+<br/>fast-fitting algorithm for the AAM. However, the limitation
+<br/>of this algorithm is that it cannot be applied to the AAMs
+</td><td>('1762397', 'Jinwei Wang', 'jinwei wang')<br/>('2518530', 'Xirong Ma', 'xirong ma')<br/>('34854285', 'Yuanping Zhu', 'yuanping zhu')<br/>('35900806', 'Jizhou Sun', 'jizhou sun')<br/>('1762397', 'Jinwei Wang', 'jinwei wang')</td><td>Correspondence should be addressed to Jinwei Wang; wangjinwei@tju.edu.cn
+</td></tr><tr><td>76b9fe32d763e9abd75b427df413706c4170b95c</td><td></td><td></td><td></td></tr><tr><td>768c332650a44dee02f3d1d2be1debfa90a3946c</td><td>Bayesian Face Recognition Using Support Vector Machine and Face Clustering
+<br/>Department of Information Engineering
+<br/><b>The Chinese University of Hong Kong</b><br/>Shatin, Hong Kong
+</td><td>('1911510', 'Zhifeng Li', 'zhifeng li')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td>{zli0, xtang}@ie.cuhk.edu.hk
+</td></tr><tr><td>769461ff717d987482b28b32b1e2a6e46570e3ff</td><td>MIC-TJU in MediaEval 2017 Emotional Impact of Movies Task
+<br/><b>Gannan Normal University, Ganzhou 341000, China</b><br/><b>Tongji University, Shanghai 201804, China</b></td><td>('40290178', 'Yun Yi', 'yun yi')<br/>('2774427', 'Hanli Wang', 'hanli wang')<br/>('28933059', 'Jiangchuan Wei', 'jiangchuan wei')</td><td></td></tr><tr><td>76d9f5623d3a478677d3f519c6e061813e58e833</td><td>FAST ALGORITHMS FOR THE GENERALIZED FOLEY-SAMMON
+<br/>DISCRIMINANT ANALYSIS
+</td><td>('35789819', 'Lei-Hong Zhang', 'lei-hong zhang')<br/>('14372428', 'Li-Zhi Liao', 'li-zhi liao')<br/>('1678715', 'Michael K. Ng', 'michael k. ng')</td><td></td></tr><tr><td>76e2d7621019bd45a5851740bd2742afdcf62837</td><td>Article
+<br/>Real-Time Detection and Measurement of Eye
+<br/>Features from Color Images
+<br/><b>Technical University of Cluj Napoca, 28 Memorandumului Street</b><br/><b>Babes Bolyai University, 58-60 Teodor Mihali, C333, Cluj Napoca</b><br/>Academic Editors: Changzhi Li, Roberto Gómez-García and José-María Muñoz-Ferreras
+<br/>Received: 28 April 2016; Accepted: 14 July 2016; Published: 16 July 2016
+</td><td>('31630857', 'Diana Borza', 'diana borza')<br/>('1821352', 'Adrian Sergiu Darabant', 'adrian sergiu darabant')<br/>('3331727', 'Radu Danescu', 'radu danescu')</td><td>Cluj Napoca 400114, Romania; borza_diana@yahoo.com
+<br/>Romania; adrian.darabant@tvarita.ro
+<br/>* Correspondence: Radu.Danescu@cs.utcluj.ro; Tel.: +40-740-502-223
+</td></tr><tr><td>765b2cb322646c52e20417c3b44b81f89860ff71</td><td>PoseShop: Human Image Database
+<br/>Construction and Personalized
+<br/>Content Synthesis
+</td><td>('29889388', 'Tao Chen', 'tao chen')<br/>('37291674', 'Ping Tan', 'ping tan')<br/>('1678872', 'Li-Qian Ma', 'li-qian ma')<br/>('37535930', 'Ming-Ming Cheng', 'ming-ming cheng')<br/>('2947946', 'Ariel Shamir', 'ariel shamir')<br/>('1686809', 'Shi-Min Hu', 'shi-min hu')</td><td></td></tr><tr><td>7644d90efef157e61fe4d773d8a3b0bad5feccec</td><td></td><td></td><td></td></tr><tr><td>763158cef9d1e4041f24fce4cf9d6a3b7a7f08ff</td><td>Hierarchical Modeling and
+<br/>Applications to Recognition Tasks
+<br/>Thesis submitted for the degree of
+<br/>”Doctor of Philosophy”
+<br/>by
+<br/><b>Submitted to the Senate of the Hebrew University</b><br/>August / 2013
+</td><td>('39161025', 'Alon Zweig', 'alon zweig')</td><td></td></tr><tr><td>764882e6779fbee29c3d87e00302befc52d2ea8d</td><td>Deep Approximately Orthogonal Nonnegative
+<br/>Matrix Factorization for Clustering
+<br/>School of Automation
+<br/>School of Automation
+<br/>School of Automation
+<br/><b>Guangdong University of Technology</b><br/><b>Guangdong University of Technology</b><br/><b>Guangdong University of Technology</b><br/>Guangzhou, China
+<br/>Guangzhou, China
+<br/>Guangzhou, China
+</td><td>('30185240', 'Yuning Qiu', 'yuning qiu')<br/>('1764724', 'Guoxu Zhou', 'guoxu zhou')<br/>('2454506', 'Kan Xie', 'kan xie')</td><td>yn.qiu@foxmail.com
+<br/>guoxu.zhou@qq.com
+<br/>kanxiegdut@gmail.com
+</td></tr><tr><td>76d939f73a327bf1087d91daa6a7824681d76ea1</td><td>A Thermal Facial Emotion Database
+<br/>and Its Analysis
+<br/><b>Japan Advanced Institute of Science and Technology</b><br/>1-1 Asahidai, Nomi, Ishikawa, Japan
+<br/><b>University of Science, Ho Chi Minh city</b><br/>227 Nguyen Van Cu, Ho Chi Minh city, Vietnam
+</td><td>('2319415', 'Hung Nguyen', 'hung nguyen')<br/>('1791753', 'Kazunori Kotani', 'kazunori kotani')<br/>('1753878', 'Fan Chen', 'fan chen')</td><td>{nvhung,ikko,chen-fan}@jaist.ac.jp
+<br/>lhbac@hcmuns.edu.vn
+</td></tr><tr><td>760ba44792a383acd9ca8bef45765d11c55b48d4</td><td>~
+<br/>I .
+<br/>INTRODUCTION AND BACKGROUND
+<br/>The purpose of this article is to introduce the
+<br/>reader to the basic principles of classification with
+<br/>class-specific features. It is written both for readers
+<br/>interested in only the basic concepts as well as those
+<br/>interested in getting started in applying the method.
+<br/>For in-depth coverage, the reader is referred to a more
+<br/>detailed article [l].
+<br/>Class-Specific Classifier:
+<br/>Avoiding the Curse of
+<br/>Dimensionality
+<br/>PAUL M. BAGGENSTOSS, Member. lEEE
+<br/>US. Naval Undersea Warfare Center
+<br/>This article describes a new probabilistic method called the
+<br/>“class-specific method” (CSM). CSM has the potential to avoid
+<br/>the “curse of dimensionality” which plagues most clmiiiers
+<br/>which attempt to determine the decision boundaries in a
+<br/>highdimensional featue space. In contrast, in CSM, it is possible
+<br/>to build classifiers without a ” n o n feature space. Separate
+<br/>Law-dimensional features seta may be de6ned for each class, while
+<br/>the decision funetions are projected back to the common raw data
+<br/>space. CSM eflectively extends the classical classification theory
+<br/>to handle multiple feature spaw.. It is completely general, and
+<br/>requires no s i m p l i n g assumption such as Gaussianity or that
+<br/>data lies in linear subspaces.
+<br/>Manuscript received September 26, 2W2; revised February 12,
+<br/>2003.
+<br/>This work was supported by the Office of Naval Research.
+<br/>Author’s address: US. Naval Undersea Warfare Center, Newport
+<br/>Classification is the process of assigning data
+<br/>to one of a set of pre-determined class labels [2].
+<br/>Classification is a fundamental problem that has
+<br/>to be solved if machines are to approximate the
+<br/>human functions of recognizing sounds, images, or
+<br/>other sensory inputs. This is why classification is so
+<br/>important for automation in today’s commercial and
+<br/>military arenas.
+<br/>Many of us have first-hand knowledge of
+<br/>successful automated recognition systems from
+<br/>cameras that recognize faces in airports to computers
+<br/>that can scan and read printed and handwritten text,
+<br/>or systems that can recognize human speech. These
+<br/>systems are becoming more and more reliable and
+<br/>accurate. Given reasonably clean input data, the
+<br/>performance is often quite good if not perfect. But
+<br/>many of these systems fail in applications where
+<br/>clean, uncorrupted data is not available or if the
+<br/>problem is complicated by variability of conditions
+<br/>or by proliferation of inputs from unknown sources.
+<br/>In military environments, the targets to he recognized
+<br/>are often uncooperative and hidden in clutter and
+<br/>interference. In short, military uses of such systems
+<br/>still fall far short of what a well-trained alert human
+<br/>operator can achieve.
+<br/>We are often perplexed by the wide gap of
+<br/>as a car door slamming. From
+<br/>performance between humans and automated systems.
+<br/>Allow a human listener to hear two or three examples
+<br/>of a sound-such
+<br/>these few examples, the human can recognize
+<br/>the sound again and not confuse it with similar
+<br/>interfering sounds. But try the same experiment with
+<br/>general-purpose classifiers using neural networks
+<br/>and the story is quite different. Depending on the
+<br/>problem, the automated system may require hundreds,
+<br/>thousands, even millions of examples for training
+<br/>before it becomes both robust and reliable.
+<br/>Why? The answer lies in what is known as the
+<br/>“curse of dimensionality.” General-purpose classifiers
+<br/>need to extract a large number of measurements,
+<br/>or features, from the data to account for all the
+<br/>different possibilities of data types. The large
+<br/>collection of features form a high-dimensional space
+<br/>that the classifier has to sub-divide into decision
+<br/>boundaries. It is well-known that the complexity of
+<br/>a high-dimensional space increases exponentially
+<br/>with the number of measurements [31-and
+<br/>so does
+<br/>the difficulty of finding the hest decision boundaries
+<br/>from a fixed amount of training data. Unless a lot
+<br/>EEE A&E SYSTEMS MAGAZINE VOL. 19, NO. 1 JANUARY 2004 PART 2: TUTORIALS-BAGGENSTOSS
+<br/>37
+</td><td></td><td>RI, 02841, E-mail: (p.m.baggenstoss@ieee.arg).
+</td></tr><tr><td>766728bac030b169fcbc2fbafe24c6e22a58ef3c</td><td>A survey of deep facial landmark detection
+<br/>Yongzhe Yan1,2
+<br/>Thierry Chateau1
+<br/>1 Université Clermont Auvergne, France
+<br/>2 Wisimage, France
+<br/>3 Université de Lyon, CNRS, INSA Lyon, LIRIS, UMR5205, Lyon, France
+<br/>Résumé
+<br/>La détection de landmarks joue un rôle crucial dans de
+<br/>nombreuses applications d’analyse du visage comme la
+<br/>reconnaissance de l’identité, des expressions, l’animation
+<br/>d’avatar, la reconstruction 3D du visage, ainsi que pour
+<br/>les applications de réalité augmentée comme la pose de
+<br/>masque ou de maquillage virtuel. L’avènement de l’ap-
+<br/>prentissage profond a permis des progrès très importants
+<br/>dans ce domaine, y compris sur les corpus non contraints
+<br/>(in-the-wild). Nous présentons ici un état de l’art cen-
+<br/>tré sur la détection 2D dans une image fixe, et les mé-
+<br/>thodes spécifiques pour la vidéo. Nous présentons ensuite
+<br/>les corpus existants pour ces trois tâches, ainsi que les mé-
+<br/>triques d’évaluations associées. Nous exposons finalement
+<br/>quelques résultats, ainsi que quelques pistes de recherche.
+<br/>Mots Clef
+<br/>Détection de landmark facial, Alignement de visage, Deep
+<br/>learning
+</td><td>('3015472', 'Xavier Naturel', 'xavier naturel')<br/>('50493659', 'Christophe Garcia', 'christophe garcia')<br/>('48601809', 'Christophe Blanc', 'christophe blanc')<br/>('1762557', 'Stefan Duffner', 'stefan duffner')</td><td>yongzhe.yan@etu.uca.fr
+</td></tr><tr><td>7697295ee6fc817296bed816ac5cae97644c2d5b</td><td>Detecting and Recognizing Human-Object Interactions
+<br/>Facebook AI Research (FAIR)
+</td><td>('2082991', 'Georgia Gkioxari', 'georgia gkioxari')<br/>('39353098', 'Kaiming He', 'kaiming he')</td><td></td></tr><tr><td>7636f94ddce79f3dea375c56fbdaaa0f4d9854aa</td><td>Appl. Math. Inf. Sci. 6 No. 2S pp. 403S-408S (2012)
+<br/> An International Journal
+<br/>© 2012 NSP
+<br/>Applied Mathematics & Information Sciences
+<br/>Robust Facial Expression Recognition Using
+<br/>a Smartphone Working against Illumination Variation
+<br/> Natural Sciences Publishing Cor.
+<br/><b>Sejong University, 98 Kunja-Dong, Kwangjin-Gu, Seoul, Korea</b><br/>Received June 22, 2010; Revised March 21, 2011; Accepted 11 June 2011
+<br/>Published online: 1 January 2012
+</td><td>('2413560', 'Kyoung-Sic Cho', 'kyoung-sic cho')<br/>('9270794', 'In-Ho Choi', 'in-ho choi')<br/>('2706430', 'Yong-Guk Kim', 'yong-guk kim')</td><td> @ 2012 NSP
+<br/>Corresponding author: Email: ykim@sejong.ac.kr
+</td></tr><tr><td>1c80bc91c74d4984e6422e7b0856cf3cf28df1fb</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Hierarchical Adaptive Structural SVM for Domain Adaptation
+<br/>Received: date / Accepted: date
+</td><td>('2470198', 'Jiaolong Xu', 'jiaolong xu')</td><td></td></tr><tr><td>1ce3a91214c94ed05f15343490981ec7cc810016</td><td>Exploring Photobios
+<br/><b>University of Washington</b><br/>2Adobe Systems†
+<br/>3Google Inc.
+</td><td>('2419955', 'Ira Kemelmacher-Shlizerman', 'ira kemelmacher-shlizerman')<br/>('2177801', 'Eli Shechtman', 'eli shechtman')<br/>('9748713', 'Rahul Garg', 'rahul garg')<br/>('1679223', 'Steven M. Seitz', 'steven m. seitz')</td><td></td></tr><tr><td>1c9efb6c895917174ac6ccc3bae191152f90c625</td><td>Unifying Identification and Context Learning for Person Recognition
+<br/><b>CUHK-SenseTime Joint Lab, The Chinese University of Hong Kong</b></td><td>('39360892', 'Qingqiu Huang', 'qingqiu huang')<br/>('50446092', 'Yu Xiong', 'yu xiong')<br/>('1807606', 'Dahua Lin', 'dahua lin')</td><td>{hq016, xy017, dhlin}@ie.cuhk.edu.hk
+</td></tr><tr><td>1c2724243b27a18a2302f12dea79d9a1d4460e35</td><td>Fisher+Kernel Criterion for Discriminant Analysis*
+<br/><b>National Laboratory on Machine Perception, Peking University, Beijing, P.R. China</b><br/><b>the Chinese University of Hong Kong, Shatin, Hong Kong</b><br/>3 MOE-Microsoft Key Laboratory of Multimedia Computing and Communication & Department of EEIS,
+<br/><b>University of Science and Technology of China, Hefei, Anhui, P. R. China</b><br/>4Microsoft Research Asia, Beijing, P.R. China
+<br/>
+</td><td>('1718245', 'Shu Yang', 'shu yang')<br/>('1698982', 'Shuicheng Yan', 'shuicheng yan')<br/>('38188040', 'Dong Xu', 'dong xu')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')<br/>('1720735', 'Chao Zhang', 'chao zhang')</td><td>Contact: yangshu@cis.pku.edu.cn
+</td></tr><tr><td>1ca8c09abb73a02519d8db77e4fe107acfc589b6</td><td>Automatic Understanding of Image and Video Advertisements
+<br/><b>University of Pittsburgh</b><br/>IEEE 2017 Conference on
+<br/>Computer Vision and Pattern
+<br/>Recognition
+<br/>Introduction
+<br/>Dataset Overview
+<br/>Answering Questions about Ads
+<br/>• Advertisements implicitly persuade viewers to take certain actions.
+<br/>• Understanding ads requires more than recognizing physical content.
+<br/>Recognized Concepts (Clarifai):
+<br/>Car, Street, Transportation System, Traffic, Road, City,
+<br/>Pavement, Crossing, …
+<br/>Image Caption (Vinyals et al.):
+<br/>A red car driving down a street next to a traffic light.
+<br/>True Meaning in Advertisement:
+<br/>Automobile drivers should be cautious to avoid crashing
+<br/>into cyclists as they share the road.
+<br/>• We propose the novel problem of automatic advertisement
+<br/>understanding, and provide two datasets with rich annotations.
+<br/>• We analyze the common persuasive strategies: symbolism, atypical
+<br/>objects, physical processes, cultural knowledge, surprise/shock, etc.
+<br/>• We present baseline experiment results for several prediction tasks.
+<br/>Dataset Collection
+<br/>• 38 topics including commercials and public service announcements
+<br/>• 30 sentiments indicating how ads emotionally impress viewers
+<br/>• Questions and answers revealing the messages behind the visual ads
+<br/>I should stop smoking because my
+<br/>lungs are extremely sensitive and
+<br/>could go up in smoke.
+<br/>I should buy this candy because it
+<br/>is unique and rises above the rest,
+<br/>like the Swiss Alps.
+<br/>• Our dataset contains 64,832 image ads and 3,477 video ads, each
+<br/>annotated by 3-5 human workers from Amazon Mechanical Turk.
+<br/>Symbolism Detection
+<br/>Image
+<br/>Video
+<br/>Topic
+<br/>Symbol
+<br/>Topic
+<br/>Fun/Exciting
+<br/>204,340
+<br/>64,131
+<br/>17,345
+<br/>17,374
+<br/>Sentiment
+<br/>Strategy
+<br/>Sentiment
+<br/>English?
+<br/>102,340
+<br/>20,000
+<br/>17,345
+<br/>15,380
+<br/>Q + A Pairs
+<br/>Slogan
+<br/>Q + A Pairs
+<br/>Effectiveness
+<br/>202,090
+<br/>11,130
+<br/>17,345
+<br/>16,721
+</td><td>('1996796', 'Zaeem Hussain', 'zaeem hussain')<br/>('2365530', 'Mingda Zhang', 'mingda zhang')<br/>('3186356', 'Xiaozhong Zhang', 'xiaozhong zhang')<br/>('9085797', 'Keren Ye', 'keren ye')<br/>('40540691', 'Christopher Thomas', 'christopher thomas')<br/>('6004292', 'Zuha Agha', 'zuha agha')<br/>('34493995', 'Nathan Ong', 'nathan ong')<br/>('1770205', 'Adriana Kovashka', 'adriana kovashka')</td><td></td></tr><tr><td>1cfe3533759bf95be1fce8ce1d1aa2aeb5bfb4cc</td><td>Recognition of Facial Gestures based on Support
+<br/>Vector Machines
+<br/><b>Faculty of Informatics, University of Debrecen, Hungary</b><br/>H-4010 Debrecen P.O.Box 12.
+</td><td>('47547897', 'Attila Fazekas', 'attila fazekas')</td><td>Attila.Fazekas@inf.unideb.hu
+</td></tr><tr><td>1ce4587e27e2cf8ba5947d3be7a37b4d1317fbee</td><td>Deep fusion of visual signatures
+<br/>for client-server facial analysis
+<br/>Normandie Univ, UNICAEN,
+<br/>ENSICAEN, CNRS, GREYC
+<br/>Computer Sc. & Engg.
+<br/>IIT Kanpur, India
+<br/>Frederic Jurie
+<br/>Normandie Univ, UNICAEN,
+<br/>ENSICAEN, CNRS, GREYC
+<br/>Facial analysis is a key technology for enabling human-
+<br/>machine interaction.
+<br/>In this context, we present a client-
+<br/>server framework, where a client transmits the signature of
+<br/>a face to be analyzed to the server, and, in return, the server
+<br/>sends back various information describing the face e.g. is the
+<br/>person male or female, is she/he bald, does he have a mus-
+<br/>tache, etc. We assume that a client can compute one (or a
+<br/>combination) of visual features; from very simple and effi-
+<br/>cient features, like Local Binary Patterns, to more complex
+<br/>and computationally heavy, like Fisher Vectors and CNN
+<br/>based, depending on the computing resources available. The
+<br/>challenge addressed in this paper is to design a common uni-
+<br/>versal representation such that a single merged signature is
+<br/>transmitted to the server, whatever be the type and num-
+<br/>ber of features computed by the client, ensuring nonetheless
+<br/>an optimal performance. Our solution is based on learn-
+<br/>ing of a common optimal subspace for aligning the different
+<br/>face features and merging them into a universal signature.
+<br/>We have validated the proposed method on the challenging
+<br/>CelebA dataset, on which our method outperforms existing
+<br/>state-of-art methods when rich representation is available at
+<br/>test time, while giving competitive performance when only
+<br/>simple signatures (like LBP) are available at test time due
+<br/>to resource constraints on the client.
+<br/>1.
+<br/>INTRODUCTION
+<br/>We propose a novel method in a heterogeneous server-
+<br/>client framework for the challenging and important task of
+<br/>analyzing images of faces. Facial analysis is a key ingredient
+<br/>for assistive computer vision and human-machine interaction
+<br/>methods, and systems and incorporating high-performing
+<br/>methods in daily life devices is a challenging task. The ob-
+<br/>jective of the present paper is to develop state-of-the-art
+<br/>technologies for recognizing facial expressions and facial at-
+<br/>tributes on mobile and low cost devices. Depending on their
+<br/>computing resources, the clients (i.e. the devices on which
+<br/>the face image is taken) are capable of computing different
+<br/>types of face signatures, from the simplest ones (e.g. LPB)
+<br/>to the most complex ones (e.g. very deep CNN features), and
+<br/>should be able to eventually combine them into a single rich
+<br/>signature. Moreover, it is convenient if the face analyzer,
+<br/>which might require significant computing resources, is im-
+<br/>plemented on a server receiving face signatures and comput-
+<br/>ing facial expressions and attributes from these signatures.
+<br/>Keeping the computation of the signatures on the client is
+<br/>safer in terms of privacy, as the original images are not trans-
+<br/>mitted, and keeping the analysis part on the server is also
+<br/>beneficial for easy model upgrades in the future. To limit
+<br/>the transmission costs, the signatures have to be made as
+<br/>compact as possible.
+<br/>In summary, the technology needed
+<br/>for this scenario has to be able to merge the different avail-
+<br/>able features – the number of features available at test time
+<br/>is not known in advance but is dependent on the computing
+<br/>resources available on the client – producing a unique rich
+<br/>and compact signature of the face, which can be transmitted
+<br/>and analyzed by a server. Ideally, we would like the univer-
+<br/>sal signature to have the following properties: when all the
+<br/>features are available, we would like the performance of the
+<br/>signature to be better than the one of a system specifically
+<br/>optimized for any single type of feature.
+<br/>In addition, we
+<br/>would like to have reasonable performance when only one
+<br/>type of feature is available at test time.
+<br/>For developing such a system, we propose a hybrid deep
+<br/>neural network and give a method to carefully fine-tune the
+<br/>network parameters while learning with all or a subset of
+<br/>features available. Thus, the proposed network can process a
+<br/>number of wide ranges of feature types such as hand-crafted
+<br/>LBP and FV, or even CNN features which are learned end-
+<br/>to-end.
+<br/>While CNNs have been quite successful in computer vi-
+<br/>sion [1], representing images with CNN features is relatively
+<br/>time consuming, much more than some simple hand-crafted
+<br/>features such as LBP. Thus, the use of CNN in real-time ap-
+<br/>plications is still not feasible. In addition, the use of robust
+<br/>hand-crafted features such as FV in hybrid architectures can
+<br/>give performance comparable to Deep CNN features [2]. The
+<br/>main advantage of learning hybrid architectures is to avoid
+<br/>having large numbers of convolutional and pooling layers.
+<br/>Again from [2], we can also observe that hybrid architec-
+<br/>tures improve the performance of hand-crafted features e.g.
+<br/>FVs. Therefore, hybrid architectures are useful for the cases
+<br/>where only hand-crafted features, and not the original im-
+<br/>ages, are available during training and testing time. This
+<br/>scenario is useful when it is not possible to share training
+<br/>images due to copyright or privacy issues.
+<br/>Hybrid networks are particularly adapted to our client-
+</td><td>('2078892', 'Binod Bhattarai', 'binod bhattarai')<br/>('2515597', 'Gaurav Sharma', 'gaurav sharma')</td><td>binod.bhattarai@unicaen.fr
+<br/>grv@cse.iitk.ac.in
+<br/>frederic.jurie@unicaen.fr
+</td></tr><tr><td>1c30bb689a40a895bd089e55e0cad746e343d1e2</td><td>Learning Spatiotemporal Features with 3D Convolutional Networks
+<br/><b>Facebook AI Research, 2Dartmouth College</b></td><td>('1687325', 'Du Tran', 'du tran')<br/>('2276554', 'Rob Fergus', 'rob fergus')<br/>('1732879', 'Lorenzo Torresani', 'lorenzo torresani')<br/>('2210374', 'Manohar Paluri', 'manohar paluri')</td><td>{dutran,lorenzo}@cs.dartmouth.edu
+<br/>{lubomir,robfergus,mano}@fb.com
+</td></tr><tr><td>1c4ceae745fe812d8251fda7aad03210448ae25e</td><td>EURASIP Journal on Applied Signal Processing 2004:4, 522–529
+<br/>c(cid:1) 2004 Hindawi Publishing Corporation
+<br/>Optimization of Color Conversion for Face Recognition
+<br/><b>Virginia Polytechnic Institute and State University</b><br/>Blacksburg, VA 24061-0111, USA
+<br/><b>Seattle Paci c University, Seattle, WA 98119-1957, USA</b><br/><b>Virginia Polytechnic Institute and State University</b><br/>Blacksburg, VA 24061-0111, USA
+<br/>Received 5 November 2002; Revised 16 October 2003
+<br/>This paper concerns the conversion of color images to monochromatic form for the purpose of human face recognition. Many
+<br/>face recognition systems operate using monochromatic information alone even when color images are available. In such cases,
+<br/>simple color transformations are commonly used that are not optimal for the face recognition task. We present a framework
+<br/>for selecting the transformation from face imagery using one of three methods: Karhunen-Lo`eve analysis, linear regression of
+<br/>color distribution, and a genetic algorithm. Experimental results are presented for both the well-known eigenface method and for
+<br/>extraction of Gabor-based face features to demonstrate the potential for improved overall system performance. Using a database
+<br/>of 280 images, our experiments using these methods resulted in performance improvements of approximately 4% to 14%.
+<br/>Keywords and phrases: face recognition, color image analysis, color conversion, Karhunen-Lo`eve analysis.
+<br/>1.
+<br/>INTRODUCTION
+<br/>Most single-view face recognition systems operate using in-
+<br/>tensity (monochromatic) information alone. This is true
+<br/>even for systems that accept color imagery as input. The
+<br/>reason for this is not
+<br/>that multispectral data is lack-
+<br/>ing in information content, but often because of practical
+<br/>considerations—difficulties associated with illumination and
+<br/>color balancing, for example, as well as compatibility with
+<br/>legacy systems. Associated with this is a lack of color image
+<br/>databases with which to develop and test new algorithms. Al-
+<br/>though work is in progress that will eventually aid in color-
+<br/>based tasks (e.g., through color constancy [1]), those efforts
+<br/>are still in the research stage.
+<br/>When color information is present, most of today’s face
+<br/>recognition systems convert the image to monochromatic
+<br/>form using simple transformations. For example, a common
+<br/>mapping [2, 3] produces an intensity value Ii by taking the
+<br/>average of red, green, and blue (RGB) values (Ir, Ig, and Ib,
+<br/>resp.):
+<br/>Ii(x, y) = Ir(x, y) + Ig(x, y) + Ib(x, y)
+<br/>(1)
+<br/>The resulting image is then used for feature extraction and
+<br/>analysis.
+<br/>We argue that more effective system performance is pos-
+<br/>sible if a color transformation is chosen that better matches
+<br/>the task at hand. For example, the mapping in (1) implic-
+<br/>itly assumes a uniform distribution of color values over the
+<br/>entire color space. For a task such as face recognition, color
+<br/>values tend to be more tightly confined to a small portion of
+<br/>the color space, and it is possible to exploit this narrow con-
+<br/>centration during color conversion. If the transformation is
+<br/>selected based on the expected color distribution, then it is
+<br/>reasonable to expect improved recognition accuracies.
+<br/>This paper presents a task-oriented approach for select-
+<br/>ing the color-to-grayscale image transformation. Our in-
+<br/>tended application is face recognition, although the frame-
+<br/>work that we present is applicable to other problem domains.
+<br/>We assume that frontal color views of the human face
+<br/>are available, and we develop a method for selecting alter-
+<br/>nate weightings of the separate color values in computing a
+<br/>single monochromatic value. Given the rich color content
+<br/>of the human face, it is desirable to maximize the use of
+<br/>this content even when full-color computation and match-
+<br/>ing is not used. As an illustration of this framework, we
+<br/>have used the Karhunen-Lo`eve (KL) transformation (also
+<br/>known as principal components analysis) of observed distri-
+<br/>butions in the color space to determine the improved map-
+<br/>ping.
+</td><td>('1719681', 'Creed F. Jones', 'creed f. jones')<br/>('1731164', 'A. Lynn Abbott', 'a. lynn abbott')</td><td>Email: crjones4@vt.edu
+<br/>Email: abbott@vt.edu
+</td></tr><tr><td>1c3073b57000f9b6dbf1c5681c52d17c55d60fd7</td><td>THÈSEprésentéepourl’obtentiondutitredeDOCTEURDEL’ÉCOLENATIONALEDESPONTSETCHAUSSÉESSpécialité:InformatiqueparCharlotteGHYSAnalyse,Reconstruction3D,&AnimationduVisageAnalysis,3DReconstruction,&AnimationofFacesSoutenancele19mai2010devantlejurycomposéde:Rapporteurs:MajaPANTICDimitrisSAMARASExaminateurs:MichelBARLAUDRenaudKERIVENDirectiondethèse:NikosPARAGIOSBénédicteBASCLE </td><td></td><td></td></tr><tr><td>1cee993dc42626caf5dbc26c0a7790ca6571d01a</td><td>Optimal Illumination for Image and Video Relighting
+<br/>Shree K.Nayar
+<br/>Peter N.Belhumeur
+<br/><b>Columbia University</b><br/>It has been shown in the literature that image-based relighting of
+<br/>scenes with unknown geometry can be achieved through linear
+<br/>combinations of a set of pre-acquired reference images. Since the
+<br/>placement and brightness of the light sources can be controlled, it
+<br/>is natural to ask: what is the optimal way to illuminate the scene to
+<br/>reduce the number of reference images that are needed?
+<br/>In this work we show that the best way to light the scene (i.e., the
+<br/>way that minimizes the number of reference images) is not using
+<br/>a sequence of single, compact light sources as is most commonly
+<br/>done, but rather to use a sequence of lighting patterns as given by an
+<br/>object-dependent lighting basis. While this lighting basis, which we
+<br/>call the optimal lighting basis (OLB), depends on camera and scene
+<br/>properties, we show that it can be determined as a simple calibration
+<br/>procedure before acquisition, through the SVD decomposition of
+<br/>the images of the object lighted by single light sources (Fig. 1).
+<br/>of basis images used, and for a set of four experiments (relighting
+<br/>of a sphere, a face, a buddha statue, and a dragon). For any given
+<br/>number of optimal lighting basis images, the corresponding num-
+<br/>ber of images of any other lighting basis that are needed to achieve
+<br/>the same reconstruction error equals the gain value. For instance, in
+<br/>the ‘buddha’ experiment instead of 6 optimal basis images, we will
+<br/>need to use 6× 1.8 ≈ 11 SHLB images, 6× 1.5 ≈ 9 FLB images or
+<br/>6× 2.3 ≈ 14 HaLB images.
+<br/>Figure 1: Computing the optimal lighting basis using SVD. First row: Images of the
+<br/>object illuminated by a single light source in different positions. Second row: Lighting
+<br/>patterns from the optimal lighting basis, containing both positive values, shown in
+<br/>grey, and negative values, shown in blue. Third row: Offset and scaling of the optimal
+<br/>lighting basis in order to make all its values positive.
+<br/>We demonstrate with experiments on real and synthetic data that
+<br/>the optimal lighting basis significantly reduces the number of refer-
+<br/>ence images that are needed to achieve a desired level of accuracy
+<br/>in the relit images. In particular, we show that the scene-dependent
+<br/>optimal lighting basis (OBL) performs much better than the Fourier
+<br/>lighting basis (FLB), Haar lighting basis (HaLB) and spherical har-
+<br/>monic lighting basis (SHLB).
+<br/>In Fig. 2 we show some reconstructed images of synthetic objects
+<br/>which have been illuminated by SHLB and OLB. Observe how
+<br/>when we reconstruct from images illuminated by OLB, the error is
+<br/>significantly smaller. In Fig. 3 we plot the gains of the optimal light-
+<br/>ing basis with respect the other basis, as a function of the number
+<br/>Figure 3: Gains of the OLB with respect all the other lighting basis, (for a set of 4
+<br/>experiments), plotted as a function of the number of basis images used.
+<br/>This reduction in the number of needed images is particularly criti-
+<br/>cal in the problem of relighting in video, as corresponding points on
+<br/>moving objects must be aligned from frame to frame during each
+<br/>cycle of the lighting basis. We show, however, that the efficiencies
+<br/>gained by the optimal lighting basis makes relighting in video pos-
+<br/>sible using only a simple optical flow alignment. Furthermore, in
+<br/>our experiments we verify that although the optimal lighting basis
+<br/>is computed for an initial orientation of the object, the reconstruc-
+<br/>tion error does not increase noticeably as the object changes its pose
+<br/>along the video sequence.
+<br/>We have performed several relighting experiments on real video se-
+<br/>quences of moving objects, moving faces, and scenes containing
+<br/>both. In each case, although a single video clip was captured, we
+<br/>are able to relight again and again, controlling the lighting direc-
+<br/>tion, extent, and color. Fig. 4 shows some frames of one of these
+<br/>sequences.
+<br/>Ground Truth
+<br/>FLB 16 basis OLB 16 basis
+<br/>Error FLB
+<br/>Error OLB
+<br/>SHLB 3 basis OLB 3 basis
+<br/>Ground Truth
+<br/>Figure 2: Examples of reconstructed images and reconstruction errors, for different
+<br/>lighting basis. Note that OLB performs much better.
+<br/>Error SHLB
+<br/>Error OLB
+<br/>Figure 4: Two frames of a video sequence, illuminated with the optimal lighting
+<br/>basis (first row), and relighted with a point light source (second row) and with an
+<br/>environmental light (third row).
+</td><td>('1994318', 'Francesc Moreno-Noguer', 'francesc moreno-noguer')</td><td></td></tr><tr><td>1c147261f5ab1b8ee0a54021a3168fa191096df8</td><td>Journal of Information Security, 2016, 7, 141-151
+<br/>Published Online April 2016 in SciRes. http://www.scirp.org/journal/jis
+<br/>http://dx.doi.org/10.4236/jis.2016.73010
+<br/>Face Recognition across Time Lapse Using
+<br/>Convolutional Neural Networks
+<br/><b>George Mason University, Fairfax, VA, USA</b><br/>Received 12 February 2016; accepted 8 April 2016; published 11 April 2016
+<br/>Copyright © 2016 by authors and Scientific Research Publishing Inc.
+<br/>This work is licensed under the Creative Commons Attribution International License (CC BY).
+<br/>http://creativecommons.org/licenses/by/4.0/
+<br/>
+<br/>
+</td><td>('2710867', 'Hachim El Khiyari', 'hachim el khiyari')<br/>('1781577', 'Harry Wechsler', 'harry wechsler')</td><td></td></tr><tr><td>1c17450c4d616e1e1eece248c42eba4f87de9e0d</td><td>YANG, LIN, CHANG, CHEN: AUTOMATIC AGE ESTIMATION VIA DEEP RANKING
+<br/>Automatic Age Estimation from Face Images
+<br/>via Deep Ranking
+<br/><b>Research Center for Information</b><br/>Technology Innovation
+<br/>Academia Sinica
+<br/>Taipei, Taiwan
+<br/><b>Institute of Information Science</b><br/>Academia Sinica
+<br/>Taipei, Taiwan
+</td><td>('35436145', 'Huei-Fang Yang', 'huei-fang yang')<br/>('36181124', 'Bo-Yao Lin', 'bo-yao lin')<br/>('34692779', 'Kuang-Yu Chang', 'kuang-yu chang')<br/>('1720473', 'Chu-Song Chen', 'chu-song chen')</td><td>hfyang@citi.sinica.edu.tw
+<br/>boyaolin@iis.sinica.edu.tw
+<br/>kuangyu@iis.sinica.edu.tw
+<br/>song@iis.sinica.edu.tw
+</td></tr><tr><td>1c93b48abdd3ef1021599095a1a5ab5e0e020dd5</td><td>JOURNAL OF LATEX CLASS FILES, VOL. *, NO. *, JANUARY 2009
+<br/>A Compositional and Dynamic Model for Face Aging
+</td><td>('3133970', 'Song-Chun Zhu', 'song-chun zhu')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('1710220', 'Xilin Chen', 'xilin chen')</td><td></td></tr><tr><td>1c41965c5e1f97b1504c1bdde8037b5e0417da5e</td><td>Interaction-aware Spatio-temporal Pyramid
+<br/>Attention Networks for Action Classification
+<br/><b>University of Chinese Academy of Sciences</b><br/>2 CAS Center for Excellence in Brain Science and Intelligence Technology, National
+<br/><b>Laboratory of Pattern Recognition, Institute of Automation, CAS</b><br/>3 Meitu, 4 National Computer network Emergency Response technical
+<br/>Team/Coordination Center of China
+</td><td>('1807325', 'Yang Du', 'yang du')<br/>('2034987', 'Chunfeng Yuan', 'chunfeng yuan')<br/>('46708348', 'Bing Li', 'bing li')<br/>('40027215', 'Lili Zhao', 'lili zhao')<br/>('2082374', 'Yangxi Li', 'yangxi li')<br/>('40506509', 'Weiming Hu', 'weiming hu')</td><td>duyang2014@ia.ac.cn,{cfyuan,bli,wmhu}@nlpr.ia.ac.cn,
+<br/>lili.zhao@meitu.com, liyangxi@outlook.com
+</td></tr><tr><td>1cbd3f96524ca2258fd2d5c504c7ea8da7fb1d16</td><td>Fusion of audio-visual features using hierarchical classifier systems for
+<br/>the recognition of affective states and the state of depression
+<br/><b>Institute of Neural Information Processing, Ulm University, Ulm, Germany</b><br/>Keywords:
+<br/>Emotion Recognition, Multiple Classifier Systems, Affective Computing, Information Fusion
+</td><td>('1860319', 'Michael Glodek', 'michael glodek')<br/>('3243891', 'Sascha Meudt', 'sascha meudt')<br/>('1685857', 'Friedhelm Schwenker', 'friedhelm schwenker')</td><td>firstname.lastname@uni-ulm.de
+</td></tr><tr><td>1cad5d682393ffbb00fd26231532d36132582bb4</td><td>Spatio-Temporal Action Detection with
+<br/>Cascade Proposal and Location Anticipation
+<br/><b>Institute for Robotics and Intelligent</b><br/>Systems
+<br/><b>University of Southern California</b><br/>Los Angeles, CA, USA
+</td><td>('3469030', 'Zhenheng Yang', 'zhenheng yang')<br/>('3029956', 'Jiyang Gao', 'jiyang gao')<br/>('27735100', 'Ram Nevatia', 'ram nevatia')<br/>('3469030', 'Zhenheng Yang', 'zhenheng yang')<br/>('3029956', 'Jiyang Gao', 'jiyang gao')<br/>('27735100', 'Ram Nevatia', 'ram nevatia')</td><td>zhenheny@usc.edu
+<br/>jiyangga@usc.edu
+<br/>nevatia@usc.edu
+</td></tr><tr><td>1c1a98df3d0d5e2034ea723994bdc85af45934db</td><td>Guided Unsupervised Learning of Mode Specific Models for Facial Point
+<br/>Detection in the Wild
+<br/><b>School of Computer Science, The University of Nottingham</b></td><td>('2736086', 'Shashank Jaiswal', 'shashank jaiswal')<br/>('2449665', 'Timur R. Almaev', 'timur r. almaev')</td><td>{psxsj3,psxta4,michel.valstar}@nottingham.ac.uk
+</td></tr><tr><td>1ca815327e62c70f4ee619a836e05183ef629567</td><td>Global Supervised Descent Method
+<br/><b>Carnegie Mellon University, Pittsburgh PA</b></td><td>('3182065', 'Xuehan Xiong', 'xuehan xiong')<br/>('1707876', 'Fernando De la Torre', 'fernando de la torre')</td><td>{xxiong,ftorre}@andrew.cmu.edu
+</td></tr><tr><td>1c6be6874e150898d9db984dd546e9e85c85724e</td><td></td><td></td><td></td></tr><tr><td>1c65f3b3c70e1ea89114f955624d7adab620a013</td><td></td><td></td><td></td></tr><tr><td>1c530de1a94ac70bf9086e39af1712ea8d2d2781</td><td>Proceedings of the Twenty-Fifth International Joint Conference on Artificial Intelligence (IJCAI-16)
+<br/>Sparsity Conditional Energy Label
+<br/>Distribution Learning for Age Estimation
+<br/>Key Lab of Computer Network and Information Integration (Ministry of Education)
+<br/><b>School of Computer Science and Engineering, Southeast University, Nanjing 211189, China</b></td><td>('2442058', 'Xu Yang', 'xu yang')<br/>('1735299', 'Xin Geng', 'xin geng')<br/>('1725992', 'Deyu Zhou', 'deyu zhou')</td><td>{x.yang,xgeng,d.zhou}@seu.edu.cn
+</td></tr><tr><td>1c6e22516ceb5c97c3caf07a9bd5df357988ceda</td><td></td><td></td><td></td></tr><tr><td>82f8652c2059187b944ce65e87bacb6b765521f6</td><td>Discriminative Object Categorization with
+<br/>External Semantic Knowledge
+<br/>Dissertation Proposal
+<br/>by
+<br/>Department of Computer Science
+<br/><b>University of Texas at Austin</b><br/>Committee:
+<br/>Prof. Kristen Grauman (Advisor)
+<br/>Prof. Fei Sha
+<br/>Prof. J. K. Aggarwal
+</td><td>('35788904', 'Sung Ju Hwang', 'sung ju hwang')<br/>('1797655', 'Raymond Mooney', 'raymond mooney')<br/>('2302443', 'Pradeep Ravikumar', 'pradeep ravikumar')</td><td></td></tr><tr><td>82bef8481207de9970c4dc8b1d0e17dced706352</td><td></td><td></td><td></td></tr><tr><td>825f56ff489cdd3bcc41e76426d0070754eab1a8</td><td>Making Convolutional Networks Recurrent for Visual Sequence Learning
+<br/>NVIDIA
+</td><td>('40058797', 'Xiaodong Yang', 'xiaodong yang')</td><td>{xiaodongy,pmolchanov,jkautz}@nvidia.com
+</td></tr><tr><td>82d2af2ffa106160a183371946e466021876870d</td><td>A Novel Space-Time Representation on the Positive Semidefinite Cone
+<br/>for Facial Expression Recognition
+<br/>1IMT Lille Douai, Univ. Lille, CNRS, UMR 9189 – CRIStAL –
+<br/>Centre de Recherche en Informatique Signal et Automatique de Lille, F-59000 Lille, France
+<br/>2Univ. Lille, CNRS, UMR 8524, Laboratoire Paul Painlev´e, F-59000 Lille, France.
+</td><td>('37809060', 'Anis Kacem', 'anis kacem')<br/>('2909056', 'Mohamed Daoudi', 'mohamed daoudi')<br/>('2125606', 'Boulbaba Ben Amor', 'boulbaba ben amor')</td><td></td></tr><tr><td>824d1db06e1c25f7681e46199fd02cb5fc343784</td><td>Representing Relative Visual Attributes
+<br/>with a Reference-Point-Based Decision Model
+<br/>Marc T. Law
+<br/><b>University of Toronto</b><br/><b>Shanghai Jiao Tong University</b><br/><b>University of Michigan-Shanghai Jiao Tong University Joint Institute</b></td><td>('38481975', 'Paul Weng', 'paul weng')</td><td></td></tr><tr><td>82ccd62f70e669ec770daf11d9611cab0a13047e</td><td>Sparse Variation Pattern for Texture Classification
+<br/>Electrical Engineering Department
+<br/>Computer Science and Software Engineering
+<br/>Electrical Engineering Department
+<br/><b>Tafresh University</b><br/>Tafresh, Iran
+<br/><b>The University of Western Australia</b><br/><b>Central Tehran Branch, Azad University</b><br/>WA 6009, Australia
+<br/>Tehran, Iran
+</td><td>('2014145', 'Mohammad Tavakolian', 'mohammad tavakolian')<br/>('3046235', 'Farshid Hajati', 'farshid hajati')<br/>('1747500', 'Ajmal S. Mian', 'ajmal s. mian')<br/>('2997971', 'Soheila Gheisari', 'soheila gheisari')</td><td>m tavakolian,hajati@tafreshu.ac.ir
+<br/>ajmal.mian@uwa.edu.au
+<br/>gheisari.s@iauctb.ac.ir
+</td></tr><tr><td>82eff71af91df2ca18aebb7f1153a7aed16ae7cc</td><td>MSU-AVIS dataset:
+<br/>Fusing Face and Voice Modalities for Biometric
+<br/>Recognition in Indoor Surveillance Videos
+<br/><b>Michigan State University, USA</b><br/><b>Yarmouk University, Jordan</b></td><td>('39617163', 'Anurag Chowdhury', 'anurag chowdhury')<br/>('2447931', 'Yousef Atoum', 'yousef atoum')<br/>('1849929', 'Luan Tran', 'luan tran')<br/>('49543771', 'Xiaoming Liu', 'xiaoming liu')<br/>('1698707', 'Arun Ross', 'arun ross')</td><td></td></tr><tr><td>82c303cf4852ad18116a2eea31e2291325bc19c3</td><td>Journal of Image and Graphics, Volume 2, No.1, June, 2014
+<br/>Fusion Based FastICA Method: Facial Expression
+<br/>Recognition
+<br/><b>Computer Science, Engineering and Mathematics School, Flinders University, Australia</b></td><td>('3105876', 'Humayra B. Ali', 'humayra b. ali')<br/>('1739260', 'David M W Powers', 'david m w powers')</td><td>Email: {ali0041, david.powers}@flinders.edu.au
+</td></tr><tr><td>8210fd10ef1de44265632589f8fc28bc439a57e6</td><td>Single Sample Face Recognition via Learning Deep
+<br/>Supervised Auto-Encoders
+<br/>Shenghua Gao, Yuting Zhang, Kui Jia, Jiwen Lu, Yingying Zhang
+</td><td></td><td></td></tr><tr><td>82a4a35b2bae3e5c51f4d24ea5908c52973bd5be</td><td>Real-time emotion recognition for gaming using
+<br/>deep convolutional network features
+<br/>S´ebastien Ouellet
+</td><td></td><td></td></tr><tr><td>82a610a59c210ff77cfdde7fd10c98067bd142da</td><td>UC San Diego
+<br/>UC San Diego Electronic Theses and Dissertations
+<br/>Title
+<br/>Human attention and intent analysis using robust visual cues in a Bayesian framework
+<br/>Permalink
+<br/>https://escholarship.org/uc/item/1cb8d7vw
+<br/>Author
+<br/>McCall, Joel Curtis
+<br/>Publication Date
+<br/>2006-01-01
+<br/>Peer reviewed|Thesis/dissertation
+<br/>eScholarship.org
+<br/>Powered by the California Digital Library
+<br/><b>University of California</b></td><td></td><td></td></tr><tr><td>829f390b3f8ad5856e7ba5ae8568f10cee0c7e6a</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 57– No.20, November 2012
+<br/>A Robust Rotation Invariant Multiview Face Detection in
+<br/>Erratic Illumination Condition
+<br/>G.Nirmala Priya
+<br/>Associate Professor, Department of ECE
+<br/><b>Sona College of Technology</b></td><td>('48201570', 'Salem', 'salem')</td><td></td></tr><tr><td>82f4e8f053d20be64d9318529af9fadd2e3547ef</td><td>Technical Report:
+<br/>Multibiometric Cryptosystems
+</td><td>('2743820', 'Abhishek Nagar', 'abhishek nagar')<br/>('34633765', 'Karthik Nandakumar', 'karthik nandakumar')<br/>('40437942', 'Anil K. Jain', 'anil k. jain')</td><td></td></tr><tr><td>82b43bc9213230af9db17322301cbdf81e2ce8cc</td><td>Attention-Set based Metric Learning for Video Face Recognition
+<br/>Center for Research on Intelligent Perception and Computing,
+<br/><b>Institute of Automation, Chinese Academy of Sciences</b></td><td>('33079499', 'Yibo Hu', 'yibo hu')<br/>('33680526', 'Xiang Wu', 'xiang wu')<br/>('1705643', 'Ran He', 'ran he')</td><td>yibo.hu@cripac.ia.ac.cn, alfredxiangwu@gmail.com, rhe@nlpr.ia.ac.cn
+</td></tr><tr><td>82d781b7b6b7c8c992e0cb13f7ec3989c8eafb3d</td><td>141
+<br/>REFERENCES
+<br/>1.
+<br/>2.
+<br/>3.
+<br/>4.
+<br/>5.
+<br/>6.
+<br/>7.
+<br/>8.
+<br/>9.
+<br/>Adler A., Youmaran R. and Loyka S., “Towards a Measure of
+<br/>Biometric Information”, Canadian Conference on Electrical and
+<br/>Computer Engineering, pp. 210-213, 2006.
+<br/>Military Academy, West Point, New York, pp. 452-458, 2005.
+<br/>Security and Trust, St. Andrews, New Brunswick, Canada, pp. 1-8,
+<br/>2005.
+<br/>Structural Model for Biometric Sketch Recognition”, Proceedings of
+<br/>DAGM, Magdeburg, Germany, Vol. 2781, pp. 187-195, 2003.
+<br/>of Security”, The First UAE International Conference on Biological
+<br/>and Medical Physics, pp. 1-4, 2005.
+<br/>Avraam Kasapis., “MLPs and Pose, Expression Classification”,
+<br/>Proceedings of UNiS Report, pp. 1-87, 2003.
+<br/>Detection for Storage Area Networks (SANs)”, Proceedings of 22nd
+<br/>IEEE / 13th NASA Goddard Conference on Mass Storage Systems and
+<br/>Technologies, pp. 118-127, 2005.
+<br/>Black M.J. and Yacoob Y., “Recognizing Facial Expressions in Image
+<br/>Sequences using Local Parameterized Models of Image Motion”, Int.
+<br/>Journal Computer Vision, Vol. 25, No. 1, pp. 23-48, 1997.
+<br/>10.
+<br/>Recognition using a State-Based Model of Spatially-Localized Facial
+</td><td>('1689298', 'Ahmed', 'ahmed')<br/>('1689298', 'Ahmed', 'ahmed')<br/>('29977973', 'Angle', 'angle')<br/>('20765969', 'Bolle', 'bolle')<br/>('16848439', 'Bourel', 'bourel')</td><td></td></tr><tr><td>82417d8ec8ac6406f2d55774a35af2a1b3f4b66e</td><td>Some faces are more equal than others:
+<br/>Hierarchical organization for accurate and
+<br/>efficient large-scale identity-based face retrieval
+<br/>GREYC, CNRS UMR 6072, Universit´e de Caen Basse-Normandie, France1
+<br/>Technicolor, Rennes, France2
+</td><td>('48467774', 'Binod Bhattarai', 'binod bhattarai')<br/>('2515597', 'Gaurav Sharma', 'gaurav sharma')</td><td></td></tr><tr><td>82e66c4832386cafcec16b92ac88088ffd1a1bc9</td><td>OpenFace: A general-purpose face recognition
+<br/>library with mobile applications
+<br/>June 2016
+<br/>CMU-CS-16-118
+<br/>School of Computer Science
+<br/><b>Carnegie Mellon University</b><br/>Pittsburgh, PA 15213
+<br/><b>Poznan University of Technology</b></td><td>('1773498', 'Brandon Amos', 'brandon amos')<br/>('1747303', 'Mahadev Satyanarayanan', 'mahadev satyanarayanan')</td><td></td></tr><tr><td>82eb267b8e86be0b444e841b4b4ed4814b6f1942</td><td>Single Image 3D Interpreter Network
+<br/><b>Massachusetts Institute of Technology</b><br/><b>Stanford University</b><br/>3Facebook AI Research
+<br/>4Google Research
+</td><td>('3045089', 'Jiajun Wu', 'jiajun wu')<br/>('3222730', 'Tianfan Xue', 'tianfan xue')<br/>('35198686', 'Joseph J. Lim', 'joseph j. lim')<br/>('39402399', 'Yuandong Tian', 'yuandong tian')<br/>('1763295', 'Joshua B. Tenenbaum', 'joshua b. tenenbaum')<br/>('1690178', 'Antonio Torralba', 'antonio torralba')<br/>('1768236', 'William T. Freeman', 'william t. freeman')</td><td></td></tr><tr><td>826c66bd182b54fea3617192a242de1e4f16d020</td><td>978-1-5090-4117-6/17/$31.00 ©2017 IEEE
+<br/>1602
+<br/>ICASSP 2017
+</td><td></td><td></td></tr><tr><td>499f1d647d938235e9186d968b7bb2ab20f2726d</td><td>Face Recognition via Archetype Hull Ranking
+<br/><b>The Chinese University of Hong Kong, Hong Kong</b><br/><b>IBM T. J. Watson Research Center, Yorktown Heights, NY, USA</b></td><td>('3331521', 'Yuanjun Xiong', 'yuanjun xiong')</td><td>{yjxiong,xtang}@ie.cuhk.edu.hk
+<br/>weiliu@us.ibm.com
+<br/>zhaodeli@gmail.com
+</td></tr><tr><td>4919663c62174a9bc0cc7f60da8f96974b397ad2</td><td>HUMAN AGE ESTIMATION USING ENHANCED BIO-INSPIRED FEATURES (EBIF)
+<br/><b>Faculty of Computers and Information, Cairo University, Cairo, Egypt</b></td><td>('3144122', 'Motaz El-Saban', 'motaz el-saban')</td><td>{mohamed.y.eldib,motaz.elsaban}@gmail.com
+</td></tr><tr><td>49f70f707c2e030fe16059635df85c7625b5dc7e</td><td>www.ietdl.org
+<br/>Received on 29th May 2014
+<br/>Revised on 29th August 2014
+<br/>Accepted on 23rd September 2014
+<br/>doi: 10.1049/iet-bmt.2014.0033
+<br/>ISSN 2047-4938
+<br/>Face recognition under illumination variations based
+<br/>on eight local directional patterns
+<br/><b>Utah State University, Logan, UT 84322-4205, USA</b></td><td>('2147212', 'Mohammad Reza Faraji', 'mohammad reza faraji')<br/>('1725739', 'Xiaojun Qi', 'xiaojun qi')</td><td>E-mail: Mohammadreza.Faraji@aggiemail.usu.edu
+</td></tr><tr><td>4967b0acc50995aa4b28e576c404dc85fefb0601</td><td> Vol. 4, No. 1 Jan 2013 ISSN 2079-8407
+<br/>Journal of Emerging Trends in Computing and Information Sciences
+<br/>©2009-2013 CIS Journal. All rights reserved.
+<br/>An Automatic Face Detection and Gender Classification from
+<br/>http://www.cisjournal.org
+<br/> Color Images using Support Vector Machine
+<br/>1, 2, 3 Department of Electrical & Electronic Engineering, International
+<br/><b>University of Business Agriculture and Technology, Dhaka-1230, Bangladesh</b><br/>
+</td><td>('2832495', 'Md. Hafizur Rahman', 'md. hafizur rahman')<br/>('2226529', 'Suman Chowdhury', 'suman chowdhury')<br/>('36231591', 'Md. Abul Bashar', 'md. abul bashar')</td><td></td></tr><tr><td>49820ae612b3c0590a8a78a725f4f378cb605cd1</td><td>Evaluation of Smile Detection Methods with
+<br/>Images in Real-world Scenarios
+<br/><b>Beijing University of Posts and Telecommunications, Beijing, China</b></td><td>('22550265', 'Zhoucong Cui', 'zhoucong cui')<br/>('1678529', 'Shuo Zhang', 'shuo zhang')<br/>('23224233', 'Jiani Hu', 'jiani hu')<br/>('1774956', 'Weihong Deng', 'weihong deng')</td><td></td></tr><tr><td>4972aadcce369a8c0029e6dc2f288dfd0241e144</td><td>Multi-target Unsupervised Domain Adaptation
+<br/>without Exactly Shared Categories
+</td><td>('2076460', 'Huanhuan Yu', 'huanhuan yu')<br/>('27096523', 'Menglei Hu', 'menglei hu')<br/>('1680768', 'Songcan Chen', 'songcan chen')</td><td></td></tr><tr><td>49dd4b359f8014e85ed7c106e7848049f852a304</td><td></td><td></td><td></td></tr><tr><td>49e975a4c60d99bcc42c921d73f8d89ec7130916</td><td>Human and computer recognition of facial expressions of emotion
+<br/>J.M. Susskind a, G. Littlewort b, M.S. Bartlett b, J. Movellan b, A.K. Anderson a,c,∗
+<br/><b>b Machine Perception Laboratory, Institute of Neural Computation, University of California, San Diego, United States</b><br/><b>c Rotman Research Institute, Baycrest Centre for Geriatric Care, Toronto, Ont. M6A 2E1, Canada</b><br/><b>University of Toronto, Canada</b><br/>Available online 12 June 2006
+</td><td></td><td></td></tr><tr><td>49e85869fa2cbb31e2fd761951d0cdfa741d95f3</td><td>253
+<br/>Adaptive Manifold Learning
+</td><td>('2923061', 'Zhenyue Zhang', 'zhenyue zhang')<br/>('1697912', 'Jing Wang', 'jing wang')<br/>('1750350', 'Hongyuan Zha', 'hongyuan zha')</td><td></td></tr><tr><td>49659fb64b1d47fdd569e41a8a6da6aa76612903</td><td></td><td></td><td></td></tr><tr><td>490a217a4e9a30563f3a4442a7d04f0ea34442c8</td><td>International Journal on Soft Computing, Artificial Intelligence and Applications (IJSCAI), Vol.2, No.4, August 2013
+<br/>An SOM-based Automatic Facial Expression
+<br/>Recognition System
+<br/>Hsieh1, andPa-Chun Wang2
+<br/>1Department of Computer Science &InformationEngineering,National Central
+<br/><b>University, Taiwan, R.O.C</b><br/>2Cathay General Hospital, Taiwan, R.O.C.
+</td><td>('1720774', 'Mu-Chun Su', 'mu-chun su')<br/>('4226881', 'Chun-Kai Yang', 'chun-kai yang')<br/>('40179526', 'Shih-Chieh Lin', 'shih-chieh lin')</td><td>E-mail: muchun@csie.ncu.edu.tw
+</td></tr><tr><td>49a7949fabcdf01bbae1c2eb38946ee99f491857</td><td>A CONCATENATING FRAMEWORK OF SHORTCUT
+<br/>CONVOLUTIONAL NEURAL NETWORKS
+</td><td></td><td>Yujian Li (liyujian@bjut.edu.cn), Ting Zhang, Zhaoying Liu, Haihe Hu
+</td></tr><tr><td>4934d44aa89b6d871eb6709dd1d1eebf16f3aaf1</td><td>A Deep Sum-Product Architecture for Robust Facial Attributes Analysis
+<br/><b>The Chinese University of Hong Kong</b><br/><b>The Chinese University of Hong Kong</b><br/><b>Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences</b></td><td>('1693209', 'Ping Luo', 'ping luo')<br/>('31843833', 'Xiaogang Wang', 'xiaogang wang')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td>pluo.lhi@gmail.com
+<br/>xgwang@ee.cuhk.edu.hk
+<br/>xtang@ie.cuhk.edu.hk
+</td></tr><tr><td>499343a2fd9421dca608d206e25e53be84489f44</td><td>Anil Kumar.C, et.al, International Journal of Technology and Engineering Science [IJTES]TM
+<br/>
+<br/>Volume 1[9], pp: 1371-1375, December 2013
+<br/>Face Recognition with Name Using Local Weber‟s
+<br/>Law Descriptor
+<br/>1C.Anil kumar,2A.Rajani,3I.Suneetha
+<br/>1M.Tech Student,2Assistant Professor,3Associate Professor
+<br/><b>Annamacharya Institute of Technology and Sciences, Tirupati, India</b><br/>on FERET
+</td><td></td><td>1Anilyadav.kumar7@gmail.com,2rajanirevanth446@gmail.com,3iralasuneetha.aits@gmail.com
+</td></tr><tr><td>498fd231d7983433dac37f3c97fb1eafcf065268</td><td>LINEAR DISENTANGLED REPRESENTATION LEARNING FOR FACIAL ACTIONS
+<br/>1Dept. of Computer Science
+<br/>2Dept. of Electrical & Computer Engineering
+<br/><b>Johns Hopkins University, 3400 N. Charles Street, Baltimore, MD 21218, USA</b><br/>Fig. 1. The separability of the neutral face yn and expression
+<br/>component ye. We find yn is better for identity recognition
+<br/>than y and ye is better for expression recognition than y.
+</td><td>('40031188', 'Xiang Xiang', 'xiang xiang')<br/>('1709073', 'Trac D. Tran', 'trac d. tran')</td><td></td></tr><tr><td>49e1aa3ecda55465641b2c2acc6583b32f3f1fc6</td><td>International Journal of Emerging Technology and Advanced Engineering
+<br/>Website: www.ijetae.com (ISSN 2250-2459, Volume 2, Issue 5, May 2012)
+<br/>Support Vector Machine for age classification
+<br/>1Assistant Professor, CSE, RSR RCET, Kohka Bhilai
+<br/>2,3 Sr. Assistant Professor, CSE, SSCET, Junwani Bhilai
+</td><td>('6552360', 'Sangeeta Agrawal', 'sangeeta agrawal')<br/>('40618181', 'Rohit Raja', 'rohit raja')<br/>('40323262', 'Sonu Agrawal', 'sonu agrawal')</td><td>1agrawal.sans@gmail.com
+<br/>2rohitraja4u@gmail.com
+<br/>3agrawalsonu@gmail.com
+</td></tr><tr><td>499f2b005e960a145619305814a4e9aa6a1bba6a</td><td>Robust human face recognition based on locality preserving
+<br/>sparse overcomplete block approximation
+<br/><b>University of Geneva</b><br/>7 Route de Drize, Geneva, Switzerland
+</td><td>('36133844', 'Dimche Kostadinov', 'dimche kostadinov')<br/>('8995309', 'Sviatoslav Voloshynovskiy', 'sviatoslav voloshynovskiy')<br/>('1682792', 'Sohrab Ferdowsi', 'sohrab ferdowsi')</td><td></td></tr><tr><td>497bf2df484906e5430aa3045cf04a40c9225f94</td><td>Sensors 2013, 13, 16682-16713; doi:10.3390/s131216682
+<br/>OPEN ACCESS
+<br/>sensors
+<br/>ISSN 1424-8220
+<br/>www.mdpi.com/journal/sensors
+<br/>Article
+<br/>Hierarchical Recognition Scheme for Human Facial Expression
+<br/>Recognition Systems
+<br/><b>UC Lab, Kyung Hee University, Yongin-Si 446-701, Korea</b><br/><b>Division of Information and Computer Engineering, Ajou University, Suwon 443-749, Korea</b><br/>Tel.: +82-31-201-2514.
+<br/>Received: 28 October 2013; in revised form: 30 November 2013 / Accepted: 2 December 2013 /
+<br/>Published: 5 December 2013
+</td><td>('1711083', 'Muhammad Hameed Siddiqi', 'muhammad hameed siddiqi')<br/>('1700806', 'Sungyoung Lee', 'sungyoung lee')<br/>('1750915', 'Young-Koo Lee', 'young-koo lee')<br/>('1714762', 'Adil Mehmood Khan', 'adil mehmood khan')<br/>('34601872', 'Phan Tran Ho Truc', 'phan tran ho truc')</td><td>E-Mails: siddiqi@oslab.khu.ac.kr (M.H.S.); sylee@oslab.khu.ac.kr (S.L.); yklee@khu.ac.kr (Y.-K.L.)
+<br/>E-Mail: amtareen@ajou.ac.kr
+<br/>* Author to whom correspondence should be addressed; E-Mail: pthtruc@oslab.khu.ac.kr;
+</td></tr><tr><td>492f41e800c52614c5519f830e72561db205e86c</td><td>A Deep Regression Architecture with Two-Stage Re-initialization for
+<br/>High Performance Facial Landmark Detection
+<br/>Jiangjing Lv1
+<br/><b>Chongqing Institute of Green and Intelligent Technology, Chinese Academy of Sciences</b><br/><b>University of Chinese Academy of Sciences</b><br/><b>Institute of Automation, Chinese Academy of Sciences</b></td><td>('3492237', 'Xiaohu Shao', 'xiaohu shao')<br/>('1757173', 'Junliang Xing', 'junliang xing')<br/>('2095535', 'Cheng Cheng', 'cheng cheng')<br/>('39959302', 'Xi Zhou', 'xi zhou')</td><td>{lvjiangjing,shaoxiaohu,chengcheng,zhouxi}@cigit.ac.cn
+<br/>jlxing@nlpr.ia.ac.cn
+</td></tr><tr><td>49df381ea2a1e7f4059346311f1f9f45dd997164</td><td>2018
+<br/>On the Use of Client-Specific Information for Face
+<br/>Presentation Attack Detection Based on Anomaly
+<br/>Detection
+</td><td>('1690611', 'Shervin Rahimzadeh Arashloo', 'shervin rahimzadeh arashloo')<br/>('1748684', 'Josef Kittler', 'josef kittler')</td><td></td></tr><tr><td>493ec9e567c5587c4cbeb5f08ca47408ca2d6571</td><td>You et al. Complex Adapt Syst Model (2016) 4:22
+<br/>DOI 10.1186/s40294‑016‑0034‑7
+<br/>RESEARCH
+<br/>Combining graph embedding
+<br/>and sparse regression with structure low‑rank
+<br/>representation for semi‑supervised learning
+<br/>Open Access
+<br/>*Correspondence:
+<br/>1 School of IoT Engineering,
+<br/><b>Jiangnan University, Wuxi</b><br/>China
+<br/>Full list of author information
+<br/>is available at the end of the
+<br/>article
+</td><td>('1766488', 'Vasile Palade', 'vasile palade')</td><td>youcongzhe@gmail.com
+</td></tr><tr><td>49570b41bd9574bd9c600e24b269d945c645b7bd</td><td>A Framework for Performance Evaluation
+<br/>of Face Recognition Algorithms
+<br/><b>Visual Computing and Communications Lab, Arizona State University</b></td><td>('40401270', 'John A. Black', 'john a. black')<br/>('1743991', 'Sethuraman Panchanathan', 'sethuraman panchanathan')</td><td></td></tr><tr><td>496074fcbeefd88664b7bd945012ca22615d812e</td><td>Review
+<br/>Driver Distraction Using Visual-Based Sensors
+<br/>and Algorithms
+<br/>1 Grupo TSK, Technological Scientific Park of Gijón, 33203 Gijón, Asturias, Spain;
+<br/><b>University of Oviedo, Campus de Viesques, 33204 Gij n</b><br/>Academic Editor: Gonzalo Pajares Martinsanz
+<br/>Received: 14 July 2016; Accepted: 24 October 2016; Published: 28 October 2016
+</td><td>('8306548', 'Rubén Usamentiaga', 'rubén usamentiaga')<br/>('27666409', 'Juan Luis Carús', 'juan luis carús')</td><td>juanluis.carus@grupotsk.com
+<br/>Asturias, Spain; rusamentiaga@uniovi.es (R.U.); rcasado@lsi.uniovi.es (R.C.)
+<br/>* Corrospondence: alberto.fernandez@grupotsk.com; Tel.: +34-984-29-12-12; Fax: +34-984-39-06-12
+</td></tr><tr><td>40205181ed1406a6f101c5e38c5b4b9b583d06bc</td><td>Using Context to Recognize People in Consumer Images
+</td><td>('39460815', 'Andrew C. Gallagher', 'andrew c. gallagher')<br/>('1746230', 'Tsuhan Chen', 'tsuhan chen')</td><td></td></tr><tr><td>40dab43abef32deaf875c2652133ea1e2c089223</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Facial Communicative Signals
+<br/>Valence Recognition in Task-Oriented Human-Robot Interaction
+<br/>Received: date / Accepted: date
+</td><td>('33734208', 'Christian Lang', 'christian lang')</td><td></td></tr><tr><td>40b0fced8bc45f548ca7f79922e62478d2043220</td><td>Do Convnets Learn Correspondence?
+<br/><b>University of California Berkeley</b></td><td>('1753210', 'Trevor Darrell', 'trevor darrell')<br/>('34703740', 'Jonathan Long', 'jonathan long')<br/>('40565777', 'Ning Zhang', 'ning zhang')</td><td>{jonlong, nzhang, trevor}@cs.berkeley.edu
+</td></tr><tr><td>405b43f4a52f70336ac1db36d5fa654600e9e643</td><td>What can we learn about CNNs from a large scale controlled object dataset?
+<br/>UWM
+<br/>AUT
+<br/>USC
+</td><td>('3177797', 'Ali Borji', 'ali borji')<br/>('2391309', 'Saeed Izadi', 'saeed izadi')<br/>('7326223', 'Laurent Itti', 'laurent itti')</td><td>borji@uwm.edu
+<br/>sizadi@aut.ac.ir
+<br/>itti@usc.edu
+</td></tr><tr><td>40b86ce698be51e36884edcc8937998979cd02ec</td><td>Yüz ve İsim İlişkisi kullanarak Haberlerdeki Kişilerin Bulunması
+<br/>Finding Faces in News Photos Using Both Face and Name Information
+<br/>Derya Ozkan, Pınar Duygulu
+<br/>Bilgisayar Mühendisliği Bölümü, Bilkent Üniversitesi, 06800, Ankara
+<br/>Özetçe
+<br/>Bu çalışmada, haber fotoğraflarından oluşan geniş veri
+<br/>kümelerinde kişilerin sorgulanmasını sağlayan bir yöntem
+<br/>sunulmuştur. Yöntem isim ve yüzlerin ilişkilendirilmesine
+<br/>dayanmaktadır. Haber başlığında kişinin ismi geçiyor ise
+<br/>fotoğrafta da o kişinin yüzünün bulunacağı varsayımıyla, ilk
+<br/>olarak sorgulanan isim ile ilişkilendirilmiş, fotoğraflardaki
+<br/>tüm yüzler seçilir. Bu yüzler arasında sorgu kişisine ait farklı
+<br/>koşul, poz ve zamanlarda çekilmiş pek çok resmin yanında,
+<br/>haberde ismi geçen başka kişilere ait yüzler ya da kullanılan
+<br/>yüz bulma yönteminin hatasından kaynaklanan yüz olmayan
+<br/>resimler de bulunabilir. Yine de, çoğu zaman, sorgu kişisine
+<br/>ait resimler daha çok olup, bu resimler birbirine diğerlerine
+<br/>olduğundan daha çok benzeyeceklerdir. Bu nedenle, yüzler
+<br/>arasındaki benzerlikler çizgesel olarak betimlendiğinde ,
+<br/>birbirine en çok benzeyen yüzler bu çizgede en yoğun bileşen
+<br/>olacaktır. Bu çalışmada, sorgu ismiyle ilişkilendirilmiş,
+<br/>yüzler arasında birbirine en çok benzeyen alt kümeyi bulan,
+<br/>çizgeye dayalı bir yöntem sunulmaktadır.
+</td><td></td><td>deryao@cs.bilkent.edu.tr, duygulu@cs.bilkent.edu.tr
+</td></tr><tr><td>40a74eea514b389b480d6fe8b359cb6ad31b644a</td><td>Discrete Deep Feature Extraction: A Theory and New Architectures
+<br/>Aleksandar Stani´c1
+<br/>Helmut B¨olcskei1
+<br/>1Dept. IT & EE, ETH Zurich, Switzerland
+<br/><b>University of Vienna, Austria</b></td><td>('2076040', 'Thomas Wiatowski', 'thomas wiatowski')<br/>('2208878', 'Michael Tschannen', 'michael tschannen')<br/>('1690644', 'Philipp Grohs', 'philipp grohs')</td><td></td></tr><tr><td>403a108dec92363fd1f465340bd54dbfe65af870</td><td>describing images with statistics of local non-binarized pixel patterns
+<br/>Local Higher-Order Statistics (LHS)
+<br/>aGREYC CNRS UMR 6072, Universit´e de Caen Basse-Normandie, France
+<br/><b>bMax Planck Institute for Informatics, Germany</b></td><td>('2515597', 'Gaurav Sharma', 'gaurav sharma')</td><td></td></tr><tr><td>40ee38d7ff2871761663d8634c3a4970ed1dc058</td><td>Three-Dimensional Face Recognition: A Fishersurface
+<br/>Approach
+<br/><b>The University of York, United Kingdom</b></td><td>('2023950', 'Thomas Heseltine', 'thomas heseltine')<br/>('1737428', 'Nick Pears', 'nick pears')<br/>('2405628', 'Jim Austin', 'jim austin')</td><td></td></tr><tr><td>402f6db00251a15d1d92507887b17e1c50feebca</td><td>3D Facial Action Units Recognition for Emotional
+<br/>Expression
+<br/>1Department of Information Technology and Communication, Politeknik Kuching, Sarawak, Malaysia
+<br/>2Faculty of Computer Science and Information Technology, Universiti Malaysia Sarawak, Kota Samarahan, Sarawak, Malaysia
+<br/>The muscular activities caused the activation of certain AUs for every facial expression at the certain duration of time
+<br/>throughout the facial expression. This paper presents the methods to recognise facial Action Unit (AU) using facial distance
+<br/>of the facial features which activates the muscles. The seven facial action units involved are AU1, AU4, AU6, AU12, AU15,
+<br/>AU17 and AU25 that characterises happy and sad expression. The recognition is performed on each AU according to rules
+<br/>defined based on the distance of each facial points. The facial distances chosen are extracted from twelve facial features.
+<br/>Then the facial distances are trained using Support Vector Machine (SVM) and Neural Network (NN). Classification result
+<br/>using SVM is presented with several different SVM kernels while result using NN is presented for each training, validation
+<br/>and testing phase.
+<br/>Keywords: Facial action units recognition, 3D AU recognition, facial expression
+<br/>
+</td><td>('2801456', 'Hamimah Ujir', 'hamimah ujir')<br/>('3310557', 'Jacey-Lynn Minoi', 'jacey-lynn minoi')</td><td></td></tr><tr><td>404042a1dcfde338cf24bc2742c57c0fb1f48359</td><td>中国图象图形学报 vol.8, no.8, pp.849-859, 2003.
+<br/>脸部特征定位方法综述1
+<br/>林维训 潘纲 吴朝晖 潘云鹤
+<br/>(浙江大学计算机系 310027)
+<br/>摘 要 脸部特征定位是人脸分析技术的一个重要组成部分,其目标是在图像或图像序列中的指定
+<br/>区域内搜索人脸特征(如眼、鼻、嘴、耳等)的位置。它可广泛应用于人脸检测和定位、人脸识别、
+<br/>姿态识别、表情识别、头部像压缩及重构、脸部动画等领域。近年来该领域的研究有了较大的发展,
+<br/>为了让相关领域内的理论研究和开发人员对目前的进展有一个全面的了解,本文将近年来提出的脸
+<br/>部特征定位方法根据其所依据的基本信息类型分为基于先验知识、几何形状、色彩、外观和关联信
+<br/>息等五类并分别作了介绍,对各类方法的性能作了一些比较和讨论,对未来的发展作了展望。
+<br/>关键词 脸部特征定位 脸部特征提取
+<br/>中图法分类号:TP391.41
+<br/>A Survey on Facial Features Localization
+<br/><b>College of Computer Science, Zhejiang University</b></td><td></td><td></td></tr><tr><td>4015e8195db6edb0ef8520709ca9cb2c46f29be7</td><td><b>UNIVERSITY OF TARTU</b><br/>FACULTY OF MATHEMATICS AND COMPUTER SCIENCE
+<br/><b>Institute of Computer Science</b><br/>Computer Science Curriculum
+<br/>Smile Detector Based on the Motion of
+<br/>Face Reference Points
+<br/>Bachelor’s Thesis (6 ECTS)
+<br/>Supervisor: Gholamreza Anbarjafari, PhD
+<br/>Tartu 2014
+</td><td>('3168586', 'Andres Traumann', 'andres traumann')</td><td></td></tr><tr><td>407bb798ab153bf6156ba2956f8cf93256b6910a</td><td>Fisher Pruning of Deep Nets for Facial Trait
+<br/>Classification
+<br/><b>McGill University</b><br/><b>University Street, Montreal, QC H3A 0E9, Canada</b></td><td>('1992537', 'Qing Tian', 'qing tian')<br/>('1699104', 'Tal Arbel', 'tal arbel')<br/>('1713608', 'James J. Clark', 'james j. clark')</td><td></td></tr><tr><td>40fb4e8932fb6a8fef0dddfdda57a3e142c3e823</td><td>A Mixed Generative-Discriminative Framework for Pedestrian Classification
+<br/>Dariu M. Gavrila2,3
+<br/>1 Image & Pattern Analysis Group, Dept. of Math. and Comp. Sc., Univ. of Heidelberg, Germany
+<br/>2 Environment Perception, Group Research, Daimler AG, Ulm, Germany
+<br/>3 Intelligent Systems Lab, Faculty of Science, Univ. of Amsterdam, The Netherlands
+</td><td>('1765022', 'Markus Enzweiler', 'markus enzweiler')</td><td>{uni-heidelberg.enzweiler,dariu.gavrila}@daimler.com
+</td></tr><tr><td>40dd2b9aace337467c6e1e269d0cb813442313d7</td><td>This thesis has been submitted in fulfilment of the requirements for a postgraduate degree
+<br/><b>e.g. PhD, MPhil, DClinPsychol) at the University of Edinburgh. Please note the following</b><br/>terms and conditions of use:
+<br/>This work is protected by copyright and other intellectual property rights, which are
+<br/>retained by the thesis author, unless otherwise stated.
+<br/>A copy can be downloaded for personal non-commercial research or study, without
+<br/>prior permission or charge.
+<br/>This thesis cannot be reproduced or quoted extensively from without first obtaining
+<br/>permission in writing from the author.
+<br/>The content must not be changed in any way or sold commercially in any format or
+<br/>medium without the formal permission of the author.
+<br/>When referring to this work, full bibliographic details including the author, title,
+<br/>awarding institution and date of the thesis must be given.
+</td><td></td><td></td></tr><tr><td>407de9da58871cae7a6ded2f3a6162b9dc371f38</td><td>TraMNet - Transition Matrix Network for
+<br/>Efficient Action Tube Proposals
+<br/><b>Oxford Brookes University, UK</b></td><td>('1931660', 'Gurkirt Singh', 'gurkirt singh')<br/>('49348905', 'Suman Saha', 'suman saha')<br/>('1754181', 'Fabio Cuzzolin', 'fabio cuzzolin')</td><td>gurkirt.singh-2015@brookes.ac.uk
+</td></tr><tr><td>405526dfc79de98f5bf3c97bf4aa9a287700f15d</td><td>MegaFace: A Million Faces for Recognition at Scale
+<br/>D. Miller
+<br/>E. Brossard
+<br/>S. Seitz
+<br/>Dept. of Computer Science and Engineering
+<br/><b>University of Washington</b><br/>I. Kemelmacher-Shlizerman
+<br/>Figure 1: We evaluate how recognition performs with increasing numbers of faces in the database: (a) shows rank-1 iden-
+<br/>tification rates, and (b) rank-10. Recognition rates drop once the number of distractors increases. We also present first
+<br/>large-scale human recognition results (up to 10K distractors). Interestingly, Google’s deep learning based FaceNet is more
+<br/>robust at scale than humans. See http://megaface.cs.washington.edu to participate in the challenge.
+</td><td></td><td></td></tr><tr><td>40cd062438c280c76110e7a3a0b2cf5ef675052c</td><td></td><td></td><td></td></tr><tr><td>40b7e590dfd1cdfa1e0276e9ca592e02c1bd2b5b</td><td>Beyond Trade-off: Accelerate FCN-based Face Detector with Higher Accuracy
+<br/><b>Beihang University, 2The Chinese University of Hong Kong, 3Sensetime Group Limited</b></td><td>('12920342', 'Guanglu Song', 'guanglu song')<br/>('1715752', 'Yu Liu', 'yu liu')<br/>('40452812', 'Ming Jiang', 'ming jiang')<br/>('33598672', 'Yujie Wang', 'yujie wang')<br/>('1721677', 'Junjie Yan', 'junjie yan')<br/>('2858789', 'Biao Leng', 'biao leng')</td><td>{guanglusong,jiangming1406,yujiewang,lengbiao}@buaa.edu.cn,
+<br/>yuliu@ee.cuhk.edu.hk, yanjunjie@sensetime.com
+</td></tr><tr><td>40a5b32e261dc5ccc1b5df5d5338b7d3fe10370d</td><td>Feedback-Controlled Sequential Lasso Screening
+<br/>Department of Electrical Engineering
+<br/><b>Princeton University</b></td><td>('1719525', 'Yun Wang', 'yun wang')<br/>('1734498', 'Xu Chen', 'xu chen')<br/>('1693135', 'Peter J. Ramadge', 'peter j. ramadge')</td><td></td></tr><tr><td>40a1935753cf91f29ffe25f6c9dde2dc49bf2a3a</td><td>80
+</td><td></td><td></td></tr><tr><td>40a9f3d73c622cceee5e3d6ca8faa56ed6ebef60</td><td>AUTOMATIC LIP TRACKING AND ACTION UNITS CLASSIFICATION USING
+<br/>TWO-STEP ACTIVE CONTOURS AND PROBABILISTIC NEURAL NETWORKS
+<br/>Faculty of Electrical and
+<br/>Computer Engineering
+<br/><b>University of Tabriz, Tabriz, Iran</b><br/>WonSook LEE
+<br/>School of Information Technology
+<br/>and Engineering (SITE)
+<br/>Faculty of Engineering,
+<br/><b>University of Ottawa, Canada</b><br/>Faculty of Electrical and
+<br/>Computer Engineering
+<br/><b>University of Tabriz, Tabriz, Iran</b><br/>
+<br/>
+</td><td>('3210269', 'Hadi Seyedarabi', 'hadi seyedarabi')<br/>('2488201', 'Ali Aghagolzadeh', 'ali aghagolzadeh')</td><td>email: hadis@discover.uottawa.ca
+<br/>email: wslee@uottawa.ca
+<br/>email: aghagol@tabrizu.ac.ir
+</td></tr><tr><td>40a34d4eea5e32dfbcef420ffe2ce7c1ee0f23cd</td><td>Bridging Heterogeneous Domains With Parallel Transport For Vision and
+<br/>Multimedia Applications
+<br/>Dept. of Video and Multimedia Technologies Research
+<br/>AT&T Labs-Research
+<br/>San Francisco, CA 94108
+</td><td>('33692583', 'Raghuraman Gopalan', 'raghuraman gopalan')</td><td></td></tr><tr><td>40389b941a6901c190fb74e95dc170166fd7639d</td><td>Automatic Facial Expression Recognition
+<br/>Emotient
+<br/>http://emotient.com
+<br/>February 12, 2014
+<br/>Imago animi vultus est, indices oculi. (Cicero)
+<br/>Introduction
+<br/>The face is innervated by two different brain systems that compete for control of its muscles:
+<br/>a cortical brain system related to voluntary and controllable behavior, and a sub-cortical
+<br/>system responsible for involuntary expressions. The interplay between these two systems
+<br/>generates a wealth of information that humans constantly use to read the emotions, inten-
+<br/>tions, and interests [25] of others.
+<br/>Given the critical role that facial expressions play in our daily life, technologies that can
+<br/>interpret and respond to facial expressions automatically are likely to find a wide range of
+<br/>applications. For example, in pharmacology, the effect of new anti-depression drugs could
+<br/>be assessed more accurately based on daily records of the patients’ facial expressions than
+<br/>asking the patients to fill out a questionnaire, as it is currently done [7]. Facial expression
+<br/>recognition may enable a new generation of teaching systems to adapt to the expression
+<br/>of their students in the way good teachers do [61]. Expression recognition could be used
+<br/>to assess the fatigue of drivers and air-pilots [58, 59]. Daily-life robots with automatic
+<br/>expression recognition will be able to assess the states and intentions of humans and respond
+<br/>accordingly [41]. Smart phones with expression analysis may help people to prepare for
+<br/>important meetings and job interviews.
+<br/>Thanks to the introduction of machine learning methods, recent years have seen great
+<br/>progress in the field of automatic facial expression recognition. Commercial real-time ex-
+<br/>pression recognition systems are starting to be used in consumer applications, e.g., smile
+<br/>detectors embedded in digital cameras [62]. Nonetheless, considerable progress has yet to be
+<br/>made: Methods for face detection and tracking (the first step of automated face analysis)
+<br/>work well for frontal views of adult Caucasian and Asian faces [50], but their performance
+</td><td>('1775637', 'Jacob Whitehill', 'jacob whitehill')<br/>('40648952', 'Marian Stewart', 'marian stewart')<br/>('1741200', 'Javier R. Movellan', 'javier r. movellan')</td><td></td></tr><tr><td>40e1743332523b2ab5614bae5e10f7a7799161f4</td><td>Wing Loss for Robust Facial Landmark Localisation with Convolutional Neural
+<br/>Networks
+<br/><b>Centre for Vision, Speech and Signal Processing, University of Surrey, Guildford GU2 7XH, UK</b><br/><b>School of IoT Engineering, Jiangnan University, Wuxi 214122, China</b></td><td>('2976854', 'Zhen-Hua Feng', 'zhen-hua feng')<br/>('1748684', 'Josef Kittler', 'josef kittler')</td><td>{z.feng, j.kittler, m.a.rana}@surrey.ac.uk, patrikhuber@gmail.com, wu xiaojun@jiangnan.edu.cn
+</td></tr><tr><td>40c8cffd5aac68f59324733416b6b2959cb668fd</td><td>Pooling Facial Segments to Face: The Shallow and Deep Ends
+<br/>Department of Electrical and Computer Engineering and the Center for Automation Research,
+<br/><b>UMIACS, University of Maryland, College Park, MD</b></td><td>('3152615', 'Upal Mahbub', 'upal mahbub')<br/>('40599829', 'Sayantan Sarkar', 'sayantan sarkar')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td>{umahbub, ssarkar2, rama}@umiacs.umd.edu
+</td></tr><tr><td>40273657e6919455373455bd9a5355bb46a7d614</td><td>Anonymizing k-Facial Attributes via Adversarial Perturbations
+<br/>1 IIIT Delhi, New Delhi, India
+<br/>2 Ministry of Electronics and Information Technology, New Delhi, India
+</td><td>('24380882', 'Saheb Chhabra', 'saheb chhabra')<br/>('39129417', 'Richa Singh', 'richa singh')<br/>('2338122', 'Mayank Vatsa', 'mayank vatsa')<br/>('50046315', 'Gaurav Gupta', 'gaurav gupta')</td><td>{sahebc, rsingh, mayank@iiitd.ac.in}, gauravg@gov.in
+</td></tr><tr><td>40b10e330a5511a6a45f42c8b86da222504c717f</td><td>Implementing the Viola-Jones
+<br/>Face Detection Algorithm
+<br/>Kongens Lyngby 2008
+<br/>IMM-M.Sc.-2008-93
+</td><td>('24007383', 'Ole Helvig Jensen', 'ole helvig jensen')</td><td></td></tr><tr><td>40bb090a4e303f11168dce33ed992f51afe02ff7</td><td>Marginal Loss for Deep Face Recognition
+<br/><b>Imperial College London</b><br/><b>Imperial College London</b><br/><b>Imperial College London</b><br/>UK
+<br/>UK
+<br/>UK
+</td><td>('3234063', 'Jiankang Deng', 'jiankang deng')<br/>('2321938', 'Yuxiang Zhou', 'yuxiang zhou')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')</td><td>j.deng16@imperial.ac.uk
+<br/>yuxiang.zhou10@imperial.ac.uk
+<br/>s.zafeiriou@imperial.ac.uk
+</td></tr><tr><td>40ca925befa1f7e039f0cd40d57dbef6007b4416</td><td>Sampling Matters in Deep Embedding Learning
+<br/>UT Austin
+<br/>A9/Amazon
+<br/>Amazon
+<br/>Philipp Kr¨ahenb¨uhl
+<br/>UT Austin
+</td><td>('2978413', 'Chao-Yuan Wu', 'chao-yuan wu')<br/>('1758550', 'R. Manmatha', 'r. manmatha')<br/>('1691629', 'Alexander J. Smola', 'alexander j. smola')</td><td>cywu@cs.utexas.edu
+<br/>manmatha@a9.com
+<br/>smola@amazon.com
+<br/>philkr@cs.utexas.edu
+</td></tr><tr><td>4042bbb4e74e0934f4afbedbe92dd3e37336b2f4</td><td></td><td></td><td></td></tr><tr><td>4026dc62475d2ff2876557fc2b0445be898cd380</td><td>An Affective User Interface Based on Facial Expression
+<br/>Recognition and Eye-Gaze Tracking
+<br/><b>School of Computer Engineering, Sejong University, Seoul, Korea</b></td><td>('7236280', 'Soo-Mi Choi', 'soo-mi choi')<br/>('2706430', 'Yong-Guk Kim', 'yong-guk kim')</td><td>{smchoi,ykim}@sejong.ac.kr
+</td></tr><tr><td>40f127fa4459a69a9a21884ee93d286e99b54c5f</td><td>Optimizing Apparent Display Resolution
+<br/>Enhancement for Arbitrary Videos
+</td><td>('2267017', 'Michael Stengel', 'michael stengel')<br/>('1701306', 'Martin Eisemann', 'martin eisemann')<br/>('34751565', 'Stephan Wenger', 'stephan wenger')<br/>('2765149', 'Benjamin Hell', 'benjamin hell')</td><td></td></tr><tr><td>401e6b9ada571603b67377b336786801f5b54eee</td><td>Active Image Clustering: Seeking Constraints from
+<br/>Humans to Complement Algorithms
+<br/>November 22, 2011
+</td><td></td><td></td></tr><tr><td>406431d2286a50205a71f04e0b311ba858fc7b6c</td><td>3D FACIAL EXPRESSION CLASSIFICATION USING
+<br/>A STATISTICAL MODEL OF SURFACE NORMALS
+<br/>AND A MODULAR APPROACH
+<br/>A thesis submitted to
+<br/><b>University of Birmingham</b><br/>for the degree of
+<br/>DOCTOR OF PHILOSOPHY
+<br/>School of Electronic, Electrical & Computer Engineering
+<br/><b>University of Birmingham</b><br/>August 2012
+</td><td>('2801456', 'Hamimah Ujir', 'hamimah ujir')</td><td></td></tr><tr><td>40217a8c60e0a7d1735d4f631171aa6ed146e719</td><td>Part-Pair Representation for Part Localization
+<br/><b>Columbia University</b></td><td>('2454675', 'Jiongxin Liu', 'jiongxin liu')<br/>('3173493', 'Yinxiao Li', 'yinxiao li')<br/>('1767767', 'Peter N. Belhumeur', 'peter n. belhumeur')</td><td>{liujx09, yli, belhumeur}@cs.columbia.edu
+</td></tr><tr><td>2e20ed644e7d6e04dd7ab70084f1bf28f93f75e9</td><td></td><td></td><td></td></tr><tr><td>2e8e6b835e5a8f55f3b0bdd7a1ff765a0b7e1b87</td><td>International Journal of Computer Vision manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Pointly-Supervised Action Localization
+<br/>Received: date / Accepted: date
+</td><td>('2606260', 'Pascal Mettes', 'pascal mettes')</td><td></td></tr><tr><td>2eb37a3f362cffdcf5882a94a20a1212dfed25d9</td><td>4
+<br/>Local Feature Based Face Recognition
+<br/>R.I.T., Rajaramnagar and S.G.G.S. COE &T, Nanded
+<br/>India
+<br/>1. Introduction
+<br/>A reliable automatic face recognition (AFR) system is a need of time because in today's
+<br/>networked world, maintaining the security of private information or physical property is
+<br/>becoming increasingly important and difficult as well. Most of the time criminals have been
+<br/>taking the advantage of fundamental flaws in the conventional access control systems i.e.
+<br/>the systems operating on credit card, ATM etc. do not grant access by "who we are", but by
+<br/>"what we have”. The biometric based access control systems have a potential to overcome
+<br/>most of the deficiencies of conventional access control systems and has been gaining the
+<br/>importance in recent years. These systems can be designed with biometric traits such as
+<br/>fingerprint, face, iris, signature, hand geometry etc. But comparison of different biometric
+<br/>traits shows that face is very attractive biometric because of its non-intrusiveness and social
+<br/>acceptability. It provides automated methods of verifying or recognizing the identity of a
+<br/>living person based on its facial characteristics.
+<br/>In last decade, major advances occurred in face recognition, with many systems capable of
+<br/>achieving recognition rates greater than 90%. However real-world scenarios remain a
+<br/>challenge, because face acquisition process can undergo to a wide range of variations. Hence
+<br/>the AFR can be thought as a very complex object recognition problem, where the object to be
+<br/>recognized is the face. This problem becomes even more difficult because the search is done
+<br/>among objects belonging to the same class and very few images of each class are available to
+<br/>train the system. Moreover different problems arise when images are acquired under
+<br/>uncontrolled conditions such as illumination variations, pose changes, occlusion, person
+<br/>appearance at different ages, expression changes and face deformations. The numbers of
+<br/>approaches has been proposed by various researchers to deal with these problems but still
+<br/>reported results cannot suffice the need of the reliable AFR system in presence of all facial
+<br/>image variations. A recent survey paper (Abate et al., 2007) states that the sensibility of the
+<br/>AFR systems to illumination and pose variations are the main problems researchers have
+<br/>been facing up till.
+<br/>2. Face recognition methods
+<br/>The existing face recognition methods can be divided into two categories: holistic matching
+<br/>methods and local matching methods.The holistic matching methods use complete face
+<br/>region as a input to face recognition system and constructs a lower dimensional subspace
+<br/>using principal component analysis (PCA) (Turk & Pentland, 1991), linear discriminant
+<br/>www.intechopen.com
+</td><td>('2321206', 'Sanjay A. Pardeshi', 'sanjay a. pardeshi')<br/>('3092481', 'Sanjay N. Talbar', 'sanjay n. talbar')</td><td></td></tr><tr><td>2e0addeffba4be98a6ad0460453fbab52616b139</td><td>Face View Synthesis
+<br/>Using A Single Image
+<br/>Thesis Proposal
+<br/>May 2006
+<br/>Committee Members
+<br/>Henry Schneiderman (Chair)
+<br/>Alexei (Alyosha) Efros
+<br/><b>Robotics Institute</b><br/><b>Carnegie Mellon University</b><br/>Pittsburgh, Pennsylvania 15213
+<br/><b>c(cid:13) Carnegie Mellon University</b></td><td>('2989714', 'Jiang Ni', 'jiang ni')<br/>('1709305', 'Martial Hebert', 'martial hebert')<br/>('38998440', 'David Kriegman', 'david kriegman')</td><td></td></tr><tr><td>2e5cfa97f3ecc10ae8f54c1862433285281e6a7c</td><td></td><td></td><td></td></tr><tr><td>2e091b311ac48c18aaedbb5117e94213f1dbb529</td><td>Collaborative Facial Landmark Localization
+<br/>for Transferring Annotations Across Datasets
+<br/><b>University of Wisconsin Madison</b><br/>http://www.cs.wisc.edu/~lizhang/projects/collab-face-landmarks/
+</td><td>('1893050', 'Brandon M. Smith', 'brandon m. smith')<br/>('40396555', 'Li Zhang', 'li zhang')</td><td></td></tr><tr><td>2e1415a814ae9abace5550e4893e13bd988c7ba1</td><td>International Journal of Engineering Trends and Technology (IJETT) – Volume 21 Number 3 – March 2015
+<br/>Dictionary Based Face Recognition in Video Using
+<br/>Fuzzy Clustering and Fusion
+<br/>#1IInd year M.E. Student, #2Assistant Professor
+<br/><b>Dhanalakshmi Srinivasan College of Engineering</b><br/>Coimbatore,Tamilnadu,India.
+<br/><b>Anna University</b></td><td></td><td></td></tr><tr><td>2e0e056ed5927a4dc6e5c633715beb762628aeb0</td><td></td><td></td><td></td></tr><tr><td>2e8a0cc071017845ee6f67bd0633b8167a47abed</td><td>Spatio-Temporal Covariance Descriptors for Action and Gesture Recognition
+<br/>NICTA, PO Box 6020, St Lucia, QLD 4067, Australia ∗
+<br/><b>University of Queensland, School of ITEE, QLD 4072, Australia</b></td><td>('2706642', 'Andres Sanin', 'andres sanin')<br/>('1781182', 'Conrad Sanderson', 'conrad sanderson')<br/>('2270092', 'Brian C. Lovell', 'brian c. lovell')</td><td></td></tr><tr><td>2e68190ebda2db8fb690e378fa213319ca915cf8</td><td>Generating Videos with Scene Dynamics
+<br/>MIT
+<br/>UMBC
+<br/>MIT
+</td><td>('1856025', 'Carl Vondrick', 'carl vondrick')<br/>('2367683', 'Hamed Pirsiavash', 'hamed pirsiavash')<br/>('1690178', 'Antonio Torralba', 'antonio torralba')</td><td>vondrick@mit.edu
+<br/>hpirsiav@umbc.edu
+<br/>torralba@mit.edu
+</td></tr><tr><td>2e0d56794379c436b2d1be63e71a215dd67eb2ca</td><td>Improving precision and recall of face recognition in SIPP with combination of
+<br/>modified mean search and LSH
+<br/>Xihua.Li
+</td><td></td><td>lixihua9@126.com
+</td></tr><tr><td>2ee8900bbde5d3c81b7ed4725710ed46cc7e91cd</td><td></td><td></td><td></td></tr><tr><td>2e475f1d496456831599ce86d8bbbdada8ee57ed</td><td>Groupsourcing: Team Competition Designs for
+<br/>Crowdsourcing
+<br/><b>L3S Research Center, Hannover, Germany</b></td><td>('2993225', 'Markus Rokicki', 'markus rokicki')<br/>('2553718', 'Sergej Zerr', 'sergej zerr')<br/>('1745880', 'Stefan Siersdorfer', 'stefan siersdorfer')</td><td>{rokicki,siersdorfer,zerr}@L3S.de
+</td></tr><tr><td>2ef51b57c4a3743ac33e47e0dc6a40b0afcdd522</td><td>Leveraging Billions of Faces to Overcome
+<br/>Performance Barriers in Unconstrained Face
+<br/>Recognition
+<br/>face.com
+</td><td>('2188620', 'Yaniv Taigman', 'yaniv taigman')<br/>('1776343', 'Lior Wolf', 'lior wolf')</td><td>{yaniv, wolf}@face.com
+</td></tr><tr><td>2e231f1e7e641dd3619bec59e14d02e91360ac01</td><td>FUSION NETWORK FOR FACE-BASED AGE ESTIMATION
+<br/><b>The University of Warwick, Coventry, UK</b><br/><b>School of Management, University of Bath, Bath, UK</b><br/><b>School of Computing and Mathematics, Charles Sturt University, Wagga Wagga, Australia</b></td><td>('1750506', 'Haoyi Wang', 'haoyi wang')<br/>('40655450', 'Xingjie Wei', 'xingjie wei')<br/>('1901920', 'Victor Sanchez', 'victor sanchez')<br/>('1799504', 'Chang-Tsun Li', 'chang-tsun li')</td><td>{h.wang.16, vsanchez, C-T.Li}@warwick.ac.uk, x.wei@bath.ac.uk
+</td></tr><tr><td>2e6cfeba49d327de21ae3186532e56cadeb57c02</td><td>Real Time Eye Gaze Tracking with 3D Deformable Eye-Face Model
+<br/><b>Rensselaer Polytechnic Institute</b><br/>110 8th Street, Troy, NY, USA
+</td><td>('1771700', 'Kang Wang', 'kang wang')<br/>('1726583', 'Qiang Ji', 'qiang ji')</td><td>{wangk10, jiq}@rpi.edu
+</td></tr><tr><td>2ee817981e02c4709d65870c140665ed25b005cc</td><td>Sparse Representations and Random Projections for
+<br/>Robust and Cancelable Biometrics
+<br/>(Invited Paper)
+<br/>Center for Automation Research
+<br/><b>University of Maryland</b><br/><b>College Park, MD 20742 USA</b><br/><b>DAP - University of Sassari</b><br/>piazza Duomo, 6
+<br/>Alghero 07041 Italy
+<br/>robust and secure physiological biometrics recognition such
+<br/>as face and iris [6], [7], [9], [1]. In this paper, we categorize
+<br/>approaches to biometrics based on sparse representations.
+</td><td>('1741177', 'Vishal M. Patel', 'vishal m. patel')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')<br/>('1725688', 'Massimo Tistarelli', 'massimo tistarelli')</td><td>{pvishalm,rama}@umiacs.umd.edu
+<br/>tista@uniss.it
+</td></tr><tr><td>2e98329fdec27d4b3b9b894687e7d1352d828b1d</td><td>Using Affect Awareness to Modulate Task Experience:
+<br/>A Study Amongst Pre-Elementary School Kids
+<br/><b>Carnegie Mellon University</b><br/>5000 Forbes Avenue,
+<br/>Pittsburgh, PA 15213
+</td><td>('29120285', 'Vivek Pai', 'vivek pai')<br/>('1760345', 'Raja Sooriamurthi', 'raja sooriamurthi')</td><td></td></tr><tr><td>2e19371a2d797ab9929b99c80d80f01a1fbf9479</td><td></td><td></td><td></td></tr><tr><td>2ed4973984b254be5cba3129371506275fe8a8eb</td><td>
+<br/>THE EFFECTS OF MOOD ON
+<br/>EMOTION RECOGNITION AND
+<br/>ITS RELATIONSHIP WITH THE
+<br/>GLOBAL VS LOCAL
+<br/>INFORMATION PROCESSING
+<br/>STYLES
+<br/>BASIC RESEARCH PROGRAM
+<br/>WORKING PAPERS
+<br/>SERIES: PSYCHOLOGY
+<br/>WP BRP 60/PSY/2016
+<br/>This Working Paper is an output of a research project implemented at the National Research
+<br/><b>University Higher School of Economics (HSE). Any opinions or claims contained in this</b><br/>Working Paper do not necessarily reflect the views of HSE
+<br/>
+</td><td>('15615673', 'Victoria Ovsyannikova', 'victoria ovsyannikova')</td><td></td></tr><tr><td>2e9c780ee8145f29bd1a000585dd99b14d1f5894</td><td>Simultaneous Adversarial Training - Learn from
+<br/>Others’ Mistakes
+<br/><b>Lite-On Singapore Pte. Ltd, 2Imperial College London</b></td><td>('9949538', 'Zukang Liao', 'zukang liao')</td><td></td></tr><tr><td>2ebc35d196cd975e1ccbc8e98694f20d7f52faf3</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication.
+<br/>IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE
+<br/>Towards Wide-angle Micro Vision Sensors
+</td><td>('2724462', 'Sanjeev J. Koppal', 'sanjeev j. koppal')<br/>('2407724', 'Ioannis Gkioulekas', 'ioannis gkioulekas')<br/>('2140759', 'Kenneth B. Crozier', 'kenneth b. crozier')</td><td></td></tr><tr><td>2e3d081c8f0e10f138314c4d2c11064a981c1327</td><td></td><td></td><td></td></tr><tr><td>2e86402b354516d0a8392f75430156d629ca6281</td><td></td><td></td><td></td></tr><tr><td>2ea78e128bec30fb1a623c55ad5d55bb99190bd2</td><td>Residual vs. Inception vs. Classical Networks for
+<br/>Low-Resolution Face Recognition
+<br/><b>Vision and Fusion Lab, Karlsruhe Institute of Technology KIT, Karlsruhe, Germany</b><br/>2Fraunhofer IOSB, Karlsruhe, Germany
+<br/>{christian.herrmann,dieter.willersinn,
+</td><td>('37646107', 'Christian Herrmann', 'christian herrmann')<br/>('1783486', 'Dieter Willersinn', 'dieter willersinn')</td><td>juergen.beyerer}@iosb.fraunhofer.de
+</td></tr><tr><td>2e8eb9dc07deb5142a99bc861e0b6295574d1fbd</td><td>Analysis by Synthesis: 3D Object Recognition by Object Reconstruction
+<br/><b>University of California, Irvine</b><br/><b>University of California, Irvine</b></td><td>('1888731', 'Mohsen Hejrati', 'mohsen hejrati')<br/>('1770537', 'Deva Ramanan', 'deva ramanan')</td><td>shejrati@ics.uci.edu
+<br/>dramanan@ics.uci.edu
+</td></tr><tr><td>2e0f5e72ad893b049f971bc99b67ebf254e194f7</td><td>Apparel Classification with Style
+<br/>1ETH Z¨urich, Switzerland 2Microsoft, Austria 3Kooaba AG, Switzerland
+<br/>4KU Leuven, Belgium
+</td><td>('1696393', 'Lukas Bossard', 'lukas bossard')<br/>('1727791', 'Matthias Dantone', 'matthias dantone')<br/>('1695579', 'Christian Leistner', 'christian leistner')<br/>('1793359', 'Christian Wengert', 'christian wengert')<br/>('1726249', 'Till Quack', 'till quack')<br/>('1681236', 'Luc Van Gool', 'luc van gool')</td><td></td></tr><tr><td>2e3c893ac11e1a566971f64ae30ac4a1f36f5bb5</td><td>Simultaneous Object Detection and Ranking with
+<br/>Weak Supervision
+<br/>Department of Engineering Science
+<br/><b>University of Oxford</b><br/>United Kingdom
+</td><td>('1758219', 'Matthew B. Blaschko', 'matthew b. blaschko')<br/>('1687524', 'Andrea Vedaldi', 'andrea vedaldi')<br/>('1688869', 'Andrew Zisserman', 'andrew zisserman')</td><td></td></tr><tr><td>2ed3ce5cf9e262bcc48a6bd998e7fb70cf8a971c</td><td>Preprints (www.preprints.org) | NOT PEER-REVIEWED | Posted: 26 January 2017 doi:10.20944/preprints201701.0120.v1
+<br/>Peer-reviewed version available at Sensors 2017, 17, 275; doi:10.3390/s17020275
+<br/>Article
+<br/>Active AU Based Patch Weighting for Facial
+<br/>Expression Recognition
+<br/><b>School of Computer Science and Software Engineering, Shenzhen University, Nanhai Ave 3688, Shenzhen</b><br/>Guangdong 518060, China
+</td><td>('34181727', 'Weicheng Xie', 'weicheng xie')<br/>('1687690', 'LinLin Shen', 'linlin shen')<br/>('5828998', 'Meng Yang', 'meng yang')<br/>('5383601', 'Zhihui Lai', 'zhihui lai')</td><td>* Correspondence: llshen@szu.edu.cn; Tel.: +86-755-8693-5089
+</td></tr><tr><td>2edc6df161f6aadbef9c12408bdb367e72c3c967</td><td>Improved Spatiotemporal Local Monogenic Binary Pattern
+<br/>for Emotion Recognition in The Wild
+<br/>Center for Machine Vision
+<br/>Research
+<br/>Department of Computer
+<br/>Science and Engineering
+<br/><b>University of Oulu, Finland</b><br/>Center for Machine Vision
+<br/>Research
+<br/>Department of Computer
+<br/>Science and Engineering
+<br/><b>University of Oulu, Finland</b><br/>Center for Machine Vision
+<br/>Research
+<br/>Department of Computer
+<br/>Science and Engineering
+<br/><b>University of Oulu, Finland</b><br/>Center for Machine Vision
+<br/>Research
+<br/>Department of Computer
+<br/>Science and Engineering
+<br/><b>University of Oulu, Finland</b><br/>Matti Pietikänen
+<br/>Center for Machine Vision
+<br/>Research
+<br/>Department of Computer
+<br/>Science and Engineering
+<br/><b>University of Oulu, Finland</b></td><td>('18780812', 'Xiaohua Huang', 'xiaohua huang')<br/>('2512942', 'Qiuhai He', 'qiuhai he')<br/>('1836646', 'Xiaopeng Hong', 'xiaopeng hong')<br/>('1757287', 'Guoying Zhao', 'guoying zhao')</td><td>huang.xiaohua@ee.oulu.fi
+<br/>qiuhai.he@ee.oulu.fi
+<br/>xhong@ee.oulu.fi
+<br/>gyzhao@ee.oulu.fi
+<br/>mkp@ee.oulu.fi
+</td></tr><tr><td>2ec7d6a04c8c72cc194d7eab7456f73dfa501c8c</td><td>International Journal of Scientific Research and Management Studies (IJSRMS)
+<br/>ISSN: 2349-3771
+<br/>
+<br/>Volume 3 Issue 4, pg: 164-169
+<br/>A REVIEW ON TEXTURE BASED EMOTION RECOGNITION
+<br/>FROM FACIAL EXPRESSION
+<br/>1U.G. Scholars, 2Assistant Professor,
+<br/>Dept. of E & C Engg., MIT Moradabad, Ram Ganga Vihar, Phase II, Moradabad, India.
+</td><td>('5255436', 'Shubham Kashyap', 'shubham kashyap')<br/>('2036732', 'Pankaj Pandey', 'pankaj pandey')<br/>('36216996', 'Prashant Kumar', 'prashant kumar')</td><td></td></tr><tr><td>2eb9f1dbea71bdc57821dedbb587ff04f3a25f07</td><td>Face for Ambient Interface
+<br/><b>Imperial College, 180 Queens Gate</b><br/>London SW7 2AZ, U.K.
+</td><td>('1694605', 'Maja Pantic', 'maja pantic')</td><td>m.pantic@imperial.ac.uk
+</td></tr><tr><td>2e1fd8d57425b727fd850d7710d38194fa6e2654</td><td>Learning Structured Appearance Models
+<br/>from Captioned Images of Cluttered Scenes ∗
+<br/><b>University of Toronto</b><br/><b>Bielefeld University</b></td><td>('37894231', 'Michael Jamieson', 'michael jamieson')<br/>('1724954', 'Sven Wachsmuth', 'sven wachsmuth')</td><td>{jamieson, afsaneh, sven, suzanne}@cs.toronto.edu
+<br/>swachsmu@techfak.uni-bielefeld.de
+</td></tr><tr><td>2e1b1969ded4d63b69a5ec854350c0f74dc4de36</td><td></td><td></td><td></td></tr><tr><td>2e832d5657bf9e5678fd45b118fc74db07dac9da</td><td>Running head: RECOGNITION OF FACIAL EXPRESSIONS OF EMOTION 
+<br/>1 
+<br/>Recognition of Facial Expressions of Emotion: The Effects of Anxiety, Depression, and Fear of Negative 
+<br/>Evaluation 
+<br/>Rachel Merchak 
+<br/><b>Wittenberg University</b><br/><b>Rachel Merchak, Wittenberg University</b><br/>Author Note 
+<br/>This research was conducted in collaboration with Dr. Stephanie Little, Psychology Department, 
+<br/><b>Wittenberg University, and Dr. Michael Anes, Wittenberg University</b><br/>Correspondence concerning this article should be addressed to Rachel Merchak, 10063 Fox 
+<br/>Chase Drive, Loveland, OH 45140.  
+</td><td></td><td>E‐mail: merchakr@wittenberg.edu 
+</td></tr><tr><td>2be0ab87dc8f4005c37c523f712dd033c0685827</td><td>RELAXED LOCAL TERNARY PATTERN FOR FACE RECOGNITION
+<br/>BeingThere Centre
+<br/><b>Institute of Media Innovation</b><br/><b>Nanyang Technological University</b><br/>50 Nanyang Drive, Singapore 637553.
+<br/>School of Electrical & Electronics Engineering
+<br/><b>Nanyang Technological University</b><br/>50 Nanyang Avenue, Singapore 639798
+</td><td>('1690809', 'Jianfeng Ren', 'jianfeng ren')<br/>('3307580', 'Xudong Jiang', 'xudong jiang')<br/>('34316743', 'Junsong Yuan', 'junsong yuan')</td><td></td></tr><tr><td>2bb2ba7c96d40e269fc6a2d5384c739ff9fa16eb</td><td>Image-based recommendations on styles and substitutes
+<br/>Julian McAuley
+<br/>UC San Diego
+<br/><b>University of Adelaide</b><br/>Qinfeng (‘Javen’) Shi
+<br/><b>University of Adelaide</b></td><td>('2110208', 'Christopher Targett', 'christopher targett')</td><td>jmcauley@ucsd.edu
+<br/>christopher.targett@student.adelaide.edu.au
+<br/>javen.shi@adelaide.edu.au
+</td></tr><tr><td>2b339ece73e3787f445c5b92078e8f82c9b1c522</td><td>Human Re-identification in Crowd Videos Using
+<br/>Personal, Social and Environmental Constraints
+<br/><b>University of Central Florida, Orlando, USA</b><br/>Center for Research in Computer Vision,
+</td><td>('2963501', 'Shayan Modiri Assari', 'shayan modiri assari')<br/>('1803711', 'Haroon Idrees', 'haroon idrees')<br/>('1745480', 'Mubarak Shah', 'mubarak shah')</td><td>{smodiri,haroon,shah}@cs.ucf.edu
+</td></tr><tr><td>2b4d092d70efc13790d0c737c916b89952d4d8c7</td><td>JOURNAL OF INFORMATION SCIENCE AND ENGINEERING 32, XXXX-XXXX (2016)
+<br/>Robust Facial Expression Recognition using Local Haar
+<br/>Mean Binary Pattern
+<br/>1,2 Department of Computer Engineering
+<br/><b>Charotar University of Science and Technology, Changa, India</b><br/><b>Gujarat Technological University, V.V.Nagar, India</b><br/>In this paper, we propose a hybrid statistical feature extractor, Local Haar Mean Bina-
+<br/>ry Pattern (LHMBP). It extracts level-1 haar approximation coefficients and computes Local
+<br/>Mean Binary Pattern (LMBP) of it. LMBP code of pixel is obtained by weighting the
+<br/>thresholded neighbor value of 3  3 patch on its mean. LHMBP produces highly discrimina-
+<br/>tive code compared to other state of the art methods. To localize appearance features, ap-
+<br/>proximation subband is divided into M  N regions. LHMBP feature descriptor is derived
+<br/>by concatenating LMBP distribution of each region. We also propose a novel template
+<br/>matching strategy called Histogram Normalized Absolute Difference (HNAD) for histogram
+<br/>based feature comparison. Experiments prove the superiority of HNAD over well-known
+<br/>template matching techniques such as L2 norm and Chi-Square. We also investigated
+<br/>LHMBP for expression recognition in low resolution. The performance of the proposed ap-
+<br/>proach is tested on well-known CK, JAFFE, and SFEW facial expression datasets in diverse
+<br/>situations.
+<br/>Keywords: affective computing, appearance based feature, local binary pattern, Gabor filter,
+<br/>support vector machine.
+<br/>1. INTRODUCTION
+<br/>Facial Expression Recognition (FER) is a classical problem of pattern recognition
+<br/>and machine learning. It plays a vital role in social communication and in conveying
+<br/>emotions [1]. In the earlier development stage, the scope of FER was confined to psy-
+<br/>chological studies only, but nowadays it covers a broad range of applications including
+<br/>human-computer interfaces (HCI), industrial automation, surveillance systems, senti-
+<br/>ment identification, etc. Precise recognition of facial expressions can become a driving
+<br/>force for the future automation interfaces like car driving, robotics, driver alert systems,
+<br/>etc.
+<br/>According to input, expression recognition systems can be classified as static or
+<br/>dynamic. In static approaches, features are computed from given still image only.
+<br/>Whereas in dynamic approaches, temporal relationships between features over the
+<br/>image sequence is extracted. Temporal relationships play a major role in expression
+<br/>recognition from an image sequence. In last decade, many video-based methods have
+<br/>been studied [2]. Research is also focused on detecting micro-expressions [2], [3], [4],
+<br/>[5], recognition of spontaneous expressions [6], analysis of multi-views or profile views
+<br/>[7] and fusion of geometric and appearance features [8], [9], [10]. Nowadays, deep
+<br/>1249
+</td><td>('9318822', 'MAHESH GOYANI', 'mahesh goyani')<br/>('11384332', 'NARENDRA PATEL', 'narendra patel')</td><td>E-mail: mgoyani@gmail.com, nmpatel@bvmengineerring.ac.in
+</td></tr><tr><td>2bb53e66aa9417b6560e588b6235e7b8ebbc294c</td><td>SEMANTIC EMBEDDING SPACE FOR ZERO-SHOT ACTION RECOGNITION
+<br/><b>School of EECS, Queen Mary University of London, London, UK</b></td><td>('47158489', 'Xun Xu', 'xun xu')<br/>('2073354', 'Shaogang Gong', 'shaogang gong')</td><td></td></tr><tr><td>2b0ff4b82bac85c4f980c40b3dc4fde05d3cc23f</td><td>An Effective Approach for Facial Expression Recognition with Local Binary
+<br/>Pattern and Support Vector Machine
+</td><td>('20656805', 'Thi Nhan', 'thi nhan')<br/>('9872793', 'Il Choi', 'il choi')</td><td>*1School of Media, Soongsil University, ctnhen@yahoo.com
+<br/>2School of Media, Soongsil University, an_tth@yahoo.com
+<br/>3School of Media, Soongsil University, hic@ssu.ac.kr
+</td></tr><tr><td>2b3ceb40dced78a824cf67054959e250aeaa573b</td><td></td><td></td><td></td></tr><tr><td>2be8e06bc3a4662d0e4f5bcfea45631b8beca4d0</td><td>Watch and Learn: Semi-Supervised Learning of Object Detectors From Videos
+<br/><b>Robotics Institute, Carnegie Mellon University</b><br/>The availability of large labeled image datasets [1, 2] has been one of the
+<br/>key factors for advances in recognition. These datasets have not only helped
+<br/>boost performance, but have also fostered the development of new tech-
+<br/>niques. However, compared to images, videos seem like a more natural
+<br/>source of training data because of the additional temporal continuity they
+<br/>offer for both learning and labeling. The available video datasets lack the
+<br/>richness and variety of annotations offered by benchmark image datasets.
+<br/>It also seems unlikely that human per-image labeling will scale to the web-
+<br/>scale video data without using temporal constraints. In this paper, we show
+<br/>how to exploit the temporal information provided by videos to enable semi-
+<br/>supervised learning.
+<br/>We present a scalable framework that discovers and localizes multiple ob-
+<br/>jects in video using semi-supervised learning (see Figure 1). It tackles this
+<br/>challenging problem in long video (a million frames in our experiments)
+<br/>starting from only a few labeled examples.
+<br/>In addition, we present our
+<br/>algorithm in a realistic setting of sparse labels [3], i.e., in the few initial
+<br/>“labeled” frames, not all objects are annotated. This setting relaxes the as-
+<br/>sumption that in a given frame all object instances have been exhaustively
+<br/>annotated. It also implies that we do not know if any unannotated region
+<br/>in the frame is an instance of the object category or the background, and
+<br/>thus cannot use any region from our input as negative data. While much of
+<br/>the past work has ignored this type of sparse labeling and lack of explicit
+<br/>negatives, we show ways to overcome this handicap.
+<br/>Contributions: Our semi-supervised learning (SSL) framework localizes
+<br/>multiple unknown objects in videos. Starting from sparsely labeled objects,
+<br/>it iteratively labels new training examples in the videos. Our key contribu-
+<br/>tions are: 1) We tackle the SSL problem for discovering multiple objects in
+<br/>sparsely labeled videos; 2) We present an approach to constrain SSL [6] by
+<br/>combining multiple weak cues in videos and exploiting decorrelated errors
+<br/>by modeling data in multiple feature spaces. We demonstrate its effective-
+<br/>ness as compared to traditional tracking-by-detection approaches. 3) Given
+<br/>the redundancy in video data, we need a method that can automatically de-
+<br/>termine the relevance of training examples to the target detection task. We
+<br/>present a way to include relevance and diversity of the training examples in
+<br/>each iteration of the SSL, leading to scalable incremental learning.
+<br/>Our algorithm starts with a few sparsely annotated video frames (L) and
+<br/>iteratively discovers new instances in the large unlabeled set of videos (U ).
+<br/>Simply put, we first train detectors on annotated objects, followed by de-
+<br/>tection on input videos. We determine good detections (removing confident
+<br/>false positives) which serve as starting points for short-term tracking. The
+<br/>short-term tracking aims to label unseen examples reliably. Amongst these
+<br/>newly labeled examples, we identify diverse examples which are used to
+<br/>update the detector without re-training from scratch. We iteratively repeat
+<br/>this process to label new examples. We now describe our algorithm.
+<br/>Sparse Annotations (lack of explicit negatives): We start with a few sparsely
+<br/>annotated frames in a random subset of U . Sparse labeling implies that un-
+<br/>like standard tracking-by-detection approaches, we cannot sample negatives
+<br/>from the vicinity of labeled positives. We use random images from the in-
+<br/>ternet as negative data for training object detectors on these sparse labels.
+<br/>We use these detectors to detect objects on a subset of the video, e.g., every
+<br/>30 frames. Training on a few positives without domain negatives results in
+<br/>high confidence false positives. Removing such false positives is important
+<br/>because if we track them, we will add many more bad training examples,
+<br/>thus degrading the detector’s performance over iterations.
+<br/>Temporally consistent detections: We first remove detections that are tem-
+<br/>porally inconsistent using a smoothness prior on the motion of detections.
+<br/>Decorrelated errors: To remove high confidence false positives, we rely
+<br/>on the principle of decorrelated errors (similar to multi-view SSL [5]). The
+<br/>intuition is that the detector makes mistakes that are related to its feature
+</td><td>('1806773', 'Ishan Misra', 'ishan misra')<br/>('1781242', 'Abhinav Shrivastava', 'abhinav shrivastava')<br/>('1709305', 'Martial Hebert', 'martial hebert')</td><td></td></tr><tr><td>2bcec23ac1486f4106a3aa588b6589e9299aba70</td><td>An Uncertain Future: Forecasting from Static
+<br/>Images using Variational Autoencoders
+<br/><b>The Robotics Institute, Carnegie Mellon University</b></td><td>('14192361', 'Jacob Walker', 'jacob walker')<br/>('2786693', 'Carl Doersch', 'carl doersch')<br/>('1737809', 'Abhinav Gupta', 'abhinav gupta')<br/>('1709305', 'Martial Hebert', 'martial hebert')</td><td></td></tr><tr><td>2b773fe8f0246536c9c40671dfa307e98bf365ad</td><td>Hindawi Publishing Corporation
+<br/>Computational and Mathematical Methods in Medicine
+<br/>Volume 2013, Article ID 106867, 14 pages
+<br/>http://dx.doi.org/10.1155/2013/106867
+<br/>Research Article
+<br/>Fast Discriminative Stochastic Neighbor Embedding Analysis
+<br/><b>School of Computer Science and Technology, Zhejiang University of Technology, Hangzhou 310023, China</b><br/>Received 9 February 2013; Accepted 22 March 2013
+<br/>Academic Editor: Carlo Cattani
+<br/>which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
+<br/>Feature is important for many applications in biomedical signal analysis and living system analysis. A fast discriminative stochastic
+<br/>neighbor embedding analysis (FDSNE) method for feature extraction is proposed in this paper by improving the existing DSNE
+<br/>method. The proposed algorithm adopts an alternative probability distribution model constructed based on its K-nearest neighbors
+<br/>from the interclass and intraclass samples. Furthermore, FDSNE is extended to nonlinear scenarios using the kernel trick and
+<br/>then kernel-based methods, that is, KFDSNE1 and KFDSNE2. FDSNE, KFDSNE1, and KFDSNE2 are evaluated in three aspects:
+<br/>visualization, recognition, and elapsed time. Experimental results on several datasets show that, compared with DSNE and MSNP,
+<br/>the proposed algorithm not only significantly enhances the computational efficiency but also obtains higher classification accuracy.
+<br/>1. Introduction
+<br/>In recent years, dimensional reduction which can reduce the
+<br/>curse of dimensionality [1] and remove irrelevant attributes in
+<br/>high-dimensional space plays an increasingly important role
+<br/>in many areas. It promotes the classification, visualization,
+<br/>and compression of the high dimensional data. In machine
+<br/>learning, dimension reduction is used to reduce the dimen-
+<br/>sion by mapping the samples from the high-dimensional
+<br/>space to the low-dimensional space. There are many purposes
+<br/>of studying it: firstly, to reduce the amount of storage, sec-
+<br/>ondly, to remove the influence of noise, thirdly, to understand
+<br/>data distribution easily, and last but not least, to achieve good
+<br/>results in classification or clustering.
+<br/>Currently, many dimensional reduction methods have
+<br/>been proposed, and they can be classified variously from dif-
+<br/>ferent perspectives. Based on the nature of the input data,
+<br/>they are broadly categorized into two classes: linear subspace
+<br/>methods which try to find a linear subspace as feature space
+<br/>so as to preserve certain kind of characteristics of observed
+<br/>data, and nonlinear approaches such as kernel-based tech-
+<br/>niques and geometry-based techniques; from the class labels’
+<br/>perspective, they are divided into supervised learning and
+<br/>unsupervised learning; furthermore, the purpose of the for-
+<br/>mer is to maximize the recognition rate between classes while
+<br/>the latter is for making the minimum of information loss. In
+<br/>addition, judging whether samples utilize local information
+<br/>or global information, we divide them into local method and
+<br/>global method.
+<br/>We briefly introduce several existing dimensional reduc-
+<br/>tion techniques. In the main linear techniques, principal
+<br/>component analysis (PCA) [2] aims at maximizing the vari-
+<br/>ance of the samples in the low-dimensional representation
+<br/>with a linear mapping matrix. It is global and unsupervised.
+<br/>Different from PCA, linear discriminant analysis (LDA) [3]
+<br/>learns a linear projection with the assistance of class labels.
+<br/>It computes the linear transformation by maximizing the
+<br/>amount of interclass variance relative to the amount of intra-
+<br/>class variance. Based on LDA, marginal fisher analysis (MFA)
+<br/>[4], local fisher discriminant analysis (LFDA) [5], and max-
+<br/>min distance analysis (MMDA) [6] are proposed. All of the
+<br/>three are linear supervised dimensional reduction methods.
+<br/>MFA utilizes the intrinsic graph to characterize the intraclass
+<br/>compactness and uses meanwhile the penalty graph to char-
+<br/>acterize interclass separability. LFDA introduces the locality
+<br/>to the LFD algorithm and is particularly useful for samples
+<br/>consisting of intraclass separate clusters. MMDA considers
+<br/>maximizing the minimum pairwise samples of interclass.
+<br/>To deal with nonlinear structural data, which can often be
+<br/>found in biomedical applications [7–10], a number of nonlin-
+<br/>ear approaches have been developed for dimensional reduc-
+<br/>tion. Among these kernel-based techniques and geometry-
+<br/>based techniques are two hot issues. Kernel-based techniques
+</td><td>('1807755', 'Jianwei Zheng', 'jianwei zheng')<br/>('1767635', 'Hong Qiu', 'hong qiu')<br/>('2587047', 'Xinli Xu', 'xinli xu')<br/>('7634945', 'Wanliang Wang', 'wanliang wang')<br/>('1802128', 'Qiongfang Huang', 'qiongfang huang')<br/>('1807755', 'Jianwei Zheng', 'jianwei zheng')</td><td>Correspondence should be addressed to Jianwei Zheng; zjw@zjut.edu.cn
+</td></tr><tr><td>2bab44d3a4c5ca79fb8f87abfef4456d326a0445</td><td>Player Identification in Soccer Videos
+<br/><b>Dipartimento di Sistemi e Informatica, University of Florence</b><br/>Via S. Marta, 3 - 50139 Florence, Italy
+</td><td>('1801509', 'Marco Bertini', 'marco bertini')<br/>('8196487', 'Alberto Del Bimbo', 'alberto del bimbo')<br/>('2308851', 'Walter Nunziati', 'walter nunziati')</td><td>bertini@dsi.unifi.it, delbimbo@dsi.unifi.it, nunziati@dsi.unifi.it
+</td></tr><tr><td>2b0102d77d3d3f9bc55420d862075934f5c85bec</td><td>Slicing Convolutional Neural Network for Crowd Video Understanding
+<br/><b>The Chinese University of Hong Kong</b><br/><b>The Chinese University of Hong Kong</b></td><td>('2205438', 'Jing Shao', 'jing shao')</td><td>jshao@ee.cuhk.edu.hk, ccloy@ie.cuhk.edu.hk, kkang@ee.cuhk.edu.hk, xgwang@ee.cuhk.edu.hk
+</td></tr><tr><td>2b435ee691718d0b55d057d9be4c3dbb8a81526e</td><td>DREUW ET AL.: SURF-FACE RECOGNITION
+<br/>SURF-Face: Face Recognition Under
+<br/>Viewpoint Consistency Constraints
+<br/>Human Language Technology and
+<br/>Pattern Recognition
+<br/><b>RWTH Aachen University</b><br/>Aachen, Germany
+</td><td>('1967060', 'Philippe Dreuw', 'philippe dreuw')<br/>('2044128', 'Pascal Steingrube', 'pascal steingrube')<br/>('1804963', 'Harald Hanselmann', 'harald hanselmann')<br/>('1685956', 'Hermann Ney', 'hermann ney')</td><td>dreuw@cs.rwth-aachen.de
+<br/>steingrube@cs.rwth-aachen.de
+<br/>hanselmann@cs.rwth-aachen.de
+<br/>ney@cs.rwth-aachen.de
+</td></tr><tr><td>2b1327a51412646fcf96aa16329f6f74b42aba89</td><td>Under review as a conference paper at ICLR 2016
+<br/>IMPROVING PERFORMANCE OF RECURRENT NEURAL
+<br/>NETWORK WITH RELU NONLINEARITY
+<br/>Qualcomm Research
+<br/>San Diego, CA 92121, USA
+</td><td>('2390504', 'Sachin S. Talathi', 'sachin s. talathi')</td><td>{stalathi,avartak}@qti.qualcomm.com
+</td></tr><tr><td>2b5cb5466eecb131f06a8100dcaf0c7a0e30d391</td><td>A Comparative Study of Active Appearance Model
+<br/>Annotation Schemes for the Face
+<br/>Face Aging Group
+<br/>UNCW, USA
+<br/>Face Aging Group
+<br/>UNCW, USA
+<br/>Face Aging Group
+<br/>UNCW, USA
+</td><td>('2401418', 'Amrutha Sethuram', 'amrutha sethuram')<br/>('1710348', 'Karl Ricanek', 'karl ricanek')<br/>('37804931', 'Eric Patterson', 'eric patterson')</td><td>sethurama@uncw.edu
+<br/>ricanekk@uncw.edu
+<br/>pattersone@uncw.edu
+</td></tr><tr><td>2b64a8c1f584389b611198d47a750f5d74234426</td><td>Deblurring Face Images with Exemplars
+<br/><b>Dalian University of Technology, Dalian, China</b><br/><b>University of California, Merced, USA</b></td><td>('1786024', 'Zhe Hu', 'zhe hu')<br/>('4642456', 'Zhixun Su', 'zhixun su')<br/>('1715634', 'Ming-Hsuan Yang', 'ming-hsuan yang')</td><td></td></tr><tr><td>2b632f090c09435d089ff76220fd31fd314838ae</td><td>Early Adaptation of Deep Priors in Age Prediction from Face Images
+<br/>Computer Vision Lab
+<br/>D-ITET, ETH Zurich
+<br/>Computer Vision Lab
+<br/>D-ITET, ETH Zurich
+<br/>CVL, D-ITET, ETH Zurich
+<br/>Merantix GmbH
+</td><td>('35647143', 'Mahdi Hajibabaei', 'mahdi hajibabaei')<br/>('5328844', 'Anna Volokitin', 'anna volokitin')<br/>('1732855', 'Radu Timofte', 'radu timofte')</td><td>hmahdi@student.ethz.ch
+<br/>voanna@vision.ee.ethz.ch
+<br/>timofter@vision.ee.ethz.ch
+</td></tr><tr><td>2b10a07c35c453144f22e8c539bf9a23695e85fc</td><td>Standardization of Face Image Sample Quality(cid:63)
+<br/><b>University of Science and Technology of China</b><br/>Hefei 230026, China
+<br/>2Center for Biometrics and Security Research &
+<br/>National Laboratory of Pattern Recognition
+<br/><b>Institute of Automation, Chinese Academy of Sciences, Beijing 100080, China</b><br/>http://www.cbsr.ia.ac.cn
+</td><td>('39609587', 'Xiufeng Gao', 'xiufeng gao')<br/>('34679741', 'Stan Z. Li', 'stan z. li')<br/>('3168566', 'Rong Liu', 'rong liu')<br/>('2777824', 'Peiren Zhang', 'peiren zhang')</td><td></td></tr><tr><td>2b84630680e2c906f8d7ac528e2eb32c99ef203a</td><td>We are not All Equal: Personalizing Models for
+<br/>Facial Expression Analysis
+<br/>with Transductive Parameter Transfer
+<br/><b>DISI, University of Trento, Italy</b><br/><b>DIEI, University of Perugia, Italy</b><br/>3 Fondazione Bruno Kessler (FBK), Italy
+</td><td>('1716310', 'Enver Sangineto', 'enver sangineto')<br/>('2933565', 'Gloria Zen', 'gloria zen')<br/>('40811261', 'Elisa Ricci', 'elisa ricci')<br/>('1703601', 'Nicu Sebe', 'nicu sebe')</td><td></td></tr><tr><td>2b507f659b341ed0f23106446de8e4322f4a3f7e</td><td>Deep Identity-aware Transfer of Facial Attributes
+<br/><b>The Hong Kong Polytechnic University 2Harbin Institute of Technology</b></td><td>('1701799', 'Mu Li', 'mu li')<br/>('1724520', 'Wangmeng Zuo', 'wangmeng zuo')<br/>('1698371', 'David Zhang', 'david zhang')</td><td>csmuli@comp.polyu.edu.hk cswmzuo@gmail.com csdzhang@comp.polyu.edu.hk
+</td></tr><tr><td>2b7ef95822a4d577021df16607bf7b4a4514eb4b</td><td>Emergence of Object-Selective Features in
+<br/>Unsupervised Feature Learning
+<br/>Computer Science Department
+<br/><b>Stanford University</b><br/>Stanford, CA 94305
+</td><td>('5574038', 'Adam Coates', 'adam coates')<br/>('2354728', 'Andrej Karpathy', 'andrej karpathy')<br/>('1701538', 'Andrew Y. Ng', 'andrew y. ng')</td><td>{acoates,karpathy,ang}@cs.stanford.edu
+</td></tr><tr><td>2b8dfbd7cae8f412c6c943ab48c795514d53c4a7</td><td>529
+<br/>2014 IEEE International Conference on Acoustic, Speech and Signal Processing (ICASSP)
+<br/>978-1-4799-2893-4/14/$31.00 ©2014 IEEE
+<br/>RECOGNITION
+<br/>1. INTRODUCTION
+<br/>(d1,d2)∈[0;d]2
+<br/>d1+d2≤d
+</td><td></td><td>e-mail: firstname.lastname@technicolor.com
+<br/>e-mail: firstname.lastname@univ-poitiers.fr
+</td></tr><tr><td>2b869d5551b10f13bf6fcdb8d13f0aa4d1f59fc4</td><td>Ring loss: Convex Feature Normalization for Face Recognition
+<br/>Department of Electrical and Computer Engineering
+<br/><b>Carnegie Mellon University</b></td><td>('3049981', 'Yutong Zheng', 'yutong zheng')<br/>('2628116', 'Dipan K. Pal', 'dipan k. pal')<br/>('1794486', 'Marios Savvides', 'marios savvides')</td><td>{yutongzh, dipanp, marioss}@andrew.cmu.edu
+</td></tr><tr><td>2bae810500388dd595f4ebe992c36e1443b048d2</td><td>International Journal of Bioelectromagnetism
+<br/>Vol. 18, No. 1, pp. 13 - 18, 2016
+<br/>www.ijbem.org
+<br/>Analysis of Facial Expression Recognition
+<br/>by Event-related Potentials
+<br/> Department of Information and Computer Engineering,
+<br/><b>National Institute of Technology, Toyota College, Japan</b><br/><b>Toyota College, 2-1 Eisei, Toyota-shi, Aichi, 471-8525 Japan</b></td><td>('2179262', 'Taichi Hayasaka', 'taichi hayasaka')<br/>('2179262', 'Taichi Hayasaka', 'taichi hayasaka')</td><td>E-mail: hayasaka@toyota-ct.ac.jp, phone +81 565 36 5861, fax +81 565 36 5926
+</td></tr><tr><td>2b42f83a720bd4156113ba5350add2df2673daf0</td><td>Action Recognition and Detection by Combining
+<br/>Motion and Appearance Features
+<br/><b>The Chinese University of Hong Kong</b><br/><b>Shenzhen Key Lab of CVPR, Shenzhen Institutes of Advanced Technology</b><br/>Chinese Academy of Sciences, Shenzhen, China
+</td><td>('33345248', 'Limin Wang', 'limin wang')<br/>('39843569', 'Yu Qiao', 'yu qiao')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td>07wanglimin@gmail.com, yu.qiao@siat.ac.cn, xtang@ie.cuhk.edu.hk
+</td></tr><tr><td>2bbbbe1873ad2800954058c749a00f30fe61ab17</td><td>
+<br/> ISSN(Online): 2320-9801
+<br/> ISSN (Print): 2320-9798
+<br/>International Journal of Innovative Research in Computer and Communication Engineering
+<br/>(An ISO 3297: 2007 Certified Organization)
+<br/>Vol.2, Special Issue 1, March 2014
+<br/>Proceedings of International Conference On Global Innovations In Computing Technology (ICGICT’14)
+<br/>Organized by
+<br/>Department of CSE, JayShriram Group of Institutions, Tirupur, Tamilnadu, India on 6th & 7th March 2014
+<br/>Face Verification across Ages Using Self
+<br/>Organizing Map
+<br/>B.Mahalakshmi1, K.Duraiswamy2, P.Gnanasuganya3, P.Aruldhevi4, R.Sundarapandiyan5
+<br/><b>K.S.Rangasamy College of Technology, Namakkal, TamilNadu, India</b><br/><b>Dean, K.S.Rangasamy College of Technology, Namakkal, TamilNadu, India</b><br/><b>B.E, K.S.Rangasamy College of Technology, Namakkal, TamilNadu, India</b></td><td></td><td></td></tr><tr><td>2baec98c19804bf19b480a9a0aa814078e28bb3d</td><td></td><td></td><td></td></tr><tr><td>47fdbd64edd7d348713253cf362a9c21f98e4296</td><td>FACIAL POINT DETECTION BASED ON A CONVOLUTIONAL NEURAL NETWORK WITH
+<br/>OPTIMAL MINI-BATCH PROCEDURE
+<br/><b>Chubu University</b><br/>1200, Matsumoto-cho, Kasugai, AICHI
+</td><td>('2488607', 'Masatoshi Kimura', 'masatoshi kimura')<br/>('35008538', 'Yuji Yamauchi', 'yuji yamauchi')</td><td></td></tr><tr><td>47382cb7f501188a81bb2e10cfd7aed20285f376</td><td>Articulated Pose Estimation Using Hierarchical Exemplar-Based Models
+<br/><b>Columbia University in the City of New York</b></td><td>('2454675', 'Jiongxin Liu', 'jiongxin liu')<br/>('3173493', 'Yinxiao Li', 'yinxiao li')</td><td>{liujx09, yli, allen, belhumeur}@cs.columbia.edu
+</td></tr><tr><td>473366f025c4a6e0783e6174ca914f9cb328fe70</td><td>Review of
+<br/>Action Recognition and Detection
+<br/>Methods
+<br/>Department of Electrical Engineering and Computer Science
+<br/><b>York University</b><br/>Toronto, Ontario
+<br/>Canada
+</td><td>('1709096', 'Richard P. Wildes', 'richard p. wildes')</td><td></td></tr><tr><td>477236563c6a6c6db922045453b74d3f9535bfa1</td><td>International Journal of Science and Research (IJSR)
+<br/>ISSN (Online): 2319-7064
+<br/>Index Copernicus Value (2013): 6.14 | Impact Factor (2014): 5.611
+<br/>Attribute Based Image Search Re-Ranking
+<br/>Snehal S Patil1, Ajay Dani2
+<br/><b>Master of Computer Engg, Savitribai Phule Pune University, G. H. Raisoni Collage of Engg and Technology, Wagholi, Pune</b><br/><b>G. H .Raisoni Collage of Engg and Technology, Wagholi, Pune</b><br/>integrating
+<br/>images by
+</td><td></td><td></td></tr><tr><td>4793f11fbca4a7dba898b9fff68f70d868e2497c</td><td>Kinship Verification through Transfer Learning
+<br/>Siyu Xia∗
+<br/>CSE, SUNY at Buffalo, USA
+<br/><b>and Southeast University, China</b><br/>CSE
+<br/>CSE
+<br/>SUNY at Buffalo, USA
+<br/>SUNY at Buffalo, USA
+</td><td>('2025056', 'Ming Shao', 'ming shao')<br/>('1708679', 'Yun Fu', 'yun fu')</td><td>xsy@seu.edu.cn
+<br/>mingshao@buffalo.edu
+<br/>yunfu@buffalo.edu
+</td></tr><tr><td>470dbd3238b857f349ebf0efab0d2d6e9779073a</td><td>Unsupervised Simultaneous Orthogonal Basis Clustering Feature Selection
+<br/>School of Electrical Engineering, KAIST, South Korea
+<br/>In this paper, we propose a novel unsupervised feature selection method: Si-
+<br/>multaneous Orthogonal basis Clustering Feature Selection (SOCFS). To per-
+<br/>form feature selection on unlabeled data effectively, a regularized regression-
+<br/>based formulation with a new type of target matrix is designed. The target
+<br/>matrix captures latent cluster centers of the projected data points by per-
+<br/>forming the orthogonal basis clustering, and then guides the projection ma-
+<br/>trix to select discriminative features. Unlike the recent unsupervised feature
+<br/>selection methods, SOCFS does not explicitly use the pre-computed local
+<br/>structure information for data points represented as additional terms of their
+<br/>objective functions, but directly computes latent cluster information by the
+<br/>target matrix conducting orthogonal basis clustering in a single unified term
+<br/>of the proposed objective function.
+<br/>Since the target matrix is put in a single unified term for regression of
+<br/>the proposed objective function, feature selection and clustering are simul-
+<br/>taneously performed. In this way, the projection matrix for feature selection
+<br/>is more properly computed by the estimated latent cluster centers of the
+<br/>projected data points. To the best of our knowledge, this is the first valid
+<br/>formulation to consider feature selection and clustering together in a sin-
+<br/>gle unified term of the objective function. The proposed objective function
+<br/>has fewer parameters to tune and does not require complicated optimization
+<br/>tools so just a simple optimization algorithm is sufficient. Substantial ex-
+<br/>periments are performed on several publicly available real world datasets,
+<br/>which shows that SOCFS outperforms various unsupervised feature selec-
+<br/>tion methods and that latent cluster information by the target matrix is ef-
+<br/>fective for regularized regression-based feature selection.
+<br/>Problem Formulation: Given training data, let X = [x1, . . . ,xn] ∈ Rd×n
+<br/>denote the data matrix with n instances where dimension is d and T =
+<br/>[t1, . . . ,tn] ∈ Rm×n denote the corresponding target matrix where dimension
+<br/>is m. We start from the regularized regression-based formulation to select
+<br/>maximum r features is minW (cid:107)WT X− T(cid:107)2
+<br/>s.t. (cid:107)W(cid:107)2,0 ≤ r. To exploit
+<br/>such formulation on unlabeled data more effectively, it is crucial for the tar-
+<br/>get matrix T to have discriminative destinations for projected clusters. To
+<br/>this end, a new type of target matrix T is proposed to conduct clustering di-
+<br/>rectly on the projected data points WT X. We allow extra degrees of freedom
+<br/>to T by decomposing it into two other matrices B ∈ Rm×c and E ∈ Rn×c as
+<br/>T = BET with additional constraints as
+<br/>(1)
+<br/>F + λ(cid:107)W(cid:107)2,1
+<br/>(cid:107)WT X− BET(cid:107)2
+<br/>s.t. BT B = I, ET E = I, E ≥ 0,
+<br/>min
+<br/>W,B,E
+<br/>where λ > 0 is a weighting parameter for the relaxed regularizer (cid:107)W(cid:107)2,1
+<br/>that induces row sparsity of the projection matrix W. The meanings of the
+<br/>constraints BT B = I,ET E = I,E ≥ 0 are as follows: 1) the orthogonal con-
+<br/>straint of B lets each column of B be independent; 2) the orthogonal and
+<br/>the nonnegative constraint of E make each row of E has only one non-zero
+<br/>element [2]. From 1) and 2), we can clearly interpret B as the basis matrix,
+<br/>which has orthogonality and E as the encoding matrix, where the non-zero
+<br/>element of each column of ET selects one column in B.
+<br/>While optimizing problem (1), T = BET acts like clustering of projected
+<br/>data points WT X with orthogonal basis B and encoder E, so T can estimate
+<br/>latent cluster centers of the WT X. Then, W successively projects X close
+<br/>to corresponding latent cluster centers, which are estimated by T. Note that
+<br/>the orthogonal constraint of B makes each projected cluster in WT X be sep-
+<br/>arated (independent of each other), and it helps W to be a better projection
+<br/>matrix for selecting more discriminative features. If the clustering is directly
+<br/>performed on X not on WT X, the orthogonal constraint of B extremely re-
+<br/>stricts the degree of freedom of B. However, since features are selected by
+<br/>W and the clustering is carried out on WT X in our formulation, so the or-
+<br/>thogonal constraint of B is highly reasonable. A schematic illustration of
+<br/>the proposed method is shown in Figure 1.
+</td><td>('2086576', 'Dongyoon Han', 'dongyoon han')<br/>('1769295', 'Junmo Kim', 'junmo kim')</td><td></td></tr><tr><td>473031328c58b7461753e81251379331467f7a69</td><td>Exploring Fisher Vector and Deep Networks for Action Spotting
+<br/><b>Shenzhen key lab of Comp. Vis. and Pat. Rec., Shenzhen Institutes of Advanced Technology, CAS, China</b><br/><b>The Chinese University of Hong Kong</b></td><td>('1915826', 'Zhe Wang', 'zhe wang')<br/>('33345248', 'Limin Wang', 'limin wang')<br/>('35031371', 'Wenbin Du', 'wenbin du')<br/>('33427555', 'Yu Qiao', 'yu qiao')</td><td>buptwangzhe2012@gmail.com, 07wanglimin@gmail.com, wb.du@siat.ac.cn, yu.qiao@siat.ac.cn
+</td></tr><tr><td>47638197d83a8f8174cdddc44a2c7101fa8301b7</td><td>Object-Centric Anomaly Detection by Attribute-Based Reasoning
+<br/><b>Rutgers University</b><br/><b>University of Washington</b><br/>Ahmed Elgammal
+<br/><b>Rutgers University</b></td><td>('3139794', 'Babak Saleh', 'babak saleh')<br/>('2270286', 'Ali Farhadi', 'ali farhadi')</td><td>babaks@cs.rutgers.edu
+<br/>ali@cs.uw.edu
+<br/>elgammal@cs.rutgers.edu
+</td></tr><tr><td>47541d04ec24662c0be438531527323d983e958e</td><td>Affective Information Processing
+</td><td></td><td></td></tr><tr><td>476f177b026830f7b31e94bdb23b7a415578f9a4</td><td>INTRA-CLASS MULTI-OUTPUT REGRESSION BASED SUBSPACE ANALYSIS
+<br/><b>University of California Santa Barbara</b><br/><b>University of California Santa Barbara</b></td><td>('32919393', 'Swapna Joshi', 'swapna joshi')</td><td>(cid:63){karthikeyan,swapna,manj}@ece.ucsb.edu
+<br/>†{grafton}@psych.ucsb.edu
+</td></tr><tr><td>474b461cd12c6d1a2fbd67184362631681defa9e</td><td>2014 IEEE International
+<br/>Conference on Systems, Man
+<br/>and Cybernetics
+<br/>(SMC 2014)
+<br/>San Diego, California, USA
+<br/>5-8 October 2014
+<br/>Pages 1-789
+<br/>IEEE Catalog Number:
+<br/>ISBN:
+<br/>CFP14SMC-POD
+<br/>978-1-4799-3841-4
+<br/>1/5
+</td><td></td><td></td></tr><tr><td>472ba8dd4ec72b34e85e733bccebb115811fd726</td><td>Cosine Similarity Metric Learning
+<br/>for Face Verication
+<br/><b>School of Computer Science, University of Nottingham</b><br/>Jubilee Campus, Wollaton Road, Nottingham, NG8 1BB, UK
+<br/>http://www.nottingham.ac.uk/cs/
+</td><td>('2243665', 'Hieu V. Nguyen', 'hieu v. nguyen')<br/>('1735386', 'Li Bai', 'li bai')</td><td>{vhn,bai}@cs.nott.ac.uk
+</td></tr><tr><td>47ca2df3d657d7938d7253bed673505a6a819661</td><td><b>UNIVERSITY OF CALIFORNIA</b><br/>Santa Barbara
+<br/>Facial Expression Analysis on Manifolds
+<br/>A Dissertation submitted in partial satisfaction of the
+<br/>requirements for the degree Doctor of Philosophy
+<br/>in Computer Science
+<br/>by
+<br/>Committee in charge:
+<br/>Professor B.S. Manjunath
+<br/>September 2006
+</td><td>('13303219', 'Ya Chang', 'ya chang')<br/>('1752714', 'Matthew Turk', 'matthew turk')<br/>('1706938', 'Yuan-Fang Wang', 'yuan-fang wang')<br/>('2875421', 'Andy Beall', 'andy beall')</td><td></td></tr><tr><td>47d4838087a7ac2b995f3c5eba02ecdd2c28ba14</td><td>JOURNAL OF IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. X, XXX 2017
+<br/>Automatic Recognition of Facial Displays of
+<br/>Unfelt Emotions
+<br/>Escalera, Xavier Bar´o, Sylwia Hyniewska, Member, IEEE, J¨uri Allik,
+</td><td>('38370357', 'Kaustubh Kulkarni', 'kaustubh kulkarni')<br/>('22197083', 'Ciprian Adrian Corneanu', 'ciprian adrian corneanu')<br/>('22211769', 'Ikechukwu Ofodile', 'ikechukwu ofodile')<br/>('3087532', 'Gholamreza Anbarjafari', 'gholamreza anbarjafari')</td><td></td></tr><tr><td>47eba2f95679e106e463e8296c1f61f6ddfe815b</td><td>Deep Co-occurrence Feature Learning for Visual Object Recognition
+<br/><b>Research Center for Information Technology Innovation, Academia Sinica</b><br/><b>National Taiwan University</b><br/><b>Graduate Institute of Electronics Engineering, National Taiwan University</b><br/><b>Smart Network System Institute, Institute for Information Industry</b></td><td>('28990604', 'Ya-Fang Shih', 'ya-fang shih')<br/>('28982867', 'Yang-Ming Yeh', 'yang-ming yeh')<br/>('1744044', 'Yen-Yu Lin', 'yen-yu lin')<br/>('34779427', 'Ming-Fang Weng', 'ming-fang weng')<br/>('2712675', 'Yi-Chang Lu', 'yi-chang lu')<br/>('37761361', 'Yung-Yu Chuang', 'yung-yu chuang')</td><td></td></tr><tr><td>47a2727bd60e43f3253247b6d6f63faf2b67c54b</td><td>Semi-supervised Vocabulary-informed Learning
+<br/>Disney Research
+</td><td>('35782003', 'Yanwei Fu', 'yanwei fu')<br/>('14517812', 'Leonid Sigal', 'leonid sigal')</td><td>y.fu@qmul.ac.uk, lsigal@disneyresearch.com
+</td></tr><tr><td>47d3b923730746bfaabaab29a35634c5f72c3f04</td><td>ISSN : 2248-9622, Vol. 7, Issue 7, ( Part -3) July 2017, pp.30-38
+<br/>RESEARCH ARTICLE
+<br/> OPEN ACCESS
+<br/>Real-Time Facial Expression Recognition App Development on
+<br/>Smart Phones
+<br/><b>Florida Institute Of Technology, Melbourne Fl</b><br/>USA
+</td><td>('7155812', 'Humaid Alshamsi', 'humaid alshamsi')<br/>('7155812', 'Humaid Alshamsi', 'humaid alshamsi')</td><td></td></tr><tr><td>47190d213caef85e8b9dd0d271dbadc29ed0a953</td><td>The Devil of Face Recognition is in the Noise
+<br/>1 SenseTime Research
+<br/><b>University of California San Diego</b><br/><b>Nanyang Technological University</b></td><td>('1682816', 'Fei Wang', 'fei wang')<br/>('3203648', 'Liren Chen', 'liren chen')<br/>('46651787', 'Cheng Li', 'cheng li')<br/>('1937119', 'Shiyao Huang', 'shiyao huang')<br/>('47557603', 'Yanjie Chen', 'yanjie chen')<br/>('49215552', 'Chen Qian', 'chen qian')<br/>('1717179', 'Chen Change Loy', 'chen change loy')</td><td>{wangfei, chengli, huangshiyao, chenyanjie, qianchen}@sensetime.com,
+<br/>lic002@eng.ucsd.edu, ccloy@ieee.org
+</td></tr><tr><td>47e3029a3d4cf0a9b0e96252c3dc1f646e750b14</td><td>International Conference on Computer Systems and Technologies - CompSysTech’07
+<br/>Facial Expression Recognition in still pictures and videos using Active
+<br/>Appearance Models. A comparison approach.
+<br/>Drago(cid:1) Datcu
+<br/>Léon Rothkrantz
+</td><td></td><td></td></tr><tr><td>475e16577be1bfc0dd1f74f67bb651abd6d63524</td><td>DAiSEE: Towards User Engagement Recognition in the Wild
+<br/>Microsoft
+<br/>Vineeth N Balasubramanian
+<br/>Indian Institution of Technology Hyderabad
+</td><td>('38330340', 'Abhay Gupta', 'abhay gupta')</td><td>abhgup@microsoft.com
+<br/>vineethnb@iith.ac.in
+</td></tr><tr><td>471befc1b5167fcfbf5280aa7f908eff0489c72b</td><td>570
+<br/>Class-Specific Kernel-Discriminant
+<br/>Analysis for Face Verification
+<br/>class problems (
+</td><td>('2123731', 'Georgios Goudelis', 'georgios goudelis')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')<br/>('1737071', 'Anastasios Tefas', 'anastasios tefas')<br/>('1698588', 'Ioannis Pitas', 'ioannis pitas')</td><td></td></tr><tr><td>47f8b3b3f249830b6e17888df4810f3d189daac1</td><td></td><td></td><td></td></tr><tr><td>47e8db3d9adb79a87c8c02b88f432f911eb45dc5</td><td>MAGMA: Multi-level accelerated gradient mirror descent algorithm for
+<br/>large-scale convex composite minimization
+<br/>July 15, 2016
+</td><td>('39984225', 'Vahan Hovhannisyan', 'vahan hovhannisyan')<br/>('3112745', 'Panos Parpas', 'panos parpas')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')</td><td></td></tr><tr><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td><td></td><td></td><td></td></tr><tr><td>47dabb566f2bdd6b3e4fa7efc941824d8b923a13</td><td>Probabilistic Temporal Head Pose Estimation
+<br/>Using a Hierarchical Graphical Model
+<br/><b>Centre for Intelligent Machines, McGill University, Montreal, Canada</b></td><td>('2515930', 'Meltem Demirkus', 'meltem demirkus')<br/>('1724729', 'Doina Precup', 'doina precup')<br/>('1713608', 'James J. Clark', 'james j. clark')<br/>('1699104', 'Tal Arbel', 'tal arbel')</td><td></td></tr><tr><td>47f5f740e225281c02c8a2ae809be201458a854f</td><td>Simultaneous Unsupervised Learning of Disparate Clusterings
+<br/><b>University of Texas, Austin, TX 78712-1188, USA</b><br/>Received 14 April 2008; accepted 05 May 2008
+<br/>DOI:10.1002/sam.10007
+<br/>Published online 3 November 2008 in Wiley InterScience (www.interscience.wiley.com).
+</td><td>('3164102', 'Prateek Jain', 'prateek jain')<br/>('1751621', 'Raghu Meka', 'raghu meka')<br/>('1783667', 'Inderjit S. Dhillon', 'inderjit s. dhillon')</td><td></td></tr><tr><td>47bf7a8779c68009ea56a7c20e455ccdf0e3a8fa</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 83 – No 5, December 2013
+<br/>Automatic Face Recognition System using Pattern
+<br/>Recognition Techniques: A Survey
+<br/>Department of Computer Science Department of Computer Science
+<br/><b>Assam University, Silchar-788011 Assam University, Silchar</b></td><td>('37792796', 'Ningthoujam Sunita Devi', 'ningthoujam sunita devi')</td><td></td></tr><tr><td>47b508abdaa5661fe14c13e8eb21935b8940126b</td><td> Volume 4, Issue 12, December 2014 ISSN: 2277 128X
+<br/>International Journal of Advanced Research in
+<br/> Computer Science and Software Engineering
+<br/> Research Paper
+<br/> Available online at: www.ijarcsse.com
+<br/>An Efficient Method for Feature Extraction of Face
+<br/>Recognition Using PCA
+<br/>(M.Tech. Student)
+<br/>Computer Science & Engineering
+<br/><b>Iftm University, Moradabad-244001 U.P</b></td><td>('9247488', 'Tara Prasad Singh', 'tara prasad singh')</td><td></td></tr><tr><td>477811ff147f99b21e3c28309abff1304106dbbe</td><td></td><td></td><td></td></tr><tr><td>47e14fdc6685f0b3800f709c32e005068dfc8d47</td><td></td><td></td><td></td></tr><tr><td>473cbc5ec2609175041e1410bc6602b187d03b23</td><td>Semantic Audio-Visual Data Fusion for Automatic Emotion Recognition
+<br/>Man-Machine Interaction Group
+<br/><b>Delft University of Technology</b><br/>2628 CD, Delft,
+<br/>The Netherlands
+<br/>KEYWORDS
+<br/>Data fusion, automatic emotion recognition, speech analysis,
+<br/>face detection, facial feature extraction, facial characteristic
+<br/>point extraction, Active Appearance Models, Support Vector
+<br/>Machines.
+</td><td>('2866326', 'Dragos Datcu', 'dragos datcu')</td><td>E-mail: {D.Datcu ; L.J.M.Rothkrantz}@tudelft.nl
+</td></tr><tr><td>782188821963304fb78791e01665590f0cd869e8</td><td></td><td></td><td></td></tr><tr><td>78a4cabf0afc94da123e299df5b32550cd638939</td><td></td><td></td><td></td></tr><tr><td>78f08cc9f845dc112f892a67e279a8366663e26d</td><td>TECHNISCHE UNIVERSIT ¨AT M ¨UNCHEN
+<br/>Lehrstuhl f¨ur Mensch-Maschine-Kommunikation
+<br/>Semi-Autonomous Data Enrichment and
+<br/>Optimisation for Intelligent Speech Analysis
+<br/>Vollst¨andiger Abdruck der von der Fakult¨at f¨ur Elektrotechnik und Informationstechnik
+<br/>der Technischen Universit¨at M¨unchen zur Erlangung des akademischen Grades eines
+<br/>Doktor-Ingenieurs (Dr.-Ing.)
+<br/>genehmigten Dissertation.
+<br/>Vorsitzender:
+<br/>Univ.-Prof. Dr.-Ing. habil. Dr. h.c. Alexander W. Koch
+<br/>Pr¨ufer der Dissertation:
+<br/>1.
+<br/>Univ.-Prof. Dr.-Ing. habil. Bj¨orn W. Schuller,
+<br/>Universit¨at Passau
+<br/>2. Univ.-Prof. Gordon Cheng, Ph.D.
+<br/>Die Dissertation wurde am 30.09.2014 bei der Technischen Universit¨at M¨unchen einge-
+<br/>reicht und durch die Fakult¨at f¨ur Elektrotechnik und Informationstechnik am 07.04.2015
+<br/>angenommen.
+</td><td>('1742291', 'Zixing Zhang', 'zixing zhang')</td><td></td></tr><tr><td>78d645d5b426247e9c8f359694080186681f57db</td><td>Gender Classification by LUT Based Boosting
+<br/>of Overlapping Block Patterns
+<br/><b>Tampere University of Technology, Tampere, Finland</b><br/><b>Idiap Research Institute, Martigny, Switzerland</b></td><td>('3350574', 'Rakesh Mehta', 'rakesh mehta')</td><td>rakesh.mehta@tut.fi
+<br/>{manuel.guenther,marcel}@idiap.ch
+</td></tr><tr><td>7862d40da0d4e33cd6f5c71bbdb47377e4c6b95a</td><td>Demography-based Facial Retouching Detection using Subclass Supervised
+<br/>Sparse Autoencoder
+<br/><b>University of Notre Dame, 2IIIT-Delhi</b></td><td>('5014060', 'Aparna Bharati', 'aparna bharati')<br/>('2338122', 'Mayank Vatsa', 'mayank vatsa')<br/>('39129417', 'Richa Singh', 'richa singh')<br/>('1799014', 'Kevin W. Bowyer', 'kevin w. bowyer')<br/>('1743927', 'Xin Tong', 'xin tong')</td><td>1{abharati, kwb, xtong1}@nd.edu, 2{mayank, rsingh}@iiitd.ac.in
+</td></tr><tr><td>783f3fccde99931bb900dce91357a6268afecc52</td><td>Hindawi Publishing Corporation
+<br/>EURASIP Journal on Image and Video Processing
+<br/>Volume 2009, Article ID 945717, 14 pages
+<br/>doi:10.1155/2009/945717
+<br/>Research Article
+<br/>Adapted Active Appearance Models
+<br/>1 SUP ´ELEC/IETR, Avenue de la Boulaie, 35511 Cesson-S´evign´e, France
+<br/>2 Orange Labs—TECH/IRIS, 4 rue du clos courtel, 35 512 Cesson S´evign´e, France
+<br/>Received 5 January 2009; Revised 2 September 2009; Accepted 20 October 2009
+<br/>Recommended by Kenneth M. Lam
+<br/>Active Appearance Models (AAMs) are able to align efficiently known faces under duress, when face pose and illumination are
+<br/>controlled. We propose Adapted Active Appearance Models to align unknown faces in unknown poses and illuminations. Our
+<br/>proposal is based on the one hand on a specific transformation of the active model texture in an oriented map, which changes the
+<br/>AAM normalization process; on the other hand on the research made in a set of different precomputed models related to the most
+<br/>adapted AAM for an unknown face. Tests on public and private databases show the interest of our approach. It becomes possible
+<br/>to align unknown faces in real-time situations, in which light and pose are not controlled.
+<br/>Copyright © 2009 Renaud S´eguier et al. This is an open access article distributed under the Creative Commons Attribution
+<br/>License, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly
+<br/>cited.
+<br/>1. Introduction
+<br/>All applications related to face analysis and synthesis (Man-
+<br/>Machine Interaction, compression in video communication,
+<br/>augmented reality) need to detect and then to align the user’s
+<br/>face. This latest process consists in the precise localization of
+<br/>the eyes, nose, and mouth gravity center. Face detection can
+<br/>now be realized in real time and in a rather efficient manner
+<br/>[1, 2]; the technical bottleneck lies now in the face alignment
+<br/>when it is done in real conditions, which is precisely the
+<br/>object of this paper.
+<br/>Since such Active Appearance Models (AAMs) as those
+<br/>described in [3] exist, it is therefore possible to align faces
+<br/>in real time. The AAMs exploit a set of face examples in
+<br/>order to extract a statistical model. To align an unknown
+<br/>face in new image, the models parameters must be tuned, in
+<br/>order to match the analyzed face features in the best possible
+<br/>way. There is no difficulty to align a face featuring the same
+<br/>characteristics (same morphology, illumination, and pose)
+<br/>as those constituting the example data set. Unfortunately,
+<br/>AAMs are less outstanding when illumination, pose, and
+<br/>face type changes. We suggest in this paper a robust Active
+<br/>Appearance Model allowing a real-time implementation. In
+<br/>the next section, we will survey the different techniques,
+<br/>which aim to increase the AAM robustness. We will see
+<br/>that none of them address at the same time the three types
+<br/>of robustness, we are interested in pose, illumination, and
+<br/>identity. It must be pointed out that we do not consider the
+<br/>robustness against occlusion as [4] does, for example, when
+<br/>a person moves his hand around the face.
+<br/>After a quick introduction of the Active Appearance
+<br/>Models and their limitations (Section 3), we will present our
+<br/>two main contributions in Section 4.1 in order to improve
+<br/>AAM robustness in illumination, pose, and identity. Exper-
+<br/>iments will be conducted and discussed in Section 5 before
+<br/>drawing a conclusion, suggesting new research directions in
+<br/>the last section.
+<br/>2. State of the Art
+<br/>We propose to classify the methods which lead to an increase
+<br/>of the AAM robustness as follows. The specific types of
+<br/>dedicated robustness are in italic.
+<br/>(i) Preprocess
+<br/>(1) Invariant features (illumination)
+<br/>(2) Canonical representation (illumination)
+<br/>(ii) Parameter space extension
+<br/>(1) Light modeling (illumination)
+<br/>(2) 3D modeling (pose)
+</td><td>('3353560', 'Sylvain Le Gallou', 'sylvain le gallou')<br/>('40427923', 'Gaspard Breton', 'gaspard breton')<br/>('34798028', 'Christophe Garcia', 'christophe garcia')</td><td>Correspondence should be addressed to Renaud S´eguier, renaud.seguier@supelec.fr
+</td></tr><tr><td>7897c8a9361b427f7b07249d21eb9315db189496</td><td></td><td></td><td></td></tr><tr><td>7859667ed6c05a467dfc8a322ecd0f5e2337db56</td><td>Web-Scale Transfer Learning for Unconstrained 1:N Face Identification
+<br/>Facebook AI Research
+<br/>Menlo Park, CA 94025, USA
+<br/><b>Tel Aviv University</b><br/>Tel Aviv, Israel
+</td><td>('2188620', 'Yaniv Taigman', 'yaniv taigman')<br/>('32447229', 'Ming Yang', 'ming yang')<br/>('1776343', 'Lior Wolf', 'lior wolf')</td><td>{yaniv, mingyang, ranzato}@fb.com
+<br/>wolf@cs.tau.ac.il
+</td></tr><tr><td>78c1ad33772237bf138084220d1ffab800e1200d</td><td><b>State Key Laboratory of Software Development Environment, Beihang University, P.R.China</b><br/><b>University of Michigan, Ann Arbor</b></td><td>('48545182', 'Lei Huang', 'lei huang')<br/>('8342699', 'Jia Deng', 'jia deng')</td><td></td></tr><tr><td>78436256ff8f2e448b28e854ebec5e8d8306cf21</td><td>Measuring and Understanding Sensory Representations within
+<br/>Deep Networks Using a Numerical Optimization Framework
+<br/><b>Harvard University, Cambridge, MA</b><br/>USA
+<br/><b>Center for Brain Science, Harvard University, Cambridge, MA, USA</b><br/><b>Harvard University, Cambridge, MA, USA</b></td><td>('1739108', 'Chuan-Yung Tsai', 'chuan-yung tsai')<br/>('2042941', 'David D. Cox', 'david d. cox')</td><td>∗ E-mail: davidcox@fas.harvard.edu
+</td></tr><tr><td>78f438ed17f08bfe71dfb205ac447ce0561250c6</td><td></td><td></td><td></td></tr><tr><td>78f79c83b50ff94d3e922bed392737b47f93aa06</td><td>The Computer Expression Recognition Toolbox (CERT)
+<br/>Mark Frank3, Javier Movellan1, and Marian Bartlett1
+<br/><b>Machine Perception Laboratory, University of California, San Diego</b><br/><b>University of Arizona</b><br/><b>University of Buffalo</b></td><td>('2724380', 'Gwen Littlewort', 'gwen littlewort')<br/>('1775637', 'Jacob Whitehill', 'jacob whitehill')<br/>('4072965', 'Tingfan Wu', 'tingfan wu')</td><td>{gwen, jake, ting, movellan}@mplab.ucsd.edu,
+<br/>ianfasel@cs.arizona.edu, mfrank83@buffalo.edu, marni@salk.edu
+</td></tr><tr><td>78fede85d6595e7a0939095821121f8bfae05da6</td><td>KSII TRANSACTIONS ON INTERNET AND INFORMATION SYSTEMS VOL. 9, NO. 2, Feb. 2015 742
+<br/>Copyright © 2015 KSII
+<br/>Discriminant Metric Learning Approach for
+<br/>Face Verification
+<br/>1 Department of Computer Science and Information Engineering
+<br/><b>National Kaohsiung University of Applied Sciences, Kaohsiung, Kaohsiung, Taiwan, ROC</b><br/>2 Department of Computer Science and Information Engineering
+<br/><b>National Cheng Kung University, Tainan, Taiwan, ROC</b><br/>Received September 3, 2014; revised November 12, 2014; accepted December 13, 2014;
+<br/> published February 28, 2015
+</td><td>('37284667', 'Ju-Chin Chen', 'ju-chin chen')<br/>('36612683', 'Pei-Hsun Wu', 'pei-hsun wu')<br/>('3461535', 'Jenn-Jier James Lien', 'jenn-jier james lien')<br/>('37284667', 'Ju-Chin Chen', 'ju-chin chen')</td><td>[e-mail: jc.chen@cc.kuas.edu.tw]
+<br/>[e-mail: jjlien@csie.ncku.edu.tw]
+</td></tr><tr><td>78598e7005f7c96d64cc47ff47e6f13ae52245b8</td><td>Hand2Face: Automatic Synthesis and Recognition of Hand Over Face Occlusions
+<br/>Synthetic Reality Lab
+<br/>Department of Computer Science
+<br/><b>University of Central Florida</b><br/>Orlando, Florida
+<br/>Synthetic Reality Lab
+<br/>Department of Computer Science
+<br/><b>University of Central Florida</b><br/>Orlando, Florida
+<br/>Tadas Baltruˇsaitis
+<br/><b>Language Technology Institute</b><br/>School of Computer Science
+<br/><b>Carnegie Mellon University</b><br/>Pittsburgh, PA
+<br/><b>Language Technology Institute</b><br/>School of Computer Science
+<br/><b>Carnegie Mellon University</b><br/>Pittsburgh, PA
+</td><td>('2974242', 'Behnaz Nojavanasghari', 'behnaz nojavanasghari')<br/>('32827434', 'Charles E. Hughes', 'charles e. hughes')<br/>('1767184', 'Louis-Philippe Morency', 'louis-philippe morency')</td><td>Email: behnaz@eecs.ucf.edu
+<br/>Email: ceh@cs.ucf.edu
+<br/>Email: tbaltrus@cs.cmu.edu
+<br/>Email: morency@cs.cmu.edu
+</td></tr><tr><td>7862f646d640cbf9f88e5ba94a7d642e2a552ec9</td><td>Being John Malkovich
+<br/><b>University of Washington</b><br/>2 Adobe Systems
+<br/>3 Google Inc.
+</td><td>('2419955', 'Ira Kemelmacher-Shlizerman', 'ira kemelmacher-shlizerman')<br/>('40416141', 'Aditya Sankar', 'aditya sankar')<br/>('2177801', 'Eli Shechtman', 'eli shechtman')<br/>('1679223', 'Steven M. Seitz', 'steven m. seitz')</td><td>{kemelmi,aditya,seitz}@cs.washington.edu
+<br/>elishe@adobe.com
+</td></tr><tr><td>78a11b7d2d7e1b19d92d2afd51bd3624eca86c3c</td><td></td><td></td><td></td></tr><tr><td>78a4eb59ec98994bebcf3a5edf9e1d34970c45f6</td><td>Conveying Shape and Features with Image-Based Relighting
+<br/><b>Stanford University</b><br/><b>Stanford University</b><br/><b>Stanford University</b><br/><b>Stanford University</b><br/>Microsoft Research
+<br/><b>Stanford University</b></td><td>('36475465', 'David Akers', 'david akers')<br/>('1967534', 'Frank Losasso', 'frank losasso')<br/>('37133509', 'John Rick', 'john rick')<br/>('2367620', 'Jeff Klingner', 'jeff klingner')<br/>('1820412', 'Maneesh Agrawala', 'maneesh agrawala')<br/>('1689128', 'Pat Hanrahan', 'pat hanrahan')</td><td></td></tr><tr><td>781c2553c4ed2a3147bbf78ad57ef9d0aeb6c7ed</td><td>Int J Comput Vis
+<br/>DOI 10.1007/s11263-017-1023-9
+<br/>Tubelets: Unsupervised Action Proposals from Spatiotemporal
+<br/>Super-Voxels
+<br/>Cees G. M. Snoek1
+<br/>Received: 25 June 2016 / Accepted: 18 May 2017
+<br/>© The Author(s) 2017. This article is an open access publication
+</td><td>('40027484', 'Mihir Jain', 'mihir jain')<br/>('1681054', 'Hervé Jégou', 'hervé jégou')</td><td></td></tr><tr><td>78174c2be084e67f48f3e8ea5cb6c9968615a42c</td><td>Periocular Recognition Using CNN Features
+<br/>Off-the-Shelf
+<br/><b>School of Information Technology (ITE), Halmstad University, Box 823, 30118 Halmstad, Sweden</b></td><td>('51446244', 'Kevin Hernandez-Diaz', 'kevin hernandez-diaz')<br/>('2847751', 'Fernando Alonso-Fernandez', 'fernando alonso-fernandez')<br/>('5058247', 'Josef Bigun', 'josef bigun')</td><td>Email: kevin.hernandez-diaz@hh.se, feralo@hh.se, josef.bigun@hh.se
+</td></tr><tr><td>78df7d3fdd5c32f037fb5cc2a7c104ac1743d74e</td><td>TEMPORAL PYRAMID POOLING CNN FOR ACTION RECOGNITION
+<br/>Temporal Pyramid Pooling Based Convolutional
+<br/>Neural Network for Action Recognition
+</td><td>('40378631', 'Peng Wang', 'peng wang')<br/>('2572430', 'Yuanzhouhan Cao', 'yuanzhouhan cao')<br/>('40529029', 'Chunhua Shen', 'chunhua shen')<br/>('2161037', 'Lingqiao Liu', 'lingqiao liu')<br/>('1724393', 'Heng Tao Shen', 'heng tao shen')</td><td></td></tr><tr><td>780557daaa39a445b24c41f637d5fc9b216a0621</td><td>Large Video Event Ontology Browsing, Search and
+<br/>Tagging (EventNet Demo)
+<br/><b>Columbia University, New York, NY 10027, USA</b></td><td>('2368325', 'Hongliang Xu', 'hongliang xu')<br/>('35984288', 'Guangnan Ye', 'guangnan ye')<br/>('2664705', 'Yitong Li', 'yitong li')<br/>('40313086', 'Dong Liu', 'dong liu')<br/>('9546964', 'Shih-Fu Chang', 'shih-fu chang')</td><td>{hx2168, gy2179, yl3029, dl2713, sc250}@columbia.edu
+</td></tr><tr><td>78fdf2b98cf6380623b0e20b0005a452e736181e</td><td></td><td></td><td></td></tr><tr><td>788a7b59ea72e23ef4f86dc9abb4450efefeca41</td><td></td><td></td><td></td></tr><tr><td>787c1bb6d1f2341c5909a0d6d7314bced96f4681</td><td>Face Detection and Verification in Unconstrained
+<br/>Videos: Challenges, Detection, and Benchmark
+<br/>Evaluation
+<br/>IIIT-D-MTech-CS-GEN-13-106
+<br/>July 16, 2015
+<br/><b>Indraprastha Institute of Information Technology, Delhi</b><br/>Thesis Advisors
+<br/>Dr. Mayank Vatsa
+<br/>Dr. Richa Singh
+<br/>Submitted in partial fulfillment of the requirements
+<br/>for the Degree of M.Tech. in Computer Science
+<br/>c(cid:13) Shah, 2015
+<br/>Keywords: face recognition, face detection, face verification
+</td><td>('25087736', 'Mahek Shah', 'mahek shah')</td><td></td></tr><tr><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td><td>Describing People: A Poselet-Based Approach to Attribute Classification ∗
+<br/>1EECS, U.C. Berkeley, Berkeley, CA 94720
+<br/><b>Adobe Systems, Inc., 345 Park Ave, San Jose, CA</b></td><td>('35208858', 'Subhransu Maji', 'subhransu maji')<br/>('1689212', 'Jitendra Malik', 'jitendra malik')</td><td>{lbourdev,smaji,malik}@eecs.berkeley.edu
+</td></tr><tr><td>8b2c090d9007e147b8c660f9282f357336358061</td><td><b>Lake Forest College</b><br/><b>Lake Forest College Publications</b><br/>Senior Theses
+<br/>4-23-2018
+<br/>Student Publications
+<br/>Emotion Classification based on Expressions and
+<br/>Body Language using Convolutional Neural
+<br/>Networks
+<br/>Follow this and additional works at: https://publications.lakeforest.edu/seniortheses
+<br/>Part of the Neuroscience and Neurobiology Commons
+<br/>Recommended Citation
+<br/>Tanveer, Aasimah S., "Emotion Classification based on Expressions and Body Language using Convolutional Neural Networks"
+<br/>(2018). Senior Theses.
+<br/><b>This Thesis is brought to you for free and open access by the Student Publications at Lake Forest College Publications. It has been accepted for</b><br/><b>inclusion in Senior Theses by an authorized administrator of Lake Forest College Publications. For more information, please contact</b></td><td></td><td>Lake Forest College, tanveeras@lakeforest.edu
+<br/>levinson@lakeforest.edu.
+</td></tr><tr><td>8ba67f45fbb1ce47a90df38f21834db37c840079</td><td>People Search and Activity Mining in Large-Scale
+<br/>Community-Contributed Photos
+<br/><b>National Taiwan University, Taipei, Taiwan</b><br/>Winston H. Hsu, Hong-Yuan Mark Liao
+<br/>Advised by
+</td><td>('35081710', 'Yan-Ying Chen', 'yan-ying chen')</td><td>yanying@cmlab.csie.ntu.edu.tw
+</td></tr><tr><td>8b547b87fd95c8ff6a74f89a2b072b60ec0a3351</td><td>Initial Perceptions of a Casual Game to Crowdsource
+<br/>Facial Expressions in the Wild
+<br/><b>Games Studio, Faculty of Engineering and IT, University of Technology, Sydney</b></td><td>('1733360', 'Chek Tien Tan', 'chek tien tan')<br/>('2117735', 'Hemanta Sapkota', 'hemanta sapkota')<br/>('2823535', 'Daniel Rosser', 'daniel rosser')<br/>('3141633', 'Yusuf Pisan', 'yusuf pisan')</td><td>chek@gamesstudio.org
+<br/>hemanta.sapkota@student.uts.edu.au
+<br/>daniel.j.rosser@gmail.com
+<br/>yusuf.pisan@gamesstudio.org
+</td></tr><tr><td>8bed7ff2f75d956652320270eaf331e1f73efb35</td><td>Emotion Recognition in the Wild using
+<br/>Deep Neural Networks and Bayesian Classifiers
+<br/>Elena Ba(cid:138)ini S¨onmez
+<br/><b>University of Calabria - DeMACS</b><br/>Via Pietro Bucci
+<br/>Rende (CS), Italy
+<br/><b>Plymouth University - CRNS</b><br/>Portland Square PL4 8AA
+<br/>Plymouth, United Kingdom
+<br/>ac.uk
+<br/><b>Istanbul Bilgi University - DCE</b><br/>Eski Silahtaraa Elektrik Santral Kazm
+<br/>Karabekir Cad. No: 2/13 34060 Eyp
+<br/>Istanbul, Turkey
+<br/><b>University of Calabria - DeMACS</b><br/>Via Pietro Bucci
+<br/>Rende (CS), Italy
+<br/><b>Plymouth University - CRNS</b><br/>Portland Square PL4 8AA
+<br/>Plymouth, United Kingdom
+</td><td>('32751441', 'Luca Surace', 'luca surace')<br/>('3366919', 'Massimiliano Patacchiola', 'massimiliano patacchiola')<br/>('3205804', 'William Spataro', 'william spataro')<br/>('1692929', 'Angelo Cangelosi', 'angelo cangelosi')</td><td>lucasurace11@gmail.com
+<br/>massimiliano.patacchiola@plymouth.
+<br/>ebsonmez@bilgi.edu.tr
+<br/>william.spataro@unical.it
+<br/>angelo.cangelosi@plymouth.ac.uk
+</td></tr><tr><td>8b7191a2b8ab3ba97423b979da6ffc39cb53f46b</td><td>Search Pruning in Video Surveillance Systems: Efficiency-Reliability Tradeoff
+<br/>EURECOM
+<br/>Sophia Antipolis, France
+</td><td>('3299530', 'Antitza Dantcheva', 'antitza dantcheva')<br/>('1688531', 'Petros Elia', 'petros elia')<br/>('1709849', 'Jean-Luc Dugelay', 'jean-luc dugelay')</td><td>{Antitza.Dantcheva, Arun.Singh, Petros.Elia, Jean-Luc.Dugelay}@eurecom.fr
+</td></tr><tr><td>8bf57dc0dd45ed969ad9690033d44af24fd18e05</td><td>Subject-Independent Emotion Recognition from Facial Expressions
+<br/>using a Gabor Feature RBF Neural Classifier Trained with Virtual
+<br/>Samples Generated by Concurrent Self-Organizing Maps
+<br/>VICTOR-EMIL NEAGOE, ADRIAN-DUMITRU CIOTEC
+<br/>Depart. Electronics, Telecommunications & Information Technology
+<br/><b>Polytechnic University of Bucharest</b><br/>Splaiul Independentei No. 313, Sector 6, Bucharest,
+<br/>ROMANIA
+</td><td></td><td>victoremil@gmail.com, adryyandc@yahoo.com
+</td></tr><tr><td>8bf243817112ac0aa1348b40a065bb0b735cdb9c</td><td>LEARNING A REPRESSION NETWORK FOR PRECISE VEHICLE SEARCH
+<br/><b>Institute of Digital Media</b><br/><b>School of Electrical Engineering and Computer Science, Peking University</b><br/>No.5 Yiheyuan Road, 100871, Beijing, China
+</td><td>('17872416', 'Qiantong Xu', 'qiantong xu')<br/>('13318784', 'Ke Yan', 'ke yan')<br/>('1705972', 'Yonghong Tian', 'yonghong tian')</td><td>{xuqiantong, keyan, yhtian}@pku.edu.cn
+</td></tr><tr><td>8bfada57140aa1aa22a575e960c2a71140083293</td><td>Can we match Ultraviolet Face Images against their Visible
+<br/>Counterparts?
+<br/><b>aMILab, LCSEE, West Virginia University, Morgantown, West Virginia, USA</b></td><td>('33240042', 'Neeru Narang', 'neeru narang')<br/>('1731727', 'Thirimachos Bourlai', 'thirimachos bourlai')<br/>('1678573', 'Lawrence A. Hornak', 'lawrence a. hornak')<br/>('11898042', 'Paul D. Coverdell', 'paul d. coverdell')</td><td></td></tr><tr><td>8b8728edc536020bc4871dc66b26a191f6658f7c</td><td></td><td></td><td></td></tr><tr><td>8befcd91c24038e5c26df0238d26e2311b21719a</td><td>A Joint Sequence Fusion Model for Video
+<br/>Question Answering and Retrieval
+<br/>Department of Computer Science and Engineering,
+<br/><b>Seoul National University, Seoul, Korea</b><br/>http://vision.snu.ac.kr/projects/jsfusion/
+</td><td>('7877122', 'Youngjae Yu', 'youngjae yu')<br/>('2175130', 'Jongseok Kim', 'jongseok kim')</td><td>{yj.yu,js.kim}@vision.snu.ac.kr, gunhee@snu.ac.kr
+</td></tr><tr><td>8bbbdff11e88327816cad3c565f4ab1bb3ee20db</td><td>Automatic Semantic Face Recognition
+<br/><b>University of Southampton</b><br/>Southampton, United Kingdom
+</td><td>('19249411', 'Nawaf Yousef Almudhahka', 'nawaf yousef almudhahka')<br/>('1727698', 'Mark S. Nixon', 'mark s. nixon')<br/>('31534955', 'Jonathon S. Hare', 'jonathon s. hare')</td><td>{nya1g14,msn,jsh2}@ecs.soton.ac.uk
+</td></tr><tr><td>8bdf6f03bde08c424c214188b35be8b2dec7cdea</td><td>Exploiting Unintended Feature Leakage in Collaborative Learning∗
+<br/>UCL
+<br/><b>Cornell University</b><br/><b>UCL and Alan Turing Institute</b><br/>Cornell Tech
+</td><td>('2008164', 'Luca Melis', 'luca melis')<br/>('3469125', 'Congzheng Song', 'congzheng song')<br/>('1728207', 'Emiliano De Cristofaro', 'emiliano de cristofaro')<br/>('1723945', 'Vitaly Shmatikov', 'vitaly shmatikov')</td><td>luca.melis.14@alumni.ucl.ac.uk
+<br/>cs2296@cornell.edu
+<br/>e.decristofaro@ucl.ac.uk
+<br/>shmat@cs.cornell.edu
+</td></tr><tr><td>8b744786137cf6be766778344d9f13abf4ec0683</td><td>978-1-4799-9988-0/16/$31.00 ©2016 IEEE
+<br/>2697
+<br/>ICASSP 2016
+</td><td></td><td></td></tr><tr><td>8b10383ef569ea0029a2c4a60cc2d8c87391b4db</td><td>ZHOU,MILLERANDZHANG:AGECLASSIFICATIONUSINGRADONTRANSFORM...
+<br/>Age classification using Radon transform
+<br/>and entropy based scaling SVM
+<br/>Paul Miller1
+<br/><b>The Institute of Electronics</b><br/>Communications
+<br/>and Information Technology
+<br/><b>Queen s University Belfast</b><br/>2 School of Computing
+<br/><b>University of Dundee</b><br/>United Kingdom
+</td><td>('2040772', 'Huiyu Zhou', 'huiyu zhou')<br/>('1744844', 'Jianguo Zhang', 'jianguo zhang')</td><td>h.zhou@ecit.qub.ac.uk
+<br/>p.miller@ecit.qub.ac.uk
+<br/>jgzhang@computing.dundee.ac.uk
+</td></tr><tr><td>8b30259a8ab07394d4dac971f3d3bd633beac811</td><td>Representing Sets of Instances for Visual Recognition
+<br/>1 National Key Laboratory for Novel Software Technology
+<br/><b>Nanjing University, China</b><br/>2 Minieye, Youjia Innovation LLC, China
+</td><td>('1808816', 'Jianxin Wu', 'jianxin wu')<br/>('2226422', 'Bin-Bin Gao', 'bin-bin gao')<br/>('15527784', 'Guoqing Liu', 'guoqing liu')</td><td>∗ wujx2001@nju.edu.cn, gaobb@lamda.nju.edu.cn
+<br/>guoqing@minieye.cc
+</td></tr><tr><td>8b61fdc47b5eeae6bc0a52523f519eaeaadbc8c8</td><td>HU, LIU, LI, LIU: TEMPORAL PERCEPTIVE NETWORK FOR ACTION RECOGNITION
+<br/>Temporal Perceptive Network for
+<br/>Skeleton-Based Action Recognition
+<br/><b>Institute of Computer Science and</b><br/>Technology
+<br/><b>Peking University</b><br/>Beijing, China
+<br/>Sijie Song
+</td><td>('9956463', 'Yueyu Hu', 'yueyu hu')<br/>('49046516', 'Chunhui Liu', 'chunhui liu')<br/>('3128506', 'Yanghao Li', 'yanghao li')<br/>('41127426', 'Jiaying Liu', 'jiaying liu')</td><td>huyy@pku.edu.cn
+<br/>liuchunhui@pku.edu.cn
+<br/>lyttonhao@pku.edu.cn
+<br/>ssj940920@pku.edu.cn
+<br/>liujiaying@pku.edu.cn
+</td></tr><tr><td>8b19efa16a9e73125ab973429eb769d0ad5a8208</td><td>SCAR: Dynamic adaptation for person detection and
+<br/>persistence analysis in unconstrained videos
+<br/>Department of Computer Science
+<br/><b>Stevens Institute of Technology</b><br/>Hoboken, NJ 07030, USA
+</td><td>('2789357', 'George Kamberov', 'george kamberov')<br/>('3219999', 'Matt Burlick', 'matt burlick')<br/>('2283008', 'Lazaros Karydas', 'lazaros karydas')<br/>('3228177', 'Olga Koteoglou', 'olga koteoglou')</td><td>gkambero,mburlick,lkarydas,okoteogl@stevens.edu (cid:63)
+</td></tr><tr><td>8b6fded4d08bf0b7c56966b60562ee096af1f0c4</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 59– No.3, December 2012
+<br/>A Neural Network based Facial Expression Recognition
+<br/>using Fisherface
+<br/>Department of Mathematics
+<br/><b>Semarang State University</b><br/>Semarang, 50229, Indonesia
+<br/>
+</td><td>('39807349', 'Zaenal Abidin', 'zaenal abidin')</td><td></td></tr><tr><td>8bf647fed40bdc9e35560021636dfb892a46720e</td><td>Learning to Hash-tag Videos with Tag2Vec
+<br/>CVIT, KCIS, IIIT Hyderabad, India
+<br/>P J Narayanan
+<br/>http://cvit.iiit.ac.in/research/projects/tag2vec
+<br/>Figure 1. Learning a direct mapping from videos to hash-tags : sample frames from short video clips with user-given hash-tags
+<br/>(left); a sample frame from a query video and hash-tags suggested by our system for this query (right).
+</td><td>('2461059', 'Aditya Singh', 'aditya singh')<br/>('3448416', 'Saurabh Saini', 'saurabh saini')<br/>('1962817', 'Rajvi Shah', 'rajvi shah')</td><td>{(aditya.singh,saurabh.saini,rajvi.shah)@research.,pjn@}iiit.ac.in
+</td></tr><tr><td>8b2704a5218a6ef70e553eaf0a463bd55129b69d</td><td>Sensors 2013, 13, 7714-7734; doi:10.3390/s130607714
+<br/>OPEN ACCESS
+<br/>sensors
+<br/>ISSN 1424-8220
+<br/>www.mdpi.com/journal/sensors
+<br/>Article
+<br/>Geometric Feature-Based Facial Expression Recognition in
+<br/>Image Sequences Using Multi-Class AdaBoost and Support
+<br/>Vector Machines
+<br/><b>Division of Computer Engineering, Chonbuk National University, Jeonju-si, Jeollabuk-do</b><br/>Tel.: +82-63-270-2406; Fax: +82-63-270-2394.
+<br/>Received: 3 May 2013; in revised form: 29 May 2013 / Accepted: 3 June 2013 /
+<br/>Published: 14 June 2013
+</td><td>('32322842', 'Deepak Ghimire', 'deepak ghimire')<br/>('2034182', 'Joonwhoan Lee', 'joonwhoan lee')</td><td>Korea; E-Mail: deep@jbnu.ac.kr
+<br/>* Author to whom correspondence should be addressed; E-Mail: chlee@jbnu.ac.kr;
+</td></tr><tr><td>8bb21b1f8d6952d77cae95b4e0b8964c9e0201b0</td><td>Methoden
+<br/>at 11/2013
+<br/>(cid:2)(cid:2)(cid:2)
+<br/>Multimodale Interaktion
+<br/>auf einer sozialen Roboterplattform
+<br/>Multimodal Interaction on a Social Robotic Platform
+<br/>Zusammenfassung Dieser Beitrag beschreibt die multimo-
+<br/>dalen Interaktionsmöglichkeiten mit der Forschungsroboter-
+<br/>plattform ELIAS. Zunächst wird ein Überblick über die Ro-
+<br/>boterplattform sowie die entwickelten Verarbeitungskompo-
+<br/>nenten gegeben, die Einteilung dieser Komponenten erfolgt
+<br/>nach dem Konzept von wahrnehmenden und agierenden Mo-
+<br/>dalitäten. Anschließend wird das Zusammenspiel der Kom-
+<br/>ponenten in einem multimodalen Spieleszenario näher be-
+<br/>trachtet. (cid:2)(cid:2)(cid:2) Summary
+<br/>This paper presents the mul-
+<br/>timodal
+<br/>interaction capabilities of the robotic research plat-
+<br/>form ELIAS. An overview of the robotic platform as well
+<br/>as the developed processing components is presented, the
+<br/>classification of the components follows the concept of sen-
+<br/>sing and acting modalities. Finally,
+<br/>the interplay between
+<br/>those components within a multimodal gaming scenario is
+<br/>described.
+<br/>Schlagwörter Mensch-Roboter-Interaktion, Multimodalität, Gesten, Blick (cid:2)(cid:2)(cid:2) Keywords Human-robot interaction,
+<br/>multimodal, gestures, gaze
+<br/>1 Einleitung
+<br/>Eine intuitive und natürliche Bedienbarkeit der zuneh-
+<br/>mend komplexeren Technik wird für den Menschen
+<br/>immer wichtiger, da im heutigen Alltag eine Vielzahl an
+<br/>technischen Geräten mit wachsendem Funktionsumfang
+<br/>anzutreffen ist. Unterschiedliche Aktivitäten in der For-
+<br/>schungsgemeinschaft haben sich schon seit längerer Zeit
+<br/>mit verbalen sowie nonverbalen Kommunikationsformen
+<br/>(bspw. Emotions- und Gestenerkennung) in der Mensch-
+<br/>Maschine-Interaktion beschäftigt. Gerade in der jüngeren
+<br/>Zeit trugen auf diesem Forschungsfeld unterschiedliche
+<br/>Innovationen (bspw. Touchscreen, Gestensteuerung im
+<br/>Fernseher) dazu bei, dass intuitive und natürliche Bedien-
+<br/>konzepte mehr und mehr im Alltag Verwendung finden.
+<br/>Auch Möglichkeiten zur Sprach- und Gestensteuerung
+<br/>von Konsolen und Mobiltelefonen finden heute vermehr-
+<br/>ten Einsatz in der Gerätebedienung. Diese natürlicheren
+<br/>und multimodalen Benutzerschnittstellen sind dem Nut-
+<br/>zer schnell zugänglich und erlauben eine intuitivere
+<br/>Interaktion mit komplexen technischen Geräten.
+<br/>Auch für Robotersysteme bietet sich eine multimodale
+<br/>Interaktion an, um die Benutzung und den Zugang zu
+<br/>den Funktionalitäten zu vereinfachen. Der Mensch soll
+<br/>in seiner Kommunikation idealerweise vollkommene Ent-
+<br/>scheidungsfreiheit bei der Wahl der Modalitäten haben,
+<br/>um sein gewünschtes Ziel zu erreichen. Dafür werden
+<br/>in diesem Beitrag die wahrnehmenden und agieren-
+<br/>den Modalitäten einer, rein auf Kommunikationsaspekte
+<br/>reduzierten, Forschungsroboterplattform beispielhaft in
+<br/>einer Spieleanwendung untersucht.
+<br/>1.1 Struktur des Beitrags
+<br/>In diesem Beitrag wird zunächst ein kurzer Über-
+<br/>blick über die multimodale Interaktion im Allgemeinen
+<br/>gegeben, hierbei erfolgt eine Betrachtung nach wahr-
+<br/>nehmenden und agierenden Modalitäten. Im nächsten
+<br/>Abschnitt werden Arbeiten vorgestellt, die sich auch mit
+<br/>multimodalen Robotersystemen beschäftigen. Im darauf
+<br/>folgenden Abschnitt wird die Roboterplattform ELIAS
+<br/>mit den wahrnehmenden, verarbeitenden und agierenden
+<br/>at – Automatisierungstechnik 61 (2013) 11 / DOI 10.1515/auto.2013.1062 © Oldenbourg Wissenschaftsverlag
+<br/> - 10.1515/auto.2013.1062
+<br/>Downloaded from De Gruyter Online at 09/27/2016 10:08:34PM
+<br/>via Technische Universität München
+<br/>737
+</td><td>('35116429', 'Jürgen Blume', 'jürgen blume')<br/>('1682283', 'Tobias Rehrl', 'tobias rehrl')<br/>('1705843', 'Gerhard Rigoll', 'gerhard rigoll')</td><td>Korrespondenzautor: blume@tum.de
+</td></tr><tr><td>8b1db0894a23c4d6535b5adf28692f795559be90</td><td>Biometric and Surveillance Technology for Human and Activity Identification X, edited by Ioannis Kakadiaris,
+<br/>Walter J. Scheirer, Laurence G. Hassebrook, Proc. of SPIE Vol. 8712, 87120Q · © 2013 SPIE
+<br/>CCC code: 0277-786X/13/$18 · doi: 10.1117/12.2018974
+<br/>Proc. of SPIE Vol. 8712 87120Q-1
+</td><td></td><td></td></tr><tr><td>8b2e3805b37c18618b74b243e7a6098018556559</td><td>Workshop track - ICLR 2018
+<br/>IMPROVING VARIATIONAL AUTOENCODER WITH DEEP
+<br/><b>University of Nottingham, Nottingham, UK</b><br/><b>Shenzhen University, Shenzhen, China</b></td><td>('3468964', 'Xianxu Hou', 'xianxu hou')<br/>('1698461', 'Guoping Qiu', 'guoping qiu')</td><td>xianxu.hou@nottingham.edu.cn
+<br/>guoping.qiu@nottingham.ac.uk
+</td></tr><tr><td>8b74252625c91375f55cbdd2e6415e752a281d10</td><td>Using Convolutional 3D Neural Networks for
+<br/>User-Independent Continuous Gesture Recognition
+<br/>Necati Cihan Camgoz, Simon Hadfield
+<br/><b>University of Surrey</b><br/>Guildford, GU2 7XH, UK
+<br/>Human Technology & Pattern Recognition
+<br/><b>RWTH Aachen University, Germany</b><br/><b>University of Surrey</b><br/>Guildford, GU2 7XH, UK
+</td><td>('2309364', 'Oscar Koller', 'oscar koller')<br/>('1695195', 'Richard Bowden', 'richard bowden')</td><td>{n.camgoz, s.hadfield}@surrey.ac.uk
+<br/>koller@cs.rwth-aachen.de
+<br/>r.bowden@surrey.ac.uk
+</td></tr><tr><td>8b38124ff02a9cf8ad00de5521a7f8a9fa4d7259</td><td>Real-time 3D Face Fitting and Texture Fusion
+<br/>on In-the-wild Videos
+<br/>Centre for Vision, Speech and Signal Processing
+<br/>Image Understanding and Interactive Robotics
+<br/><b>University of Surrey</b><br/>Guildford, GU2 7XH, United Kingdom
+<br/>Contact: http://www.patrikhuber.ch
+<br/><b>Reutlingen University</b><br/>D-72762 Reutlingen, Germany
+</td><td>('39976184', 'Patrik Huber', 'patrik huber')<br/>('49759031', 'William Christmas', 'william christmas')<br/>('1748684', 'Josef Kittler', 'josef kittler')<br/>('49330989', 'Philipp Kopp', 'philipp kopp')</td><td></td></tr><tr><td>134f1cee8408cca648d8b4ca44b38b0a7023af71</td><td>Partially Shared Multi-Task Convolutional Neural Network with Local
+<br/>Constraint for Face Attribute Learning
+<br/><b>College of Information Science and Electronic Engineering</b><br/><b>Zhejiang University, China</b></td><td>('41021477', 'Jiajiong Cao', 'jiajiong cao')<br/>('2367491', 'Yingming Li', 'yingming li')<br/>('1720488', 'Zhongfei Zhang', 'zhongfei zhang')</td><td>{jiajiong, yingming, zhongfei}@zju.edu.cn
+</td></tr><tr><td>13719bbb4bb8bbe0cbcdad009243a926d93be433</td><td>Deep LDA-Pruned Nets for Efficient Facial Gender Classification
+<br/><b>McGill University</b><br/><b>University Street, Montral, QC H3A 0E9, Canada</b></td><td>('1992537', 'Qing Tian', 'qing tian')<br/>('1699104', 'Tal Arbel', 'tal arbel')<br/>('1713608', 'James J. Clark', 'james j. clark')</td><td>{qtian,arbel,clark}@cim.mcgill.ca
+</td></tr><tr><td>134db6ca13f808a848321d3998e4fe4cdc52fbc2</td><td>IEEE TRANSACTIONS ON SYSTEMS, MAN, AND CYBERNETICS—PART B: CYBERNETICS, VOL. 36, NO. 2, APRIL 2006
+<br/>433
+<br/>Dynamics of Facial Expression: Recognition of
+<br/>Facial Actions and Their Temporal Segments
+<br/>From Face Profile Image Sequences
+</td><td>('1694605', 'Maja Pantic', 'maja pantic')<br/>('1744405', 'Ioannis Patras', 'ioannis patras')</td><td></td></tr><tr><td>133dd0f23e52c4e7bf254e8849ac6f8b17fcd22d</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI
+<br/>Active Clustering with Model-Based
+<br/>Uncertainty Reduction
+</td><td>('2228109', 'Caiming Xiong', 'caiming xiong')<br/>('34187462', 'David M. Johnson', 'david m. johnson')<br/>('3587688', 'Jason J. Corso', 'jason j. corso')</td><td></td></tr><tr><td>1329206dbdb0a2b9e23102e1340c17bd2b2adcf5</td><td>Part-based R-CNNs for
+<br/>Fine-grained Category Detection
+<br/><b>University of California, Berkeley</b></td><td>('40565777', 'Ning Zhang', 'ning zhang')<br/>('1753210', 'Trevor Darrell', 'trevor darrell')</td><td>{nzhang,jdonahue,rbg,trevor}@eecs.berkeley.edu
+</td></tr><tr><td>1369e9f174760ea592a94177dbcab9ed29be1649</td><td>Geometrical Facial Modeling for Emotion Recognition
+</td><td>('3250085', 'Giampaolo L. Libralon', 'giampaolo l. libralon')</td><td></td></tr><tr><td>133900a0e7450979c9491951a5f1c2a403a180f0</td><td>JOURNAL OF LATEX CLASS FILES
+<br/>Social Grouping for Multi-target Tracking and
+<br/>Head Pose Estimation in Video
+</td><td>('12561781', 'Zhen Qin', 'zhen qin')<br/>('3564227', 'Christian R. Shelton', 'christian r. shelton')</td><td></td></tr><tr><td>13bda03fc8984d5943ed8d02e49a779d27c84114</td><td>Efficient Object Detection Using Cascades of Nearest Convex Model Classifiers
+<br/><b>Eskisehir Osmangazi University</b><br/>Laboratoire Jean Kuntzmann
+<br/>Meselik Kampusu, 26480, Eskisehir Turkey
+<br/>B.P. 53, 38041 Grenoble Cedex 9, France
+</td><td>('2277308', 'Hakan Cevikalp', 'hakan cevikalp')<br/>('1756114', 'Bill Triggs', 'bill triggs')</td><td>hakan.cevikalp@gmail.com
+<br/>Bill.Triggs@imag.fr
+</td></tr><tr><td>13db9466d2ddf3c30b0fd66db8bfe6289e880802</td><td>I.J. Image, Graphics and Signal Processing, 2017, 1, 27-32
+<br/>Published Online January 2017 in MECS (http://www.mecs-press.org/)
+<br/>DOI: 10.5815/ijigsp.2017.01.04
+<br/>Transfer Subspace Learning Model for Face
+<br/>Recognition at a Distance
+<br/>MIT, Pune ,India
+<br/>AISSM’S IOT,India
+<br/><b>College of Engineering Pune, India</b><br/>learning algorithms work
+</td><td>('3335915', 'Alwin Anuse', 'alwin anuse')<br/>('32032353', 'Vibha Vyas', 'vibha vyas')</td><td>Email: alwin.anuse@mitpune.edu.in
+<br/>Email: deshmukhnilima@gmail.com
+<br/>Email: vsv.extc@coep.ac.in
+</td></tr><tr><td>13a994d489c15d440c1238fc1ac37dad06dd928c</td><td>Learning Discriminant Face Descriptor for Face
+<br/>Recognition
+<br/>Center for Biometrics and Security Research & National Laboratory of Pattern
+<br/><b>Recognition, Institute of Automation, Chinese Academy of Sciences</b></td><td>('1718623', 'Zhen Lei', 'zhen lei')<br/>('34679741', 'Stan Z. Li', 'stan z. li')</td><td>fzlei,szlig@nlpr.ia.ac.cn
+</td></tr><tr><td>131178dad3c056458e0400bed7ee1a36de1b2918</td><td>Visual Reranking through Weakly Supervised Multi-Graph Learning
+<br/><b>Xidian University, Xi an, China</b><br/><b>Xiamen University, Xiamen, China</b><br/><b>IBM Watson Research Center, Armonk, NY, USA</b><br/><b>University of Technology, Sydney, Australia</b></td><td>('1715156', 'Cheng Deng', 'cheng deng')<br/>('1725599', 'Rongrong Ji', 'rongrong ji')<br/>('39059457', 'Wei Liu', 'wei liu')<br/>('1692693', 'Dacheng Tao', 'dacheng tao')<br/>('10699750', 'Xinbo Gao', 'xinbo gao')</td><td>{chdeng.xd, jirongrong, wliu.cu, dacheng.tao, xbgao.xidian}@gmail.com
+</td></tr><tr><td>13141284f1a7e1fe255f5c2b22c09e32f0a4d465</td><td>Object Tracking by
+<br/>Oversampling Local Features
+</td><td>('2619131', 'Federico Pernici', 'federico pernici')<br/>('8196487', 'Alberto Del Bimbo', 'alberto del bimbo')</td><td></td></tr><tr><td>132527383890565d18f1b7ad50d76dfad2f14972</td><td>JOURNAL OF INFORMATION SCIENCE AND ENGINEERING 22, 1033-1046 (2006)
+<br/>Facial Expression Classification Using PCA and
+<br/>Hierarchical Radial Basis Function Network*
+<br/>Department of Computer Science and Information Engineering
+<br/><b>National Taipei University</b><br/>Sanshia, 237 Taiwan
+<br/>Intelligent human-computer interaction (HCI) integrates versatile tools such as per-
+<br/>ceptual recognition, machine learning, affective computing, and emotion cognition to
+<br/>enhance the ways humans interact with computers. Facial expression analysis is one of
+<br/>the essential medium of behavior interpretation and emotion modeling. In this paper, we
+<br/>modify and develop a reconstruction method utilizing Principal Component Analysis
+<br/>(PCA) to perform facial expression recognition. A framework of hierarchical radial basis
+<br/>function network (HRBFN) is further proposed to classify facial expressions based on
+<br/>local features extraction by PCA technique from lips and eyes images. It decomposes the
+<br/>acquired data into a small set of characteristic features. The objective of this research is
+<br/>to develop a more efficient approach to discriminate between seven prototypic facial ex-
+<br/>pressions, such as neutral, smile, anger, surprise, fear, disgust, and sadness. A construc-
+<br/>tive procedure is detailed and the system performance is evaluated on a public database
+<br/>“Japanese Females Facial Expression (JAFFE).” We conclude that local images of lips
+<br/>and eyes can be treated as cues for facial expression. As anticipated, the experimental
+<br/>results demonstrate the potential capabilities of the proposed approach.
+<br/>Keywords: intelligent human-computer interaction, facial expression classification, hier-
+<br/>archical radial basis function network, principal component analysis, local features
+<br/>1. INTRODUCTION
+<br/>The intelligent human-computer interaction (HCI) technologies play important roles
+<br/>in the development of advanced and ambient communication/computation. In contrast to
+<br/>the conventional mechanisms of passive manipulation, intelligent HCI integrates versa-
+<br/>tile tools such as perceptual recognition, machine learning, affective computing, and
+<br/>emotion cognition to enhance the ways humans interact with computers. Migrating from
+<br/>W4 (what, where, when, who) to W5+ (what, where, when, who, why, how), novel intel-
+<br/>ligent interface design has placed emphasis on both apparent and internal behavior of
+<br/>users [1]. Nonverbal information such as facial expression, posture, gesture, and eye gaze
+<br/>is suitable for behavior interpretation. Facial data analysis is one of the essential medium
+<br/>of perceptual processing and emotion modeling.
+<br/>Received August 16, 2005; accepted January 17, 2006.
+<br/>Communicated by Jhing-Fa Wang, Pau-Choo Chung and Mark Billinghurst.
+<br/>* This work was supported in part by the National Science Council of Taiwan, R.O.C., under grants No. NSC
+<br/>88-2213-E216-010 and No. NSC 89-2213-E216-016.
+<br/>* The preliminary content of this paper has been presented in “International Conference on Neural Information
+<br/>Processing,” Perth, Australia, November 1999. Acknowledgement also due to Mr. Der-Chen Pan at the Na-
+<br/><b>tional Taipei University for his help in performing simulations. The author would like to thank Mr. Ming</b><br/>Shon Chen at Ulead System Inc., Taipei, Taiwan, for his early work and assistance in this research.
+<br/>1033
+</td><td>('39548632', 'Daw-Tung Lin', 'daw-tung lin')</td><td></td></tr><tr><td>13604bbdb6f04a71dea4bd093794e46730b0a488</td><td>Robust Loss Functions under Label Noise for
+<br/>Deep Neural Networks
+<br/>Microsoft, Bangalore
+<br/><b>Indian Institute of Science, Bangalore</b><br/><b>Indian Institute of Science, Bangalore</b></td><td>('3201314', 'Aritra Ghosh', 'aritra ghosh')<br/>('47602083', 'Himanshu Kumar', 'himanshu kumar')<br/>('1711348', 'P. S. Sastry', 'p. s. sastry')</td><td>arghosh@microsoft.com
+<br/>himanshukr@ee.iisc.ernet.in
+<br/>sastry@ee.iisc.ernet.in
+</td></tr><tr><td>1394ca71fc52db972366602a6643dc3e65ee8726</td><td>See discussions, stats, and author profiles for this publication at: https://www.researchgate.net/publication/308407783
+<br/>EmoReact: A Multimodal Approach and Dataset
+<br/>for Recognizing Emotional Responses in Children
+<br/>Conference Paper · November 2016
+<br/>DOI: 10.1145/2993148.2993168
+<br/>CITATIONS
+<br/>READS
+<br/>95
+<br/>4 authors, including:
+<br/>Behnaz Nojavanasghari
+<br/><b>University of Central Florida</b><br/>4 PUBLICATIONS 20 CITATIONS
+<br/>Tadas Baltrusaitis
+<br/><b>Carnegie Mellon University</b><br/>30 PUBLICATIONS 247 CITATIONS
+<br/>SEE PROFILE
+<br/>SEE PROFILE
+<br/>Charles E. Hughes
+<br/><b>University of Central Florida</b><br/>185 PUBLICATIONS 1,248 CITATIONS
+<br/>SEE PROFILE
+<br/>All in-text references underlined in blue are linked to publications on ResearchGate,
+<br/>letting you access and read them immediately.
+<br/>Available from: Behnaz Nojavanasghari
+<br/>Retrieved on: 13 October 2016
+</td><td></td><td></td></tr><tr><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td><td>Is the Eye Region More Reliable Than the Face? A Preliminary Study of
+<br/>Face-based Recognition on a Transgender Dataset
+<br/><b>Institute of Interdisciplinary Studies in Identity Sciences (IISIS</b><br/><b>University of North Carolina Wilmington</b></td><td>('1805620', 'Gayathri Mahalingam', 'gayathri mahalingam')<br/>('3275890', 'Karl Ricanek', 'karl ricanek')</td><td>{mahalingamg, ricanekk}@uncw.edu
+</td></tr><tr><td>13b1b18b9cfa6c8c44addb9a81fe10b0e89db32a</td><td>A Hierarchical Deep Temporal Model for
+<br/>Group Activity Recognition
+<br/>by
+<br/><b>B. Tech., Indian Institute of Technology Jodhpur</b><br/>Thesis Submitted in Partial Fulfillment of the
+<br/>Requirements for the Degree of
+<br/>Master of Science
+<br/>in the
+<br/>School of Computing Science
+<br/>Faculty of Applied Sciences
+<br/><b>SIMON FRASER UNIVERSITY</b><br/>Spring 2016
+<br/>However, in accordance with the Copyright Act of Canada, this work may be
+<br/>reproduced without authorization under the conditions for “Fair Dealing.”
+<br/>Therefore, limited reproduction of this work for the purposes of private study,
+<br/>research, education, satire, parody, criticism, review and news reporting is likely
+<br/>All rights reserved.
+<br/>to be in accordance with the law, particularly if cited appropriately.
+</td><td>('2716937', 'Srikanth Muralidharan', 'srikanth muralidharan')<br/>('2716937', 'Srikanth Muralidharan', 'srikanth muralidharan')</td><td></td></tr><tr><td>1329bcac5ebd0b08ce33ae1af384bd3e7a0deaca</td><td>Dataset Issues in Object Recognition
+<br/>J. Ponce1,2, T.L. Berg3, M. Everingham4, D.A. Forsyth1, M. Hebert5,
+<br/>S. Lazebnik1, M. Marszalek6, C. Schmid6, B.C. Russell7, A. Torralba7,
+<br/>C.K.I. Williams8, J. Zhang6, and A. Zisserman4
+<br/><b>University of Illinois at Urbana-Champaign, USA</b><br/>2 Ecole Normale Sup´erieure, Paris, France
+<br/><b>University of California at Berkeley, USA</b><br/><b>Oxford University, UK</b><br/><b>Carnegie Mellon University, Pittsburgh, USA</b><br/>6 INRIA Rhˆone-Alpes, Grenoble, France
+<br/>7 MIT, Cambridge, USA
+<br/><b>University of Edinburgh, Edinburgh, UK</b></td><td></td><td></td></tr><tr><td>133da0d8c7719a219537f4a11c915bf74c320da7</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 123 – No.4, August 2015
+<br/>A Novel Method for 3D Image Segmentation with Fusion
+<br/>of Two Images using Color K-means Algorithm
+<br/>Dept. of CSE
+<br/>ITM Universe
+<br/>Gwalior
+<br/>Dept. of CSE
+<br/>ITM Universe
+<br/>Gwalior
+<br/>two
+</td><td></td><td></td></tr><tr><td>13c250fb740cb5616aeb474869db6ab11560e2a6</td><td>LEARNING LANGUAGE–VISION CORRESPONDENCES
+<br/>by
+<br/>A thesis submitted in conformity with the requirements
+<br/>for the degree of Doctor of Philosophy
+<br/>Graduate Department of Computer Science
+<br/><b>University of Toronto</b></td><td>('38986168', 'Michael Jamieson', 'michael jamieson')<br/>('38986168', 'Michael Jamieson', 'michael jamieson')</td><td></td></tr><tr><td>13940d0cc90dbf854a58f92d533ce7053aac024a</td><td><b>Boston University</b><br/>OpenBU
+<br/>Theses & Dissertations
+<br/>http://open.bu.edu
+<br/><b>Boston University Theses and Dissertations</b><br/>2015
+<br/>Local learning by partitioning
+<br/>http://hdl.handle.net/2144/15204
+<br/><b>Boston University</b></td><td>('2870611', 'Wang', 'wang')<br/>('17099457', 'Joseph', 'joseph')</td><td></td></tr><tr><td>133f01aec1534604d184d56de866a4bd531dac87</td><td>Effective Unconstrained Face Recognition by
+<br/>Combining Multiple Descriptors and Learned
+<br/>Background Statistics
+</td><td>('1776343', 'Lior Wolf', 'lior wolf')<br/>('1756099', 'Tal Hassner', 'tal hassner')<br/>('2188620', 'Yaniv Taigman', 'yaniv taigman')</td><td></td></tr><tr><td>131bfa2ae6a04fd3b921ccb82b1c3f18a400a9c1</td><td>Elastic Graph Matching versus Linear Subspace
+<br/>Methods for Frontal Face Verification
+<br/>Dept. of Informatics
+<br/><b>Aristotle University of Thessaloniki, Box 451, 54124 Thessaloniki, Greece</b><br/>Tel: +30-2310-996361, Fax: +30-2310-998453
+</td><td>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')<br/>('1737071', 'Anastasios Tefas', 'anastasios tefas')<br/>('1698588', 'Ioannis Pitas', 'ioannis pitas')</td><td>E-mail: pitas@zeus.csd.auth.gr
+</td></tr><tr><td>13841d54c55bd74964d877b4b517fa94650d9b65</td><td>Generalised Ambient Reflection Models for Lambertian and
+<br/>Phong Surfaces
+<br/>Author
+<br/>Zhang, Paul, Gao, Yongsheng
+<br/>Published
+<br/>2009
+<br/>Conference Title
+<br/>Proceedings of the 2009 IEEE International Conference on Image Processing (ICIP 2009)
+<br/>DOI
+<br/>https://doi.org/10.1109/ICIP.2009.5413812
+<br/>Copyright Statement
+<br/>© 2009 IEEE. Personal use of this material is permitted. However, permission to reprint/
+<br/>republish this material for advertising or promotional purposes or for creating new collective
+<br/>works for resale or redistribution to servers or lists, or to reuse any copyrighted component of
+<br/>this work in other works must be obtained from the IEEE.
+<br/>Downloaded from
+<br/>http://hdl.handle.net/10072/30001
+<br/>Griffith Research Online
+<br/>https://research-repository.griffith.edu.au
+</td><td></td><td></td></tr><tr><td>1389ba6c3ff34cdf452ede130c738f37dca7e8cb</td><td>A Convolution Tree with Deconvolution Branches: Exploiting Geometric
+<br/>Relationships for Single Shot Keypoint Detection
+<br/>Department of Electrical and Computer Engineering, CFAR and UMIACS
+<br/><b>University of Maryland-College Park, USA</b></td><td>('40080979', 'Amit Kumar', 'amit kumar')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td>akumar14@umiacs.umd.edu, rama@umiacs.umd.edu
+</td></tr><tr><td>131e395c94999c55c53afead65d81be61cd349a4</td><td></td><td></td><td></td></tr><tr><td>1384a83e557b96883a6bffdb8433517ec52d0bea</td><td></td><td></td><td></td></tr><tr><td>13fd0a4d06f30a665fc0f6938cea6572f3b496f7</td><td></td><td></td><td></td></tr><tr><td>132f88626f6760d769c95984212ed0915790b625</td><td>UC Irvine
+<br/>UC Irvine Electronic Theses and Dissertations
+<br/>Title
+<br/>Exploring Entity Resolution for Multimedia Person Identification
+<br/>Permalink
+<br/>https://escholarship.org/uc/item/9t59f756
+<br/>Author
+<br/>Zhang, Liyan
+<br/>Publication Date
+<br/>2014-01-01
+<br/>Peer reviewed|Thesis/dissertation
+<br/>eScholarship.org
+<br/>Powered by the California Digital Library
+<br/><b>University of California</b></td><td></td><td></td></tr><tr><td>13aef395f426ca8bd93640c9c3f848398b189874</td><td>Image Preprocessing and Complete 2DPCA with Feature
+<br/>Extraction for Gender Recognition
+<br/>NSF REU 2017: Statistical Learning and Data Mining
+<br/><b>University of North Carolina Wilmington</b></td><td></td><td></td></tr><tr><td>13f6ab2f245b4a871720b95045c41a4204626814</td><td>RESEARCH ARTICLE
+<br/>Cortex commands the performance of
+<br/>skilled movement
+<br/><b>Janelia Research Campus, Howard Hughes Medical Institute, Ashburn, United</b><br/>States
+</td><td>('13837962', 'Jian-Zhong Guo', 'jian-zhong guo')<br/>('35466277', 'Austin R Graves', 'austin r graves')<br/>('31262308', 'Wendy W Guo', 'wendy w guo')<br/>('12009815', 'Jihong Zheng', 'jihong zheng')<br/>('3031589', 'Allen Lee', 'allen lee')<br/>('38033405', 'Nuo Li', 'nuo li')<br/>('40634144', 'John J Macklin', 'john j macklin')<br/>('34447371', 'James W Phillips', 'james w phillips')<br/>('1875164', 'Brett D Mensh', 'brett d mensh')<br/>('2424812', 'Kristin Branson', 'kristin branson')<br/>('5832202', 'Adam W Hantman', 'adam w hantman')</td><td></td></tr><tr><td>13be4f13dac6c9a93f969f823c4b8c88f607a8c4</td><td>Families in the Wild (FIW): Large-Scale Kinship Image
+<br/>Database and Benchmarks
+<br/>Dept. of Electrical and Computer Engineering
+<br/><b>Northeastern University</b><br/>Boston, MA, USA
+</td><td>('14802538', 'Joseph P. Robinson', 'joseph p. robinson')<br/>('2025056', 'Ming Shao', 'ming shao')<br/>('1746738', 'Yue Wu', 'yue wu')<br/>('1708679', 'Yun Fu', 'yun fu')</td><td>{jrobins1, mingshao, yuewu, yunfu}@ece.neu.edu
+</td></tr><tr><td>13afc4f8d08f766479577db2083f9632544c7ea6</td><td>Multiple Kernel Learning for
+<br/>Emotion Recognition in the Wild
+<br/>Machine Perception Laboratory
+<br/>UCSD
+<br/>EmotiW Challenge, ICMI, 2013
+<br/>1
+</td><td>('39707211', 'Karan Sikka', 'karan sikka')<br/>('1963167', 'Karmen Dykstra', 'karmen dykstra')<br/>('1924458', 'Suchitra Sathyanarayana', 'suchitra sathyanarayana')<br/>('2724380', 'Gwen Littlewort', 'gwen littlewort')</td><td></td></tr><tr><td>13188a88bbf83a18dd4964e3f89d0bc0a4d3a0bd</td><td><b>HOD, St. Joseph College of Information Technology, Songea, Tanzania</b></td><td></td><td></td></tr><tr><td>13d9da779138af990d761ef84556e3e5c1e0eb94</td><td>Int J Comput Vis (2008) 77: 3–24
+<br/>DOI 10.1007/s11263-007-0093-5
+<br/>Learning to Locate Informative Features for Visual Identification
+<br/>Received: 18 August 2005 / Accepted: 11 September 2007 / Published online: 9 November 2007
+<br/>© Springer Science+Business Media, LLC 2007
+</td><td>('3236352', 'Andras Ferencz', 'andras ferencz')<br/>('1689212', 'Jitendra Malik', 'jitendra malik')</td><td></td></tr><tr><td>1316296fae6485c1510f00b1b57fb171b9320ac2</td><td>FaceID-GAN: Learning a Symmetry Three-Player GAN
+<br/>for Identity-Preserving Face Synthesis
+<br/><b>CUHK - SenseTime Joint Lab, The Chinese University of Hong Kong</b><br/>2SenseTime Research
+<br/><b>Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences</b></td><td>('8035201', 'Yujun Shen', 'yujun shen')<br/>('47571885', 'Ping Luo', 'ping luo')<br/>('1721677', 'Junjie Yan', 'junjie yan')<br/>('31843833', 'Xiaogang Wang', 'xiaogang wang')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td>{sy116, pluo, xtang}@ie.cuhk.edu.hk, yanjunjie@sensetime.com, xgwang@ee.cuhk.edu.hk
+</td></tr><tr><td>7f57e9939560562727344c1c987416285ef76cda</td><td>Accessorize to a Crime: Real and Stealthy Attacks on
+<br/>State-of-the-Art Face Recognition
+<br/><b>Carnegie Mellon University</b><br/>Pittsburgh, PA, USA
+<br/><b>Carnegie Mellon University</b><br/>Pittsburgh, PA, USA
+<br/><b>Carnegie Mellon University</b><br/>Pittsburgh, PA, USA
+<br/><b>University of North Carolina</b><br/>Chapel Hill, NC, USA
+</td><td>('36301492', 'Mahmood Sharif', 'mahmood sharif')<br/>('38572260', 'Lujo Bauer', 'lujo bauer')<br/>('38181360', 'Sruti Bhagavatula', 'sruti bhagavatula')<br/>('1746214', 'Michael K. Reiter', 'michael k. reiter')</td><td>mahmoods@cmu.edu
+<br/>lbauer@cmu.edu
+<br/>srutib@cmu.edu
+<br/>reiter@cs.unc.edu
+</td></tr><tr><td>7fc5b6130e9d474dfb49d9612b6aa0297d481c8e</td><td>Dimensionality Reduction on Grassmannian via Riemannian
+<br/>Optimization:
+<br/>A Generalized Perspective
+<br/><b>Shenyang Institute of Automation, Chinese Academy of Sciences, Shenyang 110016, China</b><br/><b>University of Chinese Academy of Sciences, Beijing, 100049, China</b><br/>3Key Laboratory of Optical-Electronics Information Processing
+<br/>November 20, 2017
+</td><td>('1803285', 'Tianci Liu', 'tianci liu')<br/>('2172914', 'Zelin Shi', 'zelin shi')<br/>('2556853', 'Yunpeng Liu', 'yunpeng liu')</td><td></td></tr><tr><td>7f511a6a2b38a26f077a5aec4baf5dffc981d881</td><td>LOW-LATENCY HUMAN ACTION RECOGNITION WITH WEIGHTED MULTI-REGION
+<br/>CONVOLUTIONAL NEURAL NETWORK
+<br/><b>cid:63)University of Science and Technology of China, Hefei, Anhui, China</b><br/>†HERE Technologies, Chicago, Illinois, USA
+</td><td>('49417387', 'Yunfeng Wang', 'yunfeng wang')<br/>('38272296', 'Wengang Zhou', 'wengang zhou')<br/>('46324995', 'Qilin Zhang', 'qilin zhang')<br/>('49897466', 'Xiaotian Zhu', 'xiaotian zhu')<br/>('7179232', 'Houqiang Li', 'houqiang li')</td><td></td></tr><tr><td>7f21a7441c6ded38008c1fd0b91bdd54425d3f80</td><td>Real Time System for Facial Analysis
+<br/><b>Tampere University of Technology, Finland</b><br/>I.
+<br/>INTRODUCTION
+<br/>Most signal or image processing algorithms should be
+<br/>designed with real-time execution in mind. Most use cases
+<br/>compute on an embedded platform while receiving streaming
+<br/>data as a constant data flow. In machine learning, however, the
+<br/>real time deployment and streaming data processing are less
+<br/>often a design criterion. Instead, the bulk of machine learning is
+<br/>executed offline on the cloud without any real time restrictions.
+<br/>However, the real time use is rapidly becoming more important
+<br/>as deep learning systems are appearing into, for example,
+<br/>autonomous vehicles and working machines.
+<br/>In this work, we describe the functionality of our demo
+<br/>system integrating a number of common real time machine
+<br/>learning systems together. The demo system consists of a
+<br/>screen, webcam and a computer, and it estimates the age,
+<br/>gender and facial expression of all faces seen by the webcam.
+<br/>A picture of the system in use is shown in Figure 1. There is
+<br/>also a Youtube video at https://youtu.be/Kfe5hKNwrCU and
+<br/>the code is freely available at https://github.com/mahehu/TUT-
+<br/>live-age-estimator.
+<br/>Apart from serving as an illustrative example of modern
+<br/>human level machine learning for the general public, the
+<br/>system also highlights several aspects that are common in real
+<br/>time machine learning systems. First, the subtasks needed to
+<br/>achieve the three recognition results represent a wide variety of
+<br/>machine learning problems: (1) object detection is used to find
+<br/>the faces, (2) age estimation represents a regression problem
+<br/>with a real-valued target output (3) gender prediction is a
+<br/>binary classification problem, and (4) facial expression
+<br/>prediction is a multi-class classification problem. Moreover, all
+<br/>these tasks should operate in unison, such that each task will
+<br/>receive enough resources from a limited pool.
+<br/>In the remainder of this paper, we first describe the system
+<br/>level multithreaded architecture for real time processing in
+<br/>Section II. This is followed by detailed discussion individual
+<br/>components of the system in Section III. Next, we report
+<br/>experimental results on the accuracy of each individual
+<br/>recognition component in Section IV, and finally, discuss the
+<br/>benefits of demonstrating the potential of modern machine
+<br/>learning to both general public and experts in the field.
+<br/>II. SYSTEM LEVEL FUNCTIONALITY
+<br/>The challenge in real-time operation is that there are
+<br/>numerous components in the system, and each uses different
+<br/>amount of execution time. The system should be designed
+<br/>such that the operation appears smooth, which means that the
+<br/>most visible tasks should be fast and have the priority in
+<br/>scheduling.
+<br/>Figure 1. Demo system recognizes the age, gender and facial
+<br/>expression in real time.
+<br/>The system is running in threads, as illustrated in Figure 2.
+<br/>The whole system is controlled by the upper level controller
+<br/>and visualization thread, which owns and starts the sub-
+<br/>threads dedicated for individual tasks. The main thread holds
+<br/>all data and executes the visualization loop showing the
+<br/>recognition results to the user at 25 frames per second.
+<br/>The recognition process starts from the grabber thread,
+<br/>which is connected to a webcam. The thread requests video
+<br/>frames from camera for feeding them into a FIFO buffer
+<br/>located inside the controller thread. At grab time, each frame is
+<br/>wrapped inside a class object, which holds the necessary meta
+<br/>data related to each frame. More specifically, each frame is
+<br/>linked with a timestamp and a flag indicating whether the face
+<br/>detection has already been executed and
+<br/>locations
+<br/>(bounding boxes) of all found faces in the scene.
+<br/>the
+<br/>The actual face analysis consists of two parts: face
+<br/>detection and face analysis. The detection is executed in the
+<br/>detection thread, which operates asynchronously, requesting
+<br/>new non-processed frames from the controller thread. After
+<br/>face detection, the locations of found faces are sent to the
+<br/>controller thread, which then matches each new face with all
+<br/>face objects from the previous frames using straightforward
+<br/>centroid tracking. Tracking allows us to average the estimates
+<br/>for each face over a number of recent frames.
+<br/>The detection thread operates on the average faster than the
+<br/>frame rate, but sometimes there are delays due to high load on
+<br/>the other threads. Therefore, the controller thread holds a
+<br/>buffer of the most recent frames, in order to increase the
+<br/>flexibility of processing time.
+<br/>The recognition thread is responsible for assessing the age,
+<br/>gender and facial expression of each face crop found from the
+<br/>image. The thread operates also in an asynchronous mode,
+<br/>requesting new non-processed (but face-detected) frames from
+</td><td>('51232696', 'Janne Tommola', 'janne tommola')<br/>('51149972', 'Pedram Ghazi', 'pedram ghazi')<br/>('51131997', 'Bishwo Adhikari', 'bishwo adhikari')<br/>('1847889', 'Heikki Huttunen', 'heikki huttunen')</td><td></td></tr><tr><td>7fce5769a7d9c69248178989a99d1231daa4fce9</td><td>(IJACSA) International Journal of Advanced Computer Science and Applications,
+<br/>Vol. 7, No. 5, 2016
+<br/>Towards Face Recognition Using Eigenface
+<br/>Department of Computer Engineering
+<br/><b>King Faisal University</b><br/>Hofuf, Al-Ahsa 31982, Saudi Arabia
+</td><td>('39604645', 'Md. Al-Amin Bhuiyan', 'md. al-amin bhuiyan')</td><td></td></tr><tr><td>7fa2605676c589a7d1a90d759f8d7832940118b5</td><td>A New Approach to Clothing Classification using Mid-Level Layers
+<br/>Department of Electrical and Computer Engineering
+<br/><b>Clemson University, Clemson, SC</b></td><td>('2181472', 'Bryan Willimon', 'bryan willimon')</td><td>{rwillim,iwalker,stb}@clemson.edu
+</td></tr><tr><td>7ff42ee09c9b1a508080837a3dc2ea780a1a839b</td><td>Data Fusion for Real-time Multimodal Emotion Recognition through Webcams
+<br/>and Microphones in E-Learning
+<br/><b>Welten Institute, Research Centre for Learning, Teaching and Technology, Faculty of</b><br/><b>Psychology and Educational Sciences, Open University of the Netherlands, Valkenburgerweg</b><br/>177, 6419 AT Heerlen, The Netherlands
+</td><td>('2565070', 'Kiavash Bahreini', 'kiavash bahreini')<br/>('1717772', 'Rob Nadolski', 'rob nadolski')<br/>('3235367', 'Wim Westera', 'wim westera')</td><td>{kiavash.bahreini, rob.nadolski, wim.westera}@ou.nl
+</td></tr><tr><td>7fb5006b6522436ece5bedf509e79bdb7b79c9a7</td><td>Multi-Task Convolutional Neural Network for Face Recognition
+<br/>Department of Computer Science and Engineering
+<br/><b>Michigan State University, East Lansing MI</b></td><td>('2399004', 'Xi Yin', 'xi yin')<br/>('1759169', 'Xiaoming Liu', 'xiaoming liu')</td><td>{yinxi1,liuxm}@msu.edu
+</td></tr><tr><td>7f533bd8f32525e2934a66a5b57d9143d7a89ee1</td><td>Audio-Visual Identity Grounding for Enabling Cross Media Search
+<br/>Paper ID 22
+</td><td>('1950685', 'Kevin Brady', 'kevin brady')</td><td></td></tr><tr><td>7f44f8a5fd48b2d70cc2f344b4d1e7095f4f1fe5</td><td>Int J Comput Vis (2016) 119:60–75
+<br/>DOI 10.1007/s11263-015-0839-4
+<br/>Sparse Output Coding for Scalable Visual Recognition
+<br/>Received: 15 May 2013 / Accepted: 16 June 2015 / Published online: 26 June 2015
+<br/>© Springer Science+Business Media New York 2015
+</td><td>('1729034', 'Bin Zhao', 'bin zhao')</td><td></td></tr><tr><td>7f4bc8883c3b9872408cc391bcd294017848d0cf</td><td>
+<br/>
+<br/>Computer
+<br/>Sciences
+<br/>Department
+<br/>The Multimodal Focused Attribute Model: A Nonparametric
+<br/>Bayesian Approach to Simultaneous Object Classification and
+<br/>Attribute Discovery
+<br/>Technical Report #1697
+<br/>January 2012
+<br/>
+</td><td>('6256616', 'Jake Rosin', 'jake rosin')<br/>('1724754', 'Charles R. Dyer', 'charles r. dyer')<br/>('1832364', 'Xiaojin Zhu', 'xiaojin zhu')</td><td></td></tr><tr><td>7f6061c83dc36633911e4d726a497cdc1f31e58a</td><td>YouTube-8M: A Large-Scale Video Classification
+<br/>Benchmark
+<br/>Paul Natsev
+<br/>Google Research
+</td><td>('2461984', 'Sami Abu-El-Haija', 'sami abu-el-haija')<br/>('1805076', 'George Toderici', 'george toderici')<br/>('32575647', 'Nisarg Kothari', 'nisarg kothari')<br/>('2119006', 'Joonseok Lee', 'joonseok lee')<br/>('2758088', 'Balakrishnan Varadarajan', 'balakrishnan varadarajan')<br/>('2259154', 'Sudheendra Vijayanarasimhan', 'sudheendra vijayanarasimhan')</td><td>haija@google.com
+<br/>gtoderici@google.com
+<br/>ndk@google.com
+<br/>joonseok@google.com
+<br/>natsev@google.com
+<br/>balakrishnanv@google.com
+<br/>svnaras@google.com
+</td></tr><tr><td>7fa3d4be12e692a47b991c0b3d3eba3a31de4d05</td><td>Efficient Online Spatio-Temporal Filtering
+<br/>for Video Event Detection
+<br/>1 Department of Computer Science and Engineering,
+<br/><b>Shanghai Jiao Tong University, Shanghai 200240, China</b><br/>2 School of Electrical and Electronic Engineering,
+<br/><b>Nanyang Technological University, Singapore 639798, Singapore</b><br/>3 Computer Science and Engineering Division,
+<br/><b>University of Michigan</b><br/>Ann Arbor, MI 48105, USA
+</td><td>('3084614', 'Xinchen Yan', 'xinchen yan')<br/>('34316743', 'Junsong Yuan', 'junsong yuan')<br/>('2574445', 'Hui Liang', 'hui liang')</td><td>skywalkeryxc@gmail.com
+<br/>jsyuan@ntu.edu.sg, hliang1@e.ntu.edu.sg
+</td></tr><tr><td>7f445191fa0475ff0113577d95502a96dc702ef9</td><td>Towards an Unequivocal Representation of Actions
+<br/><b>University of Bristol</b><br/><b>University of Bristol</b><br/><b>University of Bristol</b></td><td>('2052236', 'Michael Wray', 'michael wray')<br/>('3420479', 'Davide Moltisanti', 'davide moltisanti')<br/>('1728459', 'Dima Damen', 'dima damen')</td><td>firstname.surname@bristol.ac.uk
+</td></tr><tr><td>7f82f8a416170e259b217186c9e38a9b05cb3eb4</td><td>Multi-Attribute Robust Component Analysis for Facial UV Maps
+<br/><b>Imperial College London, London, UK</b><br/><b>Middlesex University London, London, UK</b><br/><b>Goldsmiths, University of London, London, UK</b></td><td>('24278037', 'Stylianos Moschoglou', 'stylianos moschoglou')<br/>('31243357', 'Evangelos Ververas', 'evangelos ververas')<br/>('1780393', 'Yannis Panagakis', 'yannis panagakis')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')</td><td>{s.moschoglou, e.ververas16, i.panagakis, s.zafeiriou}@imperial.ac.uk, m.nicolaou@gold.ac.uk
+</td></tr><tr><td>7f36dd9ead29649ed389306790faf3b390dc0aa2</td><td>MOVEMENT DIFFERENCES BETWEEN DELIBERATE
+<br/>AND SPONTANEOUS FACIAL EXPRESSIONS:
+<br/>ZYGOMATICUS MAJOR ACTION IN SMILING
+</td><td>('2059653', 'Zara Ambadar', 'zara ambadar')</td><td></td></tr><tr><td>7f6cd03e3b7b63fca7170e317b3bb072ec9889e0</td><td>A Face Recognition Signature Combining Patch-based
+<br/>Features with Soft Facial Attributes
+<br/>L. Zhang, P. Dou, I.A. Kakadiaris
+<br/>Computational Biomedicine Lab, 4849 Calhoun Rd, Rm 373, Houston, TX 77204
+</td><td></td><td></td></tr><tr><td>7fab17ef7e25626643f1d55257a3e13348e435bd</td><td>Age Progression/Regression by Conditional Adversarial Autoencoder
+<br/><b>The University of Tennessee, Knoxville, TN, USA</b></td><td>('1786391', 'Zhifei Zhang', 'zhifei zhang')<br/>('46970616', 'Yang Song', 'yang song')<br/>('1698645', 'Hairong Qi', 'hairong qi')</td><td>{zzhang61, ysong18, hqi}@utk.edu
+</td></tr><tr><td>7f6599e674a33ed64549cd512ad75bdbd28c7f6c</td><td>Kernel Alignment Inspired
+<br/>Linear Discriminant Analysis
+<br/>Department of Computer Science and Engineering,
+<br/><b>University of Texas at Arlington, TX, USA</b></td><td>('1747268', 'Shuai Zheng', 'shuai zheng')</td><td>zhengs123@gmail.com, chqding@uta.edu
+</td></tr><tr><td>7f9260c00a86a0d53df14469f1fa10e318ee2a3c</td><td>HOW IRIS RECOGNITION WORKS
+<br/><b>University of Cambridge, The Computer Laboratory, Cambridge CB3 0FD, U.K</b></td><td>('1781325', 'John Daugman', 'john daugman')</td><td></td></tr><tr><td>7f97a36a5a634c30de5a8e8b2d1c812ca9f971ae</td><td>Incremental Classifier Learning with Generative Adversarial Networks
+<br/><b>Northeastern University 2Microsoft Research 3City University of New York</b></td><td>('1746738', 'Yue Wu', 'yue wu')<br/>('1691128', 'Zicheng Liu', 'zicheng liu')</td><td>{yuewu,yunfu}@ece.neu.edu, yye@gradcenter.cuny.edu
+<br/>{yiche,lijuanw,zliu,yandong.guo,zhang}@microsoft.com
+</td></tr><tr><td>7f2a4cd506fe84dee26c0fb41848cb219305173f</td><td>International Journal of Hybrid Information Technology
+<br/>Vol.8, No.2 (2015), pp.109-120
+<br/>http://dx.doi.org/10.14257/ijhit.2015.8.2.10
+<br/>Face Detection and Pose Estimation Based on Evaluating Facial
+<br/>Feature Selection
+<br/><b>School of Information Science and Engineering, Central South University, Changsha</b><br/>410083, China
+<br/><b>Huazhong University of</b><br/>Science and Technology, Wuhan, China
+<br/><b>Collage of Sciences, Baghdad University, Iraq</b></td><td>('2759156', 'Hiyam Hatem', 'hiyam hatem')<br/>('2742321', 'Mohammed Lutf', 'mohammed lutf')<br/>('2462860', 'Jumana Waleed', 'jumana waleed')</td><td>hiamhatim2005@yahoo.com, bjzou@vip.163.com, aed.m.muttasher@gmail.com,
+<br/>jumana_waleed@yahoo.com, mohammed.lutf@gmail.com1
+</td></tr><tr><td>7fd700f4a010d765c506841de9884df394c1de1c</td><td>Correlational Spectral Clustering
+<br/><b>Max Planck Institute for Biological Cybernetics</b><br/>72076 T¨ubingen, Germany
+</td><td>('1758219', 'Matthew B. Blaschko', 'matthew b. blaschko')<br/>('1787591', 'Christoph H. Lampert', 'christoph h. lampert')</td><td>{blaschko,chl}@tuebingen.mpg.de
+</td></tr><tr><td>7f59657c883f77dc26393c2f9ed3d19bdf51137b</td><td><b>University of Wollongong</b><br/>Research Online
+<br/>Faculty of Informatics - Papers (Archive)
+<br/>Faculty of Engineering and Information Sciences
+<br/>2006
+<br/>Facial expression recognition for multiplayer online
+<br/>games
+<br/>Publication Details
+<br/>Zhan, C., Li, W., Ogunbona, P. O. & Safaei, F. (2006). Facial expression recognition for multiplayer online games. Joint International
+<br/><b>Conference on CyberGames and Interactive Entertainment (pp. 52-58). Western Australia: Murdoch university</b><br/>Research Online is the open access institutional repository for the
+<br/><b>University of Wollongong. For further information contact the UOW</b></td><td>('3283367', 'Ce Zhan', 'ce zhan')<br/>('1685696', 'Wanqing Li', 'wanqing li')<br/>('1719314', 'Philip O. Ogunbona', 'philip o. ogunbona')<br/>('1803733', 'Farzad Safaei', 'farzad safaei')</td><td>University of Wollongong, czhan@uow.edu.au
+<br/>University of Wollongong, wanqing@uow.edu.au
+<br/>University of Wollongong, philipo@uow.edu.au
+<br/>University of Wollongong, farzad@uow.edu.au
+<br/>Library: research-pubs@uow.edu.au
+</td></tr><tr><td>7f23a4bb0c777dd72cca7665a5f370ac7980217e</td><td>Improving Person Re-identification by Attribute and Identity Learning
+<br/><b>University of Technology Sydney</b></td><td>('9919679', 'Yutian Lin', 'yutian lin')<br/>('14904242', 'Liang Zheng', 'liang zheng')<br/>('7435343', 'Zhedong Zheng', 'zhedong zheng')<br/>('1887625', 'Yu Wu', 'yu wu')<br/>('1698559', 'Yi Yang', 'yi yang')</td><td>yutianlin477,liangzheng06,zdzheng12,wu08yu,yee.i.yang@gmail.com
+</td></tr><tr><td>7f268f29d2c8f58cea4946536f5e2325777fa8fa</td><td>Facial Emotion Recognition in Curvelet Domain
+<br/><b>Indian Institute of Informaiton Technology, Allahabad, India</b><br/>Allahabad, India - 211012
+</td><td>('35077572', 'Gyanendra K Verma', 'gyanendra k verma')<br/>('30102998', 'Bhupesh Kumar Singh', 'bhupesh kumar singh')</td><td>gyanendra@iiita.ac.in , rs65@iiita.ac.in
+</td></tr><tr><td>7fc3442c8b4c96300ad3e860ee0310edb086de94</td><td>Similarity Scores based on Background Samples
+<br/><b>The School of Computer Science, Tel-Aviv University, Israel</b><br/><b>Computer Science Division, The Open University of Israel, Israel</b><br/>3 face.com
+</td><td>('1776343', 'Lior Wolf', 'lior wolf')<br/>('1756099', 'Tal Hassner', 'tal hassner')<br/>('2188620', 'Yaniv Taigman', 'yaniv taigman')</td><td></td></tr><tr><td>7f3a73babe733520112c0199ff8d26ddfc7038a0</td><td></td><td></td><td></td></tr><tr><td>7f8d44e7fd2605d580683e47bb185de7f9ea9e28</td><td>Predicting Personal Traits from Facial Images using Convolutional Neural
+<br/>Networks Augmented with Facial Landmark Information
+<br/><b>The Hebrew University of Jerusalem, Israel</b><br/>2Microsoft Research, Cambridge, United Kingdom
+<br/><b>Machine Intelligence Lab (MIL), Cambridge University</b></td><td>('2291654', 'Yoad Lewenberg', 'yoad lewenberg')<br/>('1698412', 'Yoram Bachrach', 'yoram bachrach')<br/>('1808862', 'Sukrit Shankar', 'sukrit shankar')<br/>('1716777', 'Antonio Criminisi', 'antonio criminisi')</td><td>yoadlew@cs.huji.ac.il
+<br/>yobach@microsoft.com
+<br/>ss965@cam.ac.uk
+<br/>antcrim@microsoft.com
+</td></tr><tr><td>7f1f3d7b1a4e7fc895b77cb23b1119a6f13e4d3a</td><td>Proc. of IEEE International
+<br/>Symposium on Computational
+<br/>Intelligence in Robotics and
+<br/>Automation (CIRA), July.16-20,
+<br/>2003, Kobe Japan, pp. 954-959
+<br/>Multi-Subregion Based Probabilistic Approach Toward
+<br/>Pose-Invariant Face Recognition
+<br/><b>Robotics Institute, Carnegie Mellon University, Pittsburgh, PA</b><br/>2SANYO Electric Co., Ltd., Osaka, Japan 573-8534
+</td><td>('1733113', 'Takeo Kanade', 'takeo kanade')<br/>('3151943', 'Akihiko Yamada', 'akihiko yamada')</td><td>E-mail: tk@ri.cmu.edu, aki-yamada@rd.sanyo.co.jp,
+</td></tr><tr><td>7fcfd72ba6bc14bbb90b31fe14c2c77a8b220ab2</td><td>Robust FEC-CNN: A High Accuracy Facial Landmark Detection System
+<br/>1 Key Lab of Intelligent Information Processing of Chinese Academy of Sciences (CAS),
+<br/><b>Institute of Computing Technology, CAS, Beijing 100190, China</b><br/><b>University of Chinese Academy of Sciences, Beijing 100049, China</b><br/>3 CAS Center for Excellence in Brain Science and Intelligence Technology
+</td><td>('3469114', 'Zhenliang He', 'zhenliang he')<br/>('1698586', 'Jie Zhang', 'jie zhang')<br/>('1693589', 'Meina Kan', 'meina kan')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('1710220', 'Xilin Chen', 'xilin chen')</td><td>{zhenliang.he,jie.zhang,meina.kan,shiguang.shan,xilin.chen}@vipl.ict.ac.cn
+</td></tr><tr><td>7f205b9fca7e66ac80758c4d6caabe148deb8581</td><td>Page 1 of 47
+<br/>Computing Surveys
+<br/>A Survey on Mobile Social Signal Processing
+<br/>Understanding human behaviour in an automatic but non-intrusive manner is an important area for various applications. This requires the
+<br/>collaboration of information technology with human sciences to transfer existing knowledge of human behaviour into self-acting tools. These
+<br/>tools will reduce human error that is introduced by current obtrusive methods such as questionnaires. To achieve unobtrusiveness, we focus on
+<br/>exploiting the pervasive and ubiquitous character of mobile devices.
+<br/>In this article, a survey of existing techniques for extracting social behaviour through mobile devices is provided. Initially we expose the
+<br/>terminology used in the area and introduce a concrete architecture for social signal processing applications on mobile phones, constituted by
+<br/>sensing, social interaction detection, behavioural cues extraction, social signal inference and social behaviour understanding. Furthermore, we
+<br/>present state-of-the-art techniques applied to each stage of the process. Finally, potential applications are shown while arguing about the main
+<br/>challenges of the area.
+<br/>Categories and Subject Descriptors: General and reference [Document Types]: Surveys and Overviews; Human-centered computing [Collab-
+<br/>orative and social computing, Ubiquitous and mobile computing]
+<br/>General Terms: Design, Theory, Human Factors, Performance
+<br/>Additional Key Words and Phrases: Social Signal Processing, mobile phones, social behaviour
+<br/>ACM Reference Format:
+<br/>Processing. ACM V, N, Article A (January YYYY), 35 pages.
+<br/>DOI:http://dx.doi.org/10.1145/0000000.0000000
+<br/>1. INTRODUCTION
+<br/>Human behaviour understanding has received a great deal of interest since the beginning of the previous century.
+<br/>People initially conducted research on the way animals behave when they are surrounded by creatures of the same
+<br/>species. Acquiring basic underlying knowledge of animal relations led to extending this information to humans
+<br/>in order to understand social behaviour, social relations etc. Initial experiments were conducted by empirically
+<br/>observing people and retrieving feedback from them. These methods gave rise to well-established psychological
+<br/>approaches for understanding human behaviour, such as surveys, questionnaires, camera recordings and human
+<br/>observers. Nevertheless, these methods introduce several limitations including various sources of error. Complet-
+<br/>ing surveys and questionnaires induces partiality, unconcern etc. [Groves 2004], human error [Reason 1990], and
+<br/>additional restrictions in scalability of the experiments. Accumulating these research problems leads to a common
+<br/>challenge, the lack of automation in an unobtrusive manner.
+<br/>An area that has focussed on detecting social behaviour automatically and has received a great amount of at-
+<br/>tention is Social Signal Processing (SSP). The main target of the field is to model, analyse and synthesise human
+<br/>behaviour with limited user intervention. To achieve these targets, researchers presented three key terms which
+</td><td>('23537960', 'NIKLAS PALAGHIAS', 'niklas palaghias')<br/>('3339833', 'SEYED AMIR HOSEINITABATABAEI', 'seyed amir hoseinitabatabaei')<br/>('2082222', 'MICHELE NATI', 'michele nati')<br/>('1929850', 'ALEXANDER GLUHAK', 'alexander gluhak')<br/>('1693389', 'KLAUS MOESSNER', 'klaus moessner')<br/>('23537960', 'NIKLAS PALAGHIAS', 'niklas palaghias')<br/>('3339833', 'SEYED AMIR HOSEINITABATABAEI', 'seyed amir hoseinitabatabaei')<br/>('2082222', 'MICHELE NATI', 'michele nati')<br/>('1929850', 'ALEXANDER GLUHAK', 'alexander gluhak')<br/>('1693389', 'KLAUS MOESSNER', 'klaus moessner')</td><td></td></tr><tr><td>7fc76446d2b11fc0479df6e285723ceb4244d4ef</td><td>JRPIT 42.1.QXP:Layout 1 12/03/10 2:11 PM Page 3
+<br/>Laplacian MinMax Discriminant Projection and its
+<br/>Applications
+<br/><b>Zhejiang Normal University, Jinhua, China</b><br/>Jie Yang
+<br/><b>Institute of Image Processing and Pattern Recognition, Shanghai Jiao Tong University, Shanghai, China</b><br/>A new algorithm, Laplacian MinMax Discriminant Projection (LMMDP), is proposed in this paper
+<br/>for supervised dimensionality reduction. LMMDP aims at learning a discriminant linear
+<br/>transformation. Specifically, we define the within-class scatter and the between-class scatter using
+<br/>similarities which are based on pairwise distances in sample space. After the transformation, the
+<br/>considered pairwise samples within the same class are as close as possible, while those between
+<br/>classes are as far as possible. The structural information of classes is contained in the within-class
+<br/>and the between-class Laplacian matrices. Therefore, the discriminant projection subspace can be
+<br/>derived by controlling the structural evolution of Laplacian matrices. The performance on several
+<br/>data sets demonstrates the competence of the proposed algorithm.
+<br/>ACM Classification: I.5
+<br/>Keywords: Manifold Learning; Dimensionality Reduction; Supervised Learning; Discriminant
+<br/>Analysis
+<br/>1. INTRODUCTION
+<br/>Dimensionality reduction has attracted tremendous attention in the pattern recognition community
+<br/>over the past few decades and many new algorithms have been developed. Among these algorithms,
+<br/>linear dimensionality reduction is widely spread for its simplicity and effectiveness. Principal
+<br/>component analysis (PCA), as a classic linear method for unsupervised dimensionality reduction,
+<br/>aims at learning a kind of subspaces where the maximum covariance of all training samples are
+<br/>preserved (Turk,1991). Locality Preserving Projections, as another typical approach for
+<br/>unsupervised dimensionality reduction, seeks projections to preserve the local structure of the
+<br/>sample space (He, 2005). However, unsupervised learning algorithms cannot properly model the
+<br/>underlying structures and characteristics of different classes (Zhao, 2007). Discriminant features are
+<br/>often obtained by supervised dimensionality reduction. Linear discriminant analysis (LDA) is one
+<br/>of the most popular supervised techniques for classification (Fukunaga, 1990; Belhumeur, 1997).
+<br/>LDA aims at learning discriminant subspace where the within-class scatter is minimized and the
+<br/>between-class scatter of samples is maximized at the same time. Many improved LDAs up to date
+<br/>have demonstrated competitive performance in object classification (Howland, 2004; Liu, 2007;
+<br/>Martinez, 2006; Wang and Tang, 2004a; Yang, 2005).
+<br/>Copyright© 2010, Australian Computer Society Inc. General permission to republish, but not for profit, all or part of this
+<br/>material is granted, provided that the JRPIT copyright notice is given and that reference is made to the publication, to its
+<br/>date of issue, and to the fact that reprinting privileges were granted by permission of the Australian Computer Society Inc.
+<br/>Manuscript received: 15 April 2008
+<br/>Communicating Editor: Tele Tan
+</td><td>('3185576', 'Zhonglong Zheng', 'zhonglong zheng')<br/>('3140483', 'Xueping Chang', 'xueping chang')</td><td>Email: zhonglong@sjtu.org
+</td></tr><tr><td>7a9ef21a7f59a47ce53b1dff2dd49a8289bb5098</td><td></td><td></td><td></td></tr><tr><td>7af38f6dcfbe1cd89f2307776bcaa09c54c30a8b</td><td>eaig i C e Vii ad Beyd:
+<br/>Deve
+<br/>h . Weg
+<br/>Deae f C e Sciece
+<br/>ichiga Sae Uiveiy
+<br/>Ea aig  48824
+<br/>Abac
+<br/>Thi chae id ce wha i ca
+<br/>aic
+<br/>ve
+<br/>by h a cgiive deve
+<br/>ih i deeied befe he \bih" f he ye. Afe he \bih" i eab
+<br/>
+<br/>aach i  ea
+<br/>deve
+<br/>way aia
+<br/>  ea whi
+<br/>de deve
+</td><td></td><td>weg@c. .ed
+</td></tr><tr><td>7a81967598c2c0b3b3771c1af943efb1defd4482</td><td>Do We Need More Training Data?
+</td><td>('32542103', 'Xiangxin Zhu', 'xiangxin zhu')</td><td></td></tr><tr><td>7ae0212d6bf8a067b468f2a78054c64ea6a577ce</td><td>Human Face Processing Techniques
+<br/>With Application To
+<br/>Large Scale Video Indexing
+<br/>DOCTOR OF
+<br/>PHILOSOPHY
+<br/>Department of Informatics,
+<br/>School of Multidisciplinary Sciences,
+<br/><b>The Graduate University for Advanced Studies (SOKENDAI</b><br/>2006 (School Year)
+<br/>September 2006
+</td><td></td><td></td></tr><tr><td>7a9c317734acaf4b9bd8e07dd99221c457b94171</td><td>Lorentzian Discriminant Projection and Its Applications
+<br/><b>Dalian University of Technology, Dalian 116024, China</b><br/>2 Microsoft Research Asia, Beijing 100080, China
+</td><td>('34469457', 'Risheng Liu', 'risheng liu')<br/>('4642456', 'Zhixun Su', 'zhixun su')<br/>('33383055', 'Zhouchen Lin', 'zhouchen lin')<br/>('40290490', 'Xiaoyu Hou', 'xiaoyu hou')</td><td>zxsu@dlut.edu.cn
+</td></tr><tr><td>7a0fb972e524cb9115cae655e24f2ae0cfe448e0</td><td>Facial Expression Classification Using RBF AND Back-Propagation Neural Networks
+<br/>R.Q.Feitosa1,2,
+<br/>M.M.B.Vellasco1,2,
+<br/>D.T.Oliveira1,
+<br/>D.V.Andrade1,
+<br/>S.A.R.S.Maffra1
+<br/><b>Catholic University of Rio de Janeiro, Brazil</b><br/>Department of Electric Engineering
+<br/><b>State University of Rio de Janeiro, Brazil</b><br/>Department of Computer Engineering
+</td><td></td><td>e-mail: [raul, marley]@ele.puc -rio.br, tuler@inf.puc-rio.br, [diogo, sam]@tecgraf.puc-rio.br
+</td></tr><tr><td>7ad77b6e727795a12fdacd1f328f4f904471233f</td><td>Supervised Local Descriptor Learning
+<br/>for Human Action Recognition
+</td><td>('34798935', 'Xiantong Zhen', 'xiantong zhen')<br/>('40255667', 'Feng Zheng', 'feng zheng')<br/>('40799321', 'Ling Shao', 'ling shao')<br/>('1720247', 'Xianbin Cao', 'xianbin cao')<br/>('40147776', 'Dan Xu', 'dan xu')</td><td></td></tr><tr><td>7a3d46f32f680144fd2ba261681b43b86b702b85</td><td>Multi-label Learning Based Deep Transfer Neural Network for Facial Attribute
+<br/>Classification
+<br/><b>School of Information Science and Engineering, Xiamen University, Xiamen 361005, China</b><br/><b>bSchool of Computer and Information Engineering, Xiamen University of Technology, Xiamen 361024, China</b><br/>aFujian Key Laboratory of Sensing and Computing for Smart City,
+<br/><b>cSchool of Computer Science, The University of Adelaide, Adelaide, SA 5005, Australia</b></td><td>('41034942', 'Ni Zhuang', 'ni zhuang')<br/>('40461734', 'Yan Yan', 'yan yan')<br/>('47336404', 'Si Chen', 'si chen')<br/>('37414077', 'Hanzi Wang', 'hanzi wang')<br/>('1780381', 'Chunhua Shen', 'chunhua shen')</td><td></td></tr><tr><td>7a97de9460d679efa5a5b4c6f0b0a5ef68b56b3b</td><td></td><td></td><td></td></tr><tr><td>7a7f2403e3cc7207e76475e8f27a501c21320a44</td><td>Emotion Recognition from Multi-Modal Information
+<br/>Department of Computer Science and Information Engineering,
+<br/><b>National Cheng Kung University, Tainan, Taiwan, R.O.C</b></td><td>('1681512', 'Chung-Hsien Wu', 'chung-hsien wu')<br/>('1709777', 'Jen-Chun Lin', 'jen-chun lin')<br/>('1691390', 'Wen-Li Wei', 'wen-li wei')<br/>('2891156', 'Kuan-Chun Cheng', 'kuan-chun cheng')</td><td>E-mail: chunghsienwu@gmail.com, jenchunlin@gmail.com, lilijinjin@gmail.com, davidcheng817@gmail.com
+</td></tr><tr><td>7aafeb9aab48fb2c34bed4b86755ac71e3f00338</td><td>Article
+<br/>Real Time 3D Facial Movement Tracking Using a
+<br/>Monocular Camera
+<br/><b>School of Electronics and Information Engineering, Tongji University, Caoan Road 4800, Shanghai</b><br/><b>Kumamoto University, 2-39-1 Kurokami, Kumamoto shi</b><br/>Academic Editor: Vittorio M. N. Passaro
+<br/>Received: 9 May 2016; Accepted: 20 July 2016; Published: 25 July 2016
+</td><td>('2576907', 'Yanchao Dong', 'yanchao dong')<br/>('1715838', 'Yanming Wang', 'yanming wang')<br/>('2721582', 'Jiguang Yue', 'jiguang yue')<br/>('3256415', 'Zhencheng Hu', 'zhencheng hu')</td><td>China; 11wanggyanming@tongji.edu.cn (Y.W.); yuejiguang@tongji.edu.cn (J.Y.)
+<br/>Japan; hu@cs.kumamoto-u.ac.jp
+<br/>* Correspondence: dongyanchao@tongji.edu.cn; Tel.: +86-21-6958-3806
+</td></tr><tr><td>7a84368ebb1a20cc0882237a4947efc81c56c0c0</td><td>Robust and Efficient Parametric Face Alignment
+<br/>†Dept. of Computing,
+<br/><b>Imperial College London</b><br/>180 Queen’s Gate
+<br/>London SW7 2AZ, U.K.
+<br/>∗EEMCS
+<br/><b>University of Twente</b><br/>Drienerlolaan 5
+<br/>7522 NB Enschede
+<br/>The Netherlands ∗
+</td><td>('2610880', 'Georgios Tzimiropoulos', 'georgios tzimiropoulos')<br/>('1776444', 'Stefanos Zafeiriou', 'stefanos zafeiriou')<br/>('1694605', 'Maja Pantic', 'maja pantic')</td><td>{gt204,s.zafeiriou,m.pantic}@imperial.ac.uk
+</td></tr><tr><td>7aa4c16a8e1481629f16167dea313fe9256abb42</td><td>978-1-5090-4117-6/17/$31.00 ©2017 IEEE
+<br/>2981
+<br/>ICASSP 2017
+</td><td></td><td></td></tr><tr><td>7a85b3ab0efb6b6fcb034ce13145156ee9d10598</td><td></td><td></td><td></td></tr><tr><td>7ab930146f4b5946ec59459f8473c700bcc89233</td><td></td><td></td><td></td></tr><tr><td>7a65fc9e78eff3ab6062707deaadde024d2fad40</td><td>A Study on Apparent Age Estimation
+<br/><b>West Virginia University, Morgantown WV 26506, USA</b><br/><b>Key Lab of Intelligent Information Processing of Chinese Academy of Sciences (CAS), Institute of</b><br/>Computing Technology, CAS, Beijing, 100190, China
+</td><td>('1736182', 'Yu Zhu', 'yu zhu')<br/>('1698571', 'Yan Li', 'yan li')<br/>('2501850', 'Guowang Mu', 'guowang mu')<br/>('1822413', 'Guodong Guo', 'guodong guo')</td><td>yzhu4@mix.wvu.edu, yan.li@vipl.ict.ac.cn, guowang.mu@mail.wvu.edu ,
+<br/>Guodong.Guo@mail.wvu.edu (corresponding author)
+</td></tr><tr><td>7ad7897740e701eae455457ea74ac10f8b307bed</td><td>Random Subspace Two-dimensional LDA for Face Recognition*
+</td><td>('29980351', 'Garrett Bingham', 'garrett bingham')</td><td></td></tr><tr><td>7ac9aaafe4d74542832c273acf9d631cb8ea6193</td><td>Deep Micro-Dictionary Learning and Coding Network
+<br/><b>University of Trento, Trento, Italy</b><br/>2Department of Electrical Engineering, Hong Kong Polytechnic Unversity, Hong Kong, China
+<br/>3Lingxi Artificial Interlligence Co., Ltd, Shen Zhen, China
+<br/>4Computer Vision Laboratory, ´Ecole Polytechnique F´ed´erale de Lausanne, Lausanne, Switzerland
+<br/><b>University of Oxford, Oxford, UK</b><br/><b>Texas State University, San Marcos, USA</b></td><td>('46666325', 'Hao Tang', 'hao tang')<br/>('49567679', 'Heng Wei', 'heng wei')<br/>('38505394', 'Wei Xiao', 'wei xiao')<br/>('47824598', 'Wei Wang', 'wei wang')<br/>('40147776', 'Dan Xu', 'dan xu')<br/>('1703601', 'Nicu Sebe', 'nicu sebe')</td><td>{hao.tang, niculae.sebe}@unitn.it, 15102924d@connect.polyu.hk, xiaoweithu@163.com
+<br/>wei.wang@epfl.ch, danxu@robots.ox.ac.uk, y y34@txstate.edu
+</td></tr><tr><td>7a1ce696e260899688cb705f243adf73c679f0d9</td><td>Predicting Missing Demographic Information in
+<br/>Biometric Records using Label Propagation
+<br/>Techniques
+<br/>Department of Computer Science and Engineering
+<br/>Department of Computer Science and Engineering
+<br/><b>Michigan State University</b><br/>East Lansing, Michigan 48824
+<br/><b>Michigan State University</b><br/>East Lansing, Michigan 48824
+</td><td>('3153117', 'Thomas Swearingen', 'thomas swearingen')<br/>('1698707', 'Arun Ross', 'arun ross')</td><td>Email: swearin3@msu.edu
+<br/>Email: rossarun@msu.edu
+</td></tr><tr><td>7a7b1352d97913ba7b5d9318d4c3d0d53d6fb697</td><td>Attend and Rectify: a Gated Attention
+<br/>Mechanism for Fine-Grained Recovery
+<br/>†Computer Vision Center and Universitat Aut`onoma de Barcelona (UAB),
+<br/>Campus UAB, 08193 Bellaterra, Catalonia Spain
+<br/>‡Visual Tagging Services, Parc de Recerca, Campus UAB
+</td><td>('1739551', 'Josep M. Gonfaus', 'josep m. gonfaus')<br/>('7153363', 'Guillem Cucurull', 'guillem cucurull')<br/>('1696387', 'F. Xavier Roca', 'f. xavier roca')</td><td></td></tr><tr><td>7aa062c6c90dba866273f5edd413075b90077b51</td><td>I.J. Information Technology and Computer Science, 2017, 5, 40-51
+<br/>Published Online May 2017 in MECS (http://www.mecs-press.org/)
+<br/>DOI: 10.5815/ijitcs.2017.05.06
+<br/>Minimizing Separability: A Comparative Analysis
+<br/>of Illumination Compensation Techniques in Face
+<br/>Recognition
+<br/><b>Baze University, Abuja, Nigeria</b></td><td>('7392398', 'Chollette C. Olisah', 'chollette c. olisah')</td><td>E-mail: chollette.olisah@bazeuniversity.edu.ng
+</td></tr><tr><td>7a131fafa7058fb75fdca32d0529bc7cb50429bd</td><td>Beyond Face Rotation: Global and Local Perception GAN for Photorealistic and
+<br/>Identity Preserving Frontal View Synthesis
+<br/>1National Laboratory of Pattern Recognition, CASIA
+<br/>2Center for Research on Intelligent Perception and Computing, CASIA
+<br/><b>University of Chinese Academy of Sciences, Beijing, China</b></td><td>('48241673', 'Rui Huang', 'rui huang')<br/>('50202300', 'Shu Zhang', 'shu zhang')<br/>('50290162', 'Tianyu Li', 'tianyu li')<br/>('1705643', 'Ran He', 'ran he')</td><td>huangrui@cmu.edu, tianyu.lizard@gmail.com, {shu.zhang, rhe}@nlpr.ia.ac.cn
+</td></tr><tr><td>1451e7b11e66c86104f9391b80d9fb422fb11c01</td><td>IET Signal Processing
+<br/>Research Article
+<br/>Image privacy protection with secure JPEG
+<br/>transmorphing
+<br/>ISSN 1751-9675
+<br/>Received on 30th December 2016
+<br/>Revised 13th July 2017
+<br/>Accepted on 11th August 2017
+<br/>doi: 10.1049/iet-spr.2016.0756
+<br/>www.ietdl.org
+<br/>1Multimedia Signal Processing Group, Electrical Engineering Department, EPFL, Station 11, Lausanne, Switzerland
+</td><td>('1681498', 'Touradj Ebrahimi', 'touradj ebrahimi')</td><td> E-mail: lin.yuan@epfl.ch
+</td></tr><tr><td>14761b89152aa1fc280a33ea4d77b723df4e3864</td><td></td><td></td><td></td></tr><tr><td>14b87359f6874ff9b8ee234b18b418e57e75b762</td><td>H. GAO ET AL: FACE ALIGNMENT USING A RANKING MODEL BASED ON RT
+<br/>Face Alignment Using a Ranking Model
+<br/>based on Regression Trees
+<br/>Hazım Kemal Ekenel1,2
+<br/><b>Institute for Anthropomatics</b><br/><b>Karlsruhe Institute of Technology</b><br/>Karlsruhe, Germany
+<br/>2 Faculty of Computer and Informatics
+<br/><b>Istanbul Technical University</b><br/>Istanbul, Turkey
+</td><td>('1697965', 'Hua Gao', 'hua gao')<br/>('1742325', 'Rainer Stiefelhagen', 'rainer stiefelhagen')</td><td>gao@kit.edu
+<br/>ekenel@kit.edu
+<br/>rainer.stiefelhagen@kit.edu
+</td></tr><tr><td>14fdec563788af3202ce71c021dd8b300ae33051</td><td>Social Influence Analysis based on Facial Emotions
+<br/>Department of Computer Science and Engineering
+<br/><b>Nagoya Institute of Technology, Gokiso, Showa-ku, Nagoya, 466-8555 Japan</b></td><td>('2159044', 'Pankaj Mishra', 'pankaj mishra')<br/>('1679044', 'Takayuki Ito', 'takayuki ito')</td><td>{pankaj.mishra, rafik}@itolab.nitech.ac.jp,
+<br/>ito.takayuki@nitech.ac.jp
+</td></tr><tr><td>142e5b4492bc83b36191be4445ef0b8b770bf4b0</td><td>Discriminative Analysis of Brain Function
+<br/>at Resting-State for Attention-Deficit/Hyperactivity
+<br/>Disorder
+<br/>Y.F. Wang2, and T. Z. Jiang1
+<br/><b>National Laboratory of Pattern Recognition, Institute of Automation</b><br/>Chinese Academy of Sciences, P.R. China
+<br/><b>Institute of Mental Health, Peking University, P.R. China</b></td><td>('2339602', 'M. Liang', 'm. liang')</td><td>czzhu@nlpr.ia.ac.cn
+</td></tr><tr><td>14b016c7a87d142f4b9a0e6dc470dcfc073af517</td><td>Modest proposals for improving biometric recognition papers
+<br/>NIST, Gaithersburg MD
+<br/><b>San Jose State University, San Jose, CA</b></td><td>('2145366', 'James R. Matey', 'james r. matey')<br/>('34958610', 'George W. Quinn', 'george w. quinn')<br/>('2136478', 'Patrick Grother', 'patrick grother')<br/>('2326261', 'Elham Tabassi', 'elham tabassi')<br/>('1707135', 'James L. Wayman', 'james l. wayman')</td><td>POC: james.matey@NIST.gov
+<br/>jlwayman@aol.com
+</td></tr><tr><td>14b66748d7c8f3752dca23991254fca81b6ee86c</td><td>A. RICHARD, J. GALL: A BOW-EQUIVALENT NEURAL NETWORK
+<br/>A BoW-equivalent Recurrent Neural Network
+<br/>for Action Recognition
+<br/><b>Institute of Computer Science III</b><br/><b>University of Bonn</b><br/>Bonn, Germany
+</td><td>('32774629', 'Alexander Richard', 'alexander richard')<br/>('2946643', 'Juergen Gall', 'juergen gall')</td><td>richard@iai.uni-bonn.de
+<br/>gall@iai.uni-bonn.de
+</td></tr><tr><td>14e8dbc0db89ef722c3c198ae19bde58138e88bf</td><td>HapFACS: an Open Source API/Software to
+<br/>Generate FACS-Based Expressions for ECAs
+<br/>Animation and for Corpus Generation
+<br/>Christine Lisetti
+<br/>School of Computing and Information Sciences
+<br/>School of Computing and Information Sciences
+<br/><b>Florida International University</b><br/>Miami, Florida, USA
+<br/><b>Florida International University</b><br/>Miami, Florida, USA
+</td><td>('1809087', 'Reza Amini', 'reza amini')</td><td>Email: ramin001@fiu.edu
+<br/>Email: lisetti@cis.fiu.edu
+</td></tr><tr><td>14fa27234fa2112014eda23da16af606db7f3637</td><td></td><td></td><td></td></tr><tr><td>1459d4d16088379c3748322ab0835f50300d9a38</td><td>Cross-Domain Visual Matching via Generalized
+<br/>Similarity Measure and Feature Learning
+</td><td>('40461403', 'Liang Lin', 'liang lin')<br/>('2749191', 'Guangrun Wang', 'guangrun wang')<br/>('1724520', 'Wangmeng Zuo', 'wangmeng zuo')<br/>('2340559', 'Xiangchu Feng', 'xiangchu feng')<br/>('40396552', 'Lei Zhang', 'lei zhang')</td><td></td></tr><tr><td>14e949f5754f9e5160e8bfa3f1364dd92c2bb8d6</td><td></td><td></td><td></td></tr><tr><td>146bbf00298ee1caecde3d74e59a2b8773d2c0fc</td><td><b>University of Groningen</b><br/>4D Unconstrained Real-time Face Recognition Using a Commodity Depthh Camera
+<br/>Schimbinschi, Florin; Wiering, Marco; Mohan, R.E.; Sheba, J.K.
+<br/>Published in:
+<br/>7th IEEE Conference on Industrial Electronics and Applications
+<br/>IMPORTANT NOTE: You are advised to consult the publisher's version (publisher's PDF) if you wish to
+<br/>cite from it. Please check the document version below.
+<br/>Document Version
+<br/>Final author's version (accepted by publisher, after peer review)
+<br/>Publication date:
+<br/>2012
+<br/><b>Link to publication in University of Groningen/UMCG research database</b><br/>Citation for published version (APA):
+<br/>Schimbinschi, F., Wiering, M., Mohan, R. E., & Sheba, J. K. (2012). 4D Unconstrained Real-time Face
+<br/>Recognition Using a Commodity Depthh Camera. In 7th IEEE Conference on Industrial Electronics and
+<br/>Applications : ICIEA
+<br/>Copyright
+<br/>Other than for strictly personal use, it is not permitted to download or to forward/distribute the text or part of it without the consent of the
+<br/>author(s) and/or copyright holder(s), unless the work is under an open content license (like Creative Commons).
+<br/>Take-down policy
+<br/>If you believe that this document breaches copyright please contact us providing details, and we will remove access to the work immediately
+<br/>and investigate your claim.
+<br/><b>Downloaded from the University of Groningen/UMCG research database (Pure): http://www.rug.nl/research/portal. For technical reasons the</b><br/>number of authors shown on this cover page is limited to 10 maximum.
+<br/>Download date: 03-09-2017
+<br/> </td><td></td><td></td></tr><tr><td>14e9158daf17985ccbb15c9cd31cf457e5551990</td><td>ConvNets with Smooth Adaptive Activation Functions for
+<br/>Regression
+<br/>Tahsin M. Kurc1,2
+<br/><b>Stony Brook University</b><br/>2Oak Ridge National Laboratory
+<br/><b>Stony Brook University Hospital</b></td><td>('2321406', 'Le Hou', 'le hou')<br/>('1686020', 'Dimitris Samaras', 'dimitris samaras')<br/>('1735710', 'Joel H. Saltz', 'joel h. saltz')<br/>('1755448', 'Yi Gao', 'yi gao')</td><td></td></tr><tr><td>14ce7635ff18318e7094417d0f92acbec6669f1c</td><td>DeepFace: Closing the Gap to Human-Level Performance in Face Verification
+<br/>Marc’Aurelio Ranzato
+<br/>Facebook AI Group
+<br/>Menlo Park, CA, USA
+<br/><b>Tel Aviv University</b><br/>Tel Aviv, Israel
+</td><td>('2188620', 'Yaniv Taigman', 'yaniv taigman')<br/>('2909406', 'Ming Yang', 'ming yang')<br/>('1776343', 'Lior Wolf', 'lior wolf')</td><td>{yaniv, mingyang, ranzato}@fb.com
+<br/>wolf@cs.tau.ac.il
+</td></tr><tr><td>1450296fb936d666f2f11454cc8f0108e2306741</td><td>Learning to Discover Cross-Domain Relations
+<br/>with Generative Adversarial Networks
+</td><td>('2509132', 'Taeksoo Kim', 'taeksoo kim')</td><td></td></tr><tr><td>140438a77a771a8fb656b39a78ff488066eb6b50</td><td>Localizing Parts of Faces Using a Consensus of Exemplars
+<br/>(cid:63)Kriegman-Belhumeur Vision Technologies∗
+<br/><b>University of Maryland, College Park</b><br/><b>University of California, San Diego</b><br/><b>Columbia University</b></td><td>('1767767', 'Peter N. Belhumeur', 'peter n. belhumeur')<br/>('34734622', 'David W. Jacobs', 'david w. jacobs')<br/>('1765887', 'David J. Kriegman', 'david j. kriegman')<br/>('40631426', 'Neeraj Kumar', 'neeraj kumar')</td><td></td></tr><tr><td>143bee9120bcd7df29a0f2ad6f0f0abfb23977b8</td><td>Shared Gaussian Process Latent Variable Model
+<br/>for Multi-view Facial Expression Recognition
+<br/><b>Imperial College London, UK</b><br/><b>EEMCS, University of Twente, The Netherlands</b></td><td>('2308430', 'Stefanos Eleftheriadis', 'stefanos eleftheriadis')<br/>('1729713', 'Ognjen Rudovic', 'ognjen rudovic')<br/>('1694605', 'Maja Pantic', 'maja pantic')</td><td></td></tr><tr><td>14d72dc9f78d65534c68c3ed57305f14bd4b5753</td><td>Exploiting Multi-Grain Ranking Constraints for Precisely Searching
+<br/>Visually-similar Vehicles
+<br/>1National Engineering Laboratory for Video Technology, School of EE&CS,
+<br/><b>Peking University, Beijing, China</b><br/>2Cooperative Medianet Innovation Center, China
+<br/><b>Beijing Institute of Technology, China</b></td><td>('13318784', 'Ke Yan', 'ke yan')<br/>('5765799', 'Yaowei Wang', 'yaowei wang')<br/>('1687907', 'Wei Zeng', 'wei zeng')<br/>('1705972', 'Yonghong Tian', 'yonghong tian')<br/>('34097174', 'Tiejun Huang', 'tiejun huang')</td><td>{keyan, yhtian, weizeng, tjhuang}@pku.edu.cn;yaoweiwang@bit.edu.cn
+</td></tr><tr><td>14b162c2581aea1c0ffe84e7e9273ab075820f52</td><td>Training Object Class Detectors from Eye Tracking Data
+<br/><b>School of Informatics, University of Edinburgh, UK</b></td><td>('1749373', 'Dim P. Papadopoulos', 'dim p. papadopoulos')<br/>('2505673', 'Frank Keller', 'frank keller')<br/>('1749692', 'Vittorio Ferrari', 'vittorio ferrari')</td><td></td></tr><tr><td>14ff9c89f00dacc8e0c13c94f9fadcd90e4e604d</td><td>Correlation Filter Cascade for Facial Landmark Localization
+<br/>Pattern Analysis and Computer Vision Department
+<br/>School of Computing
+<br/>Istituto Italiano di Tecnologia, Genova, Italy
+<br/><b>National University of Singapore, Singapore</b></td><td>('2860592', 'Hamed Kiani Galoogahi', 'hamed kiani galoogahi')<br/>('1715286', 'Terence Sim', 'terence sim')</td><td>hamed.kiani@iit.it
+<br/>tsim@comp.nus.edu.sg
+</td></tr><tr><td>14fdce01c958043140e3af0a7f274517b235adf3</td><td></td><td></td><td></td></tr><tr><td>14b69626b64106bff20e17cf8681790254d1e81c</td><td>Hybrid Super Vector with Improved Dense Trajectories for Action Recognition
+<br/><b>Shenzhen Key Lab of CVPR, Shenzhen Institutes of Advanced Technology, CAS, China</b><br/><b>Southwest Jiaotong University, Chengdu, P.R. China</b><br/><b>The Chinese University of Hong Kong, Hong Kong</b></td><td>('1766837', 'Xiaojiang Peng', 'xiaojiang peng')<br/>('40795365', 'LiMin Wang', 'limin wang')<br/>('2985266', 'Zhuowei Cai', 'zhuowei cai')<br/>('40285012', 'Yu Qiao', 'yu qiao')<br/>('39657084', 'Qiang Peng', 'qiang peng')</td><td>fxiaojiangp,07wanglimin,iamcaizhuoweig@gmail.com, yu.qiao@siat.ac.cn, qpeng@swjtu.edu.cn
+</td></tr><tr><td>14070478b8f0d84e5597c3e67c30af91b5c3a917</td><td>Detecting Social Actions of Fruit Flies
+<br/><b>California Institute of Technology, Pasadena, California, USA</b><br/><b>Howard Hughes Medical Institute (HHMI</b></td><td>('2948199', 'Eyrun Eyjolfsdottir', 'eyrun eyjolfsdottir')<br/>('3251767', 'Steve Branson', 'steve branson')<br/>('2232848', 'Xavier P. Burgos-Artizzu', 'xavier p. burgos-artizzu')<br/>('2954028', 'Eric D. Hoopfer', 'eric d. hoopfer')<br/>('20299567', 'Jonathan Schor', 'jonathan schor')<br/>('30334638', 'David J. Anderson', 'david j. anderson')<br/>('1690922', 'Pietro Perona', 'pietro perona')</td><td></td></tr><tr><td>14fb3283d4e37760b7dc044a1e2906e3cbf4d23a</td><td>Weak Attributes for Large-Scale Image Retrieval∗
+<br/><b>Columbia University, New York, NY</b></td><td>('1815972', 'Felix X. Yu', 'felix x. yu')<br/>('1725599', 'Rongrong Ji', 'rongrong ji')<br/>('3138710', 'Ming-Hen Tsai', 'ming-hen tsai')<br/>('35984288', 'Guangnan Ye', 'guangnan ye')<br/>('9546964', 'Shih-Fu Chang', 'shih-fu chang')</td><td>y{yuxinnan, rrji, yegn, sfchang}@ee.columbia.edu
+<br/>xminghen@cs.columbia.edu
+</td></tr><tr><td>14811696e75ce09fd84b75fdd0569c241ae02f12</td><td>Margin-Based Discriminant Dimensionality Reduction for Visual Recognition
+<br/><b>Eskisehir Osmangazi University</b><br/>Laboratoire Jean Kuntzmann
+<br/>Meselik Kampusu 26480 Eskisehir Turkey
+<br/>B.P. 53, 38041 Grenoble Cedex 9, France
+<br/>Fr´ed´eric Jurie
+<br/><b>University of Caen</b><br/>Universit´e de Caen - F-14032 Caen, France
+<br/><b>Rowan University</b><br/>201 Mullica Hill Road, Glassboro NJ USA
+</td><td>('2277308', 'Hakan Cevikalp', 'hakan cevikalp')<br/>('1756114', 'Bill Triggs', 'bill triggs')<br/>('1780024', 'Robi Polikar', 'robi polikar')</td><td>Hakan.Cevikalp@gmail.com
+<br/>Bill.Triggs@imag.fr
+<br/>Frederic.Jurie@unicaen.fr
+<br/>polikar@rowan.edu
+</td></tr><tr><td>141eab5f7e164e4ef40dd7bc19df9c31bd200c5e</td><td></td><td></td><td></td></tr><tr><td>14e759cb019aaf812d6ac049fde54f40c4ed1468</td><td>Subspace Methods
+<br/>Synonyms
+<br/>{ Multiple similarity method
+<br/>Related Concepts
+<br/>{ Principal component analysis (PCA)
+<br/>{ Subspace analysis
+<br/>{ Dimensionality reduction
+<br/>De(cid:12)nition
+<br/>Subspace analysis in computer vision is a generic name to describe a general
+<br/>framework for comparison and classification of subspaces. A typical approach in
+<br/>subspace analysis is the subspace method (SM) that classify an input pattern
+<br/>vector into several classes based on the minimum distance or angle between the
+<br/>input pattern vector and each class subspace, where a class subspace corresponds
+<br/>to the distribution of pattern vectors of the class in high dimensional vector
+<br/>space.
+<br/>Background
+<br/>Comparison and classification of subspaces has been one of the central prob-
+<br/>lems in computer vision, where an image set of an object to be classified is
+<br/>compactly represented by a subspace in high dimensional vector space.
+<br/>The subspace method is one of the most effective classification method in
+<br/>subspace analysis, which was developed by two Japanese researchers, Watanabe
+<br/>and Iijima around 1970, independently [1, 2]. Watanabe and Iijima named their
+<br/>methods the CLAFIC [3] and the multiple similarity method [4], respectively.
+<br/>The concept of the subspace method is derived from the observation that pat-
+<br/>terns belonging to a class forms a compact cluster in high dimensional vector
+<br/>space, where, for example, a w×h pixels image pattern is usually represented as a
+<br/>vector in w×h-dimensional vector space. The compact cluster can be represented
+<br/>by a subspace, which is generated by using Karhunen-Lo`eve (KL) expansion, also
+<br/>known as the principal component analysis (PCA). Note that a subspace is gen-
+<br/>erated for each class, unlike the Eigenface Method [5] in which only one subspace
+<br/>(called eigenspace) is generated.
+<br/>The SM has been known as one of the most useful methods in pattern recog-
+<br/>nition field, since its algorithm is very simple and it can handle classification
+<br/>of multiple classes. However, its classification performance was not sufficient for
+<br/>many applications in practice, because class subspaces are generated indepen-
+<br/>dently of each other [1]. There is no reason to assume a priori that each class
+</td><td>('1770128', 'Kazuhiro Fukui', 'kazuhiro fukui')</td><td></td></tr><tr><td>1442319de86d171ce9595b20866ec865003e66fc</td><td>Vision-Based Fall Detection with Convolutional
+<br/>Neural Networks
+<br/><b>DeustoTech - University of Deusto</b><br/>Avenida de las Universidades, 24 - 48007, Bilbao, Spain
+<br/>2 Dept. of Computer Science and Artificial Intelligence, Basque
+<br/><b>Country University, San Sebastian, Spain</b><br/>P. Manuel Lardizabal, 1 - 20018, San Sebastian, Spain
+<br/>3 Ikerbasque, Basque Foundation for Science, Bilbao, Spain
+<br/>Maria Diaz de Haro, 3 - 48013 Bilbao, Spain
+<br/>4 Donostia International Physics Center (DIPC), San Sebastian, Spain
+<br/>P. Manuel Lardizabal, 4 - 20018, San Sebastian, Spain
+</td><td>('2481918', 'Gorka Azkune', 'gorka azkune')<br/>('3147227', 'Ignacio Arganda-Carreras', 'ignacio arganda-carreras')</td><td>{adrian.nunez@deusto.es, gorka.azkune@deusto.es, ignacio.arganda@ehu.es}
+</td></tr><tr><td>146a7ecc7e34b85276dd0275c337eff6ba6ef8c0</td><td>This is a pre-print of the original paper submitted for review in FG 2017.
+<br/>AFFACT - Alignment Free Facial Attribute Classification Technique
+<br/>Vision and Security Technology (VAST) Lab,
+<br/><b>University of Colorado Colorado Springs</b><br/>∗ authors with equal contribution
+</td><td>('2974221', 'Andras Rozsa', 'andras rozsa')<br/>('1760117', 'Terrance E. Boult', 'terrance e. boult')</td><td>{mgunther,arozsa,tboult}@vast.uccs.edu
+</td></tr><tr><td>148eb413bede35487198ce7851997bf8721ea2d6</td><td>People Search in Surveillance Videos
+<br/>Four Eyes Lab, UCSB
+<br/>IBM Research
+<br/>IBM Research
+<br/>IBM Research
+<br/>Four Eyes Lab, UCSB
+<br/>INTRODUCTION
+<br/>1.
+<br/>In traditional surveillance scenarios, users are required to
+<br/>watch video footage corresponding to extended periods of
+<br/>time in order to find events of interest. However, this pro-
+<br/>cess is resource-consuming, and suffers from high costs of
+<br/>employing security personnel. The field of intelligent vi-
+<br/>sual surveillance [2] seeks to address these issues by applying
+<br/>computer vision techniques to automatically detect specific
+<br/>events in long video streams. The events can then be pre-
+<br/>sented to the user or be indexed into a database to allow
+<br/>queries such as “show me the red cars that entered a given
+<br/>parking lot from 7pm to 9pm on Monday” or “show me the
+<br/>faces of people who left the city’s train station last week.”
+<br/>In this work, we are interested in analyzing people, by ex-
+<br/>tracting information that can be used to search for them in
+<br/>surveillance videos. Current research on this topic focuses
+<br/>on approaches based on face recognition, where the goal is
+<br/>to establish the identity of a person given an image of a
+<br/>face. However, face recognition is still a very challenging
+<br/>problem, especially in low resolution images with variations
+<br/>in pose and lighting, which is often the case in surveillance
+<br/>data. State-of-the-art face recognition systems [1] require
+<br/>a fair amount of resolution in order to produce reliable re-
+<br/>sults, but in many cases this level of detail is not available
+<br/>in surveillance applications.
+<br/>We approach the problem in an alternative way, by avoiding
+<br/>face recognition and proposing a framework for finding peo-
+<br/>ple based on parsing the human body and exploiting part
+<br/>attributes. Those include visual attributes such as facial hair
+<br/>type (beards, mustaches, absence of facial hair), type of eye-
+<br/>wear (sunglasses, eyeglasses, absence of glasses), hair type
+<br/>(baldness, hair, wearing a hat), and clothing color. While
+<br/>face recognition is still a difficult problem, accurate and ef-
+<br/>ficient face detectors1 based on learning approaches [6] are
+<br/>available. Those have been demonstrated to work well on
+<br/>challenging low-resolution images, with variations in pose
+<br/>and lighting. In our method, we employ this technology to
+<br/>design detectors for facial attributes from large sets of train-
+<br/>ing data.
+<br/>1The face detection problem consists of localizing faces in
+<br/>images, while face recognition aims to establish the identity
+<br/>of a person given an image of a face. Face detection is a
+<br/>challenging problem, but it is arguably not as complex as
+<br/>face recognition.
+<br/>Our technique falls into the category of short term recogni-
+<br/>tion methods, taking advantage of features present in brief
+<br/>intervals in time, such as clothing color, hairstyle, and makeup,
+<br/>which are generally considered an annoyance in face recogni-
+<br/>tion methods. There are several applications that naturally
+<br/>fit within a short term recognition framework. An example
+<br/>is in criminal investigation, when the police are interested in
+<br/>locating a suspect. In those cases, eyewitnesses typically fill
+<br/>out a suspect description form, where they indicate personal
+<br/>traits of the suspect as seen at the moment when the crime
+<br/>was committed. Those include facial hair type, hair color,
+<br/>clothing type, etc. Based on that description, the police
+<br/>manually scan the entire video archive looking for a person
+<br/>with similar characteristics. This process is tedious and time
+<br/>consuming, and could be drastically accelerated by the use
+<br/>of our technique. Another application is on finding missing
+<br/>people. Parents looking for their children in an amusement
+<br/>park could provide a description including clothing and eye-
+<br/>wear type, and videos from multiple cameras in the park
+<br/>would then be automatically searched.
+</td><td>('2000950', 'Daniel A. Vaquero', 'daniel a. vaquero')<br/>('1723233', 'Rogerio S. Feris', 'rogerio s. feris')<br/>('11081274', 'Lisa Brown', 'lisa brown')<br/>('1690709', 'Arun Hampapur', 'arun hampapur')<br/>('1752714', 'Matthew Turk', 'matthew turk')</td><td>daniel@cs.ucsb.edu
+<br/>rsferis@us.ibm.com
+<br/>lisabr@us.ibm.com
+<br/>arunh@us.ibm.com
+<br/>mturk@cs.ucsb.edu
+</td></tr><tr><td>1462bc73834e070201acd6e3eaddd23ce3c1a114</td><td>International Journal of Science, Engineering and Technology Research (IJSETR), Volume 3, Issue 4, April 2014
+<br/>FACE AUTHENTICATION /RECOGNITION
+<br/>SYSTEM FOR FORENSIC APPLICATION
+<br/>USING SKETCH BASED ON THE SIFT
+<br/>FEATURES APPROACH
+<br/>Department of Electronics Engineering KITS,
+<br/><b>RTMNU Nagpur University, India</b><br/>
+</td><td></td><td></td></tr><tr><td>14014a1bdeb5d63563b68b52593e3ac1e3ce7312</td><td>ALNAJAR et al.: EXPRESSION-INVARIANT AGE ESTIMATION
+<br/>Expression-Invariant Age Estimation
+<br/>Jose Alvarez2
+<br/><b>ISLA Lab, Informatics Institute</b><br/><b>University of Amsterdam</b><br/>Amsterdam, The Netherlands
+<br/>2 NICTA
+<br/>Canberra ACT 2601
+<br/>Australia
+</td><td>('1765602', 'Fares Alnajar', 'fares alnajar')<br/>('39793067', 'Zhongyu Lou', 'zhongyu lou')<br/>('1695527', 'Theo Gevers', 'theo gevers')</td><td>F.alnajar@uva.nl
+<br/>z.lou@uva.nl
+<br/>jose.alvarez@nicta.com.au
+<br/>th.gevers@uva.nl
+</td></tr><tr><td>1473a233465ea664031d985e10e21de927314c94</td><td></td><td></td><td></td></tr><tr><td>140c95e53c619eac594d70f6369f518adfea12ef</td><td>Pushing the Frontiers of Unconstrained Face Detection and Recognition: IARPA Janus Benchmark A
+<br/>The development of accurate and scalable unconstrained face recogni-
+<br/>tion algorithms is a long term goal of the biometrics and computer vision
+<br/>communities. The term “unconstrained” implies a system can perform suc-
+<br/>cessful identifications regardless of face image capture presentation (illumi-
+<br/>nation, sensor, compression) or subject conditions (facial pose, expression,
+<br/>occlusion). While automatic, as well as human, face identification in certain
+<br/>scenarios may forever be elusive, such as when a face is heavily occluded or
+<br/>captured at very low resolutions, there still remains a large gap between au-
+<br/>tomated systems and human performance on familiar faces. In order to close
+<br/>this gap, large annotated sets of imagery are needed that are representative
+<br/>of the end goals of unconstrained face recognition. This will help continue
+<br/>to push the frontiers of unconstrained face detection and recognition, which
+<br/>are the primary goals of the IARPA Janus program.
+<br/>The current state of the art in unconstrained face recognition is high
+<br/>accuracy (roughly 99% true accept rate at a false accept rate of 1.0%) on
+<br/>faces that can be detected with a commodity face detectors, but unknown
+<br/>accuracy on other faces. Despite the fact that face detection and recognition
+<br/>research generally has advanced somewhat independently, the frontal face
+<br/>detector filtering approach used for key in the wild face recognition datasets
+<br/>means that progress in face recognition is currently hampered by progress
+<br/>in face detection. Hence, a major need exists for a face recognition dataset
+<br/>that captures as wide of a range of variations as possible to offer challenges
+<br/>to both face detection as well as face recognition.
+<br/>In this paper we introduce the IARPA Janus Benchmark A (IJB-A),
+<br/>which is publicly available for download. The IJB-A contains images and
+<br/>videos from 500 subjects captured from “in the wild” environment. All la-
+<br/>belled subjects have been manually localized with bounding boxes for face
+<br/>detection, as well as fiducial landmarks for the center of the two eyes (if
+<br/>visible) and base of the nose. Manual bounding box annotations for all non-
+<br/>labelled subjects (i.e., other persons captured in the imagery) have been cap-
+<br/>tured as well. All imagery is Creative Commons licensed, which is a license
+<br/>that allows open re-distribution provided proper attribution is made to the
+<br/>data creator. The subjects have been intentionally sampled to contain wider
+<br/>geographic distribution than previous datasets. Recognition and detection
+<br/>protocols are provided which are motivated by operational deployments of
+<br/>face recognition systems. An example of images and video from IJB-A can
+<br/>be found in Figure 3.
+<br/>The IJB-A dataset has the following claimed contributions: (i) The most
+<br/>unconstrained database released to date; (ii) The first joint face detection and
+<br/>face recognition benchmark dataset collected in the wild; (iii) Meta-data
+<br/>providing subject gender and skin color, and occlusion (eyes, mouth/nose,
+<br/>and forehead), facial hear, and coarse pose information for each imagery
+<br/>instance; (iv) Widest geographic distribution of any public face dataset; (v)
+<br/>The first in the wild dataset to contain a mixture of images and videos; (vi)
+<br/>Clear authority for re-distribution; (vii) Protocols for identification (search)
+<br/>and verification (compare); (viii) Baseline accuracies from off the shelf de-
+<br/>tectors and recognition algorithms; and (ix) Protocols for both template and
+<br/>model-based face recognition.
+<br/>Every subject in the dataset contains at least five images and one video.
+<br/>IJB-A consists of a total of 5,712 images and 2,085 videos, with an average
+<br/>of 11.4 images and 4.2 videos per subject.
+</td><td>('1885566', 'Emma Taborsky', 'emma taborsky')<br/>('1917247', 'Austin Blanton', 'austin blanton')<br/>('39403529', 'Jordan Cheney', 'jordan cheney')<br/>('2040584', 'Kristen Allen', 'kristen allen')<br/>('2136478', 'Patrick Grother', 'patrick grother')<br/>('2578654', 'Alan Mah', 'alan mah')<br/>('6680444', 'Anil K. Jain', 'anil k. jain')</td><td></td></tr><tr><td>14418ae9a6a8de2b428acb2c00064da129632f3e</td><td>Discovering the Spatial Extent of Relative Attributes
+<br/><b>University of California Davis</b><br/>Introduction
+<br/>Visual attributes are human-nameable object properties that serve as an in-
+<br/>termediate representation between low-level image features and high-level
+<br/>objects or scenes [3, 4, 5]. They can offer a great gateway for human-
+<br/>object interaction. For example, when we want to interact with an unfa-
+<br/>miliar object, it is likely that we first infer its attributes from its appear-
+<br/>ance (e.g., is it furry or slippery?) and then decide how to interact with
+<br/>it. Thus, modelling visual attributes would be valuable for understanding
+<br/>human-object interactions. Researchers have developed systems that model
+<br/>binary attributes [3, 4, 5]—a property’s presence/absence (e.g., “is furry/not
+<br/>furry”)—and relative attributes [6, 8]—a property’s relative strength (e.g.,
+<br/>“furrier than”). In this work, we focus on relative attributes since they of-
+<br/>ten describe object properties better than binary ones [6], especially if the
+<br/>property exhibits large appearance variations (see Fig. 1).
+<br/>While most existing work use global image representations to model
+<br/>attributes (e.g., [5, 6]), recent work demonstrates the effectiveness of using
+<br/>localized part-based representations [1, 7, 9]. They show that attributes—be
+<br/>it global (“is male”) or local (“smiling”)—can be more accurately learned
+<br/>by first bringing the underlying object-parts into correspondence, and then
+<br/>modeling the attributes conditioned on those object-parts. To compute such
+<br/>correspondences, pre-trained part detectors are used (e.g., faces [7] and peo-
+<br/>ple [1, 9]). However, because the part detectors are trained independently of
+<br/>the attribute, the learned parts may not necessarily be useful for modeling
+<br/>the desired attribute. Furthermore, some objects do not naturally have well-
+<br/>defined parts, which means modeling the part-based detector itself becomes
+<br/>a challenge. The approach of [2] address these issues by discovering useful
+<br/>and localized attributes. However, it requires a human-in-the-loop, which
+<br/>limits its scalability.
+<br/>So, how can we develop robust visual representations for relative at-
+<br/>tributes, without expensive and potentially uninformative pre-trained part
+<br/>detectors or humans-in-the-loop? To do so, we will need to automatically
+<br/>identify the visual patterns in each image whose appearance correlates with
+<br/>attribute strength.
+<br/>In this work, we propose a method that automatically
+<br/>discovers the spatial extent of relative attributes in images across varying at-
+<br/>tribute strengths. The main idea is to leverage the fact that the visual concept
+<br/>underlying the attribute undergos a gradual change in appearance across
+<br/>the attribute spectrum. In this way, we propose to discover a set of local,
+<br/>transitive connections (“visual chains”) that establish correspondences be-
+<br/>tween the same object-part, even when its appearance changes drastically
+<br/>over long ranges. Given the candidate set of visual chains, we then automat-
+<br/>ically select those that together best model the changing appearance of the
+<br/>attribute across the attribute spectrum. Importantly, by combining a subset
+<br/>of the most-informative discovered visual chains, our approach aims to dis-
+<br/>cover the full spatial extent of the attribute, whether it be concentrated on a
+<br/>particular object-part or spread across a larger spatial area.
+<br/>2 Approach
+<br/>Given an image collection S={I1, . . . ,IN} with pairwise ordered and un-
+<br/>ordered image-level relative comparisons of an attribute (i.e., in the form of
+<br/>Ω(Ii)>Ω(Ij) and Ω(Ii)≈Ω(Ij), where i, j∈{1, . . . ,N} and Ω(Ii) is Ii’s at-
+<br/>tribute strength), our goal is to discover the spatial extent of the attribute in
+<br/>each image and learn a ranking function that predicts the attribute strength
+<br/>for any new image.
+<br/>There are three main steps to our approach: (1) initializing a candidate
+<br/>set of visual chains; (2) iteratively growing each visual chain along the at-
+<br/>tribute spectrum; and (3) ranking the chains according to their relevance to
+<br/>the target attribute to create an ensemble image representation.
+<br/>Initializing candidate visual chains: A visual attribute can potentially
+<br/>exhibit large appearance variations across the attribute spectrum. Take the
+<br/>(top) Given pairs of images, each ordered according to rela-
+<br/>Figure 1:
+<br/>tive attribute strength (e.g., “higher/lower-at-the-heel”), (bottom) our ap-
+<br/>proach automatically discovers the attribute’s spatial extent in each image,
+<br/>and learns a ranking function that orders the image collection according to
+<br/>predicted attribute strength.
+<br/>high-at-the-heel attribute as an example: high-heeled shoes have strong
+<br/>vertical gradients while flat-heeled shoes have strong horizontal gradients.
+<br/>However, the attribute’s appearance will be quite similar in any local region
+<br/>of the attribute spectrum. Therefore, we start with multiple short but visu-
+<br/>ally homogeneous chains of image regions in a local region of the attribute
+<br/>spectrum, and smoothly grow them out to cover the entire spectrum.
+<br/>We start by first sorting the images in S in descending order of predicted
+<br/>attribute strength—with ˜I1 as the strongest image and ˜IN as the weakest—
+<br/>using a linear SVM-ranker trained with global image features. To initialize
+<br/>a single chain, we take the top Ninit images and select a set of patches (one
+<br/>from each image) whose appearance varies smoothly with its neighbors in
+<br/>the chain, by minimizing the following objective function:
+<br/>Ninit∑
+<br/>||φ (Pi)− φ (Pi−1)||2,
+<br/>i=2
+<br/>min
+<br/>C(P) =
+<br/>(1)
+<br/>where φ (Pi) is the appearance feature of patch Pi in ˜Ii, and P ={P1, . . . ,PNinit}
+<br/>is the set of patches in a chain. Candidate patches for each image are densely
+<br/>sampled at multiple scales. This objective enforces local smoothness: the
+<br/>appearances of the patches in the images with neighboring indices should
+<br/>vary smoothly within a chain. Given the objective’s chain structure, we can
+<br/>efficiently find its global optimum using Dynamic Programming (DP).
+<br/>In the backtracking stage of DP, we obtain a large number of K-best
+<br/>solutions. We then perform a chain-level non-maximum-suppression (NMS)
+<br/>to remove redundant chains to retain a set of Kinit diverse candidate chains.
+<br/>Iteratively growing each visual chain: The initial set of Kinit chains are
+<br/>visually homogeneous but cover only a tiny fraction of the attribute spec-
+<br/>trum. We next iteratively grow each chain to cover the entire attribute spec-
+<br/>trum by training a model that adapts to the attribute’s smoothly changing
+<br/>appearance. Specifically, for each chain, we iteratively train a detector and
+<br/>in each iteration and use it to grow the chain while simultaneously refining
+<br/>it. To grow the chain, we again minimize Eqn. 1 but now with an additional
+<br/>term:
+<br/>t∗Niter∑
+<br/>t∗Niter∑
+<br/>wT
+<br/>t φ (Pi),
+<br/>||φ (Pi)− φ (Pi−1)||2 − λ
+<br/>i=2
+<br/>i=1
+<br/>min
+<br/>C(P) =
+<br/>(2)
+<br/>where wt is a linear SVM detector learned from the patches in the chain
+<br/>from the (t−1)-th iteration, P = {P1, . . . ,Pt∗Niter} is the set of patches in a
+<br/>chain, and Niter is the number of images considered in each iteration. As
+<br/>before, the first term enforces local smoothness. The second term is the
+<br/>detection term: since the ordering of the images in the chain is only a rough
+<br/>estimate and thus possibly noisy, wt prevents the inference from drifting in
+<br/>the cases where local smoothness does not strictly hold. λ is a constant that
+<br/>trades-off the two terms. We use the same DP inference procedure used to
+<br/>optimize Eqn. 1.
+<br/>Once P is found, we train a new detector with all of its patches as posi-
+<br/>tive instances. The negative instances consist of randomly sampled patches
+<br/>strongweak,Attribute: “high-at-the-heel”,, </td><td>('2299381', 'Fanyi Xiao', 'fanyi xiao')<br/>('1883898', 'Yong Jae Lee', 'yong jae lee')</td><td></td></tr><tr><td>14ba910c46d659871843b31d5be6cba59843a8b8</td><td>Face Recognition in Movie Trailers via Mean Sequence Sparse
+<br/>Representation-based Classification
+<br/><b>Center for Research in Computer Vision, University of Central Florida, Orlando, FL</b></td><td>('16131262', 'Enrique G. Ortiz', 'enrique g. ortiz')<br/>('2003981', 'Alan Wright', 'alan wright')<br/>('1745480', 'Mubarak Shah', 'mubarak shah')</td><td>eortiz@cs.ucf.edu, alanwright@knights.ucf.edu, shah@crcv.ucf.edu
+</td></tr><tr><td>1467c4ab821c3b340abe05a1b13a19318ebbce98</td><td>Multitask and Transfer Learning for
+<br/>Multi-Aspect Data
+<br/>Bernardino Romera Paredes
+<br/>UCL
+<br/>A dissertation submitted in partial fulfillment
+<br/>of the requirements for the degree of
+<br/><b>Doctor of Philosophy of University College London</b></td><td></td><td></td></tr><tr><td>14318d2b5f2cf731134a6964d8193ad761d86942</td><td>FaceDNA: Intelligent Face Recognition
+<br/>System with Intel RealSense 3D Camera
+<br/><b>National Taiwan University</b><br/>
+</td><td>('1678531', 'Dan Ye', 'dan ye')<br/>('40063567', 'Shih-Wei Liao', 'shih-wei liao')</td><td></td></tr><tr><td>142dcfc3c62b1f30a13f1f49c608be3e62033042</td><td>Adaptive Region Pooling for Object Detection
+<br/>UC Merced
+<br/>Qualcomm Research, San Diego
+<br/>UC Merced
+</td><td>('2580349', 'Yi-Hsuan Tsai', 'yi-hsuan tsai')<br/>('1872879', 'Onur C. Hamsici', 'onur c. hamsici')<br/>('1715634', 'Ming-Hsuan Yang', 'ming-hsuan yang')</td><td>ytsai2@ucmerced.edu
+<br/>ohamsici@qti.qualcomm.com
+<br/>mhyang@ucmerced.edu
+</td></tr><tr><td>14c0f9dc9373bea1e27b11fa0594c86c9e632c8d</td><td>Adaptive Exponential Smoothing for Online Filtering of Pixel Prediction Maps
+<br/>School of Electrical and Electronic Engineering,
+<br/><b>Nanyang Technological University, Singapore</b></td><td>('3064975', 'Kang Dang', 'kang dang')<br/>('1691251', 'Jiong Yang', 'jiong yang')<br/>('34316743', 'Junsong Yuan', 'junsong yuan')</td><td>{dang0025, yang0374}@e.ntu.edu.sg, jsyuan@ntu.edu.sg
+</td></tr><tr><td>1439bf9ba7ff97df9a2da6dae4784e68794da184</td><td>LGE-KSVD: Flexible Dictionary Learning for Optimized Sparse
+<br/>Representation Classification
+<br/>Raymond Ptucha
+<br/><b>Rochester Institute of Technology</b><br/>Rochester, NY, USA
+</td><td></td><td>rwpeec@rit.edu
+</td></tr><tr><td>141768ab49a5a9f5adcf0cf7e43a23471a7e5d82</td><td>Relative Facial Action Unit Detection
+<br/>Department of Computing and Software
+<br/><b>McMaster University</b><br/>Hamilton, Canada
+</td><td>('1736464', 'Mahmoud Khademi', 'mahmoud khademi')</td><td>khademm@mcmaster.ca
+</td></tr><tr><td>14e428f2ff3dc5cf96e5742eedb156c1ea12ece1</td><td>Facial Expression Recognition Using Neural Network Trained with Zernike
+<br/>Moments
+<br/>Dept. Génie-Electrique
+<br/>Université M.C.M Souk-Ahras
+<br/>Souk-Ahras, Algeria
+</td><td>('3112602', 'Mohammed Saaidia', 'mohammed saaidia')</td><td>mohamed.saaidia@univ-soukahras.dz
+</td></tr><tr><td>14bca107bb25c4dce89210049bf39ecd55f18568</td><td>X.HUANG:EMOTIONRECOGNITIONFROMFACIALIMAGES
+<br/>Emotion recognition from facial images with
+<br/>arbitrary views
+<br/>Center for Machine Vision Research
+<br/>Department of Computer Science and
+<br/>Engineering
+<br/><b>University of Oulu</b><br/>Oulu, Finland
+</td><td>('18780812', 'Xiaohua Huang', 'xiaohua huang')<br/>('1757287', 'Guoying Zhao', 'guoying zhao')<br/>('1714724', 'Matti Pietikäinen', 'matti pietikäinen')</td><td>huang.xiaohua@ee.oulu.fi
+<br/>gyzhao@ee.oulu.fi
+<br/>mkp@ee.oulu.fi
+</td></tr><tr><td>14a5feadd4209d21fa308e7a942967ea7c13b7b6</td><td>978-1-4673-0046-9/12/$26.00 ©2012 IEEE
+<br/>1025
+<br/>ICASSP 2012
+</td><td></td><td></td></tr><tr><td>14fee990a372bcc4cb6dc024ab7fc4ecf09dba2b</td><td>Modeling Spatio-Temporal Human Track Structure for Action
+<br/>Localization
+</td><td>('2926143', 'Anton Osokin', 'anton osokin')</td><td></td></tr><tr><td>14ee4948be56caeb30aa3b94968ce663e7496ce4</td><td>Jang, Y; Gunes, H; Patras, I
+<br/>© Copyright 2018 IEEE
+<br/>For additional information about this publication click this link.
+<br/>http://qmro.qmul.ac.uk/xmlui/handle/123456789/36405
+<br/>Information about this research object was correct at the time of download; we occasionally
+<br/>make corrections to records, please therefore check the published record when citing. For
+</td><td></td><td>more information contact scholarlycommunications@qmul.ac.uk
+</td></tr><tr><td>8ec82da82416bb8da8cdf2140c740e1574eaf84f</td><td>CHUNG AND ZISSERMAN: BMVC AUTHOR GUIDELINES
+<br/>Lip Reading in Profile
+<br/>http://www.robots.ox.ac.uk/~joon
+<br/>http://www.robots.ox.ac.uk/~az
+<br/>Visual Geometry Group
+<br/>Department of Engineering Science
+<br/><b>University of Oxford</b><br/>Oxford, UK
+</td><td>('2863890', 'Joon Son Chung', 'joon son chung')<br/>('1688869', 'Andrew Zisserman', 'andrew zisserman')</td><td></td></tr><tr><td>8ee62f7d59aa949b4a943453824e03f4ce19e500</td><td>Robust Head-Pose Estimation Based on
+<br/>Partially-Latent Mixture of Linear Regression
+<br/>∗INRIA Grenoble Rhˆone-Alpes, Montbonnot Saint-Martin, France
+<br/>†INRIA Rennes Bretagne Atlantique, Rennes, France
+</td><td>('2188660', 'Vincent Drouard', 'vincent drouard')<br/>('1794229', 'Radu Horaud', 'radu horaud')<br/>('3307172', 'Antoine Deleforge', 'antoine deleforge')<br/>('1690536', 'Georgios Evangelidis', 'georgios evangelidis')</td><td></td></tr><tr><td>8e0ede53dc94a4bfcf1238869bf1113f2a37b667</td><td>Joint Patch and Multi-label Learning for Facial Action Unit Detection
+<br/><b>School of Comm. and Info. Engineering, Beijing University of Posts and Telecom., Beijing China</b><br/><b>Robotics Institute, Carnegie Mellon University, Pittsburgh, PA</b><br/><b>University of Pittsburgh, Pittsburgh, PA</b></td><td>('2393320', 'Kaili Zhao', 'kaili zhao')<br/>('1720776', 'Honggang Zhang', 'honggang zhang')</td><td></td></tr><tr><td>8e33183a0ed7141aa4fa9d87ef3be334727c76c0</td><td>– COS429 Written Report, Fall 2017 –
+<br/>Robustness of Face Recognition to Image Manipulations
+<br/>1. Motivation
+<br/>We can often recognize pictures of people we know even if the image has low resolution or obscures
+<br/>part of the face, if the camera angle resulted in a distorted image of the subject’s face, or if the
+<br/>subject has aged or put on makeup since we last saw them. Although this is a simple recognition task
+<br/>for a human, when we think about how we accomplish this task, it seems non-trivial for computer
+<br/>algorithms to recognize faces despite visual changes.
+<br/>Computer facial recognition is relied upon for many application where accuracy is important.
+<br/>Facial recognition systems have applications ranging from airport security and suspect identification
+<br/>to personal device authentication and face tagging [7]. In these real-world applications, the system
+<br/>must continue to recognize images of a person who looks slightly different due to the passage of
+<br/>time, a change in environment, or a difference in clothing.
+<br/>Therefore, we are interested in investigating face recognition algorithms and their robustness to
+<br/>image changes resulting from realistically plausible manipulations. Furthermore, we are curious
+<br/>about whether the impact of image manipulations on computer algorithms’ face recognition ability
+<br/>mirrors related insights from neuroscience about humans’ face recognition abilities.
+<br/>2. Goal
+<br/>In this project, we implement both face recognition algorithms and image manipulations. We then
+<br/>analyze the impact of each image manipulation on the recognition accuracy each algorithm, and
+<br/>how these influences depend on the accuracy of each algorithm on non-manipulated images.
+<br/>3. Background and Related Work
+<br/>Researchers have developed a wide variety of face recognition algorithms, such as traditional
+<br/>statistical methods such as PCA, more opaque methods such as deep neural networks, and proprietary
+<br/>systems used by governments and corporations [1][13][14].
+<br/>Similarly, others have developed image manipulations using principles from linear algebra, such
+<br/>as mimicking distortions from lens distortions, as well as using neural networks, such as a system
+<br/>for transforming images according to specified characteristics [12][16].
+<br/>Furthermore, researchers in psychology have studied face recognition in humans. A study of
+<br/>“super-recognizers” (people with extraordinarily high powers of face recognition) and “developmen-
+<br/>tal prosopagnosics” (people with severely impaired face recognition abilities) found that inverting
+<br/>images of faces impaired recognition ability more for people with stronger face recognition abilities
+<br/>[11]. This could indicate that image manipulations tend to equalize face recognition abilities, and
+<br/>we investigate whether this is the case with the manipulations and face recognition algorithms we
+<br/>test.
+</td><td>('1897270', 'Cathy Chen', 'cathy chen')</td><td></td></tr><tr><td>8e3d0b401dec8818cd0245c540c6bc032f169a1d</td><td>McGan: Mean and Covariance Feature Matching GAN
+</td><td>('2211263', 'Youssef Mroueh', 'youssef mroueh')</td><td></td></tr><tr><td>8e3c97e420e0112c043929087d6456d8ab61e95c</td><td>SAFDARNEJAD et al.: ROBUST GLOBAL MOTION COMPENSATION
+<br/>Robust Global Motion Compensation in
+<br/>Presence of Predominant Foreground
+<br/>https://www.msu.edu/~safdarne/
+<br/>http://www.cse.msu.edu/~liuxm/
+<br/>http://www.egr.msu.edu/ndel/profile/lalita-udpa
+<br/><b>Michigan State University</b><br/>East Lansing
+<br/>Michigan, USA
+</td><td>('2941187', 'Seyed Morteza Safdarnejad', 'seyed morteza safdarnejad')<br/>('1759169', 'Xiaoming Liu', 'xiaoming liu')<br/>('1938832', 'Lalita Udpa', 'lalita udpa')</td><td></td></tr><tr><td>8e0ab1b08964393e4f9f42ca037220fe98aad7ac</td><td>UV-GAN: Adversarial Facial UV Map Completion for Pose-invariant Face
+<br/>Recognition
+<br/><b>Imperial College London</b></td><td>('3234063', 'Jiankang Deng', 'jiankang deng')<br/>('1902288', 'Shiyang Cheng', 'shiyang cheng')<br/>('4091869', 'Niannan Xue', 'niannan xue')<br/>('47943220', 'Yuxiang Zhou', 'yuxiang zhou')</td><td>j.deng16, shiyang.cheng11,n.xue15,yuxiang.zhou10,s.zafeiriou@imperial.ac.uk
+</td></tr><tr><td>8e94ed0d7606408a0833e69c3185d6dcbe22bbbe</td><td>© 2012 IEEE. Personal use of this material is permitted. Permission from IEEE
+<br/>must be obtained for all other uses, in any current or future media, including
+<br/>reprinting/republishing this material for advertising or promotional purposes,
+<br/>creating new collective works, for resale or redistribution to servers or lists, or
+<br/>reuse of any copyrighted component of this work in other works.
+<br/>Pre-print of article that will appear at WACV 2012.
+</td><td></td><td></td></tr><tr><td>8e461978359b056d1b4770508e7a567dbed49776</td><td>LOMo: Latent Ordinal Model for Facial Analysis in Videos
+<br/>Marian Bartlett1,∗,‡
+<br/>1UCSD, USA
+<br/>2MPI for Informatics, Germany
+<br/>3IIT Kanpur, India
+</td><td>('39707211', 'Karan Sikka', 'karan sikka')<br/>('39396475', 'Gaurav Sharma', 'gaurav sharma')</td><td></td></tr><tr><td>8e4808e71c9b9f852dc9558d7ef41566639137f3</td><td>Adversarial Generative Nets: Neural Network
+<br/>Attacks on State-of-the-Art Face Recognition
+<br/><b>Carnegie Mellon University</b><br/><b>University of North Carolina at Chapel Hill</b></td><td>('36301492', 'Mahmood Sharif', 'mahmood sharif')<br/>('38181360', 'Sruti Bhagavatula', 'sruti bhagavatula')<br/>('38572260', 'Lujo Bauer', 'lujo bauer')<br/>('1746214', 'Michael K. Reiter', 'michael k. reiter')</td><td>{mahmoods, srutib, lbauer}@cmu.edu
+<br/>reiter@cs.unc.edu
+</td></tr><tr><td>8ea30ade85880b94b74b56a9bac013585cb4c34b</td><td>FROM TURBO HIDDEN MARKOV MODELS TO TURBO STATE-SPACE MODELS
+<br/>Institut Eur´ecom
+<br/>Multimedia Communications Department
+<br/>BP 193, 06904 Sophia Antipolis Cedex, France
+</td><td>('1723883', 'Florent Perronnin', 'florent perronnin')<br/>('1709849', 'Jean-Luc Dugelay', 'jean-luc dugelay')</td><td>fflorent.perronnin, jean-luc.dugelayg@eurecom.fr
+</td></tr><tr><td>8ed32c8fad924736ebc6d99c5c319312ba1fa80b</td><td></td><td></td><td></td></tr><tr><td>8e0ad1ccddc7ec73916eddd2b7bbc0019d8a7958</td><td>Segment-based SVMs for
+<br/>Time Series Analysis
+<br/>CMU-RI-TR-12-1
+<br/>Submitted in partial fulfillment of the
+<br/>requirements for the degree of
+<br/>Doctor of Philosophy in Robotics
+<br/><b>The Robotics Institute</b><br/><b>Carnegie Mellon University</b><br/>Pittsburgh, Pennsylvania 15213
+<br/>Version: 20 Jan 2012
+<br/>Thesis Committee:
+<br/>Fernando De la Torre (chair)
+</td><td>('1698158', 'Minh Hoai Nguyen', 'minh hoai nguyen')<br/>('1709305', 'Martial Hebert', 'martial hebert')<br/>('1730156', 'Carlos Guestrin', 'carlos guestrin')<br/>('2038264', 'Frank Dellaert', 'frank dellaert')<br/>('1698158', 'Minh Hoai Nguyen', 'minh hoai nguyen')</td><td></td></tr><tr><td>8e8e3f2e66494b9b6782fb9e3f52aeb8e1b0d125</td><td>in any current or
+<br/>future media,
+<br/>for all other uses,
+<br/> 2012 IEEE. Personal use of this material is permitted. Permission from IEEE must be
+<br/>obtained
+<br/>including
+<br/>reprinting/republishing this material for advertising or promotional purposes, creating
+<br/>new collective works, for resale or redistribution to servers or lists, or reuse of any
+<br/>copyrighted component of this work in other works.
+<br/>Pre-print of article that will appear at BTAS 2012.!!
+</td><td></td><td></td></tr><tr><td>8e378ef01171b33c59c17ff5798f30293fe30686</td><td>Lehrstuhl f¨ur Mensch-Maschine-Kommunikation
+<br/>der Technischen Universit¨at M¨unchen
+<br/>A System for Automatic Face Analysis
+<br/>Based on
+<br/>Statistical Shape and Texture Models
+<br/>Ronald M¨uller
+<br/>Vollst¨andiger Abdruck der von der Fakult¨at
+<br/>f¨ur Elektrotechnik und Informationstechnik
+<br/>der Technischen Universit¨at M¨unchen
+<br/>zur Erlangung des akademischen Grades eines
+<br/>Doktor-Ingenieurs
+<br/>genehmigten Dissertation
+<br/>Vorsitzender: Prof. Dr. rer. nat. Bernhard Wolf
+<br/>Pr¨ufer der Dissertation:
+<br/>1. Prof. Dr.-Ing. habil. Gerhard Rigoll
+<br/>2. Prof. Dr.-Ing. habil. Alexander W. Koch
+<br/>Die Dissertation wurde am 28.02.2008 bei der Technischen Universit¨at M¨unchen
+<br/>eingereicht und durch die Fakult¨at f¨ur Elektrotechnik und Informationstechnik
+<br/>am 18.09.2008 angenommen.
+</td><td></td><td></td></tr><tr><td>8ed051be31309a71b75e584bc812b71a0344a019</td><td>Class-based feature matching across unrestricted
+<br/>transformations
+</td><td>('1938475', 'Evgeniy Bart', 'evgeniy bart')<br/>('1743045', 'Shimon Ullman', 'shimon ullman')</td><td></td></tr><tr><td>8e36100cb144685c26e46ad034c524b830b8b2f2</td><td>Modeling Facial Geometry using Compositional VAEs
+<br/>1 ´Ecole Polytechnique F´ed´erale de Lausanne
+<br/>2Facebook Reality Labs, Pittsburgh
+</td><td>('33846296', 'Chenglei Wu', 'chenglei wu')<br/>('14373499', 'Jason Saragih', 'jason saragih')<br/>('1717736', 'Pascal Fua', 'pascal fua')<br/>('1774867', 'Yaser Sheikh', 'yaser sheikh')</td><td>{firstname.lastname}@epfl.ch, {firstname.lastname}@fb.com
+</td></tr><tr><td>8ed33184fccde677ec8413ae06f28ea9f2ca70f3</td><td>Multimodal Visual Concept Learning with Weakly Supervised Techniques
+<br/><b>School of E.C.E., National Technical University of Athens, Greece</b></td><td>('7311172', 'Giorgos Bouritsas', 'giorgos bouritsas')<br/>('2539459', 'Petros Koutras', 'petros koutras')<br/>('2641229', 'Athanasia Zlatintsi', 'athanasia zlatintsi')<br/>('1750686', 'Petros Maragos', 'petros maragos')</td><td>gbouritsas@gmail.com, {pkoutras, nzlat, maragos}@cs.ntua.gr
+</td></tr><tr><td>8ee5b1c9fb0bded3578113c738060290403ed472</td><td>Extending Explicit Shape Regression with
+<br/>Mixed Feature Channels and Pose Priors
+<br/><b>Karlsruhe Institute of</b><br/>Technology (KIT)
+<br/>Karlsruhe, Germany
+<br/>Hazım Kemal Ekenel
+<br/>´Ecole Polytechnique F´ed´erale
+<br/>de Lausanne (EPFL)
+<br/>Lausanne, Switzerland
+<br/>Istanbul Technical
+<br/><b>University (ITU</b><br/>Istanbul, Turkey
+</td><td>('39610204', 'Matthias Richter', 'matthias richter')<br/>('1697965', 'Hua Gao', 'hua gao')</td><td>matthias.richter@kit.edu
+<br/>hua.gao@epfl.ch
+<br/>ekenel@itu.edu.tr
+</td></tr><tr><td>8e0becfc5fe3ecdd2ac93fabe34634827b21ef2b</td><td>International Journal of Computer Vision manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Learning from Longitudinal Face Demonstration -
+<br/>Where Tractable Deep Modeling Meets Inverse Reinforcement Learning
+<br/>Savvides · Tien D. Bui
+<br/>Received: date / Accepted: date
+</td><td>('1876581', 'Chi Nhan Duong', 'chi nhan duong')</td><td></td></tr><tr><td>8efda5708bbcf658d4f567e3866e3549fe045bbb</td><td>Pre-trained Deep Convolutional Neural Networks
+<br/>for Face Recognition
+<br/>Siebert Looije
+<br/>S2209276
+<br/>January 2018
+<br/>MSc. Thesis
+<br/>Artificial Intelligence
+<br/><b>University of Groningen, The Netherlands</b><br/>Supervisors
+<br/>Dr. M.A. (Marco) Wiering
+<br/>K. (Klaas) Dijkstra, MSc.
+<br/><b>ALICE Institute</b><br/><b>University of Groningen</b><br/>Nijenborgh 9, 9747 AG, Groningen, The Netherlands
+<br/><b>facultyofmathematicsandnaturalsciencesarti cialintelligence22-09-2016|1ATitleA.UthorRijksuniversiteitGroningenSomeFaculty</b></td><td></td><td></td></tr><tr><td>2227f978f084ebb18cb594c0cfaf124b0df6bf95</td><td>Pillar Networks for action recognition
+<br/>B Sengupta
+<br/>Cortexica Vision Systems Limited
+<br/><b>Imperial College London</b><br/>London, UK
+<br/>Y Qian
+<br/>Cortexica Vision Systems Limited
+<br/>30 Stamford Street SE1 9LQ
+<br/>London, UK
+</td><td></td><td>b.sengupta@imperial.ac.uk
+<br/>yu.qian@cortexica.com
+</td></tr><tr><td>225fb9181545f8750061c7693661b62d715dc542</td><td></td><td></td><td></td></tr><tr><td>22043cbd2b70cb8195d8d0500460ddc00ddb1a62</td><td>Separability-Oriented Subclass Discriminant
+<br/>Analysis
+</td><td>('2986129', 'Huan Wan', 'huan wan')<br/>('27838939', 'Hui Wang', 'hui wang')<br/>('35009947', 'Gongde Guo', 'gongde guo')<br/>('10803956', 'Xin Wei', 'xin wei')</td><td></td></tr><tr><td>22137ce9c01a8fdebf92ef35407a5a5d18730dde</td><td></td><td></td><td></td></tr><tr><td>22e2066acfb795ac4db3f97d2ac176d6ca41836c</td><td>Coarse-to-Fine Auto-Encoder Networks (CFAN)
+<br/>for Real-Time Face Alignment
+<br/>1 Key Lab of Intelligent Information Processing of Chinese Academy of Sciences
+<br/><b>CAS), Institute of Computing Technology, CAS, Beijing 100190, China</b><br/><b>University of Chinese Academy of Sciences, Beijing 100049, China</b></td><td>('1698586', 'Jie Zhang', 'jie zhang')<br/>('1685914', 'Shiguang Shan', 'shiguang shan')<br/>('1693589', 'Meina Kan', 'meina kan')<br/>('1710220', 'Xilin Chen', 'xilin chen')</td><td>{jie.zhang,shiguang.shan,meina.kan,xilin.chen}@vipl.ict.ac.cn
+</td></tr><tr><td>22717ad3ad1dfcbb0fd2f866da63abbde9af0b09</td><td>A Learning-based Control Architecture for Socially
+<br/>Assistive Robots Providing Cognitive Interventions
+<br/>by
+<br/>A thesis submitted in conformity with the requirements
+<br/>for the degree of Masters of Applied Science
+<br/>Mechanical and Industrial Engineering
+<br/><b>University of Toronto</b></td><td>('39999379', 'Jeanie Chan', 'jeanie chan')<br/>('39999379', 'Jeanie Chan', 'jeanie chan')</td><td></td></tr><tr><td>2288696b6558b7397bdebe3aed77bedec7b9c0a9</td><td>WU, WANG, YANG, JI: JOINT ATTENTION ON MULTI-LEVEL DEEP FEATURES 1
+<br/>Action Recognition with Joint Attention
+<br/>on Multi-Level Deep Features
+<br/>Dept of Automation
+<br/><b>Tsinghua University</b><br/>Beijing, China
+</td><td>('35585536', 'Jialin Wu', 'jialin wu')<br/>('29644358', 'Gu Wang', 'gu wang')<br/>('3432961', 'Wukui Yang', 'wukui yang')<br/>('7807689', 'Xiangyang Ji', 'xiangyang ji')</td><td>wujl13@mails.tsinghua.edu.cn
+<br/>wangg12@mails.tsinghua.edu.cn
+<br/>yang-wk15@mails.tsinghua.edu.cn
+<br/>xyji@mail.tsinghua.edu.cn
+</td></tr><tr><td>22264e60f1dfbc7d0b52549d1de560993dd96e46</td><td>UnitBox: An Advanced Object Detection Network
+<br/>Thomas Huang1
+<br/><b>University of Illinois at Urbana Champaign</b><br/>2Megvii Inc
+</td><td>('3451838', 'Jiahui Yu', 'jiahui yu')<br/>('1691963', 'Yuning Jiang', 'yuning jiang')<br/>('2969311', 'Zhangyang Wang', 'zhangyang wang')<br/>('2695115', 'Zhimin Cao', 'zhimin cao')</td><td>{jyu79, zwang119, t-huang1}@illinois.edu, {jyn, czm}@megvii.com
+</td></tr><tr><td>22dada4a7ba85625824489375184ba1c3f7f0c8f</td><td></td><td></td><td></td></tr><tr><td>221252be5d5be3b3e53b3bbbe7a9930d9d8cad69</td><td>ZHU, VONDRICK, RAMANAN, AND FOWLKES: MORE DATA OR BETTER MODELS
+<br/>Do We Need More Training Data or Better
+<br/>Models for Object Detection?
+<br/>1 Computer Science Department
+<br/><b>University of California</b><br/>Irvine, CA, USA
+<br/>2 CSAIL
+<br/><b>Massachusetts Institute of Technology</b><br/>Cambridge, MA, USA
+<br/>(Work performed while at UC Irvine)
+</td><td>('32542103', 'Xiangxin Zhu', 'xiangxin zhu')<br/>('1856025', 'Carl Vondrick', 'carl vondrick')<br/>('1770537', 'Deva Ramanan', 'deva ramanan')<br/>('3157443', 'Charless C. Fowlkes', 'charless c. fowlkes')</td><td>xzhu@ics.uci.edu
+<br/>vondrick@mit.edu
+<br/>dramanan@ics.uci.edu
+<br/>fowlkes@ics.uci.edu
+</td></tr><tr><td>223ec77652c268b98c298327d42aacea8f3ce23f</td><td>TR-CS-11-02
+<br/>Acted Facial Expressions In The Wild
+<br/>Database
+<br/>September 2011
+<br/>ANU Computer Science Technical Report Series
+</td><td>('1735697', 'Abhinav Dhall', 'abhinav dhall')<br/>('1717204', 'Roland Goecke', 'roland goecke')<br/>('27011207', 'Tom Gedeon', 'tom gedeon')</td><td></td></tr><tr><td>22df6b6c87d26f51c0ccf3d4dddad07ce839deb0</td><td>Fast Action Proposals for Human Action Detection and Search
+<br/>School of Electrical and Electronic Engineering
+<br/><b>Nanyang Technological University, Singapore</b></td><td>('2352391', 'Gang Yu', 'gang yu')<br/>('34316743', 'Junsong Yuan', 'junsong yuan')</td><td>iskicy@gmail.com, jsyuan@ntu.edu.sg
+</td></tr><tr><td>228558a2a38a6937e3c7b1775144fea290d65d6c</td><td>Nonparametric Context Modeling of Local Appearance
+<br/>for Pose- and Expression-Robust Facial Landmark Localization
+<br/><b>University of Wisconsin Madison</b><br/>Zhe Lin2
+<br/>2Adobe Research
+<br/>http://www.cs.wisc.edu/~lizhang/projects/face-landmark-localization/
+</td><td>('1893050', 'Brandon M. Smith', 'brandon m. smith')<br/>('1721019', 'Jonathan Brandt', 'jonathan brandt')<br/>('40396555', 'Li Zhang', 'li zhang')</td><td></td></tr><tr><td>22fdd8d65463f520f054bf4f6d2d216b54fc5677</td><td>International Journal of Emerging Technology and Advanced Engineering
+<br/>Website: www.ijetae.com (ISSN 2250-2459, ISO 9001:2008 Certified Journal, Volume 3, Issue 8, August 2013)
+<br/>Efficient Small and Capital Handwritten Character
+<br/>Recognition with Noise Reduction
+<br/><b>IES College of Technology, Bhopal</b></td><td>('1926347', 'Shailendra Tiwari', 'shailendra tiwari')<br/>('2152231', 'Sandeep Kumar', 'sandeep kumar')</td><td></td></tr><tr><td>2251a88fbccb0228d6d846b60ac3eeabe468e0f1</td><td>Matrix-Based Kernel Subspace Methods
+<br/>Integrated Data Systems Department
+<br/>Siemens Corporate Research
+<br/><b>College Road East, Princeton, NJ</b></td><td>('1682187', 'S. Kevin Zhou', 's. kevin zhou')</td><td>Email: {kzhou}@scr.siemens.com
+</td></tr><tr><td>22e678d3e915218a7c09af0d1602e73080658bb7</td><td>Adventures in Archiving and Using Three Years of Webcam Images
+<br/>Department of Computer Science and Engineering
+<br/><b>Washington University, St. Louis, MO, USA</b></td><td>('1990750', 'Nathan Jacobs', 'nathan jacobs')<br/>('39795519', 'Walker Burgin', 'walker burgin')<br/>('1761429', 'Robert Pless', 'robert pless')</td><td>{jacobsn,wsb1,rzs1,dyr1,pless}@cse.wustl.edu
+</td></tr><tr><td>2201f187a7483982c2e8e2585ad9907c5e66671d</td><td>Joint Face Alignment and 3D Face Reconstruction
+<br/><b>College of Computer Science, Sichuan University, Chengdu, China</b><br/>2 Department of Computer Science and Engineering
+<br/><b>Michigan State University, East Lansing, MI, U.S.A</b></td><td>('50207647', 'Feng Liu', 'feng liu')<br/>('39422721', 'Dan Zeng', 'dan zeng')<br/>('7345195', 'Qijun Zhao', 'qijun zhao')<br/>('1759169', 'Xiaoming Liu', 'xiaoming liu')</td><td></td></tr><tr><td>227b18fab568472bf14f9665cedfb95ed33e5fce</td><td>Compositional Dictionaries for Domain Adaptive
+<br/>Face Recognition
+</td><td>('2077648', 'Qiang Qiu', 'qiang qiu')<br/>('9215658', 'Rama Chellappa', 'rama chellappa')</td><td></td></tr><tr><td>227b1a09b942eaf130d1d84cdcabf98921780a22</td><td>Yang et al. EURASIP Journal on Advances in Signal Processing (2018) 2018:51
+<br/>https://doi.org/10.1186/s13634-018-0572-6
+<br/>EURASIP Journal on Advances
+<br/>in Signal Processing
+<br/>R ES EAR CH
+<br/>Multi-feature shape regression for face
+<br/>alignment
+<br/>Open Access
+</td><td>('3413708', 'Wei-jong Yang', 'wei-jong yang')<br/>('49070426', 'Yi-Chen Chen', 'yi-chen chen')<br/>('1789917', 'Pau-Choo Chung', 'pau-choo chung')<br/>('1749263', 'Jar-Ferr Yang', 'jar-ferr yang')</td><td></td></tr><tr><td>2241eda10b76efd84f3c05bdd836619b4a3df97e</td><td>One-to-many face recognition with bilinear CNNs
+<br/>Aruni RoyChowdhury
+<br/><b>University of Massachusetts, Amherst</b><br/>Erik Learned-Miller
+</td><td>('2144284', 'Tsung-Yu Lin', 'tsung-yu lin')<br/>('35208858', 'Subhransu Maji', 'subhransu maji')</td><td>{arunirc,tsungyulin,smaji,elm}@cs.umass.edu
+</td></tr><tr><td>22646cf884cc7093b0db2c1731bd52f43682eaa8</td><td>Human Action Adverb Recognition: ADHA Dataset and A Three-Stream
+<br/>Hybrid Model
+<br/><b>Shanghai Jiao Tong University, China</b></td><td>('1717692', 'Bo Pang', 'bo pang')<br/>('15376265', 'Kaiwen Zha', 'kaiwen zha')<br/>('1830034', 'Cewu Lu', 'cewu lu')</td><td>pangbo@sjtu.edu.cn,Kevin zha@sjtu.edu.cn,lucewu@cs.sjtu.edu.cn
+</td></tr><tr><td>22f94c43dd8b203f073f782d91e701108909690b</td><td>MovieScope: Movie trailer classification using Deep Neural Networks
+<br/>Dept of Computer Science
+<br/><b>University of Virginia</b></td><td></td><td>{ks6cq, gs9ed}@virginia.edu
+</td></tr><tr><td>22dabd4f092e7f3bdaf352edd925ecc59821e168</td><td> Deakin Research Online
+<br/>This is the published version:
+<br/>An, Senjian, Liu, Wanquan and Venkatesh, Svetha 2008, Exploiting side information in
+<br/>locality preserving projection, in CVPR 2008 : Proceedings of the 26th IEEE Conference on
+<br/>Computer Vision and Pattern Recognition, IEEE, Washington, D. C., pp. 1-8.
+<br/>Available from Deakin Research Online:
+<br/>http://hdl.handle.net/10536/DRO/DU:30044576
+<br/>
+<br/>Reproduced with the kind permissions of the copyright owner.
+<br/>Personal use of this material is permitted. However, permission to reprint/republish this
+<br/>material for advertising or promotional purposes or for creating new collective works for
+<br/>resale or redistribution to servers or lists, or to reuse any copyrighted component of this work
+<br/>in other works must be obtained from the IEEE.
+<br/>Copyright : 2008, IEEE
+</td><td></td><td></td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td></td><td></td><td></td></tr><tr><td>22143664860c6356d3de3556ddebe3652f9c912a</td><td>Facial Expression Recognition for Human-robot
+<br/>Interaction – A Prototype
+<br/>1 Department of Informatics, Technische Universitat M¨unchen, Germany
+<br/><b>Electrical and Computer Engineering, University of Auckland, New Zealand</b></td><td>('32131501', 'Matthias Wimmer', 'matthias wimmer')<br/>('1761487', 'Bruce A. MacDonald', 'bruce a. macdonald')<br/>('3235721', 'Dinuka Jayamuni', 'dinuka jayamuni')<br/>('2607879', 'Arpit Yadav', 'arpit yadav')</td><td></td></tr><tr><td>2271d554787fdad561fafc6e9f742eea94d35518</td><td>TECHNISCHE UNIVERSIT ¨AT M ¨UNCHEN
+<br/>Lehrstuhl f¨ur Mensch-Maschine-Kommunikation
+<br/>Multimodale Mensch-Roboter-Interaktion
+<br/>f¨ur Ambient Assisted Living
+<br/>Tobias F. Rehrl
+<br/>Vollst¨andiger Abdruck der von der Fakult¨at f¨ur Elektrotechnik und Informationstechnik
+<br/>der Technischen Universit¨at M¨unchen zur Erlangung des akademischen Grades eines
+<br/>Doktor-Ingenieurs (Dr.-Ing.)
+<br/>genehmigten Dissertation.
+<br/>Vorsitzende:
+<br/>Pr¨ufer der Dissertation: 1. Univ.-Prof. Dr.-Ing. habil. Gerhard Rigoll
+<br/>2. Univ.-Prof. Dr.-Ing. Horst-Michael Groß
+<br/>Univ.-Prof. Dr.-Ing. Sandra Hirche
+<br/>(Technische Universit¨at Ilmenau)
+<br/>Die Dissertation wurde am 17. April 2013 bei der Technischen Universit¨at M¨unchen
+<br/>eingereicht und durch die Fakult¨at f¨ur Elektrotechnik und Informationstechnik am
+<br/>8. Oktober 2013 angenommen.
+</td><td></td><td></td></tr><tr><td>22ec256400e53cee35f999244fb9ba6ba11c1d06</td><td></td><td></td><td></td></tr><tr><td>22a7f1aebdb57eecd64be2a1f03aef25f9b0e9a7</td><td></td><td></td><td></td></tr><tr><td>22e189a813529a8f43ad76b318207d9a4b6de71a</td><td>What will Happen Next?
+<br/>Forecasting Player Moves in Sports Videos
+<br/>UC Berkeley, STATS
+<br/>UC Berkeley
+<br/>UC Berkeley
+</td><td>('2986395', 'Panna Felsen', 'panna felsen')<br/>('33932184', 'Pulkit Agrawal', 'pulkit agrawal')<br/>('1689212', 'Jitendra Malik', 'jitendra malik')</td><td>panna@berkeley.edu
+<br/>pulkitag@berkeley.edu
+<br/>malik@berkeley.edu
+</td></tr><tr><td>25ff865460c2b5481fa4161749d5da8501010aa0</td><td>Seeing What Is Not There:
+<br/>Learning Context to Determine Where Objects Are Missing
+<br/>Department of Computer Science
+<br/><b>University of Maryland</b><br/>Figure 1: When curb ramps (green rectangle) are missing from a segment of sidewalks in an intersection (orange rectangle),
+<br/>people with mobility impairments are unable to cross the street. We propose an approach to determine where objects are
+<br/>missing by learning a context model so that it can be combined with object detection results.
+</td><td>('39516880', 'Jin Sun', 'jin sun')<br/>('34734622', 'David W. Jacobs', 'david w. jacobs')</td><td>{jinsun,djacobs}@cs.umd.edu
+</td></tr><tr><td>25d514d26ecbc147becf4117512523412e1f060b</td><td>Annotated Crowd Video Face Database
+<br/>IIIT-Delhi, India
+</td><td>('2952437', 'Tejas I. Dhamecha', 'tejas i. dhamecha')<br/>('2578160', 'Priyanka Verma', 'priyanka verma')<br/>('3239512', 'Mahek Shah', 'mahek shah')<br/>('39129417', 'Richa Singh', 'richa singh')<br/>('2338122', 'Mayank Vatsa', 'mayank vatsa')</td><td>{tejasd,priyanka13100,mahek13106,rsingh,mayank}@iiitd.ac.in
+</td></tr><tr><td>25c19d8c85462b3b0926820ee5a92fc55b81c35a</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Pose-Invariant Facial Expression Recognition
+<br/>Using Variable-Intensity Templates
+<br/>Received: date / Accepted: date
+</td><td>('3325574', 'Shiro Kumano', 'shiro kumano')<br/>('38178548', 'Eisaku Maeda', 'eisaku maeda')</td><td></td></tr><tr><td>258a8c6710a9b0c2dc3818333ec035730062b1a5</td><td>Benelearn 2005
+<br/>Annual Machine Learning Conference of
+<br/>Belgium and the Netherlands
+<br/>CTIT PROCEEDINGS OF THE FOURTEENTH
+<br/>ANNUAL MACHINE LEARNING CONFERENCE
+<br/>OF BELGIUM AND THE NETHERLANDS
+</td><td>('2541098', 'Martijn van Otterlo', 'martijn van otterlo')<br/>('1688157', 'Mannes Poel', 'mannes poel')<br/>('1745198', 'Anton Nijholt', 'anton nijholt')</td><td></td></tr><tr><td>25695abfe51209798f3b68fb42cfad7a96356f1f</td><td>AN INVESTIGATION INTO COMBINING
+<br/>BOTH FACIAL DETECTION AND
+<br/>LANDMARK LOCALISATION INTO A
+<br/>UNIFIED PROCEDURE USING GPU
+<br/>COMPUTING
+<br/> MSc by Research
+<br/>2016
+</td><td>('32464788', 'J M McDonagh', 'j m mcdonagh')</td><td></td></tr><tr><td>250ebcd1a8da31f0071d07954eea4426bb80644c</td><td>DenseBox: Unifying Landmark Localization with
+<br/>End to End Object Detection
+<br/><b>Institute of Deep Learning</b><br/>Baidu Research
+</td><td>('3168646', 'Lichao Huang', 'lichao huang')<br/>('1698559', 'Yi Yang', 'yi yang')<br/>('1987538', 'Yafeng Deng', 'yafeng deng')<br/>('2278628', 'Yinan Yu', 'yinan yu')</td><td>2{huanglichao01,yangyi05,dengyafeng}@baidu.com
+<br/>1alanhuang1990@gmail.com
+<br/>3bebekifis@gmail.com
+</td></tr><tr><td>25337690fed69033ef1ce6944e5b78c4f06ffb81</td><td>STRATEGIC ENGAGEMENT REGULATION:
+<br/>AN INTEGRATION OF SELF-ENHANCEMENT AND ENGAGEMENT
+<br/>by
+<br/><b>A dissertation submitted to the Faculty of the University of Delaware in partial</b><br/>fulfillment of the requirements for the degree of Doctor of Philosophy in Psychology
+<br/>Spring 2014
+<br/>All Rights Reserved
+</td><td>('2800616', 'Jordan B. Leitner', 'jordan b. leitner')<br/>('2800616', 'Jordan B. Leitner', 'jordan b. leitner')</td><td></td></tr><tr><td>25c3cdbde7054fbc647d8be0d746373e7b64d150</td><td>ForgetMeNot: Memory-Aware Forensic Facial Sketch Matching
+<br/><b>Beijing University of Posts and Telecommunications</b><br/><b>Queen Mary University of London, UK</b></td><td>('2961830', 'Shuxin Ouyang', 'shuxin ouyang')<br/>('1697755', 'Timothy M. Hospedales', 'timothy m. hospedales')<br/>('1705408', 'Yi-Zhe Song', 'yi-zhe song')<br/>('7823169', 'Xueming Li', 'xueming li')</td><td>{s.ouyang, t.hospedales, yizhe.song}@qmul.ac.uk
+<br/>lixm@bupt.edu.cn
+</td></tr><tr><td>25bf288b2d896f3c9dab7e7c3e9f9302e7d6806b</td><td>Neural Networks with Smooth Adaptive Activation Functions
+<br/>for Regression
+<br/><b>Stony Brook University, NY, USA</b><br/><b>Stony Brook University, NY, USA</b><br/>3Oak Ridge National Laboratory, USA
+<br/>4Department of Applied Mathematics and Statistics, NY, USA
+<br/>5Department of Pathology, Stony Brook Hospital, NY, USA
+<br/>6Cancer Center, Stony Brook Hospital, NY, USA
+<br/>August 24, 2016
+</td><td>('2321406', 'Le Hou', 'le hou')<br/>('1686020', 'Dimitris Samaras', 'dimitris samaras')<br/>('1755448', 'Yi Gao', 'yi gao')<br/>('1735710', 'Joel H. Saltz', 'joel h. saltz')</td><td>{lehhou,samaras}@cs.stonybrook.edu
+<br/>{tahsin.kurc,joel.saltz}@stonybrook.edu
+<br/>yi.gao@stonybrookmedicine.edu
+</td></tr><tr><td>25d3e122fec578a14226dc7c007fb1f05ddf97f7</td><td>The First Facial Expression Recognition and Analysis Challenge
+</td><td>('1795528', 'Michel F. Valstar', 'michel f. valstar')<br/>('39532631', 'Bihan Jiang', 'bihan jiang')<br/>('1875347', 'Marc Mehu', 'marc mehu')<br/>('1694605', 'Maja Pantic', 'maja pantic')</td><td></td></tr><tr><td>2597b0dccdf3d89eaffd32e202570b1fbbedd1d6</td><td>Towards predicting the likeability of fashion images
+</td><td>('2569065', 'Jinghua Wang', 'jinghua wang')<br/>('2613790', 'Abrar Abdul Nabi', 'abrar abdul nabi')<br/>('22804340', 'Gang Wang', 'gang wang')<br/>('2737180', 'Chengde Wan', 'chengde wan')<br/>('2475944', 'Tian-Tsong Ng', 'tian-tsong ng')</td><td></td></tr><tr><td>2588acc7a730d864f84d4e1a050070ff873b03d5</td><td>Article
+<br/>Action Recognition by an Attention-Aware Temporal
+<br/>Weighted Convolutional Neural Network
+<br/><b>Institute of Arti cial Intelligence and Robotics, Xi an Jiaotong University, Xi an 710049, China</b><br/>Received: 27 April 2018; Accepted: 19 June 2018; Published: 21 June 2018
+</td><td>('40367806', 'Le Wang', 'le wang')<br/>('14800230', 'Jinliang Zang', 'jinliang zang')<br/>('46324995', 'Qilin Zhang', 'qilin zhang')<br/>('1786361', 'Zhenxing Niu', 'zhenxing niu')<br/>('1745420', 'Gang Hua', 'gang hua')<br/>('1715389', 'Nanning Zheng', 'nanning zheng')</td><td>zjl19920904@stu.xjtu.edu.cn (J.Z.); nnzheng@xjtu.edu.cn (N.Z.)
+<br/>2 HERE Technologies, Chicago, IL 60606, USA; qilin.zhang@here.com
+<br/>3 Alibaba Group, Hangzhou 311121, China; zhenxing.nzx@alibaba-inc.com
+<br/>4 Microsoft Research, Redmond, WA 98052, USA; ganghua@microsoft.com
+<br/>* Correspondence: lewang@xjtu.edu.cn; Tel.: +86-29-8266-8672
+</td></tr><tr><td>25982e2bef817ebde7be5bb80b22a9864b979fb0</td><td></td><td></td><td></td></tr><tr><td>25c108a56e4cb757b62911639a40e9caf07f1b4f</td><td>Recurrent Scale Approximation for Object Detection in CNN
+<br/><b>Multimedia Laboratory at The Chinese University of Hong Kong</b><br/>1SenseTime Group Limited
+</td><td>('1715752', 'Yu Liu', 'yu liu')<br/>('1929886', 'Hongyang Li', 'hongyang li')<br/>('1721677', 'Junjie Yan', 'junjie yan')<br/>('22181490', 'Fangyin Wei', 'fangyin wei')<br/>('31843833', 'Xiaogang Wang', 'xiaogang wang')<br/>('1741901', 'Xiaoou Tang', 'xiaoou tang')</td><td>liuyuisanai@gmail.com,{yangli,xgwang}@ee.cuhk.edu.hk,
+<br/>{yanjunjie,weifangyin}@sensetime.com, xtang@ie.cuhk.edu.hk
+</td></tr><tr><td>2594a77a3f0dd5073f79ba620e2f287804cec630</td><td>TRANSFERRING FACE VERIFICATION NETS TO PAIN AND EXPRESSION REGRESSION
+<br/>Dept. of {Computer Science1, Electrical & Computer Engineering2, Radiation Oncology3, Cognitive Science4}
+<br/><b>Johns Hopkins University, 3400 N. Charles St, Baltimore, MD 21218, USA</b><br/>5Dept. of EE, UESTC, 2006 Xiyuan Ave, Chengdu, Sichuan 611731, China
+<br/><b>Tsinghua University, Beijing 100084, China</b></td><td>('1713335', 'Feng Wang', 'feng wang')<br/>('40031188', 'Xiang Xiang', 'xiang xiang')<br/>('1692867', 'Chang Liu', 'chang liu')<br/>('1709073', 'Trac D. Tran', 'trac d. tran')<br/>('3207112', 'Austin Reiter', 'austin reiter')<br/>('1678633', 'Gregory D. Hager', 'gregory d. hager')<br/>('2095823', 'Harry Quon', 'harry quon')<br/>('1709439', 'Jian Cheng', 'jian cheng')<br/>('1746141', 'Alan L. Yuille', 'alan l. yuille')</td><td></td></tr><tr><td>25e2d3122d4926edaab56a576925ae7a88d68a77</td><td>ORIGINAL RESEARCH
+<br/>published: 23 February 2016
+<br/>doi: 10.3389/fpsyg.2016.00166
+<br/>Communicative-Pragmatic
+<br/>Treatment in Schizophrenia: A Pilot
+<br/>Study
+<br/><b>Center for Cognitive Science, University of Turin, Turin, Italy, 2 Neuroscience Institute of Turin</b><br/><b>Turin, Italy, 3 Faculty of Humanities, Research Unit of Logopedics, Child Language Research Center, University of Oulu, Oulu</b><br/>Finland, 4 AslTo2 Department of Mental Health, Turin, Italy, 5 Brain Imaging Group, Turin, Italy
+<br/>This paper aims to verify the efficacy of Cognitive Pragmatic Treatment (CPT), a new
+<br/>remediation training for the improvement of the communicative-pragmatic abilities, in
+<br/>patients with schizophrenia. The CPT program is made up of 20 group sessions,
+<br/>focused on a number of communication modalities, i.e., linguistic, extralinguistic and
+<br/>paralinguistic, theory of mind (ToM) and other cognitive functions able to play a role
+<br/>on the communicative performance, such as awareness and planning. A group of 17
+<br/>patients with schizophrenia took part in the training program. They were evaluated
+<br/>before and after training, through the equivalent forms of the Assessment Battery for
+<br/>Communication (ABaCo), a tool for testing, both in comprehension and in production,
+<br/>a wide range of pragmatic phenomena such as direct and indirect speech acts,
+<br/>irony and deceit, and a series of neuropsychological and ToM tests. The results
+<br/>showed a significant improvement in patients’ performance on both production and
+<br/>comprehension tasks following the program, and in all the communication modalities
+<br/>evaluated through the ABaCo, i.e., linguistic, extralinguistic, paralinguistic, and social
+<br/>appropriateness. This improvement persisted after 3 months from the end of the training
+<br/>program, as shown by the follow-up tests. These preliminary findings provide evidence
+<br/>of the efficacy of the CPT program in improving communicative-pragmatic abilities in
+<br/>schizophrenic individuals.
+<br/>Keywords: rehabilitation, schizophrenia, pragmatic, communication, training
+<br/>INTRODUCTION
+<br/>People with schizophrenia experience symptoms such as delusions, hallucinations, disorganized
+<br/>speech and behavior, that cause difficulty in social relationships (DSM 5; American Psychiatric
+<br/>Association [APA], 2013). In the clinical pragmatic domain (Cummings, 2014), the area of study
+<br/>of pragmatic impairment in patients with communicative disorders, several studies have reported
+<br/>that communicative ability is impaired in patients with schizophrenia (Langdon et al., 2002; Bazin
+<br/>et al., 2005; Linscott, 2005; Marini et al., 2008; Colle et al., 2013). For example, Bazin et al. (2005),
+<br/>created a structured interview, the Schizophrenia Communication Disorder Scale, which they
+<br/>administered to patients with schizophrenia. The authors observed that these patients performed
+<br/>less well than those affected by mania or depression in managing a conversation on everyday
+<br/>topics, such as family, job, hobbies, and so on. Likewise, non-compliance with conversational
+<br/>rules, such as consistency with the agreed purpose of the interaction, giving the partner too little
+<br/>Edited by:
+<br/>Sayyed Mohsen Fatemi,
+<br/><b>Harvard University, USA</b><br/>Reviewed by:
+<br/>Silvia Serino,
+<br/>IRCCS Istituto Auxologico Italiano,
+<br/>Italy
+<br/>Michelle Dow Keawphalouk,
+<br/><b>Harvard and Massachusetts Institute</b><br/>of Technology, USA
+<br/>*Correspondence:
+<br/>Specialty section:
+<br/>This article was submitted to
+<br/>Psychology for Clinical Settings,
+<br/>a section of the journal
+<br/>Frontiers in Psychology
+<br/>Received: 07 October 2015
+<br/>Accepted: 28 January 2016
+<br/>Published: 23 February 2016
+<br/>Citation:
+<br/>Bosco FM, Gabbatore I, Gastaldo L
+<br/>and Sacco K (2016)
+<br/>Communicative-Pragmatic Treatment
+<br/>in Schizophrenia: A Pilot Study.
+<br/>Front. Psychol. 7:166.
+<br/>doi: 10.3389/fpsyg.2016.00166
+<br/>Frontiers in Psychology | www.frontiersin.org
+<br/>February 2016 | Volume 7 | Article 166
+</td><td>('2261858', 'Francesca M. Bosco', 'francesca m. bosco')<br/>('3175646', 'Ilaria Gabbatore', 'ilaria gabbatore')<br/>('39551201', 'Luigi Gastaldo', 'luigi gastaldo')<br/>('2159033', 'Katiuscia Sacco', 'katiuscia sacco')<br/>('3175646', 'Ilaria Gabbatore', 'ilaria gabbatore')</td><td>ilaria.gabbatore@oulu.fi;
+<br/>ilariagabbatore@gmail.com
+</td></tr><tr><td>25e05a1ea19d5baf5e642c2a43cca19c5cbb60f8</td><td>Label Distribution Learning
+</td><td>('1735299', 'Xin Geng', 'xin geng')</td><td></td></tr><tr><td>2559b15f8d4a57694a0a33bdc4ac95c479a3c79a</td><td>570
+<br/>Contextual Object Localization With Multiple
+<br/>Kernel Nearest Neighbor
+<br/>Gert Lanckriet, Member, IEEE
+</td><td>('3215419', 'Brian McFee', 'brian mcfee')<br/>('1954793', 'Carolina Galleguillos', 'carolina galleguillos')</td><td></td></tr><tr><td>2574860616d7ffa653eb002bbaca53686bc71cdd</td><td></td><td></td><td></td></tr><tr><td>25f1f195c0efd84c221b62d1256a8625cb4b450c</td><td>1-4244-1017-7/07/$25.00 ©2007 IEEE
+<br/>1091
+<br/>ICME 2007
+</td><td></td><td></td></tr><tr><td>25885e9292957feb89dcb4a30e77218ffe7b9868</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2016
+<br/>Analyzing the Affect of a Group of People Using
+<br/>Multi-modal Framework
+</td><td>('18780812', 'Xiaohua Huang', 'xiaohua huang')<br/>('1735697', 'Abhinav Dhall', 'abhinav dhall')<br/>('40357816', 'Xin Liu', 'xin liu')<br/>('1757287', 'Guoying Zhao', 'guoying zhao')<br/>('2473859', 'Jingang Shi', 'jingang shi')</td><td></td></tr><tr><td>259706f1fd85e2e900e757d2656ca289363e74aa</td><td>Improving People Search Using Query Expansions
+<br/>How Friends Help To Find People
+<br/>LEAR - INRIA Rhˆone Alpes - Grenoble, France
+</td><td>('1722052', 'Thomas Mensink', 'thomas mensink')<br/>('34602236', 'Jakob Verbeek', 'jakob verbeek')</td><td>{thomas.mensink,jakob.verbeek}@inria.fr
+</td></tr><tr><td>25728e08b0ee482ee6ced79c74d4735bb5478e29</td><td></td><td></td><td></td></tr><tr><td>258a2dad71cb47c71f408fa0611a4864532f5eba</td><td>Discriminative Optimization
+<br/>of Local Features for Face Recognition
+<br/>
+<br/>H O S S E I N A Z I Z P O U R
+<br/>
+<br/>Master of Science Thesis
+<br/>Stockholm, Sweden 2011
+<br/>
+</td><td></td><td></td></tr><tr><td>25127c2d9f14d36f03d200a65de8446f6a0e3bd6</td><td>Journal of Theoretical and Applied Information Technology
+<br/> 20th May 2016. Vol.87. No.2
+<br/>© 2005 - 2016 JATIT & LLS. All rights reserved.
+<br/>ISSN: 1992-8645 www.jatit.org E-ISSN: 1817-3195
+<br/>EVALUATING THE PERFORMANCE OF DEEP SUPERVISED
+<br/>AUTO ENCODER IN SINGLE SAMPLE FACE RECOGNITION
+<br/>PROBLEM USING KULLBACK-LEIBLER DIVERGENCE
+<br/>SPARSITY REGULARIZER
+<br/> Faculty of Computer of Computer Science, Universitas Indonesia, Kampus UI Depok, Indonesia
+</td><td>('9324684', 'ARIDA F. SYAFIANDINI', 'arida f. syafiandini')</td><td>E-mail: 1otniel.yosi@ui.ac.id , 2ito.wasito@cs.ui.ac.id, 2arida.ferti@ui.ac.id
+</td></tr></table></body></html> \ No newline at end of file
diff --git a/scraper/reports/institution_names-1.csv b/scraper/reports/institution_names-1.csv
new file mode 100644
index 00000000..d667654f
--- /dev/null
+++ b/scraper/reports/institution_names-1.csv
@@ -0,0 +1,714 @@
+"Johns Hopkins University, Center for Speech and Language Processing"
+"Gri th University, QLD-4111, Brisbane, Australia"
+"Figure 1: A few results from our VRN - Guided method, on a full range of pose, including large expressions"
+Alan W Black (Carnegie Mellon University
+"Max Planck Institute for Informatics, Saarbr ucken, Germany"
+"Faculty of Electrical Engineering, Mathematics and Computer Science, Delft University of"
+"Computer Science Division, The Open University of Israel"
+"The authors are with the Delft University of Technology, Data and Knowl"
+"St.Joseph s College of Engineering, Old Mamallapuram Road, Kamaraj Nagar, Semmencherry, Chennai"
+University of Colorado Colorado Springs
+"College of Engineering Pune, India"
+"University of Maryland, CFAR"
+"University of Tampere, Kanslerinnrinne 1, 33014, Tampere, Finland"
+"St. Xavier s Catholic College of Engineering, India"
+"University of Surrey, United Kingdom"
+"Toyota Research Institute, Cambridge, MA 2 University of Michigan, Ann Arbor, MI"
+"Center for Automation Research, UMIACS, University of Maryland, College Park, MD"
+M.S. University of Central Florida
+Computer Vision and Robotics Research Laboratory
+Kyung Hee University
+"School of Games, Hongik University, Seoul, Korea"
+"bFaculty of Engineering, International Islamic University, Jalan Gombak, 53100 Kuala Lumpur, Malaysia"
+"MRC Laboratory For Molecular Cell Biology, University College London"
+"National Chiao Tung University, Taiwan"
+"Shenzhen University, Shenzhen, China"
+"cid:5)School of Electrical and Computer Engineering, Purdue University, West Lafayette, IN. 47907, USA"
+THE UNIVERSITY OF CHICAGO
+"Faculty of Information Technology, Barrett Hodgson University, Karachi, Pakistan"
+"University of Houston, Houston, TX 77204, USA"
+"Aalborg University, Denmark"
+"Wenzhou University, China"
+"University Street, Montral, QC H3A 0E9, Canada"
+"School of Software, Sun Yat-sen University, China"
+"Bio-Computing Research Center, Shenzhen Graduate School, Harbin Institute of Technology, Shenzhen, Guangdong Province, China, 2 Key Laboratory of Network"
+"Beckman Institute, University of Illinois at Urbana-Champaign, USA"
+Sun Yat-Sen (Zhongshan) University
+"Institute of Computing Technology, Chinese Academy of Sciences, Beijing 100190, China"
+"University of North Carolina at Chapel Hill, Chapel Hill, NC"
+RWTH Aachen University
+University Drive
+"Smart Network System Institute, Institute for Information Industry"
+"Goldsmiths, University of London, London, UK"
+"Idiap Research Institute and EPFL, 2 LIMSI, CNRS, Univ. Paris-Sud, Universit Paris-Saclay"
+"of Maryland, College Park, MD 20742, USA"
+"Harvard University, Cambridge, MA, USA"
+College of Engineering and Mineral Resources
+"School of EEE, Nanyang Technological University, Singapore"
+"University of Shef eld, Regent Court, 211 Portobello, Shef eld"
+"School of Mathematical Sciences, Monash University, VIC 3800, Australia"
+"Teaching Affairs Office, Chongqing Normal University, Chongqing 401331, China"
+"School of Computer Science and Technology, Tianjin University"
+"UG student, Amity school of Engineering and Technology, Amity University, Haryana, India"
+"Computer and Systems Engineering, Rensselaer Polytechnic Institute"
+University of Caen
+"University of Groningen, Netherlands"
+"Carnegie Mellon University, Pittsburgh, PA"
+"School of Computer Engineering, Sejong University, Seoul, Korea"
+Michigan State University
+"University of Catania, Italy"
+"University Health Board, Swansea, United Kingdom"
+Princeton University
+"Carnegie Mellon University, Electrical and Computer Engineering"
+"Graduate School of Engineering, Kobe University, Kobe, 657-8501, Japan"
+"School of Electrical and Information Engineering, The University of Sydney, Sydney, NSW, Australia, 2 Sydney Medical"
+"subsection a table summarizing the key features of the database is provided, including (where available) the number of"
+"University of Tokyo, Japan"
+"our analysis to stereotypes beyond gender, including those"
+"School of Computer Science, Fudan University, Shanghai, 200433, China"
+Cornell University 2 Cornell Tech
+"Stanford University, USA"
+Massachusetts Institute of Technology 2014. All rights reserved
+ment. Oxford University Press Series in Affective Science. New York: Oxford
+College of Computer and Information Sciences
+"College of Computing, Georgia Institute of Technology, Atlanta, GA, USA"
+"gies (Bughin et al. 2017). A range of other sectors, includ"
+"he University of Hong Kong, Pokfulam"
+"Research Groups on Intelligent Machines, University of Sfax, Sfax 3038, Tunisia"
+"University of Pittsburgh and 2University of British Columbia, Vancouver, British Columbia, Canada"
+"Salgado de Oliveira University, Brazil"
+"Helsinki Institute for Information Technology, Aalto University, Finland"
+"Babes Bolyai University, 58-60 Teodor Mihali, C333, Cluj Napoca"
+"COMSATS Institute of Information Technology, Lahore 54000, Pakistan"
+"Graduate Institute of Networking and Multimedia, National Taiwan University"
+"2Program in Neuroscience, and 3Rotman Research Institute, University of Toronto, Toronto, Ontario M5S 3G3, Canada"
+"the face, including negative affect and distress, dates"
+"Faculty of Information Science and Technology, Multimedia University, 75450 Melaka, Malaysia"
+Bilkent University
+"Hong Kong Polytechnic University, Hong Kong, China"
+Yaroslavl State University
+in the College of Engineering and Computer Science
+"University of California, Santa Barbara"
+"Uber Advanced Technologies Group, 5Vector Institute"
+"College of Information Science and Engineering, Ocean University of China, Qingdao, China"
+"Turin, Italy, 3 Faculty of Humanities, Research Unit of Logopedics, Child Language Research Center, University of Oulu, Oulu"
+IBM T.J. Watson Research Center
+Central Washington University
+"University of California, San Diego"
+University of Tsukuba
+The University of the Humanities
+DISI - University of Trento
+Savitribai Phule Pune University
+"College of Software Engineering, Southeast University, Nanjing 210096, China"
+"Key Lab of Intelligent Information Processing, Institute of Computing Technology"
+in the Graduate School of Duke University
+"B.S., Computer Engineering, Bo gazi ci University"
+"Kodak Research Laboratories, Rochester, NY"
+"School of Computer Science, Northwestern Polytechnical University, P.R.China"
+"Hindusthan College of Engineering and Technology, Coimbatore, India"
+Zhejiang University of Technology
+Shaheed Zulfikar Ali Bhutto Institute of Science and Technology Islamabad
+ShahidBeheshti University
+cThe Open University
+College of Information and Control Engineering in China University of Petroleum
+"Dr.Pauls Engineering College, Villupuram District, Tamilnadu, India"
+"aTurgut Ozal University, Ankara Turkey"
+"School of Medicine, Shenzhen University, Shenzhen 518060, China"
+"DISI, University of Trento, Italy"
+"University of Texas at Arlington, TX, USA"
+"Computer Science and Engineering, Michigan State University, East Lansing, USA"
+"Institute of Computing Technology, Chinese Academy of Sciences, Beijing, 100190, China"
+"Islamic Azad University, Shahrood, Iran"
+"Faculty of Electrical Engineering, University of Ljubljana"
+"Multimedia University, Cyberjaya, Malaysia"
+"and quantify distinct social behaviors, including those involving"
+"College of Computer and Information Engineering, Tianjin Normal University, Tianjin 300387, China"
+The University of Nottingham
+Dr. Babasaheb Ambedkar Marathwada University
+"The University of York, United Kingdom"
+"University of Science, VNU-HCMC, Ho Chi Minh city, Vietnam"
+"DPDCE, University IUAV"
+"University of Vigo, Spain"
+"Pathological anxiety is associated with disrupted cognitive processing, including working memory and"
+"School of Computer Science and Software Engineering, Shenzhen University, Nanhai Ave 3688, Shenzhen"
+"George Mason University, Fairfax, VA, USA"
+"Institute of Neural Information Processing, Ulm University, Ulm, Germany"
+William Marsh Rice University
+"Center for Research in Computer Vision, University of Central Florida, Orlando, FL"
+"Seattle Paci c University, Seattle, WA 98119-1957, USA"
+India
+Institute of Systems and Robotics
+"DUT-RU International School of Information Science and Engineering, Dalian University of Technology, Dalian, China"
+"Faculty of Electrical Engineering, University of Ljubljana, Tr za ska 25, SI-1000 Ljubljana, Slovenia"
+"University of Zagreb, Faculty of Electrical Engineering and Computing"
+"Bioinformatics Institute, A*STAR, Singapore"
+"National University of Ireland Maynooth, Co. Kildare, Ireland"
+The Allen Institute for AI
+"Institute of Automation, Chinese Academy of Sciences; 2Miscrosoft Research Asian; 3Media School"
+The open University of Israel. 2Adience
+"Baidu Research, USA 3John Hopkins University"
+"Other uses, including reproduction and distribution, or selling or"
+"Viswajyothi College of Engineering and Technology Kerala, India"
+"Institute for Studies in Fundamental Sciences (IPM), Tehran, Iran"
+"University of Miami, Coral Gables, FL"
+Queen s University Belfast
+Portland State University
+"Cornell University, Ithaca, NY, U.S.A"
+"University of Colorado, Colorado Springs"
+of Cornell University
+University of Texas at San Antonio
+"University of North Texas, Denton, Texas, USA"
+"Institute for Computer Graphics and Vision, Graz University of Technology"
+"School of Engineering, University of Guelph"
+Tokyo Denki University
+"KTH Royal Institute of Technology, CVAP Lab, Stockholm, Sweden"
+Villanova University
+"University of North Carolina at Chapel Hill, USA"
+"Institute of Image Processing and Pattern Recognition, Shanghai Jiao Tong University"
+"The City College of New York, New York, NY 10031, USA"
+"School of Software, Tsinghua University, Beijing 100084, China"
+"Arts, Science and Commerce College, Chopda"
+"D.Y.Patil Institute of Engineering and Technology, Pimpri, Pune-18, Savitribai Phule Pune University"
+"Sri Manakula Vinayagar Engineering College, Pondicherry"
+Institute of Information Science
+"Michigan State University, E. Lansing, MI 48823, USA"
+"School of Computer Science and Technology, University of Science and Technology of China"
+"CISE, University of Florida, Gainesville, FL"
+"aCollege of Computer Science at Chongqing University, 400044, Chongqing, P.R.C"
+University of Illinois at Urbana-Champaign 2Adobe Research
+"Gujarat Technological University, India"
+"Centre for Bioinformatics, Biomarker Discovery and Information-Based Medicine, The University of Newcastle, Callaghan, Australia, 2 Departamento de Engenharia de"
+"University of Dschang, Cameroon"
+Informatics and Telematics Institute
+University of West Bohemia
+"Graduate School of System Informatics, Kobe University"
+"Amity University, Lucknow, India"
+"Center for Information and Neural Networks, National Institute of Information and Communications Technology (NICT"
+"The Institute of Scienti c and Industrial Research, Osaka University"
+"The University of Nottingham, UK"
+Macau University of Science and Technology
+"Computer Science and Software Engineering, The University of Western Australia"
+Oxford University
+"CVIP Lab, University of Louisville, Louisville, KY, USA"
+"tum Computation and Intelligent Systems, Faculty of Engineering and Information Technology, University"
+"Arti cial Intelligence Institute, China"
+"University of California, Merced, USA"
+University of Chinese Academy of Sciences (UCAS
+"New Jersey Institute of Technology, USA"
+"Sharif University of Technology, Tehran. Iran"
+"The University of Queensland, School of ITEE"
+"Intelligent Systems Laboratory, Halmstad University, Halmstad, Sweden"
+"Kingston University London, University of Westminster London"
+Charles Sturt University
+The University of Manchester
+"Janelia Research Campus, Howard Hughes Medical Institute, Ashburn, United"
+"MICC, University of Florence"
+"Beijing University of Posts and Telecommunications, Beijing, P.R. China"
+Simon Fraser University
+University of Buffalo
+"Student, Amal Jyothi College of Engineering, Kanjirappally, India"
+"Computational Biomedicine Lab, University of Houston, TX, USA"
+"Akita Prefectural University, Yurihonjo, Japan"
+"Institute for Disease Modeling, Intellectual Ventures Laboratory, Bellevue, WA 98004, United States"
+"Fudan University, 2Microsoft Research Asia, 3University of Maryland"
+"School of Computer Science, South China Normal University, China"
+"Faculty of Electrical Engineering, Mathematics and Computer Science, University of Twente, The"
+"Badji-Mokhtar University, P.O.Box 12, Annaba-23000, Algeria"
+"Institute of Microelectronics, Tsinghua University, Beijing 100084, China"
+"bRobotics Institute, Carnegie Mellon University, Pittsburgh, PA 15213, U.S.A"
+"University of Pittsburgh, Pittsburgh PA"
+"Arts, Commerce and Science College, Gangakhed, M.S, India"
+"Augsburg University, Germany"
+"Concordia University, Computer Science and Software Engineering, Montr eal, Qu ebec, Canada"
+University of Brescia
+"University of Technology, Sydney"
+Seoul National University
+"School of Computing Science, Simon Fraser University, Burnaby, B.C., Canada; E-Mail"
+"Warsaw University of Technology, Poland"
+Federal University of Bahia (UFBA
+University of Beira Interior
+"University of Kentucky, USA"
+The Hong Kong Polytechnic University
+"Beijing Institute of Technology University, P. R. China"
+"Research School of Computer Science, The Australian National University, ACT 2601, Australia"
+ICSI / UC Berkeley 2Brigham Young University
+"IN3, Open University of"
+"School of Computer Science, University of Nottingham"
+"The University of North Carolina, Chapel Hill"
+"California Institute of Technology, Pasadena, CA, USA"
+Link to publication record in Queen's University Belfast Research Portal
+"Amirkabir University of Technology, University of Southern California"
+"School of Physics and Engineering, Sun Yat-Sen University, Guangzhou, China, 2 School of Information"
+"University of Rochester, Rochester, NY, USA"
+"Beijing Institute of Technology, Beijing 100081 CHINA"
+West Virginia University
+"Education, Yunnan NormalUniversity, Kunming, China2. College of Information, Yunnan"
+"Polytechnic Institute of NYU, NY, USA"
+University of Washington 4The Allen Institute for AI
+"Research Scholar, CGC Group of Colleges, Gharuan, Punjab, India"
+"Center for Arti cial Vision Research, Korea University"
+"The University of Newcastle, Callaghan 2308, Australia"
+Human Interaction Research Lab
+University of Rochester
+"Tohoku University, Sendai, Japan"
+"Grad. School of Information Science and Technology, The University of Tokyo, Japan"
+"Chongqing Institute of Green and Intelligent Technology, Chinese Academy of Sciences"
+"Center for Intelligent Machines, McGill University, 3480 University Street, Montreal, Canada H3A 2A"
+"Southern Illinois University, Carbondale, IL 62901 USA"
+Doctor of Philosophy of University College London
+"Technical University of Munich, Germany"
+"Computer Vision Lab, Delft University of Technology"
+"University of Missouri, Columbia, MO"
+"Institute of Computer Science, Foundation for Research and Technology - Hellas (FORTH), Crete, 73100, Greece"
+"School of Electrical and Information Engineering, Xi an Jiaotong University, Xi an, China"
+"University of Central Punjab, Pakistan"
+"Ryerson University, Canada"
+Eastern University
+"Calgary, 2500 University Dr., N.W. Calgary, AB, Canada T2N 1N4. Tel"
+"Samsung Advanced Institute of Technology (SAIT), KAIST"
+Institute for Numerical Mathematics
+The University of Cambridge
+"Gayathri.S, M.E., Vins Christian college of Engineering"
+"North China Electric Power University, Baoding, China"
+"The Big Data Research Center, Henan University, Kaifeng 475001, China"
+Myongji University
+comparisons with 12 instance-based classi ers on 13 benchmark University of California Irvine
+Sun Yat-Sen University
+"Link oping University, Computer Vision Laboratory"
+"Institute of Mental Health, Peking University, P.R. China"
+Halmstad University
+"University of Ottawa, Ottawa, On, Canada"
+"School of Electronic and Information Engineering, South China University of Technology"
+"National Institute of Standards and Technology, Gaithersburg, MD 20899, USA"
+"Chulalongkorn University, Bangkok"
+a The University of Nottingham Malaysia Campus
+COLUMBIA UNIVERSITY
+Hong Kong Applied Science and Technology Research Institute Company Limited
+"University of Pittsburgh, PA, USA"
+"Computer Science and Engineering, University of Washington"
+"University of Oulu, Finland"
+"University of Genoa, Italy"
+"VHNSN College, Virudhunagar, ANJA College"
+"Clemson University, Clemson, SC"
+"Stony Brook University, Stony Brook, NY 11794, USA"
+"methods, including graph matching, optical- ow-based"
+"Biometric Research Center, The Hong Kong Polytechnic University"
+"Ruhr-University Bochum, Germany"
+University of Abertay
+"National Laboratory of Pattern Recognition, Institute of Automation, CAS, Beijing 100190, P. R. China"
+"Queen Mary College, London"
+"School of Mathematics and Statistics, Xi an Jiaotong University, P. R. China"
+UNIVERSITY IN PRAGUE
+"School of Mathematics and Statistics, Xi an Jiaotong University, Xi an, China"
+"Vision and Security Technology Lab, University of Colorado at Colorado Springs, Colorado"
+"The University of Adelaide, Australia"
+"CISUC, University of Coimbra"
+"Anatomy and Genetics, University of Oxford, Oxford, United Kingdom; 3The Wellcome"
+Harbin Institute of Technology
+"Microsystems Design Lab, The Pennsylvania State University"
+"a The Robotics Institute, Carnegie Mellon University, 5000 Forbes Avenue, Pittsburgh, PA 15213, USA"
+"Universitat Polit cnica de Catalunya, 6 University of Vigo, 7 LIUM, University of Maine"
+"tion [11, 10] is making possible very large scale visual recognition both in my own ongoing work, including"
+International Institute of Information Technology
+"LCSEE, West Virginia University"
+"School of Computer Science and Technology, Tianjin University, 300072 Tianjin, China"
+"University Bourgogne Franche-Comt , France"
+"Eastern Mediterranean University, Gazima usa, Northern Cyprus"
+University of Massachusetts - Amherst
+"University of T ubingen, T ubingen, Germany"
+"gelmeyer et al., 1996); and, increasingly, its role in reactions to"
+"bUniversity of Nottingham, School of Computer Science, Nottingham, UK"
+"School of Computing, National University of Singapore, Singapore"
+"National University of Singapore, 2Shanghai Jiao Tong University"
+Computer Vision Laboratory. University of Nottingham
+"Electrical Engineering, University of"
+"Jet Propulsion Laboratory, California Institute of Technology, Pasadena, CA"
+Sabanc University
+"School of Computer Science and Technology, Nanjing University of Science and Technology, China"
+Institute of Media Innovation
+University of Houston
+"Alexandria University, Alexandria, Egypt"
+The University of North Carolina at Charlotte
+"Graduate School of Informatics, Kyoto University"
+"Annamacharya Institute of Technology and Sciences, Tirupati, India"
+SUS college of Engineering and Technology
+"Stony Brook University, NY, USA"
+National Taiwan University
+"Nottingham University Hospital, Nottingham, UK"
+"Shanghai Key Lab of Intelligent Information Processing, School of Computer Science, Fudan University, China"
+"P A College of Engineering, Nadupadavu"
+"bSchool of Computer Science and Technology, Harbin Institute of Technology, Harbin 150001, China"
+"Beckman Institute, University of Illinois at Urbana-Champaign, IL, USA"
+"Idiap Research Institute, Martigny, Switzerland"
+"Sendai National College of Technology, Natori, Japan"
+"Visual Analysis of People Lab, Aalborg University, Denmark"
+"School of ICE, Beijing University of Posts and Telecommunications, Beijing, China"
+"University City Blvd., Charlotte, NC"
+"University of Texas at Arlington, Arlington, TX"
+University of Wollongong
+CVSSP University of Surrey
+"Hengyang Normal University, Hengyang, China"
+"Sejong University, 98 Gunja, Gwangjin, Seoul 143-747, Korea"
+"M.Tech, Information Technology, Madras Institute of Technology, TamilNadu, India"
+"Computer Engineering, Faculty of Engineering, Kharazmi University of Tehran, Tehran, Iran"
+"Michigan State University, East Lansing, MI, USA"
+"b The Interdisciplinary Center for Research on Emotions, University of"
+Tongji University
+"PSGR Krishnammal College for Women, Coimbatore"
+"F.Ferraro, University of Rochester"
+"Columbia University, New York, NY 10027, USA"
+"Faculty of Electrical and Computer Engineering, Bu-Ali Sina University, Hamadan, Iran"
+"Zhengzhou University, Zhengzhou, Henan 450052, China"
+Tripura University (A Central University
+"Medical School, University of Ioannina, Ioannina, Greece"
+"Helsinki Collegium for Advanced Studies, University of Helsinki, Finland"
+"UMIACS, University of Maryland, College Park, USA"
+Cornell University 2Eastman Kodak Company
+"University of S ao Paulo, S ao Paulo, Brazil"
+"University of Gujrat, Pakistan"
+"Friedrich Schiller University, D-07740 Jena"
+"Ph.D student Zaid Shhedi, Doctoral School of Automatic Control and Computers, University"
+"Shenzhen Key Lab of Comp. Vis. and Pat. Rec., Shenzhen Institutes of Advanced"
+"Harbin Institute of Technology, School of Computer Science and Technology"
+"Benha University, Egypt"
+"Aristotle University of Thessaloniki, Greece"
+Raipur institute of technology
+"The Blavatnik School of Computer Science, Tel-Aviv University, Tel-Aviv, Israel"
+University of Illinois at Urbana-Champaign
+"Centre for Vision, Speech and Signal Processing, University of Surrey, Guildford GU2 7XH, UK"
+SungKyunKwan University
+UvA-DARE is a service provided by the library of the University of Amsterdam (http://dare.uva.nl
+"Machine Vision Group, P.O. Box 4500, FI-90014, University of Oulu, Finland"
+"Research Scholar, PSGR Krishnammal College for Women, Coimbatore"
+"University of Canberra, Australia, Data61 - CSIRO and ANU, Australia"
+Institute of Psychology and Behavioral Sciences
+"USC IRIS Lab, University of Southern California"
+"Google, Inc"
+"b School of Applied Mathematics, Xidian University, Xi an, China"
+"Rutgers, The State University of New Jersey"
+at the Delft University of Technology
+"University of Illinois at Urbana-Champaign, Urbana, IL, USA"
+"obtained for all other uses, in any current or future media, including reprinting/republishing"
+"Graduate School at Shenzhen, Tsinghua University, Shenzhen"
+"B.Eng., Nankai University"
+Dhanalakshmi Srinivasan College of Engineering
+Shandong University of Science and Technology
+"Central Washington University, 400 E. University Way, Ellensburg, WA 98926, USA"
+CUNY City College
+"Menara, 1008 Tunis; 2University of Tunis El-Manar, Tunis with expertise in Mechanic, Optics, Biophysics, Conference Master"
+"bTsinghua University, Beijing, China"
+"West Virginia University, Morgantown, USA"
+"Information Sciences Institute, USC, CA, USA"
+"College of Electronic Science and Engineering, National University of Defense Technology, Changsha, China"
+"School of Information Technology and Engineering, University of Ottawa, Ontario, Canada"
+"Machine Intelligence Lab (MIL), Cambridge University"
+"University of Ottawa, Canada"
+"Multimedia Laboratory, The Chinese University of Hong Kong, Hong Kong"
+"Vision and Security Technology Lab, University of Colorado Colorado Springs"
+Bahcesehir University
+"Computer Vision Laboratory, The University of Nottingham"
+"Faculty of Engineering Building, University of Malaya, 50603 Kuala Lumpur, Malaysia"
+"Research Center for Information Technology Innovation, Academia Sinica"
+"Jiangnan University, Wuxi"
+"Shri Shivaji College, Parbhani, M.S, India"
+"College Road East, Princeton, NJ"
+University of California San Diego
+"China, 2 School of Computer Science and Engineering, Nanjing University of Science and Technology"
+"GIT Vision Lab, http://vision.gyte.edu.tr/, Gebze Institute of Technology"
+University of Adelaide
+"c School of Computational Science, Florida State University, Tallahassee, FL 32306, USA"
+"National Kaohsiung University of Applied Sciences, Kaohsiung, Kaohsiung, Taiwan, ROC"
+Swiss Federal Institute of Technology
+"University B.D.T.College of Engineering, Visvesvaraya"
+"University of Nevada, Reno, USA"
+"Machine Vision Lab, Faculty of Environment and Technology, University of the West of England"
+"University of California, Riverside, California 92521, USA"
+"College of Information, Yunnan Normal University, Kunming, China"
+"Intelligent Systems Lab Amsterdam, University of Amsterdam, The Netherlands"
+"School of Computer and Information Science, Chongqing Normal University 401331, China"
+USC Information Sciences Institute
+"University of Maryland, Center for Automation Research"
+"cFaculty of Electrical Engineering, Mathematics, and Computer Science, University of Twente, The Netherlands"
+"College of Computer Science, Chongqing University, Chongqing, 400030, China"
+"Manchester University, UK"
+"Allen Institute for Arti cial Intelligence (AI2), Seattle, WA"
+"Pattern Recognition and Bio-informatics Laboratory, Delft University of Technology, THE NETHERLANDS"
+"University of S ao Paulo - USP, S ao Paulo - Brazil"
+"Tokyo Institute of Technology, Japan"
+Georgia Institute of Technology
+"University of Ioannina, Ioannina, Greece"
+"Rensselaer Polytechnic Institute, 110 Eighth Street, Troy, NY 12180 USA"
+Iran
+"King Saud University, Riyadh 11543, Saudi Arabia"
+"School of IoT Engineering, Jiangnan University, Wuxi 214122, China"
+Sarhad University of Science and Information Technology
+"Islamic Azad University, Gonabad, Iran"
+"College of Engineering, Mathematics and Physical Sciences"
+"College of Computer and Information Science, Northeastern University, Boston, USA"
+"School of Electronics and Information Engineering, Tongji University, Caoan Road 4800, Shanghai"
+"Monash University, Victoria, Australia"
+"U.S. Army Research Laboratory, 2800 Powder Mill Road, Adelphi, MD USA"
+"University of Northern British Columbia, Canada"
+"University of Nevada at Reno, USA"
+"Cognitive Brain Research Unit, Institute of Behavioural Sciences, University of"
+"University of Pittsburgh, Pittsburgh, PA, USA"
+Queen Mary University
+Odaiyappa College of
+"University of Nottingham, Ningbo, China"
+"M.Tech Student, SSG Engineering College, Odisha, India"
+"Xidian University, Xi an, China"
+University of Minnesota
+University of Nebraska - Lincoln
+"SSESA, Science College, Congress Nagar, Nagpur, (MS)-India"
+Institute of Information Technology
+"Australian National University, 2CVLab, EPFL, Switzerland, 3Smart Vision Systems, CSIRO"
+"School of Computer Science, University of Windsor, Windsor, ON, Canada N9B 3P"
+"Wenzhou University, Wenzhou, China"
+Vienna University of Technology
+"Imperial College London, United Kingdom"
+"Charotar University of Science and Technology, Changa, India"
+"School of Information and Control Engineering, Nanjing University of Information Science and Technology, Nanjing, 210044, China"
+"Visual Geometry Group, University of Oxford, Oxford UK"
+"Beijing Laboratory of IIT, School of Computer Science, Beijing Institute of Technology, Beijing, China"
+"Institute of Cognitive and Behavioural Neuroscience, SWPS University of Social"
+"University of Zagreb, Faculty of Electrical Engineering and Computing, Unska 3, 10000 Zagreb, Croatia"
+"University of Maryland-College Park, USA"
+Northumbria University
+Courant Institute and Google Research
+Harbin Institute of Technology;Shenzhen University
+"University of South Carolina, USA"
+Rowland Institute
+"Final Year, PanimalarInstitute of Technology"
+University of Arkansas at Little Rock
+"Human Development and Applied Psychology, University of Toronto, Ontario, Canada"
+"Japan Advanced Institute of Science and Technology, Ishikawa-ken 923-1211, Japan"
+"R.C.Patel Institute of Technology, Shirpur, Dist.Dhule.Maharashtra, India"
+"York University, Toronto"
+"Student, Computer Science, Shah and Anchor Kuttchi Engineering College, Mumbai, India"
+South China University of Technology
+"Beijing University of Technology, Beijing 100022, China"
+University of Arizona
+"Sri Sunflower College of Engineering and Technology, Lankapalli"
+Japan Advanced Institute of Science and Technology
+"University of York, York, United Kingdom"
+"Proto Labs, Inc"
+"Human Language Technology and Pattern Recognition Group, RWTH Aachen University, Germany"
+The University of Sydney
+The Author 2012. Published by Oxford University Press on behalf of The British Computer Society. All rights reserved
+The Florida State University
+"Engineering, National Formosa University"
+Amirkabir University of Technology
+"National Research Council of Italy, Institute for Microelectronics and Microsystems, Lecce"
+"Institute of Automation, Chinese Academy of Sciences"
+Institute of Digital Media
+"Shenzhen Key Laboratory of Spatial Smart Sensing and Service, Shenzhen University, P.R. China"
+"Temple University, Philadelphia, USA"
+"University of Salzburg, Austria"
+"CMR Institute of Technology, Hyderabad, (India"
+"Carnegie Mellon University Pittsburgh, PA - 15213, USA"
+"Beckman Institute, University of Illinois at Urbana-Champaign, Urbana, IL 61801, USA"
+"The Robotics Inistitute, Carnegie Mellon University"
+"Center for Brain Science, Harvard University, Cambridge, MA, USA"
+Submitted to the Senate of the Hebrew University
+"Centre for Quantum Computation and Intelligent Systems, Faculty of Engineering and IT, University of"
+"Key Laboratory of Behavior Sciences, Institute of Psychology"
+Graz University of Technology
+"University of California, San Diego 2 Carnegie Mellon University"
+University of Sfax
+Bangladesh University of Engineering and Technology(BUET
+"The School of Electrical and Electronic Engineering, Yonsei University, 134 Shinchon-Dong"
+"Xiamen University, Xiamen, China"
+"CVIP Lab, University of Louisville, Louisville, KY 40292, USA"
+"Chandigarh University, Gharuan, Punjab, India"
+"Education, Yunnan Normal University, Kunming, China"
+"Vickram College of Engineering, Enathi, Tamil Nadu, India"
+National Technical University of Athens
+"University College London, UK"
+Muhlenberg College
+"School of Mathematical Sciences, Dalian University of Technology, Linggong Rd. 2, Dalian"
+"Informatization Office, National University of Defense Technology, Changsha 410073, China"
+Acharya Institute Of Technology
+"Rutgers University, Newark, NJ, USA"
+"and Southeast University, China"
+"The Image Processing and Analysis Lab (LAPI), Politehnica University of Bucharest, Romania"
+"McGill University, Montreal, Canada"
+"State University of New York Polytechnic Institute, Utica, New York"
+"UNIVERSITY OF CALIFORNIA, SAN DIEGO"
+"VISLab, EBUII-216, University of California Riverside"
+"China Mobile Research Institute, Xuanwu Men West Street, Beijing"
+Institute of Electrical and Electronics Engineers
+"PG Scholar, Dr.Pauls Engineering College, Villupuram District, Tamilnadu, India"
+University of Tampere
+Drexel University
+"LIACS Media Lab, Leiden University, The Netherlands"
+Indian Institute of Technology
+University Institute of Engineering and Technology
+"Electrical Engineering LR11ESO4), University of Tunis EL Manar. Adress: ENSIT 5, Avenue Taha Hussein, B. P. : 56, Bab"
+University of Notre Dame
+"University of Massachusetts, Amherst"
+"iCV Research Group, Institute of Technology, University of Tartu, Tartu 50411, Estonia"
+"Moscow State University, dept. of Computational Mathematics and Cybernetics"
+"Rochester Institute of Technology - 83 Lomb Memorial Drive, Rochester, NY USA"
+"College of Information Science and Engineering, Xinjiang University"
+"Korea University, Seoul 136-713, Korea"
+"R. Campellone, 3210 Tolman Hall, University of California, Berkeley"
+University of Florida
+"Division of Electronics and Electrical Engineering, Dongguk University, 30 Pildong-ro, 1-gil, Jung-gu"
+"Shanghai Jiao Tong University, CloudWalk Technology"
+"School of Computer Science, Carnegie Mellon University, PA 15213, USA"
+Tokyo Polytechnic University
+"School of Electrical and Computer Engineering, RMIT University, Melbourne, Australia"
+Hong Kong Baptist University
+"School of Computer and Information, Hefei University of Technology, China"
+Victoria University of Wellington
+The University of Shef eld
+University of Caen Basse-Normandie
+"Electronics And Communication Engg., Adhiyamaan College of Engg., Hosur, (India"
+"uses, in any current or future media, including"
+"Psychology and Psychiatry, University of Pittsburgh, USA"
+Institute of Graduate Studies and Research
+"University of Science, VNU-HCM, Viet Nam"
+"Carnegie Mellon University, USA"
+"Electrical Eng. Dep., Central Tehran Branch, Islamic Azad University, Tehran, Iran"
+"Institute of Engineering and Technology, Alwar, Rajasthan Technical University, Kota(Raj"
+University of the Western Cape
+"University of California, Berkeley1 Adobe"
+"UMIACS, University of Maryland, College Park, MD"
+"e ects of di erence factors, including age group, age gap"
+"Governance, Keio University"
+National Chiao-Tung University
+"University, China"
+"Electrical and Computer Engineering, The University of Memphis"
+"University of Maryland Institute for Advanced Computer Studies, College Park, MD"
+"Research Center for Learning Science, Southeast University, China"
+"College Park, MD"
+"Faculty of Telecommunications, Technical University, Sofia, Bulgaria"
+Aristotle University of Thessaloniki GR
+"Faculty of ETI, Gdansk University of Technology, Gdansk, Poland"
+"University of Pittsburgh, Pittsburgh"
+"University of Oxford, United Kingdom"
+VelTech HighTech Dr. Rangarajan Dr.Sakunthala Engineering College
+ICMC University of S ao Paulo
+"b DEI - University of Padova, Via Gradenigo, 6 - 35131- Padova, Italy"
+"University of North Carolina Wilmington, Wilmington, NC, USA"
+"School of Electrical Engineering, Iran University of Science and Technology, Tehran, Iran"
+"College of Computer Science and Information Technology, Central South University of Forestry and Technology, Hunan 410004, China"
+"Grif th University, QLD, Australia"
+"Anna University Chennai, India"
+George Mason University
+"Language Technologies Institute, School of Computer Science"
+Shanghai Jiao Tong University
+The Hong Kong University of Science and Technology
+"York University, Toronto, Canada"
+University of Iowa
+"B. Eng., Zhejiang University"
+"University of Aizu, Japan"
+"International Institute of Information Technology, Hyderabad, India"
+"IKAT, Universiteit Maastricht, St. Jacobsstraat 6, 6211 LB Maastricht, The Netherlands"
+c(cid:13) Carnegie Mellon University
+"University of Queensland, Australia"
+"North China University of Technology, Beijing 100144 CHINA"
+UniversityofMaryland
+"AncyRijaV, Author is currently pursuing M.E (Software Engineering) in Vins Christian College of"
+"University of Adelaide, Australia"
+"Boston College; 2Psychiatric Neuroimaging Program, Massachusetts General Hospital, Harvard Medical School; and 3Athinoula A. Martinos"
+"SSN College of Engineering, Kalavakkam, Tamil Nadu, India"
+Hanyang University
+"Computer Vision Laboratory, Link oping University, Sweden"
+Boston University / **Rutgers University / ***Gallaudet University
+"Center for Automation Research, University of Maryland, College Park, MD"
+South College Road
+Korea Advanced institute of Science and Technology
+"National Taichung University of Science and Technology, Taichung, Taiwan, R.O.C"
+Courant Institute of Mathematical Sciences
+"cid:2)Honda RandD Americas, Inc., Boston, MA, USA"
+ARISTOTLE UNIVERSITY OF THESSALONIKI
+"Asia University, Taichung, Taiwan"
+University of Siegen
+"Bharathidasan University, Trichy, India"
+"Gdansk University of Technology, Faculty of Electronics, Telecommunication"
+Institute for Information Systems Engineering
+"National Institute of Informatics, Japan"
+"Rutgers, The State University of New Jersey, 508 CoRE, 94 Brett Rd, Piscataway, NJ"
+Institute of Arti cial Intelligence and Cognitive Engineering
+"M.Tech Student, Mount Zion College of Engineering, Pathanamthitta, Kerala, India"
+"Schreiber Building, room 103, Tel Aviv University, P.O.B. 39040, Ramat Aviv, Tel Aviv"
+Stony Brook University
+Rowan University
+"University of Bari, Bari, Italy"
+"University of Amsterdam, University of Trento, Italy"
+Interactive and Digital Media Institute
+"University of Illinois, Urbana-Champaign University of California, San Diego"
+Politehnica University of Timisoara
+"Southwest University, China"
+Mihaylo College of Business and Economics
+"Gokaraju Rangaraju Institute of Engineering and Technology, Hyderabad"
+"Lund University, Lund, Sweden"
+"Brown University, United States"
+"Bilkent University, 06800 Cankaya, Turkey"
+"Goldsmiths, University of London, UK"
+"University, USA"
+University of Central Florida
+"of Technology, Kochi, Japan, 3 Yunnan Key Laboratory of Computer Technology Applications, Kunming University of Science"
+"ACRV, The Australian National University University of Oxford QUVA Lab, University of Amsterdam"
+"Rochester Human-Computer Interaction (ROC HCI), University of Rochester, NY"
+Institute for Robotics and Intelligent
+"Aditya Institute of Technology And Management, Tekkali, Srikakulam, Andhra Pradesh"
+"Guide, HOD, Computer Science, Shah and Anchor Kuttchi Engineering College, Mumbai, India"
+College of Informatics
+UNIVERSITY OF TARTU
+"Rayalaseema University Kurnool, Andhra Pradesh"
+"Key Laboratory of Embedded System and Service Computing, Ministry of Education, Tongji University, Shanghai, China"
+National Institute of Development Administration
+"IIE, Universidad de la Rep ublica, Uruguay. 2ECE, Duke University, USA"
+"Copyright c(cid:3) 2017 The Institute of Electronics, Information and Communication Engineers"
+"CRCV, University of Central Florida"
+"RTM Nagpur University, Campus Nagpur, (MS)-India"
+"IBM T. J. Watson Research Center, PO Box 704, Yorktown Heights, NY"
+"Bharti Vidyapeeth Deemed University, Pune, India"
+"King Saud University, KSA"
+Reutlingen University
+"Rutgers, The State University of New Jersey, 723 CoRE, 94 Brett Rd, Piscataway, NJ"
+University of Milan
+"Computer Science and Electrical Engineering, West Virginia University, Morgantown, USA"
+The University of Hong Kong
+"Ponti cal Catholic University of Rio de Janeiro, Brazil"
+"College of Computer Science and Technology, Zhejiang University, China"
+University of Frankfurt
+"DIEI, University of Perugia, Italy"
+"Michigan State University, East Lansing, MI 48824, U.S.A"
+Jadavpur University
+"Collage of Sciences, Baghdad University, Iraq"
+The City College and the Graduate Center
+"Division of Computer Engineering, Chonbuk National University, Jeonju-si, Jeollabuk-do"
+"Institute for Infocomm Research, A*STAR"
+"University of Bonn, Roemerstrasse 164, 53117 Bonn, Germany"
+MITSUBISHI ELECTRIC RESEARCH LABORATORIES
+"Asia Pacific University of Technology and Innovation, Kuala Lumpur 57000, Malaysia"
+Azad University of Qazvin
+"Florida Institute Of Technology, Melbourne Fl"
+"Center for Research in Computer Vision, University of Central Florida"
+"Harvard University, USA"
+"and Engineering, Beihang University, Beijing, China"
+"National Institute of Technology, Toyota College, Japan"
+"Ritsumeikan, University"
+"Dankook University, 126 Jukjeon-dong, Suji-gu, Yongin-si, Gyeonggi-do, Korea"
+Beijing University of Posts and Telecommunications
+"Shenzhen Institutes of Advanced Technology, CAS, China"
+"University of York, York, UK"
+"D.Y.Patil Institute of Engineering and Technology, Pimpri, Pune"
+Honda Fundamental Research Labs
+"Computer Science and Engineering, University of Washington, Seattle, WA"
+"University of Nottingham, UK, School of Computer Science"
+University of Tokyo
+"School of Computer Science and Information Systems, Birkbeck College, University of London"
+"University of Business Agriculture and Technology, Dhaka-1230, Bangladesh"
+"State University of New York at Binghamton, Binghamton, NY"
+"Indraprastha Institute of Information Technology, Delhi"
+UNIVERSITY OF WISCONSIN MADISON
+"Institute of Image Processing and Pattern Recognition, Shanghai Jiao Tong University, Shanghai, China"
+Huazhong Agricultural University
+"Computer Graphics Research Group, University of Freiburg, Freiburg, Germany"
+"Nanjing University, China"
+"University of Akron, Akron"
+"TNLIST, Tsinghua University, Beijing, 100084, China"
diff --git a/scraper/reports/institution_names-2.csv b/scraper/reports/institution_names-2.csv
new file mode 100644
index 00000000..eee63d57
--- /dev/null
+++ b/scraper/reports/institution_names-2.csv
@@ -0,0 +1,714 @@
+University of Wisconsin Madison
+"Eindhoven University of Technology, The Netherlands"
+"Graduate School of Science and Engineering, Saitama University"
+"Gallaudet University, Technology Access Program, 800 Florida Ave NE, Washington, DC"
+"University of Minnesota-Twin Cities, Minneapolis"
+"Technology, University of Oradea 410087, Universitatii 1, Romania"
+University
+"National Laboratory of Pattern Recognition, Institute of Automation, CAS, Beijing, 100190, China"
+Departm nt of Information Engin ering Th Chines University of Hong Kong
+"Key Laboratory of Computer Network and Information Integration of Ministry of Education, Southeast University, Nanjing"
+"University Street, Montreal, QC H3A 0E9, Canada"
+Sinhgad College of
+"National Cheng Kung University, Tainan, Taiwan, R.O.C"
+"University of California, Santa Cruz"
+"USC Information Sciences Institute (ISI), Marina Del Rey, CA"
+"Institute of Cognitive Neuroscience, University College London, London WC1N 3AR, UK. 2Affective Brain"
+The Rockefeller University
+"Centre for Intelligent Machines, McGill University, Montreal, Canada"
+Compi`egne University of Technology
+"University at Buffalo, SUNY"
+"Institute for Human-Machine Communication, Technische Universit at M unchen"
+"Lund University, Cognimatics AB"
+National Institutes of Health
+Intelligence Computing Research Center
+"Rutgers University, Piscataway, NJ 08854, USA"
+"Max Planck Institute for Informatics, Saarbr ucken, Germany (MPI-INF.MPG.DE"
+"PES Institute of Technology, Bangalore, Karnataka, India"
+"University of Tennessee, Knoxville"
+"School of Computer Science, CECS, Australian National University, Australia"
+"Principal, JNTUH College of Engineering, jagitial, Karimnagar, AP, India"
+"Tarbiat Modarres University, Tehran, Iran"
+Glyndwr University
+University of Technology Sydney
+"Robotics Institute, Carnegie Mellon University, Pittsburgh, PA, USA"
+"Cornell University, Ithaca, New York"
+"University of Southampton, United Kingdom"
+Akita University
+"Intelligent Recognition and Image Processing Lab, Beihang University, Beijing"
+"National University of Defense Technology, Changsha 410073, China"
+Nqtional Institute of Standards and Technology
+"Boston University, Linguistics Program, 621 Commonwealth Avenue, Boston, MA"
+University of Pennsylvania
+"cid:2)Imperial College London, U.K"
+"Delft University of Technology, The Netherlands"
+"University of Alicante, 03690, San Vicente del Raspeig, Alicante, Spain"
+"Idiap Research Institute, Martigny, Switzerland, 2LIUM, University of Maine, Le Mans, France"
+"University of Illinois at Urbana-Champaign, Urbana, IL"
+"cid:63)Queen Mary University of London, Imperial College London"
+"Kitware, Inc"
+University of Sydney
+"Institute for Neural Computation, University of California, San Diego, La Jolla, CA"
+"ESAT-PSI, KU Leuven, 2University of Bonn, 3CV:HCI, KIT, Karlsruhe, 4Sensifai"
+McGill University
+"RCC Institute of Information Technology, Kolkata, India"
+"Australian National University and NICTA, Australia"
+"University of Dhaka, Bangladesh"
+Arizona State University
+Tsinghua University 4SenseTime
+University of Virginia
+"Fudan University, Shanghai, China"
+"RIEB, Kobe University, Kobe, 657-8501, Japan"
+"Pattern Recognition Group, University of Siegen"
+California Institute of Technology
+"general term, including collaboration. Interaction determines action on someone"
+"Kwangwoon University, 447-1 Wolge-dong, Nowon-Gu, Seoul 139-701, Korea"
+"Address correspondence to: Karen L. Schmidt, University of"
+cid:1)Institute for Neural Computation
+"The Ohio State University, Columbus, OH, USA"
+University of Geneva
+University of Glasgow
+St. Anne s College
+"PSG College of Technology, Coimbatore, Tamil Nadu, India"
+"Wayne State University, Detroit, MI 48202, USA"
+University of Toronto and Recognyz Systems Technologies
+"Myongji University, Yongin, 449-728 South"
+"FI-90014 University of Oulu, Finland"
+Khulna University of Engineering and Technology
+"Akita University, Akita, Japan"
+University of Stuttgart
+"B.E, K.S.Rangasamy College of Technology, Namakkal, TamilNadu, India"
+"School of Electronics and Information, Northwestern Polytechnical University, China"
+Macau University of Science and
+"University of California, Riverside"
+"California Institute of Technology, Pasadena, CA"
+"Vision Laboratory, LARSyS, University of the Algarve, 8005-139 Faro, Portugal"
+"University of Science and Technology of China, Hefei, Anhui, P. R. China"
+Psychiatry at the University of Pittsburgh
+"University of Pittsburgh, Pittsburgh, USA"
+University of California Berkeley
+University of London
+"Nam k Kemal University, Tekirda g, Turkey"
+"Information Systems Design, Doshisha University, Kyoto, Japan"
+Information Technologies Institute
+"Faculty of Electrical Engineering, Czech Technical University"
+"Institute for Robotics and Intelligent Systems, University of Southern California, CA, USA"
+"Institute for Medical Engineering Science, Massachusetts Institute of Technology, 77 Massachusetts Avenue, Cambridge, MA"
+"Concordia University, Canada"
+"Purdue University, West Lafayette, IN, USA"
+"University of Zagreb, Faculty of Electrical Engineering and Computing, Croatia"
+Kyung Hee University South of Korea
+ATR Human Information Processing Research Laboratory
+"The University of Tennessee, Knoxville"
+"CVSSP, University of Surrey, UK"
+"Center for Cognitive Neuroscience, Duke University, Durham, North Carolina"
+"University of Adelaide, SA, Australia"
+Hanoi University of Science and Technology
+"The Chinese University of Hong Kong, China"
+"Shenzhen key lab of Comp. Vis. and Pat. Rec., Shenzhen Institutes of Advanced Technology"
+"University of California, Irvine"
+"Middlesex University, London"
+Xerox Research Center Webster
+"China, 2 Changchun Institute of Optics, Fine Mechanics and Physics, CAS, Changchun, China, 3 School of"
+"Foundation University Rawalpindi Campus, Pakistan"
+TechnicalUniversityofDenmark
+"Korea Electronics Technology Institute, 203-103 B/D 192, Yakdae-Dong"
+Toyota Technological Institute at Chicago
+"Rensselaer Polytechnic Institute, Troy, NY 12180, USA"
+"Nanjing, 210023, China, 4 School of Computer Science and Technology, Nanjing University of Posts and"
+"Vision Science Group, University of California"
+"University of Southern California, Los Angeles, CA 90089, USA"
+"University of California, Merced, CA"
+"Innopolis University, Kazan, Russia"
+"School of Computing Science, Simon Fraser University, Burnaby, B.C., Canada"
+University of Campinas
+"University of Twente, EEMCS, Netherlands"
+Tomas Bata University in Zl n
+"National Laboratory of Pattern Recognition, Institute of Automation"
+UNIVERSITY OF CALIFORNIA
+"School of EECS, Queen Mary University of London, UK"
+"Staffordshire University, Beaconside Stafford ST18 0AB, United Kingdom"
+"The Blavatnik School of Computer Science, Tel Aviv University, Israel"
+"Beckman Institute, University of Illinois at Urbana-Champaign, IL USA"
+Shenzhen Institutes of Advanced Technology
+"The Hong Kong Polytechnic University, Hong Kong, SAR, 2University of Technology Sydney, Australia"
+"Savitri Bai Phule Pune University, Maharashtra India"
+Institute for Communication Systems
+"Shanghai Institute of Applied Physics, Chinese Academy of Sciences"
+IBM Thomas J. Watson Research Center
+"The Blavatnik School of Computer Science, The Tel-Aviv University"
+Florida State University
+"Centre for Quantum Computation and Intelligent Systems, University of Technology Sydney, Sydney, Australia"
+University of Toulouse
+"University of Illinois at Urbana-Champaign, IL USA"
+"West Virginia University, Morgantown WV 26506, USA"
+"Beijing University of Posts and Telecommunications, Beijing, China"
+"Ritsumeikan University, Kyoto, Japan"
+"Graduate School of Doshisha University, Kyoto, Japan"
+"Much is known on how facial expressions of emotion are produced, including which individual muscles are most active in"
+"College of Computer and Information Science, Northeastern University, MA, USA"
+"Section of Pathology, Second University of Naples, Via L. Armanni"
+Bangalore Institute of Technology
+Liverpool John Moores University
+"University of Bristol, Bristol, UK"
+"Electrical Engineering Institute, EPFL"
+"Computer Laboratory, University of Cambridge, Cambridge, UK"
+The Hong Kong Polytechnic University 2Harbin Institute of Technology
+"University of Science, Vietnam National University-Ho Chi Minh city"
+"Sejong University, 98 Kunja-Dong, Kwangjin-Gu, Seoul, Korea"
+"Kyung Hee University, Yongin, Rep. of Korea"
+"Queen Mary, University of London, London E1 4NS, UK"
+"Computer Applications, Ayya Nadar Janaki Ammal College, Sivakasi, India"
+"University, Taiwan, R.O.C"
+"Computational Science and Engineering Program, Bo gazic i University, Istanbul, Turkey"
+Conference on CyberGames and Interactive Entertainment (pp. 52-58). Western Australia: Murdoch university
+"University of California, Merced, CA 95344, USA"
+"Sensor-enhanced Social Media (SeSaMe) Centre, National University of Singapore, Singapore"
+Dietrich College Honors Theses
+"Dalian University of Technology, Dalian, China"
+The Australian National University Queensland University of Technology
+"University College London, London WC1N 3BG, United Kingdom"
+"University of Freiburg, Germany"
+"Moscow Institute of Physics and Technology, Russia"
+"Recognition, Institute of Automation, Chinese Academy of Sciences"
+"university, Shiraz, Iran"
+"University of Bath, Bath, Somerset, United Kingdom"
+"Portland State University, USA"
+"College of Computer Science, Chongqing University, Chongqing, China"
+"University of Pennsylvania, 2Ryerson University"
+"National Cheng Kung University, Tainan, Taiwan, ROC"
+"School of Financial Information Engineering, Southwestern University of Finance and Economics, Chengdu"
+"University of Queensland, St Lucia QLD Australia, 5 Institut Universitaire de France, Paris, France"
+"School of Computer Science, University of Lincoln, U.K"
+"Marine Institute, via Torre Bianca, 98164 Messina Italy"
+"Key Laboratory of Machine Perception (MOE), School of EECS, Peking University"
+"Faculty of Electrical Engineering, University of Ljubljana, Tr za ska cesta 25, SI-1000 Ljubljana, Slovenia"
+"Jadavpur University, India"
+Virginia Tech Carilion Research Institute
+Boston University Computer Science Technical Report No
+"Graduate School of Advanced Imaging Science, Multimedia, and Film, Chung-Ang University"
+University of Toulouse II Le Mirail
+"Key Lab. of Machine Perception, School of EECS, Peking University"
+University of Massachusetts
+"King Abdullah University of Science and Technology 4700, Thuwal, Saudi Arabia"
+University of Illinois at Urbana
+"Institute of Scienti c and Industrial Research, Osaka University, Ibaraki-shi 567-0047, Japan"
+CARNEGIE MELLON UNIVERSITY
+Sridevi Women's Engineering College
+Yale University
+"School of Comm. and Info. Engineering, Beijing University of Posts and Telecom., Beijing China"
+"Bo gazi ci University, Turkey"
+"Language Technologies Institute, Carnegie Mellon University, PA, USA"
+"Elect. Eng. Faculty, Tabriz University, Tabriz, Iran"
+"Kent State University, Kent, Ohio, USA"
+"School of Information Engineering, Nanchang University, China"
+"JDL, Institute of Computing Technology, CAS, P.O. Box 2704, Beijing, China"
+"Institute of Automation, Chinese Academy of Sciences, Beijing 100080, China"
+"School of Computer Science and Engineering, Southeast University, Nanjing 211189, China"
+"St. Xavier s Catholic College of Engineering, Nagercoil, India"
+"aLawrence Technological University, 21000 W Ten Mile Rd., South eld, MI 48075, United States"
+"Max Planck Institute for Informatics, Germany"
+"Stanford University, CA, United States"
+AgnelAnushya P. is currently pursuing M.E (Computer Science and engineering) at Vins Christian college of
+"University Station C0500, Austin TX 78712, USA"
+"Tafresh University, Tafresh, Iran"
+"Illinois Institute of Technology, Chicago, Illinois, USA"
+"University of Texas at Arlington, Arlington, Texas 76019, USA"
+"Skolkovo Institute of Science and Technology (Skoltech), Russia"
+"Middlesex University London, London, UK"
+Kent State University
+"Assam University, Silchar-788011 Assam University, Silchar"
+"Xi'an Institute of Optics and Precision Mechanics, Chinese Academy of Sciences"
+University of Lac Hong 10 Huynh Van Nghe
+"University of Central Florida, Orlando, USA"
+"Zhejang University, Hangzhou 310027, P.R.China"
+"Columbia University, New York, NY"
+Mahatma Gandhi Institute of Technology
+University of Aberdeen
+"DTU Informatics, Technical University of Denmark, DK-2800 Kgs. Lyngby, DENMARK"
+"DISI, University of Trento, Trento, Italy"
+"Nagaoka University of Technology, Japan"
+Federal University of Technology - Paran a
+"Electrical, Computer, Rensselaer Polytechnic Institute"
+"University of Peshawar, Pakistan"
+of the University of Notre Dame
+"IHCC, RSCS, CECS, Australian National University"
+"Institute of Computing Technology, CAS, Beijing 100190, China"
+The Open University of Israel
+"Tsinghua University, Beijing 100084, China"
+"Leiden, the Netherlands, 3 Delft University of Technology"
+"University of Malaya, Kuala Lumpur, Malaysia"
+"University of Pittsburgh, Pittsburgh, PA 15260, USA"
+college of Engineering
+"Recognition, Institute of Automation"
+"Kogakuin University, Tokyo, Japan"
+"Recanati Genetic Institute, Rabin Medical Center and Schneider Children s Medical Center, Petah Tikva, Israel"
+"Director, Hindustan College of Arts and Science, Coimbatore, Tamil Nadu, India"
+"Islamic Azad University, Mashhad Branch, Mashhad, Iran"
+"The School of Computer Science, Tel-Aviv University, Israel"
+Eastern Mediterranean University
+"New York University Shanghai, 1555 Century Ave, Pudong"
+DVMM Lab - Columbia University
+"Aristotle University of Thessaloniki, Thessaloniki 541 24, Greece"
+Honda Research Institute
+University of Bridgeport
+"Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences, 518055, China"
+University of Colorado at Colorado Springs
+"Electronics and Communication Engineering, Chuo University"
+"School of Computer Science, Fudan University, Shanghai, China"
+"Statistics, University of"
+"EEMCS, University of Twente"
+"Imperial College London, U.K"
+"School of Computer and Information, Hefei University of Technology, Hefei"
+"University of Tabriz, Tabriz, Iran"
+"Brown University, 2University of California, San Diego, 3California Institute of Technology"
+"Shenzhen Key Lab of CVPR, Shenzhen Institutes of Advanced Technology, CAS, China"
+Concordia University
+Oxford Brookes University
+"Shanghai Jiao Tong University, China"
+"Mitsubishi Electric Research Labs (MERL), Cambridge, MA, USA"
+"Rio de Janeiro State University, Brazil"
+"College of Computer Science, Zhejiang University, Zhejiang, China"
+Indian Institute of Technology Kanpur
+"College Heights Blvd, Bowling Green, KY"
+Polytechnic University of Bucharest
+University of Cagliari
+"Mackenzie Presbyterian University, S o Paulo, S o Paulo, Brazil"
+"t2i Lab, Chalmers University of Technology, Gothenburg, Sweden"
+"University of Maryland, College Park"
+"P.A. College of Engnineering, Mangalore"
+"B.Tech (C.S.E), Bharath University, Chennai"
+"b Computer Technology Institute, Beijing Union University, 100101, China"
+"Sri krishna College of Technology, Coimbatore, India"
+"Metron, Inc"
+"cCentre of Intelligent Machines, McGill University, Montr eal, QC H3A 0E9, Canada"
+"School of Information Science and Technology, Donghua University, Shanghai 200051, China"
+HELSINKI UNIVERSITY OF TECHNOLOGY
+Kingston University
+"University of Chinese Academy of Sciences (UCAS), Beijing, 100049, China"
+Institute for studies in theoretical Physics and Mathematics(IPM
+"School of Computing and Info. Sciences, Florida International University"
+Hong Kong University of Science and Technology
+by grants from the National Institute of Mental Health (MH 15279 and MH067976 (K. Schmidt
+Chungnam National University
+"Victoria University of Wellington, PO Box 600, Wellington 6140, New Zealand"
+"Jilin University, Changchun 130012, China"
+"Kumamoto University, 2-39-1 Kurokami, Kumamoto shi"
+"cid:63) Imperial College London, UK"
+"Psychology, University of Illinois, Beckman Institute, Urbana-Champaign, Illinois 61801, University of"
+"University of telecommunications and post, Sofia, Bulgaria"
+Zhejiang University
+"Shanghai Jiao Tong University, 800 Dongchuan Road, Shanghai 200240, China"
+"Faculty of Computers and Information, Cairo University, Cairo, Egypt"
+"National Taiwan University of Science and Technology, Taipei 10607, Taiwan"
+"Washington University, St. Louis, MO, USA"
+"The Chinese University of Hong Kong, Hong Kong"
+"Institute of Psychology, Chinese"
+"B.A. Earlham College, Richmond Indiana"
+"College of software, Chongqing University of Posts and Telecommunications Chongqing"
+"Several methods exists to induce anxiety in healthy individuals, including threat of shock (ToS), the Trier"
+"Multimodal Computing and Interaction, Saarland University, Germany"
+"Queensland Micro- and Nanotechnology Centre and Grif th School of Engineering, Grif th University"
+"Gonda Brain Research Center, Bar Ilan University, Israel"
+Tokyo Metropolitan University
+"Faculty of EEMCS, University of Twente, The Netherlands"
+"University of Pisa, Pisa, Italy"
+"Institute of Advanced Technology, Nanjing University of Posts and Telecommunications, Nanjing"
+"Stony Brook University, Stony Brook NY 11794, USA"
+"Beijing Union University, 100101, China"
+"School of Computer Science, Tianjin University"
+"Georgia Institute of Technology, Atlanta, Georgia, USA"
+"University of Southampton, SO17 1BJ, UK"
+"Heudiasyc Laboratory, CNRS, University of Technology of Compi`egne"
+"The Amsterdam School of Communication Research, University of Amsterdam"
+"Cambridge University, Trumpington Street, Cambridge CB21PZ, UK"
+"Dayananda Sagar College of Engg., India"
+"Toyota Technological Institute (Chicago, US"
+Baidu Research Institute of Deep Learning
+Southwest Jiaotong University
+"applications, including texture classification [16], face recognition [12], object detection [10], and"
+Firat University
+Institute for Advanced
+"University of Lincoln, UK"
+Fudan University
+"Anna University, Chennai"
+"Computer Science and Arti cial Intelligence Laboratory, Massachusetts Institute of Technology, Cambridge, MA, USA"
+The Chinese University of Hong Kong
+College of Information Engineering
+"bSchool of Computer and Control Engineering, University of Chinese Academy of Sciences"
+Slovak University of Technology in
+"Madanapalle Institute of Technology and Science, Madanapalle, Andhra Pradesh"
+cid:3)The Salk Institute and Howard Hughes Medical Institute
+Institute of Computing
+"Rochester Institute of Technology, Rochester, NY"
+Queen's University Belfast - Research Portal
+massachusetts institute of technology artificial intelligence laboratory
+tional Taipei University for his help in performing simulations. The author would like to thank Mr. Ming
+"School of Computing and Communications, University of Technology Sydney, Sydney, Australia"
+b Institute for Robotics and Intelligent Systems
+Samsung Advanced Institute of Technology
+"University, Singapore"
+"Nanjing University, Nanjing 210093, P.R.China"
+"University, Guangzhou, China"
+"National Institute of Informatics, Tokyo, Japan"
+"Research Center for Intelligent Security Technology, CIGIT"
+"Massachusetts Institute of Technology, 77 Massachusetts Avenue, Cambridge, MA"
+"Robotics Institute, Carnegie Mellon University, Pittsburgh PA"
+"University of Delaware, Newark, DE. USA"
+"Institute of Data Science and Technology, Alibaba Group"
+"Institute of Experimental Biology of Polish Academy of Sciences, Warsaw, Poland"
+"Intelligent User Interfaces Lab, Ko c University, Turkey"
+"University of Edinburgh, Edinburgh, UK"
+National University
+"University of Washington, Bothell"
+Dietrich College of Humanities and Social Sciences
+International University of
+Beihang University
+Colorado State University
+"Centre for Quantum Computation and Intelligent Systems, University of Technology Sydney"
+"Most of the earlier studies mentioned above, including ours"
+"Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences, China"
+"MATS University, MATS School of Engineering and Technology, Arang, Raipur, India"
+"National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of"
+Xidian University 2Xi an Jiaotong University 3Microsoft Research Asia
+"University of York, UK"
+Huazhong University of
+"Foundation University, Rawalpindi 46000, Pakistan"
+CUNY Graduate Center and City College
+Toyota Technological Institute Chicago (TTIC
+"Intel Lab, 2200 Mission College Blvd, Santa Clara, CA 95054, USA"
+University of Western Australia
+"State Key Laboratory of Pulp and Paper Engineering, South China University of Technology, Guangzhou 510640, China"
+"Institute Polythechnic of Leiria, Portugal"
+"Coordinated Science Lab, University of Illinois at Urbana-Champaign"
+"School of Engineering, Taylor s University"
+"Boston University, Boston, MA"
+"ples of such ne-grained descriptions, including attributes covering detailed"
+"School of Computer Science and Technology, Shandong University"
+The University of Queensland in
+"National Key Laboratory for Novel Software Technology, Nanjing University, China"
+MULTIMEDIA UNIVERSITY
+"VSI Lab, Goethe University, Frankfurt, Germany"
+"NICTA , Queensland Research Laboratory, QLD, Australia"
+"Joint Research Institute, Foshan, China"
+Beckman Institute for Advanced Science and Technology
+"University of Illinois at Urbana-Champaign, USA"
+"University of Exceter, Exceter, UK"
+University of Toronto
+"State Key Lab of CADandCG, Zhejiang University, Hangzhou, Zhejiang, China"
+"IBM T. J. Watson Research Center, Yorktown Heights, NY, USA"
+"and Mathematical Biosciences Institute, The Ohio State University"
+"Robotics Institute, Carnegie Mellon University 3University of Pittsburgh, USA"
+"Vision and Fusion Laboratory (IES), Karlsruhe Institute of Technology (KIT"
+"University of Chinese Academy of Sciences, Beijing 100190, China"
+"M S Ramaiah Institute of Technology, Bangalore, Karnataka, India"
+Huazhong University of Science and Technology
+"University of Chinese Academy of Sciences, Beijing, China"
+"Gokaraju Rangaraju Institute of Engineering and Technology, Hyd"
+"University of California, San Diego, USA"
+Vietnam National University of Agriculture
+"Motorola, Inc"
+IBM T. J. Watson Research Center
+"IBM Almaden Research Center, San Jose CA"
+"College Park, Maryland"
+cid:1) Honda Research Institute
+University of Freiburg
+Ruhr University Bochum
+Al-Khwarizmi Institute of Computer Science
+Electronic Engineering and Computer Science Queen Mary University of London
+The Weizmann Institute of Science
+Coburg University
+University of Birmingham
+"University of Toronto, 6 Kings College Road, Toronto, ON M5S 3G4 CANADA"
+"Beihang University, Beijing, China"
+"CAS), Institute of Computing Technology, CAS, Beijing, 100190, China"
+"University of Patras, Greece"
+"Istanbul Technical University, Istanbul, Turkey"
+"atry, University of Pennsylvania School of Medicine, Philadelphia, PA"
+"Colorado State University, Fort Collins, Colorado, USA"
+"D Research Center, Kwangwoon University and Springer"
+KIT University of the State of Baden-W rttemberg and National Laboratory of the Helmholtz Association
+"Neuroscience, Icahn School of Medicine at Mount Sinai, Friedman Brain Institute, New York, NY, United States"
+University of Illinois at Urbana Champaign
+"Manonmaniam Sundaranar University, Tirunelveli, India"
+"Purdue University, West Lafayette, IN. 47907, USA"
+National Institute of Technology Rourkela
+Duke University
+"Michigan State University, NEC Laboratories America"
+"National Technical University of Athens, 15780 Athens, Greece"
+"School of Electronic and Information Engineering, Tongji University, Shanghai, China"
+"cid:93) Faculty of Science and Technology, University of Macau"
+"Ferdowsi University of Mashhad, Mashhad, Iran"
+"Jaipur, Rajasthan, India"
+"K.N. Toosi University of Technology, Tehran, Iran"
+"Computer Vision Laboratory, University of Nottingham, Nottingham, UK"
+"Program of Computational Science and Engineering, Bo gazi ci University"
+"Florian Metze, Chair (Carnegie Mellon University"
+"Medical Image Analysis Lab, School of Computing Science, Simon Fraser University, Canada"
+"University of Coimbra, Portugal"
+"P.S.R Engineering College, Sivakasi, Tamilnadu, India"
+"University Drive, Fairfax, VA 22030-4444, USA"
+"University of Kentucky, 329 Rose St., Lexington, KY, 40508, U.S.A"
+"Interdisciplinary Program of Bioengineering, Seoul National University, Seoul 03080, Korea"
+"University of Rochester, NY 14627, USA"
+"Aditya College of Engineering, Surampalem, East Godavari"
+McGovern Institute for Brain Research
+"Hiroshima University, Japan"
+"School of Mathematics and Computational Science, Sun Yat-sen University, China"
+GE Global Research Center
+Allen Institute for Arti cial Intelligence (AI
+"Hong Kong University of Science and Technology, Hong Kong"
+"Western Sydney University, Parramatta, NSW 2150, Australia"
+"ment of Psychology, University of California, Berkeley"
+University of California Berkeley
+"Beijing Institute of Technology, Beijing 100081, PR China"
+"Universite Catholique de Louvain, Louvain-la-Neuve, Belgium, 4 The Queensland Brain Institute, The"
+"EEMCS, University of Twente, The Netherlands"
+Indian Institute of Science
+"Informatics and Telematics Institute, Centre for Research and Technology Hellas"
+"School of Electrical, Computer and Energy Engineering, Arizona State University"
+"Human Genome Center, Institute of Medical Science"
+"ADSIP Research Centre, University of Central Lancashire"
+"Computer Science, Princeton University, Princeton, NJ, USA"
+"Computer Science and Engineering, University of Michigan, Ann Arbor"
+"School, The University of Sydney, Sydney, NSW, Australia"
+"Facial Image Processing and Analysis Group, Institute for Anthropomatics"
+"Nanjing University of Aeronautics and Astronautics, Nanjing 210016, China"
+"Computer Science and Engineering, University of Washington, Seattle, WA, USA"
+Purdue Institute for Integrative Neuroscience
+The University of Adelaide; and Australian Centre for Robotic Vision
+Ministry of Higher Education and Scientific Research / The University of Mustsnsiriyah/Baghdad IRAQ
+"Intelligent Autonomous Systems (IAS), Technical University of Munich, Garching"
+National Institute of Advanced Industrial Science and Technology (AIST
+"Visual Geometry Group, University of Oxford"
+years. According to the definition by the National Institute
+"Machine Intelligence Group, School of Computer Science, Indian Institute of Information Technology and"
+Nokia Bell Labs and University of Oxford
+"University of California at Berkeley, USA"
+"Nanyang Technological University, 2University of California San Diego"
+Federal University of Campina Grande (UFCG
+"Institute for Infocomm Research (I2R), A*STAR, Singapore"
+Institute of Informatics - ISLA
+"University of Colorado, Boulder"
+Ho Chi Minh City University of
+"Image and Video Laboratory, Queensland University of Technology (QUT), Brisbane, QLD, Australia"
+"Imperial College London, On do"
+Mitsubishi Electric Research Labs (MERL
+Federal University of Para ba
+"Division of Electronics and Electrical Engineering, Dongguk University, 26 Pil-dong 3-ga, Jung-gu"
+Nottingham Trent University
+"Fraunhofer Institute of Optronics, System Technologies and Image Exploitation (Fraunhofer IOSB"
+"School of Information Engineering, Guangdong University of Technology, 510006 Guangzhou, China"
+"Central Mechanical Engineering Research Institute, Durgapur, West Bengal, India"
+"Columbia University, United States"
+Aristotle University of Thessaloniki
+University of Edinburgh
+"Nottingham Trent University, Nottingham, UK"
+"School of Computer and Communication Engineering, University of Science and Technology Beijing, 100083 Beijing, China"
+Institute of Electrical and Electronics Engineers (IEEE). DOI
+"Technical University of Ostrava, FEECS"
+City University of Hong Kong
+"G.H.Raisoni College of Engg. and Mgmt., Pune, India"
+Institute of Industrial Science
+"Johns Hopkins University, 3400 N. Charles St, Baltimore, MD 21218, USA"
+Stanford University
+Max Planck Institute for Informatics
+Peking University
+Heriot-Watt University
+"Sapienza University of Rome, Italy"
+"King Saud University, P.O. Box 51178, Riyadh 11543, Saudi Arabia"
+"University of California at Los Angeles, Los Angeles, CA, USA"
+University of Copenhagen
+"Human Media Interaction, University of Twente, P.O. Box"
+"Center for Machine Vision Research, University of Oulu, Finland"
+Shaheed Zulfikar Ali Bhutto Institute of
+This work was supported by Grant MOP102637 from the Canadian Institutes of Health Research to E.D.R. and the
+"Middlesex University London, UK"
+"Abha Gaikwad -Patil College of Engineering, Nagpur, Maharashtra, India"
+"Institute of Automation, Chinese Academy of Sciences, China"
+"Computer Vision Group, Xerox Research Center Europe, Meylan, France"
+"Shenzhen Research Institute, The Chinese University of Hong Kong, Shenzhen 518057, China"
+"University of Sk vde, Sweden"
+"LIUM Laboratory, Le Mans, France, 2 Idiap Research Institute, Martigny, Switzerland"
+"MTech Student 1, 2, Disha Institute of"
+at The Australian National University
+"Graduate Institute of Electronics Engineering, National Taiwan University"
+Institute for Anthropomatics
+Indiana University Bloomington
+"cSchool of Astronautics at Beihang University, 100191, Beijing, P.R.C"
+"QCIS, University of Technology Sydney, Sydney, Australia"
+"IDIAP Research Institute, Martigny, Switzerland"
+"Sathyabama University, Chennai, India"
+"National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences"
+Democritus University of Thrace
+"FX Palo Alto Laboratory, Inc., California, USA"
+"School of Software, Dalian University of Technology, Tuqiang St. 321, Dalian 116620, China"
+ATR Interpreting Telecommunications Research Laboratories
+"University of Surrey, Guildford, Surrey GU2 7XH, UK"
+"Michigan State University, 3115 Engineering Building"
+St. Francis Institute of Technology
+"Electrical and Computer Engineering, University of Auckland, New Zealand"
+"do, Rep. of Korea, Kyung Hee University, Suwon, Rep. of Korea"
+Signal Processing Institute
+"Numediart Institute, University of Mons"
+University Politehnica of Bucharest
+"Imperial College, 180 Queens Gate"
+"University, Hong Kong"
+"2015 Wiley Periodicals, Inc"
+"Research Center in Information Technologies, Universit e de Mons, Belgium"
+"Shenzhen Key Lab of Comp. Vis. and Pat. Rec., Shenzhen Institutes of Advanced Technology, CAS, China"
+"Carnegie Mellon University, Pittsburgh, PA 15213, USA"
+Poznan University of Technology
+"B.S., E.E., Bo azi i University"
+"cid:93)Peking University Shenzhen Graduate School, Shenzhen, P.R.China"
+"Engg, Priyadarshini College of"
+"Institute of Media and Information Technology, Chiba University"
+"Bilgi University, Dolapdere, Istanbul, TR"
+"Kurukshetra University, Kurukshetra, India"
+Idiap Research Institute
+University of Basel
+"Pursuing M.Tech, Caarmel Engineering College, MG University, Kerala, India"
+"University of Vermont, 33 Colchester Avenue, Burlington"
+"Virginia Polytechnic Institute and State University, Blacksburg"
+University of Ljubljana Faculty
+Northwestern University
+"School of Electrical and Computer Engineering, Cornell University"
+Rowland Institute at Harvard
+"School of Information Technology and Electrical Engineering, The University of Queensland"
+Xidian University
+"University of Cambridge, The Computer Laboratory, Cambridge CB3 0FD, U.K"
+The Chinese University of Hong Kong holds the copyright of this thesis. Any
+"NEC Laboratories America, Inc., Cupertino, CA"
+University of North Texas
+"Nagoya Institute of Technology, Gokiso, Showa-ku, Nagoya, 466-8555 Japan"
+"University of Michigan, Ann Arbor, MI, USA"
+"School of Software, Dalian University of Technology, Dalian 116621, China"
+"State Key Laboratory of Brain and Cognitive Science, Institute of Psychology"
+National University of Technology Technology
+Downloaded from the University of Groningen/UMCG research database (Pure): http://www.rug.nl/research/portal. For technical reasons the
+"Bapuji Institute of Engineering and Technology Davanagere, Karnataka, India"
+"Center for Automation Research, UMIACS, University of Maryland, College Park, MD 20742, USA"
+"Columbia University, New York, NY, USA"
+"Institute for Pattern Recognition and Artificial Intelligence/ Huazhong University of Science and Technology, Wuhan"
+"University of Zagreb, Unska 3, 10 000 Zagreb"
+"Boston University, USA"
+Otto-von-Guericke University Magdeburg
+cid:63)Stanford University
+"School of Computer Science and Technology, Harbin Institute of Technology, China"
+University of Piraeus
+"American University, Washington, DC, USA"
+"Courant Institute of Mathematical Sciences, New York, NY"
+Fraser University
+"Asian University, Taichung, Taiwan"
+"University at Buffalo, State University of New York"
+National University of Science and Technology
+"School of Computer Information Engineering, Jiangxi Normal University, Nanchang, China"
+"Sighthound, Inc"
+"College of Sciences, Northeastern University, Shenyang 110819, China"
+"Link oping University, SE-581 83 Link oping, Sweden"
+"instance has been detected (e.g., a face), it is be possible to obtain further information, including: (i"
+"Graduate School of Informatics, Kyoto University, Kyoto 606-8501, Japan"
+METs Institute of Engineering
+Sanghvi Institute of Management and Science
+"Institute of Chinese Payment System, Southwestern University of Finance and Economics, Chengdu 610074, China"
+"Institute for Robotics and Intelligent Systems, USC, CA, USA"
+"Tampere University of Technology, Finland"
+"Institute AIFB, Karlsruhe Institute of Technology, Germany"
+"School of Computer Science and Engineering, Southeast University, Nanjing 210096, China"
+Opus College of Engineering
+"Human Centered Multimedia, Augsburg University, Germany"
+College of Information Science and Engineering
+The University of Maryland
+"Center for Cognitive Science, University of Turin, Turin, Italy, 2 Neuroscience Institute of Turin"
+Electronics and Telecommunications Research Institute
+Akita Prefectural University
+"College of Computer Science and Software Engineering, Shenzhen University, Shenzhen 518060, China"
+"Applied computing and mechanics laboratory, Swiss Federal Institute of Technology, 1015 Lausanne, Switzerland"
+"1E1 WC Mackenzie Health Sciences Centre, University of Alberta, Edmonton, AB, Canada T6G 2R"
+"Anjuman College of Engineering and Technology, Sadar, Nagpur, India"
+Carnegie Mellon University (CMU
+Indraprastha Institute of Information Technology
+"Faculty of Electrical Engineering, Czech Technical University in Prague"
+AALTO UNIVERSITY
+"MIRACL-FSEG, University of Sfax"
+"Changchun Institute of Optics, Fine Mechanics and Physics, Chinese Academy of Science"
+"Nanjing University, Nanjing 210093, China"
+"Research Center for Information Technology Innovation, Academia Sinica, Taipei, Taiwan"
+"Scienti c Visualization and Computer Graphics, University of Groningen, Nijenborgh 9, Groningen, The Netherlands"
+"Cognitive Systems Lab, Karlsruhe Institute of Technology, Karlsruhe, Germany"
+"Bogazici University, Bebek"
+"Technical University of Cluj Napoca, 28 Memorandumului Street"
+The Institute of Electronics
+Xerox Research Center India
+"German Research Center for Arti cial Intelligence (DFKI), Kaiserslautern, Germany"
+Louisiana State University
+In the Graduate College
+"Australian National University, Canberra, ACT 0200, Australia"
+"Visualization and Computer Vision Lab, GE Global Research Center"
+"Rutgers University, Piscataway NJ 08854, USA"
+"College of Information Science and Engineering, Ritsumeikan University, Shiga 525-8577, Japan"
+"aResearch Scholar, Anna University, Chennai, Inida"
+"National Laboratory on Machine Perception, Peking University, Beijing, P.R. China"
+"Gujarat Technological University, V.V.Nagar, India"
+"M.H Saboo Siddik College of Engineering, University of Mumbai, India"
+Massachusetts Institute of Technology (MIT
+"IBM Research, Australia, 2 IBM T.J. Watson Research Center, 3 National University of Singapore"
+The University of Western Australia
+"Mangalore Institute of Engineering and Technology, Badaga"
+"Normal University, Kunming, China"
+University of Dhaka
+"Information and Media Processing Research Laboratories, NEC Corporation"
+Cambridge University
+"School of Mathematics and Computer Science, Northeastern State University, Tahlequah, OK 74464, USA"
+"Oxford Brookes University, Oxford, United Kingdom"
+VEER SURENDRA SAI UNIVERSITY OF
+"Cyprus University of Technology, Cyprus"
+"Co-Guide, Computer Science, Shah and Anchor Kuttchi Engineering College, Mumbai, India"
+"Beihang University, Beijing 100191, China"
+"Florida State University, Tallahassee, FL 32306, USA"
+in The University of Michigan
+"Deva Ramanan, University of California at Irvine"
+"Stanford University, Stanford, California"
+"Graduate School of Information Science and Technology, The University of Tokyo"
+"SSN College of Engineering, Chennai, India"
+National Institute of Informatics
+"School of Psychology, Cardiff University, Cardiff, United Kingdom, College of"
+Canadian Institute for Advanced Research
+"Facebook AI Research, 2Dartmouth College"
+MICC - University of Florence
+"National Cheng Kung University, Tainan, Taiwan"
+"Computer Vision Group, Friedrich Schiller University Jena"
+"Montreal Institute for Learning Algorithms, Universit e de Montr eal"
+"Polytechnic University of Milan, Milan, 20156, Italy, 3 Applied Electronics"
+"Dipartimento di Sistemi e Informatica, University of Florence"
+"Hong Kong Polytechnic University, Hong Kong"
+"Dalio Institute of Cardiovascular Imaging, Weill Cornell Medical College"
+"Electronics Engineering, National Institute of Technical Teachers"
+Baidu IDL and Tsinghua University
+King Faisal University
+"Tel Aviv University, Israel"
+"Head and Neck Surgery, Seoul National University"
+Helsinki University of Technology Laboratory of Computational Engineering Publications
+"Humboldt-University, Berlin, Germany"
+"Welten Institute, Research Centre for Learning, Teaching and Technology, Faculty of"
+"University of Milano-Bicocca, Italy"
+"Utah State University, Logan, UT 84322-4205, USA"
+"aSchool of Computing and Mathematics, Charles Sturt University, Bathurst, NSW"
+"Institute for Human-Machine Communication, Technische Universit at M unchen, Germany"
+"Machine Intelligence Laboratory, College of Computer Science, Sichuan University"
+"University of California, San Diego, California, USA"
+University of Southampton
+"Institiude of Computer Science and Technology, Peking University"
+"School of Mechanical Engineering, Southwest Jiaotong University, Chengdu 610031, China"
+"Chalmers University of Technology, SAFER"
+"College of Computer and Information Engineering, Nanyang Institute of Technology"
+Institute of Computer Science III
+"The Robotics Institute, Carnegie Mellon University"
+"Academic Center for Computing and Media Studies, Kyoto University, Kyoto 606-8501, Japan"
+"University of Barcelona and Computer Vision Centre, Barcelona, Spain"
+"University of Cape Town, South Africa"
+Yeungnam University
+University of Pittsburgh
+"School of Computer Science, The University of Nottingham, Nottingham, UK"
+"School of Information Technology and Management, University of International"
+"MIRACL-FS, University of Sfax"
diff --git a/scraper/reports/institution_names-3.csv b/scraper/reports/institution_names-3.csv
new file mode 100644
index 00000000..fa865c1c
--- /dev/null
+++ b/scraper/reports/institution_names-3.csv
@@ -0,0 +1,712 @@
+"Institute of Information Science, Beijing Jiaotong University, Beijing 100044, P.R. China"
+"Temple University, Philadelphia, PA 19122, USA"
+"Indian Institute of Technology, Madras"
+"School of Computer Science and Engineering, Nanjing University of Science and Technology"
+"School of Internet of Things Engineering, Jiangnan University, Wuxi 214122, China"
+University of Wollongong. For further information contact the UOW
+"Robotics Institute, Carnegie Mellon University, Pittsburgh, PA"
+"Research Center CENTIA, Electronics and Mechatronics"
+Bielefeld University
+"National University of Singapore, Singapore"
+Datta Meghe College of Engineering
+"Korean Research Institute of Standards and Science (KRISS), Korea"
+Pondicherry Engineering College
+Jahangirnagar University
+Australian National University and NICTA
+Tsinghua University
+University of Texas
+"Islamic Azad University, Qazvin, Iran"
+Banaras Hindu University
+"Beijing Institute of Technology, China"
+"North Dakota State University, Fargo, ND 58108-6050, USA"
+"Northeastern University, MA, USA"
+"DSP Lab, Sharif University of Technology, Tehran, Iran"
+"School of Computer Science and Engineering, Water Resources University, Hanoi 10000, Vietnam"
+State University of Feira de Santana (UEFS
+"College of Information and Control Engineering, China University of Petroleum, Qingdao 266580, China"
+"Adobe Systems, Inc., 345 Park Ave, San Jose, CA"
+"Korea Electronics Technology Institute, Jeonju-si, Jeollabuk-do 561-844, Rep"
+"University of Waterloo, Waterloo ON N2L3G1, Canada"
+"Institute for Neural Computation, University of California, San Diego"
+"engineering, Government College of Engineering Kannur, Kerala, India"
+Massachusetts Institute of Technology Rapporteur
+University of Memphis
+"Computer Vision Group, Friedrich Schiller University of Jena, Germany"
+"Faculty of Computer Science, Dalhousie University, Halifax, Canada"
+"University of Ulsan, Ulsan, Republic of Korea"
+"Chandigarh Engg. College, Mohali, Punjab, India"
+"School of Information Engineering, Guangdong Medical College, Song Shan Hu"
+"Technical University Munich, Germany"
+"Academy of Sciences (Grant No. KGZD-EW-T03), and project MMT-8115038 of the Shun Hing Institute of"
+University of Illinois at
+"Institute of Child Health, University College London, UK"
+Purdue University
+"Institute of Informatics, Istanbul Technical University, Istanbul, 34469, TURKEY"
+"Information Engineering, P. O. Box 4500 FI-90014 University of Oulu, Finland"
+"The Robotics Institute, Carnegie Mellon University, 5000 Forbes Ave, PA, USA"
+"Environment, Northumbria University, Newcastle, NE1 8ST, United Kingdom"
+"State Key Lab of CADandCG, College of Computer Science, Zhejiang University, Hangzhou, China"
+"UNIVERSITY OF CALIFORNIA, BERKELEY"
+Ho Chi Minh City University of Science
+c(cid:13)The Chinese University of Hong Kong
+"Institute of Deep Learning, Baidu Research"
+"Faculty of Science, University of Amsterdam, The Netherlands"
+"University of Washington, Seattle, WA 98195, United States"
+"Massachusetts Institute of Technology, 2Facebook Applied Machine Learning, 3Dartmouth College"
+"SRM University, Kattankulathur, Chennai-603 203, Tamilnadu, India"
+"University of Tokyo, 4-6-1 Shirokanedai"
+"Aarhus University, Finlandsgade 22 8200 Aarhus N, Denmark"
+"aDivision of Biology and Biological Engineering 156-29, Howard Hughes Medical Institute, California Institute of Technology, Pasadena, CA"
+"Lab of Science and Technology, Southeast University, Nanjing 210096, China"
+"Faculty of Computer Science, Mathematics, and Engineering, University of Twente, Enschede, Netherlands"
+"M.P.M. College, Bhopal, India"
+"School of Computing Sciences, University of East Anglia, Norwich, UK"
+"NEC Laboratories America, Inc"
+"CAS), Institute of Computing Technology, CAS, Beijing 100190, China"
+"State Key Lab. LIESMARS, Wuhan University, China"
+"Sackler Faculty of Medicine, Tel Aviv University, Tel Aviv, Israel"
+"Ecole Polytechnique Federale de Lausanne, Signal Processing Institute"
+"Peking University, Beijing, China"
+"The University of Electro-Communications, JAPAN"
+IBM China Research Lab
+Formerly: Texas AandM University
+"DIT UNIVERSITY, DEHRADUN"
+"IslamicAzad University, Qazvin, Iran"
+"University of Central Florida, Orlando"
+"Nokia Research Center, Tampere, Finland"
+"Sharda University, Greater Noida, India"
+"Science, University of Amsterdam"
+"University of St Andrews, United Kingdom"
+"Tsinghua University, Beijing, China"
+"Universit Paris-Dauphine, PSL Research University, CNRS, UMR"
+"ITEE, The University of Queensland, Australia"
+Plymouth University
+"The Blavatnik School of Computer Science, Tel-Aviv University, Israel"
+Rensselaer Polytechnic Institute
+Institute of Interdisciplinary Studies in Identity Sciences (IISIS
+"West Virginia University, Morgantown, West Virginia, United States of America, 2. IIIT Delhi, New Delhi"
+Mans eld College
+"University of Barcelona, Spain"
+"Indian Institute of Technology, Roorkee"
+"Engineering and Natural Science, Sabanci University, Istanbul, Turkey"
+Gangnung-Wonju National University
+"Tsinghua University, 100084 Beijing, China"
+"Khalifa University, Abu Dhabi, United Arab Emirates"
+Queensland University of Technology (QUT
+Taizhou University
+"University of Oviedo, Campus de Viesques, 33204 Gij n"
+"Shenyang Institute of Automation, Chinese Academy of Sciences, Shenyang 110016, China"
+Computer and Vision Research Center
+Thesis. Rochester Institute of Technology. Accessed from
+"Henan University of Traditional Chinese Medicine, Henan, Zhengzhou, 450000, P.R. China"
+"ZHAW Datalab, Zurich University of Applied Sciences"
+"Computer Science, Engineering and Mathematics School, Flinders University, Australia"
+B.S. University of Central Florida
+"BECS, Aalto University School of Science and Technology, Finland"
+"University of Notre Dame. Notre Dame, IN 46556.USA"
+"Brown University, Providence, RI"
+"Grad. School at Shenzhen, Tsinghua University"
+"Alin Moldoveanu, Faculty of Automatic Control and Computers, University POLITEHNICA of Bucharest"
+"University of Cambridge, UK 2Carnegie Mellon University, USA"
+"The University of North Carolina at Charlotte, USA"
+"Shenzhen Key Laboratory of High Performance Data Mining, Shenzhen Institutes of Advanced Technology"
+"iMinds - Ghent University, Technologiepark 15, B-9052 Ghent, Belgium"
+"Princeton University, Princeton, New Jersey, USA"
+"School of Information Science and Engineering, Southeast University, Nanjing, China"
+University of Wisconsin-Madison
+"Hong Kong Applied Science and Technology Research Institute Company Limited, Hong Kong, China"
+"GRASP Laboratory, University of Pennsylvania, 3330 Walnut Street, Philadelphia, PA, USA"
+"School of Information Science and Engineering, Central South University, Changsha"
+"P. O. Box 4500 FI-90014 University of Oulu, Finland"
+"School of Data and Computer Science, Sun Yat-sen University"
+"National Taiwan University, Taipei, Taiwan"
+"University Hospital Jena, Germany"
+"University of Oradea 410087, Universitatii 1, Romania"
+Institute of control science and engineering
+"to process in all the illumination conditions, including total"
+"Institute for Information Technology and Communications (IIKT), Otto-von-Guericke-University"
+Guangdong Medical College
+"Columbia University, New York NY 10027, USA"
+"Tsinghua University, State Key Lab. of Intelligent"
+"Technology, Nanjing University of Aero"
+"Computer Science Division, The Open University of Israel, Israel"
+Wayne State University
+University of Nottingham
+Massachusettes Institute of Technology
+"School of Communication Engineering, Hangzhou Dianzi University, Xiasha Higher Education Zone"
+"University of Technology, Sydney, Australia"
+"Pompeu Fabra University, Spain"
+"University Politehnica of Bucharest, Romania"
+Queen Mary University of London
+Nanjing University of Information Science and Technology
+"School of Computing Science, Simon Fraser University, Canada"
+Shandong Women s University
+"University of the South Paci c, Fiji"
+"Chu Hai College of Higher Education, Hong Kong"
+"Tsinghua-CUHK Joint Research Center for Media Sciences, Technologies and Systems"
+"University of Chinese Academy of Sciences, Beijing 101408, China"
+University of Michigan
+University of California at Berkeley
+University of Cambridge
+"University of South Carolina, Columbia, USA"
+University of Leeds
+The University of Texas at Austin
+Semarang State University
+University of North Carolina at Charlotte
+"MISC Laboratory, Constantine 2 University, Constantine, Algeria"
+"School of Electronic and Computer Engineering, Peking University"
+"School of Electrical Engineering and Automation, Harbin Institute of Technology"
+"School of Electronics and Computer Engineering, Peking University"
+Northeastern University 2Microsoft Research 3City University of New York
+"EECS, Syracuse University, Syracuse, NY, USA"
+"University of Ljubljana, Faculty of Electrical Engineering, Trzaska 25, 1000 Ljubljana, Slovenia"
+"Waseda University, Tokyo, Japan"
+Cambridge Research Laboratory
+National Institute of Advanced Industrial Science and Technology
+"Faculty of Science and Engineering, Waseda University, Tokyo, Japan"
+"Center for Brain Science, Harvard University, Cambridge, MA 02138 USA"
+"Queen Mary University of London, London E1 4NS, UK"
+"School of Information Systems, Singapore Management University, Singapore"
+"Key Lab. of Machine Perception, School of EECS, Peking University, China"
+"Electrical, Electronics and Automation Section, Universiti Kuala Lumpur Malaysian Spanish Institute"
+"Francis Xavier Engineering College, Tirunelveli, Tamilnadu, India"
+facultyofmathematicsandnaturalsciencesarti cialintelligence22-09-2016|1ATitleA.UthorRijksuniversiteitGroningenSomeFaculty
+"BRIC, University of North Carolina at Chapel Hill, NC 27599, USA"
+"Faculty of Computer Science, University of A Coru na, Coru na, Spain"
+"Bar Ilan University, Israel"
+"University of Freiburg, Instit ut f ur Informatik"
+National University of singapore
+"cid:63) Faculty of Computing, Information Systems and Mathematics, Kingston University London"
+"S.R.C.E.M, Banmore, RGPV, University, Bhopal, Madhya Pradesh, India"
+"School of Computer Science, Northwestern Polytechnical University, China"
+"Qihoo 360 AI Institute, Beijing, China"
+University of Barcelona
+"Bo gazic i University, Istanbul, Turkey"
+"Key Laboratory of Machine Perception, Peking University, Beijing"
+"Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences"
+Hacettepe University
+Bas kent University
+Vector Institute for Arti cial Intelligence
+"School of Computer Science and Engineering, Sichuan University, China"
+"USA, 2Unit for Experimental Psychiatry, University of Pennsylvania School of Medicine"
+"Sun Yat-Sen University, Guangzhou, P.R. China"
+"ECE, National University of Singapore, Singapore"
+The University of York
+"Nanjing, 210094, China, 3 School of Automation, Nanjing University of Posts and Telecommunications"
+"cid:2) Imperial College London, United Kingdom"
+MASSACHUSETTS INSTITUTE OF TECHNOLOGY (MIT
+"National Institute of Optics, National Research Council, Arnesano, LE, Italy"
+"School of Computer Science and Technology, Harbin Institute of"
+Okayama University
+"Center for Automation Research (CfAR), University of Maryland, College Park, MD"
+Korea Advanced Institute of Science and Technology (KAIST
+Dalle Molle Institute for Arti cial Intelligence
+"School of Computer Science, Carnegie Mellon University, PA, USA"
+"United States of America, State University of New York Albany, Albany, New York"
+"Research Institute, 138 Gajeongno, Yuseong-gu, Daejeon, 305-700, Republic of Korea"
+"Lab, University College London, London WC1H 0AP, UK. 3Clinical"
+"Mancha, Spain, Imperial College, London, UK"
+"Rm 1365, Stanford University, 401 Quarry Road, Stanford, CA"
+"School of IoT Engineering, Jiangnan University, Wuxi, 214122, China"
+"University of Cambridge, Computer Laboratory, UK"
+"West Virginia University, Morgantown, WV 26506, USA"
+The Open University
+"Xi an Jiaotong University, China"
+"Medical Research Council Human Genetics Unit, Institute of Genetics and Molecular"
+"School of Computer and Information Engineering, Nanyang Institute of Technology, Henan, Nanyang, 473000, P.R"
+"Asian Institute of Technology, Pathumthani, Thailand"
+"Columbia University, USA"
+"Chonbuk National University, Jeonju 561-756, Korea"
+The Weizmann Institute of
+"University of Barcelona, Gran Via de les Corts Catalanes 585, 08007 Barcelona, Spain"
+"Institute of Systems and Robotics - University of Coimbra, Portugal"
+"Affiliated to Guru Gobind Singh Indraprastha University, Delhi, India"
+"Graduate School of Information Science, Nagoya University, Japan"
+"College of Medical Informatics, Chongqing Medical University, Chongqing, China"
+"Curtin University, Perth, Australia"
+"Institute of Systems and Robotics, University of Coimbra, Portugal"
+"King Saud University, Riyadh"
+"Y ld z Teknik University, Istanbul, TR"
+"Stamford University Bangladesh, Dhaka-1209, Bangladesh"
+Institute of Automatic Control Engineering (LSR
+"Faculty of Engineering, Ain Shams University, Cairo, Egypt"
+"School of Computer Science, University of Birmingham, UK"
+University of Oxford
+"Samsung RandD Institute America - Dallas, 1301 East Lookout Drive, Richardson, TX 75082, USA"
+University of Information
+"University of Queensland, School of ITEE, QLD 4072, Australia"
+The Ohio State University
+"Amazon, Inc"
+Research Center E. Piaggio
+"University of Vienna, Austria"
+AI Institute
+"University of Illinois at Urbana Champaign, Urbana, IL 61801, USA"
+University of Oulu
+"Center for Research in Computer Vision (CRCV), University of Central Florida (UCF"
+University of Washington
+"IIIS, Tsinghua University"
+Monash University
+A dissertation submitted to the University of Bristol in accordance with the requirements
+Malaviya National Institute of Technology
+"Cooperative Medianet Innovation Center, Shanghai Jiaotong University"
+OF PRINCETON UNIVERSITY
+M. Mark Everingham University of Leeds
+Sakarya University
+Otto von Guericke University
+"L3S Research Center, Hannover, Germany"
+University of Science and Technology of China
+"School of Data Science, Fudan University, China"
+Nanyang Technological University
+"Division of Information and Computer Engineering, Ajou University, Suwon 443-749, Korea"
+"College of Computer Science and Technology, Chongqing"
+The University of Electro-Communications
+OF STANFORD UNIVERSITY
+"University of Groningen, The Netherlands"
+"Manonmaniam Sundaranar University, India"
+"Synergy Systems, Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences, Guangdong"
+Korea University
+"Istanbul Technical University, Istanbul, 34469, TURKEY"
+"School of Computer Science, University of Adelaide, Australia"
+"Institute of Radioelectronics, Warsaw University of Technology, Warsaw, Poland"
+"Center for Automation Research, University of Maryland, College Park, MD 20740, USA"
+"School of Electronic and Information Engineering, Beihang University, Beijing, 100191, China"
+University of Science and
+"School of Data and Computer Science, Sun Yat-Sen University, China"
+"Iftm University, Moradabad-244001 U.P"
+Institute of Automation
+Moradabad Institute of Technology
+University of California Santa Barbara
+University of California Davis
+"VSB Technical University of Ostrava, 17. listopadu 15, 708 33 Ostrava, Czech Republic"
+"University of Denver, Denver, CO"
+Vrije Universiteit Brussel
+University of California at Berkeley / ICSI
+"Institute of Computing, University of Campinas (Unicamp), Campinas, Brazil, e-mail: ander"
+"University of Notre Dame, USA"
+"School of Electronic Engineering and Computer Science, Peking University, 100871, China"
+"Imperial College London, London, UK"
+MASSACHUSETTS INSTITUTE OF TECHNOLOGY
+Chubu University
+University of Posts and Telecommunications
+IMPERIAL COLLEGE
+"The school of Data Science, Fudan University"
+Rutgers University
+"IBM Watson Research Center, Armonk, NY, USA"
+"School of Computer Science, Wuyi University, Jiangmen 529020, China"
+"Massachusetts Institute of Technology, Cambridge, MA 02139, USA"
+University of Groningen
+"Dartmouth College, 6211 Sudiko Lab, Hanover, NH 03755, USA"
+Cyprus University of Technology
+"S J B Institute of Technology, Bangalore, Karnataka, India"
+"Nanyang Technological University, Singapore"
+"Oxford University, UK"
+"Tsinghua University, Beijing 100084, P.R.China"
+"Dnyanopasak College Parbhani, M.S, India"
+"Hankuk University of Foreign Studies, South Korea"
+"Exploratory Computer Vision Group, IBM T. J. Watson Research Center"
+"Laboratory, University of Houston, Houston, TX, USA"
+"Final Year Student, M.Tech IT, Vel Tech Dr. RR andDr. SR Technical University, Chennai"
+c(cid:176) Massachusetts Institute of Technology 2006. All rights reserved
+"f Neuropsychiatry Section, University of Pennsylvania Medical Center, Hospital of the University of Pennsylvania"
+Middlesex University London
+"Harbin Institute of Technology, Harbin 150001, China"
+"Max Planck Institute for Biological Cybernetics, Spemannstr. 38, 72076 T bingen, Germany"
+FL
+"University of Peshawar, Peshawar, Pakistan"
+"DPDCE, University IUAV, Santa Croce 1957, 30135 Venice, Italy"
+Gdansk University of Technology
+Rice University
+"Research Institute of Child Development and Education, University of Amsterdam, Utrecht, The"
+"Nanjing University of Science and Technology, China"
+"Zhejiang Normal University, Jinhua, China"
+Queensland University of Technology(QUT
+Amherst College
+"LITIS EA 4108-QuantIF Team, University of Rouen, 22 Boulevard Gambetta, 76183 Rouen Cedex, France"
+"Institute of Software, Chinese Academy of Sciences, Beijing 100190, China"
+"Paran a Federal University, Curitiba, Brazil"
+"University of Crete, Crete, 73100, Greece"
+Mme Tinne Tuytelaars Katholieke Universiteit Leuven
+Japan
+"University of Twente, The Netherlands"
+"Kurukshetra University, Kurukshetra-136 119, Haryana, INDIA"
+"The School of Electrical Electronic and Control Engineering, Kongju National University"
+the Chinese University of Hong Kong
+University of Amsterdam
+"Correspondence should be addressed to: Astrid C. Homan, University of Amsterdam, Weesperplein"
+"Language and Brain Lab, Simon Fraser University, Canada"
+"Image Processing Center, Beihang University"
+East China Normal University
+"Rowland Institute at Harvard, Cambridge, MA 02142, USA"
+"United States of America, State University of New York Albany, Albany"
+"Shanghai Jiao Tong University, Shanghai 200240, China"
+"National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210093, China"
+Northeastern University
+"University POLITEHNICA Timisoara, Timisoara, 300223, Romania"
+"National University of Kaohsiung, 811 Kaohsiung, Taiwan"
+University of Northern British Columbia
+"Technical University in Prague, 166 27 Prague 6, Technick a 2 Czech Republic"
+"California Institute of Technology, USA"
+"School of Electrical and Electronic Engineering, Nanyang Technological University, Singapore"
+"Computer Vision Research Group, COMSATS Institute of Information"
+Temple University
+"International Islamic University, Islamabad 44000, Pakistan"
+"University of Kaiserslautern, Gottlieb-Daimler-Str., Kaiserslautern 67663, Germany"
+"Australian National University, and NICTA"
+"Deparment of Computer Science, Queen Mary, University of London, London, E1 4NS, UK"
+"Engineering, University of Dundee"
+"Advanced Digital Sciences Center (ADSC), University of Illinois at Urbana-Champaign, Singapore"
+"QCIS Centre, FEIT, University of Technology, Sydney, NSW 2007, Australia"
+"School of Computer Science and Engineering, South China University of Technology, Guangzhou 510006, China"
+"School of Information Science and Technology, Northwestern University, Xi an710127, Shanxi, China"
+"Interactive and Digital Media Institute, National University of Singapore, SG"
+Islamic Azad University
+Institute of Computing Technology
+"Ultra College of Engineering and Technology for Women, India"
+via Institute of Electrical and Electronic Engineers at http://dx.doi.org/10.1109/TIP.2014.2367321. Please refer to
+"University of California, Riverside CA 92521-0425, USA"
+"Caarmel Engineering College, MG University, Kerala, India"
+University of Wisconsin Madison
+Meiji University
+Digital Media Research Center
+"Kong Polytechnic University, Kowloon, Hong Kong"
+Achariya college of Engineering Technology
+"Graz University of Technology, Austria"
+UNIVERSITY OF OULU
+"School of Informatics, University of Edinburgh, UK"
+Sun Yat-sen University
+"Robotics Institute, Carnegie Mellon University, Pittsburgh, PA, 15213. USA"
+"Faculty of EEMCS, Delft University of Technology, The Netherlands"
+"Najafabad Branch, Islamic Azad University"
+"Shaoguan University, Da Tang Lu"
+"Faculty of Electrical and Computer Engineering, Tarbiat Modares University, Tehran, Iran"
+"Indian Institute of Technology, Madras, Chennai 600036, INDIA"
+Brown University
+Nanjing University of Science and
+"University of California at Irvine, Irvine, CA"
+"Aristotle University of Thessaloniki, GR-541 24 Thessaloniki, Greece"
+"PanimalarInstitute of Technology, Tamilnadu, India"
+Imperial College London / Twente University
+University of Oradea
+Submitted to the Institute for Graduate Studies in
+"Computer vision and Remote Sensing, Berlin university of Technology"
+"Toyota College, 2-1 Eisei, Toyota-shi, Aichi, 471-8525 Japan"
+"Held at R.C.Patel Institute of Technology, Shirpur, Dist. Dhule, Maharastra, India"
+"School of Computer Science and Technology, Tianjin University, China"
+"State Key Laboratory of Integrated Services Networks, Xidian University, Xi an 710071 China"
+Xiamen University
+"RoboticsResearchGroup, UniversityofOxford, Oxford, UK"
+"Federal University Technology Akure, PMB 704, Akure, Nigeria"
+"Human and Health Sciences, Swansea University, Swansea, United Kingdom, 3 Abertawe Bro-Morgannwg"
+"Science and Technology, Sun Yat-Sen University, Guangzhou, China, 3 SYSU-CMU Shunde International"
+"Aberystwyth University, UK"
+"University of Pisa, Largo Lucio"
+Institute of Computer Science
+"Engineering Institute, Autonomous University of Baja California, Blvd. Benito Ju rez"
+"University of Florence, Italy"
+Punjabi University Patiala
+"Institute of Automation, Chinese Academy of"
+"University of Technology, Baghdad, Iraq"
+"DCMandB, University of Michigan, Ann Arbor, USA 4 SCS, Carnegie Mellon University, Pittsburgh, USA"
+"School of Computer Science and Technology, Shandong Institute of Business and Technology"
+"COMSATS Institute of Information Technology, Pakistan"
+"University of California, San Diego, La Jolla"
+K. N. Toosi University of
+Institute of
+"Institute of Biochemistry, University of Balochistan, Quetta"
+DUBLIN CITY UNIVERSITY
+"School of Engineering, University of Portsmouth, United Kingdom"
+"University POLITEHNICA of Bucharest, Bucharest, Romania"
+"P.G. Student, SRV Engineering College, sembodai, India"
+Sungkyunkwan University
+"Vision Systems, Inc"
+"Center for Brains, Minds and Machines, McGovern Institute, MIT"
+Central Mechanical Engineering Research Institute
+"University of Science and Technology of China, Hefei, China"
+"Faculty of Electrical Engineering, University of Ljubljana, Slovenia"
+"bDiscipline of Business Analytics, The University of Sydney Business School"
+Bournemouth University
+"CVSSP, University of Surrey"
+The University of Tokyo
+"GREYC UMR CNRS 6072 ENSICAEN-Image Team, University of Caen Basse-Normandie, 6 Boulevard Mar echal Juin"
+"School of Data and Computer Science, Sun Yat-sen University, P.R.China"
+"School of Computing, Staffordshire University"
+"VISILAB group, University of Castilla-La Mancha, E.T.S.I.Industriales, Avda. Camilo Jose Cela s.n, 13071 Spain"
+"IIIT-Delhi, India, 2West Virginia University"
+Stony Brook University Hospital
+"Graduate University of Chinese Academy of Sciences, Beijing 100049, China"
+"School of Psychology, University of Central Lancashire"
+Tokyo Institute of Technology
+Ural Federal University (UrFU
+"Principal, Chaithanya Institute of Engineering and Technology, Kakinada, AP, India"
+any other University
+"ECE dept, University of Miami"
+"Faculty of Electrical Engineering, University of Ljubljana, Tr za ska cesta"
+Indiana University
+"Kingston University, UK"
+"The Open University of Israel, Israel"
+"Michigan State University, East Lansing MI"
+"School of Computer Science and Engineering, South China University of Technology"
+"Dep. of Applied Mathematics and Analysis, University of Barcelona, Spain"
+Research Center and Laboratoire
+"UC Lab, Kyung Hee University, Yongin-Si 446-701, Korea"
+"Faculty of Electronics, Telecommunications and Informatics, Gdansk University of Technology, Poland"
+"aSchool of Technology, University of Campinas"
+University of Waterloo
+"Birkbeck College, University of London"
+Istanbul Technical University
+"Y. Li, University of Maryland"
+"Global Big Data Technologies Centre (GBDTC), University of Technology Sydney, Australia"
+"Indian Institute of Informaiton Technology, Allahabad, India"
+"Brain and Cognitive Sciences, Massachusetts Institute of Technology, Cambridge, MA, USA"
+"EEMCS, University of Twente Enschede, The Netherlands"
+"Gatsby Computational Neuroscience Unit, University College London, London, UK"
+"College of Computer Science, Zhejiang University, Hangzhou, China"
+"Baingio Pinna, University of"
+B.S. (Cornell University
+"National Tsing Hua University, Hsin-Chu, Taiwan"
+"Advanced Engineering, The Chinese University of Hong Kong"
+"Catholic University of Rio de Janeiro, Brazil"
+Northwestern University) to T.E. We thank Vincent De Gardelle for helpful comments on an earlier version of
+"Idiap Research Institute, Switzerland"
+Mitsubishi Electric Research Laboratory
+National Institute of Standards and Technology
+"Southwest University, Chongqing 400715, China"
+"Dalian University of Technology, China"
+"ISTMT, University of Tunis El Manar Address: 9, Rue Docteur Zouhe r Safi 1006; 3Faculty of Medicine of Tunis; Address"
+"RMIT University, Australia"
+"China-Singapore Institute of Digital Media, Singapore"
+Curtin University of Technology
+"Center for Automation Research, University of Maryland"
+"Cognitive Arti cial Intelligence, Utrecht University, Heidelberglaan 6, 3584 CD, Utrecht"
+"cid:130) Computer Perception Lab, California State University, Bakersfield, CA 93311, USA"
+Massachusetts Institute
+"School of Automation, Beijing University of Posts and Telecommunications, Beijing 100876, China"
+"SICE, Beijing University of Posts and Telecommunications. 2Center for Imaging Science, Johns Hopkins University"
+"Faculty of Electrical Engineering, University of Ljubljana, Tr a ka cesta 25, SI-1000 Ljubljana, Slovenia"
+"Tampere University of Technology, Tampere, Finland"
+"Karlsruhe Institute of Technology, 76131 Karlsruhe, Germany"
+"Research Center E. Piaggio , University of Pisa, Pisa, Italy, 2 Faculty of Psychology, University of Florence, Florence, Italy"
+"Virginia Polytechnic Institute and State University, Blacksburg, Virginia"
+"B.S. Abdur Rahman University, Chennai-48, India"
+"College of Information and Communication Engineering, Sungkyunkwan University, Suwon-si, Gyeonggi"
+"Computer Vision Group, L. D. College of Engineering, Ahmedabad, India"
+University of Bristol - Explore Bristol Research
+"University of Texas, Austin, TX 78712-1188, USA"
+"b Computer Science, School of Electrical and Data Engineering, University of Technology, Sydney"
+"San Jose State University, San Jose, CA"
+"University of Exeter, UK"
+"Research Center for Learning Science, Southeast University, Nanjing 210096, China"
+"Computational Biomedicine Laboratory, University of Houston, Houston, Texas 77204, USA"
+"State Key Laboratory of Robotics and System, Harbin Institute of Technology, Harbin, China"
+Katholieke Universiteit Leuven
+"Jawaharlal Technological University, Anantapur"
+"University of Toronto, Toronto, ON M5S 2G4, Canada"
+"Division of Computer Science, University of California, Berkeley, CA, USA e-mail"
+"Sogang University, Seoul 121-742, Republic of Korea"
+"University of California at San Diego, La Jolla, CA"
+"Nanyang Technological University, Singapore 639798, Singapore"
+"Institute of Neuroscience, State Key Laboratory of Neuroscience, CAS Center for Excellence in Brain"
+"Human-friendly Welfare Robotic System Engineering Research Center, KAIST"
+China University of Mining and Technol
+"Northeastern University, Boston, USA"
+"UniversityofMaryland, CollegePark, MD"
+"Sorbonne Universit s, UPMC University Paris 06, Paris, France"
+to Michigan State University
+University of Kentucky
+"Beijing Normal University, China"
+University of North Carolina Wilmington
+HoHai University
+"University of Technology, Guangzhou, 510640, P.R.China"
+"CollegePark, MD"
+"Human Computer Interaction Lab., Samsung Advanced Institute of Technology, Korea"
+National Taiwan University of Science and
+"University of Bristol, Bristol, BS8 1UB, UK"
+"M.Tech Scholar, MES College of Engineering, Kuttippuram"
+"Recognition, Institute of Automation, Chinese Academy of Sciences, Beijing, China"
+UNIVERSITY OF TAMPERE
+"School of Mathematical Science, Peking University, China"
+"Dartmouth College, NH 03755 USA"
+"MES College of Engineering, Kuttippuram"
+yAristotle University of Thessaloniki
+"The University of Texas at Dallas, Richardson, TX"
+"State Research Institute of Aviation Systems (GosNIIAS), Moscow, Russia"
+"Australian National University, Canberra"
+"SRI International, Menlo Park California / *Brooklyn College, Brooklyn New York"
+"School of Electrical and Computer Engineering, RMIT University"
+"Stony Brook University, NY 11794, USA"
+"National University of Sciences and Technology (NUST), Islamabad, Pakistan"
+"Computer Science, Brown University, Providence, RI, USA"
+COMSATS Institute of Information Technology Wah Cantt
+"University of Houston, Houston, TX, USA"
+"School of Information Science and Technology, Sun Yat-sen University, China"
+"Amsterdam; and 3Center for Experimental Economics and Political Decision Making, University of Amsterdam"
+"Institute for Vision and Graphics, University of Siegen, Germany"
+Oregon State University
+"University of Technology, Sydney, 15 Broadway, Ultimo, NSW 2007, Australia"
+"SRV Engineering College, sembodai, india"
+"Siemens Corporate Research, 755 College Road East, Princeton, NJ"
+Imperial College London
+"in signed languages, including American Sign Language (ASL). Gestures such"
+"Karlsruhe Institute of Technology, Germany"
+"Rutgers, The State University of New Jersey, Piscataway, NJ"
+Middle East Technical University
+"Robotics Institute, Carnegie Mellon University, USA"
+The Author 2014. Published by Oxford University Press
+North Carolina AandT State University
+National Institute of Advanced Industrial
+"National Tsing-Hua University, Hsin-Chu, Taiwan"
+"California State University, Fullerton, USA"
+"School of Electronics Engineering and Computer Science, Peking University"
+"Queen Mary University of London, UK"
+"Interdisciplinary Program in Visual Information Processing, Korea University, Seoul, Korea"
+Institute
+Xerox Research Center
+University of Southern California
+University of Iceland
+"KAUST1, University of Amsterdam2, Qualcomm Technologies, Inc"
+"Electronics and Computer Science, University of Southampton, Southampton, Hampshire"
+"Institute of Anthropomatics, Karlsruhe Institute of Technology, Germany"
+Max Planck Institute f ur biologische Kybernetik
+"Imperial College, London, UK"
+"and IBUG [32]. All of them cover large variations, including different"
+"Electrical and Computer Engineering, University of Toronto, M5S 3G4, Canada"
+PES Modern College of Engg
+"Universiteit van Amsterdam, Roeterstraat 15, 1018 WB Amsterdam, The Netherlands"
+"Center for Automation Research, University of Maryland, College Park, MD 20742, USA"
+"K.S.R. College Of Engineering, Tiruchengode, India"
+Multimedia University
+"Engineering, University of Akron, Akron, OH 44325-3904, USA"
+"Research Reports of CMP, Czech Technical University in Prague, No"
+"DESTEC, FLSHR Mohammed V University-Agdal, Rabat, Morocco"
+"Institute of Arti cial Intelligence and Cognitive Engineering (ALICE), University of Groningen"
+"University of Illinois at Urbana Champaign, Urbana"
+"Beihang University 2Gri th University 3University of York, UK"
+"College of Computing, Georgia Institute of Technology"
+"Carnegie Mellon University, Pittsburgh, PA, USA"
+"Intelligent Systems Group, Utrecht University, Padualaan 14, 3508 TB, Utrecht"
+"School of Info. and Commu. Engineering, Beijing University of Posts and Telecommunications"
+Courant Institute
+"Carnegie Mellon University, Pittsburgh, PA, 15213, USA"
+yThe University of Tokyo
+"University Politehnica of Bucharest, Romania, Address Splaiul Independent ei"
+"Heilongjiang University, College of Computer Science and Technology, China"
+"Amirkabir University of Technology, Tehran. Iran"
+Beijing Jiaotong University
+"Shaanxi Provincial Key Lab on Speech and Image Information Processing, Northwestern Polytechnical University, Xi an, China"
+"Queensland University of Technology, Australia"
+"Computer Science and Engineering, Anna University, India"
+"College of Computer and Information Science, Southwest University, Chongqing 400715, China"
+"University of Santiago de Compostela, Santiago de Compostela, Spain"
+University of Chinese Academy of
+"Biometric and Image Processing Lab, University of Salerno, Italy"
+"Sathyabama University Old Mamallapuram Road, Chennai, India"
+"Beijing University of Posts and Telecommunications, Beijing, China. 2School of"
+and the institute of engineering and science
+"Deprtment of Computer Science and Engineering, JNTUA College of Engineering, India"
+National Taipei University
+Swansea University
+"Center for Information Science, Peking University, Beijing 100871, China"
+"School of Computer Science, The University of Adelaide, Australia"
+Tel Aviv University
+"Faculty of Engineering and Natural Sciences, Sabanc University, stanbul, Turkey"
+"School of Computer Science, The University of Nottingham"
+Shiraz University
+"Nanjing University, Nanjing 210023, China"
+Middlebury College
+University of Massachusetts Amherst
+Ionian University
+"Center for Automation Research, UMIACS, University of Maryland, College Park, MD 20742 USA"
+"RGPV University, Indore"
+COMSATS Institute of Information Technology
+"The Hong Kong University of Science and Technology, Clear Water Bay, Hong Kong"
+"Engineering, G.H.Raisoni College of Engineering"
+"University of California, Los Angeles"
+National University of Singapore
+Santa Fe Institute
+"PG Scholar, Hindusthan College of Engineering and Technology, Coimbatore, India"
+"National Sun Yat Sen University, 804 Kaohsiung, Taiwan"
+"Middlesex University London, 4International Hellenic University"
+University of North Carolina at Chapel Hill
+"Howard University, Washington DC"
+This work was supported in part by National Institute of Mental Health Award R01 MH 087610 to T.E
+"The Remote Sensing Technology Institute (IMF), German Aerospace Center"
+Indian Institute of Science Bangalore
+Portugal
+"Ulm University, Germany"
+B.S. University of Indonesia
+"University of Malaya, 50603 Kuala Lumpur, Malaysia"
+"Institute of Industrial Science, The University of Tokyo"
+"LIP6, UPMC - Sorbonne University, Paris, France"
+"Learning Systems Group, California Institute of Technology"
+College of Electrical and Information Engineering
+Nanjing University of Science and Technology
+University (ITU
+"Indian Institute of Science, India"
+University of Applied Sciences Darmstadt - CASED
+"Intelligent Systems Group, University of Groningen, The Netherlands"
+"and 2Center for Cognitive Neuroscience, Duke University, Durham, North Carolina 27708"
+"Newcastle University, Newcastle upon Tyne"
+Czech Technical University
+"Bournemouth University, UK"
+Palo Alto Research Center (PARC
+College of Image Arts and Sciences
+"USC Institute for Robotics and Intelligent Systems (IRIS), Los Angeles, CA"
+"School of Computing, National University of Singapore, SG"
+"University of Zaragoza, Spain"
+University of Ottawa
+National Institute of Technology Karnataka
+Stevens Institute of Technology
+"Center for Computational Biomedicine Imaging and Modeling Center, Rutgers University, New Brunswick, NJ"
+"School of Computer Science, Wuhan University, P.R. China"
+"School of Computer Science and Technology, Zhejiang University of Technology, Hangzhou 310023, China"
+"Games Studio, Faculty of Engineering and IT, University of Technology, Sydney"
+"Chonbuk National University, Jeonju-si"
+National Taiwan University of Science and Technology
+Sponsors: Machine Intelligence Research Labs (MIR Labs
+"Federal Institute of Science and Technology, Mookkannoor"
+"and education use, including for instruction at the authors institution"
+"Faculty of Information Technology, Vietnam National University of Agriculture, Hanoi 10000, Vietnam"
+M.S. Brunel University of West London
+"b School of Business, Reykjavik University, Reykjavik, Iceland"
+"University of Maryland, College Park; 2Arizona State University; 3Xerox Research Centre"
+"P. O. Box 4500 Fin-90014 University of Oulu, Finland"
+"College of Medicine, Seoul National University"
+"The Ohio State University, OH"
+"City University of Hong Kong, Kowloon 999077, Hong Kong, China"
+"Current Address: Research Institute of Child Development and Education, University of Amsterdam"
+"Kurukshetra University, Kurukshetra"
+University of Washington and Google Inc
+"Center for Cognitive Ubiquitous Computing, Arizona State University, Tempe, AZ, USA"
+"School of Computer Science, Sichuan University, Chengdu, China"
+"School of Psychology, Cardiff University, Cardiff, CF10 3AT, UK"
+"Narayana Pharmacy College, Nellore, India"
+College of Computer Science and Technology
+German Research Center for Arti cial Intelligence (DFKI
+"University of Michigan, Ann Arbor, MI"
+Research Center for Information
+Carnegie Mellon University
+"Chung-Ang University, Seoul, Korea"
+"Assiut University, Assiut 71515, Egypt"
+"ITCS, Tsinghua University"
+"Technological University, Davanagere, Karnataka, India"
+"abroad, or from public or private research centers"
+"Qatar University, Qatar"
+Facebook 4Texas AandM University 5IBM Research
+"School of Computer Science and Software Engineering, Shenzhen University"
+"K.S.Rangasamy College of Technology, Namakkal, TamilNadu, India"
+"School of Computer Science and Technology, Harbin Institute of Technology, Harbin, China"
+"University of Lincoln, School of Computer Science, U.K"
+"Graduate School of System Informatics, Kobe University, Kobe, 657-8501, Japan"
+"School of Computer Science, The University of Manchester"
+"University of Notre Dame, 2IIIT-Delhi"
+"University of Lincoln, U.K"
+"Southeast University, Nanjing 211189, China"
+"School of Electrical and Electronic Engineering, Nanyang Technological University, Singapore. 2Advanced Digital Sciences Center, Singapore"
+"The University of Tokyo, Japan"
+"Tohoku University, Japan"
+"BECS, Aalto University, Helsinki, Finland"
+"Software, Jiangxi Normal University, Nanchang, China, 4 School of Statistics, Capital University of"
+"Visual Computing and Communications Lab, Arizona State University"
+Copyright for the publications made accessible via the Queen's University Belfast Research Portal is retained by the author(s) and / or other
+at West Virginia University
+University of Engineering and Technology
diff --git a/scraper/reports/institution_names-4.csv b/scraper/reports/institution_names-4.csv
new file mode 100644
index 00000000..394751e3
--- /dev/null
+++ b/scraper/reports/institution_names-4.csv
@@ -0,0 +1,711 @@
+"cid:63)Sharif University of Technology, University College London, Queen Mary University of London"
+Dhaka University
+"Center for Machine Vision and Signal Analysis, University of Oulu, Finland"
+College of Engineering and Computer Science
+"Trust Centre for Human Genetics, University of Oxford, Oxford, United Kingdom"
+"Institute of Information Science, Academia Sinica, Taipei, Taiwan"
+University of California at San Diego
+"University of Central Florida, USA"
+"Neurological Institute, USA"
+"Center for Machine Vision and Signal Analysis (CMVS), University of Oulu, Finland"
+"Texas AandM University, College Station, TX, USA"
+IDIAP Research Institute
+Economy (MKE) and the Korea Evaluation Institute of Industrial Technology (KEIT
+"Faculty of Informatics, E otv os Lor and University, Budapest, Hungary"
+"Computer Science and Engineering, Easwari Engineering College, India"
+Institute for Robotics and Intelligent Systems
+"Queensland University of Technology, Brisbane, QLD, Australia"
+"SBK Women s University, Quetta, Balochistan"
+"Doctoral School of Automatic Control and Computers, University POLITEHNICA of Bucharest, Romania"
+"George Mason University, Fairfax Virginia, USA"
+Istanbul University
+"The Robotics Institute, Carnegie Mellon University, Pittsburgh PA"
+"Institute for Infocomm Research, A*STAR, Singapore"
+Institute for Human-Machine Communication
+Delft University of Technology
+"School of E.C.E., National Technical University of Athens, 15773 Athens, Greece"
+"Psychopharmacology Unit, Educational and Health Psychology, University College"
+"Bogazici University, Turkey"
+"Sri Chandrasekharendra Saraswathi Viswa Mahavidyalaya University, Kanchipuram, India"
+"Computer Science and Software Engineering, Concordia University, Montr eal, Qu ebec, Canada"
+University of Surrey
+"King Saud University, Riyadh, Saudi Arabia"
+"Ritsumeikan University, Japan"
+"IIIS, Tsinghua University, Beijing, China"
+"Carnegie Mellon University, Pittsburgh PA"
+"2 School of Computing, National University of Singapore"
+University of North Carolina
+"National Lab of Pattern Recognition, Institute of Automation"
+Lomonosov Moscow State University
+"H. He, Honkong Polytechnic University"
+"The University of Electro-Communications, Tokyo"
+"Shenzhen Key Lab of CVPR, Shenzhen Institutes of Advanced Technology, CAS"
+"a Institute of Anatomy I, School of Medicine, Friedrich Schiller University, Germany"
+"School of Business, Aalto University, Finland"
+"School of Electromechanical Engineering, Guangdong University of Technology, 510006 Guangzhou, China"
+"West Virginia University, Morgantown, WV"
+"Utah State University, Logan UT"
+"Gannan Normal University, Ganzhou 341000, China"
+"Southeast University, Nanjing 210096, China"
+"Ilmenau Technical University, P.O.Box 100565, 98684 Ilmenau, Germany"
+"aIBM China Research Lab, Beijing, China"
+"Karlsruhe Institute of Technology, Karlsruhe, Germany"
+"B.Sc., University of Science and Technology of China"
+"University of Nevada, Reno, Reno, NV, USA"
+Institute of Electronics and Computer Science
+"of Psychology, Princeton University, Princeton, NJ 08540. E-mail"
+"the Library: http://uba.uva.nl/en/contact, or a letter to: Library of the University of Amsterdam, Secretariat, Singel 425, 1012 WP Amsterdam"
+"Hua Zhong University of Science and Technology, Wuhan, China"
+University College London
+"College of Computer Science and Information Technology, Northeast Normal University, Changchun"
+"University of Texas at Arlington, Arlington, TX, USA"
+Funding was provided by the U.S. National Institutes of Mental
+UNIVERSITY OF OULU P.O. Box 8000 FI-90014 UNIVERSITY OF OULU FINLAND
+"Southwest Jiaotong University, Chengdu, China"
+"The Australian National University Canberra ACT 2601, Australia"
+"Robotics Institute, Carnegie Mellon University, Pittsburgh, Pennsylvania"
+Bharath Institute of Science and Technology
+"Stanford University, CA"
+Columbia University in the City of New York
+Zaragoza University
+Institute of Communications Engineering
+"Amirkabir University of Technology, Tehran"
+"GSCM-LRIT, Faculty of Sciences, Mohammed V University-Agdal, Rabat, Morocco"
+"Australian National University, 2Smart Vision Systems, CSIRO, 3CVLab, EPFL"
+"Division of Electronics and Electrical Engineering, Dongguk University, 30 Pildong-ro 1-gil, Jung-gu"
+"Jahangirnagar University, Savar, Dhaka 1342, Bangladesh"
+"Institute ofInformation Science, Academia Sinica, Taipei, Taiwan"
+"aImperial College London, London, UK"
+University of North Carolina Wilmington in Partial Ful llment
+"point, lighting, and appearance. Many applications, including video surveillance systems"
+University of Texas at
+The Australian National University
+"Beijing Institute of Technology, Beijing, China"
+"MIT, McGovern Institute, Center for Brains, Minds and Machines"
+"Imperial College, South Kensington Campus, London SW7 2AZ, UK"
+"University of Michigan, Ann Arbor"
+"Laboratory for Human Brain Dynamics, RIKEN Brain Science Institute (BSI), 2-1 Hirosawa, Wakoshi, Saitama, 351-0198, Japan"
+Max-Planck Institute for Informatics
+Harvard University
+"Faculty of Computer and Information Science, University of Ljubljana, Ve cna pot 113, SI-1000 Ljubljana"
+"Deparment of Computing, Imperial College London, UK"
+"additional details of DCS descriptors, including visualization. For extending the evaluation"
+"University of Plymouth, UK"
+"ISLA Lab, Informatics Institute, University of Amsterdam"
+"Pattern Recognition and Bioinformatics Group, Delft University of Technology, The Netherlands"
+AristotleUniversityofThessaloniki
+UNIVERSITY OF OULU GRADUATE SCHOOL
+"Katholieke Universiteit Leuven, ESAT/VISICS"
+"College Park, MD, 20740, USA"
+"City University of Hong Kong, Hong Kong, China"
+Waseda University
+The University of Texas
+"Assistant Lecturer, College of Science, Baghdad University, Baghdad, Iraq"
+"Facial expression gures prominently in research on almost every aspect of emotion, including psychophys"
+"VelTech Dr. R.R. and Dr. S.R. Technical University, Chennai"
+"Hunan Provincial Key Laboratory of Wind Generator and Its Control, Hunan Institute of Engineering, Xiangtan, China"
+The University of Newcastle
+"University of Canterbury, New Zealand"
+Dr. B. C. Roy Engineering College
+"the Chinese University of Hong Kong, Shatin, Hong Kong"
+"Srm Easwari Engineering College, Ramapuram, Bharathi Salai, Chennai, Tamil Nadu, India"
+Institute for Advanced Computer Studies
+"National University of Singapore Research Institute, Suzhou, China"
+"CNRS , Institute of Automation of the Chinese Academy of Sciences"
+Cornell University
+University of Miami
+"University of North Carolina at Chapel Hill, NC, USA"
+McMaster University
+SAMSI and Duke University
+"University of Miami, USA"
+University of Texas at Austin
+University of Bristol
+"The University of Queensland, School of ITEE, QLD 4072, Australia"
+"College of Electronics and Information, Northwestern Polytechnic University"
+"Tampere University of Technology, Tampere 33720, Finland"
+University of Illinois Urbana Champaign
+The Graduate University for Advanced Studies (SOKENDAI
+"University of Amsterdam, Amsterdam, the Netherlands, 2 Leiden University"
+"Minia University, Egypt"
+"University of Karlsruhe, Germany"
+Boston College
+National Cheng Kung University
+Tomsk Polytechnic University
+"Northumbria University, Newcastle upon Tyne, NE1 8ST, UK"
+"University of Massachusetts, Amherst, MA"
+"SenseTime, 2Tsinghua University"
+Anna University
+"ISLA Lab, Informatics Institute"
+"Indraprastha Institute of Information Technology (Delhi, India"
+"University of Science and Technology of China, Hefei 230026, P. R. China"
+"Rutgers University, USA"
+"School of Computing and Communications University of Technology, Sydney"
+"University of Florida, Gainesville, FL, 32611, USA"
+"Aristotle University of Thessaloniki, Box 451, 54124 Thessaloniki, Greece"
+"Indian Institute of Technology Delhi, New Delhi, India"
+"University of Oxford, UK"
+"Toyota Technological Institute, Chicago (TTIC"
+Thapar University
+"University of Colorado at Colorado Springs and Securics, Inc., Colorado Springs, CO, USA"
+"College of Computer Science, Sichuan University, Chengdu 610065, P.R. China"
+Islamic Azad University of AHAR
+"Center of Research Excellence in Hajj and Umrah, Umm Al-Qura University, Makkah, KSA"
+eBay Research Labs
+"University of Maryland, College Park, MD"
+"Information Technology, Madras Institute of Technology, TamilNadu, India, email"
+"University College London, London, UK"
+"Research Scholar (M.Tech, IT), Institute of Engineering and Technology"
+Sabanci University
+"ESAT, Katholieke Universiteit Leuven, Leuven, Belgium"
+"Institute for Infocomm Research, Agency for Science, Technology and Research, Singapore"
+"of Psychology, University of Michigan, Ann Arbor, MI, United States, University of Michigan, Ann"
+York University
+The State University of New Jersey
+"University of Georgia, Athens, GA, U.S.A"
+"Vrije Universiteit Brussel, 1050 Brussels, Belgium"
+College of Computing
+K S Rangasamy College of Technology
+"of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, China"
+"Institute of Computing Technology, CAS, Beijing, 100190, China"
+"School of Computer Science and Engineering, South China University of Technology, Guangzhou, China"
+College of Computer and Information Science
+"ITCS, Institute for Interdisciplinary Information Sciences, Tsinghua University, Beijing"
+"The University of York, Heslington, York YO10 5DD, United Kingdom"
+"Graduate Institute ofNetworking and Multimedia, National Taiwan University, Taipei, Taiwan"
+"Purdue University, West Lafayette, IN 47907, USA"
+DICGIM - University of Palermo
+"Computer Science and Technology, Tsinghua University, Beijing, China"
+THE UNIVERSITY OF ARIZONA
+College of Computer Science and Information Sciences
+"Nanjing University of Aeronautics and Astronautics, China"
+Sharif University of Technology
+McGovern Institute
+University of Chinese Academy of Sciences
+"Machine Perception Laboratory, University of California, San Diego"
+"Cardiff University, UK"
+"School of Computer Science, Carnegie Mellon University, Pittsburgh, PA 15213, USA"
+"Human Interface Technology Lab New Zealand, University of Canterbury, New Zealand"
+"Tsinghua University, China"
+Karlsruhe Institute of Technology
+"Shenzhen Key Lab of CVPR, Shenzhen Institutes of Advanced Technology"
+"Electrical and Computer Engineering, National University of Singapore, Singapore"
+DAP - University of Sassari
+University of Verona. 2Vienna Institute of Technology. 3ISTC CNR (Trento). 4University of Trento
+"University of Amsterdam, Amsterdam, The Netherlands"
+"The Chinese University of Hong Kong, New Territories, Hong Kong"
+"Visual Geometry Group, University of Oxford, UK"
+"Japan, 2 Center for Special Needs Education, Nara University of Education, Nara-shi, Nara"
+"b Brain Behavior Center, University of Pennsylvania Medical Center, Hospital of the University of Pennsylvania"
+"Science and Intelligence Technology, Shanghai Institutes for Biological Sciences, CAS"
+"School of Data of Computer Science, Sun Yat-sen University, P.R. China"
+y National Institute of Advanced Industrial Science and Technology
+"College Park, United States"
+"ISTMT, Laboratory of Research in Biophysics and Medical Technologies LRBTM Higher Institute of Medical Technologies of Tunis"
+"aFaculty of Electrical Engineering, University of Technology MARA, Shah Alam, 40450 Selangor, Malaysia"
+"Institute for Interdisciplinary Information Sciences, Tsinghua University, Beijing, China"
+Illinois Institute of Technology
+"Princeton University, Princeton, NJ, USA"
+"M.Tech, Sri Sunflower College of Engineering and Technology, Lankapalli"
+IDIAP RESEARCH INSTITUTE
+"Harvard University, Cambridge, MA"
+"Boston College, USA"
+"Stony Brook University, Stony Brook, USA"
+"image being generated by the model, include Active Appearance"
+Massachusetts Institute of Technology
+"MRC Centre for Neuropsychiatric Genetics and Genomics, Cardiff University, Cardiff"
+"Graduate School at Shenzhen, Tsinghua University, Shenzhen 518055, China"
+"ECSE, Rensselaer Polytechnic Institute, Troy, NY"
+"Author s addresses: Z. Li and D. Gong, Shenzhen Institutes of Advanced Technology, Chinese Academy"
+"University of Michigan, Ann Arbor, MI, USA (UMICH.EDU"
+"PG scholar, Communication Systems, Adhiyamaan College of Engineeing, Hosur, (India"
+"La Trobe University, Australia"
+Institute for Vision Systems Engineering
+Carnegie Melon University
+"Rice University, Houston, TX, 77005, USA"
+"Research School of Engineering, The Australian National University, ACT 2601, Australia"
+"Huazhong University of Science and Technology, Wuhan, China"
+"Central Tehran Branch, Azad University"
+"Aditya institute of Technology and Management, Tekkalli-532 201, A.P"
+"State Key Laboratory of CAD and CG, ZHE JIANG University, HangZhou, 310058 China"
+"University of Amsterdam, The Netherlands"
+Tampere University of Technology
+"Nagoya University, Japan"
+B.S./M.S. Brandeis University
+"University of Maryland, College Park, USA"
+"University of Pennsylvania School of Medicine, 1013 Blockley Hall"
+"Language Technology Institute, Carnegie Mellon University, Pittsburgh, PA, USA"
+"College of Mechatronic Engineering and Automation, National University of Defense Technology"
+"Imaging Science and Biomedical Engineering, The University of Manchester, UK"
+Hong Kong Polytechnic University
+"Arts Media and Engineering, Arizona State University"
+University of Illinois at Chicago
+"Institute of Automation, Chinese Academy of Sciences, Beijing 100190, China"
+"National Laboratory of Pattern Recognition Institute of Automation, Chinese Academy of Sciences"
+"School of Computer Science and Engineering, Nanyang Technological University, Singapore"
+"cid:3) School of Software, Tsinghua University"
+"Center for Healthy Aging, University of"
+University of Connecticut
+"Institute for Infocomm Research, Agency for Science, Technology and Research (A*STAR), Singapore"
+"Dalian University of Technology, Dalian 116024, China"
+"Shenzhen Institutes of Advanced Technology, Chinese Academy of Science, China"
+"Faculty of Engineering and Technology, Multimedia University (Melaka Campus"
+"EIMT, Open University of"
+University of Cape Town
+University of Cambridge Computer Laboratory
+"Shenzhen key lab of Comp. Vis. and Pat. Rec., Shenzhen Institutes of Advanced Technology, CAS, China"
+University of Maryland
+"aMILab, LCSEE, West Virginia University, Morgantown, West Virginia, USA"
+"CVL, Link oping University, Link oping, Sweden"
+"University College London, 12 Queen Square, London WC1N 3BG, UK"
+"Inst. Neural Computation, University of California"
+New York University
+"The Chinese University of Hong Kong, Hong Kong SAR, China"
+"College of Computers and Information Systems, Umm Al-Qura University, Makkah, KSA"
+Autonomous University of Barcelona
+"Kongju National University, South Korea"
+"Sri SidarthaInstitute of Technology, Tumkur"
+University Of California San Diego
+The American University in Cairo
+"Psychology, American University"
+"University of Pennsylvania Medical Center, Hospital of the University of Pennsylvania"
+"Institute for Infocomm Research, Singapore"
+Hunan University
+"School of Computer Science, Nanjing University of Science and Technology"
+Technical University of Kaiserslautern
+"Ross School of Business, University of Michigan, Ann Arbor, MI, USA"
+"School of EECS, Queen Mary University of London"
+"Key Laboratory of Pervasive Computing (Tsinghua University), Ministry of Education"
+"Helen Wills Neuroscience Institute, University of"
+"Carnegie Mellon University, Pittsburgh, USA"
+"School of Optics and Electronics, Beijing Institute of Technology, Beijing"
+"System Research Center, NOKIA Research Center, Beijing, 100176, China"
+"Kyung Hee University, South Korea"
+IstanbulTechnicalUniversity
+Technion Israel Institute of Technology
+"School of Computing and Communications Infolab21, Lancaster University, Lancaster LA1 4WA, UK"
+"University of Tsukuba, Japan"
+"ColumbiaUniversity, NY, USA"
+"aCentre for Neuroscience, Indian Institute of Science, Bangalore, India"
+CALIFORNIA INSTITUTE OF TECHNOLOGY
+University of Dundee
+University of Twente
+"Graduate Institute of Networking and Multimedia, National Taiwan University, Taipei, Taiwan"
+"University of Wisconsin-Madison, Madison, WI, USA"
+"National Engineering Research Center for Multimedia Software, Wuhan University, Wuhan, China"
+University of Witwatersrand
+"Institute of Automation, Chinese Academy of Sciences (CASIA"
+Marquette University
+"ICT-ISVISION Joint RandD Laboratory for Face Recognition, Institute of Computer Technology, The Chinese Academy of Sciences"
+"School of Control Science and Engineering, Shandong University, Jinan 250061, China"
+University of Perugia
+"National University of Computer and Emerging Sciences (NUCES-FAST), Islamabad, Pakistan"
+"NICTA, and Australian National University"
+"RWTH Aachen University, Germany"
+"College of Computer Science, Zhejiang University"
+"Computer Engineering and Computer Science, Duthie Center for Engineering, University of"
+University of Ljubljana
+"College of Science, Menou a University, Menou a 32721, Egypt"
+High Institute of Medical Technologies
+Vietnam National University Ho Chi
+University of Newcastle
+"University of Dammam, Saudi Arabia"
+"Center for Biometrics and Security Research and National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences"
+"Queen Mary, University of London, E1 4NS, UK"
+Engineering Chaoyang University Nankai Institute of
+"Feng Chia University, Taichung, Taiwan"
+"Carnegie Mellon University, CyLab Biometrics Center, Pittsburgh, PA, USA"
+"EEMCS, University of Twente, Netherlands"
+"Netherlands, Utrecht University, Utrecht, The Netherlands"
+University of Siena
+"Queen Mary, University of London"
+"Manonmaniam Sundaranar University, Tirunelveli"
+University of Thessaloniki
+B. S. Rochester Institute of Technology
+"California Institute of Technology, Pasadena, California, USA"
+"University of Haifa, Haifa, Israel"
+"Tamkang University, Taipei, Taiwan"
+"Imperial College of Science, Technology and Medicine"
+"Tel-Aviv University, Israel"
+"c Cardiff Business School, Cardiff University, Cardiff, United Kingdom"
+"RWTH Aachen University, Aachen, Germany"
+"Utrecht Centre for Child and Adolescent Studies, Utrecht University, Utrecht, The Netherlands"
+Wolfson College
+Honda Research Institute USA
+"School of Management Engineering, Henan Institute of Engineering, Zhengzhou 451191, P.R. China"
+"Beijing, China"
+"Graduate University of Chinese Academy of Sciences(CAS), 100190, China"
+"Institute of Image Communication and Network Engineering, Shanghai Jiao Tong University"
+"University of Caen, France"
+"Northeastern University, Boston, MA, USA"
+"Computer Vision Laboratory, Link oping University, SE-581 83 Link oping, Sweden"
+"National Taiwan University, Taiwan"
+"University of Southern California, Institute for Robotics and Intelligent Systems"
+South China University of China
+Harvard and Massachusetts Institute
+"College Park, MD 20742 USA"
+"School of Electronics Engineering and Computer Science; Peking University, Beijing 100871, China"
+"Montefiore Institute, University of Li ge, 4000 Li ge, Belgium"
+"School of Computer, Beijing Institute of Technology, Beijing, China"
+"Honda RandD Americas, Inc., Boston, MA, USA"
+"School of Computer Science, Fudan University, Shanghai 200433, China"
+"School of Computer Science and Technology, Tianjin University, Tianjin, China"
+Kobe University
+"Indian Institute of Technology, Kharagpur"
+"The Australian National University, Canberra, Australia"
+"University of Virginia, Charlottesville, VA"
+"University of California, Irvine, USA"
+Biometric Research Center
+"School of Psychology, The University of New South Wales, Sydney, Australia, 2 School of Psychology"
+"Motorola China Research Center, Shanghai, 210000, P.R.China"
+"Institute for Electronics, Signal Processing and Communications"
+"Rutgers University, New Brunswick, NJ"
+"College of Electronics and Information Engineering, Sichuan University, Chengdu 610064, China"
+"c School of Arts and Sciences, University of Pennsylvania Medical Center, Hospital of the University of Pennsylvania"
+"KTH Royal Institute of Technology, Stockholm"
+Mahanakorn University of Technology
+"Centre for Quantum Computation and Intelligent Systems, FEIT, University of Technology Sydney, Australia"
+"of Engineering and Information Technology, University of Technology, Sydney, Australia"
+"University of Washington, Seattle, USA"
+"Centre for Vision, Speech and Signal Processing, University of Surrey, Guildford, GU2 7XH"
+Howard Hughes Medical Institute (HHMI
+"University of California, Berkeley, Berkeley CA 94720, USA"
+"Dextro Robotics, Inc. 101 Avenue of the Americas, New York, USA"
+"COMSATS Institute of Information Technology, Islamabad"
+"Azad University, Qazvin, Iran"
+Islamic University of Gaza - Palestine
+University of Malta
+Tafresh University
+Stevens Institute of Technology Adobe Systems Inc
+"IES College of Technology, Bhopal"
+"Lecturer, Amity school of Engineering and Technology, Amity University, Haryana, India"
+"University of Amsterdam; 2Amsterdam Brain and Cognition Center, University of"
+North Carolina Central University
+"University of Trento, Italy"
+"The University of Sydney, NSW 2006, Australia"
+"University Technology of Malaysia, 81310 Skudai, Johor, Malaysia"
+"University of Science, Ho Chi Minh city"
+UNIVERSITY OF APPLIED SCIENCE DARMSTADT (FHD
+"recognition, such as human computer interfaces and e-services, including e-home"
+Australian Institute of Sport
+The University of British Columbia
+"Institute of Computing Technology, Chinese Academy of Sciences"
+U.S. Army Research Laboratory
+"Computational Medicine Laboratory, Institute of Computer Science, Foundation for Research and Technology - Hellas"
+ATR Human Information Processing Research Laboratories
+"Integrated Research Center, Universit`a Campus Bio-Medico di Roma"
+"Institute for Arts, Science and Technology"
+"Bo gazici University, Istanbul, TR"
+"versity of Amsterdam, Amsterdam and University of Trento"
+"Information Technology University (ITU), Punjab, Lahore, Pakistan"
+"School of Applied Statistics, National Institute of Development Administration, Bangkok, Thailand"
+"Santa Clara University, Santa Clara, CA. 95053, USA"
+"School of Computer Science and Technology, Tianjin University, Tianjin 300072, China"
+"Suhaila N. Mohammed, Baghdad University, College of Science, Baghdad, Iraq"
+Korea Advanced Institute of Science and Technology
+"School of Computer Software, Tianjin University, 300072 Tianjin, China"
+"Computer Science, Beijing Institute of Technology, Beijing 100081, P.R.China"
+"ESTeM, University of Canberra"
+"Rutgers University, Piscataway, NJ"
+"College of Science, Baghdad University, Baghdad, Iraq"
+"Center for Research in Computer Vision, University of Central Florida, Orlando, USA"
+"Psychology and Educational Sciences, Open University of the Netherlands, Valkenburgerweg"
+"University of Siena, Siena, Italy"
+"University of Zurich, Zurich, Switzerland"
+"and bDivision of Engineering and Applied Sciences 136-93, California Institute of Technology, Pasadena, CA"
+"The American University in Cairo, Egypt"
+"School of Psychology, University of Auckland, Auckland, New Zealand"
+"Informatics and Telematics Institute, Centre of Research and Technology - Hellas"
+"Lotus Hill Institute for Computer Vision and Information Science, 436000, China"
+"State University of Rio de Janeiro, Brazil"
+A DISSERTATION SUBMITTED TO THE UNIVERSITY OF MANCHESTER
+University of
+"Doctor of Philosophy in Computing of Imperial College, February"
+"National Laboratory of Pattern Recognition (NLPR), Institute of Automation"
+"Public University of Navarra, Spain"
+"Hector Research Institute of Education Sciences and Psychology, T ubingen"
+"University of Chinese Academy of Sciences, Beijing 100049, China"
+University Politehnica of Bucharest
+Western Kentucky University
+"University of Balochistan, Quetta"
+Pennsylvania
+"Electronic and Information Engineering, University of Bologna, Italy"
+"The Hong Kong Polytechnic University, Hong Kong, China"
+"Key Lab of Intelligent Information Processing of Chinese Academy of Sciences (CAS), Institute of"
+"Moscow Institute of Physics and Technology, Institutskiy per., 9, Dolgoprudny, 141701, Russia"
+"Computer Vision for Human Computer Interaction, Karlsruhe Institute of Technology, Germany"
+Jacobs University
+"Carnegie Mellon University Pittsburgh, PA, USA"
+Weizmann Institute of Science
+Institute for Computer Graphics and Vision
+"University of Science and Technology of China, Hefei, 230027, China"
+Link to publication in University of Groningen/UMCG research database
+"University of Cambridge, United Kingdom"
+"School of Computer Engineering, Nanyang Technological University, Singapore"
+"Lister Hill National Center for Biomedical Communications, National Library of Medicine, National Institutes of Health"
+"School of Mathematical Science, Dalian University of Technology, Dalian, China"
+Idiap Research Institute and EPF Lausanne
+"Vision and Sensing, HCC, ESTeM, University of Canberra"
+"Priyadarshini College of Engg, Nagpur, India"
+"School of Software, Tianjin University"
+"Information, Keio University"
+"Graduate School of Information Science, Nara Institute of Science and Technology, Ikoma-shi, Nara"
+"University of Chinese Academy of Sciences, China"
+"Institute of Digital Media, Peking University, Beijing 100871, China"
+"Korea Electronics Technology Institute, Jeonju-si, Jeollabuk-do 561-844, Rep. of"
+University of Science and Technology Beijing
+"University of California, Berkeley"
+"The University of Sydney, Sydney, Australia"
+at the University of Central Florida
+University of Campinas (Unicamp
+"CVAP, KTH (Royal Institute of Technology), Stockholm, SE"
+"University of Verona, Verona, Italy"
+"The Chinese University of Hong Kong, HKSAR, China"
+University of Exeter
+University of Maryland College Park
+"Purdue University, West Lafayette, Indiana, 47906, USA"
+"Graduate School of Information Science, Nagoya University; Furo-cho, Chikusa-ku, Nagoya, 464-8601, Japan"
+"University of Washington, Seattle, WA, USA"
+"Faculty of Science and Technology, University of Macau"
+"Center for Sensor Systems (ZESS) and Institute for Vision and Graphics#, University of Siegen"
+ALICE Institute
+University of California
+Clemson University
+University: Dhirubhai Ambani Institute of Information and Communication Technology
+"learning. As a result of this research, many applications, including video surveillance systems"
+"Harbin Institute of Technology, Harbin, China"
+of bilkent university
+"School of Behavioral and Brain Sciences, The University of Texas at Dallas, USA"
+"GREYC Laboratory, ENSICAEN - University of Caen Basse Normandie - CNRS"
+NSS College of Engineering
+Cardi University
+The City University of New York
+"Institute of Electrical Measurement and Measurement Signal Processing, TU Graz, Austria"
+"Universitat Polit`ecnica de Catalunya, Columbia University"
+University of Texas at Arlington
+"University Campus, 54124, Thessaloniki, Greece"
+Australian National University
+"Courant Institute of Mathematical Sciences and Google Research, New York, NY"
+"Publication details, including instructions for authors and subscription information"
+"University of Amsterdam, the Netherlands"
+Robotics Institute
+"University of Ioannina, Ioannina, Greece, 2 Computational Biomedicine"
+"face processing, including age (Berry, 1990), sex (Hill"
+Queensland University of Technology
+Language Technologies Institute
+Transilvania University
+"Intelligent Systems Lab Amsterdam, University of Amsterdam"
+"University of Bath, Bath, United Kingdom"
+"College of Engineering, Pune, India"
+"University of Pittsburgh, Pittsburgh, PA"
+University of Nottingham Ningbo China
+Math Institute
+"SASTRA University, Thanjavur, Tamil Nadu, India"
+College of Engineering (Poly
+"MCA Student, Sri Manakula Vinayagar Engineering College, Pondicherry"
+"Engineering, Ton Duc Thang University, 19 Nguyen Huu Tho Street, Ho Chi Minh City, Vietman"
+University of Venezia
+Institute of Road and
+"University of Basel, Departement Informatik, Basel, Switzerland"
+"Computer Information Systems, Missouri State University, 901 S. National, Springfield, MO 65804, USA"
+"Institute of Human Genetics, University Hospital Magdeburg, Magdeburg, Germany"
+"Center for Research in Intelligent Systems, University of California, Riverside, CA 92521, USA"
+"School of Computer Science, Carnegie Mellon University, USA"
+"Istanbul Technical University, Turkey"
+"Faculty of Electrical Engineering, Mathematics and Computer Science, University"
+"Solapur University, INDIA"
+Dartmouth College
+"School of Electrical Engineering and Computer Science, Peking University"
+"RTMNU Nagpur University, India"
+University of Massachusetts Amherst in partial ful llment
+University of Canberra
+University of Bath
+Research Center
+"Vision and Fusion Lab, Karlsruhe Institute of Technology KIT, Karlsruhe, Germany"
+"B. Tech., Indian Institute of Technology Jodhpur"
+Karlsruhe Institute of
+"Lille 1 University, France"
+University of Bonn
+"University of Szeged, 2 E tv s Lor nd University"
+"State Key Laboratory for Novel Software Technology, Nanjing University, China"
+"School of Computer Science, Carnegie Mellon University, Pittsburgh, USA"
+D.J. Sanghvi College of Engineering
+"KTH, Royal Institute of Technology"
+University of British Columbia
+Alex Waibel (Carnegie Mellon University
+Systems and Telematics - Neurolab
+Florida International University
+"Government College of Engineering, Aurangabad"
+Institute for Neural Computation
+"University of the Basque Country, San Sebastian, Spain"
+University of the Witwatersrand
+"University of Alberta, Edmonton, Canada"
+"Vision and Sensing, HCC Lab, ESTeM, University of Canberra"
+"North Dakota State University, Fargo, ND58105, USA"
+"Leiden Institute of Advanced Computer Science, Leiden University, The Netherlands"
+"University Library, Singapore"
+"University of Victoria, Victoria, Canada"
+"HOD, St. Joseph College of Information Technology, Songea, Tanzania"
+Tsinghua-CUHK Joint Research Center for Media Sciences
+"Michigan State University, East Lansing, MI, U.S.A"
+Oakland University
+"Stanford University, Stanford, CA, USA"
+"Pattern Recognition and Bioinformatics Group, Delft University of Technology"
+University of Trento
+"Springer Science + Business Media, Inc. Manufactured in The Netherlands"
+"Intelligence, Concordia University, Montreal"
+"Doctor of Philosophy in Computer Science at Cardi University, July 24th"
+"Institute of Computing Technology, CAS"
+"Kobe University, NICT and University of Siegen"
+"Rheinische-Friedrich-Wilhelms University, Bonn, Germany"
+"School of Computer Science, Northwestern Polytechnical University, Xi an China"
+"Institute of Systems Engineering, Southeast University, Nanjing, China"
+"School of Computer Science, Carnegie Mellon University, 15213, USA"
+"Southwest Jiaotong University, Chengdu, P.R. China"
+"Michigan State University, East Lansing, MI 48824, USA"
+"puter Engineering, National University of Singapore, Singapore (e-mails"
+"Dean, K.S.Rangasamy College of Technology, Namakkal, TamilNadu, India"
+College of Information and Electrical Engineering
+"Institute for Genomic Statistic and Bioinformatics, University Hospital Bonn"
+"Institute for Advanced Computer Studies, University of Maryland, College Park, MD"
+"The University of York, UK"
+Chosun University
+"Rensselaer Polytechnic Institute, USA"
+"Faculty of Computer, Khoy Branch, Islamic Azad University, Khoy, Iran"
+"University of Southampton, UK, 2University of Warwick, UK"
+"Institute of Digital Media, Peking University, Beijing, 100871, China"
+"Government College of Engineering, Aurangabad [Autonomous"
+"Research Center for Learning Science, Southeast University, Nanjing, China"
+"Imperial College London, UK"
+"Quantitative Employee unit, Finnish Institute of Occupational Health"
+Multimedia Laboratory at The Chinese University of Hong Kong
+"The American University In Cairo, Road 90, New Cairo, Cairo, Egypt"
+"University of California, Merced"
+The Education University of Hong Kong
+Institute for System Programming
+"National Research University Higher School of Economics, Nizhny Novgorod, Russian"
+Max Planck Institute for Biological Cybernetics
+Pohang University of Science and Technology
+"Key Laboratory of Transient Optics and Photonics, Xi an Institute of Optics and Precision Mechanics, Chi"
+"University of Nottingham, Nottingham, UK"
+"M.S. (University of California, Berkeley"
+Rochester Institute of Technology
+USC Institute for Creative Technologies
+"QCIS, University of Technology, Sydney"
+A Thesis submitted to McGill University in partial fulfillment of the requirements for the
+"and especially light angle, drastically change the appearance of a face [1]. Facial expressions, including"
+"College Park, USA"
+"University of Twente, Netherlands"
+"Arizona State University, Tempe AZ"
+STANBUL TECHNICAL UNIVERSITY INSTITUTE OF SCIENCE AND TECHNOLOGY
+"University of Basel, Switzerland"
+"Division of IT Convergence, Daegu Gyeongbuk Institute of Science and Technology"
+University Lecturer Veli-Matti Ulvinen
+University of Insubria
+Bo gazi ci University
+"Hasan Kalyoncu University, Gaziantep, Turkey"
+aInformation Sciences Institute
+"Johns Hopkins University, 3400 N. Charles Street, Baltimore, MD 21218, USA"
+Fraunhofer Institute for Integrated Circuits IIS
+Science and the Robotics Institute at Carnegie Mellon University. This study was supported in part
+University Of Maryland
+Kyushu University
+Boston University Theses and Dissertations
+The University of Texas at
+"Kulhare, Sourabh, ""Deep Learning for Semantic Video Understanding"" (2017). Thesis. Rochester Institute of Technology. Accessed"
+"Institute of Computer Science and Technology, Chongqing University of Posts and"
+"University of Oxford, Oxford, United Kingdom"
+"Psychonomic Society, Inc"
+"University of Chinese Academy of Sciences, Beijing, 100049, China"
+State University of New York at Binghamton
+"Pune Institute of Computer Technology, Pune, ( India"
+"School of Computer Science, University of Lincoln, United Kingdom"
+"Ph.D Research Scholar, Karpagam University, Coimbatore, Tamil Nadu, India"
+"University, Xi an Shaanxi Province, Xi an 710049, China"
+"Keio University, Yokohama 223-8522, Japan"
+"Rutgers University, Computer and Information Sciences, 110 Frelinghuysen Road, Piscataway, NJ"
+"UMIACS | University of Maryland, College Park"
+"Swiss Federal Institute of Technology, Lausanne (EPFL"
+"University of Shef eld, UK"
+State University of New York at Buffalo
+"Australian Centre for Visual Technologies, The University of Adelaide, Australia (b"
+"School of Automation Engineering, University of Electronic Science and Technology of China, No. 2006, Xiyuan Ave"
+Columbia University
+University of Warwick
+"College of Engineering, Purdue University"
+"National Institute of Technology, Durgapur, West Bengal, India"
+New Jersey Institute of Technology
+The Robotics Institute Carnegie Mellon University
+"Aristotle University of Thessaloniki, GR-54124 Thessaloniki, Greece"
+"C.L. Teo, University of Maryland"
+"University of Massachusetts, Amherst Technical Report UM-CS"
+Beckman Institute
+"Lomonosov Moscow State University, 2Video Analysis Technologies, LLC"
+The Chinese University ofHong Kong
+"School of Computer Engineering, Shahrood University of Technology, Shahrood, IRAN"
+"University of Alberta, Edmonton, AB T6G 2E8, Canada"
+"Robotics Institute, Carnegie Mellon University"
+University of Twente 2Dublin City University 3Oxford University
+"Southeast University, Nanjing, China"
+"University of Massachusetts, Amherst MA, USA"
+Virginia Polytechnic Institute and State University
+Link to publication from Aalborg University
+"the Diploma of Imperial College London. This thesis is entirely my own work, and, except"
+University of Manitoba
+B.S.Abdur Rahman University B.S.Abdur Rahman University
+"University of Michigan, Ann Arbor, USA"
+"National Chung Cheng University, Chiayi, Taiwan, R.O.C"
+"College of Information Engineering, Shanghai Maritime University, Shanghai, China, 2 School of Information, Kochi University"
+"to visually detectable changes in facial appearance, including blushing and tears. These"
+"National Institute of Advanced Industrial Science and Technology (AIST), Tsukuba 305-8560, Japan"
+Boston University
+"and Modeling, Rutgers University"
+"Carnegie Mellon University, 5000 Forbes Avenue, Pittsburgh, PA 15213, USA"
+"applications has different requirements, including: processing time (off-line, on-line, or real-time"
+Nam k Kemal University
+"Augmented Vision Lab, Technical University Kaiserslautern, Kaiserslautern, Germany"
+"Kyoto University, Kyoto, Japan"
+"a Section of Biomedical Image Analysis, University of Pennsylvania, 3600 Market, Suite 380, Philadelphia, PA 19104, USA"
+"Multimedia University (MMU), Cyberjaya, Malaysia"
+Eskisehir Osmangazi University
+University of Illinois
+A dissertation submitted to the Faculty of the University of Delaware in partial
+"Faculty of Mathematics and Computer Science, University of Barcelona, Barcelona, Spain"
+Institute of Deep Learning
+"The Hebrew University of Jerusalem, Israel"
+"University of Illinois, Urbana-Champaign"
+The Robotics Institute
+"Northumbria University, Newcastle Upon-Tyne NE21XE, UK"
+"Macau University of Science and Technology, Macau"
+"Tongji University, Shanghai 201804, China"
+"Virudhunagar Hindu Nadars Senthikumara Nadar College, Virudhunagar"
+National University of Defense Technology
+"Advanced Imaging Science, Multimedia, and Film Chung-Ang University, Seoul"
+"Image and Video Research Laboratory, Queensland University of Technology"
+"Arizona State University, AZ, USA"
+"University Center of FEI, S ao Bernardo do Campo, Brazil"
+"KU Phonetics and Psycholinguistics Lab, University of Kansas"
+"Publication details, including instructions for authors and subscription"
+"Kodak Research Laboratories, Rochester, New York"
+"J. P. College of Engineering, India"
+"University of Amsterdam, Amsterdam, The"
+"Deparment of Computing, Goldsmiths, University of London, UK"
+Carleton University
+"Research Institute of Shenzhen, Wuhan University, Shenzhen, China"
+"Nanjing University of Information Science and Technology, Nanjing, 210044, China"
+"The Hong Kong Polytechnic University, Hong Kong"
+"Institute of Automation, Chinese Academy of Sciences, 100190, Beijing, P.R.C"
+"University of Bonn, Germany"
+"Technological Educational Institute of Athens, 12210 Athens, Greece"
+"Technical University in Prague, Technick a 2, 166 27 Prague 6 Czech Republic"
+University Lecturer Anu Soikkeli
+"Quanti ed Employee unit, Finnish Institute of Occupational Health"
+"Center for Automation Research, UMIACS, University of Maryland, College Park"
+"School of Information Science and Technology, ShanghaiTech University, Shanghai, 200031, China"
+"Amal Jyothi College of Engineering, Kanjirappally, India"
+"Graduate University of CAS, 100190, Beijing, China"
+SIMON FRASER UNIVERSITY
+"Capital Normal University, 100048, China"
+No Institute Given
+"Intelligent Information Engineering and Science, Doshisha University, Kyoto, Japan"
+Nagoya University
+"Sichuan Fine Arts Institute, Chongqing, China"
diff --git a/scraper/reports/institution_names.csv b/scraper/reports/institution_names.csv
new file mode 100644
index 00000000..7010cb87
--- /dev/null
+++ b/scraper/reports/institution_names.csv
@@ -0,0 +1,3563 @@
+"1E1 WC Mackenzie Health Sciences Centre, University of Alberta, Edmonton, AB, Canada T6G 2R"
+"2 School of Computing, National University of Singapore"
+"2015 Wiley Periodicals, Inc"
+"2Program in Neuroscience, and 3Rotman Research Institute, University of Toronto, Toronto, Ontario M5S 3G3, Canada"
+A DISSERTATION SUBMITTED TO THE UNIVERSITY OF MANCHESTER
+A Thesis submitted to McGill University in partial fulfillment of the requirements for the
+A dissertation submitted to the Faculty of the University of Delaware in partial
+A dissertation submitted to the University of Bristol in accordance with the requirements
+"A. van Kleef, University of Amsterdam"
+AALTO UNIVERSITY
+"ACRV, The Australian National University University of Oxford QUVA Lab, University of Amsterdam"
+"ADSIP Research Centre, University of Central Lancashire"
+AI Institute
+ALICE Institute
+"ALPHA COLLEGE OF ENGINEERING, CHENNAI"
+ARISTOTLE UNIVERSITY OF THESSALONIKI
+ATR Human Information Processing Research Laboratories
+ATR Human Information Processing Research Laboratory
+ATR Interpreting Telecommunications Research Laboratories
+"Aalborg University, Denmark"
+"Aalen University, Germany"
+"Aalto University, Espoo, Finland"
+"Aarhus University, Finlandsgade 22 8200 Aarhus N, Denmark"
+"Abdul WaliKhan University, Mardan, KPK, Pakistan"
+"Aberystwyth University, UK"
+"Abha Gaikwad -Patil College of Engineering, Nagpur, Maharashtra, India"
+"Academic Center for Computing and Media Studies, Kyoto University, Kyoto 606-8501, Japan"
+"Academy of Sciences (Grant No. KGZD-EW-T03), and project MMT-8115038 of the Shun Hing Institute of"
+Achariya college of Engineering Technology
+Acharya Institute Of Technology
+"Address correspondence to: Karen L. Schmidt, University of"
+"Aditya College of Engineering, Surampalem, East Godavari"
+"Aditya Institute of Technology And Management, Tekkali, Srikakulam, Andhra Pradesh"
+"Aditya institute of Technology and Management, Tekkalli-532 201, A.P"
+"Adobe Systems, Inc., 345 Park Ave, San Jose, CA"
+"Advanced Digital Sciences Center (ADSC), University of Illinois at Urbana-Champaign, Singapore"
+"Advanced Digital Sciences Center, University of Illinois at Urbana-Champaign, Singapore"
+"Advanced Engineering, The Chinese University of Hong Kong"
+"Advanced Imaging Science, Multimedia, and Film Chung-Ang University, Seoul"
+"Affiliated to Anna university, Chennai"
+"Affiliated to Guru Gobind Singh Indraprastha University, Delhi, India"
+AgnelAnushya P. is currently pursuing M.E (Computer Science and engineering) at Vins Christian college of
+Akita Prefectural University
+"Akita Prefectural University, Yurihonjo, Japan"
+Akita University
+"Akita University, Akita, Japan"
+Al-Khwarizmi Institute of Computer Science
+Alan W Black (Carnegie Mellon University
+Alex Waibel (Carnegie Mellon University
+"Alexandria University, Alexandria, Egypt"
+"Alin Moldoveanu, Faculty of Automatic Control and Computers, University POLITEHNICA of Bucharest"
+Allen Institute for Arti cial Intelligence
+Allen Institute for Arti cial Intelligence (AI
+"Allen Institute for Arti cial Intelligence (AI2), Seattle, WA"
+"Amal Jyothi College of Engineering, Kanjirappally, India"
+"Amazon, Inc"
+American University
+"American University, Washington, DC, USA"
+Amherst College
+Amirkabir University of Technology
+"Amirkabir University of Technology, University of Southern California"
+"Amirkabir University of Technology, Tehran"
+"Amirkabir University of Technology, Tehran, Iran"
+"Amirkabir University of Technology, Tehran. Iran"
+Amity University
+"Amity University, Lucknow, India"
+"Amity University, Noida, India"
+"Amsterdam; and 3Center for Experimental Economics and Political Decision Making, University of Amsterdam"
+"Anatomy and Genetics, University of Oxford, Oxford, United Kingdom; 3The Wellcome"
+"AncyRijaV, Author is currently pursuing M.E (Software Engineering) in Vins Christian College of"
+"Anjuman College of Engineering and Technology, Sadar, Nagpur, India"
+Anna University
+"Anna University Chennai, India"
+"Anna University, Chennai"
+"Annamacharya Institute of Technology and Sciences, Tirupati, India"
+"Applied computing and mechanics laboratory, Swiss Federal Institute of Technology, 1015 Lausanne, Switzerland"
+Aristotle University of Thessaloniki
+Aristotle University of Thessaloniki GR
+"Aristotle University of Thessaloniki, Box 451, 54124 Thessaloniki, Greece"
+"Aristotle University of Thessaloniki, GR-541 24 Thessaloniki, Greece"
+"Aristotle University of Thessaloniki, GR-54124 Thessaloniki, Greece"
+"Aristotle University of Thessaloniki, Greece"
+"Aristotle University of Thessaloniki, Thessaloniki 541 24, Greece"
+"Aristotle University of Thessaloniki, Thessaloniki, Greece"
+AristotleUniversityofThessaloniki
+Arizona State University
+"Arizona State University, AZ, USA"
+"Arizona State University, Phoenix, Arizona"
+"Arizona State University, Tempe AZ"
+Army Research Laboratory
+"Aron Szekely, University of Oxford, UK"
+"Arti cial Intelligence Institute, China"
+"Arts Media and Engineering, Arizona State University"
+"Arts, Commerce and Science College, Gangakhed, M.S, India"
+"Arts, Science and Commerce College, Chopda"
+"Asia Pacific University of Technology and Innovation, Kuala Lumpur 57000, Malaysia"
+"Asia University, Taichung, Taiwan"
+"Asian Institute of Technology, Pathumthani, Thailand"
+"Asian University, Taichung, Taiwan"
+"Assam University, Silchar-788011 Assam University, Silchar"
+"Assistant Lecturer, College of Science, Baghdad University, Baghdad, Iraq"
+"Assiut University, Assiut 71515, Egypt"
+"Augmented Vision Lab, Technical University Kaiserslautern, Kaiserslautern, Germany"
+"Augsburg University, Germany"
+"Australian Centre for Visual Technologies, The University of Adelaide, Australia (b"
+Australian Institute of Sport
+Australian National University
+Australian National University and NICTA
+"Australian National University and NICTA, Australia"
+"Australian National University, 2CVLab, EPFL, Switzerland, 3Smart Vision Systems, CSIRO"
+"Australian National University, 2Smart Vision Systems, CSIRO, 3CVLab, EPFL"
+"Australian National University, Canberra"
+"Australian National University, Canberra, ACT 0200, Australia"
+"Australian National University, Canberra, Australia"
+"Australian National University, and NICTA"
+"Author s addresses: X. Peng, University of Rochester; L. Chi"
+"Author s addresses: Z. Li and D. Gong, Shenzhen Institutes of Advanced Technology, Chinese Academy"
+Autonomous University of Barcelona
+Azad University of Qazvin
+"Azad University, Qazvin, Iran"
+"B. Eng., Zhejiang University"
+B. S. Rochester Institute of Technology
+"B. Tech., Indian Institute of Technology Jodhpur"
+"B.A. Earlham College, Richmond Indiana"
+"B.E, K.S.Rangasamy College of Technology, Namakkal, TamilNadu, India"
+"B.Eng., Nankai University"
+B.S. (Cornell University
+"B.S. Abdur Rahman University, Chennai-48, India"
+B.S. University of Central Florida
+B.S. University of Indonesia
+"B.S., Computer Engineering, Bo gazi ci University"
+"B.S., E.E., Bo azi i University"
+"B.S., Pennsylvania State University"
+B.S./M.S. Brandeis University
+B.S.Abdur Rahman University B.S.Abdur Rahman University
+"B.Sc., University of Science and Technology of China"
+"B.Tech (C.S.E), Bharath University, Chennai"
+"B.Tech., Electronics Engineering, Institute of Technology, Banaras Hindu University"
+"BECS, Aalto University School of Science and Technology, Finland"
+"BECS, Aalto University, Helsinki, Finland"
+"BRIC, University of North Carolina at Chapel Hill, NC 27599, USA"
+"Babes Bolyai University, 58-60 Teodor Mihali, C333, Cluj Napoca"
+"Bacha Khan University, Charsadda, KPK, Pakistan"
+"Badji-Mokhtar University, P.O.Box 12, Annaba-23000, Algeria"
+"Bahc es ehir University, Istanbul, Turkey"
+Bahcesehir University
+Baidu IDL and Tsinghua University
+Baidu Research Institute of Deep Learning
+"Baidu Research, USA 3John Hopkins University"
+"Baingio Pinna, University of"
+Banaras Hindu University
+Bangalore Institute of Technology
+Bangladesh University of Engineering and Technology(BUET
+"Bapuji Institute of Engineering and Technology Davanagere, Karnataka, India"
+"Bar Ilan University, Israel"
+Bas kent University
+"Baze University, Abuja, Nigeria"
+Beckman Institute
+Beckman Institute for Advanced Science and Technology
+"Beckman Institute, University of Illinois at Urbana-Champaign"
+"Beckman Institute, University of Illinois at Urbana-Champaign, IL USA"
+"Beckman Institute, University of Illinois at Urbana-Champaign, IL, USA"
+"Beckman Institute, University of Illinois at Urbana-Champaign, USA"
+"Beckman Institute, University of Illinois at Urbana-Champaign, Urbana, IL 61801, USA"
+"Beckmann Institute, University of Illinois at Urbana-Champaign, USA"
+"Behavioural Science Group, Warwick Business School, University of Warwick; and 4Faculty of Psychology"
+"Behavioural Science Institute, Radboud University, Nijmegen, the Netherlands"
+Beihang University
+"Beihang University 2Gri th University 3University of York, UK"
+"Beihang University, 2The Chinese University of Hong Kong, 3Sensetime Group Limited"
+"Beihang University, Beijing 100191, China"
+"Beihang University, Beijing, China"
+Beijing Institute of Technology
+"Beijing Institute of Technology University, P. R. China"
+"Beijing Institute of Technology, Beijing 100081 CHINA"
+"Beijing Institute of Technology, Beijing 100081, PR China"
+"Beijing Institute of Technology, Beijing, China"
+"Beijing Institute of Technology, China"
+Beijing Jiaotong University
+"Beijing Lab of Intelligent Information Technology, School of Computer Science, Beijing Institute of Technology, China"
+"Beijing Laboratory of IIT, School of Computer Science, Beijing Institute of Technology, Beijing, China"
+Beijing National Research Center for Information Science and Technology
+"Beijing Normal University, China"
+"Beijing Union University, 100101, China"
+"Beijing University of Chemical Technology, China"
+Beijing University of Posts and Telecommunications
+"Beijing University of Posts and Telecommunications, Beijing, China"
+"Beijing University of Posts and Telecommunications, Beijing, China. 2School of"
+"Beijing University of Posts and Telecommunications, Beijing, P.R. China"
+"Beijing University of Technology, Beijing 100022, China"
+"Beijing, China"
+"BeingTogether Centre, Institute for Media Innovation, Singapore 637553, Singapore"
+"Benha University, Egypt"
+Berlin Institute of Technology
+Bharath Institute of Science and Technology
+"Bharath University, India"
+"Bharathidasan University, Trichy, India"
+"Bharti Vidyapeeth Deemed University, Pune, India"
+"Bibliographic details for the item, including a URL"
+Bielefeld University
+"Big Data Institute, University of Oxford"
+"Big Data Research Center, University of Electronic Science and Technology of China"
+"Bilgi University, Dolapdere, Istanbul, TR"
+Bilkent University
+"Bilkent University, 06800 Cankaya, Turkey"
+"Bio-Computing Research Center, Shenzhen Graduate School, Harbin Institute of Technology, Shenzhen, Guangdong Province, China, 2 Key Laboratory of Network"
+"Bioinformatics Institute, A*STAR, Singapore"
+Biometric Research Center
+"Biometric Research Center, The Hong Kong Polytechnic University"
+"Biometric and Image Processing Lab, University of Salerno, Italy"
+"Biometrics Research Lab, College of Computer Science, Sichuan University, Chengdu 610065, China"
+"Birkbeck College, University of London"
+Birkbeck University of London
+Bo gazi ci University
+"Bo gazi ci University, Turkey"
+"Bo gazic i University, Istanbul, Turkey"
+"Bo gazici University, Istanbul, TR"
+"Bogazici University, Bebek"
+"Bogazici University, Turkey"
+Boston College
+"Boston College, USA"
+"Boston College; 2Psychiatric Neuroimaging Program, Massachusetts General Hospital, Harvard Medical School; and 3Athinoula A. Martinos"
+Boston University
+Boston University / **Rutgers University / ***Gallaudet University
+Boston University 2Pattern Analysis and Computer Vision (PAVIS
+Boston University Computer Science Technical Report No
+Boston University Theses and Dissertations
+Boston University and 2University of North Carolina
+"Boston University, Boston, MA"
+"Boston University, Linguistics Program, 621 Commonwealth Avenue, Boston, MA"
+"Boston University, USA"
+"Boston University1, University of Tokyo"
+Bournemouth University
+"Bournemouth University, UK"
+"Brain and Cognitive Sciences, Massachusetts Institute of Technology, Cambridge, MA, USA"
+"Brazil, University Hospital Zurich, Z rich"
+Brown University
+Brown University 2University of Bath
+"Brown University, 2University of California, San Diego, 3California Institute of Technology"
+"Brown University, Providence, RI"
+"Brown University, Providence, RI 02912, USA"
+"Brown University, United States"
+"Budapest, Hungary, E tv s Lor nd University, Budapest, Hungary, 3 Institute for Computer Science"
+"C.L. Teo, University of Maryland"
+CALIFORNIA INSTITUTE OF TECHNOLOGY
+CARNEGIE MELLON UNIVERSITY
+"CAS), Institute of Computing Technology, CAS, Beijing 100190, China"
+"CAS), Institute of Computing Technology, CAS, Beijing, 100190, China"
+"CBSR and NLPR, Institute of Automation, Chinese Academy of Sciences, Beijing, China"
+"CBSRandNLPR, Institute of Automation, Chinese Academy of Sciences, Beijing, China"
+"CISE, University of Florida, Gainesville, FL"
+"CISUC, University of Coimbra"
+"CMR Institute of Technology, Hyderabad, (India"
+"CNRS , Institute of Automation of the Chinese Academy of Sciences"
+COLUMBIA UNIVERSITY
+COMSATS Institute of Information Technology
+COMSATS Institute of Information Technology Wah Cantt
+"COMSATS Institute of Information Technology, Islamabad"
+"COMSATS Institute of Information Technology, Lahore 54000, Pakistan"
+"COMSATS Institute of Information Technology, Pakistan"
+"CRCV, University of Central Florida"
+"CRIPAC and NLPR and CEBSIT, CASIA 2University of Chinese Academy of Sciences"
+"CUHK - SenseTime Joint Lab, The Chinese University of Hong Kong"
+"CUHK - SenseTime Joint Lab, The Chinese University of Hong Kong 2Amazon Rekognition"
+"CUHK-SenseTime Joint Lab, The Chinese University of Hong Kong"
+CUNY City College
+CUNY Graduate Center and City College
+"CVAP, KTH (Royal Institute of Technology), Stockholm, SE"
+"CVIP Lab, University of Louisville, Louisville, KY 40292, USA"
+"CVIP Lab, University of Louisville, Louisville, KY, USA"
+"CVL, Link oping University, Link oping, Sweden"
+CVSSP University of Surrey
+"CVSSP, University of Surrey"
+"CVSSP, University of Surrey, UK"
+"Ca Foscari University of Venice, Venice, Italy"
+"Caarmel Engineering College, MG University, Kerala, India"
+"Calgary, 2500 University Dr., N.W. Calgary, AB, Canada T2N 1N4. Tel"
+California Institute of Technology
+"California Institute of Technology, 1200 East California Boulevard Pasadena, California, USA"
+"California Institute of Technology, Pasadena, CA"
+"California Institute of Technology, Pasadena, CA, USA"
+"California Institute of Technology, Pasadena, California, USA"
+"California Institute of Technology, USA"
+"California State University, Fullerton, USA"
+"California State University, Long Beach, USA"
+Cambridge Research Laboratory
+Cambridge University
+"Cambridge University, Trumpington Street, Cambridge CB21PZ, UK"
+Canadian Institute for Advanced Research
+"Capital Normal University, 100048, China"
+Cardi University
+"Cardiff University, UK"
+Carleton University
+Carnegie Mellon University
+Carnegie Mellon University (CMU
+Carnegie Mellon University 2University of Washington 3Allen Institute for Arti cial Intelligence
+"Carnegie Mellon University 4College of CS and SE, Shenzhen University"
+"Carnegie Mellon University Pittsburgh, PA - 15213, USA"
+"Carnegie Mellon University Pittsburgh, PA, USA"
+"Carnegie Mellon University, 5000 Forbes Avenue, Pittsburgh, PA 15213, USA"
+"Carnegie Mellon University, CyLab Biometrics Center, Pittsburgh, PA, USA"
+"Carnegie Mellon University, Electrical and Computer Engineering"
+"Carnegie Mellon University, Pittsburgh PA"
+"Carnegie Mellon University, Pittsburgh, PA"
+"Carnegie Mellon University, Pittsburgh, PA 15213, USA"
+"Carnegie Mellon University, Pittsburgh, PA, 15213, USA"
+"Carnegie Mellon University, Pittsburgh, PA, USA"
+"Carnegie Mellon University, Pittsburgh, USA"
+"Carnegie Mellon University, USA"
+Carnegie Melon University
+Carney Institute for Brain Science
+"Catholic University of Rio de Janeiro, Brazil"
+"Center For Automation Research, University of Maryland, College Park"
+"Center for Arti cial Vision Research, Korea University"
+"Center for Automated Learning and Discovery), Carnegie Mellon University"
+"Center for Automation Research (CfAR), University of Maryland, College Park, MD"
+"Center for Automation Research, UMIACS, University of Maryland, College Park"
+"Center for Automation Research, UMIACS, University of Maryland, College Park, MD"
+"Center for Automation Research, UMIACS, University of Maryland, College Park, MD 20742 USA"
+"Center for Automation Research, UMIACS, University of Maryland, College Park, MD 20742, USA"
+"Center for Automation Research, University of Maryland"
+"Center for Automation Research, University of Maryland, College Park, MD"
+"Center for Automation Research, University of Maryland, College Park, MD 20740, USA"
+"Center for Automation Research, University of Maryland, College Park, MD 20742, USA"
+"Center for Biometrics and Security Research and National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences"
+"Center for Brain Science, Harvard University, Cambridge, MA 02138 USA"
+"Center for Brain Science, Harvard University, Cambridge, MA, USA"
+"Center for Brains, Minds and Machines, McGovern Institute, MIT"
+"Center for Cognitive Neuroscience, Duke University, Durham, North Carolina"
+"Center for Cognitive Science, University of Turin, Turin, Italy, 2 Neuroscience Institute of Turin"
+"Center for Cognitive Ubiquitous Computing, Arizona State University, Tempe, AZ, USA"
+"Center for Computational Biomedicine Imaging and Modeling Center, Rutgers University, New Brunswick, NJ"
+"Center for Computational Intelligence, Nanyang Technology University, Singapore"
+"Center for Healthy Aging, University of"
+"Center for Information Science, Peking University, Beijing 100871, China"
+"Center for Information and Neural Networks, National Institute of Information and Communications Technology (NICT"
+"Center for Intelligent Machines, McGill University, 3480 University Street, Montreal, Canada H3A 2A"
+"Center for Machine Vision Research, University of Oulu, Finland"
+"Center for Machine Vision and Signal Analysis (CMVS), University of Oulu, Finland"
+"Center for Machine Vision and Signal Analysis, University of Oulu, Finland"
+"Center for Research in Computer Vision (CRCV), University of Central Florida (UCF"
+"Center for Research in Computer Vision, University of Central Florida"
+"Center for Research in Computer Vision, University of Central Florida, Orlando, FL"
+"Center for Research in Computer Vision, University of Central Florida, Orlando, USA"
+"Center for Research in Intelligent Systems, University of California, Riverside, CA 92521, USA"
+"Center for Sensor Systems (ZESS) and Institute for Vision and Graphics#, University of Siegen"
+"Center of Research Excellence in Hajj and Umrah, Umm Al-Qura University, Makkah, KSA"
+Central Mechanical Engineering Research Institute
+"Central Mechanical Engineering Research Institute, Durgapur, West Bengal, India"
+"Central Tehran Branch, Azad University"
+"Central University of Finance and Economics, Beijing, China"
+Central Washington University
+"Central Washington University, 400 E. University Way, Ellensburg, WA 98926, USA"
+"Centre for Applied Autism Research, University of Bath, Bath, United Kingdom, 2 Social and"
+"Centre for Bioinformatics, Biomarker Discovery and Information-Based Medicine, The University of Newcastle, Callaghan, Australia, 2 Departamento de Engenharia de"
+"Centre for Imaging Sciences, University of"
+"Centre for Intelligent Machines, McGill University, Montreal, Canada"
+"Centre for Quantum Computation and Intelligent Systems, FEIT, University of Technology Sydney, Australia"
+"Centre for Quantum Computation and Intelligent Systems, Faculty of Engineering and IT, University of"
+"Centre for Quantum Computation and Intelligent Systems, University of Technology Sydney"
+"Centre for Quantum Computation and Intelligent Systems, University of Technology Sydney, Sydney, Australia"
+"Centre for Vision, Speech and Signal Processing, University of Surrey, Guildford GU2 7XH, UK"
+"Centre for Vision, Speech and Signal Processing, University of Surrey, Guildford, GU2 7XH"
+"Centre for Vision, Speech and Signal Processing, University of Surrey, Guildford, UK"
+"Centre for Vision, Speech and Signal Processing, University of Surrey, UK"
+"Chalmers University of Technology, SAFER"
+"Chandigarh Engg. College, Mohali, Punjab, India"
+"Chandigarh University, Gharuan, Punjab, India"
+"Changchun Institute of Optics, Fine Mechanics and Physics, Chinese Academy of Science"
+Charles Sturt University
+"Charotar University of Science and Technology, Changa, India"
+"China Mobile Research Institute, Xuanwu Men West Street, Beijing"
+China University of Mining and Technol
+"China, 2 Changchun Institute of Optics, Fine Mechanics and Physics, CAS, Changchun, China, 3 School of"
+"China, 2 School of Computer Science and Engineering, Nanjing University of Science and Technology"
+"China-Singapore Institute of Digital Media, Singapore"
+Chinese University of Hong Kong
+Chittagong University of Engineering and Technology
+"Chonbuk National University, Jeonju 561-756, Korea"
+"Chonbuk National University, Jeonju-si"
+"Chongqing Institute of Green and Intelligent Technology, Chinese Academy of Sciences"
+Chosun University
+"Chu Hai College of Higher Education, Hong Kong"
+Chubu University
+"Chulalongkorn University, Bangkok"
+"Chung-Ang University, Seoul, Korea"
+Chungnam National University
+City University of Hong Kong
+"City University of Hong Kong, Hong Kong, China"
+"City University of Hong Kong, Kowloon 999077, Hong Kong, China"
+Clemson University
+"Clemson University, Clemson, SC"
+"Co-Guide, Computer Science, Shah and Anchor Kuttchi Engineering College, Mumbai, India"
+Coburg University
+"Cognitive Arti cial Intelligence, Utrecht University, Heidelberglaan 6, 3584 CD, Utrecht"
+"Cognitive Brain Research Unit, Institute of Behavioural Sciences, University of"
+"Cognitive Neuroscience Laboratory, Centre of Biology and Health Sciences, Mackenzie Presbyterian University, S o Paulo"
+"Cognitive Systems Lab, Karlsruhe Institute of Technology, Karlsruhe, Germany"
+"Collage of Sciences, Baghdad University, Iraq"
+"College Heights Blvd, Bowling Green, KY"
+"College Park, MD"
+"College Park, MD 20742 USA"
+"College Park, MD, 20740, USA"
+"College Park, MD, USA"
+"College Park, Maryland"
+"College Park, USA"
+"College Park, United States"
+"College Road East, Princeton, NJ"
+"College of Automation, Harbin Engineering University, Heilongjiang, China"
+College of Computer Science
+College of Computer Science and Information Sciences
+"College of Computer Science and Information Technology, Central South University of Forestry and Technology, Hunan 410004, China"
+"College of Computer Science and Information Technology, Northeast Normal University, Changchun"
+"College of Computer Science and Software Engineering, Shenzhen University, Shenzhen 518060, China"
+College of Computer Science and Technology
+"College of Computer Science and Technology, Chongqing"
+"College of Computer Science and Technology, Zhejiang University, China"
+"College of Computer Science and Technology, Zhejiang University, Hangzhou, China"
+"College of Computer Science, Chongqing University, Chongqing, 400030, China"
+"College of Computer Science, Chongqing University, Chongqing, China"
+"College of Computer Science, Sichuan University"
+"College of Computer Science, Sichuan University, Chengdu 610065, P.R. China"
+"College of Computer Science, Sichuan University, Chengdu, China"
+"College of Computer Science, Zhejiang University"
+"College of Computer Science, Zhejiang University, Hangzhou 310027, China"
+"College of Computer Science, Zhejiang University, Hangzhou, China"
+"College of Computer Science, Zhejiang University, Zhejiang, China"
+"College of Computer and Control Engineering, Nankai University 4 Hikvision Research"
+"College of Computer and Control Engineering, Nankai University 4: Hikvision Research"
+"College of Computer and Information Engineering, Nanyang Institute of Technology"
+"College of Computer and Information Engineering, Tianjin Normal University, Tianjin 300387, China"
+College of Computer and Information Science
+"College of Computer and Information Science, Northeastern University, Boston, MA, USA"
+"College of Computer and Information Science, Northeastern University, Boston, USA"
+"College of Computer and Information Science, Northeastern University, MA, USA"
+"College of Computer and Information Science, Southwest University, Chongqing 400715, China"
+College of Computer and Information Sciences
+"College of Computers and Information Systems, Umm Al-Qura University, Makkah, KSA"
+College of Computing
+"College of Computing, Georgia Institute of Technology"
+"College of Computing, Georgia Institute of Technology, Atlanta, GA, USA"
+College of Electrical and Information Engineering
+"College of Electrical and Information Engineering, Hunan University, China"
+"College of Electronic Science and Engineering, National University of Defense Technology, Changsha, China"
+"College of Electronics and Information Engineering, Sejong University"
+"College of Electronics and Information Engineering, Sichuan University, Chengdu 610064, China"
+"College of Electronics and Information Engineering, Tongji University"
+"College of Electronics and Information, Northwestern Polytechnic University"
+College of Engineering (Poly
+"College of Engineering Pune, India"
+College of Engineering and Computer Science
+College of Engineering and Mineral Resources
+"College of Engineering, Mathematics and Physical Sciences"
+"College of Engineering, Northeastern University, Boston, MA, USA"
+"College of Engineering, Pune, India"
+"College of Engineering, Purdue University"
+College of Image Arts and Sciences
+College of Informatics
+College of Information Engineering
+"College of Information Engineering, Shanghai Maritime University, Shanghai, China, 2 School of Information, Kochi University"
+College of Information Science and Electronic Engineering
+College of Information Science and Engineering
+"College of Information Science and Engineering, Ocean University of China, Qingdao, China"
+"College of Information Science and Engineering, Ritsumeikan University, Shiga 525-8577, Japan"
+"College of Information Science and Engineering, Xinjiang University"
+College of Information Science and Technology
+"College of Information and Communication Engineering, Sungkyunkwan University, Suwon-si, Gyeonggi"
+"College of Information and Computer Sciences, University of Massachusetts, Amherst"
+College of Information and Control Engineering in China University of Petroleum
+"College of Information and Control Engineering, China University of Petroleum, Qingdao 266580, China"
+College of Information and Electrical Engineering
+"College of Information and Engineering, Hunan University, Changsha, China"
+"College of Information, Yunnan Normal University, Kunming, China"
+"College of Mechatronic Engineering and Automation, National University of Defense Technology"
+"College of Medical Informatics, Chongqing Medical University, Chongqing, China"
+"College of Medicine, Seoul National University"
+"College of Science, Baghdad University, Baghdad, Iraq"
+"College of Science, Menou a University, Menou a 32721, Egypt"
+"College of Sciences, Northeastern University, Shenyang 110819, China"
+"College of Software Engineering, Southeast University, Nanjing 210096, China"
+"College of Software, Beihang University"
+"College of software, Chongqing University of Posts and Telecommunications Chongqing"
+"CollegePark, MD"
+Colorado State University
+"Colorado State University, Fort Collins, CO 80523, USA"
+"Colorado State University, Fort Collins, Colorado, USA"
+"Colorado State University, Fort Collins, USA"
+"Columbia Business School, University of California, San Diego"
+Columbia University
+Columbia University in the City of New York
+"Columbia University, National University of Singapore"
+"Columbia University, New York NY 10027, USA"
+"Columbia University, New York, NY"
+"Columbia University, New York, NY 10027, USA"
+"Columbia University, New York, NY, USA"
+"Columbia University, USA"
+"Columbia University, United States"
+"ColumbiaUniversity, NY, USA"
+Compi`egne University of Technology
+"Computational Biomedicine Lab, University of Houston, TX, USA"
+"Computational Biomedicine Laboratory, University of Houston, Houston, Texas 77204, USA"
+"Computational Medicine Laboratory, Institute of Computer Science, Foundation for Research and Technology - Hellas"
+"Computational Science and Engineering Program, Bo gazic i University, Istanbul, Turkey"
+"Computer Applications, Ayya Nadar Janaki Ammal College, Sivakasi, India"
+"Computer Engineering and Computer Science, Duthie Center for Engineering, University of"
+"Computer Engineering, Faculty of Engineering, Kharazmi University of Tehran, Tehran, Iran"
+"Computer Graphics Research Group, University of Freiburg, Freiburg, Germany"
+"Computer Information Systems, Missouri State University, 901 S. National, Springfield, MO 65804, USA"
+"Computer Laboratory, University of Cambridge, Cambridge, UK"
+"Computer School, University of South China, Hengyang, China"
+"Computer Science Depart., Cornell University, USA"
+"Computer Science Depart., Rochester University, USA"
+"Computer Science Division, The Open University of Israel"
+"Computer Science Division, The Open University of Israel, Israel"
+"Computer Science North South University, Dhaka"
+"Computer Science and Arti cial Intelligence Laboratory, Massachusetts Institute of Technology, Cambridge, MA, USA"
+"Computer Science and Electrical Engineering, West Virginia University, Morgantown, USA"
+"Computer Science and Engineering, Anna University, India"
+"Computer Science and Engineering, Easwari Engineering College, India"
+"Computer Science and Engineering, Michigan State University, East Lansing, USA"
+"Computer Science and Engineering, University of Michigan, Ann Arbor"
+"Computer Science and Engineering, University of Texas at Arlington, USA"
+"Computer Science and Engineering, University of Washington"
+"Computer Science and Engineering, University of Washington, Seattle, WA"
+"Computer Science and Engineering, University of Washington, Seattle, WA, USA"
+"Computer Science and Software Engineering, Concordia University, Montr eal, Qu ebec, Canada"
+"Computer Science and Software Engineering, The University of Western Australia"
+"Computer Science and Technology, Tsinghua University, Beijing, China"
+"Computer Science, Beijing Institute of Technology, Beijing 100081, P.R.China"
+"Computer Science, Brown University, Providence, RI, USA"
+"Computer Science, Engineering and Mathematics School, Flinders University, Australia"
+"Computer Science, Princeton University, Princeton, NJ, USA"
+"Computer Vision Group, Friedrich Schiller University Jena"
+"Computer Vision Group, Friedrich Schiller University Jena, Germany"
+"Computer Vision Group, Friedrich Schiller University of Jena, Germany"
+"Computer Vision Group, L. D. College of Engineering, Ahmedabad, India"
+"Computer Vision Group, Xerox Research Center Europe, Meylan, France"
+"Computer Vision Lab, Delft University of Technology"
+"Computer Vision Lab, Delft University of Technology, Netherlands"
+"Computer Vision Laboratory, Link oping University, SE-581 83 Link oping, Sweden"
+"Computer Vision Laboratory, Link oping University, Sweden"
+"Computer Vision Laboratory, The University of Nottingham"
+"Computer Vision Laboratory, University of Nottingham, Nottingham, UK"
+Computer Vision Laboratory. University of Nottingham
+"Computer Vision Research Group, COMSATS Institute of Information"
+Computer Vision and Robotics Research Laboratory
+"Computer Vision for Human Computer Interaction, Karlsruhe Institute of Technology, Germany"
+"Computer and Systems Engineering, Rensselaer Polytechnic Institute"
+Computer and Vision Research Center
+"Computer vision and Remote Sensing, Berlin university of Technology"
+Concordia University
+"Concordia University, Canada"
+"Concordia University, Computer Science and Software Engineering, Montr eal, Qu ebec, Canada"
+"Concordia University, Montreal, Quebec, Canada"
+Conference on CyberGames and Interactive Entertainment (pp. 52-58). Western Australia: Murdoch university
+"Cooperative Medianet Innovation Center (CMIC), Shanghai Jiao Tong University, China"
+"Cooperative Medianet Innovation Center, Shanghai Jiao Tong University"
+"Cooperative Medianet Innovation Center, Shanghai Jiaotong University"
+"Coordinated Science Lab, University of Illinois at Urbana-Champaign"
+"Copyright c(cid:2) 2017 The Institute of Electronics, Information and Communication Engineers"
+"Copyright c(cid:3) 2017 The Institute of Electronics, Information and Communication Engineers"
+Copyright for the publications made accessible via the Queen's University Belfast Research Portal is retained by the author(s) and / or other
+Cornell University
+Cornell University 2 Cornell Tech
+Cornell University 2Eastman Kodak Company
+Cornell University and Stanford University
+"Cornell University, Ithaca, NY, U.S.A"
+"Cornell University, Ithaca, New York"
+"Cornell University, Washington University in St. Louis"
+"Correspondence should be addressed to: Astrid C. Homan, University of Amsterdam, Weesperplein"
+"Country University, San Sebastian, Spain"
+Courant Institute
+Courant Institute and Google Research
+Courant Institute of Mathematical Sciences
+"Courant Institute of Mathematical Sciences and Google Research, New York, NY"
+"Courant Institute of Mathematical Sciences, New York, NY"
+"Current Address: Research Institute of Child Development and Education, University of Amsterdam"
+Curtin University of Technology
+"Curtin University, Perth, Australia"
+"Curtin University, Perth, WA 6102, Australia"
+Cyprus University of Technology
+"Cyprus University of Technology, Cyprus"
+Czech Technical University
+"D Research Center, Kwangwoon University and Springer"
+D.J. Sanghvi College of Engineering
+"D.Y.Patil Institute of Engineering and Technology, Pimpri, Pune"
+"D.Y.Patil Institute of Engineering and Technology, Pimpri, Pune-18, Savitribai Phule Pune University"
+"DAIS, University of Venice, Italy"
+DAP - University of Sassari
+"DCMandB, University of Michigan, Ann Arbor, USA 4 SCS, Carnegie Mellon University, Pittsburgh, USA"
+"DESTEC, FLSHR Mohammed V University-Agdal, Rabat, Morocco"
+DICGIM - University of Palermo
+"DIEI, University of Perugia, Italy"
+DISI - University of Trento
+"DISI, University of Trento, Italy"
+"DISI, University of Trento, Trento, Italy"
+"DIT UNIVERSITY, DEHRADUN"
+"DPDCE, University IUAV"
+"DPDCE, University IUAV, Santa Croce 1957, 30135 Venice, Italy"
+"DSP Lab, Sharif University of Technology, Tehran, Iran"
+"DTU Informatics, Technical University of Denmark, DK-2800 Kgs. Lyngby, DENMARK"
+DUBLIN CITY UNIVERSITY
+"DUT-RU International School of Information Science and Engineering, Dalian University of Technology, Dalian, China"
+DVMM Lab - Columbia University
+Dalian Maritime University
+Dalian University of Technology
+"Dalian University of Technology, China"
+"Dalian University of Technology, Dalian 116024, China"
+"Dalian University of Technology, Dalian, China"
+"Dalio Institute of Cardiovascular Imaging, Weill Cornell Medical College"
+Dalle Molle Institute for Arti cial Intelligence
+"Dankook University, 126 Jukjeon-dong, Suji-gu, Yongin-si, Gyeonggi-do, Korea"
+Dartmouth College
+"Dartmouth College, 6211 Sudiko Lab, Hanover, NH 03755, USA"
+"Dartmouth College, NH 03755 USA"
+Datta Meghe College of Engineering
+"David R. Simmons, University of"
+"Dayananda Sagar College of Engg., India"
+"Dean, K.S.Rangasamy College of Technology, Namakkal, TamilNadu, India"
+Delft University of Technology
+"Delft University of Technology, The Netherlands"
+Democritus University of Thrace
+"Dep. of Applied Mathematics and Analysis, University of Barcelona, Spain"
+"Deparment of Computer Science, Queen Mary, University of London, London, E1 4NS, UK"
+"Deparment of Computing, Goldsmiths, University of London, UK"
+"Deparment of Computing, Imperial College London, UK"
+Departm nt of Information Engin ering Th Chines University of Hong Kong
+"Deprtment of Computer Science and Engineering, JNTUA College of Engineering, India"
+DeustoTech - University of Deusto
+"Deva Ramanan, University of California at Irvine"
+"Dextro Robotics, Inc. 101 Avenue of the Americas, New York, USA"
+Dhaka University
+Dhanalakshmi Srinivasan College of Engineering
+Dietrich College Honors Theses
+Dietrich College of Humanities and Social Sciences
+Digital Media Research Center
+"Dipartimento di Sistemi e Informatica, University of Florence"
+"Director, Hindustan College of Arts and Science, Coimbatore, Tamil Nadu, India"
+"Division of Computer Engineering, Chonbuk National University, Jeonju-si, Jeollabuk-do"
+"Division of Computer Engineering, Jeonbuk National University, Jeonju-si, Jeollabuk-do"
+"Division of Computer Science and Engineering, Hanyang University"
+"Division of Computer Science, University of California, Berkeley, CA, USA e-mail"
+"Division of Electronics and Electrical Engineering, Dongguk University, 26 Pil-dong 3-ga, Jung-gu"
+"Division of Electronics and Electrical Engineering, Dongguk University, 30 Pildong-ro 1-gil, Jung-gu"
+"Division of Electronics and Electrical Engineering, Dongguk University, 30 Pildong-ro, 1-gil, Jung-gu"
+"Division of IT Convergence, Daegu Gyeongbuk Institute of Science and Technology"
+"Division of Information and Computer Engineering, Ajou University, Suwon 443-749, Korea"
+"Dnyanopasak College Parbhani, M.S, India"
+"Doctor of Philosophy in Computer Science at Cardi University, July 24th"
+"Doctor of Philosophy in Computing of Imperial College, February"
+Doctor of Philosophy of University College London
+"Doctoral School of Automatic Control and Computers, University POLITEHNICA of Bucharest, Romania"
+Downloaded from the University of Groningen/UMCG research database (Pure): http://www.rug.nl/research/portal. For technical reasons the
+Dr C V Raman Institute of Science and Technology
+Dr. B. C. Roy Engineering College
+Dr. Babasaheb Ambedkar Marathwada University
+"Dr.D.Y.Patil College of Engineering, Pune, Maharashtra, India"
+"Dr.Pauls Engineering College, Villupuram District, Tamilnadu, India"
+Drexel University
+"Drexel University, Philadelphia, PA"
+Duke University
+"Duke University, Durham, NC, USA"
+"Durham University Library, Stockton Road, Durham DH1 3LY, United Kingdom"
+"Durham University, Durham, UK"
+"ECE dept, University of Miami"
+"ECE, National University of Singapore, Singapore"
+"ECSE, Rensselaer Polytechnic Institute, Troy, NY"
+"ECSE, Rensselaer Polytechnic Institute, Troy, NY, USA"
+"EECS, Syracuse University, Syracuse, NY, USA"
+"EECS, University of California Berkeley"
+"EEMCS, University of Twente"
+"EEMCS, University of Twente Enschede, The Netherlands"
+"EEMCS, University of Twente, Netherlands"
+"EEMCS, University of Twente, The Netherlands"
+"EIMT, Open University of"
+"EIMT, Open University of Catalonia, Barcelona, Spain"
+"ESAT, Katholieke Universiteit Leuven, Leuven, Belgium"
+"ESAT-PSI, KU Leuven, 2CV:HCI, KIT, Karlsruhe, 3University of Bonn, 4Sensifai"
+"ESAT-PSI, KU Leuven, 2University of Bonn, 3CV:HCI, KIT, Karlsruhe, 4Sensifai"
+"ESTeM, University of Canberra"
+East China Normal University
+Eastern Mediterranean University
+"Eastern Mediterranean University, Gazima usa, Northern Cyprus"
+Eastern University
+"Ecole Polytechnique Federale de Lausanne, Signal Processing Institute"
+Economy (MKE) and the Korea Evaluation Institute of Industrial Technology (KEIT
+"Edited by David L. Donoho, Stanford University, Stanford, CA, and approved August 7, 2017 (received for review January"
+"Education, Yunnan Normal University, Kunming, China"
+"Education, Yunnan NormalUniversity, Kunming, China2. College of Information, Yunnan"
+"Eindhoven University of Technology, The Netherlands"
+"Elect. Eng. Faculty, Tabriz University, Tabriz, Iran"
+"Electrical Eng. Dep., Central Tehran Branch, Islamic Azad University, Tehran, Iran"
+"Electrical Engineering Institute, EPFL"
+"Electrical Engineering LR11ESO4), University of Tunis EL Manar. Adress: ENSIT 5, Avenue Taha Hussein, B. P. : 56, Bab"
+"Electrical Engineering, University of"
+"Electrical and Computer Engineering, National University of Singapore, Singapore"
+"Electrical and Computer Engineering, Northeastern University, Boston, MA"
+"Electrical and Computer Engineering, The University of Memphis"
+"Electrical and Computer Engineering, University of Auckland, New Zealand"
+"Electrical and Computer Engineering, University of Pittsburgh, USA"
+"Electrical and Computer Engineering, University of Toronto, M5S 3G4, Canada"
+"Electrical and Space Engineering, Lule University of Technology"
+"Electrical, Computer, Rensselaer Polytechnic Institute"
+"Electrical, Electronics and Automation Section, Universiti Kuala Lumpur Malaysian Spanish Institute"
+Electronic Engineering and Computer Science Queen Mary University of London
+"Electronic Engineering and Computer Science, Queen Mary University of London, UK"
+"Electronic and Information Engineering, University of Bologna, Italy"
+"Electronics And Communication Engg., Adhiyamaan College of Engg., Hosur, (India"
+"Electronics Engineering, National Institute of Technical Teachers"
+"Electronics and Communication Engineering, Chuo University"
+"Electronics and Computer Science, University of Southampton, Southampton, Hampshire"
+Electronics and Telecommunications Research Institute
+Emory University
+"Emory University, USA"
+"Engg, Priyadarshini College of"
+Engineering Chaoyang University Nankai Institute of
+"Engineering Institute, Autonomous University of Baja California, Blvd. Benito Ju rez"
+"Engineering and Applied Science, SUNY Binghamton University, NY, USA"
+"Engineering and Natural Science, Sabanci University, Istanbul, Turkey"
+"Engineering, G.H.Raisoni College of Engineering"
+"Engineering, Iran University"
+"Engineering, National Formosa University"
+"Engineering, Ton Duc Thang University, 19 Nguyen Huu Tho Street, Ho Chi Minh City, Vietman"
+"Engineering, University of Akron, Akron, OH 44325-3904, USA"
+"Engineering, University of Dundee"
+"Engineering, York University, Canada"
+Enlighten Research publications by members of the University of Glasgow
+"Environment, Northumbria University, Newcastle, NE1 8ST, United Kingdom"
+Eskisehir Osmangazi University
+"Exploratory Computer Vision Group, IBM T. J. Watson Research Center"
+"F.Ferraro, University of Rochester"
+"FI-90014 University of Oulu, Finland"
+FL
+"FX Palo Alto Laboratory, Inc., California, USA"
+"FaceTec, Inc"
+Facebook 4Texas AandM University 5IBM Research
+"Facebook AI Research, 2Dartmouth College"
+"Facial Image Processing and Analysis Group, Institute for Anthropomatics"
+"Facial expression gures prominently in research on almost every aspect of emotion, including psychophys"
+"Faculty member, Parallel Data Lab (PDL), Carnegie Mellon University"
+"Faculty of Computer Science, Dalhousie University, Halifax, Canada"
+"Faculty of Computer Science, Mathematics, and Engineering, University of Twente, Enschede, Netherlands"
+"Faculty of Computer Science, University of A Coru na, Coru na, Spain"
+"Faculty of Computer and Information Science, University of Ljubljana, Ve cna pot 113, SI-1000 Ljubljana"
+"Faculty of Computer, Khoy Branch, Islamic Azad University, Khoy, Iran"
+"Faculty of Computers and Information, Cairo University, Cairo, Egypt"
+"Faculty of Computing and Informatics, Multimedia University, Malaysia"
+"Faculty of EEMCS, Delft University of Technology, The Netherlands"
+"Faculty of EEMCS, University of Twente, The Netherlands"
+"Faculty of ETI, Gdansk University of Technology, Gdansk, Poland"
+"Faculty of Electrical Engineering, Czech Technical University"
+"Faculty of Electrical Engineering, Czech Technical University in Prague"
+"Faculty of Electrical Engineering, Mathematics and Computer Science, Delft University of"
+"Faculty of Electrical Engineering, Mathematics and Computer Science, University"
+"Faculty of Electrical Engineering, Mathematics and Computer Science, University of Twente, The"
+"Faculty of Electrical Engineering, University of Ljubljana"
+"Faculty of Electrical Engineering, University of Ljubljana, Slovenia"
+"Faculty of Electrical Engineering, University of Ljubljana, Tr a ka cesta 25, SI-1000 Ljubljana, Slovenia"
+"Faculty of Electrical Engineering, University of Ljubljana, Tr za ska 25, SI-1000 Ljubljana, Slovenia"
+"Faculty of Electrical Engineering, University of Ljubljana, Tr za ska cesta"
+"Faculty of Electrical Engineering, University of Ljubljana, Tr za ska cesta 25, SI-1000 Ljubljana, Slovenia"
+"Faculty of Electrical and Computer Engineering, Bu-Ali Sina University, Hamadan, Iran"
+"Faculty of Electrical and Computer Engineering, Tarbiat Modares University, Tehran, Iran"
+"Faculty of Electronics and Communication, Taishan University"
+"Faculty of Electronics and Communication, Yanshan University"
+"Faculty of Electronics, Telecommunications and Informatics, Gdansk University of Technology, Poland"
+"Faculty of Engineering Building, University of Malaya, 50603 Kuala Lumpur, Malaysia"
+"Faculty of Engineering and Natural Sciences, Sabanc University, stanbul, Turkey"
+"Faculty of Engineering and Technology, Multimedia University (Melaka Campus"
+"Faculty of Engineering, Ain Shams University, Cairo, Egypt"
+"Faculty of Engineering, Al Azhar University, Qena, Egypt"
+"Faculty of Engineering, Bar-Ilan University, Israel"
+"Faculty of Engineering, Ferdowsi University, Mashhad, Iran"
+"Faculty of Engineering, Multimedia University, Malaysia"
+"Faculty of Informatics, E otv os Lor and University, Budapest, Hungary"
+"Faculty of Informatics, University of Debrecen, Hungary"
+"Faculty of Information Science and Technology, Multimedia University, 75450 Melaka, Malaysia"
+"Faculty of Information Technology, Barrett Hodgson University, Karachi, Pakistan"
+"Faculty of Information Technology, Vietnam National University of Agriculture, Hanoi 10000, Vietnam"
+"Faculty of Mathematics and Computer Science, University of Barcelona, Barcelona, Spain"
+"Faculty of Natural Sciences, University of Stirling, Stirling FK9 4LA, UK"
+"Faculty of Science and Engineering, Waseda University, Tokyo, Japan"
+"Faculty of Science and Technology, University of Macau"
+"Faculty of Science, University of Amsterdam, The Netherlands"
+"Faculty of Telecommunications, Technical University, Sofia, Bulgaria"
+"Federal Institute of Science and Technology, Mookkannoor"
+"Federal University Technology Akure, PMB 704, Akure, Nigeria"
+Federal University of Bahia (UFBA
+Federal University of Campina Grande (UFCG
+Federal University of Para ba
+Federal University of Technology - Paran a
+"Feng Chia University, Taichung, Taiwan"
+"Ferdowsi University of Mashhad, Mashhad, Iran"
+"Figure 1: A few results from our VRN - Guided method, on a full range of pose, including large expressions"
+"Final Year Student, M.Tech IT, Vel Tech Dr. RR andDr. SR Technical University, Chennai"
+"Final Year, PanimalarInstitute of Technology"
+Firat University
+"Florian Metze, Chair (Carnegie Mellon University"
+"Florida Institute Of Technology, Melbourne Fl"
+Florida International University
+Florida State University
+"Florida State University, Tallahassee, FL 32306, USA"
+"Florida State University, Tallahassee, Florida, U.S.A"
+"Florida State University, USA"
+Formerly: Texas AandM University
+"Foundation University Rawalpindi Campus, Pakistan"
+"Foundation University, Rawalpindi 46000, Pakistan"
+"Francis Xavier Engineering College, Tirunelveli, Tamilnadu, India"
+Franklin. W. Olin College of Engineering
+Fraser University
+Fraunhofer Heinrich Hertz Institute
+"Fraunhofer Institute for Digital Media Technology, Germany"
+Fraunhofer Institute for Integrated Circuits IIS
+"Fraunhofer Institute of Optronics, System Technologies and Image Exploitation (Fraunhofer IOSB"
+"Friedrich Schiller University, D-07740 Jena"
+Fudan University
+"Fudan University, 2Microsoft Research Asia, 3University of Maryland"
+"Fudan University, Shanghai, China"
+Funding was provided by the U.S. National Institutes of Mental
+"G. H .Raisoni Collage of Engg and Technology, Wagholi, Pune"
+"G.H.Raisoni College of Engg. and Mgmt., Pune, India"
+GE Global Research Center
+"GIPSA-lab, Institute of Engineering, Universit Grenoble Alpes, Centre National de la Recherche Scienti que, Grenoble INP"
+"GIT Vision Lab, http://vision.gyte.edu.tr/, Gebze Institute of Technology"
+"GRASP Laboratory, University of Pennsylvania, 3330 Walnut Street, Philadelphia, PA, USA"
+"GREYC Laboratory, ENSICAEN - University of Caen Basse Normandie - CNRS"
+GREYC Research Lab
+"GREYC UMR CNRS 6072 ENSICAEN-Image Team, University of Caen Basse-Normandie, 6 Boulevard Mar echal Juin"
+"GSCM-LRIT, Faculty of Sciences, Mohammed V University-Agdal, Rabat, Morocco"
+"Gallaudet University, Technology Access Program, 800 Florida Ave NE, Washington, DC"
+"Games Studio, Faculty of Engineering and IT, University of Technology, Sydney"
+Gangnung-Wonju National University
+Gannan Normal University
+"Gannan Normal University, Ganzhou 341000, China"
+"Gatsby Computational Neuroscience Unit, University College London, London, UK"
+"Gayathri.S, M.E., Vins Christian college of Engineering"
+Gdansk University of Technology
+"Gdansk University of Technology, Faculty of Electronics, Telecommunication"
+George Mason University
+"George Mason University, Fairfax Virginia, USA"
+"George Mason University, Fairfax, VA, USA"
+George Washington University
+Georgia Institute of Technology
+Georgia Institute of Technology 2Emory University
+"Georgia Institute of Technology, CVIT, IIIT Hyderabad, IIT Kanpur"
+"Georgia Institute of Technology, 2NEC Laboratories America, 3Georgia Tech Research Institute"
+"Georgia Institute of Technology, Atlanta, Georgia, USA"
+German Research Center for Arti cial Intelligence (DFKI
+"German Research Center for Arti cial Intelligence (DFKI), Kaiserslautern, Germany"
+"Germany, University of Oldenburg, Oldenburg, Germany"
+"Gettysburg College, Gettysburg, PA, USA"
+Ghent University
+"Giulia Andrighetto, Institute of"
+"Global Big Data Technologies Centre (GBDTC), University of Technology Sydney, Australia"
+Glyndwr University
+"Gokaraju Rangaraju Institute of Engineering and Technology, Hyd"
+"Gokaraju Rangaraju Institute of Engineering and Technology, Hyderabad"
+"Goldsmiths, University of London"
+"Goldsmiths, University of London, London, UK"
+"Goldsmiths, University of London, UK"
+"Gonda Brain Research Center, Bar Ilan University, Israel"
+"Google, Inc"
+"Google, Inc. 2University of Massachusetts Amherst 3MIT CSAIL"
+"Governance, Keio University"
+Government College of Engineering
+"Government College of Engineering, Aurangabad"
+"Government College of Engineering, Aurangabad [Autonomous"
+"Grad. School at Shenzhen, Tsinghua University"
+"Grad. School of Information Science and Technology, The University of Tokyo, Japan"
+"Graduate Institute of Electronics Engineering, National Taiwan University"
+"Graduate Institute of Networking and Multimedia, National Taiwan University"
+"Graduate Institute of Networking and Multimedia, National Taiwan University, Taipei, Taiwan"
+"Graduate Institute ofNetworking and Multimedia, National Taiwan University, Taipei, Taiwan"
+"Graduate School at Shenzhen, Tsinghua University, Shenzhen"
+"Graduate School at Shenzhen, Tsinghua University, Shenzhen 518055, China"
+"Graduate School of Advanced Imaging Science, Multimedia, and Film, Chung-Ang University"
+"Graduate School of Doshisha University, Kyoto, Japan"
+"Graduate School of Engineering, Kobe University, Kobe, 657-8501, Japan"
+"Graduate School of Engineering, Tottori University"
+"Graduate School of Informatics, Kyoto University"
+"Graduate School of Informatics, Kyoto University, Kyoto 606-8501, Japan"
+"Graduate School of Information Science and Technology, The University of Tokyo"
+"Graduate School of Information Science, Nagoya University, Japan"
+"Graduate School of Information Science, Nagoya University; Furo-cho, Chikusa-ku, Nagoya, 464-8601, Japan"
+"Graduate School of Information Science, Nara Institute of Science and Technology, Ikoma-shi, Nara"
+"Graduate School of Science and Engineering, Saitama University"
+"Graduate School of System Informatics, Kobe University"
+"Graduate School of System Informatics, Kobe University, Kobe, 657-8501, Japan"
+"Graduate School of Systems and Information Engineering, University of Tsukuba"
+"Graduate University for Advanced Studies, Kanagawa, Japan"
+"Graduate University of CAS, 100190, Beijing, China"
+"Graduate University of Chinese Academy of Sciences(CAS), 100190, China"
+"Graduate University of Chinese Academy of Sciences, Beijing 100049, China"
+"Gravis Research Group, University of Basel"
+Graz University of Technology
+"Graz University of Technology, Austria"
+"Gri th University, QLD-4111, Brisbane, Australia"
+"Grif th University, Australia"
+"Grif th University, QLD, Australia"
+"Grove School of Engineering, CUNY City College, NY, USA"
+Guangdong Medical College
+Guangdong University of Technology
+"Guide, HOD, Computer Science, Shah and Anchor Kuttchi Engineering College, Mumbai, India"
+"Gujarat Technological University, India"
+"Gujarat Technological University, V.V.Nagar, India"
+Gyan Ganga Institute of
+"H. He, Honkong Polytechnic University"
+"HAVELSAN Inc., 2Bilkent University, 3Hacettepe University"
+"HCI Lab., Samsung Advanced Institute of Technology, Yongin, Korea"
+HELSINKI UNIVERSITY OF TECHNOLOGY
+"HOD, St. Joseph College of Information Technology, Songea, Tanzania"
+Hacettepe University
+Halmstad University
+Hangzhou Institute of Service
+Hangzhou Normal University
+"Hankuk University of Foreign Studies, South Korea"
+Hanoi University of Science and Technology
+"Hanshan Normal University, Chaozhou, 521041, China"
+Hanyang University
+Harbin Institute of Technology
+"Harbin Institute of Technology (Shenzhen), China"
+"Harbin Institute of Technology, Harbin 150001, China"
+"Harbin Institute of Technology, Harbin, China"
+"Harbin Institute of Technology, School of Computer Science and Technology"
+Harbin Institute of Technology;Shenzhen University
+Harvard University
+Harvard University 2University of Southern California
+"Harvard University 3Perceptive Automata, Inc"
+Harvard University 4Max Planck Institute for Informatics
+"Harvard University, Cambridge, MA"
+"Harvard University, Cambridge, MA 02138, USA"
+"Harvard University, Cambridge, MA, USA"
+"Harvard University, USA"
+Harvard and Massachusetts Institute
+"Hasan Kalyoncu University, Gaziantep, Turkey"
+"Head and Neck Surgery, Seoul National University"
+"Hector Research Institute of Education Sciences and Psychology, T ubingen"
+"Heilongjiang University, College of Computer Science and Technology, China"
+"Held at R.C.Patel Institute of Technology, Shirpur, Dist. Dhule, Maharastra, India"
+"Helen Wills Neuroscience Institute, University of"
+"Helsinki Collegium for Advanced Studies, University of Helsinki, Finland"
+"Helsinki Institute for Information Technology, Aalto University, Finland"
+Helsinki University of Technology Laboratory of Computational Engineering Publications
+"Henan University of Traditional Chinese Medicine, Henan, Zhengzhou, 450000, P.R. China"
+"Hengyang Normal University, Hengyang, China"
+Heriot-Watt University
+"Heudiasyc Laboratory, CNRS, University of Technology of Compi`egne"
+High Institute of Medical Technologies
+Hikvision Research Institute
+"Hindusthan College of Engineering and Technology, Coimbatore, India"
+"Hiroshima University, Japan"
+Ho Chi Minh City University of
+Ho Chi Minh City University of Science
+HoHai University
+Honda Fundamental Research Labs
+"Honda RandD Americas, Inc., Boston, MA, USA"
+Honda Research Institute
+Honda Research Institute USA
+Hong Kong Applied Science and Technology Research Institute Company Limited
+"Hong Kong Applied Science and Technology Research Institute Company Limited, Hong Kong, China"
+Hong Kong Baptist University
+Hong Kong Polytechnic University
+"Hong Kong Polytechnic University, Hong Kong"
+"Hong Kong Polytechnic University, Hong Kong, China"
+Hong Kong University of Science and Technology
+"Hong Kong University of Science and Technology, Hong Kong"
+Howard Hughes Medical Institute (HHMI
+"Howard University, Washington DC"
+"Hua Zhong University of Science and Technology, Wuhan, China"
+Huazhong Agricultural University
+Huazhong University of
+Huazhong University of Science and Technology
+"Huazhong University of Science and Technology, Wuhan, China"
+"Human Centered Multimedia, Augsburg University, Germany"
+"Human Computer Interaction Lab., Samsung Advanced Institute of Technology, Korea"
+"Human Development and Applied Psychology, University of Toronto, Ontario, Canada"
+"Human Genome Center, Institute of Medical Science"
+Human Interaction Research Lab
+"Human Interface Technology Lab New Zealand, University of Canterbury, New Zealand"
+"Human Language Technology and Pattern Recognition Group, RWTH Aachen University, Germany"
+"Human Media Interaction, University of Twente, P.O. Box"
+"Human and Health Sciences, Swansea University, Swansea, United Kingdom, 3 Abertawe Bro-Morgannwg"
+"Human-friendly Welfare Robotic System Engineering Research Center, KAIST"
+"Humboldt-University, Berlin, Germany"
+"Hunan Provincial Key Laboratory of Wind Generator and Its Control, Hunan Institute of Engineering, Xiangtan, China"
+Hunan University
+"IBM Almaden Research Center, San Jose CA"
+IBM China Research Lab
+"IBM Research, Australia, 2 IBM T.J. Watson Research Center, 3 National University of Singapore"
+IBM T. J. Watson Research Center
+"IBM T. J. Watson Research Center, PO Box 704, Yorktown Heights, NY"
+"IBM T. J. Watson Research Center, Yorktown Heights, NY, USA"
+IBM T.J. Watson Research Center
+"IBM TJ Watson Research Center, USA"
+IBM Thomas J. Watson Research Center
+"IBM Watson Research Center, Armonk, NY, USA"
+ICMC University of S ao Paulo
+ICSI / UC Berkeley 2Brigham Young University
+"ICT-ISVISION Joint RandD Laboratory for Face Recognition, Institute of Computer Technology, The Chinese Academy of Sciences"
+IDIAP RESEARCH INSTITUTE
+IDIAP Research Institute
+"IDIAP Research Institute, Martigny, Switzerland"
+"IEEE Member, Shahid Rajaee Teacher training University"
+"IES College of Technology, Bhopal"
+"IHCC, RSCS, CECS, Australian National University"
+"IIE, Universidad de la Rep ublica, Uruguay. 2ECE, Duke University, USA"
+"IIIS, Tsinghua University"
+"IIIS, Tsinghua University, Beijing, China"
+"IIIT-Delhi, India, 2West Virginia University"
+"IKAT, Universiteit Maastricht, St. Jacobsstraat 6, 6211 LB Maastricht, The Netherlands"
+IMPERIAL COLLEGE
+"IN3, Open University of"
+"INTELSIG, Monte ore Institute, University of Li`ege, Belgium"
+ISISTAN Research Institute - CONICET - UNICEN
+"ISLA Lab, Informatics Institute"
+"ISLA Lab, Informatics Institute, University of Amsterdam"
+"ISTMT, Laboratory of Research in Biophysics and Medical Technologies LRBTM Higher Institute of Medical Technologies of Tunis"
+"ISTMT, University of Tunis El Manar Address: 9, Rue Docteur Zouhe r Safi 1006; 3Faculty of Medicine of Tunis; Address"
+"IT Instituto de Telecomunica es, University of Beira Interior, Covilh , Portugal"
+"IT - Instituto de Telecomunica es, University of Beira Interior"
+"ITCS, Institute for Interdisciplinary Information Sciences, Tsinghua University, Beijing"
+"ITCS, Tsinghua University"
+"ITEE, The University of Queensland, Australia"
+"ITIC Research Institute, National University of Cuyo"
+Idiap Research Institute
+Idiap Research Institute and EPF Lausanne
+"Idiap Research Institute and EPFL, 2 LIMSI, CNRS, Univ. Paris-Sud, Universit Paris-Saclay"
+"Idiap Research Institute, Martigny, Switzerland"
+"Idiap Research Institute, Martigny, Switzerland, 2LIUM, University of Maine, Le Mans, France"
+"Idiap Research Institute, Switzerland"
+"Idiap Research Institute. Centre du Parc, Rue Marconi 19, Martigny (VS), Switzerland"
+"Iftm University, Moradabad-244001 U.P"
+Illinois Institute of Technology
+"Illinois Institute of Technology, Chicago, Illinois, USA"
+"Ilmenau Technical University, P.O.Box 100565, 98684 Ilmenau, Germany"
+"Image Processing Center, Beihang University"
+"Image Understanding and Interactive Robotics, Reutlingen University, 72762 Reutlingen, Germany"
+"Image and Video Laboratory, Queensland University of Technology (QUT), Brisbane, QLD, Australia"
+"Image and Video Research Laboratory, Queensland University of Technology"
+"Imaging Science and Biomedical Engineering, The University of Manchester, UK"
+Imperial College London
+Imperial College London / Twente University
+"Imperial College London, London, UK"
+"Imperial College London, On do"
+"Imperial College London, U.K"
+"Imperial College London, UK"
+"Imperial College London, United Kingdom"
+"Imperial College of Science, Technology and Medicine"
+"Imperial College, 180 Queens Gate"
+"Imperial College, London, UK"
+"Imperial College, South Kensington Campus, London SW7 2AZ, UK"
+In the Graduate College
+Inception Institute of Arti cial
+"Inception Institute of Arti cial Intelligence (IIAI), Abu Dhabi, UAE"
+"Inception Institute of Arti cial Intelligence, UAE"
+India
+"Indian Institute of Informaiton Technology, Allahabad, India"
+Indian Institute of Science
+Indian Institute of Science Bangalore
+"Indian Institute of Science, Bangalore"
+"Indian Institute of Science, India"
+Indian Institute of Technology
+"Indian Institute of Technology Delhi, New Delhi, India"
+Indian Institute of Technology Kanpur
+Indian Institute of Technology Kharagpur
+"Indian Institute of Technology Kharagpur, India"
+"Indian Institute of Technology Madras, Chennai 600036, India"
+"Indian Institute of Technology Madras, Chennai, India"
+Indian Institute of Technology Ropar
+"Indian Institute of Technology, Bombay, India"
+"Indian Institute of Technology, Kharagpur"
+"Indian Institute of Technology, Madras"
+"Indian Institute of Technology, Madras, Chennai 600036, INDIA"
+"Indian Institute of Technology, Roorkee"
+Indiana University
+Indiana University Bloomington
+"Indra Ganesan College of Engineering, Trichy, India"
+Indraprastha Institute of Information Technology
+"Indraprastha Institute of Information Technology (Delhi, India"
+"Indraprastha Institute of Information Technology, Delhi"
+Informatics Institute
+Informatics and Telematics Institute
+"Informatics and Telematics Institute, Centre for Research and Technology Hellas"
+"Informatics and Telematics Institute, Centre of Research and Technology - Hellas"
+"Information Engineering, P. O. Box 4500 FI-90014 University of Oulu, Finland"
+"Information Sciences Institute and Computer Science, University of Southern California"
+"Information Sciences Institute, USC, CA, USA"
+"Information Systems Design, Doshisha University, Kyoto, Japan"
+"Information Systems, University of Wisconsin-River Falls, Wisconsin, WI, United States of America"
+Information Technologies Institute
+"Information Technology University (ITU), Punjab, Lahore, Pakistan"
+"Information Technology, Madras Institute of Technology, TamilNadu, India, email"
+"Information and Media Processing Research Laboratories, NEC Corporation"
+"Information, Keio University"
+"Informatization Office, National University of Defense Technology, Changsha 410073, China"
+"Innopolis University, Kazan, Russia"
+"Inst. Neural Computation, University of California"
+"Institiude of Computer Science and Technology, Peking University"
+Institute
+"Institute AIFB, Karlsruhe Institute of Technology, Germany"
+"Institute Polythechnic of Leiria, Portugal"
+"Institute for Adaptive and Neural Computation, University of Edinburgh, Edinburgh, UK"
+Institute for Advanced
+Institute for Advanced Computer Studies
+"Institute for Advanced Computer Studies, University of Maryland, College Park, MD"
+Institute for Anthropomatics
+"Institute for Arts, Science and Technology"
+Institute for Communication Systems
+Institute for Complex
+Institute for Computer Graphics and Vision
+"Institute for Computer Graphics and Vision, Graz University of Technology"
+Institute for Creative Technologies
+"Institute for Disease Modeling, Intellectual Ventures Laboratory, Bellevue, WA 98004, United States"
+"Institute for Electronics, Signal Processing and Communications"
+"Institute for Genomic Statistic and Bioinformatics, University Hospital Bonn"
+Institute for Human-Machine
+Institute for Human-Machine Communication
+"Institute for Human-Machine Communication, Technische Universit at M unchen"
+"Institute for Human-Machine Communication, Technische Universit at M unchen, Germany"
+Institute for Infocomm Research
+"Institute for Infocomm Research (I2R), A*STAR, Singapore"
+"Institute for Infocomm Research, A*STAR"
+"Institute for Infocomm Research, A*STAR, Singapore"
+"Institute for Infocomm Research, Agency for Science, Technology and Research (A*STAR), Singapore"
+"Institute for Infocomm Research, Agency for Science, Technology and Research, Singapore"
+"Institute for Infocomm Research, Singapore"
+Institute for Information Systems Engineering
+"Institute for Information Technology and Communications (IIKT), Otto-von-Guericke-University"
+"Institute for Interdisciplinary Information Sciences, Tsinghua University, Beijing, China"
+"Institute for Language, Cognition and Computation"
+Institute for Media Technology
+"Institute for Medical Engineering Science, Massachusetts Institute of Technology, 77 Massachusetts Avenue, Cambridge, MA"
+Institute for Neural Computation
+"Institute for Neural Computation, University of California, San Diego"
+"Institute for Neural Computation, University of California, San Diego, La Jolla, CA"
+Institute for Numerical Mathematics
+"Institute for Optical Systems, HTWG Konstanz, Germany"
+"Institute for Pattern Recognition and Artificial Intelligence/ Huazhong University of Science and Technology, Wuhan"
+Institute for Robotics and Intelligent
+Institute for Robotics and Intelligent Systems
+"Institute for Robotics and Intelligent Systems, USC, CA, USA"
+"Institute for Robotics and Intelligent Systems, University of Southern California, CA, USA"
+"Institute for Studies in Fundamental Sciences (IPM), Tehran, Iran"
+Institute for System Programming
+Institute for Vision Systems Engineering
+"Institute for Vision and Graphics, University of Siegen, Germany"
+Institute for studies in theoretical Physics and Mathematics(IPM
+Institute of
+"Institute of Advanced Technology, Nanjing University of Posts and Telecommunications, Nanjing"
+"Institute of Anthropomatics, Karlsruhe Institute of Technology, Germany"
+Institute of Artificial Intelligence and Cognitive Engineering
+"Institute of Artificial Intelligence and Cognitive Engineering (ALICE), University of Groningen"
+"Institute of Artificial Intelligence and Robotics, Xi an Jiaotong University, Xi an 710049, China"
+"Institute of Artificial Intelligence and Robotics, Xi an Jiaotong University, Xi an, Shannxi 710049, China"
+Institute of Automatic Control
+Institute of Automatic Control Engineering (LSR
+Institute of Automation
+"Institute of Automation Chinese Academy of Sciences, Beijing, China"
+"Institute of Automation, Chinese Academy of"
+"Institute of Automation, Chinese Academy of Sciences"
+"Institute of Automation, Chinese Academy of Sciences (CASIA"
+"Institute of Automation, Chinese Academy of Sciences, 100190, Beijing, P.R.C"
+"Institute of Automation, Chinese Academy of Sciences, Beijing 100080, China"
+"Institute of Automation, Chinese Academy of Sciences, Beijing 100190, China"
+"Institute of Automation, Chinese Academy of Sciences, Beijing, P. R. China"
+"Institute of Automation, Chinese Academy of Sciences, China"
+"Institute of Automation, Chinese Academy of Sciences; 2Miscrosoft Research Asian; 3Media School"
+"Institute of Biochemistry, University of Balochistan, Quetta"
+"Institute of Child Health, University College London, UK"
+"Institute of Chinese Payment System, Southwestern University of Finance and Economics, Chengdu 610074, China"
+"Institute of Cognitive Neuroscience, University College London, London WC1N 3AR, UK. 2Affective Brain"
+"Institute of Cognitive and Behavioural Neuroscience, SWPS University of Social"
+Institute of Communications Engineering
+Institute of Computer Science
+Institute of Computer Science III
+Institute of Computer Science and
+"Institute of Computer Science and Technology, Chongqing University of Posts and"
+"Institute of Computer Science and Technology, Peking University"
+"Institute of Computer Science, Foundation for Research and Technology - Hellas (FORTH), Crete, 73100, Greece"
+"Institute of Computer science, Shahid Bahonar University"
+Institute of Computing
+Institute of Computing Technology
+"Institute of Computing Technology, CAS"
+"Institute of Computing Technology, CAS, Beijing 100190, China"
+"Institute of Computing Technology, CAS, Beijing, 100190, China"
+"Institute of Computing Technology, Chinese Academy of Sciences"
+"Institute of Computing Technology, Chinese Academy of Sciences, Beijing 100190, China"
+"Institute of Computing Technology, Chinese Academy of Sciences, Beijing, 100190, China"
+"Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China"
+"Institute of Computing, University of Campinas (Unicamp), Campinas, Brazil, e-mail: ander"
+"Institute of Data Science and Technology, Alibaba Group"
+Institute of Deep Learning
+"Institute of Deep Learning, Baidu Research"
+Institute of Digital Media
+"Institute of Digital Media, Peking University, Beijing 100871, China"
+"Institute of Digital Media, Peking University, Beijing, 100871, China"
+"Institute of Electrical Measurement and Measurement Signal Processing, TU Graz, Austria"
+Institute of Electrical and Electronics Engineers
+Institute of Electrical and Electronics Engineers (IEEE). DOI
+Institute of Electronics and Computer Science
+"Institute of Engineering and Technology, Alwar, Rajasthan Technical University, Kota(Raj"
+"Institute of Experimental Biology of Polish Academy of Sciences, Warsaw, Poland"
+Institute of Graduate Studies and Research
+"Institute of Human Genetics, University Hospital Magdeburg, Magdeburg, Germany"
+"Institute of Image Communication and Network Engineering, Shanghai Jiao Tong University"
+"Institute of Image Processing and Pattern Recognition, Shanghai Jiao Tong"
+"Institute of Image Processing and Pattern Recognition, Shanghai Jiao Tong University"
+"Institute of Image Processing and Pattern Recognition, Shanghai Jiao Tong University, Shanghai, China"
+Institute of Industrial Science
+"Institute of Industrial Science, The University of Tokyo"
+Institute of Informatics - ISLA
+"Institute of Informatics, Istanbul Technical University, Istanbul, 34469, TURKEY"
+Institute of Information Science
+"Institute of Information Science and Technologies of CNR (CNR-ISTI)-Italy, 56124 Pisa, Italy"
+"Institute of Information Science, Academia Sinica, Taipei"
+"Institute of Information Science, Academia Sinica, Taipei, Taiwan"
+"Institute of Information Science, Beijing Jiaotong University, Beijing 100044, China"
+"Institute of Information Science, Beijing Jiaotong University, Beijing 100044, P.R. China"
+Institute of Information Technology
+Institute of Interdisciplinary Studies in Identity Sciences (IISIS
+Institute of Mathematics and Statistics
+Institute of Media Innovation
+"Institute of Media and Information Technology, Chiba University"
+"Institute of Mental Health, Peking University, P.R. China"
+"Institute of Mental Health, The University of Nottingham"
+"Institute of Microelectronics, Tsinghua University, Beijing 100084, China"
+"Institute of Neural Information Processing, Ulm University, Germany"
+"Institute of Neural Information Processing, Ulm University, Ulm, Germany"
+"Institute of Neuroscience, State Key Laboratory of Neuroscience, CAS Center for Excellence in Brain"
+Institute of Psychology and Behavioral Sciences
+"Institute of Psychology, Chinese"
+"Institute of Radioelectronics, Warsaw University of Technology, Warsaw, Poland"
+Institute of Road and
+"Institute of Scientific and Industrial Research, Osaka University, Ibaraki-shi 567-0047, Japan"
+"Institute of Software, Chinese Academy of Sciences"
+"Institute of Software, Chinese Academy of Sciences (CAS"
+"Institute of Software, Chinese Academy of Sciences, Beijing 100190, China"
+"Institute of Systems Engineering, Southeast University, Nanjing, China"
+Institute of Systems and Robotics
+"Institute of Systems and Robotics - University of Coimbra, Portugal"
+"Institute of Systems and Robotics, University of Coimbra, Portugal"
+"Institute of Technology, Banaras Hindu"
+"Institute of Telecommunications, TU Wien"
+"Institute of Transportation Systems, German Aerospace Center (DLR), Braunschweig"
+Institute of control science and engineering
+"Institute ofInformation Science, Academia Sinica, Taipei, Taiwan"
+"Institute, CAS, China"
+"Integrated Research Center, Universit`a Campus Bio-Medico di Roma"
+"Intel Lab, 2200 Mission College Blvd, Santa Clara, CA 95054, USA"
+Intelligence Computing Research Center
+"Intelligence, Concordia University, Montreal"
+"Intelligent Autonomous Systems (IAS), Technical University of Munich, Garching"
+"Intelligent Behaviour Understanding Group, Imperial College London, London, UK"
+"Intelligent Information Engineering and Science, Doshisha University, Kyoto, Japan"
+"Intelligent Recognition and Image Processing Lab, Beihang University, Beijing"
+"Intelligent Sensory Interactive Systems, University of Amsterdam, Netherlands"
+"Intelligent Systems Group, University of Groningen, The Netherlands"
+"Intelligent Systems Group, Utrecht University, Padualaan 14, 3508 TB, Utrecht"
+"Intelligent Systems Lab Amsterdam, University of Amsterdam"
+"Intelligent Systems Lab Amsterdam, University of Amsterdam, The Netherlands"
+"Intelligent Systems Laboratory, Halmstad University, Halmstad, Sweden"
+"Intelligent Systems Laboratory, University of Bristol, Bristol BS8 1UB, UK"
+"Intelligent User Interfaces Lab, Ko c University, Turkey"
+"Intelligent and Interactive Systems, Institute of Computer Science, University of"
+Interactive and Digital Media Institute
+"Interactive and Digital Media Institute, National University of Singapore, SG"
+"Interactive and Digital Media Institute, National University of Singapore, Singapore"
+"Interdisciplinary Program in Visual Information Processing, Korea University, Seoul, Korea"
+"Interdisciplinary Program of Bioengineering, Seoul National University, Seoul 03080, Korea"
+International Institute of Information Technology
+"International Institute of Information Technology, Hyderabad, India"
+"International Islamic University, Islamabad 44000, Pakistan"
+International University of
+Ionian University
+Iran
+Islamic Azad University
+Islamic Azad University of AHAR
+"Islamic Azad University, Gonabad, Iran"
+"Islamic Azad University, Mashhad Branch, Mashhad, Iran"
+"Islamic Azad University, Qazvin, Iran"
+"Islamic Azad University, Science and Research Campus"
+"Islamic Azad University, Shahrood, Iran"
+Islamic University of Gaza - Palestine
+"IslamicAzad University, Qazvin, Iran"
+Istanbul Bilgi University - DCE
+Istanbul Technical University
+"Istanbul Technical University, Istanbul, 34469, TURKEY"
+"Istanbul Technical University, Istanbul, Turkey"
+"Istanbul Technical University, Turkey"
+Istanbul University
+IstanbulTechnicalUniversity
+"Italian Institute of Technology, 5Mapillary Research"
+"J. P. College of Engineering, India"
+"JACOB GOLDBERGER, Bar-Ilan University"
+"JDL, Institute of Computing Technology, CAS, P.O. Box 2704, Beijing, China"
+Jacobs University
+Jadavpur University
+"Jadavpur University, India"
+Jahangirnagar University
+"Jahangirnagar University, Savar, Dhaka 1342, Bangladesh"
+"Jaipur, Rajasthan, India"
+"Janelia Research Campus, Howard Hughes Medical Institute, Ashburn, United"
+Japan
+Japan Advanced Institute of Science and Technology
+"Japan Advanced Institute of Science and Technology, Ishikawa-ken 923-1211, Japan"
+"Japan, 2 Center for Special Needs Education, Nara University of Education, Nara-shi, Nara"
+"Jawaharlal Technological University, Anantapur"
+"Jet Propulsion Laboratory, California Institute of Technology, Pasadena, CA"
+"Jiangnan University, Wuxi"
+"Jilin University, Changchun 130012, China"
+"Jo ef Stefan Institute, Jamova 39, 1000 Ljubljana, Slovenia"
+"Johannes Kepler University(cid:1) Institute of Systems Science(cid:1) A(cid:2)"
+Johns Hopkins University
+"Johns Hopkins University, 3400 N. Charles St, Baltimore, MD 21218, USA"
+"Johns Hopkins University, 3400 N. Charles Street, Baltimore, MD 21218, USA"
+"Johns Hopkins University, Baltimore, MD, 21218, USA"
+"Johns Hopkins University, Center for Speech and Language Processing"
+"Joint Research Institute, Foshan, China"
+K S Rangasamy College of Technology
+K. N. Toosi University of
+"K.D.K. College of Engineering Nagpur, India"
+"K.K Wagh Institute of Engineering and Education Research, Nashik, India"
+"K.N. Toosi University of Technology, Tehran, Iran"
+"K.S.R. College Of Engineering, Tiruchengode, India"
+"K.S.Rangasamy College of Technology, Namakkal, TamilNadu, India"
+"KAUST1, University of Amsterdam2, Qualcomm Technologies, Inc"
+KIT University of the State of Baden-W rttemberg and National Laboratory of the Helmholtz Association
+"KTH Royal Institute of Technology, CVAP Lab, Stockholm, Sweden"
+"KTH Royal Institute of Technology, Stockholm"
+KTH Royal Institute of Technology
+"KTH, Royal Institute of Technology"
+"KU Phonetics and Psycholinguistics Lab, University of Kansas"
+Karlsruhe Institute of
+Karlsruhe Institute of Technology
+"Karlsruhe Institute of Technology, 76131 Karlsruhe, Germany"
+"Karlsruhe Institute of Technology, Germany"
+"Karlsruhe Institute of Technology, Karlsruhe, Germany"
+"Karlsruhe Institute of Technology, P.O. Box 3640, 76021 Karlsruhe, Germany"
+Katholieke Universiteit Leuven
+"Katholieke Universiteit Leuven, ESAT/VISICS"
+Keele University
+"Keio University, Yokohama 223-8522, Japan"
+Kent State University
+"Kent State University, Kent, Ohio, USA"
+"Key Lab of Intelligent Information Processing of Chinese Academy of Sciences (CAS), Institute of"
+"Key Lab of Intelligent Information Processing, Institute of Computing Technology"
+"Key Lab. of Machine Perception, School of EECS, Peking University"
+"Key Lab. of Machine Perception, School of EECS, Peking University, China"
+"Key Laboratory of Behavior Sciences, Institute of Psychology"
+"Key Laboratory of Computer Network and Information Integration of Ministry of Education, Southeast University, Nanjing"
+"Key Laboratory of Embedded System and Service Computing, Ministry of Education, Tongji University, Shanghai, China"
+"Key Laboratory of MOEMS of the Ministry of Education, Tianjin University, 300072, China"
+"Key Laboratory of Machine Perception (MOE), School of EECS, Peking University"
+"Key Laboratory of Machine Perception, Peking University, Beijing"
+"Key Laboratory of Pervasive Computing (Tsinghua University), Ministry of Education"
+"Key Laboratory of Specialty Fiber Optics and Optical Access Networks, Shanghai University"
+"Key Laboratory of Transient Optics and Photonics, Xi an Institute of Optics and Precision Mechanics, Chi"
+"Khalifa University, Abu Dhabi, United Arab Emirates"
+Khulna University of Engineering and Technology
+"King Abdullah University of Science and Technology (KAUST), Saudi Arabia"
+"King Abdullah University of Science and Technology 4700, Thuwal, Saudi Arabia"
+King Faisal University
+"King Saud University, KSA"
+"King Saud University, P.O. Box 51178, Riyadh 11543, Saudi Arabia"
+"King Saud University, Riyadh"
+"King Saud University, Riyadh 11543, Saudi Arabia"
+"King Saud University, Riyadh, Saudi Arabia"
+"King s College London, UK"
+Kingston University
+"Kingston University London, University of Westminster London"
+"Kingston University, UK"
+"Kitware, Inc"
+Kobe University
+"Kobe University, NICT and University of Siegen"
+"Kodak Research Laboratories, Rochester, NY"
+"Kodak Research Laboratories, Rochester, New York"
+"Kogakuin University, Tokyo, Japan"
+"Kong Polytechnic University, Kowloon, Hong Kong"
+"Kongju National University, South Korea"
+Korea Advanced Institute of Science and Technology
+Korea Advanced Institute of Science and Technology (KAIST
+Korea Advanced institute of Science and Technology
+"Korea Electronics Technology Institute, 203-103 B/D 192, Yakdae-Dong"
+"Korea Electronics Technology Institute, Jeonju-si, Jeollabuk-do 561-844, Rep"
+"Korea Electronics Technology Institute, Jeonju-si, Jeollabuk-do 561-844, Rep. of"
+"Korea Electronics Technology Institute, Jeonju-si, Jeollabuk-do 561-844, Rep. of Korea; E"
+Korea University
+"Korea University, Seoul 136-713, Korea"
+"Korean Research Institute of Standards and Science (KRISS), Korea"
+"Kota University, Kota(INDIA"
+"Kulhare, Sourabh, ""Deep Learning for Semantic Video Understanding"" (2017). Thesis. Rochester Institute of Technology. Accessed"
+"Kumamoto University, 2-39-1 Kurokami, Kumamoto shi"
+"Kurukshetra University, Kurukshetra"
+"Kurukshetra University, Kurukshetra, India"
+"Kurukshetra University, Kurukshetra-136 119, Haryana, INDIA"
+"Kwangwoon University, 447-1 Wolge-dong, Nowon-Gu, Seoul 139-701, Korea"
+"Kyoto University, Kyoto, Japan"
+Kyung Hee University
+Kyung Hee University South of Korea
+"Kyung Hee University, South Korea"
+"Kyung Hee University, Yongin, Rep. of Korea"
+Kyushu University
+"L3S Research Center, Hannover, Germany"
+"L3S Research Center, Leibniz Universit at Hannover, Germany"
+"LCSEE, West Virginia University"
+"LIACS Media Lab, Leiden University, The Netherlands"
+"LIMSI, CNRS, University of Paris-Sud, Orsay, France"
+"LIP6, UPMC - Sorbonne University, Paris, France"
+"LITIS EA 4108-QuantIF Team, University of Rouen, 22 Boulevard Gambetta, 76183 Rouen Cedex, France"
+"LIUM Laboratory, Le Mans, France, 2 Idiap Research Institute, Martigny, Switzerland"
+"La Trobe University, Australia"
+"Lab of Science and Technology, Southeast University, Nanjing 210096, China"
+"Lab, University College London, London WC1H 0AP, UK. 3Clinical"
+"Laboratory for Human Brain Dynamics, RIKEN Brain Science Institute (BSI), 2-1 Hirosawa, Wakoshi, Saitama, 351-0198, Japan"
+"Laboratory of Pattern Recognition, Institute of Automation, CAS"
+"Laboratory, University of Houston, Houston, TX, USA"
+Lafayette College
+Lake Forest College
+Lake Forest College Publications
+Language Technologies Institute
+"Language Technologies Institute, Carnegie Mellon University"
+"Language Technologies Institute, Carnegie Mellon University, PA, USA"
+"Language Technologies Institute, School of Computer Science"
+Language Technology Institute
+"Language Technology Institute, Carnegie Mellon University, Pittsburgh, PA, USA"
+"Language Technology Institute, Carnegie Mellon Universty"
+"Language Technology Lab, University of Duisburg-Essen"
+"Language and Brain Lab, Simon Fraser University, Canada"
+"Laval University, Qu bec, Canada"
+"Learning Systems Group, California Institute of Technology"
+"Lecturer, Amity school of Engineering and Technology, Amity University, Haryana, India"
+Leeds Beckett University
+"Leiden Institute of Advanced Computer Science, Leiden University, The Netherlands"
+"Leiden University, Netherlands"
+"Leiden, the Netherlands, 3 Delft University of Technology"
+"Lille 1 University, France"
+"Link oping University, Computer Vision Laboratory"
+"Link oping University, SE-581 83 Link oping, Sweden"
+Link ping University
+Link to publication from Aalborg University
+Link to publication in University of Groningen/UMCG research database
+Link to publication record in Queen's University Belfast Research Portal
+"Lister Hill National Center for Biomedical Communications, National Library of Medicine, National Institutes of Health"
+"Lite-On Singapore Pte. Ltd, 2Imperial College London"
+Liverpool John Moores University
+Lomonosov Moscow State University
+"Lomonosov Moscow State University, 2Video Analysis Technologies, LLC"
+"Lomonosov Moscow State University, Faculty of Computational Mathematics and Cybernetics"
+"Lotus Hill Institute for Computer Vision and Information Science, 436000, China"
+Louisiana State University
+"Lund University, Cognimatics AB"
+"Lund University, Lund, Sweden"
+"M S Ramaiah Institute of Technology, Bangalore, Karnataka, India"
+M. Mark Everingham University of Leeds
+"M.H Saboo Siddik College of Engineering, University of Mumbai, India"
+"M.P.M. College, Bhopal, India"
+"M.S. (University of California, Berkeley"
+M.S. Brunel University of West London
+M.S. University of Central Florida
+"M.S., Electrical and Computer Engineering, Carnegie Mellon University"
+"M.S., University of Memphis"
+"M.Tech Scholar, MES College of Engineering, Kuttippuram"
+"M.Tech Student, Mount Zion College of Engineering, Pathanamthitta, Kerala, India"
+"M.Tech Student, SSG Engineering College, Odisha, India"
+"M.Tech, Information Technology, Madras Institute of Technology, TamilNadu, India"
+"M.Tech, Sri Sunflower College of Engineering and Technology, Lankapalli"
+"M.tech.student, Arya College of"
+MASSACHUSETTS INSTITUTE OF TECHNOLOGY
+MASSACHUSETTS INSTITUTE OF TECHNOLOGY (MIT
+"MATS University, MATS School of Engineering and Technology, Arang, Raipur, India"
+"MCA Student, Sri Manakula Vinayagar Engineering College, Pondicherry"
+"MES College of Engineering, Kuttippuram"
+METs Institute of Engineering
+MICC University of Florence
+MICC - University of Florence
+"MICC, University of Florence"
+"MILA-University of Montreal, 2NVIDIA, 3Ecole Polytechnique of Montreal, 4CIFAR, 5Facebook AI Research"
+"MIRA Institute, University of Twente, Enschede, The"
+"MIRACL-FS, University of Sfax"
+"MIRACL-FSEG, University of Sfax"
+"MISC Laboratory, Constantine 2 University, Constantine, Algeria"
+MIT College of Engineering (Pune University
+"MIT, McGovern Institute, Center for Brains, Minds and Machines"
+MITSUBISHI ELECTRIC RESEARCH LABORATORIES
+"MOE Key Laboratory of Computer Network and Information Integration, Southeast University, China"
+"MRC Centre for Neuropsychiatric Genetics and Genomics, Cardiff University, Cardiff"
+"MRC Laboratory For Molecular Cell Biology, University College London"
+"MTech Student 1, 2, Disha Institute of"
+MULTIMEDIA UNIVERSITY
+Macau University of Science and
+Macau University of Science and Technology
+"Macau University of Science and Technology, Macau"
+"Machine Intelligence Group, School of Computer Science, Indian Institute of Information Technology and"
+"Machine Intelligence Lab (MIL), Cambridge University"
+"Machine Intelligence Laboratory, College of Computer Science, Sichuan University"
+"Machine Perception Laboratory, University of California, San Diego"
+"Machine Vision Group, P.O. Box 4500, FI-90014, University of Oulu, Finland"
+"Machine Vision Lab, Faculty of Environment and Technology, University of the West of England"
+"Mackenzie Presbyterian University, S o Paulo, S o Paulo, Brazil"
+"Madanapalle Institute of Technology and Science, Madanapalle, Andhra Pradesh"
+Mahanakorn University of Technology
+Mahatma Gandhi Institute of Technology
+Malaviya National Institute of Technology
+"Mancha, Spain, Imperial College, London, UK"
+"Manchester University, UK"
+"Mangalore Institute of Engineering and Technology, Badaga"
+"Mangalore Institute of Technology and Engineering, Moodabidri, Mangalore, India"
+Manipur Institute of Technology
+"Manonmaniam Sundaranar University, India"
+"Manonmaniam Sundaranar University, Tirunelveli"
+"Manonmaniam Sundaranar University, Tirunelveli, India"
+Mansfield College
+"Marine Institute, via Torre Bianca, 98164 Messina Italy"
+Marquette University
+Massachusettes Institute of Technology
+Massachusetts Institute
+Massachusetts Institute of Technology
+Massachusetts Institute of Technology (MIT
+Massachusetts Institute of Technology 2013. All rights reserved
+Massachusetts Institute of Technology 2014. All rights reserved
+Massachusetts Institute of Technology Rapporteur
+"Massachusetts Institute of Technology, 2Facebook Applied Machine Learning, 3Dartmouth College"
+"Massachusetts Institute of Technology, 77 Massachusetts Avenue, Cambridge, MA"
+"Massachusetts Institute of Technology, Cambridge, MA 02139, USA"
+"Massachusetts Institute of Technology, Cambridge, MA USA"
+"Master of Computer Engg, Savitribai Phule Pune University, G. H. Raisoni Collage of Engg and Technology, Wagholi, Pune"
+Math Institute
+Max Planck Institute f ur biologische Kybernetik
+Max Planck Institute for Biological Cybernetics
+"Max Planck Institute for Biological Cybernetics, Spemannstr. 38, 72076 T bingen, Germany"
+"Max Planck Institute for Evolutionary Anthropology, Germany"
+Max Planck Institute for Informatics
+"Max Planck Institute for Informatics, Germany"
+"Max Planck Institute for Informatics, Saarbr ucken, Germany"
+"Max Planck Institute for Informatics, Saarbr ucken, Germany (MPI-INF.MPG.DE"
+"Max Planck Institute for Informatics, Saarland Informatics Campus"
+"Max Planck Institute for Informatics, Saarland Informatics Campus, Germany"
+"Max Planck Institute for Informatics, Saarland Informatics Campus, Saarbr cken, Germany"
+"Max Planck Institute for Intelligent Systems, T ubingen, Germany"
+Max-Planck Institute for Informatics
+Max-Planck-Institute for Informatics
+McGill University
+"McGill University, Montreal, Canada"
+McGovern Institute
+McGovern Institute for Brain Research
+McMaster University
+"Medical Image Analysis Lab, School of Computing Science, Simon Fraser University, Canada"
+"Medical Research Council Human Genetics Unit, Institute of Genetics and Molecular"
+"Medical School, University of Ioannina, Ioannina, Greece"
+Meiji University
+"Melbourne University, Advisors: K. Borovkov, R. Evans"
+"Menara, 1008 Tunis; 2University of Tunis El-Manar, Tunis with expertise in Mechanic, Optics, Biophysics, Conference Master"
+"Metron, Inc"
+Michigan State University
+"Michigan State University, 3115 Engineering Building"
+"Michigan State University, E. Lansing, MI 48823, USA"
+"Michigan State University, East Lansing MI"
+"Michigan State University, East Lansing, MI 48824, U.S.A"
+"Michigan State University, East Lansing, MI 48824, USA"
+"Michigan State University, East Lansing, MI, U.S.A"
+"Michigan State University, East Lansing, MI, USA"
+"Michigan State University, East Lansing, USA"
+"Michigan State University, MI"
+"Michigan State University, NEC Laboratories America"
+"Michigan State University, USA"
+"Microsystems Design Lab, The Pennsylvania State University"
+Middle East Technical University
+Middlebury College
+Middlesex University London
+"Middlesex University London, 4International Hellenic University"
+"Middlesex University London, London, UK"
+"Middlesex University London, UK"
+"Middlesex University, London"
+Mihaylo College of Business and Economics
+"Minia University, Egypt"
+Ministry of Higher Education and Scientific Research / The University of Mustsnsiriyah/Baghdad IRAQ
+Mitsubishi Electric Research Laboratory
+Mitsubishi Electric Research Labs
+Mitsubishi Electric Research Labs (MERL
+"Mitsubishi Electric Research Labs (MERL), Cambridge, MA, USA"
+Mme Tinne Tuytelaars Katholieke Universiteit Leuven
+Monash University
+"Monash University Malaysia, School of Information Technology, Sunway"
+"Monash University, Australia"
+"Monash University, Victoria, Australia"
+"Montefiore Institute, University of Li ge, 4000 Li ge, Belgium"
+Montreal Institute for Learning Algorithms
+"Montreal Institute for Learning Algorithms, Universit e de Montr eal"
+Moradabad Institute of Technology
+"Moscow Institute of Physics and Technology, Institutskiy per., 9, Dolgoprudny, 141701, Russia"
+"Moscow Institute of Physics and Technology, Russia"
+"Moscow State University, dept. of Computational Mathematics and Cybernetics"
+"Most of the earlier studies mentioned above, including ours"
+"Motorola China Research Center, Shanghai, 210000, P.R.China"
+"Motorola, Inc"
+"Much is known on how facial expressions of emotion are produced, including which individual muscles are most active in"
+Muhlenberg College
+Multimedia Laboratory at The Chinese University of Hong Kong
+"Multimedia Laboratory, The Chinese University of Hong Kong, Hong Kong"
+Multimedia University
+"Multimedia University (MMU), Cyberjaya, Malaysia"
+"Multimedia University, Cyberjaya, Malaysia"
+"Multimedia University, Faculty of Computing and Informatics, Cyberjaya"
+"Multimedia University, Faculty of Engineering, Cyberjaya, 63100 Selangor, Malaysia"
+"Multimedia University, Research Institute for Digital Security, Cyberjaya"
+"Multimedia, Vision and Graphics Laboratory, Koc University, Istanbul, Turkey"
+"Multimodal Computing and Interaction, Saarland University, Germany"
+Murdoch University
+Myongji University
+"Myongji University, Yongin, 449-728 South"
+"NEC Laboratories America, Inc"
+"NEC Laboratories America, Inc., Cupertino, CA"
+"NICTA , Queensland Research Laboratory, QLD, Australia"
+"NICTA, and Australian National University"
+NSS College of Engineering
+"Nagaoka University of Technology, Japan"
+"Nagoya Institute of Technology, Gokiso, Showa-ku, Nagoya, 466-8555 Japan"
+Nagoya University
+"Nagoya University, Japan"
+"Najafabad Branch, Islamic Azad University"
+Nam k Kemal University
+"Nam k Kemal University, Tekirda g, Turkey"
+"Nancy E. and Peter C. Meinig School of Biomedical Engineering, Cornell University, Ithaca NY"
+"Nanjing University of Aeronautics and Astronautics, China"
+"Nanjing University of Aeronautics and Astronautics, Nanjing 210016, China"
+Nanjing University of Information Science and Technology
+"Nanjing University of Information Science and Technology, Nanjing, 210044, China"
+Nanjing University of Science and
+Nanjing University of Science and Technology
+"Nanjing University of Science and Technology, China"
+"Nanjing University, China"
+"Nanjing University, Nanjing 210023, China"
+"Nanjing University, Nanjing 210093, China"
+"Nanjing University, Nanjing 210093, P.R.China"
+"Nanjing, 210023, China, 4 School of Computer Science and Technology, Nanjing University of Posts and"
+"Nanjing, 210094, China, 3 School of Automation, Nanjing University of Posts and Telecommunications"
+Nanyang Technological University
+"Nanyang Technological University, 2University of California San Diego"
+"Nanyang Technological University, Singapore"
+"Nanyang Technological University, Singapore 639798, Singapore"
+"Nanyang Technological University, Singapore, Singapore"
+"Narayana Pharmacy College, Nellore, India"
+National Cheng Kung University
+"National Cheng Kung University, Tainan, Taiwan"
+"National Cheng Kung University, Tainan, Taiwan, R.O.C"
+"National Cheng Kung University, Tainan, Taiwan, ROC"
+"National Chiao Tung University, Taiwan"
+National Chiao-Tung University
+"National Chung Cheng University, Chiayi, Taiwan, R.O.C"
+"National Demonstration Center for Experimental Electrical and Electronic Education, Yangtze University"
+"National Engineering Research Center for Multimedia Software, Wuhan University, Wuhan, China"
+"National Formosa University, Taiwan"
+National Institute of Advanced Industrial
+National Institute of Advanced Industrial Science and Technology
+National Institute of Advanced Industrial Science and Technology (AIST
+"National Institute of Advanced Industrial Science and Technology (AIST), Tsukuba 305-8560, Japan"
+National Institute of Development Administration
+National Institute of Informatics
+"National Institute of Informatics, Japan"
+"National Institute of Informatics, Tokyo, Japan"
+"National Institute of Optics, National Research Council, Arnesano, LE, Italy"
+National Institute of Standards and Technology
+"National Institute of Standards and Technology, Gaithersburg, MD 20899, USA"
+National Institute of Technology
+National Institute of Technology Karnataka
+National Institute of Technology Rourkela
+"National Institute of Technology, Durgapur, West Bengal, India"
+"National Institute of Technology, Toyota College, Japan"
+National Institutes of Health
+"National Kaohsiung University of Applied Sciences, Kaohsiung, Kaohsiung, Taiwan, ROC"
+"National Key Laboratory for Novel Software Technology, Nanjing University, China"
+"National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210093, China"
+"National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, 210023, China"
+"National Lab of Pattern Recognition, Institute of Automation"
+"National Laboratory for Parallel and Distributed Processing, National University of Defense Technology, Changsha, China"
+"National Laboratory of Pattern Recognition (NLPR), Institute of Automation"
+"National Laboratory of Pattern Recognition Institute of Automation, Chinese Academy of Sciences"
+"National Laboratory of Pattern Recognition, Institute of Automation"
+"National Laboratory of Pattern Recognition, Institute of Automation, CAS, Beijing 100190, P. R. China"
+"National Laboratory of Pattern Recognition, Institute of Automation, CAS, Beijing, 100190, China"
+"National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of"
+"National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences"
+"National Laboratory on Machine Perception, Peking University, Beijing, P.R. China"
+"National Research Council of Italy, Institute for Microelectronics and Microsystems, Lecce"
+National Research University Higher
+National Research University Higher School of Economics
+"National Research University Higher School of Economics, Nizhny Novgorod, Russian"
+"National Sun Yat Sen University, 804 Kaohsiung, Taiwan"
+"National Taichung University of Science and Technology, Taichung, Taiwan, R.O.C"
+National Taipei University
+National Taiwan University
+National Taiwan University of Science and
+National Taiwan University of Science and Technology
+"National Taiwan University of Science and Technology, Taipei 10607, Taiwan"
+"National Taiwan University, Taipei, Taiwan"
+"National Taiwan University, Taiwan"
+National Technical University of Athens
+"National Technical University of Athens, 15780 Athens, Greece"
+"National Tsing Hua University, 101 Kuang Fu Road, Section 2, Hsinchu 300, Taiwan"
+"National Tsing Hua University, Hsin-Chu, Taiwan"
+"National Tsing Hua University, Taiwan"
+"National Tsing-Hua University, Hsin-Chu, Taiwan"
+National University
+"National University of Computer and Emerging Sciences (NUCES-FAST), Islamabad, Pakistan"
+National University of Defense
+National University of Defense Technology
+"National University of Defense Technology, Changsha 410073, China"
+"National University of Defense Technology, Changsha, China"
+"National University of Ireland Maynooth, Co. Kildare, Ireland"
+"National University of Kaohsiung, 811 Kaohsiung, Taiwan"
+National University of Science and Technology
+"National University of Sciences and Technology (NUST), Islamabad, Pakistan"
+National University of Singapore
+"National University of Singapore Research Institute, Suzhou, China"
+"National University of Singapore, 2Shanghai Jiao Tong University"
+"National University of Singapore, Singapore"
+National University of Technology Technology
+National University of singapore
+"Netherlands, Donders Institute, Radboud University, Nijmegen, The"
+"Netherlands, Utrecht University, Utrecht, The Netherlands"
+"Neurological Institute, USA"
+"Neuroscience, Icahn School of Medicine at Mount Sinai, Friedman Brain Institute, New York, NY, United States"
+New Jersey Institute of Technology
+"New Jersey Institute of Technology, USA"
+New York University
+"New York University Shanghai, 1555 Century Ave, Pudong"
+"New York University, Brooklyn, NY, USA"
+"Newcastle University, Newcastle upon Tyne"
+"Newcastle University, UK"
+Ningxia University
+No Institute Given
+Nokia Bell Labs and University of Oxford
+"Nokia Research Center, Tampere, Finland"
+"Normal University, Kunming, China"
+North Carolina AandT State University
+North Carolina Central University
+"North Carolina State University, Raleigh, NC, USA"
+"North Carolina State University, Raleigh, USA"
+"North China Electric Power University, Baoding, China"
+North China University of Technology
+"North China University of Technology, Beijing 100144 CHINA"
+"North Dakota State University, Fargo, ND 58108-6050, USA"
+"North Dakota State University, Fargo, ND58105, USA"
+Northeastern University
+Northeastern University 2Microsoft Research 3City University of New York
+"Northeastern University, Boston, MA"
+"Northeastern University, Boston, MA, USA"
+"Northeastern University, Boston, USA"
+"Northeastern University, MA, USA"
+Northumbria University
+"Northumbria University, Newcastle Upon-Tyne NE21XE, UK"
+"Northumbria University, Newcastle upon Tyne, NE1 8ST, UK"
+Northwestern Polytechnical University
+Northwestern University
+Northwestern University) to T.E. We thank Vincent De Gardelle for helpful comments on an earlier version of
+Nottingham Trent University
+"Nottingham Trent University, Nottingham, UK"
+"Nottingham University Hospital, Nottingham, UK"
+National Institute of Standards and Technology
+"Numediart Institute, University of Mons"
+OF PRINCETON UNIVERSITY
+OF STANFORD UNIVERSITY
+Oakland University
+Odaiyappa College of
+Okayama University
+"Open Lab, School of Computing, Newcastle University, UK"
+"Optics and Engineering Informatics, Budapest University of Technology and Economics"
+Opus College of Engineering
+Oregon State University
+"Organization, University of Twente, Enschede, The Netherlands, HAN"
+"Osaka University, 1-5 Yamadaoka, Suita-shi, Osaka, Japan"
+"Other uses, including reproduction and distribution, or selling or"
+Otto von Guericke University
+Otto-von-Guericke University Magdeburg
+Oxford Brookes University
+"Oxford Brookes University, Oxford, United Kingdom"
+"Oxford Brookes University, UK"
+Oxford University
+"Oxford University, UK"
+"Özyeğin University, Istanbul, Turkey"
+"P A College of Engineering, Nadupadavu"
+"P. O. Box 4500 FI-90014 University of Oulu, Finland"
+"P. O. Box 4500 Fin-90014 University of Oulu, Finland"
+"P. R. Patil College of Engineering, Amravati Maharashtra India"
+"P.A. College of Engnineering, Mangalore"
+"P.G. Student, SRV Engineering College, sembodai, India"
+"P.S.R Engineering College, Sivakasi, Tamilnadu, India"
+"PES Institute of Technology, Bangalore, Karnataka, India"
+PES Modern College of Engg
+"PG Scholar, Dr.Pauls Engineering College, Villupuram District, Tamilnadu, India"
+"PG Scholar, Hindusthan College of Engineering and Technology, Coimbatore, India"
+"PG scholar, Communication Systems, Adhiyamaan College of Engineeing, Hosur, (India"
+"PSG College of Technology, Coimbatore, Tamil Nadu, India"
+"PSGR Krishnammal College for Women, Coimbatore"
+Palo Alto Research Center (PARC
+"PanimalarInstitute of Technology, Tamilnadu, India"
+"Paraná Federal University, Curitiba, Brazil"
+Parisutham Institute of Technology and Science
+"Pathological anxiety is associated with disrupted cognitive processing, including working memory and"
+"Pattern Recognition Group, University of Siegen"
+"Pattern Recognition and Bio-informatics Laboratory, Delft University of Technology, THE NETHERLANDS"
+"Pattern Recognition and Bioinformatics Group, Delft University of Technology"
+"Pattern Recognition and Bioinformatics Group, Delft University of Technology, The Netherlands"
+"Paul G. Allen School of Computer Science and Engineering, University of Washington"
+Peking University
+"Peking University, Beijing, China"
+"Peking University, China"
+Pennsylvania
+Pennsylvania State University
+"Perceptive Automata, Inc"
+"Ph.D Research Scholar, Karpagam University, Coimbatore, Tamil Nadu, India"
+"Ph.D student Zaid Shhedi, Doctoral School of Automatic Control and Computers, University"
+"Phiar Technologies, Inc"
+"Physical Sciences, University"
+Plymouth University
+Plymouth University - CRNS
+Pohang University of Science and Technology
+Politehnica University of Timisoara
+"Polytechnic Institute of NYU, NY, USA"
+Polytechnic University of Bucharest
+"Polytechnic University of Catalonia, Barcelona, 4National Taiwan University, Taipei, 5University of"
+"Polytechnic University of Milan, Milan, 20156, Italy, 3 Applied Electronics"
+"Pompeu Fabra University, Spain"
+Pondicherry Engineering College
+"Pontifical Catholic University of Rio de Janeiro, Brazil"
+Portland State University
+"Portland State University, USA"
+Portugal
+Poznan University of Technology
+"Prince of Songkla University, Hat Yai, Songkhla, 90112 Thailand"
+Princeton University
+"Princeton University, Princeton, NJ, USA"
+"Princeton University, Princeton, New Jersey, USA"
+"Principal, Chaithanya Institute of Engineering and Technology, Kakinada, AP, India"
+"Principal, JNTUH College of Engineering, jagitial, Karimnagar, AP, India"
+"Priyadarshini College of Engg, Nagpur, India"
+Processing (pp. 1477-1481). [978-1-5090-4117-6/17] Institute of Electrical and Electronics Engineers (IEEE
+"Program of Computational Science and Engineering, Boğaziçi University"
+"Proto Labs, Inc"
+Psychiatry at the University of Pittsburgh
+"Psychology and Educational Sciences, Open University of the Netherlands, Valkenburgerweg"
+"Psychology and Psychiatry, University of Pittsburgh, USA"
+"Psychology, American University"
+"Psychology, University of"
+"Psychology, University of Illinois, Beckman Institute, Urbana-Champaign, Illinois 61801, University of"
+"Psychonomic Society, Inc"
+"Psychopharmacology Unit, Educational and Health Psychology, University College"
+"Public University of Navarra, Spain"
+"Publication details, including instructions for authors and subscription"
+"Publication details, including instructions for authors and subscription information"
+"Pune Institute of Computer Technology, Pune, ( India"
+"Pune Institute of Computer Technology, Pune, India"
+Punjabi University Patiala
+Purdue Institute for Integrative Neuroscience
+Purdue University
+"Purdue University, 2Nanjing University"
+"Purdue University, West Lafayette, IN 47907, USA"
+"Purdue University, West Lafayette, IN, USA"
+"Purdue University, West Lafayette, IN. 47907, USA"
+"Purdue University, West Lafayette, Indiana, 47906, USA"
+"Pursuing M.Tech, Caarmel Engineering College, MG University, Kerala, India"
+"QCIS Centre, FEIT, University of Technology, Sydney, NSW 2007, Australia"
+"QCIS, University of Technology Sydney, Sydney, Australia"
+"QCIS, University of Technology, Sydney"
+"Qatar Computing Research Institute, HBKU"
+"Qatar University, Qatar"
+Qihoo 360 AI Institute
+"Qihoo 360 AI Institute, Beijing, China"
+"Quantified Employee unit, Finnish Institute of Occupational Health"
+"Quantitative Employee unit, Finnish Institute of Occupational Health"
+"Queen Mary College, London"
+Queen Mary University
+Queen Mary University of London
+"Queen Mary University of London, London E1 4NS, UK"
+"Queen Mary University of London, UK"
+"Queen Mary, University of London"
+"Queen Mary, University of London, E1 4NS, UK"
+"Queen Mary, University of London, London E1 4NS, UK"
+Queen s University Belfast
+Queen's University Belfast - Research Portal
+"Queensland Micro- and Nanotechnology Centre and Griffith School of Engineering, Griffith University"
+Queensland University of Technology
+Queensland University of Technology (QUT
+Queensland University of Technology(QUT
+"Queensland University of Technology, Australia"
+"Queensland University of Technology, Brisbane, QLD, Australia"
+"Queensland University of Technology, Brisbane, Queensland, Australia"
+"R. Campellone, 3210 Tolman Hall, University of California, Berkeley"
+"R.C.Patel Institute of Technology, Shirpur, Dist.Dhule.Maharashtra, India"
+"RCC Institute of Information Technology, Kolkata, India"
+"RGPV University, Indore"
+"RIEB, Kobe University, Kobe, 657-8501, Japan"
+"RMIT University, Australia"
+"RTM Nagpur University, Campus Nagpur, (MS)-India"
+"RTMNU Nagpur University, India"
+RWTH Aachen University
+"RWTH Aachen University, Aachen, Germany"
+"RWTH Aachen University, Germany"
+"Rachel Merchak, Wittenberg University"
+"Ragon Institute of MGH, MIT and Harvard"
+Raipur institute of technology
+"Rayalaseema University Kurnool, Andhra Pradesh"
+"Recanati Genetic Institute, Rabin Medical Center and Schneider Children s Medical Center, Petah Tikva, Israel"
+"Recognition, Institute of Automation"
+"Recognition, Institute of Automation, Chinese Academy of Sciences"
+"Recognition, Institute of Automation, Chinese Academy of Sciences, Beijing, China"
+"Remote Sensing Unit Optics, Optometry and Vision Sciences Group, University of Beira Interior"
+Renmin University of China
+Rensselaer Polytechnic Institute
+"Rensselaer Polytechnic Institute, 110 Eighth Street, Troy, NY 12180 USA"
+"Rensselaer Polytechnic Institute, Troy, NY 12180, USA"
+"Rensselaer Polytechnic Institute, USA"
+Research Center
+Research Center E. Piaggio
+"Research Center E. Piaggio , University of Pisa, Pisa, Italy, 2 Faculty of Psychology, University of Florence, Florence, Italy"
+"Research Center CENTIA, Electronics and Mechatronics"
+Research Center and Laboratoire
+"Research Center for Cognitive and Behavioral Sciences, Tehran University of Medical Sciences, Tehran, Iran"
+Research Center for Information
+"Research Center for Information Technology Innovation, Academia Sinica"
+"Research Center for Information Technology Innovation, Academia Sinica, Taipei, Taiwan"
+"Research Center for Intelligent Security Technology, CIGIT"
+"Research Center for Learning Science, Southeast University, China"
+"Research Center for Learning Science, Southeast University, Nanjing 210096, China"
+"Research Center for Learning Science, Southeast University, Nanjing, China"
+"Research Center in Information Technologies, Université de Mons, Belgium"
+"Research Groups on Intelligent Machines, University of Sfax, Sfax 3038, Tunisia"
+"Research Institute of Child Development and Education, University of Amsterdam, Utrecht, The"
+"Research Institute of Shenzhen, Wuhan University, Shenzhen, China"
+"Research Institute, 138 Gajeongno, Yuseong-gu, Daejeon, 305-700, Republic of Korea"
+"Research Institute, Watchdata Inc., Beijing, China"
+"Research Reports of CMP, Czech Technical University in Prague, No"
+"Research Scholar (M.Tech, IT), Institute of Engineering and Technology"
+"Research Scholar, CGC Group of Colleges, Gharuan, Punjab, India"
+"Research Scholar, PSGR Krishnammal College for Women, Coimbatore"
+"Research School of Computer Science, The Australian National University, ACT 2601, Australia"
+"Research School of Engineering, The Australian National University, ACT 2601, Australia"
+"Research University, ENS/INRIA/CNRS UMR 8548, Paris, France"
+Reutlingen University
+"Rheinische-Friedrich-Wilhelms University, Bonn, Germany"
+Rice University
+"Rice University, Houston, TX, 77005, USA"
+"Rio de Janeiro State University, Brazil"
+"Ritsumeikan University, Japan"
+"Ritsumeikan University, Kyoto, Japan"
+"Ritsumeikan, University"
+"Rm 1365, Stanford University, 401 Quarry Road, Stanford, CA"
+"Robotic Research Centre, Nanyang Technological University, Singapore 639798, Singapore"
+Robotics Institute
+"Robotics Institute, Carnegie Mellon University"
+"Robotics Institute, Carnegie Mellon University 3University of Pittsburgh, USA"
+"Robotics Institute, Carnegie Mellon University, Pittsburgh PA"
+"Robotics Institute, Carnegie Mellon University, Pittsburgh, PA"
+"Robotics Institute, Carnegie Mellon University, Pittsburgh, PA, 15213. USA"
+"Robotics Institute, Carnegie Mellon University, Pittsburgh, PA, USA"
+"Robotics Institute, Carnegie Mellon University, Pittsburgh, Pennsylvania"
+"Robotics Institute, Carnegie Mellon University, USA"
+"RoboticsResearchGroup, UniversityofOxford, Oxford, UK"
+"Rochester Human-Computer Interaction (ROC HCI), University of Rochester, NY"
+Rochester Institute of Technology
+"Rochester Institute of Technology - 83 Lomb Memorial Drive, Rochester, NY USA"
+"Rochester Institute of Technology, Rochester, NY"
+"Ross School of Business, University of Michigan, Ann Arbor, MI, USA"
+Rowan University
+Rowland Institute
+Rowland Institute at Harvard
+"Rowland Institute at Harvard, Cambridge, MA 02142, USA"
+Ruhr University Bochum
+"Ruhr-University Bochum, Germany"
+Rutgers University
+"Rutgers University Newark, 101 Warren St., Newark, NJ, 07102 USA"
+"Rutgers University, 94 Brett Rd, Piscataway Township, NJ 08854, USA"
+"Rutgers University, Computer and Information Sciences, 110 Frelinghuysen Road, Piscataway, NJ"
+"Rutgers University, NJ, USA"
+"Rutgers University, New Brunswick, NJ"
+"Rutgers University, Newark, NJ, USA"
+"Rutgers University, Piscataway NJ 08854, USA"
+"Rutgers University, Piscataway, NJ"
+"Rutgers University, Piscataway, NJ 08854, USA"
+"Rutgers University, USA"
+"Rutgers, The State University of New Jersey"
+"Rutgers, The State University of New Jersey, 508 CoRE, 94 Brett Rd, Piscataway, NJ"
+"Rutgers, The State University of New Jersey, 723 CoRE, 94 Brett Rd, Piscataway, NJ"
+"Rutgers, The State University of New Jersey, Piscataway, NJ"
+"Ryerson University, Canada"
+"Ryerson University, Toronto, Canada"
+"S J B Institute of Technology, Bangalore, Karnataka, India"
+"S.R.C.E.M, Banmore, RGPV, University, Bhopal, Madhya Pradesh, India"
+SAMSI and Duke University
+"SASTRA University, Thanjavur, Tamil Nadu, India"
+"SBK Women s University, Quetta, Balochistan"
+"SHIRI AZENKOT, Information Science, Cornell Tech, Cornell University"
+"SICE, Beijing University of Posts and Telecommunications. 2Center for Imaging Science, Johns Hopkins University"
+SIMON FRASER UNIVERSITY
+"SRI International, Menlo Park California / *Brooklyn College, Brooklyn New York"
+"SRM University, Kattankulathur, Chennai-603 203, Tamilnadu, India"
+"SRV Engineering College, sembodai, india"
+"SSESA, Science College, Congress Nagar, Nagpur, (MS)-India"
+"SSN College of Engineering, Chennai, India"
+"SSN College of Engineering, Kalavakkam, Tamil Nadu, India"
+STANBUL TECHNICAL UNIVERSITY INSTITUTE OF SCIENCE AND TECHNOLOGY
+SUS college of Engineering and Technology
+SWPS University of Social Sciences
+SWPS University of Social Sciences and Humanities
+Saarland University
+"Saarland University, Saarbrücken, Germany, 2 Utrecht University, Utrecht, the Netherlands"
+Sabancı University
+Sabanci University
+"Sabanci University, Istanbul, Turkey"
+"Sackler Faculty of Medicine, Tel Aviv University, Tel Aviv, Israel"
+Sakarya University
+"Salgado de Oliveira University, Brazil"
+Samsung Advanced Institute of Technology
+"Samsung Advanced Institute of Technology (SAIT), KAIST"
+"Samsung RandD Institute America - Dallas, 1301 East Lookout Drive, Richardson, TX 75082, USA"
+"Samsung Telecommunication Research Institute, Beijing, China"
+Samsung-PDMI Joint AI Center
+"San Jose State University, San Jose, CA"
+Sanghvi Institute of Management and Science
+"Santa Clara University, Santa Clara, CA. 95053, USA"
+Santa Fe Institute
+"Sapienza University of Rome, 2Fondazione Bruno Kessler, 3University of Trento"
+"Sapienza University of Rome, Italy"
+Sarhad University of Science and Information Technology
+"Sathyabama University Old Mamallapuram Road, Chennai, India"
+"Sathyabama University, Chennai, India"
+"Savitri Bai Phule Pune University, Maharashtra India"
+Savitribai Phule Pune University
+"Sch l of EECS, Peking University, Beijing, 100871, China"
+"School of Advanced Technologies in Medicine, Tehran University of Medical Sciences, Tehran, Iran"
+"School of Applied Statistics, National Institute of Development Administration, Bangkok, Thailand"
+"School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China"
+"School of Automation Engineering, University of Electronic Science and Technology of China, No. 2006, Xiyuan Ave"
+"School of Automation Science and Electrical Engineering, Beihang University, Beijing, China"
+"School of Automation, Beijing University of Posts and Telecommunications, Beijing 100876, China"
+"School of Behavioral and Brain Sciences, The University of Texas at Dallas, USA"
+"School of Business, Aalto University, Finland"
+"School of Business, University of Southern California; Alexandra Mislin"
+"School of Comm. and Info. Engineering, Beijing University of Posts and Telecom"
+"School of Comm. and Info. Engineering, Beijing University of Posts and Telecom., Beijing China"
+"School of Communication Engineering, Hangzhou Dianzi University, Xiasha Higher Education Zone"
+"School of Communication and Information Engineering, Shanghai University"
+"School of Computer Engineering, Nanyang Technological University, Singapore"
+"School of Computer Engineering, Sejong University, Seoul, Korea"
+"School of Computer Engineering, Shahrood University of Technology, Shahrood, IRAN"
+"School of Computer Information Engineering, Jiangxi Normal University, Nanchang, China"
+"School of Computer Science and Engineering, Nanjing University of Science and Technology"
+"School of Computer Science and Engineering, Nanyang Technological University, Singapore"
+"School of Computer Science and Engineering, Sichuan University, China"
+"School of Computer Science and Engineering, South China University of Technology"
+"School of Computer Science and Engineering, South China University of Technology, Guangzhou 510006, China"
+"School of Computer Science and Engineering, South China University of Technology, Guangzhou, China"
+"School of Computer Science and Engineering, Southeast University, Nanjing 210096, China"
+"School of Computer Science and Engineering, Southeast University, Nanjing 211189, China"
+"School of Computer Science and Engineering, Water Resources University, Hanoi 10000, Vietnam"
+"School of Computer Science and Information Systems, Birkbeck College, University of London"
+"School of Computer Science and Software Engineering, East China Normal University, China"
+"School of Computer Science and Software Engineering, Shenzhen University"
+"School of Computer Science and Software Engineering, Shenzhen University, Nanhai Ave 3688, Shenzhen"
+"School of Computer Science and Software Engineering, University of Western Australia"
+"School of Computer Science and Technology, Harbin Institute of"
+"School of Computer Science and Technology, Harbin Institute of Technology"
+"School of Computer Science and Technology, Harbin Institute of Technology, China"
+"School of Computer Science and Technology, Harbin Institute of Technology, Harbin, China"
+"School of Computer Science and Technology, Nanjing University of Science and Technology, China"
+"School of Computer Science and Technology, Shandong Institute of Business and Technology"
+"School of Computer Science and Technology, Shandong University"
+"School of Computer Science and Technology, Shandong University, China"
+"School of Computer Science and Technology, Tianjin University"
+"School of Computer Science and Technology, Tianjin University, 300072 Tianjin, China"
+"School of Computer Science and Technology, Tianjin University, China"
+"School of Computer Science and Technology, Tianjin University, Tianjin 300072, China"
+"School of Computer Science and Technology, Tianjin University, Tianjin, China"
+"School of Computer Science and Technology, University of Science and Technology of China"
+"School of Computer Science and Technology, Zhejiang University of Technology, Hangzhou 310023, China"
+"School of Computer Science, Beijing University of Posts and Telecommunications, Beijing China"
+"School of Computer Science, CECS, Australian National University, Australia"
+"School of Computer Science, CECS, Australian National University, Canberra"
+"School of Computer Science, Carnegie Mellon University"
+"School of Computer Science, Carnegie Mellon University, 15213, USA"
+"School of Computer Science, Carnegie Mellon University, PA 15213, USA"
+"School of Computer Science, Carnegie Mellon University, PA, USA"
+"School of Computer Science, Carnegie Mellon University, Pittsburgh, PA 15213, USA"
+"School of Computer Science, Carnegie Mellon University, Pittsburgh, USA"
+"School of Computer Science, Carnegie Mellon University, USA"
+"School of Computer Science, Fudan University, Shanghai 200433, China"
+"School of Computer Science, Fudan University, Shanghai, 200433, China"
+"School of Computer Science, Fudan University, Shanghai, China"
+"School of Computer Science, Nanjing University of Science and Technology"
+"School of Computer Science, National University of Defense Technology, Changsha, China"
+"School of Computer Science, Northwestern Polytechnical University, China"
+"School of Computer Science, Northwestern Polytechnical University, P.R.China"
+"School of Computer Science, Northwestern Polytechnical University, Xi an China"
+"School of Computer Science, OPTIMAL, Northwestern Polytechnical University, Xian 710072, Shaanxi, P. R. China"
+"School of Computer Science, Sichuan University, Chengdu, China"
+"School of Computer Science, South China Normal University, China"
+"School of Computer Science, Tel Aviv University"
+"School of Computer Science, The Hebrew University, Israel"
+"School of Computer Science, The University of Adelaide, Australia"
+"School of Computer Science, The University of Manchester"
+"School of Computer Science, The University of Nottingham"
+"School of Computer Science, The University of Nottingham, Nottingham, UK"
+"School of Computer Science, Tianjin University"
+"School of Computer Science, University of Adelaide, Australia"
+"School of Computer Science, University of Birmingham, UK"
+"School of Computer Science, University of Lincoln, U.K"
+"School of Computer Science, University of Lincoln, United Kingdom"
+"School of Computer Science, University of Nottingham"
+"School of Computer Science, University of Windsor, Windsor, ON, Canada N9B 3P"
+"School of Computer Science, Wuhan University, P.R. China"
+"School of Computer Science, Wuyi University, Jiangmen 529020, China"
+"School of Computer Software, Tianjin University, 300072 Tianjin, China"
+"School of Computer and Communication Engineering, University of Science and Technology Beijing, 100083 Beijing, China"
+"School of Computer and Communication, Hunan University of Technology, Hunan, Zhuzhou, 412008 china"
+"School of Computer and Information Engineering, Nanyang Institute of Technology, Henan, Nanyang, 473000, P.R"
+"School of Computer and Information Engineering, Xiamen University of Technology, Xiamen 361024, China"
+"School of Computer and Information Science, Chongqing Normal University 401331, China"
+"School of Computer and Information Technology, Beijing Jiaotong University, Beijing"
+"School of Computer and Information, Hefei University of Technology, China"
+"School of Computer and Information, Hefei University of Technology, Hefei"
+"School of Computer and Information, Hefei University of Technology, Hefei, China"
+"School of Computer, Beijing Institute of Technology, Beijing, China"
+"School of Computer, National University of Defense Technology"
+"School of Computing Science, Simon Fraser University, Burnaby, B.C., Canada"
+"School of Computing Science, Simon Fraser University, Burnaby, B.C., Canada; E-Mail"
+"School of Computing Science, Simon Fraser University, Canada"
+"School of Computing Sciences, University of East Anglia, Norwich, UK"
+"School of Computing and Communications University of Technology, Sydney"
+"School of Computing and Communications Infolab21, Lancaster University, Lancaster LA1 4WA, UK"
+"School of Computing and Communications, University of Technology Sydney, Sydney, Australia"
+"School of Computing and Info. Sciences, Florida International University"
+"School of Computing and Mathematics, Charles Sturt University, Wagga Wagga, Australia"
+"School of Computing, National University of Singapore"
+"School of Computing, National University of Singapore, SG"
+"School of Computing, National University of Singapore, Singapore"
+"School of Computing, Staffordshire University"
+"School of Control Science and Engineering, Shandong University, Jinan 250061, China"
+"School of Data Science, Fudan University"
+"School of Data Science, Fudan University, China"
+"School of Data and Computer Science, Sun Yat-Sen University, China"
+"School of Data and Computer Science, Sun Yat-Sen University, GuangZhou, China"
+"School of Data and Computer Science, Sun Yat-sen University"
+"School of Data and Computer Science, Sun Yat-sen University, China"
+"School of Data and Computer Science, Sun Yat-sen University, P.R.China"
+"School of Data of Computer Science, Sun Yat-sen University, P.R. China"
+"School of E.C.E., National Technical University of Athens, 15773 Athens, Greece"
+"School of E.C.E., National Technical University of Athens, Greece"
+"School of ECE, Peking University 2School of EIE, South China University of Technology"
+"School of EECS, Peking University, Beijing, 100871, China"
+"School of EECS, Queen Mary University of London"
+"School of EECS, Queen Mary University of London, London, UK"
+"School of EECS, Queen Mary University of London, UK"
+"School of EEE, Nanyang Technological University, Singapore"
+"School of Electrical Engineering and Automation, Anhui University, Hefei, China, Hong Kong Polytechnic"
+"School of Electrical Engineering and Automation, Harbin Institute of Technology"
+"School of Electrical Engineering and Automation, Harbin Institute of Technology (HIT"
+"School of Electrical Engineering and Computer Science, Peking University"
+"School of Electrical Engineering, Iran University of Science and Technology, Tehran, Iran"
+"School of Electrical and Computer Engineering, Cornell University"
+"School of Electrical and Computer Engineering, Cornell University, Ithaca NY"
+"School of Electrical and Computer Engineering, Purdue University, West Lafayette, IN, USA"
+"School of Electrical and Computer Engineering, RMIT University"
+"School of Electrical and Computer Engineering, RMIT University, Melbourne, Australia"
+"School of Electrical and Electronic Engineering, Nanyang Technological University, Singapore"
+"School of Electrical and Electronic Engineering, Nanyang Technological University, Singapore. 2Advanced Digital Sciences Center, Singapore"
+"School of Electrical and Information Engineering, Hunan University of Technology, Hunan, Zhuzhou, 412008 china"
+"School of Electrical and Information Engineering, The University of Sydney, Sydney, NSW, Australia, 2 Sydney Medical"
+"School of Electrical and Information Engineering, Xi an Jiaotong University, Xi an, China"
+"School of Electrical, Computer and Energy Engineering, Arizona State University"
+"School of Electromechanical Engineering, Guangdong University of Technology, 510006 Guangzhou, China"
+"School of Electronic Engineering and Computer Science, Peking University, 100871, China"
+"School of Electronic Information Engineering, Tianjin University, China"
+"School of Electronic and Computer Engineering, Peking University"
+"School of Electronic and Information Engineering, Beihang University, Beijing, 100191, China"
+"School of Electronic and Information Engineering, South China University of Technology"
+"School of Electronic and Information Engineering, Tongji University, Shanghai, China"
+"School of Electronic and Information, Yangtze University, Jingzhou 434023, China"
+"School of Electronics Engineering and Computer Science, Peking University"
+"School of Electronics Engineering and Computer Science; Peking University, Beijing 100871, China"
+"School of Electronics and Computer Engineering, Peking University"
+"School of Electronics and Information Engineering, Tongji University, Caoan Road 4800, Shanghai"
+"School of Electronics and Information Technology, Sun Yat-Sen University, GuangZhou, China"
+"School of Electronics and Information, Northwestern Polytechnical University, China"
+"School of Engineering Science, Simon Fraser University, Burnaby, BC, Canada"
+"School of Engineering, CECS, Australian National University, Canberra, Australia"
+"School of Engineering, Taylor s University"
+"School of Engineering, University of Guelph"
+"School of Engineering, University of Portsmouth, United Kingdom"
+"School of Engineering, University of Waikato, Hamilton, New Zealand"
+"School of Financial Information Engineering, Southwestern University of Finance and Economics, Chengdu"
+"School of Games, Hongik University, Seoul, Korea"
+"School of ICE, Beijing University of Posts and Telecommunications, Beijing, China"
+"School of Info. and Commu. Engineering, Beijing University of Posts and Telecommunications"
+"School of Informatics, University of Edinburgh"
+"School of Informatics, University of Edinburgh, UK"
+"School of Information Engineering, Guangdong Medical College, Song Shan Hu"
+"School of Information Engineering, Guangdong University of Technology, 510006 Guangzhou, China"
+"School of Information Engineering, Nanchang University, China"
+"School of Information Engineering, Wuyi University, Jiangmen 529020, China"
+"School of Information Science and Engineering, Central South University, Changsha"
+"School of Information Science and Engineering, Southeast University, Nanjing, China"
+"School of Information Science and Engineering, Xiamen University, Xiamen 361005, China"
+"School of Information Science and Technology, Donghua University, Shanghai 200051, China"
+"School of Information Science and Technology, Northwestern University, Xi an710127, Shanxi, China"
+"School of Information Science and Technology, ShanghaiTech University, Shanghai, 200031, China"
+"School of Information Science and Technology, Sun Yat-sen University, China"
+"School of Information Science, Japan Advanced Institute of Science and Technology"
+"School of Information Systems, Singapore Management University, Singapore"
+"School of Information Technology (ITE), Halmstad University, Box 823, 30118 Halmstad, Sweden"
+"School of Information Technology and Electrical Engineering, The University of Queensland"
+"School of Information Technology and Engineering, University of Ottawa, Ontario, Canada"
+"School of Information Technology and Engineering, VIT University, Vellore, 632014, India"
+"School of Information Technology and Management, University of International"
+"School of Information and Communication Engineering, Beijing University of Posts and Telecommunications"
+"School of Information and Control Engineering, Nanjing University of Information Science and Technology, Nanjing, 210044, China"
+"School of Internet of Things Engineering, Jiangnan University, Wuxi 214122, China"
+"School of IoT Engineering, Jiangnan University, Wuxi 214122, China"
+"School of IoT Engineering, Jiangnan University, Wuxi, 214122, China"
+"School of Management Engineering, Henan Institute of Engineering, Zhengzhou 451191, P.R. China"
+"School of Management, University of Bath, Bath, UK"
+"School of Mathematical Science, Dalian University of Technology, Dalian, China"
+"School of Mathematical Science, Peking University, China"
+"School of Mathematical Sciences, Dalian University of Technology, Linggong Rd. 2, Dalian"
+"School of Mathematical Sciences, Monash University, VIC 3800, Australia"
+"School of Mathematical Sciences, Peking University, China"
+"School of Mathematics and Computational Science, Sun Yat-sen University, China"
+"School of Mathematics and Computer Science, Northeastern State University, Tahlequah, OK 74464, USA"
+"School of Mathematics and Statistics, Xi an Jiaotong University, P. R. China"
+"School of Mathematics and Statistics, Xi an Jiaotong University, Xi an, China"
+"School of Mechanical Engineering, Southwest Jiaotong University, Chengdu 610031, China"
+"School of Medicine, Shenzhen University, Shenzhen 518060, China"
+"School of Medicine, Tehran University of Medical Sciences, Tehran, Iran"
+"School of Optics and Electronics, Beijing Institute of Technology, Beijing"
+"School of Physics and Electronic Engineering, Taizhou University, Taizhou 318000, China"
+"School of Physics and Engineering, Sun Yat-Sen University, Guangzhou, China, 2 School of Information"
+"School of Physics and Optoelectronic Engineering, Xidian University, China"
+"School of Psychology, Cardiff University, Cardiff, CF10 3AT, UK"
+"School of Psychology, Cardiff University, Cardiff, United Kingdom, College of"
+"School of Psychology, The University of New South Wales, Sydney, Australia, 2 School of Psychology"
+"School of Psychology, University of Auckland, Auckland, New Zealand"
+"School of Psychology, University of Central Lancashire"
+"School of Software, Dalian University of Technology, Dalian 116621, China"
+"School of Software, Dalian University of Technology, Tuqiang St. 321, Dalian 116620, China"
+"School of Software, Sun Yat-sen University, China"
+"School of Software, Tianjin University"
+"School of Software, Tsinghua University, Beijing 100084, China"
+"School of Software, Xidian University, China"
+"School, The University of Sydney, Sydney, NSW, Australia"
+"Schreiber Building, room 103, Tel Aviv University, P.O.B. 39040, Ramat Aviv, Tel Aviv"
+"Science and Intelligence Technology, Shanghai Institutes for Biological Sciences, CAS"
+"Science and Technology, Sun Yat-Sen University, Guangzhou, China, 3 SYSU-CMU Shunde International"
+Science and the Robotics Institute at Carnegie Mellon University. This study was supported in part
+"Science, University of Amsterdam"
+"Science, University of Bristol"
+"Scientific Visualization and Computer Graphics, University of Groningen, Nijenborgh 9, Groningen, The Netherlands"
+"Seattle Pacific University, Seattle, WA 98119-1957, USA"
+"Section of Pathology, Second University of Naples, Via L. Armanni"
+"Sejong University, 98 Gunja, Gwangjin, Seoul 143-747, Korea"
+"Sejong University, 98 Kunja-Dong, Kwangjin-Gu, Seoul, Korea"
+Semarang State University
+"Sendai National College of Technology, Natori, Japan"
+"SenseTime Group Limited, 2Tsinghua University"
+"SenseTime Group Limited, 3Peking University"
+"SenseTime, 2Tsinghua University"
+"SenseTime-NTU Joint AI Research Centre, Nanyang Technological University"
+"Sensor-enhanced Social Media (SeSaMe) Centre, National University of Singapore, Singapore"
+Seoul National University
+"Seoul National University, Seoul, Korea"
+"Seoul National University, Seoul, South Korea"
+"Sessional Tutor, The University of Melbourne"
+"Several methods exists to induce anxiety in healthy individuals, including threat of shock (ToS), the Trier"
+"Shaanxi Provincial Key Lab on Speech and Image Information Processing, Northwestern Polytechnical University, Xi an, China"
+Shaheed Zulfikar Ali Bhutto Institute of
+Shaheed Zulfikar Ali Bhutto Institute of Science and Technology Islamabad
+"Shaheed Zulfikar Ali Bhutto Institute of Science and Technology, Islamabad, Pakistan"
+ShahidBeheshti University
+Shandong University of Science and Technology
+Shandong Women s University
+Shanghai Institute for Advanced Communication and Data Science
+"Shanghai Institute of Applied Physics, Chinese Academy of Sciences"
+Shanghai Jiao Tong University
+"Shanghai Jiao Tong University, CloudWalk Technology"
+"Shanghai Jiao Tong University, 2Zhejiang University, 3Massachusetts Institute of Technology"
+"Shanghai Jiao Tong University, 800 Dongchuan Road, Shanghai 200240, China"
+"Shanghai Jiao Tong University, China"
+"Shanghai Jiao Tong University, China. 2Columbia University, USA"
+"Shanghai Jiao Tong University, Shanghai 200240, China"
+"Shanghai Key Lab of Intelligent Information Processing, School of Computer Science, Fudan University, China"
+ShanghaiTech University
+Shanghaitech University
+"Shaoguan University, Da Tang Lu"
+"Sharda University, Greater Noida, India"
+Sharif University of Technology
+"Sharif University of Technology, Tehran. Iran"
+"Shenyang Institute of Automation, Chinese Academy of Sciences, Shenyang 110016, China"
+Shenzhen Institutes of Advanced Technology
+"Shenzhen Institutes of Advanced Technology, CAS, China"
+"Shenzhen Institutes of Advanced Technology, CAS, Shenzhen, China"
+"Shenzhen Institutes of Advanced Technology, Chinese Academy of Science, China"
+"Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences"
+"Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences, 518055, China"
+"Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences, China"
+"Shenzhen Key Lab of CVPR, Shenzhen Institutes of Advanced Technology"
+"Shenzhen Key Lab of CVPR, Shenzhen Institutes of Advanced Technology, CAS"
+"Shenzhen Key Lab of CVPR, Shenzhen Institutes of Advanced Technology, CAS, China"
+"Shenzhen Key Lab of Comp. Vis. and Pat. Rec., Shenzhen Institutes of Advanced"
+"Shenzhen Key Lab of Comp. Vis. and Pat. Rec., Shenzhen Institutes of Advanced Technology, CAS, China"
+"Shenzhen Key Laboratory of High Performance Data Mining, Shenzhen Institutes of Advanced Technology"
+"Shenzhen Key Laboratory of Spatial Smart Sensing and Service, Shenzhen University, P.R. China"
+"Shenzhen Research Institute, The Chinese University of Hong Kong, Shenzhen 518057, China"
+"Shenzhen University, China"
+"Shenzhen University, Shenzhen China"
+"Shenzhen University, Shenzhen, China"
+"Shenzhen key lab of Comp. Vis. and Pat. Rec., Shenzhen Institutes of Advanced Technology"
+"Shenzhen key lab of Comp. Vis. and Pat. Rec., Shenzhen Institutes of Advanced Technology, CAS, China"
+Shiraz University
+"Shri Shivaji College, Parbhani, M.S, India"
+"Sichuan Fine Arts Institute, Chongqing, China"
+"Siemens Corporate Research, 755 College Road East, Princeton, NJ"
+"Sighthound, Inc"
+Signal Processing Institute
+Simon Fraser University
+"Simon Fraser University, Burnaby, Canada"
+Singapore University of Technology and Design
+Sinhgad College of
+"Skolkovo Institute of Science and Technology (Skoltech), Russia"
+Slovak University of Technology in
+"Smart Network System Institute, Institute for Information Industry"
+"Software, Jiangxi Normal University, Nanchang, China, 4 School of Statistics, Capital University of"
+"Sogang University, Seoul 121-742, Republic of Korea"
+"Solapur University, INDIA"
+Sona College of Technology
+"Sorbonne Universit s, UPMC University Paris 06, Paris, France"
+South China University of China
+South China University of Technology
+South China University of Technology 4NVIDIA 5Google Brain 6Ant Financial
+"South China University of Technology, Guangzhou 510640, China"
+South College Road
+"Southeast University, Nanjing 210096, China"
+"Southeast University, Nanjing 211189, China"
+"Southeast University, Nanjing, China"
+"Southern Illinois University, Carbondale, IL 62901 USA"
+Southwest Jiaotong University
+"Southwest Jiaotong University, Chengdu, China"
+"Southwest Jiaotong University, Chengdu, P.R. China"
+"Southwest University, China"
+"Southwest University, Chongqing 400715, China"
+Sponsors: Machine Intelligence Research Labs (MIR Labs
+"Springer Science + Business Media, Inc. Manufactured in The Netherlands"
+"Sri Chandrasekharendra Saraswathi Viswa Mahavidyalaya University, Kanchipuram, India"
+"Sri Manakula Vinayagar Engineering College, Pondicherry"
+"Sri Sidartha Institute of Technology, Tumkur"
+"Sri Sunflower College of Engineering and Technology, Lankapalli"
+"Sri krishna College of Technology, Coimbatore, India"
+Sridevi Women's Engineering College
+"Srinivasan Engineering College, Perambalur, India"
+"Srm Easwari Engineering College, Ramapuram, Bharathi Salai, Chennai, Tamil Nadu, India"
+"Ss. Cyril and Methodius University, Skopje, Macedonia"
+"St. Ann s College of Engineering and Technology, Andhra Pradesh, India"
+St. Anne s College
+St. Francis Institute of Technology
+"St. Xavier s Catholic College of Engineering, India"
+"St. Xavier s Catholic College of Engineering, Nagercoil, India"
+"St.Joseph s College of Engineering, Old Mamallapuram Road, Kamaraj Nagar, Semmencherry, Chennai"
+"Staffordshire University, Beaconside Stafford ST18 0AB, United Kingdom"
+"Stamford University Bangladesh, Dhaka-1209, Bangladesh"
+Stanford University
+Stanford University National Tsing Hua University
+"Stanford University, 2Facebook, 3Dartmouth College"
+"Stanford University, 2Simon Fraser University"
+"Stanford University, CA"
+"Stanford University, CA, United States"
+"Stanford University, Stanford, CA, USA"
+"Stanford University, Stanford, California"
+"Stanford University, USA"
+"State Key Lab of CADandCG, College of Computer Science, Zhejiang University, Hangzhou, China"
+"State Key Lab of CADandCG, Zhejiang University, Hangzhou, Zhejiang, China"
+"State Key Lab. LIESMARS, Wuhan University, China"
+"State Key Laboratory for Novel Software Technology, Nanjing University, China"
+"State Key Laboratory of Brain and Cognitive Science, Institute of Psychology"
+"State Key Laboratory of CAD and CG, ZHE JIANG University, HangZhou, 310058 China"
+"State Key Laboratory of ISN, Xidian University"
+"State Key Laboratory of Integrated Services Networks, Xidian University, Xi an 710071 China"
+"State Key Laboratory of Precision Measuring Technology and Instruments, Tianjin University, 300072, China"
+"State Key Laboratory of Pulp and Paper Engineering, South China University of Technology, Guangzhou 510640, China"
+"State Key Laboratory of Robotics and System, Harbin Institute of Technology, Harbin, China"
+"State Key Laboratory of Software Development Environment, Beihang University, P.R.China"
+"State Research Institute of Aviation Systems (GosNIIAS), Moscow, Russia"
+State University of Feira de Santana (UEFS
+"State University of New York Polytechnic Institute, Utica, New York"
+State University of New York at
+State University of New York at Binghamton
+"State University of New York at Binghamton, Binghamton, NY"
+"State University of New York at Binghamton, USA"
+State University of New York at Buffalo
+"State University of Rio de Janeiro, Brazil"
+"Statistics, University of"
+Stevens Institute of Technology
+Stevens Institute of Technology Adobe Systems Inc
+Stony Brook University
+"Stony Brook University 2Adobe Research 3 CentraleSup elec, Universit e Paris-Saclay"
+Stony Brook University Hospital
+"Stony Brook University, NY 11794, USA"
+"Stony Brook University, NY, USA"
+"Stony Brook University, Stony Brook NY 11794, USA"
+"Stony Brook University, Stony Brook, NY 11794, USA"
+"Stony Brook University, Stony Brook, USA"
+"Student, Amal Jyothi College of Engineering, Kanjirappally, India"
+"Student, Computer Science, Shah and Anchor Kuttchi Engineering College, Mumbai, India"
+Submitted to the Institute for Graduate Studies in
+Submitted to the Senate of the Hebrew University
+Sudan University of Science and Technology
+"Suhaila N. Mohammed, Baghdad University, College of Science, Baghdad, Iraq"
+Sun Yat-Sen (Zhongshan) University
+Sun Yat-Sen University
+"Sun Yat-Sen University, Guangzhou, P.R. China"
+Sun Yat-sen University
+SungKyunKwan University
+Sungkyunkwan University
+Swansea University
+Swiss Federal Institute of Technology
+"Swiss Federal Institute of Technology, Lausanne (EPFL"
+"Switzerland, Psychosomatic Medicine, and Psychotherapy, University Hospital Frankfurt"
+"Synergy Systems, Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences, Guangdong"
+"System Research Center, NOKIA Research Center, Beijing, 100176, China"
+"Systems and Communication, University of Milano-Bicocca"
+Systems and Telematics - Neurolab
+THE UNIVERSITY OF ARIZONA
+THE UNIVERSITY OF CHICAGO
+"TNLIST, Tsinghua University, Beijing, 100084, China"
+Tafresh University
+"Tafresh University, Tafresh, Iran"
+Taizhou University
+"Taizhou University, Taizhou 317000, China"
+"Tamkang University, Taipei, Taiwan"
+Tampere University of Technology
+"Tampere University of Technology, Finland"
+"Tampere University of Technology, Tampere 33720, Finland"
+"Tampere University of Technology, Tampere, Finland"
+"Tarbiat Modarres University, Tehran, Iran"
+"Teaching Affairs Office, Chongqing Normal University, Chongqing 401331, China"
+"Technical University Munich, Germany"
+"Technical University in Prague, 166 27 Prague 6, Technick a 2 Czech Republic"
+"Technical University in Prague, Technick a 2, 166 27 Prague 6 Czech Republic"
+"Technical University of Cluj Napoca, 28 Memorandumului Street"
+Technical University of Kaiserslautern
+Technical University of Munich
+"Technical University of Munich, Germany"
+"Technical University of Munich, Munich, 2KTH Royal Institute of Technology, Stockholm"
+"Technical University of Ostrava, FEECS"
+Technical University of Denmark
+Technion Israel Institute of Technology
+Technion - Israel Institute of Technology
+"Technological Educational Institute of Athens, 12210 Athens, Greece"
+"Technological University, Davanagere, Karnataka, India"
+"Technology, Manchester Metropolitan University"
+"Technology, Nanjing University of Aero"
+"Technology, University of Oradea 410087, Universitatii 1, Romania"
+Tel Aviv University
+"Tel Aviv University, Columbia University"
+"Tel Aviv University, Cornell Tech"
+"Tel Aviv University, Israel"
+"Tel-Aviv University, Israel"
+Temple University
+"Temple University, Philadelphia, PA 19122, USA"
+"Temple University, Philadelphia, USA"
+Texas AandM University
+"Texas AandM University, College Station TX 77843, USA"
+"Texas AandM University, College Station, TX, USA"
+"Texas State University, San Marcos, USA"
+Thapar University
+The Allen Institute for AI
+"The American University In Cairo, Road 90, New Cairo, Cairo, Egypt"
+The American University in Cairo
+"The American University in Cairo, Egypt"
+"The American University in Cairo, New Cairo 11835, Egypt"
+"The Amsterdam School of Communication Research, University of Amsterdam"
+The Australian National University
+"The Australian National University Canberra ACT 2601, Australia"
+The Australian National University Queensland University of Technology
+"The Australian National University, Australia"
+"The Australian National University, Canberra, Australia"
+The Author 2012. Published by Oxford University Press on behalf of The British Computer Society. All rights reserved
+The Author 2014. Published by Oxford University Press
+"The Big Data Research Center, Henan University, Kaifeng 475001, China"
+"The Blavatnik School of Computer Science, Tel Aviv University, IL"
+"The Blavatnik School of Computer Science, Tel Aviv University, Israel"
+"The Blavatnik School of Computer Science, Tel-Aviv University, Israel"
+"The Blavatnik School of Computer Science, Tel-Aviv University, Tel-Aviv, Israel"
+"The Blavatnik School of Computer Science, The Tel-Aviv University"
+"The Center for Brains, Minds and Machines, Massachusetts Institute of Technology, Cambridge, MA USA"
+The Chinese University of Hong Kong
+The Chinese University of Hong Kong 3 SenseTime Group Limited
+The Chinese University of Hong Kong holds the copyright of this thesis. Any
+"The Chinese University of Hong Kong, 2University of Toronto, 3Youtu Lab, Tencent"
+"The Chinese University of Hong Kong, 4Beijing University of Posts and Telecommunications"
+"The Chinese University of Hong Kong, China"
+"The Chinese University of Hong Kong, HKSAR, China"
+"The Chinese University of Hong Kong, Hong Kong"
+"The Chinese University of Hong Kong, Hong Kong SAR, China"
+"The Chinese University of Hong Kong, Hong Kong, China"
+"The Chinese University of Hong Kong, New Territories, Hong Kong"
+"The Chinese University of Hong Kong, Sha Tin, Hong Kong"
+The Chinese University of Hong Kong
+The City College and the Graduate Center
+"The City College of New York, New York, NY 10031, USA"
+The City University of New York
+The Education University of Hong Kong
+The Florida State University
+The Graduate University for Advanced Studies (SOKENDAI
+The Hebrew University of Jerusalem
+"The Hebrew University of Jerusalem, Israel"
+The Hong Kong Polytechnic University
+The Hong Kong Polytechnic University 2Harbin Institute of Technology
+"The Hong Kong Polytechnic University, Hong Kong"
+"The Hong Kong Polytechnic University, Hong Kong SAR, China"
+"The Hong Kong Polytechnic University, Hong Kong, China"
+"The Hong Kong Polytechnic University, Hong Kong, SAR, 2University of Technology Sydney, Australia"
+The Hong Kong University of Science and Technology
+The Hong Kong University of Science and Technology 2 Carneige Mellon University
+"The Hong Kong University of Science and Technology, Clear Water Bay, Hong Kong"
+"The Image Processing and Analysis Lab (LAPI), Politehnica University of Bucharest, Romania"
+The Institute of Electronics
+"The Institute of Scientific and Industrial Research, Osaka University"
+The Ohio State University
+"The Ohio State University, Columbus, OH, USA"
+"The Ohio State University, OH"
+The Open University
+The Open University of
+The Open University of Israel
+"The Open University of Israel, Israel"
+"The Open University, Israel"
+"The Remote Sensing Technology Institute (IMF), German Aerospace Center"
+"The Robotics Inistitute, Carnegie Mellon University"
+The Robotics Institute
+The Robotics Institute Carnegie Mellon University
+"The Robotics Institute, Carnegie Mellon University"
+"The Robotics Institute, Carnegie Mellon University, 5000 Forbes Ave, PA, USA"
+"The Robotics Institute, Carnegie Mellon University, Pittsburgh PA"
+The Rockefeller University
+"The School of Computer Science, Tel-Aviv University, Israel"
+"The School of Electrical Electronic and Control Engineering, Kongju National University"
+"The School of Electrical and Electronic Engineering, Yonsei University, 134 Shinchon-Dong"
+The State University of New Jersey
+"The University of Adelaide, Australia"
+The University of Adelaide; and Australian Centre for Robotic Vision
+The University of British Columbia
+The University of Cambridge
+The University of Edinburgh has made every reasonable effort to ensure that Edinburgh Research Explorer
+"The University of Edinburgh, Edinburgh, UK"
+The University of Electro-Communications
+"The University of Electro-Communications, JAPAN"
+"The University of Electro-Communications, Tokyo"
+"The University of Electro-Communications, Tokyo, Japan"
+The University of Hong Kong
+The University of Leeds
+The University of Manchester
+The University of Maryland
+The University of Newcastle
+"The University of Newcastle, Callaghan 2308, Australia"
+The University of North Carolina at Chapel Hill
+The University of North Carolina at Charlotte
+"The University of North Carolina at Charlotte, USA"
+"The University of North Carolina, Chapel Hill"
+The University of Nottingham
+"The University of Nottingham, UK"
+The University of Queensland in
+"The University of Queensland, Australia"
+"The University of Queensland, School of ITEE"
+"The University of Queensland, School of ITEE, QLD 4072, Australia"
+The University of Sheffield
+The University of Sydney
+The University of Sydney 2SenseTime Research 3The Chinese University of Hong Kong
+"The University of Sydney, NSW 2006, Australia"
+"The University of Sydney, SenseTime Computer Vision Research Group"
+"The University of Sydney, SenseTime Computer Vision Research Group, Sydney"
+"The University of Sydney, Sydney, Australia"
+"The University of Tennessee, Knoxville"
+"The University of Tennessee, Knoxville, TN, USA"
+The University of Texas
+The University of Texas at
+The University of Texas at Arlington
+The University of Texas at Austin
+"The University of Texas at Austin, 2Carnegie Mellon University"
+"The University of Texas at Austin, 78701 Austin, USA"
+"The University of Texas at Austin, Austin, TX"
+"The University of Texas at Dallas, Richardson, TX"
+The University of Tokyo
+The University of Tokyo / RIKEN
+"The University of Tokyo, 2RIKEN, 3ETH Z urich, 4KU Leuven"
+"The University of Tokyo, Japan"
+"The University of Warwick, Coventry, UK"
+The University of Western Australia
+The University of York
+"The University of York, Heslington, York YO10 5DD, United Kingdom"
+"The University of York, UK"
+"The University of York, United Kingdom"
+The University of the Humanities
+The Weizmann Institute of
+The Weizmann Institute of Science
+"The Weizmann Institute of Science, Israel"
+"The authors are with Hiroshima University, Higashihiroshima"
+"The authors are with the Delft University of Technology, Data and Knowl"
+The open University of Israel. 2Adience
+"The school of Data Science, Fudan University"
+Thesis. Rochester Institute of Technology. Accessed from
+This Thesis is brought to you for free and open access by the Student Publications at Lake Forest College Publications. It has been accepted for
+"This is an Open Access document downloaded from ORCA, Cardiff University's institutional"
+This work is downloaded from Delft University of Technology
+This work was supported by Grant MOP102637 from the Canadian Institutes of Health Research to E.D.R. and the
+This work was supported in part by National Institute of Mental Health Award R01 MH 087610 to T.E
+"Tilburg Center for Logic, General Ethics, and Philosophy of Science, Tilburg University, Tilburg, Netherlands"
+Tohoku University
+"Tohoku University, Japan"
+"Tohoku University, Sendai, Japan"
+Tokyo Denki University
+Tokyo Institute of Technology
+"Tokyo Institute of Technology, Japan"
+Tokyo Metropolitan University
+Tokyo Polytechnic University
+Tokyo University of Science
+"Tokyo, Tokyo, 6National Institute of Informatics, Tokyo"
+Tomas Bata University in Zl n
+Tomsk Polytechnic University
+Tongji University
+"Tongji University, Shanghai 201804, China"
+Tooploox 2Polish-Japanese Academy of Information Technology 3Warsaw University of Technology
+"Toyota College, 2-1 Eisei, Toyota-shi, Aichi, 471-8525 Japan"
+"Toyota Research Institute, Cambridge, MA 2 University of Michigan, Ann Arbor, MI"
+"Toyota Research Institute, Los Altos, CA, USA"
+"Toyota Technological Institute (Chicago, US"
+Toyota Technological Institute Chicago (TTIC
+Toyota Technological Institute at Chicago
+"Toyota Technological Institute, Chicago"
+"Toyota Technological Institute, Chicago (TTIC"
+Transilvania University
+Trinity College
+Tripura University (A Central University
+"Trust Centre for Human Genetics, University of Oxford, Oxford, United Kingdom"
+Tsinghua University
+Tsinghua University 4SenseTime
+"Tsinghua University, 100084 Beijing, China"
+"Tsinghua University, 2Rutgers University, 3Baidu IDL"
+"Tsinghua University, 2Rutgers University, 3Massachusetts Institute of Technology, 4Baidu IDL"
+"Tsinghua University, Beijing 100084, China"
+"Tsinghua University, Beijing 100084, P.R.China"
+"Tsinghua University, Beijing, China"
+"Tsinghua University, China"
+"Tsinghua University, State Key Lab. of Intelligent"
+Tsinghua-CUHK Joint Research Center for Media Sciences
+"Tsinghua-CUHK Joint Research Center for Media Sciences, Technologies and Systems"
+"Turin, Italy, 3 Faculty of Humanities, Research Unit of Logopedics, Child Language Research Center, University of Oulu, Oulu"
+"U. G STUDENTS, DEPT OF CSE, ALPHA COLLEGE OF ENGINEERING, CHENNAI"
+U.S. Army Research Laboratory
+"U.S. Army Research Laboratory, 2800 Powder Mill Road, Adelphi, MD USA"
+"U.S. Army Research Laboratory, Adelphi, MD, USA"
+"UC Irvine1, INRIA2, Carnegie Mellon University"
+"UC Lab, Kyung Hee University, Yongin-Si 446-701, Korea"
+UCL and Alan Turing Institute
+"UG student, Amity school of Engineering and Technology, Amity University, Haryana, India"
+"UMIACS | University of Maryland, College Park"
+"UMIACS, University of Maryland"
+"UMIACS, University of Maryland, College Park, MD"
+"UMIACS, University of Maryland, College Park, USA"
+UNIVERSITY IN PRAGUE
+UNIVERSITY OF APPLIED SCIENCE DARMSTADT (FHD
+UNIVERSITY OF CALIFORNIA
+"UNIVERSITY OF CALIFORNIA, BERKELEY"
+"UNIVERSITY OF CALIFORNIA, SAN DIEGO"
+UNIVERSITY OF OULU
+UNIVERSITY OF OULU GRADUATE SCHOOL
+UNIVERSITY OF OULU P.O. Box 8000 FI-90014 UNIVERSITY OF OULU FINLAND
+UNIVERSITY OF TAMPERE
+UNIVERSITY OF TARTU
+UNIVERSITY OF WISCONSIN MADISON
+"USA, 2Unit for Experimental Psychiatry, University of Pennsylvania School of Medicine"
+"USC IRIS Lab, University of Southern California"
+USC Information Sciences Institute
+"USC Information Sciences Institute (ISI), Marina Del Rey, CA"
+USC Institute for Creative Technologies
+"USC Institute for Robotics and Intelligent Systems (IRIS), Los Angeles, CA"
+"USHER Institute, University of Edinburgh, United Kingdom"
+"Uber Advanced Technologies Group, 5Vector Institute"
+"Ubiquitous Computing Lab, Kyung Hee University"
+"UiT The Arctic University of Norway, Troms , Norway"
+"Ulm University, Germany"
+"Ultra College of Engineering and Technology for Women, India"
+"United States of America, State University of New York Albany, Albany"
+"United States of America, State University of New York Albany, Albany, New York"
+"Universit Paris-Dauphine, PSL Research University, CNRS, UMR"
+"Universitat Polit cnica de Catalunya, 6 University of Vigo, 7 LIUM, University of Maine"
+"Universitat Polit`ecnica de Catalunya, Columbia University"
+"Universite Catholique de Louvain, Louvain-la-Neuve, Belgium, 4 The Queensland Brain Institute, The"
+"Universiteit van Amsterdam, Roeterstraat 15, 1018 WB Amsterdam, The Netherlands"
+University
+University Politehnica of Bucharest
+"University Politehnica of Bucharest, Romania, Address Splaiul Independent ei"
+University (H
+University (ITU
+"University B.D.T.College of Engineering, Visvesvaraya"
+"University Bourgogne Franche-Comt , France"
+"University Campus, 54124, Thessaloniki, Greece"
+"University Center of FEI, S ao Bernardo do Campo, Brazil"
+"University City Blvd., Charlotte, NC"
+University College London
+"University College London, 12 Queen Square, London WC1N 3BG, UK"
+"University College London, London WC1N 3BG, United Kingdom"
+"University College London, London, UK"
+"University College London, UK"
+University Drive
+"University Drive, Fairfax, VA 22030-4444, USA"
+"University Health Board, Swansea, United Kingdom"
+University Higher School of Economics (HSE). Any opinions or claims contained in this
+"University Hospital Jena, Germany"
+University Institute of Engineering and Technology
+University Lecturer Anu Soikkeli
+University Lecturer Veli-Matti Ulvinen
+"University Library, Singapore"
+University Of California San Diego
+University Of Maryland
+University Of Oxford
+"University POLITEHNICA Timisoara, Timisoara, 300223, Romania"
+"University POLITEHNICA of Bucharest, Bucharest, Romania"
+University Politehnica of Bucharest
+"University Politehnica of Bucharest, Romania"
+"University Station C0500, Austin TX 78712, USA"
+"University Street, Montral, QC H3A 0E9, Canada"
+"University Street, Montreal, QC H3A 0E9, Canada"
+"University Technology of Malaysia, 81310 Skudai, Johor, Malaysia"
+"University at Albany, SUNY"
+"University at Buffalo, SUNY"
+"University at Buffalo, State University of New York"
+University of
+University of Aberdeen
+University of Abertay
+University of Adelaide
+"University of Adelaide, Australia"
+"University of Adelaide, SA, Australia"
+"University of Aizu, Japan"
+"University of Akron, Akron"
+"University of Alabama, Tuscaloosa, AL"
+"University of Alberta, Edmonton, AB T6G 2E8, Canada"
+"University of Alberta, Edmonton, Canada"
+"University of Alicante, 03690, San Vicente del Raspeig, Alicante, Spain"
+University of Amsterdam
+University of Amsterdam (UvA
+University of Amsterdam and Renmin University at TRECVID
+"University of Amsterdam, Amsterdam, The"
+"University of Amsterdam, Amsterdam, The Netherlands"
+"University of Amsterdam, Amsterdam, the Netherlands, 2 Leiden University"
+"University of Amsterdam, The Netherlands"
+"University of Amsterdam, University of Trento, Italy"
+"University of Amsterdam, the Netherlands"
+"University of Amsterdam; 2Amsterdam Brain and Cognition Center, University of"
+University of Applied Sciences Darmstadt - CASED
+"University of Applied Sciences, Arnhem, The Netherlands"
+University of Arizona
+University of Arkansas at Little Rock
+"University of Balochistan, Quetta"
+University of Barcelona
+"University of Barcelona and Computer Vision Centre, Barcelona, Spain"
+"University of Barcelona, Gran Via de les Corts Catalanes 585, 08007 Barcelona, Spain"
+"University of Barcelona, Spain"
+"University of Bari, Bari, Italy"
+University of Basel
+"University of Basel, Departement Informatik, Basel, Switzerland"
+"University of Basel, Switzerland"
+University of Bath
+"University of Bath, Bath, Somerset, United Kingdom"
+"University of Bath, Bath, United Kingdom"
+University of Beira Interior
+University of Bern
+University of Birmingham
+University of Bonn
+"University of Bonn, Germany"
+"University of Bonn, Roemerstrasse 164, 53117 Bonn, Germany"
+University of Brescia
+University of Bridgeport
+"University of Bridgeport, Bridgeport, CT 06604, USA"
+University of Bristol
+University of Bristol - Explore Bristol Research
+"University of Bristol, Bristol, BS8 1UB, UK"
+"University of Bristol, Bristol, UK"
+"University of Bristol, United Kingdom"
+University of British Columbia
+University of Buffalo
+"University of Business Agriculture and Technology, Dhaka-1230, Bangladesh"
+University of Caen
+University of Caen Basse-Normandie
+"University of Caen, France"
+University of Cagliari
+University of Calabria - DeMACS
+"University of Calgary, Canada"
+University of California
+University of California Berkeley
+University of California Berkeley
+University of California Davis
+University of California San Diego
+"University of California San Diego, La Jolla, California, USA"
+University of California Santa Barbara
+University of California at Berkeley
+University of California at Berkeley / ICSI
+"University of California at Berkeley, USA"
+"University of California at Irvine, Irvine, CA"
+"University of California at Los Angeles, Los Angeles, CA, USA"
+University of California at San Diego
+"University of California at San Diego, La Jolla, CA"
+"University of California, Berkeley"
+"University of California, Berkeley, Berkeley CA 94720, USA"
+"University of California, Berkeley1 Adobe"
+"University of California, Davis"
+"University of California, Davis 2University of Washington 3Allen Institute for AI"
+"University of California, Irvine"
+"University of California, Irvine, USA"
+"University of California, Los Angeles"
+"University of California, Los Angeles, California, USA"
+"University of California, Los Angeles, USA"
+"University of California, Merced"
+"University of California, Merced, CA"
+"University of California, Merced, CA 95344, USA"
+"University of California, Merced, USA"
+"University of California, Riverside"
+"University of California, Riverside CA 92521-0425, USA"
+"University of California, Riverside, CA"
+"University of California, Riverside, California 92521, USA"
+"University of California, San Diego"
+"University of California, San Diego 2 Carnegie Mellon University"
+"University of California, San Diego, California, USA"
+"University of California, San Diego, La Jolla"
+"University of California, San Diego, USA"
+"University of California, Santa Barbara"
+"University of California, Santa Cruz"
+University of Cambridge
+University of Cambridge Computer Laboratory
+"University of Cambridge, Cambridge, UK"
+"University of Cambridge, Computer Laboratory, UK"
+"University of Cambridge, The Computer Laboratory, Cambridge CB3 0FD, U.K"
+"University of Cambridge, UK 2Carnegie Mellon University, USA"
+"University of Cambridge, United Kingdom"
+University of Campinas
+University of Campinas (Unicamp
+University of Canberra
+"University of Canberra, Australia"
+"University of Canberra, Australia, Data61 - CSIRO and ANU, Australia"
+"University of Canterbury, New Zealand"
+University of Cape Town
+"University of Cape Town, South Africa"
+"University of Catania, Italy"
+University of Central Florida
+"University of Central Florida, Orlando"
+"University of Central Florida, Orlando FL 32816, USA"
+"University of Central Florida, Orlando, USA"
+"University of Central Florida, USA"
+"University of Central Punjab, Pakistan"
+"University of Chester, UK, 3Conservation Biologist"
+University of Chinese Academy of
+University of Chinese Academy of Science
+University of Chinese Academy of Sciences
+University of Chinese Academy of Sciences (UCAS
+"University of Chinese Academy of Sciences (UCAS), Beijing, 100049, China"
+"University of Chinese Academy of Sciences, Beijing 100049, China"
+"University of Chinese Academy of Sciences, Beijing 100190, China"
+"University of Chinese Academy of Sciences, Beijing 101408, China"
+"University of Chinese Academy of Sciences, Beijing, 100049, China"
+"University of Chinese Academy of Sciences, Beijing, China"
+"University of Chinese Academy of Sciences, China"
+"University of Coimbra, Portugal"
+University of Colorado
+"University of Colorado Boulder, 2U.S. Army Research Lab"
+University of Colorado Colorado Springs
+University of Colorado at Colorado Springs
+"University of Colorado at Colorado Springs and Securics, Inc., Colorado Springs, CO, USA"
+"University of Colorado, Boulder"
+"University of Colorado, Colorado Springs"
+"University of Colorado, Colorado Springs, USA"
+University of Connecticut
+University of Copenhagen
+"University of Cordoba, Spain"
+"University of Crete, Crete, 73100, Greece"
+"University of Dammam, Saudi Arabia"
+"University of Delaware, Newark, DE. USA"
+"University of Denver, Denver, CO"
+University of Dhaka
+"University of Dhaka, Bangladesh"
+"University of Dschang, Cameroon"
+University of Dundee
+University of Edinburgh
+"University of Edinburgh, Edinburgh, UK"
+University of Electronic Science and Technology of China
+"University of Electronic Science and Technology of China, China"
+University of Engineering and Technology
+University of Erlangen-Nuremberg
+University of Erlangen-Nuremberg 3 University of Bath
+"University of Exceter, Exceter, UK"
+University of Exeter
+"University of Exeter, UK"
+University of Florence
+"University of Florence, Italy"
+University of Florida
+"University of Florida, Gainesville, FL, 32611, USA"
+University of Frankfurt
+University of Freiburg
+"University of Freiburg, Germany"
+"University of Freiburg, Instit ut f ur Informatik"
+University of Geneva
+"University of Genoa, Italy"
+"University of Georgia, Athens, GA, U.S.A"
+University of Glasgow
+"University of Granada, Granada, Spain"
+"University of Granada, Spain"
+University of Groningen
+"University of Groningen, Netherlands"
+"University of Groningen, The Netherlands"
+"University of Gujrat, Pakistan"
+University of Haifa
+"University of Haifa, Haifa, Israel"
+"University of Helsinki, Finland"
+University of Houston
+"University of Houston, Houston, TX 77204, USA"
+"University of Houston, Houston, TX, USA"
+University of Iceland
+University of Illinois
+University of Illinois Urbana Champaign
+University of Illinois at
+University of Illinois at Chicago
+University of Illinois at Urbana
+University of Illinois at Urbana Champaign
+"University of Illinois at Urbana Champaign, Urbana"
+"University of Illinois at Urbana Champaign, Urbana, IL 61801, USA"
+University of Illinois at Urbana-Champaign
+University of Illinois at Urbana-Champaign 2Adobe Research
+"University of Illinois at Urbana-Champaign, IL USA"
+"University of Illinois at Urbana-Champaign, USA"
+"University of Illinois at Urbana-Champaign, Urbana, IL"
+"University of Illinois at Urbana-Champaign, Urbana, IL, USA"
+"University of Illinois, Urbana-Champaign"
+"University of Illinois, Urbana-Champaign University of California, San Diego"
+University of Information
+University of Insubria
+"University of Ioannina, Ioannina, Greece"
+"University of Ioannina, Ioannina, Greece, 2 Computational Biomedicine"
+University of Iowa
+"University of Kaiserslautern, Gottlieb-Daimler-Str., Kaiserslautern 67663, Germany"
+"University of Karlsruhe, Germany"
+University of Kentucky
+"University of Kentucky, 329 Rose St., Lexington, KY, 40508, U.S.A"
+"University of Kentucky, USA"
+University of Lac Hong 10 Huynh Van Nghe
+"University of Larestan, Iran"
+University of Leeds
+"University of Lincoln, School of Computer Science, U.K"
+"University of Lincoln, U.K"
+"University of Lincoln, UK"
+University of Liverpool
+University of Ljubljana
+University of Ljubljana Faculty
+"University of Ljubljana, Faculty of Electrical Engineering"
+"University of Ljubljana, Faculty of Electrical Engineering, Trzaska 25, 1000 Ljubljana, Slovenia"
+University of London
+"University of Malaga, Spain"
+"University of Malaya, 50603 Kuala Lumpur, Malaysia"
+"University of Malaya, Kuala Lumpur, Malaysia"
+University of Malta
+University of Manitoba
+University of Maryland
+University of Maryland College Park
+"University of Maryland Institute for Advanced Computer Studies, College Park, MD"
+"University of Maryland, CFAR"
+"University of Maryland, Center for Automation Research"
+"University of Maryland, College Park"
+"University of Maryland, College Park, MD"
+"University of Maryland, College Park, MD 20740; and bIntel Labs, Santa Clara, CA"
+"University of Maryland, College Park, MD, USA"
+"University of Maryland, College Park, USA"
+"University of Maryland, College Park; 2Arizona State University; 3Xerox Research Centre"
+"University of Maryland-College Park, USA"
+University of Massachusetts
+University of Massachusetts - Amherst
+University of Massachusetts Amherst
+University of Massachusetts Amherst in partial ful llment
+"University of Massachusetts, Amherst"
+"University of Massachusetts, Amherst MA, USA"
+"University of Massachusetts, Amherst Technical Report UM-CS"
+"University of Massachusetts, Amherst, MA"
+University of Memphis
+University of Miami
+"University of Miami, Coral Gables, FL"
+"University of Miami, USA"
+University of Michigan
+"University of Michigan, Ann Arbor"
+"University of Michigan, Ann Arbor, MI"
+"University of Michigan, Ann Arbor, MI, USA"
+"University of Michigan, Ann Arbor, MI, USA (UMICH.EDU"
+"University of Michigan, Ann Arbor, USA"
+University of Michigan-Shanghai Jiao Tong University Joint Institute
+University of Milan
+"University of Milano-Bicocca, Italy"
+University of Minnesota
+"University of Minnesota-Twin Cities, Minneapolis"
+"University of Missouri, Columbia, MO"
+"University of Missouri, Kansas City"
+University of Modena and Reggio
+"University of Modena and Reggio Emilia, Italy"
+"University of Montreal, 2Cornell University, 3Ecole Polytechnique of Montreal, 4CIFAR"
+"University of Nebraska Lincoln, USA"
+University of Nebraska - Lincoln
+University of Nevada Las Vegas
+"University of Nevada at Reno, USA"
+"University of Nevada, Reno, Reno, NV, USA"
+"University of Nevada, Reno, USA"
+University of Newcastle
+University of North Carolina
+University of North Carolina Wilmington
+University of North Carolina Wilmington in Partial Ful llment
+"University of North Carolina Wilmington, Wilmington, NC, USA"
+University of North Carolina at Chapel Hill
+"University of North Carolina at Chapel Hill, 2Adobe Research"
+"University of North Carolina at Chapel Hill, Chapel Hill, NC"
+"University of North Carolina at Chapel Hill, NC, USA"
+"University of North Carolina at Chapel Hill, USA"
+University of North Carolina at Charlotte
+University of North Texas
+"University of North Texas, Denton, Texas, USA"
+University of Northern British Columbia
+"University of Northern British Columbia, Canada"
+University of Notre Dame
+"University of Notre Dame, 2IIIT-Delhi"
+"University of Notre Dame, Notre Dame, IN, 46556, USA"
+"University of Notre Dame, USA"
+"University of Notre Dame. Notre Dame, IN 46556.USA"
+University of Nottingham
+University of Nottingham Ningbo China
+"University of Nottingham, Ningbo China"
+"University of Nottingham, Ningbo, China"
+"University of Nottingham, Nottingham, UK"
+"University of Nottingham, School of Psychology, University Park, Nottingham NG"
+"University of Nottingham, UK, School of Computer Science"
+University of Ontario Institute
+University of Oradea
+"University of Oradea 410087, Universitatii 1, Romania"
+University of Otago
+"University of Otago, Dunedin, New Zealand"
+University of Ottawa
+"University of Ottawa, Canada"
+"University of Ottawa, Ottawa, On, Canada"
+University of Oulu
+"University of Oulu, Finland"
+"University of Oviedo, Campus de Viesques, 33204 Gij n"
+University of Oxford
+University of Oxford 4Massachusetts Institute of Technology 5Google Research
+"University of Oxford, Oxford, UK"
+"University of Oxford, Oxford, United Kingdom"
+"University of Oxford, UK"
+"University of Oxford, United Kingdom"
+"University of Paderborn, Germany"
+"University of Patras, Greece"
+University of Pennsylvania
+"University of Pennsylvania Medical Center, Hospital of the University of Pennsylvania"
+"University of Pennsylvania School of Medicine, 1013 Blockley Hall"
+"University of Pennsylvania, 2Ryerson University"
+University of Perugia
+"University of Peshawar, Pakistan"
+"University of Peshawar, Peshawar, Pakistan"
+University of Piraeus
+"University of Pisa, Largo Lucio"
+"University of Pisa, Pisa, Italy"
+University of Pittsburgh
+"University of Pittsburgh and 2University of British Columbia, Vancouver, British Columbia, Canada"
+"University of Pittsburgh, PA, USA"
+"University of Pittsburgh, Pittsburgh"
+"University of Pittsburgh, Pittsburgh PA"
+"University of Pittsburgh, Pittsburgh, PA"
+"University of Pittsburgh, Pittsburgh, PA 15260, USA"
+"University of Pittsburgh, Pittsburgh, PA, USA"
+"University of Pittsburgh, Pittsburgh, USA"
+"University of Plymouth, UK"
+University of Posts and Telecommunications
+"University of Queensland, Australia"
+"University of Queensland, Brisbane, Australia"
+"University of Queensland, School of ITEE, QLD 4072, Australia"
+"University of Queensland, St Lucia QLD Australia, 5 Institut Universitaire de France, Paris, France"
+University of Rochester
+"University of Rochester and J. Luo, University of Rochester"
+"University of Rochester, NY 14627, USA"
+"University of Rochester, Rochester, NY, USA"
+University of S ao Paulo
+"University of S ao Paulo - USP, S ao Paulo - Brazil"
+"University of S ao Paulo, S ao Paulo, Brazil"
+"University of Salzburg, Austria"
+"University of Santiago de Compostela, Santiago de Compostela, Spain"
+University of Science and
+University of Science and Technology
+University of Science and Technology Beijing
+"University of Science and Technology Beijing, Beijing, China"
+University of Science and Technology of China
+"University of Science and Technology of China, Hefei 230026, P. R. China"
+"University of Science and Technology of China, Hefei, 230027, China"
+"University of Science and Technology of China, Hefei, Anhui, China"
+"University of Science and Technology of China, Hefei, Anhui, P. R. China"
+"University of Science and Technology of China, Hefei, China"
+"University of Science and Technology of China, Key Laboratory of Electromagnetic"
+"University of Science and Technology, Wuhan, 430074, China"
+"University of Science, Ho Chi Minh city"
+"University of Science, VNU-HCM, Viet Nam"
+"University of Science, VNU-HCMC, Ho Chi Minh city, Vietnam"
+"University of Science, Vietnam National University, HCMC"
+"University of Science, Vietnam National University-Ho Chi Minh city"
+University of Sfax
+"University of Shef eld, Regent Court, 211 Portobello, Shef eld"
+"University of Shef eld, UK"
+University of Siegen
+University of Siena
+"University of Siena, Siena, Italy"
+"University of Sk vde, Sweden"
+"University of South Carolina, Columbia, USA"
+"University of South Carolina, USA"
+"University of South Florida, Tampa, Florida, USA"
+University of Southampton
+"University of Southampton, SO17 1BJ, UK"
+"University of Southampton, UK, 2University of Warwick, UK"
+"University of Southampton, United Kingdom"
+University of Southern California
+"University of Southern California, 4A9, 5Amazon"
+"University of Southern California, Institute for Robotics and Intelligent Systems"
+"University of Southern California, Los Angeles, CA 90089, USA"
+"University of St Andrews, United Kingdom"
+University of Stuttgart
+University of Surrey
+"University of Surrey, Guildford, Surrey GU2 7XH, UK"
+"University of Surrey, UK"
+"University of Surrey, United Kingdom"
+University of Sydney
+"University of Szeged, 2 E tv s Lor nd University"
+"University of T ubingen, T ubingen, Germany"
+"University of Tabriz, Tabriz, Iran"
+University of Tampere
+"University of Tampere, Kanslerinnrinne 1, 33014, Tampere, Finland"
+University of Technology Sydney
+"University of Technology Sydney, 2 The University of Sydney"
+"University of Technology, Australia"
+"University of Technology, Baghdad, Iraq"
+"University of Technology, Guangzhou, 510640, P.R.China"
+"University of Technology, Sydney"
+"University of Technology, Sydney, 15 Broadway, Ultimo, NSW 2007, Australia"
+"University of Technology, Sydney, Australia"
+"University of Tennessee, Knoxville"
+University of Texas
+University of Texas at
+University of Texas at Arlington
+"University of Texas at Arlington, Arlington, TX"
+"University of Texas at Arlington, Arlington, TX, USA"
+"University of Texas at Arlington, Arlington, Texas 76019, USA"
+"University of Texas at Arlington, TX, USA"
+"University of Texas at Arlington, TX, USA, 2Beihang University, Beijing, China"
+"University of Texas at Arlington, Texas, USA"
+University of Texas at Austin
+University of Texas at San Antonio
+"University of Texas at San Antonio, USA"
+"University of Texas, Austin"
+"University of Texas, Austin, TX 78712-1188, USA"
+University of Thessaloniki
+University of Tokyo
+"University of Tokyo, 4-6-1 Shirokanedai"
+"University of Tokyo, Japan"
+University of Toronto
+University of Toronto 2Vector Institute
+University of Toronto and Recognyz Systems Technologies
+"University of Toronto, 6 Kings College Road, Toronto, ON M5S 3G4 CANADA"
+"University of Toronto, Canada"
+"University of Toronto, Toronto, ON M5S 2G4, Canada"
+"University of Toronto1, Twenty Billion Neurons"
+University of Toulouse
+University of Toulouse II Le Mirail
+University of Trento
+"University of Trento, Italy"
+"University of Trento, Trento, Italy"
+University of Tsukuba
+"University of Tsukuba, Japan"
+University of Twente
+University of Twente 2Dublin City University 3Oxford University
+"University of Twente, EEMCS, Netherlands"
+"University of Twente, Netherlands"
+"University of Twente, The Netherlands"
+"University of Ulsan, Ulsan, Republic of Korea"
+University of Venezia
+"University of Vermont, 33 Colchester Avenue, Burlington"
+University of Verona
+"University of Verona, Verona, Italy"
+University of Verona. 2Vienna Institute of Technology. 3ISTC CNR (Trento). 4University of Trento
+"University of Victoria, Victoria, Canada"
+"University of Vienna, Austria"
+"University of Vigo, Spain"
+University of Virginia
+"University of Virginia, Charlottesville, VA"
+"University of Waikato, Hamilton, New Zealand"
+University of Warwick
+University of Washington
+University of Washington 4The Allen Institute for AI
+University of Washington and Google Inc
+"University of Washington, Bothell"
+"University of Washington, Bothell, USA"
+"University of Washington, Seattle"
+"University of Washington, Seattle, USA"
+"University of Washington, Seattle, WA 98195, United States"
+"University of Washington, Seattle, WA, USA"
+University of Waterloo
+"University of Waterloo, ON, Canada"
+"University of Waterloo, Waterloo ON N2L3G1, Canada"
+"University of Waterloo, Waterloo, ON N2L 3G1, Canada"
+University of West Bohemia
+University of Western Australia
+University of Wisconsin Madison
+"University of Wisconsin Madison, USA"
+University of Wisconsin Madison
+University of Wisconsin-Madison
+"University of Wisconsin-Madison, Madison, WI, USA"
+University of Witwatersrand
+University of Wollongong
+University of Wollongong. For further information contact the UOW
+University of York
+"University of York, Heslington, York YO10 5GH, UK"
+"University of York, UK"
+"University of York, York, UK"
+"University of York, York, United Kingdom"
+"University of Zagreb, Faculty of Electrical Engineering and Computing"
+"University of Zagreb, Faculty of Electrical Engineering and Computing, Croatia"
+"University of Zagreb, Faculty of Electrical Engineering and Computing, Unska 3, 10000 Zagreb, Croatia"
+"University of Zagreb, Unska 3, 10 000 Zagreb"
+"University of Zaragoza, Spain"
+"University of Zurich, Zurich, Switzerland"
+"University of at Urbana-Champaign, Illinois, USA"
+"University of telecommunications and post, Sofia, Bulgaria"
+"University of the Basque Country, San Sebastian, Spain"
+"University of the South Paci c, Fiji"
+University of the Western Cape
+University of the Witwatersrand
+"University, China"
+"University, Chitorgarh. (INDIA"
+"University, Guangzhou, China"
+"University, Hong Kong"
+"University, Japan"
+"University, Shanghai, China"
+"University, Singapore"
+"University, Taiwan, R.O.C"
+"University, USA"
+"University, Varanasi, 221005, India"
+"University, Xi an Shaanxi Province, Xi an 710049, China"
+University: Dhirubhai Ambani Institute of Information and Communication Technology
+UniversityofMaryland
+"UniversityofMaryland, CollegePark, MD"
+Ural Federal University (UrFU
+Usman Institute of Technology
+"Utah State University, Logan UT"
+"Utah State University, Logan, UT 84322-4205, USA"
+"Utrecht Centre for Child and Adolescent Studies, Utrecht University, Utrecht, The Netherlands"
+"Utrecht University, Buys Ballotgebouw, Princetonplein 5, Utrecht, 3584CC, Netherlands"
+UvA-DARE is a service provided by the library of the University of Amsterdam (http://dare.uva.nl
+VEER SURENDRA SAI UNIVERSITY OF
+"VHNSN College, Virudhunagar, ANJA College"
+VICTORIA UNIVERSITY OF WELLINGTON
+"VISILAB group, University of Castilla-La Mancha, E.T.S.I.Industriales, Avda. Camilo Jose Cela s.n, 13071 Spain"
+"VISLab, EBUII-216, University of California Riverside"
+"VSB Technical University of Ostrava, 17. listopadu 15, 708 33 Ostrava, Czech Republic"
+"VSI Lab, Goethe University, Frankfurt, Germany"
+"VU University Amsterdam, Computational Lexicology and Terminology Lab, De"
+Vector Institute
+Vector Institute for Arti cial Intelligence
+"Vel Tech High Tech Dr Rangarajan Dr Sakunthala Engineering College, Avadi, Chennai, India"
+"VelTech Dr. R.R. and Dr. S.R. Technical University, Chennai"
+VelTech HighTech Dr. Rangarajan Dr.Sakunthala Engineering College
+"Vickram College of Engineering, Enathi, Tamil Nadu, India"
+Victoria University
+Victoria University of Wellington
+"Victoria University of Wellington, PO Box 600, Wellington 6140, New Zealand"
+Vienna University of Technology
+Vietnam National University Ho Chi
+Vietnam National University of Agriculture
+Villanova University
+Virginia Polytechnic Institute and State University
+"Virginia Polytechnic Institute and State University, Blacksburg"
+"Virginia Polytechnic Institute and State University, Blacksburg, Virginia"
+Virginia Tech Carilion Research Institute
+"Virudhunagar Hindu Nadars Senthikumara Nadar College, Virudhunagar"
+"Vision Laboratory, LARSyS, University of the Algarve, 8005-139 Faro, Portugal"
+"Vision Science Group, University of California"
+"Vision Systems, Inc"
+"Vision and Fusion Lab, Karlsruhe Institute of Technology KIT, Karlsruhe, Germany"
+"Vision and Fusion Laboratory (IES), Karlsruhe Institute of Technology (KIT"
+"Vision and Security Technology Lab, University of Colorado Colorado Springs"
+"Vision and Security Technology Lab, University of Colorado at Colorado Springs, Colorado"
+"Vision and Sensing, HCC Lab, ESTeM, University of Canberra"
+"Vision and Sensing, HCC, ESTeM, University of Canberra"
+"Visual Analysis of People Lab, Aalborg University, Denmark"
+"Visual Computing Center, King Abdullah University of Science and Technology (KAUST"
+"Visual Computing Institute, RWTH Aachen University"
+"Visual Computing and Communications Lab, Arizona State University"
+"Visual Geometry Group, University of Oxford"
+"Visual Geometry Group, University of Oxford, Oxford UK"
+"Visual Geometry Group, University of Oxford, UK"
+"Visualization and Computer Vision Lab, GE Global Research Center"
+"Viswajyothi College of Engineering and Technology Kerala, India"
+Vrije Universiteit Brussel
+"Vrije Universiteit Brussel, 1050 Brussels, Belgium"
+Warsaw University of Technology
+"Warsaw University of Technology, Poland"
+Waseda University
+"Waseda University, Tokyo, Japan"
+Washington University in St. Louis
+"Washington University, St. Louis, MO, USA"
+Wayne State University
+"Wayne State University, Detroit, MI 48202, USA"
+"We thank the support of New York State through the Goergen Institute for Data Science, our corporate research sponsors"
+Webster University
+Weizmann Institute of Science
+"Weizmann Institute of Science, Rehovot, Israel"
+"Welten Institute, Research Centre for Learning, Teaching and Technology, Faculty of"
+"Wenzhou University, China"
+"Wenzhou University, Wenzhou, China"
+West Virginia University
+"West Virginia University, Morgantown"
+"West Virginia University, Morgantown WV 26506, USA"
+"West Virginia University, Morgantown, USA"
+"West Virginia University, Morgantown, WV"
+"West Virginia University, Morgantown, WV 26506, USA"
+"West Virginia University, Morgantown, West Virginia, United States of America, 2. IIIT Delhi, New Delhi"
+Western Kentucky University
+"Western Sydney University, Parramatta, NSW 2150, Australia"
+"While visual features in single frames are vague and limited, multi-frame information, including deformation and pose"
+William Marsh Rice University
+Wittenberg University
+"Wittenberg University, and Dr. Michael Anes, Wittenberg University"
+Wolfson College
+Wuhan University
+"Wuhan University, Tencent AI Lab, National University of Singapore, University of Rochester"
+Xerox Research Center
+Xerox Research Center India
+Xerox Research Center Webster
+Xi an Jiaotong University
+"Xi an Jiaotong University, China"
+"Xi an Jiaotong University, Xi an, Shannxi 710049, P.R.China"
+"Xi'an Institute of Optics and Precision Mechanics, Chinese Academy of Sciences"
+Xiamen University
+"Xiamen University, Xiamen, China"
+"Xiamen University, Xiamen, Fujian, China"
+Xidian University
+Xidian University 2Xi an Jiaotong University 3Microsoft Research Asia
+"Xidian University, Xi an, China"
+"Xidian University, Xi an, China, 4 University of Pittsburgh, PA, USA"
+"Y ld z Teknik University, Istanbul, TR"
+"Y. Li, University of Maryland"
+"YUHANG ZHAO, Information Science, Cornell Tech, Cornell University"
+Yale University
+"Yarmouk University, Jordan"
+Yaroslavl State University
+Yeshiva University
+Yeungnam University
+Yonsei University
+York University
+"York University, Toronto"
+"York University, Toronto, Canada"
+"York University, Toronto, ON, Canada"
+"Young Researchers and Elite Club, Mashhad Branch, Islamic Azad University, Mashhad, Iran"
+"ZHAW Datalab, Zurich University of Applied Sciences"
+Zaragoza University
+"Zhejang University, Hangzhou 310027, P.R.China"
+"Zhejiang Normal University, Jinhua, China"
+Zhejiang University
+Zhejiang University of Technology
+"Zhejiang University, 2Southwest Jiaotong University, 3Carnegie Mellon University"
+"Zhejiang University, China"
+"Zhejiang University, Hangzhou, China"
+"Zhengzhou University, Zhengzhou, Henan 450052, China"
+"Zurich University of Applied Sciences, School of Engineering"
+"a Institute of Anatomy I, School of Medicine, Friedrich Schiller University, Germany"
+"a School of Computer and Information Technology, Beijing Jiaotong University, Beijing"
+"a Section of Biomedical Image Analysis, University of Pennsylvania, 3600 Market, Suite 380, Philadelphia, PA 19104, USA"
+"a The Robotics Institute, Carnegie Mellon University, 5000 Forbes Avenue, Pittsburgh, PA 15213, USA"
+a The University of Nottingham Malaysia Campus
+"aCenter for Combinatorics, Nankai University, Tianjin 300071, China"
+"aCenter for Spatial Information Science, University of Tokyo, Kashiwa 277-8568, Japan"
+"aCentre for Neuroscience, Indian Institute of Science, Bangalore, India"
+"aCollege of Computer Science at Chongqing University, 400044, Chongqing, P.R.C"
+"aDivision of Biology and Biological Engineering 156-29, Howard Hughes Medical Institute, California Institute of Technology, Pasadena, CA"
+"aFaculty of Electrical Engineering, University of Technology MARA, Shah Alam, 40450 Selangor, Malaysia"
+"aIBM China Research Lab, Beijing, China"
+"aImperial College London, London, UK"
+aInformation Sciences Institute
+"aLawrence Technological University, 21000 W Ten Mile Rd., South eld, MI 48075, United States"
+"aMILab, LCSEE, West Virginia University, Morgantown, West Virginia, USA"
+"aNo. 238 Songling Road, Ocean University of"
+"aPattern Recognition Laboratory, Delft University of Technology"
+"aResearch Scholar, Anna University, Chennai, Inida"
+"aSchool of Computing and Mathematics, Charles Sturt University, Bathurst, NSW"
+"aSchool of Electronic Information and Mechanics, China University of Geosciences, Wuhan, Hubei 430074, China"
+"aSchool of Engineering and Technology, University of Hertfordshire, Hat eld AL10 9AB, UK"
+"aSchool of Internet of Things Engineering, Jiangnan University, Wuxi 214122, China"
+"aSchool of Technology, University of Campinas"
+"aTurgut Ozal University, Ankara Turkey"
+"abroad, or from public or private research centers"
+"additional details of DCS descriptors, including visualization. For extending the evaluation"
+"and 2Center for Cognitive Neuroscience, Duke University, Durham, North Carolina 27708"
+"and Control, Hungarian Academy of Sciences, Budapest, Hungary, Chuo University"
+"and Engineering, Beihang University, Beijing, China"
+"and IBUG [32]. All of them cover large variations, including different"
+"and Mathematical Biosciences Institute, The Ohio State University"
+"and Media Informatics, Budapest University of Technology and Economics, Budapest, Hungary"
+"and Modeling, Rutgers University"
+"and Southeast University, China"
+"and bDivision of Engineering and Applied Sciences 136-93, California Institute of Technology, Pasadena, CA"
+"and education use, including for instruction at the authors institution"
+"and especially light angle, drastically change the appearance of a face [1]. Facial expressions, including"
+"and quantify distinct social behaviors, including those involving"
+and the institute of engineering and science
+any other University
+"applications has different requirements, including: processing time (off-line, on-line, or real-time"
+"applications, including texture classification [16], face recognition [12], object detection [10], and"
+at The Australian National University
+at West Virginia University
+at the Delft University of Technology
+at the University of Central Florida
+"atry, University of Pennsylvania School of Medicine, Philadelphia, PA"
+"b Brain Behavior Center, University of Pennsylvania Medical Center, Hospital of the University of Pennsylvania"
+"b Computer Science, School of Electrical and Data Engineering, University of Technology, Sydney"
+"b Computer Technology Institute, Beijing Union University, 100101, China"
+"b DEI - University of Padova, Via Gradenigo, 6 - 35131- Padova, Italy"
+b Institute for Robotics and Intelligent Systems
+"b Machine Perception Laboratory, Institute of Neural Computation, University of California, San Diego, United States"
+"b Research Institute, Watchdata Inc., Beijing, China"
+"b School of Applied Mathematics, Xidian University, Xi an, China"
+"b School of Business, Reykjavik University, Reykjavik, Iceland"
+"b The Interdisciplinary Center for Research on Emotions, University of"
+"bCVSSP, University of Surrey, Guildford, GU2 7XH, UK"
+"bCenter for Applied Mathematics, Tianjin University, Tianjin 300072, China"
+"bDiscipline of Business Analytics, The University of Sydney Business School"
+"bFaculty of Computers and Information, Assiut University, Egypt"
+"bFaculty of Engineering, International Islamic University, Jalan Gombak, 53100 Kuala Lumpur, Malaysia"
+"bMax Planck Institute for Informatics, Germany"
+"bRobotics Institute, Carnegie Mellon University, Pittsburgh, PA 15213, U.S.A"
+"bSchool of Automation, China University of Geosciences, Wuhan, Hubei 430074, China"
+"bSchool of Computer Science and Technology, Harbin Institute of Technology, Harbin 150001, China"
+"bSchool of Computer and Control Engineering, University of Chinese Academy of Sciences"
+"bSchool of Computer and Information Engineering, Xiamen University of Technology, Xiamen 361024, China"
+"bTsinghua University, Beijing, China"
+"bUniversity of Nottingham, School of Computer Science, Nottingham, UK"
+bourne University
+by grants from the National Institute of Mental Health (MH 15279 and MH067976 (K. Schmidt
+"c Cardiff Business School, Cardiff University, Cardiff, United Kingdom"
+"c Rotman Research Institute, Baycrest Centre for Geriatric Care, Toronto, Ont. M6A 2E1, Canada"
+"c School of Arts and Sciences, University of Pennsylvania Medical Center, Hospital of the University of Pennsylvania"
+"c School of Computational Science, Florida State University, Tallahassee, FL 32306, USA"
+c(cid:13) Carnegie Mellon University
+c(cid:13) Massachusetts Institute of Technology 2006. All rights reserved
+c(cid:13)The Chinese University of Hong Kong
+c(cid:176) Massachusetts Institute of Technology 2006. All rights reserved
+"c) 2016 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other users, including reprinting"
+"cCentre of Intelligent Machines, McGill University, Montr eal, QC H3A 0E9, Canada"
+"cFaculty of Electrical Engineering, Mathematics, and Computer Science, University of Twente, The Netherlands"
+"cFaculty of Information Engineering, China University of Geosciences (Wuhan), Wuhan 430074, China"
+"cHuizhou School Affiliated to Beijing Normal University, Huizhou 516002, China"
+"cSchool of Astronautics at Beihang University, 100191, Beijing, P.R.C"
+"cSchool of Computer Science, The University of Adelaide, Adelaide, SA 5005, Australia"
+cThe Open University
+"chael G. Foster School of Business, University of Washington, Seattle"
+cid:1) Honda Research Institute
+cid:1)Institute for Neural Computation
+cid:107)Chongqing University of Posts and Telecommunications
+"cid:130) Computer Perception Lab, California State University, Bakersfield, CA 93311, USA"
+"cid:2) Imperial College London, United Kingdom"
+"cid:2)Honda RandD Americas, Inc., Boston, MA, USA"
+"cid:2)Imperial College London, U.K"
+"cid:3) School of Software, Tsinghua University"
+cid:3)The Salk Institute and Howard Hughes Medical Institute
+"cid:5)School of Electrical and Computer Engineering, Purdue University, West Lafayette, IN. 47907, USA"
+"cid:63) Faculty of Computing, Information Systems and Mathematics, Kingston University London"
+"cid:63) Imperial College London, UK"
+"cid:63)Queen Mary University of London, Imperial College London"
+"cid:63)Sharif University of Technology, University College London, Queen Mary University of London"
+cid:63)Stanford University
+"cid:63)The Robotics Institute, Carnegie Mellon University"
+"cid:63)University of Science and Technology of China, Hefei, Anhui, China"
+"cid:92)School of Information Technologies, University of Sydney, Australia"
+"cid:93) Faculty of Science and Technology, University of Macau"
+"cid:93)Peking University Shenzhen Graduate School, Shenzhen, P.R.China"
+"cid:93)School of Electronic Engineering, Xidian University, China"
+cid:93)University of North Carolina at Charlotte
+college of Engineering
+comparisons with 12 instance-based classi ers on 13 benchmark University of California Irvine
+"con icting sensory information, i.e., incongruent facial muscle activity, this might impede"
+"do, Rep. of Korea, Kyung Hee University, Suwon, Rep. of Korea"
+"e ects of di erence factors, including age group, age gap"
+"e.g. PhD, MPhil, DClinPsychol) at the University of Edinburgh. Please note the following"
+eBay Research Labs
+"engineering, Government College of Engineering Kannur, Kerala, India"
+"f Neuropsychiatry Section, University of Pennsylvania Medical Center, Hospital of the University of Pennsylvania"
+"face processing, including age (Berry, 1990), sex (Hill"
+facultyofmathematicsandnaturalsciencesarti cialintelligence22-09-2016|1ATitleA.UthorRijksuniversiteitGroningenSomeFaculty
+"ganization, University of Southern California, Hoffman Hall 515, Los"
+"gelmeyer et al., 1996); and, increasingly, its role in reactions to"
+"general term, including collaboration. Interaction determines action on someone"
+"gies (Bughin et al. 2017). A range of other sectors, includ"
+"he University of Hong Kong, Pokfulam"
+"iCV Research Group, Institute of Technology, University of Tartu, Tartu 50411, Estonia"
+"iMinds - Ghent University, Technologiepark 15, B-9052 Ghent, Belgium"
+"image being generated by the model, include Active Appearance"
+in The University of Michigan
+"in signed languages, including American Sign Language (ASL). Gestures such"
+in the College of Engineering and Computer Science
+in the Graduate School of Duke University
+"inclusion in Senior Theses by an authorized administrator of Lake Forest College Publications. For more information, please contact"
+"instance has been detected (e.g., a face), it is be possible to obtain further information, including: (i"
+"is demonstrated using a variety of graphics applications, including cross"
+"learning. As a result of this research, many applications, including video surveillance systems"
+massachusetts institute of technology artificial intelligence laboratory
+"media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or"
+"ment of Psychology, University of California, Berkeley"
+ment. Oxford University Press Series in Affective Science. New York: Oxford
+"methods, including graph matching, optical- ow-based"
+"mpg.de, Max Planck Institute for Informatics"
+"obtained for all other uses, in any current or future media, including reprinting/republishing"
+"ods, including sensitivity to initialization, limited effectiveness in"
+of Cornell University
+"of Engineering and Information Technology, University of Technology, Sydney, Australia"
+"of Maryland, College Park, MD 20742, USA"
+"of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, China"
+"of Psychology, Princeton University, Princeton, NJ 08540. E-mail"
+"of Psychology, University of Michigan, Ann Arbor, MI, United States, University of Michigan, Ann"
+of Saarland University
+"of Science, Tilburg University"
+"of Technology, Kochi, Japan, 3 Yunnan Key Laboratory of Computer Technology Applications, Kunming University of Science"
+of bilkent university
+of the University of Notre Dame
+"other uses, in any current or future media, including reprinting/republishing this material for"
+"our analysis to stereotypes beyond gender, including those"
+"pelling applications, including cognitive assistance [29], life"
+"ples of such ne-grained descriptions, including attributes covering detailed"
+"point, lighting, and appearance. Many applications, including video surveillance systems"
+"puter Engineering, National University of Singapore, Singapore (e-mails"
+"recognition, such as human computer interfaces and e-services, including e-home"
+"subsection a table summarizing the key features of the database is provided, including (where available) the number of"
+"t2i Lab, Chalmers University of Technology, Gothenburg, Sweden"
+the Chinese University of Hong Kong
+"the Chinese University of Hong Kong, Shatin, Hong Kong"
+"the Diploma of Imperial College London. This thesis is entirely my own work, and, except"
+"the Indian Institute of Technology, Bombay and Monash University, Australia"
+"the Library: http://uba.uva.nl/en/contact, or a letter to: Library of the University of Amsterdam, Secretariat, Singel 425, 1012 WP Amsterdam"
+"the face, including negative affect and distress, dates"
+"those who possess it, including the ability to act based on one s"
+"tion [11, 10] is making possible very large scale visual recognition both in my own ongoing work, including"
+tional Taipei University for his help in performing simulations. The author would like to thank Mr. Ming
+"tional functions, including the effective assessment of"
+to Michigan State University
+"to process in all the illumination conditions, including total"
+"to visually detectable changes in facial appearance, including blushing and tears. These"
+"tum Computation and Intelligent Systems, Faculty of Engineering and Information Technology, University"
+"university, Shiraz, Iran"
+"uses, in any current or future media, including"
+"versity of Amsterdam, Amsterdam and University of Trento"
+via Institute of Electrical and Electronic Engineers at http://dx.doi.org/10.1109/TIP.2014.2367321. Please refer to
+"weighing of different types of information, including expected"
+y National Institute of Advanced Industrial Science and Technology
+yAristotle University of Thessaloniki
+yThe University of Tokyo
+years. According to the definition by the National Institute
diff --git a/scraper/reports/institutions.html b/scraper/reports/institutions.html
new file mode 100644
index 00000000..da79f6e5
--- /dev/null
+++ b/scraper/reports/institutions.html
@@ -0,0 +1 @@
+<!doctype html><html><head><title>Institutions</title><link rel='stylesheet' href='reports.css'></head><body><h2>Institutions</h2><table border='1' cellpadding='3' cellspacing='3'><tr><td>9f6d04ce617d24c8001a9a31f11a594bd6fe3510</td><td>1E1 WC Mackenzie Health Sciences Centre, University of Alberta, Edmonton, AB, Canada T6G 2R</td><td>Department of Psychiatry</td></tr><tr><td>63488398f397b55552f484409b86d812dacde99a</td><td>2 School of Computing, National University of Singapore</td><td>Department of Electrical and Computer Engineering</td></tr><tr><td>83fd2d2d5ad6e4e153672c9b6d1a3785f754b60e</td><td>2015 Wiley Periodicals, Inc</td><td></td></tr><tr><td>83fd2d2d5ad6e4e153672c9b6d1a3785f754b60e</td><td>2015 Wiley Periodicals, Inc</td><td></td></tr><tr><td>01cc8a712e67384f9ef9f30580b7415bfd71e980</td><td>2Program in Neuroscience, and 3Rotman Research Institute, University of Toronto, Toronto, Ontario M5S 3G3, Canada</td><td>Department of Psychology</td></tr><tr><td>c7f752eea91bf5495a4f6e6a67f14800ec246d08</td><td>A DISSERTATION SUBMITTED TO THE UNIVERSITY OF MANCHESTER</td><td></td></tr><tr><td>3d68cedd80babfbb04ab197a0b69054e3c196cd9</td><td>A Thesis submitted to McGill University in partial fulfillment of the requirements for the</td><td></td></tr><tr><td>25337690fed69033ef1ce6944e5b78c4f06ffb81</td><td>A dissertation submitted to the Faculty of the University of Delaware in partial</td><td></td></tr><tr><td>c32f04ccde4f11f8717189f056209eb091075254</td><td>A dissertation submitted to the University of Bristol in accordance with the requirements</td><td></td></tr><tr><td>e0244a8356b57a5721c101ead351924bcfb2eef4</td><td>A. 
van Kleef, University of Amsterdam</td><td>Department of Social Psychology</td></tr><tr><td>e82360682c4da11f136f3fccb73a31d7fd195694</td><td>AALTO UNIVERSITY</td><td></td></tr><tr><td>33402ee078a61c7d019b1543bb11cc127c2462d2</td><td>ACRV, The Australian National University University of Oxford QUVA Lab, University of Amsterdam</td><td></td></tr><tr><td>0559fb9f5e8627fecc026c8ee6f7ad30e54ee929</td><td>ADSIP Research Centre, University of Central Lancashire</td><td></td></tr><tr><td>ddf55fc9cf57dabf4eccbf9daab52108df5b69aa</td><td>ADSIP Research Centre, University of Central Lancashire</td><td></td></tr><tr><td>0c12cbb9b9740dfa2816b8e5cde69c2f5a715c58</td><td>AI Institute</td><td></td></tr><tr><td>3661a34f302883c759b9fa2ce03de0c7173d2bb2</td><td>AI Institute</td><td></td></tr><tr><td>e90e12e77cab78ba8f8f657db2bf4ae3dabd5166</td><td>AI Institute</td><td></td></tr><tr><td>361c9ba853c7d69058ddc0f32cdbe94fbc2166d5</td><td>ALICE Institute</td><td></td></tr><tr><td>8efda5708bbcf658d4f567e3866e3549fe045bbb</td><td>ALICE Institute</td><td></td></tr><tr><td>76b11c281ac47fe6d95e124673a408ee9eb568e3</td><td>ALPHA COLLEGE OF ENGINEERING, CHENNAI</td><td></td></tr><tr><td>1fd3dbb6e910708fa85c8a86e17ba0b6fef5617c</td><td>ARISTOTLE UNIVERSITY OF THESSALONIKI</td><td></td></tr><tr><td>8aae23847e1beb4a6d51881750ce36822ca7ed0b</td><td>ATR Human Information Processing Research Laboratories</td><td></td></tr><tr><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td><td>ATR Human Information Processing Research Laboratory</td><td></td></tr><tr><td>8aae23847e1beb4a6d51881750ce36822ca7ed0b</td><td>ATR Interpreting Telecommunications Research Laboratories</td><td></td></tr><tr><td>7ef44b7c2b5533d00001ae81f9293bdb592f1146</td><td>Aalborg University, Denmark</td><td></td></tr><tr><td>087002ab569e35432cdeb8e63b2c94f1abc53ea9</td><td>Aalborg University, Denmark</td><td>Department of Communication and Psychology</td></tr><tr><td>f6cf2108ec9d0f59124454d88045173aa328bd2e</td><td>Aalen University, 
Germany</td><td></td></tr><tr><td>08d41d2f68a2bf0091dc373573ca379de9b16385</td><td>Aalto University, Espoo, Finland</td><td>Department of Computer Science</td></tr><tr><td>8cc07ae9510854ec6e79190cc150f9f1fe98a238</td><td>Aarhus University, Finlandsgade 22 8200 Aarhus N, Denmark</td><td>Department of Engineering</td></tr><tr><td>032825000c03b8ab4c207e1af4daeb1f225eb025</td><td>Abdul WaliKhan University, Mardan, KPK, Pakistan</td><td>Department of Computer Science</td></tr><tr><td>9264b390aa00521f9bd01095ba0ba4b42bf84d7e</td><td>Aberystwyth University, UK</td><td></td></tr><tr><td>d9ef1a80738bbdd35655c320761f95ee609b8f49</td><td>Abha Gaikwad -Patil College of Engineering, Nagpur, Maharashtra, India</td><td></td></tr><tr><td>3d143cfab13ecd9c485f19d988242e7240660c86</td><td>Academic Center for Computing and Media Studies, Kyoto University, Kyoto 606-8501, Japan</td><td></td></tr><tr><td>16bce9f940bb01aa5ec961892cc021d4664eb9e4</td><td>Academy of Sciences (Grant No. KGZD-EW-T03), and project MMT-8115038 of the Shun Hing Institute of</td><td></td></tr><tr><td>458677de7910a5455283a2be99f776a834449f61</td><td>Achariya college of Engineering Technology</td><td></td></tr><tr><td>078d507703fc0ac4bf8ca758be101e75ea286c80</td><td>Acharya Institute Of Technology</td><td></td></tr><tr><td>4541c9b4b7e6f7a232bdd62ae653ba5ec0f8bbf6</td><td>Address correspondence to: Karen L. 
Schmidt, University of</td><td>Department of Psychiatry</td></tr><tr><td>1134a6be0f469ff2c8caab266bbdacf482f32179</td><td>Aditya College of Engineering, Surampalem, East Godavari</td><td>Department of Computer Science and Engineering</td></tr><tr><td>0861f86fb65aa915fbfbe918b28aabf31ffba364</td><td>Aditya Institute of Technology And Management, Tekkali, Srikakulam, Andhra Pradesh</td><td>Department of CSE</td></tr><tr><td>68a2ee5c5b76b6feeb3170aaff09b1566ec2cdf5</td><td>Aditya institute of Technology and Management, Tekkalli-532 201, A.P</td><td></td></tr><tr><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td><td>Adobe Systems, Inc., 345 Park Ave, San Jose, CA</td><td></td></tr><tr><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td><td>Advanced Digital Sciences Center (ADSC), University of Illinois at Urbana-Champaign, Singapore</td><td></td></tr><tr><td>34ce703b7e79e3072eed7f92239a4c08517b0c55</td><td>Advanced Digital Sciences Center, University of Illinois at Urbana-Champaign, Singapore</td><td></td></tr><tr><td>16bce9f940bb01aa5ec961892cc021d4664eb9e4</td><td>Advanced Engineering, The Chinese University of Hong Kong</td><td></td></tr><tr><td>2cc4ae2e864321cdab13c90144d4810464b24275</td><td>Advanced Imaging Science, Multimedia, and Film Chung-Ang University, Seoul</td><td></td></tr><tr><td>5aed0f26549c6e64c5199048c4fd5fdb3c5e69d6</td><td>Affiliated to Anna university, Chennai</td><td></td></tr><tr><td>beb3fd2da7f8f3b0c3ebceaa2150a0e65736d1a2</td><td>Affiliated to Guru Gobind Singh Indraprastha University, Delhi, India</td><td></td></tr><tr><td>68d40176e878ebffbc01ffb0556e8cb2756dd9e9</td><td>AgnelAnushya P. 
is currently pursuing M.E (Computer Science and engineering) at Vins Christian college of</td><td></td></tr><tr><td>eeb6d084f9906c53ec8da8c34583105ab5ab8284</td><td>Akita Prefectural University</td><td></td></tr><tr><td>37ef18d71c1ca71c0a33fc625ef439391926bfbb</td><td>Akita Prefectural University, Yurihonjo, Japan</td><td></td></tr><tr><td>eeb6d084f9906c53ec8da8c34583105ab5ab8284</td><td>Akita University</td><td></td></tr><tr><td>37ef18d71c1ca71c0a33fc625ef439391926bfbb</td><td>Akita University, Akita, Japan</td><td></td></tr><tr><td>718d3137adba9e3078fa1f698020b666449f3336</td><td>Al-Khwarizmi Institute of Computer Science</td><td></td></tr><tr><td>23aef683f60cb8af239b0906c45d11dac352fb4e</td><td>Alan W Black (Carnegie Mellon University</td><td></td></tr><tr><td>23aef683f60cb8af239b0906c45d11dac352fb4e</td><td>Alex Waibel (Carnegie Mellon University</td><td></td></tr><tr><td>6156eaad00aad74c90cbcfd822fa0c9bd4eb14c2</td><td>Alexandria University, Alexandria, Egypt</td><td></td></tr><tr><td>9a4c45e5c6e4f616771a7325629d167a38508691</td><td>Alexandria University, Alexandria, Egypt</td><td>Electrical Engineering Department</td></tr><tr><td>bd0201b32e7eca7818468f2b5cb1fb4374de75b9</td><td>Alin Moldoveanu, Faculty of Automatic Control and Computers, University POLITEHNICA of Bucharest</td><td></td></tr><tr><td>f08e425c2fce277aedb51d93757839900d591008</td><td>Allen Institute for Arti cial Intelligence</td><td></td></tr><tr><td>fa90b825346a51562d42f6b59a343b98ea2e501a</td><td>Allen Institute for Arti cial Intelligence</td><td></td></tr><tr><td>057d5f66a873ec80f8ae2603f937b671030035e6</td><td>Allen Institute for Arti cial Intelligence (AI</td><td></td></tr><tr><td>51eba481dac6b229a7490f650dff7b17ce05df73</td><td>Allen Institute for Arti cial Intelligence (AI2), Seattle, WA</td><td></td></tr><tr><td>02239ae5e922075a354169f75f684cad8fdfd5ab</td><td>Allen Institute for Arti cial Intelligence (AI2), Seattle, 
WA</td><td></td></tr><tr><td>38f7f3c72e582e116f6f079ec9ae738894785b96</td><td>Amal Jyothi College of Engineering, Kanjirappally, India</td><td>Department of CSE</td></tr><tr><td>009a18d04a5e3ec23f8ffcfc940402fd8ec9488f</td><td>Amazon, Inc</td><td></td></tr><tr><td>e0244a8356b57a5721c101ead351924bcfb2eef4</td><td>American University</td><td></td></tr><tr><td>4b7c110987c1d89109355b04f8597ce427a7cd72</td><td>American University, Washington, DC, USA</td><td>Department of Psychology and Center for Behavioral Neuroscience</td></tr><tr><td>00075519a794ea546b2ca3ca105e2f65e2f5f471</td><td>Amherst College</td><td></td></tr><tr><td>841bf196ee0086c805bd5d1d0bddfadc87e424ec</td><td>Amirkabir University of Technology</td><td></td></tr><tr><td>2d79d338c114ece1d97cde1aa06ab4cf17d38254</td><td>Amirkabir University of Technology, University of Southern California</td><td></td></tr><tr><td>0ce8a45a77e797e9d52604c29f4c1e227f604080</td><td>Amirkabir University of Technology, Tehran</td><td>Department of Electrical Engineering</td></tr><tr><td>e73b9b16adcf4339ff4d6723e61502489c50c2d9</td><td>Amirkabir University of Technology, Tehran</td><td>Department of Electrical Engineering</td></tr><tr><td>ceb763d6657a07b47e48e8a2956bcfdf2cf10818</td><td>Amirkabir University of Technology, Tehran</td><td>Department of Electrical Engineering</td></tr><tr><td>ae2c71080b0e17dee4e5a019d87585f2987f0508</td><td>Amirkabir University of Technology, Tehran, Iran</td><td>Department of Biomedical Engineering</td></tr><tr><td>ae2c71080b0e17dee4e5a019d87585f2987f0508</td><td>Amirkabir University of Technology, Tehran, Iran</td><td>Address: Department of Biomedical Engineering</td></tr><tr><td>488d3e32d046232680cc0ba80ce3879f92f35cac</td><td>Amirkabir University of Technology, Tehran. Iran</td><td>Department of Electrical Engineering</td></tr><tr><td>488d3e32d046232680cc0ba80ce3879f92f35cac</td><td>Amirkabir University of Technology, Tehran. 
Iran</td><td>Department of Electrical Engineering</td></tr><tr><td>33548531f9ed2ce6f87b3a1caad122c97f1fd2e9</td><td>Amity University</td><td></td></tr><tr><td>33548531f9ed2ce6f87b3a1caad122c97f1fd2e9</td><td>Amity University</td><td></td></tr><tr><td>33548531f9ed2ce6f87b3a1caad122c97f1fd2e9</td><td>Amity University</td><td></td></tr><tr><td>23fd653b094c7e4591a95506416a72aeb50a32b5</td><td>Amity University, Lucknow, India</td><td></td></tr><tr><td>23fd653b094c7e4591a95506416a72aeb50a32b5</td><td>Amity University, Lucknow, India</td><td></td></tr><tr><td>312b2566e315dd6e65bd42cfcbe4d919159de8a1</td><td>Amity University, Noida, India</td><td></td></tr><tr><td>44fbbaea6271e47ace47c27701ed05e15da8f7cf</td><td>Amsterdam; and 3Center for Experimental Economics and Political Decision Making, University of Amsterdam</td><td></td></tr><tr><td>4157e45f616233a0874f54a59c3df001b9646cd7</td><td>Anatomy and Genetics, University of Oxford, Oxford, United Kingdom; 3The Wellcome</td><td></td></tr><tr><td>af62621816fbbe7582a7d237ebae1a4d68fcf97d</td><td>AncyRijaV, Author is currently pursuing M.E (Software Engineering) in Vins Christian College of</td><td></td></tr><tr><td>27b1670e1b91ab983b7b1ecfe9eb5e6ba951e0ba</td><td>Anjuman College of Engineering and Technology, Sadar, Nagpur, India</td><td></td></tr><tr><td>2e1415a814ae9abace5550e4893e13bd988c7ba1</td><td>Anna University</td><td></td></tr><tr><td>3fde656343d3fd4223e08e0bc835552bff4bda40</td><td>Anna University Chennai, India</td><td>Department of Computer Science and Engineering</td></tr><tr><td>f69de2b6770f0a8de6d3ec1a65cb7996b3c99317</td><td>Anna University, Chennai</td><td></td></tr><tr><td>499343a2fd9421dca608d206e25e53be84489f44</td><td>Annamacharya Institute of Technology and Sciences, Tirupati, India</td><td>Department of ECE</td></tr><tr><td>a57ee5a8fb7618004dd1def8e14ef97aadaaeef5</td><td>Applied computing and mechanics laboratory, Swiss Federal Institute of Technology, 1015 Lausanne, 
Switzerland</td><td></td></tr><tr><td>0dbacb4fd069462841ebb26e1454b4d147cd8e98</td><td>Aristotle University of Thessaloniki</td><td>Department of Informatics</td></tr><tr><td>3cc46bf79fb9225cf308815c7d41c8dd5625cc29</td><td>Aristotle University of Thessaloniki</td><td></td></tr><tr><td>0be2245b2b016de1dcce75ffb3371a5e4b1e731b</td><td>Aristotle University of Thessaloniki</td><td>Department of Informatics</td></tr><tr><td>b3658514a0729694d86a8b89c875a66cde20480c</td><td>Aristotle University of Thessaloniki</td><td>Department of Informatics</td></tr><tr><td>b43b6551ecc556557b63edb8b0dc39901ed0343b</td><td>Aristotle University of Thessaloniki</td><td>Department of Informatics</td></tr><tr><td>c00df53bd46f78ae925c5768d46080159d4ef87d</td><td>Aristotle University of Thessaloniki</td><td></td></tr><tr><td>205e4d6e0de81c7dd6c83b737ffdd4519f4f7ffa</td><td>Aristotle University of Thessaloniki</td><td>Department of Informatics</td></tr><tr><td>8f5ce25e6e1047e1bf5b782d045e1dac29ca747e</td><td>Aristotle University of Thessaloniki</td><td></td></tr><tr><td>8f92cccacf2c84f5d69db3597a7c2670d93be781</td><td>Aristotle University of Thessaloniki</td><td></td></tr><tr><td>09cf3f1764ab1029f3a7d57b70ae5d5954486d69</td><td>Aristotle University of Thessaloniki</td><td>Department of Informatics</td></tr><tr><td>532f7ec8e0c8f7331417dd4a45dc2e8930874066</td><td>Aristotle University of Thessaloniki</td><td></td></tr><tr><td>3f7cf52fb5bf7b622dce17bb9dfe747ce4a65b96</td><td>Aristotle University of Thessaloniki</td><td></td></tr><tr><td>016f49a54b79ec787e701cc8c7d0280273f9b1ef</td><td>Aristotle University of Thessaloniki</td><td>Department of Informatics</td></tr><tr><td>52885fa403efbab5ef21274282edd98b9ca70cbf</td><td>Aristotle University of Thessaloniki</td><td></td></tr><tr><td>52885fa403efbab5ef21274282edd98b9ca70cbf</td><td>Aristotle University of Thessaloniki</td><td></td></tr><tr><td>d5afd7b76f1391321a1340a19ba63eec9e0f9833</td><td>Aristotle University of 
Thessaloniki</td><td></td></tr><tr><td>d5afd7b76f1391321a1340a19ba63eec9e0f9833</td><td>Aristotle University of Thessaloniki</td><td></td></tr><tr><td>c5f1ae9f46dc44624591db3d5e9f90a6a8391111</td><td>Aristotle University of Thessaloniki</td><td></td></tr><tr><td>8c6b9c9c26ead75ce549a57c4fd0a12b46142848</td><td>Aristotle University of Thessaloniki</td><td></td></tr><tr><td>3e04feb0b6392f94554f6d18e24fadba1a28b65f</td><td>Aristotle University of Thessaloniki GR</td><td>Department of Informatics</td></tr><tr><td>131bfa2ae6a04fd3b921ccb82b1c3f18a400a9c1</td><td>Aristotle University of Thessaloniki, Box 451, 54124 Thessaloniki, Greece</td><td></td></tr><tr><td>a2eb90e334575d9b435c01de4f4bf42d2464effc</td><td>Aristotle University of Thessaloniki, GR-541 24 Thessaloniki, Greece</td><td></td></tr><tr><td>6c6bb85a08b0bdc50cf8f98408d790ccdb418798</td><td>Aristotle University of Thessaloniki, GR-54124 Thessaloniki, Greece</td><td></td></tr><tr><td>ade1034d5daec9e3eba1d39ae3f33ebbe3e8e9a7</td><td>Aristotle University of Thessaloniki, Greece</td><td></td></tr><tr><td>2a65d7d5336b377b7f5a98855767dd48fa516c0f</td><td>Aristotle University of Thessaloniki, Greece</td><td>ECE Department</td></tr><tr><td>2d9e58ea582e054e9d690afca8b6a554c3687ce6</td><td>Aristotle University of Thessaloniki, Greece</td><td>ECE Department</td></tr><tr><td>5b9d41e2985fa815c0f38a2563cca4311ce82954</td><td>Aristotle University of Thessaloniki, Thessaloniki 541 24, Greece</td><td></td></tr><tr><td>e7b2b0538731adaacb2255235e0a07d5ccf09189</td><td>Aristotle University of Thessaloniki, Thessaloniki 541 24, Greece</td><td></td></tr><tr><td>4896909796f9bd2f70a2cb24bf18daacd6a12128</td><td>Aristotle University of Thessaloniki, Thessaloniki, Greece</td><td></td></tr><tr><td>62374b9e0e814e672db75c2c00f0023f58ef442c</td><td>AristotleUniversityofThessaloniki</td><td>DepartmentofInformatics</td></tr><tr><td>5f6ab4543cc38f23d0339e3037a952df7bcf696b</td><td>Arizona State 
University</td><td></td></tr><tr><td>5f6ab4543cc38f23d0339e3037a952df7bcf696b</td><td>Arizona State University</td><td></td></tr><tr><td>5f6ab4543cc38f23d0339e3037a952df7bcf696b</td><td>Arizona State University</td><td></td></tr><tr><td>9f499948121abb47b31ca904030243e924585d5f</td><td>Arizona State University</td><td></td></tr><tr><td>9f499948121abb47b31ca904030243e924585d5f</td><td>Arizona State University</td><td></td></tr><tr><td>9f499948121abb47b31ca904030243e924585d5f</td><td>Arizona State University</td><td></td></tr><tr><td>06f39834e870278243dda826658319be2d5d8ded</td><td>Arizona State University</td><td></td></tr><tr><td>468c8f09d2ad8b558b65d11ec5ad49208c4da2f2</td><td>Arizona State University</td><td></td></tr><tr><td>468c8f09d2ad8b558b65d11ec5ad49208c4da2f2</td><td>Arizona State University</td><td></td></tr><tr><td>48fea82b247641c79e1994f4ac24cad6b6275972</td><td>Arizona State University</td><td></td></tr><tr><td>4b4ecc1cb7f048235605975ab37bb694d69f63e5</td><td>Arizona State University, AZ, USA</td><td></td></tr><tr><td>bd9c9729475ba7e3b255e24e7478a5acb393c8e9</td><td>Arizona State University, Phoenix, Arizona</td><td></td></tr><tr><td>ce56be1acffda599dec6cc2af2b35600488846c9</td><td>Arizona State University, Tempe AZ</td><td>Department of Computer Science</td></tr><tr><td>15d653972d176963ef0ad2cc582d3b35ca542673</td><td>Arizona State University, Tempe AZ</td><td></td></tr><tr><td>5b721f86f4a394f05350641e639a9d6cb2046c45</td><td>Army Research Laboratory</td><td></td></tr><tr><td>ea890846912f16a0f3a860fce289596a7dac575f</td><td>Aron Szekely, University of Oxford, UK</td><td></td></tr><tr><td>989332c5f1b22604d6bb1f78e606cb6b1f694e1a</td><td>Arti cial Intelligence Institute, China</td><td></td></tr><tr><td>45215e330a4251801877070c85c81f42c2da60fb</td><td>Arts Media and Engineering, Arizona State University</td><td></td></tr><tr><td>ed08ac6da6f8ead590b390b1d14e8a9b97370794</td><td>Arts, Commerce and Science College, Gangakhed, M.S, India</td><td>Department of 
C.S.</td></tr><tr><td>35e87e06cf19908855a16ede8c79a0d3d7687b5c</td><td>Arts, Science and Commerce College, Chopda</td><td></td></tr><tr><td>656aeb92e4f0e280576cbac57d4abbfe6f9439ea</td><td>Asia Pacific University of Technology and Innovation, Kuala Lumpur 57000, Malaysia</td><td></td></tr><tr><td>a702fc36f0644a958c08de169b763b9927c175eb</td><td>Asia University, Taichung, Taiwan</td><td>Department of Applied Informatics and Multimedia</td></tr><tr><td>512befa10b9b704c9368c2fbffe0dc3efb1ba1bf</td><td>Asian Institute of Technology, Pathumthani, Thailand</td><td></td></tr><tr><td>3cd8ab6bb4b038454861a36d5396f4787a21cc68</td><td>Asian University, Taichung, Taiwan</td><td>Department of Applied Informatics and Multimedia</td></tr><tr><td>47bf7a8779c68009ea56a7c20e455ccdf0e3a8fa</td><td>Assam University, Silchar-788011 Assam University, Silchar</td><td></td></tr><tr><td>50eb2ee977f0f53ab4b39edc4be6b760a2b05f96</td><td>Assistant Lecturer, College of Science, Baghdad University, Baghdad, Iraq</td><td>Computer Science Department</td></tr><tr><td>9a4c45e5c6e4f616771a7325629d167a38508691</td><td>Assiut University, Assiut 71515, Egypt</td><td>Electrical Engineering Department</td></tr><tr><td>df054fa8ee6bb7d2a50909939d90ef417c73604c</td><td>Augmented Vision Lab, Technical University Kaiserslautern, Kaiserslautern, Germany</td><td></td></tr><tr><td>182470fd0c18d0c5979dff75d089f1da176ceeeb</td><td>Augsburg University, Germany</td><td></td></tr><tr><td>11a2ef92b6238055cf3f6dcac0ff49b7b803aee3</td><td>Australian Centre for Visual Technologies, The University of Adelaide, Australia (b</td><td></td></tr><tr><td>8820d1d3fa73cde623662d92ecf2e3faf1e3f328</td><td>Australian Institute of Sport</td><td></td></tr><tr><td>0d781b943bff6a3b62a79e2c8daf7f4d4d6431ad</td><td>Australian National University</td><td></td></tr><tr><td>9b684e2e2bb43862f69b12c6be94db0e7a756187</td><td>Australian National University</td><td></td></tr><tr><td>0573f3d2754df3a717368a6cbcd940e105d67f0b</td><td>Australian 
National University</td><td></td></tr><tr><td>0573f3d2754df3a717368a6cbcd940e105d67f0b</td><td>Australian National University</td><td></td></tr><tr><td>0573f3d2754df3a717368a6cbcd940e105d67f0b</td><td>Australian National University</td><td></td></tr><tr><td>060034b59275c13746413ca9c67d6304cba50da6</td><td>Australian National University</td><td></td></tr><tr><td>a7191958e806fce2505a057196ccb01ea763b6ea</td><td>Australian National University</td><td></td></tr><tr><td>fffa2943808509fdbd2fc817cc5366752e57664a</td><td>Australian National University</td><td></td></tr><tr><td>c58b7466f2855ffdcff1bebfad6b6a027b8c5ee1</td><td>Australian National University</td><td></td></tr><tr><td>33695e0779e67c7722449e9a3e2e55fde64cfd99</td><td>Australian National University and NICTA</td><td></td></tr><tr><td>306127c3197eb5544ab1e1bf8279a01e0df26120</td><td>Australian National University and NICTA, Australia</td><td></td></tr><tr><td>b1df214e0f1c5065f53054195cd15012e660490a</td><td>Australian National University and NICTA, Australia</td><td></td></tr><tr><td>062d0813815c2b9864cd9bb4f5a1dc2c580e0d90</td><td>Australian National University, 2CVLab, EPFL, Switzerland, 3Smart Vision Systems, CSIRO</td><td></td></tr><tr><td>1dc241ee162db246882f366644171c11f7aed96d</td><td>Australian National University, 2Smart Vision Systems, CSIRO, 3CVLab, EPFL</td><td></td></tr><tr><td>0641dbee7202d07b6c78a39eecd312c17607412e</td><td>Australian National University, Canberra</td><td></td></tr><tr><td>87309bdb2b9d1fb8916303e3866eca6e3452c27d</td><td>Australian National University, Canberra, ACT 0200, Australia</td><td></td></tr><tr><td>8724fc4d6b91eebb79057a7ce3e9dfffd3b1426f</td><td>Australian National University, Canberra, Australia</td><td></td></tr><tr><td>b4d694961d3cde43ccef7d8fcf1061fe0d8f97f3</td><td>Australian National University, and NICTA</td><td></td></tr><tr><td>b4d694961d3cde43ccef7d8fcf1061fe0d8f97f3</td><td>Australian National University, and 
NICTA</td><td></td></tr><tr><td>682760f2f767fb47e1e2ca35db3becbb6153756f</td><td>Author s addresses: X. Peng, University of Rochester; L. Chi</td><td>Data Science Department</td></tr><tr><td>16bce9f940bb01aa5ec961892cc021d4664eb9e4</td><td>Author s addresses: Z. Li and D. Gong, Shenzhen Institutes of Advanced Technology, Chinese Academy</td><td></td></tr><tr><td>d671a210990f67eba9b2d3dda8c2cb91575b4a7a</td><td>Autonomous University of Barcelona</td><td></td></tr><tr><td>4439746eeb7c7328beba3f3ef47dc67fbb52bcb3</td><td>Azad University of Qazvin</td><td></td></tr><tr><td>e73b9b16adcf4339ff4d6723e61502489c50c2d9</td><td>Azad University, Qazvin, Iran</td><td></td></tr><tr><td>632441c9324cd29489cee3da773a9064a46ae26b</td><td>B. Eng., Zhejiang University</td><td></td></tr><tr><td>00dc942f23f2d52ab8c8b76b6016d9deed8c468d</td><td>B. S. Rochester Institute of Technology</td><td></td></tr><tr><td>13b1b18b9cfa6c8c44addb9a81fe10b0e89db32a</td><td>B. Tech., Indian Institute of Technology Jodhpur</td><td></td></tr><tr><td>87dd3fd36bccbe1d5f1484ac05f1848b51c6eab5</td><td>B.A. Earlham College, Richmond Indiana</td><td></td></tr><tr><td>2bbbbe1873ad2800954058c749a00f30fe61ab17</td><td>B.E, K.S.Rangasamy College of Technology, Namakkal, TamilNadu, India</td><td>Department of CSE</td></tr><tr><td>1e5ca4183929929a4e6f09b1e1d54823b8217b8e</td><td>B.Eng., Nankai University</td><td></td></tr><tr><td>348a16b10d140861ece327886b85d96cce95711e</td><td>B.S. (Cornell University</td><td></td></tr><tr><td>ff5dd6f96e108d8233220cc262bc282229c1a582</td><td>B.S. Abdur Rahman University, Chennai-48, India</td><td>Department of Information Technology</td></tr><tr><td>ff5dd6f96e108d8233220cc262bc282229c1a582</td><td>B.S. Abdur Rahman University, Chennai-48, India</td><td>Department of Computer Science and Engineering</td></tr><tr><td>d082f35534932dfa1b034499fc603f299645862d</td><td>B.S. University of Central Florida</td><td></td></tr><tr><td>580e48d3e7fe1ae0ceed2137976139852b1755df</td><td>B.S. 
University of Indonesia</td><td></td></tr><tr><td>80135ed7e34ac1dcc7f858f880edc699a920bf53</td><td>B.S., Computer Engineering, Bo gazi ci University</td><td></td></tr><tr><td>d231a81b38fde73bdbf13cfec57d6652f8546c3c</td><td>B.S., E.E., Bo azi i University</td><td></td></tr><tr><td>eed7920682789a9afd0de4efd726cd9a706940c8</td><td>B.S., Pennsylvania State University</td><td></td></tr><tr><td>5e0e516226413ea1e973f1a24e2fdedde98e7ec0</td><td>B.S./M.S. Brandeis University</td><td></td></tr><tr><td>287795991fad3c61d6058352879c7d7ae1fdd2b6</td><td>B.S.Abdur Rahman University B.S.Abdur Rahman University</td><td></td></tr><tr><td>db1f48a7e11174d4a724a4edb3a0f1571d649670</td><td>B.Sc., University of Science and Technology of China</td><td></td></tr><tr><td>363ca0a3f908859b1b55c2ff77cc900957653748</td><td>B.Tech (C.S.E), Bharath University, Chennai</td><td></td></tr><tr><td>363ca0a3f908859b1b55c2ff77cc900957653748</td><td>B.Tech (C.S.E), Bharath University, Chennai</td><td></td></tr><tr><td>eb70c38a350d13ea6b54dc9ebae0b64171d813c9</td><td>B.Tech., Electronics Engineering, Institute of Technology, Banaras Hindu University</td><td></td></tr><tr><td>4abaebe5137d40c9fcb72711cdefdf13d9fc3e62</td><td>BECS, Aalto University School of Science and Technology, Finland</td><td></td></tr><tr><td>3e3a87eb24628ab075a3d2bde3abfd185591aa4c</td><td>BECS, Aalto University, Helsinki, Finland</td><td></td></tr><tr><td>60a006bdfe5b8bf3243404fae8a5f4a9d58fa892</td><td>BRIC, University of North Carolina at Chapel Hill, NC 27599, USA</td><td></td></tr><tr><td>5f676d6eca4c72d1a3f3acf5a4081c29140650fb</td><td>BRIC, University of North Carolina at Chapel Hill, NC 27599, USA</td><td></td></tr><tr><td>76e2d7621019bd45a5851740bd2742afdcf62837</td><td>Babes Bolyai University, 58-60 Teodor Mihali, C333, Cluj Napoca</td><td>Computer Science Department</td></tr><tr><td>032825000c03b8ab4c207e1af4daeb1f225eb025</td><td>Bacha Khan University, Charsadda, KPK, Pakistan</td><td>Department of Computer 
Science</td></tr><tr><td>ec90d333588421764dff55658a73bbd3ea3016d2</td><td>Bacha Khan University, Charsadda, KPK, Pakistan</td><td>Department of Computer Science</td></tr><tr><td>6d618657fa5a584d805b562302fe1090957194ba</td><td>Badji-Mokhtar University, P.O.Box 12, Annaba-23000, Algeria</td><td></td></tr><tr><td>c7de0c85432ad17a284b5b97c4f36c23f506d9d1</td><td>Bahc es ehir University, Istanbul, Turkey</td><td>Department of Electrical and Electronics Engineering</td></tr><tr><td>9dcc6dde8d9f132577290d92a1e76b5decc6d755</td><td>Bahcesehir University</td><td>Department of Electrical and Electronics Eng</td></tr><tr><td>ce933821661a0139a329e6c8243e335bfa1022b1</td><td>Baidu IDL and Tsinghua University</td><td></td></tr><tr><td>5bb87c7462c6c1ec5d60bde169c3a785ba5ea48f</td><td>Baidu Research Institute of Deep Learning</td><td></td></tr><tr><td>56a677c889e0e2c9f68ab8ca42a7e63acf986229</td><td>Baidu Research, USA 3John Hopkins University</td><td></td></tr><tr><td>4cc681239c8fda3fb04ba7ac6a1b9d85b68af31d</td><td>Baidu Research, USA 3John Hopkins University</td><td></td></tr><tr><td>4b7c110987c1d89109355b04f8597ce427a7cd72</td><td>Baingio Pinna, University of</td><td></td></tr><tr><td>48463a119f67ff2c43b7c38f0a722a32f590dfeb</td><td>Banaras Hindu University</td><td></td></tr><tr><td>48463a119f67ff2c43b7c38f0a722a32f590dfeb</td><td>Banaras Hindu University</td><td></td></tr><tr><td>48463a119f67ff2c43b7c38f0a722a32f590dfeb</td><td>Banaras Hindu University</td><td></td></tr><tr><td>8f5facdc0a2a79283864aad03edc702e2a400346</td><td>Bangalore Institute of Technology</td><td>Department of Elecronics and Instrumentation Engg</td></tr><tr><td>e5eb7fa8c9a812d402facfe8e4672670541ed108</td><td>Bangladesh University of Engineering and Technology(BUET</td><td></td></tr><tr><td>fe7e3cc1f3412bbbf37d277eeb3b17b8b21d71d5</td><td>Bapuji Institute of Engineering and Technology Davanagere, Karnataka, India</td><td>Department of Biomedical 
Engineering</td></tr><tr><td>c4934d9f9c41dbc46f4173aad2775432fe02e0e6</td><td>Bar Ilan University, Israel</td><td></td></tr><tr><td>4a2062ba576ca9e9a73b6aa6e8aac07f4d9344b9</td><td>Bas kent University</td><td></td></tr><tr><td>7aa062c6c90dba866273f5edd413075b90077b51</td><td>Baze University, Abuja, Nigeria</td><td>Department of Computer Science and IT</td></tr><tr><td>56f812661c3248ed28859d3b2b39e033b04ae6ae</td><td>Beckman Institute</td><td></td></tr><tr><td>56f812661c3248ed28859d3b2b39e033b04ae6ae</td><td>Beckman Institute</td><td></td></tr><tr><td>5185f2a40836a754baaa7419a1abdd1e7ffaf2ad</td><td>Beckman Institute</td><td></td></tr><tr><td>5185f2a40836a754baaa7419a1abdd1e7ffaf2ad</td><td>Beckman Institute</td><td></td></tr><tr><td>5185f2a40836a754baaa7419a1abdd1e7ffaf2ad</td><td>Beckman Institute</td><td></td></tr><tr><td>75d2ecbbcc934563dff6b39821605dc6f2d5ffcc</td><td>Beckman Institute</td><td></td></tr><tr><td>1b7ae509c8637f3c123cf6151a3089e6b8a0d5b2</td><td>Beckman Institute</td><td></td></tr><tr><td>4136a4c4b24c9c386d00e5ef5dffdd31ca7aea2c</td><td>Beckman Institute for Advanced Science and Technology</td><td></td></tr><tr><td>1d19c6857e798943cd0ecd110a7a0d514c671fec</td><td>Beckman Institute for Advanced Science and Technology</td><td></td></tr><tr><td>f87b22e7f0c66225824a99cada71f9b3e66b5742</td><td>Beckman Institute, University of Illinois at Urbana-Champaign</td><td></td></tr><tr><td>9b928c0c7f5e47b4480cb9bfdf3d5b7a29dfd493</td><td>Beckman Institute, University of Illinois at Urbana-Champaign, IL USA</td><td></td></tr><tr><td>0c6e29d82a5a080dc1db9eeabbd7d1529e78a3dc</td><td>Beckman Institute, University of Illinois at Urbana-Champaign, IL, USA</td><td></td></tr><tr><td>6d97e69bbba5d1f5c353f9a514d62aff63bc0fb1</td><td>Beckman Institute, University of Illinois at Urbana-Champaign, IL, USA</td><td></td></tr><tr><td>102b968d836177f9c436141e382915a4f8549276</td><td>Beckman Institute, University of Illinois at Urbana-Champaign, 
USA</td><td></td></tr><tr><td>6308e9c991125ee6734baa3ec93c697211237df8</td><td>Beckman Institute, University of Illinois at Urbana-Champaign, USA</td><td></td></tr><tr><td>eff87ecafed67cc6fc4f661cb077fed5440994bb</td><td>Beckman Institute, University of Illinois at Urbana-Champaign, USA</td><td></td></tr><tr><td>288d2704205d9ca68660b9f3a8fda17e18329c13</td><td>Beckman Institute, University of Illinois at Urbana-Champaign, Urbana, IL 61801, USA</td><td></td></tr><tr><td>539287d8967cdeb3ef60d60157ee93e8724efcac</td><td>Beckman Institute, University of Illinois at Urbana-Champaign, Urbana, IL 61801, USA</td><td></td></tr><tr><td>9ac43a98fe6fde668afb4fcc115e4ee353a6732d</td><td>Beckmann Institute, University of Illinois at Urbana-Champaign, USA</td><td></td></tr><tr><td>85fd2bda5eb3afe68a5a78c30297064aec1361f6</td><td>Behavioural Science Group, Warwick Business School, University of Warwick; and 4Faculty of Psychology</td><td></td></tr><tr><td>e7cac91da51b78eb4a28e194d3f599f95742e2a2</td><td>Behavioural Science Institute, Radboud University, Nijmegen, the Netherlands</td><td></td></tr><tr><td>7c2c9b083817f7a779d819afee383599d2e97ed8</td><td>Beihang University</td><td></td></tr><tr><td>d7d166aee5369b79ea2d71a6edd73b7599597aaa</td><td>Beihang University 2Gri th University 3University of York, UK</td><td></td></tr><tr><td>40b7e590dfd1cdfa1e0276e9ca592e02c1bd2b5b</td><td>Beihang University, 2The Chinese University of Hong Kong, 3Sensetime Group Limited</td><td></td></tr><tr><td>5b6593a6497868a0d19312952d2b753232414c23</td><td>Beihang University, Beijing 100191, China</td><td></td></tr><tr><td>570308801ff9614191cfbfd7da88d41fb441b423</td><td>Beihang University, Beijing, China</td><td></td></tr><tr><td>457cf73263d80a1a1338dc750ce9a50313745d1d</td><td>Beihang University, Beijing, China</td><td></td></tr><tr><td>86b6afc667bb14ff4d69e7a5e8bb2454a6bbd2cd</td><td>Beihang University, Beijing, China</td><td></td></tr><tr><td>b8375ff50b8a6f1a10dd809129a18df96888ac8b</td><td>Beihang 
University, Beijing, China</td><td></td></tr><tr><td>b191aa2c5b8ece06c221c3a4a0914e8157a16129</td><td>Beihang University, Beijing, China</td><td></td></tr><tr><td>928b8eb47288a05611c140d02441660277a7ed54</td><td>Beijing Institute of Technology</td><td></td></tr><tr><td>713db3874b77212492d75fb100a345949f3d3235</td><td>Beijing Institute of Technology</td><td></td></tr><tr><td>0ea7b7fff090c707684fd4dc13e0a8f39b300a97</td><td>Beijing Institute of Technology University, P. R. China</td><td></td></tr><tr><td>2a35d20b2c0a045ea84723f328321c18be6f555c</td><td>Beijing Institute of Technology, Beijing 100081 CHINA</td><td></td></tr><tr><td>2a35d20b2c0a045ea84723f328321c18be6f555c</td><td>Beijing Institute of Technology, Beijing 100081 CHINA</td><td></td></tr><tr><td>2a35d20b2c0a045ea84723f328321c18be6f555c</td><td>Beijing Institute of Technology, Beijing 100081 CHINA</td><td></td></tr><tr><td>a090d61bfb2c3f380c01c0774ea17929998e0c96</td><td>Beijing Institute of Technology, Beijing 100081, PR China</td><td></td></tr><tr><td>b3b532e8ea6304446b1623e83b0b9a96968f926c</td><td>Beijing Institute of Technology, Beijing, China</td><td></td></tr><tr><td>c829be73584966e3162f7ccae72d9284a2ebf358</td><td>Beijing Institute of Technology, Beijing, China</td><td></td></tr><tr><td>14d72dc9f78d65534c68c3ed57305f14bd4b5753</td><td>Beijing Institute of Technology, China</td><td>Department of Electronic Engineering</td></tr><tr><td>b5968e7bb23f5f03213178c22fd2e47af3afa04c</td><td>Beijing Jiaotong University</td><td></td></tr><tr><td>b5930275813a7e7a1510035a58dd7ba7612943bc</td><td>Beijing Jiaotong University</td><td></td></tr><tr><td>b42a97fb47bcd6bfa72e130c08960a77ee96f9ab</td><td>Beijing Jiaotong University</td><td></td></tr><tr><td>0a4fc9016aacae9cdf40663a75045b71e64a70c9</td><td>Beijing Jiaotong University</td><td></td></tr><tr><td>64782a2bc5da11b1b18ca20cecf7bdc26a538d68</td><td>Beijing Jiaotong University</td><td></td></tr><tr><td>a660390654498dff2470667b64ea656668c98ecc</td><td>Beijing 
Jiaotong University</td><td></td></tr><tr><td>e726174d516605f80ff359e71f68b6e8e6ec6d5d</td><td>Beijing Jiaotong University</td><td></td></tr><tr><td>35e0256b33212ddad2db548484c595334f15b4da</td><td>Beijing Lab of Intelligent Information Technology, School of Computer Science, Beijing Institute of Technology, China</td><td></td></tr><tr><td>6fbb179a4ad39790f4558dd32316b9f2818cd106</td><td>Beijing Laboratory of IIT, School of Computer Science, Beijing Institute of Technology, Beijing, China</td><td></td></tr><tr><td>3bb6570d81685b769dc9e74b6e4958894087f3f1</td><td>Beijing National Research Center for Information Science and Technology</td><td></td></tr><tr><td>7e18b5f5b678aebc8df6246716bf63ea5d8d714e</td><td>Beijing Normal University, China</td><td></td></tr><tr><td>16e95a907b016951da7c9327927bb039534151da</td><td>Beijing Union University, 100101, China</td><td></td></tr><tr><td>571b83f7fc01163383e6ca6a9791aea79cafa7dd</td><td>Beijing University of Chemical Technology, China</td><td></td></tr><tr><td>3dfb822e16328e0f98a47209d7ecd242e4211f82</td><td>Beijing University of Posts and Telecommunications</td><td></td></tr><tr><td>25c3cdbde7054fbc647d8be0d746373e7b64d150</td><td>Beijing University of Posts and Telecommunications</td><td></td></tr><tr><td>0294f992f8dfd8748703f953925f9aee14e1b2a2</td><td>Beijing University of Posts and Telecommunications, Beijing, China</td><td></td></tr><tr><td>80be8624771104ff4838dcba9629bacfe6b3ea09</td><td>Beijing University of Posts and Telecommunications, Beijing, China</td><td></td></tr><tr><td>5c820e47981d21c9dddde8d2f8020146e600368f</td><td>Beijing University of Posts and Telecommunications, Beijing, China</td><td></td></tr><tr><td>d3b0839324d0091e70ce34f44c979b9366547327</td><td>Beijing University of Posts and Telecommunications, Beijing, China</td><td></td></tr><tr><td>cdef0eaff4a3c168290d238999fc066ebc3a93e8</td><td>Beijing University of Posts and Telecommunications, Beijing, 
China</td><td></td></tr><tr><td>1275d6a800f8cf93c092603175fdad362b69c191</td><td>Beijing University of Posts and Telecommunications, Beijing, China</td><td></td></tr><tr><td>49820ae612b3c0590a8a78a725f4f378cb605cd1</td><td>Beijing University of Posts and Telecommunications, Beijing, China</td><td></td></tr><tr><td>a51882cfd0706512bf50e12c0a7dd0775285030d</td><td>Beijing University of Posts and Telecommunications, Beijing, China. 2School of</td><td></td></tr><tr><td>17579791ead67262fcfb62ed8765e115fb5eca6f</td><td>Beijing University of Posts and Telecommunications, Beijing, P.R. China</td><td></td></tr><tr><td>e0dc6f1b740479098c1d397a7bc0962991b5e294</td><td>Beijing University of Technology, Beijing 100022, China</td><td></td></tr><tr><td>7d9fe410f24142d2057695ee1d6015fb1d347d4a</td><td>Beijing, China</td><td>Computer and Information Engineering Department of Beijing Technology and Business University</td></tr><tr><td>7d9fe410f24142d2057695ee1d6015fb1d347d4a</td><td>Beijing, China</td><td>Computer and Information Engineering Department of Beijing Technology and Business University</td></tr><tr><td>1feeab271621128fe864e4c64bab9b2e2d0ed1f1</td><td>BeingTogether Centre, Institute for Media Innovation, Singapore 637553, Singapore</td><td></td></tr><tr><td>a9fc23d612e848250d5b675e064dba98f05ad0d9</td><td>Benha University, Egypt</td><td></td></tr><tr><td>a9fc23d612e848250d5b675e064dba98f05ad0d9</td><td>Benha University, Egypt</td><td></td></tr><tr><td>2c1f8ddbfbb224271253a27fed0c2425599dfe47</td><td>Berlin Institute of Technology</td><td></td></tr><tr><td>363ca0a3f908859b1b55c2ff77cc900957653748</td><td>Bharath Institute of Science and Technology</td><td>MCA Department</td></tr><tr><td>363ca0a3f908859b1b55c2ff77cc900957653748</td><td>Bharath Institute of Science and Technology</td><td>MCA Department</td></tr><tr><td>23b37c2f803a2d4b701e2f39c5f623b2f3e14d8e</td><td>Bharath University, India</td><td>Computer Science 
Department</td></tr><tr><td>23b37c2f803a2d4b701e2f39c5f623b2f3e14d8e</td><td>Bharath University, India</td><td>Computer Science Department</td></tr><tr><td>9c7444c6949427994b430787a153d5cceff46d5c</td><td>Bharathidasan University, Trichy, India</td><td></td></tr><tr><td>18cd79f3c93b74d856bff6da92bfc87be1109f80</td><td>Bharti Vidyapeeth Deemed University, Pune, India</td><td>Department of Information Tech.</td></tr><tr><td>0da4c3d898ca2fff9e549d18f513f4898e960aca</td><td>Bibliographic details for the item, including a URL</td><td></td></tr><tr><td>4ed54d5093d240cc3644e4212f162a11ae7d1e3b</td><td>Bielefeld University</td><td></td></tr><tr><td>1921e0a97904bdf61e17a165ab159443414308ed</td><td>Bielefeld University</td><td></td></tr><tr><td>2e1fd8d57425b727fd850d7710d38194fa6e2654</td><td>Bielefeld University</td><td></td></tr><tr><td>62fd622b3ca97eb5577fd423fb9efde9a849cbef</td><td>Big Data Institute, University of Oxford</td><td></td></tr><tr><td>928b8eb47288a05611c140d02441660277a7ed54</td><td>Big Data Research Center, University of Electronic Science and Technology of China</td><td></td></tr><tr><td>202d8d93b7b747cdbd6e24e5a919640f8d16298a</td><td>Bilgi University, Dolapdere, Istanbul, TR</td><td>Computer Science Department</td></tr><tr><td>0b6616f3ebff461e4b6c68205fcef1dae43e2a1a</td><td>Bilkent University</td><td></td></tr><tr><td>0b6616f3ebff461e4b6c68205fcef1dae43e2a1a</td><td>Bilkent University</td><td></td></tr><tr><td>887745c282edf9af40d38425d5fdc9b3fe139c08</td><td>Bilkent University</td><td></td></tr><tr><td>887745c282edf9af40d38425d5fdc9b3fe139c08</td><td>Bilkent University</td><td></td></tr><tr><td>1a6c9ef99bf0ab9835a91fe5f1760d98a0606243</td><td>Bilkent University, 06800 Cankaya, Turkey</td><td></td></tr><tr><td>95f26d1c80217706c00b6b4b605a448032b93b75</td><td>Bio-Computing Research Center, Shenzhen Graduate School, Harbin Institute of Technology, Shenzhen, Guangdong Province, China, 2 Key Laboratory of 
Network</td><td></td></tr><tr><td>17d01f34dfe2136b404e8d7f59cebfb467b72b26</td><td>Bioinformatics Institute, A*STAR, Singapore</td><td></td></tr><tr><td>353b6c1f431feac6edde12b2dde7e6e702455abd</td><td>Biometric Research Center</td><td>Department of Computing</td></tr><tr><td>74f643579949ccd566f2638b85374e7a6857a9fc</td><td>Biometric Research Center, The Hong Kong Polytechnic University</td><td>Department of Computing</td></tr><tr><td>757e4cb981e807d83539d9982ad325331cb59b16</td><td>Biometric and Image Processing Lab, University of Salerno, Italy</td><td></td></tr><tr><td>2c62b9e64aeddf12f9d399b43baaefbca8e11148</td><td>Biometrics Research Lab, College of Computer Science, Sichuan University, Chengdu 610065, China</td><td></td></tr><tr><td>5ca23ceb0636dfc34c114d4af7276a588e0e8dac</td><td>Birkbeck College, University of London</td><td></td></tr><tr><td>d492dbfaa42b4f8b8a74786d7343b3be6a3e9a1d</td><td>Birkbeck University of London</td><td></td></tr><tr><td>ac12ba5bf81de83991210b4cd95b4ad048317681</td><td>Bo gazi ci University</td><td>Department of Computer Engineering</td></tr><tr><td>80135ed7e34ac1dcc7f858f880edc699a920bf53</td><td>Bo gazi ci University</td><td></td></tr><tr><td>fbf196d83a41d57dfe577b3a54b1b7fa06666e3b</td><td>Bo gazi ci University, Turkey</td><td>Department of Computer Engineering</td></tr><tr><td>4c81c76f799c48c33bb63b9369d013f51eaf5ada</td><td>Bo gazic i University, Istanbul, Turkey</td><td>Department of Computer Engineering</td></tr><tr><td>6bcfcc4a0af2bf2729b5bc38f500cfaab2e653f0</td><td>Bo gazic i University, Istanbul, Turkey</td><td>Department of Computer Engineering</td></tr><tr><td>999289b0ef76c4c6daa16a4f42df056bf3d68377</td><td>Bo gazic i University, Istanbul, Turkey</td><td>Department of Computer Engineering</td></tr><tr><td>247a6b0e97b9447850780fe8dbc4f94252251133</td><td>Bo gazic i University, Istanbul, Turkey</td><td></td></tr><tr><td>247a6b0e97b9447850780fe8dbc4f94252251133</td><td>Bo gazic i University, Istanbul, 
Turkey</td><td></td></tr><tr><td>247a6b0e97b9447850780fe8dbc4f94252251133</td><td>Bo gazic i University, Istanbul, Turkey</td><td></td></tr><tr><td>202d8d93b7b747cdbd6e24e5a919640f8d16298a</td><td>Bo gazici University, Istanbul, TR</td><td>Electric and Electronic Engineering Department</td></tr><tr><td>f0681fc08f4d7198dcde803d69ca62f09f3db6c5</td><td>Bogazici University, Bebek</td><td>Electrical and Electronics Engineering Department</td></tr><tr><td>ade1034d5daec9e3eba1d39ae3f33ebbe3e8e9a7</td><td>Bogazici University, Turkey</td><td></td></tr><tr><td>968b983fa9967ff82e0798a5967920188a3590a8</td><td>Boston College</td><td></td></tr><tr><td>968b983fa9967ff82e0798a5967920188a3590a8</td><td>Boston College</td><td></td></tr><tr><td>77b1db2281292372c38926cc4aca32ef056011dc</td><td>Boston College, USA</td><td>Department of Psychology</td></tr><tr><td>0d3882b22da23497e5de8b7750b71f3a4b0aac6b</td><td>Boston College; 2Psychiatric Neuroimaging Program, Massachusetts General Hospital, Harvard Medical School; and 3Athinoula A. 
Martinos</td><td></td></tr><tr><td>5050807e90a925120cbc3a9cd13431b98965f4b9</td><td>Boston University</td><td>Department of Computer Science</td></tr><tr><td>6a2ac4f831bd0f67db45e7d3cdaeaaa075e7180a</td><td>Boston University</td><td>Department of Computer Science</td></tr><tr><td>52c59f9f4993c8248dd3d2d28a4946f1068bcbbe</td><td>Boston University</td><td></td></tr><tr><td>52c59f9f4993c8248dd3d2d28a4946f1068bcbbe</td><td>Boston University</td><td></td></tr><tr><td>52c59f9f4993c8248dd3d2d28a4946f1068bcbbe</td><td>Boston University</td><td></td></tr><tr><td>bffbd04ee5c837cd919b946fecf01897b2d2d432</td><td>Boston University</td><td></td></tr><tr><td>f60a85bd35fa85739d712f4c93ea80d31aa7de07</td><td>Boston University</td><td>Department of Computer Science</td></tr><tr><td>1e5a1619fe5586e5ded2c7a845e73f22960bbf5a</td><td>Boston University</td><td>Department of Electrical and Computer Engineering</td></tr><tr><td>13940d0cc90dbf854a58f92d533ce7053aac024a</td><td>Boston University</td><td></td></tr><tr><td>13940d0cc90dbf854a58f92d533ce7053aac024a</td><td>Boston University</td><td></td></tr><tr><td>fe961cbe4be0a35becd2d722f9f364ec3c26bd34</td><td>Boston University / **Rutgers University / ***Gallaudet University</td><td></td></tr><tr><td>aafb8dc8fda3b13a64ec3f1ca7911df01707c453</td><td>Boston University 2Pattern Analysis and Computer Vision (PAVIS</td><td>Department of Computer Science</td></tr><tr><td>bffbd04ee5c837cd919b946fecf01897b2d2d432</td><td>Boston University Computer Science Technical Report No</td><td></td></tr><tr><td>13940d0cc90dbf854a58f92d533ce7053aac024a</td><td>Boston University Theses and Dissertations</td><td></td></tr><tr><td>4850af6b54391fc33c8028a0b7fafe05855a96ff</td><td>Boston University and 2University of North Carolina</td><td></td></tr><tr><td>d7d9c1fa77f3a3b3c2eedbeb02e8e7e49c955a2f</td><td>Boston University, Boston, MA</td><td>Department of Computer Science</td></tr><tr><td>fe961cbe4be0a35becd2d722f9f364ec3c26bd34</td><td>Boston University, 
Linguistics Program, 621 Commonwealth Avenue, Boston, MA</td><td></td></tr><tr><td>33f7e78950455c37236b31a6318194cfb2c302a4</td><td>Boston University, USA</td><td>Computer Science Department</td></tr><tr><td>f6149fc5b39fa6b33220ccee32a8ee3f6bbcaf4a</td><td>Boston University1, University of Tokyo</td><td></td></tr><tr><td>199c2df5f2847f685796c2523221c6436f022464</td><td>Bournemouth University</td><td></td></tr><tr><td>370b6b83c7512419188f5373a962dd3175a56a9b</td><td>Bournemouth University</td><td></td></tr><tr><td>370b6b83c7512419188f5373a962dd3175a56a9b</td><td>Bournemouth University</td><td></td></tr><tr><td>370b6b83c7512419188f5373a962dd3175a56a9b</td><td>Bournemouth University</td><td></td></tr><tr><td>370b6b83c7512419188f5373a962dd3175a56a9b</td><td>Bournemouth University</td><td></td></tr><tr><td>dfd934ae448a1b8947d404b01303951b79b13801</td><td>Bournemouth University, UK</td><td></td></tr><tr><td>124538b3db791e30e1b62f81d4101be435ee12ef</td><td>Brain and Cognitive Sciences, Massachusetts Institute of Technology, Cambridge, MA, USA</td><td></td></tr><tr><td>2dfe0e7e81f65716b09c590652a4dd8452c10294</td><td>Brazil, University Hospital Zurich, Z rich</td><td>3 Department of Consultation-Liaison Psychiatry and Psychosomatic Medicine</td></tr><tr><td>df2841a1d2a21a0fc6f14fe53b6124519f3812f9</td><td>Brown University</td><td></td></tr><tr><td>df2841a1d2a21a0fc6f14fe53b6124519f3812f9</td><td>Brown University</td><td></td></tr><tr><td>e572c42d8ef2e0fadedbaae77c8dfe05c4933fbf</td><td>Brown University</td><td></td></tr><tr><td>1586871a1ddfe031b885b94efdbff647cf03eff1</td><td>Brown University</td><td></td></tr><tr><td>124538b3db791e30e1b62f81d4101be435ee12ef</td><td>Brown University</td><td>Department</td></tr><tr><td>b1451721864e836069fa299a64595d1655793757</td><td>Brown University 2University of Bath</td><td></td></tr><tr><td>1e58d7e5277288176456c66f6b1433c41ca77415</td><td>Brown University, 2University of California, San Diego, 3California Institute of 
Technology</td><td></td></tr><tr><td>334ac2a459190b41923be57744aa6989f9a54a51</td><td>Brown University, Providence, RI</td><td></td></tr><tr><td>cbbd13c29d042743f0139f1e044b6bca731886d0</td><td>Brown University, Providence, RI 02912, USA</td><td></td></tr><tr><td>faeefc5da67421ecd71d400f1505cfacb990119c</td><td>Brown University, United States</td><td></td></tr><tr><td>540b39ba1b8ef06293ed793f130e0483e777e278</td><td>Budapest, Hungary, E tv s Lor nd University, Budapest, Hungary, 3 Institute for Computer Science</td><td>2 Department of Ethology</td></tr><tr><td>68f89c1ee75a018c8eff86e15b1d2383c250529b</td><td>C.L. Teo, University of Maryland</td><td></td></tr><tr><td>7b43326477795a772c08aee750d3e433f00f20be</td><td>CALIFORNIA INSTITUTE OF TECHNOLOGY</td><td></td></tr><tr><td>514a74aefb0b6a71933013155bcde7308cad2b46</td><td>CARNEGIE MELLON UNIVERSITY</td><td></td></tr><tr><td>652aac54a3caf6570b1c10c993a5af7fa2ef31ff</td><td>CARNEGIE MELLON UNIVERSITY</td><td></td></tr><tr><td>0a79d0ba1a4876086e64fc0041ece5f0de90fbea</td><td>CARNEGIE MELLON UNIVERSITY</td><td></td></tr><tr><td>32a40c43a9bc1f1c1ed10be3b9f10609d7e0cb6b</td><td>CAS), Institute of Computing Technology, CAS, Beijing 100190, China</td><td></td></tr><tr><td>c03e01717b2d93f04cce9b5fd2dcfd1143bcc180</td><td>CAS), Institute of Computing Technology, CAS, Beijing 100190, China</td><td></td></tr><tr><td>090ff8f992dc71a1125636c1adffc0634155b450</td><td>CAS), Institute of Computing Technology, CAS, Beijing 100190, China</td><td></td></tr><tr><td>061e29eae705f318eee703b9e17dc0989547ba0c</td><td>CAS), Institute of Computing Technology, CAS, Beijing 100190, China</td><td></td></tr><tr><td>22e2066acfb795ac4db3f97d2ac176d6ca41836c</td><td>CAS), Institute of Computing Technology, CAS, Beijing 100190, China</td><td></td></tr><tr><td>d05513c754966801f26e446db174b7f2595805ba</td><td>CAS), Institute of Computing Technology, CAS, Beijing, 100190, 
China</td><td></td></tr><tr><td>80bd795930837330e3ced199f5b9b75398336b87</td><td>CAS), Institute of Computing Technology, CAS, Beijing, 100190, China</td><td></td></tr><tr><td>64d5772f44efe32eb24c9968a3085bc0786bfca7</td><td>CAS), Institute of Computing Technology, CAS, Beijing, 100190, China</td><td></td></tr><tr><td>d69271c7b77bc3a06882884c21aa1b609b3f76cc</td><td>CBSR and NLPR, Institute of Automation, Chinese Academy of Sciences, Beijing, China</td><td></td></tr><tr><td>dcf71245addaf66a868221041aabe23c0a074312</td><td>CBSR and NLPR, Institute of Automation, Chinese Academy of Sciences, Beijing, China</td><td></td></tr><tr><td>4cdb6144d56098b819076a8572a664a2c2d27f72</td><td>CBSRandNLPR, Institute of Automation, Chinese Academy of Sciences, Beijing, China</td><td></td></tr><tr><td>21258aa3c48437a2831191b71cd069c05fb84cf7</td><td>CISE, University of Florida, Gainesville, FL</td><td></td></tr><tr><td>3dbfd2fdbd28e4518e2ae05de8374057307e97b3</td><td>CISUC, University of Coimbra</td><td>Department of Informatics Engineering</td></tr><tr><td>45efd6c2dd4ca19eed38ceeb7c2c5568231451e1</td><td>CMR Institute of Technology, Hyderabad, (India</td><td></td></tr><tr><td>32925200665a1bbb4fc8131cd192cb34c2d7d9e3</td><td>CNRS , Institute of Automation of the Chinese Academy of Sciences</td><td></td></tr><tr><td>0c7f27d23a162d4f3896325d147f412c40160b52</td><td>COLUMBIA UNIVERSITY</td><td></td></tr><tr><td>abac0fa75281c9a0690bf67586280ed145682422</td><td>COLUMBIA UNIVERSITY</td><td></td></tr><tr><td>280bc9751593897091015aaf2cab39805768b463</td><td>COMSATS Institute of Information Technology</td><td></td></tr><tr><td>77c53ec6ea448db4dad586e002a395c4a47ecf66</td><td>COMSATS Institute of Information Technology Wah Cantt</td><td>Department of Computer Sciences</td></tr><tr><td>bc15a2fd09df7046e7e8c7c5b054d7f06c3cefe9</td><td>COMSATS Institute of Information Technology, Islamabad</td><td></td></tr><tr><td>a87e37d43d4c47bef8992ace408de0f872739efc</td><td>COMSATS Institute of Information 
Technology, Lahore 54000, Pakistan</td><td>Department of Computer Science</td></tr><tr><td>5aa57a12444dbde0f5645bd9bcec8cb2f573c6a0</td><td>COMSATS Institute of Information Technology, Pakistan</td><td>Department of Computer Science</td></tr><tr><td>6dd2a0f9ca8a5fee12edec1485c0699770b4cfdf</td><td>CRCV, University of Central Florida</td><td></td></tr><tr><td>39ed31ced75e6151dde41944a47b4bdf324f922b</td><td>CRIPAC and NLPR and CEBSIT, CASIA 2University of Chinese Academy of Sciences</td><td></td></tr><tr><td>831b4d8b0c0173b0bac0e328e844a0fbafae6639</td><td>CUHK - SenseTime Joint Lab, The Chinese University of Hong Kong</td><td></td></tr><tr><td>1316296fae6485c1510f00b1b57fb171b9320ac2</td><td>CUHK - SenseTime Joint Lab, The Chinese University of Hong Kong</td><td></td></tr><tr><td>06262d6beeccf2784e4e36a995d5ee2ff73c8d11</td><td>CUHK - SenseTime Joint Lab, The Chinese University of Hong Kong 2Amazon Rekognition</td><td></td></tr><tr><td>51faacfa4fb1e6aa252c6970e85ff35c5719f4ff</td><td>CUHK-SenseTime Joint Lab, The Chinese University of Hong Kong</td><td></td></tr><tr><td>d78734c54f29e4474b4d47334278cfde6efe963a</td><td>CUHK-SenseTime Joint Lab, The Chinese University of Hong Kong</td><td></td></tr><tr><td>c97a5f2241cc6cd99ef0c4527ea507a50841f60b</td><td>CUHK-SenseTime Joint Lab, The Chinese University of Hong Kong</td><td></td></tr><tr><td>1c9efb6c895917174ac6ccc3bae191152f90c625</td><td>CUHK-SenseTime Joint Lab, The Chinese University of Hong Kong</td><td></td></tr><tr><td>59be98f54bb4ed7a2984dc6a3c84b52d1caf44eb</td><td>CUNY City College</td><td></td></tr><tr><td>91495c689e6e614247495c3f322d400d8098de43</td><td>CUNY City College</td><td></td></tr><tr><td>59be98f54bb4ed7a2984dc6a3c84b52d1caf44eb</td><td>CUNY Graduate Center and City College</td><td></td></tr><tr><td>91495c689e6e614247495c3f322d400d8098de43</td><td>CUNY Graduate Center and City College</td><td></td></tr><tr><td>12d8730da5aab242795bdff17b30b6e0bac82998</td><td>CVAP, KTH (Royal Institute of 
Technology), Stockholm, SE</td><td></td></tr><tr><td>9a4c45e5c6e4f616771a7325629d167a38508691</td><td>CVIP Lab, University of Louisville, Louisville, KY 40292, USA</td><td></td></tr><tr><td>6156eaad00aad74c90cbcfd822fa0c9bd4eb14c2</td><td>CVIP Lab, University of Louisville, Louisville, KY, USA</td><td></td></tr><tr><td>0181fec8e42d82bfb03dc8b82381bb329de00631</td><td>CVL, Link oping University, Link oping, Sweden</td><td>Department of Electrical Engineering</td></tr><tr><td>bb489e4de6f9b835d70ab46217f11e32887931a2</td><td>CVSSP University of Surrey</td><td></td></tr><tr><td>c74b1643a108939c6ba42ae4de55cb05b2191be5</td><td>CVSSP, University of Surrey</td><td></td></tr><tr><td>c74b1643a108939c6ba42ae4de55cb05b2191be5</td><td>CVSSP, University of Surrey</td><td></td></tr><tr><td>c74b1643a108939c6ba42ae4de55cb05b2191be5</td><td>CVSSP, University of Surrey</td><td></td></tr><tr><td>70a69569ba61f3585cd90c70ca5832e838fa1584</td><td>CVSSP, University of Surrey, UK</td><td></td></tr><tr><td>54a9ed950458f4b7e348fa78a718657c8d3d0e05</td><td>Ca Foscari University of Venice, Venice, Italy</td><td></td></tr><tr><td>a955033ca6716bf9957b362b77092592461664b4</td><td>Caarmel Engineering College, MG University, Kerala, India</td><td>Department of CSE</td></tr><tr><td>9f6d04ce617d24c8001a9a31f11a594bd6fe3510</td><td>Calgary, 2500 University Dr., N.W. Calgary, AB, Canada T2N 1N4. 
Tel</td><td></td></tr><tr><td>0e73d2b0f943cf8559da7f5002414ccc26bc77cd</td><td>California Institute of Technology</td><td></td></tr><tr><td>34108098e1a378bc15a5824812bdf2229b938678</td><td>California Institute of Technology</td><td></td></tr><tr><td>100da509d4fa74afc6e86a49352751d365fceee5</td><td>California Institute of Technology</td><td></td></tr><tr><td>384945abd53f6a6af51faf254ba8ef0f0fb3f338</td><td>California Institute of Technology</td><td></td></tr><tr><td>38bbca5f94d4494494860c5fe8ca8862dcf9676e</td><td>California Institute of Technology</td><td></td></tr><tr><td>53d78c8dbac7c9be8eb148c6a9e1d672f1dd72f9</td><td>California Institute of Technology</td><td></td></tr><tr><td>060820f110a72cbf02c14a6d1085bd6e1d994f6a</td><td>California Institute of Technology</td><td></td></tr><tr><td>8d4f12ed7b5a0eb3aa55c10154d9f1197a0d84f3</td><td>California Institute of Technology</td><td></td></tr><tr><td>f0f0e94d333b4923ae42ee195df17c0df62ea0b1</td><td>California Institute of Technology, 1200 East California Boulevard Pasadena, California, USA</td><td></td></tr><tr><td>00f1e5e954f9eb7ffde3ca74009a8c3c27358b58</td><td>California Institute of Technology, Pasadena, CA</td><td></td></tr><tr><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td><td>California Institute of Technology, Pasadena, CA, USA</td><td></td></tr><tr><td>72282287f25c5419dc6fd9e89ec9d86d660dc0b5</td><td>California Institute of Technology, Pasadena, CA, USA</td><td></td></tr><tr><td>14070478b8f0d84e5597c3e67c30af91b5c3a917</td><td>California Institute of Technology, Pasadena, California, USA</td><td></td></tr><tr><td>241d2c517dbc0e22d7b8698e06ace67de5f26fdf</td><td>California Institute of Technology, USA</td><td></td></tr><tr><td>fafe69a00565895c7d57ad09ef44ce9ddd5a6caa</td><td>California State University, Fullerton, USA</td><td></td></tr><tr><td>f0ca31fd5cad07e84b47d50dc07db9fc53482a46</td><td>California State University, Long Beach, USA</td><td>Department of Mathematics and 
Statistics</td></tr><tr><td>4ba38262fe20fab3e4c80215147b498f83843b93</td><td>Cambridge Research Laboratory</td><td></td></tr><tr><td>0aa74ad36064906e165ac4b79dec298911a7a4db</td><td>Cambridge University</td><td></td></tr><tr><td>0aa74ad36064906e165ac4b79dec298911a7a4db</td><td>Cambridge University</td><td></td></tr><tr><td>05a312478618418a2efb0a014b45acf3663562d7</td><td>Cambridge University, Trumpington Street, Cambridge CB21PZ, UK</td><td></td></tr><tr><td>e2d265f606cd25f1fd72e5ee8b8f4c5127b764df</td><td>Canadian Institute for Advanced Research</td><td></td></tr><tr><td>16e95a907b016951da7c9327927bb039534151da</td><td>Capital Normal University, 100048, China</td><td></td></tr><tr><td>528069963f0bd0861f380f53270c96c269a3ea1c</td><td>Cardi University</td><td></td></tr><tr><td>b87b0fa1ac0aad0ca563844daecaeecb2df8debf</td><td>Cardiff University, UK</td><td></td></tr><tr><td>5df376748fe5ccd87a724ef31d4fdb579dab693f</td><td>Carleton University</td><td></td></tr><tr><td>158e32579e38c29b26dfd33bf93e772e6211e188</td><td>Carleton University</td><td></td></tr><tr><td>0daf696253a1b42d2c9d23f1008b32c65a9e4c1e</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>0c30f6303dc1ff6d05c7cee4f8952b74b9533928</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>3b37d95d2855c8db64bd6b1ee5659f87fce36881</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>6f84e61f33564e5188136474f9570b1652a0606f</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>9eb86327c82b76d77fee3fd72e2d9eff03bbe5e0</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>6a67e6fbbd9bcd3f724fe9e6cecc9d48d1b6ad4d</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>6ae96f68187f1cdb9472104b5431ec66f4b2470f</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>35f921def890210dda4b72247849ad7ba7d35250</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>51683eac8bbcd2944f811d9074a74d09d395c7f3</td><td>Carnegie Mellon 
University</td><td></td></tr><tr><td>3df8cc0384814c3fb05c44e494ced947a7d43f36</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>0e36ada8cb9c91f07c9dcaf196d036564e117536</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>34c594abba9bb7e5813cfae830e2c4db78cf138c</td><td>Carnegie Mellon University</td><td>Carnegie Mellon University. 2Department of Electrical and Computer Engineering</td></tr><tr><td>050eda213ce29da7212db4e85f948b812a215660</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>a36c8a4213251d3fd634e8893ad1b932205ad1ca</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>d9c4586269a142faee309973e2ce8cde27bda718</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>b3b467961ba66264bb73ffe00b1830d7874ae8ce</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>df9269657505fcdc1e10cf45bbb8e325678a40f5</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>daa52dd09b61ee94945655f0dde216cce0ebd505</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>bd236913cfe07896e171ece9bda62c18b8c8197e</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>bd8f77b7d3b9d272f7a68defc1412f73e5ac3135</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>eb70c38a350d13ea6b54dc9ebae0b64171d813c9</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>e3bb83684817c7815f5005561a85c23942b1f46b</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>e3bb83684817c7815f5005561a85c23942b1f46b</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>e3bb83684817c7815f5005561a85c23942b1f46b</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>ca37eda56b9ee53610c66951ee7ca66a35d0a846</td><td>Carnegie Mellon University</td><td>Machine Learning Department</td></tr><tr><td>c88c21eb9a8e08b66c981db35f6556f4974d27a8</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>edff76149ec44f6849d73f019ef9bded534a38c2</td><td>Carnegie Mellon 
University</td><td></td></tr><tr><td>c6096986b4d6c374ab2d20031e026b581e7bf7e9</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>20a16efb03c366fa4180659c2b2a0c5024c679da</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>20c02e98602f6adf1cebaba075d45cef50de089f</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>4b61d8490bf034a2ee8aa26601d13c83ad7f843a</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>11b3877df0213271676fa8aa347046fd4b1a99ad</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>29479bb4fe8c04695e6f5ae59901d15f8da6124b</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>29479bb4fe8c04695e6f5ae59901d15f8da6124b</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>29479bb4fe8c04695e6f5ae59901d15f8da6124b</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>7c825562b3ff4683ed049a372cb6807abb09af2a</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>45f858f9e8d7713f60f52618e54089ba68dfcd6d</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>1f89439524e87a6514f4fbe7ed34bda4fd1ce286</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>1f89439524e87a6514f4fbe7ed34bda4fd1ce286</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>87e6cb090aecfc6f03a3b00650a5c5f475dfebe1</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>74e869bc7c99093a5ff9f8cfc3f533ccf1b135d8</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>176bd61cc843d0ed6aa5af83c22e3feb13b89fe1</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>107fc60a6c7d58a6e2d8572ad8c19cc321a9ef53</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>192235f5a9e4c9d6a28ec0d333e36f294b32f764</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>4d15254f6f31356963cc70319ce416d28d8924a3</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>4d16337cc0431cd43043dfef839ce5f0717c3483</td><td>Carnegie Mellon 
University</td><td></td></tr><tr><td>4d16337cc0431cd43043dfef839ce5f0717c3483</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>4d16337cc0431cd43043dfef839ce5f0717c3483</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>4d16337cc0431cd43043dfef839ce5f0717c3483</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>4d16337cc0431cd43043dfef839ce5f0717c3483</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>86ed5b9121c02bcf26900913f2b5ea58ba23508f</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>9fc04a13eef99851136eadff52e98eb9caac919d</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>9f4078773c8ea3f37951bf617dbce1d4b3795839</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>6b3e360b80268fda4e37ff39b7f303e3684e8719</td><td>Carnegie Mellon University</td><td>Department of Electrical and Computer Engineering</td></tr><tr><td>6b17b219bd1a718b5cd63427032d93c603fcf24f</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>6b17b219bd1a718b5cd63427032d93c603fcf24f</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>6b17b219bd1a718b5cd63427032d93c603fcf24f</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>6b17b219bd1a718b5cd63427032d93c603fcf24f</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>6b17b219bd1a718b5cd63427032d93c603fcf24f</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>07de8371ad4901356145722aa29abaeafd0986b9</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>6eece104e430829741677cadc1dfacd0e058d60f</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>5c36d8bb0815fd4ff5daa8351df4a7e2d1b32934</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>3146fabd5631a7d1387327918b184103d06c2211</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>963d0d40de8780161b70d28d2b125b5222e75596</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>963d0d40de8780161b70d28d2b125b5222e75596</td><td>Carnegie Mellon 
University</td><td></td></tr><tr><td>963d0d40de8780161b70d28d2b125b5222e75596</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>3a2cf589f5e11ca886417b72c2592975ff1d8472</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>3a2cf589f5e11ca886417b72c2592975ff1d8472</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>9893865afdb1de55fdd21e5d86bbdb5daa5fa3d5</td><td>Carnegie Mellon University</td><td>Department of Electrical and Computer Engineering</td></tr><tr><td>3fefc856a47726d19a9f1441168480cee6e9f5bb</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>3fefc856a47726d19a9f1441168480cee6e9f5bb</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>3feb69531653e83d0986a0643e4a6210a088e3e5</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>3feb69531653e83d0986a0643e4a6210a088e3e5</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>5e80e2ffb264b89d1e2c468fbc1b9174f0e27f43</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>6dbdb07ce2991db0f64c785ad31196dfd4dae721</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>062d67af7677db086ef35186dc936b4511f155d7</td><td>Carnegie Mellon University</td><td>Machine Learning Department</td></tr><tr><td>97e569159d5658760eb00ca9cb662e6882d2ab0e</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>97e569159d5658760eb00ca9cb662e6882d2ab0e</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>97e569159d5658760eb00ca9cb662e6882d2ab0e</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>0abf67e7bd470d9eb656ea2508beae13ca173198</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>d35534f3f59631951011539da2fe83f2844ca245</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>a0b1990dd2b4cd87e4fd60912cc1552c34792770</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>a0b1990dd2b4cd87e4fd60912cc1552c34792770</td><td>Carnegie Mellon 
University</td><td></td></tr><tr><td>a0b1990dd2b4cd87e4fd60912cc1552c34792770</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>a7c39a4e9977a85673892b714fc9441c959bf078</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>b1fdd4ae17d82612cefd4e78b690847b071379d3</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>db848c3c32464d12da33b2f4c3a29fe293fc35d1</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>e19fb22b35c352f57f520f593d748096b41a4a7b</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>fac8cff9052fc5fab7d5ef114d1342daba5e4b82</td><td>Carnegie Mellon University</td><td>Department of Statistics</td></tr><tr><td>fac8cff9052fc5fab7d5ef114d1342daba5e4b82</td><td>Carnegie Mellon University</td><td>Department of Statistics</td></tr><tr><td>fac8cff9052fc5fab7d5ef114d1342daba5e4b82</td><td>Carnegie Mellon University</td><td>Department of Statistics</td></tr><tr><td>f1b4583c576d6d8c661b4b2c82bdebf3ba3d7e53</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>f8015e31d1421f6aee5e17fc3907070b8e0a5e59</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>839a2155995acc0a053a326e283be12068b35cb8</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>7792fbc59f3eafc709323cdb63852c5d3a4b23e9</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>48a9241edda07252c1aadca09875fabcfee32871</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>48a9241edda07252c1aadca09875fabcfee32871</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>48a9241edda07252c1aadca09875fabcfee32871</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>704d88168bdfabe31b6ff484507f4a2244b8c52b</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>1ea74780d529a458123a08250d8fa6ef1da47a25</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>1e917fe7462445996837934a7e46eeec14ebc65f</td><td>Carnegie Mellon 
University</td><td></td></tr><tr><td>1e917fe7462445996837934a7e46eeec14ebc65f</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>240eb0b34872c431ecf9df504671281f59e7da37</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>23aef683f60cb8af239b0906c45d11dac352fb4e</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>4f7967158b257e86d66bdabfdc556c697d917d24</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>155199d7f10218e29ddaee36ebe611c95cae68c4</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>12692fbe915e6bb1c80733519371bbb90ae07539</td><td>Carnegie Mellon University</td><td>Machine Learning Department</td></tr><tr><td>71f36c8e17a5c080fab31fce1ffea9551fc49e47</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>82e66c4832386cafcec16b92ac88088ffd1a1bc9</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>2e0addeffba4be98a6ad0460453fbab52616b139</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>2e98329fdec27d4b3b9b894687e7d1352d828b1d</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>2b869d5551b10f13bf6fcdb8d13f0aa4d1f59fc4</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>78598e7005f7c96d64cc47ff47e6f13ae52245b8</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>78598e7005f7c96d64cc47ff47e6f13ae52245b8</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>1394ca71fc52db972366602a6643dc3e65ee8726</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>7f57e9939560562727344c1c987416285ef76cda</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>7f57e9939560562727344c1c987416285ef76cda</td><td>Carnegie Mellon 
University</td><td></td></tr><tr><td>7f57e9939560562727344c1c987416285ef76cda</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>8e4808e71c9b9f852dc9558d7ef41566639137f3</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>8e0ad1ccddc7ec73916eddd2b7bbc0019d8a7958</td><td>Carnegie Mellon University</td><td></td></tr><tr><td>74325f3d9aea3a810fe4eab8863d1a48c099de11</td><td>Carnegie Mellon University (CMU</td><td></td></tr><tr><td>d5de42d37ee84c86b8f9a054f90ddb4566990ec0</td><td>Carnegie Mellon University 2University of Washington 3Allen Institute for Arti cial Intelligence</td><td></td></tr><tr><td>1d7df3df839a6aa8f5392310d46b2a89080a3c25</td><td>Carnegie Mellon University 4College of CS and SE, Shenzhen University</td><td>Department of ECE</td></tr><tr><td>5b6d05ce368e69485cb08dd97903075e7f517aed</td><td>Carnegie Mellon University Pittsburgh, PA - 15213, USA</td><td></td></tr><tr><td>ec05078be14a11157ac0e1c6b430ac886124589b</td><td>Carnegie Mellon University Pittsburgh, PA, USA</td><td></td></tr><tr><td>b51b4ef97238940aaa4f43b20a861eaf66f67253</td><td>Carnegie Mellon University, 5000 Forbes Avenue, Pittsburgh, PA 15213, USA</td><td>Department of Electrical and Computer Engineering</td></tr><tr><td>266ed43dcea2e7db9f968b164ca08897539ca8dd</td><td>Carnegie Mellon University, CyLab Biometrics Center, Pittsburgh, PA, USA</td><td></td></tr><tr><td>c40c23e4afc81c8b119ea361e5582aa3adecb157</td><td>Carnegie Mellon University, Electrical and Computer Engineering</td><td></td></tr><tr><td>1ca815327e62c70f4ee619a836e05183ef629567</td><td>Carnegie Mellon University, Pittsburgh PA</td><td></td></tr><tr><td>c71217b2b111a51a31cf1107c71d250348d1ff68</td><td>Carnegie Mellon University, Pittsburgh, PA</td><td></td></tr><tr><td>e48fb3ee27eef1e503d7ba07df8eb1524c47f4a6</td><td>Carnegie Mellon University, Pittsburgh, PA</td><td>Department of Electrical and Computer Engineering</td></tr><tr><td>2679e4f84c5e773cae31cef158eb358af475e22f</td><td>Carnegie Mellon University, 
Pittsburgh, PA</td><td>Department of Electrical and Computer Engineering</td></tr><tr><td>63d8110ac76f57b3ba8a5947bc6bdbb86f25a342</td><td>Carnegie Mellon University, Pittsburgh, PA</td><td>Department of Electrical and Computer Engineering</td></tr><tr><td>c660500b49f097e3af67bb14667de30d67db88e3</td><td>Carnegie Mellon University, Pittsburgh, PA 15213, USA</td><td>c Statistics Department</td></tr><tr><td>61ffedd8a70a78332c2bbdc9feba6c3d1fd4f1b8</td><td>Carnegie Mellon University, Pittsburgh, PA, 15213, USA</td><td></td></tr><tr><td>622daa25b5e6af69f0dac3a3eaf4050aa0860396</td><td>Carnegie Mellon University, Pittsburgh, PA, 15213, USA</td><td></td></tr><tr><td>9487cea80f23afe9bccc94deebaa3eefa6affa99</td><td>Carnegie Mellon University, Pittsburgh, PA, USA</td><td></td></tr><tr><td>b234cd7788a7f7fa410653ad2bafef5de7d5ad29</td><td>Carnegie Mellon University, Pittsburgh, PA, USA</td><td></td></tr><tr><td>eb8519cec0d7a781923f68fdca0891713cb81163</td><td>Carnegie Mellon University, Pittsburgh, PA, USA</td><td></td></tr><tr><td>4541c9b4b7e6f7a232bdd62ae653ba5ec0f8bbf6</td><td>Carnegie Mellon University, Pittsburgh, PA, USA</td><td></td></tr><tr><td>17670b60dcfb5cbf8fdae0b266e18cf995f6014c</td><td>Carnegie Mellon University, Pittsburgh, PA, USA</td><td></td></tr><tr><td>6d0fe30444c6f4e4db3ad8b02fb2c87e2b33c58d</td><td>Carnegie Mellon University, Pittsburgh, PA, USA</td><td></td></tr><tr><td>831d661d657d97a07894da8639a048c430c5536d</td><td>Carnegie Mellon University, Pittsburgh, PA, USA</td><td></td></tr><tr><td>15ee80e86e75bf1413dc38f521b9142b28fe02d1</td><td>Carnegie Mellon University, Pittsburgh, PA, USA</td><td></td></tr><tr><td>00b29e319ff8b3a521b1320cb8ab5e39d7f42281</td><td>Carnegie Mellon University, Pittsburgh, USA</td><td></td></tr><tr><td>c34532fe6bfbd1e6df477c9ffdbb043b77e7804d</td><td>Carnegie Mellon University, Pittsburgh, USA</td><td></td></tr><tr><td>1329bcac5ebd0b08ce33ae1af384bd3e7a0deaca</td><td>Carnegie Mellon University, Pittsburgh, 
USA</td><td></td></tr><tr><td>4d625677469be99e0a765a750f88cfb85c522cce</td><td>Carnegie Mellon University, USA</td><td></td></tr><tr><td>656531036cee6b2c2c71954bb6540ef6b2e016d0</td><td>Carnegie Mellon University, USA</td><td></td></tr><tr><td>90a754f597958a2717862fbaa313f67b25083bf9</td><td>Carnegie Mellon University, USA</td><td></td></tr><tr><td>1ec98785ac91808455b753d4bc00441d8572c416</td><td>Carnegie Mellon University, USA</td><td></td></tr><tr><td>f4ebbeb77249d1136c355f5bae30f02961b9a359</td><td>Carnegie Melon University</td><td></td></tr><tr><td>cbbd13c29d042743f0139f1e044b6bca731886d0</td><td>Carney Institute for Brain Science</td><td></td></tr><tr><td>7a0fb972e524cb9115cae655e24f2ae0cfe448e0</td><td>Catholic University of Rio de Janeiro, Brazil</td><td></td></tr><tr><td>8981be3a69cd522b4e57e9914bf19f034d4b530c</td><td>Center For Automation Research, University of Maryland, College Park</td><td></td></tr><tr><td>3d0f9a3031bee4b89fab703ff1f1d6170493dc01</td><td>Center for Arti cial Vision Research, Korea University</td><td></td></tr><tr><td>fac8cff9052fc5fab7d5ef114d1342daba5e4b82</td><td>Center for Automated Learning and Discovery), Carnegie Mellon University</td><td></td></tr><tr><td>c32c8bfadda8f44d40c6cd9058a4016ab1c27499</td><td>Center for Automation Research (CfAR), University of Maryland, College Park, MD</td><td></td></tr><tr><td>45215e330a4251801877070c85c81f42c2da60fb</td><td>Center for Automation Research, UMIACS, University of Maryland, College Park</td><td></td></tr><tr><td>0db36bf08140d53807595b6313201a7339470cfe</td><td>Center for Automation Research, UMIACS, University of Maryland, College Park, MD</td><td></td></tr><tr><td>93420d9212dd15b3ef37f566e4d57e76bb2fab2f</td><td>Center for Automation Research, UMIACS, University of Maryland, College Park, MD</td><td></td></tr><tr><td>872dfdeccf99bbbed7c8f1ea08afb2d713ebe085</td><td>Center for Automation Research, UMIACS, University of Maryland, College Park, 
MD</td><td></td></tr><tr><td>2d748f8ee023a5b1fbd50294d176981ded4ad4ee</td><td>Center for Automation Research, UMIACS, University of Maryland, College Park, MD</td><td></td></tr><tr><td>b239a756f22201c2780e46754d06a82f108c1d03</td><td>Center for Automation Research, UMIACS, University of Maryland, College Park, MD 20742 USA</td><td></td></tr><tr><td>c8e84cdff569dd09f8d31e9f9ba3218dee65e961</td><td>Center for Automation Research, UMIACS, University of Maryland, College Park, MD 20742, USA</td><td></td></tr><tr><td>970c0d6c0fd2ebe7c5921a45bc70f6345c844ff3</td><td>Center for Automation Research, University of Maryland</td><td></td></tr><tr><td>8983485996d5d9d162e70d66399047c5d01ac451</td><td>Center for Automation Research, University of Maryland, College Park, MD</td><td></td></tr><tr><td>100105d6c97b23059f7aa70589ead2f61969fbc3</td><td>Center for Automation Research, University of Maryland, College Park, MD 20740, USA</td><td></td></tr><tr><td>4b71d1ff7e589b94e0f97271c052699157e6dc4a</td><td>Center for Automation Research, University of Maryland, College Park, MD 20742, USA</td><td></td></tr><tr><td>c5468665d98ce7349d38afb620adbf51757ab86f</td><td>Center for Automation Research, University of Maryland, College Park, MD 20742, USA</td><td></td></tr><tr><td>add50a7d882eb38e35fe70d11cb40b1f0059c96f</td><td>Center for Biometrics and Security Research and National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences</td><td></td></tr><tr><td>8f6263e4d3775757e804796e104631c7a2bb8679</td><td>Center for Brain Science, Harvard University, Cambridge, MA 02138 USA</td><td></td></tr><tr><td>8f6263e4d3775757e804796e104631c7a2bb8679</td><td>Center for Brain Science, Harvard University, Cambridge, MA 02138 USA</td><td></td></tr><tr><td>78436256ff8f2e448b28e854ebec5e8d8306cf21</td><td>Center for Brain Science, Harvard University, Cambridge, MA, USA</td><td></td></tr><tr><td>0b242d5123f79defd5f775d49d8a7047ad3153bc</td><td>Center for Brains, Minds 
and Machines, McGovern Institute, MIT</td><td></td></tr><tr><td>030ef31b51bd4c8d0d8f4a9a32b80b9192fe4c3f</td><td>Center for Cognitive Neuroscience, Duke University, Durham, North Carolina</td><td></td></tr><tr><td>25e2d3122d4926edaab56a576925ae7a88d68a77</td><td>Center for Cognitive Science, University of Turin, Turin, Italy, 2 Neuroscience Institute of Turin</td><td>Department of Psychology</td></tr><tr><td>75bf3b6109d7a685236c8589f8ead7d769ea863f</td><td>Center for Cognitive Ubiquitous Computing, Arizona State University, Tempe, AZ, USA</td><td></td></tr><tr><td>b8ebda42e272d3617375118542d4675a0c0e501d</td><td>Center for Cognitive Ubiquitous Computing, Arizona State University, Tempe, AZ, USA</td><td></td></tr><tr><td>8d0243b8b663ca0ab7cbe613e3b886a5d1c8c152</td><td>Center for Computational Biomedicine Imaging and Modeling Center, Rutgers University, New Brunswick, NJ</td><td></td></tr><tr><td>73b90573d272887a6d835ace89bfaf717747c59b</td><td>Center for Computational Intelligence, Nanyang Technology University, Singapore</td><td></td></tr><tr><td>3ca5d3b8f5f071148cb50f22955fd8c1c1992719</td><td>Center for Healthy Aging, University of</td><td>Department of Cellular and Molecular Medicine</td></tr><tr><td>081cb09791e7ff33c5d86fd39db00b2f29653fa8</td><td>Center for Information Science, Peking University, Beijing 100871, China</td><td></td></tr><tr><td>b133b2d7df9b848253b9d75e2ca5c68e21eba008</td><td>Center for Information and Neural Networks, National Institute of Information and Communications Technology (NICT</td><td></td></tr><tr><td>b6145d3268032da70edc9cfececa1f9ffa4e3f11</td><td>Center for Intelligent Machines, McGill University, 3480 University Street, Montreal, Canada H3A 2A</td><td></td></tr><tr><td>29b86534d4b334b670914038c801987e18eb5532</td><td>Center for Machine Vision Research, University of Oulu, Finland</td><td></td></tr><tr><td>ac2e44622efbbab525d4301c83cb4d5d7f6f0e55</td><td>Center for Machine Vision and Signal Analysis (CMVS), University of Oulu, 
Finland</td><td></td></tr><tr><td>27eb7a6e1fb6b42516041def6fe64bd028b7614d</td><td>Center for Machine Vision and Signal Analysis, University of Oulu, Finland</td><td></td></tr><tr><td>7492c611b1df6bce895bee6ba33737e7fc7f60a6</td><td>Center for Machine Vision and Signal Analysis, University of Oulu, Finland</td><td></td></tr><tr><td>193debca0be1c38dabc42dc772513e6653fd91d8</td><td>Center for Machine Vision and Signal Analysis, University of Oulu, Finland</td><td></td></tr><tr><td>aa0c30bd923774add6e2f27ac74acd197b9110f2</td><td>Center for Machine Vision and Signal Analysis, University of Oulu, Finland</td><td></td></tr><tr><td>c73dd452c20460f40becb1fd8146239c88347d87</td><td>Center for Research in Computer Vision (CRCV), University of Central Florida (UCF</td><td></td></tr><tr><td>7ee53d931668fbed1021839db4210a06e4f33190</td><td>Center for Research in Computer Vision (CRCV), University of Central Florida (UCF</td><td></td></tr><tr><td>8fe38962c24300129391f6d7ac24d7783e0fddd0</td><td>Center for Research in Computer Vision, University of Central Florida</td><td></td></tr><tr><td>976e0264bb57786952a987d4456850e274714fb8</td><td>Center for Research in Computer Vision, University of Central Florida</td><td></td></tr><tr><td>2d79d338c114ece1d97cde1aa06ab4cf17d38254</td><td>Center for Research in Computer Vision, University of Central Florida</td><td></td></tr><tr><td>ad2339c48ad4ffdd6100310dcbb1fb78e72fac98</td><td>Center for Research in Computer Vision, University of Central Florida, Orlando, FL</td><td></td></tr><tr><td>2910fcd11fafee3f9339387929221f4fc1160973</td><td>Center for Research in Computer Vision, University of Central Florida, Orlando, FL</td><td></td></tr><tr><td>14ba910c46d659871843b31d5be6cba59843a8b8</td><td>Center for Research in Computer Vision, University of Central Florida, Orlando, FL</td><td></td></tr><tr><td>4205cb47ba4d3c0f21840633bcd49349d1dc02c1</td><td>Center for Research in Computer Vision, University of Central Florida, Orlando, 
USA</td><td></td></tr><tr><td>60a006bdfe5b8bf3243404fae8a5f4a9d58fa892</td><td>Center for Research in Intelligent Systems, University of California, Riverside, CA 92521, USA</td><td></td></tr><tr><td>5f676d6eca4c72d1a3f3acf5a4081c29140650fb</td><td>Center for Research in Intelligent Systems, University of California, Riverside, CA 92521, USA</td><td></td></tr><tr><td>3acb6b3e3f09f528c88d5dd765fee6131de931ea</td><td>Center for Research in Intelligent Systems, University of California, Riverside, CA 92521, USA</td><td></td></tr><tr><td>55079a93b7d1eb789193d7fcdcf614e6829fad0f</td><td>Center for Sensor Systems (ZESS) and Institute for Vision and Graphics#, University of Siegen</td><td></td></tr><tr><td>81bfe562e42f2eab3ae117c46c2e07b3d142dade</td><td>Center of Research Excellence in Hajj and Umrah, Umm Al-Qura University, Makkah, KSA</td><td></td></tr><tr><td>0b9ce839b3c77762fff947e60a0eb7ebbf261e84</td><td>Central Mechanical Engineering Research Institute</td><td></td></tr><tr><td>81e11e33fc5785090e2d459da3ac3d3db5e43f65</td><td>Central Mechanical Engineering Research Institute, Durgapur, West Bengal, India</td><td></td></tr><tr><td>82ccd62f70e669ec770daf11d9611cab0a13047e</td><td>Central Tehran Branch, Azad University</td><td></td></tr><tr><td>fd10b0c771a2620c0db294cfb82b80d65f73900d</td><td>Central University of Finance and Economics, Beijing, China</td><td></td></tr><tr><td>56c2fb2438f32529aec604e6fc3b06a595ddbfcc</td><td>Central Washington University</td><td></td></tr><tr><td>56c2fb2438f32529aec604e6fc3b06a595ddbfcc</td><td>Central Washington University</td><td></td></tr><tr><td>56c2fb2438f32529aec604e6fc3b06a595ddbfcc</td><td>Central Washington University</td><td></td></tr><tr><td>56c2fb2438f32529aec604e6fc3b06a595ddbfcc</td><td>Central Washington University</td><td></td></tr><tr><td>c88ce5ef33d5e544224ab50162d9883ff6429aa3</td><td>Central Washington University, 400 E. 
University Way, Ellensburg, WA 98926, USA</td><td></td></tr><tr><td>2dfe0e7e81f65716b09c590652a4dd8452c10294</td><td>Centre for Applied Autism Research, University of Bath, Bath, United Kingdom, 2 Social and</td><td>Department of Psychology</td></tr><tr><td>6f26ab7edd971148723d9b4dc8ddf71b36be9bf7</td><td>Centre for Bioinformatics, Biomarker Discovery and Information-Based Medicine, The University of Newcastle, Callaghan, Australia, 2 Departamento de Engenharia de</td><td></td></tr><tr><td>c317181fa1de2260e956f05cd655642607520a4f</td><td>Centre for Imaging Sciences, University of</td><td></td></tr><tr><td>47dabb566f2bdd6b3e4fa7efc941824d8b923a13</td><td>Centre for Intelligent Machines, McGill University, Montreal, Canada</td><td></td></tr><tr><td>e4e3faa47bb567491eaeaebb2213bf0e1db989e1</td><td>Centre for Quantum Computation and Intelligent Systems, FEIT, University of Technology Sydney, Australia</td><td></td></tr><tr><td>1d3dd9aba79a53390317ec1e0b7cd742cba43132</td><td>Centre for Quantum Computation and Intelligent Systems, Faculty of Engineering and IT, University of</td><td></td></tr><tr><td>ca37eda56b9ee53610c66951ee7ca66a35d0a846</td><td>Centre for Quantum Computation and Intelligent Systems, University of Technology Sydney</td><td></td></tr><tr><td>062d67af7677db086ef35186dc936b4511f155d7</td><td>Centre for Quantum Computation and Intelligent Systems, University of Technology Sydney</td><td></td></tr><tr><td>159e792096756b1ec02ec7a980d5ef26b434ff78</td><td>Centre for Quantum Computation and Intelligent Systems, University of Technology Sydney</td><td></td></tr><tr><td>d0d7671c816ed7f37b16be86fa792a1b29ddd79b</td><td>Centre for Quantum Computation and Intelligent Systems, University of Technology Sydney, Sydney, Australia</td><td></td></tr><tr><td>438e7999c937b94f0f6384dbeaa3febff6d283b6</td><td>Centre for Vision, Speech and Signal Processing, University of Surrey, Guildford GU2 7XH, 
UK</td><td></td></tr><tr><td>96c6f50ce8e1b9e8215b8791dabd78b2bbd5f28d</td><td>Centre for Vision, Speech and Signal Processing, University of Surrey, Guildford GU2 7XH, UK</td><td></td></tr><tr><td>2c62b9e64aeddf12f9d399b43baaefbca8e11148</td><td>Centre for Vision, Speech and Signal Processing, University of Surrey, Guildford GU2 7XH, UK</td><td></td></tr><tr><td>40e1743332523b2ab5614bae5e10f7a7799161f4</td><td>Centre for Vision, Speech and Signal Processing, University of Surrey, Guildford GU2 7XH, UK</td><td></td></tr><tr><td>0a11b82aa207d43d1b4c0452007e9388a786be12</td><td>Centre for Vision, Speech and Signal Processing, University of Surrey, Guildford, GU2 7XH</td><td></td></tr><tr><td>bd70f832e133fb87bae82dfaa0ae9d1599e52e4b</td><td>Centre for Vision, Speech and Signal Processing, University of Surrey, Guildford, UK</td><td></td></tr><tr><td>ed07856461da6c7afa4f1782b5b607b45eebe9f6</td><td>Centre for Vision, Speech and Signal Processing, University of Surrey, UK</td><td></td></tr><tr><td>c146aa6d56233ce700032f1cb179700778557601</td><td>Centre for Vision, Speech and Signal Processing, University of Surrey, UK</td><td></td></tr><tr><td>7df268a3f4da7d747b792882dfb0cbdb7cc431bc</td><td>Centre for Vision, Speech and Signal Processing, University of Surrey, UK</td><td></td></tr><tr><td>7224d58a7e1f02b84994b60dc3b84d9fe6941ff5</td><td>Centre for Vision, Speech and Signal Processing, University of Surrey, UK</td><td></td></tr><tr><td>0cbe059c181278a373292a6af1667c54911e7925</td><td>Chalmers University of Technology, SAFER</td><td></td></tr><tr><td>5a86842ab586de9d62d5badb2ad8f4f01eada885</td><td>Chandigarh Engg. College, Mohali, Punjab, India</td><td>Department . 
of CSE</td></tr><tr><td>94b9c0a6515913bad345f0940ee233cdf82fffe1</td><td>Chandigarh University, Gharuan, Punjab, India</td><td>Department of Computer Science Engineering</td></tr><tr><td>2679e4f84c5e773cae31cef158eb358af475e22f</td><td>Changchun Institute of Optics, Fine Mechanics and Physics, Chinese Academy of Science</td><td></td></tr><tr><td>60970e124aa5fb964c9a2a5d48cd6eee769c73ef</td><td>Charles Sturt University</td><td></td></tr><tr><td>2b4d092d70efc13790d0c737c916b89952d4d8c7</td><td>Charotar University of Science and Technology, Changa, India</td><td></td></tr><tr><td>fd96432675911a702b8a4ce857b7c8619498bf9f</td><td>China Mobile Research Institute, Xuanwu Men West Street, Beijing</td><td></td></tr><tr><td>b191aa2c5b8ece06c221c3a4a0914e8157a16129</td><td>China University of Mining and Technol</td><td></td></tr><tr><td>df2494da8efa44d70c27abf23f73387318cf1ca8</td><td>China, 2 Changchun Institute of Optics, Fine Mechanics and Physics, CAS, Changchun, China, 3 School of</td><td></td></tr><tr><td>bbcb4920b312da201bf4d2359383fb4ee3b17ed9</td><td>China, 2 School of Computer Science and Engineering, Nanjing University of Science and Technology</td><td></td></tr><tr><td>c089c7d8d1413b54f59fc410d88e215902e51638</td><td>China-Singapore Institute of Digital Media, Singapore</td><td></td></tr><tr><td>f3a59d85b7458394e3c043d8277aa1ffe3cdac91</td><td>Chinese University of Hong Kong</td><td></td></tr><tr><td>f3a59d85b7458394e3c043d8277aa1ffe3cdac91</td><td>Chinese University of Hong Kong</td><td></td></tr><tr><td>eed93d2e16b55142b3260d268c9e72099c53d5bc</td><td>Chittagong University of Engineering and Technology</td><td></td></tr><tr><td>89e7d23e0c6a1d636f2da68aaef58efee36b718b</td><td>Chonbuk National University, Jeonju 561-756, Korea</td><td></td></tr><tr><td>29fc4de6b680733e9447240b42db13d5832e408f</td><td>Chonbuk National University, Jeonju-si</td><td>Department of Computer Engineering</td></tr><tr><td>492f41e800c52614c5519f830e72561db205e86c</td><td>Chongqing 
Institute of Green and Intelligent Technology, Chinese Academy of Sciences</td><td></td></tr><tr><td>7dda2eb0054eb1aeda576ed2b27a84ddf09b07d4</td><td>Chosun University</td><td></td></tr><tr><td>677ebde61ba3936b805357e27fce06c44513a455</td><td>Chu Hai College of Higher Education, Hong Kong</td><td>Department of Computer Science</td></tr><tr><td>b503f481120e69b62e076dcccf334ee50559451e</td><td>Chu Hai College of Higher Education, Hong Kong</td><td>Department of Computer Science</td></tr><tr><td>5fb5d9389e2a2a4302c81bcfc068a4c8d4efe70c</td><td>Chubu University</td><td></td></tr><tr><td>62f0d8446adee6a5e8102053a63a61af07ac4098</td><td>Chubu University</td><td></td></tr><tr><td>47fdbd64edd7d348713253cf362a9c21f98e4296</td><td>Chubu University</td><td></td></tr><tr><td>009cd18ff06ff91c8c9a08a91d2516b264eee48e</td><td>Chulalongkorn University, Bangkok</td><td>Department of Electrical Engineering</td></tr><tr><td>17cf6195fd2dfa42670dc7ada476e67b381b8f69</td><td>Chung-Ang University, Seoul, Korea</td><td></td></tr><tr><td>c590c6c171392e9f66aab1bce337470c43b48f39</td><td>Chungnam National University</td><td>Department of Psychology/Brain Research Institute</td></tr><tr><td>fc20149dfdff5fdf020647b57e8a09c06e11434b</td><td>City University of Hong Kong</td><td></td></tr><tr><td>dcc38db6c885444694f515d683bbb50521ff3990</td><td>City University of Hong Kong</td><td></td></tr><tr><td>2af2b74c3462ccff3a6881ff7cf4f321b3242fa9</td><td>City University of Hong Kong, Hong Kong, China</td><td>Department of Computer Science</td></tr><tr><td>ffaad0204f4af763e3390a2f6053c0e9875376be</td><td>City University of Hong Kong, Kowloon 999077, Hong Kong, China</td><td>Department of Electronic Engineering</td></tr><tr><td>5f453a35d312debfc993d687fd0b7c36c1704b16</td><td>Clemson University</td><td></td></tr><tr><td>ae18ccb35a1a5d7b22f2a5760f706b1c11bf39a9</td><td>Clemson University</td><td></td></tr><tr><td>367a786cfe930455cd3f6bd2492c304d38f6f488</td><td>Clemson 
University</td><td></td></tr><tr><td>7fa2605676c589a7d1a90d759f8d7832940118b5</td><td>Clemson University, Clemson, SC</td><td></td></tr><tr><td>1b70bbf7cdfc692873ce98dd3c0e191580a1b041</td><td>Co-Guide, Computer Science, Shah and Anchor Kuttchi Engineering College, Mumbai, India</td><td></td></tr><tr><td>c7f0c0636d27a1d45b8fcef37e545b902195d937</td><td>Coburg University</td><td></td></tr><tr><td>c7f0c0636d27a1d45b8fcef37e545b902195d937</td><td>Coburg University</td><td></td></tr><tr><td>26d407b911d1234e8e3601e586b49316f0818c95</td><td>Coburg University</td><td></td></tr><tr><td>beb4546ae95f79235c5f3c0e9cc301b5d6fc9374</td><td>Cognitive Arti cial Intelligence, Utrecht University, Heidelberglaan 6, 3584 CD, Utrecht</td><td></td></tr><tr><td>b4ee1b468bf7397caa7396cfee2ab5f5ed6f2807</td><td>Cognitive Brain Research Unit, Institute of Behavioural Sciences, University of</td><td></td></tr><tr><td>2dfe0e7e81f65716b09c590652a4dd8452c10294</td><td>Cognitive Neuroscience Laboratory, Centre of Biology and Health Sciences, Mackenzie Presbyterian University, S o Paulo</td><td></td></tr><tr><td>751970d4fb6f61d1b94ca82682984fd03c74f127</td><td>Cognitive Systems Lab, Karlsruhe Institute of Technology, Karlsruhe, Germany</td><td></td></tr><tr><td>7f2a4cd506fe84dee26c0fb41848cb219305173f</td><td>Collage of Sciences, Baghdad University, Iraq</td><td>Department Of Computer Science</td></tr><tr><td>ab427f0c7d4b0eb22c045392107509451165b2ba</td><td>College Heights Blvd, Bowling Green, KY</td><td></td></tr><tr><td>053931267af79a89791479b18d1b9cde3edcb415</td><td>College Park, MD</td><td></td></tr><tr><td>9cd6a81a519545bf8aa9023f6e879521f85d4cd1</td><td>College Park, MD</td><td></td></tr><tr><td>b5f4e617ac3fc4700ec8129fcd0dcf5f71722923</td><td>College Park, MD</td><td></td></tr><tr><td>b2cd92d930ed9b8d3f9dfcfff733f8384aa93de8</td><td>College Park, MD</td><td></td></tr><tr><td>bbc4b376ebd296fb9848b857527a72c82828fc52</td><td>College Park, 
MD</td><td></td></tr><tr><td>297d3df0cf84d24f7efea44f87c090c7d9be4bed</td><td>College Park, MD</td><td></td></tr><tr><td>7ca7255c2e0c86e4adddbbff2ce74f36b1dc522d</td><td>College Park, MD</td><td></td></tr><tr><td>970c0d6c0fd2ebe7c5921a45bc70f6345c844ff3</td><td>College Park, MD</td><td></td></tr><tr><td>b13a882e6168afc4058fe14cc075c7e41434f43e</td><td>College Park, MD</td><td></td></tr><tr><td>ceeb67bf53ffab1395c36f1141b516f893bada27</td><td>College Park, MD</td><td></td></tr><tr><td>ceeb67bf53ffab1395c36f1141b516f893bada27</td><td>College Park, MD</td><td></td></tr><tr><td>ceeb67bf53ffab1395c36f1141b516f893bada27</td><td>College Park, MD</td><td></td></tr><tr><td>2ee817981e02c4709d65870c140665ed25b005cc</td><td>College Park, MD 20742 USA</td><td></td></tr><tr><td>38a9ca2c49a77b540be52377784b9f734e0417e4</td><td>College Park, MD, 20740, USA</td><td></td></tr><tr><td>f571fe3f753765cf695b75b1bd8bed37524a52d2</td><td>College Park, MD, USA</td><td></td></tr><tr><td>f571fe3f753765cf695b75b1bd8bed37524a52d2</td><td>College Park, MD, USA</td><td></td></tr><tr><td>24f1febcdf56cd74cb19d08010b6eb5e7c81c362</td><td>College Park, Maryland</td><td></td></tr><tr><td>24f1febcdf56cd74cb19d08010b6eb5e7c81c362</td><td>College Park, Maryland</td><td></td></tr><tr><td>24f1febcdf56cd74cb19d08010b6eb5e7c81c362</td><td>College Park, Maryland</td><td></td></tr><tr><td>24f1febcdf56cd74cb19d08010b6eb5e7c81c362</td><td>College Park, Maryland</td><td></td></tr><tr><td>29d3ed0537e9ef62fd9ccffeeb72c1beb049e1ea</td><td>College Park, USA</td><td></td></tr><tr><td>0058cbe110933f73c21fa6cc9ae0cd23e974a9c7</td><td>College Park, USA</td><td></td></tr><tr><td>4f36c14d1453fc9d6481b09c5a09e91d8d9ee47a</td><td>College Park, USA</td><td></td></tr><tr><td>794c0dc199f0bf778e2d40ce8e1969d4069ffa7b</td><td>College Park, United States</td><td></td></tr><tr><td>4276eb27e2e4fc3e0ceb769eca75e3c73b7f2e99</td><td>College Road East, Princeton, 
NJ</td><td></td></tr><tr><td>2251a88fbccb0228d6d846b60ac3eeabe468e0f1</td><td>College Road East, Princeton, NJ</td><td></td></tr><tr><td>cfa931e6728a825caada65624ea22b840077f023</td><td>College of Automation, Harbin Engineering University, Heilongjiang, China</td><td></td></tr><tr><td>656531036cee6b2c2c71954bb6540ef6b2e016d0</td><td>College of Computer Science</td><td></td></tr><tr><td>b73d9e1af36aabb81353f29c40ecdcbdf731dbed</td><td>College of Computer Science and Information Sciences</td><td>Department of Software Engineering</td></tr><tr><td>1a41e5d93f1ef5b23b95b7163f5f9aedbe661394</td><td>College of Computer Science and Information Technology, Central South University of Forestry and Technology, Hunan 410004, China</td><td></td></tr><tr><td>df2494da8efa44d70c27abf23f73387318cf1ca8</td><td>College of Computer Science and Information Technology, Northeast Normal University, Changchun</td><td></td></tr><tr><td>50e45e9c55c9e79aaae43aff7d9e2f079a2d787b</td><td>College of Computer Science and Software Engineering, Shenzhen University, Shenzhen 518060, China</td><td></td></tr><tr><td>bd6099429bb7bf248b1fd6a1739e744512660d55</td><td>College of Computer Science and Technology</td><td></td></tr><tr><td>aac39ca161dfc52aade063901f02f56d01a1693c</td><td>College of Computer Science and Technology</td><td></td></tr><tr><td>86b6de59f17187f6c238853810e01596d37f63cd</td><td>College of Computer Science and Technology, Chongqing</td><td></td></tr><tr><td>86b6de59f17187f6c238853810e01596d37f63cd</td><td>College of Computer Science and Technology, Chongqing</td><td></td></tr><tr><td>86b6de59f17187f6c238853810e01596d37f63cd</td><td>College of Computer Science and Technology, Chongqing</td><td></td></tr><tr><td>86b6de59f17187f6c238853810e01596d37f63cd</td><td>College of Computer Science and Technology, Chongqing</td><td></td></tr><tr><td>edbb8cce0b813d3291cae4088914ad3199736aa0</td><td>College of Computer Science and Technology, Zhejiang University, 
China</td><td></td></tr><tr><td>2d8001ffee6584b3f4d951d230dc00a06e8219f8</td><td>College of Computer Science and Technology, Zhejiang University, Hangzhou, China</td><td></td></tr><tr><td>0517d08da7550241fb2afb283fc05d37fce5d7b7</td><td>College of Computer Science, Chongqing University, Chongqing, 400030, China</td><td></td></tr><tr><td>a065080353d18809b2597246bb0b48316234c29a</td><td>College of Computer Science, Chongqing University, Chongqing, China</td><td></td></tr><tr><td>4552f4d46a2cc67ccc4dd8568e5c95aa2eedb4ec</td><td>College of Computer Science, Sichuan University</td><td></td></tr><tr><td>0bf3513d18ec37efb1d2c7934a837dabafe9d091</td><td>College of Computer Science, Sichuan University, Chengdu 610065, P.R. China</td><td></td></tr><tr><td>5cbe1445d683d605b31377881ac8540e1d17adf0</td><td>College of Computer Science, Sichuan University, Chengdu, China</td><td></td></tr><tr><td>a0aa32bb7f406693217fba6dcd4aeb6c4d5a479b</td><td>College of Computer Science, Sichuan University, Chengdu, China</td><td></td></tr><tr><td>2201f187a7483982c2e8e2585ad9907c5e66671d</td><td>College of Computer Science, Sichuan University, Chengdu, China</td><td></td></tr><tr><td>404042a1dcfde338cf24bc2742c57c0fb1f48359</td><td>College of Computer Science, Zhejiang University</td><td></td></tr><tr><td>9fdfe1695adac2380f99d3d5cb6879f0ac7f2bfd</td><td>College of Computer Science, Zhejiang University, Hangzhou 310027, China</td><td></td></tr><tr><td>9fdfe1695adac2380f99d3d5cb6879f0ac7f2bfd</td><td>College of Computer Science, Zhejiang University, Hangzhou 310027, China</td><td></td></tr><tr><td>7c36afc9828379de97f226e131390af719dbc18d</td><td>College of Computer Science, Zhejiang University, Hangzhou, China</td><td></td></tr><tr><td>d0d7671c816ed7f37b16be86fa792a1b29ddd79b</td><td>College of Computer Science, Zhejiang University, Zhejiang, China</td><td></td></tr><tr><td>d454ad60b061c1a1450810a0f335fafbfeceeccc</td><td>College of Computer and Control Engineering, Nankai University 4 Hikvision 
Research</td><td></td></tr><tr><td>5f0d4a0b5f72d8700cdf8cb179263a8fa866b59b</td><td>College of Computer and Control Engineering, Nankai University 4: Hikvision Research</td><td></td></tr><tr><td>5db075a308350c083c3fa6722af4c9765c4b8fef</td><td>College of Computer and Information Engineering, Nanyang Institute of Technology</td><td></td></tr><tr><td>76ce3d35d9370f0e2e27cfd29ea0941f1462895f</td><td>College of Computer and Information Engineering, Tianjin Normal University, Tianjin 300387, China</td><td></td></tr><tr><td>f58d584c4ac93b4e7620ef6e5a8f20c6f6da295e</td><td>College of Computer and Information Science</td><td></td></tr><tr><td>23aba7b878544004b5dfa64f649697d9f082b0cf</td><td>College of Computer and Information Science</td><td></td></tr><tr><td>07fa153b8e6196ee6ef6efd8b743de8485a07453</td><td>College of Computer and Information Science, Northeastern University, Boston, MA, USA</td><td></td></tr><tr><td>e3c8e49ffa7beceffca3f7f276c27ae6d29b35db</td><td>College of Computer and Information Science, Northeastern University, Boston, USA</td><td></td></tr><tr><td>0a9345ea6e488fb936e26a9ba70b0640d3730ba7</td><td>College of Computer and Information Science, Northeastern University, Boston, USA</td><td></td></tr><tr><td>090e4713bcccff52dcd0c01169591affd2af7e76</td><td>College of Computer and Information Science, Northeastern University, MA, USA</td><td></td></tr><tr><td>d22b378fb4ef241d8d210202893518d08e0bb213</td><td>College of Computer and Information Science, Northeastern University, MA, USA</td><td></td></tr><tr><td>0969e0dc05fca21ff572ada75cb4b703c8212e80</td><td>College of Computer and Information Science, Southwest University, Chongqing 400715, China</td><td></td></tr><tr><td>5aadd85e2a77e482d44ac2a215c1f21e4a30d91b</td><td>College of Computer and Information Sciences</td><td></td></tr><tr><td>feb6e267923868bff6e2108603d00fdfd65251ca</td><td>College of Computer and Information Sciences</td><td>Computer Science 
Department</td></tr><tr><td>feb6e267923868bff6e2108603d00fdfd65251ca</td><td>College of Computer and Information Sciences</td><td>Computer Engineering Department</td></tr><tr><td>feb6e267923868bff6e2108603d00fdfd65251ca</td><td>College of Computer and Information Sciences</td><td>Computer Engineering Department</td></tr><tr><td>81bfe562e42f2eab3ae117c46c2e07b3d142dade</td><td>College of Computers and Information Systems, Umm Al-Qura University, Makkah, KSA</td><td></td></tr><tr><td>69eb6c91788e7c359ddd3500d01fb73433ce2e65</td><td>College of Computing</td><td></td></tr><tr><td>93af36da08bf99e68c9b0d36e141ed8154455ac2</td><td>College of Computing</td><td></td></tr><tr><td>5fa04523ff13a82b8b6612250a39e1edb5066521</td><td>College of Computing</td><td></td></tr><tr><td>b33e8db8ccabdfc49211e46d78d09b14557d4cba</td><td>College of Computing, Georgia Institute of Technology</td><td></td></tr><tr><td>2d93a9aa8bed51d0d1b940c73ac32c046ebf1eb8</td><td>College of Computing, Georgia Institute of Technology, Atlanta, GA, USA</td><td></td></tr><tr><td>5b01d4338734aefb16ee82c4c59763d3abc008e6</td><td>College of Electrical and Information Engineering</td><td></td></tr><tr><td>d307a766cc9c728a24422313d4c3dcfdb0d16dd5</td><td>College of Electrical and Information Engineering, Hunan University, China</td><td></td></tr><tr><td>5ae970294aaba5e0225122552c019eb56f20af74</td><td>College of Electronic Science and Engineering, National University of Defense Technology, Changsha, China</td><td></td></tr><tr><td>d31af74425719a3840b496b7932e0887b35e9e0d</td><td>College of Electronics and Information Engineering, Sejong University</td><td>Department of Software</td></tr><tr><td>5f676d6eca4c72d1a3f3acf5a4081c29140650fb</td><td>College of Electronics and Information Engineering, Sichuan University, Chengdu 610064, China</td><td></td></tr><tr><td>411318684bd2d42e4b663a37dcf0532a48f0146d</td><td>College of Electronics and Information Engineering, Tongji 
University</td><td></td></tr><tr><td>3e4b38b0574e740dcbd8f8c5dfe05dbfb2a92c07</td><td>College of Electronics and Information, Northwestern Polytechnic University</td><td></td></tr><tr><td>accbd6cd5dd649137a7c57ad6ef99232759f7544</td><td>College of Electronics and Information, Northwestern Polytechnic University</td><td></td></tr><tr><td>bb451dc2420e1a090c4796c19716f93a9ef867c9</td><td>College of Engineering (Poly</td><td></td></tr><tr><td>bb451dc2420e1a090c4796c19716f93a9ef867c9</td><td>College of Engineering (Poly</td><td></td></tr><tr><td>13db9466d2ddf3c30b0fd66db8bfe6289e880802</td><td>College of Engineering Pune, India</td><td></td></tr><tr><td>a7191958e806fce2505a057196ccb01ea763b6ea</td><td>College of Engineering and Computer Science</td><td></td></tr><tr><td>d9810786fccee5f5affaef59bc58d2282718af9b</td><td>College of Engineering and Mineral Resources</td><td></td></tr><tr><td>3cd9b0a61bdfa1bb8a0a1bf0369515a76ecd06e3</td><td>College of Engineering, Mathematics and Physical Sciences</td><td></td></tr><tr><td>07fa153b8e6196ee6ef6efd8b743de8485a07453</td><td>College of Engineering, Northeastern University, Boston, MA, USA</td><td></td></tr><tr><td>cfd933f71f4a69625390819b7645598867900eab</td><td>College of Engineering, Pune, India</td><td>Department of Electronics and Telecommunication</td></tr><tr><td>a6b1d79bc334c74cde199e26a7ef4c189e9acd46</td><td>College of Engineering, Purdue University</td><td></td></tr><tr><td>512befa10b9b704c9368c2fbffe0dc3efb1ba1bf</td><td>College of Image Arts and Sciences</td><td></td></tr><tr><td>4698a599425c3a6bae1c698456029519f8f2befe</td><td>College of Informatics</td><td></td></tr><tr><td>4698a599425c3a6bae1c698456029519f8f2befe</td><td>College of Informatics</td><td></td></tr><tr><td>66dcd855a6772d2731b45cfdd75f084327b055c2</td><td>College of Information Engineering</td><td></td></tr><tr><td>0f395a49ff6cbc7e796656040dbf446a40e300aa</td><td>College of Information Engineering, Shanghai Maritime University, Shanghai, China, 2 
School of Information, Kochi University</td><td></td></tr><tr><td>134f1cee8408cca648d8b4ca44b38b0a7023af71</td><td>College of Information Science and Electronic Engineering</td><td></td></tr><tr><td>1fe990ca6df273de10583860933d106298655ec8</td><td>College of Information Science and Engineering</td><td></td></tr><tr><td>b7426836ca364603ccab0e533891d8ac54cf2429</td><td>College of Information Science and Engineering, Ocean University of China, Qingdao, China</td><td></td></tr><tr><td>1a41e5d93f1ef5b23b95b7163f5f9aedbe661394</td><td>College of Information Science and Engineering, Ritsumeikan University, Shiga 525-8577, Japan</td><td></td></tr><tr><td>a29a22878e1881d6cbf6acff2d0b209c8d3f778b</td><td>College of Information Science and Engineering, Xinjiang University</td><td></td></tr><tr><td>571b83f7fc01163383e6ca6a9791aea79cafa7dd</td><td>College of Information Science and Technology</td><td></td></tr><tr><td>af278274e4bda66f38fd296cfa5c07804fbc26ee</td><td>College of Information and Communication Engineering, Sungkyunkwan University, Suwon-si, Gyeonggi</td><td></td></tr><tr><td>8986585975c0090e9ad97bec2ba6c4b437419dae</td><td>College of Information and Computer Sciences, University of Massachusetts, Amherst</td><td></td></tr><tr><td>04f55f81bbd879773e2b8df9c6b7c1d324bc72d8</td><td>College of Information and Control Engineering in China University of Petroleum</td><td></td></tr><tr><td>19868a469dc25ee0db00947e06c804b88ea94fd0</td><td>College of Information and Control Engineering, China University of Petroleum, Qingdao 266580, China</td><td></td></tr><tr><td>b5930275813a7e7a1510035a58dd7ba7612943bc</td><td>College of Information and Electrical Engineering</td><td></td></tr><tr><td>86d0127e1fd04c3d8ea78401c838af621647dc95</td><td>College of Information and Engineering, Hunan University, Changsha, China</td><td></td></tr><tr><td>74eae724ef197f2822fb7f3029c63014625ce1ca</td><td>College of Information, Yunnan Normal University, Kunming, 
China</td><td></td></tr><tr><td>a32d4195f7752a715469ad99cb1e6ebc1a099de6</td><td>College of Mechatronic Engineering and Automation, National University of Defense Technology</td><td>Department of Automatic Control</td></tr><tr><td>a065080353d18809b2597246bb0b48316234c29a</td><td>College of Medical Informatics, Chongqing Medical University, Chongqing, China</td><td></td></tr><tr><td>b4362cd87ad219790800127ddd366cc465606a78</td><td>College of Medicine, Seoul National University</td><td>Department of Biomedical Engineering</td></tr><tr><td>50eb2ee977f0f53ab4b39edc4be6b760a2b05f96</td><td>College of Science, Baghdad University, Baghdad, Iraq</td><td>Computer Science Department</td></tr><tr><td>b73d9e1af36aabb81353f29c40ecdcbdf731dbed</td><td>College of Science, Menou a University, Menou a 32721, Egypt</td><td>Department of Computer Science</td></tr><tr><td>3f540faf85e1f8de6ce04fb37e556700b67e4ad3</td><td>College of Sciences, Northeastern University, Shenyang 110819, China</td><td></td></tr><tr><td>c207fd762728f3da4cddcfcf8bf19669809ab284</td><td>College of Software Engineering, Southeast University, Nanjing 210096, China</td><td></td></tr><tr><td>e065a2cb4534492ccf46d0afc81b9ad8b420c5ec</td><td>College of Software, Beihang University</td><td></td></tr><tr><td>0517d08da7550241fb2afb283fc05d37fce5d7b7</td><td>College of software, Chongqing University of Posts and Telecommunications Chongqing</td><td></td></tr><tr><td>72bf9c5787d7ff56a1697a3389f11d14654b4fcf</td><td>CollegePark, MD</td><td></td></tr><tr><td>dbd5e9691cab2c515b50dda3d0832bea6eef79f2</td><td>CollegePark, MD</td><td></td></tr><tr><td>a481e394f58f2d6e998aa320dad35c0d0e15d43c</td><td>Colorado State University</td><td></td></tr><tr><td>ae5bb02599244d6d88c4fe466a7fdd80aeb91af4</td><td>Colorado State University</td><td></td></tr><tr><td>ae5bb02599244d6d88c4fe466a7fdd80aeb91af4</td><td>Colorado State University</td><td></td></tr><tr><td>38a2661b6b995a3c4d69e7d5160b7596f89ce0e6</td><td>Colorado State 
University</td><td></td></tr><tr><td>d26b443f87df76034ff0fa9c5de9779152753f0c</td><td>Colorado State University</td><td></td></tr><tr><td>120bcc9879d953de7b2ecfbcd301f72f3a96fb87</td><td>Colorado State University</td><td></td></tr><tr><td>3294e27356c3b1063595885a6d731d625b15505a</td><td>Colorado State University, Fort Collins, CO 80523, USA</td><td>Departments of Computer Science2 and Mathematics1</td></tr><tr><td>7d306512b545df98243f87cb8173df83b4672b18</td><td>Colorado State University, Fort Collins, Colorado, USA</td><td></td></tr><tr><td>f0ca31fd5cad07e84b47d50dc07db9fc53482a46</td><td>Colorado State University, Fort Collins, USA</td><td>Department of Mathematics</td></tr><tr><td>85fd2bda5eb3afe68a5a78c30297064aec1361f6</td><td>Columbia Business School, University of California, San Diego</td><td>Columbia University; 2Psychology Department</td></tr><tr><td>61f93ed515b3bfac822deed348d9e21d5dffe373</td><td>Columbia University</td><td>Department of Computer Science</td></tr><tr><td>61f93ed515b3bfac822deed348d9e21d5dffe373</td><td>Columbia University</td><td>Department of Electrical Engineering</td></tr><tr><td>03c48d8376990cff9f541d542ef834728a2fcda2</td><td>Columbia University</td><td></td></tr><tr><td>35f03f5cbcc21a9c36c84e858eeb15c5d6722309</td><td>Columbia University</td><td></td></tr><tr><td>670637d0303a863c1548d5b19f705860a23e285c</td><td>Columbia University</td><td></td></tr><tr><td>0ec2049a1dd7ae14c7a4c22c5bcd38472214f44d</td><td>Columbia University</td><td>EE Department</td></tr><tr><td>33030c23f6e25e30b140615bb190d5e1632c3d3b</td><td>Columbia University</td><td></td></tr><tr><td>bbfe0527e277e0213aafe068113d719b2e62b09c</td><td>Columbia University</td><td></td></tr><tr><td>df0e280cae018cebd5b16ad701ad101265c369fa</td><td>Columbia University</td><td></td></tr><tr><td>4b507a161af8a7dd41e909798b9230f4ac779315</td><td>Columbia University</td><td></td></tr><tr><td>4c170a0dcc8de75587dae21ca508dab2f9343974</td><td>Columbia 
University</td><td></td></tr><tr><td>217de4ff802d4904d3f90d2e24a29371307942fe</td><td>Columbia University</td><td></td></tr><tr><td>217de4ff802d4904d3f90d2e24a29371307942fe</td><td>Columbia University</td><td></td></tr><tr><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td><td>Columbia University</td><td></td></tr><tr><td>2a88541448be2eb1b953ac2c0c54da240b47dd8a</td><td>Columbia University</td><td></td></tr><tr><td>5e16f10f2d667d17c029622b9278b6b0a206d394</td><td>Columbia University</td><td>Department of Computer Science</td></tr><tr><td>5e16f10f2d667d17c029622b9278b6b0a206d394</td><td>Columbia University</td><td>Department of Electrical Engineering</td></tr><tr><td>08a1fc55d03e4a73cad447e5c9ec79a6630f3e2d</td><td>Columbia University</td><td></td></tr><tr><td>0f829fee12e86f980a581480a9e0cefccb59e2c5</td><td>Columbia University</td><td></td></tr><tr><td>0a82860d11fcbf12628724333f1e7ada8f3cd255</td><td>Columbia University</td><td></td></tr><tr><td>b13bf657ca6d34d0df90e7ae739c94a7efc30dc3</td><td>Columbia University</td><td></td></tr><tr><td>b13bf657ca6d34d0df90e7ae739c94a7efc30dc3</td><td>Columbia University</td><td></td></tr><tr><td>b13bf657ca6d34d0df90e7ae739c94a7efc30dc3</td><td>Columbia University</td><td></td></tr><tr><td>b13bf657ca6d34d0df90e7ae739c94a7efc30dc3</td><td>Columbia University</td><td></td></tr><tr><td>ddaa8add8528857712424fd57179e5db6885df7c</td><td>Columbia University</td><td></td></tr><tr><td>c41de506423e301ef2a10ea6f984e9e19ba091b4</td><td>Columbia University</td><td></td></tr><tr><td>1cee993dc42626caf5dbc26c0a7790ca6571d01a</td><td>Columbia University</td><td>Department of Computer Science</td></tr><tr><td>40217a8c60e0a7d1735d4f631171aa6ed146e719</td><td>Columbia University</td><td></td></tr><tr><td>140438a77a771a8fb656b39a78ff488066eb6b50</td><td>Columbia University</td><td></td></tr><tr><td>47382cb7f501188a81bb2e10cfd7aed20285f376</td><td>Columbia University in the City of New 
York</td><td></td></tr><tr><td>3240c9359061edf7a06bfeb7cc20c103a65904c2</td><td>Columbia University, National University of Singapore</td><td></td></tr><tr><td>be86d88ecb4192eaf512f29c461e684eb6c35257</td><td>Columbia University, New York NY 10027, USA</td><td></td></tr><tr><td>4f0d9200647042e41dea71c35eb59e598e6018a7</td><td>Columbia University, New York, NY</td><td></td></tr><tr><td>14fb3283d4e37760b7dc044a1e2906e3cbf4d23a</td><td>Columbia University, New York, NY</td><td></td></tr><tr><td>780557daaa39a445b24c41f637d5fc9b216a0621</td><td>Columbia University, New York, NY 10027, USA</td><td>Department of Electrical Engineering</td></tr><tr><td>a0dc68c546e0fc72eb0d9ca822cf0c9ccb4b4c4f</td><td>Columbia University, New York, NY, USA</td><td></td></tr><tr><td>774cbb45968607a027ae4729077734db000a1ec5</td><td>Columbia University, USA</td><td></td></tr><tr><td>7e18b5f5b678aebc8df6246716bf63ea5d8d714e</td><td>Columbia University, United States</td><td></td></tr><tr><td>97f9c3bdb4668f3e140ded2da33fe704fc81f3ea</td><td>ColumbiaUniversity, NY, USA</td><td>Department ofComputerScience</td></tr><tr><td>66aad5b42b7dda077a492e5b2c7837a2a808c2fa</td><td>Compi`egne University of Technology</td><td></td></tr><tr><td>a611c978e05d7feab01fb8a37737996ad6e88bd9</td><td>Computational Biomedicine Lab, University of Houston, TX, USA</td><td></td></tr><tr><td>e69ac130e3c7267cce5e1e3d9508ff76eb0e0eef</td><td>Computational Biomedicine Laboratory, University of Houston, Houston, Texas 77204, USA</td><td>Department of Computer Science</td></tr><tr><td>e30dc2abac4ecc48aa51863858f6f60c7afdf82a</td><td>Computational Medicine Laboratory, Institute of Computer Science, Foundation for Research and Technology - Hellas</td><td></td></tr><tr><td>6bcfcc4a0af2bf2729b5bc38f500cfaab2e653f0</td><td>Computational Science and Engineering Program, Bo gazic i University, Istanbul, Turkey</td><td></td></tr><tr><td>d687fa99586a9ad229284229f20a157ba2d41aea</td><td>Computer Applications, Ayya Nadar Janaki Ammal 
College, Sivakasi, India</td><td></td></tr><tr><td>3ca5d3b8f5f071148cb50f22955fd8c1c1992719</td><td>Computer Engineering and Computer Science, Duthie Center for Engineering, University of</td><td></td></tr><tr><td>ad247138e751cefa3bb891c2fe69805da9c293d7</td><td>Computer Engineering, Faculty of Engineering, Kharazmi University of Tehran, Tehran, Iran</td><td></td></tr><tr><td>3a0ea368d7606030a94eb5527a12e6789f727994</td><td>Computer Graphics Research Group, University of Freiburg, Freiburg, Germany</td><td></td></tr><tr><td>1d1a7ef193b958f9074f4f236060a5f5e7642fc1</td><td>Computer Information Systems, Missouri State University, 901 S. National, Springfield, MO 65804, USA</td><td></td></tr><tr><td>2aa2b312da1554a7f3e48f71f2fce7ade6d5bf40</td><td>Computer Laboratory, University of Cambridge, Cambridge, UK</td><td></td></tr><tr><td>511a8cdf2127ef8aa07cbdf9660fe9e0e2dfbde7</td><td>Computer School, University of South China, Hengyang, China</td><td></td></tr><tr><td>d5d7e89e6210fcbaa52dc277c1e307632cd91dab</td><td>Computer Science Depart., Cornell University, USA</td><td></td></tr><tr><td>d5d7e89e6210fcbaa52dc277c1e307632cd91dab</td><td>Computer Science Depart., Rochester University, USA</td><td></td></tr><tr><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td><td>Computer Science Division, The Open University of Israel</td><td></td></tr><tr><td>5bde1718253ec28a753a892b0ba82d8e553b6bf3</td><td>Computer Science Division, The Open University of Israel</td><td></td></tr><tr><td>7fc3442c8b4c96300ad3e860ee0310edb086de94</td><td>Computer Science Division, The Open University of Israel, Israel</td><td></td></tr><tr><td>12ebeb2176a5043ad57bc5f3218e48a96254e3e9</td><td>Computer Science North South University, Dhaka</td><td></td></tr><tr><td>12ebeb2176a5043ad57bc5f3218e48a96254e3e9</td><td>Computer Science North South University, Dhaka</td><td></td></tr><tr><td>124538b3db791e30e1b62f81d4101be435ee12ef</td><td>Computer Science and Arti cial Intelligence Laboratory, Massachusetts 
Institute of Technology, Cambridge, MA, USA</td><td>Department of EECS</td></tr><tr><td>124538b3db791e30e1b62f81d4101be435ee12ef</td><td>Computer Science and Arti cial Intelligence Laboratory, Massachusetts Institute of Technology, Cambridge, MA, USA</td><td></td></tr><tr><td>55bc7abcef8266d76667896bbc652d081d00f797</td><td>Computer Science and Electrical Engineering, West Virginia University, Morgantown, USA</td><td></td></tr><tr><td>a25106a76af723ba9b09308a7dcf4f76d9283589</td><td>Computer Science and Engineering, Anna University, India</td><td></td></tr><tr><td>a25106a76af723ba9b09308a7dcf4f76d9283589</td><td>Computer Science and Engineering, Easwari Engineering College, India</td><td></td></tr><tr><td>e22adcd2a6a7544f017ec875ce8f89d5c59e09c8</td><td>Computer Science and Engineering, Michigan State University, East Lansing, USA</td><td></td></tr><tr><td>55bc7abcef8266d76667896bbc652d081d00f797</td><td>Computer Science and Engineering, Michigan State University, East Lansing, USA</td><td></td></tr><tr><td>371f40f6d32ece05cc879b6954db408b3d4edaf3</td><td>Computer Science and Engineering, University of Michigan, Ann Arbor</td><td></td></tr><tr><td>f3ca2c43e8773b7062a8606286529c5bc9b3ce25</td><td>Computer Science and Engineering, University of Texas at Arlington, USA</td><td></td></tr><tr><td>345bea5f7d42926f857f395c371118a00382447f</td><td>Computer Science and Engineering, University of Washington</td><td></td></tr><tr><td>51eba481dac6b229a7490f650dff7b17ce05df73</td><td>Computer Science and Engineering, University of Washington, Seattle, WA</td><td></td></tr><tr><td>02239ae5e922075a354169f75f684cad8fdfd5ab</td><td>Computer Science and Engineering, University of Washington, Seattle, WA</td><td></td></tr><tr><td>124538b3db791e30e1b62f81d4101be435ee12ef</td><td>Computer Science and Engineering, University of Washington, Seattle, WA, USA</td><td></td></tr><tr><td>eb8519cec0d7a781923f68fdca0891713cb81163</td><td>Computer Science and Software Engineering, Concordia 
University, Montr eal, Qu ebec, Canada</td><td></td></tr><tr><td>17670b60dcfb5cbf8fdae0b266e18cf995f6014c</td><td>Computer Science and Software Engineering, Concordia University, Montr eal, Qu ebec, Canada</td><td></td></tr><tr><td>210b98394c3be96e7fd75d3eb11a391da1b3a6ca</td><td>Computer Science and Software Engineering, The University of Western Australia</td><td></td></tr><tr><td>ebb1c29145d31c4afa3c9be7f023155832776cd3</td><td>Computer Science and Technology, Tsinghua University, Beijing, China</td><td></td></tr><tr><td>fd96432675911a702b8a4ce857b7c8619498bf9f</td><td>Computer Science, Beijing Institute of Technology, Beijing 100081, P.R.China</td><td></td></tr><tr><td>124538b3db791e30e1b62f81d4101be435ee12ef</td><td>Computer Science, Brown University, Providence, RI, USA</td><td></td></tr><tr><td>0cb7e4c2f6355c73bfc8e6d5cdfad26f3fde0baf</td><td>Computer Science, Engineering and Mathematics School, Flinders University, Australia</td><td></td></tr><tr><td>0cb7e4c2f6355c73bfc8e6d5cdfad26f3fde0baf</td><td>Computer Science, Engineering and Mathematics School, Flinders University, Australia</td><td></td></tr><tr><td>82c303cf4852ad18116a2eea31e2291325bc19c3</td><td>Computer Science, Engineering and Mathematics School, Flinders University, Australia</td><td></td></tr><tr><td>124538b3db791e30e1b62f81d4101be435ee12ef</td><td>Computer Science, Princeton University, Princeton, NJ, USA</td><td></td></tr><tr><td>6ef1996563835b4dfb7fda1d14abe01c8bd24a05</td><td>Computer Vision Group, Friedrich Schiller University Jena</td><td></td></tr><tr><td>0a60d9d62620e4f9bb3596ab7bb37afef0a90a4f</td><td>Computer Vision Group, Friedrich Schiller University Jena, Germany</td><td></td></tr><tr><td>a949b8700ca6ba96ee40f75dfee1410c5bbdb3db</td><td>Computer Vision Group, Friedrich Schiller University of Jena, Germany</td><td></td></tr><tr><td>c2e6daebb95c9dfc741af67464c98f1039127627</td><td>Computer Vision Group, Friedrich Schiller University of Jena, 
Germany</td><td></td></tr><tr><td>0435a34e93b8dda459de49b499dd71dbb478dc18</td><td>Computer Vision Group, L. D. College of Engineering, Ahmedabad, India</td><td></td></tr><tr><td>7cee802e083c5e1731ee50e731f23c9b12da7d36</td><td>Computer Vision Group, L. D. College of Engineering, Ahmedabad, India</td><td></td></tr><tr><td>faca1c97ac2df9d972c0766a296efcf101aaf969</td><td>Computer Vision Group, Xerox Research Center Europe, Meylan, France</td><td></td></tr><tr><td>0d0b880e2b531c45ee8227166a489bf35a528cb9</td><td>Computer Vision Lab, Delft University of Technology</td><td></td></tr><tr><td>ea46951b070f37ad95ea4ed08c7c2a71be2daedc</td><td>Computer Vision Lab, Delft University of Technology, Netherlands</td><td></td></tr><tr><td>8d646ac6e5473398d668c1e35e3daa964d9eb0f6</td><td>Computer Vision Laboratory, Link oping University, SE-581 83 Link oping, Sweden</td><td>Department of Electrical Engineering</td></tr><tr><td>264a84f4d27cd4bca94270620907cffcb889075c</td><td>Computer Vision Laboratory, Link oping University, Sweden</td><td>Department of Electrical Engineering</td></tr><tr><td>4cd0da974af9356027a31b8485a34a24b57b8b90</td><td>Computer Vision Laboratory, The University of Nottingham</td><td></td></tr><tr><td>02e628e99f9a1b295458cb453c09863ea1641b67</td><td>Computer Vision Laboratory, University of Nottingham, Nottingham, UK</td><td></td></tr><tr><td>056ba488898a1a1b32daec7a45e0d550e0c51ae4</td><td>Computer Vision Laboratory. 
University of Nottingham</td><td></td></tr><tr><td>c42a8969cd76e9f54d43f7f4dd8f9b08da566c5f</td><td>Computer Vision Research Group, COMSATS Institute of Information</td><td></td></tr><tr><td>acee2201f8a15990551804dd382b86973eb7c0a8</td><td>Computer Vision and Robotics Research Laboratory</td><td></td></tr><tr><td>19e7bdf8310f9038e1a9cf412b8dd2c77ff64c54</td><td>Computer Vision and Robotics Research Laboratory</td><td></td></tr><tr><td>29b86534d4b334b670914038c801987e18eb5532</td><td>Computer Vision for Human Computer Interaction, Karlsruhe Institute of Technology, Germany</td><td></td></tr><tr><td>5ce2cb4c76b0cdffe135cf24b9cda7ae841c8d49</td><td>Computer and Systems Engineering, Rensselaer Polytechnic Institute</td><td>Department of Electrical</td></tr><tr><td>a255a54b8758050ea1632bf5a88a201cd72656e1</td><td>Computer and Vision Research Center</td><td></td></tr><tr><td>0b02bfa5f3a238716a83aebceb0e75d22c549975</td><td>Computer vision and Remote Sensing, Berlin university of Technology</td><td></td></tr><tr><td>301b0da87027d6472b98361729faecf6e1d5e5f6</td><td>Computer vision and Remote Sensing, Berlin university of Technology</td><td></td></tr><tr><td>ec05078be14a11157ac0e1c6b430ac886124589b</td><td>Concordia University</td><td></td></tr><tr><td>ec05078be14a11157ac0e1c6b430ac886124589b</td><td>Concordia University</td><td></td></tr><tr><td>ec05078be14a11157ac0e1c6b430ac886124589b</td><td>Concordia University</td><td></td></tr><tr><td>41971dfbf404abeb8cf73fea29dc37b9aae12439</td><td>Concordia University</td><td></td></tr><tr><td>6409b8879c7e61acf3ca17bcc62f49edca627d4c</td><td>Concordia University, Canada</td><td></td></tr><tr><td>6409b8879c7e61acf3ca17bcc62f49edca627d4c</td><td>Concordia University, Canada</td><td></td></tr><tr><td>c418a3441f992fea523926f837f4bfb742548c16</td><td>Concordia University, Canada</td><td>Department of Computer Science and Software Engineering</td></tr><tr><td>266ed43dcea2e7db9f968b164ca08897539ca8dd</td><td>Concordia University, Computer 
Science and Software Engineering, Montr eal, Qu ebec, Canada</td><td></td></tr><tr><td>6d0fe30444c6f4e4db3ad8b02fb2c87e2b33c58d</td><td>Concordia University, Montreal, Quebec, Canada</td><td>Department of Computer Science and Software Engineering</td></tr><tr><td>7f59657c883f77dc26393c2f9ed3d19bdf51137b</td><td>Conference on CyberGames and Interactive Entertainment (pp. 52-58). Western Australia: Murdoch university</td><td></td></tr><tr><td>fd33df02f970055d74fbe69b05d1a7a1b9b2219b</td><td>Cooperative Medianet Innovation Center (CMIC), Shanghai Jiao Tong University, China</td><td></td></tr><tr><td>e90e12e77cab78ba8f8f657db2bf4ae3dabd5166</td><td>Cooperative Medianet Innovation Center, Shanghai Jiao Tong University</td><td></td></tr><tr><td>4a14a321a9b5101b14ed5ad6aa7636e757909a7c</td><td>Cooperative Medianet Innovation Center, Shanghai Jiaotong University</td><td></td></tr><tr><td>713594c18978b965be87651bb553c28f8501df0a</td><td>Cooperative Medianet Innovation Center, Shanghai Jiaotong University</td><td></td></tr><tr><td>126535430845361cd7a3a6f317797fe6e53f5a3b</td><td>Coordinated Science Lab, University of Illinois at Urbana-Champaign</td><td></td></tr><tr><td>bcf19b964e7d1134d00332cf1acf1ee6184aff00</td><td>Copyright c(cid:2) 2017 The Institute of Electronics, Information and Communication Engineers</td><td></td></tr><tr><td>b216040f110d2549f61e3f5a7261cab128cab361</td><td>Copyright c(cid:3) 2017 The Institute of Electronics, Information and Communication Engineers</td><td></td></tr><tr><td>04317e63c08e7888cef480fe79f12d3c255c5b00</td><td>Copyright for the publications made accessible via the Queen's University Belfast Research Portal is retained by the author(s) and / or other</td><td></td></tr><tr><td>aba770a7c45e82b2f9de6ea2a12738722566a149</td><td>Copyright for the publications made accessible via the Queen's University Belfast Research Portal is retained by the author(s) and / or 
other</td><td></td></tr><tr><td>c1dd69df9dfbd7b526cc89a5749f7f7fabc1e290</td><td>Copyright for the publications made accessible via the Queen's University Belfast Research Portal is retained by the author(s) and / or other</td><td></td></tr><tr><td>38215c283ce4bf2c8edd597ab21410f99dc9b094</td><td>Copyright for the publications made accessible via the Queen's University Belfast Research Portal is retained by the author(s) and / or other</td><td></td></tr><tr><td>32d8e555441c47fc27249940991f80502cb70bd5</td><td>Cornell University</td><td></td></tr><tr><td>5aad56cfa2bac5d6635df4184047e809f8fecca2</td><td>Cornell University</td><td>Department of Electrical and Computer Engineering</td></tr><tr><td>053b263b4a4ccc6f9097ad28ebf39c2957254dfb</td><td>Cornell University</td><td></td></tr><tr><td>053b263b4a4ccc6f9097ad28ebf39c2957254dfb</td><td>Cornell University</td><td></td></tr><tr><td>28d06fd508d6f14cd15f251518b36da17909b79e</td><td>Cornell University</td><td></td></tr><tr><td>8a8861ad6caedc3993e31d46e7de6c251a8cda22</td><td>Cornell University</td><td></td></tr><tr><td>192235f5a9e4c9d6a28ec0d333e36f294b32f764</td><td>Cornell University</td><td></td></tr><tr><td>192235f5a9e4c9d6a28ec0d333e36f294b32f764</td><td>Cornell University</td><td></td></tr><tr><td>9fc04a13eef99851136eadff52e98eb9caac919d</td><td>Cornell University</td><td></td></tr><tr><td>9fc04a13eef99851136eadff52e98eb9caac919d</td><td>Cornell University</td><td></td></tr><tr><td>6577c76395896dd4d352f7b1ee8b705b1a45fa90</td><td>Cornell University</td><td>Department of Electrical and Computer Engineering</td></tr><tr><td>6577c76395896dd4d352f7b1ee8b705b1a45fa90</td><td>Cornell University</td><td>Department of Computer Science</td></tr><tr><td>3026722b4cbe9223eda6ff2822140172e44ed4b1</td><td>Cornell University</td><td></td></tr><tr><td>6c5fbf156ef9fc782be0089309074cc52617b868</td><td>Cornell University</td><td>Department of Computer Science and Cornell 
Tech</td></tr><tr><td>ce9e1dfa7705623bb67df3a91052062a0a0ca456</td><td>Cornell University</td><td></td></tr><tr><td>240eb0b34872c431ecf9df504671281f59e7da37</td><td>Cornell University</td><td></td></tr><tr><td>8bdf6f03bde08c424c214188b35be8b2dec7cdea</td><td>Cornell University</td><td></td></tr><tr><td>37278ffce3a0fe2c2bbf6232e805dd3f5267eba3</td><td>Cornell University 2 Cornell Tech</td><td>Department of Computer Science</td></tr><tr><td>e5799fd239531644ad9270f49a3961d7540ce358</td><td>Cornell University 2Eastman Kodak Company</td><td>Department of Elec. and Computer Eng.</td></tr><tr><td>8cb6daba2cb1e208e809633133adfee0183b8dd2</td><td>Cornell University and Stanford University</td><td></td></tr><tr><td>09f58353e48780c707cf24a0074e4d353da18934</td><td>Cornell University, Ithaca, NY, U.S.A</td><td></td></tr><tr><td>b185f0a39384ceb3c4923196aeed6d68830a069f</td><td>Cornell University, Ithaca, New York</td><td></td></tr><tr><td>345cc31c85e19cea9f8b8521be6a37937efd41c2</td><td>Cornell University, Washington University in St. Louis</td><td></td></tr><tr><td>93747de3d40376761d1ef83ffa72ec38cd385833</td><td>Correspondence should be addressed to: Astrid C. 
Homan, University of Amsterdam, Weesperplein</td><td>Department of Psychology</td></tr><tr><td>1442319de86d171ce9595b20866ec865003e66fc</td><td>Country University, San Sebastian, Spain</td><td></td></tr><tr><td>014143aa16604ec3f334c1407ceaa496d2ed726e</td><td>Courant Institute</td><td></td></tr><tr><td>55138c2b127ebdcc508503112bf1d1eeb5395604</td><td>Courant Institute and Google Research</td><td></td></tr><tr><td>55138c2b127ebdcc508503112bf1d1eeb5395604</td><td>Courant Institute of Mathematical Sciences</td><td></td></tr><tr><td>05d80c59c6fcc4652cfc38ed63d4c13e2211d944</td><td>Courant Institute of Mathematical Sciences and Google Research, New York, NY</td><td></td></tr><tr><td>05d80c59c6fcc4652cfc38ed63d4c13e2211d944</td><td>Courant Institute of Mathematical Sciences, New York, NY</td><td></td></tr><tr><td>07e639abf1621ceff27c9e3f548fadfa2052c912</td><td>Current Address: Research Institute of Child Development and Education, University of Amsterdam</td><td></td></tr><tr><td>3df7401906ae315e6aef3b4f13126de64b894a54</td><td>Curtin University of Technology</td><td>Department of Computing</td></tr><tr><td>1048c753e9488daa2441c50577fe5fdba5aa5d7c</td><td>Curtin University of Technology</td><td>Department of Computing</td></tr><tr><td>b88ceded6467e9b286f048bb1b17be5998a077bd</td><td>Curtin University, Perth, Australia</td><td></td></tr><tr><td>e9a5a38e7da3f0aa5d21499149536199f2e0e1f7</td><td>Curtin University, Perth, WA 6102, Australia</td><td>Department of Computing</td></tr><tr><td>3cc46bf79fb9225cf308815c7d41c8dd5625cc29</td><td>Cyprus University of Technology</td><td></td></tr><tr><td>9d3aa3b7d392fad596b067b13b9e42443bbc377c</td><td>Cyprus University of Technology</td><td>Department of Multimedia and Graphic Arts</td></tr><tr><td>70db3a0d2ca8a797153cc68506b8650908cb0ada</td><td>Cyprus University of Technology, Cyprus</td><td></td></tr><tr><td>1565721ebdbd2518224f54388ed4f6b21ebd26f3</td><td>Czech Technical 
University</td><td></td></tr><tr><td>276dbb667a66c23545534caa80be483222db7769</td><td>D Research Center, Kwangwoon University and Springer</td><td></td></tr><tr><td>88850b73449973a34fefe491f8836293fc208580</td><td>D.J. Sanghvi College of Engineering</td><td></td></tr><tr><td>88850b73449973a34fefe491f8836293fc208580</td><td>D.J. Sanghvi College of Engineering</td><td></td></tr><tr><td>88850b73449973a34fefe491f8836293fc208580</td><td>D.J. Sanghvi College of Engineering</td><td></td></tr><tr><td>88850b73449973a34fefe491f8836293fc208580</td><td>D.J. Sanghvi College of Engineering</td><td></td></tr><tr><td>9d757c0fede931b1c6ac344f67767533043cba14</td><td>D.Y.Patil Institute of Engineering and Technology, Pimpri, Pune</td><td></td></tr><tr><td>9d757c0fede931b1c6ac344f67767533043cba14</td><td>D.Y.Patil Institute of Engineering and Technology, Pimpri, Pune</td><td></td></tr><tr><td>c81ee278d27423fd16c1a114dcae486687ee27ff</td><td>D.Y.Patil Institute of Engineering and Technology, Pimpri, Pune</td><td></td></tr><tr><td>c81ee278d27423fd16c1a114dcae486687ee27ff</td><td>D.Y.Patil Institute of Engineering and Technology, Pimpri, Pune-18, Savitribai Phule Pune University</td><td></td></tr><tr><td>d5d7e89e6210fcbaa52dc277c1e307632cd91dab</td><td>DAIS, University of Venice, Italy</td><td></td></tr><tr><td>2ee817981e02c4709d65870c140665ed25b005cc</td><td>DAP - University of Sassari</td><td></td></tr><tr><td>568cff415e7e1bebd4769c4a628b90db293c1717</td><td>DCMandB, University of Michigan, Ann Arbor, USA 4 SCS, Carnegie Mellon University, Pittsburgh, USA</td><td></td></tr><tr><td>779ad364cae60ca57af593c83851360c0f52c7bf</td><td>DESTEC, FLSHR Mohammed V University-Agdal, Rabat, Morocco</td><td></td></tr><tr><td>aadf4b077880ae5eee5dd298ab9e79a1b0114555</td><td>DICGIM - University of Palermo</td><td></td></tr><tr><td>2b84630680e2c906f8d7ac528e2eb32c99ef203a</td><td>DIEI, University of Perugia, Italy</td><td></td></tr><tr><td>43bb20ccfda7b111850743a80a5929792cb031f0</td><td>DISI - 
University of Trento</td><td></td></tr><tr><td>2b84630680e2c906f8d7ac528e2eb32c99ef203a</td><td>DISI, University of Trento, Italy</td><td></td></tr><tr><td>e6f20e7431172c68f7fce0d4595100445a06c117</td><td>DISI, University of Trento, Trento, Italy</td><td></td></tr><tr><td>303517dfc327c3004ae866a6a340f16bab2ee3e3</td><td>DIT UNIVERSITY, DEHRADUN</td><td></td></tr><tr><td>5058a7ec68c32984c33f357ebaee96c59e269425</td><td>DPDCE, University IUAV</td><td></td></tr><tr><td>a01f9461bc8cf8fe40c26d223ab1abea5d8e2812</td><td>DPDCE, University IUAV, Santa Croce 1957, 30135 Venice, Italy</td><td></td></tr><tr><td>f963967e52a5fd97fa3ebd679fd098c3cb70340e</td><td>DSP Lab, Sharif University of Technology, Tehran, Iran</td><td></td></tr><tr><td>4aa8db1a3379f00db2403bba7dade5d6e258b9e9</td><td>DSP Lab, Sharif University of Technology, Tehran, Iran</td><td></td></tr><tr><td>72f4aaf7e2e3f215cd8762ce283988220f182a5b</td><td>DTU Informatics, Technical University of Denmark, DK-2800 Kgs. Lyngby, DENMARK</td><td></td></tr><tr><td>f5af4e9086b0c3aee942cb93ece5820bdc9c9748</td><td>DUBLIN CITY UNIVERSITY</td><td></td></tr><tr><td>ae0765ebdffffd6e6cc33c7705df33b7e8478627</td><td>DUT-RU International School of Information Science and Engineering, Dalian University of Technology, Dalian, China</td><td></td></tr><tr><td>0b4c4ea4a133b9eab46b217e22bda4d9d13559e6</td><td>DVMM Lab - Columbia University</td><td></td></tr><tr><td>1275852f2e78ed9afd189e8b845fdb5393413614</td><td>Dalian Maritime University</td><td></td></tr><tr><td>052f994898c79529955917f3dfc5181586282cf8</td><td>Dalian University of Technology</td><td></td></tr><tr><td>38f06a75eb0519ae1d4582a86ef4730cc8fb8d7f</td><td>Dalian University of Technology, China</td><td></td></tr><tr><td>7a9c317734acaf4b9bd8e07dd99221c457b94171</td><td>Dalian University of Technology, Dalian 116024, China</td><td></td></tr><tr><td>2b64a8c1f584389b611198d47a750f5d74234426</td><td>Dalian University of Technology, Dalian, 
China</td><td></td></tr><tr><td>9391618c09a51f72a1c30b2e890f4fac1f595ebd</td><td>Dalio Institute of Cardiovascular Imaging, Weill Cornell Medical College</td><td></td></tr><tr><td>8f3e120b030e6c1d035cb7bd9c22f6cc75782025</td><td>Dalle Molle Institute for Arti cial Intelligence</td><td></td></tr><tr><td>1af52c853ff1d0ddb8265727c1d70d81b4f9b3a9</td><td>Dankook University, 126 Jukjeon-dong, Suji-gu, Yongin-si, Gyeonggi-do, Korea</td><td>Department of Applied Computer Engineering</td></tr><tr><td>9b246c88a0435fd9f6d10dc88f47a1944dd8f89e</td><td>Dartmouth College</td><td></td></tr><tr><td>3328413ee9944de1cc7c9c1d1bf2fece79718ba1</td><td>Dartmouth College</td><td></td></tr><tr><td>3328413ee9944de1cc7c9c1d1bf2fece79718ba1</td><td>Dartmouth College</td><td></td></tr><tr><td>df71a00071d5a949f9c31371c2e5ee8b478e7dc8</td><td>Dartmouth College</td><td></td></tr><tr><td>df71a00071d5a949f9c31371c2e5ee8b478e7dc8</td><td>Dartmouth College</td><td></td></tr><tr><td>df71a00071d5a949f9c31371c2e5ee8b478e7dc8</td><td>Dartmouth College</td><td></td></tr><tr><td>fd7b6c77b46420c27725757553fcd1fb24ea29a8</td><td>Dartmouth College</td><td>Department of Computer Science</td></tr><tr><td>86374bb8d309ad4dbde65c21c6fda6586ae4147a</td><td>Dartmouth College</td><td></td></tr><tr><td>2af620e17d0ed67d9ccbca624250989ce372e255</td><td>Dartmouth College</td><td></td></tr><tr><td>2d38fd1df95f5025e2cee5bc439ba92b369a93df</td><td>Dartmouth College</td><td></td></tr><tr><td>8d6c4af9d4c01ff47fe0be48155174158a9a5e08</td><td>Dartmouth College</td><td></td></tr><tr><td>0cbc4dcf2aa76191bbf641358d6cecf38f644325</td><td>Dartmouth College, 6211 Sudiko Lab, Hanover, NH 03755, USA</td><td></td></tr><tr><td>1be0ce87bb5ba35fa2b45506ad997deef6d6a0a8</td><td>Dartmouth College, NH 03755 USA</td><td>Computer Science Department</td></tr><tr><td>e43cc682453cf3874785584fca813665878adaa7</td><td>Datta Meghe College of Engineering</td><td></td></tr><tr><td>ea890846912f16a0f3a860fce289596a7dac575f</td><td>David R. 
Simmons, University of</td><td></td></tr><tr><td>574705812f7c0e776ad5006ae5e61d9b071eebdb</td><td>Dayananda Sagar College of Engg., India</td><td>¹Department rtment of Telecommunication Engg.</td></tr><tr><td>574705812f7c0e776ad5006ae5e61d9b071eebdb</td><td>Dayananda Sagar College of Engg., India</td><td>²Department of Telecommunication Engg.</td></tr><tr><td>2bbbbe1873ad2800954058c749a00f30fe61ab17</td><td>Dean, K.S.Rangasamy College of Technology, Namakkal, TamilNadu, India</td><td></td></tr><tr><td>738a985fba44f9f5acd516e07d0d9578f2ffaa4e</td><td>Delft University of Technology</td><td></td></tr><tr><td>361d6345919c2edc5c3ce49bb4915ed2b4ee49be</td><td>Delft University of Technology</td><td></td></tr><tr><td>41f26101fed63a8d149744264dd5aa79f1928265</td><td>Delft University of Technology</td><td></td></tr><tr><td>473cbc5ec2609175041e1410bc6602b187d03b23</td><td>Delft University of Technology</td><td></td></tr><tr><td>067126ce1f1a205f98e33db7a3b77b7aec7fb45a</td><td>Delft University of Technology, The Netherlands</td><td></td></tr><tr><td>42765c170c14bd58e7200b09b2e1e17911eed42b</td><td>Democritus University of Thrace</td><td></td></tr><tr><td>4d6462fb78db88afff44561d06dd52227190689c</td><td>Dep. 
of Applied Mathematics and Analysis, University of Barcelona, Spain</td><td></td></tr><tr><td>d394bd9fbaad1f421df8a49347d4b3fca307db83</td><td>Deparment of Computer Science, Queen Mary, University of London, London, E1 4NS, UK</td><td></td></tr><tr><td>aa0c30bd923774add6e2f27ac74acd197b9110f2</td><td>Deparment of Computing, Goldsmiths, University of London, UK</td><td></td></tr><tr><td>aa0c30bd923774add6e2f27ac74acd197b9110f2</td><td>Deparment of Computing, Imperial College London, UK</td><td></td></tr><tr><td>ea218cebea2228b360680cb85ca133e8c2972e56</td><td>Departm nt of Information Engin ering Th Chines University of Hong Kong</td><td></td></tr><tr><td>68003e92a41d12647806d477dd7d20e4dcde1354</td><td>Deprtment of Computer Science and Engineering, JNTUA College of Engineering, India</td><td></td></tr><tr><td>1442319de86d171ce9595b20866ec865003e66fc</td><td>DeustoTech - University of Deusto</td><td></td></tr><tr><td>74e869bc7c99093a5ff9f8cfc3f533ccf1b135d8</td><td>Deva Ramanan, University of California at Irvine</td><td></td></tr><tr><td>89bc311df99ad0127383a9149d1684dfd8a5aa34</td><td>Dextro Robotics, Inc. 
101 Avenue of the Americas, New York, USA</td><td></td></tr><tr><td>026e4ee480475e63ae68570d73388f8dfd4b4cde</td><td>Dhaka University</td><td></td></tr><tr><td>2e1415a814ae9abace5550e4893e13bd988c7ba1</td><td>Dhanalakshmi Srinivasan College of Engineering</td><td>ECE Department</td></tr><tr><td>6ae96f68187f1cdb9472104b5431ec66f4b2470f</td><td>Dietrich College Honors Theses</td><td></td></tr><tr><td>6ae96f68187f1cdb9472104b5431ec66f4b2470f</td><td>Dietrich College of Humanities and Social Sciences</td><td></td></tr><tr><td>1f89439524e87a6514f4fbe7ed34bda4fd1ce286</td><td>Dietrich College of Humanities and Social Sciences</td><td></td></tr><tr><td>902114feaf33deac209225c210bbdecbd9ef33b1</td><td>Digital Media Research Center</td><td></td></tr><tr><td>2bab44d3a4c5ca79fb8f87abfef4456d326a0445</td><td>Dipartimento di Sistemi e Informatica, University of Florence</td><td></td></tr><tr><td>a3dc109b1dff3846f5a2cc1fe2448230a76ad83f</td><td>Director, Hindustan College of Arts and Science, Coimbatore, Tamil Nadu, India</td><td></td></tr><tr><td>273b0511588ab0a81809a9e75ab3bd93d6a0f1e3</td><td>Division of Computer Engineering, Chonbuk National University, Jeonju-si, Jeollabuk-do</td><td></td></tr><tr><td>8b2704a5218a6ef70e553eaf0a463bd55129b69d</td><td>Division of Computer Engineering, Chonbuk National University, Jeonju-si, Jeollabuk-do</td><td></td></tr><tr><td>97137d5154a9f22a5d9ecc32e8e2b95d07a5a571</td><td>Division of Computer Engineering, Jeonbuk National University, Jeonju-si, Jeollabuk-do</td><td></td></tr><tr><td>59e9934720baf3c5df3a0e1e988202856e1f83ce</td><td>Division of Computer Science and Engineering, Hanyang University</td><td></td></tr><tr><td>a0e7f8771c7d83e502d52c276748a33bae3d5f81</td><td>Division of Computer Science, University of California, Berkeley, CA, USA e-mail</td><td></td></tr><tr><td>cc91001f9d299ad70deb6453d55b2c0b967f8c0d</td><td>Division of Electronics and Electrical Engineering, Dongguk University, 26 Pil-dong 3-ga, 
Jung-gu</td><td></td></tr><tr><td>ff01bc3f49130d436fca24b987b7e3beedfa404d</td><td>Division of Electronics and Electrical Engineering, Dongguk University, 30 Pildong-ro 1-gil, Jung-gu</td><td></td></tr><tr><td>6f3054f182c34ace890a32fdf1656b583fbc7445</td><td>Division of Electronics and Electrical Engineering, Dongguk University, 30 Pildong-ro, 1-gil, Jung-gu</td><td></td></tr><tr><td>d00c335fbb542bc628642c1db36791eae24e02b7</td><td>Division of Electronics and Electrical Engineering, Dongguk University, 30 Pildong-ro, 1-gil, Jung-gu</td><td></td></tr><tr><td>c3b3636080b9931ac802e2dd28b7b684d6cf4f8b</td><td>Division of IT Convergence, Daegu Gyeongbuk Institute of Science and Technology</td><td></td></tr><tr><td>497bf2df484906e5430aa3045cf04a40c9225f94</td><td>Division of Information and Computer Engineering, Ajou University, Suwon 443-749, Korea</td><td></td></tr><tr><td>ed08ac6da6f8ead590b390b1d14e8a9b97370794</td><td>Dnyanopasak College Parbhani, M.S, India</td><td>Department of C.S.</td></tr><tr><td>528069963f0bd0861f380f53270c96c269a3ea1c</td><td>Doctor of Philosophy in Computer Science at Cardi University, July 24th</td><td></td></tr><tr><td>0aa9872daf2876db8d8e5d6197c1ce0f8efee4b7</td><td>Doctor of Philosophy in Computing of Imperial College, February</td><td></td></tr><tr><td>1467c4ab821c3b340abe05a1b13a19318ebbce98</td><td>Doctor of Philosophy of University College London</td><td></td></tr><tr><td>6e782073a013ce3dbc5b9b56087fd0300c510f67</td><td>Doctoral School of Automatic Control and Computers, University POLITEHNICA of Bucharest, Romania</td><td></td></tr><tr><td>146bbf00298ee1caecde3d74e59a2b8773d2c0fc</td><td>Downloaded from the University of Groningen/UMCG research database (Pure): http://www.rug.nl/research/portal. For technical reasons the</td><td></td></tr><tr><td>f8f872044be2918de442ba26a30336d80d200c42</td><td>Dr C V Raman Institute of Science and Technology</td><td></td></tr><tr><td>3f4bfa4e3655ef392eb5ad609d31c05f29826b45</td><td>Dr. B. C. 
Roy Engineering College</td><td></td></tr><tr><td>35e87e06cf19908855a16ede8c79a0d3d7687b5c</td><td>Dr. Babasaheb Ambedkar Marathwada University</td><td></td></tr><tr><td>1910f5f7ac81d4fcc30284e88dee3537887acdf3</td><td>Dr.D.Y.Patil College of Engineering, Pune, Maharashtra, India</td><td></td></tr><tr><td>e5342233141a1d3858ed99ccd8ca0fead519f58b</td><td>Dr.Pauls Engineering College, Villupuram District, Tamilnadu, India</td><td>Department of CSE</td></tr><tr><td>59e75aad529b8001afc7e194e21668425119b864</td><td>Drexel University</td><td></td></tr><tr><td>0aae88cf63090ea5b2c80cd014ef4837bcbaadd8</td><td>Drexel University</td><td></td></tr><tr><td>900207b3bc3a4e5244cae9838643a9685a84fee0</td><td>Drexel University</td><td></td></tr><tr><td>17a8d1b1b4c23a630b051f35e47663fc04dcf043</td><td>Drexel University, Philadelphia, PA</td><td>Department of Computer Science</td></tr><tr><td>0be764800507d2e683b3fb6576086e37e56059d1</td><td>Duke University</td><td></td></tr><tr><td>9cd6a81a519545bf8aa9023f6e879521f85d4cd1</td><td>Duke University</td><td></td></tr><tr><td>9cd6a81a519545bf8aa9023f6e879521f85d4cd1</td><td>Duke University</td><td></td></tr><tr><td>2742a61d32053761bcc14bd6c32365bfcdbefe35</td><td>Duke University</td><td></td></tr><tr><td>2742a61d32053761bcc14bd6c32365bfcdbefe35</td><td>Duke University</td><td></td></tr><tr><td>3933416f88c36023a0cba63940eb92f5cef8001a</td><td>Duke University</td><td></td></tr><tr><td>1badfeece64d1bf43aa55c141afe61c74d0bd25e</td><td>Duke University</td><td></td></tr><tr><td>8ccde9d80706a59e606f6e6d48d4260b60ccc736</td><td>Duke University</td><td>Department of Mathematics</td></tr><tr><td>8ccde9d80706a59e606f6e6d48d4260b60ccc736</td><td>Duke University</td><td>Department of Electrical and Computer Engineering</td></tr><tr><td>cca9ae621e8228cfa787ec7954bb375536160e0d</td><td>Duke University, Durham, NC, USA</td><td></td></tr><tr><td>f412d9d7bc7534e7daafa43f8f5eab811e7e4148</td><td>Durham University Library, Stockton Road, Durham DH1 3LY, 
United Kingdom</td><td></td></tr><tr><td>cd23dc3227ee2a3ab0f4de1817d03ca771267aeb</td><td>Durham University, Durham, UK</td><td></td></tr><tr><td>c1f07ec629be1c6fe562af0e34b04c54e238dcd1</td><td>ECE dept, University of Miami</td><td></td></tr><tr><td>4e8c608fc4b8198f13f8a68b9c1a0780f6f50105</td><td>ECE, National University of Singapore, Singapore</td><td></td></tr><tr><td>7c4c442e9c04c6b98cd2aa221e9d7be15efd8663</td><td>ECSE, Rensselaer Polytechnic Institute, Troy, NY</td><td></td></tr><tr><td>dbed26cc6d818b3679e46677abc9fa8e04e8c6a6</td><td>ECSE, Rensselaer Polytechnic Institute, Troy, NY, USA</td><td></td></tr><tr><td>7b9961094d3e664fc76b12211f06e12c47a7e77d</td><td>EECS, Syracuse University, Syracuse, NY, USA</td><td></td></tr><tr><td>f60a85bd35fa85739d712f4c93ea80d31aa7de07</td><td>EECS, University of California Berkeley</td><td></td></tr><tr><td>88bef50410cea3c749c61ed68808fcff84840c37</td><td>EEMCS, University of Twente</td><td></td></tr><tr><td>1659a8b91c3f428f1ba6aeba69660f2c9d0a85c6</td><td>EEMCS, University of Twente Enschede, The Netherlands</td><td></td></tr><tr><td>3f957142ef66f2921e7c8c7eadc8e548dccc1327</td><td>EEMCS, University of Twente, Netherlands</td><td></td></tr><tr><td>044d9a8c61383312cdafbcc44b9d00d650b21c70</td><td>EEMCS, University of Twente, The Netherlands</td><td></td></tr><tr><td>4c87aafa779747828054cffee3125fcea332364d</td><td>EEMCS, University of Twente, The Netherlands</td><td></td></tr><tr><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td><td>EEMCS, University of Twente, The Netherlands</td><td></td></tr><tr><td>143bee9120bcd7df29a0f2ad6f0f0abfb23977b8</td><td>EEMCS, University of Twente, The Netherlands</td><td></td></tr><tr><td>4a5592ae1f5e9fa83d9fa17451c8ab49608421e4</td><td>EIMT, Open University of</td><td></td></tr><tr><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td><td>EIMT, Open University of Catalonia, Barcelona, Spain</td><td></td></tr><tr><td>b5d7c5aba7b1ededdf61700ca9d8591c65e84e88</td><td>ESAT, Katholieke Universiteit 
Leuven, Leuven, Belgium</td><td></td></tr><tr><td>46f2611dc4a9302e0ac00a79456fa162461a8c80</td><td>ESAT-PSI, KU Leuven, 2CV:HCI, KIT, Karlsruhe, 3University of Bonn, 4Sensifai</td><td></td></tr><tr><td>071135dfb342bff884ddb9a4d8af0e70055c22a1</td><td>ESAT-PSI, KU Leuven, 2University of Bonn, 3CV:HCI, KIT, Karlsruhe, 4Sensifai</td><td></td></tr><tr><td>060034b59275c13746413ca9c67d6304cba50da6</td><td>ESTeM, University of Canberra</td><td></td></tr><tr><td>fffa2943808509fdbd2fc817cc5366752e57664a</td><td>ESTeM, University of Canberra</td><td></td></tr><tr><td>9b1bcef8bfef0fb5eb5ea9af0b699aa0534fceca</td><td>East China Normal University</td><td></td></tr><tr><td>d93baa5ecf3e1196b34494a79df0a1933fd2b4ec</td><td>East China Normal University</td><td></td></tr><tr><td>d93baa5ecf3e1196b34494a79df0a1933fd2b4ec</td><td>East China Normal University</td><td></td></tr><tr><td>d93baa5ecf3e1196b34494a79df0a1933fd2b4ec</td><td>East China Normal University</td><td></td></tr><tr><td>d961617db4e95382ba869a7603006edc4d66ac3b</td><td>East China Normal University</td><td></td></tr><tr><td>03baf00a3d00887dd7c828c333d4a29f3aacd5f5</td><td>Eastern Mediterranean University</td><td></td></tr><tr><td>3f4c262d836b2867a53eefb959057350bf7219c9</td><td>Eastern Mediterranean University</td><td>Computer Engineering Department</td></tr><tr><td>c5421a18583f629b49ca20577022f201692c4f5d</td><td>Eastern Mediterranean University, Gazima usa, Northern Cyprus</td><td>Department of Computer Engineering</td></tr><tr><td>026e4ee480475e63ae68570d73388f8dfd4b4cde</td><td>Eastern University</td><td></td></tr><tr><td>0cd8895b4a8f16618686f622522726991ca2a324</td><td>Ecole Polytechnique Federale de Lausanne, Signal Processing Institute</td><td></td></tr><tr><td>b55d0c9a022874fb78653a0004998a66f8242cad</td><td>Economy (MKE) and the Korea Evaluation Institute of Industrial Technology (KEIT</td><td></td></tr><tr><td>e00d391d7943561f5c7b772ab68e2bb6a85e64c4</td><td>Edited by David L. 
Donoho, Stanford University, Stanford, CA, and approved August 7, 2017 (received for review January</td><td></td></tr><tr><td>74eae724ef197f2822fb7f3029c63014625ce1ca</td><td>Education, Yunnan Normal University, Kunming, China</td><td></td></tr><tr><td>0ba0f000baf877bc00a9e144b88fa6d373db2708</td><td>Education, Yunnan NormalUniversity, Kunming, China2. College of Information, Yunnan</td><td></td></tr><tr><td>d185f4f05c587e23c0119f2cdfac8ea335197ac0</td><td>Eindhoven University of Technology, The Netherlands</td><td></td></tr><tr><td>7e00fb79576fe213853aeea39a6bc51df9fdca16</td><td>Eindhoven University of Technology, The Netherlands</td><td></td></tr><tr><td>1989a1f9ce18d8c2a0cee3196fe6fa363aab80c2</td><td>Eindhoven University of Technology, The Netherlands</td><td></td></tr><tr><td>39dc2ce4cce737e78010642048b6ed1b71e8ac2f</td><td>Elect. Eng. Faculty, Tabriz University, Tabriz, Iran</td><td></td></tr><tr><td>210b98394c3be96e7fd75d3eb11a391da1b3a6ca</td><td>Electrical Eng. Dep., Central Tehran Branch, Islamic Azad University, Tehran, Iran</td><td></td></tr><tr><td>126214ef0dcef2b456cb413905fa13160c73ec8e</td><td>Electrical Engineering Institute, EPFL</td><td></td></tr><tr><td>ea6f5c8e12513dbaca6bbdff495ef2975b8001bd</td><td>Electrical Engineering LR11ESO4), University of Tunis EL Manar. Adress: ENSIT 5, Avenue Taha Hussein, B. P. 
: 56, Bab</td><td></td></tr><tr><td>ea6f5c8e12513dbaca6bbdff495ef2975b8001bd</td><td>Electrical Engineering, University of</td><td></td></tr><tr><td>0ea7b7fff090c707684fd4dc13e0a8f39b300a97</td><td>Electrical and Computer Engineering, National University of Singapore, Singapore</td><td></td></tr><tr><td>e38371b69be4f341baa95bc854584e99b67c6d3a</td><td>Electrical and Computer Engineering, Northeastern University, Boston, MA</td><td></td></tr><tr><td>db82f9101f64d396a86fc2bd05b352e433d88d02</td><td>Electrical and Computer Engineering, The University of Memphis</td><td></td></tr><tr><td>22143664860c6356d3de3556ddebe3652f9c912a</td><td>Electrical and Computer Engineering, University of Auckland, New Zealand</td><td></td></tr><tr><td>f3ca2c43e8773b7062a8606286529c5bc9b3ce25</td><td>Electrical and Computer Engineering, University of Pittsburgh, USA</td><td></td></tr><tr><td>ac75c662568cbb7308400cc002469a14ff25edfd</td><td>Electrical and Computer Engineering, University of Toronto, M5S 3G4, Canada</td><td></td></tr><tr><td>03ac1c694bc84a27621da6bfe73ea9f7210c6d45</td><td>Electrical and Space Engineering, Lule University of Technology</td><td>Department of Computer Science</td></tr><tr><td>e8f0f9b74db6794830baa2cab48d99d8724e8cb6</td><td>Electrical, Computer, Rensselaer Polytechnic Institute</td><td>and Systems Engineering Department</td></tr><tr><td>245f8ec4373e0a6c1cae36cd6fed5a2babed1386</td><td>Electrical, Electronics and Automation Section, Universiti Kuala Lumpur Malaysian Spanish Institute</td><td></td></tr><tr><td>a51882cfd0706512bf50e12c0a7dd0775285030d</td><td>Electronic Engineering and Computer Science Queen Mary University of London</td><td></td></tr><tr><td>7224d58a7e1f02b84994b60dc3b84d9fe6941ff5</td><td>Electronic Engineering and Computer Science, Queen Mary University of London, UK</td><td></td></tr><tr><td>b375db63742f8a67c2a7d663f23774aedccc84e5</td><td>Electronic and Information Engineering, University of Bologna, Italy</td><td>Department of 
Electrical</td></tr><tr><td>191674c64f89c1b5cba19732869aa48c38698c84</td><td>Electronics And Communication Engg., Adhiyamaan College of Engg., Hosur, (India</td><td></td></tr><tr><td>d82b93f848d5442f82154a6011d26df8a9cd00e7</td><td>Electronics Engineering, National Institute of Technical Teachers</td><td></td></tr><tr><td>31d60b2af2c0e172c1a6a124718e99075818c408</td><td>Electronics and Communication Engineering, Chuo University</td><td>Department of Electrical</td></tr><tr><td>3bd1d41a656c8159305ba2aa395f68f41ab84f31</td><td>Electronics and Computer Science, University of Southampton, Southampton, Hampshire</td><td></td></tr><tr><td>887b7676a4efde616d13f38fcbfe322a791d1413</td><td>Electronics and Telecommunications Research Institute</td><td></td></tr><tr><td>7cf579088e0456d04b531da385002825ca6314e2</td><td>Emory University</td><td></td></tr><tr><td>7cf579088e0456d04b531da385002825ca6314e2</td><td>Emory University</td><td></td></tr><tr><td>656531036cee6b2c2c71954bb6540ef6b2e016d0</td><td>Emory University, USA</td><td></td></tr><tr><td>90d735cffd84e8f2ae4d0c9493590f3a7d99daf1</td><td>Engg, Priyadarshini College of</td><td></td></tr><tr><td>9c1860de6d6e991a45325c997bf9651c8a9d716f</td><td>Engineering Chaoyang University Nankai Institute of</td><td></td></tr><tr><td>d02c54192dbd0798b43231efe1159d6b4375ad36</td><td>Engineering Chaoyang University Nankai Institute of</td><td></td></tr><tr><td>5d185d82832acd430981ffed3de055db34e3c653</td><td>Engineering Institute, Autonomous University of Baja California, Blvd. 
Benito Ju rez</td><td></td></tr><tr><td>ee815f60dc4a090fa9fcfba0135f4707af21420d</td><td>Engineering and Applied Science, SUNY Binghamton University, NY, USA</td><td></td></tr><tr><td>3dabf7d853769cfc4986aec443cc8b6699136ed0</td><td>Engineering and Natural Science, Sabanci University, Istanbul, Turkey</td><td></td></tr><tr><td>ce6f459462ea9419ca5adcc549d1d10e616c0213</td><td>Engineering, G.H.Raisoni College of Engineering</td><td></td></tr><tr><td>9853136dbd7d5f6a9c57dc66060cab44a86cd662</td><td>Engineering, Iran University</td><td></td></tr><tr><td>9853136dbd7d5f6a9c57dc66060cab44a86cd662</td><td>Engineering, Iran University</td><td></td></tr><tr><td>63c109946ffd401ee1195ed28f2fb87c2159e63d</td><td>Engineering, National Formosa University</td><td></td></tr><tr><td>2f78e471d2ec66057b7b718fab8bfd8e5183d8f4</td><td>Engineering, Ton Duc Thang University, 19 Nguyen Huu Tho Street, Ho Chi Minh City, Vietman</td><td></td></tr><tr><td>23e75f5ce7e73714b63f036d6247fa0172d97cb6</td><td>Engineering, University of Akron, Akron, OH 44325-3904, USA</td><td></td></tr><tr><td>d5b0e73b584be507198b6665bcddeba92b62e1e5</td><td>Engineering, University of Dundee</td><td></td></tr><tr><td>ffe4bb47ec15f768e1744bdf530d5796ba56cfc1</td><td>Engineering, York University, Canada</td><td></td></tr><tr><td>2322ec2f3571e0ddc593c4e2237a6a794c61251d</td><td>Enlighten Research publications by members of the University of Glasgow</td><td></td></tr><tr><td>b59c8b44a568587bc1b61d130f0ca2f7a2ae3b88</td><td>Environment, Northumbria University, Newcastle, NE1 8ST, United Kingdom</td><td></td></tr><tr><td>1565721ebdbd2518224f54388ed4f6b21ebd26f3</td><td>Eskisehir Osmangazi University</td><td></td></tr><tr><td>13bda03fc8984d5943ed8d02e49a779d27c84114</td><td>Eskisehir Osmangazi University</td><td></td></tr><tr><td>14811696e75ce09fd84b75fdd0569c241ae02f12</td><td>Eskisehir Osmangazi University</td><td></td></tr><tr><td>396a19e29853f31736ca171a3f40c506ef418a9f</td><td>Exploratory Computer Vision Group, IBM 
T. J. Watson Research Center</td><td></td></tr><tr><td>68f89c1ee75a018c8eff86e15b1d2383c250529b</td><td>F.Ferraro, University of Rochester</td><td></td></tr><tr><td>214ac8196d8061981bef271b37a279526aab5024</td><td>FI-90014 University of Oulu, Finland</td><td></td></tr><tr><td>5121f42de7cb9e41f93646e087df82b573b23311</td><td>FL</td><td>Department of Mechanical and Aerospace Engineering - University of Florida - Gainesville</td></tr><tr><td>71e6a46b32a8163c9eda69e1badcee6348f1f56a</td><td>FX Palo Alto Laboratory, Inc., California, USA</td><td></td></tr><tr><td>df2c685aa9c234783ab51c1aa1bf1cb5d71a3dbb</td><td>FaceTec, Inc</td><td></td></tr><tr><td>e378ce25579f3676ca50c8f6454e92a886b9e4d7</td><td>Facebook 4Texas AandM University 5IBM Research</td><td></td></tr><tr><td>1c30bb689a40a895bd089e55e0cad746e343d1e2</td><td>Facebook AI Research, 2Dartmouth College</td><td></td></tr><tr><td>0ac664519b2b8abfb8966dafe60d093037275573</td><td>Facial Image Processing and Analysis Group, Institute for Anthropomatics</td><td></td></tr><tr><td>34d484b47af705e303fc6987413dc0180f5f04a9</td><td>Facial expression gures prominently in research on almost every aspect of emotion, including psychophys</td><td></td></tr><tr><td>d41c11ebcb06c82b7055e2964914b9af417abfb2</td><td>Facial expression gures prominently in research on almost every aspect of emotion, including psychophys</td><td></td></tr><tr><td>fac8cff9052fc5fab7d5ef114d1342daba5e4b82</td><td>Faculty member, Parallel Data Lab (PDL), Carnegie Mellon University</td><td></td></tr><tr><td>1576ed0f3926c6ce65e0ca770475bca6adcfdbb4</td><td>Faculty of Computer Science, Dalhousie University, Halifax, Canada</td><td></td></tr><tr><td>9be94fa0330dd493f127d51e4ef7f9fd64613cfc</td><td>Faculty of Computer Science, Mathematics, and Engineering, University of Twente, Enschede, Netherlands</td><td></td></tr><tr><td>3dbfd2fdbd28e4518e2ae05de8374057307e97b3</td><td>Faculty of Computer Science, University of A Coru na, Coru na, 
Spain</td><td></td></tr><tr><td>1bc9aaa41c08bbd0c01dd5d7d7ebf3e48ae78113</td><td>Faculty of Computer and Information Science, University of Ljubljana, Ve cna pot 113, SI-1000 Ljubljana</td><td></td></tr><tr><td>15cf7bdc36ec901596c56d04c934596cf7b43115</td><td>Faculty of Computer, Khoy Branch, Islamic Azad University, Khoy, Iran</td><td></td></tr><tr><td>4919663c62174a9bc0cc7f60da8f96974b397ad2</td><td>Faculty of Computers and Information, Cairo University, Cairo, Egypt</td><td></td></tr><tr><td>20b994a78cd1db6ba86ea5aab7211574df5940b3</td><td>Faculty of Computing and Informatics, Multimedia University, Malaysia</td><td></td></tr><tr><td>102b968d836177f9c436141e382915a4f8549276</td><td>Faculty of EEMCS, Delft University of Technology, The Netherlands</td><td></td></tr><tr><td>42afe6d016e52c99e2c0d876052ade9c192d91e7</td><td>Faculty of EEMCS, University of Twente, The Netherlands</td><td></td></tr><tr><td>2ca43325a5dbde91af90bf850b83b0984587b3cc</td><td>Faculty of ETI, Gdansk University of Technology, Gdansk, Poland</td><td>Department of Intelligent Interactive Systems</td></tr><tr><td>023ed32ac3ea6029f09b8c582efbe3866de7d00a</td><td>Faculty of Electrical Engineering, Czech Technical University</td><td></td></tr><tr><td>37c8514df89337f34421dc27b86d0eb45b660a5e</td><td>Faculty of Electrical Engineering, Czech Technical University in Prague</td><td></td></tr><tr><td>7c2ec6f4ab3eae86e0c1b4f586e9c158fb1d719d</td><td>Faculty of Electrical Engineering, Mathematics and Computer Science, Delft University of</td><td></td></tr><tr><td>e42998bbebddeeb4b2bedf5da23fa5c4efc976fa</td><td>Faculty of Electrical Engineering, Mathematics and Computer Science, University</td><td></td></tr><tr><td>3505c9b0a9631539e34663310aefe9b05ac02727</td><td>Faculty of Electrical Engineering, Mathematics and Computer Science, University of Twente, The</td><td></td></tr><tr><td>ac9dfbeb58d591b5aea13d13a83b1e23e7ef1fea</td><td>Faculty of Electrical Engineering, University of 
Ljubljana</td><td></td></tr><tr><td>368d59cf1733af511ed8abbcbeb4fb47afd4da1c</td><td>Faculty of Electrical Engineering, University of Ljubljana, Slovenia</td><td></td></tr><tr><td>1bc9aaa41c08bbd0c01dd5d7d7ebf3e48ae78113</td><td>Faculty of Electrical Engineering, University of Ljubljana, Tr a ka cesta 25, SI-1000 Ljubljana, Slovenia</td><td></td></tr><tr><td>02e39f23e08c2cb24d188bf0ca34141f3cc72d47</td><td>Faculty of Electrical Engineering, University of Ljubljana, Tr za ska 25, SI-1000 Ljubljana, Slovenia</td><td></td></tr><tr><td>afe9cfba90d4b1dbd7db1cf60faf91f24d12b286</td><td>Faculty of Electrical Engineering, University of Ljubljana, Tr za ska cesta</td><td></td></tr><tr><td>12003a7d65c4f98fb57587fd0e764b44d0d10125</td><td>Faculty of Electrical Engineering, University of Ljubljana, Tr za ska cesta 25, SI-1000 Ljubljana, Slovenia</td><td></td></tr><tr><td>32728e1eb1da13686b69cc0bd7cce55a5c963cdd</td><td>Faculty of Electrical and Computer Engineering, Bu-Ali Sina University, Hamadan, Iran</td><td></td></tr><tr><td>32728e1eb1da13686b69cc0bd7cce55a5c963cdd</td><td>Faculty of Electrical and Computer Engineering, Tarbiat Modares University, Tehran, Iran</td><td></td></tr><tr><td>32728e1eb1da13686b69cc0bd7cce55a5c963cdd</td><td>Faculty of Electrical and Computer Engineering, Tarbiat Modares University, Tehran, Iran</td><td></td></tr><tr><td>b166ce267ddb705e6ed855c6b679ec699d62e9cb</td><td>Faculty of Electronics and Communication, Taishan University</td><td>Department of Physics and Electronics Engineering</td></tr><tr><td>b166ce267ddb705e6ed855c6b679ec699d62e9cb</td><td>Faculty of Electronics and Communication, Yanshan University</td><td>Department of Information Science and Engineering</td></tr><tr><td>fc68c5a3ab80d2d31e6fd4865a7ff2b4ab66ca9f</td><td>Faculty of Electronics, Telecommunications and Informatics, Gdansk University of Technology, Poland</td><td></td></tr><tr><td>a308077e98a611a977e1e85b5a6073f1a9bae6f0</td><td>Faculty of Engineering Building, University 
of Malaya, 50603 Kuala Lumpur, Malaysia</td><td>Department of Biomedical Engineering</td></tr><tr><td>3dcebd4a1d66313dcd043f71162d677761b07a0d</td><td>Faculty of Engineering and Natural Sciences, Sabanc University, stanbul, Turkey</td><td></td></tr><tr><td>89c51f73ec5ebd1c2a9000123deaf628acf3cdd8</td><td>Faculty of Engineering and Technology, Multimedia University (Melaka Campus</td><td></td></tr><tr><td>0172867f4c712b33168d9da79c6d3859b198ed4c</td><td>Faculty of Engineering, Ain Shams University, Cairo, Egypt</td><td>Computer and System Engineering Department</td></tr><tr><td>03ac1c694bc84a27621da6bfe73ea9f7210c6d45</td><td>Faculty of Engineering, Al Azhar University, Qena, Egypt</td><td></td></tr><tr><td>33ef419dffef85443ec9fe89a93f928bafdc922e</td><td>Faculty of Engineering, Bar-Ilan University, Israel</td><td></td></tr><tr><td>5f7c4c20ae2731bfb650a96b69fd065bf0bb950e</td><td>Faculty of Engineering, Ferdowsi University, Mashhad, Iran</td><td>Department of Computer Engineering</td></tr><tr><td>20b994a78cd1db6ba86ea5aab7211574df5940b3</td><td>Faculty of Engineering, Multimedia University, Malaysia</td><td></td></tr><tr><td>0b183f5260667c16ef6f640e5da50272c36d599b</td><td>Faculty of Informatics, E otv os Lor and University, Budapest, Hungary</td><td></td></tr><tr><td>1cfe3533759bf95be1fce8ce1d1aa2aeb5bfb4cc</td><td>Faculty of Informatics, University of Debrecen, Hungary</td><td></td></tr><tr><td>e4df83b7424842ff5864c10fa55d38eae1c45fac</td><td>Faculty of Information Science and Technology, Multimedia University, 75450 Melaka, Malaysia</td><td></td></tr><tr><td>3daf1191d43e21a8302d98567630b0e2025913b0</td><td>Faculty of Information Technology, Barrett Hodgson University, Karachi, Pakistan</td><td></td></tr><tr><td>50e45e9c55c9e79aaae43aff7d9e2f079a2d787b</td><td>Faculty of Information Technology, Vietnam National University of Agriculture, Hanoi 10000, Vietnam</td><td></td></tr><tr><td>59e2037f5079794cb9128c7f0900a568ced14c2a</td><td>Faculty of Mathematics and 
Computer Science, University of Barcelona, Barcelona, Spain</td><td></td></tr><tr><td>2c62b9e64aeddf12f9d399b43baaefbca8e11148</td><td>Faculty of Natural Sciences, University of Stirling, Stirling FK9 4LA, UK</td><td></td></tr><tr><td>6eb1e006b7758b636a569ca9e15aafd038d2c1b1</td><td>Faculty of Science and Engineering, Waseda University, Tokyo, Japan</td><td></td></tr><tr><td>8d91f06af4ef65193f3943005922f25dbb483ee4</td><td>Faculty of Science and Technology, University of Macau</td><td>Department of Mathematics</td></tr><tr><td>102b968d836177f9c436141e382915a4f8549276</td><td>Faculty of Science, University of Amsterdam, The Netherlands</td><td></td></tr><tr><td>6d97e69bbba5d1f5c353f9a514d62aff63bc0fb1</td><td>Faculty of Science, University of Amsterdam, The Netherlands</td><td></td></tr><tr><td>a75edf8124f5b52690c08ff35b0c7eb8355fe950</td><td>Faculty of Science, University of Amsterdam, The Netherlands</td><td></td></tr><tr><td>eff87ecafed67cc6fc4f661cb077fed5440994bb</td><td>Faculty of Science, University of Amsterdam, The Netherlands</td><td></td></tr><tr><td>f0ae807627f81acb63eb5837c75a1e895a92c376</td><td>Faculty of Telecommunications, Technical University, Sofia, Bulgaria</td><td></td></tr><tr><td>f0ae807627f81acb63eb5837c75a1e895a92c376</td><td>Faculty of Telecommunications, Technical University, Sofia, Bulgaria</td><td></td></tr><tr><td>26af867977f90342c9648ccf7e30f94470d40a73</td><td>Federal Institute of Science and Technology, Mookkannoor</td><td></td></tr><tr><td>26af867977f90342c9648ccf7e30f94470d40a73</td><td>Federal Institute of Science and Technology, Mookkannoor</td><td></td></tr><tr><td>52012b4ecb78f6b4b9ea496be98bcfe0944353cd</td><td>Federal University Technology Akure, PMB 704, Akure, Nigeria</td><td>Department of Computer Science</td></tr><tr><td>21b16df93f0fab4864816f35ccb3207778a51952</td><td>Federal University of Bahia (UFBA</td><td></td></tr><tr><td>9854145f2f64d52aac23c0301f4bb6657e32e562</td><td>Federal University of Campina Grande 
(UFCG</td><td></td></tr><tr><td>e0ed0e2d189ff73701ec72e167d44df4eb6e864d</td><td>Federal University of Para ba</td><td></td></tr><tr><td>d30050cfd16b29e43ed2024ae74787ac0bbcf2f7</td><td>Federal University of Technology - Paran a</td><td></td></tr><tr><td>a8583e80a455507a0f146143abeb35e769d25e4e</td><td>Feng Chia University, Taichung, Taiwan</td><td></td></tr><tr><td>11a210835b87ccb4989e9ba31e7559bb7a9fd292</td><td>Ferdowsi University of Mashhad, Mashhad, Iran</td><td>b Department of Computer Engineering</td></tr><tr><td>01125e3c68edb420b8d884ff53fb38d9fbe4f2b8</td><td>Figure 1: A few results from our VRN - Guided method, on a full range of pose, including large expressions</td><td></td></tr><tr><td>89d7cc9bbcd2fdc4f4434d153ecb83764242227b</td><td>Final Year Student, M.Tech IT, Vel Tech Dr. RR andDr. SR Technical University, Chennai</td><td></td></tr><tr><td>1a6c3c37c2e62b21ebc0f3533686dde4d0103b3f</td><td>Final Year, PanimalarInstitute of Technology</td><td>Department of Computer Science and Engineering</td></tr><tr><td>5cfbeae360398de9e20e4165485837bd42b93217</td><td>Firat University</td><td></td></tr><tr><td>5cfbeae360398de9e20e4165485837bd42b93217</td><td>Firat University</td><td></td></tr><tr><td>23aef683f60cb8af239b0906c45d11dac352fb4e</td><td>Florian Metze, Chair (Carnegie Mellon University</td><td></td></tr><tr><td>47d3b923730746bfaabaab29a35634c5f72c3f04</td><td>Florida Institute Of Technology, Melbourne Fl</td><td>Electrical And Computer Engineering Department</td></tr><tr><td>68f69e6c6c66cfde3d02237a6918c9d1ee678e1b</td><td>Florida International University</td><td></td></tr><tr><td>33ac7fd3a622da23308f21b0c4986ae8a86ecd2b</td><td>Florida International University</td><td></td></tr><tr><td>715b69575dadd7804b4f8ccb419a3ad8b7b7ca89</td><td>Florida International University</td><td>Department of Psychology</td></tr><tr><td>715b69575dadd7804b4f8ccb419a3ad8b7b7ca89</td><td>Florida International University</td><td>Department of Mathematics and 
Statistics</td></tr><tr><td>14e8dbc0db89ef722c3c198ae19bde58138e88bf</td><td>Florida International University</td><td></td></tr><tr><td>14e8dbc0db89ef722c3c198ae19bde58138e88bf</td><td>Florida International University</td><td></td></tr><tr><td>26a44feb7a64db7986473ca801c251aa88748477</td><td>Florida State University</td><td></td></tr><tr><td>26a44feb7a64db7986473ca801c251aa88748477</td><td>Florida State University</td><td></td></tr><tr><td>64ec0c53dd1aa51eb15e8c2a577701e165b8517b</td><td>Florida State University</td><td></td></tr><tr><td>64ec0c53dd1aa51eb15e8c2a577701e165b8517b</td><td>Florida State University</td><td></td></tr><tr><td>1ed6c7e02b4b3ef76f74dd04b2b6050faa6e2177</td><td>Florida State University</td><td></td></tr><tr><td>2878b06f3c416c98496aad6fc2ddf68d2de5b8f6</td><td>Florida State University, Tallahassee, FL 32306, USA</td><td>a Department of Computer Science</td></tr><tr><td>2878b06f3c416c98496aad6fc2ddf68d2de5b8f6</td><td>Florida State University, Tallahassee, FL 32306, USA</td><td>b Department of Mathematics</td></tr><tr><td>24f022d807352abf071880877c38e53a98254dcd</td><td>Florida State University, Tallahassee, Florida, U.S.A</td><td>Statistics Department</td></tr><tr><td>42ea8a96eea023361721f0ea34264d3d0fc49ebd</td><td>Florida State University, USA</td><td>aDepartment of Statistics</td></tr><tr><td>0742d051caebf8a5d452c03c5d55dfb02f84baab</td><td>Formerly: Texas AandM University</td><td></td></tr><tr><td>7c42371bae54050dbbf7ded1e7a9b4109a23a482</td><td>Foundation University Rawalpindi Campus, Pakistan</td><td>Department of Software Engineering</td></tr><tr><td>0c3f7272a68c8e0aa6b92d132d1bf8541c062141</td><td>Foundation University, Rawalpindi 46000, Pakistan</td><td>Department of Software Engineering</td></tr><tr><td>8f3e3f0f97844d3bfd9e9ec566ac7a54f6931b09</td><td>Francis Xavier Engineering College, Tirunelveli, Tamilnadu, India</td><td>Department of Computer Science and 
Engineering</td></tr><tr><td>c1cc2a2a1ab66f6c9c6fabe28be45d1440a57c3d</td><td>Franklin. W. Olin College of Engineering</td><td></td></tr><tr><td>1a2b3fa1b933042687eb3d27ea0a3fcb67b66b43</td><td>Fraser University</td><td></td></tr><tr><td>2c1f8ddbfbb224271253a27fed0c2425599dfe47</td><td>Fraunhofer Heinrich Hertz Institute</td><td></td></tr><tr><td>2c1f8ddbfbb224271253a27fed0c2425599dfe47</td><td>Fraunhofer Heinrich Hertz Institute</td><td></td></tr><tr><td>0a60d9d62620e4f9bb3596ab7bb37afef0a90a4f</td><td>Fraunhofer Institute for Digital Media Technology, Germany</td><td></td></tr><tr><td>749382d19bfe9fb8d0c5e94d0c9b0a63ab531cb7</td><td>Fraunhofer Institute for Integrated Circuits IIS</td><td></td></tr><tr><td>50ccc98d9ce06160cdf92aaf470b8f4edbd8b899</td><td>Fraunhofer Institute of Optronics, System Technologies and Image Exploitation (Fraunhofer IOSB</td><td></td></tr><tr><td>346dbc7484a1d930e7cc44276c29d134ad76dc3f</td><td>Friedrich Schiller University, D-07740 Jena</td><td>b Department of Computer Science</td></tr><tr><td>7df4f96138a4e23492ea96cf921794fc5287ba72</td><td>Fudan University</td><td></td></tr><tr><td>994b52bf884c71a28b4f5be4eda6baaacad1beee</td><td>Fudan University</td><td></td></tr><tr><td>1a4b6ee6cd846ef5e3030a6ae59f026e5f50eda6</td><td>Fudan University, 2Microsoft Research Asia, 3University of Maryland</td><td></td></tr><tr><td>0dfa460a35f7cab4705726b6367557b9f7842c65</td><td>Fudan University, Shanghai, China</td><td></td></tr><tr><td>a46086e210c98dcb6cb9a211286ef906c580f4e8</td><td>Fudan University, Shanghai, China</td><td></td></tr><tr><td>b5c749f98710c19b6c41062c60fb605e1ef4312a</td><td>Fudan University, Shanghai, China</td><td></td></tr><tr><td>ee6b503ab512a293e3088fdd7a1c893a77902acb</td><td>Fudan University, Shanghai, China</td><td></td></tr><tr><td>1dacc2f4890431d867a038fd81c111d639cf4d7e</td><td>Funding was provided by the U.S. National Institutes of Mental</td><td></td></tr><tr><td>477236563c6a6c6db922045453b74d3f9535bfa1</td><td>G. 
H .Raisoni Collage of Engg and Technology, Wagholi, Pune</td><td>Computer and Science Department Savitribai Phule Pune University</td></tr><tr><td>6d4b5444c45880517213a2fdcdb6f17064b3fa91</td><td>G.H.Raisoni College of Engg. and Mgmt., Pune, India</td><td></td></tr><tr><td>6d4b5444c45880517213a2fdcdb6f17064b3fa91</td><td>G.H.Raisoni College of Engg. and Mgmt., Pune, India</td><td></td></tr><tr><td>6d4b5444c45880517213a2fdcdb6f17064b3fa91</td><td>G.H.Raisoni College of Engg. and Mgmt., Pune, India</td><td></td></tr><tr><td>6515fe829d0b31a5e1f4dc2970a78684237f6edb</td><td>GE Global Research Center</td><td></td></tr><tr><td>c87d5036d3a374c66ec4f5870df47df7176ce8b9</td><td>GIPSA-lab, Institute of Engineering, Universit Grenoble Alpes, Centre National de la Recherche Scienti que, Grenoble INP</td><td></td></tr><tr><td>69ff40fd5ce7c3e6db95a2b63d763edd8db3a102</td><td>GIT Vision Lab, http://vision.gyte.edu.tr/, Gebze Institute of Technology</td><td>Department of Computer Engineering</td></tr><tr><td>18166432309000d9a5873f989b39c72a682932f5</td><td>GRASP Laboratory, University of Pennsylvania, 3330 Walnut Street, Philadelphia, PA, USA</td><td></td></tr><tr><td>5860cf0f24f2ec3f8cbc39292976eed52ba2eafd</td><td>GREYC Laboratory, ENSICAEN - University of Caen Basse Normandie - CNRS</td><td></td></tr><tr><td>eb4d2ec77fae67141f6cf74b3ed773997c2c0cf6</td><td>GREYC Research Lab</td><td></td></tr><tr><td>42dc36550912bc40f7faa195c60ff6ffc04e7cd6</td><td>GREYC UMR CNRS 6072 ENSICAEN-Image Team, University of Caen Basse-Normandie, 6 Boulevard Mar echal Juin</td><td></td></tr><tr><td>779ad364cae60ca57af593c83851360c0f52c7bf</td><td>GSCM-LRIT, Faculty of Sciences, Mohammed V University-Agdal, Rabat, Morocco</td><td></td></tr><tr><td>fe961cbe4be0a35becd2d722f9f364ec3c26bd34</td><td>Gallaudet University, Technology Access Program, 800 Florida Ave NE, Washington, DC</td><td></td></tr><tr><td>cd687ddbd89a832f51d5510c478942800a3e6854</td><td>Games Studio, Faculty of Engineering and IT, 
University of Technology, Sydney</td><td></td></tr><tr><td>8b547b87fd95c8ff6a74f89a2b072b60ec0a3351</td><td>Games Studio, Faculty of Engineering and IT, University of Technology, Sydney</td><td></td></tr><tr><td>0c8a0a81481ceb304bd7796e12f5d5fa869ee448</td><td>Gangnung-Wonju National University</td><td>Department of Electronics Engineering</td></tr><tr><td>370b6b83c7512419188f5373a962dd3175a56a9b</td><td>Gannan Normal University</td><td></td></tr><tr><td>769461ff717d987482b28b32b1e2a6e46570e3ff</td><td>Gannan Normal University, Ganzhou 341000, China</td><td>Department of Mathematics and Computer Science</td></tr><tr><td>0b183f5260667c16ef6f640e5da50272c36d599b</td><td>Gatsby Computational Neuroscience Unit, University College London, London, UK</td><td></td></tr><tr><td>af62621816fbbe7582a7d237ebae1a4d68fcf97d</td><td>Gayathri.S, M.E., Vins Christian college of Engineering</td><td>Department of Information Technology</td></tr><tr><td>81e366ed1834a8d01c4457eccae4d57d169cb932</td><td>Gdansk University of Technology</td><td></td></tr><tr><td>6821113166b030d2123c3cd793dd63d2c909a110</td><td>Gdansk University of Technology, Faculty of Electronics, Telecommunication</td><td></td></tr><tr><td>9c4cc11d0df2de42d6593f5284cfdf3f05da402a</td><td>George Mason University</td><td>Department of Computer Science</td></tr><tr><td>20ebbcb6157efaacf7a1ceb99f2f3e2fdf1384e6</td><td>George Mason University</td><td>Department of Computer Science</td></tr><tr><td>d28d697b578867500632b35b1b19d3d76698f4a9</td><td>George Mason University</td><td></td></tr><tr><td>4f028efe6708fc252851eee4a14292b7ce79d378</td><td>George Mason University</td><td></td></tr><tr><td>757e4cb981e807d83539d9982ad325331cb59b16</td><td>George Mason University, Fairfax Virginia, USA</td><td>Department of Computer Science</td></tr><tr><td>1c147261f5ab1b8ee0a54021a3168fa191096df8</td><td>George Mason University, Fairfax, VA, USA</td><td>Department of Computer 
Science</td></tr><tr><td>ce9e1dfa7705623bb67df3a91052062a0a0ca456</td><td>George Washington University</td><td></td></tr><tr><td>59d225486161b43b7bf6919b4a4b4113eb50f039</td><td>Georgia Institute of Technology</td><td></td></tr><tr><td>69eb6c91788e7c359ddd3500d01fb73433ce2e65</td><td>Georgia Institute of Technology</td><td></td></tr><tr><td>93af36da08bf99e68c9b0d36e141ed8154455ac2</td><td>Georgia Institute of Technology</td><td></td></tr><tr><td>5a87bc1eae2ec715a67db4603be3d1bb8e53ace2</td><td>Georgia Institute of Technology</td><td></td></tr><tr><td>5fa04523ff13a82b8b6612250a39e1edb5066521</td><td>Georgia Institute of Technology</td><td></td></tr><tr><td>bd8f77b7d3b9d272f7a68defc1412f73e5ac3135</td><td>Georgia Institute of Technology</td><td></td></tr><tr><td>e293a31260cf20996d12d14b8f29a9d4d99c4642</td><td>Georgia Institute of Technology</td><td></td></tr><tr><td>f4f9697f2519f1fe725ee7e3788119ed217dca34</td><td>Georgia Institute of Technology</td><td></td></tr><tr><td>e4bc529ced68fae154e125c72af5381b1185f34e</td><td>Georgia Institute of Technology</td><td></td></tr><tr><td>fb85867c989b9ee6b7899134136f81d6372526a9</td><td>Georgia Institute of Technology</td><td></td></tr><tr><td>20c02e98602f6adf1cebaba075d45cef50de089f</td><td>Georgia Institute of Technology</td><td></td></tr><tr><td>20c02e98602f6adf1cebaba075d45cef50de089f</td><td>Georgia Institute of Technology</td><td></td></tr><tr><td>1fdeba9c4064b449231eac95e610f3288801fd3e</td><td>Georgia Institute of Technology</td><td></td></tr><tr><td>5c8ae37d532c7bb8d7f00dfde84df4ba63f46297</td><td>Georgia Institute of Technology</td><td></td></tr><tr><td>5c8ae37d532c7bb8d7f00dfde84df4ba63f46297</td><td>Georgia Institute of Technology</td><td></td></tr><tr><td>91df860368cbcebebd83d59ae1670c0f47de171d</td><td>Georgia Institute of Technology</td><td></td></tr><tr><td>98c2053e0c31fab5bcb9ce5386335b647160cc09</td><td>Georgia Institute of 
Technology</td><td></td></tr><tr><td>aac934f2eed758d4a27562dae4e9c5415ff4cdb7</td><td>Georgia Institute of Technology</td><td></td></tr><tr><td>a6e25cab2251a8ded43c44b28a87f4c62e3a548a</td><td>Georgia Institute of Technology</td><td></td></tr><tr><td>7966146d72f9953330556baa04be746d18702047</td><td>Georgia Institute of Technology</td><td></td></tr><tr><td>4f0b8f730273e9f11b2bfad2415485414b96299f</td><td>Georgia Institute of Technology</td><td></td></tr><tr><td>4dca3d6341e1d991c902492952e726dc2a443d1c</td><td>Georgia Institute of Technology 2Emory University</td><td></td></tr><tr><td>96f0e7416994035c91f4e0dfa40fd45090debfc5</td><td>Georgia Institute of Technology, CVIT, IIIT Hyderabad, IIT Kanpur</td><td></td></tr><tr><td>106092fafb53e36077eba88f06feecd07b9e78e7</td><td>Georgia Institute of Technology, 2NEC Laboratories America, 3Georgia Tech Research Institute</td><td></td></tr><tr><td>4aa286914f17cd8cefa0320e41800a99c142a1cd</td><td>Georgia Institute of Technology, Atlanta, Georgia, USA</td><td></td></tr><tr><td>20a3ce81e7ddc1a121f4b13e439c4cbfb01adfba</td><td>German Research Center for Arti cial Intelligence (DFKI</td><td></td></tr><tr><td>5da740682f080a70a30dc46b0fc66616884463ec</td><td>German Research Center for Arti cial Intelligence (DFKI</td><td></td></tr><tr><td>df054fa8ee6bb7d2a50909939d90ef417c73604c</td><td>German Research Center for Arti cial Intelligence (DFKI), Kaiserslautern, Germany</td><td></td></tr><tr><td>434bf475addfb580707208618f99c8be0c55cf95</td><td>German Research Center for Arti cial Intelligence (DFKI), Kaiserslautern, Germany</td><td></td></tr><tr><td>fdbacf2ff0fc21e021c830cdcff7d347f2fddd8e</td><td>Germany, University of Oldenburg, Oldenburg, Germany</td><td>2Department of Psychology</td></tr><tr><td>b15a06d701f0a7f508e3355a09d0016de3d92a6d</td><td>Gettysburg College, Gettysburg, PA, USA</td><td></td></tr><tr><td>9d58e8ab656772d2c8a99a9fb876d5611fe2fe20</td><td>Ghent 
University</td><td></td></tr><tr><td>ea890846912f16a0f3a860fce289596a7dac575f</td><td>Giulia Andrighetto, Institute of</td><td></td></tr><tr><td>c92bb26238f6e30196b0c4a737d8847e61cfb7d4</td><td>Global Big Data Technologies Centre (GBDTC), University of Technology Sydney, Australia</td><td></td></tr><tr><td>ae4390873485c9432899977499c3bf17886fa149</td><td>Glyndwr University</td><td></td></tr><tr><td>80c8d143e7f61761f39baec5b6dfb8faeb814be9</td><td>Gokaraju Rangaraju Institute of Engineering and Technology, Hyd</td><td></td></tr><tr><td>0ced7b814ec3bb9aebe0fcf0cac3d78f36361eae</td><td>Gokaraju Rangaraju Institute of Engineering and Technology, Hyderabad</td><td>CSE Department</td></tr><tr><td>7ec7163ec1bc237c4c2f2841c386f2dbfd0cc922</td><td>Goldsmiths, University of London</td><td></td></tr><tr><td>7f82f8a416170e259b217186c9e38a9b05cb3eb4</td><td>Goldsmiths, University of London, London, UK</td><td>Department of Computing</td></tr><tr><td>193debca0be1c38dabc42dc772513e6653fd91d8</td><td>Goldsmiths, University of London, UK</td><td>Department of Computing</td></tr><tr><td>936227f7483938097cc1cdd3032016df54dbd5b6</td><td>Gonda Brain Research Center, Bar Ilan University, Israel</td><td></td></tr><tr><td>51cb09ee04831b95ae02e1bee9b451f8ac4526e3</td><td>Google, Inc</td><td></td></tr><tr><td>113c22eed8383c74fe6b218743395532e2897e71</td><td>Google, Inc</td><td></td></tr><tr><td>3634b4dd263c0f330245c086ce646c9bb748cd6b</td><td>Google, Inc</td><td></td></tr><tr><td>dde5125baefa1141f1ed50479a3fd67c528a965f</td><td>Google, Inc. 
2University of Massachusetts Amherst 3MIT CSAIL</td><td></td></tr><tr><td>924b14a9e36d0523a267293c6d149bca83e73f3b</td><td>Governance, Keio University</td><td></td></tr><tr><td>bc6de183cd8b2baeebafeefcf40be88468b04b74</td><td>Government College of Engineering</td><td></td></tr><tr><td>28bcf31f794dc27f73eb248e5a1b2c3294b3ec9d</td><td>Government College of Engineering, Aurangabad</td><td></td></tr><tr><td>bd78a853df61d03b7133aea58e45cd27d464c3cf</td><td>Government College of Engineering, Aurangabad [Autonomous</td><td></td></tr><tr><td>3fb26f3abcf0d287243646426cd5ddeee33624d4</td><td>Grad. School at Shenzhen, Tsinghua University</td><td>Tsinghua University 2Department of Automation</td></tr><tr><td>41aa209e9d294d370357434f310d49b2b0baebeb</td><td>Grad. School of Information Science and Technology, The University of Tokyo, Japan</td><td></td></tr><tr><td>47eba2f95679e106e463e8296c1f61f6ddfe815b</td><td>Graduate Institute of Electronics Engineering, National Taiwan University</td><td></td></tr><tr><td>91e507d2d8375bf474f6ffa87788aa3e742333ce</td><td>Graduate Institute of Networking and Multimedia, National Taiwan University</td><td></td></tr><tr><td>6ab33fa51467595f18a7a22f1d356323876f8262</td><td>Graduate Institute of Networking and Multimedia, National Taiwan University, Taipei, Taiwan</td><td></td></tr><tr><td>5b73b7b335f33cda2d0662a8e9520f357b65f3ac</td><td>Graduate Institute of Networking and Multimedia, National Taiwan University, Taipei, Taiwan</td><td></td></tr><tr><td>2afdda6fb85732d830cea242c1ff84497cd5f3cb</td><td>Graduate Institute ofNetworking and Multimedia, National Taiwan University, Taipei, Taiwan</td><td></td></tr><tr><td>9110c589c6e78daf4affd8e318d843dc750fb71a</td><td>Graduate School at Shenzhen, Tsinghua University, Shenzhen</td><td></td></tr><tr><td>207798603e3089a1c807c93e5f36f7767055ec06</td><td>Graduate School at Shenzhen, Tsinghua University, Shenzhen 518055, 
China</td><td></td></tr><tr><td>dced05d28f353be971ea2c14517e85bc457405f3</td><td>Graduate School of Advanced Imaging Science, Multimedia, and Film, Chung-Ang University</td><td></td></tr><tr><td>3fac7c60136a67b320fc1c132fde45205cd2ac66</td><td>Graduate School of Doshisha University, Kyoto, Japan</td><td></td></tr><tr><td>11408af8861fb0a977412e58c1a23d61b8df458c</td><td>Graduate School of Engineering, Kobe University, Kobe, 657-8501, Japan</td><td></td></tr><tr><td>837e99301e00c2244023a8a48ff98d7b521c93ac</td><td>Graduate School of Engineering, Tottori University</td><td></td></tr><tr><td>537d8c4c53604fd419918ec90d6ef28d045311d0</td><td>Graduate School of Informatics, Kyoto University</td><td></td></tr><tr><td>d3b550e587379c481392fb07f2cbbe11728cf7a6</td><td>Graduate School of Informatics, Kyoto University, Kyoto 606-8501, Japan</td><td></td></tr><tr><td>09b0ef3248ff8f1a05b8704a1b4cf64951575be9</td><td>Graduate School of Information Science and Technology, The University of Tokyo</td><td></td></tr><tr><td>9730b9cd998c0a549601c554221a596deda8af5b</td><td>Graduate School of Information Science and Technology, The University of Tokyo</td><td></td></tr><tr><td>c0723e0e154a33faa6ff959d084aebf07770ffaf</td><td>Graduate School of Information Science, Nagoya University, Japan</td><td></td></tr><tr><td>5b86c36e3eb59c347b81125d5dd57dd2a2c377a9</td><td>Graduate School of Information Science, Nagoya University; Furo-cho, Chikusa-ku, Nagoya, 464-8601, Japan</td><td></td></tr><tr><td>5865e824e3d8560e07840dd5f75cfe9bf68f9d96</td><td>Graduate School of Information Science, Nara Institute of Science and Technology, Ikoma-shi, Nara</td><td></td></tr><tr><td>a6ebe013b639f0f79def4c219f585b8a012be04f</td><td>Graduate School of Science and Engineering, Saitama University</td><td></td></tr><tr><td>b133b2d7df9b848253b9d75e2ca5c68e21eba008</td><td>Graduate School of System Informatics, Kobe University</td><td></td></tr><tr><td>9cbb6e42a35f26cf1d19f4875cd7f6953f10b95d</td><td>Graduate School 
of System Informatics, Kobe University, Kobe, 657-8501, Japan</td><td></td></tr><tr><td>11408af8861fb0a977412e58c1a23d61b8df458c</td><td>Graduate School of System Informatics, Kobe University, Kobe, 657-8501, Japan</td><td></td></tr><tr><td>98fb3890c565f1d32049a524ec425ceda1da5c24</td><td>Graduate School of System Informatics, Kobe University, Kobe, 657-8501, Japan</td><td></td></tr><tr><td>856317f27248cdb20226eaae599e46de628fb696</td><td>Graduate School of Systems and Information Engineering, University of Tsukuba</td><td></td></tr><tr><td>ffea8775fc9c32f573d1251e177cd283b4fe09c9</td><td>Graduate University for Advanced Studies, Kanagawa, Japan</td><td></td></tr><tr><td>449808b7aa9ee6b13ad1a21d9f058efaa400639a</td><td>Graduate University of CAS, 100190, Beijing, China</td><td></td></tr><tr><td>bd8b7599acf53e3053aa27cfd522764e28474e57</td><td>Graduate University of Chinese Academy of Sciences(CAS), 100190, China</td><td></td></tr><tr><td>32a40c43a9bc1f1c1ed10be3b9f10609d7e0cb6b</td><td>Graduate University of Chinese Academy of Sciences, Beijing 100049, China</td><td></td></tr><tr><td>80bd795930837330e3ced199f5b9b75398336b87</td><td>Graduate University of Chinese Academy of Sciences, Beijing 100049, China</td><td></td></tr><tr><td>061e29eae705f318eee703b9e17dc0989547ba0c</td><td>Graduate University of Chinese Academy of Sciences, Beijing 100049, China</td><td></td></tr><tr><td>64d5772f44efe32eb24c9968a3085bc0786bfca7</td><td>Graduate University of Chinese Academy of Sciences, Beijing 100049, China</td><td></td></tr><tr><td>ac86ccc16d555484a91741e4cb578b75599147b2</td><td>Gravis Research Group, University of Basel</td><td>Department for Mathematics and Computer Science</td></tr><tr><td>44f23600671473c3ddb65a308ca97657bc92e527</td><td>Graz University of Technology</td><td></td></tr><tr><td>44f23600671473c3ddb65a308ca97657bc92e527</td><td>Graz University of Technology</td><td></td></tr><tr><td>96a9ca7a8366ae0efe6b58a515d15b44776faf6e</td><td>Graz University of 
Technology</td><td></td></tr><tr><td>de8381903c579a4fed609dff3e52a1dc51154951</td><td>Graz University of Technology</td><td></td></tr><tr><td>c5935b92bd23fd25cae20222c7c2abc9f4caa770</td><td>Graz University of Technology</td><td></td></tr><tr><td>c5935b92bd23fd25cae20222c7c2abc9f4caa770</td><td>Graz University of Technology</td><td></td></tr><tr><td>4ab10174a4f98f7e2da7cf6ccfeb9bc64c8e7da8</td><td>Graz University of Technology</td><td></td></tr><tr><td>fc2bad3544c7c8dc7cd182f54888baf99ed75e53</td><td>Graz University of Technology, Austria</td><td></td></tr><tr><td>80277fb3a8a981933533cf478245f262652a33b5</td><td>Graz University of Technology, Austria</td><td></td></tr><tr><td>5c8672c0d2f28fd5d2d2c4b9818fcff43fb01a48</td><td>Graz University of Technology, Austria</td><td></td></tr><tr><td>9d8ff782f68547cf72b7f3f3beda9dc3e8ecfce6</td><td>Gri th University, QLD-4111, Brisbane, Australia</td><td></td></tr><tr><td>7ec7163ec1bc237c4c2f2841c386f2dbfd0cc922</td><td>Grif th University, Australia</td><td></td></tr><tr><td>05f3d1e9fb254b275354ca69018e9ed321dd8755</td><td>Grif th University, QLD, Australia</td><td></td></tr><tr><td>ee815f60dc4a090fa9fcfba0135f4707af21420d</td><td>Grove School of Engineering, CUNY City College, NY, USA</td><td></td></tr><tr><td>d72973a72b5d891a4c2d873daeb1bc274b48cddf</td><td>Guangdong Medical College</td><td></td></tr><tr><td>764882e6779fbee29c3d87e00302befc52d2ea8d</td><td>Guangdong University of Technology</td><td></td></tr><tr><td>764882e6779fbee29c3d87e00302befc52d2ea8d</td><td>Guangdong University of Technology</td><td></td></tr><tr><td>764882e6779fbee29c3d87e00302befc52d2ea8d</td><td>Guangdong University of Technology</td><td></td></tr><tr><td>1b70bbf7cdfc692873ce98dd3c0e191580a1b041</td><td>Guide, HOD, Computer Science, Shah and Anchor Kuttchi Engineering College, Mumbai, India</td><td></td></tr><tr><td>9d36c81b27e67c515df661913a54a797cd1260bb</td><td>Gujarat Technological University, India</td><td>Department of Information 
Technology</td></tr><tr><td>9d36c81b27e67c515df661913a54a797cd1260bb</td><td>Gujarat Technological University, India</td><td>Department of Computer Engineering</td></tr><tr><td>2b4d092d70efc13790d0c737c916b89952d4d8c7</td><td>Gujarat Technological University, V.V.Nagar, India</td><td></td></tr><tr><td>15cf1f17aeba62cd834116b770f173b0aa614bf4</td><td>Gyan Ganga Institute of</td><td></td></tr><tr><td>68f89c1ee75a018c8eff86e15b1d2383c250529b</td><td>H. He, Honkong Polytechnic University</td><td></td></tr><tr><td>c588c89a72f89eed29d42f34bfa5d4cffa530732</td><td>HAVELSAN Inc., 2Bilkent University, 3Hacettepe University</td><td></td></tr><tr><td>bd70f832e133fb87bae82dfaa0ae9d1599e52e4b</td><td>HCI Lab., Samsung Advanced Institute of Technology, Yongin, Korea</td><td></td></tr><tr><td>711bb5f63139ee7a9b9aef21533f959671a7d80e</td><td>HELSINKI UNIVERSITY OF TECHNOLOGY</td><td></td></tr><tr><td>711bb5f63139ee7a9b9aef21533f959671a7d80e</td><td>HELSINKI UNIVERSITY OF TECHNOLOGY</td><td></td></tr><tr><td>13188a88bbf83a18dd4964e3f89d0bc0a4d3a0bd</td><td>HOD, St. 
Joseph College of Information Technology, Songea, Tanzania</td><td>Department of Computer Science</td></tr><tr><td>5050807e90a925120cbc3a9cd13431b98965f4b9</td><td>Hacettepe University</td><td>Department of Computer Engineering</td></tr><tr><td>9865fe20df8fe11717d92b5ea63469f59cf1635a</td><td>Hacettepe University</td><td></td></tr><tr><td>4bd088ba3f42aa1e43ae33b1988264465a643a1f</td><td>Halmstad University</td><td></td></tr><tr><td>b73795963dc623a634d218d29e4a5b74dfbc79f1</td><td>Hangzhou Institute of Service</td><td></td></tr><tr><td>b73795963dc623a634d218d29e4a5b74dfbc79f1</td><td>Hangzhou Normal University</td><td></td></tr><tr><td>8af411697e73f6cfe691fe502d4bfb42510b4835</td><td>Hankuk University of Foreign Studies, South Korea</td><td></td></tr><tr><td>a59cdc49185689f3f9efdf7ee261c78f9c180789</td><td>Hanoi University of Science and Technology</td><td></td></tr><tr><td>f842b13bd494be1bbc1161dc6df244340b28a47f</td><td>Hanshan Normal University, Chaozhou, 521041, China</td><td>Department of Physics and Electronic Engineering</td></tr><tr><td>f842b13bd494be1bbc1161dc6df244340b28a47f</td><td>Hanshan Normal University, Chaozhou, 521041, China</td><td>Department of Physics and Electronic Engineering</td></tr><tr><td>946017d5f11aa582854ac4c0e0f1b18b06127ef1</td><td>Hanyang University</td><td></td></tr><tr><td>7d53678ef6009a68009d62cd07c020706a2deac3</td><td>Hanyang University</td><td>Department of Electronics and Computer Engineering</td></tr><tr><td>f5149fb6b455a73734f1252a96a9ce5caa95ae02</td><td>Harbin Institute of Technology</td><td></td></tr><tr><td>f5149fb6b455a73734f1252a96a9ce5caa95ae02</td><td>Harbin Institute of Technology</td><td></td></tr><tr><td>b73795963dc623a634d218d29e4a5b74dfbc79f1</td><td>Harbin Institute of Technology</td><td></td></tr><tr><td>993d189548e8702b1cb0b02603ef02656802c92b</td><td>Harbin Institute of Technology (Shenzhen), China</td><td></td></tr><tr><td>a52581a7b48138d7124afc7ccfcf8ec3b48359d0</td><td>Harbin Institute of Technology, 
Harbin 150001, China</td><td>Department of Computer Science and Technology</td></tr><tr><td>ad784332cc37720f03df1c576e442c9c828a587a</td><td>Harbin Institute of Technology, Harbin, China</td><td>Department of Computer Science</td></tr><tr><td>016a8ed8f6ba49bc669dbd44de4ff31a79963078</td><td>Harbin Institute of Technology, Harbin, China</td><td>Department of Computer Science</td></tr><tr><td>badcfb7d4e2ef0d3e332a19a3f93d59b4f85668e</td><td>Harbin Institute of Technology, Harbin, China</td><td></td></tr><tr><td>c9e955cb9709f16faeb0c840f4dae92eb875450a</td><td>Harbin Institute of Technology, School of Computer Science and Technology</td><td></td></tr><tr><td>f5149fb6b455a73734f1252a96a9ce5caa95ae02</td><td>Harbin Institute of Technology;Shenzhen University</td><td></td></tr><tr><td>591a737c158be7b131121d87d9d81b471c400dba</td><td>Harvard University</td><td></td></tr><tr><td>3d0379688518cc0e8f896e30815d0b5e8452d4cd</td><td>Harvard University</td><td></td></tr><tr><td>3d0379688518cc0e8f896e30815d0b5e8452d4cd</td><td>Harvard University</td><td></td></tr><tr><td>0ba402af3b8682e2aa89f76bd823ddffdf89fa0a</td><td>Harvard University</td><td></td></tr><tr><td>023be757b1769ecb0db810c95c010310d7daf00b</td><td>Harvard University</td><td></td></tr><tr><td>4b74f2d56cd0dda6f459319fec29559291c61bff</td><td>Harvard University</td><td></td></tr><tr><td>d3b18ba0d9b247bfa2fb95543d172ef888dfff95</td><td>Harvard University 2University of Southern California</td><td></td></tr><tr><td>17479e015a2dcf15d40190e06419a135b66da4e0</td><td>Harvard University 3Perceptive Automata, Inc</td><td>Department of Psychology</td></tr><tr><td>b1451721864e836069fa299a64595d1655793757</td><td>Harvard University 4Max Planck Institute for Informatics</td><td></td></tr><tr><td>20cfb4136c1a984a330a2a9664fcdadc2228b0bc</td><td>Harvard University, Cambridge, MA</td><td></td></tr><tr><td>78436256ff8f2e448b28e854ebec5e8d8306cf21</td><td>Harvard University, Cambridge, MA</td><td>Department of Molecular and Cellular 
Biology</td></tr><tr><td>d0509afe9c2c26fe021889f8efae1d85b519452a</td><td>Harvard University, Cambridge, MA 02138, USA</td><td></td></tr><tr><td>78436256ff8f2e448b28e854ebec5e8d8306cf21</td><td>Harvard University, Cambridge, MA, USA</td><td>Department of Computer Science</td></tr><tr><td>25e2d3122d4926edaab56a576925ae7a88d68a77</td><td>Harvard University, USA</td><td></td></tr><tr><td>25e2d3122d4926edaab56a576925ae7a88d68a77</td><td>Harvard and Massachusetts Institute</td><td></td></tr><tr><td>31182c5ffc8c5d8772b6db01ec98144cd6e4e897</td><td>Hasan Kalyoncu University, Gaziantep, Turkey</td><td>Department of Electrical and Electronic Engineering</td></tr><tr><td>b4362cd87ad219790800127ddd366cc465606a78</td><td>Head and Neck Surgery, Seoul National University</td><td>Department of Otorhinolaryngology</td></tr><tr><td>581e920ddb6ecfc2a313a3aa6fed3d933b917ab0</td><td>Hector Research Institute of Education Sciences and Psychology, T ubingen</td><td></td></tr><tr><td>c9e955cb9709f16faeb0c840f4dae92eb875450a</td><td>Heilongjiang University, College of Computer Science and Technology, China</td><td></td></tr><tr><td>03adcf58d947a412f3904a79f2ab51cfdf0e838a</td><td>Held at R.C.Patel Institute of Technology, Shirpur, Dist. 
Dhule, Maharastra, India</td><td></td></tr><tr><td>587c48ec417be8b0334fa39075b3bfd66cc29dbe</td><td>Helen Wills Neuroscience Institute, University of</td><td></td></tr><tr><td>ff9195f99a1a28ced431362f5363c9a5da47a37b</td><td>Helen Wills Neuroscience Institute, University of</td><td></td></tr><tr><td>b4ee1b468bf7397caa7396cfee2ab5f5ed6f2807</td><td>Helsinki Collegium for Advanced Studies, University of Helsinki, Finland</td><td></td></tr><tr><td>b4ee1b468bf7397caa7396cfee2ab5f5ed6f2807</td><td>Helsinki Institute for Information Technology, Aalto University, Finland</td><td></td></tr><tr><td>711bb5f63139ee7a9b9aef21533f959671a7d80e</td><td>Helsinki University of Technology Laboratory of Computational Engineering Publications</td><td></td></tr><tr><td>0b87d91fbda61cdea79a4b4dcdcb6d579f063884</td><td>Henan University of Traditional Chinese Medicine, Henan, Zhengzhou, 450000, P.R. China</td><td></td></tr><tr><td>17045163860fc7c38a0f7d575f3e44aaa5fa40d7</td><td>Hengyang Normal University, Hengyang, China</td><td></td></tr><tr><td>2cdc40f20b70ca44d9fd8e7716080ee05ca7924a</td><td>Heriot-Watt University</td><td></td></tr><tr><td>7d98dcd15e28bcc57c9c59b7401fa4a5fdaa632b</td><td>Heudiasyc Laboratory, CNRS, University of Technology of Compi`egne</td><td></td></tr><tr><td>907475a4febf3f1d4089a3e775ea018fbec895fe</td><td>Heudiasyc Laboratory, CNRS, University of Technology of Compi`egne</td><td></td></tr><tr><td>ea6f5c8e12513dbaca6bbdff495ef2975b8001bd</td><td>High Institute of Medical Technologies</td><td></td></tr><tr><td>ac559873b288f3ac28ee8a38c0f3710ea3f986d9</td><td>Hikvision Research Institute</td><td></td></tr><tr><td>bd21109e40c26af83c353a3271d0cd0b5c4b4ade</td><td>Hikvision Research Institute</td><td></td></tr><tr><td>90fb58eeb32f15f795030c112f5a9b1655ba3624</td><td>Hindusthan College of Engineering and Technology, Coimbatore, India</td><td></td></tr><tr><td>44c9b5c55ca27a4313daf3760a3f24a440ce17ad</td><td>Hiroshima University, 
Japan</td><td></td></tr><tr><td>44c9b5c55ca27a4313daf3760a3f24a440ce17ad</td><td>Hiroshima University, Japan</td><td></td></tr><tr><td>167736556bea7fd57cfabc692ec4ae40c445f144</td><td>Ho Chi Minh City University of</td><td></td></tr><tr><td>c2c3ff1778ed9c33c6e613417832505d33513c55</td><td>Ho Chi Minh City University of Science</td><td>Department of Computer Science</td></tr><tr><td>b84b7b035c574727e4c30889e973423fe15560d7</td><td>HoHai University</td><td></td></tr><tr><td>2331df8ca9f29320dd3a33ce68a539953fa87ff5</td><td>Honda Fundamental Research Labs</td><td></td></tr><tr><td>3a0ea368d7606030a94eb5527a12e6789f727994</td><td>Honda RandD Americas, Inc., Boston, MA, USA</td><td></td></tr><tr><td>1270044a3fa1a469ec2f4f3bd364754f58a1cb56</td><td>Honda Research Institute</td><td></td></tr><tr><td>f2b13946d42a50fa36a2c6d20d28de2234aba3b4</td><td>Honda Research Institute USA</td><td></td></tr><tr><td>f2b13946d42a50fa36a2c6d20d28de2234aba3b4</td><td>Honda Research Institute USA</td><td></td></tr><tr><td>4836b084a583d2e794eb6a94982ea30d7990f663</td><td>Hong Kong Applied Science and Technology Research Institute Company Limited</td><td></td></tr><tr><td>4cfa8755fe23a8a0b19909fa4dec54ce6c1bd2f7</td><td>Hong Kong Applied Science and Technology Research Institute Company Limited, Hong Kong, China</td><td></td></tr><tr><td>439647914236431c858535a2354988dde042ef4d</td><td>Hong Kong Baptist University</td><td>Department of Computer Science</td></tr><tr><td>11c04c4f0c234a72f94222efede9b38ba6b2306c</td><td>Hong Kong Polytechnic University</td><td></td></tr><tr><td>38f06a75eb0519ae1d4582a86ef4730cc8fb8d7f</td><td>Hong Kong Polytechnic University, Hong Kong</td><td>Department of Computing</td></tr><tr><td>48174c414cfce7f1d71c4401d2b3d49ba91c5338</td><td>Hong Kong Polytechnic University, Hong Kong</td><td>Department of Computing</td></tr><tr><td>5ea165d2bbd305dc125415487ef061bce75dac7d</td><td>Hong Kong Polytechnic University, Hong Kong, China</td><td>Department of 
Computing</td></tr><tr><td>8000c4f278e9af4d087c0d0895fff7012c5e3d78</td><td>Hong Kong University of Science and Technology</td><td>Department of Computer Science and Engineering</td></tr><tr><td>4fcd19b0cc386215b8bd0c466e42934e5baaa4b7</td><td>Hong Kong University of Science and Technology</td><td>Department of Electronic and Computer Engineering</td></tr><tr><td>4fcd19b0cc386215b8bd0c466e42934e5baaa4b7</td><td>Hong Kong University of Science and Technology</td><td>Department of Computer Science and Engineering</td></tr><tr><td>585260468d023ffc95f0e539c3fa87254c28510b</td><td>Hong Kong University of Science and Technology, Hong Kong</td><td></td></tr><tr><td>14070478b8f0d84e5597c3e67c30af91b5c3a917</td><td>Howard Hughes Medical Institute (HHMI</td><td></td></tr><tr><td>aa912375eaf50439bec23de615aa8a31a3395ad3</td><td>Howard University, Washington DC</td><td>Department of Electrical Engineering</td></tr><tr><td>aa912375eaf50439bec23de615aa8a31a3395ad3</td><td>Howard University, Washington DC</td><td>Department of Electrical Engineering</td></tr><tr><td>a3f684930c5c45fcb56a2b407d26b63879120cbf</td><td>Hua Zhong University of Science and Technology, Wuhan, China</td><td></td></tr><tr><td>4698a599425c3a6bae1c698456029519f8f2befe</td><td>Huazhong Agricultural University</td><td></td></tr><tr><td>4698a599425c3a6bae1c698456029519f8f2befe</td><td>Huazhong Agricultural University</td><td></td></tr><tr><td>7f2a4cd506fe84dee26c0fb41848cb219305173f</td><td>Huazhong University of</td><td>Department of Electronics and information Engineering</td></tr><tr><td>6a0368b4e132f4aa3bbdeada8d894396f201358a</td><td>Huazhong University of Science and Technology</td><td></td></tr><tr><td>51ed4c92cab9336a2ac41fa8e0293c2f5f9bf3b6</td><td>Huazhong University of Science and Technology</td><td></td></tr><tr><td>b750b3d8c34d4e57ecdafcd5ae8a15d7fa50bc24</td><td>Huazhong University of Science and Technology, Wuhan, 
China</td><td></td></tr><tr><td>6eb1b5935b0613a41b72fd9e7e53a3c0b32651e9</td><td>Human Centered Multimedia, Augsburg University, Germany</td><td></td></tr><tr><td>0efdd82a4753a8309ff0a3c22106c570d8a84c20</td><td>Human Computer Interaction Lab., Samsung Advanced Institute of Technology, Korea</td><td></td></tr><tr><td>3dabf7d853769cfc4986aec443cc8b6699136ed0</td><td>Human Development and Applied Psychology, University of Toronto, Ontario, Canada</td><td></td></tr><tr><td>9d8ff782f68547cf72b7f3f3beda9dc3e8ecfce6</td><td>Human Genome Center, Institute of Medical Science</td><td></td></tr><tr><td>b073313325b6482e22032e259d7311fb9615356c</td><td>Human Interaction Research Lab</td><td></td></tr><tr><td>6eb1b5935b0613a41b72fd9e7e53a3c0b32651e9</td><td>Human Interface Technology Lab New Zealand, University of Canterbury, New Zealand</td><td></td></tr><tr><td>950171acb24bb24a871ba0d02d580c09829de372</td><td>Human Language Technology and Pattern Recognition Group, RWTH Aachen University, Germany</td><td></td></tr><tr><td>7643861bb492bf303b25d0306462f8fb7dc29878</td><td>Human Language Technology and Pattern Recognition Group, RWTH Aachen University, Germany</td><td></td></tr><tr><td>c207fd762728f3da4cddcfcf8bf19669809ab284</td><td>Human Media Interaction, University of Twente, P.O. 
Box</td><td></td></tr><tr><td>b8caf1b1bc3d7a26a91574b493c502d2128791f6</td><td>Human and Health Sciences, Swansea University, Swansea, United Kingdom, 3 Abertawe Bro-Morgannwg</td><td></td></tr><tr><td>a703d51c200724517f099ee10885286ddbd8b587</td><td>Human-friendly Welfare Robotic System Engineering Research Center, KAIST</td><td></td></tr><tr><td>5bc0a89f4f73523967050374ed34d7bc89e4d9e1</td><td>Humboldt-University, Berlin, Germany</td><td>c Department of Psychology</td></tr><tr><td>5b01d4338734aefb16ee82c4c59763d3abc008e6</td><td>Hunan Provincial Key Laboratory of Wind Generator and Its Control, Hunan Institute of Engineering, Xiangtan, China</td><td></td></tr><tr><td>1fe990ca6df273de10583860933d106298655ec8</td><td>Hunan University</td><td></td></tr><tr><td>ce56be1acffda599dec6cc2af2b35600488846c9</td><td>IBM Almaden Research Center, San Jose CA</td><td></td></tr><tr><td>59be98f54bb4ed7a2984dc6a3c84b52d1caf44eb</td><td>IBM China Research Lab</td><td></td></tr><tr><td>91495c689e6e614247495c3f322d400d8098de43</td><td>IBM China Research Lab</td><td></td></tr><tr><td>23c3eb6ad8e5f18f672f187a6e9e9b0d94042970</td><td>IBM Research, Australia, 2 IBM T.J. Watson Research Center, 3 National University of Singapore</td><td></td></tr><tr><td>2a88541448be2eb1b953ac2c0c54da240b47dd8a</td><td>IBM T. J. Watson Research Center</td><td></td></tr><tr><td>5e16f10f2d667d17c029622b9278b6b0a206d394</td><td>IBM T. J. Watson Research Center</td><td></td></tr><tr><td>8323529cf37f955fb3fc6674af6e708374006a28</td><td>IBM T. J. Watson Research Center</td><td></td></tr><tr><td>66b9d954dd8204c3a970d86d91dd4ea0eb12db47</td><td>IBM T. J. Watson Research Center, PO Box 704, Yorktown Heights, NY</td><td></td></tr><tr><td>499f1d647d938235e9186d968b7bb2ab20f2726d</td><td>IBM T. J. Watson Research Center, Yorktown Heights, NY, USA</td><td></td></tr><tr><td>3cb0ef5aabc7eb4dd8d32a129cb12b3081ef264f</td><td>IBM T.J. 
Watson Research Center</td><td></td></tr><tr><td>cfd8c66e71e98410f564babeb1c5fd6f77182c55</td><td>IBM T.J. Watson Research Center</td><td></td></tr><tr><td>7e9df45ece7843fe050033c81014cc30b3a8903a</td><td>IBM T.J. Watson Research Center</td><td></td></tr><tr><td>c1298120e9ab0d3764512cbd38b47cd3ff69327b</td><td>IBM TJ Watson Research Center, USA</td><td></td></tr><tr><td>350da18d8f7455b0e2920bc4ac228764f8fac292</td><td>IBM Thomas J. Watson Research Center</td><td></td></tr><tr><td>131178dad3c056458e0400bed7ee1a36de1b2918</td><td>IBM Watson Research Center, Armonk, NY, USA</td><td></td></tr><tr><td>bb489e4de6f9b835d70ab46217f11e32887931a2</td><td>ICMC University of S ao Paulo</td><td></td></tr><tr><td>0d538084f664b4b7c0e11899d08da31aead87c32</td><td>ICSI / UC Berkeley 2Brigham Young University</td><td></td></tr><tr><td>a52581a7b48138d7124afc7ccfcf8ec3b48359d0</td><td>ICT-ISVISION Joint RandD Laboratory for Face Recognition, Institute of Computer Technology, The Chinese Academy of Sciences</td><td></td></tr><tr><td>89cabb60aa369486a1ebe586dbe09e3557615ef8</td><td>IDIAP RESEARCH INSTITUTE</td><td></td></tr><tr><td>816bd8a7f91824097f098e4f3e0f4b69f481689d</td><td>IDIAP Research Institute</td><td></td></tr><tr><td>816bd8a7f91824097f098e4f3e0f4b69f481689d</td><td>IDIAP Research Institute</td><td></td></tr><tr><td>5160569ca88171d5fa257582d161e9063c8f898d</td><td>IDIAP Research Institute, Martigny, Switzerland</td><td></td></tr><tr><td>53ce84598052308b86ba79d873082853022aa7e9</td><td>IEEE Member, Shahid Rajaee Teacher training University</td><td>Electrical and Computer Engineering Department</td></tr><tr><td>22fdd8d65463f520f054bf4f6d2d216b54fc5677</td><td>IES College of Technology, Bhopal</td><td>Department of Computer Science Engg.</td></tr><tr><td>69fb98e11df56b5d7ec7d45442af274889e4be52</td><td>IHCC, RSCS, CECS, Australian National University</td><td></td></tr><tr><td>3f0c51989c516a7c5dee7dec4d7fb474ae6c28d9</td><td>IIE, Universidad de la Rep ublica, Uruguay. 
2ECE, Duke University, USA</td><td></td></tr><tr><td>a301ddc419cbd900b301a95b1d9e4bb770afc6a3</td><td>IIIS, Tsinghua University</td><td></td></tr><tr><td>6dd2a0f9ca8a5fee12edec1485c0699770b4cfdf</td><td>IIIS, Tsinghua University</td><td></td></tr><tr><td>016800413ebd1a87730a5cf828e197f43a08f4b3</td><td>IIIS, Tsinghua University</td><td></td></tr><tr><td>568cff415e7e1bebd4769c4a628b90db293c1717</td><td>IIIS, Tsinghua University, Beijing, China</td><td></td></tr><tr><td>3a76e9fc2e89bdd10a9818f7249fbf61d216efc4</td><td>IIIT-Delhi, India, 2West Virginia University</td><td></td></tr><tr><td>00d931eccab929be33caea207547989ae7c1ef39</td><td>IKAT, Universiteit Maastricht, St. Jacobsstraat 6, 6211 LB Maastricht, The Netherlands</td><td>Department of Computer Science</td></tr><tr><td>00d931eccab929be33caea207547989ae7c1ef39</td><td>IKAT, Universiteit Maastricht, St. Jacobsstraat 6, 6211 LB Maastricht, The Netherlands</td><td>Department of Computer Science</td></tr><tr><td>1e8eec6fc0e4538e21909ab6037c228547a678ba</td><td>IMPERIAL COLLEGE</td><td></td></tr><tr><td>4a5592ae1f5e9fa83d9fa17451c8ab49608421e4</td><td>IN3, Open University of</td><td></td></tr><tr><td>858b51a8a8aa082732e9c7fbbd1ea9df9c76b013</td><td>INTELSIG, Monte ore Institute, University of Li`ege, Belgium</td><td></td></tr><tr><td>7bbaa09c9e318da4370a83b126bcdb214e7f8428</td><td>ISISTAN Research Institute - CONICET - UNICEN</td><td></td></tr><tr><td>14014a1bdeb5d63563b68b52593e3ac1e3ce7312</td><td>ISLA Lab, Informatics Institute</td><td></td></tr><tr><td>935a7793cbb8f102924fa34fce1049727de865c2</td><td>ISLA Lab, Informatics Institute, University of Amsterdam</td><td></td></tr><tr><td>ea6f5c8e12513dbaca6bbdff495ef2975b8001bd</td><td>ISTMT, Laboratory of Research in Biophysics and Medical Technologies LRBTM Higher Institute of Medical Technologies of Tunis</td><td></td></tr><tr><td>ea6f5c8e12513dbaca6bbdff495ef2975b8001bd</td><td>ISTMT, University of Tunis El Manar Address: 9, Rue Docteur Zouhe r Safi 1006; 
3Faculty of Medicine of Tunis; Address</td><td></td></tr><tr><td>178a82e3a0541fa75c6a11350be5bded133a59fd</td><td>IT Instituto de Telecomunica es, University of Beira Interior, Covilh , Portugal</td><td>Department of Computer Science</td></tr><tr><td>ef230e3df720abf2983ba6b347c9d46283e4b690</td><td>IT - Instituto de Telecomunica es, University of Beira Interior</td><td></td></tr><tr><td>ef230e3df720abf2983ba6b347c9d46283e4b690</td><td>IT - Instituto de Telecomunica es, University of Beira Interior</td><td></td></tr><tr><td>b56f3a7c50bfcd113d0ba84e6aa41189e262d7ae</td><td>ITCS, Institute for Interdisciplinary Information Sciences, Tsinghua University, Beijing</td><td></td></tr><tr><td>6043006467fb3fd1e9783928d8040ee1f1db1f3a</td><td>ITCS, Tsinghua University</td><td></td></tr><tr><td>4e8c608fc4b8198f13f8a68b9c1a0780f6f50105</td><td>ITEE, The University of Queensland, Australia</td><td></td></tr><tr><td>7bbaa09c9e318da4370a83b126bcdb214e7f8428</td><td>ITIC Research Institute, National University of Cuyo</td><td></td></tr><tr><td>93971a49ef6cc88a139420349a1dfd85fb5d3f5c</td><td>Idiap Research Institute</td><td></td></tr><tr><td>939123cf21dc9189a03671484c734091b240183e</td><td>Idiap Research Institute</td><td></td></tr><tr><td>b59cee1f647737ec3296ccb3daa25c890359c307</td><td>Idiap Research Institute</td><td></td></tr><tr><td>d7593148e4319df7a288180d920f2822eeecea0b</td><td>Idiap Research Institute</td><td></td></tr><tr><td>af13c355a2a14bb74847aedeafe990db3fc9cbd4</td><td>Idiap Research Institute</td><td></td></tr><tr><td>af13c355a2a14bb74847aedeafe990db3fc9cbd4</td><td>Idiap Research Institute</td><td></td></tr><tr><td>235d5620d05bb7710f5c4fa6fceead0eb670dec5</td><td>Idiap Research Institute</td><td></td></tr><tr><td>06d93a40365da90f30a624f15bf22a90d9cfe6bb</td><td>Idiap Research Institute and EPF Lausanne</td><td></td></tr><tr><td>8dce38840e6cf5ab3e0d1b26e401f8143d2a6bff</td><td>Idiap Research Institute and EPFL, 2 LIMSI, CNRS, Univ. 
Paris-Sud, Universit Paris-Saclay</td><td></td></tr><tr><td>0b642f6d48a51df64502462372a38c50df2051b1</td><td>Idiap Research Institute, Martigny, Switzerland</td><td></td></tr><tr><td>52472ec859131844f38fc7d57944778f01d109ac</td><td>Idiap Research Institute, Martigny, Switzerland</td><td></td></tr><tr><td>c23153aade9be0c941390909c5d1aad8924821db</td><td>Idiap Research Institute, Martigny, Switzerland</td><td></td></tr><tr><td>78d645d5b426247e9c8f359694080186681f57db</td><td>Idiap Research Institute, Martigny, Switzerland</td><td></td></tr><tr><td>46ae4d593d89b72e1a479a91806c39095cd96615</td><td>Idiap Research Institute, Martigny, Switzerland, 2LIUM, University of Maine, Le Mans, France</td><td></td></tr><tr><td>167736556bea7fd57cfabc692ec4ae40c445f144</td><td>Idiap Research Institute, Switzerland</td><td></td></tr><tr><td>68484ae8a042904a95a8d284a7f85a4e28e37513</td><td>Idiap Research Institute. Centre du Parc, Rue Marconi 19, Martigny (VS), Switzerland</td><td></td></tr><tr><td>47b508abdaa5661fe14c13e8eb21935b8940126b</td><td>Iftm University, Moradabad-244001 U.P</td><td></td></tr><tr><td>9d66de2a59ec20ca00a618481498a5320ad38481</td><td>Illinois Institute of Technology</td><td>y Department of Computer Science</td></tr><tr><td>27846b464369095f4909f093d11ed481277c8bba</td><td>Illinois Institute of Technology, Chicago, Illinois, USA</td><td>Department of Electrical and Computer Engineering</td></tr><tr><td>1149c6ac37ae2310fe6be1feb6e7e18336552d95</td><td>Ilmenau Technical University, P.O.Box 100565, 98684 Ilmenau, Germany</td><td></td></tr><tr><td>df0e280cae018cebd5b16ad701ad101265c369fa</td><td>Image Processing Center, Beihang University</td><td></td></tr><tr><td>2c62b9e64aeddf12f9d399b43baaefbca8e11148</td><td>Image Understanding and Interactive Robotics, Reutlingen University, 72762 Reutlingen, Germany</td><td></td></tr><tr><td>64f9519f20acdf703984f02e05fd23f5e2451977</td><td>Image and Video Laboratory, Queensland University of Technology (QUT), Brisbane, QLD, 
Australia</td><td></td></tr><tr><td>98af221afd64a23e82c40fd28d25210c352e41b7</td><td>Image and Video Research Laboratory, Queensland University of Technology</td><td></td></tr><tr><td>0d14261e69a4ad4140ce17c1d1cea76af6546056</td><td>Imaging Science and Biomedical Engineering, The University of Manchester, UK</td><td></td></tr><tr><td>1b60b8e70859d5c85ac90510b370b501c5728620</td><td>Imaging Science and Biomedical Engineering, The University of Manchester, UK</td><td></td></tr><tr><td>0de91641f37b0a81a892e4c914b46d05d33fd36e</td><td>Imperial College London</td><td></td></tr><tr><td>59d8fa6fd91cdb72cd0fa74c04016d79ef5a752b</td><td>Imperial College London</td><td></td></tr><tr><td>5040f7f261872a30eec88788f98326395a44db03</td><td>Imperial College London</td><td></td></tr><tr><td>03b03f5a301b2ff88ab3bb4969f54fd9a35c7271</td><td>Imperial College London</td><td></td></tr><tr><td>046a694bbb3669f2ff705c6c706ca3af95db798c</td><td>Imperial College London</td><td>Department of Electrical and Electronic Engineering</td></tr><tr><td>56e079f4eb40744728fd1d7665938b06426338e5</td><td>Imperial College London</td><td></td></tr><tr><td>33c3702b0eee6fc26fc49f79f9133f3dd7fa3f13</td><td>Imperial College London</td><td></td></tr><tr><td>a2bcfba155c990f64ffb44c0a1bb53f994b68a15</td><td>Imperial College London</td><td>Department of Computing</td></tr><tr><td>a2bcfba155c990f64ffb44c0a1bb53f994b68a15</td><td>Imperial College London</td><td>Department of Electrical and Electronic Engineering</td></tr><tr><td>d140c5add2cddd4a572f07358d666fe00e8f4fe1</td><td>Imperial College London</td><td></td></tr><tr><td>4bbe460ab1b279a55e3c9d9f488ff79884d01608</td><td>Imperial College London</td><td></td></tr><tr><td>29c340c83b3bbef9c43b0c50b4d571d5ed037cbd</td><td>Imperial College London</td><td></td></tr><tr><td>7cffcb4f24343a924a8317d560202ba9ed26cd0b</td><td>Imperial College London</td><td></td></tr><tr><td>809ea255d144cff780300440d0f22c96e98abd53</td><td>Imperial College 
London</td><td></td></tr><tr><td>809ea255d144cff780300440d0f22c96e98abd53</td><td>Imperial College London</td><td></td></tr><tr><td>1a85956154c170daf7f15f32f29281269028ff69</td><td>Imperial College London</td><td>Department of Computing</td></tr><tr><td>1afdedba774f6689eb07e048056f7844c9083be9</td><td>Imperial College London</td><td></td></tr><tr><td>8f08b2101d43b1c0829678d6a824f0f045d57da5</td><td>Imperial College London</td><td>Department of Computing</td></tr><tr><td>7e0c75ce731131e613544e1a85ae0f2c28ee4c1f</td><td>Imperial College London</td><td></td></tr><tr><td>88bef50410cea3c749c61ed68808fcff84840c37</td><td>Imperial College London</td><td>Department of Computing</td></tr><tr><td>38cbb500823057613494bacd0078aa0e57b30af8</td><td>Imperial College London</td><td></td></tr><tr><td>38cbb500823057613494bacd0078aa0e57b30af8</td><td>Imperial College London</td><td></td></tr><tr><td>9af9a88c60d9e4b53e759823c439fc590a4b5bc5</td><td>Imperial College London</td><td></td></tr><tr><td>54bb25a213944b08298e4e2de54f2ddea890954a</td><td>Imperial College London</td><td></td></tr><tr><td>54bb25a213944b08298e4e2de54f2ddea890954a</td><td>Imperial College London</td><td></td></tr><tr><td>54bb25a213944b08298e4e2de54f2ddea890954a</td><td>Imperial College London</td><td></td></tr><tr><td>54bb25a213944b08298e4e2de54f2ddea890954a</td><td>Imperial College London</td><td></td></tr><tr><td>06d7ef72fae1be206070b9119fb6b61ce4699587</td><td>Imperial College London</td><td></td></tr><tr><td>a812368fe1d4a186322bf72a6d07e1cf60067234</td><td>Imperial College London</td><td></td></tr><tr><td>b93bf0a7e449cfd0db91a83284d9eba25a6094d8</td><td>Imperial College London</td><td>Department of Computing</td></tr><tr><td>f9ccfe000092121a2016639732cdb368378256d5</td><td>Imperial College London</td><td></td></tr><tr><td>2dd5f1d69e0e8a95a10f3f07f2c0c7fa172994b3</td><td>Imperial College London</td><td>Computing Department</td></tr><tr><td>84e6669b47670f9f4f49c0085311dce0e178b685</td><td>Imperial College 
London</td><td></td></tr><tr><td>1d0128b9f96f4c11c034d41581f23eb4b4dd7780</td><td>Imperial College London</td><td></td></tr><tr><td>40bb090a4e303f11168dce33ed992f51afe02ff7</td><td>Imperial College London</td><td></td></tr><tr><td>40bb090a4e303f11168dce33ed992f51afe02ff7</td><td>Imperial College London</td><td></td></tr><tr><td>40bb090a4e303f11168dce33ed992f51afe02ff7</td><td>Imperial College London</td><td></td></tr><tr><td>7a84368ebb1a20cc0882237a4947efc81c56c0c0</td><td>Imperial College London</td><td></td></tr><tr><td>8e0ab1b08964393e4f9f42ca037220fe98aad7ac</td><td>Imperial College London</td><td></td></tr><tr><td>2227f978f084ebb18cb594c0cfaf124b0df6bf95</td><td>Imperial College London</td><td></td></tr><tr><td>aeeea6eec2f063c006c13be865cec0c350244e5b</td><td>Imperial College London / Twente University</td><td></td></tr><tr><td>1659a8b91c3f428f1ba6aeba69660f2c9d0a85c6</td><td>Imperial College London, London, UK</td><td></td></tr><tr><td>090ff8f992dc71a1125636c1adffc0634155b450</td><td>Imperial College London, London, UK</td><td></td></tr><tr><td>7f82f8a416170e259b217186c9e38a9b05cb3eb4</td><td>Imperial College London, London, UK</td><td>Department of Computing</td></tr><tr><td>54bb25a213944b08298e4e2de54f2ddea890954a</td><td>Imperial College London, On do</td><td></td></tr><tr><td>624496296af19243d5f05e7505fd927db02fd0ce</td><td>Imperial College London, U.K</td><td></td></tr><tr><td>232b6e2391c064d483546b9ee3aafe0ba48ca519</td><td>Imperial College London, U.K</td><td></td></tr><tr><td>9b0489f2d5739213ef8c3e2e18739c4353c3a3b7</td><td>Imperial College London, UK</td><td>Department of Computing</td></tr><tr><td>044d9a8c61383312cdafbcc44b9d00d650b21c70</td><td>Imperial College London, UK</td><td>Comp. 
Department</td></tr><tr><td>3505c9b0a9631539e34663310aefe9b05ac02727</td><td>Imperial College London, UK</td><td>Department of Computing</td></tr><tr><td>0ba1d855cd38b6a2c52860ae4d1a85198b304be4</td><td>Imperial College London, UK</td><td>Computing Department</td></tr><tr><td>055de0519da7fdf27add848e691087e0af166637</td><td>Imperial College London, UK</td><td>Computing Department</td></tr><tr><td>0209389b8369aaa2a08830ac3b2036d4901ba1f1</td><td>Imperial College London, UK</td><td></td></tr><tr><td>ac2e44622efbbab525d4301c83cb4d5d7f6f0e55</td><td>Imperial College London, UK</td><td></td></tr><tr><td>27eb7a6e1fb6b42516041def6fe64bd028b7614d</td><td>Imperial College London, UK</td><td></td></tr><tr><td>7df268a3f4da7d747b792882dfb0cbdb7cc431bc</td><td>Imperial College London, UK</td><td>Department of Electrical and Electronic Engineering</td></tr><tr><td>42afe6d016e52c99e2c0d876052ade9c192d91e7</td><td>Imperial College London, UK</td><td>Computing Department</td></tr><tr><td>7492c611b1df6bce895bee6ba33737e7fc7f60a6</td><td>Imperial College London, UK</td><td>Department of Computing</td></tr><tr><td>193debca0be1c38dabc42dc772513e6653fd91d8</td><td>Imperial College London, UK</td><td>cid:63)Department of Computing</td></tr><tr><td>1921795408345751791b44b379f51b7dd54ebfa2</td><td>Imperial College London, UK</td><td></td></tr><tr><td>4c87aafa779747828054cffee3125fcea332364d</td><td>Imperial College London, UK</td><td>Comp. Department</td></tr><tr><td>5bd3d08335bb4e444a86200c5e9f57fd9d719e14</td><td>Imperial College London, UK</td><td></td></tr><tr><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td><td>Imperial College London, UK</td><td>Comp. Department</td></tr><tr><td>a06b6d30e2b31dc600f622ab15afe5e2929581a7</td><td>Imperial College London, UK</td><td></td></tr><tr><td>12095f9b35ee88272dd5abc2d942a4f55804b31e</td><td>Imperial College London, UK</td><td></td></tr><tr><td>143bee9120bcd7df29a0f2ad6f0f0abfb23977b8</td><td>Imperial College London, UK</td><td>Comp. 
Department</td></tr><tr><td>f4210309f29d4bbfea9642ecadfb6cf9581ccec7</td><td>Imperial College London, United Kingdom</td><td>Department of Computing</td></tr><tr><td>e42998bbebddeeb4b2bedf5da23fa5c4efc976fa</td><td>Imperial College London, United Kingdom</td><td>Department of Computing</td></tr><tr><td>3f957142ef66f2921e7c8c7eadc8e548dccc1327</td><td>Imperial College London, United Kingdom</td><td>Department of Computing</td></tr><tr><td>30fd1363fa14965e3ab48a7d6235e4b3516c1da1</td><td>Imperial College London, United Kingdom</td><td>Department of Computing</td></tr><tr><td>06c2dfe1568266ad99368fc75edf79585e29095f</td><td>Imperial College London, United Kingdom</td><td>Department of Computing</td></tr><tr><td>0aa9872daf2876db8d8e5d6197c1ce0f8efee4b7</td><td>Imperial College of Science, Technology and Medicine</td><td></td></tr><tr><td>2eb9f1dbea71bdc57821dedbb587ff04f3a25f07</td><td>Imperial College, 180 Queens Gate</td><td>Computing Department</td></tr><tr><td>d65b82b862cf1dbba3dee6541358f69849004f30</td><td>Imperial College, London, UK</td><td>Department of Electrical and Electronic Engineering</td></tr><tr><td>292c6b743ff50757b8230395c4a001f210283a34</td><td>Imperial College, South Kensington Campus, London SW7 2AZ, UK</td><td>Department of Electrical and Electronic Engineering</td></tr><tr><td>283d226e346ac3e7685dd9a4ba8ae55ee4f2fe43</td><td>In the Graduate College</td><td></td></tr><tr><td>f7b4bc4ef14349a6e66829a0101d5b21129dcf55</td><td>Inception Institute of Arti cial</td><td></td></tr><tr><td>d0f54b72e3a3fe7c0e65d7d5a3b30affb275f4c5</td><td>Inception Institute of Arti cial Intelligence (IIAI), Abu Dhabi, UAE</td><td></td></tr><tr><td>993d189548e8702b1cb0b02603ef02656802c92b</td><td>Inception Institute of Arti cial Intelligence, UAE</td><td></td></tr><tr><td>7553fba5c7f73098524fbb58ca534a65f08e91e7</td><td>India</td><td>CSE Department and CSVTU University</td></tr><tr><td>7553fba5c7f73098524fbb58ca534a65f08e91e7</td><td>India</td><td>CSE Department and CSVTU 
University</td></tr><tr><td>7f268f29d2c8f58cea4946536f5e2325777fa8fa</td><td>Indian Institute of Informaiton Technology, Allahabad, India</td><td></td></tr><tr><td>d79365336115661b0e8dbbcd4b2aa1f504b91af6</td><td>Indian Institute of Science</td><td></td></tr><tr><td>4be774af78f5bf55f7b7f654f9042b6e288b64bd</td><td>Indian Institute of Science</td><td></td></tr><tr><td>4542273a157bfd4740645a6129d1784d1df775d2</td><td>Indian Institute of Science</td><td></td></tr><tr><td>6226f2ea345f5f4716ac4ddca6715a47162d5b92</td><td>Indian Institute of Science Bangalore</td><td></td></tr><tr><td>6226f2ea345f5f4716ac4ddca6715a47162d5b92</td><td>Indian Institute of Science Bangalore</td><td></td></tr><tr><td>13604bbdb6f04a71dea4bd093794e46730b0a488</td><td>Indian Institute of Science, Bangalore</td><td></td></tr><tr><td>13604bbdb6f04a71dea4bd093794e46730b0a488</td><td>Indian Institute of Science, Bangalore</td><td></td></tr><tr><td>90a754f597958a2717862fbaa313f67b25083bf9</td><td>Indian Institute of Science, India</td><td></td></tr><tr><td>1fe1bd6b760e3059fff73d53a57ce3a6079adea1</td><td>Indian Institute of Technology</td><td></td></tr><tr><td>cbb27980eb04f68d9f10067d3d3c114efa9d0054</td><td>Indian Institute of Technology</td><td></td></tr><tr><td>cbb27980eb04f68d9f10067d3d3c114efa9d0054</td><td>Indian Institute of Technology</td><td></td></tr><tr><td>cbb27980eb04f68d9f10067d3d3c114efa9d0054</td><td>Indian Institute of Technology</td><td></td></tr><tr><td>48463a119f67ff2c43b7c38f0a722a32f590dfeb</td><td>Indian Institute of Technology</td><td></td></tr><tr><td>48463a119f67ff2c43b7c38f0a722a32f590dfeb</td><td>Indian Institute of Technology</td><td></td></tr><tr><td>48463a119f67ff2c43b7c38f0a722a32f590dfeb</td><td>Indian Institute of Technology</td><td></td></tr><tr><td>1e94cc91c5293c8fc89204d4b881552e5b2ce672</td><td>Indian Institute of Technology Delhi, New Delhi, India</td><td></td></tr><tr><td>3f4bfa4e3655ef392eb5ad609d31c05f29826b45</td><td>Indian Institute of Technology 
Kanpur</td><td></td></tr><tr><td>53a41c711b40e7fe3dc2b12e0790933d9c99a6e0</td><td>Indian Institute of Technology Kharagpur</td><td></td></tr><tr><td>db67edbaeb78e1dd734784cfaaa720ba86ceb6d2</td><td>Indian Institute of Technology Kharagpur</td><td></td></tr><tr><td>aae742779e8b754da7973949992d258d6ca26216</td><td>Indian Institute of Technology Kharagpur, India</td><td></td></tr><tr><td>68f61154a0080c4aae9322110c8827978f01ac2e</td><td>Indian Institute of Technology Madras, Chennai 600036, India</td><td>Department of Electrical Engineering</td></tr><tr><td>959bcb16afdf303c34a8bfc11e9fcc9d40d76b1c</td><td>Indian Institute of Technology Madras, Chennai, India</td><td></td></tr><tr><td>59efb1ac77c59abc8613830787d767100387c680</td><td>Indian Institute of Technology Ropar</td><td></td></tr><tr><td>59efb1ac77c59abc8613830787d767100387c680</td><td>Indian Institute of Technology Ropar</td><td></td></tr><tr><td>f997a71f1e54d044184240b38d9dc680b3bbbbc0</td><td>Indian Institute of Technology Ropar</td><td></td></tr><tr><td>f997a71f1e54d044184240b38d9dc680b3bbbbc0</td><td>Indian Institute of Technology Ropar</td><td></td></tr><tr><td>f997a71f1e54d044184240b38d9dc680b3bbbbc0</td><td>Indian Institute of Technology Ropar</td><td></td></tr><tr><td>f997a71f1e54d044184240b38d9dc680b3bbbbc0</td><td>Indian Institute of Technology Ropar</td><td></td></tr><tr><td>db3545a983ffd24c97c18bf7f068783102548ad7</td><td>Indian Institute of Technology, Bombay, India</td><td></td></tr><tr><td>e9bb045e702ee38e566ce46cc1312ed25cb59ea7</td><td>Indian Institute of Technology, Kharagpur</td><td></td></tr><tr><td>0fae5d9d2764a8d6ea691b9835d497dd680bbccd</td><td>Indian Institute of Technology, Madras</td><td></td></tr><tr><td>0fae5d9d2764a8d6ea691b9835d497dd680bbccd</td><td>Indian Institute of Technology, Madras</td><td></td></tr><tr><td>86c5478f21c4a9f9de71b5ffa90f2a483ba5c497</td><td>Indian Institute of Technology, Madras, Chennai 600036, INDIA</td><td>Department of Computer Science and 
Engineering</td></tr><tr><td>e9bb045e702ee38e566ce46cc1312ed25cb59ea7</td><td>Indian Institute of Technology, Roorkee</td><td></td></tr><tr><td>f3a59d85b7458394e3c043d8277aa1ffe3cdac91</td><td>Indiana University</td><td></td></tr><tr><td>0182d090478be67241392df90212d6cd0fb659e6</td><td>Indiana University</td><td></td></tr><tr><td>0182d090478be67241392df90212d6cd0fb659e6</td><td>Indiana University</td><td></td></tr><tr><td>0b835284b8f1f45f87b0ce004a4ad2aca1d9e153</td><td>Indiana University Bloomington</td><td></td></tr><tr><td>b7894c1f805ffd90ab4ab06002c70de68d6982ab</td><td>Indra Ganesan College of Engineering, Trichy, India</td><td>Department of Computer Science and Engineering</td></tr><tr><td>97865d31b5e771cf4162bc9eae7de6991ceb8bbf</td><td>Indraprastha Institute of Information Technology</td><td></td></tr><tr><td>8fa3478aaf8e1f94e849d7ffbd12146946badaba</td><td>Indraprastha Institute of Information Technology (Delhi, India</td><td></td></tr><tr><td>869a2fbe42d3fdf40ed8b768edbf54137be7ac71</td><td>Indraprastha Institute of Information Technology, Delhi</td><td></td></tr><tr><td>787c1bb6d1f2341c5909a0d6d7314bced96f4681</td><td>Indraprastha Institute of Information Technology, Delhi</td><td></td></tr><tr><td>0f21a39fa4c0a19c4a5b4733579e393cb1d04f71</td><td>Informatics Institute</td><td></td></tr><tr><td>205e4d6e0de81c7dd6c83b737ffdd4519f4f7ffa</td><td>Informatics and Telematics Institute</td><td></td></tr><tr><td>d5afd7b76f1391321a1340a19ba63eec9e0f9833</td><td>Informatics and Telematics Institute</td><td></td></tr><tr><td>5b9d41e2985fa815c0f38a2563cca4311ce82954</td><td>Informatics and Telematics Institute, Centre for Research and Technology Hellas</td><td></td></tr><tr><td>a2bcfba155c990f64ffb44c0a1bb53f994b68a15</td><td>Informatics and Telematics Institute, Centre of Research and Technology - Hellas</td><td></td></tr><tr><td>ff7bc7a6d493e01ec8fa2b889bcaf6349101676e</td><td>Information Engineering, P. O. 
Box 4500 FI-90014 University of Oulu, Finland</td><td></td></tr><tr><td>fa90b825346a51562d42f6b59a343b98ea2e501a</td><td>Information Sciences Institute and Computer Science, University of Southern California</td><td></td></tr><tr><td>582edc19f2b1ab2ac6883426f147196c8306685a</td><td>Information Sciences Institute, USC, CA, USA</td><td></td></tr><tr><td>c75e6ce54caf17b2780b4b53f8d29086b391e839</td><td>Information Sciences Institute, USC, CA, USA</td><td></td></tr><tr><td>870433ba89d8cab1656e57ac78f1c26f4998edfb</td><td>Information Sciences Institute, USC, CA, USA</td><td></td></tr><tr><td>0a34fe39e9938ae8c813a81ae6d2d3a325600e5c</td><td>Information Sciences Institute, USC, CA, USA</td><td></td></tr><tr><td>1e6ed6ca8209340573a5e907a6e2e546a3bf2d28</td><td>Information Sciences Institute, USC, CA, USA</td><td></td></tr><tr><td>12408baf69419409d228d96c6f88b6bcde303505</td><td>Information Sciences Institute, USC, CA, USA</td><td></td></tr><tr><td>3fac7c60136a67b320fc1c132fde45205cd2ac66</td><td>Information Systems Design, Doshisha University, Kyoto, Japan</td><td></td></tr><tr><td>892c911ca68f5b4bad59cde7eeb6c738ec6c4586</td><td>Information Systems, University of Wisconsin-River Falls, Wisconsin, WI, United States of America</td><td></td></tr><tr><td>182470fd0c18d0c5979dff75d089f1da176ceeeb</td><td>Information Technologies Institute</td><td></td></tr><tr><td>bc15a2fd09df7046e7e8c7c5b054d7f06c3cefe9</td><td>Information Technology University (ITU), Punjab, Lahore, Pakistan</td><td></td></tr><tr><td>e4bf70e818e507b54f7d94856fecc42cc9e0f73d</td><td>Information Technology, Madras Institute of Technology, TamilNadu, India, email</td><td></td></tr><tr><td>d5b5c63c5611d7b911bc1f7e161a0863a34d44ea</td><td>Information and Media Processing Research Laboratories, NEC Corporation</td><td></td></tr><tr><td>924b14a9e36d0523a267293c6d149bca83e73f3b</td><td>Information, Keio University</td><td></td></tr><tr><td>ac51d9ddbd462d023ec60818bac6cdae83b66992</td><td>Informatization Office, 
National University of Defense Technology, Changsha 410073, China</td><td></td></tr><tr><td>af278274e4bda66f38fd296cfa5c07804fbc26ee</td><td>Innopolis University, Kazan, Russia</td><td>Department of Computer Science</td></tr><tr><td>2dd5f1d69e0e8a95a10f3f07f2c0c7fa172994b3</td><td>Inst. Neural Computation, University of California</td><td></td></tr><tr><td>a6e21438695dbc3a184d33b6cf5064ddf655a9ba</td><td>Institiude of Computer Science and Technology, Peking University</td><td></td></tr><tr><td>31afdb6fa95ded37e5871587df38976fdb8c0d67</td><td>Institute</td><td></td></tr><tr><td>1b4bc7447f500af2601c5233879afc057a5876d8</td><td>Institute</td><td></td></tr><tr><td>66330846a03dcc10f36b6db9adf3b4d32e7a3127</td><td>Institute AIFB, Karlsruhe Institute of Technology, Germany</td><td></td></tr><tr><td>8c9c8111e18f8798a612e7386e88536dfe26455e</td><td>Institute Polythechnic of Leiria, Portugal</td><td></td></tr><tr><td>ea890846912f16a0f3a860fce289596a7dac575f</td><td>Institute for Adaptive and Neural Computation, University of Edinburgh, Edinburgh, UK</td><td></td></tr><tr><td>24f1febcdf56cd74cb19d08010b6eb5e7c81c362</td><td>Institute for Advanced</td><td></td></tr><tr><td>4377b03bbee1f2cf99950019a8d4111f8de9c34a</td><td>Institute for Advanced Computer Studies</td><td></td></tr><tr><td>074af31bd9caa61fea3c4216731420bd7c08b96a</td><td>Institute for Advanced Computer Studies, University of Maryland, College Park, MD</td><td></td></tr><tr><td>507c9672e3673ed419075848b4b85899623ea4b0</td><td>Institute for Anthropomatics</td><td></td></tr><tr><td>9ed4ad41cbad645e7109e146ef6df73f774cd75d</td><td>Institute for Anthropomatics</td><td></td></tr><tr><td>a5ade88747fa5769c9c92ffde9b7196ff085a9eb</td><td>Institute for Anthropomatics</td><td></td></tr><tr><td>a5ade88747fa5769c9c92ffde9b7196ff085a9eb</td><td>Institute for Anthropomatics</td><td></td></tr><tr><td>10f66f6550d74b817a3fdcef7fdeba13ccdba51c</td><td>Institute for 
Anthropomatics</td><td></td></tr><tr><td>656ef752b363a24f84cc1aeba91e4fa3d5dd66ba</td><td>Institute for Anthropomatics</td><td></td></tr><tr><td>14b87359f6874ff9b8ee234b18b418e57e75b762</td><td>Institute for Anthropomatics</td><td></td></tr><tr><td>ae4390873485c9432899977499c3bf17886fa149</td><td>Institute for Arts, Science and Technology</td><td></td></tr><tr><td>de15af84b1257211a11889b6c2adf0a2bcf59b42</td><td>Institute for Communication Systems</td><td></td></tr><tr><td>f7a271acccf9ec66c9b114d36eec284fbb89c7ef</td><td>Institute for Complex</td><td></td></tr><tr><td>fc2bad3544c7c8dc7cd182f54888baf99ed75e53</td><td>Institute for Computer Graphics and Vision</td><td></td></tr><tr><td>80277fb3a8a981933533cf478245f262652a33b5</td><td>Institute for Computer Graphics and Vision</td><td></td></tr><tr><td>5c8672c0d2f28fd5d2d2c4b9818fcff43fb01a48</td><td>Institute for Computer Graphics and Vision</td><td></td></tr><tr><td>96a9ca7a8366ae0efe6b58a515d15b44776faf6e</td><td>Institute for Computer Graphics and Vision</td><td></td></tr><tr><td>de8381903c579a4fed609dff3e52a1dc51154951</td><td>Institute for Computer Graphics and Vision</td><td></td></tr><tr><td>4ab10174a4f98f7e2da7cf6ccfeb9bc64c8e7da8</td><td>Institute for Computer Graphics and Vision</td><td></td></tr><tr><td>3137a3fedf23717c411483c7b4bd2ed646258401</td><td>Institute for Computer Graphics and Vision, Graz University of Technology</td><td></td></tr><tr><td>2dd2c7602d7f4a0b78494ac23ee1e28ff489be88</td><td>Institute for Computer Graphics and Vision, Graz University of Technology</td><td></td></tr><tr><td>b73795963dc623a634d218d29e4a5b74dfbc79f1</td><td>Institute for Creative Technologies</td><td></td></tr><tr><td>e35b09879a7df814b2be14d9102c4508e4db458b</td><td>Institute for Disease Modeling, Intellectual Ventures Laboratory, Bellevue, WA 98004, United States</td><td></td></tr><tr><td>910524c0d0fe062bf806bb545627bf2c9a236a03</td><td>Institute for Electronics, Signal Processing and 
Communications</td><td></td></tr><tr><td>561ae67de137e75e9642ab3512d3749b34484310</td><td>Institute for Genomic Statistic and Bioinformatics, University Hospital Bonn</td><td></td></tr><tr><td>fe464b2b54154d231671750053861f5fd14454f5</td><td>Institute for Human-Machine</td><td></td></tr><tr><td>e6178de1ef15a6a973aad2791ce5fbabc2cb8ae5</td><td>Institute for Human-Machine Communication</td><td></td></tr><tr><td>966e36f15b05ef8436afecf57a97b73d6dcada94</td><td>Institute for Human-Machine Communication, Technische Universit at M unchen</td><td></td></tr><tr><td>718824256b4461d62d192ab9399cfc477d3660b4</td><td>Institute for Human-Machine Communication, Technische Universit at M unchen, Germany</td><td></td></tr><tr><td>464b3f0824fc1c3a9eaf721ce2db1b7dfe7cb05a</td><td>Institute for Infocomm Research</td><td></td></tr><tr><td>b1429e4d3dd3412e92a37d2f9e0721ea719a9b9e</td><td>Institute for Infocomm Research (I2R), A*STAR, Singapore</td><td></td></tr><tr><td>3b557c4fd6775afc80c2cf7c8b16edde125b270e</td><td>Institute for Infocomm Research, A*STAR</td><td></td></tr><tr><td>3d948e4813a6856e5b8b54c20e50cc5050e66abe</td><td>Institute for Infocomm Research, A*STAR, Singapore</td><td></td></tr><tr><td>1e07500b00fcd0f65cf30a11f9023f74fe8ce65c</td><td>Institute for Infocomm Research, A*STAR, Singapore</td><td></td></tr><tr><td>0bf3513d18ec37efb1d2c7934a837dabafe9d091</td><td>Institute for Infocomm Research, Agency for Science, Technology and Research (A*STAR), Singapore</td><td></td></tr><tr><td>481fb0a74528fa7706669a5cce6a212ac46eaea3</td><td>Institute for Infocomm Research, Agency for Science, Technology and Research, Singapore</td><td></td></tr><tr><td>c7c03324833ba262eeaada0349afa1b5990c1ea7</td><td>Institute for Infocomm Research, Singapore</td><td>Visual Computing Department</td></tr><tr><td>1f9b2f70c24a567207752989c5bd4907442a9d0f</td><td>Institute for Infocomm Research, Singapore</td><td></td></tr><tr><td>6409b8879c7e61acf3ca17bcc62f49edca627d4c</td><td>Institute for 
Information Systems Engineering</td><td></td></tr><tr><td>b73d9e1af36aabb81353f29c40ecdcbdf731dbed</td><td>Institute for Information Technology and Communications (IIKT), Otto-von-Guericke-University</td><td></td></tr><tr><td>d0d7671c816ed7f37b16be86fa792a1b29ddd79b</td><td>Institute for Interdisciplinary Information Sciences, Tsinghua University, Beijing, China</td><td></td></tr><tr><td>fdfd57d4721174eba288e501c0c120ad076cdca8</td><td>Institute for Language, Cognition and Computation</td><td></td></tr><tr><td>fe464b2b54154d231671750053861f5fd14454f5</td><td>Institute for Media Technology</td><td></td></tr><tr><td>a0061dae94d916f60a5a5373088f665a1b54f673</td><td>Institute for Medical Engineering Science, Massachusetts Institute of Technology, 77 Massachusetts Avenue, Cambridge, MA</td><td></td></tr><tr><td>9547a7bce2b85ef159b2d7c1b73dea82827a449f</td><td>Institute for Neural Computation</td><td></td></tr><tr><td>57ee3a8b0cafe211d1e9b477d210bb78b9d43bc1</td><td>Institute for Neural Computation</td><td></td></tr><tr><td>9a1a9dd3c471bba17e5ce80a53e52fcaaad4373e</td><td>Institute for Neural Computation, University of California, San Diego</td><td></td></tr><tr><td>1bcbf2a4500d27d036e0f9d36d7af71c72f8ab61</td><td>Institute for Neural Computation, University of California, San Diego</td><td></td></tr><tr><td>3dabf7d853769cfc4986aec443cc8b6699136ed0</td><td>Institute for Neural Computation, University of California, San Diego, La Jolla, CA</td><td></td></tr><tr><td>50f0c495a214b8d57892d43110728e54e413d47d</td><td>Institute for Numerical Mathematics</td><td></td></tr><tr><td>54a9ed950458f4b7e348fa78a718657c8d3d0e05</td><td>Institute for Optical Systems, HTWG Konstanz, Germany</td><td></td></tr><tr><td>c78fdd080df01fff400a32fb4cc932621926021f</td><td>Institute for Pattern Recognition and Artificial Intelligence/ Huazhong University of Science and Technology, Wuhan</td><td></td></tr><tr><td>c78fdd080df01fff400a32fb4cc932621926021f</td><td>Institute for Pattern Recognition 
and Artificial Intelligence/ Huazhong University of Science and Technology, Wuhan</td><td></td></tr><tr><td>87e6cb090aecfc6f03a3b00650a5c5f475dfebe1</td><td>Institute for Robotics and Intelligent</td><td></td></tr><tr><td>1cad5d682393ffbb00fd26231532d36132582bb4</td><td>Institute for Robotics and Intelligent</td><td></td></tr><tr><td>6341274aca0c2977c3e1575378f4f2126aa9b050</td><td>Institute for Robotics and Intelligent Systems</td><td></td></tr><tr><td>582edc19f2b1ab2ac6883426f147196c8306685a</td><td>Institute for Robotics and Intelligent Systems, USC, CA, USA</td><td></td></tr><tr><td>c75e6ce54caf17b2780b4b53f8d29086b391e839</td><td>Institute for Robotics and Intelligent Systems, USC, CA, USA</td><td></td></tr><tr><td>870433ba89d8cab1656e57ac78f1c26f4998edfb</td><td>Institute for Robotics and Intelligent Systems, USC, CA, USA</td><td></td></tr><tr><td>0a34fe39e9938ae8c813a81ae6d2d3a325600e5c</td><td>Institute for Robotics and Intelligent Systems, USC, CA, USA</td><td></td></tr><tr><td>1e6ed6ca8209340573a5e907a6e2e546a3bf2d28</td><td>Institute for Robotics and Intelligent Systems, USC, CA, USA</td><td></td></tr><tr><td>d28d32af7ef9889ef9cb877345a90ea85e70f7f1</td><td>Institute for Robotics and Intelligent Systems, University of Southern California, CA, USA</td><td></td></tr><tr><td>f963967e52a5fd97fa3ebd679fd098c3cb70340e</td><td>Institute for Studies in Fundamental Sciences (IPM), Tehran, Iran</td><td></td></tr><tr><td>d1881993c446ea693bbf7f7d6e750798bf958900</td><td>Institute for System Programming</td><td></td></tr><tr><td>d1881993c446ea693bbf7f7d6e750798bf958900</td><td>Institute for System Programming</td><td></td></tr><tr><td>0ef96d97365899af797628e80f8d1020c4c7e431</td><td>Institute for Vision Systems Engineering</td><td></td></tr><tr><td>87bee0e68dfc86b714f0107860d600fffdaf7996</td><td>Institute for Vision and Graphics, University of Siegen, Germany</td><td></td></tr><tr><td>d350a9390f0818703f886138da27bf8967fe8f51</td><td>Institute for Vision and 
Graphics, University of Siegen, Germany</td><td></td></tr><tr><td>b4f4b0d39fd10baec34d3412d53515f1a4605222</td><td>Institute for studies in theoretical Physics and Mathematics(IPM</td><td></td></tr><tr><td>0515e43c92e4e52254a14660718a9e498bd61cf5</td><td>Institute of</td><td></td></tr><tr><td>5ea9cba00f74d2e113a10c484ebe4b5780493964</td><td>Institute of</td><td></td></tr><tr><td>bbcb4920b312da201bf4d2359383fb4ee3b17ed9</td><td>Institute of Advanced Technology, Nanjing University of Posts and Telecommunications, Nanjing</td><td></td></tr><tr><td>bbe949c06dc4872c7976950b655788555fe513b8</td><td>Institute of Anthropomatics, Karlsruhe Institute of Technology, Germany</td><td></td></tr><tr><td>4ff4c27e47b0aa80d6383427642bb8ee9d01c0ac</td><td>Institute of Arti cial Intelligence and Cognitive Engineering</td><td></td></tr><tr><td>d8896861126b7fd5d2ceb6fed8505a6dff83414f</td><td>Institute of Arti cial Intelligence and Cognitive Engineering (ALICE), University of Groningen</td><td></td></tr><tr><td>1255afbf86423c171349e874b3ac297de19f00cd</td><td>Institute of Arti cial Intelligence and Cognitive Engineering (ALICE), University of Groningen</td><td></td></tr><tr><td>2588acc7a730d864f84d4e1a050070ff873b03d5</td><td>Institute of Arti cial Intelligence and Robotics, Xi an Jiaotong University, Xi an 710049, China</td><td></td></tr><tr><td>f02a6bccdaee14ab55ad94263539f4f33f1b15bb</td><td>Institute of Arti cial Intelligence and Robotics, Xi an Jiaotong University, Xi an, Shannxi 710049, China</td><td></td></tr><tr><td>fe464b2b54154d231671750053861f5fd14454f5</td><td>Institute of Automatic Control</td><td></td></tr><tr><td>d074b33afd95074d90360095b6ecd8bc4e5bb6a2</td><td>Institute of Automatic Control Engineering (LSR</td><td></td></tr><tr><td>6691dfa1a83a04fdc0177d8d70e3df79f606b10f</td><td>Institute of Automation</td><td></td></tr><tr><td>171d8a39b9e3d21231004f7008397d5056ff23af</td><td>Institute of 
Automation</td><td></td></tr><tr><td>122f51cee489ba4da5ab65064457fbe104713526</td><td>Institute of Automation</td><td></td></tr><tr><td>122f51cee489ba4da5ab65064457fbe104713526</td><td>Institute of Automation</td><td></td></tr><tr><td>122f51cee489ba4da5ab65064457fbe104713526</td><td>Institute of Automation</td><td></td></tr><tr><td>122f51cee489ba4da5ab65064457fbe104713526</td><td>Institute of Automation</td><td></td></tr><tr><td>d4e669d5d35fa0ca9f8d9a193c82d4153f5ffc4e</td><td>Institute of Automation Chinese Academy of Sciences, Beijing, China</td><td></td></tr><tr><td>b3c398da38d529b907b0bac7ec586c81b851708f</td><td>Institute of Automation, Chinese Academy of</td><td></td></tr><tr><td>3d18ce183b5a5b4dcaa1216e30b774ef49eaa46f</td><td>Institute of Automation, Chinese Academy of Sciences</td><td></td></tr><tr><td>d04d5692461d208dd5f079b98082eda887b62323</td><td>Institute of Automation, Chinese Academy of Sciences</td><td></td></tr><tr><td>bc910ca355277359130da841a589a36446616262</td><td>Institute of Automation, Chinese Academy of Sciences</td><td></td></tr><tr><td>ca54d0a128b96b150baef392bf7e498793a6371f</td><td>Institute of Automation, Chinese Academy of Sciences</td><td></td></tr><tr><td>4e6c17966efae956133bf8f22edeffc24a0470c1</td><td>Institute of Automation, Chinese Academy of Sciences</td><td></td></tr><tr><td>2654ef92491cebeef0997fd4b599ac903e48d07a</td><td>Institute of Automation, Chinese Academy of Sciences</td><td></td></tr><tr><td>2a4153655ad1169d482e22c468d67f3bc2c49f12</td><td>Institute of Automation, Chinese Academy of Sciences</td><td></td></tr><tr><td>3661a34f302883c759b9fa2ce03de0c7173d2bb2</td><td>Institute of Automation, Chinese Academy of Sciences</td><td></td></tr><tr><td>5b89744d2ac9021f468b3ffd32edf9c00ed7fed7</td><td>Institute of Automation, Chinese Academy of Sciences</td><td></td></tr><tr><td>63cff99eff0c38b633c8a3a2fec8269869f81850</td><td>Institute of Automation, Chinese Academy of 
Sciences</td><td></td></tr><tr><td>46e72046a9bb2d4982d60bcf5c63dbc622717f0f</td><td>Institute of Automation, Chinese Academy of Sciences</td><td></td></tr><tr><td>4622b82a8aff4ac1e87b01d2708a333380b5913b</td><td>Institute of Automation, Chinese Academy of Sciences</td><td></td></tr><tr><td>82b43bc9213230af9db17322301cbdf81e2ce8cc</td><td>Institute of Automation, Chinese Academy of Sciences</td><td></td></tr><tr><td>492f41e800c52614c5519f830e72561db205e86c</td><td>Institute of Automation, Chinese Academy of Sciences</td><td></td></tr><tr><td>6c80c834d426f0bc4acd6355b1946b71b50cbc0b</td><td>Institute of Automation, Chinese Academy of Sciences (CASIA</td><td></td></tr><tr><td>b11bb6bd63ee6f246d278dd4edccfbe470263803</td><td>Institute of Automation, Chinese Academy of Sciences (CASIA</td><td></td></tr><tr><td>2c19d3d35ef7062061b9e16d040cebd7e45f281d</td><td>Institute of Automation, Chinese Academy of Sciences (CASIA</td><td></td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>Institute of Automation, Chinese Academy of Sciences (CASIA</td><td></td></tr><tr><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td><td>Institute of Automation, Chinese Academy of Sciences (CASIA</td><td></td></tr><tr><td>231a6d2ee1cc76f7e0c5912a530912f766e0b459</td><td>Institute of Automation, Chinese Academy of Sciences, 100190, Beijing, P.R.C</td><td></td></tr><tr><td>2b10a07c35c453144f22e8c539bf9a23695e85fc</td><td>Institute of Automation, Chinese Academy of Sciences, Beijing 100080, China</td><td></td></tr><tr><td>2af2b74c3462ccff3a6881ff7cf4f321b3242fa9</td><td>Institute of Automation, Chinese Academy of Sciences, Beijing 100190, China</td><td></td></tr><tr><td>212608e00fc1e8912ff845ee7a4a67f88ba938fc</td><td>Institute of Automation, Chinese Academy of Sciences, Beijing, P. R. 
China</td><td></td></tr><tr><td>506c2fbfa9d16037d50d650547ad3366bb1e1cde</td><td>Institute of Automation, Chinese Academy of Sciences, China</td><td></td></tr><tr><td>321c8ba38db118d8b02c0ba209be709e6792a2c7</td><td>Institute of Automation, Chinese Academy of Sciences, China</td><td></td></tr><tr><td>c94b3a05f6f41d015d524169972ae8fd52871b67</td><td>Institute of Automation, Chinese Academy of Sciences, China</td><td></td></tr><tr><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td><td>Institute of Automation, Chinese Academy of Sciences, China</td><td></td></tr><tr><td>19a9f658ea14701502d169dc086651b1d9b2a8ea</td><td>Institute of Automation, Chinese Academy of Sciences, China</td><td></td></tr><tr><td>4c6233765b5f83333f6c675d3389bbbf503805e3</td><td>Institute of Automation, Chinese Academy of Sciences, China</td><td></td></tr><tr><td>2f04ba0f74df046b0080ca78e56898bd4847898b</td><td>Institute of Automation, Chinese Academy of Sciences, China</td><td></td></tr><tr><td>199c2df5f2847f685796c2523221c6436f022464</td><td>Institute of Automation, Chinese Academy of Sciences; 2Miscrosoft Research Asian; 3Media School</td><td></td></tr><tr><td>4ea53e76246afae94758c1528002808374b75cfa</td><td>Institute of Biochemistry, University of Balochistan, Quetta</td><td></td></tr><tr><td>0b85b50b6ff03a7886c702ceabad9ab8c8748fdc</td><td>Institute of Child Health, University College London, UK</td><td></td></tr><tr><td>2c34bf897bad780e124d5539099405c28f3279ac</td><td>Institute of Chinese Payment System, Southwestern University of Finance and Economics, Chengdu 610074, China</td><td></td></tr><tr><td>c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8</td><td>Institute of Cognitive Neuroscience, University College London, London WC1N 3AR, UK. 
2Affective Brain</td><td></td></tr><tr><td>081286ede247c5789081502a700b378b6223f94b</td><td>Institute of Cognitive and Behavioural Neuroscience, SWPS University of Social</td><td>Department of Experimental Psychology</td></tr><tr><td>182470fd0c18d0c5979dff75d089f1da176ceeeb</td><td>Institute of Communications Engineering</td><td></td></tr><tr><td>81695fbbbea2972d7ab1bfb1f3a6a0dbd3475c0f</td><td>Institute of Computer Science</td><td></td></tr><tr><td>4015e8195db6edb0ef8520709ca9cb2c46f29be7</td><td>Institute of Computer Science</td><td></td></tr><tr><td>14b66748d7c8f3752dca23991254fca81b6ee86c</td><td>Institute of Computer Science III</td><td></td></tr><tr><td>8b61fdc47b5eeae6bc0a52523f519eaeaadbc8c8</td><td>Institute of Computer Science and</td><td></td></tr><tr><td>0517d08da7550241fb2afb283fc05d37fce5d7b7</td><td>Institute of Computer Science and Technology, Chongqing University of Posts and</td><td></td></tr><tr><td>06f585a3a05dd3371cd600a40dc35500e2f82f9b</td><td>Institute of Computer Science and Technology, Peking University</td><td></td></tr><tr><td>b7820f3d0f43c2ce613ebb6c3d16eb893c84cf89</td><td>Institute of Computer Science and Technology, Peking University</td><td></td></tr><tr><td>488375ae857a424febed7c0347cc9590989f01f7</td><td>Institute of Computer Science, Foundation for Research and Technology - Hellas (FORTH), Crete, 73100, Greece</td><td></td></tr><tr><td>53ce84598052308b86ba79d873082853022aa7e9</td><td>Institute of Computer science, Shahid Bahonar University</td><td></td></tr><tr><td>9d24179aa33a94c8c61f314203bf9e906d6b64de</td><td>Institute of Computing</td><td></td></tr><tr><td>4b74f2d56cd0dda6f459319fec29559291c61bff</td><td>Institute of Computing</td><td></td></tr><tr><td>38a9ca2c49a77b540be52377784b9f734e0417e4</td><td>Institute of Computing</td><td></td></tr><tr><td>902114feaf33deac209225c210bbdecbd9ef33b1</td><td>Institute of Computing</td><td></td></tr><tr><td>badcfb7d4e2ef0d3e332a19a3f93d59b4f85668e</td><td>Institute of Computing 
Technology</td><td></td></tr><tr><td>51a8dabe4dae157aeffa5e1790702d31368b9161</td><td>Institute of Computing Technology, CAS</td><td></td></tr><tr><td>2969f822b118637af29d8a3a0811ede2751897b5</td><td>Institute of Computing Technology, CAS, Beijing 100190, China</td><td></td></tr><tr><td>449808b7aa9ee6b13ad1a21d9f058efaa400639a</td><td>Institute of Computing Technology, CAS, Beijing 100190, China</td><td></td></tr><tr><td>303a7099c01530fa0beb197eb1305b574168b653</td><td>Institute of Computing Technology, CAS, Beijing 100190, China</td><td></td></tr><tr><td>d2cd9a7f19600370bce3ea29aba97d949fe0ceb9</td><td>Institute of Computing Technology, CAS, Beijing 100190, China</td><td></td></tr><tr><td>db36e682501582d1c7b903422993cf8d70bb0b42</td><td>Institute of Computing Technology, CAS, Beijing 100190, China</td><td></td></tr><tr><td>a820941eaf03077d68536732a4d5f28d94b5864a</td><td>Institute of Computing Technology, CAS, Beijing 100190, China</td><td></td></tr><tr><td>cd023d2d067365c83d8e27431e83e7e66082f718</td><td>Institute of Computing Technology, CAS, Beijing 100190, China</td><td></td></tr><tr><td>7fcfd72ba6bc14bbb90b31fe14c2c77a8b220ab2</td><td>Institute of Computing Technology, CAS, Beijing 100190, China</td><td></td></tr><tr><td>68c4a1d438ea1c6dfba92e3aee08d48f8e7f7090</td><td>Institute of Computing Technology, CAS, Beijing, 100190, China</td><td></td></tr><tr><td>56359d2b4508cc267d185c1d6d310a1c4c2cc8c2</td><td>Institute of Computing Technology, CAS, Beijing, 100190, China</td><td></td></tr><tr><td>0595d18e8d8c9fb7689f636341d8a55cc15b3e6a</td><td>Institute of Computing Technology, CAS, Beijing, 100190, China</td><td></td></tr><tr><td>0568fc777081cbe6de95b653644fec7b766537b2</td><td>Institute of Computing Technology, CAS, Beijing, 100190, China</td><td></td></tr><tr><td>adce9902dca7f4e8a9b9cf6686ec6a7c0f2a0ba6</td><td>Institute of Computing Technology, CAS, Beijing, 100190, China</td><td></td></tr><tr><td>bd8b7599acf53e3053aa27cfd522764e28474e57</td><td>Institute of 
Computing Technology, CAS, Beijing, 100190, China</td><td></td></tr><tr><td>ab734bac3994b00bf97ce22b9abc881ee8c12918</td><td>Institute of Computing Technology, CAS, Beijing, 100190, China</td><td></td></tr><tr><td>11dc744736a30a189f88fa81be589be0b865c9fa</td><td>Institute of Computing Technology, CAS, Beijing, 100190, China</td><td></td></tr><tr><td>7c9622ad1d8971cd74cc9e838753911fe27ccac4</td><td>Institute of Computing Technology, CAS, Beijing, 100190, China</td><td></td></tr><tr><td>288964068cd87d97a98b8bc927d6e0d2349458a2</td><td>Institute of Computing Technology, CAS, Beijing, 100190, China</td><td></td></tr><tr><td>5d88702cdc879396b8b2cc674e233895de99666b</td><td>Institute of Computing Technology, CAS, Beijing, 100190, China</td><td></td></tr><tr><td>99facca6fc50cc30f13b7b6dd49ace24bc94f702</td><td>Institute of Computing Technology, CAS, Beijing, 100190, China</td><td></td></tr><tr><td>24cb375a998f4af278998f8dee1d33603057e525</td><td>Institute of Computing Technology, CAS, Beijing, 100190, China</td><td></td></tr><tr><td>120bcc9879d953de7b2ecfbcd301f72f3a96fb87</td><td>Institute of Computing Technology, CAS, Beijing, 100190, China</td><td></td></tr><tr><td>e0dc6f1b740479098c1d397a7bc0962991b5e294</td><td>Institute of Computing Technology, Chinese Academy of Sciences</td><td></td></tr><tr><td>2af2b74c3462ccff3a6881ff7cf4f321b3242fa9</td><td>Institute of Computing Technology, Chinese Academy of Sciences, Beijing 100190, China</td><td></td></tr><tr><td>3b7f6035a113b560760c5e8000540fc46f91fed5</td><td>Institute of Computing Technology, Chinese Academy of Sciences, Beijing, 100190, China</td><td></td></tr><tr><td>ed388878151a3b841f95a62c42382e634d4ab82e</td><td>Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China</td><td></td></tr><tr><td>ac1d97a465b7cc56204af5f2df0d54f819eef8a6</td><td>Institute of Computing, University of Campinas (Unicamp), Campinas, Brazil, e-mail: 
ander</td><td></td></tr><tr><td>74875368649f52f74bfc4355689b85a724c3db47</td><td>Institute of Data Science and Technology, Alibaba Group</td><td></td></tr><tr><td>250ebcd1a8da31f0071d07954eea4426bb80644c</td><td>Institute of Deep Learning</td><td></td></tr><tr><td>74875368649f52f74bfc4355689b85a724c3db47</td><td>Institute of Deep Learning, Baidu Research</td><td></td></tr><tr><td>8bf243817112ac0aa1348b40a065bb0b735cdb9c</td><td>Institute of Digital Media</td><td></td></tr><tr><td>32a40c43a9bc1f1c1ed10be3b9f10609d7e0cb6b</td><td>Institute of Digital Media, Peking University, Beijing 100871, China</td><td></td></tr><tr><td>d2cd9a7f19600370bce3ea29aba97d949fe0ceb9</td><td>Institute of Digital Media, Peking University, Beijing 100871, China</td><td></td></tr><tr><td>449808b7aa9ee6b13ad1a21d9f058efaa400639a</td><td>Institute of Digital Media, Peking University, Beijing, 100871, China</td><td></td></tr><tr><td>1130c38e88108cf68b92ecc61a9fc5aeee8557c9</td><td>Institute of Electrical Measurement and Measurement Signal Processing, TU Graz, Austria</td><td></td></tr><tr><td>be07f2950771d318a78d2b64de340394f7d6b717</td><td>Institute of Electrical and Electronics Engineers</td><td></td></tr><tr><td>162c33a2ec8ece0dc96e42d5a86dc3fedcf8cd5e</td><td>Institute of Electrical and Electronics Engineers (IEEE). 
DOI</td><td></td></tr><tr><td>daa02cf195818cbf651ef81941a233727f71591f</td><td>Institute of Electronics and Computer Science</td><td></td></tr><tr><td>511b06c26b0628175c66ab70dd4c1a4c0c19aee9</td><td>Institute of Engineering and Technology, Alwar, Rajasthan Technical University, Kota(Raj</td><td></td></tr><tr><td>081286ede247c5789081502a700b378b6223f94b</td><td>Institute of Experimental Biology of Polish Academy of Sciences, Warsaw, Poland</td><td></td></tr><tr><td>03baf00a3d00887dd7c828c333d4a29f3aacd5f5</td><td>Institute of Graduate Studies and Research</td><td></td></tr><tr><td>561ae67de137e75e9642ab3512d3749b34484310</td><td>Institute of Human Genetics, University Hospital Magdeburg, Magdeburg, Germany</td><td></td></tr><tr><td>c0ca6b992cbe46ea3003f4e9b48f4ef57e5fb774</td><td>Institute of Image Communication and Network Engineering, Shanghai Jiao Tong University</td><td></td></tr><tr><td>372a8bf0ef757c08551d41e40cb7a485527b6cd7</td><td>Institute of Image Processing and Pattern Recognition, Shanghai Jiao Tong</td><td></td></tr><tr><td>159e792096756b1ec02ec7a980d5ef26b434ff78</td><td>Institute of Image Processing and Pattern Recognition, Shanghai Jiao Tong University</td><td></td></tr><tr><td>7fc76446d2b11fc0479df6e285723ceb4244d4ef</td><td>Institute of Image Processing and Pattern Recognition, Shanghai Jiao Tong University, Shanghai, China</td><td></td></tr><tr><td>4d625677469be99e0a765a750f88cfb85c522cce</td><td>Institute of Industrial Science</td><td></td></tr><tr><td>4d625677469be99e0a765a750f88cfb85c522cce</td><td>Institute of Industrial Science</td><td></td></tr><tr><td>846c028643e60fefc86bae13bebd27341b87c4d1</td><td>Institute of Industrial Science, The University of Tokyo</td><td></td></tr><tr><td>1659a8b91c3f428f1ba6aeba69660f2c9d0a85c6</td><td>Institute of Informatics - ISLA</td><td></td></tr><tr><td>72f4aaf7e2e3f215cd8762ce283988220f182a5b</td><td>Institute of Informatics, Istanbul Technical University, Istanbul, 34469, 
TURKEY</td><td></td></tr><tr><td>5f57a1a3a1e5364792b35e8f5f259f92ad561c1f</td><td>Institute of Information Science</td><td></td></tr><tr><td>b5930275813a7e7a1510035a58dd7ba7612943bc</td><td>Institute of Information Science</td><td></td></tr><tr><td>b42a97fb47bcd6bfa72e130c08960a77ee96f9ab</td><td>Institute of Information Science</td><td></td></tr><tr><td>64782a2bc5da11b1b18ca20cecf7bdc26a538d68</td><td>Institute of Information Science</td><td></td></tr><tr><td>a660390654498dff2470667b64ea656668c98ecc</td><td>Institute of Information Science</td><td></td></tr><tr><td>e726174d516605f80ff359e71f68b6e8e6ec6d5d</td><td>Institute of Information Science</td><td></td></tr><tr><td>1c17450c4d616e1e1eece248c42eba4f87de9e0d</td><td>Institute of Information Science</td><td></td></tr><tr><td>266766818dbc5a4ca1161ae2bc14c9e269ddc490</td><td>Institute of Information Science and Technologies of CNR (CNR-ISTI)-Italy, 56124 Pisa, Italy</td><td></td></tr><tr><td>0951f42abbf649bb564a21d4ff5dddf9a5ea54d9</td><td>Institute of Information Science, Academia Sinica, Taipei</td><td></td></tr><tr><td>6ab33fa51467595f18a7a22f1d356323876f8262</td><td>Institute of Information Science, Academia Sinica, Taipei, Taiwan</td><td></td></tr><tr><td>5397c34a5e396658fa57e3ca0065a2878c3cced7</td><td>Institute of Information Science, Academia Sinica, Taipei, Taiwan</td><td></td></tr><tr><td>5b73b7b335f33cda2d0662a8e9520f357b65f3ac</td><td>Institute of Information Science, Academia Sinica, Taipei, Taiwan</td><td></td></tr><tr><td>c44c84540db1c38ace232ef34b03bda1c81ba039</td><td>Institute of Information Science, Academia Sinica, Taipei, Taiwan</td><td></td></tr><tr><td>2303d07d839e8b20f33d6e2ec78d1353cac256cf</td><td>Institute of Information Science, Beijing Jiaotong University, Beijing 100044, China</td><td></td></tr><tr><td>739d400cb6fb730b894182b29171faaae79e3f01</td><td>Institute of Information Science, Beijing Jiaotong University, Beijing 100044, P.R. 
China</td><td></td></tr><tr><td>8af411697e73f6cfe691fe502d4bfb42510b4835</td><td>Institute of Information Technology</td><td></td></tr><tr><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td><td>Institute of Interdisciplinary Studies in Identity Sciences (IISIS</td><td></td></tr><tr><td>4ab84f203b0e752be83f7f213d7495b04b1c4c79</td><td>Institute of Mathematics and Statistics</td><td></td></tr><tr><td>2be0ab87dc8f4005c37c523f712dd033c0685827</td><td>Institute of Media Innovation</td><td></td></tr><tr><td>0fdcfb4197136ced766d538b9f505729a15f0daf</td><td>Institute of Media and Information Technology, Chiba University</td><td></td></tr><tr><td>142e5b4492bc83b36191be4445ef0b8b770bf4b0</td><td>Institute of Mental Health, Peking University, P.R. China</td><td></td></tr><tr><td>614079f1a0d0938f9c30a1585f617fa278816d53</td><td>Institute of Mental Health, The University of Nottingham</td><td></td></tr><tr><td>bc866c2ced533252f29cf2111dd71a6d1724bd49</td><td>Institute of Microelectronics, Tsinghua University, Beijing 100084, China</td><td></td></tr><tr><td>54a9ed950458f4b7e348fa78a718657c8d3d0e05</td><td>Institute of Neural Information Processing, Ulm University, Germany</td><td></td></tr><tr><td>50c0de2cccf7084a81debad5fdb34a9139496da0</td><td>Institute of Neural Information Processing, Ulm University, Ulm, Germany</td><td></td></tr><tr><td>1cbd3f96524ca2258fd2d5c504c7ea8da7fb1d16</td><td>Institute of Neural Information Processing, Ulm University, Ulm, Germany</td><td></td></tr><tr><td>a35dd69d63bac6f3296e0f1d148708cfa4ba80f6</td><td>Institute of Neuroscience, State Key Laboratory of Neuroscience, CAS Center for Excellence in Brain</td><td></td></tr><tr><td>55eb7ec9b9740f6c69d6e62062a24bfa091bbb0c</td><td>Institute of Psychology and Behavioral Sciences</td><td></td></tr><tr><td>0f395a49ff6cbc7e796656040dbf446a40e300aa</td><td>Institute of Psychology, Chinese</td><td></td></tr><tr><td>b3b4a7e29b9186e00d2948a1d706ee1605fe5811</td><td>Institute of Radioelectronics, Warsaw 
University of Technology, Warsaw, Poland</td><td></td></tr><tr><td>98a660c15c821ea6d49a61c5061cd88e26c18c65</td><td>Institute of Road and</td><td>Department of Electronics and Communication Engineering</td></tr><tr><td>3d143cfab13ecd9c485f19d988242e7240660c86</td><td>Institute of Scienti c and Industrial Research, Osaka University, Ibaraki-shi 567-0047, Japan</td><td></td></tr><tr><td>c9367ed83156d4d682cefc59301b67f5460013e0</td><td>Institute of Software, Chinese Academy of Sciences</td><td></td></tr><tr><td>cf805d478aeb53520c0ab4fcdc9307d093c21e52</td><td>Institute of Software, Chinese Academy of Sciences (CAS</td><td></td></tr><tr><td>19e62a56b6772bbd37dfc6b8f948e260dbb474f5</td><td>Institute of Software, Chinese Academy of Sciences, Beijing 100190, China</td><td></td></tr><tr><td>d33b26794ea6d744bba7110d2d4365b752d7246f</td><td>Institute of Software, Chinese Academy of Sciences, Beijing 100190, China</td><td></td></tr><tr><td>feb6e267923868bff6e2108603d00fdfd65251ca</td><td>Institute of Systems Engineering, Southeast University, Nanjing, China</td><td></td></tr><tr><td>8c9c8111e18f8798a612e7386e88536dfe26455e</td><td>Institute of Systems and Robotics</td><td></td></tr><tr><td>8c9c8111e18f8798a612e7386e88536dfe26455e</td><td>Institute of Systems and Robotics</td><td></td></tr><tr><td>8c9c8111e18f8798a612e7386e88536dfe26455e</td><td>Institute of Systems and Robotics</td><td></td></tr><tr><td>11f7f939b6fcce51bdd8f3e5ecbcf5b59a0108f5</td><td>Institute of Systems and Robotics - University of Coimbra, Portugal</td><td></td></tr><tr><td>3802c97f925cb03bac91d9db13d8b777dfd29dcc</td><td>Institute of Systems and Robotics, University of Coimbra, Portugal</td><td></td></tr><tr><td>81706277ed180a92d2eeb94ac0560f7dc591ee13</td><td>Institute of Technology, Banaras Hindu</td><td></td></tr><tr><td>81706277ed180a92d2eeb94ac0560f7dc591ee13</td><td>Institute of Technology, Banaras Hindu</td><td></td></tr><tr><td>d9bad7c3c874169e3e0b66a031c8199ec0bc2c1f</td><td>Institute of 
Telecommunications, TU Wien</td><td></td></tr><tr><td>fdbacf2ff0fc21e021c830cdcff7d347f2fddd8e</td><td>Institute of Transportation Systems, German Aerospace Center (DLR), Braunschweig</td><td>Department of Human Factors</td></tr><tr><td>29e793271370c1f9f5ac03d7b1e70d1efa10577c</td><td>Institute of control science and engineering</td><td></td></tr><tr><td>2afdda6fb85732d830cea242c1ff84497cd5f3cb</td><td>Institute ofInformation Science, Academia Sinica, Taipei, Taiwan</td><td></td></tr><tr><td>d93baa5ecf3e1196b34494a79df0a1933fd2b4ec</td><td>Institute, CAS, China</td><td></td></tr><tr><td>d93baa5ecf3e1196b34494a79df0a1933fd2b4ec</td><td>Institute, CAS, China</td><td></td></tr><tr><td>c91103e6612fa7e664ccbc3ed1b0b5deac865b02</td><td>Integrated Research Center, Universit`a Campus Bio-Medico di Roma</td><td></td></tr><tr><td>0cbc4dcf2aa76191bbf641358d6cecf38f644325</td><td>Intel Lab, 2200 Mission College Blvd, Santa Clara, CA 95054, USA</td><td></td></tr><tr><td>7c119e6bdada2882baca232da76c35ae9b5277f8</td><td>Intelligence Computing Research Center</td><td></td></tr><tr><td>3b2d5585af59480531616fe970cb265bbdf63f5b</td><td>Intelligence, Concordia University, Montreal</td><td></td></tr><tr><td>c42a8969cd76e9f54d43f7f4dd8f9b08da566c5f</td><td>Intelligent Autonomous Systems (IAS), Technical University of Munich, Garching</td><td></td></tr><tr><td>c87f7ee391d6000aef2eadb49f03fc237f4d1170</td><td>Intelligent Behaviour Understanding Group, Imperial College London, London, UK</td><td>Department of Computing</td></tr><tr><td>3fac7c60136a67b320fc1c132fde45205cd2ac66</td><td>Intelligent Information Engineering and Science, Doshisha University, Kyoto, Japan</td><td></td></tr><tr><td>bd8f3fef958ebed5576792078f84c43999b1b207</td><td>Intelligent Recognition and Image Processing Lab, Beihang University, Beijing</td><td></td></tr><tr><td>ea46951b070f37ad95ea4ed08c7c2a71be2daedc</td><td>Intelligent Sensory Interactive Systems, University of Amsterdam, 
Netherlands</td><td></td></tr><tr><td>4c8ef4f98c6c8d340b011cfa0bb65a9377107970</td><td>Intelligent Systems Group, University of Groningen, The Netherlands</td><td></td></tr><tr><td>beb4546ae95f79235c5f3c0e9cc301b5d6fc9374</td><td>Intelligent Systems Group, Utrecht University, Padualaan 14, 3508 TB, Utrecht</td><td></td></tr><tr><td>937ffb1c303e0595317873eda5ce85b1a17f9943</td><td>Intelligent Systems Lab Amsterdam, University of Amsterdam</td><td></td></tr><tr><td>999289b0ef76c4c6daa16a4f42df056bf3d68377</td><td>Intelligent Systems Lab Amsterdam, University of Amsterdam, The Netherlands</td><td></td></tr><tr><td>faeefc5da67421ecd71d400f1505cfacb990119c</td><td>Intelligent Systems Laboratory, Halmstad University, Halmstad, Sweden</td><td></td></tr><tr><td>54948ee407b5d32da4b2eee377cc44f20c3a7e0c</td><td>Intelligent Systems Laboratory, University of Bristol, Bristol BS8 1UB, UK</td><td></td></tr><tr><td>465d5bb11912005f0a4f0569c6524981df18a7de</td><td>Intelligent User Interfaces Lab, Ko c University, Turkey</td><td></td></tr><tr><td>858b51a8a8aa082732e9c7fbbd1ea9df9c76b013</td><td>Intelligent and Interactive Systems, Institute of Computer Science, University of</td><td></td></tr><tr><td>2f2aa67c5d6dbfaf218c104184a8c807e8b29286</td><td>Interactive and Digital Media Institute</td><td></td></tr><tr><td>ee7093e91466b81d13f4d6933bcee48e4ee63a16</td><td>Interactive and Digital Media Institute, National University of Singapore, SG</td><td></td></tr><tr><td>38c901a58244be9a2644d486f9a1284dc0edbf8a</td><td>Interactive and Digital Media Institute, National University of Singapore, Singapore</td><td></td></tr><tr><td>c0ee89dc2dad76147780f96294de9e421348c1f4</td><td>Interdisciplinary Program in Visual Information Processing, Korea University, Seoul, Korea</td><td></td></tr><tr><td>b4362cd87ad219790800127ddd366cc465606a78</td><td>Interdisciplinary Program of Bioengineering, Seoul National University, Seoul 03080, 
Korea</td><td></td></tr><tr><td>d02b32b012ffba2baeb80dca78e7857aaeececb0</td><td>International Institute of Information Technology</td><td></td></tr><tr><td>f5eb411217f729ad7ae84bfd4aeb3dedb850206a</td><td>International Institute of Information Technology</td><td></td></tr><tr><td>185263189a30986e31566394680d6d16b0089772</td><td>International Institute of Information Technology</td><td></td></tr><tr><td>243e9d490fe98d139003bb8dc95683b366866c57</td><td>International Institute of Information Technology</td><td></td></tr><tr><td>156cd2a0e2c378e4c3649a1d046cd080d3338bca</td><td>International Institute of Information Technology</td><td></td></tr><tr><td>0c79a39a870d9b56dc00d5252d2a1bfeb4c295f1</td><td>International Institute of Information Technology, Hyderabad, India</td><td></td></tr><tr><td>96e1ccfe96566e3c96d7b86e134fa698c01f2289</td><td>International Institute of Information Technology, Hyderabad, India</td><td></td></tr><tr><td>0c3f7272a68c8e0aa6b92d132d1bf8541c062141</td><td>International Islamic University, Islamabad 44000, Pakistan</td><td>Department of Computer Science and Software Engineering</td></tr><tr><td>fde0180735699ea31f6c001c71eae507848b190f</td><td>International University of</td><td></td></tr><tr><td>fde0180735699ea31f6c001c71eae507848b190f</td><td>International University of</td><td></td></tr><tr><td>fae83b145e5eeda8327de9f19df286edfaf5e60c</td><td>Ionian University</td><td></td></tr><tr><td>966e36f15b05ef8436afecf57a97b73d6dcada94</td><td>Iran</td><td>Computer Engineering Department University of Isfahan</td></tr><tr><td>6fda12c43b53c679629473806c2510d84358478f</td><td>Islamic Azad University</td><td>Department of Computer Science</td></tr><tr><td>ad8540379884ec03327076b562b63bc47e64a2c7</td><td>Islamic Azad University</td><td></td></tr><tr><td>7cffcb4f24343a924a8317d560202ba9ed26cd0b</td><td>Islamic Azad University</td><td></td></tr><tr><td>841bf196ee0086c805bd5d1d0bddfadc87e424ec</td><td>Islamic Azad 
University</td><td></td></tr><tr><td>39dc2ce4cce737e78010642048b6ed1b71e8ac2f</td><td>Islamic Azad University of AHAR</td><td></td></tr><tr><td>19f076998ba757602c8fec04ce6a4ca674de0e25</td><td>Islamic Azad University, Gonabad, Iran</td><td>Department of Control and Electrical Engineering</td></tr><tr><td>11a210835b87ccb4989e9ba31e7559bb7a9fd292</td><td>Islamic Azad University, Mashhad Branch, Mashhad, Iran</td><td>a Department of Artificial Intelligence</td></tr><tr><td>ceb763d6657a07b47e48e8a2956bcfdf2cf10818</td><td>Islamic Azad University, Qazvin, Iran</td><td></td></tr><tr><td>53ce84598052308b86ba79d873082853022aa7e9</td><td>Islamic Azad University, Science and Research Campus</td><td>Department of Computer Engineering Hamedan Branch</td></tr><tr><td>ad247138e751cefa3bb891c2fe69805da9c293d7</td><td>Islamic Azad University, Shahrood, Iran</td><td>Department of Electrical and Computer Engineering</td></tr><tr><td>d5fa9d98c8da54a57abf353767a927d662b7f026</td><td>Islamic University of Gaza - Palestine</td><td></td></tr><tr><td>0ce8a45a77e797e9d52604c29f4c1e227f604080</td><td>IslamicAzad University, Qazvin, Iran</td><td></td></tr><tr><td>8bed7ff2f75d956652320270eaf331e1f73efb35</td><td>Istanbul Bilgi University - DCE</td><td></td></tr><tr><td>fd53be2e0a9f33080a9db4b5a5e416e24ae8e198</td><td>Istanbul Technical University</td><td></td></tr><tr><td>26f03693c50eb50a42c9117f107af488865f3dc1</td><td>Istanbul Technical University</td><td></td></tr><tr><td>09733129161ca7d65cf56a7ad63c17f493386027</td><td>Istanbul Technical University</td><td></td></tr><tr><td>14b87359f6874ff9b8ee234b18b418e57e75b762</td><td>Istanbul Technical University</td><td></td></tr><tr><td>72f4aaf7e2e3f215cd8762ce283988220f182a5b</td><td>Istanbul Technical University, Istanbul, 34469, TURKEY</td><td>Department of Computer Engineering</td></tr><tr><td>2050847bc7a1a0453891f03aeeb4643e360fde7d</td><td>Istanbul Technical University, Istanbul, 
Turkey</td><td></td></tr><tr><td>d3d5d86afec84c0713ec868cf5ed41661fc96edc</td><td>Istanbul Technical University, Istanbul, Turkey</td><td></td></tr><tr><td>3d9db1cacf9c3bb7af57b8112787b59f45927355</td><td>Istanbul Technical University, Turkey</td><td></td></tr><tr><td>a5ade88747fa5769c9c92ffde9b7196ff085a9eb</td><td>Istanbul Technical University, Turkey</td><td></td></tr><tr><td>9dcc6dde8d9f132577290d92a1e76b5decc6d755</td><td>Istanbul University</td><td>Department of Electrical and Electronics Eng</td></tr><tr><td>070ab604c3ced2c23cce2259043446c5ee342fd6</td><td>IstanbulTechnicalUniversity</td><td></td></tr><tr><td>097340d3ac939ce181c829afb6b6faff946cdce0</td><td>Italian Institute of Technology, 5Mapillary Research</td><td></td></tr><tr><td>18a9f3d855bd7728ed4f988675fa9405b5478845</td><td>J. P. College of Engineering, India</td><td>Department of Electronics and Communication Engineering</td></tr><tr><td>f28b7d62208fdaaa658716403106a2b0b527e763</td><td>JACOB GOLDBERGER, Bar-Ilan University</td><td></td></tr><tr><td>ad784332cc37720f03df1c576e442c9c828a587a</td><td>JDL, Institute of Computing Technology, CAS, P.O. 
Box 2704, Beijing, China</td><td></td></tr><tr><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td><td>Jacobs University</td><td></td></tr><tr><td>6f0900a7fe8a774a1977c5f0a500b2898bcbe149</td><td>Jadavpur University</td><td>Department of Computer Science and Engineering</td></tr><tr><td>3f4bfa4e3655ef392eb5ad609d31c05f29826b45</td><td>Jadavpur University</td><td></td></tr><tr><td>aaeb8b634bb96a372b972f63ec1dc4db62e7b62a</td><td>Jadavpur University, India</td><td>Department of Printing Engineering</td></tr><tr><td>aaeb8b634bb96a372b972f63ec1dc4db62e7b62a</td><td>Jadavpur University, India</td><td>Department of Computer Science and Engineering</td></tr><tr><td>4d01d78544ae0de3075304ff0efa51a077c903b7</td><td>Jahangirnagar University</td><td></td></tr><tr><td>8f8a5be9dc16d73664285a29993af7dc6a598c83</td><td>Jahangirnagar University, Savar, Dhaka 1342, Bangladesh</td><td>Department of Computer Science and Engineering</td></tr><tr><td>58db008b204d0c3c6744f280e8367b4057173259</td><td>Jaipur, Rajasthan, India</td><td>aDepartment of Computer Engineering Malaviya National Institute of Technology</td></tr><tr><td>13f6ab2f245b4a871720b95045c41a4204626814</td><td>Janelia Research Campus, Howard Hughes Medical Institute, Ashburn, United</td><td></td></tr><tr><td>c0723e0e154a33faa6ff959d084aebf07770ffaf</td><td>Japan</td><td>Department of Life System Science and Technology Chukyo University</td></tr><tr><td>9ed943f143d2deaac2efc9cf414b3092ed482610</td><td>Japan Advanced Institute of Science and Technology</td><td></td></tr><tr><td>26c884829897b3035702800937d4d15fef7010e4</td><td>Japan Advanced Institute of Science and Technology</td><td></td></tr><tr><td>982f5c625d6ad0dac25d7acbce4dabfb35dd7f23</td><td>Japan Advanced Institute of Science and Technology</td><td></td></tr><tr><td>76d939f73a327bf1087d91daa6a7824681d76ea1</td><td>Japan Advanced Institute of Science and Technology</td><td></td></tr><tr><td>c180f22a9af4a2f47a917fd8f15121412f2d0901</td><td>Japan Advanced Institute of 
Science and Technology, Ishikawa-ken 923-1211, Japan</td><td></td></tr><tr><td>5865e824e3d8560e07840dd5f75cfe9bf68f9d96</td><td>Japan, 2 Center for Special Needs Education, Nara University of Education, Nara-shi, Nara</td><td></td></tr><tr><td>f19777e37321f79e34462fc4c416bd56772031bf</td><td>Jawaharlal Technological University, Anantapur</td><td></td></tr><tr><td>0229829e9a1eed5769a2b5eccddcaa7cd9460b92</td><td>Jet Propulsion Laboratory, California Institute of Technology, Pasadena, CA</td><td></td></tr><tr><td>493ec9e567c5587c4cbeb5f08ca47408ca2d6571</td><td>Jiangnan University, Wuxi</td><td></td></tr><tr><td>aac39ca161dfc52aade063901f02f56d01a1693c</td><td>Jilin University, Changchun 130012, China</td><td></td></tr><tr><td>f5fae7810a33ed67852ad6a3e0144cb278b24b41</td><td>Jo ef Stefan Institute, Jamova 39, 1000 Ljubljana, Slovenia</td><td></td></tr><tr><td>8320dbdd3e4712cca813451cd94a909527652d63</td><td>Johannes Kepler University(cid:1) Institute of Systems Science(cid:1) A(cid:2)
diff --git a/scraper/reports/institutions_found.csv b/scraper/reports/institutions_found.csv
new file mode 100644
index 00000000..d1fce0a4
--- /dev/null
+++ b/scraper/reports/institutions_found.csv
@@ -0,0 +1,1042 @@
+AALTO UNIVERSITY,60.18558755,24.824273298775,"Aalto, 24, Otakaari, Otaniemi, Suur-Tapiola, Espoo, Helsingin seutukunta, Uusimaa, Etelä-Suomi, Manner-Suomi, 02150, Suomi"
+AI Institute,-34.6102167,-58.3752244291708,"INDEC, 609, Avenida Presidente Julio A. Roca, Microcentro, Comuna 1, Monserrat, CABA, C1067ABB, Argentina"
+ALICE Institute,-8.82143045,13.2347076178375,"Instituto Superior de Ciências da Educação (ISCED), Rua Salvador Allende (Salvador Guillermo Allende Gossens), Maculusso, Maianga, Município de Luanda, Luanda, 927, Angola"
+ARISTOTLE UNIVERSITY OF THESSALONIKI,40.62984145,22.9588934957528,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα"
+"Aalborg University, Denmark",57.01590275,9.97532826658991,"AAU, Pontoppidanstræde, Sønder Tranders, Aalborg, Aalborg Kommune, Region Nordjylland, 9220, Danmark"
+"Aberystwyth University, UK",52.4107358,-4.05295500914411,"Aberystwyth University, Llanbadarn Campus, Cefn Esgair, Waun Fawr, Comins Coch, Ceredigion, Wales, SY23 3JG, UK"
+Akita Prefectural University,39.8011499,140.045911602376,"秋田県立大学, 秋田天王線, 潟上市, 秋田県, 東北地方, 011-0946, 日本"
+"Akita Prefectural University, Yurihonjo, Japan",39.39325745,140.073500465928,"秋田県立大学, 日本海東北自動車道(無料区間), 八幡前, 由利本荘市, 秋田県, 東北地方, 〒015-0836, 日本"
+Akita University,39.7278142,140.133225661449,"秋田大学手形キャンパス, 秋田八郎潟線, 手形字扇田, 広面, 秋田市, 秋田県, 東北地方, 010-0864, 日本"
+"Akita University, Akita, Japan",39.7291921,140.136565773585,"秋田大学鉱業博物館, 2, 秋田八郎潟線, 手形字扇田, 広面, 秋田市, 秋田県, 東北地方, 010-8502, 日本"
+"Alexandria University, Alexandria, Egypt",31.21051105,29.9131456239399,"جامعة الإسكندرية, الكورنيش, إبراهيمية, الإسكندرية, 21522, مصر"
+"American University, Washington, DC, USA",38.93804505,-77.0893922365193,"American University, 4400, Massachusetts Avenue Northwest, Spring Valley, American University Park, D.C., 20016, USA"
+Amherst College,42.37289,-72.518814,"Amherst College, Boltwood Avenue, Amherst, Hampshire, Massachusetts, 01004, USA"
+Amirkabir University of Technology,35.704514,51.4097205774739,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎"
+"Amirkabir University of Technology, Tehran",35.704514,51.4097205774739,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎"
+"Amirkabir University of Technology, Tehran. Iran",35.704514,51.4097205774739,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎"
+"Amity University, Lucknow, India",26.85095965,81.0495096452828,"Amity University, Faizabad Road, Uttardhauna, Gomti Nagar, Tiwariganj, Lucknow, Uttar Pradesh, 226010, India"
+Anna University,13.0105838,80.2353736,"Anna University, Nuclear Physics Road, Srinagar Colony, Ward 171, Zone 13 Adyar, Chennai, Chennai district, Tamil Nadu, 600025, India"
+"Anna University Chennai, India",13.0105838,80.2353736,"Anna University, Nuclear Physics Road, Srinagar Colony, Ward 171, Zone 13 Adyar, Chennai, Chennai district, Tamil Nadu, 600025, India"
+"Anna University, Chennai",13.0105838,80.2353736,"Anna University, Nuclear Physics Road, Srinagar Colony, Ward 171, Zone 13 Adyar, Chennai, Chennai district, Tamil Nadu, 600025, India"
+Aristotle University of Thessaloniki,40.62984145,22.9588934957528,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα"
+Aristotle University of Thessaloniki GR,40.62984145,22.9588934957528,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα"
+"Aristotle University of Thessaloniki, GR-54124 Thessaloniki, Greece",40.62984145,22.9588934957528,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα"
+"Aristotle University of Thessaloniki, Greece",40.62984145,22.9588934957528,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα"
+Arizona State University,33.30715065,-111.676531568996,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA"
+"Arizona State University, AZ, USA",33.30715065,-111.676531568996,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA"
+"Arizona State University, Tempe AZ",33.4206602,-111.932634924965,"Arizona State University, Palm Walk, Tempe, Maricopa County, Arizona, 85287, USA"
+"Asia Pacific University of Technology and Innovation, Kuala Lumpur 57000, Malaysia",3.0552109,101.7005831,"Asia Pacific University of Technology and Innovation (APU), Astro North Entrance, Astro, Sungai Besi, KL, 57000, Malaysia"
+Australian Institute of Sport,-35.24737535,149.104454269689,"Australian Institute of Sport, Glenn McGrath Street, Bruce, Belconnen, Australian Capital Territory, 2617, Australia"
+Australian National University,-37.81354365,144.971791681654,"Australian National University, 52, Collins Street, Melbourne City, City of Melbourne, Victoria, 3000, Australia"
+"Australian National University, Canberra",-35.28121335,149.11665331324,"Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia"
+"Australian National University, Canberra, ACT 0200, Australia",-35.28121335,149.11665331324,"Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia"
+"Azad University, Qazvin, Iran",36.3173432,50.0367286,"پل دانشگاه آزاد, باراجین, پونک ۳, قزوین, بخش مرکزی, شهرستان قزوین, استان قزوین, ‏ایران‎"
+B.S. University of Central Florida,28.59899755,-81.1971250118395,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA"
+Bahcesehir University,41.02451875,28.9769795349346,"BAU Galata, 24, Kemeraltı Caddesi, Müeyyedzade, Beyoğlu, İstanbul, Marmara Bölgesi, 34425, Türkiye"
+Banaras Hindu University,25.2662887,82.9927969,"काशी हिन्दू विश्वविद्यालय, Semi Circle Road 2, ワーラーナシー, Jodhpur Colony, Vārānasi, Varanasi, Uttar Pradesh, 221005, India"
+Bangalore Institute of Technology,12.9551259,77.5741985,"Bangalore Institute of Technology, Krishna Rajendra Road, Mavalli, Vishveshwara Puram, South Zone, Bengaluru, Bangalore Urban, Karnataka, 560004, India"
+"Bapuji Institute of Engineering and Technology Davanagere, Karnataka, India",14.4443949,75.9027655185535,"Bapuji Institute of Engineering and Technology, 2nd Cross Road, K.T. Jambanna Nagara, Davanagere, Davanagere taluku, Davanagere district, Karnataka, 577000, India"
+"Bar Ilan University, Israel",32.06932925,34.8433433861531,"אוניברסיטת בר אילן, כביש גהה, גבעת שמואל, קריית מטלון, גבעת שמואל, מחוז תל אביב, NO, ישראל"
+Bas kent University,52.08340265,5.14828494152362,"University College Utrecht 'Babel', 7, Campusplein, Utrecht, Nederland, 3584 ED, Nederland"
+Beckman Institute,40.11571585,-88.2275077179639,"Beckman Institute, The Presidents' Walk, Urbana, Champaign County, Illinois, 61801-2341, USA"
+Beihang University,39.9808333,116.341012492788,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国"
+"Beihang University, Beijing 100191, China",39.9808333,116.341012492788,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国"
+"Beijing Institute of Technology University, P. R. China",39.9586652,116.309712808455,"北京理工大学, 5, 中关村南大街, 中关村, 稻香园南社区, 海淀区, 北京市, 100872, 中国"
+"Beijing Institute of Technology, Beijing 100081 CHINA",39.9586652,116.309712808455,"北京理工大学, 5, 中关村南大街, 中关村, 稻香园南社区, 海淀区, 北京市, 100872, 中国"
+"Beijing Institute of Technology, Beijing, China",39.9586652,116.309712808455,"北京理工大学, 5, 中关村南大街, 中关村, 稻香园南社区, 海淀区, 北京市, 100872, 中国"
+"Beijing Institute of Technology, China",39.9586652,116.309712808455,"北京理工大学, 5, 中关村南大街, 中关村, 稻香园南社区, 海淀区, 北京市, 100872, 中国"
+Beijing Jiaotong University,39.94976005,116.33629045844,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国"
+"Beijing Normal University, China",39.96014155,116.359704380265,"北京师范大学, 19, 新街口外大街, 西城区, 100875, 中国"
+"Beijing Union University, 100101, China",39.9890068,116.420677175386,"北京联合大学, 北四环东路, 飘亮阳光广场, 太阳宫乡, 朝阳区 / Chaoyang, 北京市, 100012, 中国"
+Beijing University of Posts and Telecommunications,39.9601488,116.351939210403,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国"
+"Beijing University of Posts and Telecommunications, Beijing, China",39.9601488,116.351939210403,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国"
+"Beijing University of Posts and Telecommunications, Beijing, P.R. China",39.9601488,116.351939210403,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国"
+"Beijing University of Technology, Beijing 100022, China",39.87391435,116.477222846574,"北京工业大学, 银杏大道, 大郊亭村, 朝阳区 / Chaoyang, 北京市, 3208, 中国"
+"Beijing, China",39.906217,116.3912757,"北京市, 东城区, 北京市, 100010, 中国"
+"Benha University, Egypt",30.0818727,31.2445484105016,"كلية الهندسة بشبرا جامعة بنها, شارع اليازجي, روض الفرج, القاهرة, محافظة القاهرة, 2466, مصر"
+"Bharathidasan University, Trichy, India",10.7778845,78.6966319,"Bharathidasan University Road, Kajamalai, Ponmalai, Ponmalai Zone, Tiruchchirāppalli, Tiruchchirappalli district, Tamil Nadu, 620020, India"
+Bielefeld University,52.0280421,8.51148270115395,"Fachhochschule Bielefeld FB Gestaltung, 3, Lampingstraße, Mitte, Bielefeld, Regierungsbezirk Detmold, Nordrhein-Westfalen, 33615, Deutschland"
+"Bilkent University, 06800 Cankaya, Turkey",39.8720489,32.7539515466323,"Bilkent Üniversitesi, 3. Cadde, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye"
+"Bogazici University, Bebek",41.0868841,29.0441316722649,"Boğaziçi Üniversitesi Kuzey Yerleşkesi, Okulaltı 1. Sokak, Rumelihisarı, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34467, Türkiye"
+"Bogazici University, Turkey",41.08327335,29.0503931951846,"Boğaziçi Üniversitesi Güney Yerleşkesi, Sehitlikdergahı Sokağı, Beşiktaş, İstanbul, Marmara Bölgesi, 33345, Türkiye"
+Boston College,42.3354481,-71.1681386402306,"Boston College, 140, Commonwealth Avenue, Chestnut Hill, Newton, Middlesex County, Massachusetts, 02467, USA"
+"Boston College, USA",42.3354481,-71.1681386402306,"Boston College, 140, Commonwealth Avenue, Chestnut Hill, Newton, Middlesex County, Massachusetts, 02467, USA"
+Boston University,42.3504253,-71.1005611418395,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA"
+"Boston University, Boston, MA",42.3504253,-71.1005611418395,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA"
+"Boston University, USA",42.3504253,-71.1005611418395,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA"
+Bournemouth University,50.74223495,-1.89433738695589,"Bournemouth University, BU footpaths, Poole, South West England, England, BH10 4HX, UK"
+"Bournemouth University, UK",50.74223495,-1.89433738695589,"Bournemouth University, BU footpaths, Poole, South West England, England, BH10 4HX, UK"
+Brown University,41.8268682,-71.4012314581107,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA"
+"Brown University, Providence, RI",41.8268682,-71.4012314581107,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA"
+"Brown University, United States",41.8268682,-71.4012314581107,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA"
+CALIFORNIA INSTITUTE OF TECHNOLOGY,34.13710185,-118.125274866116,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA"
+CARNEGIE MELLON UNIVERSITY,37.4102193,-122.059654865858,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA"
+COLUMBIA UNIVERSITY,40.8419836,-73.9436897071772,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA"
+COMSATS Institute of Information Technology,31.4006332,74.2137296,"COMSATS Institute of Information Technology, Ali Akbar Road, Dawood Residency, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54700, ‏پاکستان‎"
+"COMSATS Institute of Information Technology, Islamabad",33.65010145,73.1551494914791,"COMSATS Institute of Information Technology, Fence, Chak Shehzad, وفاقی دارالحکومت اسلام آباد, 45550, ‏پاکستان‎"
+"COMSATS Institute of Information Technology, Lahore 54000, Pakistan",31.4006332,74.2137296,"COMSATS Institute of Information Technology, Ali Akbar Road, Dawood Residency, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54700, ‏پاکستان‎"
+"COMSATS Institute of Information Technology, Pakistan",31.4006332,74.2137296,"COMSATS Institute of Information Technology, Ali Akbar Road, Dawood Residency, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54700, ‏پاکستان‎"
+CUNY City College,45.5546608,5.4065255,"Cuny, La Tour-du-Pin, Isère, Auvergne-Rhône-Alpes, France métropolitaine, 38110, France"
+California Institute of Technology,34.13710185,-118.125274866116,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA"
+"California Institute of Technology, Pasadena, CA",34.13710185,-118.125274866116,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA"
+"California Institute of Technology, Pasadena, CA, USA",34.13710185,-118.125274866116,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA"
+"California Institute of Technology, Pasadena, California, USA",34.13710185,-118.125274866116,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA"
+"California Institute of Technology, USA",34.13710185,-118.125274866116,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA"
+Cambridge Research Laboratory,52.17333465,0.149899463173698,"Strangeways Research Laboratory, Babraham Road, Romsey, Cambridge, Cambridgeshire, East of England, England, CB1 8RN, UK"
+Cambridge University,50.7944026,-1.0971748,"University, Cambridge Road, Old Portsmouth, Portsmouth, South East, England, PO1 2HB, UK"
+"Capital Normal University, 100048, China",39.92864575,116.30104052087,"首都师范大学, 岭南路, 西冉村, 海淀区, 100048, 中国"
+Cardi University,10.6435074,-61.4022996445292,"CARDI, University of the West Indies, Saint Augustine, Tunapuna-Piarco, 686, Trinidad and Tobago"
+"Cardiff University, UK",51.4879961,-3.17969747443907,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK"
+Carleton University,45.3860843,-75.6953926739404,"Carleton University, 1125, Colonel By Drive, Billings Bridge, Capital, Ottawa, Ontario, K1S 5B7, Canada"
+Carnegie Mellon University,37.4102193,-122.059654865858,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA"
+"Carnegie Mellon University Pittsburgh, PA - 15213, USA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+"Carnegie Mellon University Pittsburgh, PA, USA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+"Carnegie Mellon University, Pittsburgh PA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+"Carnegie Mellon University, Pittsburgh, PA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+"Carnegie Mellon University, Pittsburgh, PA 15213, USA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+"Carnegie Mellon University, Pittsburgh, PA, 15213, USA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+"Carnegie Mellon University, Pittsburgh, PA, USA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+"Carnegie Mellon University, Pittsburgh, USA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+"Carnegie Mellon University, USA",37.4102193,-122.059654865858,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA"
+"Central Tehran Branch, Azad University",35.753318,51.370631,"دانشگاه آزاد شعبه مرکزی تربیت بدنی, بلوار ایران زمین, شهرک غرب, منطقه ۲ شهر تهران, تهران, بخش رودبارقصران, شهرستان شمیرانات, استان تهران, 14658, ‏ایران‎"
+Central Washington University,47.00646895,-120.53673039883,"Central Washington University, Dean Nicholson Boulevard, Ellensburg, Kittitas County, Washington, 98926, USA"
+Charles Sturt University,-35.0636071,147.3552234,"Charles Sturt University, Wagga Wagga, NSW, 2678, Australia"
+"Chonbuk National University, Jeonju-si",35.84658875,127.135013303058,"전북대학교, 567, 백제대로, 금암동, 덕진구, 전주시, 전북, 54896, 대한민국"
+Chosun University,35.1441031,126.9257858,"조선대역, 서남로, 남동, 동구, 광주, 61473, 대한민국"
+"Chu Hai College of Higher Education, Hong Kong",22.3760643,113.987153890134,"珠海學院 Chu Hai College of Higher Education, 80, 青盈路 Tsing Ying Road, 嘉和里 Ka Wo Lei, 小秀村 Siu Sau Tsuen, 屯門區 Tuen Mun District, 新界 New Territories, HK, DD132 586, 中国"
+Chubu University,35.2742655,137.013278412463,"中部大学, 国道19号, 春日井市, 愛知県, 中部地方, 487-8501, 日本"
+"Chulalongkorn University, Bangkok",13.74311795,100.532879009091,"จุฬาลงกรณ์มหาวิทยาลัย, 254, ถนนพญาไท, สยาม, แขวงปทุมวัน, เขตปทุมวัน, กรุงเทพมหานคร, 10330, ประเทศไทย"
+"Chung-Ang University, Seoul, Korea",37.50882,126.9619,"중앙대학교, 서달로15길, 흑석동, 동작구, 서울특별시, 06981, 대한민국"
+Chungnam National University,36.37029045,127.347804575184,"충남대학교, 대덕사이언스길 2코스, 온천2동, 온천동, 유성구, 대전, 34140, 대한민국"
+City University of Hong Kong,22.34000115,114.169702912423,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国"
+"City University of Hong Kong, Hong Kong, China",22.34000115,114.169702912423,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国"
+Clemson University,34.66869155,-82.837434756078,"Clemson University, Old Stadium Road, Clemson Heights, Pickens County, South Carolina, 29631, USA"
+"Clemson University, Clemson, SC",34.67871075,-82.8346790794026,"E-06 Parking, Parkway Drive, Pickens County, South Carolina, SC, USA"
+Coburg University,50.26506145,10.9519648264628,"Hochschule für angewandte Wissenschaften Coburg, 2, Friedrich-Streib-Straße, Callenberg, Coburg, Oberfranken, Bayern, 96450, Deutschland"
+"College Heights Blvd, Bowling Green, KY",36.9881671,-86.4542111,"College Heights Boulevard, Bowling Green, Warren County, Kentucky, 42101, USA"
+"College Park, MD",38.980666,-76.9369189,"College Park, Prince George's County, Maryland, USA"
+"College Park, MD 20742 USA",38.980666,-76.9369189,"College Park, Prince George's County, Maryland, USA"
+"College Park, MD, 20740, USA",38.980666,-76.9369189,"College Park, Prince George's County, Maryland, USA"
+"College Park, Maryland",38.980666,-76.9369189,"College Park, Prince George's County, Maryland, USA"
+"College Park, USA",38.980666,-76.9369189,"College Park, Prince George's County, Maryland, USA"
+"College Park, United States",38.980666,-76.9369189,"College Park, Prince George's County, Maryland, USA"
+College of Computer and Information Science,42.3192923,-83.2343465549018,"Computer & Information Science, John Montieth Boulevard, Dearborn, Wayne County, Michigan, 48128, USA"
+College of Computing,-6.1992922,39.3081862,"computing, Tunguu, Unguja Kusini, Zanzibar, 146, Tanzania"
+College of Electrical and Information Engineering,42.0049791,21.40834315,"Факултет за електротехника и информациски технологии, Орце Николов, Карпош 2, Карпош, Скопје, Општина Карпош, Град Скопје, Скопски Регион, 1000, Македонија"
+"College of Engineering Pune, India",18.52930005,73.8568253702551,"College of Engineering, Pune, NH753F, Mangalwar Peth, Pune, Pune District, Maharashtra, 411011, India"
+College of Engineering and Computer Science,25.7589624,-80.3738881489383,"ECS, University Drive, Sweetwater, Lil Abner Mobile Home Park, Miami-Dade County, Florida, 33199, USA"
+"College of Engineering, Pune, India",18.52930005,73.8568253702551,"College of Engineering, Pune, NH753F, Mangalwar Peth, Pune, Pune District, Maharashtra, 411011, India"
+College of Informatics,14.6173885,121.101327315511,"Informatics, F.P. Felix Avenue, Dela Paz, San Isidro, Cainta, Rizal, Metro Manila, 1900, Philippines"
+Colorado State University,40.5709358,-105.086552556269,"Colorado State University, West Pitkin Street, Woodwest, Fort Collins, Larimer County, Colorado, 80526-2002, USA"
+"Colorado State University, Fort Collins, Colorado, USA",40.5709358,-105.086552556269,"Colorado State University, West Pitkin Street, Woodwest, Fort Collins, Larimer County, Colorado, 80526-2002, USA"
+Columbia University in the City of New York,40.8071772,-73.9625279772072,"Columbia University In The City Of New York, College Walk, Morningside Heights, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10027, USA"
+"Columbia University, New York NY 10027, USA",40.81779415,-73.9578531933627,"Columbia University, West 131st Street, Manhattanville Houses, Manhattanville, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10027, USA"
+"Columbia University, New York, NY",40.8419836,-73.9436897071772,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA"
+"Columbia University, New York, NY 10027, USA",40.81779415,-73.9578531933627,"Columbia University, West 131st Street, Manhattanville Houses, Manhattanville, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10027, USA"
+"Columbia University, New York, NY, USA",40.8419836,-73.9436897071772,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA"
+"Columbia University, USA",40.8419836,-73.9436897071772,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA"
+"Columbia University, United States",40.8419836,-73.9436897071772,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA"
+Concordia University,45.57022705,-122.637093463826,"Concordia University, 2811, Northeast Holman Street, Concordia, Portland, Multnomah County, Oregon, 97211, USA"
+"Concordia University, Canada",45.4955911,-73.5775043,"FOFA Gallery, 1515, Rue Sainte-Catherine Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3H 2T2, Canada"
+Cornell University,42.4505507,-76.4783512955428,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA"
+"Cornell University, Ithaca, New York",42.4505507,-76.4783512955428,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA"
+Courant Institute,40.7286994,-73.9957151,"NYU Courant Institute of Mathematical Sciences, 251, Mercer Street, Washington Square Village, Greenwich Village, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA"
+Courant Institute of Mathematical Sciences,40.7286484,-73.9956863,"Courant Institute of Mathematical Sciences, 251, Mercer Street, Washington Square Village, Greenwich Village, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA"
+"Courant Institute of Mathematical Sciences, New York, NY",40.7286484,-73.9956863,"Courant Institute of Mathematical Sciences, 251, Mercer Street, Washington Square Village, Greenwich Village, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA"
+"Curtin University, Perth, Australia",-32.00574155,115.892864389257,"Curtin University, B201 L2 Entry South, Waterford, Perth, Western Australia, 6102, Australia"
+Cyprus University of Technology,34.67567405,33.0457764820597,"Mitropoli Building - Cyprus University of Technology, Anexartisias, Limasol - Λεμεσός, Limassol - Λεμεσός, Κύπρος - Kıbrıs, 3036, Κύπρος - Kıbrıs"
+"Cyprus University of Technology, Cyprus",34.67567405,33.0457764820597,"Mitropoli Building - Cyprus University of Technology, Anexartisias, Limasol - Λεμεσός, Limassol - Λεμεσός, Κύπρος - Kıbrıs, 3036, Κύπρος - Kıbrıs"
+Czech Technical University,50.0764296,14.418023122743,"České vysoké učení technické v Praze, Resslova, Nové Město, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 11121, Česko"
+"DIT UNIVERSITY, DEHRADUN",30.3983396,78.0753455,"DIT University, Dehradun-Mussoorie Road, Rājpur, Kincraig, Dehra Dūn, Uttarakhand, 248009, India"
+DUBLIN CITY UNIVERSITY,53.38522185,-6.25740874081493,"Dublin City University Glasnevin Campus, Lower Car Park, Wad, Whitehall A ED, Dublin 9, Dublin, County Dublin, Leinster, D09 FW22, Ireland"
+"Dalian University of Technology, China",38.88140235,121.522810980755,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国"
+"Dalian University of Technology, Dalian 116024, China",38.88140235,121.522810980755,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国"
+"Dalian University of Technology, Dalian, China",38.88140235,121.522810980755,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国"
+"Dankook University, 126 Jukjeon-dong, Suji-gu, Yongin-si, Gyeonggi-do, Korea",37.3219575,127.1250723,"단국대학교 치과병원, 죽전로, 죽전동, 수지구, 용인시, 경기, 16900, 대한민국"
+Dartmouth College,43.7047927,-72.2925909,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA"
+"Dartmouth College, NH 03755 USA",43.7070046,-72.2869048,"Dartmouth College, Maynard Street, Hanover, Grafton County, New Hampshire, 03755, USA"
+Delft University of Technology,51.99882735,4.37396036815404,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland"
+"Delft University of Technology, The Netherlands",51.99882735,4.37396036815404,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland"
+Democritus University of Thrace,40.84941785,25.8344493892098,"Δημοκρίτειο Πανεπιστήμιο Θράκης, Μάκρη - Αλεξανδρούπολη, Αλεξανδρούπολη, Δήμος Αλεξανδρούπολης, Περιφερειακή Ενότητα Έβρου, Περιφέρεια Ανατολικής Μακεδονίας και Θράκης, Μακεδονία - Θράκη, 68100, Ελλάδα"
+Dhaka University,23.7317915,90.3805625,"Faculty of Social Welfare, Dhaka University, Azimpur Koborsthan Road, বস্তি, হাজারীবাগ, ঢাকা, ঢাকা বিভাগ, 1950, বাংলাদেশ"
+Dr. B. C. Roy Engineering College,23.54409755,87.342697070434,"Dr. B. C. Roy Engineering College, Lenin Sarani, Durgapur, Bānkurā, West Bengal, 713200, India"
+Dr. Babasaheb Ambedkar Marathwada University,19.8960918,75.3089470267316,"Boys Hostel No. 3, Shantipura road, Cantonment, Bidri workshop, Aurangabad, Maharashtra, 431004, India"
+Drexel University,39.9574,-75.1902670552555,"Drexel University, Arch Street, Powelton Village, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA"
+Duke University,35.9990522,-78.9290629011139,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA"
+East China Normal University,31.2284923,121.402113889769,"华东师范大学, 3663, 中山北路, 曹家渡, 普陀区, 普陀区 (Putuo), 上海市, 200062, 中国"
+Eastern Mediterranean University,35.14479945,33.90492318497,"Eastern Mediterranean University (EMU) - Stadium, Nehir Caddesi, Gazimağusa, Αμμόχωστος - Mağusa, Kuzey Kıbrıs, 99450, Κύπρος - Kıbrıs"
+Eastern University,40.0505672,-75.3710932636663,"Eastern University, Huston Road, Radnor Township, Delaware County, Pennsylvania, 19087, USA"
+"Eindhoven University of Technology, The Netherlands",51.4486602,5.49039956550805,"Technische Universiteit Eindhoven, 2, De Rondom, Villapark, Eindhoven, Noord-Brabant, Nederland, 5600 MB, Nederland"
+"Electrical Engineering, University of",47.6532412,-122.3061707,"Electrical Engineering, 185, Loading Dock, Montlake, University District, Seattle, King County, Washington, 98195-2350, USA"
+Eskisehir Osmangazi University,39.7487516,30.4765307102195,"Eskişehir Osmangazi Üniversitesi Meşelik Yerleşkesi, Kütahya-Eskişehir yolu, Sazova Mahallesi, Karagözler, Tepebaşı, Eskişehir, İç Anadolu Bölgesi, 26160, Türkiye"
+FL,27.7567667,-81.4639835,"Florida, USA"
+"Feng Chia University, Taichung, Taiwan",24.18005755,120.648360719503,"逢甲大學, 100, 文華路, 西平里, 西屯區, 臺中市, 40724, 臺灣"
+"Ferdowsi University of Mashhad, Mashhad, Iran",36.3076616,59.5269051097667,"دانشگاه فردوسی مشهد, بولوار دانش, رضاشهر, منطقه ۹, مشهد, شهرستان مشهد, استان خراسان رضوی, 9177146164, ‏ایران‎"
+Firat University,39.7275037,39.4712703382844,"Erzincan Üniversitesi Hukuk Fakültesi Dekanlığı, Sivas-Erzincan yolu, Üçkonak, Erzincan, Erzincan merkez, Erzincan, Doğu Anadolu Bölgesi, 24000, Türkiye"
+"Florida Institute Of Technology, Melbourne Fl",28.0642296,-80.6230097241205,"Florida Institute of Technology, West University Boulevard, Melbourne, Brevard County, Florida, 32901, USA"
+Florida International University,25.75533775,-80.3762889746807,"FIU, Southwest 14th Street, Sweetwater, University Park, Miami-Dade County, Florida, 33199, USA"
+Florida State University,30.44235995,-84.2974786716626,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA"
+"Florida State University, Tallahassee, FL 32306, USA",30.44235995,-84.2974786716626,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA"
+"Foundation University Rawalpindi Campus, Pakistan",33.5609504,73.0712596618793,"Foundation University Rawalpindi Campus, Main Parking Road, Police Lines, راولپنڈی, Rawalpindi Cantt, پنجاب, 46600, ‏پاکستان‎"
+Fraser University,44.9689836,-93.2094162948556,"Fraser, 3333, University Avenue Southeast, Prospect Park - East River Road, Minneapolis, Hennepin County, Minnesota, 55414, USA"
+Fudan University,31.30104395,121.500454969435,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国"
+"Fudan University, Shanghai, China",31.30104395,121.500454969435,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国"
+GE Global Research Center,42.8298248,-73.8771938492793,"GE Global Research Center, Aqueduct, Niskayuna, Schenectady County, New York, USA"
+Gdansk University of Technology,54.37086525,18.6171601574695,"PG, Romualda Traugutta, Królewska Dolina, Wrzeszcz Górny, Gdańsk, pomorskie, 80-233, RP"
+George Mason University,38.83133325,-77.3079883887912,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA"
+"George Mason University, Fairfax Virginia, USA",38.83133325,-77.3079883887912,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA"
+"George Mason University, Fairfax, VA, USA",38.83133325,-77.3079883887912,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA"
+Georgia Institute of Technology,33.776033,-84.3988408600158,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA"
+"Georgia Institute of Technology, Atlanta, Georgia, USA",33.776033,-84.3988408600158,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA"
+Glyndwr University,53.05373795,-3.00482075353073,"Glyndŵr University, Mold Road, Rhosrobin, Wrexham, Wales, LL11 2AW, UK"
+Graz University of Technology,47.05821,15.460195677136,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich"
+"Graz University of Technology, Austria",47.05821,15.460195677136,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich"
+Guangdong Medical College,23.1294489,113.343761097683,"医学院, 真如路, 凤凰新村, 天河区, 广州市, 广东省, 510635, 中国"
+Hacettepe University,39.86742125,32.7351907206768,"Hacettepe Üniversitesi Beytepe Kampüsü, Hacettepe-Beytepe Kampüs Yolu, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye"
+Halmstad University,56.66340325,12.8792972689712,"Högskolan i Halmstad, 3, Kristian IV:s väg, Larsfrid, Nyhem, Halmstad, Hallands län, Götaland, 301 18, Sverige"
+"Hankuk University of Foreign Studies, South Korea",37.5953979,127.0630499,"외대앞, 휘경로, 이문동, 이문2동, 동대문구, 서울특별시, 02407, 대한민국"
+Hanoi University of Science and Technology,21.003952,105.843601832826,"HUST, Trần Đại Nghĩa, Hai Bà Trưng, Hà Nội, 10999, Việt Nam"
+Hanyang University,37.5557271,127.0436642,"한양대, 206, 왕십리로, 사근동, 성동구, 서울특별시, 04763, 대한민국"
+Harbin Institute of Technology,45.7413921,126.625527550394,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国"
+"Harbin Institute of Technology, Harbin 150001, China",45.7413921,126.625527550394,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国"
+"Harbin Institute of Technology, Harbin, China",45.7413921,126.625527550394,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国"
+Harbin Institute of Technology;Shenzhen University,22.5895016,113.965710495775,"哈工大(深圳), 平山一路, 深圳大学城, 珠光村, 南山区, 深圳市, 广东省, 518000, 中国"
+Harvard University,42.36782045,-71.1266665287448,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA"
+"Harvard University, Cambridge, MA",42.36782045,-71.1266665287448,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA"
+"Harvard University, Cambridge, MA, USA",42.36782045,-71.1266665287448,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA"
+"Harvard University, USA",42.36782045,-71.1266665287448,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA"
+Harvard and Massachusetts Institute,42.5268445,-71.6525446,"Massachusetts Correctional Institute Shirley Minimum Security Library, Harvard Road, Shaker Village, Shirley, Middlesex County, Massachusetts, 01464, USA"
+"Hengyang Normal University, Hengyang, China",26.8661136,112.620921219792,"衡阳师范学院, 黄白路, 雁峰区, 衡阳市 / Hengyang, 湖南省, 中国"
+Heriot-Watt University,55.91029135,-3.32345776559167,"Heriot-Watt University - Edinburgh Campus, Third Gait, Currie, Gogarbank, City of Edinburgh, Scotland, EH14 4AS, UK"
+"Hiroshima University, Japan",34.4019766,132.7123195,"Hiroshima University 広島大学 東広島キャンパス, 出会いの道 Deai-no-michi Str., 西条下見, 東広島市, 広島県, 中国地方, 739-0047, 日本"
+HoHai University,32.05765485,118.755000398628,"河海大学, 河海路, 小市桥, 鼓楼区, 南京市, 江苏省, 210013, 中国"
+Hong Kong Baptist University,22.3874201,114.2082222,"香港浸會大學 Hong Kong Baptist University, 安明街 On Ming Street, 石門 Shek Mun, 石古壟 Shek Kwu Lung, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1132, 中国"
+Hong Kong Polytechnic University,22.304572,114.179762852269,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国"
+"Hong Kong Polytechnic University, Hong Kong",22.304572,114.179762852269,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国"
+"Hong Kong Polytechnic University, Hong Kong, China",22.304572,114.179762852269,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国"
+Hong Kong University of Science and Technology,22.3386304,114.2620337,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国"
+"Hong Kong University of Science and Technology, Hong Kong",22.3386304,114.2620337,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国"
+"Howard University, Washington DC",38.921525,-77.019535656678,"Howard University, College Street Northwest, Howard University, Washington, D.C., 20001, USA"
+Huazhong University of,22.53367445,113.917874206261,"深圳市第六人民医院, 89号, 桃园路, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518000, 中国"
+Huazhong University of Science and Technology,30.5097537,114.4062881,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国"
+"Huazhong University of Science and Technology, Wuhan, China",30.5097537,114.4062881,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国"
+"Humboldt-University, Berlin, Germany",52.51875685,13.3935604936378,"Humboldt-Universität zu Berlin, Dorotheenstraße, Spandauer Vorstadt, Mitte, Berlin, 10117, Deutschland"
+Hunan University,26.88111275,112.628506656425,"Yejin University for Employees, 冶金西路, 和平乡, 珠晖区, 衡阳市 / Hengyang, 湖南省, 中国"
+"IBM Almaden Research Center, San Jose CA",37.21095605,-121.807486683178,"IBM Almaden Research Center, San José, Santa Clara County, California, USA"
+IBM Thomas J. Watson Research Center,41.21002475,-73.8040705573196,"IBM Yorktown research lab, Adams Road, Millwood, Town of New Castle, Westchester County, New York, 10562, USA"
+IDIAP RESEARCH INSTITUTE,46.109237,7.08453548522408,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra"
+IDIAP Research Institute,46.109237,7.08453548522408,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra"
+"IDIAP Research Institute, Martigny, Switzerland",46.109237,7.08453548522408,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra"
+IMPERIAL COLLEGE,39.9458551,116.406973072869,"国子监, 五道营胡同, Naga上院, 北京市, 东城区, 北京市, 100010, 中国"
+Idiap Research Institute,46.109237,7.08453548522408,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra"
+"Idiap Research Institute, Martigny, Switzerland",46.109237,7.08453548522408,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra"
+"Idiap Research Institute, Switzerland",46.109237,7.08453548522408,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra"
+Illinois Institute of Technology,41.8361963,-87.6265591274291,"Illinois Institute of Technology, South State Street, Bronzeville, Chicago, Cook County, Illinois, 60616, USA"
+"Illinois Institute of Technology, Chicago, Illinois, USA",41.8361963,-87.6265591274291,"Illinois Institute of Technology, South State Street, Bronzeville, Chicago, Cook County, Illinois, 60616, USA"
+Imperial College London,51.49887085,-0.175607973937072,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK"
+"Imperial College London, London, UK",51.49887085,-0.175607973937072,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK"
+"Imperial College London, U.K",51.49887085,-0.175607973937072,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK"
+"Imperial College London, UK",51.49887085,-0.175607973937072,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK"
+"Imperial College London, United Kingdom",51.49887085,-0.175607973937072,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK"
+"Imperial College, London, UK",51.5004171,-0.1782711,"Sung Chuan Kung Fu, Imperial College, Prince Consort Road, City of Westminster, London, Greater London, England, SW7 2QU, UK"
+India,22.3511148,78.6677428,India
+Indian Institute of Science,13.0222347,77.5671832476811,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India"
+Indian Institute of Science Bangalore,13.0222347,77.5671832476811,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India"
+"Indian Institute of Science, India",13.0222347,77.5671832476811,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India"
+Indian Institute of Technology,28.5444176,77.1893001,"Indian Institute Of Technology, IIT Delhi Main Road, Adchini, Lado Sarai, Mehrauli, South Delhi, Delhi, 110066, India"
+"Indian Institute of Technology Delhi, New Delhi, India",28.5444176,77.1893001,"Indian Institute Of Technology, IIT Delhi Main Road, Adchini, Lado Sarai, Mehrauli, South Delhi, Delhi, 110066, India"
+Indian Institute of Technology Kanpur,26.513188,80.2365194538339,"Indian Institute of Technology Kanpur, 4th Avenue, Panki, Kanpur, Kanpur Nagar, Uttar Pradesh, 208016, India"
+"Indian Institute of Technology, Roorkee",29.8662461,77.8958708109136,"Indian Institute of Technology (IIT), Roorkee, LBS Jogging Track, Roorkee, Haridwar, Uttarakhand, 247667, India"
+Indiana University,39.86948105,-84.8795690544362,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA"
+Indiana University Bloomington,39.17720475,-86.5154003022128,"Indiana University Bloomington, East 17th Street, Bloomington, Monroe County, Indiana, 47408, USA"
+Information Technologies Institute,33.5934539,130.3557837,"公益財団法人九州先端科学技術研究所, Fukuoka SRP Center Building 7F, 百道ランプ下り入り口, 早良区, 福岡市, 福岡県, 九州地方, 814-0001, 日本"
+"Information Technology University (ITU), Punjab, Lahore, Pakistan",31.4760299,74.3427526,"Information Technology University (ITU), Ferozepur Road, Sher Shah Block, Garden Town, Al Noor Town, Lahore District, پنجاب, 54600, ‏پاکستان‎"
+"Information, Keio University",35.5416969,139.6347184,"綱島市民の森, けつわり坂, 港北区, 横浜市, 神奈川県, 関東地方, 223-0053, 日本"
+Institute,38.3836097,-81.7654665,"Institute, Kanawha County, West Virginia, 25112, USA"
+Institute for Advanced,38.7468877,139.824707282407,"Institute for Advanced Biosciences, 鶴岡市, 山形県, 東北地方, 日本"
+Institute for Communication Systems,51.2433692,-0.593220895014599,"Institute for Communication Systems, Spine Road, Woodbridge Hill, Guildford, Surrey, South East, England, GU2 7XS, UK"
+Institute for System Programming,55.7449881,37.6645042069876,"ИСП РАН, 25, улица Александра Солженицына, Швивая горка, Таганский район, Центральный административный округ, Москва, ЦФО, 109004, РФ"
+Institute of,38.3836097,-81.7654665,"Institute, Kanawha County, West Virginia, 25112, USA"
+Institute of Automation,54.1720834,12.0790983,"Institut für Automatisierungstechnik, 31, Richard-Wagner-Straße, Warnemünde, Ortsbeirat 1 : Diedrichshagen,Seebad Warnemünde, Rostock, Mecklenburg-Vorpommern, 18119, Deutschland"
+Institute of Communications Engineering,54.1718573,12.0784417,"Institut für Nachrichtentechnik, 31, Richard-Wagner-Straße, Warnemünde, Ortsbeirat 1 : Diedrichshagen,Seebad Warnemünde, Rostock, Mecklenburg-Vorpommern, 18119, Deutschland"
+Institute of Computer Science,35.15456615,128.098476040221,"Institute of Computer Science, 8, 내동로, 신율리, 진주시, 경남, 52669, 대한민국"
+Institute of Computer Science III,35.15456615,128.098476040221,"Institute of Computer Science, 8, 내동로, 신율리, 진주시, 경남, 52669, 대한민국"
+Institute of Computing,43.47878995,-80.5548480959375,"Institute for Quantum Computing, Wes Graham Way, Lakeshore Village, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 6R2, Canada"
+Institute of Computing Technology,34.6988529,135.1936779,"神戸情報大学院大学, フラワーロード, 中央区, 神戸市, 兵庫県, 近畿地方, 650-0001, 日本"
+Institute of Digital Media,20.28907925,85.84232125,"Institute of Digital Media Technology, Way to Csa Odisha Office, Ward 35, South East Zone, Bhubaneswar Municipal Corporation, Khordha, Odisha, 751022, India"
+Institute of Electronics and Computer Science,56.97734805,24.1951425550775,"EDI, 14, Dzērbenes iela, Biķerziedi, Teika, Ozolkalni, Rīga, Vidzeme, LV-1006, Latvija"
+Institute of Industrial Science,36.05238585,140.118523607658,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本"
+Institute of Information Science,25.0410728,121.614756201755,"資訊科學研究所, 數理大道, 中研里, 南港子, 南港區, 臺北市, 11574, 臺灣"
+Institute of Information Technology,23.7289899,90.3982682,"Institute of Information Technology, Sir Sayed Road, ফকিরাপুল, সিদ্দিক বাজার, ঢাকা, ঢাকা বিভাগ, 1000, বাংলাদেশ"
+Institute of Media Innovation,1.3433937,103.6793303,"Institute for Media Innovation, 50, Nanyang Drive, Pioneer, Southwest, 637553, Singapore"
+Institute of Road and,38.3836097,-81.7654665,"Institute, Kanawha County, West Virginia, 25112, USA"
+Institute of Systems and Robotics,53.8338371,10.7035939,"Institut für Robotik und Kognitive Systeme, 160, Ratzeburger Allee, Strecknitz, Sankt Jürgen, Strecknitz, Lübeck, Schleswig-Holstein, 23562, Deutschland"
+International Institute of Information Technology,17.4454957,78.3485469754447,"International Institute of Information Technology, Hyderabad, Campus Road, Ward 105 Gachibowli, Greater Hyderabad Municipal Corporation West Zone, Hyderabad, Rangareddy District, Telangana, 500032, India"
+"International Institute of Information Technology, Hyderabad, India",17.4454957,78.3485469754447,"International Institute of Information Technology, Hyderabad, Campus Road, Ward 105 Gachibowli, Greater Hyderabad Municipal Corporation West Zone, Hyderabad, Rangareddy District, Telangana, 500032, India"
+International University of,11.5744201,104.8775841,"International University, ផ្លូវ ១៩៨៤, ភូមិភ្នំពេញថ្មី, ខណ្ឌសែនសុខ, រាជធានីភ្នំពេញ, 12101, ព្រះរាជាណាចក្រ​កម្ពុជា"
+Ionian University,38.2899482,21.7886469,"Πανεπιστήμιο Πατρών, Λεωφ. Ιπποκράτους, κ. Ρίου (Αγίου Γεωργίου Ρίου), Πάτρα, Δήμος Πατρέων, Περιφερειακή Ενότητα Αχαΐας, Περιφέρεια Δυτικής Ελλάδας, Πελοπόννησος, Δυτική Ελλάδα και Ιόνιο, 26443, Ελλάδα"
+Iran,32.9407495,52.9471344,‏ایران‎
+Islamic Azad University,34.8452999,48.5596212013643,"دانشگاه آزاد اسلامی, همدان, بخش مرکزی شهرستان همدان, شهرستان همدان, استان همدان, ‏ایران‎"
+Islamic University of Gaza - Palestine,31.51368535,34.4401934143135,"The Islamic University of Gaza, Mostafa Hafez Street, South Remal, محافظة غزة, ‏قطاع غزة‎, PO BOX 108, الأراضي الفلسطينية"
+Istanbul Technical University,41.10427915,29.022311592943,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye"
+"Istanbul Technical University, Istanbul, 34469, TURKEY",41.10427915,29.022311592943,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye"
+"Istanbul Technical University, Istanbul, Turkey",41.10427915,29.022311592943,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye"
+"Istanbul Technical University, Turkey",41.10427915,29.022311592943,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye"
+Istanbul University,41.0132424,28.9637609,"İstanbul Üniversitesi, Besim Ömerpaşa Caddesi, Süleymaniye, Fatih, İstanbul, Marmara Bölgesi, 34116, Türkiye"
+Jacobs University,53.4129148,-2.96897915394896,"Liverpool Hope University, Shaw Street, Everton, Liverpool, North West England, England, L6 1HP, UK"
+Jadavpur University,22.5611537,88.4131019353334,"Jadavpur University, Chingrighata Flyover, Basani Devi Colony, Kolkata, Hāora, West Bengal, 700098, India"
+"Jadavpur University, India",22.5611537,88.4131019353334,"Jadavpur University, Chingrighata Flyover, Basani Devi Colony, Kolkata, Hāora, West Bengal, 700098, India"
+Jahangirnagar University,23.883312,90.2693921,"Jahangirnagar University, 1342, University Main Road, সাভার, সাভার উপজেলা, ঢাকা জেলা, ঢাকা বিভাগ, 1342, বাংলাদেশ"
+"Jahangirnagar University, Savar, Dhaka 1342, Bangladesh",23.88277575,90.2671009927283,"Jahangirnagar University, 1342, Dhaka - Aricha Highway, Nobinagar, সাভার উপজেলা, ঢাকা জেলা, ঢাকা বিভাগ, 1342, বাংলাদেশ"
+"Jaipur, Rajasthan, India",26.916194,75.820349,"Jaipur, Rajasthan, 302001, India"
+Japan,36.5748441,139.2394179,日本
+Japan Advanced Institute of Science and Technology,36.4442949,136.5928587,"JAIST (北陸先端科学技術大学院大学), 石川県道55号小松辰口線, Ishikawa Science Park, 能美市, 石川県, 中部地方, 923-1206, 日本"
+"Japan Advanced Institute of Science and Technology, Ishikawa-ken 923-1211, Japan",36.4442949,136.5928587,"JAIST (北陸先端科学技術大学院大学), 石川県道55号小松辰口線, Ishikawa Science Park, 能美市, 石川県, 中部地方, 923-1206, 日本"
+"Jiangnan University, Wuxi",31.4854255,120.2739581,"江南大学站, 蠡湖大道, 滨湖区, 南场村, 滨湖区 (Binhu), 无锡市 / Wuxi, 江苏省, 214121, 中国"
+"Joint Research Institute, Foshan, China",22.83388935,113.285418245471,"广东顺德中山大学卡内基梅隆大学国际联合研究院, 南国东路, 顺德区, 五村, 顺德区 (Shunde), 佛山市 / Foshan, 广东省, 0757, 中国"
+"K.N. Toosi University of Technology, Tehran, Iran",35.76427925,51.409702762313,"دانشکده مهندسی عمران و نقشه برداری, ولی عصر, کاووسیه, منطقه ۳ شهر تهران, تجریش, بخش رودبارقصران, شهرستان شمیرانات, استان تهران, 1968653111, ‏ایران‎"
+"KTH Royal Institute of Technology, Stockholm",59.34986645,18.0706321329842,"KTH, Teknikringen, Lärkstaden, Norra Djurgården, Östermalms stadsdelsområde, Sthlm, Stockholm, Stockholms län, Svealand, 114 28, Sverige"
+Karlsruhe Institute of,49.10184375,8.43312559623876,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland"
+Karlsruhe Institute of Technology,49.10184375,8.43312559623876,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland"
+"Karlsruhe Institute of Technology, 76131 Karlsruhe, Germany",49.10184375,8.43312559623876,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland"
+"Karlsruhe Institute of Technology, Germany",49.10184375,8.43312559623876,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland"
+"Karlsruhe Institute of Technology, Karlsruhe, Germany",49.10184375,8.43312559623876,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland"
+Katholieke Universiteit Leuven,50.8830686,4.7019503,"Laboratorium voor Bos, natuur en landschap, 102, Vital Decosterstraat, Sint-Maartensdal, Leuven, Vlaams-Brabant, Vlaanderen, 3000, België / Belgique / Belgien"
+"Keio University, Yokohama 223-8522, Japan",35.55536215,139.654582444136,"慶應義塾大学 (矢上キャンパス), 理工坂, 港北区, 横浜市, 神奈川県, 関東地方, 223-8522, 日本"
+Kent State University,41.1443525,-81.3398283284572,"Kent State University, Lester A. Lefton Esplanade, Whitehall Terrace, Kent, Portage County, Ohio, 44242-0001, USA"
+"Kent State University, Kent, Ohio, USA",41.1443525,-81.3398283284572,"Kent State University, Lester A. Lefton Esplanade, Whitehall Terrace, Kent, Portage County, Ohio, 44242-0001, USA"
+"Khalifa University, Abu Dhabi, United Arab Emirates",24.4469025,54.3942563,"Khalifa University, شارع طَوِي مُوَيلِح, قصر الشاطئ, حدبة الزَّعْفرانة, أبوظبي, أبو ظبي, 31757, الإمارات العربية المتحدة"
+"King Abdullah University of Science and Technology 4700, Thuwal, Saudi Arabia",22.31055485,39.1051548637793,"KAUST, Collaboration Avenue, ثول, منطقة مكة المكرمة, 23955, السعودية"
+King Faisal University,26.397778,50.183056,"University of Dammam, King Faisal Rd, العقربية, الخبر, المنطقة الشرقية, ٣١٩٥٢, السعودية"
+"King Saud University, Riyadh",24.7246403,46.623350123456,"King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية"
+"King Saud University, Riyadh 11543, Saudi Arabia",24.7246403,46.623350123456,"King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية"
+"King Saud University, Riyadh, Saudi Arabia",24.7246403,46.623350123456,"King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية"
+Kingston University,51.4293086,-0.2684044,"Kingston University, Kingston Hill, Kingston Vale, Kingston-upon-Thames, London, Greater London, England, KT2 7TF, UK"
+"Kingston University, UK",51.4293086,-0.2684044,"Kingston University, Kingston Hill, Kingston Vale, Kingston-upon-Thames, London, Greater London, England, KT2 7TF, UK"
+Kobe University,34.7275714,135.237099997686,"神戸大学, 灘三田線, 灘区, 神戸市, 兵庫県, 近畿地方, 657-00027, 日本"
+"Kogakuin University, Tokyo, Japan",35.6902784,139.695400958171,"工学院大学, 東通り, 新宿区, 東京都, 関東地方, 163-8677, 日本"
+Korea Advanced Institute of Science and Technology,36.3697191,127.362537001151,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국"
+Korea Advanced institute of Science and Technology,36.3697191,127.362537001151,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국"
+Korea University,37.5901411,127.0362318,"고려대, 안암로, 제기동, 동대문구, 서울특별시, 02796, 대한민국"
+"Kurukshetra University, Kurukshetra",29.95826275,76.8156304467532,"Kurukshetra University, SH6, Kurukshetra, Haryana, 132118, India"
+"Kurukshetra University, Kurukshetra, India",29.95826275,76.8156304467532,"Kurukshetra University, SH6, Kurukshetra, Haryana, 132118, India"
+"Kyoto University, Kyoto, Japan",35.0274996,135.781545126193,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本"
+Kyung Hee University,37.5948716,127.0530887,"경희사이버대학교, 26, 경희대로, 회기동, 동대문구, 서울특별시, 02447, 대한민국"
+"Kyung Hee University, South Korea",37.5948716,127.0530887,"경희사이버대학교, 26, 경희대로, 회기동, 동대문구, 서울특별시, 02447, 대한민국"
+Kyushu University,33.59914655,130.223598480987,"伊都ゲストハウス, 桜井太郎丸線, 西区, 福岡市, 福岡県, 九州地方, 819−0395, 日本"
+"La Trobe University, Australia",-36.7784754,144.298047,"La Trobe University, Keck Street, Flora Hill, Bendigo, City of Greater Bendigo, Loddon Mallee, Victoria, 3550, Australia"
+Liverpool John Moores University,53.4050747,-2.97030028586709,"John Lennon Art and Design Building, Duckinfield Street, Knowledge Quarter, Liverpool, North West England, England, L3 5YD, UK"
+Lomonosov Moscow State University,55.70229715,37.5317977694291,"МГУ, улица Академика Хохлова, Московский государственный университет им. М. В. Ломоносова, район Раменки, Западный административный округ, Москва, ЦФО, 119234, РФ"
+Louisiana State University,30.40550035,-91.1862047410405,"LSU, Gourrier Avenue, Baton Rouge, East Baton Rouge Parish, Louisiana, 70803, USA"
+"Lund University, Lund, Sweden",55.7039571,13.1902011,"TEM at Lund University, 9, Klostergatan, Stadskärnan, Centrum, Lund, Skåne, Götaland, 22222, Sverige"
+"M S Ramaiah Institute of Technology, Bangalore, Karnataka, India",13.0309553,77.5648559396817,"M S Ramaiah Institute of Technology, MSRIT Quadrangle Path, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560054, India"
+MASSACHUSETTS INSTITUTE OF TECHNOLOGY,42.3583961,-71.0956778766393,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+MASSACHUSETTS INSTITUTE OF TECHNOLOGY (MIT,42.3583961,-71.0956778766393,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+METs Institute of Engineering,28.2140454,83.9607104993073,"Dihiko Paton, Pokhara Lekhnath Metropolitan Ward No. 6, Pokhara, Pokhara Lekhnath Metropolitan, कास्की, गण्डकी अञ्चल, पश्चिमाञ्चल विकास क्षेत्र, नेपाल"
+MULTIMEDIA UNIVERSITY,2.92749755,101.641853013536,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia"
+Macau University of Science and,22.15263985,113.568032061523,"Universidade de Ciência e Tecnologia de Macau 澳門科技大學 Macau University of Science and Technology, 偉龍馬路 Avenida Wai Long, 氹仔Taipa, 氹仔舊城區 Vila de Taipa, 嘉模堂區 Nossa Senhora do Carmo, 氹仔 Taipa, 澳門 Macau, 853, 中国"
+Macau University of Science and Technology,22.15263985,113.568032061523,"Universidade de Ciência e Tecnologia de Macau 澳門科技大學 Macau University of Science and Technology, 偉龍馬路 Avenida Wai Long, 氹仔Taipa, 氹仔舊城區 Vila de Taipa, 嘉模堂區 Nossa Senhora do Carmo, 氹仔 Taipa, 澳門 Macau, 853, 中国"
+"Macau University of Science and Technology, Macau",22.15263985,113.568032061523,"Universidade de Ciência e Tecnologia de Macau 澳門科技大學 Macau University of Science and Technology, 偉龍馬路 Avenida Wai Long, 氹仔Taipa, 氹仔舊城區 Vila de Taipa, 嘉模堂區 Nossa Senhora do Carmo, 氹仔 Taipa, 澳門 Macau, 853, 中国"
+Mahanakorn University of Technology,13.84450465,100.856208183836,"มหาวิทยาลัยเทคโนโลยีมหานคร, 140, ถนนเชื่อมสัมพันธ์, กรุงเทพมหานคร, เขตหนองจอก, กรุงเทพมหานคร, 10530, ประเทศไทย"
+"Manchester University, UK",53.47020165,-2.23932183309859,"Manchester Metropolitan University – All Saints Campus, Lower Ormond Street, Hulme, Manchester, Greater Manchester, North West England, England, M15 6BX, UK"
+"Manonmaniam Sundaranar University, India",8.76554685,77.65100444813,"Manonmaniam Sundaranar University, Tenkasi-Tirunelveli, Gandhi Nagar, Tirunelveli, Tirunelveli Kattabo, Tamil Nadu, 627808, India"
+"Manonmaniam Sundaranar University, Tirunelveli",8.76554685,77.65100444813,"Manonmaniam Sundaranar University, Tenkasi-Tirunelveli, Gandhi Nagar, Tirunelveli, Tirunelveli Kattabo, Tamil Nadu, 627808, India"
+"Manonmaniam Sundaranar University, Tirunelveli, India",8.76554685,77.65100444813,"Manonmaniam Sundaranar University, Tenkasi-Tirunelveli, Gandhi Nagar, Tirunelveli, Tirunelveli Kattabo, Tamil Nadu, 627808, India"
+Marquette University,43.03889625,-87.9315544990507,"Marquette University, West Wisconsin Avenue, University Hill, Milwaukee, Milwaukee County, Wisconsin, 53226, USA"
+Massachusetts Institute,42.3583961,-71.0956778766393,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+Massachusetts Institute of Technology,42.3583961,-71.0956778766393,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+Massachusetts Institute of Technology (MIT,42.3583961,-71.0956778766393,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+"Massachusetts Institute of Technology, Cambridge, MA 02139, USA",42.3583961,-71.0956778766393,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+Math Institute,43.65879595,-79.3975504060101,"Fields Institute for Research in Math Science, 222, College Street, Kensington Market, Old Toronto, Toronto, Ontario, M5T 3A1, Canada"
+Max Planck Institute for Biological Cybernetics,48.5369125,9.05922532743396,"Max-Planck-Institut für Biologische Kybernetik, 8, Max-Planck-Ring, Max-Planck-Institut, Wanne, Tübingen, Landkreis Tübingen, Regierungsbezirk Tübingen, Baden-Württemberg, 72076, Deutschland"
+Max Planck Institute for Informatics,49.2579566,7.04577416640431,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland"
+"Max Planck Institute for Informatics, Germany",49.2579566,7.04577416640431,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland"
+Max-Planck Institute for Informatics,49.2579566,7.04577416640431,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland"
+McGill University,45.5039761,-73.5749687,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada"
+"McGill University, Montreal, Canada",45.50691775,-73.5791162596496,"McGill University, Avenue Docteur Penfield, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 2T8, Canada"
+McGovern Institute,42.3626295,-71.0914481,"McGovern Institute for Brain Research (MIT), Main Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+McGovern Institute for Brain Research,42.3626295,-71.0914481,"McGovern Institute for Brain Research (MIT), Main Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+McMaster University,43.26336945,-79.9180968401692,"McMaster University, Westdale, Hamilton, Ontario, Canada"
+Meiji University,35.6975029,139.761391749285,"明治大学, 錦華坂, 猿楽町1, 猿楽町, 東京, 千代田区, 東京都, 関東地方, 101-0051, 日本"
+Michigan State University,42.718568,-84.4779157093052,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA"
+"Michigan State University, E. Lansing, MI 48823, USA",42.7337998,-84.4804243,"Dero Fixit Bike Station, Grand River Avenue, East Lansing, Ingham County, Michigan, 48824, USA"
+"Michigan State University, East Lansing MI",42.718568,-84.4779157093052,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA"
+"Michigan State University, East Lansing, MI 48824, USA",42.718568,-84.4779157093052,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA"
+"Michigan State University, East Lansing, MI, USA",42.718568,-84.4779157093052,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA"
+Middle East Technical University,39.87549675,32.7855350558467,"ODTÜ, 1, 1591.sk(315.sk), Çiğdem Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye"
+Middlebury College,44.0090777,-73.1767946,"Middlebury College, Old Chapel Road, Middlebury, Addison County, Vermont, 05753, USA"
+Middlesex University London,51.59029705,-0.229632209454029,"Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK"
+"Middlesex University London, London, UK",51.59029705,-0.229632209454029,"Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK"
+"Middlesex University London, UK",51.59029705,-0.229632209454029,"Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK"
+"Middlesex University, London",51.59029705,-0.229632209454029,"Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK"
+Monash University,-37.78397455,144.958674326093,"Monash University, Mile Lane, Parkville, City of Melbourne, Victoria, 3000, Australia"
+"Monash University, Victoria, Australia",-37.9011951,145.130584919767,"Monash University, Business Park Drive, Monash Business Park, Notting Hill, City of Monash, Victoria, 3800, Australia"
+"Moscow Institute of Physics and Technology, Russia",55.929035,37.5186680829482,"МФТИ, 9, Институтский переулок, Виноградовские Горки, Лихачёво, Долгопрудный, городской округ Долгопрудный, Московская область, ЦФО, 141700, РФ"
+Muhlenberg College,40.5967637,-75.5124062,"Muhlenberg College, 2400, West Chew Street, Rose Garden, Allentown, Lehigh County, Pennsylvania, 18104, USA"
+Multimedia University,2.92749755,101.641853013536,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia"
+"Multimedia University, Cyberjaya, Malaysia",2.92749755,101.641853013536,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia"
+Myongji University,37.2381023,127.1903431,"명지대, 금학로, 역북동, 처인구, 용인시, 경기, 17144, 대한민국"
+"Nagaoka University of Technology, Japan",37.42354445,138.77807276029,"長岡技術科学大学 (Nagaoka University of Technology), 長岡西山線, 長岡市, 新潟県, 中部地方, 日本"
+Nagoya University,43.53750985,143.60768225282,"SuperDARN (Hokkaido West), 太辛第1支線林道, 陸別町, 足寄郡, 十勝総合振興局, 北海道, 北海道地方, 日本"
+"Nagoya University, Japan",43.53750985,143.60768225282,"SuperDARN (Hokkaido West), 太辛第1支線林道, 陸別町, 足寄郡, 十勝総合振興局, 北海道, 北海道地方, 日本"
+"Nanjing University of Aeronautics and Astronautics, China",32.0373496,118.8140686,"南京航空航天大学, 御道街, 白下区, 新世纪广场, 秦淮区, 南京市, 江苏省, 210016, 中国"
+"Nanjing University of Aeronautics and Astronautics, Nanjing 210016, China",32.0373496,118.8140686,"南京航空航天大学, 御道街, 白下区, 新世纪广场, 秦淮区, 南京市, 江苏省, 210016, 中国"
+Nanjing University of Information Science and Technology,32.2068102,118.718472893883,"南京信息工程大学, 龙山北路, 第十六街区, 浦口区, 南京市, 江苏省, 210032, 中国"
+Nanjing University of Science and Technology,32.031826,118.852142742792,"南京理工大学, 友谊路, 余粮庄, 玄武区, 南京市, 江苏省, 210016, 中国"
+"Nanjing University of Science and Technology, China",32.031826,118.852142742792,"南京理工大学, 友谊路, 余粮庄, 玄武区, 南京市, 江苏省, 210016, 中国"
+"Nanjing University, China",32.0565957,118.774088328078,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国"
+"Nanjing University, Nanjing 210023, China",32.0565957,118.774088328078,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国"
+"Nanjing University, Nanjing 210093, China",32.0565957,118.774088328078,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国"
+"Nanjing University, Nanjing 210093, P.R.China",32.0565957,118.774088328078,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国"
+Nanyang Technological University,1.3484104,103.682979653067,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore"
+"Nanyang Technological University, Singapore",1.3484104,103.682979653067,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore"
+"Nanyang Technological University, Singapore 639798, Singapore",1.3484104,103.682979653067,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore"
+National Cheng Kung University,22.9991916,120.216251337909,"成大, 1, 大學路, 大學里, 前甲, 東區, 臺南市, 70101, 臺灣"
+"National Cheng Kung University, Tainan, Taiwan",22.9991916,120.216251337909,"成大, 1, 大學路, 大學里, 前甲, 東區, 臺南市, 70101, 臺灣"
+"National Chiao Tung University, Taiwan",24.78676765,120.997244116807,"NCTU;交大;交通大學;交大光復校區;交通大學光復校區, 1001, 大學路, 光明里, 赤土崎, 東區, 新竹市, 30010, 臺灣"
+National Chiao-Tung University,24.78676765,120.997244116807,"NCTU;交大;交通大學;交大光復校區;交通大學光復校區, 1001, 大學路, 光明里, 赤土崎, 東區, 新竹市, 30010, 臺灣"
+National Institute of Advanced Industrial Science and Technology,36.05238585,140.118523607658,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本"
+National Institute of Standards and Technology,39.1254938,-77.2229347515,"National Institute of Standards and Technology, Summer Walk Drive, Diamond Farms, Gaithersburg, Montgomery County, Maryland, 20878, USA"
+"National Institute of Standards and Technology, Gaithersburg, MD 20899, USA",39.1254938,-77.2229347515,"National Institute of Standards and Technology, Summer Walk Drive, Diamond Farms, Gaithersburg, Montgomery County, Maryland, 20878, USA"
+National Institute of Technology Karnataka,13.01119095,74.7949882494716,"National Institute of Technology, Karnataka, NH66, ದಕ್ಷಿಣ ಕನ್ನಡ, Mangaluru taluk, Dakshina Kannada, Karnataka, 575025, India"
+National Institute of Technology Rourkela,22.2501589,84.9066855698087,"National Institute of Technology, inside the department, Koel Nagar, Rourkela, Sundargarh, Odisha, 769002, India"
+"National Institute of Technology, Durgapur, West Bengal, India",23.54869625,87.291057119111,"National Institute Of Technology, Durgapur, Priyadarshini Indira Sarani, Durgapur, Bānkurā, West Bengal, 713209, India"
+National Institutes of Health,39.00041165,-77.1032777503325,"NIH, Pooks Hill, Bethesda, Montgomery County, Maryland, USA"
+"National Sun Yat Sen University, 804 Kaohsiung, Taiwan",22.62794005,120.266318480249,"國立中山大學, 70, 蓮海路, 桃源里, 柴山, 鼓山區, 高雄市, 804, 臺灣"
+National Taipei University,24.94314825,121.368629787836,"國立臺北大學, 151, 大學路, 龍恩里, 隆恩埔, 三峽區, 新北市, 23741, 臺灣"
+National Taiwan University,25.01682835,121.538469235773,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣"
+National Taiwan University of Science and Technology,25.01353105,121.541737363138,"臺科大, 43, 基隆路四段, 學府里, 下內埔, 大安區, 臺北市, 10607, 臺灣"
+"National Taiwan University of Science and Technology, Taipei 10607, Taiwan",25.01353105,121.541737363138,"臺科大, 43, 基隆路四段, 學府里, 下內埔, 大安區, 臺北市, 10607, 臺灣"
+"National Taiwan University, Taipei, Taiwan",25.01682835,121.538469235773,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣"
+"National Taiwan University, Taiwan",25.01682835,121.538469235773,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣"
+National Technical University of Athens,37.98782705,23.7317973260904,"Εθνικό Μετσόβιο Πολυτεχνείο, Στουρνάρη, Μουσείο, Αθήνα, Δήμος Αθηναίων, Π.Ε. Κεντρικού Τομέα Αθηνών, Περιφέρεια Αττικής, Αττική, 11250, Ελλάδα"
+National University,14.6042947,120.994285201104,"National University, M.F. Jocson, Royal Plaza, Sampaloc, Fourth District, Manila, Metro Manila, 1008, Philippines"
+National University of Defense Technology,28.2290209,112.994832044032,"国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国"
+"National University of Defense Technology, Changsha 410073, China",28.2290209,112.994832044032,"国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国"
+"National University of Ireland Maynooth, Co. Kildare, Ireland",53.3846975,-6.60039458177959,"National University of Ireland Maynooth, River Apartments, Maynooth, Maynooth ED, Maynooth Municipal District, County Kildare, Leinster, KILDARE, Ireland"
+"National University of Kaohsiung, 811 Kaohsiung, Taiwan",22.73424255,120.283497550993,"國立高雄大學, 中央廣場, 藍田, 藍田里, 楠梓區, 高雄市, 811, 臺灣"
+National University of Science and Technology,33.6450855,72.9915892221655,"National University of Science and Technology, Indus Loop, H-11, ICT, وفاقی دارالحکومت اسلام آباد, 44000, ‏پاکستان‎"
+"National University of Sciences and Technology (NUST), Islamabad, Pakistan",33.644347,72.9885079,"National University of Sciences and Technology (NUST), Kashmir Highway, جی - 10, ICT, وفاقی دارالحکومت اسلام آباد, 44000, ‏پاکستان‎"
+National University of Singapore,1.2962018,103.776899437848,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore"
+"National University of Singapore, Singapore",1.2962018,103.776899437848,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore"
+National University of Technology Technology,33.3120263,44.4471829434368,"الجامعة التكنلوجية, A86;N11;D383, محلة 103, Al Saadoom Park, Rusafa, بغداد, Al Resafa, محافظة بغداد, 3241, العراق"
+National University of singapore,1.2962018,103.776899437848,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore"
+"Neurological Institute, USA",40.84211085,-73.9428460313244,"Neurological Institute of New York, Haven Avenue, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10032, USA"
+New Jersey Institute of Technology,40.7423025,-74.1792817237128,"New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA"
+"New Jersey Institute of Technology, USA",40.7423025,-74.1792817237128,"New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA"
+New York University,40.72925325,-73.9962539360963,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA"
+"Newcastle University, Newcastle upon Tyne",54.98023235,-1.61452627035949,"Newcastle University, Claremont Walk, Haymarket, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE1 7RU, UK"
+"Normal University, Kunming, China",25.0580509,102.6955241,"云南师范大学, 一二一大街, 志城家园, 五华区, 五华区 (Wuhua), 昆明市 (Kunming), 云南省, 650030, 中国"
+North Carolina Central University,35.97320905,-78.897550537484,"North Carolina Central University, George Street, Hayti, Durham, Durham County, North Carolina, 27707, USA"
+"North China Electric Power University, Baoding, China",38.8760446,115.4973873,"华北电力大学, 永华北大街, 莲池区, 保定市, 莲池区 (Lianchi), 保定市, 河北省, 071000, 中国"
+"North Dakota State University, Fargo, ND 58108-6050, USA",46.897155,-96.8182760282419,"North Dakota State University, 15th Avenue North, Fargo, Cass County, North Dakota, 58102, USA"
+Northeastern University,42.3383668,-71.0879352428284,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA"
+"Northeastern University, Boston, MA, USA",42.3383668,-71.0879352428284,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA"
+"Northeastern University, Boston, USA",42.3383668,-71.0879352428284,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA"
+"Northeastern University, MA, USA",42.3383668,-71.0879352428284,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA"
+Northumbria University,55.0030632,-1.57463231052026,"Northumbria University, Birkdale Close, High Heaton, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE7 7TP, UK"
+"Northumbria University, Newcastle upon Tyne, NE1 8ST, UK",54.9781026,-1.6067699,"Northumbria University, Northumberland Road, Cradlewell, Haymarket, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE1 8SG, UK"
+Northwestern University,42.0551164,-87.6758111348217,"Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA"
+Nottingham Trent University,52.9577322,-1.15617099267709,"Nottingham Trent University, Waverley Terrace, Lace Market, The Park, City of Nottingham, East Midlands, England, NG1 5JD, UK"
+"Nottingham Trent University, Nottingham, UK",52.9577322,-1.15617099267709,"Nottingham Trent University, Waverley Terrace, Lace Market, The Park, City of Nottingham, East Midlands, England, NG1 5JD, UK"
+"Nottingham University Hospital, Nottingham, UK",52.9434967,-1.18631123153121,"Nottingham University Hospital, Central Route, Dunkirk, Wollaton, City of Nottingham, East Midlands, England, NG7 2UH, UK"
+OF PRINCETON UNIVERSITY,40.34829285,-74.66308325,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA"
+OF STANFORD UNIVERSITY,37.43131385,-122.169365354983,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA"
+Oakland University,42.66663325,-83.2065575175658,"Oakland University, 201, Meadow Brook Road, Rochester Hills, Oakland County, Michigan, 48309-4401, USA"
+Okayama University,34.6893393,133.9222272,"岡山大学, 津高法界院停車場線, 津島東2, 津島東, 北区, 岡山市, 岡山県, 中国地方, 700-0081, 日本"
+Oregon State University,45.5198289,-122.677979643331,"OSU Beaver Store, 538, Southwest 6th Avenue, Portland Downtown, Portland, Multnomah County, Oregon, 97204, USA"
+Otto von Guericke University,52.14005065,11.6447124822347,"Otto-von-Guericke-Universität Magdeburg, 2, Universitätsplatz, Krökentorviertel/Breiter Weg NA, Alte Neustadt, Magdeburg, Sachsen-Anhalt, 39106, Deutschland"
+Otto-von-Guericke University Magdeburg,52.14005065,11.6447124822347,"Otto-von-Guericke-Universität Magdeburg, 2, Universitätsplatz, Krökentorviertel/Breiter Weg NA, Alte Neustadt, Magdeburg, Sachsen-Anhalt, 39106, Deutschland"
+Oxford Brookes University,51.7555205,-1.2261597,"Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK"
+"Oxford Brookes University, Oxford, United Kingdom",51.7555205,-1.2261597,"Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK"
+Oxford University,51.7520849,-1.25166460220888,"University College, Logic Lane, Grandpont, Oxford, Oxon, South East, England, OX1 4EX, UK"
+"Oxford University, UK",51.7488051,-1.23874457456279,"James Mellon Hall, Rectory Road, New Marston, Oxford, Oxon, South East, England, OX4 1BU, UK"
+"PSG College of Technology, Coimbatore, Tamil Nadu, India",11.0246833,77.0028424564731,"PSG College of Technology, Avinashi Road, Ward 38, North Zone, Coimbatore, Coimbatore district, Tamil Nadu, 641001, India"
+Peking University,39.9922379,116.303938156219,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国"
+"Peking University, Beijing, China",39.9922379,116.303938156219,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国"
+Pennsylvania,40.9699889,-77.7278831,"Pennsylvania, USA"
+Plymouth University,50.3755269,-4.13937687442817,"Plymouth University, Portland Square, Barbican, Plymouth, South West England, England, PL4 6AP, UK"
+Pohang University of Science and Technology,36.01773095,129.321075092352,"포스텍, 77, 청암로, 효곡동, 남구, 포항시, 경북, 37673, 대한민국"
+Politehnica University of Timisoara,45.746189,21.2275507517647,"UPT, Bulevardul Vasile Pârvan, Elisabetin, Timișoara, Timiș, 300223, România"
+Pondicherry Engineering College,12.0148693,79.8480910431981,"Pondicherry Engineering College, PEC MAIN ROAD, Sri Ma, Puducherry, Puducherry district, Puducherry, 605001, India"
+Portland State University,45.51181205,-122.684929993829,"Portland State University, Southwest Park Avenue, University District, Portland Downtown, Portland, Multnomah County, Oregon, 97201, USA"
+"Portland State University, USA",45.51181205,-122.684929993829,"Portland State University, Southwest Park Avenue, University District, Portland Downtown, Portland, Multnomah County, Oregon, 97201, USA"
+Portugal,40.033265,-7.8896263,Portugal
+Poznan University of Technology,52.4004837,16.9515808278647,"Dom Studencki nr 3, 3, Kórnicka, Święty Roch, Rataje, Poznań, wielkopolskie, 61-141, RP"
+Princeton University,40.34829285,-74.66308325,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA"
+"Princeton University, Princeton, NJ, USA",40.34725815,-74.6513455119257,"Lot 25, Ivy Lane, Princeton Township, Mercer County, New Jersey, 08544, USA"
+"Princeton University, Princeton, New Jersey, USA",40.34829285,-74.66308325,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA"
+"Pune Institute of Computer Technology, Pune, ( India",18.4575638,73.8507352,"Pune Institute of Computer Technology, Mediacal College Road, Vadgaon Budruk, Katraj, Pune, Pune District, Maharashtra, 411043, India"
+Punjabi University Patiala,30.3568981,76.4551272,"Punjabi University Patiala, Rajpura Road, Patiala, Punjab, 147001, India"
+Purdue University,40.4319722,-86.923893679845,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA"
+"Purdue University, West Lafayette, IN 47907, USA",40.4262569,-86.9157551,"Mathematical Sciences Library, 105, University Street, West Lafayette, Tippecanoe County, Indiana, 47907, USA"
+"Purdue University, West Lafayette, IN, USA",40.4319722,-86.923893679845,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA"
+"Purdue University, West Lafayette, IN. 47907, USA",40.4262569,-86.9157551,"Mathematical Sciences Library, 105, University Street, West Lafayette, Tippecanoe County, Indiana, 47907, USA"
+"Purdue University, West Lafayette, Indiana, 47906, USA",40.4319722,-86.923893679845,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA"
+"Qatar University, Qatar",25.37461295,51.4898035392337,"Qatar University, Roindabout 3, Al Tarfa (68), أم صلال, 24685, ‏قطر‎"
+Queen Mary University,51.5247272,-0.0393103466301624,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK"
+Queen Mary University of London,51.5247272,-0.0393103466301624,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK"
+"Queen Mary University of London, London E1 4NS, UK",51.5247272,-0.0393103466301624,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK"
+"Queen Mary University of London, UK",51.5247272,-0.0393103466301624,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK"
+Queensland University of Technology,-27.47715625,153.028410039129,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia"
+Queensland University of Technology (QUT,-27.4770485,153.028373791304,"QUT Gardens Point Main Library, V, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia"
+Queensland University of Technology(QUT,-27.4770485,153.028373791304,"QUT Gardens Point Main Library, V, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia"
+"Queensland University of Technology, Australia",-27.47715625,153.028410039129,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia"
+"Queensland University of Technology, Brisbane, QLD, Australia",-27.47715625,153.028410039129,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia"
+"RMIT University, Australia",-37.8087465,144.9638875,"RMIT University, 124, La Trobe Street, Melbourne City, City of Melbourne, Victoria, 3000, Australia"
+RWTH Aachen University,50.7791703,6.06728732851292,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland"
+"RWTH Aachen University, Aachen, Germany",50.7791703,6.06728732851292,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland"
+Raipur institute of technology,21.2262243,81.8013664,"Raipur institute of technology, NH53, Raipur, Chhattisgarh, 492101, India"
+Rensselaer Polytechnic Institute,42.7298459,-73.6795021620135,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA"
+"Rensselaer Polytechnic Institute, Troy, NY 12180, USA",42.73280325,-73.6622354488153,"Rensselaer Polytechnic Institute, Tibbits Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA"
+"Rensselaer Polytechnic Institute, USA",42.7298459,-73.6795021620135,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA"
+Research Center,24.7261991,46.6365468966391,"مركز البحوث, طريق تركي الأول بن عبدالعزيز آل سعود, المحمدية, Al Muhammadiyah District حي المحمدية, Al Maather Municipality, الرياض, منطقة الرياض, 12371, السعودية"
+Reutlingen University,48.48187645,9.18682403998887,"Campus Hohbuch, Campus Hochschule Reutlingen, Reutlingen, Landkreis Reutlingen, Regierungsbezirk Tübingen, Baden-Württemberg, 72762, Deutschland"
+"Rheinische-Friedrich-Wilhelms University, Bonn, Germany",50.7338124,7.1022465,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland"
+Rice University,29.71679145,-95.4047811339379,"Rice University, Stockton Drive, Houston, Harris County, Texas, 77005-1890, USA"
+"Rice University, Houston, TX, 77005, USA",29.71679145,-95.4047811339379,"Rice University, Stockton Drive, Houston, Harris County, Texas, 77005-1890, USA"
+"Rio de Janeiro State University, Brazil",-22.91117105,-43.2357797110467,"UERJ, 524, Rua São Francisco Xavier, Maracanã, Zona Norte do Rio de Janeiro, Rio de Janeiro, Microrregião Rio de Janeiro, Região Metropolitana do Rio de Janeiro, RJ, Região Sudeste, 20550-900, Brasil"
+"Ritsumeikan University, Japan",35.0333281,135.7249154,"立命館大学 (Ritsumeikan University), 衣笠宇多野線, 北区, 京都市, 京都府, 近畿地方, 6038577, 日本"
+"Ritsumeikan University, Kyoto, Japan",35.0333281,135.7249154,"立命館大学 (Ritsumeikan University), 衣笠宇多野線, 北区, 京都市, 京都府, 近畿地方, 6038577, 日本"
+"Ritsumeikan, University",49.26007165,-123.253442836235,"Ritsumeikan House, Lower Mall, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada"
+Robotics Institute,13.65450525,100.494231705059,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย"
+Rochester Institute of Technology,43.08250655,-77.6712166264273,"Rochester Institute of Technology (RIT), 1, Lomb Memorial Drive, Bailey, Henrietta Town, Monroe County, New York, 14623, USA"
+Rowan University,39.7103526,-75.1193266647699,"Rowan University, Esbjornson Walk, Glassboro, Gloucester County, New Jersey, 08028, USA"
+Rowland Institute,42.3639862,-71.0778293,"Rowland Research Institute, Land Boulevard, East Cambridge, Cambridge, Middlesex County, Massachusetts, 02142, USA"
+Ruhr University Bochum,51.44415765,7.26096541306078,"RUB, 150, Universitätsstraße, Ruhr-Universität, Querenburg, Bochum-Süd, Bochum, Regierungsbezirk Arnsberg, Nordrhein-Westfalen, 44801, Deutschland"
+"Ruhr-University Bochum, Germany",51.44415765,7.26096541306078,"RUB, 150, Universitätsstraße, Ruhr-Universität, Querenburg, Bochum-Süd, Bochum, Regierungsbezirk Arnsberg, Nordrhein-Westfalen, 44801, Deutschland"
+Rutgers University,40.47913175,-74.431688684404,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA"
+"Rutgers University, New Brunswick, NJ",40.50007595,-74.4457915242934,"Zimmerli Art Museum, 71, Hamilton Street, New Brunswick, Middlesex County, New Jersey, 08901-1248, USA"
+"Rutgers University, Newark, NJ, USA",40.7417586,-74.1750462269524,"Dana Library, Bleeker Street, Teachers Village, Newark, Essex County, New Jersey, 07102, USA"
+"Rutgers University, Piscataway NJ 08854, USA",40.5234675,-74.436975,"The Rock Cafe, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA"
+"Rutgers University, Piscataway, NJ",40.5234675,-74.436975,"The Rock Cafe, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA"
+"Rutgers University, Piscataway, NJ 08854, USA",40.5234675,-74.436975,"The Rock Cafe, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA"
+"Rutgers University, USA",40.47913175,-74.431688684404,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA"
+"Ryerson University, Canada",43.65815275,-79.3790801045263,"Ryerson University, Gould Street, Downtown Yonge, Old Toronto, Toronto, Ontario, M5B 2G9, Canada"
+"SASTRA University, Thanjavur, Tamil Nadu, India",10.9628655,79.3853065130097,"SASTRA University, SRC Campus, Big Bazaar Street, கும்பகோணம், Thanjavur district, Tamil Nadu, 612001, India"
+SIMON FRASER UNIVERSITY,49.2767454,-122.917773749103,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada"
+Sabanci University,40.8927159,29.3786332263582,"Sabanci Universitesi, Preveze Cad., Orta Mahallesi, Tepeören, Tuzla, İstanbul, Marmara Bölgesi, 34953, Türkiye"
+Sakarya University,40.76433515,30.3940787517111,"Sakarya Üniversitesi Diş Hekimliği Fakültesi, Adnan Menderes Caddesi, Güneşler, Adapazarı, Sakarya, Marmara Bölgesi, 54050, Türkiye"
+"San Jose State University, San Jose, CA",37.3351908,-121.881260081527,"SJSU, El Paseo de Cesar E. Chavez, Downtown Historic District, Japantown, San José, Santa Clara County, California, 95113, USA"
+"Santa Clara University, Santa Clara, CA. 95053, USA",37.34820285,-121.935635412063,"Cowell Center, Accolti Way, Santa Clara, Santa Clara County, California, 95053, USA"
+Santa Fe Institute,35.7002878,-105.908648471331,"Santa Fe Institute, Hyde Park Road, Santa Fe, Santa Fe County, New Mexico, 87501, USA"
+"School, The University of Sydney, Sydney, NSW, Australia",-33.8893229,151.180068,"Royal Prince Alfred Hospital School, 57-59, Grose Street, Camperdown, Sydney, NSW, 2050, Australia"
+"Science, University of Amsterdam",52.3553655,4.9501644,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland"
+Semarang State University,-7.00349485,110.417749486905,"Mandiri University, Jalan Tambora, RW 10, Tegalsari, Candisari, Semarang, Jawa Tengah, 50252, Indonesia"
+Seoul National University,37.26728,126.9841151,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국"
+Shaheed Zulfikar Ali Bhutto Institute of,24.8186587,67.0316585,"Shaheed Zulfikar Ali Bhutto Institute of Science and Technology - Karachi Campus, Block 5, Clifton Block 5, CBC, ڪراچي Karachi, Karāchi District, سنڌ, 75600, ‏پاکستان‎"
+Shandong University of Science and Technology,36.00146435,120.116240565627,"山东科技大学, 579, 前湾港路, 牛王庙, 北下庄, 黄岛区 (Huangdao), 青岛市, 山东省, 266500, 中国"
+Shanghai Jiao Tong University,31.20081505,121.428406809373,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国"
+"Shanghai Jiao Tong University, China",31.20081505,121.428406809373,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国"
+"Shanghai Jiao Tong University, Shanghai 200240, China",31.02775885,121.432219256081,"上海交通大学(闵行校区), 宣怀大道, 紫竹科技园区, 英武, 闵行区, 上海市, 200240, 中国"
+"Sharda University, Greater Noida, India",28.4737512,77.4836148,"Sharda University, Yamuna Expressway, Greater Noida, Gautam Buddha Nagar, Uttar Pradesh, 201308, India"
+Sharif University of Technology,35.7036227,51.351250969544,"دانشگاه صنعتی شریف, خیابان آزادی, زنجان, منطقه ۹ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, 14588, ‏ایران‎"
+"Sharif University of Technology, Tehran. Iran",35.7036227,51.351250969544,"دانشگاه صنعتی شریف, خیابان آزادی, زنجان, منطقه ۹ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, 14588, ‏ایران‎"
+Shenzhen Institutes of Advanced Technology,22.59805605,113.985337841399,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国"
+"Shenzhen University, Shenzhen, China",22.53521465,113.931591101679,"深圳大学, 3688, 南海大道, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518060, 中国"
+Shiraz University,29.6385474,52.5245706,"دانشگاه شیراز, میدان ارم, محدوده شهرداری منطقه یک - شهرداری شیراز, شیراز, بخش مرکزی شهرستان شیراز, شهرستان شیراز, استان فارس, 71348-34689, ‏ایران‎"
+Simon Fraser University,49.2767454,-122.917773749103,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada"
+Sinhgad College of,19.0993293,74.7691424,"SINHGAD, NH61, Foi, Ahmadnagar, Ahmednagar, Maharashtra, 414001, India"
+South China University of China,23.0490047,113.3971571,"华工站, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国"
+South China University of Technology,23.0502042,113.398803226836,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国"
+South College Road,39.2715228,-76.6936807,"South College Road, Beechfield, Baltimore, Maryland, 21229, USA"
+"Southeast University, Nanjing, China",32.0575279,118.786822520439,"SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国"
+Southwest Jiaotong University,30.697847,104.0520811,"西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国"
+"Southwest Jiaotong University, Chengdu, China",30.697847,104.0520811,"西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国"
+"Southwest Jiaotong University, Chengdu, P.R. China",30.697847,104.0520811,"西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国"
+"Southwest University, China",29.82366295,106.420500156445,"西南大学, 天生路, 北碚区 (Beibei), 北碚区, 北碚区 (Beibei), 重庆市, 400711, 中国"
+"Southwest University, Chongqing 400715, China",29.82366295,106.420500156445,"西南大学, 天生路, 北碚区 (Beibei), 北碚区, 北碚区 (Beibei), 重庆市, 400711, 中国"
+"Sri krishna College of Technology, Coimbatore, India",10.925861,76.9224672855261,"Sri Krishna College of Technology, Kovaipudur to Golf Course Road dirt track, Ward 89, South Zone, Coimbatore, Coimbatore district, Tamil Nadu, 641001, India"
+"Stamford University Bangladesh, Dhaka-1209, Bangladesh",23.7448166,90.4084351355108,"Stamford University Bangladesh, Siddeshwari Road, ফকিরাপুল, Paltan, ঢাকা, ঢাকা বিভাগ, 1217, বাংলাদেশ"
+Stanford University,37.43131385,-122.169365354983,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA"
+"Stanford University, CA",37.43131385,-122.169365354983,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA"
+"Stanford University, CA, United States",37.43131385,-122.169365354983,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA"
+"Stanford University, Stanford, CA, USA",37.43131385,-122.169365354983,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA"
+"Stanford University, Stanford, California",37.43131385,-122.169365354983,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA"
+"Stanford University, USA",37.43131385,-122.169365354983,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA"
+"State University of New York Polytechnic Institute, Utica, New York",43.13800205,-75.2294359077068,"State University of New York Polytechnic Institute, 100, Seymour Road, Maynard, Town of Marcy, Oneida County, New York, 13502, USA"
+State University of New York at Binghamton,42.08779975,-75.9706606561486,"State University of New York at Binghamton, East Drive, Hinman, Willow Point, Vestal Town, Broome County, New York, 13790, USA"
+State University of New York at Buffalo,42.95485245,-78.8178238693065,"University at Buffalo, The State University of New York, South Campus, Norton Circle, University Heights, Buffalo, Erie County, New York, 14226, USA"
+"Statistics, University of",32.0731522,72.6814703364947,"Department Of Statistics, University Road, Satellite Town, Cantonment, سرگودھا, Sargodha District, پنجاب, 40100, ‏پاکستان‎"
+Stevens Institute of Technology,40.742252,-74.0270949,"Stevens Institute of Technology, River Terrace, Hoboken, Hudson County, New Jersey, 07030, USA"
+Stony Brook University,40.9153196,-73.1270626,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA"
+Stony Brook University Hospital,40.90826665,-73.1152089127966,"Stony Brook University Hospital, 101, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA"
+"Stony Brook University, NY 11794, USA",40.9153196,-73.1270626,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA"
+"Stony Brook University, NY, USA",40.9153196,-73.1270626,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA"
+"Stony Brook University, Stony Brook NY 11794, USA",40.9153196,-73.1270626,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA"
+"Stony Brook University, Stony Brook, NY 11794, USA",40.9153196,-73.1270626,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA"
+"Stony Brook University, Stony Brook, USA",40.9153196,-73.1270626,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA"
+Sun Yat-Sen University,23.09461185,113.287889943975,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国"
+"Sun Yat-Sen University, Guangzhou, P.R. China",23.09461185,113.287889943975,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国"
+Sun Yat-sen University,23.09461185,113.287889943975,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国"
+SungKyunKwan University,37.3003127,126.972123,"성균관대, 덕영대로, 천천동, 장안구, 수원시, 경기, 16357, 대한민국"
+Sungkyunkwan University,37.3003127,126.972123,"성균관대, 덕영대로, 천천동, 장안구, 수원시, 경기, 16357, 대한민국"
+Swansea University,51.6091578,-3.97934429228629,"Swansea University, University Footbridge, Sketty, Swansea, Wales, SA2 8PZ, UK"
+Swiss Federal Institute of Technology,47.3764534,8.54770931489751,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra"
+THE UNIVERSITY OF ARIZONA,32.2351726,-110.950958317648,"University of Arizona, North Highland Avenue, Rincon Heights, Barrio Viejo, Tucson, Pima County, Arizona, 85721, USA"
+THE UNIVERSITY OF CHICAGO,41.78468745,-87.6007493265106,"University of Chicago, South Ellis Avenue, Woodlawn, Chicago, Cook County, Illinois, 60637, USA"
+Tafresh University,34.68092465,50.0534135183902,"دانشگاه تفرش, پاسداران, خرازان, بخش مرکزی, شهرستان تفرش, استان مرکزی, ‏ایران‎"
+"Tafresh University, Tafresh, Iran",34.68092465,50.0534135183902,"دانشگاه تفرش, پاسداران, خرازان, بخش مرکزی, شهرستان تفرش, استان مرکزی, ‏ایران‎"
+"Tamkang University, Taipei, Taiwan",25.17500615,121.450767514156,"淡江大學, 151, 英專路, 中興里, 鬼仔坑, 淡水區, 新北市, 25137, 臺灣"
+Tampere University of Technology,61.44964205,23.8587746189096,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi"
+"Tampere University of Technology, Finland",61.44964205,23.8587746189096,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi"
+"Tampere University of Technology, Tampere 33720, Finland",61.44964205,23.8587746189096,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi"
+"Tampere University of Technology, Tampere, Finland",61.44964205,23.8587746189096,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi"
+"Technical University Munich, Germany",48.14955455,11.5677531417838,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland"
+Technion Israel Institute of Technology,32.7767536,35.0241452903301,"הטכניון - מכון טכנולוגי לישראל, דוד רוז, חיפה, קרית הטכניון, חיפה, מחוז חיפה, NO, ישראל"
+"Technological University, Davanagere, Karnataka, India",14.4525199,75.9179512,"UBDT College of Engineering, College Private Road, K.T. Jambanna Nagara, Davanagere, Davanagere taluku, Davanagere district, Karnataka, 577000, India"
+Tel Aviv University,32.1119889,34.8045970204252,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל"
+"Tel Aviv University, Israel",32.1119889,34.8045970204252,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל"
+"Tel-Aviv University, Israel",32.1119889,34.8045970204252,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל"
+Temple University,39.95472495,-75.1534690525548,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA"
+"Temple University, Philadelphia, PA 19122, USA",39.9808569,-75.149594,"Temple University, West Berks Street, Hartranft, Philadelphia, Philadelphia County, Pennsylvania, 19122, USA"
+"Temple University, Philadelphia, USA",39.95472495,-75.1534690525548,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA"
+Thapar University,30.35566105,76.3658164148513,"Thapar University, Hostel Road, Patiala, Punjab, 147001, India"
+The American University in Cairo,30.04287695,31.2366413899265,"الجامعة الأمريكية بالقاهرة, شارع القصر العينى, القاهرة القديمة, جاردن سيتي, القاهرة, محافظة القاهرة, 11582, مصر"
+"The American University in Cairo, Egypt",30.04287695,31.2366413899265,"الجامعة الأمريكية بالقاهرة, شارع القصر العينى, القاهرة القديمة, جاردن سيتي, القاهرة, محافظة القاهرة, 11582, مصر"
+The Australian National University,-37.81354365,144.971791681654,"Australian National University, 52, Collins Street, Melbourne City, City of Melbourne, Victoria, 3000, Australia"
+"The Australian National University Canberra ACT 2601, Australia",-35.28121335,149.11665331324,"Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia"
+"The Australian National University, Canberra, Australia",-35.28121335,149.11665331324,"Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia"
+"The Chinese University of Hong Kong, China",22.42031295,114.207886442805,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国"
+"The Chinese University of Hong Kong, Hong Kong",22.42031295,114.207886442805,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国"
+"The Chinese University of Hong Kong, New Territories, Hong Kong",22.413656,114.2099405,"香港中文大學 Chinese University of Hong Kong, 車站路 Station Road, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国"
+The City College and the Graduate Center,37.76799565,-122.400099572569,"Graduate Center, 184, Hooper Street, Mission Bay, SF, California, 94158, USA"
+"The City College of New York, New York, NY 10031, USA",40.81819805,-73.9510089793336,"CCNY, 160, Convent Avenue, Manhattanville, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10031, USA"
+The City University of New York,40.8722825,-73.8948917141949,"Lehman College of the City University of New York, 250, Bedford Park Boulevard West, Bedford Park, The Bronx, Bronx County, NYC, New York, 10468, USA"
+The Education University of Hong Kong,22.46935655,114.19474193618,"香港教育大學 The Education University of Hong Kong, 露屏路 Lo Ping Road, 鳳園 Fung Yuen, 下坑 Ha Hang, 新界 New Territories, HK, DD5 1119, 中国"
+The Florida State University,30.44235995,-84.2974786716626,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA"
+"The Hebrew University of Jerusalem, Israel",31.7918555,35.244723,"האוניברסיטה העברית בירושלים, Reagan Plaza, קרית מנחם בגין, הר הצופים, ירושלים, מחוז ירושלים, NO, ישראל"
+The Hong Kong Polytechnic University,22.304572,114.179762852269,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国"
+"The Hong Kong Polytechnic University, Hong Kong, China",22.304572,114.179762852269,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国"
+The Hong Kong University of Science and Technology,22.3386304,114.2620337,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国"
+The Institute of Electronics,12.8447999,77.6632389626693,"International Institute of Information Technology Bangalore - IIITB, Infosys Avenue, Konappana Agrahara, Electronics City Phase 1, Vittasandra, Bangalore Urban, Karnataka, 560100, India"
+The Ohio State University,40.00471095,-83.0285936787604,"The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA"
+"The Ohio State University, Columbus, OH, USA",40.00471095,-83.0285936787604,"The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA"
+"The Ohio State University, OH",40.00471095,-83.0285936787604,"The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA"
+The Open University,52.02453775,-0.709274809394501,"The Open University, East Lane, Walton, Monkston, Milton Keynes, South East, England, MK7 6AE, UK"
+The Open University of Israel,32.77824165,34.9956567288188,"האוניברסיטה הפתוחה, 15, אבא חושי, חיפה, גבעת דאונס, חיפה, מחוז חיפה, NO, ישראל"
+The Robotics Institute,13.65450525,100.494231705059,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย"
+The State University of New Jersey,40.51865195,-74.4409980124119,"Rutgers New Brunswick: Livingston Campus, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA"
+"The University of Adelaide, Australia",-34.9189226,138.604236675404,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia"
+The University of British Columbia,49.25839375,-123.246581610019,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada"
+The University of Cambridge,52.17638955,0.143088815415187,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK"
+The University of Electro-Communications,35.6572957,139.542558677257,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本"
+"The University of Electro-Communications, JAPAN",35.6572957,139.542558677257,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本"
+"The University of Electro-Communications, Tokyo",35.6572957,139.542558677257,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本"
+The University of Hong Kong,22.2081469,114.259641148719,"海洋科學研究所 The Swire Institute of Marine Science, 鶴咀道 Cape D'Aguilar Road, 鶴咀低電台 Cape D'Aguilar Low-Level Radio Station, 石澳 Shek O, 芽菜坑村 Nga Choy Hang Tsuen, 南區 Southern District, 香港島 Hong Kong Island, HK, 中国"
+The University of Manchester,53.46600455,-2.23300880782987,"University of Manchester - Main Campus, Brunswick Street, Curry Mile, Ardwick, Manchester, Greater Manchester, North West England, England, M13 9NR, UK"
+The University of Maryland,39.2899685,-76.6219610316858,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA"
+The University of Newcastle,-33.3578899,151.37834708231,"University of Newcastle Central Coast Campus, Technology Bridge, Ourimbah, Central Coast, NSW, 2258, Australia"
+"The University of Newcastle, Callaghan 2308, Australia",-32.8930923,151.705656,"University of Newcastle, Huxley Library, University Drive, Callaghan, Newcastle-Maitland, Newcastle, NSW, 2308, Australia"
+The University of North Carolina at Charlotte,35.3103441,-80.732616166699,"Lot 20, Poplar Terrace Drive, Charlotte, Mecklenburg County, North Carolina, 28223, USA"
+"The University of North Carolina at Charlotte, USA",35.3103441,-80.732616166699,"Lot 20, Poplar Terrace Drive, Charlotte, Mecklenburg County, North Carolina, 28223, USA"
+"The University of North Carolina, Chapel Hill",35.90503535,-79.0477532652511,"University of North Carolina, Emergency Room Drive, Chapel Hill, Orange County, North Carolina, 27599, USA"
+The University of Nottingham,52.9387428,-1.20029569274574,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK"
+"The University of Nottingham, UK",52.9387428,-1.20029569274574,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK"
+The University of Sydney,-33.88890695,151.189433661925,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia"
+"The University of Sydney, NSW 2006, Australia",-33.88890695,151.189433661925,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia"
+"The University of Sydney, Sydney, Australia",-33.88890695,151.189433661925,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia"
+"The University of Tennessee, Knoxville",35.9542493,-83.9307395,"University of Tennessee, Melrose Avenue, Fort Sanders, Knoxville, Knox County, Tennessee, 37916, USA"
+The University of Texas,32.3163078,-95.2536994379459,"The University of Texas at Tyler, 3900, University Boulevard, Tyler, Smith County, Texas, 75799, USA"
+The University of Texas at,32.3163078,-95.2536994379459,"The University of Texas at Tyler, 3900, University Boulevard, Tyler, Smith County, Texas, 75799, USA"
+The University of Texas at Austin,30.284151,-97.7319559808022,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA"
+"The University of Texas at Dallas, Richardson, TX",32.9820799,-96.7566278,"University of Texas at Dallas, Richardson, Dallas County, Texas, 78080, USA"
+The University of Tokyo,35.9020448,139.936220089117,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本"
+"The University of Tokyo, Japan",35.9020448,139.936220089117,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本"
+The University of Western Australia,-31.95040445,115.797900374251,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia"
+The University of York,53.94540365,-1.0313887829649,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK"
+"The University of York, Heslington, York YO10 5DD, United Kingdom",53.94830175,-1.05154975017361,"Campus Central Car Park, University Road, Heslington, York, Yorkshire and the Humber, England, YO10 5NH, UK"
+"The University of York, UK",53.94540365,-1.0313887829649,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK"
+"The University of York, United Kingdom",53.94540365,-1.0313887829649,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK"
+The University of the Humanities,47.9218937,106.919552402206,"Хүмүүнлэгийн ухааны их сургууль, Ж.Самбуугийн гудамж, Гандан, Улаанбаатар, 975, Монгол улс"
+The Weizmann Institute of,31.904187,34.807378,"מכון ויצמן, הרצל, מעונות וולפסון, נווה עמית, רחובות, מחוז המרכז, NO, ישראל"
+The Weizmann Institute of Science,31.9078499,34.8133409244421,"מכון ויצמן למדע, שדרת מרכוס זיו, מעונות שיין, אחוזות הנשיא, רחובות, מחוז המרכז, NO, ישראל"
+"Tohoku University, Japan",38.2530945,140.8736593,"Tohoku University, 五橋通, 青葉区, 仙台市, 宮城県, 東北地方, 980-0811, 日本"
+"Tohoku University, Sendai, Japan",38.2530945,140.8736593,"Tohoku University, 五橋通, 青葉区, 仙台市, 宮城県, 東北地方, 980-0811, 日本"
+Tokyo Denki University,35.6572957,139.542558677257,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本"
+Tokyo Institute of Technology,35.5167538,139.483422513406,"東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本"
+"Tokyo Institute of Technology, Japan",35.5167538,139.483422513406,"東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本"
+Tokyo Metropolitan University,35.6200925,139.38296706394,"首都大学東京, 由木緑道, 八王子市, 東京都, 関東地方, 1920364, 日本"
+Tomsk Polytechnic University,56.46255985,84.955654946724,"Томский политехнический университет, улица Пирогова, Южная, Кировский район, Томск, городской округ Томск, Томская область, СФО, 634034, РФ"
+Tongji University,31.28473925,121.496949085887,"同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国"
+"Tongji University, Shanghai 201804, China",31.28473925,121.496949085887,"同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国"
+"Toyota Technological Institute (Chicago, US",41.7847112,-87.5926056707507,"Toyota Technological Institute, 6045, South Kenwood Avenue, Woodlawn, Chicago, Cook County, Illinois, 60637, USA"
+Tsinghua University,40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+"Tsinghua University, 100084 Beijing, China",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+"Tsinghua University, Beijing 100084, China",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+"Tsinghua University, Beijing 100084, P.R.China",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+"Tsinghua University, Beijing, China",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+"Tsinghua University, China",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+UNIVERSITY IN PRAGUE,50.0714761,14.4542642,"Business Institut EDU, Kodaňská, Vršovice, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 10100, Česko"
+UNIVERSITY OF CALIFORNIA,37.87631055,-122.238859269443,"UC Berkeley, Centennial Drive, Oakland, Alameda County, California, 94720-1076, USA"
+"UNIVERSITY OF CALIFORNIA, BERKELEY",37.8687126,-122.255868148743,"Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA"
+"UNIVERSITY OF CALIFORNIA, SAN DIEGO",32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+UNIVERSITY OF OULU,65.0592157,25.466326012507,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi"
+UNIVERSITY OF TAMPERE,61.49412325,23.7792067776763,"Tampereen yliopisto, 4, Kalevantie, Ratinanranta, Tulli, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33100, Suomi"
+UNIVERSITY OF TARTU,58.38131405,26.7207808104523,"Paabel, University of Tartu, 17, Ülikooli, Kesklinn, Tartu linn, Tartu, Tartu linn, Tartu maakond, 53007, Eesti"
+UNIVERSITY OF WISCONSIN MADISON,43.07982815,-89.4306642542901,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA"
+"Ulm University, Germany",48.38044335,10.0101011516362,"HNU, John-F.-Kennedy-Straße, Vorfeld, Wiley, Neu-Ulm, Landkreis Neu-Ulm, Schwaben, Bayern, 89231, Deutschland"
+University,51.7520849,-1.25166460220888,"University College, Logic Lane, Grandpont, Oxford, Oxon, South East, England, OX1 4EX, UK"
+University Politehnica of Bucharest,44.43918115,26.0504456538413,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România"
+University (ITU,55.65965525,12.5910768893446,"IT-Universitetet i København, Emil Holms Kanal, Christianshavn, København, Københavns Kommune, Region Hovedstaden, 1424, Danmark"
+"University City Blvd., Charlotte, NC",35.312224,-80.7084736,"University City Boulevard, Charlotte, Mecklenburg County, North Carolina, 28223, USA"
+University College London,51.5231607,-0.1282037,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK"
+"University College London, London WC1N 3BG, United Kingdom",51.5231607,-0.1282037,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK"
+"University College London, London, UK",51.5231607,-0.1282037,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK"
+"University College London, UK",51.5231607,-0.1282037,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK"
+University Drive,-21.1753214,149.1432747,"University Drive, Ooralea, Mackay, QLD, 4740, Australia"
+"University Drive, Fairfax, VA 22030-4444, USA",38.835411,-77.316447,"University Drive, Ardmore, Fairfax, Fairfax County, Virginia, 22030, USA"
+University Institute of Engineering and Technology,26.9302879,80.9278433,"Maharishi University Of Information Technology, NH230, Jankipuram, Lucknow, Uttar Pradesh, 226021, India"
+"University Library, Singapore",1.30604775,103.7728987705,"University Town, College Avenue East, Rochester Hill, Clementi, Southwest, 138608, Singapore"
+University Of California San Diego,32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+University Of Maryland,39.2899685,-76.6219610316858,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA"
+"University POLITEHNICA Timisoara, Timisoara, 300223, Romania",45.746189,21.2275507517647,"UPT, Bulevardul Vasile Pârvan, Elisabetin, Timișoara, Timiș, 300223, România"
+"University POLITEHNICA of Bucharest, Bucharest, Romania",44.43918115,26.0504456538413,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România"
+University Politehnica of Bucharest,44.43918115,26.0504456538413,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România"
+"University Politehnica of Bucharest, Romania",44.43918115,26.0504456538413,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România"
+University of,29.3758342,71.7528712910287,"University of ..., University Road, بہاولپور, Bahāwalpur District, پنجاب, 63100, ‏پاکستان‎"
+University of Aberdeen,57.1646143,-2.10186013407315,"University of Aberdeen, High Street, Old Aberdeen, Aberdeen, Aberdeen City, Scotland, AB24 3EJ, UK"
+University of Abertay,56.46323375,-2.97447511707098,"Abertay University, Bell Street, City Centre, Dundee, Dundee City, Scotland, DD1 1HG, UK"
+University of Adelaide,-34.9189226,138.604236675404,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia"
+"University of Adelaide, Australia",-34.9189226,138.604236675404,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia"
+"University of Adelaide, SA, Australia",-34.9189226,138.604236675404,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia"
+"University of Aizu, Japan",37.5236728,139.938072464124,"会津大学, 磐越自動車道, 会津若松市, 福島県, 東北地方, 965-8580, 日本"
+"University of Akron, Akron",41.0789035,-81.5197127229943,"University of Akron, East State Street, Stadium District, Cascade Valley, Akron, Summit County, Ohio, 44308, USA"
+"University of Alberta, Edmonton, Canada",53.5238572,-113.522826652346,"University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada"
+University of Amsterdam,52.3553655,4.9501644,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland"
+"University of Amsterdam, Amsterdam, The",52.3553655,4.9501644,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland"
+"University of Amsterdam, Amsterdam, The Netherlands",52.3553655,4.9501644,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland"
+"University of Amsterdam, The Netherlands",52.3553655,4.9501644,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland"
+"University of Amsterdam, the Netherlands",52.3553655,4.9501644,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland"
+University of Arizona,32.2351726,-110.950958317648,"University of Arizona, North Highland Avenue, Rincon Heights, Barrio Viejo, Tucson, Pima County, Arizona, 85721, USA"
+University of Arkansas at Little Rock,34.72236805,-92.3383025526859,"University of Arkansas At Little Rock (UALR), 2801, U A L R Campus Drive, Little Rock, Pulaski County, Arkansas, 72204, USA"
+University of Barcelona,41.3868913,2.16352384576632,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España"
+"University of Barcelona, Spain",41.3868913,2.16352384576632,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España"
+University of Basel,47.5612651,7.5752961,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra"
+"University of Basel, Switzerland",47.5612651,7.5752961,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra"
+University of Bath,51.3791442,-2.3252332,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK"
+"University of Bath, Bath, Somerset, United Kingdom",51.3791442,-2.3252332,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK"
+"University of Bath, Bath, United Kingdom",51.3791442,-2.3252332,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK"
+University of Birmingham,52.45044325,-1.93196134052244,"University of Birmingham Edgbaston Campus, Ring Road North, Bournbrook, Birmingham, West Midlands Combined Authority, West Midlands, England, B15 2TP, UK"
+University of Bonn,50.7338124,7.1022465,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland"
+"University of Bonn, Germany",50.7338124,7.1022465,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland"
+University of Brescia,37.7689374,-87.1113859,"Brescia University, West 7th Street, Owensboro, Daviess County, Kentucky, 42303, USA"
+University of Bridgeport,41.1664858,-73.1920564,"University of Bridgeport, Park Avenue, Bridgeport Downtown South Historic District, Bridgeport, Fairfield County, Connecticut, 06825, USA"
+University of Bristol,51.4584837,-2.60977519828372,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK"
+"University of Bristol, Bristol, BS8 1UB, UK",51.4562363,-2.602779,"University of Bristol, Cantock's Close, Kingsdown, Canon's Marsh, Bristol, City of Bristol, South West England, England, BS8, UK"
+"University of Bristol, Bristol, UK",51.4584837,-2.60977519828372,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK"
+University of British Columbia,49.25839375,-123.246581610019,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada"
+University of Buffalo,40.7021766,-99.0985061173294,"University of Nebraska at Kearney, 2504, 9th Avenue, Kearney, Buffalo County, Nebraska, 68849, USA"
+University of Caen,35.0274996,135.781545126193,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本"
+University of California,37.87631055,-122.238859269443,"UC Berkeley, Centennial Drive, Oakland, Alameda County, California, 94720-1076, USA"
+University of California Berkeley,37.87631055,-122.238859269443,"UC Berkeley, Centennial Drive, Oakland, Alameda County, California, 94720-1076, USA"
+University of California Berkeley,37.87631055,-122.238859269443,"UC Berkeley, Centennial Drive, Oakland, Alameda County, California, 94720-1076, USA"
+University of California Davis,38.5336349,-121.790772639747,"University of California, Davis, Apiary Drive, Yolo County, California, 95616-5270, USA"
+University of California San Diego,32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+University of California Santa Barbara,34.4145937,-119.84581949869,"UCSB, Santa Barbara County, California, 93106, USA"
+"University of California, Berkeley",37.8687126,-122.255868148743,"Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA"
+"University of California, Berkeley, Berkeley CA 94720, USA",37.8756681,-122.257979979865,"Goldman School of Public Policy, Hearst Avenue, Northside, Berkeley, Alameda County, California, 94720, USA"
+"University of California, Irvine",33.6431901,-117.84016493553,"University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA"
+"University of California, Irvine, USA",33.6431901,-117.84016493553,"University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA"
+"University of California, Merced",37.36566745,-120.421588883632,"University of California, Merced, Ansel Adams Road, Merced County, California, USA"
+"University of California, Merced, CA 95344, USA",37.36566745,-120.421588883632,"University of California, Merced, Ansel Adams Road, Merced County, California, USA"
+"University of California, Merced, USA",37.36566745,-120.421588883632,"University of California, Merced, Ansel Adams Road, Merced County, California, USA"
+"University of California, Riverside",33.98071305,-117.332610354677,"University of California, Riverside, Linden Street, Riverside, Riverside County, California, 92521, USA"
+"University of California, Riverside CA 92521-0425, USA",33.9743275,-117.32558236636,"UCR, North Campus Drive, Riverside, Riverside County, California, 92521, USA"
+"University of California, Riverside, California 92521, USA",33.9743275,-117.32558236636,"UCR, North Campus Drive, Riverside, Riverside County, California, 92521, USA"
+"University of California, San Diego",32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+"University of California, San Diego, California, USA",32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+"University of California, San Diego, La Jolla",32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+"University of California, San Diego, USA",32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+"University of California, Santa Barbara",34.4145937,-119.84581949869,"UCSB, Santa Barbara County, California, 93106, USA"
+University of Cambridge,52.17638955,0.143088815415187,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK"
+"University of Cambridge, United Kingdom",52.17638955,0.143088815415187,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK"
+University of Campinas,-27.5953995,-48.6154218,"USJ, 97, Rua Sílvia Maria Fabro, Kobrasol, Campinas, São José, Microrregião de Florianópolis, Mesorregião da Grande Florianópolis, SC, Região Sul, 88102-130, Brasil"
+University of Campinas (Unicamp,-22.8224781,-47.0642599309425,"Universidade Estadual de Campinas - UNICAMP, Rua Josué de Castro, Barão Geraldo, Campinas, Microrregião de Campinas, RMC, Mesorregião de Campinas, SP, Região Sudeste, 13083-970, Brasil"
+University of Canberra,-35.23656905,149.084469935058,"University of Canberra, University Drive, Bruce, Belconnen, Australian Capital Territory, 2617, Australia"
+"University of Canterbury, New Zealand",-43.5240528,172.580306253669,"University of Canterbury, Uni-Cycle, Ilam, Christchurch, Christchurch City, Canterbury, 8040, New Zealand/Aotearoa"
+University of Cape Town,-33.95828745,18.4599734888018,"University of Cape Town, Engineering Mall, Cape Town Ward 59, Cape Town, City of Cape Town, Western Cape, CAPE TOWN, South Africa"
+"University of Cape Town, South Africa",-33.95828745,18.4599734888018,"University of Cape Town, Engineering Mall, Cape Town Ward 59, Cape Town, City of Cape Town, Western Cape, CAPE TOWN, South Africa"
+University of Central Florida,28.59899755,-81.1971250118395,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA"
+"University of Central Florida, Orlando",28.42903955,-81.4421617727936,"Rosen College of Hospitality Management, 9907, Universal Boulevard, Orange County, Florida, 32819, USA"
+"University of Central Florida, Orlando, USA",28.42903955,-81.4421617727936,"Rosen College of Hospitality Management, 9907, Universal Boulevard, Orange County, Florida, 32819, USA"
+"University of Central Florida, USA",28.59899755,-81.1971250118395,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA"
+"University of Central Punjab, Pakistan",31.4466149,74.2679762,"University of Central Punjab, Khyaban-e-Jinnah, PECHS, Wapda Town, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54000, ‏پاکستان‎"
+University of Chinese Academy of Sciences,39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+University of Chinese Academy of Sciences (UCAS,39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+"University of Chinese Academy of Sciences (UCAS), Beijing, 100049, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+"University of Chinese Academy of Sciences, Beijing 100190, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+"University of Chinese Academy of Sciences, Beijing 101408, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+"University of Chinese Academy of Sciences, Beijing, 100049, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+"University of Chinese Academy of Sciences, Beijing, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+"University of Chinese Academy of Sciences, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+"University of Coimbra, Portugal",40.2075951,-8.42566147540816,"Reitoria da Universidade de Coimbra, Rua de Entre-Colégios, Almedina, Alta, Almedina, Sé Nova, Santa Cruz, Almedina e São Bartolomeu, CBR, Coimbra, Baixo Mondego, Centro, 3000-062, Portugal"
+University of Colorado Colorado Springs,38.8920756,-104.797163894584,"Main Hall, The Spine, Colorado Springs, El Paso County, Colorado, 80907, USA"
+"University of Colorado, Boulder",40.01407945,-105.266959437621,"Naropa University, Arapahoe Avenue, The Hill, Boulder, Boulder County, Colorado, 80309, USA"
+University of Connecticut,41.8093779,-72.2536414,"University of Connecticut, Glenbrook Road, Storrs, Tolland County, Connecticut, 06269, USA"
+University of Copenhagen,55.6801502,12.5723270014063,"Københavns Universitet, Krystalgade, Kødbyen, Vesterbro, København, Københavns Kommune, Region Hovedstaden, 1165, Danmark"
+"University of Crete, Crete, 73100, Greece",35.3713024,24.4754408,"House of Europe, Μακεδονίας, Ρέθυμνο, Δήμος Ρεθύμνης, Περιφερειακή Ενότητα Ρεθύμνου, Περιφέρεια Κρήτης, Κρήτη, 930100, Ελλάδα"
+"University of Dammam, Saudi Arabia",26.39793625,50.1980792430511,"University of Dammam, King Faisal Rd, العقربية, الخبر, المنطقة الشرقية, ٣١٩٥٢, السعودية"
+"University of Delaware, Newark, DE. USA",39.6810328,-75.7540184,"University of Delaware, South College Avenue, Newark, New Castle County, Delaware, 19713, USA"
+"University of Denver, Denver, CO",39.6766541,-104.962203,"University of Denver, Driscoll Bridge, Denver, Denver County, Colorado, 80208, USA"
+University of Dhaka,23.7316957,90.3965275,"World War Memorial, Shahid Minar Rd, Jagannath Hall, DU, জিগাতলা, ঢাকা, ঢাকা বিভাগ, 1000, বাংলাদেশ"
+"University of Dhaka, Bangladesh",23.7316957,90.3965275,"World War Memorial, Shahid Minar Rd, Jagannath Hall, DU, জিগাতলা, ঢাকা, ঢাকা বিভাগ, 1000, বাংলাদেশ"
+"University of Dschang, Cameroon",5.4409448,10.0712056113589,"Université de Dschang, Départementale 65, Fokoué, Menoua, OU, Cameroun"
+University of Dundee,56.45796755,-2.98214831353755,"University of Dundee, Park Wynd, Law, Dundee, Dundee City, Scotland, DD1 4HN, UK"
+University of Edinburgh,55.94951105,-3.19534912525441,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK"
+"University of Edinburgh, Edinburgh, UK",55.94951105,-3.19534912525441,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK"
+University of Engineering and Technology,31.6914689,74.2465617,"University of Engineering and Technology, Lahore Bypass, لاہور, Shekhūpura District, پنجاب, ‏پاکستان‎"
+University of Exeter,50.7369302,-3.53647671702167,"University of Exeter, Stocker Road, Exwick, Exeter, Devon, South West England, England, EX4 4QN, UK"
+"University of Exeter, UK",50.7369302,-3.53647671702167,"University of Exeter, Stocker Road, Exwick, Exeter, Devon, South West England, England, EX4 4QN, UK"
+University of Florida,29.6328784,-82.3490133048243,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA"
+"University of Florida, Gainesville, FL, 32611, USA",29.6447739,-82.3575193392276,"University of Florida, Museum Road, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32601, USA"
+University of Frankfurt,50.13053055,8.69234223934388,"Frankfurt University of Applied Sciences, Kleiststraße, Nordend West, Frankfurt, Regierungsbezirk Darmstadt, Hessen, 60318, Deutschland"
+University of Geneva,42.57054745,-88.5557862661765,"University of Chicago-Yerkes Observatory, 373, West Geneva Street, Williams Bay, Walworth County, Wisconsin, 53191, USA"
+University of Glasgow,55.87231535,-4.28921783557444,"University of Glasgow, University Avenue, Yorkhill, Hillhead, Glasgow, Glasgow City, Scotland, G, UK"
+University of Groningen,53.21967825,6.56251482206542,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland"
+"University of Groningen, Netherlands",53.21967825,6.56251482206542,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland"
+"University of Groningen, The Netherlands",53.21967825,6.56251482206542,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland"
+"University of Gujrat, Pakistan",32.63744845,74.1617455759799,"University of Gujrat, University Road, Chandhar, Gujrāt District, پنجاب, 50700, ‏پاکستان‎"
+"University of Haifa, Haifa, Israel",32.76162915,35.0198630428453,"אוניברסיטת חיפה, חיפה, מחוז חיפה, ישראל"
+University of Houston,29.7207902,-95.3440627149137,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA"
+"University of Houston, Houston, TX 77204, USA",29.7207902,-95.3440627149137,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA"
+"University of Houston, Houston, TX, USA",29.7207902,-95.3440627149137,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA"
+University of Iceland,64.137274,-21.9456145356869,"Háskóli Íslands, Sturlugata, Háskóli, Reykjavík, Reykjavíkurborg, Höfuðborgarsvæðið, 121, Ísland"
+University of Illinois,40.11116745,-88.2258766477716,"B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA"
+University of Illinois Urbana Champaign,40.11116745,-88.2258766477716,"B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA"
+University of Illinois at,40.1006938,-88.2313043272112,"University of Illinois at Urbana-Champaign, West Pennsylvania Avenue, West Urbana Residential Area, Urbana, Champaign County, Illinois, 61801, USA"
+University of Illinois at Chicago,41.86898915,-87.6485625597018,"University of Illinois at Chicago, West Taylor Street, Greektown, Chicago, Cook County, Illinois, 60607, USA"
+University of Illinois at Urbana,40.1006938,-88.2313043272112,"University of Illinois at Urbana-Champaign, West Pennsylvania Avenue, West Urbana Residential Area, Urbana, Champaign County, Illinois, 61801, USA"
+University of Illinois at Urbana Champaign,40.101976,-88.2314378,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA"
+"University of Illinois at Urbana Champaign, Urbana",40.101976,-88.2314378,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA"
+"University of Illinois at Urbana Champaign, Urbana, IL 61801, USA",40.1066501,-88.2240260725426,"University of Illinois at Urbana-Champaign, South Goodwin Avenue, Urbana, Champaign County, Illinois, 61801, USA"
+University of Illinois at Urbana-Champaign,40.101976,-88.2314378,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA"
+"University of Illinois at Urbana-Champaign, IL USA",40.101976,-88.2314378,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA"
+"University of Illinois at Urbana-Champaign, USA",40.101976,-88.2314378,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA"
+"University of Illinois at Urbana-Champaign, Urbana, IL",40.101976,-88.2314378,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA"
+"University of Illinois at Urbana-Champaign, Urbana, IL, USA",40.101976,-88.2314378,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA"
+"University of Illinois, Urbana-Champaign",40.11116745,-88.2258766477716,"B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA"
+University of Information,34.17980475,-117.325843648456,"Information, University Parkway, San Bernardino, San Bernardino County, California, 92407, USA"
+"University of Ioannina, Ioannina, Greece",39.6162306,20.8396301098796,"Πανεπιστήμιο Ιωαννίνων, Πανεπιστημίου, Κάτω Νεοχωρόπουλο, Νεοχωρόπουλο, Δήμος Ιωαννιτών, Π.Ε. Ιωαννίνων, Περιφέρεια Ηπείρου, Ήπειρος - Δυτική Μακεδονία, 45110, Ελλάδα"
+University of Iowa,41.6659,-91.573103065,"University of Iowa, Hawkeye Court, Iowa City, Johnson County, Iowa, 52246, USA"
+"University of Karlsruhe, Germany",49.00664235,8.39405151637065,"Karlshochschule International University, 36-38, Karlstraße, Innenstadt-West Westlicher Teil, Innenstadt-West, Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76133, Deutschland"
+University of Kentucky,38.0333742,-84.5017758,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA"
+"University of Kentucky, USA",38.0333742,-84.5017758,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA"
+University of Leeds,53.80387185,-1.55245712031677,"University of Leeds, Inner Ring Road, Woodhouse, Leeds, Yorkshire and the Humber, England, LS2 9NS, UK"
+"University of Lincoln, U.K",53.22853665,-0.548734723802121,"University of Lincoln, Brayford Way, Whitton Park, New Boultham, Lincoln, Lincolnshire, East Midlands, England, LN6 7TS, UK"
+"University of Lincoln, UK",53.22853665,-0.548734723802121,"University of Lincoln, Brayford Way, Whitton Park, New Boultham, Lincoln, Lincolnshire, East Midlands, England, LN6 7TS, UK"
+University of Ljubljana,46.0501558,14.4690732689076,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija"
+University of Ljubljana Faculty,46.0501558,14.4690732689076,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija"
+University of London,51.5217668,-0.130190717056655,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK"
+"University of Malaya, 50603 Kuala Lumpur, Malaysia",3.12267405,101.65356103394,"UM, Lingkaran Wawasan, Bukit Pantai, Bangsar, KL, 50603, Malaysia"
+"University of Malaya, Kuala Lumpur, Malaysia",3.12267405,101.65356103394,"UM, Lingkaran Wawasan, Bukit Pantai, Bangsar, KL, 50603, Malaysia"
+University of Malta,35.9023226,14.4834189,"University of Malta, Ring Road, Japanese Garden, L-Imsida, Malta, MSD 9027, Malta"
+University of Manitoba,49.8091536,-97.133041790072,"University of Manitoba, Gillson Street, Normand Park, Saint Vital, Winnipeg, Manitoba, R3T 2N2, Canada"
+University of Maryland,39.2899685,-76.6219610316858,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA"
+University of Maryland College Park,38.99203005,-76.9461029019905,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA"
+"University of Maryland-College Park, USA",38.99203005,-76.9461029019905,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA"
+University of Massachusetts,42.3889785,-72.5286987,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA"
+University of Massachusetts - Amherst,42.3869382,-72.5299147706745,"UMass Amherst, Commonwealth Avenue, Amherst, Hampshire, Massachusetts, 01003, USA"
+University of Massachusetts Amherst,42.3869382,-72.5299147706745,"UMass Amherst, Commonwealth Avenue, Amherst, Hampshire, Massachusetts, 01003, USA"
+"University of Massachusetts, Amherst",42.3889785,-72.5286987,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA"
+"University of Massachusetts, Amherst MA, USA",42.3889785,-72.5286987,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA"
+"University of Massachusetts, Amherst, MA",42.3889785,-72.5286987,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA"
+University of Memphis,35.1189387,-89.9372195996589,"The University of Memphis, Desoto Avenue, Memphis, Shelby County, Tennessee, 38152, USA"
+University of Miami,25.7173339,-80.2786688657706,"University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA"
+"University of Miami, Coral Gables, FL",25.7173339,-80.2786688657706,"University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA"
+"University of Miami, USA",25.7173339,-80.2786688657706,"University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA"
+University of Michigan,42.2942142,-83.710038935096,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA"
+"University of Michigan, Ann Arbor",42.2942142,-83.710038935096,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA"
+"University of Michigan, Ann Arbor, MI",42.2942142,-83.710038935096,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA"
+"University of Michigan, Ann Arbor, MI, USA",42.2942142,-83.710038935096,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA"
+"University of Michigan, Ann Arbor, USA",42.2942142,-83.710038935096,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA"
+University of Milan,38.6796662,-90.3262816,"Milan Avenue, Ray Mar Terrace, University City, St. Louis County, Missouri, 63130, USA"
+University of Minnesota,44.97308605,-93.2370881262941,"WeismanArt, 333, East River Parkway, Marcy-Holmes, Phillips, Minneapolis, Hennepin County, Minnesota, 55455, USA"
+"University of Missouri, Columbia, MO",38.926761,-92.2919378337447,"L1, Maguire Boulevard, Lemone Industrial Park, Columbia, Boone County, Missouri, 65201, USA"
+University of Nebraska - Lincoln,40.8174723,-96.7044468,"Sheldon Museum of Art, North 12th Street, West Lincoln, Lincoln, Lancaster County, Nebraska, 68588-0300, USA"
+"University of Nevada, Reno, Reno, NV, USA",39.5469449,-119.813465660936,"Orange 1, Evans Avenue, Reno, Washoe County, Nevada, 89557, USA"
+"University of Nevada, Reno, USA",39.5469449,-119.813465660936,"Orange 1, Evans Avenue, Reno, Washoe County, Nevada, 89557, USA"
+University of Newcastle,-33.3578899,151.37834708231,"University of Newcastle Central Coast Campus, Technology Bridge, Ourimbah, Central Coast, NSW, 2258, Australia"
+University of North Carolina,35.90503535,-79.0477532652511,"University of North Carolina, Emergency Room Drive, Chapel Hill, Orange County, North Carolina, 27599, USA"
+University of North Carolina Wilmington,34.2375581,-77.9270129,"Kenan House, 1705, Market Street, Wilmington, New Hanover County, North Carolina, 28403, USA"
+"University of North Carolina Wilmington, Wilmington, NC, USA",34.2377352,-77.92673494788,"Kenan House parking lot, Princess Street, Wilmington, New Hanover County, North Carolina, 28405, USA"
+University of North Carolina at Chapel Hill,35.9113971,-79.0504529,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA"
+"University of North Carolina at Chapel Hill, Chapel Hill, NC",35.9105975,-79.0517871,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA"
+"University of North Carolina at Chapel Hill, NC, USA",35.9113971,-79.0504529,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA"
+"University of North Carolina at Chapel Hill, USA",35.9113971,-79.0504529,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA"
+University of North Carolina at Charlotte,35.3103441,-80.732616166699,"Lot 20, Poplar Terrace Drive, Charlotte, Mecklenburg County, North Carolina, 28223, USA"
+University of North Texas,33.2098879,-97.1514748776857,"University of North Texas, West Highland Street, Denton, Denton County, Texas, 76201, USA"
+"University of North Texas, Denton, Texas, USA",33.2098879,-97.1514748776857,"University of North Texas, West Highland Street, Denton, Denton County, Texas, 76201, USA"
+University of Northern British Columbia,53.8925662,-122.814715920529,"UNBC, Campus Ring Road, College Heights, Prince George, Regional District of Fraser-Fort George, British Columbia, V2M 5K7, Canada"
+"University of Northern British Columbia, Canada",53.8925662,-122.814715920529,"UNBC, Campus Ring Road, College Heights, Prince George, Regional District of Fraser-Fort George, British Columbia, V2M 5K7, Canada"
+University of Notre Dame,41.70456775,-86.2382202601727,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA"
+"University of Notre Dame, USA",41.70456775,-86.2382202601727,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA"
+"University of Notre Dame. Notre Dame, IN 46556.USA",41.70456775,-86.2382202601727,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA"
+University of Nottingham,52.9387428,-1.20029569274574,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK"
+"University of Nottingham, Nottingham, UK",52.9387428,-1.20029569274574,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK"
+University of Oradea,47.0570222,21.922709,"Universitatea Creștină Partium - Clădirea Sulyok, 27, Strada Primăriei, Orașul Nou, Oradea, Bihor, 410209, România"
+University of Ottawa,45.42580475,-75.6874011819989,"University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada"
+"University of Ottawa, Canada",45.42580475,-75.6874011819989,"University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada"
+"University of Ottawa, Ottawa, On, Canada",45.42580475,-75.6874011819989,"University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada"
+University of Oulu,65.0592157,25.466326012507,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi"
+"University of Oulu, Finland",65.0592157,25.466326012507,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi"
+University of Oxford,51.7534538,-1.25400997048855,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK"
+"University of Oxford, Oxford, United Kingdom",51.7534538,-1.25400997048855,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK"
+"University of Oxford, UK",51.7534538,-1.25400997048855,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK"
+"University of Oxford, United Kingdom",51.7534538,-1.25400997048855,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK"
+"University of Patras, Greece",38.2899482,21.7886469,"Πανεπιστήμιο Πατρών, Λεωφ. Ιπποκράτους, κ. Ρίου (Αγίου Γεωργίου Ρίου), Πάτρα, Δήμος Πατρέων, Περιφερειακή Ενότητα Αχαΐας, Περιφέρεια Δυτικής Ελλάδας, Πελοπόννησος, Δυτική Ελλάδα και Ιόνιο, 26443, Ελλάδα"
+University of Pennsylvania,39.9492344,-75.191989851901,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA"
+University of Perugia,49.2622421,-123.2450052,"Caffe Perugia, 2350, Health Sciences Mall, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada"
+"University of Peshawar, Pakistan",34.0092004,71.4877494739102,"University of Peshawar, Road 2, JAHANGIR ABAD / جهانگیرآباد, پشاور‎, Peshāwar District, خیبر پختونخوا, 2500, ‏پاکستان‎"
+"University of Peshawar, Peshawar, Pakistan",34.0092004,71.4877494739102,"University of Peshawar, Road 2, JAHANGIR ABAD / جهانگیرآباد, پشاور‎, Peshāwar District, خیبر پختونخوا, 2500, ‏پاکستان‎"
+University of Piraeus,37.94173275,23.6530326182197,"Πανεπιστήμιο Πειραιώς, 80, Καραολή και Δημητρίου, Απόλλωνας, Νέο Φάληρο, Πειραιάς, Δήμος Πειραιώς, Περιφερειακή Ενότητα Πειραιώς, Περιφέρεια Αττικής, Αττική, 185 34, Ελλάδα"
+"University of Pisa, Pisa, Italy",43.7201299,10.4078976,"Dipartimento di Fisica 'E. Fermi', 3, Largo Bruno Pontecorvo, San Francesco, Pisa, PI, TOS, 56127, Italia"
+University of Pittsburgh,40.44415295,-79.9624399276271,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA"
+"University of Pittsburgh, PA, USA",40.44415295,-79.9624399276271,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA"
+"University of Pittsburgh, Pittsburgh",40.44415295,-79.9624399276271,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA"
+"University of Pittsburgh, Pittsburgh PA",40.4495417,-79.8957457221781,"Visitor Parking, Thomas Boulevard, Homewood, Point Breeze North, Wilkinsburg, Allegheny County, Pennsylvania, 15208, USA"
+"University of Pittsburgh, Pittsburgh, PA",40.4495417,-79.8957457221781,"Visitor Parking, Thomas Boulevard, Homewood, Point Breeze North, Wilkinsburg, Allegheny County, Pennsylvania, 15208, USA"
+"University of Pittsburgh, Pittsburgh, PA 15260, USA",40.4437547,-79.9529557,"Stephen Foster Memorial Museum, Forbes Avenue, Panther Hollow, Central Oakland, PGH, Allegheny County, Pennsylvania, 15213, USA"
+"University of Pittsburgh, Pittsburgh, PA, USA",40.4495417,-79.8957457221781,"Visitor Parking, Thomas Boulevard, Homewood, Point Breeze North, Wilkinsburg, Allegheny County, Pennsylvania, 15208, USA"
+"University of Pittsburgh, Pittsburgh, USA",40.44415295,-79.9624399276271,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA"
+"University of Plymouth, UK",50.3752501,-4.13927692297343,"Charles Seale-Hayne Library, Portland Square, Barbican, Plymouth, South West England, England, PL4 6AP, UK"
+University of Posts and Telecommunications,32.11527165,118.925956600436,"南京邮电大学仙林校区, 9, 文苑路, 仙林大学城, 栖霞区, 南京市, 江苏省, 210023, 中国"
+"University of Queensland, Australia",-27.49741805,153.013169559836,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia"
+University of Rochester,43.1576969,-77.5882915756007,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA"
+"University of Rochester, NY 14627, USA",43.1242954,-77.6288352530005,"Central Utilities Lot, Firemans, Rochester, Monroe County, New York, 14627, USA"
+"University of Rochester, Rochester, NY, USA",43.1576969,-77.5882915756007,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA"
+"University of Salzburg, Austria",47.79475945,13.0541752486067,"Universität Salzburg - Unipark, 1, Erzabt-Klotz-Straße, Nonntal, Salzburg, 5020, Österreich"
+University of Science and,5.35755715,100.303850375,"USM, Lengkok Sastera, The LIGHT, Batu Uban, George Town, PNG, 11700, Malaysia"
+University of Science and Technology of China,31.83907195,117.264207478576,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国"
+"University of Science and Technology of China, Hefei 230026, P. R. China",31.83907195,117.264207478576,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国"
+"University of Science and Technology of China, Hefei, 230027, China",31.83907195,117.264207478576,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国"
+"University of Science and Technology of China, Hefei, Anhui, P. R. China",31.83907195,117.264207478576,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国"
+"University of Science and Technology of China, Hefei, China",31.83907195,117.264207478576,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国"
+University of Siena,22.4133862,114.210058,"大學 University, 澤祥街 Chak Cheung Street, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国"
+"University of South Carolina, Columbia, USA",33.9928298,-81.0268516781225,"University of South Carolina, Wheat Street, Columbia, Richland County, South Carolina, 29205, USA"
+"University of South Carolina, USA",33.9928298,-81.0268516781225,"University of South Carolina, Wheat Street, Columbia, Richland County, South Carolina, 29205, USA"
+University of Southampton,50.89273635,-1.39464294664816,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK"
+"University of Southampton, SO17 1BJ, UK",50.89273635,-1.39464294664816,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK"
+"University of Southampton, United Kingdom",50.89273635,-1.39464294664816,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK"
+University of Southern California,34.0224149,-118.286344073446,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA"
+"University of Southern California, Los Angeles, CA 90089, USA",34.0224149,-118.286344073446,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA"
+"University of St Andrews, United Kingdom",56.3411984,-2.7930938,"University of St Andrews, North Street, Albany Park Student accommodation, Carngour, St Andrews, Fife, Scotland, KY16 9AJ, UK"
+University of Stuttgart,48.9095338,9.1831892,"Pädagogische Hochschule Ludwigsburg, 46, Reuteallee, Ludwigsburg-Nord, Ludwigsburg, Landkreis Ludwigsburg, Regierungsbezirk Stuttgart, Baden-Württemberg, 71634, Deutschland"
+University of Surrey,51.24303255,-0.590013824660236,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK"
+"University of Surrey, Guildford, Surrey GU2 7XH, UK",51.24303255,-0.590013824660236,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK"
+"University of Surrey, United Kingdom",51.24303255,-0.590013824660236,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK"
+University of Sydney,-33.88890695,151.189433661925,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia"
+"University of Tabriz, Tabriz, Iran",38.0612553,46.3298484,"دانشگاه تبریز, شهید ایرج خلوتی, کوی انقلاب, تبریز, بخش مرکزی, شهرستان تبریز, استان آذربایجان شرقی, 5166616471, ‏ایران‎"
+University of Tampere,61.49412325,23.7792067776763,"Tampereen yliopisto, 4, Kalevantie, Ratinanranta, Tulli, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33100, Suomi"
+University of Technology Sydney,-33.8809651,151.201072985483,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia"
+"University of Technology, Baghdad, Iraq",33.3120263,44.4471829434368,"الجامعة التكنلوجية, A86;N11;D383, محلة 103, Al Saadoom Park, Rusafa, بغداد, Al Resafa, محافظة بغداد, 3241, العراق"
+"University of Technology, Sydney",-33.8828784,151.200682779726,"UTS, Thomas Street, Ultimo, Sydney, NSW, 2007, Australia"
+"University of Technology, Sydney, Australia",-33.8828784,151.200682779726,"UTS, Thomas Street, Ultimo, Sydney, NSW, 2007, Australia"
+"University of Tennessee, Knoxville",35.9542493,-83.9307395,"University of Tennessee, Melrose Avenue, Fort Sanders, Knoxville, Knox County, Tennessee, 37916, USA"
+University of Texas,32.3163078,-95.2536994379459,"The University of Texas at Tyler, 3900, University Boulevard, Tyler, Smith County, Texas, 75799, USA"
+University of Texas at,32.3163078,-95.2536994379459,"The University of Texas at Tyler, 3900, University Boulevard, Tyler, Smith County, Texas, 75799, USA"
+University of Texas at Arlington,32.7283683,-97.112018348404,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA"
+"University of Texas at Arlington, Arlington, TX",32.7283683,-97.112018348404,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA"
+"University of Texas at Arlington, Arlington, TX, USA",32.7283683,-97.112018348404,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA"
+"University of Texas at Arlington, Arlington, Texas 76019, USA",32.7283683,-97.112018348404,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA"
+"University of Texas at Arlington, TX, USA",32.7283683,-97.112018348404,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA"
+University of Texas at Austin,30.284151,-97.7319559808022,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA"
+University of Texas at San Antonio,29.58333105,-98.6194450505688,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA"
+"University of Texas, Austin, TX 78712-1188, USA",30.284458,-97.7342106,"University of Texas at Austin, 2152, San Jacinto Boulevard, Medical District, Austin, Travis County, Texas, 78712, USA"
+University of Thessaloniki,40.62984145,22.9588934957528,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα"
+University of Tokyo,35.9020448,139.936220089117,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本"
+"University of Tokyo, Japan",35.9020448,139.936220089117,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本"
+University of Toronto,43.66333345,-79.3976997498952,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada"
+University of Toulouse,30.1781816,-93.2360581,"Toulouse, Lake Charles, Calcasieu Parish, Louisiana, 70605, USA"
+University of Trento,46.0658836,11.1159894,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia"
+"University of Trento, Italy",46.0658836,11.1159894,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia"
+University of Tsukuba,36.1112058,140.1055176,"University of Tsukuba, つばき通り, Kananemoto-satsukabe village, つくば市, 茨城県, 関東地方, 305-8377, 日本"
+"University of Tsukuba, Japan",36.1112058,140.1055176,"University of Tsukuba, つばき通り, Kananemoto-satsukabe village, つくば市, 茨城県, 関東地方, 305-8377, 日本"
+University of Twente,52.2380139,6.8566761,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland"
+"University of Twente, Netherlands",52.2380139,6.8566761,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland"
+"University of Twente, The Netherlands",52.2380139,6.8566761,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland"
+University of Venezia,45.4312742,12.3265377,"University, Fondamenta Toffetti, Dorsoduro, Venezia-Murano-Burano, Venezia, VE, VEN, 30123, Italia"
+"University of Vermont, 33 Colchester Avenue, Burlington",44.48116865,-73.2002178989123,"University of Vermont, Colchester Avenue, Burlington, Chittenden County, Vermont, 05401, USA"
+"University of Vienna, Austria",48.2131302,16.3606865338016,"Uni Wien, 1, Universitätsring, Schottenviertel, KG Innere Stadt, Innere Stadt, Wien, 1010, Österreich"
+University of Virginia,38.0353682,-78.5035322,"University of Virginia, Rotunda Alley, Carr's Hill, Albemarle County, Virginia, 22904-4119, USA"
+"University of Virginia, Charlottesville, VA",38.0410576,-78.5054996018357,"University of Virginia, Emmet Street North, Charlottesville, Virginia, 22901, USA"
+University of Warwick,52.3793131,-1.5604252,"University of Warwick, University Road, Kirby Corner, Cannon Park, Coventry, West Midlands Combined Authority, West Midlands, England, CV4 7AL, UK"
+University of Washington,47.6543238,-122.308008943203,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA"
+"University of Washington, Seattle, USA",47.6543238,-122.308008943203,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA"
+"University of Washington, Seattle, WA 98195, United States",47.6547795,-122.305818,"University of Washington, Yakima Lane, Montlake, University District, Seattle, King County, Washington, 98195, USA"
+"University of Washington, Seattle, WA, USA",47.65249975,-122.2998748,"University of Washington, Northeast Walla Walla Road, Montlake, University District, Seattle, King County, Washington, 98195-2350, USA"
+University of Waterloo,43.47061295,-80.5472473165632,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada"
+University of Western Australia,-31.95040445,115.797900374251,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia"
+University of Wisconsin Madison,43.07982815,-89.4306642542901,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA"
+University of Wisconsin Madison,43.07982815,-89.4306642542901,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA"
+University of Wisconsin-Madison,43.07982815,-89.4306642542901,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA"
+"University of Wisconsin-Madison, Madison, WI, USA",43.0705257,-89.4059387,"UW Geology Museum, 1215, West Dayton Street, South Campus, Madison, Dane County, Wisconsin, 53715, USA"
+University of Witwatersrand,-26.1888813,28.0247907319205,"University of the Witwatersrand, Empire Road, Johannesburg Ward 60, Johannesburg, City of Johannesburg Metropolitan Municipality, Gauteng, 2001, South Africa"
+University of Wollongong,-34.40505545,150.878346547278,"University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia"
+"University of York, UK",53.94540365,-1.0313887829649,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK"
+"University of York, York, UK",53.94540365,-1.0313887829649,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK"
+"University of York, York, United Kingdom",53.94540365,-1.0313887829649,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK"
+"University of Zurich, Zurich, Switzerland",47.4968476,8.72981767380829,"ZHAW, Rosenstrasse, Heiligberg, Altstadt, Winterthur, Bezirk Winterthur, Zürich, 8400, Schweiz/Suisse/Svizzera/Svizra"
+"University of telecommunications and post, Sofia, Bulgaria",42.6560524,23.3476108351659,"Висше Училище по Телекомуникации и Пощи, 1, бул. Акад. Стефан Младенов, ж.к. Студентски град, район Студентски, Столична, София-град, 1700, Бългaрия"
+"University of the Basque Country, San Sebastian, Spain",43.30927695,-2.01066784661227,"Euskal Herriko Unibertsitatea, Ibaeta Campusa, Paseo Arriola pasealekua, Ibaeta, Donostia/San Sebastián, Donostialdea, Gipuzkoa, Euskadi, 20008, España"
+University of the Western Cape,-33.9327762,18.6291540714825,"University of the Western Cape, Park Road, Cape Town Ward 9, Bellville, City of Cape Town, Western Cape, 7493, South Africa"
+University of the Witwatersrand,-26.1888813,28.0247907319205,"University of the Witwatersrand, Empire Road, Johannesburg Ward 60, Johannesburg, City of Johannesburg Metropolitan Municipality, Gauteng, 2001, South Africa"
+"University, China",22.4133862,114.210058,"大學 University, 澤祥街 Chak Cheung Street, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国"
+"University, Guangzhou, China",23.1314851,113.2852239,"中山大学第一课室, 74号大院, 中山二路, 马棚岗, 农林街道, 越秀区 (Yuexiu), 广州市, 广东省, 510080, 中国"
+"University, Hong Kong",54.0856448,13.389089,"Hong-Kong, Feldstraße, Greifswald, Südliche Mühlenvorstadt, Greifswald, Vorpommern-Greifswald, Mecklenburg-Vorpommern, 17489, Deutschland"
+"University, Singapore",1.2962018,103.776899437848,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore"
+"University, USA",25.7147949,-80.276947,"University, South Dixie Highway, Coral Gables, Miami-Dade County, Florida, 33124-6310, USA"
+"University, Xi an Shaanxi Province, Xi an 710049, China",34.2707834,108.94449949951,"西五路, 新城区, 新城区 (Xincheng), 西安市, 陕西省, 710003, 中国"
+Ural Federal University (UrFU,56.8435083,60.6454805,"УрФУ, улица Гагарина, Эврика, Втузгородок, Кировский район, Екатеринбург, городской округ Екатеринбург, Свердловская область, Уральский федеральный округ, 620062, РФ"
+"Utah State University, Logan UT",41.7411504,-111.8122309,"Utah State University, Champ Drive, Logan, Cache County, Utah, 84322, USA"
+"Utah State University, Logan, UT 84322-4205, USA",41.7411504,-111.8122309,"Utah State University, Champ Drive, Logan, Cache County, Utah, 84322, USA"
+Victoria University of Wellington,-41.29052775,174.768469187426,"Victoria University of Wellington, Waiteata Road, Aro Valley, Wellington, Wellington City, Wellington, 6040, New Zealand/Aotearoa"
+Vienna University of Technology,48.19853965,16.3698616762866,"TU Wien, Hauptgebäude, Hoftrakt, Freihausviertel, KG Wieden, Wieden, Wien, 1040, Österreich"
+Villanova University,40.0367774,-75.342023320028,"Villanova University, East Lancaster Avenue, Radnor Township, Delaware County, Pennsylvania, 19010, USA"
+Virginia Polytechnic Institute and State University,37.21872455,-80.4254251869494,"Virginia Polytechnic Institute and State University, Duck Pond Drive, Blacksburg, Montgomery County, Virginia, 24061-9517, USA"
+"Virginia Polytechnic Institute and State University, Blacksburg",37.21872455,-80.4254251869494,"Virginia Polytechnic Institute and State University, Duck Pond Drive, Blacksburg, Montgomery County, Virginia, 24061-9517, USA"
+"Virginia Polytechnic Institute and State University, Blacksburg, Virginia",37.21872455,-80.4254251869494,"Virginia Polytechnic Institute and State University, Duck Pond Drive, Blacksburg, Montgomery County, Virginia, 24061-9517, USA"
+Virginia Tech Carilion Research Institute,37.2579548,-79.9423329131356,"Virginia Tech Carilion Research Institute, South Jefferson Street, Crystal Spring, Roanoke, Virginia, 24016, USA"
+Vrije Universiteit Brussel,50.8411007,4.32377555279953,"Vrije Universiteit Brussel, 170, Quai de l'Industrie - Nijverheidskaai, Anderlecht, Brussel-Hoofdstad - Bruxelles-Capitale, Région de Bruxelles-Capitale - Brussels Hoofdstedelijk Gewest, 1070, België / Belgique / Belgien"
+"Vrije Universiteit Brussel, 1050 Brussels, Belgium",50.8223021,4.3967361,"Vrije Universiteit Brussel, 2, Boulevard de la Plaine - Pleinlaan, Ixelles - Elsene, Brussel-Hoofdstad - Bruxelles-Capitale, Région de Bruxelles-Capitale - Brussels Hoofdstedelijk Gewest, 1050, België / Belgique / Belgien"
+"Warsaw University of Technology, Poland",52.22165395,21.0073577612511,"Politechnika Warszawska, 1, Plac Politechniki, VIII, Śródmieście, Warszawa, mazowieckie, 00-661, RP"
+Waseda University,33.8898728,130.708562047107,"早稲田大学 北九州キャンパス, 2-2, 有毛引野線, 八幡西区, 北九州市, 福岡県, 九州地方, 808-0135, 日本"
+"Washington University, St. Louis, MO, USA",38.6480445,-90.3099667,"Dero, Wallace Drive, St. Louis County, Missouri, MO 63130, USA"
+Wayne State University,42.357757,-83.0628671134125,"Parking Structure 3, East Warren Avenue, New Center, Detroit, Wayne County, Michigan, 48236, USA"
+"Wayne State University, Detroit, MI 48202, USA",42.3656423,-83.0711533990367,"Wayne State University, Burroughs Street, New Center, Detroit, Wayne County, Michigan, 48202, USA"
+Weizmann Institute of Science,31.9078499,34.8133409244421,"מכון ויצמן למדע, שדרת מרכוס זיו, מעונות שיין, אחוזות הנשיא, רחובות, מחוז המרכז, NO, ישראל"
+West Virginia University,39.65404635,-79.96475355,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA"
+"West Virginia University, Morgantown WV 26506, USA",39.65404635,-79.96475355,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA"
+"West Virginia University, Morgantown, USA",39.65404635,-79.96475355,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA"
+"West Virginia University, Morgantown, WV",39.65404635,-79.96475355,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA"
+"West Virginia University, Morgantown, WV 26506, USA",39.65404635,-79.96475355,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA"
+Western Kentucky University,36.9845317,-86.4576443016944,"Western Kentucky University, Avenue of Champions, Bowling Green, Warren County, Kentucky, 42101, USA"
+"Western Sydney University, Parramatta, NSW 2150, Australia",-33.8160848,151.00560034186,"Western Sydney University, Parramatta City Campus, Smith Street, Parramatta, Sydney, Parramatta, NSW, 2150, Australia"
+Wolfson College,51.7711076,-1.25361700492597,"Wolfson College, Linton Road, Norham Manor, Oxford, Oxon, South East, England, OX2 6UD, UK"
+Xerox Research Center,43.5129109,-79.6664076152913,"Xerox Research Centre of Canada, 2660, Speakman Drive, Sheridan Park, Erin Mills, Ont., Peel Region, Ontario, L5J 2M4, Canada"
+Xiamen University,24.4399419,118.093017809127,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国"
+"Xiamen University, Xiamen, China",24.4399419,118.093017809127,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国"
+Xidian University,34.1235825,108.83546,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国"
+"Xidian University, Xi an, China",34.1235825,108.83546,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国"
+"Y. Li, University of Maryland",39.2864694,-76.6263409932124,"Penn Street Garage, 120, Penn Street, Ridgleys Delight, Baltimore, Maryland, 21201, USA"
+Yale University,41.25713055,-72.9896696015223,"Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA"
+Yaroslavl State University,57.6252103,39.8845656,"ЯрГУ им. Демидова (Экономический факультет), 3, Комсомольская улица, Кировский район, Ярославль, городской округ Ярославль, Ярославская область, ЦФО, 150000, РФ"
+Yeungnam University,35.8365403,128.7534309,"영남대, 대학로, 부적리, 경산시, 경북, 712-749, 대한민국"
+York University,43.7743911,-79.5048108538813,"York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada"
+"York University, Toronto",43.7743911,-79.5048108538813,"York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada"
+"York University, Toronto, Canada",43.7743911,-79.5048108538813,"York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada"
+Zaragoza University,41.6406218,-0.900793992168927,"Colegio Mayor Universitario Santa Isabel, Calle de Domingo Miral, Romareda, Zaragoza, Aragón, 50009, España"
+"Zhejiang Normal University, Jinhua, China",29.13646725,119.637686517179,"浙江师范大学, 688, 迎宾大道, 柳湖花园, 金华市, 婺城区 (Wucheng), 金华市 / Jinhua, 浙江省, 321004, 中国"
+Zhejiang University,30.19331415,120.119308216677,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国"
+Zhejiang University of Technology,30.2931534,120.1620458,"浙江工业大学, 潮王路, 朝晖街道, 杭州市 Hangzhou, 浙江省, 310014, 中国"
+"Zhengzhou University, Zhengzhou, Henan 450052, China",34.8088168,113.5352664,"科学大道, 中原区 (Zhongyuan), 郑州市 / Zhengzhou, 河南省, 450001, 中国"
+a The University of Nottingham Malaysia Campus,2.9438432,101.8736196,"The University of Nottingham Malaysia Campus, Jalan Broga, Bandar Rinching, Semenyih, Selangor, 43500, Malaysia"
+any other University,53.8012316,-1.5476213,"Northern Film School, Millennium Square, Steander, Woodhouse, Leeds, Yorkshire and the Humber, England, LS1 3DW, UK"
+college of Engineering,13.0110912,80.2354520862161,"College of Engineering, Sardar Patel Road, Srinagar Colony, Ward 171, Zone 13 Adyar, Chennai, Chennai district, Tamil Nadu, 600025, India"
+of Cornell University,42.4505507,-76.4783512955428,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA"
+of bilkent university,39.8720489,32.7539515466323,"Bilkent Üniversitesi, 3. Cadde, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye"
+of the University of Notre Dame,41.70456775,-86.2382202601727,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA"
+the Chinese University of Hong Kong,22.42031295,114.207886442805,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国"
+to Michigan State University,42.7231021,-84.4449848597663,"Red Cedar River, Small Acres Lane, Okemos, Ingham County, Michigan, 48864, USA"
+"university, Shiraz, Iran",29.6284395,52.5181728343761,"دانشکده مهندسی دانشگاه شیراز, ملاصدرا, فلسطین, محدوده شهرداری منطقه یک - شهرداری شیراز, شیراز, بخش مرکزی شهرستان شیراز, شهرستان شیراز, استان فارس, 71936, ‏ایران‎"
+y National Institute of Advanced Industrial Science and Technology,36.05238585,140.118523607658,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本"
diff --git a/scraper/reports/institutions_found/found-1.csv b/scraper/reports/institutions_found/found-1.csv
new file mode 100644
index 00000000..e76145e5
--- /dev/null
+++ b/scraper/reports/institutions_found/found-1.csv
@@ -0,0 +1,479 @@
+"Rutgers University, Newark, NJ, USA",40.7417586,-74.1750462269524,"Dana Library, Bleeker Street, Teachers Village, Newark, Essex County, New Jersey, 07102, USA"
+"Sri krishna College of Technology, Coimbatore, India",10.925861,76.9224672855261,"Sri Krishna College of Technology, Kovaipudur to Golf Course Road dirt track, Ward 89, South Zone, Coimbatore, Coimbatore district, Tamil Nadu, 641001, India"
+Yale University,41.25713055,-72.9896696015223,"Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA"
+Eskisehir Osmangazi University,39.7487516,30.4765307102195,"Eskişehir Osmangazi Üniversitesi Meşelik Yerleşkesi, Kütahya-Eskişehir yolu, Sazova Mahallesi, Karagözler, Tepebaşı, Eskişehir, İç Anadolu Bölgesi, 26160, Türkiye"
+"Shanghai Jiao Tong University, China",31.20081505,121.428406809373,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国"
+"University of Coimbra, Portugal",40.2075951,-8.42566147540816,"Reitoria da Universidade de Coimbra, Rua de Entre-Colégios, Almedina, Alta, Almedina, Sé Nova, Santa Cruz, Almedina e São Bartolomeu, CBR, Coimbra, Baixo Mondego, Centro, 3000-062, Portugal"
+"Fudan University, Shanghai, China",31.30104395,121.500454969435,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国"
+"University of Science and Technology of China, Hefei 230026, P. R. China",31.83907195,117.264207478576,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国"
+University Politehnica of Bucharest,44.43918115,26.0504456538413,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România"
+"Columbia University, New York NY 10027, USA",40.81779415,-73.9578531933627,"Columbia University, West 131st Street, Manhattanville Houses, Manhattanville, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10027, USA"
+University of Perugia,49.2622421,-123.2450052,"Caffe Perugia, 2350, Health Sciences Mall, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada"
+"Tsinghua University, Beijing 100084, China",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+"Tsinghua University, Beijing 100084, P.R.China",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+"University of Ottawa, Canada",45.42580475,-75.6874011819989,"University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada"
+"Lund University, Lund, Sweden",55.7039571,13.1902011,"TEM at Lund University, 9, Klostergatan, Stadskärnan, Centrum, Lund, Skåne, Götaland, 22222, Sverige"
+Princeton University,40.34829285,-74.66308325,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA"
+"Shanghai Jiao Tong University, Shanghai 200240, China",31.02775885,121.432219256081,"上海交通大学(闵行校区), 宣怀大道, 紫竹科技园区, 英武, 闵行区, 上海市, 200240, 中国"
+"National University of Ireland Maynooth, Co. Kildare, Ireland",53.3846975,-6.60039458177959,"National University of Ireland Maynooth, River Apartments, Maynooth, Maynooth ED, Maynooth Municipal District, County Kildare, Leinster, KILDARE, Ireland"
+Myongji University,37.2381023,127.1903431,"명지대, 금학로, 역북동, 처인구, 용인시, 경기, 17144, 대한민국"
+"Hankuk University of Foreign Studies, South Korea",37.5953979,127.0630499,"외대앞, 휘경로, 이문동, 이문2동, 동대문구, 서울특별시, 02407, 대한민국"
+"Arizona State University, Tempe AZ",33.4206602,-111.932634924965,"Arizona State University, Palm Walk, Tempe, Maricopa County, Arizona, 85287, USA"
+"Sharda University, Greater Noida, India",28.4737512,77.4836148,"Sharda University, Yamuna Expressway, Greater Noida, Gautam Buddha Nagar, Uttar Pradesh, 201308, India"
+The University of Electro-Communications,35.6572957,139.542558677257,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本"
+"IDIAP Research Institute, Martigny, Switzerland",46.109237,7.08453548522408,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra"
+"Peking University, Beijing, China",39.9922379,116.303938156219,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国"
+Middlesex University London,51.59029705,-0.229632209454029,"Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK"
+"Tampere University of Technology, Tampere, Finland",61.44964205,23.8587746189096,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi"
+Tafresh University,34.68092465,50.0534135183902,"دانشگاه تفرش, پاسداران, خرازان, بخش مرکزی, شهرستان تفرش, استان مرکزی, ‏ایران‎"
+"University of North Carolina at Chapel Hill, Chapel Hill, NC",35.9105975,-79.0517871,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA"
+"Temple University, Philadelphia, PA 19122, USA",39.9808569,-75.149594,"Temple University, West Berks Street, Hartranft, Philadelphia, Philadelphia County, Pennsylvania, 19122, USA"
+Hunan University,26.88111275,112.628506656425,"Yejin University for Employees, 冶金西路, 和平乡, 珠晖区, 衡阳市 / Hengyang, 湖南省, 中国"
+"University of California, Riverside CA 92521-0425, USA",33.9743275,-117.32558236636,"UCR, North Campus Drive, Riverside, Riverside County, California, 92521, USA"
+Shaheed Zulfikar Ali Bhutto Institute of,24.8186587,67.0316585,"Shaheed Zulfikar Ali Bhutto Institute of Science and Technology - Karachi Campus, Block 5, Clifton Block 5, CBC, ڪراچي Karachi, Karāchi District, سنڌ, 75600, ‏پاکستان‎"
+Punjabi University Patiala,30.3568981,76.4551272,"Punjabi University Patiala, Rajpura Road, Patiala, Punjab, 147001, India"
+"Imperial College London, UK",51.49887085,-0.175607973937072,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK"
+"Southwest Jiaotong University, Chengdu, P.R. China",30.697847,104.0520811,"西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国"
+"Arizona State University, AZ, USA",33.30715065,-111.676531568996,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA"
+"Eindhoven University of Technology, The Netherlands",51.4486602,5.49039956550805,"Technische Universiteit Eindhoven, 2, De Rondom, Villapark, Eindhoven, Noord-Brabant, Nederland, 5600 MB, Nederland"
+"Rensselaer Polytechnic Institute, USA",42.7298459,-73.6795021620135,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA"
+"Boston College, USA",42.3354481,-71.1681386402306,"Boston College, 140, Commonwealth Avenue, Chestnut Hill, Newton, Middlesex County, Massachusetts, 02467, USA"
+Harvard University,42.36782045,-71.1266665287448,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA"
+UNIVERSITY OF OULU,65.0592157,25.466326012507,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi"
+Carnegie Mellon University,37.4102193,-122.059654865858,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA"
+University of Caen,35.0274996,135.781545126193,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本"
+Rensselaer Polytechnic Institute,42.7298459,-73.6795021620135,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA"
+"University of Chinese Academy of Sciences, Beijing, 100049, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+"University of California, Irvine, USA",33.6431901,-117.84016493553,"University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA"
+University of Wollongong,-34.40505545,150.878346547278,"University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia"
+University of Manitoba,49.8091536,-97.133041790072,"University of Manitoba, Gillson Street, Normand Park, Saint Vital, Winnipeg, Manitoba, R3T 2N2, Canada"
+"Dalian University of Technology, China",38.88140235,121.522810980755,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国"
+University of Glasgow,55.87231535,-4.28921783557444,"University of Glasgow, University Avenue, Yorkhill, Hillhead, Glasgow, Glasgow City, Scotland, G, UK"
+"Idiap Research Institute, Switzerland",46.109237,7.08453548522408,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra"
+Boston College,42.3354481,-71.1681386402306,"Boston College, 140, Commonwealth Avenue, Chestnut Hill, Newton, Middlesex County, Massachusetts, 02467, USA"
+"University of Rochester, Rochester, NY, USA",43.1576969,-77.5882915756007,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA"
+Bangalore Institute of Technology,12.9551259,77.5741985,"Bangalore Institute of Technology, Krishna Rajendra Road, Mavalli, Vishveshwara Puram, South Zone, Bengaluru, Bangalore Urban, Karnataka, 560004, India"
+Michigan State University,42.718568,-84.4779157093052,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA"
+George Mason University,38.83133325,-77.3079883887912,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA"
+"Stony Brook University, NY 11794, USA",40.9153196,-73.1270626,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA"
+"University of Nottingham, Nottingham, UK",52.9387428,-1.20029569274574,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK"
+"Nanjing University, Nanjing 210093, China",32.0565957,118.774088328078,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国"
+"Jahangirnagar University, Savar, Dhaka 1342, Bangladesh",23.88277575,90.2671009927283,"Jahangirnagar University, 1342, Dhaka - Aricha Highway, Nobinagar, সাভার উপজেলা, ঢাকা জেলা, ঢাকা বিভাগ, 1342, বাংলাদেশ"
+Yaroslavl State University,57.6252103,39.8845656,"ЯрГУ им. Демидова (Экономический факультет), 3, Комсомольская улица, Кировский район, Ярославль, городской округ Ярославль, Ярославская область, ЦФО, 150000, РФ"
+"City University of Hong Kong, Hong Kong, China",22.34000115,114.169702912423,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国"
+of Cornell University,42.4505507,-76.4783512955428,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA"
+"University of Michigan, Ann Arbor, MI",42.2942142,-83.710038935096,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA"
+Oxford Brookes University,51.7555205,-1.2261597,"Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK"
+"Oxford Brookes University, Oxford, United Kingdom",51.7555205,-1.2261597,"Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK"
+University of Newcastle,-33.3578899,151.37834708231,"University of Newcastle Central Coast Campus, Technology Bridge, Ourimbah, Central Coast, NSW, 2258, Australia"
+Sharif University of Technology,35.7036227,51.351250969544,"دانشگاه صنعتی شریف, خیابان آزادی, زنجان, منطقه ۹ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, 14588, ‏ایران‎"
+"Tel Aviv University, Israel",32.1119889,34.8045970204252,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל"
+Reutlingen University,48.48187645,9.18682403998887,"Campus Hohbuch, Campus Hochschule Reutlingen, Reutlingen, Landkreis Reutlingen, Regierungsbezirk Tübingen, Baden-Württemberg, 72762, Deutschland"
+Hanyang University,37.5557271,127.0436642,"한양대, 206, 왕십리로, 사근동, 성동구, 서울특별시, 04763, 대한민국"
+University of Northern British Columbia,53.8925662,-122.814715920529,"UNBC, Campus Ring Road, College Heights, Prince George, Regional District of Fraser-Fort George, British Columbia, V2M 5K7, Canada"
+"Stamford University Bangladesh, Dhaka-1209, Bangladesh",23.7448166,90.4084351355108,"Stamford University Bangladesh, Siddeshwari Road, ফকিরাপুল, Paltan, ঢাকা, ঢাকা বিভাগ, 1217, বাংলাদেশ"
+"National University of Sciences and Technology (NUST), Islamabad, Pakistan",33.644347,72.9885079,"National University of Sciences and Technology (NUST), Kashmir Highway, جی - 10, ICT, وفاقی دارالحکومت اسلام آباد, 44000, ‏پاکستان‎"
+University of Exeter,50.7369302,-3.53647671702167,"University of Exeter, Stocker Road, Exwick, Exeter, Devon, South West England, England, EX4 4QN, UK"
+University of London,51.5217668,-0.130190717056655,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK"
+"Istanbul Technical University, Istanbul, 34469, TURKEY",41.10427915,29.022311592943,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye"
+The Open University,52.02453775,-0.709274809394501,"The Open University, East Lane, Walton, Monkston, Milton Keynes, South East, England, MK7 6AE, UK"
+University of Illinois at Urbana-Champaign,40.101976,-88.2314378,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA"
+University of Thessaloniki,40.62984145,22.9588934957528,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα"
+"Technical University Munich, Germany",48.14955455,11.5677531417838,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland"
+"Carnegie Mellon University, Pittsburgh, USA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+"Information Technology University (ITU), Punjab, Lahore, Pakistan",31.4760299,74.3427526,"Information Technology University (ITU), Ferozepur Road, Sher Shah Block, Garden Town, Al Noor Town, Lahore District, پنجاب, 54600, ‏پاکستان‎"
+The University of Sydney,-33.88890695,151.189433661925,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia"
+New Jersey Institute of Technology,40.7423025,-74.1792817237128,"New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA"
+Indiana University,39.86948105,-84.8795690544362,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA"
+Dartmouth College,43.7047927,-72.2925909,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA"
+Florida State University,30.44235995,-84.2974786716626,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA"
+Sun Yat-Sen University,23.09461185,113.287889943975,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国"
+University Of Maryland,39.2899685,-76.6219610316858,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA"
+National Institute of Advanced Industrial Science and Technology,36.05238585,140.118523607658,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本"
+University of Washington,47.6543238,-122.308008943203,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA"
+"COMSATS Institute of Information Technology, Islamabad",33.65010145,73.1551494914791,"COMSATS Institute of Information Technology, Fence, Chak Shehzad, وفاقی دارالحکومت اسلام آباد, 45550, ‏پاکستان‎"
+Nanjing University of Information Science and Technology,32.2068102,118.718472893883,"南京信息工程大学, 龙山北路, 第十六街区, 浦口区, 南京市, 江苏省, 210032, 中国"
+Louisiana State University,30.40550035,-91.1862047410405,"LSU, Gourrier Avenue, Baton Rouge, East Baton Rouge Parish, Louisiana, 70803, USA"
+The University of Tokyo,35.9020448,139.936220089117,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本"
+"University of Groningen, The Netherlands",53.21967825,6.56251482206542,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland"
+University of Nebraska - Lincoln,40.8174723,-96.7044468,"Sheldon Museum of Art, North 12th Street, West Lincoln, Lincoln, Lancaster County, Nebraska, 68588-0300, USA"
+"University of Central Florida, USA",28.59899755,-81.1971250118395,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA"
+"University of Bristol, Bristol, BS8 1UB, UK",51.4562363,-2.602779,"University of Bristol, Cantock's Close, Kingsdown, Canon's Marsh, Bristol, City of Bristol, South West England, England, BS8, UK"
+Cyprus University of Technology,34.67567405,33.0457764820597,"Mitropoli Building - Cyprus University of Technology, Anexartisias, Limasol - Λεμεσός, Limassol - Λεμεσός, Κύπρος - Kıbrıs, 3036, Κύπρος - Kıbrıs"
+"Stanford University, Stanford, CA, USA",37.43131385,-122.169365354983,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA"
+Meiji University,35.6975029,139.761391749285,"明治大学, 錦華坂, 猿楽町1, 猿楽町, 東京, 千代田区, 東京都, 関東地方, 101-0051, 日本"
+Kyushu University,33.59914655,130.223598480987,"伊都ゲストハウス, 桜井太郎丸線, 西区, 福岡市, 福岡県, 九州地方, 819−0395, 日本"
+"University of Plymouth, UK",50.3752501,-4.13927692297343,"Charles Seale-Hayne Library, Portland Square, Barbican, Plymouth, South West England, England, PL4 6AP, UK"
+The American University in Cairo,30.04287695,31.2366413899265,"الجامعة الأمريكية بالقاهرة, شارع القصر العينى, القاهرة القديمة, جاردن سيتي, القاهرة, محافظة القاهرة, 11582, مصر"
+"La Trobe University, Australia",-36.7784754,144.298047,"La Trobe University, Keck Street, Flora Hill, Bendigo, City of Greater Bendigo, Loddon Mallee, Victoria, 3550, Australia"
+Karlsruhe Institute of Technology,49.10184375,8.43312559623876,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland"
+Rice University,29.71679145,-95.4047811339379,"Rice University, Stockton Drive, Houston, Harris County, Texas, 77005-1890, USA"
+Rowan University,39.7103526,-75.1193266647699,"Rowan University, Esbjornson Walk, Glassboro, Gloucester County, New Jersey, 08028, USA"
+University of Tokyo,35.9020448,139.936220089117,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本"
+"Beijing University of Posts and Telecommunications, Beijing, P.R. China",39.9601488,116.351939210403,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国"
+Cornell University,42.4505507,-76.4783512955428,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA"
+"Amirkabir University of Technology, Tehran. Iran",35.704514,51.4097205774739,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎"
+New York University,40.72925325,-73.9962539360963,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA"
+"University of South Carolina, USA",33.9928298,-81.0268516781225,"University of South Carolina, Wheat Street, Columbia, Richland County, South Carolina, 29205, USA"
+Florida International University,25.75533775,-80.3762889746807,"FIU, Southwest 14th Street, Sweetwater, University Park, Miami-Dade County, Florida, 33199, USA"
+Islamic University of Gaza - Palestine,31.51368535,34.4401934143135,"The Islamic University of Gaza, Mostafa Hafez Street, South Remal, محافظة غزة, ‏قطاع غزة‎, PO BOX 108, الأراضي الفلسطينية"
+"Queen Mary University of London, UK",51.5247272,-0.0393103466301624,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK"
+Kent State University,41.1443525,-81.3398283284572,"Kent State University, Lester A. Lefton Esplanade, Whitehall Terrace, Kent, Portage County, Ohio, 44242-0001, USA"
+"University, Xi an Shaanxi Province, Xi an 710049, China",34.2707834,108.94449949951,"西五路, 新城区, 新城区 (Xincheng), 西安市, 陕西省, 710003, 中国"
+Politehnica University of Timisoara,45.746189,21.2275507517647,"UPT, Bulevardul Vasile Pârvan, Elisabetin, Timișoara, Timiș, 300223, România"
+Aristotle University of Thessaloniki GR,40.62984145,22.9588934957528,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα"
+"Moscow Institute of Physics and Technology, Russia",55.929035,37.5186680829482,"МФТИ, 9, Институтский переулок, Виноградовские Горки, Лихачёво, Долгопрудный, городской округ Долгопрудный, Московская область, ЦФО, 141700, РФ"
+Kobe University,34.7275714,135.237099997686,"神戸大学, 灘三田線, 灘区, 神戸市, 兵庫県, 近畿地方, 657-00027, 日本"
+University of California San Diego,32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+"Michigan State University, East Lansing, MI, USA",42.718568,-84.4779157093052,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA"
+Queensland University of Technology(QUT,-27.4770485,153.028373791304,"QUT Gardens Point Main Library, V, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia"
+Otto-von-Guericke University Magdeburg,52.14005065,11.6447124822347,"Otto-von-Guericke-Universität Magdeburg, 2, Universitätsplatz, Krökentorviertel/Breiter Weg NA, Alte Neustadt, Magdeburg, Sachsen-Anhalt, 39106, Deutschland"
+"College Park, MD, 20740, USA",38.980666,-76.9369189,"College Park, Prince George's County, Maryland, USA"
+Czech Technical University,50.0764296,14.418023122743,"České vysoké učení technické v Praze, Resslova, Nové Město, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 11121, Česko"
+Middle East Technical University,39.87549675,32.7855350558467,"ODTÜ, 1, 1591.sk(315.sk), Çiğdem Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye"
+"University of Technology, Sydney, Australia",-33.8828784,151.200682779726,"UTS, Thomas Street, Ultimo, Sydney, NSW, 2007, Australia"
+Shanghai Jiao Tong University,31.20081505,121.428406809373,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国"
+"Monash University, Victoria, Australia",-37.9011951,145.130584919767,"Monash University, Business Park Drive, Monash Business Park, Notting Hill, City of Monash, Victoria, 3800, Australia"
+FL,27.7567667,-81.4639835,"Florida, USA"
+Institute of Computer Science III,35.15456615,128.098476040221,"Institute of Computer Science, 8, 내동로, 신율리, 진주시, 경남, 52669, 대한민국"
+"The University of Tokyo, Japan",35.9020448,139.936220089117,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本"
+Cambridge University,50.7944026,-1.0971748,"University, Cambridge Road, Old Portsmouth, Portsmouth, South East, England, PO1 2HB, UK"
+"University POLITEHNICA of Bucharest, Bucharest, Romania",44.43918115,26.0504456538413,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România"
+University of Southern California,34.0224149,-118.286344073446,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA"
+Institute of,38.3836097,-81.7654665,"Institute, Kanawha County, West Virginia, 25112, USA"
+University of California Berkeley,37.87631055,-122.238859269443,"UC Berkeley, Centennial Drive, Oakland, Alameda County, California, 94720-1076, USA"
+"Columbia University, New York, NY, USA",40.8419836,-73.9436897071772,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA"
+Mahanakorn University of Technology,13.84450465,100.856208183836,"มหาวิทยาลัยเทคโนโลยีมหานคร, 140, ถนนเชื่อมสัมพันธ์, กรุงเทพมหานคร, เขตหนองจอก, กรุงเทพมหานคร, 10530, ประเทศไทย"
+"University of Washington, Seattle, USA",47.6543238,-122.308008943203,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA"
+Ionian University,38.2899482,21.7886469,"Πανεπιστήμιο Πατρών, Λεωφ. Ιπποκράτους, κ. Ρίου (Αγίου Γεωργίου Ρίου), Πάτρα, Δήμος Πατρέων, Περιφερειακή Ενότητα Αχαΐας, Περιφέρεια Δυτικής Ελλάδας, Πελοπόννησος, Δυτική Ελλάδα και Ιόνιο, 26443, Ελλάδα"
+University of Arkansas at Little Rock,34.72236805,-92.3383025526859,"University of Arkansas At Little Rock (UALR), 2801, U A L R Campus Drive, Little Rock, Pulaski County, Arkansas, 72204, USA"
+"College Heights Blvd, Bowling Green, KY",36.9881671,-86.4542111,"College Heights Boulevard, Bowling Green, Warren County, Kentucky, 42101, USA"
+University of Pittsburgh,40.44415295,-79.9624399276271,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA"
+Katholieke Universiteit Leuven,50.8830686,4.7019503,"Laboratorium voor Bos, natuur en landschap, 102, Vital Decosterstraat, Sint-Maartensdal, Leuven, Vlaams-Brabant, Vlaanderen, 3000, België / Belgique / Belgien"
+"University of Barcelona, Spain",41.3868913,2.16352384576632,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España"
+"University of Chinese Academy of Sciences, Beijing 100190, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+University of Frankfurt,50.13053055,8.69234223934388,"Frankfurt University of Applied Sciences, Kleiststraße, Nordend West, Frankfurt, Regierungsbezirk Darmstadt, Hessen, 60318, Deutschland"
+Hanoi University of Science and Technology,21.003952,105.843601832826,"HUST, Trần Đại Nghĩa, Hai Bà Trưng, Hà Nội, 10999, Việt Nam"
+"Southwest Jiaotong University, Chengdu, China",30.697847,104.0520811,"西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国"
+"University of Chinese Academy of Sciences, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+"Wayne State University, Detroit, MI 48202, USA",42.3656423,-83.0711533990367,"Wayne State University, Burroughs Street, New Center, Detroit, Wayne County, Michigan, 48202, USA"
+Institute of Electronics and Computer Science,56.97734805,24.1951425550775,"EDI, 14, Dzērbenes iela, Biķerziedi, Teika, Ozolkalni, Rīga, Vidzeme, LV-1006, Latvija"
+"Nanjing University of Aeronautics and Astronautics, Nanjing 210016, China",32.0373496,118.8140686,"南京航空航天大学, 御道街, 白下区, 新世纪广场, 秦淮区, 南京市, 江苏省, 210016, 中国"
+"King Saud University, Riyadh, Saudi Arabia",24.7246403,46.623350123456,"King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية"
+Dr. B. C. Roy Engineering College,23.54409755,87.342697070434,"Dr. B. C. Roy Engineering College, Lenin Sarani, Durgapur, Bānkurā, West Bengal, 713200, India"
+University of Canberra,-35.23656905,149.084469935058,"University of Canberra, University Drive, Bruce, Belconnen, Australian Capital Territory, 2617, Australia"
+"Chung-Ang University, Seoul, Korea",37.50882,126.9619,"중앙대학교, 서달로15길, 흑석동, 동작구, 서울특별시, 06981, 대한민국"
+Beijing Jiaotong University,39.94976005,116.33629045844,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国"
+"King Abdullah University of Science and Technology 4700, Thuwal, Saudi Arabia",22.31055485,39.1051548637793,"KAUST, Collaboration Avenue, ثول, منطقة مكة المكرمة, 23955, السعودية"
+"The Australian National University Canberra ACT 2601, Australia",-35.28121335,149.11665331324,"Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia"
+"Santa Clara University, Santa Clara, CA. 95053, USA",37.34820285,-121.935635412063,"Cowell Center, Accolti Way, Santa Clara, Santa Clara County, California, 95053, USA"
+Rowland Institute,42.3639862,-71.0778293,"Rowland Research Institute, Land Boulevard, East Cambridge, Cambridge, Middlesex County, Massachusetts, 02142, USA"
+Nottingham Trent University,52.9577322,-1.15617099267709,"Nottingham Trent University, Waverley Terrace, Lace Market, The Park, City of Nottingham, East Midlands, England, NG1 5JD, UK"
+University of Basel,47.5612651,7.5752961,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra"
+"Bogazici University, Bebek",41.0868841,29.0441316722649,"Boğaziçi Üniversitesi Kuzey Yerleşkesi, Okulaltı 1. Sokak, Rumelihisarı, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34467, Türkiye"
+King Faisal University,26.397778,50.183056,"University of Dammam, King Faisal Rd, العقربية, الخبر, المنطقة الشرقية, ٣١٩٥٢, السعودية"
+National Institute of Technology Karnataka,13.01119095,74.7949882494716,"National Institute of Technology, Karnataka, NH66, ದಕ್ಷಿಣ ಕನ್ನಡ, Mangaluru taluk, Dakshina Kannada, Karnataka, 575025, India"
+"University of California, San Diego",32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+"University of Science and Technology of China, Hefei, 230027, China",31.83907195,117.264207478576,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国"
+Nagoya University,43.53750985,143.60768225282,"SuperDARN (Hokkaido West), 太辛第1支線林道, 陸別町, 足寄郡, 十勝総合振興局, 北海道, 北海道地方, 日本"
+Institute of Information Science,25.0410728,121.614756201755,"資訊科學研究所, 數理大道, 中研里, 南港子, 南港區, 臺北市, 11574, 臺灣"
+University of Iowa,41.6659,-91.573103065,"University of Iowa, Hawkeye Court, Iowa City, Johnson County, Iowa, 52246, USA"
+University of Trento,46.0658836,11.1159894,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia"
+Middlebury College,44.0090777,-73.1767946,"Middlebury College, Old Chapel Road, Middlebury, Addison County, Vermont, 05753, USA"
+"Indian Institute of Science, India",13.0222347,77.5671832476811,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India"
+"University of Bonn, Germany",50.7338124,7.1022465,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland"
+"University of Illinois at Urbana-Champaign, IL USA",40.101976,-88.2314378,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA"
+University of Miami,25.7173339,-80.2786688657706,"University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA"
+"DIT UNIVERSITY, DEHRADUN",30.3983396,78.0753455,"DIT University, Dehradun-Mussoorie Road, Rājpur, Kincraig, Dehra Dūn, Uttarakhand, 248009, India"
+"Istanbul Technical University, Istanbul, Turkey",41.10427915,29.022311592943,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye"
+"Karlsruhe Institute of Technology, 76131 Karlsruhe, Germany",49.10184375,8.43312559623876,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland"
+"Rheinische-Friedrich-Wilhelms University, Bonn, Germany",50.7338124,7.1022465,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland"
+Nanyang Technological University,1.3484104,103.682979653067,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore"
+College of Electrical and Information Engineering,42.0049791,21.40834315,"Факултет за електротехника и информациски технологии, Орце Николов, Карпош 2, Карпош, Скопје, Општина Карпош, Град Скопје, Скопски Регион, 1000, Македонија"
+Institute of Systems and Robotics,53.8338371,10.7035939,"Institut für Robotik und Kognitive Systeme, 160, Ratzeburger Allee, Strecknitz, Sankt Jürgen, Strecknitz, Lübeck, Schleswig-Holstein, 23562, Deutschland"
+Massachusetts Institute of Technology,42.3583961,-71.0956778766393,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+UNIVERSITY OF TAMPERE,61.49412325,23.7792067776763,"Tampereen yliopisto, 4, Kalevantie, Ratinanranta, Tulli, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33100, Suomi"
+University of California Santa Barbara,34.4145937,-119.84581949869,"UCSB, Santa Barbara County, California, 93106, USA"
+The Institute of Electronics,12.8447999,77.6632389626693,"International Institute of Information Technology Bangalore - IIITB, Infosys Avenue, Konappana Agrahara, Electronics City Phase 1, Vittasandra, Bangalore Urban, Karnataka, 560100, India"
+University of Illinois at Chicago,41.86898915,-87.6485625597018,"University of Illinois at Chicago, West Taylor Street, Greektown, Chicago, Cook County, Illinois, 60607, USA"
+"University of Pittsburgh, Pittsburgh, USA",40.44415295,-79.9624399276271,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA"
+"Cardiff University, UK",51.4879961,-3.17969747443907,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK"
+Institute of Digital Media,20.28907925,85.84232125,"Institute of Digital Media Technology, Way to Csa Odisha Office, Ward 35, South East Zone, Bhubaneswar Municipal Corporation, Khordha, Odisha, 751022, India"
+University of Siena,22.4133862,114.210058,"大學 University, 澤祥街 Chak Cheung Street, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国"
+University of Kentucky,38.0333742,-84.5017758,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA"
+"University of Oxford, United Kingdom",51.7534538,-1.25400997048855,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK"
+"University of California, San Diego, La Jolla",32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+"Beijing University of Posts and Telecommunications, Beijing, China",39.9601488,116.351939210403,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国"
+"Bar Ilan University, Israel",32.06932925,34.8433433861531,"אוניברסיטת בר אילן, כביש גהה, גבעת שמואל, קריית מטלון, גבעת שמואל, מחוז תל אביב, NO, ישראל"
+Raipur institute of technology,21.2262243,81.8013664,"Raipur institute of technology, NH53, Raipur, Chhattisgarh, 492101, India"
+"Nagaoka University of Technology, Japan",37.42354445,138.77807276029,"長岡技術科学大学 (Nagaoka University of Technology), 長岡西山線, 長岡市, 新潟県, 中部地方, 日本"
+"University of California, Berkeley",37.8687126,-122.255868148743,"Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA"
+"University of Chinese Academy of Sciences, Beijing 101408, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+"Macau University of Science and Technology, Macau",22.15263985,113.568032061523,"Universidade de Ciência e Tecnologia de Macau 澳門科技大學 Macau University of Science and Technology, 偉龍馬路 Avenida Wai Long, 氹仔Taipa, 氹仔舊城區 Vila de Taipa, 嘉模堂區 Nossa Senhora do Carmo, 氹仔 Taipa, 澳門 Macau, 853, 中国"
+University of Abertay,56.46323375,-2.97447511707098,"Abertay University, Bell Street, City Centre, Dundee, Dundee City, Scotland, DD1 1HG, UK"
+"Southwest University, China",29.82366295,106.420500156445,"西南大学, 天生路, 北碚区 (Beibei), 北碚区, 北碚区 (Beibei), 重庆市, 400711, 中国"
+"Cornell University, Ithaca, New York",42.4505507,-76.4783512955428,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA"
+National Taipei University,24.94314825,121.368629787836,"國立臺北大學, 151, 大學路, 龍恩里, 隆恩埔, 三峽區, 新北市, 23741, 臺灣"
+"University of Tennessee, Knoxville",35.9542493,-83.9307395,"University of Tennessee, Melrose Avenue, Fort Sanders, Knoxville, Knox County, Tennessee, 37916, USA"
+"University of California, San Diego, California, USA",32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+University of Texas at Austin,30.284151,-97.7319559808022,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA"
+University of Tokyo,35.9020448,139.936220089117,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本"
+University of Massachusetts,42.3889785,-72.5286987,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA"
+"Ruhr-University Bochum, Germany",51.44415765,7.26096541306078,"RUB, 150, Universitätsstraße, Ruhr-Universität, Querenburg, Bochum-Süd, Bochum, Regierungsbezirk Arnsberg, Nordrhein-Westfalen, 44801, Deutschland"
+"Warsaw University of Technology, Poland",52.22165395,21.0073577612511,"Politechnika Warszawska, 1, Plac Politechniki, VIII, Śródmieście, Warszawa, mazowieckie, 00-661, RP"
+Columbia University in the City of New York,40.8071772,-73.9625279772072,"Columbia University In The City Of New York, College Walk, Morningside Heights, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10027, USA"
+University of Basel,47.5612651,7.5752961,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra"
+"University of Peshawar, Pakistan",34.0092004,71.4877494739102,"University of Peshawar, Road 2, JAHANGIR ABAD / جهانگیرآباد, پشاور‎, Peshāwar District, خیبر پختونخوا, 2500, ‏پاکستان‎"
+"Foundation University Rawalpindi Campus, Pakistan",33.5609504,73.0712596618793,"Foundation University Rawalpindi Campus, Main Parking Road, Police Lines, راولپنڈی, Rawalpindi Cantt, پنجاب, 46600, ‏پاکستان‎"
+"University of Oxford, United Kingdom",51.7534538,-1.25400997048855,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK"
+University of Massachusetts,42.3889785,-72.5286987,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA"
+University of Edinburgh,55.94951105,-3.19534912525441,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK"
+University of Wisconsin Madison,43.07982815,-89.4306642542901,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA"
+"Istanbul Technical University, Istanbul, 34469, TURKEY",41.10427915,29.022311592943,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye"
+"Chulalongkorn University, Bangkok",13.74311795,100.532879009091,"จุฬาลงกรณ์มหาวิทยาลัย, 254, ถนนพญาไท, สยาม, แขวงปทุมวัน, เขตปทุมวัน, กรุงเทพมหานคร, 10330, ประเทศไทย"
+University of Arizona,32.2351726,-110.950958317648,"University of Arizona, North Highland Avenue, Rincon Heights, Barrio Viejo, Tucson, Pima County, Arizona, 85721, USA"
+Aristotle University of Thessaloniki GR,40.62984145,22.9588934957528,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα"
+"University of California, Riverside, California 92521, USA",33.9743275,-117.32558236636,"UCR, North Campus Drive, Riverside, Riverside County, California, 92521, USA"
+Michigan State University,42.718568,-84.4779157093052,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA"
+University of the Western Cape,-33.9327762,18.6291540714825,"University of the Western Cape, Park Road, Cape Town Ward 9, Bellville, City of Cape Town, Western Cape, 7493, South Africa"
+"Electrical Engineering, University of",47.6532412,-122.3061707,"Electrical Engineering, 185, Loading Dock, Montlake, University District, Seattle, King County, Washington, 98195-2350, USA"
+Vienna University of Technology,48.19853965,16.3698616762866,"TU Wien, Hauptgebäude, Hoftrakt, Freihausviertel, KG Wieden, Wieden, Wien, 1040, Österreich"
+"Hengyang Normal University, Hengyang, China",26.8661136,112.620921219792,"衡阳师范学院, 黄白路, 雁峰区, 衡阳市 / Hengyang, 湖南省, 中国"
+Courant Institute of Mathematical Sciences,40.7286484,-73.9956863,"Courant Institute of Mathematical Sciences, 251, Mercer Street, Washington Square Village, Greenwich Village, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA"
+"International Institute of Information Technology, Hyderabad, India",17.4454957,78.3485469754447,"International Institute of Information Technology, Hyderabad, Campus Road, Ward 105 Gachibowli, Greater Hyderabad Municipal Corporation West Zone, Hyderabad, Rangareddy District, Telangana, 500032, India"
+Raipur institute of technology,21.2262243,81.8013664,"Raipur institute of technology, NH53, Raipur, Chhattisgarh, 492101, India"
+"Bilkent University, 06800 Cankaya, Turkey",39.8720489,32.7539515466323,"Bilkent Üniversitesi, 3. Cadde, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye"
+University of Notre Dame,41.70456775,-86.2382202601727,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA"
+"Tokyo Institute of Technology, Japan",35.5167538,139.483422513406,"東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本"
+College of Informatics,14.6173885,121.101327315511,"Informatics, F.P. Felix Avenue, Dela Paz, San Isidro, Cainta, Rizal, Metro Manila, 1900, Philippines"
+"Akita Prefectural University, Yurihonjo, Japan",39.39325745,140.073500465928,"秋田県立大学, 日本海東北自動車道(無料区間), 八幡前, 由利本荘市, 秋田県, 東北地方, 〒015-0836, 日本"
+Yaroslavl State University,57.6252103,39.8845656,"ЯрГУ им. Демидова (Экономический факультет), 3, Комсомольская улица, Кировский район, Ярославль, городской округ Ярославль, Ярославская область, ЦФО, 150000, РФ"
+Rowan University,39.7103526,-75.1193266647699,"Rowan University, Esbjornson Walk, Glassboro, Gloucester County, New Jersey, 08028, USA"
+"University of Ottawa, Ottawa, On, Canada",45.42580475,-75.6874011819989,"University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada"
+Muhlenberg College,40.5967637,-75.5124062,"Muhlenberg College, 2400, West Chew Street, Rose Garden, Allentown, Lehigh County, Pennsylvania, 18104, USA"
+"Beijing University of Posts and Telecommunications, Beijing, P.R. China",39.9601488,116.351939210403,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国"
+National Technical University of Athens,37.98782705,23.7317973260904,"Εθνικό Μετσόβιο Πολυτεχνείο, Στουρνάρη, Μουσείο, Αθήνα, Δήμος Αθηναίων, Π.Ε. Κεντρικού Τομέα Αθηνών, Περιφέρεια Αττικής, Αττική, 11250, Ελλάδα"
+Amirkabir University of Technology,35.704514,51.4097205774739,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎"
+Zhejiang University of Technology,30.2931534,120.1620458,"浙江工业大学, 潮王路, 朝晖街道, 杭州市 Hangzhou, 浙江省, 310014, 中国"
+"Harvard University, Cambridge, MA, USA",42.36782045,-71.1266665287448,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA"
+South China University of Technology,23.0502042,113.398803226836,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国"
+"University, China",22.4133862,114.210058,"大學 University, 澤祥街 Chak Cheung Street, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国"
+"The University of Newcastle, Callaghan 2308, Australia",-32.8930923,151.705656,"University of Newcastle, Huxley Library, University Drive, Callaghan, Newcastle-Maitland, Newcastle, NSW, 2308, Australia"
+University of Minnesota,44.97308605,-93.2370881262941,"WeismanArt, 333, East River Parkway, Marcy-Holmes, Phillips, Minneapolis, Hennepin County, Minnesota, 55455, USA"
+"University of Nevada, Reno, USA",39.5469449,-119.813465660936,"Orange 1, Evans Avenue, Reno, Washoe County, Nevada, 89557, USA"
+"Anna University Chennai, India",13.0105838,80.2353736,"Anna University, Nuclear Physics Road, Srinagar Colony, Ward 171, Zone 13 Adyar, Chennai, Chennai district, Tamil Nadu, 600025, India"
+"Carnegie Mellon University Pittsburgh, PA - 15213, USA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+University of Central Florida,28.59899755,-81.1971250118395,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA"
+of Cornell University,42.4505507,-76.4783512955428,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA"
+"Idiap Research Institute, Martigny, Switzerland",46.109237,7.08453548522408,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra"
+Institute of Information Science,25.0410728,121.614756201755,"資訊科學研究所, 數理大道, 中研里, 南港子, 南港區, 臺北市, 11574, 臺灣"
+"University of York, York, UK",53.94540365,-1.0313887829649,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK"
+"National Institute of Standards and Technology, Gaithersburg, MD 20899, USA",39.1254938,-77.2229347515,"National Institute of Standards and Technology, Summer Walk Drive, Diamond Farms, Gaithersburg, Montgomery County, Maryland, 20878, USA"
+University Drive,-21.1753214,149.1432747,"University Drive, Ooralea, Mackay, QLD, 4740, Australia"
+"University City Blvd., Charlotte, NC",35.312224,-80.7084736,"University City Boulevard, Charlotte, Mecklenburg County, North Carolina, 28223, USA"
+"University, USA",25.7147949,-80.276947,"University, South Dixie Highway, Coral Gables, Miami-Dade County, Florida, 33124-6310, USA"
+"Warsaw University of Technology, Poland",52.22165395,21.0073577612511,"Politechnika Warszawska, 1, Plac Politechniki, VIII, Śródmieście, Warszawa, mazowieckie, 00-661, RP"
+"The University of York, United Kingdom",53.94540365,-1.0313887829649,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK"
+Harbin Institute of Technology,45.7413921,126.625527550394,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国"
+"University of North Texas, Denton, Texas, USA",33.2098879,-97.1514748776857,"University of North Texas, West Highland Street, Denton, Denton County, Texas, 76201, USA"
+"Nanjing University, China",32.0565957,118.774088328078,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国"
+"Carnegie Mellon University, Pittsburgh, PA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+Beijing University of Posts and Telecommunications,39.9601488,116.351939210403,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国"
+"University of Oxford, United Kingdom",51.7534538,-1.25400997048855,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK"
+University of Chinese Academy of Sciences (UCAS,39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+"Xidian University, Xi an, China",34.1235825,108.83546,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国"
+University of Colorado Colorado Springs,38.8920756,-104.797163894584,"Main Hall, The Spine, Colorado Springs, El Paso County, Colorado, 80907, USA"
+The Hong Kong Polytechnic University,22.304572,114.179762852269,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国"
+University of Brescia,37.7689374,-87.1113859,"Brescia University, West 7th Street, Owensboro, Daviess County, Kentucky, 42303, USA"
+The University of Hong Kong,22.2081469,114.259641148719,"海洋科學研究所 The Swire Institute of Marine Science, 鶴咀道 Cape D'Aguilar Road, 鶴咀低電台 Cape D'Aguilar Low-Level Radio Station, 石澳 Shek O, 芽菜坑村 Nga Choy Hang Tsuen, 南區 Southern District, 香港島 Hong Kong Island, HK, 中国"
+"University of Ottawa, Canada",45.42580475,-75.6874011819989,"University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada"
+"Benha University, Egypt",30.0818727,31.2445484105016,"كلية الهندسة بشبرا جامعة بنها, شارع اليازجي, روض الفرج, القاهرة, محافظة القاهرة, 2466, مصر"
+Jadavpur University,22.5611537,88.4131019353334,"Jadavpur University, Chingrighata Flyover, Basani Devi Colony, Kolkata, Hāora, West Bengal, 700098, India"
+Indian Institute of Technology,28.5444176,77.1893001,"Indian Institute Of Technology, IIT Delhi Main Road, Adchini, Lado Sarai, Mehrauli, South Delhi, Delhi, 110066, India"
+UNIVERSITY OF WISCONSIN MADISON,43.07982815,-89.4306642542901,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA"
+"Alexandria University, Alexandria, Egypt",31.21051105,29.9131456239399,"جامعة الإسكندرية, الكورنيش, إبراهيمية, الإسكندرية, 21522, مصر"
+Charles Sturt University,-35.0636071,147.3552234,"Charles Sturt University, Wagga Wagga, NSW, 2678, Australia"
+"Aristotle University of Thessaloniki, Greece",40.62984145,22.9588934957528,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα"
+Rowland Institute,42.3639862,-71.0778293,"Rowland Research Institute, Land Boulevard, East Cambridge, Cambridge, Middlesex County, Massachusetts, 02142, USA"
+Institute of Information Technology,23.7289899,90.3982682,"Institute of Information Technology, Sir Sayed Road, ফকিরাপুল, সিদ্দিক বাজার, ঢাকা, ঢাকা বিভাগ, 1000, বাংলাদেশ"
+"University of York, York, United Kingdom",53.94540365,-1.0313887829649,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK"
+"Beijing University of Technology, Beijing 100022, China",39.87391435,116.477222846574,"北京工业大学, 银杏大道, 大郊亭村, 朝阳区 / Chaoyang, 北京市, 3208, 中国"
+University of Houston,29.7207902,-95.3440627149137,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA"
+The University of the Humanities,47.9218937,106.919552402206,"Хүмүүнлэгийн ухааны их сургууль, Ж.Самбуугийн гудамж, Гандан, Улаанбаатар, 975, Монгол улс"
+University of Tokyo,35.9020448,139.936220089117,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本"
+Stony Brook University,40.9153196,-73.1270626,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA"
+University of Rochester,43.1576969,-77.5882915756007,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA"
+The University of North Carolina at Charlotte,35.3103441,-80.732616166699,"Lot 20, Poplar Terrace Drive, Charlotte, Mecklenburg County, North Carolina, 28223, USA"
+Eastern University,40.0505672,-75.3710932636663,"Eastern University, Huston Road, Radnor Township, Delaware County, Pennsylvania, 19087, USA"
+"The University of Adelaide, Australia",-34.9189226,138.604236675404,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia"
+"State University of New York Polytechnic Institute, Utica, New York",43.13800205,-75.2294359077068,"State University of New York Polytechnic Institute, 100, Seymour Road, Maynard, Town of Marcy, Oneida County, New York, 13502, USA"
+"University of Technology, Sydney",-33.8828784,151.200682779726,"UTS, Thomas Street, Ultimo, Sydney, NSW, 2007, Australia"
+"University of Miami, Coral Gables, FL",25.7173339,-80.2786688657706,"University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA"
+Institute of Media Innovation,1.3433937,103.6793303,"Institute for Media Innovation, 50, Nanyang Drive, Pioneer, Southwest, 637553, Singapore"
+"Ruhr-University Bochum, Germany",51.44415765,7.26096541306078,"RUB, 150, Universitätsstraße, Ruhr-Universität, Querenburg, Bochum-Süd, Bochum, Regierungsbezirk Arnsberg, Nordrhein-Westfalen, 44801, Deutschland"
+"University of North Carolina at Chapel Hill, USA",35.9113971,-79.0504529,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA"
+"Beijing Institute of Technology University, P. R. China",39.9586652,116.309712808455,"北京理工大学, 5, 中关村南大街, 中关村, 稻香园南社区, 海淀区, 北京市, 100872, 中国"
+The Florida State University,30.44235995,-84.2974786716626,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA"
+University of Milan,38.6796662,-90.3262816,"Milan Avenue, Ray Mar Terrace, University City, St. Louis County, Missouri, 63130, USA"
+Sun Yat-Sen University,23.09461185,113.287889943975,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国"
+"Monash University, Victoria, Australia",-37.9011951,145.130584919767,"Monash University, Business Park Drive, Monash Business Park, Notting Hill, City of Monash, Victoria, 3800, Australia"
+RWTH Aachen University,50.7791703,6.06728732851292,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland"
+"University of Surrey, United Kingdom",51.24303255,-0.590013824660236,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK"
+University of Arkansas at Little Rock,34.72236805,-92.3383025526859,"University of Arkansas At Little Rock (UALR), 2801, U A L R Campus Drive, Little Rock, Pulaski County, Arkansas, 72204, USA"
+"North China Electric Power University, Baoding, China",38.8760446,115.4973873,"华北电力大学, 永华北大街, 莲池区, 保定市, 莲池区 (Lianchi), 保定市, 河北省, 071000, 中国"
+"Japan Advanced Institute of Science and Technology, Ishikawa-ken 923-1211, Japan",36.4442949,136.5928587,"JAIST (北陸先端科学技術大学院大学), 石川県道55号小松辰口線, Ishikawa Science Park, 能美市, 石川県, 中部地方, 923-1206, 日本"
+Harbin Institute of Technology;Shenzhen University,22.5895016,113.965710495775,"哈工大(深圳), 平山一路, 深圳大学城, 珠光村, 南山区, 深圳市, 广东省, 518000, 中国"
+"University of California, Merced, USA",37.36566745,-120.421588883632,"University of California, Merced, Ansel Adams Road, Merced County, California, USA"
+Central Washington University,47.00646895,-120.53673039883,"Central Washington University, Dean Nicholson Boulevard, Ellensburg, Kittitas County, Washington, 98926, USA"
+CUNY City College,45.5546608,5.4065255,"Cuny, La Tour-du-Pin, Isère, Auvergne-Rhône-Alpes, France métropolitaine, 38110, France"
+University of Wollongong,-34.40505545,150.878346547278,"University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia"
+"Multimedia University, Cyberjaya, Malaysia",2.92749755,101.641853013536,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia"
+"University of Ioannina, Ioannina, Greece",39.6162306,20.8396301098796,"Πανεπιστήμιο Ιωαννίνων, Πανεπιστημίου, Κάτω Νεοχωρόπουλο, Νεοχωρόπουλο, Δήμος Ιωαννιτών, Π.Ε. Ιωαννίνων, Περιφέρεια Ηπείρου, Ήπειρος - Δυτική Μακεδονία, 45110, Ελλάδα"
+"Stony Brook University, NY, USA",40.9153196,-73.1270626,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA"
+"Amity University, Lucknow, India",26.85095965,81.0495096452828,"Amity University, Faizabad Road, Uttardhauna, Gomti Nagar, Tiwariganj, Lucknow, Uttar Pradesh, 226010, India"
+"University of Northern British Columbia, Canada",53.8925662,-122.814715920529,"UNBC, Campus Ring Road, College Heights, Prince George, Regional District of Fraser-Fort George, British Columbia, V2M 5K7, Canada"
+The University of Sydney,-33.88890695,151.189433661925,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia"
+"Carnegie Mellon University, USA",37.4102193,-122.059654865858,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA"
+"The University of Nottingham, UK",52.9387428,-1.20029569274574,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK"
+"Dankook University, 126 Jukjeon-dong, Suji-gu, Yongin-si, Gyeonggi-do, Korea",37.3219575,127.1250723,"단국대학교 치과병원, 죽전로, 죽전동, 수지구, 용인시, 경기, 16900, 대한민국"
+Simon Fraser University,49.2767454,-122.917773749103,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada"
+"University of Texas at Arlington, TX, USA",32.7283683,-97.112018348404,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA"
+"University of Groningen, Netherlands",53.21967825,6.56251482206542,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland"
+"Bharathidasan University, Trichy, India",10.7778845,78.6966319,"Bharathidasan University Road, Kajamalai, Ponmalai, Ponmalai Zone, Tiruchchirāppalli, Tiruchchirappalli district, Tamil Nadu, 620020, India"
+"University of Aizu, Japan",37.5236728,139.938072464124,"会津大学, 磐越自動車道, 会津若松市, 福島県, 東北地方, 965-8580, 日本"
+"University of California, Santa Barbara",34.4145937,-119.84581949869,"UCSB, Santa Barbara County, California, 93106, USA"
+Bahcesehir University,41.02451875,28.9769795349346,"BAU Galata, 24, Kemeraltı Caddesi, Müeyyedzade, Beyoğlu, İstanbul, Marmara Bölgesi, 34425, Türkiye"
+University of Nebraska - Lincoln,40.8174723,-96.7044468,"Sheldon Museum of Art, North 12th Street, West Lincoln, Lincoln, Lancaster County, Nebraska, 68588-0300, USA"
+"University of Adelaide, Australia",-34.9189226,138.604236675404,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia"
+"University of South Carolina, USA",33.9928298,-81.0268516781225,"University of South Carolina, Wheat Street, Columbia, Richland County, South Carolina, 29205, USA"
+Kyung Hee University,32.8536333,-117.2035286,"Kyung Hee Tae Kwon Do, Vons 2370 Truck Service Ramp, University City, San Diego, San Diego County, California, 92122, USA"
+"University of Kentucky, USA",38.0333742,-84.5017758,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA"
+West Virginia University,39.65404635,-79.96475355,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA"
+"Jiangnan University, Wuxi",31.4854255,120.2739581,"江南大学站, 蠡湖大道, 滨湖区, 南场村, 滨湖区 (Binhu), 无锡市 / Wuxi, 江苏省, 214121, 中国"
+Japan Advanced Institute of Science and Technology,36.4442949,136.5928587,"JAIST (北陸先端科学技術大学院大学), 石川県道55号小松辰口線, Ishikawa Science Park, 能美市, 石川県, 中部地方, 923-1206, 日本"
+University of Abertay,56.46323375,-2.97447511707098,"Abertay University, Bell Street, City Centre, Dundee, Dundee City, Scotland, DD1 1HG, UK"
+"York University, Toronto",43.7743911,-79.5048108538813,"York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada"
+"University College London, UK",51.5231607,-0.1282037,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK"
+"University of Missouri, Columbia, MO",38.926761,-92.2919378337447,"L1, Maguire Boulevard, Lemone Industrial Park, Columbia, Boone County, Missouri, 65201, USA"
+Iran,32.9407495,52.9471344,‏ایران‎
+"University of Houston, Houston, TX 77204, USA",29.7207902,-95.3440627149137,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA"
+The University of Manchester,53.46600455,-2.23300880782987,"University of Manchester - Main Campus, Brunswick Street, Curry Mile, Ardwick, Manchester, Greater Manchester, North West England, England, M13 9NR, UK"
+COLUMBIA UNIVERSITY,40.8419836,-73.9436897071772,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA"
+"University of Oulu, Finland",65.0592157,25.466326012507,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi"
+Korea Advanced institute of Science and Technology,36.3697191,127.362537001151,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국"
+"University of Pittsburgh, Pittsburgh PA",40.4495417,-79.8957457221781,"Visitor Parking, Thomas Boulevard, Homewood, Point Breeze North, Wilkinsburg, Allegheny County, Pennsylvania, 15208, USA"
+Hanyang University,37.5557271,127.0436642,"한양대, 206, 왕십리로, 사근동, 성동구, 서울특별시, 04763, 대한민국"
+Institute of Systems and Robotics,53.8338371,10.7035939,"Institut für Robotik und Kognitive Systeme, 160, Ratzeburger Allee, Strecknitz, Sankt Jürgen, Strecknitz, Lübeck, Schleswig-Holstein, 23562, Deutschland"
+"Nottingham University Hospital, Nottingham, UK",52.9434967,-1.18631123153121,"Nottingham University Hospital, Central Route, Dunkirk, Wollaton, City of Nottingham, East Midlands, England, NG7 2UH, UK"
+"Zhengzhou University, Zhengzhou, Henan 450052, China",34.8088168,113.5352664,"科学大道, 中原区 (Zhongyuan), 郑州市 / Zhengzhou, 河南省, 450001, 中国"
+Graz University of Technology,47.05821,15.460195677136,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich"
+"University of Tokyo, Japan",35.9020448,139.936220089117,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本"
+Villanova University,40.0367774,-75.342023320028,"Villanova University, East Lancaster Avenue, Radnor Township, Delaware County, Pennsylvania, 19010, USA"
+"Imperial College London, United Kingdom",51.49887085,-0.175607973937072,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK"
+"George Mason University, Fairfax, VA, USA",38.83133325,-77.3079883887912,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA"
+"COMSATS Institute of Information Technology, Lahore 54000, Pakistan",31.4006332,74.2137296,"COMSATS Institute of Information Technology, Ali Akbar Road, Dawood Residency, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54700, ‏پاکستان‎"
+Queen Mary University,47.0570222,21.922709,"Universitatea Creștină Partium - Clădirea Sulyok, 27, Strada Primăriei, Orașul Nou, Oradea, Bihor, 410209, România"
+India,22.3511148,78.6677428,India
+University of Buffalo,40.7021766,-99.0985061173294,"University of Nebraska at Kearney, 2504, 9th Avenue, Kearney, Buffalo County, Nebraska, 68849, USA"
+"Stanford University, USA",37.43131385,-122.169365354983,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA"
+International Institute of Information Technology,17.4454957,78.3485469754447,"International Institute of Information Technology, Hyderabad, Campus Road, Ward 105 Gachibowli, Greater Hyderabad Municipal Corporation West Zone, Hyderabad, Rangareddy District, Telangana, 500032, India"
+University of Tampere,61.49412325,23.7792067776763,"Tampereen yliopisto, 4, Kalevantie, Ratinanranta, Tulli, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33100, Suomi"
+University of Illinois at Urbana-Champaign,40.101976,-88.2314378,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA"
+Seoul National University,37.26728,126.9841151,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국"
+"Asia Pacific University of Technology and Innovation, Kuala Lumpur 57000, Malaysia",3.0552109,101.7005831,"Asia Pacific University of Technology and Innovation (APU), Astro North Entrance, Astro, Sungai Besi, KL, 57000, Malaysia"
+"National University of Ireland Maynooth, Co. Kildare, Ireland",53.3846975,-6.60039458177959,"National University of Ireland Maynooth, River Apartments, Maynooth, Maynooth ED, Maynooth Municipal District, County Kildare, Leinster, KILDARE, Ireland"
+ARISTOTLE UNIVERSITY OF THESSALONIKI,40.62984145,22.9588934957528,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα"
+University of Florida,29.6328784,-82.3490133048243,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA"
+a The University of Nottingham Malaysia Campus,2.9438432,101.8736196,"The University of Nottingham Malaysia Campus, Jalan Broga, Bandar Rinching, Semenyih, Selangor, 43500, Malaysia"
+"Lund University, Lund, Sweden",55.7039571,13.1902011,"TEM at Lund University, 9, Klostergatan, Stadskärnan, Centrum, Lund, Skåne, Götaland, 22222, Sverige"
+The University of Cambridge,52.17638955,0.143088815415187,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK"
+Reutlingen University,48.48187645,9.18682403998887,"Campus Hohbuch, Campus Hochschule Reutlingen, Reutlingen, Landkreis Reutlingen, Regierungsbezirk Tübingen, Baden-Württemberg, 72762, Deutschland"
+Dr. Babasaheb Ambedkar Marathwada University,19.8960918,75.3089470267316,"Boys Hostel No. 3, Shantipura road, Cantonment, Bidri workshop, Aurangabad, Maharashtra, 431004, India"
+"The City College of New York, New York, NY 10031, USA",40.81819805,-73.9510089793336,"CCNY, 160, Convent Avenue, Manhattanville, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10031, USA"
+"Manchester University, UK",53.47020165,-2.23932183309859,"Manchester Metropolitan University – All Saints Campus, Lower Ormond Street, Hulme, Manchester, Greater Manchester, North West England, England, M15 6BX, UK"
+"University of Massachusetts, Amherst",42.3889785,-72.5286987,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA"
+Shanghai Jiao Tong University,31.20081505,121.428406809373,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国"
+UNIVERSITY IN PRAGUE,50.0714761,14.4542642,"Business Institut EDU, Kodaňská, Vršovice, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 10100, Česko"
+"Rutgers University, Newark, NJ, USA",40.7417586,-74.1750462269524,"Dana Library, Bleeker Street, Teachers Village, Newark, Essex County, New Jersey, 07102, USA"
+University of Caen,35.0274996,135.781545126193,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本"
+"Hong Kong Polytechnic University, Hong Kong, China",22.304572,114.179762852269,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国"
+"California Institute of Technology, Pasadena, CA, USA",34.13710185,-118.125274866116,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA"
+"McGill University, Montreal, Canada",45.50691775,-73.5791162596496,"McGill University, Avenue Docteur Penfield, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 2T8, Canada"
+"Temple University, Philadelphia, USA",39.95472495,-75.1534690525548,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA"
+"Florida Institute Of Technology, Melbourne Fl",28.0642296,-80.6230097241205,"Florida Institute of Technology, West University Boulevard, Melbourne, Brevard County, Florida, 32901, USA"
+"University of Salzburg, Austria",47.79475945,13.0541752486067,"Universität Salzburg - Unipark, 1, Erzabt-Klotz-Straße, Nonntal, Salzburg, 5020, Österreich"
+Northumbria University,55.0030632,-1.57463231052026,"Northumbria University, Birkdale Close, High Heaton, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE7 7TP, UK"
+"Sharif University of Technology, Tehran. Iran",35.7036227,51.351250969544,"دانشگاه صنعتی شریف, خیابان آزادی, زنجان, منطقه ۹ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, 14588, ‏ایران‎"
+The City College and the Graduate Center,37.76799565,-122.400099572569,"Graduate Center, 184, Hooper Street, Mission Bay, SF, California, 94158, USA"
+"University of Queensland, Australia",-27.49741805,153.013169559836,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia"
+"Tohoku University, Sendai, Japan",38.2530945,140.8736593,"Tohoku University, 五橋通, 青葉区, 仙台市, 宮城県, 東北地方, 980-0811, 日本"
+"Brown University, United States",41.8268682,-71.4012314581107,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA"
+Myongji University,37.2381023,127.1903431,"명지대, 금학로, 역북동, 처인구, 용인시, 경기, 17144, 대한민국"
+"Stony Brook University, Stony Brook, NY 11794, USA",40.9153196,-73.1270626,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA"
+Georgia Institute of Technology,33.776033,-84.3988408600158,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA"
+"University of California, San Diego",32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+"Southwest University, China",29.82366295,106.420500156445,"西南大学, 天生路, 北碚区 (Beibei), 北碚区, 北碚区 (Beibei), 重庆市, 400711, 中国"
+"University of Pittsburgh, PA, USA",40.44415295,-79.9624399276271,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA"
+Hong Kong Baptist University,22.3874201,114.2082222,"香港浸會大學 Hong Kong Baptist University, 安明街 On Ming Street, 石門 Shek Mun, 石古壟 Shek Kwu Lung, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1132, 中国"
+"University of Pittsburgh, Pittsburgh",40.44415295,-79.9624399276271,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA"
+Politehnica University of Timisoara,45.746189,21.2275507517647,"UPT, Bulevardul Vasile Pârvan, Elisabetin, Timișoara, Timiș, 300223, România"
+"University of Akron, Akron",41.0789035,-81.5197127229943,"University of Akron, East State Street, Stadium District, Cascade Valley, Akron, Summit County, Ohio, 44308, USA"
+Halmstad University,56.66340325,12.8792972689712,"Högskolan i Halmstad, 3, Kristian IV:s väg, Larsfrid, Nyhem, Halmstad, Hallands län, Götaland, 301 18, Sverige"
+George Mason University,38.83133325,-77.3079883887912,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA"
+Shandong University of Science and Technology,36.00146435,120.116240565627,"山东科技大学, 579, 前湾港路, 牛王庙, 北下庄, 黄岛区 (Huangdao), 青岛市, 山东省, 266500, 中国"
+University of Frankfurt,50.13053055,8.69234223934388,"Frankfurt University of Applied Sciences, Kleiststraße, Nordend West, Frankfurt, Regierungsbezirk Darmstadt, Hessen, 60318, Deutschland"
+"University of Pittsburgh, Pittsburgh, PA, USA",40.4495417,-79.8957457221781,"Visitor Parking, Thomas Boulevard, Homewood, Point Breeze North, Wilkinsburg, Allegheny County, Pennsylvania, 15208, USA"
+"Ritsumeikan, University",49.26007165,-123.253442836235,"Ritsumeikan House, Lower Mall, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada"
+UNIVERSITY OF TARTU,58.38131405,26.7207808104523,"Paabel, University of Tartu, 17, Ülikooli, Kesklinn, Tartu linn, Tartu, Tartu linn, Tartu maakond, 53007, Eesti"
+"Clemson University, Clemson, SC",34.67871075,-82.8346790794026,"E-06 Parking, Parkway Drive, Pickens County, South Carolina, SC, USA"
+Oxford University,51.7520849,-1.25166460220888,"University College, Logic Lane, Grandpont, Oxford, Oxon, South East, England, OX1 4EX, UK"
+National Chiao-Tung University,24.78676765,120.997244116807,"NCTU;交大;交通大學;交大光復校區;交通大學光復校區, 1001, 大學路, 光明里, 赤土崎, 東區, 新竹市, 30010, 臺灣"
+THE UNIVERSITY OF CHICAGO,41.78468745,-87.6007493265106,"University of Chicago, South Ellis Avenue, Woodlawn, Chicago, Cook County, Illinois, 60637, USA"
+Victoria University of Wellington,-41.29052775,174.768469187426,"Victoria University of Wellington, Waiteata Road, Aro Valley, Wellington, Wellington City, Wellington, 6040, New Zealand/Aotearoa"
+"New Jersey Institute of Technology, USA",40.7423025,-74.1792817237128,"New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA"
+"West Virginia University, Morgantown, USA",39.65404635,-79.96475355,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA"
+"Aalborg University, Denmark",57.01590275,9.97532826658991,"AAU, Pontoppidanstræde, Sønder Tranders, Aalborg, Aalborg Kommune, Region Nordjylland, 9220, Danmark"
+University of Iowa,41.6659,-91.573103065,"University of Iowa, Hawkeye Court, Iowa City, Johnson County, Iowa, 52246, USA"
+"The University of North Carolina, Chapel Hill",35.90503535,-79.0477532652511,"University of North Carolina, Emergency Room Drive, Chapel Hill, Orange County, North Carolina, 27599, USA"
+University Institute of Engineering and Technology,26.9302879,80.9278433,"Maharishi University Of Information Technology, NH230, Jankipuram, Lucknow, Uttar Pradesh, 226021, India"
+"King Saud University, Riyadh 11543, Saudi Arabia",24.7246403,46.623350123456,"King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية"
+Institute of Digital Media,20.28907925,85.84232125,"Institute of Digital Media Technology, Way to Csa Odisha Office, Ward 35, South East Zone, Bhubaneswar Municipal Corporation, Khordha, Odisha, 751022, India"
+"Michigan State University, E. Lansing, MI 48823, USA",42.7337998,-84.4804243,"Dero Fixit Bike Station, Grand River Avenue, East Lansing, Ingham County, Michigan, 48824, USA"
+"Michigan State University, East Lansing, MI, USA",42.718568,-84.4779157093052,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA"
+Drexel University,39.9574,-75.1902670552555,"Drexel University, Arch Street, Powelton Village, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA"
+"University of North Carolina Wilmington, Wilmington, NC, USA",34.2377352,-77.92673494788,"Kenan House parking lot, Princess Street, Wilmington, New Hanover County, North Carolina, 28405, USA"
+"Harvard University, USA",42.36782045,-71.1266665287448,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA"
+Macau University of Science and Technology,22.15263985,113.568032061523,"Universidade de Ciência e Tecnologia de Macau 澳門科技大學 Macau University of Science and Technology, 偉龍馬路 Avenida Wai Long, 氹仔Taipa, 氹仔舊城區 Vila de Taipa, 嘉模堂區 Nossa Senhora do Carmo, 氹仔 Taipa, 澳門 Macau, 853, 中国"
+University of Massachusetts - Amherst,42.3869382,-72.5299147706745,"UMass Amherst, Commonwealth Avenue, Amherst, Hampshire, Massachusetts, 01003, USA"
+"University of North Carolina at Chapel Hill, Chapel Hill, NC",35.9105975,-79.0517871,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA"
+"UNIVERSITY OF CALIFORNIA, SAN DIEGO",32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+"Columbia University, New York, NY 10027, USA",40.81779415,-73.9578531933627,"Columbia University, West 131st Street, Manhattanville Houses, Manhattanville, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10027, USA"
+"University of Texas at Arlington, Arlington, TX",32.7283683,-97.112018348404,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA"
+"University of Rochester, Rochester, NY, USA",43.1576969,-77.5882915756007,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA"
+University of California San Diego,32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+Tongji University,31.28473925,121.496949085887,"同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国"
+National Taiwan University,25.01682835,121.538469235773,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣"
+South College Road,39.2715228,-76.6936807,"South College Road, Beechfield, Baltimore, Maryland, 21229, USA"
+"College Park, MD",38.980666,-76.9369189,"College Park, Prince George's County, Maryland, USA"
+SungKyunKwan University,37.3003127,126.972123,"성균관대, 덕영대로, 천천동, 장안구, 수원시, 경기, 16357, 대한민국"
+"University of Gujrat, Pakistan",32.63744845,74.1617455759799,"University of Gujrat, University Road, Chandhar, Gujrāt District, پنجاب, 50700, ‏پاکستان‎"
+The Hong Kong University of Science and Technology,22.3386304,114.2620337,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国"
+"National Chiao Tung University, Taiwan",24.78676765,120.997244116807,"NCTU;交大;交通大學;交大光復校區;交通大學光復校區, 1001, 大學路, 光明里, 赤土崎, 東區, 新竹市, 30010, 臺灣"
+Portland State University,45.51181205,-122.684929993829,"Portland State University, Southwest Park Avenue, University District, Portland Downtown, Portland, Multnomah County, Oregon, 97201, USA"
+"Xiamen University, Xiamen, China",24.4399419,118.093017809127,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国"
+Swiss Federal Institute of Technology,47.3764534,8.54770931489751,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra"
+"University of Illinois at Urbana-Champaign, Urbana, IL, USA",40.101976,-88.2314378,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA"
+"College of Engineering Pune, India",18.52930005,73.8568253702551,"College of Engineering, Pune, NH753F, Mangalwar Peth, Pune, Pune District, Maharashtra, 411011, India"
+"Shenzhen University, Shenzhen, China",22.53521465,113.931591101679,"深圳大学, 3688, 南海大道, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518060, 中国"
+"University of Central Punjab, Pakistan",31.4466149,74.2679762,"University of Central Punjab, Khyaban-e-Jinnah, PECHS, Wapda Town, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54000, ‏پاکستان‎"
+University of Texas at San Antonio,29.58333105,-98.6194450505688,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA"
+"York University, Toronto, Canada",43.7743911,-79.5048108538813,"York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada"
+"University of Maryland-College Park, USA",38.99203005,-76.9461029019905,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA"
+Tokyo Denki University,35.6572957,139.542558677257,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本"
+Princeton University,40.34829285,-74.66308325,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA"
+University of Adelaide,-34.9189226,138.604236675404,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia"
+"Ryerson University, Canada",43.65815275,-79.3790801045263,"Ryerson University, Gould Street, Downtown Yonge, Old Toronto, Toronto, Ontario, M5B 2G9, Canada"
+"University of Dschang, Cameroon",5.4409448,10.0712056113589,"Université de Dschang, Départementale 65, Fokoué, Menoua, OU, Cameroun"
+University of Tsukuba,36.1112058,140.1055176,"University of Tsukuba, つばき通り, Kananemoto-satsukabe village, つくば市, 茨城県, 関東地方, 305-8377, 日本"
+The University of Nottingham,52.9387428,-1.20029569274574,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK"
+"Beijing Institute of Technology, Beijing 100081 CHINA",39.9586652,116.309712808455,"北京理工大学, 5, 中关村南大街, 中关村, 稻香园南社区, 海淀区, 北京市, 100872, 中国"
diff --git a/scraper/reports/institutions_found/found-2.csv b/scraper/reports/institutions_found/found-2.csv
new file mode 100644
index 00000000..1b4d5911
--- /dev/null
+++ b/scraper/reports/institutions_found/found-2.csv
@@ -0,0 +1,510 @@
+"Rutgers University, Newark, NJ, USA",40.7417586,-74.1750462269524,"Dana Library, Bleeker Street, Teachers Village, Newark, Essex County, New Jersey, 07102, USA"
+"Sri krishna College of Technology, Coimbatore, India",10.925861,76.9224672855261,"Sri Krishna College of Technology, Kovaipudur to Golf Course Road dirt track, Ward 89, South Zone, Coimbatore, Coimbatore district, Tamil Nadu, 641001, India"
+Yale University,41.25713055,-72.9896696015223,"Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA"
+Eskisehir Osmangazi University,39.7487516,30.4765307102195,"Eskişehir Osmangazi Üniversitesi Meşelik Yerleşkesi, Kütahya-Eskişehir yolu, Sazova Mahallesi, Karagözler, Tepebaşı, Eskişehir, İç Anadolu Bölgesi, 26160, Türkiye"
+"Shanghai Jiao Tong University, China",31.20081505,121.428406809373,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国"
+"University of Coimbra, Portugal",40.2075951,-8.42566147540816,"Reitoria da Universidade de Coimbra, Rua de Entre-Colégios, Almedina, Alta, Almedina, Sé Nova, Santa Cruz, Almedina e São Bartolomeu, CBR, Coimbra, Baixo Mondego, Centro, 3000-062, Portugal"
+"Fudan University, Shanghai, China",31.30104395,121.500454969435,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国"
+"University of Science and Technology of China, Hefei 230026, P. R. China",31.83907195,117.264207478576,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国"
+University Politehnica of Bucharest,44.43918115,26.0504456538413,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România"
+"Columbia University, New York NY 10027, USA",40.81779415,-73.9578531933627,"Columbia University, West 131st Street, Manhattanville Houses, Manhattanville, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10027, USA"
+University of Perugia,49.2622421,-123.2450052,"Caffe Perugia, 2350, Health Sciences Mall, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada"
+"Tsinghua University, Beijing 100084, China",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+"Tsinghua University, Beijing 100084, P.R.China",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+"University of Ottawa, Canada",45.42580475,-75.6874011819989,"University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada"
+"Lund University, Lund, Sweden",55.7039571,13.1902011,"TEM at Lund University, 9, Klostergatan, Stadskärnan, Centrum, Lund, Skåne, Götaland, 22222, Sverige"
+Princeton University,40.34829285,-74.66308325,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA"
+"Shanghai Jiao Tong University, Shanghai 200240, China",31.02775885,121.432219256081,"上海交通大学(闵行校区), 宣怀大道, 紫竹科技园区, 英武, 闵行区, 上海市, 200240, 中国"
+"National University of Ireland Maynooth, Co. Kildare, Ireland",53.3846975,-6.60039458177959,"National University of Ireland Maynooth, River Apartments, Maynooth, Maynooth ED, Maynooth Municipal District, County Kildare, Leinster, KILDARE, Ireland"
+Myongji University,37.2381023,127.1903431,"명지대, 금학로, 역북동, 처인구, 용인시, 경기, 17144, 대한민국"
+"Hankuk University of Foreign Studies, South Korea",37.5953979,127.0630499,"외대앞, 휘경로, 이문동, 이문2동, 동대문구, 서울특별시, 02407, 대한민국"
+"Arizona State University, Tempe AZ",33.4206602,-111.932634924965,"Arizona State University, Palm Walk, Tempe, Maricopa County, Arizona, 85287, USA"
+"Sharda University, Greater Noida, India",28.4737512,77.4836148,"Sharda University, Yamuna Expressway, Greater Noida, Gautam Buddha Nagar, Uttar Pradesh, 201308, India"
+The University of Electro-Communications,35.6572957,139.542558677257,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本"
+"IDIAP Research Institute, Martigny, Switzerland",46.109237,7.08453548522408,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra"
+"Peking University, Beijing, China",39.9922379,116.303938156219,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国"
+Middlesex University London,51.59029705,-0.229632209454029,"Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK"
+"Tampere University of Technology, Tampere, Finland",61.44964205,23.8587746189096,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi"
+Tafresh University,34.68092465,50.0534135183902,"دانشگاه تفرش, پاسداران, خرازان, بخش مرکزی, شهرستان تفرش, استان مرکزی, ‏ایران‎"
+"University of North Carolina at Chapel Hill, Chapel Hill, NC",35.9105975,-79.0517871,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA"
+"Temple University, Philadelphia, PA 19122, USA",39.9808569,-75.149594,"Temple University, West Berks Street, Hartranft, Philadelphia, Philadelphia County, Pennsylvania, 19122, USA"
+Hunan University,26.88111275,112.628506656425,"Yejin University for Employees, 冶金西路, 和平乡, 珠晖区, 衡阳市 / Hengyang, 湖南省, 中国"
+"University of California, Riverside CA 92521-0425, USA",33.9743275,-117.32558236636,"UCR, North Campus Drive, Riverside, Riverside County, California, 92521, USA"
+Shaheed Zulfikar Ali Bhutto Institute of,24.8186587,67.0316585,"Shaheed Zulfikar Ali Bhutto Institute of Science and Technology - Karachi Campus, Block 5, Clifton Block 5, CBC, ڪراچي Karachi, Karāchi District, سنڌ, 75600, ‏پاکستان‎"
+Punjabi University Patiala,30.3568981,76.4551272,"Punjabi University Patiala, Rajpura Road, Patiala, Punjab, 147001, India"
+"Imperial College London, UK",51.49887085,-0.175607973937072,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK"
+"Southwest Jiaotong University, Chengdu, P.R. China",30.697847,104.0520811,"西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国"
+"Arizona State University, AZ, USA",33.30715065,-111.676531568996,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA"
+"Eindhoven University of Technology, The Netherlands",51.4486602,5.49039956550805,"Technische Universiteit Eindhoven, 2, De Rondom, Villapark, Eindhoven, Noord-Brabant, Nederland, 5600 MB, Nederland"
+"Rensselaer Polytechnic Institute, USA",42.7298459,-73.6795021620135,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA"
+"Boston College, USA",42.3354481,-71.1681386402306,"Boston College, 140, Commonwealth Avenue, Chestnut Hill, Newton, Middlesex County, Massachusetts, 02467, USA"
+Harvard University,42.36782045,-71.1266665287448,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA"
+UNIVERSITY OF OULU,65.0592157,25.466326012507,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi"
+Carnegie Mellon University,37.4102193,-122.059654865858,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA"
+University of Caen,35.0274996,135.781545126193,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本"
+Rensselaer Polytechnic Institute,42.7298459,-73.6795021620135,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA"
+"University of Chinese Academy of Sciences, Beijing, 100049, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+"University of California, Irvine, USA",33.6431901,-117.84016493553,"University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA"
+University of Wollongong,-34.40505545,150.878346547278,"University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia"
+University of Manitoba,49.8091536,-97.133041790072,"University of Manitoba, Gillson Street, Normand Park, Saint Vital, Winnipeg, Manitoba, R3T 2N2, Canada"
+"Dalian University of Technology, China",38.88140235,121.522810980755,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国"
+University of Glasgow,55.87231535,-4.28921783557444,"University of Glasgow, University Avenue, Yorkhill, Hillhead, Glasgow, Glasgow City, Scotland, G, UK"
+"Idiap Research Institute, Switzerland",46.109237,7.08453548522408,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra"
+Boston College,42.3354481,-71.1681386402306,"Boston College, 140, Commonwealth Avenue, Chestnut Hill, Newton, Middlesex County, Massachusetts, 02467, USA"
+"University of Rochester, Rochester, NY, USA",43.1576969,-77.5882915756007,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA"
+Bangalore Institute of Technology,12.9551259,77.5741985,"Bangalore Institute of Technology, Krishna Rajendra Road, Mavalli, Vishveshwara Puram, South Zone, Bengaluru, Bangalore Urban, Karnataka, 560004, India"
+Michigan State University,42.718568,-84.4779157093052,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA"
+George Mason University,38.83133325,-77.3079883887912,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA"
+"Stony Brook University, NY 11794, USA",40.9153196,-73.1270626,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA"
+"University of Nottingham, Nottingham, UK",52.9387428,-1.20029569274574,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK"
+"Nanjing University, Nanjing 210093, China",32.0565957,118.774088328078,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国"
+"Jahangirnagar University, Savar, Dhaka 1342, Bangladesh",23.88277575,90.2671009927283,"Jahangirnagar University, 1342, Dhaka - Aricha Highway, Nobinagar, সাভার উপজেলা, ঢাকা জেলা, ঢাকা বিভাগ, 1342, বাংলাদেশ"
+Yaroslavl State University,57.6252103,39.8845656,"ЯрГУ им. Демидова (Экономический факультет), 3, Комсомольская улица, Кировский район, Ярославль, городской округ Ярославль, Ярославская область, ЦФО, 150000, РФ"
+"City University of Hong Kong, Hong Kong, China",22.34000115,114.169702912423,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国"
+of Cornell University,42.4505507,-76.4783512955428,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA"
+"University of Michigan, Ann Arbor, MI",42.2942142,-83.710038935096,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA"
+Oxford Brookes University,51.7555205,-1.2261597,"Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK"
+"Oxford Brookes University, Oxford, United Kingdom",51.7555205,-1.2261597,"Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK"
+University of Newcastle,-33.3578899,151.37834708231,"University of Newcastle Central Coast Campus, Technology Bridge, Ourimbah, Central Coast, NSW, 2258, Australia"
+Sharif University of Technology,35.7036227,51.351250969544,"دانشگاه صنعتی شریف, خیابان آزادی, زنجان, منطقه ۹ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, 14588, ‏ایران‎"
+"Tel Aviv University, Israel",32.1119889,34.8045970204252,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל"
+Reutlingen University,48.48187645,9.18682403998887,"Campus Hohbuch, Campus Hochschule Reutlingen, Reutlingen, Landkreis Reutlingen, Regierungsbezirk Tübingen, Baden-Württemberg, 72762, Deutschland"
+Hanyang University,37.5557271,127.0436642,"한양대, 206, 왕십리로, 사근동, 성동구, 서울특별시, 04763, 대한민국"
+University of Northern British Columbia,53.8925662,-122.814715920529,"UNBC, Campus Ring Road, College Heights, Prince George, Regional District of Fraser-Fort George, British Columbia, V2M 5K7, Canada"
+"Stamford University Bangladesh, Dhaka-1209, Bangladesh",23.7448166,90.4084351355108,"Stamford University Bangladesh, Siddeshwari Road, ফকিরাপুল, Paltan, ঢাকা, ঢাকা বিভাগ, 1217, বাংলাদেশ"
+"National University of Sciences and Technology (NUST), Islamabad, Pakistan",33.644347,72.9885079,"National University of Sciences and Technology (NUST), Kashmir Highway, جی - 10, ICT, وفاقی دارالحکومت اسلام آباد, 44000, ‏پاکستان‎"
+University of Exeter,50.7369302,-3.53647671702167,"University of Exeter, Stocker Road, Exwick, Exeter, Devon, South West England, England, EX4 4QN, UK"
+University of London,51.5217668,-0.130190717056655,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK"
+"Istanbul Technical University, Istanbul, 34469, TURKEY",41.10427915,29.022311592943,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye"
+The Open University,52.02453775,-0.709274809394501,"The Open University, East Lane, Walton, Monkston, Milton Keynes, South East, England, MK7 6AE, UK"
+University of Illinois at Urbana-Champaign,40.101976,-88.2314378,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA"
+University of Thessaloniki,40.62984145,22.9588934957528,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα"
+"Technical University Munich, Germany",48.14955455,11.5677531417838,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland"
+"Carnegie Mellon University, Pittsburgh, USA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+"Information Technology University (ITU), Punjab, Lahore, Pakistan",31.4760299,74.3427526,"Information Technology University (ITU), Ferozepur Road, Sher Shah Block, Garden Town, Al Noor Town, Lahore District, پنجاب, 54600, ‏پاکستان‎"
+The University of Sydney,-33.88890695,151.189433661925,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia"
+New Jersey Institute of Technology,40.7423025,-74.1792817237128,"New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA"
+Indiana University,39.86948105,-84.8795690544362,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA"
+Dartmouth College,43.7047927,-72.2925909,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA"
+Florida State University,30.44235995,-84.2974786716626,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA"
+Sun Yat-Sen University,23.09461185,113.287889943975,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国"
+University Of Maryland,39.2899685,-76.6219610316858,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA"
+National Institute of Advanced Industrial Science and Technology,36.05238585,140.118523607658,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本"
+University of Washington,47.6543238,-122.308008943203,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA"
+"COMSATS Institute of Information Technology, Islamabad",33.65010145,73.1551494914791,"COMSATS Institute of Information Technology, Fence, Chak Shehzad, وفاقی دارالحکومت اسلام آباد, 45550, ‏پاکستان‎"
+Nanjing University of Information Science and Technology,32.2068102,118.718472893883,"南京信息工程大学, 龙山北路, 第十六街区, 浦口区, 南京市, 江苏省, 210032, 中国"
+Louisiana State University,30.40550035,-91.1862047410405,"LSU, Gourrier Avenue, Baton Rouge, East Baton Rouge Parish, Louisiana, 70803, USA"
+The University of Tokyo,35.9020448,139.936220089117,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本"
+"University of Groningen, The Netherlands",53.21967825,6.56251482206542,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland"
+University of Nebraska - Lincoln,40.8174723,-96.7044468,"Sheldon Museum of Art, North 12th Street, West Lincoln, Lincoln, Lancaster County, Nebraska, 68588-0300, USA"
+"University of Central Florida, USA",28.59899755,-81.1971250118395,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA"
+"University of Bristol, Bristol, BS8 1UB, UK",51.4562363,-2.602779,"University of Bristol, Cantock's Close, Kingsdown, Canon's Marsh, Bristol, City of Bristol, South West England, England, BS8, UK"
+Cyprus University of Technology,34.67567405,33.0457764820597,"Mitropoli Building - Cyprus University of Technology, Anexartisias, Limasol - Λεμεσός, Limassol - Λεμεσός, Κύπρος - Kıbrıs, 3036, Κύπρος - Kıbrıs"
+"Stanford University, Stanford, CA, USA",37.43131385,-122.169365354983,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA"
+Meiji University,35.6975029,139.761391749285,"明治大学, 錦華坂, 猿楽町1, 猿楽町, 東京, 千代田区, 東京都, 関東地方, 101-0051, 日本"
+Kyushu University,33.59914655,130.223598480987,"伊都ゲストハウス, 桜井太郎丸線, 西区, 福岡市, 福岡県, 九州地方, 819−0395, 日本"
+"University of Plymouth, UK",50.3752501,-4.13927692297343,"Charles Seale-Hayne Library, Portland Square, Barbican, Plymouth, South West England, England, PL4 6AP, UK"
+The American University in Cairo,30.04287695,31.2366413899265,"الجامعة الأمريكية بالقاهرة, شارع القصر العينى, القاهرة القديمة, جاردن سيتي, القاهرة, محافظة القاهرة, 11582, مصر"
+"La Trobe University, Australia",-36.7784754,144.298047,"La Trobe University, Keck Street, Flora Hill, Bendigo, City of Greater Bendigo, Loddon Mallee, Victoria, 3550, Australia"
+Karlsruhe Institute of Technology,49.10184375,8.43312559623876,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland"
+Rice University,29.71679145,-95.4047811339379,"Rice University, Stockton Drive, Houston, Harris County, Texas, 77005-1890, USA"
+Rowan University,39.7103526,-75.1193266647699,"Rowan University, Esbjornson Walk, Glassboro, Gloucester County, New Jersey, 08028, USA"
+University of Tokyo,35.9020448,139.936220089117,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本"
+"Beijing University of Posts and Telecommunications, Beijing, P.R. China",39.9601488,116.351939210403,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国"
+Cornell University,42.4505507,-76.4783512955428,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA"
+"Amirkabir University of Technology, Tehran. Iran",35.704514,51.4097205774739,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎"
+New York University,40.72925325,-73.9962539360963,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA"
+"University of South Carolina, USA",33.9928298,-81.0268516781225,"University of South Carolina, Wheat Street, Columbia, Richland County, South Carolina, 29205, USA"
+Florida International University,25.75533775,-80.3762889746807,"FIU, Southwest 14th Street, Sweetwater, University Park, Miami-Dade County, Florida, 33199, USA"
+Islamic University of Gaza - Palestine,31.51368535,34.4401934143135,"The Islamic University of Gaza, Mostafa Hafez Street, South Remal, محافظة غزة, ‏قطاع غزة‎, PO BOX 108, الأراضي الفلسطينية"
+"Queen Mary University of London, UK",51.5247272,-0.0393103466301624,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK"
+Kent State University,41.1443525,-81.3398283284572,"Kent State University, Lester A. Lefton Esplanade, Whitehall Terrace, Kent, Portage County, Ohio, 44242-0001, USA"
+"University, Xi an Shaanxi Province, Xi an 710049, China",34.2707834,108.94449949951,"西五路, 新城区, 新城区 (Xincheng), 西安市, 陕西省, 710003, 中国"
+Politehnica University of Timisoara,45.746189,21.2275507517647,"UPT, Bulevardul Vasile Pârvan, Elisabetin, Timișoara, Timiș, 300223, România"
+Aristotle University of Thessaloniki GR,40.62984145,22.9588934957528,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα"
+"Moscow Institute of Physics and Technology, Russia",55.929035,37.5186680829482,"МФТИ, 9, Институтский переулок, Виноградовские Горки, Лихачёво, Долгопрудный, городской округ Долгопрудный, Московская область, ЦФО, 141700, РФ"
+Kobe University,34.7275714,135.237099997686,"神戸大学, 灘三田線, 灘区, 神戸市, 兵庫県, 近畿地方, 657-00027, 日本"
+University of California San Diego,32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+"Michigan State University, East Lansing, MI, USA",42.718568,-84.4779157093052,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA"
+Queensland University of Technology(QUT,-27.4770485,153.028373791304,"QUT Gardens Point Main Library, V, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia"
+Otto-von-Guericke University Magdeburg,52.14005065,11.6447124822347,"Otto-von-Guericke-Universität Magdeburg, 2, Universitätsplatz, Krökentorviertel/Breiter Weg NA, Alte Neustadt, Magdeburg, Sachsen-Anhalt, 39106, Deutschland"
+"College Park, MD, 20740, USA",38.980666,-76.9369189,"College Park, Prince George's County, Maryland, USA"
+Czech Technical University,50.0764296,14.418023122743,"České vysoké učení technické v Praze, Resslova, Nové Město, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 11121, Česko"
+Middle East Technical University,39.87549675,32.7855350558467,"ODTÜ, 1, 1591.sk(315.sk), Çiğdem Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye"
+"University of Technology, Sydney, Australia",-33.8828784,151.200682779726,"UTS, Thomas Street, Ultimo, Sydney, NSW, 2007, Australia"
+Shanghai Jiao Tong University,31.20081505,121.428406809373,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国"
+"Monash University, Victoria, Australia",-37.9011951,145.130584919767,"Monash University, Business Park Drive, Monash Business Park, Notting Hill, City of Monash, Victoria, 3800, Australia"
+FL,27.7567667,-81.4639835,"Florida, USA"
+Institute of Computer Science III,35.15456615,128.098476040221,"Institute of Computer Science, 8, 내동로, 신율리, 진주시, 경남, 52669, 대한민국"
+"The University of Tokyo, Japan",35.9020448,139.936220089117,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本"
+Cambridge University,50.7944026,-1.0971748,"University, Cambridge Road, Old Portsmouth, Portsmouth, South East, England, PO1 2HB, UK"
+"University POLITEHNICA of Bucharest, Bucharest, Romania",44.43918115,26.0504456538413,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România"
+University of Southern California,34.0224149,-118.286344073446,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA"
+Institute of,38.3836097,-81.7654665,"Institute, Kanawha County, West Virginia, 25112, USA"
+University of California Berkeley,37.87631055,-122.238859269443,"UC Berkeley, Centennial Drive, Oakland, Alameda County, California, 94720-1076, USA"
+"Columbia University, New York, NY, USA",40.8419836,-73.9436897071772,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA"
+Mahanakorn University of Technology,13.84450465,100.856208183836,"มหาวิทยาลัยเทคโนโลยีมหานคร, 140, ถนนเชื่อมสัมพันธ์, กรุงเทพมหานคร, เขตหนองจอก, กรุงเทพมหานคร, 10530, ประเทศไทย"
+"University of Washington, Seattle, USA",47.6543238,-122.308008943203,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA"
+Ionian University,38.2899482,21.7886469,"Πανεπιστήμιο Πατρών, Λεωφ. Ιπποκράτους, κ. Ρίου (Αγίου Γεωργίου Ρίου), Πάτρα, Δήμος Πατρέων, Περιφερειακή Ενότητα Αχαΐας, Περιφέρεια Δυτικής Ελλάδας, Πελοπόννησος, Δυτική Ελλάδα και Ιόνιο, 26443, Ελλάδα"
+University of Arkansas at Little Rock,34.72236805,-92.3383025526859,"University of Arkansas At Little Rock (UALR), 2801, U A L R Campus Drive, Little Rock, Pulaski County, Arkansas, 72204, USA"
+"College Heights Blvd, Bowling Green, KY",36.9881671,-86.4542111,"College Heights Boulevard, Bowling Green, Warren County, Kentucky, 42101, USA"
+University of Pittsburgh,40.44415295,-79.9624399276271,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA"
+Katholieke Universiteit Leuven,50.8830686,4.7019503,"Laboratorium voor Bos, natuur en landschap, 102, Vital Decosterstraat, Sint-Maartensdal, Leuven, Vlaams-Brabant, Vlaanderen, 3000, België / Belgique / Belgien"
+"University of Barcelona, Spain",41.3868913,2.16352384576632,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España"
+"University of Chinese Academy of Sciences, Beijing 100190, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+University of Frankfurt,50.13053055,8.69234223934388,"Frankfurt University of Applied Sciences, Kleiststraße, Nordend West, Frankfurt, Regierungsbezirk Darmstadt, Hessen, 60318, Deutschland"
+Hanoi University of Science and Technology,21.003952,105.843601832826,"HUST, Trần Đại Nghĩa, Hai Bà Trưng, Hà Nội, 10999, Việt Nam"
+"Southwest Jiaotong University, Chengdu, China",30.697847,104.0520811,"西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国"
+"University of Chinese Academy of Sciences, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+"Wayne State University, Detroit, MI 48202, USA",42.3656423,-83.0711533990367,"Wayne State University, Burroughs Street, New Center, Detroit, Wayne County, Michigan, 48202, USA"
+Institute of Electronics and Computer Science,56.97734805,24.1951425550775,"EDI, 14, Dzērbenes iela, Biķerziedi, Teika, Ozolkalni, Rīga, Vidzeme, LV-1006, Latvija"
+"Nanjing University of Aeronautics and Astronautics, Nanjing 210016, China",32.0373496,118.8140686,"南京航空航天大学, 御道街, 白下区, 新世纪广场, 秦淮区, 南京市, 江苏省, 210016, 中国"
+"King Saud University, Riyadh, Saudi Arabia",24.7246403,46.623350123456,"King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية"
+Dr. B. C. Roy Engineering College,23.54409755,87.342697070434,"Dr. B. C. Roy Engineering College, Lenin Sarani, Durgapur, Bānkurā, West Bengal, 713200, India"
+University of Canberra,-35.23656905,149.084469935058,"University of Canberra, University Drive, Bruce, Belconnen, Australian Capital Territory, 2617, Australia"
+"Chung-Ang University, Seoul, Korea",37.50882,126.9619,"중앙대학교, 서달로15길, 흑석동, 동작구, 서울특별시, 06981, 대한민국"
+Beijing Jiaotong University,39.94976005,116.33629045844,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国"
+"King Abdullah University of Science and Technology 4700, Thuwal, Saudi Arabia",22.31055485,39.1051548637793,"KAUST, Collaboration Avenue, ثول, منطقة مكة المكرمة, 23955, السعودية"
+"The Australian National University Canberra ACT 2601, Australia",-35.28121335,149.11665331324,"Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia"
+"Santa Clara University, Santa Clara, CA. 95053, USA",37.34820285,-121.935635412063,"Cowell Center, Accolti Way, Santa Clara, Santa Clara County, California, 95053, USA"
+Rowland Institute,42.3639862,-71.0778293,"Rowland Research Institute, Land Boulevard, East Cambridge, Cambridge, Middlesex County, Massachusetts, 02142, USA"
+Nottingham Trent University,52.9577322,-1.15617099267709,"Nottingham Trent University, Waverley Terrace, Lace Market, The Park, City of Nottingham, East Midlands, England, NG1 5JD, UK"
+University of Basel,47.5612651,7.5752961,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra"
+"Bogazici University, Bebek",41.0868841,29.0441316722649,"Boğaziçi Üniversitesi Kuzey Yerleşkesi, Okulaltı 1. Sokak, Rumelihisarı, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34467, Türkiye"
+King Faisal University,26.397778,50.183056,"University of Dammam, King Faisal Rd, العقربية, الخبر, المنطقة الشرقية, ٣١٩٥٢, السعودية"
+National Institute of Technology Karnataka,13.01119095,74.7949882494716,"National Institute of Technology, Karnataka, NH66, ದಕ್ಷಿಣ ಕನ್ನಡ, Mangaluru taluk, Dakshina Kannada, Karnataka, 575025, India"
+"University of California, San Diego",32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+"University of Science and Technology of China, Hefei, 230027, China",31.83907195,117.264207478576,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国"
+Nagoya University,43.53750985,143.60768225282,"SuperDARN (Hokkaido West), 太辛第1支線林道, 陸別町, 足寄郡, 十勝総合振興局, 北海道, 北海道地方, 日本"
+Institute of Information Science,25.0410728,121.614756201755,"資訊科學研究所, 數理大道, 中研里, 南港子, 南港區, 臺北市, 11574, 臺灣"
+University of Iowa,41.6659,-91.573103065,"University of Iowa, Hawkeye Court, Iowa City, Johnson County, Iowa, 52246, USA"
+University of Trento,46.0658836,11.1159894,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia"
+Middlebury College,44.0090777,-73.1767946,"Middlebury College, Old Chapel Road, Middlebury, Addison County, Vermont, 05753, USA"
+"Indian Institute of Science, India",13.0222347,77.5671832476811,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India"
+"University of Bonn, Germany",50.7338124,7.1022465,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland"
+"University of Illinois at Urbana-Champaign, IL USA",40.101976,-88.2314378,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA"
+University of Miami,25.7173339,-80.2786688657706,"University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA"
+"DIT UNIVERSITY, DEHRADUN",30.3983396,78.0753455,"DIT University, Dehradun-Mussoorie Road, Rājpur, Kincraig, Dehra Dūn, Uttarakhand, 248009, India"
+"Istanbul Technical University, Istanbul, Turkey",41.10427915,29.022311592943,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye"
+"Karlsruhe Institute of Technology, 76131 Karlsruhe, Germany",49.10184375,8.43312559623876,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland"
+"Rheinische-Friedrich-Wilhelms University, Bonn, Germany",50.7338124,7.1022465,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland"
+Nanyang Technological University,1.3484104,103.682979653067,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore"
+College of Electrical and Information Engineering,42.0049791,21.40834315,"Факултет за електротехника и информациски технологии, Орце Николов, Карпош 2, Карпош, Скопје, Општина Карпош, Град Скопје, Скопски Регион, 1000, Македонија"
+Institute of Systems and Robotics,53.8338371,10.7035939,"Institut für Robotik und Kognitive Systeme, 160, Ratzeburger Allee, Strecknitz, Sankt Jürgen, Strecknitz, Lübeck, Schleswig-Holstein, 23562, Deutschland"
+Massachusetts Institute of Technology,42.3583961,-71.0956778766393,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+UNIVERSITY OF TAMPERE,61.49412325,23.7792067776763,"Tampereen yliopisto, 4, Kalevantie, Ratinanranta, Tulli, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33100, Suomi"
+University of California Santa Barbara,34.4145937,-119.84581949869,"UCSB, Santa Barbara County, California, 93106, USA"
+The Institute of Electronics,12.8447999,77.6632389626693,"International Institute of Information Technology Bangalore - IIITB, Infosys Avenue, Konappana Agrahara, Electronics City Phase 1, Vittasandra, Bangalore Urban, Karnataka, 560100, India"
+University of Illinois at Chicago,41.86898915,-87.6485625597018,"University of Illinois at Chicago, West Taylor Street, Greektown, Chicago, Cook County, Illinois, 60607, USA"
+"University of Pittsburgh, Pittsburgh, USA",40.44415295,-79.9624399276271,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA"
+"Cardiff University, UK",51.4879961,-3.17969747443907,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK"
+Institute of Digital Media,20.28907925,85.84232125,"Institute of Digital Media Technology, Way to Csa Odisha Office, Ward 35, South East Zone, Bhubaneswar Municipal Corporation, Khordha, Odisha, 751022, India"
+University of Siena,22.4133862,114.210058,"大學 University, 澤祥街 Chak Cheung Street, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国"
+University of Kentucky,38.0333742,-84.5017758,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA"
+"University of Oxford, United Kingdom",51.7534538,-1.25400997048855,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK"
+"University of California, San Diego, La Jolla",32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+"Beijing University of Posts and Telecommunications, Beijing, China",39.9601488,116.351939210403,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国"
+"Bar Ilan University, Israel",32.06932925,34.8433433861531,"אוניברסיטת בר אילן, כביש גהה, גבעת שמואל, קריית מטלון, גבעת שמואל, מחוז תל אביב, NO, ישראל"
+Raipur institute of technology,21.2262243,81.8013664,"Raipur institute of technology, NH53, Raipur, Chhattisgarh, 492101, India"
+"Nagaoka University of Technology, Japan",37.42354445,138.77807276029,"長岡技術科学大学 (Nagaoka University of Technology), 長岡西山線, 長岡市, 新潟県, 中部地方, 日本"
+"University of California, Berkeley",37.8687126,-122.255868148743,"Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA"
+"University of Chinese Academy of Sciences, Beijing 101408, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+"Macau University of Science and Technology, Macau",22.15263985,113.568032061523,"Universidade de Ciência e Tecnologia de Macau 澳門科技大學 Macau University of Science and Technology, 偉龍馬路 Avenida Wai Long, 氹仔Taipa, 氹仔舊城區 Vila de Taipa, 嘉模堂區 Nossa Senhora do Carmo, 氹仔 Taipa, 澳門 Macau, 853, 中国"
+University of Abertay,56.46323375,-2.97447511707098,"Abertay University, Bell Street, City Centre, Dundee, Dundee City, Scotland, DD1 1HG, UK"
+"Southwest University, China",29.82366295,106.420500156445,"西南大学, 天生路, 北碚区 (Beibei), 北碚区, 北碚区 (Beibei), 重庆市, 400711, 中国"
+"Cornell University, Ithaca, New York",42.4505507,-76.4783512955428,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA"
+National Taipei University,24.94314825,121.368629787836,"國立臺北大學, 151, 大學路, 龍恩里, 隆恩埔, 三峽區, 新北市, 23741, 臺灣"
+"University of Tennessee, Knoxville",35.9542493,-83.9307395,"University of Tennessee, Melrose Avenue, Fort Sanders, Knoxville, Knox County, Tennessee, 37916, USA"
+"University of California, San Diego, California, USA",32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+University of Texas at Austin,30.284151,-97.7319559808022,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA"
+University of Tokyo,35.9020448,139.936220089117,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本"
+University of Massachusetts,42.3889785,-72.5286987,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA"
+"Ruhr-University Bochum, Germany",51.44415765,7.26096541306078,"RUB, 150, Universitätsstraße, Ruhr-Universität, Querenburg, Bochum-Süd, Bochum, Regierungsbezirk Arnsberg, Nordrhein-Westfalen, 44801, Deutschland"
+"Warsaw University of Technology, Poland",52.22165395,21.0073577612511,"Politechnika Warszawska, 1, Plac Politechniki, VIII, Śródmieście, Warszawa, mazowieckie, 00-661, RP"
+Columbia University in the City of New York,40.8071772,-73.9625279772072,"Columbia University In The City Of New York, College Walk, Morningside Heights, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10027, USA"
+University of Basel,47.5612651,7.5752961,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra"
+"University of Peshawar, Pakistan",34.0092004,71.4877494739102,"University of Peshawar, Road 2, JAHANGIR ABAD / جهانگیرآباد, پشاور‎, Peshāwar District, خیبر پختونخوا, 2500, ‏پاکستان‎"
+"Foundation University Rawalpindi Campus, Pakistan",33.5609504,73.0712596618793,"Foundation University Rawalpindi Campus, Main Parking Road, Police Lines, راولپنڈی, Rawalpindi Cantt, پنجاب, 46600, ‏پاکستان‎"
+"University of Oxford, United Kingdom",51.7534538,-1.25400997048855,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK"
+University of Massachusetts,42.3889785,-72.5286987,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA"
+University of Edinburgh,55.94951105,-3.19534912525441,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK"
+University of Wisconsin Madison,43.07982815,-89.4306642542901,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA"
+"Istanbul Technical University, Istanbul, 34469, TURKEY",41.10427915,29.022311592943,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye"
+Reutlingen University,48.48187645,9.18682403998887,"Campus Hohbuch, Campus Hochschule Reutlingen, Reutlingen, Landkreis Reutlingen, Regierungsbezirk Tübingen, Baden-Württemberg, 72762, Deutschland"
+"Chulalongkorn University, Bangkok",13.74311795,100.532879009091,"จุฬาลงกรณ์มหาวิทยาลัย, 254, ถนนพญาไท, สยาม, แขวงปทุมวัน, เขตปทุมวัน, กรุงเทพมหานคร, 10330, ประเทศไทย"
+The University of Cambridge,52.17638955,0.143088815415187,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK"
+"University of Texas at Arlington, Arlington, TX",32.7283683,-97.112018348404,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA"
+"Imperial College London, United Kingdom",51.49887085,-0.175607973937072,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK"
+"The City College of New York, New York, NY 10031, USA",40.81819805,-73.9510089793336,"CCNY, 160, Convent Avenue, Manhattanville, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10031, USA"
+"PSG College of Technology, Coimbatore, Tamil Nadu, India",11.0246833,77.0028424564731,"PSG College of Technology, Avinashi Road, Ward 38, North Zone, Coimbatore, Coimbatore district, Tamil Nadu, 641001, India"
+"Nottingham Trent University, Nottingham, UK",52.9577322,-1.15617099267709,"Nottingham Trent University, Waverley Terrace, Lace Market, The Park, City of Nottingham, East Midlands, England, NG1 5JD, UK"
+National Institute of Technology Rourkela,22.2501589,84.9066855698087,"National Institute of Technology, inside the department, Koel Nagar, Rourkela, Sundargarh, Odisha, 769002, India"
+Arizona State University,33.30715065,-111.676531568996,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA"
+"National Taiwan University of Science and Technology, Taipei 10607, Taiwan",25.01353105,121.541737363138,"臺科大, 43, 基隆路四段, 學府里, 下內埔, 大安區, 臺北市, 10607, 臺灣"
+Beihang University,39.9808333,116.341012492788,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国"
+"Middlesex University, London",51.59029705,-0.229632209454029,"Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK"
+"University of Science and Technology of China, Hefei, Anhui, P. R. China",31.83907195,117.264207478576,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国"
+"University College London, London WC1N 3BG, United Kingdom",51.5231607,-0.1282037,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK"
+The University of Maryland,39.2899685,-76.6219610316858,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA"
+"M S Ramaiah Institute of Technology, Bangalore, Karnataka, India",13.0309553,77.5648559396817,"M S Ramaiah Institute of Technology, MSRIT Quadrangle Path, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560054, India"
+"University of Vermont, 33 Colchester Avenue, Burlington",44.48116865,-73.2002178989123,"University of Vermont, Colchester Avenue, Burlington, Chittenden County, Vermont, 05401, USA"
+"Nanjing University of Aeronautics and Astronautics, Nanjing 210016, China",32.0373496,118.8140686,"南京航空航天大学, 御道街, 白下区, 新世纪广场, 秦淮区, 南京市, 江苏省, 210016, 中国"
+Stanford University,37.43131385,-122.169365354983,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA"
+"Virginia Polytechnic Institute and State University, Blacksburg",37.21872455,-80.4254251869494,"Virginia Polytechnic Institute and State University, Duck Pond Drive, Blacksburg, Montgomery County, Virginia, 24061-9517, USA"
+University of Edinburgh,55.94951105,-3.19534912525441,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK"
+Fudan University,31.30104395,121.500454969435,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国"
+"Hong Kong University of Science and Technology, Hong Kong",22.3386304,114.2620337,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国"
+Institute of Industrial Science,36.05238585,140.118523607658,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本"
+AALTO UNIVERSITY,60.18558755,24.824273298775,"Aalto, 24, Otakaari, Otaniemi, Suur-Tapiola, Espoo, Helsingin seutukunta, Uusimaa, Etelä-Suomi, Manner-Suomi, 02150, Suomi"
+"Tel Aviv University, Israel",32.1119889,34.8045970204252,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל"
+Information Technologies Institute,33.5934539,130.3557837,"公益財団法人九州先端科学技術研究所, Fukuoka SRP Center Building 7F, 百道ランプ下り入り口, 早良区, 福岡市, 福岡県, 九州地方, 814-0001, 日本"
+University of London,51.5217668,-0.130190717056655,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK"
+"University of Bath, Bath, Somerset, United Kingdom",51.3791442,-2.3252332,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK"
+"University of Central Florida, Orlando, USA",28.42903955,-81.4421617727936,"Rosen College of Hospitality Management, 9907, Universal Boulevard, Orange County, Florida, 32819, USA"
+Florida State University,30.44235995,-84.2974786716626,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA"
+University of Western Australia,-31.95040445,115.797900374251,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia"
+"The University of Tennessee, Knoxville",35.9542493,-83.9307395,"University of Tennessee, Melrose Avenue, Fort Sanders, Knoxville, Knox County, Tennessee, 37916, USA"
+University of Birmingham,52.45044325,-1.93196134052244,"University of Birmingham Edgbaston Campus, Ring Road North, Bournbrook, Birmingham, West Midlands Combined Authority, West Midlands, England, B15 2TP, UK"
+"University of Southampton, United Kingdom",50.89273635,-1.39464294664816,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK"
+Yeungnam University,35.8365403,128.7534309,"영남대, 대학로, 부적리, 경산시, 경북, 712-749, 대한민국"
+Beihang University,39.9808333,116.341012492788,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国"
+"King Abdullah University of Science and Technology 4700, Thuwal, Saudi Arabia",22.31055485,39.1051548637793,"KAUST, Collaboration Avenue, ثول, منطقة مكة المكرمة, 23955, السعودية"
+Yale University,41.25713055,-72.9896696015223,"Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA"
+Heriot-Watt University,55.91029135,-3.32345776559167,"Heriot-Watt University - Edinburgh Campus, Third Gait, Currie, Gogarbank, City of Edinburgh, Scotland, EH14 4AS, UK"
+Indiana University Bloomington,39.17720475,-86.5154003022128,"Indiana University Bloomington, East 17th Street, Bloomington, Monroe County, Indiana, 47408, USA"
+"PSG College of Technology, Coimbatore, Tamil Nadu, India",11.0246833,77.0028424564731,"PSG College of Technology, Avinashi Road, Ward 38, North Zone, Coimbatore, Coimbatore district, Tamil Nadu, 641001, India"
+Max Planck Institute for Informatics,49.2579566,7.04577416640431,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland"
+"University of Michigan, Ann Arbor, MI, USA",42.2942142,-83.710038935096,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA"
+"Fudan University, Shanghai, China",31.30104395,121.500454969435,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国"
+University of Technology Sydney,-33.8809651,151.201072985483,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia"
+National University of Science and Technology,33.6450855,72.9915892221655,"National University of Science and Technology, Indus Loop, H-11, ICT, وفاقی دارالحکومت اسلام آباد, 44000, ‏پاکستان‎"
+Duke University,35.9990522,-78.9290629011139,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA"
+The Open University of Israel,32.77824165,34.9956567288188,"האוניברסיטה הפתוחה, 15, אבא חושי, חיפה, גבעת דאונס, חיפה, מחוז חיפה, NO, ישראל"
+IBM Thomas J. Watson Research Center,41.21002475,-73.8040705573196,"IBM Yorktown research lab, Adams Road, Millwood, Town of New Castle, Westchester County, New York, 10562, USA"
+King Faisal University,26.397778,50.183056,"University of Dammam, King Faisal Rd, العقربية, الخبر, المنطقة الشرقية, ٣١٩٥٢, السعودية"
+University of Copenhagen,55.6801502,12.5723270014063,"Københavns Universitet, Krystalgade, Kødbyen, Vesterbro, København, Københavns Kommune, Region Hovedstaden, 1165, Danmark"
+"Middlesex University, London",51.59029705,-0.229632209454029,"Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK"
+University of California Berkeley,37.87631055,-122.238859269443,"UC Berkeley, Centennial Drive, Oakland, Alameda County, California, 94720-1076, USA"
+"Dalian University of Technology, Dalian, China",38.88140235,121.522810980755,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国"
+"Illinois Institute of Technology, Chicago, Illinois, USA",41.8361963,-87.6265591274291,"Illinois Institute of Technology, South State Street, Bronzeville, Chicago, Cook County, Illinois, 60616, USA"
+The Institute of Electronics,12.8447999,77.6632389626693,"International Institute of Information Technology Bangalore - IIITB, Infosys Avenue, Konappana Agrahara, Electronics City Phase 1, Vittasandra, Bangalore Urban, Karnataka, 560100, India"
+The Weizmann Institute of Science,31.9078499,34.8133409244421,"מכון ויצמן למדע, שדרת מרכוס זיו, מעונות שיין, אחוזות הנשיא, רחובות, מחוז המרכז, NO, ישראל"
+Sinhgad College of,19.0993293,74.7691424,"SINHGAD, NH61, Foi, Ahmadnagar, Ahmednagar, Maharashtra, 414001, India"
+"University, Singapore",1.2962018,103.776899437848,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore"
+"Rutgers University, Piscataway, NJ 08854, USA",40.5234675,-74.436975,"The Rock Cafe, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA"
+Zhejiang University,30.19331415,120.119308216677,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国"
+University of Piraeus,37.94173275,23.6530326182197,"Πανεπιστήμιο Πειραιώς, 80, Καραολή και Δημητρίου, Απόλλωνας, Νέο Φάληρο, Πειραιάς, Δήμος Πειραιώς, Περιφερειακή Ενότητα Πειραιώς, Περιφέρεια Αττικής, Αττική, 185 34, Ελλάδα"
+"Concordia University, Canada",45.4955911,-73.5775043,"FOFA Gallery, 1515, Rue Sainte-Catherine Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3H 2T2, Canada"
+"Normal University, Kunming, China",25.0580509,102.6955241,"云南师范大学, 一二一大街, 志城家园, 五华区, 五华区 (Wuhua), 昆明市 (Kunming), 云南省, 650030, 中国"
+"Toyota Technological Institute (Chicago, US",41.7847112,-87.5926056707507,"Toyota Technological Institute, 6045, South Kenwood Avenue, Woodlawn, Chicago, Cook County, Illinois, 60637, USA"
+"University of Malaya, Kuala Lumpur, Malaysia",3.12267405,101.65356103394,"UM, Lingkaran Wawasan, Bukit Pantai, Bangsar, KL, 50603, Malaysia"
+"Columbia University, New York, NY, USA",40.8419836,-73.9436897071772,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA"
+Shaheed Zulfikar Ali Bhutto Institute of,24.8186587,67.0316585,"Shaheed Zulfikar Ali Bhutto Institute of Science and Technology - Karachi Campus, Block 5, Clifton Block 5, CBC, ڪراچي Karachi, Karāchi District, سنڌ, 75600, ‏پاکستان‎"
+"Oxford Brookes University, Oxford, United Kingdom",51.7555205,-1.2261597,"Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK"
+University of Bridgeport,41.1664858,-73.1920564,"University of Bridgeport, Park Avenue, Bridgeport Downtown South Historic District, Bridgeport, Fairfield County, Connecticut, 06825, USA"
+"Tsinghua University, Beijing 100084, China",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+"University of Edinburgh, Edinburgh, UK",55.94951105,-3.19534912525441,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK"
+"Manonmaniam Sundaranar University, Tirunelveli, India",8.76554685,77.65100444813,"Manonmaniam Sundaranar University, Tenkasi-Tirunelveli, Gandhi Nagar, Tirunelveli, Tirunelveli Kattabo, Tamil Nadu, 627808, India"
+"Kent State University, Kent, Ohio, USA",41.1443525,-81.3398283284572,"Kent State University, Lester A. Lefton Esplanade, Whitehall Terrace, Kent, Portage County, Ohio, 44242-0001, USA"
+"University of York, UK",53.94540365,-1.0313887829649,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK"
+"College Park, Maryland",38.980666,-76.9369189,"College Park, Prince George's County, Maryland, USA"
+California Institute of Technology,34.13710185,-118.125274866116,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA"
+"Akita University, Akita, Japan",39.7291921,140.136565773585,"秋田大学鉱業博物館, 2, 秋田八郎潟線, 手形字扇田, 広面, 秋田市, 秋田県, 東北地方, 010-8502, 日本"
+"Bogazici University, Bebek",41.0868841,29.0441316722649,"Boğaziçi Üniversitesi Kuzey Yerleşkesi, Okulaltı 1. Sokak, Rumelihisarı, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34467, Türkiye"
+"M S Ramaiah Institute of Technology, Bangalore, Karnataka, India",13.0309553,77.5648559396817,"M S Ramaiah Institute of Technology, MSRIT Quadrangle Path, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560054, India"
+"University of California, San Diego, USA",32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+Ruhr University Bochum,51.44415765,7.26096541306078,"RUB, 150, Universitätsstraße, Ruhr-Universität, Querenburg, Bochum-Süd, Bochum, Regierungsbezirk Arnsberg, Nordrhein-Westfalen, 44801, Deutschland"
+University of California Berkeley,37.87631055,-122.238859269443,"UC Berkeley, Centennial Drive, Oakland, Alameda County, California, 94720-1076, USA"
+"Bapuji Institute of Engineering and Technology Davanagere, Karnataka, India",14.4443949,75.9027655185535,"Bapuji Institute of Engineering and Technology, 2nd Cross Road, K.T. Jambanna Nagara, Davanagere, Davanagere taluku, Davanagere district, Karnataka, 577000, India"
+"University of Chinese Academy of Sciences, Beijing 100190, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+"University of Pittsburgh, Pittsburgh, USA",40.44415295,-79.9624399276271,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA"
+"National University of Defense Technology, Changsha 410073, China",28.2290209,112.994832044032,"国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国"
+"University Drive, Fairfax, VA 22030-4444, USA",38.835411,-77.316447,"University Drive, Ardmore, Fairfax, Fairfax County, Virginia, 22030, USA"
+Peking University,39.9922379,116.303938156219,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国"
+"University of Tabriz, Tabriz, Iran",38.0612553,46.3298484,"دانشگاه تبریز, شهید ایرج خلوتی, کوی انقلاب, تبریز, بخش مرکزی, شهرستان تبریز, استان آذربایجان شرقی, 5166616471, ‏ایران‎"
+"University of Illinois at Urbana-Champaign, Urbana, IL",40.101976,-88.2314378,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA"
+Colorado State University,40.5709358,-105.086552556269,"Colorado State University, West Pitkin Street, Woodwest, Fort Collins, Larimer County, Colorado, 80526-2002, USA"
+"Portland State University, USA",45.51181205,-122.684929993829,"Portland State University, Southwest Park Avenue, University District, Portland Downtown, Portland, Multnomah County, Oregon, 97201, USA"
+University of Glasgow,55.87231535,-4.28921783557444,"University of Glasgow, University Avenue, Yorkhill, Hillhead, Glasgow, Glasgow City, Scotland, G, UK"
+Glyndwr University,53.05373795,-3.00482075353073,"Glyndŵr University, Mold Road, Rhosrobin, Wrexham, Wales, LL11 2AW, UK"
+Chungnam National University,36.37029045,127.347804575184,"충남대학교, 대덕사이언스길 2코스, 온천2동, 온천동, 유성구, 대전, 34140, 대한민국"
+"Statistics, University of",32.0731522,72.6814703364947,"Department Of Statistics, University Road, Satellite Town, Cantonment, سرگودھا, Sargodha District, پنجاب, 40100, ‏پاکستان‎"
+"University of Pittsburgh, Pittsburgh, PA 15260, USA",40.4437547,-79.9529557,"Stephen Foster Memorial Museum, Forbes Avenue, Panther Hollow, Central Oakland, PGH, Allegheny County, Pennsylvania, 15213, USA"
+Institute for Advanced,38.7468877,139.824707282407,"Institute for Advanced Biosciences, 鶴岡市, 山形県, 東北地方, 日本"
+Oxford Brookes University,51.7555205,-1.2261597,"Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK"
+GE Global Research Center,42.8298248,-73.8771938492793,"GE Global Research Center, Aqueduct, Niskayuna, Schenectady County, New York, USA"
+Massachusetts Institute of Technology (MIT,42.3583961,-71.0956778766393,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+Idiap Research Institute,46.109237,7.08453548522408,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra"
+"University of Adelaide, SA, Australia",-34.9189226,138.604236675404,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia"
+"University, Guangzhou, China",23.1314851,113.2852239,"中山大学第一课室, 74号大院, 中山二路, 马棚岗, 农林街道, 越秀区 (Yuexiu), 广州市, 广东省, 510080, 中国"
+City University of Hong Kong,22.34000115,114.169702912423,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国"
+"Utah State University, Logan, UT 84322-4205, USA",41.7411504,-111.8122309,"Utah State University, Champ Drive, Logan, Cache County, Utah, 84322, USA"
+Indian Institute of Technology Kanpur,26.513188,80.2365194538339,"Indian Institute of Technology Kanpur, 4th Avenue, Panki, Kanpur, Kanpur Nagar, Uttar Pradesh, 208016, India"
+University Politehnica of Bucharest,44.43918115,26.0504456538413,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România"
+University of Wisconsin Madison,43.07982815,-89.4306642542901,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA"
+Louisiana State University,30.40550035,-91.1862047410405,"LSU, Gourrier Avenue, Baton Rouge, East Baton Rouge Parish, Louisiana, 70803, USA"
+"The Chinese University of Hong Kong, China",22.42031295,114.207886442805,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国"
+"University of Delaware, Newark, DE. USA",39.6810328,-75.7540184,"University of Delaware, South College Avenue, Newark, New Castle County, Delaware, 19713, USA"
+"university, Shiraz, Iran",29.6284395,52.5181728343761,"دانشکده مهندسی دانشگاه شیراز, ملاصدرا, فلسطین, محدوده شهرداری منطقه یک - شهرداری شیراز, شیراز, بخش مرکزی شهرستان شیراز, شهرستان شیراز, استان فارس, 71936, ‏ایران‎"
+University of Pittsburgh,40.44415295,-79.9624399276271,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA"
+"The Ohio State University, Columbus, OH, USA",40.00471095,-83.0285936787604,"The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA"
+The University of Western Australia,-31.95040445,115.797900374251,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia"
+"Foundation University Rawalpindi Campus, Pakistan",33.5609504,73.0712596618793,"Foundation University Rawalpindi Campus, Main Parking Road, Police Lines, راولپنڈی, Rawalpindi Cantt, پنجاب, 46600, ‏پاکستان‎"
+"University of Coimbra, Portugal",40.2075951,-8.42566147540816,"Reitoria da Universidade de Coimbra, Rua de Entre-Colégios, Almedina, Alta, Almedina, Sé Nova, Santa Cruz, Almedina e São Bartolomeu, CBR, Coimbra, Baixo Mondego, Centro, 3000-062, Portugal"
+"University of telecommunications and post, Sofia, Bulgaria",42.6560524,23.3476108351659,"Висше Училище по Телекомуникации и Пощи, 1, бул. Акад. Стефан Младенов, ж.к. Студентски град, район Студентски, Столична, София-град, 1700, Бългaрия"
+"Jaipur, Rajasthan, India",26.916194,75.820349,"Jaipur, Rajasthan, 302001, India"
+"Australian National University, Canberra, ACT 0200, Australia",-35.28121335,149.11665331324,"Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia"
+"Beijing University of Posts and Telecommunications, Beijing, China",39.9601488,116.351939210403,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国"
+Shenzhen Institutes of Advanced Technology,22.59805605,113.985337841399,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国"
+Akita Prefectural University,39.8011499,140.045911602376,"秋田県立大学, 秋田天王線, 潟上市, 秋田県, 東北地方, 011-0946, 日本"
+Cambridge University,50.7944026,-1.0971748,"University, Cambridge Road, Old Portsmouth, Portsmouth, South East, England, PO1 2HB, UK"
+"Kogakuin University, Tokyo, Japan",35.6902784,139.695400958171,"工学院大学, 東通り, 新宿区, 東京都, 関東地方, 163-8677, 日本"
+University of Ljubljana Faculty,46.0501558,14.4690732689076,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija"
+Institute for Communication Systems,51.2433692,-0.593220895014599,"Institute for Communication Systems, Spine Road, Woodbridge Hill, Guildford, Surrey, South East, England, GU2 7XS, UK"
+"University of Bristol, Bristol, UK",51.4584837,-2.60977519828372,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK"
+National Institute of Technology Rourkela,22.2501589,84.9066855698087,"National Institute of Technology, inside the department, Koel Nagar, Rourkela, Sundargarh, Odisha, 769002, India"
+"Stanford University, Stanford, California",37.43131385,-122.169365354983,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA"
+"Rensselaer Polytechnic Institute, Troy, NY 12180, USA",42.73280325,-73.6622354488153,"Rensselaer Polytechnic Institute, Tibbits Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA"
+Huazhong University of Science and Technology,30.5097537,114.4062881,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国"
+Stanford University,37.43131385,-122.169365354983,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA"
+Nottingham Trent University,52.9577322,-1.15617099267709,"Nottingham Trent University, Waverley Terrace, Lace Market, The Park, City of Nottingham, East Midlands, England, NG1 5JD, UK"
+Bangalore Institute of Technology,12.9551259,77.5741985,"Bangalore Institute of Technology, Krishna Rajendra Road, Mavalli, Vishveshwara Puram, South Zone, Bengaluru, Bangalore Urban, Karnataka, 560004, India"
+"Moscow Institute of Physics and Technology, Russia",55.929035,37.5186680829482,"МФТИ, 9, Институтский переулок, Виноградовские Горки, Лихачёво, Долгопрудный, городской округ Долгопрудный, Московская область, ЦФО, 141700, РФ"
+Kent State University,41.1443525,-81.3398283284572,"Kent State University, Lester A. Lefton Esplanade, Whitehall Terrace, Kent, Portage County, Ohio, 44242-0001, USA"
+Kingston University,51.4293086,-0.2684044,"Kingston University, Kingston Hill, Kingston Vale, Kingston-upon-Thames, London, Greater London, England, KT2 7TF, UK"
+University of Dhaka,23.7316957,90.3965275,"World War Memorial, Shahid Minar Rd, Jagannath Hall, DU, জিগাতলা, ঢাকা, ঢাকা বিভাগ, 1000, বাংলাদেশ"
+Institute of Computing,43.47878995,-80.5548480959375,"Institute for Quantum Computing, Wes Graham Way, Lakeshore Village, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 6R2, Canada"
+Macau University of Science and,22.3358031,114.265903983304,"HKUST, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国"
+"IDIAP Research Institute, Martigny, Switzerland",46.109237,7.08453548522408,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra"
+"University of Patras, Greece",38.2899482,21.7886469,"Πανεπιστήμιο Πατρών, Λεωφ. Ιπποκράτους, κ. Ρίου (Αγίου Γεωργίου Ρίου), Πάτρα, Δήμος Πατρέων, Περιφερειακή Ενότητα Αχαΐας, Περιφέρεια Δυτικής Ελλάδας, Πελοπόννησος, Δυτική Ελλάδα και Ιόνιο, 26443, Ελλάδα"
+"Beijing Union University, 100101, China",39.9890068,116.420677175386,"北京联合大学, 北四环东路, 飘亮阳光广场, 太阳宫乡, 朝阳区 / Chaoyang, 北京市, 100012, 中国"
+"University of Texas at Arlington, Arlington, Texas 76019, USA",32.7283683,-97.112018348404,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA"
+University of Virginia,38.0353682,-78.5035322,"University of Virginia, Rotunda Alley, Carr's Hill, Albemarle County, Virginia, 22904-4119, USA"
+"Nottingham Trent University, Nottingham, UK",52.9577322,-1.15617099267709,"Nottingham Trent University, Waverley Terrace, Lace Market, The Park, City of Nottingham, East Midlands, England, NG1 5JD, UK"
+CARNEGIE MELLON UNIVERSITY,37.4102193,-122.059654865858,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA"
+Aristotle University of Thessaloniki,40.62984145,22.9588934957528,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα"
+"University of California, San Diego, California, USA",32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+Democritus University of Thrace,40.84941785,25.8344493892098,"Δημοκρίτειο Πανεπιστήμιο Θράκης, Μάκρη - Αλεξανδρούπολη, Αλεξανδρούπολη, Δήμος Αλεξανδρούπολης, Περιφερειακή Ενότητα Έβρου, Περιφέρεια Ανατολικής Μακεδονίας και Θράκης, Μακεδονία - Θράκη, 68100, Ελλάδα"
+"University, Hong Kong",54.0856448,13.389089,"Hong-Kong, Feldstraße, Greifswald, Südliche Mühlenvorstadt, Greifswald, Vorpommern-Greifswald, Mecklenburg-Vorpommern, 17489, Deutschland"
+"Nanjing University, Nanjing 210093, China",32.0565957,118.774088328078,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国"
+"Middlesex University London, UK",51.59029705,-0.229632209454029,"Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK"
+"University of Illinois at Urbana-Champaign, IL USA",40.101976,-88.2314378,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA"
+McGill University,45.5039761,-73.5749687,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada"
+University of Pennsylvania,39.9492344,-75.191989851901,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA"
+of the University of Notre Dame,41.70456775,-86.2382202601727,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA"
+"Nagaoka University of Technology, Japan",37.42354445,138.77807276029,"長岡技術科学大学 (Nagaoka University of Technology), 長岡西山線, 長岡市, 新潟県, 中部地方, 日本"
+"University of Chinese Academy of Sciences, Beijing, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+"Humboldt-University, Berlin, Germany",52.51875685,13.3935604936378,"Humboldt-Universität zu Berlin, Dorotheenstraße, Spandauer Vorstadt, Mitte, Berlin, 10117, Deutschland"
+University,51.7520849,-1.25166460220888,"University College, Logic Lane, Grandpont, Oxford, Oxon, South East, England, OX1 4EX, UK"
+"California Institute of Technology, Pasadena, CA",34.13710185,-118.125274866116,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA"
+University of Illinois at Urbana,40.1006938,-88.2313043272112,"University of Illinois at Urbana-Champaign, West Pennsylvania Avenue, West Urbana Residential Area, Urbana, Champaign County, Illinois, 61801, USA"
+Firat University,39.7275037,39.4712703382844,"Erzincan Üniversitesi Hukuk Fakültesi Dekanlığı, Sivas-Erzincan yolu, Üçkonak, Erzincan, Erzincan merkez, Erzincan, Doğu Anadolu Bölgesi, 24000, Türkiye"
+"Middlesex University London, London, UK",51.59029705,-0.229632209454029,"Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK"
+"University of Cape Town, South Africa",-33.95828745,18.4599734888018,"University of Cape Town, Engineering Mall, Cape Town Ward 59, Cape Town, City of Cape Town, Western Cape, CAPE TOWN, South Africa"
+"University of Illinois at Urbana-Champaign, USA",40.101976,-88.2314378,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA"
+"Nanjing University of Aeronautics and Astronautics, Nanjing 210016, China",32.0373496,118.8140686,"南京航空航天大学, 御道街, 白下区, 新世纪广场, 秦淮区, 南京市, 江苏省, 210016, 中国"
+Northwestern University,42.0551164,-87.6758111348217,"Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA"
+"University of Lincoln, UK",53.22853665,-0.548734723802121,"University of Lincoln, Brayford Way, Whitton Park, New Boultham, Lincoln, Lincolnshire, East Midlands, England, LN6 7TS, UK"
+"Columbia University, New York, NY",40.8419836,-73.9436897071772,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA"
+"Colorado State University, Fort Collins, Colorado, USA",40.5709358,-105.086552556269,"Colorado State University, West Pitkin Street, Woodwest, Fort Collins, Larimer County, Colorado, 80526-2002, USA"
+"University of Southern California, Los Angeles, CA 90089, USA",34.0224149,-118.286344073446,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA"
+University of Toulouse,30.1781816,-93.2360581,"Toulouse, Lake Charles, Calcasieu Parish, Louisiana, 70605, USA"
+"University of Rochester, NY 14627, USA",43.1242954,-77.6288352530005,"Central Utilities Lot, Firemans, Rochester, Monroe County, New York, 14627, USA"
+"University of Dhaka, Bangladesh",23.7316957,90.3965275,"World War Memorial, Shahid Minar Rd, Jagannath Hall, DU, জিগাতলা, ঢাকা, ঢাকা বিভাগ, 1000, বাংলাদেশ"
+"Max Planck Institute for Informatics, Germany",49.2579566,7.04577416640431,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland"
+"American University, Washington, DC, USA",38.93804505,-77.0893922365193,"American University, 4400, Massachusetts Avenue Northwest, Spring Valley, American University Park, D.C., 20016, USA"
+"Wayne State University, Detroit, MI 48202, USA",42.3656423,-83.0711533990367,"Wayne State University, Burroughs Street, New Center, Detroit, Wayne County, Michigan, 48202, USA"
+"The Chinese University of Hong Kong, Hong Kong",22.42031295,114.207886442805,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国"
+Hong Kong University of Science and Technology,22.3386304,114.2620337,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国"
+"Sri krishna College of Technology, Coimbatore, India",10.925861,76.9224672855261,"Sri Krishna College of Technology, Kovaipudur to Golf Course Road dirt track, Ward 89, South Zone, Coimbatore, Coimbatore district, Tamil Nadu, 641001, India"
+"Jadavpur University, India",22.5611537,88.4131019353334,"Jadavpur University, Chingrighata Flyover, Basani Devi Colony, Kolkata, Hāora, West Bengal, 700098, India"
+"University of Surrey, Guildford, Surrey GU2 7XH, UK",51.24303255,-0.590013824660236,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK"
+"Tampere University of Technology, Finland",61.44964205,23.8587746189096,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi"
+"Purdue University, West Lafayette, IN. 47907, USA",40.4262569,-86.9157551,"Mathematical Sciences Library, 105, University Street, West Lafayette, Tippecanoe County, Indiana, 47907, USA"
+"Tafresh University, Tafresh, Iran",34.68092465,50.0534135183902,"دانشگاه تفرش, پاسداران, خرازان, بخش مرکزی, شهرستان تفرش, استان مرکزی, ‏ایران‎"
+"University of Southampton, SO17 1BJ, UK",50.89273635,-1.39464294664816,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK"
+"Istanbul Technical University, Istanbul, Turkey",41.10427915,29.022311592943,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye"
+"Ferdowsi University of Mashhad, Mashhad, Iran",36.3076616,59.5269051097667,"دانشگاه فردوسی مشهد, بولوار دانش, رضاشهر, منطقه ۹, مشهد, شهرستان مشهد, استان خراسان رضوی, 9177146164, ‏ایران‎"
+"K.N. Toosi University of Technology, Tehran, Iran",35.76427925,51.409702762313,"دانشکده مهندسی عمران و نقشه برداری, ولی عصر, کاووسیه, منطقه ۳ شهر تهران, تجریش, بخش رودبارقصران, شهرستان شمیرانات, استان تهران, 1968653111, ‏ایران‎"
+"University of Vermont, 33 Colchester Avenue, Burlington",44.48116865,-73.2002178989123,"University of Vermont, Colchester Avenue, Burlington, Chittenden County, Vermont, 05401, USA"
+"University of California, Merced, CA 95344, USA",37.36566745,-120.421588883632,"University of California, Merced, Ansel Adams Road, Merced County, California, USA"
+"Stony Brook University, Stony Brook NY 11794, USA",40.9153196,-73.1270626,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA"
+Indian Institute of Science,13.0222347,77.5671832476811,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India"
+Fraser University,44.9689836,-93.2094162948556,"Fraser, 3333, University Avenue Southeast, Prospect Park - East River Road, Minneapolis, Hennepin County, Minnesota, 55414, USA"
+National University of Technology Technology,33.3120263,44.4471829434368,"الجامعة التكنلوجية, A86;N11;D383, محلة 103, Al Saadoom Park, Rusafa, بغداد, Al Resafa, محافظة بغداد, 3241, العراق"
+UNIVERSITY OF CALIFORNIA,37.87631055,-122.238859269443,"UC Berkeley, Centennial Drive, Oakland, Alameda County, California, 94720-1076, USA"
+University of Illinois at Urbana Champaign,40.101976,-88.2314378,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA"
+"Western Sydney University, Parramatta, NSW 2150, Australia",-33.8160848,151.00560034186,"Western Sydney University, Parramatta City Campus, Smith Street, Parramatta, Sydney, Parramatta, NSW, 2150, Australia"
+"Hiroshima University, Japan",34.4019766,132.7123195,"Hiroshima University 広島大学 東広島キャンパス, 出会いの道 Deai-no-michi Str., 西条下見, 東広島市, 広島県, 中国地方, 739-0047, 日本"
+International University of,11.5744201,104.8775841,"International University, ផ្លូវ ១៩៨៤, ភូមិភ្នំពេញថ្មី, ខណ្ឌសែនសុខ, រាជធានីភ្នំពេញ, 12101, ព្រះរាជាណាចក្រ​កម្ពុជា"
+"Cyprus University of Technology, Cyprus",34.67567405,33.0457764820597,"Mitropoli Building - Cyprus University of Technology, Anexartisias, Limasol - Λεμεσός, Limassol - Λεμεσός, Κύπρος - Kıbrıs, 3036, Κύπρος - Kıbrıs"
+McGovern Institute for Brain Research,42.3626295,-71.0914481,"McGovern Institute for Brain Research (MIT), Main Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+National University,14.6042947,120.994285201104,"National University, M.F. Jocson, Royal Plaza, Sampaloc, Fourth District, Manila, Metro Manila, 1008, Philippines"
+Virginia Tech Carilion Research Institute,37.2579548,-79.9423329131356,"Virginia Tech Carilion Research Institute, South Jefferson Street, Crystal Spring, Roanoke, Virginia, 24016, USA"
+University of Geneva,42.57054745,-88.5557862661765,"University of Chicago-Yerkes Observatory, 373, West Geneva Street, Williams Bay, Walworth County, Wisconsin, 53191, USA"
+Otto-von-Guericke University Magdeburg,52.14005065,11.6447124822347,"Otto-von-Guericke-Universität Magdeburg, 2, Universitätsplatz, Krökentorviertel/Breiter Weg NA, Alte Neustadt, Magdeburg, Sachsen-Anhalt, 39106, Deutschland"
+University of Massachusetts,42.3889785,-72.5286987,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA"
+Liverpool John Moores University,53.4050747,-2.97030028586709,"John Lennon Art and Design Building, Duckinfield Street, Knowledge Quarter, Liverpool, North West England, England, L3 5YD, UK"
+"Stanford University, CA, United States",37.43131385,-122.169365354983,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA"
+University of Aberdeen,57.1646143,-2.10186013407315,"University of Aberdeen, High Street, Old Aberdeen, Aberdeen, Aberdeen City, Scotland, AB24 3EJ, UK"
+"University of Chinese Academy of Sciences (UCAS), Beijing, 100049, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+MULTIMEDIA UNIVERSITY,2.92749755,101.641853013536,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia"
+National Institutes of Health,39.00041165,-77.1032777503325,"NIH, Pooks Hill, Bethesda, Montgomery County, Maryland, USA"
+"Ritsumeikan University, Kyoto, Japan",35.0333281,135.7249154,"立命館大学 (Ritsumeikan University), 衣笠宇多野線, 北区, 京都市, 京都府, 近畿地方, 6038577, 日本"
+"Nanjing University, Nanjing 210093, P.R.China",32.0565957,118.774088328078,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国"
+"University of Tennessee, Knoxville",35.9542493,-83.9307395,"University of Tennessee, Melrose Avenue, Fort Sanders, Knoxville, Knox County, Tennessee, 37916, USA"
+"Eindhoven University of Technology, The Netherlands",51.4486602,5.49039956550805,"Technische Universiteit Eindhoven, 2, De Rondom, Villapark, Eindhoven, Noord-Brabant, Nederland, 5600 MB, Nederland"
+Tokyo Metropolitan University,35.6200925,139.38296706394,"首都大学東京, 由木緑道, 八王子市, 東京都, 関東地方, 1920364, 日本"
+Xidian University,34.1235825,108.83546,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国"
+"National Cheng Kung University, Tainan, Taiwan",22.9991916,120.216251337909,"成大, 1, 大學路, 大學里, 前甲, 東區, 臺南市, 70101, 臺灣"
+Coburg University,50.26506145,10.9519648264628,"Hochschule für angewandte Wissenschaften Coburg, 2, Friedrich-Streib-Straße, Callenberg, Coburg, Oberfranken, Bayern, 96450, Deutschland"
+Poznan University of Technology,52.4004837,16.9515808278647,"Dom Studencki nr 3, 3, Kórnicka, Święty Roch, Rataje, Poznań, wielkopolskie, 61-141, RP"
+University of Campinas,-27.5953995,-48.6154218,"USJ, 97, Rua Sílvia Maria Fabro, Kobrasol, Campinas, São José, Microrregião de Florianópolis, Mesorregião da Grande Florianópolis, SC, Região Sul, 88102-130, Brasil"
+"Hong Kong Polytechnic University, Hong Kong",22.304572,114.179762852269,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国"
+Huazhong University of,22.53367445,113.917874206261,"深圳市第六人民医院, 89号, 桃园路, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518000, 中国"
+"Purdue University, West Lafayette, IN, USA",40.4319722,-86.923893679845,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA"
+"Delft University of Technology, The Netherlands",51.99882735,4.37396036815404,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland"
+"University of Pisa, Pisa, Italy",43.7201299,10.4078976,"Dipartimento di Fisica 'E. Fermi', 3, Largo Bruno Pontecorvo, San Francesco, Pisa, PI, TOS, 56127, Italia"
+"University College London, London WC1N 3BG, United Kingdom",51.5231607,-0.1282037,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK"
+"Boston University, USA",42.3504253,-71.1005611418395,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA"
+University of Toronto,43.66333345,-79.3976997498952,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada"
+"Rio de Janeiro State University, Brazil",-22.91117105,-43.2357797110467,"UERJ, 524, Rua São Francisco Xavier, Maracanã, Zona Norte do Rio de Janeiro, Rio de Janeiro, Microrregião Rio de Janeiro, Região Metropolitana do Rio de Janeiro, RJ, Região Sudeste, 20550-900, Brasil"
+"University of California, Irvine",33.6431901,-117.84016493553,"University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA"
+University of Southampton,50.89273635,-1.39464294664816,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK"
+Arizona State University,33.30715065,-111.676531568996,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA"
+Southwest Jiaotong University,30.697847,104.0520811,"西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国"
+"School, The University of Sydney, Sydney, NSW, Australia",-33.8893229,151.180068,"Royal Prince Alfred Hospital School, 57-59, Grose Street, Camperdown, Sydney, NSW, 2050, Australia"
+Hanoi University of Science and Technology,21.003952,105.843601832826,"HUST, Trần Đại Nghĩa, Hai Bà Trưng, Hà Nội, 10999, Việt Nam"
+"Anna University, Chennai",13.0105838,80.2353736,"Anna University, Nuclear Physics Road, Srinagar Colony, Ward 171, Zone 13 Adyar, Chennai, Chennai district, Tamil Nadu, 600025, India"
+"Florida State University, Tallahassee, FL 32306, USA",30.44235995,-84.2974786716626,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA"
+"Courant Institute of Mathematical Sciences, New York, NY",40.7286484,-73.9956863,"Courant Institute of Mathematical Sciences, 251, Mercer Street, Washington Square Village, Greenwich Village, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA"
+University of Basel,47.5612651,7.5752961,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra"
+"University of Colorado, Boulder",40.01407945,-105.266959437621,"Naropa University, Arapahoe Avenue, The Hill, Boulder, Boulder County, Colorado, 80309, USA"
+"West Virginia University, Morgantown WV 26506, USA",39.65404635,-79.96475355,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA"
+"Cornell University, Ithaca, New York",42.4505507,-76.4783512955428,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA"
+"Shanghai Jiao Tong University, China",31.20081505,121.428406809373,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国"
+"Beihang University, Beijing 100191, China",39.9808333,116.341012492788,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国"
+University of Sydney,-33.88890695,151.189433661925,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia"
+University of North Texas,33.2098879,-97.1514748776857,"University of North Texas, West Highland Street, Denton, Denton County, Texas, 76201, USA"
+"Joint Research Institute, Foshan, China",22.83388935,113.285418245471,"广东顺德中山大学卡内基梅隆大学国际联合研究院, 南国东路, 顺德区, 五村, 顺德区 (Shunde), 佛山市 / Foshan, 广东省, 0757, 中国"
+"National Taiwan University of Science and Technology, Taipei 10607, Taiwan",25.01353105,121.541737363138,"臺科大, 43, 基隆路四段, 學府里, 下內埔, 大安區, 臺北市, 10607, 臺灣"
+"Columbia University, United States",40.8419836,-73.9436897071772,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA"
+"Imperial College London, U.K",51.49887085,-0.175607973937072,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK"
+"University of Science and Technology of China, Hefei, Anhui, P. R. China",31.83907195,117.264207478576,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国"
+"Washington University, St. Louis, MO, USA",38.6480445,-90.3099667,"Dero, Wallace Drive, St. Louis County, Missouri, MO 63130, USA"
+college of Engineering,13.0110912,80.2354520862161,"College of Engineering, Sardar Patel Road, Srinagar Colony, Ward 171, Zone 13 Adyar, Chennai, Chennai district, Tamil Nadu, 600025, India"
+"Rutgers University, Piscataway NJ 08854, USA",40.5234675,-74.436975,"The Rock Cafe, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA"
+Institute of Computer Science III,35.15456615,128.098476040221,"Institute of Computer Science, 8, 내동로, 신율리, 진주시, 경남, 52669, 대한민국"
+"University of California, Riverside",33.98071305,-117.332610354677,"University of California, Riverside, Linden Street, Riverside, Riverside County, California, 92521, USA"
+"College Heights Blvd, Bowling Green, KY",36.9881671,-86.4542111,"College Heights Boulevard, Bowling Green, Warren County, Kentucky, 42101, USA"
+Akita University,39.7278142,140.133225661449,"秋田大学手形キャンパス, 秋田八郎潟線, 手形字扇田, 広面, 秋田市, 秋田県, 東北地方, 010-0864, 日本"
+METs Institute of Engineering,28.2140454,83.9607104993073,"Dihiko Paton, Pokhara Lekhnath Metropolitan Ward No. 6, Pokhara, Pokhara Lekhnath Metropolitan, कास्की, गण्डकी अञ्चल, पश्चिमाञ्चल विकास क्षेत्र, नेपाल"
+"IBM Almaden Research Center, San Jose CA",37.21095605,-121.807486683178,"IBM Almaden Research Center, San José, Santa Clara County, California, USA"
+"Boston University, Boston, MA",42.3504253,-71.1005611418395,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA"
+"Carnegie Mellon University, Pittsburgh, PA 15213, USA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+The University of Maryland,39.2899685,-76.6219610316858,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA"
+"Georgia Institute of Technology, Atlanta, Georgia, USA",33.776033,-84.3988408600158,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA"
+Eastern Mediterranean University,35.14479945,33.90492318497,"Eastern Mediterranean University (EMU) - Stadium, Nehir Caddesi, Gazimağusa, Αμμόχωστος - Mağusa, Kuzey Kıbrıs, 99450, Κύπρος - Kıbrıs"
+Concordia University,45.57022705,-122.637093463826,"Concordia University, 2811, Northeast Holman Street, Concordia, Portland, Multnomah County, Oregon, 97211, USA"
+"Kurukshetra University, Kurukshetra, India",29.95826275,76.8156304467532,"Kurukshetra University, SH6, Kurukshetra, Haryana, 132118, India"
+University of Stuttgart,48.9095338,9.1831892,"Pädagogische Hochschule Ludwigsburg, 46, Reuteallee, Ludwigsburg-Nord, Ludwigsburg, Landkreis Ludwigsburg, Regierungsbezirk Stuttgart, Baden-Württemberg, 71634, Deutschland"
diff --git a/scraper/reports/institutions_found/found-3.csv b/scraper/reports/institutions_found/found-3.csv
new file mode 100644
index 00000000..2e682380
--- /dev/null
+++ b/scraper/reports/institutions_found/found-3.csv
@@ -0,0 +1,811 @@
+"Rutgers University, Newark, NJ, USA",40.7417586,-74.1750462269524,"Dana Library, Bleeker Street, Teachers Village, Newark, Essex County, New Jersey, 07102, USA"
+"Sri krishna College of Technology, Coimbatore, India",10.925861,76.9224672855261,"Sri Krishna College of Technology, Kovaipudur to Golf Course Road dirt track, Ward 89, South Zone, Coimbatore, Coimbatore district, Tamil Nadu, 641001, India"
+Yale University,41.25713055,-72.9896696015223,"Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA"
+Eskisehir Osmangazi University,39.7487516,30.4765307102195,"Eskişehir Osmangazi Üniversitesi Meşelik Yerleşkesi, Kütahya-Eskişehir yolu, Sazova Mahallesi, Karagözler, Tepebaşı, Eskişehir, İç Anadolu Bölgesi, 26160, Türkiye"
+"Shanghai Jiao Tong University, China",31.20081505,121.428406809373,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国"
+"University of Coimbra, Portugal",40.2075951,-8.42566147540816,"Reitoria da Universidade de Coimbra, Rua de Entre-Colégios, Almedina, Alta, Almedina, Sé Nova, Santa Cruz, Almedina e São Bartolomeu, CBR, Coimbra, Baixo Mondego, Centro, 3000-062, Portugal"
+"Fudan University, Shanghai, China",31.30104395,121.500454969435,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国"
+"University of Science and Technology of China, Hefei 230026, P. R. China",31.83907195,117.264207478576,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国"
+University Politehnica of Bucharest,44.43918115,26.0504456538413,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România"
+"Columbia University, New York NY 10027, USA",40.81779415,-73.9578531933627,"Columbia University, West 131st Street, Manhattanville Houses, Manhattanville, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10027, USA"
+University of Perugia,49.2622421,-123.2450052,"Caffe Perugia, 2350, Health Sciences Mall, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada"
+"Tsinghua University, Beijing 100084, China",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+"Tsinghua University, Beijing 100084, P.R.China",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+"University of Ottawa, Canada",45.42580475,-75.6874011819989,"University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada"
+"Lund University, Lund, Sweden",55.7039571,13.1902011,"TEM at Lund University, 9, Klostergatan, Stadskärnan, Centrum, Lund, Skåne, Götaland, 22222, Sverige"
+Princeton University,40.34829285,-74.66308325,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA"
+"Shanghai Jiao Tong University, Shanghai 200240, China",31.02775885,121.432219256081,"上海交通大学(闵行校区), 宣怀大道, 紫竹科技园区, 英武, 闵行区, 上海市, 200240, 中国"
+"National University of Ireland Maynooth, Co. Kildare, Ireland",53.3846975,-6.60039458177959,"National University of Ireland Maynooth, River Apartments, Maynooth, Maynooth ED, Maynooth Municipal District, County Kildare, Leinster, KILDARE, Ireland"
+Myongji University,37.2381023,127.1903431,"명지대, 금학로, 역북동, 처인구, 용인시, 경기, 17144, 대한민국"
+"Hankuk University of Foreign Studies, South Korea",37.5953979,127.0630499,"외대앞, 휘경로, 이문동, 이문2동, 동대문구, 서울특별시, 02407, 대한민국"
+"Arizona State University, Tempe AZ",33.4206602,-111.932634924965,"Arizona State University, Palm Walk, Tempe, Maricopa County, Arizona, 85287, USA"
+"Sharda University, Greater Noida, India",28.4737512,77.4836148,"Sharda University, Yamuna Expressway, Greater Noida, Gautam Buddha Nagar, Uttar Pradesh, 201308, India"
+The University of Electro-Communications,35.6572957,139.542558677257,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本"
+"IDIAP Research Institute, Martigny, Switzerland",46.109237,7.08453548522408,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra"
+"Peking University, Beijing, China",39.9922379,116.303938156219,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国"
+Middlesex University London,51.59029705,-0.229632209454029,"Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK"
+"Tampere University of Technology, Tampere, Finland",61.44964205,23.8587746189096,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi"
+Tafresh University,34.68092465,50.0534135183902,"دانشگاه تفرش, پاسداران, خرازان, بخش مرکزی, شهرستان تفرش, استان مرکزی, ‏ایران‎"
+"University of North Carolina at Chapel Hill, Chapel Hill, NC",35.9105975,-79.0517871,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA"
+"Temple University, Philadelphia, PA 19122, USA",39.9808569,-75.149594,"Temple University, West Berks Street, Hartranft, Philadelphia, Philadelphia County, Pennsylvania, 19122, USA"
+Hunan University,26.88111275,112.628506656425,"Yejin University for Employees, 冶金西路, 和平乡, 珠晖区, 衡阳市 / Hengyang, 湖南省, 中国"
+"University of California, Riverside CA 92521-0425, USA",33.9743275,-117.32558236636,"UCR, North Campus Drive, Riverside, Riverside County, California, 92521, USA"
+Shaheed Zulfikar Ali Bhutto Institute of,24.8186587,67.0316585,"Shaheed Zulfikar Ali Bhutto Institute of Science and Technology - Karachi Campus, Block 5, Clifton Block 5, CBC, ڪراچي Karachi, Karāchi District, سنڌ, 75600, ‏پاکستان‎"
+Punjabi University Patiala,30.3568981,76.4551272,"Punjabi University Patiala, Rajpura Road, Patiala, Punjab, 147001, India"
+"Imperial College London, UK",51.49887085,-0.175607973937072,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK"
+"Southwest Jiaotong University, Chengdu, P.R. China",30.697847,104.0520811,"西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国"
+"Arizona State University, AZ, USA",33.30715065,-111.676531568996,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA"
+"Eindhoven University of Technology, The Netherlands",51.4486602,5.49039956550805,"Technische Universiteit Eindhoven, 2, De Rondom, Villapark, Eindhoven, Noord-Brabant, Nederland, 5600 MB, Nederland"
+"Rensselaer Polytechnic Institute, USA",42.7298459,-73.6795021620135,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA"
+"Boston College, USA",42.3354481,-71.1681386402306,"Boston College, 140, Commonwealth Avenue, Chestnut Hill, Newton, Middlesex County, Massachusetts, 02467, USA"
+Harvard University,42.36782045,-71.1266665287448,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA"
+UNIVERSITY OF OULU,65.0592157,25.466326012507,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi"
+Carnegie Mellon University,37.4102193,-122.059654865858,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA"
+University of Caen,35.0274996,135.781545126193,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本"
+Rensselaer Polytechnic Institute,42.7298459,-73.6795021620135,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA"
+"University of Chinese Academy of Sciences, Beijing, 100049, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+"University of California, Irvine, USA",33.6431901,-117.84016493553,"University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA"
+University of Wollongong,-34.40505545,150.878346547278,"University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia"
+University of Manitoba,49.8091536,-97.133041790072,"University of Manitoba, Gillson Street, Normand Park, Saint Vital, Winnipeg, Manitoba, R3T 2N2, Canada"
+"Dalian University of Technology, China",38.88140235,121.522810980755,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国"
+University of Glasgow,55.87231535,-4.28921783557444,"University of Glasgow, University Avenue, Yorkhill, Hillhead, Glasgow, Glasgow City, Scotland, G, UK"
+"Idiap Research Institute, Switzerland",46.109237,7.08453548522408,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra"
+Boston College,42.3354481,-71.1681386402306,"Boston College, 140, Commonwealth Avenue, Chestnut Hill, Newton, Middlesex County, Massachusetts, 02467, USA"
+"University of Rochester, Rochester, NY, USA",43.1576969,-77.5882915756007,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA"
+Bangalore Institute of Technology,12.9551259,77.5741985,"Bangalore Institute of Technology, Krishna Rajendra Road, Mavalli, Vishveshwara Puram, South Zone, Bengaluru, Bangalore Urban, Karnataka, 560004, India"
+Michigan State University,42.718568,-84.4779157093052,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA"
+George Mason University,38.83133325,-77.3079883887912,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA"
+"Stony Brook University, NY 11794, USA",40.9153196,-73.1270626,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA"
+"University of Nottingham, Nottingham, UK",52.9387428,-1.20029569274574,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK"
+"Nanjing University, Nanjing 210093, China",32.0565957,118.774088328078,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国"
+"Jahangirnagar University, Savar, Dhaka 1342, Bangladesh",23.88277575,90.2671009927283,"Jahangirnagar University, 1342, Dhaka - Aricha Highway, Nobinagar, সাভার উপজেলা, ঢাকা জেলা, ঢাকা বিভাগ, 1342, বাংলাদেশ"
+Yaroslavl State University,57.6252103,39.8845656,"ЯрГУ им. Демидова (Экономический факультет), 3, Комсомольская улица, Кировский район, Ярославль, городской округ Ярославль, Ярославская область, ЦФО, 150000, РФ"
+"City University of Hong Kong, Hong Kong, China",22.34000115,114.169702912423,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国"
+of Cornell University,42.4505507,-76.4783512955428,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA"
+"University of Michigan, Ann Arbor, MI",42.2942142,-83.710038935096,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA"
+Oxford Brookes University,51.7555205,-1.2261597,"Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK"
+"Oxford Brookes University, Oxford, United Kingdom",51.7555205,-1.2261597,"Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK"
+University of Newcastle,-33.3578899,151.37834708231,"University of Newcastle Central Coast Campus, Technology Bridge, Ourimbah, Central Coast, NSW, 2258, Australia"
+Sharif University of Technology,35.7036227,51.351250969544,"دانشگاه صنعتی شریف, خیابان آزادی, زنجان, منطقه ۹ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, 14588, ‏ایران‎"
+"Tel Aviv University, Israel",32.1119889,34.8045970204252,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל"
+Reutlingen University,48.48187645,9.18682403998887,"Campus Hohbuch, Campus Hochschule Reutlingen, Reutlingen, Landkreis Reutlingen, Regierungsbezirk Tübingen, Baden-Württemberg, 72762, Deutschland"
+Hanyang University,37.5557271,127.0436642,"한양대, 206, 왕십리로, 사근동, 성동구, 서울특별시, 04763, 대한민국"
+University of Northern British Columbia,53.8925662,-122.814715920529,"UNBC, Campus Ring Road, College Heights, Prince George, Regional District of Fraser-Fort George, British Columbia, V2M 5K7, Canada"
+"Stamford University Bangladesh, Dhaka-1209, Bangladesh",23.7448166,90.4084351355108,"Stamford University Bangladesh, Siddeshwari Road, ফকিরাপুল, Paltan, ঢাকা, ঢাকা বিভাগ, 1217, বাংলাদেশ"
+"National University of Sciences and Technology (NUST), Islamabad, Pakistan",33.644347,72.9885079,"National University of Sciences and Technology (NUST), Kashmir Highway, جی - 10, ICT, وفاقی دارالحکومت اسلام آباد, 44000, ‏پاکستان‎"
+University of Exeter,50.7369302,-3.53647671702167,"University of Exeter, Stocker Road, Exwick, Exeter, Devon, South West England, England, EX4 4QN, UK"
+University of London,51.5217668,-0.130190717056655,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK"
+"Istanbul Technical University, Istanbul, 34469, TURKEY",41.10427915,29.022311592943,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye"
+The Open University,52.02453775,-0.709274809394501,"The Open University, East Lane, Walton, Monkston, Milton Keynes, South East, England, MK7 6AE, UK"
+University of Illinois at Urbana-Champaign,40.101976,-88.2314378,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA"
+University of Thessaloniki,40.62984145,22.9588934957528,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα"
+"Technical University Munich, Germany",48.14955455,11.5677531417838,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland"
+"Carnegie Mellon University, Pittsburgh, USA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+"Information Technology University (ITU), Punjab, Lahore, Pakistan",31.4760299,74.3427526,"Information Technology University (ITU), Ferozepur Road, Sher Shah Block, Garden Town, Al Noor Town, Lahore District, پنجاب, 54600, ‏پاکستان‎"
+The University of Sydney,-33.88890695,151.189433661925,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia"
+New Jersey Institute of Technology,40.7423025,-74.1792817237128,"New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA"
+Indiana University,39.86948105,-84.8795690544362,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA"
+Dartmouth College,43.7047927,-72.2925909,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA"
+Florida State University,30.44235995,-84.2974786716626,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA"
+Sun Yat-Sen University,23.09461185,113.287889943975,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国"
+University Of Maryland,39.2899685,-76.6219610316858,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA"
+National Institute of Advanced Industrial Science and Technology,36.05238585,140.118523607658,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本"
+University of Washington,47.6543238,-122.308008943203,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA"
+"COMSATS Institute of Information Technology, Islamabad",33.65010145,73.1551494914791,"COMSATS Institute of Information Technology, Fence, Chak Shehzad, وفاقی دارالحکومت اسلام آباد, 45550, ‏پاکستان‎"
+Nanjing University of Information Science and Technology,32.2068102,118.718472893883,"南京信息工程大学, 龙山北路, 第十六街区, 浦口区, 南京市, 江苏省, 210032, 中国"
+Louisiana State University,30.40550035,-91.1862047410405,"LSU, Gourrier Avenue, Baton Rouge, East Baton Rouge Parish, Louisiana, 70803, USA"
+The University of Tokyo,35.9020448,139.936220089117,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本"
+"University of Groningen, The Netherlands",53.21967825,6.56251482206542,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland"
+University of Nebraska - Lincoln,40.8174723,-96.7044468,"Sheldon Museum of Art, North 12th Street, West Lincoln, Lincoln, Lancaster County, Nebraska, 68588-0300, USA"
+"University of Central Florida, USA",28.59899755,-81.1971250118395,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA"
+"University of Bristol, Bristol, BS8 1UB, UK",51.4562363,-2.602779,"University of Bristol, Cantock's Close, Kingsdown, Canon's Marsh, Bristol, City of Bristol, South West England, England, BS8, UK"
+Cyprus University of Technology,34.67567405,33.0457764820597,"Mitropoli Building - Cyprus University of Technology, Anexartisias, Limasol - Λεμεσός, Limassol - Λεμεσός, Κύπρος - Kıbrıs, 3036, Κύπρος - Kıbrıs"
+"Stanford University, Stanford, CA, USA",37.43131385,-122.169365354983,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA"
+Meiji University,35.6975029,139.761391749285,"明治大学, 錦華坂, 猿楽町1, 猿楽町, 東京, 千代田区, 東京都, 関東地方, 101-0051, 日本"
+Kyushu University,33.59914655,130.223598480987,"伊都ゲストハウス, 桜井太郎丸線, 西区, 福岡市, 福岡県, 九州地方, 819−0395, 日本"
+"University of Plymouth, UK",50.3752501,-4.13927692297343,"Charles Seale-Hayne Library, Portland Square, Barbican, Plymouth, South West England, England, PL4 6AP, UK"
+The American University in Cairo,30.04287695,31.2366413899265,"الجامعة الأمريكية بالقاهرة, شارع القصر العينى, القاهرة القديمة, جاردن سيتي, القاهرة, محافظة القاهرة, 11582, مصر"
+"La Trobe University, Australia",-36.7784754,144.298047,"La Trobe University, Keck Street, Flora Hill, Bendigo, City of Greater Bendigo, Loddon Mallee, Victoria, 3550, Australia"
+Karlsruhe Institute of Technology,49.10184375,8.43312559623876,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland"
+Rice University,29.71679145,-95.4047811339379,"Rice University, Stockton Drive, Houston, Harris County, Texas, 77005-1890, USA"
+Rowan University,39.7103526,-75.1193266647699,"Rowan University, Esbjornson Walk, Glassboro, Gloucester County, New Jersey, 08028, USA"
+University of Tokyo,35.9020448,139.936220089117,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本"
+"Beijing University of Posts and Telecommunications, Beijing, P.R. China",39.9601488,116.351939210403,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国"
+Cornell University,42.4505507,-76.4783512955428,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA"
+"Amirkabir University of Technology, Tehran. Iran",35.704514,51.4097205774739,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎"
+New York University,40.72925325,-73.9962539360963,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA"
+"University of South Carolina, USA",33.9928298,-81.0268516781225,"University of South Carolina, Wheat Street, Columbia, Richland County, South Carolina, 29205, USA"
+Florida International University,25.75533775,-80.3762889746807,"FIU, Southwest 14th Street, Sweetwater, University Park, Miami-Dade County, Florida, 33199, USA"
+Islamic University of Gaza - Palestine,31.51368535,34.4401934143135,"The Islamic University of Gaza, Mostafa Hafez Street, South Remal, محافظة غزة, ‏قطاع غزة‎, PO BOX 108, الأراضي الفلسطينية"
+"Queen Mary University of London, UK",51.5247272,-0.0393103466301624,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK"
+Kent State University,41.1443525,-81.3398283284572,"Kent State University, Lester A. Lefton Esplanade, Whitehall Terrace, Kent, Portage County, Ohio, 44242-0001, USA"
+"University, Xi an Shaanxi Province, Xi an 710049, China",34.2707834,108.94449949951,"西五路, 新城区, 新城区 (Xincheng), 西安市, 陕西省, 710003, 中国"
+Politehnica University of Timisoara,45.746189,21.2275507517647,"UPT, Bulevardul Vasile Pârvan, Elisabetin, Timișoara, Timiș, 300223, România"
+Aristotle University of Thessaloniki GR,40.62984145,22.9588934957528,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα"
+"Moscow Institute of Physics and Technology, Russia",55.929035,37.5186680829482,"МФТИ, 9, Институтский переулок, Виноградовские Горки, Лихачёво, Долгопрудный, городской округ Долгопрудный, Московская область, ЦФО, 141700, РФ"
+Kobe University,34.7275714,135.237099997686,"神戸大学, 灘三田線, 灘区, 神戸市, 兵庫県, 近畿地方, 657-00027, 日本"
+University of California San Diego,32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+"Michigan State University, East Lansing, MI, USA",42.718568,-84.4779157093052,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA"
+Queensland University of Technology(QUT,-27.4770485,153.028373791304,"QUT Gardens Point Main Library, V, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia"
+Otto-von-Guericke University Magdeburg,52.14005065,11.6447124822347,"Otto-von-Guericke-Universität Magdeburg, 2, Universitätsplatz, Krökentorviertel/Breiter Weg NA, Alte Neustadt, Magdeburg, Sachsen-Anhalt, 39106, Deutschland"
+"College Park, MD, 20740, USA",38.980666,-76.9369189,"College Park, Prince George's County, Maryland, USA"
+Czech Technical University,50.0764296,14.418023122743,"České vysoké učení technické v Praze, Resslova, Nové Město, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 11121, Česko"
+Middle East Technical University,39.87549675,32.7855350558467,"ODTÜ, 1, 1591.sk(315.sk), Çiğdem Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye"
+"University of Technology, Sydney, Australia",-33.8828784,151.200682779726,"UTS, Thomas Street, Ultimo, Sydney, NSW, 2007, Australia"
+Shanghai Jiao Tong University,31.20081505,121.428406809373,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国"
+"Monash University, Victoria, Australia",-37.9011951,145.130584919767,"Monash University, Business Park Drive, Monash Business Park, Notting Hill, City of Monash, Victoria, 3800, Australia"
+FL,27.7567667,-81.4639835,"Florida, USA"
+Institute of Computer Science III,35.15456615,128.098476040221,"Institute of Computer Science, 8, 내동로, 신율리, 진주시, 경남, 52669, 대한민국"
+"The University of Tokyo, Japan",35.9020448,139.936220089117,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本"
+Cambridge University,50.7944026,-1.0971748,"University, Cambridge Road, Old Portsmouth, Portsmouth, South East, England, PO1 2HB, UK"
+"University POLITEHNICA of Bucharest, Bucharest, Romania",44.43918115,26.0504456538413,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România"
+University of Southern California,34.0224149,-118.286344073446,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA"
+Institute of,38.3836097,-81.7654665,"Institute, Kanawha County, West Virginia, 25112, USA"
+University of California Berkeley,37.87631055,-122.238859269443,"UC Berkeley, Centennial Drive, Oakland, Alameda County, California, 94720-1076, USA"
+"Columbia University, New York, NY, USA",40.8419836,-73.9436897071772,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA"
+Mahanakorn University of Technology,13.84450465,100.856208183836,"มหาวิทยาลัยเทคโนโลยีมหานคร, 140, ถนนเชื่อมสัมพันธ์, กรุงเทพมหานคร, เขตหนองจอก, กรุงเทพมหานคร, 10530, ประเทศไทย"
+"University of Washington, Seattle, USA",47.6543238,-122.308008943203,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA"
+Ionian University,38.2899482,21.7886469,"Πανεπιστήμιο Πατρών, Λεωφ. Ιπποκράτους, κ. Ρίου (Αγίου Γεωργίου Ρίου), Πάτρα, Δήμος Πατρέων, Περιφερειακή Ενότητα Αχαΐας, Περιφέρεια Δυτικής Ελλάδας, Πελοπόννησος, Δυτική Ελλάδα και Ιόνιο, 26443, Ελλάδα"
+University of Arkansas at Little Rock,34.72236805,-92.3383025526859,"University of Arkansas At Little Rock (UALR), 2801, U A L R Campus Drive, Little Rock, Pulaski County, Arkansas, 72204, USA"
+"College Heights Blvd, Bowling Green, KY",36.9881671,-86.4542111,"College Heights Boulevard, Bowling Green, Warren County, Kentucky, 42101, USA"
+University of Pittsburgh,40.44415295,-79.9624399276271,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA"
+Katholieke Universiteit Leuven,50.8830686,4.7019503,"Laboratorium voor Bos, natuur en landschap, 102, Vital Decosterstraat, Sint-Maartensdal, Leuven, Vlaams-Brabant, Vlaanderen, 3000, België / Belgique / Belgien"
+"University of Barcelona, Spain",41.3868913,2.16352384576632,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España"
+"University of Chinese Academy of Sciences, Beijing 100190, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+University of Frankfurt,50.13053055,8.69234223934388,"Frankfurt University of Applied Sciences, Kleiststraße, Nordend West, Frankfurt, Regierungsbezirk Darmstadt, Hessen, 60318, Deutschland"
+Hanoi University of Science and Technology,21.003952,105.843601832826,"HUST, Trần Đại Nghĩa, Hai Bà Trưng, Hà Nội, 10999, Việt Nam"
+"Southwest Jiaotong University, Chengdu, China",30.697847,104.0520811,"西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国"
+"University of Chinese Academy of Sciences, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+"Wayne State University, Detroit, MI 48202, USA",42.3656423,-83.0711533990367,"Wayne State University, Burroughs Street, New Center, Detroit, Wayne County, Michigan, 48202, USA"
+Institute of Electronics and Computer Science,56.97734805,24.1951425550775,"EDI, 14, Dzērbenes iela, Biķerziedi, Teika, Ozolkalni, Rīga, Vidzeme, LV-1006, Latvija"
+"Nanjing University of Aeronautics and Astronautics, Nanjing 210016, China",32.0373496,118.8140686,"南京航空航天大学, 御道街, 白下区, 新世纪广场, 秦淮区, 南京市, 江苏省, 210016, 中国"
+"King Saud University, Riyadh, Saudi Arabia",24.7246403,46.623350123456,"King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية"
+Dr. B. C. Roy Engineering College,23.54409755,87.342697070434,"Dr. B. C. Roy Engineering College, Lenin Sarani, Durgapur, Bānkurā, West Bengal, 713200, India"
+University of Canberra,-35.23656905,149.084469935058,"University of Canberra, University Drive, Bruce, Belconnen, Australian Capital Territory, 2617, Australia"
+"Chung-Ang University, Seoul, Korea",37.50882,126.9619,"중앙대학교, 서달로15길, 흑석동, 동작구, 서울특별시, 06981, 대한민국"
+Beijing Jiaotong University,39.94976005,116.33629045844,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国"
+"King Abdullah University of Science and Technology 4700, Thuwal, Saudi Arabia",22.31055485,39.1051548637793,"KAUST, Collaboration Avenue, ثول, منطقة مكة المكرمة, 23955, السعودية"
+"The Australian National University Canberra ACT 2601, Australia",-35.28121335,149.11665331324,"Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia"
+"Santa Clara University, Santa Clara, CA. 95053, USA",37.34820285,-121.935635412063,"Cowell Center, Accolti Way, Santa Clara, Santa Clara County, California, 95053, USA"
+Rowland Institute,42.3639862,-71.0778293,"Rowland Research Institute, Land Boulevard, East Cambridge, Cambridge, Middlesex County, Massachusetts, 02142, USA"
+Nottingham Trent University,52.9577322,-1.15617099267709,"Nottingham Trent University, Waverley Terrace, Lace Market, The Park, City of Nottingham, East Midlands, England, NG1 5JD, UK"
+University of Basel,47.5612651,7.5752961,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra"
+"Bogazici University, Bebek",41.0868841,29.0441316722649,"Boğaziçi Üniversitesi Kuzey Yerleşkesi, Okulaltı 1. Sokak, Rumelihisarı, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34467, Türkiye"
+King Faisal University,26.397778,50.183056,"University of Dammam, King Faisal Rd, العقربية, الخبر, المنطقة الشرقية, ٣١٩٥٢, السعودية"
+National Institute of Technology Karnataka,13.01119095,74.7949882494716,"National Institute of Technology, Karnataka, NH66, ದಕ್ಷಿಣ ಕನ್ನಡ, Mangaluru taluk, Dakshina Kannada, Karnataka, 575025, India"
+"University of California, San Diego",32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+"University of Science and Technology of China, Hefei, 230027, China",31.83907195,117.264207478576,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国"
+Nagoya University,43.53750985,143.60768225282,"SuperDARN (Hokkaido West), 太辛第1支線林道, 陸別町, 足寄郡, 十勝総合振興局, 北海道, 北海道地方, 日本"
+Institute of Information Science,25.0410728,121.614756201755,"資訊科學研究所, 數理大道, 中研里, 南港子, 南港區, 臺北市, 11574, 臺灣"
+University of Iowa,41.6659,-91.573103065,"University of Iowa, Hawkeye Court, Iowa City, Johnson County, Iowa, 52246, USA"
+University of Trento,46.0658836,11.1159894,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia"
+Middlebury College,44.0090777,-73.1767946,"Middlebury College, Old Chapel Road, Middlebury, Addison County, Vermont, 05753, USA"
+"Indian Institute of Science, India",13.0222347,77.5671832476811,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India"
+"University of Bonn, Germany",50.7338124,7.1022465,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland"
+"University of Illinois at Urbana-Champaign, IL USA",40.101976,-88.2314378,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA"
+University of Miami,25.7173339,-80.2786688657706,"University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA"
+"DIT UNIVERSITY, DEHRADUN",30.3983396,78.0753455,"DIT University, Dehradun-Mussoorie Road, Rājpur, Kincraig, Dehra Dūn, Uttarakhand, 248009, India"
+"Istanbul Technical University, Istanbul, Turkey",41.10427915,29.022311592943,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye"
+"Karlsruhe Institute of Technology, 76131 Karlsruhe, Germany",49.10184375,8.43312559623876,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland"
+"Rheinische-Friedrich-Wilhelms University, Bonn, Germany",50.7338124,7.1022465,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland"
+Nanyang Technological University,1.3484104,103.682979653067,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore"
+College of Electrical and Information Engineering,42.0049791,21.40834315,"Факултет за електротехника и информациски технологии, Орце Николов, Карпош 2, Карпош, Скопје, Општина Карпош, Град Скопје, Скопски Регион, 1000, Македонија"
+Institute of Systems and Robotics,53.8338371,10.7035939,"Institut für Robotik und Kognitive Systeme, 160, Ratzeburger Allee, Strecknitz, Sankt Jürgen, Strecknitz, Lübeck, Schleswig-Holstein, 23562, Deutschland"
+Massachusetts Institute of Technology,42.3583961,-71.0956778766393,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+UNIVERSITY OF TAMPERE,61.49412325,23.7792067776763,"Tampereen yliopisto, 4, Kalevantie, Ratinanranta, Tulli, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33100, Suomi"
+University of California Santa Barbara,34.4145937,-119.84581949869,"UCSB, Santa Barbara County, California, 93106, USA"
+The Institute of Electronics,12.8447999,77.6632389626693,"International Institute of Information Technology Bangalore - IIITB, Infosys Avenue, Konappana Agrahara, Electronics City Phase 1, Vittasandra, Bangalore Urban, Karnataka, 560100, India"
+University of Illinois at Chicago,41.86898915,-87.6485625597018,"University of Illinois at Chicago, West Taylor Street, Greektown, Chicago, Cook County, Illinois, 60607, USA"
+"University of Pittsburgh, Pittsburgh, USA",40.44415295,-79.9624399276271,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA"
+"Cardiff University, UK",51.4879961,-3.17969747443907,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK"
+Institute of Digital Media,20.28907925,85.84232125,"Institute of Digital Media Technology, Way to Csa Odisha Office, Ward 35, South East Zone, Bhubaneswar Municipal Corporation, Khordha, Odisha, 751022, India"
+University of Siena,22.4133862,114.210058,"大學 University, 澤祥街 Chak Cheung Street, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国"
+University of Kentucky,38.0333742,-84.5017758,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA"
+"University of Oxford, United Kingdom",51.7534538,-1.25400997048855,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK"
+"University of California, San Diego, La Jolla",32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+"Beijing University of Posts and Telecommunications, Beijing, China",39.9601488,116.351939210403,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国"
+"Bar Ilan University, Israel",32.06932925,34.8433433861531,"אוניברסיטת בר אילן, כביש גהה, גבעת שמואל, קריית מטלון, גבעת שמואל, מחוז תל אביב, NO, ישראל"
+Raipur institute of technology,21.2262243,81.8013664,"Raipur institute of technology, NH53, Raipur, Chhattisgarh, 492101, India"
+"Nagaoka University of Technology, Japan",37.42354445,138.77807276029,"長岡技術科学大学 (Nagaoka University of Technology), 長岡西山線, 長岡市, 新潟県, 中部地方, 日本"
+"University of California, Berkeley",37.8687126,-122.255868148743,"Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA"
+"University of Chinese Academy of Sciences, Beijing 101408, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+"Macau University of Science and Technology, Macau",22.15263985,113.568032061523,"Universidade de Ciência e Tecnologia de Macau 澳門科技大學 Macau University of Science and Technology, 偉龍馬路 Avenida Wai Long, 氹仔Taipa, 氹仔舊城區 Vila de Taipa, 嘉模堂區 Nossa Senhora do Carmo, 氹仔 Taipa, 澳門 Macau, 853, 中国"
+University of Abertay,56.46323375,-2.97447511707098,"Abertay University, Bell Street, City Centre, Dundee, Dundee City, Scotland, DD1 1HG, UK"
+"Southwest University, China",29.82366295,106.420500156445,"西南大学, 天生路, 北碚区 (Beibei), 北碚区, 北碚区 (Beibei), 重庆市, 400711, 中国"
+"Cornell University, Ithaca, New York",42.4505507,-76.4783512955428,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA"
+National Taipei University,24.94314825,121.368629787836,"國立臺北大學, 151, 大學路, 龍恩里, 隆恩埔, 三峽區, 新北市, 23741, 臺灣"
+"University of Tennessee, Knoxville",35.9542493,-83.9307395,"University of Tennessee, Melrose Avenue, Fort Sanders, Knoxville, Knox County, Tennessee, 37916, USA"
+"University of California, San Diego, California, USA",32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+University of Texas at Austin,30.284151,-97.7319559808022,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA"
+University of Tokyo,35.9020448,139.936220089117,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本"
+University of Massachusetts,42.3889785,-72.5286987,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA"
+"Ruhr-University Bochum, Germany",51.44415765,7.26096541306078,"RUB, 150, Universitätsstraße, Ruhr-Universität, Querenburg, Bochum-Süd, Bochum, Regierungsbezirk Arnsberg, Nordrhein-Westfalen, 44801, Deutschland"
+"Warsaw University of Technology, Poland",52.22165395,21.0073577612511,"Politechnika Warszawska, 1, Plac Politechniki, VIII, Śródmieście, Warszawa, mazowieckie, 00-661, RP"
+Columbia University in the City of New York,40.8071772,-73.9625279772072,"Columbia University In The City Of New York, College Walk, Morningside Heights, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10027, USA"
+University of Basel,47.5612651,7.5752961,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra"
+"University of Peshawar, Pakistan",34.0092004,71.4877494739102,"University of Peshawar, Road 2, JAHANGIR ABAD / جهانگیرآباد, پشاور‎, Peshāwar District, خیبر پختونخوا, 2500, ‏پاکستان‎"
+"Foundation University Rawalpindi Campus, Pakistan",33.5609504,73.0712596618793,"Foundation University Rawalpindi Campus, Main Parking Road, Police Lines, راولپنڈی, Rawalpindi Cantt, پنجاب, 46600, ‏پاکستان‎"
+"University of Oxford, United Kingdom",51.7534538,-1.25400997048855,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK"
+University of Massachusetts,42.3889785,-72.5286987,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA"
+University of Edinburgh,55.94951105,-3.19534912525441,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK"
+University of Wisconsin Madison,43.07982815,-89.4306642542901,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA"
+"Istanbul Technical University, Istanbul, 34469, TURKEY",41.10427915,29.022311592943,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye"
+Reutlingen University,48.48187645,9.18682403998887,"Campus Hohbuch, Campus Hochschule Reutlingen, Reutlingen, Landkreis Reutlingen, Regierungsbezirk Tübingen, Baden-Württemberg, 72762, Deutschland"
+"Chulalongkorn University, Bangkok",13.74311795,100.532879009091,"จุฬาลงกรณ์มหาวิทยาลัย, 254, ถนนพญาไท, สยาม, แขวงปทุมวัน, เขตปทุมวัน, กรุงเทพมหานคร, 10330, ประเทศไทย"
+The University of Cambridge,52.17638955,0.143088815415187,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK"
+"University of Texas at Arlington, Arlington, TX",32.7283683,-97.112018348404,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA"
+"Imperial College London, United Kingdom",51.49887085,-0.175607973937072,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK"
+"The City College of New York, New York, NY 10031, USA",40.81819805,-73.9510089793336,"CCNY, 160, Convent Avenue, Manhattanville, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10031, USA"
+Vrije Universiteit Brussel,50.8411007,4.32377555279953,"Vrije Universiteit Brussel, 170, Quai de l'Industrie - Nijverheidskaai, Anderlecht, Brussel-Hoofdstad - Bruxelles-Capitale, Région de Bruxelles-Capitale - Brussels Hoofdstedelijk Gewest, 1070, België / Belgique / Belgien"
+"Massachusetts Institute of Technology, Cambridge, MA 02139, USA",42.3583961,-71.0956778766393,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+Otto von Guericke University,52.14005065,11.6447124822347,"Otto-von-Guericke-Universität Magdeburg, 2, Universitätsplatz, Krökentorviertel/Breiter Weg NA, Alte Neustadt, Magdeburg, Sachsen-Anhalt, 39106, Deutschland"
+University of Wisconsin Madison,43.07982815,-89.4306642542901,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA"
+University of Illinois at,40.1006938,-88.2313043272112,"University of Illinois at Urbana-Champaign, West Pennsylvania Avenue, West Urbana Residential Area, Urbana, Champaign County, Illinois, 61801, USA"
+Bournemouth University,50.74223495,-1.89433738695589,"Bournemouth University, BU footpaths, Poole, South West England, England, BH10 4HX, UK"
+"Karlsruhe Institute of Technology, Germany",49.10184375,8.43312559623876,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland"
+"Sharda University, Greater Noida, India",28.4737512,77.4836148,"Sharda University, Yamuna Expressway, Greater Noida, Gautam Buddha Nagar, Uttar Pradesh, 201308, India"
+The University of Electro-Communications,35.6572957,139.542558677257,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本"
+"Nanyang Technological University, Singapore 639798, Singapore",1.3484104,103.682979653067,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore"
+"Queen Mary University of London, UK",51.5247272,-0.0393103466301624,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK"
+"Nanjing University of Science and Technology, China",32.031826,118.852142742792,"南京理工大学, 友谊路, 余粮庄, 玄武区, 南京市, 江苏省, 210016, 中国"
+University of Ottawa,45.42580475,-75.6874011819989,"University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada"
+Tokyo Institute of Technology,35.5167538,139.483422513406,"東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本"
+University of Leeds,53.80387185,-1.55245712031677,"University of Leeds, Inner Ring Road, Woodhouse, Leeds, Yorkshire and the Humber, England, LS2 9NS, UK"
+"Khalifa University, Abu Dhabi, United Arab Emirates",24.4469025,54.3942563,"Khalifa University, شارع طَوِي مُوَيلِح, قصر الشاطئ, حدبة الزَّعْفرانة, أبوظبي, أبو ظبي, 31757, الإمارات العربية المتحدة"
+"Northeastern University, Boston, USA",42.3383668,-71.0879352428284,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA"
+University of Oradea,47.0570222,21.922709,"Universitatea Creștină Partium - Clădirea Sulyok, 27, Strada Primăriei, Orașul Nou, Oradea, Bihor, 410209, România"
+"Dartmouth College, NH 03755 USA",43.7070046,-72.2869048,"Dartmouth College, Maynard Street, Hanover, Grafton County, New Hampshire, 03755, USA"
+to Michigan State University,42.7231021,-84.4449848597663,"Red Cedar River, Small Acres Lane, Okemos, Ingham County, Michigan, 48864, USA"
+Plymouth University,50.3755269,-4.13937687442817,"Plymouth University, Portland Square, Barbican, Plymouth, South West England, England, PL4 6AP, UK"
+"National University of Singapore, Singapore",1.2962018,103.776899437848,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore"
+"University of Malaya, 50603 Kuala Lumpur, Malaysia",3.12267405,101.65356103394,"UM, Lingkaran Wawasan, Bukit Pantai, Bangsar, KL, 50603, Malaysia"
+"Hankuk University of Foreign Studies, South Korea",37.5953979,127.0630499,"외대앞, 휘경로, 이문동, 이문2동, 동대문구, 서울특별시, 02407, 대한민국"
+University of Posts and Telecommunications,32.11527165,118.925956600436,"南京邮电大学仙林校区, 9, 文苑路, 仙林大学城, 栖霞区, 南京市, 江苏省, 210023, 中国"
+"National Sun Yat Sen University, 804 Kaohsiung, Taiwan",22.62794005,120.266318480249,"國立中山大學, 70, 蓮海路, 桃源里, 柴山, 鼓山區, 高雄市, 804, 臺灣"
+Rensselaer Polytechnic Institute,42.7298459,-73.6795021620135,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA"
+Bielefeld University,52.0280421,8.51148270115395,"Fachhochschule Bielefeld FB Gestaltung, 3, Lampingstraße, Mitte, Bielefeld, Regierungsbezirk Detmold, Nordrhein-Westfalen, 33615, Deutschland"
+HoHai University,32.05765485,118.755000398628,"河海大学, 河海路, 小市桥, 鼓楼区, 南京市, 江苏省, 210013, 中国"
+University of California Davis,38.5336349,-121.790772639747,"University of California, Davis, Apiary Drive, Yolo County, California, 95616-5270, USA"
+"Queensland University of Technology, Australia",-27.47715625,153.028410039129,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia"
+University of Iceland,64.137274,-21.9456145356869,"Háskóli Íslands, Sturlugata, Háskóli, Reykjavík, Reykjavíkurborg, Höfuðborgarsvæðið, 121, Ísland"
+National University of singapore,1.2962018,103.776899437848,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore"
+"University of Denver, Denver, CO",39.6766541,-104.962203,"University of Denver, Driscoll Bridge, Denver, Denver County, Colorado, 80208, USA"
+"Queen Mary University of London, London E1 4NS, UK",51.5247272,-0.0393103466301624,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK"
+Korea University,37.5901411,127.0362318,"고려대, 안암로, 제기동, 동대문구, 서울특별시, 02796, 대한민국"
+"Nanjing University, Nanjing 210023, China",32.0565957,118.774088328078,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国"
+University of Science and Technology of China,31.83907195,117.264207478576,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国"
+University of Wisconsin-Madison,43.07982815,-89.4306642542901,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA"
+University of Barcelona,41.3868913,2.16352384576632,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España"
+University of Texas,32.3163078,-95.2536994379459,"The University of Texas at Tyler, 3900, University Boulevard, Tyler, Smith County, Texas, 75799, USA"
+"University of South Carolina, Columbia, USA",33.9928298,-81.0268516781225,"University of South Carolina, Wheat Street, Columbia, Richland County, South Carolina, 29205, USA"
+Jahangirnagar University,23.883312,90.2693921,"Jahangirnagar University, 1342, University Main Road, সাভার, সাভার উপজেলা, ঢাকা জেলা, ঢাকা বিভাগ, 1342, বাংলাদেশ"
+University of Southern California,34.0224149,-118.286344073446,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA"
+Institute of Computer Science,35.15456615,128.098476040221,"Institute of Computer Science, 8, 내동로, 신율리, 진주시, 경남, 52669, 대한민국"
+"University of Illinois at Urbana Champaign, Urbana",40.101976,-88.2314378,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA"
+"Aberystwyth University, UK",52.4107358,-4.05295500914411,"Aberystwyth University, Llanbadarn Campus, Cefn Esgair, Waun Fawr, Comins Coch, Ceredigion, Wales, SY23 3JG, UK"
+"National University of Sciences and Technology (NUST), Islamabad, Pakistan",33.644347,72.9885079,"National University of Sciences and Technology (NUST), Kashmir Highway, جی - 10, ICT, وفاقی دارالحکومت اسلام آباد, 44000, ‏پاکستان‎"
+Sungkyunkwan University,37.3003127,126.972123,"성균관대, 덕영대로, 천천동, 장안구, 수원시, 경기, 16357, 대한민국"
+Santa Fe Institute,35.7002878,-105.908648471331,"Santa Fe Institute, Hyde Park Road, Santa Fe, Santa Fe County, New Mexico, 87501, USA"
+"North Dakota State University, Fargo, ND 58108-6050, USA",46.897155,-96.8182760282419,"North Dakota State University, 15th Avenue North, Fargo, Cass County, North Dakota, 58102, USA"
+University of Nottingham,52.9387428,-1.20029569274574,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK"
+University of Amsterdam,52.3553655,4.9501644,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland"
+Cyprus University of Technology,34.67567405,33.0457764820597,"Mitropoli Building - Cyprus University of Technology, Anexartisias, Limasol - Λεμεσός, Limassol - Λεμεσός, Κύπρος - Kıbrıs, 3036, Κύπρος - Kıbrıs"
+Middlesex University London,51.59029705,-0.229632209454029,"Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK"
+Northeastern University,42.3383668,-71.0879352428284,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA"
+Okayama University,34.6893393,133.9222272,"岡山大学, 津高法界院停車場線, 津島東2, 津島東, 北区, 岡山市, 岡山県, 中国地方, 700-0081, 日本"
+Swansea University,51.6091578,-3.97934429228629,"Swansea University, University Footbridge, Sketty, Swansea, Wales, SA2 8PZ, UK"
+"University of Technology, Sydney, Australia",-33.8828784,151.200682779726,"UTS, Thomas Street, Ultimo, Sydney, NSW, 2007, Australia"
+University of Waterloo,43.47061295,-80.5472473165632,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada"
+Courant Institute,40.7286994,-73.9957151,"NYU Courant Institute of Mathematical Sciences, 251, Mercer Street, Washington Square Village, Greenwich Village, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA"
+DUBLIN CITY UNIVERSITY,53.38522185,-6.25740874081493,"Dublin City University Glasnevin Campus, Lower Car Park, Wad, Whitehall A ED, Dublin 9, Dublin, County Dublin, Leinster, D09 FW22, Ireland"
+"Tsinghua University, Beijing 100084, P.R.China",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+"University of Michigan, Ann Arbor, MI",42.2942142,-83.710038935096,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA"
+"Dalian University of Technology, China",38.88140235,121.522810980755,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国"
+Ural Federal University (UrFU,56.8435083,60.6454805,"УрФУ, улица Гагарина, Эврика, Втузгородок, Кировский район, Екатеринбург, городской округ Екатеринбург, Свердловская область, Уральский федеральный округ, 620062, РФ"
+"Science, University of Amsterdam",52.3553655,4.9501644,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland"
+UNIVERSITY OF TAMPERE,61.49412325,23.7792067776763,"Tampereen yliopisto, 4, Kalevantie, Ratinanranta, Tulli, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33100, Suomi"
+National University of Singapore,1.2962018,103.776899437848,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore"
+University of Washington,47.6543238,-122.308008943203,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA"
+"Brown University, Providence, RI",41.8268682,-71.4012314581107,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA"
+"Nanyang Technological University, Singapore",1.3484104,103.682979653067,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore"
+"Chu Hai College of Higher Education, Hong Kong",22.3760643,113.987153890134,"珠海學院 Chu Hai College of Higher Education, 80, 青盈路 Tsing Ying Road, 嘉和里 Ka Wo Lei, 小秀村 Siu Sau Tsuen, 屯門區 Tuen Mun District, 新界 New Territories, HK, DD132 586, 中国"
+Punjabi University Patiala,30.3568981,76.4551272,"Punjabi University Patiala, Rajpura Road, Patiala, Punjab, 147001, India"
+University of Oxford,51.7534538,-1.25400997048855,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK"
+Shiraz University,29.6385474,52.5245706,"دانشگاه شیراز, میدان ارم, محدوده شهرداری منطقه یک - شهرداری شیراز, شیراز, بخش مرکزی شهرستان شیراز, شهرستان شیراز, استان فارس, 71348-34689, ‏ایران‎"
+"Sun Yat-Sen University, Guangzhou, P.R. China",23.09461185,113.287889943975,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国"
+FL,27.7567667,-81.4639835,"Florida, USA"
+"Princeton University, Princeton, New Jersey, USA",40.34829285,-74.66308325,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA"
+Indiana University,39.86948105,-84.8795690544362,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA"
+Amherst College,42.37289,-72.518814,"Amherst College, Boltwood Avenue, Amherst, Hampshire, Massachusetts, 01004, USA"
+"University Politehnica of Bucharest, Romania",44.43918115,26.0504456538413,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România"
+"Beijing Normal University, China",39.96014155,116.359704380265,"北京师范大学, 19, 新街口外大街, 西城区, 100875, 中国"
+"Newcastle University, Newcastle upon Tyne",54.98023235,-1.61452627035949,"Newcastle University, Claremont Walk, Haymarket, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE1 7RU, UK"
+"University of Washington, Seattle, WA 98195, United States",47.6547795,-122.305818,"University of Washington, Yakima Lane, Montlake, University District, Seattle, King County, Washington, 98195, USA"
+"Bar Ilan University, Israel",32.06932925,34.8433433861531,"אוניברסיטת בר אילן, כביש גהה, גבעת שמואל, קריית מטלון, גבעת שמואל, מחוז תל אביב, NO, ישראל"
+Hacettepe University,39.86742125,32.7351907206768,"Hacettepe Üniversitesi Beytepe Kampüsü, Hacettepe-Beytepe Kampüs Yolu, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye"
+"Carnegie Mellon University, Pittsburgh, PA, 15213, USA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+COMSATS Institute of Information Technology,31.4006332,74.2137296,"COMSATS Institute of Information Technology, Ali Akbar Road, Dawood Residency, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54700, ‏پاکستان‎"
+University (ITU,55.65965525,12.5910768893446,"IT-Universitetet i København, Emil Holms Kanal, Christianshavn, København, Københavns Kommune, Region Hovedstaden, 1424, Danmark"
+Oregon State University,45.5198289,-122.677979643331,"OSU Beaver Store, 538, Southwest 6th Avenue, Portland Downtown, Portland, Multnomah County, Oregon, 97204, USA"
+"University of Vienna, Austria",48.2131302,16.3606865338016,"Uni Wien, 1, Universitätsring, Schottenviertel, KG Innere Stadt, Innere Stadt, Wien, 1010, Österreich"
+OF STANFORD UNIVERSITY,37.43131385,-122.169365354983,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA"
+Bas kent University,52.08340265,5.14828494152362,"University College Utrecht 'Babel', 7, Campusplein, Utrecht, Nederland, 3584 ED, Nederland"
+The University of Tokyo,35.9020448,139.936220089117,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本"
+"King Saud University, Riyadh",24.7246403,46.623350123456,"King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية"
+University of Cambridge,52.17638955,0.143088815415187,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK"
+University of Oulu,65.0592157,25.466326012507,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi"
+University of California Santa Barbara,34.4145937,-119.84581949869,"UCSB, Santa Barbara County, California, 93106, USA"
+Czech Technical University,50.0764296,14.418023122743,"České vysoké učení technické v Praze, Resslova, Nové Město, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 11121, Česko"
+"Ulm University, Germany",48.38044335,10.0101011516362,"HNU, John-F.-Kennedy-Straße, Vorfeld, Wiley, Neu-Ulm, Landkreis Neu-Ulm, Schwaben, Bayern, 89231, Deutschland"
+"Australian National University, Canberra",-35.28121335,149.11665331324,"Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia"
+University of North Carolina Wilmington,34.2375581,-77.9270129,"Kenan House, 1705, Market Street, Wilmington, New Hanover County, North Carolina, 28403, USA"
+"The Ohio State University, OH",40.00471095,-83.0285936787604,"The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA"
+"University of Central Florida, Orlando",28.42903955,-81.4421617727936,"Rosen College of Hospitality Management, 9907, Universal Boulevard, Orange County, Florida, 32819, USA"
+Institute,38.3836097,-81.7654665,"Institute, Kanawha County, West Virginia, 25112, USA"
+Chubu University,35.2742655,137.013278412463,"中部大学, 国道19号, 春日井市, 愛知県, 中部地方, 487-8501, 日本"
+"Columbia University, New York NY 10027, USA",40.81779415,-73.9578531933627,"Columbia University, West 131st Street, Manhattanville Houses, Manhattanville, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10027, USA"
+University of Memphis,35.1189387,-89.9372195996589,"The University of Memphis, Desoto Avenue, Memphis, Shelby County, Tennessee, 38152, USA"
+Imperial College London,51.49887085,-0.175607973937072,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK"
+Sun Yat-sen University,23.09461185,113.287889943975,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国"
+"University of Texas, Austin, TX 78712-1188, USA",30.284458,-97.7342106,"University of Texas at Austin, 2152, San Jacinto Boulevard, Medical District, Austin, Travis County, Texas, 78712, USA"
+Institute of,38.3836097,-81.7654665,"Institute, Kanawha County, West Virginia, 25112, USA"
+"Qatar University, Qatar",25.37461295,51.4898035392337,"Qatar University, Roindabout 3, Al Tarfa (68), أم صلال, 24685, ‏قطر‎"
+"UNIVERSITY OF CALIFORNIA, BERKELEY",37.8687126,-122.255868148743,"Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA"
+University of North Carolina at Charlotte,35.3103441,-80.732616166699,"Lot 20, Poplar Terrace Drive, Charlotte, Mecklenburg County, North Carolina, 28223, USA"
+Rutgers University,40.47913175,-74.431688684404,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA"
+AI Institute,-34.6102167,-58.3752244291708,"INDEC, 609, Avenida Presidente Julio A. Roca, Microcentro, Comuna 1, Monserrat, CABA, C1067ABB, Argentina"
+"West Virginia University, Morgantown, WV 26506, USA",39.65404635,-79.96475355,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA"
+"Tampere University of Technology, Tampere, Finland",61.44964205,23.8587746189096,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi"
+"Graz University of Technology, Austria",47.05821,15.460195677136,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich"
+"University of Exeter, UK",50.7369302,-3.53647671702167,"University of Exeter, Stocker Road, Exwick, Exeter, Devon, South West England, England, EX4 4QN, UK"
+Nanjing University of Science and Technology,32.031826,118.852142742792,"南京理工大学, 友谊路, 余粮庄, 玄武区, 南京市, 江苏省, 210016, 中国"
+Institute of Automation,54.1720834,12.0790983,"Institut für Automatisierungstechnik, 31, Richard-Wagner-Straße, Warnemünde, Ortsbeirat 1 : Diedrichshagen,Seebad Warnemünde, Rostock, Mecklenburg-Vorpommern, 18119, Deutschland"
+University of Kentucky,38.0333742,-84.5017758,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA"
+"RMIT University, Australia",-37.8087465,144.9638875,"RMIT University, 124, La Trobe Street, Melbourne City, City of Melbourne, Victoria, 3000, Australia"
+"Harbin Institute of Technology, Harbin 150001, China",45.7413921,126.625527550394,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国"
+"Chung-Ang University, Seoul, Korea",37.50882,126.9619,"중앙대학교, 서달로15길, 흑석동, 동작구, 서울특별시, 06981, 대한민국"
+Nanyang Technological University,1.3484104,103.682979653067,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore"
+The Ohio State University,40.00471095,-83.0285936787604,"The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA"
+Katholieke Universiteit Leuven,50.8830686,4.7019503,"Laboratorium voor Bos, natuur en landschap, 102, Vital Decosterstraat, Sint-Maartensdal, Leuven, Vlaams-Brabant, Vlaanderen, 3000, België / Belgique / Belgien"
+"Y. Li, University of Maryland",39.2864694,-76.6263409932124,"Penn Street Garage, 120, Penn Street, Ridgleys Delight, Baltimore, Maryland, 21201, USA"
+"University POLITEHNICA Timisoara, Timisoara, 300223, Romania",45.746189,21.2275507517647,"UPT, Bulevardul Vasile Pârvan, Elisabetin, Timișoara, Timiș, 300223, România"
+"University of Lincoln, U.K",53.22853665,-0.548734723802121,"University of Lincoln, Brayford Way, Whitton Park, New Boultham, Lincoln, Lincolnshire, East Midlands, England, LN6 7TS, UK"
+"University POLITEHNICA of Bucharest, Bucharest, Romania",44.43918115,26.0504456538413,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România"
+"Chonbuk National University, Jeonju-si",35.84658875,127.135013303058,"전북대학교, 567, 백제대로, 금암동, 덕진구, 전주시, 전북, 54896, 대한민국"
+"University of Technology, Baghdad, Iraq",33.3120263,44.4471829434368,"الجامعة التكنلوجية, A86;N11;D383, محلة 103, Al Saadoom Park, Rusafa, بغداد, Al Resafa, محافظة بغداد, 3241, العراق"
+Monash University,-37.78397455,144.958674326093,"Monash University, Mile Lane, Parkville, City of Melbourne, Victoria, 3000, Australia"
+"Michigan State University, East Lansing MI",42.718568,-84.4779157093052,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA"
+The Open University,52.02453775,-0.709274809394501,"The Open University, East Lane, Walton, Monkston, Milton Keynes, South East, England, MK7 6AE, UK"
+"University of Notre Dame. Notre Dame, IN 46556.USA",41.70456775,-86.2382202601727,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA"
+MASSACHUSETTS INSTITUTE OF TECHNOLOGY (MIT,42.3583961,-71.0956778766393,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+to Michigan State University,42.7231021,-84.4449848597663,"Red Cedar River, Small Acres Lane, Okemos, Ingham County, Michigan, 48864, USA"
+National Taipei University,24.94314825,121.368629787836,"國立臺北大學, 151, 大學路, 龍恩里, 隆恩埔, 三峽區, 新北市, 23741, 臺灣"
+Guangdong Medical College,23.1294489,113.343761097683,"医学院, 真如路, 凤凰新村, 天河区, 广州市, 广东省, 510635, 中国"
+"University of Barcelona, Spain",41.3868913,2.16352384576632,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España"
+"Harbin Institute of Technology, Harbin 150001, China",45.7413921,126.625527550394,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国"
+"Dartmouth College, NH 03755 USA",43.7070046,-72.2869048,"Dartmouth College, Maynard Street, Hanover, Grafton County, New Hampshire, 03755, USA"
+"Manonmaniam Sundaranar University, India",8.76554685,77.65100444813,"Manonmaniam Sundaranar University, Tenkasi-Tirunelveli, Gandhi Nagar, Tirunelveli, Tirunelveli Kattabo, Tamil Nadu, 627808, India"
+Queensland University of Technology(QUT,-27.4770485,153.028373791304,"QUT Gardens Point Main Library, V, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia"
+Institute of,38.3836097,-81.7654665,"Institute, Kanawha County, West Virginia, 25112, USA"
+"Technical University Munich, Germany",48.14955455,11.5677531417838,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland"
+"University of Illinois at Urbana Champaign, Urbana, IL 61801, USA",40.1066501,-88.2240260725426,"University of Illinois at Urbana-Champaign, South Goodwin Avenue, Urbana, Champaign County, Illinois, 61801, USA"
+Nanjing University of Information Science and Technology,32.2068102,118.718472893883,"南京信息工程大学, 龙山北路, 第十六街区, 浦口区, 南京市, 江苏省, 210032, 中国"
+Rutgers University,40.47913175,-74.431688684404,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA"
+Sun Yat-sen University,23.09461185,113.287889943975,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国"
+"Y. Li, University of Maryland",39.2864694,-76.6263409932124,"Penn Street Garage, 120, Penn Street, Ridgleys Delight, Baltimore, Maryland, 21201, USA"
+Korea University,37.5901411,127.0362318,"고려대, 안암로, 제기동, 동대문구, 서울특별시, 02796, 대한민국"
+University of Illinois at,40.1006938,-88.2313043272112,"University of Illinois at Urbana-Champaign, West Pennsylvania Avenue, West Urbana Residential Area, Urbana, Champaign County, Illinois, 61801, USA"
+"Tsinghua University, 100084 Beijing, China",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+"University Politehnica of Bucharest, Romania",44.43918115,26.0504456538413,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România"
+"National Sun Yat Sen University, 804 Kaohsiung, Taiwan",22.62794005,120.266318480249,"國立中山大學, 70, 蓮海路, 桃源里, 柴山, 鼓山區, 高雄市, 804, 臺灣"
+"Stamford University Bangladesh, Dhaka-1209, Bangladesh",23.7448166,90.4084351355108,"Stamford University Bangladesh, Siddeshwari Road, ফকিরাপুল, Paltan, ঢাকা, ঢাকা বিভাগ, 1217, বাংলাদেশ"
+"Kurukshetra University, Kurukshetra",29.95826275,76.8156304467532,"Kurukshetra University, SH6, Kurukshetra, Haryana, 132118, India"
+Punjabi University Patiala,30.3568981,76.4551272,"Punjabi University Patiala, Rajpura Road, Patiala, Punjab, 147001, India"
+Swansea University,51.6091578,-3.97934429228629,"Swansea University, University Footbridge, Sketty, Swansea, Wales, SA2 8PZ, UK"
+University of Southern California,34.0224149,-118.286344073446,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA"
+University of California Davis,38.5336349,-121.790772639747,"University of California, Davis, Apiary Drive, Yolo County, California, 95616-5270, USA"
+"Michigan State University, East Lansing MI",42.718568,-84.4779157093052,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA"
+"Zhejiang Normal University, Jinhua, China",29.13646725,119.637686517179,"浙江师范大学, 688, 迎宾大道, 柳湖花园, 金华市, 婺城区 (Wucheng), 金华市 / Jinhua, 浙江省, 321004, 中国"
+East China Normal University,31.2284923,121.402113889769,"华东师范大学, 3663, 中山北路, 曹家渡, 普陀区, 普陀区 (Putuo), 上海市, 200062, 中国"
+to Michigan State University,42.7231021,-84.4449848597663,"Red Cedar River, Small Acres Lane, Okemos, Ingham County, Michigan, 48864, USA"
+Massachusetts Institute,42.3583961,-71.0956778766393,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+Brown University,41.8268682,-71.4012314581107,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA"
+Islamic Azad University,34.8452999,48.5596212013643,"دانشگاه آزاد اسلامی, همدان, بخش مرکزی شهرستان همدان, شهرستان همدان, استان همدان, ‏ایران‎"
+"University of St Andrews, United Kingdom",56.3411984,-2.7930938,"University of St Andrews, North Street, Albany Park Student accommodation, Carngour, St Andrews, Fife, Scotland, KY16 9AJ, UK"
+Czech Technical University,50.0764296,14.418023122743,"České vysoké učení technické v Praze, Resslova, Nové Město, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 11121, Česko"
+Japan,36.5748441,139.2394179,日本
+Amherst College,42.37289,-72.518814,"Amherst College, Boltwood Avenue, Amherst, Hampshire, Massachusetts, 01004, USA"
+"Technological University, Davanagere, Karnataka, India",14.4525199,75.9179512,"UBDT College of Engineering, College Private Road, K.T. Jambanna Nagara, Davanagere, Davanagere taluku, Davanagere district, Karnataka, 577000, India"
+"University of Central Florida, Orlando",28.42903955,-81.4421617727936,"Rosen College of Hospitality Management, 9907, Universal Boulevard, Orange County, Florida, 32819, USA"
+"University of Notre Dame. Notre Dame, IN 46556.USA",41.70456775,-86.2382202601727,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA"
+"National University of Singapore, Singapore",1.2962018,103.776899437848,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore"
+University of Cambridge,52.17638955,0.143088815415187,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK"
+MASSACHUSETTS INSTITUTE OF TECHNOLOGY,42.3583961,-71.0956778766393,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+Middlesex University London,51.59029705,-0.229632209454029,"Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK"
+"West Virginia University, Morgantown, WV 26506, USA",39.65404635,-79.96475355,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA"
+"Southwest University, Chongqing 400715, China",29.82366295,106.420500156445,"西南大学, 天生路, 北碚区 (Beibei), 北碚区, 北碚区 (Beibei), 重庆市, 400711, 中国"
+Istanbul Technical University,41.10427915,29.022311592943,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye"
+Institute of,38.3836097,-81.7654665,"Institute, Kanawha County, West Virginia, 25112, USA"
+"Khalifa University, Abu Dhabi, United Arab Emirates",24.4469025,54.3942563,"Khalifa University, شارع طَوِي مُوَيلِح, قصر الشاطئ, حدبة الزَّعْفرانة, أبوظبي, أبو ظبي, 31757, الإمارات العربية المتحدة"
+"Tsinghua University, 100084 Beijing, China",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+"Nanyang Technological University, Singapore",1.3484104,103.682979653067,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore"
+"Tsinghua University, Beijing, China",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+The University of Tokyo,35.9020448,139.936220089117,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本"
+Nanjing University of Information Science and Technology,32.2068102,118.718472893883,"南京信息工程大学, 龙山北路, 第十六街区, 浦口区, 南京市, 江苏省, 210032, 中国"
+any other University,53.8012316,-1.5476213,"Northern Film School, Millennium Square, Steander, Woodhouse, Leeds, Yorkshire and the Humber, England, LS1 3DW, UK"
+"University Politehnica of Bucharest, Romania",44.43918115,26.0504456538413,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România"
+"University of Groningen, The Netherlands",53.21967825,6.56251482206542,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland"
+"Nanyang Technological University, Singapore 639798, Singapore",1.3484104,103.682979653067,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore"
+"Carnegie Mellon University, Pittsburgh, PA, USA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+University of North Carolina at Chapel Hill,35.9113971,-79.0504529,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA"
+"National Sun Yat Sen University, 804 Kaohsiung, Taiwan",22.62794005,120.266318480249,"國立中山大學, 70, 蓮海路, 桃源里, 柴山, 鼓山區, 高雄市, 804, 臺灣"
+"DIT UNIVERSITY, DEHRADUN",30.3983396,78.0753455,"DIT University, Dehradun-Mussoorie Road, Rājpur, Kincraig, Dehra Dūn, Uttarakhand, 248009, India"
+"Ulm University, Germany",48.38044335,10.0101011516362,"HNU, John-F.-Kennedy-Straße, Vorfeld, Wiley, Neu-Ulm, Landkreis Neu-Ulm, Schwaben, Bayern, 89231, Deutschland"
+"Beijing Normal University, China",39.96014155,116.359704380265,"北京师范大学, 19, 新街口外大街, 西城区, 100875, 中国"
+University (ITU,55.65965525,12.5910768893446,"IT-Universitetet i København, Emil Holms Kanal, Christianshavn, København, Københavns Kommune, Region Hovedstaden, 1424, Danmark"
+"Indian Institute of Science, India",13.0222347,77.5671832476811,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India"
+Otto von Guericke University,52.14005065,11.6447124822347,"Otto-von-Guericke-Universität Magdeburg, 2, Universitätsplatz, Krökentorviertel/Breiter Weg NA, Alte Neustadt, Magdeburg, Sachsen-Anhalt, 39106, Deutschland"
+"Tsinghua University, Beijing 100084, P.R.China",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+Carnegie Mellon University,37.4102193,-122.059654865858,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA"
+"Newcastle University, Newcastle upon Tyne",54.98023235,-1.61452627035949,"Newcastle University, Claremont Walk, Haymarket, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE1 7RU, UK"
+"National University of Kaohsiung, 811 Kaohsiung, Taiwan",22.73424255,120.283497550993,"國立高雄大學, 中央廣場, 藍田, 藍田里, 楠梓區, 高雄市, 811, 臺灣"
+"UNIVERSITY OF CALIFORNIA, BERKELEY",37.8687126,-122.255868148743,"Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA"
+MASSACHUSETTS INSTITUTE OF TECHNOLOGY (MIT,42.3583961,-71.0956778766393,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+"University of Peshawar, Peshawar, Pakistan",34.0092004,71.4877494739102,"University of Peshawar, Road 2, JAHANGIR ABAD / جهانگیرآباد, پشاور‎, Peshāwar District, خیبر پختونخوا, 2500, ‏پاکستان‎"
+"Bar Ilan University, Israel",32.06932925,34.8433433861531,"אוניברסיטת בר אילן, כביש גהה, גבעת שמואל, קריית מטלון, גבעת שמואל, מחוז תל אביב, NO, ישראל"
+Rice University,29.71679145,-95.4047811339379,"Rice University, Stockton Drive, Houston, Harris County, Texas, 77005-1890, USA"
+"Northeastern University, Boston, USA",42.3383668,-71.0879352428284,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA"
+IMPERIAL COLLEGE,39.9458551,116.406973072869,"国子监, 五道营胡同, Naga上院, 北京市, 东城区, 北京市, 100010, 中国"
+"University of Denver, Denver, CO",39.6766541,-104.962203,"University of Denver, Driscoll Bridge, Denver, Denver County, Colorado, 80208, USA"
+"Chung-Ang University, Seoul, Korea",37.50882,126.9619,"중앙대학교, 서달로15길, 흑석동, 동작구, 서울특별시, 06981, 대한민국"
+Institute,38.3836097,-81.7654665,"Institute, Kanawha County, West Virginia, 25112, USA"
+"Virginia Polytechnic Institute and State University, Blacksburg, Virginia",37.21872455,-80.4254251869494,"Virginia Polytechnic Institute and State University, Duck Pond Drive, Blacksburg, Montgomery County, Virginia, 24061-9517, USA"
+"Chonbuk National University, Jeonju-si",35.84658875,127.135013303058,"전북대학교, 567, 백제대로, 금암동, 덕진구, 전주시, 전북, 54896, 대한민국"
+"California Institute of Technology, USA",34.13710185,-118.125274866116,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA"
+Nanjing University of Science and Technology,32.031826,118.852142742792,"南京理工大学, 友谊路, 余粮庄, 玄武区, 南京市, 江苏省, 210016, 中国"
+"Carnegie Mellon University, Pittsburgh, PA, 15213, USA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+National Institute of Technology Karnataka,13.01119095,74.7949882494716,"National Institute of Technology, Karnataka, NH66, ದಕ್ಷಿಣ ಕನ್ನಡ, Mangaluru taluk, Dakshina Kannada, Karnataka, 575025, India"
+"University of Michigan, Ann Arbor, MI",42.2942142,-83.710038935096,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA"
+Oregon State University,45.5198289,-122.677979643331,"OSU Beaver Store, 538, Southwest 6th Avenue, Portland Downtown, Portland, Multnomah County, Oregon, 97204, USA"
+"Bournemouth University, UK",50.74223495,-1.89433738695589,"Bournemouth University, BU footpaths, Poole, South West England, England, BH10 4HX, UK"
+Sungkyunkwan University,37.3003127,126.972123,"성균관대, 덕영대로, 천천동, 장안구, 수원시, 경기, 16357, 대한민국"
+Semarang State University,-7.00349485,110.417749486905,"Mandiri University, Jalan Tambora, RW 10, Tegalsari, Candisari, Semarang, Jawa Tengah, 50252, Indonesia"
+Nanyang Technological University,1.3484104,103.682979653067,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore"
+"Imperial College London, London, UK",51.49887085,-0.175607973937072,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK"
+Cambridge Research Laboratory,52.17333465,0.149899463173698,"Strangeways Research Laboratory, Babraham Road, Romsey, Cambridge, Cambridgeshire, East of England, England, CB1 8RN, UK"
+The Open University,52.02453775,-0.709274809394501,"The Open University, East Lane, Walton, Monkston, Milton Keynes, South East, England, MK7 6AE, UK"
+University of Oradea,47.0570222,21.922709,"Universitatea Creștină Partium - Clădirea Sulyok, 27, Strada Primăriei, Orașul Nou, Oradea, Bihor, 410209, România"
+Sun Yat-sen University,23.09461185,113.287889943975,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国"
+University of Iceland,64.137274,-21.9456145356869,"Háskóli Íslands, Sturlugata, Háskóli, Reykjavík, Reykjavíkurborg, Höfuðborgarsvæðið, 121, Ísland"
+Beijing Jiaotong University,39.94976005,116.33629045844,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国"
+"King Saud University, Riyadh",24.7246403,46.623350123456,"King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية"
+"University POLITEHNICA of Bucharest, Bucharest, Romania",44.43918115,26.0504456538413,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România"
+Banaras Hindu University,25.2662887,82.9927969,"काशी हिन्दू विश्वविद्यालय, Semi Circle Road 2, ワーラーナシー, Jodhpur Colony, Vārānasi, Varanasi, Uttar Pradesh, 221005, India"
+"Istanbul Technical University, Istanbul, 34469, TURKEY",41.10427915,29.022311592943,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye"
+National Institute of Advanced Industrial Science and Technology,36.05238585,140.118523607658,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本"
+University of Leeds,53.80387185,-1.55245712031677,"University of Leeds, Inner Ring Road, Woodhouse, Leeds, Yorkshire and the Humber, England, LS2 9NS, UK"
+"The University of Electro-Communications, JAPAN",35.6572957,139.542558677257,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本"
+"University of California, San Diego, La Jolla",32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+"Nanjing University of Science and Technology, China",32.031826,118.852142742792,"南京理工大学, 友谊路, 余粮庄, 玄武区, 南京市, 江苏省, 210016, 中国"
+Guangdong Medical College,23.1294489,113.343761097683,"医学院, 真如路, 凤凰新村, 天河区, 广州市, 广东省, 510635, 中国"
+"Imperial College, London, UK",51.5004171,-0.1782711,"Sung Chuan Kung Fu, Imperial College, Prince Consort Road, City of Westminster, London, Greater London, England, SW7 2QU, UK"
+"Massachusetts Institute of Technology, Cambridge, MA 02139, USA",42.3583961,-71.0956778766393,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+"Sharda University, Greater Noida, India",28.4737512,77.4836148,"Sharda University, Yamuna Expressway, Greater Noida, Gautam Buddha Nagar, Uttar Pradesh, 201308, India"
+"Shanghai Jiao Tong University, Shanghai 200240, China",31.02775885,121.432219256081,"上海交通大学(闵行校区), 宣怀大道, 紫竹科技园区, 英武, 闵行区, 上海市, 200240, 中国"
+Czech Technical University,50.0764296,14.418023122743,"České vysoké učení technické v Praze, Resslova, Nové Město, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 11121, Česko"
+"Oxford University, UK",51.7488051,-1.23874457456279,"James Mellon Hall, Rectory Road, New Marston, Oxford, Oxon, South East, England, OX4 1BU, UK"
+Otto von Guericke University,52.14005065,11.6447124822347,"Otto-von-Guericke-Universität Magdeburg, 2, Universitätsplatz, Krökentorviertel/Breiter Weg NA, Alte Neustadt, Magdeburg, Sachsen-Anhalt, 39106, Deutschland"
+University of Wisconsin-Madison,43.07982815,-89.4306642542901,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA"
+"Sun Yat-Sen University, Guangzhou, P.R. China",23.09461185,113.287889943975,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国"
+"University of St Andrews, United Kingdom",56.3411984,-2.7930938,"University of St Andrews, North Street, Albany Park Student accommodation, Carngour, St Andrews, Fife, Scotland, KY16 9AJ, UK"
+"University POLITEHNICA Timisoara, Timisoara, 300223, Romania",45.746189,21.2275507517647,"UPT, Bulevardul Vasile Pârvan, Elisabetin, Timișoara, Timiș, 300223, România"
+Gdansk University of Technology,54.37086525,18.6171601574695,"PG, Romualda Traugutta, Królewska Dolina, Wrzeszcz Górny, Gdańsk, pomorskie, 80-233, RP"
+"Nanyang Technological University, Singapore",1.3484104,103.682979653067,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore"
+Chubu University,35.2742655,137.013278412463,"中部大学, 国道19号, 春日井市, 愛知県, 中部地方, 487-8501, 日本"
+"Karlsruhe Institute of Technology, 76131 Karlsruhe, Germany",49.10184375,8.43312559623876,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland"
+OF PRINCETON UNIVERSITY,40.34829285,-74.66308325,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA"
+"Beijing Normal University, China",39.96014155,116.359704380265,"北京师范大学, 19, 新街口外大街, 西城区, 100875, 中国"
+"University of Texas, Austin, TX 78712-1188, USA",30.284458,-97.7342106,"University of Texas at Austin, 2152, San Jacinto Boulevard, Medical District, Austin, Travis County, Texas, 78712, USA"
+"University of Technology, Sydney, Australia",-33.8828784,151.200682779726,"UTS, Thomas Street, Ultimo, Sydney, NSW, 2007, Australia"
+"Ulm University, Germany",48.38044335,10.0101011516362,"HNU, John-F.-Kennedy-Straße, Vorfeld, Wiley, Neu-Ulm, Landkreis Neu-Ulm, Schwaben, Bayern, 89231, Deutschland"
+University of Michigan,42.2942142,-83.710038935096,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA"
+"Bournemouth University, UK",50.74223495,-1.89433738695589,"Bournemouth University, BU footpaths, Poole, South West England, England, BH10 4HX, UK"
+Institute of Computer Science,35.15456615,128.098476040221,"Institute of Computer Science, 8, 내동로, 신율리, 진주시, 경남, 52669, 대한민국"
+Korea University,37.5901411,127.0362318,"고려대, 안암로, 제기동, 동대문구, 서울특별시, 02796, 대한민국"
+Queen Mary University of London,51.5247272,-0.0393103466301624,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK"
+"North Dakota State University, Fargo, ND 58108-6050, USA",46.897155,-96.8182760282419,"North Dakota State University, 15th Avenue North, Fargo, Cass County, North Dakota, 58102, USA"
+AI Institute,-34.6102167,-58.3752244291708,"INDEC, 609, Avenida Presidente Julio A. Roca, Microcentro, Comuna 1, Monserrat, CABA, C1067ABB, Argentina"
+UNIVERSITY OF OULU,65.0592157,25.466326012507,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi"
+University of Amsterdam,52.3553655,4.9501644,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland"
+University of California Davis,38.5336349,-121.790772639747,"University of California, Davis, Apiary Drive, Yolo County, California, 95616-5270, USA"
+Sakarya University,40.76433515,30.3940787517111,"Sakarya Üniversitesi Diş Hekimliği Fakültesi, Adnan Menderes Caddesi, Güneşler, Adapazarı, Sakarya, Marmara Bölgesi, 54050, Türkiye"
+"Beijing Institute of Technology, China",39.9586652,116.309712808455,"北京理工大学, 5, 中关村南大街, 中关村, 稻香园南社区, 海淀区, 北京市, 100872, 中国"
+University of Engineering and Technology,31.6914689,74.2465617,"University of Engineering and Technology, Lahore Bypass, لاہور, Shekhūpura District, پنجاب, ‏پاکستان‎"
+"Indian Institute of Science, India",13.0222347,77.5671832476811,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India"
+"Hankuk University of Foreign Studies, South Korea",37.5953979,127.0630499,"외대앞, 휘경로, 이문동, 이문2동, 동대문구, 서울특별시, 02407, 대한민국"
+Jahangirnagar University,23.883312,90.2693921,"Jahangirnagar University, 1342, University Main Road, সাভার, সাভার উপজেলা, ঢাকা জেলা, ঢাকা বিভাগ, 1342, বাংলাদেশ"
+Rice University,29.71679145,-95.4047811339379,"Rice University, Stockton Drive, Houston, Harris County, Texas, 77005-1890, USA"
+Indian Institute of Science Bangalore,13.0222347,77.5671832476811,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India"
+"Manonmaniam Sundaranar University, India",8.76554685,77.65100444813,"Manonmaniam Sundaranar University, Tenkasi-Tirunelveli, Gandhi Nagar, Tirunelveli, Tirunelveli Kattabo, Tamil Nadu, 627808, India"
+Hacettepe University,39.86742125,32.7351907206768,"Hacettepe Üniversitesi Beytepe Kampüsü, Hacettepe-Beytepe Kampüs Yolu, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye"
+Monash University,-37.78397455,144.958674326093,"Monash University, Mile Lane, Parkville, City of Melbourne, Victoria, 3000, Australia"
+National Institute of Technology Karnataka,13.01119095,74.7949882494716,"National Institute of Technology, Karnataka, NH66, ದಕ್ಷಿಣ ಕನ್ನಡ, Mangaluru taluk, Dakshina Kannada, Karnataka, 575025, India"
+"University of Notre Dame. Notre Dame, IN 46556.USA",41.70456775,-86.2382202601727,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA"
+"The University of Texas at Dallas, Richardson, TX",32.9820799,-96.7566278,"University of Texas at Dallas, Richardson, Dallas County, Texas, 78080, USA"
+"University POLITEHNICA of Bucharest, Bucharest, Romania",44.43918115,26.0504456538413,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România"
+Nanjing University of Information Science and Technology,32.2068102,118.718472893883,"南京信息工程大学, 龙山北路, 第十六街区, 浦口区, 南京市, 江苏省, 210032, 中国"
+"Khalifa University, Abu Dhabi, United Arab Emirates",24.4469025,54.3942563,"Khalifa University, شارع طَوِي مُوَيلِح, قصر الشاطئ, حدبة الزَّعْفرانة, أبوظبي, أبو ظبي, 31757, الإمارات العربية المتحدة"
+"University of Science and Technology of China, Hefei, China",31.83907195,117.264207478576,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国"
+"Imperial College London, London, UK",51.49887085,-0.175607973937072,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK"
+"Carnegie Mellon University, Pittsburgh, PA, USA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+Institute,38.3836097,-81.7654665,"Institute, Kanawha County, West Virginia, 25112, USA"
+DUBLIN CITY UNIVERSITY,53.38522185,-6.25740874081493,"Dublin City University Glasnevin Campus, Lower Car Park, Wad, Whitehall A ED, Dublin 9, Dublin, County Dublin, Leinster, D09 FW22, Ireland"
+University of Kentucky,38.0333742,-84.5017758,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA"
+National Taipei University,24.94314825,121.368629787836,"國立臺北大學, 151, 大學路, 龍恩里, 隆恩埔, 三峽區, 新北市, 23741, 臺灣"
+Imperial College London,51.49887085,-0.175607973937072,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK"
+"Idiap Research Institute, Switzerland",46.109237,7.08453548522408,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra"
+"University of Barcelona, Spain",41.3868913,2.16352384576632,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España"
+"Nanjing University, Nanjing 210023, China",32.0565957,118.774088328078,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国"
+"National Taiwan University, Taipei, Taiwan",25.01682835,121.538469235773,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣"
+National Institute of Standards and Technology,39.1254938,-77.2229347515,"National Institute of Standards and Technology, Summer Walk Drive, Diamond Farms, Gaithersburg, Montgomery County, Maryland, 20878, USA"
+University of Oxford,51.7534538,-1.25400997048855,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK"
+B.S. University of Central Florida,28.59899755,-81.1971250118395,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA"
+Institute of,38.3836097,-81.7654665,"Institute, Kanawha County, West Virginia, 25112, USA"
+Middle East Technical University,39.87549675,32.7855350558467,"ODTÜ, 1, 1591.sk(315.sk), Çiğdem Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye"
+University of Science and Technology of China,31.83907195,117.264207478576,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国"
+Sungkyunkwan University,37.3003127,126.972123,"성균관대, 덕영대로, 천천동, 장안구, 수원시, 경기, 16357, 대한민국"
+"Nanjing University of Science and Technology, China",32.031826,118.852142742792,"南京理工大学, 友谊路, 余粮庄, 玄武区, 南京市, 江苏省, 210016, 中国"
+University of Iceland,64.137274,-21.9456145356869,"Háskóli Íslands, Sturlugata, Háskóli, Reykjavík, Reykjavíkurborg, Höfuðborgarsvæðið, 121, Ísland"
+"Stamford University Bangladesh, Dhaka-1209, Bangladesh",23.7448166,90.4084351355108,"Stamford University Bangladesh, Siddeshwari Road, ফকিরাপুল, Paltan, ঢাকা, ঢাকা বিভাগ, 1217, বাংলাদেশ"
+University of North Carolina Wilmington,34.2375581,-77.9270129,"Kenan House, 1705, Market Street, Wilmington, New Hanover County, North Carolina, 28403, USA"
+"Aberystwyth University, UK",52.4107358,-4.05295500914411,"Aberystwyth University, Llanbadarn Campus, Cefn Esgair, Waun Fawr, Comins Coch, Ceredigion, Wales, SY23 3JG, UK"
+UNIVERSITY OF TAMPERE,61.49412325,23.7792067776763,"Tampereen yliopisto, 4, Kalevantie, Ratinanranta, Tulli, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33100, Suomi"
+Santa Fe Institute,35.7002878,-105.908648471331,"Santa Fe Institute, Hyde Park Road, Santa Fe, Santa Fe County, New Mexico, 87501, USA"
+Nanyang Technological University,1.3484104,103.682979653067,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore"
+Temple University,39.95472495,-75.1534690525548,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA"
+Ionian University,38.2899482,21.7886469,"Πανεπιστήμιο Πατρών, Λεωφ. Ιπποκράτους, κ. Ρίου (Αγίου Γεωργίου Ρίου), Πάτρα, Δήμος Πατρέων, Περιφερειακή Ενότητα Αχαΐας, Περιφέρεια Δυτικής Ελλάδας, Πελοπόννησος, Δυτική Ελλάδα και Ιόνιο, 26443, Ελλάδα"
+"Graz University of Technology, Austria",47.05821,15.460195677136,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich"
+"Stony Brook University, NY 11794, USA",40.9153196,-73.1270626,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA"
+"University of Notre Dame, USA",41.70456775,-86.2382202601727,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA"
+National Taiwan University of Science and Technology,25.01353105,121.541737363138,"臺科大, 43, 基隆路四段, 學府里, 下內埔, 大安區, 臺北市, 10607, 臺灣"
+"Zhejiang Normal University, Jinhua, China",29.13646725,119.637686517179,"浙江师范大学, 688, 迎宾大道, 柳湖花园, 金华市, 婺城区 (Wucheng), 金华市 / Jinhua, 浙江省, 321004, 中国"
+"Chung-Ang University, Seoul, Korea",37.50882,126.9619,"중앙대학교, 서달로15길, 흑석동, 동작구, 서울특별시, 06981, 대한민국"
+University of Cambridge,52.17638955,0.143088815415187,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK"
+National Institute of Advanced Industrial Science and Technology,36.05238585,140.118523607658,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本"
+University of California Santa Barbara,34.4145937,-119.84581949869,"UCSB, Santa Barbara County, California, 93106, USA"
+Queensland University of Technology(QUT,-27.4770485,153.028373791304,"QUT Gardens Point Main Library, V, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia"
+"Dartmouth College, NH 03755 USA",43.7070046,-72.2869048,"Dartmouth College, Maynard Street, Hanover, Grafton County, New Hampshire, 03755, USA"
+"University of Illinois at Urbana Champaign, Urbana",40.101976,-88.2314378,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA"
+"Amirkabir University of Technology, Tehran. Iran",35.704514,51.4097205774739,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎"
+Beijing Jiaotong University,39.94976005,116.33629045844,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国"
+"Nanyang Technological University, Singapore 639798, Singapore",1.3484104,103.682979653067,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore"
+Tokyo Institute of Technology,35.5167538,139.483422513406,"東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本"
+"Australian National University, Canberra",-35.28121335,149.11665331324,"Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia"
+"University of Crete, Crete, 73100, Greece",35.3713024,24.4754408,"House of Europe, Μακεδονίας, Ρέθυμνο, Δήμος Ρεθύμνης, Περιφερειακή Ενότητα Ρεθύμνου, Περιφέρεια Κρήτης, Κρήτη, 930100, Ελλάδα"
+University of Washington,47.6543238,-122.308008943203,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA"
+"Curtin University, Perth, Australia",-32.00574155,115.892864389257,"Curtin University, B201 L2 Entry South, Waterford, Perth, Western Australia, 6102, Australia"
+MASSACHUSETTS INSTITUTE OF TECHNOLOGY (MIT,42.3583961,-71.0956778766393,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+"West Virginia University, Morgantown, WV 26506, USA",39.65404635,-79.96475355,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA"
+University of Southern California,34.0224149,-118.286344073446,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA"
+National University of singapore,1.2962018,103.776899437848,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore"
+East China Normal University,31.2284923,121.402113889769,"华东师范大学, 3663, 中山北路, 曹家渡, 普陀区, 普陀区 (Putuo), 上海市, 200062, 中国"
+Wayne State University,42.357757,-83.0628671134125,"Parking Structure 3, East Warren Avenue, New Center, Detroit, Wayne County, Michigan, 48236, USA"
+"Queensland University of Technology, Australia",-27.47715625,153.028410039129,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia"
+Islamic Azad University,34.8452999,48.5596212013643,"دانشگاه آزاد اسلامی, همدان, بخش مرکزی شهرستان همدان, شهرستان همدان, استان همدان, ‏ایران‎"
+University of Texas,32.3163078,-95.2536994379459,"The University of Texas at Tyler, 3900, University Boulevard, Tyler, Smith County, Texas, 75799, USA"
+The Weizmann Institute of,31.904187,34.807378,"מכון ויצמן, הרצל, מעונות וולפסון, נווה עמית, רחובות, מחוז המרכז, NO, ישראל"
+"University of Lincoln, U.K",53.22853665,-0.548734723802121,"University of Lincoln, Brayford Way, Whitton Park, New Boultham, Lincoln, Lincolnshire, East Midlands, England, LN6 7TS, UK"
+University of Science and,5.35755715,100.303850375,"USM, Lengkok Sastera, The LIGHT, Batu Uban, George Town, PNG, 11700, Malaysia"
+Rutgers University,40.47913175,-74.431688684404,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA"
+Tsinghua University,40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+COMSATS Institute of Information Technology,31.4006332,74.2137296,"COMSATS Institute of Information Technology, Ali Akbar Road, Dawood Residency, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54700, ‏پاکستان‎"
+Istanbul Technical University,41.10427915,29.022311592943,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye"
+"Michigan State University, East Lansing MI",42.718568,-84.4779157093052,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA"
+"University Politehnica of Bucharest, Romania",44.43918115,26.0504456538413,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România"
+"DIT UNIVERSITY, DEHRADUN",30.3983396,78.0753455,"DIT University, Dehradun-Mussoorie Road, Rājpur, Kincraig, Dehra Dūn, Uttarakhand, 248009, India"
+Massachusetts Institute,42.3583961,-71.0956778766393,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+"University of California, San Diego, La Jolla",32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+OF STANFORD UNIVERSITY,37.43131385,-122.169365354983,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA"
+Stevens Institute of Technology,40.742252,-74.0270949,"Stevens Institute of Technology, River Terrace, Hoboken, Hudson County, New Jersey, 07030, USA"
+"University of Central Florida, Orlando",28.42903955,-81.4421617727936,"Rosen College of Hospitality Management, 9907, Universal Boulevard, Orange County, Florida, 32819, USA"
+"Southwest University, Chongqing 400715, China",29.82366295,106.420500156445,"西南大学, 天生路, 北碚区 (Beibei), 北碚区, 北碚区 (Beibei), 重庆市, 400711, 中国"
+The Ohio State University,40.00471095,-83.0285936787604,"The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA"
+University of Wisconsin Madison,43.07982815,-89.4306642542901,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA"
+Japan,36.5748441,139.2394179,日本
+"University of California, Riverside CA 92521-0425, USA",33.9743275,-117.32558236636,"UCR, North Campus Drive, Riverside, Riverside County, California, 92521, USA"
+Sun Yat-sen University,23.09461185,113.287889943975,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国"
+"University of Vienna, Austria",48.2131302,16.3606865338016,"Uni Wien, 1, Universitätsring, Schottenviertel, KG Innere Stadt, Innere Stadt, Wien, 1010, Österreich"
+Katholieke Universiteit Leuven,50.8830686,4.7019503,"Laboratorium voor Bos, natuur en landschap, 102, Vital Decosterstraat, Sint-Maartensdal, Leuven, Vlaams-Brabant, Vlaanderen, 3000, België / Belgique / Belgien"
+"University of Malaya, 50603 Kuala Lumpur, Malaysia",3.12267405,101.65356103394,"UM, Lingkaran Wawasan, Bukit Pantai, Bangsar, KL, 50603, Malaysia"
+"Kingston University, UK",51.4293086,-0.2684044,"Kingston University, Kingston Hill, Kingston Vale, Kingston-upon-Thames, London, Greater London, England, KT2 7TF, UK"
+University of Massachusetts Amherst,42.3869382,-72.5299147706745,"UMass Amherst, Commonwealth Avenue, Amherst, Hampshire, Massachusetts, 01003, USA"
+Institute of Automation,54.1720834,12.0790983,"Institut für Automatisierungstechnik, 31, Richard-Wagner-Straße, Warnemünde, Ortsbeirat 1 : Diedrichshagen,Seebad Warnemünde, Rostock, Mecklenburg-Vorpommern, 18119, Deutschland"
+"Harbin Institute of Technology, Harbin 150001, China",45.7413921,126.625527550394,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国"
+"University of Exeter, UK",50.7369302,-3.53647671702167,"University of Exeter, Stocker Road, Exwick, Exeter, Devon, South West England, England, EX4 4QN, UK"
+Vrije Universiteit Brussel,50.8411007,4.32377555279953,"Vrije Universiteit Brussel, 170, Quai de l'Industrie - Nijverheidskaai, Anderlecht, Brussel-Hoofdstad - Bruxelles-Capitale, Région de Bruxelles-Capitale - Brussels Hoofdstedelijk Gewest, 1070, België / Belgique / Belgien"
+University of Information,34.17980475,-117.325843648456,"Information, University Parkway, San Bernardino, San Bernardino County, California, 92407, USA"
+"University of Groningen, The Netherlands",53.21967825,6.56251482206542,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland"
+Xiamen University,24.4399419,118.093017809127,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国"
+the Chinese University of Hong Kong,22.42031295,114.207886442805,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国"
+HoHai University,32.05765485,118.755000398628,"河海大学, 河海路, 小市桥, 鼓楼区, 南京市, 江苏省, 210013, 中国"
+University of Barcelona,41.3868913,2.16352384576632,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España"
+"University of Twente, The Netherlands",52.2380139,6.8566761,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland"
+"Temple University, Philadelphia, PA 19122, USA",39.9808569,-75.149594,"Temple University, West Berks Street, Hartranft, Philadelphia, Philadelphia County, Pennsylvania, 19122, USA"
+"Carnegie Mellon University, Pittsburgh, PA, 15213, USA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+"University of South Carolina, Columbia, USA",33.9928298,-81.0268516781225,"University of South Carolina, Wheat Street, Columbia, Richland County, South Carolina, 29205, USA"
+"University of Washington, Seattle, WA 98195, United States",47.6547795,-122.305818,"University of Washington, Yakima Lane, Montlake, University District, Seattle, King County, Washington, 98195, USA"
+"Tampere University of Technology, Tampere, Finland",61.44964205,23.8587746189096,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi"
+"Queen Mary University of London, UK",51.5247272,-0.0393103466301624,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK"
+"The Ohio State University, OH",40.00471095,-83.0285936787604,"The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA"
+"Queen Mary University of London, London E1 4NS, UK",51.5247272,-0.0393103466301624,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK"
+Courant Institute,40.7286994,-73.9957151,"NYU Courant Institute of Mathematical Sciences, 251, Mercer Street, Washington Square Village, Greenwich Village, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA"
+"National Sun Yat Sen University, 804 Kaohsiung, Taiwan",22.62794005,120.266318480249,"國立中山大學, 70, 蓮海路, 桃源里, 柴山, 鼓山區, 高雄市, 804, 臺灣"
+University of Northern British Columbia,53.8925662,-122.814715920529,"UNBC, Campus Ring Road, College Heights, Prince George, Regional District of Fraser-Fort George, British Columbia, V2M 5K7, Canada"
+"King Saud University, Riyadh",24.7246403,46.623350123456,"King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية"
+"Bar Ilan University, Israel",32.06932925,34.8433433861531,"אוניברסיטת בר אילן, כביש גהה, גבעת שמואל, קריית מטלון, גבעת שמואל, מחוז תל אביב, NO, ישראל"
+"National University of Kaohsiung, 811 Kaohsiung, Taiwan",22.73424255,120.283497550993,"國立高雄大學, 中央廣場, 藍田, 藍田里, 楠梓區, 高雄市, 811, 臺灣"
+to Michigan State University,42.7231021,-84.4449848597663,"Red Cedar River, Small Acres Lane, Okemos, Ingham County, Michigan, 48864, USA"
+Plymouth University,50.3755269,-4.13937687442817,"Plymouth University, Portland Square, Barbican, Plymouth, South West England, England, PL4 6AP, UK"
+"University of Twente, The Netherlands",52.2380139,6.8566761,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland"
+Brown University,41.8268682,-71.4012314581107,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA"
+The Weizmann Institute of,31.904187,34.807378,"מכון ויצמן, הרצל, מעונות וולפסון, נווה עמית, רחובות, מחוז המרכז, NO, ישראל"
+University of Oradea,47.0570222,21.922709,"Universitatea Creștină Partium - Clădirea Sulyok, 27, Strada Primăriei, Orașul Nou, Oradea, Bihor, 410209, România"
+Tokyo Institute of Technology,35.5167538,139.483422513406,"東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本"
+"The University of North Carolina at Charlotte, USA",35.3103441,-80.732616166699,"Lot 20, Poplar Terrace Drive, Charlotte, Mecklenburg County, North Carolina, 28223, USA"
+"Northeastern University, Boston, USA",42.3383668,-71.0879352428284,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA"
+"The University of Electro-Communications, JAPAN",35.6572957,139.542558677257,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本"
+"Imperial College, London, UK",51.5004171,-0.1782711,"Sung Chuan Kung Fu, Imperial College, Prince Consort Road, City of Westminster, London, Greater London, England, SW7 2QU, UK"
+University of Cambridge,52.17638955,0.143088815415187,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK"
+University of Kentucky,38.0333742,-84.5017758,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA"
+"Nanjing University of Science and Technology, China",32.031826,118.852142742792,"南京理工大学, 友谊路, 余粮庄, 玄武区, 南京市, 江苏省, 210016, 中国"
+"University of Illinois at Urbana Champaign, Urbana",40.101976,-88.2314378,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA"
+Wayne State University,42.357757,-83.0628671134125,"Parking Structure 3, East Warren Avenue, New Center, Detroit, Wayne County, Michigan, 48236, USA"
+"Qatar University, Qatar",25.37461295,51.4898035392337,"Qatar University, Roindabout 3, Al Tarfa (68), أم صلال, 24685, ‏قطر‎"
+Shiraz University,29.6385474,52.5245706,"دانشگاه شیراز, میدان ارم, محدوده شهرداری منطقه یک - شهرداری شیراز, شیراز, بخش مرکزی شهرستان شیراز, شهرستان شیراز, استان فارس, 71348-34689, ‏ایران‎"
+"Technical University Munich, Germany",48.14955455,11.5677531417838,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland"
+"Oxford University, UK",51.7488051,-1.23874457456279,"James Mellon Hall, Rectory Road, New Marston, Oxford, Oxon, South East, England, OX4 1BU, UK"
+"Queen Mary University of London, London E1 4NS, UK",51.5247272,-0.0393103466301624,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK"
+"University of Technology, Baghdad, Iraq",33.3120263,44.4471829434368,"الجامعة التكنلوجية, A86;N11;D383, محلة 103, Al Saadoom Park, Rusafa, بغداد, Al Resafa, محافظة بغداد, 3241, العراق"
+Stevens Institute of Technology,40.742252,-74.0270949,"Stevens Institute of Technology, River Terrace, Hoboken, Hudson County, New Jersey, 07030, USA"
+"Chonbuk National University, Jeonju-si",35.84658875,127.135013303058,"전북대학교, 567, 백제대로, 금암동, 덕진구, 전주시, 전북, 54896, 대한민국"
+"National Sun Yat Sen University, 804 Kaohsiung, Taiwan",22.62794005,120.266318480249,"國立中山大學, 70, 蓮海路, 桃源里, 柴山, 鼓山區, 高雄市, 804, 臺灣"
+"COMSATS Institute of Information Technology, Pakistan",31.4006332,74.2137296,"COMSATS Institute of Information Technology, Ali Akbar Road, Dawood Residency, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54700, ‏پاکستان‎"
+"Khalifa University, Abu Dhabi, United Arab Emirates",24.4469025,54.3942563,"Khalifa University, شارع طَوِي مُوَيلِح, قصر الشاطئ, حدبة الزَّعْفرانة, أبوظبي, أبو ظبي, 31757, الإمارات العربية المتحدة"
+"Idiap Research Institute, Switzerland",46.109237,7.08453548522408,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra"
+University of Information,34.17980475,-117.325843648456,"Information, University Parkway, San Bernardino, San Bernardino County, California, 92407, USA"
+The Open University,52.02453775,-0.709274809394501,"The Open University, East Lane, Walton, Monkston, Milton Keynes, South East, England, MK7 6AE, UK"
+"Kingston University, UK",51.4293086,-0.2684044,"Kingston University, Kingston Hill, Kingston Vale, Kingston-upon-Thames, London, Greater London, England, KT2 7TF, UK"
+"Australian National University, Canberra",-35.28121335,149.11665331324,"Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia"
+"Aberystwyth University, UK",52.4107358,-4.05295500914411,"Aberystwyth University, Llanbadarn Campus, Cefn Esgair, Waun Fawr, Comins Coch, Ceredigion, Wales, SY23 3JG, UK"
+COMSATS Institute of Information Technology,31.4006332,74.2137296,"COMSATS Institute of Information Technology, Ali Akbar Road, Dawood Residency, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54700, ‏پاکستان‎"
+University of Southern California,34.0224149,-118.286344073446,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA"
+The University of Electro-Communications,35.6572957,139.542558677257,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本"
+University of Wisconsin Madison,43.07982815,-89.4306642542901,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA"
+"Queensland University of Technology, Australia",-27.47715625,153.028410039129,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia"
+Bas kent University,52.08340265,5.14828494152362,"University College Utrecht 'Babel', 7, Campusplein, Utrecht, Nederland, 3584 ED, Nederland"
+"Ulm University, Germany",48.38044335,10.0101011516362,"HNU, John-F.-Kennedy-Straße, Vorfeld, Wiley, Neu-Ulm, Landkreis Neu-Ulm, Schwaben, Bayern, 89231, Deutschland"
+"The University of Texas at Dallas, Richardson, TX",32.9820799,-96.7566278,"University of Texas at Dallas, Richardson, Dallas County, Texas, 78080, USA"
+"National University of Sciences and Technology (NUST), Islamabad, Pakistan",33.644347,72.9885079,"National University of Sciences and Technology (NUST), Kashmir Highway, جی - 10, ICT, وفاقی دارالحکومت اسلام آباد, 44000, ‏پاکستان‎"
+National University of Singapore,1.2962018,103.776899437848,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore"
+Multimedia University,2.92749755,101.641853013536,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia"
+"North Dakota State University, Fargo, ND 58108-6050, USA",46.897155,-96.8182760282419,"North Dakota State University, 15th Avenue North, Fargo, Cass County, North Dakota, 58102, USA"
+National Taiwan University of Science and Technology,25.01353105,121.541737363138,"臺科大, 43, 基隆路四段, 學府里, 下內埔, 大安區, 臺北市, 10607, 臺灣"
+Meiji University,35.6975029,139.761391749285,"明治大学, 錦華坂, 猿楽町1, 猿楽町, 東京, 千代田区, 東京都, 関東地方, 101-0051, 日本"
+Northeastern University,42.3383668,-71.0879352428284,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA"
+UNIVERSITY OF OULU,65.0592157,25.466326012507,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi"
+Middlebury College,44.0090777,-73.1767946,"Middlebury College, Old Chapel Road, Middlebury, Addison County, Vermont, 05753, USA"
+Gdansk University of Technology,54.37086525,18.6171601574695,"PG, Romualda Traugutta, Królewska Dolina, Wrzeszcz Górny, Gdańsk, pomorskie, 80-233, RP"
+"University of Chinese Academy of Sciences, Beijing 101408, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+Vrije Universiteit Brussel,50.8411007,4.32377555279953,"Vrije Universiteit Brussel, 170, Quai de l'Industrie - Nijverheidskaai, Anderlecht, Brussel-Hoofdstad - Bruxelles-Capitale, Région de Bruxelles-Capitale - Brussels Hoofdstedelijk Gewest, 1070, België / Belgique / Belgien"
+B.S. University of Central Florida,28.59899755,-81.1971250118395,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA"
+Punjabi University Patiala,30.3568981,76.4551272,"Punjabi University Patiala, Rajpura Road, Patiala, Punjab, 147001, India"
+"Y. Li, University of Maryland",39.2864694,-76.6263409932124,"Penn Street Garage, 120, Penn Street, Ridgleys Delight, Baltimore, Maryland, 21201, USA"
+Santa Fe Institute,35.7002878,-105.908648471331,"Santa Fe Institute, Hyde Park Road, Santa Fe, Santa Fe County, New Mexico, 87501, USA"
+Massachusetts Institute,42.3583961,-71.0956778766393,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+Banaras Hindu University,25.2662887,82.9927969,"काशी हिन्दू विश्वविद्यालय, Semi Circle Road 2, ワーラーナシー, Jodhpur Colony, Vārānasi, Varanasi, Uttar Pradesh, 221005, India"
+"Tsinghua University, 100084 Beijing, China",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+University of Engineering and Technology,31.6914689,74.2465617,"University of Engineering and Technology, Lahore Bypass, لاہور, Shekhūpura District, پنجاب, ‏پاکستان‎"
+"Tsinghua University, Beijing, China",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+"University of Central Florida, Orlando",28.42903955,-81.4421617727936,"Rosen College of Hospitality Management, 9907, Universal Boulevard, Orange County, Florida, 32819, USA"
+"Stamford University Bangladesh, Dhaka-1209, Bangladesh",23.7448166,90.4084351355108,"Stamford University Bangladesh, Siddeshwari Road, ফকিরাপুল, Paltan, ঢাকা, ঢাকা বিভাগ, 1217, বাংলাদেশ"
+Korea University,37.5901411,127.0362318,"고려대, 안암로, 제기동, 동대문구, 서울특별시, 02796, 대한민국"
+Tsinghua University,40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+University of Memphis,35.1189387,-89.9372195996589,"The University of Memphis, Desoto Avenue, Memphis, Shelby County, Tennessee, 38152, USA"
+"The University of Tokyo, Japan",35.9020448,139.936220089117,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本"
+University of Massachusetts Amherst,42.3869382,-72.5299147706745,"UMass Amherst, Commonwealth Avenue, Amherst, Hampshire, Massachusetts, 01003, USA"
+University of Information,34.17980475,-117.325843648456,"Information, University Parkway, San Bernardino, San Bernardino County, California, 92407, USA"
+Stony Brook University Hospital,40.90826665,-73.1152089127966,"Stony Brook University Hospital, 101, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA"
+"University POLITEHNICA of Bucharest, Bucharest, Romania",44.43918115,26.0504456538413,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România"
+"University of Technology, Baghdad, Iraq",33.3120263,44.4471829434368,"الجامعة التكنلوجية, A86;N11;D383, محلة 103, Al Saadoom Park, Rusafa, بغداد, Al Resafa, محافظة بغداد, 3241, العراق"
+Tel Aviv University,32.1119889,34.8045970204252,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל"
+"National University of Singapore, Singapore",1.2962018,103.776899437848,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore"
+to Michigan State University,42.7231021,-84.4449848597663,"Red Cedar River, Small Acres Lane, Okemos, Ingham County, Michigan, 48864, USA"
+"University of Notre Dame, USA",41.70456775,-86.2382202601727,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA"
+National Institute of Advanced Industrial Science and Technology,36.05238585,140.118523607658,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本"
+"Queen Mary University of London, UK",51.5247272,-0.0393103466301624,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK"
+"Northeastern University, MA, USA",42.3383668,-71.0879352428284,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA"
+"University of South Carolina, Columbia, USA",33.9928298,-81.0268516781225,"University of South Carolina, Wheat Street, Columbia, Richland County, South Carolina, 29205, USA"
+"Massachusetts Institute of Technology, Cambridge, MA 02139, USA",42.3583961,-71.0956778766393,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+Istanbul Technical University,41.10427915,29.022311592943,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye"
+The Ohio State University,40.00471095,-83.0285936787604,"The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA"
+Rice University,29.71679145,-95.4047811339379,"Rice University, Stockton Drive, Houston, Harris County, Texas, 77005-1890, USA"
+"Princeton University, Princeton, New Jersey, USA",40.34829285,-74.66308325,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA"
+IMPERIAL COLLEGE,39.9458551,116.406973072869,"国子监, 五道营胡同, Naga上院, 北京市, 东城区, 北京市, 100010, 中国"
+"Y. Li, University of Maryland",39.2864694,-76.6263409932124,"Penn Street Garage, 120, Penn Street, Ridgleys Delight, Baltimore, Maryland, 21201, USA"
+"Tsinghua University, 100084 Beijing, China",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+"Technical University Munich, Germany",48.14955455,11.5677531417838,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland"
+"The University of Texas at Dallas, Richardson, TX",32.9820799,-96.7566278,"University of Texas at Dallas, Richardson, Dallas County, Texas, 78080, USA"
+Beijing Jiaotong University,39.94976005,116.33629045844,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国"
+COMSATS Institute of Information Technology,31.4006332,74.2137296,"COMSATS Institute of Information Technology, Ali Akbar Road, Dawood Residency, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54700, ‏پاکستان‎"
+National Institute of Technology Karnataka,13.01119095,74.7949882494716,"National Institute of Technology, Karnataka, NH66, ದಕ್ಷಿಣ ಕನ್ನಡ, Mangaluru taluk, Dakshina Kannada, Karnataka, 575025, India"
+"Stamford University Bangladesh, Dhaka-1209, Bangladesh",23.7448166,90.4084351355108,"Stamford University Bangladesh, Siddeshwari Road, ফকিরাপুল, Paltan, ঢাকা, ঢাকা বিভাগ, 1217, বাংলাদেশ"
+University of Cambridge,52.17638955,0.143088815415187,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK"
+"Bournemouth University, UK",50.74223495,-1.89433738695589,"Bournemouth University, BU footpaths, Poole, South West England, England, BH10 4HX, UK"
+"National University of Singapore, Singapore",1.2962018,103.776899437848,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore"
+"Imperial College London, London, UK",51.49887085,-0.175607973937072,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK"
+"Tsinghua University, 100084 Beijing, China",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+"Zhejiang Normal University, Jinhua, China",29.13646725,119.637686517179,"浙江师范大学, 688, 迎宾大道, 柳湖花园, 金华市, 婺城区 (Wucheng), 金华市 / Jinhua, 浙江省, 321004, 中国"
+Istanbul Technical University,41.10427915,29.022311592943,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye"
+"Y. Li, University of Maryland",39.2864694,-76.6263409932124,"Penn Street Garage, 120, Penn Street, Ridgleys Delight, Baltimore, Maryland, 21201, USA"
+"The University of Texas at Dallas, Richardson, TX",32.9820799,-96.7566278,"University of Texas at Dallas, Richardson, Dallas County, Texas, 78080, USA"
+Tokyo Institute of Technology,35.5167538,139.483422513406,"東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本"
+"Northeastern University, MA, USA",42.3383668,-71.0879352428284,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA"
+"Indian Institute of Technology, Roorkee",29.8662461,77.8958708109136,"Indian Institute of Technology (IIT), Roorkee, LBS Jogging Track, Roorkee, Haridwar, Uttarakhand, 247667, India"
+National Taiwan University of Science and Technology,25.01353105,121.541737363138,"臺科大, 43, 基隆路四段, 學府里, 下內埔, 大安區, 臺北市, 10607, 臺灣"
+Japan,36.5748441,139.2394179,日本
+"Idiap Research Institute, Switzerland",46.109237,7.08453548522408,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra"
+Queensland University of Technology(QUT,-27.4770485,153.028373791304,"QUT Gardens Point Main Library, V, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia"
+The University of Texas at Austin,30.284151,-97.7319559808022,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA"
+Massachusetts Institute,42.3583961,-71.0956778766393,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+"Nanjing University, Nanjing 210023, China",32.0565957,118.774088328078,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国"
+National Institute of Advanced Industrial Science and Technology,36.05238585,140.118523607658,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本"
+"University of Illinois at Urbana Champaign, Urbana, IL 61801, USA",40.1066501,-88.2240260725426,"University of Illinois at Urbana-Champaign, South Goodwin Avenue, Urbana, Champaign County, Illinois, 61801, USA"
+National Taiwan University of Science and Technology,25.01353105,121.541737363138,"臺科大, 43, 基隆路四段, 學府里, 下內埔, 大安區, 臺北市, 10607, 臺灣"
+"DIT UNIVERSITY, DEHRADUN",30.3983396,78.0753455,"DIT University, Dehradun-Mussoorie Road, Rājpur, Kincraig, Dehra Dūn, Uttarakhand, 248009, India"
+"Queensland University of Technology, Australia",-27.47715625,153.028410039129,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia"
+"Northeastern University, Boston, USA",42.3383668,-71.0879352428284,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA"
+"University of Twente, The Netherlands",52.2380139,6.8566761,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland"
+"Dartmouth College, NH 03755 USA",43.7070046,-72.2869048,"Dartmouth College, Maynard Street, Hanover, Grafton County, New Hampshire, 03755, USA"
+The University of Tokyo,35.9020448,139.936220089117,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本"
+"University of Science and Technology of China, Hefei, China",31.83907195,117.264207478576,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国"
+National Institute of Standards and Technology,39.1254938,-77.2229347515,"National Institute of Standards and Technology, Summer Walk Drive, Diamond Farms, Gaithersburg, Montgomery County, Maryland, 20878, USA"
+"Tsinghua University, Beijing, China",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+"Michigan State University, East Lansing MI",42.718568,-84.4779157093052,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA"
+"Columbia University, USA",40.8419836,-73.9436897071772,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA"
+University of Groningen,53.21967825,6.56251482206542,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland"
+"Ulm University, Germany",48.38044335,10.0101011516362,"HNU, John-F.-Kennedy-Straße, Vorfeld, Wiley, Neu-Ulm, Landkreis Neu-Ulm, Schwaben, Bayern, 89231, Deutschland"
+"Brown University, Providence, RI",41.8268682,-71.4012314581107,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA"
+"Chung-Ang University, Seoul, Korea",37.50882,126.9619,"중앙대학교, 서달로15길, 흑석동, 동작구, 서울특별시, 06981, 대한민국"
+"Sun Yat-Sen University, Guangzhou, P.R. China",23.09461185,113.287889943975,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国"
+Pondicherry Engineering College,12.0148693,79.8480910431981,"Pondicherry Engineering College, PEC MAIN ROAD, Sri Ma, Puducherry, Puducherry district, Puducherry, 605001, India"
+University of Science and Technology of China,31.83907195,117.264207478576,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国"
+"Princeton University, Princeton, New Jersey, USA",40.34829285,-74.66308325,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA"
+"National University of Sciences and Technology (NUST), Islamabad, Pakistan",33.644347,72.9885079,"National University of Sciences and Technology (NUST), Kashmir Highway, جی - 10, ICT, وفاقی دارالحکومت اسلام آباد, 44000, ‏پاکستان‎"
+University of North Carolina at Charlotte,35.3103441,-80.732616166699,"Lot 20, Poplar Terrace Drive, Charlotte, Mecklenburg County, North Carolina, 28223, USA"
+University of Engineering and Technology,31.6914689,74.2465617,"University of Engineering and Technology, Lahore Bypass, لاہور, Shekhūpura District, پنجاب, ‏پاکستان‎"
+Hacettepe University,39.86742125,32.7351907206768,"Hacettepe Üniversitesi Beytepe Kampüsü, Hacettepe-Beytepe Kampüs Yolu, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye"
+Cambridge Research Laboratory,52.17333465,0.149899463173698,"Strangeways Research Laboratory, Babraham Road, Romsey, Cambridge, Cambridgeshire, East of England, England, CB1 8RN, UK"
+"Imperial College London, London, UK",51.49887085,-0.175607973937072,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK"
+Queensland University of Technology(QUT,-27.4770485,153.028373791304,"QUT Gardens Point Main Library, V, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia"
+"Carnegie Mellon University, Pittsburgh, PA, USA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+Bournemouth University,50.74223495,-1.89433738695589,"Bournemouth University, BU footpaths, Poole, South West England, England, BH10 4HX, UK"
+Nanyang Technological University,1.3484104,103.682979653067,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore"
+Guangdong Medical College,23.1294489,113.343761097683,"医学院, 真如路, 凤凰新村, 天河区, 广州市, 广东省, 510635, 中国"
+Institute of Computer Science,35.15456615,128.098476040221,"Institute of Computer Science, 8, 내동로, 신율리, 진주시, 경남, 52669, 대한민국"
+"The Ohio State University, OH",40.00471095,-83.0285936787604,"The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA"
+Korea University,37.5901411,127.0362318,"고려대, 안암로, 제기동, 동대문구, 서울특별시, 02796, 대한민국"
+"Shanghai Jiao Tong University, Shanghai 200240, China",31.02775885,121.432219256081,"上海交通大学(闵行校区), 宣怀大道, 紫竹科技园区, 英武, 闵行区, 上海市, 200240, 中国"
+"University of California, Riverside CA 92521-0425, USA",33.9743275,-117.32558236636,"UCR, North Campus Drive, Riverside, Riverside County, California, 92521, USA"
+"Nanyang Technological University, Singapore 639798, Singapore",1.3484104,103.682979653067,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore"
+Rice University,29.71679145,-95.4047811339379,"Rice University, Stockton Drive, Houston, Harris County, Texas, 77005-1890, USA"
+"University of Technology, Sydney, Australia",-33.8828784,151.200682779726,"UTS, Thomas Street, Ultimo, Sydney, NSW, 2007, Australia"
+Xerox Research Center,43.5129109,-79.6664076152913,"Xerox Research Centre of Canada, 2660, Speakman Drive, Sheridan Park, Erin Mills, Ont., Peel Region, Ontario, L5J 2M4, Canada"
+"Howard University, Washington DC",38.921525,-77.019535656678,"Howard University, College Street Northwest, Howard University, Washington, D.C., 20001, USA"
+Purdue University,40.4319722,-86.923893679845,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA"
+"San Jose State University, San Jose, CA",37.3351908,-121.881260081527,"SJSU, El Paseo de Cesar E. Chavez, Downtown Historic District, Japantown, San José, Santa Clara County, California, 95113, USA"
+"Tohoku University, Japan",38.2530945,140.8736593,"Tohoku University, 五橋通, 青葉区, 仙台市, 宮城県, 東北地方, 980-0811, 日本"
+Portugal,40.033265,-7.8896263,Portugal
+The University of York,53.94540365,-1.0313887829649,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK"
+Queensland University of Technology (QUT,-27.4770485,153.028373791304,"QUT Gardens Point Main Library, V, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia"
+Institute of Computing Technology,34.6988529,135.1936779,"神戸情報大学院大学, フラワーロード, 中央区, 神戸市, 兵庫県, 近畿地方, 650-0001, 日本"
+"University of Houston, Houston, TX, USA",29.7207902,-95.3440627149137,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA"
diff --git a/scraper/reports/institutions_found/found-4.csv b/scraper/reports/institutions_found/found-4.csv
new file mode 100644
index 00000000..0c379d61
--- /dev/null
+++ b/scraper/reports/institutions_found/found-4.csv
@@ -0,0 +1,896 @@
+"Rutgers University, Newark, NJ, USA",40.7417586,-74.1750462269524,"Dana Library, Bleeker Street, Teachers Village, Newark, Essex County, New Jersey, 07102, USA"
+"Sri krishna College of Technology, Coimbatore, India",10.925861,76.9224672855261,"Sri Krishna College of Technology, Kovaipudur to Golf Course Road dirt track, Ward 89, South Zone, Coimbatore, Coimbatore district, Tamil Nadu, 641001, India"
+Yale University,41.25713055,-72.9896696015223,"Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA"
+Eskisehir Osmangazi University,39.7487516,30.4765307102195,"Eskişehir Osmangazi Üniversitesi Meşelik Yerleşkesi, Kütahya-Eskişehir yolu, Sazova Mahallesi, Karagözler, Tepebaşı, Eskişehir, İç Anadolu Bölgesi, 26160, Türkiye"
+"Shanghai Jiao Tong University, China",31.20081505,121.428406809373,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国"
+"University of Coimbra, Portugal",40.2075951,-8.42566147540816,"Reitoria da Universidade de Coimbra, Rua de Entre-Colégios, Almedina, Alta, Almedina, Sé Nova, Santa Cruz, Almedina e São Bartolomeu, CBR, Coimbra, Baixo Mondego, Centro, 3000-062, Portugal"
+"Fudan University, Shanghai, China",31.30104395,121.500454969435,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国"
+"University of Science and Technology of China, Hefei 230026, P. R. China",31.83907195,117.264207478576,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国"
+University Politehnica of Bucharest,44.43918115,26.0504456538413,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România"
+"Columbia University, New York NY 10027, USA",40.81779415,-73.9578531933627,"Columbia University, West 131st Street, Manhattanville Houses, Manhattanville, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10027, USA"
+University of Perugia,49.2622421,-123.2450052,"Caffe Perugia, 2350, Health Sciences Mall, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada"
+"Tsinghua University, Beijing 100084, China",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+"Tsinghua University, Beijing 100084, P.R.China",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+"University of Ottawa, Canada",45.42580475,-75.6874011819989,"University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada"
+"Lund University, Lund, Sweden",55.7039571,13.1902011,"TEM at Lund University, 9, Klostergatan, Stadskärnan, Centrum, Lund, Skåne, Götaland, 22222, Sverige"
+Princeton University,40.34829285,-74.66308325,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA"
+"Shanghai Jiao Tong University, Shanghai 200240, China",31.02775885,121.432219256081,"上海交通大学(闵行校区), 宣怀大道, 紫竹科技园区, 英武, 闵行区, 上海市, 200240, 中国"
+"National University of Ireland Maynooth, Co. Kildare, Ireland",53.3846975,-6.60039458177959,"National University of Ireland Maynooth, River Apartments, Maynooth, Maynooth ED, Maynooth Municipal District, County Kildare, Leinster, KILDARE, Ireland"
+Myongji University,37.2381023,127.1903431,"명지대, 금학로, 역북동, 처인구, 용인시, 경기, 17144, 대한민국"
+"Hankuk University of Foreign Studies, South Korea",37.5953979,127.0630499,"외대앞, 휘경로, 이문동, 이문2동, 동대문구, 서울특별시, 02407, 대한민국"
+"Arizona State University, Tempe AZ",33.4206602,-111.932634924965,"Arizona State University, Palm Walk, Tempe, Maricopa County, Arizona, 85287, USA"
+"Sharda University, Greater Noida, India",28.4737512,77.4836148,"Sharda University, Yamuna Expressway, Greater Noida, Gautam Buddha Nagar, Uttar Pradesh, 201308, India"
+The University of Electro-Communications,35.6572957,139.542558677257,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本"
+"IDIAP Research Institute, Martigny, Switzerland",46.109237,7.08453548522408,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra"
+"Peking University, Beijing, China",39.9922379,116.303938156219,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国"
+Middlesex University London,51.59029705,-0.229632209454029,"Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK"
+"Tampere University of Technology, Tampere, Finland",61.44964205,23.8587746189096,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi"
+Tafresh University,34.68092465,50.0534135183902,"دانشگاه تفرش, پاسداران, خرازان, بخش مرکزی, شهرستان تفرش, استان مرکزی, ‏ایران‎"
+"University of North Carolina at Chapel Hill, Chapel Hill, NC",35.9105975,-79.0517871,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA"
+"Temple University, Philadelphia, PA 19122, USA",39.9808569,-75.149594,"Temple University, West Berks Street, Hartranft, Philadelphia, Philadelphia County, Pennsylvania, 19122, USA"
+Hunan University,26.88111275,112.628506656425,"Yejin University for Employees, 冶金西路, 和平乡, 珠晖区, 衡阳市 / Hengyang, 湖南省, 中国"
+"University of California, Riverside CA 92521-0425, USA",33.9743275,-117.32558236636,"UCR, North Campus Drive, Riverside, Riverside County, California, 92521, USA"
+Shaheed Zulfikar Ali Bhutto Institute of,24.8186587,67.0316585,"Shaheed Zulfikar Ali Bhutto Institute of Science and Technology - Karachi Campus, Block 5, Clifton Block 5, CBC, ڪراچي Karachi, Karāchi District, سنڌ, 75600, ‏پاکستان‎"
+Punjabi University Patiala,30.3568981,76.4551272,"Punjabi University Patiala, Rajpura Road, Patiala, Punjab, 147001, India"
+"Imperial College London, UK",51.49887085,-0.175607973937072,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK"
+"Southwest Jiaotong University, Chengdu, P.R. China",30.697847,104.0520811,"西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国"
+"Arizona State University, AZ, USA",33.30715065,-111.676531568996,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA"
+"Eindhoven University of Technology, The Netherlands",51.4486602,5.49039956550805,"Technische Universiteit Eindhoven, 2, De Rondom, Villapark, Eindhoven, Noord-Brabant, Nederland, 5600 MB, Nederland"
+"Rensselaer Polytechnic Institute, USA",42.7298459,-73.6795021620135,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA"
+"Boston College, USA",42.3354481,-71.1681386402306,"Boston College, 140, Commonwealth Avenue, Chestnut Hill, Newton, Middlesex County, Massachusetts, 02467, USA"
+Harvard University,42.36782045,-71.1266665287448,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA"
+UNIVERSITY OF OULU,65.0592157,25.466326012507,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi"
+Carnegie Mellon University,37.4102193,-122.059654865858,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA"
+University of Caen,35.0274996,135.781545126193,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本"
+Rensselaer Polytechnic Institute,42.7298459,-73.6795021620135,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA"
+"University of Chinese Academy of Sciences, Beijing, 100049, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+"University of California, Irvine, USA",33.6431901,-117.84016493553,"University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA"
+University of Wollongong,-34.40505545,150.878346547278,"University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia"
+University of Manitoba,49.8091536,-97.133041790072,"University of Manitoba, Gillson Street, Normand Park, Saint Vital, Winnipeg, Manitoba, R3T 2N2, Canada"
+"Dalian University of Technology, China",38.88140235,121.522810980755,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国"
+University of Glasgow,55.87231535,-4.28921783557444,"University of Glasgow, University Avenue, Yorkhill, Hillhead, Glasgow, Glasgow City, Scotland, G, UK"
+"Idiap Research Institute, Switzerland",46.109237,7.08453548522408,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra"
+Boston College,42.3354481,-71.1681386402306,"Boston College, 140, Commonwealth Avenue, Chestnut Hill, Newton, Middlesex County, Massachusetts, 02467, USA"
+"University of Rochester, Rochester, NY, USA",43.1576969,-77.5882915756007,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA"
+Bangalore Institute of Technology,12.9551259,77.5741985,"Bangalore Institute of Technology, Krishna Rajendra Road, Mavalli, Vishveshwara Puram, South Zone, Bengaluru, Bangalore Urban, Karnataka, 560004, India"
+Michigan State University,42.718568,-84.4779157093052,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA"
+George Mason University,38.83133325,-77.3079883887912,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA"
+"Stony Brook University, NY 11794, USA",40.9153196,-73.1270626,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA"
+"University of Nottingham, Nottingham, UK",52.9387428,-1.20029569274574,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK"
+"Nanjing University, Nanjing 210093, China",32.0565957,118.774088328078,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国"
+"Jahangirnagar University, Savar, Dhaka 1342, Bangladesh",23.88277575,90.2671009927283,"Jahangirnagar University, 1342, Dhaka - Aricha Highway, Nobinagar, সাভার উপজেলা, ঢাকা জেলা, ঢাকা বিভাগ, 1342, বাংলাদেশ"
+Yaroslavl State University,57.6252103,39.8845656,"ЯрГУ им. Демидова (Экономический факультет), 3, Комсомольская улица, Кировский район, Ярославль, городской округ Ярославль, Ярославская область, ЦФО, 150000, РФ"
+"City University of Hong Kong, Hong Kong, China",22.34000115,114.169702912423,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国"
+of Cornell University,42.4505507,-76.4783512955428,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA"
+"University of Michigan, Ann Arbor, MI",42.2942142,-83.710038935096,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA"
+Oxford Brookes University,51.7555205,-1.2261597,"Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK"
+"Oxford Brookes University, Oxford, United Kingdom",51.7555205,-1.2261597,"Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK"
+University of Newcastle,-33.3578899,151.37834708231,"University of Newcastle Central Coast Campus, Technology Bridge, Ourimbah, Central Coast, NSW, 2258, Australia"
+Sharif University of Technology,35.7036227,51.351250969544,"دانشگاه صنعتی شریف, خیابان آزادی, زنجان, منطقه ۹ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, 14588, ‏ایران‎"
+"Tel Aviv University, Israel",32.1119889,34.8045970204252,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל"
+Reutlingen University,48.48187645,9.18682403998887,"Campus Hohbuch, Campus Hochschule Reutlingen, Reutlingen, Landkreis Reutlingen, Regierungsbezirk Tübingen, Baden-Württemberg, 72762, Deutschland"
+Hanyang University,37.5557271,127.0436642,"한양대, 206, 왕십리로, 사근동, 성동구, 서울특별시, 04763, 대한민국"
+University of Northern British Columbia,53.8925662,-122.814715920529,"UNBC, Campus Ring Road, College Heights, Prince George, Regional District of Fraser-Fort George, British Columbia, V2M 5K7, Canada"
+"Stamford University Bangladesh, Dhaka-1209, Bangladesh",23.7448166,90.4084351355108,"Stamford University Bangladesh, Siddeshwari Road, ফকিরাপুল, Paltan, ঢাকা, ঢাকা বিভাগ, 1217, বাংলাদেশ"
+"National University of Sciences and Technology (NUST), Islamabad, Pakistan",33.644347,72.9885079,"National University of Sciences and Technology (NUST), Kashmir Highway, جی - 10, ICT, وفاقی دارالحکومت اسلام آباد, 44000, ‏پاکستان‎"
+University of Exeter,50.7369302,-3.53647671702167,"University of Exeter, Stocker Road, Exwick, Exeter, Devon, South West England, England, EX4 4QN, UK"
+University of London,51.5217668,-0.130190717056655,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK"
+"Istanbul Technical University, Istanbul, 34469, TURKEY",41.10427915,29.022311592943,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye"
+The Open University,52.02453775,-0.709274809394501,"The Open University, East Lane, Walton, Monkston, Milton Keynes, South East, England, MK7 6AE, UK"
+University of Illinois at Urbana-Champaign,40.101976,-88.2314378,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA"
+University of Thessaloniki,40.62984145,22.9588934957528,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα"
+"Technical University Munich, Germany",48.14955455,11.5677531417838,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland"
+"Carnegie Mellon University, Pittsburgh, USA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+"Information Technology University (ITU), Punjab, Lahore, Pakistan",31.4760299,74.3427526,"Information Technology University (ITU), Ferozepur Road, Sher Shah Block, Garden Town, Al Noor Town, Lahore District, پنجاب, 54600, ‏پاکستان‎"
+The University of Sydney,-33.88890695,151.189433661925,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia"
+New Jersey Institute of Technology,40.7423025,-74.1792817237128,"New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA"
+Indiana University,39.86948105,-84.8795690544362,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA"
+Dartmouth College,43.7047927,-72.2925909,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA"
+Florida State University,30.44235995,-84.2974786716626,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA"
+Sun Yat-Sen University,23.09461185,113.287889943975,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国"
+University Of Maryland,39.2899685,-76.6219610316858,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA"
+National Institute of Advanced Industrial Science and Technology,36.05238585,140.118523607658,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本"
+University of Washington,47.6543238,-122.308008943203,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA"
+"COMSATS Institute of Information Technology, Islamabad",33.65010145,73.1551494914791,"COMSATS Institute of Information Technology, Fence, Chak Shehzad, وفاقی دارالحکومت اسلام آباد, 45550, ‏پاکستان‎"
+Nanjing University of Information Science and Technology,32.2068102,118.718472893883,"南京信息工程大学, 龙山北路, 第十六街区, 浦口区, 南京市, 江苏省, 210032, 中国"
+Louisiana State University,30.40550035,-91.1862047410405,"LSU, Gourrier Avenue, Baton Rouge, East Baton Rouge Parish, Louisiana, 70803, USA"
+The University of Tokyo,35.9020448,139.936220089117,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本"
+"University of Groningen, The Netherlands",53.21967825,6.56251482206542,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland"
+University of Nebraska - Lincoln,40.8174723,-96.7044468,"Sheldon Museum of Art, North 12th Street, West Lincoln, Lincoln, Lancaster County, Nebraska, 68588-0300, USA"
+"University of Central Florida, USA",28.59899755,-81.1971250118395,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA"
+"University of Bristol, Bristol, BS8 1UB, UK",51.4562363,-2.602779,"University of Bristol, Cantock's Close, Kingsdown, Canon's Marsh, Bristol, City of Bristol, South West England, England, BS8, UK"
+Cyprus University of Technology,34.67567405,33.0457764820597,"Mitropoli Building - Cyprus University of Technology, Anexartisias, Limasol - Λεμεσός, Limassol - Λεμεσός, Κύπρος - Kıbrıs, 3036, Κύπρος - Kıbrıs"
+"Stanford University, Stanford, CA, USA",37.43131385,-122.169365354983,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA"
+Meiji University,35.6975029,139.761391749285,"明治大学, 錦華坂, 猿楽町1, 猿楽町, 東京, 千代田区, 東京都, 関東地方, 101-0051, 日本"
+Kyushu University,33.59914655,130.223598480987,"伊都ゲストハウス, 桜井太郎丸線, 西区, 福岡市, 福岡県, 九州地方, 819−0395, 日本"
+"University of Plymouth, UK",50.3752501,-4.13927692297343,"Charles Seale-Hayne Library, Portland Square, Barbican, Plymouth, South West England, England, PL4 6AP, UK"
+The American University in Cairo,30.04287695,31.2366413899265,"الجامعة الأمريكية بالقاهرة, شارع القصر العينى, القاهرة القديمة, جاردن سيتي, القاهرة, محافظة القاهرة, 11582, مصر"
+"La Trobe University, Australia",-36.7784754,144.298047,"La Trobe University, Keck Street, Flora Hill, Bendigo, City of Greater Bendigo, Loddon Mallee, Victoria, 3550, Australia"
+Karlsruhe Institute of Technology,49.10184375,8.43312559623876,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland"
+Rice University,29.71679145,-95.4047811339379,"Rice University, Stockton Drive, Houston, Harris County, Texas, 77005-1890, USA"
+Rowan University,39.7103526,-75.1193266647699,"Rowan University, Esbjornson Walk, Glassboro, Gloucester County, New Jersey, 08028, USA"
+University of Tokyo,35.9020448,139.936220089117,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本"
+"Beijing University of Posts and Telecommunications, Beijing, P.R. China",39.9601488,116.351939210403,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国"
+Cornell University,42.4505507,-76.4783512955428,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA"
+"Amirkabir University of Technology, Tehran. Iran",35.704514,51.4097205774739,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎"
+New York University,40.72925325,-73.9962539360963,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA"
+"University of South Carolina, USA",33.9928298,-81.0268516781225,"University of South Carolina, Wheat Street, Columbia, Richland County, South Carolina, 29205, USA"
+Florida International University,25.75533775,-80.3762889746807,"FIU, Southwest 14th Street, Sweetwater, University Park, Miami-Dade County, Florida, 33199, USA"
+Islamic University of Gaza - Palestine,31.51368535,34.4401934143135,"The Islamic University of Gaza, Mostafa Hafez Street, South Remal, محافظة غزة, ‏قطاع غزة‎, PO BOX 108, الأراضي الفلسطينية"
+"Queen Mary University of London, UK",51.5247272,-0.0393103466301624,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK"
+Kent State University,41.1443525,-81.3398283284572,"Kent State University, Lester A. Lefton Esplanade, Whitehall Terrace, Kent, Portage County, Ohio, 44242-0001, USA"
+"University, Xi an Shaanxi Province, Xi an 710049, China",34.2707834,108.94449949951,"西五路, 新城区, 新城区 (Xincheng), 西安市, 陕西省, 710003, 中国"
+Politehnica University of Timisoara,45.746189,21.2275507517647,"UPT, Bulevardul Vasile Pârvan, Elisabetin, Timișoara, Timiș, 300223, România"
+Aristotle University of Thessaloniki GR,40.62984145,22.9588934957528,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα"
+"Moscow Institute of Physics and Technology, Russia",55.929035,37.5186680829482,"МФТИ, 9, Институтский переулок, Виноградовские Горки, Лихачёво, Долгопрудный, городской округ Долгопрудный, Московская область, ЦФО, 141700, РФ"
+Kobe University,34.7275714,135.237099997686,"神戸大学, 灘三田線, 灘区, 神戸市, 兵庫県, 近畿地方, 657-00027, 日本"
+University of California San Diego,32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+"Michigan State University, East Lansing, MI, USA",42.718568,-84.4779157093052,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA"
+Queensland University of Technology(QUT,-27.4770485,153.028373791304,"QUT Gardens Point Main Library, V, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia"
+Otto-von-Guericke University Magdeburg,52.14005065,11.6447124822347,"Otto-von-Guericke-Universität Magdeburg, 2, Universitätsplatz, Krökentorviertel/Breiter Weg NA, Alte Neustadt, Magdeburg, Sachsen-Anhalt, 39106, Deutschland"
+"College Park, MD, 20740, USA",38.980666,-76.9369189,"College Park, Prince George's County, Maryland, USA"
+Czech Technical University,50.0764296,14.418023122743,"České vysoké učení technické v Praze, Resslova, Nové Město, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 11121, Česko"
+Middle East Technical University,39.87549675,32.7855350558467,"ODTÜ, 1, 1591.sk(315.sk), Çiğdem Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye"
+"University of Technology, Sydney, Australia",-33.8828784,151.200682779726,"UTS, Thomas Street, Ultimo, Sydney, NSW, 2007, Australia"
+Shanghai Jiao Tong University,31.20081505,121.428406809373,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国"
+"Monash University, Victoria, Australia",-37.9011951,145.130584919767,"Monash University, Business Park Drive, Monash Business Park, Notting Hill, City of Monash, Victoria, 3800, Australia"
+FL,27.7567667,-81.4639835,"Florida, USA"
+Institute of Computer Science III,35.15456615,128.098476040221,"Institute of Computer Science, 8, 내동로, 신율리, 진주시, 경남, 52669, 대한민국"
+"The University of Tokyo, Japan",35.9020448,139.936220089117,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本"
+Cambridge University,50.7944026,-1.0971748,"University, Cambridge Road, Old Portsmouth, Portsmouth, South East, England, PO1 2HB, UK"
+"University POLITEHNICA of Bucharest, Bucharest, Romania",44.43918115,26.0504456538413,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România"
+University of Southern California,34.0224149,-118.286344073446,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA"
+Institute of,38.3836097,-81.7654665,"Institute, Kanawha County, West Virginia, 25112, USA"
+University of California Berkeley,37.87631055,-122.238859269443,"UC Berkeley, Centennial Drive, Oakland, Alameda County, California, 94720-1076, USA"
+"Columbia University, New York, NY, USA",40.8419836,-73.9436897071772,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA"
+Mahanakorn University of Technology,13.84450465,100.856208183836,"มหาวิทยาลัยเทคโนโลยีมหานคร, 140, ถนนเชื่อมสัมพันธ์, กรุงเทพมหานคร, เขตหนองจอก, กรุงเทพมหานคร, 10530, ประเทศไทย"
+"University of Washington, Seattle, USA",47.6543238,-122.308008943203,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA"
+Ionian University,38.2899482,21.7886469,"Πανεπιστήμιο Πατρών, Λεωφ. Ιπποκράτους, κ. Ρίου (Αγίου Γεωργίου Ρίου), Πάτρα, Δήμος Πατρέων, Περιφερειακή Ενότητα Αχαΐας, Περιφέρεια Δυτικής Ελλάδας, Πελοπόννησος, Δυτική Ελλάδα και Ιόνιο, 26443, Ελλάδα"
+University of Arkansas at Little Rock,34.72236805,-92.3383025526859,"University of Arkansas At Little Rock (UALR), 2801, U A L R Campus Drive, Little Rock, Pulaski County, Arkansas, 72204, USA"
+"College Heights Blvd, Bowling Green, KY",36.9881671,-86.4542111,"College Heights Boulevard, Bowling Green, Warren County, Kentucky, 42101, USA"
+University of Pittsburgh,40.44415295,-79.9624399276271,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA"
+Katholieke Universiteit Leuven,50.8830686,4.7019503,"Laboratorium voor Bos, natuur en landschap, 102, Vital Decosterstraat, Sint-Maartensdal, Leuven, Vlaams-Brabant, Vlaanderen, 3000, België / Belgique / Belgien"
+"University of Barcelona, Spain",41.3868913,2.16352384576632,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España"
+"University of Chinese Academy of Sciences, Beijing 100190, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+University of Frankfurt,50.13053055,8.69234223934388,"Frankfurt University of Applied Sciences, Kleiststraße, Nordend West, Frankfurt, Regierungsbezirk Darmstadt, Hessen, 60318, Deutschland"
+Hanoi University of Science and Technology,21.003952,105.843601832826,"HUST, Trần Đại Nghĩa, Hai Bà Trưng, Hà Nội, 10999, Việt Nam"
+"Southwest Jiaotong University, Chengdu, China",30.697847,104.0520811,"西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国"
+"University of Chinese Academy of Sciences, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+"Wayne State University, Detroit, MI 48202, USA",42.3656423,-83.0711533990367,"Wayne State University, Burroughs Street, New Center, Detroit, Wayne County, Michigan, 48202, USA"
+Institute of Electronics and Computer Science,56.97734805,24.1951425550775,"EDI, 14, Dzērbenes iela, Biķerziedi, Teika, Ozolkalni, Rīga, Vidzeme, LV-1006, Latvija"
+"Nanjing University of Aeronautics and Astronautics, Nanjing 210016, China",32.0373496,118.8140686,"南京航空航天大学, 御道街, 白下区, 新世纪广场, 秦淮区, 南京市, 江苏省, 210016, 中国"
+"King Saud University, Riyadh, Saudi Arabia",24.7246403,46.623350123456,"King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية"
+Dr. B. C. Roy Engineering College,23.54409755,87.342697070434,"Dr. B. C. Roy Engineering College, Lenin Sarani, Durgapur, Bānkurā, West Bengal, 713200, India"
+University of Canberra,-35.23656905,149.084469935058,"University of Canberra, University Drive, Bruce, Belconnen, Australian Capital Territory, 2617, Australia"
+"Chung-Ang University, Seoul, Korea",37.50882,126.9619,"중앙대학교, 서달로15길, 흑석동, 동작구, 서울특별시, 06981, 대한민국"
+Beijing Jiaotong University,39.94976005,116.33629045844,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国"
+"King Abdullah University of Science and Technology 4700, Thuwal, Saudi Arabia",22.31055485,39.1051548637793,"KAUST, Collaboration Avenue, ثول, منطقة مكة المكرمة, 23955, السعودية"
+"The Australian National University Canberra ACT 2601, Australia",-35.28121335,149.11665331324,"Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia"
+"Santa Clara University, Santa Clara, CA. 95053, USA",37.34820285,-121.935635412063,"Cowell Center, Accolti Way, Santa Clara, Santa Clara County, California, 95053, USA"
+Rowland Institute,42.3639862,-71.0778293,"Rowland Research Institute, Land Boulevard, East Cambridge, Cambridge, Middlesex County, Massachusetts, 02142, USA"
+Nottingham Trent University,52.9577322,-1.15617099267709,"Nottingham Trent University, Waverley Terrace, Lace Market, The Park, City of Nottingham, East Midlands, England, NG1 5JD, UK"
+University of Basel,47.5612651,7.5752961,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra"
+"Bogazici University, Bebek",41.0868841,29.0441316722649,"Boğaziçi Üniversitesi Kuzey Yerleşkesi, Okulaltı 1. Sokak, Rumelihisarı, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34467, Türkiye"
+King Faisal University,26.397778,50.183056,"University of Dammam, King Faisal Rd, العقربية, الخبر, المنطقة الشرقية, ٣١٩٥٢, السعودية"
+National Institute of Technology Karnataka,13.01119095,74.7949882494716,"National Institute of Technology, Karnataka, NH66, ದಕ್ಷಿಣ ಕನ್ನಡ, Mangaluru taluk, Dakshina Kannada, Karnataka, 575025, India"
+"University of California, San Diego",32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+"University of Science and Technology of China, Hefei, 230027, China",31.83907195,117.264207478576,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国"
+Nagoya University,43.53750985,143.60768225282,"SuperDARN (Hokkaido West), 太辛第1支線林道, 陸別町, 足寄郡, 十勝総合振興局, 北海道, 北海道地方, 日本"
+Institute of Information Science,25.0410728,121.614756201755,"資訊科學研究所, 數理大道, 中研里, 南港子, 南港區, 臺北市, 11574, 臺灣"
+University of Iowa,41.6659,-91.573103065,"University of Iowa, Hawkeye Court, Iowa City, Johnson County, Iowa, 52246, USA"
+University of Trento,46.0658836,11.1159894,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia"
+Middlebury College,44.0090777,-73.1767946,"Middlebury College, Old Chapel Road, Middlebury, Addison County, Vermont, 05753, USA"
+"Indian Institute of Science, India",13.0222347,77.5671832476811,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India"
+"University of Bonn, Germany",50.7338124,7.1022465,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland"
+"University of Illinois at Urbana-Champaign, IL USA",40.101976,-88.2314378,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA"
+University of Miami,25.7173339,-80.2786688657706,"University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA"
+"DIT UNIVERSITY, DEHRADUN",30.3983396,78.0753455,"DIT University, Dehradun-Mussoorie Road, Rājpur, Kincraig, Dehra Dūn, Uttarakhand, 248009, India"
+"Istanbul Technical University, Istanbul, Turkey",41.10427915,29.022311592943,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye"
+"Karlsruhe Institute of Technology, 76131 Karlsruhe, Germany",49.10184375,8.43312559623876,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland"
+"Rheinische-Friedrich-Wilhelms University, Bonn, Germany",50.7338124,7.1022465,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland"
+Nanyang Technological University,1.3484104,103.682979653067,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore"
+College of Electrical and Information Engineering,42.0049791,21.40834315,"Факултет за електротехника и информациски технологии, Орце Николов, Карпош 2, Карпош, Скопје, Општина Карпош, Град Скопје, Скопски Регион, 1000, Македонија"
+Institute of Systems and Robotics,53.8338371,10.7035939,"Institut für Robotik und Kognitive Systeme, 160, Ratzeburger Allee, Strecknitz, Sankt Jürgen, Strecknitz, Lübeck, Schleswig-Holstein, 23562, Deutschland"
+Massachusetts Institute of Technology,42.3583961,-71.0956778766393,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+UNIVERSITY OF TAMPERE,61.49412325,23.7792067776763,"Tampereen yliopisto, 4, Kalevantie, Ratinanranta, Tulli, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33100, Suomi"
+University of California Santa Barbara,34.4145937,-119.84581949869,"UCSB, Santa Barbara County, California, 93106, USA"
+The Institute of Electronics,12.8447999,77.6632389626693,"International Institute of Information Technology Bangalore - IIITB, Infosys Avenue, Konappana Agrahara, Electronics City Phase 1, Vittasandra, Bangalore Urban, Karnataka, 560100, India"
+University of Illinois at Chicago,41.86898915,-87.6485625597018,"University of Illinois at Chicago, West Taylor Street, Greektown, Chicago, Cook County, Illinois, 60607, USA"
+"University of Pittsburgh, Pittsburgh, USA",40.44415295,-79.9624399276271,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA"
+"Cardiff University, UK",51.4879961,-3.17969747443907,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK"
+Institute of Digital Media,20.28907925,85.84232125,"Institute of Digital Media Technology, Way to Csa Odisha Office, Ward 35, South East Zone, Bhubaneswar Municipal Corporation, Khordha, Odisha, 751022, India"
+University of Siena,22.4133862,114.210058,"大學 University, 澤祥街 Chak Cheung Street, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国"
+University of Kentucky,38.0333742,-84.5017758,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA"
+"University of Oxford, United Kingdom",51.7534538,-1.25400997048855,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK"
+"University of California, San Diego, La Jolla",32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+"Beijing University of Posts and Telecommunications, Beijing, China",39.9601488,116.351939210403,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国"
+"Bar Ilan University, Israel",32.06932925,34.8433433861531,"אוניברסיטת בר אילן, כביש גהה, גבעת שמואל, קריית מטלון, גבעת שמואל, מחוז תל אביב, NO, ישראל"
+Raipur institute of technology,21.2262243,81.8013664,"Raipur institute of technology, NH53, Raipur, Chhattisgarh, 492101, India"
+"Nagaoka University of Technology, Japan",37.42354445,138.77807276029,"長岡技術科学大学 (Nagaoka University of Technology), 長岡西山線, 長岡市, 新潟県, 中部地方, 日本"
+"University of California, Berkeley",37.8687126,-122.255868148743,"Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA"
+"University of Chinese Academy of Sciences, Beijing 101408, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+"Macau University of Science and Technology, Macau",22.15263985,113.568032061523,"Universidade de Ciência e Tecnologia de Macau 澳門科技大學 Macau University of Science and Technology, 偉龍馬路 Avenida Wai Long, 氹仔Taipa, 氹仔舊城區 Vila de Taipa, 嘉模堂區 Nossa Senhora do Carmo, 氹仔 Taipa, 澳門 Macau, 853, 中国"
+University of Abertay,56.46323375,-2.97447511707098,"Abertay University, Bell Street, City Centre, Dundee, Dundee City, Scotland, DD1 1HG, UK"
+"Southwest University, China",29.82366295,106.420500156445,"西南大学, 天生路, 北碚区 (Beibei), 北碚区, 北碚区 (Beibei), 重庆市, 400711, 中国"
+"Cornell University, Ithaca, New York",42.4505507,-76.4783512955428,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA"
+National Taipei University,24.94314825,121.368629787836,"國立臺北大學, 151, 大學路, 龍恩里, 隆恩埔, 三峽區, 新北市, 23741, 臺灣"
+"University of Tennessee, Knoxville",35.9542493,-83.9307395,"University of Tennessee, Melrose Avenue, Fort Sanders, Knoxville, Knox County, Tennessee, 37916, USA"
+"University of California, San Diego, California, USA",32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+University of Texas at Austin,30.284151,-97.7319559808022,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA"
+University of Tokyo,35.9020448,139.936220089117,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本"
+University of Massachusetts,42.3889785,-72.5286987,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA"
+"Ruhr-University Bochum, Germany",51.44415765,7.26096541306078,"RUB, 150, Universitätsstraße, Ruhr-Universität, Querenburg, Bochum-Süd, Bochum, Regierungsbezirk Arnsberg, Nordrhein-Westfalen, 44801, Deutschland"
+"Warsaw University of Technology, Poland",52.22165395,21.0073577612511,"Politechnika Warszawska, 1, Plac Politechniki, VIII, Śródmieście, Warszawa, mazowieckie, 00-661, RP"
+Columbia University in the City of New York,40.8071772,-73.9625279772072,"Columbia University In The City Of New York, College Walk, Morningside Heights, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10027, USA"
+University of Basel,47.5612651,7.5752961,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra"
+"University of Peshawar, Pakistan",34.0092004,71.4877494739102,"University of Peshawar, Road 2, JAHANGIR ABAD / جهانگیرآباد, پشاور‎, Peshāwar District, خیبر پختونخوا, 2500, ‏پاکستان‎"
+"Foundation University Rawalpindi Campus, Pakistan",33.5609504,73.0712596618793,"Foundation University Rawalpindi Campus, Main Parking Road, Police Lines, راولپنڈی, Rawalpindi Cantt, پنجاب, 46600, ‏پاکستان‎"
+"University of Oxford, United Kingdom",51.7534538,-1.25400997048855,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK"
+University of Massachusetts,42.3889785,-72.5286987,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA"
+University of Edinburgh,55.94951105,-3.19534912525441,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK"
+University of Wisconsin Madison,43.07982815,-89.4306642542901,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA"
+"Istanbul Technical University, Istanbul, 34469, TURKEY",41.10427915,29.022311592943,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye"
+Reutlingen University,48.48187645,9.18682403998887,"Campus Hohbuch, Campus Hochschule Reutlingen, Reutlingen, Landkreis Reutlingen, Regierungsbezirk Tübingen, Baden-Württemberg, 72762, Deutschland"
+"Chulalongkorn University, Bangkok",13.74311795,100.532879009091,"จุฬาลงกรณ์มหาวิทยาลัย, 254, ถนนพญาไท, สยาม, แขวงปทุมวัน, เขตปทุมวัน, กรุงเทพมหานคร, 10330, ประเทศไทย"
+The University of Cambridge,52.17638955,0.143088815415187,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK"
+"University of Texas at Arlington, Arlington, TX",32.7283683,-97.112018348404,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA"
+"Imperial College London, United Kingdom",51.49887085,-0.175607973937072,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK"
+"The City College of New York, New York, NY 10031, USA",40.81819805,-73.9510089793336,"CCNY, 160, Convent Avenue, Manhattanville, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10031, USA"
+University of Malta,35.9023226,14.4834189,"University of Malta, Ring Road, Japanese Garden, L-Imsida, Malta, MSD 9027, Malta"
+"University of Basel, Switzerland",47.5612651,7.5752961,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra"
+Max Planck Institute for Biological Cybernetics,48.5369125,9.05922532743396,"Max-Planck-Institut für Biologische Kybernetik, 8, Max-Planck-Ring, Max-Planck-Institut, Wanne, Tübingen, Landkreis Tübingen, Regierungsbezirk Tübingen, Baden-Württemberg, 72076, Deutschland"
+Waseda University,33.8898728,130.708562047107,"早稲田大学 北九州キャンパス, 2-2, 有毛引野線, 八幡西区, 北九州市, 福岡県, 九州地方, 808-0135, 日本"
+University of Campinas (Unicamp,-22.8224781,-47.0642599309425,"Universidade Estadual de Campinas - UNICAMP, Rua Josué de Castro, Barão Geraldo, Campinas, Microrregião de Campinas, RMC, Mesorregião de Campinas, SP, Região Sudeste, 13083-970, Brasil"
+"Cardiff University, UK",51.4879961,-3.17969747443907,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK"
+"University of Washington, Seattle, USA",47.6543238,-122.308008943203,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA"
+"University of Oxford, Oxford, United Kingdom",51.7534538,-1.25400997048855,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK"
+University of Bonn,50.7338124,7.1022465,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland"
+"University of Haifa, Haifa, Israel",32.76162915,35.0198630428453,"אוניברסיטת חיפה, חיפה, מחוז חיפה, ישראל"
+"University of Amsterdam, the Netherlands",52.3553655,4.9501644,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland"
+"University of Science and Technology of China, Hefei, 230027, China",31.83907195,117.264207478576,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国"
+Illinois Institute of Technology,41.8361963,-87.6265591274291,"Illinois Institute of Technology, South State Street, Bronzeville, Chicago, Cook County, Illinois, 60616, USA"
+"King Saud University, Riyadh, Saudi Arabia",24.7246403,46.623350123456,"King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية"
+"University of Michigan, Ann Arbor",42.2942142,-83.710038935096,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA"
+Research Center,24.7261991,46.6365468966391,"مركز البحوث, طريق تركي الأول بن عبدالعزيز آل سعود, المحمدية, Al Muhammadiyah District حي المحمدية, Al Maather Municipality, الرياض, منطقة الرياض, 12371, السعودية"
+"Southeast University, Nanjing, China",32.0575279,118.786822520439,"SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国"
+"University of Florida, Gainesville, FL, 32611, USA",29.6447739,-82.3575193392276,"University of Florida, Museum Road, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32601, USA"
+"Pune Institute of Computer Technology, Pune, ( India",18.4575638,73.8507352,"Pune Institute of Computer Technology, Mediacal College Road, Vadgaon Budruk, Katraj, Pune, Pune District, Maharashtra, 411043, India"
+"Carnegie Mellon University, Pittsburgh PA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+Queensland University of Technology,-27.47715625,153.028410039129,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia"
+"University of Washington, Seattle, WA, USA",47.65249975,-122.2998748,"University of Washington, Northeast Walla Walla Road, Montlake, University District, Seattle, King County, Washington, 98195-2350, USA"
+University of British Columbia,49.25839375,-123.246581610019,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada"
+University of Texas at,32.3163078,-95.2536994379459,"The University of Texas at Tyler, 3900, University Boulevard, Tyler, Smith County, Texas, 75799, USA"
+"The University of York, Heslington, York YO10 5DD, United Kingdom",53.94830175,-1.05154975017361,"Campus Central Car Park, University Road, Heslington, York, Yorkshire and the Humber, England, YO10 5NH, UK"
+Chosun University,35.1441031,126.9257858,"조선대역, 서남로, 남동, 동구, 광주, 61473, 대한민국"
+"University of North Carolina at Chapel Hill, NC, USA",35.9113971,-79.0504529,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA"
+"University College London, London, UK",51.5231607,-0.1282037,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK"
+"Purdue University, West Lafayette, Indiana, 47906, USA",40.4319722,-86.923893679845,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA"
+"University of Karlsruhe, Germany",49.00664235,8.39405151637065,"Karlshochschule International University, 36-38, Karlstraße, Innenstadt-West Westlicher Teil, Innenstadt-West, Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76133, Deutschland"
+THE UNIVERSITY OF ARIZONA,32.2351726,-110.950958317648,"University of Arizona, North Highland Avenue, Rincon Heights, Barrio Viejo, Tucson, Pima County, Arizona, 85721, USA"
+Nagoya University,43.53750985,143.60768225282,"SuperDARN (Hokkaido West), 太辛第1支線林道, 陸別町, 足寄郡, 十勝総合振興局, 北海道, 北海道地方, 日本"
+"Information, Keio University",35.5416969,139.6347184,"綱島市民の森, けつわり坂, 港北区, 横浜市, 神奈川県, 関東地方, 223-0053, 日本"
+Rochester Institute of Technology,43.08250655,-77.6712166264273,"Rochester Institute of Technology (RIT), 1, Lomb Memorial Drive, Bailey, Henrietta Town, Monroe County, New York, 14623, USA"
+"University of Bath, Bath, United Kingdom",51.3791442,-2.3252332,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK"
+University of Illinois Urbana Champaign,40.11116745,-88.2258766477716,"B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA"
+Delft University of Technology,51.99882735,4.37396036815404,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland"
+Hong Kong Polytechnic University,22.304572,114.179762852269,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国"
+University of the Witwatersrand,-26.1888813,28.0247907319205,"University of the Witwatersrand, Empire Road, Johannesburg Ward 60, Johannesburg, City of Johannesburg Metropolitan Municipality, Gauteng, 2001, South Africa"
+"Michigan State University, East Lansing, MI 48824, USA",42.718568,-84.4779157093052,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA"
+"University of Amsterdam, Amsterdam, The",52.3553655,4.9501644,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland"
+"University of Pittsburgh, Pittsburgh, PA",40.4495417,-79.8957457221781,"Visitor Parking, Thomas Boulevard, Homewood, Point Breeze North, Wilkinsburg, Allegheny County, Pennsylvania, 15208, USA"
+Clemson University,34.66869155,-82.837434756078,"Clemson University, Old Stadium Road, Clemson Heights, Pickens County, South Carolina, 29631, USA"
+"Imperial College London, UK",51.49887085,-0.175607973937072,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK"
+"Tampere University of Technology, Tampere 33720, Finland",61.44964205,23.8587746189096,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi"
+Tomsk Polytechnic University,56.46255985,84.955654946724,"Томский политехнический университет, улица Пирогова, Южная, Кировский район, Томск, городской округ Томск, Томская область, СФО, 634034, РФ"
+University of Warwick,52.3793131,-1.5604252,"University of Warwick, University Road, Kirby Corner, Cannon Park, Coventry, West Midlands Combined Authority, West Midlands, England, CV4 7AL, UK"
+Karlsruhe Institute of Technology,49.10184375,8.43312559623876,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland"
+"University of Massachusetts, Amherst MA, USA",42.3889785,-72.5286987,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA"
+CALIFORNIA INSTITUTE OF TECHNOLOGY,34.13710185,-118.125274866116,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA"
+ALICE Institute,-8.82143045,13.2347076178375,"Instituto Superior de Ciências da Educação (ISCED), Rua Salvador Allende (Salvador Guillermo Allende Gossens), Maculusso, Maianga, Município de Luanda, Luanda, 927, Angola"
+"University of Alberta, Edmonton, Canada",53.5238572,-113.522826652346,"University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada"
+"University of Dammam, Saudi Arabia",26.39793625,50.1980792430511,"University of Dammam, King Faisal Rd, العقربية, الخبر, المنطقة الشرقية, ٣١٩٥٢, السعودية"
+University Of Maryland,39.2899685,-76.6219610316858,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA"
+SIMON FRASER UNIVERSITY,49.2767454,-122.917773749103,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada"
+Pohang University of Science and Technology,36.01773095,129.321075092352,"포스텍, 77, 청암로, 효곡동, 남구, 포항시, 경북, 37673, 대한민국"
+"University of Oxford, UK",51.7534538,-1.25400997048855,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK"
+"University of Texas at Arlington, Arlington, TX, USA",32.7283683,-97.112018348404,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA"
+Mahanakorn University of Technology,13.84450465,100.856208183836,"มหาวิทยาลัยเทคโนโลยีมหานคร, 140, ถนนเชื่อมสัมพันธ์, กรุงเทพมหานคร, เขตหนองจอก, กรุงเทพมหานคร, 10530, ประเทศไทย"
+"Rensselaer Polytechnic Institute, USA",42.7298459,-73.6795021620135,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA"
+"Nagoya University, Japan",43.53750985,143.60768225282,"SuperDARN (Hokkaido West), 太辛第1支線林道, 陸別町, 足寄郡, 十勝総合振興局, 北海道, 北海道地方, 日本"
+"University of Amsterdam, Amsterdam, The Netherlands",52.3553655,4.9501644,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland"
+University Politehnica of Bucharest,44.43918115,26.0504456538413,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România"
+"The Australian National University Canberra ACT 2601, Australia",-35.28121335,149.11665331324,"Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia"
+"California Institute of Technology, Pasadena, California, USA",34.13710185,-118.125274866116,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA"
+"The Chinese University of Hong Kong, New Territories, Hong Kong",22.413656,114.2099405,"香港中文大學 Chinese University of Hong Kong, 車站路 Station Road, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国"
+"University of Nevada, Reno, Reno, NV, USA",39.5469449,-119.813465660936,"Orange 1, Evans Avenue, Reno, Washoe County, Nevada, 89557, USA"
+University of British Columbia,49.25839375,-123.246581610019,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada"
+University of Siena,22.4133862,114.210058,"大學 University, 澤祥街 Chak Cheung Street, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国"
+National Cheng Kung University,22.9991916,120.216251337909,"成大, 1, 大學路, 大學里, 前甲, 東區, 臺南市, 70101, 臺灣"
+The Education University of Hong Kong,22.46935655,114.19474193618,"香港教育大學 The Education University of Hong Kong, 露屏路 Lo Ping Road, 鳳園 Fung Yuen, 下坑 Ha Hang, 新界 New Territories, HK, DD5 1119, 中国"
+"KTH Royal Institute of Technology, Stockholm",59.34986645,18.0706321329842,"KTH, Teknikringen, Lärkstaden, Norra Djurgården, Östermalms stadsdelsområde, Sthlm, Stockholm, Stockholms län, Svealand, 114 28, Sverige"
+"The Hebrew University of Jerusalem, Israel",31.7918555,35.244723,"האוניברסיטה העברית בירושלים, Reagan Plaza, קרית מנחם בגין, הר הצופים, ירושלים, מחוז ירושלים, NO, ישראל"
+University of Dundee,56.45796755,-2.98214831353755,"University of Dundee, Park Wynd, Law, Dundee, Dundee City, Scotland, DD1 4HN, UK"
+Waseda University,33.8898728,130.708562047107,"早稲田大学 北九州キャンパス, 2-2, 有毛引野線, 八幡西区, 北九州市, 福岡県, 九州地方, 808-0135, 日本"
+University Of California San Diego,32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+The Australian National University,-37.81354365,144.971791681654,"Australian National University, 52, Collins Street, Melbourne City, City of Melbourne, Victoria, 3000, Australia"
+Institute for System Programming,55.7449881,37.6645042069876,"ИСП РАН, 25, улица Александра Солженицына, Швивая горка, Таганский район, Центральный административный округ, Москва, ЦФО, 109004, РФ"
+"Cardiff University, UK",51.4879961,-3.17969747443907,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK"
+University of Illinois Urbana Champaign,40.11116745,-88.2258766477716,"B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA"
+Chosun University,35.1441031,126.9257858,"조선대역, 서남로, 남동, 동구, 광주, 61473, 대한민국"
+"Carnegie Mellon University Pittsburgh, PA, USA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+"West Virginia University, Morgantown, WV",39.65404635,-79.96475355,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA"
+University of Canberra,-35.23656905,149.084469935058,"University of Canberra, University Drive, Bruce, Belconnen, Australian Capital Territory, 2617, Australia"
+"University of Virginia, Charlottesville, VA",38.0410576,-78.5054996018357,"University of Virginia, Emmet Street North, Charlottesville, Virginia, 22901, USA"
+Boston College,42.3354481,-71.1681386402306,"Boston College, 140, Commonwealth Avenue, Chestnut Hill, Newton, Middlesex County, Massachusetts, 02467, USA"
+Columbia University in the City of New York,40.8071772,-73.9625279772072,"Columbia University In The City Of New York, College Walk, Morningside Heights, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10027, USA"
+University of Illinois,40.11116745,-88.2258766477716,"B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA"
+Harvard University,42.36782045,-71.1266665287448,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA"
+University of Bonn,50.7338124,7.1022465,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland"
+"The Hebrew University of Jerusalem, Israel",31.7918555,35.244723,"האוניברסיטה העברית בירושלים, Reagan Plaza, קרית מנחם בגין, הר הצופים, ירושלים, מחוז ירושלים, NO, ישראל"
+"Istanbul Technical University, Turkey",41.10427915,29.022311592943,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye"
+"Nagoya University, Japan",43.53750985,143.60768225282,"SuperDARN (Hokkaido West), 太辛第1支線林道, 陸別町, 足寄郡, 十勝総合振興局, 北海道, 北海道地方, 日本"
+Anna University,13.0105838,80.2353736,"Anna University, Nuclear Physics Road, Srinagar Colony, Ward 171, Zone 13 Adyar, Chennai, Chennai district, Tamil Nadu, 600025, India"
+University Politehnica of Bucharest,44.43918115,26.0504456538413,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România"
+"Cardiff University, UK",51.4879961,-3.17969747443907,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK"
+Karlsruhe Institute of Technology,49.10184375,8.43312559623876,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland"
+Clemson University,34.66869155,-82.837434756078,"Clemson University, Old Stadium Road, Clemson Heights, Pickens County, South Carolina, 29631, USA"
+"La Trobe University, Australia",-36.7784754,144.298047,"La Trobe University, Keck Street, Flora Hill, Bendigo, City of Greater Bendigo, Loddon Mallee, Victoria, 3550, Australia"
+"Indian Institute of Technology Delhi, New Delhi, India",28.5444176,77.1893001,"Indian Institute Of Technology, IIT Delhi Main Road, Adchini, Lado Sarai, Mehrauli, South Delhi, Delhi, 110066, India"
+"The University of Sydney, Sydney, Australia",-33.88890695,151.189433661925,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia"
+"College Park, USA",38.980666,-76.9369189,"College Park, Prince George's County, Maryland, USA"
+"University of Tsukuba, Japan",36.1112058,140.1055176,"University of Tsukuba, つばき通り, Kananemoto-satsukabe village, つくば市, 茨城県, 関東地方, 305-8377, 日本"
+Rochester Institute of Technology,43.08250655,-77.6712166264273,"Rochester Institute of Technology (RIT), 1, Lomb Memorial Drive, Bailey, Henrietta Town, Monroe County, New York, 14623, USA"
+"Princeton University, Princeton, NJ, USA",40.34725815,-74.6513455119257,"Lot 25, Ivy Lane, Princeton Township, Mercer County, New Jersey, 08544, USA"
+"University of Cambridge, United Kingdom",52.17638955,0.143088815415187,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK"
+Massachusetts Institute of Technology,42.3583961,-71.0956778766393,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+University of Newcastle,-33.3578899,151.37834708231,"University of Newcastle Central Coast Campus, Technology Bridge, Ourimbah, Central Coast, NSW, 2258, Australia"
+Carleton University,45.3860843,-75.6953926739404,"Carleton University, 1125, Colonel By Drive, Billings Bridge, Capital, Ottawa, Ontario, K1S 5B7, Canada"
+Waseda University,33.8898728,130.708562047107,"早稲田大学 北九州キャンパス, 2-2, 有毛引野線, 八幡西区, 北九州市, 福岡県, 九州地方, 808-0135, 日本"
+"Purdue University, West Lafayette, Indiana, 47906, USA",40.4319722,-86.923893679845,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA"
+"University of California, Berkeley, Berkeley CA 94720, USA",37.8756681,-122.257979979865,"Goldman School of Public Policy, Hearst Avenue, Northside, Berkeley, Alameda County, California, 94720, USA"
+Wolfson College,51.7711076,-1.25361700492597,"Wolfson College, Linton Road, Norham Manor, Oxford, Oxon, South East, England, OX2 6UD, UK"
+Institute of Electronics and Computer Science,56.97734805,24.1951425550775,"EDI, 14, Dzērbenes iela, Biķerziedi, Teika, Ozolkalni, Rīga, Vidzeme, LV-1006, Latvija"
+"The Chinese University of Hong Kong, New Territories, Hong Kong",22.413656,114.2099405,"香港中文大學 Chinese University of Hong Kong, 車站路 Station Road, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国"
+Hunan University,26.88111275,112.628506656425,"Yejin University for Employees, 冶金西路, 和平乡, 珠晖区, 衡阳市 / Hengyang, 湖南省, 中国"
+"Rutgers University, USA",40.47913175,-74.431688684404,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA"
+Robotics Institute,13.65450525,100.494231705059,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย"
+"Northeastern University, Boston, MA, USA",42.3383668,-71.0879352428284,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA"
+SIMON FRASER UNIVERSITY,49.2767454,-122.917773749103,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada"
+University of Maryland,39.2899685,-76.6219610316858,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA"
+The University of Texas,32.3163078,-95.2536994379459,"The University of Texas at Tyler, 3900, University Boulevard, Tyler, Smith County, Texas, 75799, USA"
+Max Planck Institute for Biological Cybernetics,48.5369125,9.05922532743396,"Max-Planck-Institut für Biologische Kybernetik, 8, Max-Planck-Ring, Max-Planck-Institut, Wanne, Tübingen, Landkreis Tübingen, Regierungsbezirk Tübingen, Baden-Württemberg, 72076, Deutschland"
+McMaster University,43.26336945,-79.9180968401692,"McMaster University, Westdale, Hamilton, Ontario, Canada"
+"The University of York, UK",53.94540365,-1.0313887829649,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK"
+"The University of Electro-Communications, Tokyo",35.6572957,139.542558677257,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本"
+Eskisehir Osmangazi University,39.7487516,30.4765307102195,"Eskişehir Osmangazi Üniversitesi Meşelik Yerleşkesi, Kütahya-Eskişehir yolu, Sazova Mahallesi, Karagözler, Tepebaşı, Eskişehir, İç Anadolu Bölgesi, 26160, Türkiye"
+"City University of Hong Kong, Hong Kong, China",22.34000115,114.169702912423,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国"
+College of Engineering and Computer Science,25.7589624,-80.3738881489383,"ECS, University Drive, Sweetwater, Lil Abner Mobile Home Park, Miami-Dade County, Florida, 33199, USA"
+University of Maryland College Park,38.99203005,-76.9461029019905,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA"
+Technion Israel Institute of Technology,32.7767536,35.0241452903301,"הטכניון - מכון טכנולוגי לישראל, דוד רוז, חיפה, קרית הטכניון, חיפה, מחוז חיפה, NO, ישראל"
+"College Park, MD, 20740, USA",38.980666,-76.9369189,"College Park, Prince George's County, Maryland, USA"
+"University of Washington, Seattle, WA, USA",47.65249975,-122.2998748,"University of Washington, Northeast Walla Walla Road, Montlake, University District, Seattle, King County, Washington, 98195-2350, USA"
+"Beijing Institute of Technology, Beijing, China",39.9586652,116.309712808455,"北京理工大学, 5, 中关村南大街, 中关村, 稻香园南社区, 海淀区, 北京市, 100872, 中国"
+University of Venezia,45.4312742,12.3265377,"University, Fondamenta Toffetti, Dorsoduro, Venezia-Murano-Burano, Venezia, VE, VEN, 30123, Italia"
+"Rice University, Houston, TX, 77005, USA",29.71679145,-95.4047811339379,"Rice University, Stockton Drive, Houston, Harris County, Texas, 77005-1890, USA"
+"University of North Carolina at Chapel Hill, NC, USA",35.9113971,-79.0504529,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA"
+"University of Bonn, Germany",50.7338124,7.1022465,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland"
+"The Australian National University Canberra ACT 2601, Australia",-35.28121335,149.11665331324,"Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia"
+Dr. B. C. Roy Engineering College,23.54409755,87.342697070434,"Dr. B. C. Roy Engineering College, Lenin Sarani, Durgapur, Bānkurā, West Bengal, 713200, India"
+"National Taiwan University, Taiwan",25.01682835,121.538469235773,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣"
+Pennsylvania,40.9699889,-77.7278831,"Pennsylvania, USA"
+"Feng Chia University, Taichung, Taiwan",24.18005755,120.648360719503,"逢甲大學, 100, 文華路, 西平里, 西屯區, 臺中市, 40724, 臺灣"
+"Santa Clara University, Santa Clara, CA. 95053, USA",37.34820285,-121.935635412063,"Cowell Center, Accolti Way, Santa Clara, Santa Clara County, California, 95053, USA"
+"University of California, Irvine, USA",33.6431901,-117.84016493553,"University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA"
+"University of Zurich, Zurich, Switzerland",47.4968476,8.72981767380829,"ZHAW, Rosenstrasse, Heiligberg, Altstadt, Winterthur, Bezirk Winterthur, Zürich, 8400, Schweiz/Suisse/Svizzera/Svizra"
+"University of Basel, Switzerland",47.5612651,7.5752961,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra"
+College of Computer and Information Science,42.3192923,-83.2343465549018,"Computer & Information Science, John Montieth Boulevard, Dearborn, Wayne County, Michigan, 48128, USA"
+Dartmouth College,43.7047927,-72.2925909,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA"
+Math Institute,43.65879595,-79.3975504060101,"Fields Institute for Research in Math Science, 222, College Street, Kensington Market, Old Toronto, Toronto, Ontario, M5T 3A1, Canada"
+University of Surrey,51.24303255,-0.590013824660236,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK"
+"University of Amsterdam, Amsterdam, The Netherlands",52.3553655,4.9501644,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland"
+y National Institute of Advanced Industrial Science and Technology,36.05238585,140.118523607658,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本"
+Research Center,24.7261991,46.6365468966391,"مركز البحوث, طريق تركي الأول بن عبدالعزيز آل سعود, المحمدية, Al Muhammadiyah District حي المحمدية, Al Maather Municipality, الرياض, منطقة الرياض, 12371, السعودية"
+Illinois Institute of Technology,41.8361963,-87.6265591274291,"Illinois Institute of Technology, South State Street, Bronzeville, Chicago, Cook County, Illinois, 60616, USA"
+"University Library, Singapore",1.30604775,103.7728987705,"University Town, College Avenue East, Rochester Hill, Clementi, Southwest, 138608, Singapore"
+The University of Newcastle,-33.3578899,151.37834708231,"University of Newcastle Central Coast Campus, Technology Bridge, Ourimbah, Central Coast, NSW, 2258, Australia"
+New York University,40.72925325,-73.9962539360963,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA"
+"COMSATS Institute of Information Technology, Islamabad",33.65010145,73.1551494914791,"COMSATS Institute of Information Technology, Fence, Chak Shehzad, وفاقی دارالحکومت اسلام آباد, 45550, ‏پاکستان‎"
+University of Texas at Arlington,32.7283683,-97.112018348404,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA"
+"Rensselaer Polytechnic Institute, USA",42.7298459,-73.6795021620135,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA"
+"The American University in Cairo, Egypt",30.04287695,31.2366413899265,"الجامعة الأمريكية بالقاهرة, شارع القصر العينى, القاهرة القديمة, جاردن سيتي, القاهرة, محافظة القاهرة, 11582, مصر"
+"Macau University of Science and Technology, Macau",22.15263985,113.568032061523,"Universidade de Ciência e Tecnologia de Macau 澳門科技大學 Macau University of Science and Technology, 偉龍馬路 Avenida Wai Long, 氹仔Taipa, 氹仔舊城區 Vila de Taipa, 嘉模堂區 Nossa Senhora do Carmo, 氹仔 Taipa, 澳門 Macau, 853, 中国"
+CALIFORNIA INSTITUTE OF TECHNOLOGY,34.13710185,-118.125274866116,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA"
+Boston University,42.3504253,-71.1005611418395,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA"
+"Stanford University, Stanford, CA, USA",37.43131385,-122.169365354983,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA"
+"University of Amsterdam, Amsterdam, The",52.3553655,4.9501644,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland"
+"Ritsumeikan University, Japan",35.0333281,135.7249154,"立命館大学 (Ritsumeikan University), 衣笠宇多野線, 北区, 京都市, 京都府, 近畿地方, 6038577, 日本"
+The Robotics Institute,13.65450525,100.494231705059,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย"
+"Neurological Institute, USA",40.84211085,-73.9428460313244,"Neurological Institute of New York, Haven Avenue, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10032, USA"
+University of Illinois Urbana Champaign,40.11116745,-88.2258766477716,"B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA"
+University of Thessaloniki,40.62984145,22.9588934957528,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα"
+Korea Advanced Institute of Science and Technology,36.3697191,127.362537001151,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국"
+University Of Maryland,39.2899685,-76.6219610316858,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA"
+The American University in Cairo,30.04287695,31.2366413899265,"الجامعة الأمريكية بالقاهرة, شارع القصر العينى, القاهرة القديمة, جاردن سيتي, القاهرة, محافظة القاهرة, 11582, مصر"
+"University of Bath, Bath, United Kingdom",51.3791442,-2.3252332,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK"
+Cornell University,42.4505507,-76.4783512955428,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA"
+"Azad University, Qazvin, Iran",36.3173432,50.0367286,"پل دانشگاه آزاد, باراجین, پونک ۳, قزوین, بخش مرکزی, شهرستان قزوین, استان قزوین, ‏ایران‎"
+University of Warwick,52.3793131,-1.5604252,"University of Warwick, University Road, Kirby Corner, Cannon Park, Coventry, West Midlands Combined Authority, West Midlands, England, CV4 7AL, UK"
+"The Hong Kong Polytechnic University, Hong Kong, China",22.304572,114.179762852269,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国"
+University of Dundee,56.45796755,-2.98214831353755,"University of Dundee, Park Wynd, Law, Dundee, Dundee City, Scotland, DD1 4HN, UK"
+"University College London, London, UK",51.5231607,-0.1282037,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK"
+Institute for System Programming,55.7449881,37.6645042069876,"ИСП РАН, 25, улица Александра Солженицына, Швивая горка, Таганский район, Центральный административный округ, Москва, ЦФО, 109004, РФ"
+"Karlsruhe Institute of Technology, Karlsruhe, Germany",49.10184375,8.43312559623876,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland"
+"University of Plymouth, UK",50.3752501,-4.13927692297343,"Charles Seale-Hayne Library, Portland Square, Barbican, Plymouth, South West England, England, PL4 6AP, UK"
+"Manonmaniam Sundaranar University, Tirunelveli",8.76554685,77.65100444813,"Manonmaniam Sundaranar University, Tenkasi-Tirunelveli, Gandhi Nagar, Tirunelveli, Tirunelveli Kattabo, Tamil Nadu, 627808, India"
+"Information Technology University (ITU), Punjab, Lahore, Pakistan",31.4760299,74.3427526,"Information Technology University (ITU), Ferozepur Road, Sher Shah Block, Garden Town, Al Noor Town, Lahore District, پنجاب, 54600, ‏پاکستان‎"
+"Southeast University, Nanjing, China",32.0575279,118.786822520439,"SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国"
+Thapar University,30.35566105,76.3658164148513,"Thapar University, Hostel Road, Patiala, Punjab, 147001, India"
+"University of Michigan, Ann Arbor",42.2942142,-83.710038935096,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA"
+Tomsk Polytechnic University,56.46255985,84.955654946724,"Томский политехнический университет, улица Пирогова, Южная, Кировский район, Томск, городской округ Томск, Томская область, СФО, 634034, РФ"
+Queensland University of Technology,-27.47715625,153.028410039129,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia"
+"Imperial College London, UK",51.49887085,-0.175607973937072,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK"
+"Rutgers University, New Brunswick, NJ",40.50007595,-74.4457915242934,"Zimmerli Art Museum, 71, Hamilton Street, New Brunswick, Middlesex County, New Jersey, 08901-1248, USA"
+University of Miami,25.7173339,-80.2786688657706,"University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA"
+"The University of Sydney, NSW 2006, Australia",-33.88890695,151.189433661925,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia"
+University of Siena,22.4133862,114.210058,"大學 University, 澤祥街 Chak Cheung Street, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国"
+University of Witwatersrand,-26.1888813,28.0247907319205,"University of the Witwatersrand, Empire Road, Johannesburg Ward 60, Johannesburg, City of Johannesburg Metropolitan Municipality, Gauteng, 2001, South Africa"
+Sabanci University,40.8927159,29.3786332263582,"Sabanci Universitesi, Preveze Cad., Orta Mahallesi, Tepeören, Tuzla, İstanbul, Marmara Bölgesi, 34953, Türkiye"
+"Harbin Institute of Technology, Harbin, China",45.7413921,126.625527550394,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国"
+Institute of Communications Engineering,54.1718573,12.0784417,"Institut für Nachrichtentechnik, 31, Richard-Wagner-Straße, Warnemünde, Ortsbeirat 1 : Diedrichshagen,Seebad Warnemünde, Rostock, Mecklenburg-Vorpommern, 18119, Deutschland"
+Tampere University of Technology,61.44964205,23.8587746189096,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi"
+"KTH Royal Institute of Technology, Stockholm",59.34986645,18.0706321329842,"KTH, Teknikringen, Lärkstaden, Norra Djurgården, Östermalms stadsdelsområde, Sthlm, Stockholm, Stockholms län, Svealand, 114 28, Sverige"
+Mahanakorn University of Technology,13.84450465,100.856208183836,"มหาวิทยาลัยเทคโนโลยีมหานคร, 140, ถนนเชื่อมสัมพันธ์, กรุงเทพมหานคร, เขตหนองจอก, กรุงเทพมหานคร, 10530, ประเทศไทย"
+The City University of New York,40.8722825,-73.8948917141949,"Lehman College of the City University of New York, 250, Bedford Park Boulevard West, Bedford Park, The Bronx, Bronx County, NYC, New York, 10468, USA"
+Hong Kong Polytechnic University,22.304572,114.179762852269,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国"
+National Cheng Kung University,22.9991916,120.216251337909,"成大, 1, 大學路, 大學里, 前甲, 東區, 臺南市, 70101, 臺灣"
+"University of California, Berkeley",37.8687126,-122.255868148743,"Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA"
+The State University of New Jersey,40.51865195,-74.4409980124119,"Rutgers New Brunswick: Livingston Campus, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA"
+Tafresh University,34.68092465,50.0534135183902,"دانشگاه تفرش, پاسداران, خرازان, بخش مرکزی, شهرستان تفرش, استان مرکزی, ‏ایران‎"
+"Tsinghua University, China",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+"Tel-Aviv University, Israel",32.1119889,34.8045970204252,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל"
+Harvard and Massachusetts Institute,42.5268445,-71.6525446,"Massachusetts Correctional Institute Shirley Minimum Security Library, Harvard Road, Shaker Village, Shirley, Middlesex County, Massachusetts, 01464, USA"
+North Carolina Central University,35.97320905,-78.897550537484,"North Carolina Central University, George Street, Hayti, Durham, Durham County, North Carolina, 27707, USA"
+Karlsruhe Institute of,49.10184375,8.43312559623876,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland"
+University of Bristol,51.4584837,-2.60977519828372,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK"
+University of Twente,52.2380139,6.8566761,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland"
+"University of Amsterdam, The Netherlands",52.3553655,4.9501644,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland"
+The University of British Columbia,49.25839375,-123.246581610019,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada"
+"Capital Normal University, 100048, China",39.92864575,116.30104052087,"首都师范大学, 岭南路, 西冉村, 海淀区, 100048, 中国"
+Delft University of Technology,51.99882735,4.37396036815404,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland"
+The University of Texas at,32.3163078,-95.2536994379459,"The University of Texas at Tyler, 3900, University Boulevard, Tyler, Smith County, Texas, 75799, USA"
+University of Texas at,32.3163078,-95.2536994379459,"The University of Texas at Tyler, 3900, University Boulevard, Tyler, Smith County, Texas, 75799, USA"
+Dhaka University,23.7317915,90.3805625,"Faculty of Social Welfare, Dhaka University, Azimpur Koborsthan Road, বস্তি, হাজারীবাগ, ঢাকা, ঢাকা বিভাগ, 1950, বাংলাদেশ"
+"Nanjing University of Aeronautics and Astronautics, China",32.0373496,118.8140686,"南京航空航天大学, 御道街, 白下区, 新世纪广场, 秦淮区, 南京市, 江苏省, 210016, 中国"
+University of Manitoba,49.8091536,-97.133041790072,"University of Manitoba, Gillson Street, Normand Park, Saint Vital, Winnipeg, Manitoba, R3T 2N2, Canada"
+Australian National University,-37.81354365,144.971791681654,"Australian National University, 52, Collins Street, Melbourne City, City of Melbourne, Victoria, 3000, Australia"
+College of Computing,-6.1992922,39.3081862,"computing, Tunguu, Unguja Kusini, Zanzibar, 146, Tanzania"
+"RWTH Aachen University, Aachen, Germany",50.7791703,6.06728732851292,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland"
+"Bogazici University, Turkey",41.08327335,29.0503931951846,"Boğaziçi Üniversitesi Güney Yerleşkesi, Sehitlikdergahı Sokağı, Beşiktaş, İstanbul, Marmara Bölgesi, 33345, Türkiye"
+Cardi University,10.6435074,-61.4022996445292,"CARDI, University of the West Indies, Saint Augustine, Tunapuna-Piarco, 686, Trinidad and Tobago"
+"University of Chinese Academy of Sciences, Beijing, 100049, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+"College Park, United States",38.980666,-76.9369189,"College Park, Prince George's County, Maryland, USA"
+Institute of Road and,38.3836097,-81.7654665,"Institute, Kanawha County, West Virginia, 25112, USA"
+Australian Institute of Sport,-35.24737535,149.104454269689,"Australian Institute of Sport, Glenn McGrath Street, Bruce, Belconnen, Australian Capital Territory, 2617, Australia"
+"California Institute of Technology, Pasadena, California, USA",34.13710185,-118.125274866116,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA"
+"Vrije Universiteit Brussel, 1050 Brussels, Belgium",50.8223021,4.3967361,"Vrije Universiteit Brussel, 2, Boulevard de la Plaine - Pleinlaan, Ixelles - Elsene, Brussel-Hoofdstad - Bruxelles-Capitale, Région de Bruxelles-Capitale - Brussels Hoofdstedelijk Gewest, 1050, België / Belgique / Belgien"
+Florida International University,25.75533775,-80.3762889746807,"FIU, Southwest 14th Street, Sweetwater, University Park, Miami-Dade County, Florida, 33199, USA"
+"Northumbria University, Newcastle upon Tyne, NE1 8ST, UK",54.9781026,-1.6067699,"Northumbria University, Northumberland Road, Cradlewell, Haymarket, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE1 8SG, UK"
+"Beijing, China",39.906217,116.3912757,"北京市, 东城区, 北京市, 100010, 中国"
+"University of Oxford, Oxford, United Kingdom",51.7534538,-1.25400997048855,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK"
+of bilkent university,39.8720489,32.7539515466323,"Bilkent Üniversitesi, 3. Cadde, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye"
+"College Park, MD 20742 USA",38.980666,-76.9369189,"College Park, Prince George's County, Maryland, USA"
+"University of Washington, Seattle, USA",47.6543238,-122.308008943203,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA"
+"The Australian National University, Canberra, Australia",-35.28121335,149.11665331324,"Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia"
+University of Chinese Academy of Sciences,39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+"Keio University, Yokohama 223-8522, Japan",35.55536215,139.654582444136,"慶應義塾大学 (矢上キャンパス), 理工坂, 港北区, 横浜市, 神奈川県, 関東地方, 223-8522, 日本"
+Jacobs University,53.4129148,-2.96897915394896,"Liverpool Hope University, Shaw Street, Everton, Liverpool, North West England, England, L6 1HP, UK"
+"George Mason University, Fairfax Virginia, USA",38.83133325,-77.3079883887912,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA"
+"Arizona State University, AZ, USA",33.30715065,-111.676531568996,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA"
+Kobe University,34.7275714,135.237099997686,"神戸大学, 灘三田線, 灘区, 神戸市, 兵庫県, 近畿地方, 657-00027, 日本"
+University of Cape Town,-33.95828745,18.4599734888018,"University of Cape Town, Engineering Mall, Cape Town Ward 59, Cape Town, City of Cape Town, Western Cape, CAPE TOWN, South Africa"
+Sharif University of Technology,35.7036227,51.351250969544,"دانشگاه صنعتی شریف, خیابان آزادی, زنجان, منطقه ۹ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, 14588, ‏ایران‎"
+"College of Engineering, Pune, India",18.52930005,73.8568253702551,"College of Engineering, Pune, NH753F, Mangalwar Peth, Pune, Pune District, Maharashtra, 411011, India"
+"Central Tehran Branch, Azad University",35.753318,51.370631,"دانشگاه آزاد شعبه مرکزی تربیت بدنی, بلوار ایران زمین, شهرک غرب, منطقه ۲ شهر تهران, تهران, بخش رودبارقصران, شهرستان شمیرانات, استان تهران, 14658, ‏ایران‎"
+"University of the Basque Country, San Sebastian, Spain",43.30927695,-2.01066784661227,"Euskal Herriko Unibertsitatea, Ibaeta Campusa, Paseo Arriola pasealekua, Ibaeta, Donostia/San Sebastián, Donostialdea, Gipuzkoa, Euskadi, 20008, España"
+"Kyung Hee University, South Korea",37.5948716,127.0530887,"경희사이버대학교, 26, 경희대로, 회기동, 동대문구, 서울특별시, 02447, 대한민국"
+University of California,37.87631055,-122.238859269443,"UC Berkeley, Centennial Drive, Oakland, Alameda County, California, 94720-1076, USA"
+"Tongji University, Shanghai 201804, China",31.28473925,121.496949085887,"同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国"
+"University of Pittsburgh, Pittsburgh, PA",40.4495417,-79.8957457221781,"Visitor Parking, Thomas Boulevard, Homewood, Point Breeze North, Wilkinsburg, Allegheny County, Pennsylvania, 15208, USA"
+"University of Dammam, Saudi Arabia",26.39793625,50.1980792430511,"University of Dammam, King Faisal Rd, العقربية, الخبر, المنطقة الشرقية, ٣١٩٥٢, السعودية"
+"University of Alberta, Edmonton, Canada",53.5238572,-113.522826652346,"University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada"
+"University, Xi an Shaanxi Province, Xi an 710049, China",34.2707834,108.94449949951,"西五路, 新城区, 新城区 (Xincheng), 西安市, 陕西省, 710003, 中国"
+"University of Oxford, UK",51.7534538,-1.25400997048855,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK"
+"Aristotle University of Thessaloniki, GR-54124 Thessaloniki, Greece",40.62984145,22.9588934957528,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα"
+"Dalian University of Technology, Dalian 116024, China",38.88140235,121.522810980755,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国"
+"Jahangirnagar University, Savar, Dhaka 1342, Bangladesh",23.88277575,90.2671009927283,"Jahangirnagar University, 1342, Dhaka - Aricha Highway, Nobinagar, সাভার উপজেলা, ঢাকা জেলা, ঢাকা বিভাগ, 1342, বাংলাদেশ"
+THE UNIVERSITY OF ARIZONA,32.2351726,-110.950958317648,"University of Arizona, North Highland Avenue, Rincon Heights, Barrio Viejo, Tucson, Pima County, Arizona, 85721, USA"
+"SASTRA University, Thanjavur, Tamil Nadu, India",10.9628655,79.3853065130097,"SASTRA University, SRC Campus, Big Bazaar Street, கும்பகோணம், Thanjavur district, Tamil Nadu, 612001, India"
+"Carnegie Mellon University, Pittsburgh PA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+National University of Defense Technology,28.2290209,112.994832044032,"国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国"
+University of Perugia,49.2622421,-123.2450052,"Caffe Perugia, 2350, Health Sciences Mall, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada"
+University of Malta,35.9023226,14.4834189,"University of Malta, Ring Road, Japanese Garden, L-Imsida, Malta, MSD 9027, Malta"
+"University of California, Merced",37.36566745,-120.421588883632,"University of California, Merced, Ansel Adams Road, Merced County, California, USA"
+IDIAP RESEARCH INSTITUTE,46.109237,7.08453548522408,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra"
+"Amirkabir University of Technology, Tehran",35.704514,51.4097205774739,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎"
+Lomonosov Moscow State University,55.70229715,37.5317977694291,"МГУ, улица Академика Хохлова, Московский государственный университет им. М. В. Ломоносова, район Раменки, Западный административный округ, Москва, ЦФО, 119234, РФ"
+"University of Science and Technology of China, Hefei, 230027, China",31.83907195,117.264207478576,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国"
+"Boston College, USA",42.3354481,-71.1681386402306,"Boston College, 140, Commonwealth Avenue, Chestnut Hill, Newton, Middlesex County, Massachusetts, 02467, USA"
+"Harvard University, Cambridge, MA",42.36782045,-71.1266665287448,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA"
+The Education University of Hong Kong,22.46935655,114.19474193618,"香港教育大學 The Education University of Hong Kong, 露屏路 Lo Ping Road, 鳳園 Fung Yuen, 下坑 Ha Hang, 新界 New Territories, HK, DD5 1119, 中国"
+"University of Canterbury, New Zealand",-43.5240528,172.580306253669,"University of Canterbury, Uni-Cycle, Ilam, Christchurch, Christchurch City, Canterbury, 8040, New Zealand/Aotearoa"
+"Southwest Jiaotong University, Chengdu, China",30.697847,104.0520811,"西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国"
+"University of Michigan, Ann Arbor, USA",42.2942142,-83.710038935096,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA"
+"Purdue University, West Lafayette, IN 47907, USA",40.4262569,-86.9157551,"Mathematical Sciences Library, 105, University Street, West Lafayette, Tippecanoe County, Indiana, 47907, USA"
+"Southwest Jiaotong University, Chengdu, P.R. China",30.697847,104.0520811,"西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国"
+University Of California San Diego,32.87935255,-117.231100493855,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA"
+Oakland University,42.66663325,-83.2065575175658,"Oakland University, 201, Meadow Brook Road, Rochester Hills, Oakland County, Michigan, 48309-4401, USA"
+Virginia Polytechnic Institute and State University,37.21872455,-80.4254251869494,"Virginia Polytechnic Institute and State University, Duck Pond Drive, Blacksburg, Montgomery County, Virginia, 24061-9517, USA"
+"University of Twente, Netherlands",52.2380139,6.8566761,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland"
+"University of Nottingham, Nottingham, UK",52.9387428,-1.20029569274574,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK"
+"University of Florida, Gainesville, FL, 32611, USA",29.6447739,-82.3575193392276,"University of Florida, Museum Road, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32601, USA"
+Zaragoza University,41.6406218,-0.900793992168927,"Colegio Mayor Universitario Santa Isabel, Calle de Domingo Miral, Romareda, Zaragoza, Aragón, 50009, España"
+Nagoya University,43.53750985,143.60768225282,"SuperDARN (Hokkaido West), 太辛第1支線林道, 陸別町, 足寄郡, 十勝総合振興局, 北海道, 北海道地方, 日本"
+"University of Massachusetts, Amherst, MA",42.3889785,-72.5286987,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA"
+"University of Chinese Academy of Sciences, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+University of,29.3758342,71.7528712910287,"University of ..., University Road, بہاولپور, Bahāwalpur District, پنجاب, 63100, ‏پاکستان‎"
+"University of Karlsruhe, Germany",49.00664235,8.39405151637065,"Karlshochschule International University, 36-38, Karlstraße, Innenstadt-West Westlicher Teil, Innenstadt-West, Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76133, Deutschland"
+State University of New York at Buffalo,42.95485245,-78.8178238693065,"University at Buffalo, The State University of New York, South Campus, Norton Circle, University Heights, Buffalo, Erie County, New York, 14226, USA"
+"University of Massachusetts, Amherst MA, USA",42.3889785,-72.5286987,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA"
+"Huazhong University of Science and Technology, Wuhan, China",30.5097537,114.4062881,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国"
+"Utah State University, Logan UT",41.7411504,-111.8122309,"Utah State University, Champ Drive, Logan, Cache County, Utah, 84322, USA"
+"Carnegie Mellon University, Pittsburgh, USA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+"Stanford University, CA",37.43131385,-122.169365354983,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA"
+"University Library, Singapore",1.30604775,103.7728987705,"University Town, College Avenue East, Rochester Hill, Clementi, Southwest, 138608, Singapore"
+State University of New York at Buffalo,42.95485245,-78.8178238693065,"University at Buffalo, The State University of New York, South Campus, Norton Circle, University Heights, Buffalo, Erie County, New York, 14226, USA"
+State University of New York at Binghamton,42.08779975,-75.9706606561486,"State University of New York at Binghamton, East Drive, Hinman, Willow Point, Vestal Town, Broome County, New York, 13790, USA"
+Nagoya University,43.53750985,143.60768225282,"SuperDARN (Hokkaido West), 太辛第1支線林道, 陸別町, 足寄郡, 十勝総合振興局, 北海道, 北海道地方, 日本"
+College of Computer and Information Science,42.3192923,-83.2343465549018,"Computer & Information Science, John Montieth Boulevard, Dearborn, Wayne County, Michigan, 48128, USA"
+Sharif University of Technology,35.7036227,51.351250969544,"دانشگاه صنعتی شریف, خیابان آزادی, زنجان, منطقه ۹ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, 14588, ‏ایران‎"
+Eskisehir Osmangazi University,39.7487516,30.4765307102195,"Eskişehir Osmangazi Üniversitesi Meşelik Yerleşkesi, Kütahya-Eskişehir yolu, Sazova Mahallesi, Karagözler, Tepebaşı, Eskişehir, İç Anadolu Bölgesi, 26160, Türkiye"
+"The Hong Kong Polytechnic University, Hong Kong, China",22.304572,114.179762852269,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国"
+University College London,51.5231607,-0.1282037,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK"
+"Imperial College London, UK",51.49887085,-0.175607973937072,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK"
+"Michigan State University, East Lansing, MI 48824, USA",42.718568,-84.4779157093052,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA"
+"Cardiff University, UK",51.4879961,-3.17969747443907,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK"
+The University of British Columbia,49.25839375,-123.246581610019,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada"
+The Education University of Hong Kong,22.46935655,114.19474193618,"香港教育大學 The Education University of Hong Kong, 露屏路 Lo Ping Road, 鳳園 Fung Yuen, 下坑 Ha Hang, 新界 New Territories, HK, DD5 1119, 中国"
+Massachusetts Institute of Technology,42.3583961,-71.0956778766393,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+"The University of Sydney, NSW 2006, Australia",-33.88890695,151.189433661925,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia"
+Thapar University,30.35566105,76.3658164148513,"Thapar University, Hostel Road, Patiala, Punjab, 147001, India"
+University of Texas at Austin,30.284151,-97.7319559808022,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA"
+THE UNIVERSITY OF ARIZONA,32.2351726,-110.950958317648,"University of Arizona, North Highland Avenue, Rincon Heights, Barrio Viejo, Tucson, Pima County, Arizona, 85721, USA"
+"University of Miami, USA",25.7173339,-80.2786688657706,"University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA"
+"Dalian University of Technology, Dalian 116024, China",38.88140235,121.522810980755,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国"
+"University of Florida, Gainesville, FL, 32611, USA",29.6447739,-82.3575193392276,"University of Florida, Museum Road, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32601, USA"
+Beckman Institute,40.11571585,-88.2275077179639,"Beckman Institute, The Presidents' Walk, Urbana, Champaign County, Illinois, 61801-2341, USA"
+Wolfson College,51.7711076,-1.25361700492597,"Wolfson College, Linton Road, Norham Manor, Oxford, Oxon, South East, England, OX2 6UD, UK"
+"University of Massachusetts, Amherst, MA",42.3889785,-72.5286987,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA"
+"Amirkabir University of Technology, Tehran",35.704514,51.4097205774739,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎"
+University of Witwatersrand,-26.1888813,28.0247907319205,"University of the Witwatersrand, Empire Road, Johannesburg Ward 60, Johannesburg, City of Johannesburg Metropolitan Municipality, Gauteng, 2001, South Africa"
+University of Thessaloniki,40.62984145,22.9588934957528,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα"
+York University,43.7743911,-79.5048108538813,"York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada"
+"University of Science and Technology of China, Hefei, 230027, China",31.83907195,117.264207478576,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国"
+"College of Engineering, Pune, India",18.52930005,73.8568253702551,"College of Engineering, Pune, NH753F, Mangalwar Peth, Pune, Pune District, Maharashtra, 411011, India"
+"Rensselaer Polytechnic Institute, USA",42.7298459,-73.6795021620135,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA"
+"University of Massachusetts, Amherst MA, USA",42.3889785,-72.5286987,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA"
+"National Institute of Technology, Durgapur, West Bengal, India",23.54869625,87.291057119111,"National Institute Of Technology, Durgapur, Priyadarshini Indira Sarani, Durgapur, Bānkurā, West Bengal, 713209, India"
+"Jahangirnagar University, Savar, Dhaka 1342, Bangladesh",23.88277575,90.2671009927283,"Jahangirnagar University, 1342, Dhaka - Aricha Highway, Nobinagar, সাভার উপজেলা, ঢাকা জেলা, ঢাকা বিভাগ, 1342, বাংলাদেশ"
+"Carnegie Mellon University, Pittsburgh, USA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+National University of Defense Technology,28.2290209,112.994832044032,"国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国"
+Cornell University,42.4505507,-76.4783512955428,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA"
+University of Canberra,-35.23656905,149.084469935058,"University of Canberra, University Drive, Bruce, Belconnen, Australian Capital Territory, 2617, Australia"
+"University of Zurich, Zurich, Switzerland",47.4968476,8.72981767380829,"ZHAW, Rosenstrasse, Heiligberg, Altstadt, Winterthur, Bezirk Winterthur, Zürich, 8400, Schweiz/Suisse/Svizzera/Svizra"
+University of North Carolina,35.90503535,-79.0477532652511,"University of North Carolina, Emergency Room Drive, Chapel Hill, Orange County, North Carolina, 27599, USA"
+"University of Pittsburgh, Pittsburgh, PA",40.4495417,-79.8957457221781,"Visitor Parking, Thomas Boulevard, Homewood, Point Breeze North, Wilkinsburg, Allegheny County, Pennsylvania, 15208, USA"
+"University of Bonn, Germany",50.7338124,7.1022465,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland"
+"The University of York, Heslington, York YO10 5DD, United Kingdom",53.94830175,-1.05154975017361,"Campus Central Car Park, University Road, Heslington, York, Yorkshire and the Humber, England, YO10 5NH, UK"
+"University of Wisconsin-Madison, Madison, WI, USA",43.0705257,-89.4059387,"UW Geology Museum, 1215, West Dayton Street, South Campus, Madison, Dane County, Wisconsin, 53715, USA"
+"Tampere University of Technology, Tampere 33720, Finland",61.44964205,23.8587746189096,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi"
+Sharif University of Technology,35.7036227,51.351250969544,"دانشگاه صنعتی شریف, خیابان آزادی, زنجان, منطقه ۹ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, 14588, ‏ایران‎"
+"The Australian National University Canberra ACT 2601, Australia",-35.28121335,149.11665331324,"Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia"
+Tampere University of Technology,61.44964205,23.8587746189096,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi"
+University of Ljubljana,46.0501558,14.4690732689076,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija"
+Delft University of Technology,51.99882735,4.37396036815404,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland"
+University of Thessaloniki,40.62984145,22.9588934957528,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα"
+"Harbin Institute of Technology, Harbin, China",45.7413921,126.625527550394,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国"
+CALIFORNIA INSTITUTE OF TECHNOLOGY,34.13710185,-118.125274866116,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA"
+"Arizona State University, Tempe AZ",33.4206602,-111.932634924965,"Arizona State University, Palm Walk, Tempe, Maricopa County, Arizona, 85287, USA"
+"La Trobe University, Australia",-36.7784754,144.298047,"La Trobe University, Keck Street, Flora Hill, Bendigo, City of Greater Bendigo, Loddon Mallee, Victoria, 3550, Australia"
+University of Illinois at Chicago,41.86898915,-87.6485625597018,"University of Illinois at Chicago, West Taylor Street, Greektown, Chicago, Cook County, Illinois, 60607, USA"
+"University of Cambridge, United Kingdom",52.17638955,0.143088815415187,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK"
+"Northeastern University, Boston, MA, USA",42.3383668,-71.0879352428284,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA"
+Dhaka University,23.7317915,90.3805625,"Faculty of Social Welfare, Dhaka University, Azimpur Koborsthan Road, বস্তি, হাজারীবাগ, ঢাকা, ঢাকা বিভাগ, 1950, বাংলাদেশ"
+"University of Trento, Italy",46.0658836,11.1159894,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia"
+McGovern Institute,42.3626295,-71.0914481,"McGovern Institute for Brain Research (MIT), Main Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA"
+University of Bonn,50.7338124,7.1022465,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland"
+IDIAP Research Institute,46.109237,7.08453548522408,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra"
+"Carnegie Mellon University, Pittsburgh, USA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+University of Dundee,56.45796755,-2.98214831353755,"University of Dundee, Park Wynd, Law, Dundee, Dundee City, Scotland, DD1 4HN, UK"
+University of Surrey,51.24303255,-0.590013824660236,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK"
+"The Australian National University, Canberra, Australia",-35.28121335,149.11665331324,"Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia"
+Boston University,42.3504253,-71.1005611418395,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA"
+University of Cape Town,-33.95828745,18.4599734888018,"University of Cape Town, Engineering Mall, Cape Town Ward 59, Cape Town, City of Cape Town, Western Cape, CAPE TOWN, South Africa"
+Harvard University,42.36782045,-71.1266665287448,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA"
+"Harvard University, Cambridge, MA",42.36782045,-71.1266665287448,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA"
+The State University of New Jersey,40.51865195,-74.4409980124119,"Rutgers New Brunswick: Livingston Campus, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA"
+"Rutgers University, Piscataway, NJ",40.5234675,-74.436975,"The Rock Cafe, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA"
+Wolfson College,51.7711076,-1.25361700492597,"Wolfson College, Linton Road, Norham Manor, Oxford, Oxon, South East, England, OX2 6UD, UK"
+"Rensselaer Polytechnic Institute, USA",42.7298459,-73.6795021620135,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA"
+"Tamkang University, Taipei, Taiwan",25.17500615,121.450767514156,"淡江大學, 151, 英專路, 中興里, 鬼仔坑, 淡水區, 新北市, 25137, 臺灣"
+"COMSATS Institute of Information Technology, Islamabad",33.65010145,73.1551494914791,"COMSATS Institute of Information Technology, Fence, Chak Shehzad, وفاقی دارالحکومت اسلام آباد, 45550, ‏پاکستان‎"
+The City University of New York,40.8722825,-73.8948917141949,"Lehman College of the City University of New York, 250, Bedford Park Boulevard West, Bedford Park, The Bronx, Bronx County, NYC, New York, 10468, USA"
+McMaster University,43.26336945,-79.9180968401692,"McMaster University, Westdale, Hamilton, Ontario, Canada"
+University of British Columbia,49.25839375,-123.246581610019,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada"
+"University of North Carolina at Chapel Hill, NC, USA",35.9113971,-79.0504529,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA"
+"University of Plymouth, UK",50.3752501,-4.13927692297343,"Charles Seale-Hayne Library, Portland Square, Barbican, Plymouth, South West England, England, PL4 6AP, UK"
+College of Computer and Information Science,42.3192923,-83.2343465549018,"Computer & Information Science, John Montieth Boulevard, Dearborn, Wayne County, Michigan, 48128, USA"
+y National Institute of Advanced Industrial Science and Technology,36.05238585,140.118523607658,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本"
+"Cardiff University, UK",51.4879961,-3.17969747443907,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK"
+"University of Chinese Academy of Sciences, Beijing, 100049, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+Marquette University,43.03889625,-87.9315544990507,"Marquette University, West Wisconsin Avenue, University Hill, Milwaukee, Milwaukee County, Wisconsin, 53226, USA"
+"Dalian University of Technology, Dalian 116024, China",38.88140235,121.522810980755,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国"
+"University of Dammam, Saudi Arabia",26.39793625,50.1980792430511,"University of Dammam, King Faisal Rd, العقربية, الخبر, المنطقة الشرقية, ٣١٩٥٢, السعودية"
+University Politehnica of Bucharest,44.43918115,26.0504456538413,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România"
+Dr. B. C. Roy Engineering College,23.54409755,87.342697070434,"Dr. B. C. Roy Engineering College, Lenin Sarani, Durgapur, Bānkurā, West Bengal, 713200, India"
+University of Perugia,49.2622421,-123.2450052,"Caffe Perugia, 2350, Health Sciences Mall, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada"
+of bilkent university,39.8720489,32.7539515466323,"Bilkent Üniversitesi, 3. Cadde, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye"
+Eskisehir Osmangazi University,39.7487516,30.4765307102195,"Eskişehir Osmangazi Üniversitesi Meşelik Yerleşkesi, Kütahya-Eskişehir yolu, Sazova Mahallesi, Karagözler, Tepebaşı, Eskişehir, İç Anadolu Bölgesi, 26160, Türkiye"
+"University of California, Irvine, USA",33.6431901,-117.84016493553,"University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA"
+University of Miami,25.7173339,-80.2786688657706,"University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA"
+"Tel-Aviv University, Israel",32.1119889,34.8045970204252,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל"
+"Central Tehran Branch, Azad University",35.753318,51.370631,"دانشگاه آزاد شعبه مرکزی تربیت بدنی, بلوار ایران زمین, شهرک غرب, منطقه ۲ شهر تهران, تهران, بخش رودبارقصران, شهرستان شمیرانات, استان تهران, 14658, ‏ایران‎"
+"Rheinische-Friedrich-Wilhelms University, Bonn, Germany",50.7338124,7.1022465,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland"
+Carleton University,45.3860843,-75.6953926739404,"Carleton University, 1125, Colonel By Drive, Billings Bridge, Capital, Ottawa, Ontario, K1S 5B7, Canada"
+"University of Amsterdam, Amsterdam, The",52.3553655,4.9501644,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland"
+The University of Newcastle,-33.3578899,151.37834708231,"University of Newcastle Central Coast Campus, Technology Bridge, Ourimbah, Central Coast, NSW, 2258, Australia"
+"National Institute of Technology, Durgapur, West Bengal, India",23.54869625,87.291057119111,"National Institute Of Technology, Durgapur, Priyadarshini Indira Sarani, Durgapur, Bānkurā, West Bengal, 713209, India"
+"The Chinese University of Hong Kong, New Territories, Hong Kong",22.413656,114.2099405,"香港中文大學 Chinese University of Hong Kong, 車站路 Station Road, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国"
+Dartmouth College,43.7047927,-72.2925909,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA"
+"Information, Keio University",35.5416969,139.6347184,"綱島市民の森, けつわり坂, 港北区, 横浜市, 神奈川県, 関東地方, 223-0053, 日本"
+"University of Miami, USA",25.7173339,-80.2786688657706,"University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA"
+Western Kentucky University,36.9845317,-86.4576443016944,"Western Kentucky University, Avenue of Champions, Bowling Green, Warren County, Kentucky, 42101, USA"
+Cardi University,10.6435074,-61.4022996445292,"CARDI, University of the West Indies, Saint Augustine, Tunapuna-Piarco, 686, Trinidad and Tobago"
+"University of Twente, Netherlands",52.2380139,6.8566761,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland"
+"University of Massachusetts, Amherst MA, USA",42.3889785,-72.5286987,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA"
+"Carnegie Mellon University Pittsburgh, PA, USA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+"Queensland University of Technology, Brisbane, QLD, Australia",-27.47715625,153.028410039129,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia"
+"Stony Brook University, Stony Brook, USA",40.9153196,-73.1270626,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA"
+"University of Tsukuba, Japan",36.1112058,140.1055176,"University of Tsukuba, つばき通り, Kananemoto-satsukabe village, つくば市, 茨城県, 関東地方, 305-8377, 日本"
+"Boston College, USA",42.3354481,-71.1681386402306,"Boston College, 140, Commonwealth Avenue, Chestnut Hill, Newton, Middlesex County, Massachusetts, 02467, USA"
+"University of Massachusetts, Amherst, MA",42.3889785,-72.5286987,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA"
+"Stanford University, Stanford, CA, USA",37.43131385,-122.169365354983,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA"
+University of Campinas (Unicamp,-22.8224781,-47.0642599309425,"Universidade Estadual de Campinas - UNICAMP, Rua Josué de Castro, Barão Geraldo, Campinas, Microrregião de Campinas, RMC, Mesorregião de Campinas, SP, Região Sudeste, 13083-970, Brasil"
+University of,29.3758342,71.7528712910287,"University of ..., University Road, بہاولپور, Bahāwalpur District, پنجاب, 63100, ‏پاکستان‎"
+Columbia University in the City of New York,40.8071772,-73.9625279772072,"Columbia University In The City Of New York, College Walk, Morningside Heights, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10027, USA"
+College of Engineering and Computer Science,25.7589624,-80.3738881489383,"ECS, University Drive, Sweetwater, Lil Abner Mobile Home Park, Miami-Dade County, Florida, 33199, USA"
+York University,43.7743911,-79.5048108538813,"York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada"
+"University of Amsterdam, The Netherlands",52.3553655,4.9501644,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland"
+"Nanjing University of Aeronautics and Astronautics, China",32.0373496,118.8140686,"南京航空航天大学, 御道街, 白下区, 新世纪广场, 秦淮区, 南京市, 江苏省, 210016, 中国"
+"Kyoto University, Kyoto, Japan",35.0274996,135.781545126193,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本"
+Oakland University,42.66663325,-83.2065575175658,"Oakland University, 201, Meadow Brook Road, Rochester Hills, Oakland County, Michigan, 48309-4401, USA"
+University of Venezia,45.4312742,12.3265377,"University, Fondamenta Toffetti, Dorsoduro, Venezia-Murano-Burano, Venezia, VE, VEN, 30123, Italia"
+The Robotics Institute,13.65450525,100.494231705059,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย"
+"Manonmaniam Sundaranar University, Tirunelveli",8.76554685,77.65100444813,"Manonmaniam Sundaranar University, Tenkasi-Tirunelveli, Gandhi Nagar, Tirunelveli, Tirunelveli Kattabo, Tamil Nadu, 627808, India"
+Jacobs University,53.4129148,-2.96897915394896,"Liverpool Hope University, Shaw Street, Everton, Liverpool, North West England, England, L6 1HP, UK"
+"Tsinghua University, China",40.00229045,116.320989081778,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国"
+"College Park, MD 20742 USA",38.980666,-76.9369189,"College Park, Prince George's County, Maryland, USA"
+"University of Washington, Seattle, USA",47.6543238,-122.308008943203,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA"
+Queensland University of Technology,-27.47715625,153.028410039129,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia"
+Tomsk Polytechnic University,56.46255985,84.955654946724,"Томский политехнический университет, улица Пирогова, Южная, Кировский район, Томск, городской округ Томск, Томская область, СФО, 634034, РФ"
+"The University of Electro-Communications, Tokyo",35.6572957,139.542558677257,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本"
+University of Texas at,32.3163078,-95.2536994379459,"The University of Texas at Tyler, 3900, University Boulevard, Tyler, Smith County, Texas, 75799, USA"
+Nagoya University,43.53750985,143.60768225282,"SuperDARN (Hokkaido West), 太辛第1支線林道, 陸別町, 足寄郡, 十勝総合振興局, 北海道, 北海道地方, 日本"
+"California Institute of Technology, Pasadena, California, USA",34.13710185,-118.125274866116,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA"
+The University of Texas,32.3163078,-95.2536994379459,"The University of Texas at Tyler, 3900, University Boulevard, Tyler, Smith County, Texas, 75799, USA"
+University of Malta,35.9023226,14.4834189,"University of Malta, Ring Road, Japanese Garden, L-Imsida, Malta, MSD 9027, Malta"
+Thapar University,30.35566105,76.3658164148513,"Thapar University, Hostel Road, Patiala, Punjab, 147001, India"
+"The Hebrew University of Jerusalem, Israel",31.7918555,35.244723,"האוניברסיטה העברית בירושלים, Reagan Plaza, קרית מנחם בגין, הר הצופים, ירושלים, מחוז ירושלים, NO, ישראל"
+"Purdue University, West Lafayette, Indiana, 47906, USA",40.4319722,-86.923893679845,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA"
+New York University,40.72925325,-73.9962539360963,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA"
+University of Bath,51.3791442,-2.3252332,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK"
+Max Planck Institute for Biological Cybernetics,48.5369125,9.05922532743396,"Max-Planck-Institut für Biologische Kybernetik, 8, Max-Planck-Ring, Max-Planck-Institut, Wanne, Tübingen, Landkreis Tübingen, Regierungsbezirk Tübingen, Baden-Württemberg, 72076, Deutschland"
+"King Saud University, Riyadh, Saudi Arabia",24.7246403,46.623350123456,"King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية"
+"The University of York, UK",53.94540365,-1.0313887829649,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK"
+Zaragoza University,41.6406218,-0.900793992168927,"Colegio Mayor Universitario Santa Isabel, Calle de Domingo Miral, Romareda, Zaragoza, Aragón, 50009, España"
+"College Park, United States",38.980666,-76.9369189,"College Park, Prince George's County, Maryland, USA"
+The University of Texas at,32.3163078,-95.2536994379459,"The University of Texas at Tyler, 3900, University Boulevard, Tyler, Smith County, Texas, 75799, USA"
+University of Illinois,40.11116745,-88.2258766477716,"B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA"
+University of Manitoba,49.8091536,-97.133041790072,"University of Manitoba, Gillson Street, Normand Park, Saint Vital, Winnipeg, Manitoba, R3T 2N2, Canada"
+University of Bath,51.3791442,-2.3252332,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK"
+"University of Nevada, Reno, Reno, NV, USA",39.5469449,-119.813465660936,"Orange 1, Evans Avenue, Reno, Washoe County, Nevada, 89557, USA"
+"The University of York, UK",53.94540365,-1.0313887829649,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK"
+"University of Amsterdam, Amsterdam, The",52.3553655,4.9501644,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland"
+"Cardiff University, UK",51.4879961,-3.17969747443907,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK"
+THE UNIVERSITY OF ARIZONA,32.2351726,-110.950958317648,"University of Arizona, North Highland Avenue, Rincon Heights, Barrio Viejo, Tucson, Pima County, Arizona, 85721, USA"
+"Rice University, Houston, TX, 77005, USA",29.71679145,-95.4047811339379,"Rice University, Stockton Drive, Houston, Harris County, Texas, 77005-1890, USA"
+"University of Bath, Bath, United Kingdom",51.3791442,-2.3252332,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK"
+"University Library, Singapore",1.30604775,103.7728987705,"University Town, College Avenue East, Rochester Hill, Clementi, Southwest, 138608, Singapore"
+University of Venezia,45.4312742,12.3265377,"University, Fondamenta Toffetti, Dorsoduro, Venezia-Murano-Burano, Venezia, VE, VEN, 30123, Italia"
+Boston College,42.3354481,-71.1681386402306,"Boston College, 140, Commonwealth Avenue, Chestnut Hill, Newton, Middlesex County, Massachusetts, 02467, USA"
+University of Miami,25.7173339,-80.2786688657706,"University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA"
+University of Bonn,50.7338124,7.1022465,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland"
+"SASTRA University, Thanjavur, Tamil Nadu, India",10.9628655,79.3853065130097,"SASTRA University, SRC Campus, Big Bazaar Street, கும்பகோணம், Thanjavur district, Tamil Nadu, 612001, India"
+Harvard University,42.36782045,-71.1266665287448,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA"
+Thapar University,30.35566105,76.3658164148513,"Thapar University, Hostel Road, Patiala, Punjab, 147001, India"
+"University of Karlsruhe, Germany",49.00664235,8.39405151637065,"Karlshochschule International University, 36-38, Karlstraße, Innenstadt-West Westlicher Teil, Innenstadt-West, Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76133, Deutschland"
+"Vrije Universiteit Brussel, 1050 Brussels, Belgium",50.8223021,4.3967361,"Vrije Universiteit Brussel, 2, Boulevard de la Plaine - Pleinlaan, Ixelles - Elsene, Brussel-Hoofdstad - Bruxelles-Capitale, Région de Bruxelles-Capitale - Brussels Hoofdstedelijk Gewest, 1050, België / Belgique / Belgien"
+Waseda University,33.8898728,130.708562047107,"早稲田大学 北九州キャンパス, 2-2, 有毛引野線, 八幡西区, 北九州市, 福岡県, 九州地方, 808-0135, 日本"
+Illinois Institute of Technology,41.8361963,-87.6265591274291,"Illinois Institute of Technology, South State Street, Bronzeville, Chicago, Cook County, Illinois, 60616, USA"
+Rochester Institute of Technology,43.08250655,-77.6712166264273,"Rochester Institute of Technology (RIT), 1, Lomb Memorial Drive, Bailey, Henrietta Town, Monroe County, New York, 14623, USA"
+Oakland University,42.66663325,-83.2065575175658,"Oakland University, 201, Meadow Brook Road, Rochester Hills, Oakland County, Michigan, 48309-4401, USA"
+Wolfson College,51.7711076,-1.25361700492597,"Wolfson College, Linton Road, Norham Manor, Oxford, Oxon, South East, England, OX2 6UD, UK"
+"The Australian National University Canberra ACT 2601, Australia",-35.28121335,149.11665331324,"Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia"
+"RWTH Aachen University, Aachen, Germany",50.7791703,6.06728732851292,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland"
+Weizmann Institute of Science,31.9078499,34.8133409244421,"מכון ויצמן למדע, שדרת מרכוס זיו, מעונות שיין, אחוזות הנשיא, רחובות, מחוז המרכז, NO, ישראל"
+"University of Bonn, Germany",50.7338124,7.1022465,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland"
+"Southwest Jiaotong University, Chengdu, P.R. China",30.697847,104.0520811,"西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国"
+Queensland University of Technology,-27.47715625,153.028410039129,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia"
+Karlsruhe Institute of Technology,49.10184375,8.43312559623876,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland"
+"Michigan State University, East Lansing, MI 48824, USA",42.718568,-84.4779157093052,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA"
+"Istanbul Technical University, Turkey",41.10427915,29.022311592943,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye"
+"University of Science and Technology of China, Hefei 230026, P. R. China",31.83907195,117.264207478576,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国"
+University of Twente,52.2380139,6.8566761,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland"
+Eskisehir Osmangazi University,39.7487516,30.4765307102195,"Eskişehir Osmangazi Üniversitesi Meşelik Yerleşkesi, Kütahya-Eskişehir yolu, Sazova Mahallesi, Karagözler, Tepebaşı, Eskişehir, İç Anadolu Bölgesi, 26160, Türkiye"
+"University of Cambridge, United Kingdom",52.17638955,0.143088815415187,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK"
+University of Thessaloniki,40.62984145,22.9588934957528,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα"
+"University Library, Singapore",1.30604775,103.7728987705,"University Town, College Avenue East, Rochester Hill, Clementi, Southwest, 138608, Singapore"
+University of Maryland College Park,38.99203005,-76.9461029019905,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA"
+"Amirkabir University of Technology, Tehran",35.704514,51.4097205774739,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎"
+"University of Chinese Academy of Sciences, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+"Rice University, Houston, TX, 77005, USA",29.71679145,-95.4047811339379,"Rice University, Stockton Drive, Houston, Harris County, Texas, 77005-1890, USA"
+"Beijing, China",39.906217,116.3912757,"北京市, 东城区, 北京市, 100010, 中国"
+University of Canberra,-35.23656905,149.084469935058,"University of Canberra, University Drive, Bruce, Belconnen, Australian Capital Territory, 2617, Australia"
+University of Cape Town,-33.95828745,18.4599734888018,"University of Cape Town, Engineering Mall, Cape Town Ward 59, Cape Town, City of Cape Town, Western Cape, CAPE TOWN, South Africa"
+Islamic University of Gaza - Palestine,31.51368535,34.4401934143135,"The Islamic University of Gaza, Mostafa Hafez Street, South Remal, محافظة غزة, ‏قطاع غزة‎, PO BOX 108, الأراضي الفلسطينية"
+University of Bath,51.3791442,-2.3252332,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK"
+"Rensselaer Polytechnic Institute, USA",42.7298459,-73.6795021620135,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA"
+"Information Technology University (ITU), Punjab, Lahore, Pakistan",31.4760299,74.3427526,"Information Technology University (ITU), Ferozepur Road, Sher Shah Block, Garden Town, Al Noor Town, Lahore District, پنجاب, 54600, ‏پاکستان‎"
+University Of Maryland,39.2899685,-76.6219610316858,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA"
+"The Australian National University Canberra ACT 2601, Australia",-35.28121335,149.11665331324,"Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia"
+"The University of Sydney, Sydney, Australia",-33.88890695,151.189433661925,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia"
+Boston College,42.3354481,-71.1681386402306,"Boston College, 140, Commonwealth Avenue, Chestnut Hill, Newton, Middlesex County, Massachusetts, 02467, USA"
+"KTH Royal Institute of Technology, Stockholm",59.34986645,18.0706321329842,"KTH, Teknikringen, Lärkstaden, Norra Djurgården, Östermalms stadsdelsområde, Sthlm, Stockholm, Stockholms län, Svealand, 114 28, Sverige"
+Sabanci University,40.8927159,29.3786332263582,"Sabanci Universitesi, Preveze Cad., Orta Mahallesi, Tepeören, Tuzla, İstanbul, Marmara Bölgesi, 34953, Türkiye"
+Marquette University,43.03889625,-87.9315544990507,"Marquette University, West Wisconsin Avenue, University Hill, Milwaukee, Milwaukee County, Wisconsin, 53226, USA"
+National University of Defense Technology,28.2290209,112.994832044032,"国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国"
+"Boston College, USA",42.3354481,-71.1681386402306,"Boston College, 140, Commonwealth Avenue, Chestnut Hill, Newton, Middlesex County, Massachusetts, 02467, USA"
+"Manonmaniam Sundaranar University, Tirunelveli",8.76554685,77.65100444813,"Manonmaniam Sundaranar University, Tenkasi-Tirunelveli, Gandhi Nagar, Tirunelveli, Tirunelveli Kattabo, Tamil Nadu, 627808, India"
+"University of Massachusetts, Amherst MA, USA",42.3889785,-72.5286987,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA"
+Pennsylvania,40.9699889,-77.7278831,"Pennsylvania, USA"
+Waseda University,33.8898728,130.708562047107,"早稲田大学 北九州キャンパス, 2-2, 有毛引野線, 八幡西区, 北九州市, 福岡県, 九州地方, 808-0135, 日本"
+College of Computing,-6.1992922,39.3081862,"computing, Tunguu, Unguja Kusini, Zanzibar, 146, Tanzania"
+Boston University,42.3504253,-71.1005611418395,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA"
+"University of Canterbury, New Zealand",-43.5240528,172.580306253669,"University of Canterbury, Uni-Cycle, Ilam, Christchurch, Christchurch City, Canterbury, 8040, New Zealand/Aotearoa"
+National Cheng Kung University,22.9991916,120.216251337909,"成大, 1, 大學路, 大學里, 前甲, 東區, 臺南市, 70101, 臺灣"
+University of Bonn,50.7338124,7.1022465,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland"
+"The University of Electro-Communications, Tokyo",35.6572957,139.542558677257,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本"
+Tomsk Polytechnic University,56.46255985,84.955654946724,"Томский политехнический университет, улица Пирогова, Южная, Кировский район, Томск, городской округ Томск, Томская область, СФО, 634034, РФ"
+University of Maryland,39.2899685,-76.6219610316858,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA"
+"University of Virginia, Charlottesville, VA",38.0410576,-78.5054996018357,"University of Virginia, Emmet Street North, Charlottesville, Virginia, 22901, USA"
+"Pune Institute of Computer Technology, Pune, ( India",18.4575638,73.8507352,"Pune Institute of Computer Technology, Mediacal College Road, Vadgaon Budruk, Katraj, Pune, Pune District, Maharashtra, 411043, India"
+College of Computer and Information Science,42.3192923,-83.2343465549018,"Computer & Information Science, John Montieth Boulevard, Dearborn, Wayne County, Michigan, 48128, USA"
+"University of Washington, Seattle, USA",47.6543238,-122.308008943203,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA"
+"Tampere University of Technology, Tampere 33720, Finland",61.44964205,23.8587746189096,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi"
+"University of Pittsburgh, Pittsburgh, PA",40.4495417,-79.8957457221781,"Visitor Parking, Thomas Boulevard, Homewood, Point Breeze North, Wilkinsburg, Allegheny County, Pennsylvania, 15208, USA"
+THE UNIVERSITY OF ARIZONA,32.2351726,-110.950958317648,"University of Arizona, North Highland Avenue, Rincon Heights, Barrio Viejo, Tucson, Pima County, Arizona, 85721, USA"
+CALIFORNIA INSTITUTE OF TECHNOLOGY,34.13710185,-118.125274866116,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA"
+"Arizona State University, Tempe AZ",33.4206602,-111.932634924965,"Arizona State University, Palm Walk, Tempe, Maricopa County, Arizona, 85287, USA"
+SIMON FRASER UNIVERSITY,49.2767454,-122.917773749103,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada"
+"University of Alberta, Edmonton, Canada",53.5238572,-113.522826652346,"University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada"
+Beckman Institute,40.11571585,-88.2275077179639,"Beckman Institute, The Presidents' Walk, Urbana, Champaign County, Illinois, 61801-2341, USA"
+y National Institute of Advanced Industrial Science and Technology,36.05238585,140.118523607658,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本"
+"University of Amsterdam, the Netherlands",52.3553655,4.9501644,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland"
+"Santa Clara University, Santa Clara, CA. 95053, USA",37.34820285,-121.935635412063,"Cowell Center, Accolti Way, Santa Clara, Santa Clara County, California, 95053, USA"
+South China University of China,23.0490047,113.3971571,"华工站, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国"
+"Northeastern University, Boston, MA, USA",42.3383668,-71.0879352428284,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA"
+Cornell University,42.4505507,-76.4783512955428,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA"
+"University of Amsterdam, Amsterdam, The Netherlands",52.3553655,4.9501644,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland"
+"University of Florida, Gainesville, FL, 32611, USA",29.6447739,-82.3575193392276,"University of Florida, Museum Road, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32601, USA"
+"Azad University, Qazvin, Iran",36.3173432,50.0367286,"پل دانشگاه آزاد, باراجین, پونک ۳, قزوین, بخش مرکزی, شهرستان قزوین, استان قزوین, ‏ایران‎"
+"University of Massachusetts, Amherst, MA",42.3889785,-72.5286987,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA"
+University of Illinois Urbana Champaign,40.11116745,-88.2258766477716,"B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA"
+The State University of New Jersey,40.51865195,-74.4409980124119,"Rutgers New Brunswick: Livingston Campus, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA"
+"Dalian University of Technology, Dalian 116024, China",38.88140235,121.522810980755,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国"
+"University of Illinois, Urbana-Champaign",40.11116745,-88.2258766477716,"B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA"
+Max-Planck Institute for Informatics,49.2579566,7.04577416640431,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland"
+"University of Central Florida, USA",28.59899755,-81.1971250118395,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA"
+Hong Kong Polytechnic University,22.304572,114.179762852269,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国"
+"Feng Chia University, Taichung, Taiwan",24.18005755,120.648360719503,"逢甲大學, 100, 文華路, 西平里, 西屯區, 臺中市, 40724, 臺灣"
+University of Texas at Arlington,32.7283683,-97.112018348404,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA"
+"Stony Brook University, Stony Brook, USA",40.9153196,-73.1270626,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA"
+"University of Plymouth, UK",50.3752501,-4.13927692297343,"Charles Seale-Hayne Library, Portland Square, Barbican, Plymouth, South West England, England, PL4 6AP, UK"
+"Carnegie Mellon University Pittsburgh, PA, USA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+"Istanbul Technical University, Turkey",41.10427915,29.022311592943,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye"
+Tafresh University,34.68092465,50.0534135183902,"دانشگاه تفرش, پاسداران, خرازان, بخش مرکزی, شهرستان تفرش, استان مرکزی, ‏ایران‎"
+University of Perugia,49.2622421,-123.2450052,"Caffe Perugia, 2350, Health Sciences Mall, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada"
+"Kyung Hee University, South Korea",37.5948716,127.0530887,"경희사이버대학교, 26, 경희대로, 회기동, 동대문구, 서울특별시, 02447, 대한민국"
+"University of California, Irvine, USA",33.6431901,-117.84016493553,"University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA"
+"University of Trento, Italy",46.0658836,11.1159894,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia"
+University of Bristol,51.4584837,-2.60977519828372,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK"
+"University of Chinese Academy of Sciences, Beijing, 100049, China",39.9082804,116.2458527,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国"
+"Northumbria University, Newcastle upon Tyne, NE1 8ST, UK",54.9781026,-1.6067699,"Northumbria University, Northumberland Road, Cradlewell, Haymarket, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE1 8SG, UK"
+The Robotics Institute,13.65450525,100.494231705059,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย"
+Kobe University,34.7275714,135.237099997686,"神戸大学, 灘三田線, 灘区, 神戸市, 兵庫県, 近畿地方, 657-00027, 日本"
+"University of Michigan, Ann Arbor",42.2942142,-83.710038935096,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA"
+"University of Amsterdam, Amsterdam, The Netherlands",52.3553655,4.9501644,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland"
+"Beijing Institute of Technology, Beijing, China",39.9586652,116.309712808455,"北京理工大学, 5, 中关村南大街, 中关村, 稻香园南社区, 海淀区, 北京市, 100872, 中国"
+"Imperial College London, UK",51.49887085,-0.175607973937072,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK"
+"National Taiwan University, Taiwan",25.01682835,121.538469235773,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣"
+"Rutgers University, New Brunswick, NJ",40.50007595,-74.4457915242934,"Zimmerli Art Museum, 71, Hamilton Street, New Brunswick, Middlesex County, New Jersey, 08901-1248, USA"
+The Australian National University,-37.81354365,144.971791681654,"Australian National University, 52, Collins Street, Melbourne City, City of Melbourne, Victoria, 3000, Australia"
+"University of Alberta, Edmonton, Canada",53.5238572,-113.522826652346,"University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada"
+University of Exeter,50.7369302,-3.53647671702167,"University of Exeter, Stocker Road, Exwick, Exeter, Devon, South West England, England, EX4 4QN, UK"
+"University of Basel, Switzerland",47.5612651,7.5752961,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra"
+CALIFORNIA INSTITUTE OF TECHNOLOGY,34.13710185,-118.125274866116,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA"
+University of Texas at,32.3163078,-95.2536994379459,"The University of Texas at Tyler, 3900, University Boulevard, Tyler, Smith County, Texas, 75799, USA"
+Robotics Institute,13.65450525,100.494231705059,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย"
+"Dalian University of Technology, Dalian 116024, China",38.88140235,121.522810980755,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国"
+Illinois Institute of Technology,41.8361963,-87.6265591274291,"Illinois Institute of Technology, South State Street, Bronzeville, Chicago, Cook County, Illinois, 60616, USA"
+"Rheinische-Friedrich-Wilhelms University, Bonn, Germany",50.7338124,7.1022465,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland"
+"College Park, MD, 20740, USA",38.980666,-76.9369189,"College Park, Prince George's County, Maryland, USA"
+"Tamkang University, Taipei, Taiwan",25.17500615,121.450767514156,"淡江大學, 151, 英專路, 中興里, 鬼仔坑, 淡水區, 新北市, 25137, 臺灣"
+Cornell University,42.4505507,-76.4783512955428,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA"
+Beckman Institute,40.11571585,-88.2275077179639,"Beckman Institute, The Presidents' Walk, Urbana, Champaign County, Illinois, 61801-2341, USA"
+"Carnegie Mellon University Pittsburgh, PA, USA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+University of Newcastle,-33.3578899,151.37834708231,"University of Newcastle Central Coast Campus, Technology Bridge, Ourimbah, Central Coast, NSW, 2258, Australia"
+"University of Amsterdam, Amsterdam, The",52.3553655,4.9501644,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland"
+University of Illinois,40.11116745,-88.2258766477716,"B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA"
+"Carnegie Mellon University, Pittsburgh, USA",40.4441619,-79.942728259225,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA"
+Florida International University,25.75533775,-80.3762889746807,"FIU, Southwest 14th Street, Sweetwater, University Park, Miami-Dade County, Florida, 33199, USA"
+Max Planck Institute for Biological Cybernetics,48.5369125,9.05922532743396,"Max-Planck-Institut für Biologische Kybernetik, 8, Max-Planck-Ring, Max-Planck-Institut, Wanne, Tübingen, Landkreis Tübingen, Regierungsbezirk Tübingen, Baden-Württemberg, 72076, Deutschland"
+Delft University of Technology,51.99882735,4.37396036815404,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland"
+"Princeton University, Princeton, NJ, USA",40.34725815,-74.6513455119257,"Lot 25, Ivy Lane, Princeton Township, Mercer County, New Jersey, 08544, USA"
+Institute for System Programming,55.7449881,37.6645042069876,"ИСП РАН, 25, улица Александра Солженицына, Швивая горка, Таганский район, Центральный административный округ, Москва, ЦФО, 109004, РФ"
+"University of Plymouth, UK",50.3752501,-4.13927692297343,"Charles Seale-Hayne Library, Portland Square, Barbican, Plymouth, South West England, England, PL4 6AP, UK"
+University of Venezia,45.4312742,12.3265377,"University, Fondamenta Toffetti, Dorsoduro, Venezia-Murano-Burano, Venezia, VE, VEN, 30123, Italia"
+University of Texas at Austin,30.284151,-97.7319559808022,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA"
+Cardi University,10.6435074,-61.4022996445292,"CARDI, University of the West Indies, Saint Augustine, Tunapuna-Piarco, 686, Trinidad and Tobago"
+Australian Institute of Sport,-35.24737535,149.104454269689,"Australian Institute of Sport, Glenn McGrath Street, Bruce, Belconnen, Australian Capital Territory, 2617, Australia"
+"University of Washington, Seattle, USA",47.6543238,-122.308008943203,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA"
+University of Campinas (Unicamp,-22.8224781,-47.0642599309425,"Universidade Estadual de Campinas - UNICAMP, Rua Josué de Castro, Barão Geraldo, Campinas, Microrregião de Campinas, RMC, Mesorregião de Campinas, SP, Região Sudeste, 13083-970, Brasil"
+"University of Central Florida, USA",28.59899755,-81.1971250118395,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA"
+"National Institute of Technology, Durgapur, West Bengal, India",23.54869625,87.291057119111,"National Institute Of Technology, Durgapur, Priyadarshini Indira Sarani, Durgapur, Bānkurā, West Bengal, 713209, India"
+"The University of York, UK",53.94540365,-1.0313887829649,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK"
+Pennsylvania,40.9699889,-77.7278831,"Pennsylvania, USA"
+"Tampere University of Technology, Tampere 33720, Finland",61.44964205,23.8587746189096,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi"
+"The Hong Kong Polytechnic University, Hong Kong, China",22.304572,114.179762852269,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国"
+SIMON FRASER UNIVERSITY,49.2767454,-122.917773749103,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada"
+Dr. B. C. Roy Engineering College,23.54409755,87.342697070434,"Dr. B. C. Roy Engineering College, Lenin Sarani, Durgapur, Bānkurā, West Bengal, 713200, India"
+"Nanjing University of Aeronautics and Astronautics, China",32.0373496,118.8140686,"南京航空航天大学, 御道街, 白下区, 新世纪广场, 秦淮区, 南京市, 江苏省, 210016, 中国"
+University of Cape Town,-33.95828745,18.4599734888018,"University of Cape Town, Engineering Mall, Cape Town Ward 59, Cape Town, City of Cape Town, Western Cape, CAPE TOWN, South Africa"
+"The University of Electro-Communications, Tokyo",35.6572957,139.542558677257,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本"
+"University of Karlsruhe, Germany",49.00664235,8.39405151637065,"Karlshochschule International University, 36-38, Karlstraße, Innenstadt-West Westlicher Teil, Innenstadt-West, Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76133, Deutschland"
+Karlsruhe Institute of,49.10184375,8.43312559623876,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland"
+"Aristotle University of Thessaloniki, GR-54124 Thessaloniki, Greece",40.62984145,22.9588934957528,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα"
+"University of Twente, Netherlands",52.2380139,6.8566761,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland"
+Clemson University,34.66869155,-82.837434756078,"Clemson University, Old Stadium Road, Clemson Heights, Pickens County, South Carolina, 29631, USA"
+Oakland University,42.66663325,-83.2065575175658,"Oakland University, 201, Meadow Brook Road, Rochester Hills, Oakland County, Michigan, 48309-4401, USA"
+Queensland University of Technology,-27.47715625,153.028410039129,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia"
+"Jahangirnagar University, Savar, Dhaka 1342, Bangladesh",23.88277575,90.2671009927283,"Jahangirnagar University, 1342, Dhaka - Aricha Highway, Nobinagar, সাভার উপজেলা, ঢাকা জেলা, ঢাকা বিভাগ, 1342, বাংলাদেশ"
+Karlsruhe Institute of Technology,49.10184375,8.43312559623876,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland"
+"Tongji University, Shanghai 201804, China",31.28473925,121.496949085887,"同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国"
+"Feng Chia University, Taichung, Taiwan",24.18005755,120.648360719503,"逢甲大學, 100, 文華路, 西平里, 西屯區, 臺中市, 40724, 臺灣"
+"Istanbul Technical University, Turkey",41.10427915,29.022311592943,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye"
+"University of Tsukuba, Japan",36.1112058,140.1055176,"University of Tsukuba, つばき通り, Kananemoto-satsukabe village, つくば市, 茨城県, 関東地方, 305-8377, 日本"
+Anna University,13.0105838,80.2353736,"Anna University, Nuclear Physics Road, Srinagar Colony, Ward 171, Zone 13 Adyar, Chennai, Chennai district, Tamil Nadu, 600025, India"
+Columbia University in the City of New York,40.8071772,-73.9625279772072,"Columbia University In The City Of New York, College Walk, Morningside Heights, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10027, USA"
+"Amirkabir University of Technology, Tehran",35.704514,51.4097205774739,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎"
+"University of Michigan, Ann Arbor, USA",42.2942142,-83.710038935096,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA"
+"University of Bath, Bath, United Kingdom",51.3791442,-2.3252332,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK"
+Jacobs University,53.4129148,-2.96897915394896,"Liverpool Hope University, Shaw Street, Everton, Liverpool, North West England, England, L6 1HP, UK"
+"Neurological Institute, USA",40.84211085,-73.9428460313244,"Neurological Institute of New York, Haven Avenue, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10032, USA"
+Research Center,24.7261991,46.6365468966391,"مركز البحوث, طريق تركي الأول بن عبدالعزيز آل سعود, المحمدية, Al Muhammadiyah District حي المحمدية, Al Maather Municipality, الرياض, منطقة الرياض, 12371, السعودية"
+"Central Tehran Branch, Azad University",35.753318,51.370631,"دانشگاه آزاد شعبه مرکزی تربیت بدنی, بلوار ایران زمین, شهرک غرب, منطقه ۲ شهر تهران, تهران, بخش رودبارقصران, شهرستان شمیرانات, استان تهران, 14658, ‏ایران‎"
+"SASTRA University, Thanjavur, Tamil Nadu, India",10.9628655,79.3853065130097,"SASTRA University, SRC Campus, Big Bazaar Street, கும்பகோணம், Thanjavur district, Tamil Nadu, 612001, India"
+Virginia Polytechnic Institute and State University,37.21872455,-80.4254251869494,"Virginia Polytechnic Institute and State University, Duck Pond Drive, Blacksburg, Montgomery County, Virginia, 24061-9517, USA"
+Sharif University of Technology,35.7036227,51.351250969544,"دانشگاه صنعتی شریف, خیابان آزادی, زنجان, منطقه ۹ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, 14588, ‏ایران‎"
+College of Engineering and Computer Science,25.7589624,-80.3738881489383,"ECS, University Drive, Sweetwater, Lil Abner Mobile Home Park, Miami-Dade County, Florida, 33199, USA"
+"Indian Institute of Technology Delhi, New Delhi, India",28.5444176,77.1893001,"Indian Institute Of Technology, IIT Delhi Main Road, Adchini, Lado Sarai, Mehrauli, South Delhi, Delhi, 110066, India"
+New York University,40.72925325,-73.9962539360963,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA"
+The State University of New Jersey,40.51865195,-74.4409980124119,"Rutgers New Brunswick: Livingston Campus, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA"
+University of Connecticut,41.8093779,-72.2536414,"University of Connecticut, Glenbrook Road, Storrs, Tolland County, Connecticut, 06269, USA"
+Chosun University,35.1441031,126.9257858,"조선대역, 서남로, 남동, 동구, 광주, 61473, 대한민국"
+"Stanford University, Stanford, CA, USA",37.43131385,-122.169365354983,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA"
+"La Trobe University, Australia",-36.7784754,144.298047,"La Trobe University, Keck Street, Flora Hill, Bendigo, City of Greater Bendigo, Loddon Mallee, Victoria, 3550, Australia"
+The City University of New York,40.8722825,-73.8948917141949,"Lehman College of the City University of New York, 250, Bedford Park Boulevard West, Bedford Park, The Bronx, Bronx County, NYC, New York, 10468, USA"
+Istanbul University,41.0132424,28.9637609,"İstanbul Üniversitesi, Besim Ömerpaşa Caddesi, Süleymaniye, Fatih, İstanbul, Marmara Bölgesi, 34116, Türkiye"
diff --git a/scraper/reports/institutions_missing.html b/scraper/reports/institutions_missing.html
new file mode 100644
index 00000000..93a26238
--- /dev/null
+++ b/scraper/reports/institutions_missing.html
@@ -0,0 +1,11693 @@
+<!doctype html><html><head><title>Institutions</title><link rel='stylesheet' href='reports.css'></head><body><h2>Institutions</h2><table border='1' cellpadding='3' cellspacing='3'><tr><td>61084a25ebe736e8f6d7a6e53b2c20d9723c4608</td><td></td></tr><tr><td>61f04606528ecf4a42b49e8ac2add2e9f92c0def</td><td>Deep Deformation Network for Object Landmark
+<br/>Localization
+<br/>NEC Laboratories America, Department of Media Analytics
+</td></tr><tr><td>614a7c42aae8946c7ad4c36b53290860f6256441</td><td>1
+<br/>Joint Face Detection and Alignment using
+<br/>Multi-task Cascaded Convolutional Networks
+</td></tr><tr><td>0d88ab0250748410a1bc990b67ab2efb370ade5d</td><td>Author(s) :
+<br/>ERROR HANDLING IN MULTIMODAL BIOMETRIC SYSTEMS USING
+<br/>RELIABILITY MEASURES (ThuPmOR6)
+<br/>(EPFL, Switzerland)
+<br/>(EPFL, Switzerland)
+<br/>(EPFL, Switzerland)
+<br/>(EPFL, Switzerland)
+<br/>Plamen Prodanov
+</td></tr><tr><td>0d467adaf936b112f570970c5210bdb3c626a717</td><td></td></tr><tr><td>0d6b28691e1aa2a17ffaa98b9b38ac3140fb3306</td><td>Review of Perceptual Resemblance of Local
+<br/>Plastic Surgery Facial Images using Near Sets
+<br/>1,2 Department of Computer Technology,
+<br/>YCCE Nagpur, India
+</td></tr><tr><td>0db8e6eb861ed9a70305c1839eaef34f2c85bbaf</td><td></td></tr><tr><td>0dbf4232fcbd52eb4599dc0760b18fcc1e9546e9</td><td></td></tr><tr><td>0d760e7d762fa449737ad51431f3ff938d6803fe</td><td>LCDet: Low-Complexity Fully-Convolutional Neural Networks for
+<br/>Object Detection in Embedded Systems
+<br/>UC San Diego ∗
+<br/>Gokce Dane
+<br/>Qualcomm Inc.
+<br/>UC San Diego
+<br/>Qualcomm Inc.
+<br/>UC San Diego
+</td></tr><tr><td>0dd72887465046b0f8fc655793c6eaaac9c03a3d</td><td>Real-time Head Orientation from a Monocular
+<br/>Camera using Deep Neural Network
+<br/>KAIST, Republic of Korea
+</td></tr><tr><td>0d087aaa6e2753099789cd9943495fbbd08437c0</td><td></td></tr><tr><td>0d8415a56660d3969449e77095be46ef0254a448</td><td></td></tr><tr><td>0d735e7552af0d1dcd856a8740401916e54b7eee</td><td></td></tr><tr><td>0d06b3a4132d8a2effed115a89617e0a702c957a</td><td></td></tr><tr><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td><td></td></tr><tr><td>0d33b6c8b4d1a3cb6d669b4b8c11c2a54c203d1a</td><td>Detection and Tracking of Faces in Videos: A Review
+<br/>© 2016 IJEDR | Volume 4, Issue 2 | ISSN: 2321-9939
+<br/>of Related Work
+<br/>1Student, 2Assistant Professor
+<br/>1, 2Dept. of Electronics & Comm., S S I E T, Punjab, India
+<br/>________________________________________________________________________________________________________
+</td></tr><tr><td>956317de62bd3024d4ea5a62effe8d6623a64e53</td><td>Lighting Analysis and Texture Modification of 3D Human
+<br/>Face Scans
+<br/>Author
+<br/>Zhang, Paul, Zhao, Sanqiang, Gao, Yongsheng
+<br/>Published
+<br/>2007
+<br/>Conference Title
+<br/>Digital Image Computing Techniques and Applications
+<br/>DOI
+<br/>https://doi.org/10.1109/DICTA.2007.4426825
+<br/>Copyright Statement
+<br/>© 2007 IEEE. Personal use of this material is permitted. However, permission to reprint/
+<br/>republish this material for advertising or promotional purposes or for creating new collective
+<br/>works for resale or redistribution to servers or lists, or to reuse any copyrighted component of
+<br/>this work in other works must be obtained from the IEEE.
+<br/>Downloaded from
+<br/>http://hdl.handle.net/10072/17889
+<br/>Link to published version
+<br/>http://www.ieee.org/
+<br/>Griffith Research Online
+<br/>https://research-repository.griffith.edu.au
+</td></tr><tr><td>956c634343e49319a5e3cba4f2bd2360bdcbc075</td><td>IEEE TRANSACTIONS ON SYSTEMS, MAN, AND CYBERNETICS—PART B: CYBERNETICS, VOL. 36, NO. 4, AUGUST 2006
+<br/>873
+<br/>A Novel Incremental Principal Component Analysis
+<br/>and Its Application for Face Recognition
+</td></tr><tr><td>958c599a6f01678513849637bec5dc5dba592394</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Generalized Zero-Shot Learning for Action
+<br/>Recognition with Web-Scale Video Data
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>59fc69b3bc4759eef1347161e1248e886702f8f7</td><td>Final Report of Final Year Project
+<br/>HKU-Face: A Large Scale Dataset for
+<br/>Deep Face Recognition
+<br/>3035141841
+<br/>COMP4801 Final Year Project
+<br/>Project Code: 17007
+</td></tr><tr><td>59bfeac0635d3f1f4891106ae0262b81841b06e4</td><td>Face Verification Using the LARK Face
+<br/>Representation
+</td></tr><tr><td>590628a9584e500f3e7f349ba7e2046c8c273fcf</td><td></td></tr><tr><td>59eefa01c067a33a0b9bad31c882e2710748ea24</td><td>IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY
+<br/>Fast Landmark Localization
+<br/>with 3D Component Reconstruction and CNN for
+<br/>Cross-Pose Recognition
+</td></tr><tr><td>5945464d47549e8dcaec37ad41471aa70001907f</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Every Moment Counts: Dense Detailed Labeling of Actions in Complex
+<br/>Videos
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>59c9d416f7b3d33141cc94567925a447d0662d80</td><td>Universität des Saarlandes
+<br/>Max-Planck-Institut für Informatik
+<br/>AG5
+<br/>Matrix factorization over max-times
+<br/>algebra for data mining
+<br/>Masterarbeit im Fach Informatik
+<br/>Master’s Thesis in Computer Science
+<br/>von / by
+<br/>angefertigt unter der Leitung von / supervised by
+<br/>begutachtet von / reviewers
+<br/>November 2013
+<br/>UNIVERSITASSARAVIENSIS </td></tr><tr><td>59a35b63cf845ebf0ba31c290423e24eb822d245</td><td>The FaceSketchID System: Matching Facial
+<br/>Composites to Mugshots
+<br/>tedious, and may not
+</td></tr><tr><td>59f325e63f21b95d2b4e2700c461f0136aecc171</td><td>3070
+<br/>978-1-4577-1302-6/11/$26.00 ©2011 IEEE
+<br/>FOR FACE RECOGNITION
+<br/>1. INTRODUCTION
+</td></tr><tr><td>5922e26c9eaaee92d1d70eae36275bb226ecdb2e</td><td>Boosting Classification Based Similarity
+<br/>Learning by using Standard Distances
+<br/>Departament d’Informàtica, Universitat de València
+<br/>Av. de la Universitat s/n. 46100-Burjassot (Spain)
+</td></tr><tr><td>59031a35b0727925f8c47c3b2194224323489d68</td><td>Sparse Variation Dictionary Learning for Face Recognition with A Single
+<br/>Training Sample Per Person
+<br/>ETH Zurich
+<br/>Switzerland
+</td></tr><tr><td>926c67a611824bc5ba67db11db9c05626e79de96</td><td>1913
+<br/>Enhancing Bilinear Subspace Learning
+<br/>by Element Rearrangement
+</td></tr><tr><td>923ede53b0842619831e94c7150e0fc4104e62f7</td><td>978-1-4799-9988-0/16/$31.00 ©2016 IEEE
+<br/>1293
+<br/>ICASSP 2016
+</td></tr><tr><td>92b61b09d2eed4937058d0f9494d9efeddc39002</td><td>Under review in IJCV manuscript No.
+<br/>(will be inserted by the editor)
+<br/>BoxCars: Improving Vehicle Fine-Grained Recognition using
+<br/>3D Bounding Boxes in Traffic Surveillance
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>920a92900fbff22fdaaef4b128ca3ca8e8d54c3e</td><td>LEARNING PATTERN TRANSFORMATION MANIFOLDS WITH PARAMETRIC ATOM
+<br/>SELECTION
+<br/>Ecole Polytechnique F´ed´erale de Lausanne (EPFL)
+<br/>Signal Processing Laboratory (LTS4)
+<br/>Switzerland-1015 Lausanne
+</td></tr><tr><td>9207671d9e2b668c065e06d9f58f597601039e5e</td><td>Face Detection Using a 3D Model on
+<br/>Face Keypoints
+</td></tr><tr><td>9282239846d79a29392aa71fc24880651826af72</td><td>Antonakos et al. EURASIP Journal on Image and Video Processing 2014, 2014:14
+<br/>http://jivp.eurasipjournals.com/content/2014/1/14
+<br/>RESEARCH
+<br/>Open Access
+<br/>Classification of extreme facial events in sign
+<br/>language videos
+</td></tr><tr><td>92c2dd6b3ac9227fce0a960093ca30678bceb364</td><td>Provided by the author(s) and NUI Galway in accordance with publisher policies. Please cite the published
+<br/>version when available.
+<br/>Title
+<br/>On color texture normalization for active appearance models
+<br/>Author(s)
+<br/>Ionita, Mircea C.; Corcoran, Peter M.; Buzuloiu, Vasile
+<br/>Publication
+<br/>Date
+<br/>2009-05-12
+<br/>Publication
+<br/>Information
+<br/>Ionita, M. C., Corcoran, P., & Buzuloiu, V. (2009). On Color
+<br/>Texture Normalization for Active Appearance Models. Image
+<br/>Processing, IEEE Transactions on, 18(6), 1372-1378.
+<br/>Publisher
+<br/>IEEE
+<br/>Link to
+<br/>publisher's
+<br/>version
+<br/>http://dx.doi.org/10.1109/TIP.2009.2017163
+<br/>Item record
+<br/>http://hdl.handle.net/10379/1350
+<br/>Some rights reserved. For more information, please see the item record link above.
+<br/>Downloaded 2018-11-06T00:40:53Z
+</td></tr><tr><td>92fada7564d572b72fd3be09ea3c39373df3e27c</td><td></td></tr><tr><td>927ad0dceacce2bb482b96f42f2fe2ad1873f37a</td><td>Interest-Point based Face Recognition System
+<br/>87
+<br/>X
+<br/>Interest-Point based Face Recognition System
+<br/>Spain
+<br/>1. Introduction
+<br/>Among all applications of face recognition systems, surveillance is one of the most
+<br/>challenging ones. In such an application, the goal is to detect known criminals in crowded
+<br/>environments, like airports or train stations. Some attempts have been made, like those of
+<br/>Tokio (Engadget, 2006) or Mainz (Deutsche Welle, 2006), with limited success.
+<br/>The first task to be carried out in an automatic surveillance system involves the detection of
+<br/>all the faces in the images taken by the video cameras. Current face detection algorithms are
+<br/>highly reliable and thus, they will not be the focus of our work. Some of the best performing
+<br/>examples are the Viola-Jones algorithm (Viola & Jones, 2004) or the Schneiderman-Kanade
+<br/>algorithm (Schneiderman & Kanade, 2000).
+<br/>The second task to be carried out involves the comparison of all detected faces among the
+<br/>database of known criminals. The ideal behaviour of an automatic system performing this
+<br/>task would be to get a 100% correct identification rate, but this behaviour is far from the
+<br/>capabilities of current face recognition algorithms. Assuming that there will be false
+<br/>identifications, supervised surveillance systems seem to be the most realistic option: the
+<br/>automatic system issues an alarm whenever it detects a possible match with a criminal, and
+<br/>a human decides whether it is a false alarm or not. Figure 1 shows an example.
+<br/>However, even in a supervised scenario the requirements for the face recognition algorithm
+<br/>are extremely high: the false alarm rate must be low enough as to allow the human operator
+<br/>to cope with it; and the percentage of undetected criminals must be kept to a minimum in
+<br/>order to ensure security. Fulfilling both requirements at the same time is the main challenge,
+<br/>as a reduction in false alarm rate usually implies an increase of the percentage of undetected
+<br/>criminals.
+<br/>We propose a novel face recognition system based in the use of interest point detectors and
+<br/>local descriptors. In order to check the performances of our system, and particularly its
+<br/>performances in a surveillance application, we present experimental results in terms of
+<br/>Receiver Operating Characteristic curves or ROC curves. From the experimental results, it
+<br/>becomes clear that our system outperforms classical appearance based approaches.
+<br/>www.intechopen.com
+</td></tr><tr><td>929bd1d11d4f9cbc638779fbaf958f0efb82e603</td><td>This is the author’s version of a work that was submitted/accepted for pub-
+<br/>lication in the following source:
+<br/>Zhang, Ligang & Tjondronegoro, Dian W. (2010) Improving the perfor-
+<br/>mance of facial expression recognition using dynamic, subtle and regional
+<br/>features.
+<br/>In Kok, WaiWong, B. Sumudu, U. Mendis, & Abdesselam ,
+<br/>Bouzerdoum (Eds.) Neural Information Processing. Models and Applica-
+<br/>tions, Lecture Notes in Computer Science, Sydney, N.S.W, pp. 582-589.
+<br/>This file was downloaded from: http://eprints.qut.edu.au/43788/
+<br/>c(cid:13) Copyright 2010 Springer-Verlag
+<br/>Conference proceedings published, by Springer Verlag, will be available
+<br/>via Lecture Notes in Computer Science http://www.springer.de/comp/lncs/
+<br/>Notice: Changes introduced as a result of publishing processes such as
+<br/>copy-editing and formatting may not be reflected in this document. For a
+<br/>definitive version of this work, please refer to the published source:
+<br/>http://dx.doi.org/10.1007/978-3-642-17534-3_72
+</td></tr><tr><td>0c36c988acc9ec239953ff1b3931799af388ef70</td><td>Face Detection Using Improved Faster RCNN
+<br/>Huawei Cloud BU, China
+<br/>Figure1.Face detection results of FDNet1.0
+</td></tr><tr><td>0c5ddfa02982dcad47704888b271997c4de0674b</td><td></td></tr><tr><td>0cccf576050f493c8b8fec9ee0238277c0cfd69a</td><td></td></tr><tr><td>0c069a870367b54dd06d0da63b1e3a900a257298</td><td>Author manuscript, published in "ICANN 2011 - International Conference on Artificial Neural Networks (2011)"
+</td></tr><tr><td>0c75c7c54eec85e962b1720755381cdca3f57dfb</td><td>2212
+<br/>Face Landmark Fitting via Optimized Part
+<br/>Mixtures and Cascaded Deformable Model
+</td></tr><tr><td>0ca36ecaf4015ca4095e07f0302d28a5d9424254</td><td>Improving Bag-of-Visual-Words Towards Effective Facial Expressive
+<br/>Image Classification
+<br/>1Univ. Grenoble Alpes, CNRS, Grenoble INP∗ , GIPSA-lab, 38000 Grenoble, France
+<br/>Keywords:
+<br/>BoVW, k-means++, Relative Conjunction Matrix, SIFT, Spatial Pyramids, TF.IDF.
+</td></tr><tr><td>0cfca73806f443188632266513bac6aaf6923fa8</td><td>Predictive Uncertainty in Large Scale Classification
+<br/>using Dropout - Stochastic Gradient Hamiltonian
+<br/>Monte Carlo.
+<br/>Vergara, Diego∗1, Hern´andez, Sergio∗2, Valdenegro-Toro, Mat´ıas∗∗3 and Jorquera, Felipe∗4.
+<br/>∗Laboratorio de Procesamiento de Informaci´on Geoespacial, Universidad Cat´olica del Maule, Chile.
+<br/>∗∗German Research Centre for Artificial Intelligence, Bremen, Germany.
+</td></tr><tr><td>0c54e9ac43d2d3bab1543c43ee137fc47b77276e</td><td></td></tr><tr><td>0c5afb209b647456e99ce42a6d9d177764f9a0dd</td><td>97
+<br/>Recognizing Action Units for
+<br/>Facial Expression Analysis
+</td></tr><tr><td>0c377fcbc3bbd35386b6ed4768beda7b5111eec6</td><td>258
+<br/>A Unified Probabilistic Framework
+<br/>for Spontaneous Facial Action Modeling
+<br/>and Understanding
+</td></tr><tr><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td><td></td></tr><tr><td>0cf7da0df64557a4774100f6fde898bc4a3c4840</td><td>Shape Matching and Object Recognition using Low Distortion Correspondences
+<br/>Department of Electrical Engineering and Computer Science
+<br/>U.C. Berkeley
+</td></tr><tr><td>0c4659b35ec2518914da924e692deb37e96d6206</td><td>1236
+<br/>Registering a MultiSensor Ensemble of Images
+</td></tr><tr><td>0c53ef79bb8e5ba4e6a8ebad6d453ecf3672926d</td><td>SUBMITTED TO JOURNAL
+<br/>Weakly Supervised PatchNets: Describing and
+<br/>Aggregating Local Patches for Scene Recognition
+</td></tr><tr><td>0c60eebe10b56dbffe66bb3812793dd514865935</td><td></td></tr><tr><td>6601a0906e503a6221d2e0f2ca8c3f544a4adab7</td><td>SRTM-2 2/9/06 3:27 PM Page 321
+<br/>Detection of Ancient Settlement Mounds:
+<br/>Archaeological Survey Based on the
+<br/>SRTM Terrain Model
+<br/>B.H. Menze, J.A. Ur, and A.G. Sherratt
+</td></tr><tr><td>660b73b0f39d4e644bf13a1745d6ee74424d4a16</td><td></td></tr><tr><td>66d512342355fb77a4450decc89977efe7e55fa2</td><td>Under review as a conference paper at ICLR 2018
+<br/>LEARNING NON-LINEAR TRANSFORM WITH DISCRIM-
+<br/>INATIVE AND MINIMUM INFORMATION LOSS PRIORS
+<br/>Anonymous authors
+<br/>Paper under double-blind review
+</td></tr><tr><td>6643a7feebd0479916d94fb9186e403a4e5f7cbf</td><td>Chapter 8
+<br/>3D Face Recognition
+</td></tr><tr><td>661ca4bbb49bb496f56311e9d4263dfac8eb96e9</td><td>Datasheets for Datasets
+</td></tr><tr><td>66d087f3dd2e19ffe340c26ef17efe0062a59290</td><td>Dog Breed Identification
+<br/>Brian Mittl
+<br/>Vijay Singh
+</td></tr><tr><td>66a2c229ac82e38f1b7c77a786d8cf0d7e369598</td><td>Proceedings of the 2016 Industrial and Systems Engineering Research Conference
+<br/>H. Yang, Z. Kong, and MD Sarder, eds.
+<br/>A Probabilistic Adaptive Search System
+<br/>for Exploring the Face Space
+<br/>Escuela Superior Politecnica del Litoral (ESPOL)
+<br/>Guayaquil-Ecuador
+</td></tr><tr><td>66886997988358847615375ba7d6e9eb0f1bb27f</td><td></td></tr><tr><td>66837add89caffd9c91430820f49adb5d3f40930</td><td></td></tr><tr><td>66a9935e958a779a3a2267c85ecb69fbbb75b8dc</td><td>FAST AND ROBUST FIXED-RANK MATRIX RECOVERY
+<br/>Fast and Robust Fixed-Rank Matrix
+<br/>Recovery
+<br/>Antonio Lopez
+</td></tr><tr><td>66533107f9abdc7d1cb8f8795025fc7e78eb1122</td><td>Vi a
+<br/>i a Whee
+<br/>W y g Sgy Dae i iy g S g iz ad Ze ga Biey
+<br/>y EECS AST 373 1  g Dg Y g G  Taej 305 701 REA
+<br/>z VR Cee ETR 161 ajg Dg Y g G  Taej 305 350 REA
+<br/>Abac
+<br/>Thee exi he c eaive aciviy bewee a h
+<br/>a beig ad ehabi
+<br/>a eae ehabi
+<br/>e ad ha he bee(cid:12) f ehabi
+<br/> ch a ai
+<br/>eadig i e f he eeia
+<br/>fied
+<br/>cf ad afey f a
+<br/>a
+<br/>bic a ye ARES  ad i h a b
+<br/>ieaci ech
+<br/>ech
+<br/>a
+<br/>vi a
+<br/>ecgizig he iive ad egaive eaig f he
+<br/> e i efed  he bai f chage f he facia
+<br/>exei a d
+<br/> e iei whi
+<br/> e wih a beveage. F he eÆcie vi a
+<br/>i ceig
+<br/>c
+<br/>ed e(cid:11)ec f he bic a. The vi a
+<br/>wih e(cid:11)ecive iei eadig i  ccef
+<br/> eve a beveage f he e.
+<br/>d ci
+<br/>Whee
+<br/>ai he e
+<br/>ca i ey ad  f ci i
+<br/>ye ci f a weed whee
+<br/>a ad ha  
+<br/>he whee
+<br/>he bic a ad h  ake ib
+<br/>exiece f a e ad a b i he ae evi
+<br/>e.
+<br/> hi cae he e eed  ieac wih
+<br/>he bic a i cfab
+<br/>Fig e 1: The whee
+<br/>h a b ieaci ech
+<br/>eve i ha bee eed ha ay diÆc
+<br/>i h a bf ieaci i exiig ehabi
+<br/>b. F exa
+<br/>a ake a high cgiive
+<br/>hyica
+<br/>eaig jyick dexe 
+<br/>de
+<br/>ai e eed ha he  diÆc
+<br/>ig ehabi
+<br/>a
+<br/>id a he begiig [4]. Theefe h a fied
+<br/>h a b ieaci i e f eeia
+<br/>i a whee
+<br/> hi ae we cide he whee
+<br/>bic ye ARES AST Rehabi
+<br/>gieeig Sevice ye  which we ae deve
+<br/>a a evice bic ye f he diab
+<br/>e
+<br/>i e Fig. 1. Ag h a b ieaci ech
+<br/>i e vi a
+</td></tr><tr><td>66810438bfb52367e3f6f62c24f5bc127cf92e56</td><td>Face Recognition of Illumination Tolerance in 2D
+<br/>Subspace Based on the Optimum Correlation
+<br/>Filter
+<br/>Xu Yi
+<br/>Department of Information Engineering, Hunan Industry Polytechnic, Changsha, China
+<br/>images will be tested to project
+</td></tr><tr><td>66af2afd4c598c2841dbfd1053bf0c386579234e</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Context Assisted Face Clustering Framework with
+<br/>Human-in-the-Loop
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>66e6f08873325d37e0ec20a4769ce881e04e964e</td><td>Int J Comput Vis (2014) 108:59–81
+<br/>DOI 10.1007/s11263-013-0695-z
+<br/>The SUN Attribute Database: Beyond Categories for Deeper Scene
+<br/>Understanding
+<br/>Received: 27 February 2013 / Accepted: 28 December 2013 / Published online: 18 January 2014
+<br/>© Springer Science+Business Media New York 2014
+</td></tr><tr><td>661da40b838806a7effcb42d63a9624fcd684976</td><td>53
+<br/>An Illumination Invariant Accurate
+<br/>Face Recognition with Down Scaling
+<br/>of DCT Coefficients
+<br/>Department of Computer Science and Engineering, Amity School of Engineering and Technology, New Delhi, India
+<br/>In this paper, a novel approach for illumination normal-
+<br/>ization under varying lighting conditions is presented.
+<br/>Our approach utilizes the fact that discrete cosine trans-
+<br/>form (DCT) low-frequency coefficients correspond to
+<br/>illumination variations in a digital image. Under varying
+<br/>illuminations, the images captured may have low con-
+<br/>trast; initially we apply histogram equalization on these
+<br/>for contrast stretching. Then the low-frequency DCT
+<br/>coefficients are scaled down to compensate the illumi-
+<br/>nation variations. The value of scaling down factor and
+<br/>the number of low-frequency DCT coefficients, which
+<br/>are to be rescaled, are obtained experimentally. The
+<br/>classification is done using k−nearest neighbor classi-
+<br/>fication and nearest mean classification on the images
+<br/>obtained by inverse DCT on the processed coefficients.
+<br/>The correlation coefficient and Euclidean distance ob-
+<br/>tained using principal component analysis are used as
+<br/>distance metrics in classification. We have tested our
+<br/>face recognition method using Yale Face Database B.
+<br/>The results show that our method performs without any
+<br/>error (100% face recognition performance), even on the
+<br/>most extreme illumination variations. There are different
+<br/>schemes in the literature for illumination normalization
+<br/>under varying lighting conditions, but no one is claimed
+<br/>to give 100% recognition rate under all illumination
+<br/>variations for this database. The proposed technique is
+<br/>computationally efficient and can easily be implemented
+<br/>for real time face recognition system.
+<br/>Keywords: discrete cosine transform, correlation co-
+<br/>efficient, face recognition, illumination normalization,
+<br/>nearest neighbor classification
+<br/>1. Introduction
+<br/>Two-dimensional pattern classification plays a
+<br/>crucial role in real-world applications. To build
+<br/>high-performance surveillance or information
+<br/>security systems, face recognition has been
+<br/>known as the key application attracting enor-
+<br/>mous researchers highlighting on related topics
+<br/>[1,2]. Even though current machine recognition
+<br/>systems have reached a certain level of matu-
+<br/>rity, their success is limited by the real appli-
+<br/>cations constraints, like pose, illumination and
+<br/>expression. The FERET evaluation shows that
+<br/>the performance of a face recognition system
+<br/>decline seriously with the change of pose and
+<br/>illumination conditions [31].
+<br/>To solve the variable illumination problem a
+<br/>variety of approaches have been proposed [3, 7-
+<br/>11, 26-29]. Early work in illumination invariant
+<br/>face recognition focused on image representa-
+<br/>tions that are mostly insensitive to changes in
+<br/>illumination. There were approaches in which
+<br/>the image representations and distance mea-
+<br/>sures were evaluated on a tightly controlled face
+<br/>database that varied the face pose, illumination,
+<br/>and expression. The image representations in-
+<br/>clude edge maps, 2D Gabor-like filters, first and
+<br/>second derivatives of the gray-level image, and
+<br/>the logarithmic transformations of the intensity
+<br/>image along with these representations [4].
+<br/>The different approaches to solve the prob-
+<br/>lem of illumination invariant face recognition
+<br/>can be broadly classified into two main cate-
+<br/>gories. The first category is named as passive
+<br/>approach in which the visual spectrum images
+<br/>are analyzed to overcome this problem. The
+<br/>approaches belonging to other category named
+<br/>active, attempt to overcome this problem by
+<br/>employing active imaging techniques to obtain
+<br/>face images captured in consistent illumina-
+<br/>tion condition, or images of illumination invari-
+<br/>ant modalities. There is a hierarchical catego-
+<br/>rization of these two approaches. An exten-
+<br/>sive review of both approaches is given in [5].
+</td></tr><tr><td>3edb0fa2d6b0f1984e8e2c523c558cb026b2a983</td><td>Automatic Age Estimation Based on
+<br/>Facial Aging Patterns
+</td></tr><tr><td>3ee7a8107a805370b296a53e355d111118e96b7c</td><td></td></tr><tr><td>3e4acf3f2d112fc6516abcdddbe9e17d839f5d9b</td><td>Deep Value Networks Learn to
+<br/>Evaluate and Iteratively Refine Structured Outputs
+</td></tr><tr><td>3ea8a6dc79d79319f7ad90d663558c664cf298d4</td><td></td></tr><tr><td>3e4f84ce00027723bdfdb21156c9003168bc1c80</td><td>1979
+<br/>© EURASIP, 2011 - ISSN 2076-1465
+<br/>19th European Signal Processing Conference (EUSIPCO 2011)
+<br/>INTRODUCTION
+</td></tr><tr><td>3e685704b140180d48142d1727080d2fb9e52163</td><td>Single Image Action Recognition by Predicting
+<br/>Space-Time Saliency
+</td></tr><tr><td>3e687d5ace90c407186602de1a7727167461194a</td><td>Photo Tagging by Collection-Aware People Recognition
+<br/>UFF
+<br/>UFF
+<br/>Asla S´a
+<br/>FGV
+<br/>IMPA
+</td></tr><tr><td>501096cca4d0b3d1ef407844642e39cd2ff86b37</td><td>Illumination Invariant Face Image
+<br/>Representation using Quaternions
+<br/>Dayron Rizo-Rodr´ıguez, Heydi M´endez-V´azquez, and Edel Garc´ıa-Reyes
+<br/>Advanced Technologies Application Center. 7a # 21812 b/ 218 and 222,
+<br/>Rpto. Siboney, Playa, P.C. 12200, La Habana, Cuba.
+</td></tr><tr><td>501eda2d04b1db717b7834800d74dacb7df58f91</td><td></td></tr><tr><td>5083c6be0f8c85815ead5368882b584e4dfab4d1</td><td> Please do not quote. In press, Handbook of affective computing. New York, NY: Oxford
+<br/>Automated Face Analysis for Affective Computing
+</td></tr><tr><td>500b92578e4deff98ce20e6017124e6d2053b451</td><td></td></tr><tr><td>50ff21e595e0ebe51ae808a2da3b7940549f4035</td><td>IEEE TRANSACTIONS ON LATEX CLASS FILES, VOL. XX, NO. X, AUGUST 2017
+<br/>Age Group and Gender Estimation in the Wild with
+<br/>Deep RoR Architecture
+</td></tr><tr><td>5042b358705e8d8e8b0655d07f751be6a1565482</td><td>International Journal of
+<br/>Emerging Research in Management &Technology
+<br/>ISSN: 2278-9359 (Volume-4, Issue-8)
+<br/>Research Article
+<br/> August
+<br/> 2015
+<br/>Review on Emotion Detection in Image
+<br/>CSE & PCET, PTU HOD, CSE & PCET, PTU
+<br/> Punjab, India Punj ab, India
+</td></tr><tr><td>50e47857b11bfd3d420f6eafb155199f4b41f6d7</td><td>International Journal of Computer, Consumer and Control (IJ3C), Vol. 2, No.1 (2013)
+<br/>3D Human Face Reconstruction Using a Hybrid of Photometric
+<br/>Stereo and Independent Component Analysis
+</td></tr><tr><td>50eb75dfece76ed9119ec543e04386dfc95dfd13</td><td>Learning Visual Entities and their Visual Attributes from Text Corpora
+<br/>Dept. of Computer Science
+<br/>K.U.Leuven, Belgium
+<br/>Dept. of Computer Science
+<br/>K.U.Leuven, Belgium
+<br/>Dept. of Computer Science
+<br/>K.U.Leuven, Belgium
+</td></tr><tr><td>50a0930cb8cc353e15a5cb4d2f41b365675b5ebf</td><td></td></tr><tr><td>50d15cb17144344bb1879c0a5de7207471b9ff74</td><td>Divide, Share, and Conquer: Multi-task
+<br/>Attribute Learning with Selective Sharing
+</td></tr><tr><td>5028c0decfc8dd623c50b102424b93a8e9f2e390</td><td>Published as a conference paper at ICLR 2017
+<br/>REVISITING CLASSIFIER TWO-SAMPLE TESTS
+<br/>1Facebook AI Research, 2WILLOW project team, Inria / ENS / CNRS
+</td></tr><tr><td>505e55d0be8e48b30067fb132f05a91650666c41</td><td>A Model of Illumination Variation for Robust Face Recognition
+<br/>Institut Eur´ecom
+<br/>Multimedia Communications Department
+<br/>BP 193, 06904 Sophia Antipolis Cedex, France
+</td></tr><tr><td>680d662c30739521f5c4b76845cb341dce010735</td><td>Int J Comput Vis (2014) 108:82–96
+<br/>DOI 10.1007/s11263-014-0716-6
+<br/>Part and Attribute Discovery from Relative Annotations
+<br/>Received: 25 February 2013 / Accepted: 14 March 2014 / Published online: 26 April 2014
+<br/>© Springer Science+Business Media New York 2014
+</td></tr><tr><td>68d2afd8c5c1c3a9bbda3dd209184e368e4376b9</td><td>Representation Learning by Rotating Your Faces
+</td></tr><tr><td>68a3f12382003bc714c51c85fb6d0557dcb15467</td><td></td></tr><tr><td>68d08ed9470d973a54ef7806318d8894d87ba610</td><td>Drive Video Analysis for the Detection of Traffic Near-Miss Incidents
+</td></tr><tr><td>68caf5d8ef325d7ea669f3fb76eac58e0170fff0</td><td></td></tr><tr><td>68d4056765c27fbcac233794857b7f5b8a6a82bf</td><td>Example-Based Face Shape Recovery Using the
+<br/>Zenith Angle of the Surface Normal
+<br/>Mario Castel´an1, Ana J. Almaz´an-Delf´ın2, Marco I. Ram´ırez-Sosa-Mor´an3,
+<br/>and Luz A. Torres-M´endez1
+<br/>1 CINVESTAV Campus Saltillo, Ramos Arizpe 25900, Coahuila, M´exico
+<br/>2 Universidad Veracruzana, Facultad de F´ısica e Inteligencia Artificial, Xalapa 91000,
+<br/>3 ITESM, Campus Saltillo, Saltillo 25270, Coahuila, M´exico
+<br/>Veracruz, M´exico
+</td></tr><tr><td>684f5166d8147b59d9e0938d627beff8c9d208dd</td><td>IEEE TRANS. NNLS, JUNE 2017
+<br/>Discriminative Block-Diagonal Representation
+<br/>Learning for Image Recognition
+</td></tr><tr><td>68cf263a17862e4dd3547f7ecc863b2dc53320d8</td><td></td></tr><tr><td>68e9c837431f2ba59741b55004df60235e50994d</td><td>Detecting Faces Using Region-based Fully
+<br/>Convolutional Networks
+<br/>Tencent AI Lab, China
+</td></tr><tr><td>687e17db5043661f8921fb86f215e9ca2264d4d2</td><td>A Robust Elastic and Partial Matching Metric for Face Recognition
+<br/>Microsoft Corporate
+<br/>One Microsoft Way, Redmond, WA 98052
+</td></tr><tr><td>688754568623f62032820546ae3b9ca458ed0870</td><td>bioRxiv preprint first posted online Sep. 27, 2016;
+<br/>doi:
+<br/>http://dx.doi.org/10.1101/077784
+<br/>.
+<br/>The copyright holder for this preprint (which was not
+<br/>peer-reviewed) is the author/funder. It is made available under a
+<br/>CC-BY-NC-ND 4.0 International license
+<br/>.
+<br/>Resting high frequency heart rate variability is not associated with the
+<br/>recognition of emotional facial expressions in healthy human adults.
+<br/>1 Univ. Grenoble Alpes, LPNC, F-38040, Grenoble, France
+<br/>2 CNRS, LPNC UMR 5105, F-38040, Grenoble, France
+<br/>3 IPSY, Université Catholique de Louvain, Louvain-la-Neuve, Belgium
+<br/>4 Fund for Scientific Research (FRS-FNRS), Brussels, Belgium
+<br/>Correspondence concerning this article should be addressed to Brice Beffara, Office E250, Institut
+<br/>de Recherches en Sciences Psychologiques, IPSY - Place du Cardinal Mercier, 10 bte L3.05.01 B-1348
+<br/>Author note
+<br/>This study explores whether the myelinated vagal connection between the heart and the brain
+<br/>is involved in emotion recognition. The Polyvagal theory postulates that the activity of the
+<br/>myelinated vagus nerve underlies socio-emotional skills. It has been proposed that the perception
+<br/>of emotions could be one of this skills dependent on heart-brain interactions. However, this
+<br/>assumption was differently supported by diverging results suggesting that it could be related to
+<br/>confounded factors. In the current study, we recorded the resting state vagal activity (reflected by
+<br/>High Frequency Heart Rate Variability, HF-HRV) of 77 (68 suitable for analysis) healthy human
+<br/>adults and measured their ability to identify dynamic emotional facial expressions. Results show
+<br/>that HF-HRV is not related to the recognition of emotional facial expressions in healthy human
+<br/>adults. We discuss this result in the frameworks of the polyvagal theory and the neurovisceral
+<br/>integration model.
+<br/>Keywords: HF-HRV; autonomic flexibility; emotion identification; dynamic EFEs; Polyvagal
+<br/>theory; Neurovisceral integration model
+<br/>Word count: 9810
+<br/>10
+<br/>11
+<br/>12
+<br/>13
+<br/>14
+<br/>15
+<br/>16
+<br/>17
+<br/>Introduction
+<br/>The behavior of an animal is said social when involved in in-
+<br/>teractions with other animals (Ward & Webster, 2016). These
+<br/>interactions imply an exchange of information, signals, be-
+<br/>tween at least two animals. In humans, the face is an efficient
+<br/>communication channel, rapidly providing a high quantity of
+<br/>information. Facial expressions thus play an important role
+<br/>in the transmission of emotional information during social
+<br/>interactions. The result of the communication is the combina-
+<br/>tion of transmission from the sender and decoding from the
+<br/>receiver (Jack & Schyns, 2015). As a consequence, the quality
+<br/>of the interaction depends on the ability to both produce and
+<br/>identify facial expressions. Emotions are therefore a core
+<br/>feature of social bonding (Spoor & Kelly, 2004). Health
+<br/>of individuals and groups depend on the quality of social
+<br/>bonds in many animals (Boyer, Firat, & Leeuwen, 2015; S. L.
+<br/>Brown & Brown, 2015; Neuberg, Kenrick, & Schaller, 2011),
+<br/>18
+<br/>19
+<br/>20
+<br/>21
+<br/>22
+<br/>23
+<br/>24
+<br/>25
+<br/>26
+<br/>27
+<br/>28
+<br/>29
+<br/>30
+<br/>31
+<br/>32
+<br/>33
+<br/>34
+<br/>35
+<br/>especially in highly social species such as humans (Singer &
+<br/>Klimecki, 2014).
+<br/>The recognition of emotional signals produced by others is
+<br/>not independent from its production by oneself (Niedenthal,
+<br/>2007). The muscles of the face involved in the production of
+<br/>a facial expressions are also activated during the perception of
+<br/>the same facial expressions (Dimberg, Thunberg, & Elmehed,
+<br/>2000). In other terms, the facial mimicry of the perceived
+<br/>emotional facial expression (EFE) triggers its sensorimotor
+<br/>simulation in the brain, which improves the recognition abili-
+<br/>ties (Wood, Rychlowska, Korb, & Niedenthal, 2016). Beyond
+<br/>that, the emotion can be seen as the body -including brain-
+<br/>dynamic itself (Gallese & Caruana, 2016) which helps to un-
+<br/>derstand why behavioral simulation is necessary to understand
+<br/>the emotion.
+<br/>The interplay between emotion production, emotion percep-
+<br/>tion, social communication and body dynamics has been sum-
+<br/>marized in the framework of the polyvagal theory (Porges,
+</td></tr><tr><td>68f9cb5ee129e2b9477faf01181cd7e3099d1824</td><td>ALDA Algorithms for Online Feature Extraction
+</td></tr><tr><td>68bf34e383092eb827dd6a61e9b362fcba36a83a</td><td></td></tr><tr><td>6889d649c6bbd9c0042fadec6c813f8e894ac6cc</td><td>Analysis of Robust Soft Learning Vector
+<br/>Quantization and an application to Facial
+<br/>Expression Recognition
+</td></tr><tr><td>68c17aa1ecbff0787709be74d1d98d9efd78f410</td><td>International Journal of Optomechatronics, 6: 92–119, 2012
+<br/>Copyright # Taylor & Francis Group, LLC
+<br/>ISSN: 1559-9612 print=1559-9620 online
+<br/>DOI: 10.1080/15599612.2012.663463
+<br/>GENDER CLASSIFICATION FROM FACE IMAGES
+<br/>USING MUTUAL INFORMATION AND FEATURE
+<br/>FUSION
+<br/>Department of Electrical Engineering and Advanced Mining Technology
+<br/>Center, Universidad de Chile, Santiago, Chile
+<br/>In this article we report a new method for gender classification from frontal face images
+<br/>using feature selection based on mutual information and fusion of features extracted from
+<br/>intensity, shape, texture, and from three different spatial scales. We compare the results of
+<br/>three different mutual information measures: minimum redundancy and maximal relevance
+<br/>(mRMR), normalized mutual information feature selection (NMIFS), and conditional
+<br/>mutual information feature selection (CMIFS). We also show that by fusing features
+<br/>extracted from six different methods we significantly improve the gender classification
+<br/>results relative to those previously published, yielding 99.13% of the gender classification
+<br/>rate on the FERET database.
+<br/>Keywords: Feature fusion, feature selection, gender classification, mutual information, real-time gender
+<br/>classification
+<br/>1. INTRODUCTION
+<br/>During the 90’s, one of the main issues addressed in the area of computer
+<br/>vision was face detection. Many methods and applications were developed including
+<br/>the face detection used in many digital cameras nowadays. Gender classification is
+<br/>important in many possible applications including electronic marketing. Displays
+<br/>at retail stores could show products and offers according to the person gender as
+<br/>the person passes in front of a camera at the store. This is not a simple task since
+<br/>faces are not rigid and depend on illumination, pose, gestures, facial expressions,
+<br/>occlusions (glasses), and other facial features (makeup, beard). The high variability
+<br/>in the appearance of the face directly affects their detection and classification. Auto-
+<br/>matic classification of gender from face images has a wide range of possible applica-
+<br/>tions, ranging from human-computer interaction to applications in real-time
+<br/>electronic marketing in retail stores (Shan 2012; Bekios-Calfa et al. 2011; Chu
+<br/>et al. 2010; Perez et al. 2010a).
+<br/>Automatic gender classification has a wide range of possible applications for
+<br/>improving human-machine interaction and face identification methods (Irick et al.
+<br/>ing.uchile.cl
+<br/>92
+</td></tr><tr><td>6888f3402039a36028d0a7e2c3df6db94f5cb9bb</td><td>Under review as a conference paper at ICLR 2018
+<br/>CLASSIFIER-TO-GENERATOR ATTACK: ESTIMATION
+<br/>OF TRAINING DATA DISTRIBUTION FROM CLASSIFIER
+<br/>Anonymous authors
+<br/>Paper under double-blind review
+</td></tr><tr><td>574751dbb53777101502419127ba8209562c4758</td><td></td></tr><tr><td>57b8b28f8748d998951b5a863ff1bfd7ca4ae6a5</td><td></td></tr><tr><td>57101b29680208cfedf041d13198299e2d396314</td><td></td></tr><tr><td>57893403f543db75d1f4e7355283bdca11f3ab1b</td><td></td></tr><tr><td>57f8e1f461ab25614f5fe51a83601710142f8e88</td><td>Region Selection for Robust Face Verification using UMACE Filters
+<br/>Department of Electrical, Electronic and Systems Engineering, Faculty of Engineering,
+<br/>Universiti Kebangsaan Malaysia, 43600 Bangi, Selangor, Malaysia.
+<br/>In this paper, we investigate the verification performances of four subdivided face images with varying expressions. The
+<br/>objective of this study is to evaluate which part of the face image is more tolerant to facial expression and still retains its personal
+<br/>characteristics due to the variations of the image. The Unconstrained Minimum Average Correlation Energy (UMACE) filter is
+<br/>implemented to perform the verification process because of its advantages such as shift–invariance, ability to trade-off between
+<br/>discrimination and distortion tolerance, e.g. variations in pose, illumination and facial expression. The database obtained from the
+<br/>facial expression database of Advanced Multimedia Processing (AMP) Lab at CMU is used in this study. Four equal
+<br/>sizes of face regions i.e. bottom, top, left and right halves are used for the purpose of this study. The results show that the bottom
+<br/>half of the face region gives the best performance in terms of the PSR values with zero false accepted rate (FAR) and zero false
+<br/>rejection rate (FRR) compared to the other three regions.
+<br/>1. Introduction
+<br/>Face recognition is a well established field of research,
+<br/>and a large number of algorithms have been proposed in the
+<br/>literature. Various classifiers have been explored to improve
+<br/>the accuracy of face classification. The basic approach is to
+<br/>use distance-base methods which measure Euclidean distance
+<br/>between any two vectors and then compare it with the preset
+<br/>threshold. Neural Networks are often used as classifiers due
+<br/>to their powerful generation ability [1]. Support Vector
+<br/>Machines (SVM) have been applied with encouraging results
+<br/>[2].
+<br/>In biometric applications, one of the important tasks is the
+<br/>matching process between an individual biometrics against
+<br/>the database that has been prepared during the enrolment
+<br/>stage. For biometrics systems such as face authentication that
+<br/>use images as personal characteristics, biometrics sensor
+<br/>output and image pre-processing play an important role since
+<br/>the quality of a biometric input can change significantly due
+<br/>to illumination, noise and pose variations. Over the years,
+<br/>researchers have studied the role of illumination variation,
+<br/>pose variation, facial expression, and occlusions in affecting
+<br/>the performance of face verification systems [3].
+<br/>The Minimum Average Correlation Energy (MACE)
+<br/>filters have been reported to be an alternative solution to these
+<br/>problems because of the advantages such as shift-invariance,
+<br/>close-form expressions and distortion-tolerance. MACE
+<br/>filters have been successfully applied in the field of automatic
+<br/>target recognition as well as in biometric verification [3][4].
+<br/>Face and fingerprint verification using correlation filters have
+<br/>been investigated in [5] and [6], respectively. Savvides et.al
+<br/>performed face authentication and identification using
+<br/>correlation filters based on illumination variation [7]. In the
+<br/>process of implementing correlation filters, the number of
+<br/>training images used depends on the level of distortions
+<br/>applied to the images [5], [6].
+<br/>In this study, we investigate which part of a face image is
+<br/>more tolerant to facial expression and retains its personal
+<br/>characteristics for the verification process. Four subdivided
+<br/>face images, i.e. bottom, top, left and right halves, with
+<br/>varying expressions are investigated. By identifying only the
+<br/>region of the face that gives the highest verification
+<br/>performance, that region can be used instead of the full-face
+<br/>to reduce storage requirements.
+<br/>2. Unconstrained Minimum Average Correlation
+<br/>Energy (UMACE) Filter
+<br/>Correlation filter theory and the descriptions of the design
+<br/>of the correlation filter can be found in a tutorial survey paper
+<br/>[8]. According to [4][6], correlation filter evolves from
+<br/>matched filters which are optimal for detecting a known
+<br/>reference image in the presence of additive white Gaussian
+<br/>noise. However, the detection rate of matched filters
+<br/>decreases significantly due to even the small changes of scale,
+<br/>rotation and pose of the reference image.
+<br/>the pre-specified peak values
+<br/>In an effort to solve this problem, the Synthetic
+<br/>Discriminant Function (SDF) filter and the Equal Correlation
+<br/>Peak SDF (ECP SDF) filter ware introduced which allowed
+<br/>several training images to be represented by a single
+<br/>correlation filter. SDF filter produces pre-specified values
+<br/>called peak constraints. These peak values correspond to the
+<br/>authentic class or impostor class when an image is tested.
+<br/>However,
+<br/>to
+<br/>misclassifications when the sidelobes are larger than the
+<br/>controlled values at the origin.
+<br/>Savvides et.al developed
+<br/>the Minimum Average
+<br/>Correlation Energy (MACE) filters [5]. This filter reduces the
+<br/>large sidelobes and produces a sharp peak when the test
+<br/>image is from the same class as the images that have been
+<br/>used to design the filter. There are two kinds of variants that
+<br/>can be used in order to obtain a sharp peak when the test
+<br/>image belongs to the authentic class. The first MACE filter
+<br/>variant minimizes the average correlation energy of the
+<br/>training images while constraining the correlation output at
+<br/>the origin to a specific value for each of the training images.
+<br/>The second MACE filter variant is the Unconstrained
+<br/>Minimum Average Correlation Energy (UMACE) filter
+<br/>which also minimizes the average correlation output while
+<br/>maximizing the correlation output at the origin [4].
+<br/>lead
+<br/>Proceedings of the International Conference onElectrical Engineering and InformaticsInstitut Teknologi Bandung, Indonesia June 17-19, 2007B-67ISBN 978-979-16338-0-2611 </td></tr><tr><td>57a1466c5985fe7594a91d46588d969007210581</td><td>A Taxonomy of Face-models for System Evaluation
+<br/>Motivation and Data Types
+<br/>Synthetic Data Types
+<br/>Unverified – Have no underlying physical or
+<br/>statistical basis
+<br/>Physics -Based – Based on structure and
+<br/>materials combined with the properties
+<br/>formally modeled in physics.
+<br/>Statistical – Use statistics from real
+<br/>data/experiments to estimate/learn model
+<br/>parameters. Generally have measurements
+<br/>of accuracy
+<br/>Guided Synthetic – Individual models based
+<br/>on individual people. No attempt to capture
+<br/>properties of large groups, a unique model
+<br/>per person. For faces, guided models are
+<br/>composed of 3D structure models and skin
+<br/>textures, capturing many artifacts not
+<br/>easily parameterized. Can be combined with
+<br/>physics-based rendering to generate samples
+<br/>under different conditions.
+<br/>Semi–Synethetic – Use measured data such
+<br/>as 2D images or 3D facial scans. These are
+<br/>not truly synthetic as they are re-rendering’s
+<br/>of real measured data.
+<br/>Semi and Guided Synthetic data provide
+<br/>higher operational relevance while
+<br/>maintaining a high degree of control.
+<br/>Generating statistically significant size
+<br/>datasets for face matching system
+<br/>evaluation is both a laborious and
+<br/>expensive process.
+<br/>There is a gap in datasets that allow for
+<br/>evaluation of system issues including:
+<br/> Long distance recognition
+<br/> Blur caused by atmospherics
+<br/> Various weather conditions
+<br/> End to end systems evaluation
+<br/>Our contributions:
+<br/> Define a taxonomy of face-models
+<br/>for controlled experimentations
+<br/> Show how Synthetic addresses gaps
+<br/>in system evaluation
+<br/> Show a process for generating and
+<br/>validating synthetic models
+<br/> Use these models in long distance
+<br/>face recognition system evaluation
+<br/>Experimental Setup
+<br/>Results and Conclusions
+<br/>Example Models
+<br/>Original Pie
+<br/>Semi-
+<br/>Synthetic
+<br/>FaceGen
+<br/>Animetrics
+<br/>http://www.facegen.com
+<br/>http://www.animetrics.com/products/Forensica.php
+<br/>Guided-
+<br/>Synthetic
+<br/>Models
+<br/> Models generated using the well
+<br/>known CMU PIE [18] dataset. Each of
+<br/>the 68 subjects of PIE were modeled
+<br/>using a right profile and frontal
+<br/>image from the lights subset.
+<br/> Two modeling programs were used,
+<br/>Facegen and Animetrics. Both
+<br/>programs create OBJ files and
+<br/>textures
+<br/> Models are re-rendered using
+<br/>custom display software built with
+<br/>OpenGL, GLUT and DevIL libraries
+<br/> Custom Display Box housing a BENQ SP820 high
+<br/>powered projector rated at 4000 ANSI Lumens
+<br/> Canon EOS 7D withd a Sigma 800mm F5.6 EX APO
+<br/>DG HSM lens a 2x adapter imaging the display
+<br/>from 214 meters
+<br/>Normalized Example Captures
+<br/>Real PIE 1 Animetrics
+<br/>FaceGen
+<br/>81M inside 214M outside
+<br/>Real PIE 2
+<br/> Pre-cropped images were used for the
+<br/>commercial core
+<br/> Ground truth eye points + geometric/lighting
+<br/>normalization pre processing before running
+<br/>through the implementation of the V1
+<br/>recognition algorithm found in [1].
+<br/> Geo normalization highlights how the feature
+<br/>region of the models looks very similar to
+<br/>that of the real person.
+<br/>Each test consisted of using 3 approximately frontal gallery images NOT used to
+<br/>make the 3D model used as the probe, best score over 3 images determined score.
+<br/>Even though the PIE-3D-20100224A–D sets were imaged on the same day, the V1
+<br/>core scored differently on each highlighting the synthetic data’s ability to help
+<br/>evaluate data capture methods and effects of varying atmospherics. The ISO setting
+<br/>varied which effects the shutter speed, with higher ISO generally yielding less blur.
+<br/>Dataset
+<br/>Range(m)
+<br/>Iso
+<br/>V1
+<br/>Comm.
+<br/>Original PIE Images
+<br/>FaceGen ScreenShots
+<br/>Animetrics Screenshots
+<br/>PIE-3D-20100210B
+<br/>PIE-3D-20100224A
+<br/>PIE-3D-20100224B
+<br/>PIE-3D-20100224C
+<br/>PIE-3D-20100224D
+<br/>N/A
+<br/>N/A
+<br/>N/A
+<br/>81m
+<br/>214m
+<br/>214m
+<br/>214m
+<br/>214m
+<br/>N/A
+<br/>N/A
+<br/>N/A
+<br/>500
+<br/>125
+<br/>125
+<br/>250
+<br/>400
+<br/>100
+<br/>47.76
+<br/>100
+<br/>100
+<br/>58.82
+<br/>45.59
+<br/>81.82
+<br/>79.1
+<br/>100
+<br/>100
+<br/>100
+<br/>100
+<br/>100
+<br/>100
+<br/> The same (100 percent) recognition rate on screenshots as original images
+<br/>validate the Anmetrics guided synthetic models and fails FaceGen Models.
+<br/> 100% recognition means dataset is too small/easy; exapanding pose and models
+<br/>underway.
+<br/> Expanded the photohead methodology into 3D
+<br/> Developed a robust modeling system allowing for multiple configurations of a
+<br/>single real life data set.
+<br/> Gabor+SVM based V1[15] significantly more impacted by atmospheric blur than
+<br/>the commercial algorithm
+<br/>Key References:
+<br/>[6 of 21] R. Bevridge, D. Bolme, M Teixeira, and B. Draper. The CSU Face Identification Evaluation System Users Guide: Version 5.0. Technical report, CSU 2003
+<br/>[8 of 21] T. Boult and W. Scheirer. Long range facial image acquisition and quality. In M. Tisarelli, S. Li, and R. Chellappa.
+<br/>[15 of 21] N. Pinto, J. J. DiCarlo, and D. D. Cox. How far can you get with a modern face recognition test set using only simple features? In IEEE CVPR, 2009.
+<br/>[18 of 21] T. Sim, S. Baker, and M. Bsat. The CMU Pose, Illumination and Expression (PIE) Database. In Proceedings of the IEEE F&G, May 2002.
+</td></tr><tr><td>5721216f2163d026e90d7cd9942aeb4bebc92334</td><td></td></tr><tr><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td><td></td></tr><tr><td>574ad7ef015995efb7338829a021776bf9daaa08</td><td>AdaScan: Adaptive Scan Pooling in Deep Convolutional Neural Networks
+<br/>for Human Action Recognition in Videos
+<br/>1IIT Kanpur‡
+<br/>2SRI International
+<br/>3UCSD
+</td></tr><tr><td>57d37ad025b5796457eee7392d2038910988655a</td><td>GEERATVEEETATF
+<br/>
+<br/>by
+<br/>DagaEha
+<br/>UdeheS eviif
+<br/>f.DahaWeiha
+<br/>ATheiS biediaia
+<br/>Re ieefheDegeef
+<br/>aefSciece
+<br/>a
+<br/>TheSch
+<br/>
+<br/>Decebe2009
+</td></tr><tr><td>3b1260d78885e872cf2223f2c6f3d6f6ea254204</td><td></td></tr><tr><td>3b1aaac41fc7847dd8a6a66d29d8881f75c91ad5</td><td>Sparse Representation-based Open Set Recognition
+</td></tr><tr><td>3bc776eb1f4e2776f98189e17f0d5a78bb755ef4</td><td></td></tr><tr><td>3b4fd2aec3e721742f11d1ed4fa3f0a86d988a10</td><td>Glimpse: Continuous, Real-Time Object Recognition on
+<br/>Mobile Devices
+<br/>MIT CSAIL
+<br/>Microsoft Research
+<br/>MIT CSAIL
+<br/>Microsoft Research
+<br/>MIT CSAIL
+</td></tr><tr><td>3b15a48ffe3c6b3f2518a7c395280a11a5f58ab0</td><td>On Knowledge Transfer in
+<br/>Object Class Recognition
+<br/>A dissertation approved by
+<br/>TECHNISCHE UNIVERSITÄT DARMSTADT
+<br/>Fachbereich Informatik
+<br/>for the degree of
+<br/>Doktor-Ingenieur (Dr.-Ing.)
+<br/>presented by
+<br/>Dipl.-Inform.
+<br/>born in Mainz, Germany
+<br/>Prof. Dr.-Ing. Michael Goesele, examiner
+<br/>Prof. Martial Hebert, Ph.D., co-examiner
+<br/>Prof. Dr. Bernt Schiele, co-examiner
+<br/>Date of Submission: 12th of August, 2010
+<br/>Date of Defense: 23rd of September, 2010
+<br/>Darmstadt, 2010
+<br/>D17
+</td></tr><tr><td>3ba8f8b6bfb36465018430ffaef10d2caf3cfa7e</td><td>Local Directional Number Pattern for Face
+<br/>Analysis: Face and Expression Recognition
+</td></tr><tr><td>3b80bf5a69a1b0089192d73fa3ace2fbb52a4ad5</td><td></td></tr><tr><td>3b9d94752f8488106b2c007e11c193f35d941e92</td><td>CVPR
+<br/>#2052
+<br/>000
+<br/>001
+<br/>002
+<br/>003
+<br/>004
+<br/>005
+<br/>006
+<br/>007
+<br/>008
+<br/>009
+<br/>010
+<br/>011
+<br/>012
+<br/>013
+<br/>014
+<br/>015
+<br/>016
+<br/>017
+<br/>018
+<br/>019
+<br/>020
+<br/>021
+<br/>022
+<br/>023
+<br/>024
+<br/>025
+<br/>026
+<br/>027
+<br/>028
+<br/>029
+<br/>030
+<br/>031
+<br/>032
+<br/>033
+<br/>034
+<br/>035
+<br/>036
+<br/>037
+<br/>038
+<br/>039
+<br/>040
+<br/>041
+<br/>042
+<br/>043
+<br/>044
+<br/>045
+<br/>046
+<br/>047
+<br/>048
+<br/>049
+<br/>050
+<br/>051
+<br/>052
+<br/>053
+<br/>CVPR 2013 Submission #2052. CONFIDENTIAL REVIEW COPY. DO NOT DISTRIBUTE.
+<br/>CVPR
+<br/>#2052
+<br/>Appearance, Visual and Social Ensembles for
+<br/>Face Recognition in Personal Photo Collections
+<br/>Anonymous CVPR submission
+<br/>Paper ID 2052
+</td></tr><tr><td>3be7b7eb11714e6191dd301a696c734e8d07435f</td><td></td></tr><tr><td>3b410ae97e4564bc19d6c37bc44ada2dcd608552</td><td>Scalability Analysis of Audio-Visual Person
+<br/>Identity Verification
+<br/>1 Communications Laboratory,
+<br/>Universit´e catholique de Louvain, B-1348 Belgium,
+<br/>2 IDIAP, CH-1920 Martigny,
+<br/>Switzerland
+</td></tr><tr><td>6f5ce5570dc2960b8b0e4a0a50eab84b7f6af5cb</td><td>Low Resolution Face Recognition Using a
+<br/>Two-Branch Deep Convolutional Neural Network
+<br/>Architecture
+</td></tr><tr><td>6f288a12033fa895fb0e9ec3219f3115904f24de</td><td>Learning Expressionlets via Universal Manifold
+<br/>Model for Dynamic Facial Expression Recognition
+</td></tr><tr><td>6f2dc51d607f491dbe6338711c073620c85351ac</td><td></td></tr><tr><td>6f75697a86d23d12a14be5466a41e5a7ffb79fad</td><td></td></tr><tr><td>6f7d06ced04ead3b9a5da86b37e7c27bfcedbbdd</td><td>Pages 51.1-51.12
+<br/>DOI: https://dx.doi.org/10.5244/C.30.51
+</td></tr><tr><td>6f7a8b3e8f212d80f0fb18860b2495be4c363eac</td><td>Creating Capsule Wardrobes from Fashion Images
+<br/>UT-Austin
+<br/>UT-Austin
+</td></tr><tr><td>6f6b4e2885ea1d9bea1bb2ed388b099a5a6d9b81</td><td>Structured Output SVM Prediction of Apparent Age,
+<br/>Gender and Smile From Deep Features
+<br/>Michal Uˇriˇc´aˇr
+<br/>CMP, Dept. of Cybernetics
+<br/>FEE, CTU in Prague
+<br/>Computer Vision Lab
+<br/>D-ITET, ETH Zurich
+<br/>Computer Vision Lab
+<br/>D-ITET, ETH Zurich
+<br/>PSI, ESAT, KU Leuven
+<br/>CVL, D-ITET, ETH Zurich
+<br/>Jiˇr´ı Matas
+<br/>CMP, Dept. of Cybernetics
+<br/>FEE, CTU in Prague
+</td></tr><tr><td>6f35b6e2fa54a3e7aaff8eaf37019244a2d39ed3</td><td>DOI 10.1007/s00530-005-0177-4
+<br/>R E G U L A R PA P E R
+<br/>Learning probabilistic classifiers for human–computer
+<br/>interaction applications
+<br/>Published online: 10 May 2005
+<br/>c(cid:1) Springer-Verlag 2005
+<br/>intelligent
+<br/>interaction,
+</td></tr><tr><td>6fa3857faba887ed048a9e355b3b8642c6aab1d8</td><td>Face Recognition in Challenging Environments:
+<br/>An Experimental and Reproducible Research
+<br/>Survey
+</td></tr><tr><td>6f7ce89aa3e01045fcd7f1c1635af7a09811a1fe</td><td>978-1-4673-0046-9/12/$26.00 ©2012 IEEE
+<br/>937
+<br/>ICASSP 2012
+</td></tr><tr><td>6fe2efbcb860767f6bb271edbb48640adbd806c3</td><td>SOFT BIOMETRICS: HUMAN IDENTIFICATION USING COMPARATIVE DESCRIPTIONS
+<br/>Soft Biometrics; Human Identification using
+<br/>Comparative Descriptions
+</td></tr><tr><td>6fdc0bc13f2517061eaa1364dcf853f36e1ea5ae</td><td>DAISEE: Dataset for Affective States in
+<br/>E-Learning Environments
+<br/>1 Microsoft India R&D Pvt. Ltd.
+<br/>2 Department of Computer Science, IIT Hyderabad
+</td></tr><tr><td>6f5151c7446552fd6a611bf6263f14e729805ec7</td><td>5KHHAO /7 %:0 7
+<br/>)>IJH=?J 9EJDE JDA ?JANJ B=?A ANFHAIIE ?=IIE?=JE KIEC JDA
+<br/>FH>=>EEJEAI JD=J A=?D A B IALAH= ?O ??KHHEC )7 CHKFI EI
+<br/>?=IIIAF=H=>EEJO MAECDJEC
+<br/>/=>H M=LAAJI H FHE?EF= ?FAJI ==OIEI 2+) ! 1 JDEI F=FAH MA
+</td></tr><tr><td>03c56c176ec6377dddb6a96c7b2e95408db65a7a</td><td>A Novel Geometric Framework on Gram Matrix
+<br/>Trajectories for Human Behavior Understanding
+</td></tr><tr><td>03d9ccce3e1b4d42d234dba1856a9e1b28977640</td><td></td></tr><tr><td>0322e69172f54b95ae6a90eb3af91d3daa5e36ea</td><td>Face Classification using Adjusted Histogram in
+<br/>Grayscale
+</td></tr><tr><td>03f7041515d8a6dcb9170763d4f6debd50202c2b</td><td>Clustering Millions of Faces by Identity
+</td></tr><tr><td>038ce930a02d38fb30d15aac654ec95640fe5cb0</td><td>Approximate Structured Output Learning for Constrained Local
+<br/>Models with Application to Real-time Facial Feature Detection and
+<br/>Tracking on Low-power Devices
+</td></tr><tr><td>03c1fc9c3339813ed81ad0de540132f9f695a0f8</td><td>Proceedings of Machine Learning Research 81:1–15, 2018
+<br/>Conference on Fairness, Accountability, and Transparency
+<br/>Gender Shades: Intersectional Accuracy Disparities in
+<br/>Commercial Gender Classification∗
+<br/>MIT Media Lab 75 Amherst St. Cambridge, MA 02139
+<br/>Microsoft Research 641 Avenue of the Americas, New York, NY 10011
+<br/>Editors: Sorelle A. Friedler and Christo Wilson
+</td></tr><tr><td>0339459a5b5439d38acd9c40a0c5fea178ba52fb</td><td>D|C|I&I 2009 Prague
+<br/>Multimodal recognition of emotions in car
+<br/>environments
+</td></tr><tr><td>03a8f53058127798bc2bc0245d21e78354f6c93b</td><td>Max-Margin Additive Classifiers for Detection
+<br/>Sam Hare
+<br/>VGG Reading Group
+<br/>October 30, 2009
+</td></tr><tr><td>03fc466fdbc8a2efb6e3046fcc80e7cb7e86dc20</td><td>A Real Time System for Model-based Interpretation of
+<br/>the Dynamics of Facial Expressions
+<br/>Technische Universit¨at M¨unchen
+<br/>Boltzmannstr. 3, 85748 Garching
+<br/>1. Motivation
+<br/>Recent progress in the field of Computer Vision allows
+<br/>intuitive interaction via speech, gesture or facial expressions
+<br/>between humans and technical systems.Model-based tech-
+<br/>niques facilitate accurately interpreting images with faces
+<br/>by exploiting a priori knowledge, such as shape and texture
+<br/>information. This renders them an inevitable component
+<br/>to realize the paradigm of intuitive human-machine interac-
+<br/>tion.
+<br/>Our demonstration shows model-based recognition of
+<br/>facial expressions in real-time via the state-of-the-art
+<br/>Candide-3 face model [1] as visible in Figure 1. This three-
+<br/>dimensional and deformable model is highly appropriate
+<br/>for real-world face interpretation applications. However,
+<br/>its complexity challenges the task of model fitting and we
+<br/>tackle this challenge with an algorithm that has been auto-
+<br/>matically learned from a large set of images. This solution
+<br/>provides both, high accuracy and runtime. Note, that our
+<br/>system is not limited to facial expression estimation. Gaze
+<br/>direction, gender and age are also estimated.
+<br/>2. Face Model Fitting
+<br/>Models reduce the large amount of image data to a
+<br/>small number of model parameters to describe the im-
+<br/>age content, which facilitates and accelerates the subse-
+<br/>quent interpretation task. Cootes et al. [3] introduced mod-
+<br/>elling shapes with Active Contours. Further enhancements
+<br/>emerged the idea of expanding shape models with texture
+<br/>information [2]. Recent research considers modelling faces
+<br/>in 3D space [1, 10].
+<br/>Fitting the face model is the computational challenge of
+<br/>finding the parameters that best describe the face within a
+<br/>given image. This task is often addressed by minimizing
+<br/>an objective function, such as the pixel error between the
+<br/>model’s rendered surface and the underlying image content.
+<br/>This section describes the four main components of model-
+<br/>based techniques, see [9].
+<br/>The face model contains a parameter vector p that repre-
+<br/>sents its configurations. We integrate the complex and de-
+<br/>formable 3D wire frame Candide-3 face model [1]. The
+<br/>model consists of 116 anatomical landmarks and its param-
+<br/>eter vector p = (rx, ry, rz, s, tx, ty, σ, α)T describes the
+<br/>affine transformation (rx, ry, rz, s, tx, ty) and the deforma-
+<br/>tion (σ, α). The 79 deformation parameters indicate the
+<br/>shape of facial components such as the mouth, the eyes, or
+<br/>the eye brows, etc., see Figure 2.
+<br/>The localization algorithm computes an initial estimate of
+<br/>the model parameters that is further refined by the subse-
+<br/>quent fitting algorithm. Our system integrates the approach
+<br/>of [8], which detects the model’s affine transformation in
+<br/>case the image shows a frontal view face.
+<br/>The objective function yields a comparable value that
+<br/>specifies how accurately a parameterized model matches an
+<br/>image. Traditional approaches manually specify the objec-
+<br/>tive function in a laborious and erroneous task. In contrast,
+<br/>we automatically learn the objective function from a large
+<br/>set of training data based on objective information theoretic
+<br/>measures [9]. This approach does not require expert knowl-
+<br/>edge and it is domain-independently applicable. As a re-
+<br/>sult, this approach yields more robust and accurate objective
+<br/>functions, which greatly facilitate the task of the associated
+<br/>fitting algorithms. Accurately estimated model parameters
+<br/>in turn are required to infer correct high-level information,
+<br/>such as facial expression or gaze direction.
+<br/>Figure 1. Interpreting expressions with the Candide-3 face model.
+</td></tr><tr><td>03b98b4a2c0b7cc7dae7724b5fe623a43eaf877b</td><td>Acume: A Novel Visualization Tool for Understanding Facial
+<br/>Expression and Gesture Data
+</td></tr><tr><td>03104f9e0586e43611f648af1132064cadc5cc07</td><td></td></tr><tr><td>03f14159718cb495ca50786f278f8518c0d8c8c9</td><td>2015 IEEE International Conference on Control System, Computing and Engineering, Nov 27 – Nov 29, 2015 Penang, Malaysia
+<br/>2015 IEEE International Conference on Control System,
+<br/>Computing and Engineering (ICCSCE2015)
+<br/>Technical Session 1A – DAY 1 – 27th Nov 2015
+<br/>Time: 3.00 pm – 4.30 pm
+<br/>Venue: Jintan
+<br/>Topic: Signal and Image Processing
+<br/>3.00 pm – 3.15pm
+<br/>3.15 pm – 3.30pm
+<br/>3.30 pm – 3.45pm
+<br/>3.45 pm – 4.00pm
+<br/>4.00 pm – 4.15pm
+<br/>4.15 pm – 4.30pm
+<br/>4.30 pm – 4.45pm
+<br/>1A 01 ID3
+<br/>Can Subspace Based Learning Approach Perform on Makeup Face
+<br/>Recognition?
+<br/>Khor Ean Yee, Pang Ying Han, Ooi Shih Yin and Wee Kuok Kwee
+<br/>1A 02 ID35
+<br/>Performance Evaluation of HOG and Gabor Features for Vision-based
+<br/>Vehicle Detection
+<br/>1A 03 ID23
+<br/>Experimental Method to Pre-Process Fuzzy Bit Planes before Low-Level
+<br/>Feature Extraction in Thermal Images
+<br/>Chan Wai Ti and Sim Kok Swee
+<br/>1A 04 ID84
+<br/>Fractal-based Texture and HSV Color Features for Fabric Image Retrieval
+<br/>Nanik Suciati, Darlis Herumurti and Arya Yudhi Wijaya
+<br/>1A 05 ID168
+<br/>Study of Automatic Melody Extraction Methods for Philippine Indigenous
+<br/>Music
+<br/>Jason Disuanco, Vanessa Tan, Franz de Leon
+<br/>1A 06 ID211
+<br/>Acoustical Comparison between Voiced and Voiceless Arabic Phonemes of
+<br/>Malay
+<br/>Speakers
+<br/>Ali Abd Almisreb, Ahmad Farid Abidin, Nooritawati Md Tahir
+<br/>*shaded cell is the proposed session chair
+<br/>viii
+<br/>©Faculty of Electrical Engineering, Universiti Teknologi MARA
+</td></tr><tr><td>0394040749195937e535af4dda134206aa830258</td><td>Geodesic Entropic Graphs for Dimension and
+<br/>Entropy Estimation in Manifold Learning
+<br/>December 16, 2003
+</td></tr><tr><td>0334cc0374d9ead3dc69db4816d08c917316c6c4</td><td></td></tr><tr><td>0394e684bd0a94fc2ff09d2baef8059c2652ffb0</td><td>Median Robust Extended Local Binary Pattern
+<br/>for Texture Classification
+<br/>Index Terms— Texture descriptors, rotation invariance, local
+<br/>binary pattern (LBP), feature extraction, texture analysis.
+<br/>how the texture recognition process works in humans as
+<br/>well as in the important role it plays in the wide variety of
+<br/>applications of computer vision and image analysis [1], [2].
+<br/>The many applications of texture classification include medical
+<br/>image analysis and understanding, object recognition, biomet-
+<br/>rics, content-based image retrieval, remote sensing, industrial
+<br/>inspection, and document classification.
+<br/>As a classical pattern recognition problem, texture classifi-
+<br/>cation primarily consists of two critical subproblems: feature
+<br/>extraction and classifier designation [1], [2]. It is generally
+<br/>agreed that the extraction of powerful texture features plays a
+<br/>relatively more important role, since if poor features are used
+<br/>even the best classifier will fail to achieve good recognition
+<br/>results. Consequently, most research in texture classification
+<br/>focuses on the feature extraction part and numerous texture
+<br/>feature extraction methods have been developed, with excellent
+<br/>surveys given in [1]–[5]. Most existing methods have not,
+<br/>however, been capable of performing sufficiently well for
+<br/>real-world applications, which have demanding requirements
+<br/>including database size, nonideal environmental conditions,
+<br/>and running in real-time.
+</td></tr><tr><td>03e88bf3c5ddd44ebf0e580d4bd63072566613ad</td><td></td></tr><tr><td>03f4c0fe190e5e451d51310bca61c704b39dcac8</td><td>J Ambient Intell Human Comput
+<br/>DOI 10.1007/s12652-016-0406-z
+<br/>O R I G I N A L R E S E A R C H
+<br/>CHEAVD: a Chinese natural emotional audio–visual database
+<br/>Received: 30 March 2016 / Accepted: 22 August 2016
+<br/>Ó Springer-Verlag Berlin Heidelberg 2016
+</td></tr><tr><td>031055c241b92d66b6984643eb9e05fd605f24e2</td><td>Multi-fold MIL Training for Weakly Supervised Object Localization
+<br/>Inria∗
+</td></tr><tr><td>0332ae32aeaf8fdd8cae59a608dc8ea14c6e3136</td><td>Int J Comput Vis
+<br/>DOI 10.1007/s11263-017-1009-7
+<br/>Large Scale 3D Morphable Models
+<br/>Received: 15 March 2016 / Accepted: 24 March 2017
+<br/>© The Author(s) 2017. This article is an open access publication
+</td></tr><tr><td>034addac4637121e953511301ef3a3226a9e75fd</td><td>Implied Feedback: Learning Nuances of User Behavior in Image Search
+<br/>Virginia Tech
+</td></tr><tr><td>03701e66eda54d5ab1dc36a3a6d165389be0ce79</td><td>179
+<br/>Improved Principal Component Regression for Face
+<br/>Recognition Under Illumination Variations
+</td></tr><tr><td>9b318098f3660b453fbdb7a579778ab5e9118c4c</td><td>3931
+<br/>Joint Patch and Multi-label Learning for Facial
+<br/>Action Unit and Holistic Expression Recognition
+<br/>classifiers without
+</td></tr><tr><td>9b000ccc04a2605f6aab867097ebf7001a52b459</td><td></td></tr><tr><td>9b474d6e81e3b94e0c7881210e249689139b3e04</td><td>VG-RAM Weightless Neural Networks for
+<br/>Face Recognition
+<br/>Departamento de Inform´atica
+<br/>Universidade Federal do Esp´ırito Santo
+<br/>Av. Fernando Ferrari, 514, 29075-910 - Vit´oria-ES
+<br/>Brazil
+<br/>1. Introduction
+<br/>Computerized human face recognition has many practical applications, such as access control,
+<br/>security monitoring, and surveillance systems, and has been one of the most challenging and
+<br/>active research areas in computer vision for many decades (Zhao et al.; 2003). Even though
+<br/>current machine recognition systems have reached a certain level of maturity, the recognition
+<br/>of faces with different facial expressions, occlusions, and changes in illumination and/or pose
+<br/>is still a hard problem.
+<br/>A general statement of the problem of machine recognition of faces can be formulated as fol-
+<br/>lows: given an image of a scene, (i) identify or (ii) verify one or more persons in the scene
+<br/>using a database of faces. In identification problems, given a face as input, the system reports
+<br/>back the identity of an individual based on a database of known individuals; whereas in veri-
+<br/>fication problems, the system confirms or rejects the claimed identity of the input face. In both
+<br/>cases, the solution typically involves segmentation of faces from scenes (face detection), fea-
+<br/>ture extraction from the face regions, recognition, or verification. In this chapter, we examine
+<br/>the recognition of frontal face images required in the context of identification problems.
+<br/>Many approaches have been proposed to tackle the problem of face recognition. One can
+<br/>roughly divide these into (i) holistic approaches, (ii) feature-based approaches, and (iii) hybrid
+<br/>approaches (Zhao et al.; 2003). Holistic approaches use the whole face region as the raw input
+<br/>to a recognition system (a classifier). In feature-based approaches, local features, such as the
+<br/>eyes, nose, and mouth, are first extracted and their locations and local statistics (geometric
+<br/>and/or appearance based) are fed into a classifier. Hybrid approaches use both local features
+<br/>and the whole face region to recognize a face.
+<br/>Among
+<br/>fisher-
+<br/>faces (Belhumeur et al.; 1997; Etemad and Chellappa; 1997) have proved to be effective
+<br/>(Turk and Pentland;
+<br/>eigenfaces
+<br/>holistic
+<br/>approaches,
+<br/>1991)
+<br/>and
+</td></tr><tr><td>9bc01fa9400c231e41e6a72ec509d76ca797207c</td><td></td></tr><tr><td>9bcfadd22b2c84a717c56a2725971b6d49d3a804</td><td>How to Detect a Loss of Attention in a Tutoring System
+<br/>using Facial Expressions and Gaze Direction
+</td></tr><tr><td>9bac481dc4171aa2d847feac546c9f7299cc5aa0</td><td>Matrix Product State for Higher-Order Tensor
+<br/>Compression and Classification
+</td></tr><tr><td>9b7974d9ad19bb4ba1ea147c55e629ad7927c5d7</td><td>Faical Expression Recognition by Combining
+<br/>Texture and Geometrical Features
+</td></tr><tr><td>9ea73660fccc4da51c7bc6eb6eedabcce7b5cead</td><td>Talking Head Detection by Likelihood-Ratio Test†
+<br/>MIT Lincoln Laboratory,
+<br/>Lexington MA 02420, USA
+</td></tr><tr><td>9e9052256442f4e254663ea55c87303c85310df9</td><td>International Journal of Advanced Research in Computer Engineering & Technology (IJARCET)
+<br/>Volume 4 Issue 10, October 2015
+<br/>Review On Attribute-assisted Reranking for
+<br/>Image Search
+<br/>
+</td></tr><tr><td>9e0285debd4b0ba7769b389181bd3e0fd7a02af6</td><td>From face images and attributes to attributes
+<br/>Computer Vision Laboratory, ETH Zurich, Switzerland
+</td></tr><tr><td>9e5c2d85a1caed701b68ddf6f239f3ff941bb707</td><td></td></tr><tr><td>04bb3fa0824d255b01e9db4946ead9f856cc0b59</td><td></td></tr><tr><td>040dc119d5ca9ea3d5fc39953a91ec507ed8cc5d</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Large-scale Bisample Learning on ID vs. Spot Face Recognition
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>04470861408d14cc860f24e73d93b3bb476492d0</td><td></td></tr><tr><td>0447bdb71490c24dd9c865e187824dee5813a676</td><td>Manifold Estimation in View-based Feature
+<br/>Space for Face Synthesis Across Pose
+<br/>Paper 27
+</td></tr><tr><td>044ba70e6744e80c6a09fa63ed6822ae241386f2</td><td>TO APPEAR IN AUTONOMOUS ROBOTS, SPECIAL ISSUE IN LEARNING FOR HUMAN-ROBOT COLLABORATION
+<br/>Early Prediction for Physical Human Robot
+<br/>Collaboration in the Operating Room
+</td></tr><tr><td>04dcdb7cb0d3c462bdefdd05508edfcff5a6d315</td><td>Assisting the training of deep neural networks
+<br/>with applications to computer vision
+<br/>tesi doctoral està subjecta a
+<br/>la
+<br/>Aquesta
+<br/>CompartirIgual 4.0. Espanya de Creative Commons.
+<br/>Esta tesis doctoral está sujeta a la licencia Reconocimiento - NoComercial – CompartirIgual
+<br/>4.0. España de Creative Commons.
+<br/>This doctoral thesis is licensed under the Creative Commons Attribution-NonCommercial-
+<br/>ShareAlike 4.0. Spain License.
+<br/>llicència Reconeixement- NoComercial –
+</td></tr><tr><td>044fdb693a8d96a61a9b2622dd1737ce8e5ff4fa</td><td>Dynamic Texture Recognition Using Local Binary
+<br/>Patterns with an Application to Facial Expressions
+</td></tr><tr><td>04250e037dce3a438d8f49a4400566457190f4e2</td><td></td></tr><tr><td>0431e8a01bae556c0d8b2b431e334f7395dd803a</td><td>Learning Localized Perceptual Similarity Metrics for Interactive Categorization
+<br/>Google Inc.
+<br/>google.com
+</td></tr><tr><td>04b4c779b43b830220bf938223f685d1057368e9</td><td>Video retrieval based on deep convolutional
+<br/>neural network
+<br/>Yajiao Dong
+<br/>School of Information and Electronics,
+<br/>Beijing Institution of Technology, Beijing, China
+<br/>Jianguo Li
+<br/>School of Information and Electronics,
+<br/>Beijing Institution of Technology, Beijing, China
+</td></tr><tr><td>04616814f1aabe3799f8ab67101fbaf9fd115ae4</td><td><b>UNIVERSIT´EDECAENBASSENORMANDIEU.F.R.deSciences´ECOLEDOCTORALESIMEMTH`ESEPr´esent´eeparM.GauravSHARMAsoutenuele17D´ecembre2012envuedel’obtentionduDOCTORATdel’UNIVERSIT´EdeCAENSp´ecialit´e:InformatiqueetapplicationsArrˆet´edu07aoˆut2006Titre:DescriptionS´emantiquedesHumainsPr´esentsdansdesImagesVid´eo(SemanticDescriptionofHumansinImages)TheworkpresentedinthisthesiswascarriedoutatGREYC-UniversityofCaenandLEAR–INRIAGrenobleJuryM.PatrickPEREZDirecteurdeRechercheINRIA/Technicolor,RennesRapporteurM.FlorentPERRONNINPrincipalScientistXeroxRCE,GrenobleRapporteurM.JeanPONCEProfesseurdesUniversit´esENS,ParisExaminateurMme.CordeliaSCHMIDDirectricedeRechercheINRIA,GrenobleDirectricedeth`eseM.Fr´ed´ericJURIEProfesseurdesUniversit´esUniversit´edeCaenDirecteurdeth`ese</b></td></tr><tr><td>6a3a07deadcaaab42a0689fbe5879b5dfc3ede52</td><td>Learning to Estimate Pose by Watching Videos
+<br/>Department of Computer Science and Engineering
+<br/>IIT Kanpur
+</td></tr><tr><td>6ad107c08ac018bfc6ab31ec92c8a4b234f67d49</td><td></td></tr><tr><td>6a184f111d26787703f05ce1507eef5705fdda83</td><td></td></tr><tr><td>6a16b91b2db0a3164f62bfd956530a4206b23fea</td><td>A Method for Real-Time Eye Blink Detection and Its Application
+<br/>Mahidol Wittayanusorn School
+<br/>Puttamonton, Nakornpatom 73170, Thailand
+</td></tr><tr><td>6a806978ca5cd593d0ccd8b3711b6ef2a163d810</td><td>Facial feature tracking for Emotional Dynamic
+<br/>Analysis
+<br/>1ISIR, CNRS UMR 7222
+<br/>Univ. Pierre et Marie Curie, Paris
+<br/>2LAMIA, EA 4540
+<br/>Univ. of Fr. West Indies & Guyana
+</td></tr><tr><td>6a8a3c604591e7dd4346611c14dbef0c8ce9ba54</td><td>ENTERFACE’10, JULY 12TH - AUGUST 6TH, AMSTERDAM, THE NETHERLANDS.
+<br/>58
+<br/>An Affect-Responsive Interactive Photo Frame
+</td></tr><tr><td>6aa43f673cc42ed2fa351cbc188408b724cb8d50</td><td></td></tr><tr><td>6aefe7460e1540438ffa63f7757c4750c844764d</td><td>Non-rigid Segmentation using Sparse Low Dimensional Manifolds and
+<br/>Deep Belief Networks ∗
+<br/>Instituto de Sistemas e Rob´otica
+<br/>Instituto Superior T´ecnico, Portugal
+</td></tr><tr><td>6a1beb34a2dfcdf36ae3c16811f1aef6e64abff2</td><td></td></tr><tr><td>322c063e97cd26f75191ae908f09a41c534eba90</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Improving Image Classification using Semantic Attributes
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>325b048ecd5b4d14dce32f92bff093cd744aa7f8</td><td>CVPR
+<br/>#2670
+<br/>000
+<br/>001
+<br/>002
+<br/>003
+<br/>004
+<br/>005
+<br/>006
+<br/>007
+<br/>008
+<br/>009
+<br/>010
+<br/>011
+<br/>012
+<br/>013
+<br/>014
+<br/>015
+<br/>016
+<br/>017
+<br/>018
+<br/>019
+<br/>020
+<br/>021
+<br/>022
+<br/>023
+<br/>024
+<br/>025
+<br/>026
+<br/>027
+<br/>028
+<br/>029
+<br/>030
+<br/>031
+<br/>032
+<br/>033
+<br/>034
+<br/>035
+<br/>036
+<br/>037
+<br/>038
+<br/>039
+<br/>040
+<br/>041
+<br/>042
+<br/>043
+<br/>044
+<br/>045
+<br/>046
+<br/>047
+<br/>048
+<br/>049
+<br/>050
+<br/>051
+<br/>052
+<br/>053
+<br/>CVPR 2008 Submission #2670. CONFIDENTIAL REVIEW COPY. DO NOT DISTRIBUTE.
+<br/>CVPR
+<br/>#2670
+<br/>Multi-Image Graph Cut Clothing Segmentation for Recognizing People
+<br/>Anonymous CVPR submission
+<br/>Paper ID 2670
+</td></tr><tr><td>321bd4d5d80abb1bae675a48583f872af3919172</td><td>Wang et al. EURASIP Journal on Image and Video Processing (2016) 2016:44
+<br/>DOI 10.1186/s13640-016-0152-3
+<br/>EURASIP Journal on Image
+<br/>and Video Processing
+<br/>R EV I E W
+<br/>Entropy-weighted feature-fusion method
+<br/>for head-pose estimation
+<br/>Open Access
+</td></tr><tr><td>32b8c9fd4e3f44c371960eb0074b42515f318ee7</td><td></td></tr><tr><td>32575ffa69d85bbc6aef5b21d73e809b37bf376d</td><td>-)5741/ *1-641+ 5)2- 37)16; 1 6-45 . *1-641+ 1.4)61
+<br/>7ELAHIEJO B JJ=M=
+<br/>)*564)+6
+<br/>IKHA L=HE=JEI E >EAJHE? I=FA GK=EJO 9A >ACE MEJD
+<br/>IKHAAJI 9A JDA IDM JD=J JDA >EAJHE? EBH=JE BH
+<br/>JA EI JDA A= D(p(cid:107)q) BH = FAHII E JDA FFK=JE 1
+<br/>BH I= ALAI B >KH MEJD = =IOFJJE? >AD=LEH =J =HCAH
+<br/>>KH
+<br/> 164,7+61
+<br/>*EAJHE? I=FA GK=EJO EI = A=IKHA B JDA KIABKAII B =
+<br/>GK=EJO
+<br/>F=FAH MA FHFIA = AM =FFH=?D J A=IKHA JDEI GK=JEJO
+<br/>JDA EJKEJELA >IAHL=JE JD=J = DECD GK=EJO >EAJHE? E=CA
+<br/>>EAJHE? EBH=JE
+<br/>EIIKAI E >EAJHE? JA?DCO .H AN=FA A B JDA IJ
+<br/>? >EAJHE? GKAIJEI EI JD=J B KEGKAAII AC J MD=J
+<br/>ANJAJ =HA CAHFHEJI KEGKA .H JDA FEJ B LEAM B
+<br/>=>A EBH=JE EI =L=E=>A BH = CELA JA?DCO IK?D
+<br/>  $  "
+<br/>1 JDEI F=FAH MA A=>H=JA = =FFH=?D J
+<br/>BMI
+<br/>AJI
+<br/> >ABHA = >EAJHE? A=IKHAAJ t0 =J MDE?D JEA MA O
+<br/>M = FAHI p EI F=HJ B = FFK=JE q MDE?D =O >A JDA
+</td></tr><tr><td>324b9369a1457213ec7a5a12fe77c0ee9aef1ad4</td><td>Dynamic Facial Analysis: From Bayesian Filtering to Recurrent Neural Network
+<br/>NVIDIA
+</td></tr><tr><td>32df63d395b5462a8a4a3c3574ae7916b0cd4d1d</td><td>978-1-4577-0539-7/11/$26.00 ©2011 IEEE
+<br/>1489
+<br/>ICASSP 2011
+</td></tr><tr><td>35308a3fd49d4f33bdbd35fefee39e39fe6b30b7</td><td></td></tr><tr><td>352d61eb66b053ae5689bd194840fd5d33f0e9c0</td><td>Analysis Dictionary Learning based
+<br/>Classification: Structure for Robustness
+</td></tr><tr><td>3538d2b5f7ab393387ce138611ffa325b6400774</td><td>A DSP-BASED APPROACH FOR THE IMPLEMENTATION OF FACE RECOGNITION
+<br/>ALGORITHMS
+<br/>A. U. Batur
+<br/>B. E. Flinchbaugh
+<br/>M. H. Hayes IIl
+<br/>Center for Signal and Image Proc.
+<br/>Georgia Inst. Of Technology
+<br/>Atlanta, GA
+<br/>Imaging and Audio Lab.
+<br/>Texas Instruments
+<br/>Dallas, TX
+<br/>Center for Signal and Image Proc.
+<br/>Georgia Inst. Of Technology
+<br/>Atlanta, CA
+</td></tr><tr><td>3504907a2e3c81d78e9dfe71c93ac145b1318f9c</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Unconstrained Still/Video-Based Face Verification with Deep
+<br/>Convolutional Neural Networks
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>35b1c1f2851e9ac4381ef41b4d980f398f1aad68</td><td>Geometry Guided Convolutional Neural Networks for
+<br/>Self-Supervised Video Representation Learning
+</td></tr><tr><td>351c02d4775ae95e04ab1e5dd0c758d2d80c3ddd</td><td>ActionSnapping: Motion-based Video
+<br/>Synchronization
+<br/>Disney Research
+</td></tr><tr><td>35e4b6c20756cd6388a3c0012b58acee14ffa604</td><td>Gender Classification in Large Databases
+<br/>E. Ram´on-Balmaseda, J. Lorenzo-Navarro, and M. Castrill´on-Santana (cid:63)
+<br/>Universidad de Las Palmas de Gran Canaria
+<br/>SIANI
+<br/>Spain
+</td></tr><tr><td>357963a46dfc150670061dbc23da6ba7d6da786e</td><td></td></tr><tr><td>35f1bcff4552632419742bbb6e1927ef5e998eb4</td><td></td></tr><tr><td>35c973dba6e1225196566200cfafa150dd231fa8</td><td></td></tr><tr><td>35f084ddee49072fdb6e0e2e6344ce50c02457ef</td><td>A Bilinear Illumination Model
+<br/>for Robust Face Recognition
+<br/>The Harvard community has made this
+<br/>article openly available. Please share how
+<br/>this access benefits you. Your story matters
+<br/>Citation
+<br/>Machiraju. 2005. A bilinear illumination model for robust face
+<br/>recognition. Proceedings of the Tenth IEEE International Conference
+<br/>on Computer Vision: October 17-21, 2005, Beijing, China. 1177-1184.
+<br/>Los Almamitos, C.A.: IEEE Computer Society.
+<br/>Published Version
+<br/>doi:10.1109/ICCV.2005.5
+<br/>Citable link
+<br/>http://nrs.harvard.edu/urn-3:HUL.InstRepos:4238979
+<br/>Terms of Use
+<br/><b></b><br/>repository, and is made available under the terms and conditions
+<br/>applicable to Other Posted Material, as set forth at http://
+<br/>nrs.harvard.edu/urn-3:HUL.InstRepos:dash.current.terms-of-
+<br/>use#LAA
+</td></tr><tr><td>353a89c277cca3e3e4e8c6a199ae3442cdad59b5</td><td></td></tr><tr><td>352110778d2cc2e7110f0bf773398812fd905eb1</td><td>TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. X, NO. X, JUNE 2014
+<br/>Matrix Completion for Weakly-supervised
+<br/>Multi-label Image Classification
+</td></tr><tr><td>6964af90cf8ac336a2a55800d9c510eccc7ba8e1</td><td>Temporal Relational Reasoning in Videos
+<br/>MIT CSAIL
+</td></tr><tr><td>697b0b9630213ca08a1ae1d459fabc13325bdcbb</td><td></td></tr><tr><td>69d29012d17cdf0a2e59546ccbbe46fa49afcd68</td><td>Subspace clustering of dimensionality-reduced data
+<br/>ETH Zurich, Switzerland
+</td></tr><tr><td>69de532d93ad8099f4d4902c4cad28db958adfea</td><td></td></tr><tr><td>69526cdf6abbfc4bcd39616acde544568326d856</td><td>636
+<br/>[17] B. Moghaddam, T. Jebara, and A. Pentland, “Bayesian face recogni-
+<br/>tion,” Pattern Recognit., vol. 33, no. 11, pp. 1771–1782, Nov. 2000.
+<br/>[18] A. Nefian, “A hidden Markov model-based approach for face detection
+<br/>and recognition,” Ph.D. dissertation, Dept. Elect. Comput. Eng. Elect.
+<br/>Eng., Georgia Inst. Technol., Atlanta, 1999.
+<br/>[19] P. J. Phillips et al., “Overview of the face recognition grand challenge,”
+<br/>presented at the IEEE CVPR, San Diego, CA, Jun. 2005.
+<br/>[20] H. T. Tanaka, M. Ikeda, and H. Chiaki, “Curvature-based face surface
+<br/>recognition using spherical correlation-principal direction for curved
+<br/>object recognition,” in Proc. Int. Conf. Automatic Face and Gesture
+<br/>Recognition, 1998, pp. 372–377.
+<br/>[21] M. Turk and A. Pentland, “Eigenfaces for recognition,” J. Cognit. Sci.,
+<br/>pp. 71–86, 1991.
+<br/>[22] V. N. Vapnik, Statistical Learning Theory. New York: Wiley, 1998.
+<br/>[23] W. Zhao, R. Chellappa, A. Rosenfeld, and P. Phillips, “Face recogni-
+<br/>tion: A literature survey,” ACM Comput. Surveys, vol. 35, no. 44, pp.
+<br/>399–458, 2003.
+<br/>[24] W. Zhao, R. Chellappa, and P. J. Phillips, “Subspace linear discrimi-
+<br/>nant analysis for face recognition,” UMD TR4009, 1999.
+<br/>Face Verification Using Template Matching
+</td></tr><tr><td>690d669115ad6fabd53e0562de95e35f1078dfbb</td><td>Progressive versus Random Projections for Compressive Capture of Images,
+<br/>Lightfields and Higher Dimensional Visual Signals
+<br/>MIT Media Lab
+<br/>75 Amherst St, Cambridge, MA
+<br/>MERL
+<br/>201 Broadway, Cambridge MA
+<br/>MIT Media Lab
+<br/>75 Amherst St, Cambridge, MA
+</td></tr><tr><td>69a9da55bd20ce4b83e1680fbc6be2c976067631</td><td></td></tr><tr><td>6974449ce544dc208b8cc88b606b03d95c8fd368</td><td></td></tr><tr><td>3cfbe1f100619a932ba7e2f068cd4c41505c9f58</td><td>A Realistic Simulation Tool for Testing Face Recognition
+<br/>Systems under Real-World Conditions∗
+<br/>M. Correa, J. Ruiz-del-Solar, S. Parra-Tsunekawa, R. Verschae
+<br/>Department of Electrical Engineering, Universidad de Chile
+<br/>Advanced Mining Technology Center, Universidad de Chile
+</td></tr><tr><td>3c03d95084ccbe7bf44b6d54151625c68f6e74d0</td><td></td></tr><tr><td>3cd7b15f5647e650db66fbe2ce1852e00c05b2e4</td><td></td></tr><tr><td>3ce2ecf3d6ace8d80303daf67345be6ec33b3a93</td><td></td></tr><tr><td>3c374cb8e730b64dacb9fbf6eb67f5987c7de3c8</td><td>Measuring Gaze Orientation for Human-Robot
+<br/>Interaction
+<br/>∗ CNRS; LAAS; 7 avenue du Colonel Roche, 31077 Toulouse Cedex, France
+<br/>† Universit´e de Toulouse; UPS; LAAS-CNRS : F-31077 Toulouse, France
+<br/>Introduction
+<br/>In the context of Human-Robot interaction estimating gaze orientation brings
+<br/>useful information about human focus of attention. This is a contextual infor-
+<br/>mation : when you point something you usually look at it. Estimating gaze
+<br/>orientation requires head pose estimation. There are several techniques to esti-
+<br/>mate head pose from images, they are mainly based on training [3, 4] or on local
+<br/>face features tracking [6]. The approach described here is based on local face
+<br/>features tracking in image space using online learning, it is a mixed approach
+<br/>since we track face features using some learning at feature level. It uses SURF
+<br/>features [2] to guide detection and tracking. Such key features can be matched
+<br/>between images, used for object detection or object tracking [10]. Several ap-
+<br/>proaches work on fixed size images like training techniques which mainly work
+<br/>on low resolution images because of computation costs whereas approaches based
+<br/>on local features tracking work on high resolution images. Tracking face features
+<br/>such as eyes, nose and mouth is a common problem in many applications such as
+<br/>detection of facial expression or video conferencing [8] but most of those appli-
+<br/>cations focus on front face images [9]. We developed an algorithm based on face
+<br/>features tracking using a parametric model. First we need face detection, then
+<br/>we detect face features in following order: eyes, mouth, nose. In order to achieve
+<br/>full profile detection we use sets of SURF to learn what eyes, mouth and nose
+<br/>look like once tracking is initialized. Once those sets of SURF are known they
+<br/>are used to detect and track face features. SURF have a descriptor which is often
+<br/>used to identify a key point and here we add some global geometry information
+<br/>by using the relative position between key points. Then we use a particle filter to
+<br/>track face features using those SURF based detectors, we compute the head pose
+<br/>angles from features position and pass the results through a median filter. This
+<br/>paper is organized as follows. Section 2 describes our modeling of visual features,
+<br/>section 3 presents our tracking implementation. Section 4 presents results we get
+<br/>with our implementation and future works in section 5.
+<br/>2 Visual features
+<br/>We use some basic properties of facial features to initialize our algorithm : eyes
+<br/>are dark and circular, mouth is an horizontal dark line with a specific color,...
+</td></tr><tr><td>3cb64217ca2127445270000141cfa2959c84d9e7</td><td></td></tr><tr><td>3cd5da596060819e2b156e8b3a28331ef633036b</td><td></td></tr><tr><td>3c56acaa819f4e2263638b67cea1ec37a226691d</td><td>Body Joint guided 3D Deep Convolutional
+<br/>Descriptors for Action Recognition
+</td></tr><tr><td>3c8da376576938160cbed956ece838682fa50e9f</td><td>Chapter 4
+<br/>Aiding Face Recognition with
+<br/>Social Context Association Rule
+<br/>based Re-Ranking
+<br/>Humans are very efficient at recognizing familiar face images even in challenging condi-
+<br/>tions. One reason for such capabilities is the ability to understand social context between
+<br/>individuals. Sometimes the identity of the person in a photo can be inferred based on the
+<br/>identity of other persons in the same photo, when some social context between them is
+<br/>known. This chapter presents an algorithm to utilize the co-occurrence of individuals as
+<br/>the social context to improve face recognition. Association rule mining is utilized to infer
+<br/>multi-level social context among subjects from a large repository of social transactions.
+<br/>The results are demonstrated on the G-album and on the SN-collection pertaining to 4675
+<br/>identities prepared by the authors from a social networking website. The results show that
+<br/>association rules extracted from social context can be used to augment face recognition and
+<br/>improve the identification performance.
+<br/>4.1
+<br/>Introduction
+<br/>Face recognition capabilities of humans have inspired several researchers to understand
+<br/>the science behind it and use it in developing automated algorithms. Recently, it is also
+<br/>argued that encoding social context among individuals can be leveraged for improved
+<br/>automatic face recognition [175]. As shown in Figure 4.1, often times a person’s identity
+<br/>can be inferred based on the identity of other persons in the same photo, when some social
+<br/>context between them is known. A subject’s face in consumer photos generally co-occur
+<br/>along with their socially relevant people. With the advent of social networking services,
+<br/>the social context between individuals is readily available. Face recognition performance
+<br/>105
+</td></tr><tr><td>56e885b9094391f7d55023a71a09822b38b26447</td><td>FREQUENCY DECODED LOCAL BINARY PATTERN
+<br/>Face Retrieval using Frequency Decoded Local
+<br/>Descriptor
+</td></tr><tr><td>56a653fea5c2a7e45246613049fb16b1d204fc96</td><td>3287
+<br/>Quaternion Collaborative and Sparse Representation
+<br/>With Application to Color Face Recognition
+<br/>representation-based
+</td></tr><tr><td>5666ed763698295e41564efda627767ee55cc943</td><td>Manuscript
+<br/>Click here to download Manuscript: template.tex
+<br/>Click here to view linked References
+<br/>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Relatively-Paired Space Analysis: Learning a Latent Common
+<br/>Space from Relatively-Paired Observations
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>5615d6045301ecbc5be35e46cab711f676aadf3a</td><td>Discriminatively Learned Hierarchical Rank Pooling Networks
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>566038a3c2867894a08125efe41ef0a40824a090</td><td>978-1-4244-2354-5/09/$25.00 ©2009 IEEE
+<br/>1945
+<br/>ICASSP 2009
+</td></tr><tr><td>56dca23481de9119aa21f9044efd7db09f618704</td><td>Riemannian Dictionary Learning and Sparse
+<br/>Coding for Positive Definite Matrices
+</td></tr><tr><td>516a27d5dd06622f872f5ef334313350745eadc3</td><td>> REPLACE THIS LINE WITH YOUR PAPER IDENTIFICATION NUMBER (DOUBLE-CLICK HERE TO EDIT) <
+<br/>1
+<br/>Fine-Grained Facial Expression Analysis Us-
+<br/>ing Dimensional Emotion Model
+<br/>
+</td></tr><tr><td>51c3050fb509ca685de3d9ac2e965f0de1fb21cc</td><td>Fantope Regularization in Metric Learning
+<br/>Marc T. Law
+<br/>Sorbonne Universit´es, UPMC Univ Paris 06, UMR 7606, LIP6, F-75005, Paris, France
+</td></tr><tr><td>51c7c5dfda47647aef2797ac3103cf0e108fdfb4</td><td>CS 395T: Celebrity Look-Alikes ∗
+</td></tr><tr><td>519f4eb5fe15a25a46f1a49e2632b12a3b18c94d</td><td>Non-Lambertian Reflectance Modeling and
+<br/>Shape Recovery of Faces using Tensor Splines
+</td></tr><tr><td>51528cdce7a92835657c0a616c0806594de7513b</td><td></td></tr><tr><td>5161e38e4ea716dcfb554ccb88901b3d97778f64</td><td>SSPP-DAN: DEEP DOMAIN ADAPTATION NETWORK FOR
+<br/>FACE RECOGNITION WITH SINGLE SAMPLE PER PERSON
+<br/>School of Computing, KAIST, Republic of Korea
+</td></tr><tr><td>51d1a6e15936727e8dd487ac7b7fd39bd2baf5ee</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+<br/>A Fast and Accurate System for Face Detection,
+<br/>Identification, and Verification
+</td></tr><tr><td>5157dde17a69f12c51186ffc20a0a6c6847f1a29</td><td>Evolutionary Cost-sensitive Extreme Learning
+<br/>Machine
+<br/>1
+</td></tr><tr><td>51dc127f29d1bb076d97f515dca4cc42dda3d25b</td><td></td></tr><tr><td>3daafe6389d877fe15d8823cdf5ac15fd919676f</td><td>Human Action Localization
+<br/>with Sparse Spatial Supervision
+</td></tr><tr><td>3db75962857a602cae65f60f202d311eb4627b41</td><td></td></tr><tr><td>3d36f941d8ec613bb25e80fb8f4c160c1a2848df</td><td>Out-of-sample generalizations for supervised
+<br/>manifold learning for classification
+</td></tr><tr><td>3d5a1be4c1595b4805a35414dfb55716e3bf80d8</td><td>Hidden Two-Stream Convolutional Networks for
+<br/>Action Recognition
+</td></tr><tr><td>3d62b2f9cef997fc37099305dabff356d39ed477</td><td>Joint Face Alignment and 3D Face
+<br/>Reconstruction with Application to Face
+<br/>Recognition
+</td></tr><tr><td>3dc522a6576c3475e4a166377cbbf4ba389c041f</td><td></td></tr><tr><td>3dd4d719b2185f7c7f92cc97f3b5a65990fcd5dd</td><td>Ensemble of Hankel Matrices for
+<br/>Face Emotion Recognition
+<br/>DICGIM, Universit´a degli Studi di Palermo,
+<br/>V.le delle Scienze, Ed. 6, 90128 Palermo, Italy,
+<br/>DRAFT
+<br/>To appear in ICIAP 2015
+</td></tr><tr><td>3dda181be266950ba1280b61eb63ac11777029f9</td><td></td></tr><tr><td>3d6ee995bc2f3e0f217c053368df659a5d14d5b5</td><td></td></tr><tr><td>3dd906bc0947e56d2b7bf9530b11351bbdff2358</td><td></td></tr><tr><td>3d1af6c531ebcb4321607bcef8d9dc6aa9f0dc5a</td><td>1892
+<br/>Random Multispace Quantization as
+<br/>an Analytic Mechanism for BioHashing
+<br/>of Biometric and Random Identity Inputs
+</td></tr><tr><td>3d6943f1573f992d6897489b73ec46df983d776c</td><td></td></tr><tr><td>3d94f81cf4c3a7307e1a976dc6cb7bf38068a381</td><td>3846
+<br/>Data-Dependent Label Distribution Learning
+<br/>for Age Estimation
+</td></tr><tr><td>5859774103306113707db02fe2dd3ac9f91f1b9e</td><td></td></tr><tr><td>5892f8367639e9c1e3cf27fdf6c09bb3247651ed</td><td>Estimating Missing Features to Improve Multimedia Information Retrieval
+</td></tr><tr><td>5850aab97e1709b45ac26bb7d205e2accc798a87</td><td></td></tr><tr><td>587f81ae87b42c18c565694c694439c65557d6d5</td><td>DeepFace: Face Generation using Deep Learning
+</td></tr><tr><td>580054294ca761500ada71f7d5a78acb0e622f19</td><td>1331
+<br/>A Subspace Model-Based Approach to Face
+<br/>Relighting Under Unknown Lighting and Poses
+</td></tr><tr><td>58081cb20d397ce80f638d38ed80b3384af76869</td><td>Embedded Real-Time Fall Detection Using Deep
+<br/>Learning For Elderly Care
+<br/>Samsung Research, Samsung Electronics
+</td></tr><tr><td>58fa85ed57e661df93ca4cdb27d210afe5d2cdcd</td><td>Cancún Center, Cancún, México, December 4-8, 2016
+<br/>978-1-5090-4847-2/16/$31.00 ©2016 IEEE
+<br/>4118
+</td></tr><tr><td>58bf72750a8f5100e0c01e55fd1b959b31e7dbce</td><td>PyramidBox: A Context-assisted Single Shot
+<br/>Face Detector.
+<br/>Baidu Inc.
+</td></tr><tr><td>58542eeef9317ffab9b155579256d11efb4610f2</td><td>International Journal of Science and Research (IJSR)
+<br/>ISSN (Online): 2319-7064
+<br/>Index Copernicus Value (2013): 6.14 | Impact Factor (2014): 5.611
+<br/>Face Recognition Revisited on Pose, Alignment,
+<br/>Color, Illumination and Expression-PyTen
+<br/>Computer Science, BIT Noida, India
+</td></tr><tr><td>58823377757e7dc92f3b70a973be697651089756</td><td>Technical Report
+<br/>UCAM-CL-TR-861
+<br/>ISSN 1476-2986
+<br/>Number 861
+<br/>Computer Laboratory
+<br/>Automatic facial expression analysis
+<br/>October 2014
+<br/>15 JJ Thomson Avenue
+<br/>Cambridge CB3 0FD
+<br/>United Kingdom
+<br/>phone +44 1223 763500
+<br/>http://www.cl.cam.ac.uk/
+</td></tr><tr><td>58bb77dff5f6ee0fb5ab7f5079a5e788276184cc</td><td>Facial Expression Recognition with PCA and LBP
+<br/>Features Extracting from Active Facial Patches
+<br/>
+</td></tr><tr><td>58cb1414095f5eb6a8c6843326a6653403a0ee17</td><td></td></tr><tr><td>677585ccf8619ec2330b7f2d2b589a37146ffad7</td><td>A flexible model for training action localization
+<br/>with varying levels of supervision
+</td></tr><tr><td>677477e6d2ba5b99633aee3d60e77026fb0b9306</td><td></td></tr><tr><td>6789bddbabf234f31df992a3356b36a47451efc7</td><td>Unsupervised Generation of Free-Form and
+<br/>Parameterized Avatars
+</td></tr><tr><td>675b2caee111cb6aa7404b4d6aa371314bf0e647</td><td>AVA: A Video Dataset of Spatio-temporally Localized Atomic Visual Actions
+<br/>Carl Vondrick∗
+</td></tr><tr><td>679b72d23a9cfca8a7fe14f1d488363f2139265f</td><td></td></tr><tr><td>67484723e0c2cbeb936b2e863710385bdc7d5368</td><td>Anchor Cascade for Efficient Face Detection
+</td></tr><tr><td>6742c0a26315d7354ab6b1fa62a5fffaea06da14</td><td>BAS AND SMITH: WHAT DOES 2D GEOMETRIC INFORMATION REALLY TELL US ABOUT 3D FACE SHAPE?
+<br/>What does 2D geometric information
+<br/>really tell us about 3D face shape?
+</td></tr><tr><td>67a50752358d5d287c2b55e7a45cc39be47bf7d0</td><td></td></tr><tr><td>67ba3524e135c1375c74fe53ebb03684754aae56</td><td>978-1-5090-4117-6/17/$31.00 ©2017 IEEE
+<br/>1767
+<br/>ICASSP 2017
+</td></tr><tr><td>6769cfbd85329e4815bb1332b118b01119975a95</td><td>Tied factor analysis for face recognition across
+<br/>large pose changes
+</td></tr><tr><td>0be43cf4299ce2067a0435798ef4ca2fbd255901</td><td>Title
+<br/>A temporal latent topic model for facial expression recognition
+<br/>Author(s)
+<br/>Shang, L; Chan, KP
+<br/>Citation
+<br/>The 10th Asian Conference on Computer Vision (ACCV 2010),
+<br/>Queenstown, New Zealand, 8-12 November 2010. In Lecture
+<br/>Notes in Computer Science, 2010, v. 6495, p. 51-63
+<br/>Issued Date
+<br/>2011
+<br/>URL
+<br/>http://hdl.handle.net/10722/142604
+<br/>Rights
+<br/>Creative Commons: Attribution 3.0 Hong Kong License
+</td></tr><tr><td>0b2277a0609565c30a8ee3e7e193ce7f79ab48b0</td><td>944
+<br/>Cost-Sensitive Semi-Supervised Discriminant
+<br/>Analysis for Face Recognition
+</td></tr><tr><td>0ba64f4157d80720883a96a73e8d6a5f5b9f1d9b</td><td></td></tr><tr><td>0b605b40d4fef23baa5d21ead11f522d7af1df06</td><td>Label-Embedding for Attribute-Based Classification
+<br/>a Computer Vision Group∗, XRCE, France
+<br/>b LEAR†, INRIA, France
+</td></tr><tr><td>0b0eb562d7341231c3f82a65cf51943194add0bb</td><td>> REPLACE THIS LINE WITH YOUR PAPER IDENTIFICATION NUMBER (DOUBLE-CLICK HERE TO EDIT) <
+<br/>Facial Image Analysis Based on Local Binary
+<br/>Patterns: A Survey
+<br/>
+</td></tr><tr><td>0b3a146c474166bba71e645452b3a8276ac05998</td><td>Who’s in the Picture?
+<br/>Berkeley, CA 94720
+<br/>Computer Science Division
+<br/>U.C. Berkeley
+</td></tr><tr><td>0b5bd3ce90bf732801642b9f55a781e7de7fdde0</td><td></td></tr><tr><td>0b0958493e43ca9c131315bcfb9a171d52ecbb8a</td><td>A Unified Neural Based Model for Structured Output Problems
+<br/>Soufiane Belharbi∗1, Cl´ement Chatelain∗1, Romain H´erault∗1, and S´ebastien Adam∗2
+<br/>1LITIS EA 4108, INSA de Rouen, Saint ´Etienne du Rouvray 76800, France
+<br/>2LITIS EA 4108, UFR des Sciences, Universit´e de Rouen, France.
+<br/>April 13, 2015
+</td></tr><tr><td>0b20f75dbb0823766d8c7b04030670ef7147ccdd</td><td>1
+<br/>Feature selection using nearest attributes
+</td></tr><tr><td>0b5a82f8c0ee3640503ba24ef73e672d93aeebbf</td><td>On Learning 3D Face Morphable Model
+<br/>from In-the-wild Images
+</td></tr><tr><td>0b174d4a67805b8796bfe86cd69a967d357ba9b6</td><td> Research Journal of Recent Sciences _________________________________________________ ISSN 2277-2502
+<br/> Vol. 3(4), 56-62, April (2014)
+<br/>Res.J.Recent Sci.
+</td></tr><tr><td>0ba449e312894bca0d16348f3aef41ca01872383</td><td></td></tr><tr><td>0b572a2b7052b15c8599dbb17d59ff4f02838ff7</td><td>Automatic Subspace Learning via Principal
+<br/>Coefficients Embedding
+</td></tr><tr><td>0ba99a709cd34654ac296418a4f41a9543928149</td><td></td></tr><tr><td>0b8c92463f8f5087696681fb62dad003c308ebe2</td><td>On Matching Sketches with Digital Face Images
+<br/>in local
+</td></tr><tr><td>0bc0f9178999e5c2f23a45325fa50300961e0226</td><td>Recognizing facial expressions from videos using Deep
+<br/>Belief Networks
+<br/>CS 229 Project
+</td></tr><tr><td>0b3f354e6796ef7416bf6dde9e0779b2fcfabed2</td><td></td></tr><tr><td>93675f86d03256f9a010033d3c4c842a732bf661</td><td>Universit´edesSciencesetTechnologiesdeLilleEcoleDoctoraleSciencesPourl’ing´enieurUniversit´eLilleNord-de-FranceTHESEPr´esent´ee`al’Universit´edesSciencesetTechnologiesdeLillePourobtenirletitredeDOCTEURDEL’UNIVERSIT´ESp´ecialit´e:MicroetNanotechnologieParTaoXULocalizedgrowthandcharacterizationofsiliconnanowiresSoutenuele25Septembre2009Compositiondujury:Pr´esident:TuamiLASRIRapporteurs:ThierryBARONHenriMARIETTEExaminateurs:EricBAKKERSXavierWALLARTDirecteurdeth`ese:BrunoGRANDIDIER </td></tr><tr><td>936c7406de1dfdd22493785fc5d1e5614c6c2882</td><td>2012 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 762–772,
+<br/>Montr´eal, Canada, June 3-8, 2012. c(cid:13)2012 Association for Computational Linguistics
+<br/>762
+</td></tr><tr><td>93721023dd6423ab06ff7a491d01bdfe83db7754</td><td>ROBUST FACE ALIGNMENT USING CONVOLUTIONAL NEURAL
+<br/>NETWORKS
+<br/>Orange Labs, 4, Rue du Clos Courtel, 35512 Cesson-S´evign´e, France
+<br/>Keywords:
+<br/>Face alignment, Face registration, Convolutional Neural Networks.
+</td></tr><tr><td>93cbb3b3e40321c4990c36f89a63534b506b6daf</td><td>IEEE TRANSACTIONS ON SYSTEMS, MAN, AND CYBERNETICS—PART B: CYBERNETICS, VOL. 35, NO. 3, JUNE 2005
+<br/>477
+<br/>Learning From Examples in the Small Sample Case:
+<br/>Face Expression Recognition
+</td></tr><tr><td>944faf7f14f1bead911aeec30cc80c861442b610</td><td>Action Tubelet Detector for Spatio-Temporal Action Localization
+</td></tr><tr><td>9458c518a6e2d40fb1d6ca1066d6a0c73e1d6b73</td><td>5967
+<br/>A Benchmark and Comparative Study of
+<br/>Video-Based Face Recognition
+<br/>on COX Face Database
+</td></tr><tr><td>94aa8a3787385b13ee7c4fdd2b2b2a574ffcbd81</td><td></td></tr><tr><td>94325522c9be8224970f810554611d6a73877c13</td><td></td></tr><tr><td>9441253b638373a0027a5b4324b4ee5f0dffd670</td><td>A Novel Scheme for Generating Secure Face
+<br/>Templates Using BDA
+<br/>P.G. Student, Department of Computer Engineering,
+<br/>Associate Professor, Department of Computer
+<br/>MCERC,
+<br/>Nashik (M.S.), India
+</td></tr><tr><td>94ac3008bf6be6be6b0f5140a0bea738d4c75579</td><td></td></tr><tr><td>94a11b601af77f0ad46338afd0fa4ccbab909e82</td><td></td></tr><tr><td>0e8760fc198a7e7c9f4193478c0e0700950a86cd</td><td></td></tr><tr><td>0e50fe28229fea45527000b876eb4068abd6ed8c</td><td>Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+<br/>2936
+</td></tr><tr><td>0eff410cd6a93d0e37048e236f62e209bc4383d1</td><td>Anchorage Convention District
+<br/>May 3-8, 2010, Anchorage, Alaska, USA
+<br/>978-1-4244-5040-4/10/$26.00 ©2010 IEEE
+<br/>4803
+</td></tr><tr><td>0ee737085af468f264f57f052ea9b9b1f58d7222</td><td>SiGAN: Siamese Generative Adversarial Network
+<br/>for Identity-Preserving Face Hallucination
+</td></tr><tr><td>0ee661a1b6bbfadb5a482ec643573de53a9adf5e</td><td>JOURNAL OF LATEX CLASS FILES, VOL. X, NO. X, MONTH YEAR
+<br/>On the Use of Discriminative Cohort Score
+<br/>Normalization for Unconstrained Face Recognition
+</td></tr><tr><td>0e3840ea3227851aaf4633133dd3cbf9bbe89e5b</td><td></td></tr><tr><td>0e5dad0fe99aed6978c6c6c95dc49c6dca601e6a</td><td></td></tr><tr><td>0e2ea7af369dbcaeb5e334b02dd9ba5271b10265</td><td></td></tr><tr><td>0e7c70321462694757511a1776f53d629a1b38f3</td><td>NIST Special Publication 1136
+<br/>2012 Proceedings of the
+<br/>Performance Metrics for Intelligent
+<br/>Systems (PerMI ‘12) Workshop
+<br/>
+<br/>http://dx.doi.org/10.6028/NIST.SP.1136
+</td></tr><tr><td>6080f26675e44f692dd722b61905af71c5260af8</td><td></td></tr><tr><td>60d765f2c0a1a674b68bee845f6c02741a49b44e</td><td></td></tr><tr><td>60c24e44fce158c217d25c1bae9f880a8bd19fc3</td><td>Controllable Image-to-Video Translation:
+<br/>A Case Study on Facial Expression Generation
+<br/>MIT CSAIL
+<br/>Wenbing Huang
+<br/>Tencent AI Lab
+<br/>MIT-Waston Lab
+<br/>Tencent AI Lab
+<br/>Tencent AI Lab
+</td></tr><tr><td>60e2b9b2e0db3089237d0208f57b22a3aac932c1</td><td>Frankenstein: Learning Deep Face Representations
+<br/>using Small Data
+</td></tr><tr><td>60ce4a9602c27ad17a1366165033fe5e0cf68078</td><td>TECHNICAL NOTE
+<br/>DIGITAL & MULTIMEDIA SCIENCES
+<br/>J Forensic Sci, 2015
+<br/>doi: 10.1111/1556-4029.12800
+<br/>Available online at: onlinelibrary.wiley.com
+<br/>Ph.D.
+<br/>Combination of Face Regions in Forensic
+<br/>Scenarios*
+</td></tr><tr><td>6097ea6fd21a5f86a10a52e6e4dd5b78a436d5bf</td><td></td></tr><tr><td>60efdb2e204b2be6701a8e168983fa666feac1be</td><td>Int J Comput Vis
+<br/>DOI 10.1007/s11263-017-1043-5
+<br/>Transferring Deep Object and Scene Representations for Event
+<br/>Recognition in Still Images
+<br/>Received: 31 March 2016 / Accepted: 1 September 2017
+<br/>© Springer Science+Business Media, LLC 2017
+</td></tr><tr><td>60824ee635777b4ee30fcc2485ef1e103b8e7af9</td><td>Cascaded Collaborative Regression for Robust Facial
+<br/>Landmark Detection Trained using a Mixture of Synthetic and
+<br/>Real Images with Dynamic Weighting
+<br/>Life Member, IEEE, William Christmas, and Xiao-Jun Wu
+</td></tr><tr><td>60643bdab1c6261576e6610ea64ea0c0b200a28d</td><td></td></tr><tr><td>60a20d5023f2bcc241eb9e187b4ddece695c2b9b</td><td>Invertible Nonlinear Dimensionality Reduction
+<br/>via Joint Dictionary Learning
+<br/>Department of Electrical and Computer Engineering
+<br/>Technische Universit¨at M¨unchen, Germany
+</td></tr><tr><td>60cdcf75e97e88638ec973f468598ae7f75c59b4</td><td>86
+<br/>Face Annotation Using Transductive
+<br/>Kernel Fisher Discriminant
+</td></tr><tr><td>60b3601d70f5cdcfef9934b24bcb3cc4dde663e7</td><td>SUBMITTED TO IEEE TRANS. ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE
+<br/>Binary Gradient Correlation Patterns
+<br/>for Robust Face Recognition
+</td></tr><tr><td>34a41ec648d082270697b9ee264f0baf4ffb5c8d</td><td></td></tr><tr><td>341002fac5ae6c193b78018a164d3c7295a495e4</td><td>von Mises-Fisher Mixture Model-based Deep
+<br/>learning: Application to Face Verification
+</td></tr><tr><td>34ec83c8ff214128e7a4a4763059eebac59268a6</td><td>Action Anticipation By Predicting Future
+<br/>Dynamic Images
+<br/>Australian Centre for Robotic Vision, ANU, Canberra, Australia
+</td></tr><tr><td>34b7e826db49a16773e8747bc8dfa48e344e425d</td><td></td></tr><tr><td>341ed69a6e5d7a89ff897c72c1456f50cfb23c96</td><td>DAGER: Deep Age, Gender and Emotion
+<br/>Recognition Using Convolutional Neural
+<br/>Networks
+<br/>Computer Vision Lab, Sighthound Inc., Winter Park, FL
+</td></tr><tr><td>340d1a9852747b03061e5358a8d12055136599b0</td><td>Audio-Visual Recognition System Insusceptible
+<br/>to Illumination Variation over Internet Protocol
+<br/>
+</td></tr><tr><td>5a3da29970d0c3c75ef4cb372b336fc8b10381d7</td><td>CNN-based Real-time Dense Face Reconstruction
+<br/>with Inverse-rendered Photo-realistic Face Images
+</td></tr><tr><td>5a34a9bb264a2594c02b5f46b038aa1ec3389072</td><td>Label-Embedding for Image Classification
+</td></tr><tr><td>5a4c6246758c522f68e75491eb65eafda375b701</td><td>978-1-4244-4296-6/10/$25.00 ©2010 IEEE
+<br/>1118
+<br/>ICASSP 2010
+</td></tr><tr><td>5aad5e7390211267f3511ffa75c69febe3b84cc7</td><td>Driver Gaze Estimation
+<br/>Without Using Eye Movement
+<br/>MIT AgeLab
+</td></tr><tr><td>5a029a0b0ae8ae7fc9043f0711b7c0d442bfd372</td><td></td></tr><tr><td>5a4ec5c79f3699ba037a5f06d8ad309fb4ee682c</td><td>Downloaded From: https://www.spiedigitallibrary.org/journals/Journal-of-Electronic-Imaging on 12/17/2017 Terms of Use: https://www.spiedigitallibrary.org/terms-of-use
+<br/>AutomaticageandgenderclassificationusingsupervisedappearancemodelAliMainaBukarHassanUgailDavidConnahAliMainaBukar,HassanUgail,DavidConnah,“Automaticageandgenderclassificationusingsupervisedappearancemodel,”J.Electron.Imaging25(6),061605(2016),doi:10.1117/1.JEI.25.6.061605. </td></tr><tr><td>5a7520380d9960ff3b4f5f0fe526a00f63791e99</td><td>The Indian Spontaneous Expression
+<br/>Database for Emotion Recognition
+</td></tr><tr><td>5fff61302adc65d554d5db3722b8a604e62a8377</td><td>Additive Margin Softmax for Face Verification
+<br/>UESTC
+<br/>Georgia Tech
+<br/>UESTC
+<br/>UESTC
+</td></tr><tr><td>5fa6e4a23da0b39e4b35ac73a15d55cee8608736</td><td>IJCV special issue (Best papers of ECCV 2016) manuscript No.
+<br/>(will be inserted by the editor)
+<br/>RED-Net:
+<br/>A Recurrent Encoder-Decoder Network for Video-based Face Alignment
+<br/>Submitted: April 19 2017 / Revised: December 12 2017
+</td></tr><tr><td>5f871838710a6b408cf647aacb3b198983719c31</td><td>1716
+<br/>Locally Linear Regression for Pose-Invariant
+<br/>Face Recognition
+</td></tr><tr><td>5f64a2a9b6b3d410dd60dc2af4a58a428c5d85f9</td><td></td></tr><tr><td>5f344a4ef7edfd87c5c4bc531833774c3ed23542</td><td>c
+</td></tr><tr><td>5fa0e6da81acece7026ac1bc6dcdbd8b204a5f0a</td><td></td></tr><tr><td>5f27ed82c52339124aa368507d66b71d96862cb7</td><td>Semi-supervised Learning of Classifiers: Theory, Algorithms
+<br/>and Their Application to Human-Computer Interaction
+<br/>This work has been partially funded by NSF Grant IIS 00-85980.
+<br/>DRAFT
+</td></tr><tr><td>5fa932be4d30cad13ea3f3e863572372b915bec8</td><td></td></tr><tr><td>5f5906168235613c81ad2129e2431a0e5ef2b6e4</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>A Unified Framework for Compositional Fitting of
+<br/>Active Appearance Models
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>5fc664202208aaf01c9b62da5dfdcd71fdadab29</td><td>arXiv:1504.05308v1 [cs.CV] 21 Apr 2015
+</td></tr><tr><td>5fa1724a79a9f7090c54925f6ac52f1697d6b570</td><td>Proceedings of the Workshop on Grammar and Lexicon: Interactions and Interfaces,
+<br/>pages 41–47, Osaka, Japan, December 11 2016.
+<br/>41
+</td></tr><tr><td>33aff42530c2fd134553d397bf572c048db12c28</td><td>From Emotions to Action Units with Hidden and Semi-Hidden-Task Learning
+<br/>Universitat Pompeu Fabra
+<br/>Centre de Visio per Computador
+<br/>Universitat Pompeu Fabra
+<br/>Barcelona
+<br/>Barcelona
+<br/>Barcelona
+</td></tr><tr><td>33a1a049d15e22befc7ddefdd3ae719ced8394bf</td><td>FULL PAPER
+<br/> International Journal of Recent Trends in Engineering, Vol 2, No. 1, November 2009
+<br/>An Efficient Approach to Facial Feature Detection
+<br/>for Expression Recognition
+<br/>S.P. Khandait1, P.D. Khandait2 and Dr.R.C.Thool2
+<br/>1Deptt. of Info.Tech., K.D.K.C.E., Nagpur, India
+<br/> 2Deptt.of Electronics Engg., K.D.K.C.E., Nagpur, India, 2Deptt. of Info.Tech., SGGSIET, Nanded
+</td></tr><tr><td>3399f8f0dff8fcf001b711174d29c9d4fde89379</td><td>Face R-CNN
+<br/>Tencent AI Lab, China
+</td></tr><tr><td>333aa36e80f1a7fa29cf069d81d4d2e12679bc67</td><td>Suggesting Sounds for Images
+<br/>from Video Collections
+<br/>1Computer Science Department, ETH Z¨urich, Switzerland
+<br/>2Disney Research, Switzerland
+</td></tr><tr><td>33792bb27ef392973e951ca5a5a3be4a22a0d0c6</td><td>Two-dimensional Whitening Reconstruction for
+<br/>Enhancing Robustness of Principal Component
+<br/>Analysis
+</td></tr><tr><td>3328674d71a18ed649e828963a0edb54348ee598</td><td>IEEE TRANSACTIONS ON SYSTEMS, MAN, AND CYBERNETICS—PART B: CYBERNETICS, VOL. 34, NO. 6, DECEMBER 2004
+<br/>2405
+<br/>A Face and Palmprint Recognition Approach Based
+<br/>on Discriminant DCT Feature Extraction
+</td></tr><tr><td>339937141ffb547af8e746718fbf2365cc1570c8</td><td>Facial Emotion Recognition in Real Time
+</td></tr><tr><td>33aa980544a9d627f305540059828597354b076c</td><td></td></tr><tr><td>33ae696546eed070717192d393f75a1583cd8e2c</td><td></td></tr><tr><td>3352426a67eabe3516812cb66a77aeb8b4df4d1b</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 4, NO. 5, APRIL 2015
+<br/>Joint Multi-view Face Alignment in the Wild
+</td></tr><tr><td>334d6c71b6bce8dfbd376c4203004bd4464c2099</td><td>BICONVEX RELAXATION FOR SEMIDEFINITE PROGRAMMING IN
+<br/>COMPUTER VISION
+</td></tr><tr><td>33e20449aa40488c6d4b430a48edf5c4b43afdab</td><td>TRANSACTIONS ON AFFECTIVE COMPUTING
+<br/>The Faces of Engagement: Automatic
+<br/>Recognition of Student Engagement from Facial
+<br/>Expressions
+</td></tr><tr><td>333e7ad7f915d8ee3bb43a93ea167d6026aa3c22</td><td>This is the author's version of an article that has been published in this journal. Changes were made to this version by the publisher prior to publication.
+<br/>The final version of record is available at http://dx.doi.org/10.1109/TIFS.2014.2309851
+<br/>DRAFT
+<br/>3D Assisted Face Recognition: Dealing With
+<br/>Expression Variations
+<br/>
+</td></tr><tr><td>33403e9b4bbd913ae9adafc6751b52debbd45b0e</td><td></td></tr><tr><td>33ad23377eaead8955ed1c2b087a5e536fecf44e</td><td>Augmenting CRFs with Boltzmann Machine Shape Priors for Image Labeling
+<br/>∗ indicates equal contribution
+</td></tr><tr><td>05b8673d810fadf888c62b7e6c7185355ffa4121</td><td>(will be inserted by the editor)
+<br/>A Comprehensive Survey to Face Hallucination
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>05e658fed4a1ce877199a4ce1a8f8cf6f449a890</td><td></td></tr><tr><td>05ad478ca69b935c1bba755ac1a2a90be6679129</td><td>Attribute Dominance: What Pops Out?
+<br/>Georgia Tech
+</td></tr><tr><td>0562fc7eca23d47096472a1d42f5d4d086e21871</td><td></td></tr><tr><td>054738ce39920975b8dcc97e01b3b6cc0d0bdf32</td><td>Towards the Design of an End-to-End Automated
+<br/>System for Image and Video-based Recognition
+</td></tr><tr><td>05e03c48f32bd89c8a15ba82891f40f1cfdc7562</td><td>Scalable Robust Principal Component
+<br/>Analysis using Grassmann Averages
+</td></tr><tr><td>050fdbd2e1aa8b1a09ed42b2e5cc24d4fe8c7371</td><td>Contents
+<br/>Scale Space and PDE Methods
+<br/>Spatio-Temporal Scale Selection in Video Data . . . . . . . . . . . . . . . . . . . . .
+<br/>Dynamic Texture Recognition Using Time-Causal Spatio-Temporal
+<br/>Scale-Space Filters . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+<br/>Corner Detection Using the Affine Morphological Scale Space . . . . . . . . . . .
+<br/>Luis Alvarez
+<br/>Nonlinear Spectral Image Fusion. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+<br/>Martin Benning, Michael Möller, Raz Z. Nossek, Martin Burger,
+<br/>Daniel Cremers, Guy Gilboa, and Carola-Bibiane Schönlieb
+<br/>16
+<br/>29
+<br/>41
+<br/>Tubular Structure Segmentation Based on Heat Diffusion. . . . . . . . . . . . . . .
+<br/>54
+<br/>Fang Yang and Laurent D. Cohen
+<br/>Analytic Existence and Uniqueness Results for PDE-Based Image
+<br/>Reconstruction with the Laplacian . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+<br/>Laurent Hoeltgen, Isaac Harris, Michael Breuß, and Andreas Kleefeld
+<br/>Combining Contrast Invariant L1 Data Fidelities with Nonlinear
+<br/>Spectral Image Decomposition . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+<br/>Leonie Zeune, Stephan A. van Gils, Leon W.M.M. Terstappen,
+<br/>and Christoph Brune
+<br/>An Efficient and Stable Two-Pixel Scheme for 2D
+<br/>Forward-and-Backward Diffusion . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+<br/>Martin Welk and Joachim Weickert
+<br/>66
+<br/>80
+<br/>94
+<br/>Restoration and Reconstruction
+<br/>Blind Space-Variant Single-Image Restoration of Defocus Blur. . . . . . . . . . .
+<br/>109
+<br/>Leah Bar, Nir Sochen, and Nahum Kiryati
+<br/>Denoising by Inpainting. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+<br/>121
+<br/>Robin Dirk Adam, Pascal Peter, and Joachim Weickert
+<br/>Stochastic Image Reconstruction from Local Histograms
+<br/>of Gradient Orientation . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+<br/>Agnès Desolneux and Arthur Leclaire
+<br/>133
+</td></tr><tr><td>056294ff40584cdce81702b948f88cebd731a93e</td><td></td></tr><tr><td>052880031be0a760a5b606b2ad3d22f237e8af70</td><td>Datasets on object manipulation and interaction: a survey
+</td></tr><tr><td>05ea7930ae26165e7e51ff11b91c7aa8d7722002</td><td>Learning And-Or Model to Represent Context and
+<br/>Occlusion for Car Detection and Viewpoint Estimation
+</td></tr><tr><td>051a84f0e39126c1ebeeb379a405816d5d06604d</td><td>Cogn Comput (2009) 1:257–267
+<br/>DOI 10.1007/s12559-009-9018-7
+<br/>Biometric Recognition Performing in a Bioinspired System
+<br/>Joan Fa`bregas Æ Marcos Faundez-Zanuy
+<br/>Published online: 20 May 2009
+<br/>Ó Springer Science+Business Media, LLC 2009
+</td></tr><tr><td>05f4d907ee2102d4c63a3dc337db7244c570d067</td><td></td></tr><tr><td>05a7be10fa9af8fb33ae2b5b72d108415519a698</td><td>Multilayer and Multimodal Fusion of Deep Neural Networks
+<br/>for Video Classification
+<br/>NVIDIA
+</td></tr><tr><td>050a149051a5d268fcc5539e8b654c2240070c82</td><td>MAGISTERSKÉ A DOKTORSKÉSTUDIJNÍ PROGRAMY31. 5. 2018SBORNÍKSTUDENTSKÁ VĚDECKÁ KONFERENCE </td></tr><tr><td>0580edbd7865414c62a36da9504d1169dea78d6f</td><td>Baseline CNN structure analysis for facial expression recognition
+</td></tr><tr><td>05e96d76ed4a044d8e54ef44dac004f796572f1a</td><td></td></tr><tr><td>9d839dfc9b6a274e7c193039dfa7166d3c07040b</td><td>Augmented Faces
+<br/>1ETH Z¨urich
+<br/>2Kooaba AG
+<br/>3K.U. Leuven
+</td></tr><tr><td>9d60ad72bde7b62be3be0c30c09b7d03f9710c5f</td><td>A Survey: Face Recognition Techniques
+<br/>Assistant Professor, ITM GOI
+<br/>M Tech, ITM GOI
+<br/>face
+<br/>video
+<br/>(Eigen
+<br/>passport-verification,
+</td></tr><tr><td>9cfb3a68fb10a59ec2a6de1b24799bf9154a8fd1</td><td></td></tr><tr><td>9ca7899338129f4ba6744f801e722d53a44e4622</td><td>Deep Neural Networks Regularization for Structured
+<br/>Output Prediction
+<br/>Soufiane Belharbi∗
+<br/>INSA Rouen, LITIS
+<br/>76000 Rouen, France
+<br/>INSA Rouen, LITIS
+<br/>76000 Rouen, France
+<br/>INSA Rouen, LITIS
+<br/>76000 Rouen, France
+<br/>INSA Rouen, LITIS
+<br/>76000 Rouen, France
+<br/>Normandie Univ, UNIROUEN, UNIHAVRE,
+<br/>Normandie Univ, UNIROUEN, UNIHAVRE,
+<br/>Normandie Univ, UNIROUEN, UNIHAVRE,
+<br/>Normandie Univ, UNIROUEN, UNIHAVRE,
+</td></tr><tr><td>9c1664f69d0d832e05759e8f2f001774fad354d6</td><td>Action representations in robotics: A
+<br/>taxonomy and systematic classification
+<br/>Journal Title
+<br/>XX(X):1–32
+<br/>c(cid:13)The Author(s) 2016
+<br/>Reprints and permission:
+<br/>sagepub.co.uk/journalsPermissions.nav
+<br/>DOI: 10.1177/ToBeAssigned
+<br/>www.sagepub.com/
+</td></tr><tr><td>9c065dfb26ce280610a492c887b7f6beccf27319</td><td>Learning from Video and Text via Large-Scale Discriminative Clustering
+<br/>1 ´Ecole Normale Sup´erieure
+<br/>2Inria
+<br/>3CIIRC
+</td></tr><tr><td>02601d184d79742c7cd0c0ed80e846d95def052e</td><td>Graphical Representation for Heterogeneous
+<br/>Face Recognition
+</td></tr><tr><td>02cc96ad997102b7c55e177ac876db3b91b4e72c</td><td>MuseumVisitors: a dataset for pedestrian and group detection, gaze estimation
+<br/>and behavior understanding
+</td></tr><tr><td>02fda07735bdf84554c193811ba4267c24fe2e4a</td><td>Illumination Invariant Face Recognition
+<br/>Using Near-Infrared Images
+</td></tr><tr><td>02dd0af998c3473d85bdd1f77254ebd71e6158c6</td><td>PPP: Joint Pointwise and Pairwise Image Label Prediction
+<br/>1Department of Computer Science, Arizona State Univerity
+<br/>2Yahoo Research
+</td></tr><tr><td>029317f260b3303c20dd58e8404a665c7c5e7339</td><td>1276
+<br/>Character Identification in Feature-Length Films
+<br/>Using Global Face-Name Matching
+<br/>and Yeh-Min Huang, Member, IEEE
+</td></tr><tr><td>0273414ba7d56ab9ff894959b9d46e4b2fef7fd0</td><td>Photographic home styles in Congress: a
+<br/>computer vision approach∗
+<br/>December 1, 2016
+</td></tr><tr><td>02e133aacde6d0977bca01ffe971c79097097b7f</td><td></td></tr><tr><td>02567fd428a675ca91a0c6786f47f3e35881bcbd</td><td>ACCEPTED BY IEEE TIP
+<br/>Deep Label Distribution Learning
+<br/>With Label Ambiguity
+</td></tr><tr><td>029b53f32079063047097fa59cfc788b2b550c4b</td><td></td></tr><tr><td>02bd665196bd50c4ecf05d6852a4b9ba027cd9d0</td><td></td></tr><tr><td>026b5b8062e5a8d86c541cfa976f8eee97b30ab8</td><td>MDLFace: Memorability Augmented Deep Learning for Video Face Recognition
+<br/>IIIT-Delhi, India
+</td></tr><tr><td>0278acdc8632f463232e961563e177aa8c6d6833</td><td>Selective Transfer Machine for Personalized
+<br/>Facial Expression Analysis
+<br/>1 INTRODUCTION
+<br/>Index Terms—Facial expression analysis, personalization, domain adaptation, transfer learning, support vector machine (SVM)
+<br/>A UTOMATIC facial AU detection confronts a number of
+</td></tr><tr><td>02c993d361dddba9737d79e7251feca026288c9c</td><td></td></tr><tr><td>a46283e90bcdc0ee35c680411942c90df130f448</td><td></td></tr><tr><td>a4a5ad6f1cc489427ac1021da7d7b70fa9a770f2</td><td>Yudistira and Kurita EURASIP Journal on Image and Video
+<br/>Processing (2017) 2017:85
+<br/>DOI 10.1186/s13640-017-0235-9
+<br/>EURASIP Journal on Image
+<br/>and Video Processing
+<br/>RESEARCH
+<br/>Open Access
+<br/>Gated spatio and temporal convolutional
+<br/>neural network for activity recognition:
+<br/>towards gated multimodal deep learning
+</td></tr><tr><td>a40f8881a36bc01f3ae356b3e57eac84e989eef0</td><td>End-to-end semantic face segmentation with conditional
+<br/>random fields as convolutional, recurrent and adversarial
+<br/>networks
+</td></tr><tr><td>a44590528b18059b00d24ece4670668e86378a79</td><td>Learning the Hierarchical Parts of Objects by Deep
+<br/>Non-Smooth Nonnegative Matrix Factorization
+</td></tr><tr><td>a4c430b7d849a8f23713dc283794d8c1782198b2</td><td>Video Concept Embedding
+<br/>1. Introduction
+<br/>In the area of natural language processing, there has been
+<br/>much success in learning distributed representations for
+<br/>words as vectors. Doing so has an advantage over using
+<br/>simple labels, or a one-hot coding scheme for representing
+<br/>individual words. In learning distributed vector representa-
+<br/>tions for words, we manage to capture semantic relatedness
+<br/>of words in vector distance. For example, the word vector
+<br/>for ”car” and ”road” should end up being closer together in
+<br/>the vector space representation than ”car” and ”penguin”.
+<br/>This has been very useful in NLP areas of machine transla-
+<br/>tion and semantic understanding.
+<br/>In the computer vision domain, video understanding is a
+<br/>very important topic.
+<br/>It is made hard due to the large
+<br/>amount of high dimensional data in videos. One strategy
+<br/>to address this is to summarize a video into concepts (eg.
+<br/>running, climbing, cooking). This allows us to represent a
+<br/>video in a very natural way to humans, such as a sequence
+<br/>of semantic events. However this has the same shortcom-
+<br/>ings that one-hot coding of words have.
+<br/>The goal of this project is to find a meaningful way to em-
+<br/>bed video concepts into a vector space. The hope would
+<br/>be to capture semantic relatedness of concepts in a vector
+<br/>representation, essentially doing for videos what word2vec
+<br/>did for text. Having a vector representation for video con-
+<br/>cepts would help in areas such as semantic video retrieval
+<br/>and video classification, as it would provide a statistically
+<br/>meaningful and robust way of representing videos as lower
+<br/>dimensional vectors. An interesting thing would be to ob-
+<br/>serve if such a vector representation would result in ana-
+<br/>logical reasoning using simple vector arithmetic.
+<br/>Figure 1 shows an example of concepts detected at differ-
+<br/>ent snapshots in the same video. For example, consider
+<br/>the scenario where the concepts Kicking a ball, Soccer and
+<br/>Running are detected in the three snapshots respectively
+<br/>(from left to right). Since, these snapshots belong in the
+<br/>same video, we expect that these concepts are semantically
+<br/>similar and that they should lie close in the resulting em-
+<br/>bedding space. The aim of this project is to find a vector
+<br/>space embedding for the space of concepts such that vector
+<br/>representations for semantically similar concepts (in this
+<br/>Figure 1. Example snapshots from the same video
+<br/>case, Running, Kicking and Soccer) lie in the vicinity of
+<br/>each other.
+<br/>2. Related Work
+<br/>(Mikolov et al., 2013a) introduces the popular skip-gram
+<br/>model to learn distributed representations of words from
+<br/>very large linguistic datasets. Specifically, it uses each
+<br/>word as an input to a log-linear classifier and predict words
+<br/>within a certain range before and after the current word in
+<br/>the dataset.
+<br/>(Mikolov et al., 2013b) extends this model
+<br/>to learn representations for phrases, in addition to words,
+<br/>and also improve the quality of vectors and training speed.
+<br/>These works also show that the skip-gram model exhibits
+<br/>a linear structure that enables it to perform reasoning using
+<br/>basic vector arithmetic. The skip-gram model from these
+<br/>works is the basis of our model in learning representations
+<br/>for concepts.
+<br/>(Le & Mikolov, 2014) extends the concept of word vectors
+<br/>to sentences and paragraphs. Their approach is more in-
+<br/>volved than a simple bag of words approach, in that it tries
+<br/>to capture the nature of the words in the paragraph. They
+<br/>construct the paragraph vector in such a way that it can be
+<br/>used to predict the word vectors that are contained inside
+<br/>the paragraph. They do this by first learning word vectors,
+<br/>such that the probability of a word vector given its context
+<br/>is maximized. To learn paragraph vectors, the paragraph
+<br/>is essentially treated as a word, and the words it contains
+<br/>become the context. This provides a key insight in how
+<br/>a set of concept vectors can be used together to provide a
+<br/>more meaningful vector representation for videos, which
+<br/>can then be used for retrieval.
+<br/>(Hu et al.) utilizes structured knowledge in the data to learn
+<br/>distributed representations that improve semantic related-
+</td></tr><tr><td>a4cc626da29ac48f9b4ed6ceb63081f6a4b304a2</td><td></td></tr><tr><td>a4f37cfdde3af723336205b361aefc9eca688f5c</td><td>Recent Advances
+<br/>in Face Recognition
+</td></tr><tr><td>a30869c5d4052ed1da8675128651e17f97b87918</td><td>Fine-Grained Comparisons with Attributes
+</td></tr><tr><td>a3ebacd8bcbc7ddbd5753935496e22a0f74dcf7b</td><td>First International Workshop on Adaptive Shot Learning
+<br/>for Gesture Understanding and Production
+<br/>ASL4GUP 2017
+<br/>Held in conjunction with IEEE FG 2017, in May 30, 2017,
+<br/>Washington DC, USA
+</td></tr><tr><td>a3d8b5622c4b9af1f753aade57e4774730787a00</td><td>Pose-Aware Person Recognition
+<br/>Anoop Namboodiri (cid:63)
+<br/>(cid:63) CVIT, IIIT Hyderabad, India
+<br/>† Facebook AI Research
+</td></tr><tr><td>a3017bb14a507abcf8446b56243cfddd6cdb542b</td><td>Face Localization and Recognition in Varied
+<br/>Expressions and Illumination
+<br/>Hui-Yu Huang, Shih-Hang Hsu
+<br/>
+</td></tr><tr><td>a378fc39128107815a9a68b0b07cffaa1ed32d1f</td><td>Determining a Suitable Metric When using Non-negative Matrix Factorization∗
+<br/>Computer Vision Center, Dept. Inform`atica
+<br/>Universitat Aut`onoma de Barcelona
+<br/>08193 Bellaterra, Barcelona, Spain
+</td></tr><tr><td>a34d75da87525d1192bda240b7675349ee85c123</td><td>Naive-Deep Face Recognition: Touching the Limit of LFW Benchmark or Not?
+<br/>Face++, Megvii Inc.
+<br/>Face++, Megvii Inc.
+<br/>Face++, Megvii Inc.
+</td></tr><tr><td>a3f69a073dcfb6da8038607a9f14eb28b5dab2db</td><td>Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intelligence (IJCAI-18)
+<br/>1184
+</td></tr><tr><td>a3f78cc944ac189632f25925ba807a0e0678c4d5</td><td>Action Recognition in Realistic Sports Videos
+</td></tr><tr><td>a33f20773b46283ea72412f9b4473a8f8ad751ae</td><td></td></tr><tr><td>a3a6a6a2eb1d32b4dead9e702824375ee76e3ce7</td><td>Multiple Local Curvature Gabor Binary
+<br/>Patterns for Facial Action Recognition
+<br/>Signal Processing Laboratory (LTS5),
+<br/>´Ecole Polytechnique F´ed´erale de Lausanne, Switzerland
+</td></tr><tr><td>a32c5138c6a0b3d3aff69bcab1015d8b043c91fb</td><td>Downloaded From: https://www.spiedigitallibrary.org/journals/Journal-of-Electronic-Imaging on 9/19/2018
+<br/>Terms of Use: https://www.spiedigitallibrary.org/terms-of-use
+<br/>Videoredaction:asurveyandcomparisonofenablingtechnologiesShaganSahAmeyaShringiRaymondPtuchaAaronBurryRobertLoceShaganSah,AmeyaShringi,RaymondPtucha,AaronBurry,RobertLoce,“Videoredaction:asurveyandcomparisonofenablingtechnologies,”J.Electron.Imaging26(5),051406(2017),doi:10.1117/1.JEI.26.5.051406. </td></tr><tr><td>a3d78bc94d99fdec9f44a7aa40c175d5a106f0b9</td><td>Recognizing Violence in Movies
+<br/>CIS400/401 Project Final Report
+<br/>Univ. of Pennsylvania
+<br/>Philadelphia, PA
+<br/>Univ. of Pennsylvania
+<br/>Philadelphia, PA
+<br/>Ben Sapp
+<br/>Univ. of Pennsylvania
+<br/>Philadelphia, PA
+<br/>Univ. of Pennsylvania
+<br/>Philadelphia, PA
+</td></tr><tr><td>a3eab933e1b3db1a7377a119573ff38e780ea6a3</td><td>978-1-4244-4296-6/10/$25.00 ©2010 IEEE
+<br/>838
+<br/>ICASSP 2010
+</td></tr><tr><td>a35d3ba191137224576f312353e1e0267e6699a1</td><td>Increasing security in DRM systems
+<br/>through biometric authentication.
+<br/>ecuring the exchange
+<br/>of intellectual property
+<br/>and providing protection
+<br/>to multimedia contents in
+<br/>distribution systems have enabled the
+<br/>advent of digital rights management
+<br/>(DRM) systems [5], [14], [21], [47],
+<br/>[51], [53]. Rights holders should be able to
+<br/>license, monitor, and track the usage of rights
+<br/>in a dynamic digital trading environment, espe-
+<br/>cially in the near future when universal multimedia
+<br/>access (UMA) becomes a reality, and any multimedia
+<br/>content will be available anytime, anywhere. In such
+<br/>DRM systems, encryption algorithms, access control,
+<br/>key management strategies, identification and tracing
+<br/>of contents, or copy control will play a prominent role
+<br/>to supervise and restrict access to multimedia data,
+<br/>avoiding unauthorized or fraudulent operations.
+<br/>A key component of any DRM system, also known
+<br/>as intellectual property management and protection
+<br/>(IPMP) systems in the MPEG-21 framework, is user
+<br/>authentication to ensure that
+<br/>only those with specific rights are
+<br/>able to access the digital informa-
+<br/>tion. It is here that biometrics can
+<br/>play an essential role, reinforcing securi-
+<br/>ty at all stages where customer authentica-
+<br/>tion is needed. The ubiquity of users and
+<br/>devices, where the same user might want to
+<br/>access to multimedia contents from different
+<br/>environments (home, car, work, jogging, etc.) and
+<br/>also from different devices or media (CD, DVD,
+<br/>home computer, laptop, PDA, 2G/3G mobile phones,
+<br/>game consoles, etc.) strengthens the need for reliable
+<br/>and universal authentication of users.
+<br/>Classical user authentication systems have been
+<br/>based in something that you have (like a key, an identi-
+<br/>fication card, etc.) and/or something that you know
+<br/>(like a password, or a PIN). With biometrics, a new
+<br/>user authentication paradigm is added: something that
+<br/>you are (e.g., fingerprints or face) or something that
+<br/>you do or produce (e.g., handwritten signature or
+<br/>50
+<br/>IEEE SIGNAL PROCESSING MAGAZINE
+<br/>1053-5888/04/$20.00©2004IEEE
+<br/>MARCH 2004
+</td></tr><tr><td>b558be7e182809f5404ea0fcf8a1d1d9498dc01a</td><td>Bottom-up and top-down reasoning with convolutional latent-variable models
+<br/>UC Irvine
+<br/>UC Irvine
+</td></tr><tr><td>b5fc4f9ad751c3784eaf740880a1db14843a85ba</td><td>SIViP (2007) 1:225–237
+<br/>DOI 10.1007/s11760-007-0016-5
+<br/>ORIGINAL PAPER
+<br/>Significance of image representation for face verification
+<br/>Received: 29 August 2006 / Revised: 28 March 2007 / Accepted: 28 March 2007 / Published online: 1 May 2007
+<br/>© Springer-Verlag London Limited 2007
+</td></tr><tr><td>b562def2624f59f7d3824e43ecffc990ad780898</td><td></td></tr><tr><td>b5160e95192340c848370f5092602cad8a4050cd</td><td>IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY, TO APPEAR
+<br/>Video Classification With CNNs: Using The Codec
+<br/>As A Spatio-Temporal Activity Sensor
+</td></tr><tr><td>b52c0faba5e1dc578a3c32a7f5cfb6fb87be06ad</td><td>Journal of Applied Research and
+<br/>Technology
+<br/>ISSN: 1665-6423
+<br/>Centro de Ciencias Aplicadas y
+<br/>Desarrollo Tecnológico
+<br/>México
+<br/>
+<br/>Hussain Shah, Jamal; Sharif, Muhammad; Raza, Mudassar; Murtaza, Marryam; Ur-Rehman, Saeed
+<br/>Robust Face Recognition Technique under Varying Illumination
+<br/>Journal of Applied Research and Technology, vol. 13, núm. 1, febrero, 2015, pp. 97-105
+<br/>Centro de Ciencias Aplicadas y Desarrollo Tecnológico
+<br/>Distrito Federal, México
+<br/>Available in: http://www.redalyc.org/articulo.oa?id=47436895009
+<br/> How to cite
+<br/> Complete issue
+<br/> More information about this article
+<br/> Journal's homepage in redalyc.org
+<br/>Scientific Information System
+<br/>Network of Scientific Journals from Latin America, the Caribbean, Spain and Portugal
+<br/>Non-profit academic project, developed under the open access initiative
+</td></tr><tr><td>b52886610eda6265a2c1aaf04ce209c047432b6d</td><td>Microexpression Identification and Categorization
+<br/>using a Facial Dynamics Map
+</td></tr><tr><td>b5857b5bd6cb72508a166304f909ddc94afe53e3</td><td>SSIG and IRISA at Multimodal Person Discovery
+<br/>1Department of Computer Science, Universidade Federal de Minas Gerais, Belo Horizonte, Brazil
+<br/>2IRISA & Inria Rennes , CNRS, Rennes, France
+</td></tr><tr><td>b59f441234d2d8f1765a20715e227376c7251cd7</td><td></td></tr><tr><td>b51e3d59d1bcbc023f39cec233f38510819a2cf9</td><td>CBMM Memo No. 003
+<br/>March 27, 2014
+<br/>Can a biologically-plausible hierarchy effectively
+<br/>replace face detection, alignment, and
+<br/>recognition pipelines?
+<br/>by
+</td></tr><tr><td>b54c477885d53a27039c81f028e710ca54c83f11</td><td>1201
+<br/>Semi-Supervised Kernel Mean Shift Clustering
+</td></tr><tr><td>b2a0e5873c1a8f9a53a199eecae4bdf505816ecb</td><td>Hybrid VAE: Improving Deep Generative Models
+<br/>using Partial Observations
+<br/>Snap Research
+<br/>Microsoft Research
+</td></tr><tr><td>b2b535118c5c4dfcc96f547274cdc05dde629976</td><td>JOURNAL OF IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. X, XXX 2017
+<br/>Automatic Recognition of Facial Displays of
+<br/>Unfelt Emotions
+<br/>Escalera, Xavier Bar´o, Sylwia Hyniewska, Member, IEEE, J¨uri Allik,
+</td></tr><tr><td>b235b4ccd01a204b95f7408bed7a10e080623d2e</td><td>Regularizing Flat Latent Variables with Hierarchical Structures
+</td></tr><tr><td>b2c25af8a8e191c000f6a55d5f85cf60794c2709</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>A Novel Dimensionality Reduction Technique based on
+<br/>Kernel Optimization Through Graph Embedding
+<br/>N. Vretos, A. Tefas and I. Pitas
+<br/>the date of receipt and acceptance should be inserted later
+</td></tr><tr><td>d904f945c1506e7b51b19c99c632ef13f340ef4c</td><td>A scalable 3D HOG model for fast object detection and viewpoint estimation
+<br/>KU Leuven, ESAT/PSI - iMinds
+<br/>Kasteelpark Arenberg 10 B-3001 Leuven, Belgium
+</td></tr><tr><td>d94d7ff6f46ad5cab5c20e6ac14c1de333711a0c</td><td>978-1-5090-4117-6/17/$31.00 ©2017 IEEE
+<br/>3031
+<br/>ICASSP 2017
+</td></tr><tr><td>d9739d1b4478b0bf379fe755b3ce5abd8c668f89</td><td></td></tr><tr><td>d9318c7259e394b3060b424eb6feca0f71219179</td><td>406
+<br/>Face Matching and Retrieval Using Soft Biometrics
+</td></tr><tr><td>d9a1dd762383213741de4c1c1fd9fccf44e6480d</td><td></td></tr><tr><td>d9c4b1ca997583047a8721b7dfd9f0ea2efdc42c</td><td>Learning Inference Models for Computer Vision
+</td></tr><tr><td>aca232de87c4c61537c730ee59a8f7ebf5ecb14f</td><td>EBGM VS SUBSPACE PROJECTION FOR FACE RECOGNITION
+<br/>19.5 Km Markopoulou Avenue, P.O. Box 68, Peania, Athens, Greece
+<br/>Athens Information Technology
+<br/>Keywords:
+<br/>Human-Machine Interfaces, Computer Vision, Face Recognition.
+</td></tr><tr><td>ac6a9f80d850b544a2cbfdde7002ad5e25c05ac6</td><td>779
+<br/>Privacy-Protected Facial Biometric Verification
+<br/>Using Fuzzy Forest Learning
+</td></tr><tr><td>aca273a9350b10b6e2ef84f0e3a327255207d0f5</td><td></td></tr><tr><td>ac0d3f6ed5c42b7fc6d7c9e1a9bb80392742ad5e</td><td></td></tr><tr><td>ac820d67b313c38b9add05abef8891426edd5afb</td><td></td></tr><tr><td>ac26166857e55fd5c64ae7194a169ff4e473eb8b</td><td>Personalized Age Progression with Bi-level
+<br/>Aging Dictionary Learning
+</td></tr><tr><td>ac8441e30833a8e2a96a57c5e6fede5df81794af</td><td>IEEE TRANSACTIONS ON IMAGE PROCESSING
+<br/>Hierarchical Representation Learning for Kinship
+<br/>Verification
+</td></tr><tr><td>acb83d68345fe9a6eb9840c6e1ff0e41fa373229</td><td>Kernel Methods in Computer Vision:
+<br/>Object Localization, Clustering,
+<br/>and Taxonomy Discovery
+<br/>vorgelegt von
+<br/>Matthew Brian Blaschko, M.S.
+<br/>aus La Jolla
+<br/>Von der Fakult¨at IV - Elektrotechnik und Informatik
+<br/>der Technischen Universit¨at Berlin
+<br/>zur Erlangung des akademischen Grades
+<br/>Doktor der Naturwissenschaften
+<br/>Dr. rer. nat.
+<br/>genehmigte Dissertation
+<br/>Promotionsausschuß:
+<br/>Vorsitzender: Prof. Dr. O. Hellwich
+<br/>Berichter: Prof. Dr. T. Hofmann
+<br/>Berichter: Prof. Dr. K.-R. M¨uller
+<br/>Berichter: Prof. Dr. B. Sch¨olkopf
+<br/>Tag der wissenschaftlichen Aussprache: 23.03.2009
+<br/>Berlin 2009
+<br/>D83
+</td></tr><tr><td>adf7ccb81b8515a2d05fd3b4c7ce5adf5377d9be</td><td>Apprentissage de métrique appliqué à la
+<br/>détection de changement de page Web et
+<br/>aux attributs relatifs
+<br/>thieu Cord*
+<br/>* Sorbonne Universités, UPMC Univ Paris 06, UMR 7606, LIP6, F-75005, Paris,
+<br/>France
+<br/>RÉSUMÉ. Nous proposons dans cet article un nouveau schéma d’apprentissage de métrique.
+<br/>Basé sur l’exploitation de contraintes qui impliquent des quadruplets d’images, notre approche
+<br/>vise à modéliser des relations sémantiques de similarités riches ou complexes. Nous étudions
+<br/>comment ce schéma peut être utilisé dans des contextes tels que la détection de régions impor-
+<br/>tantes dans des pages Web ou la reconnaissance à partir d’attributs relatifs.
+</td></tr><tr><td>ada73060c0813d957576be471756fa7190d1e72d</td><td>VRPBench: A Vehicle Routing Benchmark Tool
+<br/>October 19, 2016
+</td></tr><tr><td>adaf2b138094981edd615dbfc4b7787693dbc396</td><td>Statistical Methods For Facial
+<br/>Shape-from-shading and Recognition
+<br/>Submitted for the degree of Doctor of Philosophy
+<br/>Department of Computer Science
+<br/>20th February 2007
+</td></tr><tr><td>ad6745dd793073f81abd1f3246ba4102046da022</td><td></td></tr><tr><td>adf62dfa00748381ac21634ae97710bb80fc2922</td><td>ViFaI: A trained video face indexing scheme
+<br/>1. Introduction
+<br/>With the increasing prominence of inexpensive
+<br/>video recording devices (e.g., digital camcorders and
+<br/>video recording smartphones),
+<br/>the average user’s
+<br/>video collection today is increasing rapidly. With this
+<br/>development, there arises a natural desire to rapidly
+<br/>access a subset of one’s collection of videos. The solu-
+<br/>tion to this problem requires an effective video index-
+<br/>ing scheme. In particular, we must be able to easily
+<br/>process a video to extract such indexes.
+<br/>Today, there also exist large sets of labeled (tagged)
+<br/>face images. One important example is an individual’s
+<br/>Facebook profile. Such a set of of tagged images of
+<br/>one’s self, family, friends, and colleagues represents
+<br/>an extremely valuable potential training set.
+<br/>In this work, we explore how to leverage the afore-
+<br/>mentioned training set to solve the video indexing
+<br/>problem.
+<br/>2. Problem Statement
+<br/>Use a labeled (tagged) training set of face images
+<br/>to extract relevant indexes from a collection of videos,
+<br/>and use these indexes to answer boolean queries of the
+<br/>form: “videos with ‘Person 1’ OP1 ‘Person 2’ OP2 ...
+<br/>OP(N-1) ‘Person N’ ”, where ‘Person N’ corresponds
+<br/>to a training label (tag) and OPN is a boolean operand
+<br/>such as AND, OR, NOT, XOR, and so on.
+<br/>3. Proposed Scheme
+<br/>In this section, we outline our proposed scheme to
+<br/>address the problem we postulate in the previous sec-
+<br/>tion. We provide further details about the system im-
+<br/>plementation in Section 4.
+<br/>At a high level, we subdivide the problem into two
+<br/>key phases: the first ”off-line” executed once, and the
+<br/>second ”on-line” phase instantiated upon each query.
+<br/>For the purposes of this work, we define an index as
+<br/>follows: <video id, tag, frame #>.
+<br/>3.1. The training phase
+<br/>We first outline Phase 1 (the training or “off-line”
+<br/>phase):
+<br/>1. Use the labeled training set plus an additional set
+<br/>of ‘other’ faces to compute the Fisher Linear Dis-
+<br/>criminant (FLD) [1].
+<br/>2. Project the training data onto the space defined by
+<br/>the eigenvectors returned by the FLD, and train
+<br/>a classifier (first nearest neighbour, then SVM if
+<br/>required) using the training features.
+<br/>3. Iterate through each frame of each video, detect-
+<br/>ing faces [2], classifying detected results, and add
+<br/>an index if the detected face corresponds to one of
+<br/>the labeled classes from the previous step.
+<br/>3.2. The query phase
+<br/>Now, we outline Phase 2 (the query or “on-line”
+<br/>phase):
+<br/>1. Key the indexes on their video id.
+<br/>2. For each video, evaluate the boolean query for the
+<br/>set of corresponding indexes.
+<br/>3. Keep videos for which the boolean query evalu-
+<br/>ates true, and discard those for which it evaluates
+<br/>false.
+<br/>4. Implementation Details
+<br/>We are implementing the project in C++, leverag-
+<br/>ing the OpenCV v2.2 framework [4]. In this section,
+<br/>we will highlight some of the critical implementation
+<br/>details of our proposed system.
+</td></tr><tr><td>bba281fe9c309afe4e5cc7d61d7cff1413b29558</td><td>Social Cognitive and Affective Neuroscience, 2017, 984–992
+<br/>doi: 10.1093/scan/nsx030
+<br/>Advance Access Publication Date: 11 April 2017
+<br/>Original article
+<br/>An unpleasant emotional state reduces working
+<br/>memory capacity: electrophysiological evidence
+<br/>1Laboratorio de Neurofisiologia do Comportamento, Departamento de Fisiologia e Farmacologia, Instituto
+<br/>Biome´dico, Universidade Federal Fluminense, Niteroi, Brazil, 2MograbiLab, Departamento de Psicologia,
+<br/>Pontifıcia Universidade Catolica do Rio de Janeiro, Rio de Janeiro, Brazil, and 3Laboratorio de Engenharia
+<br/>Pulmonar, Programa de Engenharia Biome´dica, COPPE, Universidade Federal do Rio de Janeiro, Rio de Janeiro, Brazil
+</td></tr><tr><td>bb557f4af797cae9205d5c159f1e2fdfe2d8b096</td><td></td></tr><tr><td>bb06ef67a49849c169781657be0bb717587990e0</td><td>Impact of Temporal Subsampling on Accuracy and
+<br/>Performance in Practical Video Classification
+<br/>F. Scheidegger∗†, L. Cavigelli∗, M. Schaffner∗, A. C. I. Malossi†, C. Bekas†, L. Benini∗‡
+<br/>∗ETH Zürich, 8092 Zürich, Switzerland
+<br/>†IBM Research - Zürich, 8803 Rüschlikon, Switzerland
+<br/>‡Università di Bologna, Italy
+</td></tr><tr><td>bb22104d2128e323051fb58a6fe1b3d24a9e9a46</td><td>IAJ=JE BH ==OIEI 1 AIIA?A ?= EBH=JE =EO B?KIAI  JDA IK>JA
+<br/>ABBA?JELAAII B KH =CHEJD
+<br/>==OIEI 7IK=O = B=?E= ANFHAIIE ==OIEI IOIJA ?J=EI JDHAA IJ=CAI B=?A =?GKE
+<br/>9DAJDAH KIEC *=OAIE= ?=IIEAH " & IKFFHJ LA?JH =?DEA 58  H AKH=
+<br/>HACEI E = IECA ?=IIEAH EI = ? IJH=JACO & 0MALAH J = ?= HACEI
+</td></tr><tr><td>bbe1332b4d83986542f5db359aee1fd9b9ba9967</td><td></td></tr><tr><td>bb7f2c5d84797742f1d819ea34d1f4b4f8d7c197</td><td>TO APPEAR IN TPAMI
+<br/>From Images to 3D Shape Attributes
+</td></tr><tr><td>bbf01aa347982592b3e4c9e4f433e05d30e71305</td><td></td></tr><tr><td>bbf1396eb826b3826c5a800975047beabde2f0de</td><td></td></tr><tr><td>bbd1eb87c0686fddb838421050007e934b2d74ab</td><td></td></tr><tr><td>d73d2c9a6cef79052f9236e825058d5d9cdc1321</td><td>2014-ENST-0040
+<br/>EDITE - ED 130
+<br/>Doctorat ParisTech
+<br/>T H È S E
+<br/>pour obtenir le grade de docteur délivré par
+<br/>TELECOM ParisTech
+<br/>Spécialité « Signal et Images »
+<br/>présentée et soutenue publiquement par
+<br/>le 08 juillet 2014
+<br/>Cutting the Visual World into Bigger Slices for Improved Video
+<br/>Concept Detection
+<br/>Amélioration de la détection des concepts dans les vidéos par de plus grandes tranches du Monde
+<br/>Visuel
+<br/>Directeur de thèse : Bernard Mérialdo
+<br/>Jury
+<br/>M. Philippe-Henri Gosselin, Professeur, INRIA
+<br/>M. Georges Quénot, Directeur de recherche CNRS, LIG
+<br/>M. Georges Linares, Professeur, LIA
+<br/>M. François Brémond, Professeur, INRIA
+<br/>M. Bernard Mérialdo, Professeur, EURECOM
+<br/>Rapporteur
+<br/>Rapporteur
+<br/>Examinateur
+<br/>Examinateur
+<br/>Encadrant
+<br/>TELECOM ParisTech
+<br/>école de l’Institut Télécom - membre de ParisTech
+</td></tr><tr><td>d78077a7aa8a302d4a6a09fb9737ab489ae169a6</td><td></td></tr><tr><td>d7312149a6b773d1d97c0c2b847609c07b5255ec</td><td></td></tr><tr><td>d708ce7103a992634b1b4e87612815f03ba3ab24</td><td>FCVID: Fudan-Columbia Video Dataset
+<br/>Available at: http://bigvid.fudan.edu.cn/FCVID/
+<br/>1 OVERVIEW
+<br/>Recognizing visual contents in unconstrained videos
+<br/>has become a very important problem for many ap-
+<br/>plications, such as Web video search and recommen-
+<br/>dation, smart content-aware advertising, robotics, etc.
+<br/>Existing datasets for video content recognition are
+<br/>either small or do not have reliable manual labels.
+<br/>In this work, we construct and release a new Inter-
+<br/>net video dataset called Fudan-Columbia Video Dataset
+<br/>(FCVID), containing 91,223 Web videos (total duration
+<br/>4,232 hours) annotated manually according to 239
+<br/>categories. We believe that the release of FCVID can
+<br/>stimulate innovative research on this challenging and
+<br/>important problem.
+<br/>2 COLLECTION AND ANNOTATION
+<br/>The categories in FCVID cover a wide range of topics
+<br/>like social events (e.g., “tailgate party”), procedural
+<br/>events (e.g., “making cake”), objects (e.g., “panda”),
+<br/>scenes (e.g., “beach”), etc. These categories were de-
+<br/>fined very carefully. Specifically, we conducted user
+<br/>surveys and used the organization structures on
+<br/>YouTube and Vimeo as references, and browsed nu-
+<br/>merous videos to identify categories that satisfy the
+<br/>following three criteria: (1) utility — high relevance
+<br/>in supporting practical application needs; (2) cover-
+<br/>age — a good coverage of the contents that people
+<br/>record; and (3) feasibility — likely to be automatically
+<br/>recognized in the next several years, and a high
+<br/>frequency of occurrence that is sufficient for training
+<br/>a recognition algorithm.
+<br/>This definition effort led to a set of over 250 candi-
+<br/>date categories. For each category, in addition to the
+<br/>official name used in the public release, we manually
+<br/>defined another alternative name. Videos were then
+<br/>downloaded from YouTube searches using the official
+<br/>and the alternative names as search terms. The pur-
+<br/>pose of using the alternative names was to expand the
+<br/>candidate video sets. For each search, we downloaded
+<br/>1,000 videos, and after removing duplicate videos and
+<br/>some extremely long ones (longer than 30 minutes),
+<br/>there were around 1,000–1,500 candidate videos for
+<br/>each category.
+<br/>All the videos were annotated manually to ensure
+<br/>a high precision of the FCVID labels. In order to min-
+<br/>imize subjectivity, nearly 20 annotators were involved
+<br/>in the task, and a master annotator was assigned to
+<br/>monitor the entire process and double-check all the
+<br/>found positive videos. Some of the videos are multi-
+<br/>labeled, and thus filtering the 1,000–1,500 videos for
+<br/>each category with focus on just the single category
+<br/>label is not adequate. As checking the existence of all
+<br/>the 250+ classes for each video is extremely difficult,
+<br/>we use the following strategy to narrow down the “la-
+<br/>bel search space” for each video. We first grouped the
+<br/>categories according to subjective predictions of label
+<br/>co-occurrences, e.g., “wedding reception” & “wed-
+<br/>ding ceremony”, “waterfall” & “river”, “hiking” &
+<br/>“mountain”, and even “dog” & “birthday”. We then
+<br/>annotated the videos not only based on the target cat-
+<br/>egory label, but also according to the identified related
+<br/>labels. This helped produce a fairly complete label
+<br/>set for FCVID, but largely reduced the annotation
+<br/>workload. After removing the rare categories with
+<br/>less than 100 videos after annotation, the final FCVID
+<br/>dataset contains 91,223 videos and 239 categories,
+<br/>where 183 are events and 56 are objects, scenes, etc.
+<br/>Figure 1 shows the number of videos per category.
+<br/>“Dog” has the largest number of positive videos
+<br/>(1,136), while “making egg tarts” is the most infre-
+<br/>quent category containing only 108 samples. The total
+<br/>duration of FCVID is 4,232 hours with an average
+<br/>video duration of 167 seconds. Figure 2 further gives
+<br/>the average video duration of each category.
+<br/>The categories are organized using a hierarchy con-
+<br/>taining 11 high-level groups, as visualized in Figure 3.
+<br/>3 COMPARISON WITH RELATED DATASETS
+<br/>We compare FCVID with the following datasets. Most
+<br/>of them have been widely adopted in the existing
+<br/>works on video categorization.
+<br/>KTH and Weizmann: The KTH [1] and the Weiz-
+<br/>mann [2] datasets are well-known benchmarks for
+<br/>human action recognition. The former contains 600
+<br/>videos of 6 human actions performed by 25 people
+<br/>in four scenarios, and the latter consists of 81 videos
+<br/>associated with 9 actions performed by 9 actors.
+<br/>Hollywood Human Action: The Hollywood
+<br/>dataset [3] contains 8 action classes collected from
+<br/>32 Hollywood movies with a total of 430 videos.
+</td></tr><tr><td>d7b6bbb94ac20f5e75893f140ef7e207db7cd483</td><td>Griffith Research Online
+<br/>https://research-repository.griffith.edu.au
+<br/>Face Recognition across Pose: A
+<br/>Review
+<br/>Author
+<br/>Zhang, Paul, Gao, Yongsheng
+<br/>Published
+<br/>2009
+<br/>Journal Title
+<br/>Pattern Recognition
+<br/>DOI
+<br/>https://doi.org/10.1016/j.patcog.2009.04.017
+<br/>Copyright Statement
+<br/>Copyright 2009 Elsevier. This is the author-manuscript version of this paper. Reproduced in accordance
+<br/>with the copyright policy of the publisher. Please refer to the journal's website for access to the
+<br/>definitive, published version.
+<br/>Downloaded from
+<br/>http://hdl.handle.net/10072/30193
+</td></tr><tr><td>d78373de773c2271a10b89466fe1858c3cab677f</td><td></td></tr><tr><td>d03265ea9200a993af857b473c6bf12a095ca178</td><td>Multiple deep convolutional neural
+<br/>networks averaging for face
+<br/>alignment
+<br/>Zhouping Yin
+<br/>Downloaded From: http://electronicimaging.spiedigitallibrary.org/ on 05/28/2015 Terms of Use: http://spiedl.org/terms </td></tr><tr><td>d0eb3fd1b1750242f3bb39ce9ac27fc8cc7c5af0</td><td></td></tr><tr><td>d03baf17dff5177d07d94f05f5791779adf3cd5f</td><td></td></tr><tr><td>d0144d76b8b926d22411d388e7a26506519372eb</td><td>Improving Regression Performance with Distributional Losses
+</td></tr><tr><td>d02e27e724f9b9592901ac1f45830341d37140fe</td><td>DA-GAN: Instance-level Image Translation by Deep Attention Generative
+<br/>Adversarial Networks
+<br/>The State Universtiy of New York at Buffalo
+<br/>The State Universtiy of New York at Buffalo
+<br/>Microsoft Research
+<br/>Microsoft Research
+</td></tr><tr><td>d0a21f94de312a0ff31657fd103d6b29db823caa</td><td>Facial Expression Analysis
+</td></tr><tr><td>d03e4e938bcbc25aa0feb83d8a0830f9cd3eb3ea</td><td>Face Recognition with Patterns of Oriented
+<br/>Edge Magnitudes
+<br/>1 Vesalis Sarl, Clermont Ferrand, France
+<br/>2 Gipsa-lab, Grenoble INP, France
+</td></tr><tr><td>d00787e215bd74d32d80a6c115c4789214da5edb</td><td>Faster and Lighter Online
+<br/>Sparse Dictionary Learning
+<br/>Project report
+</td></tr><tr><td>be8c517406528edc47c4ec0222e2a603950c2762</td><td>Harrigan / The new handbook of methods in nonverbal behaviour research 02-harrigan-chap02 Page Proof page 7
+<br/>17.6.2005
+<br/>5:45pm
+<br/>B A S I C R E S E A RC H
+<br/>M E T H O D S A N D
+<br/>P RO C E D U R E S
+</td></tr><tr><td>be48b5dcd10ab834cd68d5b2a24187180e2b408f</td><td>FOR PERSONAL USE ONLY
+<br/>Constrained Low-rank Learning Using Least
+<br/>Squares Based Regularization
+</td></tr><tr><td>be437b53a376085b01ebd0f4c7c6c9e40a4b1a75</td><td>ISSN (Online) 2321 – 2004
+<br/>ISSN (Print) 2321 – 5526
+<br/> INTERNATIONAL JOURNAL OF INNOVATIVE RESEARCH IN ELECTRICAL, ELECTRONICS, INSTRUMENTATION AND CONTROL ENGINEERING
+<br/> Vol. 4, Issue 5, May 2016
+<br/>IJIREEICE
+<br/>Face Recognition and Retrieval Using Cross
+<br/>Age Reference Coding
+<br/> BE, DSCE, Bangalore1
+<br/>Assistant Professor, DSCE, Bangalore2
+</td></tr><tr><td>bebea83479a8e1988a7da32584e37bfc463d32d4</td><td>Discovery of Latent 3D Keypoints via
+<br/>End-to-end Geometric Reasoning
+<br/>Google AI
+</td></tr><tr><td>bef503cdfe38e7940141f70524ee8df4afd4f954</td><td></td></tr><tr><td>beab10d1bdb0c95b2f880a81a747f6dd17caa9c2</td><td>DeepDeblur: Fast one-step blurry face images restoration
+<br/>Tsinghua Unversity
+</td></tr><tr><td>b331ca23aed90394c05f06701f90afd550131fe3</td><td>Zhou et al. EURASIP Journal on Image and Video Processing (2018) 2018:49
+<br/>https://doi.org/10.1186/s13640-018-0287-5
+<br/>EURASIP Journal on Image
+<br/>and Video Processing
+<br/>R ES EAR CH
+<br/>Double regularized matrix factorization for
+<br/>image classification and clustering
+<br/>Open Access
+</td></tr><tr><td>b3cb91a08be4117d6efe57251061b62417867de9</td><td>T. Swearingen and A. Ross. "A label propagation approach for predicting missing biographic labels in
+<br/>A Label Propagation Approach for
+<br/>Predicting Missing Biographic Labels
+<br/>in Face-Based Biometric Records
+</td></tr><tr><td>b3c60b642a1c64699ed069e3740a0edeabf1922c</td><td>Max-Margin Object Detection
+</td></tr><tr><td>b3f7c772acc8bc42291e09f7a2b081024a172564</td><td> www.ijmer.com Vol. 3, Issue. 5, Sep - Oct. 2013 pp-3225-3230 ISSN: 2249-6645
+<br/>International Journal of Modern Engineering Research (IJMER)
+<br/>A novel approach for performance parameter estimation of face
+<br/>recognition based on clustering, shape and corner detection
+<br/><b></b><br/>
+</td></tr><tr><td>b32631f456397462b3530757f3a73a2ccc362342</td><td>Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+<br/>3069
+</td></tr><tr><td>b3afa234996f44852317af382b98f5f557cab25a</td><td></td></tr><tr><td>df90850f1c153bfab691b985bfe536a5544e438b</td><td>FACE TRACKING ALGORITHM ROBUST TO POSE,
+<br/>ILLUMINATION AND FACE EXPRESSION CHANGES: A 3D
+<br/>PARAMETRIC MODEL APPROACH
+<br/><b></b><br/>via Bramante 65 - 26013, Crema (CR), Italy
+<br/>Luigi Arnone, Fabrizio Beverina
+<br/>STMicroelectronics - Advanced System Technology Group
+<br/>via Olivetti 5 - 20041, Agrate Brianza, Italy
+<br/>Keywords:
+<br/>Face tracking, expression changes, FACS, illumination changes.
+</td></tr><tr><td>df8da144a695269e159fb0120bf5355a558f4b02</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>International Conference on Recent Trends in engineering & Technology - 2013(ICRTET'2013)
+<br/>Face Recognition using PCA and Eigen Face
+<br/>Approach
+<br/>ME EXTC [VLSI & Embedded System]
+<br/>Sinhgad Academy of Engineering
+<br/>EXTC Department
+<br/>Pune, India
+</td></tr><tr><td>df577a89830be69c1bfb196e925df3055cafc0ed</td><td>Shift: A Zero FLOP, Zero Parameter Alternative to Spatial Convolutions
+<br/>UC Berkeley
+</td></tr><tr><td>dfabe7ef245ca68185f4fcc96a08602ee1afb3f7</td><td></td></tr><tr><td>df51dfe55912d30fc2f792561e9e0c2b43179089</td><td>Face Hallucination using Linear Models of Coupled
+<br/>Sparse Support
+<br/>grid and fuse them to suppress the aliasing caused by under-
+<br/>sampling [5], [6]. On the other hand, learning based meth-
+<br/>ods use coupled dictionaries to learn the mapping relations
+<br/>between low- and high- resolution image pairs to synthesize
+<br/>high-resolution images from low-resolution images [4], [7].
+<br/>The research community has lately focused on the latter
+<br/>category of super-resolution methods, since they can provide
+<br/>higher quality images and larger magnification factors.
+</td></tr><tr><td>df80fed59ffdf751a20af317f265848fe6bfb9c9</td><td>1666
+<br/>Learning Deep Sharable and Structural
+<br/>Detectors for Face Alignment
+</td></tr><tr><td>dfa80e52b0489bc2585339ad3351626dee1a8395</td><td>Human Action Forecasting by Learning Task Grammars
+</td></tr><tr><td>dfecaedeaf618041a5498cd3f0942c15302e75c3</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>A Recursive Framework for Expression Recognition: From
+<br/>Web Images to Deep Models to Game Dataset
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>df5fe0c195eea34ddc8d80efedb25f1b9034d07d</td><td>Robust Modified Active Shape Model for Automatic Facial Landmark
+<br/>Annotation of Frontal Faces
+</td></tr><tr><td>df674dc0fc813c2a6d539e892bfc74f9a761fbc8</td><td>IOSR Journal of Computer Engineering (IOSR-JCE)
+<br/>e-ISSN: 2278-0661, p- ISSN: 2278-8727Volume 10, Issue 6 (May. - Jun. 2013), PP 21-29
+<br/>www.iosrjournals.org
+<br/>An Image Mining System for Gender Classification & Age
+<br/>Prediction Based on Facial Features
+<br/> 1.Ms.Dhanashri Shirkey , 2Prof.Dr.S.R.Gupta,
+<br/>M.E(Scholar),Department Computer Science & Engineering, PRMIT & R, Badnera
+<br/>Asstt.Prof. Department Computer Science & Engineering, PRMIT & R, Badnera
+</td></tr><tr><td>da4170c862d8ae39861aa193667bfdbdf0ecb363</td><td>Multi-task CNN Model for Attribute Prediction
+</td></tr><tr><td>da15344a4c10b91d6ee2e9356a48cb3a0eac6a97</td><td></td></tr><tr><td>da5bfddcfe703ca60c930e79d6df302920ab9465</td><td></td></tr><tr><td>dac2103843adc40191e48ee7f35b6d86a02ef019</td><td>854
+<br/>Unsupervised Celebrity Face Naming in Web Videos
+</td></tr><tr><td>dae420b776957e6b8cf5fbbacd7bc0ec226b3e2e</td><td>RECOGNIZING EMOTIONS IN SPONTANEOUS FACIAL EXPRESSIONS
+<br/>Institut f¨ur Nachrichtentechnik
+<br/>Universit¨at Karlsruhe (TH), Germany
+</td></tr><tr><td>daba8f0717f3f47c272f018d0a466a205eba6395</td><td></td></tr><tr><td>daefac0610fdeff415c2a3f49b47968d84692e87</td><td>New Orleans, Louisiana, June 1 - 6, 2018. c(cid:13)2018 Association for Computational Linguistics
+<br/>Proceedings of NAACL-HLT 2018, pages 1481–1491
+<br/>1481
+</td></tr><tr><td>b49affdff167f5d170da18de3efa6fd6a50262a2</td><td>Author manuscript, published in "Workshop on Faces in 'Real-Life' Images: Detection, Alignment, and Recognition, Marseille : France
+<br/>(2008)"
+</td></tr><tr><td>b41374f4f31906cf1a73c7adda6c50a78b4eb498</td><td>This article has been accepted for inclusion in a future issue of this journal. Content is final as presented, with the exception of pagination.
+<br/>Iterative Gaussianization: From ICA to
+<br/>Random Rotations
+</td></tr><tr><td>b4d7ca26deb83cec1922a6964c1193e8dd7270e7</td><td></td></tr><tr><td>b4ee64022cc3ccd14c7f9d4935c59b16456067d3</td><td>Unsupervised Cross-Domain Image Generation
+</td></tr><tr><td>b40290a694075868e0daef77303f2c4ca1c43269</td><td>第 40 卷 第 4 期
+<br/>2014 年 4 月
+<br/>自 动 化 学 报
+<br/>ACTA AUTOMATICA SINICA
+<br/>Vol. 40, No. 4
+<br/>April, 2014
+<br/>融合局部与全局信息的头发形状模型
+<br/>王 楠 1 艾海舟 1
+<br/>摘 要 头发在人体表观中具有重要作用, 然而, 因为缺少有效的形状模型, 头发分割仍然是一个非常具有挑战性的问题. 本
+<br/>文提出了一种基于部件的模型, 它对头发形状以及环境变化更加鲁棒. 该模型将局部与全局信息相结合以描述头发的形状. 局
+<br/>部模型通过一系列算法构建, 包括全局形状词表生成, 词表分类器学习以及参数优化; 而全局模型刻画不同的发型, 采用支持
+<br/>向量机 (Support vector machine, SVM) 来学习, 它为所有潜在的发型配置部件并确定势函数. 在消费者图片上的实验证明
+<br/>了本文算法在头发形状多变和复杂环境等条件下的准确性与有效性.
+<br/>关键词 头发形状建模, 部件模型, 部件配置算法, 支持向量机
+<br/>引用格式 王楠, 艾海舟. 融合局部与全局信息的头发形状模型. 自动化学报, 2014, 40(4): 615−623
+<br/>DOI 10.3724/SP.J.1004.2014.00615
+<br/>Combining Local and Global Information for Hair Shape Modeling
+<br/>AI Hai-Zhou1
+</td></tr><tr><td>a2359c0f81a7eb032cff1fe45e3b80007facaa2a</td><td>Towards Structured Analysis of Broadcast Badminton Videos
+<br/>C.V.Jawahar
+<br/>CVIT, KCIS, IIIT Hyderabad
+</td></tr><tr><td>a2d9c9ed29bbc2619d5e03320e48b45c15155195</td><td></td></tr><tr><td>a2b54f4d73bdb80854aa78f0c5aca3d8b56b571d</td><td></td></tr><tr><td>a27735e4cbb108db4a52ef9033e3a19f4dc0e5fa</td><td>Intention from Motion
+</td></tr><tr><td>a50b4d404576695be7cd4194a064f0602806f3c4</td><td>In Proceedings of BMVC, Edimburgh, UK, September 2006
+<br/>Efficiently estimating facial expression and
+<br/>illumination in appearance-based tracking
+<br/>†ESCET, U. Rey Juan Carlos
+<br/>C/ Tulip´an, s/n
+<br/>28933 M´ostoles, Spain
+<br/>‡Facultad Inform´atica, UPM
+<br/>Campus de Montegancedo s/n
+<br/>28660 Boadilla del Monte, Spain
+<br/>http://www.dia.fi.upm.es/~pcr
+</td></tr><tr><td>a56c1331750bf3ac33ee07004e083310a1e63ddc</td><td>Vol. xx, pp. x
+<br/>c(cid:13) xxxx Society for Industrial and Applied Mathematics
+<br/>x–x
+<br/>Efficient Point-to-Subspace Query in (cid:96)1 with Application to Robust Object
+<br/>Instance Recognition
+</td></tr><tr><td>a54e0f2983e0b5af6eaafd4d3467b655a3de52f4</td><td>Face Recognition Using Convolution Filters and
+<br/>Neural Networks
+<br/>Head, Dept. of E&E,PEC
+<br/>Sec-12, Chandigarh – 160012
+<br/>Department of CSE & IT, PEC
+<br/>Sec-12, Chandigarh – 160012
+<br/>C.P. Singh
+<br/>Physics Department, CFSL,
+<br/>Sec-36, Chandigarh - 160036
+<br/>a
+<br/>of
+<br/>to: (a)
+<br/>potential method
+</td></tr><tr><td>a55efc4a6f273c5895b5e4c5009eabf8e5ed0d6a</td><td>818
+<br/>Continuous Head Movement Estimator for
+<br/>Driver Assistance: Issues, Algorithms,
+<br/>and On-Road Evaluations
+<br/>Mohan Manubhai Trivedi, Fellow, IEEE
+</td></tr><tr><td>a5c04f2ad6a1f7c50b6aa5b1b71c36af76af06be</td><td></td></tr><tr><td>a503eb91c0bce3a83bf6f524545888524b29b166</td><td></td></tr><tr><td>a5a44a32a91474f00a3cda671a802e87c899fbb4</td><td>Moments in Time Dataset: one million
+<br/>videos for event understanding
+</td></tr><tr><td>bd9eb65d9f0df3379ef96e5491533326e9dde315</td><td></td></tr><tr><td>bd07d1f68486052b7e4429dccecdb8deab1924db</td><td></td></tr><tr><td>bd8e2d27987be9e13af2aef378754f89ab20ce10</td><td></td></tr><tr><td>bd2d7c7f0145028e85c102fe52655c2b6c26aeb5</td><td>Attribute-based People Search: Lessons Learnt from a
+<br/>Practical Surveillance System
+<br/>Rogerio Feris
+<br/>IBM Watson
+<br/>http://rogerioferis.com
+<br/>Russel Bobbitt
+<br/>IBM Watson
+<br/>Lisa Brown
+<br/>IBM Watson
+<br/>IBM Watson
+</td></tr><tr><td>bdbba95e5abc543981fb557f21e3e6551a563b45</td><td>International Journal of Computational Intelligence and Applications
+<br/>Vol. 17, No. 2 (2018) 1850008 (15 pages)
+<br/>#.c The Author(s)
+<br/>DOI: 10.1142/S1469026818500086
+<br/>Speeding up the Hyperparameter Optimization of Deep
+<br/>Convolutional Neural Networks
+<br/>Knowledge Technology, Department of Informatics
+<br/>Universit€at Hamburg
+<br/>Vogt-K€olln-Str. 30, Hamburg 22527, Germany
+<br/>Received 15 August 2017
+<br/>Accepted 23 March 2018
+<br/>Published 18 June 2018
+<br/>Most learning algorithms require the practitioner to manually set the values of many hyper-
+<br/>parameters before the learning process can begin. However, with modern algorithms, the
+<br/>evaluation of a given hyperparameter setting can take a considerable amount of time and the
+<br/>search space is often very high-dimensional. We suggest using a lower-dimensional represen-
+<br/>tation of the original data to quickly identify promising areas in the hyperparameter space. This
+<br/>information can then be used to initialize the optimization algorithm for the original, higher-
+<br/>dimensional data. We compare this approach with the standard procedure of optimizing the
+<br/>hyperparameters only on the original input.
+<br/>We perform experiments with various state-of-the-art hyperparameter optimization algo-
+<br/>rithms such as random search, the tree of parzen estimators (TPEs), sequential model-based
+<br/>algorithm con¯guration (SMAC), and a genetic algorithm (GA). Our experiments indicate that
+<br/>it is possible to speed up the optimization process by using lower-dimensional data repre-
+<br/>sentations at the beginning, while increasing the dimensionality of the input later in the opti-
+<br/>mization process. This is independent of the underlying optimization procedure, making the
+<br/>approach promising for many existing hyperparameter optimization algorithms.
+<br/>Keywords: Hyperparameter optimization; hyperparameter importance; convolutional neural
+<br/>networks; genetic algorithm; Bayesian optimization.
+<br/>1. Introduction
+<br/>The performance of many contemporary machine learning algorithms depends cru-
+<br/>cially on the speci¯c initialization of hyperparameters such as the general architec-
+<br/>ture, the learning rate, regularization parameters, and many others.1,2 Indeed,
+<br/>This is an Open Access article published by World Scienti¯c Publishing Company. It is distributed under
+<br/>the terms of the Creative Commons Attribution 4.0 (CC-BY) License. Further distribution of this work is
+<br/>permitted, provided the original work is properly cited.
+<br/>1850008-1
+<br/>Int. J. Comp. Intel. Appl. 2018.17. Downloaded from www.worldscientific.comby WSPC on 07/18/18. Re-use and distribution is strictly not permitted, except for Open Access articles. </td></tr><tr><td>d1dfdc107fa5f2c4820570e369cda10ab1661b87</td><td>Super SloMo: High Quality Estimation of Multiple Intermediate Frames
+<br/>for Video Interpolation
+<br/>Erik Learned-Miller1
+<br/>1UMass Amherst
+<br/>2NVIDIA 3UC Merced
+</td></tr><tr><td>d1a43737ca8be02d65684cf64ab2331f66947207</td><td>IJB–S: IARPA Janus Surveillance Video Benchmark (cid:3)
+<br/>Kevin O’Connor z
+</td></tr><tr><td>d1082eff91e8009bf2ce933ac87649c686205195</td><td>(will be inserted by the editor)
+<br/>Pruning of Error Correcting Output Codes by
+<br/>Optimization of Accuracy-Diversity Trade off
+<br/>S¨ureyya ¨Oz¨o˘g¨ur Aky¨uz · Terry
+<br/>Windeatt · Raymond Smith
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>d69df51cff3d6b9b0625acdcbea27cd2bbf4b9c0</td><td></td></tr><tr><td>d6102a7ddb19a185019fd2112d2f29d9258f6dec</td><td>Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+<br/>3721
+</td></tr><tr><td>d6bfa9026a563ca109d088bdb0252ccf33b76bc6</td><td>Unsupervised Temporal Segmentation of Facial Behaviour
+<br/>Department of Computer Science and Engineering, IIT Kanpur
+</td></tr><tr><td>d6fb606e538763282e3942a5fb45c696ba38aee6</td><td></td></tr><tr><td>bc9003ad368cb79d8a8ac2ad025718da5ea36bc4</td><td>Technische Universit¨at M¨unchen
+<br/>Bildverstehen und Intelligente Autonome Systeme
+<br/>Facial Expression Recognition With A
+<br/>Three-Dimensional Face Model
+<br/>Vollst¨andiger Abdruck der von der Fakult¨at f¨ur Informatik der Technischen Uni-
+<br/>versit¨at M¨unchen zur Erlangung des akademischen Grades eines
+<br/>Doktors der Naturwissenschaften
+<br/>genehmigten Dissertation.
+<br/>Vorsitzender:
+<br/>Univ.-Prof. Dr. Johann Schlichter
+<br/>Pr¨ufer der Dissertation: 1. Univ.-Prof. Dr. Bernd Radig (i.R.)
+<br/>2. Univ.-Prof. Gudrun J. Klinker, Ph.D.
+<br/>Die Dissertation wurde am 04.07.2011 bei der Technischen Universit¨at M¨unchen
+<br/>eingereicht und durch die Fakult¨at f¨ur Informatik am 02.12.2011 angenommen.
+</td></tr><tr><td>bcc346f4a287d96d124e1163e4447bfc47073cd8</td><td></td></tr><tr><td>bcc172a1051be261afacdd5313619881cbe0f676</td><td>978-1-5090-4117-6/17/$31.00 ©2017 IEEE
+<br/>2197
+<br/>ICASSP 2017
+</td></tr><tr><td>bcfeac1e5c31d83f1ed92a0783501244dde5a471</td><td></td></tr><tr><td>bc2852fa0a002e683aad3fb0db5523d1190d0ca5</td><td></td></tr><tr><td>bcb99d5150d792001a7d33031a3bd1b77bea706b</td><td></td></tr><tr><td>bc811a66855aae130ca78cd0016fd820db1603ec</td><td>Towards three-dimensional face recognition in the real
+<br/>To cite this version:
+<br/>HAL Id: tel-00998798
+<br/>https://tel.archives-ouvertes.fr/tel-00998798
+<br/>Submitted on 2 Jun 2014
+<br/>archive for the deposit and dissemination of sci-
+<br/>entific research documents, whether they are pub-
+<br/>teaching and research institutions in France or
+<br/>destin´ee au d´epˆot et `a la diffusion de documents
+<br/>recherche fran¸cais ou ´etrangers, des laboratoires
+</td></tr><tr><td>bc9af4c2c22a82d2c84ef7c7fcc69073c19b30ab</td><td>MoCoGAN: Decomposing Motion and Content for Video Generation
+<br/>Snap Research
+<br/>NVIDIA
+</td></tr><tr><td>bcac3a870501c5510df80c2a5631f371f2f6f74a</td><td>CVPR
+<br/>#1387
+<br/>000
+<br/>001
+<br/>002
+<br/>003
+<br/>004
+<br/>005
+<br/>006
+<br/>007
+<br/>008
+<br/>009
+<br/>010
+<br/>011
+<br/>012
+<br/>013
+<br/>014
+<br/>015
+<br/>016
+<br/>017
+<br/>018
+<br/>019
+<br/>020
+<br/>021
+<br/>022
+<br/>023
+<br/>024
+<br/>025
+<br/>026
+<br/>027
+<br/>028
+<br/>029
+<br/>030
+<br/>031
+<br/>032
+<br/>033
+<br/>034
+<br/>035
+<br/>036
+<br/>037
+<br/>038
+<br/>039
+<br/>040
+<br/>041
+<br/>042
+<br/>043
+<br/>044
+<br/>045
+<br/>046
+<br/>047
+<br/>048
+<br/>049
+<br/>050
+<br/>051
+<br/>052
+<br/>053
+<br/>CVPR 2013 Submission #1387. CONFIDENTIAL REVIEW COPY. DO NOT DISTRIBUTE.
+<br/>CVPR
+<br/>#1387
+<br/>Structured Face Hallucination
+<br/>Anonymous CVPR submission
+<br/>Paper ID 1387
+</td></tr><tr><td>aed321909bb87c81121c841b21d31509d6c78f69</td><td></td></tr><tr><td>ae936628e78db4edb8e66853f59433b8cc83594f</td><td></td></tr><tr><td>ae2cf545565c157813798910401e1da5dc8a6199</td><td>Mahkonen et al. EURASIP Journal on Image and Video
+<br/>Processing (2018) 2018:61
+<br/>https://doi.org/10.1186/s13640-018-0303-9
+<br/>EURASIP Journal on Image
+<br/>and Video Processing
+<br/>RESEARCH
+<br/>Open Access
+<br/>Cascade of Boolean detector
+<br/>combinations
+</td></tr><tr><td>aebb9649bc38e878baef082b518fa68f5cda23a5</td><td>
+</td></tr><tr><td>aeff403079022683b233decda556a6aee3225065</td><td>DeepFace: Face Generation using Deep Learning
+</td></tr><tr><td>ae753fd46a744725424690d22d0d00fb05e53350</td><td>000
+<br/>001
+<br/>002
+<br/>003
+<br/>004
+<br/>005
+<br/>006
+<br/>007
+<br/>008
+<br/>009
+<br/>010
+<br/>011
+<br/>012
+<br/>013
+<br/>014
+<br/>015
+<br/>016
+<br/>017
+<br/>018
+<br/>019
+<br/>020
+<br/>021
+<br/>022
+<br/>023
+<br/>024
+<br/>025
+<br/>026
+<br/>027
+<br/>028
+<br/>029
+<br/>030
+<br/>031
+<br/>032
+<br/>033
+<br/>034
+<br/>035
+<br/>036
+<br/>037
+<br/>038
+<br/>039
+<br/>040
+<br/>041
+<br/>042
+<br/>043
+<br/>044
+<br/>Describing Clothing by Semantic Attributes
+<br/>Anonymous ECCV submission
+<br/>Paper ID 727
+</td></tr><tr><td>ae4e2c81c8a8354c93c4b21442c26773352935dd</td><td></td></tr><tr><td>ae85c822c6aec8b0f67762c625a73a5d08f5060d</td><td>This is the author's version of an article that has been published in this journal. Changes were made to this version by the publisher prior to publication.
+<br/>The final version of record is available at http://dx.doi.org/10.1109/TPAMI.2014.2353624
+<br/>IEEE TRANSACTION ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. M, NO. N, MONTH YEAR
+<br/>Retrieving Similar Styles to Parse Clothing
+</td></tr><tr><td>d861c658db2fd03558f44c265c328b53e492383a</td><td>Automated Face Extraction and Normalization of 3D Mesh Data
+</td></tr><tr><td>d83d2fb5403c823287f5889b44c1971f049a1c93</td><td>Motiv Emot
+<br/>DOI 10.1007/s11031-013-9353-6
+<br/>O R I G I N A L P A P E R
+<br/>Introducing the sick face
+<br/>Ó Springer Science+Business Media New York 2013
+</td></tr><tr><td>d8b568392970b68794a55c090c4dd2d7f90909d2</td><td>PDA Face Recognition System
+<br/>Using Advanced Correlation
+<br/>Filters
+<br/>Chee Kiat Ng
+<br/>2005
+<br/>Advisor: Prof. Khosla/Reviere
+</td></tr><tr><td>d83ae5926b05894fcda0bc89bdc621e4f21272da</td><td>version of the following thesis:
+<br/>Frugal Forests: Learning a Dynamic and Cost Sensitive
+<br/>Feature Extraction Policy for Anytime Activity Classification
+</td></tr><tr><td>d89cfed36ce8ffdb2097c2ba2dac3e2b2501100d</td><td>Robust Face Recognition via Multimodal Deep
+<br/>Face Representation
+</td></tr><tr><td>ab8f9a6bd8f582501c6b41c0e7179546e21c5e91</td><td>Nonparametric Face Verification Using a Novel
+<br/>Face Representation
+</td></tr><tr><td>ab58a7db32683aea9281c188c756ddf969b4cdbd</td><td>Efficient Solvers for Sparse Subspace Clustering
+</td></tr><tr><td>ab989225a55a2ddcd3b60a99672e78e4373c0df1</td><td>Sample, Computation vs Storage Tradeoffs for
+<br/>Classification Using Tensor Subspace Models
+</td></tr><tr><td>ab6776f500ed1ab23b7789599f3a6153cdac84f7</td><td>International Journal of Scientific & Engineering Research, Volume 6, Issue 4, April-2015 1212
+<br/>ISSN 2229-5518
+<br/>A Survey on Various Facial Expression
+<br/>Techniques
+</td></tr><tr><td>ab2b09b65fdc91a711e424524e666fc75aae7a51</td><td>Multi-modal Biomarkers to Discriminate Cognitive State*
+<br/>1MIT Lincoln Laboratory, Lexington, Massachusetts, USA
+<br/>2USARIEM, 3NSRDEC
+<br/>1. Introduction
+<br/>Multimodal biomarkers based on behavorial, neurophysiolgical, and cognitive measurements have
+<br/>recently obtained increasing popularity in the detection of cognitive stress- and neurological-based
+<br/>disorders. Such conditions are significantly and adversely affecting human performance and quality
+<br/>of life for a large fraction of the world’s population. Example modalities used in detection of these
+<br/>conditions include voice, facial expression, physiology, eye tracking, gait, and EEG analysis.
+<br/>Toward the goal of finding simple, noninvasive means to detect, predict and monitor cognitive
+<br/>stress and neurological conditions, MIT Lincoln Laboratory is developing biomarkers that satisfy
+<br/>three criteria. First, we seek biomarkers that reflect core components of cognitive status such as
+<br/>working memory capacity, processing speed, attention, and arousal. Second, and as importantly, we
+<br/>seek biomarkers that reflect timing and coordination relations both within components of each
+<br/>modality and across different modalities. This is based on the hypothesis that neural coordination
+<br/>across different parts of the brain is essential in cognition (Figure 1). An example of timing and
+<br/>coordination within a modality is the set of finely timed and synchronized physiological
+<br/>components of speech production, while an example of coordination across modalities is the timing
+<br/>and synchrony that occurs across speech and facial expression while speaking. Third, we seek
+<br/>multimodal biomarkers that contribute in a complementary fashion under various channel and
+<br/>background conditions. In this chapter, as an illustration of this biomarker approach we focus on
+<br/>cognitive stress and the particular case of detecting different cognitive load levels. We also briefly
+<br/>show how similar feature-extraction principles can be applied to a neurological condition through
+<br/>the example of major depression disorder (MDD). MDD is one of several neurological disorders
+<br/>where multi-modal biomarkers based on principles of timing and coordination are important for
+<br/>detection [11]-[22]. In our cognitive load experiments, we use two easily obtained noninvasive
+<br/>modalities, voice and face, and show how these two modalities can be fused to produce results on
+<br/>par with more invasive, “gold-standard” EEG measurements. Vocal and facial biomarkers will also
+<br/>be used in our MDD case study. In both application areas we focus on timing and coordination
+<br/>relations within the components of each modality.
+<br/>* Distribution A: public release.This work is sponsored by the Assistant Secretary of Defense for Research & Engineering under Air Force contract
+<br/>#FA8721-05-C-0002. Opinions,interpretations, conclusions, and recommendations are those of the authors and are not necessarily endorsed by the United States
+<br/>Government.
+</td></tr><tr><td>ab87dfccb1818bdf0b41d732da1f9335b43b74ae</td><td>SUBMITTED TO IEEE TRANSACTIONS ON SIGNAL PROCESSING
+<br/>Structured Dictionary Learning for Classification
+</td></tr><tr><td>ab1dfcd96654af0bf6e805ffa2de0f55a73c025d</td><td></td></tr><tr><td>abeda55a7be0bbe25a25139fb9a3d823215d7536</td><td>UNIVERSITATPOLITÈCNICADECATALUNYAProgramadeDoctorat:AUTOMÀTICA,ROBÒTICAIVISIÓTesiDoctoralUnderstandingHuman-CentricImages:FromGeometrytoFashionEdgarSimoSerraDirectors:FrancescMorenoNoguerCarmeTorrasMay2015 </td></tr><tr><td>ab1900b5d7cf3317d17193e9327d57b97e24d2fc</td><td></td></tr><tr><td>ab8fb278db4405f7db08fa59404d9dd22d38bc83</td><td>UNIVERSITÉ DE GENÈVE
+<br/>Département d'Informatique
+<br/>FACULTÉ DES SCIENCES
+<br/>Implicit and Automated Emotional
+<br/>Tagging of Videos
+<br/>THÈSE
+<br/>présenté à la Faculté des sciences de l'Université de Genève
+<br/>pour obtenir le grade de Docteur ès sciences, mention informatique
+<br/>par
+<br/>de
+<br/>Téhéran (IRAN)
+<br/>Thèse No 4368
+<br/>GENÈVE
+<br/>Repro-Mail - Université de Genève
+<br/>2011
+</td></tr><tr><td>e5737ffc4e74374b0c799b65afdbf0304ff344cb</td><td></td></tr><tr><td>e5823a9d3e5e33e119576a34cb8aed497af20eea</td><td>DocFace+: ID Document to Selfie* Matching
+</td></tr><tr><td>e5dfd17dbfc9647ccc7323a5d62f65721b318ba9</td><td></td></tr><tr><td>e56c4c41bfa5ec2d86c7c9dd631a9a69cdc05e69</td><td>Human Activity Recognition Based on Wearable
+<br/>Sensor Data: A Standardization of the
+<br/>State-of-the-Art
+<br/>Smart Surveillance Interest Group, Computer Science Department
+<br/>Universidade Federal de Minas Gerais, Brazil
+</td></tr><tr><td>e27c92255d7ccd1860b5fb71c5b1277c1648ed1e</td><td></td></tr><tr><td>e200c3f2849d56e08056484f3b6183aa43c0f13a</td><td></td></tr><tr><td>f437b3884a9e5fab66740ca2a6f1f3a5724385ea</td><td>Human Identification Technical Challenges
+<br/>DARPA
+<br/>3701 N. Fairfax Dr
+<br/>Arlington, VA 22203
+</td></tr><tr><td>f442a2f2749f921849e22f37e0480ac04a3c3fec</td><td></td></tr><tr><td>f4f6fc473effb063b7a29aa221c65f64a791d7f4</td><td>Downloaded From: https://www.spiedigitallibrary.org/journals/Journal-of-Electronic-Imaging on 4/20/2018 Terms of Use: https://www.spiedigitallibrary.org/terms-of-use
+<br/>FacialexpressionrecognitioninthewildbasedonmultimodaltexturefeaturesBoSunLiandongLiGuoyanZhouJunHeBoSun,LiandongLi,GuoyanZhou,JunHe,“Facialexpressionrecognitioninthewildbasedonmultimodaltexturefeatures,”J.Electron.Imaging25(6),061407(2016),doi:10.1117/1.JEI.25.6.061407. </td></tr><tr><td>f4c01fc79c7ead67899f6fe7b79dd1ad249f71b0</td><td></td></tr><tr><td>f4373f5631329f77d85182ec2df6730cbd4686a9</td><td>Soft Computing manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Recognizing Gender from Human Facial Regions using
+<br/>Genetic Algorithm
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>f47404424270f6a20ba1ba8c2211adfba032f405</td><td>International Journal of Emerging Technology and Advanced Engineering
+<br/>Website: www.ijetae.com (ISSN 2250-2459, Volume 2, Issue 5, May 2012)
+<br/>Identification of Face Age range Group using Neural
+<br/>Network
+</td></tr><tr><td>f3fcaae2ea3e998395a1443c87544f203890ae15</td><td></td></tr><tr><td>f3d9e347eadcf0d21cb0e92710bc906b22f2b3e7</td><td>NosePose: a competitive, landmark-free
+<br/>methodology for head pose estimation in the wild
+<br/>IMAGO Research Group - Universidade Federal do Paran´a
+</td></tr><tr><td>f355e54ca94a2d8bbc598e06e414a876eb62ef99</td><td></td></tr><tr><td>f3ea181507db292b762aa798da30bc307be95344</td><td>Covariance Pooling for Facial Expression Recognition
+<br/>†Computer Vision Lab, ETH Zurich, Switzerland
+<br/>‡VISICS, KU Leuven, Belgium
+</td></tr><tr><td>f3cf10c84c4665a0b28734f5233d423a65ef1f23</td><td>Title
+<br/>Temporal Exemplar-based Bayesian Networks for facial
+<br/>expression recognition
+<br/>Author(s)
+<br/>Shang, L; Chan, KP
+<br/>Citation
+<br/>Proceedings - 7Th International Conference On Machine
+<br/>Learning And Applications, Icmla 2008, 2008, p. 16-22
+<br/>Issued Date
+<br/>2008
+<br/>URL
+<br/>http://hdl.handle.net/10722/61208
+<br/>Rights
+<br/>This work is licensed under a Creative Commons Attribution-
+<br/>NonCommercial-NoDerivatives 4.0 International License.;
+<br/>International Conference on Machine Learning and Applications
+<br/>Proceedings. Copyright © IEEE.; ©2008 IEEE. Personal use of
+<br/>this material is permitted. However, permission to
+<br/>reprint/republish this material for advertising or promotional
+<br/>purposes or for creating new collective works for resale or
+<br/>redistribution to servers or lists, or to reuse any copyrighted
+<br/>component of this work in other works must be obtained from
+<br/>the IEEE.
+</td></tr><tr><td>f3b7938de5f178e25a3cf477107c76286c0ad691</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, MARCH 2017
+<br/>Object Detection with Deep Learning: A Review
+</td></tr><tr><td>ebedc841a2c1b3a9ab7357de833101648281ff0e</td><td></td></tr><tr><td>eb526174fa071345ff7b1fad1fad240cd943a6d7</td><td>Deeply Vulnerable – A Study of the Robustness of Face Recognition to
+<br/>Presentation Attacks
+</td></tr><tr><td>eb566490cd1aa9338831de8161c6659984e923fd</td><td>From Lifestyle Vlogs to Everyday Interactions
+<br/>EECS Department, UC Berkeley
+</td></tr><tr><td>eb9312458f84a366e98bd0a2265747aaed40b1a6</td><td>1-4244-1437-7/07/$20.00 ©2007 IEEE
+<br/>IV - 473
+<br/>ICIP 2007
+</td></tr><tr><td>eb716dd3dbd0f04e6d89f1703b9975cad62ffb09</td><td>Copyright
+<br/>by
+<br/>2012
+</td></tr><tr><td>ebabd1f7bc0274fec88a3dabaf115d3e226f198f</td><td>Driver drowsiness detection system based on feature
+<br/>representation learning using various deep networks
+<br/>School of Electrical Engineering, KAIST,
+<br/>Guseong-dong, Yuseong-gu, Dajeon, Rep. of Korea
+</td></tr><tr><td>ebb9d53668205c5797045ba130df18842e3eadef</td><td></td></tr><tr><td>eb48a58b873295d719827e746d51b110f5716d6c</td><td>Face Alignment Using K-cluster Regression Forests
+<br/>With Weighted Splitting
+</td></tr><tr><td>c7e4c7be0d37013de07b6d829a3bf73e1b95ad4e</td><td>The International Journal of Multimedia & Its Applications (IJMA) Vol.5, No.5, October 2013
+<br/>DYNEMO: A VIDEO DATABASE OF NATURAL FACIAL
+<br/>EXPRESSIONS OF EMOTIONS
+<br/>1LIP, Univ. Grenoble Alpes, BP 47 - 38040 Grenoble Cedex 9, France
+<br/>2LIG, Univ. Grenoble Alpes, BP 53 - 38041 Grenoble Cedex 9, France
+</td></tr><tr><td>c7c5f0fe1fcaf3787c7f78f7dc62f3497dcfdf3c</td><td>THE IMPACT OF PRODUCT PHOTO ON ONLINE CONSUMER
+<br/>PURCHASE INTENTION: AN IMAGE-PROCESSING ENABLED
+<br/>EMPIRICAL STUDY
+</td></tr><tr><td>c758b9c82b603904ba8806e6193c5fefa57e9613</td><td>Heterogeneous Face Recognition with CNNs
+<br/>INRIA Grenoble, Laboratoire Jean Kuntzmann
+</td></tr><tr><td>c7c8d150ece08b12e3abdb6224000c07a6ce7d47</td><td>DeMeshNet: Blind Face Inpainting for Deep MeshFace Verification
+<br/>National Laboratory of Pattern Recognition, CASIA
+<br/>Center for Research on Intelligent Perception and Computing, CASIA
+</td></tr><tr><td>c038beaa228aeec174e5bd52460f0de75e9cccbe</td><td>Temporal Segment Networks for Action
+<br/>Recognition in Videos
+</td></tr><tr><td>c043f8924717a3023a869777d4c9bee33e607fb5</td><td>Emotion Separation Is Completed Early and It Depends
+<br/>on Visual Field Presentation
+<br/><b>Lab for Human Brain Dynamics, RIKEN Brain Science Institute, Wakoshi, Saitama, Japan, 2 Lab for Human Brain Dynamics, AAI Scientific Cultural Services Ltd., Nicosia</b><br/>Cyprus
+</td></tr><tr><td>c05a7c72e679745deab9c9d7d481f7b5b9b36bdd</td><td>NPS-CS-11-005
+<br/>
+<br/>
+<br/>NAVAL
+<br/>POSTGRADUATE
+<br/>SCHOOL
+<br/>MONTEREY, CALIFORNIA
+<br/>by
+<br/>BIOMETRIC CHALLENGES FOR FUTURE DEPLOYMENTS:
+<br/>A STUDY OF THE IMPACT OF GEOGRAPHY, CLIMATE, CULTURE,
+<br/> AND SOCIAL CONDITIONS ON THE EFFECTIVE
+<br/>COLLECTION OF BIOMETRICS
+<br/>April 2011
+<br/>Approved for public release; distribution is unlimited
+</td></tr><tr><td>c02847a04a99a5a6e784ab580907278ee3c12653</td><td>Fine Grained Video Classification for
+<br/>Endangered Bird Species Protection
+<br/>Non-Thesis MS Final Report
+<br/>1. Introduction
+<br/>1.1 Background
+<br/>This project is about detecting eagles in videos. Eagles are endangered species at the brim of
+<br/>extinction since 1980s. With the bans of harmful pesticides, the number of eagles keep increasing.
+<br/>However, recent studies on golden eagles’ activities in the vicinity of wind turbines have shown
+<br/>significant number of turbine blade collisions with eagles as the major cause of eagles’ mortality. [1]
+<br/>This project is a part of a larger research project to build an eagle detection and deterrent system
+<br/>on wind turbine toward reducing eagles’ mortality. [2] The critical component of this study is a
+<br/>computer vision system for eagle detection in videos. The key requirement are that the system should
+<br/>work in real time and detect eagles at a far distance from the camera (i.e. in low resolution).
+<br/>There are three different bird species in my dataset - falcon, eagle and seagull. The reason for
+<br/>involving only these three species is based on the real world situation. Wind turbines are always
+<br/>installed near coast and mountain hill where falcons and seagulls will be the majority. So my model
+<br/>will classify the minority eagles out of other bird species during the immigration season and protecting
+<br/>them by using the deterrent system.
+<br/>1.2 Brief Approach
+<br/>Our approach represents a unified deep-learning architecture for eagle detection. Given videos,
+<br/>our goal is to detect eagle species at far distance from the camera, using both appearance and bird
+<br/>motion cues, so as to meet the recall-precision rates set by the user. Detecting eagle is a challenging
+<br/>task because of the following reasons. Frist, an eagle flies fast and high in the sky which means that
+<br/>we need a lens with wide angle such that captures their movement. However, a camera with wide
+<br/>angle produces a low resolution and low quality video and the detailed appearance of bird is
+<br/>compromised. Second, current neural network typically take as input low resolution images. This is
+<br/>because a higher resolution image will require larger filters and deeper networks which is turn hard to
+<br/>train [3]. So it is not clear whether the low resolution will cause challenge for fine-grained
+<br/>classification task. Last but not the least, there is not a large training database like PASCAL, MNIST
+</td></tr><tr><td>c0c8d720658374cc1ffd6116554a615e846c74b5</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+<br/>Modeling Multimodal Clues in a Hybrid Deep
+<br/>Learning Framework for Video Classification
+</td></tr><tr><td>c0d5c3aab87d6e8dd3241db1d931470c15b9e39d</td><td></td></tr><tr><td>eee8a37a12506ff5df72c402ccc3d59216321346</td><td>Uredniki:
+<br/>dr. Tomaž Erjavec
+<br/>Odsek za tehnologije znanja
+<br/>Institut »Jožef Stefan«, Ljubljana
+<br/>dr. Jerneja Žganec Gros
+<br/>Alpineon d.o.o, Ljubljana
+<br/>Založnik: Institut »Jožef Stefan«, Ljubljana
+<br/>Tisk: Birografika BORI d.o.o.
+<br/>Priprava zbornika: Mitja Lasič
+<br/>Oblikovanje naslovnice: dr. Damjan Demšar
+<br/>Tiskano iz predloga avtorjev
+<br/>Naklada: 50
+<br/>Ljubljana, oktober 2008
+<br/>Konferenco IS 2008 sofinancirata
+<br/>Ministrstvo za visoko šolstvo, znanost in tehnologijo
+<br/>Institut »Jožef Stefan«
+<br/>ISSN 1581-9973
+<br/>CIP - Kataložni zapis o publikaciji
+<br/>Narodna in univerzitetna knjižnica, Ljubljana
+<br/>004.934(082)
+<br/>81'25:004.6(082)
+<br/>004.8(063)
+<br/>oktober 2008, Ljubljana, Slovenia : zbornik 11. mednarodne
+<br/>Proceedings of the Sixth Language Technologies Conference, October
+<br/>16th-17th, 2008 : proceedings of the 11th International
+<br/>Multiconference Information Society - IS 2008, volume C / uredila,
+<br/>edited by Tomaž Erjavec, Jerneja Žganec Gros. - Ljubljana :
+<br/>1581-9973)
+<br/>ISBN 978-961-264-006-4
+<br/>družba 4. Information society 5. Erjavec, Tomaž, 1960- 6.
+<br/>Ljubljana)
+<br/>241520896
+</td></tr><tr><td>ee18e29a2b998eddb7f6663bb07891bfc7262248</td><td>1119
+<br/>Local Linear Discriminant Analysis Framework
+<br/>Using Sample Neighbors
+</td></tr><tr><td>ee461d060da58d6053d2f4988b54eff8655ecede</td><td></td></tr><tr><td>eefb8768f60c17d76fe156b55b8a00555eb40f4d</td><td>Subspace Scores for Feature Selection in Computer Vision
+</td></tr><tr><td>eed1dd2a5959647896e73d129272cb7c3a2e145c</td><td></td></tr><tr><td>ee92d36d72075048a7c8b2af5cc1720c7bace6dd</td><td>FACE RECOGNITION USING MIXTURES OF PRINCIPAL COMPONENTS
+<br/>Video and Display Processing
+<br/>Philips Research USA
+<br/>Briarcliff Manor, NY 10510
+</td></tr><tr><td>eedfb384a5e42511013b33104f4cd3149432bd9e</td><td>Multimodal Probabilistic Person
+<br/>Tracking and Identification
+<br/>in Smart Spaces
+<br/>zur Erlangung des akademischen Grades eines
+<br/>Doktors der Ingenieurwissenschaften
+<br/>der Fakultät für Informatik
+<br/>der Universität Fridericiana zu Karlsruhe (TH)
+<br/>genehmigte
+<br/>Dissertation
+<br/>von
+<br/>aus Karlsruhe
+<br/>Tag der mündlichen Prüfung: 20.11.2009
+<br/>Erster Gutachter:
+<br/>Zweiter Gutachter:
+<br/>Prof. Dr. A. Waibel
+<br/>Prof. Dr. R. Stiefelhagen
+</td></tr><tr><td>c9424d64b12a4abe0af201e7b641409e182babab</td><td>Article
+<br/>Which, When, and How: Hierarchical Clustering with
+<br/>Human–Machine Cooperation
+<br/>Academic Editor: Tom Burr
+<br/>Received: 3 November 2016; Accepted: 14 December 2016; Published: 21 December 2016
+</td></tr><tr><td>c903af0d69edacf8d1bff3bfd85b9470f6c4c243</td><td></td></tr><tr><td>fc1e37fb16006b62848def92a51434fc74a2431a</td><td>DRAFT
+<br/>A Comprehensive Analysis of Deep Regression
+</td></tr><tr><td>fc516a492cf09aaf1d319c8ff112c77cfb55a0e5</td><td></td></tr><tr><td>fcd3d69b418d56ae6800a421c8b89ef363418665</td><td>Effects of Aging over Facial Feature Analysis and Face
+<br/>Recognition
+<br/>Bogaziçi Un. Electronics Eng. Dept. March 2010
+</td></tr><tr><td>fcd77f3ca6b40aad6edbd1dab9681d201f85f365</td><td>c(cid:13)Copyright 2014
+</td></tr><tr><td>fcf8bb1bf2b7e3f71fb337ca3fcf3d9cf18daa46</td><td>MANUSCRIPT SUBMITTED TO IEEE TRANS. PATTERN ANAL. MACH. INTELL., JULY 2010
+<br/>Feature Selection via Sparse Approximation for
+<br/>Face Recognition
+</td></tr><tr><td>fcbf808bdf140442cddf0710defb2766c2d25c30</td><td>IJCV manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Unsupervised Semantic Action Discovery from Video
+<br/>Collections
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>fd4ac1da699885f71970588f84316589b7d8317b</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 6, NO. 1, JANUARY 2007
+<br/>Supervised Descent Method
+<br/>for Solving Nonlinear Least Squares
+<br/>Problems in Computer Vision
+</td></tr><tr><td>fdf533eeb1306ba418b09210387833bdf27bb756</td><td>951
+</td></tr><tr><td>fdda5852f2cffc871fd40b0cb1aa14cea54cd7e3</td><td>Im2Flow: Motion Hallucination from Static Images for Action Recognition
+<br/>UT Austin
+<br/>UT Austin
+<br/>UT Austin
+</td></tr><tr><td>fdfaf46910012c7cdf72bba12e802a318b5bef5a</td><td>Computerized Face Recognition in Renaissance
+<br/>Portrait Art
+</td></tr><tr><td>fd15e397629e0241642329fc8ee0b8cd6c6ac807</td><td>Semi-Supervised Clustering with Neural Networks
+<br/>IIIT-Delhi, India
+</td></tr><tr><td>fdca08416bdadda91ae977db7d503e8610dd744f</td><td>
+<br/>ICT-2009.7.1
+<br/>KSERA Project
+<br/>2010-248085
+<br/>Deliverable D3.1
+<br/>Deliverable D3.1
+<br/>Human Robot Interaction
+<br/>Human Robot Interaction
+<br/>18 October 2010
+<br/>Public Document
+<br/>The KSERA project (http://www.ksera
+<br/>KSERA project (http://www.ksera-project.eu) has received funding from the European Commission
+<br/>project.eu) has received funding from the European Commission
+<br/>under the 7th Framework Programme (FP7) for Research and Technological Development under grant
+<br/>under the 7th Framework Programme (FP7) for Research and Technological Development under grant
+<br/>under the 7th Framework Programme (FP7) for Research and Technological Development under grant
+<br/>agreement n°2010-248085.
+</td></tr><tr><td>fdaf65b314faee97220162980e76dbc8f32db9d6</td><td>Accepted Manuscript
+<br/>Face recognition using both visible light image and near-infrared image and a deep
+<br/>network
+<br/>PII:
+<br/>DOI:
+<br/>Reference:
+<br/>S2468-2322(17)30014-8
+<br/>10.1016/j.trit.2017.03.001
+<br/>TRIT 41
+<br/>To appear in:
+<br/>CAAI Transactions on Intelligence Technology
+<br/>Received Date: 30 January 2017
+<br/>Accepted Date: 28 March 2017
+<br/>Please cite this article as: K. Guo, S. Wu, Y. Xu, Face recognition using both visible light image and
+<br/>near-infrared image and a deep network, CAAI Transactions on Intelligence Technology (2017), doi:
+<br/>10.1016/j.trit.2017.03.001.
+<br/>This is a PDF file of an unedited manuscript that has been accepted for publication. As a service to
+<br/>our customers we are providing this early version of the manuscript. The manuscript will undergo
+<br/>copyediting, typesetting, and review of the resulting proof before it is published in its final form. Please
+<br/>note that during the production process errors may be discovered which could affect the content, and all
+<br/>legal disclaimers that apply to the journal pertain.
+</td></tr><tr><td>f2e9494d0dca9fb6b274107032781d435a508de6</td><td></td></tr><tr><td>f2c568fe945e5743635c13fe5535af157b1903d1</td><td></td></tr><tr><td>f26097a1a479fb6f32b27a93f8f32609cfe30fdc</td><td></td></tr><tr><td>f231046d5f5d87e2ca5fae88f41e8d74964e8f4f</td><td>We are IntechOpen,
+<br/>the first native scientific
+<br/>publisher of Open Access books
+<br/>3,350
+<br/>108,000
+<br/>1.7 M
+<br/>Open access books available
+<br/>International authors and editors
+<br/>Downloads
+<br/>Our authors are among the
+<br/>151
+<br/>Countries delivered to
+<br/>TOP 1%
+<br/>12.2%
+<br/>most cited scientists
+<br/>Contributors from top 500 universities
+<br/>Selection of our books indexed in the Book Citation Index
+<br/>in Web of Science™ Core Collection (BKCI)
+<br/>Interested in publishing with us?
+<br/>Numbers displayed above are based on latest data collected.
+<br/>For more information visit www.intechopen.com
+</td></tr><tr><td>f214bcc6ecc3309e2efefdc21062441328ff6081</td><td></td></tr><tr><td>f5770dd225501ff3764f9023f19a76fad28127d4</td><td>Real Time Online Facial Expression Transfer
+<br/>with Single Video Camera
+</td></tr><tr><td>f519723238701849f1160d5a9cedebd31017da89</td><td>Impact of multi-focused images on recognition of soft biometric traits
+<br/>aEURECOM, Campus SophiaTech, 450 Route des Chappes, CS 50193 - 06904 Biot Sophia
+<br/>
+<br/>Antipolis cedex, FRANCE
+</td></tr><tr><td>f558af209dd4c48e4b2f551b01065a6435c3ef33</td><td>International Journal of Emerging Technology in Computer Science & Electronics (IJETCSE)
+<br/>ISSN: 0976-1353 Volume 23 Issue 1 –JUNE 2016.
+<br/>AN ENHANCED ATTRIBUTE
+<br/>RERANKING DESIGN FOR WEB IMAGE
+<br/>SEARCH
+<br/>#Student,Cse, CIET, Lam,Guntur, India
+<br/>* Assistant Professort,Cse, CIET, Lam,Guntur , India
+</td></tr><tr><td>e393a038d520a073b9835df7a3ff104ad610c552</td><td>Automatic temporal segment
+<br/>detection via bilateral long short-
+<br/>term memory recurrent neural
+<br/>networks
+<br/>detection via bilateral long short-term memory recurrent neural networks,” J.
+<br/>Electron. Imaging 26(2), 020501 (2017), doi: 10.1117/1.JEI.26.2.020501.
+<br/>Downloaded From: http://electronicimaging.spiedigitallibrary.org/ on 03/03/2017 Terms of Use: http://spiedigitallibrary.org/ss/termsofuse.aspx </td></tr><tr><td>e3657ab4129a7570230ff25ae7fbaccb4ba9950c</td><td></td></tr><tr><td>e315959d6e806c8fbfc91f072c322fb26ce0862b</td><td>An Efficient Face Recognition System Based on Sub-Window
+<br/>International Journal of Soft Computing and Engineering (IJSCE)
+<br/>ISSN: 2231-2307, Volume-1, Issue-6, January 2012
+<br/>Extraction Algorithm
+</td></tr><tr><td>e3c011d08d04c934197b2a4804c90be55e21d572</td><td>How to Train Triplet Networks with 100K Identities?
+<br/>Orion Star
+<br/>Beijing, China
+<br/>Orion Star
+<br/>Beijing, China
+<br/>Orion Star
+<br/>Beijing, China
+</td></tr><tr><td>e39a0834122e08ba28e7b411db896d0fdbbad9ba</td><td>1368
+<br/>Maximum Likelihood Estimation of Depth Maps
+<br/>Using Photometric Stereo
+</td></tr><tr><td>e3917d6935586b90baae18d938295e5b089b5c62</td><td>152
+<br/>Face Localization and Authentication
+<br/>Using Color and Depth Images
+</td></tr><tr><td>cfa572cd6ba8dfc2ee8ac3cc7be19b3abff1a8a2</td><td></td></tr><tr><td>cfffae38fe34e29d47e6deccfd259788176dc213</td><td>TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. X, NO. X, DECEMBER 2012
+<br/>Matrix Completion for Weakly-supervised
+<br/>Multi-label Image Classification
+</td></tr><tr><td>cfd4004054399f3a5f536df71f9b9987f060f434</td><td>IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. ??, NO. ??, ?? 20??
+<br/>Person Recognition in Personal Photo Collections
+</td></tr><tr><td>cfb8bc66502fb5f941ecdb22aec1fdbfdb73adce</td><td></td></tr><tr><td>cf875336d5a196ce0981e2e2ae9602580f3f6243</td><td>7 What 1
+<br/>Rosalind W. Picard
+<br/>It Mean for a Computer to "Have" Emotions?
+<br/>There is a lot of talk about giving machines emotions, some of
+<br/>it fluff. Recently at a large technical meeting, a researcher stood up
+<br/>and talked of how a Bamey stuffed animal [the purple dinosaur for
+<br/>kids) "has emotions." He did not define what he meant by this, but
+<br/>after repeating it several times, it became apparent that children
+<br/>attributed emotions to Barney, and that Barney had deliberately
+<br/>expressive behaviors that would encourage the kids to think. Bar-
+<br/>ney had emotions. But kids have attributed emotions to dolls and
+<br/>stuffed animals for as long a s we know; and most of my technical
+<br/>colleagues would agree that such toys have never had and still do
+<br/>not have emotions. What is different now that prompts a researcher
+<br/>to make such a claim? Is the computational plush an example of a
+<br/>computer that really does have emotions?
+<br/>If not Barney, then what would be an example of a computa-
+<br/>tional system that has emotions? I am not a philosopher, and this
+<br/>paper will not be a discussion of the meaning of this question in
+<br/>any philosophical sense. However, as an engineer I am interested
+<br/>in what capabilities I would require a machine to have before I
+<br/>would say that it "has emotions," if that is even possible.
+<br/>Theorists still grappl~ with the problem of defining emotion,
+<br/>after many decades of discussion, and no clean definition looks
+<br/>likely to emerge. Even without a precise definition, one can still
+<br/>begin to say concrete things about certain components of emotion,
+<br/>at least based on what is known about human and animal emo-
+<br/>tions. Of course, much is still u d a o w n about human emotions, so
+<br/>we are nowhere near being able to model them, much less dupli-
+<br/>cate all their functions in machines.'~lso, all scientific findings are
+<br/>subject to revision-history has certainly taught us humility, that
+<br/>what scientists believed to be true at one point has often been
+<br/>changed at a later date.
+<br/>I wish to begin by mentioning four motivations for giving
+<br/>machines certain emotional abilities (and there are more). One goal
+<br/>is to build robots and synthetic characters that can emulate living
+<br/>humans and animals-for example, to build a humanoid robot. A
+<br/>I
+</td></tr><tr><td>cf54a133c89f730adc5ea12c3ac646971120781c</td><td></td></tr><tr><td>cfbb2d32586b58f5681e459afd236380acd86e28</td><td>Improving Alignment of Faces for Recognition
+<br/>Christopher J. Pal
+<br/>D´epartement de g´enie informatique et g´enie logiciel
+<br/>´Ecole Polytechnique de Montr´eal,
+<br/>D´epartement de g´enie informatique et g´enie logiciel
+<br/>´Ecole Polytechnique de Montr´eal,
+<br/>Qu´ebec, Canada
+<br/>Qu´ebec, Canada
+</td></tr><tr><td>cfa92e17809e8d20ebc73b4e531a1b106d02b38c</td><td>Advances in Data Analysis and Classification manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Parametric Classification with Soft Labels using the
+<br/>Evidential EM Algorithm
+<br/>Linear Discriminant Analysis vs. Logistic Regression
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>cfdc632adcb799dba14af6a8339ca761725abf0a</td><td>Probabilistic Formulations of Regression with Mixed
+<br/>Guidance
+</td></tr><tr><td>cfc30ce53bfc204b8764ebb764a029a8d0ad01f4</td><td>Regularizing Deep Neural Networks by Noise:
+<br/>Its Interpretation and Optimization
+<br/>Dept. of Computer Science and Engineering, POSTECH, Korea
+</td></tr><tr><td>cf86616b5a35d5ee777585196736dfafbb9853b5</td><td>This article has been accepted for inclusion in a future issue of this journal. Content is final as presented, with the exception of pagination.
+<br/>Learning Multiscale Active Facial Patches for
+<br/>Expression Analysis
+</td></tr><tr><td>cad52d74c1a21043f851ae14c924ac689e197d1f</td><td>From Ego to Nos-vision:
+<br/>Detecting Social Relationships in First-Person Views
+<br/>Universit`a degli Studi di Modena e Reggio Emilia
+<br/>Via Vignolese 905, 41125 Modena - Italy
+</td></tr><tr><td>cac8bb0e393474b9fb3b810c61efdbc2e2c25c29</td><td></td></tr><tr><td>cad24ba99c7b6834faf6f5be820dd65f1a755b29</td><td>Understanding hand-object
+<br/>manipulation by modeling the
+<br/>contextual relationship between actions,
+<br/>grasp types and object attributes
+<br/>Journal Title
+<br/>XX(X):1–14
+<br/>c(cid:13)The Author(s) 2016
+<br/>Reprints and permission:
+<br/>sagepub.co.uk/journalsPermissions.nav
+<br/>DOI: 10.1177/ToBeAssigned
+<br/>www.sagepub.com/
+</td></tr><tr><td>cadba72aa3e95d6dcf0acac828401ddda7ed8924</td><td>THÈSE PRÉSENTÉE À LA FACULTÉ DES SCIENCES
+<br/>POUR L’OBTENTION DU GRADE DE DOCTEUR ÈS SCIENCES
+<br/>Algorithms and VLSI Architectures
+<br/>for Low-Power Mobile Face Verification
+<br/>par
+<br/>Acceptée sur proposition du jury:
+<br/>Prof. F. Pellandini, directeur de thèse
+<br/>PD Dr. M. Ansorge, co-directeur de thèse
+<br/>Prof. P.-A. Farine, rapporteur
+<br/>Dr. C. Piguet, rapporteur
+<br/>Soutenue le 2 juin 2005
+<br/>INSTITUT DE MICROTECHNIQUE
+<br/>UNIVERSITÉ DE NEUCHÂTEL
+<br/>2006
+</td></tr><tr><td>ca606186715e84d270fc9052af8500fe23befbda</td><td>Using Subclass Discriminant Analysis, Fuzzy Integral and Symlet Decomposition for
+<br/>Face Recognition
+<br/>Department of Electrical Engineering,
+<br/>Iran Univ. of Science and Technology,
+<br/>Narmak, Tehran, Iran
+<br/>Department of Electrical Engineering,
+<br/>Iran Univ. of Science and Technology,
+<br/>Department of Electrical Engineering,
+<br/>Iran Univ. of Science and Technology,
+<br/>Narmak, Tehran, Iran
+<br/>Narmak, Tehran, Iran
+</td></tr><tr><td>e465f596d73f3d2523dbf8334d29eb93a35f6da0</td><td></td></tr><tr><td>e4aeaf1af68a40907fda752559e45dc7afc2de67</td><td></td></tr><tr><td>e4c3d5d43cb62ac5b57d74d55925bdf76205e306</td><td></td></tr><tr><td>e4a1b46b5c639d433d21b34b788df8d81b518729</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+<br/>Side Information for Face Completion: a Robust
+<br/>PCA Approach
+</td></tr><tr><td>e4c81c56966a763e021938be392718686ba9135e</td><td></td></tr><tr><td>e4e95b8bca585a15f13ef1ab4f48a884cd6ecfcc</td><td>Face Recognition with Independent Component Based
+<br/>Super-resolution
+<br/>aFaculty of Engineering and Natural Sciences, Sabanci Univ., Istanbul, Turkiye, 34956
+<br/>bSchool of Elec. and Comp. Eng. , Georgia Inst. of Tech., Atlanta, GA, USA, 30332-0250
+</td></tr><tr><td>e43ea078749d1f9b8254e0c3df4c51ba2f4eebd5</td><td>Facial Expression Recognition Based on Constrained
+<br/>Local Models and Support Vector Machines
+</td></tr><tr><td>e476cbcb7c1de73a7bcaeab5d0d59b8b3c4c1cbf</td><td></td></tr><tr><td>e475e857b2f5574eb626e7e01be47b416deff268</td><td>Facial Emotion Recognition Using Nonparametric
+<br/>Weighted Feature Extraction and Fuzzy Classifier
+</td></tr><tr><td>e4391993f5270bdbc621b8d01702f626fba36fc2</td><td>Author manuscript, published in "18th Scandinavian Conference on Image Analysis (2013)"
+<br/> DOI : 10.1007/978-3-642-38886-6_31
+</td></tr><tr><td>e4d8ba577cabcb67b4e9e1260573aea708574886</td><td>UM SISTEMA DE RECOMENDAC¸ ˜AO INTELIGENTE BASEADO EM V´IDIO
+<br/>AULAS PARA EDUCAC¸ ˜AO A DIST ˆANCIA
+<br/>Gaspare Giuliano Elias Bruno
+<br/>Tese de Doutorado apresentada ao Programa
+<br/>de P´os-gradua¸c˜ao em Engenharia de Sistemas e
+<br/>Computa¸c˜ao, COPPE, da Universidade Federal
+<br/>do Rio de Janeiro, como parte dos requisitos
+<br/>necess´arios `a obten¸c˜ao do t´ıtulo de Doutor em
+<br/>Engenharia de Sistemas e Computa¸c˜ao.
+<br/>Orientadores: Edmundo Albuquerque de
+<br/>Souza e Silva
+<br/>Rosa Maria Meri Le˜ao
+<br/>Rio de Janeiro
+<br/>Janeiro de 2016
+</td></tr><tr><td>e475deadd1e284428b5e6efd8fe0e6a5b83b9dcd</td><td>Accepted in Pattern Recognition Letters
+<br/>Pattern Recognition Letters
+<br/>journal homepage: www.elsevier.com
+<br/>Are you eligible? Predicting adulthood from face images via class specific mean
+<br/>autoencoder
+<br/>IIIT-Delhi, New Delhi, 110020, India
+<br/>Article history:
+<br/>Received 15 March 2017
+</td></tr><tr><td>e4d0e87d0bd6ead4ccd39fc5b6c62287560bac5b</td><td>Implicit Video Multi-Emotion Tagging by Exploiting Multi-Expression
+<br/>Relations
+</td></tr><tr><td>fe9c460d5ca625402aa4d6dd308d15a40e1010fa</td><td>Neural Architecture for Temporal Emotion
+<br/>Classification
+<br/>Universit¨at Ulm, Neuroinformatik, Germany
+</td></tr><tr><td>fe7c0bafbd9a28087e0169259816fca46db1a837</td><td></td></tr><tr><td>fe48f0e43dbdeeaf4a03b3837e27f6705783e576</td><td></td></tr><tr><td>fea83550a21f4b41057b031ac338170bacda8805</td><td>Learning a Metric Embedding
+<br/>for Face Recognition
+<br/>using the Multibatch Method
+<br/>Orcam Ltd., Jerusalem, Israel
+</td></tr><tr><td>feeb0fd0e254f38b38fe5c1022e84aa43d63f7cc</td><td>EURECOM
+<br/>Multimedia Communications Department
+<br/>and
+<br/>Mobile Communications Department
+<br/>2229, route des Crˆetes
+<br/>B.P. 193
+<br/>06904 Sophia-Antipolis
+<br/>FRANCE
+<br/>Research Report RR-11-255
+<br/>Search Pruning with Soft Biometric Systems:
+<br/>Efficiency-Reliability Tradeoff
+<br/>June 1st, 2011
+<br/>Last update June 1st, 2011
+<br/>1EURECOM’s research is partially supported by its industrial members: BMW Group, Cisco,
+<br/>Monaco Telecom, Orange, SAP, SFR, Sharp, STEricsson, Swisscom, Symantec, Thales.
+</td></tr><tr><td>fe108803ee97badfa2a4abb80f27fa86afd9aad9</td><td></td></tr><tr><td>fe0c51fd41cb2d5afa1bc1900bbbadb38a0de139</td><td>Rahman et al. EURASIP Journal on Image and Video Processing (2015) 2015:35
+<br/>DOI 10.1186/s13640-015-0090-5
+<br/>RESEARCH
+<br/>Open Access
+<br/>Bayesian face recognition using 2D
+<br/>Gaussian-Hermite moments
+</td></tr><tr><td>c8db8764f9d8f5d44e739bbcb663fbfc0a40fb3d</td><td>Modeling for part-based visual object
+<br/>detection based on local features
+<br/>Von der Fakult¨at f¨ur Elektrotechnik und Informationstechnik
+<br/>der Rheinisch-Westf¨alischen Technischen Hochschule Aachen
+<br/>zur Erlangung des akademischen Grades eines Doktors
+<br/>der Ingenieurwissenschaften genehmigte Dissertation
+<br/>vorgelegt von
+<br/>Diplom-Ingenieur
+<br/>aus Neuss
+<br/>Berichter:
+<br/>Univ.-Prof. Dr.-Ing. Jens-Rainer Ohm
+<br/>Univ.-Prof. Dr.-Ing. Til Aach
+<br/>Tag der m¨undlichen Pr¨ufung: 28. September 2011
+<br/>Diese Dissertation ist auf den Internetseiten der
+<br/>Hochschulbibliothek online verf¨ugbar.
+</td></tr><tr><td>c86e6ed734d3aa967deae00df003557b6e937d3d</td><td>Generative Adversarial Networks with
+<br/>Decoder-Encoder Output Noise
+<br/>conditional distribution of their neighbors. In [32], Portilla and
+<br/>Simoncelli proposed a parametric texture model based on joint
+<br/>statistics, which uses a decomposition method that is called
+<br/>steerable pyramid decomposition to decompose the texture
+<br/>of images. An example-based super-resolution algorithm [11]
+<br/>was proposed in 2002, which uses a Markov network to model
+<br/>the spatial relationship between the pixels of an image. A
+<br/>scene completion algorithm [16] was proposed in 2007, which
+<br/>applied a semantic scene match technique. These traditional
+<br/>algorithms can be applied to particular image generation tasks,
+<br/>such as texture synthesis and super-resolution. Their common
+<br/>characteristic is that they predict the images pixel by pixel
+<br/>rather than generate an image as a whole, and the basic idea
+<br/>of them is to make an interpolation according to the existing
+<br/>part of the images. Here, the problem is, given a set of images,
+<br/>can we generate totally new images with the same distribution
+<br/>of the given ones?
+</td></tr><tr><td>c8a4b4fe5ff2ace9ab9171a9a24064b5a91207a3</td><td>LOCATING FACIAL LANDMARKS WITH BINARY MAP CROSS-CORRELATIONS
+<br/>J´er´emie Nicolle
+<br/>K´evin Bailly
+<br/>Univ. Pierre & Marie Curie, ISIR - CNRS UMR 7222, F-75005, Paris - France
+</td></tr><tr><td>c866a2afc871910e3282fd9498dce4ab20f6a332</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Surveillance Face Recognition Challenge
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>c82c147c4f13e79ad49ef7456473d86881428b89</td><td></td></tr><tr><td>c84233f854bbed17c22ba0df6048cbb1dd4d3248</td><td>Exploring Locally Rigid Discriminative
+<br/>Patches for Learning Relative Attributes
+<br/>http://researchweb.iiit.ac.in/~yashaswi.verma/
+<br/>http://www.iiit.ac.in/~jawahar/
+<br/>CVIT
+<br/>IIIT-Hyderabad, India
+<br/>http://cvit.iiit.ac.in
+</td></tr><tr><td>c8adbe00b5661ab9b3726d01c6842c0d72c8d997</td><td>Deep Architectures for Face Attributes
+<br/>Computer Vision and Machine Learning Group, Flickr, Yahoo,
+</td></tr><tr><td>fb4545782d9df65d484009558e1824538030bbb1</td><td></td></tr><tr><td>fb5280b80edcf088f9dd1da769463d48e7b08390</td><td></td></tr><tr><td>fba464cb8e3eff455fe80e8fb6d3547768efba2f</td><td>
+<br/>International Journal of Engineering and Applied Sciences (IJEAS)
+<br/> ISSN: 2394-3661, Volume-3, Issue-2, February 2016
+<br/>Survey Paper on Emotion Recognition
+<br/>
+</td></tr><tr><td>fbb2f81fc00ee0f257d4aa79bbef8cad5000ac59</td><td>Reading Hidden Emotions: Spontaneous
+<br/>Micro-expression Spotting and Recognition
+</td></tr><tr><td>fb9ad920809669c1b1455cc26dbd900d8e719e61</td><td>3D Gaze Estimation from Remote RGB-D Sensors
+<br/>THÈSE NO 6680 (2015)
+<br/>PRÉSENTÉE LE 9 OCTOBRE 2015
+<br/>À LA FACULTÉ DES SCIENCES ET TECHNIQUES DE L'INGÉNIEUR
+<br/>LABORATOIRE DE L'IDIAP
+<br/>PROGRAMME DOCTORAL EN GÉNIE ÉLECTRIQUE
+<br/>ÉCOLE POLYTECHNIQUE FÉDÉRALE DE LAUSANNE
+<br/>POUR L'OBTENTION DU GRADE DE DOCTEUR ÈS SCIENCES
+<br/>PAR
+<br/>acceptée sur proposition du jury:
+<br/>Prof. K. Aminian, président du jury
+<br/>Dr J.-M. Odobez, directeur de thèse
+<br/>Prof. L.-Ph. Morency, rapporteur
+<br/>Prof. D. Witzner Hansen, rapporteur
+<br/>Dr R. Boulic, rapporteur
+<br/>Suisse
+<br/>2015
+</td></tr><tr><td>edef98d2b021464576d8d28690d29f5431fd5828</td><td>Pixel-Level Alignment of Facial Images
+<br/>for High Accuracy Recognition
+<br/>Using Ensemble of Patches
+</td></tr><tr><td>ed04e161c953d345bcf5b910991d7566f7c486f7</td><td>Combining facial expression analysis and synthesis on a
+<br/>Mirror my emotions!
+<br/>robot
+</td></tr><tr><td>c178a86f4c120eca3850a4915134fff44cbccb48</td><td></td></tr><tr><td>c1d2d12ade031d57f8d6a0333cbe8a772d752e01</td><td>Journal of Math-for-Industry, Vol.2(2010B-5), pp.147–156
+<br/>Convex optimization techniques for the efficient recovery of a sparsely
+<br/>corrupted low-rank matrix
+<br/>D 案
+<br/>Received on August 10, 2010 / Revised on August 31, 2010
+<br/>E 案
+</td></tr><tr><td>c10a15e52c85654db9c9343ae1dd892a2ac4a279</td><td>Int J Comput Vis (2012) 100:134–153
+<br/>DOI 10.1007/s11263-011-0494-3
+<br/>Learning the Relative Importance of Objects from Tagged Images
+<br/>for Retrieval and Cross-Modal Search
+<br/>Received: 16 December 2010 / Accepted: 23 August 2011 / Published online: 18 October 2011
+<br/>© Springer Science+Business Media, LLC 2011
+</td></tr><tr><td>c1fc70e0952f6a7587b84bf3366d2e57fc572fd7</td><td></td></tr><tr><td>c1dfabe36a4db26bf378417985a6aacb0f769735</td><td>Journal of Computer Vision and Image Processing, NWPJ-201109-50
+<br/>1
+<br/>Describing Visual Scene through EigenMaps
+<br/>
+</td></tr><tr><td>c1482491f553726a8349337351692627a04d5dbe</td><td></td></tr><tr><td>c1ff88493721af1940df0d00bcfeefaa14f1711f</td><td>CVPR
+<br/>#1369
+<br/>000
+<br/>001
+<br/>002
+<br/>003
+<br/>004
+<br/>005
+<br/>006
+<br/>007
+<br/>008
+<br/>009
+<br/>010
+<br/>011
+<br/>012
+<br/>013
+<br/>014
+<br/>015
+<br/>016
+<br/>017
+<br/>018
+<br/>019
+<br/>020
+<br/>021
+<br/>022
+<br/>023
+<br/>024
+<br/>025
+<br/>026
+<br/>027
+<br/>028
+<br/>029
+<br/>030
+<br/>031
+<br/>032
+<br/>033
+<br/>034
+<br/>035
+<br/>036
+<br/>037
+<br/>038
+<br/>039
+<br/>040
+<br/>041
+<br/>042
+<br/>043
+<br/>044
+<br/>045
+<br/>046
+<br/>047
+<br/>048
+<br/>049
+<br/>050
+<br/>051
+<br/>052
+<br/>053
+<br/>CVPR 2010 Submission #1369. CONFIDENTIAL REVIEW COPY. DO NOT DISTRIBUTE.
+<br/>CVPR
+<br/>#1369
+<br/>Subspace Regression: Predicting a Subspace from one Sample
+<br/>Anonymous CVPR submission
+<br/>Paper ID 1369
+</td></tr><tr><td>c11eb653746afa8148dc9153780a4584ea529d28</td><td>Global and Local Consistent Wavelet-domain Age
+<br/>Synthesis
+</td></tr><tr><td>c1ebbdb47cb6a0ed49c4d1cf39d7565060e6a7ee</td><td>Robust Facial Landmark Localization Based on
+</td></tr><tr><td>c17a332e59f03b77921942d487b4b102b1ee73b6</td><td>Learning an appearance-based gaze estimator
+<br/>from one million synthesised images
+<br/>Tadas Baltruˇsaitis2
+</td></tr><tr><td>c1e76c6b643b287f621135ee0c27a9c481a99054</td><td></td></tr><tr><td>c6f3399edb73cfba1248aec964630c8d54a9c534</td><td>A Comparison of CNN-based Face and Head Detectors for
+<br/>Real-Time Video Surveillance Applications
+<br/>1 ´Ecole de technologie sup´erieure, Universit´e du Qu´ebec, Montreal, Canada
+<br/>2 Genetec Inc., Montreal, Canada
+</td></tr><tr><td>c62c07de196e95eaaf614fb150a4fa4ce49588b4</td><td>Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intelligence (IJCAI-18)
+<br/>1078
+</td></tr><tr><td>ec1e03ec72186224b93b2611ff873656ed4d2f74</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+<br/>3D Reconstruction of “In-the-Wild” Faces in
+<br/>Images and Videos
+</td></tr><tr><td>ec22eaa00f41a7f8e45ed833812d1ac44ee1174e</td><td></td></tr><tr><td>ec54000c6c0e660dd99051bdbd7aed2988e27ab8</td><td>TWO IN ONE: JOINT POSE ESTIMATION AND FACE RECOGNITION WITH P2CA1
+<br/>*Dept. Teoria del Senyal i Comunicacions - Universitat Politècnica de Catalunya, Barcelona, Spain
+<br/>+Dipartimento di Elettronica e Informazione - Politecnico di Milano, Meiland, Italy
+</td></tr><tr><td>ec0104286c96707f57df26b4f0a4f49b774c486b</td><td>758
+<br/>An Ensemble CNN2ELM for Age Estimation
+</td></tr><tr><td>4e32fbb58154e878dd2fd4b06398f85636fd0cf4</td><td>A Hierarchical Matcher using Local Classifier Chains
+<br/>L. Zhang and I.A. Kakadiaris
+<br/>Computational Biomedicine Lab, 4849 Calhoun Rd, Rm 373, Houston, TX 77204
+</td></tr><tr><td>4e27fec1703408d524d6b7ed805cdb6cba6ca132</td><td>SSD-Sface: Single shot multibox detector for small faces
+<br/>C. Thuis
+</td></tr><tr><td>4e6c9be0b646d60390fe3f72ce5aeb0136222a10</td><td>Long-term Temporal Convolutions
+<br/>for Action Recognition
+</td></tr><tr><td>4e444db884b5272f3a41e4b68dc0d453d4ec1f4c</td><td></td></tr><tr><td>4ef0a6817a7736c5641dc52cbc62737e2e063420</td><td>International Journal of Advanced Computer Research (ISSN (Print): 2249-7277 ISSN (Online): 2277-7970)
+<br/>Volume-4 Number-4 Issue-17 December-2014
+<br/>Study of Face Recognition Techniques
+<br/>Received: 10-November-2014; Revised: 18-December-2014; Accepted: 23-December-2014
+<br/>©2014 ACCENTS
+</td></tr><tr><td>4e7ebf3c4c0c4ecc48348a769dd6ae1ebac3bf1b</td><td></td></tr><tr><td>4e0e49c280acbff8ae394b2443fcff1afb9bdce6</td><td>Automatic learning of gait signatures for people identification
+<br/>F.M. Castro
+<br/>Univ. of Malaga
+<br/>fcastro<at>uma.es
+<br/>M.J. Mar´ın-Jim´enez
+<br/>Univ. of Cordoba
+<br/>mjmarin<at>uco.es
+<br/>N. Guil
+<br/>Univ. of Malaga
+<br/>nguil<at>uma.es
+<br/>N. P´erez de la Blanca
+<br/>Univ. of Granada
+<br/>nicolas<at>ugr.es
+</td></tr><tr><td>4e4e8fc9bbee816e5c751d13f0d9218380d74b8f</td><td></td></tr><tr><td>20a88cc454a03d62c3368aa1f5bdffa73523827b</td><td></td></tr><tr><td>20a432a065a06f088d96965f43d0055675f0a6c1</td><td>In: Proc. of the 25th Int. Conference on Artificial Neural Networks (ICANN)
+<br/>Part II, LNCS 9887, pp. 80-87, Barcelona, Spain, September 2016
+<br/>The final publication is available at Springer via
+<br/>http://dx.doi.org//10.1007/978-3-319-44781-0_10
+<br/>The Effects of Regularization on Learning Facial
+<br/>Expressions with Convolutional Neural Networks
+<br/><b></b><br/>Vogt-Koelln-Strasse 30, 22527 Hamburg, Germany
+<br/>http://www.informatik.uni-hamburg.de/WTM
+</td></tr><tr><td>20e504782951e0c2979d9aec88c76334f7505393</td><td>Robust LSTM-Autoencoders for Face De-Occlusion
+<br/>in the Wild
+</td></tr><tr><td>20ade100a320cc761c23971d2734388bfe79f7c5</td><td>IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE
+<br/>Subspace Clustering via Good Neighbors
+</td></tr><tr><td>20767ca3b932cbc7b8112db21980d7b9b3ea43a3</td><td></td></tr><tr><td>20c2a5166206e7ffbb11a23387b9c5edf42b5230</td><td></td></tr><tr><td>2098983dd521e78746b3b3fa35a22eb2fa630299</td><td></td></tr><tr><td>206e24f7d4b3943b35b069ae2d028143fcbd0704</td><td>Learning Structure and Strength of CNN Filters for Small Sample Size Training
+<br/>IIIT-Delhi, India
+</td></tr><tr><td>2059d2fecfa61ddc648be61c0cbc9bc1ad8a9f5b</td><td>TRANSACTIONS ON AUDIO, SPEECH, AND LANGUAGE PROCESSING, VOL. 23, NO. 4, APRIL 2015
+<br/>Co-Localization of Audio Sources in Images Using
+<br/>Binaural Features and Locally-Linear Regression
+<br/>∗ INRIA Grenoble Rhˆone-Alpes, Montbonnot Saint-Martin, France
+<br/>† Univ. Grenoble Alpes, GIPSA-Lab, France
+<br/>‡ Dept. Electrical Eng., Technion-Israel Inst. of Technology, Haifa, Israel
+</td></tr><tr><td>206fbe6ab6a83175a0ef6b44837743f8d5f9b7e8</td><td></td></tr><tr><td>20111924fbf616a13d37823cd8712a9c6b458cd6</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 130 – No.11, November2015
+<br/>Linear Regression Line based Partial Face Recognition
+<br/>Naveena M.
+<br/>Department of Studies in
+<br/>Computer Science,
+<br/>Manasagagothri,
+<br/>Mysore.
+<br/>Department of Studies in
+<br/>Computer Science,
+<br/>Manasagagothri,
+<br/>Mysore.
+<br/>P. Nagabhushan
+<br/>Department of Studies in
+<br/>Computer Science,
+<br/>Manasagagothri,
+<br/>Mysore.
+<br/>images. In
+</td></tr><tr><td>20532b1f80b509f2332b6cfc0126c0f80f438f10</td><td>A deep matrix factorization method for learning
+<br/>attribute representations
+<br/>Bj¨orn W. Schuller, Senior member, IEEE
+</td></tr><tr><td>205af28b4fcd6b569d0241bb6b255edb325965a4</td><td>Intel Serv Robotics (2008) 1:143–157
+<br/>DOI 10.1007/s11370-007-0014-z
+<br/>SPECIAL ISSUE
+<br/>Facial expression recognition and tracking for intelligent human-robot
+<br/>interaction
+<br/>Received: 27 June 2007 / Accepted: 6 December 2007 / Published online: 23 January 2008
+<br/>© Springer-Verlag 2008
+</td></tr><tr><td>20a0b23741824a17c577376fdd0cf40101af5880</td><td>Learning to track for spatio-temporal action localization
+<br/>Zaid Harchaouia,b
+<br/>b NYU
+<br/>a Inria∗
+</td></tr><tr><td>18c72175ddbb7d5956d180b65a96005c100f6014</td><td>IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. 23, NO. 6,
+<br/>JUNE 2001
+<br/>643
+<br/>From Few to Many: Illumination Cone
+<br/>Models for Face Recognition under
+<br/>Variable Lighting and Pose
+</td></tr><tr><td>18636347b8741d321980e8f91a44ee054b051574</td><td>978-1-4244-5654-3/09/$26.00 ©2009 IEEE
+<br/>37
+<br/>ICIP 2009
+</td></tr><tr><td>18206e1b988389eaab86ef8c852662accf3c3663</td><td></td></tr><tr><td>181045164df86c72923906aed93d7f2f987bce6c</td><td>RHEINISCH-WESTFÄLISCHE TECHNISCHE
+<br/>HOCHSCHULE AACHEN
+<br/>KNOWLEDGE-BASED SYSTEMS GROUP
+<br/>Detection and Recognition of Human
+<br/>Faces using Random Forests for a
+<br/>Mobile Robot
+<br/>MASTER OF SCIENCE THESIS
+<br/>MATRICULATION NUMBER: 26 86 51
+<br/>SUPERVISOR:
+<br/>SECOND SUPERVISOR:
+<br/>PROF. ENRICO BLANZIERI, PH. D.
+<br/>ADVISERS:
+</td></tr><tr><td>18d5b0d421332c9321920b07e0e8ac4a240e5f1f</td><td>Collaborative Representation Classification
+<br/>Ensemble for Face Recognition
+</td></tr><tr><td>18d51a366ce2b2068e061721f43cb798177b4bb7</td><td>Cognition and Emotion
+<br/>ISSN: 0269-9931 (Print) 1464-0600 (Online) Journal homepage: http://www.tandfonline.com/loi/pcem20
+<br/>Looking into your eyes: observed pupil size
+<br/>influences approach-avoidance responses
+<br/>eyes: observed pupil size influences approach-avoidance responses, Cognition and Emotion, DOI:
+<br/>10.1080/02699931.2018.1472554
+<br/>To link to this article: https://doi.org/10.1080/02699931.2018.1472554
+<br/>View supplementary material
+<br/>Published online: 11 May 2018.
+<br/>Submit your article to this journal
+<br/>View related articles
+<br/>View Crossmark data
+<br/>Full Terms & Conditions of access and use can be found at
+<br/>http://www.tandfonline.com/action/journalInformation?journalCode=pcem20
+</td></tr><tr><td>1885acea0d24e7b953485f78ec57b2f04e946eaf</td><td>Combining Local and Global Features for 3D Face Tracking
+<br/>Megvii (face++) Research
+</td></tr><tr><td>184750382fe9b722e78d22a543e852a6290b3f70</td><td></td></tr><tr><td>18a849b1f336e3c3b7c0ee311c9ccde582d7214f</td><td>Int J Comput Vis
+<br/>DOI 10.1007/s11263-012-0564-1
+<br/>Efficiently Scaling up Crowdsourced Video Annotation
+<br/>A Set of Best Practices for High Quality, Economical Video Labeling
+<br/>Received: 31 October 2011 / Accepted: 20 August 2012
+<br/>© Springer Science+Business Media, LLC 2012
+</td></tr><tr><td>1886b6d9c303135c5fbdc33e5f401e7fc4da6da4</td><td>Knowledge Guided Disambiguation for Large-Scale
+<br/>Scene Classification with Multi-Resolution CNNs
+</td></tr><tr><td>1888bf50fd140767352158c0ad5748b501563833</td><td>PA R T 1
+<br/>THE BASICS
+</td></tr><tr><td>185360fe1d024a3313042805ee201a75eac50131</td><td>299
+<br/>Person De-Identification in Videos
+</td></tr><tr><td>18dfc2434a95f149a6cbb583cca69a98c9de9887</td><td></td></tr><tr><td>27d709f7b67204e1e5e05fe2cfac629afa21699d</td><td></td></tr><tr><td>275b5091c50509cc8861e792e084ce07aa906549</td><td>Institut für Informatik
+<br/>der Technischen
+<br/>Universität München
+<br/>Dissertation
+<br/>Leveraging the User’s Face as a Known Object
+<br/>in Handheld Augmented Reality
+<br/>Sebastian Bernhard Knorr
+</td></tr><tr><td>270733d986a1eb72efda847b4b55bc6ba9686df4</td><td>We are IntechOpen,
+<br/>the first native scientific
+<br/>publisher of Open Access books
+<br/>3,350
+<br/>108,000
+<br/>1.7 M
+<br/>Open access books available
+<br/>International authors and editors
+<br/>Downloads
+<br/>Our authors are among the
+<br/>151
+<br/>Countries delivered to
+<br/>TOP 1%
+<br/>12.2%
+<br/>most cited scientists
+<br/>Contributors from top 500 universities
+<br/>Selection of our books indexed in the Book Citation Index
+<br/>in Web of Science™ Core Collection (BKCI)
+<br/>Interested in publishing with us?
+<br/>Numbers displayed above are based on latest data collected.
+<br/>For more information visit www.intechopen.com
+</td></tr><tr><td>27da432cf2b9129dce256e5bf7f2f18953eef5a5</td><td></td></tr><tr><td>2770b095613d4395045942dc60e6c560e882f887</td><td>GridFace: Face Rectification via Learning Local
+<br/>Homography Transformations
+<br/>Face++, Megvii Inc.
+</td></tr><tr><td>27cccf992f54966feb2ab4831fab628334c742d8</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 64– No.18, February 2013
+<br/>Facial Expression Recognition by Statistical, Spatial
+<br/>Features and using Decision Tree
+<br/>Assistant Professor
+<br/>CSIT Department
+<br/>GGV BIlaspur, Chhattisgarh
+<br/>India
+<br/>Assistant Professor
+<br/>Electronics (ECE) Department
+<br/>JECRC Jaipur, Rajasthan India
+<br/>IshanBhardwaj
+<br/>Student of Ph.D.
+<br/>Electrical Department
+<br/>NIT Raipur, Chhattisgarh India
+</td></tr><tr><td>27f8b01e628f20ebfcb58d14ea40573d351bbaad</td><td>DEPARTMENT OF INFORMATION ENGINEERING AND COMPUTER SCIENCE
+<br/>ICT International Doctoral School
+<br/>Events based Multimedia Indexing
+<br/>and Retrieval
+<br/>SUBMITTED TO THE DEPARTMENT OF
+<br/>INFORMATION ENGINEERING AND COMPUTER SCIENCE (DISI)
+<br/>IN THE PARTIAL FULFILMENT OF THE REQUIREMENTS FOR THE DEGREE
+<br/>OF
+<br/>DOCTOR OF PHILOSOPHY
+<br/>Advisor:
+<br/>Examiners: Prof. Marco Carli, Universit`a degli Studi di Roma Tre, Italy
+<br/>Prof. Nicola Conci, Universit`a degli Studi di Trento, Italy
+<br/>Prof. Pietro Zanuttigh, Universit`a degli Studi di Padova, Italy
+<br/>Prof. Giulia Boato, Universit`a degli Studi di Trento, Italy
+<br/>December 2017
+</td></tr><tr><td>274f87ad659cd90382ef38f7c6fafc4fc7f0d74d</td><td></td></tr><tr><td>27ee8482c376ef282d5eb2e673ab042f5ded99d7</td><td>Scale Normalization for the Distance Maps AAM.
+<br/>Avenue de la boulaie, BP 81127,
+<br/>35 511 Cesson-S´evign´e, France
+<br/>Sup´elec, IETR-SCEE Team
+</td></tr><tr><td>4b89cf7197922ee9418ae93896586c990e0d2867</td><td>LATEX Author Guidelines for CVPR Proceedings
+<br/>First Author
+<br/>Institution1
+<br/>Institution1 address
+</td></tr><tr><td>4b04247c7f22410681b6aab053d9655cf7f3f888</td><td>Robust Face Recognition by Constrained Part-based
+<br/>Alignment
+</td></tr><tr><td>4b60e45b6803e2e155f25a2270a28be9f8bec130</td><td>Attribute Based Object Identification
+</td></tr><tr><td>4b48e912a17c79ac95d6a60afed8238c9ab9e553</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+<br/>Minimum Margin Loss for Deep Face Recognition
+</td></tr><tr><td>4b5eeea5dd8bd69331bd4bd4c66098b125888dea</td><td>Human Activity Recognition Using Conditional
+<br/>Random Fields and Privileged Information
+<br/>submitted to
+<br/>the designated by the General Assembly Composition of the
+<br/>Department of Computer Science & Engineering Inquiry
+<br/>Committee
+<br/>by
+<br/>in partial fulfillment of the Requirements for the Degree of
+<br/>DOCTOR OF PHILOSOPHY
+<br/>February 2016
+</td></tr><tr><td>4bbbee93519a4254736167b31be69ee1e537f942</td><td></td></tr><tr><td>4b6be933057d939ddfa665501568ec4704fabb39</td><td></td></tr><tr><td>4be03fd3a76b07125cd39777a6875ee59d9889bd</td><td>CONTENT-BASED ANALYSIS FOR ACCESSING AUDIOVISUAL ARCHIVES:
+<br/>ALTERNATIVES FOR CONCEPT-BASED INDEXING AND SEARCH
+<br/>ESAT/PSI - IBBT
+<br/>KU Leuven, Belgium
+</td></tr><tr><td>113e5678ed8c0af2b100245057976baf82fcb907</td><td>Facing Imbalanced Data
+<br/>Recommendations for the Use of Performance Metrics
+</td></tr><tr><td>11f17191bf74c80ad0b16b9f404df6d03f7c8814</td><td>Recognition of Visually Perceived Compositional
+<br/>Human Actions by Multiple Spatio-Temporal Scales
+<br/>Recurrent Neural Networks
+</td></tr><tr><td>11367581c308f4ba6a32aac1b4a7cdb32cd63137</td><td></td></tr><tr><td>1198572784788a6d2c44c149886d4e42858d49e4</td><td>Learning Discriminative Features using Encoder/Decoder type Deep
+<br/>Neural Nets
+</td></tr><tr><td>11fe6d45aa2b33c2ec10d9786a71c15ec4d3dca8</td><td>970
+<br/>JUNE 2008
+<br/>Tied Factor Analysis for Face Recognition
+<br/>across Large Pose Differences
+</td></tr><tr><td>112780a7fe259dc7aff2170d5beda50b2bfa7bda</td><td></td></tr><tr><td>111a9645ad0108ad472b2f3b243ed3d942e7ff16</td><td>Facial Expression Classification Using
+<br/>Combined Neural Networks
+<br/>DEE/PUC-Rio, Marquês de São Vicente 225, Rio de Janeiro – RJ - Brazil
+</td></tr><tr><td>111d0b588f3abbbea85d50a28c0506f74161e091</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 134 – No.10, January 2016
+<br/>Facial Expression Recognition from Visual Information
+<br/>using Curvelet Transform
+<br/>Surabhi Group of Institution Bhopal
+<br/>systems. Further applications
+</td></tr><tr><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td><td>Labeled Faces in the Wild: A Survey
+</td></tr><tr><td>7d73adcee255469aadc5e926066f71c93f51a1a5</td><td>978-1-4799-9988-0/16/$31.00 ©2016 IEEE
+<br/>1283
+<br/>ICASSP 2016
+</td></tr><tr><td>7dffe7498c67e9451db2d04bb8408f376ae86992</td><td>LEAR-INRIA submission for the THUMOS workshop
+<br/>LEAR, INRIA, France
+</td></tr><tr><td>7d3f6dd220bec883a44596ddec9b1f0ed4f6aca2</td><td>2106
+<br/>Linear Regression for Face Recognition
+</td></tr><tr><td>29ce6b54a87432dc8371f3761a9568eb3c5593b0</td><td>Kent Academic Repository
+<br/>Full text document (pdf)
+<br/>Citation for published version
+<br/>Yassin, DK H. PHM and Hoque, Sanaul and Deravi, Farzin (2013) Age Sensitivity of Face Recognition
+<br/> pp. 12-15.
+<br/>DOI
+<br/>https://doi.org/10.1109/EST.2013.8
+<br/>Link to record in KAR
+<br/>http://kar.kent.ac.uk/43222/
+<br/>Document Version
+<br/>Author's Accepted Manuscript
+<br/>Copyright & reuse
+<br/>Content in the Kent Academic Repository is made available for research purposes. Unless otherwise stated all
+<br/>content is protected by copyright and in the absence of an open licence (eg Creative Commons), permissions
+<br/>for further reuse of content should be sought from the publisher, author or other copyright holder.
+<br/>Versions of research
+<br/>The version in the Kent Academic Repository may differ from the final published version.
+<br/>Users are advised to check http://kar.kent.ac.uk for the status of the paper. Users should always cite the
+<br/>published version of record.
+<br/>Enquiries
+<br/>For any further enquiries regarding the licence status of this document, please contact:
+<br/>If you believe this document infringes copyright then please contact the KAR admin team with the take-down
+<br/>information provided at http://kar.kent.ac.uk/contact.html
+</td></tr><tr><td>292eba47ef77495d2613373642b8372d03f7062b</td><td>Deep Secure Encoding: An Application to Face Recognition
+</td></tr><tr><td>29e96ec163cb12cd5bd33bdf3d32181c136abaf9</td><td>Report No. UIUCDCS-R-2006-2748
+<br/>UILU-ENG-2006-1788
+<br/>Regularized Locality Preserving Projections with Two-Dimensional
+<br/>Discretized Laplacian Smoothing
+<br/>by
+<br/>July 2006
+</td></tr><tr><td>29c1f733a80c1e07acfdd228b7bcfb136c1dff98</td><td></td></tr><tr><td>29f27448e8dd843e1c4d2a78e01caeaea3f46a2d</td><td></td></tr><tr><td>294d1fa4e1315e1cf7cc50be2370d24cc6363a41</td><td>2008 SPIE Digital Library -- Subscriber Archive Copy
+</td></tr><tr><td>29d414bfde0dfb1478b2bdf67617597dd2d57fc6</td><td>Multidim Syst Sign Process (2010) 21:213–229
+<br/>DOI 10.1007/s11045-009-0099-y
+<br/>Perfect histogram matching PCA for face recognition
+<br/>Received: 10 August 2009 / Revised: 21 November 2009 / Accepted: 29 December 2009 /
+<br/>Published online: 14 January 2010
+<br/>© Springer Science+Business Media, LLC 2010
+</td></tr><tr><td>290136947fd44879d914085ee51d8a4f433765fa</td><td>On a Taxonomy of Facial Features
+</td></tr><tr><td>2957715e96a18dbb5ed5c36b92050ec375214aa6</td><td>Improving Face Attribute Detection with Race and Gender Diversity
+<br/>InclusiveFaceNet:
+</td></tr><tr><td>291265db88023e92bb8c8e6390438e5da148e8f5</td><td>MS-Celeb-1M: A Dataset and Benchmark for
+<br/>Large-Scale Face Recognition
+<br/>Microsoft Research
+</td></tr><tr><td>2921719b57544cfe5d0a1614d5ae81710ba804fa</td><td>Face Recognition Enhancement Based on Image
+<br/>File Formats and Wavelet De-noising
+<br/>
+</td></tr><tr><td>29a013b2faace976f2c532533bd6ab4178ccd348</td><td>This article has been accepted for inclusion in a future issue of this journal. Content is final as presented, with the exception of pagination.
+<br/>Hierarchical Manifold Learning With Applications
+<br/>to Supervised Classification for High-Resolution
+<br/>Remotely Sensed Images
+</td></tr><tr><td>29756b6b16d7b06ea211f21cdaeacad94533e8b4</td><td>Thresholding Approach based on GPU for Facial
+<br/>Expression Recognition
+<br/>1 Benemérita Universidad Autónoma de Puebla, Faculty of Computer Science, Puebla, México
+<br/>2Instituto Tecnológico de Puebla, Puebla, México
+</td></tr><tr><td>293193d24d5c4d2975e836034bbb2329b71c4fe7</td><td>Building a Corpus of Facial Expressions
+<br/>for Learning-Centered Emotions
+<br/>Instituto Tecnológico de Culiacán, Culiacán, Sinaloa,
+<br/>Mexico
+</td></tr><tr><td>2988f24908e912259d7a34c84b0edaf7ea50e2b3</td><td>A Model of Brightness Variations Due to
+<br/>Illumination Changes and Non-rigid Motion
+<br/>Using Spherical Harmonics
+<br/>Jos´e M. Buenaposada
+<br/>Dep. Ciencias de la Computaci´on,
+<br/>U. Rey Juan Carlos, Spain
+<br/>http://www.dia.fi.upm.es/~pcr
+<br/>Inst. for Systems and Robotics
+<br/>Inst. Superior T´ecnico, Portugal
+<br/>http://www.isr.ist.utl.pt/~adb
+<br/>Enrique Mu˜noz
+<br/>Facultad de Inform´atica,
+<br/>U. Complutense de Madrid, Spain
+<br/>Dep. de Inteligencia Artificial,
+<br/>U. Polit´ecnica de Madrid, Spain
+<br/>http://www.dia.fi.upm.es/~pcr
+<br/>http://www.dia.fi.upm.es/~pcr
+</td></tr><tr><td>29156e4fe317b61cdcc87b0226e6f09e416909e0</td><td></td></tr><tr><td>293ade202109c7f23637589a637bdaed06dc37c9</td><td></td></tr><tr><td>7c7ab59a82b766929defd7146fd039b89d67e984</td><td>Improving Multiview Face Detection with
+<br/>Multi-Task Deep Convolutional Neural Networks
+<br/>Microsoft Research
+<br/>One Microsoft Way, Redmond WA 98052
+</td></tr><tr><td>7c45b5824645ba6d96beec17ca8ecfb22dfcdd7f</td><td>News image annotation on a large parallel text-image corpus
+<br/>Universit´e de Rennes 1/IRISA, CNRS/IRISA, INRIA Rennes-Bretagne Atlantique
+<br/>Campus de Beaulieu
+<br/>35042 Rennes Cedex, France
+</td></tr><tr><td>7c0a6824b556696ad7bdc6623d742687655852db</td><td>18th Telecommunications forum TELFOR 2010
+<br/>Serbia, Belgrade, November 23-25, 2010.
+<br/>MPCA+DATER: A Novel Approach for Face
+<br/>Recognition Based on Tensor Objects
+<br/>Ali. A. Shams Baboli, Member, IEEE, G. Rezai-rad, Member, IEEE, Aref. Shams Baboli
+</td></tr><tr><td>7c95449a5712aac7e8c9a66d131f83a038bb7caa</td><td>This is an author produced version of Facial first impressions from another angle: How
+<br/>social judgements are influenced by changeable and invariant facial properties.
+<br/>White Rose Research Online URL for this paper:
+<br/>http://eprints.whiterose.ac.uk/102935/
+<br/>Article:
+<br/>Rhodes (2017) Facial first impressions from another angle: How social judgements are
+<br/>influenced by changeable and invariant facial properties. British journal of psychology. pp.
+<br/>397-415. ISSN 0007-1269
+<br/>https://doi.org/10.1111/bjop.12206
+<br/>promoting access to
+<br/>White Rose research papers
+<br/>http://eprints.whiterose.ac.uk/
+</td></tr><tr><td>7c3e09e0bd992d3f4670ffacb4ec3a911141c51f</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Transferring Object-Scene Convolutional Neural Networks for
+<br/>Event Recognition in Still Images
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>7c7b0550ec41e97fcfc635feffe2e53624471c59</td><td>1051-4651/14 $31.00 © 2014 IEEE
+<br/>DOI 10.1109/ICPR.2014.124
+<br/>660
+</td></tr><tr><td>7ce03597b703a3b6754d1adac5fbc98536994e8f</td><td></td></tr><tr><td>7c9a65f18f7feb473e993077d087d4806578214e</td><td>SpringerLink - Zeitschriftenbeitrag
+<br/>http://www.springerlink.com/content/93hr862660nl1164/?p=abe5352...
+<br/>Deutsch
+<br/>Deutsch
+<br/>Go
+<br/>Vorherige Beitrag Nächste Beitrag
+<br/>Beitrag markieren
+<br/>In den Warenkorb legen
+<br/>Zu gespeicherten Artikeln
+<br/>hinzufügen
+<br/>Permissions & Reprints
+<br/>Diesen Artikel empfehlen
+<br/>Ergebnisse
+<br/>finden
+<br/>Erweiterte Suche
+<br/>Go
+<br/>im gesamten Inhalt
+<br/>in dieser Zeitschrift
+<br/>in diesem Heft
+<br/>Diesen Beitrag exportieren
+<br/>Diesen Beitrag exportieren als RIS
+<br/>| Text
+<br/>Text
+<br/>PDF
+<br/>PDF ist das gebräuchliche Format
+<br/>für Online Publikationen. Die Größe
+<br/>dieses Dokumentes beträgt 564
+<br/>Kilobyte. Je nach Art Ihrer
+<br/>Internetverbindung kann der
+<br/>Download einige Zeit in Anspruch
+<br/>nehmen.
+<br/>öffnen: Gesamtdokument
+<br/>Publikationsart Subject Collections
+<br/>Zurück zu: Journal Issue
+<br/>Athens Authentication Point
+<br/>Zeitschriftenbeitrag
+<br/>Willkommen!
+<br/>Um unsere personalisierten
+<br/>Angebote nutzen zu können,
+<br/>müssen Sie angemeldet sein.
+<br/>Login
+<br/>Jetzt registrieren
+<br/>Zugangsdaten vergessen?
+<br/>Hilfe.
+<br/>Mein Menü
+<br/>Markierte Beiträge
+<br/>Alerts
+<br/>Meine Bestellungen
+<br/>Private emotions versus social interaction: a data-driven approach towards
+<br/>analysing emotion in speech
+<br/>Zeitschrift
+<br/>Verlag
+<br/>ISSN
+<br/>Heft
+<br/>Kategorie
+<br/>DOI
+<br/>Seiten
+<br/>Subject Collection
+<br/>SpringerLink Date
+<br/>User Modeling and User-Adapted Interaction
+<br/>Springer Netherlands
+<br/>0924-1868 (Print) 1573-1391 (Online)
+<br/>Volume 18, Numbers 1-2 / Februar 2008
+<br/>Original Paper
+<br/>10.1007/s11257-007-9039-4
+<br/>175-206
+<br/>Informatik
+<br/>Freitag, 12. Oktober 2007
+<br/>Gespeicherte Beiträge
+<br/>Alle
+<br/>Favoriten
+<br/>(1) Lehrstuhl für Mustererkennung, FAU Erlangen – Nürnberg, Martensstr. 3, 91058 Erlangen,
+<br/>Germany
+<br/>Received: 3 July 2006 Accepted: 14 January 2007 Published online: 12 October 2007
+</td></tr><tr><td>7c1e1c767f7911a390d49bed4f73952df8445936</td><td>NON-RIGID OBJECT DETECTION WITH LOCAL INTERLEAVED SEQUENTIAL ALIGNMENT (LISA)
+<br/>Non-Rigid Object Detection with Local
+<br/>Interleaved Sequential Alignment (LISA)
+<br/>and Tom´aˇs Svoboda, Member, IEEE
+</td></tr><tr><td>7c349932a3d083466da58ab1674129600b12b81c</td><td></td></tr><tr><td>1648cf24c042122af2f429641ba9599a2187d605</td><td>Boosting Cross-Age Face Verification via Generative Age Normalization
+<br/>(cid:2) Orange Labs, 4 rue Clos Courtel, 35512 Cesson-S´evign´e, France
+<br/>† Eurecom, 450 route des Chappes, 06410 Biot, France
+</td></tr><tr><td>162403e189d1b8463952fa4f18a291241275c354</td><td>Action Recognition with Spatio-Temporal
+<br/>Visual Attention on Skeleton Image Sequences
+<br/>With a strong ability of modeling sequential data, Recur-
+<br/>rent Neural Networks (RNN) with Long Short-Term Memory
+<br/>(LSTM) neurons outperform the previous hand-crafted feature
+<br/>based methods [9], [10]. Each skeleton frame is converted into
+<br/>a feature vector and the whole sequence is fed into the RNN.
+<br/>Despite the strong ability in modeling temporal sequences,
+<br/>RNN structures lack the ability to efficiently learn the spatial
+<br/>relations between the joints. To better use spatial information,
+<br/>a hierarchical structure is proposed in [11], [12] that feeds
+<br/>the joints into the network as several pre-defined body part
+<br/>groups. However,
+<br/>limit
+<br/>the effectiveness of representing spatial relations. A spatio-
+<br/>temporal 2D LSTM (ST-LSTM) network [13] is proposed
+<br/>to learn the spatial and temporal relations simultaneously.
+<br/>Furthermore, a two-stream RNN structure [14] is proposed to
+<br/>learn the spatio-temporal relations with two RNN branches.
+<br/>the pre-defined body regions still
+</td></tr><tr><td>160259f98a6ec4ec3e3557de5e6ac5fa7f2e7f2b</td><td>Discriminant Multi-Label Manifold Embedding for Facial Action Unit
+<br/>Detection
+<br/>Signal Procesing Laboratory (LTS5), ´Ecole Polytechnique F´ed´erale de Lausanne, Switzerland
+</td></tr><tr><td>16671b2dc89367ce4ed2a9c241246a0cec9ec10e</td><td>2006
+<br/>Detecting the Number of Clusters
+<br/>in n-Way Probabilistic Clustering
+</td></tr><tr><td>16de1324459fe8fdcdca80bba04c3c30bb789bdf</td><td></td></tr><tr><td>16892074764386b74b6040fe8d6946b67a246a0b</td><td></td></tr><tr><td>16395b40e19cbc6d5b82543039ffff2a06363845</td><td>Action Recognition in Video Using Sparse Coding and Relative Features
+<br/>Anal´ı Alfaro
+<br/>P. Universidad Catolica de Chile
+<br/>P. Universidad Catolica de Chile
+<br/>P. Universidad Catolica de Chile
+<br/>Santiago, Chile
+<br/>Santiago, Chile
+<br/>Santiago, Chile
+</td></tr><tr><td>16286fb0f14f6a7a1acc10fcd28b3ac43f12f3eb</td><td>J Nonverbal Behav
+<br/>DOI 10.1007/s10919-008-0059-5
+<br/>O R I G I N A L P A P E R
+<br/>All Smiles are Not Created Equal: Morphology
+<br/>and Timing of Smiles Perceived as Amused, Polite,
+<br/>and Embarrassed/Nervous
+<br/>Ó Springer Science+Business Media, LLC 2008
+</td></tr><tr><td>166186e551b75c9b5adcc9218f0727b73f5de899</td><td>Volume 4, Issue 2, February 2016
+<br/>International Journal of Advance Research in
+<br/>Computer Science and Management Studies
+<br/>Research Article / Survey Paper / Case Study
+<br/>Available online at: www.ijarcsms.com
+<br/>ISSN: 2321-7782 (Online)
+<br/>Automatic Age and Gender Recognition in Human Face Image
+<br/>Dataset using Convolutional Neural Network System
+<br/>Subhani Shaik1
+<br/>Assoc. Prof & Head of the Department
+<br/>Department of CSE,
+<br/>Associate Professor
+<br/>Department of CSE,
+<br/>St.Mary’s Group of Institutions Guntur
+<br/>St.Mary’s Group of Institutions Guntur
+<br/>Chebrolu(V&M),Guntur(Dt),
+<br/>Andhra Pradesh - India
+<br/>Chebrolu(V&M),Guntur(Dt),
+<br/>Andhra Pradesh - India
+</td></tr><tr><td>16d9b983796ffcd151bdb8e75fc7eb2e31230809</td><td>EUROGRAPHICS 2018 / D. Gutierrez and A. Sheffer
+<br/>(Guest Editors)
+<br/>Volume 37 (2018), Number 2
+<br/>GazeDirector: Fully Articulated Eye Gaze Redirection in Video
+<br/>ID: paper1004
+</td></tr><tr><td>1679943d22d60639b4670eba86665371295f52c3</td><td></td></tr><tr><td>169076ffe5e7a2310e98087ef7da25aceb12b62d</td><td></td></tr><tr><td>161eb88031f382e6a1d630cd9a1b9c4bc6b47652</td><td>1
+<br/>Automatic Facial Expression Recognition
+<br/>Using Features of Salient Facial Patches
+</td></tr><tr><td>4209783b0cab1f22341f0600eed4512155b1dee6</td><td>Accurate and Efficient Similarity Search for Large Scale Face Recognition
+<br/>BUPT
+<br/>BUPT
+<br/>BUPT
+</td></tr><tr><td>42e3dac0df30d754c7c7dab9e1bb94990034a90d</td><td>PANDA: Pose Aligned Networks for Deep Attribute Modeling
+<br/>2EECS, UC Berkeley
+<br/>1Facebook AI Research
+</td></tr><tr><td>429c3588ce54468090cc2cf56c9b328b549a86dc</td><td></td></tr><tr><td>42cc9ea3da1277b1f19dff3d8007c6cbc0bb9830</td><td>Coordinated Local Metric Learning
+<br/>Inria∗
+</td></tr><tr><td>42350e28d11e33641775bef4c7b41a2c3437e4fd</td><td>212
+<br/>Multilinear Discriminant Analysis
+<br/>for Face Recognition
+</td></tr><tr><td>42e155ea109eae773dadf74d713485be83fca105</td><td></td></tr><tr><td>4270460b8bc5299bd6eaf821d5685c6442ea179a</td><td>Int J Comput Vis (2009) 84: 163–183
+<br/>DOI 10.1007/s11263-008-0147-3
+<br/>Partial Similarity of Objects, or How to Compare a Centaur
+<br/>to a Horse
+<br/>Received: 30 September 2007 / Accepted: 3 June 2008 / Published online: 26 July 2008
+<br/>© Springer Science+Business Media, LLC 2008
+</td></tr><tr><td>429d4848d03d2243cc6a1b03695406a6de1a7abd</td><td>Face Recognition based on Logarithmic Fusion
+<br/>International Journal of Soft Computing and Engineering (IJSCE)
+<br/>ISSN: 2231-2307, Volume-2, Issue-3, July 2012
+<br/>of SVD and KT
+<br/>Ramachandra A C, Raja K B, Venugopal K R, L M Patnaik
+<br/>to
+<br/>
+</td></tr><tr><td>424259e9e917c037208125ccc1a02f8276afb667</td><td></td></tr><tr><td>42ecfc3221c2e1377e6ff849afb705ecd056b6ff</td><td>Pose Invariant Face Recognition under Arbitrary
+<br/>Unknown Lighting using Spherical Harmonics
+<br/>Department of Computer Science,
+<br/>SUNY at Stony Brook, NY, 11790
+</td></tr><tr><td>421955c6d2f7a5ffafaf154a329a525e21bbd6d3</td><td>570
+<br/>IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. 22, NO. 6,
+<br/>JUNE 2000
+<br/>Evolutionary Pursuit and Its
+<br/>Application to Face Recognition
+</td></tr><tr><td>42e0127a3fd6a96048e0bc7aab6d0ae88ba00fb0</td><td></td></tr><tr><td>42df75080e14d32332b39ee5d91e83da8a914e34</td><td>4280
+<br/>Illumination Compensation Using Oriented
+<br/>Local Histogram Equalization and
+<br/>Its Application to Face Recognition
+</td></tr><tr><td>89945b7cd614310ebae05b8deed0533a9998d212</td><td>Divide-and-Conquer Method for L1 Norm Matrix
+<br/>Factorization in the Presence of Outliers and
+<br/>Missing Data
+</td></tr><tr><td>89de30a75d3258816c2d4d5a733d2bef894b66b9</td><td></td></tr><tr><td>8913a5b7ed91c5f6dec95349fbc6919deee4fc75</td><td>BigBIRD: A Large-Scale 3D Database of Object Instances
+</td></tr><tr><td>89d3a57f663976a9ac5e9cdad01267c1fc1a7e06</td><td>Neural Class-Specific Regression for face
+<br/>verification
+</td></tr><tr><td>891b10c4b3b92ca30c9b93170ec9abd71f6099c4</td><td>Facial landmark detection using structured output deep
+<br/>neural networks
+<br/>Soufiane Belharbi ∗1, Cl´ement Chatelain∗1, Romain H´erault∗1, and S´ebastien
+<br/>1LITIS EA 4108, INSA de Rouen, Saint ´Etienne du Rouvray 76800, France
+<br/>2LITIS EA 4108, UFR des Sciences, Universit´e de Rouen, France.
+<br/>September 24, 2015
+</td></tr><tr><td>45c340c8e79077a5340387cfff8ed7615efa20fd</td><td></td></tr><tr><td>45e7ddd5248977ba8ec61be111db912a4387d62f</td><td>CHEN ET AL.: ADVERSARIAL POSENET
+<br/>Adversarial Learning of Structure-Aware Fully
+<br/>Convolutional Networks for Landmark
+<br/>Localization
+</td></tr><tr><td>45f3bf505f1ce9cc600c867b1fb2aa5edd5feed8</td><td></td></tr><tr><td>4560491820e0ee49736aea9b81d57c3939a69e12</td><td>Investigating the Impact of Data Volume and
+<br/>Domain Similarity on Transfer Learning
+<br/>Applications
+<br/>State Farm Insurance, Bloomington IL 61710, USA,
+</td></tr><tr><td>4571626d4d71c0d11928eb99a3c8b10955a74afe</td><td>Geometry Guided Adversarial Facial Expression Synthesis
+<br/>1National Laboratory of Pattern Recognition, CASIA
+<br/>2Center for Research on Intelligent Perception and Computing, CASIA
+<br/>3Center for Excellence in Brain Science and Intelligence Technology, CAS
+</td></tr><tr><td>4534d78f8beb8aad409f7bfcd857ec7f19247715</td><td>Under review as a conference paper at ICLR 2017
+<br/>TRANSFORMATION-BASED MODELS OF VIDEO
+<br/>SEQUENCES
+<br/>Facebook AI Research
+</td></tr><tr><td>459e840ec58ef5ffcee60f49a94424eb503e8982</td><td>One-shot Face Recognition by Promoting Underrepresented Classes
+<br/>Microsoft
+<br/>One Microsoft Way, Redmond, Washington, United States
+</td></tr><tr><td>45fbeed124a8956477dbfc862c758a2ee2681278</td><td></td></tr><tr><td>451c42da244edcb1088e3c09d0f14c064ed9077e</td><td>1964
+<br/>© EURASIP, 2011 - ISSN 2076-1465
+<br/>19th European Signal Processing Conference (EUSIPCO 2011)
+<br/>INTRODUCTION
+</td></tr><tr><td>4511e09ee26044cb46073a8c2f6e1e0fbabe33e8</td><td></td></tr><tr><td>45a6333fc701d14aab19f9e2efd59fe7b0e89fec</td><td>HAND POSTURE DATASET CREATION FOR GESTURE
+<br/>RECOGNITION
+<br/>Luis Anton-Canalis
+<br/>Instituto de Sistemas Inteligentes y Aplicaciones Numericas en Ingenieria
+<br/>Campus Universitario de Tafira, 35017 Gran Canaria, Spain
+<br/>Elena Sanchez-Nielsen
+<br/>Departamento de E.I.O. y Computacion
+<br/>38271 Universidad de La Laguna, Spain
+<br/>Keywords:
+<br/>Image understanding, Gesture recognition, Hand dataset.
+</td></tr><tr><td>1ffe20eb32dbc4fa85ac7844178937bba97f4bf0</td><td>Face Clustering: Representation and Pairwise
+<br/>Constraints
+</td></tr><tr><td>1f8304f4b51033d2671147b33bb4e51b9a1e16fe</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Beyond Trees:
+<br/>MAP Inference in MRFs via Outer-Planar Decomposition
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>1f9ae272bb4151817866511bd970bffb22981a49</td><td>An Iterative Regression Approach for Face Pose Estima-
+<br/>tion from RGB Images
+<br/>This paper presents a iterative optimization method, explicit shape regression, for face pose
+<br/>detection and localization. The regression function is learnt to find out the entire facial shape
+<br/>and minimize the alignment errors. A cascaded learning framework is employed to enhance
+<br/>shape constraint during detection. A combination of a two-level boosted regression, shape
+<br/>performance. In this paper, we have explain the advantage of ESR for deformable object like
+<br/>face pose estimation and reveal its generic applications of the method. In the experiment,
+<br/>we compare the results with different work and demonstrate the accuracy and robustness in
+<br/>different scenarios.
+<br/>Introduction
+<br/>Pose estimation is an important problem in computer vision, and has enabled many practical ap-
+<br/>plication from face expression 1 to activity tracking 2. Researchers design a new algorithm called
+<br/>explicit shape regression (ESR) to find out face alignment from a picture 3. Figure 1 shows how
+<br/>the system uses ESR to learn a shape of a human face image. A simple way to identify a face is to
+<br/>find out facial landmarks like eyes, nose, mouth and chin. The researchers define a face shape S
+<br/>and S is composed of Nf p facial landmarks. Therefore, they get S = [x1, y1, ..., xNf p, yNf p]T . The
+<br/>objective of the researchers is to estimate a shape S of a face image. The way to know the accuracy
+</td></tr><tr><td>1fc249ec69b3e23856b42a4e591c59ac60d77118</td><td>Evaluation of a 3D-aided Pose Invariant 2D Face Recognition System
+<br/>Computational Biomedicine Lab
+<br/>4800 Calhoun Rd. Houston, TX, USA
+</td></tr><tr><td>1fbde67e87890e5d45864e66edb86136fbdbe20e</td><td>The Action Similarity Labeling Challenge
+</td></tr><tr><td>1f41a96589c5b5cee4a55fc7c2ce33e1854b09d6</td><td>Demographic Estimation from Face Images:
+<br/>Human vs. Machine Performance
+</td></tr><tr><td>1fd2ed45fb3ba77f10c83f0eef3b66955645dfe0</td><td></td></tr><tr><td>1f2d12531a1421bafafe71b3ad53cb080917b1a7</td><td></td></tr><tr><td>1fefb2f8dd1efcdb57d5c2966d81f9ab22c1c58d</td><td>vExplorer: A Search Method to Find Relevant YouTube Videos for Health
+<br/>Researchers
+<br/>IBM Research, Cambridge, MA, USA
+</td></tr><tr><td>1f94734847c15fa1da68d4222973950d6b683c9e</td><td>Embedding Label Structures for Fine-Grained Feature Representation
+<br/>UNC Charlotte
+<br/>Charlotte, NC 28223
+<br/>NEC Lab America
+<br/>Cupertino, CA 95014
+<br/>NEC Lab America
+<br/>Cupertino, CA 95014
+<br/>UNC Charlotte
+<br/>Charlotte, NC 28223
+</td></tr><tr><td>1f745215cda3a9f00a65166bd744e4ec35644b02</td><td>Facial Cosmetics Database and Impact Analysis on
+<br/>Automatic Face Recognition
+<br/># Computer Science Department, TU Muenchen
+<br/>Boltzmannstr. 3, 85748 Garching b. Muenchen, Germany
+<br/>∗ Multimedia Communications Department, EURECOM
+<br/>450 Route des Chappes, 06410 Biot, France
+</td></tr><tr><td>1fff309330f85146134e49e0022ac61ac60506a9</td><td>Data-Driven Sparse Sensor Placement for Reconstruction
+</td></tr><tr><td>7323b594d3a8508f809e276aa2d224c4e7ec5a80</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+<br/>An Experimental Evaluation of Covariates
+<br/>Effects on Unconstrained Face Verification
+</td></tr><tr><td>732e8d8f5717f8802426e1b9debc18a8361c1782</td><td>Unimodal Probability Distributions for Deep Ordinal Classification
+</td></tr><tr><td>73ed64803d6f2c49f01cffef8e6be8fc9b5273b8</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Cooking in the kitchen: Recognizing and Segmenting Human
+<br/>Activities in Videos
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>7306d42ca158d40436cc5167e651d7ebfa6b89c1</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Transductive Zero-Shot Action Recognition by
+<br/>Word-Vector Embedding
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>734cdda4a4de2a635404e4c6b61f1b2edb3f501d</td><td>Tie and Guan EURASIP Journal on Image and Video Processing 2013, 2013:8
+<br/>http://jivp.eurasipjournals.com/content/2013/1/8
+<br/>R ES EAR CH
+<br/>Open Access
+<br/>Automatic landmark point detection and tracking
+<br/>for human facial expressions
+</td></tr><tr><td>732686d799d760ccca8ad47b49a8308b1ab381fb</td><td>Running head: TEACHERS’ DIFFERING BEHAVIORS
+<br/>1
+<br/>Graduate School of Psychology
+<br/>RESEARCH MASTER’S PSYCHOLOGY THESıS REPORT
+<br/>
+<br/>Teachers’ differing classroom behaviors:
+<br/>The role of emotional sensitivity and cultural tolerance
+<br/>Research Master’s, Social Psychology
+<br/>Ethics Committee Reference Code: 2016-SP-7084
+</td></tr><tr><td>73fbdd57270b9f91f2e24989178e264f2d2eb7ae</td><td>978-1-4673-0046-9/12/$26.00 ©2012 IEEE
+<br/>1945
+<br/>ICASSP 2012
+</td></tr><tr><td>73c9cbbf3f9cea1bc7dce98fce429bf0616a1a8c</td><td></td></tr><tr><td>871f5f1114949e3ddb1bca0982086cc806ce84a8</td><td>Discriminative Learning of Apparel Features
+<br/>1 Computer Vision Laboratory, D-ITET, ETH Z¨urich, Switzerland
+<br/>2 ESAT - PSI / IBBT, K.U. Leuven, Belgium
+</td></tr><tr><td>878169be6e2c87df2d8a1266e9e37de63b524ae7</td><td>CBMM Memo No. 089
+<br/>May 10, 2018
+<br/>Image interpretation above and below the object level
+</td></tr><tr><td>878301453e3d5cb1a1f7828002ea00f59cbeab06</td><td>Faceness-Net: Face Detection through
+<br/>Deep Facial Part Responses
+</td></tr><tr><td>87e592ee1a7e2d34e6b115da08700a1ae02e9355</td><td>Deep Pictorial Gaze Estimation
+<br/>AIT Lab, Department of Computer Science, ETH Zurich
+</td></tr><tr><td>87bb183d8be0c2b4cfceb9ee158fee4bbf3e19fd</td><td>Craniofacial Image Analysis
+</td></tr><tr><td>8006219efb6ab76754616b0e8b7778dcfb46603d</td><td>CONTRIBUTIONSTOLARGE-SCALELEARNINGFORIMAGECLASSIFICATIONZeynepAkataPhDThesisl’´EcoleDoctoraleMath´ematiques,SciencesetTechnologiesdel’Information,InformatiquedeGrenoble </td></tr><tr><td>80193dd633513c2d756c3f568ffa0ebc1bb5213e</td><td></td></tr><tr><td>804b4c1b553d9d7bae70d55bf8767c603c1a09e3</td><td>978-1-4799-9988-0/16/$31.00 ©2016 IEEE
+<br/>1831
+<br/>ICASSP 2016
+</td></tr><tr><td>800cbbe16be0f7cb921842d54967c9a94eaa2a65</td><td>MULTIMODAL RECOGNITION OF
+<br/>EMOTIONS
+</td></tr><tr><td>803c92a3f0815dbf97e30c4ee9450fd005586e1a</td><td>Max-Mahalanobis Linear Discriminant Analysis Networks
+</td></tr><tr><td>80345fbb6bb6bcc5ab1a7adcc7979a0262b8a923</td><td>Research Article
+<br/>Soft Biometrics for a Socially Assistive Robotic
+<br/>Platform
+<br/>Open Access
+</td></tr><tr><td>80a6bb337b8fdc17bffb8038f3b1467d01204375</td><td>Proceedings of the International Conference on Computer and Information Science and Technology
+<br/>Ottawa, Ontario, Canada, May 11 – 12, 2015
+<br/>Paper No. 126
+<br/>Subspace LDA Methods for Solving the Small Sample Size
+<br/>Problem in Face Recognition
+<br/><b></b><br/>101 KwanFu Rd., Sec. 2, Hsinchu, Taiwan
+</td></tr><tr><td>80097a879fceff2a9a955bf7613b0d3bfa68dc23</td><td>Active Self-Paced Learning for Cost-Effective and
+<br/>Progressive Face Identification
+</td></tr><tr><td>74408cfd748ad5553cba8ab64e5f83da14875ae8</td><td>Facial Expressions Tracking and Recognition: Database Protocols for Systems Validation
+<br/>and Evaluation
+</td></tr><tr><td>747d5fe667519acea1bee3df5cf94d9d6f874f20</td><td></td></tr><tr><td>74dbe6e0486e417a108923295c80551b6d759dbe</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 45– No.11, May 2012
+<br/>An HMM based Model for Prediction of Emotional
+<br/>Composition of a Facial Expression using both
+<br/>Significant and Insignificant Action Units and
+<br/>Associated Gender Differences
+<br/>Department of Management and Information
+<br/>Department of Management and Information
+<br/>Systems Science
+<br/>1603-1 Kamitomioka, Nagaoka
+<br/>Niigata, Japan
+<br/>Systems Science
+<br/>1603-1 Kamitomioka, Nagaoka
+<br/>Niigata, Japan
+</td></tr><tr><td>747c25bff37b96def96dc039cc13f8a7f42dbbc7</td><td>EmoNets: Multimodal deep learning approaches for emotion
+<br/>recognition in video
+</td></tr><tr><td>74b0095944c6e29837c208307a67116ebe1231c8</td><td></td></tr><tr><td>74156a11c2997517061df5629be78428e1f09cbd</td><td>Cancún Center, Cancún, México, December 4-8, 2016
+<br/>978-1-5090-4846-5/16/$31.00 ©2016 IEEE
+<br/>2784
+</td></tr><tr><td>745b42050a68a294e9300228e09b5748d2d20b81</td><td></td></tr><tr><td>749d605dd12a4af58de1fae6f5ef5e65eb06540e</td><td>Multi-Task Video Captioning with Video and Entailment Generation
+<br/>UNC Chapel Hill
+</td></tr><tr><td>74c19438c78a136677a7cb9004c53684a4ae56ff</td><td>RESOUND: Towards Action Recognition
+<br/>without Representation Bias
+<br/>UC San Diego
+</td></tr><tr><td>7480d8739eb7ab97c12c14e75658e5444b852e9f</td><td>NEGREL ET AL.: REVISITED MLBOOST FOR FACE RETRIEVAL
+<br/>MLBoost Revisited: A Faster Metric
+<br/>Learning Algorithm for Identity-Based Face
+<br/>Retrieval
+<br/>Frederic Jurie
+<br/>Normandie Univ, UNICAEN,
+<br/>ENSICAEN, CNRS
+<br/>France
+</td></tr><tr><td>74ba4ab407b90592ffdf884a20e10006d2223015</td><td>Partial Face Detection in the Mobile Domain
+</td></tr><tr><td>7405ed035d1a4b9787b78e5566340a98fe4b63a0</td><td>Self-Expressive Decompositions for
+<br/>Matrix Approximation and Clustering
+</td></tr><tr><td>744db9bd550bf5e109d44c2edabffec28c867b91</td><td>FX e-Makeup for Muscle Based Interaction
+<br/>1 Department of Informatics, PUC-Rio, Rio de Janeiro, Brazil
+<br/>2 Department of Mechanical Engineering, PUC-Rio, Rio de Janeiro, Brazil
+<br/>3 Department of Administration, PUC-Rio, Rio de Janeiro, Brazil
+</td></tr><tr><td>744d23991a2c48d146781405e299e9b3cc14b731</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TIP.2016.2535284, IEEE
+<br/>Transactions on Image Processing
+<br/>Aging Face Recognition: A Hierarchical Learning
+<br/>Model Based on Local Patterns Selection
+</td></tr><tr><td>1a45ddaf43bcd49d261abb4a27977a952b5fff12</td><td>LDOP: Local Directional Order Pattern for Robust
+<br/>Face Retrieval
+<br/>
+</td></tr><tr><td>1aa766bbd49bac8484e2545c20788d0f86e73ec2</td><td>
+<br/>Baseline Face Detection, Head Pose Estimation, and Coarse
+<br/>Direction Detection for Facial Data in the SHRP2 Naturalistic
+<br/>Driving Study
+<br/>J. Paone, D. Bolme, R. Ferrell, Member, IEEE, D. Aykac, and
+<br/>T. Karnowski, Member, IEEE
+<br/>Oak Ridge National Laboratory, Oak Ridge, TN
+</td></tr><tr><td>1a849b694f2d68c3536ed849ed78c82e979d64d5</td><td>This is a repository copy of Symmetric Shape Morphing for 3D Face and Head Modelling.
+<br/>White Rose Research Online URL for this paper:
+<br/>http://eprints.whiterose.ac.uk/131760/
+<br/>Version: Accepted Version
+<br/>Proceedings Paper:
+<br/>Dai, Hang, Pears, Nicholas Edwin orcid.org/0000-0001-9513-5634, Smith, William Alfred
+<br/>Peter orcid.org/0000-0002-6047-0413 et al. (1 more author) (2018) Symmetric Shape
+<br/>Morphing for 3D Face and Head Modelling. In: The 13th IEEE Conference on Automatic
+<br/>Face and Gesture Recognition. IEEE .
+<br/>Reuse
+<br/>Items deposited in White Rose Research Online are protected by copyright, with all rights reserved unless
+<br/>indicated otherwise. They may be downloaded and/or printed for private study, or other acts as permitted by
+<br/>national copyright laws. The publisher or other rights holders may allow further reproduction and re-use of
+<br/>the full text version. This is indicated by the licence information on the White Rose Research Online record
+<br/>for the item.
+<br/>Takedown
+<br/>If you consider content in White Rose Research Online to be in breach of UK law, please notify us by
+<br/>https://eprints.whiterose.ac.uk/
+</td></tr><tr><td>1a3eee980a2252bb092666cf15dd1301fa84860e</td><td>PCA GAUSSIANIZATION FOR IMAGE PROCESSING
+<br/>Image Processing Laboratory (IPL), Universitat de Val`encia
+<br/>Catedr´atico A. Escardino - 46980 Paterna, Val`encia, Spain
+</td></tr><tr><td>1a031378cf1d2b9088a200d9715d87db8a1bf041</td><td>Workshop track - ICLR 2018
+<br/>DEEP DICTIONARY LEARNING: SYNERGIZING RE-
+<br/>CONSTRUCTION AND CLASSIFICATION
+</td></tr><tr><td>1a9337d70a87d0e30966ecd1d7a9b0bbc7be161f</td><td></td></tr><tr><td>1a9a192b700c080c7887e5862c1ec578012f9ed1</td><td>IEEE TRANSACTIONS ON SYSTEM, MAN AND CYBERNETICS, PART B
+<br/>Discriminant Subspace Analysis for Face
+<br/>Recognition with Small Number of Training
+<br/>Samples
+</td></tr><tr><td>1a8ccc23ed73db64748e31c61c69fe23c48a2bb1</td><td>Extensive Facial Landmark Localization
+<br/>with Coarse-to-fine Convolutional Network Cascade
+<br/>Megvii Inc.
+</td></tr><tr><td>1ad97cce5fa8e9c2e001f53f6f3202bddcefba22</td><td>Grassmann Averages for Scalable Robust PCA
+<br/>DIKU and MPIs T¨ubingen∗
+<br/>Denmark and Germany
+<br/>DTU Compute∗
+<br/>Lyngby, Denmark
+</td></tr><tr><td>1a1118cd4339553ad0544a0a131512aee50cf7de</td><td></td></tr><tr><td>1a7a2221fed183b6431e29a014539e45d95f0804</td><td>Person Identification Using Text and Image Data
+<br/>David S. Bolme, J. Ross Beveridge and Adele E. Howe
+<br/>Computer Science Department
+<br/>Colorado State Univeristy
+<br/>Fort Collins, Colorado 80523
+</td></tr><tr><td>28e0ed749ebe7eb778cb13853c1456cb6817a166</td><td></td></tr><tr><td>28b9d92baea72ec665c54d9d32743cf7bc0912a7</td><td></td></tr><tr><td>28d7029cfb73bcb4ad1997f3779c183972a406b4</td><td>Discriminative Nonlinear Analysis Operator
+<br/>Learning: When Cosparse Model Meets Image
+<br/>Classification
+</td></tr><tr><td>280d59fa99ead5929ebcde85407bba34b1fcfb59</td><td>978-1-4799-9988-0/16/$31.00 ©2016 IEEE
+<br/>2662
+<br/>ICASSP 2016
+</td></tr><tr><td>28cd46a078e8fad370b1aba34762a874374513a5</td><td>CVPAPER.CHALLENGE IN 2016, JULY 2017
+<br/>cvpaper.challenge in 2016: Futuristic Computer
+<br/>Vision through 1,600 Papers Survey
+</td></tr><tr><td>282a3ee79a08486f0619caf0ada210f5c3572367</td><td></td></tr><tr><td>288dbc40c027af002298b38954d648fddd4e2fd3</td><td></td></tr><tr><td>28312c3a47c1be3a67365700744d3d6665b86f22</td><td></td></tr><tr><td>28b5b5f20ad584e560cd9fb4d81b0a22279b2e7b</td><td>A New Fuzzy Stacked Generalization Technique
+<br/>and Analysis of its Performance
+</td></tr><tr><td>28bc378a6b76142df8762cd3f80f737ca2b79208</td><td>Understanding Objects in Detail with Fine-grained Attributes
+<br/>Ross Girshick5
+<br/>David Weiss7
+</td></tr><tr><td>287900f41dd880802aa57f602e4094a8a9e5ae56</td><td></td></tr><tr><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td><td></td></tr><tr><td>2866cbeb25551257683cf28f33d829932be651fe</td><td>In Proceedings of the 2018 IEEE International Conference on Image Processing (ICIP)
+<br/>The final publication is available at: http://dx.doi.org/10.1109/ICIP.2018.8451026
+<br/>A TWO-STEP LEARNING METHOD FOR DETECTING LANDMARKS
+<br/>ON FACES FROM DIFFERENT DOMAINS
+<br/>Erickson R. Nascimento
+<br/>Universidade Federal de Minas Gerais (UFMG), Brazil
+</td></tr><tr><td>28aa89b2c827e5dd65969a5930a0520fdd4a3dc7</td><td></td></tr><tr><td>28b061b5c7f88f48ca5839bc8f1c1bdb1e6adc68</td><td>Predicting User Annoyance Using Visual Attributes
+<br/>Virginia Tech
+<br/>Goibibo
+<br/>Virginia Tech
+<br/>Virginia Tech
+</td></tr><tr><td>17a85799c59c13f07d4b4d7cf9d7c7986475d01c</td><td>ADVERTIMENT. La consulta d’aquesta tesi queda condicionada a l’acceptació de les següents
+<br/>condicions d'ús: La difusió d’aquesta tesi per mitjà del servei TDX (www.tesisenxarxa.net) ha
+<br/>estat autoritzada pels titulars dels drets de propietat intel·lectual únicament per a usos privats
+<br/>emmarcats en activitats d’investigació i docència. No s’autoritza la seva reproducció amb finalitats
+<br/>de lucre ni la seva difusió i posada a disposició des d’un lloc aliè al servei TDX. No s’autoritza la
+<br/>presentació del seu contingut en una finestra o marc aliè a TDX (framing). Aquesta reserva de
+<br/>drets afecta tant al resum de presentació de la tesi com als seus continguts. En la utilització o cita
+<br/>de parts de la tesi és obligat indicar el nom de la persona autora.
+<br/>ADVERTENCIA. La consulta de esta tesis queda condicionada a la aceptación de las siguientes
+<br/>condiciones de uso: La difusión de esta tesis por medio del servicio TDR (www.tesisenred.net) ha
+<br/>sido autorizada por los titulares de los derechos de propiedad intelectual únicamente para usos
+<br/>privados enmarcados en actividades de investigación y docencia. No se autoriza su reproducción
+<br/>con finalidades de lucro ni su difusión y puesta a disposición desde un sitio ajeno al servicio TDR.
+<br/>No se autoriza la presentación de su contenido en una ventana o marco ajeno a TDR (framing).
+<br/>Esta reserva de derechos afecta tanto al resumen de presentación de la tesis como a sus
+<br/>contenidos. En la utilización o cita de partes de la tesis es obligado indicar el nombre de la
+<br/>persona autora.
+<br/>WARNING. On having consulted this thesis you’re accepting the following use conditions:
+<br/>Spreading this thesis by the TDX (www.tesisenxarxa.net) service has been authorized by the
+<br/>titular of the intellectual property rights only for private uses placed in investigation and teaching
+<br/>activities. Reproduction with lucrative aims is not authorized neither its spreading and availability
+<br/>from a site foreign to the TDX service. Introducing its content in a window or frame foreign to the
+<br/>TDX service is not authorized (framing). This rights affect to the presentation summary of the
+<br/>thesis as well as to its contents. In the using or citation of parts of the thesis it’s obliged to indicate
+<br/>the name of the author
+</td></tr><tr><td>176f26a6a8e04567ea71677b99e9818f8a8819d0</td><td>MEG: Multi-Expert Gender classification from
+<br/>face images in a demographics-balanced dataset
+</td></tr><tr><td>17035089959a14fe644ab1d3b160586c67327db2</td><td></td></tr><tr><td>17a995680482183f3463d2e01dd4c113ebb31608</td><td>IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. X, NO. Y, MONTH Z
+<br/>Structured Label Inference for
+<br/>Visual Understanding
+</td></tr><tr><td>17aa78bd4331ef490f24bdd4d4cd21d22a18c09c</td><td></td></tr><tr><td>17c0d99171efc957b88c31a465c59485ab033234</td><td></td></tr><tr><td>1742ffea0e1051b37f22773613f10f69d2e4ed2c</td><td></td></tr><tr><td>1791f790b99471fc48b7e9ec361dc505955ea8b1</td><td></td></tr><tr><td>174930cac7174257515a189cd3ecfdd80ee7dd54</td><td>Multi-view Face Detection Using Deep Convolutional
+<br/>Neural Networks
+<br/>Yahoo
+<br/>Mohammad Saberian
+<br/>inc.com
+<br/>Yahoo
+<br/>Yahoo
+</td></tr><tr><td>17fad2cc826d2223e882c9fda0715fcd5475acf3</td><td></td></tr><tr><td>1750db78b7394b8fb6f6f949d68f7c24d28d934f</td><td>Detecting Facial Retouching Using Supervised
+<br/>Deep Learning
+<br/>Bowyer, Fellow, IEEE
+</td></tr><tr><td>173657da03e3249f4e47457d360ab83b3cefbe63</td><td>HKU-Face: A Large Scale Dataset for
+<br/>Deep Face Recognition
+<br/>Final Report
+<br/>3035140108
+<br/>COMP4801 Final Year Project
+<br/>Project Code: 17007
+</td></tr><tr><td>7ba0bf9323c2d79300f1a433ff8b4fe0a00ad889</td><td></td></tr><tr><td>7bfe085c10761f5b0cc7f907bdafe1ff577223e0</td><td></td></tr><tr><td>7b9b3794f79f87ca8a048d86954e0a72a5f97758</td><td>DOI 10.1515/jisys-2013-0016      Journal of Intelligent Systems 2013; 22(4): 365–415
+<br/>Passing an Enhanced Turing Test –
+<br/>Interacting with Lifelike Computer
+<br/>Representations of Specific Individuals 
+</td></tr><tr><td>7b0f1fc93fb24630eb598330e13f7b839fb46cce</td><td>Learning to Find Eye Region Landmarks for Remote Gaze
+<br/>Estimation in Unconstrained Settings
+<br/>ETH Zurich
+<br/>MPI for Informatics
+<br/>MPI for Informatics
+<br/>ETH Zurich
+</td></tr><tr><td>7bdcd85efd1e3ce14b7934ff642b76f017419751</td><td>289
+<br/>Learning Discriminant Face Descriptor
+</td></tr><tr><td>7b3b7769c3ccbdf7c7e2c73db13a4d32bf93d21f</td><td>On the Design and Evaluation of Robust Head Pose for
+<br/>Visual User Interfaces: Algorithms, Databases, and
+<br/>Comparisons
+<br/>Laboratory of Intelligent and
+<br/>Safe Automobiles
+<br/>UCSD - La Jolla, CA, USA
+<br/>Laboratory of Intelligent and
+<br/>Safe Automobiles
+<br/>UCSD - La Jolla, CA, USA
+<br/>Laboratory of Intelligent and
+<br/>Safe Automobiles
+<br/>UCSD - La Jolla, CA, USA
+<br/>Laboratory of Intelligent and
+<br/>Safe Automobiles
+<br/>UCSD - La Jolla, CA, USA
+<br/>Mohan Trivedi
+<br/>Laboratory of Intelligent and
+<br/>Safe Automobiles
+<br/>UCSD - La Jolla, CA, USA
+</td></tr><tr><td>8f772d9ce324b2ef5857d6e0b2a420bc93961196</td><td>MAHPOD et al.: CFDRNN
+<br/>Facial Landmark Point Localization using
+<br/>Coarse-to-Fine Deep Recurrent Neural Network
+</td></tr><tr><td>8fb611aca3bd8a3a0527ac0f38561a5a9a5b8483</td><td></td></tr><tr><td>8fda2f6b85c7e34d3e23927e501a4b4f7fc15b2a</td><td>Feature Selection with Annealing for Big Data
+<br/>Learning
+</td></tr><tr><td>8f9c37f351a91ed416baa8b6cdb4022b231b9085</td><td>Generative Adversarial Style Transfer Networks for Face Aging
+<br/>Sveinn Palsson
+<br/>D-ITET, ETH Zurich
+<br/>Eirikur Agustsson
+<br/>D-ITET, ETH Zurich
+</td></tr><tr><td>8f8c0243816f16a21dea1c20b5c81bc223088594</td><td></td></tr><tr><td>8f89aed13cb3555b56fccd715753f9ea72f27f05</td><td>Attended End-to-end Architecture for Age
+<br/>Estimation from Facial Expression Videos
+</td></tr><tr><td>8f9f599c05a844206b1bd4947d0524234940803d</td><td></td></tr><tr><td>8fd9c22b00bd8c0bcdbd182e17694046f245335f</td><td>  
+<br/>Recognizing Facial Expressions in Videos
+</td></tr><tr><td>8a866bc0d925dfd8bb10769b8b87d7d0ff01774d</td><td>WikiArt Emotions: An Annotated Dataset of Emotions Evoked by Art
+<br/>National Research Council Canada
+</td></tr><tr><td>8a40b6c75dd6392ee0d3af73cdfc46f59337efa9</td><td></td></tr><tr><td>8a91ad8c46ca8f4310a442d99b98c80fb8f7625f</td><td>2592
+<br/>2D Segmentation Using a Robust Active
+<br/>Shape Model With the EM Algorithm
+</td></tr><tr><td>8aed6ec62cfccb4dba0c19ee000e6334ec585d70</td><td>Localizing and Visualizing Relative Attributes
+</td></tr><tr><td>8a336e9a4c42384d4c505c53fb8628a040f2468e</td><td>Wang and Luo EURASIP Journal on Bioinformatics
+<br/>and Systems Biology (2016) 2016:13
+<br/>DOI 10.1186/s13637-016-0048-7
+<br/>R ES EAR CH
+<br/>Detecting Visually Observable Disease
+<br/>Symptoms from Faces
+<br/>Open Access
+</td></tr><tr><td>7e600faee0ba11467d3f7aed57258b0db0448a72</td><td></td></tr><tr><td>7e8016bef2c180238f00eecc6a50eac473f3f138</td><td>TECHNISCHE UNIVERSIT ¨AT M ¨UNCHEN
+<br/>Lehrstuhl f¨ur Mensch-Maschine-Kommunikation
+<br/>Immersive Interactive Data Mining and Machine
+<br/>Learning Algorithms for Big Data Visualization
+<br/>Vollst¨andiger Abdruck der von der Fakult¨at f¨ur Elektrotechnik und Informationstechnik
+<br/>der Technischen Universit¨at M¨unchen zur Erlangung des akademischen Grades eines
+<br/>Doktor-Ingenieurs (Dr.-Ing.)
+<br/>genehmigten Dissertation.
+<br/>Vorsitzender:
+<br/>Univ.-Prof. Dr. sc.techn. Andreas Herkersdorf
+<br/>Pr¨ufer der Dissertation:
+<br/>1. Univ.-Prof. Dr.-Ing. habil. Gerhard Rigoll
+<br/>2. Univ.-Prof. Dr.-Ing. habil. Dirk Wollherr
+<br/>3. Prof. Dr. Mihai Datcu
+<br/>Die Dissertation wurde am 13.08.2015 bei der Technischen Universit¨at M¨unchen eingerei-
+<br/>cht und durch die Fakult¨at f¨ur Elektrotechnik und Informationstechnik am 16.02.2016
+<br/>angenommen.
+</td></tr><tr><td>7e3367b9b97f291835cfd0385f45c75ff84f4dc5</td><td>Improved Local Binary Pattern Based Action Unit Detection Using
+<br/>Morphological and Bilateral Filters
+<br/>1Signal Processing Laboratory (LTS5)
+<br/>´Ecole Polytechnique F´ed´erale de Lausanne,
+<br/>Switzerland
+<br/>2nViso SA
+<br/>Lausanne, Switzerland
+</td></tr><tr><td>7ed6ff077422f156932fde320e6b3bd66f8ffbcb</td><td>State of 3D Face Biometrics for Homeland Security Applications
+<br/>Chaudhari4
+</td></tr><tr><td>7e507370124a2ac66fb7a228d75be032ddd083cc</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TAFFC.2017.2708106, IEEE
+<br/>Transactions on Affective Computing
+<br/>Dynamic Pose-Robust Facial Expression
+<br/>Recognition by Multi-View Pairwise Conditional
+<br/>Random Forests
+<br/>1 Sorbonne Universit´es, UPMC Univ Paris 06
+<br/>CNRS, UMR 7222, F-75005, Paris, France
+</td></tr><tr><td>1056347fc5e8cd86c875a2747b5f84fd570ba232</td><td></td></tr><tr><td>10e7dd3bbbfbc25661213155e0de1a9f043461a2</td><td>Cross Euclidean-to-Riemannian Metric Learning
+<br/>with Application to Face Recognition from Video
+</td></tr><tr><td>10ab1b48b2a55ec9e2920a5397febd84906a7769</td><td></td></tr><tr><td>10ce3a4724557d47df8f768670bfdd5cd5738f95</td><td>Fihe igh Fie
+<br/>Ac e ad 
+<br/>Ra
+<br/>The Rbic i e Caegie e
+<br/>5000 Fbe Ave e ib gh A 15213
+<br/>Abac.  ay face ecgii ak he e ad i
+<br/>cdii f he be ad ga
+<br/>
+<br/>a di(cid:11)ee e ad de a di(cid:11)ee i
+<br/>ecgii a
+<br/> bjec ca ed a abiay e ad de abiay i
+<br/>ad ay  be f be iage agai ca ed a abiay e ad
+<br/> de abiay i
+<br/>Fihe
+<br/>iage. achig bewee he be ad ga
+<br/>he Fihe
+<br/>d ci
+<br/> ay face ecgii ceai he e f he be ad ga
+<br/>di(cid:11)ee. The ga
+<br/>The a
+<br/>ga
+<br/>view ca ed f a caea i he ce f he . The  be f ga
+<br/>ad be iage ca a
+<br/>iage f each  bjec a fa
+<br/>yica
+<br/>iage a ig
+<br/>Face ecgii ac e i.e. face ecgii whee he ga
+<br/>iage d  have he ae e ha eceived vey
+<br/>have bee ed which ca ecgize face [1]  e geea
+<br/>a a vaiey f e.
+<br/>a evey e. A
+<br/>f exa
+<br/>caiig a
+<br/>iai vaiai.  ca be ed wih abiay ga
+<br/>Afe e vaiai he ex  igi(cid:12)ca fac a(cid:11)ecig he aea
+<br/>ace f face i i
+<br/>face ecgii ac i
+<br/>face [4 5]. 
+<br/>i
+</td></tr><tr><td>102e374347698fe5404e1d83f441630b1abf62d9</td><td>Facial Image Analysis for Fully-Automatic
+<br/>Prediction of Difficult Endotracheal Intubation
+</td></tr><tr><td>100641ed8a5472536dde53c1f50fa2dd2d4e9be9</td><td>Visual Attributes for Enhanced Human-Machine Communication*
+</td></tr><tr><td>10195a163ab6348eef37213a46f60a3d87f289c5</td><td></td></tr><tr><td>10e704c82616fb5d9c48e0e68ee86d4f83789d96</td><td></td></tr><tr><td>101569eeef2cecc576578bd6500f1c2dcc0274e2</td><td>Multiaccuracy: Black-Box Post-Processing for Fairness in
+<br/>Classification
+<br/>James Zou
+</td></tr><tr><td>106732a010b1baf13c61d0994552aee8336f8c85</td><td>Expanded Parts Model for Semantic Description
+<br/>of Humans in Still Images
+</td></tr><tr><td>10e70a34d56258d10f468f8252a7762950830d2b</td><td></td></tr><tr><td>102b27922e9bd56667303f986404f0e1243b68ab</td><td>Wang et al. Appl Inform (2017) 4:13
+<br/>DOI 10.1186/s40535-017-0042-5
+<br/>RESEARCH
+<br/>Multiscale recurrent regression networks
+<br/>for face alignment
+<br/>Open Access
+<br/>*Correspondence:
+<br/>3 State Key Lab of Intelligent
+<br/>Technologies and Systems,
+<br/>Beijing 100084, People’s
+<br/>Republic of China
+<br/>Full list of author information
+<br/>is available at the end of the
+<br/>article
+</td></tr><tr><td>10fcbf30723033a5046db791fec2d3d286e34daa</td><td>On-Line Cursive Handwriting Recognition: A Survey of Methods
+<br/>and Performances
+<br/>*Faculty of Computer Science & Information Systems, Universiti Teknologi Malaysia (UTM) , 81310
+<br/>Skudai, Johor, Malaysia.
+</td></tr><tr><td>108b2581e07c6b7ca235717c749d45a1fa15bb24</td><td>Using Stereo Matching with General Epipolar
+<br/>Geometry for 2D Face Recognition
+<br/>across Pose
+</td></tr><tr><td>10d334a98c1e2a9e96c6c3713aadd42a557abb8b</td><td>Scene Text Recognition using Part-based Tree-structured Character Detection
+<br/>State Key Laboratory of Management and Control for Complex Systems, CASIA, Beijing, China
+</td></tr><tr><td>192723085945c1d44bdd47e516c716169c06b7c0</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation
+<br/>Vision and Attention Theory Based Sampling
+<br/>for Continuous Facial Emotion Recognition
+<br/>Ninad S. Thakoor, Member, IEEE
+<br/>10
+<br/>11
+<br/>12
+<br/>13
+<br/>14
+<br/>15
+<br/>16
+<br/>17
+<br/>18
+<br/>19
+<br/>20
+<br/>21
+<br/>22
+<br/>23
+<br/>24
+<br/>25
+<br/>26
+<br/>27
+<br/>28
+<br/>29
+<br/>30
+<br/>31
+<br/>32
+<br/>33
+<br/>34
+<br/>35
+<br/>36
+<br/>37
+</td></tr><tr><td>19fb5e5207b4a964e5ab50d421e2549ce472baa8</td><td>International Conference on Computer Systems and Technologies - CompSysTech’14
+<br/>Online Emotional Facial Expression Dictionary
+<br/>Léon Rothkrantz
+</td></tr><tr><td>1962e4c9f60864b96c49d85eb897141486e9f6d1</td><td>Neural Comput & Applic (2011) 20:565–573
+<br/>DOI 10.1007/s00521-011-0577-7
+<br/>O R I G I N A L A R T I C L E
+<br/>Locality preserving embedding for face and handwriting digital
+<br/>recognition
+<br/>Received: 3 December 2008 / Accepted: 11 March 2011 / Published online: 1 April 2011
+<br/>Ó Springer-Verlag London Limited 2011
+<br/>supervised manifold
+<br/>the local sub-manifolds.
+</td></tr><tr><td>19af008599fb17bbd9b12288c44f310881df951c</td><td>Discriminative Local Sparse Representations for
+<br/>Robust Face Recognition
+</td></tr><tr><td>19296e129c70b332a8c0a67af8990f2f4d4f44d1</td><td>Metric Learning Approaches for Face Identification
+<br/>Is that you?
+<br/>M. Guillaumin, J. Verbeek and C. Schmid
+<br/>LEAR team, INRIA Rhˆone-Alpes, France
+<br/>Supplementary Material
+</td></tr><tr><td>19666b9eefcbf764df7c1f5b6938031bcf777191</td><td>Group Component Analysis for Multi-block Data:
+<br/>Common and Individual Feature Extraction
+</td></tr><tr><td>190b3caa2e1a229aa68fd6b1a360afba6f50fde4</td><td></td></tr><tr><td>19c0c7835dba1a319b59359adaa738f0410263e8</td><td>228
+<br/>Natural Image Statistics and
+<br/>Low-Complexity Feature Selection
+</td></tr><tr><td>19808134b780b342e21f54b60095b181dfc7a600</td><td></td></tr><tr><td>19d583bf8c5533d1261ccdc068fdc3ef53b9ffb9</td><td>FaceNet: A Unified Embedding for Face Recognition and Clustering
+<br/>Google Inc.
+<br/>Google Inc.
+<br/>Google Inc.
+</td></tr><tr><td>197c64c36e8a9d624a05ee98b740d87f94b4040c</td><td>Regularized Greedy Column Subset Selection
+<br/>aDepartment of Computer Systems, Universidad Polit´ecnica de Madrid
+<br/>bDepartment of Applied Mathematics, Universidad Polit´ecnica de Madrid
+</td></tr><tr><td>19d4855f064f0d53cb851e9342025bd8503922e2</td><td>Learning SURF Cascade for Fast and Accurate Object Detection
+<br/>Intel Labs China
+</td></tr><tr><td>19eb486dcfa1963c6404a9f146c378fc7ae3a1df</td><td></td></tr><tr><td>4c6daffd092d02574efbf746d086e6dc0d3b1e91</td><td></td></tr><tr><td>4c6e1840451e1f86af3ef1cb551259cb259493ba</td><td>HAND POSTURE DATASET CREATION FOR GESTURE
+<br/>RECOGNITION
+<br/>Instituto de Sistemas Inteligentes y Aplicaciones Numericas en Ingenieria
+<br/>Campus Universitario de Tafira, 35017 Gran Canaria, Spain
+<br/>Departamento de E.I.O. y Computacion
+<br/>38271 Universidad de La Laguna, Spain
+<br/>Keywords:
+<br/>Image understanding, Gesture recognition, Hand dataset.
+</td></tr><tr><td>4c29e1f31660ba33e46d7e4ffdebb9b8c6bd5adc</td><td></td></tr><tr><td>4c815f367213cc0fb8c61773cd04a5ca8be2c959</td><td>978-1-4244-4296-6/10/$25.00 ©2010 IEEE
+<br/>2470
+<br/>ICASSP 2010
+</td></tr><tr><td>4c4236b62302957052f1bbfbd34dbf71ac1650ec</td><td>SEMI-SUPERVISED FACE RECOGNITION WITH LDA SELF-TRAINING
+<br/>Multimedia Communications Department, EURECOM
+<br/>2229 Route des Crêtes , BP 193, F-06560 Sophia-Antipolis Cedex, France
+</td></tr><tr><td>2661f38aaa0ceb424c70a6258f7695c28b97238a</td><td>IEEE TRANSACTIONS ON SYSTEMS, MAN, AND CYBERNETICS—PART B: CYBERNETICS, VOL. 42, NO. 4, AUGUST 2012
+<br/>1027
+<br/>Multilayer Architectures for Facial
+<br/>Action Unit Recognition
+</td></tr><tr><td>2609079d682998da2bc4315b55a29bafe4df414e</td><td>ON RANK AGGREGATION FOR FACE RECOGNITION FROM VIDEOS
+<br/>IIIT-Delhi, India
+</td></tr><tr><td>26a72e9dd444d2861298d9df9df9f7d147186bcd</td><td>DOI 10.1007/s00138-016-0768-4
+<br/>ORIGINAL PAPER
+<br/>Collecting and annotating the large continuous action dataset
+<br/>Received: 18 June 2015 / Revised: 18 April 2016 / Accepted: 22 April 2016 / Published online: 21 May 2016
+<br/>© The Author(s) 2016. This article is published with open access at Springerlink.com
+</td></tr><tr><td>265af79627a3d7ccf64e9fe51c10e5268fee2aae</td><td>1817
+<br/>A Mixture of Transformed Hidden Markov
+<br/>Models for Elastic Motion Estimation
+</td></tr><tr><td>267c6e8af71bab68547d17966adfaab3b4711e6b</td><td></td></tr><tr><td>26a89701f4d41806ce8dbc8ca00d901b68442d45</td><td></td></tr><tr><td>26ad6ceb07a1dc265d405e47a36570cb69b2ace6</td><td>RESEARCH AND EXPLOR ATORY
+<br/>DEVELOPMENT DEPARTMENT
+<br/>REDD-2015-384
+<br/>Neural Correlates of Cross-Cultural
+<br/>How to Improve the Training and Selection for
+<br/>Military Personnel Involved in Cross-Cultural
+<br/>Operating Under Grant #N00014-12-1-0629/113056
+<br/>Adaptation
+<br/>September, 2015
+<br/>Interactions
+<br/>Prepared for:
+<br/>Office of Naval Research
+</td></tr><tr><td>26e570049aaedcfa420fc8c7b761bc70a195657c</td><td>J Sign Process Syst
+<br/>DOI 10.1007/s11265-017-1276-0
+<br/>Hybrid Facial Regions Extraction for Micro-expression
+<br/>Recognition System
+<br/>Received: 2 February 2016 / Revised: 20 October 2016 / Accepted: 10 August 2017
+<br/>© Springer Science+Business Media, LLC 2017
+</td></tr><tr><td>21ef129c063bad970b309a24a6a18cbcdfb3aff5</td><td>POUR L'OBTENTION DU GRADE DE DOCTEUR ÈS SCIENCESacceptée sur proposition du jury:Dr J.-M. Vesin, président du juryProf. J.-Ph. Thiran, Prof. D. Sander, directeurs de thèseProf. M. F. Valstar, rapporteurProf. H. K. Ekenel, rapporteurDr S. Marcel, rapporteurIndividual and Inter-related Action Unit Detection in Videos for Affect RecognitionTHÈSE NO 6837 (2016)ÉCOLE POLYTECHNIQUE FÉDÉRALE DE LAUSANNEPRÉSENTÉE LE 19 FÉVRIER 2016À LA FACULTÉ DES SCIENCES ET TECHNIQUES DE L'INGÉNIEURLABORATOIRE DE TRAITEMENT DES SIGNAUX 5PROGRAMME DOCTORAL EN GÉNIE ÉLECTRIQUE Suisse2016PARAnıl YÜCE </td></tr><tr><td>218b2c5c9d011eb4432be4728b54e39f366354c1</td><td>Enhancing Training Collections for Image
+<br/>Annotation: An Instance-Weighted Mixture
+<br/>Modeling Approach
+</td></tr><tr><td>21e828071249d25e2edaca0596e27dcd63237346</td><td></td></tr><tr><td>2162654cb02bcd10794ae7e7d610c011ce0fb51b</td><td>4697
+<br/>978-1-4799-5751-4/14/$31.00 ©2014 IEEE
+<br/>1http://www.skype.com/
+<br/>2http://www.google.com/hangouts/
+<br/>tification, sparse coding
+</td></tr><tr><td>21f3c5b173503185c1e02a3eb4e76e13d7e9c5bc</td><td>m a s s a c h u s e t t s i n s t i t u t e o f
+<br/>t e c h n o l o g y — a r t i f i c i a l i n t e l l i g e n c e l a b o r a t o r y
+<br/>Rotation Invariant Real-time
+<br/>Face Detection and
+<br/>Recognition System
+<br/>AI Memo 2001-010
+<br/>CBCL Memo 197
+<br/>May 31, 2001
+<br/>© 2 0 0 1 m a s s a c h u s e t t s i n s t i t u t e o f
+<br/>t e c h n o l o g y, c a m b r i d g e , m a 0 2 1 3 9 u s a — w w w. a i . m i t . e d u
+</td></tr><tr><td>21bd9374c211749104232db33f0f71eab4df35d5</td><td>Integrating Facial Makeup Detection Into
+<br/>Multimodal Biometric User Verification System
+<br/>CuteSafe Technology Inc.
+<br/>Gebze, Kocaeli, Turkey
+<br/>Eurecom Digital Security Department
+<br/>06410 Biot, France
+</td></tr><tr><td>213a579af9e4f57f071b884aa872651372b661fd</td><td>Int J Comput Vis
+<br/>DOI 10.1007/s11263-013-0672-6
+<br/>Automatic and Efficient Human Pose Estimation for Sign
+<br/>Language Videos
+<br/>Received: 4 February 2013 / Accepted: 29 October 2013
+<br/>© Springer Science+Business Media New York 2013
+</td></tr><tr><td>21626caa46cbf2ae9e43dbc0c8e789b3dbb420f1</td><td>978-1-4673-2533-2/12/$26.00 ©2012 IEEE
+<br/>1437
+<br/>ICIP 2012
+</td></tr><tr><td>4d49c6cff198cccb21f4fa35fd75cbe99cfcbf27</td><td>Topological Principal Component Analysis for
+<br/>face encoding and recognition
+<br/>Juan J. Villanueva
+<br/>Computer Vision Center and Departament d’Inform(cid:18)atica, Edi(cid:12)ci O, Universitat
+<br/>Aut(cid:18)onoma de Barcelona
+</td></tr><tr><td>4da735d2ed0deeb0cae4a9d4394449275e316df2</td><td>Gothenburg, Sweden, June 19-22, 2016
+<br/>978-1-5090-1820-8/16/$31.00 ©2016 IEEE
+<br/>1410
+</td></tr><tr><td>4d530a4629671939d9ded1f294b0183b56a513ef</td><td>International Journal of Machine Learning and Computing, Vol. 2, No. 4, August 2012
+<br/>Facial Expression Classification Method Based on Pseudo
+<br/>Zernike Moment and Radial Basis Function Network
+<br/>
+</td></tr><tr><td>4d2975445007405f8cdcd74b7fd1dd547066f9b8</td><td>Image and Video Processing
+<br/>for Affective Applications
+</td></tr><tr><td>4df889b10a13021928007ef32dc3f38548e5ee56</td><td></td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td></td></tr><tr><td>4db9e5f19366fe5d6a98ca43c1d113dac823a14d</td><td>Combining Crowdsourcing and Face Recognition to Identify Civil War Soldiers
+<br/>Are 1,000 Features Worth A Picture?
+<br/>Department of Computer Science and Center for Human-Computer Interaction
+<br/>Virginia Tech, Arlington, VA, USA
+</td></tr><tr><td>4dd6d511a8bbc4d9965d22d79ae6714ba48c8e41</td><td></td></tr><tr><td>4d7e1eb5d1afecb4e238ba05d4f7f487dff96c11</td><td>978-1-5090-4117-6/17/$31.00 ©2017 IEEE
+<br/>2352
+<br/>ICASSP 2017
+</td></tr><tr><td>4d90bab42806d082e3d8729067122a35bbc15e8d</td><td></td></tr><tr><td>4d6ad0c7b3cf74adb0507dc886993e603c863e8c</td><td>Human Activity Recognition Based on Wearable
+<br/>Sensor Data: A Standardization of the
+<br/>State-of-the-Art
+<br/>Smart Surveillance Interest Group, Computer Science Department
+<br/>Universidade Federal de Minas Gerais, Brazil
+</td></tr><tr><td>4d0ef449de476631a8d107c8ec225628a67c87f9</td><td>© 2010 IEEE. Personal use of this material is permitted. Permission from IEEE
+<br/>must be obtained for all other uses, in any current or future media, including
+<br/>reprinting/republishing this material for advertising or promotional purposes,
+<br/>creating new collective works, for resale or redistribution to servers or lists, or
+<br/>reuse of any copyrighted component of this work in other works.
+<br/>Pre-print of article that appeared at BTAS 2010.
+<br/>The published article can be accessed from:
+<br/>http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=5634517
+</td></tr><tr><td>4d47261b2f52c361c09f7ab96fcb3f5c22cafb9f</td><td>Deep multi-frame face super-resolution
+<br/>Evgeniya Ustinova, Victor Lempitsky
+<br/>October 17, 2017
+</td></tr><tr><td>75879ab7a77318bbe506cb9df309d99205862f6c</td><td>Analysis Of Emotion Recognition From Facial
+<br/>Expressions Using Spatial And Transform Domain
+<br/>Methods
+</td></tr><tr><td>7574f999d2325803f88c4915ba8f304cccc232d1</td><td>Transfer Learning For Cross-Dataset Recognition: A Survey
+<br/>This paper summarises and analyses the cross-dataset recognition transfer learning techniques with the
+<br/>emphasis on what kinds of methods can be used when the available source and target data are presented
+<br/>in different forms for boosting the target task. This paper for the first time summarises several transferring
+<br/>criteria in details from the concept level, which are the key bases to guide what kind of knowledge to transfer
+<br/>between datasets. In addition, a taxonomy of cross-dataset scenarios and problems is proposed according the
+<br/>properties of data that define how different datasets are diverged, thereby review the recent advances on
+<br/>each specific problem under different scenarios. Moreover, some real world applications and corresponding
+<br/>commonly used benchmarks of cross-dataset recognition are reviewed. Lastly, several future directions are
+<br/>identified.
+<br/>Additional Key Words and Phrases: Cross-dataset, transfer learning, domain adaptation
+<br/>1. INTRODUCTION
+<br/>It has been explored how human would transfer learning in one context to another
+<br/>similar context [Woodworth and Thorndike 1901; Perkins et al. 1992] in the field of
+<br/>Psychology and Education. For example, learning to drive a car helps a person later
+<br/>to learn more quickly to drive a truck, and learning mathematics prepares students to
+<br/>study physics. The machine learning algorithms are mostly inspired by human brains.
+<br/>However, most of them require a huge amount of training examples to learn a new
+<br/>model from scratch and fail to apply knowledge learned from previous domains or
+<br/>tasks. This may be due to that a basic assumption of statistical learning theory is
+<br/>that the training and test data are drawn from the same distribution and belong to
+<br/>the same task. Intuitively, learning from scratch is not realistic and practical, because
+<br/>it violates how human learn things. In addition, manually labelling a large amount
+<br/>of data for new domain or task is labour extensive, especially for the modern “data-
+<br/>hungry” and “data-driven” learning techniques (i.e. deep learning). However, the big
+<br/>data era provides a huge amount available data collected for other domains and tasks.
+<br/>Hence, how to use the previously available data smartly for the current task with
+<br/>scarce data will be beneficial for real world applications.
+<br/>To reuse the previous knowledge for current tasks, the differences between old data
+<br/>and new data need to be taken into account. Take the object recognition as an ex-
+<br/>ample. As claimed by Torralba and Efros [2011], despite the great efforts of object
+<br/>datasets creators, the datasets appear to have strong build-in bias caused by various
+<br/>factors, such as selection bias, capture bias, category or label bias, and negative set
+<br/>bias. This suggests that no matter how big the dataset is, it is impossible to cover
+<br/>the complexity of the real visual world. Hence, the dataset bias needs to be consid-
+<br/>ered before reusing data from previous datasets. Pan and Yang [2010] summarise that
+<br/>the differences between different datasets can be caused by domain divergence (i.e.
+<br/>distribution shift or feature space difference) or task divergence (i.e. conditional dis-
+<br/>tribution shift or label space difference), or both. For example, in visual recognition,
+<br/>the distributions between the previous and current data can be discrepant due to the
+<br/>different environments, lighting, background, sensor types, resolutions, view angles,
+<br/>and post-processing. Those external factors may cause the distribution divergence or
+<br/>even feature space divergence between different domains. On the other hand, the task
+<br/>divergence between current and previous data is also ubiquitous. For example, it is
+<br/>highly possible that an animal species that we want to recognize have not been seen
+<br/>ACM Journal Name, Vol. V, No. N, Article A, Publication date: January YYYY.
+</td></tr><tr><td>75e9a141b85d902224f849ea61ab135ae98e7bfb</td><td></td></tr><tr><td>75503aff70a61ff4810e85838a214be484a674ba</td><td>Improved Facial Expression Recognition via Uni-Hyperplane Classification
+<br/>S.W. Chew∗, S. Lucey†, P. Lucey‡, S. Sridharan∗, and J.F. Cohn‡
+</td></tr><tr><td>75cd81d2513b7e41ac971be08bbb25c63c37029a</td><td></td></tr><tr><td>75e5ba7621935b57b2be7bf4a10cad66a9c445b9</td><td></td></tr><tr><td>75859ac30f5444f0d9acfeff618444ae280d661d</td><td>Multibiometric Cryptosystems based on Feature
+<br/>Level Fusion
+</td></tr><tr><td>758d7e1be64cc668c59ef33ba8882c8597406e53</td><td>IEEE TRANSACTIONS ON AFFECTIVE COMPUTING
+<br/>AffectNet: A Database for Facial Expression,
+<br/>Valence, and Arousal Computing in the Wild
+</td></tr><tr><td>754f7f3e9a44506b814bf9dc06e44fecde599878</td><td>Quantized Densely Connected U-Nets for
+<br/>Efficient Landmark Localization
+</td></tr><tr><td>75249ebb85b74e8932496272f38af274fbcfd696</td><td>Face Identification in Large Galleries
+<br/>Smart Surveillance Interest Group, Department of Computer Science
+<br/>Universidade Federal de Minas Gerais, Belo Horizonte, Brazil
+</td></tr><tr><td>81a142c751bf0b23315fb6717bc467aa4fdfbc92</td><td>978-1-5090-4117-6/17/$31.00 ©2017 IEEE
+<br/>1767
+<br/>ICASSP 2017
+</td></tr><tr><td>8147ee02ec5ff3a585dddcd000974896cb2edc53</td><td>Angular Embedding:
+<br/>A Robust Quadratic Criterion
+<br/>Stella X. Yu, Member,
+<br/>IEEE
+</td></tr><tr><td>8199803f476c12c7f6c0124d55d156b5d91314b6</td><td>The iNaturalist Species Classification and Detection Dataset
+<br/>1Caltech
+<br/>2Google
+<br/>3Cornell Tech
+<br/>4iNaturalist
+</td></tr><tr><td>81831ed8e5b304e9d28d2d8524d952b12b4cbf55</td><td></td></tr><tr><td>81b2a541d6c42679e946a5281b4b9dc603bc171c</td><td>Universit¨at Ulm | 89069 Ulm | Deutschland
+<br/>Fakult¨at f¨ur Ingenieurwissenschaften und Informatik
+<br/>Institut f¨ur Neuroinformatik
+<br/>Direktor: Prof. Dr. G¨unther Palm
+<br/>Semi-Supervised Learning with Committees:
+<br/>Exploiting Unlabeled Data Using Ensemble
+<br/>Learning Algorithms
+<br/>Dissertation zur Erlangung des Doktorgrades
+<br/>Doktor der Naturwissenschaften (Dr. rer. nat.)
+<br/>der Fakult¨at f¨ur Ingenieurwissenschaften und Informatik
+<br/>der Universit¨at Ulm
+<br/>vorgelegt von
+<br/>aus Kairo, ¨Agypten
+<br/>Ulm, Deutschland
+<br/>2010
+</td></tr><tr><td>8160b3b5f07deaa104769a2abb7017e9c031f1c1</td><td>683
+<br/>Exploiting Discriminant Information in Nonnegative
+<br/>Matrix Factorization With Application
+<br/>to Frontal Face Verification
+</td></tr><tr><td>816eff5e92a6326a8ab50c4c50450a6d02047b5e</td><td>fLRR: Fast Low-Rank Representation Using
+<br/>Frobenius Norm
+<br/>Low Rank Representation (LRR) intends to find the representation
+<br/>with lowest-rank of a given data set, which can be formulated as a
+<br/>rank minimization problem. Since the rank operator is non-convex and
+<br/>discontinuous, most of the recent works use the nuclear norm as a convex
+<br/>relaxation. This letter theoretically shows that under some conditions,
+<br/>Frobenius-norm-based optimization problem has an unique solution that
+<br/>is also a solution of the original LRR optimization problem. In other
+<br/>words, it is feasible to apply Frobenius-norm as a surrogate of the
+<br/>nonconvex matrix rank function. This replacement will largely reduce the
+<br/>time-costs for obtaining the lowest-rank solution. Experimental results
+<br/>show that our method (i.e., fast Low Rank Representation, fLRR),
+<br/>performs well in terms of accuracy and computation speed in image
+<br/>clustering and motion segmentation compared with nuclear-norm-based
+<br/>LRR algorithm.
+<br/>Introduction: Given a data set X ∈ Rm×n(m < n) composed of column
+<br/>vectors, let A be a data set composed of vectors with the same dimension
+<br/>as those in X. Both X and A can be considered as matrices. A linear
+<br/>representation of X with respect to A is a matrix Z that satisfies the
+<br/>equation X = AZ. The data set A is called a dictionary. In general, this
+<br/>linear matrix equation will have infinite solutions, and any solution can be
+<br/>considered to be a representation of X associated with the dictionary A. To
+<br/>obtain an unique Z and explore the latent structure of the given data set,
+<br/>various assumptions could be enforced over Z.
+<br/>Liu et al. recently proposed Low Rank Representation (LRR) [1] by
+<br/>assuming that data are approximately sampled from an union of low-rank
+<br/>subspaces. Mathematically, LRR aims at solving
+<br/>min rank(Z)
+<br/>s.t. X = AZ,
+<br/>(1)
+<br/>where rank(Z) could be defined as the number of nonzero eigenvalues of
+<br/>the matrix Z. Clearly, (1) is non-convex and discontinuous, whose convex
+<br/>relaxation is as follows,
+<br/>min kZk∗
+<br/>s.t. X = AZ,
+<br/>(2)
+<br/>where kZk∗ is the nuclear norm, which is a convex and continuous
+<br/>optimization problem.
+<br/>Considering the possible corruptions, the objective function of LRR is
+<br/>min kZk∗ + λkEkp
+<br/>s.t. X = AZ + E,
+<br/>(3)
+<br/>where k · kp could be ℓ1-norm for describing sparse corruption or ℓ2,1-
+<br/>norm for characterizing sample-specified corruption.
+<br/>The above nuclear-norm-based optimization problems are generally
+<br/>solved using Augmented Lagrange Multiplier algorithm (ALM) [2] which
+<br/>requires repeatedly performing Single Value Decomposition (SVD) over
+<br/>Z. Hence, this optimization program is inefficient.
+<br/>Beyond the nuclear-norm, do other norms exist that can be used as
+<br/>a surrogates for rank-minimization problem in LRR? Can we develop
+<br/>a fast algorithm to calculate LRR? This letter addresses these problems
+<br/>by theoretically showing the equivalence between the solutions of a
+<br/>Frobenius-norm-based problem and the original LRR problem. And we
+<br/>further develop fast Low Rank Representation (fLRR) based on the
+<br/>theoretical results.
+<br/>Theoretical Analysis: In the following analyses, Theorem 1 and
+<br/>Theorem 3 prove that Frobenius-norm-based problem is a surrogate of
+<br/>the rank-minimization problem of LRR in the case of clean data and
+<br/>corrupted ones, respectively. Theorem 2 shows that our Frobenius-norm-
+<br/>based method could produce a block-diagonal Z under some conditions.
+<br/>This property is helpful to subspace clustering.
+<br/>Let A ∈ Rm×n be a matrix with rank r. The full SVD and skinny
+<br/>SVD of A are A = U ΣV T and A = UrΣrV T
+<br/>r , where U and V are two
+<br/>orthogonal matrices with the size of m × m and n × n, respectively. In
+<br/>addition, Σ is an m × n rectangular diagonal matrix, its diagonal elements
+<br/>are nonnegative real numbers. Σr is a r × r diagonal matrix with singular
+<br/>values located on the diagonal in decreasing order, Ur and Vr consist of the
+<br/>first r columns of U and V , respectively. Clearly, Ur and Vr are column
+<br/>orthogonal matrices, i.e., U T
+<br/>r Vr = Ir, where Ir denotes the
+<br/>r Ur = Ir, V T
+<br/>identity matrix with the size of r × r. The pseudoinverse of A is defined
+<br/>by A† = VrΣ−1
+<br/>r U T
+<br/>r .
+<br/>Given a matrix M ∈ Rm×n, the Frobenius norm of M is defined by
+<br/>kM kF =ptrace (M T M ) =qPmin{m,n}
+<br/>value of M . Clearly, kM kF = 0 if and only if M = 0.
+<br/>i=1
+<br/>σ2
+<br/>i , where σi is a singular
+<br/>Lemma 1: Suppose P is a column orthogonal matrix, i.e., P T P = I. Then,
+<br/>kP M kF = kM kF .
+<br/>Lemma 2: For the matrices M and N with same number of columns, it
+<br/>holds that
+<br/>= kM k2
+<br/>F + kN k2
+<br/>F .
+<br/>(4)
+<br/>N (cid:21)(cid:13)(cid:13)(cid:13)(cid:13)
+<br/>(cid:13)(cid:13)(cid:13)(cid:13)
+<br/>(cid:20) M
+<br/>The proofs of the above two lemmas are trivial.
+<br/>Theorem 1:
+<br/>minimization problem
+<br/>Suppose
+<br/>that X ∈ span{A},
+<br/>the Frobenius norm
+<br/>min kZkF
+<br/>s.t. X = AZ,
+<br/>(5)
+<br/>has an unique solution Z ∗ = A†X which is also the lowest-rank solution
+<br/>of LRR in terms of (1).
+<br/>Proof: Let the full and skinny SVDs of A be A = U ΣV T and A =
+<br/>r U T
+<br/>UrΣrV T
+<br/>r .
+<br/>r , respectively. Then, the pseudoinverse of A is A† = VrΣ−1
+<br/>Defining Vc by V T =(cid:20) V T
+<br/>V T
+<br/>(cid:21) and V T
+<br/>c Vr = 0. Moreover, it can be easily
+<br/>checked that Z ∗ satisfies X = AZ ∗ owing to X ∈ span{A}.
+<br/>To prove that Z ∗ is the unique solution of the optimization problem
+<br/>(5), two steps are required. First, we will prove that, for any solution Z of
+<br/>X = AZ, it must hold that kZkF ≥ kZ ∗kF . Using Lemma 1, we have
+<br/>kZkF = (cid:13)(cid:13)(cid:13)(cid:13)
+<br/>= (cid:13)(cid:13)(cid:13)(cid:13)
+<br/>V T
+<br/>(cid:20) V T
+<br/>(cid:20) V T
+<br/>(cid:21) [Z ∗ + (Z − Z ∗)](cid:13)(cid:13)(cid:13)(cid:13)F
+<br/>c (Z − Z ∗) (cid:21)(cid:13)(cid:13)(cid:13)(cid:13)F
+<br/>r (Z − Z ∗)
+<br/>r Z ∗ + V T
+<br/>c Z ∗ + V T
+<br/>V T
+<br/>As A (Z − Z ∗) = 0,
+<br/>r (Z − Z ∗) = 0. Denote B = Σ−1
+<br/>V T
+<br/>V T
+<br/>c Vr = 0, we have V T
+<br/>i.e., UrΣrV T
+<br/>r U T
+<br/>c VrB = 0. Then,
+<br/>r (Z − Z ∗) = 0,
+<br/>r X,
+<br/>follows that
+<br/>then Z ∗ = VrB. Because
+<br/>it
+<br/>c Z ∗ = V T
+<br/>(cid:20)
+<br/>kZkF =(cid:13)(cid:13)(cid:13)(cid:13)
+<br/>V T
+<br/>c (Z − Z ∗) (cid:21)(cid:13)(cid:13)(cid:13)(cid:13)F
+<br/>By Lemma 2,
+<br/>kZk2
+<br/>F = kBk2
+<br/>F + kV T
+<br/>c (Z − Z ∗)k2
+<br/>F ,
+<br/>then, kZkF ≥ kBkF .
+<br/>By Lemma 1,
+<br/>kBkF = kVrBkF = kZ ∗kF ,
+<br/>(6)
+<br/>(7)
+<br/>(8)
+<br/>thus, kZkF ≥ kZ ∗kF for any solution Z of X = AZ.
+<br/>In the second step, we will prove that if there exists another solution Z
+<br/>of (5), Z = Z ∗ must hold. Clearly, Z is a solution of (5) which implies that
+<br/>X = AZ and kZkF = kZ ∗kF . From (7) and (8),
+<br/>kZk2
+<br/>F + kV T
+<br/>F = kZ ∗k2
+<br/>Since kZkF = kZ ∗kF ,
+<br/>c (Z − Z ∗) k2
+<br/>F .
+<br/>c (Z − Z ∗) kF = 0,
+<br/>r (Z − Z ∗) = 0, this gives
+<br/>and so V T
+<br/>V T (Z − Z ∗) = 0. Because V is an orthogonal matrix, it must hold
+<br/>that Z = Z ∗. The above proves that Z ∗ is the unique solution of the
+<br/>optimization problem (5).
+<br/>c (Z − Z ∗) = 0. Together with V T
+<br/>it must hold that kV T
+<br/>(9)
+<br/>Next, we prove that Z ∗ is also a solution of the LRR optimization
+<br/>problem (1). Clearly, for any solution Z of X = AZ,
+<br/>it holds that
+<br/>rank(Z) ≥ rank(AZ) = rank(X). On the other hand, rank(Z ∗) =
+<br/>rank(A†X) ≤ rank(X). Thus, rank(Z ∗) = rank(X). This shows that
+<br/>Z ∗ is the lowest-rank solution of the LRR optimization problem (1). The
+<br/>proof is complete.
+<br/>(cid:4)
+<br/>In the following, Theorem 2 will show that the optimal Z of (5) will
+<br/>be block-diagonal if the data are sampled from a set of independent
+<br/>subspaces {S1, S2, · · · , Sk}, where the dimensionality of Si is ri and
+<br/>i = {1, 2, · · · , k}. Note that, {S1, S2, · · · , Sk} are independent if and
+<br/>only if SiTPj6=i Sj = {0}. Suppose that X = [X1, X2, · · · , Xk] and
+<br/>A = [A1, A2, · · · , Ak], where Ai and Xi contain mi and ni data points
+<br/>ELECTRONICS LETTERS 12th December 2011 Vol. 00 No. 00
+</td></tr><tr><td>8149c30a86e1a7db4b11965fe209fe0b75446a8c</td><td>Semi-Supervised Multiple Instance Learning based
+<br/>Domain Adaptation for Object Detection
+<br/>Siemens Corporate Research
+<br/>Siemens Corporate Research
+<br/>Siemens Corporate Research
+<br/>Amit Kale
+<br/>Bangalore
+<br/>Bangalore
+<br/>{chhaya.methani,
+<br/>Bangalore
+<br/>rahul.thota,
+</td></tr><tr><td>86b69b3718b9350c9d2008880ce88cd035828432</td><td>Improving Face Image Extraction by Using Deep Learning Technique
+<br/>National Library of Medicine, NIH, Bethesda, MD
+</td></tr><tr><td>86904aee566716d9bef508aa9f0255dc18be3960</td><td>Learning Anonymized Representations with
+<br/>Adversarial Neural Networks
+</td></tr><tr><td>867e709a298024a3c9777145e037e239385c0129</td><td> INTERNATIONAL JOURNAL
+<br/> OF PROFESSIONAL ENGINEERING STUDIES Volume VIII /Issue 2 / FEB 2017
+<br/>ANALYTICAL REPRESENTATION OF UNDERSAMPLED FACE
+<br/>RECOGNITION APPROACH BASED ON DICTIONARY LEARNING
+<br/>AND SPARSE REPRESENTATION
+<br/>(M.Tech)1, Assistant Professor2, Assistant Professor3, HOD of CSE Department4
+</td></tr><tr><td>86c053c162c08bc3fe093cc10398b9e64367a100</td><td>Cascade of Forests for Face Alignment
+</td></tr><tr><td>86b985b285c0982046650e8d9cf09565a939e4f9</td><td></td></tr><tr><td>861802ac19653a7831b314cd751fd8e89494ab12</td><td>Time-of-Flight and Depth Imaging. Sensors, Algorithms
+<br/>and Applications: Dagstuhl Seminar 2012 and GCPR
+<br/>Workshop on Imaging New Modalities (Lecture ... Vision,
+<br/>Pattern Recognition, and Graphics)
+<br/>Publisher: Springer; 2013 edition
+<br/>(November 8, 2013)
+<br/>Language: English
+<br/>Pages: 320
+<br/>ISBN: 978-3642449635
+<br/>Size: 20.46 MB
+<br/>Format: PDF / ePub / Kindle
+<br/>Cameras for 3D depth imaging, using
+<br/>either time-of-flight (ToF) or
+<br/>structured light sensors, have received
+<br/>a lot of attention recently and have
+<br/>been improved considerably over the
+<br/>last few years. The present
+<br/>techniques...
+</td></tr><tr><td>861b12f405c464b3ffa2af7408bff0698c6c9bf0</td><td>International Journal on Recent and Innovation Trends in Computing and Communication ISSN: 2321-8169
+<br/>Volume: 3 Issue: 5
+<br/> 3337 - 3342
+<br/>_______________________________________________________________________________________________
+<br/>An Effective Technique for Removal of Facial Dupilcation by SBFA
+<br/>Computer Department,
+<br/>GHRCEM,
+<br/>Pune, India
+<br/>Computer Department,
+<br/>GHRCEM,
+<br/> Pune, India
+</td></tr><tr><td>86e1bdbfd13b9ed137e4c4b8b459a3980eb257f6</td><td>The Kinetics Human Action Video Dataset
+<br/>Jo˜ao Carreira
+<br/>Paul Natsev
+</td></tr><tr><td>86b105c3619a433b6f9632adcf9b253ff98aee87</td><td>1­4244­0367­7/06/$20.00 ©2006 IEEE
+<br/>1013
+<br/>ICME 2006
+</td></tr><tr><td>86b51bd0c80eecd6acce9fc538f284b2ded5bcdd</td><td></td></tr><tr><td>8699268ee81a7472a0807c1d3b1db0d0ab05f40d</td><td></td></tr><tr><td>869583b700ecf33a9987447aee9444abfe23f343</td><td></td></tr><tr><td>72a00953f3f60a792de019a948174bf680cd6c9f</td><td>Stat Comput (2007) 17:57–70
+<br/>DOI 10.1007/s11222-006-9004-9
+<br/>Understanding the role of facial asymmetry in human face
+<br/>identification
+<br/>Received: May 2005 / Accepted: September 2006 / Published online: 30 January 2007
+<br/>C(cid:1) Springer Science + Business Media, LLC 2007
+</td></tr><tr><td>726b8aba2095eef076922351e9d3a724bb71cb51</td><td></td></tr><tr><td>721b109970bf5f1862767a1bec3f9a79e815f79a</td><td></td></tr><tr><td>72ecaff8b57023f9fbf8b5b2588f3c7019010ca7</td><td>Facial Keypoints Detection
+</td></tr><tr><td>72591a75469321074b072daff80477d8911c3af3</td><td>Group Component Analysis for Multi-block Data:
+<br/>Common and Individual Feature Extraction
+</td></tr><tr><td>729dbe38538fbf2664bc79847601f00593474b05</td><td></td></tr><tr><td>729a9d35bc291cc7117b924219bef89a864ce62c</td><td>Recognizing Material Properties from Images
+</td></tr><tr><td>721d9c387ed382988fce6fa864446fed5fb23173</td><td></td></tr><tr><td>72c0c8deb9ea6f59fde4f5043bff67366b86bd66</td><td>Age progression in Human Faces : A Survey
+</td></tr><tr><td>445461a34adc4bcdccac2e3c374f5921c93750f8</td><td>Emotional Expression Classification using Time-Series Kernels∗
+</td></tr><tr><td>4414a328466db1e8ab9651bf4e0f9f1fe1a163e4</td><td>1164
+<br/>© EURASIP, 2010 ISSN 2076-1465
+<br/>18th European Signal Processing Conference (EUSIPCO-2010)
+<br/>INTRODUCTION
+</td></tr><tr><td>442f09ddb5bb7ba4e824c0795e37cad754967208</td><td></td></tr><tr><td>446a99fdedd5bb32d4970842b3ce0fc4f5e5fa03</td><td>A Pose-Adaptive Constrained Local Model For
+<br/>Accurate Head Pose Tracking
+<br/>Eikeo
+<br/>11 rue Leon Jouhaux,
+<br/>F-75010, Paris, France
+<br/>Sorbonne Universit´es
+<br/>UPMC Univ Paris 06
+<br/>CNRS UMR 7222, ISIR
+<br/>F-75005, Paris, France
+<br/>Eikeo
+<br/>11 rue Leon Jouhaux,
+<br/>F-75010, Paris, France
+</td></tr><tr><td>44b1399e8569a29eed0d22d88767b1891dbcf987</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication.
+<br/>IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE
+<br/>Learning Multi-modal Latent Attributes
+</td></tr><tr><td>446dc1413e1cfaee0030dc74a3cee49a47386355</td><td>Recent Advances in Zero-shot Recognition
+</td></tr><tr><td>44a3ec27f92c344a15deb8e5dc3a5b3797505c06</td><td>A Taxonomy of Part and Attribute Discovery
+<br/>Techniques
+</td></tr><tr><td>44aeda8493ad0d44ca1304756cc0126a2720f07b</td><td>Face Alive Icons
+</td></tr><tr><td>449b1b91029e84dab14b80852e35387a9275870e</td><td></td></tr><tr><td>44078d0daed8b13114cffb15b368acc467f96351</td><td></td></tr><tr><td>44dd150b9020b2253107b4a4af3644f0a51718a3</td><td>An Analysis of the Sensitivity of Active Shape
+<br/>Models to Initialization when Applied to Automatic
+<br/>Facial Landmarking
+</td></tr><tr><td>447d8893a4bdc29fa1214e53499ffe67b28a6db5</td><td></td></tr><tr><td>44f65e3304bdde4be04823fd7ca770c1c05c2cef</td><td>SIViP
+<br/>DOI 10.1007/s11760-009-0125-4
+<br/>ORIGINAL PAPER
+<br/>On the use of phase of the Fourier transform for face recognition
+<br/>under variations in illumination
+<br/>Received: 17 November 2008 / Revised: 20 February 2009 / Accepted: 7 July 2009
+<br/>© Springer-Verlag London Limited 2009
+</td></tr><tr><td>44eb4d128b60485377e74ffb5facc0bf4ddeb022</td><td></td></tr><tr><td>448ed201f6fceaa6533d88b0b29da3f36235e131</td><td></td></tr><tr><td>447a5e1caf847952d2bb526ab2fb75898466d1bc</td><td>Under review as a conference paper at ICLR 2018
+<br/>LEARNING NON-LINEAR TRANSFORM WITH DISCRIM-
+<br/>INATIVE AND MINIMUM INFORMATION LOSS PRIORS
+<br/>Anonymous authors
+<br/>Paper under double-blind review
+</td></tr><tr><td>2a7bca56e2539c8cf1ae4e9da521879b7951872d</td><td>Exploiting Unrelated Tasks in Multi-Task Learning
+<br/>Anonymous Author 1
+<br/>Unknown Institution 1
+<br/>Anonymous Author 2
+<br/>Unknown Institution 2
+<br/>Anonymous Author 3
+<br/>Unknown Institution 3
+</td></tr><tr><td>2aaa6969c03f435b3ea8431574a91a0843bd320b</td><td></td></tr><tr><td>2ad7cef781f98fd66101fa4a78e012369d064830</td><td></td></tr><tr><td>2ad29b2921aba7738c51d9025b342a0ec770c6ea</td><td></td></tr><tr><td>2a6bba2e81d5fb3c0fd0e6b757cf50ba7bf8e924</td><td></td></tr><tr><td>2aec012bb6dcaacd9d7a1e45bc5204fac7b63b3c</td><td>Robust Registration and Geometry Estimation from Unstructured
+<br/>Facial Scans
+</td></tr><tr><td>2ae139b247057c02cda352f6661f46f7feb38e45</td><td>Combining Modality Specific Deep Neural Networks for
+<br/>Emotion Recognition in Video
+<br/>1École Polytechique de Montréal, Université de Montréal, Montréal, Canada
+<br/>2Laboratoire d’Informatique des Systèmes Adaptatifs, Université de Montréal, Montréal, Canada
+</td></tr><tr><td>2a5903bdb3fdfb4d51f70b77f16852df3b8e5f83</td><td>121
+<br/>The Effect of Computer-Generated Descriptions
+<br/>on Photo-Sharing Experiences of People With
+<br/>Visual Impairments
+<br/>Like sighted people, visually impaired people want to share photographs on social networking services, but
+<br/>find it difficult to identify and select photos from their albums. We aimed to address this problem by
+<br/>incorporating state-of-the-art computer-generated descriptions into Facebook’s photo-sharing feature. We
+<br/>interviewed 12 visually impaired participants to understand their photo-sharing experiences and designed a
+<br/>photo description feature for the Facebook mobile application. We evaluated this feature with six
+<br/>participants in a seven-day diary study. We found that participants used the descriptions to recall and
+<br/>organize their photos, but they hesitated to upload photos without a sighted person’s input. In addition to
+<br/>basic information about photo content, participants wanted to know more details about salient objects and
+<br/>people, and whether the photos reflected their personal aesthetic. We discuss these findings from the lens of
+<br/>self-disclosure and self-presentation theories and propose new computer vision research directions that will
+<br/>better support visual content sharing by visually impaired people.
+<br/>CCS Concepts: • Information interfaces and presentations → Multimedia and information systems; •
+<br/>Social and professional topics → People with disabilities
+<br/>KEYWORDS
+<br/>Visual impairments; computer-generated descriptions; SNSs; photo sharing; self-disclosure; self-presentation
+<br/>ACM Reference format:
+<br/>The Effect of Computer-Generated Descriptions On Photo-Sharing Experiences of People With Visual
+<br/>Impairments. Proc. ACM Hum.-Comput. Interact. 1, CSCW. 121 (November 2017), 22 pages.
+<br/>DOI: 10.1145/3134756
+<br/>1 INTRODUCTION
+<br/>Sharing memories and experiences via photos is a common way to engage with others on social networking
+<br/>services (SNSs) [39,46,51]. For instance, Facebook users uploaded more than 350 million photos a day [24]
+<br/>and Twitter, which initially supported only text in tweets, now has more than 28.4% of tweets containing
+<br/>images [39]. Visually impaired people (both blind and low vision) have a strong presence on SNS and are
+<br/>interested in sharing photos [50]. They take photos for the same reasons that sighted people do: sharing
+<br/>daily moments with their sighted friends and family [30,32]. A prior study showed that visually impaired
+<br/>people shared a relatively large number of photos on Facebook—only slightly less than their sighted
+<br/>counterparts [50].
+<br/>
+<br/> PACM on Human-Computer Interaction, Vol. 1, No. 2, Article 121. Publication date: November 2017
+</td></tr><tr><td>2a02355c1155f2d2e0cf7a8e197e0d0075437b19</td><td></td></tr><tr><td>2aea27352406a2066ddae5fad6f3f13afdc90be9</td><td></td></tr><tr><td>2ad0ee93d029e790ebb50574f403a09854b65b7e</td><td>Acquiring Linear Subspaces for Face
+<br/>Recognition under Variable Lighting
+<br/>David Kriegman, Senior Member, IEEE
+</td></tr><tr><td>2ff9618ea521df3c916abc88e7c85220d9f0ff06</td><td>Facial Tic Detection Using Computer Vision
+<br/>Christopher D. Leveille
+<br/>March 20, 2014
+</td></tr><tr><td>2fda461869f84a9298a0e93ef280f79b9fb76f94</td><td>OpenFace: an open source facial behavior analysis toolkit
+<br/>Tadas Baltruˇsaitis
+</td></tr><tr><td>2fdce3228d384456ea9faff108b9c6d0cf39e7c7</td><td></td></tr><tr><td>2f7e9b45255c9029d2ae97bbb004d6072e70fa79</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>cvpaper.challenge in 2015
+<br/>A review of CVPR2015 and DeepSurvey
+<br/>Nakamura
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>2f489bd9bfb61a7d7165a2f05c03377a00072477</td><td>JIA, YANG: STRUCTURED SEMI-SUPERVISED FOREST
+<br/>Structured Semi-supervised Forest for
+<br/>Facial Landmarks Localization with Face
+<br/>Mask Reasoning
+<br/>1 Department of Computer Science
+<br/>The Univ. of Hong Kong, HK
+<br/>2 School of EECS
+<br/>Queen Mary Univ. of London, UK
+<br/>Angran Lin1
+</td></tr><tr><td>2f16459e2e24dc91b3b4cac7c6294387d4a0eacf</td><td></td></tr><tr><td>2f59f28a1ca3130d413e8e8b59fb30d50ac020e2</td><td>Children Gender Recognition Under Unconstrained
+<br/>Conditions Based on Contextual Information
+<br/>Joint Research Centre, European Commission, Ispra, Italy
+</td></tr><tr><td>2f88d3189723669f957d83ad542ac5c2341c37a5</td><td>Downloaded From: https://www.spiedigitallibrary.org/journals/Journal-of-Electronic-Imaging on 9/13/2018
+<br/>Terms of Use: https://www.spiedigitallibrary.org/terms-of-use
+<br/>Attribute-correlatedlocalregionsfordeeprelativeattributeslearningFenZhangXiangweiKongZeJiaFenZhang,XiangweiKong,ZeJia,“Attribute-correlatedlocalregionsfordeeprelativeattributeslearning,”J.Electron.Imaging27(4),043021(2018),doi:10.1117/1.JEI.27.4.043021. </td></tr><tr><td>2fda164863a06a92d3a910b96eef927269aeb730</td><td>Names and Faces in the News
+<br/>Computer Science Division
+<br/>U.C. Berkeley
+<br/>Berkeley, CA 94720
+</td></tr><tr><td>2fa057a20a2b4a4f344988fee0a49fce85b0dc33</td><td></td></tr><tr><td>2f8ef26bfecaaa102a55b752860dbb92f1a11dc6</td><td>A Graph Based Approach to Speaker Retrieval in Talk
+<br/>Show Videos with Transcript-Based Supervision
+</td></tr><tr><td>2f184c6e2c31d23ef083c881de36b9b9b6997ce9</td><td>Polichotomies on Imbalanced Domains
+<br/>by One-per-Class Compensated Reconstruction Rule
+<br/>Integrated Research Centre, Universit´a Campus Bio-Medico of Rome, Rome, Italy
+</td></tr><tr><td>2f9c173ccd8c1e6b88d7fb95d6679838bc9ca51d</td><td></td></tr><tr><td>2f8183b549ec51b67f7dad717f0db6bf342c9d02</td><td></td></tr><tr><td>2fa1fc116731b2b5bb97f06d2ac494cb2b2fe475</td><td>A novel approach to personal photo album representation
+<br/>and management
+<br/>Universit`a di Palermo - Dipartimento di Ingegneria Informatica
+<br/>Viale delle Scienze, 90128, Palermo, Italy
+</td></tr><tr><td>2f882ceaaf110046e63123b495212d7d4e99f33d</td><td>High Frequency Component Compensation based Super-resolution
+<br/>Algorithm for Face Video Enhancement
+<br/>CVRR Lab, UC San Diego, La Jolla, CA 92093, USA
+</td></tr><tr><td>2f95340b01cfa48b867f336185e89acfedfa4d92</td><td>Face Expression Recognition with a 2-Channel
+<br/>Convolutional Neural Network
+<br/><b></b><br/>Vogt-K¨olln-Straße 30, 22527 Hamburg, Germany
+<br/>http://www.informatik.uni-hamburg.de/WTM/
+</td></tr><tr><td>2fea258320c50f36408032c05c54ba455d575809</td><td></td></tr><tr><td>2faa09413162b0a7629db93fbb27eda5aeac54ca</td><td>NISTIR 7674
+<br/>Quantifying How Lighting and Focus
+<br/>Affect Face Recognition Performance
+<br/>Phillips, P. J.
+<br/>Beveridge, J. R.
+<br/>Draper, B.
+<br/>Bolme, D.
+<br/>Givens, G. H.
+<br/>Lui, Y. M.
+<br/>1
+</td></tr><tr><td>433bb1eaa3751519c2e5f17f47f8532322abbe6d</td><td></td></tr><tr><td>4300fa1221beb9dc81a496cd2f645c990a7ede53</td><td></td></tr><tr><td>439ac8edfa1e7cbc65474cab544a5b8c4c65d5db</td><td>SIViP (2011) 5:401–413
+<br/>DOI 10.1007/s11760-011-0244-6
+<br/>ORIGINAL PAPER
+<br/>Face authentication with undercontrolled pose and illumination
+<br/>Received: 15 September 2010 / Revised: 14 December 2010 / Accepted: 17 February 2011 / Published online: 7 August 2011
+<br/>© Springer-Verlag London Limited 2011
+</td></tr><tr><td>43f6953804964037ff91a4f45d5b5d2f8edfe4d5</td><td>Multi-Feature Fusion in Advanced Robotics Applications
+<br/>Institut für Informatik
+<br/>Technische Universität München
+<br/>D-85748 Garching, Germany
+</td></tr><tr><td>439ec47725ae4a3660e509d32828599a495559bf</td><td>Facial Expressions Tracking and Recognition: Database Protocols for Systems Validation
+<br/>and Evaluation
+</td></tr><tr><td>43a03cbe8b704f31046a5aba05153eb3d6de4142</td><td>Towards Robust Face Recognition from Video
+<br/>Image Science and Machine Vision Group
+<br/>Oak Ridge National Laboratory
+<br/>Oak Ridge, TN 37831-6010
+</td></tr><tr><td>43836d69f00275ba2f3d135f0ca9cf88d1209a87</td><td>Ozaki et al. IPSJ Transactions on Computer Vision and
+<br/>Applications (2017) 9:20
+<br/>DOI 10.1186/s41074-017-0030-7
+<br/>IPSJ Transactions on Computer
+<br/>Vision and Applications
+<br/>RESEARCH PAPER
+<br/>Open Access
+<br/>Effective hyperparameter optimization
+<br/>using Nelder-Mead method in deep learning
+</td></tr><tr><td>43aa40eaa59244c233f83d81f86e12eba8d74b59</td><td></td></tr><tr><td>4362368dae29cc66a47114d5ffeaf0534bf0159c</td><td>UACEE International Journal of Artificial Intelligence and Neural Networks ISSN:- 2250-3749 (online)
+<br/>Performance Analysis of FDA Based Face
+<br/>Recognition Using Correlation, ANN and SVM
+<br/>Department of Computer Engineering
+<br/>Department of Computer Engineering
+<br/>Department of Computer Engineering
+<br/>Anand, INDIA
+<br/>Anand, INDIA
+<br/>Anand, INDIA
+</td></tr><tr><td>43e268c118ac25f1f0e984b57bc54f0119ded520</td><td></td></tr><tr><td>43476cbf2a109f8381b398e7a1ddd794b29a9a16</td><td>A Practical Transfer Learning Algorithm for Face Verification
+<br/>David Wipf
+</td></tr><tr><td>4353d0dcaf450743e9eddd2aeedee4d01a1be78b</td><td>Learning Discriminative LBP-Histogram Bins
+<br/>for Facial Expression Recognition
+<br/>Philips Research, High Tech Campus 36, Eindhoven 5656 AE, The Netherlands
+</td></tr><tr><td>437a720c6f6fc1959ba95e48e487eb3767b4e508</td><td></td></tr><tr><td>436d80cc1b52365ed7b2477c0b385b6fbbb51d3b</td><td></td></tr><tr><td>43b8b5eeb4869372ef896ca2d1e6010552cdc4d4</td><td>Large-scale Supervised Hierarchical Feature Learning for Face Recognition
+<br/>Intel Labs China
+</td></tr><tr><td>43ae4867d058453e9abce760ff0f9427789bab3a</td><td>951
+<br/>Graph Embedded Nonparametric Mutual
+<br/>Information For Supervised
+<br/>Dimensionality Reduction
+</td></tr><tr><td>430c4d7ad76e51d83bbd7ec9d3f856043f054915</td><td></td></tr><tr><td>438b88fe40a6f9b5dcf08e64e27b2719940995e0</td><td>Building a Classi(cid:2)cation Cascade for Visual Identi(cid:2)cation from One Example
+<br/>Computer Science, U.C. Berkeley
+<br/>Computer Science, UMass Amherst
+<br/>Computer Science, U.C. Berkeley
+</td></tr><tr><td>43fb9efa79178cb6f481387b7c6e9b0ca3761da8</td><td>Mixture of Parts Revisited: Expressive Part Interactions for Pose Estimation
+<br/>Anoop R Katti
+<br/>IIT Madras
+<br/>Chennai, India
+<br/>IIT Madras
+<br/>Chennai, India
+</td></tr><tr><td>43d7d0d0d0e2d6cf5355e60c4fe5b715f0a1101a</td><td>Pobrane z czasopisma Annales AI- Informatica http://ai.annales.umcs.pl
+<br/>Data: 04/05/2018 16:53:32
+<br/>U M CS
+</td></tr><tr><td>889bc64c7da8e2a85ae6af320ae10e05c4cd6ce7</td><td>174
+<br/>Using Support Vector Machines to Enhance the
+<br/>Performance of Bayesian Face Recognition
+</td></tr><tr><td>8812aef6bdac056b00525f0642702ecf8d57790b</td><td>A Unified Features Approach to Human Face Image
+<br/>Analysis and Interpretation
+<br/>Department of Informatics,
+<br/>Technische Universit¨at M¨unchen
+<br/>85748 Garching, Germany
+</td></tr><tr><td>881066ec43bcf7476479a4146568414e419da804</td><td>From Traditional to Modern : Domain Adaptation for
+<br/>Action Classification in Short Social Video Clips
+<br/>Center for Visual Information Technology, IIIT Hyderabad, India
+</td></tr><tr><td>8813368c6c14552539137aba2b6f8c55f561b75f</td><td>Trunk-Branch Ensemble Convolutional Neural
+<br/>Networks for Video-based Face Recognition
+</td></tr><tr><td>883006c0f76cf348a5f8339bfcb649a3e46e2690</td><td>Weakly Supervised Pain Localization using Multiple Instance Learning
+</td></tr><tr><td>88f2952535df5859c8f60026f08b71976f8e19ec</td><td>A neural network framework for face
+<br/>recognition by elastic bunch graph matching
+</td></tr><tr><td>8818b12aa0ff3bf0b20f9caa250395cbea0e8769</td><td>Fashion Conversation Data on Instagram
+<br/>∗Graduate School of Culture Technology, KAIST, South Korea
+<br/>†Department of Communication Studies, UCLA, USA
+</td></tr><tr><td>8878871ec2763f912102eeaff4b5a2febfc22fbe</td><td>3781
+<br/>Human Action Recognition in Unconstrained
+<br/>Videos by Explicit Motion Modeling
+</td></tr><tr><td>8855d6161d7e5b35f6c59e15b94db9fa5bbf2912</td><td>COGNITION IN PREGNANCY AND THE POSTPARTUM PERIOD
+</td></tr><tr><td>88bee9733e96958444dc9e6bef191baba4fa6efa</td><td>Extending Face Identification to
+<br/>Open-Set Face Recognition
+<br/>Department of Computer Science
+<br/>Universidade Federal de Minas Gerais
+<br/>Belo Horizonte, Brazil
+</td></tr><tr><td>88fd4d1d0f4014f2b2e343c83d8c7e46d198cc79</td><td>978-1-4799-9988-0/16/$31.00 ©2016 IEEE
+<br/>2697
+<br/>ICASSP 2016
+</td></tr><tr><td>9fa1be81d31fba07a1bde0275b9d35c528f4d0b8</td><td>Identifying Persons by Pictorial and
+<br/>Contextual Cues
+<br/>Nicholas Leonard Pi¨el
+<br/>Thesis submitted for the degree of Master of Science
+<br/>Supervisor:
+<br/>April 2009
+</td></tr><tr><td>9f094341bea610a10346f072bf865cb550a1f1c1</td><td>Recognition and Volume Estimation of Food Intake using a Mobile Device
+<br/>Sarnoff Corporation
+<br/>201 Washington Rd,
+<br/>Princeton, NJ, 08540
+</td></tr><tr><td>6b333b2c6311e36c2bde920ab5813f8cfcf2b67b</td><td></td></tr><tr><td>6b9aa288ce7740ec5ce9826c66d059ddcfd8dba9</td><td></td></tr><tr><td>6b089627a4ea24bff193611e68390d1a4c3b3644</td><td>CROSS-POLLINATION OF NORMALISATION
+<br/>TECHNIQUES FROM SPEAKER TO FACE
+<br/>AUTHENTICATION USING GAUSSIAN
+<br/>MIXTURE MODELS
+<br/>Idiap-RR-03-2012
+<br/>JANUARY 2012
+<br/>Centre du Parc, Rue Marconi 19, P.O. Box 592, CH - 1920 Martigny
+</td></tr><tr><td>6be0ab66c31023762e26d309a4a9d0096f72a7f0</td><td>Enhance Visual Recognition under Adverse
+<br/>Conditions via Deep Networks
+</td></tr><tr><td>6b18628cc8829c3bf851ea3ee3bcff8543391819</td><td>Face recognition based on subset selection via metric learning on manifold.
+<br/>1058. [doi:10.1631/FITEE.1500085]
+<br/>Face recognition based on subset
+<br/>selection via metric learning on manifold
+<br/>Key words: Face recognition, Sparse representation, Manifold structure,
+<br/>Metric learning, Subset selection
+<br/> ORCID: http://orcid.org/0000-0001-7441-4749
+<br/>Front Inform Technol & Electron Eng </td></tr><tr><td>6b1b43d58faed7b457b1d4e8c16f5f7e7d819239</td><td></td></tr><tr><td>6b35b15ceba2f26cf949f23347ec95bbbf7bed64</td><td></td></tr><tr><td>6b6493551017819a3d1f12bbf922a8a8c8cc2a03</td><td>Pose Normalization for Local Appearance-Based
+<br/>Face Recognition
+<br/>Computer Science Department, Universit¨at Karlsruhe (TH)
+<br/>Am Fasanengarten 5, Karlsruhe 76131, Germany
+<br/>http://isl.ira.uka.de/cvhci
+</td></tr><tr><td>6bb630dfa797168e6627d972560c3d438f71ea99</td><td></td></tr><tr><td>0728f788107122d76dfafa4fb0c45c20dcf523ca</td><td>The Best of Both Worlds: Combining Data-independent and Data-driven
+<br/>Approaches for Action Recognition
+</td></tr><tr><td>071099a4c3eed464388c8d1bff7b0538c7322422</td><td>FACIAL EXPRESSION RECOGNITION IN THE WILD USING RICH DEEP FEATURES
+<br/>Microsoft Advanced Technology labs, Microsoft Technology and Research, Cairo, Egypt
+<br/>
+</td></tr><tr><td>071af21377cc76d5c05100a745fb13cb2e40500f</td><td></td></tr><tr><td>0754e769eb613fd3968b6e267a301728f52358be</td><td>Towards a Watson That Sees: Language-Guided Action Recognition for
+<br/>Robots
+</td></tr><tr><td>0717b47ab84b848de37dbefd81cf8bf512b544ac</td><td>International Journal of Engineering Research and Applications (IJERA) ISSN: 2248-9622
+<br/>International Conference on Humming Bird ( 01st March 2014)
+<br/>RESEARCH ARTICLE
+<br/> OPEN ACCESS
+<br/>Robust Face Recognition and Tagging in Visual Surveillance
+<br/>System
+</td></tr><tr><td>0750a816858b601c0dbf4cfb68066ae7e788f05d</td><td>CosFace: Large Margin Cosine Loss for Deep Face Recognition
+<br/>Tencent AI Lab
+</td></tr><tr><td>0716e1ad868f5f446b1c367721418ffadfcf0519</td><td>Interactively Guiding Semi-Supervised
+<br/>Clustering via Attribute-Based Explanations
+<br/>Virginia Tech, Blacksburg, VA, USA
+</td></tr><tr><td>073eaa49ccde15b62425cda1d9feab0fea03a842</td><td></td></tr><tr><td>0726a45eb129eed88915aa5a86df2af16a09bcc1</td><td>Introspective Perception: Learning to Predict Failures in Vision Systems
+</td></tr><tr><td>38d56ddcea01ce99902dd75ad162213cbe4eaab7</td><td>Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+<br/>2648
+</td></tr><tr><td>389334e9a0d84bc54bcd5b94b4ce4c5d9d6a2f26</td><td>FACIAL PARAMETER EXTRACTION SYSTEM BASED ON ACTIVE CONTOURS
+<br/>Universitat Politècnica de Catalunya, Barcelona, Spain
+</td></tr><tr><td>380dd0ddd5d69adc52defc095570d1c22952f5cc</td><td></td></tr><tr><td>38679355d4cfea3a791005f211aa16e76b2eaa8d</td><td>Title
+<br/>Evolutionary cross-domain discriminative Hessian Eigenmaps
+<br/>Author(s)
+<br/>Si, S; Tao, D; Chan, KP
+<br/>Citation
+<br/>1086
+<br/>Issued Date
+<br/>2010
+<br/>URL
+<br/>http://hdl.handle.net/10722/127357
+<br/>Rights
+<br/>This work is licensed under a Creative Commons Attribution-
+<br/>NonCommercial-NoDerivatives 4.0 International License.; ©2010
+<br/>IEEE. Personal use of this material is permitted. However,
+<br/>permission to reprint/republish this material for advertising or
+<br/>promotional purposes or for creating new collective works for
+<br/>resale or redistribution to servers or lists, or to reuse any
+<br/>copyrighted component of this work in other works must be
+<br/>obtained from the IEEE.
+</td></tr><tr><td>38682c7b19831e5d4f58e9bce9716f9c2c29c4e7</td><td>International Journal of Computer Trends and Technology (IJCTT) – Volume 18 Number 5 – Dec 2014
+<br/>Movie Character Identification Using Graph Matching
+<br/>Algorithm
+<br/>M.Tech Scholar, Dept of CSE, QISCET, ONGOLE, Dist: Prakasam, AP, India.
+<br/>Associate Professor, Department of CSE, QISCET, ONGOLE, Dist: Prakasam, AP, India
+</td></tr><tr><td>3803b91e784922a2dacd6a18f61b3100629df932</td><td>Temporal Multimodal Fusion
+<br/>for Video Emotion Classification in the Wild
+<br/>Orange Labs
+<br/>Cesson-Sévigné, France
+<br/>Orange Labs
+<br/>Cesson-Sévigné, France
+<br/>Normandie Univ., UNICAEN,
+<br/>ENSICAEN, CNRS
+<br/>Caen, France
+</td></tr><tr><td>38eea307445a39ee7902c1ecf8cea7e3dcb7c0e7</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Multi-distance Support Matrix Machine
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>385750bcf95036c808d63db0e0b14768463ff4c6</td><td></td></tr><tr><td>384f972c81c52fe36849600728865ea50a0c4670</td><td>1
+<br/>Multi-Fold Gabor, PCA and ICA Filter
+<br/>Convolution Descriptor for Face Recognition
+<br/>
+</td></tr><tr><td>380d5138cadccc9b5b91c707ba0a9220b0f39271</td><td>Deep Imbalanced Learning for Face Recognition
+<br/>and Attribute Prediction
+</td></tr><tr><td>38861d0d3a0292c1f54153b303b0d791cbba1d50</td><td></td></tr><tr><td>38192a0f9261d9727b119e294a65f2e25f72d7e6</td><td></td></tr><tr><td>00fb2836068042c19b5197d0999e8e93b920eb9c</td><td></td></tr><tr><td>0077cd8f97cafd2b389783858a6e4ab7887b0b6b</td><td>MAI et al.: ON THE RECONSTRUCTION OF DEEP FACE TEMPLATES
+<br/>On the Reconstruction of Deep Face Templates
+</td></tr><tr><td>00214fe1319113e6649435cae386019235474789</td><td>Bachelorarbeit im Fach Informatik
+<br/>Face Recognition using
+<br/>Distortion Models
+<br/>Mathematik, Informatik und Naturwissenschaften der
+<br/>RHEINISCH-WESTFÄLISCHEN TECHNISCHEN HOCHSCHULE AACHEN
+<br/>Der Fakultät für
+<br/>Lehrstuhl für Informatik VI
+<br/>Prof. Dr.-Ing. H. Ney
+<br/>vorgelegt von:
+<br/>Matrikelnummer 252400
+<br/>Gutachter:
+<br/>Prof. Dr.-Ing. H. Ney
+<br/>Prof. Dr. B. Leibe
+<br/>Betreuer:
+<br/>September 2009
+</td></tr><tr><td>0004f72a00096fa410b179ad12aa3a0d10fc853c</td><td></td></tr><tr><td>00f0ed04defec19b4843b5b16557d8d0ccc5bb42</td><td></td></tr><tr><td>0037bff7be6d463785d4e5b2671da664cd7ef746</td><td>Author manuscript, published in "European Conference on Computer Vision (ECCV '10) 6311 (2010) 634--647"
+<br/> DOI : 10.1007/978-3-642-15549-9_46
+</td></tr><tr><td>00d9d88bb1bdca35663946a76d807fff3dc1c15f</td><td>Subjects and Their Objects: Localizing Interactees for a
+<br/>Person-Centric View of Importance
+</td></tr><tr><td>00a967cb2d18e1394226ad37930524a31351f6cf</td><td>Fully-adaptive Feature Sharing in Multi-Task Networks with Applications in
+<br/>Person Attribute Classification
+<br/>UC San Diego
+<br/>IBM Research
+<br/>IBM Research
+<br/>Binghamton Univeristy, SUNY
+<br/>UC San Diego
+<br/>Rogerio Feris
+<br/>IBM Research
+</td></tr><tr><td>00a3cfe3ce35a7ffb8214f6db15366f4e79761e3</td><td>Kinect for real-time emotion recognition via facial expressions. Frontiers of
+<br/>Information Technology & Electronic Engineering, 16(4):272-282.
+<br/>[doi:10.1631/FITEE.1400209]
+<br/>Using Kinect for real-time emotion
+<br/>recognition via facial expressions
+<br/>Key words: Kinect, Emotion recognition, Facial expression, Real-time
+<br/>classification, Fusion algorithm, Support vector machine (SVM)
+<br/> ORCID: http://orcid.org/0000-0002-5021-9057
+<br/>Front Inform Technol & Electron Eng </td></tr><tr><td>004a1bb1a2c93b4f379468cca6b6cfc6d8746cc4</td><td>Balanced k-Means and Min-Cut Clustering
+</td></tr><tr><td>00d94b35ffd6cabfb70b9a1d220b6823ae9154ee</td><td>Discriminative Bayesian Dictionary Learning
+<br/>for Classification
+</td></tr><tr><td>006f283a50d325840433f4cf6d15876d475bba77</td><td>756
+<br/>Preserving Structure in Model-Free Tracking
+</td></tr><tr><td>0059b3dfc7056f26de1eabaafd1ad542e34c2c2e</td><td></td></tr><tr><td>6e198f6cc4199e1c4173944e3df6f39a302cf787</td><td>MORPH-II: Inconsistencies and Cleaning Whitepaper
+<br/>NSF-REU Site at UNC Wilmington, Summer 2017
+</td></tr><tr><td>6eaf446dec00536858548fe7cc66025b70ce20eb</td><td></td></tr><tr><td>6e91be2ad74cf7c5969314b2327b513532b1be09</td><td>Dimensionality Reduction with Subspace Structure
+<br/>Preservation
+<br/>Department of Computer Science
+<br/>SUNY Buffalo
+<br/>Buffalo, NY 14260
+</td></tr><tr><td>6eba25166fe461dc388805cc2452d49f5d1cdadd</td><td>Pages 122.1-122.12
+<br/>DOI: https://dx.doi.org/10.5244/C.30.122
+</td></tr><tr><td>6e8a81d452a91f5231443ac83e4c0a0db4579974</td><td>Illumination robust face representation based on intrinsic geometrical
+<br/>information
+<br/>Soyel, H; Ozmen, B; McOwan, PW
+<br/>This is a pre-copyedited, author-produced PDF of an article accepted for publication in IET
+<br/>Conference on Image Processing (IPR 2012). The version of record is available
+<br/>http://ieeexplore.ieee.org/document/6290632/?arnumber=6290632&tag=1
+<br/>For additional information about this publication click this link.
+<br/>http://qmro.qmul.ac.uk/xmlui/handle/123456789/16147
+<br/>Information about this research object was correct at the time of download; we occasionally
+<br/>make corrections to records, please therefore check the published record when citing. For
+</td></tr><tr><td>6ecd4025b7b5f4894c990614a9a65e3a1ac347b2</td><td>International Journal on Recent and Innovation Trends in Computing and Communication
+<br/>
+<br/> ISSN: 2321-8169
+<br/>Volume: 2 Issue: 5
+<br/> 1275– 1281
+<br/>_______________________________________________________________________________________________
+<br/>Automatic Naming of Character using Video Streaming for Face
+<br/>Recognition with Graph Matching
+<br/>Nivedita.R.Pandey
+<br/>Ranjan.P.Dahake
+<br/>PG Student at MET’s IOE Bhujbal Knowledge City,
+<br/>PG Student at MET’s IOE Bhujbal Knowledge City,
+<br/>Nasik, Maharashtra, India,
+<br/>Nasik, Maharashtra, India,
+</td></tr><tr><td>6eaeac9ae2a1697fa0aa8e394edc64f32762f578</td><td></td></tr><tr><td>6ee2ea416382d659a0dddc7a88fc093accc2f8ee</td><td></td></tr><tr><td>6e3a181bf388dd503c83dc324561701b19d37df1</td><td>Finding a low-rank basis in a matrix subspace
+<br/>Andr´e Uschmajew
+</td></tr><tr><td>6e8c3b7d25e6530a631ea01fbbb93ac1e8b69d2f</td><td>Deep Episodic Memory: Encoding, Recalling, and Predicting
+<br/>Episodic Experiences for Robot Action Execution
+</td></tr><tr><td>6e911227e893d0eecb363015754824bf4366bdb7</td><td>Wasserstein Divergence for GANs
+<br/>1 Computer Vision Lab, ETH Zurich, Switzerland
+<br/>2 VISICS, KU Leuven, Belgium
+</td></tr><tr><td>6ee8a94ccba10062172e5b31ee097c846821a822</td><td>Submitted 3/13; Revised 10/13; Published 12/13
+<br/>How to Solve Classification and Regression Problems on
+<br/>High-Dimensional Data with a Supervised
+<br/>Extension of Slow Feature Analysis
+<br/>Institut f¨ur Neuroinformatik
+<br/>Ruhr-Universit¨at Bochum
+<br/>Bochum D-44801, Germany
+<br/>Editor: David Dunson
+</td></tr><tr><td>6e379f2d34e14efd85ae51875a4fa7d7ae63a662</td><td>A NEW MULTI-MODAL BIOMETRIC SYSTEM
+<br/>BASED ON FINGERPRINT AND FINGER
+<br/>VEIN RECOGNITION
+<br/>Master's Thesis
+<br/>Department of Software Engineering
+<br/>JULY-2014
+<br/>I
+</td></tr><tr><td>6e1802874ead801a7e1072aa870681aa2f555f35</td><td>1­4244­0728­1/07/$20.00 ©2007 IEEE
+<br/>I ­ 629
+<br/>ICASSP 2007
+</td></tr><tr><td>6ed22b934e382c6f72402747d51aa50994cfd97b</td><td>Customized Expression Recognition for Performance-Driven
+<br/>Cutout Character Animation
+<br/>†NEC Laboratories America
+<br/>‡Snapchat
+</td></tr><tr><td>6e93fd7400585f5df57b5343699cb7cda20cfcc2</td><td>http://journalofvision.org/9/2/22/
+<br/>Comparing a novel model based on the transferable
+<br/>belief model with humans during the recognition of
+<br/>partially occluded facial expressions
+<br/>Département de Psychologie, Université de Montréal,
+<br/>Canada
+<br/>Département de Psychologie, Université de Montréal,
+<br/>Canada
+<br/>Département de Psychologie, Université de Montréal,
+<br/>Canada
+<br/>Humans recognize basic facial expressions effortlessly. Yet, despite a considerable amount of research, this task remains
+<br/>elusive for computer vision systems. Here, we compared the behavior of one of the best computer models of facial
+<br/>expression recognition (Z. Hammal, L. Couvreur, A. Caplier, & M. Rombaut, 2007) with the behavior of human observers
+<br/>during the M. Smith, G. Cottrell, F. Gosselin, and P. G. Schyns (2005) facial expression recognition task performed on
+<br/>stimuli randomly sampled using Gaussian apertures. The modelVwhich we had to significantly modify in order to give the
+<br/>ability to deal with partially occluded stimuliVclassifies the six basic facial expressions (Happiness, Fear, Sadness,
+<br/>Surprise, Anger, and Disgust) plus Neutral from static images based on the permanent facial feature deformations and the
+<br/>Transferable Belief Model (TBM). Three simulations demonstrated the suitability of the TBM-based model to deal with
+<br/>partially occluded facial parts and revealed the differences between the facial information used by humans and by the
+<br/>model. This opens promising perspectives for the future development of the model.
+<br/>Keywords: facial features behavior, facial expressions classification, Transferable Belief Model, Bubbles
+<br/>Citation: Hammal, Z., Arguin, M., & Gosselin, F. (2009). Comparing a novel model based on the transferable belief
+<br/>http://journalofvision.org/9/2/22/, doi:10.1167/9.2.22.
+<br/>Introduction
+<br/>Facial expressions communicate information from
+<br/>which we can quickly infer the state of mind of our peers
+<br/>and adjust our behavior accordingly (Darwin, 1872). To
+<br/>illustrate, take a person like patient SM with complete
+<br/>bilateral damage to the amygdala nuclei that prevents her
+<br/>from recognizing facial expressions of fear. SM would be
+<br/>incapable of interpreting the fearful expression on the face
+<br/>of a bystander, who has encountered a furious Grizzly
+<br/>bear, as a sign of potential
+<br/>threat (Adolphs, Tranel,
+<br/>Damasio, & Damasio, 1994).
+<br/>Facial expressions are typically arranged into six
+<br/>universally recognized basic categories Happiness, Sur-
+<br/>prise, Disgust, Anger, Sadness, and Fear that are similarly
+<br/>expressed across different backgrounds and cultures
+<br/>(Cohn, 2006; Ekman, 1999; Izard, 1971, 1994). Facial
+<br/>expressions result
+<br/>from the precisely choreographed
+<br/>deformation of facial features, which are often described
+<br/>using the 46 Action Units (AUs; Ekman & Friesen,
+<br/>1978).
+<br/>Facial expression recognition and computer
+<br/>vision
+<br/>The study of human facial expressions has an impact in
+<br/>several areas of life such as art, social interaction, cognitive
+<br/>science, medicine, security, affective computing, and
+<br/>human-computer interaction (HCI). An automatic facial
+<br/>expressions classification system may contribute signifi-
+<br/>cantly to the development of all these disciplines. However,
+<br/>the development of such a system constitutes a significant
+<br/>challenge because of the many constraints that are imposed
+<br/>by its application in a real-world context (Pantic & Bartlett,
+<br/>2007; Pantic & Patras, 2006). In particular, such systems
+<br/>need to provide great accuracy and robustness without
+<br/>demanding too many interventions from the user.
+<br/>There have been major advances in computer vision
+<br/>over the past 15 years for the recognition of the six basic
+<br/>facial expressions (for reviews, see Fasel & Luettin, 2003;
+<br/>Pantic & Rothkrantz, 2000b). The main approaches can be
+<br/>divided in two classes: Model-based and fiducial points
+<br/>approaches. The model-based approach requires the
+<br/>design of a deterministic physical model that can represent
+<br/>doi: 10.1167/9.2.22
+<br/>Received January 28, 2008; published February 26, 2009
+<br/>ISSN 1534-7362 * ARVO
+</td></tr><tr><td>6e12ba518816cbc2d987200c461dc907fd19f533</td><td></td></tr><tr><td>9ab463d117219ed51f602ff0ddbd3414217e3166</td><td>Weighted Transmedia
+<br/>Relevance Feedback for
+<br/>Image Retrieval and
+<br/>Auto-annotation
+<br/>TECHNICAL
+<br/>REPORT
+<br/>N° 0415
+<br/>December 2011
+<br/>Project-Teams LEAR - INRIA
+<br/>and TVPA - XRCE
+</td></tr><tr><td>9ac82909d76b4c902e5dde5838130de6ce838c16</td><td>Recognizing Facial Expressions Automatically
+<br/>from Video
+<br/>1 Introduction
+<br/>Facial expressions, resulting from movements of the facial muscles, are the face
+<br/>changes in response to a person’s internal emotional states, intentions, or social
+<br/>communications. There is a considerable history associated with the study on fa-
+<br/>cial expressions. Darwin (1872) was the first to describe in details the specific fa-
+<br/>cial expressions associated with emotions in animals and humans, who argued that
+<br/>all mammals show emotions reliably in their faces. Since that, facial expression
+<br/>analysis has been a area of great research interest for behavioral scientists (Ekman,
+<br/>Friesen, and Hager, 2002). Psychological studies (Mehrabian, 1968; Ambady and
+<br/>Rosenthal, 1992) suggest that facial expressions, as the main mode for non-verbal
+<br/>communication, play a vital role in human face-to-face communication. For illus-
+<br/>tration, we show some examples of facial expressions in Fig. 1.
+<br/>Computer recognition of facial expressions has many important applications in
+<br/>intelligent human-computer interaction, computer animation, surveillance and se-
+<br/>curity, medical diagnosis, law enforcement, and awareness systems (Shan, 2007).
+<br/>Therefore, it has been an active research topic in multiple disciplines such as psy-
+<br/>chology, cognitive science, human-computer interaction, and pattern recognition.
+<br/>Meanwhile, as a promising unobtrusive solution, automatic facial expression analy-
+<br/>sis from video or images has received much attention in last two decades (Pantic and
+<br/>Rothkrantz, 2000a; Fasel and Luettin, 2003; Tian, Kanade, and Cohn, 2005; Pantic
+<br/>and Bartlett, 2007).
+<br/>This chapter introduces recent advances in computer recognition of facial expres-
+<br/>sions. Firstly, we describe the problem space, which includes multiple dimensions:
+<br/>level of description, static versus dynamic expression, facial feature extraction and
+</td></tr><tr><td>9ac15845defcd0d6b611ecd609c740d41f0c341d</td><td>Copyright
+<br/>by
+<br/>2011
+</td></tr><tr><td>9af1cf562377b307580ca214ecd2c556e20df000</td><td>Feb. 28
+<br/> International Journal of Advanced Studies in Computer Science and Engineering
+<br/>IJASCSE, Volume 4, Issue 2, 2015
+<br/> Video-Based Facial Expression Recognition
+<br/>Using Local Directional Binary Pattern
+<br/>Electrical Engineering Dept., AmirKabir Univarsity of Technology
+<br/>Tehran, Iran
+</td></tr><tr><td>9a23a0402ae68cc6ea2fe0092b6ec2d40f667adb</td><td>High-Resolution Image Synthesis and Semantic Manipulation with Conditional GANs
+<br/>1NVIDIA Corporation
+<br/>2UC Berkeley
+<br/>Figure 1: We propose a generative adversarial framework for synthesizing 2048 × 1024 images from semantic label maps
+<br/>(lower left corner in (a)). Compared to previous work [5], our results express more natural textures and details. (b) We can
+<br/>change labels in the original label map to create new scenes, like replacing trees with buildings. (c) Our framework also
+<br/>allows a user to edit the appearance of individual objects in the scene, e.g. changing the color of a car or the texture of a road.
+<br/>Please visit our website for more side-by-side comparisons as well as interactive editing demos.
+</td></tr><tr><td>9a7858eda9b40b16002c6003b6db19828f94a6c6</td><td>MOONEY FACE CLASSIFICATION AND PREDICTION BY LEARNING ACROSS TONE
+<br/>(cid:63) UC Berkeley / †ICSI
+</td></tr><tr><td>9a276c72acdb83660557489114a494b86a39f6ff</td><td>Emotion Classification through Lower Facial Expressions using Adaptive
+<br/>Support Vector Machines
+<br/>Department of Information Technology, Faculty of Industrial Technology and Management,
+</td></tr><tr><td>9a42c519f0aaa68debbe9df00b090ca446d25bc4</td><td>Face Recognition via Centralized Coordinate
+<br/>Learning
+</td></tr><tr><td>9aad8e52aff12bd822f0011e6ef85dfc22fe8466</td><td>Temporal-Spatial Mapping for Action Recognition
+</td></tr><tr><td>36b40c75a3e53c633c4afb5a9309d10e12c292c7</td><td></td></tr><tr><td>3646b42511a6a0df5470408bc9a7a69bb3c5d742</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Applications of Computers and Electronics for the Welfare of Rural Masses (ACEWRM) 2015
+<br/>Detection of Facial Parts based on ABLATA
+<br/>Technical Campus, Bhilai
+<br/>Vikas Singh
+<br/>Technical Campus, Bhilai
+<br/>Abha Choubey
+<br/>Technical Campus, Bhilai
+</td></tr><tr><td>365f67fe670bf55dc9ccdcd6888115264b2a2c56</td><td></td></tr><tr><td>36fe39ed69a5c7ff9650fd5f4fe950b5880760b0</td><td>Tracking von Gesichtsmimik
+<br/>mit Hilfe von Gitterstrukturen
+<br/>zur Klassifikation von schmerzrelevanten Action
+<br/>Units
+<br/>1Fraunhofer-Institut f¨ur Integrierte Schaltungen IIS, Erlangen,
+<br/>2Otto-Friedrich-Universit¨at Bamberg, 3Universit¨atsklinkum Erlangen
+<br/>Kurzfassung. In der Schmerzforschung werden schmerzrelevante Mi-
+<br/>mikbewegungen von Probanden mittels des Facial Action Coding System
+<br/>klassifiziert. Die manuelle Klassifikation hierbei ist aufw¨andig und eine
+<br/>automatische (Vor-)klassifikation k¨onnte den diagnostischen Wert dieser
+<br/>Analysen erh¨ohen sowie den klinischen Workflow unterst¨utzen. Der hier
+<br/>vorgestellte regelbasierte Ansatz erm¨oglicht eine automatische Klassifika-
+<br/>tion ohne große Trainingsmengen vorklassifizierter Daten. Das Verfahren
+<br/>erkennt und verfolgt Mimikbewegungen, unterst¨utzt durch ein Gitter,
+<br/>und ordnet diese Bewegungen bestimmten Gesichtsarealen zu. Mit die-
+<br/>sem Wissen kann aus den Bewegungen auf die zugeh¨origen Action Units
+<br/>geschlossen werden.
+<br/>1 Einleitung
+<br/>Menschliche Empfindungen wie Emotionen oder Schmerz l¨osen spezifische Mu-
+<br/>ster von Kontraktionen der Gesichtsmuskulatur aus, die Grundlage dessen sind,
+<br/>was wir Mimik nennen. Aus der Beobachtung der Mimik kann wiederum auf
+<br/>menschliche Empfindungen r¨uckgeschlossen werden. Im Rahmen der Schmerz-
+<br/>forschung werden Videoaufnahmen von Probanden hinsichtlich des mimischen
+<br/>Schmerzausdrucks analysiert. Zur Beschreibung des mimischen Ausdrucks und
+<br/>dessen Ver¨anderungen wird das Facial Action Coding System (FACS) [1] verwen-
+<br/>det, das anatomisch begr¨undet, kleinste sichtbare Muskelbewegungen im Gesicht
+<br/>beschreibt und als einzelne Action Units (AUs) kategorisiert. Eine Vielzahl von
+<br/>Untersuchungen hat gezeigt, dass spezifische Muster von Action Units auftre-
+<br/>ten, wenn Probanden Schmerzen angeben [2]. Die manuelle Klassifikation und
+<br/>Markierung der Action Units von Probanden in Videosequenzen bedarf einer
+<br/>langwierigen Beobachtung durch ausgebildete FACS-Coder. Eine automatische
+<br/>(Vor-)klassifikation kann hierbei den klinischen Workflow unterst¨utzen und dieses
+<br/>Verfahren zum brauchbaren diagnostischen Instrument machen. Bisher realisier-
+<br/>te Ans¨atze zum Erkennen von Gesichtsausdr¨ucken basieren auf der Klassifikation
+</td></tr><tr><td>36ce0b68a01b4c96af6ad8c26e55e5a30446f360</td><td>Multimed Tools Appl
+<br/>DOI 10.1007/s11042-014-2322-6
+<br/>Facial expression recognition based on a mlp neural
+<br/>network using constructive training algorithm
+<br/>Received: 5 February 2014 / Revised: 22 August 2014 / Accepted: 13 October 2014
+<br/>© Springer Science+Business Media New York 2014
+</td></tr><tr><td>3674f3597bbca3ce05e4423611d871d09882043b</td><td>ISSN 1796-2048
+<br/>Volume 7, Number 4, August 2012
+<br/>Contents
+<br/>Special Issue: Multimedia Contents Security in Social Networks Applications
+<br/>Guest Editors: Zhiyong Zhang and Muthucumaru Maheswaran
+<br/>Guest Editorial
+<br/>Zhiyong Zhang and Muthucumaru Maheswaran
+<br/>SPECIAL ISSUE PAPERS
+<br/>DRTEMBB: Dynamic Recommendation Trust Evaluation Model Based on Bidding
+<br/>Gang Wang and Xiao-lin Gui
+<br/>Block-Based Parallel Intra Prediction Scheme for HEVC
+<br/>Jie Jiang, Baolong, Wei Mo, and Kefeng Fan
+<br/>Optimized LSB Matching Steganography Based on Fisher Information
+<br/>Yi-feng Sun, Dan-mei Niu, Guang-ming Tang, and Zhan-zhan Gao
+<br/>A Novel Robust Zero-Watermarking Scheme Based on Discrete Wavelet Transform
+<br/>Yu Yang, Min Lei, Huaqun Liu, Yajian Zhou, and Qun Luo
+<br/>Stego Key Estimation in LSB Steganography
+<br/>Jing Liu and Guangming Tang
+<br/>REGULAR PAPERS
+<br/>Facial Expression Spacial Charts for Describing Dynamic Diversity of Facial Expressions
+<br/>277
+<br/>279
+<br/>289
+<br/>295
+<br/>303
+<br/>309
+<br/>314
+</td></tr><tr><td>362a70b6e7d55a777feb7b9fc8bc4d40a57cde8c</td><td>978-1-4799-9988-0/16/$31.00 ©2016 IEEE
+<br/>2792
+<br/>ICASSP 2016
+</td></tr><tr><td>366d20f8fd25b4fe4f7dc95068abc6c6cabe1194</td><td></td></tr><tr><td>3630324c2af04fd90f8668f9ee9709604fe980fd</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TCSVT.2016.2607345, IEEE
+<br/>Transactions on Circuits and Systems for Video Technology
+<br/>Image Classification with Tailored Fine-Grained
+<br/>Dictionaries
+</td></tr><tr><td>362ba8317aba71c78dafca023be60fb71320381d</td><td></td></tr><tr><td>36cf96fe11a2c1ea4d999a7f86ffef6eea7b5958</td><td>RGB-D Face Recognition with Texture and
+<br/>Attribute Features
+<br/>Member, IEEE
+</td></tr><tr><td>36018404263b9bb44d1fddaddd9ee9af9d46e560</td><td>OCCLUDED FACE RECOGNITION BY USING GABOR
+<br/>FEATURES
+<br/>1 Department of Electrical And Electronics Engineering, METU, Ankara, Turkey
+<br/>2 7h%ł7$.(cid:3)%ł/7(1(cid:15)(cid:3)$QNDUD(cid:15)(cid:3)7XUNH\
+</td></tr><tr><td>5c4ce36063dd3496a5926afd301e562899ff53ea</td><td></td></tr><tr><td>5c2a7518fb26a37139cebff76753d83e4da25159</td><td></td></tr><tr><td>5c2e264d6ac253693469bd190f323622c457ca05</td><td>978-1-4799-2341-0/13/$31.00 ©2013 IEEE
+<br/>4367
+<br/>ICIP 2013
+</td></tr><tr><td>5c473cfda1d7c384724fbb139dfe8cb39f79f626</td><td></td></tr><tr><td>5c5e1f367e8768a9fb0f1b2f9dbfa060a22e75c0</td><td>2132
+<br/>Reference Face Graph for Face Recognition
+</td></tr><tr><td>5c35ac04260e281141b3aaa7bbb147032c887f0c</td><td>Face Detection and Tracking Control with Omni Car
+<br/>CS 231A Final Report
+<br/>June 31, 2016
+</td></tr><tr><td>5c717afc5a9a8ccb1767d87b79851de8d3016294</td><td>978-1-4673-0046-9/12/$26.00 ©2012 IEEE
+<br/>1845
+<br/>ICASSP 2012
+</td></tr><tr><td>0952ac6ce94c98049d518d29c18d136b1f04b0c0</td><td></td></tr><tr><td>09137e3c267a3414314d1e7e4b0e3a4cae801f45</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Two Birds with One Stone: Transforming and Generating
+<br/>Facial Images with Iterative GAN
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>09926ed62511c340f4540b5bc53cf2480e8063f8</td><td>Action Tubelet Detector for Spatio-Temporal Action Localization
+</td></tr><tr><td>09718bf335b926907ded5cb4c94784fd20e5ccd8</td><td>875
+<br/>Recognizing Partially Occluded, Expression Variant
+<br/>Faces From Single Training Image per Person
+<br/>With SOM and Soft k-NN Ensemble
+</td></tr><tr><td>0903bb001c263e3c9a40f430116d1e629eaa616f</td><td>CVPR
+<br/>#987
+<br/>000
+<br/>001
+<br/>002
+<br/>003
+<br/>004
+<br/>005
+<br/>006
+<br/>007
+<br/>008
+<br/>009
+<br/>010
+<br/>011
+<br/>012
+<br/>013
+<br/>014
+<br/>015
+<br/>016
+<br/>017
+<br/>018
+<br/>019
+<br/>020
+<br/>021
+<br/>022
+<br/>023
+<br/>024
+<br/>025
+<br/>026
+<br/>027
+<br/>028
+<br/>029
+<br/>030
+<br/>031
+<br/>032
+<br/>033
+<br/>034
+<br/>035
+<br/>036
+<br/>037
+<br/>038
+<br/>039
+<br/>040
+<br/>041
+<br/>042
+<br/>043
+<br/>044
+<br/>045
+<br/>046
+<br/>047
+<br/>048
+<br/>049
+<br/>050
+<br/>051
+<br/>052
+<br/>053
+<br/>CVPR 2009 Submission #987. CONFIDENTIAL REVIEW COPY. DO NOT DISTRIBUTE.
+<br/>An Empirical Study of Context in Object Detection
+<br/>Anonymous CVPR submission
+<br/>Paper ID 987
+</td></tr><tr><td>09df62fd17d3d833ea6b5a52a232fc052d4da3f5</td><td>ISSN: 1405-5546
+<br/>Instituto Politécnico Nacional
+<br/>México
+<br/>
+<br/>Rivas Araiza, Edgar A.; Mendiola Santibañez, Jorge D.; Herrera Ruiz, Gilberto; González Gutiérrez,
+<br/>Carlos A.; Trejo Perea, Mario; Ríos Moreno, G. J.
+<br/>Mejora de Contraste y Compensación en Cambios de la Iluminación
+<br/>Instituto Politécnico Nacional
+<br/>Distrito Federal, México
+<br/>Disponible en: http://www.redalyc.org/articulo.oa?id=61509703
+<br/> Cómo citar el artículo
+<br/> Número completo
+<br/> Más información del artículo
+<br/> Página de la revista en redalyc.org
+<br/>Sistema de Información Científica
+<br/>Red de Revistas Científicas de América Latina, el Caribe, España y Portugal
+<br/>Proyecto académico sin fines de lucro, desarrollado bajo la iniciativa de acceso abierto
+</td></tr><tr><td>097104fc731a15fad07479f4f2c4be2e071054a2</td><td></td></tr><tr><td>09f853ce12f7361c4b50c494df7ce3b9fad1d221</td><td>myjournal manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Random forests for real time 3D face analysis
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>09111da0aedb231c8484601444296c50ca0b5388</td><td></td></tr><tr><td>09750c9bbb074bbc4eb66586b20822d1812cdb20</td><td>978-1-4673-0046-9/12/$26.00 ©2012 IEEE
+<br/>1385
+<br/>ICASSP 2012
+</td></tr><tr><td>097f674aa9e91135151c480734dda54af5bc4240</td><td>Proc. VIIth Digital Image Computing: Techniques and Applications, Sun C., Talbot H., Ourselin S. and Adriaansen T. (Eds.), 10-12 Dec. 2003, Sydney
+<br/>Face Recognition Based on Multiple Region Features
+<br/>CSIRO Telecommunications & Industrial Physics
+<br/>Australia
+<br/>Tel: 612 9372 4104, Fax: 612 9372 4411, Email:
+</td></tr><tr><td>5d485501f9c2030ab33f97972aa7585d3a0d59a7</td><td></td></tr><tr><td>5de5848dc3fc35e40420ffec70a407e4770e3a8d</td><td>WebVision Database: Visual Learning and Understanding from Web Data
+<br/>1 Computer Vision Laboratory, ETH Zurich
+<br/>2 Google Switzerland
+</td></tr><tr><td>5da139fc43216c86d779938d1c219b950dd82a4c</td><td>1-4244-1437-7/07/$20.00 ©2007 IEEE
+<br/>II - 205
+<br/>ICIP 2007
+</td></tr><tr><td>5dc056fe911a3e34a932513abe637076250d96da</td><td></td></tr><tr><td>5d233e6f23b1c306cf62af49ce66faac2078f967</td><td>RESEARCH ARTICLE
+<br/>Optimal Geometrical Set for Automated
+<br/>Marker Placement to Virtualized Real-Time
+<br/>Facial Emotions
+<br/>School of Mechatronic Engineering, Universiti Malaysia Perlis, 02600, Ulu Pauh, Arau, Perlis, West Malaysia
+</td></tr><tr><td>5d7f8eb73b6a84eb1d27d1138965eb7aef7ba5cf</td><td>Robust Registration of Dynamic Facial Sequences
+</td></tr><tr><td>5dcf78de4d3d867d0fd4a3105f0defae2234b9cb</td><td></td></tr><tr><td>5db4fe0ce9e9227042144758cf6c4c2de2042435</td><td>INTERNATIONAL JOURNAL OF ELECTRICAL AND ELECTRONIC SYSTEMS RESEARCH, VOL.3, JUNE 2010
+<br/>Recognition of Facial Expression Using Haar
+<br/>Wavelet Transform
+<br/>for
+<br/>paper
+<br/>features
+<br/>investigates
+<br/>
+</td></tr><tr><td>5d5cd6fa5c41eb9d3d2bab3359b3e5eb60ae194e</td><td>Face Recognition Algorithms
+<br/>June 16, 2010
+<br/>Ion Marqu´es
+<br/>Supervisor:
+<br/>Manuel Gra˜na
+</td></tr><tr><td>5d09d5257139b563bd3149cfd5e6f9eae3c34776</td><td>Optics Communications 338 (2015) 77–89
+<br/>Contents lists available at ScienceDirect
+<br/>Optics Communications
+<br/>journal homepage: www.elsevier.com/locate/optcom
+<br/>Pattern recognition with composite correlation filters designed with
+<br/>multi-objective combinatorial optimization
+<br/>a Instituto Politécnico Nacional – CITEDI, Ave. del Parque 1310, Mesade Otay, Tijuana B.C. 22510, México
+<br/>b Department of Computer Science, CICESE, Carretera Ensenada-Tijuana 3918, Ensenada B.C. 22860, México
+<br/>c Instituto Tecnológico de Tijuana, Blvd. Industrial y Ave. ITR TijuanaS/N, Mesa de Otay, Tijuana B.C. 22500, México
+<br/>d National Ignition Facility, Lawrence Livermore National Laboratory, Livermore, CA 94551, USA
+<br/>a r t i c l e i n f o
+<br/>a b s t r a c t
+<br/>Article history:
+<br/>Received 12 July 2014
+<br/>Accepted 16 November 2014
+<br/>Available online 23 October 2014
+<br/>Keywords:
+<br/>Object recognition
+<br/>Composite correlation filters
+<br/>Multi-objective evolutionary algorithm
+<br/>Combinatorial optimization
+<br/>Composite correlation filters are used for solving a wide variety of pattern recognition problems. These
+<br/>filters are given by a combination of several training templates chosen by a designer in an ad hoc manner.
+<br/>In this work, we present a new approach for the design of composite filters based on multi-objective
+<br/>combinatorial optimization. Given a vast search space of training templates, an iterative algorithm is used
+<br/>to synthesize a filter with an optimized performance in terms of several competing criteria. Moreover, by
+<br/>employing a suggested binary-search procedure a filter bank with a minimum number of filters can be
+<br/>constructed, for a prespecified trade-off of performance metrics. Computer simulation results obtained
+<br/>with the proposed method in recognizing geometrically distorted versions of a target in cluttered and
+<br/>noisy scenes are discussed and compared in terms of recognition performance and complexity with
+<br/>existing state-of-the-art filters.
+<br/>& Elsevier B.V. All rights reserved.
+<br/>1.
+<br/>Introduction
+<br/>Nowadays, object recognition receives much research interest
+<br/>due to its high impact in real-life activities, such as robotics, bio-
+<br/>metrics, and target tracking [1,2]. Object recognition consists in
+<br/>solving two essential tasks: detection of a target within an ob-
+<br/>served scene and determination of the exact position of the de-
+<br/>tected object. Different approaches can be utilized to address these
+<br/>tasks, that is feature-based methods [3–6] and template matching
+<br/>algorithms [7,8]. In feature-based methods the observed scene is
+<br/>processed to extract relevant features of potential targets within
+<br/>the scene. Next, the extracted features are processed and analyzed
+<br/>to make decisions. Feature-based methods yield good results in
+<br/>many applications. However, they depend on several subjective
+<br/>decisions which often require optimization [9,10]. On the other
+<br/>hand, correlation filtering is a template matching processing. In
+<br/>this approach, the coordinates of the maximum of the filter output
+<br/>are taken as estimates of the target coordinates in the observed
+<br/>scene. Correlation filters possess a good mathematical basis and
+<br/>they can be implemented by exploiting massive parallelism either
+<br/>in hybrid opto-digital correlators [11,12] or in high-performance
+<br/>n Corresponding author. Tel.: þ52 664 623 1344x82856.
+<br/>http://dx.doi.org/10.1016/j.optcom.2014.10.038
+<br/>0030-4018/& Elsevier B.V. All rights reserved.
+<br/>hardware such as graphics processing units (GPUs) [13] or field
+<br/>programmable gate arrays (FPGAs) [14] at high rate. Additionally,
+<br/>these filters are capable to reliably recognize a target in highly
+<br/>cluttered and noisy environments [8,15,16]. Moreover, they are
+<br/>able to estimate very accurately the position of the target within
+<br/>the scene [17]. Correlation filters are usually designed by a opti-
+<br/>mization of various criteria [18,19]. The filters can be broadly
+<br/>classified in to two main categories: analytical and composite fil-
+<br/>ters. Analytical filters optimize a performance criterion using
+<br/>mathematical models of signals and noise [20,21]. Composite fil-
+<br/>ters are constructed by combination of several training templates,
+<br/>each of them representing an expected target view in the observed
+<br/>scene [22,21]. In practice, composite filters are effective for real-
+<br/>life degradations of targets such as rotations and scaling. Compo-
+<br/>site filters are synthesized by means of a supervised training
+<br/>process. Thus, the performance of the filters highly depends on a
+<br/>proper selection of image templates used for training [20,23].
+<br/>Normally, the training templates are chosen by a designer in an ad
+<br/>hoc manner. Such a subjective procedure is not optimal. In addi-
+<br/>tion, Kumar and Pochavsky [24] showed that the signal to noise
+<br/>ratio of a composite filter gradually reduces when the number of
+<br/>training templates increases. In order to synthesize composite
+<br/>filters with improved performance in terms of several competing
+<br/>metrics, a search and optimization strategy is required to auto-
+<br/>matically choose the set of training templates.
+</td></tr><tr><td>5d01283474b73a46d80745ad0cc0c4da14aae194</td><td></td></tr><tr><td>5d197c8cd34473eb6cde6b65ced1be82a3a1ed14</td><td><b>AFaceImageDatabaseforEvaluatingOut-of-FocusBlurQiHan,QiongLiandXiamuNiuHarbinInstituteofTechnologyChina1.IntroductionFacerecognitionisoneofthemostpopularresearchfieldsofcomputervisionandmachinelearning(Tores(2004);Zhaoetal.(2003)).Alongwithinvestigationoffacerecognitionalgorithmsandsystems,manyfaceimagedatabaseshavebeencollected(Gross(2005)).Facedatabasesareimportantfortheadvancementoftheresearchfield.Becauseofthenonrigidityandcomplex3Dstructureofface,manyfactorsinfluencetheperformanceoffacedetectionandrecognitionalgorithmssuchaspose,expression,age,brightness,contrast,noise,blurandetc.Someearlyfacedatabasesgatheredunderstrictlycontrolledenvironment(Belhumeuretal.(1997);Samaria&Harter(1994);Turk&Pentland(1991))onlyallowslightexpressionvariation.Toinvestigatetherelationshipsbetweenalgorithms’performanceandtheabovefactors,morefacedatabaseswithlargerscaleandvariouscharacterswerebuiltinthepastyears(Bailly-Bailliereetal.(2003);Flynnetal.(2003);Gaoetal.(2008);Georghiadesetal.(2001);Hallinan(1995);Phillipsetal.(2000);Simetal.(2003)).Forinstance,The"CAS-PEAL","FERET","CMUPIE",and"YaleB"databasesincludevariousposes(Gaoetal.(2008);Georghiadesetal.(2001);Phillipsetal.(2000);Simetal.(2003));The"HarvardRL","CMUPIE"and"YaleB"databasesinvolvemorethan40differentconditionsinillumination(Georghiadesetal.(2001);Hallinan(1995);Simetal.(2003));Andthe"BANCA",and"NDHID"databasescontainover10timesgathering(Bailly-Bailliereetal.(2003);Flynnetal.(2003)).Thesedatabaseshelpresearcherstoevaluateandimprovetheiralgorithmsaboutfacedetection,recognition,andotherpurposes.Blurisnotthemostimportantbutstillanotablefactoraffectingtheperformanceofabiometricsystem(Fronthaleretal.(2006);Zamanietal.(2007)).Themainreasonsleadingblurconsistinout-of-focusofcameraandmotionofobject,andtheout-of-focusblurismoresignificantintheapplicationenvironmentoffacerecognition(Eskicioglu&Fi
sher(1995);Kimetal.(1998);Tanakaetal.(2007);Yitzhaky&Kopeika(1996)).Toinvestigatetheinfluenceofbluronafacerecognitionsystem,afaceimagedatabasewithdifferentconditionsofclarityandefficientblurevaluatingalgorithmsareneeded.Thischapterintroducesanewfacedatabasebuiltforthepurposeofblurevaluation.Theapplicationenvironmentsoffacerecognitionareanalyzedfirstly,thenaimagegatheringschemeisdesigned.Twotypicalgatheringfacilitiesareusedandthefocusstatusaredividedinto11steps.Further,theblurassessmentalgorithmsaresummarizedandthecomparisonbetweenthemisraisedonthevarious-claritydatabase.The7www.intechopen.com</b></td></tr><tr><td>31aa20911cc7a2b556e7d273f0bdd5a2f0671e0a</td><td></td></tr><tr><td>31b05f65405534a696a847dd19c621b7b8588263</td><td></td></tr><tr><td>318e7e6daa0a799c83a9fdf7dd6bc0b3e89ab24a</td><td>Sparsity in Dynamics of Spontaneous
+<br/>Subtle Emotions: Analysis & Application
+</td></tr><tr><td>31c0968fb5f587918f1c49bf7fa51453b3e89cf7</td><td>Deep Transfer Learning for Person Re-identification
+</td></tr><tr><td>31e57fa83ac60c03d884774d2b515813493977b9</td><td></td></tr><tr><td>316e67550fbf0ba54f103b5924e6537712f06bee</td><td>Multimodal semi-supervised learning
+<br/>for image classification
+<br/>LEAR team, INRIA Grenoble, France
+</td></tr><tr><td>31ef5419e026ef57ff20de537d82fe3cfa9ee741</td><td>Facial Expression Analysis Based on
+<br/>High Dimensional Binary Features
+<br/>´Ecole Polytechique de Montr´eal, Universit´e de Montr´eal, Montr´eal, Canada
+</td></tr><tr><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td><td></td></tr><tr><td>31ace8c9d0e4550a233b904a0e2aabefcc90b0e3</td><td>Learning Deep Face Representation
+<br/>Megvii Inc.
+<br/>Megvii Inc.
+<br/>Megvii Inc.
+<br/>Megvii Inc.
+<br/>Megvii Inc.
+</td></tr><tr><td>312afff739d1e0fcd3410adf78be1c66b3480396</td><td></td></tr><tr><td>31bb49ba7df94b88add9e3c2db72a4a98927bb05</td><td></td></tr><tr><td>91811203c2511e919b047ebc86edad87d985a4fa</td><td>Expression Subspace Projection for Face
+<br/>Recognition from Single Sample per Person
+</td></tr><tr><td>91e57667b6fad7a996b24367119f4b22b6892eca</td><td>Probabilistic Corner Detection for Facial Feature
+<br/>Extraction
+<br/>Article
+<br/>Accepted version
+<br/>E. Ardizzone, M. La Cascia, M. Morana
+<br/>In Lecture Notes in Computer Science Volume 5716, 2009
+<br/>It is advisable to refer to the publisher's version if you intend to cite
+<br/>from the work.
+<br/>Publisher: Springer
+<br/>http://link.springer.com/content/pdf/10.1007%2F978-3-
+<br/>642-04146-4_50.pdf
+</td></tr><tr><td>91883dabc11245e393786d85941fb99a6248c1fb</td><td></td></tr><tr><td>917bea27af1846b649e2bced624e8df1d9b79d6f</td><td>Ultra Power-Efficient CNN Domain Specific Accelerator with 9.3TOPS/Watt for
+<br/>Mobile and Embedded Applications
+<br/>Gyrfalcon Technology Inc.
+<br/>1900 McCarthy Blvd. Milpitas, CA 95035
+</td></tr><tr><td>91b1a59b9e0e7f4db0828bf36654b84ba53b0557</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI
+<br/>> REPLACE THIS LINE WITH YOUR PAPER IDENTIFICATION NUMBER (DOUBLE-CLICK HERE TO EDIT) <
+<br/>
+<br/>Simultaneous Hallucination and Recognition of
+<br/>Low-Resolution Faces Based on Singular Value
+<br/>Decomposition
+<br/>(SVD)
+<br/>for performing both
+</td></tr><tr><td>911bef7465665d8b194b6b0370b2b2389dfda1a1</td><td>RANJAN, ROMERO, BLACK: LEARNING HUMAN OPTICAL FLOW
+<br/>Learning Human Optical Flow
+<br/>1 MPI for Intelligent Systems
+<br/>Tübingen, Germany
+<br/>2 Amazon Inc.
+</td></tr><tr><td>91ead35d1d2ff2ea7cf35d15b14996471404f68d</td><td>Combining and Steganography of 3D Face Textures
+</td></tr><tr><td>919d0e681c4ef687bf0b89fe7c0615221e9a1d30</td><td></td></tr><tr><td>912a6a97af390d009773452814a401e258b77640</td><td></td></tr><tr><td>91d513af1f667f64c9afc55ea1f45b0be7ba08d4</td><td>Automatic Face Image Quality Prediction
+</td></tr><tr><td>918b72a47b7f378bde0ba29c908babf6dab6f833</td><td></td></tr><tr><td>91e58c39608c6eb97b314b0c581ddaf7daac075e</td><td>Pixel-wise Ear Detection with Convolutional
+<br/>Encoder-Decoder Networks
+</td></tr><tr><td>91d2fe6fdf180e8427c65ffb3d895bf9f0ec4fa0</td><td></td></tr><tr><td>9131c990fad219726eb38384976868b968ee9d9c</td><td>Deep Facial Expression Recognition: A Survey
+</td></tr><tr><td>915d4a0fb523249ecbc88eb62cb150a60cf60fa0</td><td>Comparison of Feature Extraction Techniques in Automatic
+<br/>Face Recognition Systems for Security Applications
+<br/>S . Cruz-Llanas, J. Ortega-Garcia, E. Martinez-Torrico, J. Gonzalez-Rodriguez
+<br/>Dpto. Ingenieria Audiovisual y Comunicaciones, EUIT Telecomunicacion, Univ. PolitCcnica de Madrid, Spain
+<br/>http://www.atvs.diac.upm.es
+</td></tr><tr><td>65b737e5cc4a565011a895c460ed8fd07b333600</td><td>Transfer Learning For Cross-Dataset Recognition: A Survey
+<br/>This paper summarises and analyses the cross-dataset recognition transfer learning techniques with the
+<br/>emphasis on what kinds of methods can be used when the available source and target data are presented
+<br/>in different forms for boosting the target task. This paper for the first time summarises several transferring
+<br/>criteria in details from the concept level, which are the key bases to guide what kind of knowledge to transfer
+<br/>between datasets. In addition, a taxonomy of cross-dataset scenarios and problems is proposed according the
+<br/>properties of data that define how different datasets are diverged, thereby review the recent advances on
+<br/>each specific problem under different scenarios. Moreover, some real world applications and corresponding
+<br/>commonly used benchmarks of cross-dataset recognition are reviewed. Lastly, several future directions are
+<br/>identified.
+<br/>Additional Key Words and Phrases: Cross-dataset, transfer learning, domain adaptation
+<br/>1. INTRODUCTION
+<br/>It has been explored how human would transfer learning in one context to another
+<br/>similar context [Woodworth and Thorndike 1901; Perkins et al. 1992] in the field of
+<br/>Psychology and Education. For example, learning to drive a car helps a person later
+<br/>to learn more quickly to drive a truck, and learning mathematics prepares students to
+<br/>study physics. The machine learning algorithms are mostly inspired by human brains.
+<br/>However, most of them require a huge amount of training examples to learn a new
+<br/>model from scratch and fail to apply knowledge learned from previous domains or
+<br/>tasks. This may be due to that a basic assumption of statistical learning theory is
+<br/>that the training and test data are drawn from the same distribution and belong to
+<br/>the same task. Intuitively, learning from scratch is not realistic and practical, because
+<br/>it violates how human learn things. In addition, manually labelling a large amount
+<br/>of data for new domain or task is labour extensive, especially for the modern “data-
+<br/>hungry” and “data-driven” learning techniques (i.e. deep learning). However, the big
+<br/>data era provides a huge amount available data collected for other domains and tasks.
+<br/>Hence, how to use the previously available data smartly for the current task with
+<br/>scarce data will be beneficial for real world applications.
+<br/>To reuse the previous knowledge for current tasks, the differences between old data
+<br/>and new data need to be taken into account. Take the object recognition as an ex-
+<br/>ample. As claimed by Torralba and Efros [2011], despite the great efforts of object
+<br/>datasets creators, the datasets appear to have strong build-in bias caused by various
+<br/>factors, such as selection bias, capture bias, category or label bias, and negative set
+<br/>bias. This suggests that no matter how big the dataset is, it is impossible to cover
+<br/>the complexity of the real visual world. Hence, the dataset bias needs to be consid-
+<br/>ered before reusing data from previous datasets. Pan and Yang [2010] summarise that
+<br/>the differences between different datasets can be caused by domain divergence (i.e.
+<br/>distribution shift or feature space difference) or task divergence (i.e. conditional dis-
+<br/>tribution shift or label space difference), or both. For example, in visual recognition,
+<br/>the distributions between the previous and current data can be discrepant due to the
+<br/>different environments, lighting, background, sensor types, resolutions, view angles,
+<br/>and post-processing. Those external factors may cause the distribution divergence or
+<br/>even feature space divergence between different domains. On the other hand, the task
+<br/>divergence between current and previous data is also ubiquitous. For example, it is
+<br/>highly possible that an animal species that we want to recognize have not been seen
+<br/>ACM Journal Name, Vol. V, No. N, Article A, Publication date: January YYYY.
+</td></tr><tr><td>6582f4ec2815d2106957215ca2fa298396dde274</td><td>JUNE 2007
+<br/>1005
+<br/>Discriminative Learning and Recognition
+<br/>of Image Set Classes Using
+<br/>Canonical Correlations
+</td></tr><tr><td>655d9ba828eeff47c600240e0327c3102b9aba7c</td><td>IEEE TRANSACTIONS ON SYSTEMS, MAN, AND CYBERNETICS—PART B: CYBERNETICS, VOL. 35, NO. 3, JUNE 2005
+<br/>489
+<br/>Kernel Pooled Local Subspaces for Classification
+</td></tr><tr><td>656a59954de3c9fcf82ffcef926af6ade2f3fdb5</td><td>Convolutional Network Representation
+<br/>for Visual Recognition
+<br/>Doctoral Thesis
+<br/>Stockholm, Sweden, 2017
+</td></tr><tr><td>656f05741c402ba43bb1b9a58bcc5f7ce2403d9a</td><td></td></tr><tr><td>65817963194702f059bae07eadbf6486f18f4a0a</td><td>http://dx.doi.org/10.1007/s11263-015-0814-0
+<br/>WhittleSearch: Interactive Image Search with Relative Attribute
+<br/>Feedback
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>6581c5b17db7006f4cc3575d04bfc6546854a785</td><td>Contextual Person Identification
+<br/>in Multimedia Data
+<br/>zur Erlangung des akademischen Grades eines
+<br/>Doktors der Ingenieurwissenschaften
+<br/>der Fakultät für Informatik
+<br/>des Karlsruher Instituts für Technologie (KIT)
+<br/>genehmigte
+<br/>Dissertation
+<br/>von
+<br/>aus Erlangen
+<br/>Tag der mündlichen Prüfung:
+<br/>18. November 2014
+<br/>Hauptreferent:
+<br/>Korreferent:
+<br/>Prof. Dr. Rainer Stiefelhagen
+<br/>Karlsruher Institut für Technologie
+<br/>Prof. Dr. Gerhard Rigoll
+<br/>Technische Universität München
+<br/>KIT – Universität des Landes Baden-Württemberg und nationales Forschungszentrum in der Helmholtz-Gemeinschaft
+<br/>www.kit.edu
+</td></tr><tr><td>65babb10e727382b31ca5479b452ee725917c739</td><td>Label Distribution Learning
+</td></tr><tr><td>62dccab9ab715f33761a5315746ed02e48eed2a0</td><td>A Short Note about Kinetics-600
+<br/>Jo˜ao Carreira
+</td></tr><tr><td>62d1a31b8acd2141d3a994f2d2ec7a3baf0e6dc4</td><td>Ding et al. EURASIP Journal on Image and Video Processing (2017) 2017:43
+<br/>DOI 10.1186/s13640-017-0188-z
+<br/>EURASIP Journal on Image
+<br/>and Video Processing
+<br/>R ES EAR CH
+<br/>Noise-resistant network: a deep-learning
+<br/>method for face recognition under noise
+<br/>Open Access
+</td></tr><tr><td>62694828c716af44c300f9ec0c3236e98770d7cf</td><td>Padrón-Rivera, G., Rebolledo-Mendez, G., Parra, P. P., & Huerta-Pacheco, N. S. (2016). Identification of Action Units Related to
+<br/>Identification of Action Units Related to Affective States in a Tutoring System
+<br/>1Facultad de Estadística e Informática, Universidad Veracruzana, Mexico // 2Universidad Juárez Autónoma de
+<br/>for Mathematics
+<br/>Huerta-Pacheco1
+<br/>*Corresponding author
+</td></tr><tr><td>620339aef06aed07a78f9ed1a057a25433faa58b</td><td></td></tr><tr><td>62b3598b401c807288a113796f424612cc5833ca</td><td></td></tr><tr><td>628a3f027b7646f398c68a680add48c7969ab1d9</td><td>Plan for Final Year Project:
+<br/>HKU-Face: A Large Scale Dataset for Deep Face
+<br/>Recognition
+<br/>3035140108
+<br/>3035141841
+<br/>Introduction
+<br/>Face recognition has been one of the most successful techniques in the field of artificial intelligence
+<br/>because of its surpassing human-level performance in academic experiments and broad application in
+<br/>the industrial world. Gaussian-face[1] and Facenet[2] hold state-of-the-art record using statistical
+<br/>method and deep-learning method respectively. What’s more, face recognition has been applied
+<br/>in various areas like authority checking and recording, fostering a large number of start-ups like
+<br/>Face++.
+<br/>Our final year project will deal with the face recognition task by building a large-scaled and carefully-
+<br/>filtered dataset. Our project plan specifies our roadmap and current research process. This plan first
+<br/>illustrates the significance and potential enhancement in constructing large-scale face dataset for
+<br/>both academics and companies. Then objectives to accomplish and related literature review will be
+<br/>expressed in detail. Next, methodologies used, scope of our project and challenges faced by us are
+<br/>described. The detailed timeline for this project follows as well as a small summary.
+<br/>2 Motivation
+<br/>Nowadays most of the face recognition tasks are supervised learning tasks which use dataset annotated
+<br/>by human beings. This contains mainly two drawbacks: (1) limited size of dataset due to limited
+<br/>human effort; (2) accuracy problem resulted from human perceptual bias.
+<br/>Parkhi et al.[3] discuss the first problem, showing that giant companies hold private face databases
+<br/>with larger size of data (See the comparison in Table 1). Other research institution could only get
+<br/>access to public but smaller databases like LFW[4, 5], which acts like a barricade to even higher
+<br/>performance.
+<br/>Dataset
+<br/>IJB-A [6]
+<br/>LFW [4, 5]
+<br/>YFD [7]
+<br/>CelebFaces [8]
+<br/>CASIA-WebFace [9]
+<br/>MS-Celeb-1M [10]
+<br/>Facebook
+<br/>Google
+<br/>Availability
+<br/>public
+<br/>public
+<br/>public
+<br/>public
+<br/>public
+<br/>public
+<br/>private
+<br/>private
+<br/>identities
+<br/>500
+<br/>5K
+<br/>1595
+<br/>10K
+<br/>10K
+<br/>100K
+<br/>4K
+<br/>8M
+<br/>images
+<br/>5712
+<br/>13K
+<br/>3425 videos
+<br/>202K
+<br/>500K
+<br/>about 10M
+<br/>4400K
+<br/>100-200M
+<br/>Table 1: Face recognition datasets
+</td></tr><tr><td>6257a622ed6bd1b8759ae837b50580657e676192</td><td></td></tr><tr><td>626859fe8cafd25da13b19d44d8d9eb6f0918647</td><td>Activity Recognition based on a
+<br/>Magnitude-Orientation Stream Network
+<br/>Smart Surveillance Interest Group, Department of Computer Science
+<br/>Universidade Federal de Minas Gerais, Belo Horizonte, Brazil
+</td></tr><tr><td>620e1dbf88069408b008347cd563e16aeeebeb83</td><td></td></tr><tr><td>62007c30f148334fb4d8975f80afe76e5aef8c7f</td><td>Eye In-Painting with Exemplar Generative Adversarial Networks
+<br/>Facebook Inc.
+<br/>1 Hacker Way, Menlo Park (CA), USA
+</td></tr><tr><td>62a30f1b149843860938de6dd6d1874954de24b7</td><td>418
+<br/>Fast Algorithm for Updating the Discriminant Vectors
+<br/>of Dual-Space LDA
+</td></tr><tr><td>62e0380a86e92709fe2c64e6a71ed94d152c6643</td><td>Facial Emotion Recognition With Expression Energy
+<br/>Albert Cruz
+<br/>Center for Research in
+<br/>Intelligent Systems
+<br/>216 Winston Chung Hall
+<br/>Center for Research in
+<br/>Intelligent Systems
+<br/>216 Winston Chung Hall
+<br/>Center for Research in
+<br/>Intelligent Systems
+<br/>216 Winston Chung Hall
+<br/>Riverside, CA, 92521-0425,
+<br/>Riverside, CA, 92521-0425,
+<br/>Riverside, CA, 92521-0425,
+<br/>USA
+<br/>USA
+<br/>USA
+</td></tr><tr><td>961a5d5750f18e91e28a767b3cb234a77aac8305</td><td>Face Detection without Bells and Whistles
+<br/>1 ESAT-PSI/VISICS, iMinds, KU Leuven, Belgium
+<br/>2 MPI Informatics, Saarbrücken, Germany
+<br/>3 D-ITET/CVL, ETH Zürich, Switzerland
+</td></tr><tr><td>9626bcb3fc7c7df2c5a423ae8d0a046b2f69180c</td><td>UPTEC STS 17033
+<br/>Examensarbete 30 hp
+<br/>November 2017
+<br/>A deep learning approach for
+<br/>action classification in American
+<br/>football video sequences
+</td></tr><tr><td>9696b172d66e402a2e9d0a8d2b3f204ad8b98cc4</td><td>J Inf Process Syst, Vol.9, No.1, March 2013
+<br/>pISSN 1976-913X
+<br/>eISSN 2092-805X
+<br/>Region-Based Facial Expression Recognition in
+<br/>Still Images
+</td></tr><tr><td>964a3196d44f0fefa7de3403849d22bbafa73886</td><td></td></tr><tr><td>9606b1c88b891d433927b1f841dce44b8d3af066</td><td>Principal Component Analysis with Tensor Train
+<br/>Subspace
+</td></tr><tr><td>96b1000031c53cd4c1c154013bb722ffd87fa7da</td><td>ContextVP: Fully Context-Aware Video
+<br/>Prediction
+<br/>1 NVIDIA, Santa Clara, CA, USA
+<br/>2 ETH Zurich, Zurich, Switzerland
+<br/>3 The Swiss AI Lab IDSIA, Manno, Switzerland
+<br/>4 NNAISENSE, Lugano, Switzerland
+</td></tr><tr><td>968f472477a8afbadb5d92ff1b9c7fdc89f0c009</td><td>Firefly-based Facial Expression Recognition
+</td></tr><tr><td>9636c7d3643fc598dacb83d71f199f1d2cc34415</td><td></td></tr><tr><td>3a2fc58222870d8bed62442c00341e8c0a39ec87</td><td>Probabilistic Local Variation
+<br/>Segmentation
+<br/>Technion - Computer Science Department - M.Sc. Thesis MSC-2014-02 - 2014 </td></tr><tr><td>3abc833f4d689f37cc8a28f47fb42e32deaa4b17</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Large Scale Retrieval and Generation of Image Descriptions
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>3a60678ad2b862fa7c27b11f04c93c010cc6c430</td><td>JANUARY-MARCH 2012
+<br/>A Multimodal Database for
+<br/>Affect Recognition and Implicit Tagging
+</td></tr><tr><td>3a0a839012575ba455f2b84c2d043a35133285f9</td><td>444
+<br/>Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing, pages 444–454,
+<br/>Edinburgh, Scotland, UK, July 27–31, 2011. c(cid:13)2011 Association for Computational Linguistics
+</td></tr><tr><td>3a9681e2e07be7b40b59c32a49a6ff4c40c962a2</td><td>Biometrics & Biostatistics International Journal
+<br/>Comparing treatment means: overlapping standard
+<br/>errors, overlapping confidence intervals, and tests of
+<br/>hypothesis
+</td></tr><tr><td>3a846704ef4792dd329a5c7a2cb8b330ab6b8b4e</td><td>in any current or
+<br/>future media,
+<br/>for all other uses,
+<br/>© 2010 IEEE. Personal use of this material is permitted. Permission from IEEE must be
+<br/>obtained
+<br/>including
+<br/>reprinting/republishing this material for advertising or promotional purposes, creating
+<br/>new collective works, for resale or redistribution to servers or lists, or reuse of any
+<br/>copyrighted component of this work in other works.
+<br/>Pre-print of article that appeared at the IEEE Computer Society Workshop on Biometrics
+<br/>2010.
+<br/>The published article can be accessed from:
+<br/>http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=5544597
+</td></tr><tr><td>3a95eea0543cf05670e9ae28092a114e3dc3ab5c</td><td>Constructing the L2-Graph for Robust Subspace
+<br/>Learning and Subspace Clustering
+</td></tr><tr><td>3a4f522fa9d2c37aeaed232b39fcbe1b64495134</td><td>ISSN (Online) 2321 – 2004
+<br/>ISSN (Print) 2321 – 5526
+<br/> INTERNATIONAL JOURNAL OF INNOVATIVE RESEARCH IN ELECTRICAL, ELECTRONICS, INSTRUMENTATION AND CONTROL ENGINEERING
+<br/> Vol. 4, Issue 5, May 2016
+<br/>IJIREEICE
+<br/>Face Recognition and Retrieval Using Cross
+<br/>Age Reference Coding
+<br/>Sricharan H S1, Srinidhi K S1, Rajath D N1, Tejas J N1, Chandrakala B M2
+<br/> BE, DSCE, Bangalore1
+<br/>Assistant Professor, DSCE, Bangalore2
+</td></tr><tr><td>54969bcd728b0f2d3285866c86ef0b4797c2a74d</td><td>IEEE TRANSACTION SUBMISSION
+<br/>Learning for Video Compression
+</td></tr><tr><td>5456166e3bfe78a353df988897ec0bd66cee937f</td><td>Improved Boosting Performance by Exclusion
+<br/>of Ambiguous Positive Examples
+<br/>Computer Vision and Active Perception, KTH, Stockholm 10800, Sweden
+<br/>Keywords:
+<br/>Boosting, Image Classification, Algorithm Evaluation, Dataset Pruning, VOC2007.
+</td></tr><tr><td>54aacc196ffe49b3450059fccdf7cd3bb6f6f3c3</td><td>A Joint Learning Framework for Attribute Models and Object Descriptions
+<br/>Dhruv Mahajan
+<br/>Yahoo! Labs, Bangalore, India
+</td></tr><tr><td>541bccf19086755f8b5f57fd15177dc49e77d675</td><td></td></tr><tr><td>549c719c4429812dff4d02753d2db11dd490b2ae</td><td>YouTube-BoundingBoxes: A Large High-Precision
+<br/>Human-Annotated Data Set for Object Detection in Video
+<br/>Google Brain
+<br/>Google Brain
+<br/>Google Research
+<br/>Google Brain
+<br/>Google Brain
+</td></tr><tr><td>98b2f21db344b8b9f7747feaf86f92558595990c</td><td></td></tr><tr><td>988d1295ec32ce41d06e7cf928f14a3ee079a11e</td><td>Semantic Deep Learning
+<br/>September 29, 2015
+</td></tr><tr><td>981449cdd5b820268c0876477419cba50d5d1316</td><td>Learning Deep Features for One-Class
+<br/>Classification
+</td></tr><tr><td>98127346920bdce9773aba6a2ffc8590b9558a4a</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Efficient Human Action Recognition using
+<br/>Histograms of Motion Gradients and
+<br/>VLAD with Descriptor Shape Information
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>982fed5c11e76dfef766ad9ff081bfa25e62415a</td><td></td></tr><tr><td>98519f3f615e7900578bc064a8fb4e5f429f3689</td><td>Dictionary-based Domain Adaptation Methods
+<br/>for the Re-identification of Faces
+</td></tr><tr><td>9825aa96f204c335ec23c2b872855ce0c98f9046</td><td>International Journal of Ethics in Engineering & Management Education
+<br/>Website: www.ijeee.in (ISSN: 2348-4748, Volume 1, Issue 5, May2014)
+<br/>FACE AND FACIAL EXPRESSION
+<br/>RECOGNITION IN 3-D USING MASKED
+<br/>PROJECTION UNDER OCCLUSION
+<br/>Jyoti patil *
+<br/>M.Tech (CSE)
+<br/>GNDEC Bidar-585401
+<br/>BIDAR, INDIA
+<br/> M.Tech (CSE)
+<br/> GNDEC Bidar- 585401
+<br/> BIDAR, INDIA
+<br/> M.Tech (CSE)
+<br/> VKIT, Bangalore- 560040
+<br/>BANGALORE, INDIA
+</td></tr><tr><td>5334ac0a6438483890d5eef64f6db93f44aacdf4</td><td></td></tr><tr><td>53dd25350d3b3aaf19beb2104f1e389e3442df61</td><td></td></tr><tr><td>530243b61fa5aea19b454b7dbcac9f463ed0460e</td><td></td></tr><tr><td>539ca9db570b5e43be0576bb250e1ba7a727d640</td><td></td></tr><tr><td>53c8cbc4a3a3752a74f79b74370ed8aeed97db85</td><td></td></tr><tr><td>5366573e96a1dadfcd4fd592f83017e378a0e185</td><td>Böhlen, Chandola and Salunkhe
+<br/>Server, server in the cloud.
+<br/>Who is the fairest in the crowd?
+</td></tr><tr><td>533bfb82c54f261e6a2b7ed7d31a2fd679c56d18</td><td>Technical Report MSU-CSE-14-1
+<br/>Unconstrained Face Recognition: Identifying a
+<br/>Person of Interest from a Media Collection
+</td></tr><tr><td>530ce1097d0681a0f9d3ce877c5ba31617b1d709</td><td></td></tr><tr><td>3fbd68d1268922ee50c92b28bd23ca6669ff87e5</td><td>598
+<br/>IEEE TRANSACTIONS ON IMAGE PROCESSING, VOL. 10, NO. 4, APRIL 2001
+<br/>A Shape- and Texture-Based Enhanced Fisher
+<br/>Classifier for Face Recognition
+</td></tr><tr><td>3f22a4383c55ceaafe7d3cfed1b9ef910559d639</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+<br/>Robust Kronecker Component Analysis
+</td></tr><tr><td>3fdcc1e2ebcf236e8bb4a6ce7baf2db817f30001</td><td>A top-down approach for a synthetic
+<br/>autobiographical memory system
+<br/>1Sheffield Centre for Robotics (SCentRo), Univ. of Sheffield, Sheffield, S10 2TN, UK
+<br/>2Dept. of Computer Science, Univ. of Sheffield, Sheffield, S1 4DP, UK
+<br/>3 CVAP Lab, KTH, Stockholm, Sweden
+</td></tr><tr><td>3f848d6424f3d666a1b6dd405a48a35a797dd147</td><td>GHODRATI et al.: IS 2D INFORMATION ENOUGH FOR VIEWPOINT ESTIMATION?
+<br/>Is 2D Information Enough For Viewpoint
+<br/>Estimation?
+<br/>KU Leuven, ESAT - PSI, iMinds
+<br/>Leuven, Belgium
+</td></tr><tr><td>3fa738ab3c79eacdbfafa4c9950ef74f115a3d84</td><td>DaMN – Discriminative and Mutually Nearest:
+<br/>Exploiting Pairwise Category Proximity
+<br/>for Video Action Recognition
+<br/>1 Center for Research in Computer Vision at UCF, Orlando, USA
+<br/>2 Google Research, Mountain View, USA
+<br/>http://crcv.ucf.edu/projects/DaMN/
+</td></tr><tr><td>3fb98e76ffd8ba79e1c22eda4d640da0c037e98a</td><td>Convolutional Neural Networks for Crop Yield Prediction using Satellite Images
+<br/>H. Russello
+</td></tr><tr><td>3f5cf3771446da44d48f1d5ca2121c52975bb3d3</td><td></td></tr><tr><td>3f14b504c2b37a0e8119fbda0eff52efb2eb2461</td><td>5727
+<br/>Joint Facial Action Unit Detection and Feature
+<br/>Fusion: A Multi-Conditional Learning Approach
+</td></tr><tr><td>3f9a7d690db82cf5c3940fbb06b827ced59ec01e</td><td>VIP: Finding Important People in Images
+<br/>Virginia Tech
+<br/>Google Inc.
+<br/>Virginia Tech
+<br/>Project: https://computing.ece.vt.edu/~mclint/vip/
+<br/>Demo: http://cloudcv.org/vip/
+</td></tr><tr><td>3fd90098551bf88c7509521adf1c0ba9b5dfeb57</td><td>Page 1 of 21
+<br/>*****For Peer Review Only*****
+<br/>10
+<br/>11
+<br/>12
+<br/>13
+<br/>14
+<br/>15
+<br/>16
+<br/>17
+<br/>18
+<br/>19
+<br/>20
+<br/>21
+<br/>22
+<br/>23
+<br/>24
+<br/>25
+<br/>26
+<br/>27
+<br/>28
+<br/>29
+<br/>30
+<br/>31
+<br/>32
+<br/>33
+<br/>34
+<br/>35
+<br/>36
+<br/>37
+<br/>38
+<br/>39
+<br/>40
+<br/>41
+<br/>42
+<br/>43
+<br/>44
+<br/>45
+<br/>46
+<br/>47
+<br/>48
+<br/>49
+<br/>50
+<br/>51
+<br/>52
+<br/>53
+<br/>54
+<br/>55
+<br/>56
+<br/>57
+<br/>58
+<br/>59
+<br/>60
+<br/>Attribute-Based Classification for Zero-Shot
+<br/>Visual Object Categorization
+</td></tr><tr><td>3f63f9aaec8ba1fa801d131e3680900680f14139</td><td>Facial Expression Recognition using Local Binary
+<br/>Patterns and Kullback Leibler Divergence
+<br/>AnushaVupputuri, SukadevMeher
+<br/>
+<br/>divergence.
+<br/>role
+</td></tr><tr><td>3f0e0739677eb53a9d16feafc2d9a881b9677b63</td><td>Efficient Two-Stream Motion and Appearance 3D CNNs for
+<br/>Video Classification
+<br/>ESAT-KU Leuven
+<br/>Ali Pazandeh
+<br/>Sharif UTech
+<br/>ESAT-KU Leuven, ETH Zurich
+</td></tr><tr><td>30870ef75aa57e41f54310283c0057451c8c822b</td><td>Overcoming Catastrophic Forgetting with Hard Attention to the Task
+</td></tr><tr><td>303065c44cf847849d04da16b8b1d9a120cef73a</td><td></td></tr><tr><td>3046baea53360a8c5653f09f0a31581da384202e</td><td>Deformable Face Alignment via Local
+<br/>Measurements and Global Constraints
+</td></tr><tr><td>3028690d00bd95f20842d4aec84dc96de1db6e59</td><td>Leveraging Union of Subspace Structure to Improve Constrained Clustering
+</td></tr><tr><td>30c96cc041bafa4f480b7b1eb5c45999701fe066</td><td>1090
+<br/>Discrete Cosine Transform Locality-Sensitive
+<br/>Hashes for Face Retrieval
+</td></tr><tr><td>306957285fea4ce11a14641c3497d01b46095989</td><td>FACE RECOGNITION UNDER VARYING LIGHTING BASED ON
+<br/>DERIVATES OF LOG IMAGE
+<br/>2ICT-ISVISION Joint R&D Laboratory for Face Recognition, CAS, Beijing 100080, China
+<br/>1Graduate School, CAS, Beijing, 100039, China
+</td></tr><tr><td>302c9c105d49c1348b8f1d8cc47bead70e2acf08</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TCSVT.2017.2710120, IEEE
+<br/>Transactions on Circuits and Systems for Video Technology
+<br/>IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY
+<br/>Unconstrained Face Recognition Using A Set-to-Set
+<br/>Distance Measure
+</td></tr><tr><td>304a306d2a55ea41c2355bd9310e332fa76b3cb0</td><td></td></tr><tr><td>5e7e055ef9ba6e8566a400a8b1c6d8f827099553</td><td></td></tr><tr><td>5e28673a930131b1ee50d11f69573c17db8fff3e</td><td>Author manuscript, published in "Workshop on Faces in 'Real-Life' Images: Detection, Alignment, and Recognition, Marseille : France
+<br/>(2008)"
+</td></tr><tr><td>5e6ba16cddd1797853d8898de52c1f1f44a73279</td><td>Face Identification with Second-Order Pooling
+</td></tr><tr><td>5e821cb036010bef259046a96fe26e681f20266e</td><td></td></tr><tr><td>5bfc32d9457f43d2488583167af4f3175fdcdc03</td><td>International Journal of Science and Research (IJSR), India Online ISSN: 2319-7064
+<br/>Local Gray Code Pattern (LGCP): A Robust
+<br/>Feature Descriptor for Facial Expression
+<br/>Recognition
+</td></tr><tr><td>5ba7882700718e996d576b58528f1838e5559225</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TAFFC.2016.2628787, IEEE
+<br/>Transactions on Affective Computing
+<br/>IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. X, NO. X, OCTOBER 2016
+<br/>Predicting Personalized Image Emotion
+<br/>Perceptions in Social Networks
+</td></tr><tr><td>5bb684dfe64171b77df06ba68997fd1e8daffbe1</td><td></td></tr><tr><td>5bae9822d703c585a61575dced83fa2f4dea1c6d</td><td>MOTChallenge 2015:
+<br/>Towards a Benchmark for Multi-Target Tracking
+</td></tr><tr><td>5babbad3daac5c26503088782fd5b62067b94fa5</td><td>Are You Sure You Want To Do That?
+<br/>Classification with Verification
+</td></tr><tr><td>5b9d9f5a59c48bc8dd409a1bd5abf1d642463d65</td><td>Evolving Systems. manuscript No.
+<br/>(will be inserted by the editor)
+<br/>An evolving spatio-temporal approach for gender and age
+<br/>group classification with Spiking Neural Networks
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>5bf70c1afdf4c16fd88687b4cf15580fd2f26102</td><td>Accepted in Pattern Recognition Letters
+<br/>Pattern Recognition Letters
+<br/>journal homepage: www.elsevier.com
+<br/>Residual Codean Autoencoder for Facial Attribute Analysis
+<br/>IIIT-Delhi, New Delhi, India
+<br/>Article history:
+<br/>Received 29 March 2017
+</td></tr><tr><td>5b2cfee6e81ef36507ebf3c305e84e9e0473575a</td><td></td></tr><tr><td>5be3cc1650c918da1c38690812f74573e66b1d32</td><td>Relative Parts: Distinctive Parts for Learning Relative Attributes
+<br/>Center for Visual Information Technology, IIIT Hyderabad, India - 500032
+</td></tr><tr><td>5b0ebb8430a04d9259b321fc3c1cc1090b8e600e</td><td></td></tr><tr><td>3765c26362ad1095dfe6744c6d52494ea106a42c</td><td></td></tr><tr><td>3727ac3d50e31a394b200029b2c350073c1b69e3</td><td></td></tr><tr><td>37f2e03c7cbec9ffc35eac51578e7e8fdfee3d4e</td><td>WACV
+<br/>#394
+<br/>000
+<br/>001
+<br/>002
+<br/>003
+<br/>004
+<br/>005
+<br/>006
+<br/>007
+<br/>008
+<br/>009
+<br/>010
+<br/>011
+<br/>012
+<br/>013
+<br/>014
+<br/>015
+<br/>016
+<br/>017
+<br/>018
+<br/>019
+<br/>020
+<br/>021
+<br/>022
+<br/>023
+<br/>024
+<br/>025
+<br/>026
+<br/>027
+<br/>028
+<br/>029
+<br/>030
+<br/>031
+<br/>032
+<br/>033
+<br/>034
+<br/>035
+<br/>036
+<br/>037
+<br/>038
+<br/>039
+<br/>040
+<br/>041
+<br/>042
+<br/>043
+<br/>044
+<br/>045
+<br/>046
+<br/>047
+<br/>048
+<br/>049
+<br/>050
+<br/>051
+<br/>052
+<br/>053
+<br/>WACV 2015 Submission #394. CONFIDENTIAL REVIEW COPY. DO NOT DISTRIBUTE.
+<br/>Co-operative Pedestrians Group Tracking in Crowded Scenes using an MST
+<br/>Approach
+<br/>Anonymous WACV submission
+<br/>Paper ID 394
+</td></tr><tr><td>377a1be5113f38297716c4bb951ebef7a93f949a</td><td>Dear Faculty, IGERT Fellows, IGERT Associates and Students,
+<br/>You are cordially invited to attend a Seminar presented by Albert Cruz. Please
+<br/>plan to attend.
+<br/> Albert Cruz
+<br/>IGERT Fellow
+<br/>Electrical Engineering
+<br/>
+<br/>Date: Friday, October 11, 2013
+<br/>Location: Bourns A265
+<br/>Time: 11:00am
+<br/>Facial emotion recognition with anisotropic
+<br/>inhibited gabor energy histograms
+</td></tr><tr><td>377c6563f97e76a4dc836a0bd23d7673492b1aae</td><td></td></tr><tr><td>370e0d9b89518a6b317a9f54f18d5398895a7046</td><td>IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY, VOL. X, NO. X, XXXXXXX 20XX
+<br/>Cross-pollination of normalisation techniques
+<br/>from speaker to face authentication
+<br/>using Gaussian mixture models
+<br/>and S´ebastien Marcel, Member, IEEE
+</td></tr><tr><td>37eb666b7eb225ffdafc6f318639bea7f0ba9a24</td><td>MSU Technical Report (2014): MSU-CSE-14-5
+<br/>Age, Gender and Race Estimation from
+<br/>Unconstrained Face Images
+</td></tr><tr><td>375435fb0da220a65ac9e82275a880e1b9f0a557</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI
+<br/>From Pixels to Response Maps: Discriminative Image
+<br/>Filtering for Face Alignment in the Wild
+</td></tr><tr><td>37b6d6577541ed991435eaf899a2f82fdd72c790</td><td>Vision-based Human Gender Recognition: A Survey
+<br/>Universiti Tunku Abdul Rahman, Kuala Lumpur, Malaysia.
+</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>Labeled Faces in the Wild: A Database for Studying
+<br/>Face Recognition in Unconstrained Environments
+</td></tr><tr><td>08d2f655361335bdd6c1c901642981e650dff5ec</td><td>This is the published version:  
+<br/> Arandjelovic, Ognjen and Cipolla, R. 2006, Automatic cast listing in feature‐length films with
+<br/>Anisotropic Manifold Space, in CVPR 2006 : Proceedings of the Computer Vision and Pattern
+<br/>Recognition Conference 2006, IEEE, Piscataway, New Jersey, pp. 1513‐1520.
+<br/>
+<br/> http://hdl.handle.net/10536/DRO/DU:30058435
+<br/> Reproduced with the kind permission of the copyright owner.
+<br/>Copyright : 2006, IEEE
+<br/>Available from Deakin Research Online: 
+</td></tr><tr><td>08ae100805d7406bf56226e9c3c218d3f9774d19</td><td>Gavrilescu and Vizireanu EURASIP Journal on Image and Video Processing (2017) 2017:59
+<br/>DOI 10.1186/s13640-017-0211-4
+<br/>EURASIP Journal on Image
+<br/>and Video Processing
+<br/>R ES EAR CH
+<br/>Predicting the Sixteen Personality Factors
+<br/>(16PF) of an individual by analyzing facial
+<br/>features
+<br/>Open Access
+</td></tr><tr><td>08c18b2f57c8e6a3bfe462e599a6e1ce03005876</td><td>A Least-Squares Framework
+<br/>for Component Analysis
+</td></tr><tr><td>081a431107eb38812b74a8cd036ca5e97235b499</td><td></td></tr><tr><td>0831a511435fd7d21e0cceddb4a532c35700a622</td><td></td></tr><tr><td>080c204edff49bf85b335d3d416c5e734a861151</td><td>CLAD: A Complex and Long Activities
+<br/>Dataset with Rich Crowdsourced
+<br/>Annotations
+<br/>Journal Title
+<br/>XX(X):1–6
+<br/>c(cid:13)The Author(s) 2016
+<br/>Reprints and permission:
+<br/>sagepub.co.uk/journalsPermissions.nav
+<br/>DOI: 10.1177/ToBeAssigned
+<br/>www.sagepub.com/
+</td></tr><tr><td>08f4832507259ded9700de81f5fd462caf0d5be8</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 118 – No.14, May 2015
+<br/>Geometric Approach for Human Emotion
+<br/>Recognition using Facial Expression
+<br/>S. S. Bavkar
+<br/>Assistant Professor
+<br/>J. S. Rangole
+<br/>Assistant Professor
+<br/>V. U. Deshmukh
+<br/>Assistant Professor
+</td></tr><tr><td>08d40ee6e1c0060d3b706b6b627e03d4b123377a</td><td>Human Action Localization
+<br/>with Sparse Spatial Supervision
+</td></tr><tr><td>08c1f8f0e69c0e2692a2d51040ef6364fb263a40</td><td></td></tr><tr><td>088aabe3da627432fdccf5077969e3f6402f0a80</td><td>Under review as a conference paper at ICLR 2018
+<br/>CLASSIFIER-TO-GENERATOR ATTACK: ESTIMATION
+<br/>OF TRAINING DATA DISTRIBUTION FROM CLASSIFIER
+<br/>Anonymous authors
+<br/>Paper under double-blind review
+</td></tr><tr><td>08903bf161a1e8dec29250a752ce9e2a508a711c</td><td>Joint Dimensionality Reduction and Metric Learning: A Geometric Take
+</td></tr><tr><td>08e24f9df3d55364290d626b23f3d42b4772efb6</td><td>ENHANCING FACIAL EXPRESSION CLASSIFICATION BY INFORMATION
+<br/>FUSION
+<br/>I. Buciu1, Z. Hammal 2, A. Caplier2, N. Nikolaidis 1, and I. Pitas 1
+<br/><b></b><br/>GR-54124, Thessaloniki, Box 451, Greece
+<br/>2 Laboratoire des Images et des Signaux / Institut National Polytechnique de Grenoble
+<br/>web: http://www.aiia.csd.auth.gr
+<br/>38031 Grenoble, France
+<br/>web: http://www.lis.inpg.fr
+</td></tr><tr><td>0830c9b9f207007d5e07f5269ffba003235e4eff</td><td></td></tr><tr><td>081fb4e97d6bb357506d1b125153111b673cc128</td><td></td></tr><tr><td>0857281a3b6a5faba1405e2c11f4e17191d3824d</td><td>Chude-Olisah et al. EURASIP Journal on Advances in Signal Processing 2014, 2014:102
+<br/>http://asp.eurasipjournals.com/content/2014/1/102
+<br/>R ES EAR CH
+<br/>Face recognition via edge-based Gabor feature
+<br/>representation for plastic surgery-altered images
+<br/>Open Access
+</td></tr><tr><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td><td>Understanding Kin Relationships in a Photo
+</td></tr><tr><td>082ad50ac59fc694ba4369d0f9b87430553b11db</td><td></td></tr><tr><td>6dd052df6b0e89d394192f7f2af4a3e3b8f89875</td><td>International Journal of Engineering and Advanced Technology (IJEAT)
+<br/>ISSN: 2249 – 8958, Volume-2, Issue-4, April 2013
+<br/>A literature survey on Facial Expression
+<br/>Recognition using Global Features
+<br/>
+</td></tr><tr><td>6dd5dbb6735846b214be72983e323726ef77c7a9</td><td>Josai Mathematical Monographs
+<br/>vol. 7 (2014), pp. 25-40
+<br/>A Survey on Newer Prospective
+<br/>Biometric Authentication Modalities
+</td></tr><tr><td>6d10beb027fd7213dd4bccf2427e223662e20b7d</td><td></td></tr><tr><td>6dddf1440617bf7acda40d4d75c7fb4bf9517dbb</td><td>JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, MM YY
+<br/>Beyond Counting: Comparisons of Density Maps for Crowd
+<br/>Analysis Tasks - Counting, Detection, and Tracking
+</td></tr><tr><td>6de18708218988b0558f6c2f27050bb4659155e4</td><td></td></tr><tr><td>6d91da37627c05150cb40cac323ca12a91965759</td><td></td></tr><tr><td>6d8c9a1759e7204eacb4eeb06567ad0ef4229f93</td><td>Face Alignment Robust to Pose, Expressions and
+<br/>Occlusions
+</td></tr><tr><td>6d66c98009018ac1512047e6bdfb525c35683b16</td><td>IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. 25, NO. 9, SEPTEMBER 2003
+<br/>1063
+<br/>Face Recognition Based on
+<br/>Fitting a 3D Morphable Model
+</td></tr><tr><td>016cbf0878db5c40566c1fbc237686fbad666a33</td><td></td></tr><tr><td>01bef320b83ac4405b3fc5b1cff788c124109fb9</td><td>de Lausanne
+<br/>RLC D1 740, CH-1015
+<br/>Lausanne
+<br/>de Lausanne
+<br/>RLC D1 740, CH-1015
+<br/>Lausanne
+<br/>de Lausanne
+<br/>RLC D1 740, CH-1015
+<br/>Lausanne
+<br/>Translating Head Motion into Attention - Towards
+<br/>Processing of Student’s Body-Language
+<br/>CHILI Laboratory
+<br/>Łukasz Kidzi´nski
+<br/>CHILI Laboratory
+<br/>CHILI Laboratory
+<br/>École polytechnique fédérale
+<br/>École polytechnique fédérale
+<br/>École polytechnique fédérale
+</td></tr><tr><td>01c8d7a3460422412fba04e7ee14c4f6cdff9ad7</td><td>(IJACSA) International Journal of Advanced Computer Science and Applications,
+<br/>Vol. 4, No. 7, 2013
+<br/>Rule Based System for Recognizing Emotions Using
+<br/>Multimodal Approach
+<br/>Information System
+<br/>SBM, SVKM’s NMIMS
+<br/>Mumbai, India
+<br/>
+</td></tr><tr><td>01e12be4097fa8c94cabeef0ad61498c8e7762f2</td><td></td></tr><tr><td>0163d847307fae508d8f40ad193ee542c1e051b4</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 6, NO. 1, JANUARY 2007
+<br/>Classemes and Other Classifier-based
+<br/>Features for Efficient Object Categorization
+<br/>- Supplementary material -
+<br/>1 LOW-LEVEL FEATURES
+<br/>We extract the SIFT [1] features for our descriptor
+<br/>according to the following pipeline. We first convert
+<br/>each image to gray-scale, then we normalize the con-
+<br/>trast by forcing the 0.01% of lightest and darkest pixels
+<br/>to be mapped to white and black respectively, and
+<br/>linearly rescaling the values in between. All images
+<br/>exceeding 786,432 pixels of resolution are downsized
+<br/>to this maximum value while keeping the aspect ratio.
+<br/>The 128-dimensional SIFT descriptors are computed
+<br/>from the interest points returned by a DoG detec-
+<br/>tor [2]. We finally compute a Bag-Of-Word histogram
+<br/>of these descriptors, using a K-means vocabulary of
+<br/>500 words.
+<br/>2 CLASSEMES
+<br/>The LSCOM categories were developed specifically
+<br/>for multimedia annotation and retrieval, and have
+<br/>been used in the TRECVID video retrieval series.
+<br/>We took the LSCOM CYC ontology dated 2006-06-30,
+<br/>which contains 2832 unique categories. We removed
+</td></tr><tr><td>01c4cf9c7c08f0ad3f386d88725da564f3c54679</td><td>Interpretability Beyond Feature Attribution:
+<br/>Quantitative Testing with Concept Activation Vectors (TCAV)
+</td></tr><tr><td>017ce398e1eb9f2eed82d0b22fb1c21d3bcf9637</td><td>FACE RECOGNITION WITH HARMONIC DE-LIGHTING
+<br/>2ICT-ISVISION Joint R&D Laboratory for Face Recognition, CAS, Beijing, China, 100080
+<br/>1Graduate School, CAS, Beijing, China, 100080
+<br/>Emails: {lyqing, sgshan, wgao}jdl.ac.cn
+</td></tr><tr><td>014e3d0fa5248e6f4634dc237e2398160294edce</td><td>Int J Comput Vis manuscript No.
+<br/>(will be inserted by the editor)
+<br/>What does 2D geometric information really tell us about
+<br/>3D face shape?
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>01beab8f8293a30cf48f52caea6ca0fb721c8489</td><td></td></tr><tr><td>0178929595f505ef7655272cc2c339d7ed0b9507</td><td></td></tr><tr><td>01b4b32c5ef945426b0396d32d2a12c69c282e29</td><td></td></tr><tr><td>0113b302a49de15a1d41ca4750191979ad756d2f</td><td>1­4244­0367­7/06/$20.00 ©2006 IEEE
+<br/>537
+<br/>ICME 2006
+</td></tr><tr><td>064b797aa1da2000640e437cacb97256444dee82</td><td>Coarse-to-fine Face Alignment with Multi-Scale Local Patch Regression
+<br/>Megvii Inc.
+<br/>Megvii Inc.
+<br/>Megvii Inc.
+</td></tr><tr><td>06f146dfcde10915d6284981b6b84b85da75acd4</td><td>Scalable Face Image Retrieval using
+<br/>Attribute-Enhanced Sparse Codewords
+</td></tr><tr><td>0697bd81844d54064d992d3229162fe8afcd82cb</td><td>User-driven mobile robot storyboarding: Learning image interest and
+<br/>saliency from pairwise image comparisons
+</td></tr><tr><td>06e7e99c1fdb1da60bc3ec0e2a5563d05b63fe32</td><td>WhittleSearch: Image Search with Relative Attribute Feedback
+<br/>(Supplementary Material)
+<br/>1 Comparative Qualitative Search Results
+<br/>We present three qualitative search results for human-generated feedback, in addition to those
+<br/>shown in the paper. Each example shows one search iteration, where the 20 reference images are
+<br/>randomly selected (rather than ones that match a keyword search, as the image examples in the
+<br/>main paper illustrate). For each result, the first figure shows our method and the second figure
+<br/>shows the binary feedback result for the corresponding target image. Note that for our method,
+<br/>“more/less X” (where X is an attribute) means that the target image is more/less X than the
+<br/>reference image which is shown.
+<br/>Figures 1 and 2 show results for human-generated relative attribute and binary feedback, re-
+<br/>spectively, when both methods are used to target the same “mental image” of a shoe shown in the
+<br/>top left bubble. The top right grid of 20 images are the reference images displayed to the user, and
+<br/>those outlined and annotated with constraints are the ones chosen by the user to give feedback.
+<br/>The bottom row of images in either figure shows the top-ranked images after integrating the user’s
+<br/>feedback into the scoring function, revealing the two methods’ respective performance. We see that
+<br/>while both methods retrieve high-heeled shoes, only our method retrieves images that are as “open”
+<br/>as the target image. This is because using the proposed approach, the user was able to comment
+<br/>explicitly on the desired openness property.
+</td></tr><tr><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td><td>(cid:1)(cid:2)(cid:3)(cid:4)(cid:5)(cid:3)(cid:4)(cid:6)(cid:7)(cid:3)(cid:8)(cid:9)(cid:6)(cid:10)(cid:3)(cid:11)(cid:3)(cid:12)(cid:3)(cid:13)(cid:9)
+<br/>(cid:3)(cid:4)(cid:14)(cid:6)(cid:15)(cid:16)(cid:3)(cid:17)(cid:18)(cid:3)(cid:11)(cid:5)(cid:19)(cid:4) (cid:20)(cid:5)(cid:11)(cid:21)(cid:6)(cid:3)(cid:6)(cid:22)(cid:9)(cid:20)(cid:6)(cid:10)(cid:9)(cid:11)(cid:9)(cid:8)(cid:11)(cid:5)(cid:19)(cid:4)(cid:6)(cid:23)(cid:17)(cid:24)(cid:19)(cid:2)(cid:5)(cid:11)(cid:21)(cid:25)
+<br/>(cid:26)(cid:11)(cid:5)(cid:8)(cid:17)(cid:6)(cid:27)(cid:1)(cid:9)(cid:22)(cid:8)(cid:18)(cid:1)(cid:28)(cid:12)(cid:6)(cid:29)(cid:4)(cid:20)(cid:11)(cid:6)(cid:24)(cid:30)(cid:1)(cid:15)(cid:25)(cid:1)(cid:31)(cid:8)(cid:20)(cid:8) (cid:14)(cid:1)!(cid:8) (cid:8)(cid:6)(cid:4)(cid:1)"(cid:16)(cid:8)(cid:16)(cid:20)(cid:14)(cid:1)(cid:3)(cid:15)(cid:8)(cid:22)(cid:4)(cid:12)(cid:1)(cid:23)(cid:5)(cid:29)(cid:18)(cid:14)(cid:1)(cid:31)(cid:8)(cid:20)(cid:8) (cid:14)(cid:1)(cid:26)!(cid:9)(cid:13)(cid:14)(cid:1)#(cid:17)(cid:8)(cid:6)(cid:5)$(cid:1)(cid:17)(cid:4)(cid:5)%(cid:8)(cid:10)(cid:8)(cid:11)(cid:6)(cid:8)(cid:12)&(cid:30)(cid:8)(cid:16)(cid:15)(cid:15)(cid:21)(cid:27)(cid:15)(cid:17)
+<br/>(cid:3)(cid:4)(cid:5)(cid:6)(cid:7)(cid:8)(cid:1)(cid:9)(cid:10)(cid:10)(cid:8)(cid:11)(cid:6)(cid:8)(cid:12)(cid:1)(cid:13)(cid:6)(cid:7)(cid:14) (cid:3)(cid:15)(cid:16)(cid:8)(cid:17)(cid:17)(cid:8)(cid:18)(cid:1)(cid:3)(cid:8)(cid:16)(cid:18)(cid:6)(cid:1)(cid:19)(cid:4)(cid:16)(cid:11)(cid:16)(cid:6)(cid:10)(cid:6)(cid:14)(cid:1)(cid:19)(cid:20)(cid:21)(cid:1)(cid:9)(cid:22)(cid:8)(cid:17)(cid:1)(cid:23)(cid:8)(cid:11)(cid:24)(cid:8)(cid:12)(cid:25)(cid:8)(cid:20)(cid:18)
+<br/>(cid:23)(cid:12)(cid:13)(cid:11)(cid:2)(cid:3)(cid:8)(cid:11)$(cid:1)’(cid:16)(cid:6)(cid:11) ((cid:8)((cid:4)(cid:20)(cid:1)(cid:6)(cid:12)(cid:24)(cid:20)(cid:15)(cid:18))(cid:27)(cid:4)(cid:11)(cid:1)(cid:8)(cid:1)(cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1)(cid:15)(cid:25)(cid:1)(cid:15)(cid:29)(cid:4)(cid:20)(cid:1)*(cid:14)+,,(cid:1)(cid:27)(cid:15)(cid:5)(cid:15)(cid:20)(cid:1)(cid:6)(cid:17)(cid:8)-(cid:4)(cid:11)(cid:1).(cid:4)(cid:1)(cid:27)(cid:15)(cid:5)(cid:5)(cid:4)(cid:27)(cid:24)(cid:4)(cid:18)(cid:1)(cid:25)(cid:20)(cid:15)(cid:17)(cid:1)+(cid:2)+(cid:1)(cid:18)(cid:6)(cid:25)(cid:25)(cid:4)(cid:20)(cid:4)(cid:12)(cid:24)(cid:1)(cid:16))(cid:17)(cid:8)(cid:12)(cid:1)(cid:25)(cid:8)(cid:27)(cid:4)(cid:11) (cid:6)(cid:12)(cid:1)(cid:8)-(cid:4)(cid:11)(cid:1)(cid:10)(cid:4)(cid:24).(cid:4)(cid:4)(cid:12)(cid:1)/
+<br/>(cid:8)(cid:12)(cid:18) 01(cid:21)(cid:1)2(cid:4)(cid:1)(cid:12)(cid:8)(cid:17)(cid:4)(cid:18)(cid:1)(cid:24)(cid:16)(cid:6)(cid:11)(cid:1)(cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1)(cid:24)(cid:16)(cid:4)(cid:1)(cid:26)(cid:20)(cid:8)(cid:12)(cid:6)(cid:8)(cid:12)(cid:1)3(cid:8)(cid:27)(cid:4)(cid:1)(cid:19)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1)4(cid:26)3(cid:19)(cid:23)5(cid:21)(cid:1)’(cid:15)(cid:1)(cid:4)(cid:29)(cid:8)(cid:5))(cid:8)(cid:24)(cid:4)(cid:1)(cid:24)(cid:16)(cid:4)(cid:1)(cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1)(cid:24)(cid:16)(cid:4)(cid:1)(cid:4)6((cid:4)(cid:20)(cid:6)(cid:17)(cid:4)(cid:12)(cid:24)(cid:8)(cid:5)(cid:1)(cid:20)(cid:4)(cid:11))(cid:5)(cid:24)(cid:1)(cid:15)(cid:25)(cid:1)(cid:8)(cid:1)(cid:12)(cid:4).(cid:1)(cid:25)(cid:8)(cid:27)(cid:6)(cid:8)(cid:5)(cid:1)
+<br/>(cid:25)(cid:4)(cid:8)(cid:24))(cid:20)(cid:4)(cid:1)(cid:18)(cid:4)(cid:24)(cid:4)(cid:27)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1)(cid:8)(cid:5)-(cid:15)(cid:20)(cid:6)(cid:24)(cid:16)(cid:17)(cid:1)(cid:6)(cid:11)(cid:1)(cid:20)(cid:4)((cid:15)(cid:20)(cid:24)(cid:4)(cid:18)(cid:21)
+<br/>(cid:26)(cid:9)(cid:27) (cid:28)(cid:19)(cid:2)(cid:14)(cid:13)$(cid:1)3(cid:8)(cid:27)(cid:4)(cid:1)(cid:26)(cid:17)(cid:8)-(cid:4)(cid:1)(cid:19)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:14)(cid:1)3(cid:8)(cid:27)(cid:6)(cid:8)(cid:5)(cid:1)3(cid:4)(cid:8)(cid:24))(cid:20)(cid:4)(cid:1)(cid:19)(cid:4)(cid:24)(cid:4)(cid:27)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1)(cid:9)(cid:5)-(cid:15)(cid:20)(cid:6)(cid:24)(cid:16)(cid:17)(cid:11)(cid:14)(cid:1)(cid:9)-(cid:4)(cid:1)7(cid:5)(cid:8)(cid:11)(cid:11)(cid:6)(cid:25)(cid:6)(cid:27)(cid:8)(cid:24)(cid:6)(cid:15)(cid:12)(cid:21)
+<br/>(cid:29) (cid:1)(cid:4)(cid:11)(cid:2)(cid:19)(cid:14)(cid:18)(cid:8)(cid:11)(cid:5)(cid:19)(cid:4)
+<br/>8)(cid:17)(cid:8)(cid:12)(cid:1) (cid:25)(cid:8)(cid:27)(cid:4)(cid:1) (cid:6)(cid:11)(cid:1) (cid:24)(cid:16)(cid:4)(cid:1) (cid:17)(cid:15)(cid:11)(cid:24)(cid:1) (cid:27)(cid:15)(cid:17)(cid:17)(cid:15)(cid:12)(cid:1) (cid:8)(cid:12)(cid:18)(cid:1) )(cid:11)(cid:4)(cid:25))(cid:5)(cid:1) (cid:7)(cid:4)(cid:30)(cid:1) (cid:24)(cid:15)(cid:1) (cid:8)(cid:1)
+<br/>((cid:4)(cid:20)(cid:11)(cid:15)(cid:12)9(cid:11)(cid:1) (cid:6)(cid:18)(cid:4)(cid:12)(cid:24)(cid:6)(cid:24)(cid:30)(cid:21)(cid:1) (cid:9)(cid:11)(cid:1) (cid:16))(cid:17)(cid:8)(cid:12)(cid:11)(cid:14)(cid:1) .(cid:4)(cid:1) (cid:8)(cid:20)(cid:4)(cid:1) (cid:8)(cid:10)(cid:5)(cid:4)(cid:1) (cid:24)(cid:15)(cid:1) (cid:27)(cid:8)(cid:24)(cid:4)-(cid:15)(cid:20)(cid:6)(cid:22)(cid:4)(cid:1) (cid:8)(cid:1)
+<br/>((cid:4)(cid:20)(cid:11)(cid:15)(cid:12):(cid:11)(cid:1)(cid:8)-(cid:4)(cid:1)-(cid:20)(cid:15))((cid:1)(cid:25)(cid:20)(cid:15)(cid:17)(cid:1)(cid:8)(cid:1)((cid:4)(cid:20)(cid:11)(cid:15)(cid:12):(cid:11)(cid:1)(cid:25)(cid:8)(cid:27)(cid:4)(cid:1)(cid:6)(cid:17)(cid:8)-(cid:4)(cid:1)(cid:8)(cid:12)(cid:18)(cid:1)(cid:8)(cid:20)(cid:4)(cid:1)(cid:15)(cid:25)(cid:24)(cid:4)(cid:12)(cid:1)
+<br/>(cid:8)(cid:10)(cid:5)(cid:4)(cid:1)(cid:24)(cid:15)(cid:1)(cid:10)(cid:4)(cid:1);)(cid:6)(cid:24)(cid:4)(cid:1)((cid:20)(cid:4)(cid:27)(cid:6)(cid:11)(cid:4)(cid:1)(cid:6)(cid:12)(cid:1)(cid:24)(cid:16)(cid:6)(cid:11)(cid:1)(cid:4)(cid:11)(cid:24)(cid:6)(cid:17)(cid:8)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1)<(cid:2)=(cid:21)(cid:1)(cid:26)(cid:12)(cid:1)(cid:20)(cid:4)(cid:27)(cid:4)(cid:12)(cid:24)(cid:1)(cid:30)(cid:4)(cid:8)(cid:20)(cid:11)(cid:14)(cid:1)
+<br/>(cid:25)(cid:8)(cid:27)(cid:4)(cid:1) (cid:20)(cid:4)(cid:27)(cid:15)-(cid:12)(cid:6)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1) (cid:8)(cid:12)(cid:18)(cid:1) (cid:20)(cid:4)(cid:5)(cid:8)(cid:24)(cid:4)(cid:18)(cid:1) .(cid:15)(cid:20)(cid:7)(cid:11)(cid:1) (cid:16)(cid:8)(cid:29)(cid:4)(cid:1) (cid:20)(cid:4)(cid:27)(cid:4)(cid:6)(cid:29)(cid:4)(cid:18)(cid:1) (cid:11))(cid:10)(cid:11)(cid:24)(cid:8)(cid:12)(cid:24)(cid:6)(cid:8)(cid:5)(cid:1)
+<br/>(cid:8)(cid:24)(cid:24)(cid:4)(cid:12)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1) (cid:25)(cid:20)(cid:15)(cid:17)(cid:1) (cid:20)(cid:4)(cid:11)(cid:4)(cid:8)(cid:20)(cid:27)(cid:16)(cid:4)(cid:20)(cid:11)(cid:1) (cid:6)(cid:12)(cid:1) (cid:10)(cid:6)(cid:15)(cid:17)(cid:4)(cid:24)(cid:20)(cid:6)(cid:27)(cid:11)(cid:14)(cid:1) ((cid:8)(cid:24)(cid:24)(cid:4)(cid:20)(cid:12)(cid:1) (cid:20)(cid:4)(cid:27)(cid:15)-(cid:12)(cid:6)(cid:24)(cid:6)(cid:15)(cid:12)(cid:14)(cid:1)
+<br/>(cid:8)(cid:12)(cid:18)(cid:1) (cid:27)(cid:15)(cid:17)()(cid:24)(cid:4)(cid:20) (cid:29)(cid:6)(cid:11)(cid:6)(cid:15)(cid:12)(cid:1) (cid:27)(cid:15)(cid:17)(cid:17))(cid:12)(cid:6)(cid:24)(cid:6)(cid:4)(cid:11)(cid:1) </(cid:14)(cid:1) *(cid:14)(cid:1) > (cid:8)(cid:12)(cid:18) 1=(cid:21)(cid:1) ’(cid:16)(cid:4)(cid:11)(cid:4)(cid:1)
+<br/>(cid:27)(cid:15)(cid:17)(cid:17)(cid:15)(cid:12)(cid:1)(cid:6)(cid:12)(cid:24)(cid:4)(cid:20)(cid:4)(cid:11)(cid:24)(cid:11)(cid:1)(cid:8)(cid:17)(cid:15)(cid:12)-(cid:1)(cid:20)(cid:4)(cid:11)(cid:4)(cid:8)(cid:20)(cid:27)(cid:16)(cid:4)(cid:20)(cid:11)(cid:1)(cid:17)(cid:15)(cid:24)(cid:6)(cid:29)(cid:8)(cid:24)(cid:4)(cid:18)(cid:1))(cid:11)(cid:1)(cid:24)(cid:15)(cid:1)(cid:27)(cid:15)(cid:5)(cid:5)(cid:4)(cid:27)(cid:24)(cid:1)(cid:8)(cid:1)
+<br/>(cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1) (cid:15)(cid:25)(cid:1) (cid:25)(cid:8)(cid:27)(cid:6)(cid:8)(cid:5)(cid:1) (cid:6)(cid:17)(cid:8)-(cid:4)(cid:11)(cid:1) (cid:25)(cid:20)(cid:15)(cid:17)(cid:1) ((cid:4)(cid:15)((cid:5)(cid:4)(cid:1) (cid:6)(cid:12)(cid:1) (cid:18)(cid:6)(cid:25)(cid:25)(cid:4)(cid:20)(cid:4)(cid:12)(cid:24)(cid:1) (cid:8)-(cid:4)(cid:11)(cid:21) ’(cid:16)(cid:4)(cid:1)
+<br/>(cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1)(cid:6)(cid:11)(cid:1)(cid:6)(cid:12)(cid:24)(cid:4)(cid:12)(cid:18)(cid:4)(cid:18)(cid:1)(cid:25)(cid:15)(cid:20)(cid:1)(cid:18)(cid:6)(cid:11)(cid:24)(cid:20)(cid:6)(cid:10))(cid:24)(cid:6)(cid:15)(cid:12)(cid:1)(cid:24)(cid:15)(cid:1)(cid:20)(cid:4)(cid:11)(cid:4)(cid:8)(cid:20)(cid:27)(cid:16)(cid:4)(cid:20)(cid:11)(cid:21)
+<br/>’(cid:16)(cid:4)(cid:20)(cid:4)(cid:1) (cid:8)(cid:20)(cid:4)(cid:1) (cid:17)(cid:8)(cid:12)(cid:30)(cid:1) ()(cid:10)(cid:5)(cid:6)(cid:27)(cid:8)(cid:5)(cid:5)(cid:30)(cid:1) (cid:8)(cid:29)(cid:8)(cid:6)(cid:5)(cid:8)(cid:10)(cid:5)(cid:4)(cid:1) (cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:11)(cid:1) (cid:25)(cid:15)(cid:20)(cid:1) (cid:25)(cid:8)(cid:27)(cid:4)(cid:1)
+<br/>(cid:20)(cid:4)(cid:27)(cid:15)-(cid:12)(cid:6)(cid:24)(cid:6)(cid:15)(cid:12)(cid:14)(cid:1) (cid:8)(cid:12)(cid:18)(cid:1) (cid:25)(cid:8)(cid:27)(cid:6)(cid:8)(cid:5)(cid:1) (cid:4)6((cid:20)(cid:4)(cid:11)(cid:11)(cid:6)(cid:15)(cid:12)(cid:1) (cid:8)(cid:12)(cid:8)(cid:5)(cid:30)(cid:11)(cid:6)(cid:11)(cid:21)(cid:1) (cid:23)(cid:4)(cid:11)(cid:6)(cid:18)(cid:4)(cid:1) (cid:8)(cid:10)(cid:15)(cid:29)(cid:4)(cid:1)
+<br/>(cid:8)(((cid:5)(cid:6)(cid:27)(cid:8)(cid:24)(cid:6)(cid:15)(cid:12)(cid:11)(cid:14)(cid:1)(cid:26)(cid:20)(cid:8)(cid:12)(cid:6)(cid:8)(cid:12)(cid:1)3(cid:8)(cid:27)(cid:4)(cid:1)(cid:19)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1)4(cid:26)3(cid:19)(cid:23)5(cid:1)(cid:27)(cid:8)(cid:12)(cid:1)(cid:10)(cid:4)(cid:1))(cid:11)(cid:4)(cid:18)(cid:1)(cid:25)(cid:15)(cid:20)(cid:1)(cid:8)-(cid:4)(cid:1)
+<br/>(cid:27)(cid:5)(cid:8)(cid:11)(cid:11)(cid:6)(cid:25)(cid:6)(cid:27)(cid:8)(cid:24)(cid:6)(cid:15)(cid:12)(cid:14)(cid:1) (cid:25)(cid:8)(cid:27)(cid:6)(cid:8)(cid:5)(cid:1) (cid:11))(cid:20)-(cid:4)(cid:20)(cid:30)(cid:14)(cid:1) (cid:20)(cid:8)(cid:27)(cid:4)(cid:1) (cid:18)(cid:4)(cid:24)(cid:4)(cid:27)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1) 4(cid:10)(cid:4)(cid:11)(cid:6)(cid:18)(cid:4)(cid:1) (cid:15)(cid:24)(cid:16)(cid:4)(cid:20)(cid:1)
+<br/>(cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:11)5(cid:14)(cid:1) (cid:11)(cid:24))(cid:18)(cid:30)(cid:6)(cid:12)-(cid:1) (cid:6)(cid:12)(cid:25)(cid:5))(cid:4)(cid:12)(cid:27)(cid:4)(cid:1) (cid:15)(cid:25)(cid:1) (cid:27)(cid:8)(cid:20)(cid:4)(cid:4)(cid:20)(cid:1) (cid:8)(cid:12)(cid:18)(cid:1) (cid:7)(cid:6)(cid:12)(cid:18)(cid:1) (cid:15)(cid:25)(cid:1) (cid:11)(cid:7)(cid:6)(cid:12)(cid:1) (cid:15)(cid:12)(cid:1)
+<br/>(cid:8)-(cid:6)(cid:12)-(cid:14)(cid:1)(cid:8)(cid:12)(cid:18)(cid:1)(cid:15)(cid:24)(cid:16)(cid:4)(cid:20)(cid:1)(cid:11)(cid:6)(cid:17)(cid:6)(cid:5)(cid:8)(cid:20)(cid:1)(cid:20)(cid:4)(cid:11)(cid:4)(cid:8)(cid:20)(cid:27)(cid:16)(cid:4)(cid:11)(cid:21)
+<br/>(cid:26)(cid:12)(cid:1) (cid:24)(cid:16)(cid:4)(cid:1) (cid:20)(cid:4)(cid:17)(cid:8)(cid:6)(cid:12)(cid:6)(cid:12)-(cid:1) ((cid:8)(cid:20)(cid:24)(cid:11) (cid:18)(cid:4)(cid:24)(cid:8)(cid:6)(cid:5)(cid:11)(cid:1) (cid:15)(cid:25)(cid:1) (cid:24)(cid:16)(cid:4)(cid:1) (cid:4)6(cid:6)(cid:11)(cid:24)(cid:6)(cid:12)-(cid:1) (cid:25)(cid:8)(cid:27)(cid:4)(cid:1) (cid:6)(cid:17)(cid:8)-(cid:4)(cid:1)
+<br/>(cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:11) (cid:8)(cid:12)(cid:18) (cid:24)(cid:16)(cid:4)(cid:1)(cid:26)(cid:20)(cid:8)(cid:12)(cid:6)(cid:8)(cid:12)(cid:1)3(cid:8)(cid:27)(cid:4)(cid:1)(cid:26)(cid:17)(cid:8)-(cid:4)(cid:1)(cid:19)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1)(cid:6)(cid:11)(cid:1)-(cid:6)(cid:29)(cid:4)(cid:12)(cid:21) (cid:9)(cid:5)(cid:11)(cid:15)(cid:1)
+<br/>(cid:24)(cid:16)(cid:4)(cid:1) (cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1) (cid:6)(cid:11)(cid:1) (cid:4)(cid:29)(cid:8)(cid:5))(cid:8)(cid:24)(cid:4)(cid:18)(cid:1) (cid:10)(cid:30)(cid:1) (cid:8)(((cid:5)(cid:30)(cid:6)(cid:12)- (cid:8)(cid:1) (cid:12)(cid:4).(cid:1) (cid:25)(cid:8)(cid:27)(cid:6)(cid:8)(cid:5)(cid:1) (cid:25)(cid:4)(cid:8)(cid:24))(cid:20)(cid:4)(cid:1)
+<br/>(cid:18)(cid:4)(cid:24)(cid:4)(cid:27)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1)(cid:8)(cid:5)-(cid:15)(cid:20)(cid:6)(cid:24)(cid:16)(cid:17)(cid:21)(cid:1)
+<br/>(cid:30) (cid:15)(cid:31)(cid:5)(cid:13)(cid:11)(cid:5)(cid:4)(cid:24)(cid:6)(cid:7)(cid:3)(cid:8)(cid:9)(cid:6)(cid:1)(cid:25)(cid:3)(cid:24)(cid:9)(cid:6)(cid:10)(cid:3)(cid:11)(cid:3)(cid:12)(cid:3)(cid:13)(cid:9)(cid:13)
+<br/>(cid:3)(cid:8)(cid:12)(cid:30)(cid:1) (cid:25)(cid:8)(cid:27)(cid:4)(cid:1) (cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:11)(cid:1) (cid:8)(cid:20)(cid:4)(cid:1) (cid:20)(cid:4)(cid:27)(cid:15)(cid:20)(cid:18)(cid:4)(cid:18)(cid:1) )(cid:12)(cid:18)(cid:4)(cid:20)(cid:1) (cid:8)(cid:1) (cid:29)(cid:8)(cid:20)(cid:6)(cid:4)(cid:24)(cid:30)(cid:1) (cid:15)(cid:25)(cid:1)
+<br/>(cid:27)(cid:15)(cid:12)(cid:18)(cid:6)(cid:24)(cid:6)(cid:15)(cid:12)(cid:11)(cid:1)(cid:8)(cid:12)(cid:18)(cid:1).(cid:6)(cid:24)(cid:16)(cid:1)(cid:29)(cid:8)(cid:20)(cid:6)(cid:15))(cid:11)(cid:1)(cid:8)(((cid:5)(cid:6)(cid:27)(cid:8)(cid:24)(cid:6)(cid:15)(cid:12)(cid:11)(cid:1)(cid:6)(cid:12)(cid:1)(cid:17)(cid:6)(cid:12)(cid:18)(cid:21)(cid:1)(cid:9)(cid:5)(cid:15)(cid:12)-(cid:1).(cid:6)(cid:24)(cid:16)(cid:1)
+<br/>(cid:24)(cid:16)(cid:4)(cid:1) (cid:18)(cid:4)(cid:29)(cid:4)(cid:5)(cid:15)((cid:17)(cid:4)(cid:12)(cid:24)(cid:1) (cid:15)(cid:25)(cid:1) (cid:25)(cid:8)(cid:27)(cid:4)(cid:1) (cid:20)(cid:4)(cid:27)(cid:15)-(cid:12)(cid:6)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1) (cid:8)(cid:12)(cid:18)(cid:1) (cid:25)(cid:8)(cid:27)(cid:6)(cid:8)(cid:5)(cid:1) (cid:4)6((cid:20)(cid:4)(cid:11)(cid:11)(cid:6)(cid:15)(cid:12)(cid:1)
+<br/>(cid:8)(cid:12)(cid:8)(cid:5)(cid:30)(cid:11)(cid:6)(cid:11)(cid:1) (cid:8)(cid:5)-(cid:15)(cid:20)(cid:6)(cid:24)(cid:16)(cid:17)(cid:11)(cid:14)(cid:1) (cid:8)(cid:1) (cid:27)(cid:15)(cid:17)((cid:8)(cid:20)(cid:8)(cid:24)(cid:6)(cid:29)(cid:4)(cid:5)(cid:30)(cid:1) (cid:5)(cid:8)(cid:20)-(cid:4)(cid:1) (cid:12))(cid:17)(cid:10)(cid:4)(cid:20)(cid:1) (cid:15)(cid:25)(cid:1) (cid:25)(cid:8)(cid:27)(cid:4)(cid:1)
+<br/>A(cid:8)(cid:5)(cid:4)(cid:1)<0=(cid:14)(cid:1)(cid:3)(cid:26)’(cid:1)<B=(cid:14)(cid:1)C(cid:9)33#(cid:1)<(cid:2),=(cid:14)(cid:1)(cid:8)(cid:12)(cid:18)(cid:1)(cid:17)(cid:8)(cid:12)(cid:30)(cid:1)(cid:15)(cid:24)(cid:16)(cid:4)(cid:20)(cid:1)(cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:11)(cid:1)<(cid:2)(cid:2)(cid:14)(cid:1)
+<br/>(cid:2)/=(cid:21)(cid:1)8(cid:4)(cid:20)(cid:4)(cid:1)3#!#’(cid:1)<(cid:2)*= (cid:8)(cid:12)(cid:18)(cid:1)3DE(cid:13)#’(cid:1)<(cid:2)>=(cid:1)(cid:8)(cid:20)(cid:4)(cid:1)(cid:20)(cid:4)(cid:29)(cid:6)(cid:4).(cid:4)(cid:18)(cid:21)
+<br/>(cid:30) (cid:29) (cid:7)(cid:15)!(cid:15)"(cid:6)(cid:10)(cid:3)(cid:11)(cid:3)(cid:12)(cid:3)(cid:13)(cid:9)
+<br/>’(cid:16)(cid:4)(cid:1) 3(cid:8)(cid:27)(cid:6)(cid:8)(cid:5)(cid:1) !(cid:4)(cid:27)(cid:15)-(cid:12)(cid:6)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1) ’(cid:4)(cid:27)(cid:16)(cid:12)(cid:15)(cid:5)(cid:15)-(cid:30)(cid:1) 43#!#’5(cid:1) (cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1)
+<br/>.(cid:8)(cid:11)(cid:1)(cid:27)(cid:15)(cid:5)(cid:5)(cid:4)(cid:27)(cid:24)(cid:4)(cid:18)(cid:1) (cid:8)(cid:24)(cid:1) D(cid:4)(cid:15)(cid:20)-(cid:4)(cid:1)(cid:3)(cid:8)(cid:11)(cid:15)(cid:12)(cid:1) (cid:28)(cid:12)(cid:6)(cid:29)(cid:4)(cid:20)(cid:11)(cid:6)(cid:24)(cid:30)(cid:1) (cid:8)(cid:12)(cid:18)(cid:1)(cid:24)(cid:16)(cid:4)(cid:1) (cid:28)"(cid:1) (cid:9)(cid:20)(cid:17)(cid:30)(cid:1)
+<br/>!(cid:4)(cid:11)(cid:4)(cid:8)(cid:20)(cid:27)(cid:16)(cid:1)F(cid:8)(cid:10)(cid:15)(cid:20)(cid:8)(cid:24)(cid:15)(cid:20)(cid:30)(cid:1)(cid:25)(cid:8)(cid:27)(cid:6)(cid:5)(cid:6)(cid:24)(cid:6)(cid:4)(cid:11)(cid:1) (cid:8)(cid:11)(cid:1)((cid:8)(cid:20)(cid:24)(cid:1)(cid:15)(cid:25)(cid:1) (cid:24)(cid:16)(cid:4)(cid:1)3#!#’(cid:1) ((cid:20)(cid:15)-(cid:20)(cid:8)(cid:17)(cid:1)
+<br/><(cid:2)*=(cid:21)(cid:1)(cid:26)(cid:12)(cid:1)3#!#’(cid:1)(cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1)(cid:6)(cid:17)(cid:8)-(cid:4)(cid:11)(cid:1)(cid:15)(cid:25)(cid:1)(cid:2)(cid:2)BB(cid:1)(cid:11))(cid:10) (cid:4)(cid:27)(cid:24)(cid:1)(cid:4)6(cid:6)(cid:11)(cid:24)(cid:1)(cid:6)(cid:12)(cid:1)BE/,
+<br/>(cid:18)(cid:6)(cid:25)(cid:25)(cid:4)(cid:20)(cid:4)(cid:12)(cid:24)(cid:1) ((cid:15)(cid:11)(cid:4)(cid:11)(cid:14)(cid:1) /(cid:1)
+<br/>(cid:25)(cid:8)(cid:27)(cid:6)(cid:8)(cid:5)(cid:1) (cid:4)6((cid:20)(cid:4)(cid:11)(cid:11)(cid:6)(cid:15)(cid:12)(cid:11)(cid:1) (cid:8)(cid:12)(cid:18)(cid:1) /(cid:1) (cid:18)(cid:6)(cid:25)(cid:25)(cid:4)(cid:20)(cid:4)(cid:12)(cid:24)(cid:1)
+<br/>(cid:6)(cid:5)(cid:5))(cid:17)(cid:6)(cid:12)(cid:8)(cid:24)(cid:6)(cid:15)(cid:12)(cid:11)(cid:1)(cid:6)(cid:12)(cid:1)/(cid:1)(cid:18)(cid:6)(cid:25)(cid:25)(cid:4)(cid:20)(cid:4)(cid:12)(cid:24)(cid:1)(cid:24)(cid:6)(cid:17)(cid:4)(cid:11)(cid:21)(cid:1)(cid:1)’(cid:16)(cid:4)(cid:20)(cid:4)(cid:1)(cid:8)(cid:20)(cid:4) (cid:2)>(cid:14),1(cid:2)(cid:1)(cid:6)(cid:17)(cid:8)-(cid:4)(cid:11)(cid:1)(cid:6)(cid:12)(cid:1)
+<br/>/1+G*0>(cid:1)((cid:6)6(cid:4)(cid:5)(cid:11)(cid:1)(cid:6)(cid:12)(cid:1)(cid:11)(cid:6)(cid:22)(cid:4)(cid:21)(cid:1)(cid:26)(cid:17)(cid:8)-(cid:4)(cid:11)(cid:1).(cid:4)(cid:20)(cid:4)(cid:1)(cid:27)(cid:15)(cid:5)(cid:5)(cid:4)(cid:27)(cid:24)(cid:4)(cid:18)(cid:1)(cid:8)(cid:24)(cid:1)(cid:24)(cid:16)(cid:4)(cid:1)(cid:25)(cid:15)(cid:5)(cid:5)(cid:15).(cid:6)(cid:12)-(cid:1)
+<br/>((cid:15)(cid:11)(cid:4)(cid:11)$(cid:1)(cid:20)(cid:6)-(cid:16)(cid:24)(cid:1)(cid:8)(cid:12)(cid:18)(cid:1)(cid:5)(cid:4)(cid:25)(cid:24)(cid:1)((cid:20)(cid:15)(cid:25)(cid:6)(cid:5)(cid:4)(cid:14)(cid:1)(cid:20)(cid:6)-(cid:16)(cid:24)(cid:1)(cid:8)(cid:12)(cid:18)(cid:1)(cid:5)(cid:4)(cid:25)(cid:24)(cid:1);)(cid:8)(cid:20)(cid:24)(cid:4)(cid:20)(cid:1)((cid:20)(cid:15)(cid:25)(cid:6)(cid:5)(cid:4)(cid:1)(cid:8)(cid:12)(cid:18)(cid:1)
+<br/>(cid:20)(cid:6)-(cid:16)(cid:24)(cid:1) (cid:8)(cid:12)(cid:18)(cid:1) (cid:5)(cid:4)(cid:25)(cid:24)(cid:1) (cid:16)(cid:8)(cid:5)(cid:25)(cid:1) ((cid:20)(cid:15)(cid:25)(cid:6)(cid:5)(cid:4)(cid:21)(cid:1) (cid:26)(cid:12)(cid:1) (cid:24)(cid:16)(cid:4)(cid:11)(cid:4)(cid:1) (cid:27)(cid:8)(cid:24)(cid:4)-(cid:15)(cid:20)(cid:6)(cid:4)(cid:11)(cid:1) (cid:6)(cid:17)(cid:8)-(cid:4)(cid:11)(cid:1) .(cid:4)(cid:20)(cid:4)(cid:1)
+<br/>(cid:20)(cid:4)(cid:27)(cid:15)(cid:20)(cid:18)(cid:4)(cid:18)(cid:1)(cid:25)(cid:15)(cid:20)(cid:1)1,0(cid:1)(cid:24)(cid:15)(cid:1)B0,(cid:1)(cid:11))(cid:10) (cid:4)(cid:27)(cid:24)(cid:11)(cid:21)
+<br/>(cid:30) (cid:30)(cid:6)(cid:7)#$(cid:22)(cid:15)"(cid:6)(cid:23)(cid:24)(cid:5)(cid:4)(cid:24)(cid:6)(cid:10)(cid:3)(cid:11)(cid:3)(cid:12)(cid:3)(cid:13)(cid:9)
+<br/>(cid:2)
+<br/>’(cid:16)(cid:4)(cid:1)3DE(cid:13)#’(cid:1)(cid:9)-(cid:6)(cid:12)-(cid:1)(cid:19)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1).(cid:8)(cid:11)(cid:1)-(cid:4)(cid:12)(cid:4)(cid:20)(cid:8)(cid:24)(cid:4)(cid:18)(cid:1)(cid:8)(cid:11)(cid:1)((cid:8)(cid:20)(cid:24)(cid:1)(cid:15)(cid:25)(cid:1)(cid:24)(cid:16)(cid:4)(cid:1)
+<br/>#)(cid:20)(cid:15)((cid:4)(cid:8)(cid:12)(cid:1) (cid:28)(cid:12)(cid:6)(cid:15)(cid:12)(cid:1) ((cid:20)(cid:15) (cid:4)(cid:27)(cid:24)(cid:1) 3DE(cid:13)#’(cid:1)
+<br/>43(cid:8)(cid:27)(cid:4)(cid:1) (cid:8)(cid:12)(cid:18)(cid:1) D(cid:4)(cid:11)(cid:24))(cid:20)(cid:4)(cid:1)
+<br/>!(cid:4)(cid:27)(cid:15)-(cid:12)(cid:6)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1) !(cid:4)(cid:11)(cid:4)(cid:8)(cid:20)(cid:27)(cid:16)(cid:1) (cid:13)(cid:4)(cid:24).(cid:15)(cid:20)(cid:7)5(cid:21)’(cid:16)(cid:6)(cid:11)(cid:1) (cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1) (cid:6)(cid:11)(cid:1) (cid:27)(cid:15)(cid:12)(cid:24)(cid:8)(cid:6)(cid:12)(cid:6)(cid:12)-(cid:1)
+<br/>(cid:2),,/(cid:1) (cid:11)(cid:27)(cid:8)(cid:12)(cid:12)(cid:4)(cid:18)(cid:1) (cid:25)(cid:8)(cid:27)(cid:4)(cid:1) (cid:6)(cid:17)(cid:8)-(cid:4)(cid:11)(cid:1) (cid:11)(cid:16)(cid:15).(cid:6)(cid:12)-(cid:1) 0/(cid:1) (cid:11))(cid:10) (cid:4)(cid:27)(cid:24)(cid:11)(cid:1) (cid:8)(cid:24)(cid:1) (cid:18)(cid:6)(cid:25)(cid:25)(cid:4)(cid:20)(cid:4)(cid:12)(cid:24)(cid:1)
+<br/>(cid:8)-(cid:4)(cid:11)(cid:21)(cid:1)(cid:26)(cid:17)(cid:8)-(cid:4)(cid:11)(cid:1)(cid:16)(cid:8)(cid:29)(cid:4)(cid:1)(cid:29)(cid:8)(cid:20)(cid:30)(cid:6)(cid:12)-(cid:1)(cid:20)(cid:4)(cid:11)(cid:15)(cid:5))(cid:24)(cid:6)(cid:15)(cid:12)?(cid:1)(cid:8)(((cid:20)(cid:15)6(cid:6)(cid:17)(cid:8)(cid:24)(cid:4)(cid:5)(cid:30)(cid:1)>,,G1,,
+<br/>((cid:6)6(cid:4)(cid:5)(cid:11)(cid:21)(cid:1) ’(cid:16)(cid:4)(cid:1) (cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1) .(cid:8)(cid:11)(cid:1) (cid:18)(cid:4)(cid:29)(cid:4)(cid:5)(cid:15)((cid:4)(cid:18)(cid:1) (cid:6)(cid:12)(cid:1) (cid:8)(cid:12)(cid:1) (cid:8)(cid:24)(cid:24)(cid:4)(cid:17)((cid:24)(cid:1) (cid:24)(cid:15)(cid:1) (cid:8)(cid:11)(cid:11)(cid:6)(cid:11)(cid:24)(cid:1)
+<br/>(cid:20)(cid:4)(cid:11)(cid:4)(cid:8)(cid:20)(cid:27)(cid:16)(cid:4)(cid:20)(cid:11)(cid:1) .(cid:16)(cid:15)(cid:1) (cid:6)(cid:12)(cid:29)(cid:4)(cid:11)(cid:24)(cid:6)-(cid:8)(cid:24)(cid:4)(cid:1) (cid:24)(cid:16)(cid:4)(cid:1) (cid:4)(cid:25)(cid:25)(cid:4)(cid:27)(cid:24)(cid:11)(cid:1) (cid:15)(cid:25)(cid:1) (cid:8)-(cid:6)(cid:12)-(cid:1) (cid:15)(cid:12)(cid:1) (cid:25)(cid:8)(cid:27)(cid:6)(cid:8)(cid:5)(cid:1)
+<br/>(cid:8)(((cid:4)(cid:8)(cid:20)(cid:8)(cid:12)(cid:27)(cid:4)(cid:1)<(cid:2)> =(cid:21)
+<br/>(cid:30) % (cid:22)(cid:9)(cid:9)(cid:14)(cid:6)(cid:7)(cid:19)(cid:2)(cid:6)(cid:23)(cid:6)(cid:22)(cid:9)(cid:20)(cid:6)(cid:10)(cid:3)(cid:11)(cid:3)(cid:12)(cid:3)(cid:13)(cid:9)
+<br/>(cid:26)(cid:12)(cid:1)(cid:15)(cid:20)(cid:18)(cid:4)(cid:20)(cid:1)(cid:24)(cid:15)(cid:1) (cid:10))(cid:6)(cid:5)(cid:18)(cid:14)(cid:1) (cid:24)(cid:20)(cid:8)(cid:6)(cid:12)(cid:1) (cid:8)(cid:12)(cid:18)(cid:1) (cid:20)(cid:4)(cid:5)(cid:6)(cid:8)(cid:10)(cid:5)(cid:30)(cid:1) (cid:24)(cid:4)(cid:11)(cid:24)(cid:1) (cid:8)-(cid:4)(cid:1) (cid:27)(cid:5)(cid:8)(cid:11)(cid:11)(cid:6)(cid:25)(cid:6)(cid:27)(cid:8)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1)
+<br/>(cid:8)(cid:5)-(cid:15)(cid:20)(cid:6)(cid:24)(cid:16)(cid:17)(cid:11)(cid:14)(cid:1)(cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:11)(cid:1).(cid:6)(cid:24)(cid:16)(cid:1)(cid:27)(cid:15)(cid:12)(cid:24)(cid:20)(cid:15)(cid:5)(cid:5)(cid:4)(cid:18)(cid:1)(cid:29)(cid:8)(cid:20)(cid:6)(cid:8)(cid:24)(cid:6)(cid:15)(cid:12)(cid:11)(cid:1)(cid:15)(cid:25)(cid:1)(cid:25)(cid:8)(cid:27)(cid:24)(cid:15)(cid:20)(cid:11)(cid:1)(cid:11))(cid:27)(cid:16)(cid:1)
+<br/>(cid:8)(cid:11)(cid:1)(cid:8)-(cid:4)(cid:14)(cid:1)(cid:25)(cid:8)(cid:27)(cid:4)(cid:1)((cid:15)(cid:11)(cid:4)(cid:14)(cid:1)(cid:25)(cid:8)(cid:27)(cid:6)(cid:8)(cid:5)(cid:1)(cid:4)6((cid:20)(cid:4)(cid:11)(cid:11)(cid:6)(cid:15)(cid:12)(cid:14)(cid:1)(cid:15)(cid:27)(cid:27)(cid:5))(cid:11)(cid:6)(cid:15)(cid:12)(cid:1)(cid:25)(cid:8)(cid:27)(cid:6)(cid:8)(cid:5)(cid:1)(cid:16)(cid:8)(cid:6)(cid:20)(cid:14)(cid:1)(cid:8)(cid:12)(cid:18)(cid:1)
+<br/>(cid:6)(cid:5)(cid:5))(cid:17)(cid:6)(cid:12)(cid:8)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1) (cid:6)(cid:11)(cid:1) (cid:12)(cid:4)(cid:4)(cid:18)(cid:4)(cid:18)(cid:21)(cid:1) (cid:26)(cid:12)(cid:1) (cid:11)((cid:6)(cid:24)(cid:4)(cid:1) (cid:15)(cid:25)(cid:1) (cid:29)(cid:8)(cid:20)(cid:6)(cid:15))(cid:11)(cid:1) (cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:11)(cid:14)(cid:1) (cid:24)(cid:16)(cid:4)(cid:20)(cid:4)(cid:1) (cid:6)(cid:11)(cid:1)
+<br/>(cid:12)(cid:15)(cid:24)(cid:1)(cid:8)(cid:12)(cid:1)(cid:8)(((cid:20)(cid:15)((cid:20)(cid:6)(cid:8)(cid:24)(cid:4)(cid:1)(cid:15)(cid:12)(cid:4)(cid:1)(cid:25)(cid:15)(cid:20)(cid:1)(cid:8)-(cid:4)(cid:1)(cid:27)(cid:5)(cid:8)(cid:11)(cid:11)(cid:6)(cid:25)(cid:6)(cid:27)(cid:8)(cid:24)(cid:6)(cid:15)(cid:12)(cid:21)(cid:1)(cid:3)(cid:15)(cid:11)(cid:24)(cid:1)(cid:27))(cid:20)(cid:20)(cid:4)(cid:12)(cid:24)(cid:1)(cid:18)(cid:8)(cid:24)(cid:8)(cid:1)
+<br/>(cid:10)(cid:8)(cid:11)(cid:4)(cid:11)(cid:1)(cid:18)(cid:15)(cid:12):(cid:24)(cid:1)(cid:16)(cid:8)(cid:29)(cid:4)(cid:1)(cid:6)(cid:17)(cid:8)-(cid:4)(cid:11)(cid:1)(cid:15)(cid:25)(cid:1)((cid:4)(cid:15)((cid:5)(cid:4)(cid:1)(cid:6)(cid:12)(cid:1)(cid:18)(cid:6)(cid:25)(cid:25)(cid:4)(cid:20)(cid:4)(cid:12)(cid:24)(cid:1)(cid:8)-(cid:4)(cid:11)(cid:14)(cid:1)(cid:8)(cid:12)(cid:18)(cid:1)(cid:6)(cid:25)(cid:1)(cid:24)(cid:16)(cid:4)(cid:30)(cid:1)
+<br/>(cid:16)(cid:8)(cid:29)(cid:4)(cid:14)(cid:1) (cid:24)(cid:16)(cid:4)(cid:30)(cid:1) (cid:18)(cid:15)(cid:1) (cid:12)(cid:15)(cid:24)(cid:1) (cid:17)(cid:4)(cid:12)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1) (cid:24)(cid:16)(cid:4)(cid:6)(cid:20)(cid:1) (cid:8)-(cid:4)(cid:11)(cid:21)(cid:1) 3DE(cid:13)#’(cid:1) (cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1)
+<br/>(cid:27)(cid:15)(cid:12)(cid:24)(cid:8)(cid:6)(cid:12)(cid:11)(cid:1) (cid:11)(cid:27)(cid:8)(cid:12)(cid:12)(cid:4)(cid:18)(cid:1) (cid:6)(cid:17)(cid:8)-(cid:4)(cid:11)(cid:1) (cid:15)(cid:25)(cid:1) ((cid:4)(cid:20)(cid:11)(cid:15)(cid:12)(cid:11)(cid:1) .(cid:6)(cid:24)(cid:16)(cid:1) (cid:17)(cid:4)(cid:12)(cid:24)(cid:6)(cid:15)(cid:12)(cid:6)(cid:12)-(cid:1) (cid:24)(cid:16)(cid:4)(cid:6)(cid:20)(cid:1)
+<br/>(cid:8)-(cid:4)(cid:11)?(cid:1)(cid:10))(cid:24)(cid:1)(cid:18)(cid:6)(cid:25)(cid:25)(cid:4)(cid:20)(cid:4)(cid:12)(cid:24)(cid:1)(cid:5)(cid:6)-(cid:16)(cid:24)(cid:6)(cid:12)-(cid:1)(cid:27)(cid:15)(cid:12)(cid:18)(cid:6)(cid:24)(cid:6)(cid:15)(cid:12)(cid:11)(cid:14)(cid:1)(cid:10)(cid:8)(cid:27)(cid:7)-(cid:20)(cid:15))(cid:12)(cid:18)(cid:14)(cid:1)((cid:15)(cid:11)(cid:4)(cid:11)(cid:1)(cid:8)(cid:12)(cid:18)(cid:1)
+<br/>(cid:4)6((cid:20)(cid:4)(cid:11)(cid:11)(cid:6)(cid:15)(cid:12)(cid:11)(cid:21)(cid:1)(cid:23)(cid:30)(cid:1)(cid:11)(cid:24))(cid:18)(cid:30)(cid:6)(cid:12)-(cid:1)(cid:15)(cid:24)(cid:16)(cid:4)(cid:20)(cid:1)(cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:11)(cid:1)(cid:6)(cid:24)(cid:1) .(cid:8)(cid:11)(cid:1) (cid:27)(cid:15)(cid:12)(cid:27)(cid:5))(cid:18)(cid:4)(cid:18)(cid:1)(cid:24)(cid:15)(cid:1)
+<br/>((cid:20)(cid:15)(cid:29)(cid:6)(cid:18)(cid:4)(cid:1) (cid:8)(cid:1) (cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1) .(cid:6)(cid:24)(cid:16)(cid:1) (cid:27)(cid:15)(cid:12)(cid:18)(cid:6)(cid:24)(cid:6)(cid:15)(cid:12)(cid:11)(cid:1) (cid:15)(cid:25)(cid:1) (cid:8)(cid:12)(cid:1) (cid:8)-(cid:4)(cid:1) (cid:27)(cid:5)(cid:8)(cid:11)(cid:11)(cid:6)(cid:25)(cid:6)(cid:27)(cid:8)(cid:24)(cid:6)(cid:15)(cid:12)(cid:11)(cid:1)
+<br/>((cid:20)(cid:15) (cid:4)(cid:27)(cid:24)(cid:21)(cid:1) (cid:9)-(cid:4)(cid:14)(cid:1) (cid:4)(cid:12)(cid:15))-(cid:16)(cid:1) (cid:20)(cid:4)(cid:11)(cid:15)(cid:5))(cid:24)(cid:6)(cid:15)(cid:12)(cid:1) (cid:25)(cid:15)(cid:20)(cid:1) .(cid:20)(cid:6)(cid:12)(cid:7)(cid:5)(cid:4)(cid:1) (cid:8)(cid:12)(cid:8)(cid:5)(cid:30)(cid:11)(cid:6)(cid:11)(cid:1) (cid:8)(cid:12)(cid:18)(cid:1)
+<br/>(cid:25)(cid:20)(cid:15)(cid:12)(cid:24)(cid:8)(cid:5)(cid:1)((cid:15)(cid:11)(cid:4)(cid:11)(cid:1)(cid:8)(cid:20)(cid:4)(cid:1)(cid:10)(cid:8)(cid:11)(cid:6)(cid:27)(cid:1)(cid:12)(cid:4)(cid:4)(cid:18)(cid:11)(cid:1)(cid:25)(cid:15)(cid:20)(cid:1)(cid:24)(cid:16)(cid:6)(cid:11)(cid:1)(cid:25)(cid:6)(cid:4)(cid:5)(cid:18)(cid:21)(cid:1)
+<br/>% (cid:10)(cid:9)(cid:13)(cid:8)(cid:2)(cid:5)&(cid:11)(cid:5)(cid:19)(cid:4)(cid:6) ’((cid:6)
+<br/>(cid:10)(cid:3)(cid:11)(cid:3)(cid:12)(cid:3)(cid:13)(cid:9)
+<br/>(cid:1)(cid:2)(cid:3)(cid:4)(cid:5)(cid:3)(cid:4)(cid:6) (cid:7)(cid:3)(cid:8)(cid:9)(cid:6)
+<br/>’(cid:16)(cid:4)(cid:1) (cid:26)(cid:20)(cid:8)(cid:12)(cid:6)(cid:8)(cid:12)(cid:1) 3(cid:8)(cid:27)(cid:4)(cid:1) (cid:19)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:14)(cid:1) (cid:24)(cid:16)(cid:4)(cid:1) (cid:25)(cid:6)(cid:20)(cid:11)(cid:24)(cid:1) (cid:6)(cid:17)(cid:8)-(cid:4)(cid:1) (cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1) (cid:6)(cid:12)(cid:1)
+<br/>(cid:17)(cid:6)(cid:18)(cid:18)(cid:5)(cid:4)E(cid:4)(cid:8)(cid:11)(cid:24)(cid:14)(cid:1)(cid:27)(cid:15)(cid:12)(cid:24)(cid:8)(cid:6)(cid:12)(cid:11)(cid:1)(cid:27)(cid:15)(cid:5)(cid:15)(cid:20)(cid:1)(cid:25)(cid:8)(cid:27)(cid:6)(cid:8)(cid:5)(cid:1)(cid:6)(cid:17)(cid:8)-(cid:4)(cid:20)(cid:30)(cid:1)(cid:15)(cid:25)(cid:1)(cid:8)(cid:1)(cid:5)(cid:8)(cid:20)-(cid:4)(cid:1)(cid:12))(cid:17)(cid:10)(cid:4)(cid:20)(cid:1)(cid:15)(cid:25)(cid:1)
+<br/>(cid:26)(cid:20)(cid:8)(cid:12)(cid:6)(cid:8)(cid:12)(cid:1)(cid:11))(cid:10) (cid:4)(cid:27)(cid:24)(cid:11) (cid:10)(cid:4)(cid:24).(cid:4)(cid:4)(cid:12)(cid:1)/(cid:1)(cid:8)(cid:12)(cid:18)(cid:1)01(cid:1)(cid:30)(cid:4)(cid:8)(cid:20)(cid:11)(cid:1)(cid:15)(cid:5)(cid:18)(cid:21)
+<br/>(cid:26)3(cid:19)(cid:23)(cid:1)(cid:6)(cid:11)(cid:1)(cid:8)(cid:1)(cid:5)(cid:8)(cid:20)-(cid:4)(cid:1)(cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1)(cid:24)(cid:16)(cid:8)(cid:24)(cid:1)(cid:27)(cid:8)(cid:12)(cid:1)(cid:11))(((cid:15)(cid:20)(cid:24)(cid:1)(cid:11)(cid:24))(cid:18)(cid:6)(cid:4)(cid:11)(cid:1)(cid:15)(cid:25)(cid:1)(cid:24)(cid:16)(cid:4)(cid:1)(cid:8)-(cid:4)(cid:1)
+<br/>(cid:27)(cid:5)(cid:8)(cid:11)(cid:11)(cid:6)(cid:25)(cid:6)(cid:27)(cid:8)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1) (cid:11)(cid:30)(cid:11)(cid:24)(cid:4)(cid:17)(cid:11)(cid:21)(cid:1) (cid:26)(cid:24)(cid:1) (cid:27)(cid:15)(cid:12)(cid:24)(cid:8)(cid:6)(cid:12)(cid:11)(cid:1) (cid:15)(cid:29)(cid:4)(cid:20)(cid:1) *(cid:14)+,,(cid:1) (cid:27)(cid:15)(cid:5)(cid:15)(cid:20)(cid:1) (cid:6)(cid:17)(cid:8)-(cid:4)(cid:11)(cid:1)
+<br/>(cid:13)(cid:15)(cid:1)(cid:20)(cid:4)(cid:11)(cid:24)(cid:20)(cid:6)(cid:27)(cid:24)(cid:6)(cid:15)(cid:12)(cid:11)(cid:1)(cid:15)(cid:12)(cid:1).(cid:4)(cid:8)(cid:20)(cid:1)4(cid:27)(cid:5)(cid:15)(cid:24)(cid:16)(cid:4)(cid:11)(cid:14)(cid:1)-(cid:5)(cid:8)(cid:11)(cid:11)(cid:4)(cid:11)(cid:14)(cid:1)(cid:4)(cid:24)(cid:27)(cid:21)5(cid:14)(cid:1) (cid:17)(cid:8)(cid:7)(cid:4)E)((cid:14)(cid:1)(cid:16)(cid:8)(cid:6)(cid:20)(cid:1)
+<br/>(cid:11)(cid:24)(cid:30)(cid:5)(cid:4)(cid:14)(cid:1) (cid:25)(cid:8)(cid:27)(cid:6)(cid:8)(cid:5)(cid:1) (cid:16)(cid:8)(cid:6)(cid:20)(cid:1) .(cid:4)(cid:20)(cid:4)(cid:1) (cid:6)(cid:17)((cid:15)(cid:11)(cid:4)(cid:18)(cid:1) (cid:24)(cid:15)(cid:1) ((cid:8)(cid:20)(cid:24)(cid:6)(cid:27)(cid:6)((cid:8)(cid:12)(cid:24)(cid:11)(cid:21)(cid:1) D(cid:20)(cid:15))(cid:12)(cid:18)E(cid:24)(cid:20))(cid:24)(cid:16)(cid:1)
+<br/>(cid:6)(cid:12)(cid:25)(cid:15)(cid:20)(cid:17)(cid:8)(cid:24)(cid:6)(cid:15)(cid:12)(cid:14)(cid:1)(cid:6)(cid:12)(cid:27)(cid:5))(cid:18)(cid:6)(cid:12)-(cid:1)(cid:26)(cid:19)(cid:14)(cid:1)(cid:8)-(cid:4)(cid:14)(cid:1)(cid:7)(cid:6)(cid:12)(cid:18)(cid:1)(cid:15)(cid:25) ((cid:15)(cid:11)(cid:4)(cid:1)(cid:15)(cid:20)(cid:1)(cid:4)6((cid:20)(cid:4)(cid:11)(cid:11)(cid:6)(cid:15)(cid:12)(cid:1)(cid:8)(cid:12)(cid:18)(cid:1)
+<br/>(cid:6)(cid:25)(cid:1) (cid:24)(cid:16)(cid:4)(cid:1) (cid:11))(cid:10) (cid:4)(cid:27)(cid:24)(cid:1) (cid:16)(cid:8)(cid:11)(cid:1) -(cid:5)(cid:8)(cid:11)(cid:11)(cid:4)(cid:11)(cid:1) (cid:6)(cid:11)(cid:1) ((cid:20)(cid:15)(cid:29)(cid:6)(cid:18)(cid:4)(cid:18)(cid:21)(cid:1) #6((cid:4)(cid:20)(cid:6)(cid:17)(cid:4)(cid:12)(cid:24)(cid:8)(cid:5)(cid:1) (cid:11))(cid:10) (cid:4)(cid:27)(cid:24)(cid:11)(cid:1)
+<br/>.(cid:4)(cid:20)(cid:4)(cid:1)((cid:16)(cid:15)(cid:24)(cid:15)-(cid:20)(cid:8)((cid:16)(cid:4)(cid:18)(cid:1).(cid:6)(cid:24)(cid:16)(cid:1)(cid:8)(cid:1)(cid:25)(cid:6)(cid:12)(cid:4)E(cid:20)(cid:4)(cid:11)(cid:15)(cid:5))(cid:24)(cid:6)(cid:15)(cid:12)(cid:1)(cid:27)(cid:15)(cid:5)(cid:15)(cid:20)(cid:1)(cid:18)(cid:6)-(cid:6)(cid:24)(cid:8)(cid:5)(cid:1)(cid:27)(cid:8)(cid:17)(cid:4)(cid:20)(cid:8)(cid:1)
+<br/>(cid:6)(cid:12)(cid:1)(cid:18)(cid:8)(cid:30)(cid:5)(cid:6)-(cid:16)(cid:24)(cid:21)(cid:1)’(cid:16)(cid:4)(cid:1)(cid:11))(cid:10) (cid:4)(cid:27)(cid:24)(cid:11)(cid:1).(cid:4)(cid:20)(cid:4)(cid:1)(cid:11)(cid:4)(cid:8)(cid:24)(cid:4)(cid:18)(cid:1)(cid:15)(cid:12)(cid:1)(cid:8)(cid:1)(cid:11)(cid:24)(cid:15)(cid:15)(cid:5)(cid:1)(cid:8)(cid:12)(cid:18)(cid:1)(cid:6)(cid:12)(cid:11)(cid:24)(cid:20))(cid:27)(cid:24)(cid:4)(cid:18)(cid:1)
+<br/>(cid:24)(cid:15)(cid:1) (cid:17)(cid:8)(cid:6)(cid:12)(cid:24)(cid:8)(cid:6)(cid:12)(cid:1) (cid:8)(cid:1) (cid:27)(cid:15)(cid:12)(cid:11)(cid:24)(cid:8)(cid:12)(cid:24)(cid:1) (cid:16)(cid:4)(cid:8)(cid:18)(cid:1) ((cid:15)(cid:11)(cid:6)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1) 4(cid:8)(cid:5)(cid:24)(cid:16)(cid:15))-(cid:16)(cid:1) (cid:11)(cid:5)(cid:6)-(cid:16)(cid:24)(cid:1)
+<br/>(cid:17)(cid:15)(cid:29)(cid:4)(cid:17)(cid:4)(cid:12)(cid:24)(cid:11)(cid:1).(cid:4)(cid:20)(cid:4)(cid:1))(cid:12)(cid:8)(cid:29)(cid:15)(cid:6)(cid:18)(cid:8)(cid:10)(cid:5)(cid:4)5(cid:21)
+<br/>’(cid:16)(cid:4)(cid:1)(cid:6)(cid:17)(cid:8)-(cid:4)(cid:11)(cid:1)(cid:8)(cid:20)(cid:4)(cid:1)(cid:6)(cid:12)(cid:1)>0,G+>,(cid:1)((cid:6)6(cid:4)(cid:5)(cid:11)(cid:1)(cid:20)(cid:4)(cid:11)(cid:15)(cid:5))(cid:24)(cid:6)(cid:15)(cid:12)(cid:14)(cid:1)/>(cid:1)(cid:10)(cid:6)(cid:24)(cid:1)(cid:18)(cid:4)((cid:24)(cid:16) (cid:14)(cid:1)
+<br/>(cid:8)(cid:10)(cid:15))(cid:24)(cid:1)>,(cid:1)(cid:31)(cid:10)(cid:30)(cid:24)(cid:4)(cid:11)(cid:1)(cid:11)(cid:6)(cid:22)(cid:4)(cid:1)(cid:8)(cid:12)(cid:18)(cid:1)CHD(cid:1)(cid:25)(cid:15)(cid:20)(cid:17)(cid:8)(cid:24) (cid:21)(cid:1)
+<br/>#(cid:12)(cid:15))-(cid:16)(cid:1) (cid:5))(cid:17)(cid:6)(cid:12)(cid:15)(cid:11)(cid:6)(cid:24)(cid:30)(cid:1) (cid:25)(cid:15)(cid:20)(cid:1) .(cid:20)(cid:6)(cid:12)(cid:7)(cid:5)(cid:4)(cid:1) ((cid:20)(cid:15)(cid:27)(cid:4)(cid:11)(cid:11)(cid:6)(cid:12)-(cid:1) (cid:8)(cid:12)(cid:18)(cid:1) (cid:25)(cid:8)(cid:27)(cid:6)(cid:8)(cid:5)(cid:1)
+<br/>(cid:25)(cid:4)(cid:8)(cid:24))(cid:20)(cid:4)(cid:11)(cid:1) .(cid:6)(cid:24)(cid:16)(cid:15))(cid:24)(cid:1) (cid:11)(cid:16)(cid:8)(cid:18)(cid:15).(cid:11)(cid:1) (cid:6)(cid:11)(cid:1) (cid:12)(cid:4)(cid:4)(cid:18)(cid:4)(cid:18)(cid:1) 4(cid:6)(cid:12)(cid:1) (cid:8)-(cid:4)(cid:1) (cid:27)(cid:5)(cid:8)(cid:11)(cid:11)(cid:6)(cid:25)(cid:6)(cid:27)(cid:8)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1)
+<br/>.(cid:20)(cid:6)(cid:12)(cid:7)(cid:5)(cid:4)(cid:1) (cid:18)(cid:4)(cid:24)(cid:4)(cid:27)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1) (cid:8)(cid:12)(cid:18)(cid:1) (cid:8)(cid:12)(cid:8)(cid:5)(cid:30)(cid:11)(cid:6)(cid:11)(cid:1)
+<br/>(cid:24)(cid:16)(cid:4)(cid:1)
+<br/>(cid:18)(cid:6)(cid:11)(cid:24)(cid:6)(cid:12)-)(cid:6)(cid:11)(cid:16)(cid:6)(cid:12)-(cid:1)(cid:15)(cid:25)(cid:1)(cid:11)(cid:4)(cid:12)(cid:6)(cid:15)(cid:20)(cid:11)(cid:1)(cid:25)(cid:20)(cid:15)(cid:17)(cid:1)(cid:24)(cid:16)(cid:15)(cid:11)(cid:4)(cid:1)(cid:6)(cid:12)(cid:1)(cid:24)(cid:16)(cid:4)(cid:1)(cid:30)(cid:15))(cid:12)-(cid:4)(cid:20)(cid:1)(cid:27)(cid:8)(cid:24)(cid:4)-(cid:15)(cid:20)(cid:6)(cid:4)(cid:11)(cid:1)
+<br/><(cid:2)=5(cid:21)(cid:1) ")(cid:10) (cid:4)(cid:27)(cid:24)(cid:11)(cid:1) .(cid:4)(cid:20)(cid:4)(cid:1) ((cid:16)(cid:15)(cid:24)(cid:15)-(cid:20)(cid:8)((cid:16)(cid:4)(cid:18)(cid:1) .(cid:6)(cid:24)(cid:16)(cid:15))(cid:24)(cid:1) (cid:8)(cid:12)(cid:30)(cid:1) ((cid:20)(cid:15) (cid:4)(cid:27)(cid:24)(cid:15)(cid:20)(cid:11)(cid:1) (cid:15)(cid:20)(cid:1)
+<br/>(cid:6)(cid:17)((cid:15)(cid:20)(cid:24)(cid:8)(cid:12)(cid:24)(cid:1)
+<br/>(cid:25)(cid:15)(cid:20)(cid:1)
+<br/>(cid:6)(cid:11)(cid:1)
+</td></tr><tr><td>06526c52a999fdb0a9fd76e84f9795a69480cecf</td><td></td></tr><tr><td>06fe63b34fcc8ff68b72b5835c4245d3f9b8a016</td><td>Mach Learn
+<br/>DOI 10.1007/s10994-013-5336-9
+<br/>Learning semantic representations of objects
+<br/>and their parts
+<br/>Received: 24 May 2012 / Accepted: 26 February 2013
+<br/>© The Author(s) 2013
+</td></tr><tr><td>06aab105d55c88bd2baa058dc51fa54580746424</td><td>Image Set based Collaborative Representation for
+<br/>Face Recognition
+</td></tr><tr><td>06262d14323f9e499b7c6e2a3dec76ad9877ba04</td><td>Real-Time Pose Estimation Piggybacked on Object Detection
+<br/>Brno, Czech Republic
+</td></tr><tr><td>062c41dad67bb68fefd9ff0c5c4d296e796004dc</td><td>Temporal Generative Adversarial Nets with Singular Value Clipping
+<br/>Preferred Networks inc., Japan
+</td></tr><tr><td>06400a24526dd9d131dfc1459fce5e5189b7baec</td><td>Event Recognition in Photo Collections with a Stopwatch HMM
+<br/>1Computer Vision Lab
+<br/>ETH Z¨urich, Switzerland
+<br/>2ESAT, PSI-VISICS
+<br/>K.U. Leuven, Belgium
+</td></tr><tr><td>0653dcdff992ad980cd5ea5bc557efb6e2a53ba1</td><td></td></tr><tr><td>063a3be18cc27ba825bdfb821772f9f59038c207</td><td>This is a repository copy of The development of spontaneous facial responses to others’
+<br/>emotions in infancy. An EMG study.
+<br/>White Rose Research Online URL for this paper:
+<br/>http://eprints.whiterose.ac.uk/125231/
+<br/>Version: Published Version
+<br/>Article:
+<br/>Kaiser, Jakob, Crespo-Llado, Maria Magdalena, Turati, Chiara et al. (1 more author)
+<br/>(2017) The development of spontaneous facial responses to others’ emotions in infancy.
+<br/>An EMG study. Scientific Reports. ISSN 2045-2322
+<br/>https://doi.org/10.1038/s41598-017-17556-y
+<br/>Reuse
+<br/>This article is distributed under the terms of the Creative Commons Attribution (CC BY) licence. This licence
+<br/>allows you to distribute, remix, tweak, and build upon the work, even commercially, as long as you credit the
+<br/>authors for the original work. More information and the full terms of the licence here:
+<br/>https://creativecommons.org/licenses/
+<br/>Takedown
+<br/>If you consider content in White Rose Research Online to be in breach of UK law, please notify us by
+<br/>https://eprints.whiterose.ac.uk/
+</td></tr><tr><td>06a9ed612c8da85cb0ebb17fbe87f5a137541603</td><td>Deep Learning of Player Trajectory Representations for Team
+<br/>Activity Analysis
+</td></tr><tr><td>06ad99f19cf9cb4a40741a789e4acbf4433c19ae</td><td>SenTion: A framework for Sensing Facial
+<br/>Expressions
+</td></tr><tr><td>6c304f3b9c3a711a0cca5c62ce221fb098dccff0</td><td>Attentive Semantic Video Generation using Captions
+<br/>IIT Hyderabad
+<br/>IIT Hyderabad
+</td></tr><tr><td>6c2b392b32b2fd0fe364b20c496fcf869eac0a98</td><td>DOI 10.1007/s00138-012-0423-7
+<br/>ORIGINAL PAPER
+<br/>Fully automatic face recognition framework based
+<br/>on local and global features
+<br/>Received: 30 May 2011 / Revised: 21 February 2012 / Accepted: 29 February 2012 / Published online: 22 March 2012
+<br/>© Springer-Verlag 2012
+</td></tr><tr><td>6cddc7e24c0581c50adef92d01bb3c73d8b80b41</td><td>Face Verification Using the LARK
+<br/>Representation
+</td></tr><tr><td>6c8c7065d1041146a3604cbe15c6207f486021ba</td><td>Attention Modeling for Face Recognition via Deep Learning
+<br/>Department of Computing, Hung Hom, Kowloon
+<br/>Hong Kong, 999077 CHINA
+<br/>Department of Computing, Hung Hom, Kowloon
+<br/>Hong Kong, 99907 CHINA
+<br/>Department of Computing, Hung Hom, Kowloon
+<br/>Hong Kong, 99907 CHINA
+<br/>Department of Computing, Hung Hom, Kowloon
+<br/>Hong Kong, 99907 CHINA
+</td></tr><tr><td>390f3d7cdf1ce127ecca65afa2e24c563e9db93b</td><td>Learning Deep Representation for Face
+<br/>Alignment with Auxiliary Attributes
+</td></tr><tr><td>3918b425bb9259ddff9eca33e5d47bde46bd40aa</td><td>Copyright
+<br/>by
+<br/>David Lieh-Chiang Chen
+<br/>2012
+</td></tr><tr><td>39ce143238ea1066edf0389d284208431b53b802</td><td></td></tr><tr><td>39ce2232452c0cd459e32a19c1abe2a2648d0c3f</td><td></td></tr><tr><td>3998c5aa6be58cce8cb65a64cb168864093a9a3e</td><td></td></tr><tr><td>397aeaea61ecdaa005b09198942381a7a11cd129</td><td></td></tr><tr><td>39b22bcbd452d5fea02a9ee63a56c16400af2b83</td><td></td></tr><tr><td>399a2c23bd2592ebe20aa35a8ea37d07c14199da</td><td></td></tr><tr><td>39c8b34c1b678235b60b648d0b11d241a34c8e32</td><td>Learning to Deblur Images with Exemplars
+</td></tr><tr><td>3986161c20c08fb4b9b791b57198b012519ea58b</td><td>International Journal of Soft Computing and Engineering (IJSCE)
+<br/>ISSN: 2231-2307, Volume-4 Issue-4, September 2014
+<br/>An Efficient Method for Face Recognition based on
+<br/>Fusion of Global and Local Feature Extraction
+</td></tr><tr><td>392425be1c9d9c2ee6da45de9df7bef0d278e85f</td><td></td></tr><tr><td>392c3cabe516c0108b478152902a9eee94f4c81e</td><td>Computer Science and Artificial Intelligence Laboratory
+<br/>Technical Report
+<br/>MIT-CSAIL-TR-2007-024
+<br/>April 23, 2007
+<br/>Tiny images
+<br/>m a s s a c h u s e t t s i n s t i t u t e o f t e c h n o l o g y, c a m b r i d g e , m a 0 213 9 u s a — w w w. c s a i l . m i t . e d u
+</td></tr><tr><td>3947b64dcac5bcc1d3c8e9dcb50558efbb8770f1</td><td></td></tr><tr><td>3965d61c4f3b72044f43609c808f8760af8781a2</td><td></td></tr><tr><td>395bf182983e0917f33b9701e385290b64e22f9a</td><td></td></tr><tr><td>3933e323653ff27e68c3458d245b47e3e37f52fd</td><td>Evaluation of a 3D-aided Pose Invariant 2D Face Recognition System
+<br/>Computational Biomedicine Lab
+<br/>4800 Calhoun Rd. Houston, TX, USA
+</td></tr><tr><td>39b452453bea9ce398613d8dd627984fd3a0d53c</td><td></td></tr><tr><td>3958db5769c927cfc2a9e4d1ee33ecfba86fe054</td><td>Describable Visual Attributes for
+<br/>Face Verification and Image Search
+</td></tr><tr><td>39b5f6d6f8d8127b2b97ea1a4987732c0db6f9df</td><td></td></tr><tr><td>994f7c469219ccce59c89badf93c0661aae34264</td><td>1
+<br/>Model Based Face Recognition Across Facial
+<br/>Expressions
+<br/>
+<br/>screens, embedded into mobiles and installed into everyday
+<br/>living and working environments they become valuable tools
+<br/>for human system interaction. A particular important aspect of
+<br/>this interaction is detection and recognition of faces and
+<br/>interpretation of facial expressions. These capabilities are
+<br/>deeply rooted in the human visual system and a crucial
+<br/>building block for social interaction. Consequently, these
+<br/>capabilities are an important step towards the acceptance of
+<br/>many technical systems.
+<br/>trees as a classifier
+<br/>lies not only
+</td></tr><tr><td>9949ac42f39aeb7534b3478a21a31bc37fe2ffe3</td><td>Parametric Stereo for Multi-Pose Face Recognition and
+<br/>3D-Face Modeling
+<br/>PSI ESAT-KUL
+<br/>Leuven, Belgium
+</td></tr><tr><td>9958942a0b7832e0774708a832d8b7d1a5d287ae</td><td>The Sparse Matrix Transform for Covariance
+<br/>Estimation and Analysis of High Dimensional
+<br/>Signals
+</td></tr><tr><td>9931c6b050e723f5b2a189dd38c81322ac0511de</td><td></td></tr><tr><td>9993f1a7cfb5b0078f339b9a6bfa341da76a3168</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+<br/>A Simple, Fast and Highly-Accurate Algorithm to
+<br/>Recover 3D Shape from 2D Landmarks on a Single
+<br/>Image
+</td></tr><tr><td>99c20eb5433ed27e70881d026d1dbe378a12b342</td><td>ISCA Archive
+<br/>http://www.isca-speech.org/archive
+<br/>First Workshop on Speech, Language
+<br/>and Audio in Multimedia
+<br/>Marseille, France
+<br/>August 22-23, 2013
+<br/>Proceedings of the First Workshop on Speech, Language and Audio in Multimedia (SLAM), Marseille, France, August 22-23, 2013.
+<br/>78
+</td></tr><tr><td>9990e0b05f34b586ffccdc89de2f8b0e5d427067</td><td>International Journal of Modeling and Optimization, Vol. 3, No. 2, April 2013
+<br/>Auto-Optimized Multimodal Expression Recognition
+<br/>Framework Using 3D Kinect Data for ASD Therapeutic
+<br/>Aid
+<br/>
+<br/>regarding
+<br/>emotion
+<br/>and
+<br/>to
+<br/>recognize
+</td></tr><tr><td>99d7678039ad96ee29ab520ff114bb8021222a91</td><td>Political image analysis with deep neural
+<br/>networks
+<br/>November 28, 2017
+</td></tr><tr><td>529e2ce6fb362bfce02d6d9a9e5de635bde81191</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication.
+<br/>> TIP-05732-2009<
+<br/>1
+<br/>Normalization of Face Illumination Based
+<br/>on Large- and Small- Scale Features
+</td></tr><tr><td>52887969107956d59e1218abb84a1f834a314578</td><td>1283
+<br/>Travel Recommendation by Mining People
+<br/>Attributes and Travel Group Types From
+<br/>Community-Contributed Photos
+</td></tr><tr><td>521482c2089c62a59996425603d8264832998403</td><td></td></tr><tr><td>521b625eebea73b5deb171a350e3709a4910eebf</td><td></td></tr><tr><td>527dda77a3864d88b35e017d542cb612f275a4ec</td><td></td></tr><tr><td>52f23e1a386c87b0dab8bfdf9694c781cd0a3984</td><td></td></tr><tr><td>529baf1a79cca813f8c9966ceaa9b3e42748c058</td><td>Triangle Wise Mapping Technique to Transform one Face Image into Another Face Image
+<br/>
+<br/>{tag} {/tag}
+<br/>
+<br/> International Journal of Computer Applications
+<br/>
+<br/> © 2014 by IJCA Journal
+<br/> Volume 87 - Number 6
+<br/>
+<br/> Year of Publication: 2014
+<br/>
+<br/>
+<br/>
+<br/> Authors:
+<br/>
+<br/>Bhogeswar Borah
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/> 10.5120/15209-3714
+<br/> {bibtex}pxc3893714.bib{/bibtex}
+</td></tr><tr><td>5239001571bc64de3e61be0be8985860f08d7e7e</td><td>SUBMITTED TO IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, JUNE 2016
+<br/>Deep Appearance Models: A Deep Boltzmann
+<br/>Machine Approach for Face Modeling
+</td></tr><tr><td>550858b7f5efaca2ebed8f3969cb89017bdb739f</td><td></td></tr><tr><td>554b9478fd285f2317214396e0ccd81309963efd</td><td>Spatio-Temporal Action Localization For Human Action
+<br/>Recognition in Large Dataset
+<br/>1L2TI, Institut Galil´ee, Universit´e Paris 13, France;
+<br/>2SERCOM, Ecole Polytechnique de Tunisie
+</td></tr><tr><td>55c68c1237166679d2cb65f266f496d1ecd4bec6</td><td>Learning to Score Figure Skating Sport Videos
+</td></tr><tr><td>5502dfe47ac26e60e0fb25fc0f810cae6f5173c0</td><td>Affordance Prediction via Learned Object Attributes
+</td></tr><tr><td>55a158f4e7c38fe281d06ae45eb456e05516af50</td><td>The 22nd International Conference on Computer Graphics and Vision
+<br/>108
+<br/>GraphiCon’2012
+</td></tr><tr><td>5506a1a1e1255353fde05d9188cb2adc20553af5</td><td></td></tr><tr><td>55c81f15c89dc8f6eedab124ba4ccab18cf38327</td><td></td></tr><tr><td>551fa37e8d6d03b89d195a5c00c74cc52ff1c67a</td><td>GeThR-Net: A Generalized Temporally Hybrid
+<br/>Recurrent Neural Network for Multimodal
+<br/>Information Fusion
+<br/>1 Xerox Research Centre India; 2 Amazon Development Center India
+</td></tr><tr><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td><td>Indian Face Age Database: A Database for Face Recognition with Age Variation
+<br/>{tag} {/tag}
+<br/> International Journal of Computer Applications
+<br/>
+<br/> Foundation of Computer Science (FCS), NY, USA
+<br/>
+<br/>
+<br/>Volume 126
+<br/>-
+<br/>Number 5
+<br/>
+<br/>
+<br/> Year of Publication: 2015
+<br/>
+<br/>
+<br/>
+<br/>
+<br/> Authors:
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/> 10.5120/ijca2015906055
+<br/> {bibtex}2015906055.bib{/bibtex}
+</td></tr><tr><td>973e3d9bc0879210c9fad145a902afca07370b86</td><td>(IJACSA) International Journal of Advanced Computer Science and Applications,
+<br/>Vol. 7, No. 7, 2016
+<br/>From Emotion Recognition to Website
+<br/>Customizations
+<br/>O.B. Efremides
+<br/>School of Web Media
+<br/>Bahrain Polytechnic
+<br/>Isa Town, Kingdom of Bahrain
+</td></tr><tr><td>97b8249914e6b4f8757d22da51e8347995a40637</td><td>28
+<br/>Large-Scale Vehicle Detection, Indexing,
+<br/>and Search in Urban Surveillance Videos
+</td></tr><tr><td>97032b13f1371c8a813802ade7558e816d25c73f</td><td>Total Recall Final Report
+<br/>Supervisor: Professor Duncan Gillies
+<br/>January 11, 2006
+</td></tr><tr><td>97cf04eaf1fc0ac4de0f5ad4a510d57ce12544f5</td><td>manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Deep Affect Prediction in-the-wild: Aff-Wild Database and Challenge,
+<br/>Deep Architectures, and Beyond
+<br/>Zafeiriou4
+</td></tr><tr><td>97d1d561362a8b6beb0fdbee28f3862fb48f1380</td><td>1955
+<br/>Age Synthesis and Estimation via Faces:
+<br/>A Survey
+</td></tr><tr><td>97540905e4a9fdf425989a794f024776f28a3fa9</td><td></td></tr><tr><td>9755554b13103df634f9b1ef50a147dd02eab02f</td><td>How Transferable are CNN-based Features for
+<br/>Age and Gender Classification?
+<br/> 1
+</td></tr><tr><td>635158d2da146e9de559d2742a2fa234e06b52db</td><td></td></tr><tr><td>63cf5fc2ee05eb9c6613043f585dba48c5561192</td><td>Prototype Selection for
+<br/>Classification in Standard
+<br/>and Generalized
+<br/>Dissimilarity Spaces
+</td></tr><tr><td>63d8d69e90e79806a062cb8654ad78327c8957bb</td><td></td></tr><tr><td>631483c15641c3652377f66c8380ff684f3e365c</td><td>Sync-DRAW: Automatic Video Generation using Deep Recurrent
+<br/>A(cid:130)entive Architectures
+<br/>Gaurav Mi(cid:138)al∗
+<br/>IIT Hyderabad
+<br/>Vineeth N Balasubramanian
+<br/>IIT Hyderabad
+</td></tr><tr><td>63eefc775bcd8ccad343433fc7a1dd8e1e5ee796</td><td></td></tr><tr><td>632fa986bed53862d83918c2b71ab953fd70d6cc</td><td>GÜNEL ET AL.: WHAT FACE AND BODY SHAPES CAN TELL ABOUT HEIGHT
+<br/>What Face and Body Shapes Can Tell
+<br/>About Height
+<br/>CVLab
+<br/>EPFL,
+<br/>Lausanne, Switzerland
+</td></tr><tr><td>63340c00896d76f4b728dbef85674d7ea8d5ab26</td><td>1732
+<br/>Discriminant Subspace Analysis:
+<br/>A Fukunaga-Koontz Approach
+</td></tr><tr><td>63d865c66faaba68018defee0daf201db8ca79ed</td><td>Deep Regression for Face Alignment
+<br/>1Dept. of Electronics and Information Engineering, Huazhong Univ. of Science and Technology, China
+<br/>2Microsoft Research, Beijing, China
+</td></tr><tr><td>634541661d976c4b82d590ef6d1f3457d2857b19</td><td>AAllmmaa MMaatteerr SSttuuddiioorruumm –– UUnniivveerrssiittàà ddii BBoollooggnnaa
+<br/>in cotutela con Università di Sassari
+<br/>DOTTORATO DI RICERCA IN
+<br/>INGEGNERIA ELETTRONICA, INFORMATICA E DELLE
+<br/>TELECOMUNICAZIONI
+<br/>Ciclo XXVI
+<br/>Settore Concorsuale di afferenza: 09/H1
+<br/>Settore Scientifico disciplinare: ING-INF/05
+<br/>ADVANCED TECHNIQUES FOR FACE RECOGNITION
+<br/>UNDER CHALLENGING ENVIRONMENTS
+<br/>TITOLO TESI
+<br/>Presentata da:
+<br/>Coordinatore Dottorato
+<br/>ALESSANDRO VANELLI-CORALLI
+<br/>
+<br/>Relatore
+<br/> DAVIDE MALTONI
+<br/>Relatore
+<br/> MASSIMO TISTARELLI
+<br/>Esame finale anno 2014
+</td></tr><tr><td>6332a99e1680db72ae1145d65fa0cccb37256828</td><td>MASTER IN COMPUTER VISION AND ARTIFICIAL INTELLIGENCE
+<br/>REPORT OF THE RESEARCH PROJECT
+<br/>OPTION: COMPUTER VISION
+<br/>Pose and Face Recovery via
+<br/>Spatio-temporal GrabCut Human
+<br/>Segmentation
+<br/>Date: 13/07/2010
+</td></tr><tr><td>63c022198cf9f084fe4a94aa6b240687f21d8b41</td><td>425
+</td></tr><tr><td>0f65c91d0ed218eaa7137a0f6ad2f2d731cf8dab</td><td>Multi-Directional Multi-Level Dual-Cross
+<br/>Patterns for Robust Face Recognition
+</td></tr><tr><td>0f112e49240f67a2bd5aaf46f74a924129f03912</td><td>947
+<br/>Age-Invariant Face Recognition
+</td></tr><tr><td>0f4cfcaca8d61b1f895aa8c508d34ad89456948e</td><td>LOCAL APPEARANCE BASED FACE RECOGNITION USING
+<br/>DISCRETE COSINE TRANSFORM (WedPmPO4)
+<br/>Author(s) :
+</td></tr><tr><td>0fad544edfc2cd2a127436a2126bab7ad31ec333</td><td>Decorrelating Semantic Visual Attributes by Resisting the Urge to Share
+<br/>UT Austin
+<br/>USC
+<br/>UT Austin
+</td></tr><tr><td>0f32df6ae76402b98b0823339bd115d33d3ec0a0</td><td>Emotion recognition from embedded bodily
+<br/>expressions and speech during dyadic interactions
+</td></tr><tr><td>0fd1715da386d454b3d6571cf6d06477479f54fc</td><td>J Intell Robot Syst (2016) 82:101–133
+<br/>DOI 10.1007/s10846-015-0259-2
+<br/>A Survey of Autonomous Human Affect Detection Methods
+<br/>for Social Robots Engaged in Natural HRI
+<br/>Received: 10 December 2014 / Accepted: 11 August 2015 / Published online: 23 August 2015
+<br/>© Springer Science+Business Media Dordrecht 2015
+</td></tr><tr><td>0f9bf5d8f9087fcba419379600b86ae9e9940013</td><td></td></tr><tr><td>0f92e9121e9c0addc35eedbbd25d0a1faf3ab529</td><td>MORPH-II: A Proposed Subsetting Scheme
+<br/>NSF-REU Site at UNC Wilmington, Summer 2017
+</td></tr><tr><td>0fd1bffb171699a968c700f206665b2f8837d953</td><td>Weakly Supervised Object Localization with
+<br/>Multi-fold Multiple Instance Learning
+</td></tr><tr><td>0a511058edae582e8327e8b9d469588c25152dc6</td><td></td></tr><tr><td>0a4f3a423a37588fde9a2db71f114b293fc09c50</td><td></td></tr><tr><td>0a3863a0915256082aee613ba6dab6ede962cdcd</td><td>Early and Reliable Event Detection Using Proximity Space Representation
+<br/>LTCI, CNRS, T´el´ecom ParisTech, Universit´e Paris-Saclay, 75013, Paris, France
+<br/>J´erˆome Gauthier
+<br/>LADIS, CEA, LIST, 91191, Gif-sur-Yvette, France
+<br/>Normandie Universit´e, UR, LITIS EA 4108, Avenue de l’universit´e, 76801, Saint-Etienne-du-Rouvray, France
+</td></tr><tr><td>0ad90118b4c91637ee165f53d557da7141c3fde0</td><td></td></tr><tr><td>0af48a45e723f99b712a8ce97d7826002fe4d5a5</td><td>2982
+<br/>Toward Wide-Angle Microvision Sensors
+<br/>Todd Zickler, Member, IEEE
+</td></tr><tr><td>0aa8a0203e5f406feb1815f9b3dd49907f5fd05b</td><td>Mixture subclass discriminant analysis
+</td></tr><tr><td>0a1138276c52c734b67b30de0bf3f76b0351f097</td><td>This is the author's version of an article that has been published in this journal. Changes were made to this version by the publisher prior to publication.
+<br/>The final version of record is available at
+<br/> http://dx.doi.org/10.1109/TIP.2016.2539502
+<br/>Discriminant Incoherent Component Analysis
+</td></tr><tr><td>0a6a25ee84fc0bf7284f41eaa6fefaa58b5b329a</td><td></td></tr><tr><td>0ae9cc6a06cfd03d95eee4eca9ed77b818b59cb7</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Multi-task, multi-label and multi-domain learning with
+<br/>residual convolutional networks for emotion recognition
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>0acf23485ded5cb9cd249d1e4972119239227ddb</td><td>Dual coordinate solvers for large-scale structural SVMs
+<br/>UC Irvine
+<br/>This manuscript describes a method for training linear SVMs (including binary SVMs, SVM regression,
+<br/>and structural SVMs) from large, out-of-core training datasets. Current strategies for large-scale learning fall
+<br/>into one of two camps; batch algorithms which solve the learning problem given a finite datasets, and online
+<br/>algorithms which can process out-of-core datasets. The former typically requires datasets small enough to fit
+<br/>in memory. The latter is often phrased as a stochastic optimization problem [4, 15]; such algorithms enjoy
+<br/>strong theoretical properties but often require manual tuned annealing schedules, and may converge slowly
+<br/>for problems with large output spaces (e.g., structural SVMs). We discuss an algorithm for an “intermediate”
+<br/>regime in which the data is too large to fit in memory, but the active constraints (support vectors) are small
+<br/>enough to remain in memory.
+<br/>In this case, one can design rather efficient learning algorithms that are
+<br/>as stable as batch algorithms, but capable of processing out-of-core datasets. We have developed such a
+<br/>MATLAB-based solver and used it to train a series of recognition systems [19, 7, 21, 12] for articulated pose
+<br/>estimation, facial analysis, 3D object recognition, and action classification, all with publicly-available code.
+<br/>This writeup describes the solver in detail.
+<br/>Approach: Our approach is closely based on data-subsampling algorithms for collecting hard exam-
+<br/>ples [9, 10, 6], combined with the dual coordinate quadratic programming (QP) solver described in liblinear
+<br/>[8]. The latter appears to be current fastest method for learning linear SVMs. We make two extensions (1)
+<br/>We show how to generalize the solver to other types of SVM problems such as (latent) structural SVMs (2)
+<br/>We show how to modify it to behave as a partially-online algorithm, which only requires access to small
+<br/>amounts of data at a time.
+<br/>Overview: Sec. 1 describes a general formulation of an SVM problem that encompasses many standard
+<br/>tasks such as multi-class classification and (latent) structural prediction. Sec. 2 derives its dual QP, and Sec. 3
+<br/>describes a dual coordinate descent optimization algorithm. Sec. 4 describes modifications for optimizing
+<br/>in an online fashion, allowing one to learn near-optimal models with a single pass over large, out-of-core
+<br/>datasets. Sec. 5 briefly touches on some theoretical issues that are necessary to ensure convergence. Finally,
+<br/>Sec. 6 and Sec. 7 describe modifications to our basic formulation to accommodate non-negativity constraints
+<br/>and flexible regularization schemes during learning.
+<br/>1 Generalized SVMs
+<br/>We first describe a general formulation of a SVM which encompasses various common problems such as
+<br/>binary classification, regression, and structured prediction. Assume we are given training data where the ith
+<br/>example is described by a set of Ni vectors {xij} and a set of Ni scalars {lij}, where j varies from 1 to Ni.
+<br/>We wish to solve the following optimization problem:
+<br/>(0, lij − wT xij)
+<br/>max
+<br/>j∈Ni
+<br/>(1)
+<br/>(cid:88)
+<br/>argmin
+<br/>L(w) =
+<br/>||w||2 +
+</td></tr><tr><td>0ad4a814b30e096ad0e027e458981f812c835aa0</td><td></td></tr><tr><td>6448d23f317babb8d5a327f92e199aaa45f0efdc</td><td></td></tr><tr><td>6412d8bbcc01f595a2982d6141e4b93e7e982d0f</td><td>Deep Convolutional Neural Network using Triplets of Faces, Deep Ensemble, and
+<br/>Score-level Fusion for Face Recognition
+<br/>1Department of Creative IT Engineering, POSTECH, Korea
+<br/>2Department of Computer Science and Engineering, POSTECH, Korea
+</td></tr><tr><td>649eb674fc963ce25e4e8ce53ac7ee20500fb0e3</td><td></td></tr><tr><td>642c66df8d0085d97dc5179f735eed82abf110d0</td><td></td></tr><tr><td>641f34deb3bdd123c6b6e7b917519c3e56010cb7</td><td></td></tr><tr><td>645de797f936cb19c1b8dba3b862543645510544</td><td>Deep Temporal Linear Encoding Networks
+<br/>1ESAT-PSI, KU Leuven, 2CVL, ETH Z¨urich
+</td></tr><tr><td>6462ef39ca88f538405616239471a8ea17d76259</td><td></td></tr><tr><td>90ac0f32c0c29aa4545ed3d5070af17f195d015f</td><td></td></tr><tr><td>90cb074a19c5e7d92a1c0d328a1ade1295f4f311</td><td>MIT. Media Laboratory Affective Computing Technical Report #571
+<br/>Appears in IEEE International Workshop on Analysis and Modeling of Faces and Gestures , Oct 2003
+<br/>Fully Automatic Upper Facial Action Recognition
+<br/>MIT Media Laboratory
+<br/>Cambridge, MA 02139
+</td></tr><tr><td>90b11e095c807a23f517d94523a4da6ae6b12c76</td><td></td></tr><tr><td>9028fbbd1727215010a5e09bc5758492211dec19</td><td>Solving the Uncalibrated Photometric Stereo
+<br/>Problem using Total Variation
+<br/>1 IRIT, UMR CNRS 5505, Toulouse, France
+<br/>2 Dept. of Computer Science, Univ. of Copenhagen, Denmark
+</td></tr><tr><td>bf1e0279a13903e1d43f8562aaf41444afca4fdc</td><td> International Research Journal of Engineering and Technology (IRJET) e-ISSN: 2395-0056
+<br/> Volume: 04 Issue: 10 | Oct -2017 www.irjet.net p-ISSN: 2395-0072
+<br/>Different Viewpoints of Recognizing Fleeting Facial Expressions with
+<br/>DWT
+<br/>information
+<br/>to get desired
+<br/>information
+<br/>Introduction
+<br/>---------------------------------------------------------------------***---------------------------------------------------------------------
+</td></tr><tr><td>bf5940d57f97ed20c50278a81e901ae4656f0f2c</td><td>Query-free Clothing Retrieval via Implicit
+<br/>Relevance Feedback
+</td></tr><tr><td>bfb98423941e51e3cd067cb085ebfa3087f3bfbe</td><td>Sparseness helps: Sparsity Augmented
+<br/>Collaborative Representation for Classification
+</td></tr><tr><td>d3b73e06d19da6b457924269bb208878160059da</td><td>Proceedings of the 5th International Conference on Computing and Informatics, ICOCI 2015
+<br/>11-13 August, 2015 Istanbul, Turkey. Universiti Utara Malaysia (http://www.uum.edu.my )
+<br/>Paper No.
+<br/>065
+<br/>IMPLEMENTATION OF AN AUTOMATED SMART HOME
+<br/>CONTROL FOR DETECTING HUMAN EMOTIONS VIA FACIAL
+<br/>DETECTION
+<br/>Osman4
+</td></tr><tr><td>d3d71a110f26872c69cf25df70043f7615edcf92</td><td>2736
+<br/>Learning Compact Feature Descriptor and Adaptive
+<br/>Matching Framework for Face Recognition
+<br/>improvements
+</td></tr><tr><td>d309e414f0d6e56e7ba45736d28ee58ae2bad478</td><td>Efficient Two-Stream Motion and Appearance 3D CNNs for
+<br/>Video Classification
+<br/>Ali Diba
+<br/>ESAT-KU Leuven
+<br/>Ali Pazandeh
+<br/>Sharif UTech
+<br/>Luc Van Gool
+<br/>ESAT-KU Leuven, ETH Zurich
+</td></tr><tr><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td><td></td></tr><tr><td>d33fcdaf2c0bd0100ec94b2c437dccdacec66476</td><td>Neurons with Paraboloid Decision Boundaries for
+<br/>Improved Neural Network Classification
+<br/>Performance
+</td></tr><tr><td>d444368421f456baf8c3cb089244e017f8d32c41</td><td>CNN for IMU Assisted Odometry Estimation using Velodyne LiDAR
+</td></tr><tr><td>d4c7d1a7a03adb2338704d2be7467495f2eb6c7b</td><td></td></tr><tr><td>d4ebf0a4f48275ecd8dbc2840b2a31cc07bd676d</td><td></td></tr><tr><td>d44a93027208816b9e871101693b05adab576d89</td><td></td></tr><tr><td>d4b88be6ce77164f5eea1ed2b16b985c0670463a</td><td>TECHNICAL REPORT JAN.15.2016
+<br/>A Survey of Different 3D Face Reconstruction
+<br/>Methods
+<br/>Department of Computer Science and Engineering
+</td></tr><tr><td>d44ca9e7690b88e813021e67b855d871cdb5022f</td><td>QUT Digital Repository:
+<br/>http://eprints.qut.edu.au/
+<br/>Zhang, Ligang and Tjondronegoro, Dian W. (2009) Selecting, optimizing and
+<br/>fusing ‘salient’ Gabor features for facial expression recognition. In: Neural
+<br/>Information Processing (Lecture Notes in Computer Science), 1-5 December
+<br/>2009, Hotel Windsor Suites Bangkok, Bangkok.
+<br/>
+<br/> © Copyright 2009 Springer-Verlag GmbH Berlin Heidelberg
+<br/>
+</td></tr><tr><td>bafb8812817db7445fe0e1362410a372578ec1fc</td><td>805
+<br/>Image-Quality-Based Adaptive Face Recognition
+</td></tr><tr><td>ba816806adad2030e1939450226c8647105e101c</td><td>MindLAB at the THUMOS Challenge
+<br/>Fabi´an P´aez
+<br/>Fabio A. Gonz´alez
+<br/>MindLAB Research Group
+<br/>MindLAB Research Group
+<br/>MindLAB Research Group
+<br/>Bogot´a, Colombia
+<br/>Bogot´a, Colombia
+<br/>Bogot´a, Colombia
+</td></tr><tr><td>badcd992266c6813063c153c41b87babc0ba36a3</td><td>Recent Advances in Object Detection in the Age
+<br/>of Deep Convolutional Neural Networks
+<br/>,1,2), Fr´ed´eric Jurie(1)
+<br/>(∗) equal contribution
+<br/>(1)Normandie Univ, UNICAEN, ENSICAEN, CNRS
+<br/>(2)Safran Electronics and Defense
+<br/>September 11, 2018
+</td></tr><tr><td>ba788365d70fa6c907b71a01d846532ba3110e31</td><td></td></tr><tr><td>ba8a99d35aee2c4e5e8a40abfdd37813bfdd0906</td><td>ELEKTROTEHNI ˇSKI VESTNIK 78(1-2): 12–17, 2011
+<br/>EXISTING SEPARATE ENGLISH EDITION
+<br/>Uporaba emotivno pogojenega raˇcunalniˇstva v
+<br/>priporoˇcilnih sistemih
+<br/>Marko Tkalˇciˇc, Andrej Koˇsir, Jurij Tasiˇc
+<br/>1Univerza v Ljubljani, Fakulteta za elektrotehniko, Trˇzaˇska 25, 1000 Ljubljana, Slovenija
+<br/>2Univerza v Ljubljani, Fakulteta za raˇcunalniˇstvo in informatiko, Trˇzaˇska 25, 1000 Ljubljana, Slovenija
+<br/>Povzetek. V ˇclanku predstavljamo rezultate treh raziskav, vezanih na izboljˇsanje delovanja multimedijskih
+<br/>priporoˇcilnih sistemov s pomoˇcjo metod emotivno pogojenega raˇcunalniˇstva (ang. affective computing).
+<br/>Vsebinski priporoˇcilni sistem smo izboljˇsali s pomoˇcjo metapodatkov, ki opisujejo emotivne odzive uporabnikov.
+<br/>Pri skupinskem priporoˇcilnem sistemu smo dosegli znaˇcilno izboljˇsanje v obmoˇcju hladnega zagona z uvedbo
+<br/>nove mere podobnosti, ki temelji na osebnostnem modelu velikih pet (ang. five factor model). Razvili smo tudi
+<br/>sistem za neinvazivno oznaˇcevanje vsebin z emotivnimi parametri, ki pa ˇse ni zrel za uporabo v priporoˇcilnih
+<br/>sistemih.
+<br/>Kljuˇcne besede: priporoˇcilni sistemi, emotivno pogojeno raˇcunalniˇstvo, strojno uˇcenje, uporabniˇski profil,
+<br/>emocije
+<br/>Uporaba emotivnega raˇcunalniˇstva v priporoˇcilnih
+<br/>sistemih
+<br/>In this paper we present the results of three investigations of
+<br/>our broad research on the usage of affect and personality in
+<br/>recommender systems. We improved the accuracy of content-
+<br/>based recommender system with the inclusion of affective
+<br/>parameters of user and item modeling. We improved the
+<br/>accuracy of a content filtering recommender system under the
+<br/>cold start conditions with the introduction of a personality
+<br/>based user similarity measure. Furthermore we developed a
+<br/>system for implicit tagging of content with affective metadata.
+<br/>1 UVOD
+<br/>Uporabniki (porabniki) multimedijskih (MM) vsebin so
+<br/>v ˇcedalje teˇzjem poloˇzaju, saj v veliki koliˇcini vse-
+<br/>bin teˇzko najdejo zanje primerne. Pomagajo si s pri-
+<br/>poroˇcilnimi sistemi, ki na podlagi osebnih preferenc
+<br/>uporabnikov izberejo manjˇso koliˇcino relevantnih MM
+<br/>vsebin, med katerimi uporabnik laˇze izbira. Noben danes
+<br/>znan priporoˇcilni sistem ne zadoˇsˇca v celoti potrebam
+<br/>uporabnikov, saj je izbor priporoˇcenih vsebin obiˇcajno
+<br/>nezadovoljive kakovosti [10]. Cilj tega ˇclanka je pred-
+<br/>staviti metode emotivno pogojenega raˇcunalniˇstva (ang.
+<br/>affective computing - glej [12]) za izboljˇsanje kakovosti
+<br/>priporoˇcilnih sistemov in utrditi za slovenski prostor
+<br/>novo terminologijo.
+<br/>1.1 Opis problema
+<br/>Za izboljˇsanje kakovosti priporoˇcilnih sistemov sta
+<br/>na voljo dve poti: (i) optimizacija algoritmov ali (ii)
+<br/>uporaba boljˇsih znaˇcilk, ki bolje razloˇzijo neznano
+<br/>Prejet 13. oktober, 2010
+<br/>Odobren 1. februar, 2011
+<br/>varianco [8]. V tem ˇclanku predstavljamo izboljˇsanje
+<br/>priporoˇcilnih sistemov z uporabo novih znaˇcilk, ki te-
+<br/>meljijo na emotivnih odzivih uporabnikov in na njiho-
+<br/>vih osebnostnih lastnostih. Te znaˇcilke razloˇzijo velik
+<br/>del uporabnikovih preferenc, ki se izraˇzajo v obliki
+<br/>ocen posameznih vsebin (npr. Likertova lestvica, binarne
+<br/>ocene itd.). Ocene vsebin se pri priporoˇcilnih sistemih
+<br/>zajemajo eksplicitno (ocena) ali implicitno, pri ˇcemer o
+<br/>oceni sklepamo na podlagi opazovanj (npr. ˇcas gledanja
+<br/>kot indikator vˇseˇcnosti [7].
+<br/>Izboljˇsanja uˇcinkovitosti priporoˇcilnih sistemov smo
+<br/>se lotili na treh podroˇcjih: (i) uporaba emotivnega
+<br/>modeliranja uporabnikov v vsebinskem priporoˇcilnem
+<br/>sistemu, (ii) neinvazivna (implicitna) detekcija emocij za
+<br/>emotivno modeliranje in (iii) uporaba osebnostne mere
+<br/>podobnosti v skupinskem priporoˇcilnem sistemu. Slika 1
+<br/>prikazuje arhitekturo emotivnega priporoˇcilnega sistema
+<br/>in mesta, kjer smo vnesli opisane izboljˇsave.
+<br/>Preostanek ˇclanka je strukturiran tako: v razdelku
+<br/>2 je predstavljen zajem podatkov. V razdelku 3 je
+<br/>predstavljen vsebinski priporoˇcilni sistem z emotivnimi
+<br/>metapodatki. V razdelku 4 je predstavljen skupinski
+<br/>priporoˇcilni sistem, ki uporablja mero podobnosti na
+<br/>podlagi osebnosti, v razdelku 5 pa algoritem za razpo-
+<br/>znavo emocij. Vsak od teh razdelov je sestavljen iz opisa
+<br/>eksperimenta in predstavitve rezultatov. V razdelku 6 so
+<br/>predstavljeni sklepi.
+<br/>1.2 Sorodno delo
+<br/>Najbolj groba delitev priporoˇcilnih sistemov je na vse-
+<br/>binske, skupinske ter hibridne sisteme [1]. Z izjemo vse-
+<br/>binskih priporoˇcilnih sistemov, ki sta ga razvila Arapakis
+<br/>[2] in Tkalˇciˇc [14], sorodnega dela na podroˇcju emotivno
+<br/>pogojenih priporoˇcilnih sistemov takorekoˇc ni. Panti´c in
+</td></tr><tr><td>ba29ba8ec180690fca702ad5d516c3e43a7f0bb8</td><td></td></tr><tr><td>bab88235a30e179a6804f506004468aa8c28ce4f</td><td></td></tr><tr><td>badd371a49d2c4126df95120902a34f4bee01b00</td><td>GONDA, WEI, PARAG, PFISTER: PARALLEL SEPARABLE 3D CONVOLUTION
+<br/>Parallel Separable 3D Convolution for Video
+<br/>and Volumetric Data Understanding
+<br/>Harvard John A. Paulson School of
+<br/>Engineering and Applied Sciences
+<br/>Camabridge MA, USA
+<br/>Toufiq Parag
+<br/>Hanspeter Pfister
+</td></tr><tr><td>a0f94e9400938cbd05c4b60b06d9ed58c3458303</td><td>1118
+<br/>Value-Directed Human Behavior Analysis
+<br/>from Video Using Partially Observable
+<br/>Markov Decision Processes
+</td></tr><tr><td>a022eff5470c3446aca683eae9c18319fd2406d5</td><td>2017-ENST-0071
+<br/>EDITE - ED 130
+<br/>Doctorat ParisTech
+<br/>T H È S E
+<br/>pour obtenir le grade de docteur délivré par
+<br/>TÉLÉCOM ParisTech
+<br/>Spécialité « SIGNAL et IMAGES »
+<br/>présentée et soutenue publiquement par
+<br/>le 15 décembre 2017
+<br/>Apprentissage Profond pour la Description Sémantique des Traits
+<br/>Visuels Humains
+<br/>Directeur de thèse : Jean-Luc DUGELAY
+<br/>Co-encadrement de la thèse : Moez BACCOUCHE
+<br/>Jury
+<br/>Mme Bernadette DORIZZI, PRU, Télécom SudParis
+<br/>Mme Jenny BENOIS-PINEAU, PRU, Université de Bordeaux
+<br/>M. Christian WOLF, MC/HDR, INSA de Lyon
+<br/>M. Patrick PEREZ, Chercheur/HDR, Technicolor Rennes
+<br/>M. Moez BACCOUCHE, Chercheur/Docteur, Orange Labs Rennes
+<br/>M. Jean-Luc DUGELAY, PRU, Eurecom Sophia Antipolis
+<br/>M. Sid-Ahmed BERRANI, Directeur de l’Innovation/HDR, Algérie Télécom
+<br/>Présidente
+<br/>Rapporteur
+<br/>Rapporteur
+<br/>Examinateur
+<br/>Encadrant
+<br/>Directeur de Thèse
+<br/>Invité
+<br/>TÉLÉCOM ParisTech
+<br/>école de l’Institut Télécom - membre de ParisTech
+<br/>N°: 2009 ENAM XXXX T H È S E </td></tr><tr><td>a0c37f07710184597befaa7e6cf2f0893ff440e9</td><td></td></tr><tr><td>a0fb5b079dd1ee5ac6ac575fe29f4418fdb0e670</td><td></td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>Learning Deep Representation for Face
+<br/>Alignment with Auxiliary Attributes
+</td></tr><tr><td>a0dfb8aae58bd757b801e2dcb717a094013bc178</td><td>Reconocimiento de expresiones faciales con base
+<br/>en la din´amica de puntos de referencia faciales
+<br/>Instituto Nacional de Astrof´ısica ´Optica y Electr´onica,
+<br/>Divisi´on de Ciencias Computacionales, Tonantzintla, Puebla,
+<br/>M´exico
+<br/>Resumen. Las expresiones faciales permiten a las personas comunicar
+<br/>emociones, y es pr´acticamente lo primero que observamos al interactuar
+<br/>con alguien. En el ´area de computaci´on, el reconocimiento de expresiones
+<br/>faciales es importante debido a que su an´alisis tiene aplicaci´on directa en
+<br/>´areas como psicolog´ıa, medicina, educaci´on, entre otras. En este articulo
+<br/>se presenta el proceso de dise˜no de un sistema para el reconocimiento de
+<br/>expresiones faciales utilizando la din´amica de puntos de referencia ubi-
+<br/>cados en el rostro, su implementaci´on, experimentos realizados y algunos
+<br/>de los resultados obtenidos hasta el momento.
+<br/>Palabras clave: Expresiones faciales, clasificaci´on, m´aquinas de soporte
+<br/>vectorial,modelos activos de apariencia.
+<br/>Facial Expressions Recognition Based on Facial
+<br/>Landmarks Dynamics
+</td></tr><tr><td>a03cfd5c0059825c87d51f5dbf12f8a76fe9ff60</td><td>Simultaneous Learning and Alignment:
+<br/>Multi-Instance and Multi-Pose Learning?
+<br/>1 Comp. Science & Eng.
+<br/>Univ. of CA, San Diego
+<br/>2 Electrical Engineering
+<br/>California Inst. of Tech.
+<br/>3 Lab of Neuro Imaging
+<br/>Univ. of CA, Los Angeles
+</td></tr><tr><td>a000149e83b09d17e18ed9184155be140ae1266e</td><td>Chapter 9
+<br/>Action Recognition in Realistic
+<br/>Sports Videos
+</td></tr><tr><td>a784a0d1cea26f18626682ab108ce2c9221d1e53</td><td>Anchored Regression Networks applied to Age Estimation and Super Resolution
+<br/>D-ITET, ETH Zurich
+<br/>Switzerland
+<br/>D-ITET, ETH Zurich
+<br/>Merantix GmbH
+<br/>D-ITET, ETH Zurich
+<br/>ESAT, KU Leuven
+</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td></td></tr><tr><td>a7664247a37a89c74d0e1a1606a99119cffc41d4</td><td>Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+<br/>3287
+</td></tr><tr><td>a7a6eb53bee5e2224f2ecd56a14e3a5a717e55b9</td><td>11th International Symposium of Robotics Research (ISRR2003), pp.192-201, 2003
+<br/>Face Recognition Using Multi-viewpoint Patterns for
+<br/>Robot Vision
+<br/>Corporate Research and Development Center, TOSHIBA Corporation
+<br/>1, KomukaiToshiba-cho, Saiwai-ku, Kawasaki 212-8582 Japan
+</td></tr><tr><td>a75ee7f4c4130ef36d21582d5758f953dba03a01</td><td>DD2427 Final Project Report
+<br/>DD2427 Final Project Report
+<br/>Human face attributes prediction with Deep
+<br/>Learning
+</td></tr><tr><td>a775da3e6e6ea64bffab7f9baf665528644c7ed3</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 142 – No.9, May 2016
+<br/>Human Face Pose Estimation based on Feature
+<br/>Extraction Points
+<br/>Research scholar,
+<br/> Department of ECE
+<br/>SBSSTC, Moga Road,
+<br/> Ferozepur, Punjab, India
+</td></tr><tr><td>b8dba0504d6b4b557d51a6cf4de5507141db60cf</td><td>Comparing Performances of Big Data Stream
+<br/>Processing Platforms with RAM3S
+</td></tr><tr><td>b8378ab83bc165bc0e3692f2ce593dcc713df34a</td><td></td></tr><tr><td>b8f3f6d8f188f65ca8ea2725b248397c7d1e662d</td><td>Selfie Detection by Synergy-Constriant Based
+<br/>Convolutional Neural Network
+<br/>Electrical and Electronics Engineering, NITK-Surathkal, India.
+</td></tr><tr><td>b81cae2927598253da37954fb36a2549c5405cdb</td><td>Experiments on Visual Information Extraction with the Faces of Wikipedia
+<br/>D´epartement de g´enie informatique et g´enie logiciel, Polytechnique Montr´eal
+<br/>2500, Chemin de Polytechnique, Universit´e de Montr´eal, Montr`eal, Qu´ebec, Canada
+</td></tr><tr><td>b8a829b30381106b806066d40dd372045d49178d</td><td>1872
+<br/>A Probabilistic Framework for Joint Pedestrian Head
+<br/>and Body Orientation Estimation
+</td></tr><tr><td>b1d89015f9b16515735d4140c84b0bacbbef19ac</td><td>Too Far to See? Not Really!
+<br/>— Pedestrian Detection with Scale-aware
+<br/>Localization Policy
+</td></tr><tr><td>b14b672e09b5b2d984295dfafb05604492bfaec5</td><td>LearningImageClassificationandRetrievalModelsThomasMensink </td></tr><tr><td>b171f9e4245b52ff96790cf4f8d23e822c260780</td><td></td></tr><tr><td>b1a3b19700b8738b4510eecf78a35ff38406df22</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TAFFC.2017.2731763, IEEE
+<br/>Transactions on Affective Computing
+<br/>JOURNAL OF LATEX CLASS FILES, VOL. 13, NO. 9, SEPTEMBER 2014
+<br/>Automatic Analysis of Facial Actions: A Survey
+<br/>and Maja Pantic, Fellow, IEEE
+</td></tr><tr><td>b1301c722886b6028d11e4c2084ee96466218be4</td><td></td></tr><tr><td>b1c5581f631dba78927aae4f86a839f43646220c</td><td></td></tr><tr><td>b1444b3bf15eec84f6d9a2ade7989bb980ea7bd1</td><td>LOCAL DIRECTIONAL RELATION PATTERN
+<br/>Local Directional Relation Pattern for
+<br/>Unconstrained and Robust Face Retrieval
+</td></tr><tr><td>b19e83eda4a602abc5a8ef57467c5f47f493848d</td><td>JOURNAL OF LATEX CLASS FILES
+<br/>Heat Kernel Based Local Binary Pattern for
+<br/>Face Representation
+</td></tr><tr><td>dd8084b2878ca95d8f14bae73e1072922f0cc5da</td><td>Model Distillation with Knowledge Transfer from
+<br/>Face Classification to Alignment and Verification
+<br/>Beijing Orion Star Technology Co., Ltd. Beijing, China
+</td></tr><tr><td>dd0760bda44d4e222c0a54d41681f97b3270122b</td><td></td></tr><tr><td>ddea3c352f5041fb34433b635399711a90fde0e8</td><td>Facial Expression Classification using Visual Cues and Language
+<br/>Department of Computer Science and Engineering, IIT Kanpur
+</td></tr><tr><td>ddbd24a73ba3d74028596f393bb07a6b87a469c0</td><td>Multi-region two-stream R-CNN
+<br/>for action detection
+<br/>Inria(cid:63)
+</td></tr><tr><td>ddf099f0e0631da4a6396a17829160301796151c</td><td>IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY
+<br/>Learning Face Image Quality from
+<br/>Human Assessments
+</td></tr><tr><td>dd0a334b767e0065c730873a95312a89ef7d1c03</td><td>Eigenexpressions: Emotion Recognition using Multiple
+<br/>Eigenspaces
+<br/>Luis Marco-Gim´enez1, Miguel Arevalillo-Herr´aez1, and Cristina Cuhna-P´erez2
+<br/><b></b><br/>Burjassot. Valencia 46100, Spain,
+<br/>2 Universidad Cat´olica San Vicente M´artir de Valencia (UCV),
+<br/>Burjassot. Valencia. Spain
+</td></tr><tr><td>dd2f6a1ba3650075245a422319d86002e1e87808</td><td></td></tr><tr><td>dd8d53e67668067fd290eb500d7dfab5b6f730dd</td><td>69
+<br/>A Parameter-Free Framework for General
+<br/>Supervised Subspace Learning
+</td></tr><tr><td>ddbb6e0913ac127004be73e2d4097513a8f02d37</td><td>264
+<br/>IEEE TRANSACTIONS ON MULTIMEDIA, VOL. 1, NO. 3, SEPTEMBER 1999
+<br/>Face Detection Using Quantized Skin Color
+<br/>Regions Merging and Wavelet Packet Analysis
+</td></tr><tr><td>dd600e7d6e4443ebe87ab864d62e2f4316431293</td><td></td></tr><tr><td>dcb44fc19c1949b1eda9abe998935d567498467d</td><td>Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+<br/>1916
+</td></tr><tr><td>dc77287bb1fcf64358767dc5b5a8a79ed9abaa53</td><td>Fashion Conversation Data on Instagram
+<br/>∗Graduate School of Culture Technology, KAIST, South Korea
+<br/>†Department of Communication Studies, UCLA, USA
+</td></tr><tr><td>dc2e805d0038f9d1b3d1bc79192f1d90f6091ecb</td><td></td></tr><tr><td>dc974c31201b6da32f48ef81ae5a9042512705fe</td><td>Am I done? Predicting Action Progress in Video
+<br/>1 Media Integration and Communication Center, Univ. of Florence, Italy
+<br/>2 Department of Mathematics “Tullio Levi-Civita”, Univ. of Padova, Italy
+</td></tr><tr><td>b6c047ab10dd86b1443b088029ffe05d79bbe257</td><td></td></tr><tr><td>b6c53891dff24caa1f2e690552a1a5921554f994</td><td></td></tr><tr><td>b613b30a7cbe76700855479a8d25164fa7b6b9f1</td><td>1
+<br/>Identifying User-Specific Facial Affects from
+<br/>Spontaneous Expressions with Minimal Annotation
+</td></tr><tr><td>b6f682648418422e992e3ef78a6965773550d36b</td><td>February 8, 2017
+</td></tr><tr><td>b656abc4d1e9c8dc699906b70d6fcd609fae8182</td><td></td></tr><tr><td>a9eb6e436cfcbded5a9f4b82f6b914c7f390adbd</td><td>(IJARAI) International Journal of Advanced Research in Artificial Intelligence,
+<br/>Vol. 5, No.6, 2016
+<br/>A Model for Facial Emotion Inference Based on
+<br/>Planar Dynamic Emotional Surfaces
+<br/>Ruivo, J. P. P.
+<br/>Escola Polit´ecnica
+<br/>Negreiros, T.
+<br/>Escola Polit´ecnica
+<br/>Barretto, M. R. P.
+<br/>Escola Polit´ecnica
+<br/>Tinen, B.
+<br/>Escola Polit´ecnica
+<br/>Universidade de S˜ao Paulo
+<br/>Universidade de S˜ao Paulo
+<br/>Universidade de S˜ao Paulo
+<br/>Universidade de S˜ao Paulo
+<br/>S˜ao Paulo, Brazil
+<br/>S˜ao Paulo, Brazil
+<br/>S˜ao Paulo, Brazil
+<br/>S˜ao Paulo, Brazil
+</td></tr><tr><td>a92adfdd8996ab2bd7cdc910ea1d3db03c66d34f</td><td></td></tr><tr><td>a98316980b126f90514f33214dde51813693fe0d</td><td>Collaborations on YouTube: From Unsupervised Detection to the
+<br/>Impact on Video and Channel Popularity
+<br/>Multimedia Communications Lab (KOM), Technische Universität Darmstadt, Germany
+</td></tr><tr><td>a93781e6db8c03668f277676d901905ef44ae49f</td><td>Recent Datasets on Object Manipulation: A Survey
+</td></tr><tr><td>a9adb6dcccab2d45828e11a6f152530ba8066de6</td><td>Aydınlanma Alt-uzaylarına dayalı Gürbüz Yüz Tanıma
+<br/>Illumination Subspaces based Robust Face Recognition
+<br/>Interactive Systems Labs, Universität Karlsruhe (TH)
+<br/>76131 Karlsruhe, Almanya
+<br/>web: http://isl.ira.uka.de/face_recognition
+<br/>Özetçe
+<br/>yönlerine
+<br/>aydınlanma
+<br/>kaynaklanan
+<br/>sonra, yüz uzayı
+<br/>Bu çalışmada aydınlanma alt-uzaylarına dayalı bir yüz tanıma
+<br/>sistemi sunulmuştur. Bu sistemde,
+<br/>ilk olarak, baskın
+<br/>aydınlanma yönleri, bir topaklandırma algoritması kullanılarak
+<br/>öğrenilmiştir. Topaklandırma algoritması sonucu önden, sağ
+<br/>ve sol yanlardan olmak üzere üç baskın aydınlanma yönü
+<br/>gözlemlenmiştir. Baskın
+<br/>karar
+<br/>-yüzün görünümündeki
+<br/>kılındıktan
+<br/>aydınlanmadan
+<br/>kişi
+<br/>kimliklerinden kaynaklanan değişimlerden ayırmak için- bu üç
+<br/>aydınlanma uzayına bölünmüştür. Daha sonra, ek aydınlanma
+<br/>yönü bilgisinden faydalanmak için aydınlanma alt-uzaylarına
+<br/>dayalı yüz
+<br/>tanıma algoritması kullanılmıştır. Önerilen
+<br/>yaklaşım, CMU PIE veritabanında, “illumination” ve
+<br/>“lighting” kümelerinde yer alan yüz
+<br/>imgeleri üzerinde
+<br/>sınanmıştır. Elde edilen deneysel sonuçlar, aydınlanma
+<br/>yönünden yararlanmanın ve aydınlanma alt-uzaylarına dayalı
+<br/>yüz tanıma algoritmasının yüz tanıma başarımını önemli
+<br/>ölçüde arttırdığını göstermiştir.
+<br/>değişimleri,
+<br/>farklı
+</td></tr><tr><td>a95dc0c4a9d882a903ce8c70e80399f38d2dcc89</td><td> TR-IIS-14-003
+<br/>Review and Implementation of
+<br/>High-Dimensional Local Binary
+<br/>Patterns and Its Application to
+<br/>Face Recognition
+<br/>July. 24, 2014 || Technical Report No. TR-IIS-14-003
+<br/>http://www.iis.sinica.edu.tw/page/library/TechReport/tr2014/tr14.html
+</td></tr><tr><td>a9286519e12675302b1d7d2fe0ca3cc4dc7d17f6</td><td>Learning to Succeed while Teaching to Fail:
+<br/>Privacy in Closed Machine Learning Systems
+</td></tr><tr><td>a92b5234b8b73e06709dd48ec5f0ec357c1aabed</td><td></td></tr><tr><td>d50c6d22449cc9170ab868b42f8c72f8d31f9b6c</td><td>Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+<br/>1668
+</td></tr><tr><td>d522c162bd03e935b1417f2e564d1357e98826d2</td><td>He et al. EURASIP Journal on Advances in Signal Processing 2013, 2013:19
+<br/>http://asp.eurasipjournals.com/content/2013/1/19
+<br/>RESEARCH
+<br/>Open Access
+<br/>Weakly supervised object extraction with
+<br/>iterative contour prior for remote sensing
+<br/>images
+</td></tr><tr><td>d59f18fcb07648381aa5232842eabba1db52383e</td><td>International Conference on Systemics, Cybernetics and Informatics, February 12–15, 2004
+<br/>ROBUST FACIAL EXPRESSION RECOGNITION USING SPATIALLY
+<br/>LOCALIZED GEOMETRIC MODEL
+<br/>Department of Electrical Engineering
+<br/>Dept. of Computer Sc. and Engg.
+<br/>IIT Kanpur
+<br/> Kanpur 208016, India
+<br/>Kanpur 208016, India
+<br/> IIT Kanpur
+<br/>Dept. of Computer Sc. and Engg.
+<br/> IIT Kanpur
+<br/>Kanpur 208016, India
+<br/>While approaches based on 3D deformable facial model have
+<br/>achieved expression recognition rates of as high as 98% [2], they
+<br/>are computationally inefficient and require considerable apriori
+<br/>training based on 3D information, which is often unavailable.
+<br/>Recognition from 2D images remains a difficult yet important
+<br/>problem for areas such as
+<br/>image database querying and
+<br/>classification. The accuracy rates achieved for 2D images are
+<br/>around 90% [3,4,5,11]. In a recent review of expression
+<br/>recognition, Fasel [1] considers the problem along several
+<br/>dimensions: whether features such as lips or eyebrows are first
+<br/>identified in the face (local [4] vs holistic [11]), or whether the
+<br/>image model used is 2D or 3D. Methods proposed for expression
+<br/>recognition from 2D images include the Gabor-Wavelet [5] or
+<br/>Holistic Optical flow [11] approach.
+<br/>This paper describes a more robust system for facial expression
+<br/>recognition from image sequences using 2D appearance-based
+<br/>local approach for the extraction of intransient facial features, i.e.
+<br/>features such as eyebrows, lips, or mouth, which are always
+<br/>present in the image, but may be deformed [1] (in contrast,
+<br/>transient features are wrinkles or bulges that disappear at other
+<br/>times). The main advantages of such an approach is low
+<br/>computational requirements, ability to work with both colored and
+<br/>grayscale images and robustness in handling partial occlusions
+<br/>[3].
+<br/>Edge projection analysis which is used here for feature extraction
+<br/>(eyebrows and lips) is well known [6]. Unlike [6] which describes
+<br/>a template based matching as an essential starting point, we use
+<br/>contours analysis. Our system computes a feature vector based on
+<br/>geometrical model of the face and then classifies it into four
+<br/>expression classes using a feed-forward basis function net. The
+<br/>system detects open and closed state of the mouth as well. The
+<br/>algorithm presented here works on both color and grayscale image
+<br/>sequences. An important aspect of our work is the use of color
+<br/>information for robust and more accurate segmentation of lip
+<br/>region in case of color images. The novel lip-enhancement
+<br/>transform is based on Gaussian modeling of skin and lip color.
+<br/>To place the work in a larger context of face analysis and
+<br/>recognition, the overall task requires that the part of the image
+<br/>involving the face be detected and segmented. We assume that a
+<br/>near-frontal view of the face is available. Tests on a grayscale
+<br/>and two color face image databases ([8] and [9,10]) demonstrate a
+<br/>superior recognition rate for four facial expressions (smile,
+<br/>surprise, disgust and sad against neutral).
+<br/>image sequences
+</td></tr><tr><td>d588dd4f305cdea37add2e9bb3d769df98efe880</td><td>
+<br/>Audio-Visual Authentication System over the
+<br/>Internet Protocol
+<br/>abandoned.
+<br/>in
+<br/>illumination based
+<br/>is developed with the objective to
+</td></tr><tr><td>d5444f9475253bbcfef85c351ea9dab56793b9ea</td><td>IEEE TRANSACTIONS ON INTELLIGENT TRANSPORTATION SYSTEMS
+<br/>BoxCars: Improving Fine-Grained Recognition
+<br/>of Vehicles using 3D Bounding Boxes
+<br/>in Traffic Surveillance
+<br/>in contrast
+</td></tr><tr><td>d5ab6aa15dad26a6ace5ab83ce62b7467a18a88e</td><td>World Journal of Computer Application and Technology 2(7): 133-138, 2014
+<br/>DOI: 10.13189/wjcat.2014.020701
+<br/>http://www.hrpub.org
+<br/>Optimized Structure for Facial Action Unit Relationship
+<br/>Using Bayesian Network
+<br/>Intelligent Biometric Group, School of Electrical and Electronic Engineering, Engineering Campus, Universiti Sains Malaysia, Pulau
+<br/>Pinang, Malaysia
+<br/>Copyright © 2014 Horizon Research Publishing All rights reserved.
+</td></tr><tr><td>d56fe69cbfd08525f20679ffc50707b738b88031</td><td>Training of multiple classifier systems utilizing
+<br/>partially labelled sequences
+<br/><b></b><br/>89069 Ulm - Germany
+</td></tr><tr><td>d50751da2997e7ebc89244c88a4d0d18405e8507</td><td></td></tr><tr><td>d511e903a882658c9f6f930d6dd183007f508eda</td><td></td></tr><tr><td>d59404354f84ad98fa809fd1295608bf3d658bdc</td><td>International Journal of Computer Vision manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Face Synthesis from Visual Attributes via Sketch using
+<br/>Conditional VAEs and GANs
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>d5e1173dcb2a51b483f86694889b015d55094634</td><td></td></tr><tr><td>d2eb1079552fb736e3ba5e494543e67620832c52</td><td>ANNUNZIATA, SAGONAS, CALÌ: DENSELY FUSED SPATIAL TRANSFORMER NETWORKS1
+<br/>DeSTNet: Densely Fused Spatial
+<br/>Transformer Networks1
+<br/>Onfido Research
+<br/>3 Finsbury Avenue
+<br/>London, UK
+</td></tr><tr><td>d24dafe10ec43ac8fb98715b0e0bd8e479985260</td><td>J Nonverbal Behav (2018) 42:81–99
+<br/>https://doi.org/10.1007/s10919-017-0266-z
+<br/>O R I G I N A L P A P E R
+<br/>Effects of Social Anxiety on Emotional Mimicry
+<br/>and Contagion: Feeling Negative, but Smiling Politely
+<br/>• Gerben A. van Kleef2
+<br/>• Agneta H. Fischer2
+<br/>Published online: 25 September 2017
+<br/>Ó The Author(s) 2017. This article is an open access publication
+</td></tr><tr><td>d278e020be85a1ccd90aa366b70c43884dd3f798</td><td>Learning From Less Data: Diversified Subset Selection and
+<br/>Active Learning in Image Classification Tasks
+<br/>IIT Bombay
+<br/>Mumbai, Maharashtra, India
+<br/>AITOE Labs
+<br/>Mumbai, Maharashtra, India
+<br/>AITOE Labs
+<br/>Mumbai, Maharashtra, India
+<br/>Rishabh Iyer
+<br/>AITOE Labs
+<br/>Seattle, Washington, USA
+<br/>AITOE Labs
+<br/>Seattle, Washington, USA
+<br/>Narsimha Raju
+<br/>IIT Bombay
+<br/>Mumbai, Maharashtra, India
+<br/>IIT Bombay
+<br/>Mumbai, Maharashtra, India
+<br/>IIT Bombay
+<br/>Mumbai, Maharashtra, India
+<br/>May 30, 2018
+</td></tr><tr><td>aafb271684a52a0b23debb3a5793eb618940c5dd</td><td></td></tr><tr><td>aa52910c8f95e91e9fc96a1aefd406ffa66d797d</td><td>FACE RECOGNITION SYSTEM BASED
+<br/>ON 2DFLD AND PCA
+<br/>E&TC Department
+<br/>Sinhgad Academy of Engineering
+<br/>Pune, India
+<br/>Mr. Hulle Rohit Rajiv
+<br/>ME E&TC [Digital System]
+<br/>Sinhgad Academy of Engineering
+<br/>Pune, India
+</td></tr><tr><td>aadfcaf601630bdc2af11c00eb34220da59b7559</td><td>Multi-view Hybrid Embedding:
+<br/>A Divide-and-Conquer Approach
+</td></tr><tr><td>aaa4c625f5f9b65c7f3df5c7bfe8a6595d0195a5</td><td>Biometrics in Ambient Intelligence
+</td></tr><tr><td>aa331fe378056b6d6031bb8fe6676e035ed60d6d</td><td></td></tr><tr><td>aae0e417bbfba701a1183d3d92cc7ad550ee59c3</td><td>844
+<br/>A Statistical Method for 2-D Facial Landmarking
+</td></tr><tr><td>aa577652ce4dad3ca3dde44f881972ae6e1acce7</td><td>Deep Attribute Networks
+<br/>Department of EE, KAIST
+<br/>Daejeon, South Korea
+<br/>Department of EE, KAIST
+<br/>Daejeon, South Korea
+<br/>Department of EE, KAIST
+<br/>Daejeon, South Korea
+<br/>Department of EE, KAIST
+<br/>Daejeon, South Korea
+</td></tr><tr><td>aa94f214bb3e14842e4056fdef834a51aecef39c</td><td>Reconhecimento de padrões faciais: Um estudo
+<br/>Universidade Federal
+<br/>Rural do Semi-Árido
+<br/>Departamento de Ciências Naturais
+<br/>Mossoró, RN - 59625-900
+<br/>Resumo—O reconhecimento facial tem sido utilizado em di-
+<br/>versas áreas para identificação e autenticação de usuários. Um
+<br/>dos principais mercados está relacionado a segurança, porém há
+<br/>uma grande variedade de aplicações relacionadas ao uso pessoal,
+<br/>conveniência, aumento de produtividade, etc. O rosto humano
+<br/>possui um conjunto de padrões complexos e mutáveis. Para
+<br/>reconhecer esses padrões, são necessárias técnicas avançadas de
+<br/>reconhecimento de padrões capazes, não apenas de reconhecer,
+<br/>mas de se adaptar às mudanças constantes das faces das pessoas.
+<br/>Este documento apresenta um método de reconhecimento facial
+<br/>proposto a partir da análise comparativa de trabalhos encontra-
+<br/>dos na literatura.
+<br/>biométrica é o uso da biometria para reconhecimento, identi-
+<br/>ficação ou verificação, de um ou mais traços biométricos de
+<br/>um indivíduo com o objetivo de autenticar sua identidade. Os
+<br/>traços biométricos são os atributos analisados pelas técnicas
+<br/>de reconhecimento biométrico.
+<br/>A tarefa de reconhecimento facial é composta por três
+<br/>processos distintos: Registro, verificação e identificação bio-
+<br/>métrica. Os processos se diferenciam pela forma de determinar
+<br/>a identidade de um indivíduo. Na Figura 1 são descritos os
+<br/>processos de registro, verificação e identificação biométrica.
+<br/>I. INTRODUÇÃO
+<br/>Biometria é a ciência que estabelece a identidade de um
+<br/>indivíduo baseada em seus atributos físicos, químicos ou
+<br/>comportamentais [1]. Possui inúmeras aplicações em diver-
+<br/>sas áreas, se destacando mais na área de segurança, como
+<br/>por exemplo sistemas de gerenciamento de identidade, cuja
+<br/>funcionalidade é autenticar a identidade de um indivíduo no
+<br/>contexto de uma aplicação.
+<br/>O reconhecimento facial é uma técnica biométrica que
+<br/>consiste em identificar padrões em características faciais como
+<br/>formato da boca, do rosto, distância dos olhos, entre outros.
+<br/>Um humano é capaz de reconhecer uma pessoa familiar
+<br/>mesmo com muitos obstáculos com distância, sombras ou
+<br/>apenas a visão parcial do rosto. Uma máquina, no entanto,
+<br/>precisa realizar inúmeros processos para detectar e reconhecer
+<br/>um conjunto de padrões específicos para rotular uma face
+<br/>como conhecida ou desconhecida. Para isso, exitem métodos
+<br/>capazes de detectar, extrair e classificar as características
+<br/>faciais, fornecendo um reconhecimento automático de pessoas.
+<br/>II. RECONHECIMENTO FACIAL
+<br/>A tecnologia biométrica oferece vantagens em relação a
+<br/>outros métodos tradicionais de identificação como senhas,
+<br/>documentos e tokens. Entre elas estão o fato de que os
+<br/>traços biométricos não podem ser perdidos ou esquecidos, são
+<br/>difíceis de serem copiados, compartilhados ou distribuídos. Os
+<br/>métodos requerem que a pessoa autenticada esteja presente
+<br/>na hora e lugar da autenticação, evitando que pessoas má
+<br/>intencionadas tenham acesso sem autorização.
+<br/>A autenticação é o ato de estabelecer ou confirmar alguém,
+<br/>ou alguma coisa, como autêntico, isto é, que as alegações
+<br/>feitas por ou sobre a coisa é verdadeira [2]. Autenticação
+<br/>(a)
+<br/>(b)
+<br/>(c)
+<br/>Figura 1: Registro biométrico (a), identificação biométrica (b)
+<br/>e verificação biométrica (c)
+<br/>A Figura 1a descreve o processo de registro de dados
+</td></tr><tr><td>af8fe1b602452cf7fc9ecea0fd4508ed4149834e</td><td></td></tr><tr><td>af6e351d58dba0962d6eb1baf4c9a776eb73533f</td><td>How to Train Your Deep Neural Network with
+<br/>Dictionary Learning
+<br/>*IIIT Delhi
+<br/>Okhla Phase 3
+<br/>Delhi, 110020, India
+<br/>+IIIT Delhi
+<br/>Okhla Phase 3
+<br/>#IIIT Delhi
+<br/>Okhla Phase 3
+<br/>Delhi, 110020, India
+<br/>Delhi, 110020, India
+</td></tr><tr><td>af6cae71f24ea8f457e581bfe1240d5fa63faaf7</td><td></td></tr><tr><td>af54dd5da722e104740f9b6f261df9d4688a9712</td><td></td></tr><tr><td>afc7092987f0d05f5685e9332d83c4b27612f964</td><td>Person-Independent Facial Expression Detection using Constrained
+<br/>Local Models
+</td></tr><tr><td>b730908bc1f80b711c031f3ea459e4de09a3d324</td><td>2024
+<br/>Active Orientation Models for Face
+<br/>Alignment In-the-Wild
+</td></tr><tr><td>b7cf7bb574b2369f4d7ebc3866b461634147041a</td><td>Neural Comput & Applic (2012) 21:1575–1583
+<br/>DOI 10.1007/s00521-011-0728-x
+<br/>O R I G I N A L A R T I C L E
+<br/>From NLDA to LDA/GSVD: a modified NLDA algorithm
+<br/>Received: 2 August 2010 / Accepted: 3 August 2011 / Published online: 19 August 2011
+<br/>Ó Springer-Verlag London Limited 2011
+</td></tr><tr><td>b7eead8586ffe069edd190956bd338d82c69f880</td><td>A VIDEO DATABASE FOR FACIAL
+<br/>BEHAVIOR UNDERSTANDING
+<br/>D. Freire-Obreg´on and M. Castrill´on-Santana.
+<br/>SIANI, Universidad de Las Palmas de Gran Canaria, Spain
+</td></tr><tr><td>b75cee96293c11fe77ab733fc1147950abbe16f9</td><td></td></tr><tr><td>b7f05d0771da64192f73bdb2535925b0e238d233</td><td> MVA2005 IAPR Conference on Machine VIsion Applications, May 16-18, 2005 Tsukuba Science City, Japan
+<br/>4-3
+<br/>Robust Active Shape Model using AdaBoosted Histogram Classifiers
+<br/>W ataru Ito
+<br/>Imaging Software Technology Center
+<br/>Imaging Software Technology Center
+<br/>FUJI PHOTO FILM CO., LTD.
+<br/>FUJI PHOTO FILM CO., LTD.
+</td></tr><tr><td>b755505bdd5af078e06427d34b6ac2530ba69b12</td><td>To appear in the International Joint Conf. Biometrics, Washington D.C., October, 2011
+<br/>NFRAD: Near-Infrared Face Recognition at a Distance
+<br/>aDept. of Brain and Cognitive Eng. Korea Univ., Seoul, Korea
+<br/>bDept. of Comp. Sci. & Eng. Michigan State Univ., E. Lansing, MI, USA 48824
+</td></tr><tr><td>b73fdae232270404f96754329a1a18768974d3f6</td><td></td></tr><tr><td>b76af8fcf9a3ebc421b075b689defb6dc4282670</td><td>Face Mask Extraction in Video Sequence
+</td></tr><tr><td>b747fcad32484dfbe29530a15776d0df5688a7db</td><td></td></tr><tr><td>b7f7a4df251ff26aca83d66d6b479f1dc6cd1085</td><td>Bouges et al. EURASIP Journal on Image and Video Processing 2013, 2013:55
+<br/>http://jivp.eurasipjournals.com/content/2013/1/55
+<br/>RESEARCH
+<br/>Open Access
+<br/>Handling missing weak classifiers in boosted
+<br/>cascade: application to multiview and
+<br/>occluded face detection
+</td></tr><tr><td>db227f72bb13a5acca549fab0dc76bce1fb3b948</td><td>International Refereed Journal of Engineering and Science (IRJES)
+<br/>ISSN (Online) 2319-183X, (Print) 2319-1821
+<br/>Volume 4, Issue 6 (June 2015), PP.169-169-174
+<br/>Characteristic Based Image Search using Re-Ranking method
+<br/>1Chitti Babu, 2Yasmeen Jaweed, 3G.Vijay Kumar
+<br/><b></b></td></tr><tr><td>dbaf89ca98dda2c99157c46abd136ace5bdc33b3</td><td>Nonlinear Cross-View Sample Enrichment for
+<br/>Action Recognition
+<br/>Institut Mines-T´el´ecom; T´el´ecom ParisTech; CNRS LTCI
+</td></tr><tr><td>dbab6ac1a9516c360cdbfd5f3239a351a64adde7</td><td></td></tr><tr><td>dbe255d3d2a5d960daaaba71cb0da292e0af36a7</td><td>Evolutionary Cost-sensitive Extreme Learning
+<br/>Machine
+<br/>1
+</td></tr><tr><td>dbb0a527612c828d43bcb9a9c41f1bf7110b1dc8</td><td>Chapter 7
+<br/>Machine Learning Techniques
+<br/>for Face Analysis
+</td></tr><tr><td>dbb7f37fb9b41d1aa862aaf2d2e721a470fd2c57</td><td>Face Image Analysis With
+<br/>Convolutional Neural Networks
+<br/>Dissertation
+<br/>Zur Erlangung des Doktorgrades
+<br/>der Fakult¨at f¨ur Angewandte Wissenschaften
+<br/>an der Albert-Ludwigs-Universit¨at Freiburg im Breisgau
+<br/>von
+<br/>Stefan Duffner
+<br/>2007
+</td></tr><tr><td>a83fc450c124b7e640adc762e95e3bb6b423b310</td><td>Deep Face Feature for Face Alignment
+</td></tr><tr><td>a85e9e11db5665c89b057a124547377d3e1c27ef</td><td>Dynamics of Driver’s Gaze: Explorations in
+<br/>Behavior Modeling & Maneuver Prediction
+</td></tr><tr><td>a8117a4733cce9148c35fb6888962f665ae65b1e</td><td>IEEE TRANSACTIONS ON XXXX, VOL. XX, NO. XX, XX 201X
+<br/>A Good Practice Towards Top Performance of Face
+<br/>Recognition: Transferred Deep Feature Fusion
+</td></tr><tr><td>a8035ca71af8cc68b3e0ac9190a89fed50c92332</td><td>000
+<br/>001
+<br/>002
+<br/>003
+<br/>004
+<br/>005
+<br/>006
+<br/>007
+<br/>008
+<br/>009
+<br/>010
+<br/>011
+<br/>012
+<br/>013
+<br/>014
+<br/>015
+<br/>016
+<br/>017
+<br/>018
+<br/>019
+<br/>020
+<br/>021
+<br/>022
+<br/>023
+<br/>024
+<br/>025
+<br/>026
+<br/>027
+<br/>028
+<br/>029
+<br/>030
+<br/>031
+<br/>032
+<br/>033
+<br/>034
+<br/>035
+<br/>036
+<br/>037
+<br/>038
+<br/>039
+<br/>040
+<br/>041
+<br/>042
+<br/>043
+<br/>044
+<br/>IIIT-CFW: A Benchmark Database of
+<br/>Cartoon Faces in the Wild
+<br/>1 IIIT Chittoor, Sri City, India
+<br/>2 CVIT, KCIS, IIIT Hyderabad, India
+</td></tr><tr><td>a88640045d13fc0207ac816b0bb532e42bcccf36</td><td>ARXIV VERSION
+<br/>Simultaneously Learning Neighborship and
+<br/>Projection Matrix for Supervised
+<br/>Dimensionality Reduction
+</td></tr><tr><td>a8a30a8c50d9c4bb8e6d2dd84bc5b8b7f2c84dd8</td><td>This is a repository copy of Modelling of Orthogonal Craniofacial Profiles.
+<br/>White Rose Research Online URL for this paper:
+<br/>http://eprints.whiterose.ac.uk/131767/
+<br/>Version: Published Version
+<br/>Article:
+<br/>Dai, Hang, Pears, Nicholas Edwin orcid.org/0000-0001-9513-5634 and Duncan, Christian
+<br/>(2017) Modelling of Orthogonal Craniofacial Profiles. Journal of Imaging. ISSN 2313-433X
+<br/>https://doi.org/10.3390/jimaging3040055
+<br/>Reuse
+<br/>This article is distributed under the terms of the Creative Commons Attribution (CC BY) licence. This licence
+<br/>allows you to distribute, remix, tweak, and build upon the work, even commercially, as long as you credit the
+<br/>authors for the original work. More information and the full terms of the licence here:
+<br/>https://creativecommons.org/licenses/
+<br/>Takedown
+<br/>If you consider content in White Rose Research Online to be in breach of UK law, please notify us by
+<br/>https://eprints.whiterose.ac.uk/
+</td></tr><tr><td>a8e75978a5335fd3deb04572bb6ca43dbfad4738</td><td>Sparse Graphical Representation based Discriminant
+<br/>Analysis for Heterogeneous Face Recognition
+</td></tr><tr><td>ded968b97bd59465d5ccda4f1e441f24bac7ede5</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Large scale 3D Morphable Models
+<br/>Zafeiriou
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>de0eb358b890d92e8f67592c6e23f0e3b2ba3f66</td><td>ACCEPTED BY IEEE TRANS. PATTERN ANAL. AND MACH. INTELL.
+<br/>Inference-Based Similarity Search in
+<br/>Randomized Montgomery Domains for
+<br/>Privacy-Preserving Biometric Identification
+</td></tr><tr><td>dee406a7aaa0f4c9d64b7550e633d81bc66ff451</td><td>Content-Adaptive Sketch Portrait Generation by
+<br/>Decompositional Representation Learning
+</td></tr><tr><td>dedabf9afe2ae4a1ace1279150e5f1d495e565da</td><td>3294
+<br/>Robust Face Recognition With Structurally
+<br/>Incoherent Low-Rank Matrix Decomposition
+</td></tr><tr><td>de398bd8b7b57a3362c0c677ba8bf9f1d8ade583</td><td>Hierarchical Bayesian Theme Models for
+<br/>Multi-pose Facial Expression Recognition
+</td></tr><tr><td>ded41c9b027c8a7f4800e61b7cfb793edaeb2817</td><td></td></tr><tr><td>defa8774d3c6ad46d4db4959d8510b44751361d8</td><td>FEBEI - Face Expression Based Emoticon Identification
+<br/>CS - B657 Computer Vision
+<br/>Robert J Henderson - rojahend
+</td></tr><tr><td>b0c512fcfb7bd6c500429cbda963e28850f2e948</td><td></td></tr><tr><td>b09b693708f412823053508578df289b8403100a</td><td>WANG et al.: TWO-STREAM SR-CNNS FOR ACTION RECOGNITION IN VIDEOS
+<br/>Two-Stream SR-CNNs for Action
+<br/>Recognition in Videos
+<br/>1 Advanced Interactive Technologies Lab
+<br/>ETH Zurich
+<br/>Zurich, Switzerland
+<br/>2 Computer Vision Lab
+<br/>ETH Zurich
+<br/>Zurich, Switzerland
+</td></tr><tr><td>b07582d1a59a9c6f029d0d8328414c7bef64dca0</td><td>Employing Fusion of Learned and Handcrafted
+<br/>Features for Unconstrained Ear Recognition
+<br/>Maur´ıcio Pamplona Segundo∗†
+<br/>October 24, 2017
+</td></tr><tr><td>b03d6e268cde7380e090ddaea889c75f64560891</td><td></td></tr><tr><td>b0c1615ebcad516b5a26d45be58068673e2ff217</td><td>How Image Degradations Affect Deep CNN-based Face
+<br/>Recognition?
+<br/>S¸amil Karahan1 Merve Kılınc¸ Yıldırım1 Kadir Kırtac¸1 Ferhat S¸ ¨ukr¨u Rende1
+<br/>G¨ultekin B¨ut¨un1Hazım Kemal Ekenel2
+</td></tr><tr><td>b0de0892d2092c8c70aa22500fed31aa7eb4dd3f</td><td>(will be inserted by the editor)
+<br/>A robust and efficient video representation for action recognition
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>a66d89357ada66d98d242c124e1e8d96ac9b37a0</td><td>Failure Detection for Facial Landmark Detectors
+<br/>Computer Vision Lab, D-ITET, ETH Zurich, Switzerland
+</td></tr><tr><td>a608c5f8fd42af6e9bd332ab516c8c2af7063c61</td><td>2408
+<br/>Age Estimation via Grouping and Decision Fusion
+</td></tr><tr><td>a6eb6ad9142130406fb4ffd4d60e8348c2442c29</td><td>Video Description: A Survey of Methods,
+<br/>Datasets and Evaluation Metrics
+</td></tr><tr><td>a6583c8daa7927eedb3e892a60fc88bdfe89a486</td><td></td></tr><tr><td>a6590c49e44aa4975b2b0152ee21ac8af3097d80</td><td>https://doi.org/10.1007/s11263-018-1074-6
+<br/>3D Interpreter Networks for Viewer-Centered Wireframe Modeling
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>a694180a683f7f4361042c61648aa97d222602db</td><td>Face Recognition using Scattering Wavelet under Illicit Drug Abuse Variations
+<br/>IIIT-Delhi India
+</td></tr><tr><td>a6db73f10084ce6a4186363ea9d7475a9a658a11</td><td></td></tr><tr><td>a6634ff2f9c480e94ed8c01d64c9eb70e0d98487</td><td></td></tr><tr><td>b9d0774b0321a5cfc75471b62c8c5ef6c15527f5</td><td>Fishy Faces: Crafting Adversarial Images to Poison Face Authentication
+<br/>imec-DistriNet, KU Leuven
+<br/>imec-DistriNet, KU Leuven
+<br/>imec-DistriNet, KU Leuven
+<br/>imec-DistriNet, KU Leuven
+<br/>imec-DistriNet, KU Leuven
+</td></tr><tr><td>b908edadad58c604a1e4b431f69ac8ded350589a</td><td>Deep Face Feature for Face Alignment
+</td></tr><tr><td>b9f2a755940353549e55690437eb7e13ea226bbf</td><td>Unsupervised Feature Learning from Videos for Discovering and Recognizing Actions
+</td></tr><tr><td>b9cedd1960d5c025be55ade0a0aa81b75a6efa61</td><td>INEXACT KRYLOV SUBSPACE ALGORITHMS FOR LARGE
+<br/>MATRIX EXPONENTIAL EIGENPROBLEM FROM
+<br/>DIMENSIONALITY REDUCTION
+</td></tr><tr><td>b971266b29fcecf1d5efe1c4dcdc2355cb188ab0</td><td>MAI et al.: ON THE RECONSTRUCTION OF FACE IMAGES FROM DEEP FACE TEMPLATES
+<br/>On the Reconstruction of Face Images from
+<br/>Deep Face Templates
+</td></tr><tr><td>a158c1e2993ac90a90326881dd5cb0996c20d4f3</td><td>OPEN ACCESS
+<br/>ISSN 2073-8994
+<br/>Article
+<br/>1 DMA, Università degli Studi di Palermo, via Archirafi 34, 90123 Palermo, Italy
+<br/>2 CITC, Università degli Studi di Palermo, via Archirafi 34, 90123 Palermo, Itlay
+<br/>3 Istituto Nazionale di Ricerche Demopolis, via Col. Romey 7, 91100 Trapani, Italy
+<br/>† Deceased on 15 March 2009.
+<br/>Received: 4 March 2010; in revised form: 23 March 2010 / Accepted: 29 March 2010 /
+<br/>Published: 1 April 2010
+</td></tr><tr><td>a15d9d2ed035f21e13b688a78412cb7b5a04c469</td><td>Object Detection Using
+<br/>Strongly-Supervised Deformable Part Models
+<br/>1Computer Vision and Active Perception Laboratory (CVAP), KTH, Sweden
+<br/>2INRIA, WILLOW, Laboratoire d’Informatique de l’Ecole Normale Superieure
+</td></tr><tr><td>a1b1442198f29072e907ed8cb02a064493737158</td><td>456
+<br/>Crowdsourcing Facial Responses
+<br/>to Online Videos
+</td></tr><tr><td>a15c728d008801f5ffc7898568097bbeac8270a4</td><td>Concise Preservation by Combining Managed Forgetting
+<br/>and Contextualized Remembering
+<br/>Grant Agreement No. 600826
+<br/>Deliverable D4.4
+<br/>Work-package
+<br/>Deliverable
+<br/>Deliverable Leader
+<br/>Quality Assessor
+<br/>Dissemination level
+<br/>Delivery date in Annex I
+<br/>Actual delivery date
+<br/>Revisions
+<br/>Status
+<br/>Keywords
+<br/>Information Consolidation and Con-
+<br/>WP4:
+<br/>centration
+<br/>D4.4:
+<br/>Information analysis, consolidation
+<br/>and concentration techniques, and evalua-
+<br/>tion - Final release.
+<br/>Vasileios Mezaris (CERTH)
+<br/>Walter Allasia (EURIX)
+<br/>PU
+<br/>31-01-2016 (M36)
+<br/>31-01-2016
+<br/>Final
+<br/>multidocument summarization, semantic en-
+<br/>richment,
+<br/>feature extraction, concept de-
+<br/>tection, event detection, image/video qual-
+<br/>ity, image/video aesthetic quality, face de-
+<br/>tection/clustering,
+<br/>im-
+<br/>age/video summarization, image/video near
+<br/>duplicate detection, data deduplication, con-
+<br/>densation, consolidation
+<br/>image clustering,
+</td></tr><tr><td>a1132e2638a8abd08bdf7fc4884804dd6654fa63</td><td>6
+<br/>Real-Time Video Face Recognition
+<br/>for Embedded Devices
+<br/>Tessera, Galway,
+<br/>Ireland
+<br/>1. Introduction
+<br/>This chapter will address the challenges of real-time video face recognition systems
+<br/>implemented in embedded devices. Topics to be covered include: the importance and
+<br/>challenges of video face recognition in real life scenarios, describing a general architecture of
+<br/>a generic video face recognition system and a working solution suitable for recognizing
+<br/>faces in real-time using low complexity devices. Each component of the system will be
+<br/>described together with the system’s performance on a database of video samples that
+<br/>resembles real life conditions.
+<br/>2. Video face recognition
+<br/>Face recognition remains a very active topic in computer vision and receives attention from
+<br/>a large community of researchers in that discipline. Many reasons feed this interest; the
+<br/>main being the wide range of commercial, law enforcement and security applications that
+<br/>require authentication. The progress made in recent years on the methods and algorithms
+<br/>for data processing as well as the availability of new technologies makes it easier to study
+<br/>these algorithms and turn them into commercially viable product. Biometric based security
+<br/>systems are becoming more popular due to their non-invasive nature and their increasing
+<br/>reliability. Surveillance applications based on face recognition are gaining increasing
+<br/>attention after the United States’ 9/11 events and with the ongoing security threats. The
+<br/>Face Recognition Vendor Test (FRVT) (Phillips et al., 2003) includes video face recognition
+<br/>testing starting with the 2002 series of tests.
+<br/>Recently, face recognition technology was deployed in consumer applications such as
+<br/>organizing a collection of images using the faces present in the images (Picassa; Corcoran &
+<br/>Costache, 2005), prioritizing family members for best capturing conditions when taking
+<br/>pictures, or directly annotating the images as they are captured (Costache et al., 2006).
+<br/>Video face recognition, compared with more traditional still face recognition, has the main
+<br/>advantage of using multiple instances of the same individual in sequential frames for
+<br/>recognition to occur. In still recognition case, the system has only one input image to make
+<br/>the decision if the person is or is not in the database. If the image is not suitable for
+<br/>recognition (due to face orientation, expression, quality or facial occlusions) the recognition
+<br/>result will most likely be incorrect. In the video image there are multiple frames which can
+<br/>www.intechopen.com
+</td></tr><tr><td>a14ae81609d09fed217aa12a4df9466553db4859</td><td>REVISED VERSION, JUNE 2011
+<br/>Face Identification Using Large Feature Sets
+</td></tr><tr><td>a1e97c4043d5cc9896dc60ae7ca135782d89e5fc</td><td>IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE
+<br/>Re-identification of Humans in Crowds using
+<br/>Personal, Social and Environmental Constraints
+</td></tr><tr><td>efd308393b573e5410455960fe551160e1525f49</td><td>Tracking Persons-of-Interest via
+<br/>Unsupervised Representation Adaptation
+</td></tr><tr><td>ef4ecb76413a05c96eac4c743d2c2a3886f2ae07</td><td>Modeling the Importance of Faces in Natural Images
+<br/>Jin B.a, Yildirim G.a, Lau C.a, Shaji A.a, Ortiz Segovia M.b and S¨usstrunk S.a
+<br/>aEPFL, Lausanne, Switzerland;
+<br/>bOc´e, Paris, France
+</td></tr><tr><td>ef032afa4bdb18b328ffcc60e2dc5229cc1939bc</td><td>Fang and Yuan EURASIP Journal on Image and Video
+<br/>Processing (2018) 2018:44
+<br/>https://doi.org/10.1186/s13640-018-0282-x
+<br/>EURASIP Journal on Image
+<br/>and Video Processing
+<br/>RESEARCH
+<br/>Open Access
+<br/>Attribute-enhanced metric learning for
+<br/>face retrieval
+</td></tr><tr><td>ef5531711a69ed687637c48930261769465457f0</td><td>Studio2Shop: from studio photo shoots to fashion articles
+<br/>Zalando Research, Muehlenstr. 25, 10243 Berlin, Germany
+<br/>Keywords:
+<br/>computer vision, deep learning, fashion, item recognition, street-to-shop
+</td></tr><tr><td>efa08283656714911acff2d5022f26904e451113</td><td>Active Object Localization in Visual Situations
+</td></tr><tr><td>ef999ab2f7b37f46445a3457bf6c0f5fd7b5689d</td><td>Calhoun: The NPS Institutional Archive
+<br/>DSpace Repository
+<br/>Theses and Dissertations
+<br/>1. Thesis and Dissertation Collection, all items
+<br/>2017-12
+<br/>Improving face verification in photo albums by
+<br/>combining facial recognition and metadata
+<br/>with cross-matching
+<br/>Monterey, California: Naval Postgraduate School
+<br/>http://hdl.handle.net/10945/56868
+<br/>Downloaded from NPS Archive: Calhoun
+</td></tr><tr><td>c3beae515f38daf4bd8053a7d72f6d2ed3b05d88</td><td></td></tr><tr><td>c3dc4f414f5233df96a9661609557e341b71670d</td><td>Tao et al. EURASIP Journal on Advances in Signal Processing 2011, 2011:4
+<br/>http://asp.eurasipjournals.com/content/2011/1/4
+<br/>RESEARCH
+<br/>Utterance independent bimodal emotion
+<br/>recognition in spontaneous communication
+<br/>Open Access
+</td></tr><tr><td>c398684270543e97e3194674d9cce20acaef3db3</td><td>Chapter 2
+<br/>Comparative Face Soft Biometrics for
+<br/>Human Identification
+</td></tr><tr><td>c3285a1d6ec6972156fea9e6dc9a8d88cd001617</td><td></td></tr><tr><td>c3418f866a86dfd947c2b548cbdeac8ca5783c15</td><td></td></tr><tr><td>c32383330df27625592134edd72d69bb6b5cff5c</td><td>422
+<br/>IEEE TRANSACTIONS ON SYSTEMS, MAN, AND CYBERNETICS—PART B: CYBERNETICS, VOL. 42, NO. 2, APRIL 2012
+<br/>Intrinsic Illumination Subspace for Lighting
+<br/>Insensitive Face Recognition
+</td></tr><tr><td>c3a3f7758bccbead7c9713cb8517889ea6d04687</td><td></td></tr><tr><td>c30e4e4994b76605dcb2071954eaaea471307d80</td><td></td></tr><tr><td>c37a971f7a57f7345fdc479fa329d9b425ee02be</td><td>A Novice Guide towards Human Motion Analysis and Understanding
+</td></tr><tr><td>c3638b026c7f80a2199b5ae89c8fcbedfc0bd8af</td><td></td></tr><tr><td>c3fb2399eb4bcec22723715556e31c44d086e054</td><td>499
+<br/>2014 IEEE International Conference on Acoustic, Speech and Signal Processing (ICASSP)
+<br/>978-1-4799-2893-4/14/$31.00 ©2014 IEEE
+<br/>1. INTRODUCTION
+</td></tr><tr><td>c37de914c6e9b743d90e2566723d0062bedc9e6a</td><td>©2016 Society for Imaging Science and Technology
+<br/>DOI: 10.2352/ISSN.2470-1173.2016.11.IMAWM-455
+<br/>Joint and Discriminative Dictionary Learning
+<br/>Expression Recognition
+<br/>for Facial
+</td></tr><tr><td>c4f1fcd0a5cdaad8b920ee8188a8557b6086c1a4</td><td>Int J Comput Vis (2014) 108:3–29
+<br/>DOI 10.1007/s11263-014-0698-4
+<br/>The Ignorant Led by the Blind: A Hybrid Human–Machine Vision
+<br/>System for Fine-Grained Categorization
+<br/>Received: 7 March 2013 / Accepted: 8 January 2014 / Published online: 20 February 2014
+<br/>© Springer Science+Business Media New York 2014
+</td></tr><tr><td>c4dcf41506c23aa45c33a0a5e51b5b9f8990e8ad</td><td> Understanding Activity: Learning the Language of Action
+<br/> Univ. of Rochester and Maryland
+<br/>1.1 Overview
+<br/>Understanding observed activity is an important
+<br/>problem, both from the standpoint of practical applications,
+<br/>and as a central issue in attempting to describe the
+<br/>phenomenon of intelligence. On the practical side, there are a
+<br/>large number of applications that would benefit from
+<br/>improved machine ability to analyze activity. The most
+<br/>prominent are various surveillance scenarios. The current
+<br/>emphasis on homeland security has brought this issue to the
+<br/>forefront, and resulted in considerable work on mostly low-
+<br/>level detection schemes. There are also applications in
+<br/>medical diagnosis and household assistants that, in the long
+<br/>run, may be even more important. In addition, there are
+<br/>numerous scientific projects, ranging from monitoring of
+<br/>weather conditions to observation of animal behavior that
+<br/>would be facilitated by automatic understanding of activity.
+<br/>From a scientific standpoint, understanding activity
+<br/>understanding is central to understanding intelligence.
+<br/>Analyzing what is happening in the environment, and acting
+<br/>on the results of that analysis is, to a large extent, what
+<br/>natural intelligent systems do, whether they are human or
+<br/>animal. Artificial intelligences, if we want them to work with
+<br/>people in the natural world, will need commensurate abilities.
+<br/>The importance of the problem has not gone unrecognized.
+<br/>There is a substantial body of work on various components of
+<br/>the problem, most especially on change detection, motion
+<br/>analysis, and tracking. More recently, in the context of
+<br/>surveillance applications, there have been some preliminary
+<br/>efforts to come up with a general ontology of human activity.
+<br/>These efforts have largely been top-down in the classic AI
+<br/>tradition, and, as with earlier analogous effort in areas such
+<br/>as object recognition and scene understanding, have seen
+<br/>limited practical application because of the difficulty in
+<br/>robustly extracting the putative primitives on which the top-
+<br/>down formalism is based. We propose a novel alternative
+<br/>approach, where understanding activity is centered on
+</td></tr><tr><td>c49aed65fcf9ded15c44f9cbb4b161f851c6fa88</td><td>Multiscale Facial Expression Recognition using Convolutional Neural Networks
+<br/>IDIAP, Martigny, Switzerland
+</td></tr><tr><td>eac6aee477446a67d491ef7c95abb21867cf71fc</td><td>JOURNAL
+<br/>A survey of sparse representation: algorithms and
+<br/>applications
+</td></tr><tr><td>ea482bf1e2b5b44c520fc77eab288caf8b3f367a</td><td>Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+<br/>2592
+</td></tr><tr><td>eafda8a94e410f1ad53b3e193ec124e80d57d095</td><td>Jeffrey F. Cohn
+<br/>13
+<br/>Observer-Based Measurement of Facial Expression
+<br/>With the Facial Action Coding System
+<br/>Facial expression has been a focus of emotion research for over
+<br/>a hundred years (Darwin, 1872/1998). It is central to several
+<br/>leading theories of emotion (Ekman, 1992; Izard, 1977;
+<br/>Tomkins, 1962) and has been the focus of at times heated
+<br/>debate about issues in emotion science (Ekman, 1973, 1993;
+<br/>Fridlund, 1992; Russell, 1994). Facial expression figures
+<br/>prominently in research on almost every aspect of emotion,
+<br/>including psychophysiology (Levenson, Ekman, & Friesen,
+<br/>1990), neural bases (Calder et al., 1996; Davidson, Ekman,
+<br/>Saron, Senulis, & Friesen, 1990), development (Malatesta,
+<br/>Culver, Tesman, & Shephard, 1989; Matias & Cohn, 1993),
+<br/>perception (Ambadar, Schooler, & Cohn, 2005), social pro-
+<br/>cesses (Hatfield, Cacioppo, & Rapson, 1992; Hess & Kirouac,
+<br/>2000), and emotion disorder (Kaiser, 2002; Sloan, Straussa,
+<br/>Quirka, & Sajatovic, 1997), to name a few.
+<br/>Because of its importance to the study of emotion, a num-
+<br/>ber of observer-based systems of facial expression measure-
+<br/>ment have been developed (Ekman & Friesen, 1978, 1982;
+<br/>Ekman, Friesen, & Tomkins, 1971; Izard, 1979, 1983; Izard
+<br/>& Dougherty, 1981; Kring & Sloan, 1991; Tronick, Als, &
+<br/>Brazelton, 1980). Of these various systems for describing
+<br/>facial expression, the Facial Action Coding System (FACS;
+<br/>Ekman & Friesen, 1978; Ekman, Friesen, & Hager, 2002) is
+<br/>the most comprehensive, psychometrically rigorous, and
+<br/>widely used (Cohn & Ekman, 2005; Ekman & Rosenberg,
+<br/>2005). Using FACS and viewing video-recorded facial behav-
+<br/>ior at frame rate and slow motion, coders can manually code
+<br/>nearly all possible facial expressions, which are decomposed
+<br/>into action units (AUs). Action units, with some qualifica-
+<br/>tions, are the smallest visually discriminable facial move-
+<br/>ments. By comparison, other systems are less thorough
+<br/>(Malatesta et al., 1989), fail to differentiate between some
+<br/>anatomically distinct movements (Oster, Hegley, & Nagel,
+<br/>1992), consider movements that are not anatomically dis-
+<br/>tinct as separable (Oster et al., 1992), and often assume a one-
+<br/>to-one mapping between facial expression and emotion (for
+<br/>a review of these systems, see Cohn & Ekman, in press).
+<br/>Unlike systems that use emotion labels to describe ex-
+<br/>pression, FACS explicitly distinguishes between facial actions
+<br/>and inferences about what they mean. FACS itself is descrip-
+<br/>tive and includes no emotion-specified descriptors. Hypoth-
+<br/>eses and inferences about the emotional meaning of facial
+<br/>actions are extrinsic to FACS. If one wishes to make emo-
+<br/>tion-based inferences from FACS codes, a variety of related
+<br/>resources exist. These include the FACS Investigators’ Guide
+<br/>(Ekman et al., 2002), the FACS interpretive database (Ekman,
+<br/>Rosenberg, & Hager, 1998), and a large body of empirical
+<br/>research.(Ekman & Rosenberg, 2005). These resources sug-
+<br/>gest combination rules for defining emotion-specified expres-
+<br/>sions from FACS action units, but this inferential step remains
+<br/>extrinsic to FACS. Because of its descriptive power, FACS
+<br/>is regarded by many as the standard measure for facial be-
+<br/>havior and is used widely in diverse fields. Beyond emo-
+<br/>tion science, these include facial neuromuscular disorders
+<br/>(Van Swearingen & Cohn, 2005), neuroscience (Bruce &
+<br/>Young, 1998; Rinn, 1984, 1991), computer vision (Bartlett,
+<br/>203
+<br/>UNPROOFED PAGES </td></tr><tr><td>ea85378a6549bb9eb9bcc13e31aa6a61b655a9af</td><td>Diplomarbeit
+<br/>Template Protection for PCA-LDA-based 3D
+<br/>Face Recognition System
+<br/>von
+<br/>Technische Universität Darmstadt
+<br/>Fachbereich Informatik
+<br/>Fachgebiet Graphisch-Interaktive Systeme
+<br/>Fraunhoferstraße 5
+<br/>64283 Darmstadt
+</td></tr><tr><td>ea2ee5c53747878f30f6d9c576fd09d388ab0e2b</td><td>Viola-Jones based Detectors: How much affects
+<br/>the Training Set?
+<br/>SIANI
+<br/>Edif. Central del Parque Cient´ıfico Tecnol´ogico
+<br/>Universidad de Las Palmas de Gran Canaria
+<br/>35017 - Spain
+</td></tr><tr><td>ea96bc017fb56593a59149e10d5f14011a3744a0</td><td></td></tr><tr><td>e10a257f1daf279e55f17f273a1b557141953ce2</td><td></td></tr><tr><td>e171fba00d88710e78e181c3e807c2fdffc6798a</td><td></td></tr><tr><td>e1ab3b9dee2da20078464f4ad8deb523b5b1792e</td><td>Pre-Training CNNs Using Convolutional
+<br/>Autoencoders
+<br/>TU Berlin
+<br/>TU Berlin
+<br/>Sabbir Ahmmed
+<br/>TU Berlin
+<br/>TU Berlin
+</td></tr><tr><td>e16efd2ae73a325b7571a456618bfa682b51aef8</td><td></td></tr><tr><td>e19ebad4739d59f999d192bac7d596b20b887f78</td><td>Learning Gating ConvNet for Two-Stream based Methods in Action
+<br/>Recognition
+</td></tr><tr><td>e13360cda1ebd6fa5c3f3386c0862f292e4dbee4</td><td></td></tr><tr><td>e1d726d812554f2b2b92cac3a4d2bec678969368</td><td>J Electr Eng Technol.2015; 10(?): 30-40
+<br/>http://dx.doi.org/10.5370/JEET.2015.10.2.030
+<br/>ISSN(Print)
+<br/>1975-0102
+<br/>ISSN(Online) 2093-7423
+<br/>Human Action Recognition Bases on Local Action Attributes
+<br/>and Mohan S Kankanhalli**
+</td></tr><tr><td>e1e6e6792e92f7110e26e27e80e0c30ec36ac9c2</td><td>TSINGHUA SCIENCE AND TECHNOLOGY
+<br/>ISSNll1007-0214
+<br/>0?/?? pp???–???
+<br/>DOI: 10.26599/TST.2018.9010000
+<br/>Volume 1, Number 1, Septembelr 2018
+<br/>Ranking with Adaptive Neighbors
+</td></tr><tr><td>cd9666858f6c211e13aa80589d75373fd06f6246</td><td>A Novel Time Series Kernel for
+<br/>Sequences Generated by LTI Systems
+<br/>V.le delle Scienze Ed.6, DIID, Universit´a degli studi di Palermo, Italy
+</td></tr><tr><td>cd4c047f4d4df7937aff8fc76f4bae7718004f40</td><td></td></tr><tr><td>cd596a2682d74bdfa7b7160dd070b598975e89d9</td><td>Mood Detection: Implementing a facial
+<br/>expression recognition system
+<br/>1. Introduction
+<br/>Facial expressions play a significant role in human dialogue. As a result, there has been
+<br/>considerable work done on the recognition of emotional expressions and the application of this
+<br/>research will be beneficial in improving human-machine dialogue. One can imagine the
+<br/>improvements to computer interfaces, automated clinical (psychological) research or even
+<br/>interactions between humans and autonomous robots.
+<br/>Unfortunately, a lot of the literature does not focus on trying to achieve high recognition rates
+<br/>across multiple databases. In this project we develop our own mood detection system that
+<br/>addresses this challenge. The system involves pre-processing image data by normalizing and
+<br/>applying a simple mask, extracting certain (facial) features using PCA and Gabor filters and then
+<br/>using SVMs for classification and recognition of expressions. Eigenfaces for each class are used
+<br/>to determine class-specific masks which are then applied to the image data and used to train
+<br/>multiple, one against the rest, SVMs. We find that simply using normalized pixel intensities
+<br/>works well with such an approach.
+<br/>Figure 1 – Overview of our system design
+<br/>2. Image pre-processing
+<br/>We performed pre-processing on the images used to train and test our algorithms as follows:
+<br/>1. The location of the eyes is first selected manually
+<br/>2. Images are scaled and cropped to a fixed size (170 x 130) keeping the eyes in all images
+<br/>aligned
+<br/>3. The image is histogram equalized using the mean histogram of all the training images to
+<br/>make it invariant to lighting, skin color etc.
+<br/>4. A fixed oval mask is applied to the image to extract face region. This serves to eliminate
+<br/>the background, hair, ears and other extraneous features in the image which provide no
+<br/>information about facial expression.
+<br/>This approach works reasonably well in capturing expression-relevant facial information across
+<br/>all databases. Examples of pre-processed images from the various datasets are shown in Figure-
+<br/>2a below.
+</td></tr><tr><td>cda4fb9df653b5721ad4fe8b4a88468a410e55ec</td><td>Gabor wavelet transform and its application
+</td></tr><tr><td>cd3005753012409361aba17f3f766e33e3a7320d</td><td>Multilinear Biased Discriminant Analysis: A Novel Method for Facial
+<br/>Action Unit Representation
+</td></tr><tr><td>cd7a7be3804fd217e9f10682e0c0bfd9583a08db</td><td>Women also Snowboard:
+<br/>Overcoming Bias in Captioning Models
+</td></tr><tr><td>ccfcbf0eda6df876f0170bdb4d7b4ab4e7676f18</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 6, NO. 1, JUNE 2011
+<br/>A Dynamic Appearance Descriptor Approach to
+<br/>Facial Actions Temporal Modelling
+</td></tr><tr><td>ccbfc004e29b3aceea091056b0ec536e8ea7c47e</td><td></td></tr><tr><td>cc3c273bb213240515147e8be68c50f7ea22777c</td><td>Gaining Insight Into Films
+<br/>Via Topic Modeling & Visualization
+<br/>KEYWORDS Collaboration, computer vision, cultural
+<br/>analytics, economy of abundance, interactive data
+<br/>visualization
+<br/>We moved beyond misuse when the software actually
+<br/>became useful for film analysis with the addition of audio
+<br/>analysis, subtitle analysis, facial recognition, and topic
+<br/>modeling. Using multiple types of visualizations and
+<br/>a back-and-fourth workflow between people and AI
+<br/>we arrived at an approach for cultural analytics that
+<br/>can be used to review and develop film criticism. Finally,
+<br/>we present ways to apply these techniques to Database
+<br/>Cinema and other aspects of film and video creation.
+<br/>PROJECT DATE 2014
+<br/>URL http://misharabinovich.com/soyummy.html
+</td></tr><tr><td>cc8e378fd05152a81c2810f682a78c5057c8a735</td><td>International Journal of Computer Sciences and Engineering Open Access
+<br/> Research Paper Volume-5, Issue-12 E-ISSN: 2347-2693
+<br/>Expression Invariant Face Recognition System based on Topographic
+<br/>Independent Component Analysis and Inner Product Classifier
+<br/>
+<br/>Department of Electrical Engineering, IIT Delhi, New Delhi, India
+<br/>Available online at: www.ijcseonline.org
+<br/>Received: 07/Nov/2017, Revised: 22/Nov/2017, Accepted: 14/Dec/2017, Published: 31/Dec/2017
+</td></tr><tr><td>cc31db984282bb70946f6881bab741aa841d3a7c</td><td>ALBANIE, VEDALDI: LEARNING GRIMACES BY WATCHING TV
+<br/>Learning Grimaces by Watching TV
+<br/>http://www.robots.ox.ac.uk/~albanie
+<br/>http://www.robots.ox.ac.uk/~vedaldi
+<br/>Engineering Science Department
+<br/>Univeristy of Oxford
+<br/>Oxford, UK
+</td></tr><tr><td>cc8bf03b3f5800ac23e1a833447c421440d92197</td><td></td></tr><tr><td>cc96eab1e55e771e417b758119ce5d7ef1722b43</td><td>An Empirical Study of Recent
+<br/>Face Alignment Methods
+</td></tr><tr><td>e64b683e32525643a9ddb6b6af8b0472ef5b6a37</td><td>Face Recognition and Retrieval in Video
+</td></tr><tr><td>e6b45d5a86092bbfdcd6c3c54cda3d6c3ac6b227</td><td>Pairwise Relational Networks for Face
+<br/>Recognition
+<br/>1 Department of Creative IT Engineering, POSTECH, Korea
+<br/>2 Department of Computer Science and Engineering, POSTECH, Korea
+</td></tr><tr><td>e6865b000cf4d4e84c3fe895b7ddfc65a9c4aaec</td><td>Chapter 15. The critical role of the
+<br/>cold-start problem and incentive systems
+<br/>in emotional Web 2.0 services
+</td></tr><tr><td>e6dc1200a31defda100b2e5ddb27fb7ecbbd4acd</td><td>1921
+<br/>Flexible Manifold Embedding: A Framework
+<br/>for Semi-Supervised and Unsupervised
+<br/>Dimension Reduction
+<br/>0 =
+<br/>, the linear regression function (
+</td></tr><tr><td>e6e5a6090016810fb902b51d5baa2469ae28b8a1</td><td>Title
+<br/>Energy-Efficient Deep In-memory Architecture for NAND
+<br/>Flash Memories
+<br/>Archived version
+<br/>Accepted manuscript: the content is same as the published
+<br/>paper but without the final typesetting by the publisher
+<br/>Published version
+<br/>DOI
+<br/>Published paper
+<br/>URL
+<br/>Authors (contact)
+<br/>10.1109/ISCAS.2018.8351458
+</td></tr><tr><td>e6540d70e5ffeed9f447602ea3455c7f0b38113e</td><td></td></tr><tr><td>e6ee36444038de5885473693fb206f49c1369138</td><td></td></tr><tr><td>f913bb65b62b0a6391ffa8f59b1d5527b7eba948</td><td></td></tr><tr><td>f96bdd1e2a940030fb0a89abbe6c69b8d7f6f0c1</td><td></td></tr><tr><td>f0cee87e9ecedeb927664b8da44b8649050e1c86</td><td></td></tr><tr><td>f0f4f16d5b5f9efe304369120651fa688a03d495</td><td>Temporal Generative Adversarial Nets
+<br/>Preferred Networks inc., Japan
+</td></tr><tr><td>f06b015bb19bd3c39ac5b1e4320566f8d83a0c84</td><td></td></tr><tr><td>f0a3f12469fa55ad0d40c21212d18c02be0d1264</td><td>Sparsity Sharing Embedding for Face
+<br/>Verification
+<br/>Department of Electrical Engineering, KAIST, Daejeon, Korea
+</td></tr><tr><td>f7dea4454c2de0b96ab5cf95008ce7144292e52a</td><td></td></tr><tr><td>f7b422df567ce9813926461251517761e3e6cda0</td><td>FACE AGING WITH CONDITIONAL GENERATIVE ADVERSARIAL NETWORKS
+<br/>(cid:63) Orange Labs, 4 rue Clos Courtel, 35512 Cesson-S´evign´e, France
+<br/>† Eurecom, 450 route des Chappes, 06410 Biot, France
+</td></tr><tr><td>f79c97e7c3f9a98cf6f4a5d2431f149ffacae48f</td><td>Provided by the author(s) and NUI Galway in accordance with publisher policies. Please cite the published
+<br/>version when available.
+<br/>Title
+<br/>On color texture normalization for active appearance models
+<br/>Author(s)
+<br/>Ionita, Mircea C.; Corcoran, Peter M.; Buzuloiu, Vasile
+<br/>Publication
+<br/>Date
+<br/>2009-05-12
+<br/>Publication
+<br/>Information
+<br/>Ionita, M. C., Corcoran, P., & Buzuloiu, V. (2009). On Color
+<br/>Texture Normalization for Active Appearance Models. Image
+<br/>Processing, IEEE Transactions on, 18(6), 1372-1378.
+<br/>Publisher
+<br/>IEEE
+<br/>Link to
+<br/>publisher's
+<br/>version
+<br/>http://dx.doi.org/10.1109/TIP.2009.2017163
+<br/>Item record
+<br/>http://hdl.handle.net/10379/1350
+<br/>Some rights reserved. For more information, please see the item record link above.
+<br/>Downloaded 2017-06-17T22:38:27Z
+</td></tr><tr><td>f7452a12f9bd927398e036ea6ede02da79097e6e</td><td></td></tr><tr><td>f7dcadc5288653ec6764600c7c1e2b49c305dfaa</td><td>Copyright
+<br/>by
+<br/>Adriana Ivanova Kovashka
+<br/>2014
+</td></tr><tr><td>f7de943aa75406fe5568fdbb08133ce0f9a765d4</td><td>Project 1.5: Human Identification at a Distance - Hornak, Adjeroh, Cukic, Gautum, & Ross
+<br/>Project 1.5
+<br/>Biometric Identification and Surveillance1
+<br/>Year 5 Deliverable 
+<br/>Technical Report: 
+<br/>and
+<br/>Research Challenges in Biometrics
+<br/>Indexed biography of relevant biometric research literature
+<br/>Donald Adjeroh, Bojan Cukic, Arun Ross 
+<br/>April, 2014  
+<br/>                                                            
+<br/>1 "This research was supported by the United States Department of Homeland Security through the National Center for Border Security
+<br/>and Immigration (BORDERS) under grant number 2008-ST-061-BS0002. However, any opinions, findings, and conclusions or
+<br/>recommendations in this document are those of the authors and do not necessarily reflect views of the United States Department of
+<br/>Homeland Security."
+</td></tr><tr><td>f75852386e563ca580a48b18420e446be45fcf8d</td><td>ILLUMINATION INVARIANT FACE RECOGNITION
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>
+<br/>ENEE 631: Digital Image and Video Processing
+<br/>Instructor: Dr. K. J. Ray Liu
+<br/>Term Project - Spring 2006
+<br/>1.
+<br/>INTRODUCTION
+<br/>
+<br/>
+<br/>The performance of the Face Recognition algorithms is severely affected by two
+<br/>important factors: the change in Pose and Illumination conditions of the subjects. The
+<br/>changes in Illumination conditions of the subjects can be so drastic that, the variation in
+<br/>lighting will be of the similar order as that of the variation due to the change in subjects
+<br/>[1] and this can result in misclassification.
+<br/>
+<br/> For example, in the acquisition of the face of a person from a real time video, the
+<br/>ambient conditions will cause different lighting variations on the tracked face. Some
+<br/>examples of images with different illumination conditions are shown in Fig. 1. In this
+<br/>project, we study some algorithms that are capable of performing Illumination Invariant
+<br/>Face Recognition. The performances of these algorithms were compared on the CMU-
+<br/>Illumination dataset [13], by using the entire face as the input to the algorithms. Then, a
+<br/>model of dividing the face into four regions is proposed and the performance of the
+<br/>algorithms on these new features is analyzed.
+<br/>
+<br/>
+</td></tr><tr><td>f78863f4e7c4c57744715abe524ae4256be884a9</td><td></td></tr><tr><td>f77c9bf5beec7c975584e8087aae8d679664a1eb</td><td>Local Deep Neural Networks for Age and Gender Classification
+<br/>March 27, 2017
+</td></tr><tr><td>e8410c4cd1689829c15bd1f34995eb3bd4321069</td><td></td></tr><tr><td>e8fdacbd708feb60fd6e7843b048bf3c4387c6db</td><td>Deep Learning
+<br/>Hinnerup Net A/S
+<br/>www.hinnerup.net
+<br/>July 4, 2014
+<br/>Introduction
+<br/>Deep learning is a topic in the field of artificial intelligence (AI) and is a relatively
+<br/>new research area although based on the popular artificial neural networks (supposedly
+<br/>mirroring brain function). With the development of the perceptron in the 1950s and
+<br/>1960s by Frank RosenBlatt, research began on artificial neural networks. To further
+<br/>mimic the architectural depth of the brain, researchers wanted to train a deep multi-
+<br/>layer neural network – this, however, did not happen until Geoffrey Hinton in 2006
+<br/>introduced Deep Belief Networks [1].
+<br/>Recently, the topic of deep learning has gained public interest. Large web companies such
+<br/>as Google and Facebook have a focused research on AI and an ever increasing amount
+<br/>of compute power, which has led to researchers finally being able to produce results
+<br/>that are of interest to the general public. In July 2012 Google trained a deep learning
+<br/>network on YouTube videos with the remarkable result that the network learned to
+<br/>recognize humans as well as cats [6], and in January this year Google successfully used
+<br/>deep learning on Street View images to automatically recognize house numbers with
+<br/>an accuracy comparable to that of a human operator [5]. In March this year Facebook
+<br/>announced their DeepFace algorithm that is able to match faces in photos with Facebook
+<br/>users almost as accurately as a human can do [9].
+<br/>Deep learning and other AI are here to stay and will become more and more present in
+<br/>our daily lives, so we had better make ourselves acquainted with the technology. Let’s
+<br/>dive into the deep water and try not to drown!
+<br/>Data Representations
+<br/>Before presenting data to an AI algorithm, we would normally prepare the data to make
+<br/>it feasible to work with. For instance, if the data consists of images, we would take each
+</td></tr><tr><td>e8b2a98f87b7b2593b4a046464c1ec63bfd13b51</td><td>CMS-RCNN: Contextual Multi-Scale
+<br/>Region-based CNN for Unconstrained Face
+<br/>Detection
+</td></tr><tr><td>e8c6c3fc9b52dffb15fe115702c6f159d955d308</td><td>13
+<br/>Linear Subspace Learning for
+<br/>Facial Expression Analysis
+<br/>Philips Research
+<br/>The Netherlands
+<br/>1. Introduction
+<br/>Facial expression, resulting from movements of the facial muscles, is one of the most
+<br/>powerful, natural, and immediate means for human beings to communicate their emotions
+<br/>and intentions. Some examples of facial expressions are shown in Fig. 1. Darwin (1872) was
+<br/>the first to describe in detail the specific facial expressions associated with emotions in
+<br/>animals and humans; he argued that all mammals show emotions reliably in their faces.
+<br/>Psychological studies (Mehrabian, 1968; Ambady & Rosenthal, 1992) indicate that facial
+<br/>expressions, with other non-verbal cues, play a major and fundamental role in face-to-face
+<br/>communication.
+<br/>Fig. 1. Facial expressions of George W. Bush.
+<br/>Machine analysis of facial expressions, enabling computers to analyze and interpret facial
+<br/>expressions as humans do, has many important applications including intelligent human-
+<br/>computer interaction, computer animation, surveillance and security, medical diagnosis,
+<br/>law enforcement, and awareness system (Shan, 2007). Driven by its potential applications
+<br/>and theoretical interests of cognitive and psychological scientists, automatic facial
+<br/>expression analysis has attracted much attention in last two decades (Pantic & Rothkrantz,
+<br/>2000a; Fasel & Luettin, 2003; Tian et al, 2005; Pantic & Bartlett, 2007). It has been studied in
+<br/>multiple disciplines such as psychology, cognitive science, computer vision, pattern
+<br/>Source: Machine Learning, Book edited by: Abdelhamid Mellouk and Abdennacer Chebira,
+<br/> ISBN 978-3-902613-56-1, pp. 450, February 2009, I-Tech, Vienna, Austria
+<br/>www.intechopen.com
+</td></tr><tr><td>fab83bf8d7cab8fe069796b33d2a6bd70c8cefc6</td><td>Draft: Evaluation Guidelines for Gender
+<br/>Classification and Age Estimation
+<br/>July 1, 2011
+<br/>Introduction
+<br/>In previous research on gender classification and age estimation did not use a
+<br/>standardised evaluation procedure. This makes comparison the different ap-
+<br/>proaches difficult.
+<br/>Thus we propose here a benchmarking and evaluation protocol for gender
+<br/>classification as well as age estimation to set a common ground for future re-
+<br/>search in these two areas.
+<br/>The evaluations are designed such that there is one scenario under controlled
+<br/>labratory conditions and one under uncontrolled real life conditions.
+<br/>The datasets were selected with the criteria of being publicly available for
+<br/>research purposes.
+<br/>File lists for the folds corresponding to the individual benchmarking proto-
+<br/>cols will be provided over our website at http://face.cs.kit.edu/befit. We
+<br/>will provide two kinds of folds for each of the tasks and conditions: one set of
+<br/>folds using the whole dataset and one set of folds using a reduced dataset, which
+<br/>is approximately balanced in terms of age, gender and ethnicity.
+<br/>2 Gender Classification
+<br/>In this task the goal is to determine the gender of the persons depicted in the
+<br/>individual images.
+<br/>2.1 Data
+<br/>In previous works one of the most commonly used databases is the Feret database [1,
+<br/>2]. We decided here not to take this database, because of its low number of im-
+<br/>ages.
+</td></tr><tr><td>fa08a4da5f2fa39632d90ce3a2e1688d147ece61</td><td>Supplementary material for
+<br/>“Unsupervised Creation of Parameterized Avatars”
+<br/>1 Summary of Notations
+<br/>Tab. 1 itemizes the symbols used in the submission. Fig. 2,3,4 of the main text illustrate many of these
+<br/>symbols.
+<br/>2 DANN results
+<br/>Fig. 1 shows side by side samples of the original image and the emoji generated by the method of [1].
+<br/>As can be seen, these results do not preserve the identity very well, despite considerable effort invested in
+<br/>finding suitable architectures.
+<br/>3 Multiple Images Per Person
+<br/>Following [4], we evaluate the visual quality that is obtained per person and not just per image, by testing
+<br/>TOS on the Facescrub dataset [3]. For each person p, we considered the set of their images Xp, and selected
+<br/>the emoji that was most similar to their source image, i.e., the one for which:
+<br/>||f (x) − f (e(c(G(x))))||.
+<br/>argmin
+<br/>x∈Xp
+<br/>(1)
+<br/>Fig. 2 depicts the results obtained by this selection method on sample images form the Facescrub dataset
+<br/>(it is an extension of Fig. 7 of the main text). The figure also shows, for comparison, the DTN [4] result for
+<br/>the same image.
+<br/>4 Detailed Architecture of the Various Networks
+<br/>In this section we describe the architectures of the networks used in for the emoji and avatar experiments.
+<br/>4.1 TOS
+<br/>Network g maps DeepFace’s 256-dimensional representation [5] into 64 × 64 RGB emoji images. Follow-
+<br/>ing [4], this is done through a network with 9 blocks, each consisting of a convolution, batch-normalization
+<br/>and ReLU, except the last layer which employs Tanh activation. The odd blocks 1,3,5,7,9 perform upscaling
+<br/>convolutions with 512-256-128-64-3 filters respectively of spatial size 4 × 4. The even ones perform 1 × 1
+<br/>convolutions [2]. The odd blocks use a stride of 2 and padding of 1, excluding the first one which does not
+<br/>use stride or padding.
+<br/>Network e maps emoji parameterization into the matching 64× 64 RGB emoji. The parameterization is
+<br/>given as binary vectors in R813 for emojis; Avatar parameterization is in R354. While there are dependencies
+<br/>among the various dimensions (an emoji cannot have two hairstyles at once), the binary representation is
+<br/>chosen for its simplicity and generality. e is trained in a fully supervised way, using pairs of matching
+<br/>parameterization vectors and images in a supervised manner.
+<br/>The architecture of e employs five upscaling convolutions with 512-256-128-64-3 filters respectively,
+<br/>each of spatial size 4×4. All layers except the last one are batch normalized followed by a ReLU activation.
+<br/>The last layer is followed by Tanh activation, generating an RGB image with values in range [−1, 1]. All
+<br/>the layers use a stride of 2 and padding of 1, excluding the first one which does not use stride or padding.
+</td></tr><tr><td>faead8f2eb54c7bc33bc7d0569adc7a4c2ec4c3b</td><td></td></tr><tr><td>faf5583063682e70dedc4466ac0f74eeb63169e7</td><td></td></tr><tr><td>fad895771260048f58d12158a4d4d6d0623f4158</td><td>Audio-Visual Emotion
+<br/>Recognition For Natural
+<br/>Human-Robot Interaction
+<br/>Dissertation zur Erlangung des akademischen Grades
+<br/>Doktor der Ingenieurwissenschaften (Dr.-Ing.)
+<br/>vorgelegt von
+<br/>an der Technischen Fakultät der Universität Bielefeld
+<br/>15. März 2010
+</td></tr><tr><td>ff8315c1a0587563510195356c9153729b533c5b</td><td>432
+<br/>Zapping Index:Using Smile to Measure
+<br/>Advertisement Zapping Likelihood
+</td></tr><tr><td>ff44d8938c52cfdca48c80f8e1618bbcbf91cb2a</td><td>Towards Video Captioning with Naming: a
+<br/>Novel Dataset and a Multi-Modal Approach
+<br/>Dipartimento di Ingegneria “Enzo Ferrari”
+<br/>Universit`a degli Studi di Modena e Reggio Emilia
+</td></tr><tr><td>fffefc1fb840da63e17428fd5de6e79feb726894</td><td>Fine-Grained Age Estimation in the wild with
+<br/>Attention LSTM Networks
+</td></tr><tr><td>ff398e7b6584d9a692e70c2170b4eecaddd78357</td><td></td></tr><tr><td>ffd81d784549ee51a9b0b7b8aaf20d5581031b74</td><td>Performance Analysis of Retina and DoG
+<br/>Filtering Applied to Face Images for Training
+<br/>Correlation Filters
+<br/>Everardo Santiago Ram(cid:19)(cid:16)rez1, Jos(cid:19)e (cid:19)Angel Gonz(cid:19)alez Fraga1, Omar (cid:19)Alvarez
+<br/>1 Facultad de Ciencias, Universidad Aut(cid:19)onoma de Baja California,
+<br/>Carretera Transpeninsular Tijuana-Ensenada, N(cid:19)um. 3917, Colonia Playitas,
+<br/>Ensenada, Baja California, C.P. 22860
+<br/>{everardo.santiagoramirez,angel_fraga,
+<br/>2 Facultad de Ingenier(cid:19)(cid:16)a, Arquitectura y Dise~no, Universidad Aut(cid:19)onoma de Baja
+<br/>California, Carretera Transpeninsular Tijuana-Ensenada, N(cid:19)um. 3917, Colonia
+<br/>Playitas, Ensenada, Baja California, C.P. 22860
+</td></tr><tr><td>ff60d4601adabe04214c67e12253ea3359f4e082</td><td></td></tr><tr><td>ff8ef43168b9c8dd467208a0b1b02e223b731254</td><td>BreakingNews: Article Annotation by
+<br/>Image and Text Processing
+</td></tr><tr><td>ffcbedb92e76fbab083bb2c57d846a2a96b5ae30</td><td></td></tr><tr><td>c50d73557be96907f88b59cfbd1ab1b2fd696d41</td><td>JournalofElectronicImaging13(3),474–485(July2004).
+<br/>Semiconductor sidewall shape estimation
+<br/>Oak Ridge National Laboratory
+<br/>Oak Ridge, Tennessee 37831-6010
+</td></tr><tr><td>c54f9f33382f9f656ec0e97d3004df614ec56434</td><td></td></tr><tr><td>c574c72b5ef1759b7fd41cf19a9dcd67e5473739</td><td>Zlatintsi et al. EURASIP Journal on Image and Video Processing (2017) 2017:54
+<br/>DOI 10.1186/s13640-017-0194-1
+<br/>EURASIP Journal on Image
+<br/>and Video Processing
+<br/>RESEARCH
+<br/>Open Access
+<br/>COGNIMUSE: a multimodal video
+<br/>database annotated with saliency, events,
+<br/>semantics and emotion with application to
+<br/>summarization
+</td></tr><tr><td>c5a561c662fc2b195ff80d2655cc5a13a44ffd2d</td><td>Using Language to Learn Structured Appearance
+<br/>Models for Image Annotation
+</td></tr><tr><td>c5fe40875358a286594b77fa23285fcfb7bda68e</td><td></td></tr><tr><td>c5be0feacec2860982fbbb4404cf98c654142489</td><td>Semi-Qualitative Probabilistic Networks in Computer
+<br/>Vision Problems
+<br/>Troy, NY 12180, USA.
+<br/>Troy, NY 12180, USA.
+<br/>Troy, NY 12180, USA.
+<br/>Troy, NY 12180, USA.
+<br/>Received: ***
+<br/>Revised: ***
+</td></tr><tr><td>c5844de3fdf5e0069d08e235514863c8ef900eb7</td><td>Lam S K et al. / (IJCSE) International Journal on Computer Science and Engineering
+<br/>Vol. 02, No. 08, 2010, 2659-2665
+<br/>A Study on Similarity Computations in Template
+<br/>Matching Technique for Identity Verification
+<br/>Lam, S. K., Yeong, C. Y., Yew, C. T., Chai, W. S., Suandi, S. A.
+<br/>Intelligent Biometric Group, School of Electrical and Electronic Engineering
+<br/>Engineering Campus, Universiti Sains Malaysia
+<br/>14300 Nibong Tebal, Pulau Pinang, MALAYSIA
+</td></tr><tr><td>c220f457ad0b28886f8b3ef41f012dd0236cd91a</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+<br/>Crystal Loss and Quality Pooling for
+<br/>Unconstrained Face Verification and Recognition
+</td></tr><tr><td>c254b4c0f6d5a5a45680eb3742907ec93c3a222b</td><td>A Fusion-based Gender Recognition Method
+<br/>Using Facial Images
+</td></tr><tr><td>c28461e266fe0f03c0f9a9525a266aa3050229f0</td><td>Automatic Detection of Facial Feature Points via
+<br/>HOGs and Geometric Prior Models
+<br/>1 Computer Vision Center , Universitat Aut`onoma de Barcelona
+<br/>2 Universitat Oberta de Catalunya
+<br/>3 Dept. de Matem`atica Aplicada i An`alisi
+<br/>Universitat de Barcelona
+</td></tr><tr><td>c29e33fbd078d9a8ab7adbc74b03d4f830714cd0</td><td></td></tr><tr><td>f68ed499e9d41f9c3d16d843db75dc12833d988d</td><td></td></tr><tr><td>f6ca29516cce3fa346673a2aec550d8e671929a6</td><td>International Journal of Engineering and Advanced Technology (IJEAT)
+<br/>ISSN: 2249 – 8958, Volume-2, Issue-4, April 2013
+<br/>Algorithm for Face Matching Using Normalized
+<br/>Cross-Correlation
+<br/>
+</td></tr><tr><td>f6c70635241968a6d5fd5e03cde6907022091d64</td><td></td></tr><tr><td>f6ce34d6e4e445cc2c8a9b8ba624e971dd4144ca</td><td>Cross-label Suppression: A Discriminative and Fast
+<br/>Dictionary Learning with Group Regularization
+<br/>April 24, 2017
+</td></tr><tr><td>f6abecc1f48f6ec6eede4143af33cc936f14d0d0</td><td></td></tr><tr><td>f6fa97fbfa07691bc9ff28caf93d0998a767a5c1</td><td>k2-means for fast and accurate large scale clustering
+<br/>Computer Vision Lab
+<br/>D-ITET
+<br/>ETH Zurich
+<br/>Computer Vision Lab
+<br/>D-ITET
+<br/>ETH Zurich
+<br/>ESAT, KU Leuven
+<br/>D-ITET, ETH Zurich
+</td></tr><tr><td>e9ed17fd8bf1f3d343198e206a4a7e0561ad7e66</td><td>International Journal of Enhanced Research in Science Technology & Engineering, ISSN: 2319-7463
+<br/>Vol. 3 Issue 1, January-2014, pp: (362-365), Impact Factor: 1.252, Available online at: www.erpublications.com
+<br/>Cognitive Learning for Social Robot through
+<br/>Facial Expression from Video Input
+<br/>1Department of Automation & Robotics, 2Department of Computer Science & Engg.
+</td></tr><tr><td>e988be047b28ba3b2f1e4cdba3e8c94026139fcf</td><td>Multi-Task Convolutional Neural Network for
+<br/>Pose-Invariant Face Recognition
+</td></tr><tr><td>e9d43231a403b4409633594fa6ccc518f035a135</td><td>Deformable Part Models with CNN Features
+<br/>Kokkinos1,2
+<br/>1 Ecole Centrale Paris,2 INRIA, 3TTI-Chicago (cid:63)
+</td></tr><tr><td>e9fcd15bcb0f65565138dda292e0c71ef25ea8bb</td><td>Repositorio Institucional de la Universidad Autónoma de Madrid
+<br/>https://repositorio.uam.es
+<br/>Esta es la versión de autor de la comunicación de congreso publicada en:
+<br/>This is an author produced version of a paper published in:
+<br/>Highlights on Practical Applications of Agents and Multi-Agent Systems:
+<br/>International Workshops of PAAMS. Communications in Computer and
+<br/>Information Science, Volumen 365. Springer, 2013. 223-230
+<br/>DOI: http://dx.doi.org/10.1007/978-3-642-38061-7_22
+<br/>Copyright: © 2013 Springer-Verlag
+<br/>El acceso a la versión del editor puede requerir la suscripción del recurso
+<br/>Access to the published version may require subscription
+</td></tr><tr><td>e9363f4368b04aeaa6d6617db0a574844fc59338</td><td>BENCHIP: Benchmarking Intelligence
+<br/>Processors
+<br/>1ICT CAS,2Cambricon,3Alibaba Infrastructure Service, Alibaba Group
+<br/>4IFLYTEK,5JD,6RDA Microelectronics,7AMD
+</td></tr><tr><td>f16a605abb5857c39a10709bd9f9d14cdaa7918f</td><td>Fast greyscale road sign model matching
+<br/>and recognition
+<br/>Centre de Visió per Computador
+<br/>Edifici O – Campus UAB, 08193 Bellaterra, Barcelona, Catalonia, Spain
+</td></tr><tr><td>f1748303cc02424704b3a35595610890229567f9</td><td></td></tr><tr><td>f19ab817dd1ef64ee94e94689b0daae0f686e849</td><td>TECHNISCHE UNIVERSIT¨AT M ¨UNCHEN
+<br/>Lehrstuhl f¨ur Mensch-Maschine-Kommunikation
+<br/>Blickrichtungsunabh¨angige Erkennung von
+<br/>Personen in Bild- und Tiefendaten
+<br/>Andre St¨ormer
+<br/>Vollst¨andiger Abdruck der von der Fakult¨at f¨ur Elektrotechnik und Informationstechnik
+<br/>der Technischen Universit¨at M¨unchen zur Erlangung des akademischen Grades eines
+<br/>Doktor-Ingenieurs (Dr.-Ing.)
+<br/>genehmigten Dissertation.
+<br/>Vorsitzender:
+<br/>Univ.-Prof. Dr.-Ing. Thomas Eibert
+<br/>Pr¨ufer der Dissertation:
+<br/>1. Univ.-Prof. Dr.-Ing. habil. Gerhard Rigoll
+<br/>2. Univ.-Prof. Dr.-Ing. Horst-Michael Groß,
+<br/>Technische Universit¨at Ilmenau
+<br/>Die Dissertation wurde am 16.06.2009 bei der Technischen Universit¨at M¨unchen einge-
+<br/>reicht und durch die Fakult¨at f¨ur Elektrotechnik und Informationstechnik am 30.10.2009
+<br/>angenommen.
+</td></tr><tr><td>e76798bddd0f12ae03de26b7c7743c008d505215</td><td></td></tr><tr><td>e726acda15d41b992b5a41feabd43617fab6dc23</td><td></td></tr><tr><td>e7b6887cd06d0c1aa4902335f7893d7640aef823</td><td>Modelling of Facial Aging and Kinship: A Survey
+</td></tr><tr><td>cb004e9706f12d1de83b88c209ac948b137caae0</td><td>Face Aging Effect Simulation using Hidden Factor
+<br/>Analysis Joint Sparse Representation
+</td></tr><tr><td>cb9092fe74ea6a5b2bb56e9226f1c88f96094388</td><td></td></tr><tr><td>cb08f679f2cb29c7aa972d66fe9e9996c8dfae00</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 13, NO. 9, SEPTEMBER 2014
+<br/>Action Understanding
+<br/>with Multiple Classes of Actors
+</td></tr><tr><td>cb84229e005645e8623a866d3d7956c197f85e11</td><td>IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. X, NO. X, MONTH 201X
+<br/>Disambiguating Visual Verbs
+</td></tr><tr><td>cbe859d151466315a050a6925d54a8d3dbad591f</td><td>GAZE SHIFTS AS DYNAMICAL RANDOM SAMPLING
+<br/>Dipartimento di Scienze dell’Informazione
+<br/>Universit´a di Milano
+<br/>Via Comelico 39/41
+<br/>20135 Milano, Italy
+</td></tr><tr><td>f8c94afd478821681a1565d463fc305337b02779</td><td>
+<br/>www.semargroup.org,
+<br/>www.ijsetr.com
+<br/>
+<br/>ISSN 2319-8885
+<br/>Vol.03,Issue.25
+<br/>September-2014,
+<br/>Pages:5079-5085
+<br/>Design and Implementation of Robust Face Recognition System for
+<br/>Uncontrolled Pose and Illumination Changes
+<br/>2
+</td></tr><tr><td>f8ec92f6d009b588ddfbb47a518dd5e73855547d</td><td>J Inf Process Syst, Vol.10, No.3, pp.443~458, September 2014
+<br/>
+<br/>ISSN 1976-913X (Print)
+<br/>ISSN 2092-805X (Electronic)
+<br/>Extreme Learning Machine Ensemble Using
+<br/>Bagging for Facial Expression Recognition
+</td></tr><tr><td>f869601ae682e6116daebefb77d92e7c5dd2cb15</td><td></td></tr><tr><td>f8ed5f2c71e1a647a82677df24e70cc46d2f12a8</td><td>International Journal of Scientific & Engineering Research, Volume 2, Issue 12, December-2011 1
+<br/>ISSN 2229-5518
+<br/>Artificial Neural Network Design and Parameter
+<br/>Optimization for Facial Expressions Recognition
+</td></tr><tr><td>cef841f27535c0865278ee9a4bc8ee113b4fb9f3</td><td></td></tr><tr><td>ce85d953086294d989c09ae5c41af795d098d5b2</td><td>This article has been accepted for inclusion in a future issue of this journal. Content is final as presented, with the exception of pagination.
+<br/>Bilinear Analysis for Kernel Selection and
+<br/>Nonlinear Feature Extraction
+</td></tr><tr><td>ce691a37060944c136d2795e10ed7ba751cd8394</td><td></td></tr><tr><td>ce3f3088d0c0bf236638014a299a28e492069753</td><td></td></tr><tr><td>ce9a61bcba6decba72f91497085807bface02daf</td><td>Eigen-Harmonics Faces: Face Recognition under Generic Lighting
+<br/>1Graduate School, CAS, Beijing, China, 100080
+<br/>2ICT-ISVISION Joint R&D Laboratory for Face Recognition, CAS, Beijing, China, 100080
+<br/>Emails: {lyqing, sgshan, wgao}jdl.ac.cn
+</td></tr><tr><td>cef6cffd7ad15e7fa5632269ef154d32eaf057af</td><td>Emotion Detection Through Facial Feature
+<br/>Recognition
+<br/>through consistent
+</td></tr><tr><td>cebfafea92ed51b74a8d27c730efdacd65572c40</td><td>JANUARY 2006
+<br/>31
+<br/>Matching 2.5D Face Scans to 3D Models
+</td></tr><tr><td>ce54e891e956d5b502a834ad131616786897dc91</td><td>International Journal of Science and Research (IJSR)
+<br/>ISSN (Online): 2319-7064
+<br/>Index Copernicus Value (2013): 6.14 | Impact Factor (2014): 5.611
+<br/>Face Recognition Using LTP Algorithm
+<br/>1ECE & KUK
+<br/>2Assistant Professor (ECE)
+<br/>Volume 4 Issue 12, December 2015
+<br/>Licensed Under Creative Commons Attribution CC BY
+<br/>www.ijsr.net
+<br/> Variation in luminance: Third main challenge that
+<br/>appears in face recognition process is the luminance. Due
+<br/>to variation in the luminance the representation get varied
+<br/>from the original image. The person with same poses
+<br/>expression and seen from same viewpoint can be appear
+<br/>very different due to variation in lightening.
+</td></tr><tr><td>e0dedb6fc4d370f4399bf7d67e234dc44deb4333</td><td>Supplementary Material: Multi-Task Video Captioning with Video and
+<br/>Entailment Generation
+<br/>UNC Chapel Hill
+<br/>1 Experimental Setup
+<br/>1.1 Datasets
+<br/>1.1.1 Video Captioning Datasets
+<br/>YouTube2Text or MSVD The Microsoft Re-
+<br/>search Video Description Corpus (MSVD) or
+<br/>YouTube2Text (Chen and Dolan, 2011) is used
+<br/>for our primary video captioning experiments. It
+<br/>has 1970 YouTube videos in the wild with many
+<br/>diverse captions in multiple languages for each
+<br/>video. Caption annotations to these videos are
+<br/>collected using Amazon Mechanical Turk (AMT).
+<br/>All our experiments use only English captions. On
+<br/>average, each video has 40 captions, and the over-
+<br/>all dataset has about 80, 000 unique video-caption
+<br/>pairs. The average clip duration is roughly 10 sec-
+<br/>onds. We used the standard split as stated in Venu-
+<br/>gopalan et al. (2015), i.e., 1200 videos for training,
+<br/>100 videos for validation, and 670 for testing.
+<br/>MSR-VTT MSR-VTT is a recent collection of
+<br/>10, 000 video clips of 41.2 hours duration (i.e.,
+<br/>average duration of 15 seconds), which are an-
+<br/>notated by AMT workers. It has 200, 000 video
+<br/>clip-sentence pairs covering diverse content from
+<br/>a commercial video search engine. On average,
+<br/>each clip is annotated with 20 natural language
+<br/>captions. We used the standard split as provided
+<br/>in (Xu et al., 2016), i.e., 6, 513 video clips for
+<br/>training, 497 for validation, and 2, 990 for testing.
+<br/>M-VAD M-VAD is a movie description dataset
+<br/>with 49, 000 video clips collected from 92 movies,
+<br/>with the average clip duration being 6 seconds.
+<br/>Alignment of descriptions to video clips is done
+<br/>through an automatic procedure using Descrip-
+<br/>tive Video Service (DVS) provided for the movies.
+<br/>Each video clip description has only 1 or 2 sen-
+<br/>tences, making most evaluation metrics (except
+<br/>paraphrase-based METEOR) infeasible. Again,
+<br/>we used the standard train/val/test split as pro-
+<br/>vided in Torabi et al. (2015).
+<br/>1.1.2 Video Prediction Dataset
+<br/>For our unsupervised video representation learn-
+<br/>ing task, we use the UCF-101 action videos
+<br/>dataset (Soomro et al., 2012), which contains
+<br/>13, 320 video clips of 101 action categories and
+<br/>with an average clip length of 7.21 seconds each.
+<br/>This dataset suits our video captioning task well
+<br/>because both contain short video clips of a sin-
+<br/>gle action or few actions, and hence using future
+<br/>frame prediction on UCF-101 helps learn more ro-
+<br/>bust and context-aware video representations for
+<br/>our short clip video captioning task. We use the
+<br/>standard split of 9, 500 videos for training (we
+<br/>don’t need any validation set in our setup because
+<br/>we directly tune on the validation set of the video
+<br/>captioning task).
+<br/>the
+<br/>three
+<br/>video
+<br/>captioning
+<br/>1.2 Pre-trained Visual Frame Features
+<br/>For
+<br/>datasets
+<br/>(Youtube2Text, MSR-VTT, M-VAD) and the
+<br/>unsupervised video prediction dataset (UCF-101),
+<br/>we fix our sampling rate to 3f ps to bring uni-
+<br/>formity in the temporal representation of actions
+<br/>across all videos. These sampled frames are then
+<br/>converted into features using several state-of-the-
+<br/>art pre-trained models on ImageNet (Deng et al.,
+<br/>2009) – VGGNet
+<br/>(Simonyan and Zisserman,
+<br/>2015), GoogLeNet (Szegedy et al., 2015; Ioffe
+<br/>and Szegedy, 2015), and Inception-v4 (Szegedy
+<br/>et al., 2016). For VGGNet, we use its f c7 layer
+<br/>features with dimension 4096. For GoogLeNet
+<br/>and Inception-v4, we use the layer before the fully
+<br/>connected layer with dimensions 1024 and 1536,
+<br/>respectively. We follow standard preprocessing
+<br/>and convert all the natural language descriptions
+<br/>to lower case and tokenize the sentences and
+<br/>remove punctuations.
+</td></tr><tr><td>e096b11b3988441c0995c13742ad188a80f2b461</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>DeepProposals: Hunting Objects and Actions by Cascading
+<br/>Deep Convolutional Layers
+<br/>Van Gool
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>e0c081a007435e0c64e208e9918ca727e2c1c44e</td><td></td></tr><tr><td>e00d4e4ba25fff3583b180db078ef962bf7d6824</td><td>Preprints (www.preprints.org) | NOT PEER-REVIEWED | Posted: 20 March 2017 doi:10.20944/preprints201703.0152.v1
+<br/>Article
+<br/>Face Verification with Multi-Task and Multi-Scale
+<br/>Features Fusion
+</td></tr><tr><td>e0939b4518a5ad649ba04194f74f3413c793f28e</td><td>Technical Report
+<br/>UCAM-CL-TR-636
+<br/>ISSN 1476-2986
+<br/>Number 636
+<br/>Computer Laboratory
+<br/>Mind-reading machines:
+<br/>automated inference
+<br/>of complex mental states
+<br/>July 2005
+<br/>15 JJ Thomson Avenue
+<br/>Cambridge CB3 0FD
+<br/>United Kingdom
+<br/>phone +44 1223 763500
+<br/>http://www.cl.cam.ac.uk/
+</td></tr><tr><td>e0765de5cabe7e287582532456d7f4815acd74c1</td><td></td></tr><tr><td>e013c650c7c6b480a1b692bedb663947cd9d260f</td><td>860
+<br/>Robust Image Analysis With Sparse Representation
+<br/>on Quantized Visual Features
+</td></tr><tr><td>46a4551a6d53a3cd10474ef3945f546f45ef76ee</td><td>2014 IEEE Intelligent Vehicles Symposium (IV)
+<br/>June 8-11, 2014. Dearborn, Michigan, USA
+<br/>978-1-4799-3637-3/14/$31.00 ©2014 IEEE
+<br/>344
+</td></tr><tr><td>4686bdcee01520ed6a769943f112b2471e436208</td><td>Utsumi et al. IPSJ Transactions on Computer Vision and
+<br/>Applications (2017) 9:11
+<br/>DOI 10.1186/s41074-017-0024-5
+<br/>IPSJ Transactions on Computer
+<br/>Vision and Applications
+<br/>EXPRESS PAPER
+<br/>Open Access
+<br/>Fast search based on generalized
+<br/>similarity measure
+</td></tr><tr><td>4688787d064e59023a304f7c9af950d192ddd33e</td><td>Investigating the Discriminative Power of Keystroke
+<br/>Sound
+<br/>and Dimitris Metaxas, Member, IEEE
+</td></tr><tr><td>46e86cdb674440f61b6658ef3e84fea95ea51fb4</td><td></td></tr><tr><td>464de30d3310123644ab81a1f0adc51598586fd2</td><td></td></tr><tr><td>466a5add15bb5f91e0cfd29a55f5fb159a7980e5</td><td>Video Repeat Recognition and Mining by Visual
+<br/>Features
+</td></tr><tr><td>46538b0d841654a0934e4c75ccd659f6c5309b72</td><td>Signal & Image Processing : An International Journal (SIPIJ) Vol.5, No.1, February 2014
+<br/>A NOVEL APPROACH TO GENERATE FACE
+<br/>BIOMETRIC TEMPLATE USING BINARY
+<br/>DISCRIMINATING ANALYSIS
+<br/>1P.G. Student, Department of Computer Engineering, MCERC, Nashik (M.S.), India.
+<br/>2Associate Professor, Department of Computer Engineering,
+<br/>MCERC, Nashik (M.S.), India
+</td></tr><tr><td>46196735a201185db3a6d8f6e473baf05ba7b68f</td><td></td></tr><tr><td>4682fee7dc045aea7177d7f3bfe344aabf153bd5</td><td>Tabula Rasa: Model Transfer for
+<br/>Object Category Detection
+<br/>Department of Engineering Science
+<br/>Oxford
+<br/>(Presented by Elad Liebman)
+</td></tr><tr><td>2cbb4a2f8fd2ddac86f8804fd7ffacd830a66b58</td><td></td></tr><tr><td>2c8743089d9c7df04883405a31b5fbe494f175b4</td><td>Washington State Convention Center
+<br/>Seattle, Washington, May 26-30, 2015
+<br/>978-1-4799-6922-7/15/$31.00 ©2015 IEEE
+<br/>3039
+</td></tr><tr><td>2c61a9e26557dd0fe824909adeadf22a6a0d86b0</td><td></td></tr><tr><td>2c93c8da5dfe5c50119949881f90ac5a0a4f39fe</td><td>Advanced local motion patterns for macro and micro facial
+<br/>expression recognition
+<br/>B. Allaerta,∗, IM. Bilascoa, C. Djerabaa
+<br/>aUniv. Lille, CNRS, Centrale Lille, UMR 9189 - CRIStAL -
+<br/>Centre de Recherche en Informatique Signal et Automatique de Lille, F-59000 Lille, France
+</td></tr><tr><td>2c2786ea6386f2d611fc9dbf209362699b104f83</td><td></td></tr><tr><td>2c848cc514293414d916c0e5931baf1e8583eabc</td><td>An automatic facial expression recognition system
+<br/>evaluated by different classifiers
+<br/>∗Programa de P´os-Graduac¸˜ao em Mecatrˆonica
+<br/>Universidade Federal da Bahia,
+<br/>†Department of Electrical Engineering - EESC/USP
+</td></tr><tr><td>2cdd9e445e7259117b995516025fcfc02fa7eebb</td><td>Title
+<br/>Temporal Exemplar-based Bayesian Networks for facial
+<br/>expression recognition
+<br/>Author(s)
+<br/>Shang, L; Chan, KP
+<br/>Citation
+<br/>Proceedings - 7Th International Conference On Machine
+<br/>Learning And Applications, Icmla 2008, 2008, p. 16-22
+<br/>Issued Date
+<br/>2008
+<br/>URL
+<br/>http://hdl.handle.net/10722/61208
+<br/>Rights
+<br/>This work is licensed under a Creative Commons Attribution-
+<br/>NonCommercial-NoDerivatives 4.0 International License.;
+<br/>International Conference on Machine Learning and Applications
+<br/>Proceedings. Copyright © IEEE.; ©2008 IEEE. Personal use of
+<br/>this material is permitted. However, permission to
+<br/>reprint/republish this material for advertising or promotional
+<br/>purposes or for creating new collective works for resale or
+<br/>redistribution to servers or lists, or to reuse any copyrighted
+<br/>component of this work in other works must be obtained from
+<br/>the IEEE.
+</td></tr><tr><td>2c5d1e0719f3ad7f66e1763685ae536806f0c23b</td><td>AENet: Learning Deep Audio Features for Video
+<br/>Analysis
+</td></tr><tr><td>2c8f24f859bbbc4193d4d83645ef467bcf25adc2</td><td>845
+<br/>Classification in the Presence of
+<br/>Label Noise: a Survey
+</td></tr><tr><td>2cdde47c27a8ecd391cbb6b2dea64b73282c7491</td><td>ORDER-AWARE CONVOLUTIONAL POOLING FOR VIDEO BASED ACTION RECOGNITION
+<br/>Order-aware Convolutional Pooling for Video Based
+<br/>Action Recognition
+</td></tr><tr><td>2c7c3a74da960cc76c00965bd3e343958464da45</td><td></td></tr><tr><td>2cf5f2091f9c2d9ab97086756c47cd11522a6ef3</td><td>MPIIGaze: Real-World Dataset and Deep
+<br/>Appearance-Based Gaze Estimation
+</td></tr><tr><td>79581c364cefe53bff6bdd224acd4f4bbc43d6d4</td><td></td></tr><tr><td>790aa543151312aef3f7102d64ea699a1d15cb29</td><td>Confidence-Weighted Local Expression Predictions for
+<br/>Occlusion Handling in Expression Recognition and Action
+<br/>Unit detection
+<br/>1 Sorbonne Universités, UPMC Univ Paris 06, CNRS, ISIR UMR 7222
+<br/>4 place Jussieu 75005 Paris
+</td></tr><tr><td>79f6a8f777a11fd626185ab549079236629431ac</td><td>Copyright
+<br/>by
+<br/>2013
+</td></tr><tr><td>795ea140df2c3d29753f40ccc4952ef24f46576c</td><td></td></tr><tr><td>79dc84a3bf76f1cb983902e2591d913cee5bdb0e</td><td></td></tr><tr><td>79b669abf65c2ca323098cf3f19fa7bdd837ff31</td><td> Deakin Research Online
+<br/>This is the published version:
+<br/>Rana, Santu, Liu, Wanquan, Lazarescu, Mihai and Venkatesh, Svetha 2008, Efficient tensor
+<br/>based face recognition, in ICPR 2008 : Proceedings of the 19th International Conference on
+<br/>Pattern Recognition, IEEE, Washington, D. C., pp. 1-4.
+<br/>Available from Deakin Research Online:
+<br/>http://hdl.handle.net/10536/DRO/DU:30044585
+<br/>
+<br/>Reproduced with the kind permissions of the copyright owner.
+<br/>Personal use of this material is permitted. However, permission to reprint/republish this
+<br/>material for advertising or promotional purposes or for creating new collective works for
+<br/>resale or redistribution to servers or lists, or to reuse any copyrighted component of this work
+<br/>in other works must be obtained from the IEEE.
+<br/>Copyright : 2008, IEEE
+</td></tr><tr><td>79c3a7131c6c176b02b97d368cd0cd0bc713ff7e</td><td></td></tr><tr><td>79dd787b2877cf9ce08762d702589543bda373be</td><td>Face Detection Using SURF Cascade
+<br/>Intel Labs China
+</td></tr><tr><td>793e7f1ba18848908da30cbad14323b0389fd2a8</td><td></td></tr><tr><td>2dd6c988b279d89ab5fb5155baba65ce4ce53c1e</td><td></td></tr><tr><td>2d294c58b2afb529b26c49d3c92293431f5f98d0</td><td>4413
+<br/>Maximum Margin Projection Subspace Learning
+<br/>for Visual Data Analysis
+</td></tr><tr><td>2d1f86e2c7ba81392c8914edbc079ac64d29b666</td><td></td></tr><tr><td>2d05e768c64628c034db858b7154c6cbd580b2d5</td><td>Available Online at www.ijcsmc.com
+<br/>International Journal of Computer Science and Mobile Computing
+<br/> A Monthly Journal of Computer Science and Information Technology
+<br/> IJCSMC, Vol. 4, Issue. 8, August 2015, pg.431 – 446
+<br/> RESEARCH ARTICLE
+<br/>ISSN 2320–088X
+<br/>FACIAL EXPRESSION RECOGNITION:
+<br/>Machine Learning using C#
+</td></tr><tr><td>2d072cd43de8d17ce3198fae4469c498f97c6277</td><td>Random Cascaded-Regression Copse for Robust
+<br/>Facial Landmark Detection
+<br/>and Xiao-Jun Wu
+</td></tr><tr><td>2d71e0464a55ef2f424017ce91a6bcc6fd83f6c3</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>National Conference on Advancements in Computer & Information Technology (NCACIT-2016)
+<br/>A Survey on: Image Process using Two- Stage Crawler
+<br/>Assistant Professor
+<br/>SPPU, Pune
+<br/>Department of Computer Engg
+<br/>Department of Computer Engg
+<br/>Department of Computer Engg
+<br/>BE Student
+<br/>SPPU, Pune
+<br/>BE Student
+<br/>SPPU, Pune
+<br/>BE Student
+<br/>Department of Computer Engg
+<br/>SPPU, Pune
+<br/>additional
+<br/>analysis
+<br/>for
+<br/>information
+</td></tr><tr><td>2d8d089d368f2982748fde93a959cf5944873673</td><td>Proceedings of NAACL-HLT 2018, pages 788–794
+<br/>New Orleans, Louisiana, June 1 - 6, 2018. c(cid:13)2018 Association for Computational Linguistics
+<br/>788
+</td></tr><tr><td>2df4d05119fe3fbf1f8112b3ad901c33728b498a</td><td>Facial landmark detection using structured output deep
+<br/>neural networks
+<br/>Soufiane Belharbi ∗1, Cl´ement Chatelain∗1, Romain H´erault∗1, and S´ebastien
+<br/>Adam∗2
+<br/>1LITIS EA 4108, INSA de Rouen, Saint ´Etienne du Rouvray 76800, France
+<br/>2LITIS EA 4108, UFR des Sciences, Universit´e de Rouen, France.
+<br/>September 24, 2015
+</td></tr><tr><td>4188bd3ef976ea0dec24a2512b44d7673fd4ad26</td><td>1050
+<br/>Nonlinear Non-Negative Component
+<br/>Analysis Algorithms
+</td></tr><tr><td>41000c3a3344676513ef4bfcd392d14c7a9a7599</td><td>A NOVEL APPROACH FOR GENERATING FACE
+<br/>TEMPLATE USING BDA
+<br/>1P.G. Student, Department of Computer Engineering, MCERC, Nashik (M.S.), India.
+<br/>2Associate Professor, Department of Computer Engineering, MCERC, Nashik (M.S.),
+<br/>India
+</td></tr><tr><td>414715421e01e8c8b5743c5330e6d2553a08c16d</td><td>PoTion: Pose MoTion Representation for Action Recognition
+<br/>1Inria∗
+<br/>2NAVER LABS Europe
+</td></tr><tr><td>41ab4939db641fa4d327071ae9bb0df4a612dc89</td><td>Interpreting Face Images by Fitting a Fast
+<br/>Illumination-Based 3D Active Appearance
+<br/>Model
+<br/>Instituto Nacional de Astrof´ısica, ´Optica y Electr´onica,
+<br/>Luis Enrique Erro #1, 72840 Sta Ma. Tonantzintla. Pue., M´exico
+<br/>Coordinaci´on de Ciencias Computacionales
+</td></tr><tr><td>41a6196f88beced105d8bc48dd54d5494cc156fb</td><td>2015 International Conference on
+<br/>Communications, Signal
+<br/>Processing, and their Applications
+<br/>(ICCSPA 2015)
+<br/>Sharjah, United Arab Emirates
+<br/>17-19 February 2015
+<br/>IEEE Catalog Number:
+<br/>ISBN:
+<br/>CFP1574T-POD
+<br/>978-1-4799-6533-5
+</td></tr><tr><td>41de109bca9343691f1d5720df864cdbeeecd9d0</td><td>Article
+<br/>Facial Emotion Recognition: A Survey and
+<br/>Real-World User Experiences in Mixed Reality
+<br/>Received: 10 December 2017; Accepted: 26 January 2018; Published: 1 Febuary 2018
+</td></tr><tr><td>41d9a240b711ff76c5448d4bf4df840cc5dad5fc</td><td>JOURNAL DRAFT, VOL. X, NO. X, APR 2013
+<br/>Image Similarity Using Sparse Representation
+<br/>and Compression Distance
+</td></tr><tr><td>419a6fca4c8d73a1e43003edc3f6b610174c41d2</td><td>A Component Based Approach Improves Classification of Discrete
+<br/>Facial Expressions Over a Holistic Approach
+</td></tr><tr><td>4180978dbcd09162d166f7449136cb0b320adf1f</td><td>Real-time head pose classification in uncontrolled environments
+<br/>with Spatio-Temporal Active Appearance Models
+<br/>∗ Matematica Aplicada i Analisi ,Universitat de Barcelona, Barcelona, Spain
+<br/>+ Matematica Aplicada i Analisi, Universitat de Barcelona, Barcelona, Spain
+<br/>+ Matematica Aplicada i Analisi, Universitat de Barcelona, Barcelona, Spain
+</td></tr><tr><td>41b997f6cec7a6a773cd09f174cb6d2f036b36cd</td><td></td></tr><tr><td>413a184b584dc2b669fbe731ace1e48b22945443</td><td>Human Pose Co-Estimation and Applications
+</td></tr><tr><td>83ca4cca9b28ae58f461b5a192e08dffdc1c76f3</td><td>DETECTING EMOTIONAL STRESS FROM FACIAL EXPRESSIONS FOR DRIVING SAFETY
+<br/>Signal Processing Laboratory (LTS5),
+<br/>´Ecole Polytechnique F´ed´erale de Lausanne, Switzerland
+</td></tr><tr><td>831fbef657cc5e1bbf298ce6aad6b62f00a5b5d9</td><td></td></tr><tr><td>832e1d128059dd5ed5fa5a0b0f021a025903f9d5</td><td>Pairwise Conditional Random Forests for Facial Expression Recognition
+<br/>S´everine Dubuisson1
+<br/>1 Sorbonne Universit´es, UPMC Univ Paris 06, CNRS, ISIR UMR 7222, 4 place Jussieu 75005 Paris
+</td></tr><tr><td>83e093a07efcf795db5e3aa3576531d61557dd0d</td><td>Facial Landmark Localization using Robust
+<br/>Relationship Priors and Approximative Gibbs
+<br/>Sampling
+<br/>Institut f¨ur Informationsverarbeitung (tnt)
+<br/>Leibniz Universit¨at Hannover, Germany
+</td></tr><tr><td>83b4899d2899dd6a8d956eda3c4b89f27f1cd308</td><td>1-4244-1437-7/07/$20.00 ©2007 IEEE
+<br/>I - 377
+<br/>ICIP 2007
+</td></tr><tr><td>830e5b1043227fe189b3f93619ef4c58868758a7</td><td></td></tr><tr><td>8395cf3535a6628c3bdc9b8d0171568d551f5ff0</td><td>Entropy Non-increasing Games for the
+<br/>Improvement of Dataflow Programming
+<br/>Norbert B´atfai, Ren´at´o Besenczi, Gerg˝o Bogacsovics,
+<br/>February 16, 2017
+</td></tr><tr><td>83ac942d71ba908c8d76fc68de6173151f012b38</td><td></td></tr><tr><td>834f5ab0cb374b13a6e19198d550e7a32901a4b2</td><td>Face Translation between Images and Videos using Identity-aware CycleGAN
+<br/>†Computer Vision Lab, ETH Zurich, Switzerland
+<br/>‡VISICS, KU Leuven, Belgium
+</td></tr><tr><td>834b15762f97b4da11a2d851840123dbeee51d33</td><td>Landmark-free smile intensity estimation
+<br/>IMAGO Research Group - Universidade Federal do Paran´a
+<br/>Fig. 1. Overview of our method for smile intensity estimation
+</td></tr><tr><td>833f6ab858f26b848f0d747de502127406f06417</td><td>978-1-4244-5654-3/09/$26.00 ©2009 IEEE
+<br/>61
+<br/>ICIP 2009
+</td></tr><tr><td>8309e8f27f3fb6f2ac1b4343a4ad7db09fb8f0ff</td><td>Generic versus Salient Region-based Partitioning
+<br/>for Local Appearance Face Recognition
+<br/>Computer Science Depatment, Universit¨at Karlsruhe (TH)
+<br/>Am Fasanengarten 5, Karlsruhe 76131, Germany
+<br/>http://isl.ira.uka.de/cvhci
+</td></tr><tr><td>1b55c4e804d1298cbbb9c507497177014a923d22</td><td>Incremental Class Representation
+<br/>Learning for Face Recognition
+<br/>Degree’s Thesis
+<br/>Audiovisual Systems Engineering
+<br/>Author:
+<br/>Universitat Politècnica de Catalunya (UPC)
+<br/>2016 - 2017
+</td></tr><tr><td>1bd50926079e68a6e32dc4412e9d5abe331daefb</td><td></td></tr><tr><td>1b150248d856f95da8316da868532a4286b9d58e</td><td>Analyzing 3D Objects in Cluttered Images
+<br/>UC Irvine
+<br/>UC Irvine
+</td></tr><tr><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td><td>Age and Gender Estimation of Unfiltered Faces
+</td></tr><tr><td>1b300a7858ab7870d36622a51b0549b1936572d4</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TIP.2016.2537215, IEEE
+<br/>Transactions on Image Processing
+<br/>Dynamic Facial Expression Recognition with Atlas
+<br/>Construction and Sparse Representation
+</td></tr><tr><td>1b1173a3fb33f9dfaf8d8cc36eb0bf35e364913d</td><td>DICTA
+<br/>#147
+<br/>000
+<br/>001
+<br/>002
+<br/>003
+<br/>004
+<br/>005
+<br/>006
+<br/>007
+<br/>008
+<br/>009
+<br/>010
+<br/>011
+<br/>012
+<br/>013
+<br/>014
+<br/>015
+<br/>016
+<br/>017
+<br/>018
+<br/>019
+<br/>020
+<br/>021
+<br/>022
+<br/>023
+<br/>024
+<br/>025
+<br/>026
+<br/>027
+<br/>028
+<br/>029
+<br/>030
+<br/>031
+<br/>032
+<br/>033
+<br/>034
+<br/>035
+<br/>036
+<br/>037
+<br/>038
+<br/>039
+<br/>040
+<br/>041
+<br/>042
+<br/>043
+<br/>044
+<br/>045
+<br/>046
+<br/>047
+<br/>048
+<br/>049
+<br/>050
+<br/>051
+<br/>052
+<br/>053
+<br/>DICTA 2010 Submission #147. CONFIDENTIAL REVIEW COPY. DO NOT DISTRIBUTE.
+<br/>Registration Invariant Representations for Expression Detection
+<br/>Anonymous DICTA submission
+<br/>Paper ID 147
+</td></tr><tr><td>1b0a071450c419138432c033f722027ec88846ea</td><td>Windsor Oceanico Hotel, Rio de Janeiro, Brazil, November 1-4, 2016
+<br/>978-1-5090-1889-5/16/$31.00 ©2016 IEEE
+<br/>649
+</td></tr><tr><td>1b3b01513f99d13973e631c87ffa43904cd8a821</td><td>HMM RECOGNITION OF EXPRESSIONS IN UNRESTRAINED VIDEO INTERVALS
+<br/>Universitat Politècnica de Catalunya, Barcelona, Spain
+</td></tr><tr><td>1bc214c39536c940b12c3a2a6b78cafcbfddb59a</td><td></td></tr><tr><td>1be18a701d5af2d8088db3e6aaa5b9b1d54b6fd3</td><td>ENHANCEMENT OF FAST FACE DETECTION ALGORITHM BASED ON A CASCADE OF
+<br/>DECISION TREES
+<br/>Commission II, WG II/5
+<br/>KEY WORDS: Face Detection, Cascade Algorithm, Decision Trees.
+</td></tr><tr><td>1b79628af96eb3ad64dbb859dae64f31a09027d5</td><td></td></tr><tr><td>1b4f6f73c70353869026e5eec1dd903f9e26d43f</td><td>Robust Subjective Visual Property Prediction
+<br/>from Crowdsourced Pairwise Labels
+</td></tr><tr><td>1bc23c771688109bed9fd295ce82d7e702726327</td><td></td></tr><tr><td>1b589016fbabe607a1fb7ce0c265442be9caf3a9</td><td></td></tr><tr><td>1b27ca161d2e1d4dd7d22b1247acee5c53db5104</td><td></td></tr><tr><td>7711a7404f1f1ac3a0107203936e6332f50ac30c</td><td>Action Classification and Highlighting in Videos
+<br/>Disney Research Pittsburgh
+<br/>Disney Research Pittsburgh
+</td></tr><tr><td>778c9f88839eb26129427e1b8633caa4bd4d275e</td><td>Pose Pooling Kernels for Sub-category Recognition
+<br/>ICSI & UC Berkeley
+<br/>ICSI & UC Berkeley
+<br/>Trever Darrell
+<br/>ICSI & UC Berkeley
+</td></tr><tr><td>7789a5d87884f8bafec8a82085292e87d4e2866f</td><td>A Unified Tensor-based Active Appearance Face
+<br/>Model
+<br/>Member, IEEE
+</td></tr><tr><td>776835eb176ed4655d6e6c308ab203126194c41e</td><td></td></tr><tr><td>778bff335ae1b77fd7ec67404f71a1446624331b</td><td>Hough Forest-based Facial Expression Recognition from
+<br/>Video Sequences
+<br/>BIWI, ETH Zurich http://www.vision.ee.ethz.ch
+<br/>VISICS, K.U. Leuven http://www.esat.kuleuven.be/psi/visics
+</td></tr><tr><td>7726a6ab26a1654d34ec04c0b7b3dd80c5f84e0d</td><td>CONTENT-AWARE COMPRESSION USING SALIENCY-DRIVEN IMAGE RETARGETING
+<br/>*Disney Research Zurich
+<br/>†ETH Zurich
+</td></tr><tr><td>7754b708d6258fb8279aa5667ce805e9f925dfd0</td><td>Facial Action Unit Recognition by Exploiting
+<br/>Their Dynamic and Semantic Relationships
+</td></tr><tr><td>77db171a523fc3d08c91cea94c9562f3edce56e1</td><td>Poursaberi et al. EURASIP Journal on Image and Video Processing 2012, 2012:17
+<br/>http://jivp.eurasipjournals.com/content/2012/1/17
+<br/>R ES EAR CH
+<br/>Open Access
+<br/>Gauss–Laguerre wavelet textural feature fusion
+<br/>with geometrical information for facial expression
+<br/>identification
+</td></tr><tr><td>77037a22c9b8169930d74d2ce6f50f1a999c1221</td><td>Robust Face Recognition With Kernelized
+<br/>Locality-Sensitive Group Sparsity Representation
+</td></tr><tr><td>77d31d2ec25df44781d999d6ff980183093fb3de</td><td>The Multiverse Loss for Robust Transfer Learning
+<br/>Supplementary
+<br/>1. Omitted proofs
+<br/>for which the joint loss:
+<br/>m(cid:88)
+<br/>r=1
+<br/>L(F r, br, D, y)
+<br/>(2)
+<br/>J(F 1, b1...F m, bm, D, y) =
+<br/>is bounded by:
+<br/>mL∗(D, y) ≤ J(F 1, b1...F m, bm, D, y)
+<br/>m−1(cid:88)
+<br/>≤ mL∗(D, y) +
+<br/>Alλd−j+1
+<br/>(3)
+<br/>l=1
+<br/>where [A1 . . . Am−1] are bounded parameters.
+<br/>We provide proofs that were omitted from the paper for
+<br/>lack of space. We follow the same theorem numbering as in
+<br/>the paper.
+<br/>Lemma 1. The minimizers F ∗, b∗ of L are not unique, and
+<br/>it holds that for any vector v ∈ Rc and scalar s, the solu-
+<br/>tions F ∗ + v1(cid:62)
+<br/>Proof. denoting V = v1(cid:62)
+<br/>c , b∗ + s1c are also minimizers of L.
+<br/>c , s = s1c,
+<br/>i v+byi +s
+<br/>i v+bj +s
+<br/>i fyi +byi
+<br/>i v+sed(cid:62)
+<br/>i fj +bj
+<br/>i=1
+<br/>log(
+<br/>L(F ∗ + V, b∗ + s, D, y) =
+<br/>i fyi +d(cid:62)
+<br/>ed(cid:62)
+<br/>i fj +d(cid:62)
+<br/>j=1 ed(cid:62)
+<br/>i v+sed(cid:62)
+<br/>ed(cid:62)
+<br/>j=1 ed(cid:62)
+<br/>i v+sed(cid:62)
+<br/>ed(cid:62)
+<br/>(cid:80)c
+<br/>(cid:80)c
+<br/>i v+s(cid:80)c
+<br/>− n(cid:88)
+<br/>= − n(cid:88)
+<br/>= − n(cid:88)
+<br/>(cid:80)c
+<br/>= − n(cid:88)
+<br/>ed(cid:62)
+<br/>i fyi +byi
+<br/>j=1 ed(cid:62)
+<br/>i fj +bj
+<br/>ed(cid:62)
+<br/>log(
+<br/>log(
+<br/>log(
+<br/>i=1
+<br/>i=1
+<br/>i=1
+<br/>i fj +bj
+<br/>i fyi +byi
+<br/>j=1 ed(cid:62)
+<br/>) = L(F ∗, b∗, D, y)
+<br/>The following simple lemma was not part of the paper.
+<br/>However, it is the reasoning behind the statement at the end
+<br/>of the proof of Thm. 1. “Since ∀i, j pi(j) > 0 and since
+<br/>rank(D) is full,(cid:80)n
+<br/>Lemma 2. Let K =(cid:80)n
+<br/>such that ∀i qi > 0, the matrix ˆK =(cid:80)n
+<br/>i be a full rank d×d matrix,
+<br/>i.e., it is PD and not just PSD, then for all vector q ∈ Rn
+<br/>is also
+<br/>i pi(j)pi(j(cid:48)) is PD.”
+<br/>i=1 did(cid:62)
+<br/>i=1 did(cid:62)
+<br/>i=1 qidid(cid:62)
+<br/>full rank.
+<br/>Proof. For
+<br/>(miniqi)v(cid:62)Kv > 0.
+<br/>every vector v
+<br/>(cid:2)f 1
+<br/>(cid:3) , b1, F 2 = (cid:2)f 2
+<br/>Theorem 3. There exist a set of weights F 1 =
+<br/>j ⊥ f s
+<br/>C ] , bm which are orthogonal ∀jrs f r
+<br/>2 , ..., f 1
+<br/>2 , ..., f m
+<br/>1 , f 1
+<br/>1 , f m
+<br/>2 , ..., f 2
+<br/>1 , f 2
+<br/>[f m
+<br/>(cid:3) , b2...F m =
+<br/>Proof. We again prove the theorem by constructing such a
+<br/>solution. Denoting by vd−m+2...vd the eigenvectors of K
+<br/>corresponding to λd−m+2 . . . λd. Given F 1 = F ∗, b1 = b∗,
+<br/>we can construct each pair F r, br as follows:
+<br/>(1)
+<br/>∀j, r
+<br/>fj
+<br/>r = f1
+<br/>1 +
+<br/>m−1(cid:88)
+<br/>l=1
+<br/>αjlrvd−l+1
+<br/>br = b1
+<br/>(4)
+<br/>The tensor of parameters αjlr is constructed to insure the
+<br/>orthogonality condition. Formally, αjlr has to satisfy:
+<br/>Rd,
+<br/>v(cid:62) ˆKv
+<br/>∀j, r (cid:54)= s
+<br/>(f 1
+<br/>j +
+<br/>m−1(cid:88)
+<br/>l=1
+<br/>αjlrvd−l+1)(cid:62)f s
+<br/>j = 0
+<br/>(5)
+<br/>2 m(m− 1) equations, it
+<br/>Noticing that 5 constitutes a set of 1
+<br/>can be satisfied by the tensor αjlr which contains m(m −
+<br/>c ] = F r −
+<br/>1)c parameters. Defining Ψr = [ψr
+<br/>1, ψr
+<br/>2, . . . , ψr
+</td></tr><tr><td>486840f4f524e97f692a7f6b42cd19019ee71533</td><td>DeepVisage: Making face recognition simple yet with powerful generalization
+<br/>skills
+<br/>1Laboratoire LIRIS, ´Ecole centrale de Lyon, 69134 Ecully, France.
+<br/>2Safran Identity & Security, 92130 Issy-les-Moulineaux, France.
+</td></tr><tr><td>48186494fc7c0cc664edec16ce582b3fcb5249c0</td><td>P-CNN: Pose-based CNN Features for Action Recognition
+<br/>Guilhem Ch´eron∗ †
+<br/>INRIA
+</td></tr><tr><td>48499deeaa1e31ac22c901d115b8b9867f89f952</td><td>Interim Report of Final Year Project
+<br/>HKU-Face: A Large Scale Dataset for
+<br/>Deep Face Recognition
+<br/>3035140108
+<br/>Haoyu Li
+<br/>3035141841
+<br/>COMP4801 Final Year Project
+<br/>Project Code: 17007
+</td></tr><tr><td>486a82f50835ea888fbc5c6babf3cf8e8b9807bc</td><td>MSU TECHNICAL REPORT MSU-CSE-15-11, JULY 24, 2015
+<br/>Face Search at Scale: 80 Million Gallery
+</td></tr><tr><td>4866a5d6d7a40a26f038fc743e16345c064e9842</td><td></td></tr><tr><td>487df616e981557c8e1201829a1d0ec1ecb7d275</td><td>Acoustic Echo Cancellation Using a Vector-Space-Based
+<br/>Adaptive Filtering Algorithm
+</td></tr><tr><td>48f211a9764f2bf6d6dda4a467008eda5680837a</td><td></td></tr><tr><td>4858d014bb5119a199448fcd36746c413e60f295</td><td></td></tr><tr><td>48cfc5789c246c6ad88ff841701204fc9d6577ed</td><td>J Inf Process Syst, Vol.12, No.3, pp.392~409, September 2016
+<br/>
+<br/>
+<br/>ISSN 1976-913X (Print)
+<br/>ISSN 2092-805X (Electronic)
+<br/>Age Invariant Face Recognition Based on DCT
+<br/>Feature Extraction and Kernel Fisher Analysis
+</td></tr><tr><td>70f189798c8b9f2b31c8b5566a5cf3107050b349</td><td>The Challenge of Face Recognition from Digital Point-and-Shoot Cameras
+<br/>David Bolme‡
+</td></tr><tr><td>70109c670471db2e0ede3842cbb58ba6be804561</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Zero-Shot Visual Recognition via Bidirectional Latent Embedding
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>703890b7a50d6535900a5883e8d2a6813ead3a03</td><td></td></tr><tr><td>706236308e1c8d8b8ba7749869c6b9c25fa9f957</td><td>Crowdsourced Data Collection of Facial Responses
+<br/>MIT Media Lab
+<br/>Cambridge
+<br/>02139, USA
+<br/>Rosalind Picard
+<br/>MIT Media Lab
+<br/>Cambridge
+<br/>02139, USA
+<br/>MIT Media Lab
+<br/>Cambridge
+<br/>02139, USA
+</td></tr><tr><td>70569810e46f476515fce80a602a210f8d9a2b95</td><td>Apparent Age Estimation from Face Images Combining General and
+<br/>Children-Specialized Deep Learning Models
+<br/>1Orange Labs – France Telecom, 4 rue Clos Courtel, 35512 Cesson-S´evign´e, France
+<br/>2Eurecom, 450 route des Chappes, 06410 Biot, France
+</td></tr><tr><td>70e79d7b64f5540d309465620b0dab19d9520df1</td><td>International Journal of Scientific & Engineering Research, Volume 8, Issue 3, March-2017
+<br/>ISSN 2229-5518
+<br/>Facial Expression Recognition System
+<br/>Using Extreme Learning Machine
+</td></tr><tr><td>7003d903d5e88351d649b90d378f3fc5f211282b</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 68– No.23, April 2013
+<br/>Facial Expression Recognition using Gabor Wavelet
+<br/>ENTC SVERI’S COE (Poly),
+<br/>Pandharpur,
+<br/>Solapur, India
+<br/>ENTC SVERI’S COE,
+<br/>Pandharpur,
+<br/>Solapur, India
+<br/>ENTC SVERI’S COE (Poly),
+<br/>Pandharpur,
+<br/>Solapur, India
+</td></tr><tr><td>70bf1769d2d5737fc82de72c24adbb7882d2effd</td><td>Face detection in intelligent ambiences with colored illumination
+<br/>Department of Intelligent Systems
+<br/>TU Delft
+<br/>Delft, The Netherlands
+</td></tr><tr><td>1e799047e294267087ec1e2c385fac67074ee5c8</td><td>IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. 21, NO. 12, DECEMBER 1999
+<br/>1357
+<br/>Short Papers___________________________________________________________________________________________________
+<br/>Automatic Classification of
+<br/>Single Facial Images
+</td></tr><tr><td>1ef4815f41fa3a9217a8a8af12cc385f6ed137e1</td><td>Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
+</td></tr><tr><td>1e7ae86a78a9b4860aa720fb0fd0bdc199b092c3</td><td>Article
+<br/>A Brief Review of Facial Emotion Recognition Based
+<br/>on Visual Information
+<br/>Byoung Chul Ko ID
+<br/>Tel.: +82-10-3559-4564
+<br/>Received: 6 December 2017; Accepted: 25 January 2018; Published: 30 January 2018
+</td></tr><tr><td>1e8eee51fd3bf7a9570d6ee6aa9a09454254689d</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TPAMI.2016.2582166, IEEE
+<br/>Transactions on Pattern Analysis and Machine Intelligence
+<br/>Face Search at Scale
+</td></tr><tr><td>1ea8085fe1c79d12adffb02bd157b54d799568e4</td><td></td></tr><tr><td>1ebdfceebad642299e573a8995bc5ed1fad173e3</td><td></td></tr><tr><td>1eec03527703114d15e98ef9e55bee5d6eeba736</td><td>UNIVERSITÄT KARLSRUHE (TH)
+<br/>FAKULTÄT FÜR INFORMATIK
+<br/>INTERACTIVE SYSTEMS LABS
+<br/>DIPLOMA THESIS
+<br/>Automatic identification
+<br/>of persons in TV series
+<br/>SUBMITTED BY
+<br/>MAY 2008
+<br/>ADVISORS
+</td></tr><tr><td>1e8394cc9fe7c2392aa36fb4878faf7e78bbf2de</td><td>TO APPEAR IN IEEE THMS
+<br/>Zero-Shot Object Recognition System
+<br/>based on Topic Model
+</td></tr><tr><td>1ef4aac0ebc34e76123f848c256840d89ff728d0</td><td></td></tr><tr><td>1ecb56e7c06a380b3ce582af3a629f6ef0104457</td><td>List of Contents Vol.8
+<br/>Contents of
+<br/>Journal of Advanced Computational
+<br/> Intelligence and Intelligent Informatics
+<br/>Volume 8
+<br/>Vol.8 No.1, January 2004
+<br/>Editorial:
+<br/>o Special Issue on Selected Papers from Humanoid,
+<br/>Papers:
+<br/>o Dynamic Color Object Recognition Using Fuzzy
+<br/>Nano-technology, Information Technology,
+<br/>Communication and Control, Environment, and
+<br/>Management (HNICEM’03).
+<br/>. 1
+<br/>Elmer P. Dadios
+<br/>Papers:
+<br/>o A New Way of Discovery of Belief, Desire and
+<br/>Intention in the BDI Agent-Based Software
+<br/>Modeling .
+<br/>. 2
+<br/>o Integration of Distributed Robotic Systems
+<br/>. 7
+<br/>Fakhri Karray, Rogelio Soto, Federico Guedea,
+<br/>and Insop Song
+<br/>o A Searching and Tracking Framework for
+<br/>Multi-Robot Observation of Multiple Moving
+<br/>Targets .
+<br/>. 14
+<br/>Zheng Liu, Marcelo H. Ang Jr., and Winston
+<br/>Khoon Guan Seah
+<br/>Development Paper:
+<br/>o Possibilistic Uncertainty Propagation and
+<br/>Compromise Programming in the Life Cycle
+<br/>Analysis of Alternative Motor Vehicle Fuels
+<br/>Raymond R. Tan, Alvin B. Culaba, and
+<br/>Michael R. I. Purvis
+<br/>. 23
+<br/>Logic .
+<br/>Napoleon H. Reyes, and Elmer P. Dadios
+<br/>. 29
+<br/>o A Optical Coordinate Measuring Machine for
+<br/>Nanoscale Dimensional Metrology .
+<br/>. 39
+<br/>Eric Kirkland, Thomas R. Kurfess, and Steven
+<br/>Y. Liang
+<br/>o Humanoid Robot HanSaRam: Recent Progress
+<br/>and Developments .
+<br/>. 45
+<br/>Jong-Hwan Kim, Dong-Han Kim, Yong-Jae
+<br/>Kim, Kui-Hong Park, Jae-Ho Park,
+<br/>Choon-Kyoung Moon, Jee-Hwan Ryu, Kiam
+<br/>Tian Seow, and Kyoung-Chul Koh
+<br/>o Generalized Associative Memory Models: Their
+<br/>Memory Capacities and Potential Application
+<br/>. 56
+<br/>Teddy N. Yap, Jr., and Arnulfo P. Azcarraga
+<br/>o Hybrid Fuzzy Logic Strategy for Soccer Robot
+<br/>Game.
+<br/>. 65
+<br/>Elmer A. Maravillas , Napoleon H. Reyes, and
+<br/>Elmer P. Dadios
+<br/>o Image Compression and Reconstruction Based on
+<br/>Fuzzy Relation and Soft Computing
+<br/>Technology .
+<br/>. 72
+<br/>Kaoru Hirota, Hajime Nobuhara, Kazuhiko
+<br/>Kawamoto, and Shin’ichi Yoshida
+<br/>Vol.8 No.2, March 2004
+<br/>Editorial:
+<br/>o Special Issue on Pattern Recognition .
+<br/>. 83
+<br/>Papers:
+<br/>o Operation of Spatiotemporal Patterns Stored in
+<br/>Osamu Hasegawa
+<br/>Review:
+<br/>o Support Vector Machine and Generalization . 84
+<br/>Takio Kurita
+<br/>o Bayesian Network: Probabilistic Reasoning,
+<br/>Statistical Learning, and Applications .
+<br/>. 93
+<br/>Yoichi Motomura
+<br/>Living Neuronal Networks Cultured on a
+<br/>Microelectrode Array .
+<br/>Suguru N. Kudoh, and Takahisa Taguchi
+<br/>o Rapid Discriminative Learning .
+<br/>. 100
+<br/>. 108
+<br/>Jun Rokui
+<br/>o Robust Fuzzy Clustering Based on Similarity
+<br/>between Data .
+<br/>Kohei Inoue, and Kiichi Urahama
+<br/>Vol.8 No.6, 2004
+<br/>Journal of Advanced Computational Intelligence
+<br/>and Intelligent Informatics
+<br/>. 115
+<br/>I-1
+</td></tr><tr><td>1e64b2d2f0a8a608d0d9d913c4baee6973995952</td><td>DOMINANT AND
+<br/>COMPLEMENTARY MULTI-
+<br/>EMOTIONAL FACIAL
+<br/>EXPRESSION RECOGNITION
+<br/>USING C-SUPPORT VECTOR
+<br/>CLASSIFICATION
+</td></tr><tr><td>1e21b925b65303ef0299af65e018ec1e1b9b8d60</td><td>Under review as a conference paper at ICLR 2017
+<br/>UNSUPERVISED CROSS-DOMAIN IMAGE GENERATION
+<br/>Facebook AI Research
+<br/>Tel-Aviv, Israel
+</td></tr><tr><td>1ee27c66fabde8ffe90bd2f4ccee5835f8dedbb9</td><td>Entropy Regularization
+<br/>The problem of semi-supervised induction consists in learning a decision rule from
+<br/>labeled and unlabeled data. This task can be undertaken by discriminative methods,
+<br/>provided that learning criteria are adapted consequently. In this chapter, we moti-
+<br/>vate the use of entropy regularization as a means to bene(cid:12)t from unlabeled data in
+<br/>the framework of maximum a posteriori estimation. The learning criterion is derived
+<br/>from clearly stated assumptions and can be applied to any smoothly parametrized
+<br/>model of posterior probabilities. The regularization scheme favors low density sep-
+<br/>aration, without any modeling of the density of input features. The contribution
+<br/>of unlabeled data to the learning criterion induces local optima, but this problem
+<br/>can be alleviated by deterministic annealing. For well-behaved models of posterior
+<br/>probabilities, deterministic annealing EM provides a decomposition of the learning
+<br/>problem in a series of concave subproblems. Other approaches to the semi-supervised
+<br/>problem are shown to be close relatives or limiting cases of entropy regularization.
+<br/>A series of experiments illustrates the good behavior of the algorithm in terms of
+<br/>performance and robustness with respect to the violation of the postulated low den-
+<br/>sity separation assumption. The minimum entropy solution bene(cid:12)ts from unlabeled
+<br/>data and is able to challenge mixture models and manifold learning in a number of
+<br/>situations.
+<br/>9.1 Introduction
+<br/>semi-supervised
+<br/>induction
+<br/>This chapter addresses semi-supervised induction, which refers to the learning of
+<br/>a decision rule, on the entire input domain X, from labeled and unlabeled data.
+<br/>The objective is identical to the one of supervised classi(cid:12)cation: generalize from
+<br/>examples. The problem di(cid:11)ers in the respect that the supervisor’s responses are
+<br/>missing for some training examples. This characteristic is shared with transduction,
+<br/>which has however a di(cid:11)erent goal, that is, of predicting labels on a set of prede(cid:12)ned
+</td></tr><tr><td>1ee3b4ba04e54bfbacba94d54bf8d05fd202931d</td><td>Indonesian Journal of Electrical Engineering and Computer Science
+<br/>Vol. 12, No. 2, November 2018, pp. 476~481
+<br/>ISSN: 2502-4752, DOI: 10.11591/ijeecs.v12.i2.pp476-481
+<br/> 476
+<br/>Celebrity Face Recognition using Deep Learning
+<br/>1,2,3Faculty of Computer and Mathematical Sciences, UniversitiTeknologi MARA (UiTM),
+<br/>4Faculty of Computer and Mathematical Sciences, UniversitiTeknologi MARA (UiTM),
+<br/> Shah Alam, Selangor, Malaysia
+<br/>Campus Jasin, Melaka, Malaysia
+<br/>Article Info
+<br/>Article history:
+<br/>Received May 29, 2018
+<br/>Revised Jul 30, 2018
+<br/>Accepted Aug 3, 2018
+<br/>Keywords:
+<br/>AlexNet
+<br/>Convolutional neural network
+<br/>Deep learning
+<br/>Face recognition
+<br/>GoogLeNet
+</td></tr><tr><td>1e41a3fdaac9f306c0ef0a978ae050d884d77d2a</td><td>411
+<br/>Robust Object Recognition with
+<br/>Cortex-Like Mechanisms
+<br/>Tomaso Poggio, Member, IEEE
+</td></tr><tr><td>1e1e66783f51a206509b0a427e68b3f6e40a27c8</td><td>SEMI-SUPERVISED ESTIMATION OF PERCEIVED AGE
+<br/>FROM FACE IMAGES
+<br/>VALWAY Technology Center, NEC Soft, Ltd., Tokyo, Japan
+<br/>Keywords:
+</td></tr><tr><td>1efaa128378f988965841eb3f49d1319a102dc36</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+<br/>Hierarchical binary CNNs for landmark
+<br/>localization with limited resources
+</td></tr><tr><td>8451bf3dd6bcd946be14b1a75af8bbb65a42d4b2</td><td>Consensual and Privacy-Preserving Sharing of
+<br/>Multi-Subject and Interdependent Data
+<br/>EPFL, UNIL–HEC Lausanne
+<br/>K´evin Huguenin
+<br/>UNIL–HEC Lausanne
+<br/>EPFL
+<br/>EPFL
+</td></tr><tr><td>84fe5b4ac805af63206012d29523a1e033bc827e</td><td></td></tr><tr><td>84e4b7469f9c4b6c9e73733fa28788730fd30379</td><td>Duong et al. EURASIP Journal on Advances in Signal Processing (2018) 2018:10
+<br/>DOI 10.1186/s13634-017-0521-9
+<br/>EURASIP Journal on Advances
+<br/>in Signal Processing
+<br/>R ES EAR CH
+<br/>Projective complex matrix factorization for
+<br/>facial expression recognition
+<br/>Open Access
+</td></tr><tr><td>84dcf04802743d9907b5b3ae28b19cbbacd97981</td><td></td></tr><tr><td>84fa126cb19d569d2f0147bf6f9e26b54c9ad4f1</td><td>Improved Boosting Performance by Explicit
+<br/>Handling of Ambiguous Positive Examples
+</td></tr><tr><td>841a5de1d71a0b51957d9be9d9bebed33fb5d9fa</td><td>5017
+<br/>PCANet: A Simple Deep Learning Baseline for
+<br/>Image Classification?
+</td></tr><tr><td>849f891973ad2b6c6f70d7d43d9ac5805f1a1a5b</td><td>Detecting Faces Using Region-based Fully
+<br/>Convolutional Networks
+<br/>Tencent AI Lab, China
+</td></tr><tr><td>4adca62f888226d3a16654ca499bf2a7d3d11b71</td><td>Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics, pages 572–582,
+<br/>Sofia, Bulgaria, August 4-9 2013. c(cid:13)2013 Association for Computational Linguistics
+<br/>572
+</td></tr><tr><td>4a2d54ea1da851151d43b38652b7ea30cdb6dfb2</td><td>Direct Recognition of Motion Blurred Faces
+</td></tr><tr><td>4a3758f283b7c484d3f164528d73bc8667eb1591</td><td>Attribute Enhanced Face Aging with Wavelet-based Generative Adversarial
+<br/>Networks
+<br/>Center for Research on Intelligent Perception and Computing, CASIA
+<br/>National Laboratory of Pattern Recognition, CASIA
+</td></tr><tr><td>4abd49538d04ea5c7e6d31701b57ea17bc349412</td><td>Recognizing Fine-Grained and Composite Activities
+<br/>using Hand-Centric Features and Script Data
+</td></tr><tr><td>4a0f98d7dbc31497106d4f652968c708f7da6692</td><td>Real-time Eye Gaze Direction Classification Using
+<br/>Convolutional Neural Network
+</td></tr><tr><td>4acd683b5f91589002e6f50885df51f48bc985f4</td><td>BRIDGING COMPUTER VISION AND SOCIAL SCIENCE : A MULTI-CAMERA VISION
+<br/>SYSTEM FOR SOCIAL INTERACTION TRAINING ANALYSIS
+<br/>Peter Tu
+<br/>GE Global Research, Niskayuna NY USA
+</td></tr><tr><td>4aeb87c11fb3a8ad603311c4650040fd3c088832</td><td>Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+<br/>1816
+</td></tr><tr><td>4a3d96b2a53114da4be3880f652a6eef3f3cc035</td><td>2666
+<br/>A Dictionary Learning-Based
+<br/>3D Morphable Shape Model
+</td></tr><tr><td>4a6fcf714f663618657effc341ae5961784504c7</td><td>Scaling up Class-Specific Kernel Discriminant
+<br/>Analysis for large-scale Face Verification
+</td></tr><tr><td>24115d209e0733e319e39badc5411bbfd82c5133</td><td>Long-term Recurrent Convolutional Networks for
+<br/>Visual Recognition and Description
+</td></tr><tr><td>24c442ac3f6802296d71b1a1914b5d44e48b4f29</td><td>Pose and expression-coherent face recovery in the wild
+<br/>Technicolor, Cesson-S´evign´e, France
+<br/>Franc¸ois Le Clerc
+<br/>Patrick P´erez
+</td></tr><tr><td>24aac045f1e1a4c13a58eab4c7618dccd4c0e671</td><td></td></tr><tr><td>240d5390af19bb43761f112b0209771f19bfb696</td><td></td></tr><tr><td>24e099e77ae7bae3df2bebdc0ee4e00acca71250</td><td>Robust face alignment under occlusion via regional predictive power
+<br/>estimation.
+<br/>© 2015 IEEE
+<br/>For additional information about this publication click this link.
+<br/>http://qmro.qmul.ac.uk/xmlui/handle/123456789/22467
+<br/>Information about this research object was correct at the time of download; we occasionally
+<br/>make corrections to records, please therefore check the published record when citing. For
+</td></tr><tr><td>2450c618cca4cbd9b8cdbdb05bb57d67e63069b1</td><td>A Connexionist Approach for Robust and Precise Facial Feature Detection in
+<br/>Complex Scenes
+<br/>Stefan Duffner and Christophe Garcia
+<br/>France Telecom Research & Development
+<br/>4, rue du Clos Courtel
+<br/>35512 Cesson-S´evign´e, France
+</td></tr><tr><td>244b57cc4a00076efd5f913cc2833138087e1258</td><td>Warped Convolutions: Efficient Invariance to Spatial Transformations
+</td></tr><tr><td>24869258fef8f47623b5ef43bd978a525f0af60e</td><td><b>UNIVERSITÉDEGRENOBLENoattribuéparlabibliothèqueTHÈSEpourobtenirlegradedeDOCTEURDEL’UNIVERSITÉDEGRENOBLESpécialité:MathématiquesetInformatiquepréparéeauLaboratoireJeanKuntzmanndanslecadredel’ÉcoleDoctoraleMathématiques,SciencesetTechnologiesdel’Information,InformatiqueprésentéeetsoutenuepubliquementparMatthieuGuillauminle27septembre2010ExploitingMultimodalDataforImageUnderstandingDonnéesmultimodalespourl’analysed’imageDirecteursdethèse:CordeliaSchmidetJakobVerbeekJURYM.ÉricGaussierUniversitéJosephFourierPrésidentM.AntonioTorralbaMassachusettsInstituteofTechnologyRapporteurMmeTinneTuytelaarsKatholiekeUniversiteitLeuvenRapporteurM.MarkEveringhamUniversityofLeedsExaminateurMmeCordeliaSchmidINRIAGrenobleExaminatriceM.JakobVerbeekINRIAGrenobleExaminateur</b></td></tr><tr><td>24d376e4d580fb28fd66bc5e7681f1a8db3b6b78</td><td></td></tr><tr><td>24ff832171cb774087a614152c21f54589bf7523</td><td>Beat-Event Detection in Action Movie Franchises
+<br/>Jerome Revaud
+<br/>Zaid Harchaoui
+</td></tr><tr><td>24bf94f8090daf9bda56d54e42009067839b20df</td><td></td></tr><tr><td>230527d37421c28b7387c54e203deda64564e1b7</td><td>Person Re-identification: System Design and
+<br/>Evaluation Overview
+</td></tr><tr><td>23fdbef123bcda0f07d940c72f3b15704fd49a98</td><td></td></tr><tr><td>23ebbbba11c6ca785b0589543bf5675883283a57</td><td></td></tr><tr><td>23172f9a397f13ae1ecb5793efd81b6aba9b4537</td><td>Proceedings of the 2015 Workshop on Vision and Language (VL’15), pages 10–17,
+<br/>Lisbon, Portugal, 18 September 2015. c(cid:13)2015 Association for Computational Linguistics.
+<br/>10
+</td></tr><tr><td>236a4f38f79a4dcc2183e99b568f472cf45d27f4</td><td>1632
+<br/>Randomized Clustering Forests
+<br/>for Image Classification
+<br/>Frederic Jurie, Member, IEEE Computer Society
+</td></tr><tr><td>230c4a30f439700355b268e5f57d15851bcbf41f</td><td>EM Algorithms for Weighted-Data Clustering
+<br/>with Application to Audio-Visual Scene Analysis
+</td></tr><tr><td>237fa91c8e8098a0d44f32ce259ff0487aec02cf</td><td>IEEE TRANSACTIONS ON SYSTEMS, MAN, AND CYBERNETICS—PART B: CYBERNETICS, VOL. 36, NO. 4, AUGUST 2006
+<br/>863
+<br/>Bidirectional PCA With Assembled Matrix
+<br/>Distance Metric for Image Recognition
+</td></tr><tr><td>23ba9e462151a4bf9dfc3be5d8b12dbcfb7fe4c3</td><td>CS 229 Project, Fall 2014
+<br/>Determining Mood from Facial Expressions
+<br/>Introduction
+<br/>I
+<br/>Facial expressions play an extremely important role in human communication. As
+<br/>society continues to make greater use of human-machine interactions, it is important for
+<br/>machines to be able to interpret facial expressions in order to improve their
+<br/>authenticity. If machines can be trained to determine mood to a better extent than
+<br/>humans can, especially for more subtle moods, then this could be useful in fields such as
+<br/>counseling. This could also be useful for gauging reactions of large audiences in various
+<br/>contexts, such as political talks.
+<br/>The results of this project could also be applied to recognizing other features of facial
+<br/>expressions, such as determining when people are purposefully suppressing emotions or
+<br/>lying. The ability to recognize different facial expressions could also improve technology
+<br/>that recognizes to whom specific faces belong. This could in turn be used to search a
+<br/>large number of pictures for a specific photo, which is becoming increasingly difficult, as
+<br/>storing photos digitally has been extremely common in the past decade. The possibilities
+<br/>are endless.
+<br/>II Data and Features
+<br/>2.1 Data
+<br/>Our data consists of 1166 frontal images of
+<br/>people’s faces from three databases, with each
+<br/>image labeled with one of eight emotions:
+<br/>anger, contempt, disgust, fear, happiness,
+<br/>neutral, sadness, and surprise. The TFEID [1],
+<br/>CK+ [2], and JAFFE [3] databases primarily
+<br/>consist of Taiwanese, Caucasian, and Japanese
+<br/>subjects, respectively. The TFEID and JAFFE
+<br/>images are both cropped with the faces
+<br/>centered. Each image has a subject posing with
+<br/>one of the emotions. The JAFFE database does
+<br/>not have any images for contempt.
+<br/>2.2 Features
+<br/>On each face, there are many different facial landmarks. While some of these landmarks
+<br/>(pupil position, nose tip, and face contour) are not as indicative of emotion, others
+<br/>(eyebrow, mouth, and eye shape) are. To extract landmark data from images, we used
+<br/>Happiness
+<br/>Figure 1
+<br/>Anger
+</td></tr><tr><td>238fc68b2e0ef9f5ec043d081451902573992a03</td><td>2656
+<br/>Enhanced Local Gradient Order Features and
+<br/>Discriminant Analysis for Face Recognition
+<br/>role in robust face recognition [5]. Many algorithms have
+<br/>been proposed to deal with the effectiveness of feature design
+<br/>and extraction [6], [7]; however, the performance of many
+<br/>existing methods is still highly sensitive to variations of
+<br/>imaging conditions, such as outdoor illumination, exaggerated
+<br/>expression, and continuous occlusion. These complex varia-
+<br/>tions are significantly affecting the recognition accuracy in
+<br/>recent years [8]–[10].
+<br/>Appearance-based subspace learning is one of the sim-
+<br/>plest approach for feature extraction, and many methods
+<br/>are usually based on linear correlation of pixel intensities.
+<br/>For example, Eigenface [11] uses eigen system of pixel
+<br/>intensities to estimate the lower rank linear subspace of
+<br/>a set of training face images by minimizing the (cid:2)2 dis-
+<br/>tance metric. The solution enjoys optimality properties when
+<br/>noise is independent
+<br/>identically distributed Gaussian only.
+<br/>Fisherface [12] will suffer more due to the estimation of
+<br/>inverse within-class covariance matrix [13],
+<br/>thus the per-
+<br/>formance will degenerate rapidly in the cases of occlusion
+<br/>and small sample size. Laplacianfaces [14] refer to another
+<br/>appearance-based approach which learns a locality preserv-
+<br/>ing subspace and seeks to capture the intrinsic geometry
+<br/>and local structure of the data. Other methods such as those
+<br/>in [5] and [15] also provide valuable approaches to supervised
+<br/>or unsupervised dimension reduction tasks.
+<br/>A fundamental problem of appearance-based methods for
+<br/>face recognition, however, is that they are sensitive to imag-
+<br/>ing conditions [10]. As for data corrupted by illumination
+<br/>changes, occlusions, and inaccurate alignment, the estimated
+<br/>subspace will be biased, thus much of the efforts concentrate
+<br/>on removing/shrinking the noise components. In contrast, local
+<br/>feature descriptors [15]–[19] have certain advantages as they
+<br/>are more stable to local changes. In the view of image pro-
+<br/>cessing and vision, the basic imaging system can be simply
+<br/>formulated as
+<br/>(x, y) = A(x, y) × L(x, y)
+<br/>(1)
+</td></tr><tr><td>23d55061f7baf2ffa1c847d356d8f76d78ebc8c1</td><td>Solmaz et al. IPSJ Transactions on Computer Vision and
+<br/>Applications (2017) 9:22
+<br/>DOI 10.1186/s41074-017-0033-4
+<br/>IPSJ Transactions on Computer
+<br/>Vision and Applications
+<br/>RESEARCH PAPER
+<br/>Open Access
+<br/>Generic and attribute-specific deep
+<br/>representations for maritime vessels
+</td></tr><tr><td>23a8d02389805854cf41c9e5fa56c66ee4160ce3</td><td>Multimed Tools Appl
+<br/>DOI 10.1007/s11042-013-1568-8
+<br/>Influence of low resolution of images on reliability
+<br/>of face detection and recognition
+<br/>© The Author(s) 2013. This article is published with open access at SpringerLink.com
+</td></tr><tr><td>4fd29e5f4b7186e349ba34ea30738af7860cf21f</td><td></td></tr><tr><td>4f051022de100241e5a4ba8a7514db9167eabf6e</td><td>Face Parsing via a Fully-Convolutional Continuous
+<br/>CRF Neural Network
+</td></tr><tr><td>4f6adc53798d9da26369bea5a0d91ed5e1314df2</td><td>IEEE TRANSACTIONS ON SIGNAL PROCESSING, VOL. , NO. , 2016
+<br/>Online Nonnegative Matrix Factorization with
+<br/>General Divergences
+</td></tr><tr><td>4fbef7ce1809d102215453c34bf22b5f9f9aab26</td><td></td></tr><tr><td>4fa0d73b8ba114578744c2ebaf610d2ca9694f45</td><td></td></tr><tr><td>4f591e243a8f38ee3152300bbf42899ac5aae0a5</td><td>SUBMITTED TO TPAMI
+<br/>Understanding Higher-Order Shape
+<br/>via 3D Shape Attributes
+</td></tr><tr><td>4f9958946ad9fc71c2299847e9ff16741401c591</td><td>Facial Expression Recognition with Recurrent Neural Networks
+<br/>Robotics and Embedded Systems Lab, Department of Computer Science
+<br/>Image Understanding and Knowledge-Based Systems, Department of Computer Science
+<br/>Technische Universit¨at M¨unchen, Germany
+</td></tr><tr><td>4f0bf2508ae801aee082b37f684085adf0d06d23</td><td></td></tr><tr><td>4f4f920eb43399d8d05b42808e45b56bdd36a929</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 123 – No.4, August 2015
+<br/>A Novel Method for 3D Image Segmentation with Fusion
+<br/>of Two Images using Color K-means Algorithm
+<br/>Neelam Kushwah
+<br/>Dept. of CSE
+<br/>ITM Universe
+<br/>Gwalior
+<br/>Priusha Narwariya
+<br/>Dept. of CSE
+<br/>ITM Universe
+<br/>Gwalior
+<br/>two
+</td></tr><tr><td>8d71872d5877c575a52f71ad445c7e5124a4b174</td><td></td></tr><tr><td>8de06a584955f04f399c10f09f2eed77722f6b1c</td><td>Author manuscript, published in "International Conference on Computer Vision Theory and Applications (VISAPP 2013) (2013)"
+</td></tr><tr><td>8d4f0517eae232913bf27f516101a75da3249d15</td><td>ARXIV SUBMISSION, MARCH 2018
+<br/>Event-based Dynamic Face Detection and
+<br/>Tracking Based on Activity
+</td></tr><tr><td>8de2dbe2b03be8a99628ffa000ac78f8b66a1028</td><td>´Ecole Nationale Sup´erieure dInformatique et de Math´ematiques Appliqu´ees de Grenoble
+<br/>INP Grenoble – ENSIMAG
+<br/>UFR Informatique et Math´ematiques Appliqu´ees de Grenoble
+<br/>Rapport de stage de Master 2 et de projet de fin d’´etudes
+<br/>Effectu´e au sein de l’´equipe LEAR, I.N.R.I.A., Grenoble
+<br/>Action Recognition in Videos
+<br/>3e ann´ee ENSIMAG – Option I.I.I.
+<br/>M2R Informatique – sp´ecialit´e I.A.
+<br/>04 f´evrier 2008 – 04 juillet 2008
+<br/>LEAR,
+<br/>I.N.R.I.A., Grenoble
+<br/>655 avenue de l’Europe
+<br/>38 334 Montbonnot
+<br/>France
+<br/>Responsable de stage
+<br/>Mme. Cordelia Schmid
+<br/>Tuteur ´ecole
+<br/>Jury
+</td></tr><tr><td>8d42a24d570ad8f1e869a665da855628fcb1378f</td><td>CVPR
+<br/>#987
+<br/>000
+<br/>001
+<br/>002
+<br/>003
+<br/>004
+<br/>005
+<br/>006
+<br/>007
+<br/>008
+<br/>009
+<br/>010
+<br/>011
+<br/>012
+<br/>013
+<br/>014
+<br/>015
+<br/>016
+<br/>017
+<br/>018
+<br/>019
+<br/>020
+<br/>021
+<br/>022
+<br/>023
+<br/>024
+<br/>025
+<br/>026
+<br/>027
+<br/>028
+<br/>029
+<br/>030
+<br/>031
+<br/>032
+<br/>033
+<br/>034
+<br/>035
+<br/>036
+<br/>037
+<br/>038
+<br/>039
+<br/>040
+<br/>041
+<br/>042
+<br/>043
+<br/>044
+<br/>045
+<br/>046
+<br/>047
+<br/>048
+<br/>049
+<br/>050
+<br/>051
+<br/>052
+<br/>053
+<br/>CVPR 2009 Submission #987. CONFIDENTIAL REVIEW COPY. DO NOT DISTRIBUTE.
+<br/>An Empirical Study of Context in Object Detection
+<br/>Anonymous CVPR submission
+<br/>Paper ID 987
+</td></tr><tr><td>8d8461ed57b81e05cc46be8e83260cd68a2ebb4d</td><td>Age identification of Facial Images using Neural
+<br/>Network
+<br/>CSE Department,CSVTU
+<br/>RIT, Raipur, Chhattisgarh , INDIA
+</td></tr><tr><td>8dbe79830713925affc48d0afa04ed567c54724b</td><td></td></tr><tr><td>8d1adf0ac74e901a94f05eca2f684528129a630a</td><td>Facial Expression Recognition Using Facial
+<br/>Movement Features
+</td></tr><tr><td>8d712cef3a5a8a7b1619fb841a191bebc2a17f15</td><td></td></tr><tr><td>8dffbb6d75877d7d9b4dcde7665888b5675deee1</td><td>Emotion Recognition with Deep-Belief
+<br/>Networks
+<br/>Introduction
+<br/>For our CS229 project, we studied the problem of
+<br/>reliable computerized emotion recognition in images of
+<br/>human
+<br/>faces. First, we performed a preliminary
+<br/>exploration using SVM classifiers, and then developed an
+<br/>approach based on Deep Belief Nets. Deep Belief Nets, or
+<br/>DBNs, are probabilistic generative models composed of
+<br/>multiple layers of stochastic latent variables, where each
+<br/>“building block” layer is a Restricted Boltzmann Machine
+<br/>(RBM). DBNs have a greedy layer-wise unsupervised
+<br/>learning algorithm as well as a discriminative fine-tuning
+<br/>procedure for optimizing performance on classification
+<br/>tasks. [1].
+<br/>We trained our classifier on three databases: the
+<br/>Cohn-Kanade Extended Database (CK+) [2], the Japanese
+<br/>Female Facial Expression Database (JAFFE) [3], and the
+<br/>Yale Face Database (YALE) [4]. We tested several
+<br/>different database configurations, image pre-processing
+<br/>settings, and DBN parameters, and obtained test errors as
+<br/>low as 20% on a limited subset of the emotion labels.
+<br/>Finally, we created a real-time system which takes
+<br/>images of a single subject using a computer webcam and
+<br/>classifies the emotion shown by the subject.
+<br/>Part 1: Exploration of SVM-based approaches
+<br/>To set a baseline for comparison, we applied an
+<br/>SVM classifier to the emotion images in the CK+
+<br/>database, using the LIBLINEAR library and its MATLAB
+<br/>interface [5]. This database contains 593 image sequences
+<br/>across 123 human subjects, beginning with a “neutral
+<br/>“expression and showing the progression to one of seven
+<br/>“peak” emotions. When given both a neutral and an
+<br/>expressive face to compare, the SVM obtained accuracy
+<br/>as high as 90%. This
+<br/>the
+<br/>implementation of the SVM classifier. For additional
+<br/>details on this stage of the project, please see our
+<br/>Milestone document.
+<br/>Part 1.1 Choice of labels (emotion numbers vs. FACS
+<br/>features)
+<br/>The CK+ database offers two sets of emotion
+<br/>features: “emotion numbers” and FACS features. Emotion
+<br/>numbers are integer values representing the main emotion
+<br/>shown in the “peak emotion” image. The emotions are
+<br/>coded as follows: 1=anger, 2=contempt, 3=disgust,
+<br/>4=fear, 5=happiness, 6=sadness, and 7=surprise.
+<br/>The other labeling option is called FACS, or the
+<br/>Facial Action Coding System. FACS decomposes every
+<br/>summarizes
+<br/>section
+<br/>facial emotion into a set of Action Units (AUs), which
+<br/>describe the specific muscle groups involved in forming
+<br/>the emotion. We chose not to use FACS because accurate
+<br/>labeling currently requires trained human experts [8], and
+<br/>we are interesting in creating an automated system.
+<br/>
+<br/>Part 1.2 Features
+<br/>Part 1.2.1 Norm of differences between neutral face
+<br/>and full emotion
+<br/>Each of the CK+ images has been hand-labeled with
+<br/>68 standard Active Appearance Models (AAM) face
+<br/>landmarks that describe the X and Y position of these
+<br/>landmarks on the image (Figure 1).
+<br/>Figure 1. AAM Facial Landmarks
+<br/>We initially trained the SVM on the norm of the
+<br/>vector differences in landmark positions between the
+<br/>neutral and peak expressions. With this approach, the
+<br/>training error was approximately 35% for hold out cross
+<br/>validation (see Figure 2).
+<br/>with
+<br/>Figure 3. Accuracy of
+<br/>SVM with separate X, Y
+<br/>displacement features.
+<br/>Figure 2. Accuracy of
+<br/>SVM
+<br/>norm-
+<br/>displacement features.
+<br/>Part 1.2.2 Separate X and Y differences between
+<br/>neutral face and full emotion
+<br/>Because the initial approach did not differentiate
+<br/>between displacements of
+<br/>in different
+<br/>directions, we also provided the differences in the X and
+<br/>Y components of each landmark separately. This doubled
+<br/>the size of our feature vector, and resulting in a significant
+<br/>(about 20%) improvement in accuracy (Figure 3).
+<br/>Part 1.2.3 Feature Selection
+<br/>landmarks
+<br/>Finally, we visualized which features were the most
+<br/>important for classifying each emotion; the results can be
+<br/>seen in Figure 4. The figure shows the X and Y
+</td></tr><tr><td>153f5ad54dd101f7f9c2ae17e96c69fe84aa9de4</td><td>Overview of algorithms for face detection and
+<br/>tracking
+<br/>Nenad Markuˇs
+</td></tr><tr><td>15136c2f94fd29fc1cb6bedc8c1831b7002930a6</td><td>Deep Learning Architectures for Face
+<br/>Recognition in Video Surveillance
+</td></tr><tr><td>153e5cddb79ac31154737b3e025b4fb639b3c9e7</td><td>PREPRINT SUBMITTED TO IEEE TRANSACTIONS ON NEURAL NETWORKS AND LEARNING SYSTEMS
+<br/>Active Dictionary Learning in Sparse
+<br/>Representation Based Classification
+</td></tr><tr><td>15e0b9ba3389a7394c6a1d267b6e06f8758ab82b</td><td>Xu et al. IPSJ Transactions on Computer Vision and
+<br/>Applications (2017) 9:24
+<br/>DOI 10.1186/s41074-017-0035-2
+<br/>IPSJ Transactions on Computer
+<br/>Vision and Applications
+<br/>TECHNICAL NOTE
+<br/>Open Access
+<br/>The OU-ISIR Gait Database comprising the
+<br/>Large Population Dataset with Age and
+<br/>performance evaluation of age estimation
+</td></tr><tr><td>15aa6c457678e25f6bc0e818e5fc39e42dd8e533</td><td></td></tr><tr><td>15f3d47b48a7bcbe877f596cb2cfa76e798c6452</td><td>Automatic face analysis tools for interactive digital games
+<br/>Anonymised for blind review
+<br/>Anonymous
+<br/>Anonymous
+<br/>Anonymous
+</td></tr><tr><td>15728d6fd5c9fc20b40364b733228caf63558c31</td><td></td></tr><tr><td>1513949773e3a47e11ab87d9a429864716aba42d</td><td></td></tr><tr><td>153c8715f491272b06dc93add038fae62846f498</td><td></td></tr><tr><td>122ee00cc25c0137cab2c510494cee98bd504e9f</td><td>The Application of
+<br/>Active Appearance Models to
+<br/>Comprehensive Face Analysis
+<br/>Technical Report
+<br/>TU M¨unchen
+<br/>April 5, 2007
+</td></tr><tr><td>1287bfe73e381cc8042ac0cc27868ae086e1ce3b</td><td></td></tr><tr><td>12cb3bf6abf63d190f849880b1703ccc183692fe</td><td>Guess Who?: A game to crowdsource the labeling of affective facial
+<br/>expressions is comparable to expert ratings.
+<br/>Graduation research project, june 2012
+<br/>Supervised by: Dr. Joost Broekens
+<br/><b></b></td></tr><tr><td>12cd96a419b1bd14cc40942b94d9c4dffe5094d2</td><td>29
+<br/>Proceedings of the 5th Workshop on Vision and Language, pages 29–38,
+<br/>Berlin, Germany, August 12 2016. c(cid:13)2016 Association for Computational Linguistics
+</td></tr><tr><td>12055b8f82d5411f9ad196b60698d76fbd07ac1e</td><td>1475
+<br/>Multiview Facial Landmark Localization in RGB-D
+<br/>Images via Hierarchical Regression
+<br/>With Binary Patterns
+</td></tr><tr><td>120785f9b4952734818245cc305148676563a99b</td><td>Diagnostic automatique de l’état dépressif
+<br/>S. Cholet
+<br/>H. Paugam-Moisy
+<br/>Laboratoire de Mathématiques Informatique et Applications (LAMIA - EA 4540)
+<br/>Université des Antilles, Campus de Fouillole - Guadeloupe
+<br/>Résumé
+<br/>Les troubles psychosociaux sont un problème de santé pu-
+<br/>blique majeur, pouvant avoir des conséquences graves sur
+<br/>le court ou le long terme, tant sur le plan professionnel que
+<br/>personnel ou familial. Le diagnostic de ces troubles doit
+<br/>être établi par un professionnel. Toutefois, l’IA (l’Intelli-
+<br/>gence Artificielle) peut apporter une contribution en four-
+<br/>nissant au praticien une aide au diagnostic, et au patient
+<br/>un suivi permanent rapide et peu coûteux. Nous proposons
+<br/>une approche vers une méthode de diagnostic automatique
+<br/>de l’état dépressif à partir d’observations du visage en
+<br/>temps réel, au moyen d’une simple webcam. A partir de
+<br/>vidéos du challenge AVEC’2014, nous avons entraîné un
+<br/>classifieur neuronal à extraire des prototypes de visages
+<br/>selon différentes valeurs du score de dépression de Beck
+<br/>(BDI-II).
+</td></tr><tr><td>12c713166c46ac87f452e0ae383d04fb44fe4eb2</td><td></td></tr><tr><td>12150d8b51a2158e574e006d4fbdd3f3d01edc93</td><td>Deep End2End Voxel2Voxel Prediction
+<br/>Presented by: Ahmed Osman
+<br/>Ahmed Osman
+</td></tr><tr><td>8c13f2900264b5cf65591e65f11e3f4a35408b48</td><td>A GENERIC FACE REPRESENTATION APPROACH FOR
+<br/>LOCAL APPEARANCE BASED FACE VERIFICATION
+<br/>Interactive Systems Labs, Universität Karlsruhe (TH)
+<br/>76131 Karlsruhe, Germany
+<br/>web: http://isl.ira.uka.de/face_recognition/
+</td></tr><tr><td>8cb3f421b55c78e56c8a1c1d96f23335ebd4a5bf</td><td></td></tr><tr><td>8c955f3827a27e92b6858497284a9559d2d0623a</td><td>Buletinul Ştiinţific al Universităţii "Politehnica" din Timişoara
+<br/>Seria ELECTRONICĂ şi TELECOMUNICAŢII
+<br/>TRANSACTIONS on ELECTRONICS and COMMUNICATIONS
+<br/>Tom 53(67), Fascicola 1-2, 2008
+<br/>Facial Expression Recognition under Noisy Environment
+<br/>Using Gabor Filters
+</td></tr><tr><td>8ce9b7b52d05701d5ef4a573095db66ce60a7e1c</td><td>Structured Sparse Subspace Clustering: A Joint
+<br/>Affinity Learning and Subspace Clustering
+<br/>Framework
+</td></tr><tr><td>8c6c0783d90e4591a407a239bf6684960b72f34e</td><td>SESSION
+<br/>KNOWLEDGE ENGINEERING AND
+<br/>MANAGEMENT + KNOWLEDGE ACQUISITION
+<br/>Chair(s)
+<br/>TBA
+<br/>Int'l Conf. Information and Knowledge Engineering | IKE'13 |1 </td></tr><tr><td>8509abbde2f4b42dc26a45cafddcccb2d370712f</td><td>Improving precision and recall of face recognition in SIPP with combination of
+<br/>modified mean search and LSH
+<br/>Xihua.Li
+</td></tr><tr><td>855bfc17e90ec1b240efba9100fb760c068a8efa</td><td></td></tr><tr><td>858ddff549ae0a3094c747fb1f26aa72821374ec</td><td>Survey on RGB, 3D, Thermal, and Multimodal
+<br/>Approaches for Facial Expression Recognition:
+<br/>History, Trends, and Affect-related Applications
+</td></tr><tr><td>858901405086056361f8f1839c2f3d65fc86a748</td><td>ON TENSOR TUCKER DECOMPOSITION: THE CASE FOR AN
+<br/>ADJUSTABLE CORE SIZE
+</td></tr><tr><td>85188c77f3b2de3a45f7d4f709b6ea79e36bd0d9</td><td>Author manuscript, published in "Workshop on Faces in 'Real-Life' Images: Detection, Alignment, and Recognition, Marseille :
+<br/>France (2008)"
+</td></tr><tr><td>8518b501425f2975ea6dcbf1e693d41e73d0b0af</td><td>Relative Hidden Markov Models for Evaluating Motion Skills
+<br/>Computer Science and Engineering
+<br/>Arizona State Univerisity, Tempe, AZ 85281
+</td></tr><tr><td>854dbb4a0048007a49df84e3f56124d387588d99</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 13, NO. 9, SEPTEMBER 2014
+<br/>Spatial-Temporal Recurrent Neural Network for
+<br/>Emotion Recognition
+</td></tr><tr><td>1dbbec4ad8429788e16e9f3a79a80549a0d7ac7b</td><td></td></tr><tr><td>1d7ecdcb63b20efb68bcc6fd99b1c24aa6508de9</td><td>1860
+<br/>The Hidden Sides of Names—Face Modeling
+<br/>with First Name Attributes
+</td></tr><tr><td>1d846934503e2bd7b8ea63b2eafe00e29507f06a</td><td></td></tr><tr><td>1d0dd20b9220d5c2e697888e23a8d9163c7c814b</td><td>NEGREL ET AL.: BOOSTED METRIC LEARNING FOR FACE RETRIEVAL
+<br/>Boosted Metric Learning for Efficient
+<br/>Identity-Based Face Retrieval
+<br/>Frederic Jurie
+<br/>GREYC, CNRS UMR 6072, ENSICAEN
+<br/>Université de Caen Basse-Normandie
+<br/>France
+</td></tr><tr><td>1d776bfe627f1a051099997114ba04678c45f0f5</td><td>Deployment of Customized Deep Learning based
+<br/>Video Analytics On Surveillance Cameras
+<br/>AitoeLabs (www.aitoelabs.com)
+</td></tr><tr><td>1d3e01d5e2721dcfafe5a3b39c54ee1c980350bb</td><td></td></tr><tr><td>1de8f38c35f14a27831130060810cf9471a62b45</td><td>Int J Comput Vis
+<br/>DOI 10.1007/s11263-017-0989-7
+<br/>A Branch-and-Bound Framework for Unsupervised Common
+<br/>Event Discovery
+<br/>Received: 3 June 2016 / Accepted: 12 January 2017
+<br/>© Springer Science+Business Media New York 2017
+</td></tr><tr><td>1da83903c8d476c64c14d6851c85060411830129</td><td>Iterated Support Vector Machines for Distance
+<br/>Metric Learning
+</td></tr><tr><td>1d6068631a379adbcff5860ca2311b790df3a70f</td><td></td></tr><tr><td>1d58d83ee4f57351b6f3624ac7e727c944c0eb8d</td><td>Enhanced Local Texture
+<br/>Feature Sets for Face
+<br/>Recognition under Difficult
+<br/>Lighting Conditions
+<br/>INRIA & Laboratoire Jean
+<br/>Kuntzmann,
+<br/>655 avenue de l'Europe, Montbonnot 38330, France
+</td></tr><tr><td>71b376dbfa43a62d19ae614c87dd0b5f1312c966</td><td>The Temporal Connection Between Smiles and Blinks
+</td></tr><tr><td>714d487571ca0d676bad75c8fa622d6f50df953b</td><td>eBear: An Expressive Bear-Like Robot
+</td></tr><tr><td>710011644006c18291ad512456b7580095d628a2</td><td>Learning Residual Images for Face Attribute Manipulation
+<br/>Fujitsu Research & Development Center, Beijing, China.
+</td></tr><tr><td>76fd801981fd69ff1b18319c450cb80c4bc78959</td><td>Proceedings of the 11th International Conference on Computational Semantics, pages 76–81,
+<br/>London, UK, April 15-17 2015. c(cid:13)2015 Association for Computational Linguistics
+<br/>76
+</td></tr><tr><td>76dc11b2f141314343d1601635f721fdeef86fdb</td><td>Weighted Decoding ECOC for Facial
+<br/>Action Unit Classification
+</td></tr><tr><td>760a712f570f7a618d9385c0cee7e4d0d6a78ed2</td><td></td></tr><tr><td>76b9fe32d763e9abd75b427df413706c4170b95c</td><td></td></tr><tr><td>76d9f5623d3a478677d3f519c6e061813e58e833</td><td>FAST ALGORITHMS FOR THE GENERALIZED FOLEY-SAMMON
+<br/>DISCRIMINANT ANALYSIS
+</td></tr><tr><td>765b2cb322646c52e20417c3b44b81f89860ff71</td><td>PoseShop: Human Image Database
+<br/>Construction and Personalized
+<br/>Content Synthesis
+</td></tr><tr><td>7644d90efef157e61fe4d773d8a3b0bad5feccec</td><td></td></tr><tr><td>760ba44792a383acd9ca8bef45765d11c55b48d4</td><td>~
+<br/>I .
+<br/>INTRODUCTION AND BACKGROUND
+<br/>The purpose of this article is to introduce the
+<br/>reader to the basic principles of classification with
+<br/>class-specific features. It is written both for readers
+<br/>interested in only the basic concepts as well as those
+<br/>interested in getting started in applying the method.
+<br/>For in-depth coverage, the reader is referred to a more
+<br/>detailed article [l].
+<br/>Class-Specific Classifier:
+<br/>Avoiding the Curse of
+<br/>Dimensionality
+<br/>PAUL M. BAGGENSTOSS, Member. lEEE
+<br/>US. Naval Undersea Warfare Center
+<br/>This article describes a new probabilistic method called the
+<br/>“class-specific method” (CSM). CSM has the potential to avoid
+<br/>the “curse of dimensionality” which plagues most clmiiiers
+<br/>which attempt to determine the decision boundaries in a
+<br/>highdimensional featue space. In contrast, in CSM, it is possible
+<br/>to build classifiers without a ” n o n feature space. Separate
+<br/>Law-dimensional features seta may be de6ned for each class, while
+<br/>the decision funetions are projected back to the common raw data
+<br/>space. CSM eflectively extends the classical classification theory
+<br/>to handle multiple feature spaw.. It is completely general, and
+<br/>requires no s i m p l i n g assumption such as Gaussianity or that
+<br/>data lies in linear subspaces.
+<br/>Manuscript received September 26, 2W2; revised February 12,
+<br/>2003.
+<br/>This work was supported by the Office of Naval Research.
+<br/>Author’s address: US. Naval Undersea Warfare Center, Newport
+<br/>Classification is the process of assigning data
+<br/>to one of a set of pre-determined class labels [2].
+<br/>Classification is a fundamental problem that has
+<br/>to be solved if machines are to approximate the
+<br/>human functions of recognizing sounds, images, or
+<br/>other sensory inputs. This is why classification is so
+<br/>important for automation in today’s commercial and
+<br/>military arenas.
+<br/>Many of us have first-hand knowledge of
+<br/>successful automated recognition systems from
+<br/>cameras that recognize faces in airports to computers
+<br/>that can scan and read printed and handwritten text,
+<br/>or systems that can recognize human speech. These
+<br/>systems are becoming more and more reliable and
+<br/>accurate. Given reasonably clean input data, the
+<br/>performance is often quite good if not perfect. But
+<br/>many of these systems fail in applications where
+<br/>clean, uncorrupted data is not available or if the
+<br/>problem is complicated by variability of conditions
+<br/>or by proliferation of inputs from unknown sources.
+<br/>In military environments, the targets to he recognized
+<br/>are often uncooperative and hidden in clutter and
+<br/>interference. In short, military uses of such systems
+<br/>still fall far short of what a well-trained alert human
+<br/>operator can achieve.
+<br/>We are often perplexed by the wide gap of
+<br/>as a car door slamming. From
+<br/>performance between humans and automated systems.
+<br/>Allow a human listener to hear two or three examples
+<br/>of a sound-such
+<br/>these few examples, the human can recognize
+<br/>the sound again and not confuse it with similar
+<br/>interfering sounds. But try the same experiment with
+<br/>general-purpose classifiers using neural networks
+<br/>and the story is quite different. Depending on the
+<br/>problem, the automated system may require hundreds,
+<br/>thousands, even millions of examples for training
+<br/>before it becomes both robust and reliable.
+<br/>Why? The answer lies in what is known as the
+<br/>“curse of dimensionality.” General-purpose classifiers
+<br/>need to extract a large number of measurements,
+<br/>or features, from the data to account for all the
+<br/>different possibilities of data types. The large
+<br/>collection of features form a high-dimensional space
+<br/>that the classifier has to sub-divide into decision
+<br/>boundaries. It is well-known that the complexity of
+<br/>a high-dimensional space increases exponentially
+<br/>with the number of measurements [31-and
+<br/>so does
+<br/>the difficulty of finding the hest decision boundaries
+<br/>from a fixed amount of training data. Unless a lot
+<br/>EEE A&E SYSTEMS MAGAZINE VOL. 19, NO. 1 JANUARY 2004 PART 2: TUTORIALS-BAGGENSTOSS
+<br/>37
+</td></tr><tr><td>766728bac030b169fcbc2fbafe24c6e22a58ef3c</td><td>A survey of deep facial landmark detection
+<br/>Yongzhe Yan1,2
+<br/>Thierry Chateau1
+<br/>1 Université Clermont Auvergne, France
+<br/>2 Wisimage, France
+<br/>3 Université de Lyon, CNRS, INSA Lyon, LIRIS, UMR5205, Lyon, France
+<br/>Résumé
+<br/>La détection de landmarks joue un rôle crucial dans de
+<br/>nombreuses applications d’analyse du visage comme la
+<br/>reconnaissance de l’identité, des expressions, l’animation
+<br/>d’avatar, la reconstruction 3D du visage, ainsi que pour
+<br/>les applications de réalité augmentée comme la pose de
+<br/>masque ou de maquillage virtuel. L’avènement de l’ap-
+<br/>prentissage profond a permis des progrès très importants
+<br/>dans ce domaine, y compris sur les corpus non contraints
+<br/>(in-the-wild). Nous présentons ici un état de l’art cen-
+<br/>tré sur la détection 2D dans une image fixe, et les mé-
+<br/>thodes spécifiques pour la vidéo. Nous présentons ensuite
+<br/>les corpus existants pour ces trois tâches, ainsi que les mé-
+<br/>triques d’évaluations associées. Nous exposons finalement
+<br/>quelques résultats, ainsi que quelques pistes de recherche.
+<br/>Mots Clef
+<br/>Détection de landmark facial, Alignement de visage, Deep
+<br/>learning
+</td></tr><tr><td>7697295ee6fc817296bed816ac5cae97644c2d5b</td><td>Detecting and Recognizing Human-Object Interactions
+<br/>Facebook AI Research (FAIR)
+</td></tr><tr><td>1c80bc91c74d4984e6422e7b0856cf3cf28df1fb</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Hierarchical Adaptive Structural SVM for Domain Adaptation
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>1ce4587e27e2cf8ba5947d3be7a37b4d1317fbee</td><td>Deep fusion of visual signatures
+<br/>for client-server facial analysis
+<br/>Normandie Univ, UNICAEN,
+<br/>ENSICAEN, CNRS, GREYC
+<br/>Computer Sc. & Engg.
+<br/>IIT Kanpur, India
+<br/>Frederic Jurie
+<br/>Normandie Univ, UNICAEN,
+<br/>ENSICAEN, CNRS, GREYC
+<br/>Facial analysis is a key technology for enabling human-
+<br/>machine interaction.
+<br/>In this context, we present a client-
+<br/>server framework, where a client transmits the signature of
+<br/>a face to be analyzed to the server, and, in return, the server
+<br/>sends back various information describing the face e.g. is the
+<br/>person male or female, is she/he bald, does he have a mus-
+<br/>tache, etc. We assume that a client can compute one (or a
+<br/>combination) of visual features; from very simple and effi-
+<br/>cient features, like Local Binary Patterns, to more complex
+<br/>and computationally heavy, like Fisher Vectors and CNN
+<br/>based, depending on the computing resources available. The
+<br/>challenge addressed in this paper is to design a common uni-
+<br/>versal representation such that a single merged signature is
+<br/>transmitted to the server, whatever be the type and num-
+<br/>ber of features computed by the client, ensuring nonetheless
+<br/>an optimal performance. Our solution is based on learn-
+<br/>ing of a common optimal subspace for aligning the different
+<br/>face features and merging them into a universal signature.
+<br/>We have validated the proposed method on the challenging
+<br/>CelebA dataset, on which our method outperforms existing
+<br/>state-of-art methods when rich representation is available at
+<br/>test time, while giving competitive performance when only
+<br/>simple signatures (like LBP) are available at test time due
+<br/>to resource constraints on the client.
+<br/>1.
+<br/>INTRODUCTION
+<br/>We propose a novel method in a heterogeneous server-
+<br/>client framework for the challenging and important task of
+<br/>analyzing images of faces. Facial analysis is a key ingredient
+<br/>for assistive computer vision and human-machine interaction
+<br/>methods, and systems and incorporating high-performing
+<br/>methods in daily life devices is a challenging task. The ob-
+<br/>jective of the present paper is to develop state-of-the-art
+<br/>technologies for recognizing facial expressions and facial at-
+<br/>tributes on mobile and low cost devices. Depending on their
+<br/>computing resources, the clients (i.e. the devices on which
+<br/>the face image is taken) are capable of computing different
+<br/>types of face signatures, from the simplest ones (e.g. LPB)
+<br/>to the most complex ones (e.g. very deep CNN features), and
+<br/>should be able to eventually combine them into a single rich
+<br/>signature. Moreover, it is convenient if the face analyzer,
+<br/>which might require significant computing resources, is im-
+<br/>plemented on a server receiving face signatures and comput-
+<br/>ing facial expressions and attributes from these signatures.
+<br/>Keeping the computation of the signatures on the client is
+<br/>safer in terms of privacy, as the original images are not trans-
+<br/>mitted, and keeping the analysis part on the server is also
+<br/>beneficial for easy model upgrades in the future. To limit
+<br/>the transmission costs, the signatures have to be made as
+<br/>compact as possible.
+<br/>In summary, the technology needed
+<br/>for this scenario has to be able to merge the different avail-
+<br/>able features – the number of features available at test time
+<br/>is not known in advance but is dependent on the computing
+<br/>resources available on the client – producing a unique rich
+<br/>and compact signature of the face, which can be transmitted
+<br/>and analyzed by a server. Ideally, we would like the univer-
+<br/>sal signature to have the following properties: when all the
+<br/>features are available, we would like the performance of the
+<br/>signature to be better than the one of a system specifically
+<br/>optimized for any single type of feature.
+<br/>In addition, we
+<br/>would like to have reasonable performance when only one
+<br/>type of feature is available at test time.
+<br/>For developing such a system, we propose a hybrid deep
+<br/>neural network and give a method to carefully fine-tune the
+<br/>network parameters while learning with all or a subset of
+<br/>features available. Thus, the proposed network can process a
+<br/>number of wide ranges of feature types such as hand-crafted
+<br/>LBP and FV, or even CNN features which are learned end-
+<br/>to-end.
+<br/>While CNNs have been quite successful in computer vi-
+<br/>sion [1], representing images with CNN features is relatively
+<br/>time consuming, much more than some simple hand-crafted
+<br/>features such as LBP. Thus, the use of CNN in real-time ap-
+<br/>plications is still not feasible. In addition, the use of robust
+<br/>hand-crafted features such as FV in hybrid architectures can
+<br/>give performance comparable to Deep CNN features [2]. The
+<br/>main advantage of learning hybrid architectures is to avoid
+<br/>having large numbers of convolutional and pooling layers.
+<br/>Again from [2], we can also observe that hybrid architec-
+<br/>tures improve the performance of hand-crafted features e.g.
+<br/>FVs. Therefore, hybrid architectures are useful for the cases
+<br/>where only hand-crafted features, and not the original im-
+<br/>ages, are available during training and testing time. This
+<br/>scenario is useful when it is not possible to share training
+<br/>images due to copyright or privacy issues.
+<br/>Hybrid networks are particularly adapted to our client-
+</td></tr><tr><td>1c3073b57000f9b6dbf1c5681c52d17c55d60fd7</td><td>THÈSEprésentéepourl’obtentiondutitredeDOCTEURDEL’ÉCOLENATIONALEDESPONTSETCHAUSSÉESSpécialité:InformatiqueparCharlotteGHYSAnalyse,Reconstruction3D,&AnimationduVisageAnalysis,3DReconstruction,&AnimationofFacesSoutenancele19mai2010devantlejurycomposéde:Rapporteurs:MajaPANTICDimitrisSAMARASExaminateurs:MichelBARLAUDRenaudKERIVENDirectiondethèse:NikosPARAGIOSBénédicteBASCLE </td></tr><tr><td>1c93b48abdd3ef1021599095a1a5ab5e0e020dd5</td><td>JOURNAL OF LATEX CLASS FILES, VOL. *, NO. *, JANUARY 2009
+<br/>A Compositional and Dynamic Model for Face Aging
+</td></tr><tr><td>1c6be6874e150898d9db984dd546e9e85c85724e</td><td></td></tr><tr><td>1c65f3b3c70e1ea89114f955624d7adab620a013</td><td></td></tr><tr><td>1c6e22516ceb5c97c3caf07a9bd5df357988ceda</td><td></td></tr><tr><td>82bef8481207de9970c4dc8b1d0e17dced706352</td><td></td></tr><tr><td>825f56ff489cdd3bcc41e76426d0070754eab1a8</td><td>Making Convolutional Networks Recurrent for Visual Sequence Learning
+<br/>NVIDIA
+</td></tr><tr><td>82d2af2ffa106160a183371946e466021876870d</td><td>A Novel Space-Time Representation on the Positive Semidefinite Cone
+<br/>for Facial Expression Recognition
+<br/>1IMT Lille Douai, Univ. Lille, CNRS, UMR 9189 – CRIStAL –
+<br/>Centre de Recherche en Informatique Signal et Automatique de Lille, F-59000 Lille, France
+<br/>2Univ. Lille, CNRS, UMR 8524, Laboratoire Paul Painlev´e, F-59000 Lille, France.
+</td></tr><tr><td>8210fd10ef1de44265632589f8fc28bc439a57e6</td><td>Single Sample Face Recognition via Learning Deep
+<br/>Supervised Auto-Encoders
+<br/>Shenghua Gao, Yuting Zhang, Kui Jia, Jiwen Lu, Yingying Zhang
+</td></tr><tr><td>82a4a35b2bae3e5c51f4d24ea5908c52973bd5be</td><td>Real-time emotion recognition for gaming using
+<br/>deep convolutional network features
+<br/>S´ebastien Ouellet
+</td></tr><tr><td>82f4e8f053d20be64d9318529af9fadd2e3547ef</td><td>Technical Report:
+<br/>Multibiometric Cryptosystems
+</td></tr><tr><td>82d781b7b6b7c8c992e0cb13f7ec3989c8eafb3d</td><td>141
+<br/>REFERENCES
+<br/>1.
+<br/>2.
+<br/>3.
+<br/>4.
+<br/>5.
+<br/>6.
+<br/>7.
+<br/>8.
+<br/>9.
+<br/>Adler A., Youmaran R. and Loyka S., “Towards a Measure of
+<br/>Biometric Information”, Canadian Conference on Electrical and
+<br/>Computer Engineering, pp. 210-213, 2006.
+<br/>Military Academy, West Point, New York, pp. 452-458, 2005.
+<br/>Security and Trust, St. Andrews, New Brunswick, Canada, pp. 1-8,
+<br/>2005.
+<br/>Structural Model for Biometric Sketch Recognition”, Proceedings of
+<br/>DAGM, Magdeburg, Germany, Vol. 2781, pp. 187-195, 2003.
+<br/>of Security”, The First UAE International Conference on Biological
+<br/>and Medical Physics, pp. 1-4, 2005.
+<br/>Avraam Kasapis., “MLPs and Pose, Expression Classification”,
+<br/>Proceedings of UNiS Report, pp. 1-87, 2003.
+<br/>Detection for Storage Area Networks (SANs)”, Proceedings of 22nd
+<br/>IEEE / 13th NASA Goddard Conference on Mass Storage Systems and
+<br/>Technologies, pp. 118-127, 2005.
+<br/>Black M.J. and Yacoob Y., “Recognizing Facial Expressions in Image
+<br/>Sequences using Local Parameterized Models of Image Motion”, Int.
+<br/>Journal Computer Vision, Vol. 25, No. 1, pp. 23-48, 1997.
+<br/>10.
+<br/>Recognition using a State-Based Model of Spatially-Localized Facial
+</td></tr><tr><td>82417d8ec8ac6406f2d55774a35af2a1b3f4b66e</td><td>Some faces are more equal than others:
+<br/>Hierarchical organization for accurate and
+<br/>efficient large-scale identity-based face retrieval
+<br/>GREYC, CNRS UMR 6072, Universit´e de Caen Basse-Normandie, France1
+<br/>Technicolor, Rennes, France2
+</td></tr><tr><td>826c66bd182b54fea3617192a242de1e4f16d020</td><td>978-1-5090-4117-6/17/$31.00 ©2017 IEEE
+<br/>1602
+<br/>ICASSP 2017
+</td></tr><tr><td>4972aadcce369a8c0029e6dc2f288dfd0241e144</td><td>Multi-target Unsupervised Domain Adaptation
+<br/>without Exactly Shared Categories
+</td></tr><tr><td>49dd4b359f8014e85ed7c106e7848049f852a304</td><td></td></tr><tr><td>49e85869fa2cbb31e2fd761951d0cdfa741d95f3</td><td>253
+<br/>Adaptive Manifold Learning
+</td></tr><tr><td>49659fb64b1d47fdd569e41a8a6da6aa76612903</td><td></td></tr><tr><td>49a7949fabcdf01bbae1c2eb38946ee99f491857</td><td>A CONCATENATING FRAMEWORK OF SHORTCUT
+<br/>CONVOLUTIONAL NEURAL NETWORKS
+</td></tr><tr><td>49e1aa3ecda55465641b2c2acc6583b32f3f1fc6</td><td>International Journal of Emerging Technology and Advanced Engineering
+<br/>Website: www.ijetae.com (ISSN 2250-2459, Volume 2, Issue 5, May 2012)
+<br/>Support Vector Machine for age classification
+<br/>1Assistant Professor, CSE, RSR RCET, Kohka Bhilai
+<br/>2,3 Sr. Assistant Professor, CSE, SSCET, Junwani Bhilai
+</td></tr><tr><td>49df381ea2a1e7f4059346311f1f9f45dd997164</td><td>2018
+<br/>On the Use of Client-Specific Information for Face
+<br/>Presentation Attack Detection Based on Anomaly
+<br/>Detection
+</td></tr><tr><td>40205181ed1406a6f101c5e38c5b4b9b583d06bc</td><td>Using Context to Recognize People in Consumer Images
+</td></tr><tr><td>40dab43abef32deaf875c2652133ea1e2c089223</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Facial Communicative Signals
+<br/>Valence Recognition in Task-Oriented Human-Robot Interaction
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>405b43f4a52f70336ac1db36d5fa654600e9e643</td><td>What can we learn about CNNs from a large scale controlled object dataset?
+<br/>UWM
+<br/>AUT
+<br/>USC
+</td></tr><tr><td>40b86ce698be51e36884edcc8937998979cd02ec</td><td>Yüz ve İsim İlişkisi kullanarak Haberlerdeki Kişilerin Bulunması
+<br/>Finding Faces in News Photos Using Both Face and Name Information
+<br/>Derya Ozkan, Pınar Duygulu
+<br/>Bilgisayar Mühendisliği Bölümü, Bilkent Üniversitesi, 06800, Ankara
+<br/>Özetçe
+<br/>Bu çalışmada, haber fotoğraflarından oluşan geniş veri
+<br/>kümelerinde kişilerin sorgulanmasını sağlayan bir yöntem
+<br/>sunulmuştur. Yöntem isim ve yüzlerin ilişkilendirilmesine
+<br/>dayanmaktadır. Haber başlığında kişinin ismi geçiyor ise
+<br/>fotoğrafta da o kişinin yüzünün bulunacağı varsayımıyla, ilk
+<br/>olarak sorgulanan isim ile ilişkilendirilmiş, fotoğraflardaki
+<br/>tüm yüzler seçilir. Bu yüzler arasında sorgu kişisine ait farklı
+<br/>koşul, poz ve zamanlarda çekilmiş pek çok resmin yanında,
+<br/>haberde ismi geçen başka kişilere ait yüzler ya da kullanılan
+<br/>yüz bulma yönteminin hatasından kaynaklanan yüz olmayan
+<br/>resimler de bulunabilir. Yine de, çoğu zaman, sorgu kişisine
+<br/>ait resimler daha çok olup, bu resimler birbirine diğerlerine
+<br/>olduğundan daha çok benzeyeceklerdir. Bu nedenle, yüzler
+<br/>arasındaki benzerlikler çizgesel olarak betimlendiğinde ,
+<br/>birbirine en çok benzeyen yüzler bu çizgede en yoğun bileşen
+<br/>olacaktır. Bu çalışmada, sorgu ismiyle ilişkilendirilmiş,
+<br/>yüzler arasında birbirine en çok benzeyen alt kümeyi bulan,
+<br/>çizgeye dayalı bir yöntem sunulmaktadır.
+</td></tr><tr><td>402f6db00251a15d1d92507887b17e1c50feebca</td><td>3D Facial Action Units Recognition for Emotional
+<br/>Expression
+<br/>1Department of Information Technology and Communication, Politeknik Kuching, Sarawak, Malaysia
+<br/>2Faculty of Computer Science and Information Technology, Universiti Malaysia Sarawak, Kota Samarahan, Sarawak, Malaysia
+<br/>The muscular activities caused the activation of certain AUs for every facial expression at the certain duration of time
+<br/>throughout the facial expression. This paper presents the methods to recognise facial Action Unit (AU) using facial distance
+<br/>of the facial features which activates the muscles. The seven facial action units involved are AU1, AU4, AU6, AU12, AU15,
+<br/>AU17 and AU25 that characterises happy and sad expression. The recognition is performed on each AU according to rules
+<br/>defined based on the distance of each facial points. The facial distances chosen are extracted from twelve facial features.
+<br/>Then the facial distances are trained using Support Vector Machine (SVM) and Neural Network (NN). Classification result
+<br/>using SVM is presented with several different SVM kernels while result using NN is presented for each training, validation
+<br/>and testing phase.
+<br/>Keywords: Facial action units recognition, 3D AU recognition, facial expression
+<br/>
+</td></tr><tr><td>40fb4e8932fb6a8fef0dddfdda57a3e142c3e823</td><td>A Mixed Generative-Discriminative Framework for Pedestrian Classification
+<br/>Dariu M. Gavrila2,3
+<br/>1 Image & Pattern Analysis Group, Dept. of Math. and Comp. Sc., Univ. of Heidelberg, Germany
+<br/>2 Environment Perception, Group Research, Daimler AG, Ulm, Germany
+<br/>3 Intelligent Systems Lab, Faculty of Science, Univ. of Amsterdam, The Netherlands
+</td></tr><tr><td>40cd062438c280c76110e7a3a0b2cf5ef675052c</td><td></td></tr><tr><td>40a1935753cf91f29ffe25f6c9dde2dc49bf2a3a</td><td>80
+</td></tr><tr><td>40a34d4eea5e32dfbcef420ffe2ce7c1ee0f23cd</td><td>Bridging Heterogeneous Domains With Parallel Transport For Vision and
+<br/>Multimedia Applications
+<br/>Dept. of Video and Multimedia Technologies Research
+<br/>AT&T Labs-Research
+<br/>San Francisco, CA 94108
+</td></tr><tr><td>40389b941a6901c190fb74e95dc170166fd7639d</td><td>Automatic Facial Expression Recognition
+<br/>Emotient
+<br/>http://emotient.com
+<br/>February 12, 2014
+<br/>Imago animi vultus est, indices oculi. (Cicero)
+<br/>Introduction
+<br/>The face is innervated by two different brain systems that compete for control of its muscles:
+<br/>a cortical brain system related to voluntary and controllable behavior, and a sub-cortical
+<br/>system responsible for involuntary expressions. The interplay between these two systems
+<br/>generates a wealth of information that humans constantly use to read the emotions, inten-
+<br/>tions, and interests [25] of others.
+<br/>Given the critical role that facial expressions play in our daily life, technologies that can
+<br/>interpret and respond to facial expressions automatically are likely to find a wide range of
+<br/>applications. For example, in pharmacology, the effect of new anti-depression drugs could
+<br/>be assessed more accurately based on daily records of the patients’ facial expressions than
+<br/>asking the patients to fill out a questionnaire, as it is currently done [7]. Facial expression
+<br/>recognition may enable a new generation of teaching systems to adapt to the expression
+<br/>of their students in the way good teachers do [61]. Expression recognition could be used
+<br/>to assess the fatigue of drivers and air-pilots [58, 59]. Daily-life robots with automatic
+<br/>expression recognition will be able to assess the states and intentions of humans and respond
+<br/>accordingly [41]. Smart phones with expression analysis may help people to prepare for
+<br/>important meetings and job interviews.
+<br/>Thanks to the introduction of machine learning methods, recent years have seen great
+<br/>progress in the field of automatic facial expression recognition. Commercial real-time ex-
+<br/>pression recognition systems are starting to be used in consumer applications, e.g., smile
+<br/>detectors embedded in digital cameras [62]. Nonetheless, considerable progress has yet to be
+<br/>made: Methods for face detection and tracking (the first step of automated face analysis)
+<br/>work well for frontal views of adult Caucasian and Asian faces [50], but their performance
+</td></tr><tr><td>40273657e6919455373455bd9a5355bb46a7d614</td><td>Anonymizing k-Facial Attributes via Adversarial Perturbations
+<br/>1 IIIT Delhi, New Delhi, India
+<br/>2 Ministry of Electronics and Information Technology, New Delhi, India
+</td></tr><tr><td>40b10e330a5511a6a45f42c8b86da222504c717f</td><td>Implementing the Viola-Jones
+<br/>Face Detection Algorithm
+<br/>Kongens Lyngby 2008
+<br/>IMM-M.Sc.-2008-93
+</td></tr><tr><td>40ca925befa1f7e039f0cd40d57dbef6007b4416</td><td>Sampling Matters in Deep Embedding Learning
+<br/>UT Austin
+<br/>A9/Amazon
+<br/>Amazon
+<br/>Philipp Kr¨ahenb¨uhl
+<br/>UT Austin
+</td></tr><tr><td>4042bbb4e74e0934f4afbedbe92dd3e37336b2f4</td><td></td></tr><tr><td>40f127fa4459a69a9a21884ee93d286e99b54c5f</td><td>Optimizing Apparent Display Resolution
+<br/>Enhancement for Arbitrary Videos
+</td></tr><tr><td>401e6b9ada571603b67377b336786801f5b54eee</td><td>Active Image Clustering: Seeking Constraints from
+<br/>Humans to Complement Algorithms
+<br/>November 22, 2011
+</td></tr><tr><td>2e20ed644e7d6e04dd7ab70084f1bf28f93f75e9</td><td></td></tr><tr><td>2e8e6b835e5a8f55f3b0bdd7a1ff765a0b7e1b87</td><td>International Journal of Computer Vision manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Pointly-Supervised Action Localization
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>2eb37a3f362cffdcf5882a94a20a1212dfed25d9</td><td>4
+<br/>Local Feature Based Face Recognition
+<br/>R.I.T., Rajaramnagar and S.G.G.S. COE &T, Nanded
+<br/>India
+<br/>1. Introduction
+<br/>A reliable automatic face recognition (AFR) system is a need of time because in today's
+<br/>networked world, maintaining the security of private information or physical property is
+<br/>becoming increasingly important and difficult as well. Most of the time criminals have been
+<br/>taking the advantage of fundamental flaws in the conventional access control systems i.e.
+<br/>the systems operating on credit card, ATM etc. do not grant access by "who we are", but by
+<br/>"what we have”. The biometric based access control systems have a potential to overcome
+<br/>most of the deficiencies of conventional access control systems and has been gaining the
+<br/>importance in recent years. These systems can be designed with biometric traits such as
+<br/>fingerprint, face, iris, signature, hand geometry etc. But comparison of different biometric
+<br/>traits shows that face is very attractive biometric because of its non-intrusiveness and social
+<br/>acceptability. It provides automated methods of verifying or recognizing the identity of a
+<br/>living person based on its facial characteristics.
+<br/>In last decade, major advances occurred in face recognition, with many systems capable of
+<br/>achieving recognition rates greater than 90%. However real-world scenarios remain a
+<br/>challenge, because face acquisition process can undergo to a wide range of variations. Hence
+<br/>the AFR can be thought as a very complex object recognition problem, where the object to be
+<br/>recognized is the face. This problem becomes even more difficult because the search is done
+<br/>among objects belonging to the same class and very few images of each class are available to
+<br/>train the system. Moreover different problems arise when images are acquired under
+<br/>uncontrolled conditions such as illumination variations, pose changes, occlusion, person
+<br/>appearance at different ages, expression changes and face deformations. The numbers of
+<br/>approaches has been proposed by various researchers to deal with these problems but still
+<br/>reported results cannot suffice the need of the reliable AFR system in presence of all facial
+<br/>image variations. A recent survey paper (Abate et al., 2007) states that the sensibility of the
+<br/>AFR systems to illumination and pose variations are the main problems researchers have
+<br/>been facing up till.
+<br/>2. Face recognition methods
+<br/>The existing face recognition methods can be divided into two categories: holistic matching
+<br/>methods and local matching methods.The holistic matching methods use complete face
+<br/>region as a input to face recognition system and constructs a lower dimensional subspace
+<br/>using principal component analysis (PCA) (Turk & Pentland, 1991), linear discriminant
+<br/>www.intechopen.com
+</td></tr><tr><td>2e5cfa97f3ecc10ae8f54c1862433285281e6a7c</td><td></td></tr><tr><td>2e0e056ed5927a4dc6e5c633715beb762628aeb0</td><td></td></tr><tr><td>2e68190ebda2db8fb690e378fa213319ca915cf8</td><td>Generating Videos with Scene Dynamics
+<br/>MIT
+<br/>UMBC
+<br/>MIT
+</td></tr><tr><td>2e0d56794379c436b2d1be63e71a215dd67eb2ca</td><td>Improving precision and recall of face recognition in SIPP with combination of
+<br/>modified mean search and LSH
+<br/>Xihua.Li
+</td></tr><tr><td>2ee8900bbde5d3c81b7ed4725710ed46cc7e91cd</td><td></td></tr><tr><td>2ef51b57c4a3743ac33e47e0dc6a40b0afcdd522</td><td>Leveraging Billions of Faces to Overcome
+<br/>Performance Barriers in Unconstrained Face
+<br/>Recognition
+<br/>face.com
+</td></tr><tr><td>2e19371a2d797ab9929b99c80d80f01a1fbf9479</td><td></td></tr><tr><td>2ebc35d196cd975e1ccbc8e98694f20d7f52faf3</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication.
+<br/>IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE
+<br/>Towards Wide-angle Micro Vision Sensors
+</td></tr><tr><td>2e3d081c8f0e10f138314c4d2c11064a981c1327</td><td></td></tr><tr><td>2e86402b354516d0a8392f75430156d629ca6281</td><td></td></tr><tr><td>2e0f5e72ad893b049f971bc99b67ebf254e194f7</td><td>Apparel Classification with Style
+<br/>1ETH Z¨urich, Switzerland 2Microsoft, Austria 3Kooaba AG, Switzerland
+<br/>4KU Leuven, Belgium
+</td></tr><tr><td>2ec7d6a04c8c72cc194d7eab7456f73dfa501c8c</td><td>International Journal of Scientific Research and Management Studies (IJSRMS)
+<br/>ISSN: 2349-3771
+<br/>
+<br/>Volume 3 Issue 4, pg: 164-169
+<br/>A REVIEW ON TEXTURE BASED EMOTION RECOGNITION
+<br/>FROM FACIAL EXPRESSION
+<br/>1U.G. Scholars, 2Assistant Professor,
+<br/>Dept. of E & C Engg., MIT Moradabad, Ram Ganga Vihar, Phase II, Moradabad, India.
+</td></tr><tr><td>2e1b1969ded4d63b69a5ec854350c0f74dc4de36</td><td></td></tr><tr><td>2b0ff4b82bac85c4f980c40b3dc4fde05d3cc23f</td><td>An Effective Approach for Facial Expression Recognition with Local Binary
+<br/>Pattern and Support Vector Machine
+</td></tr><tr><td>2b3ceb40dced78a824cf67054959e250aeaa573b</td><td></td></tr><tr><td>2b1327a51412646fcf96aa16329f6f74b42aba89</td><td>Under review as a conference paper at ICLR 2016
+<br/>IMPROVING PERFORMANCE OF RECURRENT NEURAL
+<br/>NETWORK WITH RELU NONLINEARITY
+<br/>Qualcomm Research
+<br/>San Diego, CA 92121, USA
+</td></tr><tr><td>2b5cb5466eecb131f06a8100dcaf0c7a0e30d391</td><td>A Comparative Study of Active Appearance Model
+<br/>Annotation Schemes for the Face
+<br/>Face Aging Group
+<br/>UNCW, USA
+<br/>Face Aging Group
+<br/>UNCW, USA
+<br/>Face Aging Group
+<br/>UNCW, USA
+</td></tr><tr><td>2b632f090c09435d089ff76220fd31fd314838ae</td><td>Early Adaptation of Deep Priors in Age Prediction from Face Images
+<br/>Computer Vision Lab
+<br/>D-ITET, ETH Zurich
+<br/>Computer Vision Lab
+<br/>D-ITET, ETH Zurich
+<br/>CVL, D-ITET, ETH Zurich
+<br/>Merantix GmbH
+</td></tr><tr><td>2b8dfbd7cae8f412c6c943ab48c795514d53c4a7</td><td>529
+<br/>2014 IEEE International Conference on Acoustic, Speech and Signal Processing (ICASSP)
+<br/>978-1-4799-2893-4/14/$31.00 ©2014 IEEE
+<br/>RECOGNITION
+<br/>1. INTRODUCTION
+<br/>(d1,d2)∈[0;d]2
+<br/>d1+d2≤d
+</td></tr><tr><td>2baec98c19804bf19b480a9a0aa814078e28bb3d</td><td></td></tr><tr><td>470dbd3238b857f349ebf0efab0d2d6e9779073a</td><td>Unsupervised Simultaneous Orthogonal Basis Clustering Feature Selection
+<br/>School of Electrical Engineering, KAIST, South Korea
+<br/>In this paper, we propose a novel unsupervised feature selection method: Si-
+<br/>multaneous Orthogonal basis Clustering Feature Selection (SOCFS). To per-
+<br/>form feature selection on unlabeled data effectively, a regularized regression-
+<br/>based formulation with a new type of target matrix is designed. The target
+<br/>matrix captures latent cluster centers of the projected data points by per-
+<br/>forming the orthogonal basis clustering, and then guides the projection ma-
+<br/>trix to select discriminative features. Unlike the recent unsupervised feature
+<br/>selection methods, SOCFS does not explicitly use the pre-computed local
+<br/>structure information for data points represented as additional terms of their
+<br/>objective functions, but directly computes latent cluster information by the
+<br/>target matrix conducting orthogonal basis clustering in a single unified term
+<br/>of the proposed objective function.
+<br/>Since the target matrix is put in a single unified term for regression of
+<br/>the proposed objective function, feature selection and clustering are simul-
+<br/>taneously performed. In this way, the projection matrix for feature selection
+<br/>is more properly computed by the estimated latent cluster centers of the
+<br/>projected data points. To the best of our knowledge, this is the first valid
+<br/>formulation to consider feature selection and clustering together in a sin-
+<br/>gle unified term of the objective function. The proposed objective function
+<br/>has fewer parameters to tune and does not require complicated optimization
+<br/>tools so just a simple optimization algorithm is sufficient. Substantial ex-
+<br/>periments are performed on several publicly available real world datasets,
+<br/>which shows that SOCFS outperforms various unsupervised feature selec-
+<br/>tion methods and that latent cluster information by the target matrix is ef-
+<br/>fective for regularized regression-based feature selection.
+<br/>Problem Formulation: Given training data, let X = [x1, . . . ,xn] ∈ Rd×n
+<br/>denote the data matrix with n instances where dimension is d and T =
+<br/>[t1, . . . ,tn] ∈ Rm×n denote the corresponding target matrix where dimension
+<br/>is m. We start from the regularized regression-based formulation to select
+<br/>maximum r features is minW (cid:107)WT X− T(cid:107)2
+<br/>s.t. (cid:107)W(cid:107)2,0 ≤ r. To exploit
+<br/>such formulation on unlabeled data more effectively, it is crucial for the tar-
+<br/>get matrix T to have discriminative destinations for projected clusters. To
+<br/>this end, a new type of target matrix T is proposed to conduct clustering di-
+<br/>rectly on the projected data points WT X. We allow extra degrees of freedom
+<br/>to T by decomposing it into two other matrices B ∈ Rm×c and E ∈ Rn×c as
+<br/>T = BET with additional constraints as
+<br/>(1)
+<br/>F + λ(cid:107)W(cid:107)2,1
+<br/>(cid:107)WT X− BET(cid:107)2
+<br/>s.t. BT B = I, ET E = I, E ≥ 0,
+<br/>min
+<br/>W,B,E
+<br/>where λ > 0 is a weighting parameter for the relaxed regularizer (cid:107)W(cid:107)2,1
+<br/>that induces row sparsity of the projection matrix W. The meanings of the
+<br/>constraints BT B = I,ET E = I,E ≥ 0 are as follows: 1) the orthogonal con-
+<br/>straint of B lets each column of B be independent; 2) the orthogonal and
+<br/>the nonnegative constraint of E make each row of E has only one non-zero
+<br/>element [2]. From 1) and 2), we can clearly interpret B as the basis matrix,
+<br/>which has orthogonality and E as the encoding matrix, where the non-zero
+<br/>element of each column of ET selects one column in B.
+<br/>While optimizing problem (1), T = BET acts like clustering of projected
+<br/>data points WT X with orthogonal basis B and encoder E, so T can estimate
+<br/>latent cluster centers of the WT X. Then, W successively projects X close
+<br/>to corresponding latent cluster centers, which are estimated by T. Note that
+<br/>the orthogonal constraint of B makes each projected cluster in WT X be sep-
+<br/>arated (independent of each other), and it helps W to be a better projection
+<br/>matrix for selecting more discriminative features. If the clustering is directly
+<br/>performed on X not on WT X, the orthogonal constraint of B extremely re-
+<br/>stricts the degree of freedom of B. However, since features are selected by
+<br/>W and the clustering is carried out on WT X in our formulation, so the or-
+<br/>thogonal constraint of B is highly reasonable. A schematic illustration of
+<br/>the proposed method is shown in Figure 1.
+</td></tr><tr><td>47541d04ec24662c0be438531527323d983e958e</td><td>Affective Information Processing
+</td></tr><tr><td>474b461cd12c6d1a2fbd67184362631681defa9e</td><td>2014 IEEE International
+<br/>Conference on Systems, Man
+<br/>and Cybernetics
+<br/>(SMC 2014)
+<br/>San Diego, California, USA
+<br/>5-8 October 2014
+<br/>Pages 1-789
+<br/>IEEE Catalog Number:
+<br/>ISBN:
+<br/>CFP14SMC-POD
+<br/>978-1-4799-3841-4
+<br/>1/5
+</td></tr><tr><td>47d4838087a7ac2b995f3c5eba02ecdd2c28ba14</td><td>JOURNAL OF IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. X, XXX 2017
+<br/>Automatic Recognition of Facial Displays of
+<br/>Unfelt Emotions
+<br/>Escalera, Xavier Bar´o, Sylwia Hyniewska, Member, IEEE, J¨uri Allik,
+</td></tr><tr><td>47a2727bd60e43f3253247b6d6f63faf2b67c54b</td><td>Semi-supervised Vocabulary-informed Learning
+<br/>Disney Research
+</td></tr><tr><td>47e3029a3d4cf0a9b0e96252c3dc1f646e750b14</td><td>International Conference on Computer Systems and Technologies - CompSysTech’07
+<br/>Facial Expression Recognition in still pictures and videos using Active
+<br/>Appearance Models. A comparison approach.
+<br/>Drago(cid:1) Datcu
+<br/>Léon Rothkrantz
+</td></tr><tr><td>475e16577be1bfc0dd1f74f67bb651abd6d63524</td><td>DAiSEE: Towards User Engagement Recognition in the Wild
+<br/>Microsoft
+<br/>Vineeth N Balasubramanian
+<br/>Indian Institution of Technology Hyderabad
+</td></tr><tr><td>471befc1b5167fcfbf5280aa7f908eff0489c72b</td><td>570
+<br/>Class-Specific Kernel-Discriminant
+<br/>Analysis for Face Verification
+<br/>class problems (
+</td></tr><tr><td>47f8b3b3f249830b6e17888df4810f3d189daac1</td><td></td></tr><tr><td>47e8db3d9adb79a87c8c02b88f432f911eb45dc5</td><td>MAGMA: Multi-level accelerated gradient mirror descent algorithm for
+<br/>large-scale convex composite minimization
+<br/>July 15, 2016
+</td></tr><tr><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td><td></td></tr><tr><td>477811ff147f99b21e3c28309abff1304106dbbe</td><td></td></tr><tr><td>47e14fdc6685f0b3800f709c32e005068dfc8d47</td><td></td></tr><tr><td>782188821963304fb78791e01665590f0cd869e8</td><td></td></tr><tr><td>78a4cabf0afc94da123e299df5b32550cd638939</td><td></td></tr><tr><td>78f08cc9f845dc112f892a67e279a8366663e26d</td><td>TECHNISCHE UNIVERSIT ¨AT M ¨UNCHEN
+<br/>Lehrstuhl f¨ur Mensch-Maschine-Kommunikation
+<br/>Semi-Autonomous Data Enrichment and
+<br/>Optimisation for Intelligent Speech Analysis
+<br/>Vollst¨andiger Abdruck der von der Fakult¨at f¨ur Elektrotechnik und Informationstechnik
+<br/>der Technischen Universit¨at M¨unchen zur Erlangung des akademischen Grades eines
+<br/>Doktor-Ingenieurs (Dr.-Ing.)
+<br/>genehmigten Dissertation.
+<br/>Vorsitzender:
+<br/>Univ.-Prof. Dr.-Ing. habil. Dr. h.c. Alexander W. Koch
+<br/>Pr¨ufer der Dissertation:
+<br/>1.
+<br/>Univ.-Prof. Dr.-Ing. habil. Bj¨orn W. Schuller,
+<br/>Universit¨at Passau
+<br/>2. Univ.-Prof. Gordon Cheng, Ph.D.
+<br/>Die Dissertation wurde am 30.09.2014 bei der Technischen Universit¨at M¨unchen einge-
+<br/>reicht und durch die Fakult¨at f¨ur Elektrotechnik und Informationstechnik am 07.04.2015
+<br/>angenommen.
+</td></tr><tr><td>783f3fccde99931bb900dce91357a6268afecc52</td><td>Hindawi Publishing Corporation
+<br/>EURASIP Journal on Image and Video Processing
+<br/>Volume 2009, Article ID 945717, 14 pages
+<br/>doi:10.1155/2009/945717
+<br/>Research Article
+<br/>Adapted Active Appearance Models
+<br/>1 SUP ´ELEC/IETR, Avenue de la Boulaie, 35511 Cesson-S´evign´e, France
+<br/>2 Orange Labs—TECH/IRIS, 4 rue du clos courtel, 35 512 Cesson S´evign´e, France
+<br/>Received 5 January 2009; Revised 2 September 2009; Accepted 20 October 2009
+<br/>Recommended by Kenneth M. Lam
+<br/>Active Appearance Models (AAMs) are able to align efficiently known faces under duress, when face pose and illumination are
+<br/>controlled. We propose Adapted Active Appearance Models to align unknown faces in unknown poses and illuminations. Our
+<br/>proposal is based on the one hand on a specific transformation of the active model texture in an oriented map, which changes the
+<br/>AAM normalization process; on the other hand on the research made in a set of different precomputed models related to the most
+<br/>adapted AAM for an unknown face. Tests on public and private databases show the interest of our approach. It becomes possible
+<br/>to align unknown faces in real-time situations, in which light and pose are not controlled.
+<br/>Copyright © 2009 Renaud S´eguier et al. This is an open access article distributed under the Creative Commons Attribution
+<br/>License, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly
+<br/>cited.
+<br/>1. Introduction
+<br/>All applications related to face analysis and synthesis (Man-
+<br/>Machine Interaction, compression in video communication,
+<br/>augmented reality) need to detect and then to align the user’s
+<br/>face. This latest process consists in the precise localization of
+<br/>the eyes, nose, and mouth gravity center. Face detection can
+<br/>now be realized in real time and in a rather efficient manner
+<br/>[1, 2]; the technical bottleneck lies now in the face alignment
+<br/>when it is done in real conditions, which is precisely the
+<br/>object of this paper.
+<br/>Since such Active Appearance Models (AAMs) as those
+<br/>described in [3] exist, it is therefore possible to align faces
+<br/>in real time. The AAMs exploit a set of face examples in
+<br/>order to extract a statistical model. To align an unknown
+<br/>face in new image, the models parameters must be tuned, in
+<br/>order to match the analyzed face features in the best possible
+<br/>way. There is no difficulty to align a face featuring the same
+<br/>characteristics (same morphology, illumination, and pose)
+<br/>as those constituting the example data set. Unfortunately,
+<br/>AAMs are less outstanding when illumination, pose, and
+<br/>face type changes. We suggest in this paper a robust Active
+<br/>Appearance Model allowing a real-time implementation. In
+<br/>the next section, we will survey the different techniques,
+<br/>which aim to increase the AAM robustness. We will see
+<br/>that none of them address at the same time the three types
+<br/>of robustness, we are interested in pose, illumination, and
+<br/>identity. It must be pointed out that we do not consider the
+<br/>robustness against occlusion as [4] does, for example, when
+<br/>a person moves his hand around the face.
+<br/>After a quick introduction of the Active Appearance
+<br/>Models and their limitations (Section 3), we will present our
+<br/>two main contributions in Section 4.1 in order to improve
+<br/>AAM robustness in illumination, pose, and identity. Exper-
+<br/>iments will be conducted and discussed in Section 5 before
+<br/>drawing a conclusion, suggesting new research directions in
+<br/>the last section.
+<br/>2. State of the Art
+<br/>We propose to classify the methods which lead to an increase
+<br/>of the AAM robustness as follows. The specific types of
+<br/>dedicated robustness are in italic.
+<br/>(i) Preprocess
+<br/>(1) Invariant features (illumination)
+<br/>(2) Canonical representation (illumination)
+<br/>(ii) Parameter space extension
+<br/>(1) Light modeling (illumination)
+<br/>(2) 3D modeling (pose)
+</td></tr><tr><td>7897c8a9361b427f7b07249d21eb9315db189496</td><td></td></tr><tr><td>78f438ed17f08bfe71dfb205ac447ce0561250c6</td><td></td></tr><tr><td>78a11b7d2d7e1b19d92d2afd51bd3624eca86c3c</td><td></td></tr><tr><td>781c2553c4ed2a3147bbf78ad57ef9d0aeb6c7ed</td><td>Int J Comput Vis
+<br/>DOI 10.1007/s11263-017-1023-9
+<br/>Tubelets: Unsupervised Action Proposals from Spatiotemporal
+<br/>Super-Voxels
+<br/>Cees G. M. Snoek1
+<br/>Received: 25 June 2016 / Accepted: 18 May 2017
+<br/>© The Author(s) 2017. This article is an open access publication
+</td></tr><tr><td>78df7d3fdd5c32f037fb5cc2a7c104ac1743d74e</td><td>TEMPORAL PYRAMID POOLING CNN FOR ACTION RECOGNITION
+<br/>Temporal Pyramid Pooling Based Convolutional
+<br/>Neural Network for Action Recognition
+</td></tr><tr><td>78fdf2b98cf6380623b0e20b0005a452e736181e</td><td></td></tr><tr><td>788a7b59ea72e23ef4f86dc9abb4450efefeca41</td><td></td></tr><tr><td>8b7191a2b8ab3ba97423b979da6ffc39cb53f46b</td><td>Search Pruning in Video Surveillance Systems: Efficiency-Reliability Tradeoff
+<br/>EURECOM
+<br/>Sophia Antipolis, France
+</td></tr><tr><td>8b8728edc536020bc4871dc66b26a191f6658f7c</td><td></td></tr><tr><td>8b744786137cf6be766778344d9f13abf4ec0683</td><td>978-1-4799-9988-0/16/$31.00 ©2016 IEEE
+<br/>2697
+<br/>ICASSP 2016
+</td></tr><tr><td>8bf647fed40bdc9e35560021636dfb892a46720e</td><td>Learning to Hash-tag Videos with Tag2Vec
+<br/>CVIT, KCIS, IIIT Hyderabad, India
+<br/>P J Narayanan
+<br/>http://cvit.iiit.ac.in/research/projects/tag2vec
+<br/>Figure 1. Learning a direct mapping from videos to hash-tags : sample frames from short video clips with user-given hash-tags
+<br/>(left); a sample frame from a query video and hash-tags suggested by our system for this query (right).
+</td></tr><tr><td>8bb21b1f8d6952d77cae95b4e0b8964c9e0201b0</td><td>Methoden
+<br/>at 11/2013
+<br/>(cid:2)(cid:2)(cid:2)
+<br/>Multimodale Interaktion
+<br/>auf einer sozialen Roboterplattform
+<br/>Multimodal Interaction on a Social Robotic Platform
+<br/>Zusammenfassung Dieser Beitrag beschreibt die multimo-
+<br/>dalen Interaktionsmöglichkeiten mit der Forschungsroboter-
+<br/>plattform ELIAS. Zunächst wird ein Überblick über die Ro-
+<br/>boterplattform sowie die entwickelten Verarbeitungskompo-
+<br/>nenten gegeben, die Einteilung dieser Komponenten erfolgt
+<br/>nach dem Konzept von wahrnehmenden und agierenden Mo-
+<br/>dalitäten. Anschließend wird das Zusammenspiel der Kom-
+<br/>ponenten in einem multimodalen Spieleszenario näher be-
+<br/>trachtet. (cid:2)(cid:2)(cid:2) Summary
+<br/>This paper presents the mul-
+<br/>timodal
+<br/>interaction capabilities of the robotic research plat-
+<br/>form ELIAS. An overview of the robotic platform as well
+<br/>as the developed processing components is presented, the
+<br/>classification of the components follows the concept of sen-
+<br/>sing and acting modalities. Finally,
+<br/>the interplay between
+<br/>those components within a multimodal gaming scenario is
+<br/>described.
+<br/>Schlagwörter Mensch-Roboter-Interaktion, Multimodalität, Gesten, Blick (cid:2)(cid:2)(cid:2) Keywords Human-robot interaction,
+<br/>multimodal, gestures, gaze
+<br/>1 Einleitung
+<br/>Eine intuitive und natürliche Bedienbarkeit der zuneh-
+<br/>mend komplexeren Technik wird für den Menschen
+<br/>immer wichtiger, da im heutigen Alltag eine Vielzahl an
+<br/>technischen Geräten mit wachsendem Funktionsumfang
+<br/>anzutreffen ist. Unterschiedliche Aktivitäten in der For-
+<br/>schungsgemeinschaft haben sich schon seit längerer Zeit
+<br/>mit verbalen sowie nonverbalen Kommunikationsformen
+<br/>(bspw. Emotions- und Gestenerkennung) in der Mensch-
+<br/>Maschine-Interaktion beschäftigt. Gerade in der jüngeren
+<br/>Zeit trugen auf diesem Forschungsfeld unterschiedliche
+<br/>Innovationen (bspw. Touchscreen, Gestensteuerung im
+<br/>Fernseher) dazu bei, dass intuitive und natürliche Bedien-
+<br/>konzepte mehr und mehr im Alltag Verwendung finden.
+<br/>Auch Möglichkeiten zur Sprach- und Gestensteuerung
+<br/>von Konsolen und Mobiltelefonen finden heute vermehr-
+<br/>ten Einsatz in der Gerätebedienung. Diese natürlicheren
+<br/>und multimodalen Benutzerschnittstellen sind dem Nut-
+<br/>zer schnell zugänglich und erlauben eine intuitivere
+<br/>Interaktion mit komplexen technischen Geräten.
+<br/>Auch für Robotersysteme bietet sich eine multimodale
+<br/>Interaktion an, um die Benutzung und den Zugang zu
+<br/>den Funktionalitäten zu vereinfachen. Der Mensch soll
+<br/>in seiner Kommunikation idealerweise vollkommene Ent-
+<br/>scheidungsfreiheit bei der Wahl der Modalitäten haben,
+<br/>um sein gewünschtes Ziel zu erreichen. Dafür werden
+<br/>in diesem Beitrag die wahrnehmenden und agieren-
+<br/>den Modalitäten einer, rein auf Kommunikationsaspekte
+<br/>reduzierten, Forschungsroboterplattform beispielhaft in
+<br/>einer Spieleanwendung untersucht.
+<br/>1.1 Struktur des Beitrags
+<br/>In diesem Beitrag wird zunächst ein kurzer Über-
+<br/>blick über die multimodale Interaktion im Allgemeinen
+<br/>gegeben, hierbei erfolgt eine Betrachtung nach wahr-
+<br/>nehmenden und agierenden Modalitäten. Im nächsten
+<br/>Abschnitt werden Arbeiten vorgestellt, die sich auch mit
+<br/>multimodalen Robotersystemen beschäftigen. Im darauf
+<br/>folgenden Abschnitt wird die Roboterplattform ELIAS
+<br/>mit den wahrnehmenden, verarbeitenden und agierenden
+<br/>at – Automatisierungstechnik 61 (2013) 11 / DOI 10.1515/auto.2013.1062 © Oldenbourg Wissenschaftsverlag
+<br/> - 10.1515/auto.2013.1062
+<br/>Downloaded from De Gruyter Online at 09/27/2016 10:08:34PM
+<br/>via Technische Universität München
+<br/>737
+</td></tr><tr><td>8b1db0894a23c4d6535b5adf28692f795559be90</td><td>Biometric and Surveillance Technology for Human and Activity Identification X, edited by Ioannis Kakadiaris,
+<br/>Walter J. Scheirer, Laurence G. Hassebrook, Proc. of SPIE Vol. 8712, 87120Q · © 2013 SPIE
+<br/>CCC code: 0277-786X/13/$18 · doi: 10.1117/12.2018974
+<br/>Proc. of SPIE Vol. 8712 87120Q-1
+</td></tr><tr><td>134db6ca13f808a848321d3998e4fe4cdc52fbc2</td><td>IEEE TRANSACTIONS ON SYSTEMS, MAN, AND CYBERNETICS—PART B: CYBERNETICS, VOL. 36, NO. 2, APRIL 2006
+<br/>433
+<br/>Dynamics of Facial Expression: Recognition of
+<br/>Facial Actions and Their Temporal Segments
+<br/>From Face Profile Image Sequences
+</td></tr><tr><td>133dd0f23e52c4e7bf254e8849ac6f8b17fcd22d</td><td>This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI
+<br/>Active Clustering with Model-Based
+<br/>Uncertainty Reduction
+</td></tr><tr><td>1369e9f174760ea592a94177dbcab9ed29be1649</td><td>Geometrical Facial Modeling for Emotion Recognition
+</td></tr><tr><td>133900a0e7450979c9491951a5f1c2a403a180f0</td><td>JOURNAL OF LATEX CLASS FILES
+<br/>Social Grouping for Multi-target Tracking and
+<br/>Head Pose Estimation in Video
+</td></tr><tr><td>13141284f1a7e1fe255f5c2b22c09e32f0a4d465</td><td>Object Tracking by
+<br/>Oversampling Local Features
+</td></tr><tr><td>133da0d8c7719a219537f4a11c915bf74c320da7</td><td>International Journal of Computer Applications (0975 – 8887)
+<br/>Volume 123 – No.4, August 2015
+<br/>A Novel Method for 3D Image Segmentation with Fusion
+<br/>of Two Images using Color K-means Algorithm
+<br/>Dept. of CSE
+<br/>ITM Universe
+<br/>Gwalior
+<br/>Dept. of CSE
+<br/>ITM Universe
+<br/>Gwalior
+<br/>two
+</td></tr><tr><td>133f01aec1534604d184d56de866a4bd531dac87</td><td>Effective Unconstrained Face Recognition by
+<br/>Combining Multiple Descriptors and Learned
+<br/>Background Statistics
+</td></tr><tr><td>13841d54c55bd74964d877b4b517fa94650d9b65</td><td>Generalised Ambient Reflection Models for Lambertian and
+<br/>Phong Surfaces
+<br/>Author
+<br/>Zhang, Paul, Gao, Yongsheng
+<br/>Published
+<br/>2009
+<br/>Conference Title
+<br/>Proceedings of the 2009 IEEE International Conference on Image Processing (ICIP 2009)
+<br/>DOI
+<br/>https://doi.org/10.1109/ICIP.2009.5413812
+<br/>Copyright Statement
+<br/>© 2009 IEEE. Personal use of this material is permitted. However, permission to reprint/
+<br/>republish this material for advertising or promotional purposes or for creating new collective
+<br/>works for resale or redistribution to servers or lists, or to reuse any copyrighted component of
+<br/>this work in other works must be obtained from the IEEE.
+<br/>Downloaded from
+<br/>http://hdl.handle.net/10072/30001
+<br/>Griffith Research Online
+<br/>https://research-repository.griffith.edu.au
+</td></tr><tr><td>131e395c94999c55c53afead65d81be61cd349a4</td><td></td></tr><tr><td>1384a83e557b96883a6bffdb8433517ec52d0bea</td><td></td></tr><tr><td>13fd0a4d06f30a665fc0f6938cea6572f3b496f7</td><td></td></tr><tr><td>13afc4f8d08f766479577db2083f9632544c7ea6</td><td>Multiple Kernel Learning for
+<br/>Emotion Recognition in the Wild
+<br/>Machine Perception Laboratory
+<br/>UCSD
+<br/>EmotiW Challenge, ICMI, 2013
+<br/>1
+</td></tr><tr><td>13d9da779138af990d761ef84556e3e5c1e0eb94</td><td>Int J Comput Vis (2008) 77: 3–24
+<br/>DOI 10.1007/s11263-007-0093-5
+<br/>Learning to Locate Informative Features for Visual Identification
+<br/>Received: 18 August 2005 / Accepted: 11 September 2007 / Published online: 9 November 2007
+<br/>© Springer Science+Business Media, LLC 2007
+</td></tr><tr><td>7f533bd8f32525e2934a66a5b57d9143d7a89ee1</td><td>Audio-Visual Identity Grounding for Enabling Cross Media Search
+<br/>Paper ID 22
+</td></tr><tr><td>7f44f8a5fd48b2d70cc2f344b4d1e7095f4f1fe5</td><td>Int J Comput Vis (2016) 119:60–75
+<br/>DOI 10.1007/s11263-015-0839-4
+<br/>Sparse Output Coding for Scalable Visual Recognition
+<br/>Received: 15 May 2013 / Accepted: 16 June 2015 / Published online: 26 June 2015
+<br/>© Springer Science+Business Media New York 2015
+</td></tr><tr><td>7f4bc8883c3b9872408cc391bcd294017848d0cf</td><td>
+<br/>
+<br/>Computer
+<br/>Sciences
+<br/>Department
+<br/>The Multimodal Focused Attribute Model: A Nonparametric
+<br/>Bayesian Approach to Simultaneous Object Classification and
+<br/>Attribute Discovery
+<br/>Technical Report #1697
+<br/>January 2012
+<br/>
+</td></tr><tr><td>7f6061c83dc36633911e4d726a497cdc1f31e58a</td><td>YouTube-8M: A Large-Scale Video Classification
+<br/>Benchmark
+<br/>Paul Natsev
+<br/>Google Research
+</td></tr><tr><td>7f36dd9ead29649ed389306790faf3b390dc0aa2</td><td>MOVEMENT DIFFERENCES BETWEEN DELIBERATE
+<br/>AND SPONTANEOUS FACIAL EXPRESSIONS:
+<br/>ZYGOMATICUS MAJOR ACTION IN SMILING
+</td></tr><tr><td>7f6cd03e3b7b63fca7170e317b3bb072ec9889e0</td><td>A Face Recognition Signature Combining Patch-based
+<br/>Features with Soft Facial Attributes
+<br/>L. Zhang, P. Dou, I.A. Kakadiaris
+<br/>Computational Biomedicine Lab, 4849 Calhoun Rd, Rm 373, Houston, TX 77204
+</td></tr><tr><td>7f3a73babe733520112c0199ff8d26ddfc7038a0</td><td></td></tr><tr><td>7f205b9fca7e66ac80758c4d6caabe148deb8581</td><td>Page 1 of 47
+<br/>Computing Surveys
+<br/>A Survey on Mobile Social Signal Processing
+<br/>Understanding human behaviour in an automatic but non-intrusive manner is an important area for various applications. This requires the
+<br/>collaboration of information technology with human sciences to transfer existing knowledge of human behaviour into self-acting tools. These
+<br/>tools will reduce human error that is introduced by current obtrusive methods such as questionnaires. To achieve unobtrusiveness, we focus on
+<br/>exploiting the pervasive and ubiquitous character of mobile devices.
+<br/>In this article, a survey of existing techniques for extracting social behaviour through mobile devices is provided. Initially we expose the
+<br/>terminology used in the area and introduce a concrete architecture for social signal processing applications on mobile phones, constituted by
+<br/>sensing, social interaction detection, behavioural cues extraction, social signal inference and social behaviour understanding. Furthermore, we
+<br/>present state-of-the-art techniques applied to each stage of the process. Finally, potential applications are shown while arguing about the main
+<br/>challenges of the area.
+<br/>Categories and Subject Descriptors: General and reference [Document Types]: Surveys and Overviews; Human-centered computing [Collab-
+<br/>orative and social computing, Ubiquitous and mobile computing]
+<br/>General Terms: Design, Theory, Human Factors, Performance
+<br/>Additional Key Words and Phrases: Social Signal Processing, mobile phones, social behaviour
+<br/>ACM Reference Format:
+<br/>Processing. ACM V, N, Article A (January YYYY), 35 pages.
+<br/>DOI:http://dx.doi.org/10.1145/0000000.0000000
+<br/>1. INTRODUCTION
+<br/>Human behaviour understanding has received a great deal of interest since the beginning of the previous century.
+<br/>People initially conducted research on the way animals behave when they are surrounded by creatures of the same
+<br/>species. Acquiring basic underlying knowledge of animal relations led to extending this information to humans
+<br/>in order to understand social behaviour, social relations etc. Initial experiments were conducted by empirically
+<br/>observing people and retrieving feedback from them. These methods gave rise to well-established psychological
+<br/>approaches for understanding human behaviour, such as surveys, questionnaires, camera recordings and human
+<br/>observers. Nevertheless, these methods introduce several limitations including various sources of error. Complet-
+<br/>ing surveys and questionnaires induces partiality, unconcern etc. [Groves 2004], human error [Reason 1990], and
+<br/>additional restrictions in scalability of the experiments. Accumulating these research problems leads to a common
+<br/>challenge, the lack of automation in an unobtrusive manner.
+<br/>An area that has focussed on detecting social behaviour automatically and has received a great amount of at-
+<br/>tention is Social Signal Processing (SSP). The main target of the field is to model, analyse and synthesise human
+<br/>behaviour with limited user intervention. To achieve these targets, researchers presented three key terms which
+</td></tr><tr><td>7a9ef21a7f59a47ce53b1dff2dd49a8289bb5098</td><td></td></tr><tr><td>7af38f6dcfbe1cd89f2307776bcaa09c54c30a8b</td><td>eaig i C e Vii ad Beyd:
+<br/>Deve
+<br/>h . Weg
+<br/>Deae f C e Sciece
+<br/>ichiga Sae Uiveiy
+<br/>Ea aig  48824
+<br/>Abac
+<br/>Thi chae id ce wha i ca
+<br/>aic
+<br/>ve
+<br/>by h a cgiive deve
+<br/>ih i deeied befe he \bih" f he ye. Afe he \bih" i eab
+<br/>
+<br/>aach i  ea
+<br/>deve
+<br/>way aia
+<br/>  ea whi
+<br/>de deve
+</td></tr><tr><td>7a81967598c2c0b3b3771c1af943efb1defd4482</td><td>Do We Need More Training Data?
+</td></tr><tr><td>7ad77b6e727795a12fdacd1f328f4f904471233f</td><td>Supervised Local Descriptor Learning
+<br/>for Human Action Recognition
+</td></tr><tr><td>7a97de9460d679efa5a5b4c6f0b0a5ef68b56b3b</td><td></td></tr><tr><td>7aa4c16a8e1481629f16167dea313fe9256abb42</td><td>978-1-5090-4117-6/17/$31.00 ©2017 IEEE
+<br/>2981
+<br/>ICASSP 2017
+</td></tr><tr><td>7a85b3ab0efb6b6fcb034ce13145156ee9d10598</td><td></td></tr><tr><td>7ab930146f4b5946ec59459f8473c700bcc89233</td><td></td></tr><tr><td>7ad7897740e701eae455457ea74ac10f8b307bed</td><td>Random Subspace Two-dimensional LDA for Face Recognition*
+</td></tr><tr><td>7a7b1352d97913ba7b5d9318d4c3d0d53d6fb697</td><td>Attend and Rectify: a Gated Attention
+<br/>Mechanism for Fine-Grained Recovery
+<br/>†Computer Vision Center and Universitat Aut`onoma de Barcelona (UAB),
+<br/>Campus UAB, 08193 Bellaterra, Catalonia Spain
+<br/>‡Visual Tagging Services, Parc de Recerca, Campus UAB
+</td></tr><tr><td>1451e7b11e66c86104f9391b80d9fb422fb11c01</td><td>IET Signal Processing
+<br/>Research Article
+<br/>Image privacy protection with secure JPEG
+<br/>transmorphing
+<br/>ISSN 1751-9675
+<br/>Received on 30th December 2016
+<br/>Revised 13th July 2017
+<br/>Accepted on 11th August 2017
+<br/>doi: 10.1049/iet-spr.2016.0756
+<br/>www.ietdl.org
+<br/>1Multimedia Signal Processing Group, Electrical Engineering Department, EPFL, Station 11, Lausanne, Switzerland
+</td></tr><tr><td>14761b89152aa1fc280a33ea4d77b723df4e3864</td><td></td></tr><tr><td>14fa27234fa2112014eda23da16af606db7f3637</td><td></td></tr><tr><td>1459d4d16088379c3748322ab0835f50300d9a38</td><td>Cross-Domain Visual Matching via Generalized
+<br/>Similarity Measure and Feature Learning
+</td></tr><tr><td>14e949f5754f9e5160e8bfa3f1364dd92c2bb8d6</td><td></td></tr><tr><td>1450296fb936d666f2f11454cc8f0108e2306741</td><td>Learning to Discover Cross-Domain Relations
+<br/>with Generative Adversarial Networks
+</td></tr><tr><td>14fdce01c958043140e3af0a7f274517b235adf3</td><td></td></tr><tr><td>141eab5f7e164e4ef40dd7bc19df9c31bd200c5e</td><td></td></tr><tr><td>14e759cb019aaf812d6ac049fde54f40c4ed1468</td><td>Subspace Methods
+<br/>Synonyms
+<br/>{ Multiple similarity method
+<br/>Related Concepts
+<br/>{ Principal component analysis (PCA)
+<br/>{ Subspace analysis
+<br/>{ Dimensionality reduction
+<br/>De(cid:12)nition
+<br/>Subspace analysis in computer vision is a generic name to describe a general
+<br/>framework for comparison and classification of subspaces. A typical approach in
+<br/>subspace analysis is the subspace method (SM) that classify an input pattern
+<br/>vector into several classes based on the minimum distance or angle between the
+<br/>input pattern vector and each class subspace, where a class subspace corresponds
+<br/>to the distribution of pattern vectors of the class in high dimensional vector
+<br/>space.
+<br/>Background
+<br/>Comparison and classification of subspaces has been one of the central prob-
+<br/>lems in computer vision, where an image set of an object to be classified is
+<br/>compactly represented by a subspace in high dimensional vector space.
+<br/>The subspace method is one of the most effective classification method in
+<br/>subspace analysis, which was developed by two Japanese researchers, Watanabe
+<br/>and Iijima around 1970, independently [1, 2]. Watanabe and Iijima named their
+<br/>methods the CLAFIC [3] and the multiple similarity method [4], respectively.
+<br/>The concept of the subspace method is derived from the observation that pat-
+<br/>terns belonging to a class forms a compact cluster in high dimensional vector
+<br/>space, where, for example, a w×h pixels image pattern is usually represented as a
+<br/>vector in w×h-dimensional vector space. The compact cluster can be represented
+<br/>by a subspace, which is generated by using Karhunen-Lo`eve (KL) expansion, also
+<br/>known as the principal component analysis (PCA). Note that a subspace is gen-
+<br/>erated for each class, unlike the Eigenface Method [5] in which only one subspace
+<br/>(called eigenspace) is generated.
+<br/>The SM has been known as one of the most useful methods in pattern recog-
+<br/>nition field, since its algorithm is very simple and it can handle classification
+<br/>of multiple classes. However, its classification performance was not sufficient for
+<br/>many applications in practice, because class subspaces are generated indepen-
+<br/>dently of each other [1]. There is no reason to assume a priori that each class
+</td></tr><tr><td>148eb413bede35487198ce7851997bf8721ea2d6</td><td>People Search in Surveillance Videos
+<br/>Four Eyes Lab, UCSB
+<br/>IBM Research
+<br/>IBM Research
+<br/>IBM Research
+<br/>Four Eyes Lab, UCSB
+<br/>INTRODUCTION
+<br/>1.
+<br/>In traditional surveillance scenarios, users are required to
+<br/>watch video footage corresponding to extended periods of
+<br/>time in order to find events of interest. However, this pro-
+<br/>cess is resource-consuming, and suffers from high costs of
+<br/>employing security personnel. The field of intelligent vi-
+<br/>sual surveillance [2] seeks to address these issues by applying
+<br/>computer vision techniques to automatically detect specific
+<br/>events in long video streams. The events can then be pre-
+<br/>sented to the user or be indexed into a database to allow
+<br/>queries such as “show me the red cars that entered a given
+<br/>parking lot from 7pm to 9pm on Monday” or “show me the
+<br/>faces of people who left the city’s train station last week.”
+<br/>In this work, we are interested in analyzing people, by ex-
+<br/>tracting information that can be used to search for them in
+<br/>surveillance videos. Current research on this topic focuses
+<br/>on approaches based on face recognition, where the goal is
+<br/>to establish the identity of a person given an image of a
+<br/>face. However, face recognition is still a very challenging
+<br/>problem, especially in low resolution images with variations
+<br/>in pose and lighting, which is often the case in surveillance
+<br/>data. State-of-the-art face recognition systems [1] require
+<br/>a fair amount of resolution in order to produce reliable re-
+<br/>sults, but in many cases this level of detail is not available
+<br/>in surveillance applications.
+<br/>We approach the problem in an alternative way, by avoiding
+<br/>face recognition and proposing a framework for finding peo-
+<br/>ple based on parsing the human body and exploiting part
+<br/>attributes. Those include visual attributes such as facial hair
+<br/>type (beards, mustaches, absence of facial hair), type of eye-
+<br/>wear (sunglasses, eyeglasses, absence of glasses), hair type
+<br/>(baldness, hair, wearing a hat), and clothing color. While
+<br/>face recognition is still a difficult problem, accurate and ef-
+<br/>ficient face detectors1 based on learning approaches [6] are
+<br/>available. Those have been demonstrated to work well on
+<br/>challenging low-resolution images, with variations in pose
+<br/>and lighting. In our method, we employ this technology to
+<br/>design detectors for facial attributes from large sets of train-
+<br/>ing data.
+<br/>1The face detection problem consists of localizing faces in
+<br/>images, while face recognition aims to establish the identity
+<br/>of a person given an image of a face. Face detection is a
+<br/>challenging problem, but it is arguably not as complex as
+<br/>face recognition.
+<br/>Our technique falls into the category of short term recogni-
+<br/>tion methods, taking advantage of features present in brief
+<br/>intervals in time, such as clothing color, hairstyle, and makeup,
+<br/>which are generally considered an annoyance in face recogni-
+<br/>tion methods. There are several applications that naturally
+<br/>fit within a short term recognition framework. An example
+<br/>is in criminal investigation, when the police are interested in
+<br/>locating a suspect. In those cases, eyewitnesses typically fill
+<br/>out a suspect description form, where they indicate personal
+<br/>traits of the suspect as seen at the moment when the crime
+<br/>was committed. Those include facial hair type, hair color,
+<br/>clothing type, etc. Based on that description, the police
+<br/>manually scan the entire video archive looking for a person
+<br/>with similar characteristics. This process is tedious and time
+<br/>consuming, and could be drastically accelerated by the use
+<br/>of our technique. Another application is on finding missing
+<br/>people. Parents looking for their children in an amusement
+<br/>park could provide a description including clothing and eye-
+<br/>wear type, and videos from multiple cameras in the park
+<br/>would then be automatically searched.
+</td></tr><tr><td>1473a233465ea664031d985e10e21de927314c94</td><td></td></tr><tr><td>140c95e53c619eac594d70f6369f518adfea12ef</td><td>Pushing the Frontiers of Unconstrained Face Detection and Recognition: IARPA Janus Benchmark A
+<br/>The development of accurate and scalable unconstrained face recogni-
+<br/>tion algorithms is a long term goal of the biometrics and computer vision
+<br/>communities. The term “unconstrained” implies a system can perform suc-
+<br/>cessful identifications regardless of face image capture presentation (illumi-
+<br/>nation, sensor, compression) or subject conditions (facial pose, expression,
+<br/>occlusion). While automatic, as well as human, face identification in certain
+<br/>scenarios may forever be elusive, such as when a face is heavily occluded or
+<br/>captured at very low resolutions, there still remains a large gap between au-
+<br/>tomated systems and human performance on familiar faces. In order to close
+<br/>this gap, large annotated sets of imagery are needed that are representative
+<br/>of the end goals of unconstrained face recognition. This will help continue
+<br/>to push the frontiers of unconstrained face detection and recognition, which
+<br/>are the primary goals of the IARPA Janus program.
+<br/>The current state of the art in unconstrained face recognition is high
+<br/>accuracy (roughly 99% true accept rate at a false accept rate of 1.0%) on
+<br/>faces that can be detected with a commodity face detector, but unknown
+<br/>accuracy on other faces. Despite the fact that face detection and recognition
+<br/>research generally has advanced somewhat independently, the frontal face
+<br/>detector filtering approach used for key in the wild face recognition datasets
+<br/>means that progress in face recognition is currently hampered by progress
+<br/>in face detection. Hence, a major need exists for a face recognition dataset
+<br/>that captures as wide of a range of variations as possible to offer challenges
+<br/>to both face detection as well as face recognition.
+<br/>In this paper we introduce the IARPA Janus Benchmark A (IJB-A),
+<br/>which is publicly available for download. The IJB-A contains images and
+<br/>videos from 500 subjects captured from “in the wild” environment. All la-
+<br/>belled subjects have been manually localized with bounding boxes for face
+<br/>detection, as well as fiducial landmarks for the center of the two eyes (if
+<br/>visible) and base of the nose. Manual bounding box annotations for all non-
+<br/>labelled subjects (i.e., other persons captured in the imagery) have been cap-
+<br/>tured as well. All imagery is Creative Commons licensed, which is a license
+<br/>that allows open re-distribution provided proper attribution is made to the
+<br/>data creator. The subjects have been intentionally sampled to contain wider
+<br/>geographic distribution than previous datasets. Recognition and detection
+<br/>protocols are provided which are motivated by operational deployments of
+<br/>face recognition systems. An example of images and video from IJB-A can
+<br/>be found in Figure 3.
+<br/>The IJB-A dataset has the following claimed contributions: (i) The most
+<br/>unconstrained database released to date; (ii) The first joint face detection and
+<br/>face recognition benchmark dataset collected in the wild; (iii) Meta-data
+<br/>providing subject gender and skin color, and occlusion (eyes, mouth/nose,
+<br/>and forehead), facial hair, and coarse pose information for each imagery
+<br/>instance; (iv) Widest geographic distribution of any public face dataset; (v)
+<br/>The first in the wild dataset to contain a mixture of images and videos; (vi)
+<br/>Clear authority for re-distribution; (vii) Protocols for identification (search)
+<br/>and verification (compare); (viii) Baseline accuracies from off the shelf de-
+<br/>tectors and recognition algorithms; and (ix) Protocols for both template and
+<br/>model-based face recognition.
+<br/>Every subject in the dataset contains at least five images and one video.
+<br/>IJB-A consists of a total of 5,712 images and 2,085 videos, with an average
+<br/>of 11.4 images and 4.2 videos per subject.
+</td></tr><tr><td>142dcfc3c62b1f30a13f1f49c608be3e62033042</td><td>Adaptive Region Pooling for Object Detection
+<br/>UC Merced
+<br/>Qualcomm Research, San Diego
+<br/>UC Merced
+</td></tr><tr><td>14e428f2ff3dc5cf96e5742eedb156c1ea12ece1</td><td>Facial Expression Recognition Using Neural Network Trained with Zernike
+<br/>Moments
+<br/>Dept. Génie-Electrique
+<br/>Université M.C.M Souk-Ahras
+<br/>Souk-Ahras, Algeria
+</td></tr><tr><td>14a5feadd4209d21fa308e7a942967ea7c13b7b6</td><td>978-1-4673-0046-9/12/$26.00 ©2012 IEEE
+<br/>1025
+<br/>ICASSP 2012
+</td></tr><tr><td>14fee990a372bcc4cb6dc024ab7fc4ecf09dba2b</td><td>Modeling Spatio-Temporal Human Track Structure for Action
+<br/>Localization
+</td></tr><tr><td>14ee4948be56caeb30aa3b94968ce663e7496ce4</td><td>Jang, Y; Gunes, H; Patras, I
+<br/>© Copyright 2018 IEEE
+<br/>For additional information about this publication click this link.
+<br/>http://qmro.qmul.ac.uk/xmlui/handle/123456789/36405
+<br/>Information about this research object was correct at the time of download; we occasionally
+<br/>make corrections to records, please therefore check the published record when citing. For
+</td></tr><tr><td>8ee62f7d59aa949b4a943453824e03f4ce19e500</td><td>Robust Head-Pose Estimation Based on
+<br/>Partially-Latent Mixture of Linear Regression
+<br/>∗INRIA Grenoble Rhˆone-Alpes, Montbonnot Saint-Martin, France
+<br/>†INRIA Rennes Bretagne Atlantique, Rennes, France
+</td></tr><tr><td>8e33183a0ed7141aa4fa9d87ef3be334727c76c0</td><td>– COS429 Written Report, Fall 2017 –
+<br/>Robustness of Face Recognition to Image Manipulations
+<br/>1. Motivation
+<br/>We can often recognize pictures of people we know even if the image has low resolution or obscures
+<br/>part of the face, if the camera angle resulted in a distorted image of the subject’s face, or if the
+<br/>subject has aged or put on makeup since we last saw them. Although this is a simple recognition task
+<br/>for a human, when we think about how we accomplish this task, it seems non-trivial for computer
+<br/>algorithms to recognize faces despite visual changes.
+<br/>Computer facial recognition is relied upon for many applications where accuracy is important.
+<br/>Facial recognition systems have applications ranging from airport security and suspect identification
+<br/>to personal device authentication and face tagging [7]. In these real-world applications, the system
+<br/>must continue to recognize images of a person who looks slightly different due to the passage of
+<br/>time, a change in environment, or a difference in clothing.
+<br/>Therefore, we are interested in investigating face recognition algorithms and their robustness to
+<br/>image changes resulting from realistically plausible manipulations. Furthermore, we are curious
+<br/>about whether the impact of image manipulations on computer algorithms’ face recognition ability
+<br/>mirrors related insights from neuroscience about humans’ face recognition abilities.
+<br/>2. Goal
+<br/>In this project, we implement both face recognition algorithms and image manipulations. We then
+<br/>analyze the impact of each image manipulation on the recognition accuracy of each algorithm, and
+<br/>how these influences depend on the accuracy of each algorithm on non-manipulated images.
+<br/>3. Background and Related Work
+<br/>Researchers have developed a wide variety of face recognition algorithms, such as traditional
+<br/>statistical methods such as PCA, more opaque methods such as deep neural networks, and proprietary
+<br/>systems used by governments and corporations [1][13][14].
+<br/>Similarly, others have developed image manipulations using principles from linear algebra, such
+<br/>as mimicking distortions from lens distortions, as well as using neural networks, such as a system
+<br/>for transforming images according to specified characteristics [12][16].
+<br/>Furthermore, researchers in psychology have studied face recognition in humans. A study of
+<br/>“super-recognizers” (people with extraordinarily high powers of face recognition) and “developmen-
+<br/>tal prosopagnosics” (people with severely impaired face recognition abilities) found that inverting
+<br/>images of faces impaired recognition ability more for people with stronger face recognition abilities
+<br/>[11]. This could indicate that image manipulations tend to equalize face recognition abilities, and
+<br/>we investigate whether this is the case with the manipulations and face recognition algorithms we
+<br/>test.
+</td></tr><tr><td>8e3d0b401dec8818cd0245c540c6bc032f169a1d</td><td>McGan: Mean and Covariance Feature Matching GAN
+</td></tr><tr><td>8e94ed0d7606408a0833e69c3185d6dcbe22bbbe</td><td>© 2012 IEEE. Personal use of this material is permitted. Permission from IEEE
+<br/>must be obtained for all other uses, in any current or future media, including
+<br/>reprinting/republishing this material for advertising or promotional purposes,
+<br/>creating new collective works, for resale or redistribution to servers or lists, or
+<br/>reuse of any copyrighted component of this work in other works.
+<br/>Pre-print of article that will appear at WACV 2012.
+</td></tr><tr><td>8e461978359b056d1b4770508e7a567dbed49776</td><td>LOMo: Latent Ordinal Model for Facial Analysis in Videos
+<br/>Marian Bartlett1,∗,‡
+<br/>1UCSD, USA
+<br/>2MPI for Informatics, Germany
+<br/>3IIT Kanpur, India
+</td></tr><tr><td>8ea30ade85880b94b74b56a9bac013585cb4c34b</td><td>FROM TURBO HIDDEN MARKOV MODELS TO TURBO STATE-SPACE MODELS
+<br/>Institut Eur´ecom
+<br/>Multimedia Communications Department
+<br/>BP 193, 06904 Sophia Antipolis Cedex, France
+</td></tr><tr><td>8ed32c8fad924736ebc6d99c5c319312ba1fa80b</td><td></td></tr><tr><td>8e8e3f2e66494b9b6782fb9e3f52aeb8e1b0d125</td><td>in any current or
+<br/>future media,
+<br/>for all other uses,
+<br/> 2012 IEEE. Personal use of this material is permitted. Permission from IEEE must be
+<br/>obtained
+<br/>including
+<br/>reprinting/republishing this material for advertising or promotional purposes, creating
+<br/>new collective works, for resale or redistribution to servers or lists, or reuse of any
+<br/>copyrighted component of this work in other works.
+<br/>Pre-print of article that will appear at BTAS 2012.!!
+</td></tr><tr><td>8e378ef01171b33c59c17ff5798f30293fe30686</td><td>Lehrstuhl f¨ur Mensch-Maschine-Kommunikation
+<br/>der Technischen Universit¨at M¨unchen
+<br/>A System for Automatic Face Analysis
+<br/>Based on
+<br/>Statistical Shape and Texture Models
+<br/>Ronald M¨uller
+<br/>Vollst¨andiger Abdruck der von der Fakult¨at
+<br/>f¨ur Elektrotechnik und Informationstechnik
+<br/>der Technischen Universit¨at M¨unchen
+<br/>zur Erlangung des akademischen Grades eines
+<br/>Doktor-Ingenieurs
+<br/>genehmigten Dissertation
+<br/>Vorsitzender: Prof. Dr. rer. nat. Bernhard Wolf
+<br/>Pr¨ufer der Dissertation:
+<br/>1. Prof. Dr.-Ing. habil. Gerhard Rigoll
+<br/>2. Prof. Dr.-Ing. habil. Alexander W. Koch
+<br/>Die Dissertation wurde am 28.02.2008 bei der Technischen Universit¨at M¨unchen
+<br/>eingereicht und durch die Fakult¨at f¨ur Elektrotechnik und Informationstechnik
+<br/>am 18.09.2008 angenommen.
+</td></tr><tr><td>8ed051be31309a71b75e584bc812b71a0344a019</td><td>Class-based feature matching across unrestricted
+<br/>transformations
+</td></tr><tr><td>8e36100cb144685c26e46ad034c524b830b8b2f2</td><td>Modeling Facial Geometry using Compositional VAEs
+<br/>1 ´Ecole Polytechnique F´ed´erale de Lausanne
+<br/>2Facebook Reality Labs, Pittsburgh
+</td></tr><tr><td>8e0becfc5fe3ecdd2ac93fabe34634827b21ef2b</td><td>International Journal of Computer Vision manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Learning from Longitudinal Face Demonstration -
+<br/>Where Tractable Deep Modeling Meets Inverse Reinforcement Learning
+<br/>Savvides · Tien D. Bui
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>225fb9181545f8750061c7693661b62d715dc542</td><td></td></tr><tr><td>22043cbd2b70cb8195d8d0500460ddc00ddb1a62</td><td>Separability-Oriented Subclass Discriminant
+<br/>Analysis
+</td></tr><tr><td>22137ce9c01a8fdebf92ef35407a5a5d18730dde</td><td></td></tr><tr><td>22dada4a7ba85625824489375184ba1c3f7f0c8f</td><td></td></tr><tr><td>223ec77652c268b98c298327d42aacea8f3ce23f</td><td>TR-CS-11-02
+<br/>Acted Facial Expressions In The Wild
+<br/>Database
+<br/>September 2011
+<br/>ANU Computer Science Technical Report Series
+</td></tr><tr><td>227b18fab568472bf14f9665cedfb95ed33e5fce</td><td>Compositional Dictionaries for Domain Adaptive
+<br/>Face Recognition
+</td></tr><tr><td>227b1a09b942eaf130d1d84cdcabf98921780a22</td><td>Yang et al. EURASIP Journal on Advances in Signal Processing (2018) 2018:51
+<br/>https://doi.org/10.1186/s13634-018-0572-6
+<br/>EURASIP Journal on Advances
+<br/>in Signal Processing
+<br/>R ES EAR CH
+<br/>Multi-feature shape regression for face
+<br/>alignment
+<br/>Open Access
+</td></tr><tr><td>22dabd4f092e7f3bdaf352edd925ecc59821e168</td><td> Deakin Research Online
+<br/>This is the published version:
+<br/>An, Senjian, Liu, Wanquan and Venkatesh, Svetha 2008, Exploiting side information in
+<br/>locality preserving projection, in CVPR 2008 : Proceedings of the 26th IEEE Conference on
+<br/>Computer Vision and Pattern Recognition, IEEE, Washington, D. C., pp. 1-8.
+<br/>Available from Deakin Research Online:
+<br/>http://hdl.handle.net/10536/DRO/DU:30044576
+<br/>
+<br/>Reproduced with the kind permissions of the copyright owner.
+<br/>Personal use of this material is permitted. However, permission to reprint/republish this
+<br/>material for advertising or promotional purposes or for creating new collective works for
+<br/>resale or redistribution to servers or lists, or to reuse any copyrighted component of this work
+<br/>in other works must be obtained from the IEEE.
+<br/>Copyright : 2008, IEEE
+</td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td></td></tr><tr><td>2271d554787fdad561fafc6e9f742eea94d35518</td><td>TECHNISCHE UNIVERSIT ¨AT M ¨UNCHEN
+<br/>Lehrstuhl f¨ur Mensch-Maschine-Kommunikation
+<br/>Multimodale Mensch-Roboter-Interaktion
+<br/>f¨ur Ambient Assisted Living
+<br/>Tobias F. Rehrl
+<br/>Vollst¨andiger Abdruck der von der Fakult¨at f¨ur Elektrotechnik und Informationstechnik
+<br/>der Technischen Universit¨at M¨unchen zur Erlangung des akademischen Grades eines
+<br/>Doktor-Ingenieurs (Dr.-Ing.)
+<br/>genehmigten Dissertation.
+<br/>Vorsitzende:
+<br/>Pr¨ufer der Dissertation: 1. Univ.-Prof. Dr.-Ing. habil. Gerhard Rigoll
+<br/>2. Univ.-Prof. Dr.-Ing. Horst-Michael Groß
+<br/>Univ.-Prof. Dr.-Ing. Sandra Hirche
+<br/>(Technische Universit¨at Ilmenau)
+<br/>Die Dissertation wurde am 17. April 2013 bei der Technischen Universit¨at M¨unchen
+<br/>eingereicht und durch die Fakult¨at f¨ur Elektrotechnik und Informationstechnik am
+<br/>8. Oktober 2013 angenommen.
+</td></tr><tr><td>22ec256400e53cee35f999244fb9ba6ba11c1d06</td><td></td></tr><tr><td>22a7f1aebdb57eecd64be2a1f03aef25f9b0e9a7</td><td></td></tr><tr><td>22e189a813529a8f43ad76b318207d9a4b6de71a</td><td>What will Happen Next?
+<br/>Forecasting Player Moves in Sports Videos
+<br/>UC Berkeley, STATS
+<br/>UC Berkeley
+<br/>UC Berkeley
+</td></tr><tr><td>25d514d26ecbc147becf4117512523412e1f060b</td><td>Annotated Crowd Video Face Database
+<br/>IIIT-Delhi, India
+</td></tr><tr><td>25c19d8c85462b3b0926820ee5a92fc55b81c35a</td><td>Noname manuscript No.
+<br/>(will be inserted by the editor)
+<br/>Pose-Invariant Facial Expression Recognition
+<br/>Using Variable-Intensity Templates
+<br/>Received: date / Accepted: date
+</td></tr><tr><td>258a8c6710a9b0c2dc3818333ec035730062b1a5</td><td>Benelearn 2005
+<br/>Annual Machine Learning Conference of
+<br/>Belgium and the Netherlands
+<br/>CTIT PROCEEDINGS OF THE FOURTEENTH
+<br/>ANNUAL MACHINE LEARNING CONFERENCE
+<br/>OF BELGIUM AND THE NETHERLANDS
+</td></tr><tr><td>25695abfe51209798f3b68fb42cfad7a96356f1f</td><td>AN INVESTIGATION INTO COMBINING
+<br/>BOTH FACIAL DETECTION AND
+<br/>LANDMARK LOCALISATION INTO A
+<br/>UNIFIED PROCEDURE USING GPU
+<br/>COMPUTING
+<br/> MSc by Research
+<br/>2016
+</td></tr><tr><td>25d3e122fec578a14226dc7c007fb1f05ddf97f7</td><td>The First Facial Expression Recognition and Analysis Challenge
+</td></tr><tr><td>2597b0dccdf3d89eaffd32e202570b1fbbedd1d6</td><td>Towards predicting the likeability of fashion images
+</td></tr><tr><td>25982e2bef817ebde7be5bb80b22a9864b979fb0</td><td></td></tr><tr><td>25e05a1ea19d5baf5e642c2a43cca19c5cbb60f8</td><td>Label Distribution Learning
+</td></tr><tr><td>2559b15f8d4a57694a0a33bdc4ac95c479a3c79a</td><td>570
+<br/>Contextual Object Localization With Multiple
+<br/>Kernel Nearest Neighbor
+<br/>Gert Lanckriet, Member, IEEE
+</td></tr><tr><td>2574860616d7ffa653eb002bbaca53686bc71cdd</td><td></td></tr><tr><td>25f1f195c0efd84c221b62d1256a8625cb4b450c</td><td>1-4244-1017-7/07/$25.00 ©2007 IEEE
+<br/>1091
+<br/>ICME 2007
+</td></tr><tr><td>25885e9292957feb89dcb4a30e77218ffe7b9868</td><td>JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2016
+<br/>Analyzing the Affect of a Group of People Using
+<br/>Multi-modal Framework
+</td></tr><tr><td>259706f1fd85e2e900e757d2656ca289363e74aa</td><td>Improving People Search Using Query Expansions
+<br/>How Friends Help To Find People
+<br/>LEAR - INRIA Rhˆone Alpes - Grenoble, France
+</td></tr><tr><td>25728e08b0ee482ee6ced79c74d4735bb5478e29</td><td></td></tr><tr><td>258a2dad71cb47c71f408fa0611a4864532f5eba</td><td>Discriminative Optimization
+<br/>of Local Features for Face Recognition
+<br/>
+<br/>H O S S E I N A Z I Z P O U R
+<br/>
+<br/>Master of Science Thesis
+<br/>Stockholm, Sweden 2011
+<br/>
+</td></tr><tr><td>25127c2d9f14d36f03d200a65de8446f6a0e3bd6</td><td>Journal of Theoretical and Applied Information Technology
+<br/> 20th May 2016. Vol.87. No.2
+<br/>© 2005 - 2016 JATIT & LLS. All rights reserved.
+<br/>ISSN: 1992-8645 www.jatit.org E-ISSN: 1817-3195
+<br/>EVALUATING THE PERFORMANCE OF DEEP SUPERVISED
+<br/>AUTO ENCODER IN SINGLE SAMPLE FACE RECOGNITION
+<br/>PROBLEM USING KULLBACK-LEIBLER DIVERGENCE
+<br/>SPARSITY REGULARIZER
+<br/> Faculty of Computer of Computer Science, Universitas Indonesia, Kampus UI Depok, Indonesia
+</td></tr></table></body></html> \ No newline at end of file
diff --git a/scraper/reports/institutions_not_found.csv b/scraper/reports/institutions_not_found.csv
new file mode 100644
index 00000000..0b36eb36
--- /dev/null
+++ b/scraper/reports/institutions_not_found.csv
@@ -0,0 +1,1773 @@
+"1E1 WC Mackenzie Health Sciences Centre, University of Alberta, Edmonton, AB, Canada T6G 2R"
+"2 School of Computing, National University of Singapore"
+"2015 Wiley Periodicals, Inc"
+"2Program in Neuroscience, and 3Rotman Research Institute, University of Toronto, Toronto, Ontario M5S 3G3, Canada"
+A DISSERTATION SUBMITTED TO THE UNIVERSITY OF MANCHESTER
+A Thesis submitted to McGill University in partial fulfillment of the requirements for the
+A dissertation submitted to the Faculty of the University of Delaware in partial
+A dissertation submitted to the University of Bristol in accordance with the requirements
+"ACRV, The Australian National University University of Oxford QUVA Lab, University of Amsterdam"
+"ADSIP Research Centre, University of Central Lancashire"
+ATR Human Information Processing Research Laboratories
+ATR Human Information Processing Research Laboratory
+ATR Interpreting Telecommunications Research Laboratories
+"Aarhus University, Finlandsgade 22 8200 Aarhus N, Denmark"
+"Abha Gaikwad -Patil College of Engineering, Nagpur, Maharashtra, India"
+"Academic Center for Computing and Media Studies, Kyoto University, Kyoto 606-8501, Japan"
+"Academy of Sciences (Grant No. KGZD-EW-T03), and project MMT-8115038 of the Shun Hing Institute of"
+Achariya college of Engineering Technology
+Acharya Institute Of Technology
+"Address correspondence to: Karen L. Schmidt, University of"
+"Aditya College of Engineering, Surampalem, East Godavari"
+"Aditya Institute of Technology And Management, Tekkali, Srikakulam, Andhra Pradesh"
+"Aditya institute of Technology and Management, Tekkalli-532 201, A.P"
+"Adobe Systems, Inc., 345 Park Ave, San Jose, CA"
+"Advanced Digital Sciences Center (ADSC), University of Illinois at Urbana-Champaign, Singapore"
+"Advanced Engineering, The Chinese University of Hong Kong"
+"Advanced Imaging Science, Multimedia, and Film Chung-Ang University, Seoul"
+"Affiliated to Guru Gobind Singh Indraprastha University, Delhi, India"
+AgnelAnushya P. is currently pursuing M.E (Computer Science and engineering) at Vins Christian college of
+Al-Khwarizmi Institute of Computer Science
+Alan W Black (Carnegie Mellon University
+Alex Waibel (Carnegie Mellon University
+"Alin Moldoveanu, Faculty of Automatic Control and Computers, University POLITEHNICA of Bucharest"
+Allen Institute for Arti cial Intelligence (AI
+"Allen Institute for Arti cial Intelligence (AI2), Seattle, WA"
+"Amal Jyothi College of Engineering, Kanjirappally, India"
+"Amazon, Inc"
+"Amsterdam; and 3Center for Experimental Economics and Political Decision Making, University of Amsterdam"
+"Anatomy and Genetics, University of Oxford, Oxford, United Kingdom; 3The Wellcome"
+"AncyRijaV, Author is currently pursuing M.E (Software Engineering) in Vins Christian College of"
+"Anjuman College of Engineering and Technology, Sadar, Nagpur, India"
+"Annamacharya Institute of Technology and Sciences, Tirupati, India"
+"Applied computing and mechanics laboratory, Swiss Federal Institute of Technology, 1015 Lausanne, Switzerland"
+"Aristotle University of Thessaloniki, Box 451, 54124 Thessaloniki, Greece"
+"Aristotle University of Thessaloniki, GR-541 24 Thessaloniki, Greece"
+"Aristotle University of Thessaloniki, Thessaloniki 541 24, Greece"
+AristotleUniversityofThessaloniki
+"Arti cial Intelligence Institute, China"
+"Arts Media and Engineering, Arizona State University"
+"Arts, Commerce and Science College, Gangakhed, M.S, India"
+"Arts, Science and Commerce College, Chopda"
+"Asia University, Taichung, Taiwan"
+"Asian Institute of Technology, Pathumthani, Thailand"
+"Asian University, Taichung, Taiwan"
+"Assam University, Silchar-788011 Assam University, Silchar"
+"Assistant Lecturer, College of Science, Baghdad University, Baghdad, Iraq"
+"Assiut University, Assiut 71515, Egypt"
+"Augmented Vision Lab, Technical University Kaiserslautern, Kaiserslautern, Germany"
+"Augsburg University, Germany"
+"Australian Centre for Visual Technologies, The University of Adelaide, Australia (b"
+Australian National University and NICTA
+"Australian National University and NICTA, Australia"
+"Australian National University, 2CVLab, EPFL, Switzerland, 3Smart Vision Systems, CSIRO"
+"Australian National University, 2Smart Vision Systems, CSIRO, 3CVLab, EPFL"
+"Australian National University, and NICTA"
+"Author s addresses: Z. Li and D. Gong, Shenzhen Institutes of Advanced Technology, Chinese Academy"
+Autonomous University of Barcelona
+Azad University of Qazvin
+"B. Eng., Zhejiang University"
+B. S. Rochester Institute of Technology
+"B. Tech., Indian Institute of Technology Jodhpur"
+"B.A. Earlham College, Richmond Indiana"
+"B.E, K.S.Rangasamy College of Technology, Namakkal, TamilNadu, India"
+"B.Eng., Nankai University"
+B.S. (Cornell University
+"B.S. Abdur Rahman University, Chennai-48, India"
+B.S. University of Indonesia
+"B.S., Computer Engineering, Bo gazi ci University"
+"B.S., E.E., Bo azi i University"
+B.S./M.S. Brandeis University
+B.S.Abdur Rahman University B.S.Abdur Rahman University
+"B.Sc., University of Science and Technology of China"
+"B.Tech (C.S.E), Bharath University, Chennai"
+"BECS, Aalto University School of Science and Technology, Finland"
+"BECS, Aalto University, Helsinki, Finland"
+"BRIC, University of North Carolina at Chapel Hill, NC 27599, USA"
+"Babes Bolyai University, 58-60 Teodor Mihali, C333, Cluj Napoca"
+"Badji-Mokhtar University, P.O.Box 12, Annaba-23000, Algeria"
+Baidu IDL and Tsinghua University
+Baidu Research Institute of Deep Learning
+"Baidu Research, USA 3John Hopkins University"
+"Baingio Pinna, University of"
+Bangladesh University of Engineering and Technology(BUET
+Beckman Institute for Advanced Science and Technology
+"Beckman Institute, University of Illinois at Urbana-Champaign, IL USA"
+"Beckman Institute, University of Illinois at Urbana-Champaign, IL, USA"
+"Beckman Institute, University of Illinois at Urbana-Champaign, USA"
+"Beckman Institute, University of Illinois at Urbana-Champaign, Urbana, IL 61801, USA"
+"Beihang University 2Gri th University 3University of York, UK"
+"Beijing Institute of Technology, Beijing 100081, PR China"
+"Beijing Laboratory of IIT, School of Computer Science, Beijing Institute of Technology, Beijing, China"
+"Beijing University of Posts and Telecommunications, Beijing, China. 2School of"
+Bharath Institute of Science and Technology
+"Bharti Vidyapeeth Deemed University, Pune, India"
+"Bilgi University, Dolapdere, Istanbul, TR"
+"Bio-Computing Research Center, Shenzhen Graduate School, Harbin Institute of Technology, Shenzhen, Guangdong Province, China, 2 Key Laboratory of Network"
+"Bioinformatics Institute, A*STAR, Singapore"
+Biometric Research Center
+"Biometric Research Center, The Hong Kong Polytechnic University"
+"Biometric and Image Processing Lab, University of Salerno, Italy"
+"Birkbeck College, University of London"
+Bo gazi ci University
+"Bo gazi ci University, Turkey"
+"Bo gazic i University, Istanbul, Turkey"
+"Bo gazici University, Istanbul, TR"
+"Boston College; 2Psychiatric Neuroimaging Program, Massachusetts General Hospital, Harvard Medical School; and 3Athinoula A. Martinos"
+Boston University / **Rutgers University / ***Gallaudet University
+Boston University Computer Science Technical Report No
+Boston University Theses and Dissertations
+"Boston University, Linguistics Program, 621 Commonwealth Avenue, Boston, MA"
+"Brain and Cognitive Sciences, Massachusetts Institute of Technology, Cambridge, MA, USA"
+"Brown University, 2University of California, San Diego, 3California Institute of Technology"
+"C.L. Teo, University of Maryland"
+"CAS), Institute of Computing Technology, CAS, Beijing 100190, China"
+"CAS), Institute of Computing Technology, CAS, Beijing, 100190, China"
+"CISE, University of Florida, Gainesville, FL"
+"CISUC, University of Coimbra"
+"CMR Institute of Technology, Hyderabad, (India"
+"CNRS , Institute of Automation of the Chinese Academy of Sciences"
+COMSATS Institute of Information Technology Wah Cantt
+"CRCV, University of Central Florida"
+CUNY Graduate Center and City College
+"CVAP, KTH (Royal Institute of Technology), Stockholm, SE"
+"CVIP Lab, University of Louisville, Louisville, KY 40292, USA"
+"CVIP Lab, University of Louisville, Louisville, KY, USA"
+"CVL, Link oping University, Link oping, Sweden"
+CVSSP University of Surrey
+"CVSSP, University of Surrey"
+"CVSSP, University of Surrey, UK"
+"Caarmel Engineering College, MG University, Kerala, India"
+"Calgary, 2500 University Dr., N.W. Calgary, AB, Canada T2N 1N4. Tel"
+"California State University, Fullerton, USA"
+"Cambridge University, Trumpington Street, Cambridge CB21PZ, UK"
+Canadian Institute for Advanced Research
+Carnegie Mellon University (CMU
+"Carnegie Mellon University, 5000 Forbes Avenue, Pittsburgh, PA 15213, USA"
+"Carnegie Mellon University, CyLab Biometrics Center, Pittsburgh, PA, USA"
+"Carnegie Mellon University, Electrical and Computer Engineering"
+Carnegie Melon University
+"Catholic University of Rio de Janeiro, Brazil"
+"Center for Arti cial Vision Research, Korea University"
+"Center for Automation Research (CfAR), University of Maryland, College Park, MD"
+"Center for Automation Research, UMIACS, University of Maryland, College Park"
+"Center for Automation Research, UMIACS, University of Maryland, College Park, MD"
+"Center for Automation Research, UMIACS, University of Maryland, College Park, MD 20742 USA"
+"Center for Automation Research, UMIACS, University of Maryland, College Park, MD 20742, USA"
+"Center for Automation Research, University of Maryland"
+"Center for Automation Research, University of Maryland, College Park, MD"
+"Center for Automation Research, University of Maryland, College Park, MD 20740, USA"
+"Center for Automation Research, University of Maryland, College Park, MD 20742, USA"
+"Center for Biometrics and Security Research and National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences"
+"Center for Brain Science, Harvard University, Cambridge, MA 02138 USA"
+"Center for Brain Science, Harvard University, Cambridge, MA, USA"
+"Center for Brains, Minds and Machines, McGovern Institute, MIT"
+"Center for Cognitive Neuroscience, Duke University, Durham, North Carolina"
+"Center for Cognitive Science, University of Turin, Turin, Italy, 2 Neuroscience Institute of Turin"
+"Center for Cognitive Ubiquitous Computing, Arizona State University, Tempe, AZ, USA"
+"Center for Computational Biomedicine Imaging and Modeling Center, Rutgers University, New Brunswick, NJ"
+"Center for Healthy Aging, University of"
+"Center for Information Science, Peking University, Beijing 100871, China"
+"Center for Information and Neural Networks, National Institute of Information and Communications Technology (NICT"
+"Center for Intelligent Machines, McGill University, 3480 University Street, Montreal, Canada H3A 2A"
+"Center for Machine Vision Research, University of Oulu, Finland"
+"Center for Machine Vision and Signal Analysis (CMVS), University of Oulu, Finland"
+"Center for Machine Vision and Signal Analysis, University of Oulu, Finland"
+"Center for Research in Computer Vision (CRCV), University of Central Florida (UCF"
+"Center for Research in Computer Vision, University of Central Florida"
+"Center for Research in Computer Vision, University of Central Florida, Orlando, FL"
+"Center for Research in Computer Vision, University of Central Florida, Orlando, USA"
+"Center for Research in Intelligent Systems, University of California, Riverside, CA 92521, USA"
+"Center for Sensor Systems (ZESS) and Institute for Vision and Graphics#, University of Siegen"
+"Center of Research Excellence in Hajj and Umrah, Umm Al-Qura University, Makkah, KSA"
+Central Mechanical Engineering Research Institute
+"Central Mechanical Engineering Research Institute, Durgapur, West Bengal, India"
+"Central Washington University, 400 E. University Way, Ellensburg, WA 98926, USA"
+"Centre for Bioinformatics, Biomarker Discovery and Information-Based Medicine, The University of Newcastle, Callaghan, Australia, 2 Departamento de Engenharia de"
+"Centre for Intelligent Machines, McGill University, Montreal, Canada"
+"Centre for Quantum Computation and Intelligent Systems, FEIT, University of Technology Sydney, Australia"
+"Centre for Quantum Computation and Intelligent Systems, Faculty of Engineering and IT, University of"
+"Centre for Quantum Computation and Intelligent Systems, University of Technology Sydney, Sydney, Australia"
+"Centre for Vision, Speech and Signal Processing, University of Surrey, Guildford GU2 7XH, UK"
+"Centre for Vision, Speech and Signal Processing, University of Surrey, Guildford, GU2 7XH"
+"Chalmers University of Technology, SAFER"
+"Chandigarh Engg. College, Mohali, Punjab, India"
+"Chandigarh University, Gharuan, Punjab, India"
+"Changchun Institute of Optics, Fine Mechanics and Physics, Chinese Academy of Science"
+"Charotar University of Science and Technology, Changa, India"
+"China Mobile Research Institute, Xuanwu Men West Street, Beijing"
+China University of Mining and Technol
+"China, 2 Changchun Institute of Optics, Fine Mechanics and Physics, CAS, Changchun, China, 3 School of"
+"China, 2 School of Computer Science and Engineering, Nanjing University of Science and Technology"
+"China-Singapore Institute of Digital Media, Singapore"
+"Chonbuk National University, Jeonju 561-756, Korea"
+"City University of Hong Kong, Kowloon 999077, Hong Kong, China"
+"Co-Guide, Computer Science, Shah and Anchor Kuttchi Engineering College, Mumbai, India"
+"Cognitive Arti cial Intelligence, Utrecht University, Heidelberglaan 6, 3584 CD, Utrecht"
+"Cognitive Brain Research Unit, Institute of Behavioural Sciences, University of"
+"Cognitive Systems Lab, Karlsruhe Institute of Technology, Karlsruhe, Germany"
+"Collage of Sciences, Baghdad University, Iraq"
+"College Road East, Princeton, NJ"
+College of Computer Science and Information Sciences
+"College of Computer Science and Information Technology, Central South University of Forestry and Technology, Hunan 410004, China"
+"College of Computer Science and Information Technology, Northeast Normal University, Changchun"
+"College of Computer Science and Software Engineering, Shenzhen University, Shenzhen 518060, China"
+College of Computer Science and Technology
+"College of Computer Science and Technology, Chongqing"
+"College of Computer Science and Technology, Zhejiang University, China"
+"College of Computer Science, Chongqing University, Chongqing, 400030, China"
+"College of Computer Science, Chongqing University, Chongqing, China"
+"College of Computer Science, Sichuan University, Chengdu 610065, P.R. China"
+"College of Computer Science, Zhejiang University"
+"College of Computer Science, Zhejiang University, Hangzhou, China"
+"College of Computer Science, Zhejiang University, Zhejiang, China"
+"College of Computer and Information Engineering, Nanyang Institute of Technology"
+"College of Computer and Information Engineering, Tianjin Normal University, Tianjin 300387, China"
+"College of Computer and Information Science, Northeastern University, Boston, USA"
+"College of Computer and Information Science, Northeastern University, MA, USA"
+"College of Computer and Information Science, Southwest University, Chongqing 400715, China"
+College of Computer and Information Sciences
+"College of Computers and Information Systems, Umm Al-Qura University, Makkah, KSA"
+"College of Computing, Georgia Institute of Technology"
+"College of Computing, Georgia Institute of Technology, Atlanta, GA, USA"
+"College of Electronic Science and Engineering, National University of Defense Technology, Changsha, China"
+"College of Electronics and Information Engineering, Sichuan University, Chengdu 610064, China"
+"College of Electronics and Information, Northwestern Polytechnic University"
+College of Engineering (Poly
+College of Engineering and Mineral Resources
+"College of Engineering, Mathematics and Physical Sciences"
+"College of Engineering, Purdue University"
+College of Image Arts and Sciences
+College of Information Engineering
+"College of Information Engineering, Shanghai Maritime University, Shanghai, China, 2 School of Information, Kochi University"
+College of Information Science and Engineering
+"College of Information Science and Engineering, Ocean University of China, Qingdao, China"
+"College of Information Science and Engineering, Ritsumeikan University, Shiga 525-8577, Japan"
+"College of Information Science and Engineering, Xinjiang University"
+"College of Information and Communication Engineering, Sungkyunkwan University, Suwon-si, Gyeonggi"
+College of Information and Control Engineering in China University of Petroleum
+"College of Information and Control Engineering, China University of Petroleum, Qingdao 266580, China"
+College of Information and Electrical Engineering
+"College of Information, Yunnan Normal University, Kunming, China"
+"College of Medical Informatics, Chongqing Medical University, Chongqing, China"
+"College of Medicine, Seoul National University"
+"College of Science, Baghdad University, Baghdad, Iraq"
+"College of Science, Menou a University, Menou a 32721, Egypt"
+"College of Sciences, Northeastern University, Shenyang 110819, China"
+"College of Software Engineering, Southeast University, Nanjing 210096, China"
+"College of software, Chongqing University of Posts and Telecommunications Chongqing"
+"CollegePark, MD"
+"ColumbiaUniversity, NY, USA"
+Compi`egne University of Technology
+"Computational Biomedicine Lab, University of Houston, TX, USA"
+"Computational Biomedicine Laboratory, University of Houston, Houston, Texas 77204, USA"
+"Computational Medicine Laboratory, Institute of Computer Science, Foundation for Research and Technology - Hellas"
+"Computational Science and Engineering Program, Bo gazic i University, Istanbul, Turkey"
+"Computer Applications, Ayya Nadar Janaki Ammal College, Sivakasi, India"
+"Computer Engineering, Faculty of Engineering, Kharazmi University of Tehran, Tehran, Iran"
+"Computer Graphics Research Group, University of Freiburg, Freiburg, Germany"
+"Computer Information Systems, Missouri State University, 901 S. National, Springfield, MO 65804, USA"
+"Computer Laboratory, University of Cambridge, Cambridge, UK"
+"Computer Science Division, The Open University of Israel"
+"Computer Science Division, The Open University of Israel, Israel"
+"Computer Science and Arti cial Intelligence Laboratory, Massachusetts Institute of Technology, Cambridge, MA, USA"
+"Computer Science and Electrical Engineering, West Virginia University, Morgantown, USA"
+"Computer Science and Engineering, Anna University, India"
+"Computer Science and Engineering, Easwari Engineering College, India"
+"Computer Science and Engineering, Michigan State University, East Lansing, USA"
+"Computer Science and Engineering, University of Michigan, Ann Arbor"
+"Computer Science and Engineering, University of Washington"
+"Computer Science and Engineering, University of Washington, Seattle, WA"
+"Computer Science and Engineering, University of Washington, Seattle, WA, USA"
+"Computer Science and Software Engineering, Concordia University, Montr eal, Qu ebec, Canada"
+"Computer Science and Software Engineering, The University of Western Australia"
+"Computer Science and Technology, Tsinghua University, Beijing, China"
+"Computer Science, Beijing Institute of Technology, Beijing 100081, P.R.China"
+"Computer Science, Brown University, Providence, RI, USA"
+"Computer Science, Engineering and Mathematics School, Flinders University, Australia"
+"Computer Science, Princeton University, Princeton, NJ, USA"
+"Computer Vision Group, Friedrich Schiller University Jena"
+"Computer Vision Group, Friedrich Schiller University of Jena, Germany"
+"Computer Vision Group, L. D. College of Engineering, Ahmedabad, India"
+"Computer Vision Group, Xerox Research Center Europe, Meylan, France"
+"Computer Vision Lab, Delft University of Technology"
+"Computer Vision Laboratory, Link oping University, SE-581 83 Link oping, Sweden"
+"Computer Vision Laboratory, Link oping University, Sweden"
+"Computer Vision Laboratory, The University of Nottingham"
+"Computer Vision Laboratory, University of Nottingham, Nottingham, UK"
+Computer Vision Laboratory. University of Nottingham
+"Computer Vision Research Group, COMSATS Institute of Information"
+Computer Vision and Robotics Research Laboratory
+"Computer Vision for Human Computer Interaction, Karlsruhe Institute of Technology, Germany"
+"Computer and Systems Engineering, Rensselaer Polytechnic Institute"
+Computer and Vision Research Center
+"Computer vision and Remote Sensing, Berlin university of Technology"
+"Concordia University, Computer Science and Software Engineering, Montr eal, Qu ebec, Canada"
+Conference on CyberGames and Interactive Entertainment (pp. 52-58). Western Australia: Murdoch university
+"Cooperative Medianet Innovation Center, Shanghai Jiaotong University"
+"Coordinated Science Lab, University of Illinois at Urbana-Champaign"
+"Copyright c(cid:3) 2017 The Institute of Electronics, Information and Communication Engineers"
+Copyright for the publications made accessible via the Queen's University Belfast Research Portal is retained by the author(s) and / or other
+Cornell University 2 Cornell Tech
+Cornell University 2Eastman Kodak Company
+"Cornell University, Ithaca, NY, U.S.A"
+"Correspondence should be addressed to: Astrid C. Homan, University of Amsterdam, Weesperplein"
+Courant Institute and Google Research
+"Courant Institute of Mathematical Sciences and Google Research, New York, NY"
+"Current Address: Research Institute of Child Development and Education, University of Amsterdam"
+Curtin University of Technology
+"D Research Center, Kwangwoon University and Springer"
+D.J. Sanghvi College of Engineering
+"D.Y.Patil Institute of Engineering and Technology, Pimpri, Pune-18, Savitribai Phule Pune University"
+DAP - University of Sassari
+"DCMandB, University of Michigan, Ann Arbor, USA 4 SCS, Carnegie Mellon University, Pittsburgh, USA"
+"DESTEC, FLSHR Mohammed V University-Agdal, Rabat, Morocco"
+DICGIM - University of Palermo
+"DIEI, University of Perugia, Italy"
+DISI - University of Trento
+"DISI, University of Trento, Italy"
+"DISI, University of Trento, Trento, Italy"
+"DPDCE, University IUAV"
+"DPDCE, University IUAV, Santa Croce 1957, 30135 Venice, Italy"
+"DSP Lab, Sharif University of Technology, Tehran, Iran"
+"DTU Informatics, Technical University of Denmark, DK-2800 Kgs. Lyngby, DENMARK"
+"DUT-RU International School of Information Science and Engineering, Dalian University of Technology, Dalian, China"
+DVMM Lab - Columbia University
+"Dalio Institute of Cardiovascular Imaging, Weill Cornell Medical College"
+Dalle Molle Institute for Arti cial Intelligence
+"Dartmouth College, 6211 Sudiko Lab, Hanover, NH 03755, USA"
+Datta Meghe College of Engineering
+"Dayananda Sagar College of Engg., India"
+"Dean, K.S.Rangasamy College of Technology, Namakkal, TamilNadu, India"
+"Dep. of Applied Mathematics and Analysis, University of Barcelona, Spain"
+"Deparment of Computer Science, Queen Mary, University of London, London, E1 4NS, UK"
+"Deparment of Computing, Goldsmiths, University of London, UK"
+"Deparment of Computing, Imperial College London, UK"
+Departm nt of Information Engin ering Th Chines University of Hong Kong
+"Deprtment of Computer Science and Engineering, JNTUA College of Engineering, India"
+"Deva Ramanan, University of California at Irvine"
+"Dextro Robotics, Inc. 101 Avenue of the Americas, New York, USA"
+Dhanalakshmi Srinivasan College of Engineering
+Dietrich College Honors Theses
+Dietrich College of Humanities and Social Sciences
+Digital Media Research Center
+"Dipartimento di Sistemi e Informatica, University of Florence"
+"Director, Hindustan College of Arts and Science, Coimbatore, Tamil Nadu, India"
+"Division of Computer Engineering, Chonbuk National University, Jeonju-si, Jeollabuk-do"
+"Division of Computer Science, University of California, Berkeley, CA, USA e-mail"
+"Division of Electronics and Electrical Engineering, Dongguk University, 26 Pil-dong 3-ga, Jung-gu"
+"Division of Electronics and Electrical Engineering, Dongguk University, 30 Pildong-ro 1-gil, Jung-gu"
+"Division of Electronics and Electrical Engineering, Dongguk University, 30 Pildong-ro, 1-gil, Jung-gu"
+"Division of IT Convergence, Daegu Gyeongbuk Institute of Science and Technology"
+"Division of Information and Computer Engineering, Ajou University, Suwon 443-749, Korea"
+"Dnyanopasak College Parbhani, M.S, India"
+"Doctor of Philosophy in Computer Science at Cardi University, July 24th"
+"Doctor of Philosophy in Computing of Imperial College, February"
+Doctor of Philosophy of University College London
+"Doctoral School of Automatic Control and Computers, University POLITEHNICA of Bucharest, Romania"
+Downloaded from the University of Groningen/UMCG research database (Pure): http://www.rug.nl/research/portal. For technical reasons the
+"Dr.Pauls Engineering College, Villupuram District, Tamilnadu, India"
+"ECE dept, University of Miami"
+"ECE, National University of Singapore, Singapore"
+"ECSE, Rensselaer Polytechnic Institute, Troy, NY"
+"EECS, Syracuse University, Syracuse, NY, USA"
+"EEMCS, University of Twente"
+"EEMCS, University of Twente Enschede, The Netherlands"
+"EEMCS, University of Twente, Netherlands"
+"EEMCS, University of Twente, The Netherlands"
+"EIMT, Open University of"
+"ESAT, Katholieke Universiteit Leuven, Leuven, Belgium"
+"ESAT-PSI, KU Leuven, 2University of Bonn, 3CV:HCI, KIT, Karlsruhe, 4Sensifai"
+"ESTeM, University of Canberra"
+"Eastern Mediterranean University, Gazima usa, Northern Cyprus"
+"Ecole Polytechnique Federale de Lausanne, Signal Processing Institute"
+Economy (MKE) and the Korea Evaluation Institute of Industrial Technology (KEIT
+"Education, Yunnan Normal University, Kunming, China"
+"Education, Yunnan NormalUniversity, Kunming, China2. College of Information, Yunnan"
+"Elect. Eng. Faculty, Tabriz University, Tabriz, Iran"
+"Electrical Engineering Institute, EPFL"
+"Electrical Engineering LR11ESO4), University of Tunis EL Manar. Adress: ENSIT 5, Avenue Taha Hussein, B. P. : 56, Bab"
+"Electrical and Computer Engineering, National University of Singapore, Singapore"
+"Electrical and Computer Engineering, The University of Memphis"
+"Electrical and Computer Engineering, University of Auckland, New Zealand"
+"Electrical and Computer Engineering, University of Toronto, M5S 3G4, Canada"
+"Electrical, Computer, Rensselaer Polytechnic Institute"
+"Electrical, Electronics and Automation Section, Universiti Kuala Lumpur Malaysian Spanish Institute"
+Electronic Engineering and Computer Science Queen Mary University of London
+"Electronic and Information Engineering, University of Bologna, Italy"
+"Electronics And Communication Engg., Adhiyamaan College of Engg., Hosur, (India"
+"Electronics Engineering, National Institute of Technical Teachers"
+"Electronics and Communication Engineering, Chuo University"
+"Electronics and Computer Science, University of Southampton, Southampton, Hampshire"
+Electronics and Telecommunications Research Institute
+"Engg, Priyadarshini College of"
+Engineering Chaoyang University Nankai Institute of
+"Engineering Institute, Autonomous University of Baja California, Blvd. Benito Ju rez"
+"Engineering and Natural Science, Sabanci University, Istanbul, Turkey"
+"Engineering, G.H.Raisoni College of Engineering"
+"Engineering, National Formosa University"
+"Engineering, Ton Duc Thang University, 19 Nguyen Huu Tho Street, Ho Chi Minh City, Vietman"
+"Engineering, University of Akron, Akron, OH 44325-3904, USA"
+"Engineering, University of Dundee"
+"Environment, Northumbria University, Newcastle, NE1 8ST, United Kingdom"
+"Exploratory Computer Vision Group, IBM T. J. Watson Research Center"
+"F.Ferraro, University of Rochester"
+"FI-90014 University of Oulu, Finland"
+"FX Palo Alto Laboratory, Inc., California, USA"
+Facebook 4Texas AandM University 5IBM Research
+"Facebook AI Research, 2Dartmouth College"
+"Facial Image Processing and Analysis Group, Institute for Anthropomatics"
+"Facial expression gures prominently in research on almost every aspect of emotion, including psychophys"
+"Faculty of Computer Science, Dalhousie University, Halifax, Canada"
+"Faculty of Computer Science, Mathematics, and Engineering, University of Twente, Enschede, Netherlands"
+"Faculty of Computer Science, University of A Coru na, Coru na, Spain"
+"Faculty of Computer and Information Science, University of Ljubljana, Ve cna pot 113, SI-1000 Ljubljana"
+"Faculty of Computer, Khoy Branch, Islamic Azad University, Khoy, Iran"
+"Faculty of Computers and Information, Cairo University, Cairo, Egypt"
+"Faculty of EEMCS, Delft University of Technology, The Netherlands"
+"Faculty of EEMCS, University of Twente, The Netherlands"
+"Faculty of ETI, Gdansk University of Technology, Gdansk, Poland"
+"Faculty of Electrical Engineering, Czech Technical University"
+"Faculty of Electrical Engineering, Czech Technical University in Prague"
+"Faculty of Electrical Engineering, Mathematics and Computer Science, Delft University of"
+"Faculty of Electrical Engineering, Mathematics and Computer Science, University"
+"Faculty of Electrical Engineering, Mathematics and Computer Science, University of Twente, The"
+"Faculty of Electrical Engineering, University of Ljubljana"
+"Faculty of Electrical Engineering, University of Ljubljana, Slovenia"
+"Faculty of Electrical Engineering, University of Ljubljana, Tr a ka cesta 25, SI-1000 Ljubljana, Slovenia"
+"Faculty of Electrical Engineering, University of Ljubljana, Tr za ska 25, SI-1000 Ljubljana, Slovenia"
+"Faculty of Electrical Engineering, University of Ljubljana, Tr za ska cesta"
+"Faculty of Electrical Engineering, University of Ljubljana, Tr za ska cesta 25, SI-1000 Ljubljana, Slovenia"
+"Faculty of Electrical and Computer Engineering, Bu-Ali Sina University, Hamadan, Iran"
+"Faculty of Electrical and Computer Engineering, Tarbiat Modares University, Tehran, Iran"
+"Faculty of Electronics, Telecommunications and Informatics, Gdansk University of Technology, Poland"
+"Faculty of Engineering Building, University of Malaya, 50603 Kuala Lumpur, Malaysia"
+"Faculty of Engineering and Natural Sciences, Sabanc University, stanbul, Turkey"
+"Faculty of Engineering and Technology, Multimedia University (Melaka Campus"
+"Faculty of Engineering, Ain Shams University, Cairo, Egypt"
+"Faculty of Informatics, E otv os Lor and University, Budapest, Hungary"
+"Faculty of Information Science and Technology, Multimedia University, 75450 Melaka, Malaysia"
+"Faculty of Information Technology, Barrett Hodgson University, Karachi, Pakistan"
+"Faculty of Information Technology, Vietnam National University of Agriculture, Hanoi 10000, Vietnam"
+"Faculty of Mathematics and Computer Science, University of Barcelona, Barcelona, Spain"
+"Faculty of Science and Engineering, Waseda University, Tokyo, Japan"
+"Faculty of Science and Technology, University of Macau"
+"Faculty of Science, University of Amsterdam, The Netherlands"
+"Faculty of Telecommunications, Technical University, Sofia, Bulgaria"
+"Federal Institute of Science and Technology, Mookkannoor"
+"Federal University Technology Akure, PMB 704, Akure, Nigeria"
+Federal University of Bahia (UFBA
+Federal University of Campina Grande (UFCG
+Federal University of Para ba
+Federal University of Technology - Paran a
+"Figure 1: A few results from our VRN - Guided method, on a full range of pose, including large expressions"
+"Final Year Student, M.Tech IT, Vel Tech Dr. RR andDr. SR Technical University, Chennai"
+"Final Year, PanimalarInstitute of Technology"
+"Florian Metze, Chair (Carnegie Mellon University"
+Formerly: Texas AandM University
+"Foundation University, Rawalpindi 46000, Pakistan"
+"Francis Xavier Engineering College, Tirunelveli, Tamilnadu, India"
+Fraunhofer Institute for Integrated Circuits IIS
+"Fraunhofer Institute of Optronics, System Technologies and Image Exploitation (Fraunhofer IOSB"
+"Friedrich Schiller University, D-07740 Jena"
+"Fudan University, 2Microsoft Research Asia, 3University of Maryland"
+Funding was provided by the U.S. National Institutes of Mental
+"G.H.Raisoni College of Engg. and Mgmt., Pune, India"
+"GIT Vision Lab, http://vision.gyte.edu.tr/, Gebze Institute of Technology"
+"GRASP Laboratory, University of Pennsylvania, 3330 Walnut Street, Philadelphia, PA, USA"
+"GREYC Laboratory, ENSICAEN - University of Caen Basse Normandie - CNRS"
+"GREYC UMR CNRS 6072 ENSICAEN-Image Team, University of Caen Basse-Normandie, 6 Boulevard Mar echal Juin"
+"GSCM-LRIT, Faculty of Sciences, Mohammed V University-Agdal, Rabat, Morocco"
+"Games Studio, Faculty of Engineering and IT, University of Technology, Sydney"
+Gangnung-Wonju National University
+"Gannan Normal University, Ganzhou 341000, China"
+"Gatsby Computational Neuroscience Unit, University College London, London, UK"
+"Gayathri.S, M.E., Vins Christian college of Engineering"
+"Gdansk University of Technology, Faculty of Electronics, Telecommunication"
+German Research Center for Arti cial Intelligence (DFKI
+"German Research Center for Arti cial Intelligence (DFKI), Kaiserslautern, Germany"
+"Global Big Data Technologies Centre (GBDTC), University of Technology Sydney, Australia"
+"Gokaraju Rangaraju Institute of Engineering and Technology, Hyd"
+"Gokaraju Rangaraju Institute of Engineering and Technology, Hyderabad"
+"Goldsmiths, University of London, London, UK"
+"Goldsmiths, University of London, UK"
+"Gonda Brain Research Center, Bar Ilan University, Israel"
+"Google, Inc"
+"Governance, Keio University"
+"Government College of Engineering, Aurangabad"
+"Government College of Engineering, Aurangabad [Autonomous"
+"Grad. School at Shenzhen, Tsinghua University"
+"Grad. School of Information Science and Technology, The University of Tokyo, Japan"
+"Graduate Institute of Electronics Engineering, National Taiwan University"
+"Graduate Institute of Networking and Multimedia, National Taiwan University"
+"Graduate Institute of Networking and Multimedia, National Taiwan University, Taipei, Taiwan"
+"Graduate Institute ofNetworking and Multimedia, National Taiwan University, Taipei, Taiwan"
+"Graduate School at Shenzhen, Tsinghua University, Shenzhen"
+"Graduate School at Shenzhen, Tsinghua University, Shenzhen 518055, China"
+"Graduate School of Doshisha University, Kyoto, Japan"
+"Graduate School of Engineering, Kobe University, Kobe, 657-8501, Japan"
+"Graduate School of Informatics, Kyoto University"
+"Graduate School of Informatics, Kyoto University, Kyoto 606-8501, Japan"
+"Graduate School of Information Science and Technology, The University of Tokyo"
+"Graduate School of Information Science, Nagoya University, Japan"
+"Graduate School of Information Science, Nagoya University; Furo-cho, Chikusa-ku, Nagoya, 464-8601, Japan"
+"Graduate School of Information Science, Nara Institute of Science and Technology, Ikoma-shi, Nara"
+"Graduate School of Science and Engineering, Saitama University"
+"Graduate School of System Informatics, Kobe University"
+"Graduate School of System Informatics, Kobe University, Kobe, 657-8501, Japan"
+"Graduate University of CAS, 100190, Beijing, China"
+"Graduate University of Chinese Academy of Sciences(CAS), 100190, China"
+"Graduate University of Chinese Academy of Sciences, Beijing 100049, China"
+"Gri th University, QLD-4111, Brisbane, Australia"
+"Grif th University, QLD, Australia"
+"Guide, HOD, Computer Science, Shah and Anchor Kuttchi Engineering College, Mumbai, India"
+"Gujarat Technological University, V.V.Nagar, India"
+"H. He, Honkong Polytechnic University"
+HELSINKI UNIVERSITY OF TECHNOLOGY
+"HOD, St. Joseph College of Information Technology, Songea, Tanzania"
+"Harbin Institute of Technology, School of Computer Science and Technology"
+"Hasan Kalyoncu University, Gaziantep, Turkey"
+"Head and Neck Surgery, Seoul National University"
+"Hector Research Institute of Education Sciences and Psychology, T ubingen"
+"Heilongjiang University, College of Computer Science and Technology, China"
+"Held at R.C.Patel Institute of Technology, Shirpur, Dist. Dhule, Maharastra, India"
+"Helen Wills Neuroscience Institute, University of"
+"Helsinki Collegium for Advanced Studies, University of Helsinki, Finland"
+"Helsinki Institute for Information Technology, Aalto University, Finland"
+Helsinki University of Technology Laboratory of Computational Engineering Publications
+"Henan University of Traditional Chinese Medicine, Henan, Zhengzhou, 450000, P.R. China"
+"Heudiasyc Laboratory, CNRS, University of Technology of Compi`egne"
+High Institute of Medical Technologies
+"Hindusthan College of Engineering and Technology, Coimbatore, India"
+Ho Chi Minh City University of
+Ho Chi Minh City University of Science
+Honda Fundamental Research Labs
+"Honda RandD Americas, Inc., Boston, MA, USA"
+Honda Research Institute
+Honda Research Institute USA
+Hong Kong Applied Science and Technology Research Institute Company Limited
+"Hong Kong Applied Science and Technology Research Institute Company Limited, Hong Kong, China"
+Howard Hughes Medical Institute (HHMI
+"Hua Zhong University of Science and Technology, Wuhan, China"
+Huazhong Agricultural University
+"Human Centered Multimedia, Augsburg University, Germany"
+"Human Computer Interaction Lab., Samsung Advanced Institute of Technology, Korea"
+"Human Development and Applied Psychology, University of Toronto, Ontario, Canada"
+"Human Genome Center, Institute of Medical Science"
+Human Interaction Research Lab
+"Human Interface Technology Lab New Zealand, University of Canterbury, New Zealand"
+"Human Language Technology and Pattern Recognition Group, RWTH Aachen University, Germany"
+"Human Media Interaction, University of Twente, P.O. Box"
+"Human and Health Sciences, Swansea University, Swansea, United Kingdom, 3 Abertawe Bro-Morgannwg"
+"Human-friendly Welfare Robotic System Engineering Research Center, KAIST"
+"Hunan Provincial Key Laboratory of Wind Generator and Its Control, Hunan Institute of Engineering, Xiangtan, China"
+IBM China Research Lab
+"IBM Research, Australia, 2 IBM T.J. Watson Research Center, 3 National University of Singapore"
+IBM T. J. Watson Research Center
+"IBM T. J. Watson Research Center, PO Box 704, Yorktown Heights, NY"
+"IBM T. J. Watson Research Center, Yorktown Heights, NY, USA"
+IBM T.J. Watson Research Center
+"IBM Watson Research Center, Armonk, NY, USA"
+ICMC University of S ao Paulo
+ICSI / UC Berkeley 2Brigham Young University
+"ICT-ISVISION Joint RandD Laboratory for Face Recognition, Institute of Computer Technology, The Chinese Academy of Sciences"
+"IES College of Technology, Bhopal"
+"IHCC, RSCS, CECS, Australian National University"
+"IIE, Universidad de la Rep ublica, Uruguay. 2ECE, Duke University, USA"
+"IIIS, Tsinghua University"
+"IIIS, Tsinghua University, Beijing, China"
+"IIIT-Delhi, India, 2West Virginia University"
+"IKAT, Universiteit Maastricht, St. Jacobsstraat 6, 6211 LB Maastricht, The Netherlands"
+"IN3, Open University of"
+"ISLA Lab, Informatics Institute"
+"ISLA Lab, Informatics Institute, University of Amsterdam"
+"ISTMT, Laboratory of Research in Biophysics and Medical Technologies LRBTM Higher Institute of Medical Technologies of Tunis"
+"ISTMT, University of Tunis El Manar Address: 9, Rue Docteur Zouhe r Safi 1006; 3Faculty of Medicine of Tunis; Address"
+"ITCS, Institute for Interdisciplinary Information Sciences, Tsinghua University, Beijing"
+"ITCS, Tsinghua University"
+"ITEE, The University of Queensland, Australia"
+Idiap Research Institute and EPF Lausanne
+"Idiap Research Institute and EPFL, 2 LIMSI, CNRS, Univ. Paris-Sud, Universit Paris-Saclay"
+"Idiap Research Institute, Martigny, Switzerland, 2LIUM, University of Maine, Le Mans, France"
+"Iftm University, Moradabad-244001 U.P"
+"Ilmenau Technical University, P.O.Box 100565, 98684 Ilmenau, Germany"
+"Image Processing Center, Beihang University"
+"Image and Video Laboratory, Queensland University of Technology (QUT), Brisbane, QLD, Australia"
+"Image and Video Research Laboratory, Queensland University of Technology"
+"Imaging Science and Biomedical Engineering, The University of Manchester, UK"
+Imperial College London / Twente University
+"Imperial College London, On do"
+"Imperial College of Science, Technology and Medicine"
+"Imperial College, 180 Queens Gate"
+"Imperial College, South Kensington Campus, London SW7 2AZ, UK"
+In the Graduate College
+"Indian Institute of Informaiton Technology, Allahabad, India"
+"Indian Institute of Technology, Kharagpur"
+"Indian Institute of Technology, Madras"
+"Indian Institute of Technology, Madras, Chennai 600036, INDIA"
+Indraprastha Institute of Information Technology
+"Indraprastha Institute of Information Technology (Delhi, India"
+"Indraprastha Institute of Information Technology, Delhi"
+Informatics and Telematics Institute
+"Informatics and Telematics Institute, Centre for Research and Technology Hellas"
+"Informatics and Telematics Institute, Centre of Research and Technology - Hellas"
+"Information Engineering, P. O. Box 4500 FI-90014 University of Oulu, Finland"
+"Information Sciences Institute, USC, CA, USA"
+"Information Systems Design, Doshisha University, Kyoto, Japan"
+"Information Technology, Madras Institute of Technology, TamilNadu, India, email"
+"Information and Media Processing Research Laboratories, NEC Corporation"
+"Informatization Office, National University of Defense Technology, Changsha 410073, China"
+"Innopolis University, Kazan, Russia"
+"Inst. Neural Computation, University of California"
+"Institiude of Computer Science and Technology, Peking University"
+"Institute AIFB, Karlsruhe Institute of Technology, Germany"
+"Institute Polythechnic of Leiria, Portugal"
+Institute for Advanced Computer Studies
+"Institute for Advanced Computer Studies, University of Maryland, College Park, MD"
+Institute for Anthropomatics
+"Institute for Arts, Science and Technology"
+Institute for Computer Graphics and Vision
+"Institute for Computer Graphics and Vision, Graz University of Technology"
+"Institute for Disease Modeling, Intellectual Ventures Laboratory, Bellevue, WA 98004, United States"
+"Institute for Electronics, Signal Processing and Communications"
+"Institute for Genomic Statistic and Bioinformatics, University Hospital Bonn"
+Institute for Human-Machine Communication
+"Institute for Human-Machine Communication, Technische Universit at M unchen"
+"Institute for Human-Machine Communication, Technische Universit at M unchen, Germany"
+"Institute for Infocomm Research (I2R), A*STAR, Singapore"
+"Institute for Infocomm Research, A*STAR"
+"Institute for Infocomm Research, A*STAR, Singapore"
+"Institute for Infocomm Research, Agency for Science, Technology and Research (A*STAR), Singapore"
+"Institute for Infocomm Research, Agency for Science, Technology and Research, Singapore"
+"Institute for Infocomm Research, Singapore"
+Institute for Information Systems Engineering
+"Institute for Information Technology and Communications (IIKT), Otto-von-Guericke-University"
+"Institute for Interdisciplinary Information Sciences, Tsinghua University, Beijing, China"
+"Institute for Medical Engineering Science, Massachusetts Institute of Technology, 77 Massachusetts Avenue, Cambridge, MA"
+Institute for Neural Computation
+"Institute for Neural Computation, University of California, San Diego"
+"Institute for Neural Computation, University of California, San Diego, La Jolla, CA"
+Institute for Numerical Mathematics
+"Institute for Pattern Recognition and Artificial Intelligence/ Huazhong University of Science and Technology, Wuhan"
+Institute for Robotics and Intelligent
+Institute for Robotics and Intelligent Systems
+"Institute for Robotics and Intelligent Systems, USC, CA, USA"
+"Institute for Robotics and Intelligent Systems, University of Southern California, CA, USA"
+"Institute for Studies in Fundamental Sciences (IPM), Tehran, Iran"
+Institute for Vision Systems Engineering
+"Institute for Vision and Graphics, University of Siegen, Germany"
+Institute for studies in theoretical Physics and Mathematics(IPM
+"Institute of Advanced Technology, Nanjing University of Posts and Telecommunications, Nanjing"
+"Institute of Anthropomatics, Karlsruhe Institute of Technology, Germany"
+Institute of Arti cial Intelligence and Cognitive Engineering
+"Institute of Arti cial Intelligence and Cognitive Engineering (ALICE), University of Groningen"
+Institute of Automatic Control Engineering (LSR
+"Institute of Automation, Chinese Academy of"
+"Institute of Automation, Chinese Academy of Sciences"
+"Institute of Automation, Chinese Academy of Sciences (CASIA"
+"Institute of Automation, Chinese Academy of Sciences, 100190, Beijing, P.R.C"
+"Institute of Automation, Chinese Academy of Sciences, Beijing 100080, China"
+"Institute of Automation, Chinese Academy of Sciences, Beijing 100190, China"
+"Institute of Automation, Chinese Academy of Sciences, China"
+"Institute of Automation, Chinese Academy of Sciences; 2Miscrosoft Research Asian; 3Media School"
+"Institute of Biochemistry, University of Balochistan, Quetta"
+"Institute of Child Health, University College London, UK"
+"Institute of Chinese Payment System, Southwestern University of Finance and Economics, Chengdu 610074, China"
+"Institute of Cognitive Neuroscience, University College London, London WC1N 3AR, UK. 2Affective Brain"
+"Institute of Cognitive and Behavioural Neuroscience, SWPS University of Social"
+"Institute of Computer Science and Technology, Chongqing University of Posts and"
+"Institute of Computer Science, Foundation for Research and Technology - Hellas (FORTH), Crete, 73100, Greece"
+"Institute of Computing Technology, CAS"
+"Institute of Computing Technology, CAS, Beijing 100190, China"
+"Institute of Computing Technology, CAS, Beijing, 100190, China"
+"Institute of Computing Technology, Chinese Academy of Sciences"
+"Institute of Computing Technology, Chinese Academy of Sciences, Beijing 100190, China"
+"Institute of Computing Technology, Chinese Academy of Sciences, Beijing, 100190, China"
+"Institute of Computing, University of Campinas (Unicamp), Campinas, Brazil, e-mail: ander"
+"Institute of Data Science and Technology, Alibaba Group"
+Institute of Deep Learning
+"Institute of Deep Learning, Baidu Research"
+"Institute of Digital Media, Peking University, Beijing 100871, China"
+"Institute of Digital Media, Peking University, Beijing, 100871, China"
+"Institute of Electrical Measurement and Measurement Signal Processing, TU Graz, Austria"
+Institute of Electrical and Electronics Engineers
+Institute of Electrical and Electronics Engineers (IEEE). DOI
+"Institute of Engineering and Technology, Alwar, Rajasthan Technical University, Kota(Raj"
+"Institute of Experimental Biology of Polish Academy of Sciences, Warsaw, Poland"
+Institute of Graduate Studies and Research
+"Institute of Human Genetics, University Hospital Magdeburg, Magdeburg, Germany"
+"Institute of Image Communication and Network Engineering, Shanghai Jiao Tong University"
+"Institute of Image Processing and Pattern Recognition, Shanghai Jiao Tong University"
+"Institute of Image Processing and Pattern Recognition, Shanghai Jiao Tong University, Shanghai, China"
+"Institute of Industrial Science, The University of Tokyo"
+Institute of Informatics - ISLA
+"Institute of Informatics, Istanbul Technical University, Istanbul, 34469, TURKEY"
+"Institute of Information Science, Academia Sinica, Taipei, Taiwan"
+"Institute of Information Science, Beijing Jiaotong University, Beijing 100044, P.R. China"
+Institute of Interdisciplinary Studies in Identity Sciences (IISIS
+"Institute of Media and Information Technology, Chiba University"
+"Institute of Mental Health, Peking University, P.R. China"
+"Institute of Neural Information Processing, Ulm University, Ulm, Germany"
+"Institute of Neuroscience, State Key Laboratory of Neuroscience, CAS Center for Excellence in Brain"
+Institute of Psychology and Behavioral Sciences
+"Institute of Psychology, Chinese"
+"Institute of Radioelectronics, Warsaw University of Technology, Warsaw, Poland"
+"Institute of Scienti c and Industrial Research, Osaka University, Ibaraki-shi 567-0047, Japan"
+"Institute of Software, Chinese Academy of Sciences, Beijing 100190, China"
+"Institute of Systems Engineering, Southeast University, Nanjing, China"
+"Institute of Systems and Robotics - University of Coimbra, Portugal"
+"Institute of Systems and Robotics, University of Coimbra, Portugal"
+Institute of control science and engineering
+"Institute ofInformation Science, Academia Sinica, Taipei, Taiwan"
+"Integrated Research Center, Universit`a Campus Bio-Medico di Roma"
+"Intel Lab, 2200 Mission College Blvd, Santa Clara, CA 95054, USA"
+Intelligence Computing Research Center
+"Intelligent Autonomous Systems (IAS), Technical University of Munich, Garching"
+"Intelligent Information Engineering and Science, Doshisha University, Kyoto, Japan"
+"Intelligent Recognition and Image Processing Lab, Beihang University, Beijing"
+"Intelligent Systems Group, University of Groningen, The Netherlands"
+"Intelligent Systems Group, Utrecht University, Padualaan 14, 3508 TB, Utrecht"
+"Intelligent Systems Lab Amsterdam, University of Amsterdam"
+"Intelligent Systems Lab Amsterdam, University of Amsterdam, The Netherlands"
+"Intelligent Systems Laboratory, Halmstad University, Halmstad, Sweden"
+"Intelligent User Interfaces Lab, Ko c University, Turkey"
+Interactive and Digital Media Institute
+"Interactive and Digital Media Institute, National University of Singapore, SG"
+"Interdisciplinary Program in Visual Information Processing, Korea University, Seoul, Korea"
+"Interdisciplinary Program of Bioengineering, Seoul National University, Seoul 03080, Korea"
+"International Islamic University, Islamabad 44000, Pakistan"
+Islamic Azad University of AHAR
+"Islamic Azad University, Gonabad, Iran"
+"Islamic Azad University, Mashhad Branch, Mashhad, Iran"
+"Islamic Azad University, Qazvin, Iran"
+"Islamic Azad University, Shahrood, Iran"
+"IslamicAzad University, Qazvin, Iran"
+IstanbulTechnicalUniversity
+"J. P. College of Engineering, India"
+"JDL, Institute of Computing Technology, CAS, P.O. Box 2704, Beijing, China"
+"Janelia Research Campus, Howard Hughes Medical Institute, Ashburn, United"
+"Japan, 2 Center for Special Needs Education, Nara University of Education, Nara-shi, Nara"
+"Jawaharlal Technological University, Anantapur"
+"Jet Propulsion Laboratory, California Institute of Technology, Pasadena, CA"
+"Jilin University, Changchun 130012, China"
+"Johns Hopkins University, 3400 N. Charles St, Baltimore, MD 21218, USA"
+"Johns Hopkins University, 3400 N. Charles Street, Baltimore, MD 21218, USA"
+"Johns Hopkins University, Center for Speech and Language Processing"
+K S Rangasamy College of Technology
+K. N. Toosi University of
+"K.S.R. College Of Engineering, Tiruchengode, India"
+"K.S.Rangasamy College of Technology, Namakkal, TamilNadu, India"
+"KAUST1, University of Amsterdam2, Qualcomm Technologies, Inc"
+KIT University of the State of Baden-W rttemberg and National Laboratory of the Helmholtz Association
+"KTH Royal Institute of Technology, CVAP Lab, Stockholm, Sweden"
+"KTH, Royal Institute of Technology"
+"KU Phonetics and Psycholinguistics Lab, University of Kansas"
+"Katholieke Universiteit Leuven, ESAT/VISICS"
+"Key Lab of Intelligent Information Processing of Chinese Academy of Sciences (CAS), Institute of"
+"Key Lab of Intelligent Information Processing, Institute of Computing Technology"
+"Key Lab. of Machine Perception, School of EECS, Peking University"
+"Key Lab. of Machine Perception, School of EECS, Peking University, China"
+"Key Laboratory of Behavior Sciences, Institute of Psychology"
+"Key Laboratory of Computer Network and Information Integration of Ministry of Education, Southeast University, Nanjing"
+"Key Laboratory of Embedded System and Service Computing, Ministry of Education, Tongji University, Shanghai, China"
+"Key Laboratory of Machine Perception (MOE), School of EECS, Peking University"
+"Key Laboratory of Machine Perception, Peking University, Beijing"
+"Key Laboratory of Pervasive Computing (Tsinghua University), Ministry of Education"
+"Key Laboratory of Transient Optics and Photonics, Xi an Institute of Optics and Precision Mechanics, Chi"
+Khulna University of Engineering and Technology
+"King Saud University, KSA"
+"King Saud University, P.O. Box 51178, Riyadh 11543, Saudi Arabia"
+"Kingston University London, University of Westminster London"
+"Kitware, Inc"
+"Kobe University, NICT and University of Siegen"
+"Kodak Research Laboratories, Rochester, NY"
+"Kodak Research Laboratories, Rochester, New York"
+"Kong Polytechnic University, Kowloon, Hong Kong"
+Korea Advanced Institute of Science and Technology (KAIST
+"Korea Electronics Technology Institute, 203-103 B/D 192, Yakdae-Dong"
+"Korea Electronics Technology Institute, Jeonju-si, Jeollabuk-do 561-844, Rep"
+"Korea Electronics Technology Institute, Jeonju-si, Jeollabuk-do 561-844, Rep. of"
+"Korea University, Seoul 136-713, Korea"
+"Korean Research Institute of Standards and Science (KRISS), Korea"
+"Kulhare, Sourabh, ""Deep Learning for Semantic Video Understanding"" (2017). Thesis. Rochester Institute of Technology. Accessed"
+"Kumamoto University, 2-39-1 Kurokami, Kumamoto shi"
+"Kurukshetra University, Kurukshetra-136 119, Haryana, INDIA"
+"Kwangwoon University, 447-1 Wolge-dong, Nowon-Gu, Seoul 139-701, Korea"
+Kyung Hee University South of Korea
+"Kyung Hee University, Yongin, Rep. of Korea"
+"L3S Research Center, Hannover, Germany"
+"LCSEE, West Virginia University"
+"LIACS Media Lab, Leiden University, The Netherlands"
+"LIP6, UPMC - Sorbonne University, Paris, France"
+"LITIS EA 4108-QuantIF Team, University of Rouen, 22 Boulevard Gambetta, 76183 Rouen Cedex, France"
+"LIUM Laboratory, Le Mans, France, 2 Idiap Research Institute, Martigny, Switzerland"
+"Lab of Science and Technology, Southeast University, Nanjing 210096, China"
+"Lab, University College London, London WC1H 0AP, UK. 3Clinical"
+"Laboratory for Human Brain Dynamics, RIKEN Brain Science Institute (BSI), 2-1 Hirosawa, Wakoshi, Saitama, 351-0198, Japan"
+"Laboratory, University of Houston, Houston, TX, USA"
+Language Technologies Institute
+"Language Technologies Institute, Carnegie Mellon University, PA, USA"
+"Language Technologies Institute, School of Computer Science"
+"Language Technology Institute, Carnegie Mellon University, Pittsburgh, PA, USA"
+"Language and Brain Lab, Simon Fraser University, Canada"
+"Learning Systems Group, California Institute of Technology"
+"Lecturer, Amity school of Engineering and Technology, Amity University, Haryana, India"
+"Leiden Institute of Advanced Computer Science, Leiden University, The Netherlands"
+"Leiden, the Netherlands, 3 Delft University of Technology"
+"Lille 1 University, France"
+"Link oping University, Computer Vision Laboratory"
+"Link oping University, SE-581 83 Link oping, Sweden"
+Link to publication from Aalborg University
+Link to publication in University of Groningen/UMCG research database
+Link to publication record in Queen's University Belfast Research Portal
+"Lister Hill National Center for Biomedical Communications, National Library of Medicine, National Institutes of Health"
+"Lomonosov Moscow State University, 2Video Analysis Technologies, LLC"
+"Lotus Hill Institute for Computer Vision and Information Science, 436000, China"
+"Lund University, Cognimatics AB"
+M. Mark Everingham University of Leeds
+"M.H Saboo Siddik College of Engineering, University of Mumbai, India"
+"M.P.M. College, Bhopal, India"
+"M.S. (University of California, Berkeley"
+M.S. Brunel University of West London
+M.S. University of Central Florida
+"M.Tech Scholar, MES College of Engineering, Kuttippuram"
+"M.Tech Student, Mount Zion College of Engineering, Pathanamthitta, Kerala, India"
+"M.Tech Student, SSG Engineering College, Odisha, India"
+"M.Tech, Information Technology, Madras Institute of Technology, TamilNadu, India"
+"M.Tech, Sri Sunflower College of Engineering and Technology, Lankapalli"
+"MATS University, MATS School of Engineering and Technology, Arang, Raipur, India"
+"MCA Student, Sri Manakula Vinayagar Engineering College, Pondicherry"
+"MES College of Engineering, Kuttippuram"
+MICC - University of Florence
+"MICC, University of Florence"
+"MIRACL-FS, University of Sfax"
+"MIRACL-FSEG, University of Sfax"
+"MISC Laboratory, Constantine 2 University, Constantine, Algeria"
+"MIT, McGovern Institute, Center for Brains, Minds and Machines"
+MITSUBISHI ELECTRIC RESEARCH LABORATORIES
+"MRC Centre for Neuropsychiatric Genetics and Genomics, Cardiff University, Cardiff"
+"MRC Laboratory For Molecular Cell Biology, University College London"
+"MTech Student 1, 2, Disha Institute of"
+"Machine Intelligence Group, School of Computer Science, Indian Institute of Information Technology and"
+"Machine Intelligence Lab (MIL), Cambridge University"
+"Machine Intelligence Laboratory, College of Computer Science, Sichuan University"
+"Machine Perception Laboratory, University of California, San Diego"
+"Machine Vision Group, P.O. Box 4500, FI-90014, University of Oulu, Finland"
+"Machine Vision Lab, Faculty of Environment and Technology, University of the West of England"
+"Mackenzie Presbyterian University, S o Paulo, S o Paulo, Brazil"
+"Madanapalle Institute of Technology and Science, Madanapalle, Andhra Pradesh"
+Mahatma Gandhi Institute of Technology
+Malaviya National Institute of Technology
+"Mancha, Spain, Imperial College, London, UK"
+"Mangalore Institute of Engineering and Technology, Badaga"
+Mans eld College
+"Marine Institute, via Torre Bianca, 98164 Messina Italy"
+Massachusettes Institute of Technology
+Massachusetts Institute of Technology Rapporteur
+"Massachusetts Institute of Technology, 2Facebook Applied Machine Learning, 3Dartmouth College"
+"Massachusetts Institute of Technology, 77 Massachusetts Avenue, Cambridge, MA"
+Max Planck Institute f ur biologische Kybernetik
+"Max Planck Institute for Biological Cybernetics, Spemannstr. 38, 72076 T bingen, Germany"
+"Max Planck Institute for Informatics, Saarbr ucken, Germany"
+"Max Planck Institute for Informatics, Saarbr ucken, Germany (MPI-INF.MPG.DE"
+"Medical Image Analysis Lab, School of Computing Science, Simon Fraser University, Canada"
+"Medical Research Council Human Genetics Unit, Institute of Genetics and Molecular"
+"Medical School, University of Ioannina, Ioannina, Greece"
+"Menara, 1008 Tunis; 2University of Tunis El-Manar, Tunis with expertise in Mechanic, Optics, Biophysics, Conference Master"
+"Metron, Inc"
+"Michigan State University, 3115 Engineering Building"
+"Michigan State University, East Lansing, MI 48824, U.S.A"
+"Michigan State University, East Lansing, MI, U.S.A"
+"Michigan State University, NEC Laboratories America"
+"Microsystems Design Lab, The Pennsylvania State University"
+"Middlesex University London, 4International Hellenic University"
+Mihaylo College of Business and Economics
+"Minia University, Egypt"
+Ministry of Higher Education and Scientific Research / The University of Mustsnsiriyah/Baghdad IRAQ
+Mitsubishi Electric Research Laboratory
+Mitsubishi Electric Research Labs (MERL
+"Mitsubishi Electric Research Labs (MERL), Cambridge, MA, USA"
+Mme Tinne Tuytelaars Katholieke Universiteit Leuven
+"Montefiore Institute, University of Li ge, 4000 Li ge, Belgium"
+"Montreal Institute for Learning Algorithms, Universit e de Montr eal"
+Moradabad Institute of Technology
+"Moscow Institute of Physics and Technology, Institutskiy per., 9, Dolgoprudny, 141701, Russia"
+"Moscow State University, dept. of Computational Mathematics and Cybernetics"
+"Most of the earlier studies mentioned above, including ours"
+"Motorola China Research Center, Shanghai, 210000, P.R.China"
+"Motorola, Inc"
+"Much is known on how facial expressions of emotion are produced, including which individual muscles are most active in"
+Multimedia Laboratory at The Chinese University of Hong Kong
+"Multimedia Laboratory, The Chinese University of Hong Kong, Hong Kong"
+"Multimedia University (MMU), Cyberjaya, Malaysia"
+"Multimodal Computing and Interaction, Saarland University, Germany"
+"Myongji University, Yongin, 449-728 South"
+"NEC Laboratories America, Inc"
+"NEC Laboratories America, Inc., Cupertino, CA"
+"NICTA , Queensland Research Laboratory, QLD, Australia"
+"NICTA, and Australian National University"
+NSS College of Engineering
+"Nagoya Institute of Technology, Gokiso, Showa-ku, Nagoya, 466-8555 Japan"
+"Najafabad Branch, Islamic Azad University"
+Nam k Kemal University
+"Nam k Kemal University, Tekirda g, Turkey"
+"Nanjing University of Information Science and Technology, Nanjing, 210044, China"
+Nanjing University of Science and
+"Nanjing, 210023, China, 4 School of Computer Science and Technology, Nanjing University of Posts and"
+"Nanjing, 210094, China, 3 School of Automation, Nanjing University of Posts and Telecommunications"
+"Nanyang Technological University, 2University of California San Diego"
+"Narayana Pharmacy College, Nellore, India"
+"National Cheng Kung University, Tainan, Taiwan, R.O.C"
+"National Cheng Kung University, Tainan, Taiwan, ROC"
+"National Chung Cheng University, Chiayi, Taiwan, R.O.C"
+"National Engineering Research Center for Multimedia Software, Wuhan University, Wuhan, China"
+National Institute of Advanced Industrial
+National Institute of Advanced Industrial Science and Technology (AIST
+"National Institute of Advanced Industrial Science and Technology (AIST), Tsukuba 305-8560, Japan"
+National Institute of Development Administration
+National Institute of Informatics
+"National Institute of Informatics, Japan"
+"National Institute of Informatics, Tokyo, Japan"
+"National Institute of Optics, National Research Council, Arnesano, LE, Italy"
+"National Institute of Technology, Toyota College, Japan"
+"National Kaohsiung University of Applied Sciences, Kaohsiung, Kaohsiung, Taiwan, ROC"
+"National Key Laboratory for Novel Software Technology, Nanjing University, China"
+"National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210093, China"
+"National Lab of Pattern Recognition, Institute of Automation"
+"National Laboratory of Pattern Recognition (NLPR), Institute of Automation"
+"National Laboratory of Pattern Recognition, Institute of Automation"
+"National Laboratory of Pattern Recognition, Institute of Automation, CAS, Beijing 100190, P. R. China"
+"National Laboratory of Pattern Recognition, Institute of Automation, CAS, Beijing, 100190, China"
+"National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of"
+"National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences"
+"National Laboratory on Machine Perception, Peking University, Beijing, P.R. China"
+"National Research Council of Italy, Institute for Microelectronics and Microsystems, Lecce"
+"National Research University Higher School of Economics, Nizhny Novgorod, Russian"
+"National Taichung University of Science and Technology, Taichung, Taiwan, R.O.C"
+National Taiwan University of Science and
+"National Tsing Hua University, Hsin-Chu, Taiwan"
+"National Tsing-Hua University, Hsin-Chu, Taiwan"
+"National University of Computer and Emerging Sciences (NUCES-FAST), Islamabad, Pakistan"
+"National University of Singapore Research Institute, Suzhou, China"
+"National University of Singapore, 2Shanghai Jiao Tong University"
+"Netherlands, Utrecht University, Utrecht, The Netherlands"
+"Neuroscience, Icahn School of Medicine at Mount Sinai, Friedman Brain Institute, New York, NY, United States"
+"New York University Shanghai, 1555 Century Ave, Pudong"
+No Institute Given
+Nokia Bell Labs and University of Oxford
+"Nokia Research Center, Tampere, Finland"
+North Carolina AandT State University
+"North China University of Technology, Beijing 100144 CHINA"
+"North Dakota State University, Fargo, ND58105, USA"
+Northeastern University 2Microsoft Research 3City University of New York
+"Northumbria University, Newcastle Upon-Tyne NE21XE, UK"
+Northwestern University) to T.E. We thank Vincent De Gardelle for helpful comments on an earlier version of
+Nqtional Institute of Standards and Technology
+"Numediart Institute, University of Mons"
+Odaiyappa College of
+Opus College of Engineering
+"Other uses, including reproduction and distribution, or selling or"
+"P A College of Engineering, Nadupadavu"
+"P. O. Box 4500 FI-90014 University of Oulu, Finland"
+"P. O. Box 4500 Fin-90014 University of Oulu, Finland"
+"P.A. College of Engnineering, Mangalore"
+"P.G. Student, SRV Engineering College, sembodai, India"
+"P.S.R Engineering College, Sivakasi, Tamilnadu, India"
+"PES Institute of Technology, Bangalore, Karnataka, India"
+PES Modern College of Engg
+"PG Scholar, Dr.Pauls Engineering College, Villupuram District, Tamilnadu, India"
+"PG Scholar, Hindusthan College of Engineering and Technology, Coimbatore, India"
+"PG scholar, Communication Systems, Adhiyamaan College of Engineeing, Hosur, (India"
+"PSGR Krishnammal College for Women, Coimbatore"
+Palo Alto Research Center (PARC
+"PanimalarInstitute of Technology, Tamilnadu, India"
+"Paran a Federal University, Curitiba, Brazil"
+"Pathological anxiety is associated with disrupted cognitive processing, including working memory and"
+"Pattern Recognition Group, University of Siegen"
+"Pattern Recognition and Bio-informatics Laboratory, Delft University of Technology, THE NETHERLANDS"
+"Pattern Recognition and Bioinformatics Group, Delft University of Technology"
+"Pattern Recognition and Bioinformatics Group, Delft University of Technology, The Netherlands"
+"Ph.D Research Scholar, Karpagam University, Coimbatore, Tamil Nadu, India"
+"Ph.D student Zaid Shhedi, Doctoral School of Automatic Control and Computers, University"
+"Polytechnic Institute of NYU, NY, USA"
+Polytechnic University of Bucharest
+"Polytechnic University of Milan, Milan, 20156, Italy, 3 Applied Electronics"
+"Pompeu Fabra University, Spain"
+"Ponti cal Catholic University of Rio de Janeiro, Brazil"
+"Principal, Chaithanya Institute of Engineering and Technology, Kakinada, AP, India"
+"Principal, JNTUH College of Engineering, jagitial, Karimnagar, AP, India"
+"Priyadarshini College of Engg, Nagpur, India"
+"Program of Computational Science and Engineering, Bo gazi ci University"
+"Proto Labs, Inc"
+Psychiatry at the University of Pittsburgh
+"Psychology and Educational Sciences, Open University of the Netherlands, Valkenburgerweg"
+"Psychology and Psychiatry, University of Pittsburgh, USA"
+"Psychology, American University"
+"Psychology, University of Illinois, Beckman Institute, Urbana-Champaign, Illinois 61801, University of"
+"Psychonomic Society, Inc"
+"Psychopharmacology Unit, Educational and Health Psychology, University College"
+"Public University of Navarra, Spain"
+"Publication details, including instructions for authors and subscription"
+"Publication details, including instructions for authors and subscription information"
+Purdue Institute for Integrative Neuroscience
+"Pursuing M.Tech, Caarmel Engineering College, MG University, Kerala, India"
+"QCIS Centre, FEIT, University of Technology, Sydney, NSW 2007, Australia"
+"QCIS, University of Technology Sydney, Sydney, Australia"
+"QCIS, University of Technology, Sydney"
+"Qihoo 360 AI Institute, Beijing, China"
+"Quanti ed Employee unit, Finnish Institute of Occupational Health"
+"Quantitative Employee unit, Finnish Institute of Occupational Health"
+"Queen Mary College, London"
+"Queen Mary, University of London"
+"Queen Mary, University of London, E1 4NS, UK"
+"Queen Mary, University of London, London E1 4NS, UK"
+Queen s University Belfast
+Queen's University Belfast - Research Portal
+"Queensland Micro- and Nanotechnology Centre and Grif th School of Engineering, Grif th University"
+"R. Campellone, 3210 Tolman Hall, University of California, Berkeley"
+"R.C.Patel Institute of Technology, Shirpur, Dist.Dhule.Maharashtra, India"
+"RCC Institute of Information Technology, Kolkata, India"
+"RGPV University, Indore"
+"RIEB, Kobe University, Kobe, 657-8501, Japan"
+"RTM Nagpur University, Campus Nagpur, (MS)-India"
+"RTMNU Nagpur University, India"
+"Rayalaseema University Kurnool, Andhra Pradesh"
+"Recanati Genetic Institute, Rabin Medical Center and Schneider Children s Medical Center, Petah Tikva, Israel"
+"Recognition, Institute of Automation"
+"Recognition, Institute of Automation, Chinese Academy of Sciences"
+"Recognition, Institute of Automation, Chinese Academy of Sciences, Beijing, China"
+"Rensselaer Polytechnic Institute, 110 Eighth Street, Troy, NY 12180 USA"
+Research Center E. Piaggio
+"Research Center E. Piaggio , University of Pisa, Pisa, Italy, 2 Faculty of Psychology, University of Florence, Florence, Italy"
+"Research Center CENTIA, Electronics and Mechatronics"
+Research Center and Laboratoire
+Research Center for Information
+"Research Center for Information Technology Innovation, Academia Sinica"
+"Research Center for Information Technology Innovation, Academia Sinica, Taipei, Taiwan"
+"Research Center for Intelligent Security Technology, CIGIT"
+"Research Center for Learning Science, Southeast University, China"
+"Research Center for Learning Science, Southeast University, Nanjing 210096, China"
+"Research Center for Learning Science, Southeast University, Nanjing, China"
+"Research Center in Information Technologies, Universit e de Mons, Belgium"
+"Research Groups on Intelligent Machines, University of Sfax, Sfax 3038, Tunisia"
+"Research Institute of Child Development and Education, University of Amsterdam, Utrecht, The"
+"Research Institute of Shenzhen, Wuhan University, Shenzhen, China"
+"Research Institute, 138 Gajeongno, Yuseong-gu, Daejeon, 305-700, Republic of Korea"
+"Research Reports of CMP, Czech Technical University in Prague, No"
+"Research Scholar (M.Tech, IT), Institute of Engineering and Technology"
+"Research Scholar, CGC Group of Colleges, Gharuan, Punjab, India"
+"Research Scholar, PSGR Krishnammal College for Women, Coimbatore"
+"Research School of Computer Science, The Australian National University, ACT 2601, Australia"
+"Research School of Engineering, The Australian National University, ACT 2601, Australia"
+"Rm 1365, Stanford University, 401 Quarry Road, Stanford, CA"
+"Robotics Institute, Carnegie Mellon University"
+"Robotics Institute, Carnegie Mellon University 3University of Pittsburgh, USA"
+"Robotics Institute, Carnegie Mellon University, Pittsburgh PA"
+"Robotics Institute, Carnegie Mellon University, Pittsburgh, PA"
+"Robotics Institute, Carnegie Mellon University, Pittsburgh, PA, 15213. USA"
+"Robotics Institute, Carnegie Mellon University, Pittsburgh, Pennsylvania"
+"Robotics Institute, Carnegie Mellon University, USA"
+"RoboticsResearchGroup, UniversityofOxford, Oxford, UK"
+"Rochester Human-Computer Interaction (ROC HCI), University of Rochester, NY"
+"Rochester Institute of Technology - 83 Lomb Memorial Drive, Rochester, NY USA"
+"Rochester Institute of Technology, Rochester, NY"
+"Ross School of Business, University of Michigan, Ann Arbor, MI, USA"
+Rowland Institute at Harvard
+"Rowland Institute at Harvard, Cambridge, MA 02142, USA"
+"Rutgers University, Computer and Information Sciences, 110 Frelinghuysen Road, Piscataway, NJ"
+"Rutgers, The State University of New Jersey"
+"Rutgers, The State University of New Jersey, 508 CoRE, 94 Brett Rd, Piscataway, NJ"
+"Rutgers, The State University of New Jersey, 723 CoRE, 94 Brett Rd, Piscataway, NJ"
+"Rutgers, The State University of New Jersey, Piscataway, NJ"
+"S J B Institute of Technology, Bangalore, Karnataka, India"
+"S.R.C.E.M, Banmore, RGPV, University, Bhopal, Madhya Pradesh, India"
+SAMSI and Duke University
+"SBK Women s University, Quetta, Balochistan"
+"SICE, Beijing University of Posts and Telecommunications. 2Center for Imaging Science, Johns Hopkins University"
+"SRI International, Menlo Park California / *Brooklyn College, Brooklyn New York"
+"SRM University, Kattankulathur, Chennai-603 203, Tamilnadu, India"
+"SRV Engineering College, sembodai, india"
+"SSESA, Science College, Congress Nagar, Nagpur, (MS)-India"
+"SSN College of Engineering, Chennai, India"
+"SSN College of Engineering, Kalavakkam, Tamil Nadu, India"
+STANBUL TECHNICAL UNIVERSITY INSTITUTE OF SCIENCE AND TECHNOLOGY
+SUS college of Engineering and Technology
+Sabanc University
+"Sackler Faculty of Medicine, Tel Aviv University, Tel Aviv, Israel"
+"Salgado de Oliveira University, Brazil"
+Samsung Advanced Institute of Technology
+"Samsung Advanced Institute of Technology (SAIT), KAIST"
+"Samsung RandD Institute America - Dallas, 1301 East Lookout Drive, Richardson, TX 75082, USA"
+Sanghvi Institute of Management and Science
+"Sapienza University of Rome, Italy"
+Sarhad University of Science and Information Technology
+"Sathyabama University Old Mamallapuram Road, Chennai, India"
+"Sathyabama University, Chennai, India"
+"Savitri Bai Phule Pune University, Maharashtra India"
+Savitribai Phule Pune University
+"School of Applied Statistics, National Institute of Development Administration, Bangkok, Thailand"
+"School of Automation Engineering, University of Electronic Science and Technology of China, No. 2006, Xiyuan Ave"
+"School of Automation, Beijing University of Posts and Telecommunications, Beijing 100876, China"
+"School of Behavioral and Brain Sciences, The University of Texas at Dallas, USA"
+"School of Business, Aalto University, Finland"
+"School of Comm. and Info. Engineering, Beijing University of Posts and Telecom., Beijing China"
+"School of Communication Engineering, Hangzhou Dianzi University, Xiasha Higher Education Zone"
+"School of Computer Engineering, Nanyang Technological University, Singapore"
+"School of Computer Engineering, Sejong University, Seoul, Korea"
+"School of Computer Engineering, Shahrood University of Technology, Shahrood, IRAN"
+"School of Computer Information Engineering, Jiangxi Normal University, Nanchang, China"
+"School of Computer Science and Engineering, Nanjing University of Science and Technology"
+"School of Computer Science and Engineering, Nanyang Technological University, Singapore"
+"School of Computer Science and Engineering, Sichuan University, China"
+"School of Computer Science and Engineering, South China University of Technology"
+"School of Computer Science and Engineering, South China University of Technology, Guangzhou 510006, China"
+"School of Computer Science and Engineering, South China University of Technology, Guangzhou, China"
+"School of Computer Science and Engineering, Southeast University, Nanjing 210096, China"
+"School of Computer Science and Engineering, Southeast University, Nanjing 211189, China"
+"School of Computer Science and Engineering, Water Resources University, Hanoi 10000, Vietnam"
+"School of Computer Science and Information Systems, Birkbeck College, University of London"
+"School of Computer Science and Software Engineering, Shenzhen University"
+"School of Computer Science and Software Engineering, Shenzhen University, Nanhai Ave 3688, Shenzhen"
+"School of Computer Science and Technology, Harbin Institute of"
+"School of Computer Science and Technology, Harbin Institute of Technology, China"
+"School of Computer Science and Technology, Harbin Institute of Technology, Harbin, China"
+"School of Computer Science and Technology, Nanjing University of Science and Technology, China"
+"School of Computer Science and Technology, Shandong Institute of Business and Technology"
+"School of Computer Science and Technology, Shandong University"
+"School of Computer Science and Technology, Tianjin University"
+"School of Computer Science and Technology, Tianjin University, 300072 Tianjin, China"
+"School of Computer Science and Technology, Tianjin University, China"
+"School of Computer Science and Technology, Tianjin University, Tianjin 300072, China"
+"School of Computer Science and Technology, Tianjin University, Tianjin, China"
+"School of Computer Science and Technology, University of Science and Technology of China"
+"School of Computer Science and Technology, Zhejiang University of Technology, Hangzhou 310023, China"
+"School of Computer Science, CECS, Australian National University, Australia"
+"School of Computer Science, Carnegie Mellon University, 15213, USA"
+"School of Computer Science, Carnegie Mellon University, PA 15213, USA"
+"School of Computer Science, Carnegie Mellon University, PA, USA"
+"School of Computer Science, Carnegie Mellon University, Pittsburgh, PA 15213, USA"
+"School of Computer Science, Carnegie Mellon University, Pittsburgh, USA"
+"School of Computer Science, Carnegie Mellon University, USA"
+"School of Computer Science, Fudan University, Shanghai 200433, China"
+"School of Computer Science, Fudan University, Shanghai, 200433, China"
+"School of Computer Science, Fudan University, Shanghai, China"
+"School of Computer Science, Nanjing University of Science and Technology"
+"School of Computer Science, Northwestern Polytechnical University, China"
+"School of Computer Science, Northwestern Polytechnical University, P.R.China"
+"School of Computer Science, Northwestern Polytechnical University, Xi an China"
+"School of Computer Science, Sichuan University, Chengdu, China"
+"School of Computer Science, South China Normal University, China"
+"School of Computer Science, The University of Adelaide, Australia"
+"School of Computer Science, The University of Manchester"
+"School of Computer Science, The University of Nottingham"
+"School of Computer Science, Tianjin University"
+"School of Computer Science, University of Adelaide, Australia"
+"School of Computer Science, University of Birmingham, UK"
+"School of Computer Science, University of Lincoln, U.K"
+"School of Computer Science, University of Lincoln, United Kingdom"
+"School of Computer Science, University of Nottingham"
+"School of Computer Science, University of Windsor, Windsor, ON, Canada N9B 3P"
+"School of Computer Science, Wuhan University, P.R. China"
+"School of Computer Science, Wuyi University, Jiangmen 529020, China"
+"School of Computer Software, Tianjin University, 300072 Tianjin, China"
+"School of Computer and Communication Engineering, University of Science and Technology Beijing, 100083 Beijing, China"
+"School of Computer and Information Engineering, Nanyang Institute of Technology, Henan, Nanyang, 473000, P.R"
+"School of Computer and Information Science, Chongqing Normal University 401331, China"
+"School of Computer and Information, Hefei University of Technology, China"
+"School of Computer and Information, Hefei University of Technology, Hefei"
+"School of Computer, Beijing Institute of Technology, Beijing, China"
+"School of Computing Science, Simon Fraser University, Burnaby, B.C., Canada"
+"School of Computing Science, Simon Fraser University, Burnaby, B.C., Canada; E-Mail"
+"School of Computing Science, Simon Fraser University, Canada"
+"School of Computing Sciences, University of East Anglia, Norwich, UK"
+"School of Computing and Communications University of Technology, Sydney"
+"School of Computing and Communications Infolab21, Lancaster University, Lancaster LA1 4WA, UK"
+"School of Computing and Communications, University of Technology Sydney, Sydney, Australia"
+"School of Computing and Info. Sciences, Florida International University"
+"School of Computing, National University of Singapore, SG"
+"School of Computing, National University of Singapore, Singapore"
+"School of Computing, Staffordshire University"
+"School of Control Science and Engineering, Shandong University, Jinan 250061, China"
+"School of Data Science, Fudan University, China"
+"School of Data and Computer Science, Sun Yat-Sen University, China"
+"School of Data and Computer Science, Sun Yat-sen University"
+"School of Data and Computer Science, Sun Yat-sen University, P.R.China"
+"School of Data of Computer Science, Sun Yat-sen University, P.R. China"
+"School of E.C.E., National Technical University of Athens, 15773 Athens, Greece"
+"School of EECS, Queen Mary University of London"
+"School of EECS, Queen Mary University of London, UK"
+"School of EEE, Nanyang Technological University, Singapore"
+"School of Electrical Engineering and Automation, Harbin Institute of Technology"
+"School of Electrical Engineering and Computer Science, Peking University"
+"School of Electrical Engineering, Iran University of Science and Technology, Tehran, Iran"
+"School of Electrical and Computer Engineering, Cornell University"
+"School of Electrical and Computer Engineering, RMIT University"
+"School of Electrical and Computer Engineering, RMIT University, Melbourne, Australia"
+"School of Electrical and Electronic Engineering, Nanyang Technological University, Singapore"
+"School of Electrical and Electronic Engineering, Nanyang Technological University, Singapore. 2Advanced Digital Sciences Center, Singapore"
+"School of Electrical and Information Engineering, The University of Sydney, Sydney, NSW, Australia, 2 Sydney Medical"
+"School of Electrical and Information Engineering, Xi an Jiaotong University, Xi an, China"
+"School of Electrical, Computer and Energy Engineering, Arizona State University"
+"School of Electromechanical Engineering, Guangdong University of Technology, 510006 Guangzhou, China"
+"School of Electronic Engineering and Computer Science, Peking University, 100871, China"
+"School of Electronic and Computer Engineering, Peking University"
+"School of Electronic and Information Engineering, Beihang University, Beijing, 100191, China"
+"School of Electronic and Information Engineering, South China University of Technology"
+"School of Electronic and Information Engineering, Tongji University, Shanghai, China"
+"School of Electronics Engineering and Computer Science, Peking University"
+"School of Electronics and Computer Engineering, Peking University"
+"School of Electronics and Information Engineering, Tongji University, Caoan Road 4800, Shanghai"
+"School of Electronics and Information, Northwestern Polytechnical University, China"
+"School of Engineering, Taylor s University"
+"School of Engineering, University of Guelph"
+"School of Engineering, University of Portsmouth, United Kingdom"
+"School of Financial Information Engineering, Southwestern University of Finance and Economics, Chengdu"
+"School of Games, Hongik University, Seoul, Korea"
+"School of ICE, Beijing University of Posts and Telecommunications, Beijing, China"
+"School of Info. and Commu. Engineering, Beijing University of Posts and Telecommunications"
+"School of Informatics, University of Edinburgh, UK"
+"School of Information Engineering, Guangdong Medical College, Song Shan Hu"
+"School of Information Engineering, Guangdong University of Technology, 510006 Guangzhou, China"
+"School of Information Engineering, Nanchang University, China"
+"School of Information Science and Engineering, Central South University, Changsha"
+"School of Information Science and Engineering, Southeast University, Nanjing, China"
+"School of Information Science and Technology, Donghua University, Shanghai 200051, China"
+"School of Information Science and Technology, Northwestern University, Xi an710127, Shanxi, China"
+"School of Information Science and Technology, ShanghaiTech University, Shanghai, 200031, China"
+"School of Information Science and Technology, Sun Yat-sen University, China"
+"School of Information Systems, Singapore Management University, Singapore"
+"School of Information Technology and Electrical Engineering, The University of Queensland"
+"School of Information Technology and Engineering, University of Ottawa, Ontario, Canada"
+"School of Information Technology and Management, University of International"
+"School of Information and Control Engineering, Nanjing University of Information Science and Technology, Nanjing, 210044, China"
+"School of Internet of Things Engineering, Jiangnan University, Wuxi 214122, China"
+"School of IoT Engineering, Jiangnan University, Wuxi 214122, China"
+"School of IoT Engineering, Jiangnan University, Wuxi, 214122, China"
+"School of Management Engineering, Henan Institute of Engineering, Zhengzhou 451191, P.R. China"
+"School of Mathematical Science, Dalian University of Technology, Dalian, China"
+"School of Mathematical Science, Peking University, China"
+"School of Mathematical Sciences, Dalian University of Technology, Linggong Rd. 2, Dalian"
+"School of Mathematical Sciences, Monash University, VIC 3800, Australia"
+"School of Mathematics and Computational Science, Sun Yat-sen University, China"
+"School of Mathematics and Computer Science, Northeastern State University, Tahlequah, OK 74464, USA"
+"School of Mathematics and Statistics, Xi an Jiaotong University, P. R. China"
+"School of Mathematics and Statistics, Xi an Jiaotong University, Xi an, China"
+"School of Mechanical Engineering, Southwest Jiaotong University, Chengdu 610031, China"
+"School of Medicine, Shenzhen University, Shenzhen 518060, China"
+"School of Optics and Electronics, Beijing Institute of Technology, Beijing"
+"School of Physics and Engineering, Sun Yat-Sen University, Guangzhou, China, 2 School of Information"
+"School of Psychology, Cardiff University, Cardiff, CF10 3AT, UK"
+"School of Psychology, Cardiff University, Cardiff, United Kingdom, College of"
+"School of Psychology, The University of New South Wales, Sydney, Australia, 2 School of Psychology"
+"School of Psychology, University of Auckland, Auckland, New Zealand"
+"School of Psychology, University of Central Lancashire"
+"School of Software, Dalian University of Technology, Dalian 116621, China"
+"School of Software, Dalian University of Technology, Tuqiang St. 321, Dalian 116620, China"
+"School of Software, Sun Yat-sen University, China"
+"School of Software, Tianjin University"
+"School of Software, Tsinghua University, Beijing 100084, China"
+"Schreiber Building, room 103, Tel Aviv University, P.O.B. 39040, Ramat Aviv, Tel Aviv"
+"Science and Intelligence Technology, Shanghai Institutes for Biological Sciences, CAS"
+"Science and Technology, Sun Yat-Sen University, Guangzhou, China, 3 SYSU-CMU Shunde International"
+Science and the Robotics Institute at Carnegie Mellon University. This study was supported in part
+"Scienti c Visualization and Computer Graphics, University of Groningen, Nijenborgh 9, Groningen, The Netherlands"
+"Seattle Paci c University, Seattle, WA 98119-1957, USA"
+"Section of Pathology, Second University of Naples, Via L. Armanni"
+"Sejong University, 98 Gunja, Gwangjin, Seoul 143-747, Korea"
+"Sejong University, 98 Kunja-Dong, Kwangjin-Gu, Seoul, Korea"
+"Sendai National College of Technology, Natori, Japan"
+"SenseTime, 2Tsinghua University"
+"Sensor-enhanced Social Media (SeSaMe) Centre, National University of Singapore, Singapore"
+"Several methods exists to induce anxiety in healthy individuals, including threat of shock (ToS), the Trier"
+"Shaanxi Provincial Key Lab on Speech and Image Information Processing, Northwestern Polytechnical University, Xi an, China"
+Shaheed Zulfikar Ali Bhutto Institute of Science and Technology Islamabad
+ShahidBeheshti University
+Shandong Women s University
+"Shanghai Institute of Applied Physics, Chinese Academy of Sciences"
+"Shanghai Jiao Tong University, CloudWalk Technology"
+"Shanghai Jiao Tong University, 800 Dongchuan Road, Shanghai 200240, China"
+"Shanghai Key Lab of Intelligent Information Processing, School of Computer Science, Fudan University, China"
+"Shaoguan University, Da Tang Lu"
+"Shenyang Institute of Automation, Chinese Academy of Sciences, Shenyang 110016, China"
+"Shenzhen Institutes of Advanced Technology, CAS, China"
+"Shenzhen Institutes of Advanced Technology, Chinese Academy of Science, China"
+"Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences"
+"Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences, 518055, China"
+"Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences, China"
+"Shenzhen Key Lab of CVPR, Shenzhen Institutes of Advanced Technology"
+"Shenzhen Key Lab of CVPR, Shenzhen Institutes of Advanced Technology, CAS"
+"Shenzhen Key Lab of CVPR, Shenzhen Institutes of Advanced Technology, CAS, China"
+"Shenzhen Key Lab of Comp. Vis. and Pat. Rec., Shenzhen Institutes of Advanced"
+"Shenzhen Key Lab of Comp. Vis. and Pat. Rec., Shenzhen Institutes of Advanced Technology, CAS, China"
+"Shenzhen Key Laboratory of High Performance Data Mining, Shenzhen Institutes of Advanced Technology"
+"Shenzhen Key Laboratory of Spatial Smart Sensing and Service, Shenzhen University, P.R. China"
+"Shenzhen Research Institute, The Chinese University of Hong Kong, Shenzhen 518057, China"
+"Shenzhen key lab of Comp. Vis. and Pat. Rec., Shenzhen Institutes of Advanced Technology"
+"Shenzhen key lab of Comp. Vis. and Pat. Rec., Shenzhen Institutes of Advanced Technology, CAS, China"
+"Shri Shivaji College, Parbhani, M.S, India"
+"Sichuan Fine Arts Institute, Chongqing, China"
+"Siemens Corporate Research, 755 College Road East, Princeton, NJ"
+"Sighthound, Inc"
+Signal Processing Institute
+"Skolkovo Institute of Science and Technology (Skoltech), Russia"
+Slovak University of Technology in
+"Smart Network System Institute, Institute for Information Industry"
+"Software, Jiangxi Normal University, Nanchang, China, 4 School of Statistics, Capital University of"
+"Sogang University, Seoul 121-742, Republic of Korea"
+"Solapur University, INDIA"
+"Sorbonne Universit s, UPMC University Paris 06, Paris, France"
+"Southeast University, Nanjing 210096, China"
+"Southeast University, Nanjing 211189, China"
+"Southern Illinois University, Carbondale, IL 62901 USA"
+Sponsors: Machine Intelligence Research Labs (MIR Labs
+"Springer Science + Business Media, Inc. Manufactured in The Netherlands"
+"Sri Chandrasekharendra Saraswathi Viswa Mahavidyalaya University, Kanchipuram, India"
+"Sri Manakula Vinayagar Engineering College, Pondicherry"
+"Sri SidarthaInstitute of Technology, Tumkur"
+"Sri Sunflower College of Engineering and Technology, Lankapalli"
+Sridevi Women's Engineering College
+"Srm Easwari Engineering College, Ramapuram, Bharathi Salai, Chennai, Tamil Nadu, India"
+St. Anne s College
+St. Francis Institute of Technology
+"St. Xavier s Catholic College of Engineering, India"
+"St. Xavier s Catholic College of Engineering, Nagercoil, India"
+"St.Joseph s College of Engineering, Old Mamallapuram Road, Kamaraj Nagar, Semmencherry, Chennai"
+"Staffordshire University, Beaconside Stafford ST18 0AB, United Kingdom"
+"State Key Lab of CADandCG, College of Computer Science, Zhejiang University, Hangzhou, China"
+"State Key Lab of CADandCG, Zhejiang University, Hangzhou, Zhejiang, China"
+"State Key Lab. LIESMARS, Wuhan University, China"
+"State Key Laboratory for Novel Software Technology, Nanjing University, China"
+"State Key Laboratory of Brain and Cognitive Science, Institute of Psychology"
+"State Key Laboratory of CAD and CG, ZHE JIANG University, HangZhou, 310058 China"
+"State Key Laboratory of Integrated Services Networks, Xidian University, Xi an 710071 China"
+"State Key Laboratory of Pulp and Paper Engineering, South China University of Technology, Guangzhou 510640, China"
+"State Key Laboratory of Robotics and System, Harbin Institute of Technology, Harbin, China"
+"State Research Institute of Aviation Systems (GosNIIAS), Moscow, Russia"
+State University of Feira de Santana (UEFS
+"State University of New York at Binghamton, Binghamton, NY"
+"State University of Rio de Janeiro, Brazil"
+Stevens Institute of Technology Adobe Systems Inc
+"Student, Amal Jyothi College of Engineering, Kanjirappally, India"
+"Student, Computer Science, Shah and Anchor Kuttchi Engineering College, Mumbai, India"
+Submitted to the Institute for Graduate Studies in
+Submitted to the Senate of the Hebrew University
+"Suhaila N. Mohammed, Baghdad University, College of Science, Baghdad, Iraq"
+Sun Yat-Sen (Zhongshan) University
+"Swiss Federal Institute of Technology, Lausanne (EPFL"
+"Synergy Systems, Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences, Guangdong"
+"System Research Center, NOKIA Research Center, Beijing, 100176, China"
+Systems and Telematics - Neurolab
+"TNLIST, Tsinghua University, Beijing, 100084, China"
+Taizhou University
+"Tarbiat Modarres University, Tehran, Iran"
+"Teaching Affairs Office, Chongqing Normal University, Chongqing 401331, China"
+"Technical University in Prague, 166 27 Prague 6, Technick a 2 Czech Republic"
+"Technical University in Prague, Technick a 2, 166 27 Prague 6 Czech Republic"
+"Technical University of Cluj Napoca, 28 Memorandumului Street"
+Technical University of Kaiserslautern
+"Technical University of Munich, Germany"
+"Technical University of Ostrava, FEECS"
+TechnicalUniversityofDenmark
+"Technological Educational Institute of Athens, 12210 Athens, Greece"
+"Technology, Nanjing University of Aero"
+"Technology, University of Oradea 410087, Universitatii 1, Romania"
+"Texas AandM University, College Station, TX, USA"
+The Allen Institute for AI
+"The American University In Cairo, Road 90, New Cairo, Cairo, Egypt"
+"The Amsterdam School of Communication Research, University of Amsterdam"
+The Australian National University Queensland University of Technology
+The Author 2012. Published by Oxford University Press on behalf of The British Computer Society. All rights reserved
+The Author 2014. Published by Oxford University Press
+"The Big Data Research Center, Henan University, Kaifeng 475001, China"
+"The Blavatnik School of Computer Science, Tel Aviv University, Israel"
+"The Blavatnik School of Computer Science, Tel-Aviv University, Israel"
+"The Blavatnik School of Computer Science, Tel-Aviv University, Tel-Aviv, Israel"
+"The Blavatnik School of Computer Science, The Tel-Aviv University"
+The Chinese University of Hong Kong holds the copyright of this thesis. Any
+"The Chinese University of Hong Kong, HKSAR, China"
+"The Chinese University of Hong Kong, Hong Kong SAR, China"
+The Chinese University ofHong Kong
+The Graduate University for Advanced Studies (SOKENDAI
+The Hong Kong Polytechnic University 2Harbin Institute of Technology
+"The Hong Kong Polytechnic University, Hong Kong, SAR, 2University of Technology Sydney, Australia"
+"The Hong Kong University of Science and Technology, Clear Water Bay, Hong Kong"
+"The Image Processing and Analysis Lab (LAPI), Politehnica University of Bucharest, Romania"
+"The Institute of Scienti c and Industrial Research, Osaka University"
+"The Open University of Israel, Israel"
+"The Remote Sensing Technology Institute (IMF), German Aerospace Center"
+"The Robotics Inistitute, Carnegie Mellon University"
+The Robotics Institute Carnegie Mellon University
+"The Robotics Institute, Carnegie Mellon University"
+"The Robotics Institute, Carnegie Mellon University, 5000 Forbes Ave, PA, USA"
+"The Robotics Institute, Carnegie Mellon University, Pittsburgh PA"
+"The School of Computer Science, Tel-Aviv University, Israel"
+"The School of Electrical Electronic and Control Engineering, Kongju National University"
+"The School of Electrical and Electronic Engineering, Yonsei University, 134 Shinchon-Dong"
+The University of Adelaide; and Australian Centre for Robotic Vision
+The University of Queensland in
+"The University of Queensland, School of ITEE"
+"The University of Queensland, School of ITEE, QLD 4072, Australia"
+The University of Shef eld
+"The authors are with the Delft University of Technology, Data and Knowl"
+The open University of Israel. 2Adience
+"The school of Data Science, Fudan University"
+Thesis. Rochester Institute of Technology. Accessed from
+This article was downloaded from Harvard University s DASH
+This work was supported by Grant MOP102637 from the Canadian Institutes of Health Research to E.D.R. and the
+This work was supported in part by National Institute of Mental Health Award R01 MH 087610 to T.E
+Tokyo Polytechnic University
+Tomas Bata University in Zl n
+"Toyota College, 2-1 Eisei, Toyota-shi, Aichi, 471-8525 Japan"
+"Toyota Research Institute, Cambridge, MA 2 University of Michigan, Ann Arbor, MI"
+Toyota Technological Institute Chicago (TTIC
+Toyota Technological Institute at Chicago
+"Toyota Technological Institute, Chicago (TTIC"
+Tripura University (A Central University
+"Trust Centre for Human Genetics, University of Oxford, Oxford, United Kingdom"
+Tsinghua University 4SenseTime
+"Tsinghua University, State Key Lab. of Intelligent"
+Tsinghua-CUHK Joint Research Center for Media Sciences
+"Tsinghua-CUHK Joint Research Center for Media Sciences, Technologies and Systems"
+"Turin, Italy, 3 Faculty of Humanities, Research Unit of Logopedics, Child Language Research Center, University of Oulu, Oulu"
+U.S. Army Research Laboratory
+"U.S. Army Research Laboratory, 2800 Powder Mill Road, Adelphi, MD USA"
+"UC Lab, Kyung Hee University, Yongin-Si 446-701, Korea"
+"UG student, Amity school of Engineering and Technology, Amity University, Haryana, India"
+"UMIACS | University of Maryland, College Park"
+"UMIACS, University of Maryland, College Park, USA"
+UNIVERSITY OF APPLIED SCIENCE DARMSTADT (FHD
+UNIVERSITY OF OULU GRADUATE SCHOOL
+UNIVERSITY OF OULU P.O. Box 8000 FI-90014 UNIVERSITY OF OULU FINLAND
+"USA, 2Unit for Experimental Psychiatry, University of Pennsylvania School of Medicine"
+"USC IRIS Lab, University of Southern California"
+USC Information Sciences Institute
+"USC Information Sciences Institute (ISI), Marina Del Rey, CA"
+USC Institute for Creative Technologies
+"USC Institute for Robotics and Intelligent Systems (IRIS), Los Angeles, CA"
+"Uber Advanced Technologies Group, 5Vector Institute"
+"Ultra College of Engineering and Technology for Women, India"
+"United States of America, State University of New York Albany, Albany"
+"United States of America, State University of New York Albany, Albany, New York"
+"Universit Paris-Dauphine, PSL Research University, CNRS, UMR"
+"Universitat Polit cnica de Catalunya, 6 University of Vigo, 7 LIUM, University of Maine"
+"Universitat Polit`ecnica de Catalunya, Columbia University"
+"Universite Catholique de Louvain, Louvain-la-Neuve, Belgium, 4 The Queensland Brain Institute, The"
+"Universiteit van Amsterdam, Roeterstraat 15, 1018 WB Amsterdam, The Netherlands"
+"University Politehnica of Bucharest, Romania, Address Splaiul Independent ei"
+"University B.D.T.College of Engineering, Visvesvaraya"
+"University Bourgogne Franche-Comt , France"
+"University Campus, 54124, Thessaloniki, Greece"
+"University Center of FEI, S ao Bernardo do Campo, Brazil"
+"University College London, 12 Queen Square, London WC1N 3BG, UK"
+"University Health Board, Swansea, United Kingdom"
+"University Hospital Jena, Germany"
+University Lecturer Anu Soikkeli
+University Lecturer Veli-Matti Ulvinen
+"University Station C0500, Austin TX 78712, USA"
+"University Street, Montral, QC H3A 0E9, Canada"
+"University Street, Montreal, QC H3A 0E9, Canada"
+"University Technology of Malaysia, 81310 Skudai, Johor, Malaysia"
+"University at Buffalo, SUNY"
+"University at Buffalo, State University of New York"
+"University of Alberta, Edmonton, AB T6G 2E8, Canada"
+"University of Alicante, 03690, San Vicente del Raspeig, Alicante, Spain"
+"University of Amsterdam, Amsterdam, the Netherlands, 2 Leiden University"
+"University of Amsterdam, University of Trento, Italy"
+"University of Amsterdam; 2Amsterdam Brain and Cognition Center, University of"
+University of Applied Sciences Darmstadt - CASED
+"University of Balochistan, Quetta"
+"University of Barcelona and Computer Vision Centre, Barcelona, Spain"
+"University of Barcelona, Gran Via de les Corts Catalanes 585, 08007 Barcelona, Spain"
+"University of Bari, Bari, Italy"
+"University of Basel, Departement Informatik, Basel, Switzerland"
+University of Beira Interior
+"University of Bonn, Roemerstrasse 164, 53117 Bonn, Germany"
+University of Bristol - Explore Bristol Research
+"University of Business Agriculture and Technology, Dhaka-1230, Bangladesh"
+University of Caen Basse-Normandie
+"University of Caen, France"
+University of Cagliari
+University of California at Berkeley
+University of California at Berkeley / ICSI
+"University of California at Berkeley, USA"
+"University of California at Irvine, Irvine, CA"
+"University of California at Los Angeles, Los Angeles, CA, USA"
+University of California at San Diego
+"University of California at San Diego, La Jolla, CA"
+"University of California, Berkeley1 Adobe"
+"University of California, Los Angeles"
+"University of California, San Diego 2 Carnegie Mellon University"
+"University of California, Santa Cruz"
+University of Cambridge Computer Laboratory
+"University of Cambridge, Computer Laboratory, UK"
+"University of Cambridge, The Computer Laboratory, Cambridge CB3 0FD, U.K"
+"University of Cambridge, UK 2Carnegie Mellon University, USA"
+"University of Canberra, Australia, Data61 - CSIRO and ANU, Australia"
+University of Chinese Academy of
+University of Colorado at Colorado Springs
+"University of Colorado at Colorado Springs and Securics, Inc., Colorado Springs, CO, USA"
+"University of Colorado, Colorado Springs"
+"University of Exceter, Exceter, UK"
+"University of Florence, Italy"
+University of Freiburg
+"University of Freiburg, Germany"
+"University of Freiburg, Instit ut f ur Informatik"
+"University of Genoa, Italy"
+"University of Georgia, Athens, GA, U.S.A"
+University of Illinois at Urbana-Champaign 2Adobe Research
+"University of Illinois, Urbana-Champaign University of California, San Diego"
+University of Insubria
+"University of Ioannina, Ioannina, Greece, 2 Computational Biomedicine"
+"University of Kaiserslautern, Gottlieb-Daimler-Str., Kaiserslautern 67663, Germany"
+"University of Kentucky, 329 Rose St., Lexington, KY, 40508, U.S.A"
+University of Lac Hong 10 Huynh Van Nghe
+"University of Lincoln, School of Computer Science, U.K"
+"University of Ljubljana, Faculty of Electrical Engineering, Trzaska 25, 1000 Ljubljana, Slovenia"
+"University of Maryland Institute for Advanced Computer Studies, College Park, MD"
+"University of Maryland, CFAR"
+"University of Maryland, Center for Automation Research"
+"University of Maryland, College Park"
+"University of Maryland, College Park, MD"
+"University of Maryland, College Park, USA"
+"University of Maryland, College Park; 2Arizona State University; 3Xerox Research Centre"
+University of Massachusetts Amherst in partial ful llment
+"University of Michigan, Ann Arbor, MI, USA (UMICH.EDU"
+"University of Milano-Bicocca, Italy"
+"University of Minnesota-Twin Cities, Minneapolis"
+"University of Nevada at Reno, USA"
+University of North Carolina Wilmington in Partial Ful llment
+"University of Notre Dame, 2IIIT-Delhi"
+"University of Nottingham, Ningbo, China"
+"University of Nottingham, UK, School of Computer Science"
+"University of Oradea 410087, Universitatii 1, Romania"
+"University of Oviedo, Campus de Viesques, 33204 Gij n"
+"University of Pennsylvania Medical Center, Hospital of the University of Pennsylvania"
+"University of Pennsylvania School of Medicine, 1013 Blockley Hall"
+"University of Pennsylvania, 2Ryerson University"
+"University of Pisa, Largo Lucio"
+"University of Pittsburgh and 2University of British Columbia, Vancouver, British Columbia, Canada"
+"University of Queensland, School of ITEE, QLD 4072, Australia"
+"University of Queensland, St Lucia QLD Australia, 5 Institut Universitaire de France, Paris, France"
+"University of S ao Paulo - USP, S ao Paulo - Brazil"
+"University of S ao Paulo, S ao Paulo, Brazil"
+"University of Santiago de Compostela, Santiago de Compostela, Spain"
+University of Science and Technology Beijing
+"University of Science, Ho Chi Minh city"
+"University of Science, VNU-HCM, Viet Nam"
+"University of Science, VNU-HCMC, Ho Chi Minh city, Vietnam"
+"University of Science, Vietnam National University-Ho Chi Minh city"
+University of Sfax
+"University of Shef eld, Regent Court, 211 Portobello, Shef eld"
+"University of Shef eld, UK"
+University of Siegen
+"University of Siena, Siena, Italy"
+"University of Sk vde, Sweden"
+"University of Southampton, UK, 2University of Warwick, UK"
+"University of Szeged, 2 E tv s Lor nd University"
+"University of T ubingen, T ubingen, Germany"
+"University of Tampere, Kanslerinnrinne 1, 33014, Tampere, Finland"
+"University of Technology, Guangzhou, 510640, P.R.China"
+"University of Technology, Sydney, 15 Broadway, Ultimo, NSW 2007, Australia"
+"University of Tokyo, 4-6-1 Shirokanedai"
+University of Toronto and Recognyz Systems Technologies
+"University of Toronto, 6 Kings College Road, Toronto, ON M5S 3G4 CANADA"
+"University of Toronto, Toronto, ON M5S 2G4, Canada"
+University of Toulouse II Le Mirail
+University of Twente 2Dublin City University 3Oxford University
+"University of Twente, EEMCS, Netherlands"
+"University of Ulsan, Ulsan, Republic of Korea"
+"University of Verona, Verona, Italy"
+University of Verona. 2Vienna Institute of Technology. 3ISTC CNR (Trento). 4University of Trento
+"University of Victoria, Victoria, Canada"
+"University of Vigo, Spain"
+University of Washington 4The Allen Institute for AI
+University of Washington and Google Inc
+"University of Washington, Bothell"
+"University of Waterloo, Waterloo ON N2L3G1, Canada"
+University of West Bohemia
+University of Wollongong. For further information contact the UOW
+"University of Zagreb, Faculty of Electrical Engineering and Computing"
+"University of Zagreb, Faculty of Electrical Engineering and Computing, Croatia"
+"University of Zagreb, Faculty of Electrical Engineering and Computing, Unska 3, 10000 Zagreb, Croatia"
+"University of Zagreb, Unska 3, 10 000 Zagreb"
+"University of Zaragoza, Spain"
+"University of the South Paci c, Fiji"
+"University, Taiwan, R.O.C"
+University: Dhirubhai Ambani Institute of Information and Communication Technology
+UniversityofMaryland
+"UniversityofMaryland, CollegePark, MD"
+"Utrecht Centre for Child and Adolescent Studies, Utrecht University, Utrecht, The Netherlands"
+UvA-DARE is a service provided by the library of the University of Amsterdam (http://dare.uva.nl
+VEER SURENDRA SAI UNIVERSITY OF
+"VHNSN College, Virudhunagar, ANJA College"
+"VISILAB group, University of Castilla-La Mancha, E.T.S.I.Industriales, Avda. Camilo Jose Cela s.n, 13071 Spain"
+"VISLab, EBUII-216, University of California Riverside"
+"VSB Technical University of Ostrava, 17. listopadu 15, 708 33 Ostrava, Czech Republic"
+"VSI Lab, Goethe University, Frankfurt, Germany"
+Vector Institute for Arti cial Intelligence
+VelTech HighTech Dr. Rangarajan Dr.Sakunthala Engineering College
+"Vickram College of Engineering, Enathi, Tamil Nadu, India"
+"Victoria University of Wellington, PO Box 600, Wellington 6140, New Zealand"
+Vietnam National University Ho Chi
+Vietnam National University of Agriculture
+"Virudhunagar Hindu Nadars Senthikumara Nadar College, Virudhunagar"
+"Vision Laboratory, LARSyS, University of the Algarve, 8005-139 Faro, Portugal"
+"Vision Science Group, University of California"
+"Vision Systems, Inc"
+"Vision and Fusion Lab, Karlsruhe Institute of Technology KIT, Karlsruhe, Germany"
+"Vision and Fusion Laboratory (IES), Karlsruhe Institute of Technology (KIT"
+"Vision and Security Technology Lab, University of Colorado Colorado Springs"
+"Vision and Security Technology Lab, University of Colorado at Colorado Springs, Colorado"
+"Vision and Sensing, HCC Lab, ESTeM, University of Canberra"
+"Vision and Sensing, HCC, ESTeM, University of Canberra"
+"Visual Analysis of People Lab, Aalborg University, Denmark"
+"Visual Computing and Communications Lab, Arizona State University"
+"Visual Geometry Group, University of Oxford"
+"Visual Geometry Group, University of Oxford, Oxford UK"
+"Visual Geometry Group, University of Oxford, UK"
+"Visualization and Computer Vision Lab, GE Global Research Center"
+"Viswajyothi College of Engineering and Technology Kerala, India"
+"Waseda University, Tokyo, Japan"
+"Welten Institute, Research Centre for Learning, Teaching and Technology, Faculty of"
+"Wenzhou University, China"
+"Wenzhou University, Wenzhou, China"
+"West Virginia University, Morgantown, West Virginia, United States of America, 2. IIIT Delhi, New Delhi"
+William Marsh Rice University
+Xerox Research Center India
+Xerox Research Center Webster
+"Xi an Jiaotong University, China"
+"Xi'an Institute of Optics and Precision Mechanics, Chinese Academy of Sciences"
+Xidian University 2Xi an Jiaotong University 3Microsoft Research Asia
+"Y ld z Teknik University, Istanbul, TR"
+"ZHAW Datalab, Zurich University of Applied Sciences"
+"Zhejang University, Hangzhou 310027, P.R.China"
+"a Institute of Anatomy I, School of Medicine, Friedrich Schiller University, Germany"
+"a Section of Biomedical Image Analysis, University of Pennsylvania, 3600 Market, Suite 380, Philadelphia, PA 19104, USA"
+"a The Robotics Institute, Carnegie Mellon University, 5000 Forbes Avenue, Pittsburgh, PA 15213, USA"
+"aCentre for Neuroscience, Indian Institute of Science, Bangalore, India"
+"aCollege of Computer Science at Chongqing University, 400044, Chongqing, P.R.C"
+"aDivision of Biology and Biological Engineering 156-29, Howard Hughes Medical Institute, California Institute of Technology, Pasadena, CA"
+"aFaculty of Electrical Engineering, University of Technology MARA, Shah Alam, 40450 Selangor, Malaysia"
+"aIBM China Research Lab, Beijing, China"
+"aImperial College London, London, UK"
+aInformation Sciences Institute
+"aLawrence Technological University, 21000 W Ten Mile Rd., South eld, MI 48075, United States"
+"aMILab, LCSEE, West Virginia University, Morgantown, West Virginia, USA"
+"aResearch Scholar, Anna University, Chennai, Inida"
+"aSchool of Computing and Mathematics, Charles Sturt University, Bathurst, NSW"
+"aSchool of Technology, University of Campinas"
+"aTurgut Ozal University, Ankara Turkey"
+"abroad, or from public or private research centers"
+"additional details of DCS descriptors, including visualization. For extending the evaluation"
+"and 2Center for Cognitive Neuroscience, Duke University, Durham, North Carolina 27708"
+"and Engineering, Beihang University, Beijing, China"
+"and IBUG [32]. All of them cover large variations, including different"
+"and Mathematical Biosciences Institute, The Ohio State University"
+"and Modeling, Rutgers University"
+"and bDivision of Engineering and Applied Sciences 136-93, California Institute of Technology, Pasadena, CA"
+"and education use, including for instruction at the authors institution"
+"and especially light angle, drastically change the appearance of a face [1]. Facial expressions, including"
+"and quantify distinct social behaviors, including those involving"
+and the institute of engineering and science
+"applications has different requirements, including: processing time (off-line, on-line, or real-time"
+"applications, including texture classification [16], face recognition [12], object detection [10], and"
+at The Australian National University
+at West Virginia University
+at the Delft University of Technology
+at the University of Central Florida
+"atry, University of Pennsylvania School of Medicine, Philadelphia, PA"
+"b Computer Science, School of Electrical and Data Engineering, University of Technology, Sydney"
+"b Computer Technology Institute, Beijing Union University, 100101, China"
+"b DEI - University of Padova, Via Gradenigo, 6 - 35131- Padova, Italy"
+b Institute for Robotics and Intelligent Systems
+"b School of Applied Mathematics, Xidian University, Xi an, China"
+"b School of Business, Reykjavik University, Reykjavik, Iceland"
+"b The Interdisciplinary Center for Research on Emotions, University of"
+"bDiscipline of Business Analytics, The University of Sydney Business School"
+"bFaculty of Engineering, International Islamic University, Jalan Gombak, 53100 Kuala Lumpur, Malaysia"
+"bRobotics Institute, Carnegie Mellon University, Pittsburgh, PA 15213, U.S.A"
+"bSchool of Computer Science and Technology, Harbin Institute of Technology, Harbin 150001, China"
+"bSchool of Computer and Control Engineering, University of Chinese Academy of Sciences"
+"bTsinghua University, Beijing, China"
+"bUniversity of Nottingham, School of Computer Science, Nottingham, UK"
+by grants from the National Institute of Mental Health (MH 15279 and MH067976 (K. Schmidt
+"c Cardiff Business School, Cardiff University, Cardiff, United Kingdom"
+"c School of Arts and Sciences, University of Pennsylvania Medical Center, Hospital of the University of Pennsylvania"
+"c School of Computational Science, Florida State University, Tallahassee, FL 32306, USA"
+c(cid:13) Carnegie Mellon University
+c(cid:13)The Chinese University of Hong Kong
+c(cid:176) Massachusetts Institute of Technology 2006. All rights reserved
+"cCentre of Intelligent Machines, McGill University, Montr eal, QC H3A 0E9, Canada"
+"cFaculty of Electrical Engineering, Mathematics, and Computer Science, University of Twente, The Netherlands"
+"cSchool of Astronautics at Beihang University, 100191, Beijing, P.R.C"
+cThe Open University
+cid:1) Honda Research Institute
+cid:1)Institute for Neural Computation
+"cid:130) Computer Perception Lab, California State University, Bakersfield, CA 93311, USA"
+"cid:2) Imperial College London, United Kingdom"
+"cid:2)Honda RandD Americas, Inc., Boston, MA, USA"
+"cid:2)Imperial College London, U.K"
+"cid:3) School of Software, Tsinghua University"
+cid:3)The Salk Institute and Howard Hughes Medical Institute
+"cid:5)School of Electrical and Computer Engineering, Purdue University, West Lafayette, IN. 47907, USA"
+"cid:63) Faculty of Computing, Information Systems and Mathematics, Kingston University London"
+"cid:63) Imperial College London, UK"
+"cid:63)Queen Mary University of London, Imperial College London"
+"cid:63)Sharif University of Technology, University College London, Queen Mary University of London"
+cid:63)Stanford University
+"cid:93) Faculty of Science and Technology, University of Macau"
+"cid:93)Peking University Shenzhen Graduate School, Shenzhen, P.R.China"
+comparisons with 12 instance-based classi ers on 13 benchmark University of California Irvine
+"do, Rep. of Korea, Kyung Hee University, Suwon, Rep. of Korea"
+"e ects of di erence factors, including age group, age gap"
+eBay Research Labs
+"engineering, Government College of Engineering Kannur, Kerala, India"
+"f Neuropsychiatry Section, University of Pennsylvania Medical Center, Hospital of the University of Pennsylvania"
+"face processing, including age (Berry, 1990), sex (Hill"
+facultyofmathematicsandnaturalsciencesarti cialintelligence22-09-2016|1ATitleA.UthorRijksuniversiteitGroningenSomeFaculty
+"gelmeyer et al., 1996); and, increasingly, its role in reactions to"
+"general term, including collaboration. Interaction determines action on someone"
+"gies (Bughin et al. 2017). A range of other sectors, includ"
+"he University of Hong Kong, Pokfulam"
+"iCV Research Group, Institute of Technology, University of Tartu, Tartu 50411, Estonia"
+"iMinds - Ghent University, Technologiepark 15, B-9052 Ghent, Belgium"
+"image being generated by the model, include Active Appearance"
+in The University of Michigan
+"in signed languages, including American Sign Language (ASL). Gestures such"
+in the College of Engineering and Computer Science
+in the Graduate School of Duke University
+"instance has been detected (e.g., a face), it is be possible to obtain further information, including: (i"
+"learning. As a result of this research, many applications, including video surveillance systems"
+massachusetts institute of technology artificial intelligence laboratory
+"ment of Psychology, University of California, Berkeley"
+ment. Oxford University Press Series in Affective Science. New York: Oxford
+"methods, including graph matching, optical- ow-based"
+"obtained for all other uses, in any current or future media, including reprinting/republishing"
+"of Engineering and Information Technology, University of Technology, Sydney, Australia"
+"of Maryland, College Park, MD 20742, USA"
+"of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, China"
+"of Psychology, Princeton University, Princeton, NJ 08540. E-mail"
+"of Psychology, University of Michigan, Ann Arbor, MI, United States, University of Michigan, Ann"
+"of Technology, Kochi, Japan, 3 Yunnan Key Laboratory of Computer Technology Applications, Kunming University of Science"
+"our analysis to stereotypes beyond gender, including those"
+"ples of such ne-grained descriptions, including attributes covering detailed"
+"point, lighting, and appearance. Many applications, including video surveillance systems"
+"puter Engineering, National University of Singapore, Singapore (e-mails"
+"recognition, such as human computer interfaces and e-services, including e-home"
+"subsection a table summarizing the key features of the database is provided, including (where available) the number of"
+"t2i Lab, Chalmers University of Technology, Gothenburg, Sweden"
+"the Chinese University of Hong Kong, Shatin, Hong Kong"
+"the Diploma of Imperial College London. This thesis is entirely my own work, and, except"
+"the Library: http://uba.uva.nl/en/contact, or a letter to: Library of the University of Amsterdam, Secretariat, Singel 425, 1012 WP Amsterdam"
+"the face, including negative affect and distress, dates"
+"tion [11, 10] is making possible very large scale visual recognition both in my own ongoing work, including"
+tional Taipei University for his help in performing simulations. The author would like to thank Mr. Ming
+"to process in all the illumination conditions, including total"
+"to visually detectable changes in facial appearance, including blushing and tears. These"
+"tum Computation and Intelligent Systems, Faculty of Engineering and Information Technology, University"
+"uses, in any current or future media, including"
+"versity of Amsterdam, Amsterdam and University of Trento"
+via Institute of Electrical and Electronic Engineers at http://dx.doi.org/10.1109/TIP.2014.2367321. Please refer to
+yAristotle University of Thessaloniki
+yThe University of Tokyo
+years. According to the definition by the National Institute
diff --git a/scraper/reports/institutions_not_found/not-found-1.csv b/scraper/reports/institutions_not_found/not-found-1.csv
new file mode 100644
index 00000000..29eee253
--- /dev/null
+++ b/scraper/reports/institutions_not_found/not-found-1.csv
@@ -0,0 +1,845 @@
+"MIRACL-FS, University of Sfax"
+"Changchun Institute of Optics, Fine Mechanics and Physics, Chinese Academy of Science"
+"Division of Computer Engineering, Chonbuk National University, Jeonju-si, Jeollabuk-do"
+"The Blavatnik School of Computer Science, The Tel-Aviv University"
+"Deparment of Computing, Imperial College London, UK"
+"University of Toronto, 6 Kings College Road, Toronto, ON M5S 3G4 CANADA"
+"SSN College of Engineering, Kalavakkam, Tamil Nadu, India"
+"BRIC, University of North Carolina at Chapel Hill, NC 27599, USA"
+"IIE, Universidad de la Rep ublica, Uruguay. 2ECE, Duke University, USA"
+"SRM University, Kattankulathur, Chennai-603 203, Tamilnadu, India"
+"School of Computer Science, University of Birmingham, UK"
+Stevens Institute of Technology Adobe Systems Inc
+"College of Computers and Information Systems, Umm Al-Qura University, Makkah, KSA"
+"ITCS, Institute for Interdisciplinary Information Sciences, Tsinghua University, Beijing"
+Tomas Bata University in Zl n
+"School of Computing and Info. Sciences, Florida International University"
+"Numediart Institute, University of Mons"
+in the College of Engineering and Computer Science
+"Pursuing M.Tech, Caarmel Engineering College, MG University, Kerala, India"
+The University of Queensland in
+"University Politehnica of Bucharest, Romania, Address Splaiul Independent ei"
+M. Mark Everingham University of Leeds
+"CVIP Lab, University of Louisville, Louisville, KY, USA"
+"School of Psychology, University of Auckland, Auckland, New Zealand"
+"University of Shef eld, Regent Court, 211 Portobello, Shef eld"
+"Babes Bolyai University, 58-60 Teodor Mihali, C333, Cluj Napoca"
+"Faculty of Telecommunications, Technical University, Sofia, Bulgaria"
+"University of Science, VNU-HCMC, Ho Chi Minh city, Vietnam"
+"RoboticsResearchGroup, UniversityofOxford, Oxford, UK"
+"School of Computer Science, Fudan University, Shanghai, China"
+"BECS, Aalto University, Helsinki, Finland"
+"Beihang University 2Gri th University 3University of York, UK"
+"School of Comm. and Info. Engineering, Beijing University of Posts and Telecom., Beijing China"
+Tsinghua-CUHK Joint Research Center for Media Sciences
+Sabanc University
+"Viswajyothi College of Engineering and Technology Kerala, India"
+"Fudan University, 2Microsoft Research Asia, 3University of Maryland"
+"School of Computer Science, University of Nottingham"
+"Advanced Digital Sciences Center (ADSC), University of Illinois at Urbana-Champaign, Singapore"
+"University at Buffalo, SUNY"
+"University of Maryland, Center for Automation Research"
+"MRC Laboratory For Molecular Cell Biology, University College London"
+"IslamicAzad University, Qazvin, Iran"
+KIT University of the State of Baden-W rttemberg and National Laboratory of the Helmholtz Association
+"Research Center E. Piaggio , University of Pisa, Pisa, Italy, 2 Faculty of Psychology, University of Florence, Florence, Italy"
+"University of Zagreb, Faculty of Electrical Engineering and Computing, Unska 3, 10000 Zagreb, Croatia"
+This article was downloaded from Harvard University s DASH
+"Program of Computational Science and Engineering, Bo gazi ci University"
+"Faculty of Mathematics and Computer Science, University of Barcelona, Barcelona, Spain"
+"DISI, University of Trento, Italy"
+College of Information Engineering
+"Faculty of Information Technology, Barrett Hodgson University, Karachi, Pakistan"
+"bUniversity of Nottingham, School of Computer Science, Nottingham, UK"
+"P. O. Box 4500 FI-90014 University of Oulu, Finland"
+University of California at Berkeley / ICSI
+"Intelligent User Interfaces Lab, Ko c University, Turkey"
+"Institute of Computing Technology, Chinese Academy of Sciences, Beijing 100190, China"
+Honda Fundamental Research Labs
+"Queensland Micro- and Nanotechnology Centre and Grif th School of Engineering, Grif th University"
+"Division of IT Convergence, Daegu Gyeongbuk Institute of Science and Technology"
+"University of Zaragoza, Spain"
+"Assam University, Silchar-788011 Assam University, Silchar"
+"School of Computer Software, Tianjin University, 300072 Tianjin, China"
+"University of Alicante, 03690, San Vicente del Raspeig, Alicante, Spain"
+"Grif th University, QLD, Australia"
+"Ecole Polytechnique Federale de Lausanne, Signal Processing Institute"
+"Lab, University College London, London WC1H 0AP, UK. 3Clinical"
+"Institute for Neural Computation, University of California, San Diego, La Jolla, CA"
+"Asian University, Taichung, Taiwan"
+"The Robotics Institute, Carnegie Mellon University, Pittsburgh PA"
+"Sapienza University of Rome, Italy"
+"University of Milano-Bicocca, Italy"
+"Centre for Quantum Computation and Intelligent Systems, University of Technology Sydney, Sydney, Australia"
+Sponsors: Machine Intelligence Research Labs (MIR Labs
+"University of S ao Paulo - USP, S ao Paulo - Brazil"
+"Machine Perception Laboratory, University of California, San Diego"
+"Ross School of Business, University of Michigan, Ann Arbor, MI, USA"
+"University of Barcelona, Gran Via de les Corts Catalanes 585, 08007 Barcelona, Spain"
+University of Cagliari
+"LIP6, UPMC - Sorbonne University, Paris, France"
+"Baidu Research, USA 3John Hopkins University"
+"Computer Science and Software Engineering, The University of Western Australia"
+"Computer Vision Laboratory, The University of Nottingham"
+"L3S Research Center, Hannover, Germany"
+"Science and Intelligence Technology, Shanghai Institutes for Biological Sciences, CAS"
+"School of Computer Science and Technology, Tianjin University, 300072 Tianjin, China"
+"a Institute of Anatomy I, School of Medicine, Friedrich Schiller University, Germany"
+"he University of Hong Kong, Pokfulam"
+"bSchool of Computer Science and Technology, Harbin Institute of Technology, Harbin 150001, China"
+"image being generated by the model, include Active Appearance"
+"Institute of Neuroscience, State Key Laboratory of Neuroscience, CAS Center for Excellence in Brain"
+"Institute of Advanced Technology, Nanjing University of Posts and Telecommunications, Nanjing"
+"Robotics Institute, Carnegie Mellon University, Pittsburgh, PA, 15213. USA"
+"f Neuropsychiatry Section, University of Pennsylvania Medical Center, Hospital of the University of Pennsylvania"
+"School of Computer Science and Information Systems, Birkbeck College, University of London"
+"Queen Mary, University of London"
+University of Sfax
+SUS college of Engineering and Technology
+"School of Psychology, Cardiff University, Cardiff, CF10 3AT, UK"
+"Faculty of Information Technology, Vietnam National University of Agriculture, Hanoi 10000, Vietnam"
+College of Information Science and Engineering
+"Facial expression gures prominently in research on almost every aspect of emotion, including psychophys"
+"Institute for Neural Computation, University of California, San Diego"
+"Proto Labs, Inc"
+Institute of Psychology and Behavioral Sciences
+ATR Interpreting Telecommunications Research Laboratories
+Fraunhofer Institute for Integrated Circuits IIS
+"Chalmers University of Technology, SAFER"
+"Computer Vision Research Group, COMSATS Institute of Information"
+"TNLIST, Tsinghua University, Beijing, 100084, China"
+"School of Computer Science, University of Lincoln, U.K"
+"Suhaila N. Mohammed, Baghdad University, College of Science, Baghdad, Iraq"
+"The School of Electrical and Electronic Engineering, Yonsei University, 134 Shinchon-Dong"
+"St. Xavier s Catholic College of Engineering, Nagercoil, India"
+"Applied computing and mechanics laboratory, Swiss Federal Institute of Technology, 1015 Lausanne, Switzerland"
+"Human-friendly Welfare Robotic System Engineering Research Center, KAIST"
+Language Technologies Institute
+Shaheed Zulfikar Ali Bhutto Institute of Science and Technology Islamabad
+"Doctor of Philosophy in Computer Science at Cardi University, July 24th"
+"Faculty of Electrical Engineering, University of Ljubljana, Tr za ska 25, SI-1000 Ljubljana, Slovenia"
+Northwestern University) to T.E. We thank Vincent De Gardelle for helpful comments on an earlier version of
+"Faculty of Electrical Engineering, University of Ljubljana, Tr za ska cesta 25, SI-1000 Ljubljana, Slovenia"
+"HOD, St. Joseph College of Information Technology, Songea, Tanzania"
+"Birkbeck College, University of London"
+"The Image Processing and Analysis Lab (LAPI), Politehnica University of Bucharest, Romania"
+"School of Communication Engineering, Hangzhou Dianzi University, Xiasha Higher Education Zone"
+Nokia Bell Labs and University of Oxford
+"PG Scholar, Dr.Pauls Engineering College, Villupuram District, Tamilnadu, India"
+"VISLab, EBUII-216, University of California Riverside"
+"University of Waterloo, Waterloo ON N2L3G1, Canada"
+"Robotics Institute, Carnegie Mellon University, Pittsburgh PA"
+"Institute of Information Science, Academia Sinica, Taipei, Taiwan"
+"Computer Science and Engineering, Easwari Engineering College, India"
+"Faculty of Electrical Engineering, University of Ljubljana, Slovenia"
+"Assiut University, Assiut 71515, Egypt"
+"to visually detectable changes in facial appearance, including blushing and tears. These"
+"Institute of Anthropomatics, Karlsruhe Institute of Technology, Germany"
+"Hasan Kalyoncu University, Gaziantep, Turkey"
+"Computer Science Division, The Open University of Israel"
+"Faculty of Computer and Information Science, University of Ljubljana, Ve cna pot 113, SI-1000 Ljubljana"
+"Facebook AI Research, 2Dartmouth College"
+"Computer Science, Engineering and Mathematics School, Flinders University, Australia"
+"Seattle Paci c University, Seattle, WA 98119-1957, USA"
+"Key Lab. of Machine Perception, School of EECS, Peking University"
+"Electrical and Computer Engineering, University of Auckland, New Zealand"
+"The Blavatnik School of Computer Science, Tel Aviv University, Israel"
+Thesis. Rochester Institute of Technology. Accessed from
+cid:63)Stanford University
+"The Robotics Institute, Carnegie Mellon University"
+"School of Computer and Information Engineering, Nanyang Institute of Technology, Henan, Nanyang, 473000, P.R"
+"Institute of Informatics, Istanbul Technical University, Istanbul, 34469, TURKEY"
+State University of Feira de Santana (UEFS
+University of Siegen
+"Neuroscience, Icahn School of Medicine at Mount Sinai, Friedman Brain Institute, New York, NY, United States"
+"M.Tech, Information Technology, Madras Institute of Technology, TamilNadu, India"
+"cid:2)Honda RandD Americas, Inc., Boston, MA, USA"
+tional Taipei University for his help in performing simulations. The author would like to thank Mr. Ming
+"Skolkovo Institute of Science and Technology (Skoltech), Russia"
+"USC IRIS Lab, University of Southern California"
+"Teaching Affairs Office, Chongqing Normal University, Chongqing 401331, China"
+"EEMCS, University of Twente, Netherlands"
+"puter Engineering, National University of Singapore, Singapore (e-mails"
+"School of Computer Science and Engineering, South China University of Technology, Guangzhou, China"
+"Electrical and Computer Engineering, The University of Memphis"
+"Held at R.C.Patel Institute of Technology, Shirpur, Dist. Dhule, Maharastra, India"
+"Robotics Institute, Carnegie Mellon University, Pittsburgh, Pennsylvania"
+"Machine Intelligence Lab (MIL), Cambridge University"
+"Co-Guide, Computer Science, Shah and Anchor Kuttchi Engineering College, Mumbai, India"
+"Nanjing, 210023, China, 4 School of Computer Science and Technology, Nanjing University of Posts and"
+"School of Information Science and Technology, ShanghaiTech University, Shanghai, 200031, China"
+"ISTMT, University of Tunis El Manar Address: 9, Rue Docteur Zouhe r Safi 1006; 3Faculty of Medicine of Tunis; Address"
+"CAS), Institute of Computing Technology, CAS, Beijing, 100190, China"
+"ACRV, The Australian National University University of Oxford QUVA Lab, University of Amsterdam"
+"Technology, University of Oradea 410087, Universitatii 1, Romania"
+"University Health Board, Swansea, United Kingdom"
+facultyofmathematicsandnaturalsciencesarti cialintelligence22-09-2016|1ATitleA.UthorRijksuniversiteitGroningenSomeFaculty
+"Institute for Computer Graphics and Vision, Graz University of Technology"
+"School of Computer Engineering, Sejong University, Seoul, Korea"
+"Anatomy and Genetics, University of Oxford, Oxford, United Kingdom; 3The Wellcome"
+"Calgary, 2500 University Dr., N.W. Calgary, AB, Canada T2N 1N4. Tel"
+"Governance, Keio University"
+"cid:63)Queen Mary University of London, Imperial College London"
+"RGPV University, Indore"
+"Shaanxi Provincial Key Lab on Speech and Image Information Processing, Northwestern Polytechnical University, Xi an, China"
+Honda Research Institute
+"cid:5)School of Electrical and Computer Engineering, Purdue University, West Lafayette, IN. 47907, USA"
+Sarhad University of Science and Information Technology
+"University of Twente, EEMCS, Netherlands"
+"School of Computer Science and Technology, Nanjing University of Science and Technology, China"
+"University of Illinois, Urbana-Champaign University of California, San Diego"
+"Aristotle University of Thessaloniki, GR-541 24 Thessaloniki, Greece"
+"School of Data of Computer Science, Sun Yat-sen University, P.R. China"
+"Michigan State University, East Lansing, MI 48824, U.S.A"
+"Rutgers, The State University of New Jersey, Piscataway, NJ"
+"Asian Institute of Technology, Pathumthani, Thailand"
+"Center for Arti cial Vision Research, Korea University"
+"Korea Electronics Technology Institute, 203-103 B/D 192, Yakdae-Dong"
+"Technical University of Munich, Germany"
+"City University of Hong Kong, Kowloon 999077, Hong Kong, China"
+"Laboratory, University of Houston, Houston, TX, USA"
+"School of Computer Science, University of Lincoln, United Kingdom"
+"School of Electrical and Computer Engineering, RMIT University, Melbourne, Australia"
+"Ponti cal Catholic University of Rio de Janeiro, Brazil"
+"School of Computer Science, Wuyi University, Jiangmen 529020, China"
+Institute of Deep Learning
+"Institute of Information Science, Beijing Jiaotong University, Beijing 100044, P.R. China"
+"Computational Biomedicine Laboratory, University of Houston, Houston, Texas 77204, USA"
+"FI-90014 University of Oulu, Finland"
+"Computer Vision Group, L. D. College of Engineering, Ahmedabad, India"
+"Arts Media and Engineering, Arizona State University"
+University of Beira Interior
+"Institute of Biochemistry, University of Balochistan, Quetta"
+Ho Chi Minh City University of
+"Asia University, Taichung, Taiwan"
+"National Kaohsiung University of Applied Sciences, Kaohsiung, Kaohsiung, Taiwan, ROC"
+"USC Institute for Robotics and Intelligent Systems (IRIS), Los Angeles, CA"
+"New York University Shanghai, 1555 Century Ave, Pudong"
+Facebook 4Texas AandM University 5IBM Research
+"Institute of Experimental Biology of Polish Academy of Sciences, Warsaw, Poland"
+yAristotle University of Thessaloniki
+"School of Electronics and Information Engineering, Tongji University, Caoan Road 4800, Shanghai"
+"CVSSP, University of Surrey"
+ShahidBeheshti University
+"Electronics Engineering, National Institute of Technical Teachers"
+"Sichuan Fine Arts Institute, Chongqing, China"
+"tion [11, 10] is making possible very large scale visual recognition both in my own ongoing work, including"
+"Institute for Electronics, Signal Processing and Communications"
+National Institute of Development Administration
+"EEMCS, University of Twente, The Netherlands"
+UvA-DARE is a service provided by the library of the University of Amsterdam (http://dare.uva.nl
+"College of Sciences, Northeastern University, Shenyang 110819, China"
+"and bDivision of Engineering and Applied Sciences 136-93, California Institute of Technology, Pasadena, CA"
+Dietrich College of Humanities and Social Sciences
+"School of Electronic and Information Engineering, Tongji University, Shanghai, China"
+"School of Information Technology and Electrical Engineering, The University of Queensland"
+"Institute of Systems and Robotics, University of Coimbra, Portugal"
+"Synergy Systems, Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences, Guangdong"
+"Center for Automation Research, University of Maryland"
+"Japan, 2 Center for Special Needs Education, Nara University of Education, Nara-shi, Nara"
+University of Science and Technology Beijing
+"Computational Biomedicine Lab, University of Houston, TX, USA"
+Nam k Kemal University
+University of Colorado at Colorado Springs
+University of Freiburg
+"DESTEC, FLSHR Mohammed V University-Agdal, Rabat, Morocco"
+"College of Science, Menou a University, Menou a 32721, Egypt"
+Interactive and Digital Media Institute
+"College of Computer and Information Science, Northeastern University, MA, USA"
+"School of Computer and Information, Hefei University of Technology, Hefei"
+"Research Groups on Intelligent Machines, University of Sfax, Sfax 3038, Tunisia"
+National Institute of Advanced Industrial
+USC Institute for Creative Technologies
+"University of Canberra, Australia, Data61 - CSIRO and ANU, Australia"
+Institute for Vision Systems Engineering
+"DCMandB, University of Michigan, Ann Arbor, USA 4 SCS, Carnegie Mellon University, Pittsburgh, USA"
+"School of Electrical and Electronic Engineering, Nanyang Technological University, Singapore"
+eBay Research Labs
+"Research Institute of Child Development and Education, University of Amsterdam, Utrecht, The"
+Howard Hughes Medical Institute (HHMI
+"Research Institute of Shenzhen, Wuhan University, Shenzhen, China"
+"and Modeling, Rutgers University"
+"Institute of Computing, University of Campinas (Unicamp), Campinas, Brazil, e-mail: ander"
+"National Institute of Informatics, Japan"
+"Charotar University of Science and Technology, Changa, India"
+UniversityofMaryland
+"Science and Technology, Sun Yat-Sen University, Guangzhou, China, 3 SYSU-CMU Shunde International"
+"Caarmel Engineering College, MG University, Kerala, India"
+"Universitat Polit cnica de Catalunya, 6 University of Vigo, 7 LIUM, University of Maine"
+"Computer Science and Engineering, University of Michigan, Ann Arbor"
+"J. P. College of Engineering, India"
+"Universite Catholique de Louvain, Louvain-la-Neuve, Belgium, 4 The Queensland Brain Institute, The"
+"Center for Machine Vision and Signal Analysis (CMVS), University of Oulu, Finland"
+"iCV Research Group, Institute of Technology, University of Tartu, Tartu 50411, Estonia"
+"Engineering Institute, Autonomous University of Baja California, Blvd. Benito Ju rez"
+Technical University of Kaiserslautern
+"M.Tech Student, SSG Engineering College, Odisha, India"
+"learning. As a result of this research, many applications, including video surveillance systems"
+Taizhou University
+"Goldsmiths, University of London, London, UK"
+"China Mobile Research Institute, Xuanwu Men West Street, Beijing"
+"Human and Health Sciences, Swansea University, Swansea, United Kingdom, 3 Abertawe Bro-Morgannwg"
+"Multimodal Computing and Interaction, Saarland University, Germany"
+"School of Computer Science, Northwestern Polytechnical University, China"
+"Intelligent Systems Group, University of Groningen, The Netherlands"
+"Center for Automation Research, UMIACS, University of Maryland, College Park, MD 20742 USA"
+"College of software, Chongqing University of Posts and Telecommunications Chongqing"
+"Vision and Sensing, HCC Lab, ESTeM, University of Canberra"
+"Computer Science and Arti cial Intelligence Laboratory, Massachusetts Institute of Technology, Cambridge, MA, USA"
+"P.A. College of Engnineering, Mangalore"
+"Beijing Institute of Technology, Beijing 100081, PR China"
+"M.H Saboo Siddik College of Engineering, University of Mumbai, India"
+"School of Computer Science, Carnegie Mellon University, USA"
+"Computer Science Division, The Open University of Israel, Israel"
+Achariya college of Engineering Technology
+"Courant Institute of Mathematical Sciences and Google Research, New York, NY"
+High Institute of Medical Technologies
+"cid:63) Faculty of Computing, Information Systems and Mathematics, Kingston University London"
+Sun Yat-Sen (Zhongshan) University
+"School of Electrical and Information Engineering, The University of Sydney, Sydney, NSW, Australia, 2 Sydney Medical"
+"Institute for Infocomm Research, Singapore"
+"ECSE, Rensselaer Polytechnic Institute, Troy, NY"
+"National Institute of Optics, National Research Council, Arnesano, LE, Italy"
+"Xi an Jiaotong University, China"
+"North Dakota State University, Fargo, ND58105, USA"
+University of Twente 2Dublin City University 3Oxford University
+"School of Electrical, Computer and Energy Engineering, Arizona State University"
+"Dr.Pauls Engineering College, Villupuram District, Tamilnadu, India"
+"tum Computation and Intelligent Systems, Faculty of Engineering and Information Technology, University"
+"Machine Intelligence Group, School of Computer Science, Indian Institute of Information Technology and"
+"University of Technology, Guangzhou, 510640, P.R.China"
+"Queen Mary, University of London, E1 4NS, UK"
+University of Wollongong. For further information contact the UOW
+"Engineering, Ton Duc Thang University, 19 Nguyen Huu Tho Street, Ho Chi Minh City, Vietman"
+College of Computer Science and Technology
+"Coordinated Science Lab, University of Illinois at Urbana-Champaign"
+Gangnung-Wonju National University
+"Institute of Digital Media, Peking University, Beijing, 100871, China"
+"Aristotle University of Thessaloniki, Box 451, 54124 Thessaloniki, Greece"
+"cid:2)Imperial College London, U.K"
+"School of Computer Science and Engineering, Southeast University, Nanjing 211189, China"
+"Cognitive Arti cial Intelligence, Utrecht University, Heidelberglaan 6, 3584 CD, Utrecht"
+"School of Computer Science, Northwestern Polytechnical University, P.R.China"
+"Friedrich Schiller University, D-07740 Jena"
+Mahatma Gandhi Institute of Technology
+"Research Center for Information Technology Innovation, Academia Sinica, Taipei, Taiwan"
+"Shenzhen Key Lab of CVPR, Shenzhen Institutes of Advanced Technology"
+"Max Planck Institute for Informatics, Saarbr ucken, Germany (MPI-INF.MPG.DE"
+B.S. University of Indonesia
+"University of T ubingen, T ubingen, Germany"
+"School of Computer Science, The University of Adelaide, Australia"
+"University of Amsterdam, Amsterdam, the Netherlands, 2 Leiden University"
+Honda Research Institute USA
+"Rm 1365, Stanford University, 401 Quarry Road, Stanford, CA"
+"Machine Intelligence Laboratory, College of Computer Science, Sichuan University"
+"University of Nottingham, UK, School of Computer Science"
+"Massachusetts Institute of Technology, 2Facebook Applied Machine Learning, 3Dartmouth College"
+"Sogang University, Seoul 121-742, Republic of Korea"
+Imperial College London / Twente University
+"National University of Singapore, 2Shanghai Jiao Tong University"
+"School of Software, Tsinghua University, Beijing 100084, China"
+"SRV Engineering College, sembodai, india"
+Central Mechanical Engineering Research Institute
+"Shenzhen Key Laboratory of Spatial Smart Sensing and Service, Shenzhen University, P.R. China"
+"Gayathri.S, M.E., Vins Christian college of Engineering"
+"Universiteit van Amsterdam, Roeterstraat 15, 1018 WB Amsterdam, The Netherlands"
+"CVSSP, University of Surrey, UK"
+Sanghvi Institute of Management and Science
+"Multimedia Laboratory, The Chinese University of Hong Kong, Hong Kong"
+"U.S. Army Research Laboratory, 2800 Powder Mill Road, Adelphi, MD USA"
+"National Laboratory on Machine Perception, Peking University, Beijing, P.R. China"
+"Computer Science, Beijing Institute of Technology, Beijing 100081, P.R.China"
+College of Information and Electrical Engineering
+"aResearch Scholar, Anna University, Chennai, Inida"
+"School of Computer Science, Tianjin University"
+"Johns Hopkins University, 3400 N. Charles Street, Baltimore, MD 21218, USA"
+"b DEI - University of Padova, Via Gradenigo, 6 - 35131- Padova, Italy"
+"Amsterdam; and 3Center for Experimental Economics and Political Decision Making, University of Amsterdam"
+"Moscow Institute of Physics and Technology, Institutskiy per., 9, Dolgoprudny, 141701, Russia"
+University of California at San Diego
+"Engineering, University of Akron, Akron, OH 44325-3904, USA"
+"DIEI, University of Perugia, Italy"
+"Michigan State University, 3115 Engineering Building"
+"Publication details, including instructions for authors and subscription"
+"Max Planck Institute for Informatics, Saarbr ucken, Germany"
+"iCV Research Group, Institute of Technology, University of Tartu, Tartu 50411, Estonia"
+"University College London, 12 Queen Square, London WC1N 3BG, UK"
+"Jawaharlal Technological University, Anantapur"
+SAMSI and Duke University
+"School of Electrical and Information Engineering, Xi an Jiaotong University, Xi an, China"
+"MISC Laboratory, Constantine 2 University, Constantine, Algeria"
+"SRV Engineering College, sembodai, india"
+"Institute of Computing Technology, CAS"
+Institute of control science and engineering
+National Institute of Informatics
+"Centre for Vision, Speech and Signal Processing, University of Surrey, Guildford, GU2 7XH"
+Institute for Robotics and Intelligent Systems
+St. Anne s College
+"School of Mathematics and Statistics, Xi an Jiaotong University, P. R. China"
+"DSP Lab, Sharif University of Technology, Tehran, Iran"
+"J. P. College of Engineering, India"
+"National Laboratory on Machine Perception, Peking University, Beijing, P.R. China"
+SUS college of Engineering and Technology
+"methods, including graph matching, optical- ow-based"
+"Turin, Italy, 3 Faculty of Humanities, Research Unit of Logopedics, Child Language Research Center, University of Oulu, Oulu"
+"Michigan State University, NEC Laboratories America"
+"Polytechnic University of Milan, Milan, 20156, Italy, 3 Applied Electronics"
+"The Blavatnik School of Computer Science, Tel Aviv University, Israel"
+"School of Information Engineering, Guangdong Medical College, Song Shan Hu"
+"Nanjing University of Information Science and Technology, Nanjing, 210044, China"
+M. Mark Everingham University of Leeds
+University of California at San Diego
+IBM T.J. Watson Research Center
+"Rensselaer Polytechnic Institute, 110 Eighth Street, Troy, NY 12180 USA"
+"LCSEE, West Virginia University"
+"e ects of di erence factors, including age group, age gap"
+"CRCV, University of Central Florida"
+"VHNSN College, Virudhunagar, ANJA College"
+Institute for Robotics and Intelligent
+"University of Pittsburgh and 2University of British Columbia, Vancouver, British Columbia, Canada"
+ICSI / UC Berkeley 2Brigham Young University
+"Microsystems Design Lab, The Pennsylvania State University"
+"Student, Computer Science, Shah and Anchor Kuttchi Engineering College, Mumbai, India"
+"Human Language Technology and Pattern Recognition Group, RWTH Aachen University, Germany"
+"UMIACS, University of Maryland, College Park, USA"
+"R.C.Patel Institute of Technology, Shirpur, Dist.Dhule.Maharashtra, India"
+"Beckman Institute, University of Illinois at Urbana-Champaign, IL, USA"
+"Center for Research in Computer Vision, University of Central Florida"
+University of Siegen
+"Faculty of Telecommunications, Technical University, Sofia, Bulgaria"
+"subsection a table summarizing the key features of the database is provided, including (where available) the number of"
+"USC IRIS Lab, University of Southern California"
+"bTsinghua University, Beijing, China"
+"University of Illinois, Urbana-Champaign University of California, San Diego"
+Interactive and Digital Media Institute
+"North China University of Technology, Beijing 100144 CHINA"
+"University of Vigo, Spain"
+"Electronics And Communication Engg., Adhiyamaan College of Engg., Hosur, (India"
+"cFaculty of Electrical Engineering, Mathematics, and Computer Science, University of Twente, The Netherlands"
+"Kingston University London, University of Westminster London"
+"Center for Research in Computer Vision, University of Central Florida, Orlando, FL"
+"University Bourgogne Franche-Comt , France"
+"methods, including graph matching, optical- ow-based"
+"School of Electrical and Information Engineering, The University of Sydney, Sydney, NSW, Australia, 2 Sydney Medical"
+"B. Eng., Zhejiang University"
+"Research Scholar, PSGR Krishnammal College for Women, Coimbatore"
+"he University of Hong Kong, Pokfulam"
+"Rutgers, The State University of New Jersey, 508 CoRE, 94 Brett Rd, Piscataway, NJ"
+"Anatomy and Genetics, University of Oxford, Oxford, United Kingdom; 3The Wellcome"
+"Governance, Keio University"
+"Goldsmiths, University of London, UK"
+Honda Fundamental Research Labs
+"CVIP Lab, University of Louisville, Louisville, KY 40292, USA"
+"The Big Data Research Center, Henan University, Kaifeng 475001, China"
+"Rochester Human-Computer Interaction (ROC HCI), University of Rochester, NY"
+"Machine Vision Lab, Faculty of Environment and Technology, University of the West of England"
+"Visual Geometry Group, University of Oxford, Oxford UK"
+"gelmeyer et al., 1996); and, increasingly, its role in reactions to"
+"CMR Institute of Technology, Hyderabad, (India"
+M.S. University of Central Florida
+"PG Scholar, Dr.Pauls Engineering College, Villupuram District, Tamilnadu, India"
+"Toyota Research Institute, Cambridge, MA 2 University of Michigan, Ann Arbor, MI"
+"The Blavatnik School of Computer Science, Tel-Aviv University, Tel-Aviv, Israel"
+The open University of Israel. 2Adience
+Cornell University 2 Cornell Tech
+"School of Electrical Engineering, Iran University of Science and Technology, Tehran, Iran"
+"Shenzhen Key Laboratory of Spatial Smart Sensing and Service, Shenzhen University, P.R. China"
+Sarhad University of Science and Information Technology
+"TNLIST, Tsinghua University, Beijing, 100084, China"
+"Institute of Computing Technology, Chinese Academy of Sciences, Beijing, 100190, China"
+USC Information Sciences Institute
+"Graduate School of Engineering, Kobe University, Kobe, 657-8501, Japan"
+"National Laboratory of Pattern Recognition, Institute of Automation, CAS, Beijing 100190, P. R. China"
+"School of EEE, Nanyang Technological University, Singapore"
+"School of Computing, National University of Singapore, Singapore"
+"Islamic Azad University, Gonabad, Iran"
+"U.S. Army Research Laboratory, 2800 Powder Mill Road, Adelphi, MD USA"
+"Shenzhen Key Lab of Comp. Vis. and Pat. Rec., Shenzhen Institutes of Advanced"
+"Aditya Institute of Technology And Management, Tekkali, Srikakulam, Andhra Pradesh"
+"Faculty of Information Technology, Barrett Hodgson University, Karachi, Pakistan"
+"Centre for Quantum Computation and Intelligent Systems, Faculty of Engineering and IT, University of"
+"College of Computer Science and Information Technology, Central South University of Forestry and Technology, Hunan 410004, China"
+"Grif th University, QLD, Australia"
+"Rutgers, The State University of New Jersey, 723 CoRE, 94 Brett Rd, Piscataway, NJ"
+"School of Information and Control Engineering, Nanjing University of Information Science and Technology, Nanjing, 210044, China"
+"Institute of Mental Health, Peking University, P.R. China"
+"National Taichung University of Science and Technology, Taichung, Taiwan, R.O.C"
+"School of ICE, Beijing University of Posts and Telecommunications, Beijing, China"
+"Faculty of Engineering Building, University of Malaya, 50603 Kuala Lumpur, Malaysia"
+"bFaculty of Engineering, International Islamic University, Jalan Gombak, 53100 Kuala Lumpur, Malaysia"
+"School of Electronic and Information Engineering, South China University of Technology"
+"College of Information, Yunnan Normal University, Kunming, China"
+"King Saud University, KSA"
+"Cognitive Brain Research Unit, Institute of Behavioural Sciences, University of"
+"Hindusthan College of Engineering and Technology, Coimbatore, India"
+"Institute of Automation, Chinese Academy of Sciences"
+"College of Computer and Information Engineering, Tianjin Normal University, Tianjin 300387, China"
+"University of Zagreb, Faculty of Electrical Engineering and Computing"
+"DIEI, University of Perugia, Italy"
+"National Research Council of Italy, Institute for Microelectronics and Microsystems, Lecce"
+"Beckman Institute, University of Illinois at Urbana-Champaign, USA"
+"Ponti cal Catholic University of Rio de Janeiro, Brazil"
+"Final Year, PanimalarInstitute of Technology"
+"LIACS Media Lab, Leiden University, The Netherlands"
+"The Robotics Inistitute, Carnegie Mellon University"
+"Institute for Computer Graphics and Vision, Graz University of Technology"
+"Jet Propulsion Laboratory, California Institute of Technology, Pasadena, CA"
+"cid:2)Honda RandD Americas, Inc., Boston, MA, USA"
+Bangladesh University of Engineering and Technology(BUET
+"School of Software, Sun Yat-sen University, China"
+"Badji-Mokhtar University, P.O.Box 12, Annaba-23000, Algeria"
+Acharya Institute Of Technology
+"Universitat Polit cnica de Catalunya, 6 University of Vigo, 7 LIUM, University of Maine"
+"Graduate School of System Informatics, Kobe University"
+"School of Mathematics and Statistics, Xi an Jiaotong University, Xi an, China"
+"Sendai National College of Technology, Natori, Japan"
+College of Engineering and Mineral Resources
+"aTurgut Ozal University, Ankara Turkey"
+"VISLab, EBUII-216, University of California Riverside"
+"Moscow State University, dept. of Computational Mathematics and Cybernetics"
+"Annamacharya Institute of Technology and Sciences, Tirupati, India"
+"Sri Manakula Vinayagar Engineering College, Pondicherry"
+"College of Computer and Information Science, Northeastern University, Boston, USA"
+"gies (Bughin et al. 2017). A range of other sectors, includ"
+Huazhong Agricultural University
+"IIE, Universidad de la Rep ublica, Uruguay. 2ECE, Duke University, USA"
+"iCV Research Group, Institute of Technology, University of Tartu, Tartu 50411, Estonia"
+"Rayalaseema University Kurnool, Andhra Pradesh"
+"IKAT, Universiteit Maastricht, St. Jacobsstraat 6, 6211 LB Maastricht, The Netherlands"
+"University of Genoa, Italy"
+ShahidBeheshti University
+"University B.D.T.College of Engineering, Visvesvaraya"
+"MRC Laboratory For Molecular Cell Biology, University College London"
+"National Kaohsiung University of Applied Sciences, Kaohsiung, Kaohsiung, Taiwan, ROC"
+"IBM T. J. Watson Research Center, PO Box 704, Yorktown Heights, NY"
+"Graduate Institute of Networking and Multimedia, National Taiwan University"
+"School of Electrical and Information Engineering, Xi an Jiaotong University, Xi an, China"
+"School of Computer and Information Science, Chongqing Normal University 401331, China"
+Queen s University Belfast
+"Institute of Automation, Chinese Academy of Sciences; 2Miscrosoft Research Asian; 3Media School"
+ICMC University of S ao Paulo
+comparisons with 12 instance-based classi ers on 13 benchmark University of California Irvine
+"Faculty of Electrical Engineering, University of Ljubljana"
+"Concordia University, Computer Science and Software Engineering, Montr eal, Qu ebec, Canada"
+"Shanghai Key Lab of Intelligent Information Processing, School of Computer Science, Fudan University, China"
+"Institute of Image Processing and Pattern Recognition, Shanghai Jiao Tong University"
+"Information Sciences Institute, USC, CA, USA"
+"Seattle Paci c University, Seattle, WA 98119-1957, USA"
+"Arti cial Intelligence Institute, China"
+"Intelligent Systems Laboratory, Halmstad University, Halmstad, Sweden"
+"Computer Science and Engineering, Michigan State University, East Lansing, USA"
+"Institute for Infocomm Research, A*STAR"
+Federal University of Bahia (UFBA
+"Google, Inc"
+Tripura University (A Central University
+"School of Medicine, Shenzhen University, Shenzhen 518060, China"
+"University of T ubingen, T ubingen, Germany"
+"Research Scholar, CGC Group of Colleges, Gharuan, Punjab, India"
+"aCollege of Computer Science at Chongqing University, 400044, Chongqing, P.R.C"
+"University Street, Montral, QC H3A 0E9, Canada"
+"University of Bonn, Roemerstrasse 164, 53117 Bonn, Germany"
+"University of Amsterdam, University of Trento, Italy"
+"of Maryland, College Park, MD 20742, USA"
+"Beckman Institute, University of Illinois at Urbana-Champaign, Urbana, IL 61801, USA"
+"2Program in Neuroscience, and 3Rotman Research Institute, University of Toronto, Toronto, Ontario M5S 3G3, Canada"
+"Islamic Azad University, Shahrood, Iran"
+"Idiap Research Institute and EPFL, 2 LIMSI, CNRS, Univ. Paris-Sud, Universit Paris-Saclay"
+"M.Tech Student, Mount Zion College of Engineering, Pathanamthitta, Kerala, India"
+"The School of Electrical and Electronic Engineering, Yonsei University, 134 Shinchon-Dong"
+"School of Computer Science and Information Systems, Birkbeck College, University of London"
+Odaiyappa College of
+"Proto Labs, Inc"
+"University of Zagreb, Faculty of Electrical Engineering and Computing, Unska 3, 10000 Zagreb, Croatia"
+"Turin, Italy, 3 Faculty of Humanities, Research Unit of Logopedics, Child Language Research Center, University of Oulu, Oulu"
+"GIT Vision Lab, http://vision.gyte.edu.tr/, Gebze Institute of Technology"
+"Electrical and Computer Engineering, The University of Memphis"
+"Multimedia Laboratory, The Chinese University of Hong Kong, Hong Kong"
+MITSUBISHI ELECTRIC RESEARCH LABORATORIES
+"Visual Analysis of People Lab, Aalborg University, Denmark"
+"School of Games, Hongik University, Seoul, Korea"
+"B.Eng., Nankai University"
+"Faculty of Electrical Engineering, Mathematics and Computer Science, University of Twente, The"
+"obtained for all other uses, in any current or future media, including reprinting/republishing"
+"College of Computer Science and Technology, Zhejiang University, China"
+"CVIP Lab, University of Louisville, Louisville, KY, USA"
+"b DEI - University of Padova, Via Gradenigo, 6 - 35131- Padova, Italy"
+"Key Laboratory of Behavior Sciences, Institute of Psychology"
+"Gokaraju Rangaraju Institute of Engineering and Technology, Hyderabad"
+"College of Engineering, Mathematics and Physical Sciences"
+"Australian National University, 2CVLab, EPFL, Switzerland, 3Smart Vision Systems, CSIRO"
+"Key Laboratory of Embedded System and Service Computing, Ministry of Education, Tongji University, Shanghai, China"
+"Korea University, Seoul 136-713, Korea"
+"Engineering, National Formosa University"
+VelTech HighTech Dr. Rangarajan Dr.Sakunthala Engineering College
+"Harbin Institute of Technology, School of Computer Science and Technology"
+"Figure 1: A few results from our VRN - Guided method, on a full range of pose, including large expressions"
+"and Engineering, Beihang University, Beijing, China"
+"School of Computer Science, University of Nottingham"
+"RTM Nagpur University, Campus Nagpur, (MS)-India"
+"Wenzhou University, China"
+"Dr.Pauls Engineering College, Villupuram District, Tamilnadu, India"
+Tokyo Polytechnic University
+"College of Software Engineering, Southeast University, Nanjing 210096, China"
+Shaheed Zulfikar Ali Bhutto Institute of Science and Technology Islamabad
+"b The Interdisciplinary Center for Research on Emotions, University of"
+"Asia University, Taichung, Taiwan"
+"School of Computer Science and Technology, Tianjin University, 300072 Tianjin, China"
+UniversityofMaryland
+"Arts, Commerce and Science College, Gangakhed, M.S, India"
+Computer Vision and Robotics Research Laboratory
+"SSN College of Engineering, Kalavakkam, Tamil Nadu, India"
+"Division of Electronics and Electrical Engineering, Dongguk University, 30 Pildong-ro, 1-gil, Jung-gu"
+"Computer Graphics Research Group, University of Freiburg, Freiburg, Germany"
+"CISE, University of Florida, Gainesville, FL"
+Institute for Numerical Mathematics
+"cid:5)School of Electrical and Computer Engineering, Purdue University, West Lafayette, IN. 47907, USA"
+"Rutgers, The State University of New Jersey"
+"Schreiber Building, room 103, Tel Aviv University, P.O.B. 39040, Ramat Aviv, Tel Aviv"
+Savitribai Phule Pune University
+"School of Computer Science, Carnegie Mellon University, PA 15213, USA"
+The Allen Institute for AI
+"bUniversity of Nottingham, School of Computer Science, Nottingham, UK"
+Institute of Arti cial Intelligence and Cognitive Engineering
+"Sejong University, 98 Gunja, Gwangjin, Seoul 143-747, Korea"
+"Copyright c(cid:3) 2017 The Institute of Electronics, Information and Communication Engineers"
+"Eastern Mediterranean University, Gazima usa, Northern Cyprus"
+in the Graduate School of Duke University
+"uses, in any current or future media, including"
+"School of Mathematical Sciences, Dalian University of Technology, Linggong Rd. 2, Dalian"
+"College of Computer Science, Chongqing University, Chongqing, 400030, China"
+"School of Electrical and Computer Engineering, RMIT University, Melbourne, Australia"
+"F.Ferraro, University of Rochester"
+"School of Computer Engineering, Sejong University, Seoul, Korea"
+"Centre for Vision, Speech and Signal Processing, University of Surrey, Guildford GU2 7XH, UK"
+"College Road East, Princeton, NJ"
+"School of Computer Science and Software Engineering, Shenzhen University, Nanhai Ave 3688, Shenzhen"
+"Queen Mary College, London"
+"Shenzhen Institutes of Advanced Technology, CAS, China"
+"SSESA, Science College, Congress Nagar, Nagpur, (MS)-India"
+"The Institute of Scienti c and Industrial Research, Osaka University"
+"College of Information Science and Engineering, Ocean University of China, Qingdao, China"
+"Research Groups on Intelligent Machines, University of Sfax, Sfax 3038, Tunisia"
+Informatics and Telematics Institute
+"Computer Science and Software Engineering, The University of Western Australia"
+"University of Nottingham, UK, School of Computer Science"
+"Gayathri.S, M.E., Vins Christian college of Engineering"
+"School of Computer Science, South China Normal University, China"
+"Ph.D student Zaid Shhedi, Doctoral School of Automatic Control and Computers, University"
+"Fudan University, 2Microsoft Research Asia, 3University of Maryland"
+"Computer Vision Laboratory, The University of Nottingham"
+"Computer Vision Laboratory, Link oping University, Sweden"
+"D.Y.Patil Institute of Engineering and Technology, Pimpri, Pune-18, Savitribai Phule Pune University"
+SUS college of Engineering and Technology
+"Medical School, University of Ioannina, Ioannina, Greece"
+"Machine Vision Group, P.O. Box 4500, FI-90014, University of Oulu, Finland"
+"School of Computing Science, Simon Fraser University, Burnaby, B.C., Canada; E-Mail"
+"Research Center for Learning Science, Southeast University, China"
+cThe Open University
+Dhanalakshmi Srinivasan College of Engineering
+"Arts, Science and Commerce College, Chopda"
+c(cid:13) Carnegie Mellon University
+"Bharti Vidyapeeth Deemed University, Pune, India"
+"The University of Queensland, School of ITEE"
+"University of Shef eld, Regent Court, 211 Portobello, Shef eld"
+"St.Joseph s College of Engineering, Old Mamallapuram Road, Kamaraj Nagar, Semmencherry, Chennai"
+"Division of Computer Engineering, Chonbuk National University, Jeonju-si, Jeollabuk-do"
+"College of Electronic Science and Engineering, National University of Defense Technology, Changsha, China"
+"Uber Advanced Technologies Group, 5Vector Institute"
+"AncyRijaV, Author is currently pursuing M.E (Software Engineering) in Vins Christian College of"
+"Computer Science Division, The Open University of Israel"
+"Computer Science and Engineering, University of Washington, Seattle, WA"
+"Link oping University, Computer Vision Laboratory"
+"Institute of Image Processing and Pattern Recognition, Shanghai Jiao Tong University, Shanghai, China"
+"Polytechnic Institute of NYU, NY, USA"
+"School of Mathematical Sciences, Monash University, VIC 3800, Australia"
+Institute for Information Systems Engineering
+"Allen Institute for Arti cial Intelligence (AI2), Seattle, WA"
+"Institute for Studies in Fundamental Sciences (IPM), Tehran, Iran"
+"University of Science, VNU-HCM, Viet Nam"
+"Gdansk University of Technology, Faculty of Electronics, Telecommunication"
+"China Mobile Research Institute, Xuanwu Men West Street, Beijing"
+"Computer Science and Electrical Engineering, West Virginia University, Morgantown, USA"
+"UG student, Amity school of Engineering and Technology, Amity University, Haryana, India"
+"Computer and Systems Engineering, Rensselaer Polytechnic Institute"
+"Informatization Office, National University of Defense Technology, Changsha 410073, China"
+"Viswajyothi College of Engineering and Technology Kerala, India"
+"Sri Sunflower College of Engineering and Technology, Lankapalli"
+Cornell University 2Eastman Kodak Company
+"University of Maryland, Center for Automation Research"
+"Boston College; 2Psychiatric Neuroimaging Program, Massachusetts General Hospital, Harvard Medical School; and 3Athinoula A. Martinos"
+"Center for Brain Science, Harvard University, Cambridge, MA, USA"
+"Computer Science and Engineering, University of Washington"
+"Intelligent Systems Lab Amsterdam, University of Amsterdam, The Netherlands"
+"M.Tech Student, SSG Engineering College, Odisha, India"
+Mihaylo College of Business and Economics
+"Johns Hopkins University, Center for Speech and Language Processing"
+"Institute for Disease Modeling, Intellectual Ventures Laboratory, Bellevue, WA 98004, United States"
+Human Interaction Research Lab
+Courant Institute and Google Research
+"State University of New York at Binghamton, Binghamton, NY"
+"University of Maryland Institute for Advanced Computer Studies, College Park, MD"
+"Augsburg University, Germany"
+Doctor of Philosophy of University College London
+"tum Computation and Intelligent Systems, Faculty of Engineering and Information Technology, University"
+"Faculty of Electrical Engineering, Mathematics and Computer Science, Delft University of"
+"National Institute of Technology, Toyota College, Japan"
+"Menara, 1008 Tunis; 2University of Tunis El-Manar, Tunis with expertise in Mechanic, Optics, Biophysics, Conference Master"
+"Research School of Computer Science, The Australian National University, ACT 2601, Australia"
+"Pathological anxiety is associated with disrupted cognitive processing, including working memory and"
+"our analysis to stereotypes beyond gender, including those"
+"a The Robotics Institute, Carnegie Mellon University, 5000 Forbes Avenue, Pittsburgh, PA 15213, USA"
+Boston University / **Rutgers University / ***Gallaudet University
+CVSSP University of Surrey
+"the face, including negative affect and distress, dates"
+University of Sfax
+at the Delft University of Technology
+"R. Campellone, 3210 Tolman Hall, University of California, Berkeley"
+"College of Computing, Georgia Institute of Technology, Atlanta, GA, USA"
+"National University of Singapore, 2Shanghai Jiao Tong University"
+University of Beira Interior
+William Marsh Rice University
+Sun Yat-Sen (Zhongshan) University
+"Psychology and Psychiatry, University of Pittsburgh, USA"
+"Collage of Sciences, Baghdad University, Iraq"
+"School of Computer and Information, Hefei University of Technology, China"
+"University of Nottingham, Ningbo, China"
+"Kodak Research Laboratories, Rochester, NY"
+"School of IoT Engineering, Jiangnan University, Wuxi 214122, China"
+"University of Science, VNU-HCMC, Ho Chi Minh city, Vietnam"
+"Center for Automation Research, University of Maryland, College Park, MD"
+"The Image Processing and Analysis Lab (LAPI), Politehnica University of Bucharest, Romania"
+"Education, Yunnan Normal University, Kunming, China"
+"University of California, San Diego 2 Carnegie Mellon University"
+"Institute of Cognitive and Behavioural Neuroscience, SWPS University of Social"
+"Chandigarh University, Gharuan, Punjab, India"
+"School of Electronics and Information Engineering, Tongji University, Caoan Road 4800, Shanghai"
+"Computer Vision Lab, Delft University of Technology"
+"Bioinformatics Institute, A*STAR, Singapore"
+"of Technology, Kochi, Japan, 3 Yunnan Key Laboratory of Computer Technology Applications, Kunming University of Science"
+"University of Maryland, CFAR"
+"DPDCE, University IUAV"
+"University of Nevada at Reno, USA"
+"Max Planck Institute for Informatics, Saarbr ucken, Germany"
+"University of Bari, Bari, Italy"
+Alan W Black (Carnegie Mellon University
+"Janelia Research Campus, Howard Hughes Medical Institute, Ashburn, United"
+Hong Kong Applied Science and Technology Research Institute Company Limited
+"Center for Arti cial Vision Research, Korea University"
+National Institute of Development Administration
+"School of Information Technology and Engineering, University of Ottawa, Ontario, Canada"
+"Human Development and Applied Psychology, University of Toronto, Ontario, Canada"
+"College of Information Science and Engineering, Xinjiang University"
+"National Institute of Informatics, Japan"
+"School of Engineering, University of Guelph"
+"Teaching Affairs Office, Chongqing Normal University, Chongqing 401331, China"
+Azad University of Qazvin
+"Wenzhou University, Wenzhou, China"
+"Language Technologies Institute, School of Computer Science"
+"KTH Royal Institute of Technology, CVAP Lab, Stockholm, Sweden"
+"bSchool of Computer Science and Technology, Harbin Institute of Technology, Harbin 150001, China"
+Institute of Psychology and Behavioral Sciences
+"Grad. School of Information Science and Technology, The University of Tokyo, Japan"
+"Faculty of ETI, Gdansk University of Technology, Gdansk, Poland"
+"Baidu Research, USA 3John Hopkins University"
+"Graduate School of Informatics, Kyoto University"
+"DISI, University of Trento, Italy"
+"Institute of Computing Technology, Chinese Academy of Sciences, Beijing 100190, China"
+"Michigan State University, East Lansing, MI 48824, U.S.A"
+"Technical University of Munich, Germany"
+College of Computer and Information Sciences
+"Rochester Institute of Technology - 83 Lomb Memorial Drive, Rochester, NY USA"
+"Carnegie Mellon University, Electrical and Computer Engineering"
+"The authors are with the Delft University of Technology, Data and Knowl"
+"Vickram College of Engineering, Enathi, Tamil Nadu, India"
+Computer Vision Laboratory. University of Nottingham
+"Institute of Computer Science, Foundation for Research and Technology - Hellas (FORTH), Crete, 73100, Greece"
+"Biometric Research Center, The Hong Kong Polytechnic University"
+"Smart Network System Institute, Institute for Information Industry"
+"School of Physics and Engineering, Sun Yat-Sen University, Guangzhou, China, 2 School of Information"
+"School of Computer Science and Technology, Nanjing University of Science and Technology, China"
+"Central Washington University, 400 E. University Way, Ellensburg, WA 98926, USA"
+"School of Mathematics and Statistics, Xi an Jiaotong University, P. R. China"
+The University of Shef eld
+"School of Computer Science, Fudan University, Shanghai, 200433, China"
+"University of Canberra, Australia, Data61 - CSIRO and ANU, Australia"
+ment. Oxford University Press Series in Affective Science. New York: Oxford
+"University of California, Berkeley1 Adobe"
+Institute of Graduate Studies and Research
+"University of S ao Paulo, S ao Paulo, Brazil"
+"Vision and Security Technology Lab, University of Colorado Colorado Springs"
+"B.S., Computer Engineering, Bo gazi ci University"
+"Faculty of Electrical and Computer Engineering, Bu-Ali Sina University, Hamadan, Iran"
+UvA-DARE is a service provided by the library of the University of Amsterdam (http://dare.uva.nl
+"Key Lab of Intelligent Information Processing, Institute of Computing Technology"
+"P A College of Engineering, Nadupadavu"
+"Graduate School at Shenzhen, Tsinghua University, Shenzhen"
+Submitted to the Senate of the Hebrew University
+DISI - University of Trento
+"Salgado de Oliveira University, Brazil"
+"Faculty of Information Science and Technology, Multimedia University, 75450 Melaka, Malaysia"
+"Helsinki Institute for Information Technology, Aalto University, Finland"
+"Calgary, 2500 University Dr., N.W. Calgary, AB, Canada T2N 1N4. Tel"
+"Centre for Bioinformatics, Biomarker Discovery and Information-Based Medicine, The University of Newcastle, Callaghan, Australia, 2 Departamento de Engenharia de"
+"tion [11, 10] is making possible very large scale visual recognition both in my own ongoing work, including"
+Institute of Electrical and Electronics Engineers
+"Guide, HOD, Computer Science, Shah and Anchor Kuttchi Engineering College, Mumbai, India"
+"Friedrich Schiller University, D-07740 Jena"
+in the College of Engineering and Computer Science
+"Cornell University, Ithaca, NY, U.S.A"
+"Helsinki Collegium for Advanced Studies, University of Helsinki, Finland"
+"University of Tampere, Kanslerinnrinne 1, 33014, Tampere, Finland"
+Link to publication record in Queen's University Belfast Research Portal
+University of Illinois at Urbana-Champaign 2Adobe Research
+"Computer Engineering, Faculty of Engineering, Kharazmi University of Tehran, Tehran, Iran"
+"CISUC, University of Coimbra"
+"DUT-RU International School of Information Science and Engineering, Dalian University of Technology, Dalian, China"
+"Gri th University, QLD-4111, Brisbane, Australia"
+"Research Center for Information Technology Innovation, Academia Sinica"
+Sabanc University
+"Samsung Advanced Institute of Technology (SAIT), KAIST"
+University of Caen Basse-Normandie
+"Institute of Engineering and Technology, Alwar, Rajasthan Technical University, Kota(Raj"
+"School of Computer Science, Northwestern Polytechnical University, P.R.China"
+"ACRV, The Australian National University University of Oxford QUVA Lab, University of Amsterdam"
+"Center for Automation Research, UMIACS, University of Maryland, College Park, MD"
+"Institute of Neural Information Processing, Ulm University, Ulm, Germany"
+"Indraprastha Institute of Information Technology, Delhi"
+"Other uses, including reproduction and distribution, or selling or"
+"Shanghai Jiao Tong University, CloudWalk Technology"
+"Center for Information and Neural Networks, National Institute of Information and Communications Technology (NICT"
+"Vision and Security Technology Lab, University of Colorado at Colorado Springs, Colorado"
+"Education, Yunnan NormalUniversity, Kunming, China2. College of Information, Yunnan"
+"b School of Applied Mathematics, Xidian University, Xi an, China"
+"Bio-Computing Research Center, Shenzhen Graduate School, Harbin Institute of Technology, Shenzhen, Guangdong Province, China, 2 Key Laboratory of Network"
+"School of Computer Science and Technology, Tianjin University"
+"Machine Intelligence Lab (MIL), Cambridge University"
+"University Health Board, Swansea, United Kingdom"
+"University of Colorado, Colorado Springs"
+University of Washington 4The Allen Institute for AI
+"School of Computer Science and Technology, University of Science and Technology of China"
+"Babes Bolyai University, 58-60 Teodor Mihali, C333, Cluj Napoca"
+"Computational Biomedicine Lab, University of Houston, TX, USA"
+"M.Tech, Information Technology, Madras Institute of Technology, TamilNadu, India"
+University of West Bohemia
+"University of S ao Paulo - USP, S ao Paulo - Brazil"
+College of Information and Control Engineering in China University of Petroleum
+"University of Business Agriculture and Technology, Dhaka-1230, Bangladesh"
+"China, 2 School of Computer Science and Engineering, Nanjing University of Science and Technology"
+"School of Software, Tsinghua University, Beijing 100084, China"
+"St. Xavier s Catholic College of Engineering, India"
+"c School of Computational Science, Florida State University, Tallahassee, FL 32306, USA"
+"Beijing Laboratory of IIT, School of Computer Science, Beijing Institute of Technology, Beijing, China"
+"Southern Illinois University, Carbondale, IL 62901 USA"
+The Author 2012. Published by Oxford University Press on behalf of The British Computer Society. All rights reserved
+"MICC, University of Florence"
+"IN3, Open University of"
+"Charotar University of Science and Technology, Changa, India"
+"Electrical Engineering LR11ESO4), University of Tunis EL Manar. Adress: ENSIT 5, Avenue Taha Hussein, B. P. : 56, Bab"
+"Shri Shivaji College, Parbhani, M.S, India"
+"Center for Intelligent Machines, McGill University, 3480 University Street, Montreal, Canada H3A 2A"
+"bRobotics Institute, Carnegie Mellon University, Pittsburgh, PA 15213, U.S.A"
+"and quantify distinct social behaviors, including those involving"
+"Pattern Recognition and Bio-informatics Laboratory, Delft University of Technology, THE NETHERLANDS"
+"Student, Amal Jyothi College of Engineering, Kanjirappally, India"
+"School of Computer Science, University of Windsor, Windsor, ON, Canada N9B 3P"
+"PSGR Krishnammal College for Women, Coimbatore"
diff --git a/scraper/reports/institutions_not_found/not-found-2.csv b/scraper/reports/institutions_not_found/not-found-2.csv
new file mode 100644
index 00000000..f845f7b5
--- /dev/null
+++ b/scraper/reports/institutions_not_found/not-found-2.csv
@@ -0,0 +1,876 @@
+"MIRACL-FS, University of Sfax"
+"Changchun Institute of Optics, Fine Mechanics and Physics, Chinese Academy of Science"
+"Division of Computer Engineering, Chonbuk National University, Jeonju-si, Jeollabuk-do"
+"The Blavatnik School of Computer Science, The Tel-Aviv University"
+"Deparment of Computing, Imperial College London, UK"
+"University of Toronto, 6 Kings College Road, Toronto, ON M5S 3G4 CANADA"
+"SSN College of Engineering, Kalavakkam, Tamil Nadu, India"
+"BRIC, University of North Carolina at Chapel Hill, NC 27599, USA"
+"IIE, Universidad de la Rep ublica, Uruguay. 2ECE, Duke University, USA"
+"SRM University, Kattankulathur, Chennai-603 203, Tamilnadu, India"
+"School of Computer Science, University of Birmingham, UK"
+Stevens Institute of Technology Adobe Systems Inc
+"College of Computers and Information Systems, Umm Al-Qura University, Makkah, KSA"
+"ITCS, Institute for Interdisciplinary Information Sciences, Tsinghua University, Beijing"
+Tomas Bata University in Zl n
+"School of Computing and Info. Sciences, Florida International University"
+"Numediart Institute, University of Mons"
+in the College of Engineering and Computer Science
+"Pursuing M.Tech, Caarmel Engineering College, MG University, Kerala, India"
+The University of Queensland in
+"University Politehnica of Bucharest, Romania, Address Splaiul Independent ei"
+M. Mark Everingham University of Leeds
+"CVIP Lab, University of Louisville, Louisville, KY, USA"
+"School of Psychology, University of Auckland, Auckland, New Zealand"
+"University of Shef eld, Regent Court, 211 Portobello, Shef eld"
+"Babes Bolyai University, 58-60 Teodor Mihali, C333, Cluj Napoca"
+"Faculty of Telecommunications, Technical University, Sofia, Bulgaria"
+"University of Science, VNU-HCMC, Ho Chi Minh city, Vietnam"
+"RoboticsResearchGroup, UniversityofOxford, Oxford, UK"
+"School of Computer Science, Fudan University, Shanghai, China"
+"BECS, Aalto University, Helsinki, Finland"
+"Beihang University 2Gri th University 3University of York, UK"
+"School of Comm. and Info. Engineering, Beijing University of Posts and Telecom., Beijing China"
+Tsinghua-CUHK Joint Research Center for Media Sciences
+Sabanc University
+"Viswajyothi College of Engineering and Technology Kerala, India"
+"Fudan University, 2Microsoft Research Asia, 3University of Maryland"
+"School of Computer Science, University of Nottingham"
+"Advanced Digital Sciences Center (ADSC), University of Illinois at Urbana-Champaign, Singapore"
+"University at Buffalo, SUNY"
+"University of Maryland, Center for Automation Research"
+"MRC Laboratory For Molecular Cell Biology, University College London"
+"IslamicAzad University, Qazvin, Iran"
+KIT University of the State of Baden-W rttemberg and National Laboratory of the Helmholtz Association
+"Research Center E. Piaggio , University of Pisa, Pisa, Italy, 2 Faculty of Psychology, University of Florence, Florence, Italy"
+"University of Zagreb, Faculty of Electrical Engineering and Computing, Unska 3, 10000 Zagreb, Croatia"
+This article was downloaded from Harvard University s DASH
+"Program of Computational Science and Engineering, Bo gazi ci University"
+"Faculty of Mathematics and Computer Science, University of Barcelona, Barcelona, Spain"
+"DISI, University of Trento, Italy"
+College of Information Engineering
+"Faculty of Information Technology, Barrett Hodgson University, Karachi, Pakistan"
+"bUniversity of Nottingham, School of Computer Science, Nottingham, UK"
+"P. O. Box 4500 FI-90014 University of Oulu, Finland"
+University of California at Berkeley / ICSI
+"Intelligent User Interfaces Lab, Ko c University, Turkey"
+"Institute of Computing Technology, Chinese Academy of Sciences, Beijing 100190, China"
+Honda Fundamental Research Labs
+"Queensland Micro- and Nanotechnology Centre and Grif th School of Engineering, Grif th University"
+"Division of IT Convergence, Daegu Gyeongbuk Institute of Science and Technology"
+"University of Zaragoza, Spain"
+"Assam University, Silchar-788011 Assam University, Silchar"
+"School of Computer Software, Tianjin University, 300072 Tianjin, China"
+"University of Alicante, 03690, San Vicente del Raspeig, Alicante, Spain"
+"Grif th University, QLD, Australia"
+"Ecole Polytechnique Federale de Lausanne, Signal Processing Institute"
+"Lab, University College London, London WC1H 0AP, UK. 3Clinical"
+"Institute for Neural Computation, University of California, San Diego, La Jolla, CA"
+"Asian University, Taichung, Taiwan"
+"The Robotics Institute, Carnegie Mellon University, Pittsburgh PA"
+"Sapienza University of Rome, Italy"
+"University of Milano-Bicocca, Italy"
+"Centre for Quantum Computation and Intelligent Systems, University of Technology Sydney, Sydney, Australia"
+Sponsors: Machine Intelligence Research Labs (MIR Labs
+"University of S ao Paulo - USP, S ao Paulo - Brazil"
+"Machine Perception Laboratory, University of California, San Diego"
+"Ross School of Business, University of Michigan, Ann Arbor, MI, USA"
+"University of Barcelona, Gran Via de les Corts Catalanes 585, 08007 Barcelona, Spain"
+University of Cagliari
+"LIP6, UPMC - Sorbonne University, Paris, France"
+"Baidu Research, USA 3John Hopkins University"
+"Computer Science and Software Engineering, The University of Western Australia"
+"Computer Vision Laboratory, The University of Nottingham"
+"L3S Research Center, Hannover, Germany"
+"Science and Intelligence Technology, Shanghai Institutes for Biological Sciences, CAS"
+"School of Computer Science and Technology, Tianjin University, 300072 Tianjin, China"
+"a Institute of Anatomy I, School of Medicine, Friedrich Schiller University, Germany"
+"he University of Hong Kong, Pokfulam"
+"bSchool of Computer Science and Technology, Harbin Institute of Technology, Harbin 150001, China"
+"image being generated by the model, include Active Appearance"
+"Institute of Neuroscience, State Key Laboratory of Neuroscience, CAS Center for Excellence in Brain"
+"Institute of Advanced Technology, Nanjing University of Posts and Telecommunications, Nanjing"
+"Robotics Institute, Carnegie Mellon University, Pittsburgh, PA, 15213. USA"
+"f Neuropsychiatry Section, University of Pennsylvania Medical Center, Hospital of the University of Pennsylvania"
+"School of Computer Science and Information Systems, Birkbeck College, University of London"
+"Queen Mary, University of London"
+University of Sfax
+SUS college of Engineering and Technology
+"School of Psychology, Cardiff University, Cardiff, CF10 3AT, UK"
+"Faculty of Information Technology, Vietnam National University of Agriculture, Hanoi 10000, Vietnam"
+College of Information Science and Engineering
+"Facial expression gures prominently in research on almost every aspect of emotion, including psychophys"
+"Institute for Neural Computation, University of California, San Diego"
+"Proto Labs, Inc"
+Institute of Psychology and Behavioral Sciences
+ATR Interpreting Telecommunications Research Laboratories
+Fraunhofer Institute for Integrated Circuits IIS
+"Chalmers University of Technology, SAFER"
+"Computer Vision Research Group, COMSATS Institute of Information"
+"TNLIST, Tsinghua University, Beijing, 100084, China"
+"School of Computer Science, University of Lincoln, U.K"
+"Suhaila N. Mohammed, Baghdad University, College of Science, Baghdad, Iraq"
+"The School of Electrical and Electronic Engineering, Yonsei University, 134 Shinchon-Dong"
+"St. Xavier s Catholic College of Engineering, Nagercoil, India"
+"Applied computing and mechanics laboratory, Swiss Federal Institute of Technology, 1015 Lausanne, Switzerland"
+"Human-friendly Welfare Robotic System Engineering Research Center, KAIST"
+Language Technologies Institute
+Shaheed Zulfikar Ali Bhutto Institute of Science and Technology Islamabad
+"Doctor of Philosophy in Computer Science at Cardi University, July 24th"
+"Faculty of Electrical Engineering, University of Ljubljana, Tr za ska 25, SI-1000 Ljubljana, Slovenia"
+Northwestern University) to T.E. We thank Vincent De Gardelle for helpful comments on an earlier version of
+"Faculty of Electrical Engineering, University of Ljubljana, Tr za ska cesta 25, SI-1000 Ljubljana, Slovenia"
+"HOD, St. Joseph College of Information Technology, Songea, Tanzania"
+"Birkbeck College, University of London"
+"The Image Processing and Analysis Lab (LAPI), Politehnica University of Bucharest, Romania"
+"School of Communication Engineering, Hangzhou Dianzi University, Xiasha Higher Education Zone"
+Nokia Bell Labs and University of Oxford
+"PG Scholar, Dr.Pauls Engineering College, Villupuram District, Tamilnadu, India"
+"VISLab, EBUII-216, University of California Riverside"
+"University of Waterloo, Waterloo ON N2L3G1, Canada"
+"Robotics Institute, Carnegie Mellon University, Pittsburgh PA"
+"Institute of Information Science, Academia Sinica, Taipei, Taiwan"
+"Computer Science and Engineering, Easwari Engineering College, India"
+"Faculty of Electrical Engineering, University of Ljubljana, Slovenia"
+"Assiut University, Assiut 71515, Egypt"
+"to visually detectable changes in facial appearance, including blushing and tears. These"
+"Institute of Anthropomatics, Karlsruhe Institute of Technology, Germany"
+"Hasan Kalyoncu University, Gaziantep, Turkey"
+"Computer Science Division, The Open University of Israel"
+"Faculty of Computer and Information Science, University of Ljubljana, Ve cna pot 113, SI-1000 Ljubljana"
+"Facebook AI Research, 2Dartmouth College"
+"Computer Science, Engineering and Mathematics School, Flinders University, Australia"
+"Seattle Paci c University, Seattle, WA 98119-1957, USA"
+"Key Lab. of Machine Perception, School of EECS, Peking University"
+"Electrical and Computer Engineering, University of Auckland, New Zealand"
+"The Blavatnik School of Computer Science, Tel Aviv University, Israel"
+Thesis. Rochester Institute of Technology. Accessed from
+cid:63)Stanford University
+"The Robotics Institute, Carnegie Mellon University"
+"School of Computer and Information Engineering, Nanyang Institute of Technology, Henan, Nanyang, 473000, P.R"
+"Institute of Informatics, Istanbul Technical University, Istanbul, 34469, TURKEY"
+State University of Feira de Santana (UEFS
+University of Siegen
+"Neuroscience, Icahn School of Medicine at Mount Sinai, Friedman Brain Institute, New York, NY, United States"
+"M.Tech, Information Technology, Madras Institute of Technology, TamilNadu, India"
+"cid:2)Honda RandD Americas, Inc., Boston, MA, USA"
+tional Taipei University for his help in performing simulations. The author would like to thank Mr. Ming
+"Skolkovo Institute of Science and Technology (Skoltech), Russia"
+"USC IRIS Lab, University of Southern California"
+"Teaching Affairs Office, Chongqing Normal University, Chongqing 401331, China"
+"EEMCS, University of Twente, Netherlands"
+"puter Engineering, National University of Singapore, Singapore (e-mails"
+"School of Computer Science and Engineering, South China University of Technology, Guangzhou, China"
+"Electrical and Computer Engineering, The University of Memphis"
+"Held at R.C.Patel Institute of Technology, Shirpur, Dist. Dhule, Maharastra, India"
+"Robotics Institute, Carnegie Mellon University, Pittsburgh, Pennsylvania"
+"Machine Intelligence Lab (MIL), Cambridge University"
+"Co-Guide, Computer Science, Shah and Anchor Kuttchi Engineering College, Mumbai, India"
+"Nanjing, 210023, China, 4 School of Computer Science and Technology, Nanjing University of Posts and"
+"School of Information Science and Technology, ShanghaiTech University, Shanghai, 200031, China"
+"ISTMT, University of Tunis El Manar Address: 9, Rue Docteur Zouhe r Safi 1006; 3Faculty of Medicine of Tunis; Address"
+"CAS), Institute of Computing Technology, CAS, Beijing, 100190, China"
+"ACRV, The Australian National University University of Oxford QUVA Lab, University of Amsterdam"
+"Technology, University of Oradea 410087, Universitatii 1, Romania"
+"University Health Board, Swansea, United Kingdom"
+facultyofmathematicsandnaturalsciencesarti cialintelligence22-09-2016|1ATitleA.UthorRijksuniversiteitGroningenSomeFaculty
+"Institute for Computer Graphics and Vision, Graz University of Technology"
+"School of Computer Engineering, Sejong University, Seoul, Korea"
+"Anatomy and Genetics, University of Oxford, Oxford, United Kingdom; 3The Wellcome"
+"Calgary, 2500 University Dr., N.W. Calgary, AB, Canada T2N 1N4. Tel"
+"Governance, Keio University"
+"cid:63)Queen Mary University of London, Imperial College London"
+"RGPV University, Indore"
+"Shaanxi Provincial Key Lab on Speech and Image Information Processing, Northwestern Polytechnical University, Xi an, China"
+Honda Research Institute
+"cid:5)School of Electrical and Computer Engineering, Purdue University, West Lafayette, IN. 47907, USA"
+Sarhad University of Science and Information Technology
+"University of Twente, EEMCS, Netherlands"
+"School of Computer Science and Technology, Nanjing University of Science and Technology, China"
+"University of Illinois, Urbana-Champaign University of California, San Diego"
+"Aristotle University of Thessaloniki, GR-541 24 Thessaloniki, Greece"
+"School of Data of Computer Science, Sun Yat-sen University, P.R. China"
+"Michigan State University, East Lansing, MI 48824, U.S.A"
+"Rutgers, The State University of New Jersey, Piscataway, NJ"
+"Asian Institute of Technology, Pathumthani, Thailand"
+"Center for Arti cial Vision Research, Korea University"
+"Korea Electronics Technology Institute, 203-103 B/D 192, Yakdae-Dong"
+"Technical University of Munich, Germany"
+"City University of Hong Kong, Kowloon 999077, Hong Kong, China"
+"Laboratory, University of Houston, Houston, TX, USA"
+"School of Computer Science, University of Lincoln, United Kingdom"
+"School of Electrical and Computer Engineering, RMIT University, Melbourne, Australia"
+"Ponti cal Catholic University of Rio de Janeiro, Brazil"
+"School of Computer Science, Wuyi University, Jiangmen 529020, China"
+Institute of Deep Learning
+"Institute of Information Science, Beijing Jiaotong University, Beijing 100044, P.R. China"
+"Computational Biomedicine Laboratory, University of Houston, Houston, Texas 77204, USA"
+"FI-90014 University of Oulu, Finland"
+"Computer Vision Group, L. D. College of Engineering, Ahmedabad, India"
+"Arts Media and Engineering, Arizona State University"
+University of Beira Interior
+"Institute of Biochemistry, University of Balochistan, Quetta"
+Ho Chi Minh City University of
+"Asia University, Taichung, Taiwan"
+"National Kaohsiung University of Applied Sciences, Kaohsiung, Kaohsiung, Taiwan, ROC"
+"USC Institute for Robotics and Intelligent Systems (IRIS), Los Angeles, CA"
+"New York University Shanghai, 1555 Century Ave, Pudong"
+Facebook 4Texas AandM University 5IBM Research
+"Institute of Experimental Biology of Polish Academy of Sciences, Warsaw, Poland"
+yAristotle University of Thessaloniki
+"School of Electronics and Information Engineering, Tongji University, Caoan Road 4800, Shanghai"
+"CVSSP, University of Surrey"
+ShahidBeheshti University
+"Electronics Engineering, National Institute of Technical Teachers"
+"Sichuan Fine Arts Institute, Chongqing, China"
+"tion [11, 10] is making possible very large scale visual recognition both in my own ongoing work, including"
+"Institute for Electronics, Signal Processing and Communications"
+National Institute of Development Administration
+"EEMCS, University of Twente, The Netherlands"
+UvA-DARE is a service provided by the library of the University of Amsterdam (http://dare.uva.nl
+"College of Sciences, Northeastern University, Shenyang 110819, China"
+"and bDivision of Engineering and Applied Sciences 136-93, California Institute of Technology, Pasadena, CA"
+Dietrich College of Humanities and Social Sciences
+"School of Electronic and Information Engineering, Tongji University, Shanghai, China"
+"School of Information Technology and Electrical Engineering, The University of Queensland"
+"Institute of Systems and Robotics, University of Coimbra, Portugal"
+"Synergy Systems, Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences, Guangdong"
+"Center for Automation Research, University of Maryland"
+"Japan, 2 Center for Special Needs Education, Nara University of Education, Nara-shi, Nara"
+University of Science and Technology Beijing
+"Computational Biomedicine Lab, University of Houston, TX, USA"
+Nam k Kemal University
+University of Colorado at Colorado Springs
+University of Freiburg
+"DESTEC, FLSHR Mohammed V University-Agdal, Rabat, Morocco"
+"College of Science, Menou a University, Menou a 32721, Egypt"
+Interactive and Digital Media Institute
+"College of Computer and Information Science, Northeastern University, MA, USA"
+"School of Computer and Information, Hefei University of Technology, Hefei"
+"Research Groups on Intelligent Machines, University of Sfax, Sfax 3038, Tunisia"
+National Institute of Advanced Industrial
+USC Institute for Creative Technologies
+"University of Canberra, Australia, Data61 - CSIRO and ANU, Australia"
+Institute for Vision Systems Engineering
+"DCMandB, University of Michigan, Ann Arbor, USA 4 SCS, Carnegie Mellon University, Pittsburgh, USA"
+"School of Electrical and Electronic Engineering, Nanyang Technological University, Singapore"
+eBay Research Labs
+"Research Institute of Child Development and Education, University of Amsterdam, Utrecht, The"
+Howard Hughes Medical Institute (HHMI
+"Research Institute of Shenzhen, Wuhan University, Shenzhen, China"
+"and Modeling, Rutgers University"
+"Institute of Computing, University of Campinas (Unicamp), Campinas, Brazil, e-mail: ander"
+"National Institute of Informatics, Japan"
+"Charotar University of Science and Technology, Changa, India"
+UniversityofMaryland
+"Science and Technology, Sun Yat-Sen University, Guangzhou, China, 3 SYSU-CMU Shunde International"
+"Caarmel Engineering College, MG University, Kerala, India"
+"Universitat Polit cnica de Catalunya, 6 University of Vigo, 7 LIUM, University of Maine"
+"Computer Science and Engineering, University of Michigan, Ann Arbor"
+"J. P. College of Engineering, India"
+"Universite Catholique de Louvain, Louvain-la-Neuve, Belgium, 4 The Queensland Brain Institute, The"
+"Center for Machine Vision and Signal Analysis (CMVS), University of Oulu, Finland"
+"iCV Research Group, Institute of Technology, University of Tartu, Tartu 50411, Estonia"
+"Engineering Institute, Autonomous University of Baja California, Blvd. Benito Ju rez"
+Technical University of Kaiserslautern
+"M.Tech Student, SSG Engineering College, Odisha, India"
+"learning. As a result of this research, many applications, including video surveillance systems"
+Taizhou University
+"Goldsmiths, University of London, London, UK"
+"China Mobile Research Institute, Xuanwu Men West Street, Beijing"
+"Human and Health Sciences, Swansea University, Swansea, United Kingdom, 3 Abertawe Bro-Morgannwg"
+"Multimodal Computing and Interaction, Saarland University, Germany"
+"School of Computer Science, Northwestern Polytechnical University, China"
+"Intelligent Systems Group, University of Groningen, The Netherlands"
+"Center for Automation Research, UMIACS, University of Maryland, College Park, MD 20742 USA"
+"College of software, Chongqing University of Posts and Telecommunications Chongqing"
+"Vision and Sensing, HCC Lab, ESTeM, University of Canberra"
+"Computer Science and Arti cial Intelligence Laboratory, Massachusetts Institute of Technology, Cambridge, MA, USA"
+"P.A. College of Engnineering, Mangalore"
+"Beijing Institute of Technology, Beijing 100081, PR China"
+"M.H Saboo Siddik College of Engineering, University of Mumbai, India"
+"School of Computer Science, Carnegie Mellon University, USA"
+"Computer Science Division, The Open University of Israel, Israel"
+Achariya college of Engineering Technology
+"Courant Institute of Mathematical Sciences and Google Research, New York, NY"
+High Institute of Medical Technologies
+"cid:63) Faculty of Computing, Information Systems and Mathematics, Kingston University London"
+Sun Yat-Sen (Zhongshan) University
+"School of Electrical and Information Engineering, The University of Sydney, Sydney, NSW, Australia, 2 Sydney Medical"
+"Institute for Infocomm Research, Singapore"
+"ECSE, Rensselaer Polytechnic Institute, Troy, NY"
+"National Institute of Optics, National Research Council, Arnesano, LE, Italy"
+"Xi an Jiaotong University, China"
+"North Dakota State University, Fargo, ND58105, USA"
+University of Twente 2Dublin City University 3Oxford University
+"School of Electrical, Computer and Energy Engineering, Arizona State University"
+"Dr.Pauls Engineering College, Villupuram District, Tamilnadu, India"
+"tum Computation and Intelligent Systems, Faculty of Engineering and Information Technology, University"
+"Machine Intelligence Group, School of Computer Science, Indian Institute of Information Technology and"
+"University of Technology, Guangzhou, 510640, P.R.China"
+"Queen Mary, University of London, E1 4NS, UK"
+University of Wollongong. For further information contact the UOW
+"Engineering, Ton Duc Thang University, 19 Nguyen Huu Tho Street, Ho Chi Minh City, Vietman"
+College of Computer Science and Technology
+"Coordinated Science Lab, University of Illinois at Urbana-Champaign"
+Gangnung-Wonju National University
+"Institute of Digital Media, Peking University, Beijing, 100871, China"
+"Aristotle University of Thessaloniki, Box 451, 54124 Thessaloniki, Greece"
+"cid:2)Imperial College London, U.K"
+"School of Computer Science and Engineering, Southeast University, Nanjing 211189, China"
+"Cognitive Arti cial Intelligence, Utrecht University, Heidelberglaan 6, 3584 CD, Utrecht"
+"School of Computer Science, Northwestern Polytechnical University, P.R.China"
+"Friedrich Schiller University, D-07740 Jena"
+Mahatma Gandhi Institute of Technology
+"Research Center for Information Technology Innovation, Academia Sinica, Taipei, Taiwan"
+"Shenzhen Key Lab of CVPR, Shenzhen Institutes of Advanced Technology"
+"Max Planck Institute for Informatics, Saarbr ucken, Germany (MPI-INF.MPG.DE"
+B.S. University of Indonesia
+"University of T ubingen, T ubingen, Germany"
+"School of Computer Science, The University of Adelaide, Australia"
+"University of Amsterdam, Amsterdam, the Netherlands, 2 Leiden University"
+Honda Research Institute USA
+"Rm 1365, Stanford University, 401 Quarry Road, Stanford, CA"
+"Machine Intelligence Laboratory, College of Computer Science, Sichuan University"
+"University of Nottingham, UK, School of Computer Science"
+"Massachusetts Institute of Technology, 2Facebook Applied Machine Learning, 3Dartmouth College"
+"Sogang University, Seoul 121-742, Republic of Korea"
+Imperial College London / Twente University
+"National University of Singapore, 2Shanghai Jiao Tong University"
+"School of Software, Tsinghua University, Beijing 100084, China"
+"SRV Engineering College, sembodai, india"
+Central Mechanical Engineering Research Institute
+"Shenzhen Key Laboratory of Spatial Smart Sensing and Service, Shenzhen University, P.R. China"
+"Gayathri.S, M.E., Vins Christian college of Engineering"
+"Universiteit van Amsterdam, Roeterstraat 15, 1018 WB Amsterdam, The Netherlands"
+"CVSSP, University of Surrey, UK"
+Sanghvi Institute of Management and Science
+"Multimedia Laboratory, The Chinese University of Hong Kong, Hong Kong"
+"U.S. Army Research Laboratory, 2800 Powder Mill Road, Adelphi, MD USA"
+"National Laboratory on Machine Perception, Peking University, Beijing, P.R. China"
+"Computer Science, Beijing Institute of Technology, Beijing 100081, P.R.China"
+College of Information and Electrical Engineering
+"aResearch Scholar, Anna University, Chennai, Inida"
+"School of Computer Science, Tianjin University"
+"Johns Hopkins University, 3400 N. Charles Street, Baltimore, MD 21218, USA"
+"b DEI - University of Padova, Via Gradenigo, 6 - 35131- Padova, Italy"
+"Amsterdam; and 3Center for Experimental Economics and Political Decision Making, University of Amsterdam"
+"Moscow Institute of Physics and Technology, Institutskiy per., 9, Dolgoprudny, 141701, Russia"
+University of California at San Diego
+"Engineering, University of Akron, Akron, OH 44325-3904, USA"
+"DIEI, University of Perugia, Italy"
+"Michigan State University, 3115 Engineering Building"
+"Publication details, including instructions for authors and subscription"
+"Max Planck Institute for Informatics, Saarbr ucken, Germany"
+"iCV Research Group, Institute of Technology, University of Tartu, Tartu 50411, Estonia"
+"University College London, 12 Queen Square, London WC1N 3BG, UK"
+"Jawaharlal Technological University, Anantapur"
+SAMSI and Duke University
+"School of Electrical and Information Engineering, Xi an Jiaotong University, Xi an, China"
+"MISC Laboratory, Constantine 2 University, Constantine, Algeria"
+"SRV Engineering College, sembodai, india"
+"Institute of Computing Technology, CAS"
+Institute of control science and engineering
+National Institute of Informatics
+"Centre for Vision, Speech and Signal Processing, University of Surrey, Guildford, GU2 7XH"
+Institute for Robotics and Intelligent Systems
+St. Anne s College
+"School of Mathematics and Statistics, Xi an Jiaotong University, P. R. China"
+"DSP Lab, Sharif University of Technology, Tehran, Iran"
+"J. P. College of Engineering, India"
+"National Laboratory on Machine Perception, Peking University, Beijing, P.R. China"
+SUS college of Engineering and Technology
+"methods, including graph matching, optical- ow-based"
+"Turin, Italy, 3 Faculty of Humanities, Research Unit of Logopedics, Child Language Research Center, University of Oulu, Oulu"
+"Michigan State University, NEC Laboratories America"
+"Polytechnic University of Milan, Milan, 20156, Italy, 3 Applied Electronics"
+"The Blavatnik School of Computer Science, Tel Aviv University, Israel"
+"School of Information Engineering, Guangdong Medical College, Song Shan Hu"
+"Nanjing University of Information Science and Technology, Nanjing, 210044, China"
+M. Mark Everingham University of Leeds
+University of California at San Diego
+"Institute of Computing Technology, Chinese Academy of Sciences, Beijing 100190, China"
+"PG Scholar, Dr.Pauls Engineering College, Villupuram District, Tamilnadu, India"
+"Cognitive Brain Research Unit, Institute of Behavioural Sciences, University of"
+"Cornell University, Ithaca, NY, U.S.A"
+"Pathological anxiety is associated with disrupted cognitive processing, including working memory and"
+"Carnegie Mellon University, Electrical and Computer Engineering"
+"Key Laboratory of Embedded System and Service Computing, Ministry of Education, Tongji University, Shanghai, China"
+University of Sfax
+"Arts, Commerce and Science College, Gangakhed, M.S, India"
+"Sejong University, 98 Gunja, Gwangjin, Seoul 143-747, Korea"
+"Education, Yunnan Normal University, Kunming, China"
+"Arti cial Intelligence Institute, China"
+"bFaculty of Engineering, International Islamic University, Jalan Gombak, 53100 Kuala Lumpur, Malaysia"
+"University of Zagreb, Faculty of Electrical Engineering and Computing, Unska 3, 10000 Zagreb, Croatia"
+"AncyRijaV, Author is currently pursuing M.E (Software Engineering) in Vins Christian College of"
+"Imperial College London, On do"
+Xerox Research Center Webster
+"University of Minnesota-Twin Cities, Minneapolis"
+"School of Computer Science and Technology, Harbin Institute of Technology, China"
+"Dalio Institute of Cardiovascular Imaging, Weill Cornell Medical College"
+"Shenzhen Research Institute, The Chinese University of Hong Kong, Shenzhen 518057, China"
+"Shanghai Institute of Applied Physics, Chinese Academy of Sciences"
+"National Laboratory on Machine Perception, Peking University, Beijing, P.R. China"
+"Institute of Chinese Payment System, Southwestern University of Finance and Economics, Chengdu 610074, China"
+TechnicalUniversityofDenmark
+"Polytechnic University of Milan, Milan, 20156, Italy, 3 Applied Electronics"
+"School of Computer Science and Technology, Shandong University"
+"P.S.R Engineering College, Sivakasi, Tamilnadu, India"
+"Institute Polythechnic of Leiria, Portugal"
+"Institute for Human-Machine Communication, Technische Universit at M unchen, Germany"
+DVMM Lab - Columbia University
+cid:1)Institute for Neural Computation
+years. According to the definition by the National Institute
+Vietnam National University of Agriculture
+"Institute of Experimental Biology of Polish Academy of Sciences, Warsaw, Poland"
+"Information Systems Design, Doshisha University, Kyoto, Japan"
+"Director, Hindustan College of Arts and Science, Coimbatore, Tamil Nadu, India"
+University of Cagliari
+Federal University of Para ba
+"Institute for Medical Engineering Science, Massachusetts Institute of Technology, 77 Massachusetts Avenue, Cambridge, MA"
+"Vision and Fusion Laboratory (IES), Karlsruhe Institute of Technology (KIT"
+"b Computer Technology Institute, Beijing Union University, 100101, China"
+"University Station C0500, Austin TX 78712, USA"
+"Graduate School of Informatics, Kyoto University, Kyoto 606-8501, Japan"
+"Institute of Automation, Chinese Academy of Sciences, China"
+Vietnam National University of Agriculture
+University of Lac Hong 10 Huynh Van Nghe
+"Nanyang Technological University, 2University of California San Diego"
+"National Cheng Kung University, Tainan, Taiwan, ROC"
+University of Cagliari
+"German Research Center for Arti cial Intelligence (DFKI), Kaiserslautern, Germany"
+"PES Institute of Technology, Bangalore, Karnataka, India"
+"Robotics Institute, Carnegie Mellon University, Pittsburgh PA"
+Intelligence Computing Research Center
+"Technical University of Ostrava, FEECS"
+"Much is known on how facial expressions of emotion are produced, including which individual muscles are most active in"
+"Institute for Medical Engineering Science, Massachusetts Institute of Technology, 77 Massachusetts Avenue, Cambridge, MA"
+"Image and Video Laboratory, Queensland University of Technology (QUT), Brisbane, QLD, Australia"
+"School of Information Technology and Management, University of International"
+"Gokaraju Rangaraju Institute of Engineering and Technology, Hyd"
+"Coordinated Science Lab, University of Illinois at Urbana-Champaign"
+Polytechnic University of Bucharest
+"Scienti c Visualization and Computer Graphics, University of Groningen, Nijenborgh 9, Groningen, The Netherlands"
+"University of California, Santa Cruz"
+"b Computer Technology Institute, Beijing Union University, 100101, China"
+"Intelligent User Interfaces Lab, Ko c University, Turkey"
+"Neuroscience, Icahn School of Medicine at Mount Sinai, Friedman Brain Institute, New York, NY, United States"
+"School of Computing Science, Simon Fraser University, Burnaby, B.C., Canada"
+"Faculty of Computers and Information, Cairo University, Cairo, Egypt"
+"Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences, China"
+"University, Taiwan, R.O.C"
+"Beckman Institute, University of Illinois at Urbana-Champaign, IL USA"
+Dietrich College Honors Theses
+"Institute AIFB, Karlsruhe Institute of Technology, Germany"
+St. Anne s College
+"FI-90014 University of Oulu, Finland"
+"Center for Automation Research, UMIACS, University of Maryland, College Park, MD 20742, USA"
+"Centre for Intelligent Machines, McGill University, Montreal, Canada"
+"University of Milano-Bicocca, Italy"
+"Michigan State University, 3115 Engineering Building"
+"Aristotle University of Thessaloniki, Thessaloniki 541 24, Greece"
+"B.Tech (C.S.E), Bharath University, Chennai"
+"Address correspondence to: Karen L. Schmidt, University of"
+"School of Computer Science and Engineering, Southeast University, Nanjing 210096, China"
+"The Blavatnik School of Computer Science, Tel Aviv University, Israel"
+"Graduate School of Doshisha University, Kyoto, Japan"
+"EEMCS, University of Twente"
+"The Hong Kong Polytechnic University, Hong Kong, SAR, 2University of Technology Sydney, Australia"
+Compi`egne University of Technology
+"Information Systems Design, Doshisha University, Kyoto, Japan"
+"Human Centered Multimedia, Augsburg University, Germany"
+"School of Computing and Info. Sciences, Florida International University"
+"School of Information Technology and Electrical Engineering, The University of Queensland"
+"Computer Science and Engineering, University of Washington, Seattle, WA, USA"
+"Mangalore Institute of Engineering and Technology, Badaga"
+"University of Barcelona and Computer Vision Centre, Barcelona, Spain"
+"Information and Media Processing Research Laboratories, NEC Corporation"
+"cid:2)Imperial College London, U.K"
+cid:1)Institute for Neural Computation
+Opus College of Engineering
+"University of Cambridge, The Computer Laboratory, Cambridge CB3 0FD, U.K"
+"Dalio Institute of Cardiovascular Imaging, Weill Cornell Medical College"
+"Principal, JNTUH College of Engineering, jagitial, Karimnagar, AP, India"
+"Human Media Interaction, University of Twente, P.O. Box"
+"Recognition, Institute of Automation, Chinese Academy of Sciences"
+"College of Computer and Information Science, Northeastern University, MA, USA"
+"College of Sciences, Northeastern University, Shenyang 110819, China"
+Kyung Hee University South of Korea
+"cid:93) Faculty of Science and Technology, University of Macau"
+"M.H Saboo Siddik College of Engineering, University of Mumbai, India"
+"Institute of Psychology, Chinese"
+Allen Institute for Arti cial Intelligence (AI
+"Mitsubishi Electric Research Labs (MERL), Cambridge, MA, USA"
+Tsinghua University 4SenseTime
+"School of Electrical, Computer and Energy Engineering, Arizona State University"
+"Electrical, Computer, Rensselaer Polytechnic Institute"
+"College of Computer Science, Zhejiang University, Zhejiang, China"
+"Language Technologies Institute, Carnegie Mellon University, PA, USA"
+"Imperial College, 180 Queens Gate"
+"MIRACL-FSEG, University of Sfax"
+"and Mathematical Biosciences Institute, The Ohio State University"
+Ministry of Higher Education and Scientific Research / The University of Mustsnsiriyah/Baghdad IRAQ
+"Leiden, the Netherlands, 3 Delft University of Technology"
+"MIRACL-FS, University of Sfax"
+"Program of Computational Science and Engineering, Bo gazi ci University"
+"Imperial College London, On do"
+"Institute of Media and Information Technology, Chiba University"
+"Facebook AI Research, 2Dartmouth College"
+Toyota Technological Institute at Chicago
+"Graduate School of Information Science and Technology, The University of Tokyo"
+"Recanati Genetic Institute, Rabin Medical Center and Schneider Children s Medical Center, Petah Tikva, Israel"
+"Vision and Fusion Laboratory (IES), Karlsruhe Institute of Technology (KIT"
+"New York University Shanghai, 1555 Century Ave, Pudong"
+"China, 2 Changchun Institute of Optics, Fine Mechanics and Physics, CAS, Changchun, China, 3 School of"
+"Sensor-enhanced Social Media (SeSaMe) Centre, National University of Singapore, Singapore"
+"Computer Vision Group, Xerox Research Center Europe, Meylan, France"
+Nokia Bell Labs and University of Oxford
+cid:1) Honda Research Institute
+"Institute of Cognitive Neuroscience, University College London, London WC1N 3AR, UK. 2Affective Brain"
+"RIEB, Kobe University, Kobe, 657-8501, Japan"
+"Queen Mary, University of London, London E1 4NS, UK"
+St. Francis Institute of Technology
+AgnelAnushya P. is currently pursuing M.E (Computer Science and engineering) at Vins Christian college of
+"Anjuman College of Engineering and Technology, Sadar, Nagpur, India"
+Electronics and Telecommunications Research Institute
+"Deva Ramanan, University of California at Irvine"
+"Sathyabama University, Chennai, India"
+"State Key Laboratory of Pulp and Paper Engineering, South China University of Technology, Guangzhou 510640, China"
+"National Key Laboratory for Novel Software Technology, Nanjing University, China"
+"School of Mathematics and Computer Science, Northeastern State University, Tahlequah, OK 74464, USA"
+Xerox Research Center Webster
+"Technology, University of Oradea 410087, Universitatii 1, Romania"
+"B.S., E.E., Bo azi i University"
+in The University of Michigan
+"Kitware, Inc"
+University of Toronto and Recognyz Systems Technologies
+"IHCC, RSCS, CECS, Australian National University"
+"School of Software, Dalian University of Technology, Tuqiang St. 321, Dalian 116620, China"
+"Faculty of EEMCS, University of Twente, The Netherlands"
+"Fraunhofer Institute of Optronics, System Technologies and Image Exploitation (Fraunhofer IOSB"
+"Institute for Robotics and Intelligent Systems, USC, CA, USA"
+"MTech Student 1, 2, Disha Institute of"
+"Michigan State University, NEC Laboratories America"
+"MATS University, MATS School of Engineering and Technology, Arang, Raipur, India"
+"Head and Neck Surgery, Seoul National University"
+"cid:63) Imperial College London, UK"
+"Institute for Human-Machine Communication, Technische Universit at M unchen, Germany"
+"University of Maryland, College Park"
+"Visualization and Computer Vision Lab, GE Global Research Center"
+"University of California at Los Angeles, Los Angeles, CA, USA"
+"aResearch Scholar, Anna University, Chennai, Inida"
+"Vision Laboratory, LARSyS, University of the Algarve, 8005-139 Faro, Portugal"
+massachusetts institute of technology artificial intelligence laboratory
+University of Colorado at Colorado Springs
+National Institute of Informatics
+"School of Information Engineering, Guangdong University of Technology, 510006 Guangzhou, China"
+"State Key Lab of CADandCG, Zhejiang University, Hangzhou, Zhejiang, China"
+"cid:93)Peking University Shenzhen Graduate School, Shenzhen, P.R.China"
+Downloaded from the University of Groningen/UMCG research database (Pure): http://www.rug.nl/research/portal. For technical reasons the
+"University of Zagreb, Faculty of Electrical Engineering and Computing, Croatia"
+"EEMCS, University of Twente, The Netherlands"
+This work was supported by Grant MOP102637 from the Canadian Institutes of Health Research to E.D.R. and the
+Queen's University Belfast - Research Portal
+"Graduate School of Science and Engineering, Saitama University"
+"QCIS, University of Technology Sydney, Sydney, Australia"
+at The Australian National University
+"Montreal Institute for Learning Algorithms, Universit e de Montr eal"
+"Sapienza University of Rome, Italy"
+Xidian University 2Xi an Jiaotong University 3Microsoft Research Asia
+"Academic Center for Computing and Media Studies, Kyoto University, Kyoto 606-8501, Japan"
+"Director, Hindustan College of Arts and Science, Coimbatore, Tamil Nadu, India"
+ATR Interpreting Telecommunications Research Laboratories
+ATR Human Information Processing Research Laboratory
+"Computer Science and Arti cial Intelligence Laboratory, Massachusetts Institute of Technology, Cambridge, MA, USA"
+"Institute for Neural Computation, University of California, San Diego, La Jolla, CA"
+"CAS), Institute of Computing Technology, CAS, Beijing, 100190, China"
+"Institute for Human-Machine Communication, Technische Universit at M unchen"
+The University of Adelaide; and Australian Centre for Robotic Vision
+Sanghvi Institute of Management and Science
+"Chalmers University of Technology, SAFER"
+"general term, including collaboration. Interaction determines action on someone"
+Boston University Computer Science Technical Report No
+"Institute of Advanced Technology, Nanjing University of Posts and Telecommunications, Nanjing"
+"School of Information Engineering, Nanchang University, China"
+Mahatma Gandhi Institute of Technology
+"Computer Science, Princeton University, Princeton, NJ, USA"
+by grants from the National Institute of Mental Health (MH 15279 and MH067976 (K. Schmidt
+"Innopolis University, Kazan, Russia"
+"Johns Hopkins University, 3400 N. Charles St, Baltimore, MD 21218, USA"
+"Bo gazi ci University, Turkey"
+Honda Research Institute
+Electronic Engineering and Computer Science Queen Mary University of London
+"Graduate Institute of Electronics Engineering, National Taiwan University"
+"Informatics and Telematics Institute, Centre for Research and Technology Hellas"
+"Cambridge University, Trumpington Street, Cambridge CB21PZ, UK"
+"Intelligent Autonomous Systems (IAS), Technical University of Munich, Garching"
+"School of Electronics and Information, Northwestern Polytechnical University, China"
+"Institute of Scienti c and Industrial Research, Osaka University, Ibaraki-shi 567-0047, Japan"
+"School of Computer Science and Technology, Shandong University"
+"Australian National University and NICTA, Australia"
+"The Blavatnik School of Computer Science, The Tel-Aviv University"
+"School of Financial Information Engineering, Southwestern University of Finance and Economics, Chengdu"
+"Faculty of Electrical Engineering, Czech Technical University in Prague"
+"Section of Pathology, Second University of Naples, Via L. Armanni"
+"Medical Image Analysis Lab, School of Computing Science, Simon Fraser University, Canada"
+"Psychology, University of Illinois, Beckman Institute, Urbana-Champaign, Illinois 61801, University of"
+"aSchool of Computing and Mathematics, Charles Sturt University, Bathurst, NSW"
+Signal Processing Institute
+"School of Computer Science and Technology, Harbin Institute of Technology, China"
+"RCC Institute of Information Technology, Kolkata, India"
+"Nagoya Institute of Technology, Gokiso, Showa-ku, Nagoya, 466-8555 Japan"
+Psychiatry at the University of Pittsburgh
+"USC Information Sciences Institute (ISI), Marina Del Rey, CA"
+Xerox Research Center India
+"The Robotics Institute, Carnegie Mellon University"
+"Numediart Institute, University of Mons"
+"Jilin University, Changchun 130012, China"
+"Research Center for Information Technology Innovation, Academia Sinica, Taipei, Taiwan"
+"Interdisciplinary Program of Bioengineering, Seoul National University, Seoul 03080, Korea"
+"Beijing Institute of Technology, Beijing 100081, PR China"
+"NEC Laboratories America, Inc., Cupertino, CA"
+"National Institute of Informatics, Tokyo, Japan"
+"Institute of Experimental Biology of Polish Academy of Sciences, Warsaw, Poland"
+"Boston University, Linguistics Program, 621 Commonwealth Avenue, Boston, MA"
+"Institute Polythechnic of Leiria, Portugal"
+"Faculty of Electrical Engineering, Czech Technical University"
+"College of Computer Science, Chongqing University, Chongqing, China"
+"Institute of Automation, Chinese Academy of Sciences, Beijing 100080, China"
+"Shenzhen key lab of Comp. Vis. and Pat. Rec., Shenzhen Institutes of Advanced Technology"
+"ADSIP Research Centre, University of Central Lancashire"
+"Research Center for Intelligent Security Technology, CIGIT"
+"National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of"
+"Nanjing, 210023, China, 4 School of Computer Science and Technology, Nanjing University of Posts and"
+"School of Computer Science and Engineering, Southeast University, Nanjing 211189, China"
+"University of Pennsylvania, 2Ryerson University"
+Baidu IDL and Tsinghua University
+The Chinese University of Hong Kong holds the copyright of this thesis. Any
+"National Laboratory of Pattern Recognition, Institute of Automation"
+"Islamic Azad University, Mashhad Branch, Mashhad, Iran"
+"Universite Catholique de Louvain, Louvain-la-Neuve, Belgium, 4 The Queensland Brain Institute, The"
+"Kumamoto University, 2-39-1 Kurokami, Kumamoto shi"
+"Bilgi University, Dolapdere, Istanbul, TR"
+"LIUM Laboratory, Le Mans, France, 2 Idiap Research Institute, Martigny, Switzerland"
+"SSN College of Engineering, Chennai, India"
+"University Street, Montreal, QC H3A 0E9, Canada"
+"2015 Wiley Periodicals, Inc"
+"Technical University of Cluj Napoca, 28 Memorandumului Street"
+"FX Palo Alto Laboratory, Inc., California, USA"
+Nqtional Institute of Standards and Technology
+"Aditya College of Engineering, Surampalem, East Godavari"
+"bSchool of Computer and Control Engineering, University of Chinese Academy of Sciences"
+Purdue Institute for Integrative Neuroscience
+"Center for Machine Vision Research, University of Oulu, Finland"
+"School of Electronic and Information Engineering, Tongji University, Shanghai, China"
+"Research Center in Information Technologies, Universit e de Mons, Belgium"
+"Max Planck Institute for Informatics, Saarbr ucken, Germany (MPI-INF.MPG.DE"
+"Rochester Institute of Technology, Rochester, NY"
+"Elect. Eng. Faculty, Tabriz University, Tabriz, Iran"
+Carnegie Mellon University (CMU
+"Facial Image Processing and Analysis Group, Institute for Anthropomatics"
+National Institute of Advanced Industrial Science and Technology (AIST
+"NICTA , Queensland Research Laboratory, QLD, Australia"
+"School of Computer Science, Fudan University, Shanghai, China"
+"University at Buffalo, SUNY"
+"Foundation University, Rawalpindi 46000, Pakistan"
+College of Information Science and Engineering
+"Most of the earlier studies mentioned above, including ours"
+"Sejong University, 98 Kunja-Dong, Kwangjin-Gu, Seoul, Korea"
+"Co-Guide, Computer Science, Shah and Anchor Kuttchi Engineering College, Mumbai, India"
+DVMM Lab - Columbia University
+"Polytechnic University of Milan, Milan, 20156, Italy, 3 Applied Electronics"
+"Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences, 518055, China"
+"Institute of Chinese Payment System, Southwestern University of Finance and Economics, Chengdu 610074, China"
+College of Information Engineering
+"ment of Psychology, University of California, Berkeley"
+"University of Freiburg, Germany"
+"Machine Intelligence Laboratory, College of Computer Science, Sichuan University"
+"Madanapalle Institute of Technology and Science, Madanapalle, Andhra Pradesh"
+"National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences"
+"Changchun Institute of Optics, Fine Mechanics and Physics, Chinese Academy of Science"
+"Key Lab. of Machine Perception, School of EECS, Peking University"
+cid:63)Stanford University
+"Institute of Data Science and Technology, Alibaba Group"
+"University of Queensland, St Lucia QLD Australia, 5 Institut Universitaire de France, Paris, France"
+Rowland Institute at Harvard
+"University of Kentucky, 329 Rose St., Lexington, KY, 40508, U.S.A"
+"Several methods exists to induce anxiety in healthy individuals, including threat of shock (ToS), the Trier"
+Federal University of Campina Grande (UFCG
+"Computer Science and Engineering, University of Michigan, Ann Arbor"
+"DTU Informatics, Technical University of Denmark, DK-2800 Kgs. Lyngby, DENMARK"
+"Metron, Inc"
+"Motorola, Inc"
+"National Cheng Kung University, Tainan, Taiwan, R.O.C"
+VEER SURENDRA SAI UNIVERSITY OF
+The Australian National University Queensland University of Technology
+"Central Mechanical Engineering Research Institute, Durgapur, West Bengal, India"
+"1E1 WC Mackenzie Health Sciences Centre, University of Alberta, Edmonton, AB, Canada T6G 2R"
+"JDL, Institute of Computing Technology, CAS, P.O. Box 2704, Beijing, China"
+"National Laboratory on Machine Perception, Peking University, Beijing, P.R. China"
+"Vision Science Group, University of California"
+Institute for Anthropomatics
+"Welten Institute, Research Centre for Learning, Teaching and Technology, Faculty of"
+"Computational Science and Engineering Program, Bo gazic i University, Istanbul, Turkey"
+"School of Mechanical Engineering, Southwest Jiaotong University, Chengdu 610031, China"
+"B.A. Earlham College, Richmond Indiana"
+"Institute of Computing Technology, CAS, Beijing 100190, China"
+Canadian Institute for Advanced Research
+"Savitri Bai Phule Pune University, Maharashtra India"
+"Machine Intelligence Group, School of Computer Science, Indian Institute of Information Technology and"
+"B.E, K.S.Rangasamy College of Technology, Namakkal, TamilNadu, India"
+"School of Software, Dalian University of Technology, Dalian 116621, China"
+"Computer Applications, Ayya Nadar Janaki Ammal College, Sivakasi, India"
+"cid:63)Queen Mary University of London, Imperial College London"
+"IBM T. J. Watson Research Center, Yorktown Heights, NY, USA"
+"Kwangwoon University, 447-1 Wolge-dong, Nowon-Gu, Seoul 139-701, Korea"
+"Gujarat Technological University, V.V.Nagar, India"
+Khulna University of Engineering and Technology
+"Tarbiat Modarres University, Tehran, Iran"
+"School of Psychology, Cardiff University, Cardiff, United Kingdom, College of"
+"University of Sk vde, Sweden"
+"University of Science, Vietnam National University-Ho Chi Minh city"
+KIT University of the State of Baden-W rttemberg and National Laboratory of the Helmholtz Association
+Toyota Technological Institute Chicago (TTIC
+"ESAT-PSI, KU Leuven, 2University of Bonn, 3CV:HCI, KIT, Karlsruhe, 4Sensifai"
+"D Research Center, Kwangwoon University and Springer"
+"Marine Institute, via Torre Bianca, 98164 Messina Italy"
+Ho Chi Minh City University of
+"School of Computer Information Engineering, Jiangxi Normal University, Nanchang, China"
+"University of Twente, EEMCS, Netherlands"
+"Electronics and Communication Engineering, Chuo University"
+"applications, including texture classification [16], face recognition [12], object detection [10], and"
+Beckman Institute for Advanced Science and Technology
+"College of Computer and Information Engineering, Nanyang Institute of Technology"
+Institute of Informatics - ISLA
+"Division of Electronics and Electrical Engineering, Dongguk University, 26 Pil-dong 3-ga, Jung-gu"
+"cCentre of Intelligent Machines, McGill University, Montr eal, QC H3A 0E9, Canada"
+"Institute for Robotics and Intelligent Systems, University of Southern California, CA, USA"
+"Shanghai Institute of Applied Physics, Chinese Academy of Sciences"
+"Computer Laboratory, University of Cambridge, Cambridge, UK"
+"t2i Lab, Chalmers University of Technology, Gothenburg, Sweden"
+"CVSSP, University of Surrey, UK"
+"King Saud University, P.O. Box 51178, Riyadh 11543, Saudi Arabia"
+Institute for studies in theoretical Physics and Mathematics(IPM
+"University of Minnesota-Twin Cities, Minneapolis"
+"State Key Laboratory of Brain and Cognitive Science, Institute of Psychology"
+"Asian University, Taichung, Taiwan"
+"Abha Gaikwad -Patil College of Engineering, Nagpur, Maharashtra, India"
+"do, Rep. of Korea, Kyung Hee University, Suwon, Rep. of Korea"
+"Robotics Institute, Carnegie Mellon University 3University of Pittsburgh, USA"
+Federal University of Technology - Paran a
+"Shenzhen Research Institute, The Chinese University of Hong Kong, Shenzhen 518057, China"
+Indraprastha Institute of Information Technology
+"Assam University, Silchar-788011 Assam University, Silchar"
+"Key Laboratory of Machine Perception (MOE), School of EECS, Peking University"
+"Intel Lab, 2200 Mission College Blvd, Santa Clara, CA 95054, USA"
+"Massachusetts Institute of Technology, 77 Massachusetts Avenue, Cambridge, MA"
+IBM T. J. Watson Research Center
+"ples of such ne-grained descriptions, including attributes covering detailed"
+"Victoria University of Wellington, PO Box 600, Wellington 6140, New Zealand"
+"University at Buffalo, State University of New York"
+"Brown University, 2University of California, San Diego, 3California Institute of Technology"
+"aLawrence Technological University, 21000 W Ten Mile Rd., South eld, MI 48075, United States"
+"School of Computer Science, Tianjin University"
+"The School of Computer Science, Tel-Aviv University, Israel"
+"IBM Research, Australia, 2 IBM T.J. Watson Research Center, 3 National University of Singapore"
+"School of Mathematics and Computational Science, Sun Yat-sen University, China"
+"Intelligent Recognition and Image Processing Lab, Beihang University, Beijing"
+"Lund University, Cognimatics AB"
+"Gonda Brain Research Center, Bar Ilan University, Israel"
+Departm nt of Information Engin ering Th Chines University of Hong Kong
+University of Freiburg
+"School of Computer and Communication Engineering, University of Science and Technology Beijing, 100083 Beijing, China"
+"VSI Lab, Goethe University, Frankfurt, Germany"
+Al-Khwarizmi Institute of Computer Science
+"University of Zagreb, Unska 3, 10 000 Zagreb"
+"School of Computer Science, CECS, Australian National University, Australia"
+HELSINKI UNIVERSITY OF TECHNOLOGY
+Federal University of Para ba
+"Zhejang University, Hangzhou 310027, P.R.China"
+"Staffordshire University, Beaconside Stafford ST18 0AB, United Kingdom"
+"Pattern Recognition Group, University of Siegen"
+"University of California at Berkeley, USA"
+"Idiap Research Institute, Martigny, Switzerland, 2LIUM, University of Maine, Le Mans, France"
+The University of Queensland in
+"School of Engineering, Taylor s University"
+"Florian Metze, Chair (Carnegie Mellon University"
+TechnicalUniversityofDenmark
+"Human Genome Center, Institute of Medical Science"
+Sridevi Women's Engineering College
+"Dipartimento di Sistemi e Informatica, University of Florence"
+"School of Computing and Communications, University of Technology Sydney, Sydney, Australia"
+"P.A. College of Engnineering, Mangalore"
+"Heudiasyc Laboratory, CNRS, University of Technology of Compi`egne"
+"University of Washington, Bothell"
+"Sighthound, Inc"
+"Nam k Kemal University, Tekirda g, Turkey"
+"Shenzhen Key Lab of Comp. Vis. and Pat. Rec., Shenzhen Institutes of Advanced Technology, CAS, China"
+"cSchool of Astronautics at Beihang University, 100191, Beijing, P.R.C"
+"Shenzhen Key Lab of CVPR, Shenzhen Institutes of Advanced Technology, CAS, China"
+University of Toulouse II Le Mirail
+"Myongji University, Yongin, 449-728 South"
+"College of software, Chongqing University of Posts and Telecommunications Chongqing"
+"University of Alicante, 03690, San Vicente del Raspeig, Alicante, Spain"
+CUNY Graduate Center and City College
+"School of Comm. and Info. Engineering, Beijing University of Posts and Telecom., Beijing China"
+"atry, University of Pennsylvania School of Medicine, Philadelphia, PA"
+b Institute for Robotics and Intelligent Systems
+"Faculty of Electrical Engineering, University of Ljubljana, Tr za ska cesta 25, SI-1000 Ljubljana, Slovenia"
+The Hong Kong Polytechnic University 2Harbin Institute of Technology
+Samsung Advanced Institute of Technology
+"National Laboratory of Pattern Recognition, Institute of Automation, CAS, Beijing, 100190, China"
+Baidu Research Institute of Deep Learning
+"The Amsterdam School of Communication Research, University of Amsterdam"
+"College of Information Science and Engineering, Ritsumeikan University, Shiga 525-8577, Japan"
+MICC - University of Florence
+"St. Xavier s Catholic College of Engineering, Nagercoil, India"
+"Shanghai Jiao Tong University, 800 Dongchuan Road, Shanghai 200240, China"
+"School of Computer Science, University of Lincoln, U.K"
+Tomas Bata University in Zl n
+"Queensland Micro- and Nanotechnology Centre and Grif th School of Engineering, Grif th University"
+"Institiude of Computer Science and Technology, Peking University"
+"Kyung Hee University, Yongin, Rep. of Korea"
+cid:3)The Salk Institute and Howard Hughes Medical Institute
+"School of Information Science and Technology, Donghua University, Shanghai 200051, China"
+"P.S.R Engineering College, Sivakasi, Tamilnadu, India"
+"Korea Electronics Technology Institute, 203-103 B/D 192, Yakdae-Dong"
+"Computer Vision Laboratory, University of Nottingham, Nottingham, UK"
+"Visual Geometry Group, University of Oxford"
+"School of EECS, Queen Mary University of London, UK"
+"Xi'an Institute of Optics and Precision Mechanics, Chinese Academy of Sciences"
+years. According to the definition by the National Institute
+"G.H.Raisoni College of Engg. and Mgmt., Pune, India"
+"Institute for Infocomm Research (I2R), A*STAR, Singapore"
+"instance has been detected (e.g., a face), it is be possible to obtain further information, including: (i"
+In the Graduate College
+"Pursuing M.Tech, Caarmel Engineering College, MG University, Kerala, India"
+Helsinki University of Technology Laboratory of Computational Engineering Publications
+Conference on CyberGames and Interactive Entertainment (pp. 52-58). Western Australia: Murdoch university
+"University of Exceter, Exceter, UK"
+"Recognition, Institute of Automation"
+"University of Toronto, 6 Kings College Road, Toronto, ON M5S 3G4 CANADA"
+"Key Laboratory of Computer Network and Information Integration of Ministry of Education, Southeast University, Nanjing"
+"DISI, University of Trento, Trento, Italy"
+Institute of Electrical and Electronics Engineers (IEEE). DOI
+"Link oping University, SE-581 83 Link oping, Sweden"
+"Mackenzie Presbyterian University, S o Paulo, S o Paulo, Brazil"
+"Applied computing and mechanics laboratory, Swiss Federal Institute of Technology, 1015 Lausanne, Switzerland"
+"Dayananda Sagar College of Engg., India"
+"Electrical and Computer Engineering, University of Auckland, New Zealand"
+"Electrical Engineering Institute, EPFL"
+"Centre for Quantum Computation and Intelligent Systems, University of Technology Sydney, Sydney, Australia"
+"Institute for Pattern Recognition and Artificial Intelligence/ Huazhong University of Science and Technology, Wuhan"
+"School of Computer and Information, Hefei University of Technology, Hefei"
+"Center for Cognitive Science, University of Turin, Turin, Italy, 2 Neuroscience Institute of Turin"
+"Engg, Priyadarshini College of"
+"Cognitive Systems Lab, Karlsruhe Institute of Technology, Karlsruhe, Germany"
+tional Taipei University for his help in performing simulations. The author would like to thank Mr. Ming
+"Skolkovo Institute of Science and Technology (Skoltech), Russia"
+Mitsubishi Electric Research Labs (MERL
+"School of Electrical and Computer Engineering, Cornell University"
+Slovak University of Technology in
+"College of Computer Science and Software Engineering, Shenzhen University, Shenzhen 518060, China"
+"Multimodal Computing and Interaction, Saarland University, Germany"
+"Center for Cognitive Neuroscience, Duke University, Durham, North Carolina"
+"Computer Vision Group, Friedrich Schiller University Jena"
diff --git a/scraper/reports/institutions_not_found/not-found-3.csv b/scraper/reports/institutions_not_found/not-found-3.csv
new file mode 100644
index 00000000..e3bc00ca
--- /dev/null
+++ b/scraper/reports/institutions_not_found/not-found-3.csv
@@ -0,0 +1,1373 @@
+"MIRACL-FS, University of Sfax"
+"Changchun Institute of Optics, Fine Mechanics and Physics, Chinese Academy of Science"
+"Division of Computer Engineering, Chonbuk National University, Jeonju-si, Jeollabuk-do"
+"The Blavatnik School of Computer Science, The Tel-Aviv University"
+"Deparment of Computing, Imperial College London, UK"
+"University of Toronto, 6 Kings College Road, Toronto, ON M5S 3G4 CANADA"
+"SSN College of Engineering, Kalavakkam, Tamil Nadu, India"
+"BRIC, University of North Carolina at Chapel Hill, NC 27599, USA"
+"IIE, Universidad de la Rep ublica, Uruguay. 2ECE, Duke University, USA"
+"SRM University, Kattankulathur, Chennai-603 203, Tamilnadu, India"
+"School of Computer Science, University of Birmingham, UK"
+Stevens Institute of Technology Adobe Systems Inc
+"College of Computers and Information Systems, Umm Al-Qura University, Makkah, KSA"
+"ITCS, Institute for Interdisciplinary Information Sciences, Tsinghua University, Beijing"
+Tomas Bata University in Zl n
+"School of Computing and Info. Sciences, Florida International University"
+"Numediart Institute, University of Mons"
+in the College of Engineering and Computer Science
+"Pursuing M.Tech, Caarmel Engineering College, MG University, Kerala, India"
+The University of Queensland in
+"University Politehnica of Bucharest, Romania, Address Splaiul Independent ei"
+M. Mark Everingham University of Leeds
+"CVIP Lab, University of Louisville, Louisville, KY, USA"
+"School of Psychology, University of Auckland, Auckland, New Zealand"
+"University of Shef eld, Regent Court, 211 Portobello, Shef eld"
+"Babes Bolyai University, 58-60 Teodor Mihali, C333, Cluj Napoca"
+"Faculty of Telecommunications, Technical University, Sofia, Bulgaria"
+"University of Science, VNU-HCMC, Ho Chi Minh city, Vietnam"
+"RoboticsResearchGroup, UniversityofOxford, Oxford, UK"
+"School of Computer Science, Fudan University, Shanghai, China"
+"BECS, Aalto University, Helsinki, Finland"
+"Beihang University 2Gri th University 3University of York, UK"
+"School of Comm. and Info. Engineering, Beijing University of Posts and Telecom., Beijing China"
+Tsinghua-CUHK Joint Research Center for Media Sciences
+Sabanc University
+"Viswajyothi College of Engineering and Technology Kerala, India"
+"Fudan University, 2Microsoft Research Asia, 3University of Maryland"
+"School of Computer Science, University of Nottingham"
+"Advanced Digital Sciences Center (ADSC), University of Illinois at Urbana-Champaign, Singapore"
+"University at Buffalo, SUNY"
+"University of Maryland, Center for Automation Research"
+"MRC Laboratory For Molecular Cell Biology, University College London"
+"IslamicAzad University, Qazvin, Iran"
+KIT University of the State of Baden-W rttemberg and National Laboratory of the Helmholtz Association
+"Research Center E. Piaggio , University of Pisa, Pisa, Italy, 2 Faculty of Psychology, University of Florence, Florence, Italy"
+"University of Zagreb, Faculty of Electrical Engineering and Computing, Unska 3, 10000 Zagreb, Croatia"
+This article was downloaded from Harvard University s DASH
+"Program of Computational Science and Engineering, Bo gazi ci University"
+"Faculty of Mathematics and Computer Science, University of Barcelona, Barcelona, Spain"
+"DISI, University of Trento, Italy"
+College of Information Engineering
+"Faculty of Information Technology, Barrett Hodgson University, Karachi, Pakistan"
+"bUniversity of Nottingham, School of Computer Science, Nottingham, UK"
+"P. O. Box 4500 FI-90014 University of Oulu, Finland"
+University of California at Berkeley / ICSI
+"Intelligent User Interfaces Lab, Ko c University, Turkey"
+"Institute of Computing Technology, Chinese Academy of Sciences, Beijing 100190, China"
+Honda Fundamental Research Labs
+"Queensland Micro- and Nanotechnology Centre and Grif th School of Engineering, Grif th University"
+"Division of IT Convergence, Daegu Gyeongbuk Institute of Science and Technology"
+"University of Zaragoza, Spain"
+"Assam University, Silchar-788011 Assam University, Silchar"
+"School of Computer Software, Tianjin University, 300072 Tianjin, China"
+"University of Alicante, 03690, San Vicente del Raspeig, Alicante, Spain"
+"Grif th University, QLD, Australia"
+"Ecole Polytechnique Federale de Lausanne, Signal Processing Institute"
+"Lab, University College London, London WC1H 0AP, UK. 3Clinical"
+"Institute for Neural Computation, University of California, San Diego, La Jolla, CA"
+"Asian University, Taichung, Taiwan"
+"The Robotics Institute, Carnegie Mellon University, Pittsburgh PA"
+"Sapienza University of Rome, Italy"
+"University of Milano-Bicocca, Italy"
+"Centre for Quantum Computation and Intelligent Systems, University of Technology Sydney, Sydney, Australia"
+Sponsors: Machine Intelligence Research Labs (MIR Labs
+"University of S ao Paulo - USP, S ao Paulo - Brazil"
+"Machine Perception Laboratory, University of California, San Diego"
+"Ross School of Business, University of Michigan, Ann Arbor, MI, USA"
+"University of Barcelona, Gran Via de les Corts Catalanes 585, 08007 Barcelona, Spain"
+University of Cagliari
+"LIP6, UPMC - Sorbonne University, Paris, France"
+"Baidu Research, USA 3John Hopkins University"
+"Computer Science and Software Engineering, The University of Western Australia"
+"Computer Vision Laboratory, The University of Nottingham"
+"L3S Research Center, Hannover, Germany"
+"Science and Intelligence Technology, Shanghai Institutes for Biological Sciences, CAS"
+"School of Computer Science and Technology, Tianjin University, 300072 Tianjin, China"
+"a Institute of Anatomy I, School of Medicine, Friedrich Schiller University, Germany"
+"he University of Hong Kong, Pokfulam"
+"bSchool of Computer Science and Technology, Harbin Institute of Technology, Harbin 150001, China"
+"image being generated by the model, include Active Appearance"
+"Institute of Neuroscience, State Key Laboratory of Neuroscience, CAS Center for Excellence in Brain"
+"Institute of Advanced Technology, Nanjing University of Posts and Telecommunications, Nanjing"
+"Robotics Institute, Carnegie Mellon University, Pittsburgh, PA, 15213. USA"
+"f Neuropsychiatry Section, University of Pennsylvania Medical Center, Hospital of the University of Pennsylvania"
+"School of Computer Science and Information Systems, Birkbeck College, University of London"
+"Queen Mary, University of London"
+University of Sfax
+SUS college of Engineering and Technology
+"School of Psychology, Cardiff University, Cardiff, CF10 3AT, UK"
+"Faculty of Information Technology, Vietnam National University of Agriculture, Hanoi 10000, Vietnam"
+College of Information Science and Engineering
+"Facial expression gures prominently in research on almost every aspect of emotion, including psychophys"
+"Institute for Neural Computation, University of California, San Diego"
+"Proto Labs, Inc"
+Institute of Psychology and Behavioral Sciences
+ATR Interpreting Telecommunications Research Laboratories
+Fraunhofer Institute for Integrated Circuits IIS
+"Chalmers University of Technology, SAFER"
+"Computer Vision Research Group, COMSATS Institute of Information"
+"TNLIST, Tsinghua University, Beijing, 100084, China"
+"School of Computer Science, University of Lincoln, U.K"
+"Suhaila N. Mohammed, Baghdad University, College of Science, Baghdad, Iraq"
+"The School of Electrical and Electronic Engineering, Yonsei University, 134 Shinchon-Dong"
+"St. Xavier s Catholic College of Engineering, Nagercoil, India"
+"Applied computing and mechanics laboratory, Swiss Federal Institute of Technology, 1015 Lausanne, Switzerland"
+"Human-friendly Welfare Robotic System Engineering Research Center, KAIST"
+Language Technologies Institute
+Shaheed Zulfikar Ali Bhutto Institute of Science and Technology Islamabad
+"Doctor of Philosophy in Computer Science at Cardi University, July 24th"
+"Faculty of Electrical Engineering, University of Ljubljana, Tr za ska 25, SI-1000 Ljubljana, Slovenia"
+Northwestern University) to T.E. We thank Vincent De Gardelle for helpful comments on an earlier version of
+"Faculty of Electrical Engineering, University of Ljubljana, Tr za ska cesta 25, SI-1000 Ljubljana, Slovenia"
+"HOD, St. Joseph College of Information Technology, Songea, Tanzania"
+"Birkbeck College, University of London"
+"The Image Processing and Analysis Lab (LAPI), Politehnica University of Bucharest, Romania"
+"School of Communication Engineering, Hangzhou Dianzi University, Xiasha Higher Education Zone"
+Nokia Bell Labs and University of Oxford
+"PG Scholar, Dr.Pauls Engineering College, Villupuram District, Tamilnadu, India"
+"VISLab, EBUII-216, University of California Riverside"
+"University of Waterloo, Waterloo ON N2L3G1, Canada"
+"Robotics Institute, Carnegie Mellon University, Pittsburgh PA"
+"Institute of Information Science, Academia Sinica, Taipei, Taiwan"
+"Computer Science and Engineering, Easwari Engineering College, India"
+"Faculty of Electrical Engineering, University of Ljubljana, Slovenia"
+"Assiut University, Assiut 71515, Egypt"
+"to visually detectable changes in facial appearance, including blushing and tears. These"
+"Institute of Anthropomatics, Karlsruhe Institute of Technology, Germany"
+"Hasan Kalyoncu University, Gaziantep, Turkey"
+"Computer Science Division, The Open University of Israel"
+"Faculty of Computer and Information Science, University of Ljubljana, Ve cna pot 113, SI-1000 Ljubljana"
+"Facebook AI Research, 2Dartmouth College"
+"Computer Science, Engineering and Mathematics School, Flinders University, Australia"
+"Seattle Paci c University, Seattle, WA 98119-1957, USA"
+"Key Lab. of Machine Perception, School of EECS, Peking University"
+"Electrical and Computer Engineering, University of Auckland, New Zealand"
+"The Blavatnik School of Computer Science, Tel Aviv University, Israel"
+Thesis. Rochester Institute of Technology. Accessed from
+cid:63)Stanford University
+"The Robotics Institute, Carnegie Mellon University"
+"School of Computer and Information Engineering, Nanyang Institute of Technology, Henan, Nanyang, 473000, P.R"
+"Institute of Informatics, Istanbul Technical University, Istanbul, 34469, TURKEY"
+State University of Feira de Santana (UEFS
+University of Siegen
+"Neuroscience, Icahn School of Medicine at Mount Sinai, Friedman Brain Institute, New York, NY, United States"
+"M.Tech, Information Technology, Madras Institute of Technology, TamilNadu, India"
+"cid:2)Honda RandD Americas, Inc., Boston, MA, USA"
+tional Taipei University for his help in performing simulations. The author would like to thank Mr. Ming
+"Skolkovo Institute of Science and Technology (Skoltech), Russia"
+"USC IRIS Lab, University of Southern California"
+"Teaching Affairs Office, Chongqing Normal University, Chongqing 401331, China"
+"EEMCS, University of Twente, Netherlands"
+"puter Engineering, National University of Singapore, Singapore (e-mails"
+"School of Computer Science and Engineering, South China University of Technology, Guangzhou, China"
+"Electrical and Computer Engineering, The University of Memphis"
+"Held at R.C.Patel Institute of Technology, Shirpur, Dist. Dhule, Maharastra, India"
+"Robotics Institute, Carnegie Mellon University, Pittsburgh, Pennsylvania"
+"Machine Intelligence Lab (MIL), Cambridge University"
+"Co-Guide, Computer Science, Shah and Anchor Kuttchi Engineering College, Mumbai, India"
+"Nanjing, 210023, China, 4 School of Computer Science and Technology, Nanjing University of Posts and"
+"School of Information Science and Technology, ShanghaiTech University, Shanghai, 200031, China"
+"ISTMT, University of Tunis El Manar Address: 9, Rue Docteur Zouhe r Safi 1006; 3Faculty of Medicine of Tunis; Address"
+"CAS), Institute of Computing Technology, CAS, Beijing, 100190, China"
+"ACRV, The Australian National University University of Oxford QUVA Lab, University of Amsterdam"
+"Technology, University of Oradea 410087, Universitatii 1, Romania"
+"University Health Board, Swansea, United Kingdom"
+facultyofmathematicsandnaturalsciencesarti cialintelligence22-09-2016|1ATitleA.UthorRijksuniversiteitGroningenSomeFaculty
+"Institute for Computer Graphics and Vision, Graz University of Technology"
+"School of Computer Engineering, Sejong University, Seoul, Korea"
+"Anatomy and Genetics, University of Oxford, Oxford, United Kingdom; 3The Wellcome"
+"Calgary, 2500 University Dr., N.W. Calgary, AB, Canada T2N 1N4. Tel"
+"Governance, Keio University"
+"cid:63)Queen Mary University of London, Imperial College London"
+"RGPV University, Indore"
+"Shaanxi Provincial Key Lab on Speech and Image Information Processing, Northwestern Polytechnical University, Xi an, China"
+Honda Research Institute
+"cid:5)School of Electrical and Computer Engineering, Purdue University, West Lafayette, IN. 47907, USA"
+Sarhad University of Science and Information Technology
+"University of Twente, EEMCS, Netherlands"
+"School of Computer Science and Technology, Nanjing University of Science and Technology, China"
+"University of Illinois, Urbana-Champaign University of California, San Diego"
+"Aristotle University of Thessaloniki, GR-541 24 Thessaloniki, Greece"
+"School of Data of Computer Science, Sun Yat-sen University, P.R. China"
+"Michigan State University, East Lansing, MI 48824, U.S.A"
+"Rutgers, The State University of New Jersey, Piscataway, NJ"
+"Asian Institute of Technology, Pathumthani, Thailand"
+"Center for Arti cial Vision Research, Korea University"
+"Korea Electronics Technology Institute, 203-103 B/D 192, Yakdae-Dong"
+"Technical University of Munich, Germany"
+"City University of Hong Kong, Kowloon 999077, Hong Kong, China"
+"Laboratory, University of Houston, Houston, TX, USA"
+"School of Computer Science, University of Lincoln, United Kingdom"
+"School of Electrical and Computer Engineering, RMIT University, Melbourne, Australia"
+"Ponti cal Catholic University of Rio de Janeiro, Brazil"
+"School of Computer Science, Wuyi University, Jiangmen 529020, China"
+Institute of Deep Learning
+"Institute of Information Science, Beijing Jiaotong University, Beijing 100044, P.R. China"
+"Computational Biomedicine Laboratory, University of Houston, Houston, Texas 77204, USA"
+"FI-90014 University of Oulu, Finland"
+"Computer Vision Group, L. D. College of Engineering, Ahmedabad, India"
+"Arts Media and Engineering, Arizona State University"
+University of Beira Interior
+"Institute of Biochemistry, University of Balochistan, Quetta"
+Ho Chi Minh City University of
+"Asia University, Taichung, Taiwan"
+"National Kaohsiung University of Applied Sciences, Kaohsiung, Kaohsiung, Taiwan, ROC"
+"USC Institute for Robotics and Intelligent Systems (IRIS), Los Angeles, CA"
+"New York University Shanghai, 1555 Century Ave, Pudong"
+Facebook 4Texas AandM University 5IBM Research
+"Institute of Experimental Biology of Polish Academy of Sciences, Warsaw, Poland"
+yAristotle University of Thessaloniki
+"School of Electronics and Information Engineering, Tongji University, Caoan Road 4800, Shanghai"
+"CVSSP, University of Surrey"
+ShahidBeheshti University
+"Electronics Engineering, National Institute of Technical Teachers"
+"Sichuan Fine Arts Institute, Chongqing, China"
+"tion [11, 10] is making possible very large scale visual recognition both in my own ongoing work, including"
+"Institute for Electronics, Signal Processing and Communications"
+National Institute of Development Administration
+"EEMCS, University of Twente, The Netherlands"
+UvA-DARE is a service provided by the library of the University of Amsterdam (http://dare.uva.nl
+"College of Sciences, Northeastern University, Shenyang 110819, China"
+"and bDivision of Engineering and Applied Sciences 136-93, California Institute of Technology, Pasadena, CA"
+Dietrich College of Humanities and Social Sciences
+"School of Electronic and Information Engineering, Tongji University, Shanghai, China"
+"School of Information Technology and Electrical Engineering, The University of Queensland"
+"Institute of Systems and Robotics, University of Coimbra, Portugal"
+"Synergy Systems, Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences, Guangdong"
+"Center for Automation Research, University of Maryland"
+"Japan, 2 Center for Special Needs Education, Nara University of Education, Nara-shi, Nara"
+University of Science and Technology Beijing
+"Computational Biomedicine Lab, University of Houston, TX, USA"
+Nam k Kemal University
+University of Colorado at Colorado Springs
+University of Freiburg
+"DESTEC, FLSHR Mohammed V University-Agdal, Rabat, Morocco"
+"College of Science, Menou a University, Menou a 32721, Egypt"
+Interactive and Digital Media Institute
+"College of Computer and Information Science, Northeastern University, MA, USA"
+"School of Computer and Information, Hefei University of Technology, Hefei"
+"Research Groups on Intelligent Machines, University of Sfax, Sfax 3038, Tunisia"
+National Institute of Advanced Industrial
+USC Institute for Creative Technologies
+"University of Canberra, Australia, Data61 - CSIRO and ANU, Australia"
+Institute for Vision Systems Engineering
+"DCMandB, University of Michigan, Ann Arbor, USA 4 SCS, Carnegie Mellon University, Pittsburgh, USA"
+"School of Electrical and Electronic Engineering, Nanyang Technological University, Singapore"
+eBay Research Labs
+"Research Institute of Child Development and Education, University of Amsterdam, Utrecht, The"
+Howard Hughes Medical Institute (HHMI
+"Research Institute of Shenzhen, Wuhan University, Shenzhen, China"
+"and Modeling, Rutgers University"
+"Institute of Computing, University of Campinas (Unicamp), Campinas, Brazil, e-mail: ander"
+"National Institute of Informatics, Japan"
+"Charotar University of Science and Technology, Changa, India"
+UniversityofMaryland
+"Science and Technology, Sun Yat-Sen University, Guangzhou, China, 3 SYSU-CMU Shunde International"
+"Caarmel Engineering College, MG University, Kerala, India"
+"Universitat Polit cnica de Catalunya, 6 University of Vigo, 7 LIUM, University of Maine"
+"Computer Science and Engineering, University of Michigan, Ann Arbor"
+"J. P. College of Engineering, India"
+"Universite Catholique de Louvain, Louvain-la-Neuve, Belgium, 4 The Queensland Brain Institute, The"
+"Center for Machine Vision and Signal Analysis (CMVS), University of Oulu, Finland"
+"iCV Research Group, Institute of Technology, University of Tartu, Tartu 50411, Estonia"
+"Engineering Institute, Autonomous University of Baja California, Blvd. Benito Ju rez"
+Technical University of Kaiserslautern
+"M.Tech Student, SSG Engineering College, Odisha, India"
+"learning. As a result of this research, many applications, including video surveillance systems"
+Taizhou University
+"Goldsmiths, University of London, London, UK"
+"China Mobile Research Institute, Xuanwu Men West Street, Beijing"
+"Human and Health Sciences, Swansea University, Swansea, United Kingdom, 3 Abertawe Bro-Morgannwg"
+"Multimodal Computing and Interaction, Saarland University, Germany"
+"School of Computer Science, Northwestern Polytechnical University, China"
+"Intelligent Systems Group, University of Groningen, The Netherlands"
+"Center for Automation Research, UMIACS, University of Maryland, College Park, MD 20742 USA"
+"College of software, Chongqing University of Posts and Telecommunications Chongqing"
+"Vision and Sensing, HCC Lab, ESTeM, University of Canberra"
+"Computer Science and Arti cial Intelligence Laboratory, Massachusetts Institute of Technology, Cambridge, MA, USA"
+"P.A. College of Engnineering, Mangalore"
+"Beijing Institute of Technology, Beijing 100081, PR China"
+"M.H Saboo Siddik College of Engineering, University of Mumbai, India"
+"School of Computer Science, Carnegie Mellon University, USA"
+"Computer Science Division, The Open University of Israel, Israel"
+Achariya college of Engineering Technology
+"Courant Institute of Mathematical Sciences and Google Research, New York, NY"
+High Institute of Medical Technologies
+"cid:63) Faculty of Computing, Information Systems and Mathematics, Kingston University London"
+Sun Yat-Sen (Zhongshan) University
+"School of Electrical and Information Engineering, The University of Sydney, Sydney, NSW, Australia, 2 Sydney Medical"
+"Institute for Infocomm Research, Singapore"
+"ECSE, Rensselaer Polytechnic Institute, Troy, NY"
+"National Institute of Optics, National Research Council, Arnesano, LE, Italy"
+"Xi an Jiaotong University, China"
+"North Dakota State University, Fargo, ND58105, USA"
+University of Twente 2Dublin City University 3Oxford University
+"School of Electrical, Computer and Energy Engineering, Arizona State University"
+"Dr.Pauls Engineering College, Villupuram District, Tamilnadu, India"
+"tum Computation and Intelligent Systems, Faculty of Engineering and Information Technology, University"
+"Machine Intelligence Group, School of Computer Science, Indian Institute of Information Technology and"
+"University of Technology, Guangzhou, 510640, P.R.China"
+"Queen Mary, University of London, E1 4NS, UK"
+University of Wollongong. For further information contact the UOW
+"Engineering, Ton Duc Thang University, 19 Nguyen Huu Tho Street, Ho Chi Minh City, Vietman"
+College of Computer Science and Technology
+"Coordinated Science Lab, University of Illinois at Urbana-Champaign"
+Gangnung-Wonju National University
+"Institute of Digital Media, Peking University, Beijing, 100871, China"
+"Aristotle University of Thessaloniki, Box 451, 54124 Thessaloniki, Greece"
+"cid:2)Imperial College London, U.K"
+"School of Computer Science and Engineering, Southeast University, Nanjing 211189, China"
+"Cognitive Arti cial Intelligence, Utrecht University, Heidelberglaan 6, 3584 CD, Utrecht"
+"School of Computer Science, Northwestern Polytechnical University, P.R.China"
+"Friedrich Schiller University, D-07740 Jena"
+Mahatma Gandhi Institute of Technology
+"Research Center for Information Technology Innovation, Academia Sinica, Taipei, Taiwan"
+"Shenzhen Key Lab of CVPR, Shenzhen Institutes of Advanced Technology"
+"Max Planck Institute for Informatics, Saarbr ucken, Germany (MPI-INF.MPG.DE"
+B.S. University of Indonesia
+"University of T ubingen, T ubingen, Germany"
+"School of Computer Science, The University of Adelaide, Australia"
+"University of Amsterdam, Amsterdam, the Netherlands, 2 Leiden University"
+Honda Research Institute USA
+"Rm 1365, Stanford University, 401 Quarry Road, Stanford, CA"
+"Machine Intelligence Laboratory, College of Computer Science, Sichuan University"
+"University of Nottingham, UK, School of Computer Science"
+"Massachusetts Institute of Technology, 2Facebook Applied Machine Learning, 3Dartmouth College"
+"Sogang University, Seoul 121-742, Republic of Korea"
+Imperial College London / Twente University
+"National University of Singapore, 2Shanghai Jiao Tong University"
+"School of Software, Tsinghua University, Beijing 100084, China"
+"SRV Engineering College, sembodai, india"
+Central Mechanical Engineering Research Institute
+"Shenzhen Key Laboratory of Spatial Smart Sensing and Service, Shenzhen University, P.R. China"
+"Gayathri.S, M.E., Vins Christian college of Engineering"
+"Universiteit van Amsterdam, Roeterstraat 15, 1018 WB Amsterdam, The Netherlands"
+"CVSSP, University of Surrey, UK"
+Sanghvi Institute of Management and Science
+"Multimedia Laboratory, The Chinese University of Hong Kong, Hong Kong"
+"U.S. Army Research Laboratory, 2800 Powder Mill Road, Adelphi, MD USA"
+"National Laboratory on Machine Perception, Peking University, Beijing, P.R. China"
+"Computer Science, Beijing Institute of Technology, Beijing 100081, P.R.China"
+College of Information and Electrical Engineering
+"aResearch Scholar, Anna University, Chennai, Inida"
+"School of Computer Science, Tianjin University"
+"Johns Hopkins University, 3400 N. Charles Street, Baltimore, MD 21218, USA"
+"b DEI - University of Padova, Via Gradenigo, 6 - 35131- Padova, Italy"
+"Amsterdam; and 3Center for Experimental Economics and Political Decision Making, University of Amsterdam"
+"Moscow Institute of Physics and Technology, Institutskiy per., 9, Dolgoprudny, 141701, Russia"
+University of California at San Diego
+"Engineering, University of Akron, Akron, OH 44325-3904, USA"
+"DIEI, University of Perugia, Italy"
+"Michigan State University, 3115 Engineering Building"
+"Publication details, including instructions for authors and subscription"
+"Max Planck Institute for Informatics, Saarbr ucken, Germany"
+"iCV Research Group, Institute of Technology, University of Tartu, Tartu 50411, Estonia"
+"University College London, 12 Queen Square, London WC1N 3BG, UK"
+"Jawaharlal Technological University, Anantapur"
+SAMSI and Duke University
+"School of Electrical and Information Engineering, Xi an Jiaotong University, Xi an, China"
+"MISC Laboratory, Constantine 2 University, Constantine, Algeria"
+"SRV Engineering College, sembodai, india"
+"Institute of Computing Technology, CAS"
+Institute of control science and engineering
+National Institute of Informatics
+"Centre for Vision, Speech and Signal Processing, University of Surrey, Guildford, GU2 7XH"
+Institute for Robotics and Intelligent Systems
+St. Anne s College
+"School of Mathematics and Statistics, Xi an Jiaotong University, P. R. China"
+"DSP Lab, Sharif University of Technology, Tehran, Iran"
+"J. P. College of Engineering, India"
+"National Laboratory on Machine Perception, Peking University, Beijing, P.R. China"
+SUS college of Engineering and Technology
+"methods, including graph matching, optical- ow-based"
+"Turin, Italy, 3 Faculty of Humanities, Research Unit of Logopedics, Child Language Research Center, University of Oulu, Oulu"
+"Michigan State University, NEC Laboratories America"
+"Polytechnic University of Milan, Milan, 20156, Italy, 3 Applied Electronics"
+"The Blavatnik School of Computer Science, Tel Aviv University, Israel"
+"School of Information Engineering, Guangdong Medical College, Song Shan Hu"
+"Nanjing University of Information Science and Technology, Nanjing, 210044, China"
+M. Mark Everingham University of Leeds
+University of California at San Diego
+"Institute of Computing Technology, Chinese Academy of Sciences, Beijing 100190, China"
+"PG Scholar, Dr.Pauls Engineering College, Villupuram District, Tamilnadu, India"
+"Cognitive Brain Research Unit, Institute of Behavioural Sciences, University of"
+"Cornell University, Ithaca, NY, U.S.A"
+"Pathological anxiety is associated with disrupted cognitive processing, including working memory and"
+"Carnegie Mellon University, Electrical and Computer Engineering"
+"Key Laboratory of Embedded System and Service Computing, Ministry of Education, Tongji University, Shanghai, China"
+University of Sfax
+"Arts, Commerce and Science College, Gangakhed, M.S, India"
+"Sejong University, 98 Gunja, Gwangjin, Seoul 143-747, Korea"
+"Education, Yunnan Normal University, Kunming, China"
+"Arti cial Intelligence Institute, China"
+"bFaculty of Engineering, International Islamic University, Jalan Gombak, 53100 Kuala Lumpur, Malaysia"
+"University of Zagreb, Faculty of Electrical Engineering and Computing, Unska 3, 10000 Zagreb, Croatia"
+"AncyRijaV, Author is currently pursuing M.E (Software Engineering) in Vins Christian College of"
+"Siemens Corporate Research, 755 College Road East, Princeton, NJ"
+"CAS), Institute of Computing Technology, CAS, Beijing 100190, China"
+"Shenyang Institute of Automation, Chinese Academy of Sciences, Shenyang 110016, China"
+"Institute of Biochemistry, University of Balochistan, Quetta"
+"School of Engineering, University of Portsmouth, United Kingdom"
+"S.R.C.E.M, Banmore, RGPV, University, Bhopal, Madhya Pradesh, India"
+"bDiscipline of Business Analytics, The University of Sydney Business School"
+"Synergy Systems, Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences, Guangdong"
+"Najafabad Branch, Islamic Azad University"
+"Center for Cognitive Ubiquitous Computing, Arizona State University, Tempe, AZ, USA"
+"The school of Data Science, Fudan University"
+"School of Mathematical Science, Peking University, China"
+"School of Computer Science, The University of Adelaide, Australia"
+Institute of Interdisciplinary Studies in Identity Sciences (IISIS
+"School of Computing, National University of Singapore, SG"
+University of California at Berkeley / ICSI
+"School of Electrical Engineering and Automation, Harbin Institute of Technology"
+Massachusettes Institute of Technology
+"IBM Watson Research Center, Armonk, NY, USA"
+B.S. (Cornell University
+"School of IoT Engineering, Jiangnan University, Wuxi, 214122, China"
+"Assiut University, Assiut 71515, Egypt"
+"Science and Technology, Sun Yat-Sen University, Guangzhou, China, 3 SYSU-CMU Shunde International"
+"SICE, Beijing University of Posts and Telecommunications. 2Center for Imaging Science, Johns Hopkins University"
+Shandong Women s University
+"City University of Hong Kong, Kowloon 999077, Hong Kong, China"
+"School of Computer Science and Technology, Harbin Institute of"
+Northwestern University) to T.E. We thank Vincent De Gardelle for helpful comments on an earlier version of
+"Key Lab. of Machine Perception, School of EECS, Peking University, China"
+"Faculty of Science and Engineering, Waseda University, Tokyo, Japan"
+"School of Information Engineering, Guangdong Medical College, Song Shan Hu"
+"Kurukshetra University, Kurukshetra-136 119, Haryana, INDIA"
+Research Center E. Piaggio
+"Federal University Technology Akure, PMB 704, Akure, Nigeria"
+Thesis. Rochester Institute of Technology. Accessed from
+"The Robotics Institute, Carnegie Mellon University, 5000 Forbes Ave, PA, USA"
+"Institute of Informatics, Istanbul Technical University, Istanbul, 34469, TURKEY"
+"Beihang University 2Gri th University 3University of York, UK"
+"University of Oviedo, Campus de Viesques, 33204 Gij n"
+"Institute for Vision and Graphics, University of Siegen, Germany"
+"P. O. Box 4500 Fin-90014 University of Oulu, Finland"
+Malaviya National Institute of Technology
+"Universiteit van Amsterdam, Roeterstraat 15, 1018 WB Amsterdam, The Netherlands"
+"Engineering and Natural Science, Sabanci University, Istanbul, Turkey"
+"Ecole Polytechnique Federale de Lausanne, Signal Processing Institute"
+"Faculty of Science, University of Amsterdam, The Netherlands"
+National Taiwan University of Science and
+"Graduate School of System Informatics, Kobe University, Kobe, 657-8501, Japan"
+"DPDCE, University IUAV, Santa Croce 1957, 30135 Venice, Italy"
+"School of Computer Science and Engineering, Water Resources University, Hanoi 10000, Vietnam"
+"Institute of Anthropomatics, Karlsruhe Institute of Technology, Germany"
+"CollegePark, MD"
+"Academy of Sciences (Grant No. KGZD-EW-T03), and project MMT-8115038 of the Shun Hing Institute of"
+"School of Computer Science, Wuhan University, P.R. China"
+"School of Electronic and Computer Engineering, Peking University"
+COMSATS Institute of Information Technology Wah Cantt
+yThe University of Tokyo
+"School of Electronics and Computer Engineering, Peking University"
+facultyofmathematicsandnaturalsciencesarti cialintelligence22-09-2016|1ATitleA.UthorRijksuniversiteitGroningenSomeFaculty
+Computer and Vision Research Center
+"Faculty of Computer Science, University of A Coru na, Coru na, Spain"
+"Institute of Radioelectronics, Warsaw University of Technology, Warsaw, Poland"
+"Robotics Institute, Carnegie Mellon University, USA"
+"Paran a Federal University, Curitiba, Brazil"
+"Institute of Information Science, Beijing Jiaotong University, Beijing 100044, P.R. China"
+"Nanjing, 210094, China, 3 School of Automation, Nanjing University of Posts and Telecommunications"
+"Tsinghua University, State Key Lab. of Intelligent"
+PES Modern College of Engg
+K. N. Toosi University of
+"School of Automation, Beijing University of Posts and Telecommunications, Beijing 100876, China"
+Research Center for Information
+"Rowland Institute at Harvard, Cambridge, MA 02142, USA"
+"College of Computer Science, Zhejiang University, Hangzhou, China"
+"School of Computer Science, University of Birmingham, UK"
+"University of Tokyo, 4-6-1 Shirokanedai"
+"Shenzhen Key Laboratory of High Performance Data Mining, Shenzhen Institutes of Advanced Technology"
+Achariya college of Engineering Technology
+"Korea Electronics Technology Institute, Jeonju-si, Jeollabuk-do 561-844, Rep"
+at West Virginia University
+"Grad. School at Shenzhen, Tsinghua University"
+State University of Feira de Santana (UEFS
+"Human and Health Sciences, Swansea University, Swansea, United Kingdom, 3 Abertawe Bro-Morgannwg"
+"Catholic University of Rio de Janeiro, Brazil"
+"Asian Institute of Technology, Pathumthani, Thailand"
+"Indian Institute of Informaiton Technology, Allahabad, India"
+"Indian Institute of Technology, Madras, Chennai 600036, INDIA"
+yAristotle University of Thessaloniki
+Korea Advanced Institute of Science and Technology (KAIST
+"B.S. Abdur Rahman University, Chennai-48, India"
+Imperial College London / Twente University
+University of Chinese Academy of
+Mans eld College
+"LIP6, UPMC - Sorbonne University, Paris, France"
+"University of Santiago de Compostela, Santiago de Compostela, Spain"
+"School of Computer Science, Northwestern Polytechnical University, China"
+"Robotics Institute, Carnegie Mellon University, Pittsburgh, PA, 15213. USA"
+"Middlesex University London, 4International Hellenic University"
+"International Islamic University, Islamabad 44000, Pakistan"
+"College of Computing, Georgia Institute of Technology"
+"Australian National University, and NICTA"
+"M.P.M. College, Bhopal, India"
+"California State University, Fullerton, USA"
+"EEMCS, University of Twente Enschede, The Netherlands"
+"IIIS, Tsinghua University"
+"National Tsing Hua University, Hsin-Chu, Taiwan"
+M.S. Brunel University of West London
+"Research Center CENTIA, Electronics and Mechatronics"
+IBM China Research Lab
+"ISTMT, University of Tunis El Manar Address: 9, Rue Docteur Zouhe r Safi 1006; 3Faculty of Medicine of Tunis; Address"
+North Carolina AandT State University
+"School of Computer Science and Technology, Harbin Institute of Technology, Harbin, China"
+"University of Queensland, School of ITEE, QLD 4072, Australia"
+The Author 2014. Published by Oxford University Press
+"Institute of Automation, Chinese Academy of"
+"University of Lincoln, School of Computer Science, U.K"
+"Research Center E. Piaggio , University of Pisa, Pisa, Italy, 2 Faculty of Psychology, University of Florence, Florence, Italy"
+"cid:2) Imperial College London, United Kingdom"
+"Principal, Chaithanya Institute of Engineering and Technology, Kakinada, AP, India"
+"State Research Institute of Aviation Systems (GosNIIAS), Moscow, Russia"
+"College of Medical Informatics, Chongqing Medical University, Chongqing, China"
+Australian National University and NICTA
+"Waseda University, Tokyo, Japan"
+"School of Computing Sciences, University of East Anglia, Norwich, UK"
+"Beijing University of Posts and Telecommunications, Beijing, China. 2School of"
+German Research Center for Arti cial Intelligence (DFKI
+"Baingio Pinna, University of"
+"abroad, or from public or private research centers"
+"IslamicAzad University, Qazvin, Iran"
+"Institute for Neural Computation, University of California, San Diego"
+"The Blavatnik School of Computer Science, Tel-Aviv University, Israel"
+"GRASP Laboratory, University of Pennsylvania, 3330 Walnut Street, Philadelphia, PA, USA"
+"School of Information Science and Engineering, Central South University, Changsha"
+"Vision Systems, Inc"
+"Faculty of Electrical Engineering, University of Ljubljana, Tr za ska cesta"
+"Graduate School of Information Science, Nagoya University, Japan"
+"DCMandB, University of Michigan, Ann Arbor, USA 4 SCS, Carnegie Mellon University, Pittsburgh, USA"
+"Cognitive Arti cial Intelligence, Utrecht University, Heidelberglaan 6, 3584 CD, Utrecht"
+"School of Computer Science, Carnegie Mellon University, PA, USA"
+"Faculty of Electronics, Telecommunications and Informatics, Gdansk University of Technology, Poland"
+"Sathyabama University Old Mamallapuram Road, Chennai, India"
+"Southeast University, Nanjing 211189, China"
+"China-Singapore Institute of Digital Media, Singapore"
+"University of Notre Dame, 2IIIT-Delhi"
+"Image Processing Center, Beihang University"
+"Caarmel Engineering College, MG University, Kerala, India"
+"DESTEC, FLSHR Mohammed V University-Agdal, Rabat, Morocco"
+College of Computer Science and Technology
+"School of Informatics, University of Edinburgh, UK"
+"University of Ljubljana, Faculty of Electrical Engineering, Trzaska 25, 1000 Ljubljana, Slovenia"
+"Iftm University, Moradabad-244001 U.P"
+"Biometric and Image Processing Lab, University of Salerno, Italy"
+"School of Data and Computer Science, Sun Yat-sen University"
+Northeastern University 2Microsoft Research 3City University of New York
+"Massachusetts Institute of Technology, 2Facebook Applied Machine Learning, 3Dartmouth College"
+"University of California at San Diego, La Jolla, CA"
+"University of Technology, Sydney, 15 Broadway, Ultimo, NSW 2007, Australia"
+c(cid:176) Massachusetts Institute of Technology 2006. All rights reserved
+"University of Toronto, Toronto, ON M5S 2G4, Canada"
+"University of California at Irvine, Irvine, CA"
+"University Hospital Jena, Germany"
+"RGPV University, Indore"
+Facebook 4Texas AandM University 5IBM Research
+"Mancha, Spain, Imperial College, London, UK"
+"Division of Information and Computer Engineering, Ajou University, Suwon 443-749, Korea"
+"University of Zaragoza, Spain"
+"Hong Kong Applied Science and Technology Research Institute Company Limited, Hong Kong, China"
+"University of Florence, Italy"
+"Key Laboratory of Machine Perception, Peking University, Beijing"
+"K.S.Rangasamy College of Technology, Namakkal, TamilNadu, India"
+"School of Computer Science and Technology, Shandong Institute of Business and Technology"
+"College of Information and Control Engineering, China University of Petroleum, Qingdao 266580, China"
+"University of Cambridge, UK 2Carnegie Mellon University, USA"
+"Amsterdam; and 3Center for Experimental Economics and Political Decision Making, University of Amsterdam"
+"Faculty of Electrical Engineering, University of Ljubljana, Tr a ka cesta 25, SI-1000 Ljubljana, Slovenia"
+"University of Waterloo, Waterloo ON N2L3G1, Canada"
+"Bo gazic i University, Istanbul, Turkey"
+"Human-friendly Welfare Robotic System Engineering Research Center, KAIST"
+"Kong Polytechnic University, Kowloon, Hong Kong"
+Gangnung-Wonju National University
+"School of Computer Science, Wuyi University, Jiangmen 529020, China"
+"The School of Electrical Electronic and Control Engineering, Kongju National University"
+Taizhou University
+"Technical University in Prague, 166 27 Prague 6, Technick a 2 Czech Republic"
+"ECE dept, University of Miami"
+"Qihoo 360 AI Institute, Beijing, China"
+University of California at Berkeley
+"Institute of Arti cial Intelligence and Cognitive Engineering (ALICE), University of Groningen"
+"Max Planck Institute for Biological Cybernetics, Spemannstr. 38, 72076 T bingen, Germany"
+Research Center and Laboratoire
+"EECS, Syracuse University, Syracuse, NY, USA"
+"University of Cambridge, Computer Laboratory, UK"
+Moradabad Institute of Technology
+"ITCS, Tsinghua University"
+"School of Communication Engineering, Hangzhou Dianzi University, Xiasha Higher Education Zone"
+"Dnyanopasak College Parbhani, M.S, India"
+"Faculty of Electrical and Computer Engineering, Tarbiat Modares University, Tehran, Iran"
+"School of Computer Science and Software Engineering, Shenzhen University"
+"Francis Xavier Engineering College, Tirunelveli, Tamilnadu, India"
+Palo Alto Research Center (PARC
+"Universit Paris-Dauphine, PSL Research University, CNRS, UMR"
+"Institute of Industrial Science, The University of Tokyo"
+"State Key Laboratory of Integrated Services Networks, Xidian University, Xi an 710071 China"
+"National Tsing-Hua University, Hsin-Chu, Taiwan"
+"BRIC, University of North Carolina at Chapel Hill, NC 27599, USA"
+"University of Kaiserslautern, Gottlieb-Daimler-Str., Kaiserslautern 67663, Germany"
+"S J B Institute of Technology, Bangalore, Karnataka, India"
+"ITEE, The University of Queensland, Australia"
+"ECE, National University of Singapore, Singapore"
+Central Mechanical Engineering Research Institute
+"UC Lab, Kyung Hee University, Yongin-Si 446-701, Korea"
+"The Hong Kong University of Science and Technology, Clear Water Bay, Hong Kong"
+"Computer Science and Engineering, Anna University, India"
+"QCIS Centre, FEIT, University of Technology, Sydney, NSW 2007, Australia"
+"Software, Jiangxi Normal University, Nanchang, China, 4 School of Statistics, Capital University of"
+"Intelligent Systems Group, University of Groningen, The Netherlands"
+Curtin University of Technology
+"f Neuropsychiatry Section, University of Pennsylvania Medical Center, Hospital of the University of Pennsylvania"
+"Chandigarh Engg. College, Mohali, Punjab, India"
+"Rm 1365, Stanford University, 401 Quarry Road, Stanford, CA"
+"Tsinghua-CUHK Joint Research Center for Media Sciences, Technologies and Systems"
+"Human Computer Interaction Lab., Samsung Advanced Institute of Technology, Korea"
+"School of Psychology, University of Central Lancashire"
+"Computational Biomedicine Laboratory, University of Houston, Houston, Texas 77204, USA"
+"MES College of Engineering, Kuttippuram"
+"Korean Research Institute of Standards and Science (KRISS), Korea"
+"Final Year Student, M.Tech IT, Vel Tech Dr. RR andDr. SR Technical University, Chennai"
+"School of Computer Science, The University of Nottingham"
+"USC Institute for Robotics and Intelligent Systems (IRIS), Los Angeles, CA"
+via Institute of Electrical and Electronic Engineers at http://dx.doi.org/10.1109/TIP.2014.2367321. Please refer to
+"Aristotle University of Thessaloniki, GR-541 24 Thessaloniki, Greece"
+University of Applied Sciences Darmstadt - CASED
+Sponsors: Machine Intelligence Research Labs (MIR Labs
+"Aarhus University, Finlandsgade 22 8200 Aarhus N, Denmark"
+"Birkbeck College, University of London"
+"School of Data and Computer Science, Sun Yat-sen University, P.R.China"
+"iMinds - Ghent University, Technologiepark 15, B-9052 Ghent, Belgium"
+"School of Electrical and Electronic Engineering, Nanyang Technological University, Singapore. 2Advanced Digital Sciences Center, Singapore"
+"University of California, Los Angeles"
+"Institute of Computing, University of Campinas (Unicamp), Campinas, Brazil, e-mail: ander"
+"School of Electronic and Computer Engineering, Peking University"
+"RGPV University, Indore"
+"School of Engineering, University of Portsmouth, United Kingdom"
+"M.Tech Scholar, MES College of Engineering, Kuttippuram"
+"Shenyang Institute of Automation, Chinese Academy of Sciences, Shenyang 110016, China"
+"LITIS EA 4108-QuantIF Team, University of Rouen, 22 Boulevard Gambetta, 76183 Rouen Cedex, France"
+"Exploratory Computer Vision Group, IBM T. J. Watson Research Center"
+"Faculty of Electrical Engineering, University of Ljubljana, Tr za ska cesta"
+"K.S.R. College Of Engineering, Tiruchengode, India"
+"Lab, University College London, London WC1H 0AP, UK. 3Clinical"
+"University of Tokyo, 4-6-1 Shirokanedai"
+"Henan University of Traditional Chinese Medicine, Henan, Zhengzhou, 450000, P.R. China"
+yAristotle University of Thessaloniki
+"School of Data Science, Fudan University, China"
+University of Applied Sciences Darmstadt - CASED
+"School of Communication Engineering, Hangzhou Dianzi University, Xiasha Higher Education Zone"
+"School of Electrical and Computer Engineering, RMIT University"
+"Massachusetts Institute of Technology, 2Facebook Applied Machine Learning, 3Dartmouth College"
+"Chandigarh Engg. College, Mohali, Punjab, India"
+"RoboticsResearchGroup, UniversityofOxford, Oxford, UK"
+"Faculty of EEMCS, Delft University of Technology, The Netherlands"
+China University of Mining and Technol
+"University of Technology, Sydney, 15 Broadway, Ultimo, NSW 2007, Australia"
+"The Blavatnik School of Computer Science, Tel-Aviv University, Israel"
+Copyright for the publications made accessible via the Queen's University Belfast Research Portal is retained by the author(s) and / or other
+"QCIS Centre, FEIT, University of Technology, Sydney, NSW 2007, Australia"
+"School of Electronics and Computer Engineering, Peking University"
+"Catholic University of Rio de Janeiro, Brazil"
+"Human Computer Interaction Lab., Samsung Advanced Institute of Technology, Korea"
+University of Washington and Google Inc
+Imperial College London / Twente University
+c(cid:176) Massachusetts Institute of Technology 2006. All rights reserved
+"University of Zaragoza, Spain"
+"Siemens Corporate Research, 755 College Road East, Princeton, NJ"
+"School of Information Science and Technology, Northwestern University, Xi an710127, Shanxi, China"
+Mme Tinne Tuytelaars Katholieke Universiteit Leuven
+"Key Lab. of Machine Perception, School of EECS, Peking University, China"
+"Dartmouth College, 6211 Sudiko Lab, Hanover, NH 03755, USA"
+"School of Psychology, Cardiff University, Cardiff, CF10 3AT, UK"
+"Shaoguan University, Da Tang Lu"
+"International Islamic University, Islamabad 44000, Pakistan"
+"Engineering and Natural Science, Sabanci University, Istanbul, Turkey"
+"School of Information Science and Technology, Northwestern University, Xi an710127, Shanxi, China"
+"Computer Science Division, The Open University of Israel, Israel"
+Mans eld College
+"PanimalarInstitute of Technology, Tamilnadu, India"
+"Aristotle University of Thessaloniki, GR-541 24 Thessaloniki, Greece"
+"University of Lincoln, School of Computer Science, U.K"
+"School of Computer Science and Software Engineering, Shenzhen University"
+"Research Center E. Piaggio , University of Pisa, Pisa, Italy, 2 Faculty of Psychology, University of Florence, Florence, Italy"
+"Faculty of Engineering and Natural Sciences, Sabanc University, stanbul, Turkey"
+"P.G. Student, SRV Engineering College, sembodai, India"
+"Najafabad Branch, Islamic Azad University"
+Ho Chi Minh City University of Science
+"School of Computer Science, The University of Adelaide, Australia"
+"Ecole Polytechnique Federale de Lausanne, Signal Processing Institute"
+"School of Electronic and Computer Engineering, Peking University"
+"School of Electronic Engineering and Computer Science, Peking University, 100871, China"
+"Institute of Industrial Science, The University of Tokyo"
+"School of Computer Science and Technology, Shandong Institute of Business and Technology"
+"in signed languages, including American Sign Language (ASL). Gestures such"
+"Science and Technology, Sun Yat-Sen University, Guangzhou, China, 3 SYSU-CMU Shunde International"
+"BECS, Aalto University School of Science and Technology, Finland"
+"College of Medical Informatics, Chongqing Medical University, Chongqing, China"
+"State Key Laboratory of Robotics and System, Harbin Institute of Technology, Harbin, China"
+"Amazon, Inc"
+"Correspondence should be addressed to: Astrid C. Homan, University of Amsterdam, Weesperplein"
+Mitsubishi Electric Research Laboratory
+"Human and Health Sciences, Swansea University, Swansea, United Kingdom, 3 Abertawe Bro-Morgannwg"
+"Laboratory, University of Houston, Houston, TX, USA"
+"Exploratory Computer Vision Group, IBM T. J. Watson Research Center"
+"The school of Data Science, Fudan University"
+College of Image Arts and Sciences
+"RoboticsResearchGroup, UniversityofOxford, Oxford, UK"
+"b Computer Science, School of Electrical and Data Engineering, University of Technology, Sydney"
+Australian National University and NICTA
+"Rowland Institute at Harvard, Cambridge, MA 02142, USA"
+"GRASP Laboratory, University of Pennsylvania, 3330 Walnut Street, Philadelphia, PA, USA"
+"Kong Polytechnic University, Kowloon, Hong Kong"
+"Rm 1365, Stanford University, 401 Quarry Road, Stanford, CA"
+"Center for Automation Research, University of Maryland"
+"Chonbuk National University, Jeonju 561-756, Korea"
+c(cid:13)The Chinese University of Hong Kong
+"cid:63) Faculty of Computing, Information Systems and Mathematics, Kingston University London"
+"Computer Science, Brown University, Providence, RI, USA"
+Institute of Interdisciplinary Studies in Identity Sciences (IISIS
+"Institute of Biochemistry, University of Balochistan, Quetta"
+"ITCS, Tsinghua University"
+"Korea Electronics Technology Institute, Jeonju-si, Jeollabuk-do 561-844, Rep"
+A dissertation submitted to the University of Bristol in accordance with the requirements
+"Universit Paris-Dauphine, PSL Research University, CNRS, UMR"
+"University of Kaiserslautern, Gottlieb-Daimler-Str., Kaiserslautern 67663, Germany"
+University of California at Berkeley / ICSI
+"ECE, National University of Singapore, Singapore"
+"Institute of Automation, Chinese Academy of"
+This work was supported in part by National Institute of Mental Health Award R01 MH 087610 to T.E
+"Sogang University, Seoul 121-742, Republic of Korea"
+"Deprtment of Computer Science and Engineering, JNTUA College of Engineering, India"
+"City University of Hong Kong, Kowloon 999077, Hong Kong, China"
+"School of Data and Computer Science, Sun Yat-sen University, P.R.China"
+"West Virginia University, Morgantown, West Virginia, United States of America, 2. IIIT Delhi, New Delhi"
+"Robotics Institute, Carnegie Mellon University, Pittsburgh, PA"
+"S J B Institute of Technology, Bangalore, Karnataka, India"
+"School of Information Science and Engineering, Southeast University, Nanjing, China"
+"Principal, Chaithanya Institute of Engineering and Technology, Kakinada, AP, India"
+"K.S.R. College Of Engineering, Tiruchengode, India"
+"Institute of Arti cial Intelligence and Cognitive Engineering (ALICE), University of Groningen"
+"School of Computer Science and Engineering, Nanjing University of Science and Technology"
+Formerly: Texas AandM University
+"IslamicAzad University, Qazvin, Iran"
+K. N. Toosi University of
+"Key Lab. of Machine Perception, School of EECS, Peking University, China"
+"School of Electronics Engineering and Computer Science, Peking University"
+"USC Institute for Robotics and Intelligent Systems (IRIS), Los Angeles, CA"
+Achariya college of Engineering Technology
+"School of Information Systems, Singapore Management University, Singapore"
+"Tsinghua-CUHK Joint Research Center for Media Sciences, Technologies and Systems"
+"Institute of Neuroscience, State Key Laboratory of Neuroscience, CAS Center for Excellence in Brain"
+"Bo gazic i University, Istanbul, Turkey"
+Research Center and Laboratoire
+"Division of Information and Computer Engineering, Ajou University, Suwon 443-749, Korea"
+"UniversityofMaryland, CollegePark, MD"
+"Shenyang Institute of Automation, Chinese Academy of Sciences, Shenyang 110016, China"
+"School of Electrical and Electronic Engineering, Nanyang Technological University, Singapore"
+"Catholic University of Rio de Janeiro, Brazil"
+"Middlesex University London, 4International Hellenic University"
+"School of Electrical and Electronic Engineering, Nanyang Technological University, Singapore. 2Advanced Digital Sciences Center, Singapore"
+University of Applied Sciences Darmstadt - CASED
+"Computer Science and Engineering, Anna University, India"
+"International Islamic University, Islamabad 44000, Pakistan"
+"University of Notre Dame, 2IIIT-Delhi"
+"Alin Moldoveanu, Faculty of Automatic Control and Computers, University POLITEHNICA of Bucharest"
+IBM China Research Lab
+"cid:2) Imperial College London, United Kingdom"
+"Indian Institute of Technology, Madras"
+"CVSSP, University of Surrey"
+Curtin University of Technology
+"SICE, Beijing University of Posts and Telecommunications. 2Center for Imaging Science, Johns Hopkins University"
+German Research Center for Arti cial Intelligence (DFKI
+"Robotics Institute, Carnegie Mellon University, USA"
+"Medical Research Council Human Genetics Unit, Institute of Genetics and Molecular"
+Research Center for Information
+"Advanced Engineering, The Chinese University of Hong Kong"
+"Image Processing Center, Beihang University"
+"University of Barcelona, Gran Via de les Corts Catalanes 585, 08007 Barcelona, Spain"
+"California State University, Fullerton, USA"
+"School of Information Science and Technology, Sun Yat-sen University, China"
+"Games Studio, Faculty of Engineering and IT, University of Technology, Sydney"
+"Interactive and Digital Media Institute, National University of Singapore, SG"
+"University of Technology, Sydney, 15 Broadway, Ultimo, NSW 2007, Australia"
+"National Tsing-Hua University, Hsin-Chu, Taiwan"
+"f Neuropsychiatry Section, University of Pennsylvania Medical Center, Hospital of the University of Pennsylvania"
+"DPDCE, University IUAV, Santa Croce 1957, 30135 Venice, Italy"
+"National Tsing Hua University, Hsin-Chu, Taiwan"
+"School of Electronics and Computer Engineering, Peking University"
+"School of IoT Engineering, Jiangnan University, Wuxi, 214122, China"
+"School of Computer Science, Sichuan University, Chengdu, China"
+"Tsinghua University, State Key Lab. of Intelligent"
+"Faculty of Electrical Engineering, University of Ljubljana, Slovenia"
+"Institute of Anthropomatics, Karlsruhe Institute of Technology, Germany"
+"Faculty of Electrical and Computer Engineering, Tarbiat Modares University, Tehran, Iran"
+"School of Communication Engineering, Hangzhou Dianzi University, Xiasha Higher Education Zone"
+"Computer Vision Group, Friedrich Schiller University of Jena, Germany"
+"University of Pisa, Largo Lucio"
+"Institute of Systems and Robotics, University of Coimbra, Portugal"
+B.S. (Cornell University
+"Center for Brains, Minds and Machines, McGovern Institute, MIT"
+Facebook 4Texas AandM University 5IBM Research
+National Taiwan University of Science and
+"Engineering, G.H.Raisoni College of Engineering"
+"Human Computer Interaction Lab., Samsung Advanced Institute of Technology, Korea"
+"Shenzhen Key Laboratory of High Performance Data Mining, Shenzhen Institutes of Advanced Technology"
+"and education use, including for instruction at the authors institution"
+"Southeast University, Nanjing 211189, China"
+"School of Computer Science and Engineering, Sichuan University, China"
+"Universiteit van Amsterdam, Roeterstraat 15, 1018 WB Amsterdam, The Netherlands"
+"Deparment of Computer Science, Queen Mary, University of London, London, E1 4NS, UK"
+"Jawaharlal Technological University, Anantapur"
+"School of Computing Sciences, University of East Anglia, Norwich, UK"
+China University of Mining and Technol
+"KAUST1, University of Amsterdam2, Qualcomm Technologies, Inc"
+"Language and Brain Lab, Simon Fraser University, Canada"
+Mme Tinne Tuytelaars Katholieke Universiteit Leuven
+"Center for Automation Research, UMIACS, University of Maryland, College Park, MD 20742 USA"
+"P. O. Box 4500 Fin-90014 University of Oulu, Finland"
+"School of Data and Computer Science, Sun Yat-sen University"
+"School of Electrical Engineering and Automation, Harbin Institute of Technology"
+"UC Lab, Kyung Hee University, Yongin-Si 446-701, Korea"
+Massachusettes Institute of Technology
+Submitted to the Institute for Graduate Studies in
+"Faculty of Electrical Engineering, University of Ljubljana, Tr za ska cesta"
+"Held at R.C.Patel Institute of Technology, Shirpur, Dist. Dhule, Maharastra, India"
+"bDiscipline of Business Analytics, The University of Sydney Business School"
+"Electronics and Computer Science, University of Southampton, Southampton, Hampshire"
+"QCIS Centre, FEIT, University of Technology, Sydney, NSW 2007, Australia"
+"Academy of Sciences (Grant No. KGZD-EW-T03), and project MMT-8115038 of the Shun Hing Institute of"
+Sponsors: Machine Intelligence Research Labs (MIR Labs
+"Gatsby Computational Neuroscience Unit, University College London, London, UK"
+"Indian Institute of Technology, Madras, Chennai 600036, INDIA"
+"China-Singapore Institute of Digital Media, Singapore"
+Datta Meghe College of Engineering
+"College of Computer and Information Science, Southwest University, Chongqing 400715, China"
+"Institute for Vision and Graphics, University of Siegen, Germany"
+"Islamic Azad University, Qazvin, Iran"
+"The Open University of Israel, Israel"
+Nanjing University of Science and
+"School of Mathematical Science, Peking University, China"
+"University of Santiago de Compostela, Santiago de Compostela, Spain"
+"Human-friendly Welfare Robotic System Engineering Research Center, KAIST"
+University of California at Berkeley / ICSI
+"Universiteit van Amsterdam, Roeterstraat 15, 1018 WB Amsterdam, The Netherlands"
+University of California at Berkeley
+"Faculty of Electronics, Telecommunications and Informatics, Gdansk University of Technology, Poland"
+"School of Computing, Staffordshire University"
+"Beijing University of Posts and Telecommunications, Beijing, China. 2School of"
+"School of Electrical Engineering and Automation, Harbin Institute of Technology"
+"Massachusetts Institute of Technology, 2Facebook Applied Machine Learning, 3Dartmouth College"
+"Information Engineering, P. O. Box 4500 FI-90014 University of Oulu, Finland"
+Max Planck Institute f ur biologische Kybernetik
+"Games Studio, Faculty of Engineering and IT, University of Technology, Sydney"
+"Engineering, University of Dundee"
+"Human and Health Sciences, Swansea University, Swansea, United Kingdom, 3 Abertawe Bro-Morgannwg"
+"Henan University of Traditional Chinese Medicine, Henan, Zhengzhou, 450000, P.R. China"
+"School of Information Engineering, Guangdong Medical College, Song Shan Hu"
+"VSB Technical University of Ostrava, 17. listopadu 15, 708 33 Ostrava, Czech Republic"
+Mme Tinne Tuytelaars Katholieke Universiteit Leuven
+A dissertation submitted to the University of Bristol in accordance with the requirements
+"Graduate School of System Informatics, Kobe University, Kobe, 657-8501, Japan"
+"abroad, or from public or private research centers"
+This work was supported in part by National Institute of Mental Health Award R01 MH 087610 to T.E
+"Brain and Cognitive Sciences, Massachusetts Institute of Technology, Cambridge, MA, USA"
+"Australian National University, and NICTA"
+"GRASP Laboratory, University of Pennsylvania, 3330 Walnut Street, Philadelphia, PA, USA"
+and the institute of engineering and science
+"Deparment of Computer Science, Queen Mary, University of London, London, E1 4NS, UK"
+"aDivision of Biology and Biological Engineering 156-29, Howard Hughes Medical Institute, California Institute of Technology, Pasadena, CA"
+"Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences"
+"DPDCE, University IUAV, Santa Croce 1957, 30135 Venice, Italy"
+Sponsors: Machine Intelligence Research Labs (MIR Labs
+"Qihoo 360 AI Institute, Beijing, China"
+"State Research Institute of Aviation Systems (GosNIIAS), Moscow, Russia"
+"Faculty of Engineering and Natural Sciences, Sabanc University, stanbul, Turkey"
+"Center for Information Science, Peking University, Beijing 100871, China"
+"Alin Moldoveanu, Faculty of Automatic Control and Computers, University POLITEHNICA of Bucharest"
+"City University of Hong Kong, Kowloon 999077, Hong Kong, China"
+"Waseda University, Tokyo, Japan"
+"College of Medicine, Seoul National University"
+"School of Data and Computer Science, Sun Yat-Sen University, China"
+"University Politehnica of Bucharest, Romania, Address Splaiul Independent ei"
+Submitted to the Institute for Graduate Studies in
+"PanimalarInstitute of Technology, Tamilnadu, India"
+"Engineering Institute, Autonomous University of Baja California, Blvd. Benito Ju rez"
+"LITIS EA 4108-QuantIF Team, University of Rouen, 22 Boulevard Gambetta, 76183 Rouen Cedex, France"
+"University of Zaragoza, Spain"
+"Shaoguan University, Da Tang Lu"
+"Rm 1365, Stanford University, 401 Quarry Road, Stanford, CA"
+"California State University, Fullerton, USA"
+"ZHAW Datalab, Zurich University of Applied Sciences"
+University of Applied Sciences Darmstadt - CASED
+"Sackler Faculty of Medicine, Tel Aviv University, Tel Aviv, Israel"
+"University of Ljubljana, Faculty of Electrical Engineering, Trzaska 25, 1000 Ljubljana, Slovenia"
+"School of Computer Science, Wuyi University, Jiangmen 529020, China"
+"School of Information Science and Engineering, Southeast University, Nanjing, China"
+"Faculty of Electrical and Computer Engineering, Tarbiat Modares University, Tehran, Iran"
+"The School of Electrical Electronic and Control Engineering, Kongju National University"
+"to process in all the illumination conditions, including total"
+Mitsubishi Electric Research Laboratory
+"Grad. School at Shenzhen, Tsinghua University"
+"Kurukshetra University, Kurukshetra-136 119, Haryana, INDIA"
+"School of Information Science and Technology, Sun Yat-sen University, China"
+"School of Info. and Commu. Engineering, Beijing University of Posts and Telecommunications"
+"M.Tech Scholar, MES College of Engineering, Kuttippuram"
+Northeastern University 2Microsoft Research 3City University of New York
+Korea Advanced Institute of Science and Technology (KAIST
+"Institute of Anthropomatics, Karlsruhe Institute of Technology, Germany"
+"University of Notre Dame, 2IIIT-Delhi"
+"Research Center E. Piaggio , University of Pisa, Pisa, Italy, 2 Faculty of Psychology, University of Florence, Florence, Italy"
+"IBM Watson Research Center, Armonk, NY, USA"
+"Middlesex University London, 4International Hellenic University"
+Australian National University and NICTA
+"UniversityofMaryland, CollegePark, MD"
+"University of Freiburg, Instit ut f ur Informatik"
+"Aarhus University, Finlandsgade 22 8200 Aarhus N, Denmark"
+"Institute for Neural Computation, University of California, San Diego"
+"School of Computing, National University of Singapore, SG"
+"Intelligent Systems Group, University of Groningen, The Netherlands"
+"School of Data and Computer Science, Sun Yat-sen University"
+"School of Computer Science and Software Engineering, Shenzhen University"
+"School of Mathematical Science, Peking University, China"
+"KAUST1, University of Amsterdam2, Qualcomm Technologies, Inc"
+"Faculty of Computer Science, Dalhousie University, Halifax, Canada"
+"International Islamic University, Islamabad 44000, Pakistan"
+"Image Processing Center, Beihang University"
+"School of Computer Science and Technology, Harbin Institute of"
+Dalle Molle Institute for Arti cial Intelligence
+"Dnyanopasak College Parbhani, M.S, India"
+"Current Address: Research Institute of Child Development and Education, University of Amsterdam"
+IBM China Research Lab
+"Shenzhen Key Laboratory of High Performance Data Mining, Shenzhen Institutes of Advanced Technology"
+"The Remote Sensing Technology Institute (IMF), German Aerospace Center"
+Nanjing University of Science and
+"SRI International, Menlo Park California / *Brooklyn College, Brooklyn New York"
+"Research Institute of Child Development and Education, University of Amsterdam, Utrecht, The"
+"Tsinghua University, State Key Lab. of Intelligent"
+"Chandigarh Engg. College, Mohali, Punjab, India"
+"Shaanxi Provincial Key Lab on Speech and Image Information Processing, Northwestern Polytechnical University, Xi an, China"
+University of Wollongong. For further information contact the UOW
+"School of Psychology, University of Central Lancashire"
+"RGPV University, Indore"
+"P. O. Box 4500 Fin-90014 University of Oulu, Finland"
+University of Washington and Google Inc
+"United States of America, State University of New York Albany, Albany"
+"engineering, Government College of Engineering Kannur, Kerala, India"
+"RoboticsResearchGroup, UniversityofOxford, Oxford, UK"
+facultyofmathematicsandnaturalsciencesarti cialintelligence22-09-2016|1ATitleA.UthorRijksuniversiteitGroningenSomeFaculty
+"UC Lab, Kyung Hee University, Yongin-Si 446-701, Korea"
+"Nokia Research Center, Tampere, Finland"
+"Technical University in Prague, 166 27 Prague 6, Technick a 2 Czech Republic"
+Research Center for Information
+Research Center E. Piaggio
+"The Robotics Institute, Carnegie Mellon University, 5000 Forbes Ave, PA, USA"
+"Institute of Software, Chinese Academy of Sciences, Beijing 100190, China"
+"School of Computer Science, The University of Nottingham"
+"School of Computer Science, Carnegie Mellon University, PA, USA"
+B.S. (Cornell University
+"Lab, University College London, London WC1H 0AP, UK. 3Clinical"
+Taizhou University
+"Center for Cognitive Ubiquitous Computing, Arizona State University, Tempe, AZ, USA"
+"LIP6, UPMC - Sorbonne University, Paris, France"
+"School of Computer Science and Technology, Tianjin University, China"
+"Sorbonne Universit s, UPMC University Paris 06, Paris, France"
+"Center for Brain Science, Harvard University, Cambridge, MA 02138 USA"
+"Aristotle University of Thessaloniki, GR-541 24 Thessaloniki, Greece"
+"Biometric and Image Processing Lab, University of Salerno, Italy"
+"ITEE, The University of Queensland, Australia"
+"Interdisciplinary Program in Visual Information Processing, Korea University, Seoul, Korea"
+"School of Electronic and Information Engineering, Beihang University, Beijing, 100191, China"
+PES Modern College of Engg
+Facebook 4Texas AandM University 5IBM Research
+"Institute of Information Science, Beijing Jiaotong University, Beijing 100044, P.R. China"
+"University Hospital Jena, Germany"
+"GREYC UMR CNRS 6072 ENSICAEN-Image Team, University of Caen Basse-Normandie, 6 Boulevard Mar echal Juin"
+"b School of Business, Reykjavik University, Reykjavik, Iceland"
+University of Bristol - Explore Bristol Research
+COMSATS Institute of Information Technology Wah Cantt
+Moradabad Institute of Technology
+via Institute of Electrical and Electronic Engineers at http://dx.doi.org/10.1109/TIP.2014.2367321. Please refer to
+"Samsung RandD Institute America - Dallas, 1301 East Lookout Drive, Richardson, TX 75082, USA"
+Copyright for the publications made accessible via the Queen's University Belfast Research Portal is retained by the author(s) and / or other
+"Federal Institute of Science and Technology, Mookkannoor"
+"Hong Kong Applied Science and Technology Research Institute Company Limited, Hong Kong, China"
+"School of Computer and Information Engineering, Nanyang Institute of Technology, Henan, Nanyang, 473000, P.R"
+"School of Computer Science and Engineering, South China University of Technology, Guangzhou 510006, China"
+"Laboratory, University of Houston, Houston, TX, USA"
+"College of Computer Science and Technology, Chongqing"
+State University of Feira de Santana (UEFS
+National Taiwan University of Science and
+"Robotics Institute, Carnegie Mellon University, Pittsburgh, PA"
+"School of IoT Engineering, Jiangnan University, Wuxi, 214122, China"
+"School of Computer Science and Engineering, South China University of Technology"
+"Center for Automation Research, UMIACS, University of Maryland, College Park, MD 20742 USA"
+Malaviya National Institute of Technology
+"School of Electronics Engineering and Computer Science, Peking University"
+"Advanced Digital Sciences Center (ADSC), University of Illinois at Urbana-Champaign, Singapore"
+"Indian Institute of Technology, Madras, Chennai 600036, INDIA"
+"Institute of Industrial Science, The University of Tokyo"
+"Faculty of Science, University of Amsterdam, The Netherlands"
+Formerly: Texas AandM University
+"USC Institute for Robotics and Intelligent Systems (IRIS), Los Angeles, CA"
+University of Chinese Academy of
+"College of Computing, Georgia Institute of Technology"
+"School of Automation, Beijing University of Posts and Telecommunications, Beijing 100876, China"
+"Kong Polytechnic University, Kowloon, Hong Kong"
+"Francis Xavier Engineering College, Tirunelveli, Tamilnadu, India"
+"and IBUG [32]. All of them cover large variations, including different"
+"Computer Science, Engineering and Mathematics School, Flinders University, Australia"
+"Bo gazic i University, Istanbul, Turkey"
+"University of the South Paci c, Fiji"
+"and 2Center for Cognitive Neuroscience, Duke University, Durham, North Carolina 27708"
+"Affiliated to Guru Gobind Singh Indraprastha University, Delhi, India"
+Institute of Interdisciplinary Studies in Identity Sciences (IISIS
+"DCMandB, University of Michigan, Ann Arbor, USA 4 SCS, Carnegie Mellon University, Pittsburgh, USA"
+Achariya college of Engineering Technology
+Shandong Women s University
+"Interactive and Digital Media Institute, National University of Singapore, SG"
+"University of Barcelona, Gran Via de les Corts Catalanes 585, 08007 Barcelona, Spain"
+"Language and Brain Lab, Simon Fraser University, Canada"
+"School of Computer Science, Northwestern Polytechnical University, China"
+"Paran a Federal University, Curitiba, Brazil"
+"National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210093, China"
+"University of Cambridge, Computer Laboratory, UK"
+"School of Computer Science, The University of Manchester"
+"USA, 2Unit for Experimental Psychiatry, University of Pennsylvania School of Medicine"
+B.S. University of Indonesia
+"Jawaharlal Technological University, Anantapur"
+"Institute of Automation, Chinese Academy of"
+"Key Laboratory of Machine Perception, Peking University, Beijing"
+"State Key Lab of CADandCG, College of Computer Science, Zhejiang University, Hangzhou, China"
+"Toyota College, 2-1 Eisei, Toyota-shi, Aichi, 471-8525 Japan"
+"Dartmouth College, 6211 Sudiko Lab, Hanover, NH 03755, USA"
+"Vision Systems, Inc"
+c(cid:176) Massachusetts Institute of Technology 2006. All rights reserved
+China University of Mining and Technol
+"ECE dept, University of Miami"
+"University of Technology, Guangzhou, 510640, P.R.China"
+"CAS), Institute of Computing Technology, CAS, Beijing 100190, China"
+Mans eld College
+"P.G. Student, SRV Engineering College, sembodai, India"
+"f Neuropsychiatry Section, University of Pennsylvania Medical Center, Hospital of the University of Pennsylvania"
+"School of Computer Science, University of Birmingham, UK"
+"Learning Systems Group, California Institute of Technology"
+Curtin University of Technology
+"The school of Data Science, Fudan University"
+"The Hong Kong University of Science and Technology, Clear Water Bay, Hong Kong"
+"Division of Computer Science, University of California, Berkeley, CA, USA e-mail"
+"School of Engineering, University of Portsmouth, United Kingdom"
+"School of Information Systems, Singapore Management University, Singapore"
+"CollegePark, MD"
+"Gatsby Computational Neuroscience Unit, University College London, London, UK"
+"Faculty of Electrical Engineering, University of Ljubljana, Slovenia"
+"Rowland Institute at Harvard, Cambridge, MA 02142, USA"
+"National Institute of Optics, National Research Council, Arnesano, LE, Italy"
+"Assiut University, Assiut 71515, Egypt"
+"PG Scholar, Hindusthan College of Engineering and Technology, Coimbatore, India"
+"School of Psychology, Cardiff University, Cardiff, CF10 3AT, UK"
+"School of Communication Engineering, Hangzhou Dianzi University, Xiasha Higher Education Zone"
+"University of Toronto, Toronto, ON M5S 2G4, Canada"
+"EECS, Syracuse University, Syracuse, NY, USA"
+Massachusetts Institute of Technology Rapporteur
+"Software, Jiangxi Normal University, Nanchang, China, 4 School of Statistics, Capital University of"
+"Science and Technology, Sun Yat-Sen University, Guangzhou, China, 3 SYSU-CMU Shunde International"
+"Sathyabama University Old Mamallapuram Road, Chennai, India"
+"University of Oradea 410087, Universitatii 1, Romania"
+"Heilongjiang University, College of Computer Science and Technology, China"
+"Faculty of Computer Science, University of A Coru na, Coru na, Spain"
+"University of Cambridge, UK 2Carnegie Mellon University, USA"
+"Sogang University, Seoul 121-742, Republic of Korea"
+"Final Year Student, M.Tech IT, Vel Tech Dr. RR andDr. SR Technical University, Chennai"
+"VISILAB group, University of Castilla-La Mancha, E.T.S.I.Industriales, Avda. Camilo Jose Cela s.n, 13071 Spain"
+"Korean Research Institute of Standards and Science (KRISS), Korea"
+"IIIT-Delhi, India, 2West Virginia University"
+"Key Lab. of Machine Perception, School of EECS, Peking University, China"
+"and education use, including for instruction at the authors institution"
+"School of Computer Science, Sichuan University, Chengdu, China"
+"Graduate School of Information Science, Nagoya University, Japan"
+"Electronics and Computer Science, University of Southampton, Southampton, Hampshire"
+"School of Electronic Engineering and Computer Science, Peking University, 100871, China"
+"Computer Science, Brown University, Providence, RI, USA"
+"School of Data Science, Fudan University, China"
+"Institute of Deep Learning, Baidu Research"
+"Faculty of Information Technology, Vietnam National University of Agriculture, Hanoi 10000, Vietnam"
+"School of Computer Science, Wuhan University, P.R. China"
+"Human Computer Interaction Lab., Samsung Advanced Institute of Technology, Korea"
+"Center for Computational Biomedicine Imaging and Modeling Center, Rutgers University, New Brunswick, NJ"
+"NEC Laboratories America, Inc"
+"b Computer Science, School of Electrical and Data Engineering, University of Technology, Sydney"
+Northwestern University) to T.E. We thank Vincent De Gardelle for helpful comments on an earlier version of
+"University of Kaiserslautern, Gottlieb-Daimler-Str., Kaiserslautern 67663, Germany"
+"Siemens Corporate Research, 755 College Road East, Princeton, NJ"
+"cid:2) Imperial College London, United Kingdom"
+"Computational Biomedicine Laboratory, University of Houston, Houston, Texas 77204, USA"
+Moradabad Institute of Technology
+"Narayana Pharmacy College, Nellore, India"
+"LIP6, UPMC - Sorbonne University, Paris, France"
+"University of California, Los Angeles"
+"Beijing University of Posts and Telecommunications, Beijing, China. 2School of"
+yAristotle University of Thessaloniki
+"College of Information and Communication Engineering, Sungkyunkwan University, Suwon-si, Gyeonggi"
+"School of Information Science and Engineering, Central South University, Changsha"
+University of Washington and Google Inc
+"Center for Automation Research, University of Maryland, College Park, MD 20742, USA"
+"Heilongjiang University, College of Computer Science and Technology, China"
+Vector Institute for Arti cial Intelligence
+"Software, Jiangxi Normal University, Nanchang, China, 4 School of Statistics, Capital University of"
+"Global Big Data Technologies Centre (GBDTC), University of Technology Sydney, Australia"
+"Samsung RandD Institute America - Dallas, 1301 East Lookout Drive, Richardson, TX 75082, USA"
+"Research Institute, 138 Gajeongno, Yuseong-gu, Daejeon, 305-700, Republic of Korea"
+"Institute of Systems and Robotics - University of Coimbra, Portugal"
+Central Mechanical Engineering Research Institute
+"Gatsby Computational Neuroscience Unit, University College London, London, UK"
+A dissertation submitted to the University of Bristol in accordance with the requirements
+"Pompeu Fabra University, Spain"
+"Australian National University, and NICTA"
+"International Islamic University, Islamabad 44000, Pakistan"
+"Center for Cognitive Ubiquitous Computing, Arizona State University, Tempe, AZ, USA"
+"BECS, Aalto University, Helsinki, Finland"
+"NEC Laboratories America, Inc"
+"Caarmel Engineering College, MG University, Kerala, India"
+Malaviya National Institute of Technology
+"Faculty of Computer Science, Mathematics, and Engineering, University of Twente, Enschede, Netherlands"
+"DPDCE, University IUAV, Santa Croce 1957, 30135 Venice, Italy"
+"EECS, Syracuse University, Syracuse, NY, USA"
+"University of Waterloo, Waterloo ON N2L3G1, Canada"
+K. N. Toosi University of
+"University Politehnica of Bucharest, Romania, Address Splaiul Independent ei"
+Massachusetts Institute of Technology Rapporteur
+"Chandigarh Engg. College, Mohali, Punjab, India"
+"Faculty of Electrical Engineering, University of Ljubljana, Tr za ska cesta"
+"Human-friendly Welfare Robotic System Engineering Research Center, KAIST"
+B.S. (Cornell University
+"QCIS Centre, FEIT, University of Technology, Sydney, NSW 2007, Australia"
+"L3S Research Center, Hannover, Germany"
+"College of Computing, Georgia Institute of Technology"
+"School of Electrical and Computer Engineering, RMIT University"
+"School of Computer Science and Technology, Zhejiang University of Technology, Hangzhou 310023, China"
+"Institute for Neural Computation, University of California, San Diego"
+"The Robotics Institute, Carnegie Mellon University, 5000 Forbes Ave, PA, USA"
+yThe University of Tokyo
+"Henan University of Traditional Chinese Medicine, Henan, Zhengzhou, 450000, P.R. China"
+"Shaoguan University, Da Tang Lu"
+"University Hospital Jena, Germany"
+"Massachusetts Institute of Technology, 2Facebook Applied Machine Learning, 3Dartmouth College"
+University of California at Berkeley / ICSI
+"University of Cambridge, Computer Laboratory, UK"
+"School of Computer Science, Sichuan University, Chengdu, China"
+"Federal University Technology Akure, PMB 704, Akure, Nigeria"
+Max Planck Institute f ur biologische Kybernetik
+"Learning Systems Group, California Institute of Technology"
+"Universiteit van Amsterdam, Roeterstraat 15, 1018 WB Amsterdam, The Netherlands"
+"Vision Systems, Inc"
+"ZHAW Datalab, Zurich University of Applied Sciences"
+"School of Computer Science and Engineering, Water Resources University, Hanoi 10000, Vietnam"
+"School of Psychology, University of Central Lancashire"
+B.S. University of Indonesia
+"University of Ljubljana, Faculty of Electrical Engineering, Trzaska 25, 1000 Ljubljana, Slovenia"
+"School of Automation, Beijing University of Posts and Telecommunications, Beijing 100876, China"
+"University of California at Irvine, Irvine, CA"
+"RoboticsResearchGroup, UniversityofOxford, Oxford, UK"
+"University of Queensland, School of ITEE, QLD 4072, Australia"
+"IslamicAzad University, Qazvin, Iran"
+"Environment, Northumbria University, Newcastle, NE1 8ST, United Kingdom"
+"School of Data and Computer Science, Sun Yat-Sen University, China"
+"School of Informatics, University of Edinburgh, UK"
+"United States of America, State University of New York Albany, Albany, New York"
+"Rutgers, The State University of New Jersey, Piscataway, NJ"
+University of California at Berkeley
+Formerly: Texas AandM University
+"University of Pisa, Largo Lucio"
+"Islamic Azad University, Qazvin, Iran"
+"Human and Health Sciences, Swansea University, Swansea, United Kingdom, 3 Abertawe Bro-Morgannwg"
+"Engineering, University of Akron, Akron, OH 44325-3904, USA"
+"Institute for Vision and Graphics, University of Siegen, Germany"
+"Institute of Systems and Robotics, University of Coimbra, Portugal"
+"University of Notre Dame, 2IIIT-Delhi"
+College of Computer Science and Technology
+China University of Mining and Technol
+"Universit Paris-Dauphine, PSL Research University, CNRS, UMR"
+"Synergy Systems, Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences, Guangdong"
+"Interdisciplinary Program in Visual Information Processing, Korea University, Seoul, Korea"
+"School of Electrical and Electronic Engineering, Nanyang Technological University, Singapore. 2Advanced Digital Sciences Center, Singapore"
+Central Mechanical Engineering Research Institute
+"SRM University, Kattankulathur, Chennai-603 203, Tamilnadu, India"
+"Southeast University, Nanjing 211189, China"
+"Iftm University, Moradabad-244001 U.P"
+"Kurukshetra University, Kurukshetra-136 119, Haryana, INDIA"
+"Exploratory Computer Vision Group, IBM T. J. Watson Research Center"
+"CAS), Institute of Computing Technology, CAS, Beijing 100190, China"
+"University of California at San Diego, La Jolla, CA"
+"Engineering, University of Akron, Akron, OH 44325-3904, USA"
+"School of Data and Computer Science, Sun Yat-Sen University, China"
+"Samsung RandD Institute America - Dallas, 1301 East Lookout Drive, Richardson, TX 75082, USA"
+"Division of Computer Science, University of California, Berkeley, CA, USA e-mail"
+"Faculty of Engineering and Natural Sciences, Sabanc University, stanbul, Turkey"
+"School of Computer Science and Technology, Shandong Institute of Business and Technology"
+"School of Computing, Staffordshire University"
+University of Bristol - Explore Bristol Research
+Northeastern University 2Microsoft Research 3City University of New York
+"National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210093, China"
+"Computer Vision Group, L. D. College of Engineering, Ahmedabad, India"
+"Aarhus University, Finlandsgade 22 8200 Aarhus N, Denmark"
+University of Chinese Academy of
+Max Planck Institute f ur biologische Kybernetik
+"ZHAW Datalab, Zurich University of Applied Sciences"
+"The Hong Kong University of Science and Technology, Clear Water Bay, Hong Kong"
+"University of Queensland, School of ITEE, QLD 4072, Australia"
+University of California at Berkeley / ICSI
+"School of Data and Computer Science, Sun Yat-sen University"
+"Grad. School at Shenzhen, Tsinghua University"
+"Faculty of Computer Science, University of A Coru na, Coru na, Spain"
+"Korean Research Institute of Standards and Science (KRISS), Korea"
+"University of Ulsan, Ulsan, Republic of Korea"
+"University of Oradea 410087, Universitatii 1, Romania"
+"Beijing University of Posts and Telecommunications, Beijing, China. 2School of"
+"Research Center CENTIA, Electronics and Mechatronics"
+Palo Alto Research Center (PARC
+"Institute of Software, Chinese Academy of Sciences, Beijing 100190, China"
+"Center for Information Science, Peking University, Beijing 100871, China"
+"bDiscipline of Business Analytics, The University of Sydney Business School"
+"Image Processing Center, Beihang University"
+"School of Computer Science, University of Birmingham, UK"
+"Recognition, Institute of Automation, Chinese Academy of Sciences, Beijing, China"
+"Amsterdam; and 3Center for Experimental Economics and Political Decision Making, University of Amsterdam"
+"Center for Automation Research (CfAR), University of Maryland, College Park, MD"
+"Shaanxi Provincial Key Lab on Speech and Image Information Processing, Northwestern Polytechnical University, Xi an, China"
+"IBM Watson Research Center, Armonk, NY, USA"
+"EECS, Syracuse University, Syracuse, NY, USA"
+PES Modern College of Engg
+"ECE dept, University of Miami"
+"School of Computer Science, The University of Manchester"
+"P.G. Student, SRV Engineering College, sembodai, India"
+"Synergy Systems, Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences, Guangdong"
+"EEMCS, University of Twente Enschede, The Netherlands"
+"Faculty of Electrical Engineering, University of Ljubljana, Tr za ska cesta"
+"RoboticsResearchGroup, UniversityofOxford, Oxford, UK"
+"Key Laboratory of Machine Perception, Peking University, Beijing"
+"State Key Lab of CADandCG, College of Computer Science, Zhejiang University, Hangzhou, China"
+"School of Computer Science and Engineering, Sichuan University, China"
+"School of Info. and Commu. Engineering, Beijing University of Posts and Telecommunications"
+"Engineering and Natural Science, Sabanci University, Istanbul, Turkey"
+"School of Computer Science and Technology, Harbin Institute of Technology, Harbin, China"
+A dissertation submitted to the University of Bristol in accordance with the requirements
+"Lab, University College London, London WC1H 0AP, UK. 3Clinical"
+"Amsterdam; and 3Center for Experimental Economics and Political Decision Making, University of Amsterdam"
+"The Hong Kong University of Science and Technology, Clear Water Bay, Hong Kong"
+"P. O. Box 4500 FI-90014 University of Oulu, Finland"
+"Sogang University, Seoul 121-742, Republic of Korea"
+"National Institute of Optics, National Research Council, Arnesano, LE, Italy"
+"School of Computing Sciences, University of East Anglia, Norwich, UK"
+"Center for Automation Research, University of Maryland, College Park, MD 20740, USA"
+"Center for Automation Research, University of Maryland, College Park, MD 20742, USA"
+University of Washington and Google Inc
+"RoboticsResearchGroup, UniversityofOxford, Oxford, UK"
+"College of Computer and Information Science, Southwest University, Chongqing 400715, China"
+"Software, Jiangxi Normal University, Nanchang, China, 4 School of Statistics, Capital University of"
+"Computer Science, Engineering and Mathematics School, Flinders University, Australia"
+"Dep. of Applied Mathematics and Analysis, University of Barcelona, Spain"
+"Dnyanopasak College Parbhani, M.S, India"
+"ISTMT, University of Tunis El Manar Address: 9, Rue Docteur Zouhe r Safi 1006; 3Faculty of Medicine of Tunis; Address"
+"Korea Electronics Technology Institute, Jeonju-si, Jeollabuk-do 561-844, Rep"
+"School of Information Engineering, Guangdong Medical College, Song Shan Hu"
+"Center for Brain Science, Harvard University, Cambridge, MA 02138 USA"
+"Toyota College, 2-1 Eisei, Toyota-shi, Aichi, 471-8525 Japan"
+"M.Tech Scholar, MES College of Engineering, Kuttippuram"
+"Medical Research Council Human Genetics Unit, Institute of Genetics and Molecular"
+"School of Computer Science and Technology, Zhejiang University of Technology, Hangzhou 310023, China"
+"National Tsing Hua University, Hsin-Chu, Taiwan"
+"DSP Lab, Sharif University of Technology, Tehran, Iran"
+"School of Computer Science and Engineering, Water Resources University, Hanoi 10000, Vietnam"
+"EEMCS, University of Twente Enschede, The Netherlands"
+"Computer Vision Group, Friedrich Schiller University of Jena, Germany"
+"School of Computer Science, Carnegie Mellon University, PA, USA"
+"cid:2) Imperial College London, United Kingdom"
+"BECS, Aalto University, Helsinki, Finland"
+"Laboratory, University of Houston, Houston, TX, USA"
+Research Center and Laboratoire
+"Software, Jiangxi Normal University, Nanchang, China, 4 School of Statistics, Capital University of"
+"MES College of Engineering, Kuttippuram"
+"University of the South Paci c, Fiji"
+"Research Institute of Child Development and Education, University of Amsterdam, Utrecht, The"
+"College of Medical Informatics, Chongqing Medical University, Chongqing, China"
+"EECS, Syracuse University, Syracuse, NY, USA"
+"Gatsby Computational Neuroscience Unit, University College London, London, UK"
+"Faculty of Electrical Engineering, University of Ljubljana, Tr za ska cesta"
+"Faculty of Engineering, Ain Shams University, Cairo, Egypt"
+"Visual Computing and Communications Lab, Arizona State University"
+"Research Reports of CMP, Czech Technical University in Prague, No"
+"Robotics Institute, Carnegie Mellon University, USA"
+"Games Studio, Faculty of Engineering and IT, University of Technology, Sydney"
+and the institute of engineering and science
+"School of Electronic Engineering and Computer Science, Peking University, 100871, China"
+"Institute of Computing, University of Campinas (Unicamp), Campinas, Brazil, e-mail: ander"
+"Computer Science Division, The Open University of Israel, Israel"
+University of Washington and Google Inc
+"College of Computing, Georgia Institute of Technology"
+State University of Feira de Santana (UEFS
+"Computer Vision Research Group, COMSATS Institute of Information"
+"Tsinghua-CUHK Joint Research Center for Media Sciences, Technologies and Systems"
+"School of Computer Science, Wuyi University, Jiangmen 529020, China"
+"University of Maryland, College Park; 2Arizona State University; 3Xerox Research Centre"
+"Faculty of Computer Science, University of A Coru na, Coru na, Spain"
+"S J B Institute of Technology, Bangalore, Karnataka, India"
+"University of Tokyo, 4-6-1 Shirokanedai"
+"Technology, Nanjing University of Aero"
+Northwestern University) to T.E. We thank Vincent De Gardelle for helpful comments on an earlier version of
+"LITIS EA 4108-QuantIF Team, University of Rouen, 22 Boulevard Gambetta, 76183 Rouen Cedex, France"
+"School of Informatics, University of Edinburgh, UK"
+"Institute of Industrial Science, The University of Tokyo"
+"Federal University Technology Akure, PMB 704, Akure, Nigeria"
+"University of Kaiserslautern, Gottlieb-Daimler-Str., Kaiserslautern 67663, Germany"
+"Asian Institute of Technology, Pathumthani, Thailand"
+"ECE, National University of Singapore, Singapore"
+"ECE dept, University of Miami"
+Datta Meghe College of Engineering
+"University Hospital Jena, Germany"
+"University of Ljubljana, Faculty of Electrical Engineering, Trzaska 25, 1000 Ljubljana, Slovenia"
+"Interdisciplinary Program in Visual Information Processing, Korea University, Seoul, Korea"
+"Islamic Azad University, Qazvin, Iran"
+"School of Data and Computer Science, Sun Yat-sen University, P.R.China"
+"b School of Business, Reykjavik University, Reykjavik, Iceland"
+"Cognitive Arti cial Intelligence, Utrecht University, Heidelberglaan 6, 3584 CD, Utrecht"
+"Siemens Corporate Research, 755 College Road East, Princeton, NJ"
+"B.S. Abdur Rahman University, Chennai-48, India"
+"School of Data and Computer Science, Sun Yat-Sen University, China"
+"School of Computer Science, The University of Nottingham"
+"Kong Polytechnic University, Kowloon, Hong Kong"
+"cid:63) Faculty of Computing, Information Systems and Mathematics, Kingston University London"
+c(cid:176) Massachusetts Institute of Technology 2006. All rights reserved
+"Research Center E. Piaggio , University of Pisa, Pisa, Italy, 2 Faculty of Psychology, University of Florence, Florence, Italy"
+COMSATS Institute of Information Technology Wah Cantt
+"The Open University of Israel, Israel"
+"State Research Institute of Aviation Systems (GosNIIAS), Moscow, Russia"
+"Robotics Institute, Carnegie Mellon University, Pittsburgh, PA, 15213. USA"
+"Amazon, Inc"
+"Jawaharlal Technological University, Anantapur"
+Institute of Interdisciplinary Studies in Identity Sciences (IISIS
+"Brain and Cognitive Sciences, Massachusetts Institute of Technology, Cambridge, MA, USA"
+"Adobe Systems, Inc., 345 Park Ave, San Jose, CA"
+"aSchool of Technology, University of Campinas"
+"Center for Research in Computer Vision (CRCV), University of Central Florida (UCF"
+"Intelligent Systems Group, Utrecht University, Padualaan 14, 3508 TB, Utrecht"
+"State Key Lab. LIESMARS, Wuhan University, China"
+"School of Computer Science, University of Adelaide, Australia"
+"Cooperative Medianet Innovation Center, Shanghai Jiaotong University"
+"Graduate University of Chinese Academy of Sciences, Beijing 100049, China"
+Digital Media Research Center
+"School of Internet of Things Engineering, Jiangnan University, Wuxi 214122, China"
+"Lab of Science and Technology, Southeast University, Nanjing 210096, China"
+"Institute for Information Technology and Communications (IIKT), Otto-von-Guericke-University"
+Institute of Automatic Control Engineering (LSR
+"Research Center for Learning Science, Southeast University, Nanjing 210096, China"
+"Electrical and Computer Engineering, University of Toronto, M5S 3G4, Canada"
+"Ultra College of Engineering and Technology for Women, India"
+"Y ld z Teknik University, Istanbul, TR"
+"School of Computing Science, Simon Fraser University, Canada"
+"Institute of Child Health, University College London, UK"
+"cid:130) Computer Perception Lab, California State University, Bakersfield, CA 93311, USA"
+"Computer vision and Remote Sensing, Berlin university of Technology"
+"Electrical, Electronics and Automation Section, Universiti Kuala Lumpur Malaysian Spanish Institute"
diff --git a/scraper/reports/institutions_not_found/not-found-4.csv b/scraper/reports/institutions_not_found/not-found-4.csv
new file mode 100644
index 00000000..2e5facbb
--- /dev/null
+++ b/scraper/reports/institutions_not_found/not-found-4.csv
@@ -0,0 +1,1439 @@
+"MIRACL-FS, University of Sfax"
+"Changchun Institute of Optics, Fine Mechanics and Physics, Chinese Academy of Science"
+"Division of Computer Engineering, Chonbuk National University, Jeonju-si, Jeollabuk-do"
+"The Blavatnik School of Computer Science, The Tel-Aviv University"
+"Deparment of Computing, Imperial College London, UK"
+"University of Toronto, 6 Kings College Road, Toronto, ON M5S 3G4 CANADA"
+"SSN College of Engineering, Kalavakkam, Tamil Nadu, India"
+"BRIC, University of North Carolina at Chapel Hill, NC 27599, USA"
+"IIE, Universidad de la Rep ublica, Uruguay. 2ECE, Duke University, USA"
+"SRM University, Kattankulathur, Chennai-603 203, Tamilnadu, India"
+"School of Computer Science, University of Birmingham, UK"
+Stevens Institute of Technology Adobe Systems Inc
+"College of Computers and Information Systems, Umm Al-Qura University, Makkah, KSA"
+"ITCS, Institute for Interdisciplinary Information Sciences, Tsinghua University, Beijing"
+Tomas Bata University in Zl n
+"School of Computing and Info. Sciences, Florida International University"
+"Numediart Institute, University of Mons"
+in the College of Engineering and Computer Science
+"Pursuing M.Tech, Caarmel Engineering College, MG University, Kerala, India"
+The University of Queensland in
+"University Politehnica of Bucharest, Romania, Address Splaiul Independent ei"
+M. Mark Everingham University of Leeds
+"CVIP Lab, University of Louisville, Louisville, KY, USA"
+"School of Psychology, University of Auckland, Auckland, New Zealand"
+"University of Shef eld, Regent Court, 211 Portobello, Shef eld"
+"Babes Bolyai University, 58-60 Teodor Mihali, C333, Cluj Napoca"
+"Faculty of Telecommunications, Technical University, Sofia, Bulgaria"
+"University of Science, VNU-HCMC, Ho Chi Minh city, Vietnam"
+"RoboticsResearchGroup, UniversityofOxford, Oxford, UK"
+"School of Computer Science, Fudan University, Shanghai, China"
+"BECS, Aalto University, Helsinki, Finland"
+"Beihang University 2Gri th University 3University of York, UK"
+"School of Comm. and Info. Engineering, Beijing University of Posts and Telecom., Beijing China"
+Tsinghua-CUHK Joint Research Center for Media Sciences
+Sabanc University
+"Viswajyothi College of Engineering and Technology Kerala, India"
+"Fudan University, 2Microsoft Research Asia, 3University of Maryland"
+"School of Computer Science, University of Nottingham"
+"Advanced Digital Sciences Center (ADSC), University of Illinois at Urbana-Champaign, Singapore"
+"University at Buffalo, SUNY"
+"University of Maryland, Center for Automation Research"
+"MRC Laboratory For Molecular Cell Biology, University College London"
+"IslamicAzad University, Qazvin, Iran"
+KIT University of the State of Baden-W rttemberg and National Laboratory of the Helmholtz Association
+"Research Center E. Piaggio , University of Pisa, Pisa, Italy, 2 Faculty of Psychology, University of Florence, Florence, Italy"
+"University of Zagreb, Faculty of Electrical Engineering and Computing, Unska 3, 10000 Zagreb, Croatia"
+This article was downloaded from Harvard University s DASH
+"Program of Computational Science and Engineering, Bo gazi ci University"
+"Faculty of Mathematics and Computer Science, University of Barcelona, Barcelona, Spain"
+"DISI, University of Trento, Italy"
+College of Information Engineering
+"Faculty of Information Technology, Barrett Hodgson University, Karachi, Pakistan"
+"bUniversity of Nottingham, School of Computer Science, Nottingham, UK"
+"P. O. Box 4500 FI-90014 University of Oulu, Finland"
+University of California at Berkeley / ICSI
+"Intelligent User Interfaces Lab, Ko c University, Turkey"
+"Institute of Computing Technology, Chinese Academy of Sciences, Beijing 100190, China"
+Honda Fundamental Research Labs
+"Queensland Micro- and Nanotechnology Centre and Grif th School of Engineering, Grif th University"
+"Division of IT Convergence, Daegu Gyeongbuk Institute of Science and Technology"
+"University of Zaragoza, Spain"
+"Assam University, Silchar-788011 Assam University, Silchar"
+"School of Computer Software, Tianjin University, 300072 Tianjin, China"
+"University of Alicante, 03690, San Vicente del Raspeig, Alicante, Spain"
+"Grif th University, QLD, Australia"
+"Ecole Polytechnique Federale de Lausanne, Signal Processing Institute"
+"Lab, University College London, London WC1H 0AP, UK. 3Clinical"
+"Institute for Neural Computation, University of California, San Diego, La Jolla, CA"
+"Asian University, Taichung, Taiwan"
+"The Robotics Institute, Carnegie Mellon University, Pittsburgh PA"
+"Sapienza University of Rome, Italy"
+"University of Milano-Bicocca, Italy"
+"Centre for Quantum Computation and Intelligent Systems, University of Technology Sydney, Sydney, Australia"
+Sponsors: Machine Intelligence Research Labs (MIR Labs
+"University of S ao Paulo - USP, S ao Paulo - Brazil"
+"Machine Perception Laboratory, University of California, San Diego"
+"Ross School of Business, University of Michigan, Ann Arbor, MI, USA"
+"University of Barcelona, Gran Via de les Corts Catalanes 585, 08007 Barcelona, Spain"
+University of Cagliari
+"LIP6, UPMC - Sorbonne University, Paris, France"
+"Baidu Research, USA 3John Hopkins University"
+"Computer Science and Software Engineering, The University of Western Australia"
+"Computer Vision Laboratory, The University of Nottingham"
+"L3S Research Center, Hannover, Germany"
+"Science and Intelligence Technology, Shanghai Institutes for Biological Sciences, CAS"
+"School of Computer Science and Technology, Tianjin University, 300072 Tianjin, China"
+"a Institute of Anatomy I, School of Medicine, Friedrich Schiller University, Germany"
+"he University of Hong Kong, Pokfulam"
+"bSchool of Computer Science and Technology, Harbin Institute of Technology, Harbin 150001, China"
+"image being generated by the model, include Active Appearance"
+"Institute of Neuroscience, State Key Laboratory of Neuroscience, CAS Center for Excellence in Brain"
+"Institute of Advanced Technology, Nanjing University of Posts and Telecommunications, Nanjing"
+"Robotics Institute, Carnegie Mellon University, Pittsburgh, PA, 15213. USA"
+"f Neuropsychiatry Section, University of Pennsylvania Medical Center, Hospital of the University of Pennsylvania"
+"School of Computer Science and Information Systems, Birkbeck College, University of London"
+"Queen Mary, University of London"
+University of Sfax
+SUS college of Engineering and Technology
+"School of Psychology, Cardiff University, Cardiff, CF10 3AT, UK"
+"Faculty of Information Technology, Vietnam National University of Agriculture, Hanoi 10000, Vietnam"
+College of Information Science and Engineering
+"Facial expression gures prominently in research on almost every aspect of emotion, including psychophys"
+"Institute for Neural Computation, University of California, San Diego"
+"Proto Labs, Inc"
+Institute of Psychology and Behavioral Sciences
+ATR Interpreting Telecommunications Research Laboratories
+Fraunhofer Institute for Integrated Circuits IIS
+"Chalmers University of Technology, SAFER"
+"Computer Vision Research Group, COMSATS Institute of Information"
+"TNLIST, Tsinghua University, Beijing, 100084, China"
+"School of Computer Science, University of Lincoln, U.K"
+"Suhaila N. Mohammed, Baghdad University, College of Science, Baghdad, Iraq"
+"The School of Electrical and Electronic Engineering, Yonsei University, 134 Shinchon-Dong"
+"St. Xavier s Catholic College of Engineering, Nagercoil, India"
+"Applied computing and mechanics laboratory, Swiss Federal Institute of Technology, 1015 Lausanne, Switzerland"
+"Human-friendly Welfare Robotic System Engineering Research Center, KAIST"
+Language Technologies Institute
+Shaheed Zulfikar Ali Bhutto Institute of Science and Technology Islamabad
+"Doctor of Philosophy in Computer Science at Cardi University, July 24th"
+"Faculty of Electrical Engineering, University of Ljubljana, Tr za ska 25, SI-1000 Ljubljana, Slovenia"
+Northwestern University) to T.E. We thank Vincent De Gardelle for helpful comments on an earlier version of
+"Faculty of Electrical Engineering, University of Ljubljana, Tr za ska cesta 25, SI-1000 Ljubljana, Slovenia"
+"HOD, St. Joseph College of Information Technology, Songea, Tanzania"
+"Birkbeck College, University of London"
+"The Image Processing and Analysis Lab (LAPI), Politehnica University of Bucharest, Romania"
+"School of Communication Engineering, Hangzhou Dianzi University, Xiasha Higher Education Zone"
+Nokia Bell Labs and University of Oxford
+"PG Scholar, Dr.Pauls Engineering College, Villupuram District, Tamilnadu, India"
+"VISLab, EBUII-216, University of California Riverside"
+"University of Waterloo, Waterloo ON N2L3G1, Canada"
+"Robotics Institute, Carnegie Mellon University, Pittsburgh PA"
+"Institute of Information Science, Academia Sinica, Taipei, Taiwan"
+"Computer Science and Engineering, Easwari Engineering College, India"
+"Faculty of Electrical Engineering, University of Ljubljana, Slovenia"
+"Assiut University, Assiut 71515, Egypt"
+"to visually detectable changes in facial appearance, including blushing and tears. These"
+"Institute of Anthropomatics, Karlsruhe Institute of Technology, Germany"
+"Hasan Kalyoncu University, Gaziantep, Turkey"
+"Computer Science Division, The Open University of Israel"
+"Faculty of Computer and Information Science, University of Ljubljana, Ve cna pot 113, SI-1000 Ljubljana"
+"Facebook AI Research, 2Dartmouth College"
+"Computer Science, Engineering and Mathematics School, Flinders University, Australia"
+"Seattle Paci c University, Seattle, WA 98119-1957, USA"
+"Key Lab. of Machine Perception, School of EECS, Peking University"
+"Electrical and Computer Engineering, University of Auckland, New Zealand"
+"The Blavatnik School of Computer Science, Tel Aviv University, Israel"
+Thesis. Rochester Institute of Technology. Accessed from
+cid:63)Stanford University
+"The Robotics Institute, Carnegie Mellon University"
+"School of Computer and Information Engineering, Nanyang Institute of Technology, Henan, Nanyang, 473000, P.R"
+"Institute of Informatics, Istanbul Technical University, Istanbul, 34469, TURKEY"
+State University of Feira de Santana (UEFS
+University of Siegen
+"Neuroscience, Icahn School of Medicine at Mount Sinai, Friedman Brain Institute, New York, NY, United States"
+"M.Tech, Information Technology, Madras Institute of Technology, TamilNadu, India"
+"cid:2)Honda RandD Americas, Inc., Boston, MA, USA"
+tional Taipei University for his help in performing simulations. The author would like to thank Mr. Ming
+"Skolkovo Institute of Science and Technology (Skoltech), Russia"
+"USC IRIS Lab, University of Southern California"
+"Teaching Affairs Office, Chongqing Normal University, Chongqing 401331, China"
+"EEMCS, University of Twente, Netherlands"
+"puter Engineering, National University of Singapore, Singapore (e-mails"
+"School of Computer Science and Engineering, South China University of Technology, Guangzhou, China"
+"Electrical and Computer Engineering, The University of Memphis"
+"Held at R.C.Patel Institute of Technology, Shirpur, Dist. Dhule, Maharastra, India"
+"Robotics Institute, Carnegie Mellon University, Pittsburgh, Pennsylvania"
+"Machine Intelligence Lab (MIL), Cambridge University"
+"Co-Guide, Computer Science, Shah and Anchor Kuttchi Engineering College, Mumbai, India"
+"Nanjing, 210023, China, 4 School of Computer Science and Technology, Nanjing University of Posts and"
+"School of Information Science and Technology, ShanghaiTech University, Shanghai, 200031, China"
+"ISTMT, University of Tunis El Manar Address: 9, Rue Docteur Zouhe r Safi 1006; 3Faculty of Medicine of Tunis; Address"
+"CAS), Institute of Computing Technology, CAS, Beijing, 100190, China"
+"ACRV, The Australian National University University of Oxford QUVA Lab, University of Amsterdam"
+"Technology, University of Oradea 410087, Universitatii 1, Romania"
+"University Health Board, Swansea, United Kingdom"
+facultyofmathematicsandnaturalsciencesarti cialintelligence22-09-2016|1ATitleA.UthorRijksuniversiteitGroningenSomeFaculty
+"Institute for Computer Graphics and Vision, Graz University of Technology"
+"School of Computer Engineering, Sejong University, Seoul, Korea"
+"Anatomy and Genetics, University of Oxford, Oxford, United Kingdom; 3The Wellcome"
+"Calgary, 2500 University Dr., N.W. Calgary, AB, Canada T2N 1N4. Tel"
+"Governance, Keio University"
+"cid:63)Queen Mary University of London, Imperial College London"
+"RGPV University, Indore"
+"Shaanxi Provincial Key Lab on Speech and Image Information Processing, Northwestern Polytechnical University, Xi an, China"
+Honda Research Institute
+"cid:5)School of Electrical and Computer Engineering, Purdue University, West Lafayette, IN. 47907, USA"
+Sarhad University of Science and Information Technology
+"University of Twente, EEMCS, Netherlands"
+"School of Computer Science and Technology, Nanjing University of Science and Technology, China"
+"University of Illinois, Urbana-Champaign University of California, San Diego"
+"Aristotle University of Thessaloniki, GR-541 24 Thessaloniki, Greece"
+"School of Data of Computer Science, Sun Yat-sen University, P.R. China"
+"Michigan State University, East Lansing, MI 48824, U.S.A"
+"Rutgers, The State University of New Jersey, Piscataway, NJ"
+"Asian Institute of Technology, Pathumthani, Thailand"
+"Center for Arti cial Vision Research, Korea University"
+"Korea Electronics Technology Institute, 203-103 B/D 192, Yakdae-Dong"
+"Technical University of Munich, Germany"
+"City University of Hong Kong, Kowloon 999077, Hong Kong, China"
+"Laboratory, University of Houston, Houston, TX, USA"
+"School of Computer Science, University of Lincoln, United Kingdom"
+"School of Electrical and Computer Engineering, RMIT University, Melbourne, Australia"
+"Ponti cal Catholic University of Rio de Janeiro, Brazil"
+"School of Computer Science, Wuyi University, Jiangmen 529020, China"
+Institute of Deep Learning
+"Institute of Information Science, Beijing Jiaotong University, Beijing 100044, P.R. China"
+"Computational Biomedicine Laboratory, University of Houston, Houston, Texas 77204, USA"
+"FI-90014 University of Oulu, Finland"
+"Computer Vision Group, L. D. College of Engineering, Ahmedabad, India"
+"Arts Media and Engineering, Arizona State University"
+University of Beira Interior
+"Institute of Biochemistry, University of Balochistan, Quetta"
+Ho Chi Minh City University of
+"Asia University, Taichung, Taiwan"
+"National Kaohsiung University of Applied Sciences, Kaohsiung, Kaohsiung, Taiwan, ROC"
+"USC Institute for Robotics and Intelligent Systems (IRIS), Los Angeles, CA"
+"New York University Shanghai, 1555 Century Ave, Pudong"
+Facebook 4Texas AandM University 5IBM Research
+"Institute of Experimental Biology of Polish Academy of Sciences, Warsaw, Poland"
+yAristotle University of Thessaloniki
+"School of Electronics and Information Engineering, Tongji University, Caoan Road 4800, Shanghai"
+"CVSSP, University of Surrey"
+ShahidBeheshti University
+"Electronics Engineering, National Institute of Technical Teachers"
+"Sichuan Fine Arts Institute, Chongqing, China"
+"tion [11, 10] is making possible very large scale visual recognition both in my own ongoing work, including"
+"Institute for Electronics, Signal Processing and Communications"
+National Institute of Development Administration
+"EEMCS, University of Twente, The Netherlands"
+UvA-DARE is a service provided by the library of the University of Amsterdam (http://dare.uva.nl
+"College of Sciences, Northeastern University, Shenyang 110819, China"
+"and bDivision of Engineering and Applied Sciences 136-93, California Institute of Technology, Pasadena, CA"
+Dietrich College of Humanities and Social Sciences
+"School of Electronic and Information Engineering, Tongji University, Shanghai, China"
+"School of Information Technology and Electrical Engineering, The University of Queensland"
+"Institute of Systems and Robotics, University of Coimbra, Portugal"
+"Synergy Systems, Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences, Guangdong"
+"Center for Automation Research, University of Maryland"
+"Japan, 2 Center for Special Needs Education, Nara University of Education, Nara-shi, Nara"
+University of Science and Technology Beijing
+"Computational Biomedicine Lab, University of Houston, TX, USA"
+Nam k Kemal University
+University of Colorado at Colorado Springs
+University of Freiburg
+"DESTEC, FLSHR Mohammed V University-Agdal, Rabat, Morocco"
+"College of Science, Menou a University, Menou a 32721, Egypt"
+Interactive and Digital Media Institute
+"College of Computer and Information Science, Northeastern University, MA, USA"
+"School of Computer and Information, Hefei University of Technology, Hefei"
+"Research Groups on Intelligent Machines, University of Sfax, Sfax 3038, Tunisia"
+National Institute of Advanced Industrial
+USC Institute for Creative Technologies
+"University of Canberra, Australia, Data61 - CSIRO and ANU, Australia"
+Institute for Vision Systems Engineering
+"DCMandB, University of Michigan, Ann Arbor, USA 4 SCS, Carnegie Mellon University, Pittsburgh, USA"
+"School of Electrical and Electronic Engineering, Nanyang Technological University, Singapore"
+eBay Research Labs
+"Research Institute of Child Development and Education, University of Amsterdam, Utrecht, The"
+Howard Hughes Medical Institute (HHMI
+"Research Institute of Shenzhen, Wuhan University, Shenzhen, China"
+"and Modeling, Rutgers University"
+"Institute of Computing, University of Campinas (Unicamp), Campinas, Brazil, e-mail: ander"
+"National Institute of Informatics, Japan"
+"Charotar University of Science and Technology, Changa, India"
+UniversityofMaryland
+"Science and Technology, Sun Yat-Sen University, Guangzhou, China, 3 SYSU-CMU Shunde International"
+"Caarmel Engineering College, MG University, Kerala, India"
+"Universitat Polit cnica de Catalunya, 6 University of Vigo, 7 LIUM, University of Maine"
+"Computer Science and Engineering, University of Michigan, Ann Arbor"
+"J. P. College of Engineering, India"
+"Universite Catholique de Louvain, Louvain-la-Neuve, Belgium, 4 The Queensland Brain Institute, The"
+"Center for Machine Vision and Signal Analysis (CMVS), University of Oulu, Finland"
+"iCV Research Group, Institute of Technology, University of Tartu, Tartu 50411, Estonia"
+"Engineering Institute, Autonomous University of Baja California, Blvd. Benito Ju rez"
+Technical University of Kaiserslautern
+"M.Tech Student, SSG Engineering College, Odisha, India"
+"learning. As a result of this research, many applications, including video surveillance systems"
+Taizhou University
+"Goldsmiths, University of London, London, UK"
+"China Mobile Research Institute, Xuanwu Men West Street, Beijing"
+"Human and Health Sciences, Swansea University, Swansea, United Kingdom, 3 Abertawe Bro-Morgannwg"
+"Multimodal Computing and Interaction, Saarland University, Germany"
+"School of Computer Science, Northwestern Polytechnical University, China"
+"Intelligent Systems Group, University of Groningen, The Netherlands"
+"Center for Automation Research, UMIACS, University of Maryland, College Park, MD 20742 USA"
+"College of software, Chongqing University of Posts and Telecommunications Chongqing"
+"Vision and Sensing, HCC Lab, ESTeM, University of Canberra"
+"Computer Science and Arti cial Intelligence Laboratory, Massachusetts Institute of Technology, Cambridge, MA, USA"
+"P.A. College of Engnineering, Mangalore"
+"Beijing Institute of Technology, Beijing 100081, PR China"
+"M.H Saboo Siddik College of Engineering, University of Mumbai, India"
+"School of Computer Science, Carnegie Mellon University, USA"
+"Computer Science Division, The Open University of Israel, Israel"
+Achariya college of Engineering Technology
+"Courant Institute of Mathematical Sciences and Google Research, New York, NY"
+High Institute of Medical Technologies
+"cid:63) Faculty of Computing, Information Systems and Mathematics, Kingston University London"
+Sun Yat-Sen (Zhongshan) University
+"School of Electrical and Information Engineering, The University of Sydney, Sydney, NSW, Australia, 2 Sydney Medical"
+"Institute for Infocomm Research, Singapore"
+"ECSE, Rensselaer Polytechnic Institute, Troy, NY"
+"National Institute of Optics, National Research Council, Arnesano, LE, Italy"
+"Xi an Jiaotong University, China"
+"North Dakota State University, Fargo, ND58105, USA"
+University of Twente 2Dublin City University 3Oxford University
+"School of Electrical, Computer and Energy Engineering, Arizona State University"
+"Dr.Pauls Engineering College, Villupuram District, Tamilnadu, India"
+"tum Computation and Intelligent Systems, Faculty of Engineering and Information Technology, University"
+"Machine Intelligence Group, School of Computer Science, Indian Institute of Information Technology and"
+"University of Technology, Guangzhou, 510640, P.R.China"
+"Queen Mary, University of London, E1 4NS, UK"
+University of Wollongong. For further information contact the UOW
+"Engineering, Ton Duc Thang University, 19 Nguyen Huu Tho Street, Ho Chi Minh City, Vietman"
+College of Computer Science and Technology
+"Coordinated Science Lab, University of Illinois at Urbana-Champaign"
+Gangnung-Wonju National University
+"Institute of Digital Media, Peking University, Beijing, 100871, China"
+"Aristotle University of Thessaloniki, Box 451, 54124 Thessaloniki, Greece"
+"cid:2)Imperial College London, U.K"
+"School of Computer Science and Engineering, Southeast University, Nanjing 211189, China"
+"Cognitive Arti cial Intelligence, Utrecht University, Heidelberglaan 6, 3584 CD, Utrecht"
+"School of Computer Science, Northwestern Polytechnical University, P.R.China"
+"Friedrich Schiller University, D-07740 Jena"
+Mahatma Gandhi Institute of Technology
+"Research Center for Information Technology Innovation, Academia Sinica, Taipei, Taiwan"
+"Shenzhen Key Lab of CVPR, Shenzhen Institutes of Advanced Technology"
+"Max Planck Institute for Informatics, Saarbr ucken, Germany (MPI-INF.MPG.DE"
+B.S. University of Indonesia
+"University of T ubingen, T ubingen, Germany"
+"School of Computer Science, The University of Adelaide, Australia"
+"University of Amsterdam, Amsterdam, the Netherlands, 2 Leiden University"
+Honda Research Institute USA
+"Rm 1365, Stanford University, 401 Quarry Road, Stanford, CA"
+"Machine Intelligence Laboratory, College of Computer Science, Sichuan University"
+"University of Nottingham, UK, School of Computer Science"
+"Massachusetts Institute of Technology, 2Facebook Applied Machine Learning, 3Dartmouth College"
+"Sogang University, Seoul 121-742, Republic of Korea"
+Imperial College London / Twente University
+"National University of Singapore, 2Shanghai Jiao Tong University"
+"School of Software, Tsinghua University, Beijing 100084, China"
+"SRV Engineering College, sembodai, india"
+Central Mechanical Engineering Research Institute
+"Shenzhen Key Laboratory of Spatial Smart Sensing and Service, Shenzhen University, P.R. China"
+"Gayathri.S, M.E., Vins Christian college of Engineering"
+"Universiteit van Amsterdam, Roeterstraat 15, 1018 WB Amsterdam, The Netherlands"
+"CVSSP, University of Surrey, UK"
+Sanghvi Institute of Management and Science
+"Multimedia Laboratory, The Chinese University of Hong Kong, Hong Kong"
+"U.S. Army Research Laboratory, 2800 Powder Mill Road, Adelphi, MD USA"
+"National Laboratory on Machine Perception, Peking University, Beijing, P.R. China"
+"Computer Science, Beijing Institute of Technology, Beijing 100081, P.R.China"
+College of Information and Electrical Engineering
+"aResearch Scholar, Anna University, Chennai, Inida"
+"School of Computer Science, Tianjin University"
+"Johns Hopkins University, 3400 N. Charles Street, Baltimore, MD 21218, USA"
+"b DEI - University of Padova, Via Gradenigo, 6 - 35131- Padova, Italy"
+"Amsterdam; and 3Center for Experimental Economics and Political Decision Making, University of Amsterdam"
+"Moscow Institute of Physics and Technology, Institutskiy per., 9, Dolgoprudny, 141701, Russia"
+University of California at San Diego
+"Engineering, University of Akron, Akron, OH 44325-3904, USA"
+"DIEI, University of Perugia, Italy"
+"Michigan State University, 3115 Engineering Building"
+"Publication details, including instructions for authors and subscription"
+"Max Planck Institute for Informatics, Saarbr ucken, Germany"
+"iCV Research Group, Institute of Technology, University of Tartu, Tartu 50411, Estonia"
+"University College London, 12 Queen Square, London WC1N 3BG, UK"
+"Jawaharlal Technological University, Anantapur"
+SAMSI and Duke University
+"School of Electrical and Information Engineering, Xi an Jiaotong University, Xi an, China"
+"MISC Laboratory, Constantine 2 University, Constantine, Algeria"
+"SRV Engineering College, sembodai, india"
+"Institute of Computing Technology, CAS"
+Institute of control science and engineering
+National Institute of Informatics
+"Centre for Vision, Speech and Signal Processing, University of Surrey, Guildford, GU2 7XH"
+Institute for Robotics and Intelligent Systems
+St. Anne s College
+"School of Mathematics and Statistics, Xi an Jiaotong University, P. R. China"
+"DSP Lab, Sharif University of Technology, Tehran, Iran"
+"J. P. College of Engineering, India"
+"National Laboratory on Machine Perception, Peking University, Beijing, P.R. China"
+SUS college of Engineering and Technology
+"methods, including graph matching, optical- ow-based"
+"Turin, Italy, 3 Faculty of Humanities, Research Unit of Logopedics, Child Language Research Center, University of Oulu, Oulu"
+"Michigan State University, NEC Laboratories America"
+"Polytechnic University of Milan, Milan, 20156, Italy, 3 Applied Electronics"
+"The Blavatnik School of Computer Science, Tel Aviv University, Israel"
+"School of Information Engineering, Guangdong Medical College, Song Shan Hu"
+"Nanjing University of Information Science and Technology, Nanjing, 210044, China"
+M. Mark Everingham University of Leeds
+University of California at San Diego
+"Institute of Computing Technology, Chinese Academy of Sciences, Beijing 100190, China"
+"PG Scholar, Dr.Pauls Engineering College, Villupuram District, Tamilnadu, India"
+"Cognitive Brain Research Unit, Institute of Behavioural Sciences, University of"
+"Cornell University, Ithaca, NY, U.S.A"
+"Pathological anxiety is associated with disrupted cognitive processing, including working memory and"
+"Carnegie Mellon University, Electrical and Computer Engineering"
+"Key Laboratory of Embedded System and Service Computing, Ministry of Education, Tongji University, Shanghai, China"
+University of Sfax
+"Arts, Commerce and Science College, Gangakhed, M.S, India"
+"Sejong University, 98 Gunja, Gwangjin, Seoul 143-747, Korea"
+"Education, Yunnan Normal University, Kunming, China"
+"Arti cial Intelligence Institute, China"
+"bFaculty of Engineering, International Islamic University, Jalan Gombak, 53100 Kuala Lumpur, Malaysia"
+"University of Zagreb, Faculty of Electrical Engineering and Computing, Unska 3, 10000 Zagreb, Croatia"
+"AncyRijaV, Author is currently pursuing M.E (Software Engineering) in Vins Christian College of"
+"The Chinese University of Hong Kong, HKSAR, China"
+"Institute for Infocomm Research, A*STAR, Singapore"
+U.S. Army Research Laboratory
+"a Institute of Anatomy I, School of Medicine, Friedrich Schiller University, Germany"
+"Texas AandM University, College Station, TX, USA"
+"Technological Educational Institute of Athens, 12210 Athens, Greece"
+"National University of Singapore Research Institute, Suzhou, China"
+"Faculty of Science and Technology, University of Macau"
+Nam k Kemal University
+"Doctor of Philosophy in Computing of Imperial College, February"
+"Shenzhen key lab of Comp. Vis. and Pat. Rec., Shenzhen Institutes of Advanced Technology, CAS, China"
+"CNRS , Institute of Automation of the Chinese Academy of Sciences"
+"c Cardiff Business School, Cardiff University, Cardiff, United Kingdom"
+"Psychology, American University"
+"H. He, Honkong Polytechnic University"
+College of Computer Science and Information Sciences
+College of Information and Electrical Engineering
+"ISTMT, Laboratory of Research in Biophysics and Medical Technologies LRBTM Higher Institute of Medical Technologies of Tunis"
+"recognition, such as human computer interfaces and e-services, including e-home"
+"Vision and Sensing, HCC, ESTeM, University of Canberra"
+"Laboratory for Human Brain Dynamics, RIKEN Brain Science Institute (BSI), 2-1 Hirosawa, Wakoshi, Saitama, 351-0198, Japan"
+"School of Software, Tianjin University"
+High Institute of Medical Technologies
+Funding was provided by the U.S. National Institutes of Mental
+"Center for Sensor Systems (ZESS) and Institute for Vision and Graphics#, University of Siegen"
+"Doctoral School of Automatic Control and Computers, University POLITEHNICA of Bucharest, Romania"
+A Thesis submitted to McGill University in partial fulfillment of the requirements for the
+at the University of Central Florida
+"School of Computing and Communications University of Technology, Sydney"
+"additional details of DCS descriptors, including visualization. For extending the evaluation"
+"University College London, 12 Queen Square, London WC1N 3BG, UK"
+Language Technologies Institute
+"System Research Center, NOKIA Research Center, Beijing, 100176, China"
+"Computer Vision for Human Computer Interaction, Karlsruhe Institute of Technology, Germany"
+"Leiden Institute of Advanced Computer Science, Leiden University, The Netherlands"
+"School of Computer Science, Nanjing University of Science and Technology"
+"National Lab of Pattern Recognition, Institute of Automation"
+"Virudhunagar Hindu Nadars Senthikumara Nadar College, Virudhunagar"
+"Indraprastha Institute of Information Technology (Delhi, India"
+"Machine Perception Laboratory, University of California, San Diego"
+"School of Optics and Electronics, Beijing Institute of Technology, Beijing"
+"Institute for Interdisciplinary Information Sciences, Tsinghua University, Beijing, China"
+"Computational Medicine Laboratory, Institute of Computer Science, Foundation for Research and Technology - Hellas"
+"Institute of Automation, Chinese Academy of Sciences, 100190, Beijing, P.R.C"
+"State Key Laboratory of CAD and CG, ZHE JIANG University, HangZhou, 310058 China"
+"Priyadarshini College of Engg, Nagpur, India"
+"Facial expression gures prominently in research on almost every aspect of emotion, including psychophys"
+"National University of Computer and Emerging Sciences (NUCES-FAST), Islamabad, Pakistan"
+"Center for Healthy Aging, University of"
+"J. P. College of Engineering, India"
+Vietnam National University Ho Chi
+"Faculty of Computer and Information Science, University of Ljubljana, Ve cna pot 113, SI-1000 Ljubljana"
+"Nanjing University of Information Science and Technology, Nanjing, 210044, China"
+UNIVERSITY OF OULU P.O. Box 8000 FI-90014 UNIVERSITY OF OULU FINLAND
+"Hector Research Institute of Education Sciences and Psychology, T ubingen"
+"Key Laboratory of Pervasive Computing (Tsinghua University), Ministry of Education"
+B.S./M.S. Brandeis University
+"a Section of Biomedical Image Analysis, University of Pennsylvania, 3600 Market, Suite 380, Philadelphia, PA 19104, USA"
+Carnegie Melon University
+"Quanti ed Employee unit, Finnish Institute of Occupational Health"
+"School of Computer Engineering, Nanyang Technological University, Singapore"
+"University of Caen, France"
+USC Institute for Creative Technologies
+"School of Computer Science, Carnegie Mellon University, Pittsburgh, PA 15213, USA"
+"State University of Rio de Janeiro, Brazil"
+"Institute of Automation, Chinese Academy of Sciences (CASIA"
+"Faculty of Computer, Khoy Branch, Islamic Azad University, Khoy, Iran"
+"Science and Intelligence Technology, Shanghai Institutes for Biological Sciences, CAS"
+"of Psychology, Princeton University, Princeton, NJ 08540. E-mail"
+"Division of Electronics and Electrical Engineering, Dongguk University, 30 Pildong-ro 1-gil, Jung-gu"
+"Faculty of Electrical Engineering, Mathematics and Computer Science, University"
+"National Chung Cheng University, Chiayi, Taiwan, R.O.C"
+"Institute for Infocomm Research, Agency for Science, Technology and Research, Singapore"
+"School of Psychology, The University of New South Wales, Sydney, Australia, 2 School of Psychology"
+"Institute of Digital Media, Peking University, Beijing 100871, China"
+ATR Human Information Processing Research Laboratories
+"School of Psychology, University of Auckland, Auckland, New Zealand"
+"Deparment of Computing, Imperial College London, UK"
+"Doctor of Philosophy in Computer Science at Cardi University, July 24th"
+"IIIS, Tsinghua University, Beijing, China"
+"Faculty of Mathematics and Computer Science, University of Barcelona, Barcelona, Spain"
+"Ph.D Research Scholar, Karpagam University, Coimbatore, Tamil Nadu, India"
+"Lecturer, Amity school of Engineering and Technology, Amity University, Haryana, India"
+"the Chinese University of Hong Kong, Shatin, Hong Kong"
+"Carnegie Mellon University, CyLab Biometrics Center, Pittsburgh, PA, USA"
+"2 School of Computing, National University of Singapore"
+"ISLA Lab, Informatics Institute, University of Amsterdam"
+"MIT, McGovern Institute, Center for Brains, Minds and Machines"
+"puter Engineering, National University of Singapore, Singapore (e-mails"
+"Queen Mary, University of London, E1 4NS, UK"
+"MCA Student, Sri Manakula Vinayagar Engineering College, Pondicherry"
+"of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, China"
+"cid:63)Sharif University of Technology, University College London, Queen Mary University of London"
+"Visual Geometry Group, University of Oxford, UK"
+"Publication details, including instructions for authors and subscription information"
+"PG scholar, Communication Systems, Adhiyamaan College of Engineeing, Hosur, (India"
+"College of Science, Baghdad University, Baghdad, Iraq"
+"Institute for Infocomm Research, Agency for Science, Technology and Research (A*STAR), Singapore"
+"Graduate School at Shenzhen, Tsinghua University, Shenzhen 518055, China"
+"and bDivision of Engineering and Applied Sciences 136-93, California Institute of Technology, Pasadena, CA"
+"Intelligent Systems Lab Amsterdam, University of Amsterdam"
+"School of Electromechanical Engineering, Guangdong University of Technology, 510006 Guangzhou, China"
+"Carnegie Mellon University, CyLab Biometrics Center, Pittsburgh, PA, USA"
+"Australian National University, 2Smart Vision Systems, CSIRO, 3CVLab, EPFL"
+"PG scholar, Communication Systems, Adhiyamaan College of Engineeing, Hosur, (India"
+University of North Carolina Wilmington in Partial Ful llment
+UNIVERSITY OF OULU P.O. Box 8000 FI-90014 UNIVERSITY OF OULU FINLAND
+"School of Computer Engineering, Shahrood University of Technology, Shahrood, IRAN"
+"Graduate Institute of Networking and Multimedia, National Taiwan University, Taipei, Taiwan"
+"Northumbria University, Newcastle Upon-Tyne NE21XE, UK"
+B.S.Abdur Rahman University B.S.Abdur Rahman University
+"Center for Machine Vision and Signal Analysis (CMVS), University of Oulu, Finland"
+"National Chung Cheng University, Chiayi, Taiwan, R.O.C"
+"Aristotle University of Thessaloniki, Box 451, 54124 Thessaloniki, Greece"
+"School of Applied Statistics, National Institute of Development Administration, Bangkok, Thailand"
+"2 School of Computing, National University of Singapore"
+"University of Maryland, College Park, USA"
+"Institute of Computer Science and Technology, Chongqing University of Posts and"
+"Lomonosov Moscow State University, 2Video Analysis Technologies, LLC"
+"HOD, St. Joseph College of Information Technology, Songea, Tanzania"
+"a Section of Biomedical Image Analysis, University of Pennsylvania, 3600 Market, Suite 380, Philadelphia, PA 19104, USA"
+"School of Behavioral and Brain Sciences, The University of Texas at Dallas, USA"
+"University Technology of Malaysia, 81310 Skudai, Johor, Malaysia"
+"Sri SidarthaInstitute of Technology, Tumkur"
+"and bDivision of Engineering and Applied Sciences 136-93, California Institute of Technology, Pasadena, CA"
+Bharath Institute of Science and Technology
+"Institute ofInformation Science, Academia Sinica, Taipei, Taiwan"
+"the Chinese University of Hong Kong, Shatin, Hong Kong"
+"School of Computer Science, Carnegie Mellon University, 15213, USA"
+"University of Siena, Siena, Italy"
+"School of Computer, Beijing Institute of Technology, Beijing, China"
+"ICT-ISVISION Joint RandD Laboratory for Face Recognition, Institute of Computer Technology, The Chinese Academy of Sciences"
+"Institute for Infocomm Research, Agency for Science, Technology and Research (A*STAR), Singapore"
+"Indraprastha Institute of Information Technology (Delhi, India"
+"State University of Rio de Janeiro, Brazil"
+"Montefiore Institute, University of Li ge, 4000 Li ge, Belgium"
+"Quanti ed Employee unit, Finnish Institute of Occupational Health"
+"College of Science, Menou a University, Menou a 32721, Egypt"
+"Institute of Computing Technology, CAS, Beijing, 100190, China"
+"Deparment of Computing, Goldsmiths, University of London, UK"
+"Trust Centre for Human Genetics, University of Oxford, Oxford, United Kingdom"
+"Engineering, Ton Duc Thang University, 19 Nguyen Huu Tho Street, Ho Chi Minh City, Vietman"
+"College of Science, Baghdad University, Baghdad, Iraq"
+Vietnam National University Ho Chi
+"Nanjing University of Information Science and Technology, Nanjing, 210044, China"
+"Johns Hopkins University, 3400 N. Charles Street, Baltimore, MD 21218, USA"
+"Intelligent Information Engineering and Science, Doshisha University, Kyoto, Japan"
+"Language Technology Institute, Carnegie Mellon University, Pittsburgh, PA, USA"
+"NICTA, and Australian National University"
+"Ross School of Business, University of Michigan, Ann Arbor, MI, USA"
+"Technological Educational Institute of Athens, 12210 Athens, Greece"
+"ISLA Lab, Informatics Institute"
+"Minia University, Egypt"
+"Computer Vision Laboratory, Link oping University, SE-581 83 Link oping, Sweden"
+Science and the Robotics Institute at Carnegie Mellon University. This study was supported in part
+Link to publication in University of Groningen/UMCG research database
+"GSCM-LRIT, Faculty of Sciences, Mohammed V University-Agdal, Rabat, Morocco"
+"applications has different requirements, including: processing time (off-line, on-line, or real-time"
+"School of Mathematical Science, Dalian University of Technology, Dalian, China"
+Institute of Deep Learning
+"Courant Institute of Mathematical Sciences and Google Research, New York, NY"
+"National Chung Cheng University, Chiayi, Taiwan, R.O.C"
+"College of Electronics and Information, Northwestern Polytechnic University"
+"cid:3) School of Software, Tsinghua University"
+"Hunan Provincial Key Laboratory of Wind Generator and Its Control, Hunan Institute of Engineering, Xiangtan, China"
+"of Engineering and Information Technology, University of Technology, Sydney, Australia"
+"Augmented Vision Lab, Technical University Kaiserslautern, Kaiserslautern, Germany"
+"versity of Amsterdam, Amsterdam and University of Trento"
+"Publication details, including instructions for authors and subscription information"
+"Lister Hill National Center for Biomedical Communications, National Library of Medicine, National Institutes of Health"
+"IES College of Technology, Bhopal"
+"Assistant Lecturer, College of Science, Baghdad University, Baghdad, Iraq"
+"Springer Science + Business Media, Inc. Manufactured in The Netherlands"
+Institute for Computer Graphics and Vision
+"IIIS, Tsinghua University, Beijing, China"
+"Key Lab of Intelligent Information Processing of Chinese Academy of Sciences (CAS), Institute of"
+"MCA Student, Sri Manakula Vinayagar Engineering College, Pondicherry"
+"2 School of Computing, National University of Singapore"
+Economy (MKE) and the Korea Evaluation Institute of Industrial Technology (KEIT
+"Dean, K.S.Rangasamy College of Technology, Namakkal, TamilNadu, India"
+"Hasan Kalyoncu University, Gaziantep, Turkey"
+"Faculty of Mathematics and Computer Science, University of Barcelona, Barcelona, Spain"
+"and especially light angle, drastically change the appearance of a face [1]. Facial expressions, including"
+A dissertation submitted to the Faculty of the University of Delaware in partial
+"EEMCS, University of Twente, Netherlands"
+"Arts Media and Engineering, Arizona State University"
+"of Psychology, Princeton University, Princeton, NJ 08540. E-mail"
+University of Cambridge Computer Laboratory
+B.S.Abdur Rahman University B.S.Abdur Rahman University
+"Vision and Fusion Lab, Karlsruhe Institute of Technology KIT, Karlsruhe, Germany"
+No Institute Given
+"Institute of Computing Technology, Chinese Academy of Sciences"
+"The Chinese University of Hong Kong, HKSAR, China"
+"Institute of Digital Media, Peking University, Beijing, 100871, China"
+"Visual Geometry Group, University of Oxford, UK"
+"GREYC Laboratory, ENSICAEN - University of Caen Basse Normandie - CNRS"
+"Psychonomic Society, Inc"
+"B. Tech., Indian Institute of Technology Jodhpur"
+"QCIS, University of Technology, Sydney"
+"a Institute of Anatomy I, School of Medicine, Friedrich Schiller University, Germany"
+"Science and Intelligence Technology, Shanghai Institutes for Biological Sciences, CAS"
+"Division of IT Convergence, Daegu Gyeongbuk Institute of Science and Technology"
+"ITCS, Institute for Interdisciplinary Information Sciences, Tsinghua University, Beijing"
+"School of Computer Science, Fudan University, Shanghai 200433, China"
+"School of Computer Science, Carnegie Mellon University, Pittsburgh, PA 15213, USA"
+"ESTeM, University of Canberra"
+"Center for Machine Vision and Signal Analysis, University of Oulu, Finland"
+Stevens Institute of Technology Adobe Systems Inc
+NSS College of Engineering
+"J. P. College of Engineering, India"
+"School of EECS, Queen Mary University of London"
+"Graduate Institute ofNetworking and Multimedia, National Taiwan University, Taipei, Taiwan"
+"National Institute of Advanced Industrial Science and Technology (AIST), Tsukuba 305-8560, Japan"
+"Research Scholar (M.Tech, IT), Institute of Engineering and Technology"
+"Faculty of Informatics, E otv os Lor and University, Budapest, Hungary"
+"M.Tech, Sri Sunflower College of Engineering and Technology, Lankapalli"
+"University of Balochistan, Quetta"
+"Multimedia University (MMU), Cyberjaya, Malaysia"
+eBay Research Labs
+"Centre for Vision, Speech and Signal Processing, University of Surrey, Guildford, GU2 7XH"
+"Graduate School at Shenzhen, Tsinghua University, Shenzhen 518055, China"
+Engineering Chaoyang University Nankai Institute of
+"School of Automation Engineering, University of Electronic Science and Technology of China, No. 2006, Xiyuan Ave"
+"The Chinese University of Hong Kong, Hong Kong SAR, China"
+"Center for Biometrics and Security Research and National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences"
+AristotleUniversityofThessaloniki
+"Vision and Sensing, HCC Lab, ESTeM, University of Canberra"
+"Sri Chandrasekharendra Saraswathi Viswa Mahavidyalaya University, Kanchipuram, India"
+"Graduate School of Information Science, Nara Institute of Science and Technology, Ikoma-shi, Nara"
+"Government College of Engineering, Aurangabad [Autonomous"
+Carnegie Melon University
+"School of Applied Statistics, National Institute of Development Administration, Bangkok, Thailand"
+"University of Amsterdam, Amsterdam, the Netherlands, 2 Leiden University"
+"State Key Laboratory for Novel Software Technology, Nanjing University, China"
+"Imperial College, South Kensington Campus, London SW7 2AZ, UK"
+USC Institute for Creative Technologies
+A DISSERTATION SUBMITTED TO THE UNIVERSITY OF MANCHESTER
+"Pattern Recognition and Bioinformatics Group, Delft University of Technology, The Netherlands"
+"Center for Sensor Systems (ZESS) and Institute for Vision and Graphics#, University of Siegen"
+"University of Amsterdam; 2Amsterdam Brain and Cognition Center, University of"
+Institute for Robotics and Intelligent Systems
+"Research Center for Learning Science, Southeast University, Nanjing, China"
+University of Science and Technology Beijing
+B. S. Rochester Institute of Technology
+"National Engineering Research Center for Multimedia Software, Wuhan University, Wuhan, China"
+"College of Electronics and Information Engineering, Sichuan University, Chengdu 610064, China"
+The Robotics Institute Carnegie Mellon University
+"University of Ioannina, Ioannina, Greece, 2 Computational Biomedicine"
+"School of Computer Science and Technology, Tianjin University, Tianjin, China"
+University of California at San Diego
+Institute for Neural Computation
+"System Research Center, NOKIA Research Center, Beijing, 100176, China"
+"CVAP, KTH (Royal Institute of Technology), Stockholm, SE"
+Institute for Advanced Computer Studies
+"Solapur University, INDIA"
+High Institute of Medical Technologies
+"Psychology, American University"
+Multimedia Laboratory at The Chinese University of Hong Kong
+"Human Interface Technology Lab New Zealand, University of Canterbury, New Zealand"
+"c Cardiff Business School, Cardiff University, Cardiff, United Kingdom"
+"to visually detectable changes in facial appearance, including blushing and tears. These"
+The Chinese University ofHong Kong
+"University of Caen, France"
+"School of Computer Science and Engineering, Nanyang Technological University, Singapore"
+"ESAT, Katholieke Universiteit Leuven, Leuven, Belgium"
+Tsinghua-CUHK Joint Research Center for Media Sciences
+ATR Human Information Processing Research Laboratories
+"Shenzhen Key Lab of CVPR, Shenzhen Institutes of Advanced Technology"
+"University of Verona, Verona, Italy"
+"University of Maryland, College Park, USA"
+"Universitat Polit`ecnica de Catalunya, Columbia University"
+"Laboratory for Human Brain Dynamics, RIKEN Brain Science Institute (BSI), 2-1 Hirosawa, Wakoshi, Saitama, 351-0198, Japan"
+aInformation Sciences Institute
+"Moscow Institute of Physics and Technology, Institutskiy per., 9, Dolgoprudny, 141701, Russia"
+"face processing, including age (Berry, 1990), sex (Hill"
+"School of Computer Science, Carnegie Mellon University, USA"
+"Lomonosov Moscow State University, 2Video Analysis Technologies, LLC"
+"Shenzhen Institutes of Advanced Technology, Chinese Academy of Science, China"
+Idiap Research Institute and EPF Lausanne
+"Graduate School of Information Science, Nagoya University; Furo-cho, Chikusa-ku, Nagoya, 464-8601, Japan"
+"Institute of Automation, Chinese Academy of Sciences, Beijing 100190, China"
+"Carnegie Mellon University, CyLab Biometrics Center, Pittsburgh, PA, USA"
+"Institute of Electrical Measurement and Measurement Signal Processing, TU Graz, Austria"
+"Machine Perception Laboratory, University of California, San Diego"
+"of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, China"
+STANBUL TECHNICAL UNIVERSITY INSTITUTE OF SCIENCE AND TECHNOLOGY
+"Research Institute of Shenzhen, Wuhan University, Shenzhen, China"
+"Lotus Hill Institute for Computer Vision and Information Science, 436000, China"
+"School of E.C.E., National Technical University of Athens, 15773 Athens, Greece"
+University Lecturer Veli-Matti Ulvinen
+"School of Software, Tianjin University"
+"Srm Easwari Engineering College, Ramapuram, Bharathi Salai, Chennai, Tamil Nadu, India"
+"Aditya institute of Technology and Management, Tekkalli-532 201, A.P"
+"University of Colorado at Colorado Springs and Securics, Inc., Colorado Springs, CO, USA"
+"Public University of Navarra, Spain"
+"c School of Arts and Sciences, University of Pennsylvania Medical Center, Hospital of the University of Pennsylvania"
+"Deparment of Computing, Imperial College London, UK"
+"Faculty of Engineering and Technology, Multimedia University (Melaka Campus"
+UNIVERSITY OF APPLIED SCIENCE DARMSTADT (FHD
+"Faculty of Computer, Khoy Branch, Islamic Azad University, Khoy, Iran"
+"CNRS , Institute of Automation of the Chinese Academy of Sciences"
+"College of Computer Science and Information Technology, Northeast Normal University, Changchun"
+"Institute of Computing Technology, CAS"
+"aMILab, LCSEE, West Virginia University, Morgantown, West Virginia, USA"
+"Publication details, including instructions for authors and subscription"
+"School of Computing and Communications University of Technology, Sydney"
+"School of Management Engineering, Henan Institute of Engineering, Zhengzhou 451191, P.R. China"
+"Motorola China Research Center, Shanghai, 210000, P.R.China"
+"the Diploma of Imperial College London. This thesis is entirely my own work, and, except"
+"Institute of Systems Engineering, Southeast University, Nanjing, China"
+"Psychopharmacology Unit, Educational and Health Psychology, University College"
+"Graduate University of Chinese Academy of Sciences(CAS), 100190, China"
+"University of Southampton, UK, 2University of Warwick, UK"
+"Robotics Institute, Carnegie Mellon University"
+"of Psychology, University of Michigan, Ann Arbor, MI, United States, University of Michigan, Ann"
+University of North Carolina Wilmington in Partial Ful llment
+"Vision and Sensing, HCC, ESTeM, University of Canberra"
+"Ph.D Research Scholar, Karpagam University, Coimbatore, Tamil Nadu, India"
+"State Key Laboratory of CAD and CG, ZHE JIANG University, HangZhou, 310058 China"
+U.S. Army Research Laboratory
+"ISTMT, Laboratory of Research in Biophysics and Medical Technologies LRBTM Higher Institute of Medical Technologies of Tunis"
+"Advanced Imaging Science, Multimedia, and Film Chung-Ang University, Seoul"
+"Institute of Human Genetics, University Hospital Magdeburg, Magdeburg, Germany"
+"Doctor of Philosophy in Computer Science at Cardi University, July 24th"
+"School of Computer Science, Northwestern Polytechnical University, Xi an China"
+"Faculty of Computer and Information Science, University of Ljubljana, Ve cna pot 113, SI-1000 Ljubljana"
+Nam k Kemal University
+"National Laboratory of Pattern Recognition (NLPR), Institute of Automation"
+"Kodak Research Laboratories, Rochester, New York"
+"School of Computer Science and Engineering, South China University of Technology, Guangzhou, China"
+"SenseTime, 2Tsinghua University"
+UNIVERSITY OF OULU GRADUATE SCHOOL
+"Bo gazici University, Istanbul, TR"
+"aFaculty of Electrical Engineering, University of Technology MARA, Shah Alam, 40450 Selangor, Malaysia"
+"RTMNU Nagpur University, India"
+College of Computer Science and Information Sciences
+"Michigan State University, East Lansing, MI, U.S.A"
+"Computer Science and Technology, Tsinghua University, Beijing, China"
+"School of Electromechanical Engineering, Guangdong University of Technology, 510006 Guangzhou, China"
+"School of Psychology, University of Auckland, Auckland, New Zealand"
+"School of Information Science and Technology, ShanghaiTech University, Shanghai, 200031, China"
+UNIVERSITY OF OULU P.O. Box 8000 FI-90014 UNIVERSITY OF OULU FINLAND
+"Computer Science, Beijing Institute of Technology, Beijing 100081, P.R.China"
+"Inst. Neural Computation, University of California"
+K S Rangasamy College of Technology
+"Institute for Infocomm Research, A*STAR, Singapore"
+"Image and Video Research Laboratory, Queensland University of Technology"
+"Katholieke Universiteit Leuven, ESAT/VISICS"
+"Doctoral School of Automatic Control and Computers, University POLITEHNICA of Bucharest, Romania"
+"North Dakota State University, Fargo, ND58105, USA"
+University of Insubria
+"MRC Centre for Neuropsychiatric Genetics and Genomics, Cardiff University, Cardiff"
+"The American University In Cairo, Road 90, New Cairo, Cairo, Egypt"
+"University of Georgia, Athens, GA, U.S.A"
+"The Robotics Institute, Carnegie Mellon University, Pittsburgh PA"
+"Swiss Federal Institute of Technology, Lausanne (EPFL"
+"School of Computer Software, Tianjin University, 300072 Tianjin, China"
+"Hector Research Institute of Education Sciences and Psychology, T ubingen"
+"Institute of Automation, Chinese Academy of Sciences (CASIA"
+"School of Computer Engineering, Shahrood University of Technology, Shahrood, IRAN"
+"Technical University in Prague, Technick a 2, 166 27 Prague 6 Czech Republic"
+Islamic Azad University of AHAR
+"Indian Institute of Technology, Kharagpur"
+"puter Engineering, National University of Singapore, Singapore (e-mails"
+"Faculty of Science and Technology, University of Macau"
+"University of Victoria, Victoria, Canada"
+"EIMT, Open University of"
+"Australian Centre for Visual Technologies, The University of Adelaide, Australia (b"
+"Priyadarshini College of Engg, Nagpur, India"
+"aImperial College London, London, UK"
+"University of Alberta, Edmonton, AB T6G 2E8, Canada"
+"Information Technology, Madras Institute of Technology, TamilNadu, India, email"
+"Suhaila N. Mohammed, Baghdad University, College of Science, Baghdad, Iraq"
+"Intelligent Systems Lab Amsterdam, University of Amsterdam"
+"Computer Vision for Human Computer Interaction, Karlsruhe Institute of Technology, Germany"
+"Government College of Engineering, Aurangabad"
+"Queen Mary, University of London"
+"University Center of FEI, S ao Bernardo do Campo, Brazil"
+"University of Pennsylvania Medical Center, Hospital of the University of Pennsylvania"
+"Shenzhen Key Lab of CVPR, Shenzhen Institutes of Advanced Technology, CAS"
+"Ilmenau Technical University, P.O.Box 100565, 98684 Ilmenau, Germany"
+Boston University Theses and Dissertations
+"Helen Wills Neuroscience Institute, University of"
+Institute for Vision Systems Engineering
+"Informatics and Telematics Institute, Centre of Research and Technology - Hellas"
+"PG scholar, Communication Systems, Adhiyamaan College of Engineeing, Hosur, (India"
+"School of Computer Science, Nanjing University of Science and Technology"
+"Imperial College of Science, Technology and Medicine"
+"Lecturer, Amity school of Engineering and Technology, Amity University, Haryana, India"
+"College of Computer Science, Sichuan University, Chengdu 610065, P.R. China"
+"Pattern Recognition and Bioinformatics Group, Delft University of Technology"
+"Korea Electronics Technology Institute, Jeonju-si, Jeollabuk-do 561-844, Rep. of"
+Biometric Research Center
+"Center of Research Excellence in Hajj and Umrah, Umm Al-Qura University, Makkah, KSA"
+"Institute of Information Science, Academia Sinica, Taipei, Taiwan"
+"Center for Research in Intelligent Systems, University of California, Riverside, CA 92521, USA"
+"Electronic and Information Engineering, University of Bologna, Italy"
+"MIT, McGovern Institute, Center for Brains, Minds and Machines"
+"National Lab of Pattern Recognition, Institute of Automation"
+"Psychology and Educational Sciences, Open University of the Netherlands, Valkenburgerweg"
+"Center for Research in Computer Vision, University of Central Florida, Orlando, USA"
+"Computer Information Systems, Missouri State University, 901 S. National, Springfield, MO 65804, USA"
+University of Twente 2Dublin City University 3Oxford University
+"C.L. Teo, University of Maryland"
+Technical University of Kaiserslautern
+"Institute of Digital Media, Peking University, Beijing 100871, China"
+"Faculty of Electrical Engineering, Mathematics and Computer Science, University"
+"Kobe University, NICT and University of Siegen"
+"School of Computer Science and Technology, Tianjin University, Tianjin 300072, China"
+"Graduate University of CAS, 100190, Beijing, China"
+"ISLA Lab, Informatics Institute, University of Amsterdam"
+"Key Laboratory of Pervasive Computing (Tsinghua University), Ministry of Education"
+"University of Shef eld, UK"
+"aIBM China Research Lab, Beijing, China"
+"Institute for Genomic Statistic and Bioinformatics, University Hospital Bonn"
+"Doctor of Philosophy in Computing of Imperial College, February"
+University: Dhirubhai Ambani Institute of Information and Communication Technology
+"University Campus, 54124, Thessaloniki, Greece"
+"Centre for Quantum Computation and Intelligent Systems, FEIT, University of Technology Sydney, Australia"
+"Japan, 2 Center for Special Needs Education, Nara University of Education, Nara-shi, Nara"
+"School of Electrical Engineering and Computer Science, Peking University"
+"Research School of Engineering, The Australian National University, ACT 2601, Australia"
+"College of Information Engineering, Shanghai Maritime University, Shanghai, China, 2 School of Information, Kochi University"
+Howard Hughes Medical Institute (HHMI
+"learning. As a result of this research, many applications, including video surveillance systems"
+"School of Behavioral and Brain Sciences, The University of Texas at Dallas, USA"
+"M.S. (University of California, Berkeley"
+"Facial expression gures prominently in research on almost every aspect of emotion, including psychophys"
+"University of Szeged, 2 E tv s Lor nd University"
+at the University of Central Florida
+"Institute of Automation, Chinese Academy of Sciences, 100190, Beijing, P.R.C"
+"Australian National University, 2Smart Vision Systems, CSIRO, 3CVLab, EPFL"
+"Kulhare, Sourabh, ""Deep Learning for Semantic Video Understanding"" (2017). Thesis. Rochester Institute of Technology. Accessed"
+"Aristotle University of Thessaloniki, Box 451, 54124 Thessaloniki, Greece"
+"ECSE, Rensselaer Polytechnic Institute, Troy, NY"
+Language Technologies Institute
+"National University of Computer and Emerging Sciences (NUCES-FAST), Islamabad, Pakistan"
+"National Research University Higher School of Economics, Nizhny Novgorod, Russian"
+"Sichuan Fine Arts Institute, Chongqing, China"
+SAMSI and Duke University
+A Thesis submitted to McGill University in partial fulfillment of the requirements for the
+Alex Waibel (Carnegie Mellon University
+"Institute for Interdisciplinary Information Sciences, Tsinghua University, Beijing, China"
+"School of Business, Aalto University, Finland"
+"Electrical and Computer Engineering, National University of Singapore, Singapore"
+"the Library: http://uba.uva.nl/en/contact, or a letter to: Library of the University of Amsterdam, Secretariat, Singel 425, 1012 WP Amsterdam"
+"Utrecht Centre for Child and Adolescent Studies, Utrecht University, Utrecht, The Netherlands"
+"Rutgers University, Computer and Information Sciences, 110 Frelinghuysen Road, Piscataway, NJ"
+"School of Psychology, The University of New South Wales, Sydney, Australia, 2 School of Psychology"
+"Center for Healthy Aging, University of"
+DICGIM - University of Palermo
+College of Engineering (Poly
+Link to publication from Aalborg University
+"image being generated by the model, include Active Appearance"
+"University of Science, Ho Chi Minh city"
+"Queen Mary, University of London, E1 4NS, UK"
+"University of Basel, Departement Informatik, Basel, Switzerland"
+"School of Computer Science, University of Lincoln, United Kingdom"
+"School of Computing and Communications Infolab21, Lancaster University, Lancaster LA1 4WA, UK"
+"Hua Zhong University of Science and Technology, Wuhan, China"
+"University of Maryland, College Park, MD"
+"Graduate Institute of Networking and Multimedia, National Taiwan University, Taipei, Taiwan"
+"recognition, such as human computer interfaces and e-services, including e-home"
+"University of Pennsylvania School of Medicine, 1013 Blockley Hall"
+The Graduate University for Advanced Studies (SOKENDAI
+Honda Research Institute USA
+DAP - University of Sassari
+"Division of Electronics and Electrical Engineering, Dongguk University, 30 Pildong-ro 1-gil, Jung-gu"
+"CVL, Link oping University, Link oping, Sweden"
+"Author s addresses: Z. Li and D. Gong, Shenzhen Institutes of Advanced Technology, Chinese Academy"
+"ColumbiaUniversity, NY, USA"
+"Imaging Science and Biomedical Engineering, The University of Manchester, UK"
+"Institute of Computer Science and Technology, Chongqing University of Posts and"
+"Quantitative Employee unit, Finnish Institute of Occupational Health"
+"KTH, Royal Institute of Technology"
+"HOD, St. Joseph College of Information Technology, Songea, Tanzania"
+"Robotics Institute, Carnegie Mellon University, Pittsburgh, Pennsylvania"
+"aCentre for Neuroscience, Indian Institute of Science, Bangalore, India"
+"The University of Queensland, School of ITEE, QLD 4072, Australia"
+"University of Amsterdam; 2Amsterdam Brain and Cognition Center, University of"
+"School of Business, Aalto University, Finland"
+"Dextro Robotics, Inc. 101 Avenue of the Americas, New York, USA"
+U.S. Army Research Laboratory
+"Hector Research Institute of Education Sciences and Psychology, T ubingen"
+"Carnegie Mellon University, CyLab Biometrics Center, Pittsburgh, PA, USA"
+"University of Shef eld, UK"
+"point, lighting, and appearance. Many applications, including video surveillance systems"
+"a Section of Biomedical Image Analysis, University of Pennsylvania, 3600 Market, Suite 380, Philadelphia, PA 19104, USA"
+"QCIS, University of Technology, Sydney"
+"Government College of Engineering, Aurangabad"
+"Institute of Automation, Chinese Academy of Sciences, Beijing 100190, China"
+"Shenzhen Key Lab of CVPR, Shenzhen Institutes of Advanced Technology"
+"Honda RandD Americas, Inc., Boston, MA, USA"
+"Institute for Advanced Computer Studies, University of Maryland, College Park, MD"
+"Psychology and Educational Sciences, Open University of the Netherlands, Valkenburgerweg"
+"Helen Wills Neuroscience Institute, University of"
+"Kodak Research Laboratories, Rochester, New York"
+A dissertation submitted to the Faculty of the University of Delaware in partial
+"Augmented Vision Lab, Technical University Kaiserslautern, Kaiserslautern, Germany"
+eBay Research Labs
+"School of Computer Science, Carnegie Mellon University, USA"
+Multimedia Laboratory at The Chinese University of Hong Kong
+"Language Technology Institute, Carnegie Mellon University, Pittsburgh, PA, USA"
+"Ross School of Business, University of Michigan, Ann Arbor, MI, USA"
+"IES College of Technology, Bhopal"
+"Centre for Quantum Computation and Intelligent Systems, FEIT, University of Technology Sydney, Australia"
+"College of Science, Menou a University, Menou a 32721, Egypt"
+"aIBM China Research Lab, Beijing, China"
+"Institute of Computer Science and Technology, Chongqing University of Posts and"
+"Center for Automation Research, UMIACS, University of Maryland, College Park"
+"School of Electrical Engineering and Computer Science, Peking University"
+Institute for Human-Machine Communication
+University of Verona. 2Vienna Institute of Technology. 3ISTC CNR (Trento). 4University of Trento
+"Leiden Institute of Advanced Computer Science, Leiden University, The Netherlands"
+"Robotics Institute, Carnegie Mellon University"
+"the Library: http://uba.uva.nl/en/contact, or a letter to: Library of the University of Amsterdam, Secretariat, Singel 425, 1012 WP Amsterdam"
+Idiap Research Institute and EPF Lausanne
+"Electrical and Computer Engineering, National University of Singapore, Singapore"
+"c School of Arts and Sciences, University of Pennsylvania Medical Center, Hospital of the University of Pennsylvania"
+"Computer Vision for Human Computer Interaction, Karlsruhe Institute of Technology, Germany"
+"PG scholar, Communication Systems, Adhiyamaan College of Engineeing, Hosur, (India"
+"aCentre for Neuroscience, Indian Institute of Science, Bangalore, India"
+"Integrated Research Center, Universit`a Campus Bio-Medico di Roma"
+"Institute of Computer Science and Technology, Chongqing University of Posts and"
+B.S.Abdur Rahman University B.S.Abdur Rahman University
+"Trust Centre for Human Genetics, University of Oxford, Oxford, United Kingdom"
+eBay Research Labs
+"Center for Machine Vision and Signal Analysis (CMVS), University of Oulu, Finland"
+"College of Science, Menou a University, Menou a 32721, Egypt"
+K S Rangasamy College of Technology
+"School of Software, Tianjin University"
+"Research Center for Learning Science, Southeast University, Nanjing, China"
+"Lomonosov Moscow State University, 2Video Analysis Technologies, LLC"
+Stevens Institute of Technology Adobe Systems Inc
+"Korea Electronics Technology Institute, Jeonju-si, Jeollabuk-do 561-844, Rep. of"
+"Key Laboratory of Pervasive Computing (Tsinghua University), Ministry of Education"
+A DISSERTATION SUBMITTED TO THE UNIVERSITY OF MANCHESTER
+"University Campus, 54124, Thessaloniki, Greece"
+"Psychology and Educational Sciences, Open University of the Netherlands, Valkenburgerweg"
+A dissertation submitted to the Faculty of the University of Delaware in partial
+"SBK Women s University, Quetta, Balochistan"
+"School of Data of Computer Science, Sun Yat-sen University, P.R. China"
+"Shenzhen Institutes of Advanced Technology, Chinese Academy of Science, China"
+"University of Ioannina, Ioannina, Greece, 2 Computational Biomedicine"
+"University of Balochistan, Quetta"
+Multimedia Laboratory at The Chinese University of Hong Kong
+"cid:63)Sharif University of Technology, University College London, Queen Mary University of London"
+"Doctor of Philosophy in Computer Science at Cardi University, July 24th"
+"Institute of Systems Engineering, Southeast University, Nanjing, China"
+Honda Research Institute USA
+"c School of Arts and Sciences, University of Pennsylvania Medical Center, Hospital of the University of Pennsylvania"
+"C.L. Teo, University of Maryland"
+"National University of Singapore Research Institute, Suzhou, China"
+"Psychology, American University"
+"Nanjing University of Information Science and Technology, Nanjing, 210044, China"
+Howard Hughes Medical Institute (HHMI
+"School of Computer Science and Technology, Tianjin University, Tianjin 300072, China"
+"Imperial College, South Kensington Campus, London SW7 2AZ, UK"
+"Electronic and Information Engineering, University of Bologna, Italy"
+"Technological Educational Institute of Athens, 12210 Athens, Greece"
+"Computer Information Systems, Missouri State University, 901 S. National, Springfield, MO 65804, USA"
+"Indian Institute of Technology, Kharagpur"
+"NICTA, and Australian National University"
+"IES College of Technology, Bhopal"
+"Swiss Federal Institute of Technology, Lausanne (EPFL"
+"Institute for Infocomm Research, Agency for Science, Technology and Research (A*STAR), Singapore"
+"Research School of Engineering, The Australian National University, ACT 2601, Australia"
+"Institute of Electrical Measurement and Measurement Signal Processing, TU Graz, Austria"
+"Lister Hill National Center for Biomedical Communications, National Library of Medicine, National Institutes of Health"
+"Center for Biometrics and Security Research and National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences"
+"the Diploma of Imperial College London. This thesis is entirely my own work, and, except"
+"Institute of Automation, Chinese Academy of Sciences (CASIA"
+"Sichuan Fine Arts Institute, Chongqing, China"
+"University of Victoria, Victoria, Canada"
+"Institute of Human Genetics, University Hospital Magdeburg, Magdeburg, Germany"
+"National Laboratory of Pattern Recognition (NLPR), Institute of Automation"
+"The Chinese University of Hong Kong, Hong Kong SAR, China"
+"Computer Science and Technology, Tsinghua University, Beijing, China"
+"M.S. (University of California, Berkeley"
+"Ilmenau Technical University, P.O.Box 100565, 98684 Ilmenau, Germany"
+"Faculty of Informatics, E otv os Lor and University, Budapest, Hungary"
+DICGIM - University of Palermo
+"School of Computing and Communications University of Technology, Sydney"
+USC Institute for Creative Technologies
+"Aditya institute of Technology and Management, Tekkalli-532 201, A.P"
+"Lecturer, Amity school of Engineering and Technology, Amity University, Haryana, India"
+"University Center of FEI, S ao Bernardo do Campo, Brazil"
+"Lotus Hill Institute for Computer Vision and Information Science, 436000, China"
+"Vision and Sensing, HCC Lab, ESTeM, University of Canberra"
+"Queen Mary, University of London, E1 4NS, UK"
+"CNRS , Institute of Automation of the Chinese Academy of Sciences"
+"Vision and Fusion Lab, Karlsruhe Institute of Technology KIT, Karlsruhe, Germany"
+"Facial expression gures prominently in research on almost every aspect of emotion, including psychophys"
+"Quanti ed Employee unit, Finnish Institute of Occupational Health"
+"National Chung Cheng University, Chiayi, Taiwan, R.O.C"
+"University of Shef eld, UK"
+"Inst. Neural Computation, University of California"
+"Institute of Digital Media, Peking University, Beijing 100871, China"
+"University of Georgia, Athens, GA, U.S.A"
+"ICT-ISVISION Joint RandD Laboratory for Face Recognition, Institute of Computer Technology, The Chinese Academy of Sciences"
+"Dextro Robotics, Inc. 101 Avenue of the Americas, New York, USA"
+D.J. Sanghvi College of Engineering
+"Srm Easwari Engineering College, Ramapuram, Bharathi Salai, Chennai, Tamil Nadu, India"
+Carnegie Melon University
+Vietnam National University Ho Chi
+"Kodak Research Laboratories, Rochester, New York"
+"College of Computer Science and Information Technology, Northeast Normal University, Changchun"
+"J. P. College of Engineering, India"
+"Institute for Electronics, Signal Processing and Communications"
+"of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, China"
+"Intelligent Information Engineering and Science, Doshisha University, Kyoto, Japan"
+"Queen Mary, University of London"
+"CVAP, KTH (Royal Institute of Technology), Stockholm, SE"
+University Lecturer Anu Soikkeli
+IstanbulTechnicalUniversity
+"aMILab, LCSEE, West Virginia University, Morgantown, West Virginia, USA"
+"University of Siena, Siena, Italy"
+"Assistant Lecturer, College of Science, Baghdad University, Baghdad, Iraq"
+"Northumbria University, Newcastle Upon-Tyne NE21XE, UK"
+"System Research Center, NOKIA Research Center, Beijing, 100176, China"
+"and bDivision of Engineering and Applied Sciences 136-93, California Institute of Technology, Pasadena, CA"
+"Pattern Recognition and Bioinformatics Group, Delft University of Technology"
+"Faculty of Mathematics and Computer Science, University of Barcelona, Barcelona, Spain"
+"University of Colorado at Colorado Springs and Securics, Inc., Colorado Springs, CO, USA"
+"to visually detectable changes in facial appearance, including blushing and tears. These"
+"School of Behavioral and Brain Sciences, The University of Texas at Dallas, USA"
+"Key Lab of Intelligent Information Processing of Chinese Academy of Sciences (CAS), Institute of"
+"Vision and Sensing, HCC, ESTeM, University of Canberra"
+"Computer Vision Laboratory, Link oping University, SE-581 83 Link oping, Sweden"
+Bo gazi ci University
+"Doctoral School of Automatic Control and Computers, University POLITEHNICA of Bucharest, Romania"
+"Kulhare, Sourabh, ""Deep Learning for Semantic Video Understanding"" (2017). Thesis. Rochester Institute of Technology. Accessed"
+B. S. Rochester Institute of Technology
+"Institute of Information Science, Academia Sinica, Taipei, Taiwan"
+Nam k Kemal University
+"Graduate Institute of Networking and Multimedia, National Taiwan University, Taipei, Taiwan"
+"Institute for Infocomm Research, Singapore"
+"University of Michigan, Ann Arbor, MI, USA (UMICH.EDU"
+"Ph.D Research Scholar, Karpagam University, Coimbatore, Tamil Nadu, India"
+"Science and Intelligence Technology, Shanghai Institutes for Biological Sciences, CAS"
+"Institute of Computing Technology, CAS"
+"School of Computer Science and Engineering, South China University of Technology, Guangzhou, China"
+"School of Electromechanical Engineering, Guangdong University of Technology, 510006 Guangzhou, China"
+"Graduate School at Shenzhen, Tsinghua University, Shenzhen 518055, China"
+"GREYC Laboratory, ENSICAEN - University of Caen Basse Normandie - CNRS"
+"aFaculty of Electrical Engineering, University of Technology MARA, Shah Alam, 40450 Selangor, Malaysia"
+"Graduate University of Chinese Academy of Sciences(CAS), 100190, China"
+"College of Engineering, Purdue University"
+"Hector Research Institute of Education Sciences and Psychology, T ubingen"
+"Katholieke Universiteit Leuven, ESAT/VISICS"
+"School of E.C.E., National Technical University of Athens, 15773 Athens, Greece"
+DAP - University of Sassari
+Fraunhofer Institute for Integrated Circuits IIS
+Link to publication in University of Groningen/UMCG research database
+"Government College of Engineering, Aurangabad"
+"Institute for Interdisciplinary Information Sciences, Tsinghua University, Beijing, China"
+"University of Amsterdam, Amsterdam, the Netherlands, 2 Leiden University"
+"Research Institute of Shenzhen, Wuhan University, Shenzhen, China"
+"MIT, McGovern Institute, Center for Brains, Minds and Machines"
+"Center for Automation Research, UMIACS, University of Maryland, College Park"
+"Government College of Engineering, Aurangabad [Autonomous"
+"Carnegie Mellon University, 5000 Forbes Avenue, Pittsburgh, PA 15213, USA"
+"of Psychology, Princeton University, Princeton, NJ 08540. E-mail"
+"Institute for Arts, Science and Technology"
+"Psychonomic Society, Inc"
+"Johns Hopkins University, 3400 N. Charles Street, Baltimore, MD 21218, USA"
+STANBUL TECHNICAL UNIVERSITY INSTITUTE OF SCIENCE AND TECHNOLOGY
+"Faculty of Computer, Khoy Branch, Islamic Azad University, Khoy, Iran"
+Language Technologies Institute
+"Faculty of Science and Technology, University of Macau"
+"Graduate Institute ofNetworking and Multimedia, National Taiwan University, Taipei, Taiwan"
+Funding was provided by the U.S. National Institutes of Mental
+"Minia University, Egypt"
+"North Dakota State University, Fargo, ND58105, USA"
+"School of Computer Science and Technology, Tianjin University, Tianjin, China"
+College of Information and Electrical Engineering
+"Moscow Institute of Physics and Technology, Institutskiy per., 9, Dolgoprudny, 141701, Russia"
+"Electrical and Computer Engineering, National University of Singapore, Singapore"
+Institute for Vision Systems Engineering
+University of North Carolina Wilmington in Partial Ful llment
+"School of Computer Science, Carnegie Mellon University, USA"
+"aImperial College London, London, UK"
+aInformation Sciences Institute
+"Imperial College of Science, Technology and Medicine"
+"Information Technology, Madras Institute of Technology, TamilNadu, India, email"
+"School of Information Science and Technology, ShanghaiTech University, Shanghai, 200031, China"
+"School of Computer Science, Carnegie Mellon University, 15213, USA"
+"Graduate University of CAS, 100190, Beijing, China"
+"ESTeM, University of Canberra"
+"College of Science, Baghdad University, Baghdad, Iraq"
+High Institute of Medical Technologies
+"the Chinese University of Hong Kong, Shatin, Hong Kong"
+Science and the Robotics Institute at Carnegie Mellon University. This study was supported in part
+A Thesis submitted to McGill University in partial fulfillment of the requirements for the
+"Graduate School of Information Science, Nara Institute of Science and Technology, Ikoma-shi, Nara"
+"QCIS, University of Technology, Sydney"
+"Lille 1 University, France"
+UNIVERSITY OF APPLIED SCIENCE DARMSTADT (FHD
+"Institute for Infocomm Research, Agency for Science, Technology and Research, Singapore"
+"Institute of Image Communication and Network Engineering, Shanghai Jiao Tong University"
+"Motorola China Research Center, Shanghai, 210000, P.R.China"
+"Priyadarshini College of Engg, Nagpur, India"
+AristotleUniversityofThessaloniki
+"Institute for Advanced Computer Studies, University of Maryland, College Park, MD"
+"Author s addresses: Z. Li and D. Gong, Shenzhen Institutes of Advanced Technology, Chinese Academy"
+"Institute for Arts, Science and Technology"
+"School of Psychology, The University of New South Wales, Sydney, Australia, 2 School of Psychology"
+STANBUL TECHNICAL UNIVERSITY INSTITUTE OF SCIENCE AND TECHNOLOGY
+Funding was provided by the U.S. National Institutes of Mental
+"Augmented Vision Lab, Technical University Kaiserslautern, Kaiserslautern, Germany"
+"Institute of Information Science, Academia Sinica, Taipei, Taiwan"
+"Institute for Infocomm Research, Agency for Science, Technology and Research, Singapore"
+"Graduate University of CAS, 100190, Beijing, China"
+K S Rangasamy College of Technology
+"Language Technology Institute, Carnegie Mellon University, Pittsburgh, PA, USA"
+"University of Verona, Verona, Italy"
+"Priyadarshini College of Engg, Nagpur, India"
+"National University of Computer and Emerging Sciences (NUCES-FAST), Islamabad, Pakistan"
+"EIMT, Open University of"
+"School of Computer, Beijing Institute of Technology, Beijing, China"
+"a Section of Biomedical Image Analysis, University of Pennsylvania, 3600 Market, Suite 380, Philadelphia, PA 19104, USA"
+"School of Computing and Communications Infolab21, Lancaster University, Lancaster LA1 4WA, UK"
+"The American University In Cairo, Road 90, New Cairo, Cairo, Egypt"
+"Graduate Institute ofNetworking and Multimedia, National Taiwan University, Taipei, Taiwan"
+"Sri SidarthaInstitute of Technology, Tumkur"
+"KU Phonetics and Psycholinguistics Lab, University of Kansas"
+"Institute of Electrical Measurement and Measurement Signal Processing, TU Graz, Austria"
+"University of Victoria, Victoria, Canada"
+"College of Computer Science, Zhejiang University"
+"School of Control Science and Engineering, Shandong University, Jinan 250061, China"
+"Toyota Technological Institute, Chicago (TTIC"
+"M.Tech, Sri Sunflower College of Engineering and Technology, Lankapalli"
+"Computer Science, Beijing Institute of Technology, Beijing 100081, P.R.China"
+"MRC Centre for Neuropsychiatric Genetics and Genomics, Cardiff University, Cardiff"
+"University Campus, 54124, Thessaloniki, Greece"
+"aCentre for Neuroscience, Indian Institute of Science, Bangalore, India"
+"College of Electronics and Information, Northwestern Polytechnic University"
+"Publication details, including instructions for authors and subscription"
+"State Key Laboratory for Novel Software Technology, Nanjing University, China"
+"Institute for Electronics, Signal Processing and Communications"
+Bharath Institute of Science and Technology
+"National Laboratory of Pattern Recognition (NLPR), Institute of Automation"
+"School of Computer, Beijing Institute of Technology, Beijing, China"
+"School of EECS, Queen Mary University of London"
+"C.L. Teo, University of Maryland"
+Technical University of Kaiserslautern
+"School of Computer Science, Carnegie Mellon University, 15213, USA"
+"Australian National University, 2Smart Vision Systems, CSIRO, 3CVLab, EPFL"
+"Korea Electronics Technology Institute, Jeonju-si, Jeollabuk-do 561-844, Rep. of"
+"Swiss Federal Institute of Technology, Lausanne (EPFL"
+Idiap Research Institute and EPF Lausanne
+"Japan, 2 Center for Special Needs Education, Nara University of Education, Nara-shi, Nara"
+"Trust Centre for Human Genetics, University of Oxford, Oxford, United Kingdom"
+"Electrical and Computer Engineering, National University of Singapore, Singapore"
+DICGIM - University of Palermo
+"Faculty of Engineering and Technology, Multimedia University (Melaka Campus"
+"School of Electrical Engineering and Computer Science, Peking University"
+University of Verona. 2Vienna Institute of Technology. 3ISTC CNR (Trento). 4University of Trento
+"UMIACS | University of Maryland, College Park"
+"aCentre for Neuroscience, Indian Institute of Science, Bangalore, India"
+"School of Computing and Communications University of Technology, Sydney"
+"Vision and Sensing, HCC Lab, ESTeM, University of Canberra"
+"College of Electronics and Information, Northwestern Polytechnic University"
+"Computer Science and Engineering, Easwari Engineering College, India"
+"National Research University Higher School of Economics, Nizhny Novgorod, Russian"
+"Imperial College, South Kensington Campus, London SW7 2AZ, UK"
+"Institute of Human Genetics, University Hospital Magdeburg, Magdeburg, Germany"
+"Helen Wills Neuroscience Institute, University of"
+SAMSI and Duke University
+"School of Computer Engineering, Nanyang Technological University, Singapore"
+"2 School of Computing, National University of Singapore"
+"Technological Educational Institute of Athens, 12210 Athens, Greece"
+"face processing, including age (Berry, 1990), sex (Hill"
+"J. P. College of Engineering, India"
+"Institute for Infocomm Research, Agency for Science, Technology and Research, Singapore"
+"The Chinese University of Hong Kong, Hong Kong SAR, China"
+"recognition, such as human computer interfaces and e-services, including e-home"
+Science and the Robotics Institute at Carnegie Mellon University. This study was supported in part
+"University of Basel, Departement Informatik, Basel, Switzerland"
+"learning. As a result of this research, many applications, including video surveillance systems"
+"Priyadarshini College of Engg, Nagpur, India"
+UNIVERSITY OF OULU P.O. Box 8000 FI-90014 UNIVERSITY OF OULU FINLAND
+Multimedia Laboratory at The Chinese University of Hong Kong
+"The Chinese University of Hong Kong, HKSAR, China"
+"Institute of Digital Media, Peking University, Beijing 100871, China"
+"B.Sc., University of Science and Technology of China"
+"Ilmenau Technical University, P.O.Box 100565, 98684 Ilmenau, Germany"
+"c School of Arts and Sciences, University of Pennsylvania Medical Center, Hospital of the University of Pennsylvania"
+"puter Engineering, National University of Singapore, Singapore (e-mails"
+"Intelligent Systems Lab Amsterdam, University of Amsterdam"
+"Netherlands, Utrecht University, Utrecht, The Netherlands"
+"School of Computer Science and Technology, Tianjin University, Tianjin, China"
+"MIT, McGovern Institute, Center for Brains, Minds and Machines"
+"Utrecht Centre for Child and Adolescent Studies, Utrecht University, Utrecht, The Netherlands"
+"IES College of Technology, Bhopal"
+"Nanjing University of Information Science and Technology, Nanjing, 210044, China"
+"Solapur University, INDIA"
+"Multimedia University (MMU), Cyberjaya, Malaysia"
+"School of Computer Science, Fudan University, Shanghai 200433, China"
+"ColumbiaUniversity, NY, USA"
+"School of Automation Engineering, University of Electronic Science and Technology of China, No. 2006, Xiyuan Ave"
+"Inst. Neural Computation, University of California"
+"Psychology and Educational Sciences, Open University of the Netherlands, Valkenburgerweg"
+"Katholieke Universiteit Leuven, ESAT/VISICS"
+"additional details of DCS descriptors, including visualization. For extending the evaluation"
+"North Dakota State University, Fargo, ND58105, USA"
+University: Dhirubhai Ambani Institute of Information and Communication Technology
+"H. He, Honkong Polytechnic University"
+"Graduate School at Shenzhen, Tsinghua University, Shenzhen 518055, China"
+IstanbulTechnicalUniversity
+"HOD, St. Joseph College of Information Technology, Songea, Tanzania"
+"System Research Center, NOKIA Research Center, Beijing, 100176, China"
+"Texas AandM University, College Station, TX, USA"
+"of Psychology, Princeton University, Princeton, NJ 08540. E-mail"
+"Ross School of Business, University of Michigan, Ann Arbor, MI, USA"
+"School of Information Science and Technology, ShanghaiTech University, Shanghai, 200031, China"
+"Institute of Information Science, Academia Sinica, Taipei, Taiwan"
+"Key Laboratory of Pervasive Computing (Tsinghua University), Ministry of Education"
+"School of Optics and Electronics, Beijing Institute of Technology, Beijing"
+"Doctor of Philosophy in Computing of Imperial College, February"
+Bharath Institute of Science and Technology
+"Michigan State University, East Lansing, MI, U.S.A"
+"Institute of Digital Media, Peking University, Beijing, 100871, China"
+Funding was provided by the U.S. National Institutes of Mental
+"Human Interface Technology Lab New Zealand, University of Canterbury, New Zealand"
+"image being generated by the model, include Active Appearance"
+"Queen Mary, University of London"
+"Computer Science and Software Engineering, Concordia University, Montr eal, Qu ebec, Canada"
+"State University of Rio de Janeiro, Brazil"
+"Lecturer, Amity school of Engineering and Technology, Amity University, Haryana, India"
+UNIVERSITY OF OULU GRADUATE SCHOOL
+Link to publication in University of Groningen/UMCG research database
+"School of Software, Tianjin University"
+"School of Computer Science and Engineering, Nanyang Technological University, Singapore"
+University of Cambridge Computer Laboratory
+Institute for Robotics and Intelligent Systems
+A dissertation submitted to the Faculty of the University of Delaware in partial
+"School of Electromechanical Engineering, Guangdong University of Technology, 510006 Guangzhou, China"
+"Center for Automation Research, UMIACS, University of Maryland, College Park"
+"Shenzhen Institutes of Advanced Technology, Chinese Academy of Science, China"
+"KTH, Royal Institute of Technology"
+"Public University of Navarra, Spain"
+"School of Computer Science, Northwestern Polytechnical University, Xi an China"
+"School of Data of Computer Science, Sun Yat-sen University, P.R. China"
+"Gannan Normal University, Ganzhou 341000, China"
+"Dextro Robotics, Inc. 101 Avenue of the Americas, New York, USA"
+"Government College of Engineering, Aurangabad [Autonomous"
+"College of Information Engineering, Shanghai Maritime University, Shanghai, China, 2 School of Information, Kochi University"
+"Key Laboratory of Transient Optics and Photonics, Xi an Institute of Optics and Precision Mechanics, Chi"
+"University of Colorado at Colorado Springs and Securics, Inc., Colorado Springs, CO, USA"
+"Technical University in Prague, Technick a 2, 166 27 Prague 6 Czech Republic"
+Bo gazi ci University
+Autonomous University of Barcelona
+Howard Hughes Medical Institute (HHMI
+"University of Michigan, Ann Arbor, MI, USA (UMICH.EDU"
+"versity of Amsterdam, Amsterdam and University of Trento"
+"Minia University, Egypt"
+"The University of Queensland, School of ITEE, QLD 4072, Australia"
+"Psychopharmacology Unit, Educational and Health Psychology, University College"
+"National Institute of Advanced Industrial Science and Technology (AIST), Tsukuba 305-8560, Japan"
+"a Institute of Anatomy I, School of Medicine, Friedrich Schiller University, Germany"
+"Institute for Interdisciplinary Information Sciences, Tsinghua University, Beijing, China"
+B.S.Abdur Rahman University B.S.Abdur Rahman University
+The Graduate University for Advanced Studies (SOKENDAI
+Link to publication from Aalborg University
+Institute for Advanced Computer Studies
+"The Robotics Institute, Carnegie Mellon University, Pittsburgh PA"
+Biometric Research Center
+"Institute of Systems Engineering, Southeast University, Nanjing, China"
+"Faculty of Mathematics and Computer Science, University of Barcelona, Barcelona, Spain"
+"Johns Hopkins University, 3400 N. Charles Street, Baltimore, MD 21218, USA"
+"School of E.C.E., National Technical University of Athens, 15773 Athens, Greece"
+"Shenzhen Key Lab of CVPR, Shenzhen Institutes of Advanced Technology"
+"Institute for Infocomm Research, A*STAR, Singapore"
+"GREYC Laboratory, ENSICAEN - University of Caen Basse Normandie - CNRS"
+"Southeast University, Nanjing 210096, China"
+NSS College of Engineering
+"National Engineering Research Center for Multimedia Software, Wuhan University, Wuhan, China"
+"KU Phonetics and Psycholinguistics Lab, University of Kansas"
+"College of Science, Menou a University, Menou a 32721, Egypt"
+"recognition, such as human computer interfaces and e-services, including e-home"
+University Lecturer Anu Soikkeli
+"School of Electrical Engineering and Computer Science, Peking University"
+"College of Engineering, Purdue University"
+Stevens Institute of Technology Adobe Systems Inc
+"EIMT, Open University of"
+"School of Computer Science, Carnegie Mellon University, Pittsburgh, USA"
+"School of Computing and Communications Infolab21, Lancaster University, Lancaster LA1 4WA, UK"
+"College of Information Engineering, Shanghai Maritime University, Shanghai, China, 2 School of Information, Kochi University"
+The Chinese University ofHong Kong
+"Faculty of Electrical Engineering, Mathematics and Computer Science, University"
+"Kodak Research Laboratories, Rochester, New York"
+Funding was provided by the U.S. National Institutes of Mental
+"Institute of Computing Technology, CAS"
+"Center for Sensor Systems (ZESS) and Institute for Vision and Graphics#, University of Siegen"
+"Srm Easwari Engineering College, Ramapuram, Bharathi Salai, Chennai, Tamil Nadu, India"
+"Institute of Computer Science and Technology, Chongqing University of Posts and"
+"Language Technology Institute, Carnegie Mellon University, Pittsburgh, PA, USA"
+K S Rangasamy College of Technology
+"Minia University, Egypt"
+"Suhaila N. Mohammed, Baghdad University, College of Science, Baghdad, Iraq"
+"The University of Queensland, School of ITEE, QLD 4072, Australia"
+"Technical University in Prague, Technick a 2, 166 27 Prague 6 Czech Republic"
+"Hua Zhong University of Science and Technology, Wuhan, China"
+Language Technologies Institute
+"Psychology and Educational Sciences, Open University of the Netherlands, Valkenburgerweg"
+"Center for Healthy Aging, University of"
+"School of Business, Aalto University, Finland"
+"School of Computer Software, Tianjin University, 300072 Tianjin, China"
+"M.S. (University of California, Berkeley"
+Multimedia Laboratory at The Chinese University of Hong Kong
+"The Chinese University of Hong Kong, Hong Kong SAR, China"
+"Doctoral School of Automatic Control and Computers, University POLITEHNICA of Bucharest, Romania"
+"Information Technology, Madras Institute of Technology, TamilNadu, India, email"
+D.J. Sanghvi College of Engineering
+"School of Electromechanical Engineering, Guangdong University of Technology, 510006 Guangzhou, China"
+"cid:63)Sharif University of Technology, University College London, Queen Mary University of London"
+eBay Research Labs
+"Engineering, Ton Duc Thang University, 19 Nguyen Huu Tho Street, Ho Chi Minh City, Vietman"
+UNIVERSITY OF OULU P.O. Box 8000 FI-90014 UNIVERSITY OF OULU FINLAND
+"IIIS, Tsinghua University, Beijing, China"
+"SBK Women s University, Quetta, Balochistan"
+"College of Electronics and Information Engineering, Sichuan University, Chengdu 610064, China"
+"Institute of Digital Media, Peking University, Beijing, 100871, China"
+High Institute of Medical Technologies
+Nam k Kemal University
+Institute for Neural Computation
+"Technological Educational Institute of Athens, 12210 Athens, Greece"
+USC Institute for Creative Technologies
+"Intelligent Information Engineering and Science, Doshisha University, Kyoto, Japan"
+"Korea Electronics Technology Institute, Jeonju-si, Jeollabuk-do 561-844, Rep. of"
+"H. He, Honkong Polytechnic University"
+"Hunan Provincial Key Laboratory of Wind Generator and Its Control, Hunan Institute of Engineering, Xiangtan, China"
+University Lecturer Veli-Matti Ulvinen
+"Institute of Computing Technology, CAS, Beijing, 100190, China"
+"Institute of Digital Media, Peking University, Beijing 100871, China"
+Systems and Telematics - Neurolab
+UNIVERSITY OF APPLIED SCIENCE DARMSTADT (FHD
+"School of Computer Science, Carnegie Mellon University, 15213, USA"
+"Informatics and Telematics Institute, Centre of Research and Technology - Hellas"
+"Psychology, American University"
+"Gannan Normal University, Ganzhou 341000, China"
+"Texas AandM University, College Station, TX, USA"
+"Northumbria University, Newcastle Upon-Tyne NE21XE, UK"
+Islamic Azad University of AHAR
+"University of Michigan, Ann Arbor, MI, USA (UMICH.EDU"
+"ISLA Lab, Informatics Institute"
+"Vision and Sensing, HCC, ESTeM, University of Canberra"
+University of Massachusetts Amherst in partial ful llment
+"Swiss Federal Institute of Technology, Lausanne (EPFL"
+"Division of Electronics and Electrical Engineering, Dongguk University, 30 Pildong-ro 1-gil, Jung-gu"
+Vietnam National University Ho Chi
+"Moscow Institute of Physics and Technology, Institutskiy per., 9, Dolgoprudny, 141701, Russia"
+Carnegie Melon University
+"Graduate Institute ofNetworking and Multimedia, National Taiwan University, Taipei, Taiwan"
+"University Campus, 54124, Thessaloniki, Greece"
+"University of Caen, France"
+"University of Southampton, UK, 2University of Warwick, UK"
+"B.Sc., University of Science and Technology of China"
+"to visually detectable changes in facial appearance, including blushing and tears. These"
+"School of Psychology, University of Auckland, Auckland, New Zealand"
+"Institute ofInformation Science, Academia Sinica, Taipei, Taiwan"
+"a Section of Biomedical Image Analysis, University of Pennsylvania, 3600 Market, Suite 380, Philadelphia, PA 19104, USA"
+"Honda RandD Americas, Inc., Boston, MA, USA"
+B. S. Rochester Institute of Technology
+"College of Science, Baghdad University, Baghdad, Iraq"
+Bharath Institute of Science and Technology
+"School of E.C.E., National Technical University of Athens, 15773 Athens, Greece"
+"National University of Computer and Emerging Sciences (NUCES-FAST), Islamabad, Pakistan"
+"Priyadarshini College of Engg, Nagpur, India"
+"School of Computer Science and Technology, Tianjin University, Tianjin 300072, China"
+"Faculty of Computer, Khoy Branch, Islamic Azad University, Khoy, Iran"
+"Key Laboratory of Pervasive Computing (Tsinghua University), Ministry of Education"
+"Psychonomic Society, Inc"
+"Doctor of Philosophy in Computing of Imperial College, February"
+"North Dakota State University, Fargo, ND58105, USA"
+"Carnegie Mellon University, 5000 Forbes Avenue, Pittsburgh, PA 15213, USA"
+"Graduate Institute of Networking and Multimedia, National Taiwan University, Taipei, Taiwan"
+UNIVERSITY OF OULU GRADUATE SCHOOL
+"University College London, 12 Queen Square, London WC1N 3BG, UK"
+"The American University In Cairo, Road 90, New Cairo, Cairo, Egypt"
+"Montefiore Institute, University of Li ge, 4000 Li ge, Belgium"
+ATR Human Information Processing Research Laboratories
+"Solapur University, INDIA"
+"Katholieke Universiteit Leuven, ESAT/VISICS"
+"Vision and Sensing, HCC Lab, ESTeM, University of Canberra"
+"Institute for Interdisciplinary Information Sciences, Tsinghua University, Beijing, China"
+"Netherlands, Utrecht University, Utrecht, The Netherlands"
+"Integrated Research Center, Universit`a Campus Bio-Medico di Roma"
+"University of Ioannina, Ioannina, Greece, 2 Computational Biomedicine"
+"School of Computer Science, Northwestern Polytechnical University, Xi an China"
+"Shenzhen Key Lab of CVPR, Shenzhen Institutes of Advanced Technology, CAS"
+Autonomous University of Barcelona
+"Vision and Fusion Lab, Karlsruhe Institute of Technology KIT, Karlsruhe, Germany"
+"University Center of FEI, S ao Bernardo do Campo, Brazil"
+University Lecturer Anu Soikkeli
+"University of Alberta, Edmonton, AB T6G 2E8, Canada"
+"Institute of Systems Engineering, Southeast University, Nanjing, China"
+"Assistant Lecturer, College of Science, Baghdad University, Baghdad, Iraq"
+"Toyota Technological Institute, Chicago (TTIC"
+University of California at San Diego
+"University of Balochistan, Quetta"
+"Intelligent Systems Lab Amsterdam, University of Amsterdam"
+"Graduate University of CAS, 100190, Beijing, China"
+"applications has different requirements, including: processing time (off-line, on-line, or real-time"
+"School of Control Science and Engineering, Shandong University, Jinan 250061, China"
+"Southeast University, Nanjing 210096, China"
+"Johns Hopkins University, 3400 N. Charles Street, Baltimore, MD 21218, USA"
+"B. Tech., Indian Institute of Technology Jodhpur"
+"Robotics Institute, Carnegie Mellon University, Pittsburgh, Pennsylvania"
+"versity of Amsterdam, Amsterdam and University of Trento"
+"University of Pennsylvania School of Medicine, 1013 Blockley Hall"
+College of Information and Electrical Engineering
+"University of Amsterdam; 2Amsterdam Brain and Cognition Center, University of"
+"School of Computer Science, Fudan University, Shanghai 200433, China"
+"National Research University Higher School of Economics, Nizhny Novgorod, Russian"
+"Graduate School of Information Science, Nara Institute of Science and Technology, Ikoma-shi, Nara"
+"IES College of Technology, Bhopal"
+A dissertation submitted to the Faculty of the University of Delaware in partial
+College of Engineering (Poly
+"Institute of Human Genetics, University Hospital Magdeburg, Magdeburg, Germany"
+"Centre for Vision, Speech and Signal Processing, University of Surrey, Guildford, GU2 7XH"
+"CVAP, KTH (Royal Institute of Technology), Stockholm, SE"
+University: Dhirubhai Ambani Institute of Information and Communication Technology
+"PG scholar, Communication Systems, Adhiyamaan College of Engineeing, Hosur, (India"
+"Ph.D Research Scholar, Karpagam University, Coimbatore, Tamil Nadu, India"
+"GSCM-LRIT, Faculty of Sciences, Mohammed V University-Agdal, Rabat, Morocco"
+"School of Optics and Electronics, Beijing Institute of Technology, Beijing"
+"Inst. Neural Computation, University of California"
+Bo gazi ci University
+Biometric Research Center
+"face processing, including age (Berry, 1990), sex (Hill"
+"Amal Jyothi College of Engineering, Kanjirappally, India"
diff --git a/scraper/reports/leaflet.arc.js b/scraper/reports/leaflet.arc.js
new file mode 100644
index 00000000..062b22a0
--- /dev/null
+++ b/scraper/reports/leaflet.arc.js
@@ -0,0 +1,2 @@
+!function(t,e){"object"==typeof exports&&"object"==typeof module?module.exports=e():"function"==typeof define&&define.amd?define("leaflet-arc",[],e):"object"==typeof exports?exports["leaflet-arc"]=e():t["leaflet-arc"]=e()}(this,function(){return function(t){function e(o){if(r[o])return r[o].exports;var s=r[o]={exports:{},id:o,loaded:!1};return t[o].call(s.exports,s,s.exports,e),s.loaded=!0,s.exports}var r={};return e.m=t,e.c=r,e.p="",e(0)}([function(t,e,r){"use strict";function o(t){return t&&t.__esModule?t:{"default":t}}function s(t,e){if(!t.geometries[0]||!t.geometries[0].coords[0])return[];var r=function(){var r=e.lng-t.geometries[0].coords[0][0]-360;return{v:t.geometries.map(function(t){return r+=360,t.coords.map(function(t){return L.latLng([t[1],t[0]+r])})}).reduce(function(t,e){return t.concat(e)})}}();return"object"===("undefined"==typeof r?"undefined":n(r))?r.v:void 0}var i=Object.assign||function(t){for(var e=1;e<arguments.length;e++){var r=arguments[e];for(var o in r)Object.prototype.hasOwnProperty.call(r,o)&&(t[o]=r[o])}return t},n="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol?"symbol":typeof t},a=r(2),h=o(a),p=function(t){return{x:t.lng,y:t.lat}};if(!L)throw new Error("Leaflet is not defined");L.Polyline.Arc=function(t,e,r){var o=L.latLng(t),n=L.latLng(e),a=i({vertices:10,offset:10},r),u=new h["default"].GreatCircle(p(o),p(n)),c=u.Arc(a.vertices,{offset:a.offset}),f=s(c,o);return L.polyline(f,a)}},function(t,e){"use strict";var r=Math.PI/180,o=180/Math.PI,s=function(t,e){this.lon=t,this.lat=e,this.x=r*t,this.y=r*e};s.prototype.view=function(){return String(this.lon).slice(0,4)+","+String(this.lat).slice(0,4)},s.prototype.antipode=function(){var t=-1*this.lat,e=this.lon<0?180+this.lon:(180-this.lon)*-1;return new s(e,t)};var i=function(){this.coords=[],this.length=0};i.prototype.move_to=function(t){this.length++,this.coords.push(t)};var 
n=function(t){this.properties=t||{},this.geometries=[]};n.prototype.json=function(){if(this.geometries.length<=0)return{geometry:{type:"LineString",coordinates:null},type:"Feature",properties:this.properties};if(1==this.geometries.length)return{geometry:{type:"LineString",coordinates:this.geometries[0].coords},type:"Feature",properties:this.properties};for(var t=[],e=0;e<this.geometries.length;e++)t.push(this.geometries[e].coords);return{geometry:{type:"MultiLineString",coordinates:t},type:"Feature",properties:this.properties}},n.prototype.wkt=function(){for(var t="",e="LINESTRING(",r=function(t){e+=t[0]+" "+t[1]+","},o=0;o<this.geometries.length;o++){if(0===this.geometries[o].coords.length)return"LINESTRING(empty)";var s=this.geometries[o].coords;s.forEach(r),t+=e.substring(0,e.length-1)+")"}return t};var a=function(t,e,r){if(!t||void 0===t.x||void 0===t.y)throw new Error("GreatCircle constructor expects two args: start and end objects with x and y properties");if(!e||void 0===e.x||void 0===e.y)throw new Error("GreatCircle constructor expects two args: start and end objects with x and y properties");this.start=new s(t.x,t.y),this.end=new s(e.x,e.y),this.properties=r||{};var o=this.start.x-this.end.x,i=this.start.y-this.end.y,n=Math.pow(Math.sin(i/2),2)+Math.cos(this.start.y)*Math.cos(this.end.y)*Math.pow(Math.sin(o/2),2);if(this.g=2*Math.asin(Math.sqrt(n)),this.g==Math.PI)throw new Error("it appears "+t.view()+" and "+e.view()+" are 'antipodal', e.g diametrically opposite, thus there is no single route but rather infinite");if(isNaN(this.g))throw new Error("could not calculate great circle between "+t+" and "+e)};if(a.prototype.interpolate=function(t){var 
e=Math.sin((1-t)*this.g)/Math.sin(this.g),r=Math.sin(t*this.g)/Math.sin(this.g),s=e*Math.cos(this.start.y)*Math.cos(this.start.x)+r*Math.cos(this.end.y)*Math.cos(this.end.x),i=e*Math.cos(this.start.y)*Math.sin(this.start.x)+r*Math.cos(this.end.y)*Math.sin(this.end.x),n=e*Math.sin(this.start.y)+r*Math.sin(this.end.y),a=o*Math.atan2(n,Math.sqrt(Math.pow(s,2)+Math.pow(i,2))),h=o*Math.atan2(i,s);return[h,a]},a.prototype.Arc=function(t,e){var r=[];if(!t||t<=2)r.push([this.start.lon,this.start.lat]),r.push([this.end.lon,this.end.lat]);else for(var o=1/(t-1),s=0;s<t;++s){var a=o*s,h=this.interpolate(a);r.push(h)}for(var p=!1,u=0,c=e&&e.offset?e.offset:10,f=180-c,l=-180+c,d=360-c,y=1;y<r.length;++y){var g=r[y-1][0],v=r[y][0],M=Math.abs(v-g);M>d&&(v>f&&g<l||g>f&&v<l)?p=!0:M>u&&(u=M)}var m=[];if(p&&u<c){var w=[];m.push(w);for(var x=0;x<r.length;++x){var b=parseFloat(r[x][0]);if(x>0&&Math.abs(b-r[x-1][0])>d){var L=parseFloat(r[x-1][0]),S=parseFloat(r[x-1][1]),j=parseFloat(r[x][0]),E=parseFloat(r[x][1]);if(L>-180&&L<l&&180==j&&x+1<r.length&&r[x-1][0]>-180&&r[x-1][0]<l){w.push([-180,r[x][1]]),x++,w.push([r[x][0],r[x][1]]);continue}if(L>f&&L<180&&j==-180&&x+1<r.length&&r[x-1][0]>f&&r[x-1][0]<180){w.push([180,r[x][1]]),x++,w.push([r[x][0],r[x][1]]);continue}if(L<l&&j>f){var F=L;L=j,j=F;var C=S;S=E,E=C}if(L>f&&j<l&&(j+=360),L<=180&&j>=180&&L<j){var G=(180-L)/(j-L),I=G*E+(1-G)*S;w.push([r[x-1][0]>f?180:-180,I]),w=[],w.push([r[x-1][0]>f?-180:180,I]),m.push(w)}else w=[],m.push(w);w.push([b,r[x][1]])}else w.push([r[x][0],r[x][1]])}}else{var N=[];m.push(N);for(var A=0;A<r.length;++A)N.push([r[A][0],r[A][1]])}for(var P=new n(this.properties),_=0;_<m.length;++_){var O=new i;P.geometries.push(O);for(var q=m[_],R=0;R<q.length;++R)O.move_to(q[R])}return P},"undefined"!=typeof t&&"undefined"!=typeof t.exports)t.exports.Coord=s,t.exports.Arc=n,t.exports.GreatCircle=a;else{var h={};h.Coord=s,h.Arc=n,h.GreatCircle=a}},function(t,e,r){"use strict";t.exports=r(1)}])});
+//# sourceMappingURL=leaflet-arc.min.js.map \ No newline at end of file
diff --git a/scraper/reports/leaflet.bezier.js b/scraper/reports/leaflet.bezier.js
new file mode 100644
index 00000000..387e0717
--- /dev/null
+++ b/scraper/reports/leaflet.bezier.js
@@ -0,0 +1,254 @@
+L.SVG.include({
+ _updatecurve: function (layer) {
+ let svg_path = this._curvePointsToPath(layer._points);
+ this._setPath(layer, svg_path);
+
+ if (layer.options.animate) {
+ let path = layer._path;
+ let length = path.getTotalLength();
+
+ if (!layer.options.dashArray) {
+ path.style.strokeDasharray = length + ' ' + length;
+ }
+
+ if (layer._initialUpdate) {
+ path.animate([
+ {strokeDashoffset: length},
+ {strokeDashoffset: 0}
+ ], layer.options.animate);
+ layer._initialUpdate = false;
+ }
+ }
+
+ return svg_path;
+ },
+
+
+ _curvePointsToPath: function (points) {
+ let point, curCommand, str = '';
+ for (let i = 0; i < points.length; i++) {
+ point = points[i];
+ if (typeof point === 'string' || point instanceof String) {
+ curCommand = point;
+ str += curCommand;
+ } else
+ str += point.x + ',' + point.y + ' ';
+
+
+ }
+ return str || 'M0 0';
+ },
+
+});
+
+// L.Path subclass drawing a quadratic bezier curve between path.from and
+// path.to. If no control point (path.mid) is supplied, one is computed so
+// the curve bows to one side of the straight segment.
+let Bezier = L.Path.extend({
+ options: {},
+ // path: {from, to[, mid]} — latlng-like endpoints (from may carry `deep`
+ //       and `slide` hints for control-point generation).
+ // icon: stored for later use (only consumed by the commented-out
+ //       setAnimatePlane below).
+ // options: standard L.Path options, plus `animate` (see _updatecurve).
+ initialize: function (path, icon, options) {
+
+ // Auto-compute the control point when the caller did not provide one.
+ if (!path.mid || path.mid[0] === undefined) {
+ path.mid = this.getMidPoint(path.from, path.to, (path.from.deep ? path.from.deep : 4), path.from.slide);
+ }
+
+ L.setOptions(this, options);
+ this._initialUpdate = true;
+ this.setPath(path);
+ this.icon = icon;
+
+ },
+ //Just after path is added
+ onAdd: function (map) {
+ this._renderer._initPath(this);
+ this._reset();
+ this._renderer._addPath(this);
+
+ // TODO adjust plane according to zoom
+ // NOTE(review): this zoom handler is intentionally empty (placeholder)
+ // and is never detached in onRemove.
+ map.on('zoom', function(){
+
+ });
+
+ },
+ // setAnimatePlane: function(path) {
+
+ // if (this.spaceship_img)
+ // this.spaceship_img.remove();
+
+ // let SnapSvg = Snap('.leaflet-overlay-pane>svg');
+
+ // let spaceship_img = this.spaceship_img = SnapSvg.image(this.icon.path).attr({
+ // visibility: "hidden"
+ // });
+
+
+ // let spaceship = SnapSvg.group(spaceship_img);
+ // let flight_path = SnapSvg.path(path).attr({
+ // 'fill': 'none',
+ // 'stroke': 'none'
+ // });
+
+ // let full_path_length = Snap.path.getTotalLength(flight_path);
+ // let half_path_length = full_path_length / 2;
+ // let third_path_length = full_path_length / 3;
+ // let forth_path_length = full_path_length / 4;
+
+
+ // let width = forth_path_length / this._map.getZoom();
+ // let height = forth_path_length / this._map.getZoom();
+
+ // width = Math.min(Math.max(width, 30), 64);
+ // height = Math.min(Math.max(height, 30), 64);
+
+
+ // let last_step = 0;
+
+
+ // Snap.animate(0, forth_path_length, function (step) {
+
+ // //show image when plane start to animate
+ // spaceship_img.attr({
+ // visibility: "visible"
+ // });
+
+ // spaceship_img.attr({width: width, height: height});
+
+ // last_step = step;
+
+ // let moveToPoint = Snap.path.getPointAtLength(flight_path, step);
+
+ // let x = moveToPoint.x - (width / 2);
+ // let y = moveToPoint.y - (height / 2);
+
+
+ // spaceship.transform('translate(' + x + ',' + y + ') rotate(' + (moveToPoint.alpha - 90) + ', ' + width / 2 + ', ' + height / 2 + ')');
+
+ // }, 2500, mina.easeout, function () {
+
+ // Snap.animate(forth_path_length, half_path_length, function (step) {
+
+ // last_step = step;
+ // let moveToPoint = Snap.path.getPointAtLength(flight_path, step);
+
+ // let x = moveToPoint.x - width / 2;
+ // let y = moveToPoint.y - height / 2;
+ // spaceship.transform('translate(' + x + ',' + y + ') rotate(' + (moveToPoint.alpha - 90) + ', ' + width / 2 + ', ' + height / 2 + ')');
+ // }, 7000, mina.easein, function () {
+ // //done
+
+ // });
+
+ // });
+
+
+ // },
+ // Returns the current {from, to, mid} path object.
+ getPath: function () {
+ return this._coords;
+ },
+ setPath: function (path) {
+ this._setPath(path);
+ return this.redraw();
+ },
+ getBounds: function () {
+ return this._bounds;
+ },
+ // Compute a quadratic-bezier control point offset to one side of the
+ // from→to segment (in lat/lng space). `deep` divides the angular offset:
+ // larger values give a flatter curve. round_side flips which side the
+ // curve bows toward. Returns [lat, lng].
+ getMidPoint: function (from, to, deep, round_side = 'LEFT_ROUND') {
+
+ // ~pi radians; sign controls which side of the segment the curve bows to.
+ let offset = 3.14;
+
+ if (round_side === 'RIGHT_ROUND')
+ offset = offset * -1;
+
+ let latlngs = [];
+
+ let latlng1 = from,
+ latlng2 = to;
+
+ let offsetX = latlng2.lng - latlng1.lng,
+ offsetY = latlng2.lat - latlng1.lat;
+
+ // Polar form of the from→to vector.
+ let r = Math.sqrt(Math.pow(offsetX, 2) + Math.pow(offsetY, 2)),
+ theta = Math.atan2(offsetY, offsetX);
+
+ let thetaOffset = (offset / (deep ? deep : 4));
+
+ // Rotate the half-segment by thetaOffset and stretch it so the chord
+ // midpoint projects back onto the segment's midpoint.
+ let r2 = (r / 2) / (Math.cos(thetaOffset)),
+ theta2 = theta + thetaOffset;
+
+ let midpointX = (r2 * Math.cos(theta2)) + latlng1.lng,
+ midpointY = (r2 * Math.sin(theta2)) + latlng1.lat;
+
+ let midpointLatLng = [midpointY, midpointX];
+
+ // NOTE(review): `latlngs` is populated but never read or returned —
+ // appears to be dead code.
+ latlngs.push(latlng1, midpointLatLng, latlng2);
+
+ return midpointLatLng;
+ },
+ // Store the coords and recompute the lat/lng bounds from them.
+ _setPath: function (path) {
+ this._coords = path;
+ this._bounds = this._computeBounds();
+ },
+ // Bounds covering the endpoints and the control point.
+ _computeBounds: function () {
+
+ let bound = new L.LatLngBounds();
+
+ bound.extend(this._coords.from);
+ bound.extend(this._coords.to);//for single destination
+ bound.extend(this._coords.mid);
+
+ return bound;
+ },
+ getCenter: function () {
+ return this._bounds.getCenter();
+ },
+ // Redraw unless the layer has not been added to a map yet.
+ _update: function () {
+ if (!this._map) {
+ return;
+ }
+ this._updatePath();
+ },
+ _updatePath: function () {
+ //animated plane
+ let path = this._renderer._updatecurve(this);
+ // this.setAnimatePlane(path);
+ },
+ // Project lat/lngs to layer points, producing the command/point sequence
+ // consumed by _curvePointsToPath: ['M', from, 'Q', mid, to].
+ _project: function () {
+
+ this._points = [];
+
+ this._points.push('M');
+
+ let curPoint = this._map.latLngToLayerPoint(this._coords.from);
+ this._points.push(curPoint);
+
+ if (this._coords.mid) {
+ this._points.push('Q');
+ curPoint = this._map.latLngToLayerPoint(this._coords.mid);
+ this._points.push(curPoint);
+ }
+ curPoint = this._map.latLngToLayerPoint(this._coords.to);
+ this._points.push(curPoint);
+
+
+ },
+
+
+});
+
+// Factory: build a layer group of Bezier segments from config.path, an array
+// of waypoint lists. Each consecutive pair of waypoints in a list becomes one
+// Bezier curve; `options` are passed through as L.Path options.
+L.bezier = function (config, options) {
+ let paths = [];
+ for (let i = 0; config.path.length > i; i++) {
+ // Track the previous waypoint; falsy on the first iteration so no
+ // segment is created until we have a pair.
+ let last_destination = false;
+ for (let c = 0; config.path[i].length > c; c++) {
+
+ let current_destination = config.path[i][c];
+ if (last_destination) {
+ let path_pair = {from: last_destination, to: current_destination};
+ paths.push(new Bezier(path_pair, config.icon, options));
+ }
+
+ last_destination = config.path[i][c];
+ }
+ }
+ return L.layerGroup(paths);
+
+};
+
+
diff --git a/scraper/reports/map.js b/scraper/reports/map.js
new file mode 100644
index 00000000..58984c8e
--- /dev/null
+++ b/scraper/reports/map.js
@@ -0,0 +1,92 @@
+// Parse JSON embedded in the page element with the given id.
+// Returns [] (and logs) when the element is missing or its text is not
+// valid JSON.
+function read_json(selector) {
+ try {
+ return JSON.parse(document.querySelector('#' + selector).innerText)
+ } catch(e) {
+ console.log("json error!")
+ return []
+ }
+}
+
+// Fullscreen-map mode is toggled via a '#map' URL hash.
+let map_mode = false
+if (window.location.hash.indexOf('map') !== -1) {
+ document.body.parentNode.classList.add('map')
+ map_mode = true
+}
+
+// World view centered at [25, 0], Mapbox dark tiles.
+let map = L.map('mapid').setView([25, 0], 2);
+L.tileLayer('https://api.tiles.mapbox.com/v4/{id}/{z}/{x}/{y}.png?access_token={accessToken}', {
+ attribution: 'Map data &copy; <a href="https://www.openstreetmap.org/">OpenStreetMap</a> contributors, <a href="https://creativecommons.org/licenses/by-sa/2.0/">CC-BY-SA</a>, Imagery © <a href="https://www.mapbox.com/">Mapbox</a>',
+ maxZoom: 18,
+ id: 'mapbox.dark',
+ style: 'mapbox://styles/mapbox/dark-v9',
+ accessToken: 'pk.eyJ1IjoiZmFuc2FsY3kiLCJhIjoiY2pvN3I1czJwMHF5NDNrbWRoMWpteHlrdCJ9.kMpM5syQUhVjKkn1iVx9fg'
+}).addTo(map);
+// Citation rows and the source institution's geocoded address, embedded in
+// the page as JSON (see read_json).
+let points = read_json('citations')
+let address = read_json('address')
+let source = [0,0]
+// NOTE(review): read_json returns [] on failure, which is truthy in JS, so
+// this guard never skips; slice(3,5) on an empty array just yields [NaN]s.
+// Presumably address[3]/address[4] are lat/lng strings — TODO confirm schema.
+if (address) {
+ source = address.slice(3,5).map(n => parseFloat(n))
+ console.log(address, source)
+}
+
+var redDot = L.icon({
+ iconUrl: '../reddot.png',
+ iconSize: [17, 17], // size of the icon
+ iconAnchor: [8, 8], // point of the icon which will correspond to marker's location
+ popupAnchor: [0, -5] // point from which the popup should open relative to the iconAnchor
+});
+
+// One marker + bezier arc from the source institution to each citing location.
+points.forEach(point => {
+ /*
+ [
+ "Face Alignment by Local Deep Descriptor Regression",
+ "Rutgers University",
+ [
+ "Rutgers University",
+ "40.47913175",
+ "-74.431688684404",
+ "Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA"
+ ]
+ ]
+ */
+
+ // NOTE(review): slice(5,7) implies each row is a flat array with lat/lng
+ // at indices 5-6, which disagrees with the nested example above — verify
+ // against the generator of #citations.
+ const latlng = point.slice(5,7).map(n => parseFloat(n))
+ // console.log(point)
+ // Skip rows that failed geocoding.
+ if (!latlng.length || isNaN(latlng[0]) || isNaN(latlng[1])) return
+ var marker = L.marker(latlng, { icon: redDot }).addTo(map);
+ marker.bindPopup([
+ "<b>", point[0], "</b>",
+ "<br>",
+ point[1],
+ ].join(''))
+ // var arcStyle = {
+ // color: 'rgb(245, 246, 150)',
+ // fillColor: 'rgb(245, 246, 150)',
+ // opacity: 0.8,
+ // weight: '1',
+ // vertices: 100,
+ // }
+ // L.Polyline.Arc(source, latlng, arcStyle).addTo(map);
+ // console.log(latlng)
+ var pathStyle = {
+ color: 'rgb(245, 246, 150)',
+ fillColor: 'rgb(245, 246, 150)',
+ opacity: 0.8,
+ weight: '1',
+ }
+ L.bezier({
+ path: [
+ [
+ {lat: source[0], lng: source[1]},
+ {lat: latlng[0], lng: latlng[1]},
+ ],
+ ]
+ }, pathStyle).addTo(map)
+})
+
+// Marker for the source institution itself, labelled from the page's <h2>.
+var marker = L.marker(source, { icon: redDot }).addTo(map);
+marker.bindPopup([
+ "<b>", document.querySelector('h2').innerText, "</b>",
+ '<br/>',
+ address[0]
+].join(''))
diff --git a/scraper/reports/misc/all_doi-1.csv b/scraper/reports/misc/all_doi-1.csv
new file mode 100644
index 00000000..16e74d90
--- /dev/null
+++ b/scraper/reports/misc/all_doi-1.csv
@@ -0,0 +1,749 @@
+95d858b39227edeaf75b7fad71f3dc081e415d16,http://doi.org/10.1007/s11042-017-5073-3
+6a38e4bb35673a73f041e34d3f2db7067482a9b5,http://doi.acm.org/10.1145/2663204.2666277
+045275adac94cced8a898a815293700401e9955f,https://doi.org/10.1007/s00138-012-0447-z
+55c46ae1154ed310610bdf5f6d9e7023d14c7eb4,http://doi.acm.org/10.1145/1027933.1028013
+6ad5ac867c5ca56e0edaece153269d989b383b59,https://doi.org/10.1109/CISP-BMEI.2016.7852723
+b598f7761b153ecb26e9d08d3c5817aac5b34b52,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4618852
+32e4fc2f0d9c535b1aca95aeb5bcc0623bcd2cf2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1334680
+998542e5e3882bb0ce563d390b1e1bff5460e80c,https://doi.org/10.1109/AFGR.2008.4813471
+217aa3aa0b3d9f6f394b5d26f03418187d775596,http://doi.acm.org/10.1145/3123266.3123298
+af9419f2155785961a5c16315c70b8228435d5f8,http://doi.org/10.1016/j.patrec.2015.12.013
+3266fbaaa317a796d0934b9a3f3bb7c64992ac7d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4527244
+346752e3ab96c93483413be4feaa024ccfe9499f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6960834
+602f772c69e4a1a65de00443c30d51fdd47a80aa,https://doi.org/10.1109/IISA.2013.6623705
+d0dd1364411a130448517ba532728d5c2fe78ed9,https://doi.org/10.1109/ISCAS.2016.7527183
+0a4a8768c1ed419baebe1c420bd9051760875cbe,https://doi.org/10.1109/EUSIPCO.2016.7760451
+170aa0f16cd655fdd4d087f5e9c99518949a1b5c,https://doi.org/10.1007/s11263-007-0074-8
+2f837ff8b134b785ee185a9c24e1f82b4e54df04,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5739539
+96ba65bffdddef7c7737c0f42ff4299e95cd85c2,http://doi.org/10.1007/s11042-018-5658-5
+d42dbc995318e2936714c65c028700bfd3633049,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477592
+0e6f422c3f79c552c0c3d7eda0145aed8680f0ea,https://doi.org/10.1016/j.patrec.2012.09.008
+2a98351aef0eec1003bd5524933aed8d3f303927,https://doi.org/10.1109/CIRA.2007.382901
+c1173b8d8efb8c2d989ce0e51fe21f6b0b8d1478,https://doi.org/10.1109/TCYB.2016.2535122
+00a38ebce124879738b04ffc1536018e75399193,https://doi.org/10.1109/BTAS.2017.8272766
+3f4711c315d156a972af37fe23642dc970a60acf,https://doi.org/10.1109/IJCNN.2008.4634393
+f449c85b8ba5fa67ead341c7ad4ec396f4ab2dd6,http://doi.ieeecomputersociety.org/10.1109/TKDE.2015.2448547
+68f19f06f49aa98b676fc6e315b25e23a1efb1f0,https://doi.org/10.1109/ICIP.2015.7351080
+a136ccaa67f660c45d3abb8551c5ed357faf7081,https://www.ncbi.nlm.nih.gov/pubmed/27078863
+e0162dea3746d58083dd1d061fb276015d875b2e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014992
+d4ec62efcc631fa720dfaa1cbc5692b39e649008,https://doi.org/10.1109/ICDM.2016.0026
+70d0bffa288e317bc62376f4f577c5bd7712e521,https://doi.org/10.1049/iet-cvi.2012.0094
+110919f803740912e02bb7e1424373d325f558a9,http://doi.acm.org/10.1145/3123266.3123421
+9ff931ca721d50e470e1a38e583c7b18b6cdc2cc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7407637
+cc2a9f4be1e465cb4ba702539f0f088ac3383834,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7344595
+dba7d8c4d2fca41269a2c96b1ea594e2d0b9bdda,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7422069
+965c4a8087ae208c08e58aaf630ad412ac8ce6e2,http://doi.ieeecomputersociety.org/10.1109/ACII.2013.100
+598744c8620e4ecbf449d14d7081fbf1cd05851f,https://www.ncbi.nlm.nih.gov/pubmed/29731533
+81af86e3d343a40ce06a3927b6aa8c8853f6811a,http://doi.acm.org/10.1145/3009977.3009996
+64a08beb073f62d2ce44e25c4f887de9208625a4,https://doi.org/10.1080/09540090701725557
+62e61f9f7445e8dec336415ac0c7e677f9f5f7c1,https://doi.org/10.1142/S0219467814500065
+9b1a70d6771547cbcf6ba646f8775614c0162aca,https://doi.org/10.1016/j.patrec.2016.11.005
+4a03f07397c5d32463750facf010c532f45233a5,http://doi.ieeecomputersociety.org/10.1109/T-AFFC.2012.32
+972e044f69443dfc5c987e29250b2b88a6d2f986,http://doi.org/10.1134/S1054661811020738
+f486624efa750d718a670fba3c7f21b1c84ebaeb,https://doi.org/10.1109/TCYB.2016.2581861
+fefaa892f1f3ff78db4da55391f4a76d6536c49a,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2497689
+27a586a435efdcecb151c275947fe5b5b21cf59b,https://doi.org/10.1007/s12559-017-9530-0
+71c4b8e1bb25ee80f4317411ea8180dae6499524,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8463396
+1ad5cb4c1eec5a9666b5dbbb6fab43576d0935db,https://doi.org/10.1109/ICIP.2016.7533026
+60777fbca8bff210398ec8b1179bc4ecb72dfec0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751535
+dec5b11b01f35f72adb41d2be26b9b95870c5c00,http://ieeexplore.ieee.org/document/7071948/
+7caa3a74313f9a7a2dd5b4c2cd7f825d895d3794,http://doi.org/10.1007/s11263-016-0967-5
+120b9c271c3a4ea0ad12bbc71054664d4d460bc3,https://doi.org/10.1109/DICTA.2015.7371259
+1723227710869a111079be7d61ae3df48604e653,https://doi.org/10.1109/INISTA.2014.6873606
+b0f59b71f86f18495b9f4de7c5dbbebed4ae1607,https://doi.org/10.1016/j.neucom.2015.04.085
+82953e7b3d28ccd1534eedbb6de7984c59d38cd4,https://doi.org/10.1109/TNNLS.2014.2356856
+2d3af3ee03793f76fb8ff15e7d7515ff1e03f34c,http://doi.org/10.1007/s11042-017-4818-3
+c07ab025d9e3c885ad5386e6f000543efe091c4b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8302601
+c459014131cbcd85f5bd5c0a89115b5cc1512be9,http://doi.ieeecomputersociety.org/10.1109/AUTOID.2005.23
+4b9b30066a05bdeb0e05025402668499ebf99a6b,https://doi.org/10.1109/ISPACS.2012.6473448
+4ef09fe9f7fa027427414cf1f2e9050ac7f5e34d,http://doi.org/10.1007/s11227-018-2408-4
+7eb8476024413269bfb2abd54e88d3e131d0aa0e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4284739
+f6e6b4d0b7c16112dcb71ff502033a2187b1ec9b,https://doi.org/10.1109/TMM.2015.2476657
+7c8e0f3053e09da6d8f9a1812591a35bccd5c669,http://doi.org/10.1007/978-3-030-00470-5
+dc3dc18b6831c867a8d65da130a9ff147a736745,http://dl.acm.org/citation.cfm?id=2750679
+bb2f61a057bbf176e402d171d79df2635ccda9f6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8296311
+ddd0f1c53f76d7fc20e11b7e33bbdc0437516d2b,https://doi.org/10.1109/ICDSP.2016.7868598
+a1e07c31184d3728e009d4d1bebe21bf9fe95c8e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7900056
+ea1303f6746f815b7518c82c9c4d4a00cd6328b9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411434
+a2b76ab614d92f5e71312b530f0b6281d0c500f7,https://doi.org/10.1007/s10898-014-0231-x
+70444627cb765a67a2efba17b0f4b81ce1fc20ff,https://doi.org/10.1109/TNNLS.2016.2609434
+72d110df78a7931f5f2beaa29f1eb528cf0995d3,https://doi.org/10.1007/s11517-015-1346-z
+bf30477f4bd70a585588528355b7418d2f37953e,https://doi.org/10.1109/ICPR.2016.7900280
+d6e08345ba293565086cb282ba08b225326022fc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7490397
+9729930ab0f9cbcd07f1105bc69c540330cda50a,https://doi.org/10.1109/ACCESS.2017.2749331
+7783095a565094ae5b3dccf082d504ddd7255a5c,http://dl.acm.org/citation.cfm?id=2502258
+0629bc2b12245195af989e21573369329b7ef2b7,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2016.2553038
+b7128e0fe18dcb42e8a2ac5cf6794f64a8e37bd0,https://doi.org/10.1109/SERA.2017.7965717
+daa4cfde41d37b2ab497458e331556d13dd14d0b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406477
+587b8c147c6253878128ddacf6e5faf8272842a4,http://dl.acm.org/citation.cfm?id=2638549
+569988e19ab36582d4bd0ec98e344cbacf177f45,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2014.2347960
+f7be8956639e66e534ed6195d929aed4e0b90cad,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4117059
+f28d549feffd414f38147d5e0460883fb487e2d3,https://doi.org/10.1007/s10462-011-9273-3
+a2af07176a38fe844b0e2fdf4abae65472628b38,https://doi.org/10.1109/ICIP.2014.7026060
+57ca530e9acb63487e8591cb6efb89473aa1e5b4,https://doi.org/10.1109/TIP.2014.2356292
+9227c1a5b26556b9c34015b3ea5f9ae5f50e9b23,https://doi.org/10.1109/FCV.2015.7103729
+856cc83a3121de89d4a6d9283afbcd5d7ef7aa2b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6417014
+556875fb04ed6043620d7ca04dfe3d8b3a9284f5,https://doi.org/10.1109/ICPR.2014.437
+6b99cd366f2ea8e1c9abadf73b05388c0e24fec3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100204
+6554ca3187b3cbe5d1221592eb546dfc11aac14b,http://doi.acm.org/10.1145/2501643.2501647
+68d566ed4041a7519acb87753036610bd64dcc09,https://doi.org/10.1007/s11390-013-1347-z
+f1173a4c5e3501323b37c1ae9a6d7dd8a236eab8,http://arxiv.org/abs/1504.07339
+7cdf3bc1de6c7948763c0c2dfa4384dcbd3677a0,http://doi.org/10.1007/s11263-016-0920-7
+9745a7f38c9bba9d2fd076813fc9ab7a128a3e19,http://doi.acm.org/10.1145/2393347.2396335
+aae31f092fadd09a843e1ca62af52dc15fc33c56,http://doi.ieeecomputersociety.org/10.1109/ACII.2017.8273609
+2b5005c2abf2d9a8c16afa50306b6959dfc72275,https://doi.org/10.1109/ICARCV.2010.5707216
+b8a16fcb65a8cee8dd32310a03fe36b5dff9266a,https://doi.org/10.1109/SIU.2014.6830473
+5f1cd82343f4bd6972f674d50aecb453d06f04ad,http://doi.acm.org/10.1145/3125739.3125756
+10e2f2ad1dedec6066e063cb2098b089b35905a8,http://doi.acm.org/10.1145/3052930
+c83d142a47babe84e8c4addafa9e2bb9e9b757a5,https://doi.org/10.1109/MLSP.2012.6349762
+70d2ab1af0edd5c0a30d576a5d4aa397c4f92d3e,http://doi.org/10.1007/s11042-018-5608-2
+83b54b8c97dc14e302dad191327407ec0d5fb4a6,https://doi.org/10.1109/ICIP.2017.8296913
+34dd83115195676e7a8b008eb0e9abe84b330b32,https://doi.org/10.1007/s00371-014-0931-8
+81b8a6cabcd6451b21d5b44e69b0a355d9229cc4,https://doi.org/10.1109/ICDSP.2017.8096137
+a26fd9df58bb76d6c7a3254820143b3da5bd584b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8446759
+cf7a4442a6aad0e08d4aade8ec379c44f84bca8a,http://doi.acm.org/10.1145/1873951.1874054
+b3050dc48600acf2f75edf1f580a1f9e9cb3c14a,https://doi.org/10.1007/s00138-013-0584-z
+214072c84378802a0a0fde0b93ffb17bc04f3759,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7301397
+65fc8393610fceec665726fe4e48f00dc90f55fb,https://doi.org/10.1109/CYBConf.2013.6617455
+098363b29eef1471c494382338687f2fe98f6e15,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411212
+a9d1d00d6897ae23c9a7e9fb75a3c7417a6730a4,https://doi.org/10.1049/iet-ipr.2016.1074
+4c648fe9b7bfd25236164333beb51ed364a73253,http://doi.acm.org/10.1145/3038924
+18145b0b13aa477eeabef9ceec4299b60e87c563,https://doi.org/10.1007/s11042-011-0834-x
+fd9ab411dc6258763c95b7741e3d51adf5504040,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5595808
+3060ac37dec4633ef69e7bc63488548ab3511f61,https://doi.org/10.1007/s00521-018-3358-8
+628f9c1454b85ff528a60cd8e43ec7874cf17931,http://doi.acm.org/10.1145/2993148.2993193
+7ee7b0602ef517b445316ca8aa525e28ea79307e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8418530
+7ebfa8f1c92ac213ff35fa27287dee94ae5735a1,https://doi.org/10.1109/TMM.2016.2614429
+84c5b45328dee855c4855a104ac9c0558cc8a328,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411213
+940e5c45511b63f609568dce2ad61437c5e39683,https://doi.org/10.1109/TIP.2015.2390976
+0f2461a265be997c962fa562ae48378fb964b7b4,https://doi.org/10.1109/BigData.2016.7841028
+a92147bed9c17c311c6081beb0ef4c3165b6268e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6805594
+d916602f694ebb9cf95d85e08dd53f653b6196c3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237607
+d046030f7138e5a2dbe2b3eec1b948ad8c787538,https://doi.org/10.1109/ICIP.2009.5413447
+3f0c6dbfd3c9cd5625ba748327d69324baa593a6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373880
+566563a02dbaebec07429046122426acd7039166,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8461618
+3a0558ebfde592bd8bd07cb72b8ca8f700715bfb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6636646
+cd2f8d661ea2c6d6818a278eb4f0548751c3b1ae,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7945277
+0e02dadab802128f6155e099135d03ca6b72f42c,http://doi.ieeecomputersociety.org/10.1109/TKDE.2014.2365793
+0e8a28511d8484ad220d3e8dde39220c74fab14b,https://doi.org/10.1109/TNNLS.2015.2477826
+4686df20f0ee40cd411e4b43860ef56de5531d9e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1301536
+3ba74755c530347f14ec8261996dd9eae896e383,https://doi.org/10.1109/JSSC.2017.2767705
+3d89f9b4da3d6fb1fdb33dea7592b5992069a096,https://doi.org/10.1109/CISP-BMEI.2017.8302003
+2a2df7e790737a026434187f9605c4763ff71292,http://doi.org/10.1007/s11042-017-4665-2
+0c1314d98bb6b99af00817644c1803dbc0fb5ff5,http://doi.ieeecomputersociety.org/10.1109/BigMM.2015.29
+47d07217c501644d63adfec740346f244abaaae8,https://doi.org/10.1016/j.patcog.2016.05.017
+eed05da2c0ab7d2b0a3c665a5368efa81b185099,https://doi.org/10.1016/j.neucom.2014.05.020
+85ae6fa48e07857e17ac4bd48fb804785483e268,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7755833
+7918e3e15099b4b2943746e1f6c9e3992a79c5f3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995492
+2ed7d95588200c8c738c7dd61b8338538e04ea30,https://doi.org/10.1109/ICIP.2010.5654063
+a5b6a3234e15343d2e5417cff46c0a5f0943521e,https://doi.org/10.1109/TNNLS.2014.2321420
+3d4b76fe73ea16400d62d0d776b3f43cc5ecf72b,https://doi.org/10.1109/TIFS.2015.2512561
+5c4f9260762a450892856b189df240f25b5ed333,https://doi.org/10.1109/TIP.2017.2651396
+8fa9cb5dac394e30e4089bf5f4ffecc873d1da96,http://doi.org/10.1007/s11042-017-5245-1
+48a402593ca4896ac34fbebf1e725ab1226ecdb7,http://doi.org/10.1016/j.patcog.2015.01.022
+359b4a4c6cb58c8ab5e8eaaed0e8562c8c43a0f9,https://doi.org/10.1007/s10044-014-0377-7
+2be9284d531b8c573a4c39503ca50606446041a3,https://doi.org/10.1109/ICIP.2005.1530004
+0387b32d0ebd034dc778972367e7d4194223785d,http://doi.acm.org/10.1145/2522848.2531740
+397022a4460750c762dbb0aaebcacc829dee8002,https://doi.org/10.1109/TIFS.2013.2258152
+7914c3f510e84a3d83d66717aad0d852d6a4d148,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7532448
+21cbf46c6adfb3a44ed2b30ff0b21a8391c18b13,https://doi.org/10.1109/VCIP.2017.8305137
+39c8ed5213882d4dbc74332245ffe201882c5de1,https://doi.org/10.1109/ICASSP.2013.6638045
+cb4d3d1b8fbb6df71a184dd8f00f89f84fa8373b,http://doi.ieeecomputersociety.org/10.1109/IJCNN.2009.5179002
+771a6a80dd08212d83a4e976522e1ce108881401,https://doi.org/10.1109/IPTA.2016.7820979
+c65d2ee433ae095652abe3860eeafe6082c636c6,http://doi.ieeecomputersociety.org/10.1109/FG.2013.6553714
+414fdfe5f2e4f32a59bf15062b6e524cbf970637,https://doi.org/10.1109/TIFS.2014.2361028
+eeaeca3a601d65d2d978bf3da43ab42fa5e08ed2,https://doi.org/10.1109/FSKD.2016.7603398
+71ca8b6e84c17b3e68f980bfb8cddc837100f8bf,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7899774
+1aeef2ab062c27e0dbba481047e818d4c471ca57,https://doi.org/10.1109/ICACCI.2015.7275860
+58538cc418bf41197fad4fc4ee2449b2daeb08b1,http://doi.org/10.1007/s11042-017-4343-4
+4a7e5a0f6a0df8f5ed25ef356cd67745cd854bea,https://doi.org/10.1007/978-3-642-14922-1_68
+b7c6df1ae0e8348feecd65e9ad574d1e04d212a5,http://doi.org/10.1007/s11704-018-8015-y
+7d18e9165312cf669b799aa1b883c6bbe95bf40e,http://doi.org/10.1007/s11042-016-3492-1
+edfce091688bc88389dd4877950bd58e00ff1253,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553700
+708f4787bec9d7563f4bb8b33834de445147133b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237449
+259ddd3c618feec51576baac7eaaf80ea924b791,https://doi.org/10.1007/s11257-007-9039-4
+0bab5213911c19c40e936b08d2f8fba01e286b85,https://doi.org/10.1109/BigMM.2017.81
+df6e68db278bedf5486a80697dec6623958edba8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952696
+b209608a534957ec61e7a8f4b9d08286ae3d1d7f,https://doi.org/10.1111/j.1468-0394.2011.00589.x
+0d7652652c742149d925c4fb5c851f7c17382ab8,https://doi.org/10.1016/j.neucom.2015.05.057
+2a84f7934365f05b6707ea0ac225210f78e547af,https://doi.org/10.1109/ICPR.2016.7899690
+ca458f189c1167e42d3a5aaf81efc92a4c008976,https://doi.org/10.1109/TIP.2012.2202678
+1ea4347def5868c622d7ce57cbe171fa68207e2b,https://doi.org/10.1007/978-3-642-41181-6_23
+6feafc5c1d8b0e9d65ebe4c1512b7860c538fbdc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8448885
+e73b1137099368dd7909d203b80c3d5164885e44,http://doi.ieeecomputersociety.org/10.1109/FSKD.2008.116
+97f3d35d3567cd3d973c4c435cdd6832461b7c3c,http://doi.ieeecomputersociety.org/10.1109/FG.2017.75
+e957d0673af7454dbf0a14813201b0e2570577e9,https://doi.org/10.1109/ICPR.2016.7899699
+a100595c66f84c3ddd3da8d362a53f7a82f6e3eb,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.46
+8ebe2df4d82af79f0f082ced70f3a73d7fb93b66,https://doi.org/10.1109/URAI.2015.7358851
+a78025f39cf78f2fc66c4b2942fbe5bad3ea65fc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8404357
+f1ae9f5338fcff577b1ae9becdb66007fe57bd45,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099873
+c9527df51e63b56c61cbf16f83d1a3c5c2c82499,http://doi.acm.org/10.1145/2072298.2072311
+a2e0966f303f38b58b898d388d1c83e40b605262,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354125
+352a620f0b96a7e76b9195a7038d5eec257fd994,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373823
+179545c1fc645cb2ad9b31a30f48352d541876ff,https://doi.org/10.1109/IJCNN.2007.4371116
+e75a589ca27dc4f05c2715b9d54206dee37af266,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8409973
+b5f9180666924a3215ab0b1faf712e70b353444d,http://doi.org/10.1007/s11042-017-4661-6
+099053f2cbfa06c0141371b9f34e26970e316426,http://doi.org/10.1007/s11042-016-4079-6
+3337cfc3de2c16dee6f7cbeda5f263409a9ad81e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8398675
+ff402bd06c9c4e94aa47ad80ccc4455efa869af3,http://doi.ieeecomputersociety.org/10.1109/ICPR.2004.1334120
+434f1442533754b3098afd4e24abf1e3792b24db,https://doi.org/10.1109/CBMI.2015.7153627
+d57ce0ff4acb2910c2d1afee2ebb7aa1e72a4584,https://doi.org/10.1109/CVPRW.2010.5543816
+bd25c4ad7471580ed9787eae041b80a3c4fe97bb,https://doi.org/10.1016/j.sigpro.2010.01.019
+72345fed8d068229e50f9ea694c4babfd23244a0,http://doi.acm.org/10.1145/2632856.2632937
+d289ce63055c10937e5715e940a4bb9d0af7a8c5,http://dl.acm.org/citation.cfm?id=3081360
+239e305c24155add73f2a0ba5ccbd66b37f77e14,http://dl.acm.org/citation.cfm?id=1219097
+55fdff2881d43050a8c51c7fdc094dbfbbe6fa46,https://doi.org/10.1109/ICB.2016.7550064
+3690af0af51a067750f664c08e48b486d1cd476d,http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2012.41
+681399aa0ea4cbffd9ab22bf17661d6df4047349,http://doi.ieeecomputersociety.org/10.1109/CISIS.2012.207
+8cd0855ca967ce47b0225b58bbadd38d8b1b41a1,https://doi.org/10.1109/TIP.2017.2721106
+5e9ec3b8daa95d45138e30c07321e386590f8ec7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6967830
+d4fb26f5528b9a1f04ea773cc2b920e01fc0edd4,https://doi.org/10.1109/TSMCB.2009.2032155
+ae73f771d0e429a74b04a6784b1b46dfe98f53e4,http://doi.ieeecomputersociety.org/10.1109/ICPR.2014.326
+d6e3bd948aae43f7654ea1d9e89d88f20d8cf25f,https://doi.org/10.1109/ACPR.2013.98
+b7043048b4ba748c9c6317b6d8206192c34f57ff,https://doi.org/10.1109/ICIP.2016.7533061
+535cdce8264ac0813d5bb8b19ceafa77a1674adf,http://doi.org/10.1007/s12559-016-9402-z
+a532cfc69259254192aee3fc5be614d9197e7824,http://doi.org/10.1016/j.patcog.2016.12.028
+3f88ea8cf2eade325b0f32832561483185db5c10,https://doi.org/10.1109/TIP.2017.2721838
+021e008282714eaefc0796303f521c9e4f199d7e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354319
+437642cfc8c34e445ea653929e2d183aaaeeb704,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014815
+69b2a7533e38c2c8c9a0891a728abb423ad2c7e7,https://doi.org/10.1016/j.imavis.2013.03.003
+0c0db39cac8cb76b52cfdbe10bde1c53d68d202f,http://doi.acm.org/10.1145/3123266.3123334
+751fb994b2c553dc843774a5620bfcab8bc657fd,https://doi.org/10.1007/978-3-319-67180-2_47
+3980dadd27933d99b2f576c3b36fe0d22ffc4746,https://doi.org/10.1109/ROBIO.2017.8324597
+459eb3cfd9b52a0d416571e4bc4e75f979f4b901,https://doi.org/10.1109/ROBIO.2015.7418998
+e51f1ee5535017e10a5f77100ff892509ec6b221,https://doi.org/10.1109/ICSMC.2007.4413825
+1ce29d6b820ed4a24da27b76ffd9605d5b3b10b5,https://doi.org/10.1016/j.imavis.2015.01.007
+26949c1ba7f55f0c389000aa234238bf01a32d3b,https://doi.org/10.1109/ICIP.2017.8296814
+ea227e47b8a1e8f55983c34a17a81e5d3fa11cfd,https://doi.org/10.1109/ICIP.2017.8296549
+d6c8f5674030cf3f5a2f7cc929bad37a422b26a0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337371
+68eb6e0e3660009e8a046bff15cef6fe87d46477,https://doi.org/10.1109/ICIP.2017.8296999
+7f904093e6933cab876e87532111db94c71a304f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6117544
+82e1692467969940a6d6ac40eae606b8b4981f7e,https://doi.org/10.1109/ICMEW.2012.56
+a20036b7fbf6c0db454c8711e72d78f145560dc8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4761890
+e9cebf627c204c6949dcc077d04c57eb66b2c038,https://doi.org/10.1109/SIU.2013.6531371
+1195f0bf8f745ba69da915203bcd79589b94aec5,https://doi.org/10.1016/j.procs.2010.11.004
+06b4e41185734f70ce432fdb2b121a7eb01140af,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8362753
+6b8329730b2e13178a577b878631735a1cd58a71,http://doi.ieeecomputersociety.org/10.1109/FiCloud.2015.78
+812d3f6975f4cb87e9905ef18696c5c779227634,https://doi.org/10.1186/s13640-016-0151-4
+4f8b4784d0fca31840307650f7052b0dde736a76,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7017496
+bd26faef48080b5af294b19139c804ffec70825e,https://doi.org/10.1007/s11390-015-1526-1
+9635493998ad60764d7bbf883351af57a668d159,https://doi.org/10.1109/IJCNN.2017.7966005
+5ee0103048e1ce46e34a04c45ff2c2c31529b466,https://doi.org/10.1109/ICIP.2015.7350886
+66f4d7c381bd1798703977de2e38b696c6641b77,https://doi.org/10.1109/FSKD.2015.7382360
+aea977a3b5556957ed5fb3ef21685ee84921eaa3,https://doi.org/10.1007/s12193-017-0256-9
+f8162276f3b21a3873dde7a507fd68b4ab858bcc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4761923
+2a7e6a1b2638550370a47f2f6f6e38e76fe9ac13,http://doi.acm.org/10.1145/3090311
+69a41c98f6b71764913145dbc2bb4643c9bc4b0a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8444452
+d0a8889f694422614bf3ecccd69aa1d4f7822606,https://doi.org/10.1007/978-0-85729-997-0_22
+b8e5800dfc590f82a0f7eedefce9abebf8088d12,https://doi.org/10.1109/DCC.2017.87
+7123e510dea783035b02f6c35e35a1a09677c5ab,https://doi.org/10.1109/ICPR.2016.7900297
+bccb35704cdd3f2765b1a3f0296d1bff3be019c1,https://doi.org/10.1109/ICMLA.2016.0145
+2564920d6976be68bb22e299b0b8098090bbf259,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8407761
+d7bd37920a3a4a4d681151131e23a839695c8d5b,https://doi.org/10.1109/ICRA.2011.5979870
+5e806d8fa48216041fe719309534e3fa903f7b5b,https://doi.org/10.1109/BTAS.2010.5634501
+ad7b6d2e8d66f720cc83323a0700c25006d49609,https://doi.org/10.1109/TIP.2009.2028255
+62f017907e19766c76887209d01d4307be0cc573,http://doi.org/10.1016/j.imavis.2012.02.001
+b5747ecfa0f3be0adaad919d78763b1133c4d662,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8397022
+239958d6778643101ab631ec354ea1bc4d33e7e0,http://doi.org/10.1016/j.patcog.2017.06.009
+1ed49161e58559be399ce7092569c19ddd39ca0b,https://doi.org/10.1109/ICPR.2016.7899973
+e3b9863e583171ac9ae7b485f88e503852c747b6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7494596
+a9506c60ec48056087ee3e10d28ff7774fbbd553,https://doi.org/10.1109/TCSVT.2014.2376136
+21959bc56a160ebd450606867dce1462a913afab,http://doi.org/10.1007/s11042-018-6071-9
+292e1c88d43a77dbe5c610f4f611cfdb6d3212b6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1301520
+8c048be9dd2b601808b893b5d3d51f00907bdee0,https://doi.org/10.1631/FITEE.1600041
+9b0ead0a20a2b7c4ae40568d8d1c0c2b23a6b807,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354290
+fa72e39971855dff6beb8174b5fa654e0ab7d324,https://doi.org/10.1007/s11042-013-1793-1
+6584c3c877400e1689a11ef70133daa86a238602,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8039231
+c1581b5175994e33549b8e6d07b4ea0baf7fe517,https://doi.org/10.1109/IJCNN.2011.6033478
+163d0e6ea8c8b88b4383a4eaa740870e2458b9b0,https://doi.org/10.1007/978-3-319-71928-3_18
+e7697c7b626ba3a426106d83f4c3a052fcde02a4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553713
+fff31548617f208cd5ae5c32917afd48abc4ff6a,http://doi.acm.org/10.1145/3139295.3139309
+cd3b713722ccb1e2ae3b050837ca296b2a2dd82a,https://doi.org/10.1016/j.jvcir.2016.07.015
+b2ddea9c71cd73fa63e09e8121bc7a098fae70b4,https://doi.org/10.1109/ISCCSP.2012.6217849
+a7ec294373ccc0598cbb0bbb6340c4e56fe5d979,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1699580
+f41e80f941a45b5880f4c88e5bf721872db3400f,http://doi.ieeecomputersociety.org/10.1109/IC3.2017.8284359
+9d46485ca2c562d5e295251530a99dd5df99b589,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813386
+1bd8ab47177997acb3b0cca4b6a801e6e6ec3eac,https://doi.org/10.1109/ICIP.2014.7025273
+98c5dc00bd21a39df1d4411641329bdd6928de8a,http://doi.ieeecomputersociety.org/10.1109/CVPR.2011.5995447
+3bdaf59665e6effe323a1b61308bcac2da4c1b73,https://doi.org/10.1109/ROMAN.2012.6343736
+65869cc5ef00d581c637ae8ea6ca02ae4bb2b996,http://doi.ieeecomputersociety.org/10.1109/ICDM.2007.65
+ef7b8f73e95faa7a747e0b04363fced0a38d33b0,https://doi.org/10.1109/ICIP.2017.8297028
+d91a5589fd870bf62b7e4979d9d47e8acf6c655d,http://doi.acm.org/10.1145/2382336.2382343
+493bc7071e35e7428336a515d1d26020a5fb9015,https://doi.org/10.1109/ACSSC.2013.6810420
+0d3ff34d8490a9a53de1aac1dea70172cb02e013,https://doi.org/10.1109/ICPR.2014.542
+286a5c19a43382a21c8d96d847b52bba6b715a71,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6876188
+adb040081974369c46b943e9f75be4e405623102,http://doi.ieeecomputersociety.org/10.1109/PACCS.2009.191
+e295c1aa47422eb35123053038e62e9aa50a2e3a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406389
+fba386ac63fe87ee5a0cf64bf4fb90324b657d61,https://doi.org/10.1109/ICIP.2015.7351752
+aa581b481d400982a7e2a88830a33ec42ad0414f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7313922
+997c7ebf467c579b55859315c5a7f15c1df43432,http://doi.ieeecomputersociety.org/10.1109/FG.2017.141
+cbbd9880fb28bef4e33da418a3795477d3a1616e,http://doi.org/10.1016/j.patcog.2016.02.002
+00d4c2db10f3a32d505d7b8adc7179e421443dec,https://doi.org/10.1109/GlobalSIP.2014.7032080
+104ee18b513b52386f871e959c1f9e5072604e93,https://doi.org/10.1109/GlobalSIP.2017.8309189
+258b3b1df82186dd76064ef86b28555e91389b73,https://doi.org/10.1109/ACCESS.2017.2739822
+524f6dc7441a3899ea8eb5d3e0d5d70e50ba566a,http://doi.acm.org/10.1145/2797143.2797165
+a71bd4b94f67a71bc5c3563884bb9d12134ee46a,https://doi.org/10.1016/j.asoc.2015.05.006
+234c106036964131c0f2daf76c47ced802652046,http://doi.org/10.1016/j.cviu.2015.07.007
+b85d0aef3ee2883daca2835a469f5756917e76b7,https://doi.org/10.1007/s41095-015-0015-3
+9aab33ce8d6786b3b77900a9b25f5f4577cea461,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961739
+d77f18917a58e7d4598d31af4e7be2762d858370,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6289062
+4490b8d8ab2ac693c670751d4c2bff0a56d7393d,https://doi.org/10.1007/s11063-017-9648-9
+84a74ef8680b66e6dccbc69ae80321a52780a68e,http://doi.org/10.1007/978-0-85729-932-1_19
+913062218c7498b2617bb9d7821fe1201659c5cc,https://doi.org/10.1109/ICMLA.2012.178
+157647b0968d95f9288b27d6d9179a8e1ef5c970,https://doi.org/10.1049/iet-bmt.2014.0086
+b8bcf9c773da1c5ee76db4bf750c9ff5d159f1a0,http://doi.acm.org/10.1145/2911996.2911999
+f1af714b92372c8e606485a3982eab2f16772ad8,http://ieeexplore.ieee.org/document/5617662/
+49068538b7eef66b4254cc11914128097302fab8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5339040
+27e0684fa5b57715162ac6c58a6ea283c7db1719,https://doi.org/10.1109/ICARCV.2004.1468857
+252f202bfb14d363a969fce19df2972b83fa7ec0,http://doi.ieeecomputersociety.org/10.1109/FG.2017.120
+ff42ec628b0980909bbb84225d0c4f8d9ac51e03,https://doi.org/10.1109/TCSVT.2008.2005799
+e14cc2715b806288fe457d88c1ad07ef55c65318,http://dl.acm.org/citation.cfm?id=2830583
+add6d96fc018986f51a1aac47eae9ee3fc62fb66,http://doi.acm.org/10.1145/3009977.3010074
+7923742e2af655dee4f9a99e39916d164bc30178,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8272743
+4d90d7834ae25ee6176c096d5d6608555766c0b1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354115
+1ed617d14dbc53b20287d3405b14c68d8dad3965,https://doi.org/10.1109/TCYB.2016.2582918
+244293024aebbb0ff42a7cf2ba49b1164697a127,https://doi.org/10.1109/BTAS.2016.7791187
+1b6c65442f2b572fb6c8fc9a7d5ae49a8e6d32ab,http://doi.ieeecomputersociety.org/10.1109/ICPR.2006.537
+af3b803188344971aa89fee861a6a598f30c6f10,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8404811
+9da63f089b8ee23120bfa8b4d9d9c8f605f421fc,http://doi.acm.org/10.1145/2072298.2072043
+610779e90b644cc18696d7ac7820d3e0598e24d0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7067419
+fed8cc533037d7d925df572a440fd89f34d9c1fd,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.194
+6f68c49106b66a5bd71ba118273b4c5c64b6619f,http://doi.ieeecomputersociety.org/10.1109/TKDE.2007.190720
+0fb45e704ef3ca1f9c70e7be3fb93b53714ed8b5,http://doi.ieeecomputersociety.org/10.1109/FG.2017.142
+16c1b592d85d13f1ba4eff0afb4441bb78650785,https://doi.org/10.1109/TIP.2017.2685343
+2546dc7e2c2390233de16502413fe1097ecf3fb5,https://doi.org/10.1016/j.patrec.2011.01.009
+c06b13d0ec3f5c43e2782cd22542588e233733c3,https://doi.org/10.1016/j.cviu.2016.02.001
+6c01b349edb2d33530e8bb07ba338f009663a9dd,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5332299
+8fc36452a49cb0fd43d986da56f84b375a05b4c1,http://doi.acm.org/10.1145/2542355.2542388
+a6902db7972a7631d186bbf59c5ef116c205b1e8,http://dl.acm.org/citation.cfm?id=1276381
+0c6a566ebdac4bd14e80cd6bf4631bc7458e1595,http://doi.org/10.1016/j.patcog.2013.03.010
+f7bebb2d5ef7c9bd38808b8e615756efafc2a1e7,https://doi.org/10.1109/ICIP.2012.6467434
+a325d5ea42a0b6aeb0390318e9f65f584bd67edd,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909426
+abf0aa1d8869d87f4ef62e2da058ccfb4bf46d18,https://doi.org/10.1007/s11042-015-2536-2
+c8bc8c99acd009e4d27ddd8d9a6e0b899d48543e,https://doi.org/10.1109/IROS.2012.6386178
+193bc8b663d041bc34134a8407adc3e546daa9cc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373908
+cb160c5c2a0b34aba7b0f39f5dda6aca8135f880,https://doi.org/10.1109/SIU.2016.7496023
+91167aceafbc9c1560381b33c8adbc32a417231b,https://doi.org/10.1109/TCSVT.2009.2020337
+9776a9f3c59907f45baaeda4b8907dcdac98aef1,https://doi.org/10.1109/CISP-BMEI.2017.8301924
+d9b4b49378fcd77dcd5e755975b99ed4c7962f17,https://doi.org/10.1109/TIP.2015.2473105
+e74a2159f0f7afb35c7318a6e035bc31b8e69634,http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019503
+9ce4541d21ee3511bf3dc55bc3cd01222194d95a,https://doi.org/10.1016/j.cviu.2017.05.008
+2400c4994655c4dd59f919c4d6e9640f57f2009f,https://doi.org/10.1109/IPTA.2015.7367096
+4ea63435d7b58d41a5cbcdd34812201f302ca061,https://doi.org/10.1109/ICIP.2014.7025066
+81c21f4aafab39b7f5965829ec9e0f828d6a6182,https://doi.org/10.1109/BTAS.2015.7358744
+69ba86f7aac7b7be0ac41d990f5cd38400158f96,https://doi.org/10.1109/TNNLS.2015.2504724
+58684a925693a0e3e4bb1dd2ebe604885be034d2,https://doi.org/10.1109/ICASSP.2008.4517869
+51d6a8a61ea9588a795b20353c97efccec73f5db,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6460308
+8e8a6623b4abd2452779c43f3c2085488dfcb323,http://doi.acm.org/10.1145/2993148.2997630
+75a74a74d6abbbb302a99de3225c8870fa149aee,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7914657
+37866fea39deeff453802cde529dd9d32e0205a5,http://dl.acm.org/citation.cfm?id=2393385
+93d903d2e48d6a8ad3e3d2aff2e57622efe649cd,https://doi.org/10.1109/ICIP.2016.7532432
+e198a7b9e61dd19c620e454aaa81ae8f7377ade0,https://doi.org/10.1109/CVPRW.2010.5543611
+163ba5a998973f9ead6be0ca873aed5934d5022e,https://doi.org/10.1109/ACPR.2013.53
+ea03a569272d329090fe60d6bff8d119e18057d7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7532906
+df550cb749858648209707bec5410431ea95e027,https://doi.org/10.1109/TCYB.2015.2433926
+cd85f71907f1c27349947690b48bfb84e44a3db0,https://doi.org/10.1007/978-981-10-4840-1
+8c3f7bd8ae50337dd812b370ce4c4ea9375a9f58,https://doi.org/10.1109/ICIP.2014.7025276
+dc2f16f967eac710cb9b7553093e9c977e5b761d,https://doi.org/10.1109/ICPR.2016.7900141
+9fbcf40b0649c03ba0f38f940c34e7e6c9e04c03,https://doi.org/10.1007/s10044-006-0033-y
+90221884fe2643b80203991686af78a9da0f9791,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995467
+46976097c54e86032932d559c8eb82ffea4bb6bb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6738868
+0561bed18b6278434deae562d646e8adad72e75d,https://doi.org/10.1016/j.neucom.2014.09.052
+ac03849956ac470c41585d2ee34d8bb58bb3c764,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6853690
+fffe5ab3351deab81f7562d06764551422dbd9c4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163114
+3ff418ac82df0b5c2f09f3571557e8a4b500a62c,https://doi.org/10.1007/s11554-007-0039-8
+80677676b127b67938c8db06a15d87f5dd4bd7f1,https://doi.org/10.1007/s11760-014-0623-x
+d383ba7bbf8b7b49dcef9f8abab47521966546bb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995471
+fcb276874cd932c8f6204f767157420500c64bd0,https://doi.org/10.1007/978-3-319-04960-1_3
+052cec9fdbfe12ccd02688f3b7f538c0d73555b3,https://doi.org/10.1109/ICIP.2016.7533172
+2ce84465b9759166effc7302c2f5339766cc523d,https://doi.org/10.1109/VCIP.2015.7457830
+e68869499471bcd6fa8b4dc02aa00633673c0917,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5595885
+db3984b143c59584a32d762d712d21c0e8cf38b8,https://doi.org/10.1109/SMC.2015.324
+a9fdbe102f266cc20e600fa6b060a7bc8d1134e9,https://www.ncbi.nlm.nih.gov/pubmed/29334821
+2e535b8cd02c2f767670ba47a43ad449fa1faad7,https://doi.org/10.1109/MSP.2017.2740460
+faa46ef96493b04694555738100d9f983915cf9b,https://doi.org/10.1007/s10489-015-0735-1
+b7a0c70a320c1ac3e92f4bf0b50a7d8ceb757c41,https://doi.org/10.1109/IJCNN.2016.7727203
+a38dd439209b0913b14b1c3c71143457d8cf9b78,https://doi.org/10.1109/IJCNN.2015.7280803
+256b46b12ab47283e6ada05fad6a2b501de35323,https://doi.org/10.1109/ICPR.2016.7900275
+f7911b9ff58d07d19c68f4a30f40621f63c0f385,http://dl.acm.org/citation.cfm?id=3007693
+b21bf45cd3aeaec3440eeca09a1c5a5ee3d24a3a,https://doi.org/10.1080/10798587.2014.934592
+e41246837c25d629ca0fad74643fb9eb8bf38009,https://doi.org/10.1109/ICSIPA.2011.6144064
+8a8127a06f432982bfb0150df3212f379b36840b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373884
+5b4bbba68053d67d12bd3789286e8a9be88f7b9d,https://doi.org/10.1109/ICSMC.2008.4811353
+0da3c329ae14a4032b3ba38d4ea808cf6d115c4a,https://doi.org/10.1007/s00138-015-0709-7
+ded8252fc6df715753e75ba7b7fee518361266ef,https://doi.org/10.1109/SIU.2012.6204837
+99cd84a62edb2bda2fc2fdc362a72413941f6aa4,http://doi.ieeecomputersociety.org/10.1109/FG.2017.109
+60462b981fda63c5f9d780528a37c46884fe0b54,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8397015
+2a7058a720fa9da4b9b607ea00bfdb63652dff95,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7590031
+69adf2f122ff18848ff85e8de3ee3b2bc495838e,http://arxiv.org/abs/1711.10678
+ad08426ca57da2be0e9f8c1f673e491582edb896,http://doi.ieeecomputersociety.org/10.1109/TKDE.2013.98
+e4c3587392d477b7594086c6f28a00a826abf004,https://doi.org/10.1109/ICIP.2017.8296998
+9939498315777b40bed9150d8940fc1ac340e8ba,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789583
+872ff48a3acfbf96376fd048348372f5137615e4,https://doi.org/10.1007/s41095-016-0051-7
+681d222f91b12b00e9a4217b80beaa11d032f540,https://doi.org/10.1007/s10044-015-0493-z
+be6bd94322dd0ecfc8ea99eb7f40a9a14dd3471f,https://doi.org/10.1109/UIC-ATC.2013.32
+80d4cf7747abfae96328183dd1f84133023c2668,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369786
+b1bb517bd87a1212174033fc786b2237844b04e6,https://doi.org/10.1016/j.neucom.2015.03.078
+39d6339a39151b5f88ec2d7acc38fe0618d71b5f,https://doi.org/10.1109/MMSP.2013.6659285
+4c71b0cdb6b80889b976e8eb4457942bd4dd7b66,https://doi.org/10.1109/TIP.2014.2387379
+a9426cb98c8aedf79ea19839643a7cf1e435aeaa,https://doi.org/10.1109/GlobalSIP.2016.7905998
+68e6cfb0d7423d3fae579919046639c8e2d04ad7,https://doi.org/10.1109/ICB.2016.7550058
+e1179a5746b4bf12e1c8a033192326bf7f670a4d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163104
+ebc2a3e8a510c625353637e8e8f07bd34410228f,https://doi.org/10.1109/TIP.2015.2502485
+91f0a95b8eb76e8fa24c8267e4a7a17815fc7a11,http://doi.org/10.1007/s41095-016-0068-y
+336488746cc76e7f13b0ec68ccfe4df6d76cdc8f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7762938
+550351edcfd59d3666984771f5248d95548f465a,https://doi.org/10.1109/TIP.2014.2327805
+e8c051d9e7eb8891b23cde6cbfad203011318a4f,http://doi.acm.org/10.1145/3013971.3014015
+8bbd40558a99e33fac18f6736b8fe99f4a97d9b1,http://doi.org/10.1007/s11263-016-0986-2
+44389d8e20cf9f1a8453f4ba033e03cff9bdfcbb,https://doi.org/10.1016/j.neucom.2017.07.052
+8027a9093f9007200e8e69e05616778a910f4a5f,https://doi.org/10.1109/ICB.2013.6612997
+7477cf04c6b086108f459f693a60272523c134db,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6618937
+8ae642c87f0d6eeff1c6362571e7cd36dcda60ae,http://dl.acm.org/citation.cfm?id=3123271
+17de5a9ce09f4834629cd76b8526071a956c9c6d,https://doi.org/10.1007/978-3-319-68063-7_8
+dc34ab49d378ddcf6c8e2dbf5472784c5bfa8006,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8462222
+cae41c3d5508f57421faf672ee1bea0da4be66e0,https://doi.org/10.1109/ICPR.2016.7900298
+ffb1cb0f9fd65247f02c92cfcb152590a5d68741,https://doi.org/10.1109/CISS.2012.6310782
+ffea2b26e422c1009afa7e200a43b31a1fae86a9,https://doi.org/10.1007/s00500-009-0441-1
+14bdd23ea8f4f6d7f4c193e5cbb0622362e12ae1,https://doi.org/10.1109/TIP.2006.884932
+d37ca68742b2999667faf464f78d2fbf81e0cb07,https://doi.org/10.1007/978-3-319-25417-3_76
+60284c37249532fe7ff6b14834a2ae4d2a7fda02,https://doi.org/10.1109/SIU.2016.7495971
+335435a94f8fa9c128b9f278d929c9d0e45e2510,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6849440
+17d03da4db3bb89537d644b682b2a091d563af4a,https://doi.org/10.1109/TNN.2010.2050600
+2744e6d526b8f2c1b297ac2d2458aaa08b0cda11,http://doi.org/10.1007/s11042-017-5571-3
+11d73f4f19077e6806d05dc7ecd17fbeb15bdf39,http://doi.ieeecomputersociety.org/10.1109/FG.2017.28
+184dba921b932143d196c833310dee6884fa4a0a,https://doi.org/10.1109/SIU.2017.7960393
+7f5346a169c9784ca79aca5d95ae8bf2ebab58e3,https://doi.org/10.1109/ICIP.2015.7351304
+e0fe68c92fefa80992f4861b0c45a3fbec7cf1c9,http://doi.ieeecomputersociety.org/10.1109/ACII.2015.7344671
+0ee83ed9bedc0cec5c3368144df0b6f4ee76ddff,http://doi.ieeecomputersociety.org/10.1109/CGIV.2007.40
+d9e66b877b277d73f8876f537206395e71f58269,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7225130
+63c74794aedb40dd6b1650352a2da7a968180302,https://doi.org/10.1016/j.neucom.2016.09.015
+3157be811685c93d0cef7fa4c489efea581f9b8e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411222
+1860b8f63ce501bd0dfa9e6f2debc080e88d9baa,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7894195
+35ccc836df60cd99c731412fe44156c7fd057b99,https://doi.org/10.1109/ICCIS.2017.8274819
+578117ff493d691166fefc52fd61bad70d8752a9,https://doi.org/10.1109/CCST.2016.7815707
+e4e07f5f201c6986e93ddb42dcf11a43c339ea2e,https://doi.org/10.1109/BTAS.2017.8272722
+dcf6ecd51ba135d432fcb7697fc6c52e4e7b0a43,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100120
+f39783847499dd56ba39c1f3b567f64dfdfa8527,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791189
+d833268a8ea9278e68aaf3bd9bc2c11a5bb0bab7,http://doi.org/10.1007/s11042-018-6047-9
+d4fba386caca1b5b2ee35ee5310b5fce50b2b1c3,https://doi.org/10.23919/MVA.2017.7986886
+9026ee8a89ecfa6bd2688a4943eee027e3fc4b0f,http://doi.ieeecomputersociety.org/10.1109/CGIV.2011.28
+5798055e11e25c404b1b0027bc9331bcc6e00555,http://doi.acm.org/10.1145/2393347.2396357
+4672513d0dbc398719d66bba36183f6e2b78947b,https://doi.org/10.1016/j.ipm.2015.05.007
+2a826273e856939b58be8779d2136bffa0dddb08,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373892
+41e5d92b13d36da61287c7ffd77ee71de9eb2942,https://doi.org/10.1016/j.asoc.2016.12.033
+5981c309bd0ffd849c51b1d8a2ccc481a8ec2f5c,https://doi.org/10.1109/ICT.2017.7998256
+9c6dfd3a38374399d998d5a130ffc2864c37f554,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553738
+fbc9ba70e36768efff130c7d970ce52810b044ff,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6738500
+747dc0add50b86f5ba9e3e7315943d520e08f9eb,http://doi.ieeecomputersociety.org/10.1109/FG.2017.78
+6b0a2f9ab9b134d66a325525ea5d90ad546fe2b7,https://doi.org/10.1109/IJCNN.2016.7727803
+ee65cee5151928c63d3ef36fcbb582fabb2b6d2c,https://doi.org/10.1109/LSP.2016.2602538
+d36a1e4637618304c2093f72702dcdcc4dcd41d1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961791
+331d6ace8d59fa211e5bc84a93fdc65695238c69,https://doi.org/10.1007/s10115-017-1115-4
+58d43e32660446669ff54f29658961fe8bb6cc72,https://doi.org/10.1109/ISBI.2017.7950504
+8605e8f5d84b8325b1a81d968c296a5a5d741f31,https://doi.org/10.1016/j.patcog.2017.04.010
+d8b99eada922bd2ce4e20dc09c61a0e3cc640a62,https://doi.org/10.1109/IJCNN.2014.6889675
+3d9e44d8f8bc2663192c7ce668ccbbb084e466e4,http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019505
+9f5e22fbc22e1b0a61bcd75202d299232e68de5d,https://doi.org/10.1109/IJCNN.2016.7727391
+22e121a8dea49e3042de305574356477ecacadda,http://doi.org/10.1007/s00138-018-0935-x
+afdc303b3325fbc1baa9f18a66bcad59d5aa675b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5595920
+d98a36081a434451184fa4becb59bf5ec55f3a1e,https://doi.org/10.1016/j.neucom.2016.09.110
+5fe3a9d54d5070308803dd8ef611594f59805400,http://doi.org/10.1016/j.patcog.2016.02.006
+b7b8e7813fbc12849f2daba5cab604abd8cbaab6,https://doi.org/10.1109/ICCE.2014.6775938
+6a5d7d20a8c4993d56bcf702c772aa3f95f99450,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813408
+a317083d9aac4062e77aa0854513383c87e47ece,https://doi.org/10.1016/j.patcog.2015.06.003
+f6f2a212505a118933ef84110e487551b6591553,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952474
+d2baa43471d959075fc4c93485643cbd009797fd,http://doi.ieeecomputersociety.org/10.1109/MM.2017.4241350
+d2d9612d3d67582d0cd7c1833599b88d84288fab,https://doi.org/10.1049/iet-cvi.2015.0222
+a2cc3193ed56ef4cedaaf4402c844df28edb5639,https://doi.org/10.1016/j.patrec.2012.01.005
+a735c6330430c0ff0752d117c54281b1396b16bf,https://doi.org/10.1109/SMC.2014.6974118
+3ca6adc90aae5912baa376863807191ffd56b34e,https://doi.org/10.1109/LSP.2014.2316918
+cbaa17be8c22e219a9c656559e028867dfb2c2ed,https://doi.org/10.1109/ICIP.2016.7532636
+1672becb287ae3eaece3e216ba37677ed045db55,https://doi.org/10.1016/j.eswa.2015.10.047
+c847de9faa1f1a06d5647949a23f523f84aba7f3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6199761
+d10cfcf206b0991e3bc20ac28df1f61c63516f30,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553776
+844e3e6992c98e53b45e4eb88368d0d6e27fc1d6,https://doi.org/10.1109/ICIP.2014.7026057
+9f43caad22803332400f498ca4dd0429fe7da0aa,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6239186
+9fab78015e6e91ba7241a923222acd6c576c6e27,http://doi.ieeecomputersociety.org/10.1109/ICSS.2016.10
+ca8f23d9b9a40016eaf0467a3df46720ac718e1d,https://doi.org/10.1109/ICASSP.2015.7178214
+0a451fc7d2c6b3509d213c210ae880645edf90ed,https://doi.org/10.1109/IJCNN.2014.6889591
+4c72a51a7c7288e6e17dfefe4f87df47929608e7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7736912
+d60e3eef429ed2a51bbd806125fa31f5bea072a4,https://doi.org/10.1109/HIS.2013.6920481
+4d19401e44848fe65b721971bc71a9250870ed5f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8462612
+0974677f59e78649a40f0a1d85735410d21b906a,https://doi.org/10.1109/ISCAS.2017.8050798
+90eb66e75381cce7146b3953a2ae479a7beec539,http://doi.ieeecomputersociety.org/10.1109/AIPR.2015.7444542
+3a6334953cd2775fab7a8e7b72ed63468c71dee7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7591180
+8694cd9748fb1c128f91a572119978075fede848,http://doi.org/10.1016/j.neucom.2017.08.028
+651cafb2620ab60a0e4f550c080231f20ae6d26e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6360717
+c61a8940d66eed9850b35dd3768f18b59471ca34,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1374768
+d4331a8dd47b03433f8390da2eaa618751861c64,https://doi.org/10.1109/TIP.2012.2192125
+ed779cc4f026f6ac22f5ef0c34126138e1ebc8b2,https://doi.org/10.1007/978-981-10-3005-5_57
+d340a135a55ecf7506010e153d5f23155dcfa7e8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7884781
+3c1b73509cc09200e96ab9cfb28ebfd9d1d6aa9a,https://doi.org/10.1109/LSP.2016.2639341
+c83e26622b275fdf878135e71c23325a31d0e5fc,http://dl.acm.org/citation.cfm?id=3164611
+0d8cec1b3f9b6e25d9d31eeb54d8894a1f2ef84f,https://doi.org/10.1109/LSP.2018.2810121
+93108f1548e8766621565bdb780455023349d2b2,https://doi.org/10.1109/ICIP.2010.5653914
+af2d30fdb8c611dc5b883b90311d873e336fc534,https://doi.org/10.1109/ISCAS.2017.8050275
+14aed1b7c08c941b1d2ba6c1c2ffb6255c306c74,http://doi.org/10.1007/s00138-016-0820-4
+26c8ed504f852eda4a2e63dbbbc3480e57f43c70,http://doi.org/10.1142/S0218001415560078
+81513764b73dae486a9d2df28269c7db75e9beb3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7839217
+4317856a1458baa427dc00e8ea505d2fc5f118ab,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8296449
+fc8fb68a7e3b79c37108588671c0e1abf374f501,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7565615
+fb3ff56ab12bd250caf8254eca30cd97984a949a,https://doi.org/10.3103/S0146411617010072
+243cd27dce38fd756a840b397c28ad21cfb78897,https://doi.org/10.1049/iet-ipr.2013.0003
+31f905d40a4ac3c16c91d5be8427762fa91277f1,https://doi.org/10.1109/TIP.2017.2704661
+a4725a5b43e7c36d9e30028dff66958f892254a0,http://doi.acm.org/10.1145/2663204.2666271
+def934edb7c7355757802a95218c6e4ed6122a72,http://doi.org/10.1007/978-0-387-31439-6
+dbfe62c02b544b48354fac741d90eb4edf815db5,https://doi.org/10.1109/SITIS.2016.43
+22a10d8d2a2cb9055557a3b335d6706100890afb,https://doi.org/10.1109/SIU.2016.7496121
+15ef65fd68d61f3d47326e358c446b0f054f093a,https://doi.org/10.1109/MLSP.2017.8168180
+0141cb33c822e87e93b0c1bad0a09db49b3ad470,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7298876
+5304cd17f9d6391bf31276e4419100f17d4423b2,https://doi.org/10.1109/ICIP.2012.6466930
+ff0617d750fa49416514c1363824b8f61baf8fb5,https://doi.org/10.1587/elex.7.1125
+3c0fffd4cdbfef4ccd92d528d8b8a60ab0929827,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373845
+539ffd51f18404e1ef83371488cf5a27cd16d064,https://doi.org/10.1049/iet-ipr.2014.0733
+a4e75766ef93b43608c463c233b8646439ce2415,https://doi.org/10.1109/ICCVW.2011.6130492
+57dc55edade7074f0b32db02939c00f4da8fe3a6,https://doi.org/10.1109/TITS.2014.2313371
+01f0a4e1442a7804e1fe95798eff777d08e42014,https://doi.org/10.1016/j.knosys.2017.09.005
+9ba4a5e0b7bf6e26563d294f1f3de44d95b7f682,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354113
+b944cc4241d195b1609a7a9d87fce0e9ba1498bc,https://doi.org/10.1109/TSP.2011.2179539
+695426275dee2ec56bc0c0afe1c5b4227a350840,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7878535
+f201baf618574108bcee50e9a8b65f5174d832ee,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8031057
+794a51097385648e3909a1acae7188f5ab881710,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813382
+64e82b42e1c41250bdf9eb952686631287cfd410,https://doi.org/10.1111/cgf.12760
+5594beb2b314f5433bd7581f64bdbc58f2933dc4,https://doi.org/10.1016/j.neucom.2016.12.013
+0cf1287c8fd41dcef4ac03ebeab20482f02dce20,https://doi.org/10.1109/MSN.2016.032
+d20ea5a4fa771bc4121b5654a7483ced98b39148,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4430554
+518a3ce2a290352afea22027b64bf3950bffc65a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5204174
+50333790dd98c052dfafe1f9bf7bf8b4fc9530ba,https://doi.org/10.1109/ICIP.2015.7351001
+aba9acb4a607071af10684f2cfbdefa0507a4e9a,https://doi.org/10.1016/j.patcog.2016.06.010
+29f298dd5f806c99951cb434834bc8dcc765df18,https://doi.org/10.1109/ICPR.2016.7899837
+289cfcd081c4393c7d6f63510747b5372202f855,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373873
+4f37f71517420c93c6841beb33ca0926354fa11d,http://doi.org/10.1016/j.neucom.2017.08.062
+1966bddc083886a9b547e1817fe6abc352a00ec3,http://doi.acm.org/10.1145/2733373.2806312
+2960500033eb31777ed1af1fcb133dcab1b4a857,http://doi.acm.org/10.1145/3005467.3005471
+f1280f76933ba8b7f4a6b8662580504f02bb4ab6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7836703
+27aa23d7a05368a6b5e3d95627f9bab34284e5c4,https://doi.org/10.1109/IJCNN.2012.6252705
+c244c3c797574048d6931b6714ebac64d820dbb3,http://doi.acm.org/10.1145/2808492.2808500
+496d62741e8baf3859c24bb22eaccd3043322126,http://doi.ieeecomputersociety.org/10.1109/TKDE.2017.2728531
+22ccd537857aca1ee4b961f081f07c58d42a7f32,https://doi.org/10.1109/DICTA.2015.7371260
+5a259f2f5337435f841d39dada832ab24e7b3325,http://doi.acm.org/10.1145/2964284.2984059
+d09fd7e0bb5d997963cfef45452724416b2bb052,https://doi.org/10.1109/EMEIT.2011.6023179
+939d28859c8bd2cca2d692901e174cfd599dac74,https://doi.org/10.1109/WOCC.2016.7506582
+51410d6bd9a41eacb105f15dbdaee520e050d646,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8412888
+57eeaceb14a01a2560d0b90d38205e512dcca691,https://doi.org/10.1109/TIP.2017.2778563
+6f74c3885b684e52096497b811692bd766071530,https://doi.org/10.1016/j.neucom.2013.06.013
+e6f3707a75d760c8590292b54bc8a48582da2cd4,https://doi.org/10.1007/s11760-012-0410-5
+d44e6baf3464bf56d3a29daf280b1b525ac30f7d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265336
+4aa27c1f8118dbb39809a0f79a28c0cbc3ede276,http://doi.acm.org/10.1145/2683483.2683530
+993934822a42e70dd35fb366693d847164ca15ff,https://doi.org/10.1109/ICME.2009.5202753
+e2b3aae594035e58f72125e313e92c7c4cc9d5bb,https://doi.org/10.1007/s00138-014-0597-2
+cb9921d5fc4ffa50be537332e111f03d74622442,https://doi.org/10.1007/978-3-319-46654-5_79
+57f4e54a63ef95596dbc743f391c3fff461f278b,https://doi.org/10.1109/ICMEW.2012.86
+950bf95da60fd4e77d5159254fed906d5ed5fbcb,http://doi.ieeecomputersociety.org/10.1109/MMUL.2014.24
+8a1e95b82d8cf27e0034e127091396efd4c8bd9e,https://doi.org/10.1109/IGARSS.2016.7729015
+42441f1fee81c8fd42a74504df21b3226a648739,https://doi.org/10.1007/s11554-008-0072-2
+aa4af9b3811db6a30e1c7cc1ebf079078c1ee152,http://doi.acm.org/10.1145/3129416.3129451
+dcea30602c4e0b7525a1bf4088620128d4cbb800,https://doi.org/10.1109/VCIP.2013.6706430
+fb6f5cb26395608a3cf0e9c6c618293a4278a8ad,http://doi.org/10.1007/s11390-018-1835-2
+5632ba72b2652df3b648b2ee698233e76a4eee65,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8346387
+cc5edaa1b0e91bc3577547fc30ea094aa2722bf0,https://doi.org/10.1109/CICARE.2014.7007832
+254964096e523d5e48e03390ce440c9af337d200,http://dl.acm.org/citation.cfm?id=3005378
+c5c56e9c884ac4070880ac481909bb6b621d2a3f,http://doi.ieeecomputersociety.org/10.1109/ICCV.2011.6126466
+30188b836f2fa82209d7afbf0e4d0ee29c6b9a87,https://doi.org/10.1109/TIP.2013.2249077
+8b1f697d81de1245c283b4f8f055b9b76badfa66,https://doi.org/10.1142/S0218126616500171
+b8b9cef0938975c5b640b7ada4e3dea6c06d64e9,http://doi.ieeecomputersociety.org/10.1109/FG.2017.119
+84ae55603bffda40c225fe93029d39f04793e01f,https://doi.org/10.1109/ICB.2016.7550066
+4f064c2a0ef0849eed61ab816ff0c2ff6d9d7308,http://dl.acm.org/citation.cfm?id=2396318
+ce3304119ba6391cb6bb25c4b3dff79164df9ac6,https://doi.org/10.1016/j.imavis.2016.03.004
+ab703224e3d6718bc28f7b9987eb6a5e5cce3b01,https://doi.org/10.1631/FITEE.1500235
+f2cc459ada3abd9d8aa82e92710676973aeff275,http://ieeexplore.ieee.org/document/5967185/
+e180572400b64860e190a8bc04ef839fa491e056,http://doi.org/10.1038/s41598-017-12097-w
+0d90c992dd08bfb06df50ab5c5c77ce83061e830,https://doi.org/10.1109/UIC-ATC.2013.85
+48a6a1c6a0ac5f2b7912b3ccb40b0c07f62ddfdf,https://doi.org/10.1016/j.imavis.2015.12.003
+9590b09c34fffda08c8f54faffa379e478f84b04,https://doi.org/10.1109/TNNLS.2013.2275170
+b9dc8cc479cacda1f23b91df00eb03f88cc0c260,http://dl.acm.org/citation.cfm?id=2964287
+d066575b48b552a38e63095bb1f7b56cbb1fbea4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8359888
+95e3b78eb4d5b469f66648ed4f37e45e0e01e63e,http://doi.org/10.1007/s11042-016-4261-x
+21f5f65e832c5472d6d08f6ee280d65ff0202e29,https://doi.org/10.1007/978-3-319-70353-4_44
+43261920d2615f135d6e72b333fe55d3f2659145,http://doi.acm.org/10.1145/3136273.3136301
+151b87de997e55db892b122c211f9c749f4293de,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237481
+a9215666b4bcdf8d510de8952cf0d55b635727dc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7498613
+443f4421e44d4f374c265e6f2551bf9830de5597,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771467
+ba17782ca5fc0d932317389c2adf94b5dbd3ebfe,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5509290
+31ffc95167a2010ce7aab23db7d5fc7ec439f5fb,https://doi.org/10.1109/TNNLS.2017.2651169
+5305bfdff39ae74d2958ba28d42c16495ce2ff86,https://doi.org/10.1109/DICTA.2014.7008128
+c0b02be66a5a1907e8cfb8117de50f80b90a65a8,http://doi.acm.org/10.1145/2808492.2808523
+16eaa26a84468b27e559215db01c53286808ec2a,https://doi.org/10.1007/s11263-015-0859-0
+562f7555e5cb79ce0fe834c4613264d8378dd007,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7153112
+0b82bf595e76898993ed4f4b2883c42720c0f277,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411229
+61831364ddc8db869618f1c7f0ad35ab2ab6bcf7,https://doi.org/10.1109/ICIP.2013.6738496
+0d6d9c4b5dd282b8f29cd3c200df02a00141f0a9,https://doi.org/10.1109/SIU.2014.6830193
+f17d8f14651c123d39e13a39dc79b7eb3659fe68,https://doi.org/10.1007/s11042-013-1803-3
+d7b8f285b0701ba7b1a11d1c7dd3d1e7e304083f,http://dl.acm.org/citation.cfm?id=3164593
+935924ddb5992c11f3202bf995183130ad83d07b,https://doi.org/10.1117/1.JEI.24.2.023015
+22894c7a84984bd4822dcfe7c76a74673a242c36,http://doi.acm.org/10.1145/2993148.2997634
+8e21399bb102e993edd82b003c306a068a2474da,https://doi.org/10.1109/ICIP.2013.6738758
+40dd736c803720890d6bfc1e083f6050e35d8f7a,http://doi.acm.org/10.1145/3139958.3140055
+aaf2436bc63a58d18192b71cc8100768e2f8a6cb,http://doi.ieeecomputersociety.org/10.1109/ICDIP.2009.77
+1ad780e02edf155c09ea84251289a054b671b98a,https://doi.org/10.1109/ICNIDC.2012.6418787
+badb95dbdfb3f044a46d7ba0ee69dba929c511b1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7363515
+971cb1bfe3d10fcb2037e684c48bd99842f42fa4,http://doi.org/10.1007/s11042-017-5141-8
+24eeb748a5e431510381ec7c8253bcb70eff8526,https://doi.org/10.1109/TIP.2017.2746270
+19c82eacd77b35f57ac8815b979716e08e3339ca,http://doi.ieeecomputersociety.org/10.1109/ICITCS.2015.7292981
+01e27b6d1af4c9c2f50e2908b5f3b2331ff24846,http://doi.org/10.1007/s11263-017-0996-8
+d119443de1d75cad384d897c2ed5a7b9c1661d98,https://doi.org/10.1109/ICIP.2010.5650873
+539cb169fb65a5542c84f42efcd5d2d925e87ebb,https://doi.org/10.1109/ICB.2015.7139098
+1d10010ea7af43d59e1909d27e4e0e987264c667,https://doi.org/10.1016/j.neunet.2004.06.006
+af4745a3c3c7b51dab0fd90d68b53e60225aa4a9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7873272
+0c378c8dcf707145e1e840a9951519d4176a301f,https://doi.org/10.1109/ICARCV.2010.5707434
+04f56dc5abee683b1e00cbb493d031d303c815fd,http://doi.acm.org/10.1145/2808492.2808557
+5feee69ed183954fa76c58735daa7dd3549e434d,https://doi.org/10.1109/ICIP.2008.4711697
+480858e55abdbc07ca47b7dc10204613fdd9783c,https://doi.org/10.1109/ICPR.2014.786
+779d3f0cf74b7d33344eea210170c7c981a7e27b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8115237
+5ddfd3d372f7679518db8fd763d5f8bc5899ed67,https://doi.org/10.1109/ICPR.2014.797
+7fa00c81f7c2d8da1551334b0e7bc3d7fd43130c,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2014.2353635
+4d4736173a5e72c266e52f3a43bdcb2b58f237a2,https://doi.org/10.1109/ISSPA.2012.6310583
+5e19d7307ea67799eb830d5ce971f893e2b8a9ca,https://doi.org/10.1007/s11063-012-9214-4
+d264dedfdca8dc4c71c50311bcdd6ba3980eb331,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8392234
+6e9de9c3af3258dd18142e9bef2977b7ce153bd5,https://doi.org/10.1007/978-3-319-48881-3
+ab8ecf98f457e29b000c44d49f5bf49ec92e571c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8439631
+2b2924af7ec219bd1fadcbd2c57014ed54efec86,http://doi.ieeecomputersociety.org/10.1109/SSIAI.2014.6806053
+327ae6742cca4a6a684a632b0d160dd84d0d8632,https://doi.org/10.1007/s10851-015-0629-1
+4551194408383b12db19a22cca5db0f185cced5c,https://doi.org/10.1109/TNNLS.2014.2341634
+bed8feb11e8077df158e16bce064853cf217ba62,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6191360
+23e824d1dfc33f3780dd18076284f07bd99f1c43,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7947686
+a8faeef97e2a00eddfb17a44d4892c179a7cc277,https://doi.org/10.1109/FG.2011.5771459
+6e2041a9b5d840b0c3e4195241cd110640b1f5f3,https://doi.org/10.1007/s10044-013-0349-3
+c0f67e850176bb778b6c048d81c3d7e4d8c41003,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8296441
+0e2d956790d3b8ab18cee8df6c949504ee78ad42,https://doi.org/10.1109/IVCNZ.2013.6727024
+f09d5b6433f63d7403df5650893b78cdcf7319b3,https://doi.org/10.1109/AFGR.2008.4813384
+6a6406906470be10f6d6d94a32741ba370a1db68,http://doi.org/10.1007/s11042-016-4213-5
+f2004fff215a17ac132310882610ddafe25ba153,http://doi.ieeecomputersociety.org/10.1109/AICCSA.2017.124
+7d61b70d922d20c52a4e629b09465076af71ddfd,https://doi.org/10.1007/s10044-011-0258-2
+f9d9b2a1197cdb73e977490756c0ff8a30cafc3e,http://doi.org/10.1007/s11042-018-6110-6
+d5b445c5716952be02172ca4d40c44f4f04067fa,https://doi.org/10.1109/ICICS.2011.6173537
+58217ae5423828ed5e1569bee93d491569d79970,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1578742
+edf60d081ffdfa80243217a50a411ab5407c961d,http://doi.org/10.1007/s11263-016-0893-6
+f4fc77660665ae58993065c6a336367e9a6c85f7,https://doi.org/10.1016/j.patcog.2012.12.009
+7753e3b9e158289cbaa22203166424ca9c229f68,http://doi.ieeecomputersociety.org/10.1109/ICDM.2014.29
+e790a2538579c8e2ef9b314962ab26197d6664c6,https://doi.org/10.1109/ICIP.2016.7532915
+8fe5feeaa72eddc62e7e65665c98e5cb0acffa87,https://doi.org/10.1007/s12193-015-0209-0
+fd809ee36fa6832dda57a0a2403b4b52c207549d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8409768
+33b61be191e63b0c9974be708180275c9d5b3057,https://doi.org/10.1109/ICRA.2011.5979705
+f6311d6b3f4d3bd192d866d2e898c30eea37d7d5,http://ieeexplore.ieee.org/document/6460511/
+2149d49c84a83848d6051867290d9c8bfcef0edb,https://doi.org/10.1109/TIFS.2017.2746062
+5bfad0355cdb62b22970777d140ea388a7057d4c,https://doi.org/10.1016/j.patcog.2011.05.006
+c41a3c31972cf0c1be6b6895f3bf97181773fcfb,https://doi.org/10.1109/ICPR.2014.103
+126076774da192d4d3f4efcd1accc719ee5f9683,https://doi.org/10.1109/SIU.2012.6204774
+05c5134125a333855e8d25500bf97a31496c9b3f,http://doi.acm.org/10.1145/3132515.3132517
+1eb48895d86404251aa21323e5a811c19f9a55f9,http://doi.ieeecomputersociety.org/10.1109/CIS.2015.22
+4f03ba35440436cfa06a2ed2a571fea01cb36598,https://doi.org/10.1109/SPAC.2017.8304260
+1ba9d12f24ac04f0309e8ff9b0162c6e18d97dc3,http://doi.acm.org/10.1145/2964284.2984061
+4a8480d58c30dc484bda08969e754cd13a64faa1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406475
+a4543226f6592786e9c38752440d9659993d3cb3,http://doi.ieeecomputersociety.org/10.1109/FG.2017.112
+32dfd4545c87d9820cc92ca912c7d490794a81d6,https://doi.org/10.1007/978-3-319-50551-0
+64b9ad39d115f3e375bde4f70fb8fdef5d681df8,https://doi.org/10.1109/ICB.2016.7550088
+ae5e92abd5929ee7f0a5aa1622aa094bac4fae29,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373805
+b631f3c212aab45d73ddc119f1f7d00c3c502a72,https://doi.org/10.1109/TIFS.2009.2035976
+e9809c0c6bf33cfe232a63b0a13f9b1263c58cb8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7172556
+0647c9d56cf11215894d57d677997826b22f6a13,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8401557
+116f9e9cda25ff3187bc777ceb3ecd28077a7eca,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373864
+4c6886c489e93ccab5a1124555a6f3e5b0104464,https://doi.org/10.1109/ICIP.2017.8296921
+bb83d5c7c17832d1eef14aa5d303d9dd65748956,http://doi.acm.org/10.1145/3139513.3139514
+e5c687c8c84f1cdb9d9fbc9b6ff7518ff4d71056,https://doi.org/10.1109/TNN.2011.2170220
+c4ca092972abb74ee1c20b7cae6e69c654479e2c,https://doi.org/10.1109/ICIP.2016.7532960
+592f14f4b12225fc691477a180a2a3226a5ef4f0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789592
+0f22b89341d162a7a0ebaa3c622d9731e5551064,http://doi.ieeecomputersociety.org/10.1109/AIPR.2011.6176352
+9f131b4e036208f2402182a1af2a59e3c5d7dd44,http://dl.acm.org/citation.cfm?id=3206038
+39af06d29a74ad371a1846259e01c14b5343e3d1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8046026
+ec28217290897a059348dcdf287540a2e2c68204,https://doi.org/10.1504/IJBM.2015.070928
+506ea19145838a035e7dba535519fb40a3a0018c,http://arxiv.org/abs/1806.08251
+6dd8d8be00376ac760dc92f9c5f20520872c5355,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2417578
+27812db1d2f68611cc284d65d11818082e572008,https://doi.org/10.1109/MIPRO.2016.7522323
+22648dcd3100432fe0cc71e09de5ee855c61f12b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8393188
+8bebb26880274bdb840ebcca530caf26c393bf45,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369529
+c138c76809b8da9e5822fb0ae38457e5d75287e0,https://doi.org/10.1109/TIP.2014.2378017
+e4b825bf9d5df47e01e8d7829371d05208fc272d,http://doi.acm.org/10.1145/3055635.3056618
+bbc21d6b7c6e807c6886d237a04b501158ca6bb8,https://doi.org/10.1109/TMM.2016.2523421
+70769def1284fe88fd57a477cde8a9c9a3dff13f,https://doi.org/10.1016/j.neucom.2006.10.036
+34bc8ecec0c0b328cd8c485cb34d4d2f4b84e0c9,https://www.ncbi.nlm.nih.gov/pubmed/29069621
+f3553148e322f4f64545d6667dfbc7607c82703a,http://doi.org/10.1007/s00138-016-0763-9
+8cedb92694845854f3ad0daf6c9adb6b81c293de,http://doi.acm.org/10.1145/1839379.1839431
+c98b13871a3bc767df0bdd51ff00c5254ede8b22,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909913
+c872d6310f2079db0cee0e69cc96da1470055225,https://doi.org/10.1007/978-3-319-46675-0_68
+a4898f55f12e6393b1c078803909ea715bf71730,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6957817
+465faf9974a60da00950be977f3bc2fc3e56f5d2,http://doi.ieeecomputersociety.org/10.1109/ACII.2017.8273631
+eef0be751e9aca7776d83f25c8ffdc1a18201fd8,https://doi.org/10.1016/j.patcog.2016.10.015
+1050cd9bf281d0b7367c03d931e6e0b4fc08ccd3,http://doi.ieeecomputersociety.org/10.1109/SMARTCOMP.2014.7043872
+4e1d89149fc4aa057a8becce2d730ec6afd60efa,https://doi.org/10.1109/ICSMC.2009.5346047
+918fc4c77a436b8a588f63b2b37420b7868fbbf8,https://doi.org/10.1016/j.inffus.2015.03.005
+7f1078a2ebfa23a58adb050084d9034bd48a8a99,https://doi.org/10.1007/s00371-015-1169-9
+7e8c8b1d72c67e2e241184448715a8d4bd88a727,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8097314
+2bb36c875754a2a8919f2f9b00a336c00006e453,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373869
+9e5690cdb4dfa30d98dff653be459e1c270cde7f,https://doi.org/10.1109/ICIP.2017.8297080
+4113269f916117f975d5d2a0e60864735b73c64c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1613059
+235a347cb96ef22bf35b4cf37e2b4ee5cde9df77,http://doi.ieeecomputersociety.org/10.1109/DICTA.2008.13
+7e456e94f3080c761f858264428ee4c91cd187b2,http://ieeexplore.ieee.org/document/6460899/
+e0ab926cd48a47a8c7b16e27583421141f71f6df,https://doi.org/10.1109/HPCSim.2016.7568383
+ec6a2093059fd6eada9944212f64a659881abb95,https://doi.org/10.1016/j.patcog.2016.02.022
+8a6033cbba8598945bfadd2dd04023c2a9f31681,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014991
+4097fef623185557bb1842501cfdc97f812fc66d,http://doi.acm.org/10.1145/3126686.3126755
+8697ccb156982d40e88fda7fbf4297fa5171f24d,http://doi.ieeecomputersociety.org/10.1109/WI-IAT.2011.101
+d1bd956a8523629ed4e2533b01272f22cea534c6,https://doi.org/10.1016/j.patrec.2010.01.021
+ede16b198b83d04b52dc3f0dafc11fd82c5abac4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952343
+814369f171337ee1d8809446b7dbfc5e1ef9f4b5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5597559
+56e25358ebfaf8a8b3c7c33ed007e24f026065d0,https://doi.org/10.1007/s10994-015-5541-9
+1ef6ad9e1742d0b2588deaf506ef83b894fb9956,https://doi.org/10.1007/s12193-016-0213-z
+9961f1e5cf8fda29912344773bc75c47f18333a0,http://doi.org/10.1007/s10044-017-0618-7
+cf6851c24f489dabff0238e01554edea6aa0fc7c,https://doi.org/10.1109/ICSMC.2011.6083637
+b7461aac36fc0b8a24ecadf6c5b5caf54f2aa2f7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7528404
+8633732d9f787f8497c2696309c7d70176995c15,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7298967
+e896389891ba84af58a8c279cf8ab5de3e9320ee,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6958874
+ede5982980aa76deae8f9dc5143a724299d67742,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8081396
+0341405252c80ff029a0d0065ca46d0ade943b03,http://doi.ieeecomputersociety.org/10.1109/FG.2017.40
+d4ccc4f18a824af08649657660e60b67c6868d9c,https://doi.org/10.1142/S021800141655020X
+afba76d0fe40e1be381182aec822431e20de8153,https://doi.org/10.1007/s00521-014-1768-9
+7c8909da44e89a78fe88e815c83a4ced34f99149,http://doi.ieeecomputersociety.org/10.1109/ICPR.2010.326
+78f244dc2a171944836a89874b8f60e9fe80865d,http://doi.ieeecomputersociety.org/10.1109/ICIG.2011.181
+c43dc4ae68a317b34a79636fadb3bcc4d1ccb61c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369763
+31ec1e5c3b5e020af4a5a3c1be2724c7429a7c78,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354285
+93dd4e512cd7647aecbfc0cd4767adf5d9289c3d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952499
+9abf6d56a7d336bc58f4e3328d2ee807032589f1,https://doi.org/10.1109/CEC.2017.7969500
+7c57ac7c9f84fbd093f6393e2b63c18078bf0fdf,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6218178
+765be0c44a67e41e0f8f0b5d8a3af0ff40a00c7d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373821
+a313851ed00074a4a6c0fccf372acb6a68d9bc0b,http://doi.org/10.1007/s11042-016-4324-z
+93978ba84c8e95ff82e8b5960eab64e54ca36296,http://doi.acm.org/10.1145/3136755.3136806
+0ed96cc68b1b61e9eb4096f67d3dcab9169148b9,http://doi.acm.org/10.1145/2663204.2666279
+e2faaebd17d10e2919bd69492787e7565546a63f,http://doi.org/10.1007/s11042-017-4514-3
+7c11fa4fd91cb57e6e216117febcdd748e595760,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5597453
+cc1b093cfb97475faabab414878fa7e4a2d97cd7,http://doi.ieeecomputersociety.org/10.1109/ICALT.2017.141
+90c4a6c6f790dbcef9a29c9a755458be09e319b6,http://doi.acm.org/10.1145/2964284.2967242
+71e95c3a31dceabe9cde9f117615be8bf8f6d40e,https://doi.org/10.1109/ICIP.2010.5653024
+3888d7a40f3cea5e4a851c8ca97a2d7810a62867,https://doi.org/10.1109/CCECE.2016.7726684
+23ee7b7a9ca5948e81555aaf3a044cfec778f148,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771385
+85c90ad5eebb637f048841ebfded05942bb786b7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6977163
+7fb7ccc1aa093ca526f2d8b6f2c404d2c886f69a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8404767
+cec8936d97dea2fcf04f175d3facaaeb65e574bf,http://dl.acm.org/citation.cfm?id=3134264
+00301c250d667700276b1e573640ff2fd7be574d,https://doi.org/10.1109/BTAS.2014.6996242
+ecd08edab496801fd4fde45362dde462d00ee91c,https://www.ncbi.nlm.nih.gov/pubmed/29994561
+7cfbf90368553333b47731729e0e358479c25340,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7346480
+76dff7008d9b8bf44ec5348f294d5518877c6182,https://doi.org/10.1016/j.imavis.2014.09.004
+1f41bf5e8b8562ac7ef0013f4d0cf1c9e1a431f9,https://doi.org/10.1109/IJCNN.2017.7965955
+b8d8501595f38974e001a66752dc7098db13dfec,http://arxiv.org/abs/1711.09265
+43bb2b58f906262035ef61e41768375bc8d99ae3,https://doi.org/10.1016/j.procs.2016.04.072
+e6c491fb6a57c9a7c2d71522a1a066be2e681c84,https://doi.org/10.1016/j.imavis.2016.06.002
+c222f8079c246ead285894c47bdbb2dfc7741044,https://doi.org/10.1109/ICIP.2015.7351631
+be4faea0971ef74096ec9800750648b7601dda65,http://doi.org/10.1007/s11063-017-9724-1
+b910590a0eb191d03e1aedb3d55c905129e92e6b,http://doi.acm.org/10.1145/2808492.2808570
+9ab126760f68071a78cabe006cf92995d6427025,https://doi.org/10.1007/s11042-013-1703-6
+47fb74785fbd8870c2e819fc91d04b9d9722386f,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.161
+ad9ba7eade9d4299159512d6d5d07d7d3d26ae58,https://doi.org/10.1007/s11063-012-9252-y
+e2f78d2f75a807b89a13115a206da4661361fa71,https://doi.org/10.1109/TMM.2017.2696825
+fecccc79548001ecbd6cafd3067bcf14de80b11a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354157
+7d45f1878d8048f6b3de5b3ec912c49742d5e968,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7747479
+4e6e5cb93e7e564bc426b5b27888d55101504c50,https://doi.org/10.1109/ICPR.2016.7900299
+572dbaee6648eefa4c9de9b42551204b985ff863,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163151
+898ff1bafee2a6fb3c848ad07f6f292416b5f07d,https://doi.org/10.1109/TIP.2016.2518867
+db0379c9b02e514f10f778cccff0d6a6acf40519,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6130343
+e7cfaff65541cde4298a04882e00608d992f6703,http://doi.org/10.1007/s00521-018-3554-6
+884a9ce87d4d2338cb97bf4c8df3cdb079a87d5e,https://doi.org/10.1109/SMC.2016.7844717
diff --git a/scraper/reports/misc/all_doi-2.csv b/scraper/reports/misc/all_doi-2.csv
new file mode 100644
index 00000000..d798b9ad
--- /dev/null
+++ b/scraper/reports/misc/all_doi-2.csv
@@ -0,0 +1,749 @@
+a094e52771baabe4ab37ef7853f9a4f534227457,https://doi.org/10.1109/TITS.2016.2551298
+5da827fe558fb2e1124dcc84ef08311241761726,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139096
+bcf2710d46941695e421226372397c9544994214,https://doi.org/10.1109/ICNC.2015.7378076
+136aae348c7ebc6fd9df970b0657241983075795,https://doi.org/10.1109/ICIP.2015.7351542
+ac37285f2f5ccf99e9054735a36465ee35a6afdd,https://doi.org/10.1109/ISCAS.2006.1693880
+98d1b5515b079492c8e7f0f9688df7d42d96da8e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5204260
+b69e7e2a7705a58a0e3f1b80ae542907b89ce02e,https://doi.org/10.1007/s11042-015-2614-5
+c570d1247e337f91e555c3be0e8c8a5aba539d9f,https://doi.org/10.1007/s11042-012-1352-1
+44834929e56f2a8f16844fde519039d647006216,http://doi.acm.org/10.1145/1460096.1460150
+8f3675e979629ca9cee9436d37763f546edb8d40,https://doi.org/10.1109/SIU.2017.7960446
+6aa0a47f4b986870370c622be51f00f3a1b9d364,https://doi.org/10.1109/TIP.2012.2192285
+6c0ad77af4c0850bd01bb118e175ecc313476f27,http://doi.acm.org/10.1145/3009977.3010026
+218139e5262cb4f012cd2e119074aa59b89ebc32,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.265
+5f2c210644c1e567435d78522258e0ae036deedb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4036602
+eca706b4d77708452bdad1c98a23e4e88ce941ab,https://doi.org/10.1142/S0218001416550144
+2cf3564d7421b661e84251d280d159d4b3ebb336,https://doi.org/10.1109/BTAS.2014.6996287
+42a6beed493c69d5bad99ae47ea76497c8e5fdae,http://doi.org/10.1007/s11704-017-6613-8
+9aba281955117eb4a7aed36775f55f27e4dde42f,http://doi.ieeecomputersociety.org/10.1109/AFGR.2000.840635
+9989ad33b64accea8042e386ff3f1216386ba7f1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8393320
+56fd4c05869e11e4935d48aa1d7abb96072ac242,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373812
+1f5f67d315c9dad341d39129d8f8fe7fa58e564c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8397536
+548233d67f859491e50c5c343d7d77a7531d4221,https://doi.org/10.1007/s11042-007-0176-x
+d63bd06340dd35590a22222509e455c49165ee13,https://doi.org/10.1109/IJCNN.2016.7727234
+bb3698df3b4f40c0b7cc523d26ffb8c5276d5a1c,https://doi.org/10.1109/ICDSP.2016.7868528
+e0cc2a9fe6b5086c55fdbf0021aca3dc1a77a1ca,http://doi.ieeecomputersociety.org/10.1109/BLISS.2008.25
+51dcb36a6c247189be4420562f19feb00c9487f8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1394433
+b999364980e4c21d9c22cc5a9f14501432999ca4,http://doi.org/10.1007/s10044-018-0727-y
+c3d3d2229500c555c7a7150a8b126ef874cbee1c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406478
+2f841ff062053f38725030aa1b77db903dad1efb,https://doi.org/10.1109/ICRA.2014.6907748
+1ab19e516b318ed6ab64822efe9b2328836107a4,https://doi.org/10.1109/TIP.2010.2083674
+ace1e0f50fe39eb9a42586f841d53980c6f04b11,http://doi.ieeecomputersociety.org/10.1109/SMARTCOMP.2014.7043849
+946b4d840b026d91608758d04f2763e9b981234e,http://doi.acm.org/10.1145/2388676.2388792
+cc9a61a30afdb8a5bc7088e1cef814b53dc4fc66,https://doi.org/10.1142/s0218213015400199
+99b8a24aacaa53fa3f8a7e48734037c7b16f1c40,https://doi.org/10.1109/ACCESS.2017.2752176
+fcceea054cb59f1409dda181198ed4070ed762c9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8388318
+eb3c45e78acee0824c8f7d997c6104d74e7213a8,http://doi.ieeecomputersociety.org/10.1109/iThings/CPSCom.2011.116
+6318d3842b36362bb45527b717e1a45ae46151d5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780708
+ccb95192001b07bb25fc924587f9682b0df3de8e,https://doi.org/10.1109/ICACCI.2016.7732123
+aa8341cb5d8f0b95f619d9949131ed5c896d6470,http://doi.ieeecomputersociety.org/10.1109/IIH-MSP.2007.403
+13907865a97afde053d7bb7134d58a7bbc12043c,https://doi.org/10.1016/j.patcog.2014.05.001
+6c6f0e806e4e286f3b18b934f42c72b67030ce17,https://doi.org/10.1109/FG.2011.5771345
+96e0b67f34208b85bd90aecffdb92bc5134befc8,https://doi.org/10.1016/j.patcog.2007.10.002
+6932baa348943507d992aba75402cfe8545a1a9b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014987
+30cc1ddd7a9b4878cca7783a59086bdc49dc4044,https://doi.org/10.1007/s11042-015-2599-0
+82a0a5d0785fb2c2282ed901a15c3ff02f8567df,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6849828
+092dd7cb6c9b415eb83afb104fa63d7d4290ac33,https://doi.org/10.1109/SPLIM.2016.7528409
+cd22e6532211f679ba6057d15a801ba448b9915c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8434092
+d569c3e62f471aa75ed53e631ec05c1a3d594595,https://doi.org/10.1109/NNSP.2002.1030072
+baafe3253702955c6904f0b233e661b47aa067e1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7776926
+6cce5ccc5d366996f5a32de17a403341db5fddc6,http://doi.org/10.1016/j.cviu.2016.04.012
+76669f166ddd3fb830dbaacb3daa875cfedc24d9,https://doi.org/10.1109/ICPR.2016.7899840
+3e9ab40e6e23f09d16c852b74d40264067ac6abc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6619307
+d141c31e3f261d7d5214f07886c1a29ac734d6fc,http://doi.org/10.1007/s11063-018-9812-x
+93cd5c47e4a3425d23e3db32c6eaef53745bb32e,http://doi.org/10.1007/s11042-017-5062-6
+4eeccbbb98de4f2e992600482fd6b881ace014bb,http://doi.acm.org/10.1145/2964284.2967240
+4ed40e6bb66dfa38a75d864d804d175a26b6c6f6,http://doi.ieeecomputersociety.org/10.1109/CRV.2011.41
+4932b929a2e09ddebedcb1abe8c62f269e7d4e33,https://doi.org/10.1109/SIU.2016.7496076
+71a9d7cf8cf1e206cb5fa18795f5ab7588c61aba,https://doi.org/10.1109/TIM.2011.2141270
+13179bb3f2867ea44647b6fe0c8fb4109207e9f5,http://doi.org/10.1007/s00779-018-1171-0
+026e96c3c4751e1583bfe78b8c28bdfe854c4988,https://doi.org/10.1109/ICIP.2017.8296442
+d7b7253f7d8b397d9d74057e1e72ed9c58e2ba6d,https://doi.org/10.1109/TII.2013.2271914
+84f3c4937cd006888b82f2eb78e884f2247f0c4e,https://doi.org/10.1109/CCNC.2012.6181097
+17768efd76a681902a33994da4d3163262bf657f,https://doi.org/10.1007/s12559-017-9472-6
+398558817e05e8de184cc4c247d4ea51ab9d4d58,https://doi.org/10.1109/ICPR.2014.14
+5dbb2d556f2e63a783a695a517f5deb11aafd7ea,https://doi.org/10.1109/ICB.2015.7139079
+3d1f976db6495e2bb654115b939b863d13dd3d05,https://doi.org/10.1007/s11042-015-2581-x
+c7b58827b2d07ece676271ae0425e369e3bd2310,https://doi.org/10.1142/S0218001415560042
+cb8a1b8d87a3fef15635eb4a32173f9c6f966055,http://dl.acm.org/citation.cfm?id=3234150
+768f6a14a7903099729872e0db231ea814eb05e9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411205
+d8c9ce0bd5e4b6d1465402a760845e23af5ac259,https://doi.org/10.1109/ITSC.2015.380
+5811944e93a1f3e35ece7a70a43a3de95c69b5ab,https://doi.org/10.1109/BTAS.2016.7791163
+c12260540ec14910f5ec6e38d95bdb606826b32e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7005459
+7788fa76f1488b1597ee2bebc462f628e659f61e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8063888
+1fd7a17a6c630a122c1a3d1c0668d14c0c375de0,https://doi.org/10.1109/CIST.2016.7805097
+d06bcb2d46342ee011e652990edf290a0876b502,http://arxiv.org/abs/1708.00980
+583e0d218e1e7aaf9763a5493e7c18c2b8dd7464,http://doi.acm.org/10.1145/2988240.2988243
+963a004e208ce4bd26fa79a570af61d31651b3c3,https://doi.org/10.1016/j.jvlc.2009.01.011
+4e343c66c5fe7426132869d552f0f205d1bc5307,https://doi.org/10.1109/ICPR.2014.452
+009bf86913f1c366d9391bf236867d84d12fa20c,https://doi.org/10.1109/CVPRW.2010.5544620
+67386772c289cd40db343bdc4cb8cb4f58271df2,http://doi.org/10.1038/s41598-017-10745-9
+e1312b0b0fd660de87fa42de39316b28f9336e70,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369055
+7c66e7f357553fd4b362d00ff377bffb9197410e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961231
+9fd1b8abbad25cb38f0c009288fb5db0fc862db6,https://doi.org/10.1109/ICASSP.2003.1199147
+1a53ca294bbe5923c46a339955e8207907e9c8c6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7273870
+745e74ae84e1b2b8690d07db523531642023d6c4,https://doi.org/10.1109/FSKD.2016.7603417
+789b8fff223b0db0fe3babf46ea98b1d5197f0c0,https://doi.org/10.1002/ima.20245
+4b9c47856f8314ecbe4d0efc65278c2ededb2738,https://doi.org/10.1109/LSP.2012.2188890
+b749ca71c60904d7dad6fc8fa142bf81f6e56a62,https://doi.org/10.1109/TIP.2013.2292560
+2138ccf78dcf428c22951cc066a11ba397f6fcef,https://doi.org/10.1109/BHI.2012.6211519
+845f45f8412905137bf4e46a0d434f5856cd3aec,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8418618
+486f5e85944404a1b57333443070b0b8c588c262,http://doi.ieeecomputersociety.org/10.1109/IRI.2014.7051957
+715d3eb3665f46cd2fab74d35578a72aafbad799,http://doi.ieeecomputersociety.org/10.1109/WI-IAT.2013.118
+0d98750028ea7b84b86e6fec3e67d61e4f690d09,https://doi.org/10.1109/ACSSC.2015.7421092
+a53f988d16f5828c961553e8efd38fed15e70bcc,https://doi.org/10.1109/BTAS.2015.7358787
+21d5c838d19fcb4d624b69fe9d98e84d88f18e79,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7358748
+13901473a12061f080b9d54219f16db7d406e769,https://doi.org/10.1109/TIP.2012.2222895
+1ab4fdcd431286a2fe9538cb9a9e3c67016fa98a,https://doi.org/10.1007/s11042-013-1754-8
+6a527eeb0b2480109fe987ed7eb671e0d847fca8,https://doi.org/10.1007/978-3-319-28515-3
+2c052a1c77a3ec2604b3deb702d77c41418c7d3e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373863
+ee1465cbbc1d03cb9eddaad8618a4feea78a01ce,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6998872
+9255d3b2bfee4aaae349f68e67c76a077d2d07ad,https://doi.org/10.1109/TIP.2017.2713041
+9cda3e56cec21bd8f91f7acfcefc04ac10973966,https://doi.org/10.1109/IWBF.2016.7449688
+907bb6c2b292e6db74fad5c0b7a7f1cc2a4d4224,https://doi.org/10.1016/j.patcog.2014.07.010
+eb8a3948c4be0d23eb7326d27f2271be893b3409,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7914701
+62750d78e819d745b9200b0c5c35fcae6fb9f404,http://doi.org/10.1007/s11042-016-4085-8
+fb915bcc1623cdf999c0e95992c0e0cf85e64d8e,http://doi.ieeecomputersociety.org/10.1109/iThings.2014.83
+7de8a8b437ec7a18e395be9bf7c8f2d502025cc6,https://doi.org/10.1109/SIU.2017.7960528
+4fbc0189252ed4fe8f9cffd3ea0ebbb0c621e3ef,https://doi.org/10.1049/iet-cvi.2012.0127
+3827f1cab643a57e3cd22fbffbf19dd5e8a298a8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373804
+cf6c59d359466c41643017d2c212125aa0ee84b2,http://doi.ieeecomputersociety.org/10.1109/ICME.2016.7552983
+de79437f74e8e3b266afc664decf4e6e4bdf34d7,https://doi.org/10.1109/IVCNZ.2016.7804415
+79fd4baca5f840d6534a053b22e0029948b9075e,https://doi.org/10.1109/ISDA.2012.6416647
+f2d605985821597773bc6b956036bdbc5d307386,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8027090
+a63ec22e84106685c15c869aeb157aa48259e855,https://doi.org/10.1142/S0219691312500294
+36bb5cca0f6a75be8e66f58cba214b90982ee52f,http://doi.ieeecomputersociety.org/10.1109/FG.2017.73
+91d0e8610348ef4d5d4975e6de99bb2d429af778,http://doi.ieeecomputersociety.org/10.1109/ICIG.2011.61
+4ba2f445fcbbad464f107b036c57aa807ac5c0c2,https://doi.org/10.1109/TCSVT.2014.2367357
+c2dc29e0db76122dfed075c3b9ee48503b027809,https://doi.org/10.1109/ICIP.2016.7532632
+771505abd38641454757de75fe751d41e87f89a4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8401561
+3d6f59e0f0e16d01b9c588a53d3b6b3b984e991e,http://doi.ieeecomputersociety.org/10.1109/FG.2017.117
+a200885bf6bfa0493d85e7617e65cdabe30a2dab,https://doi.org/10.1109/ICIP.2015.7351272
+f63b3b8388bc4dcd4a0330402af37a59ce37e4f3,https://doi.org/10.1109/SIU.2013.6531214
+77c5437107f8138d48cb7e10b2b286fa51473678,https://doi.org/10.1109/URAI.2016.7734005
+35d90beea6b4dca8d949aae93f86cf53da72971f,https://doi.org/10.1109/ICIP.2011.6116672
+d9d7a4b64b13ed1bce89d3cbbabe62e78d70b3fb,http://doi.ieeecomputersociety.org/10.1109/ACII.2013.16
+3769e65690e424808361e3eebfdec8ab91908aa9,http://doi.acm.org/10.1145/2647868.2655035
+7e2f7c0eeaeb47b163a7258665324643669919e8,http://doi.org/10.1007/s11042-018-5801-3
+6cbde27d9a287ae926979dbb18dfef61cf49860e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8253589
+2696d3708d6c6cccbd701f0dac14cc94d72dd76d,http://doi.org/10.1007/s10044-017-0633-8
+4492914df003d690e5ff3cb3e0e0509a51f7753e,http://doi.ieeecomputersociety.org/10.1109/ICCI-CC.2014.6921443
+6ff0f804b8412a50ae2beea5cd020c94a5de5764,http://doi.acm.org/10.1145/1877972.1877994
+20d6a4aaf5abf2925fdce2780e38ab1771209f76,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8446795
+4512b87d68458d9ba0956c0f74b60371b6c69df4,https://doi.org/10.1109/TIP.2017.2708504
+b6a23f72007cb40223d7e1e1cc47e466716de945,https://doi.org/10.1109/CVPRW.2010.5544598
+19705579b8e7d955092ef54a22f95f557a455338,https://doi.org/10.1109/ICIP.2014.7025277
+c38b1fa00f1f370c029984c55d4d2d40b529d00c,http://doi.org/10.1007/978-3-319-26561-2
+5721cd4b898f0e7df8de1e0215f630af94656be9,http://doi.acm.org/10.1145/3095140.3095164
+180bd019eab85bbf01d9cddc837242e111825750,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8239690
+c2b10909a0dd068b8e377a55b0a1827c8319118a,https://doi.org/10.1109/TCYB.2016.2565898
+06a799ad89a2a45aee685b9e892805e3e0251770,https://doi.org/10.1007/978-3-319-42147-6
+ea86b75427f845f04e96bdaadfc0d67b3f460005,https://doi.org/10.1109/ICIP.2016.7532686
+b53485dbdd2dc5e4f3c7cff26bd8707964bb0503,http://doi.org/10.1007/s11263-017-1012-z
+6baaa8b763cc5553715766e7fbe7abb235fae33c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789589
+eb6f2b5529f2a7bc8b5b03b1171f75a4c753a0b2,http://doi.org/10.1117/12.650555
+d4df31006798ee091b86e091a7bf5dce6e51ba3e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1612996
+c4b00e86841db3fced2a5d8ac65f80d0d3bbe352,http://doi.ieeecomputersociety.org/10.1109/AIPR.2004.4
+bf00071a7c4c559022272ca5d39e07f727ebb479,https://doi.org/10.1109/MMSP.2016.7813388
+828d7553a45eb0c3132e406105732a254369eb4d,https://doi.org/10.1016/j.neunet.2017.09.001
+bb4f83458976755e9310b241a689c8d21b481238,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265393
+81d81a2060366f29fd100f793c11acf000bd2a7f,https://doi.org/10.1007/11795131_112
+c26b43c2e1e2da96e7caabd46e1d7314acac0992,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8466510
+88ed558bff3600f5354963d1abe762309f66111e,https://doi.org/10.1109/TIFS.2015.2393553
+bef4df99e1dc6f696f9b3732ab6bac8e85d3fb3c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7344632
+37f25732397864b739714aac001ea1574d813b0d,https://doi.org/10.1016/j.ijar.2017.09.002
+a3bf6129d1ae136709063a5639eafd8018f50feb,http://doi.ieeecomputersociety.org/10.1109/ICCI-CC.2017.8109741
+4b936847f39094d6cb0bde68cea654d948c4735d,http://doi.org/10.1007/s11042-016-3470-7
+a76e57c1b2e385b68ffdf7609802d71244804c1d,https://doi.org/10.1016/j.patrec.2016.05.027
+6f22324fab61fbc5df1aac2c0c9c497e0a7db608,https://doi.org/10.1109/ICB.2013.6612990
+03333e7ec198208c13627066bc76b0367f5e270f,https://doi.org/10.1109/IJCNN.2017.7966100
+f6fc112ff7e4746b040c13f28700a9c47992045e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7442559
+9b1c218a55ead45296bfd7ad315aaeff1ae9983e,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2481396
+b5bda4e1374acc7414107cde529ad8b3263fae4b,https://doi.org/10.1007/s11370-010-0066-3
+789a43f51e0a3814327dab4299e4eda8165a5748,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.189
+01729cb766b1016bac217a6a6cf24bbde19f56c8,https://doi.org/10.1109/CBMI.2010.5529888
+92292fffc36336d63f4f77d6b8fc23b0c54090e9,http://doi.org/10.1016/j.jvcir.2015.03.001
+6afe1f668eea8dfdd43f0780634073ed4545af23,https://doi.org/10.1007/s11042-017-4962-9
+c648d2394be3ff0c0ee5360787ff3777a3881b02,https://doi.org/10.1080/01449290903353047
+2e6776cd582c015b46faf616f29c98ce9cff51a2,https://doi.org/10.1109/TNN.2005.860849
+e57ce6244ec696ff9aa42d6af7f09eed176153a8,https://doi.org/10.1109/ICIP.2015.7351449
+18bfda16116e76c2b21eb2b54494506cbb25e243,https://doi.org/10.1109/TIFS.2010.2051544
+06ab24721d7117974a6039eb2e57d1545eee5e46,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373809
+a00fdf49e5e0a73eb24345cb25a0bd1383a10021,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7892186
+b41d585246360646c677a8238ec35e8605b083b0,http://doi.org/10.1007/s11042-018-6017-2
+ce8db0fe11e7c96d08de561506f9f8f399dabbb2,https://doi.org/10.1109/ICIP.2015.7351677
+492116d16a39eb54454c7ffb1754cea27ad3a171,http://doi.acm.org/10.1145/3132525.3134823
+5039834df68600a24e7e8eefb6ba44a5124e67fc,https://doi.org/10.1109/ICIP.2013.6738761
+773ce00841a23d32727aa1f54c29865fefd4ce02,http://doi.ieeecomputersociety.org/10.1109/AIPR.2006.24
+895081d6a5545ad6385bfc6fcf460fc0b13bac86,http://doi.org/10.1016/S0167-8655%2899%2900134-8
+57178b36c21fd7f4529ac6748614bb3374714e91,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217
+87610276ccbc12d0912b23fd493019f06256f94e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6706757
+36486944b4feeb88c0499fecd253c5a53034a23f,https://doi.org/10.1109/CISP-BMEI.2017.8301986
+b2470969e4fba92f7909eac26b77d08cc5575533,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8326475
+9c686b318cb7774b6da5e2c712743a5a6cafa423,https://doi.org/10.1016/j.neuroimage.2015.12.036
+c1a70d63d1667abfb1f6267f3564110d55c79c0d,https://doi.org/10.1007/s00138-013-0488-y
+cb27b45329d61f5f95ed213798d4b2a615e76be2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8329236
+2348f1fa2940b01ec90e023fac8cc96812189774,http://doi.ieeecomputersociety.org/10.1109/EWDTS.2017.8110157
+bd74c3ca2ff03396109ac2d1131708636bd0d4d3,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.228
+1135a818b756b057104e45d976546970ba84e612,http://doi.ieeecomputersociety.org/10.1109/FG.2017.118
+8fba84af61ac9b5e2bcb69b6730a597d7521ad73,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771329
+ba931c3f90dd40a5db4301a8f0c71779a23043d6,https://doi.org/10.1109/ICPR.2014.136
+4983076c1a8b80ff5cd68b924b11df58a68b6c84,http://doi.org/10.1007/s11704-017-6114-9
+f7ae38a073be7c9cd1b92359131b9c8374579b13,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7487053
+f18ff597bbfca10f84d017ac5e1ef0de6d7ad66c,http://doi.ieeecomputersociety.org/10.1109/SNPD.2016.7515888
+15e12d5c4d80a2b6f4d957a3ffd130564e9bab3a,https://doi.org/10.5220/0004736505740580
+947cdeb52f694fb1c87fc16836f8877cd83dc652,https://doi.org/10.1109/SMAP.2017.8022671
+84574aa43a98ad8a29470977e7b091f5a5ec2366,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7301321
+5760d29574d78e79e8343b74e6e30b3555e48676,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8447743
+891b31be76e2baa83745f24c2e2013851dc83cbb,https://doi.org/10.1109/TSMCB.2009.2018137
+055cd8173536031e189628c879a2acad6cf2a5d0,https://doi.org/10.1109/BTAS.2017.8272740
+53de11d144cd2eda7cf1bb644ae27f8ef2489289,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8424637
+557115454c1b8e6eaf8dbb65122c5b00dc713d51,https://doi.org/10.1109/LSP.2011.2140370
+152683f3ac99f829b476ea1b1b976dec6e17b911,https://doi.org/10.1109/MIXDES.2016.7529773
+e51927b125640bfc47bbf1aa00c3c026748c75bd,http://doi.acm.org/10.1145/2647868.2655015
+407806f5fe3c5ecc2dc15b75d3d2b0359b4ee7e0,http://doi.org/10.1007/s11042-017-5028-8
+da928ac611e4e14e454e0b69dfbf697f7a09fb38,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477718
+67e6ddce6fea17bb2b171c949ee224936d36c0d1,https://doi.org/10.1109/ICIP.2008.4712157
+7177649ece5506b315cb73c36098baac1681b8d2,http://doi.ieeecomputersociety.org/10.1109/FG.2017.130
+a9756ca629f73dc8f84ee97cfa8b34b8207392dc,https://doi.org/10.1109/ICIP.2017.8296542
+e94168c35be1d4b4d2aaf42ef892e64a3874ed8c,https://doi.org/10.1109/TSMCB.2008.2010715
+1c5a5d58a92c161e9ba27e2dfe490e7caaee1ff5,http://doi.ieeecomputersociety.org/10.1109/FG.2015.7163119
+33c2131cc85c0f0fef0f15ac18f28312347d9ba4,https://doi.org/10.1016/j.neucom.2010.06.024
+a324d61c79fe2e240e080f0dab358aa72dd002b3,https://doi.org/10.1016/j.patcog.2016.02.005
+5865b6d83ba6dbbf9167f1481e9339c2ef1d1f6b,https://doi.org/10.1109/ICPR.2016.7900278
+35208eda874591eac70286441d19785726578946,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789507
+acff2dc5d601887741002a78f8c0c35a799e6403,http://doi.org/10.1007/978-3-662-44654-6
+057b80e235b10799d03876ad25465208a4c64caf,http://dl.acm.org/citation.cfm?id=3123427
+1125760c14ea6182b85a09bf3f5bad1bdad43ef5,https://doi.org/10.1109/CVPR.2004.286
+9d24812d942e69f86279a26932df53c0a68c4111,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8417316
+1dabb080e3e968633f4b3774f19192f8378f5b67,https://doi.org/10.1109/ICPR.2016.7899664
+0fee3b9191dc1cef21f54232a23530cd8169d3b2,https://doi.org/10.1109/ICDM.2016.0050
+00049f989067d082f7f8d0581608ad5441d09f8b,https://doi.org/10.1109/LSP.2016.2555480
+417c2fa930bb7078fdf10cb85c503bd5270b9dc2,https://doi.org/10.1109/ICSIPA.2015.7412169
+e1449be4951ba7519945cd1ad50656c3516113da,https://doi.org/10.1109/TCSVT.2016.2603535
+0db371a6bc8794557b1bffc308814f53470e885a,https://doi.org/10.1007/s13042-015-0380-3
+cf4c1099bef189838877c8785812bc9baa5441ed,https://doi.org/10.1109/ICPR.2016.7899862
+272e487dfa32f241b622ac625f42eae783b7d9aa,https://doi.org/10.1109/ICSIPA.2015.7412207
+a939e287feb3166983e36b8573cd161d12097ad8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7550048
+99e0c03686f7bc9d7add6cff39a941a047c3600a,https://doi.org/10.1109/ACCESS.2017.2712788
+af8e22ef8c405f9cc9ad26314cb7a9e7d3d4eec2,https://doi.org/10.1007/s00521-014-1569-1
+df87193e15a19d5620f5a6458b05fee0cf03729f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7363421
+b14e3fe0d320c0d7c09154840250d70bc88bb6c0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1699097
+3e452ca67e17e4173ec8dfbd4a2b803ad2ee5a48,http://doi.ieeecomputersociety.org/10.1109/WF-IoT.2016.7845505
+0f1cb558b32c516e2b6919fea0f97a307aaa9091,https://doi.org/10.1007/s41095-017-0091-7
+c48b68dc780c71ab0f0f530cd160aa564ed08ade,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1357193
+4007bf090887d8a0e907ab5e17ecfcdbbdafc2e4,http://doi.org/10.1007/s13735-017-0144-9
+4500888fd4db5d7c453617ee2b0047cedccf2a27,http://doi.acm.org/10.1145/2647750
+405d9a71350c9a13adea41f9d7f7f9274793824f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373834
+f812347d46035d786de40c165a158160bb2988f0,https://doi.org/10.1007/s10339-016-0765-6
+82dad0941a7cada11d2e2f2359293fe5fabf913f,https://doi.org/10.1109/ICIP.2017.8296810
+ad5a35a251e07628dd035c68e44a64c53652be6b,https://doi.org/10.1016/j.patcog.2016.12.024
+95b9df34bcf4ae04beea55c11cf0cc4095aa38dc,http://doi.org/10.1007/11527923_7
+b839bc95794dc65340b6e5fea098fa6e6ea5e430,https://doi.org/10.1109/WACVW.2017.8
+a082c77e9a6c2e2313d8255e8e4c0677d325ce3e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163111
+b0b944b3a783c2d9f12637b471fe1efb44deb52b,http://dl.acm.org/citation.cfm?id=2591684
+f0a9d69028edd1a39147848ad1116ca308d7491e,https://doi.org/10.1007/11573548_11
+0a0b9a9ff827065e4ff11022b0e417ddf1d3734e,http://dl.acm.org/citation.cfm?id=2935856
+da2b2be4c33e221c7f417875a6c5c74043b1b227,https://doi.org/10.1109/BTAS.2017.8272712
+e7b7df786cf5960d55cbac4e696ca37b7cee8dcd,https://doi.org/10.1109/IJCNN.2012.6252728
+c3e53788370341afe426f2216bed452cbbdaf117,http://doi.ieeecomputersociety.org/10.1109/ATNAC.2017.8215436
+cce2f036d0c5f47c25e459b2f2c49fa992595654,http://doi.ieeecomputersociety.org/10.1109/DICTA.2010.93
+18d3532298fb7b8fb418453107f786178ca82e4a,https://doi.org/10.1109/TIFS.2017.2668221
+8e55486aa456cae7f04fe922689b3e99a0e409fe,http://doi.acm.org/10.1145/3123266.3123342
+02a92b79391ddac0acef4f665b396f7f39ca2972,https://doi.org/10.1016/j.patcog.2016.10.021
+a3201e955d6607d383332f3a12a7befa08c5a18c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7900276
+a2646865d7c3d7fb346cf714caf146de2ea0e68f,https://doi.org/10.1109/SMC.2016.7844390
+53f5cb365806c57811319a42659c9f68b879454a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8356995
+913961d716a4102d3428224f999295f12438399f,https://doi.org/10.1016/j.patcog.2014.01.016
+a8c62833f5e57d4cd060d6b5f0f9cfe486ee6825,http://doi.ieeecomputersociety.org/10.1109/CSIE.2009.808
+6bacd4347f67ec60a69e24ed7cc0ac8073004e6f,https://doi.org/10.1109/VCIP.2014.7051528
+f652cb159a2cf2745aabcbf6a7beed4415e79e34,http://doi.acm.org/10.1145/1460096.1460119
+cec70cf159b51a18b39c80fac1ad34f65f3691ef,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7949100
+3d2c89676fcc9d64aaed38718146055152d22b39,https://doi.org/10.1109/ACPR.2013.10
+96e318f8ff91ba0b10348d4de4cb7c2142eb8ba9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8364450
+803803b5c2c61046d63674f85ecf0123f9d2c4b8,https://doi.org/10.1049/iet-bmt.2013.0089
+521aa8dcd66428b07728b91722cc8f2b5a73944b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8367126
+e71c15f5650a59755619b2a62fa93ac922151fd6,http://doi.ieeecomputersociety.org/10.1109/AUTOID.2005.22
+2f160a6526ebf10773680dadaba44b006bcec2cb,https://doi.org/10.1016/j.neucom.2012.03.007
+f5c57979ec3d8baa6f934242965350865c0121bd,http://doi.org/10.1007/s12539-018-0281-8
+603231c507bb98cc8807b6cbe2c860f79e8f6645,https://doi.org/10.1109/EUSIPCO.2015.7362819
+32e9c9520cf6acb55dde672b73760442b2f166f5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7970176
+993374c1c9d58a3dec28160188ff6ac1227d02f5,https://doi.org/10.1109/ICARCV.2016.7838650
+080e0efc3cf71260bfe9bdc62cd86614d1ebca46,http://doi.org/10.1007/s10851-017-0771-z
+2360ecf058393141ead1ca6b587efa2461e120e4,https://doi.org/10.1007/s00138-017-0895-6
+cccd0edb5dafb3a160179a60f75fd8c835c0be82,http://doi.org/10.1007/s12193-017-0241-3
+31ba9d0bfaa2a44bae039e5625eb580afd962892,https://doi.org/10.1016/j.cviu.2016.03.014
+9f1a854d574d0bd14786c41247db272be6062581,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8360155
+daa120032d8f141bc6aae20e23b1b754a0dd7d5f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789593
+4562ea84ebfc8d9864e943ed9e44d35997bbdf43,http://doi.ieeecomputersociety.org/10.1109/FG.2017.19
+d963bdff2ce5212fa585a83ca8fad96875bc0057,https://doi.org/10.1016/j.neucom.2016.03.091
+3d2c932f4f2693a87a0b855048e60f142214f475,http://doi.ieeecomputersociety.org/10.1109/CSE.2014.354
+70341f61dfe2b92d8607814b52dfd0863a94310e,http://doi.ieeecomputersociety.org/10.1109/AVSS.2015.7301750
+3d1959048eba5495e765a80c8e0bbd3d65b3d544,https://doi.org/10.1016/j.neucom.2016.07.038
+c4d439fe07a65b735d0c8604bd5fdaea13f6b072,http://doi.acm.org/10.1145/2671188.2749294
+e6da1fcd2a8cda0c69b3d94812caa7d844903007,http://dl.acm.org/citation.cfm?id=3137154
+c4f3185f010027a0a97fcb9753d74eb27a9cfd3e,http://doi.org/10.1016/j.patrec.2015.02.006
+81a4397d5108f6582813febc9ddbeff905474120,https://doi.org/10.1109/ICPR.2016.7899883
+8845c03bee88fdd2f400ed2bddba038366c82abe,http://doi.ieeecomputersociety.org/10.1109/TCBB.2011.135
+1f3f7df159c338884ddfd38ee2d3ba2e1e3ada69,http://doi.org/10.1162/jocn_a_00645
+3ec860cfbd5d953f29c43c4e926d3647e532c8b0,https://doi.org/10.1109/TCSVT.2008.924108
+d2b3166b8a6a3e6e7bc116257e718e4fe94a0638,https://doi.org/10.1007/s00521-010-0411-7
+ad1679295a5e5ebe7ad05ea1502bce961ec68057,http://doi.ieeecomputersociety.org/10.1109/ACII.2015.7344631
+c5022fbeb65b70f6fe11694575b8ad1b53412a0d,https://doi.org/10.1109/ICIP.2005.1530209
+fa052fd40e717773c6dc9cc4a2f5c10b8760339f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5595883
+69064c7b349bf6e7f4a802f4fd0da676c1bd1d8b,https://doi.org/10.1016/j.patcog.2014.06.016
+ff76ff05aa1ab17e5ca9864df2252e6bb44c8a17,http://dl.acm.org/citation.cfm?id=3173582
+0733ec1953f6c774eb3a723618e1268586b46359,https://doi.org/10.1109/TMM.2006.870737
+8a4893d825db22f398b81d6a82ad2560832cd890,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5349489
+948f35344e6e063ffc35f10c547d5dd9204dee4e,https://doi.org/10.1016/j.eswa.2017.07.037
+24e82eaf3257e761d6ca0ffcc2cbca30dfca82e9,https://doi.org/10.1109/GlobalSIP.2016.7906030
+501076313de90aca7848e0249e7f0e7283d669a1,https://doi.org/10.1109/SOCPAR.2014.7007987
+7adfc2f854e2ea45c29d22d6e2dcccdd527f46a8,https://doi.org/10.1007/s00138-015-0677-y
+a3add3268c26876eb76decdf5d7dd78a0d5cf304,https://doi.org/10.1016/j.specom.2017.07.003
+a1cda8e30ce35445e4f51b47ab65b775f75c9f18,https://doi.org/10.1109/ISBA.2018.8311462
+ccfebdf7917cb50b5fcd56fb837f841a2246a149,https://doi.org/10.1109/ICIP.2015.7351065
+7b618a699b79c1272f6c83101917ad021a58d96b,https://doi.org/10.1007/s11042-014-1986-2
+8e452379fda31744d4a4383fcb8a9eab6dbc4ae4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4586390
+fcc6fe6007c322641796cb8792718641856a22a7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994
+1979e270093b343d62e97816eeed956062e155a0,https://doi.org/10.1016/j.micpro.2005.07.003
+9213a415d798426c8d84efc6d2a69a2cbfa2af84,https://doi.org/10.1016/j.cviu.2013.03.008
+e16f73f3a63c44cf285b8c1bc630eb8377b85b6d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373816
+516f8728ad1d4f9f2701a2b5385f8c8e71b9d356,https://doi.org/10.1109/ACCESS.2017.2745903
+fb2bd6c2959a4f811b712840e599f695dad2967e,https://doi.org/10.1109/ISPA.2015.7306038
+af97a51f56cd6b793cf96692931a8d1ddbe4e3cc,https://doi.org/10.1109/ICPR.2014.57
+14ae16e9911f6504d994503989db34d2d1cb2cd4,https://doi.org/10.1007/s11042-013-1616-4
+f2eab39cf68de880ee7264b454044a55098e8163,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5539989
+9ce97efc1d520dadaa0d114192ca789f23442727,http://doi.acm.org/10.1145/2597627
+aee3427d0814d8a398fd31f4f46941e9e5488d83,http://dl.acm.org/citation.cfm?id=1924573
+5a0ae814be58d319dfc9fd98b058a2476801201c,https://doi.org/10.1007/s00521-012-1124-x
+9649a19b49607459cef32f43db4f6e6727080bdb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8395207
+a006cd95c14de399706c5709b86ac17fce93fcba,https://doi.org/10.1109/ICPR.2014.343
+95023e3505263fac60b1759975f33090275768f3,http://doi.acm.org/10.1145/2856767.2856770
+3a3e55cf5bfd689d6c922e082efa0cd71cd2ae5c,http://dl.acm.org/citation.cfm?id=3184081
+97b5800e144a8df48f1f7e91383b0f37bc37cf60,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237657
+5613cb13ab381c8a8b81181ac786255705691626,https://doi.org/10.1109/VCIP.2015.7457876
+f6532bf13a4649b7599eb40f826aa5281e392c61,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6202713
+c61eaf172820fcafaabf39005bd4536f0c45f995,http://doi.org/10.1007/978-3-319-58771-4_1
+7644b3a0871b8e0e7e1cdf06099e295f1e5fbdf7,https://doi.org/10.1007/s11063-015-9464-z
+3080026f2f0846d520bd5bacb0cb2acea0ffe16b,https://doi.org/10.1109/BTAS.2017.8272690
+dc5d9399b3796db7fd850990402dce221b98c8be,http://dl.acm.org/citation.cfm?id=3220016
+85205914a99374fa87e004735fe67fc6aec29d36,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2392774
+86597fe787e0bdd05935d25158790727257a40bd,http://doi.ieeecomputersociety.org/10.1109/3DV.2016.72
+143571c2fc9b1b69d3172f8a35b8fad50bc8202a,https://doi.org/10.1016/j.neucom.2014.07.066
+1176a74fb9351ac2de81c198c4861d78e58f172d,https://doi.org/10.1016/j.patrec.2011.03.023
+af7553d833886663550ce83b087a592a04b36419,https://doi.org/10.1109/TIFS.2015.2390138
+36b23007420b98f368d092bab196a8f3cbcf6f93,http://doi.ieeecomputersociety.org/10.1109/ICNC.2009.106
+0de1450369cb57e77ef61cd334c3192226e2b4c2,https://doi.org/10.1109/BTAS.2017.8272747
+9e60614fd57afe381ae42c6ee0b18f32f60bb493,https://doi.org/10.1109/ICIP.2015.7351544
+659dc6aa517645a118b79f0f0273e46ab7b53cd9,https://doi.org/10.1109/ACPR.2015.7486608
+5b64584d6b01e66dfd0b6025b2552db1447ccdeb,https://doi.org/10.1109/BTAS.2017.8272697
+31cdaaa7a47efe2ce0e78ebec29df4d2d81df265,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7776921
+034b3f3bac663fb814336a69a9fd3514ca0082b9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7298991
+bf4f79fd31493648d80d0a4a8da5edeeaba74055,http://doi.acm.org/10.1145/2783258.2783280
+de0ee491d2747a6f3d171f813fe6f5cdb3a27fd6,https://doi.org/10.1002/cpe.3850
+55cfc3c08000f9d21879582c6296f2a864b657e8,http://doi.org/10.1049/iet-cvi.2015.0287
+9e7646b7e9e89be525cda1385cc1351cc28a896e,http://doi.ieeecomputersociety.org/10.1109/TMC.2017.2702634
+349c909abf937ef0a5a12c28a28e98500598834b,http://doi.ieeecomputersociety.org/10.1109/ICMEW.2014.6890672
+383e64d9ef1fca9de677ac82486b4df42e96e861,http://doi.ieeecomputersociety.org/10.1109/DSC.2017.78
+b259f57f41f4b3b5b7ca29c5acb6f42186bbcf23,https://doi.org/10.1109/SMC.2017.8122808
+71bbda43b97e8dc8b67b2bde3c873fa6aacd439f,https://doi.org/10.1016/j.patcog.2015.09.012
+3c18fb8ff0f5003fefa8e9dc9bebaf88908d255c,https://doi.org/10.1109/ICIP.2014.7025145
+4eb8030b31ff86bdcb063403eef24e53b9ad4329,http://doi.acm.org/10.1145/2993148.2997640
+f423d8be5e13d9ef979debd3baf0a1b2e1d3682f,https://doi.org/10.1016/j.imavis.2015.11.004
+26575ad9e75efb440a7dc4ef8e548eed4e19dbd1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411910
+7fcecaef60a681c47f0476e54e08712ee05d6154,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7299097
+1d51b256af68c5546d230f3e6f41da029e0f5852,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7590015
+d6bdc70d259b38bbeb3a78db064232b4b4acc88f,http://doi.ieeecomputersociety.org/10.1109/FG.2017.27
+9378ead3a09bc9f89fb711e2746facf399dd942e,https://doi.org/10.1109/TCSVT.2010.2045817
+6b44543571fe69f088be577d0c383ffc65eceb2a,http://doi.ieeecomputersociety.org/10.1109/EST.2012.24
+5e62b2ab6fd3886e673fd5cbee160a5bee414507,http://doi.ieeecomputersociety.org/10.1109/SITIS.2015.31
+da1477b4a65ae5a013e646b57e004f0cd60619a2,https://doi.org/10.1109/ICB.2012.6199764
+2c6ab32a03c4862ee3e2bc02e7e74745cd523ad2,https://doi.org/10.1109/IC3.2013.6612218
+0fdc3cbf92027cb1200f3f94927bef017d7325ae,https://doi.org/10.1109/BTAS.2015.7358771
+f925879459848a3eeb0035fe206c4645e3f20d42,http://doi.acm.org/10.1145/3025453.3025472
+5ed66fb992bfefb070b5c39dc45b6e3ff5248c10,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163116
+0553c6b9ee3f7d24f80e204d758c94a9d6b375d2,https://doi.org/10.1109/ICIP.2004.1419764
+ca096e158912080493a898b0b8a4bd2902674fed,http://dl.acm.org/citation.cfm?id=3264899
+78d4d861c766af2a8da8855bece5da4e6eed2e1c,http://doi.acm.org/10.1145/3129416.3129455
+e9d1b3767c06c896f89690deea7a95401ae4582b,https://doi.org/10.1109/VCIP.2016.7805565
+d57982dc55dbed3d0f89589e319dc2d2bd598532,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099760
+89497854eada7e32f06aa8f3c0ceedc0e91ecfef,https://doi.org/10.1109/TIP.2017.2784571
+de0df8b2b4755da9f70cf1613d7b12040d0ce8ef,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791166
+a73405038fdc0d8bf986539ef755a80ebd341e97,https://doi.org/10.1109/TIP.2017.2698918
+045e83272db5e92aa4dc8bdfee908534c2608711,http://doi.ieeecomputersociety.org/10.1109/ICCABS.2016.7802775
+b6c00e51590c48a48fae51385b3534c4d282f76c,https://doi.org/10.1109/TIFS.2015.2427778
+3251f40ed1113d592c61d2017e67beca66e678bb,https://doi.org/10.1007/978-3-319-65172-9_17
+ff82825a04a654ca70e6d460c8d88080ee4a7fcc,http://doi.acm.org/10.1145/2683483.2683533
+3a0425c25beea6c4c546771adaf5d2ced4954e0d,https://link.springer.com/book/10.1007/978-3-319-58347-1
+cfaf61bacf61901b7e1ac25b779a1f87c1e8cf7f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6737950
+6de935a02f87aa31e33245c3b85ea3b7f8b1111c,http://doi.org/10.1007/s11263-017-1029-3
+f38813f1c9dac44dcb992ebe51c5ede66fd0f491,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354277
+75ce75c1a5c35ecdba99dd8b7ba900d073e35f78,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163152
+93af335bf8c610f34ce0cadc15d1dd592debc706,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8267475
+1a8d40bcfb087591cc221086440d9891749d47b8,https://doi.org/10.1109/ICCE.2012.6161859
+b84f164dbccb16da75a61323adaca730f528edde,https://doi.org/10.1109/TIP.2013.2237914
+76d1c6c6b67e67ced1f19a89a5034dafc9599f25,http://doi.acm.org/10.1145/2590296.2590315
+4b8c736524d548472d0725c971ee29240ae683f6,http://doi.ieeecomputersociety.org/10.1109/AUTOID.2005.31
+b6259115b819424de53bb92f64cc459dcb649f31,http://doi.ieeecomputersociety.org/10.1109/AVSS.2017.8078466
+deb89950939ae9847f0a1a4bb198e6dbfed62778,https://doi.org/10.1109/LSP.2016.2543019
+c64502696438b4c9f9e12e64daaf7605f62ce3f0,http://doi.ieeecomputersociety.org/10.1109/WKDD.2009.195
+7f5b379b12505d60f9303aab1fea48515d36d098,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411873
+b1f4423c227fa37b9680787be38857069247a307,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6200254
+c49075ead6eb07ede5ada4fe372899bd0cfb83ac,https://doi.org/10.1109/ICSPCS.2015.7391782
+0831794eddcbac1f601dcb9be9d45531a56dbf7e,http://doi.org/10.1007/s11042-017-4416-4
+d7dd35a86117e46d24914ef49ccd99ea0a7bf705,http://doi.org/10.1007/s10994-014-5463-y
+f03a82fd4a039c1b94a0e8719284a777f776fb22,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8355453
+c1f05b723e53ac4eb1133249b445c0011d42ca79,https://doi.org/10.1162/neco_a_00990
+8aa1591bf8fcb44f2e9f2f10d1029720ccbb8832,http://dl.acm.org/citation.cfm?id=3078988
+97c554fbcf783d554c4f6c2f3fcc0a0f9dba0759,http://doi.ieeecomputersociety.org/10.1109/ISM.2016.0085
+6cb8c52bb421ce04898fa42cb997c04097ddd328,http://doi.org/10.1007/978-3-319-11289-3
+b13b101b6197048710e82f044ad2eda6b93affd8,http://doi.ieeecomputersociety.org/10.1109/ARTCom.2009.91
+7bd37e6721d198c555bf41a2d633c4f0a5aeecc1,https://doi.org/10.1109/ACPR.2013.58
+914d7527678b514e3ee9551655f55ffbd3f0eb0a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8404350
+39c10888a470b92b917788c57a6fd154c97b421c,https://doi.org/10.1109/VCIP.2017.8305036
+fb557b79157a6dda15f3abdeb01a3308528f71f2,http://doi.ieeecomputersociety.org/10.1109/ICNC.2009.310
+b42b535fcd0d9bd41a6594a910ea4623e907ceb9,https://doi.org/10.1109/ICTAI.2012.153
+3fc173805ed43602eebb7f64eea4d60c0386c612,http://doi.ieeecomputersociety.org/10.1109/CyberC.2015.94
+12c4ba96eaa37586f07be0d82b2e99964048dcb5,https://doi.org/10.1109/LSP.2017.2694460
+8d3e95c31c93548b8c71dbeee2e9f7180067a888,https://doi.org/10.1109/ICPR.2016.7899841
+71d68af11df855f886b511e4fc1635c1e9e789b0,https://doi.org/10.1109/TCSVT.2011.2133210
+dea409847d52bb0ad54bf586cb0482a29a584a7e,http://doi.ieeecomputersociety.org/10.1109/ISM.2009.115
+c17c7b201cfd0bcd75441afeaa734544c6ca3416,https://doi.org/10.1109/TCSVT.2016.2587389
+8de5dc782178114d9424d33d9adabb2f29a1ab17,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7053946
+44c278cbecd6c1123bfa5df92e0bda156895fa48,https://doi.org/10.1109/ICPR.2014.316
+6688b2b1c1162bc00047075005ec5c7fca7219fd,https://doi.org/10.1109/SACI.2013.6608958
+e0793fd343aa63b5f366c8ace61b9c5489c51a4d,http://doi.ieeecomputersociety.org/10.1109/CRV.2016.46
+c15b68986ecfa1e13e3791686ae9024f66983f14,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014747
+5dafab3c936763294257af73baf9fb3bb1696654,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5514556
+58483028445bf6b2d1ad6e4b1382939587513fe1,http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247763
+ebde9b9c714ed326157f41add8c781f826c1d864,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014758
+33bbf01413910bca26ed287112d32fe88c1cc0df,https://doi.org/10.1109/ICIP.2014.7026204
+11e6cf1cbb33d67a3e3c87dcaf7031d6654bc0de,http://doi.acm.org/10.1145/2522968.2522978
+b034cc919af30e96ee7bed769b93ea5828ae361b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099915
+48255c9e1d6e1d030728d33a71699757e337be08,https://doi.org/10.1109/ISSNIP.2013.6529832
+ee56823f2f00c8c773e4ebc725ca57d2f9242947,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7110235
+2f8ef56c1007a02cdc016219553479d6b7e097fb,https://doi.org/10.1007/978-3-642-14834-7_2
+9bd3cafa16a411815f8f87ed3eb3cafefc25e5a3,https://doi.org/10.1109/ICPR.2016.7899782
+969626c52d30ea803064ddef8fb4613fa73ba11d,http://doi.org/10.1007/BF02683992
+6e46d8aa63db3285417c8ebb65340b5045ca106f,http://dl.acm.org/citation.cfm?id=3183751
+cf185d0d8fcad2c7f0a28b7906353d4eca5a098b,https://doi.org/10.1186/s13640-017-0190-5
+3779e0599481f11fc1acee60d5108d63e55819b3,http://doi.org/10.1007/s11280-018-0581-2
+9aade3d26996ce7ef6d657130464504b8d812534,https://doi.org/10.1109/TNNLS.2016.2618340
+5dd57b7e0e82a33420c054da7ea3f435d49e910e,https://doi.org/10.1007/s10851-014-0493-4
+43fce0c6b11eb50f597aa573611ac6dc47e088d3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8465617
+a11ce3c9b78bf3f868b1467b620219ff651fe125,http://doi.acm.org/10.1145/2911996.2912073
+d1dd80d77655876fb45b9420fe72444c303b219e,https://doi.org/10.1109/FG.2011.5771371
+eb87151fd2796ff5b4bbcf1906d41d53ac6c5595,https://doi.org/10.1109/ICPR.2016.7899719
+1ddea58d04e29069b583ac95bc0ae9bebb0bed07,https://doi.org/10.1109/KSE.2015.50
+298c2be98370de8af538c06c957ce35d00e93af8,https://doi.org/10.1109/IPTA.2016.7820988
+6c58e3a8209fef0e28ca2219726c15ea5f284f4f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7899896
+6ffdbac58e15e0ff084310b0a804520ad4bd013e,https://doi.org/10.1049/iet-bmt.2015.0078
+2debdb6a772312788251cc3bd1cb7cc8a6072214,https://doi.org/10.1142/S0218001415560157
+7195cb08ba2248f3214f5dc5d7881533dd1f46d9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5673820
+0247998a1c045e601dc4d65c53282b5e655be62b,https://doi.org/10.1109/ITSC.2017.8317782
+c81b27932069e6c7016bfcaa5e861b99ac617934,http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019469
+0fc5c6f06e40014a56f492172f44c073d269e95c,https://doi.org/10.1108/17563781311301490
+faf19885431cb39360158982c3a1127f6090a1f6,https://doi.org/10.1109/BTAS.2015.7358768
+acd4280453b995cb071c33f7c9db5760432f4279,https://doi.org/10.1007/s00138-018-0907-1
+f2896dd2701fbb3564492a12c64f11a5ad456a67,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5495414
+b8048a7661bdb73d3613fde9d710bd45a20d13e7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8468792
+6359fcb0b4546979c54818df8271debc0d653257,http://doi.org/10.1007/s11704-017-6275-6
+bd243d77076b3b8fe046bd3dc6e8a02aa9b38d62,http://arxiv.org/abs/1412.0767
+9436170c648c40b6f4cc3751fca3674aa82ffe9a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6811741
+7081958a390d3033f5f33e22bbfec7055ea8d601,https://doi.org/10.1109/MCI.2015.2437318
+060f67c8a0de8fee9c1732b63ab40627993f93d0,https://doi.org/10.1007/978-3-642-33564-8
+e101bab97bce2733222db9cfbb92a82779966508,https://doi.org/10.1109/TCYB.2016.2549639
+5101368f986aa9837fdb3a71cb4299dff6f6325d,https://doi.org/10.1109/ICIP.2008.4712155
+75d7ba926ef1cc2adab6c5019afbb2f69a5ca27d,https://doi.org/10.1007/s00521-012-1042-y
+857c64060963dd8d28e4740f190d321298ddd503,http://doi.org/10.1007/s11042-015-3103-6
+1addc5c1fa80086d1ed58f71a9315ad13bd87ca2,https://doi.org/10.1007/s10044-012-0279-5
+49fdafef327069516d887d8e69b5e96c983c3dd0,https://doi.org/10.1109/DICTA.2017.8227433
+ba01dbfa29dc86d1279b2e9b9eeca1c52509bbda,http://doi.org/10.1007/s00530-017-0566-5
+e111624fb4c5dc60b9e8223abfbf7c4196d34b21,http://doi.ieeecomputersociety.org/10.1109/BIBM.2016.7822814
+4aea1213bdb5aa6c74b99fca1afc72d8a99503c6,https://doi.org/10.1109/ICDIM.2010.5664688
+b7fa06b76f4b9263567875b2988fb7bbc753e69f,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2469282
+e6d46d923f201da644ae8d8bd04721dd9ac0e73d,https://doi.org/10.1109/ISBA.2016.7477226
+38c7f80a1e7fa1bdec632042318dc7cdd3c9aad4,http://doi.org/10.1016/j.asoc.2018.03.030
+826015d9ade1637b3fcbeca071e3137d3ac1ef56,https://doi.org/10.1109/WACV.2017.84
+f95321f4348cfacc52084aae2a19127d74426047,https://doi.org/10.1109/ICMLC.2013.6890897
+11bb2abe0ca614c15701961428eb2f260e3e2eef,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8343867
+e2106bb3febb4fc8fe91f0fcbc241bcda0e56b1e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952626
+7b47dd9302b3085cd6705614b88d7bdbc8ae5c13,http://doi.org/10.1007/s11063-017-9693-4
+de92951ea021ec56492d76381a8ae560a972dd68,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6738246
+606dff86a34c67c79d93f1e536487847a5bb7002,https://doi.org/10.1109/WACV.2011.5711538
+b55e70df03d9b80c91446a97957bc95772dcc45b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8269329
+cd33b3ca8d7f00c1738c41b2071a3164ba42ea61,https://doi.org/10.1142/S0218213008003832
+a07f78124f83eef1ed3a6f54ba982664ae7ca82a,http://ieeexplore.ieee.org/document/6460481/
+76a52ebfc5afd547f8b73430ec81456cf25ddd69,http://doi.ieeecomputersociety.org/10.1109/AIPR.2014.7041914
+e6c834c816b5366875cf3060ccc20e16f19a9fc6,https://doi.org/10.1109/BTAS.2016.7791185
+cb2470aade8e5630dcad5e479ab220db94ecbf91,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8397018
+4c19690889fb3a12ec03e65bae6f5f20420b4ba4,https://doi.org/10.1049/iet-ipr.2015.0699
+23675cb2180aac466944df0edda4677a77c455cd,http://doi.ieeecomputersociety.org/10.1109/IIH-MSP.2009.142
+ec576efd18203bcb8273539fa277839ec92232a1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7994601
+bca39960ba46dc3193defe0b286ee0bea4424041,https://doi.org/10.1016/j.patrec.2009.05.018
+81f101cea3c451754506bf1c7edf80a661fa4dd1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163081
+f856532a729bd337fae1eb7dbe55129ae7788f45,http://doi.ieeecomputersociety.org/10.1109/ARTCom.2009.26
+f2d15482e7055dd5f54cf4a8a8f60d8e75af7edf,https://doi.org/10.1109/ICIP.2011.6115736
+f0dac9a55443aa39fd9832bdff202a579b835e88,https://doi.org/10.1109/JSTSP.2016.2543681
+1831800ef8b1f262c92209f1ee16567105da35d6,https://doi.org/10.1016/j.sigpro.2014.01.010
+fb8eb4a7b9b9602992e5982c9e0d6d7f7b8210ef,https://www.ncbi.nlm.nih.gov/pubmed/29994550
+8bf945166305eb8e304a9471c591139b3b01a1e1,https://doi.org/10.1109/ACCESS.2017.2756451
+655e94eccddbe1b1662432c1237e61cf13a7d57b,http://doi.ieeecomputersociety.org/10.1109/ISIP.2008.147
+b5f9306c3207ac12ac761e7d028c78b3009a219c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6093779
+a713a01971e73d0c3118d0409dc7699a24f521d6,https://doi.org/10.1109/SSCI.2017.8285381
+5c19c4c6a663fe185a739a5f50cef6a12a4635a1,https://doi.org/10.1016/j.imavis.2012.08.016
+ec5c63609cf56496715b0eba0e906de3231ad6d1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8364651
+ec1a57e609eda72b4eb60155fac12db1da31f6c0,https://doi.org/10.1007/11744085_41
+a35ed55dc330d470be2f610f4822f5152fcac4e1,https://doi.org/10.1109/ISBA.2015.7126369
+43c3b6a564b284382fdf8ae33f974f4e7a89600e,http://dl.acm.org/citation.cfm?id=3190784
+329b2781007604652deb72139d14315df3bc2771,http://doi.acm.org/10.1145/2671188.2749358
+d628aabf1a666a875e77c3d3fee857cd25891947,https://doi.org/10.1109/SMC.2016.7844663
+c4d0d09115a0df856cdb389fbccb20f62b07b14e,https://doi.org/10.1109/ICIP.2012.6466925
+cb7a743b9811d20682c13c4ee7b791ff01c62155,https://doi.org/10.1109/MMSP.2015.7340789
+6dcf418c778f528b5792104760f1fbfe90c6dd6a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984
+7f68a5429f150f9eb7550308bb47a363f2989cb3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6977004
+8dd3f05071fd70fb1c349460b526b0e69dcc65bf,https://doi.org/10.1109/TIP.2017.2726010
+262cdbc57ecf5c18756046c0d8b9aa7eb10e3b19,http://dl.acm.org/citation.cfm?id=3007694
+26bbe76d1ae9e05da75b0507510b92e7e6308c73,https://doi.org/10.1007/s00371-014-1049-8
+80d42f74ee9bf03f3790c8d0f5a307deffe0b3b7,https://doi.org/10.1109/TNNLS.2016.2522431
+34546ef7e6148d9a1fb42cfab5f0ce11c92c760a,https://doi.org/10.1016/j.jvcir.2015.09.005
+dac34b590adddef2fc31f26e2aeb0059115d07a1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8436078
+80a5afeb6968c7e736adc48bd4d5ec5b45b13f71,https://doi.org/10.1007/978-3-319-15762-7
+ed94e7689cdae87891f08428596dec2a2dc6a002,https://doi.org/10.1109/CAMSAP.2017.8313130
+4e37cd250130c6fd60e066f0c8efb3cbb778c421,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8419742
+38e7f3fe450b126367ec358be9b4cc04e82fa8c7,https://doi.org/10.1109/TIP.2014.2351265
+73ba33e933e834b815f62a50aa1a0e15c6547e83,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8368754
+8eb40d0a0a1339469a05711f532839e8ffd8126c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7890464
+ef2bb8bd93fa8b44414565b32735334fa6823b56,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8393076
+c5c53d42e551f3c8f6ca2c13335af80a882009fa,http://doi.org/10.1007/s11263-018-1088-0
+2336de3a81dada63eb00ea82f7570c4069342fb5,http://doi.acm.org/10.1145/2361407.2361428
+a60db9ca8bc144a37fe233b08232d9c91641cbb5,http://doi.org/10.1007/s11280-018-0615-9
+6856a11b98ffffeff6e2f991d3d1a1232c029ea1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771409
+cef73d305e5368ee269baff53ec20ea3ae7cdd82,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8461485
+070c8ee3876c06f9a65693e536d61097ace40417,https://doi.org/10.1109/ACPR.2013.161
+ddf577e8b7c86b1122c1bc90cba79f641d2b33fa,http://doi.acm.org/10.1145/3013971.3014026
+ff3f128f5addc6ce6b41f19f3d679282bbdaa2ee,http://doi.acm.org/10.1145/2903220.2903255
+7f26c615dd187ca5e4b15759d5cb23ab3ea9d9a9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7781761
+a75de488eaacb1dafffbe667465390f101498aaf,http://doi.ieeecomputersociety.org/10.1109/FG.2017.47
+588bed36b3cc9e2f26c39b5d99d6687f36ae1177,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771389
+1e8fd77d4717e9cb6079e10771dd2ed772098cb3,http://doi.ieeecomputersociety.org/10.1109/ICMEW.2016.7574681
+314c4c95694ff12b3419733db387476346969932,http://dl.acm.org/citation.cfm?id=3007672
+fbc591cde7fb7beb985437a22466f9cf4b16f8b1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8463262
+46e0703044811c941f0b5418139f89d46b360aa3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7883945
+ea026456729f0ec54c697198e1fd089310de4ae2,https://doi.org/10.1109/CIBIM.2013.6607917
+59fe66eeb06d1a7e1496a85f7ffc7b37512cd7e5,http://doi.ieeecomputersociety.org/10.1109/ICME.2016.7552862
+b85d953de16eecaecccaa8fad4081bd6abda9b1b,https://doi.org/10.1016/j.neuroimage.2015.12.020
+4e43408a59852c1bbaa11596a5da3e42034d9380,http://doi.org/10.1007/s11042-018-6040-3
+661c78a0e2b63cbdb9c20dcf89854ba029b6bc87,https://doi.org/10.1109/ICIP.2014.7025093
+9a98dd6d6aaba05c9e46411ea263f74df908203d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7859405
+d2a415365f997c8fe2dbdd4e06ceab2e654172f6,http://doi.acm.org/10.1145/2425333.2425361
+ec1bec7344d07417fb04e509a9d3198da850349f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8342699
+f4b5a8f6462a68e79d643648c780efe588e4b6ca,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995700
+88535dba55b0a80975df179d31a6cc80cae1cc92,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8355366
+dc1510110c23f7b509035a1eda22879ef2506e61,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909642
+6014eeb333998c2b2929657d233ebbcb1c3412c9,http://doi.acm.org/10.1145/2647868.2656406
+fa80344137c4d158bf59be4ac5591d074483157a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1470219
+5fea59ccdab484873081eaa37af88e26e3db2aed,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8263394
+7ac4fc169fffa8e962b9df94f61e2adf6bac8f97,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8453893
+8686b15802529ff8aea50995ef14079681788110,https://doi.org/10.1109/TNNLS.2014.2376936
+61262450d4d814865a4f9a84299c24daa493f66e,http://doi.org/10.1007/s10462-016-9474-x
+2724ba85ec4a66de18da33925e537f3902f21249,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751298
+d91f9e8cbf271004ef1a293401197a10a26ccd1b,https://doi.org/10.1109/SOCPAR.2015.7492801
+fc5538e60952f86fff22571c334a403619c742c3,http://ieeexplore.ieee.org/document/6460202/
+8f71c97206a03c366ddefaa6812f865ac6df87e9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8342943
+20b405d658b7bb88d176653758384e2e3e367039,https://doi.org/10.1109/IJCNN.2012.6252677
+9b78ce9fdac30864d1694a56328b3c8a96cccef5,https://doi.org/10.1089/cpb.2004.7.635
+46f48211716062744ddec5824e9de9322704dea1,http://doi.org/10.1007/s11263-016-0923-4
+d0d75a7116a76ccd98a3aeb6f6fff10ba91de1c1,https://doi.org/10.1109/TIP.2015.2502144
+8a2210bedeb1468f223c08eea4ad15a48d3bc894,http://doi.acm.org/10.1145/2513383.2513438
+fde611bf25a89fe11e077692070f89dcdede043a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7322904
+39d6f8b791995dc5989f817373391189d7ac478a,http://doi.org/10.1016/j.patrec.2015.09.015
+73f341ff68caa9f8802e9e81bfa90d88bbdbd9d2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791198
+93dcea2419ca95b96a47e541748c46220d289d77,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014993
+10cb39e93fac194220237f15dae084136fdc6740,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8457972
+17189cfedbdbd219849b8e7f8cf0293d49465f9c,http://doi.acm.org/10.1145/2393347.2396505
+84f86f8c559a38752ddfb417e58f98e1f8402f17,http://doi.ieeecomputersociety.org/10.1109/EST.2013.10
+73dcb4c452badb3ee39a2f222298b234d08c21eb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6779478
+d723ebf3288126fa8cbb10ba7e2a6308aede857c,https://doi.org/10.1117/12.968586
+9771e04f48d8a1d7ae262539de8924117a04c20d,http://doi.ieeecomputersociety.org/10.1109/CGIV.2007.70
+9806d3dc7805dd8c9c20d7222c915fc4beee7099,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6755972
+0d7fcdb99dc0d65b510f2b0b09d3d3cfed390261,https://doi.org/10.1109/IJCB.2011.6117508
+28e1982d20b6eff33989abbef3e9e74400dbf508,http://doi.org/10.1007/s11042-015-3007-5
+7343f0b7bcdaf909c5e37937e295bf0ac7b69499,http://doi.org/10.1016/j.csi.2015.06.004
+cd2bf0e1d19babe51eaa94cbc24b223e9c048ad6,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2016.2581168
+e8f4ded98f5955aad114f55e7aca6b540599236b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7047804
+ea8fa68b74ffefbe79a3576d7e4ae4365a1346ff,http://doi.ieeecomputersociety.org/10.1109/FG.2017.113
+6dcf6b028a6042a9904628a3395520995b1d0ef9,http://dl.acm.org/citation.cfm?id=3158392
+7a666a91a47da0d371a9ba288912673bcd5881e4,https://doi.org/10.1016/j.patrec.2009.05.011
+ee1f9637f372d2eccc447461ef834a9859011ec1,http://doi.org/10.1007/s11042-016-3950-9
+b351575e3eab724d62d0703e24ecae55025eef00,https://doi.org/10.1007/s10209-014-0369-9
+9e8382aa1de8f2012fd013d3b39838c6dad8fb4d,http://doi.acm.org/10.1145/3123266.3123349
+f545b121b9612707339dfdc40eca32def5e60430,http://doi.ieeecomputersociety.org/10.1109/DICTA.2010.33
+96a8f115df9e2c938453282feb7d7b9fde6f4f95,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2016.2593719
+cb522b2e16b11dde48203bef97131ddca3cdaebd,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8331979
+d9eed86e53ce5f7cba379fe77bbefb42e83c0d88,https://doi.org/10.1109/TIP.2017.2764262
+f374ac9307be5f25145b44931f5a53b388a77e49,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5339060
+5779e3e439c90d43648db107e848aeb954d3e347,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7927417
+1e2770ce52d581d9a39642b40bfa827e3abf7ea2,http://doi.acm.org/10.1145/2425333.2425362
+f58f30932e3464fc808e539897efa4ee4e7ac59f,https://doi.org/10.1109/DICTA.2016.7797023
+e47e8fa44decf9adbcdb02f8a64b802fe33b29ef,https://doi.org/10.1109/TIP.2017.2782366
+9a84588fe7e758cfbe7062686a648fab787fc32f,https://doi.org/10.1007/s11042-014-2333-3
+23edcd0d2011d9c0d421193af061f2eb3e155da3,http://doi.org/10.1007/s00371-015-1137-4
+ed82f10e5bfe1825b9fa5379a1d0017b96fa1ebf,http://doi.ieeecomputersociety.org/10.1109/ICEBE.2017.36
+eac97959f2fcd882e8236c5dd6035870878eb36b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6890147
+452ea180cf4d08d7500fc4bc046fd7141fd3d112,https://doi.org/10.1109/BTAS.2012.6374569
+f231e9408da20498ba51d93459b3fcdb7b666efb,https://doi.org/10.1016/j.micpro.2012.01.002
+aad6fc5bd7631d2e68b7a5a01ac5d578899c43e5,http://doi.ieeecomputersociety.org/10.1109/FGR.2006.80
+7782627fa2e545276996ff9e9a1686ac496df081,http://doi.acm.org/10.1145/2663204.2666276
+396b2963f0403109d92a4d4f26205f279ea79d2c,https://doi.org/10.1109/TSMCB.2005.845399
+e1d1540a718bb7a933e21339f1a2d90660af7353,http://doi.org/10.1007/s11063-018-9852-2
+c12034ca237ee330dd25843f2d05a6e1cfde1767,http://doi.ieeecomputersociety.org/10.1109/ICCV.2013.298
+a13a27e65c88b6cb4a414fd4f6bca780751a59db,https://doi.org/10.1109/SMC.2016.7844934
+46b2ecef197b465abc43e0e017543b1af61921ac,https://doi.org/10.1109/ICPR.2016.7899652
+159caaa56c2291bedbd41d12af5546a7725c58d4,https://doi.org/10.1109/ICIP.2016.7532910
+9c81d436b300494bc88d4de3ac3ec3cc9c43c161,https://doi.org/10.1007/s11042-017-5019-9
+bbc8ccd3f62615e3c0ce2c3aee5e4a223d215bbd,https://doi.org/10.1007/s11042-015-2497-5
+2f69e9964f3b6bdc0d18749b48bb6b44a4171c64,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7801496
+aad7b12936e0ced60bc0be95e8670b60b5d5ce20,https://doi.org/10.1109/URAI.2013.6677383
+8f051647bd8d23482c6c3866c0ce1959b8bd40f6,https://doi.org/10.1016/j.asoc.2017.04.041
+cc1ed45b02d7fffb42a0fd8cffe5f11792b6ea74,https://doi.org/10.1109/SIU.2016.7495874
+83f80fd4eb614777285202fa99e8314e3e5b169c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265544
+8c85ef961826575bc2c2f4da7784bc3bfcf8b188,https://doi.org/10.1109/ICIP.2015.7350871
+5f4219118556d2c627137827a617cf4e26242a6e,https://doi.org/10.1109/TMM.2017.2751143
+c65cfc9d3568c586faf18611c4124f6b7c0c1a13,https://doi.org/10.1109/ICACCI.2014.6968322
+ab0981d1da654f37620ca39c6b42de21d7eb58eb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8016651
+1617f56c86bf8ea61de62062a97961d23fcf03d3,https://doi.org/10.1007/s11390-015-1540-3
+de45bf9e5593a5549a60ca01f2988266d04d77da,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8404529
+34c1e9a6166f4732d1738db803467f7abc47ba87,https://doi.org/10.1109/WACV.2017.137
+d12bea587989fc78b47584470fd8f689b6ab81d2,https://doi.org/10.1109/TIP.2013.2246523
+f2d5bb329c09a5867045721112a7dad82ca757a3,http://doi.org/10.1007/s11042-015-3009-3
+1fb980e137b2c9f8781a0d98c026e164b497ddb1,http://dl.acm.org/citation.cfm?id=3213539
+5c526ee00ec0e80ba9678fee5134dae3f497ff08,https://doi.org/10.1109/TCE.2010.5606299
+b5ca8d4f259f35c1f3edfd9f108ce29881e478b0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099624
+453bf941f77234cb5abfda4e015b2b337cea4f17,https://doi.org/10.1007/s11042-014-2340-4
+81a80b26979b40d5ebe3f5ba70b03cb9f19dd7a5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369725
+e4754afaa15b1b53e70743880484b8d0736990ff,http://doi.org/10.1016/j.imavis.2016.01.002
+2e27667421a7eeab278e0b761db4d2c725683c3f,https://doi.org/10.1007/s11042-013-1815-z
+85e78aa374d85f9a61da693e5010e40decd3f986,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6619100
+e96cef8732f3021080c362126518455562606f2d,http://dl.acm.org/citation.cfm?id=3206058
+f557df59cd088ffb8e27506d8612d062407e96f4,https://doi.org/10.1007/s00521-014-1810-y
+0c65226edb466204189b5aec8f1033542e2c17aa,https://doi.org/10.1109/ICIP.2017.8296997
+a7f188a7161b6605d58e48b2537c18a69bd2446f,https://doi.org/10.1109/PIMRC.2011.6139898
+d92084e376a795d3943df577d3b3f3b7d12eeae5,http://doi.ieeecomputersociety.org/10.1109/FG.2017.85
+a192845a7695bdb372cccf008e6590a14ed82761,https://doi.org/10.1109/TIP.2014.2321495
+5c3eb40b06543f00b2345f3291619a870672c450,http://doi.ieeecomputersociety.org/10.1109/ICPR.2006.539
+d58fce50e9028dfc12cb2e7964f83d3b28bcc2fc,http://doi.ieeecomputersociety.org/10.1109/FG.2017.101
+b161d261fabb507803a9e5834571d56a3b87d147,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8122913
+952138ae6534fad573dca0e6b221cdf042a36412,http://doi.ieeecomputersociety.org/10.1109/DICTA.2005.38
+1de23d7fe718d9fab0159f58f422099e44ad3f0a,http://doi.org/10.1007/s11063-016-9558-2
+dcb6f06631021811091ce691592b12a237c12907,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8438999
+b5690409be6c4e98bd37181d41121adfef218537,https://doi.org/10.1109/ICIP.2008.4711920
+3bf579baf0903ee4d4180a29739bf05cbe8f4a74,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4270392
+34c2ea3c7e794215588c58adf0eaad6dc267d082,http://doi.acm.org/10.1145/3136755.3143005
+10e4172dd4f4a633f10762fc5d4755e61d52dc36,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100146
+1fa426496ed6bcd0c0b17b8b935a14c84a7ee1c2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100195
+50ee027c63dcc5ab5cd0a6cdffb1994f83916a46,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995354
+ce11b2d7905d2955c4282db5b68482edb846f29f,http://doi.acm.org/10.1145/3126686.3126705
+c3a53b308c7a75c66759cbfdf52359d9be4f552b,http://doi.ieeecomputersociety.org/10.1109/ISPAN-FCST-ISCC.2017.16
+ced7811f2b694e54e3d96ec5398e4b6afca67fc0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1605391
+9dcfa771a7e87d7681348dd9f6cf9803699b16ce,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1385984
+f2700e3d69d3cce2e0b1aea0d7f87e74aff437cd,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237686
+99a1180c3d39532efecfc5fa251d6893375c91a1,https://doi.org/10.1109/ICARCV.2012.6485394
+bf1ebcaad91c2c0ed35544159415b3ad388cc7a9,https://doi.org/10.1007/s11042-015-2665-7
+cc7c63473c5bef5ae09f26b2258691d9ffdd5f93,https://doi.org/10.1109/ICMLA.2012.17
+9d4692e243e25eb465a0480376beb60a5d2f0f13,https://doi.org/10.1109/ICCE.2016.7430617
+321db1059032b828b223ca30f3304257f0c41e4c,https://doi.org/10.1109/ICACCI.2015.7275951
+645f09f4bc2e6a13663564ee9032ca16e35fc52d,http://dl.acm.org/citation.cfm?id=3193542
+3826e47f0572ab4d0fe34f0ed6a49aa8303e0428,https://doi.org/10.1109/ACPR.2013.66
+1b4b3d0ce900996a6da8928e16370e21d15ed83e,https://doi.org/10.1109/BigDataService.2017.38
+6316a4b689706b0f01b40f9a3cef47b92bc52411,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1699534
+eba4cfd76f99159ccc0a65cab0a02db42b548d85,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751379
+abf573864b8fbc0f1c491ca60b60527a3e75f0f5,https://doi.org/10.1007/s11042-014-2204-y
+cb4d8cef8cec9406b1121180d47c14dfef373882,https://doi.org/10.1109/ICPR.2014.301
+2480f8dccd9054372d696e1e521e057d9ac9de17,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8396968
+42a5dc91852c8c14ed5f4c3b451c9dc98348bc02,http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2016.021
+0ac2e8bd5a77d83bae9b49daab2c6f321e9b7a4e,https://doi.org/10.1109/SCIS-ISIS.2016.0166
+90ae02da16b750a9fd43f8a38440f848309c2fe0,https://doi.org/10.1007/s10044-015-0499-6
+f6ebfa0cb3865c316f9072ded26725fd9881e73e,http://doi.ieeecomputersociety.org/10.1109/FGR.2006.109
+1221e25763c3be95c1b6626ca9e7feaa3b636d9a,http://doi.org/10.1007/s11042-017-4353-2
+ed273b5434013dcdb9029c1a9f1718da494a23a2,https://doi.org/10.1109/LSP.2018.2810106
+1773d65c1dc566fd6128db65e907ac91b4583bed,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8328914
+8b1fa60b9164b60d1ca2705611fab063505a3ef5,http://doi.ieeecomputersociety.org/10.1109/ICMEW.2013.6618337
+4c842fbd4c032dd4d931eb6ff1eaa2a13450b7af,https://doi.org/10.1016/j.imavis.2014.06.004
+2ac7bb3fb014d27d3928a9b4bc1bf019627e0c1a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8432363
+dc964b9c7242a985eb255b2410a9c45981c2f4d0,http://doi.org/10.1007/s10851-018-0837-6
+191b70fdd6678ef9a00fd63710c70b022d075362,https://doi.org/10.1109/ICIP.2003.1247347
+ce70dd0d613b840754dce528c14c0ebadd20ffaa,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7973159
+745d49a2ff70450113f07124c2c5263105125f58,https://doi.org/10.1109/ICPR.2016.7899972
+293d69d042fe9bc4fea256c61915978ddaf7cc92,https://doi.org/10.1007/978-981-10-7302-1_6
+7f8cef6ba2f059e465b1b23057a6dbb23fba1c63,https://doi.org/10.1109/TCSVT.2016.2539541
+2f17c0514bb71e0ca20780d71ea0d50ff0da4938,http://doi.acm.org/10.1145/1943403.1943490
+2afde207bd6f2e5fa20f3cf81940b18cc14e7dbb,https://doi.org/10.1109/TIP.2013.2255300
+a5b9c6aa52f91092b5a8ab04ed1f7b60c0ea5260,http://doi.ieeecomputersociety.org/10.1109/WI-IATW.2006.88
+013305c13cfabaea82c218b841dbe71e108d2b97,http://doi.org/10.1007/s11063-016-9554-6
+41c42cb001f34c43d4d8dd8fb72a982854e173fb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5308445
+a45e6172713a56736a2565ddea9cb8b1d94721cd,http://doi.org/10.1038/s41746-018-0035-3
+3d0b2da6169d38b56c58fe5f13342cf965992ece,https://doi.org/10.1109/ICIP.2016.7532909
+332d773b70f2f6fb725d49f314f57b8f8349a067,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.220
+2a92bda6dbd5cce5894f7d370d798c07fa8783f4,https://doi.org/10.1109/TIFS.2014.2359587
+9ac2960f646a46b701963230e6949abd9ac0a9b3,http://doi.org/10.1162/jocn_a_01174
+3e59d97d42f36fc96d33a5658951856a555e997b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163128
+61971f8e6fff5b35faed610d02ad14ccfc186c70,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373843
+ca447d6479554b27b4afbd0fd599b2ed39f2c335,https://doi.org/10.1109/ICPR.2014.459
+3dfbd17bd9caf7bd1d908ff469dec2b61e8a9548,https://doi.org/10.1109/ITSC.2015.252
+b75eecc879da38138bf3ace9195ae1613fb6e3cc,https://doi.org/10.1007/s10278-015-9808-2
+942f6eb2ec56809430c2243a71d03cc975d0a673,https://doi.org/10.1109/BigMM.2017.64
+c5eba789aeb41904aa1b03fad1dc7cea5d0cd3b6,https://doi.org/10.1109/BTAS.2017.8272773
+c3d874336eb8fae92ab335393fd801fa8df98412,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952438
+5bb4fd87fa4a27ddacd570aa81c2d66eb4721019,http://doi.org/10.1016/j.neucom.2017.07.014
+6440d6c7081efe4538a1c75e93144f3d142feb41,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.280
+2cd426f10178bd95fef3dede69ae7b67e73bb70c,https://doi.org/10.1109/ROBIO.2016.7866457
+abbc6dcbd032ff80e0535850f1bc27c4610b0d45,https://doi.org/10.1109/ICIP.2015.7350983
+945ef646679b6c575d3bbef9c6fc0a9629ac1b62,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477689
+c5437496932dcb9d33519a120821da755951e1a9,http://doi.acm.org/10.1145/2487575.2487604
+54e988bc0764073a5db2955705d4bfa8365b7fa9,http://doi.acm.org/10.1145/2522848.2531749
+fbe4f8a6af19f63e47801c6f31402f9baae5fecf,http://dl.acm.org/citation.cfm?id=2820910
+cab3c6069387461c3a9e5d77defe9a84fe9c9032,https://doi.org/10.1016/j.neucom.2016.12.056
+44855e53801d09763c1fb5f90ab73e5c3758a728,http://doi.org/10.1007/s11263-017-1018-6
+72167c9e4e03e78152f6df44c782571c3058050e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771464
+fc970d7694b1d2438dd101a146d2e4f29087963e,http://doi.ieeecomputersociety.org/10.1109/FG.2017.86
+407a26fff7fac195b74de9fcb556005e8785a4e9,http://doi.ieeecomputersociety.org/10.1109/FG.2017.29
+7e27d946d23229220bcb6672aacab88e09516d39,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7900131
+24b637c98b22cd932f74acfeecdb50533abea9ae,https://doi.org/10.1109/TIP.2015.2492819
+9285f4a6a06e975bde3ae3267fccd971d4fff98a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099853
+1b8541ec28564db66a08185510c8b300fa4dc793,https://doi.org/10.1109/LSP.2015.2499778
+aa6e8a2a9d3ed59d2ae72add84176e7b7f4b2912,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8203756
+cc05f758ccdf57d77b06b96b9d601bf2795a6cc4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6854428
+071ec4f3fb4bfe6ae9980477d208a7b12691710e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6552193
+4d1f77d9418a212c61a3c75c04a5b3884f6441ba,https://doi.org/10.1109/TIP.2017.2788196
+e8c6853135856515fc88fff7c55737a292b0a15b,http://doi.ieeecomputersociety.org/10.1109/FG.2017.46
+7fcd03407c084023606c901e8933746b80d2ad57,https://doi.org/10.1109/BTAS.2017.8272694
+aef58a54d458ab76f62c9b6de61af4f475e0f616,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6706790
+abe4c1d6b964c4f5443b0334a44f0b03dd1909f4,https://doi.org/10.1109/IJCNN.2017.7965950
+f27fd2a1bc229c773238f1912db94991b8bf389a,https://doi.org/10.1109/IVCNZ.2016.7804414
+608b01c70f0d1166c10c3829c411424d9ef550e7,https://doi.org/10.1109/CISP-BMEI.2017.8301920
+0abfb5b89e9546f8a5c569ab35b39b888e7cea46,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2011.68
+7f415aee0137acab659c664eb1dff15f7b726bdd,https://doi.org/10.1109/TCSVT.2014.2302522
+3f2a44dcf0ba3fc72b24c7f09bb08e25797398c1,https://doi.org/10.1109/IJCNN.2017.7966210
+4735fa28fa2a2af98f7b266efd300a00e60dddf7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6460647
+f49aebe58d30241f12c1d7d9f4e04b6e524d7a45,https://doi.org/10.1109/ICB.2016.7550074
+9296f4ac0180e29226d6c016b5a4d5d2964eaaf6,http://doi.org/10.1038/s41598-017-07122-x
+d5dc78eae7a3cb5c953c89376e06531d39b34836,https://doi.org/10.1007/s00521-009-0242-6
+84ec0983adb8821f0655f83b8ce47f36896ca9ee,https://doi.org/10.1109/SMC.2017.8122985
+e5ea7295b89ef679e74919bf957f58d55ad49489,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4401948
+aa5a7a9900548a1f1381389fc8695ced0c34261a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7900274
+2f1b521c29ab075a0cd9bbf56ba26ee13d5e4d76,https://doi.org/10.1109/ACPR.2015.7486607
+8562b4f63e49847692b8cb31ef0bdec416b9a87a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8128909
+041b51a81a977b5c64682c55414ad8d165c1f2ce,https://doi.org/10.1109/TCE.2014.7027339
+56f57786516dcc8ea3c0ffe877c1363bfb9981d2,https://doi.org/10.1109/CBMI.2014.6849823
+4e061a302816f5890a621eb278c6efa6e37d7e2f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909638
+c9efcd8e32dced6efa2bba64789df8d0a8e4996a,http://dl.acm.org/citation.cfm?id=2984060
+0133d1fe8a3138871075cd742c761a3de93a42ec,https://doi.org/10.1109/ICDSP.2015.7251932
+d8fbd3a16d2e2e59ce0cff98b3fd586863878dc1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952553
+e88988f4696e7e2925ed96467fde4314bfa95eff,https://doi.org/10.1016/j.neucom.2015.01.076
+bf0836e5c10add0b13005990ba019a9c4b744b06,https://doi.org/10.1109/TCE.2009.5373791
+7fe2ab9f54242ef8609ef9bf988f008c7d42407c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8382330
+01e27c91c7cef926389f913d12410725e7dd35ab,https://doi.org/10.1007/s11760-017-1140-5
+f762afd65f3b680330e390f88d4cc39485345a01,http://doi.ieeecomputersociety.org/10.1109/ACIIW.2017.8272606
+9cd4f72d33d1cedc89870b4f4421d496aa702897,https://doi.org/10.1117/1.JEI.22.2.023010
+6d4c64ca6936f868d793e1b164ddaf19243c19a7,https://doi.org/10.1109/TNNLS.2015.2499273
diff --git a/scraper/reports/misc/all_doi-3.csv b/scraper/reports/misc/all_doi-3.csv
new file mode 100644
index 00000000..9517ab77
--- /dev/null
+++ b/scraper/reports/misc/all_doi-3.csv
@@ -0,0 +1,749 @@
+915ff2bedfa0b73eded2e2e08b17f861c0e82a58,https://doi.org/10.1109/UEMCON.2017.8249000
+454bf5b99607b4418e931092476ad1798ce5efa4,https://doi.org/10.1155/2011/790598
+abfba1dc9a9991897acd0e0d3d4ef9d4aef4151c,https://doi.org/10.1109/FUZZ-IEEE.2014.6891864
+166ef5d3fd96d99caeabe928eba291c082ec75a0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237597
+8ac2d704f27a2ddf19b40c8e4695da629aa52a54,http://doi.org/10.1007/s11042-015-2945-2
+f9fb7979af4233c2dd14813da94ec7c38ce9232a,http://doi.acm.org/10.1145/3131902
+c175ebe550761b18bac24d394d85bdfaf3b7718c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1301582
+1f5725a4a2eb6cdaefccbc20dccadf893936df12,https://doi.org/10.1109/CCST.2012.6393544
+361eaef45fccfffd5b7df12fba902490a7d24a8d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8404319
+dfb8a04a80d4b0794c0679d797cb90ec101e162c,http://doi.ieeecomputersociety.org/10.1109/AVSS.2014.6918665
+5a12e1d4d74fe1a57929eaaa14f593b80f907ea3,http://doi.org/10.1007/s13735-016-0117-4
+b6ac33d2c470077fa8dcbfe9b113beccfbd739f8,http://doi.acm.org/10.1145/2509896.2509905
+35d42f4e7a1d898bc8e2d052c38e1106f3e80188,https://doi.org/10.1109/BTAS.2015.7358765
+9cb7b3b14fd01cc2ed76784ab76304132dab6ff3,https://doi.org/10.1109/ICIP.2015.7351174
+f41d7f891a1fc4569fe2df66e67f277a1adef229,https://doi.org/10.1109/ICIP.2015.7351552
+12b533f7c6847616393591dcfe4793cfe9c4bb17,https://doi.org/10.1109/TIFS.2017.2765519
+dd031dbf634103ff3c58ce87aa74ec6921b2e21d,http://doi.ieeecomputersociety.org/10.1109/ACII.2015.7344664
+1e0d92b9b4011822825d1f7dc0eba6d83504d45d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4497872
+189e5a2fa51ed471c0e7227d82dffb52736070d8,https://doi.org/10.1109/ICIP.2017.8296995
+9eaa967d19fc66010b7ade7d94eaf7971a1957f3,https://doi.org/10.1109/IWCIA.2013.6624793
+71f9861df104b90399dc15e12bbb14cd03f16e0b,http://doi.ieeecomputersociety.org/10.1109/CGIV.2009.7
+3c09d15b3e78f38618b60388ec9402e616fc6f8e,https://doi.org/10.1109/IJCNN.2010.5596793
+067fe74aec42cb82b92cf6742c7cfb4a65f16951,http://doi.acm.org/10.1145/2601434
+4f742c09ce12859b20deaa372c8f1575acfc99c9,https://doi.org/10.1016/j.neucom.2017.01.020
+e7436b8e68bb7139b823a7572af3decd96241e78,https://doi.org/10.1109/ROBIO.2011.6181560
+26727dc7347e3338d22e8cf6092e3a3c7568d763,http://doi.ieeecomputersociety.org/10.1109/FG.2015.7163088
+3ebb0209d5e99b22c67e425a67a959f4db8d1f47,https://doi.org/10.1109/ICDAR.2017.173
+78f2c8671d1a79c08c80ac857e89315197418472,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237443
+5491478ae2c58af21389ed3af21babd362511a8e,http://doi.acm.org/10.1145/2949035.2949048
+c8fb8872203ee694d95da47a1f9929ac27186d87,https://doi.org/10.1109/ICIP.2005.1530305
+47cd161546c59ab1e05f8841b82e985f72e5ddcb,https://doi.org/10.1109/ICIP.2017.8296552
+2e12c5ea432004de566684b29a8e148126ef5b70,https://doi.org/10.1007/s12193-015-0204-5
+607aebe7568407421e8ffc7b23a5fda52650ad93,https://doi.org/10.1109/ISBA.2016.7477237
+9055b155cbabdce3b98e16e5ac9c0edf00f9552f,http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78
+cacce7f4ce74e3269f5555aa6fd83e48baaf9c96,http://doi.acm.org/10.1145/2632165
+b208f2fc776097e98b41a4ff71c18b393e0a0018,http://doi.ieeecomputersociety.org/10.1109/AVSS.2003.1217900
+e52272f92fa553687f1ac068605f1de929efafc2,https://doi.org/10.1016/j.engappai.2017.06.003
+dac8fc521dfafb2d082faa4697f491eae00472c7,http://dl.acm.org/citation.cfm?id=3123423
+2d79dece7890121469f515a6e773ba0251fc2d98,https://doi.org/10.1109/ICIP.2017.8296756
+609d81ddf393164581b3e3bf11609a712ac47522,https://doi.org/10.1109/APSIPA.2017.8282300
+d89a754d7c59e025d2bfcdb872d2d061e2e371ba,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5598629
+5f9dc3919fb088eb84accb1e490921a134232466,http://doi.ieeecomputersociety.org/10.1109/WACV.2007.49
+e8aa1f207b4b0bb710f79ab47a671d5639696a56,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7362364
+10bf35bf98cfe555dfc03b5f03f2769d330e3af9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8000333
+4efd58102ff46b7435c9ec6d4fc3dd21d93b15b4,https://doi.org/10.1109/TIFS.2017.2788002
+0e5557a0cc58194ad53fab5dd6f4d4195d19ce4e,https://doi.org/10.1109/TMM.2015.2500730
+b934f730a81c071dbfc08eb4c360d6fca2daa08f,http://doi.ieeecomputersociety.org/10.1109/ICME.2015.7177496
+b388bf63c79e429dafee16c62b2732bcbea0d026,https://doi.org/10.1109/ICIP.2016.7533051
+be51854ef513362bc236b85dd6f0e2c2da51614b,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.298
+178b37392b2c6f1a167ebc1a5baa5f2f5916e4c4,https://doi.org/10.1007/s11042-013-1578-6
+dbc8ffd6457147ff06cd3f56834e3ec6dccb2057,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265396
+d3008b4122e50a28f6cc1fa98ac6af28b42271ea,http://dl.acm.org/citation.cfm?id=2806218
+eb9867f5efc98d3203ce1037f9a8814b0d15d0aa,https://doi.org/10.1109/ICIP.2014.7026008
+fe14d8177cbdb7e5b4085302e6e044f7a4c19cb2,https://doi.org/10.1109/ICSMC.2012.6377834
+c843f591658ca9dbb77944a89372a92006defe68,http://doi.org/10.1007/s11042-015-2550-4
+00eccc565b64f34ad53bf67dfaf44ffa3645adff,http://doi.ieeecomputersociety.org/10.1109/ICMEW.2013.6618328
+ddfae3a96bd341109d75cedeaebb5ed2362b903f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6837429
+1277b1b8b609a18b94e4907d76a117c9783a5373,http://doi.ieeecomputersociety.org/10.1109/ASONAM.2016.7752438
+dcdece0d0ee382e2f388dcd7f5bd9721bb7354d6,https://doi.org/10.1109/TCYB.2014.2311033
+45b9b7fe3850ef83d39d52f6edcc0c24fcc0bc73,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7888593
+ecc4be938f0e61a9c6b5111e0a99013f2edc54b9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771439
+3b8c830b200f1df8ef705de37cbfe83945a3d307,https://doi.org/10.1007/s00138-017-0887-6
+48db8bf18e2f6f19e07e88384be855c8b7ea0ead,http://doi.acm.org/10.1145/2964284.2967225
+87552622efd0e85c2a71d4d2590e53d45f021dbf,https://doi.org/10.1109/ICIP.2016.7532435
+7a6e3ed956f71b20c41fbec008b1fa8dacad31a6,http://doi.ieeecomputersociety.org/10.1109/FG.2015.7163117
+113b06e70b7eead8ae7450bafe9c91656705024c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373832
+9e2ab407ff36f3b793d78d9118ea25622f4b7434,http://doi.org/10.1007/s11042-018-5679-0
+d0b67ec62086b55f00dc461ab58dc87b85388b2b,https://doi.org/10.1109/ICIP.2014.7026206
+78e1798c3077f4f8a4df04ca35cd73f82e9a38f3,http://ieeexplore.ieee.org/document/6460640/
+95e7cf27a8ee62b63ed9d1ecb02a7016e9a680a6,https://doi.org/10.1007/s11063-013-9322-9
+aca728cab26b95fbe04ec230b389878656d8af5b,http://doi.org/10.1007/978-981-10-8258-0
+2fd007088a75916d0bf50c493d94f950bf55c5e6,https://doi.org/10.1007/978-981-10-7302-1_1
+ebc3d7f50231cdb18a8107433ae9adc7bd94b97a,http://doi.org/10.1111/cgf.13218
+9eb13f8e8d948146bfbae1260e505ba209c7fdc1,https://doi.org/10.1109/AFGR.2008.4813404
+85785ae222c6a9e01830d73a120cdac75d0b838a,https://doi.org/10.1007/978-3-319-11782-9
+2e7e1ee7e3ee1445939480efd615e8828b9838f8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5643167
+0c6a18b0cee01038eb1f9373c369835b236373ae,https://doi.org/10.1007/s11042-017-4359-9
+539f55c0e2501c1d86791c8b54b225d9b3187b9c,https://doi.org/10.1109/TIP.2017.2738560
+4ee94572ae1d9c090fe81baa7236c7efbe1ca5b4,https://doi.org/10.1109/DICTA.2017.8227494
+4268ae436db79c4eee8bc06e9475caff3ff70d57,http://doi.ieeecomputersociety.org/10.1109/FG.2017.146
+11f8d0a54e55c5e6537eef431cd548fa292ef90b,https://doi.org/10.1016/j.neucom.2017.05.042
+9f62ac43a1086c22b9a3d9f192c975d1a5a4b31f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4426825
+52e270ca8f5b53eabfe00a21850a17b5cc10f6d5,https://doi.org/10.1109/ROBIO.2013.6739643
+6fdf2f4f7ae589af6016305a17d460617d9ef345,https://doi.org/10.1109/ICIP.2015.7350767
+5e8de234b20f98f467581f6666f1ed90fd2a81be,http://doi.acm.org/10.1145/2647868.2655042
+3c7825dcf5a027bd07eb0fe4cce23910b89cf050,http://doi.acm.org/10.1145/2987378
+d9218c2bbc7449dbccac351f55675efd810535db,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5699141
+cb992fe67f0d4025e876161bfd2dda467eaec741,https://doi.org/10.1109/IPTA.2015.7367144
+1b29f23f3517ac5bbe9bf5e80cda741b61bb9b12,https://doi.org/10.1016/j.patcog.2017.01.007
+988849863c3a45bcedacf8bd5beae3cc9210ce28,http://doi.ieeecomputersociety.org/10.1109/TPDS.2016.2539164
+113cd9e5a4081ce5a0585107951a0d36456ce7a8,https://doi.org/10.1109/ICSMC.2006.384939
+947ee3452e4f3d657b16325c6b959f8b8768efad,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952677
+07a31bd7a0bd7118f8ac0bc735feef90e304fb08,http://doi.org/10.1007/s11042-015-3120-5
+61b22b1016bf13aca8d2e57c4e5e004d423f4865,https://doi.org/10.1109/TCYB.2016.2526630
+b5fdd7778503f27c9d9bf77fab193b475fab6076,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373891
+609c35a6fa80af8b2e4ce46b1b16ec36578fd07f,https://doi.org/10.1155/2014/950349
+6345c0062885b82ccb760c738a9ab7fdce8cd577,https://doi.org/10.1109/EMBC.2016.7590729
+bc66685acc64fa3c425c0ee6c443d3fa87db7364,https://doi.org/10.1109/TMM.2013.2279658
+d8c9bad8d07ae4196027dfb8343b9d9aefb130ff,https://doi.org/10.1007/s00138-017-0848-0
+ad4d1ecf5c5473c050e11f6876ce148de1c8920a,https://doi.org/10.1109/IJCNN.2017.7965886
+c535d4d61aa0f1d8aadb4082bdcc19f4cbdf0eaf,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237344
+5fce9d893a40c4e0f2ae335b2e68bfd02f1cb2c6,https://doi.org/10.1109/ICTAI.2012.40
+36b13627ee8a5a8cd04645213aabfa917bbd32f5,https://doi.org/10.1109/TCSVT.2016.2602812
+7c13fa0c742123a6a927771ce67da270492b588c,http://doi.acm.org/10.1145/3152114
+d8526863f35b29cbf8ac2ae756eaae0d2930ffb1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265439
+59b83666c1031c3f509f063b9963c7ad9781ca23,http://dl.acm.org/citation.cfm?id=2830590
+e5fbaeddbf98c667ec7c5575bda2158a36b55409,http://doi.ieeecomputersociety.org/10.1109/EMS.2011.25
+10df1d4b278da991848fb71b572f687bd189c10e,https://doi.org/10.1109/ICPR.2016.7899739
+0874734e2af06883599ed449532a015738a1e779,https://doi.org/10.1007/s10115-013-0702-2
+09138ad5ad1aeef381f825481d1b4f6b345c438c,https://doi.org/10.1109/IIH-MSP.2012.41
+c631a31be2c793d398175ceef7daff1848bb6408,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8466318
+78cec49ca0acd3b961021bc27d5cf78cbbbafc7e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995556
+4014e8c1a1b49ad2b9b2c45c328ec9f1fd56f676,https://doi.org/10.1109/IJCNN.2017.7966191
+5375a3344017d9502ebb4170325435de3da1fa16,https://doi.org/10.1007/978-3-642-37444-9
+92de9a54515f4ac8cc8e4e6b0dfab20e5e6bb09d,https://doi.org/10.1109/ICIP.2016.7533062
+1a81c722727299e45af289d905d7dcf157174248,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995466
+65475ce4430fb524675ebab6bcb570dfa07e0041,https://doi.org/10.1109/ISR.2013.6695696
+cefaad8241bceb24827a71bf7c2556e458e57faa,https://doi.org/10.1109/TIP.2013.2264676
+18855be5e7a60269c0652e9567484ce5b9617caa,http://doi.org/10.1007/s11042-017-4579-z
+8f713e3c5b6b166c213e00a3873f750fb5939c9a,https://doi.org/10.1109/EUSIPCO.2015.7362563
+ff8db3810f927506f3aa594d66d5e8658f3cf4d5,http://doi.acm.org/10.1145/3078971.3079026
+a89cbc90bbb4477a48aec185f2a112ea7ebe9b4d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265434
+39d0de660e2116f32088ce07c3376759d0fdaff5,https://doi.org/10.1109/ICPR.2016.7900043
+34c062e2b8a3f6421b9f4ff22f115a36d4aba823,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7872382
+de878384f00b6ce1caa66ac01735fb4b63ad0279,https://doi.org/10.1049/iet-ipr.2014.0670
+f888c165f45febf3d17b8604a99a2f684d689cbc,http://doi.ieeecomputersociety.org/10.1109/CIT.2004.1357196
+73d53a7c27716ae9a6d3484e78883545e53117ae,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8371978
+35d272877b178aa97c678e3fcbb619ff512af4c2,https://doi.org/10.1109/SMC.2017.8122743
+88c21e06ed44da518a7e346fce416efedc771704,https://doi.org/10.1109/ICIP.2015.7351455
+bc6a7390135bf127b93b90a21b1fdebbfb56ad30,https://doi.org/10.1109/TIFS.2017.2766039
+e9d77a85bc2fa672cc1bd10258c896c8d89b41e8,https://doi.org/10.1109/ICTAI.2012.25
+85ec86f8320ba2ed8b3da04d1c291ce88b8969c0,http://dl.acm.org/citation.cfm?id=3264947
+96fbadc5fa1393d59ce0b8fd3d71aebc1fe35b40,https://doi.org/10.1109/ICIP.2016.7532959
+ebeb0546efeab2be404c41a94f586c9107952bc3,http://doi.acm.org/10.1145/2733373.2806290
+4da4e58072c15904d4ce31076061ebd3ab1cdcd5,http://doi.org/10.1007/s00371-018-1477-y
+9b4d2cd2e5edbf5c8efddbdcce1db9a02a853534,https://doi.org/10.1016/j.neucom.2016.02.063
+8b4124bb68e5b3e6b8b77888beae7350dc594a40,https://doi.org/10.1109/ICSMC.2005.1571395
+04c07ecaf5e962ac847059ece3ae7b6962b4e5c4,http://doi.acm.org/10.1145/2993148.2997631
+8aff9c8a0e17be91f55328e5be5e94aea5227a35,https://doi.org/10.1109/TNNLS.2012.2191620
+eb240521d008d582af37f0497f12c51f4bab16c8,https://doi.org/10.1023/A:1012365806338
+9b6d9f0923e1d42c86a1154897b1a9bd7ba6716c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7114333
+f180cb7111e9a6ba7cfe0b251c0c35daaef4f517,https://doi.org/10.1109/TIP.2015.2417502
+6486b36c6f7fd7675257d26e896223a02a1881d9,https://doi.org/10.1109/THMS.2014.2376874
+0d9815f62498db21f06ee0a9cc8b166acc93888e,https://doi.org/10.1016/j.neucom.2007.12.018
+b85c198ce09ffc4037582a544c7ffb6ebaeff198,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100113
+e060e32f8ad98f10277b582393df50ac17f2836c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099600
+c59b62864a6d86eead075c88137a87070a984550,https://doi.org/10.1109/IVCNZ.2015.7761546
+1b9976fea3c1cf13f0a102a884f027d9d80a14b3,https://doi.org/10.1109/ROMAN.2014.6926354
+1902288256839539aeb5feb3e1699b963a15aa1a,https://doi.org/10.1109/IJCNN.2016.7727435
+c270aff2b066ee354b4fe7e958a40a37f7bfca45,https://doi.org/10.1109/WCSP.2017.8170910
+49358915ae259271238c7690694e6a887b16f7ed,http://doi.org/10.1007/BF02884429
+95b5296f7ec70455b0cf1748cddeaa099284bfed,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8443886
+dc84d3f29c52e6d296b5d457962c02074aa75d0f,https://doi.org/10.1109/TIP.2016.2580939
+047d3cb2a6a9628b28cac077b97d95b04ca9044c,https://doi.org/10.1109/FG.2011.5771332
+387b54cf6c186c12d83f95df6bd458c5eb1254ee,https://doi.org/10.1109/VCIP.2017.8305123
+6f0caff7c6de636486ff4ae913953f2a6078a0ab,http://doi.ieeecomputersociety.org/10.1109/ICME.2010.5583081
+2c06781ba75d51f5246d65d1acf66ab182e9bde6,https://doi.org/10.1016/j.imavis.2016.11.002
+1890470d07a090e7b762091c7b9670b5c2e1c348,http://doi.ieeecomputersociety.org/10.1109/CRV.2016.20
+cdcfc75f54405c77478ab776eb407c598075d9f8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7410829
+e3d76f1920c5bf4a60129516abb4a2d8683e48ae,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014907
+c362116a358320e71fb6bc8baa559142677622d2,http://doi.org/10.1016/j.patcog.2011.07.009
+b856d8d6bff745bb1b4beb67e4b821fc20073840,https://doi.org/10.1109/ICMLC.2016.7872935
+29322b9a3744afaa5fc986b805d9edb6ff5ea9fe,https://doi.org/10.1109/TNNLS.2011.2178037
+ccb54fc5f263a8bc2a8373839cb6855f528f10d3,http://doi.org/10.1016/j.patcog.2015.11.008
+e52f57a7de675d14aed28e5d0f2f3c5a01715337,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8319987
+5d9f468a2841ea2f27bbe3ef2c6fe531d444be68,https://doi.org/10.1109/GlobalSIP.2017.8309167
+53873fe7bbd5a2d171e2b1babc9cacaad6cabe45,https://doi.org/10.1109/TCYB.2015.2417211
+c553f0334fcadf43607925733685adef81fbe406,https://doi.org/10.1109/ICSIPA.2017.8120636
+b3ad7bc128b77d9254aa38c5e1ead7fa10b07d29,http://dl.acm.org/citation.cfm?id=3206041
+559645d2447004355c83737a19c9a811b45780f1,https://doi.org/10.1109/ICB.2015.7139114
+b6620027b441131a18f383d544779521b119c1aa,http://doi.org/10.1016/j.patcog.2013.04.013
+159b1e3c3ed0982061dae3cc8ab7d9b149a0cdb1,https://doi.org/10.1109/TIP.2017.2694226
+2df4d0c06f4f68060cecbbb8e2088d9c6b20d04f,https://doi.org/10.1109/ICIP.2014.7026056
+ad77056780328bdcc6b7a21bce4ddd49c49e2013,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8398021
+b63b6ed78b39166d87d4c56f8890873aa65976a2,https://doi.org/10.1109/ICRA.2011.5979953
+26973cf1552250f402c82e9a4445f03fe6757b58,http://doi.acm.org/10.1145/3126686.3130239
+5df17c81c266cf2ebb0778e48e825905e161a8d9,https://doi.org/10.1109/TMM.2016.2520091
+660c99ac408b535bb0468ab3708d0d1d5db30180,http://doi.org/10.1007/s11042-015-3083-6
+784a83437b3dba49c0d7ccc10ac40497b84661a5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100224
+7831ab4f8c622d91974579c1ff749dadc170c73c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6712699
+3ce37af3ac0ed2eba08267a3605730b2e0433da5,https://doi.org/10.1109/TIP.2016.2609811
+fe556c18b7ab65ceb57e1dd054a2ca21cefe153c,http://doi.ieeecomputersociety.org/10.1109/WACV.2015.145
+2f43b614607163abf41dfe5d17ef6749a1b61304,https://doi.org/10.1109/TIFS.2014.2361479
+dd0086da7c4efe61abb70dd012538f5deb9a8d16,http://doi.org/10.1007/s11704-016-5024-6
+df7af280771a6c8302b75ed0a14ffe7854cca679,http://doi.ieeecomputersociety.org/10.1109/ICMEW.2017.8026293
+29a5d38390857e234c111f8bb787724c08f39110,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813387
+4f3b652c75b1d7cf4997e0baaef2067b61e3a79b,http://doi.ieeecomputersociety.org/10.1109/ICME.2016.7552910
+59b6ff409ae6f57525faff4b369af85c37a8dd80,http://doi.ieeecomputersociety.org/10.1109/ISM.2017.28
+7c457c9a658327af6f6490729b4cab1239c22005,https://doi.org/10.1109/ACCESS.2017.2672829
+56fa0872ed73f7acfbfe83677fecb2dbc6eaa2fe,https://doi.org/10.1007/s11554-007-0031-3
+a03448488950ee5bf50e9e1d744129fbba066c50,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8367180
+fcd3d557863e71dd5ce8bcf918adbe22ec59e62f,http://doi.acm.org/10.1145/2502081.2502148
+6c28b3550f57262889fe101e5d027912eb39564e,https://doi.org/10.1109/LSP.2014.2338911
+a85f691c9f82a248aa2c86d4a63b9036d6cf47ab,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8423530
+b8f64a94f536b46ef34a0223272e02f9be785ef9,https://doi.org/10.1109/EMBC.2012.6346590
+31a36014354ee7c89aa6d94e656db77922b180a5,http://doi.acm.org/10.1145/2304496.2304509
+c2474202d56bb80663e7bece5924245978425fc1,https://doi.org/10.1109/ICIP.2016.7532771
+ef3a0b454370991a9c18ac7bfd228cf15ad53da0,https://doi.org/10.1109/ICNC.2010.5582886
+e908ce44fa94bb7ecf2a8b70cb5ec0b1a00b311a,http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019548
+cc9d068cf6c4a30da82fd6350a348467cb5086d4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411204
+07dc9f3b34284cc915dea7575f40ef0c04338126,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2016.2537337
+6e7ffd67329ca6027357a133437505bc56044e65,https://doi.org/10.1109/IJCNN.2014.6889754
+ce75deb5c645eeb08254e9a7962c74cab1e4c480,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373839
+94806f0967931d376d1729c29702f3d3bb70167c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780581
+ea2b3efd4d317ebaffaf7dc8c62db5ff1eab0e1b,https://doi.org/10.1109/FRUCT-ISPIT.2016.7561522
+87806c51dc8c1077953178367dcf5c75c553ce34,https://doi.org/10.1109/ICMLA.2015.146
+20eabf10e9591443de95b726d90cda8efa7e53bb,https://doi.org/10.1007/s11390-017-1740-0
+d9deafd9d9e60657a7f34df5f494edff546c4fb8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100124
+2b0d14dbd079b3d78631117b1304d6c1579e1940,https://doi.org/10.1007/s11063-016-9524-z
+5551a03353f571b552125dd4ee57301b69a10c46,https://doi.org/10.1016/j.neucom.2015.09.083
+268c4bb54902433bf00d11391178a162e5d674c9,https://doi.org/10.1109/CVPRW.2010.5543261
+e27b2cabdfdd6bf3ffb3ebce1b4c55adb1e80c8f,http://doi.ieeecomputersociety.org/10.1109/ICPR.2010.225
+f88ce52c5042f9f200405f58dbe94b4e82cf0d34,https://doi.org/10.1109/TNNLS.2015.2508025
+a6793de9a01afe47ffbb516cc32f66625f313231,http://doi.acm.org/10.1145/2939672.2939853
+ffc81ced9ee8223ab0adb18817321cbee99606e6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791157
+3a9fbd05aaab081189a8eea6f23ed730fa6db03c,https://doi.org/10.1109/ICASSP.2013.6638305
+2facf3e85240042a02f289a0d40fee376c478d0f,https://doi.org/10.1109/BTAS.2010.5634544
+b82f89d6ef94d26bf4fec4d49437346b727c3bd4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6894202
+195b61470720c7faa523e10e68d0c8d8f27d7c7a,http://doi.ieeecomputersociety.org/10.1109/CVPR.2011.5995618
+134cea33099cafc6615e57437e29d7c3906a2b48,http://doi.ieeecomputersociety.org/10.1109/ICETET.2010.80
+18dd3867d68187519097c84b7be1da71771d01a3,http://doi.acm.org/10.1145/2448556.2448563
+52af7625f7e7a0bd9f9d8eeafd631c4d431e67e7,http://doi.org/10.1007/s00371-018-1585-8
+0ba5369c5e1e87ea172089d84a5610435c73de00,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8347111
+2e5d173ee0d1d7f88c335ade6a7b879b2d987ab4,https://doi.org/10.1109/ICASSP.2015.7178367
+18010284894ed0edcca74e5bf768ee2e15ef7841,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780493
+3288e16c62a215254e2ed7c39675482b356c3bef,https://doi.org/10.1109/SACI.2016.7507341
+cbe1df2213a88eafc5dcaf55264f2523fe3ec981,http://doi.ieeecomputersociety.org/10.1109/WACV.2015.34
+322488c4000c686e9bfb7514ccdeacae33e53358,http://doi.acm.org/10.1145/2671188.2749301
+b1891010a0722117c57e98809e1f2b26cd8e9ee3,http://doi.acm.org/10.1145/2330784.2331026
+5d2e5833ca713f95adcf4267148ac2ccf2318539,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6121744
+ae7604b1840753e9c2e1ab7a97e02f91a9d81860,https://doi.org/10.1007/s10586-016-0535-3
+81d232e1f432db7de67baf4f30f240c62d1a9055,https://doi.org/10.1109/ICIP.2017.8296405
+25960f0a2ed38a89fa8076a448ca538de2f1e183,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411220
+c6382de52636705be5898017f2f8ed7c70d7ae96,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139089
+ada063ce9a1ff230791c48b6afa29c401a9007f1,http://doi.org/10.1007/978-3-319-97909-0
+d116bac3b6ad77084c12bea557d42ed4c9d78433,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7471886
+0e37d70794d5ccfef8b4cc22b4203245f33eec6e,https://doi.org/10.1109/ICIP.2010.5653034
+76640cb1a683a479ce2e0d6681d821ff39126d63,https://doi.org/10.1109/IJCNN.2011.6033408
+dab795b562c7cc270c9099b925d685bea0abe82a,http://doi.ieeecomputersociety.org/10.1109/TKDE.2014.2382599
+5f0d4657eab4152a1785ee0a25b5b499cd1163ec,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6853687
+e26a7e343fe109e2b52d1eeea5b02dae836f3502,https://doi.org/10.1109/ACCESS.2017.2676238
+ffea4184a0b24807b5f4ed87f9a985c2a27027d9,https://doi.org/10.1007/s00530-012-0297-6
+8ab465c1a131ee4bee6ac0a0b19dfe68f5dcdcc4,http://doi.ieeecomputersociety.org/10.1109/CSSE.2008.575
+fe5d6c65e51386f4d36f7434fe6fcd9494fe9361,https://doi.org/10.1109/ACCESS.2017.2730281
+939f9fa056f8be445da19b43da64bd2405851a43,https://doi.org/10.1109/ICSMC.2007.4413713
+ed023651e31cdbcaa5ef2ee1d71ddbc2906c2f76,https://doi.org/10.1109/LSP.2010.2093600
+bdd203bcd3c41c336c5635fb026a78279d75b4be,https://doi.org/10.1109/ICPR.2016.7899761
+b50edfea790f86373407a964b4255bf8e436d377,http://doi.acm.org/10.1145/3136755.3143008
+e82a0976db908e6f074b926f58223ac685533c65,https://doi.org/10.1007/s11042-015-2848-2
+fadbb3a447d697d52771e237173b80782caaa936,https://doi.org/10.1007/s00530-012-0290-0
+b784bb1d2b2720dac8d4b92851a8d6360c35b0b2,https://doi.org/10.1109/ICDM.2016.0041
+8c50869b745fc094a4fb1b27861934c3c14d7199,https://doi.org/10.1109/EMBC.2016.7591826
+1a0e1ba4408d12f8a28049da0ff8cad4f91690d5,https://doi.org/10.1007/s12559-016-9445-1
+96d34c1a749e74af0050004162d9dc5132098a79,https://doi.org/10.1109/TNN.2005.844909
+d62d82c312c40437bc4c1c91caedac2ba5beb292,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8461322
+261a80216dda39b127d2b7497c068ec7e0fdf183,https://doi.org/10.1109/TCSVT.2013.2265571
+f4003cbbff3b3d008aa64c76fed163c10d9c68bd,https://doi.org/10.1016/j.neucom.2016.08.055
+cbc2de9b919bc63590b6ee2dfd9dda134af45286,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477561
+d79530e1745b33f3b771d0b38d090b40afc04191,https://doi.org/10.1007/s11042-015-2485-9
+fa32b29e627086d4302db4d30c07a9d11dcd6b84,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354123
+cce332405ce9cd9dccc45efac26d1d614eaa982d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5597533
+09f9409430bba2afb84aa8214dbbb43bfd4cf056,https://doi.org/10.1109/TNN.2006.883012
+e084b0e477ee07d78c32c3696ea22c94f5fdfbec,https://doi.org/10.1109/ICIP.2013.6738565
+5b5b9c6c67855ede21a60c834aea5379df7d51b7,http://hdl.handle.net/10044/1/45280
+fcf393a90190e376b617cc02e4a473106684d066,http://doi.org/10.1007/s10044-015-0507-x
+1cb0c11620bde2734c1a428c789158ffff0d6c7b,http://doi.ieeecomputersociety.org/10.1109/BigMM.2016.62
+440b94b1624ca516b07e72ea8b3488072adc5e26,https://doi.org/10.1109/ITSC.2015.153
+565f7c767e6b150ebda491e04e6b1de759fda2d4,https://doi.org/10.1016/j.patcog.2016.11.023
+6ec275755f8776b620d0a4550be0e65caf2bc87a,https://doi.org/10.1109/IS.2016.7737496
+2983cf95743be82671a71528004036bd19172712,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7915734
+88399c7fa890f1252178cd5e4979971509bd904f,https://doi.org/10.1142/S0219878906000915
+997b9ffe2f752ba84a66730cfd320d040e7ba2e2,http://dl.acm.org/citation.cfm?id=2967199
+57b7325b8027745b130490c8f736445c407f4c4c,http://doi.ieeecomputersociety.org/10.1109/WI-IAT.2012.27
+ab80582807506c0f840bd1ba03a8b84f8ac72f79,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8462326
+b1efefcc9a5d30be90776571a6cc0071f3679753,https://doi.org/10.1109/ROBIO.2016.7866471
+3cd380bd0f3b164b44c49e3b01f6ac9798b6b6f9,http://doi.org/10.1007/s00371-016-1323-z
+1da1299088a6bf28167c58bbd46ca247de41eb3c,https://doi.org/10.1109/ICASSP.2002.5745055
+982fcead58be419e4f34df6e806204674a4bc579,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6613012
+b9d68dbeb8e5fdc5984b49a317ea6798b378e5ae,http://doi.acm.org/10.1145/2733373.2807962
+57ba4b6de23a6fc9d45ff052ed2563e5de00b968,https://doi.org/10.1109/ICIP.2017.8296993
+bec0c33d330385d73a5b6a05ad642d6954a6d632,http://doi.org/10.1007/s11042-017-4491-6
+9ab963e473829739475b9e47514f454ab467a5af,http://doi.ieeecomputersociety.org/10.1109/FG.2017.33
+187f3ee3bc50a1f2471edc80d707e4fa1cac5b0b,https://doi.org/10.1109/LSP.2015.2437883
+9f3c9e41f46df9c94d714b1f080dafad6b4de1de,https://doi.org/10.1109/ICT.2017.7998260
+aad4c94fd55d33a3f3a5377bbe441c9474cdbd1e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7777820
+fd5376fcb09001a3acccc03159e8ff5801129683,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373899
+cba090a5bfae7dd8a60a973259f0870ed68c4dd3,http://doi.ieeecomputersociety.org/10.1109/ISM.2017.22
+d9c0310203179d5328c4f1475fa4d68c5f0c7324,http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI-T.2017.11
+b5ae8b69677fb962421fe7072f1e842e71f3bea5,http://doi.ieeecomputersociety.org/10.1109/ACII.2017.8273641
+30044dd951133187cb8b57e53a22cf9306fa7612,https://doi.org/10.1109/WACV.2017.52
+4ed6c7740ba93d75345397ef043f35c0562fb0fd,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6117516
+0b58b3a5f153f653c138257426bf8d572ae35a67,https://doi.org/10.1109/SMC.2016.7844481
+5e87f5076952cd442718d6b4addce905bae1a1a4,https://doi.org/10.1109/ICMLC.2016.7872938
+f4465454811acb2021a46d84d94fc88e2dda00a6,https://doi.org/10.1007/s11042-007-0184-x
+568ced900cbf7437c9e87b60a17e16f0c1e0c442,https://doi.org/10.1109/CCECE.2012.6335026
+2ee1ba1c3d4797fdae46d3d5f01db7ef5903dadd,https://doi.org/10.1016/j.neucom.2015.07.031
+36bb93c4f381adca267191811abb8cc7812363f9,https://doi.org/10.1109/CISP-BMEI.2017.8301987
+8c4042191431e9eb43f00b0f14c23765ab9c6688,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7532956
+fe50efe9e282c63941ec23eb9b8c7510b6283228,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7314574
+dbcfefa92edab8d1ffe8bc1cc66ad80fb13d2b6a,https://doi.org/10.1007/s00521-010-0519-9
+dc107e7322f7059430b4ef4991507cb18bcc5d95,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995338
+47109343e502a4097cb7efee54bc5fbb14598c05,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.182
+1dae2f492d3ca2351349a73df6ee8a99b05ffc30,https://doi.org/10.1137/110842570
+0c2370e156a4eb8d84a5fdb049c5a894c3431f1c,https://doi.org/10.1109/CIBIM.2014.7015437
+a180dc9766490416246e7fbafadca14a3c500a46,https://doi.org/10.1016/S0167-8655(03)00112-0
+23ecc496eaa238ac884e6bae5763f6138a9c90a3,https://doi.org/10.1109/ICB.2016.7550085
+c84991fe3bf0635e326a05e34b11ccaf74d233dc,https://doi.org/10.1016/j.neucom.2016.08.069
+cceec87bad847b9b87178bde8ce5cce6bf1a8e99,https://doi.org/10.1109/RIISS.2014.7009163
+834736698f2cc5c221c22369abe95515243a9fc3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6996249
+8fb2ec3bbd862f680be05ef348b595e142463524,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1699880
+356a144d2aa5cc5e74d178dae3963003871aa8a1,https://doi.org/10.1007/978-3-319-27671-7_41
+9077365c9486e54e251dd0b6f6edaeda30ae52b9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373910
+a6ab23f67d85da26592055c0eac4c34f05c26519,http://doi.ieeecomputersociety.org/10.1109/ICTAI.2006.15
+344c0917c8d9e13c6b3546da8695332f86b57bd3,https://doi.org/10.1109/ICIP.2017.8296715
+bf3bf5400b617fef2825eb987eb496fea99804b9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8461385
+7d7b036ed01765c9473d695f029142128d442aaa,https://doi.org/10.1109/TIP.2018.2791180
+c291f0e29871c8b9509d1a2876c3e305839ad4ac,https://doi.org/10.1109/ICARCV.2014.7064432
+8e272978dd1500ce6e4c2ef5e91d4332078ff757,https://doi.org/10.1007/11848035_5
+15fbb5fc3bdd692a6b2dd737cce7f39f7c89a25c,https://doi.org/10.1109/TMM.2011.2167317
+afdbbc5c84eb4e535c7c478b5227c0138b57af64,http://doi.ieeecomputersociety.org/10.1109/TMC.2016.2593919
+854b1f0581f5d3340f15eb79452363cbf38c04c8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7903648
+cb8382f43ce073322eba82809f02d3084dad7969,http://dl.acm.org/citation.cfm?id=3232664
+26ebe98753acec806b7281d085110c06d9cd1e16,http://doi.ieeecomputersociety.org/10.1109/FG.2017.22
+30cace74a7d51e9a928287e25bcefb968c49f331,http://doi.ieeecomputersociety.org/10.1109/ACII.2015.7344634
+373c4d6af0ee233f0d669c3955c3a3ef2a009638,https://doi.org/10.1109/APSIPA.2015.7415420
+647b2e162e9c476728172f62463a8547d245cde3,https://doi.org/10.1109/ICPR.2016.7899898
+d4353952a408e1eae8c27a45cc358976d38dde00,https://doi.org/10.1007/s00138-014-0594-5
+6d70344ae6f6108144a15e9debc7b0be4e3335f1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8390318
+761304bbd259a9e419a2518193e1ff1face9fd2d,https://doi.org/10.1007/978-3-642-33885-4_57
+bd9e0b6a90b51cc19b65f51dacd08ce1a7ccaac5,https://doi.org/10.1109/VSMM.2014.7136653
+7360a2adcd6e3fe744b7d7aec5c08ee31094dfd4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373833
+32f62da99ec9f58dd93e3be667612abcf00df16a,http://doi.org/10.1007/s11042-017-5583-z
+24b5ea4e262e22768813e7b6581f60e4ab9a8de7,https://doi.org/10.1109/TIFS.2018.2807791
+f28ef0a61a45a8b9cd03aa0ca81863e1d54a31d1,https://doi.org/10.1109/VCIP.2016.7805483
+b2add9fad0bcf7bf0660f99f389672cdf7cc6a70,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.226
+cbe021d840f9fc1cb191cba79d3f7e3bbcda78d3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406479
+e96ce25d11296fce4e2ecc2da03bd207dc118724,https://doi.org/10.1007/s00138-007-0095-x
+b1534888673e6119f324082246016d28eba249aa,https://doi.org/10.1109/MMSP.2017.8122229
+013d0acff1e5410fd9f6e15520d16f4ea02f03f6,https://doi.org/10.1109/TMM.2015.2477681
+dc5d04d34b278b944097b8925a9147773bbb80cc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354149
+9b8830655d4a5a837e3ffe835d14d6d71932a4f2,https://doi.org/10.1109/TSMCB.2011.2169452
+a6e4f924cf9a12625e85c974f0ed136b43c2f3b5,http://doi.org/10.1007/s11042-017-4572-6
+d8288322f32ee4501cef5a9b667e5bb79ebd7018,https://doi.org/10.1016/j.patcog.2011.12.018
+36219a3196aac2bd149bc786f083957a6e6da125,https://doi.org/10.1016/j.jvcir.2015.12.003
+0bf1f999a16461a730dd80e3a187d0675c216292,http://doi.ieeecomputersociety.org/10.1109/CW.2017.26
+58eb9174211d58af76023ce33ee05769de57236c,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2016.2636827
+cf54e9776d799aa183d7466094525251d66389a4,https://doi.org/10.1109/ICCE-Berlin.2017.8210589
+9888edfb6276887eb56a6da7fe561e508e72a517,http://dl.acm.org/citation.cfm?id=3243904
+633c851ebf625ad7abdda2324e9de093cf623141,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961727
+556b05ab6eff48d32ffbd04f9008b9a5c78a4ad7,http://dl.acm.org/citation.cfm?id=2926713
+c36f3cabeddce0263c944e9fe4afd510b5bae816,https://doi.org/10.1109/DICTA.2017.8227399
+9fc993aeb0a007ccfaca369a9a8c0ccf7697261d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7936534
+9f428db0d3cf26b9b929dd333a0445bcc7514cdf,https://doi.org/10.1016/j.cviu.2010.11.015
+9efdb73c6833df57732b727c6aeac510cadb53fe,http://dl.acm.org/citation.cfm?id=3184071
+20a0f71d2c667f3c69df18f097f2b5678ac7d214,http://doi.org/10.1007/s10055-018-0357-0
+e03f69bad7e6537794a50a99da807c9df4ff5186,http://doi.acm.org/10.1145/2708463.2709060
+e57e1dce81e888eb07054923602e35bfb5ef3eb8,https://doi.org/10.1109/IROS.2012.6385544
+06518858bd99cddf9bc9200fac5311fc29ac33b4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8392777
+e98551055bdcf8e25e07f4ffdbf39d0a4a57bffc,https://doi.org/10.1109/ICPR.2014.440
+dd8a851f2a0c63bb97e33aaff1841695f601c863,https://doi.org/10.1109/BTAS.2014.6996260
+778c1e95b6ea4ccf89067b83364036ab08797256,https://doi.org/10.1109/TIFS.2012.2224866
+beae35eb5b2c7f63dfa9115f07b5ba0319709951,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163096
+bd8d579715d58405dfd5a77f32920aafe018fce4,http://doi.org/10.1016/j.imavis.2008.08.005
+8da32ff9e3759dc236878ac240728b344555e4e9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014820
+41c56c69b20b3f0b6c8a625009fc0a4d317e047a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5720366
+31697737707d7f661cbc6785b76cf9a79fee3ccd,http://doi.ieeecomputersociety.org/10.1109/FG.2017.100
+8fee7b38358815e443f8316fa18768d76dba12e3,http://doi.acm.org/10.1145/2063576.2063676
+4ffd744a5f079c2d65f36e3ee0979b978f522a13,http://doi.ieeecomputersociety.org/10.1109/SITIS.2009.15
+c7cd490e43ee4ff81e8f86f790063695369c2830,https://doi.org/10.1109/VCIP.2016.7805472
+b05943b05ef45e8ea8278e8f0870f23db5c83b23,https://doi.org/10.1109/ROBIO.2010.5723349
+8db609d84190b905913eb2f17f4e558c6e982208,http://doi.ieeecomputersociety.org/10.1109/ICIG.2011.182
+aab9a617be6e5507beb457b1e6c2e5b046f9cff0,https://doi.org/10.1109/ICIP.2008.4712153
+1f59e0818e7b16c0d39dd08eb90533ea0ae0be5e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8385089
+2e3b981b9f3751fc5873f77ad2aa7789c3e1d1d2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8397046
+4b94f531c203743a9f7f1e9dd009cdbee22ea197,https://doi.org/10.1109/ICSMC.2005.1571393
+cdf0dc4e06d56259f6c621741b1ada5c88963c6d,https://doi.org/10.1109/ICIP.2014.7025061
+235bebe7d0db37e6727dfa1246663be34027d96b,https://doi.org/10.1109/NAFIPS.2016.7851625
+3e03d19b950edadc74ca047dec86227282eccf71,https://doi.org/10.1109/ACCESS.2017.2777003
+ca902aeec4fa54d32a4fed9ba89a7fb2f7131734,http://doi.org/10.1007/s11042-018-5945-1
+1025c4922491745534d5d4e8c6e74ba2dc57b138,http://doi.org/10.1007/s11263-017-1014-x
+d1079444ceddb1de316983f371ecd1db7a0c2f38,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6460478
+ad8bd7016132a2f98ff1f41dac695285e71cc4b1,https://doi.org/10.1109/CISP-BMEI.2017.8301964
+398e0771e64cab6ca5d21754e32dce63f9e3c223,http://dl.acm.org/citation.cfm?id=3206028
+af29ad70ab148c83e1faa8b3098396bc1cd87790,http://doi.org/10.1007/s40012-016-0149-1
+0a5b2e642683ff20b6f0cee16a32a68ba0099908,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2012.6239342
+a1081cb856faae25df14e25045cd682db8028141,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8462122
+126204b377029feb500e9b081136e7a9010e3b6b,http://doi.ieeecomputersociety.org/10.1109/ICDMW.2010.50
+b972683d702a65d3ee7a25bc931a5890d1072b6b,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2017.2669035
+892400017e5c93611dc8361e7749135520d66f25,https://doi.org/10.1109/ICARCV.2010.5707394
+55a7286f014cc6b51a3f50b1e6bc8acc8166f231,http://arxiv.org/abs/1603.02814
+16fc82d44188eb49a151bd5836a29911b3bfabcb,https://doi.org/10.1007/978-981-10-7302-1_50
+4344ba6e33faaa616d01248368e66799548ca48b,https://doi.org/10.1007/s10044-015-0474-2
+902cc7dd4ecfb2b6750905ef08bceeed24e1eeeb,https://doi.org/10.1016/j.patcog.2016.03.002
+a9cecfbc47a39fa0158a5f6fd883e0e5ac2aa134,https://doi.org/10.1142/S0218001405004071
+642417f2bb1ff98989e0a0aa855253fed1fffe04,https://doi.org/10.1117/12.2004255
+7a91617ec959acedc5ec8b65e55b9490b76ab871,https://doi.org/10.1109/RAIT.2012.6194481
+6d67a7fd9a4fa99624721f37b077c71dad675805,https://doi.org/10.1007/s12193-015-0202-7
+8576d0031f2b0fe1a0f93dd454e73d48d98a4c63,http://doi.acm.org/10.1145/2522848.2531743
+3266fcd1886e8ad883714e38203e66c0c6487f7b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7533149
+4e1258db62e4762fd8647b250fda9c3567f86eb8,http://doi.ieeecomputersociety.org/10.1109/CRV.2013.17
+83bce0907937f09f5ccde26c361d52fe55fc8979,http://doi.acm.org/10.1145/2993148.2993185
+e14b046a564604508ea8e3369e7e9f612e148511,https://doi.org/10.1007/978-3-642-17829-0_4
+604a281100784b4d5bc1a6db993d423abc5dc8f0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5353681
+3db6fd6a0e9bb30f2421e84ee5e433683d17d9c1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8402469
+6966d9d30fa9b7c01523425726ab417fd8428790,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6619291
+503b6a490c2b24b9d2aaf642a0fdaf797a8cdb99,https://doi.org/10.1109/ACCESS.2017.2733718
+cc70fb1ab585378c79a2ab94776723e597afe379,https://doi.org/10.1109/ICIP.2017.8297067
+6159908dec4bc2c1102f416f8a52a31bf3e666a4,https://doi.org/10.1109/ICIP.2012.6467431
+468bb5344f74842a9a43a7e1a3333ebd394929b4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373896
+9d3377313759dfdc1a702b341d8d8e4b1469460c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7342926
+6b742055a664bcbd1c6a85ae6796bd15bc945367,http://doi.org/10.1007/s00138-006-0052-0
+3c09fb7fe1886072670e0c4dd632d052102a3733,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8101020
+1f8656e2254e353a91cceb08b33c25643a1b1fb7,https://doi.org/10.1109/LSP.2017.2736542
+77816b9567d5fed1f6085f33e1ddbcc73af2010e,https://doi.org/10.1109/MRA.2012.2201574
+4e581831d24fd90b0b5228b9136e76fa3e8f8279,https://doi.org/10.1109/TIP.2014.2303648
+a59c0cf3d2c5bf144ee0dbc1152b1b5dd7634990,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7350093
+62648f91e38b0e8f69dded13b9858bd3a86bb6ed,http://doi.acm.org/10.1145/2647868.2655016
+ee7e8aec3ebb37e41092e1285e4f81916ce92c18,https://www.sciencedirect.com/science/article/pii/S0197458017301859
+096ffc1ea5493242ba0c113178dab0c096412f81,http://doi.acm.org/10.1145/3123266.3123441
+934efd61b20f5b8b151a2df7cd373f0b387c02b0,https://doi.org/10.5220/0004673003290336
+519f1486f0755ef3c1f05700ea8a05f52f83387b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5595846
+90ddf1aabf1c73b5fc45254a2de46e53a0bde857,https://doi.org/10.1109/ROBIO.2015.7418917
+cbf3e848c5d2130dd640d9bd546403b8d78ce0f9,https://doi.org/10.1109/IJCNN.2012.6252385
+27e5b7ae3506a0f7472ee9089cd2472442e71c14,https://doi.org/10.1007/s00521-015-1834-y
+ef35c30529df914a6975af62aca1b9428f678e9f,https://doi.org/10.1007/s00138-016-0817-z
+12226bca7a891e25b7d1e1a34a089521bba75731,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373861
+d5c66a48bc0a324750db3d295803f47f6060043d,http://doi.ieeecomputersociety.org/10.1109/AVSS.2006.109
+f73174cfcc5c329b63f19fffdd706e1df4cc9e20,http://doi.ieeecomputersociety.org/10.1109/FIT.2015.13
+197efbef17f92e5cb5076961b6cd9f59e88ffd9a,https://doi.org/10.1109/ICMLA.2017.00-59
+fe866887d3c26ee72590c440ed86ffc80e980293,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8397011
+70d8bda4aafb0272ac4b93cd43e2448446b8e94d,https://doi.org/10.1109/ICMLC.2010.5580938
+daca9d03c1c951ed518248de7f75ff51e5c272cb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6976977
+bc607bee2002c6c6bf694a15efd0a5d049767237,http://doi.org/10.1007/s11042-017-4364-z
+e55f7250f3b8ee722814f8809620a851c31e5b0e,https://doi.org/10.3182/20130902-3-CN-3020.00030
+d0f9143f6f43a39bff47daf8c596681581db72ea,https://doi.org/10.1007/s11042-017-5241-5
+db1a9b8d8ce9a5696a96f8db4206b6f72707730e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961838
+d6791b98353aa113d79f6fb96335aa6c7ea3b759,https://doi.org/10.1109/TNNLS.2017.2648122
+035c8632c1ffbeb75efe16a4ec50c91e20e6e189,http://doi.org/10.1007/s00138-018-0943-x
+2bcd9b2b78eb353ea57cf50387083900eae5384a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995329
+bb070c019c0885232f114c7dca970d2afd9cd828,https://doi.org/10.1109/DICTA.2014.7008089
+40c9dce0a4c18829c4100bff5845eb7799b54ca1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5346008
+6c7a42b4f43b3a2f9b250f5803b697857b1444ac,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553718
+aed6af12148b43e4a24ee6e2bc3604ca59bd99a5,https://doi.org/10.1109/TIP.2017.2717505
+f6dabb4d91bf7389f3af219d486d4e67cec18c17,https://doi.org/10.1016/j.compeleceng.2014.08.010
+9c23859ec7313f2e756a3e85575735e0c52249f4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5981788
+f79e4ba09402adab54d2efadd1c4bfe4e20c5da5,https://doi.org/10.1109/ICIP.2017.8296364
+fdbc602a749ef070a7ac11c78dc8d468c0b60154,https://doi.org/10.1049/iet-ipr.2015.0519
+6622776d1696e79223f999af51e3086ba075dbd1,http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019454
+cd74d606e76ecddee75279679d9770cdc0b49861,https://doi.org/10.1109/TIP.2014.2365725
+ebb3d5c70bedf2287f9b26ac0031004f8f617b97,https://doi.org/10.1109/MSP.2017.2764116
+d0b7d3f9a59034d44e7cd1b434cfd27136a7c029,https://doi.org/10.1109/INCoS.2013.143
+a8e7561ada380f2f50211c67fc45c3b3dea96bdb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4401921
+637b31157386efbde61505365c0720545248fbae,https://doi.org/10.1109/BTAS.2017.8272721
+bdf5434648356ce22bdbf81d2951e4bb00228e4d,http://doi.ieeecomputersociety.org/10.1109/SNPD.2007.415
+30c93fec078b98453a71f9f21fbc9512ab3e916f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8395274
+b472f91390781611d4e197564b0016d9643a5518,http://doi.acm.org/10.1145/2382336.2382345
+287de191c49a3caa38ad7594093045dfba1eb420,https://doi.org/10.23919/MVA.2017.7986829
+f25aa838fb44087668206bf3d556d31ffd75235d,http://doi.acm.org/10.1145/2911996.2912038
+0e454686f83284ced2ffc5740829552a032671a3,https://doi.org/10.1109/IJCNN.2015.7280802
+18e54b74ed1f3c02b7569f53a7d930d72fc329f5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7902214
+7813d405450013bbdb0b3a917319d5964a89484a,https://doi.org/10.1109/WACV.2017.62
+83d41f6548bb76241737dcd3fed9e182ee901ff9,http://dl.acm.org/citation.cfm?id=2964328
+1fcb905e4505a781fb0b375eb470f5661e38ae39,http://doi.acm.org/10.1145/3123266.3123450
+34fd227f4fdbc7fe028cc1f7d92cb59204333718,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8446331
+4014d74e8f5ea4d76c2c1add81d0c88d6e342478,http://doi.acm.org/10.1145/3136755.3143010
+86fa086d02f424705bbea53943390f009191740a,https://doi.org/10.1109/ICIP.2015.7351651
+dad6b36fd515bda801f3d22a462cc62348f6aad8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6117531
+2a4984fb48c175d1e42c6460c5f00963da9f26b6,https://doi.org/10.1109/MIPRO.2015.7160445
+5364e58ba1f4cdfcffb247c2421e8f56a75fad8d,https://doi.org/10.1109/VCIP.2017.8305113
+32c5c65db2af9691f8bb749c953c978959329f8f,https://doi.org/10.1109/ICIP.2015.7351469
+0343f9401b98de36be957a30209fef45dd684270,http://doi.ieeecomputersociety.org/10.1109/FG.2015.7163134
+5213549200bccec57232fc3ff788ddf1043af7b3,http://doi.acm.org/10.1145/2601097.2601204
+fa54ab106c7f6dbd3c004cea4ef74ea580cf50bf,http://doi.ieeecomputersociety.org/10.1109/SITIS.2009.18
+ec90738b6de83748957ff7c8aeb3150b4c9b68bb,http://doi.org/10.1016/j.patcog.2015.03.011
+86881ce8f80adea201304ca6bb3aa413d94e9dd0,https://doi.org/10.1109/ICIP.2017.8297133
+0e4baf74dfccef7a99c6954bb0968a2e35315c1f,https://doi.org/10.1109/SIU.2012.6204517
+d40c16285d762f7a1c862b8ac05a0fdb24af1202,https://doi.org/10.1109/BESC.2017.8256378
+c18a03568d4b512a0d8380cbb1fbf6bd56d11f05,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8430403
+b86c49c6e3117ea116ec2d8174fa957f83502e89,https://doi.org/10.1109/CIT/IUCC/DASC/PICOM.2015.149
+ce2945e369603fcec1fcdc6e19aac5996325cba9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771366
+ff012c56b9b1de969328dacd13e26b7138ff298b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7762921
+8185be0689442db83813b49e215bf30870017459,https://doi.org/10.1109/TNNLS.2013.2293418
+8d0bc14589dea1f4f88914ffcb57a5c54830f2cc,http://doi.org/10.1007/978-3-319-16865-4
+fcb97ede372c5bddde7a61924ac2fd29788c82ce,https://doi.org/10.1109/TSMCC.2012.2192727
+64ec02e1056de4b400f9547ce56e69ba8393e2ca,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8446491
+ada56c9ceef50aa5159f1f8aa45ca2040d1ed15c,https://doi.org/10.1109/TIFS.2017.2680246
+e73f2839fc232c03e9f027c78bc419ee15810fe8,https://doi.org/10.1109/ICIP.2017.8296413
+5810ce61fda464d4de2769bd899e12727bee0382,https://doi.org/10.1109/IJCNN.2016.7727484
+8229f2735a0db0ad41f4d7252129311f06959907,https://doi.org/10.1109/TIP.2011.2106794
+0931bef0a9c8c153184a1f9c286cf4883cbe99b6,https://doi.org/10.1007/s12193-015-0203-6
+809e5884cf26b71dc7abc56ac0bad40fb29c671c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6247842
+6ca6ade6c9acb833790b1b4e7ee8842a04c607f7,http://dl.acm.org/citation.cfm?id=3234805
+7a09e8f65bd85d4c79f0ae90d4e2685869a9894f,https://doi.org/10.1109/TMM.2016.2551698
+da7bbfa905d88834f8929cb69f41a1b683639f4b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6199752
+15ef449ac443c494ceeea8a9c425043f4079522e,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477583
+d2598c088b0664c084413796f39697c6f821d56e,https://doi.org/10.1109/VCIP.2016.7805451
+f1da4d705571312b244ebfd2b450692fd875cd1f,https://doi.org/10.1109/TIP.2014.2322446
+e66a6ae542907d6a0ebc45da60a62d3eecf17839,https://doi.org/10.1109/EUVIP.2014.7018366
+77869f274d4be4d4b4c438dbe7dff4baed521bd8,https://doi.org/10.1109/TIP.2016.2551362
+98e098ba9ff98fc58f22fed6d3d8540116284b91,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8332532
+49be50efc87c5df7a42905e58b092729ea04c2f5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7177489
+c2be82ed0db509087b08423c8cf39ab3c36549c3,http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019363
+7c6686fa4d8c990e931f1d16deabf647bf3b1986,http://arxiv.org/abs/1504.07550
+534159e498e9cc61ea10917347637a59af38142d,https://doi.org/10.1016/j.neucom.2016.01.126
+8ad0a88a7583af819af66cf2d9e8adb860cf9c34,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7539153
+ceba8ca45bad226c401a509e6b8ccbf31361b0c9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7129813
+e9331ae2a887c02e0a908ebae2810a681aedee29,https://doi.org/10.1016/j.image.2011.05.003
+9652f154f4ae7807bdaff32d3222cc0c485a6762,https://doi.org/10.1007/s00138-016-0760-z
+03e1480f1de2ffbd85655d68aae63a01685c5862,https://doi.org/10.1109/ICPR.2014.771
+6e38011e38a1c893b90a48e8f8eae0e22d2008e8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265376
+ae96fc36c89e5c6c3c433c1163c25db1359e13ea,https://doi.org/10.1007/s10489-013-0485-x
+f85ccab7173e543f2bfd4c7a81fb14e147695740,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5946910
+51b770e6b2af994ffc8793f59b24a9f619033a3a,https://doi.org/10.1109/ICDSC.2011.6042899
+cd55fb30737625e86454a2861302b96833ed549d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139094
+5167e16b53283be5587659ea8eaa3b8ef3fddd33,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813364
+4bf85ef995c684b841d0a5a002d175fadd922ff0,http://dl.acm.org/citation.cfm?id=3199668
+984edce0b961418d81203ec477b9bfa5a8197ba3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369732
+4342a2b63c9c344d78cf153600cd918a5fecad59,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237671
+ae425a2654a1064c2eda29b08a492c8d5aab27a2,https://doi.org/10.23919/MVA.2017.7986845
+aafeb3d76155ec28e8ab6b4d063105d5e04e471d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014781
+03babadaaa7e71d4b65203e27e8957db649155c6,https://doi.org/10.1109/TIP.2017.2725578
+b72eebffe697008048781ab7b768e0c96e52236a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100092
+d2f2b10a8f29165d815e652f8d44955a12d057e6,http://doi.org/10.1007/s10044-015-0475-1
+cc44f1d99b17a049a8186ec04c6a1ecf1906c3c8,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2012.87
+3a1c40eced07d59a3ea7acda94fa833c493909c1,http://doi.ieeecomputersociety.org/10.1109/FG.2017.111
+ab2c07c9867243fad2d66fa6aeabfb780433f319,http://doi.acm.org/10.1145/2967878.2967887
+ae8240095c9cca2c395f173fece2f46277b94929,https://doi.org/10.1016/j.neucom.2017.06.045
+d5d5cc27ca519d1300e77e3c1a535a089f52f646,http://doi.org/10.1007/s11042-016-3768-5
+88e2efab01e883e037a416c63a03075d66625c26,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265507
+e66b4aa85524f493dafde8c75176ac0afad5b79c,https://doi.org/10.1109/SSCI.2017.8285219
+1e344b99583b782e3eaf152cdfa15f217b781181,http://doi.acm.org/10.1145/2499788.2499789
+973022a1f9e30a624f5e8f7158b5bbb114f4af32,http://doi.acm.org/10.1145/3011077.3011138
+5763b09ebca9a756b4adebf74d6d7de27e80e298,https://doi.org/10.1109/BTAS.2013.6712738
+dcb50e1f439d1f9b14ae85866f4542e51b830a07,https://doi.org/10.1109/FSKD.2012.6234354
+a5acda0e8c0937bfed013e6382da127103e41395,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789672
+02fc9e7283b79183eb3757a9b6ddeb8c91c209bb,http://doi.org/10.1007/s11042-018-6146-7
+ab6886252aea103b3d974462f589b4886ef2735a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4371439
+525da67fb524d46f2afa89478cd482a68be8a42b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354128
+a4bb791b135bdc721c8fcc5bdef612ca654d7377,https://doi.org/10.1109/BTAS.2017.8272703
+008528d5e27919ee95c311266041e4fb1711c254,https://doi.org/10.1007/s13735-015-0092-1
+6c1227659878e867a01888eef472dd96b679adb6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354280
+0b3144cdc9d6d5a1498d6178db20d1c49fb64de9,http://doi.acm.org/10.1145/1322192.1322203
+44b91268fbbf62e1d2ba1d5331ec7aedac30dbe8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8342368
+6a6269e591e11f41d59c2ca1e707aaa1f0d57de6,http://doi.org/10.1007/s10044-016-0531-5
+016194dbcd538ab5a129ef1bcff3c6e073db63f9,https://doi.org/10.1007/s10462-012-9334-2
+ab7923968660d04434271559c4634790dc68c58e,https://doi.org/10.1109/ICIP.2015.7351111
+2adffdffa16475ae71bb2adcf65840f01f1e53f7,https://doi.org/10.1049/iet-cvi.2014.0094
+7f2a234ad5c256733a837dbf98f25ed5aad214e8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7207289
+c84de67ec2a5d687869d0c3ca8ac974aaa5ee765,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7090979
+2238dddb76499b19035641d97711cf30d899dadb,https://doi.org/10.1109/SIU.2016.7496098
+5f7094ba898a248e1e6b37e3d9fb795e59131cdc,http://doi.ieeecomputersociety.org/10.1109/ICMEW.2017.8026246
+9f2984081ef88c20d43b29788fdf732ceabd5d6a,http://arxiv.org/abs/1806.01547
+f86c6942a7e187c41dd0714531efd2be828e18ad,https://doi.org/10.1109/VCIP.2016.7805514
+40f06e5c052d34190832b8c963b462ade739cbf0,https://doi.org/10.1109/ICNC.2010.5583821
+54058859a2ddf4ecfc0fe7ccbea7bb5f29d9201d,https://doi.org/10.1007/978-3-319-50832-0_36
+be0a0e563445119b82d664d370e646e53e69a4c5,https://doi.org/10.1016/j.eswa.2017.05.037
+805a0f4b99f162ac4db0ef6e0456138c8d498c3a,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2015.2465373
+67214e8d2f83eb41c14bfc86698eb6620e72e87c,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.263
+1071dde48a77f81c35ad5f0ca90a9daedb54e893,http://ieeexplore.ieee.org/document/7881657/
+68c1090f912b69b76437644dd16922909dd40d60,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6987312
+675b1fd2aaebe9c62be6b22b9ac6d278193cc581,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1699428
+754626bd5fb06fee5e10962fdfeddd495513e84b,https://doi.org/10.1109/SIU.2017.7960646
+72a3bb0fb490355a926c5a689e12268bff9ff842,https://doi.org/10.1109/ICIP.2006.312862
+e6d6203fa911429d76f026e2ec2de260ec520432,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7899663
+2d411826cd7865638b65e1b5f92043c245f009f9,http://doi.acm.org/10.1145/2733373.2806239
+2ca10da4b59b406533ad1dc7740156e01782658f,https://doi.org/10.1109/SIU.2016.7496207
+df767f62a6bf3b09e6417d801726f2d5d642a202,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1699727
+a8fd23934e5039bb818b8d1c47ccb540ce2c253c,https://doi.org/10.1007/s11760-015-0808-y
+33b915476f798ca18ae80183bf40aea4aaf57d1e,https://doi.org/10.1109/TIP.2013.2271548
+636b8ffc09b1b23ff714ac8350bb35635e49fa3c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1467308
+1b211f8221162ce7ef212956b637b50e30ad48f4,https://doi.org/10.1109/ICIP.2016.7532925
+44b827df6c433ca49bcf44f9f3ebfdc0774ee952,https://doi.org/10.1109/LSP.2017.2726105
+beb2f1a6f3f781443580ffec9161d9ce6852bf48,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8424735
+11c2d40fc63ecd88febadd8a9cac9521a6b7de66,https://doi.org/10.1109/ICSIPA.2011.6144081
+b26e8f6ad7c2d4c838660d5a17337ce241442ed9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8462692
+bbc47f421ab161f22f2699ee7bbb7fc8aec1cb7b,https://doi.org/10.1109/IJCNN.2017.7966271
+b51d11fa400d66b9f9d903a60c4ebe03fd77c8f2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8358588
+89e31777f221ddb3bc9940d7f520c8114c4148a2,https://doi.org/10.1007/s11063-012-9224-2
+94b729f9d9171e7c4489995e6e1cb134c8521f4e,http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2016.055
+cf736f596bf881ca97ec4b29776baaa493b9d50e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952629
+9ba358281f2946cba12fff266019193a2b059590,http://doi.ieeecomputersociety.org/10.1109/ISM.2008.27
+4786638ffb3b2fb385cec80720cc6e7c3588b773,https://doi.org/10.1007/s11042-015-2598-1
+fc8990088e0f1f017540900bc3f5a4996192ff05,https://doi.org/10.1109/ICIP.2017.8296314
+98856ab9dc0eab6dccde514ab50c823684f0855c,https://doi.org/10.1109/TIFS.2012.2191962
+85f27ec70474fe93f32864dd03c1d0f321979100,https://doi.org/10.1109/IJCNN.2014.6889381
+ed9de242a23ad546902e1d5ec022dbb029cc2282,https://doi.org/10.1109/ICASSP.2015.7178138
+dca2bb023b076de1ccd0c6b8d71faeb3fccb3978,http://doi.acm.org/10.1145/3152118
+efb24d35d8f6a46e1ff3800a2481bc7e681e255e,http://doi.org/10.1016/j.patrec.2015.08.006
+2f5b51af8053cf82ab52bbfd46b56999222ec21c,https://doi.org/10.1109/ICPR.2014.788
+b712f08f819b925ff7587b6c09a8855bc295d795,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8450858
+fdd19fee07f2404952e629cc7f7ffaac14febe01,https://doi.org/10.1109/CISP-BMEI.2016.7852754
+75858dbee2c248a60741fbc64dcad4f8b63d51cb,https://doi.org/10.1109/TIP.2015.2460464
+204f1cf56794bb23f9516b5f225a6ae00d3d30b8,https://doi.org/10.1109/JSYST.2015.2418680
+b2ae5c496fe01bb2e2dee107f75b82c6a2a23374,http://doi.ieeecomputersociety.org/10.1109/FG.2017.116
+63fd7a159e58add133b9c71c4b1b37b899dd646f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6603332
+3ce96f03874d42345c0727edc78b6949b20b4a11,https://doi.org/10.1007/s11042-015-2630-5
+328da943e22adef5957c08b6909bda09d931a350,https://doi.org/10.1109/ICARCV.2008.4795605
+7d40e7e5c01bd551edf65902386401e1b8b8014b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7303876
+279459cbbc5c6db4802e9c737cc72a612d76f7fc,https://doi.org/10.1109/SSCI.2017.8285296
+e45a556df61e2357a8f422bdf864b7a5ed3b8627,http://doi.org/10.1016/j.image.2017.08.001
+c444c4dab97dd6d6696f56c1cacda051dde60448,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2013.37
+eb5c1e526fe2d17778c68f60c874c3da0129fabd,https://doi.org/10.1109/VCIP.2015.7457856
+e287ff7997297ce1197359ed0fb2a0bd381638c9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7795253
+270acff7916589a6cc9ca915b0012ffcb75d4899,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8425659
+80aa455068018c63237c902001b58844fcc6f160,https://doi.org/10.1109/FG.2011.5771327
+782a05fbe30269ff8ab427109f5c4d0a577e5284,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8038860
+c1fb854d9a04b842ff38bd844b50115e33113539,https://doi.org/10.1007/s11042-016-3883-3
+45edb29fb7eed5a52040300e1fd3cd53f1bdb429,https://doi.org/10.1109/ICIP.2015.7351570
+ec89f2307e29cc4222b887eb0619e0b697cf110d,https://doi.org/10.1109/TIP.2009.2027361
+24f3dfeb95bdecdc604d630acdfcafa1dc7c9124,http://doi.acm.org/10.1145/2994258.2994270
+28715fc79bd5ff8dd8b6fc68a4f2641e5d1b8a08,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406402
+8274069feeff6392b6c5d45d8bfaaacd36daedad,http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019312
+d691440030394c2e00a2ab47aba4f8b5fca5f25a,https://doi.org/10.1109/ICIP.2016.7532921
+a5d76710dc15ebc7d8b4dc976604315f1e2fc3ba,http://doi.ieeecomputersociety.org/10.1109/ICMLA.2011.117
+eef432868e85b95a7d9d9c7b8c461637052318ca,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2013.236
+8c2b663f8be1702ed3e377b5e6e85921fe7c6389,https://doi.org/10.1109/IPTA.2016.7821006
+a9ae55c83a8047c6cdf7c958fd3d4a6bfb0a13df,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014745
+78b457f8b1ba4fbd1c50c32ec1f02f4f58764ad7,http://doi.ieeecomputersociety.org/10.1109/WACV.2015.99
+1a03dcc811131b0b702bd5a75c54ed26cd27151a,https://doi.org/10.1007/s11760-015-0810-4
+0b45aeb0aede5e0c19b508ede802bdfec668aefd,http://dl.acm.org/citation.cfm?id=1963206
+c60601bdb5465d8270fdf444e5d8aeccab744e29,http://doi.ieeecomputersociety.org/10.1109/ICME.2010.5583363
+3c8db0d86a6aa51b64ec09c7d25a721adcdfb7a3,https://doi.org/10.1016/j.imavis.2015.06.009
+dff38cac0a1004037024f0ed2a72f76f4e49318b,https://doi.org/10.1109/TNNLS.2015.2495268
+1eb1fdc5c933d2483ba1acbfa8c457fae87e71e5,https://doi.org/10.1109/ICPR.2016.7899945
+28a45770faf256f294ce3bbd5de25c6d5700976e,https://doi.org/10.1109/ICDSP.2016.7868531
+8882d39edae556a351b6445e7324ec2c473cadb1,https://doi.org/10.1109/TIP.2017.2755766
+aa1129780cc496918085cd0603a774345c353c54,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7779010
+359edbaa9cf56857dd5c7c94aaef77003ba8b860,https://doi.org/10.1007/978-3-319-02714-2
+ac48ecbc7c3c1a7eab08820845d47d6ce197707c,https://doi.org/10.1109/TIP.2017.2681841
+5b5b568a0ba63d00e16a263051c73e09ab83e245,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8416840
+1280b35e4a20036fcfd82ee09f45a3fca190276f,http://doi.ieeecomputersociety.org/10.1109/iThings-GreenCom-CPSCom-SmartData.2017.166
+a6d47f7aa361ab9b37c7f3f868280318f355fadc,https://ora.ox.ac.uk/objects/uuid:7704244a-b327-4e5c-a58e-7bfe769ed988
+2d7c2c015053fff5300515a7addcd74b523f3f66,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8323422
+bcead1a92744e76c38caaa13159de4abfb81b1d0,https://doi.org/10.1109/ICIP.2014.7025310
+480ccd25cb2a851745f5e6e95d33edb703efb49e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8461792
+f16599e4ec666c6390c90ff9a253162178a70ef5,http://dl.acm.org/citation.cfm?id=3206050
+3ff79cf6df1937949cc9bc522041a9a39d314d83,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8406730
+9e10ea753b9767aa2f91dafe8545cd6f44befd7f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771444
+68021c333559ab95ca10e0dbbcc8a4840c31e157,https://doi.org/10.1109/ICPR.2016.7900281
+ddd9d7cb809589b701fba9f326d7cf998a63b14f,http://doi.acm.org/10.1145/2647868.2654992
+81b0550c58e7409b4f1a1cd7838669cfaa512eb3,http://doi.org/10.1016/j.patcog.2015.08.026
+e8951cc76af80da43e3528fe6d984071f17f57e7,https://doi.org/10.1109/WACVW.2017.9
+dbced84d839165d9b494982449aa2eb9109b8467,http://arxiv.org/abs/1712.05083
+7b1ca9a74ab7fbfc32a69e8313ca2f2d78ac6c35,http://doi.ieeecomputersociety.org/10.1109/ICSC.2017.61
+5e0b691e9e5812dd3cb120a8d77619a45aa8e4c4,https://doi.org/10.1109/ICIP.2016.7532567
+8879083463a471898ff9ed9403b84db277be5bf6,https://doi.org/10.1016/j.patcog.2016.08.031
+31d51e48dbd9e7253eafe0719f3788adb564a971,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7410588
+e3a8f18e507d9f2b537ec3c3fcc1b874b8ccfc24,http://doi.ieeecomputersociety.org/10.1109/MMUL.2016.27
+4a733a0862bd5f7be73fb4040c1375a6d17c9276,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6618949
+8e63868e552e433dc536ba732f4c2af095602869,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1699730
+dee6609615b73b10540f32537a242baa3c9fca4d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8015006
+a119844792fd9157dec87e3937685c8319cac62f,https://doi.org/10.1109/APSIPA.2015.7415395
+58e7dbbb58416b785b4a1733bf611f8106511aca,http://doi.ieeecomputersociety.org/10.1109/ACII.2017.8273597
+bddc822cf20b31d8f714925bec192c39294184f7,http://doi.org/10.1134/S1054661807040190
+876583a059154def7a4bc503b21542f80859affd,https://doi.org/10.1109/IWBF.2016.7449697
+4215b34597d8ce1e8985afa8043400caf0ec7230,http://doi.ieeecomputersociety.org/10.1109/WI-IAT.2012.71
+2601b679fdd637f3cd978753ae2f15e8759dd267,https://doi.org/10.1109/ICIP.2015.7351306
+f64574ee0e6247b84d573ddb5c6e2c4ba798ffff,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1699435
+2a79bd36c56fd1634ca0f8089fe8aa9343eb92ce,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2006.104
+1252727e8096f48096ef89483d30c3a74500dd15,https://doi.org/10.1007/s00138-016-0746-x
+c6bbb56a26222bdb8ce7dd829cff38b67d4b03cd,http://doi.acm.org/10.1145/2043674.2043677
+728b1b2a86a7ffda402e7ec1a97cd1988dcde868,https://doi.org/10.1016/j.procs.2016.04.083
+b5f3b0f45cf7f462a9c463a941e34e102a029506,http://dl.acm.org/citation.cfm?id=3143004
+496f3d14cf466f054d395a3c71fa2cd6a3dda61d,http://doi.acm.org/10.1145/3009977.3010055
+4c0846bcfa64d9e810802c5b7ef0f8b43523fe54,http://doi.ieeecomputersociety.org/10.1109/TKDE.2014.2324594
+4ca9753ab023accbfa75a547a65344ee17b549ba,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5457710
+e9b731f00d16a10a31ceea446b2baa38719a31f1,https://doi.org/10.1109/ICSMC.2012.6378271
+194f5d3c240d06575403c9a422a0ebc86d43b91e,https://doi.org/10.1007/s11042-015-2580-y
+028e237cb539b01ec72c244f57fdcfb65bbe53d4,http://doi.ieeecomputersociety.org/10.1109/CIS.2010.65
+772474b5b0c90629f4d9c223fd9c1ef45e1b1e66,https://doi.org/10.1109/BTAS.2017.8272716
+b47a3c909ee9b099854619054fd00e200b944aa9,http://doi.ieeecomputersociety.org/10.1109/WACV.2015.77
+798e58c181f3ba3aecbe41acd1881860c5e2df3a,https://doi.org/10.1109/TNNLS.2012.2237038
+c91da328fe50821182e1ae4e7bcbe2b62496f8b9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4453844
+6f48e5e258da11e6ba45eeabe65a5698f17e58ef,https://doi.org/10.1109/ICASSP.2013.6637968
+24205a60cbf1cc12d7e0a9d44ed3c2ea64ed7852,http://doi.ieeecomputersociety.org/10.1109/FG.2017.30
+f0f854f8cfe826fd08385c0c3c8097488f468076,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406454
+80ed678ef28ccc1b942e197e0393229cd99d55c8,http://doi.org/10.1007/s10044-015-0456-4
+3ac3a714042d3ebc159546c26321a1f8f4f5f80c,http://dl.acm.org/citation.cfm?id=3025149
+c8fb8994190c1aa03c5c54c0af64c2c5c99139b4,https://doi.org/10.1007/s00138-016-0794-2
+86afb1e38a96f2ac00e792ef353a971fd13c8474,https://doi.org/10.1109/BigData.2016.7840742
+3e3227c8e9f44593d2499f4d1302575c77977b2e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8347112
+4848a48a2b8bacd2092e87961cd86818da8e7151,https://doi.org/10.1109/VCIP.2017.8305080
+d7c87f4ca39f79d93c954ffacac32bc6eb527e2c,https://doi.org/10.1007/978-3-642-15696-0_57
+64fd48fae4d859583c4a031b51ce76ecb5de614c,https://doi.org/10.1109/ICARCV.2008.4795556
+ba1c0600d3bdb8ed9d439e8aa736a96214156284,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8081394
+c59a9151cef054984607b7253ef189c12122a625,https://doi.org/10.1007/s00138-016-0791-5
+dec76940896a41a8a7b6e9684df326b23737cd5d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6607638
+f2902f5956d7e2dca536d9131d4334f85f52f783,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6460191
+f6511d8156058737ec5354c66ef6fdcf035d714d,http://doi.ieeecomputersociety.org/10.1109/BWCCA.2014.115
+1afef6b389bd727c566cd6fbcd99adefe4c0cf32,https://doi.org/10.1109/ICB.2016.7550087
+51bb86dc8748088a198b216f7e97616634147388,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6890496
+f4251e02f87ac3fcae70bdb313f13ed16ff6ff0a,https://www.ncbi.nlm.nih.gov/pubmed/24314504
+f65b47093e4d45013f54c3ba09bbcce7140af6bb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354117
+1ca1b4f787712ede215030d22a0eea41534a601e,https://doi.org/10.1109/CVPRW.2010.5543609
+0a0007cfd40ae9694c84f109aea11ec4f2b6cf39,http://doi.org/10.1007/s11042-016-4105-8
+4033ac52dba394e390a86cd149b9838f1d7834b5,https://doi.org/10.1109/ICMLC.2012.6359009
+0532cbcf616f27e5f6a4054f818d4992b99d201d,http://doi.org/10.1007/s11042-015-3042-2
+d1edb8ba9d50817dbfec7e30f25b1846941e84d8,http://doi.org/10.1007/s13735-016-0112-9
+41781474d834c079e8fafea154d7916b77991b15,http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2017.60
+ca37933b6297cdca211aa7250cbe6b59f8be40e5,http://doi.acm.org/10.1145/3155133.3155207
+052fb35f731680d9d4e7d89c8f70f14173efb015,http://doi.acm.org/10.1145/2893487
+b2f9e0497901d22b05b9699b0ea8147861c2e2cc,https://doi.org/10.1007/978-3-319-70353-4_3
+9b4b2a575641f3a7f8a5ce28b6a06c36694a9ddf,http://doi.org/10.1007/s00371-015-1158-z
+1063be2ad265751fb958b396ee26167fa0e844d2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369056
+2884ff0d58a66d42371b548526d685760e514043,https://doi.org/10.1109/ICIP.2015.7351242
+48dcf45a1e38adbb9826594f7ffaa5e95ef78395,https://doi.org/10.1109/VCIP.2017.8305111
+a7a3ec1128f920066c25cb86fbc33445ce613919,https://doi.org/10.1109/VCIP.2017.8305115
+6a3fa483c64e72d9c96663ff031446a2bdb6b2eb,https://doi.org/10.1016/j.patcog.2017.02.003
+24e42e6889314099549583c7e19b1cb4cc995226,https://doi.org/10.1109/ACPR.2011.6166646
+5217ab9b723158b3ba2235e807d165e72fd33007,http://doi.acm.org/10.1145/2043674.2043710
+7acbf0b060e948589b38d5501ca217463cfd5c2f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6940304
+b7ec41005ce4384e76e3be854ecccd564d2f89fb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8441009
+7d8798e7430dcc68fcdbd93053c884fc44978906,http://doi.acm.org/10.1145/2506364.2506369
+e79bacc03152ea55343e6af97bcd17d8904cf5ef,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237669
+6a931e7b7475635f089dd33e8d9a2899ae963804,http://doi.org/10.1007/s00371-018-1561-3
+427bec487c330e7e34cc2c8fc2d6558690421ea0,http://doi.ieeecomputersociety.org/10.1109/ISCSCT.2008.352
+d5f8827fc7d66643bf018d5636e81ed41026b61a,http://doi.ieeecomputersociety.org/10.1109/FG.2017.36
+48de3ca194c3830daa7495603712496fe908375c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6619283
+c7745f941532b7d6fa70db09e81eb1167f70f8a7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1640757
+55432723c728a2ce90d817e9e9877ae9fbad6fe5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8412925
+cfa40560fa74b2fb5c26bdd6ea7c610ba5130e2f,https://doi.org/10.1109/TIFS.2013.2286265
+8127b7654d6e5c46caaf2404270b74c6b0967e19,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813406
+d9072e6b7999bc2d5750eb58c67a643f38d176d6,https://doi.org/10.1109/LSP.2009.2027636
+7049187c5155d9652747413ce1ebc8dbb209fd69,https://doi.org/10.1109/ICPR.2016.7899808
+d9a5c82b710b1f4f1ffb67be2ae1d3c0ae7f6c55,http://doi.org/10.1016/j.jvcir.2015.11.002
+3ad56aed164190e1124abea4a3c4e1e868b07dee,https://doi.org/10.1016/j.patcog.2015.12.016
+2bf646a6efd15ab830344ae9d43e10cc89e29f34,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8387808
+7e56d9ebd47490bb06a8ff0bd5bcd8672ec52364,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1275543
+c0270a57ad78da6c3982a4034ffa195b9e932fda,http://doi.ieeecomputersociety.org/10.1109/FG.2017.131
+43eb03f95adc0df61af2c3b12a913c725b08d4f5,http://doi.ieeecomputersociety.org/10.1109/ICDMW.2011.101
+b728e7db6e5559a77dc59381bfb8df96d482a721,http://doi.ieeecomputersociety.org/10.1109/ICIG.2011.28
+27b451abfe321a696c852215bb7efb4c2e50c89f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7898447
+017e94ad51c9be864b98c9b75582753ce6ee134f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7892240
+8cd9475a3a1b2bcccf2034ce8f4fe691c57a4889,http://doi.ieeecomputersociety.org/10.1109/FG.2017.138
+3b64b8be33887e77e6def4c385985e43e2c15eea,https://doi.org/10.1109/TIP.2016.2576278
+245d98726674297208e76308c3a11ce3fc43bee2,https://doi.org/10.1007/s11042-015-2699-x
+acab402d706dbde4bea4b7df52812681011f435e,https://doi.org/10.1109/HIS.2012.6421377
+96b1f2bde46fe4f6cc637398a6a71e8454291a6e,https://doi.org/10.1109/TIP.2010.2073476
+1b5d445741473ced3d4d33732c9c9225148ed4a1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8452894
+cdf2c8752f1070b0385a94c7bf22e8b54cac521b,https://doi.org/10.1007/s11265-010-0541-2
+d983dda8b03ed60fa3afafe5c50f1d9a495f260b,https://doi.org/10.1016/j.patcog.2007.03.020
+471bef061653366ba66a7ac4f29268e8444f146e,https://doi.org/10.1109/SMC.2015.524
+c833c2fb73decde1ad5b5432d16af9c7bee1c165,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2008.143
+a7c066e636b8953481b4a8d8ff25a43a96dd348f,https://doi.org/10.1109/ATSIP.2017.8075517
+4d6d6369664a49f6992f65af4148cefef95055bc,https://doi.org/10.1109/ICIP.2014.7025407
+142e233adceed9171f718a214a7eba8497af4324,https://doi.org/10.1109/IJCNN.2014.6889504
+484bac2a9ff3a43a6f85d109bbc579a4346397f5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6011991
+b262a2a543971e10fcbfc7f65f46115ae895d69e,https://doi.org/10.1109/DICTA.2015.7371266
+699b8250fb93b3fa64b2fc8f59fef036e172564d,https://doi.org/10.1109/ICMLA.2016.0147
diff --git a/scraper/reports/misc/all_doi-4.csv b/scraper/reports/misc/all_doi-4.csv
new file mode 100644
index 00000000..81bb4df3
--- /dev/null
+++ b/scraper/reports/misc/all_doi-4.csv
@@ -0,0 +1,748 @@
+d790093cb85fc556c0089610026e0ec3466ab845,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4721612
+0450dacc43171c6e623d0d5078600dd570de777e,http://doi.org/10.1007/s10339-016-0774-5
+91e17338a12b5e570907e816bff296b13177971e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8272751
+30fb5c24cc15eb8cde5e389bf368d65fb96513e4,http://dl.acm.org/citation.cfm?id=3206048
+30a4b4ef252cb509b58834e7c40862124c737b61,https://doi.org/10.1142/S0218001416560061
+363f540dc82ba8620262a04a67cfd6d3c85b0582,http://doi.ieeecomputersociety.org/10.1109/WIAMIS.2009.5031445
+bd66dc891270d858de3adf97d42ed714860ae94d,https://doi.org/10.1109/ACPR.2015.7486598
+0e4fa61871755b5548a5c970c8103f7b2ada24f3,http://doi.ieeecomputersociety.org/10.1109/SITIS.2015.19
+06c956d4aac65752672ce4bd5a379f10a7fd6148,https://doi.org/10.1109/LSP.2017.2749763
+bab2f4949a38a712a78aafbc0a3c392227c65f56,https://doi.org/10.1109/CISP-BMEI.2017.8302191
+8be60114634caa0eff8566f3252cb9a1b7d5ef10,http://doi.ieeecomputersociety.org/10.1109/ICME.2014.6890133
+29fd98f096fc9d507cd5ee7d692600b1feaf7ed1,http://doi.acm.org/10.1145/2988257.2988270
+69ad67e204fb3763d4c222a6c3d05d6725b638ed,http://doi.ieeecomputersociety.org/10.1109/ICMEW.2014.6890538
+247a8040447b6577aa33648395d95d80441a0cf3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8362745
+2f67d5448b5372f639633d8d29aac9c0295b4d72,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6460923
+d4288daef6519f6852f59ac6b85e21b8910f2207,https://www.ncbi.nlm.nih.gov/pubmed/29994505
+dae9d0a9b77366f0cd52e38847e47691ee97bc1f,https://doi.org/10.1007/s11760-015-0822-0
+8383faea09b4b4bef8117a1da897495ebd68691b,https://doi.org/10.1109/TCYB.2015.2493538
+eece52bd0ed4d7925c49b34e67dbb6657d2d649b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014982
+99d06fe2f4d6d76acf40b6da67c5052e82055f5a,http://dl.acm.org/citation.cfm?id=3268909
+f095b5770f0ff13ba9670e3d480743c5e9ad1036,http://doi.org/10.1007/s11263-016-0950-1
+4c141534210df53e58352f30bab558a077fec3c6,https://doi.org/10.1109/TMM.2016.2557722
+a62997208fec1b2fbca6557198eb7bc9340b2409,https://doi.org/10.1109/HPCC.and.EUC.2013.241
+e95c5aaa72e72761b05f00fad6aec11c3e2f8d0f,http://doi.acm.org/10.1145/2791405.2791505
+77cea27494499dd162221d1476bf70a87391790a,https://doi.org/10.1109/VCIP.2015.7457930
+e6d6d1b0a8b414160f67142fc18e1321fe3f1c49,https://doi.org/10.1109/FSKD.2015.7382037
+16b0c171fb094f677fcdf78bbb9aaef0d5404942,https://doi.org/10.1109/TIP.2017.2733739
+49fe4f387ac7e5852a78b327ec42cc7300c5f8e0,https://doi.org/10.1007/s11042-014-2055-6
+d99b5ee3e2d7e3a016fbc5fd417304e15efbd1f8,http://doi.org/10.1007/s11063-017-9578-6
+58d0c140597aa658345230615fb34e2c750d164c,http://doi.acm.org/10.1145/3098954.3098969
+629a973ca5f3c7d2f4a9befab97d0044dfd3167a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4427488
+87ee56feefdb39938cda7f872e784d9d986713af,http://dl.acm.org/citation.cfm?id=3022247
+6f22628d34a486d73c6b46eb071200a00e3abae3,https://www.ncbi.nlm.nih.gov/pubmed/29994497
+5a547df635a9a56ac224d556333d36ff68cbf088,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8359041
+f8fe1b57347cdcbea755722bf1ae85c4b26f3e5c,https://doi.org/10.1007/s00138-016-0790-6
+f76a6b1d6029769e2dc1be4dadbee6a7ba777429,http://doi.org/10.1007/s12559-017-9506-0
+85f7f03b79d03da5fae3a7f79d9aac228a635166,https://doi.org/10.1109/WACV.2009.5403085
+6af75a8572965207c2b227ad35d5c61a5bd69f45,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8433687
+43dce79cf815b5c7068b1678f6200dabf8f5de31,http://arxiv.org/abs/1709.03196
+62e834114b58a58a2ea2d7b6dd7b0ce657a64317,https://doi.org/10.1109/SMC.2014.6973987
+c29fe5ed41d2240352fcb8d8196eb2f31d009522,http://doi.org/10.1007/s11042-015-3230-0
+df7ff512e8324894d20103fd8ab5da650e4d86db,http://doi.acm.org/10.1145/2043674.2043709
+e853484dc585bed4b0ed0c5eb4bc6d9d93a16211,http://dl.acm.org/citation.cfm?id=3130971
+d878a67b2ef6a0a5dec72db15291f12419040ab1,https://doi.org/10.1109/IPTA.2016.7821012
+f61829274cfe64b94361e54351f01a0376cd1253,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7410784
+397257783ccc8cace5b67cc71e0c73034d559a4f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6918513
+2be24e8a3f2b89bdaccd02521eff3b7bb917003e,http://doi.ieeecomputersociety.org/10.1109/WACV.2015.96
+bc36badb6606b8162d821a227dda09a94aac537f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337442
+8c37bd06e1a637c6f249dcd1d2c4bc9589ae24b3,https://doi.org/10.1007/11608288_28
+55aafdef9d9798611ade1a387d1e4689f2975e51,http://doi.org/10.1007/s11263-017-1044-4
+eefdb69ac2c461e7791603d0f8c02ff3c8600adc,https://doi.org/10.1016/j.jvcir.2017.02.007
+72119cb98f9502ec639de317dccea57fd4b9ee55,https://doi.org/10.1109/GlobalSIP.2015.7418230
+2f61d91033a06dd904ff9d1765d57e5b4d7f57a6,https://doi.org/10.1109/ICIP.2016.7532953
+3e2b9ffeb708b4362ebfad95fa7bb0101db1579d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553717
+b22317a0bbbcc79425f7c8a871b2bf211ba2e9c4,https://doi.org/10.1109/ACCESS.2018.2805861
+e585dc6c810264d9f07e38c412379734a920714e,http://doi.acm.org/10.1145/2531923.2531926
+9e28243f047cc9f62a946bf87abedb65b0da0f0a,https://doi.org/10.1109/ICMLA.2013.141
+e97ba85a4550667b8a28f83a98808d489e0ff3bc,http://doi.org/10.1155/2018%2F9729014
+281b91c35a1af97b1405bc724a04e2be6e24971b,https://doi.org/10.1109/ICMLC.2010.5580557
+cead57f2f7f7b733f4524c4b5a7ba7f271749b5f,http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2017.46
+2babf665198a91932a4ce557f627c28e7e8f31f2,http://doi.acm.org/10.1145/3009977.3010004
+cd63759842a56bd2ede3999f6e11a74ccbec318b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995404
+e4d53e7f4c2052940841abc08f9574655f3f7fb4,http://doi.acm.org/10.1145/3078971.3079039
+cdfa7dccbc9e9d466f8a5847004973a33c7fcc89,https://doi.org/10.1109/TIFS.2013.2263498
+6adecb82edbf84a0097ff623428f4f1936e31de0,https://doi.org/10.1007/s11760-011-0246-4
+e10cbd049ac2f5cc8af9eb8e587b3408ad4bb111,https://doi.org/10.1117/1.JEI.24.5.053028
+c4cfdcf19705f9095fb60fb2e569a9253a475f11,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237333
+95f1790da3d0a4a5310a050512ce355b3c5aac86,https://doi.org/10.1109/ICIP.2016.7533142
+05785cb0dcaace54801aa486d4f8fdad3245b27a,https://doi.org/10.1109/ICPR.2016.7899760
+7196b3832065aec49859c61318037b0c8c12363a,https://doi.org/10.1007/s11432-014-5151-3
+d7a84db2a1bf7b97657b0250f354f249394dd700,https://doi.org/10.1109/ICIP.2010.5653518
+3150e329e01be31ba08b6d76fc46b0da88a5ddeb,http://doi.acm.org/10.1145/2927006.2927012
+746c0205fdf191a737df7af000eaec9409ede73f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8423119
+8184a92e1ccc7fdeb4a198b226feb325c63d6870,https://doi.org/10.1109/ICCE.2017.7889290
+6813208b94ffa1052760d318169307d1d1c2438e,http://doi.acm.org/10.1145/2818346.2830582
+c9c9ade2ef4dffb7582a629a47ea70c31be7a35e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237606
+c2422c975d9f9b62fbb19738e5ce5e818a6e1752,https://doi.org/10.1109/TNNLS.2015.2481006
+c0f9fae059745e50658d9605bd8875fc3a2d0b4b,http://doi.ieeecomputersociety.org/10.1109/BIGCOMP.2014.6741422
+38345264a9ca188c4facffe6e18a7e6865fb2966,http://doi.ieeecomputersociety.org/10.1109/BIBM.2017.8217969
+b5f79df712ad535d88ae784a617a30c02e0551ca,https://doi.org/10.1109/LSP.2015.2480758
+c907104680ad53bdc673f2648d713e4d26335825,http://doi.acm.org/10.1145/3077286.3077304
+7935f644c8044c0d3b81e2842e5ecc3672698bbb,https://doi.org/10.1109/ICIP.2011.6116258
+77223849321d57a03e0571a08e71eba06e38834a,http://doi.ieeecomputersociety.org/10.1109/EMS.2011.20
+eb3066de677f9f6131aab542d9d426aaf50ed2ce,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373860
+5dd3c9ac3c6d826e17c5b378d1575b68d02432d7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7292416
+e4d7b8eb0a8e6d2bb5b90b027c1bf32bad320ba5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8023876
+3cb057a24a8adba6fe964b5d461ba4e4af68af14,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6701391
+bc9bad25f8149318314971d8b8c170064e220ea8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8078542
+fb1b6138aeb081adf853316c0d83ef4c5626a7fa,https://doi.org/10.1109/ICIP.2017.8296302
+ccebd3bf069f5c73ea2ccc5791976f894bc6023d,https://doi.org/10.1109/ICPR.2016.7900186
+663efaa0671eace1100fdbdecacd94216a17b1db,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6619243
+35265cbd9c6ea95753f7c6b71659f7f7ef9081b6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7052327
+9026eb610916ec4ce77f0d7d543b7c2482ba4173,https://doi.org/10.1016/j.patrec.2012.03.006
+71f07c95a2b039cc21854c602f29e5be053f2aba,https://doi.org/10.1007/s00138-010-0250-7
+efc78a7d95b14abacdfde5c78007eabf9a21689c,http://dl.acm.org/citation.cfm?id=2939840
+6ca7a82ec1c51417c4f0b8eebddb53a73a3874b1,http://doi.acm.org/10.1145/2708463.2709059
+c858c74d30c02be2d992f82a821b925669bfca13,http://doi.org/10.1007/978-3-319-10605-2
+6ba6045e4b404c44f9b4dfce2d946019f0e85a72,https://doi.org/10.1109/ICPR.2016.7899962
+c900e0ad4c95948baaf0acd8449fde26f9b4952a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780969
+da23d90bacf246b75ef752a2cbb138c4fcd789b7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406360
+85567174a61b5b526e95cd148da018fa2a041d43,https://doi.org/10.1109/TMM.2016.2515367
+66490b5869822b31d32af7108eaff193fbdb37b0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373857
+9790ec6042fb2665c7d9369bf28566b0ce75a936,http://doi.acm.org/10.1145/3056540.3056546
+b54fe193b6faf228e5ffc4b88818d6aa234b5bb9,http://doi.acm.org/10.1145/2964284.2967287
+1e62ca5845a6f0492574a5da049e9b43dbeadb1b,https://doi.org/10.1109/LSP.2016.2637400
+aeb6b9aba5bb08cde2aebfeda7ced6c38c84df4a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8424644
+ebbceab4e15bf641f74e335b70c6c4490a043961,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813349
+2a6783ae51d7ee781d584ef9a3eb8ab1997d0489,https://doi.org/10.1109/CVPRW.2010.5543608
+9d1cebed7672210f9c411c5ba422a931980da833,http://doi.ieeecomputersociety.org/10.1109/ISM.2016.0078
+e4ad82afc563b783475ed45e9f2cd4c9e2a53e83,https://doi.org/10.1109/AICCSA.2016.7945716
+2e5b160892b70a1e846aa9dcdf132b8011937ec6,https://doi.org/10.1109/LSP.2017.2689921
+c9b958c2494b7ba08b5b460f19a06814dba8aee0,https://www.ncbi.nlm.nih.gov/pubmed/30080142
+55266ddbe9d5366e8cd1b0b645971cad6d12157a,https://doi.org/10.1109/SIU.2017.7960368
+ff9e042cccbed7e350a25b7d806cd17fb79dfdf9,https://doi.org/10.1007/s11760-016-0882-9
+ed0d8997a4b7b80a7cd3592e98bdbe5c3aab0cee,https://doi.org/10.1007/s11042-014-2345-z
+2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d,https://doi.org/10.1109/CVPRW.2011.5981801
+71d786fdb563bdec6ca0bbf69eba8e3f37c48c6f,https://doi.org/10.1109/SMC.2016.7844680
+0e05b365af662bc6744106a7cdf5e77c9900e967,https://doi.org/10.1007/s11042-014-2234-5
+a6e75b4ccc793a58ef0f6dbe990633f7658c7241,https://doi.org/10.1016/j.cviu.2016.10.007
+ca0185529706df92745e656639179675c717d8d5,https://doi.org/10.1504/IJCVR.2014.065571
+642a386c451e94d9c44134e03052219a7512b9de,http://doi.org/10.1016/j.imavis.2008.04.018
+e57108607d94aa158eb22ae50540ae6080e48d4b,http://doi.ieeecomputersociety.org/10.1109/ICMI.2002.1167051
+0f64e26d6dd6f1c99fe2050887fac26cafe9ed60,https://doi.org/10.1109/MCI.2016.2627668
+c675534be881e59a78a5986b8fb4e649ddd2abbe,https://doi.org/10.1109/ICIP.2017.8296548
+82e3f4099503633c042a425e9217bfe47cfe9d4b,http://doi.org/10.1007/s11042-015-2819-7
+a9fc8efd1aa3d58f89c0f53f0cb112725b5bda10,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8316891
+28d55935cc36df297fe21b98b4e2b07b5720612e,https://doi.org/10.1109/CISS.2016.7460569
+54f169ad7d1f6c9ce94381e9b5ccc1a07fd49cc6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7911334
+9774430006f1ed017156b17f3cf669071e398c58,https://doi.org/10.1109/SMC.2013.513
+3598d10d7d4f2b543afa8bcf6b2c34a3696ef155,https://doi.org/10.1109/SPAC.2017.8304347
+d6a5eb4377e2a67420778eab61b5a89046307bae,http://doi.ieeecomputersociety.org/10.1109/CRV.2014.37
+ed2f4e5ecbc4b08ee0784e97760a7f9e5ea9efae,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8241843
+90e7a86a57079f17f1089c3a46ea9bfd1d49226c,https://www.sciencedirect.com/science/article/pii/S0042698914002739
+c3390711f5ce6f5f0728ef88c54148bf9d8783a2,https://doi.org/10.1016/j.engappai.2015.03.016
+aa7c72f874951ff7ca3769439f2f39b7cfd4b202,https://doi.org/10.1109/JPROC.2009.2032355
+defd44b02a1532f47bdd8c8f2375e3df64ac5d79,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.139
+0ea05bbc0b0c8b7df10f16e9429ef90177bf94fa,https://doi.org/10.1163/016918610X538534
+753a277c1632dd61233c488cc55d648de3caaaa3,https://doi.org/10.1016/j.patcog.2011.02.013
+e50ec6b6d1c189edc127eb403c41a64f34fc0a6c,http://doi.ieeecomputersociety.org/10.1109/ICME.2014.6890187
+a88ced67f4ed7940c76b666e1c9c0f08b59f9cf8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771415
+e3ce4c3e1279e3dc0c14ff3bb2920aced9e62638,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099824
+10bfa4cecd64b9584c901075d6b50f4fad898d0b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7728013
+5af06815baa4b8f53adc9dc22f6eb3f6f1ad8ff8,https://doi.org/10.1186/s13640-017-0178-1
+c90427085909029afd2af01d1967e80b78e01b88,https://doi.org/10.1109/ACCESS.2017.2753830
+1584edf8106e8f697f19b726e011b9717de0e4db,https://doi.org/10.1049/iet-cvi.2015.0350
+a1cecbb759c266133084d98747d022c1e638340d,http://doi.acm.org/10.1145/2670473.2670501
+95288fa7ff4683e32fe021a78cbf7d3376e6e400,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014759
+0aaf785d7f21d2b5ad582b456896495d30b0a4e2,http://dl.acm.org/citation.cfm?id=3173789
+ec983394f800da971d243f4143ab7f8421aa967c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8340635
+919cb6160db66a8fe0b84cb7f171aded48a13632,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2014.2327978
+b3add9bc9e70b6b28ba31e843e9155e7c37f3958,http://doi.org/10.1007/s10766-017-0552-8
+b65b51c796ed667c4c7914bf12b1926fd6bbaa0c,https://doi.org/10.1016/j.neuroimage.2013.05.108
+9ef06cc958af2274afd193a1dca705c08234bcd3,https://doi.org/10.1109/ICIP.2014.7026207
+2563fc1797f187e2f6f9d9f4387d4bcadd3fbd02,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7410635
+0034e37a0faf0f71395245b266aacbf5412f190a,https://doi.org/10.1109/TMM.2014.2355134
+9101363521de0ec1cf50349da701996e4d1148c8,http://doi.ieeecomputersociety.org/10.1109/ICIAP.2007.28
+9cc8cf0c7d7fa7607659921b6ff657e17e135ecc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099536
+5e6fc99d8f5ebaab0e9c29bc0969530d201e0708,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8017477
+45877ff4694576f59c2a9ca45aa65f935378492a,http://doi.ieeecomputersociety.org/10.1109/IIH-MSP.2009.38
+4958c06da5581fd0b4904d3bf0ee09958ecdba5b,https://doi.org/10.1016/j.knosys.2016.12.005
+3c6542295cf7fe362d7d629ac10670bf30cdabce,https://doi.org/10.1109/DICTA.2015.7371264
+fa5ab4b1b45bf22ce7b194c20c724946de2f2dd4,https://doi.org/10.1109/TIP.2015.2421437
+e7e8c0bbee09b5af6f7df1de8f0f26da992737c4,https://doi.org/10.1109/IJCNN.2011.6033417
+1bcb1c6d6cebc9737f9933fcefbf3da8a612f994,https://doi.org/10.1016/j.jvcir.2017.10.008
+a8f1fc34089c4f2bc618a122be71c25813cae354,https://doi.org/10.1142/S0219467816500194
+4b9ec224949c79a980a5a66664d0ac6233c3d575,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7565501
+520782f07474616879f94aae0d9d1fff48910254,https://doi.org/10.1016/j.neucom.2014.11.038
+5748652924084b7b0220cddcd28f6b2222004359,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7492255
+c58ece1a3fa23608f022e424ec5a93cddda31308,https://doi.org/10.1109/JSYST.2014.2325957
+4b5ff8c67f3496a414f94e35cb35a601ec98e5cf,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6547306
+28f1542c63f5949ee6f2d51a6422244192b5a900,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780475
+08872d801f134e41753601e85971769b28314ca2,http://doi.acm.org/10.1145/2683483.2683560
+978b32ff990d636f7e2050bb05b8df7dfcbb42a1,https://doi.org/10.1109/BTAS.2014.6996270
+e40cb4369c6402ae53c81ce52b73df3ef89f578b,http://doi.org/10.1016/j.image.2015.01.009
+77d929b3c4bf546557815b41ed5c076a5792dc6b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265399
+1c25a3c8ef3e2c4dbff337aa727d13f5eba40fb2,http://doi.org/10.1007/s00371-016-1290-4
+20eeb83a8b6fea64c746bf993f9c991bb34a4b30,http://doi.org/10.1007/s00138-018-0956-5
+982d4f1dee188f662a4b5616a045d69fc5c21b54,https://doi.org/10.1109/IJCNN.2016.7727859
+c808c784237f167c78a87cc5a9d48152579c27a4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265437
+e4fa062bff299a0bcef9f6b2e593c85be116c9f1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7407641
+f834c50e249c9796eb7f03da7459b71205dc0737,https://doi.org/10.1109/TIP.2011.2166974
+0ed4b4d6d1a0c49c4eb619aab36db559b620d99f,https://doi.org/10.1016/j.neucom.2015.11.115
+dbb9601a1d2febcce4c07dd2b819243d81abb2c2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8361884
+7a94936ce558627afde4d5b439ec15c59dbcdaa4,https://doi.org/10.1007/s11263-013-0665-5
+5da98f7590c08e83889f3cec7b0304b3610abf42,https://doi.org/10.1016/j.eswa.2017.07.018
+a96c45ed3a44ad79a72499be238264ae38857988,http://doi.org/10.1007/s00138-016-0786-2
+0eed55ea9f401f25e1474cdbaf09367f44b4f490,https://doi.org/10.1016/j.neucom.2013.05.032
+378418fdd28f9022b02857ef7dbab6b0b9a02dbe,http://doi.org/10.1007/978-3-319-75420-8
+74cec83ee694b5d0e07d5d0bacd0aa48a80776aa,https://doi.org/10.1109/ISCAS.2013.6572506
+fc7b34a2e43bb3d3585e1963bb64a488e2f278a0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7045492
+5957936195c10521dadc9b90ca9b159eb1fc4871,https://doi.org/10.1109/TCE.2016.7838098
+55ee484f9cbd62111512485e3c1c3eadbf2e15c0,http://doi.ieeecomputersociety.org/10.1109/FG.2017.25
+88780bd55615c58d9bacc4d66fc2198e603a1714,https://doi.org/10.1109/EMBC.2016.7590730
+b68452e28951bf8db5f1193eca3a8fd9e2d0d7ef,https://doi.org/10.1109/ICACCI.2015.7275752
+eb38f20eaa1b849cabec99815883390f84daf279,https://doi.org/10.1016/j.patcog.2008.11.026
+ae78469de00ea1e7602ca468dcf188cdfe2c80d4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8466467
+26b9d546a4e64c1d759c67cd134120f98a43c2a6,https://doi.org/10.1109/ICMLA.2012.120
+5435d5f8b9f4def52ac84bee109320e64e58ab8f,http://doi.org/10.1007/s11042-016-4321-2
+edbddf8c176d6e914f0babe64ad56c051597d415,https://doi.org/10.1109/TMM.2016.2644866
+d2bad850d30973a61b1a7d7dc582241a41e5c326,http://doi.ieeecomputersociety.org/10.1109/ICICIC.2006.12
+d00e9a6339e34c613053d3b2c132fccbde547b56,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791154
+8db9188e5137e167bffb3ee974732c1fe5f7a7dc,https://doi.org/10.1109/TIP.2016.2612885
+6da3ff4250103369f4a6a39c8fb982438a97525c,https://doi.org/10.1109/THMS.2015.2404913
+405cf40f3ce74210f7e9862b2b828ce002b409ed,https://doi.org/10.1109/IJCNN.2017.7966244
+80f72b26c6571aee2ff04704bc7fd1a69bfa0b3f,https://doi.org/10.1016/j.patcog.2016.12.029
+e95895262f66f7c5e47dd46a70110d89c3b4c203,https://doi.org/10.1016/j.neucom.2016.09.023
+5226296884b3e151ce317a37f94827dbda0b9d16,https://doi.org/10.1109/IWBF.2016.7449690
+66ec085c362f698b40d6e0e7b10629462280c062,https://doi.org/10.1109/ICARCV.2004.1468855
+053ee4a4793f54b02dfabde5436fd7ee479e79eb,http://doi.acm.org/10.1145/3160504.3160507
+3e0035b447d0d4e11ceda45936c898256f321382,https://doi.org/10.1109/BMEI.2014.7002762
+188abc5bad3a3663d042ce98c7a7327e5a1ae298,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6152129
+4b0cb10c6c3f2d581ac9eb654412f70bc72ed661,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8172386
+97c59db934ff85c60c460a4591106682b5ab9caa,https://doi.org/10.1109/BTAS.2012.6374568
+6267dbeb54889be5bdb50c338a7c6ef82287084c,https://doi.org/10.1109/ICMLC.2010.5580567
+3bfa630a6dc6d1ca98e7b43c90dd9e8b98e361d6,https://doi.org/10.1109/ICIP.2015.7351140
+a0d5990eb150cdcb1c8b2967e6a4fe7a5d85063b,https://doi.org/10.1109/ICIP.2017.8296805
+67af3ec65f1dc535018f3671624e72c96a611c39,http://doi.org/10.1007/s11042-016-4058-y
+1fe1a78c941e03abe942498249c041b2703fd3d2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8393355
+56bcc89fb1e05d21a8b7b880c6b4df79271ceca5,https://doi.org/10.1007/s11760-013-0441-6
+c6724c2bb7f491c92c8dd4a1f01a80b82644b793,https://www.ncbi.nlm.nih.gov/pubmed/19167865
+1d30f813798c55ae4fe454829be6e2948ee841da,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4270396
+3e1190655cc7c1159944d88bdbe591b53f48d761,https://doi.org/10.1007/s10489-013-0464-2
+5fc97d6cb5af21ed196e44f22cee31ce8c51ef13,http://doi.acm.org/10.1145/2742060.2743769
+45e043dffc57a9070f483ac4aec2c5cd2cec22cb,http://doi.acm.org/10.1145/3130977
+9b1022a01ca4ecf8c1fa99b1b39a93924de2fcfb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8316962
+250b73ec5a4f78b7b4ea3aba65c27fc1352154d5,https://doi.org/10.1109/TIP.2015.2463223
+9e99f818b37d44ec6aac345fb2c5356d83d511c7,https://doi.org/10.1109/ISSPA.2012.6310540
+61bc124537f414f6fcb4d1ff476681b5a0ee222a,http://doi.ieeecomputersociety.org/10.1109/WIW.2016.043
+445e3ba7eabcc55b5d24f951b029196b47830684,https://doi.org/10.1109/TMM.2016.2591508
+97c1f68fb7162af326cd0f1bc546908218ec5da6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7471977
+2961e14c327341d22d5f266a6872aa174add8ac4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6654170
+de8657e9eab0296ac062c60a6e10339ccf173ec1,http://doi.ieeecomputersociety.org/10.1109/BRACIS.2014.51
+2a41388040141ef6b016c100ef833a2a73ab8b42,https://doi.org/10.1016/j.neucom.2017.03.033
+61e2044184d86d0f13e50ecaa3da6a4913088c76,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7572183
+ee2ec0836ded2f3f37bf49fa0e985280a8addaca,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8368755
+9753ee59db115e1e84a7c045f2234a3f63f255b1,http://doi.ieeecomputersociety.org/10.1109/ACII.2015.7344683
+5f448ab700528888019542e6fea1d1e0db6c35f2,https://doi.org/10.1109/LSP.2016.2533721
+37c5e3b6175db9eaadee425dc51bc7ce05b69a4e,https://doi.org/10.1007/s00521-013-1387-x
+5fb9944b18f5a4a6d20778816290ed647f5e3853,http://doi.acm.org/10.1145/3080538.3080540
+8ef465ff12ee1d2be2a99d1c628117a4ce890a6b,https://doi.org/10.1016/j.camwa.2010.08.082
+eaf020bc8a3ed5401fc3852f7037a03b2525586a,http://arxiv.org/abs/1710.07735
+992e4119d885f866cb715f4fbf0250449ce0db05,https://doi.org/10.1007/s00138-015-0674-1
+052c5ef6b20bf3e88bc955b6b2e86571be08ba64,https://doi.org/10.1109/TIFS.2011.2170068
+b2cb335ded99b10f37002d09753bd5a6ea522ef1,https://doi.org/10.1109/ISBA.2017.7947679
+122f52fadd4854cf6c9287013520eced3c91e71a,https://doi.org/10.1109/TIP.2016.2515987
+985bbe1d47b843fa0b974b4db91be23f218d1ce7,https://doi.org/10.1007/978-3-319-68121-4
+780c8a795baca1ba4cb4956cded877dd3d1ca313,http://doi.ieeecomputersociety.org/10.1109/ISSPIT.2013.6781879
+c9be1001706bcdd8b35fa9cae733c592e90c7ec3,http://doi.ieeecomputersociety.org/10.1109/DICTA.2010.54
+0750c796467b6ef60b0caff5fb199337d54d431e,https://doi.org/10.1109/ICMLC.2016.7873015
+522a4ca705c06a0436bbe62f46efe24d67a82422,http://doi.org/10.1007/s11042-017-5475-2
+bad2df94fa771869fa35bd11a1a7ab2e3f6d1da3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7344635
+31ba7f5e09a2f0fe9cf7ea95314723206dcb6059,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.300
+bf2eb77e9b795a4a0a38ed4b1c8dd4b2c9a74317,https://doi.org/10.1007/978-3-319-69900-4_70
+11ba01ce7d606bab5c2d7e998c6d94325521b8a0,https://doi.org/10.1109/ICIP.2015.7350911
+553a605243b77a76c1ed4c1ad4f9a43ff45e391b,https://doi.org/10.1109/CISP-BMEI.2017.8302001
+4db99a2268a120c7af636387241188064ea42338,https://www.ncbi.nlm.nih.gov/pubmed/21820862
+414d78e32ac41e6ff8b192bc095fe55f865a02f4,http://arxiv.org/abs/1706.00631
+f402e088dddfaad7667bd4def26092d05f247206,https://doi.org/10.1109/TITS.2015.2475721
+de162d4b8450bf2b80f672478f987f304b7e6ae4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237454
+a5f35880477ae82902c620245e258cf854c09be9,http://doi.org/10.1016/j.imavis.2013.12.004
+5dd473a4a9c6337b083edf38b6ddf5a6aece8908,http://arxiv.org/abs/1711.08238
+22dbdace88c8f4bda2843ed421e3708ec0744237,https://doi.org/10.1016/j.cviu.2013.12.010
+3960882a7a1cd19dfb711e35a5fc1843ed9002e7,http://doi.acm.org/10.1145/2487575.2487701
+57c270a9f468f7129643852945cf3562cbb76e07,https://doi.org/10.1016/j.imavis.2016.07.004
+b84dde74dddf6a3281a0b22c68999942d2722919,http://dl.acm.org/citation.cfm?id=2910703
+3a49507c46a2b8c6411809c81ac47b2b1d2282c3,http://doi.org/10.1007/s11042-017-5319-0
+e865908ed5e5d7469b412b081ca8abd738c72121,https://doi.org/10.1109/TIP.2016.2621667
+e1c50cf0c08d70ff90cf515894b2b360b2bc788b,https://doi.org/10.1109/ICSMC.2007.4414085
+24603ed946cb9385ec541c86d2e42db47361c102,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373865
+49e4f05fa98f63510de76e7abd8856ff8db0f38d,http://doi.ieeecomputersociety.org/10.1109/FG.2017.110
+1e3068886b138304ec5a7296702879cc8788143d,http://doi.org/10.1007/s11263-013-0630-3
+ebce3f5c1801511de9e2e14465482260ba5933cc,http://doi.acm.org/10.1145/3126594.3126640
+710c3aaffef29730ffd909a63798e9185f488327,https://doi.org/10.1109/ICPR.2016.7900095
+f14403d9d5fbc4c6e8aeb7505b5d887c50bad8a4,https://doi.org/10.1109/ICIP.2012.6467433
+fbc2f5cf943f440b1ba2374ecf82d0176a44f1eb,https://www.ncbi.nlm.nih.gov/pubmed/30040629
+8a2bedaa38abf173823944f0de2c84f5b2549609,https://doi.org/10.1109/TNNLS.2016.2573644
+b91f54e1581fbbf60392364323d00a0cd43e493c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553788
+fb6cc23fd6bd43bd4cacf6a57cd2c7c8dfe5269d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5339084
+d1184939e06dbc3b495c883c53b684c6d6aa9e48,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477669
+3bf8e4d89b9e6d004de6ea52e3e9d68f6015f94b,http://dl.acm.org/citation.cfm?id=3240893
+023decb4c56f2e97d345593e4f7b89b667a6763d,http://doi.org/10.1007/s10994-005-3561-6
+fb1627ed224bf7b1e3d80c097316ed7703951df2,https://doi.org/10.1109/VCIP.2017.8305094
+43fe03ec1acb6ea9d05d2b22eeddb2631bd30437,https://doi.org/10.1109/ICIP.2017.8296394
+ccca2263786429b1b3572886ce6a2bea8f0dfb26,https://doi.org/10.1007/s10044-014-0388-4
+5efdf48ca56b78e34dc2f2f0ce107a25793d3fc2,http://doi.ieeecomputersociety.org/10.1109/TVCG.2016.2641442
+c4a2cd5ec81cdfd894c9a20d4ffb8cda637aab1f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5326314
+31f1c92dbfa5aa338a21a0cb15d071cb9dc6e362,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337733
+838dad9d1d68d29be280d92e69410eaac40084bc,https://doi.org/10.1109/HPCSim.2014.6903749
+eedb2c34c36017b9c5aa6ce8bff2ab152e713cee,https://doi.org/10.1007/s00521-008-0225-z
+bfdafe932f93b01632a5ba590627f0d41034705d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6134770
+cde7901c0945683d0c677b1bb415786e4f6081e6,http://doi.ieeecomputersociety.org/10.1109/IRI.2015.44
+70516aede32cf0dbc539abd9416c44faafc868bd,https://doi.org/10.1109/MICAI.2013.16
+7b455cbb320684f78cd8f2443f14ecf5f50426db,http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2017.33
+874da338c01fb7a87d605fcde6c52835eee03d5e,http://doi.ieeecomputersociety.org/10.1109/ICAPR.2009.20
+ce30ddb5ceaddc0e7d308880a45c135287573d0e,https://doi.org/10.1109/ICSMC.2012.6378304
+f33bd953d2df0a5305fc8a93a37ff754459a906c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961800
+32b76220ed3a76310e3be72dab4e7d2db34aa490,https://doi.org/10.1109/SMC.2014.6974364
+1cfe8c1d341dbf8cc43040b37ca3552385adb10b,http://doi.acm.org/10.1145/2461466.2461473
+22c06284a908d8ad0994ad52119773a034eed7ee,http://doi.acm.org/10.1145/2964284.2967236
+1f02bf412a82ad99fe99dc3cfb3adec9dd41eabb,https://doi.org/10.1007/s11760-016-1052-9
+0f7e9199dad3237159e985e430dd2bf619ef2db5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7883882
+636c786d4e4ac530ac85e3883a2f2cf469e45fe2,https://doi.org/10.1016/j.neucom.2016.12.043
+13f065d4e6dfe2a130bd64d73eee97d10d9f7d33,https://doi.org/10.1109/DICTA.2015.7371222
+cbfcd1ec8aa30e31faf205c73d350d447704afee,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7955089
+785eeac2e236a85a45b4e0356c0745279c31e089,https://doi.org/10.1109/TIFS.2014.2359543
+127c7f87f289b1d32e729738475b337a6b042cf7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8436988
+eacf974e235add458efb815ada1e5b82a05878fa,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4577667
+283d381c5c2ba243013b1c4f5e3b29eb906fa823,http://doi.ieeecomputersociety.org/10.1109/ICCV.2005.222
+77c3574a020757769b2ca807ff4b95a88eaa2a37,https://doi.org/10.1109/MSP.2015.2410783
+6196f4be3b28684f6528b8687adccbdf9ac5c67c,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.267
+d855791bc23b4aa8e751d6a4e2ae7f5566a991e8,http://doi.acm.org/10.1145/3012941
+b0358af78b7c5ee7adc883ef513bbcc84a18a02b,https://doi.org/10.1109/WACV.2017.10
+26a5136ee4502500fb50cd5ade814aad45422771,https://doi.org/10.1142/S0218001413560028
+050e51268b0fb03033428ac777ccfef2db752ab3,https://doi.org/10.1109/DICTA.2007.4426834
+c997744db532767ee757197491d8ac28d10f1c0f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8364339
+68c5b4d9ce2a0c75ba515870923a4bd1b7d8f9b5,https://doi.org/10.1109/CISP-BMEI.2017.8301919
+e7144f5c19848e037bb96e225d1cfd961f82bd9f,http://doi.ieeecomputersociety.org/10.1109/FG.2017.126
+ab68837d09986c592dcab7d08ee6dfb40e02916f,https://doi.org/10.1007/978-3-319-11289-3_23
+b5979489e11edd76607c219a8bdc83ba4a88ab38,https://doi.org/10.1109/ACCESS.2017.2778011
+8706c3d49d1136035f298041f03bb70dc074f24d,http://doi.ieeecomputersociety.org/10.1109/DICTA.2008.12
+11a6593e6e35f95ebeb5233897d1d8bcad6f9c87,https://doi.org/10.1007/s11063-017-9615-5
+84be18c7683417786c13d59026f30daeed8bd8c9,https://doi.org/10.1007/s00138-016-0755-9
+635d2696aa597a278dd6563f079be06aa76a33c0,https://doi.org/10.1109/ICIP.2016.7532429
+3bd10f7603c4f5a4737c5613722124787d0dd818,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7415949
+d31328b12eef33e7722b8e5505d0f9d9abe2ffd9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373866
+b961e512242ddad7712855ab00b4d37723376e5d,http://doi.org/10.1007/s11554-010-0178-1
+57de1a09db680e0b4878ceda68d626ae4e44ccfe,https://doi.org/10.1016/j.neucom.2014.10.111
+c8585c95215bc53e28edb740678b3a0460ca8aa4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373829
+93eb3963bc20e28af26c53ef3bce1e76b15e3209,https://doi.org/10.1109/ICIP.2017.8296992
+a52a69bf304d49fba6eac6a73c5169834c77042d,https://doi.org/10.1109/LSP.2017.2789251
+d3a3d15a32644beffaac4322b9f165ed51cfd99b,https://doi.org/10.1109/SIU.2016.7496197
+05184f01e66d7139530729b281da74db35a178d2,http://ieeexplore.ieee.org/document/6460470/
+ad50f6899103eff0ee4504e539c38eb965fd1309,https://doi.org/10.1109/IJCNN.2010.5596374
+a92e24c8c53e31fc444a13bd75b434b7207c58f1,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2014.2317711
+049186d674173ebb76496f9ecee55e17ed1ca41b,https://doi.org/10.1109/ACCESS.2017.2724763
+3cbd3124b1b4f95fcdf53abd358d7ceec7861dda,http://doi.acm.org/10.1145/3019612.3019641
+176d9121e4e645344de4706dfb345ad456bfb84a,https://doi.org/10.1117/1.JEI.24.2.023009
+864d50327a88d1ff588601bf14139299ced2356f,https://doi.org/10.1109/FSKD.2016.7603151
+a5f70e0cd7da2b2df05fadb356a24743f3cf459a,http://doi.org/10.1007/s11063-017-9649-8
+5278b7a6f1178bf5f90cd3388908925edff5ad46,https://doi.org/10.1007/s11704-015-4291-y
+524c25217a6f1ed17f47871e947a5581d775fa56,https://doi.org/10.1117/12.2030875
+b6bb883dd14f2737d0d6225cf4acbf050d307634,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8382306
+15a9f812e781cf85c283f7cf2aa2928b370329c5,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2469281
+982ede05154c1afdcf6fc623ba45186a34f4b9f2,https://doi.org/10.1109/TMM.2017.2659221
+5642bafa7955b69f05c11230151cd59fcbe43b8e,https://doi.org/10.1007/s11760-012-0404-3
+7f9be0e08784835de0f8bc3a82fcca02b3721dc1,https://doi.org/10.1109/IJCNN.2014.6889744
+228ea13041910c41b50d0052bdce924037c3bc6a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8434495
+09903df21a38e069273b80e94c8c29324963a832,http://doi.org/10.1007/s11042-017-4980-7
+2b286ed9f36240e1d11b585d65133db84b52122c,http://doi.acm.org/10.1145/3130800.3130837
+98fd92d68a143a5ced4a016fa3b7addd6b4a0122,http://doi.org/10.1007/s11704-016-6066-5
+93e1e195f294c463f4832c4686775bf386b3de39,https://doi.org/10.1109/TIP.2015.2490551
+40c1de7b1b0a087c590537df55ecd089c86e8bfc,http://doi.org/10.1162/NECO_a_00401
+adf9998214598469f7a097bc50de4c23784f2a5a,http://doi.ieeecomputersociety.org/10.1109/ICPR.2014.745
+ccb2ecb30a50460c9189bb55ba594f2300882747,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8334751
+b11b71b704629357fe13ed97b216b9554b0e7463,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7736040
+972b1a7ef8cc9c83c2c6d8d126f94f27b567d7d0,http://doi.org/10.1007/978-3-319-99978-4
+bf37a81d572bb154581845b65a766fab1e5c7dda,http://doi.org/10.1007/s11760-017-1111-x
+f6b4811c5e7111485e2c9cc5bf63f8ac80f3e2d7,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2016.2569436
+5fb59cf5b31a80d8c70d91660092ef86494be577,https://doi.org/10.1109/CISP-BMEI.2017.8301923
+ed70d1a9435c0b32c0c75c1a062f4f07556f7016,https://doi.org/10.1109/ICIP.2015.7350774
+897aa4aaa474fed41233faec9b70b802aea5fdea,https://doi.org/10.1142/S0218001414560126
+35b3dc0e961a15a7a60b95490a989f91680acc7c,http://doi.ieeecomputersociety.org/10.1109/TDSC.2016.2550459
+fb3aaf18ea07b30d1836e7cf2ab9fa898627fe93,https://doi.org/10.1109/ACCESS.2017.2784096
+b58d381f9f953bfe24915246b65da872aa94f9aa,https://doi.org/10.1109/SMAP.2013.13
+b97c7f82c1439fa1e4525e5860cb05a39cc412ea,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4430537
+5d9971c6a9d5c56463ea186850b16f8969a58e67,http://doi.org/10.1007/s11042-017-5354-x
+32a440720ee988b7b41de204b2910775171ee12c,https://doi.org/10.1109/ICIP.2011.6116351
+8e9b92a805d1ce0bf4e0c04133d26e28db036e6a,https://doi.org/10.1109/DICTA.2017.8227428
+d9e34af95c21c0e114b61abccbc653480b370c3b,https://doi.org/10.1016/j.patcog.2005.10.020
+c586463b8dbedce2bfce3ee90517085a9d9e2e13,http://doi.ieeecomputersociety.org/10.1109/IIH-MSP.2006.9
+f531ce18befc03489f647560ad3e5639566b39dc,http://doi.ieeecomputersociety.org/10.1109/ACOMP.2015.9
+8a63a2b10068b6a917e249fdc73173f5fd918db0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8120021
+e42f3c27391821f9873539fc3da125b83bffd5a2,https://doi.org/10.1109/HPCS.2010.5547096
+be7444c891caf295d162233bdae0e1c79791d566,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014816
+9b9f6e5eb6d7fa50300d67502e8fda1006594b84,http://dl.acm.org/citation.cfm?id=3123323
+9989eda2f5392cfe1f789bb0f6213a46d92d1302,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477584
+7f44e5929b11ce2192c3ae81fbe602081a7ab5c4,http://doi.org/10.1007/s11554-016-0645-4
+e12b2c468850acb456b0097d5535fc6a0d34efe3,https://doi.org/10.1016/j.neucom.2011.03.009
+2a98b850139b911df5a336d6ebf33be7819ae122,https://doi.org/10.1109/ICIP.2015.7350806
+9ca542d744149f0efc8b8aac8289f5e38e6d200c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789587
+ab00ea1aa2f81fbe139b4632ec3682dfb7312ef0,http://doi.ieeecomputersociety.org/10.1109/WACV.2014.6835994
+696236fb6f986f6d5565abb01f402d09db68e5fa,http://doi.org/10.1007/s41095-018-0112-1
+ee2217f9d22d6a18aaf97f05768035c38305d1fa,https://doi.org/10.1109/APSIPA.2015.7415501
+11df25b4e074b7610ec304a8733fa47625d9faca,http://doi.org/10.1016/j.patrec.2012.09.024
+8ccbbd9da0749d96f09164e28480d54935ee171c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5597578
+c05ae45c262b270df1e99a32efa35036aae8d950,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354120
+4909ed22b1310f1c6f2005be5ce3349e3259ff6a,https://doi.org/10.1109/ROBIO.2009.4913106
+61a3c45c9f802f9d5fa8d94fee811e203bac6487,https://doi.org/10.1109/TIFS.2016.2567318
+640e12837241d52d04379d3649d050ee3760048c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5692624
+0fcf04fda0bea5265b73c85d2cc2f7f70416537b,https://doi.org/10.1109/TCSVT.2015.2409012
+56fb30b24e7277b47d366ca2c491749eee4d6bb1,https://doi.org/10.1109/ICAPR.2015.7050658
+1473e6f2d250307f0421f1e2ea68b6485d3bd481,https://doi.org/10.1109/IJCNN.2016.7727333
+af3e6e20de06b03c33f8e85eced74c2d096730ea,https://doi.org/10.1109/CISP-BMEI.2017.8301972
+cf2e1ebb9609f46af6de0c15b4f48d03e37e54ba,http://arxiv.org/abs/1503.01521
+c914d2ba06ec3fd1baa0010dcc4d16c7c34fc225,http://doi.org/10.1007/978-3-319-11071-4
+2ef1b1b5ed732634e005df779fd9b21da0ffe60c,https://doi.org/10.1016/j.image.2017.03.012
+ae89e464576209b1082da38e0cee7aeabd03d932,https://doi.org/10.1007/s00521-005-0017-7
+c9832564d5dc601113b4d80e5a05ede6fee9f7dd,https://doi.org/10.1109/ISBA.2017.7947687
+7ec431e36919e29524eceb1431d3e1202637cf19,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8365242
+2a612a7037646276ff98141d3e7abbc9c91fccb8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909615
+5fa6f72d3fe16f9160d221e28da35c1e67a5d951,http://doi.acm.org/10.1145/3061639.3062182
+a168ca2e199121258fbb2b6c821207456e5bf994,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553808
+8f99f7ccb85af6d4b9e015a9b215c529126e7844,https://doi.org/10.1109/ROMAN.2017.8172359
+003ba2001bd2614d309d6ec15e9e2cbe86db03a1,https://doi.org/10.1109/ISCAS.2005.1465264
+f27e5a13c1c424504b63a9084c50f491c1b17978,http://dl.acm.org/citation.cfm?id=3097991
+feea73095b1be0cbae1ad7af8ba2c4fb6f316d35,http://dl.acm.org/citation.cfm?id=3126693
+8dd9c97b85e883c16e5b1ec260f9cd610df52dec,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8404159
+e52f73c77c7eaece6f2d8fdd0f15327f9f007261,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099713
+782eee555067b2d6d24db87775e1ded5fb047491,https://doi.org/10.1109/MMSP.2008.4665158
+e57014b4106dd1355e69a0f60bb533615a705606,http://doi.org/10.1007/s13748-018-0143-y
+b6f15bf8723b2d5390122442ab04630d2d3878d8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163142
+b69bcb5f73999ea12ff4ac1ac853b72cd5096b2d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1613024
+f839ae810338e3b12c8e2f8db6ce4d725738d2d9,http://doi.ieeecomputersociety.org/10.1109/FG.2017.115
+919bdc161485615d5ee571b1585c1eb0539822c8,http://ieeexplore.ieee.org/document/6460332/
+4328933890f5a89ad0af69990926d8484f403e4b,http://doi.acm.org/10.1145/2072298.2071993
+b484141b99d3478a12b8a6854864c4b875d289b8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6117595
+7361b900018f22e37499443643be1ff9d20edfd6,http://doi.org/10.1049/iet-bmt.2016.0169
+d4b4020e289c095ce2c2941685c6cd37667f5cc9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7489442
+a7da7e5a6a4b53bf8736c470ff8381a654e8c965,https://doi.org/10.1007/s13042-011-0045-9
+bb0ecedde7d6e837dc9a5e115302a2aaad1035e1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373838
+3dce635ce4b55fb63fc6d41b38640403b152a048,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411225
+62fddae74c553ac9e34f511a2957b1614eb4f937,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406684
+f0b4f5104571020206b2d5e606c4d70f496983f9,https://doi.org/10.1109/FUZZ-IEEE.2014.6891674
+c98def5f9d0c6ae519fe0aeebe5378f65b14e496,https://doi.org/10.1117/12.2064730
+29db16efc3b378c50511f743e5197a4c0b9e902f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406401
+7f8d2d7eaa03132caefe0f3b126b5b369a712c9d,http://doi.ieeecomputersociety.org/10.1109/ACHI.2009.33
+fa9610c2dc7e2a79e0096ac033b11508d8ae7ed7,https://doi.org/10.1109/FSKD.2016.7603418
+95289007f2f336e6636cf8f920225b8d47c6e94f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6472796
+42fff5b37006009c2dbfab63c0375c7c7d7d8ee3,https://doi.org/10.1007/s11042-014-2228-3
+c252bc84356ed69ccf53507752135b6e98de8db4,https://doi.org/10.1016/j.neucom.2015.02.067
+705e086bb666d129a6969882cfa49282116a638e,https://doi.org/10.1109/TNNLS.2014.2376963
+205f035ec90a7fa50fd04fdca390ce83c0eea958,http://doi.acm.org/10.1145/3131287
+dfbbe8100fcd70322a431bd5d2c2d52a65fd4bbd,http://doi.acm.org/10.1145/2818346.2823313
+1c0acf9c2f2c43be47b34acbd4e7338de360e555,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8461986
+110359824a0e3b6480102b108372793265a24a86,https://doi.org/10.1016/j.image.2016.03.011
+6256b47342f080c62acd106095cf164df2be6020,https://doi.org/10.1007/978-3-319-24702-1_6
+b34fdab6864782ce60fd90d09f5d886bd83f84f5,https://doi.org/10.1002/cpe.3766
+d1ee9e63c8826a39d75fa32711fddbcc58d5161a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6613000
+61329bc767152f01aa502989abc854b53047e52c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8450832
+aeb36fac7516753a14c3c690f352de78e70f8c6e,http://doi.ieeecomputersociety.org/10.1109/T-AFFC.2013.13
+aa892fe17c06e2b18db2b12314499a741e755df7,https://doi.org/10.1109/IJCNN.2017.7966089
+f11c76efdc9651db329c8c862652820d61933308,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163100
+e9b0a27018c7151016a9fe01c98b4c21d6ebf4be,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7471957
+9d5bfaf6191484022a6731ce13ac1b866d21ad18,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139086
+a0f6196d27a39cde2dbf62c08d89cbe489600bb0,https://doi.org/10.1016/j.cose.2016.03.007
+3cc2a2eaaacbf96c6b9abc1cf91bfefabf6fcfdd,https://doi.org/10.1109/TCSVT.2014.2317887
+2ce1bac5ddc4cf668bbbb8879cd21dfb94b5cfe4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099709
+c1c2775e19d6fd2ad6616f69bda92ac8927106a2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6196236
+54ba18952fe36c9be9f2ab11faecd43d123b389b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163085
+f5603ceaebe3caf6a812edef9c4b38def78cbf34,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4455998
+ca44a838da4187617dca9f6249d8c4b604661ec7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7351564
+8cffe360a05085d4bcba111a3a3cd113d96c0369,http://doi.ieeecomputersociety.org/10.1109/ICCV.2011.6126248
+1eb9c859ff7537182a25556635954bcd11830822,https://doi.org/10.1109/ICDSP.2015.7252004
+4f8345f31e38f65f1155569238d14bd8517606f4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6618941
+7e48711c627edf90e9b232f2cbc0e3576c8f2f2a,https://doi.org/10.1007/s11760-015-0777-1
+e69a765d033ef6ea55c57ca41c146b27964c5cf2,https://doi.org/10.1109/ISCAS.2017.8050764
+68604e7e1b01cdbd3c23832976d66f1a86edaa8f,http://doi.org/10.1134/S1054661818030136
+b44f03b5fa8c6275238c2d13345652e6ff7e6ea9,https://doi.org/10.1109/GlobalSIP.2017.8309138
+3b21aaf7def52964cf1fcc5f11520a7618c8fae3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099900
+4dbfbe5fd96c9efc8c3c2fd54406b62979482678,https://doi.org/10.1016/j.jvcir.2014.07.007
+0701b01bc99bf3b64050690ceadb58a8800e81ed,https://doi.org/10.1007/s11042-015-3107-2
+4e8f301dbedc9063831da1306b294f2bd5b10477,https://doi.org/10.1109/BIOSIG.2016.7736919
+9057044c0347fb9798a9b552910a9aff150385db,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6778411
+22d5aeb25bb034f6ae2fc50b5cdd9934a85d6505,http://doi.acm.org/10.1145/2808469.2810102
+a2b4a6c6b32900a066d0257ae6d4526db872afe2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8272466
+080ab68a898a3703feead145e2c38361ae84a0a8,https://doi.org/10.1109/TIFS.2014.2343833
+f472cb8380a41c540cfea32ebb4575da241c0288,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7284869
+8598d31c7ca9c8f5bb433409af5e472a75037b4d,https://doi.org/10.1109/JPROC.2008.916364
+3c086601ce0bac61047b5b931b253bd4035e1e7a,https://doi.org/10.1109/ICIP.2015.7350897
+44d93039eec244083ac7c46577b9446b3a071f3e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1415571
+493c8591d6a1bef5d7b84164a73761cefb9f5a25,http://dl.acm.org/citation.cfm?id=3159691
+1aa61dd85d3a5a2fe819cba21192ec4471c08628,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8359518
+d34f546e61eccbac2450ca7490f558e751e13ec3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8461800
+3e01f2fefe219bfeb112f1d82e76ebba4c0e2aac,http://doi.ieeecomputersociety.org/10.1109/WACV.2014.6836097
+ed32df6b122b15a52238777c9993ed31107b4bed,http://doi.org/10.1016/j.eswa.2017.03.008
+f4ba07d2ae6c9673502daf50ee751a5e9262848f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7284810
+52b102620fff029b80b3193bec147fe6afd6f42e,http://dl.acm.org/citation.cfm?id=3028863
+a98ff1c2e3c22e3d0a41a2718e4587537b92da0a,http://doi.org/10.1007/978-3-319-68548-9_19
+c23bd1917badd27093c8284bd324332b8c45bfcf,https://doi.org/10.1109/IJCNN.2010.5596316
+32bab8fe6db08c9d1e906be8a9c7e8cf7a0f0b99,http://doi.ieeecomputersociety.org/10.1109/CIS.2007.196
+64e216c128164f56bc91a33c18ab461647384869,http://doi.ieeecomputersociety.org/10.1109/AVSS.2016.7738017
+7f4040b482d16354d5938c1d1b926b544652bf5b,http://doi.acm.org/10.1145/2502081.2502115
+2ae2e29c3e9cc2d94a26da5730df7845de0d631b,https://doi.org/10.1109/TCSVT.2011.2129670
+9abab00de61dd722b3ad1b8fa9bffd0001763f8b,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2420563
+a76969df111f9ee9f0b898b51ad23a721d289bdc,https://doi.org/10.1109/ICMLA.2015.185
+53509017a25ac074b5010bb1cdba293cdf399e9b,http://doi.ieeecomputersociety.org/10.1109/AVSS.2012.41
+f1e44e64957397d167d13f8f551cae99e5c16c75,https://doi.org/10.1007/s11042-013-1548-z
+f1061b2b5b7ca32edd5aa486aecc63a0972c84f3,https://doi.org/10.1109/TIP.2017.2760512
+9166f46aa3e58befaefd3537e5a11b31ebeea4d0,https://doi.org/10.1109/ICIP.2015.7351505
+2f73203fd71b755a9601d00fc202bbbd0a595110,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8394868
+a8bb698d1bb21b81497ef68f0f52fa6eaf14a6bf,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6587752
+1951dc9dd4601168ab5acf4c14043b124a8e2f67,http://doi.org/10.1162/neco_a_01116
+cca476114c48871d05537abb303061de5ab010d6,https://doi.org/10.15439/2016F472
+ecdd83002f69c2ccc644d07abb44dd939542d89d,https://doi.org/10.1016/j.neucom.2015.07.011
+b98e7a8f605c21e25ac5e32bfb1851a01f30081b,http://doi.acm.org/10.1145/2393347.2396303
+48906f609446afcdaacbe1d65770d7a6165a8eee,https://doi.org/10.1007/s12559-017-9482-4
+93c0405b1f5432eab11cb5180229720604ffd030,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8462228
+dac6e9d708a9757f848409f25df99c5a561c863c,https://doi.org/10.1109/LSP.2014.2334656
+a216f7863fc6ab15e2bb7a538dfe00924e1da0ab,http://doi.ieeecomputersociety.org/10.1109/FG.2015.7163087
+b313751548018e4ecd5ae2ce6b3b94fbd9cae33e,http://doi.org/10.1007/s11263-008-0143-7
+dc6ad30c7a4bc79bb06b4725b16e202d3d7d8935,http://doi.org/10.1007/s11042-017-4646-5
+179564f157a96787b1b3380a9f79701e3394013d,http://dl.acm.org/citation.cfm?id=2493502
+d2fac640086ba89271ad7c1ebf36239ecd64605e,http://ieeexplore.ieee.org/document/6460449/
+9c2f20ed168743071db6268480a966d5d238a7ee,http://dl.acm.org/citation.cfm?id=1456304
+f19bf8b5c1860cd81b5339804d5db9e791085aa7,https://doi.org/10.1109/SMC.2017.8122640
+ca0804050cf9d7e3ed311f9be9c7f829e5e6a003,http://doi.ieeecomputersociety.org/10.1109/ICPR.2004.1333904
+05a116cb6e220f96837e4418de4aa8e39839c996,http://doi.ieeecomputersociety.org/10.1109/T-AFFC.2012.30
+bc08dfa22949fbe54e15b1a6379afade71835968,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7899970
+16fadde3e68bba301f9829b3f99157191106bd0f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4562953
+e546572f8205570de4518bcf8d0345465e51d7a0,https://doi.org/10.1109/ICIP.2015.7351318
+b7845e0b0ce17cde7db37d5524ef2a61dee3e540,https://doi.org/10.1109/ICPR.2016.7899608
+a3ed0f15824802359e05d9777cacd5488dfa7dba,http://doi.acm.org/10.1145/2851581.2892282
+1a47f12a2490f6775c0ad863ac856de27f5b3e03,https://doi.org/10.1016/j.sigpro.2014.11.010
+a9af0dc1e7a724464d4b9d174c9cf2441e34d487,https://doi.org/10.1142/S0219691316500351
+ee6e4324123b99d94a7a23d9bddf026f39903693,https://doi.org/10.1109/ISMICT.2013.6521709
+43cbe3522f356fbf07b1ff0def73756391dc3454,https://doi.org/10.1109/WIFS.2011.6123140
+cf784156547c3be146706e2763c1a52d939d1722,https://doi.org/10.1007/s11042-017-5038-6
+0f29bc5d8458358d74dc8c4fd6968b4182dd71d2,https://doi.org/10.1109/ICIP.2016.7532637
+2340d810c515dc0c9fd319f598fa8012dc0368a0,https://doi.org/10.1109/AFGR.2008.4813420
+af97e792827438ddea1d5900960571939fc0533e,https://doi.org/10.1109/ICSMC.2005.1571460
+ec39e9c21d6e2576f21936b1ecc1574dadaf291e,https://doi.org/10.1109/WACV.2017.130
+a92c207031b0778572bf41803dba1a21076e128b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8433557
+c0c0b8558b17aa20debc4611275a4c69edd1e2a7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909629
+3d4d3f70352dc833e454a5756d682f27eca46e5d,http://doi.ieeecomputersociety.org/10.1109/FG.2017.32
+aaec8141d57d29aa3cedf1baec9633180ddb7a3d,http://doi.ieeecomputersociety.org/10.1109/ICME.2016.7552916
+edd6ed94207ab614c71ac0591d304a708d708e7b,http://doi.org/10.1016/j.neucom.2012.02.001
+87b607b8d4858a16731144d17f457a54e488f15d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5597532
+1a40c2a2d17c52c8b9d20648647d0886e30a60fa,https://doi.org/10.1109/ICPR.2016.7900283
+8202da548a128b28dd1f3aa9f86a0523ec2ecb26,http://doi.org/10.1016/j.ijar.2012.01.003
+552122432b92129d7e7059ef40dc5f6045f422b5,http://doi.org/10.1007/s11263-017-1000-3
+6489ad111fee8224b34f99d1bcfb5122786508cd,https://doi.org/10.1109/ICIP.2014.7025280
+9e5809122c0880183c7e42c7edd997f92de6d81e,http://doi.acm.org/10.1145/2451176.2451209
+d05759932001aa6f1f71e7dc261c4716f57a5397,https://doi.org/10.1109/ISBA.2015.7126365
+f702a6cf6bc5e4cf53ea72baa4fc9d80cdbbae93,https://doi.org/10.1109/TCSVT.2007.903317
+2f1485994ef2c09a7bb2874eb8252be8fe710db1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780700
+b40c001b3e304dccb28c745bd54aa281c8ff1f29,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8361072
+b299c292b84aeb4f080a8b39677a8e0d07d51b27,http://doi.ieeecomputersociety.org/10.1109/ICDM.2015.23
+eefecac463ebfc0694b9831e842b574f3954fed6,http://doi.ieeecomputersociety.org/10.1109/SNPD.2013.15
+7bc1e7d000ab517161a83b1fedf353e619516ddf,http://doi.ieeecomputersociety.org/10.1109/WACV.2014.6836068
+e4df98e4b45a598661a47a0a8900065716dafd6d,http://doi.ieeecomputersociety.org/10.1109/WI-IAT.2015.219
+d1b5b3e4b803dc4e50c5b80c1bc69c6d98751698,https://doi.org/10.1109/LSP.2017.2661983
+007fbc7a1d7eae33b2bb59b175dd1033e5e178f3,http://dl.acm.org/citation.cfm?id=3209659
+c47bd9f6eb255da525dbcdfc111609c90bc4d2ae,http://dl.acm.org/citation.cfm?id=3230921
+ec00ecb64fa206cea8b2e716955a738a96424084,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265512
+ba30cc9d8bac724dafc0aea247159cc7e7105784,http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019360
+b806a31c093b31e98cc5fca7e3ec53f2cc169db9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7995928
+cfba667644508853844c45bfe5d0b8a2ffb756d3,https://doi.org/10.1109/ISBA.2018.8311455
+865d4ce1751ff3c0a8eb41077a9aa7bd94603c47,https://doi.org/10.1007/s12193-015-0210-7
+cc6d3ccc9e3dd0a43313a714316c8783cd879572,https://doi.org/10.1109/ICIP.2017.8296802
+cea2911ccabab40e9c1e5bcc0aa1127cab0c789f,http://doi.org/10.1007/s11042-015-2847-3
+383ff2d66fecdc2fd02a31ac1fa392f48e578296,https://doi.org/10.1016/j.cviu.2015.07.005
+10f4bbf87a44bab3d79e330e486c897e95f5f33f,https://doi.org/10.1109/TIFS.2012.2186292
+4398afa0aeb5749a12772f2d81ca688066636019,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2015.2496320
+8f73af52d87c94d0bd43242462fd68d974eda331,https://doi.org/10.1109/ICB.2013.6613009
+3bcb93aa2a5e5eda039679516292af2f7c0ff9ac,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8393012
+46c82cfadd9f885f5480b2d7155f0985daf949fc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780537
+bf1e0545785b05b47caa3ffe7d16982769986f38,https://doi.org/10.1016/j.asoc.2010.12.002
+5ed5e534c8defd683909200c1dc31692942b7b5f,http://doi.acm.org/10.1145/2983926
+310fe4e6cb6d090f7817de4c1034e35567b56e34,https://doi.org/10.1109/ICPR.2014.313
+5de9670f72d10682bf2cb3156988346257e0489f,http://doi.org/10.1016/j.inffus.2015.12.004
+3b350afd8b82487aa97097170c269a25daa0c82d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8248664
+ad6cc071b2585e4bdb6233b7ad8d63e12538537d,https://doi.org/10.1007/s10462-010-9172-z
+3fa628e7cff0b1dad3f15de98f99b0fdb09df834,http://doi.ieeecomputersociety.org/10.1109/ICME.2013.6607603
+25bcd5aa3bbe56c992547fba683418655b46fc4a,https://doi.org/10.1016/j.eswa.2017.03.030
+dbf2d2ca28582031be6d16519ab887248f5e8ad8,https://doi.org/10.1109/TMM.2015.2410135
+526c79c6ce39882310b814b7918449d48662e2a9,https://doi.org/10.1109/ICASSP.2005.1416338
+21bd60919e2e182a29af455353141ba4907b1b41,https://doi.org/10.1109/ACCESS.2018.2798573
+6d5f876a73799cc628e4ad2d9cfcd88091272342,https://doi.org/10.1109/TSMCC.2005.848193
+047ce307ad0c871bc2c9a5c1e4649cefae2ba50d,https://doi.org/10.1109/ICRA.2012.6224587
+60821d447e5b8a96dd9294a0514911e1141ff620,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813328
+605f6817018a572797095b83bec7fae7195b2abc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5339020
+f9752fd07b14505d0438bc3e14b23d7f0fe7f48b,http://doi.ieeecomputersociety.org/10.1109/ICMLA.2009.114
+6f16f4bd01aeefdd03d6783beacb7de118f5af8a,https://doi.org/10.1109/VCIP.2013.6706330
+3cd22b5b81a0172d608ff14be71b755d1f68c201,https://doi.org/10.1109/ACCESS.2018.2812725
+75b51140d08acdc7f0af11b0ffa1edb40ebbd059,https://doi.org/10.1007/s00521-010-0381-9
+a53d13b9110cddb2a5f38b9d7ed69d328e3c6db9,https://doi.org/10.1109/TIP.2015.2481327
+1bd9dbe78918ed17b0a3ac40623f044cb3d3552c,http://doi.org/10.1038/nn870
+d75bd05865224a1341731da66b8d812a7924d6f6,https://doi.org/10.1109/TSMCB.2012.2217127
+2e36b63fdf1353425a57a0665b0d0274efe92963,http://doi.acm.org/10.1145/3152771.3156179
+ca60d007af691558de377cab5e865b5373d80a44,http://doi.ieeecomputersociety.org/10.1109/ACII.2017.8273630
+502d30c5eac92c7db587d85d080343fbd9bc469e,https://doi.org/10.1109/TIFS.2016.2538744
+b104c8ef6735eba1d29f50c99bbbf99d33fc8dc2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7415357
+9e105c4a176465d14434fb3f5bae67f57ff5fba2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354230
+90c4deaa538da42b9b044d7b68c3692cced66036,http://doi.ieeecomputersociety.org/10.1109/SITIS.2007.89
+673541a8cb1aa3ac63a288523ba71aec2a38280e,http://doi.ieeecomputersociety.org/10.1109/ICME.2016.7552971
+b759936982d6fb25c55c98955f6955582bdaeb27,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7472169
+0b8839945259ec764ef0fad47471f34db39f40c3,https://doi.org/10.1109/DESEC.2017.8073838
+4b7f21b48c7e0dc7334e36108f558d54642c17c0,https://doi.org/10.1109/WACV.2017.106
+b234d429c9ea682e54fca52f4b889b3170f65ffc,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2012.22
+193474d008cab9fa1c1fa81ce094d415f00b075c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8466415
+4c0cc732314ba3ccccd9036e019b1cfc27850c17,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6854473
+a5d4cc596446517dfaa4d92276a12d5e1c0a284c,https://doi.org/10.1016/j.patrec.2009.06.002
+a6b5ca99432c23392cec682aebb8295c0283728b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8302395
+de048065ea2c5b3e306e2c963533df055e7dfcaa,https://doi.org/10.1109/LSP.2016.2598878
+4cec3e5776090852bef015a8bbe74fed862aa2dd,https://doi.org/10.1109/TSP.2013.2271479
+55c4efc082a8410b528af7325de8148b80cf41e3,http://dl.acm.org/citation.cfm?id=3231899
+6ba3cb67bcdb7aea8a07e144c03b8c5a79c19bc0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8246530
+c92e36689ef561df726a7ae861d9c166c3934908,https://doi.org/10.1109/ICPR.2016.7900140
+7234468db46b37e2027ab2978c67b48b8581f796,https://doi.org/10.1109/ACPR.2015.7486464
+ac2e166c76c103f17fdea2b4ecb137200b8d4703,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5373798
+bef926d63512dbffcf1af59f72295ef497f5acf9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6990726
+13d430257d595231bda216ef859950caa736ad1d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8394947
+69a9cf9bc8e585782824666fa3fb5ce5cf07cef2,https://doi.org/10.1007/s11390-017-1738-7
+d57c8d46a869c63fb20e33bc21bc2a3c4628f5b4,http://doi.org/10.1007/s11042-018-5806-y
+6d2fd0a9cbea13e840f962ba7c8a9771ec437d3a,http://doi.org/10.1007/s11063-017-9715-2
+6f8cffd9904415c8fa3a1e650ac143867a04f40a,https://doi.org/10.1016/j.neucom.2015.01.099
+9df86395c11565afa8683f6f0a9ca005485c5589,https://doi.org/10.1007/s00530-014-0400-2
+aa90a466a2ff7781c36e7da7df0013aa5b117510,http://doi.ieeecomputersociety.org/10.1109/AICCSA.2017.159
+0821028073981f9bd2dba2ad2557b25403fe7d7d,http://doi.acm.org/10.1145/2733373.2806318
+31dd6bafd6e7c6095eb8d0591abac3b0106a75e3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8457336
+6343bc0013343b6a5f96154f02d18dcd36a3f74c,https://doi.org/10.1007/s11042-014-2083-2
+0be015e2f9a1d2acebc3afb6e0f6948dd2f9d23d,https://doi.org/10.1007/s12193-013-0133-0
+489b7e12a420eff0d585f3f866e76b838c2cd275,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477675
+c4541802086461420afb1ecb5bb8ccd5962a9f02,https://doi.org/10.1109/TSMCB.2009.2029076
+01e14d8ffd6767336d50c2b817a7b7744903e567,http://doi.ieeecomputersociety.org/10.1109/FG.2017.128
+592370b4c7b58a2a141e507f3a2cc5bbd247a62e,https://doi.org/10.1109/IJCNN.2017.7965911
+d6ae7941dcec920d5726d50d1b1cdfe4dde34d35,http://dl.acm.org/citation.cfm?id=31310887
+c2bd9322fa2d0a00fc62075cc0f1996fc75d42a8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014811
+0c247ac797a5d4035469abc3f9a0a2ccba49f4d8,https://doi.org/10.1109/ICMLC.2011.6016715
+942b89d8d17e89e58c82453de2bfcbbeb09adc81,https://doi.org/10.1016/j.patcog.2016.02.019
+3ffbc912de7bad720c995385e1fdc439b1046148,http://doi.ieeecomputersociety.org/10.1109/IIH-MSP.2008.347
+f1d6da83dcf71eda45a56a86c5ae13e7f45a8536,https://doi.org/10.1109/ACCESS.2017.2737544
+46c1af268d4b3c61a0a12be091ca008a3a60e4cd,https://doi.org/10.1007/s11042-016-3592-y
+14d7bce17265738f10f48987bb7bffb3eafc676e,http://ieeexplore.ieee.org/document/7514504/
+affa61d044daa1a7d43a6803a743eab47c89c45d,https://doi.org/10.1109/TNNLS.2015.2405574
+21b5af67618fcc047b495d2d5d7c2bf145753633,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771442
+d4453ec649dbde752e74da8ab0984c6f15cc6e06,http://doi.org/10.1007/s11042-016-3361-y
+aa1607090fbc80ab1e9c0f25ffe8b75b777e5fd8,https://www.sciencedirect.com/science/article/pii/S0006322316331110
+2d2fb01f761d21a459cfb34935bc47ab45a9913b,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2014.2346515
+6c92d87c84fa5e5d2bb5bed3ef38168786bacc49,http://dl.acm.org/citation.cfm?id=2501650
+fef6f1e04fa64f2f26ac9f01cd143dd19e549790,http://doi.acm.org/10.1145/3123266.3123451
+b8fc620a1563511744f1a9386bdfa09a2ea0f71b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411214
+5bed2453a5b0c54a4a4a294f29c9658658a9881e,https://doi.org/10.1109/TIP.2015.2451173
+be40014beffaa9faacee12bb3412969f98b6a43d,http://doi.ieeecomputersociety.org/10.1109/SNPD.2007.454
+6da711d07b63c9f24d143ca3991070736baeb412,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7000295
+425ea5656c7cf57f14781bafed51182b2e6da65f,https://doi.org/10.1109/TIP.2017.2718187
+a6ce1a1de164f41cb8999c728bceedf65d66bb23,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7170694
+e0423788eb91772de9d708a17799179cf3230d63,http://doi.acm.org/10.1145/3093241.3093277
+ecac3da2ff8bc2ba55981467f7fdea9de80e2092,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1301635
+f4411787688ca40466ee059ec64bf56d746733c1,https://doi.org/10.1007/s12652-012-0107-1
+863ad2838b9b90d4461995f498a39bcd2fb87c73,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265580
+021469757d626a39639e260492eea7d3e8563820,https://doi.org/10.1007/b116723
+464ef1b3dcbe84099c904b6f9e9281c5f6fd75eb,https://doi.org/10.1109/TIP.2014.2359765
+3aebaaf888cba25be25097173d0b3af73d9ce7f9,http://doi.ieeecomputersociety.org/10.1109/MMUL.2014.49
+c9c2de3628be7e249722b12911bebad84b567ce6,https://doi.org/10.1016/j.patcog.2017.06.028
+c0945953506a3d531331caf6c2b2a6d027e319f0,https://doi.org/10.1002/cav.49
+24286ef164f0e12c3e9590ec7f636871ba253026,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369721
+2d94dfa9c8f6708e071ef38d58f9f9bcb374cd84,https://doi.org/10.1109/CVPRW.2011.5981817
+51f626540860ad75b68206025a45466a6d087aa6,https://doi.org/10.1109/ICIP.2017.8296595
+dbc3ab8c9f564f038e7779b87900c4a0426f3dd1,http://doi.acm.org/10.1145/1386352.1386401
+dd6826e9520a6e72bcd24d1bdb930e78c1083b31,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7106467
+3fe3d6ff7e5320f4395571131708ecaef6ef4550,https://doi.org/10.1109/SITIS.2016.60
+720763bcb5e0507f13a8a319018676eb24270ff0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5202783
+fc7f140fcedfe54dd63769268a36ff3f175662b5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8013122
+b3e60bb5627312b72c99c5ef18aa41bcc1d21aea,https://doi.org/10.1109/SPAC.2014.6982690
+51b42da0706a1260430f27badcf9ee6694768b9b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7471882
+fd38163654a0551ed7f4e442851508106e6105d9,https://doi.org/10.1109/ICNSC.2008.4525311
+2dbc57abf3ceda80827b85593ce1f457b76a870b,http://doi.org/10.1007/s11042-018-6133-z
+85a136b48c2036b16f444f93b086e2bd8539a498,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7885525
+ebfdb4842c69177b65022f00d3d038d645f3260b,http://doi.ieeecomputersociety.org/10.1109/CVPR.2006.154
+076c97826df63f70d55ea11f0b7ae47a7ad81ad3,http://doi.ieeecomputersociety.org/10.1109/SITIS.2011.40
+158aa18c724107587bcc4137252d0ba10debf417,https://doi.org/10.1109/ACSSC.2016.7869522
+9944c451b4a487940d3fd8819080fe16d627892d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612967
+0ef20991e0ecc7dc3f6e0e5fd6ee93c4970206f3,https://doi.org/10.1109/ICIP.2015.7351013
+64ba203c8cfc631d5f3f20419880523155fbeeb2,http://doi.acm.org/10.1145/3009977.3010008
+b36a80d15c3e48870ea6118b855055cc34307658,https://doi.org/10.1109/ICPR.2014.17
+74d3ff8324e02503c18fb2566ed29e2e22ce0d1b,http://doi.ieeecomputersociety.org/10.1109/IAS.2009.266
+d3d39e419ac98db2de1a9d5a05cb0b4ca5cae8fd,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6619296
+70e14e216b12bed2211c4df66ef5f0bdeaffe774,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237666
+ea8d217231d4380071132ce37bf997164b60ec44,https://doi.org/10.1109/SIU.2016.7496031
+9c59bb28054eee783a40b467c82f38021c19ff3e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7178311
+af12a79892bd030c19dfea392f7a7ccb0e7ebb72,http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247972
+7f203f2ff6721e73738720589ea83adddb7fdd27,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1301513
+c4e2d5ebfebbb9dcee6a9866c3d6290481496df5,http://doi.org/10.1007/s00138-012-0439-z
+5951e9e13ff99f97f301a336f24a14d80459c659,https://doi.org/10.1016/j.neucom.2017.09.009
+2b300985a507533db3ec9bd38ade16a32345968e,https://doi.org/10.1007/s11042-015-3070-y
+504d2675da7a56a36386568ee668938df6d82bbe,https://doi.org/10.1109/TCSVT.2016.2539604
+4cfe921ac4650470b0473fd52a2b801f4494ee64,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6467429
+14efb131bed66f1874dd96170f714def8db45d90,http://doi.acm.org/10.1145/2818346.2830585
+0aebe97a92f590bdf21cdadfddec8061c682cdb2,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2017.2695183
+a961f1234e963a7945fed70197015678149b37d8,http://dl.acm.org/citation.cfm?id=3206068
+4f1249369127cc2e2894f6b2f1052d399794919a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8239663
+b084ad222c1fc9409d355d8e54ac3d1e86f2ca18,https://doi.org/10.1016/j.neucom.2017.04.001
+3e7070323bca6106f19bea4c97ef67bd6249cb5d,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477448
+885c37f94e9edbbb2177cfba8cb1ad840b2a5f20,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8006255
+176e6ba56e04c98e1997ffdef964ece90fd827b4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8322125
+9048732c8591a92a1f4f589b520a733f07578f80,https://doi.org/10.1109/CISP-BMEI.2017.8301921
+4fac09969ee80d485876e3198c7177181c600a4a,http://doi.ieeecomputersociety.org/10.1109/CRV.2015.32
+02e668f9b75f4a526c6fdf7268c8c1936d8e6f09,https://doi.org/10.1142/S0218001411008968
+f070d739fb812d38571ec77490ccd8777e95ce7a,http://doi.org/10.1016/j.patcog.2014.09.007
+adad7446e371d27fdaee39475856e2058f3045e5,https://doi.org/10.1109/ISCAS.2013.6572295
+8355d095d3534ef511a9af68a3b2893339e3f96b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406390
+1b2d9a1c067f692dd48991beff03cd62b9faebf2,https://doi.org/10.1109/ICIP.2011.6116302
+fddca9e7d892a97073ada88eec39e03e44b8c46a,http://doi.ieeecomputersociety.org/10.1109/ICPR.2014.305
+3083bd7a442af6a1d72cdc04ae1ad7c30697a4e8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8392250
+bb4be8e24d7b8ed56d81edec435b7b59bad96214,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7060677
+fa08b52dda21ccf71ebc91bc0c4d206ac0aa3719,https://doi.org/10.1109/TIM.2015.2415012
+3e0377af0087b9b836bf6d95bc1c7085dfde4897,http://doi.acm.org/10.1145/2671188.2749320
+0be418e63d111e3b94813875f75909e4dc27d13a,https://doi.org/10.1109/ICB.2016.7550057
+f3cdd2c3180aa2bf08320ddd3b9a56f9fe00e72b,http://doi.org/10.1016/j.patrec.2013.03.022
+53507e2de66eaba996f14fd2f54a5535056f1e59,http://doi.org/10.1016/j.sigpro.2017.10.024
+d3dea0cd65ab3da14cb7b3bd0ec59531d98508aa,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7728015
+4349f17ec319ac8b25c14c2ec8c35f374b958066,https://doi.org/10.1109/THMS.2017.2681425
+263ed62f94ea615c747c00ebbb4008385285b33b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8319974
+f5a95f857496db376d69f7ac844d1f56e3577b75,https://doi.org/10.1007/s12193-012-0107-7
+7535e3995deb84a879dc13857e2bc0796a2f7ce2,https://doi.org/10.1007/s10618-010-0207-5
+d4f0960c6587379ad7df7928c256776e25952c60,https://www.ncbi.nlm.nih.gov/pubmed/29107889
+19bbecead81e34b94111a2f584cf55db9a80e60c,http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6248025
+657e702326a1cbc561e059476e9be4d417c37795,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8343704
+89272b78b651038ff4d294b9ccca0018d2c9033b,https://doi.org/10.1109/ICPR.2014.777
+fa641327dc5873276f0af453a2caa1634c16f143,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789590
+fb7bf10cbc583db5d5eee945aa633fcb968e01ad,https://doi.org/10.1007/s00521-012-0962-x
+9a59abdf3460970de53e09cb397f47d86744f472,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995399
+2945cc9e821ab87fa17afc8802f3858435d1264c,https://doi.org/10.1109/ICPR.2016.7899839
+c5e37630d0672e4d44f7dee83ac2c1528be41c2e,http://dl.acm.org/citation.cfm?id=3078973
+b8978a5251b6e341a1171e4fd9177aec1432dd3a,https://doi.org/10.1016/j.image.2016.04.004
+7a595800b490ff437ab06fe7612a678d5fe2b57d,https://doi.org/10.1109/MMSP.2009.5293285
+6689aee6c9599c1af4c607ea5385ac0c2cf0c4b3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8335166
+eb02daee558e483427ebcf5d1f142f6443a6de6b,http://doi.acm.org/10.1145/2911996.2912019
+c3c463a9ee464bb610423b7203300a83a166b500,https://doi.org/10.1109/ICIP.2014.7025069
+4cc326fc977cf967eef5f3135bf0c48d07b79e2d,http://doi.org/10.1007/s11042-016-3830-3
+a5eb36f1e77245dfc9e5c0c03998529331e4c89b,https://doi.org/10.1109/BTAS.2014.6996222
+dd715a98dab34437ad05758b20cc640c2cdc5715,https://doi.org/10.1007/s41095-017-0082-8
+e83e5960c2aabab654e1545eb419ef64c25800d5,https://doi.org/10.1016/j.neunet.2016.08.011
+3b75681f0162752865d85befd8b15e7d954ebfe6,https://doi.org/10.1109/CLEI.2014.6965097
+9d01eca806e0f98c5b3c9a865cec1bd8c78e0f0c,http://doi.acm.org/10.1145/3136755.3137032
+a78b5495a4223b9784cc53670cc10b6f0beefd32,http://doi.org/10.1007/s11042-018-6260-6
+d8e5d94c3c8688f0ca0ee656c79847c7df04c77d,https://doi.org/10.1007/s12193-015-0187-2
+5180c98815d7034e753a14ef6f54583f115da3aa,http://doi.ieeecomputersociety.org/10.1109/iV.2017.40
+ff3859917d4121f47de0d46922a103c78514fcab,https://doi.org/10.1109/ICB.2016.7550050
+265a88a8805f6ba3efae3fcc93d810be1ea68866,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8342346
+ad339a5fdaab95f3c8aad83b60ceba8d76107fa2,https://doi.org/10.1023/B:VISI.0000013090.39095.d5
+141cb9ee401f223220d3468592effa90f0c255fa,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7815403
+196c12571ab51273f44ea3469d16301d5b8d2828,http://doi.org/10.1007/s00371-018-1494-x
+758d481bbf24d12615b751fd9ec121500a648bce,http://doi.org/10.1007/s11042-015-2914-9
+8dfe43c76b76a97f8938f5f5f81059a1f1fa74ed,http://doi.org/10.1038/s41598-017-18993-5
+83d50257eb4c0aa8d16d27bf2ee8d0614fd63bf6,http://doi.ieeecomputersociety.org/10.1109/FG.2015.7284834
+ed184fda0306079f2ee55a1ae60fbf675c8e11c6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6802347
+39acf4bb06b889686ca17fd8c89887a3cec26554,http://www.springerlink.com/index/10.1007/s10044-004-0223-4
+58df849378fbcfb6b1a8ebddfbe4caa450226b9d,https://doi.org/10.1109/ICIP.2017.8296770
+be632b206f1cd38eab0c01c5f2004d1e8fc72880,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6607601
+7dc498d45f9fcb97acee552c6f587b65d5122c35,https://doi.org/10.1109/ICIP.2015.7351618
+19b492d426f092d80825edba3b02e354c312295f,http://doi.org/10.1007/s00371-016-1332-y
+8b3c867e67b263d7a0577a112173a64009a3b4ba,https://doi.org/10.1109/ICIP.2010.5652374
+2b43100a13811b33cc9f905fa1334bfd8b1873ba,https://doi.org/10.1109/IVCNZ.2015.7761564
+0d75c7d9a00f859cffe7d0bd78dd35d0b4bc7fa6,https://doi.org/10.1109/LSP.2005.863661
+ea5c9d5438cde6d907431c28c2f1f35e02b64b33,https://doi.org/10.1109/SPAC.2017.8304257
+5c91fc106cfe9d57a9b149c1af29ca84d403fc7e,https://doi.org/10.1109/TCSVT.2015.2452782
diff --git a/scraper/reports/misc/all_doi.csv b/scraper/reports/misc/all_doi.csv
new file mode 100644
index 00000000..83caf30a
--- /dev/null
+++ b/scraper/reports/misc/all_doi.csv
@@ -0,0 +1,2995 @@
+61831364ddc8db869618f1c7f0ad35ab2ab6bcf7,https://doi.org/10.1109/ICIP.2013.6738496
+61a3c45c9f802f9d5fa8d94fee811e203bac6487,https://doi.org/10.1109/TIFS.2016.2567318
+6159908dec4bc2c1102f416f8a52a31bf3e666a4,https://doi.org/10.1109/ICIP.2012.6467431
+6196f4be3b28684f6528b8687adccbdf9ac5c67c,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.267
+61b22b1016bf13aca8d2e57c4e5e004d423f4865,https://doi.org/10.1109/TCYB.2016.2526630
+61bc124537f414f6fcb4d1ff476681b5a0ee222a,http://doi.ieeecomputersociety.org/10.1109/WIW.2016.043
+0d90c992dd08bfb06df50ab5c5c77ce83061e830,https://doi.org/10.1109/UIC-ATC.2013.85
+0d7fcdb99dc0d65b510f2b0b09d3d3cfed390261,https://doi.org/10.1109/IJCB.2011.6117508
+0d6d9c4b5dd282b8f29cd3c200df02a00141f0a9,https://doi.org/10.1109/SIU.2014.6830193
+0d9815f62498db21f06ee0a9cc8b166acc93888e,https://doi.org/10.1016/j.neucom.2007.12.018
+0d8cec1b3f9b6e25d9d31eeb54d8894a1f2ef84f,https://doi.org/10.1109/LSP.2018.2810121
+0d3ff34d8490a9a53de1aac1dea70172cb02e013,https://doi.org/10.1109/ICPR.2014.542
+0de1450369cb57e77ef61cd334c3192226e2b4c2,https://doi.org/10.1109/BTAS.2017.8272747
+0d7652652c742149d925c4fb5c851f7c17382ab8,https://doi.org/10.1016/j.neucom.2015.05.057
+0da3c329ae14a4032b3ba38d4ea808cf6d115c4a,https://doi.org/10.1007/s00138-015-0709-7
+0d75c7d9a00f859cffe7d0bd78dd35d0b4bc7fa6,https://doi.org/10.1109/LSP.2005.863661
+0d98750028ea7b84b86e6fec3e67d61e4f690d09,https://doi.org/10.1109/ACSSC.2015.7421092
+0db371a6bc8794557b1bffc308814f53470e885a,https://doi.org/10.1007/s13042-015-0380-3
+95f1790da3d0a4a5310a050512ce355b3c5aac86,https://doi.org/10.1109/ICIP.2016.7533142
+95023e3505263fac60b1759975f33090275768f3,http://doi.acm.org/10.1145/2856767.2856770
+952138ae6534fad573dca0e6b221cdf042a36412,http://doi.ieeecomputersociety.org/10.1109/DICTA.2005.38
+950bf95da60fd4e77d5159254fed906d5ed5fbcb,http://doi.ieeecomputersociety.org/10.1109/MMUL.2014.24
+9590b09c34fffda08c8f54faffa379e478f84b04,https://doi.org/10.1109/TNNLS.2013.2275170
+95e7cf27a8ee62b63ed9d1ecb02a7016e9a680a6,https://doi.org/10.1007/s11063-013-9322-9
+5957936195c10521dadc9b90ca9b159eb1fc4871,https://doi.org/10.1109/TCE.2016.7838098
+59fe66eeb06d1a7e1496a85f7ffc7b37512cd7e5,http://doi.ieeecomputersociety.org/10.1109/ICME.2016.7552862
+592370b4c7b58a2a141e507f3a2cc5bbd247a62e,https://doi.org/10.1109/IJCNN.2017.7965911
+59b6ff409ae6f57525faff4b369af85c37a8dd80,http://doi.ieeecomputersociety.org/10.1109/ISM.2017.28
+5981c309bd0ffd849c51b1d8a2ccc481a8ec2f5c,https://doi.org/10.1109/ICT.2017.7998256
+5951e9e13ff99f97f301a336f24a14d80459c659,https://doi.org/10.1016/j.neucom.2017.09.009
+9227c1a5b26556b9c34015b3ea5f9ae5f50e9b23,https://doi.org/10.1109/FCV.2015.7103729
+9255d3b2bfee4aaae349f68e67c76a077d2d07ad,https://doi.org/10.1109/TIP.2017.2713041
+92de9a54515f4ac8cc8e4e6b0dfab20e5e6bb09d,https://doi.org/10.1109/ICIP.2016.7533062
+9213a415d798426c8d84efc6d2a69a2cbfa2af84,https://doi.org/10.1016/j.cviu.2013.03.008
+0c378c8dcf707145e1e840a9951519d4176a301f,https://doi.org/10.1109/ICARCV.2010.5707434
+0c65226edb466204189b5aec8f1033542e2c17aa,https://doi.org/10.1109/ICIP.2017.8296997
+0c247ac797a5d4035469abc3f9a0a2ccba49f4d8,https://doi.org/10.1109/ICMLC.2011.6016715
+0cf1287c8fd41dcef4ac03ebeab20482f02dce20,https://doi.org/10.1109/MSN.2016.032
+0c2370e156a4eb8d84a5fdb049c5a894c3431f1c,https://doi.org/10.1109/CIBIM.2014.7015437
+0c0db39cac8cb76b52cfdbe10bde1c53d68d202f,http://doi.acm.org/10.1145/3123266.3123334
+0c1314d98bb6b99af00817644c1803dbc0fb5ff5,http://doi.ieeecomputersociety.org/10.1109/BigMM.2015.29
+0c6a18b0cee01038eb1f9373c369835b236373ae,https://doi.org/10.1007/s11042-017-4359-9
+66ec085c362f698b40d6e0e7b10629462280c062,https://doi.org/10.1109/ICARCV.2004.1468855
+661c78a0e2b63cbdb9c20dcf89854ba029b6bc87,https://doi.org/10.1109/ICIP.2014.7025093
+66f4d7c381bd1798703977de2e38b696c6641b77,https://doi.org/10.1109/FSKD.2015.7382360
+6688b2b1c1162bc00047075005ec5c7fca7219fd,https://doi.org/10.1109/SACI.2013.6608958
+6622776d1696e79223f999af51e3086ba075dbd1,http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019454
+3e01f2fefe219bfeb112f1d82e76ebba4c0e2aac,http://doi.ieeecomputersociety.org/10.1109/WACV.2014.6836097
+3ebb0209d5e99b22c67e425a67a959f4db8d1f47,https://doi.org/10.1109/ICDAR.2017.173
+3e0035b447d0d4e11ceda45936c898256f321382,https://doi.org/10.1109/BMEI.2014.7002762
+3e1190655cc7c1159944d88bdbe591b53f48d761,https://doi.org/10.1007/s10489-013-0464-2
+3e452ca67e17e4173ec8dfbd4a2b803ad2ee5a48,http://doi.ieeecomputersociety.org/10.1109/WF-IoT.2016.7845505
+3ec860cfbd5d953f29c43c4e926d3647e532c8b0,https://doi.org/10.1109/TCSVT.2008.924108
+3e0377af0087b9b836bf6d95bc1c7085dfde4897,http://doi.acm.org/10.1145/2671188.2749320
+3e7070323bca6106f19bea4c97ef67bd6249cb5d,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477448
+3e03d19b950edadc74ca047dec86227282eccf71,https://doi.org/10.1109/ACCESS.2017.2777003
+503b6a490c2b24b9d2aaf642a0fdaf797a8cdb99,https://doi.org/10.1109/ACCESS.2017.2733718
+504d2675da7a56a36386568ee668938df6d82bbe,https://doi.org/10.1109/TCSVT.2016.2539604
+502d30c5eac92c7db587d85d080343fbd9bc469e,https://doi.org/10.1109/TIFS.2016.2538744
+50333790dd98c052dfafe1f9bf7bf8b4fc9530ba,https://doi.org/10.1109/ICIP.2015.7351001
+5039834df68600a24e7e8eefb6ba44a5124e67fc,https://doi.org/10.1109/ICIP.2013.6738761
+501076313de90aca7848e0249e7f0e7283d669a1,https://doi.org/10.1109/SOCPAR.2014.7007987
+681d222f91b12b00e9a4217b80beaa11d032f540,https://doi.org/10.1007/s10044-015-0493-z
+68c5b4d9ce2a0c75ba515870923a4bd1b7d8f9b5,https://doi.org/10.1109/CISP-BMEI.2017.8301919
+68eb6e0e3660009e8a046bff15cef6fe87d46477,https://doi.org/10.1109/ICIP.2017.8296999
+68e6cfb0d7423d3fae579919046639c8e2d04ad7,https://doi.org/10.1109/ICB.2016.7550058
+6813208b94ffa1052760d318169307d1d1c2438e,http://doi.acm.org/10.1145/2818346.2830582
+68f19f06f49aa98b676fc6e315b25e23a1efb1f0,https://doi.org/10.1109/ICIP.2015.7351080
+68d566ed4041a7519acb87753036610bd64dcc09,https://doi.org/10.1007/s11390-013-1347-z
+68021c333559ab95ca10e0dbbcc8a4840c31e157,https://doi.org/10.1109/ICPR.2016.7900281
+681399aa0ea4cbffd9ab22bf17661d6df4047349,http://doi.ieeecomputersociety.org/10.1109/CISIS.2012.207
+57b7325b8027745b130490c8f736445c407f4c4c,http://doi.ieeecomputersociety.org/10.1109/WI-IAT.2012.27
+5798055e11e25c404b1b0027bc9331bcc6e00555,http://doi.acm.org/10.1145/2393347.2396357
+57eeaceb14a01a2560d0b90d38205e512dcca691,https://doi.org/10.1109/TIP.2017.2778563
+5763b09ebca9a756b4adebf74d6d7de27e80e298,https://doi.org/10.1109/BTAS.2013.6712738
+57f4e54a63ef95596dbc743f391c3fff461f278b,https://doi.org/10.1109/ICMEW.2012.86
+57ca530e9acb63487e8591cb6efb89473aa1e5b4,https://doi.org/10.1109/TIP.2014.2356292
+578117ff493d691166fefc52fd61bad70d8752a9,https://doi.org/10.1109/CCST.2016.7815707
+57ba4b6de23a6fc9d45ff052ed2563e5de00b968,https://doi.org/10.1109/ICIP.2017.8296993
+5721cd4b898f0e7df8de1e0215f630af94656be9,http://doi.acm.org/10.1145/3095140.3095164
+57c270a9f468f7129643852945cf3562cbb76e07,https://doi.org/10.1016/j.imavis.2016.07.004
+57de1a09db680e0b4878ceda68d626ae4e44ccfe,https://doi.org/10.1016/j.neucom.2014.10.111
+57dc55edade7074f0b32db02939c00f4da8fe3a6,https://doi.org/10.1109/TITS.2014.2313371
+3ba74755c530347f14ec8261996dd9eae896e383,https://doi.org/10.1109/JSSC.2017.2767705
+3b8c830b200f1df8ef705de37cbfe83945a3d307,https://doi.org/10.1007/s00138-017-0887-6
+3bdaf59665e6effe323a1b61308bcac2da4c1b73,https://doi.org/10.1109/ROMAN.2012.6343736
+3bfa630a6dc6d1ca98e7b43c90dd9e8b98e361d6,https://doi.org/10.1109/ICIP.2015.7351140
+3b75681f0162752865d85befd8b15e7d954ebfe6,https://doi.org/10.1109/CLEI.2014.6965097
+3b64b8be33887e77e6def4c385985e43e2c15eea,https://doi.org/10.1109/TIP.2016.2576278
+6f74c3885b684e52096497b811692bd766071530,https://doi.org/10.1016/j.neucom.2013.06.013
+6f68c49106b66a5bd71ba118273b4c5c64b6619f,http://doi.ieeecomputersociety.org/10.1109/TKDE.2007.190720
+6ffdbac58e15e0ff084310b0a804520ad4bd013e,https://doi.org/10.1049/iet-bmt.2015.0078
+6f22324fab61fbc5df1aac2c0c9c497e0a7db608,https://doi.org/10.1109/ICB.2013.6612990
+6f16f4bd01aeefdd03d6783beacb7de118f5af8a,https://doi.org/10.1109/VCIP.2013.6706330
+6f0caff7c6de636486ff4ae913953f2a6078a0ab,http://doi.ieeecomputersociety.org/10.1109/ICME.2010.5583081
+6ff0f804b8412a50ae2beea5cd020c94a5de5764,http://doi.acm.org/10.1145/1877972.1877994
+6fdf2f4f7ae589af6016305a17d460617d9ef345,https://doi.org/10.1109/ICIP.2015.7350767
+6f48e5e258da11e6ba45eeabe65a5698f17e58ef,https://doi.org/10.1109/ICASSP.2013.6637968
+6f8cffd9904415c8fa3a1e650ac143867a04f40a,https://doi.org/10.1016/j.neucom.2015.01.099
+0387b32d0ebd034dc778972367e7d4194223785d,http://doi.acm.org/10.1145/2522848.2531740
+03333e7ec198208c13627066bc76b0367f5e270f,https://doi.org/10.1109/IJCNN.2017.7966100
+03e1480f1de2ffbd85655d68aae63a01685c5862,https://doi.org/10.1109/ICPR.2014.771
+0341405252c80ff029a0d0065ca46d0ade943b03,http://doi.ieeecomputersociety.org/10.1109/FG.2017.40
+03babadaaa7e71d4b65203e27e8957db649155c6,https://doi.org/10.1109/TIP.2017.2725578
+0343f9401b98de36be957a30209fef45dd684270,http://doi.ieeecomputersociety.org/10.1109/FG.2015.7163134
+9b78ce9fdac30864d1694a56328b3c8a96cccef5,https://doi.org/10.1089/cpb.2004.7.635
+9bd3cafa16a411815f8f87ed3eb3cafefc25e5a3,https://doi.org/10.1109/ICPR.2016.7899782
+9b8830655d4a5a837e3ffe835d14d6d71932a4f2,https://doi.org/10.1109/TSMCB.2011.2169452
+9ba358281f2946cba12fff266019193a2b059590,http://doi.ieeecomputersociety.org/10.1109/ISM.2008.27
+9b4d2cd2e5edbf5c8efddbdcce1db9a02a853534,https://doi.org/10.1016/j.neucom.2016.02.063
+9b1a70d6771547cbcf6ba646f8775614c0162aca,https://doi.org/10.1016/j.patrec.2016.11.005
+9b1c218a55ead45296bfd7ad315aaeff1ae9983e,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2481396
+9e8382aa1de8f2012fd013d3b39838c6dad8fb4d,http://doi.acm.org/10.1145/3123266.3123349
+9e5690cdb4dfa30d98dff653be459e1c270cde7f,https://doi.org/10.1109/ICIP.2017.8297080
+9e5809122c0880183c7e42c7edd997f92de6d81e,http://doi.acm.org/10.1145/2451176.2451209
+9e7646b7e9e89be525cda1385cc1351cc28a896e,http://doi.ieeecomputersociety.org/10.1109/TMC.2017.2702634
+9e99f818b37d44ec6aac345fb2c5356d83d511c7,https://doi.org/10.1109/ISSPA.2012.6310540
+9eaa967d19fc66010b7ade7d94eaf7971a1957f3,https://doi.org/10.1109/IWCIA.2013.6624793
+9eb13f8e8d948146bfbae1260e505ba209c7fdc1,https://doi.org/10.1109/AFGR.2008.4813404
+9e28243f047cc9f62a946bf87abedb65b0da0f0a,https://doi.org/10.1109/ICMLA.2013.141
+9ef06cc958af2274afd193a1dca705c08234bcd3,https://doi.org/10.1109/ICIP.2014.7026207
+9e60614fd57afe381ae42c6ee0b18f32f60bb493,https://doi.org/10.1109/ICIP.2015.7351544
+049186d674173ebb76496f9ecee55e17ed1ca41b,https://doi.org/10.1109/ACCESS.2017.2724763
+045e83272db5e92aa4dc8bdfee908534c2608711,http://doi.ieeecomputersociety.org/10.1109/ICCABS.2016.7802775
+047d3cb2a6a9628b28cac077b97d95b04ca9044c,https://doi.org/10.1109/FG.2011.5771332
+041b51a81a977b5c64682c55414ad8d165c1f2ce,https://doi.org/10.1109/TCE.2014.7027339
+04f56dc5abee683b1e00cbb493d031d303c815fd,http://doi.acm.org/10.1145/2808492.2808557
+04c07ecaf5e962ac847059ece3ae7b6962b4e5c4,http://doi.acm.org/10.1145/2993148.2997631
+047ce307ad0c871bc2c9a5c1e4649cefae2ba50d,https://doi.org/10.1109/ICRA.2012.6224587
+045275adac94cced8a898a815293700401e9955f,https://doi.org/10.1007/s00138-012-0447-z
+6a3fa483c64e72d9c96663ff031446a2bdb6b2eb,https://doi.org/10.1016/j.patcog.2017.02.003
+6a38e4bb35673a73f041e34d3f2db7067482a9b5,http://doi.acm.org/10.1145/2663204.2666277
+6afe1f668eea8dfdd43f0780634073ed4545af23,https://doi.org/10.1007/s11042-017-4962-9
+6a527eeb0b2480109fe987ed7eb671e0d847fca8,https://doi.org/10.1007/978-3-319-28515-3
+6adecb82edbf84a0097ff623428f4f1936e31de0,https://doi.org/10.1007/s11760-011-0246-4
+6aa0a47f4b986870370c622be51f00f3a1b9d364,https://doi.org/10.1109/TIP.2012.2192285
+6ad5ac867c5ca56e0edaece153269d989b383b59,https://doi.org/10.1109/CISP-BMEI.2016.7852723
+321db1059032b828b223ca30f3304257f0c41e4c,https://doi.org/10.1109/ICACCI.2015.7275951
+32b76220ed3a76310e3be72dab4e7d2db34aa490,https://doi.org/10.1109/SMC.2014.6974364
+32bab8fe6db08c9d1e906be8a9c7e8cf7a0f0b99,http://doi.ieeecomputersociety.org/10.1109/CIS.2007.196
+327ae6742cca4a6a684a632b0d160dd84d0d8632,https://doi.org/10.1007/s10851-015-0629-1
+32c5c65db2af9691f8bb749c953c978959329f8f,https://doi.org/10.1109/ICIP.2015.7351469
+322488c4000c686e9bfb7514ccdeacae33e53358,http://doi.acm.org/10.1145/2671188.2749301
+32dfd4545c87d9820cc92ca912c7d490794a81d6,https://doi.org/10.1007/978-3-319-50551-0
+328da943e22adef5957c08b6909bda09d931a350,https://doi.org/10.1109/ICARCV.2008.4795605
+3288e16c62a215254e2ed7c39675482b356c3bef,https://doi.org/10.1109/SACI.2016.7507341
+329b2781007604652deb72139d14315df3bc2771,http://doi.acm.org/10.1145/2671188.2749358
+32a440720ee988b7b41de204b2910775171ee12c,https://doi.org/10.1109/ICIP.2011.6116351
+3251f40ed1113d592c61d2017e67beca66e678bb,https://doi.org/10.1007/978-3-319-65172-9_17
+356a144d2aa5cc5e74d178dae3963003871aa8a1,https://doi.org/10.1007/978-3-319-27671-7_41
+359edbaa9cf56857dd5c7c94aaef77003ba8b860,https://doi.org/10.1007/978-3-319-02714-2
+35d90beea6b4dca8d949aae93f86cf53da72971f,https://doi.org/10.1109/ICIP.2011.6116672
+35ccc836df60cd99c731412fe44156c7fd057b99,https://doi.org/10.1109/ICCIS.2017.8274819
+3598d10d7d4f2b543afa8bcf6b2c34a3696ef155,https://doi.org/10.1109/SPAC.2017.8304347
+359b4a4c6cb58c8ab5e8eaaed0e8562c8c43a0f9,https://doi.org/10.1007/s10044-014-0377-7
+35d272877b178aa97c678e3fcbb619ff512af4c2,https://doi.org/10.1109/SMC.2017.8122743
+35b3dc0e961a15a7a60b95490a989f91680acc7c,http://doi.ieeecomputersociety.org/10.1109/TDSC.2016.2550459
+35d42f4e7a1d898bc8e2d052c38e1106f3e80188,https://doi.org/10.1109/BTAS.2015.7358765
+69a9cf9bc8e585782824666fa3fb5ce5cf07cef2,https://doi.org/10.1007/s11390-017-1738-7
+699b8250fb93b3fa64b2fc8f59fef036e172564d,https://doi.org/10.1109/ICMLA.2016.0147
+69064c7b349bf6e7f4a802f4fd0da676c1bd1d8b,https://doi.org/10.1016/j.patcog.2014.06.016
+69ba86f7aac7b7be0ac41d990f5cd38400158f96,https://doi.org/10.1109/TNNLS.2015.2504724
+69ad67e204fb3763d4c222a6c3d05d6725b638ed,http://doi.ieeecomputersociety.org/10.1109/ICMEW.2014.6890538
+69b2a7533e38c2c8c9a0891a728abb423ad2c7e7,https://doi.org/10.1016/j.imavis.2013.03.003
+3c8db0d86a6aa51b64ec09c7d25a721adcdfb7a3,https://doi.org/10.1016/j.imavis.2015.06.009
+3c1b73509cc09200e96ab9cfb28ebfd9d1d6aa9a,https://doi.org/10.1109/LSP.2016.2639341
+3c7825dcf5a027bd07eb0fe4cce23910b89cf050,http://doi.acm.org/10.1145/2987378
+3c086601ce0bac61047b5b931b253bd4035e1e7a,https://doi.org/10.1109/ICIP.2015.7350897
+3cbd3124b1b4f95fcdf53abd358d7ceec7861dda,http://doi.acm.org/10.1145/3019612.3019641
+3c09d15b3e78f38618b60388ec9402e616fc6f8e,https://doi.org/10.1109/IJCNN.2010.5596793
+3ce96f03874d42345c0727edc78b6949b20b4a11,https://doi.org/10.1007/s11042-015-2630-5
+3c18fb8ff0f5003fefa8e9dc9bebaf88908d255c,https://doi.org/10.1109/ICIP.2014.7025145
+3c6542295cf7fe362d7d629ac10670bf30cdabce,https://doi.org/10.1109/DICTA.2015.7371264
+3ce37af3ac0ed2eba08267a3605730b2e0433da5,https://doi.org/10.1109/TIP.2016.2609811
+3cd22b5b81a0172d608ff14be71b755d1f68c201,https://doi.org/10.1109/ACCESS.2018.2812725
+3cc2a2eaaacbf96c6b9abc1cf91bfefabf6fcfdd,https://doi.org/10.1109/TCSVT.2014.2317887
+3ca6adc90aae5912baa376863807191ffd56b34e,https://doi.org/10.1109/LSP.2014.2316918
+5642bafa7955b69f05c11230151cd59fcbe43b8e,https://doi.org/10.1007/s11760-012-0404-3
+56fb30b24e7277b47d366ca2c491749eee4d6bb1,https://doi.org/10.1109/ICAPR.2015.7050658
+56bcc89fb1e05d21a8b7b880c6b4df79271ceca5,https://doi.org/10.1007/s11760-013-0441-6
+56e25358ebfaf8a8b3c7c33ed007e24f026065d0,https://doi.org/10.1007/s10994-015-5541-9
+568ced900cbf7437c9e87b60a17e16f0c1e0c442,https://doi.org/10.1109/CCECE.2012.6335026
+5613cb13ab381c8a8b81181ac786255705691626,https://doi.org/10.1109/VCIP.2015.7457876
+56fa0872ed73f7acfbfe83677fecb2dbc6eaa2fe,https://doi.org/10.1007/s11554-007-0031-3
+569988e19ab36582d4bd0ec98e344cbacf177f45,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2014.2347960
+56f57786516dcc8ea3c0ffe877c1363bfb9981d2,https://doi.org/10.1109/CBMI.2014.6849823
+565f7c767e6b150ebda491e04e6b1de759fda2d4,https://doi.org/10.1016/j.patcog.2016.11.023
+51f626540860ad75b68206025a45466a6d087aa6,https://doi.org/10.1109/ICIP.2017.8296595
+51b770e6b2af994ffc8793f59b24a9f619033a3a,https://doi.org/10.1109/ICDSC.2011.6042899
+516f8728ad1d4f9f2701a2b5385f8c8e71b9d356,https://doi.org/10.1109/ACCESS.2017.2745903
+5101368f986aa9837fdb3a71cb4299dff6f6325d,https://doi.org/10.1109/ICIP.2008.4712155
+5180c98815d7034e753a14ef6f54583f115da3aa,http://doi.ieeecomputersociety.org/10.1109/iV.2017.40
+3d2c932f4f2693a87a0b855048e60f142214f475,http://doi.ieeecomputersociety.org/10.1109/CSE.2014.354
+3d1959048eba5495e765a80c8e0bbd3d65b3d544,https://doi.org/10.1016/j.neucom.2016.07.038
+3d2c89676fcc9d64aaed38718146055152d22b39,https://doi.org/10.1109/ACPR.2013.10
+3d4b76fe73ea16400d62d0d776b3f43cc5ecf72b,https://doi.org/10.1109/TIFS.2015.2512561
+3d1f976db6495e2bb654115b939b863d13dd3d05,https://doi.org/10.1007/s11042-015-2581-x
+3dfbd17bd9caf7bd1d908ff469dec2b61e8a9548,https://doi.org/10.1109/ITSC.2015.252
+3d4d3f70352dc833e454a5756d682f27eca46e5d,http://doi.ieeecomputersociety.org/10.1109/FG.2017.32
+3d0b2da6169d38b56c58fe5f13342cf965992ece,https://doi.org/10.1109/ICIP.2016.7532909
+3d89f9b4da3d6fb1fdb33dea7592b5992069a096,https://doi.org/10.1109/CISP-BMEI.2017.8302003
+3d9e44d8f8bc2663192c7ce668ccbbb084e466e4,http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019505
+3d6f59e0f0e16d01b9c588a53d3b6b3b984e991e,http://doi.ieeecomputersociety.org/10.1109/FG.2017.117
+5810ce61fda464d4de2769bd899e12727bee0382,https://doi.org/10.1109/IJCNN.2016.7727484
+58d43e32660446669ff54f29658961fe8bb6cc72,https://doi.org/10.1109/ISBI.2017.7950504
+583e0d218e1e7aaf9763a5493e7c18c2b8dd7464,http://doi.acm.org/10.1145/2988240.2988243
+58684a925693a0e3e4bb1dd2ebe604885be034d2,https://doi.org/10.1109/ICASSP.2008.4517869
+58483028445bf6b2d1ad6e4b1382939587513fe1,http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247763
+5865b6d83ba6dbbf9167f1481e9339c2ef1d1f6b,https://doi.org/10.1109/ICPR.2016.7900278
+58eb9174211d58af76023ce33ee05769de57236c,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2016.2636827
+58d0c140597aa658345230615fb34e2c750d164c,http://doi.acm.org/10.1145/3098954.3098969
+5811944e93a1f3e35ece7a70a43a3de95c69b5ab,https://doi.org/10.1109/BTAS.2016.7791163
+58df849378fbcfb6b1a8ebddfbe4caa450226b9d,https://doi.org/10.1109/ICIP.2017.8296770
+58e7dbbb58416b785b4a1733bf611f8106511aca,http://doi.ieeecomputersociety.org/10.1109/ACII.2017.8273597
+673541a8cb1aa3ac63a288523ba71aec2a38280e,http://doi.ieeecomputersociety.org/10.1109/ICME.2016.7552971
+67214e8d2f83eb41c14bfc86698eb6620e72e87c,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.263
+67e6ddce6fea17bb2b171c949ee224936d36c0d1,https://doi.org/10.1109/ICIP.2008.4712157
+0b58b3a5f153f653c138257426bf8d572ae35a67,https://doi.org/10.1109/SMC.2016.7844481
+0b3144cdc9d6d5a1498d6178db20d1c49fb64de9,http://doi.acm.org/10.1145/1322192.1322203
+0bab5213911c19c40e936b08d2f8fba01e286b85,https://doi.org/10.1109/BigMM.2017.81
+0b8839945259ec764ef0fad47471f34db39f40c3,https://doi.org/10.1109/DESEC.2017.8073838
+0be418e63d111e3b94813875f75909e4dc27d13a,https://doi.org/10.1109/ICB.2016.7550057
+0bf1f999a16461a730dd80e3a187d0675c216292,http://doi.ieeecomputersociety.org/10.1109/CW.2017.26
+0be015e2f9a1d2acebc3afb6e0f6948dd2f9d23d,https://doi.org/10.1007/s12193-013-0133-0
+93d903d2e48d6a8ad3e3d2aff2e57622efe649cd,https://doi.org/10.1109/ICIP.2016.7532432
+935924ddb5992c11f3202bf995183130ad83d07b,https://doi.org/10.1117/1.JEI.24.2.023015
+93e1e195f294c463f4832c4686775bf386b3de39,https://doi.org/10.1109/TIP.2015.2490551
+93108f1548e8766621565bdb780455023349d2b2,https://doi.org/10.1109/ICIP.2010.5653914
+939f9fa056f8be445da19b43da64bd2405851a43,https://doi.org/10.1109/ICSMC.2007.4413713
+939d28859c8bd2cca2d692901e174cfd599dac74,https://doi.org/10.1109/WOCC.2016.7506582
+9378ead3a09bc9f89fb711e2746facf399dd942e,https://doi.org/10.1109/TCSVT.2010.2045817
+93978ba84c8e95ff82e8b5960eab64e54ca36296,http://doi.acm.org/10.1145/3136755.3136806
+934efd61b20f5b8b151a2df7cd373f0b387c02b0,https://doi.org/10.5220/0004673003290336
+93eb3963bc20e28af26c53ef3bce1e76b15e3209,https://doi.org/10.1109/ICIP.2017.8296992
+945ef646679b6c575d3bbef9c6fc0a9629ac1b62,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477689
+947cdeb52f694fb1c87fc16836f8877cd83dc652,https://doi.org/10.1109/SMAP.2017.8022671
+946b4d840b026d91608758d04f2763e9b981234e,http://doi.acm.org/10.1145/2388676.2388792
+942f6eb2ec56809430c2243a71d03cc975d0a673,https://doi.org/10.1109/BigMM.2017.64
+942b89d8d17e89e58c82453de2bfcbbeb09adc81,https://doi.org/10.1016/j.patcog.2016.02.019
+94b729f9d9171e7c4489995e6e1cb134c8521f4e,http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2016.055
+948f35344e6e063ffc35f10c547d5dd9204dee4e,https://doi.org/10.1016/j.eswa.2017.07.037
+940e5c45511b63f609568dce2ad61437c5e39683,https://doi.org/10.1109/TIP.2015.2390976
+0eed55ea9f401f25e1474cdbaf09367f44b4f490,https://doi.org/10.1016/j.neucom.2013.05.032
+0ea05bbc0b0c8b7df10f16e9429ef90177bf94fa,https://doi.org/10.1163/016918610X538534
+0e05b365af662bc6744106a7cdf5e77c9900e967,https://doi.org/10.1007/s11042-014-2234-5
+0ee83ed9bedc0cec5c3368144df0b6f4ee76ddff,http://doi.ieeecomputersociety.org/10.1109/CGIV.2007.40
+0e37d70794d5ccfef8b4cc22b4203245f33eec6e,https://doi.org/10.1109/ICIP.2010.5653034
+0e8a28511d8484ad220d3e8dde39220c74fab14b,https://doi.org/10.1109/TNNLS.2015.2477826
+0e454686f83284ced2ffc5740829552a032671a3,https://doi.org/10.1109/IJCNN.2015.7280802
+0e5557a0cc58194ad53fab5dd6f4d4195d19ce4e,https://doi.org/10.1109/TMM.2015.2500730
+0ed4b4d6d1a0c49c4eb619aab36db559b620d99f,https://doi.org/10.1016/j.neucom.2015.11.115
+0ef20991e0ecc7dc3f6e0e5fd6ee93c4970206f3,https://doi.org/10.1109/ICIP.2015.7351013
+0e2d956790d3b8ab18cee8df6c949504ee78ad42,https://doi.org/10.1109/IVCNZ.2013.6727024
+0e4baf74dfccef7a99c6954bb0968a2e35315c1f,https://doi.org/10.1109/SIU.2012.6204517
+0ed96cc68b1b61e9eb4096f67d3dcab9169148b9,http://doi.acm.org/10.1145/2663204.2666279
+0e4fa61871755b5548a5c970c8103f7b2ada24f3,http://doi.ieeecomputersociety.org/10.1109/SITIS.2015.19
+0e02dadab802128f6155e099135d03ca6b72f42c,http://doi.ieeecomputersociety.org/10.1109/TKDE.2014.2365793
+0e6f422c3f79c552c0c3d7eda0145aed8680f0ea,https://doi.org/10.1016/j.patrec.2012.09.008
+608b01c70f0d1166c10c3829c411424d9ef550e7,https://doi.org/10.1109/CISP-BMEI.2017.8301920
+606dff86a34c67c79d93f1e536487847a5bb7002,https://doi.org/10.1109/WACV.2011.5711538
+607aebe7568407421e8ffc7b23a5fda52650ad93,https://doi.org/10.1109/ISBA.2016.7477237
+609c35a6fa80af8b2e4ce46b1b16ec36578fd07f,https://doi.org/10.1155/2014/950349
+602f772c69e4a1a65de00443c30d51fdd47a80aa,https://doi.org/10.1109/IISA.2013.6623705
+609d81ddf393164581b3e3bf11609a712ac47522,https://doi.org/10.1109/APSIPA.2017.8282300
+603231c507bb98cc8807b6cbe2c860f79e8f6645,https://doi.org/10.1109/EUSIPCO.2015.7362819
+60284c37249532fe7ff6b14834a2ae4d2a7fda02,https://doi.org/10.1109/SIU.2016.7495971
+6014eeb333998c2b2929657d233ebbcb1c3412c9,http://doi.acm.org/10.1145/2647868.2656406
+34546ef7e6148d9a1fb42cfab5f0ce11c92c760a,https://doi.org/10.1016/j.jvcir.2015.09.005
+34c2ea3c7e794215588c58adf0eaad6dc267d082,http://doi.acm.org/10.1145/3136755.3143005
+34c1e9a6166f4732d1738db803467f7abc47ba87,https://doi.org/10.1109/WACV.2017.137
+344c0917c8d9e13c6b3546da8695332f86b57bd3,https://doi.org/10.1109/ICIP.2017.8296715
+349c909abf937ef0a5a12c28a28e98500598834b,http://doi.ieeecomputersociety.org/10.1109/ICMEW.2014.6890672
+34dd83115195676e7a8b008eb0e9abe84b330b32,https://doi.org/10.1007/s00371-014-0931-8
+5a259f2f5337435f841d39dada832ab24e7b3325,http://doi.acm.org/10.1145/2964284.2984059
+5af06815baa4b8f53adc9dc22f6eb3f6f1ad8ff8,https://doi.org/10.1186/s13640-017-0178-1
+5a0ae814be58d319dfc9fd98b058a2476801201c,https://doi.org/10.1007/s00521-012-1124-x
+5feee69ed183954fa76c58735daa7dd3549e434d,https://doi.org/10.1109/ICIP.2008.4711697
+5fc97d6cb5af21ed196e44f22cee31ce8c51ef13,http://doi.acm.org/10.1145/2742060.2743769
+5f7094ba898a248e1e6b37e3d9fb795e59131cdc,http://doi.ieeecomputersociety.org/10.1109/ICMEW.2017.8026246
+5fb9944b18f5a4a6d20778816290ed647f5e3853,http://doi.acm.org/10.1145/3080538.3080540
+5f1cd82343f4bd6972f674d50aecb453d06f04ad,http://doi.acm.org/10.1145/3125739.3125756
+5f4219118556d2c627137827a617cf4e26242a6e,https://doi.org/10.1109/TMM.2017.2751143
+5fa6f72d3fe16f9160d221e28da35c1e67a5d951,http://doi.acm.org/10.1145/3061639.3062182
+5fb59cf5b31a80d8c70d91660092ef86494be577,https://doi.org/10.1109/CISP-BMEI.2017.8301923
+5fce9d893a40c4e0f2ae335b2e68bfd02f1cb2c6,https://doi.org/10.1109/ICTAI.2012.40
+5f448ab700528888019542e6fea1d1e0db6c35f2,https://doi.org/10.1109/LSP.2016.2533721
+5f9dc3919fb088eb84accb1e490921a134232466,http://doi.ieeecomputersociety.org/10.1109/WACV.2007.49
+33c2131cc85c0f0fef0f15ac18f28312347d9ba4,https://doi.org/10.1016/j.neucom.2010.06.024
+33b915476f798ca18ae80183bf40aea4aaf57d1e,https://doi.org/10.1109/TIP.2013.2271548
+332d773b70f2f6fb725d49f314f57b8f8349a067,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.220
+33b61be191e63b0c9974be708180275c9d5b3057,https://doi.org/10.1109/ICRA.2011.5979705
+33bbf01413910bca26ed287112d32fe88c1cc0df,https://doi.org/10.1109/ICIP.2014.7026204
+331d6ace8d59fa211e5bc84a93fdc65695238c69,https://doi.org/10.1007/s10115-017-1115-4
+05184f01e66d7139530729b281da74db35a178d2,http://ieeexplore.ieee.org/document/6460470/
+052fb35f731680d9d4e7d89c8f70f14173efb015,http://doi.acm.org/10.1145/2893487
+05785cb0dcaace54801aa486d4f8fdad3245b27a,https://doi.org/10.1109/ICPR.2016.7899760
+053ee4a4793f54b02dfabde5436fd7ee479e79eb,http://doi.acm.org/10.1145/3160504.3160507
+052c5ef6b20bf3e88bc955b6b2e86571be08ba64,https://doi.org/10.1109/TIFS.2011.2170068
+0561bed18b6278434deae562d646e8adad72e75d,https://doi.org/10.1016/j.neucom.2014.09.052
+0553c6b9ee3f7d24f80e204d758c94a9d6b375d2,https://doi.org/10.1109/ICIP.2004.1419764
+055cd8173536031e189628c879a2acad6cf2a5d0,https://doi.org/10.1109/BTAS.2017.8272740
+05c5134125a333855e8d25500bf97a31496c9b3f,http://doi.acm.org/10.1145/3132515.3132517
+05a116cb6e220f96837e4418de4aa8e39839c996,http://doi.ieeecomputersociety.org/10.1109/T-AFFC.2012.30
+050e51268b0fb03033428ac777ccfef2db752ab3,https://doi.org/10.1109/DICTA.2007.4426834
+052cec9fdbfe12ccd02688f3b7f538c0d73555b3,https://doi.org/10.1109/ICIP.2016.7533172
+9d1cebed7672210f9c411c5ba422a931980da833,http://doi.ieeecomputersociety.org/10.1109/ISM.2016.0078
+9da63f089b8ee23120bfa8b4d9d9c8f605f421fc,http://doi.acm.org/10.1145/2072298.2072043
+9d4692e243e25eb465a0480376beb60a5d2f0f13,https://doi.org/10.1109/ICCE.2016.7430617
+9d01eca806e0f98c5b3c9a865cec1bd8c78e0f0c,http://doi.acm.org/10.1145/3136755.3137032
+9df86395c11565afa8683f6f0a9ca005485c5589,https://doi.org/10.1007/s00530-014-0400-2
+9c686b318cb7774b6da5e2c712743a5a6cafa423,https://doi.org/10.1016/j.neuroimage.2015.12.036
+9cda3e56cec21bd8f91f7acfcefc04ac10973966,https://doi.org/10.1109/IWBF.2016.7449688
+9ce4541d21ee3511bf3dc55bc3cd01222194d95a,https://doi.org/10.1016/j.cviu.2017.05.008
+9ce97efc1d520dadaa0d114192ca789f23442727,http://doi.acm.org/10.1145/2597627
+9c81d436b300494bc88d4de3ac3ec3cc9c43c161,https://doi.org/10.1007/s11042-017-5019-9
+9cd4f72d33d1cedc89870b4f4421d496aa702897,https://doi.org/10.1117/1.JEI.22.2.023010
+9cb7b3b14fd01cc2ed76784ab76304132dab6ff3,https://doi.org/10.1109/ICIP.2015.7351174
+02e668f9b75f4a526c6fdf7268c8c1936d8e6f09,https://doi.org/10.1142/S0218001411008968
+028e237cb539b01ec72c244f57fdcfb65bbe53d4,http://doi.ieeecomputersociety.org/10.1109/CIS.2010.65
+026e96c3c4751e1583bfe78b8c28bdfe854c4988,https://doi.org/10.1109/ICIP.2017.8296442
+0247998a1c045e601dc4d65c53282b5e655be62b,https://doi.org/10.1109/ITSC.2017.8317782
+021469757d626a39639e260492eea7d3e8563820,https://doi.org/10.1007/b116723
+02a92b79391ddac0acef4f665b396f7f39ca2972,https://doi.org/10.1016/j.patcog.2016.10.021
+a4bb791b135bdc721c8fcc5bdef612ca654d7377,https://doi.org/10.1109/BTAS.2017.8272703
+a4725a5b43e7c36d9e30028dff66958f892254a0,http://doi.acm.org/10.1145/2663204.2666271
+a4543226f6592786e9c38752440d9659993d3cb3,http://doi.ieeecomputersociety.org/10.1109/FG.2017.112
+a4e75766ef93b43608c463c233b8646439ce2415,https://doi.org/10.1109/ICCVW.2011.6130492
+a317083d9aac4062e77aa0854513383c87e47ece,https://doi.org/10.1016/j.patcog.2015.06.003
+a35ed55dc330d470be2f610f4822f5152fcac4e1,https://doi.org/10.1109/ISBA.2015.7126369
+a324d61c79fe2e240e080f0dab358aa72dd002b3,https://doi.org/10.1016/j.patcog.2016.02.005
+a3add3268c26876eb76decdf5d7dd78a0d5cf304,https://doi.org/10.1016/j.specom.2017.07.003
+a3ed0f15824802359e05d9777cacd5488dfa7dba,http://doi.acm.org/10.1145/2851581.2892282
+a3bf6129d1ae136709063a5639eafd8018f50feb,http://doi.ieeecomputersociety.org/10.1109/ICCI-CC.2017.8109741
+a38dd439209b0913b14b1c3c71143457d8cf9b78,https://doi.org/10.1109/IJCNN.2015.7280803
+b5ae8b69677fb962421fe7072f1e842e71f3bea5,http://doi.ieeecomputersociety.org/10.1109/ACII.2017.8273641
+b5979489e11edd76607c219a8bdc83ba4a88ab38,https://doi.org/10.1109/ACCESS.2017.2778011
+b5bda4e1374acc7414107cde529ad8b3263fae4b,https://doi.org/10.1007/s11370-010-0066-3
+b54fe193b6faf228e5ffc4b88818d6aa234b5bb9,http://doi.acm.org/10.1145/2964284.2967287
+b5690409be6c4e98bd37181d41121adfef218537,https://doi.org/10.1109/ICIP.2008.4711920
+b58d381f9f953bfe24915246b65da872aa94f9aa,https://doi.org/10.1109/SMAP.2013.13
+b5f79df712ad535d88ae784a617a30c02e0551ca,https://doi.org/10.1109/LSP.2015.2480758
+b50edfea790f86373407a964b4255bf8e436d377,http://doi.acm.org/10.1145/3136755.3143008
+b299c292b84aeb4f080a8b39677a8e0d07d51b27,http://doi.ieeecomputersociety.org/10.1109/ICDM.2015.23
+b2add9fad0bcf7bf0660f99f389672cdf7cc6a70,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.226
+b2ae5c496fe01bb2e2dee107f75b82c6a2a23374,http://doi.ieeecomputersociety.org/10.1109/FG.2017.116
+b208f2fc776097e98b41a4ff71c18b393e0a0018,http://doi.ieeecomputersociety.org/10.1109/AVSS.2003.1217900
+b259f57f41f4b3b5b7ca29c5acb6f42186bbcf23,https://doi.org/10.1109/SMC.2017.8122808
+b2f9e0497901d22b05b9699b0ea8147861c2e2cc,https://doi.org/10.1007/978-3-319-70353-4_3
+b209608a534957ec61e7a8f4b9d08286ae3d1d7f,https://doi.org/10.1111/j.1468-0394.2011.00589.x
+b22317a0bbbcc79425f7c8a871b2bf211ba2e9c4,https://doi.org/10.1109/ACCESS.2018.2805861
+b21bf45cd3aeaec3440eeca09a1c5a5ee3d24a3a,https://doi.org/10.1080/10798587.2014.934592
+b234d429c9ea682e54fca52f4b889b3170f65ffc,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2012.22
+b2ddea9c71cd73fa63e09e8121bc7a098fae70b4,https://doi.org/10.1109/ISCCSP.2012.6217849
+b262a2a543971e10fcbfc7f65f46115ae895d69e,https://doi.org/10.1109/DICTA.2015.7371266
+b2cb335ded99b10f37002d09753bd5a6ea522ef1,https://doi.org/10.1109/ISBA.2017.7947679
+d9c0310203179d5328c4f1475fa4d68c5f0c7324,http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI-T.2017.11
+d98a36081a434451184fa4becb59bf5ec55f3a1e,https://doi.org/10.1016/j.neucom.2016.09.110
+d9072e6b7999bc2d5750eb58c67a643f38d176d6,https://doi.org/10.1109/LSP.2009.2027636
+d92084e376a795d3943df577d3b3f3b7d12eeae5,http://doi.ieeecomputersociety.org/10.1109/FG.2017.85
+d963bdff2ce5212fa585a83ca8fad96875bc0057,https://doi.org/10.1016/j.neucom.2016.03.091
+d983dda8b03ed60fa3afafe5c50f1d9a495f260b,https://doi.org/10.1016/j.patcog.2007.03.020
+d9e34af95c21c0e114b61abccbc653480b370c3b,https://doi.org/10.1016/j.patcog.2005.10.020
+d91a5589fd870bf62b7e4979d9d47e8acf6c655d,http://doi.acm.org/10.1145/2382336.2382343
+d9d7a4b64b13ed1bce89d3cbbabe62e78d70b3fb,http://doi.ieeecomputersociety.org/10.1109/ACII.2013.16
+d9eed86e53ce5f7cba379fe77bbefb42e83c0d88,https://doi.org/10.1109/TIP.2017.2764262
+d9b4b49378fcd77dcd5e755975b99ed4c7962f17,https://doi.org/10.1109/TIP.2015.2473105
+d91f9e8cbf271004ef1a293401197a10a26ccd1b,https://doi.org/10.1109/SOCPAR.2015.7492801
+ace1e0f50fe39eb9a42586f841d53980c6f04b11,http://doi.ieeecomputersociety.org/10.1109/SMARTCOMP.2014.7043849
+acab402d706dbde4bea4b7df52812681011f435e,https://doi.org/10.1109/HIS.2012.6421377
+acd4280453b995cb071c33f7c9db5760432f4279,https://doi.org/10.1007/s00138-018-0907-1
+ac48ecbc7c3c1a7eab08820845d47d6ce197707c,https://doi.org/10.1109/TIP.2017.2681841
+ac37285f2f5ccf99e9054735a36465ee35a6afdd,https://doi.org/10.1109/ISCAS.2006.1693880
+ad08426ca57da2be0e9f8c1f673e491582edb896,http://doi.ieeecomputersociety.org/10.1109/TKDE.2013.98
+adad7446e371d27fdaee39475856e2058f3045e5,https://doi.org/10.1109/ISCAS.2013.6572295
+ad6cc071b2585e4bdb6233b7ad8d63e12538537d,https://doi.org/10.1007/s10462-010-9172-z
+ad4d1ecf5c5473c050e11f6876ce148de1c8920a,https://doi.org/10.1109/IJCNN.2017.7965886
+ad9ba7eade9d4299159512d6d5d07d7d3d26ae58,https://doi.org/10.1007/s11063-012-9252-y
+ad8bd7016132a2f98ff1f41dac695285e71cc4b1,https://doi.org/10.1109/CISP-BMEI.2017.8301964
+add6d96fc018986f51a1aac47eae9ee3fc62fb66,http://doi.acm.org/10.1145/3009977.3010074
+ad5a35a251e07628dd035c68e44a64c53652be6b,https://doi.org/10.1016/j.patcog.2016.12.024
+ad7b6d2e8d66f720cc83323a0700c25006d49609,https://doi.org/10.1109/TIP.2009.2028255
+adb040081974369c46b943e9f75be4e405623102,http://doi.ieeecomputersociety.org/10.1109/PACCS.2009.191
+ad339a5fdaab95f3c8aad83b60ceba8d76107fa2,https://doi.org/10.1023/B:VISI.0000013090.39095.d5
+ada56c9ceef50aa5159f1f8aa45ca2040d1ed15c,https://doi.org/10.1109/TIFS.2017.2680246
+ad1679295a5e5ebe7ad05ea1502bce961ec68057,http://doi.ieeecomputersociety.org/10.1109/ACII.2015.7344631
+adf9998214598469f7a097bc50de4c23784f2a5a,http://doi.ieeecomputersociety.org/10.1109/ICPR.2014.745
+ad50f6899103eff0ee4504e539c38eb965fd1309,https://doi.org/10.1109/IJCNN.2010.5596374
+bbc21d6b7c6e807c6886d237a04b501158ca6bb8,https://doi.org/10.1109/TMM.2016.2523421
+bb070c019c0885232f114c7dca970d2afd9cd828,https://doi.org/10.1109/DICTA.2014.7008089
+bbc47f421ab161f22f2699ee7bbb7fc8aec1cb7b,https://doi.org/10.1109/IJCNN.2017.7966271
+bb3698df3b4f40c0b7cc523d26ffb8c5276d5a1c,https://doi.org/10.1109/ICDSP.2016.7868528
+bb83d5c7c17832d1eef14aa5d303d9dd65748956,http://doi.acm.org/10.1145/3139513.3139514
+bbc8ccd3f62615e3c0ce2c3aee5e4a223d215bbd,https://doi.org/10.1007/s11042-015-2497-5
+d7c87f4ca39f79d93c954ffacac32bc6eb527e2c,https://doi.org/10.1007/978-3-642-15696-0_57
+d75bd05865224a1341731da66b8d812a7924d6f6,https://doi.org/10.1109/TSMCB.2012.2217127
+d79530e1745b33f3b771d0b38d090b40afc04191,https://doi.org/10.1007/s11042-015-2485-9
+d7bd37920a3a4a4d681151131e23a839695c8d5b,https://doi.org/10.1109/ICRA.2011.5979870
+d7b7253f7d8b397d9d74057e1e72ed9c58e2ba6d,https://doi.org/10.1109/TII.2013.2271914
+d723ebf3288126fa8cbb10ba7e2a6308aede857c,https://doi.org/10.1117/12.968586
+d7a84db2a1bf7b97657b0250f354f249394dd700,https://doi.org/10.1109/ICIP.2010.5653518
+d05759932001aa6f1f71e7dc261c4716f57a5397,https://doi.org/10.1109/ISBA.2015.7126365
+d046030f7138e5a2dbe2b3eec1b948ad8c787538,https://doi.org/10.1109/ICIP.2009.5413447
+d0b67ec62086b55f00dc461ab58dc87b85388b2b,https://doi.org/10.1109/ICIP.2014.7026206
+d0a8889f694422614bf3ecccd69aa1d4f7822606,https://doi.org/10.1007/978-0-85729-997-0_22
+d0f9143f6f43a39bff47daf8c596681581db72ea,https://doi.org/10.1007/s11042-017-5241-5
+d0b7d3f9a59034d44e7cd1b434cfd27136a7c029,https://doi.org/10.1109/INCoS.2013.143
+d0d75a7116a76ccd98a3aeb6f6fff10ba91de1c1,https://doi.org/10.1109/TIP.2015.2502144
+d09fd7e0bb5d997963cfef45452724416b2bb052,https://doi.org/10.1109/EMEIT.2011.6023179
+d0dd1364411a130448517ba532728d5c2fe78ed9,https://doi.org/10.1109/ISCAS.2016.7527183
+be51854ef513362bc236b85dd6f0e2c2da51614b,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.298
+be6bd94322dd0ecfc8ea99eb7f40a9a14dd3471f,https://doi.org/10.1109/UIC-ATC.2013.32
+be40014beffaa9faacee12bb3412969f98b6a43d,http://doi.ieeecomputersociety.org/10.1109/SNPD.2007.454
+be0a0e563445119b82d664d370e646e53e69a4c5,https://doi.org/10.1016/j.eswa.2017.05.037
+b3050dc48600acf2f75edf1f580a1f9e9cb3c14a,https://doi.org/10.1007/s00138-013-0584-z
+b388bf63c79e429dafee16c62b2732bcbea0d026,https://doi.org/10.1109/ICIP.2016.7533051
+b351575e3eab724d62d0703e24ecae55025eef00,https://doi.org/10.1007/s10209-014-0369-9
+b34fdab6864782ce60fd90d09f5d886bd83f84f5,https://doi.org/10.1002/cpe.3766
+b36a80d15c3e48870ea6118b855055cc34307658,https://doi.org/10.1109/ICPR.2014.17
+b3e60bb5627312b72c99c5ef18aa41bcc1d21aea,https://doi.org/10.1109/SPAC.2014.6982690
+dfb8a04a80d4b0794c0679d797cb90ec101e162c,http://doi.ieeecomputersociety.org/10.1109/AVSS.2014.6918665
+dfbbe8100fcd70322a431bd5d2c2d52a65fd4bbd,http://doi.acm.org/10.1145/2818346.2823313
+df550cb749858648209707bec5410431ea95e027,https://doi.org/10.1109/TCYB.2015.2433926
+df7ff512e8324894d20103fd8ab5da650e4d86db,http://doi.acm.org/10.1145/2043674.2043709
+dff38cac0a1004037024f0ed2a72f76f4e49318b,https://doi.org/10.1109/TNNLS.2015.2495268
+df7af280771a6c8302b75ed0a14ffe7854cca679,http://doi.ieeecomputersociety.org/10.1109/ICMEW.2017.8026293
+da1477b4a65ae5a013e646b57e004f0cd60619a2,https://doi.org/10.1109/ICB.2012.6199764
+da2b2be4c33e221c7f417875a6c5c74043b1b227,https://doi.org/10.1109/BTAS.2017.8272712
+dab795b562c7cc270c9099b925d685bea0abe82a,http://doi.ieeecomputersociety.org/10.1109/TKDE.2014.2382599
+dac6e9d708a9757f848409f25df99c5a561c863c,https://doi.org/10.1109/LSP.2014.2334656
+da928ac611e4e14e454e0b69dfbf697f7a09fb38,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477718
+dae9d0a9b77366f0cd52e38847e47691ee97bc1f,https://doi.org/10.1007/s11760-015-0822-0
+b472f91390781611d4e197564b0016d9643a5518,http://doi.acm.org/10.1145/2382336.2382345
+b47a3c909ee9b099854619054fd00e200b944aa9,http://doi.ieeecomputersociety.org/10.1109/WACV.2015.77
+b42b535fcd0d9bd41a6594a910ea4623e907ceb9,https://doi.org/10.1109/ICTAI.2012.153
+b44f03b5fa8c6275238c2d13345652e6ff7e6ea9,https://doi.org/10.1109/GlobalSIP.2017.8309138
+a216f7863fc6ab15e2bb7a538dfe00924e1da0ab,http://doi.ieeecomputersociety.org/10.1109/FG.2015.7163087
+a2646865d7c3d7fb346cf714caf146de2ea0e68f,https://doi.org/10.1109/SMC.2016.7844390
+a200885bf6bfa0493d85e7617e65cdabe30a2dab,https://doi.org/10.1109/ICIP.2015.7351272
+a2cc3193ed56ef4cedaaf4402c844df28edb5639,https://doi.org/10.1016/j.patrec.2012.01.005
+a2af07176a38fe844b0e2fdf4abae65472628b38,https://doi.org/10.1109/ICIP.2014.7026060
+a2b76ab614d92f5e71312b530f0b6281d0c500f7,https://doi.org/10.1007/s10898-014-0231-x
+a5eb36f1e77245dfc9e5c0c03998529331e4c89b,https://doi.org/10.1109/BTAS.2014.6996222
+a53d13b9110cddb2a5f38b9d7ed69d328e3c6db9,https://doi.org/10.1109/TIP.2015.2481327
+a5b6a3234e15343d2e5417cff46c0a5f0943521e,https://doi.org/10.1109/TNNLS.2014.2321420
+a5b9c6aa52f91092b5a8ab04ed1f7b60c0ea5260,http://doi.ieeecomputersociety.org/10.1109/WI-IATW.2006.88
+a5d4cc596446517dfaa4d92276a12d5e1c0a284c,https://doi.org/10.1016/j.patrec.2009.06.002
+a5d76710dc15ebc7d8b4dc976604315f1e2fc3ba,http://doi.ieeecomputersociety.org/10.1109/ICMLA.2011.117
+a53f988d16f5828c961553e8efd38fed15e70bcc,https://doi.org/10.1109/BTAS.2015.7358787
+a52a69bf304d49fba6eac6a73c5169834c77042d,https://doi.org/10.1109/LSP.2017.2789251
+bdf5434648356ce22bdbf81d2951e4bb00228e4d,http://doi.ieeecomputersociety.org/10.1109/SNPD.2007.415
+bd26faef48080b5af294b19139c804ffec70825e,https://doi.org/10.1007/s11390-015-1526-1
+bdd203bcd3c41c336c5635fb026a78279d75b4be,https://doi.org/10.1109/ICPR.2016.7899761
+bd9e0b6a90b51cc19b65f51dacd08ce1a7ccaac5,https://doi.org/10.1109/VSMM.2014.7136653
+bd25c4ad7471580ed9787eae041b80a3c4fe97bb,https://doi.org/10.1016/j.sigpro.2010.01.019
+bd66dc891270d858de3adf97d42ed714860ae94d,https://doi.org/10.1109/ACPR.2015.7486598
+bd74c3ca2ff03396109ac2d1131708636bd0d4d3,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.228
+d119443de1d75cad384d897c2ed5a7b9c1661d98,https://doi.org/10.1109/ICIP.2010.5650873
+d1b5b3e4b803dc4e50c5b80c1bc69c6d98751698,https://doi.org/10.1109/LSP.2017.2661983
+d1184939e06dbc3b495c883c53b684c6d6aa9e48,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477669
+d1dd80d77655876fb45b9420fe72444c303b219e,https://doi.org/10.1109/FG.2011.5771371
+d12bea587989fc78b47584470fd8f689b6ab81d2,https://doi.org/10.1109/TIP.2013.2246523
+d1bd956a8523629ed4e2533b01272f22cea534c6,https://doi.org/10.1016/j.patrec.2010.01.021
+d60e3eef429ed2a51bbd806125fa31f5bea072a4,https://doi.org/10.1109/HIS.2013.6920481
+d6e3bd948aae43f7654ea1d9e89d88f20d8cf25f,https://doi.org/10.1109/ACPR.2013.98
+d691440030394c2e00a2ab47aba4f8b5fca5f25a,https://doi.org/10.1109/ICIP.2016.7532921
+d6bdc70d259b38bbeb3a78db064232b4b4acc88f,http://doi.ieeecomputersociety.org/10.1109/FG.2017.27
+d63bd06340dd35590a22222509e455c49165ee13,https://doi.org/10.1109/IJCNN.2016.7727234
+d6a5eb4377e2a67420778eab61b5a89046307bae,http://doi.ieeecomputersociety.org/10.1109/CRV.2014.37
+d628aabf1a666a875e77c3d3fee857cd25891947,https://doi.org/10.1109/SMC.2016.7844663
+d6791b98353aa113d79f6fb96335aa6c7ea3b759,https://doi.org/10.1109/TNNLS.2017.2648122
+bcf2710d46941695e421226372397c9544994214,https://doi.org/10.1109/ICNC.2015.7378076
+bc66685acc64fa3c425c0ee6c443d3fa87db7364,https://doi.org/10.1109/TMM.2013.2279658
+bccb35704cdd3f2765b1a3f0296d1bff3be019c1,https://doi.org/10.1109/ICMLA.2016.0145
+bcead1a92744e76c38caaa13159de4abfb81b1d0,https://doi.org/10.1109/ICIP.2014.7025310
+bca39960ba46dc3193defe0b286ee0bea4424041,https://doi.org/10.1016/j.patrec.2009.05.018
+bc6a7390135bf127b93b90a21b1fdebbfb56ad30,https://doi.org/10.1109/TIFS.2017.2766039
+ae73f771d0e429a74b04a6784b1b46dfe98f53e4,http://doi.ieeecomputersociety.org/10.1109/ICPR.2014.326
+ae425a2654a1064c2eda29b08a492c8d5aab27a2,https://doi.org/10.23919/MVA.2017.7986845
+ae89e464576209b1082da38e0cee7aeabd03d932,https://doi.org/10.1007/s00521-005-0017-7
+ae7604b1840753e9c2e1ab7a97e02f91a9d81860,https://doi.org/10.1007/s10586-016-0535-3
+aeb36fac7516753a14c3c690f352de78e70f8c6e,http://doi.ieeecomputersociety.org/10.1109/T-AFFC.2013.13
+aea977a3b5556957ed5fb3ef21685ee84921eaa3,https://doi.org/10.1007/s12193-017-0256-9
+aed6af12148b43e4a24ee6e2bc3604ca59bd99a5,https://doi.org/10.1109/TIP.2017.2717505
+ae8240095c9cca2c395f173fece2f46277b94929,https://doi.org/10.1016/j.neucom.2017.06.045
+ae96fc36c89e5c6c3c433c1163c25db1359e13ea,https://doi.org/10.1007/s10489-013-0485-x
+d8c9bad8d07ae4196027dfb8343b9d9aefb130ff,https://doi.org/10.1007/s00138-017-0848-0
+d8b99eada922bd2ce4e20dc09c61a0e3cc640a62,https://doi.org/10.1109/IJCNN.2014.6889675
+d878a67b2ef6a0a5dec72db15291f12419040ab1,https://doi.org/10.1109/IPTA.2016.7821012
+d8e5d94c3c8688f0ca0ee656c79847c7df04c77d,https://doi.org/10.1007/s12193-015-0187-2
+d855791bc23b4aa8e751d6a4e2ae7f5566a991e8,http://doi.acm.org/10.1145/3012941
+d8288322f32ee4501cef5a9b667e5bb79ebd7018,https://doi.org/10.1016/j.patcog.2011.12.018
+d8c9ce0bd5e4b6d1465402a760845e23af5ac259,https://doi.org/10.1109/ITSC.2015.380
+ab7923968660d04434271559c4634790dc68c58e,https://doi.org/10.1109/ICIP.2015.7351111
+abf0aa1d8869d87f4ef62e2da058ccfb4bf46d18,https://doi.org/10.1007/s11042-015-2536-2
+abfba1dc9a9991897acd0e0d3d4ef9d4aef4151c,https://doi.org/10.1109/FUZZ-IEEE.2014.6891864
+ab68837d09986c592dcab7d08ee6dfb40e02916f,https://doi.org/10.1007/978-3-319-11289-3_23
+aba9acb4a607071af10684f2cfbdefa0507a4e9a,https://doi.org/10.1016/j.patcog.2016.06.010
+ab703224e3d6718bc28f7b9987eb6a5e5cce3b01,https://doi.org/10.1631/FITEE.1500235
+abe4c1d6b964c4f5443b0334a44f0b03dd1909f4,https://doi.org/10.1109/IJCNN.2017.7965950
+ab2c07c9867243fad2d66fa6aeabfb780433f319,http://doi.acm.org/10.1145/2967878.2967887
+ab00ea1aa2f81fbe139b4632ec3682dfb7312ef0,http://doi.ieeecomputersociety.org/10.1109/WACV.2014.6835994
+abbc6dcbd032ff80e0535850f1bc27c4610b0d45,https://doi.org/10.1109/ICIP.2015.7350983
+abf573864b8fbc0f1c491ca60b60527a3e75f0f5,https://doi.org/10.1007/s11042-014-2204-y
+e52272f92fa553687f1ac068605f1de929efafc2,https://doi.org/10.1016/j.engappai.2017.06.003
+e585dc6c810264d9f07e38c412379734a920714e,http://doi.acm.org/10.1145/2531923.2531926
+e51f1ee5535017e10a5f77100ff892509ec6b221,https://doi.org/10.1109/ICSMC.2007.4413825
+e57108607d94aa158eb22ae50540ae6080e48d4b,http://doi.ieeecomputersociety.org/10.1109/ICMI.2002.1167051
+e5c687c8c84f1cdb9d9fbc9b6ff7518ff4d71056,https://doi.org/10.1109/TNN.2011.2170220
+e57ce6244ec696ff9aa42d6af7f09eed176153a8,https://doi.org/10.1109/ICIP.2015.7351449
+e50ec6b6d1c189edc127eb403c41a64f34fc0a6c,http://doi.ieeecomputersociety.org/10.1109/ICME.2014.6890187
+e51927b125640bfc47bbf1aa00c3c026748c75bd,http://doi.acm.org/10.1145/2647868.2655015
+e55f7250f3b8ee722814f8809620a851c31e5b0e,https://doi.org/10.3182/20130902-3-CN-3020.00030
+e5fbaeddbf98c667ec7c5575bda2158a36b55409,http://doi.ieeecomputersociety.org/10.1109/EMS.2011.25
+e57e1dce81e888eb07054923602e35bfb5ef3eb8,https://doi.org/10.1109/IROS.2012.6385544
+e546572f8205570de4518bcf8d0345465e51d7a0,https://doi.org/10.1109/ICIP.2015.7351318
+e27b2cabdfdd6bf3ffb3ebce1b4c55adb1e80c8f,http://doi.ieeecomputersociety.org/10.1109/ICPR.2010.225
+e26a7e343fe109e2b52d1eeea5b02dae836f3502,https://doi.org/10.1109/ACCESS.2017.2676238
+e2b3aae594035e58f72125e313e92c7c4cc9d5bb,https://doi.org/10.1007/s00138-014-0597-2
+e2f78d2f75a807b89a13115a206da4661361fa71,https://doi.org/10.1109/TMM.2017.2696825
+f41d7f891a1fc4569fe2df66e67f277a1adef229,https://doi.org/10.1109/ICIP.2015.7351552
+f4411787688ca40466ee059ec64bf56d746733c1,https://doi.org/10.1007/s12652-012-0107-1
+f402e088dddfaad7667bd4def26092d05f247206,https://doi.org/10.1109/TITS.2015.2475721
+f4465454811acb2021a46d84d94fc88e2dda00a6,https://doi.org/10.1007/s11042-007-0184-x
+f41e80f941a45b5880f4c88e5bf721872db3400f,http://doi.ieeecomputersociety.org/10.1109/IC3.2017.8284359
+f4fc77660665ae58993065c6a336367e9a6c85f7,https://doi.org/10.1016/j.patcog.2012.12.009
+f4003cbbff3b3d008aa64c76fed163c10d9c68bd,https://doi.org/10.1016/j.neucom.2016.08.055
+f449c85b8ba5fa67ead341c7ad4ec396f4ab2dd6,http://doi.ieeecomputersociety.org/10.1109/TKDE.2015.2448547
+f423d8be5e13d9ef979debd3baf0a1b2e1d3682f,https://doi.org/10.1016/j.imavis.2015.11.004
+f486624efa750d718a670fba3c7f21b1c84ebaeb,https://doi.org/10.1109/TCYB.2016.2581861
+f49aebe58d30241f12c1d7d9f4e04b6e524d7a45,https://doi.org/10.1109/ICB.2016.7550074
+eb3c45e78acee0824c8f7d997c6104d74e7213a8,http://doi.ieeecomputersociety.org/10.1109/iThings/CPSCom.2011.116
+eb38f20eaa1b849cabec99815883390f84daf279,https://doi.org/10.1016/j.patcog.2008.11.026
+eb9867f5efc98d3203ce1037f9a8814b0d15d0aa,https://doi.org/10.1109/ICIP.2014.7026008
+eb02daee558e483427ebcf5d1f142f6443a6de6b,http://doi.acm.org/10.1145/2911996.2912019
+ebc2a3e8a510c625353637e8e8f07bd34410228f,https://doi.org/10.1109/TIP.2015.2502485
+eb5c1e526fe2d17778c68f60c874c3da0129fabd,https://doi.org/10.1109/VCIP.2015.7457856
+ebce3f5c1801511de9e2e14465482260ba5933cc,http://doi.acm.org/10.1145/3126594.3126640
+eb240521d008d582af37f0497f12c51f4bab16c8,https://doi.org/10.1023/A:1012365806338
+ebb3d5c70bedf2287f9b26ac0031004f8f617b97,https://doi.org/10.1109/MSP.2017.2764116
+ebeb0546efeab2be404c41a94f586c9107952bc3,http://doi.acm.org/10.1145/2733373.2806290
+ebfdb4842c69177b65022f00d3d038d645f3260b,http://doi.ieeecomputersociety.org/10.1109/CVPR.2006.154
+eb87151fd2796ff5b4bbcf1906d41d53ac6c5595,https://doi.org/10.1109/ICPR.2016.7899719
+c7cd490e43ee4ff81e8f86f790063695369c2830,https://doi.org/10.1109/VCIP.2016.7805472
+c7b58827b2d07ece676271ae0425e369e3bd2310,https://doi.org/10.1142/S0218001415560042
+c0270a57ad78da6c3982a4034ffa195b9e932fda,http://doi.ieeecomputersociety.org/10.1109/FG.2017.131
+c0f9fae059745e50658d9605bd8875fc3a2d0b4b,http://doi.ieeecomputersociety.org/10.1109/BIGCOMP.2014.6741422
+c0945953506a3d531331caf6c2b2a6d027e319f0,https://doi.org/10.1002/cav.49
+c06b13d0ec3f5c43e2782cd22542588e233733c3,https://doi.org/10.1016/j.cviu.2016.02.001
+c0b02be66a5a1907e8cfb8117de50f80b90a65a8,http://doi.acm.org/10.1145/2808492.2808523
+eefecac463ebfc0694b9831e842b574f3954fed6,http://doi.ieeecomputersociety.org/10.1109/SNPD.2013.15
+eedb2c34c36017b9c5aa6ce8bff2ab152e713cee,https://doi.org/10.1007/s00521-008-0225-z
+ee6e4324123b99d94a7a23d9bddf026f39903693,https://doi.org/10.1109/ISMICT.2013.6521709
+eef432868e85b95a7d9d9c7b8c461637052318ca,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2013.236
+eef0be751e9aca7776d83f25c8ffdc1a18201fd8,https://doi.org/10.1016/j.patcog.2016.10.015
+ee2217f9d22d6a18aaf97f05768035c38305d1fa,https://doi.org/10.1109/APSIPA.2015.7415501
+eed05da2c0ab7d2b0a3c665a5368efa81b185099,https://doi.org/10.1016/j.neucom.2014.05.020
+eeaeca3a601d65d2d978bf3da43ab42fa5e08ed2,https://doi.org/10.1109/FSKD.2016.7603398
+eefdb69ac2c461e7791603d0f8c02ff3c8600adc,https://doi.org/10.1016/j.jvcir.2017.02.007
+ee65cee5151928c63d3ef36fcbb582fabb2b6d2c,https://doi.org/10.1109/LSP.2016.2602538
+c98def5f9d0c6ae519fe0aeebe5378f65b14e496,https://doi.org/10.1117/12.2064730
+c92e36689ef561df726a7ae861d9c166c3934908,https://doi.org/10.1109/ICPR.2016.7900140
+c907104680ad53bdc673f2648d713e4d26335825,http://doi.acm.org/10.1145/3077286.3077304
+c9c2de3628be7e249722b12911bebad84b567ce6,https://doi.org/10.1016/j.patcog.2017.06.028
+c9be1001706bcdd8b35fa9cae733c592e90c7ec3,http://doi.ieeecomputersociety.org/10.1109/DICTA.2010.54
+c9527df51e63b56c61cbf16f83d1a3c5c2c82499,http://doi.acm.org/10.1145/2072298.2072311
+c9832564d5dc601113b4d80e5a05ede6fee9f7dd,https://doi.org/10.1109/ISBA.2017.7947687
+c90427085909029afd2af01d1967e80b78e01b88,https://doi.org/10.1109/ACCESS.2017.2753830
+fcd3d557863e71dd5ce8bcf918adbe22ec59e62f,http://doi.acm.org/10.1145/2502081.2502148
+fc8990088e0f1f017540900bc3f5a4996192ff05,https://doi.org/10.1109/ICIP.2017.8296314
+fcb97ede372c5bddde7a61924ac2fd29788c82ce,https://doi.org/10.1109/TSMCC.2012.2192727
+fc5538e60952f86fff22571c334a403619c742c3,http://ieeexplore.ieee.org/document/6460202/
+fc970d7694b1d2438dd101a146d2e4f29087963e,http://doi.ieeecomputersociety.org/10.1109/FG.2017.86
+fcb276874cd932c8f6204f767157420500c64bd0,https://doi.org/10.1007/978-3-319-04960-1_3
+fdd19fee07f2404952e629cc7f7ffaac14febe01,https://doi.org/10.1109/CISP-BMEI.2016.7852754
+fdbc602a749ef070a7ac11c78dc8d468c0b60154,https://doi.org/10.1049/iet-ipr.2015.0519
+fddca9e7d892a97073ada88eec39e03e44b8c46a,http://doi.ieeecomputersociety.org/10.1109/ICPR.2014.305
+fd38163654a0551ed7f4e442851508106e6105d9,https://doi.org/10.1109/ICNSC.2008.4525311
+f28d549feffd414f38147d5e0460883fb487e2d3,https://doi.org/10.1007/s10462-011-9273-3
+f25aa838fb44087668206bf3d556d31ffd75235d,http://doi.acm.org/10.1145/2911996.2912038
+f2d15482e7055dd5f54cf4a8a8f60d8e75af7edf,https://doi.org/10.1109/ICIP.2011.6115736
+f2cc459ada3abd9d8aa82e92710676973aeff275,http://ieeexplore.ieee.org/document/5967185/
+f27fd2a1bc229c773238f1912db94991b8bf389a,https://doi.org/10.1109/IVCNZ.2016.7804414
+f28ef0a61a45a8b9cd03aa0ca81863e1d54a31d1,https://doi.org/10.1109/VCIP.2016.7805483
+f2004fff215a17ac132310882610ddafe25ba153,http://doi.ieeecomputersociety.org/10.1109/AICCSA.2017.124
+f231e9408da20498ba51d93459b3fcdb7b666efb,https://doi.org/10.1016/j.micpro.2012.01.002
+f5a95f857496db376d69f7ac844d1f56e3577b75,https://doi.org/10.1007/s12193-012-0107-7
+f531ce18befc03489f647560ad3e5639566b39dc,http://doi.ieeecomputersociety.org/10.1109/ACOMP.2015.9
+f545b121b9612707339dfdc40eca32def5e60430,http://doi.ieeecomputersociety.org/10.1109/DICTA.2010.33
+f58f30932e3464fc808e539897efa4ee4e7ac59f,https://doi.org/10.1109/DICTA.2016.7797023
+f557df59cd088ffb8e27506d8612d062407e96f4,https://doi.org/10.1007/s00521-014-1810-y
+e3a8f18e507d9f2b537ec3c3fcc1b874b8ccfc24,http://doi.ieeecomputersociety.org/10.1109/MMUL.2016.27
+cf4c1099bef189838877c8785812bc9baa5441ed,https://doi.org/10.1109/ICPR.2016.7899862
+cf6c59d359466c41643017d2c212125aa0ee84b2,http://doi.ieeecomputersociety.org/10.1109/ICME.2016.7552983
+cf7a4442a6aad0e08d4aade8ec379c44f84bca8a,http://doi.acm.org/10.1145/1873951.1874054
+cf784156547c3be146706e2763c1a52d939d1722,https://doi.org/10.1007/s11042-017-5038-6
+cfa40560fa74b2fb5c26bdd6ea7c610ba5130e2f,https://doi.org/10.1109/TIFS.2013.2286265
+cf185d0d8fcad2c7f0a28b7906353d4eca5a098b,https://doi.org/10.1186/s13640-017-0190-5
+cf54e9776d799aa183d7466094525251d66389a4,https://doi.org/10.1109/ICCE-Berlin.2017.8210589
+cf6851c24f489dabff0238e01554edea6aa0fc7c,https://doi.org/10.1109/ICSMC.2011.6083637
+cfba667644508853844c45bfe5d0b8a2ffb756d3,https://doi.org/10.1109/ISBA.2018.8311455
+ca0185529706df92745e656639179675c717d8d5,https://doi.org/10.1504/IJCVR.2014.065571
+cae41c3d5508f57421faf672ee1bea0da4be66e0,https://doi.org/10.1109/ICPR.2016.7900298
+ca447d6479554b27b4afbd0fd599b2ed39f2c335,https://doi.org/10.1109/ICPR.2014.459
+ca0804050cf9d7e3ed311f9be9c7f829e5e6a003,http://doi.ieeecomputersociety.org/10.1109/ICPR.2004.1333904
+ca458f189c1167e42d3a5aaf81efc92a4c008976,https://doi.org/10.1109/TIP.2012.2202678
+ca8f23d9b9a40016eaf0467a3df46720ac718e1d,https://doi.org/10.1109/ICASSP.2015.7178214
+cacce7f4ce74e3269f5555aa6fd83e48baaf9c96,http://doi.acm.org/10.1145/2632165
+ca60d007af691558de377cab5e865b5373d80a44,http://doi.ieeecomputersociety.org/10.1109/ACII.2017.8273630
+cab3c6069387461c3a9e5d77defe9a84fe9c9032,https://doi.org/10.1016/j.neucom.2016.12.056
+ca37933b6297cdca211aa7250cbe6b59f8be40e5,http://doi.acm.org/10.1145/3155133.3155207
+e41246837c25d629ca0fad74643fb9eb8bf38009,https://doi.org/10.1109/ICSIPA.2011.6144064
+e4d53e7f4c2052940841abc08f9574655f3f7fb4,http://doi.acm.org/10.1145/3078971.3079039
+e4df98e4b45a598661a47a0a8900065716dafd6d,http://doi.ieeecomputersociety.org/10.1109/WI-IAT.2015.219
+e4ad82afc563b783475ed45e9f2cd4c9e2a53e83,https://doi.org/10.1109/AICCSA.2016.7945716
+e47e8fa44decf9adbcdb02f8a64b802fe33b29ef,https://doi.org/10.1109/TIP.2017.2782366
+e42f3c27391821f9873539fc3da125b83bffd5a2,https://doi.org/10.1109/HPCS.2010.5547096
+e4b825bf9d5df47e01e8d7829371d05208fc272d,http://doi.acm.org/10.1145/3055635.3056618
+e4e07f5f201c6986e93ddb42dcf11a43c339ea2e,https://doi.org/10.1109/BTAS.2017.8272722
+e4c3587392d477b7594086c6f28a00a826abf004,https://doi.org/10.1109/ICIP.2017.8296998
+fef6f1e04fa64f2f26ac9f01cd143dd19e549790,http://doi.acm.org/10.1145/3123266.3123451
+fe556c18b7ab65ceb57e1dd054a2ca21cefe153c,http://doi.ieeecomputersociety.org/10.1109/WACV.2015.145
+fed8cc533037d7d925df572a440fd89f34d9c1fd,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.194
+fefaa892f1f3ff78db4da55391f4a76d6536c49a,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2497689
+fe14d8177cbdb7e5b4085302e6e044f7a4c19cb2,https://doi.org/10.1109/ICSMC.2012.6377834
+fe5d6c65e51386f4d36f7434fe6fcd9494fe9361,https://doi.org/10.1109/ACCESS.2017.2730281
+c83d142a47babe84e8c4addafa9e2bb9e9b757a5,https://doi.org/10.1109/MLSP.2012.6349762
+c833c2fb73decde1ad5b5432d16af9c7bee1c165,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2008.143
+c8fb8872203ee694d95da47a1f9929ac27186d87,https://doi.org/10.1109/ICIP.2005.1530305
+c8fb8994190c1aa03c5c54c0af64c2c5c99139b4,https://doi.org/10.1007/s00138-016-0794-2
+c84991fe3bf0635e326a05e34b11ccaf74d233dc,https://doi.org/10.1016/j.neucom.2016.08.069
+c8bc8c99acd009e4d27ddd8d9a6e0b899d48543e,https://doi.org/10.1109/IROS.2012.6386178
+c81b27932069e6c7016bfcaa5e861b99ac617934,http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019469
+c872d6310f2079db0cee0e69cc96da1470055225,https://doi.org/10.1007/978-3-319-46675-0_68
+fb3aaf18ea07b30d1836e7cf2ab9fa898627fe93,https://doi.org/10.1109/ACCESS.2017.2784096
+fb1b6138aeb081adf853316c0d83ef4c5626a7fa,https://doi.org/10.1109/ICIP.2017.8296302
+fb7bf10cbc583db5d5eee945aa633fcb968e01ad,https://doi.org/10.1007/s00521-012-0962-x
+fb915bcc1623cdf999c0e95992c0e0cf85e64d8e,http://doi.ieeecomputersociety.org/10.1109/iThings.2014.83
+fb557b79157a6dda15f3abdeb01a3308528f71f2,http://doi.ieeecomputersociety.org/10.1109/ICNC.2009.310
+fb1627ed224bf7b1e3d80c097316ed7703951df2,https://doi.org/10.1109/VCIP.2017.8305094
+fb3ff56ab12bd250caf8254eca30cd97984a949a,https://doi.org/10.3103/S0146411617010072
+fb2bd6c2959a4f811b712840e599f695dad2967e,https://doi.org/10.1109/ISPA.2015.7306038
+fba386ac63fe87ee5a0cf64bf4fb90324b657d61,https://doi.org/10.1109/ICIP.2015.7351752
+ed9de242a23ad546902e1d5ec022dbb029cc2282,https://doi.org/10.1109/ICASSP.2015.7178138
+edbddf8c176d6e914f0babe64ad56c051597d415,https://doi.org/10.1109/TMM.2016.2644866
+ed94e7689cdae87891f08428596dec2a2dc6a002,https://doi.org/10.1109/CAMSAP.2017.8313130
+ed273b5434013dcdb9029c1a9f1718da494a23a2,https://doi.org/10.1109/LSP.2018.2810106
+ed0d8997a4b7b80a7cd3592e98bdbe5c3aab0cee,https://doi.org/10.1007/s11042-014-2345-z
+ed779cc4f026f6ac22f5ef0c34126138e1ebc8b2,https://doi.org/10.1007/978-981-10-3005-5_57
+ed70d1a9435c0b32c0c75c1a062f4f07556f7016,https://doi.org/10.1109/ICIP.2015.7350774
+ed82f10e5bfe1825b9fa5379a1d0017b96fa1ebf,http://doi.ieeecomputersociety.org/10.1109/ICEBE.2017.36
+ed023651e31cdbcaa5ef2ee1d71ddbc2906c2f76,https://doi.org/10.1109/LSP.2010.2093600
+c1a70d63d1667abfb1f6267f3564110d55c79c0d,https://doi.org/10.1007/s00138-013-0488-y
+c138c76809b8da9e5822fb0ae38457e5d75287e0,https://doi.org/10.1109/TIP.2014.2378017
+c1581b5175994e33549b8e6d07b4ea0baf7fe517,https://doi.org/10.1109/IJCNN.2011.6033478
+c1173b8d8efb8c2d989ce0e51fe21f6b0b8d1478,https://doi.org/10.1109/TCYB.2016.2535122
+c1f05b723e53ac4eb1133249b445c0011d42ca79,https://doi.org/10.1162/neco_a_00990
+c1fb854d9a04b842ff38bd844b50115e33113539,https://doi.org/10.1007/s11042-016-3883-3
+c17c7b201cfd0bcd75441afeaa734544c6ca3416,https://doi.org/10.1109/TCSVT.2016.2587389
+c12034ca237ee330dd25843f2d05a6e1cfde1767,http://doi.ieeecomputersociety.org/10.1109/ICCV.2013.298
+c64502696438b4c9f9e12e64daaf7605f62ce3f0,http://doi.ieeecomputersociety.org/10.1109/WKDD.2009.195
+c65cfc9d3568c586faf18611c4124f6b7c0c1a13,https://doi.org/10.1109/ICACCI.2014.6968322
+c648d2394be3ff0c0ee5360787ff3777a3881b02,https://doi.org/10.1080/01449290903353047
+c65d2ee433ae095652abe3860eeafe6082c636c6,http://doi.ieeecomputersociety.org/10.1109/FG.2013.6553714
+c6bbb56a26222bdb8ce7dd829cff38b67d4b03cd,http://doi.acm.org/10.1145/2043674.2043677
+c675534be881e59a78a5986b8fb4e649ddd2abbe,https://doi.org/10.1109/ICIP.2017.8296548
+c60601bdb5465d8270fdf444e5d8aeccab744e29,http://doi.ieeecomputersociety.org/10.1109/ICME.2010.5583363
+ec6a2093059fd6eada9944212f64a659881abb95,https://doi.org/10.1016/j.patcog.2016.02.022
+ec89f2307e29cc4222b887eb0619e0b697cf110d,https://doi.org/10.1109/TIP.2009.2027361
+ec1a57e609eda72b4eb60155fac12db1da31f6c0,https://doi.org/10.1007/11744085_41
+ec28217290897a059348dcdf287540a2e2c68204,https://doi.org/10.1504/IJBM.2015.070928
+eca706b4d77708452bdad1c98a23e4e88ce941ab,https://doi.org/10.1142/S0218001416550144
+ec39e9c21d6e2576f21936b1ecc1574dadaf291e,https://doi.org/10.1109/WACV.2017.130
+ecdd83002f69c2ccc644d07abb44dd939542d89d,https://doi.org/10.1016/j.neucom.2015.07.011
+4e8f301dbedc9063831da1306b294f2bd5b10477,https://doi.org/10.1109/BIOSIG.2016.7736919
+4efd58102ff46b7435c9ec6d4fc3dd21d93b15b4,https://doi.org/10.1109/TIFS.2017.2788002
+4e1d89149fc4aa057a8becce2d730ec6afd60efa,https://doi.org/10.1109/ICSMC.2009.5346047
+4ea63435d7b58d41a5cbcdd34812201f302ca061,https://doi.org/10.1109/ICIP.2014.7025066
+4e6e5cb93e7e564bc426b5b27888d55101504c50,https://doi.org/10.1109/ICPR.2016.7900299
+4e343c66c5fe7426132869d552f0f205d1bc5307,https://doi.org/10.1109/ICPR.2014.452
+4e1258db62e4762fd8647b250fda9c3567f86eb8,http://doi.ieeecomputersociety.org/10.1109/CRV.2013.17
+4ee94572ae1d9c090fe81baa7236c7efbe1ca5b4,https://doi.org/10.1109/DICTA.2017.8227494
+4eeccbbb98de4f2e992600482fd6b881ace014bb,http://doi.acm.org/10.1145/2964284.2967240
+4e581831d24fd90b0b5228b9136e76fa3e8f8279,https://doi.org/10.1109/TIP.2014.2303648
+4eb8030b31ff86bdcb063403eef24e53b9ad4329,http://doi.acm.org/10.1145/2993148.2997640
+4ed40e6bb66dfa38a75d864d804d175a26b6c6f6,http://doi.ieeecomputersociety.org/10.1109/CRV.2011.41
+204f1cf56794bb23f9516b5f225a6ae00d3d30b8,https://doi.org/10.1109/JSYST.2015.2418680
+20b405d658b7bb88d176653758384e2e3e367039,https://doi.org/10.1109/IJCNN.2012.6252677
+20eabf10e9591443de95b726d90cda8efa7e53bb,https://doi.org/10.1007/s11390-017-1740-0
+205f035ec90a7fa50fd04fdca390ce83c0eea958,http://doi.acm.org/10.1145/3131287
+189e5a2fa51ed471c0e7227d82dffb52736070d8,https://doi.org/10.1109/ICIP.2017.8296995
+18bfda16116e76c2b21eb2b54494506cbb25e243,https://doi.org/10.1109/TIFS.2010.2051544
+18d3532298fb7b8fb418453107f786178ca82e4a,https://doi.org/10.1109/TIFS.2017.2668221
+184dba921b932143d196c833310dee6884fa4a0a,https://doi.org/10.1109/SIU.2017.7960393
+18dd3867d68187519097c84b7be1da71771d01a3,http://doi.acm.org/10.1145/2448556.2448563
+18145b0b13aa477eeabef9ceec4299b60e87c563,https://doi.org/10.1007/s11042-011-0834-x
+187f3ee3bc50a1f2471edc80d707e4fa1cac5b0b,https://doi.org/10.1109/LSP.2015.2437883
+1831800ef8b1f262c92209f1ee16567105da35d6,https://doi.org/10.1016/j.sigpro.2014.01.010
+1890470d07a090e7b762091c7b9670b5c2e1c348,http://doi.ieeecomputersociety.org/10.1109/CRV.2016.20
+27e0684fa5b57715162ac6c58a6ea283c7db1719,https://doi.org/10.1109/ICARCV.2004.1468857
+27812db1d2f68611cc284d65d11818082e572008,https://doi.org/10.1109/MIPRO.2016.7522323
+27e5b7ae3506a0f7472ee9089cd2472442e71c14,https://doi.org/10.1007/s00521-015-1834-y
+27aa23d7a05368a6b5e3d95627f9bab34284e5c4,https://doi.org/10.1109/IJCNN.2012.6252705
+27a586a435efdcecb151c275947fe5b5b21cf59b,https://doi.org/10.1007/s12559-017-9530-0
+279459cbbc5c6db4802e9c737cc72a612d76f7fc,https://doi.org/10.1109/SSCI.2017.8285296
+272e487dfa32f241b622ac625f42eae783b7d9aa,https://doi.org/10.1109/ICSIPA.2015.7412207
+4b9b30066a05bdeb0e05025402668499ebf99a6b,https://doi.org/10.1109/ISPACS.2012.6473448
+4b8c736524d548472d0725c971ee29240ae683f6,http://doi.ieeecomputersociety.org/10.1109/AUTOID.2005.31
+4b7f21b48c7e0dc7334e36108f558d54642c17c0,https://doi.org/10.1109/WACV.2017.106
+4ba2f445fcbbad464f107b036c57aa807ac5c0c2,https://doi.org/10.1109/TCSVT.2014.2367357
+4b94f531c203743a9f7f1e9dd009cdbee22ea197,https://doi.org/10.1109/ICSMC.2005.1571393
+4b9c47856f8314ecbe4d0efc65278c2ededb2738,https://doi.org/10.1109/LSP.2012.2188890
+1176a74fb9351ac2de81c198c4861d78e58f172d,https://doi.org/10.1016/j.patrec.2011.03.023
+11ba01ce7d606bab5c2d7e998c6d94325521b8a0,https://doi.org/10.1109/ICIP.2015.7350911
+110919f803740912e02bb7e1424373d325f558a9,http://doi.acm.org/10.1145/3123266.3123421
+11e6cf1cbb33d67a3e3c87dcaf7031d6654bc0de,http://doi.acm.org/10.1145/2522968.2522978
+113cd9e5a4081ce5a0585107951a0d36456ce7a8,https://doi.org/10.1109/ICSMC.2006.384939
+11c2d40fc63ecd88febadd8a9cac9521a6b7de66,https://doi.org/10.1109/ICSIPA.2011.6144081
+1195f0bf8f745ba69da915203bcd79589b94aec5,https://doi.org/10.1016/j.procs.2010.11.004
+11f8d0a54e55c5e6537eef431cd548fa292ef90b,https://doi.org/10.1016/j.neucom.2017.05.042
+110359824a0e3b6480102b108372793265a24a86,https://doi.org/10.1016/j.image.2016.03.011
+1125760c14ea6182b85a09bf3f5bad1bdad43ef5,https://doi.org/10.1109/CVPR.2004.286
+11a6593e6e35f95ebeb5233897d1d8bcad6f9c87,https://doi.org/10.1007/s11063-017-9615-5
+11d73f4f19077e6806d05dc7ecd17fbeb15bdf39,http://doi.ieeecomputersociety.org/10.1109/FG.2017.28
+1135a818b756b057104e45d976546970ba84e612,http://doi.ieeecomputersociety.org/10.1109/FG.2017.118
+7d8798e7430dcc68fcdbd93053c884fc44978906,http://doi.acm.org/10.1145/2506364.2506369
+7d61b70d922d20c52a4e629b09465076af71ddfd,https://doi.org/10.1007/s10044-011-0258-2
+7d7b036ed01765c9473d695f029142128d442aaa,https://doi.org/10.1109/TIP.2018.2791180
+7dc498d45f9fcb97acee552c6f587b65d5122c35,https://doi.org/10.1109/ICIP.2015.7351618
+7de8a8b437ec7a18e395be9bf7c8f2d502025cc6,https://doi.org/10.1109/SIU.2017.7960528
+298c2be98370de8af538c06c957ce35d00e93af8,https://doi.org/10.1109/IPTA.2016.7820988
+29322b9a3744afaa5fc986b805d9edb6ff5ea9fe,https://doi.org/10.1109/TNNLS.2011.2178037
+2945cc9e821ab87fa17afc8802f3858435d1264c,https://doi.org/10.1109/ICPR.2016.7899839
+2960500033eb31777ed1af1fcb133dcab1b4a857,http://doi.acm.org/10.1145/3005467.3005471
+29f298dd5f806c99951cb434834bc8dcc765df18,https://doi.org/10.1109/ICPR.2016.7899837
+293d69d042fe9bc4fea256c61915978ddaf7cc92,https://doi.org/10.1007/978-981-10-7302-1_6
+29fd98f096fc9d507cd5ee7d692600b1feaf7ed1,http://doi.acm.org/10.1145/2988257.2988270
+7c8909da44e89a78fe88e815c83a4ced34f99149,http://doi.ieeecomputersociety.org/10.1109/ICPR.2010.326
+7c457c9a658327af6f6490729b4cab1239c22005,https://doi.org/10.1109/ACCESS.2017.2672829
+7c13fa0c742123a6a927771ce67da270492b588c,http://doi.acm.org/10.1145/3152114
+163ba5a998973f9ead6be0ca873aed5934d5022e,https://doi.org/10.1109/ACPR.2013.53
+16b0c171fb094f677fcdf78bbb9aaef0d5404942,https://doi.org/10.1109/TIP.2017.2733739
+1617f56c86bf8ea61de62062a97961d23fcf03d3,https://doi.org/10.1007/s11390-015-1540-3
+1672becb287ae3eaece3e216ba37677ed045db55,https://doi.org/10.1016/j.eswa.2015.10.047
+16eaa26a84468b27e559215db01c53286808ec2a,https://doi.org/10.1007/s11263-015-0859-0
+16c1b592d85d13f1ba4eff0afb4441bb78650785,https://doi.org/10.1109/TIP.2017.2685343
+163d0e6ea8c8b88b4383a4eaa740870e2458b9b0,https://doi.org/10.1007/978-3-319-71928-3_18
+16fc82d44188eb49a151bd5836a29911b3bfabcb,https://doi.org/10.1007/978-981-10-7302-1_50
+42441f1fee81c8fd42a74504df21b3226a648739,https://doi.org/10.1007/s11554-008-0072-2
+4268ae436db79c4eee8bc06e9475caff3ff70d57,http://doi.ieeecomputersociety.org/10.1109/FG.2017.146
+42fff5b37006009c2dbfab63c0375c7c7d7d8ee3,https://doi.org/10.1007/s11042-014-2228-3
+42a5dc91852c8c14ed5f4c3b451c9dc98348bc02,http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2016.021
+425ea5656c7cf57f14781bafed51182b2e6da65f,https://doi.org/10.1109/TIP.2017.2718187
+427bec487c330e7e34cc2c8fc2d6558690421ea0,http://doi.ieeecomputersociety.org/10.1109/ISCSCT.2008.352
+4215b34597d8ce1e8985afa8043400caf0ec7230,http://doi.ieeecomputersociety.org/10.1109/WI-IAT.2012.71
+89e31777f221ddb3bc9940d7f520c8114c4148a2,https://doi.org/10.1007/s11063-012-9224-2
+897aa4aaa474fed41233faec9b70b802aea5fdea,https://doi.org/10.1142/S0218001414560126
+89272b78b651038ff4d294b9ccca0018d2c9033b,https://doi.org/10.1109/ICPR.2014.777
+89497854eada7e32f06aa8f3c0ceedc0e91ecfef,https://doi.org/10.1109/TIP.2017.2784571
+891b31be76e2baa83745f24c2e2013851dc83cbb,https://doi.org/10.1109/TSMCB.2009.2018137
+892400017e5c93611dc8361e7749135520d66f25,https://doi.org/10.1109/ICARCV.2010.5707394
+898ff1bafee2a6fb3c848ad07f6f292416b5f07d,https://doi.org/10.1109/TIP.2016.2518867
+454bf5b99607b4418e931092476ad1798ce5efa4,https://doi.org/10.1155/2011/790598
+45877ff4694576f59c2a9ca45aa65f935378492a,http://doi.ieeecomputersociety.org/10.1109/IIH-MSP.2009.38
+4551194408383b12db19a22cca5db0f185cced5c,https://doi.org/10.1109/TNNLS.2014.2341634
+45e043dffc57a9070f483ac4aec2c5cd2cec22cb,http://doi.acm.org/10.1145/3130977
+452ea180cf4d08d7500fc4bc046fd7141fd3d112,https://doi.org/10.1109/BTAS.2012.6374569
+45edb29fb7eed5a52040300e1fd3cd53f1bdb429,https://doi.org/10.1109/ICIP.2015.7351570
+4512b87d68458d9ba0956c0f74b60371b6c69df4,https://doi.org/10.1109/TIP.2017.2708504
+4500888fd4db5d7c453617ee2b0047cedccf2a27,http://doi.acm.org/10.1145/2647750
+4562ea84ebfc8d9864e943ed9e44d35997bbdf43,http://doi.ieeecomputersociety.org/10.1109/FG.2017.19
+459eb3cfd9b52a0d416571e4bc4e75f979f4b901,https://doi.org/10.1109/ROBIO.2015.7418998
+453bf941f77234cb5abfda4e015b2b337cea4f17,https://doi.org/10.1007/s11042-014-2340-4
+1fd7a17a6c630a122c1a3d1c0668d14c0c375de0,https://doi.org/10.1109/CIST.2016.7805097
+1f41bf5e8b8562ac7ef0013f4d0cf1c9e1a431f9,https://doi.org/10.1109/IJCNN.2017.7965955
+1f8656e2254e353a91cceb08b33c25643a1b1fb7,https://doi.org/10.1109/LSP.2017.2736542
+1f02bf412a82ad99fe99dc3cfb3adec9dd41eabb,https://doi.org/10.1007/s11760-016-1052-9
+1f5725a4a2eb6cdaefccbc20dccadf893936df12,https://doi.org/10.1109/CCST.2012.6393544
+1fcb905e4505a781fb0b375eb470f5661e38ae39,http://doi.acm.org/10.1145/3123266.3123450
+874da338c01fb7a87d605fcde6c52835eee03d5e,http://doi.ieeecomputersociety.org/10.1109/ICAPR.2009.20
+87806c51dc8c1077953178367dcf5c75c553ce34,https://doi.org/10.1109/ICMLA.2015.146
+87ee56feefdb39938cda7f872e784d9d986713af,http://dl.acm.org/citation.cfm?id=3022247
+87552622efd0e85c2a71d4d2590e53d45f021dbf,https://doi.org/10.1109/ICIP.2016.7532435
+872ff48a3acfbf96376fd048348372f5137615e4,https://doi.org/10.1007/s41095-016-0051-7
+8706c3d49d1136035f298041f03bb70dc074f24d,http://doi.ieeecomputersociety.org/10.1109/DICTA.2008.12
+876583a059154def7a4bc503b21542f80859affd,https://doi.org/10.1109/IWBF.2016.7449697
+80677676b127b67938c8db06a15d87f5dd4bd7f1,https://doi.org/10.1007/s11760-014-0623-x
+80f72b26c6571aee2ff04704bc7fd1a69bfa0b3f,https://doi.org/10.1016/j.patcog.2016.12.029
+8027a9093f9007200e8e69e05616778a910f4a5f,https://doi.org/10.1109/ICB.2013.6612997
+805a0f4b99f162ac4db0ef6e0456138c8d498c3a,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2015.2465373
+803803b5c2c61046d63674f85ecf0123f9d2c4b8,https://doi.org/10.1049/iet-bmt.2013.0089
+80d42f74ee9bf03f3790c8d0f5a307deffe0b3b7,https://doi.org/10.1109/TNNLS.2016.2522431
+80aa455068018c63237c902001b58844fcc6f160,https://doi.org/10.1109/FG.2011.5771327
+80a5afeb6968c7e736adc48bd4d5ec5b45b13f71,https://doi.org/10.1007/978-3-319-15762-7
+74cec83ee694b5d0e07d5d0bacd0aa48a80776aa,https://doi.org/10.1109/ISCAS.2013.6572506
+745d49a2ff70450113f07124c2c5263105125f58,https://doi.org/10.1109/ICPR.2016.7899972
+745e74ae84e1b2b8690d07db523531642023d6c4,https://doi.org/10.1109/FSKD.2016.7603417
+747dc0add50b86f5ba9e3e7315943d520e08f9eb,http://doi.ieeecomputersociety.org/10.1109/FG.2017.78
+74d3ff8324e02503c18fb2566ed29e2e22ce0d1b,http://doi.ieeecomputersociety.org/10.1109/IAS.2009.266
+1ab19e516b318ed6ab64822efe9b2328836107a4,https://doi.org/10.1109/TIP.2010.2083674
+1ab4fdcd431286a2fe9538cb9a9e3c67016fa98a,https://doi.org/10.1007/s11042-013-1754-8
+1a0e1ba4408d12f8a28049da0ff8cad4f91690d5,https://doi.org/10.1007/s12559-016-9445-1
+1ad5cb4c1eec5a9666b5dbbb6fab43576d0935db,https://doi.org/10.1109/ICIP.2016.7533026
+1a47f12a2490f6775c0ad863ac856de27f5b3e03,https://doi.org/10.1016/j.sigpro.2014.11.010
+1a8d40bcfb087591cc221086440d9891749d47b8,https://doi.org/10.1109/ICCE.2012.6161859
+1afef6b389bd727c566cd6fbcd99adefe4c0cf32,https://doi.org/10.1109/ICB.2016.7550087
+1aeef2ab062c27e0dbba481047e818d4c471ca57,https://doi.org/10.1109/ICACCI.2015.7275860
+1addc5c1fa80086d1ed58f71a9315ad13bd87ca2,https://doi.org/10.1007/s10044-012-0279-5
+1a40c2a2d17c52c8b9d20648647d0886e30a60fa,https://doi.org/10.1109/ICPR.2016.7900283
+1a03dcc811131b0b702bd5a75c54ed26cd27151a,https://doi.org/10.1007/s11760-015-0810-4
+1ad780e02edf155c09ea84251289a054b671b98a,https://doi.org/10.1109/ICNIDC.2012.6418787
+287de191c49a3caa38ad7594093045dfba1eb420,https://doi.org/10.23919/MVA.2017.7986829
+281b91c35a1af97b1405bc724a04e2be6e24971b,https://doi.org/10.1109/ICMLC.2010.5580557
+28d55935cc36df297fe21b98b4e2b07b5720612e,https://doi.org/10.1109/CISS.2016.7460569
+28a45770faf256f294ce3bbd5de25c6d5700976e,https://doi.org/10.1109/ICDSP.2016.7868531
+283d381c5c2ba243013b1c4f5e3b29eb906fa823,http://doi.ieeecomputersociety.org/10.1109/ICCV.2005.222
+2884ff0d58a66d42371b548526d685760e514043,https://doi.org/10.1109/ICIP.2015.7351242
+17768efd76a681902a33994da4d3163262bf657f,https://doi.org/10.1007/s12559-017-9472-6
+176d9121e4e645344de4706dfb345ad456bfb84a,https://doi.org/10.1117/1.JEI.24.2.023009
+17189cfedbdbd219849b8e7f8cf0293d49465f9c,http://doi.acm.org/10.1145/2393347.2396505
+170aa0f16cd655fdd4d087f5e9c99518949a1b5c,https://doi.org/10.1007/s11263-007-0074-8
+179545c1fc645cb2ad9b31a30f48352d541876ff,https://doi.org/10.1109/IJCNN.2007.4371116
+17de5a9ce09f4834629cd76b8526071a956c9c6d,https://doi.org/10.1007/978-3-319-68063-7_8
+1723227710869a111079be7d61ae3df48604e653,https://doi.org/10.1109/INISTA.2014.6873606
+178b37392b2c6f1a167ebc1a5baa5f2f5916e4c4,https://doi.org/10.1007/s11042-013-1578-6
+17d03da4db3bb89537d644b682b2a091d563af4a,https://doi.org/10.1109/TNN.2010.2050600
+7b1ca9a74ab7fbfc32a69e8313ca2f2d78ac6c35,http://doi.ieeecomputersociety.org/10.1109/ICSC.2017.61
+7bc1e7d000ab517161a83b1fedf353e619516ddf,http://doi.ieeecomputersociety.org/10.1109/WACV.2014.6836068
+7b618a699b79c1272f6c83101917ad021a58d96b,https://doi.org/10.1007/s11042-014-1986-2
+7bd37e6721d198c555bf41a2d633c4f0a5aeecc1,https://doi.org/10.1109/ACPR.2013.58
+7b455cbb320684f78cd8f2443f14ecf5f50426db,http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2017.33
+8f3675e979629ca9cee9436d37763f546edb8d40,https://doi.org/10.1109/SIU.2017.7960446
+8fee7b38358815e443f8316fa18768d76dba12e3,http://doi.acm.org/10.1145/2063576.2063676
+8fe5feeaa72eddc62e7e65665c98e5cb0acffa87,https://doi.org/10.1007/s12193-015-0209-0
+8f73af52d87c94d0bd43242462fd68d974eda331,https://doi.org/10.1109/ICB.2013.6613009
+8f99f7ccb85af6d4b9e015a9b215c529126e7844,https://doi.org/10.1109/ROMAN.2017.8172359
+8f051647bd8d23482c6c3866c0ce1959b8bd40f6,https://doi.org/10.1016/j.asoc.2017.04.041
+8f713e3c5b6b166c213e00a3873f750fb5939c9a,https://doi.org/10.1109/EUSIPCO.2015.7362563
+8fc36452a49cb0fd43d986da56f84b375a05b4c1,http://doi.acm.org/10.1145/2542355.2542388
+8aff9c8a0e17be91f55328e5be5e94aea5227a35,https://doi.org/10.1109/TNNLS.2012.2191620
+8a1e95b82d8cf27e0034e127091396efd4c8bd9e,https://doi.org/10.1109/IGARSS.2016.7729015
+8a2210bedeb1468f223c08eea4ad15a48d3bc894,http://doi.acm.org/10.1145/2513383.2513438
+8a2bedaa38abf173823944f0de2c84f5b2549609,https://doi.org/10.1109/TNNLS.2016.2573644
+8ab465c1a131ee4bee6ac0a0b19dfe68f5dcdcc4,http://doi.ieeecomputersociety.org/10.1109/CSSE.2008.575
+7ebfa8f1c92ac213ff35fa27287dee94ae5735a1,https://doi.org/10.1109/TMM.2016.2614429
+7e456e94f3080c761f858264428ee4c91cd187b2,http://ieeexplore.ieee.org/document/6460899/
+7e48711c627edf90e9b232f2cbc0e3576c8f2f2a,https://doi.org/10.1007/s11760-015-0777-1
+10e2f2ad1dedec6066e063cb2098b089b35905a8,http://doi.acm.org/10.1145/3052930
+10df1d4b278da991848fb71b572f687bd189c10e,https://doi.org/10.1109/ICPR.2016.7899739
+104ee18b513b52386f871e959c1f9e5072604e93,https://doi.org/10.1109/GlobalSIP.2017.8309189
+10f4bbf87a44bab3d79e330e486c897e95f5f33f,https://doi.org/10.1109/TIFS.2012.2186292
+1071dde48a77f81c35ad5f0ca90a9daedb54e893,http://ieeexplore.ieee.org/document/7881657/
+1050cd9bf281d0b7367c03d931e6e0b4fc08ccd3,http://doi.ieeecomputersociety.org/10.1109/SMARTCOMP.2014.7043872
+1966bddc083886a9b547e1817fe6abc352a00ec3,http://doi.acm.org/10.1145/2733373.2806312
+19705579b8e7d955092ef54a22f95f557a455338,https://doi.org/10.1109/ICIP.2014.7025277
+1979e270093b343d62e97816eeed956062e155a0,https://doi.org/10.1016/j.micpro.2005.07.003
+194f5d3c240d06575403c9a422a0ebc86d43b91e,https://doi.org/10.1007/s11042-015-2580-y
+197efbef17f92e5cb5076961b6cd9f59e88ffd9a,https://doi.org/10.1109/ICMLA.2017.00-59
+19bbecead81e34b94111a2f584cf55db9a80e60c,http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6248025
+195b61470720c7faa523e10e68d0c8d8f27d7c7a,http://doi.ieeecomputersociety.org/10.1109/CVPR.2011.5995618
+1902288256839539aeb5feb3e1699b963a15aa1a,https://doi.org/10.1109/IJCNN.2016.7727435
+19c82eacd77b35f57ac8815b979716e08e3339ca,http://doi.ieeecomputersociety.org/10.1109/ICITCS.2015.7292981
+191b70fdd6678ef9a00fd63710c70b022d075362,https://doi.org/10.1109/ICIP.2003.1247347
+4c141534210df53e58352f30bab558a077fec3c6,https://doi.org/10.1109/TMM.2016.2557722
+4c19690889fb3a12ec03e65bae6f5f20420b4ba4,https://doi.org/10.1049/iet-ipr.2015.0699
+4c6886c489e93ccab5a1124555a6f3e5b0104464,https://doi.org/10.1109/ICIP.2017.8296921
+4c648fe9b7bfd25236164333beb51ed364a73253,http://doi.acm.org/10.1145/3038924
+4c0846bcfa64d9e810802c5b7ef0f8b43523fe54,http://doi.ieeecomputersociety.org/10.1109/TKDE.2014.2324594
+4c71b0cdb6b80889b976e8eb4457942bd4dd7b66,https://doi.org/10.1109/TIP.2014.2387379
+4cec3e5776090852bef015a8bbe74fed862aa2dd,https://doi.org/10.1109/TSP.2013.2271479
+4c842fbd4c032dd4d931eb6ff1eaa2a13450b7af,https://doi.org/10.1016/j.imavis.2014.06.004
+268c4bb54902433bf00d11391178a162e5d674c9,https://doi.org/10.1109/CVPRW.2010.5543261
+261a80216dda39b127d2b7497c068ec7e0fdf183,https://doi.org/10.1109/TCSVT.2013.2265571
+26ebe98753acec806b7281d085110c06d9cd1e16,http://doi.ieeecomputersociety.org/10.1109/FG.2017.22
+26973cf1552250f402c82e9a4445f03fe6757b58,http://doi.acm.org/10.1145/3126686.3130239
+2601b679fdd637f3cd978753ae2f15e8759dd267,https://doi.org/10.1109/ICIP.2015.7351306
+262cdbc57ecf5c18756046c0d8b9aa7eb10e3b19,http://dl.acm.org/citation.cfm?id=3007694
+26b9d546a4e64c1d759c67cd134120f98a43c2a6,https://doi.org/10.1109/ICMLA.2012.120
+26bbe76d1ae9e05da75b0507510b92e7e6308c73,https://doi.org/10.1007/s00371-014-1049-8
+26949c1ba7f55f0c389000aa234238bf01a32d3b,https://doi.org/10.1109/ICIP.2017.8296814
+26a5136ee4502500fb50cd5ade814aad45422771,https://doi.org/10.1142/S0218001413560028
+26727dc7347e3338d22e8cf6092e3a3c7568d763,http://doi.ieeecomputersociety.org/10.1109/FG.2015.7163088
+2138ccf78dcf428c22951cc066a11ba397f6fcef,https://doi.org/10.1109/BHI.2012.6211519
+21bd60919e2e182a29af455353141ba4907b1b41,https://doi.org/10.1109/ACCESS.2018.2798573
+21cbf46c6adfb3a44ed2b30ff0b21a8391c18b13,https://doi.org/10.1109/VCIP.2017.8305137
+21f5f65e832c5472d6d08f6ee280d65ff0202e29,https://doi.org/10.1007/978-3-319-70353-4_44
+218139e5262cb4f012cd2e119074aa59b89ebc32,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.265
+217aa3aa0b3d9f6f394b5d26f03418187d775596,http://doi.acm.org/10.1145/3123266.3123298
+2149d49c84a83848d6051867290d9c8bfcef0edb,https://doi.org/10.1109/TIFS.2017.2746062
+4dbfbe5fd96c9efc8c3c2fd54406b62979482678,https://doi.org/10.1016/j.jvcir.2014.07.007
+4d1f77d9418a212c61a3c75c04a5b3884f6441ba,https://doi.org/10.1109/TIP.2017.2788196
+4d4736173a5e72c266e52f3a43bdcb2b58f237a2,https://doi.org/10.1109/ISSPA.2012.6310583
+4d6d6369664a49f6992f65af4148cefef95055bc,https://doi.org/10.1109/ICIP.2014.7025407
+75858dbee2c248a60741fbc64dcad4f8b63d51cb,https://doi.org/10.1109/TIP.2015.2460464
+7535e3995deb84a879dc13857e2bc0796a2f7ce2,https://doi.org/10.1007/s10618-010-0207-5
+75d7ba926ef1cc2adab6c5019afbb2f69a5ca27d,https://doi.org/10.1007/s00521-012-1042-y
+75b51140d08acdc7f0af11b0ffa1edb40ebbd059,https://doi.org/10.1007/s00521-010-0381-9
+754626bd5fb06fee5e10962fdfeddd495513e84b,https://doi.org/10.1109/SIU.2017.7960646
+751fb994b2c553dc843774a5620bfcab8bc657fd,https://doi.org/10.1007/978-3-319-67180-2_47
+753a277c1632dd61233c488cc55d648de3caaaa3,https://doi.org/10.1016/j.patcog.2011.02.013
+81a4397d5108f6582813febc9ddbeff905474120,https://doi.org/10.1109/ICPR.2016.7899883
+812d3f6975f4cb87e9905ef18696c5c779227634,https://doi.org/10.1186/s13640-016-0151-4
+8184a92e1ccc7fdeb4a198b226feb325c63d6870,https://doi.org/10.1109/ICCE.2017.7889290
+8185be0689442db83813b49e215bf30870017459,https://doi.org/10.1109/TNNLS.2013.2293418
+81b8a6cabcd6451b21d5b44e69b0a355d9229cc4,https://doi.org/10.1109/ICDSP.2017.8096137
+81d81a2060366f29fd100f793c11acf000bd2a7f,https://doi.org/10.1007/11795131_112
+81af86e3d343a40ce06a3927b6aa8c8853f6811a,http://doi.acm.org/10.1145/3009977.3009996
+81c21f4aafab39b7f5965829ec9e0f828d6a6182,https://doi.org/10.1109/BTAS.2015.7358744
+81d232e1f432db7de67baf4f30f240c62d1a9055,https://doi.org/10.1109/ICIP.2017.8296405
+86fa086d02f424705bbea53943390f009191740a,https://doi.org/10.1109/ICIP.2015.7351651
+865d4ce1751ff3c0a8eb41077a9aa7bd94603c47,https://doi.org/10.1007/s12193-015-0210-7
+86597fe787e0bdd05935d25158790727257a40bd,http://doi.ieeecomputersociety.org/10.1109/3DV.2016.72
+86afb1e38a96f2ac00e792ef353a971fd13c8474,https://doi.org/10.1109/BigData.2016.7840742
+8686b15802529ff8aea50995ef14079681788110,https://doi.org/10.1109/TNNLS.2014.2376936
+864d50327a88d1ff588601bf14139299ced2356f,https://doi.org/10.1109/FSKD.2016.7603151
+8697ccb156982d40e88fda7fbf4297fa5171f24d,http://doi.ieeecomputersociety.org/10.1109/WI-IAT.2011.101
+86881ce8f80adea201304ca6bb3aa413d94e9dd0,https://doi.org/10.1109/ICIP.2017.8297133
+8605e8f5d84b8325b1a81d968c296a5a5d741f31,https://doi.org/10.1016/j.patcog.2017.04.010
+72345fed8d068229e50f9ea694c4babfd23244a0,http://doi.acm.org/10.1145/2632856.2632937
+728b1b2a86a7ffda402e7ec1a97cd1988dcde868,https://doi.org/10.1016/j.procs.2016.04.083
+72a3bb0fb490355a926c5a689e12268bff9ff842,https://doi.org/10.1109/ICIP.2006.312862
+7234468db46b37e2027ab2978c67b48b8581f796,https://doi.org/10.1109/ACPR.2015.7486464
+72119cb98f9502ec639de317dccea57fd4b9ee55,https://doi.org/10.1109/GlobalSIP.2015.7418230
+72d110df78a7931f5f2beaa29f1eb528cf0995d3,https://doi.org/10.1007/s11517-015-1346-z
+440b94b1624ca516b07e72ea8b3488072adc5e26,https://doi.org/10.1109/ITSC.2015.153
+44b827df6c433ca49bcf44f9f3ebfdc0774ee952,https://doi.org/10.1109/LSP.2017.2726105
+44c278cbecd6c1123bfa5df92e0bda156895fa48,https://doi.org/10.1109/ICPR.2014.316
+4490b8d8ab2ac693c670751d4c2bff0a56d7393d,https://doi.org/10.1007/s11063-017-9648-9
+4492914df003d690e5ff3cb3e0e0509a51f7753e,http://doi.ieeecomputersociety.org/10.1109/ICCI-CC.2014.6921443
+44834929e56f2a8f16844fde519039d647006216,http://doi.acm.org/10.1145/1460096.1460150
+44389d8e20cf9f1a8453f4ba033e03cff9bdfcbb,https://doi.org/10.1016/j.neucom.2017.07.052
+445e3ba7eabcc55b5d24f951b029196b47830684,https://doi.org/10.1109/TMM.2016.2591508
+2a92bda6dbd5cce5894f7d370d798c07fa8783f4,https://doi.org/10.1109/TIFS.2014.2359587
+2afde207bd6f2e5fa20f3cf81940b18cc14e7dbb,https://doi.org/10.1109/TIP.2013.2255300
+2a98b850139b911df5a336d6ebf33be7819ae122,https://doi.org/10.1109/ICIP.2015.7350806
+2ae2e29c3e9cc2d94a26da5730df7845de0d631b,https://doi.org/10.1109/TCSVT.2011.2129670
+2a6783ae51d7ee781d584ef9a3eb8ab1997d0489,https://doi.org/10.1109/CVPRW.2010.5543608
+2a98351aef0eec1003bd5524933aed8d3f303927,https://doi.org/10.1109/CIRA.2007.382901
+2a41388040141ef6b016c100ef833a2a73ab8b42,https://doi.org/10.1016/j.neucom.2017.03.033
+2a79bd36c56fd1634ca0f8089fe8aa9343eb92ce,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2006.104
+2a84f7934365f05b6707ea0ac225210f78e547af,https://doi.org/10.1109/ICPR.2016.7899690
+2adffdffa16475ae71bb2adcf65840f01f1e53f7,https://doi.org/10.1049/iet-cvi.2014.0094
+2a4984fb48c175d1e42c6460c5f00963da9f26b6,https://doi.org/10.1109/MIPRO.2015.7160445
+2a7e6a1b2638550370a47f2f6f6e38e76fe9ac13,http://doi.acm.org/10.1145/3090311
+2f8ef56c1007a02cdc016219553479d6b7e097fb,https://doi.org/10.1007/978-3-642-14834-7_2
+2fd007088a75916d0bf50c493d94f950bf55c5e6,https://doi.org/10.1007/978-981-10-7302-1_1
+2f43b614607163abf41dfe5d17ef6749a1b61304,https://doi.org/10.1109/TIFS.2014.2361479
+2f1b521c29ab075a0cd9bbf56ba26ee13d5e4d76,https://doi.org/10.1109/ACPR.2015.7486607
+2f5b51af8053cf82ab52bbfd46b56999222ec21c,https://doi.org/10.1109/ICPR.2014.788
+2f841ff062053f38725030aa1b77db903dad1efb,https://doi.org/10.1109/ICRA.2014.6907748
+2facf3e85240042a02f289a0d40fee376c478d0f,https://doi.org/10.1109/BTAS.2010.5634544
+2f61d91033a06dd904ff9d1765d57e5b4d7f57a6,https://doi.org/10.1109/ICIP.2016.7532953
+2f160a6526ebf10773680dadaba44b006bcec2cb,https://doi.org/10.1016/j.neucom.2012.03.007
+2f17c0514bb71e0ca20780d71ea0d50ff0da4938,http://doi.acm.org/10.1145/1943403.1943490
+43261920d2615f135d6e72b333fe55d3f2659145,http://doi.acm.org/10.1145/3136273.3136301
+4349f17ec319ac8b25c14c2ec8c35f374b958066,https://doi.org/10.1109/THMS.2017.2681425
+43cbe3522f356fbf07b1ff0def73756391dc3454,https://doi.org/10.1109/WIFS.2011.6123140
+4398afa0aeb5749a12772f2d81ca688066636019,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2015.2496320
+4344ba6e33faaa616d01248368e66799548ca48b,https://doi.org/10.1007/s10044-015-0474-2
+43fe03ec1acb6ea9d05d2b22eeddb2631bd30437,https://doi.org/10.1109/ICIP.2017.8296394
+43bb2b58f906262035ef61e41768375bc8d99ae3,https://doi.org/10.1016/j.procs.2016.04.072
+4328933890f5a89ad0af69990926d8484f403e4b,http://doi.acm.org/10.1145/2072298.2071993
+434f1442533754b3098afd4e24abf1e3792b24db,https://doi.org/10.1109/CBMI.2015.7153627
+43eb03f95adc0df61af2c3b12a913c725b08d4f5,http://doi.ieeecomputersociety.org/10.1109/ICDMW.2011.101
+88780bd55615c58d9bacc4d66fc2198e603a1714,https://doi.org/10.1109/EMBC.2016.7590730
+8879083463a471898ff9ed9403b84db277be5bf6,https://doi.org/10.1016/j.patcog.2016.08.031
+884a9ce87d4d2338cb97bf4c8df3cdb079a87d5e,https://doi.org/10.1109/SMC.2016.7844717
+88ed558bff3600f5354963d1abe762309f66111e,https://doi.org/10.1109/TIFS.2015.2393553
+88399c7fa890f1252178cd5e4979971509bd904f,https://doi.org/10.1142/S0219878906000915
+8845c03bee88fdd2f400ed2bddba038366c82abe,http://doi.ieeecomputersociety.org/10.1109/TCBB.2011.135
+8882d39edae556a351b6445e7324ec2c473cadb1,https://doi.org/10.1109/TIP.2017.2755766
+88c21e06ed44da518a7e346fce416efedc771704,https://doi.org/10.1109/ICIP.2015.7351455
+9f5e22fbc22e1b0a61bcd75202d299232e68de5d,https://doi.org/10.1109/IJCNN.2016.7727391
+9fab78015e6e91ba7241a923222acd6c576c6e27,http://doi.ieeecomputersociety.org/10.1109/ICSS.2016.10
+9f3c9e41f46df9c94d714b1f080dafad6b4de1de,https://doi.org/10.1109/ICT.2017.7998260
+9f428db0d3cf26b9b929dd333a0445bcc7514cdf,https://doi.org/10.1016/j.cviu.2010.11.015
+9fd1b8abbad25cb38f0c009288fb5db0fc862db6,https://doi.org/10.1109/ICASSP.2003.1199147
+9fbcf40b0649c03ba0f38f940c34e7e6c9e04c03,https://doi.org/10.1007/s10044-006-0033-y
+6b44543571fe69f088be577d0c383ffc65eceb2a,http://doi.ieeecomputersociety.org/10.1109/EST.2012.24
+6b0a2f9ab9b134d66a325525ea5d90ad546fe2b7,https://doi.org/10.1109/IJCNN.2016.7727803
+6bacd4347f67ec60a69e24ed7cc0ac8073004e6f,https://doi.org/10.1109/VCIP.2014.7051528
+6ba6045e4b404c44f9b4dfce2d946019f0e85a72,https://doi.org/10.1109/ICPR.2016.7899962
+6b8329730b2e13178a577b878631735a1cd58a71,http://doi.ieeecomputersociety.org/10.1109/FiCloud.2015.78
+07dc9f3b34284cc915dea7575f40ef0c04338126,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2016.2537337
+070c8ee3876c06f9a65693e536d61097ace40417,https://doi.org/10.1109/ACPR.2013.161
+0733ec1953f6c774eb3a723618e1268586b46359,https://doi.org/10.1109/TMM.2006.870737
+0750c796467b6ef60b0caff5fb199337d54d431e,https://doi.org/10.1109/ICMLC.2016.7873015
+0701b01bc99bf3b64050690ceadb58a8800e81ed,https://doi.org/10.1007/s11042-015-3107-2
+076c97826df63f70d55ea11f0b7ae47a7ad81ad3,http://doi.ieeecomputersociety.org/10.1109/SITIS.2011.40
+38e7f3fe450b126367ec358be9b4cc04e82fa8c7,https://doi.org/10.1109/TIP.2014.2351265
+3888d7a40f3cea5e4a851c8ca97a2d7810a62867,https://doi.org/10.1109/CCECE.2016.7726684
+383ff2d66fecdc2fd02a31ac1fa392f48e578296,https://doi.org/10.1016/j.cviu.2015.07.005
+387b54cf6c186c12d83f95df6bd458c5eb1254ee,https://doi.org/10.1109/VCIP.2017.8305123
+3826e47f0572ab4d0fe34f0ed6a49aa8303e0428,https://doi.org/10.1109/ACPR.2013.66
+383e64d9ef1fca9de677ac82486b4df42e96e861,http://doi.ieeecomputersociety.org/10.1109/DSC.2017.78
+38345264a9ca188c4facffe6e18a7e6865fb2966,http://doi.ieeecomputersociety.org/10.1109/BIBM.2017.8217969
+008528d5e27919ee95c311266041e4fb1711c254,https://doi.org/10.1007/s13735-015-0092-1
+00d4c2db10f3a32d505d7b8adc7179e421443dec,https://doi.org/10.1109/GlobalSIP.2014.7032080
+00049f989067d082f7f8d0581608ad5441d09f8b,https://doi.org/10.1109/LSP.2016.2555480
+003ba2001bd2614d309d6ec15e9e2cbe86db03a1,https://doi.org/10.1109/ISCAS.2005.1465264
+00eccc565b64f34ad53bf67dfaf44ffa3645adff,http://doi.ieeecomputersociety.org/10.1109/ICMEW.2013.6618328
+00301c250d667700276b1e573640ff2fd7be574d,https://doi.org/10.1109/BTAS.2014.6996242
+00a38ebce124879738b04ffc1536018e75399193,https://doi.org/10.1109/BTAS.2017.8272766
+009bf86913f1c366d9391bf236867d84d12fa20c,https://doi.org/10.1109/CVPRW.2010.5544620
+0034e37a0faf0f71395245b266aacbf5412f190a,https://doi.org/10.1109/TMM.2014.2355134
+6e9de9c3af3258dd18142e9bef2977b7ce153bd5,https://doi.org/10.1007/978-3-319-48881-3
+6e2041a9b5d840b0c3e4195241cd110640b1f5f3,https://doi.org/10.1007/s10044-013-0349-3
+6e7ffd67329ca6027357a133437505bc56044e65,https://doi.org/10.1109/IJCNN.2014.6889754
+6ec275755f8776b620d0a4550be0e65caf2bc87a,https://doi.org/10.1109/IS.2016.7737496
+9ab963e473829739475b9e47514f454ab467a5af,http://doi.ieeecomputersociety.org/10.1109/FG.2017.33
+9abf6d56a7d336bc58f4e3328d2ee807032589f1,https://doi.org/10.1109/CEC.2017.7969500
+9abab00de61dd722b3ad1b8fa9bffd0001763f8b,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2420563
+9ab126760f68071a78cabe006cf92995d6427025,https://doi.org/10.1007/s11042-013-1703-6
+9a84588fe7e758cfbe7062686a648fab787fc32f,https://doi.org/10.1007/s11042-014-2333-3
+9aade3d26996ce7ef6d657130464504b8d812534,https://doi.org/10.1109/TNNLS.2016.2618340
+9aba281955117eb4a7aed36775f55f27e4dde42f,http://doi.ieeecomputersociety.org/10.1109/AFGR.2000.840635
+36bb5cca0f6a75be8e66f58cba214b90982ee52f,http://doi.ieeecomputersociety.org/10.1109/FG.2017.73
+36219a3196aac2bd149bc786f083957a6e6da125,https://doi.org/10.1016/j.jvcir.2015.12.003
+3690af0af51a067750f664c08e48b486d1cd476d,http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2012.41
+36486944b4feeb88c0499fecd253c5a53034a23f,https://doi.org/10.1109/CISP-BMEI.2017.8301986
+36b23007420b98f368d092bab196a8f3cbcf6f93,http://doi.ieeecomputersociety.org/10.1109/ICNC.2009.106
+36b13627ee8a5a8cd04645213aabfa917bbd32f5,https://doi.org/10.1109/TCSVT.2016.2602812
+363f540dc82ba8620262a04a67cfd6d3c85b0582,http://doi.ieeecomputersociety.org/10.1109/WIAMIS.2009.5031445
+36bb93c4f381adca267191811abb8cc7812363f9,https://doi.org/10.1109/CISP-BMEI.2017.8301987
+5c91fc106cfe9d57a9b149c1af29ca84d403fc7e,https://doi.org/10.1109/TCSVT.2015.2452782
+5c3eb40b06543f00b2345f3291619a870672c450,http://doi.ieeecomputersociety.org/10.1109/ICPR.2006.539
+5c19c4c6a663fe185a739a5f50cef6a12a4635a1,https://doi.org/10.1016/j.imavis.2012.08.016
+5c526ee00ec0e80ba9678fee5134dae3f497ff08,https://doi.org/10.1109/TCE.2010.5606299
+5c4f9260762a450892856b189df240f25b5ed333,https://doi.org/10.1109/TIP.2017.2651396
+09f9409430bba2afb84aa8214dbbb43bfd4cf056,https://doi.org/10.1109/TNN.2006.883012
+0974677f59e78649a40f0a1d85735410d21b906a,https://doi.org/10.1109/ISCAS.2017.8050798
+0931bef0a9c8c153184a1f9c286cf4883cbe99b6,https://doi.org/10.1007/s12193-015-0203-6
+09138ad5ad1aeef381f825481d1b4f6b345c438c,https://doi.org/10.1109/IIH-MSP.2012.41
+096ffc1ea5493242ba0c113178dab0c096412f81,http://doi.acm.org/10.1145/3123266.3123441
+092dd7cb6c9b415eb83afb104fa63d7d4290ac33,https://doi.org/10.1109/SPLIM.2016.7528409
+5dbb2d556f2e63a783a695a517f5deb11aafd7ea,https://doi.org/10.1109/ICB.2015.7139079
+5dd57b7e0e82a33420c054da7ea3f435d49e910e,https://doi.org/10.1007/s10851-014-0493-4
+5df17c81c266cf2ebb0778e48e825905e161a8d9,https://doi.org/10.1109/TMM.2016.2520091
+5da98f7590c08e83889f3cec7b0304b3610abf42,https://doi.org/10.1016/j.eswa.2017.07.018
+5d9f468a2841ea2f27bbe3ef2c6fe531d444be68,https://doi.org/10.1109/GlobalSIP.2017.8309167
+5ddfd3d372f7679518db8fd763d5f8bc5899ed67,https://doi.org/10.1109/ICPR.2014.797
+31ba7f5e09a2f0fe9cf7ea95314723206dcb6059,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.300
+3150e329e01be31ba08b6d76fc46b0da88a5ddeb,http://doi.acm.org/10.1145/2927006.2927012
+310fe4e6cb6d090f7817de4c1034e35567b56e34,https://doi.org/10.1109/ICPR.2014.313
+31697737707d7f661cbc6785b76cf9a79fee3ccd,http://doi.ieeecomputersociety.org/10.1109/FG.2017.100
+31a36014354ee7c89aa6d94e656db77922b180a5,http://doi.acm.org/10.1145/2304496.2304509
+31ffc95167a2010ce7aab23db7d5fc7ec439f5fb,https://doi.org/10.1109/TNNLS.2017.2651169
+31ba9d0bfaa2a44bae039e5625eb580afd962892,https://doi.org/10.1016/j.cviu.2016.03.014
+314c4c95694ff12b3419733db387476346969932,http://dl.acm.org/citation.cfm?id=3007672
+31f905d40a4ac3c16c91d5be8427762fa91277f1,https://doi.org/10.1109/TIP.2017.2704661
+91167aceafbc9c1560381b33c8adbc32a417231b,https://doi.org/10.1109/TCSVT.2009.2020337
+915ff2bedfa0b73eded2e2e08b17f861c0e82a58,https://doi.org/10.1109/UEMCON.2017.8249000
+919bdc161485615d5ee571b1585c1eb0539822c8,http://ieeexplore.ieee.org/document/6460332/
+9101363521de0ec1cf50349da701996e4d1148c8,http://doi.ieeecomputersociety.org/10.1109/ICIAP.2007.28
+919cb6160db66a8fe0b84cb7f171aded48a13632,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2014.2327978
+9166f46aa3e58befaefd3537e5a11b31ebeea4d0,https://doi.org/10.1109/ICIP.2015.7351505
+91d0e8610348ef4d5d4975e6de99bb2d429af778,http://doi.ieeecomputersociety.org/10.1109/ICIG.2011.61
+913961d716a4102d3428224f999295f12438399f,https://doi.org/10.1016/j.patcog.2014.01.016
+913062218c7498b2617bb9d7821fe1201659c5cc,https://doi.org/10.1109/ICMLA.2012.178
+918fc4c77a436b8a588f63b2b37420b7868fbbf8,https://doi.org/10.1016/j.inffus.2015.03.005
+655e94eccddbe1b1662432c1237e61cf13a7d57b,http://doi.ieeecomputersociety.org/10.1109/ISIP.2008.147
+6554ca3187b3cbe5d1221592eb546dfc11aac14b,http://doi.acm.org/10.1145/2501643.2501647
+65475ce4430fb524675ebab6bcb570dfa07e0041,https://doi.org/10.1109/ISR.2013.6695696
+65869cc5ef00d581c637ae8ea6ca02ae4bb2b996,http://doi.ieeecomputersociety.org/10.1109/ICDM.2007.65
+659dc6aa517645a118b79f0f0273e46ab7b53cd9,https://doi.org/10.1109/ACPR.2015.7486608
+65fc8393610fceec665726fe4e48f00dc90f55fb,https://doi.org/10.1109/CYBConf.2013.6617455
+6256b47342f080c62acd106095cf164df2be6020,https://doi.org/10.1007/978-3-319-24702-1_6
+62648f91e38b0e8f69dded13b9858bd3a86bb6ed,http://doi.acm.org/10.1145/2647868.2655016
+628f9c1454b85ff528a60cd8e43ec7874cf17931,http://doi.acm.org/10.1145/2993148.2993193
+62e834114b58a58a2ea2d7b6dd7b0ce657a64317,https://doi.org/10.1109/SMC.2014.6973987
+62e61f9f7445e8dec336415ac0c7e677f9f5f7c1,https://doi.org/10.1142/S0219467814500065
+6267dbeb54889be5bdb50c338a7c6ef82287084c,https://doi.org/10.1109/ICMLC.2010.5580567
+963a004e208ce4bd26fa79a570af61d31651b3c3,https://doi.org/10.1016/j.jvlc.2009.01.011
+9635493998ad60764d7bbf883351af57a668d159,https://doi.org/10.1109/IJCNN.2017.7966005
+96a8f115df9e2c938453282feb7d7b9fde6f4f95,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2016.2593719
+965c4a8087ae208c08e58aaf630ad412ac8ce6e2,http://doi.ieeecomputersociety.org/10.1109/ACII.2013.100
+96b1f2bde46fe4f6cc637398a6a71e8454291a6e,https://doi.org/10.1109/TIP.2010.2073476
+96fbadc5fa1393d59ce0b8fd3d71aebc1fe35b40,https://doi.org/10.1109/ICIP.2016.7532959
+9652f154f4ae7807bdaff32d3222cc0c485a6762,https://doi.org/10.1007/s00138-016-0760-z
+96d34c1a749e74af0050004162d9dc5132098a79,https://doi.org/10.1109/TNN.2005.844909
+96e0b67f34208b85bd90aecffdb92bc5134befc8,https://doi.org/10.1016/j.patcog.2007.10.002
+3a9fbd05aaab081189a8eea6f23ed730fa6db03c,https://doi.org/10.1109/ICASSP.2013.6638305
+3aebaaf888cba25be25097173d0b3af73d9ce7f9,http://doi.ieeecomputersociety.org/10.1109/MMUL.2014.49
+3a1c40eced07d59a3ea7acda94fa833c493909c1,http://doi.ieeecomputersociety.org/10.1109/FG.2017.111
+3ad56aed164190e1124abea4a3c4e1e868b07dee,https://doi.org/10.1016/j.patcog.2015.12.016
+3a0425c25beea6c4c546771adaf5d2ced4954e0d,https://link.springer.com/book/10.1007/978-3-319-58347-1
+54058859a2ddf4ecfc0fe7ccbea7bb5f29d9201d,https://doi.org/10.1007/978-3-319-50832-0_36
+548233d67f859491e50c5c343d7d77a7531d4221,https://doi.org/10.1007/s11042-007-0176-x
+5491478ae2c58af21389ed3af21babd362511a8e,http://doi.acm.org/10.1145/2949035.2949048
+54e988bc0764073a5db2955705d4bfa8365b7fa9,http://doi.acm.org/10.1145/2522848.2531749
+98856ab9dc0eab6dccde514ab50c823684f0855c,https://doi.org/10.1109/TIFS.2012.2191962
+982ede05154c1afdcf6fc623ba45186a34f4b9f2,https://doi.org/10.1109/TMM.2017.2659221
+982d4f1dee188f662a4b5616a045d69fc5c21b54,https://doi.org/10.1109/IJCNN.2016.7727859
+985bbe1d47b843fa0b974b4db91be23f218d1ce7,https://doi.org/10.1007/978-3-319-68121-4
+988849863c3a45bcedacf8bd5beae3cc9210ce28,http://doi.ieeecomputersociety.org/10.1109/TPDS.2016.2539164
+98c5dc00bd21a39df1d4411641329bdd6928de8a,http://doi.ieeecomputersociety.org/10.1109/CVPR.2011.5995447
+5364e58ba1f4cdfcffb247c2421e8f56a75fad8d,https://doi.org/10.1109/VCIP.2017.8305113
+539cb169fb65a5542c84f42efcd5d2d925e87ebb,https://doi.org/10.1109/ICB.2015.7139098
+5375a3344017d9502ebb4170325435de3da1fa16,https://doi.org/10.1007/978-3-642-37444-9
+5304cd17f9d6391bf31276e4419100f17d4423b2,https://doi.org/10.1109/ICIP.2012.6466930
+53873fe7bbd5a2d171e2b1babc9cacaad6cabe45,https://doi.org/10.1109/TCYB.2015.2417211
+534159e498e9cc61ea10917347637a59af38142d,https://doi.org/10.1016/j.neucom.2016.01.126
+53509017a25ac074b5010bb1cdba293cdf399e9b,http://doi.ieeecomputersociety.org/10.1109/AVSS.2012.41
+539f55c0e2501c1d86791c8b54b225d9b3187b9c,https://doi.org/10.1109/TIP.2017.2738560
+539ffd51f18404e1ef83371488cf5a27cd16d064,https://doi.org/10.1049/iet-ipr.2014.0733
+5305bfdff39ae74d2958ba28d42c16495ce2ff86,https://doi.org/10.1109/DICTA.2014.7008128
+3f2a44dcf0ba3fc72b24c7f09bb08e25797398c1,https://doi.org/10.1109/IJCNN.2017.7966210
+3fa628e7cff0b1dad3f15de98f99b0fdb09df834,http://doi.ieeecomputersociety.org/10.1109/ICME.2013.6607603
+3ffbc912de7bad720c995385e1fdc439b1046148,http://doi.ieeecomputersociety.org/10.1109/IIH-MSP.2008.347
+3fe3d6ff7e5320f4395571131708ecaef6ef4550,https://doi.org/10.1109/SITIS.2016.60
+3f88ea8cf2eade325b0f32832561483185db5c10,https://doi.org/10.1109/TIP.2017.2721838
+3f4711c315d156a972af37fe23642dc970a60acf,https://doi.org/10.1109/IJCNN.2008.4634393
+3ff418ac82df0b5c2f09f3571557e8a4b500a62c,https://doi.org/10.1007/s11554-007-0039-8
+3fc173805ed43602eebb7f64eea4d60c0386c612,http://doi.ieeecomputersociety.org/10.1109/CyberC.2015.94
+30cc1ddd7a9b4878cca7783a59086bdc49dc4044,https://doi.org/10.1007/s11042-015-2599-0
+30a4b4ef252cb509b58834e7c40862124c737b61,https://doi.org/10.1142/S0218001416560061
+3060ac37dec4633ef69e7bc63488548ab3511f61,https://doi.org/10.1007/s00521-018-3358-8
+30044dd951133187cb8b57e53a22cf9306fa7612,https://doi.org/10.1109/WACV.2017.52
+30188b836f2fa82209d7afbf0e4d0ee29c6b9a87,https://doi.org/10.1109/TIP.2013.2249077
+3080026f2f0846d520bd5bacb0cb2acea0ffe16b,https://doi.org/10.1109/BTAS.2017.8272690
+30cace74a7d51e9a928287e25bcefb968c49f331,http://doi.ieeecomputersociety.org/10.1109/ACII.2015.7344634
+5ee0103048e1ce46e34a04c45ff2c2c31529b466,https://doi.org/10.1109/ICIP.2015.7350886
+5e8de234b20f98f467581f6666f1ed90fd2a81be,http://doi.acm.org/10.1145/2647868.2655042
+5e87f5076952cd442718d6b4addce905bae1a1a4,https://doi.org/10.1109/ICMLC.2016.7872938
+5e19d7307ea67799eb830d5ce971f893e2b8a9ca,https://doi.org/10.1007/s11063-012-9214-4
+5e0b691e9e5812dd3cb120a8d77619a45aa8e4c4,https://doi.org/10.1109/ICIP.2016.7532567
+5ed5e534c8defd683909200c1dc31692942b7b5f,http://doi.acm.org/10.1145/2983926
+5e62b2ab6fd3886e673fd5cbee160a5bee414507,http://doi.ieeecomputersociety.org/10.1109/SITIS.2015.31
+5e806d8fa48216041fe719309534e3fa903f7b5b,https://doi.org/10.1109/BTAS.2010.5634501
+5efdf48ca56b78e34dc2f2f0ce107a25793d3fc2,http://doi.ieeecomputersociety.org/10.1109/TVCG.2016.2641442
+5bed2453a5b0c54a4a4a294f29c9658658a9881e,https://doi.org/10.1109/TIP.2015.2451173
+5b64584d6b01e66dfd0b6025b2552db1447ccdeb,https://doi.org/10.1109/BTAS.2017.8272697
+5bfad0355cdb62b22970777d140ea388a7057d4c,https://doi.org/10.1016/j.patcog.2011.05.006
+5b4bbba68053d67d12bd3789286e8a9be88f7b9d,https://doi.org/10.1109/ICSMC.2008.4811353
+37c5e3b6175db9eaadee425dc51bc7ce05b69a4e,https://doi.org/10.1007/s00521-013-1387-x
+3769e65690e424808361e3eebfdec8ab91908aa9,http://doi.acm.org/10.1145/2647868.2655035
+37f25732397864b739714aac001ea1574d813b0d,https://doi.org/10.1016/j.ijar.2017.09.002
+373c4d6af0ee233f0d669c3955c3a3ef2a009638,https://doi.org/10.1109/APSIPA.2015.7415420
+0874734e2af06883599ed449532a015738a1e779,https://doi.org/10.1007/s10115-013-0702-2
+0821028073981f9bd2dba2ad2557b25403fe7d7d,http://doi.acm.org/10.1145/2733373.2806318
+08872d801f134e41753601e85971769b28314ca2,http://doi.acm.org/10.1145/2683483.2683560
+080ab68a898a3703feead145e2c38361ae84a0a8,https://doi.org/10.1109/TIFS.2014.2343833
+6d5f876a73799cc628e4ad2d9cfcd88091272342,https://doi.org/10.1109/TSMCC.2005.848193
+6da3ff4250103369f4a6a39c8fb982438a97525c,https://doi.org/10.1109/THMS.2015.2404913
+6dd8d8be00376ac760dc92f9c5f20520872c5355,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2417578
+6d67a7fd9a4fa99624721f37b077c71dad675805,https://doi.org/10.1007/s12193-015-0202-7
+6d4c64ca6936f868d793e1b164ddaf19243c19a7,https://doi.org/10.1109/TNNLS.2015.2499273
+01729cb766b1016bac217a6a6cf24bbde19f56c8,https://doi.org/10.1109/CBMI.2010.5529888
+013d0acff1e5410fd9f6e15520d16f4ea02f03f6,https://doi.org/10.1109/TMM.2015.2477681
+01e14d8ffd6767336d50c2b817a7b7744903e567,http://doi.ieeecomputersociety.org/10.1109/FG.2017.128
+0133d1fe8a3138871075cd742c761a3de93a42ec,https://doi.org/10.1109/ICDSP.2015.7251932
+016194dbcd538ab5a129ef1bcff3c6e073db63f9,https://doi.org/10.1007/s10462-012-9334-2
+01f0a4e1442a7804e1fe95798eff777d08e42014,https://doi.org/10.1016/j.knosys.2017.09.005
+01e27c91c7cef926389f913d12410725e7dd35ab,https://doi.org/10.1007/s11760-017-1140-5
+067fe74aec42cb82b92cf6742c7cfb4a65f16951,http://doi.acm.org/10.1145/2601434
+06a799ad89a2a45aee685b9e892805e3e0251770,https://doi.org/10.1007/978-3-319-42147-6
+060f67c8a0de8fee9c1732b63ab40627993f93d0,https://doi.org/10.1007/978-3-642-33564-8
+06c956d4aac65752672ce4bd5a379f10a7fd6148,https://doi.org/10.1109/LSP.2017.2749763
+0629bc2b12245195af989e21573369329b7ef2b7,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2016.2553038
+6c6f0e806e4e286f3b18b934f42c72b67030ce17,https://doi.org/10.1109/FG.2011.5771345
+6c28b3550f57262889fe101e5d027912eb39564e,https://doi.org/10.1109/LSP.2014.2338911
+6c0ad77af4c0850bd01bb118e175ecc313476f27,http://doi.acm.org/10.1145/3009977.3010026
+6ca7a82ec1c51417c4f0b8eebddb53a73a3874b1,http://doi.acm.org/10.1145/2708463.2709059
+39c8ed5213882d4dbc74332245ffe201882c5de1,https://doi.org/10.1109/ICASSP.2013.6638045
+396b2963f0403109d92a4d4f26205f279ea79d2c,https://doi.org/10.1109/TSMCB.2005.845399
+397022a4460750c762dbb0aaebcacc829dee8002,https://doi.org/10.1109/TIFS.2013.2258152
+39acf4bb06b889686ca17fd8c89887a3cec26554,http://www.springerlink.com/index/10.1007/s10044-004-0223-4
+39c10888a470b92b917788c57a6fd154c97b421c,https://doi.org/10.1109/VCIP.2017.8305036
+39d0de660e2116f32088ce07c3376759d0fdaff5,https://doi.org/10.1109/ICPR.2016.7900043
+39d6339a39151b5f88ec2d7acc38fe0618d71b5f,https://doi.org/10.1109/MMSP.2013.6659285
+3980dadd27933d99b2f576c3b36fe0d22ffc4746,https://doi.org/10.1109/ROBIO.2017.8324597
+3960882a7a1cd19dfb711e35a5fc1843ed9002e7,http://doi.acm.org/10.1145/2487575.2487701
+398558817e05e8de184cc4c247d4ea51ab9d4d58,https://doi.org/10.1109/ICPR.2014.14
+993934822a42e70dd35fb366693d847164ca15ff,https://doi.org/10.1109/ICME.2009.5202753
+99a1180c3d39532efecfc5fa251d6893375c91a1,https://doi.org/10.1109/ICARCV.2012.6485394
+99e0c03686f7bc9d7add6cff39a941a047c3600a,https://doi.org/10.1109/ACCESS.2017.2712788
+99b8a24aacaa53fa3f8a7e48734037c7b16f1c40,https://doi.org/10.1109/ACCESS.2017.2752176
+998542e5e3882bb0ce563d390b1e1bff5460e80c,https://doi.org/10.1109/AFGR.2008.4813471
+992e4119d885f866cb715f4fbf0250449ce0db05,https://doi.org/10.1007/s00138-015-0674-1
+9989eda2f5392cfe1f789bb0f6213a46d92d1302,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477584
+997c7ebf467c579b55859315c5a7f15c1df43432,http://doi.ieeecomputersociety.org/10.1109/FG.2017.141
+993374c1c9d58a3dec28160188ff6ac1227d02f5,https://doi.org/10.1109/ICARCV.2016.7838650
+99cd84a62edb2bda2fc2fdc362a72413941f6aa4,http://doi.ieeecomputersociety.org/10.1109/FG.2017.109
+5278b7a6f1178bf5f90cd3388908925edff5ad46,https://doi.org/10.1007/s11704-015-4291-y
+520782f07474616879f94aae0d9d1fff48910254,https://doi.org/10.1016/j.neucom.2014.11.038
+5217ab9b723158b3ba2235e807d165e72fd33007,http://doi.acm.org/10.1145/2043674.2043710
+524c25217a6f1ed17f47871e947a5581d775fa56,https://doi.org/10.1117/12.2030875
+52e270ca8f5b53eabfe00a21850a17b5cc10f6d5,https://doi.org/10.1109/ROBIO.2013.6739643
+5226296884b3e151ce317a37f94827dbda0b9d16,https://doi.org/10.1109/IWBF.2016.7449690
+5213549200bccec57232fc3ff788ddf1043af7b3,http://doi.acm.org/10.1145/2601097.2601204
+526c79c6ce39882310b814b7918449d48662e2a9,https://doi.org/10.1109/ICASSP.2005.1416338
+524f6dc7441a3899ea8eb5d3e0d5d70e50ba566a,http://doi.acm.org/10.1145/2797143.2797165
+52b102620fff029b80b3193bec147fe6afd6f42e,http://dl.acm.org/citation.cfm?id=3028863
+5551a03353f571b552125dd4ee57301b69a10c46,https://doi.org/10.1016/j.neucom.2015.09.083
+55c46ae1154ed310610bdf5f6d9e7023d14c7eb4,http://doi.acm.org/10.1145/1027933.1028013
+55ee484f9cbd62111512485e3c1c3eadbf2e15c0,http://doi.ieeecomputersociety.org/10.1109/FG.2017.25
+559645d2447004355c83737a19c9a811b45780f1,https://doi.org/10.1109/ICB.2015.7139114
+550351edcfd59d3666984771f5248d95548f465a,https://doi.org/10.1109/TIP.2014.2327805
+5594beb2b314f5433bd7581f64bdbc58f2933dc4,https://doi.org/10.1016/j.neucom.2016.12.013
+55fdff2881d43050a8c51c7fdc094dbfbbe6fa46,https://doi.org/10.1109/ICB.2016.7550064
+553a605243b77a76c1ed4c1ad4f9a43ff45e391b,https://doi.org/10.1109/CISP-BMEI.2017.8302001
+557115454c1b8e6eaf8dbb65122c5b00dc713d51,https://doi.org/10.1109/LSP.2011.2140370
+55266ddbe9d5366e8cd1b0b645971cad6d12157a,https://doi.org/10.1109/SIU.2017.7960368
+556875fb04ed6043620d7ca04dfe3d8b3a9284f5,https://doi.org/10.1109/ICPR.2014.437
+9745a7f38c9bba9d2fd076813fc9ab7a128a3e19,http://doi.acm.org/10.1145/2393347.2396335
+97f3d35d3567cd3d973c4c435cdd6832461b7c3c,http://doi.ieeecomputersociety.org/10.1109/FG.2017.75
+97c554fbcf783d554c4f6c2f3fcc0a0f9dba0759,http://doi.ieeecomputersociety.org/10.1109/ISM.2016.0085
+9776a9f3c59907f45baaeda4b8907dcdac98aef1,https://doi.org/10.1109/CISP-BMEI.2017.8301924
+97c59db934ff85c60c460a4591106682b5ab9caa,https://doi.org/10.1109/BTAS.2012.6374568
+978b32ff990d636f7e2050bb05b8df7dfcbb42a1,https://doi.org/10.1109/BTAS.2014.6996270
+9729930ab0f9cbcd07f1105bc69c540330cda50a,https://doi.org/10.1109/ACCESS.2017.2749331
+9790ec6042fb2665c7d9369bf28566b0ce75a936,http://doi.acm.org/10.1145/3056540.3056546
+973022a1f9e30a624f5e8f7158b5bbb114f4af32,http://doi.acm.org/10.1145/3011077.3011138
+9774430006f1ed017156b17f3cf669071e398c58,https://doi.org/10.1109/SMC.2013.513
+9753ee59db115e1e84a7c045f2234a3f63f255b1,http://doi.ieeecomputersociety.org/10.1109/ACII.2015.7344683
+9771e04f48d8a1d7ae262539de8924117a04c20d,http://doi.ieeecomputersociety.org/10.1109/CGIV.2007.70
+63c74794aedb40dd6b1650352a2da7a968180302,https://doi.org/10.1016/j.neucom.2016.09.015
+637b31157386efbde61505365c0720545248fbae,https://doi.org/10.1109/BTAS.2017.8272721
+6345c0062885b82ccb760c738a9ab7fdce8cd577,https://doi.org/10.1109/EMBC.2016.7590729
+635d2696aa597a278dd6563f079be06aa76a33c0,https://doi.org/10.1109/ICIP.2016.7532429
+636c786d4e4ac530ac85e3883a2f2cf469e45fe2,https://doi.org/10.1016/j.neucom.2016.12.043
+6343bc0013343b6a5f96154f02d18dcd36a3f74c,https://doi.org/10.1007/s11042-014-2083-2
+0fc5c6f06e40014a56f492172f44c073d269e95c,https://doi.org/10.1108/17563781311301490
+0fb45e704ef3ca1f9c70e7be3fb93b53714ed8b5,http://doi.ieeecomputersociety.org/10.1109/FG.2017.142
+0fee3b9191dc1cef21f54232a23530cd8169d3b2,https://doi.org/10.1109/ICDM.2016.0050
+0f2461a265be997c962fa562ae48378fb964b7b4,https://doi.org/10.1109/BigData.2016.7841028
+0f22b89341d162a7a0ebaa3c622d9731e5551064,http://doi.ieeecomputersociety.org/10.1109/AIPR.2011.6176352
+0fdc3cbf92027cb1200f3f94927bef017d7325ae,https://doi.org/10.1109/BTAS.2015.7358771
+0f29bc5d8458358d74dc8c4fd6968b4182dd71d2,https://doi.org/10.1109/ICIP.2016.7532637
+0f1cb558b32c516e2b6919fea0f97a307aaa9091,https://doi.org/10.1007/s41095-017-0091-7
+0fcf04fda0bea5265b73c85d2cc2f7f70416537b,https://doi.org/10.1109/TCSVT.2015.2409012
+0f64e26d6dd6f1c99fe2050887fac26cafe9ed60,https://doi.org/10.1109/MCI.2016.2627668
+0a4a8768c1ed419baebe1c420bd9051760875cbe,https://doi.org/10.1109/EUSIPCO.2016.7760451
+0a5b2e642683ff20b6f0cee16a32a68ba0099908,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2012.6239342
+0aebe97a92f590bdf21cdadfddec8061c682cdb2,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2017.2695183
+0a0b9a9ff827065e4ff11022b0e417ddf1d3734e,http://dl.acm.org/citation.cfm?id=2935856
+0a451fc7d2c6b3509d213c210ae880645edf90ed,https://doi.org/10.1109/IJCNN.2014.6889591
+0abfb5b89e9546f8a5c569ab35b39b888e7cea46,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2011.68
+0ac2e8bd5a77d83bae9b49daab2c6f321e9b7a4e,https://doi.org/10.1109/SCIS-ISIS.2016.0166
+642417f2bb1ff98989e0a0aa855253fed1fffe04,https://doi.org/10.1117/12.2004255
+6440d6c7081efe4538a1c75e93144f3d142feb41,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.280
+6486b36c6f7fd7675257d26e896223a02a1881d9,https://doi.org/10.1109/THMS.2014.2376874
+647b2e162e9c476728172f62463a8547d245cde3,https://doi.org/10.1109/ICPR.2016.7899898
+64e216c128164f56bc91a33c18ab461647384869,http://doi.ieeecomputersociety.org/10.1109/AVSS.2016.7738017
+6489ad111fee8224b34f99d1bcfb5122786508cd,https://doi.org/10.1109/ICIP.2014.7025280
+64a08beb073f62d2ce44e25c4f887de9208625a4,https://doi.org/10.1080/09540090701725557
+64e82b42e1c41250bdf9eb952686631287cfd410,https://doi.org/10.1111/cgf.12760
+64b9ad39d115f3e375bde4f70fb8fdef5d681df8,https://doi.org/10.1109/ICB.2016.7550088
+64fd48fae4d859583c4a031b51ce76ecb5de614c,https://doi.org/10.1109/ICARCV.2008.4795556
+64ba203c8cfc631d5f3f20419880523155fbeeb2,http://doi.acm.org/10.1145/3009977.3010008
+90ddf1aabf1c73b5fc45254a2de46e53a0bde857,https://doi.org/10.1109/ROBIO.2015.7418917
+907bb6c2b292e6db74fad5c0b7a7f1cc2a4d4224,https://doi.org/10.1016/j.patcog.2014.07.010
+9048732c8591a92a1f4f589b520a733f07578f80,https://doi.org/10.1109/CISP-BMEI.2017.8301921
+9055b155cbabdce3b98e16e5ac9c0edf00f9552f,http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78
+902cc7dd4ecfb2b6750905ef08bceeed24e1eeeb,https://doi.org/10.1016/j.patcog.2016.03.002
+90eb66e75381cce7146b3953a2ae479a7beec539,http://doi.ieeecomputersociety.org/10.1109/AIPR.2015.7444542
+90ae02da16b750a9fd43f8a38440f848309c2fe0,https://doi.org/10.1007/s10044-015-0499-6
+9026ee8a89ecfa6bd2688a4943eee027e3fc4b0f,http://doi.ieeecomputersociety.org/10.1109/CGIV.2011.28
+90c4a6c6f790dbcef9a29c9a755458be09e319b6,http://doi.acm.org/10.1145/2964284.2967242
+9026eb610916ec4ce77f0d7d543b7c2482ba4173,https://doi.org/10.1016/j.patrec.2012.03.006
+90c4deaa538da42b9b044d7b68c3692cced66036,http://doi.ieeecomputersociety.org/10.1109/SITIS.2007.89
+bf30477f4bd70a585588528355b7418d2f37953e,https://doi.org/10.1109/ICPR.2016.7900280
+bf1e0545785b05b47caa3ffe7d16982769986f38,https://doi.org/10.1016/j.asoc.2010.12.002
+bf0836e5c10add0b13005990ba019a9c4b744b06,https://doi.org/10.1109/TCE.2009.5373791
+bf4f79fd31493648d80d0a4a8da5edeeaba74055,http://doi.acm.org/10.1145/2783258.2783280
+bf00071a7c4c559022272ca5d39e07f727ebb479,https://doi.org/10.1109/MMSP.2016.7813388
+bf2eb77e9b795a4a0a38ed4b1c8dd4b2c9a74317,https://doi.org/10.1007/978-3-319-69900-4_70
+bf1ebcaad91c2c0ed35544159415b3ad388cc7a9,https://doi.org/10.1007/s11042-015-2665-7
+d37ca68742b2999667faf464f78d2fbf81e0cb07,https://doi.org/10.1007/978-3-319-25417-3_76
+d3a3d15a32644beffaac4322b9f165ed51cfd99b,https://doi.org/10.1109/SIU.2016.7496197
+d42dbc995318e2936714c65c028700bfd3633049,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477592
+d4331a8dd47b03433f8390da2eaa618751861c64,https://doi.org/10.1109/TIP.2012.2192125
+d4353952a408e1eae8c27a45cc358976d38dde00,https://doi.org/10.1007/s00138-014-0594-5
+d4ccc4f18a824af08649657660e60b67c6868d9c,https://doi.org/10.1142/S021800141655020X
+d40c16285d762f7a1c862b8ac05a0fdb24af1202,https://doi.org/10.1109/BESC.2017.8256378
+d4ec62efcc631fa720dfaa1cbc5692b39e649008,https://doi.org/10.1109/ICDM.2016.0026
+d4fb26f5528b9a1f04ea773cc2b920e01fc0edd4,https://doi.org/10.1109/TSMCB.2009.2032155
+d4fba386caca1b5b2ee35ee5310b5fce50b2b1c3,https://doi.org/10.23919/MVA.2017.7986886
+bab2f4949a38a712a78aafbc0a3c392227c65f56,https://doi.org/10.1109/CISP-BMEI.2017.8302191
+ba30cc9d8bac724dafc0aea247159cc7e7105784,http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019360
+ba931c3f90dd40a5db4301a8f0c71779a23043d6,https://doi.org/10.1109/ICPR.2014.136
+a07f78124f83eef1ed3a6f54ba982664ae7ca82a,http://ieeexplore.ieee.org/document/6460481/
+a0d5990eb150cdcb1c8b2967e6a4fe7a5d85063b,https://doi.org/10.1109/ICIP.2017.8296805
+a094e52771baabe4ab37ef7853f9a4f534227457,https://doi.org/10.1109/TITS.2016.2551298
+a0f6196d27a39cde2dbf62c08d89cbe489600bb0,https://doi.org/10.1016/j.cose.2016.03.007
+a006cd95c14de399706c5709b86ac17fce93fcba,https://doi.org/10.1109/ICPR.2014.343
+a7c066e636b8953481b4a8d8ff25a43a96dd348f,https://doi.org/10.1109/ATSIP.2017.8075517
+a76e57c1b2e385b68ffdf7609802d71244804c1d,https://doi.org/10.1016/j.patrec.2016.05.027
+a7da7e5a6a4b53bf8736c470ff8381a654e8c965,https://doi.org/10.1007/s13042-011-0045-9
+a7a3ec1128f920066c25cb86fbc33445ce613919,https://doi.org/10.1109/VCIP.2017.8305115
+a71bd4b94f67a71bc5c3563884bb9d12134ee46a,https://doi.org/10.1016/j.asoc.2015.05.006
+a735c6330430c0ff0752d117c54281b1396b16bf,https://doi.org/10.1109/SMC.2014.6974118
+a73405038fdc0d8bf986539ef755a80ebd341e97,https://doi.org/10.1109/TIP.2017.2698918
+a713a01971e73d0c3118d0409dc7699a24f521d6,https://doi.org/10.1109/SSCI.2017.8285381
+a7f188a7161b6605d58e48b2537c18a69bd2446f,https://doi.org/10.1109/PIMRC.2011.6139898
+a76969df111f9ee9f0b898b51ad23a721d289bdc,https://doi.org/10.1109/ICMLA.2015.185
+a75de488eaacb1dafffbe667465390f101498aaf,http://doi.ieeecomputersociety.org/10.1109/FG.2017.47
+b839bc95794dc65340b6e5fea098fa6e6ea5e430,https://doi.org/10.1109/WACVW.2017.8
+b8e5800dfc590f82a0f7eedefce9abebf8088d12,https://doi.org/10.1109/DCC.2017.87
+b86c49c6e3117ea116ec2d8174fa957f83502e89,https://doi.org/10.1109/CIT/IUCC/DASC/PICOM.2015.149
+b85d0aef3ee2883daca2835a469f5756917e76b7,https://doi.org/10.1007/s41095-015-0015-3
+b856d8d6bff745bb1b4beb67e4b821fc20073840,https://doi.org/10.1109/ICMLC.2016.7872935
+b84dde74dddf6a3281a0b22c68999942d2722919,http://dl.acm.org/citation.cfm?id=2910703
+b8a16fcb65a8cee8dd32310a03fe36b5dff9266a,https://doi.org/10.1109/SIU.2014.6830473
+b8b9cef0938975c5b640b7ada4e3dea6c06d64e9,http://doi.ieeecomputersociety.org/10.1109/FG.2017.119
+b85d953de16eecaecccaa8fad4081bd6abda9b1b,https://doi.org/10.1016/j.neuroimage.2015.12.020
+b84f164dbccb16da75a61323adaca730f528edde,https://doi.org/10.1109/TIP.2013.2237914
+b8bcf9c773da1c5ee76db4bf750c9ff5d159f1a0,http://doi.acm.org/10.1145/2911996.2911999
+b8978a5251b6e341a1171e4fd9177aec1432dd3a,https://doi.org/10.1016/j.image.2016.04.004
+b8f64a94f536b46ef34a0223272e02f9be785ef9,https://doi.org/10.1109/EMBC.2012.6346590
+b1891010a0722117c57e98809e1f2b26cd8e9ee3,http://doi.acm.org/10.1145/2330784.2331026
+b1efefcc9a5d30be90776571a6cc0071f3679753,https://doi.org/10.1109/ROBIO.2016.7866471
+b1bb517bd87a1212174033fc786b2237844b04e6,https://doi.org/10.1016/j.neucom.2015.03.078
+b1534888673e6119f324082246016d28eba249aa,https://doi.org/10.1109/MMSP.2017.8122229
+b13b101b6197048710e82f044ad2eda6b93affd8,http://doi.ieeecomputersociety.org/10.1109/ARTCom.2009.91
+ddd0f1c53f76d7fc20e11b7e33bbdc0437516d2b,https://doi.org/10.1109/ICDSP.2016.7868598
+dd031dbf634103ff3c58ce87aa74ec6921b2e21d,http://doi.ieeecomputersociety.org/10.1109/ACII.2015.7344664
+dd8a851f2a0c63bb97e33aaff1841695f601c863,https://doi.org/10.1109/BTAS.2014.6996260
+ddd9d7cb809589b701fba9f326d7cf998a63b14f,http://doi.acm.org/10.1145/2647868.2654992
+ddf577e8b7c86b1122c1bc90cba79f641d2b33fa,http://doi.acm.org/10.1145/3013971.3014026
+dd715a98dab34437ad05758b20cc640c2cdc5715,https://doi.org/10.1007/s41095-017-0082-8
+dcb50e1f439d1f9b14ae85866f4542e51b830a07,https://doi.org/10.1109/FSKD.2012.6234354
+dcea30602c4e0b7525a1bf4088620128d4cbb800,https://doi.org/10.1109/VCIP.2013.6706430
+dcdece0d0ee382e2f388dcd7f5bd9721bb7354d6,https://doi.org/10.1109/TCYB.2014.2311033
+dc2f16f967eac710cb9b7553093e9c977e5b761d,https://doi.org/10.1109/ICPR.2016.7900141
+dc84d3f29c52e6d296b5d457962c02074aa75d0f,https://doi.org/10.1109/TIP.2016.2580939
+dca2bb023b076de1ccd0c6b8d71faeb3fccb3978,http://doi.acm.org/10.1145/3152118
+b69e7e2a7705a58a0e3f1b80ae542907b89ce02e,https://doi.org/10.1007/s11042-015-2614-5
+b6259115b819424de53bb92f64cc459dcb649f31,http://doi.ieeecomputersociety.org/10.1109/AVSS.2017.8078466
+b68452e28951bf8db5f1193eca3a8fd9e2d0d7ef,https://doi.org/10.1109/ICACCI.2015.7275752
+b6ac33d2c470077fa8dcbfe9b113beccfbd739f8,http://doi.acm.org/10.1145/2509896.2509905
+b65b51c796ed667c4c7914bf12b1926fd6bbaa0c,https://doi.org/10.1016/j.neuroimage.2013.05.108
+b6a23f72007cb40223d7e1e1cc47e466716de945,https://doi.org/10.1109/CVPRW.2010.5544598
+b6c00e51590c48a48fae51385b3534c4d282f76c,https://doi.org/10.1109/TIFS.2015.2427778
+b631f3c212aab45d73ddc119f1f7d00c3c502a72,https://doi.org/10.1109/TIFS.2009.2035976
+b63b6ed78b39166d87d4c56f8890873aa65976a2,https://doi.org/10.1109/ICRA.2011.5979953
+a92e24c8c53e31fc444a13bd75b434b7207c58f1,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2014.2317711
+a9756ca629f73dc8f84ee97cfa8b34b8207392dc,https://doi.org/10.1109/ICIP.2017.8296542
+a9cecfbc47a39fa0158a5f6fd883e0e5ac2aa134,https://doi.org/10.1142/S0218001405004071
+a9af0dc1e7a724464d4b9d174c9cf2441e34d487,https://doi.org/10.1142/S0219691316500351
+a9506c60ec48056087ee3e10d28ff7774fbbd553,https://doi.org/10.1109/TCSVT.2014.2376136
+a9d1d00d6897ae23c9a7e9fb75a3c7417a6730a4,https://doi.org/10.1049/iet-ipr.2016.1074
+a9426cb98c8aedf79ea19839643a7cf1e435aeaa,https://doi.org/10.1109/GlobalSIP.2016.7905998
+d5f8827fc7d66643bf018d5636e81ed41026b61a,http://doi.ieeecomputersociety.org/10.1109/FG.2017.36
+d569c3e62f471aa75ed53e631ec05c1a3d594595,https://doi.org/10.1109/NNSP.2002.1030072
+d5b445c5716952be02172ca4d40c44f4f04067fa,https://doi.org/10.1109/ICICS.2011.6173537
+d57ce0ff4acb2910c2d1afee2ebb7aa1e72a4584,https://doi.org/10.1109/CVPRW.2010.5543816
+d5c66a48bc0a324750db3d295803f47f6060043d,http://doi.ieeecomputersociety.org/10.1109/AVSS.2006.109
+d58fce50e9028dfc12cb2e7964f83d3b28bcc2fc,http://doi.ieeecomputersociety.org/10.1109/FG.2017.101
+d5dc78eae7a3cb5c953c89376e06531d39b34836,https://doi.org/10.1007/s00521-009-0242-6
+d2d9612d3d67582d0cd7c1833599b88d84288fab,https://doi.org/10.1049/iet-cvi.2015.0222
+d2a415365f997c8fe2dbdd4e06ceab2e654172f6,http://doi.acm.org/10.1145/2425333.2425361
+d2bad850d30973a61b1a7d7dc582241a41e5c326,http://doi.ieeecomputersociety.org/10.1109/ICICIC.2006.12
+d2baa43471d959075fc4c93485643cbd009797fd,http://doi.ieeecomputersociety.org/10.1109/MM.2017.4241350
+d2598c088b0664c084413796f39697c6f821d56e,https://doi.org/10.1109/VCIP.2016.7805451
+d2fac640086ba89271ad7c1ebf36239ecd64605e,http://ieeexplore.ieee.org/document/6460449/
+d2b3166b8a6a3e6e7bc116257e718e4fe94a0638,https://doi.org/10.1007/s00521-010-0411-7
+aa7c72f874951ff7ca3769439f2f39b7cfd4b202,https://doi.org/10.1109/JPROC.2009.2032355
+aaf2436bc63a58d18192b71cc8100768e2f8a6cb,http://doi.ieeecomputersociety.org/10.1109/ICDIP.2009.77
+aad6fc5bd7631d2e68b7a5a01ac5d578899c43e5,http://doi.ieeecomputersociety.org/10.1109/FGR.2006.80
+aa892fe17c06e2b18db2b12314499a741e755df7,https://doi.org/10.1109/IJCNN.2017.7966089
+aab9a617be6e5507beb457b1e6c2e5b046f9cff0,https://doi.org/10.1109/ICIP.2008.4712153
+aa4af9b3811db6a30e1c7cc1ebf079078c1ee152,http://doi.acm.org/10.1145/3129416.3129451
+aad7b12936e0ced60bc0be95e8670b60b5d5ce20,https://doi.org/10.1109/URAI.2013.6677383
+aa90a466a2ff7781c36e7da7df0013aa5b117510,http://doi.ieeecomputersociety.org/10.1109/AICCSA.2017.159
+aa8341cb5d8f0b95f619d9949131ed5c896d6470,http://doi.ieeecomputersociety.org/10.1109/IIH-MSP.2007.403
+aaec8141d57d29aa3cedf1baec9633180ddb7a3d,http://doi.ieeecomputersociety.org/10.1109/ICME.2016.7552916
+aae31f092fadd09a843e1ca62af52dc15fc33c56,http://doi.ieeecomputersociety.org/10.1109/ACII.2017.8273609
+affa61d044daa1a7d43a6803a743eab47c89c45d,https://doi.org/10.1109/TNNLS.2015.2405574
+afba76d0fe40e1be381182aec822431e20de8153,https://doi.org/10.1007/s00521-014-1768-9
+af12a79892bd030c19dfea392f7a7ccb0e7ebb72,http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247972
+afdbbc5c84eb4e535c7c478b5227c0138b57af64,http://doi.ieeecomputersociety.org/10.1109/TMC.2016.2593919
+af2d30fdb8c611dc5b883b90311d873e336fc534,https://doi.org/10.1109/ISCAS.2017.8050275
+af3e6e20de06b03c33f8e85eced74c2d096730ea,https://doi.org/10.1109/CISP-BMEI.2017.8301972
+af7553d833886663550ce83b087a592a04b36419,https://doi.org/10.1109/TIFS.2015.2390138
+af8e22ef8c405f9cc9ad26314cb7a9e7d3d4eec2,https://doi.org/10.1007/s00521-014-1569-1
+af97e792827438ddea1d5900960571939fc0533e,https://doi.org/10.1109/ICSMC.2005.1571460
+af97a51f56cd6b793cf96692931a8d1ddbe4e3cc,https://doi.org/10.1109/ICPR.2014.57
+b749ca71c60904d7dad6fc8fa142bf81f6e56a62,https://doi.org/10.1109/TIP.2013.2292560
+b7128e0fe18dcb42e8a2ac5cf6794f64a8e37bd0,https://doi.org/10.1109/SERA.2017.7965717
+b7845e0b0ce17cde7db37d5524ef2a61dee3e540,https://doi.org/10.1109/ICPR.2016.7899608
+b75eecc879da38138bf3ace9195ae1613fb6e3cc,https://doi.org/10.1007/s10278-015-9808-2
+b7b8e7813fbc12849f2daba5cab604abd8cbaab6,https://doi.org/10.1109/ICCE.2014.6775938
+b7a0c70a320c1ac3e92f4bf0b50a7d8ceb757c41,https://doi.org/10.1109/IJCNN.2016.7727203
+b784bb1d2b2720dac8d4b92851a8d6360c35b0b2,https://doi.org/10.1109/ICDM.2016.0041
+b728e7db6e5559a77dc59381bfb8df96d482a721,http://doi.ieeecomputersociety.org/10.1109/ICIG.2011.28
+b7fa06b76f4b9263567875b2988fb7bbc753e69f,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2469282
+b7043048b4ba748c9c6317b6d8206192c34f57ff,https://doi.org/10.1109/ICIP.2016.7533061
+db3984b143c59584a32d762d712d21c0e8cf38b8,https://doi.org/10.1109/SMC.2015.324
+dbcfefa92edab8d1ffe8bc1cc66ad80fb13d2b6a,https://doi.org/10.1007/s00521-010-0519-9
+dbf2d2ca28582031be6d16519ab887248f5e8ad8,https://doi.org/10.1109/TMM.2015.2410135
+dbfe62c02b544b48354fac741d90eb4edf815db5,https://doi.org/10.1109/SITIS.2016.43
+dbc3ab8c9f564f038e7779b87900c4a0426f3dd1,http://doi.acm.org/10.1145/1386352.1386401
+a8faeef97e2a00eddfb17a44d4892c179a7cc277,https://doi.org/10.1109/FG.2011.5771459
+a8c62833f5e57d4cd060d6b5f0f9cfe486ee6825,http://doi.ieeecomputersociety.org/10.1109/CSIE.2009.808
+a8fd23934e5039bb818b8d1c47ccb540ce2c253c,https://doi.org/10.1007/s11760-015-0808-y
+a8f1fc34089c4f2bc618a122be71c25813cae354,https://doi.org/10.1142/S0219467816500194
+de048065ea2c5b3e306e2c963533df055e7dfcaa,https://doi.org/10.1109/LSP.2016.2598878
+ded8252fc6df715753e75ba7b7fee518361266ef,https://doi.org/10.1109/SIU.2012.6204837
+de79437f74e8e3b266afc664decf4e6e4bdf34d7,https://doi.org/10.1109/IVCNZ.2016.7804415
+de8657e9eab0296ac062c60a6e10339ccf173ec1,http://doi.ieeecomputersociety.org/10.1109/BRACIS.2014.51
+dea409847d52bb0ad54bf586cb0482a29a584a7e,http://doi.ieeecomputersociety.org/10.1109/ISM.2009.115
+de0ee491d2747a6f3d171f813fe6f5cdb3a27fd6,https://doi.org/10.1002/cpe.3850
+dec5b11b01f35f72adb41d2be26b9b95870c5c00,http://ieeexplore.ieee.org/document/7071948/
+deb89950939ae9847f0a1a4bb198e6dbfed62778,https://doi.org/10.1109/LSP.2016.2543019
+de878384f00b6ce1caa66ac01735fb4b63ad0279,https://doi.org/10.1049/iet-ipr.2014.0670
+defd44b02a1532f47bdd8c8f2375e3df64ac5d79,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.139
+b05943b05ef45e8ea8278e8f0870f23db5c83b23,https://doi.org/10.1109/ROBIO.2010.5723349
+b084ad222c1fc9409d355d8e54ac3d1e86f2ca18,https://doi.org/10.1016/j.neucom.2017.04.001
+b0358af78b7c5ee7adc883ef513bbcc84a18a02b,https://doi.org/10.1109/WACV.2017.10
+b0f59b71f86f18495b9f4de7c5dbbebed4ae1607,https://doi.org/10.1016/j.neucom.2015.04.085
+a63ec22e84106685c15c869aeb157aa48259e855,https://doi.org/10.1142/S0219691312500294
+a6e75b4ccc793a58ef0f6dbe990633f7658c7241,https://doi.org/10.1016/j.cviu.2016.10.007
+a62997208fec1b2fbca6557198eb7bc9340b2409,https://doi.org/10.1109/HPCC.and.EUC.2013.241
+a6ab23f67d85da26592055c0eac4c34f05c26519,http://doi.ieeecomputersociety.org/10.1109/ICTAI.2006.15
+a6793de9a01afe47ffbb516cc32f66625f313231,http://doi.acm.org/10.1145/2939672.2939853
+b944cc4241d195b1609a7a9d87fce0e9ba1498bc,https://doi.org/10.1109/TSP.2011.2179539
+b934f730a81c071dbfc08eb4c360d6fca2daa08f,http://doi.ieeecomputersociety.org/10.1109/ICME.2015.7177496
+b98e7a8f605c21e25ac5e32bfb1851a01f30081b,http://doi.acm.org/10.1145/2393347.2396303
+b9d68dbeb8e5fdc5984b49a317ea6798b378e5ae,http://doi.acm.org/10.1145/2733373.2807962
+b972683d702a65d3ee7a25bc931a5890d1072b6b,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2017.2669035
+b910590a0eb191d03e1aedb3d55c905129e92e6b,http://doi.acm.org/10.1145/2808492.2808570
+a180dc9766490416246e7fbafadca14a3c500a46,https://doi.org/10.1016/S0167-8655(03)00112-0
+a100595c66f84c3ddd3da8d362a53f7a82f6e3eb,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.46
+a1cda8e30ce35445e4f51b47ab65b775f75c9f18,https://doi.org/10.1109/ISBA.2018.8311462
+a13a27e65c88b6cb4a414fd4f6bca780751a59db,https://doi.org/10.1109/SMC.2016.7844934
+a1cecbb759c266133084d98747d022c1e638340d,http://doi.acm.org/10.1145/2670473.2670501
+a11ce3c9b78bf3f868b1467b620219ff651fe125,http://doi.acm.org/10.1145/2911996.2912073
+a192845a7695bdb372cccf008e6590a14ed82761,https://doi.org/10.1109/TIP.2014.2321495
+a119844792fd9157dec87e3937685c8319cac62f,https://doi.org/10.1109/APSIPA.2015.7415395
+ef7b8f73e95faa7a747e0b04363fced0a38d33b0,https://doi.org/10.1109/ICIP.2017.8297028
+ef35c30529df914a6975af62aca1b9428f678e9f,https://doi.org/10.1007/s00138-016-0817-z
+ef3a0b454370991a9c18ac7bfd228cf15ad53da0,https://doi.org/10.1109/ICNC.2010.5582886
+c3c463a9ee464bb610423b7203300a83a166b500,https://doi.org/10.1109/ICIP.2014.7025069
+c3390711f5ce6f5f0728ef88c54148bf9d8783a2,https://doi.org/10.1016/j.engappai.2015.03.016
+c3e53788370341afe426f2216bed452cbbdaf117,http://doi.ieeecomputersociety.org/10.1109/ATNAC.2017.8215436
+c3a53b308c7a75c66759cbfdf52359d9be4f552b,http://doi.ieeecomputersociety.org/10.1109/ISPAN-FCST-ISCC.2017.16
+c36f3cabeddce0263c944e9fe4afd510b5bae816,https://doi.org/10.1109/DICTA.2017.8227399
+c4b00e86841db3fced2a5d8ac65f80d0d3bbe352,http://doi.ieeecomputersociety.org/10.1109/AIPR.2004.4
+c41a3c31972cf0c1be6b6895f3bf97181773fcfb,https://doi.org/10.1109/ICPR.2014.103
+c4ca092972abb74ee1c20b7cae6e69c654479e2c,https://doi.org/10.1109/ICIP.2016.7532960
+c444c4dab97dd6d6696f56c1cacda051dde60448,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2013.37
+c459014131cbcd85f5bd5c0a89115b5cc1512be9,http://doi.ieeecomputersociety.org/10.1109/AUTOID.2005.23
+c49075ead6eb07ede5ada4fe372899bd0cfb83ac,https://doi.org/10.1109/ICSPCS.2015.7391782
+c4541802086461420afb1ecb5bb8ccd5962a9f02,https://doi.org/10.1109/TSMCB.2009.2029076
+c4d439fe07a65b735d0c8604bd5fdaea13f6b072,http://doi.acm.org/10.1145/2671188.2749294
+c4d0d09115a0df856cdb389fbccb20f62b07b14e,https://doi.org/10.1109/ICIP.2012.6466925
+ea227e47b8a1e8f55983c34a17a81e5d3fa11cfd,https://doi.org/10.1109/ICIP.2017.8296549
+ea8fa68b74ffefbe79a3576d7e4ae4365a1346ff,http://doi.ieeecomputersociety.org/10.1109/FG.2017.113
+ea8d217231d4380071132ce37bf997164b60ec44,https://doi.org/10.1109/SIU.2016.7496031
+ea2b3efd4d317ebaffaf7dc8c62db5ff1eab0e1b,https://doi.org/10.1109/FRUCT-ISPIT.2016.7561522
+ea026456729f0ec54c697198e1fd089310de4ae2,https://doi.org/10.1109/CIBIM.2013.6607917
+ea86b75427f845f04e96bdaadfc0d67b3f460005,https://doi.org/10.1109/ICIP.2016.7532686
+ea5c9d5438cde6d907431c28c2f1f35e02b64b33,https://doi.org/10.1109/SPAC.2017.8304257
+e12b2c468850acb456b0097d5535fc6a0d34efe3,https://doi.org/10.1016/j.neucom.2011.03.009
+e1c50cf0c08d70ff90cf515894b2b360b2bc788b,https://doi.org/10.1109/ICSMC.2007.4414085
+e10cbd049ac2f5cc8af9eb8e587b3408ad4bb111,https://doi.org/10.1117/1.JEI.24.5.053028
+e111624fb4c5dc60b9e8223abfbf7c4196d34b21,http://doi.ieeecomputersociety.org/10.1109/BIBM.2016.7822814
+e101bab97bce2733222db9cfbb92a82779966508,https://doi.org/10.1109/TCYB.2016.2549639
+e14b046a564604508ea8e3369e7e9f612e148511,https://doi.org/10.1007/978-3-642-17829-0_4
+e198a7b9e61dd19c620e454aaa81ae8f7377ade0,https://doi.org/10.1109/CVPRW.2010.5543611
+e1449be4951ba7519945cd1ad50656c3516113da,https://doi.org/10.1109/TCSVT.2016.2603535
+cd2bf0e1d19babe51eaa94cbc24b223e9c048ad6,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2016.2581168
+cde7901c0945683d0c677b1bb415786e4f6081e6,http://doi.ieeecomputersociety.org/10.1109/IRI.2015.44
+cdf2c8752f1070b0385a94c7bf22e8b54cac521b,https://doi.org/10.1007/s11265-010-0541-2
+cd33b3ca8d7f00c1738c41b2071a3164ba42ea61,https://doi.org/10.1142/S0218213008003832
+cdf0dc4e06d56259f6c621741b1ada5c88963c6d,https://doi.org/10.1109/ICIP.2014.7025061
+cd85f71907f1c27349947690b48bfb84e44a3db0,https://doi.org/10.1007/978-981-10-4840-1
+cdfa7dccbc9e9d466f8a5847004973a33c7fcc89,https://doi.org/10.1109/TIFS.2013.2263498
+cd3b713722ccb1e2ae3b050837ca296b2a2dd82a,https://doi.org/10.1016/j.jvcir.2016.07.015
+cd74d606e76ecddee75279679d9770cdc0b49861,https://doi.org/10.1109/TIP.2014.2365725
+cc1b093cfb97475faabab414878fa7e4a2d97cd7,http://doi.ieeecomputersociety.org/10.1109/ICALT.2017.141
+ccca2263786429b1b3572886ce6a2bea8f0dfb26,https://doi.org/10.1007/s10044-014-0388-4
+cc7c63473c5bef5ae09f26b2258691d9ffdd5f93,https://doi.org/10.1109/ICMLA.2012.17
+cc44f1d99b17a049a8186ec04c6a1ecf1906c3c8,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2012.87
+ccb95192001b07bb25fc924587f9682b0df3de8e,https://doi.org/10.1109/ICACCI.2016.7732123
+cc70fb1ab585378c79a2ab94776723e597afe379,https://doi.org/10.1109/ICIP.2017.8297067
+cc6d3ccc9e3dd0a43313a714316c8783cd879572,https://doi.org/10.1109/ICIP.2017.8296802
+cc9a61a30afdb8a5bc7088e1cef814b53dc4fc66,https://doi.org/10.1142/s0218213015400199
+cc5edaa1b0e91bc3577547fc30ea094aa2722bf0,https://doi.org/10.1109/CICARE.2014.7007832
+cce2f036d0c5f47c25e459b2f2c49fa992595654,http://doi.ieeecomputersociety.org/10.1109/DICTA.2010.93
+cca476114c48871d05537abb303061de5ab010d6,https://doi.org/10.15439/2016F472
+cc1ed45b02d7fffb42a0fd8cffe5f11792b6ea74,https://doi.org/10.1109/SIU.2016.7495874
+ccebd3bf069f5c73ea2ccc5791976f894bc6023d,https://doi.org/10.1109/ICPR.2016.7900186
+cceec87bad847b9b87178bde8ce5cce6bf1a8e99,https://doi.org/10.1109/RIISS.2014.7009163
+ccfebdf7917cb50b5fcd56fb837f841a2246a149,https://doi.org/10.1109/ICIP.2015.7351065
+e6f3707a75d760c8590292b54bc8a48582da2cd4,https://doi.org/10.1007/s11760-012-0410-5
+e6c491fb6a57c9a7c2d71522a1a066be2e681c84,https://doi.org/10.1016/j.imavis.2016.06.002
+e6d46d923f201da644ae8d8bd04721dd9ac0e73d,https://doi.org/10.1109/ISBA.2016.7477226
+e6c834c816b5366875cf3060ccc20e16f19a9fc6,https://doi.org/10.1109/BTAS.2016.7791185
+e66a6ae542907d6a0ebc45da60a62d3eecf17839,https://doi.org/10.1109/EUVIP.2014.7018366
+e66b4aa85524f493dafde8c75176ac0afad5b79c,https://doi.org/10.1109/SSCI.2017.8285219
+e6d6d1b0a8b414160f67142fc18e1321fe3f1c49,https://doi.org/10.1109/FSKD.2015.7382037
+e69a765d033ef6ea55c57ca41c146b27964c5cf2,https://doi.org/10.1109/ISCAS.2017.8050764
+f9fb7979af4233c2dd14813da94ec7c38ce9232a,http://doi.acm.org/10.1145/3131902
+f9752fd07b14505d0438bc3e14b23d7f0fe7f48b,http://doi.ieeecomputersociety.org/10.1109/ICMLA.2009.114
+f95321f4348cfacc52084aae2a19127d74426047,https://doi.org/10.1109/ICMLC.2013.6890897
+f925879459848a3eeb0035fe206c4645e3f20d42,http://doi.acm.org/10.1145/3025453.3025472
+f0dac9a55443aa39fd9832bdff202a579b835e88,https://doi.org/10.1109/JSTSP.2016.2543681
+f0a9d69028edd1a39147848ad1116ca308d7491e,https://doi.org/10.1007/11573548_11
+f09d5b6433f63d7403df5650893b78cdcf7319b3,https://doi.org/10.1109/AFGR.2008.4813384
+f0b4f5104571020206b2d5e606c4d70f496983f9,https://doi.org/10.1109/FUZZ-IEEE.2014.6891674
+f7911b9ff58d07d19c68f4a30f40621f63c0f385,http://dl.acm.org/citation.cfm?id=3007693
+f762afd65f3b680330e390f88d4cc39485345a01,http://doi.ieeecomputersociety.org/10.1109/ACIIW.2017.8272606
+f702a6cf6bc5e4cf53ea72baa4fc9d80cdbbae93,https://doi.org/10.1109/TCSVT.2007.903317
+f73174cfcc5c329b63f19fffdd706e1df4cc9e20,http://doi.ieeecomputersociety.org/10.1109/FIT.2015.13
+f7bebb2d5ef7c9bd38808b8e615756efafc2a1e7,https://doi.org/10.1109/ICIP.2012.6467434
+f79e4ba09402adab54d2efadd1c4bfe4e20c5da5,https://doi.org/10.1109/ICIP.2017.8296364
+e83e5960c2aabab654e1545eb419ef64c25800d5,https://doi.org/10.1016/j.neunet.2016.08.011
+e8951cc76af80da43e3528fe6d984071f17f57e7,https://doi.org/10.1109/WACVW.2017.9
+e8c051d9e7eb8891b23cde6cbfad203011318a4f,http://doi.acm.org/10.1145/3013971.3014015
+e88988f4696e7e2925ed96467fde4314bfa95eff,https://doi.org/10.1016/j.neucom.2015.01.076
+e82a0976db908e6f074b926f58223ac685533c65,https://doi.org/10.1007/s11042-015-2848-2
+e865908ed5e5d7469b412b081ca8abd738c72121,https://doi.org/10.1109/TIP.2016.2621667
+e8c6853135856515fc88fff7c55737a292b0a15b,http://doi.ieeecomputersociety.org/10.1109/FG.2017.46
+fa54ab106c7f6dbd3c004cea4ef74ea580cf50bf,http://doi.ieeecomputersociety.org/10.1109/SITIS.2009.18
+faf19885431cb39360158982c3a1127f6090a1f6,https://doi.org/10.1109/BTAS.2015.7358768
+fa72e39971855dff6beb8174b5fa654e0ab7d324,https://doi.org/10.1007/s11042-013-1793-1
+faa46ef96493b04694555738100d9f983915cf9b,https://doi.org/10.1007/s10489-015-0735-1
+fa08b52dda21ccf71ebc91bc0c4d206ac0aa3719,https://doi.org/10.1109/TIM.2015.2415012
+fadbb3a447d697d52771e237173b80782caaa936,https://doi.org/10.1007/s00530-012-0290-0
+fa9610c2dc7e2a79e0096ac033b11508d8ae7ed7,https://doi.org/10.1109/FSKD.2016.7603418
+fa5ab4b1b45bf22ce7b194c20c724946de2f2dd4,https://doi.org/10.1109/TIP.2015.2421437
+ff82825a04a654ca70e6d460c8d88080ee4a7fcc,http://doi.acm.org/10.1145/2683483.2683533
+ff9e042cccbed7e350a25b7d806cd17fb79dfdf9,https://doi.org/10.1007/s11760-016-0882-9
+fff31548617f208cd5ae5c32917afd48abc4ff6a,http://doi.acm.org/10.1145/3139295.3139309
+ff3859917d4121f47de0d46922a103c78514fcab,https://doi.org/10.1109/ICB.2016.7550050
+ff402bd06c9c4e94aa47ad80ccc4455efa869af3,http://doi.ieeecomputersociety.org/10.1109/ICPR.2004.1334120
+ff42ec628b0980909bbb84225d0c4f8d9ac51e03,https://doi.org/10.1109/TCSVT.2008.2005799
+ffea4184a0b24807b5f4ed87f9a985c2a27027d9,https://doi.org/10.1007/s00530-012-0297-6
+ff8db3810f927506f3aa594d66d5e8658f3cf4d5,http://doi.acm.org/10.1145/3078971.3079026
+ffea2b26e422c1009afa7e200a43b31a1fae86a9,https://doi.org/10.1007/s00500-009-0441-1
+ffb1cb0f9fd65247f02c92cfcb152590a5d68741,https://doi.org/10.1109/CISS.2012.6310782
+ff3f128f5addc6ce6b41f19f3d679282bbdaa2ee,http://doi.acm.org/10.1145/2903220.2903255
+ff0617d750fa49416514c1363824b8f61baf8fb5,https://doi.org/10.1587/elex.7.1125
+c570d1247e337f91e555c3be0e8c8a5aba539d9f,https://doi.org/10.1007/s11042-012-1352-1
+c586463b8dbedce2bfce3ee90517085a9d9e2e13,http://doi.ieeecomputersociety.org/10.1109/IIH-MSP.2006.9
+c5eba789aeb41904aa1b03fad1dc7cea5d0cd3b6,https://doi.org/10.1109/BTAS.2017.8272773
+c5022fbeb65b70f6fe11694575b8ad1b53412a0d,https://doi.org/10.1109/ICIP.2005.1530209
+c5c56e9c884ac4070880ac481909bb6b621d2a3f,http://doi.ieeecomputersociety.org/10.1109/ICCV.2011.6126466
+c553f0334fcadf43607925733685adef81fbe406,https://doi.org/10.1109/ICSIPA.2017.8120636
+c58ece1a3fa23608f022e424ec5a93cddda31308,https://doi.org/10.1109/JSYST.2014.2325957
+c59a9151cef054984607b7253ef189c12122a625,https://doi.org/10.1007/s00138-016-0791-5
+c59b62864a6d86eead075c88137a87070a984550,https://doi.org/10.1109/IVCNZ.2015.7761546
+c5437496932dcb9d33519a120821da755951e1a9,http://doi.acm.org/10.1145/2487575.2487604
+c2b10909a0dd068b8e377a55b0a1827c8319118a,https://doi.org/10.1109/TCYB.2016.2565898
+c270aff2b066ee354b4fe7e958a40a37f7bfca45,https://doi.org/10.1109/WCSP.2017.8170910
+c252bc84356ed69ccf53507752135b6e98de8db4,https://doi.org/10.1016/j.neucom.2015.02.067
+c291f0e29871c8b9509d1a2876c3e305839ad4ac,https://doi.org/10.1109/ICARCV.2014.7064432
+c244c3c797574048d6931b6714ebac64d820dbb3,http://doi.acm.org/10.1145/2808492.2808500
+c222f8079c246ead285894c47bdbb2dfc7741044,https://doi.org/10.1109/ICIP.2015.7351631
+c2be82ed0db509087b08423c8cf39ab3c36549c3,http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019363
+c23bd1917badd27093c8284bd324332b8c45bfcf,https://doi.org/10.1109/IJCNN.2010.5596316
+c2474202d56bb80663e7bece5924245978425fc1,https://doi.org/10.1109/ICIP.2016.7532771
+c2422c975d9f9b62fbb19738e5ce5e818a6e1752,https://doi.org/10.1109/TNNLS.2015.2481006
+c2dc29e0db76122dfed075c3b9ee48503b027809,https://doi.org/10.1109/ICIP.2016.7532632
+f6b4811c5e7111485e2c9cc5bf63f8ac80f3e2d7,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2016.2569436
+f6311d6b3f4d3bd192d866d2e898c30eea37d7d5,http://ieeexplore.ieee.org/document/6460511/
+f63b3b8388bc4dcd4a0330402af37a59ce37e4f3,https://doi.org/10.1109/SIU.2013.6531214
+f6ebfa0cb3865c316f9072ded26725fd9881e73e,http://doi.ieeecomputersociety.org/10.1109/FGR.2006.109
+f6e6b4d0b7c16112dcb71ff502033a2187b1ec9b,https://doi.org/10.1109/TMM.2015.2476657
+f6511d8156058737ec5354c66ef6fdcf035d714d,http://doi.ieeecomputersociety.org/10.1109/BWCCA.2014.115
+f652cb159a2cf2745aabcbf6a7beed4415e79e34,http://doi.acm.org/10.1145/1460096.1460119
+f6dabb4d91bf7389f3af219d486d4e67cec18c17,https://doi.org/10.1016/j.compeleceng.2014.08.010
+e95895262f66f7c5e47dd46a70110d89c3b4c203,https://doi.org/10.1016/j.neucom.2016.09.023
+e957d0673af7454dbf0a14813201b0e2570577e9,https://doi.org/10.1109/ICPR.2016.7899699
+e95c5aaa72e72761b05f00fad6aec11c3e2f8d0f,http://doi.acm.org/10.1145/2791405.2791505
+e9cebf627c204c6949dcc077d04c57eb66b2c038,https://doi.org/10.1109/SIU.2013.6531371
+e9b731f00d16a10a31ceea446b2baa38719a31f1,https://doi.org/10.1109/ICSMC.2012.6378271
+e9d1b3767c06c896f89690deea7a95401ae4582b,https://doi.org/10.1109/VCIP.2016.7805565
+e9d77a85bc2fa672cc1bd10258c896c8d89b41e8,https://doi.org/10.1109/ICTAI.2012.25
+e908ce44fa94bb7ecf2a8b70cb5ec0b1a00b311a,http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019548
+e94168c35be1d4b4d2aaf42ef892e64a3874ed8c,https://doi.org/10.1109/TSMCB.2008.2010715
+e96ce25d11296fce4e2ecc2da03bd207dc118724,https://doi.org/10.1007/s00138-007-0095-x
+e98551055bdcf8e25e07f4ffdbf39d0a4a57bffc,https://doi.org/10.1109/ICPR.2014.440
+e9331ae2a887c02e0a908ebae2810a681aedee29,https://doi.org/10.1016/j.image.2011.05.003
+f1e44e64957397d167d13f8f551cae99e5c16c75,https://doi.org/10.1007/s11042-013-1548-z
+f17d8f14651c123d39e13a39dc79b7eb3659fe68,https://doi.org/10.1007/s11042-013-1803-3
+f1da4d705571312b244ebfd2b450692fd875cd1f,https://doi.org/10.1109/TIP.2014.2322446
+f1d6da83dcf71eda45a56a86c5ae13e7f45a8536,https://doi.org/10.1109/ACCESS.2017.2737544
+f18ff597bbfca10f84d017ac5e1ef0de6d7ad66c,http://doi.ieeecomputersociety.org/10.1109/SNPD.2016.7515888
+f1061b2b5b7ca32edd5aa486aecc63a0972c84f3,https://doi.org/10.1109/TIP.2017.2760512
+f180cb7111e9a6ba7cfe0b251c0c35daaef4f517,https://doi.org/10.1109/TIP.2015.2417502
+f19bf8b5c1860cd81b5339804d5db9e791085aa7,https://doi.org/10.1109/SMC.2017.8122640
+f14403d9d5fbc4c6e8aeb7505b5d887c50bad8a4,https://doi.org/10.1109/ICIP.2012.6467433
+f1af714b92372c8e606485a3982eab2f16772ad8,http://ieeexplore.ieee.org/document/5617662/
+e7436b8e68bb7139b823a7572af3decd96241e78,https://doi.org/10.1109/ROBIO.2011.6181560
+e7144f5c19848e037bb96e225d1cfd961f82bd9f,http://doi.ieeecomputersociety.org/10.1109/FG.2017.126
+e73b1137099368dd7909d203b80c3d5164885e44,http://doi.ieeecomputersociety.org/10.1109/FSKD.2008.116
+e73f2839fc232c03e9f027c78bc419ee15810fe8,https://doi.org/10.1109/ICIP.2017.8296413
+e71c15f5650a59755619b2a62fa93ac922151fd6,http://doi.ieeecomputersociety.org/10.1109/AUTOID.2005.22
+e74a2159f0f7afb35c7318a6e035bc31b8e69634,http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019503
+e790a2538579c8e2ef9b314962ab26197d6664c6,https://doi.org/10.1109/ICIP.2016.7532915
+e7e8c0bbee09b5af6f7df1de8f0f26da992737c4,https://doi.org/10.1109/IJCNN.2011.6033417
+e7b7df786cf5960d55cbac4e696ca37b7cee8dcd,https://doi.org/10.1109/IJCNN.2012.6252728
+cba090a5bfae7dd8a60a973259f0870ed68c4dd3,http://doi.ieeecomputersociety.org/10.1109/ISM.2017.22
+cb4d3d1b8fbb6df71a184dd8f00f89f84fa8373b,http://doi.ieeecomputersociety.org/10.1109/IJCNN.2009.5179002
+cb992fe67f0d4025e876161bfd2dda467eaec741,https://doi.org/10.1109/IPTA.2015.7367144
+cbc2de9b919bc63590b6ee2dfd9dda134af45286,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477561
+cbf3e848c5d2130dd640d9bd546403b8d78ce0f9,https://doi.org/10.1109/IJCNN.2012.6252385
+cbe1df2213a88eafc5dcaf55264f2523fe3ec981,http://doi.ieeecomputersociety.org/10.1109/WACV.2015.34
+cb4d8cef8cec9406b1121180d47c14dfef373882,https://doi.org/10.1109/ICPR.2014.301
+cb7a743b9811d20682c13c4ee7b791ff01c62155,https://doi.org/10.1109/MMSP.2015.7340789
+cb9921d5fc4ffa50be537332e111f03d74622442,https://doi.org/10.1007/978-3-319-46654-5_79
+cbaa17be8c22e219a9c656559e028867dfb2c2ed,https://doi.org/10.1109/ICIP.2016.7532636
+cb160c5c2a0b34aba7b0f39f5dda6aca8135f880,https://doi.org/10.1109/SIU.2016.7496023
+f839ae810338e3b12c8e2f8db6ce4d725738d2d9,http://doi.ieeecomputersociety.org/10.1109/FG.2017.115
+f888c165f45febf3d17b8604a99a2f684d689cbc,http://doi.ieeecomputersociety.org/10.1109/CIT.2004.1357196
+f812347d46035d786de40c165a158160bb2988f0,https://doi.org/10.1007/s10339-016-0765-6
+f856532a729bd337fae1eb7dbe55129ae7788f45,http://doi.ieeecomputersociety.org/10.1109/ARTCom.2009.26
+f88ce52c5042f9f200405f58dbe94b4e82cf0d34,https://doi.org/10.1109/TNNLS.2015.2508025
+f8fe1b57347cdcbea755722bf1ae85c4b26f3e5c,https://doi.org/10.1007/s00138-016-0790-6
+f86c6942a7e187c41dd0714531efd2be828e18ad,https://doi.org/10.1109/VCIP.2016.7805514
+f834c50e249c9796eb7f03da7459b71205dc0737,https://doi.org/10.1109/TIP.2011.2166974
+cead57f2f7f7b733f4524c4b5a7ba7f271749b5f,http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2017.46
+cefaad8241bceb24827a71bf7c2556e458e57faa,https://doi.org/10.1109/TIP.2013.2264676
+ce3304119ba6391cb6bb25c4b3dff79164df9ac6,https://doi.org/10.1016/j.imavis.2016.03.004
+ce8db0fe11e7c96d08de561506f9f8f399dabbb2,https://doi.org/10.1109/ICIP.2015.7351677
+ce11b2d7905d2955c4282db5b68482edb846f29f,http://doi.acm.org/10.1145/3126686.3126705
+ce30ddb5ceaddc0e7d308880a45c135287573d0e,https://doi.org/10.1109/ICSMC.2012.6378304
+e0fe68c92fefa80992f4861b0c45a3fbec7cf1c9,http://doi.ieeecomputersociety.org/10.1109/ACII.2015.7344671
+e084b0e477ee07d78c32c3696ea22c94f5fdfbec,https://doi.org/10.1109/ICIP.2013.6738565
+e0cc2a9fe6b5086c55fdbf0021aca3dc1a77a1ca,http://doi.ieeecomputersociety.org/10.1109/BLISS.2008.25
+e0ab926cd48a47a8c7b16e27583421141f71f6df,https://doi.org/10.1109/HPCSim.2016.7568383
+e0423788eb91772de9d708a17799179cf3230d63,http://doi.acm.org/10.1145/3093241.3093277
+e03f69bad7e6537794a50a99da807c9df4ff5186,http://doi.acm.org/10.1145/2708463.2709060
+e0793fd343aa63b5f366c8ace61b9c5489c51a4d,http://doi.ieeecomputersociety.org/10.1109/CRV.2016.46
+465faf9974a60da00950be977f3bc2fc3e56f5d2,http://doi.ieeecomputersociety.org/10.1109/ACII.2017.8273631
+46b2ecef197b465abc43e0e017543b1af61921ac,https://doi.org/10.1109/ICPR.2016.7899652
+464ef1b3dcbe84099c904b6f9e9281c5f6fd75eb,https://doi.org/10.1109/TIP.2014.2359765
+4672513d0dbc398719d66bba36183f6e2b78947b,https://doi.org/10.1016/j.ipm.2015.05.007
+46c1af268d4b3c61a0a12be091ca008a3a60e4cd,https://doi.org/10.1007/s11042-016-3592-y
+2cf3564d7421b661e84251d280d159d4b3ebb336,https://doi.org/10.1109/BTAS.2014.6996287
+2c6ab32a03c4862ee3e2bc02e7e74745cd523ad2,https://doi.org/10.1109/IC3.2013.6612218
+2ca10da4b59b406533ad1dc7740156e01782658f,https://doi.org/10.1109/SIU.2016.7496207
+2cd426f10178bd95fef3dede69ae7b67e73bb70c,https://doi.org/10.1109/ROBIO.2016.7866457
+2c06781ba75d51f5246d65d1acf66ab182e9bde6,https://doi.org/10.1016/j.imavis.2016.11.002
+2ce84465b9759166effc7302c2f5339766cc523d,https://doi.org/10.1109/VCIP.2015.7457830
+798e58c181f3ba3aecbe41acd1881860c5e2df3a,https://doi.org/10.1109/TNNLS.2012.2237038
+7935f644c8044c0d3b81e2842e5ecc3672698bbb,https://doi.org/10.1109/ICIP.2011.6116258
+79fd4baca5f840d6534a053b22e0029948b9075e,https://doi.org/10.1109/ISDA.2012.6416647
+2d94dfa9c8f6708e071ef38d58f9f9bcb374cd84,https://doi.org/10.1109/CVPRW.2011.5981817
+2debdb6a772312788251cc3bd1cb7cc8a6072214,https://doi.org/10.1142/S0218001415560157
+2d411826cd7865638b65e1b5f92043c245f009f9,http://doi.acm.org/10.1145/2733373.2806239
+2d79dece7890121469f515a6e773ba0251fc2d98,https://doi.org/10.1109/ICIP.2017.8296756
+2df4d0c06f4f68060cecbbb8e2088d9c6b20d04f,https://doi.org/10.1109/ICIP.2014.7026056
+2d2fb01f761d21a459cfb34935bc47ab45a9913b,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2014.2346515
+41e5d92b13d36da61287c7ffd77ee71de9eb2942,https://doi.org/10.1016/j.asoc.2016.12.033
+41781474d834c079e8fafea154d7916b77991b15,http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2017.60
+417c2fa930bb7078fdf10cb85c503bd5270b9dc2,https://doi.org/10.1109/ICSIPA.2015.7412169
+414fdfe5f2e4f32a59bf15062b6e524cbf970637,https://doi.org/10.1109/TIFS.2014.2361028
+83b54b8c97dc14e302dad191327407ec0d5fb4a6,https://doi.org/10.1109/ICIP.2017.8296913
+8383faea09b4b4bef8117a1da897495ebd68691b,https://doi.org/10.1109/TCYB.2015.2493538
+838dad9d1d68d29be280d92e69410eaac40084bc,https://doi.org/10.1109/HPCSim.2014.6903749
+83d50257eb4c0aa8d16d27bf2ee8d0614fd63bf6,http://doi.ieeecomputersociety.org/10.1109/FG.2015.7284834
+83bce0907937f09f5ccde26c361d52fe55fc8979,http://doi.acm.org/10.1145/2993148.2993185
+1b8541ec28564db66a08185510c8b300fa4dc793,https://doi.org/10.1109/LSP.2015.2499778
+1b211f8221162ce7ef212956b637b50e30ad48f4,https://doi.org/10.1109/ICIP.2016.7532925
+1b6c65442f2b572fb6c8fc9a7d5ae49a8e6d32ab,http://doi.ieeecomputersociety.org/10.1109/ICPR.2006.537
+1b29f23f3517ac5bbe9bf5e80cda741b61bb9b12,https://doi.org/10.1016/j.patcog.2017.01.007
+1b4b3d0ce900996a6da8928e16370e21d15ed83e,https://doi.org/10.1109/BigDataService.2017.38
+1b9976fea3c1cf13f0a102a884f027d9d80a14b3,https://doi.org/10.1109/ROMAN.2014.6926354
+1ba9d12f24ac04f0309e8ff9b0162c6e18d97dc3,http://doi.acm.org/10.1145/2964284.2984061
+1bcb1c6d6cebc9737f9933fcefbf3da8a612f994,https://doi.org/10.1016/j.jvcir.2017.10.008
+1bd8ab47177997acb3b0cca4b6a801e6e6ec3eac,https://doi.org/10.1109/ICIP.2014.7025273
+1b2d9a1c067f692dd48991beff03cd62b9faebf2,https://doi.org/10.1109/ICIP.2011.6116302
+7782627fa2e545276996ff9e9a1686ac496df081,http://doi.acm.org/10.1145/2663204.2666276
+771a6a80dd08212d83a4e976522e1ce108881401,https://doi.org/10.1109/IPTA.2016.7820979
+77223849321d57a03e0571a08e71eba06e38834a,http://doi.ieeecomputersociety.org/10.1109/EMS.2011.20
+77c5437107f8138d48cb7e10b2b286fa51473678,https://doi.org/10.1109/URAI.2016.7734005
+77c3574a020757769b2ca807ff4b95a88eaa2a37,https://doi.org/10.1109/MSP.2015.2410783
+77cea27494499dd162221d1476bf70a87391790a,https://doi.org/10.1109/VCIP.2015.7457930
+77816b9567d5fed1f6085f33e1ddbcc73af2010e,https://doi.org/10.1109/MRA.2012.2201574
+778c1e95b6ea4ccf89067b83364036ab08797256,https://doi.org/10.1109/TIFS.2012.2224866
+7753e3b9e158289cbaa22203166424ca9c229f68,http://doi.ieeecomputersociety.org/10.1109/ICDM.2014.29
+77869f274d4be4d4b4c438dbe7dff4baed521bd8,https://doi.org/10.1109/TIP.2016.2551362
+773ce00841a23d32727aa1f54c29865fefd4ce02,http://doi.ieeecomputersociety.org/10.1109/AIPR.2006.24
+772474b5b0c90629f4d9c223fd9c1ef45e1b1e66,https://doi.org/10.1109/BTAS.2017.8272716
+480858e55abdbc07ca47b7dc10204613fdd9783c,https://doi.org/10.1109/ICPR.2014.786
+48a6a1c6a0ac5f2b7912b3ccb40b0c07f62ddfdf,https://doi.org/10.1016/j.imavis.2015.12.003
+489b7e12a420eff0d585f3f866e76b838c2cd275,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477675
+48dcf45a1e38adbb9826594f7ffaa5e95ef78395,https://doi.org/10.1109/VCIP.2017.8305111
+48db8bf18e2f6f19e07e88384be855c8b7ea0ead,http://doi.acm.org/10.1145/2964284.2967225
+4848a48a2b8bacd2092e87961cd86818da8e7151,https://doi.org/10.1109/VCIP.2017.8305080
+48255c9e1d6e1d030728d33a71699757e337be08,https://doi.org/10.1109/ISSNIP.2013.6529832
+48906f609446afcdaacbe1d65770d7a6165a8eee,https://doi.org/10.1007/s12559-017-9482-4
+486f5e85944404a1b57333443070b0b8c588c262,http://doi.ieeecomputersociety.org/10.1109/IRI.2014.7051957
+7049187c5155d9652747413ce1ebc8dbb209fd69,https://doi.org/10.1109/ICPR.2016.7899808
+70769def1284fe88fd57a477cde8a9c9a3dff13f,https://doi.org/10.1016/j.neucom.2006.10.036
+70341f61dfe2b92d8607814b52dfd0863a94310e,http://doi.ieeecomputersociety.org/10.1109/AVSS.2015.7301750
+70444627cb765a67a2efba17b0f4b81ce1fc20ff,https://doi.org/10.1109/TNNLS.2016.2609434
+70516aede32cf0dbc539abd9416c44faafc868bd,https://doi.org/10.1109/MICAI.2013.16
+7081958a390d3033f5f33e22bbfec7055ea8d601,https://doi.org/10.1109/MCI.2015.2437318
+70d8bda4aafb0272ac4b93cd43e2448446b8e94d,https://doi.org/10.1109/ICMLC.2010.5580938
+705e086bb666d129a6969882cfa49282116a638e,https://doi.org/10.1109/TNNLS.2014.2376963
+70d0bffa288e317bc62376f4f577c5bd7712e521,https://doi.org/10.1049/iet-cvi.2012.0094
+1e2770ce52d581d9a39642b40bfa827e3abf7ea2,http://doi.acm.org/10.1145/2425333.2425362
+1eb48895d86404251aa21323e5a811c19f9a55f9,http://doi.ieeecomputersociety.org/10.1109/CIS.2015.22
+1e8fd77d4717e9cb6079e10771dd2ed772098cb3,http://doi.ieeecomputersociety.org/10.1109/ICMEW.2016.7574681
+1e62ca5845a6f0492574a5da049e9b43dbeadb1b,https://doi.org/10.1109/LSP.2016.2637400
+1e344b99583b782e3eaf152cdfa15f217b781181,http://doi.acm.org/10.1145/2499788.2499789
+1eb9c859ff7537182a25556635954bcd11830822,https://doi.org/10.1109/ICDSP.2015.7252004
+1ef6ad9e1742d0b2588deaf506ef83b894fb9956,https://doi.org/10.1007/s12193-016-0213-z
+1ed617d14dbc53b20287d3405b14c68d8dad3965,https://doi.org/10.1109/TCYB.2016.2582918
+1ed49161e58559be399ce7092569c19ddd39ca0b,https://doi.org/10.1109/ICPR.2016.7899973
+1eb1fdc5c933d2483ba1acbfa8c457fae87e71e5,https://doi.org/10.1109/ICPR.2016.7899945
+1ea4347def5868c622d7ce57cbe171fa68207e2b,https://doi.org/10.1007/978-3-642-41181-6_23
+84f3c4937cd006888b82f2eb78e884f2247f0c4e,https://doi.org/10.1109/CCNC.2012.6181097
+84be18c7683417786c13d59026f30daeed8bd8c9,https://doi.org/10.1007/s00138-016-0755-9
+84f86f8c559a38752ddfb417e58f98e1f8402f17,http://doi.ieeecomputersociety.org/10.1109/EST.2013.10
+844e3e6992c98e53b45e4eb88368d0d6e27fc1d6,https://doi.org/10.1109/ICIP.2014.7026057
+84ae55603bffda40c225fe93029d39f04793e01f,https://doi.org/10.1109/ICB.2016.7550066
+84ec0983adb8821f0655f83b8ce47f36896ca9ee,https://doi.org/10.1109/SMC.2017.8122985
+4aa27c1f8118dbb39809a0f79a28c0cbc3ede276,http://doi.acm.org/10.1145/2683483.2683530
+4a03f07397c5d32463750facf010c532f45233a5,http://doi.ieeecomputersociety.org/10.1109/T-AFFC.2012.32
+4aea1213bdb5aa6c74b99fca1afc72d8a99503c6,https://doi.org/10.1109/ICDIM.2010.5664688
+4a7e5a0f6a0df8f5ed25ef356cd67745cd854bea,https://doi.org/10.1007/978-3-642-14922-1_68
+243cd27dce38fd756a840b397c28ad21cfb78897,https://doi.org/10.1049/iet-ipr.2013.0003
+24b5ea4e262e22768813e7b6581f60e4ab9a8de7,https://doi.org/10.1109/TIFS.2018.2807791
+244293024aebbb0ff42a7cf2ba49b1164697a127,https://doi.org/10.1109/BTAS.2016.7791187
+24eeb748a5e431510381ec7c8253bcb70eff8526,https://doi.org/10.1109/TIP.2017.2746270
+2400c4994655c4dd59f919c4d6e9640f57f2009f,https://doi.org/10.1109/IPTA.2015.7367096
+24e82eaf3257e761d6ca0ffcc2cbca30dfca82e9,https://doi.org/10.1109/GlobalSIP.2016.7906030
+24b637c98b22cd932f74acfeecdb50533abea9ae,https://doi.org/10.1109/TIP.2015.2492819
+24205a60cbf1cc12d7e0a9d44ed3c2ea64ed7852,http://doi.ieeecomputersociety.org/10.1109/FG.2017.30
+24e42e6889314099549583c7e19b1cb4cc995226,https://doi.org/10.1109/ACPR.2011.6166646
+24f3dfeb95bdecdc604d630acdfcafa1dc7c9124,http://doi.acm.org/10.1145/2994258.2994270
+245d98726674297208e76308c3a11ce3fc43bee2,https://doi.org/10.1007/s11042-015-2699-x
+2348f1fa2940b01ec90e023fac8cc96812189774,http://doi.ieeecomputersociety.org/10.1109/EWDTS.2017.8110157
+2360ecf058393141ead1ca6b587efa2461e120e4,https://doi.org/10.1007/s00138-017-0895-6
+235a347cb96ef22bf35b4cf37e2b4ee5cde9df77,http://doi.ieeecomputersociety.org/10.1109/DICTA.2008.13
+23ecc496eaa238ac884e6bae5763f6138a9c90a3,https://doi.org/10.1109/ICB.2016.7550085
+2336de3a81dada63eb00ea82f7570c4069342fb5,http://doi.acm.org/10.1145/2361407.2361428
+235bebe7d0db37e6727dfa1246663be34027d96b,https://doi.org/10.1109/NAFIPS.2016.7851625
+2340d810c515dc0c9fd319f598fa8012dc0368a0,https://doi.org/10.1109/AFGR.2008.4813420
+23675cb2180aac466944df0edda4677a77c455cd,http://doi.ieeecomputersociety.org/10.1109/IIH-MSP.2009.142
+4ffd744a5f079c2d65f36e3ee0979b978f522a13,http://doi.ieeecomputersociety.org/10.1109/SITIS.2009.15
+4fbc0189252ed4fe8f9cffd3ea0ebbb0c621e3ef,https://doi.org/10.1049/iet-cvi.2012.0127
+4f742c09ce12859b20deaa372c8f1575acfc99c9,https://doi.org/10.1016/j.neucom.2017.01.020
+4f03ba35440436cfa06a2ed2a571fea01cb36598,https://doi.org/10.1109/SPAC.2017.8304260
+4fac09969ee80d485876e3198c7177181c600a4a,http://doi.ieeecomputersociety.org/10.1109/CRV.2015.32
+4f3b652c75b1d7cf4997e0baaef2067b61e3a79b,http://doi.ieeecomputersociety.org/10.1109/ICME.2016.7552910
+8dd3f05071fd70fb1c349460b526b0e69dcc65bf,https://doi.org/10.1109/TIP.2017.2726010
+8d3e95c31c93548b8c71dbeee2e9f7180067a888,https://doi.org/10.1109/ICPR.2016.7899841
+8db9188e5137e167bffb3ee974732c1fe5f7a7dc,https://doi.org/10.1109/TIP.2016.2612885
+8db609d84190b905913eb2f17f4e558c6e982208,http://doi.ieeecomputersociety.org/10.1109/ICIG.2011.182
+15ef449ac443c494ceeea8a9c425043f4079522e,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477583
+157647b0968d95f9288b27d6d9179a8e1ef5c970,https://doi.org/10.1049/iet-bmt.2014.0086
+15ef65fd68d61f3d47326e358c446b0f054f093a,https://doi.org/10.1109/MLSP.2017.8168180
+1584edf8106e8f697f19b726e011b9717de0e4db,https://doi.org/10.1049/iet-cvi.2015.0350
+15a9f812e781cf85c283f7cf2aa2928b370329c5,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2469281
+158aa18c724107587bcc4137252d0ba10debf417,https://doi.org/10.1109/ACSSC.2016.7869522
+159b1e3c3ed0982061dae3cc8ab7d9b149a0cdb1,https://doi.org/10.1109/TIP.2017.2694226
+152683f3ac99f829b476ea1b1b976dec6e17b911,https://doi.org/10.1109/MIXDES.2016.7529773
+159caaa56c2291bedbd41d12af5546a7725c58d4,https://doi.org/10.1109/ICIP.2016.7532910
+15fbb5fc3bdd692a6b2dd737cce7f39f7c89a25c,https://doi.org/10.1109/TMM.2011.2167317
+15e12d5c4d80a2b6f4d957a3ffd130564e9bab3a,https://doi.org/10.5220/0004736505740580
+1277b1b8b609a18b94e4907d76a117c9783a5373,http://doi.ieeecomputersociety.org/10.1109/ASONAM.2016.7752438
+12c4ba96eaa37586f07be0d82b2e99964048dcb5,https://doi.org/10.1109/LSP.2017.2694460
+122f52fadd4854cf6c9287013520eced3c91e71a,https://doi.org/10.1109/TIP.2016.2515987
+1280b35e4a20036fcfd82ee09f45a3fca190276f,http://doi.ieeecomputersociety.org/10.1109/iThings-GreenCom-CPSCom-SmartData.2017.166
+1252727e8096f48096ef89483d30c3a74500dd15,https://doi.org/10.1007/s00138-016-0746-x
+126204b377029feb500e9b081136e7a9010e3b6b,http://doi.ieeecomputersociety.org/10.1109/ICDMW.2010.50
+126076774da192d4d3f4efcd1accc719ee5f9683,https://doi.org/10.1109/SIU.2012.6204774
+120b9c271c3a4ea0ad12bbc71054664d4d460bc3,https://doi.org/10.1109/DICTA.2015.7371259
+12b533f7c6847616393591dcfe4793cfe9c4bb17,https://doi.org/10.1109/TIFS.2017.2765519
+8cd9475a3a1b2bcccf2034ce8f4fe691c57a4889,http://doi.ieeecomputersociety.org/10.1109/FG.2017.138
+8cffe360a05085d4bcba111a3a3cd113d96c0369,http://doi.ieeecomputersociety.org/10.1109/ICCV.2011.6126248
+8c85ef961826575bc2c2f4da7784bc3bfcf8b188,https://doi.org/10.1109/ICIP.2015.7350871
+8c50869b745fc094a4fb1b27861934c3c14d7199,https://doi.org/10.1109/EMBC.2016.7591826
+8cedb92694845854f3ad0daf6c9adb6b81c293de,http://doi.acm.org/10.1145/1839379.1839431
+8c3f7bd8ae50337dd812b370ce4c4ea9375a9f58,https://doi.org/10.1109/ICIP.2014.7025276
+8c37bd06e1a637c6f249dcd1d2c4bc9589ae24b3,https://doi.org/10.1007/11608288_28
+8c2b663f8be1702ed3e377b5e6e85921fe7c6389,https://doi.org/10.1109/IPTA.2016.7821006
+8cd0855ca967ce47b0225b58bbadd38d8b1b41a1,https://doi.org/10.1109/TIP.2017.2721106
+8c048be9dd2b601808b893b5d3d51f00907bdee0,https://doi.org/10.1631/FITEE.1600041
+85785ae222c6a9e01830d73a120cdac75d0b838a,https://doi.org/10.1007/978-3-319-11782-9
+85567174a61b5b526e95cd148da018fa2a041d43,https://doi.org/10.1109/TMM.2016.2515367
+8576d0031f2b0fe1a0f93dd454e73d48d98a4c63,http://doi.acm.org/10.1145/2522848.2531743
+8598d31c7ca9c8f5bb433409af5e472a75037b4d,https://doi.org/10.1109/JPROC.2008.916364
+85f27ec70474fe93f32864dd03c1d0f321979100,https://doi.org/10.1109/IJCNN.2014.6889381
+85f7f03b79d03da5fae3a7f79d9aac228a635166,https://doi.org/10.1109/WACV.2009.5403085
+85205914a99374fa87e004735fe67fc6aec29d36,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2392774
+1ddea58d04e29069b583ac95bc0ae9bebb0bed07,https://doi.org/10.1109/KSE.2015.50
+1dabb080e3e968633f4b3774f19192f8378f5b67,https://doi.org/10.1109/ICPR.2016.7899664
+1d10010ea7af43d59e1909d27e4e0e987264c667,https://doi.org/10.1016/j.neunet.2004.06.006
+1dae2f492d3ca2351349a73df6ee8a99b05ffc30,https://doi.org/10.1137/110842570
+1da1299088a6bf28167c58bbd46ca247de41eb3c,https://doi.org/10.1109/ICASSP.2002.5745055
+71d786fdb563bdec6ca0bbf69eba8e3f37c48c6f,https://doi.org/10.1109/SMC.2016.7844680
+710c3aaffef29730ffd909a63798e9185f488327,https://doi.org/10.1109/ICPR.2016.7900095
+71a9d7cf8cf1e206cb5fa18795f5ab7588c61aba,https://doi.org/10.1109/TIM.2011.2141270
+71e95c3a31dceabe9cde9f117615be8bf8f6d40e,https://doi.org/10.1109/ICIP.2010.5653024
+71f07c95a2b039cc21854c602f29e5be053f2aba,https://doi.org/10.1007/s00138-010-0250-7
+7123e510dea783035b02f6c35e35a1a09677c5ab,https://doi.org/10.1109/ICPR.2016.7900297
+715d3eb3665f46cd2fab74d35578a72aafbad799,http://doi.ieeecomputersociety.org/10.1109/WI-IAT.2013.118
+7177649ece5506b315cb73c36098baac1681b8d2,http://doi.ieeecomputersociety.org/10.1109/FG.2017.130
+71d68af11df855f886b511e4fc1635c1e9e789b0,https://doi.org/10.1109/TCSVT.2011.2133210
+71bbda43b97e8dc8b67b2bde3c873fa6aacd439f,https://doi.org/10.1016/j.patcog.2015.09.012
+7196b3832065aec49859c61318037b0c8c12363a,https://doi.org/10.1007/s11432-014-5151-3
+71f9861df104b90399dc15e12bbb14cd03f16e0b,http://doi.ieeecomputersociety.org/10.1109/CGIV.2009.7
+7644b3a0871b8e0e7e1cdf06099e295f1e5fbdf7,https://doi.org/10.1007/s11063-015-9464-z
+76669f166ddd3fb830dbaacb3daa875cfedc24d9,https://doi.org/10.1109/ICPR.2016.7899840
+76dff7008d9b8bf44ec5348f294d5518877c6182,https://doi.org/10.1016/j.imavis.2014.09.004
+76640cb1a683a479ce2e0d6681d821ff39126d63,https://doi.org/10.1109/IJCNN.2011.6033408
+76a52ebfc5afd547f8b73430ec81456cf25ddd69,http://doi.ieeecomputersociety.org/10.1109/AIPR.2014.7041914
+76d1c6c6b67e67ced1f19a89a5034dafc9599f25,http://doi.acm.org/10.1145/2590296.2590315
+761304bbd259a9e419a2518193e1ff1face9fd2d,https://doi.org/10.1007/978-3-642-33885-4_57
+1ca1b4f787712ede215030d22a0eea41534a601e,https://doi.org/10.1109/CVPRW.2010.5543609
+1cb0c11620bde2734c1a428c789158ffff0d6c7b,http://doi.ieeecomputersociety.org/10.1109/BigMM.2016.62
+1c5a5d58a92c161e9ba27e2dfe490e7caaee1ff5,http://doi.ieeecomputersociety.org/10.1109/FG.2015.7163119
+1ce29d6b820ed4a24da27b76ffd9605d5b3b10b5,https://doi.org/10.1016/j.imavis.2015.01.007
+1cfe8c1d341dbf8cc43040b37ca3552385adb10b,http://doi.acm.org/10.1145/2461466.2461473
+82e1692467969940a6d6ac40eae606b8b4981f7e,https://doi.org/10.1109/ICMEW.2012.56
+8274069feeff6392b6c5d45d8bfaaacd36daedad,http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019312
+826015d9ade1637b3fcbeca071e3137d3ac1ef56,https://doi.org/10.1109/WACV.2017.84
+828d7553a45eb0c3132e406105732a254369eb4d,https://doi.org/10.1016/j.neunet.2017.09.001
+82953e7b3d28ccd1534eedbb6de7984c59d38cd4,https://doi.org/10.1109/TNNLS.2014.2356856
+8229f2735a0db0ad41f4d7252129311f06959907,https://doi.org/10.1109/TIP.2011.2106794
+82dad0941a7cada11d2e2f2359293fe5fabf913f,https://doi.org/10.1109/ICIP.2017.8296810
+493bc7071e35e7428336a515d1d26020a5fb9015,https://doi.org/10.1109/ACSSC.2013.6810420
+4958c06da5581fd0b4904d3bf0ee09958ecdba5b,https://doi.org/10.1016/j.knosys.2016.12.005
+4909ed22b1310f1c6f2005be5ce3349e3259ff6a,https://doi.org/10.1109/ROBIO.2009.4913106
+49e4f05fa98f63510de76e7abd8856ff8db0f38d,http://doi.ieeecomputersociety.org/10.1109/FG.2017.110
+4932b929a2e09ddebedcb1abe8c62f269e7d4e33,https://doi.org/10.1109/SIU.2016.7496076
+492116d16a39eb54454c7ffb1754cea27ad3a171,http://doi.acm.org/10.1145/3132525.3134823
+496f3d14cf466f054d395a3c71fa2cd6a3dda61d,http://doi.acm.org/10.1145/3009977.3010055
+49fdafef327069516d887d8e69b5e96c983c3dd0,https://doi.org/10.1109/DICTA.2017.8227433
+496d62741e8baf3859c24bb22eaccd3043322126,http://doi.ieeecomputersociety.org/10.1109/TKDE.2017.2728531
+49fe4f387ac7e5852a78b327ec42cc7300c5f8e0,https://doi.org/10.1007/s11042-014-2055-6
+4033ac52dba394e390a86cd149b9838f1d7834b5,https://doi.org/10.1109/ICMLC.2012.6359009
+4014d74e8f5ea4d76c2c1add81d0c88d6e342478,http://doi.acm.org/10.1145/3136755.3143010
+4014e8c1a1b49ad2b9b2c45c328ec9f1fd56f676,https://doi.org/10.1109/IJCNN.2017.7966191
+4097fef623185557bb1842501cfdc97f812fc66d,http://doi.acm.org/10.1145/3126686.3126755
+40dd736c803720890d6bfc1e083f6050e35d8f7a,http://doi.acm.org/10.1145/3139958.3140055
+40f06e5c052d34190832b8c963b462ade739cbf0,https://doi.org/10.1109/ICNC.2010.5583821
+405cf40f3ce74210f7e9862b2b828ce002b409ed,https://doi.org/10.1109/IJCNN.2017.7966244
+407a26fff7fac195b74de9fcb556005e8785a4e9,http://doi.ieeecomputersociety.org/10.1109/FG.2017.29
+2e36b63fdf1353425a57a0665b0d0274efe92963,http://doi.acm.org/10.1145/3152771.3156179
+2e5d173ee0d1d7f88c335ade6a7b879b2d987ab4,https://doi.org/10.1109/ICASSP.2015.7178367
+2e535b8cd02c2f767670ba47a43ad449fa1faad7,https://doi.org/10.1109/MSP.2017.2740460
+2ed7d95588200c8c738c7dd61b8338538e04ea30,https://doi.org/10.1109/ICIP.2010.5654063
+2ee1ba1c3d4797fdae46d3d5f01db7ef5903dadd,https://doi.org/10.1016/j.neucom.2015.07.031
+2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d,https://doi.org/10.1109/CVPRW.2011.5981801
+2ef1b1b5ed732634e005df779fd9b21da0ffe60c,https://doi.org/10.1016/j.image.2017.03.012
+2e5b160892b70a1e846aa9dcdf132b8011937ec6,https://doi.org/10.1109/LSP.2017.2689921
+2e27667421a7eeab278e0b761db4d2c725683c3f,https://doi.org/10.1007/s11042-013-1815-z
+2e6776cd582c015b46faf616f29c98ce9cff51a2,https://doi.org/10.1109/TNN.2005.860849
+2e12c5ea432004de566684b29a8e148126ef5b70,https://doi.org/10.1007/s12193-015-0204-5
+2b286ed9f36240e1d11b585d65133db84b52122c,http://doi.acm.org/10.1145/3130800.3130837
+2babf665198a91932a4ce557f627c28e7e8f31f2,http://doi.acm.org/10.1145/3009977.3010004
+2b300985a507533db3ec9bd38ade16a32345968e,https://doi.org/10.1007/s11042-015-3070-y
+2b5005c2abf2d9a8c16afa50306b6959dfc72275,https://doi.org/10.1109/ICARCV.2010.5707216
+2b0d14dbd079b3d78631117b1304d6c1579e1940,https://doi.org/10.1007/s11063-016-9524-z
+2b43100a13811b33cc9f905fa1334bfd8b1873ba,https://doi.org/10.1109/IVCNZ.2015.7761564
+2b2924af7ec219bd1fadcbd2c57014ed54efec86,http://doi.ieeecomputersociety.org/10.1109/SSIAI.2014.6806053
+2be9284d531b8c573a4c39503ca50606446041a3,https://doi.org/10.1109/ICIP.2005.1530004
+2be24e8a3f2b89bdaccd02521eff3b7bb917003e,http://doi.ieeecomputersociety.org/10.1109/WACV.2015.96
+47cd161546c59ab1e05f8841b82e985f72e5ddcb,https://doi.org/10.1109/ICIP.2017.8296552
+47109343e502a4097cb7efee54bc5fbb14598c05,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.182
+4786638ffb3b2fb385cec80720cc6e7c3588b773,https://doi.org/10.1007/s11042-015-2598-1
+471bef061653366ba66a7ac4f29268e8444f146e,https://doi.org/10.1109/SMC.2015.524
+47fb74785fbd8870c2e819fc91d04b9d9722386f,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.161
+47d07217c501644d63adfec740346f244abaaae8,https://doi.org/10.1016/j.patcog.2016.05.017
+78b457f8b1ba4fbd1c50c32ec1f02f4f58764ad7,http://doi.ieeecomputersociety.org/10.1109/WACV.2015.99
+78d4d861c766af2a8da8855bece5da4e6eed2e1c,http://doi.acm.org/10.1145/3129416.3129455
+78e1798c3077f4f8a4df04ca35cd73f82e9a38f3,http://ieeexplore.ieee.org/document/6460640/
+78f244dc2a171944836a89874b8f60e9fe80865d,http://doi.ieeecomputersociety.org/10.1109/ICIG.2011.181
+780c8a795baca1ba4cb4956cded877dd3d1ca313,http://doi.ieeecomputersociety.org/10.1109/ISSPIT.2013.6781879
+789b8fff223b0db0fe3babf46ea98b1d5197f0c0,https://doi.org/10.1002/ima.20245
+785eeac2e236a85a45b4e0356c0745279c31e089,https://doi.org/10.1109/TIFS.2014.2359543
+7813d405450013bbdb0b3a917319d5964a89484a,https://doi.org/10.1109/WACV.2017.62
+789a43f51e0a3814327dab4299e4eda8165a5748,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.189
+782eee555067b2d6d24db87775e1ded5fb047491,https://doi.org/10.1109/MMSP.2008.4665158
+8be60114634caa0eff8566f3252cb9a1b7d5ef10,http://doi.ieeecomputersociety.org/10.1109/ICME.2014.6890133
+8b4124bb68e5b3e6b8b77888beae7350dc594a40,https://doi.org/10.1109/ICSMC.2005.1571395
+8bf945166305eb8e304a9471c591139b3b01a1e1,https://doi.org/10.1109/ACCESS.2017.2756451
+8b1fa60b9164b60d1ca2705611fab063505a3ef5,http://doi.ieeecomputersociety.org/10.1109/ICMEW.2013.6618337
+8b3c867e67b263d7a0577a112173a64009a3b4ba,https://doi.org/10.1109/ICIP.2010.5652374
+8b1f697d81de1245c283b4f8f055b9b76badfa66,https://doi.org/10.1142/S0218126616500171
+13907865a97afde053d7bb7134d58a7bbc12043c,https://doi.org/10.1016/j.patcog.2014.05.001
+134cea33099cafc6615e57437e29d7c3906a2b48,http://doi.ieeecomputersociety.org/10.1109/ICETET.2010.80
+136aae348c7ebc6fd9df970b0657241983075795,https://doi.org/10.1109/ICIP.2015.7351542
+13f065d4e6dfe2a130bd64d73eee97d10d9f7d33,https://doi.org/10.1109/DICTA.2015.7371222
+13901473a12061f080b9d54219f16db7d406e769,https://doi.org/10.1109/TIP.2012.2222895
+7f9be0e08784835de0f8bc3a82fcca02b3721dc1,https://doi.org/10.1109/IJCNN.2014.6889744
+7f415aee0137acab659c664eb1dff15f7b726bdd,https://doi.org/10.1109/TCSVT.2014.2302522
+7f5346a169c9784ca79aca5d95ae8bf2ebab58e3,https://doi.org/10.1109/ICIP.2015.7351304
+7f4040b482d16354d5938c1d1b926b544652bf5b,http://doi.acm.org/10.1145/2502081.2502115
+7f8d2d7eaa03132caefe0f3b126b5b369a712c9d,http://doi.ieeecomputersociety.org/10.1109/ACHI.2009.33
+7fa00c81f7c2d8da1551334b0e7bc3d7fd43130c,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2014.2353635
+7fcd03407c084023606c901e8933746b80d2ad57,https://doi.org/10.1109/BTAS.2017.8272694
+7f8cef6ba2f059e465b1b23057a6dbb23fba1c63,https://doi.org/10.1109/TCSVT.2016.2539541
+7f1078a2ebfa23a58adb050084d9034bd48a8a99,https://doi.org/10.1007/s00371-015-1169-9
+7a595800b490ff437ab06fe7612a678d5fe2b57d,https://doi.org/10.1109/MMSP.2009.5293285
+7a09e8f65bd85d4c79f0ae90d4e2685869a9894f,https://doi.org/10.1109/TMM.2016.2551698
+7a6e3ed956f71b20c41fbec008b1fa8dacad31a6,http://doi.ieeecomputersociety.org/10.1109/FG.2015.7163117
+7a91617ec959acedc5ec8b65e55b9490b76ab871,https://doi.org/10.1109/RAIT.2012.6194481
+7a666a91a47da0d371a9ba288912673bcd5881e4,https://doi.org/10.1016/j.patrec.2009.05.011
+7adfc2f854e2ea45c29d22d6e2dcccdd527f46a8,https://doi.org/10.1007/s00138-015-0677-y
+7a94936ce558627afde4d5b439ec15c59dbcdaa4,https://doi.org/10.1007/s11263-013-0665-5
+14d7bce17265738f10f48987bb7bffb3eafc676e,http://ieeexplore.ieee.org/document/7514504/
+143571c2fc9b1b69d3172f8a35b8fad50bc8202a,https://doi.org/10.1016/j.neucom.2014.07.066
+142e233adceed9171f718a214a7eba8497af4324,https://doi.org/10.1109/IJCNN.2014.6889504
+14efb131bed66f1874dd96170f714def8db45d90,http://doi.acm.org/10.1145/2818346.2830585
+14ae16e9911f6504d994503989db34d2d1cb2cd4,https://doi.org/10.1007/s11042-013-1616-4
+14bdd23ea8f4f6d7f4c193e5cbb0622362e12ae1,https://doi.org/10.1109/TIP.2006.884932
+1473e6f2d250307f0421f1e2ea68b6485d3bd481,https://doi.org/10.1109/IJCNN.2016.7727333
+8e9b92a805d1ce0bf4e0c04133d26e28db036e6a,https://doi.org/10.1109/DICTA.2017.8227428
+8ef465ff12ee1d2be2a99d1c628117a4ce890a6b,https://doi.org/10.1016/j.camwa.2010.08.082
+8e55486aa456cae7f04fe922689b3e99a0e409fe,http://doi.acm.org/10.1145/3123266.3123342
+8ebe2df4d82af79f0f082ced70f3a73d7fb93b66,https://doi.org/10.1109/URAI.2015.7358851
+8e272978dd1500ce6e4c2ef5e91d4332078ff757,https://doi.org/10.1007/11848035_5
+8e8a6623b4abd2452779c43f3c2085488dfcb323,http://doi.acm.org/10.1145/2993148.2997630
+8e21399bb102e993edd82b003c306a068a2474da,https://doi.org/10.1109/ICIP.2013.6738758
+22c06284a908d8ad0994ad52119773a034eed7ee,http://doi.acm.org/10.1145/2964284.2967236
+2238dddb76499b19035641d97711cf30d899dadb,https://doi.org/10.1109/SIU.2016.7496098
+22894c7a84984bd4822dcfe7c76a74673a242c36,http://doi.acm.org/10.1145/2993148.2997634
+22a10d8d2a2cb9055557a3b335d6706100890afb,https://doi.org/10.1109/SIU.2016.7496121
+22ccd537857aca1ee4b961f081f07c58d42a7f32,https://doi.org/10.1109/DICTA.2015.7371260
+22d5aeb25bb034f6ae2fc50b5cdd9934a85d6505,http://doi.acm.org/10.1145/2808469.2810102
+22dbdace88c8f4bda2843ed421e3708ec0744237,https://doi.org/10.1016/j.cviu.2013.12.010
+259ddd3c618feec51576baac7eaaf80ea924b791,https://doi.org/10.1007/s11257-007-9039-4
+254964096e523d5e48e03390ce440c9af337d200,http://dl.acm.org/citation.cfm?id=3005378
+250b73ec5a4f78b7b4ea3aba65c27fc1352154d5,https://doi.org/10.1109/TIP.2015.2463223
+256b46b12ab47283e6ada05fad6a2b501de35323,https://doi.org/10.1109/ICPR.2016.7900275
+252f202bfb14d363a969fce19df2972b83fa7ec0,http://doi.ieeecomputersociety.org/10.1109/FG.2017.120
+25bcd5aa3bbe56c992547fba683418655b46fc4a,https://doi.org/10.1016/j.eswa.2017.03.030
+2546dc7e2c2390233de16502413fe1097ecf3fb5,https://doi.org/10.1016/j.patrec.2011.01.009
+258b3b1df82186dd76064ef86b28555e91389b73,https://doi.org/10.1109/ACCESS.2017.2739822
+610779e90b644cc18696d7ac7820d3e0598e24d0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7067419
+61262450d4d814865a4f9a84299c24daa493f66e,http://doi.org/10.1007/s10462-016-9474-x
+61971f8e6fff5b35faed610d02ad14ccfc186c70,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373843
+61e2044184d86d0f13e50ecaa3da6a4913088c76,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7572183
+61329bc767152f01aa502989abc854b53047e52c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8450832
+95b9df34bcf4ae04beea55c11cf0cc4095aa38dc,http://doi.org/10.1007/11527923_7
+95289007f2f336e6636cf8f920225b8d47c6e94f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6472796
+95b5296f7ec70455b0cf1748cddeaa099284bfed,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8443886
+95d858b39227edeaf75b7fad71f3dc081e415d16,http://doi.org/10.1007/s11042-017-5073-3
+95e3b78eb4d5b469f66648ed4f37e45e0e01e63e,http://doi.org/10.1007/s11042-016-4261-x
+95288fa7ff4683e32fe021a78cbf7d3376e6e400,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014759
+598744c8620e4ecbf449d14d7081fbf1cd05851f,https://www.ncbi.nlm.nih.gov/pubmed/29731533
+59b83666c1031c3f509f063b9963c7ad9781ca23,http://dl.acm.org/citation.cfm?id=2830590
+592f14f4b12225fc691477a180a2a3226a5ef4f0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789592
+9285f4a6a06e975bde3ae3267fccd971d4fff98a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099853
+9296f4ac0180e29226d6c016b5a4d5d2964eaaf6,http://doi.org/10.1038/s41598-017-07122-x
+92292fffc36336d63f4f77d6b8fc23b0c54090e9,http://doi.org/10.1016/j.jvcir.2015.03.001
+0c6a566ebdac4bd14e80cd6bf4631bc7458e1595,http://doi.org/10.1016/j.patcog.2013.03.010
+6689aee6c9599c1af4c607ea5385ac0c2cf0c4b3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8335166
+660c99ac408b535bb0468ab3708d0d1d5db30180,http://doi.org/10.1007/s11042-015-3083-6
+66490b5869822b31d32af7108eaff193fbdb37b0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373857
+663efaa0671eace1100fdbdecacd94216a17b1db,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6619243
+3e3227c8e9f44593d2499f4d1302575c77977b2e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8347112
+3e59d97d42f36fc96d33a5658951856a555e997b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163128
+3e9ab40e6e23f09d16c852b74d40264067ac6abc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6619307
+3e2b9ffeb708b4362ebfad95fa7bb0101db1579d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553717
+50ee027c63dcc5ab5cd0a6cdffb1994f83916a46,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995354
+506ea19145838a035e7dba535519fb40a3a0018c,http://arxiv.org/abs/1806.08251
+68604e7e1b01cdbd3c23832976d66f1a86edaa8f,http://doi.org/10.1134/S1054661818030136
+6856a11b98ffffeff6e2f991d3d1a1232c029ea1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771409
+68c1090f912b69b76437644dd16922909dd40d60,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6987312
+5760d29574d78e79e8343b74e6e30b3555e48676,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8447743
+572dbaee6648eefa4c9de9b42551204b985ff863,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163151
+5779e3e439c90d43648db107e848aeb954d3e347,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7927417
+5748652924084b7b0220cddcd28f6b2222004359,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7492255
+57178b36c21fd7f4529ac6748614bb3374714e91,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217
+3b350afd8b82487aa97097170c269a25daa0c82d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8248664
+3b21aaf7def52964cf1fcc5f11520a7618c8fae3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099900
+3bf8e4d89b9e6d004de6ea52e3e9d68f6015f94b,http://dl.acm.org/citation.cfm?id=3240893
+3bcb93aa2a5e5eda039679516292af2f7c0ff9ac,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8393012
+3bf579baf0903ee4d4180a29739bf05cbe8f4a74,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4270392
+3bd10f7603c4f5a4737c5613722124787d0dd818,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7415949
+6f22628d34a486d73c6b46eb071200a00e3abae3,https://www.ncbi.nlm.nih.gov/pubmed/29994497
+6feafc5c1d8b0e9d65ebe4c1512b7860c538fbdc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8448885
+035c8632c1ffbeb75efe16a4ec50c91e20e6e189,http://doi.org/10.1007/s00138-018-0943-x
+034b3f3bac663fb814336a69a9fd3514ca0082b9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7298991
+9b4b2a575641f3a7f8a5ce28b6a06c36694a9ddf,http://doi.org/10.1007/s00371-015-1158-z
+9b9f6e5eb6d7fa50300d67502e8fda1006594b84,http://dl.acm.org/citation.cfm?id=3123323
+9b1022a01ca4ecf8c1fa99b1b39a93924de2fcfb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8316962
+9ba4a5e0b7bf6e26563d294f1f3de44d95b7f682,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354113
+9b0ead0a20a2b7c4ae40568d8d1c0c2b23a6b807,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354290
+9b6d9f0923e1d42c86a1154897b1a9bd7ba6716c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7114333
+9efdb73c6833df57732b727c6aeac510cadb53fe,http://dl.acm.org/citation.cfm?id=3184071
+9e105c4a176465d14434fb3f5bae67f57ff5fba2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354230
+9e2ab407ff36f3b793d78d9118ea25622f4b7434,http://doi.org/10.1007/s11042-018-5679-0
+9e10ea753b9767aa2f91dafe8545cd6f44befd7f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771444
+0450dacc43171c6e623d0d5078600dd570de777e,http://doi.org/10.1007/s10339-016-0774-5
+6af75a8572965207c2b227ad35d5c61a5bd69f45,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8433687
+6a6269e591e11f41d59c2ca1e707aaa1f0d57de6,http://doi.org/10.1007/s10044-016-0531-5
+6a931e7b7475635f089dd33e8d9a2899ae963804,http://doi.org/10.1007/s00371-018-1561-3
+6a6406906470be10f6d6d94a32741ba370a1db68,http://doi.org/10.1007/s11042-016-4213-5
+6a5d7d20a8c4993d56bcf702c772aa3f95f99450,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813408
+3266fcd1886e8ad883714e38203e66c0c6487f7b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7533149
+3266fbaaa317a796d0934b9a3f3bb7c64992ac7d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4527244
+32f62da99ec9f58dd93e3be667612abcf00df16a,http://doi.org/10.1007/s11042-017-5583-z
+32e4fc2f0d9c535b1aca95aeb5bcc0623bcd2cf2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1334680
+32e9c9520cf6acb55dde672b73760442b2f166f5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7970176
+35208eda874591eac70286441d19785726578946,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789507
+35265cbd9c6ea95753f7c6b71659f7f7ef9081b6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7052327
+352a620f0b96a7e76b9195a7038d5eec257fd994,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373823
+69adf2f122ff18848ff85e8de3ee3b2bc495838e,http://arxiv.org/abs/1711.10678
+69a41c98f6b71764913145dbc2bb4643c9bc4b0a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8444452
+695426275dee2ec56bc0c0afe1c5b4227a350840,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7878535
+696236fb6f986f6d5565abb01f402d09db68e5fa,http://doi.org/10.1007/s41095-018-0112-1
+6932baa348943507d992aba75402cfe8545a1a9b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014987
+6966d9d30fa9b7c01523425726ab417fd8428790,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6619291
+3cb057a24a8adba6fe964b5d461ba4e4af68af14,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6701391
+3c09fb7fe1886072670e0c4dd632d052102a3733,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8101020
+3c0fffd4cdbfef4ccd92d528d8b8a60ab0929827,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373845
+3cd380bd0f3b164b44c49e3b01f6ac9798b6b6f9,http://doi.org/10.1007/s00371-016-1323-z
+562f7555e5cb79ce0fe834c4613264d8378dd007,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7153112
+56fd4c05869e11e4935d48aa1d7abb96072ac242,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373812
+566563a02dbaebec07429046122426acd7039166,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8461618
+5632ba72b2652df3b648b2ee698233e76a4eee65,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8346387
+51b42da0706a1260430f27badcf9ee6694768b9b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7471882
+51410d6bd9a41eacb105f15dbdaee520e050d646,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8412888
+51d6a8a61ea9588a795b20353c97efccec73f5db,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6460308
+518a3ce2a290352afea22027b64bf3950bffc65a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5204174
+51dcb36a6c247189be4420562f19feb00c9487f8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1394433
+519f1486f0755ef3c1f05700ea8a05f52f83387b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5595846
+5167e16b53283be5587659ea8eaa3b8ef3fddd33,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813364
+51bb86dc8748088a198b216f7e97616634147388,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6890496
+3dce635ce4b55fb63fc6d41b38640403b152a048,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411225
+3db6fd6a0e9bb30f2421e84ee5e433683d17d9c1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8402469
+588bed36b3cc9e2f26c39b5d99d6687f36ae1177,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771389
+58217ae5423828ed5e1569bee93d491569d79970,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1578742
+587b8c147c6253878128ddacf6e5faf8272842a4,http://dl.acm.org/citation.cfm?id=2638549
+58538cc418bf41197fad4fc4ee2449b2daeb08b1,http://doi.org/10.1007/s11042-017-4343-4
+67386772c289cd40db343bdc4cb8cb4f58271df2,http://doi.org/10.1038/s41598-017-10745-9
+675b1fd2aaebe9c62be6b22b9ac6d278193cc581,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1699428
+67af3ec65f1dc535018f3671624e72c96a611c39,http://doi.org/10.1007/s11042-016-4058-y
+0b45aeb0aede5e0c19b508ede802bdfec668aefd,http://dl.acm.org/citation.cfm?id=1963206
+0ba5369c5e1e87ea172089d84a5610435c73de00,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8347111
+0b82bf595e76898993ed4f4b2883c42720c0f277,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411229
+93af335bf8c610f34ce0cadc15d1dd592debc706,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8267475
+93cd5c47e4a3425d23e3db32c6eaef53745bb32e,http://doi.org/10.1007/s11042-017-5062-6
+93dcea2419ca95b96a47e541748c46220d289d77,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014993
+93c0405b1f5432eab11cb5180229720604ffd030,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8462228
+93dd4e512cd7647aecbfc0cd4767adf5d9289c3d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952499
+94806f0967931d376d1729c29702f3d3bb70167c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780581
+9436170c648c40b6f4cc3751fca3674aa82ffe9a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6811741
+947ee3452e4f3d657b16325c6b959f8b8768efad,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952677
+604a281100784b4d5bc1a6db993d423abc5dc8f0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5353681
+60777fbca8bff210398ec8b1179bc4ecb72dfec0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751535
+60821d447e5b8a96dd9294a0514911e1141ff620,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813328
+605f6817018a572797095b83bec7fae7195b2abc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5339020
+60462b981fda63c5f9d780528a37c46884fe0b54,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8397015
+34c062e2b8a3f6421b9f4ff22f115a36d4aba823,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7872382
+34bc8ecec0c0b328cd8c485cb34d4d2f4b84e0c9,https://www.ncbi.nlm.nih.gov/pubmed/29069621
+346752e3ab96c93483413be4feaa024ccfe9499f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6960834
+34fd227f4fdbc7fe028cc1f7d92cb59204333718,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8446331
+5a12e1d4d74fe1a57929eaaa14f593b80f907ea3,http://doi.org/10.1007/s13735-016-0117-4
+5a547df635a9a56ac224d556333d36ff68cbf088,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8359041
+5fea59ccdab484873081eaa37af88e26e3db2aed,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8263394
+5f2c210644c1e567435d78522258e0ae036deedb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4036602
+5fe3a9d54d5070308803dd8ef611594f59805400,http://doi.org/10.1016/j.patcog.2016.02.006
+5f0d4657eab4152a1785ee0a25b5b499cd1163ec,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6853687
+336488746cc76e7f13b0ec68ccfe4df6d76cdc8f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7762938
+335435a94f8fa9c128b9f278d929c9d0e45e2510,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6849440
+3337cfc3de2c16dee6f7cbeda5f263409a9ad81e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8398675
+057b80e235b10799d03876ad25465208a4c64caf,http://dl.acm.org/citation.cfm?id=3123427
+0532cbcf616f27e5f6a4054f818d4992b99d201d,http://doi.org/10.1007/s11042-015-3042-2
+9d5bfaf6191484022a6731ce13ac1b866d21ad18,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139086
+9d24812d942e69f86279a26932df53c0a68c4111,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8417316
+9d46485ca2c562d5e295251530a99dd5df99b589,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813386
+9d3377313759dfdc1a702b341d8d8e4b1469460c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7342926
+9dcfa771a7e87d7681348dd9f6cf9803699b16ce,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1385984
+9c2f20ed168743071db6268480a966d5d238a7ee,http://dl.acm.org/citation.cfm?id=1456304
+9cc8cf0c7d7fa7607659921b6ff657e17e135ecc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099536
+9c6dfd3a38374399d998d5a130ffc2864c37f554,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553738
+9c23859ec7313f2e756a3e85575735e0c52249f4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5981788
+9ca542d744149f0efc8b8aac8289f5e38e6d200c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789587
+9c59bb28054eee783a40b467c82f38021c19ff3e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7178311
+023decb4c56f2e97d345593e4f7b89b667a6763d,http://doi.org/10.1007/s10994-005-3561-6
+02fc9e7283b79183eb3757a9b6ddeb8c91c209bb,http://doi.org/10.1007/s11042-018-6146-7
+021e008282714eaefc0796303f521c9e4f199d7e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354319
+a4898f55f12e6393b1c078803909ea715bf71730,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6957817
+a45e6172713a56736a2565ddea9cb8b1d94721cd,http://doi.org/10.1038/s41746-018-0035-3
+a325d5ea42a0b6aeb0390318e9f65f584bd67edd,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909426
+a3201e955d6607d383332f3a12a7befa08c5a18c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7900276
+a313851ed00074a4a6c0fccf372acb6a68d9bc0b,http://doi.org/10.1007/s11042-016-4324-z
+b5f9180666924a3215ab0b1faf712e70b353444d,http://doi.org/10.1007/s11042-017-4661-6
+b53485dbdd2dc5e4f3c7cff26bd8707964bb0503,http://doi.org/10.1007/s11263-017-1012-z
+b5747ecfa0f3be0adaad919d78763b1133c4d662,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8397022
+b5f3b0f45cf7f462a9c463a941e34e102a029506,http://dl.acm.org/citation.cfm?id=3143004
+b51d11fa400d66b9f9d903a60c4ebe03fd77c8f2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8358588
+b5fdd7778503f27c9d9bf77fab193b475fab6076,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373891
+b598f7761b153ecb26e9d08d3c5817aac5b34b52,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4618852
+b55e70df03d9b80c91446a97957bc95772dcc45b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8269329
+b5ca8d4f259f35c1f3edfd9f108ce29881e478b0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099624
+b5f9306c3207ac12ac761e7d028c78b3009a219c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6093779
+b26e8f6ad7c2d4c838660d5a17337ce241442ed9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8462692
+b2470969e4fba92f7909eac26b77d08cc5575533,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8326475
+d916602f694ebb9cf95d85e08dd53f653b6196c3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237607
+d9e66b877b277d73f8876f537206395e71f58269,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7225130
+d9deafd9d9e60657a7f34df5f494edff546c4fb8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100124
+d9218c2bbc7449dbccac351f55675efd810535db,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5699141
+d9a5c82b710b1f4f1ffb67be2ae1d3c0ae7f6c55,http://doi.org/10.1016/j.jvcir.2015.11.002
+d99b5ee3e2d7e3a016fbc5fd417304e15efbd1f8,http://doi.org/10.1007/s11063-017-9578-6
+aca728cab26b95fbe04ec230b389878656d8af5b,http://doi.org/10.1007/978-981-10-8258-0
+acff2dc5d601887741002a78f8c0c35a799e6403,http://doi.org/10.1007/978-3-662-44654-6
+ac2e166c76c103f17fdea2b4ecb137200b8d4703,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5373798
+ac03849956ac470c41585d2ee34d8bb58bb3c764,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6853690
+ad77056780328bdcc6b7a21bce4ddd49c49e2013,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8398021
+ada063ce9a1ff230791c48b6afa29c401a9007f1,http://doi.org/10.1007/978-3-319-97909-0
+bb4f83458976755e9310b241a689c8d21b481238,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265393
+bb4be8e24d7b8ed56d81edec435b7b59bad96214,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7060677
+bb2f61a057bbf176e402d171d79df2635ccda9f6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8296311
+bb0ecedde7d6e837dc9a5e115302a2aaad1035e1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373838
+d7b8f285b0701ba7b1a11d1c7dd3d1e7e304083f,http://dl.acm.org/citation.cfm?id=3164593
+d7dd35a86117e46d24914ef49ccd99ea0a7bf705,http://doi.org/10.1007/s10994-014-5463-y
+d790093cb85fc556c0089610026e0ec3466ab845,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4721612
+d77f18917a58e7d4598d31af4e7be2762d858370,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6289062
+d00e9a6339e34c613053d3b2c132fccbde547b56,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791154
+d06bcb2d46342ee011e652990edf290a0876b502,http://arxiv.org/abs/1708.00980
+d066575b48b552a38e63095bb1f7b56cbb1fbea4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8359888
+bed8feb11e8077df158e16bce064853cf217ba62,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6191360
+bef4df99e1dc6f696f9b3732ab6bac8e85d3fb3c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7344632
+be7444c891caf295d162233bdae0e1c79791d566,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014816
+bec0c33d330385d73a5b6a05ad642d6954a6d632,http://doi.org/10.1007/s11042-017-4491-6
+bef926d63512dbffcf1af59f72295ef497f5acf9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6990726
+be632b206f1cd38eab0c01c5f2004d1e8fc72880,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6607601
+beb2f1a6f3f781443580ffec9161d9ce6852bf48,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8424735
+beae35eb5b2c7f63dfa9115f07b5ba0319709951,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163096
+be4faea0971ef74096ec9800750648b7601dda65,http://doi.org/10.1007/s11063-017-9724-1
+b313751548018e4ecd5ae2ce6b3b94fbd9cae33e,http://doi.org/10.1007/s11263-008-0143-7
+b3ad7bc128b77d9254aa38c5e1ead7fa10b07d29,http://dl.acm.org/citation.cfm?id=3206041
+b3add9bc9e70b6b28ba31e843e9155e7c37f3958,http://doi.org/10.1007/s10766-017-0552-8
+df767f62a6bf3b09e6417d801726f2d5d642a202,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1699727
+df87193e15a19d5620f5a6458b05fee0cf03729f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7363421
+df6e68db278bedf5486a80697dec6623958edba8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952696
+da7bbfa905d88834f8929cb69f41a1b683639f4b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6199752
+daa120032d8f141bc6aae20e23b1b754a0dd7d5f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789593
+dad6b36fd515bda801f3d22a462cc62348f6aad8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6117531
+daca9d03c1c951ed518248de7f75ff51e5c272cb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6976977
+dac8fc521dfafb2d082faa4697f491eae00472c7,http://dl.acm.org/citation.cfm?id=3123423
+daa4cfde41d37b2ab497458e331556d13dd14d0b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406477
+da23d90bacf246b75ef752a2cbb138c4fcd789b7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406360
+dac34b590adddef2fc31f26e2aeb0059115d07a1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8436078
+b484141b99d3478a12b8a6854864c4b875d289b8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6117595
+b41d585246360646c677a8238ec35e8605b083b0,http://doi.org/10.1007/s11042-018-6017-2
+b40c001b3e304dccb28c745bd54aa281c8ff1f29,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8361072
+a2e0966f303f38b58b898d388d1c83e40b605262,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354125
+a2b4a6c6b32900a066d0257ae6d4526db872afe2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8272466
+a20036b7fbf6c0db454c8711e72d78f145560dc8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4761890
+a26fd9df58bb76d6c7a3254820143b3da5bd584b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8446759
+a5acda0e8c0937bfed013e6382da127103e41395,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789672
+a532cfc69259254192aee3fc5be614d9197e7824,http://doi.org/10.1016/j.patcog.2016.12.028
+a59c0cf3d2c5bf144ee0dbc1152b1b5dd7634990,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7350093
+a5f35880477ae82902c620245e258cf854c09be9,http://doi.org/10.1016/j.imavis.2013.12.004
+a5f70e0cd7da2b2df05fadb356a24743f3cf459a,http://doi.org/10.1007/s11063-017-9649-8
+bddc822cf20b31d8f714925bec192c39294184f7,http://doi.org/10.1134/S1054661807040190
+bd243d77076b3b8fe046bd3dc6e8a02aa9b38d62,http://arxiv.org/abs/1412.0767
+bd8d579715d58405dfd5a77f32920aafe018fce4,http://doi.org/10.1016/j.imavis.2008.08.005
+d141c31e3f261d7d5214f07886c1a29ac734d6fc,http://doi.org/10.1007/s11063-018-9812-x
+d1ee9e63c8826a39d75fa32711fddbcc58d5161a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6613000
+d10cfcf206b0991e3bc20ac28df1f61c63516f30,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553776
+d1edb8ba9d50817dbfec7e30f25b1846941e84d8,http://doi.org/10.1007/s13735-016-0112-9
+d116bac3b6ad77084c12bea557d42ed4c9d78433,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7471886
+d1079444ceddb1de316983f371ecd1db7a0c2f38,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6460478
+d6c8f5674030cf3f5a2f7cc929bad37a422b26a0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337371
+d6ae7941dcec920d5726d50d1b1cdfe4dde34d35,http://dl.acm.org/citation.cfm?id=31310887
+d6e08345ba293565086cb282ba08b225326022fc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7490397
+d62d82c312c40437bc4c1c91caedac2ba5beb292,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8461322
+bc607bee2002c6c6bf694a15efd0a5d049767237,http://doi.org/10.1007/s11042-017-4364-z
+bc9bad25f8149318314971d8b8c170064e220ea8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8078542
+bc08dfa22949fbe54e15b1a6379afade71835968,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7899970
+bc36badb6606b8162d821a227dda09a94aac537f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337442
+ae78469de00ea1e7602ca468dcf188cdfe2c80d4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8466467
+ae5e92abd5929ee7f0a5aa1622aa094bac4fae29,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373805
+aeb6b9aba5bb08cde2aebfeda7ced6c38c84df4a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8424644
+aef58a54d458ab76f62c9b6de61af4f475e0f616,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6706790
+aee3427d0814d8a398fd31f4f46941e9e5488d83,http://dl.acm.org/citation.cfm?id=1924573
+d8526863f35b29cbf8ac2ae756eaae0d2930ffb1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265439
+d833268a8ea9278e68aaf3bd9bc2c11a5bb0bab7,http://doi.org/10.1007/s11042-018-6047-9
+d89a754d7c59e025d2bfcdb872d2d061e2e371ba,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5598629
+d8fbd3a16d2e2e59ce0cff98b3fd586863878dc1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952553
+ab8ecf98f457e29b000c44d49f5bf49ec92e571c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8439631
+ab0981d1da654f37620ca39c6b42de21d7eb58eb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8016651
+ab80582807506c0f840bd1ba03a8b84f8ac72f79,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8462326
+ab6886252aea103b3d974462f589b4886ef2735a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4371439
+e5ea7295b89ef679e74919bf957f58d55ad49489,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4401948
+e52f73c77c7eaece6f2d8fdd0f15327f9f007261,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099713
+e52f57a7de675d14aed28e5d0f2f3c5a01715337,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8319987
+e57014b4106dd1355e69a0f60bb533615a705606,http://doi.org/10.1007/s13748-018-0143-y
+e295c1aa47422eb35123053038e62e9aa50a2e3a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406389
+e287ff7997297ce1197359ed0fb2a0bd381638c9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7795253
+e2faaebd17d10e2919bd69492787e7565546a63f,http://doi.org/10.1007/s11042-017-4514-3
+e2106bb3febb4fc8fe91f0fcbc241bcda0e56b1e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952626
+f472cb8380a41c540cfea32ebb4575da241c0288,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7284869
+f4ba07d2ae6c9673502daf50ee751a5e9262848f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7284810
+f4251e02f87ac3fcae70bdb313f13ed16ff6ff0a,https://www.ncbi.nlm.nih.gov/pubmed/24314504
+f4b5a8f6462a68e79d643648c780efe588e4b6ca,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995700
+f39783847499dd56ba39c1f3b567f64dfdfa8527,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791189
+f3cdd2c3180aa2bf08320ddd3b9a56f9fe00e72b,http://doi.org/10.1016/j.patrec.2013.03.022
+f374ac9307be5f25145b44931f5a53b388a77e49,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5339060
+f38813f1c9dac44dcb992ebe51c5ede66fd0f491,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354277
+f3553148e322f4f64545d6667dfbc7607c82703a,http://doi.org/10.1007/s00138-016-0763-9
+f33bd953d2df0a5305fc8a93a37ff754459a906c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961800
+ebbceab4e15bf641f74e335b70c6c4490a043961,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813349
+ebc3d7f50231cdb18a8107433ae9adc7bd94b97a,http://doi.org/10.1111/cgf.13218
+eba4cfd76f99159ccc0a65cab0a02db42b548d85,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751379
+ebde9b9c714ed326157f41add8c781f826c1d864,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014758
+eb3066de677f9f6131aab542d9d426aaf50ed2ce,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373860
+eb8a3948c4be0d23eb7326d27f2271be893b3409,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7914701
+eb6f2b5529f2a7bc8b5b03b1171f75a4c753a0b2,http://doi.org/10.1117/12.650555
+c7745f941532b7d6fa70db09e81eb1167f70f8a7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1640757
+c05ae45c262b270df1e99a32efa35036aae8d950,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354120
+c07ab025d9e3c885ad5386e6f000543efe091c4b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8302601
+c0c0b8558b17aa20debc4611275a4c69edd1e2a7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909629
+c0f67e850176bb778b6c048d81c3d7e4d8c41003,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8296441
+eece52bd0ed4d7925c49b34e67dbb6657d2d649b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014982
+ee1465cbbc1d03cb9eddaad8618a4feea78a01ce,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6998872
+ee7e8aec3ebb37e41092e1285e4f81916ce92c18,https://www.sciencedirect.com/science/article/pii/S0197458017301859
+ee1f9637f372d2eccc447461ef834a9859011ec1,http://doi.org/10.1007/s11042-016-3950-9
+ee56823f2f00c8c773e4ebc725ca57d2f9242947,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7110235
+ee2ec0836ded2f3f37bf49fa0e985280a8addaca,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8368755
+c91da328fe50821182e1ae4e7bcbe2b62496f8b9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4453844
+c9b958c2494b7ba08b5b460f19a06814dba8aee0,https://www.ncbi.nlm.nih.gov/pubmed/30080142
+c9c9ade2ef4dffb7582a629a47ea70c31be7a35e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237606
+c997744db532767ee757197491d8ac28d10f1c0f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8364339
+c9efcd8e32dced6efa2bba64789df8d0a8e4996a,http://dl.acm.org/citation.cfm?id=2984060
+c900e0ad4c95948baaf0acd8449fde26f9b4952a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780969
+c914d2ba06ec3fd1baa0010dcc4d16c7c34fc225,http://doi.org/10.1007/978-3-319-11071-4
+c98b13871a3bc767df0bdd51ff00c5254ede8b22,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909913
+fc7b34a2e43bb3d3585e1963bb64a488e2f278a0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7045492
+fcc6fe6007c322641796cb8792718641856a22a7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994
+fc8fb68a7e3b79c37108588671c0e1abf374f501,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7565615
+fcf393a90190e376b617cc02e4a473106684d066,http://doi.org/10.1007/s10044-015-0507-x
+fcceea054cb59f1409dda181198ed4070ed762c9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8388318
+fc7f140fcedfe54dd63769268a36ff3f175662b5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8013122
+fd9ab411dc6258763c95b7741e3d51adf5504040,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5595808
+fd809ee36fa6832dda57a0a2403b4b52c207549d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8409768
+fde611bf25a89fe11e077692070f89dcdede043a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7322904
+fd5376fcb09001a3acccc03159e8ff5801129683,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373899
+f2902f5956d7e2dca536d9131d4334f85f52f783,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6460191
+f2d605985821597773bc6b956036bdbc5d307386,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8027090
+f2896dd2701fbb3564492a12c64f11a5ad456a67,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5495414
+f2700e3d69d3cce2e0b1aea0d7f87e74aff437cd,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237686
+f27e5a13c1c424504b63a9084c50f491c1b17978,http://dl.acm.org/citation.cfm?id=3097991
+f2eab39cf68de880ee7264b454044a55098e8163,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5539989
+f2d5bb329c09a5867045721112a7dad82ca757a3,http://doi.org/10.1007/s11042-015-3009-3
+f201baf618574108bcee50e9a8b65f5174d832ee,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8031057
+f5c57979ec3d8baa6f934242965350865c0121bd,http://doi.org/10.1007/s12539-018-0281-8
+f5603ceaebe3caf6a812edef9c4b38def78cbf34,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4455998
+e3ce4c3e1279e3dc0c14ff3bb2920aced9e62638,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099824
+e3d76f1920c5bf4a60129516abb4a2d8683e48ae,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014907
+e3b9863e583171ac9ae7b485f88e503852c747b6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7494596
+cfaf61bacf61901b7e1ac25b779a1f87c1e8cf7f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6737950
+cf736f596bf881ca97ec4b29776baaa493b9d50e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952629
+cf2e1ebb9609f46af6de0c15b4f48d03e37e54ba,http://arxiv.org/abs/1503.01521
+ca096e158912080493a898b0b8a4bd2902674fed,http://dl.acm.org/citation.cfm?id=3264899
+ca902aeec4fa54d32a4fed9ba89a7fb2f7131734,http://doi.org/10.1007/s11042-018-5945-1
+ca44a838da4187617dca9f6249d8c4b604661ec7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7351564
+e4754afaa15b1b53e70743880484b8d0736990ff,http://doi.org/10.1016/j.imavis.2016.01.002
+e40cb4369c6402ae53c81ce52b73df3ef89f578b,http://doi.org/10.1016/j.image.2015.01.009
+e45a556df61e2357a8f422bdf864b7a5ed3b8627,http://doi.org/10.1016/j.image.2017.08.001
+e4d7b8eb0a8e6d2bb5b90b027c1bf32bad320ba5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8023876
+e4fa062bff299a0bcef9f6b2e593c85be116c9f1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7407641
+fe866887d3c26ee72590c440ed86ffc80e980293,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8397011
+fe50efe9e282c63941ec23eb9b8c7510b6283228,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7314574
+feea73095b1be0cbae1ad7af8ba2c4fb6f316d35,http://dl.acm.org/citation.cfm?id=3126693
+fecccc79548001ecbd6cafd3067bcf14de80b11a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354157
+c847de9faa1f1a06d5647949a23f523f84aba7f3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6199761
+c8585c95215bc53e28edb740678b3a0460ca8aa4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373829
+c84de67ec2a5d687869d0c3ca8ac974aaa5ee765,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7090979
+c83e26622b275fdf878135e71c23325a31d0e5fc,http://dl.acm.org/citation.cfm?id=3164611
+c808c784237f167c78a87cc5a9d48152579c27a4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265437
+c858c74d30c02be2d992f82a821b925669bfca13,http://doi.org/10.1007/978-3-319-10605-2
+c843f591658ca9dbb77944a89372a92006defe68,http://doi.org/10.1007/s11042-015-2550-4
+fb6f5cb26395608a3cf0e9c6c618293a4278a8ad,http://doi.org/10.1007/s11390-018-1835-2
+fbc591cde7fb7beb985437a22466f9cf4b16f8b1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8463262
+fb6cc23fd6bd43bd4cacf6a57cd2c7c8dfe5269d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5339084
+fbe4f8a6af19f63e47801c6f31402f9baae5fecf,http://dl.acm.org/citation.cfm?id=2820910
+fbc2f5cf943f440b1ba2374ecf82d0176a44f1eb,https://www.ncbi.nlm.nih.gov/pubmed/30040629
+fbc9ba70e36768efff130c7d970ce52810b044ff,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6738500
+fb8eb4a7b9b9602992e5982c9e0d6d7f7b8210ef,https://www.ncbi.nlm.nih.gov/pubmed/29994550
+edfce091688bc88389dd4877950bd58e00ff1253,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553700
+ed32df6b122b15a52238777c9993ed31107b4bed,http://doi.org/10.1016/j.eswa.2017.03.008
+ed2f4e5ecbc4b08ee0784e97760a7f9e5ea9efae,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8241843
+ede5982980aa76deae8f9dc5143a724299d67742,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8081396
+ed184fda0306079f2ee55a1ae60fbf675c8e11c6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6802347
+edd6ed94207ab614c71ac0591d304a708d708e7b,http://doi.org/10.1016/j.neucom.2012.02.001
+edf60d081ffdfa80243217a50a411ab5407c961d,http://doi.org/10.1007/s11263-016-0893-6
+ede16b198b83d04b52dc3f0dafc11fd82c5abac4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952343
+c15b68986ecfa1e13e3791686ae9024f66983f14,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014747
+c12260540ec14910f5ec6e38d95bdb606826b32e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7005459
+c18a03568d4b512a0d8380cbb1fbf6bd56d11f05,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8430403
+c1c2775e19d6fd2ad6616f69bda92ac8927106a2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6196236
+c175ebe550761b18bac24d394d85bdfaf3b7718c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1301582
+c6724c2bb7f491c92c8dd4a1f01a80b82644b793,https://www.ncbi.nlm.nih.gov/pubmed/19167865
+c61eaf172820fcafaabf39005bd4536f0c45f995,http://doi.org/10.1007/978-3-319-58771-4_1
+c6382de52636705be5898017f2f8ed7c70d7ae96,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139089
+c631a31be2c793d398175ceef7daff1848bb6408,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8466318
+c61a8940d66eed9850b35dd3768f18b59471ca34,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1374768
+ecac3da2ff8bc2ba55981467f7fdea9de80e2092,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1301635
+ec576efd18203bcb8273539fa277839ec92232a1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7994601
+ecc4be938f0e61a9c6b5111e0a99013f2edc54b9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771439
+ec1bec7344d07417fb04e509a9d3198da850349f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8342699
+ec983394f800da971d243f4143ab7f8421aa967c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8340635
+ecd08edab496801fd4fde45362dde462d00ee91c,https://www.ncbi.nlm.nih.gov/pubmed/29994561
+ec5c63609cf56496715b0eba0e906de3231ad6d1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8364651
+ec00ecb64fa206cea8b2e716955a738a96424084,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265512
+ec90738b6de83748957ff7c8aeb3150b4c9b68bb,http://doi.org/10.1016/j.patcog.2015.03.011
+4e061a302816f5890a621eb278c6efa6e37d7e2f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909638
+4e43408a59852c1bbaa11596a5da3e42034d9380,http://doi.org/10.1007/s11042-018-6040-3
+4ed6c7740ba93d75345397ef043f35c0562fb0fd,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6117516
+4ef09fe9f7fa027427414cf1f2e9050ac7f5e34d,http://doi.org/10.1007/s11227-018-2408-4
+4e37cd250130c6fd60e066f0c8efb3cbb778c421,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8419742
+20a0f71d2c667f3c69df18f097f2b5678ac7d214,http://doi.org/10.1007/s10055-018-0357-0
+20d6a4aaf5abf2925fdce2780e38ab1771209f76,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8446795
+20eeb83a8b6fea64c746bf993f9c991bb34a4b30,http://doi.org/10.1007/s00138-018-0956-5
+18855be5e7a60269c0652e9567484ce5b9617caa,http://doi.org/10.1007/s11042-017-4579-z
+1860b8f63ce501bd0dfa9e6f2debc080e88d9baa,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7894195
+18010284894ed0edcca74e5bf768ee2e15ef7841,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780493
+18e54b74ed1f3c02b7569f53a7d930d72fc329f5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7902214
+188abc5bad3a3663d042ce98c7a7327e5a1ae298,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6152129
+180bd019eab85bbf01d9cddc837242e111825750,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8239690
+270acff7916589a6cc9ca915b0012ffcb75d4899,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8425659
+27b451abfe321a696c852215bb7efb4c2e50c89f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7898447
+2744e6d526b8f2c1b297ac2d2458aaa08b0cda11,http://doi.org/10.1007/s11042-017-5571-3
+2724ba85ec4a66de18da33925e537f3902f21249,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751298
+4b0cb10c6c3f2d581ac9eb654412f70bc72ed661,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8172386
+4b5ff8c67f3496a414f94e35cb35a601ec98e5cf,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6547306
+4b9ec224949c79a980a5a66664d0ac6233c3d575,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7565501
+4bf85ef995c684b841d0a5a002d175fadd922ff0,http://dl.acm.org/citation.cfm?id=3199668
+4b936847f39094d6cb0bde68cea654d948c4735d,http://doi.org/10.1007/s11042-016-3470-7
+11bb2abe0ca614c15701961428eb2f260e3e2eef,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8343867
+113b06e70b7eead8ae7450bafe9c91656705024c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373832
+116f9e9cda25ff3187bc777ceb3ecd28077a7eca,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373864
+11df25b4e074b7610ec304a8733fa47625d9faca,http://doi.org/10.1016/j.patrec.2012.09.024
+7d18e9165312cf669b799aa1b883c6bbe95bf40e,http://doi.org/10.1007/s11042-016-3492-1
+7d45f1878d8048f6b3de5b3ec912c49742d5e968,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7747479
+7d40e7e5c01bd551edf65902386401e1b8b8014b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7303876
+29db16efc3b378c50511f743e5197a4c0b9e902f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406401
+2961e14c327341d22d5f266a6872aa174add8ac4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6654170
+2983cf95743be82671a71528004036bd19172712,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7915734
+29a5d38390857e234c111f8bb787724c08f39110,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813387
+292e1c88d43a77dbe5c610f4f611cfdb6d3212b6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1301520
+7c57ac7c9f84fbd093f6393e2b63c18078bf0fdf,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6218178
+7caa3a74313f9a7a2dd5b4c2cd7f825d895d3794,http://doi.org/10.1007/s11263-016-0967-5
+7c11fa4fd91cb57e6e216117febcdd748e595760,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5597453
+7cdf3bc1de6c7948763c0c2dfa4384dcbd3677a0,http://doi.org/10.1007/s11263-016-0920-7
+7c8e0f3053e09da6d8f9a1812591a35bccd5c669,http://doi.org/10.1007/978-3-030-00470-5
+7cfbf90368553333b47731729e0e358479c25340,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7346480
+7c66e7f357553fd4b362d00ff377bffb9197410e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961231
+7c6686fa4d8c990e931f1d16deabf647bf3b1986,http://arxiv.org/abs/1504.07550
+166ef5d3fd96d99caeabe928eba291c082ec75a0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237597
+16fadde3e68bba301f9829b3f99157191106bd0f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4562953
+42a6beed493c69d5bad99ae47ea76497c8e5fdae,http://doi.org/10.1007/s11704-017-6613-8
+895081d6a5545ad6385bfc6fcf460fc0b13bac86,http://doi.org/10.1016/S0167-8655%2899%2900134-8
+45b9b7fe3850ef83d39d52f6edcc0c24fcc0bc73,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7888593
+1f3f7df159c338884ddfd38ee2d3ba2e1e3ada69,http://doi.org/10.1162/jocn_a_00645
+1f5f67d315c9dad341d39129d8f8fe7fa58e564c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8397536
+1fe1a78c941e03abe942498249c041b2703fd3d2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8393355
+1f59e0818e7b16c0d39dd08eb90533ea0ae0be5e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8385089
+1fa426496ed6bcd0c0b17b8b935a14c84a7ee1c2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100195
+1fb980e137b2c9f8781a0d98c026e164b497ddb1,http://dl.acm.org/citation.cfm?id=3213539
+7360a2adcd6e3fe744b7d7aec5c08ee31094dfd4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373833
+73ba33e933e834b815f62a50aa1a0e15c6547e83,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8368754
+7361b900018f22e37499443643be1ff9d20edfd6,http://doi.org/10.1049/iet-bmt.2016.0169
+73d53a7c27716ae9a6d3484e78883545e53117ae,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8371978
+7343f0b7bcdaf909c5e37937e295bf0ac7b69499,http://doi.org/10.1016/j.csi.2015.06.004
+73f341ff68caa9f8802e9e81bfa90d88bbdbd9d2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791198
+73dcb4c452badb3ee39a2f222298b234d08c21eb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6779478
+87610276ccbc12d0912b23fd493019f06256f94e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6706757
+87b607b8d4858a16731144d17f457a54e488f15d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5597532
+80d4cf7747abfae96328183dd1f84133023c2668,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369786
+80ed678ef28ccc1b942e197e0393229cd99d55c8,http://doi.org/10.1007/s10044-015-0456-4
+809e5884cf26b71dc7abc56ac0bad40fb29c671c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6247842
+7477cf04c6b086108f459f693a60272523c134db,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6618937
+746c0205fdf191a737df7af000eaec9409ede73f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8423119
+1aa61dd85d3a5a2fe819cba21192ec4471c08628,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8359518
+1a53ca294bbe5923c46a339955e8207907e9c8c6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7273870
+1a81c722727299e45af289d905d7dcf157174248,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995466
+286a5c19a43382a21c8d96d847b52bba6b715a71,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6876188
+289cfcd081c4393c7d6f63510747b5372202f855,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373873
+28e1982d20b6eff33989abbef3e9e74400dbf508,http://doi.org/10.1007/s11042-015-3007-5
+28715fc79bd5ff8dd8b6fc68a4f2641e5d1b8a08,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406402
+28f1542c63f5949ee6f2d51a6422244192b5a900,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780475
+176e6ba56e04c98e1997ffdef964ece90fd827b4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8322125
+179564f157a96787b1b3380a9f79701e3394013d,http://dl.acm.org/citation.cfm?id=2493502
+1773d65c1dc566fd6128db65e907ac91b4583bed,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8328914
+7b47dd9302b3085cd6705614b88d7bdbc8ae5c13,http://doi.org/10.1007/s11063-017-9693-4
+8f71c97206a03c366ddefaa6812f865ac6df87e9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8342943
+8fa9cb5dac394e30e4089bf5f4ffecc873d1da96,http://doi.org/10.1007/s11042-017-5245-1
+8fba84af61ac9b5e2bcb69b6730a597d7521ad73,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771329
+8fb2ec3bbd862f680be05ef348b595e142463524,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1699880
+8a8127a06f432982bfb0150df3212f379b36840b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373884
+8ad0a88a7583af819af66cf2d9e8adb860cf9c34,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7539153
+8ac2d704f27a2ddf19b40c8e4695da629aa52a54,http://doi.org/10.1007/s11042-015-2945-2
+8ae642c87f0d6eeff1c6362571e7cd36dcda60ae,http://dl.acm.org/citation.cfm?id=3123271
+8a6033cbba8598945bfadd2dd04023c2a9f31681,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014991
+8a63a2b10068b6a917e249fdc73173f5fd918db0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8120021
+8a4893d825db22f398b81d6a82ad2560832cd890,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5349489
+8aa1591bf8fcb44f2e9f2f10d1029720ccbb8832,http://dl.acm.org/citation.cfm?id=3078988
+7eb8476024413269bfb2abd54e88d3e131d0aa0e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4284739
+7e56d9ebd47490bb06a8ff0bd5bcd8672ec52364,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1275543
+7ee7b0602ef517b445316ca8aa525e28ea79307e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8418530
+7e8c8b1d72c67e2e241184448715a8d4bd88a727,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8097314
+7e2f7c0eeaeb47b163a7258665324643669919e8,http://doi.org/10.1007/s11042-018-5801-3
+7e27d946d23229220bcb6672aacab88e09516d39,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7900131
+7ec431e36919e29524eceb1431d3e1202637cf19,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8365242
+10cb39e93fac194220237f15dae084136fdc6740,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8457972
+10bfa4cecd64b9584c901075d6b50f4fad898d0b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7728013
+10e4172dd4f4a633f10762fc5d4755e61d52dc36,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100146
+1025c4922491745534d5d4e8c6e74ba2dc57b138,http://doi.org/10.1007/s11263-017-1014-x
+1063be2ad265751fb958b396ee26167fa0e844d2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369056
+10bf35bf98cfe555dfc03b5f03f2769d330e3af9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8000333
+193474d008cab9fa1c1fa81ce094d415f00b075c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8466415
+196c12571ab51273f44ea3469d16301d5b8d2828,http://doi.org/10.1007/s00371-018-1494-x
+19b492d426f092d80825edba3b02e354c312295f,http://doi.org/10.1007/s00371-016-1332-y
+1951dc9dd4601168ab5acf4c14043b124a8e2f67,http://doi.org/10.1162/neco_a_01116
+193bc8b663d041bc34134a8407adc3e546daa9cc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373908
+4c72a51a7c7288e6e17dfefe4f87df47929608e7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7736912
+4cc326fc977cf967eef5f3135bf0c48d07b79e2d,http://doi.org/10.1007/s11042-016-3830-3
+4ca9753ab023accbfa75a547a65344ee17b549ba,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5457710
+4cfe921ac4650470b0473fd52a2b801f4494ee64,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6467429
+4c0cc732314ba3ccccd9036e019b1cfc27850c17,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6854473
+263ed62f94ea615c747c00ebbb4008385285b33b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8319974
+2696d3708d6c6cccbd701f0dac14cc94d72dd76d,http://doi.org/10.1007/s10044-017-0633-8
+265a88a8805f6ba3efae3fcc93d810be1ea68866,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8342346
+26575ad9e75efb440a7dc4ef8e548eed4e19dbd1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411910
+26c8ed504f852eda4a2e63dbbbc3480e57f43c70,http://doi.org/10.1142/S0218001415560078
+21d5c838d19fcb4d624b69fe9d98e84d88f18e79,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7358748
+21b5af67618fcc047b495d2d5d7c2bf145753633,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771442
+21959bc56a160ebd450606867dce1462a913afab,http://doi.org/10.1007/s11042-018-6071-9
+214072c84378802a0a0fde0b93ffb17bc04f3759,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7301397
+4d90d7834ae25ee6176c096d5d6608555766c0b1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354115
+4da4e58072c15904d4ce31076061ebd3ab1cdcd5,http://doi.org/10.1007/s00371-018-1477-y
+4d19401e44848fe65b721971bc71a9250870ed5f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8462612
+4db99a2268a120c7af636387241188064ea42338,https://www.ncbi.nlm.nih.gov/pubmed/21820862
+75ce75c1a5c35ecdba99dd8b7ba900d073e35f78,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163152
+75a74a74d6abbbb302a99de3225c8870fa149aee,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7914657
+758d481bbf24d12615b751fd9ec121500a648bce,http://doi.org/10.1007/s11042-015-2914-9
+814369f171337ee1d8809446b7dbfc5e1ef9f4b5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5597559
+81513764b73dae486a9d2df28269c7db75e9beb3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7839217
+8127b7654d6e5c46caaf2404270b74c6b0967e19,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813406
+81b0550c58e7409b4f1a1cd7838669cfaa512eb3,http://doi.org/10.1016/j.patcog.2015.08.026
+81f101cea3c451754506bf1c7edf80a661fa4dd1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163081
+81a80b26979b40d5ebe3f5ba70b03cb9f19dd7a5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369725
+863ad2838b9b90d4461995f498a39bcd2fb87c73,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265580
+8633732d9f787f8497c2696309c7d70176995c15,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7298967
+8694cd9748fb1c128f91a572119978075fede848,http://doi.org/10.1016/j.neucom.2017.08.028
+720763bcb5e0507f13a8a319018676eb24270ff0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5202783
+72167c9e4e03e78152f6df44c782571c3058050e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771464
+443f4421e44d4f374c265e6f2551bf9830de5597,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771467
+44855e53801d09763c1fb5f90ab73e5c3758a728,http://doi.org/10.1007/s11263-017-1018-6
+44b91268fbbf62e1d2ba1d5331ec7aedac30dbe8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8342368
+44d93039eec244083ac7c46577b9446b3a071f3e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1415571
+2a826273e856939b58be8779d2136bffa0dddb08,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373892
+2ac7bb3fb014d27d3928a9b4bc1bf019627e0c1a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8432363
+2a7058a720fa9da4b9b607ea00bfdb63652dff95,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7590031
+2a612a7037646276ff98141d3e7abbc9c91fccb8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909615
+2a2df7e790737a026434187f9605c4763ff71292,http://doi.org/10.1007/s11042-017-4665-2
+2f1485994ef2c09a7bb2874eb8252be8fe710db1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780700
+2f67d5448b5372f639633d8d29aac9c0295b4d72,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6460923
+2f69e9964f3b6bdc0d18749b48bb6b44a4171c64,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7801496
+2f837ff8b134b785ee185a9c24e1f82b4e54df04,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5739539
+2f73203fd71b755a9601d00fc202bbbd0a595110,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8394868
+43fce0c6b11eb50f597aa573611ac6dc47e088d3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8465617
+43dce79cf815b5c7068b1678f6200dabf8f5de31,http://arxiv.org/abs/1709.03196
+43c3b6a564b284382fdf8ae33f974f4e7a89600e,http://dl.acm.org/citation.cfm?id=3190784
+437642cfc8c34e445ea653929e2d183aaaeeb704,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014815
+4317856a1458baa427dc00e8ea505d2fc5f118ab,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8296449
+4342a2b63c9c344d78cf153600cd918a5fecad59,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237671
+88535dba55b0a80975df179d31a6cc80cae1cc92,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8355366
+885c37f94e9edbbb2177cfba8cb1ad840b2a5f20,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8006255
+88e2efab01e883e037a416c63a03075d66625c26,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265507
+9ff931ca721d50e470e1a38e583c7b18b6cdc2cc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7407637
+9f1a854d574d0bd14786c41247db272be6062581,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8360155
+9f62ac43a1086c22b9a3d9f192c975d1a5a4b31f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4426825
+9f131b4e036208f2402182a1af2a59e3c5d7dd44,http://dl.acm.org/citation.cfm?id=3206038
+9f2984081ef88c20d43b29788fdf732ceabd5d6a,http://arxiv.org/abs/1806.01547
+9fc993aeb0a007ccfaca369a9a8c0ccf7697261d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7936534
+9f43caad22803332400f498ca4dd0429fe7da0aa,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6239186
+6baaa8b763cc5553715766e7fbe7abb235fae33c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789589
+6ba3cb67bcdb7aea8a07e144c03b8c5a79c19bc0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8246530
+6b99cd366f2ea8e1c9abadf73b05388c0e24fec3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100204
+6b742055a664bcbd1c6a85ae6796bd15bc945367,http://doi.org/10.1007/s00138-006-0052-0
+07a31bd7a0bd7118f8ac0bc735feef90e304fb08,http://doi.org/10.1007/s11042-015-3120-5
+071ec4f3fb4bfe6ae9980477d208a7b12691710e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6552193
+38c7f80a1e7fa1bdec632042318dc7cdd3c9aad4,http://doi.org/10.1016/j.asoc.2018.03.030
+3827f1cab643a57e3cd22fbffbf19dd5e8a298a8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373804
+007fbc7a1d7eae33b2bb59b175dd1033e5e178f3,http://dl.acm.org/citation.cfm?id=3209659
+6e46d8aa63db3285417c8ebb65340b5045ca106f,http://dl.acm.org/citation.cfm?id=3183751
+6e38011e38a1c893b90a48e8f8eae0e22d2008e8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265376
+9a98dd6d6aaba05c9e46411ea263f74df908203d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7859405
+9a59abdf3460970de53e09cb397f47d86744f472,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995399
+9aab33ce8d6786b3b77900a9b25f5f4577cea461,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961739
+9ac2960f646a46b701963230e6949abd9ac0a9b3,http://doi.org/10.1162/jocn_a_01174
+361eaef45fccfffd5b7df12fba902490a7d24a8d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8404319
+09903df21a38e069273b80e94c8c29324963a832,http://doi.org/10.1007/s11042-017-4980-7
+098363b29eef1471c494382338687f2fe98f6e15,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411212
+099053f2cbfa06c0141371b9f34e26970e316426,http://doi.org/10.1007/s11042-016-4079-6
+5dafab3c936763294257af73baf9fb3bb1696654,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5514556
+5d9971c6a9d5c56463ea186850b16f8969a58e67,http://doi.org/10.1007/s11042-017-5354-x
+5da827fe558fb2e1124dcc84ef08311241761726,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139096
+5dd473a4a9c6337b083edf38b6ddf5a6aece8908,http://arxiv.org/abs/1711.08238
+5de9670f72d10682bf2cb3156988346257e0489f,http://doi.org/10.1016/j.inffus.2015.12.004
+5d2e5833ca713f95adcf4267148ac2ccf2318539,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6121744
+5dd3c9ac3c6d826e17c5b378d1575b68d02432d7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7292416
+31cdaaa7a47efe2ce0e78ebec29df4d2d81df265,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7776921
+31f1c92dbfa5aa338a21a0cb15d071cb9dc6e362,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337733
+31dd6bafd6e7c6095eb8d0591abac3b0106a75e3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8457336
+31d51e48dbd9e7253eafe0719f3788adb564a971,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7410588
+3157be811685c93d0cef7fa4c489efea581f9b8e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411222
+31ec1e5c3b5e020af4a5a3c1be2724c7429a7c78,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354285
+914d7527678b514e3ee9551655f55ffbd3f0eb0a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8404350
+91e17338a12b5e570907e816bff296b13177971e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8272751
+91f0a95b8eb76e8fa24c8267e4a7a17815fc7a11,http://doi.org/10.1007/s41095-016-0068-y
+657e702326a1cbc561e059476e9be4d417c37795,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8343704
+651cafb2620ab60a0e4f550c080231f20ae6d26e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6360717
+6584c3c877400e1689a11ef70133daa86a238602,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8039231
+629a973ca5f3c7d2f4a9befab97d0044dfd3167a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4427488
+62fddae74c553ac9e34f511a2957b1614eb4f937,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406684
+62750d78e819d745b9200b0c5c35fcae6fb9f404,http://doi.org/10.1007/s11042-016-4085-8
+62f017907e19766c76887209d01d4307be0cc573,http://doi.org/10.1016/j.imavis.2012.02.001
+969626c52d30ea803064ddef8fb4613fa73ba11d,http://doi.org/10.1007/BF02683992
+96e318f8ff91ba0b10348d4de4cb7c2142eb8ba9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8364450
+96ba65bffdddef7c7737c0f42ff4299e95cd85c2,http://doi.org/10.1007/s11042-018-5658-5
+9649a19b49607459cef32f43db4f6e6727080bdb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8395207
+3a0558ebfde592bd8bd07cb72b8ca8f700715bfb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6636646
+3a3e55cf5bfd689d6c922e082efa0cd71cd2ae5c,http://dl.acm.org/citation.cfm?id=3184081
+3ac3a714042d3ebc159546c26321a1f8f4f5f80c,http://dl.acm.org/citation.cfm?id=3025149
+3a49507c46a2b8c6411809c81ac47b2b1d2282c3,http://doi.org/10.1007/s11042-017-5319-0
+3a6334953cd2775fab7a8e7b72ed63468c71dee7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7591180
+5435d5f8b9f4def52ac84bee109320e64e58ab8f,http://doi.org/10.1007/s11042-016-4321-2
+54ba18952fe36c9be9f2ab11faecd43d123b389b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163085
+54f169ad7d1f6c9ce94381e9b5ccc1a07fd49cc6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7911334
+982fcead58be419e4f34df6e806204674a4bc579,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6613012
+9888edfb6276887eb56a6da7fe561e508e72a517,http://dl.acm.org/citation.cfm?id=3243904
+984edce0b961418d81203ec477b9bfa5a8197ba3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369732
+98d1b5515b079492c8e7f0f9688df7d42d96da8e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5204260
+9806d3dc7805dd8c9c20d7222c915fc4beee7099,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6755972
+98e098ba9ff98fc58f22fed6d3d8540116284b91,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8332532
+98fd92d68a143a5ced4a016fa3b7addd6b4a0122,http://doi.org/10.1007/s11704-016-6066-5
+53507e2de66eaba996f14fd2f54a5535056f1e59,http://doi.org/10.1016/j.sigpro.2017.10.024
+53de11d144cd2eda7cf1bb644ae27f8ef2489289,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8424637
+535cdce8264ac0813d5bb8b19ceafa77a1674adf,http://doi.org/10.1007/s12559-016-9402-z
+53f5cb365806c57811319a42659c9f68b879454a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8356995
+3ff79cf6df1937949cc9bc522041a9a39d314d83,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8406730
+3f0c6dbfd3c9cd5625ba748327d69324baa593a6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373880
+30c93fec078b98453a71f9f21fbc9512ab3e916f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8395274
+3083bd7a442af6a1d72cdc04ae1ad7c30697a4e8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8392250
+30fb5c24cc15eb8cde5e389bf368d65fb96513e4,http://dl.acm.org/citation.cfm?id=3206048
+5e6fc99d8f5ebaab0e9c29bc0969530d201e0708,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8017477
+5ed66fb992bfefb070b5c39dc45b6e3ff5248c10,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163116
+5e9ec3b8daa95d45138e30c07321e386590f8ec7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6967830
+5b5b9c6c67855ede21a60c834aea5379df7d51b7,http://hdl.handle.net/10044/1/45280
+5bb4fd87fa4a27ddacd570aa81c2d66eb4721019,http://doi.org/10.1016/j.neucom.2017.07.014
+5b5b568a0ba63d00e16a263051c73e09ab83e245,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8416840
+378418fdd28f9022b02857ef7dbab6b0b9a02dbe,http://doi.org/10.1007/978-3-319-75420-8
+37866fea39deeff453802cde529dd9d32e0205a5,http://dl.acm.org/citation.cfm?id=2393385
+3779e0599481f11fc1acee60d5108d63e55819b3,http://doi.org/10.1007/s11280-018-0581-2
+0831794eddcbac1f601dcb9be9d45531a56dbf7e,http://doi.org/10.1007/s11042-017-4416-4
+080e0efc3cf71260bfe9bdc62cd86614d1ebca46,http://doi.org/10.1007/s10851-017-0771-z
+6d2fd0a9cbea13e840f962ba7c8a9771ec437d3a,http://doi.org/10.1007/s11063-017-9715-2
+6dcf6b028a6042a9904628a3395520995b1d0ef9,http://dl.acm.org/citation.cfm?id=3158392
+6dcf418c778f528b5792104760f1fbfe90c6dd6a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984
+6de935a02f87aa31e33245c3b85ea3b7f8b1111c,http://doi.org/10.1007/s11263-017-1029-3
+6da711d07b63c9f24d143ca3991070736baeb412,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7000295
+6d70344ae6f6108144a15e9debc7b0be4e3335f1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8390318
+013305c13cfabaea82c218b841dbe71e108d2b97,http://doi.org/10.1007/s11063-016-9554-6
+017e94ad51c9be864b98c9b75582753ce6ee134f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7892240
+01e27b6d1af4c9c2f50e2908b5f3b2331ff24846,http://doi.org/10.1007/s11263-017-0996-8
+0141cb33c822e87e93b0c1bad0a09db49b3ad470,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7298876
+0647c9d56cf11215894d57d677997826b22f6a13,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8401557
+06518858bd99cddf9bc9200fac5311fc29ac33b4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8392777
+06ab24721d7117974a6039eb2e57d1545eee5e46,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373809
+06b4e41185734f70ce432fdb2b121a7eb01140af,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8362753
+6c1227659878e867a01888eef472dd96b679adb6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354280
+6ca6ade6c9acb833790b1b4e7ee8842a04c607f7,http://dl.acm.org/citation.cfm?id=3234805
+6cb8c52bb421ce04898fa42cb997c04097ddd328,http://doi.org/10.1007/978-3-319-11289-3
+6c01b349edb2d33530e8bb07ba338f009663a9dd,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5332299
+6cce5ccc5d366996f5a32de17a403341db5fddc6,http://doi.org/10.1016/j.cviu.2016.04.012
+6c92d87c84fa5e5d2bb5bed3ef38168786bacc49,http://dl.acm.org/citation.cfm?id=2501650
+6c7a42b4f43b3a2f9b250f5803b697857b1444ac,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553718
+6cbde27d9a287ae926979dbb18dfef61cf49860e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8253589
+6c58e3a8209fef0e28ca2219726c15ea5f284f4f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7899896
+397257783ccc8cace5b67cc71e0c73034d559a4f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6918513
+398e0771e64cab6ca5d21754e32dce63f9e3c223,http://dl.acm.org/citation.cfm?id=3206028
+39af06d29a74ad371a1846259e01c14b5343e3d1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8046026
+39d6f8b791995dc5989f817373391189d7ac478a,http://doi.org/10.1016/j.patrec.2015.09.015
+9944c451b4a487940d3fd8819080fe16d627892d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612967
+9939498315777b40bed9150d8940fc1ac340e8ba,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789583
+997b9ffe2f752ba84a66730cfd320d040e7ba2e2,http://dl.acm.org/citation.cfm?id=2967199
+99d06fe2f4d6d76acf40b6da67c5052e82055f5a,http://dl.acm.org/citation.cfm?id=3268909
+9989ad33b64accea8042e386ff3f1216386ba7f1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8393320
+9961f1e5cf8fda29912344773bc75c47f18333a0,http://doi.org/10.1007/s10044-017-0618-7
+521aa8dcd66428b07728b91722cc8f2b5a73944b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8367126
+52af7625f7e7a0bd9f9d8eeafd631c4d431e67e7,http://doi.org/10.1007/s00371-018-1585-8
+525da67fb524d46f2afa89478cd482a68be8a42b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354128
+522a4ca705c06a0436bbe62f46efe24d67a82422,http://doi.org/10.1007/s11042-017-5475-2
+55432723c728a2ce90d817e9e9877ae9fbad6fe5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8412925
+55cfc3c08000f9d21879582c6296f2a864b657e8,http://doi.org/10.1049/iet-cvi.2015.0287
+556b05ab6eff48d32ffbd04f9008b9a5c78a4ad7,http://dl.acm.org/citation.cfm?id=2926713
+552122432b92129d7e7059ef40dc5f6045f422b5,http://doi.org/10.1007/s11263-017-1000-3
+55aafdef9d9798611ade1a387d1e4689f2975e51,http://doi.org/10.1007/s11263-017-1044-4
+55c4efc082a8410b528af7325de8148b80cf41e3,http://dl.acm.org/citation.cfm?id=3231899
+55a7286f014cc6b51a3f50b1e6bc8acc8166f231,http://arxiv.org/abs/1603.02814
+97b5800e144a8df48f1f7e91383b0f37bc37cf60,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237657
+972e044f69443dfc5c987e29250b2b88a6d2f986,http://doi.org/10.1134/S1054661811020738
+971cb1bfe3d10fcb2037e684c48bd99842f42fa4,http://doi.org/10.1007/s11042-017-5141-8
+972b1a7ef8cc9c83c2c6d8d126f94f27b567d7d0,http://doi.org/10.1007/978-3-319-99978-4
+97c1f68fb7162af326cd0f1bc546908218ec5da6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7471977
+63fd7a159e58add133b9c71c4b1b37b899dd646f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6603332
+6318d3842b36362bb45527b717e1a45ae46151d5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780708
+636b8ffc09b1b23ff714ac8350bb35635e49fa3c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1467308
+6359fcb0b4546979c54818df8271debc0d653257,http://doi.org/10.1007/s11704-017-6275-6
+633c851ebf625ad7abdda2324e9de093cf623141,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961727
+6316a4b689706b0f01b40f9a3cef47b92bc52411,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1699534
+0f7e9199dad3237159e985e430dd2bf619ef2db5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7883882
+0a0007cfd40ae9694c84f109aea11ec4f2b6cf39,http://doi.org/10.1007/s11042-016-4105-8
+0aaf785d7f21d2b5ad582b456896495d30b0a4e2,http://dl.acm.org/citation.cfm?id=3173789
+642a386c451e94d9c44134e03052219a7512b9de,http://doi.org/10.1016/j.imavis.2008.04.018
+640e12837241d52d04379d3649d050ee3760048c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5692624
+64ec02e1056de4b400f9547ce56e69ba8393e2ca,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8446491
+645f09f4bc2e6a13663564ee9032ca16e35fc52d,http://dl.acm.org/citation.cfm?id=3193542
+9057044c0347fb9798a9b552910a9aff150385db,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6778411
+9077365c9486e54e251dd0b6f6edaeda30ae52b9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373910
+90e7a86a57079f17f1089c3a46ea9bfd1d49226c,https://www.sciencedirect.com/science/article/pii/S0042698914002739
+90221884fe2643b80203991686af78a9da0f9791,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995467
+bfdafe932f93b01632a5ba590627f0d41034705d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6134770
+bf3bf5400b617fef2825eb987eb496fea99804b9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8461385
+bf37a81d572bb154581845b65a766fab1e5c7dda,http://doi.org/10.1007/s11760-017-1111-x
+d34f546e61eccbac2450ca7490f558e751e13ec3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8461800
+d3008b4122e50a28f6cc1fa98ac6af28b42271ea,http://dl.acm.org/citation.cfm?id=2806218
+d3dea0cd65ab3da14cb7b3bd0ec59531d98508aa,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7728015
+d31328b12eef33e7722b8e5505d0f9d9abe2ffd9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373866
+d36a1e4637618304c2093f72702dcdcc4dcd41d1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961791
+d383ba7bbf8b7b49dcef9f8abab47521966546bb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995471
+d3d39e419ac98db2de1a9d5a05cb0b4ca5cae8fd,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6619296
+d340a135a55ecf7506010e153d5f23155dcfa7e8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7884781
+d4f0960c6587379ad7df7928c256776e25952c60,https://www.ncbi.nlm.nih.gov/pubmed/29107889
+d4453ec649dbde752e74da8ab0984c6f15cc6e06,http://doi.org/10.1007/s11042-016-3361-y
+d4288daef6519f6852f59ac6b85e21b8910f2207,https://www.ncbi.nlm.nih.gov/pubmed/29994505
+d4b4020e289c095ce2c2941685c6cd37667f5cc9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7489442
+d4df31006798ee091b86e091a7bf5dce6e51ba3e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1612996
+d44e6baf3464bf56d3a29daf280b1b525ac30f7d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265336
+ba01dbfa29dc86d1279b2e9b9eeca1c52509bbda,http://doi.org/10.1007/s00530-017-0566-5
+bad2df94fa771869fa35bd11a1a7ab2e3f6d1da3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7344635
+ba1c0600d3bdb8ed9d439e8aa736a96214156284,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8081394
+badb95dbdfb3f044a46d7ba0ee69dba929c511b1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7363515
+baafe3253702955c6904f0b233e661b47aa067e1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7776926
+ba17782ca5fc0d932317389c2adf94b5dbd3ebfe,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5509290
+a082c77e9a6c2e2313d8255e8e4c0677d325ce3e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163111
+a00fdf49e5e0a73eb24345cb25a0bd1383a10021,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7892186
+a03448488950ee5bf50e9e1d744129fbba066c50,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8367180
+a7ec294373ccc0598cbb0bbb6340c4e56fe5d979,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1699580
+a78025f39cf78f2fc66c4b2942fbe5bad3ea65fc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8404357
+a78b5495a4223b9784cc53670cc10b6f0beefd32,http://doi.org/10.1007/s11042-018-6260-6
+b8fc620a1563511744f1a9386bdfa09a2ea0f71b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411214
+b8048a7661bdb73d3613fde9d710bd45a20d13e7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8468792
+b85c198ce09ffc4037582a544c7ffb6ebaeff198,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100113
+b82f89d6ef94d26bf4fec4d49437346b727c3bd4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6894202
+b8d8501595f38974e001a66752dc7098db13dfec,http://arxiv.org/abs/1711.09265
+b806a31c093b31e98cc5fca7e3ec53f2cc169db9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7995928
+b14e3fe0d320c0d7c09154840250d70bc88bb6c0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1699097
+b161d261fabb507803a9e5834571d56a3b87d147,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8122913
+b1f4423c227fa37b9680787be38857069247a307,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6200254
+b104c8ef6735eba1d29f50c99bbbf99d33fc8dc2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7415357
+b11b71b704629357fe13ed97b216b9554b0e7463,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7736040
+dd0086da7c4efe61abb70dd012538f5deb9a8d16,http://doi.org/10.1007/s11704-016-5024-6
+dd6826e9520a6e72bcd24d1bdb930e78c1083b31,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7106467
+ddfae3a96bd341109d75cedeaebb5ed2362b903f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6837429
+dc1510110c23f7b509035a1eda22879ef2506e61,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909642
+dc107e7322f7059430b4ef4991507cb18bcc5d95,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995338
+dcf6ecd51ba135d432fcb7697fc6c52e4e7b0a43,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100120
+dc964b9c7242a985eb255b2410a9c45981c2f4d0,http://doi.org/10.1007/s10851-018-0837-6
+dc5d04d34b278b944097b8925a9147773bbb80cc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354149
+dc5d9399b3796db7fd850990402dce221b98c8be,http://dl.acm.org/citation.cfm?id=3220016
+dc3dc18b6831c867a8d65da130a9ff147a736745,http://dl.acm.org/citation.cfm?id=2750679
+dc34ab49d378ddcf6c8e2dbf5472784c5bfa8006,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8462222
+dcb6f06631021811091ce691592b12a237c12907,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8438999
+dc6ad30c7a4bc79bb06b4725b16e202d3d7d8935,http://doi.org/10.1007/s11042-017-4646-5
+b6bb883dd14f2737d0d6225cf4acbf050d307634,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8382306
+b6f15bf8723b2d5390122442ab04630d2d3878d8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163142
+b6620027b441131a18f383d544779521b119c1aa,http://doi.org/10.1016/j.patcog.2013.04.013
+b69bcb5f73999ea12ff4ac1ac853b72cd5096b2d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1613024
+a9fc8efd1aa3d58f89c0f53f0cb112725b5bda10,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8316891
+a9ae55c83a8047c6cdf7c958fd3d4a6bfb0a13df,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014745
+a9fdbe102f266cc20e600fa6b060a7bc8d1134e9,https://www.ncbi.nlm.nih.gov/pubmed/29334821
+a92147bed9c17c311c6081beb0ef4c3165b6268e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6805594
+a98ff1c2e3c22e3d0a41a2718e4587537b92da0a,http://doi.org/10.1007/978-3-319-68548-9_19
+a939e287feb3166983e36b8573cd161d12097ad8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7550048
+a961f1234e963a7945fed70197015678149b37d8,http://dl.acm.org/citation.cfm?id=3206068
+a96c45ed3a44ad79a72499be238264ae38857988,http://doi.org/10.1007/s00138-016-0786-2
+a92c207031b0778572bf41803dba1a21076e128b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8433557
+a9215666b4bcdf8d510de8952cf0d55b635727dc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7498613
+d57c8d46a869c63fb20e33bc21bc2a3c4628f5b4,http://doi.org/10.1007/s11042-018-5806-y
+d57982dc55dbed3d0f89589e319dc2d2bd598532,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099760
+d5d5cc27ca519d1300e77e3c1a535a089f52f646,http://doi.org/10.1007/s11042-016-3768-5
+d289ce63055c10937e5715e940a4bb9d0af7a8c5,http://dl.acm.org/citation.cfm?id=3081360
+d264dedfdca8dc4c71c50311bcdd6ba3980eb331,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8392234
+d2f2b10a8f29165d815e652f8d44955a12d057e6,http://doi.org/10.1007/s10044-015-0475-1
+d20ea5a4fa771bc4121b5654a7483ced98b39148,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4430554
+aad4c94fd55d33a3f3a5377bbe441c9474cdbd1e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7777820
+aa581b481d400982a7e2a88830a33ec42ad0414f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7313922
+aa5a7a9900548a1f1381389fc8695ced0c34261a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7900274
+aafeb3d76155ec28e8ab6b4d063105d5e04e471d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014781
+aa6e8a2a9d3ed59d2ae72add84176e7b7f4b2912,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8203756
+aa1129780cc496918085cd0603a774345c353c54,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7779010
+aa1607090fbc80ab1e9c0f25ffe8b75b777e5fd8,https://www.sciencedirect.com/science/article/pii/S0006322316331110
+af29ad70ab148c83e1faa8b3098396bc1cd87790,http://doi.org/10.1007/s40012-016-0149-1
+afdc303b3325fbc1baa9f18a66bcad59d5aa675b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5595920
+af4745a3c3c7b51dab0fd90d68b53e60225aa4a9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7873272
+af3b803188344971aa89fee861a6a598f30c6f10,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8404811
+af9419f2155785961a5c16315c70b8228435d5f8,http://doi.org/10.1016/j.patrec.2015.12.013
+b712f08f819b925ff7587b6c09a8855bc295d795,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8450858
+b759936982d6fb25c55c98955f6955582bdaeb27,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7472169
+b7ec41005ce4384e76e3be854ecccd564d2f89fb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8441009
+b72eebffe697008048781ab7b768e0c96e52236a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100092
+b7461aac36fc0b8a24ecadf6c5b5caf54f2aa2f7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7528404
+b7c6df1ae0e8348feecd65e9ad574d1e04d212a5,http://doi.org/10.1007/s11704-018-8015-y
+db0379c9b02e514f10f778cccff0d6a6acf40519,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6130343
+dba7d8c4d2fca41269a2c96b1ea594e2d0b9bdda,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7422069
+db1a9b8d8ce9a5696a96f8db4206b6f72707730e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961838
+dbb9601a1d2febcce4c07dd2b819243d81abb2c2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8361884
+dbc8ffd6457147ff06cd3f56834e3ec6dccb2057,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265396
+dbced84d839165d9b494982449aa2eb9109b8467,http://arxiv.org/abs/1712.05083
+a8bb698d1bb21b81497ef68f0f52fa6eaf14a6bf,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6587752
+a85f691c9f82a248aa2c86d4a63b9036d6cf47ab,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8423530
+a88ced67f4ed7940c76b666e1c9c0f08b59f9cf8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771415
+a8e7561ada380f2f50211c67fc45c3b3dea96bdb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4401921
+a89cbc90bbb4477a48aec185f2a112ea7ebe9b4d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265434
+de162d4b8450bf2b80f672478f987f304b7e6ae4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237454
+def934edb7c7355757802a95218c6e4ed6122a72,http://doi.org/10.1007/978-0-387-31439-6
+dec76940896a41a8a7b6e9684df326b23737cd5d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6607638
+de92951ea021ec56492d76381a8ae560a972dd68,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6738246
+dee6609615b73b10540f32537a242baa3c9fca4d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8015006
+de0df8b2b4755da9f70cf1613d7b12040d0ce8ef,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791166
+de45bf9e5593a5549a60ca01f2988266d04d77da,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8404529
+b0b944b3a783c2d9f12637b471fe1efb44deb52b,http://dl.acm.org/citation.cfm?id=2591684
+b034cc919af30e96ee7bed769b93ea5828ae361b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099915
+a6b5ca99432c23392cec682aebb8295c0283728b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8302395
+a6e4f924cf9a12625e85c974f0ed136b43c2f3b5,http://doi.org/10.1007/s11042-017-4572-6
+a60db9ca8bc144a37fe233b08232d9c91641cbb5,http://doi.org/10.1007/s11280-018-0615-9
+a6902db7972a7631d186bbf59c5ef116c205b1e8,http://dl.acm.org/citation.cfm?id=1276381
+a6ce1a1de164f41cb8999c728bceedf65d66bb23,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7170694
+a6d47f7aa361ab9b37c7f3f868280318f355fadc,https://ora.ox.ac.uk/objects/uuid:7704244a-b327-4e5c-a58e-7bfe769ed988
+b97c7f82c1439fa1e4525e5860cb05a39cc412ea,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4430537
+b999364980e4c21d9c22cc5a9f14501432999ca4,http://doi.org/10.1007/s10044-018-0727-y
+b9dc8cc479cacda1f23b91df00eb03f88cc0c260,http://dl.acm.org/citation.cfm?id=2964287
+b91f54e1581fbbf60392364323d00a0cd43e493c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553788
+b961e512242ddad7712855ab00b4d37723376e5d,http://doi.org/10.1007/s11554-010-0178-1
+a1e07c31184d3728e009d4d1bebe21bf9fe95c8e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7900056
+a168ca2e199121258fbb2b6c821207456e5bf994,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553808
+a1081cb856faae25df14e25045cd682db8028141,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8462122
+a136ccaa67f660c45d3abb8551c5ed357faf7081,https://www.ncbi.nlm.nih.gov/pubmed/27078863
+ef2bb8bd93fa8b44414565b32735334fa6823b56,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8393076
+efc78a7d95b14abacdfde5c78007eabf9a21689c,http://dl.acm.org/citation.cfm?id=2939840
+efb24d35d8f6a46e1ff3800a2481bc7e681e255e,http://doi.org/10.1016/j.patrec.2015.08.006
+c3d3d2229500c555c7a7150a8b126ef874cbee1c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406478
+c3d874336eb8fae92ab335393fd801fa8df98412,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952438
+c362116a358320e71fb6bc8baa559142677622d2,http://doi.org/10.1016/j.patcog.2011.07.009
+c38b1fa00f1f370c029984c55d4d2d40b529d00c,http://doi.org/10.1007/978-3-319-26561-2
+c4a2cd5ec81cdfd894c9a20d4ffb8cda637aab1f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5326314
+c4cfdcf19705f9095fb60fb2e569a9253a475f11,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237333
+c4e2d5ebfebbb9dcee6a9866c3d6290481496df5,http://doi.org/10.1007/s00138-012-0439-z
+c43dc4ae68a317b34a79636fadb3bcc4d1ccb61c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369763
+c47bd9f6eb255da525dbcdfc111609c90bc4d2ae,http://dl.acm.org/citation.cfm?id=3230921
+c4f3185f010027a0a97fcb9753d74eb27a9cfd3e,http://doi.org/10.1016/j.patrec.2015.02.006
+c48b68dc780c71ab0f0f530cd160aa564ed08ade,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1357193
+eaf020bc8a3ed5401fc3852f7037a03b2525586a,http://arxiv.org/abs/1710.07735
+eac97959f2fcd882e8236c5dd6035870878eb36b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6890147
+ea1303f6746f815b7518c82c9c4d4a00cd6328b9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411434
+eacf974e235add458efb815ada1e5b82a05878fa,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4577667
+ea03a569272d329090fe60d6bff8d119e18057d7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7532906
+e1312b0b0fd660de87fa42de39316b28f9336e70,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369055
+e1d1540a718bb7a933e21339f1a2d90660af7353,http://doi.org/10.1007/s11063-018-9852-2
+e1179a5746b4bf12e1c8a033192326bf7f670a4d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163104
+e16f73f3a63c44cf285b8c1bc630eb8377b85b6d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373816
+e14cc2715b806288fe457d88c1ad07ef55c65318,http://dl.acm.org/citation.cfm?id=2830583
+e180572400b64860e190a8bc04ef839fa491e056,http://doi.org/10.1038/s41598-017-12097-w
+cdcfc75f54405c77478ab776eb407c598075d9f8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7410829
+cd22e6532211f679ba6057d15a801ba448b9915c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8434092
+cd55fb30737625e86454a2861302b96833ed549d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139094
+cd63759842a56bd2ede3999f6e11a74ccbec318b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995404
+cd2f8d661ea2c6d6818a278eb4f0548751c3b1ae,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7945277
+cc9d068cf6c4a30da82fd6350a348467cb5086d4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411204
+ccb2ecb30a50460c9189bb55ba594f2300882747,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8334751
+cccd0edb5dafb3a160179a60f75fd8c835c0be82,http://doi.org/10.1007/s12193-017-0241-3
+cc05f758ccdf57d77b06b96b9d601bf2795a6cc4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6854428
+cce332405ce9cd9dccc45efac26d1d614eaa982d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5597533
+ccb54fc5f263a8bc2a8373839cb6855f528f10d3,http://doi.org/10.1016/j.patcog.2015.11.008
+cc2a9f4be1e465cb4ba702539f0f088ac3383834,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7344595
+e6d6203fa911429d76f026e2ec2de260ec520432,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7899663
+e6da1fcd2a8cda0c69b3d94812caa7d844903007,http://dl.acm.org/citation.cfm?id=3137154
+e68869499471bcd6fa8b4dc02aa00633673c0917,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5595885
+f9d9b2a1197cdb73e977490756c0ff8a30cafc3e,http://doi.org/10.1007/s11042-018-6110-6
+f03a82fd4a039c1b94a0e8719284a777f776fb22,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8355453
+f095b5770f0ff13ba9670e3d480743c5e9ad1036,http://doi.org/10.1007/s11263-016-0950-1
+f0f854f8cfe826fd08385c0c3c8097488f468076,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406454
+f070d739fb812d38571ec77490ccd8777e95ce7a,http://doi.org/10.1016/j.patcog.2014.09.007
+f7ae38a073be7c9cd1b92359131b9c8374579b13,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7487053
+f76a6b1d6029769e2dc1be4dadbee6a7ba777429,http://doi.org/10.1007/s12559-017-9506-0
+f7be8956639e66e534ed6195d929aed4e0b90cad,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4117059
+e8aa1f207b4b0bb710f79ab47a671d5639696a56,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7362364
+e853484dc585bed4b0ed0c5eb4bc6d9d93a16211,http://dl.acm.org/citation.cfm?id=3130971
+e8f4ded98f5955aad114f55e7aca6b540599236b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7047804
+e896389891ba84af58a8c279cf8ab5de3e9320ee,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6958874
+fa052fd40e717773c6dc9cc4a2f5c10b8760339f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5595883
+fa641327dc5873276f0af453a2caa1634c16f143,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789590
+fa80344137c4d158bf59be4ac5591d074483157a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1470219
+fa32b29e627086d4302db4d30c07a9d11dcd6b84,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354123
+ff76ff05aa1ab17e5ca9864df2252e6bb44c8a17,http://dl.acm.org/citation.cfm?id=3173582
+ffc81ced9ee8223ab0adb18817321cbee99606e6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791157
+fffe5ab3351deab81f7562d06764551422dbd9c4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163114
+ff012c56b9b1de969328dacd13e26b7138ff298b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7762921
+c5c53d42e551f3c8f6ca2c13335af80a882009fa,http://doi.org/10.1007/s11263-018-1088-0
+c5e37630d0672e4d44f7dee83ac2c1528be41c2e,http://dl.acm.org/citation.cfm?id=3078973
+c535d4d61aa0f1d8aadb4082bdcc19f4cbdf0eaf,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237344
+c26b43c2e1e2da96e7caabd46e1d7314acac0992,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8466510
+c29fe5ed41d2240352fcb8d8196eb2f31d009522,http://doi.org/10.1007/s11042-015-3230-0
+c2bd9322fa2d0a00fc62075cc0f1996fc75d42a8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014811
+f64574ee0e6247b84d573ddb5c6e2c4ba798ffff,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1699435
+f6fc112ff7e4746b040c13f28700a9c47992045e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7442559
+f6532bf13a4649b7599eb40f826aa5281e392c61,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6202713
+f61829274cfe64b94361e54351f01a0376cd1253,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7410784
+f6f2a212505a118933ef84110e487551b6591553,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952474
+f65b47093e4d45013f54c3ba09bbcce7140af6bb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354117
+e9809c0c6bf33cfe232a63b0a13f9b1263c58cb8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7172556
+e97ba85a4550667b8a28f83a98808d489e0ff3bc,http://doi.org/10.1155/2018%2F9729014
+e9b0a27018c7151016a9fe01c98b4c21d6ebf4be,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7471957
+e96cef8732f3021080c362126518455562606f2d,http://dl.acm.org/citation.cfm?id=3206058
+f1ae9f5338fcff577b1ae9becdb66007fe57bd45,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099873
+f16599e4ec666c6390c90ff9a253162178a70ef5,http://dl.acm.org/citation.cfm?id=3206050
+f1280f76933ba8b7f4a6b8662580504f02bb4ab6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7836703
+f1173a4c5e3501323b37c1ae9a6d7dd8a236eab8,http://arxiv.org/abs/1504.07339
+f11c76efdc9651db329c8c862652820d61933308,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163100
+e75a589ca27dc4f05c2715b9d54206dee37af266,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8409973
+e7cfaff65541cde4298a04882e00608d992f6703,http://doi.org/10.1007/s00521-018-3554-6
+e7697c7b626ba3a426106d83f4c3a052fcde02a4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553713
+e79bacc03152ea55343e6af97bcd17d8904cf5ef,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237669
+cb8382f43ce073322eba82809f02d3084dad7969,http://dl.acm.org/citation.cfm?id=3232664
+cbbd9880fb28bef4e33da418a3795477d3a1616e,http://doi.org/10.1016/j.patcog.2016.02.002
+cbe021d840f9fc1cb191cba79d3f7e3bbcda78d3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406479
+cb522b2e16b11dde48203bef97131ddca3cdaebd,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8331979
+cbfcd1ec8aa30e31faf205c73d350d447704afee,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7955089
+cb8a1b8d87a3fef15635eb4a32173f9c6f966055,http://dl.acm.org/citation.cfm?id=3234150
+cb27b45329d61f5f95ed213798d4b2a615e76be2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8329236
+cb2470aade8e5630dcad5e479ab220db94ecbf91,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8397018
+f85ccab7173e543f2bfd4c7a81fb14e147695740,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5946910
+f8162276f3b21a3873dde7a507fd68b4ab858bcc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4761923
+cef73d305e5368ee269baff53ec20ea3ae7cdd82,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8461485
+cec70cf159b51a18b39c80fac1ad34f65f3691ef,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7949100
+cea2911ccabab40e9c1e5bcc0aa1127cab0c789f,http://doi.org/10.1007/s11042-015-2847-3
+cec8936d97dea2fcf04f175d3facaaeb65e574bf,http://dl.acm.org/citation.cfm?id=3134264
+ce70dd0d613b840754dce528c14c0ebadd20ffaa,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7973159
+ceba8ca45bad226c401a509e6b8ccbf31361b0c9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7129813
+ce75deb5c645eeb08254e9a7962c74cab1e4c480,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373839
+ced7811f2b694e54e3d96ec5398e4b6afca67fc0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1605391
+ce2945e369603fcec1fcdc6e19aac5996325cba9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771366
+e060e32f8ad98f10277b582393df50ac17f2836c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099600
+e0162dea3746d58083dd1d061fb276015d875b2e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014992
+46f48211716062744ddec5824e9de9322704dea1,http://doi.org/10.1007/s11263-016-0923-4
+468bb5344f74842a9a43a7e1a3333ebd394929b4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373896
+46e0703044811c941f0b5418139f89d46b360aa3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7883945
+4686df20f0ee40cd411e4b43860ef56de5531d9e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1301536
+46c82cfadd9f885f5480b2d7155f0985daf949fc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780537
+46976097c54e86032932d559c8eb82ffea4bb6bb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6738868
+2c052a1c77a3ec2604b3deb702d77c41418c7d3e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373863
+2ce1bac5ddc4cf668bbbb8879cd21dfb94b5cfe4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099709
+7923742e2af655dee4f9a99e39916d164bc30178,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8272743
+7914c3f510e84a3d83d66717aad0d852d6a4d148,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7532448
+7918e3e15099b4b2943746e1f6c9e3992a79c5f3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995492
+794a51097385648e3909a1acae7188f5ab881710,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813382
+2d3af3ee03793f76fb8ff15e7d7515ff1e03f34c,http://doi.org/10.1007/s11042-017-4818-3
+2d7c2c015053fff5300515a7addcd74b523f3f66,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8323422
+2dbc57abf3ceda80827b85593ce1f457b76a870b,http://doi.org/10.1007/s11042-018-6133-z
+4113269f916117f975d5d2a0e60864735b73c64c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1613059
+41c56c69b20b3f0b6c8a625009fc0a4d317e047a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5720366
+41c42cb001f34c43d4d8dd8fb72a982854e173fb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5308445
+414d78e32ac41e6ff8b192bc095fe55f865a02f4,http://arxiv.org/abs/1706.00631
+834736698f2cc5c221c22369abe95515243a9fc3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6996249
+83d41f6548bb76241737dcd3fed9e182ee901ff9,http://dl.acm.org/citation.cfm?id=2964328
+8355d095d3534ef511a9af68a3b2893339e3f96b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406390
+83f80fd4eb614777285202fa99e8314e3e5b169c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265544
+1bd9dbe78918ed17b0a3ac40623f044cb3d3552c,http://doi.org/10.1038/nn870
+1b5d445741473ced3d4d33732c9c9225148ed4a1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8452894
+7783095a565094ae5b3dccf082d504ddd7255a5c,http://dl.acm.org/citation.cfm?id=2502258
+77d929b3c4bf546557815b41ed5c076a5792dc6b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265399
+779d3f0cf74b7d33344eea210170c7c981a7e27b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8115237
+7788fa76f1488b1597ee2bebc462f628e659f61e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8063888
+771505abd38641454757de75fe751d41e87f89a4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8401561
+48a402593ca4896ac34fbebf1e725ab1226ecdb7,http://doi.org/10.1016/j.patcog.2015.01.022
+48de3ca194c3830daa7495603712496fe908375c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6619283
+480ccd25cb2a851745f5e6e95d33edb703efb49e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8461792
+484bac2a9ff3a43a6f85d109bbc579a4346397f5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6011991
+70e14e216b12bed2211c4df66ef5f0bdeaffe774,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237666
+708f4787bec9d7563f4bb8b33834de445147133b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237449
+70d2ab1af0edd5c0a30d576a5d4aa397c4f92d3e,http://doi.org/10.1007/s11042-018-5608-2
+1e0d92b9b4011822825d1f7dc0eba6d83504d45d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4497872
+1e3068886b138304ec5a7296702879cc8788143d,http://doi.org/10.1007/s11263-013-0630-3
+84c5b45328dee855c4855a104ac9c0558cc8a328,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411213
+84574aa43a98ad8a29470977e7b091f5a5ec2366,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7301321
+84a74ef8680b66e6dccbc69ae80321a52780a68e,http://doi.org/10.1007/978-0-85729-932-1_19
+845f45f8412905137bf4e46a0d434f5856cd3aec,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8418618
+4a733a0862bd5f7be73fb4040c1375a6d17c9276,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6618949
+4a8480d58c30dc484bda08969e754cd13a64faa1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406475
+24603ed946cb9385ec541c86d2e42db47361c102,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373865
+24286ef164f0e12c3e9590ec7f636871ba253026,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369721
+2480f8dccd9054372d696e1e521e057d9ac9de17,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8396968
+247a8040447b6577aa33648395d95d80441a0cf3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8362745
+23edcd0d2011d9c0d421193af061f2eb3e155da3,http://doi.org/10.1007/s00371-015-1137-4
+23ee7b7a9ca5948e81555aaf3a044cfec778f148,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771385
+239e305c24155add73f2a0ba5ccbd66b37f77e14,http://dl.acm.org/citation.cfm?id=1219097
+23e824d1dfc33f3780dd18076284f07bd99f1c43,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7947686
+239958d6778643101ab631ec354ea1bc4d33e7e0,http://doi.org/10.1016/j.patcog.2017.06.009
+234c106036964131c0f2daf76c47ced802652046,http://doi.org/10.1016/j.cviu.2015.07.007
+4f37f71517420c93c6841beb33ca0926354fa11d,http://doi.org/10.1016/j.neucom.2017.08.062
+4f064c2a0ef0849eed61ab816ff0c2ff6d9d7308,http://dl.acm.org/citation.cfm?id=2396318
+4f1249369127cc2e2894f6b2f1052d399794919a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8239663
+4f8345f31e38f65f1155569238d14bd8517606f4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6618941
+4f8b4784d0fca31840307650f7052b0dde736a76,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7017496
+8d0bc14589dea1f4f88914ffcb57a5c54830f2cc,http://doi.org/10.1007/978-3-319-16865-4
+8dd9c97b85e883c16e5b1ec260f9cd610df52dec,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8404159
+8da32ff9e3759dc236878ac240728b344555e4e9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014820
+8dfe43c76b76a97f8938f5f5f81059a1f1fa74ed,http://doi.org/10.1038/s41598-017-18993-5
+8de5dc782178114d9424d33d9adabb2f29a1ab17,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7053946
+151b87de997e55db892b122c211f9c749f4293de,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237481
+127c7f87f289b1d32e729738475b337a6b042cf7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8436988
+1221e25763c3be95c1b6626ca9e7feaa3b636d9a,http://doi.org/10.1007/s11042-017-4353-2
+12226bca7a891e25b7d1e1a34a089521bba75731,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373861
+8c4042191431e9eb43f00b0f14c23765ab9c6688,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7532956
+8ccbbd9da0749d96f09164e28480d54935ee171c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5597578
+856cc83a3121de89d4a6d9283afbcd5d7ef7aa2b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6417014
+85a136b48c2036b16f444f93b086e2bd8539a498,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7885525
+85e78aa374d85f9a61da693e5010e40decd3f986,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6619100
+854b1f0581f5d3340f15eb79452363cbf38c04c8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7903648
+85ec86f8320ba2ed8b3da04d1c291ce88b8969c0,http://dl.acm.org/citation.cfm?id=3264947
+85ae6fa48e07857e17ac4bd48fb804785483e268,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7755833
+85c90ad5eebb637f048841ebfded05942bb786b7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6977163
+8562b4f63e49847692b8cb31ef0bdec416b9a87a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8128909
+857c64060963dd8d28e4740f190d321298ddd503,http://doi.org/10.1007/s11042-015-3103-6
+1d30f813798c55ae4fe454829be6e2948ee841da,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4270396
+1d51b256af68c5546d230f3e6f41da029e0f5852,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7590015
+1de23d7fe718d9fab0159f58f422099e44ad3f0a,http://doi.org/10.1007/s11063-016-9558-2
+71ca8b6e84c17b3e68f980bfb8cddc837100f8bf,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7899774
+7195cb08ba2248f3214f5dc5d7881533dd1f46d9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5673820
+71c4b8e1bb25ee80f4317411ea8180dae6499524,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8463396
+765be0c44a67e41e0f8f0b5d8a3af0ff40a00c7d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373821
+768f6a14a7903099729872e0db231ea814eb05e9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411205
+1c25a3c8ef3e2c4dbff337aa727d13f5eba40fb2,http://doi.org/10.1007/s00371-016-1290-4
+1c0acf9c2f2c43be47b34acbd4e7338de360e555,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8461986
+8202da548a128b28dd1f3aa9f86a0523ec2ecb26,http://doi.org/10.1016/j.ijar.2012.01.003
+82a0a5d0785fb2c2282ed901a15c3ff02f8567df,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6849828
+82e3f4099503633c042a425e9217bfe47cfe9d4b,http://doi.org/10.1007/s11042-015-2819-7
+49358915ae259271238c7690694e6a887b16f7ed,http://doi.org/10.1007/BF02884429
+4983076c1a8b80ff5cd68b924b11df58a68b6c84,http://doi.org/10.1007/s11704-017-6114-9
+49068538b7eef66b4254cc11914128097302fab8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5339040
+49be50efc87c5df7a42905e58b092729ea04c2f5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7177489
+493c8591d6a1bef5d7b84164a73761cefb9f5a25,http://dl.acm.org/citation.cfm?id=3159691
+40c9dce0a4c18829c4100bff5845eb7799b54ca1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5346008
+405d9a71350c9a13adea41f9d7f7f9274793824f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373834
+40c1de7b1b0a087c590537df55ecd089c86e8bfc,http://doi.org/10.1162/NECO_a_00401
+4007bf090887d8a0e907ab5e17ecfcdbbdafc2e4,http://doi.org/10.1007/s13735-017-0144-9
+407806f5fe3c5ecc2dc15b75d3d2b0359b4ee7e0,http://doi.org/10.1007/s11042-017-5028-8
+2e7e1ee7e3ee1445939480efd615e8828b9838f8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5643167
+2e3b981b9f3751fc5873f77ad2aa7789c3e1d1d2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8397046
+2bb36c875754a2a8919f2f9b00a336c00006e453,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373869
+2bf646a6efd15ab830344ae9d43e10cc89e29f34,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8387808
+2bcd9b2b78eb353ea57cf50387083900eae5384a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995329
+4735fa28fa2a2af98f7b266efd300a00e60dddf7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6460647
+7831ab4f8c622d91974579c1ff749dadc170c73c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6712699
+78f2c8671d1a79c08c80ac857e89315197418472,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237443
+784a83437b3dba49c0d7ccc10ac40497b84661a5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100224
+78cec49ca0acd3b961021bc27d5cf78cbbbafc7e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995556
+782a05fbe30269ff8ab427109f5c4d0a577e5284,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8038860
+8bebb26880274bdb840ebcca530caf26c393bf45,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369529
+8bbd40558a99e33fac18f6736b8fe99f4a97d9b1,http://doi.org/10.1007/s11263-016-0986-2
+13d430257d595231bda216ef859950caa736ad1d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8394947
+13179bb3f2867ea44647b6fe0c8fb4109207e9f5,http://doi.org/10.1007/s00779-018-1171-0
+7fcecaef60a681c47f0476e54e08712ee05d6154,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7299097
+7f203f2ff6721e73738720589ea83adddb7fdd27,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1301513
+7fb7ccc1aa093ca526f2d8b6f2c404d2c886f69a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8404767
+7f44e5929b11ce2192c3ae81fbe602081a7ab5c4,http://doi.org/10.1007/s11554-016-0645-4
+7fe2ab9f54242ef8609ef9bf988f008c7d42407c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8382330
+7f904093e6933cab876e87532111db94c71a304f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6117544
+7f26c615dd187ca5e4b15759d5cb23ab3ea9d9a9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7781761
+7f2a234ad5c256733a837dbf98f25ed5aad214e8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7207289
+7f5b379b12505d60f9303aab1fea48515d36d098,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411873
+7f68a5429f150f9eb7550308bb47a363f2989cb3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6977004
+7acbf0b060e948589b38d5501ca217463cfd5c2f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6940304
+7ac4fc169fffa8e962b9df94f61e2adf6bac8f97,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8453893
+141cb9ee401f223220d3468592effa90f0c255fa,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7815403
+14aed1b7c08c941b1d2ba6c1c2ffb6255c306c74,http://doi.org/10.1007/s00138-016-0820-4
+8e63868e552e433dc536ba732f4c2af095602869,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1699730
+8eb40d0a0a1339469a05711f532839e8ffd8126c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7890464
+8e452379fda31744d4a4383fcb8a9eab6dbc4ae4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4586390
+22648dcd3100432fe0cc71e09de5ee855c61f12b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8393188
+228ea13041910c41b50d0052bdce924037c3bc6a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8434495
+22e121a8dea49e3042de305574356477ecacadda,http://doi.org/10.1007/s00138-018-0935-x
+25960f0a2ed38a89fa8076a448ca538de2f1e183,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411220
+2563fc1797f187e2f6f9d9f4387d4bcadd3fbd02,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7410635
+2564920d6976be68bb22e299b0b8098090bbf259,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8407761
diff --git a/scraper/reports/misc/db_paper_doi.csv b/scraper/reports/misc/db_paper_doi.csv
new file mode 100644
index 00000000..69384d5d
--- /dev/null
+++ b/scraper/reports/misc/db_paper_doi.csv
@@ -0,0 +1,1928 @@
+61831364ddc8db869618f1c7f0ad35ab2ab6bcf7,https://doi.org/10.1109/ICIP.2013.6738496
+61a3c45c9f802f9d5fa8d94fee811e203bac6487,https://doi.org/10.1109/TIFS.2016.2567318
+6159908dec4bc2c1102f416f8a52a31bf3e666a4,https://doi.org/10.1109/ICIP.2012.6467431
+6196f4be3b28684f6528b8687adccbdf9ac5c67c,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.267
+61b22b1016bf13aca8d2e57c4e5e004d423f4865,https://doi.org/10.1109/TCYB.2016.2526630
+61bc124537f414f6fcb4d1ff476681b5a0ee222a,http://doi.ieeecomputersociety.org/10.1109/WIW.2016.043
+0d90c992dd08bfb06df50ab5c5c77ce83061e830,https://doi.org/10.1109/UIC-ATC.2013.85
+0d7fcdb99dc0d65b510f2b0b09d3d3cfed390261,https://doi.org/10.1109/IJCB.2011.6117508
+0d6d9c4b5dd282b8f29cd3c200df02a00141f0a9,https://doi.org/10.1109/SIU.2014.6830193
+0d9815f62498db21f06ee0a9cc8b166acc93888e,https://doi.org/10.1016/j.neucom.2007.12.018
+0d8cec1b3f9b6e25d9d31eeb54d8894a1f2ef84f,https://doi.org/10.1109/LSP.2018.2810121
+0d3ff34d8490a9a53de1aac1dea70172cb02e013,https://doi.org/10.1109/ICPR.2014.542
+0de1450369cb57e77ef61cd334c3192226e2b4c2,https://doi.org/10.1109/BTAS.2017.8272747
+0d7652652c742149d925c4fb5c851f7c17382ab8,https://doi.org/10.1016/j.neucom.2015.05.057
+0da3c329ae14a4032b3ba38d4ea808cf6d115c4a,https://doi.org/10.1007/s00138-015-0709-7
+0d75c7d9a00f859cffe7d0bd78dd35d0b4bc7fa6,https://doi.org/10.1109/LSP.2005.863661
+0d98750028ea7b84b86e6fec3e67d61e4f690d09,https://doi.org/10.1109/ACSSC.2015.7421092
+0db371a6bc8794557b1bffc308814f53470e885a,https://doi.org/10.1007/s13042-015-0380-3
+95f1790da3d0a4a5310a050512ce355b3c5aac86,https://doi.org/10.1109/ICIP.2016.7533142
+95023e3505263fac60b1759975f33090275768f3,http://doi.acm.org/10.1145/2856767.2856770
+952138ae6534fad573dca0e6b221cdf042a36412,http://doi.ieeecomputersociety.org/10.1109/DICTA.2005.38
+950bf95da60fd4e77d5159254fed906d5ed5fbcb,http://doi.ieeecomputersociety.org/10.1109/MMUL.2014.24
+9590b09c34fffda08c8f54faffa379e478f84b04,https://doi.org/10.1109/TNNLS.2013.2275170
+95e7cf27a8ee62b63ed9d1ecb02a7016e9a680a6,https://doi.org/10.1007/s11063-013-9322-9
+5957936195c10521dadc9b90ca9b159eb1fc4871,https://doi.org/10.1109/TCE.2016.7838098
+59fe66eeb06d1a7e1496a85f7ffc7b37512cd7e5,http://doi.ieeecomputersociety.org/10.1109/ICME.2016.7552862
+592370b4c7b58a2a141e507f3a2cc5bbd247a62e,https://doi.org/10.1109/IJCNN.2017.7965911
+59b6ff409ae6f57525faff4b369af85c37a8dd80,http://doi.ieeecomputersociety.org/10.1109/ISM.2017.28
+5981c309bd0ffd849c51b1d8a2ccc481a8ec2f5c,https://doi.org/10.1109/ICT.2017.7998256
+5951e9e13ff99f97f301a336f24a14d80459c659,https://doi.org/10.1016/j.neucom.2017.09.009
+9227c1a5b26556b9c34015b3ea5f9ae5f50e9b23,https://doi.org/10.1109/FCV.2015.7103729
+9255d3b2bfee4aaae349f68e67c76a077d2d07ad,https://doi.org/10.1109/TIP.2017.2713041
+92de9a54515f4ac8cc8e4e6b0dfab20e5e6bb09d,https://doi.org/10.1109/ICIP.2016.7533062
+9213a415d798426c8d84efc6d2a69a2cbfa2af84,https://doi.org/10.1016/j.cviu.2013.03.008
+0c378c8dcf707145e1e840a9951519d4176a301f,https://doi.org/10.1109/ICARCV.2010.5707434
+0c65226edb466204189b5aec8f1033542e2c17aa,https://doi.org/10.1109/ICIP.2017.8296997
+0c247ac797a5d4035469abc3f9a0a2ccba49f4d8,https://doi.org/10.1109/ICMLC.2011.6016715
+0cf1287c8fd41dcef4ac03ebeab20482f02dce20,https://doi.org/10.1109/MSN.2016.032
+0c2370e156a4eb8d84a5fdb049c5a894c3431f1c,https://doi.org/10.1109/CIBIM.2014.7015437
+0c0db39cac8cb76b52cfdbe10bde1c53d68d202f,http://doi.acm.org/10.1145/3123266.3123334
+0c1314d98bb6b99af00817644c1803dbc0fb5ff5,http://doi.ieeecomputersociety.org/10.1109/BigMM.2015.29
+0c6a18b0cee01038eb1f9373c369835b236373ae,https://doi.org/10.1007/s11042-017-4359-9
+66ec085c362f698b40d6e0e7b10629462280c062,https://doi.org/10.1109/ICARCV.2004.1468855
+661c78a0e2b63cbdb9c20dcf89854ba029b6bc87,https://doi.org/10.1109/ICIP.2014.7025093
+66f4d7c381bd1798703977de2e38b696c6641b77,https://doi.org/10.1109/FSKD.2015.7382360
+6688b2b1c1162bc00047075005ec5c7fca7219fd,https://doi.org/10.1109/SACI.2013.6608958
+6622776d1696e79223f999af51e3086ba075dbd1,http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019454
+3e01f2fefe219bfeb112f1d82e76ebba4c0e2aac,http://doi.ieeecomputersociety.org/10.1109/WACV.2014.6836097
+3ebb0209d5e99b22c67e425a67a959f4db8d1f47,https://doi.org/10.1109/ICDAR.2017.173
+3e0035b447d0d4e11ceda45936c898256f321382,https://doi.org/10.1109/BMEI.2014.7002762
+3e1190655cc7c1159944d88bdbe591b53f48d761,https://doi.org/10.1007/s10489-013-0464-2
+3e452ca67e17e4173ec8dfbd4a2b803ad2ee5a48,http://doi.ieeecomputersociety.org/10.1109/WF-IoT.2016.7845505
+3ec860cfbd5d953f29c43c4e926d3647e532c8b0,https://doi.org/10.1109/TCSVT.2008.924108
+3e0377af0087b9b836bf6d95bc1c7085dfde4897,http://doi.acm.org/10.1145/2671188.2749320
+3e7070323bca6106f19bea4c97ef67bd6249cb5d,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477448
+3e03d19b950edadc74ca047dec86227282eccf71,https://doi.org/10.1109/ACCESS.2017.2777003
+503b6a490c2b24b9d2aaf642a0fdaf797a8cdb99,https://doi.org/10.1109/ACCESS.2017.2733718
+504d2675da7a56a36386568ee668938df6d82bbe,https://doi.org/10.1109/TCSVT.2016.2539604
+502d30c5eac92c7db587d85d080343fbd9bc469e,https://doi.org/10.1109/TIFS.2016.2538744
+50333790dd98c052dfafe1f9bf7bf8b4fc9530ba,https://doi.org/10.1109/ICIP.2015.7351001
+5039834df68600a24e7e8eefb6ba44a5124e67fc,https://doi.org/10.1109/ICIP.2013.6738761
+501076313de90aca7848e0249e7f0e7283d669a1,https://doi.org/10.1109/SOCPAR.2014.7007987
+681d222f91b12b00e9a4217b80beaa11d032f540,https://doi.org/10.1007/s10044-015-0493-z
+68c5b4d9ce2a0c75ba515870923a4bd1b7d8f9b5,https://doi.org/10.1109/CISP-BMEI.2017.8301919
+68eb6e0e3660009e8a046bff15cef6fe87d46477,https://doi.org/10.1109/ICIP.2017.8296999
+68e6cfb0d7423d3fae579919046639c8e2d04ad7,https://doi.org/10.1109/ICB.2016.7550058
+6813208b94ffa1052760d318169307d1d1c2438e,http://doi.acm.org/10.1145/2818346.2830582
+68f19f06f49aa98b676fc6e315b25e23a1efb1f0,https://doi.org/10.1109/ICIP.2015.7351080
+68d566ed4041a7519acb87753036610bd64dcc09,https://doi.org/10.1007/s11390-013-1347-z
+68021c333559ab95ca10e0dbbcc8a4840c31e157,https://doi.org/10.1109/ICPR.2016.7900281
+681399aa0ea4cbffd9ab22bf17661d6df4047349,http://doi.ieeecomputersociety.org/10.1109/CISIS.2012.207
+57b7325b8027745b130490c8f736445c407f4c4c,http://doi.ieeecomputersociety.org/10.1109/WI-IAT.2012.27
+5798055e11e25c404b1b0027bc9331bcc6e00555,http://doi.acm.org/10.1145/2393347.2396357
+57eeaceb14a01a2560d0b90d38205e512dcca691,https://doi.org/10.1109/TIP.2017.2778563
+5763b09ebca9a756b4adebf74d6d7de27e80e298,https://doi.org/10.1109/BTAS.2013.6712738
+57f4e54a63ef95596dbc743f391c3fff461f278b,https://doi.org/10.1109/ICMEW.2012.86
+57ca530e9acb63487e8591cb6efb89473aa1e5b4,https://doi.org/10.1109/TIP.2014.2356292
+578117ff493d691166fefc52fd61bad70d8752a9,https://doi.org/10.1109/CCST.2016.7815707
+57ba4b6de23a6fc9d45ff052ed2563e5de00b968,https://doi.org/10.1109/ICIP.2017.8296993
+5721cd4b898f0e7df8de1e0215f630af94656be9,http://doi.acm.org/10.1145/3095140.3095164
+57c270a9f468f7129643852945cf3562cbb76e07,https://doi.org/10.1016/j.imavis.2016.07.004
+57de1a09db680e0b4878ceda68d626ae4e44ccfe,https://doi.org/10.1016/j.neucom.2014.10.111
+57dc55edade7074f0b32db02939c00f4da8fe3a6,https://doi.org/10.1109/TITS.2014.2313371
+3ba74755c530347f14ec8261996dd9eae896e383,https://doi.org/10.1109/JSSC.2017.2767705
+3b8c830b200f1df8ef705de37cbfe83945a3d307,https://doi.org/10.1007/s00138-017-0887-6
+3bdaf59665e6effe323a1b61308bcac2da4c1b73,https://doi.org/10.1109/ROMAN.2012.6343736
+3bfa630a6dc6d1ca98e7b43c90dd9e8b98e361d6,https://doi.org/10.1109/ICIP.2015.7351140
+3b75681f0162752865d85befd8b15e7d954ebfe6,https://doi.org/10.1109/CLEI.2014.6965097
+3b64b8be33887e77e6def4c385985e43e2c15eea,https://doi.org/10.1109/TIP.2016.2576278
+6f74c3885b684e52096497b811692bd766071530,https://doi.org/10.1016/j.neucom.2013.06.013
+6f68c49106b66a5bd71ba118273b4c5c64b6619f,http://doi.ieeecomputersociety.org/10.1109/TKDE.2007.190720
+6ffdbac58e15e0ff084310b0a804520ad4bd013e,https://doi.org/10.1049/iet-bmt.2015.0078
+6f22324fab61fbc5df1aac2c0c9c497e0a7db608,https://doi.org/10.1109/ICB.2013.6612990
+6f16f4bd01aeefdd03d6783beacb7de118f5af8a,https://doi.org/10.1109/VCIP.2013.6706330
+6f0caff7c6de636486ff4ae913953f2a6078a0ab,http://doi.ieeecomputersociety.org/10.1109/ICME.2010.5583081
+6ff0f804b8412a50ae2beea5cd020c94a5de5764,http://doi.acm.org/10.1145/1877972.1877994
+6fdf2f4f7ae589af6016305a17d460617d9ef345,https://doi.org/10.1109/ICIP.2015.7350767
+6f48e5e258da11e6ba45eeabe65a5698f17e58ef,https://doi.org/10.1109/ICASSP.2013.6637968
+6f8cffd9904415c8fa3a1e650ac143867a04f40a,https://doi.org/10.1016/j.neucom.2015.01.099
+0387b32d0ebd034dc778972367e7d4194223785d,http://doi.acm.org/10.1145/2522848.2531740
+03333e7ec198208c13627066bc76b0367f5e270f,https://doi.org/10.1109/IJCNN.2017.7966100
+03e1480f1de2ffbd85655d68aae63a01685c5862,https://doi.org/10.1109/ICPR.2014.771
+0341405252c80ff029a0d0065ca46d0ade943b03,http://doi.ieeecomputersociety.org/10.1109/FG.2017.40
+03babadaaa7e71d4b65203e27e8957db649155c6,https://doi.org/10.1109/TIP.2017.2725578
+0343f9401b98de36be957a30209fef45dd684270,http://doi.ieeecomputersociety.org/10.1109/FG.2015.7163134
+9b78ce9fdac30864d1694a56328b3c8a96cccef5,https://doi.org/10.1089/cpb.2004.7.635
+9bd3cafa16a411815f8f87ed3eb3cafefc25e5a3,https://doi.org/10.1109/ICPR.2016.7899782
+9b8830655d4a5a837e3ffe835d14d6d71932a4f2,https://doi.org/10.1109/TSMCB.2011.2169452
+9ba358281f2946cba12fff266019193a2b059590,http://doi.ieeecomputersociety.org/10.1109/ISM.2008.27
+9b4d2cd2e5edbf5c8efddbdcce1db9a02a853534,https://doi.org/10.1016/j.neucom.2016.02.063
+9b1a70d6771547cbcf6ba646f8775614c0162aca,https://doi.org/10.1016/j.patrec.2016.11.005
+9b1c218a55ead45296bfd7ad315aaeff1ae9983e,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2481396
+9e8382aa1de8f2012fd013d3b39838c6dad8fb4d,http://doi.acm.org/10.1145/3123266.3123349
+9e5690cdb4dfa30d98dff653be459e1c270cde7f,https://doi.org/10.1109/ICIP.2017.8297080
+9e5809122c0880183c7e42c7edd997f92de6d81e,http://doi.acm.org/10.1145/2451176.2451209
+9e7646b7e9e89be525cda1385cc1351cc28a896e,http://doi.ieeecomputersociety.org/10.1109/TMC.2017.2702634
+9e99f818b37d44ec6aac345fb2c5356d83d511c7,https://doi.org/10.1109/ISSPA.2012.6310540
+9eaa967d19fc66010b7ade7d94eaf7971a1957f3,https://doi.org/10.1109/IWCIA.2013.6624793
+9eb13f8e8d948146bfbae1260e505ba209c7fdc1,https://doi.org/10.1109/AFGR.2008.4813404
+9e28243f047cc9f62a946bf87abedb65b0da0f0a,https://doi.org/10.1109/ICMLA.2013.141
+9ef06cc958af2274afd193a1dca705c08234bcd3,https://doi.org/10.1109/ICIP.2014.7026207
+9e60614fd57afe381ae42c6ee0b18f32f60bb493,https://doi.org/10.1109/ICIP.2015.7351544
+049186d674173ebb76496f9ecee55e17ed1ca41b,https://doi.org/10.1109/ACCESS.2017.2724763
+045e83272db5e92aa4dc8bdfee908534c2608711,http://doi.ieeecomputersociety.org/10.1109/ICCABS.2016.7802775
+047d3cb2a6a9628b28cac077b97d95b04ca9044c,https://doi.org/10.1109/FG.2011.5771332
+041b51a81a977b5c64682c55414ad8d165c1f2ce,https://doi.org/10.1109/TCE.2014.7027339
+04f56dc5abee683b1e00cbb493d031d303c815fd,http://doi.acm.org/10.1145/2808492.2808557
+04c07ecaf5e962ac847059ece3ae7b6962b4e5c4,http://doi.acm.org/10.1145/2993148.2997631
+047ce307ad0c871bc2c9a5c1e4649cefae2ba50d,https://doi.org/10.1109/ICRA.2012.6224587
+045275adac94cced8a898a815293700401e9955f,https://doi.org/10.1007/s00138-012-0447-z
+6a3fa483c64e72d9c96663ff031446a2bdb6b2eb,https://doi.org/10.1016/j.patcog.2017.02.003
+6a38e4bb35673a73f041e34d3f2db7067482a9b5,http://doi.acm.org/10.1145/2663204.2666277
+6afe1f668eea8dfdd43f0780634073ed4545af23,https://doi.org/10.1007/s11042-017-4962-9
+6a527eeb0b2480109fe987ed7eb671e0d847fca8,https://doi.org/10.1007/978-3-319-28515-3
+6adecb82edbf84a0097ff623428f4f1936e31de0,https://doi.org/10.1007/s11760-011-0246-4
+6aa0a47f4b986870370c622be51f00f3a1b9d364,https://doi.org/10.1109/TIP.2012.2192285
+6ad5ac867c5ca56e0edaece153269d989b383b59,https://doi.org/10.1109/CISP-BMEI.2016.7852723
+321db1059032b828b223ca30f3304257f0c41e4c,https://doi.org/10.1109/ICACCI.2015.7275951
+32b76220ed3a76310e3be72dab4e7d2db34aa490,https://doi.org/10.1109/SMC.2014.6974364
+32bab8fe6db08c9d1e906be8a9c7e8cf7a0f0b99,http://doi.ieeecomputersociety.org/10.1109/CIS.2007.196
+327ae6742cca4a6a684a632b0d160dd84d0d8632,https://doi.org/10.1007/s10851-015-0629-1
+32c5c65db2af9691f8bb749c953c978959329f8f,https://doi.org/10.1109/ICIP.2015.7351469
+322488c4000c686e9bfb7514ccdeacae33e53358,http://doi.acm.org/10.1145/2671188.2749301
+32dfd4545c87d9820cc92ca912c7d490794a81d6,https://doi.org/10.1007/978-3-319-50551-0
+328da943e22adef5957c08b6909bda09d931a350,https://doi.org/10.1109/ICARCV.2008.4795605
+3288e16c62a215254e2ed7c39675482b356c3bef,https://doi.org/10.1109/SACI.2016.7507341
+329b2781007604652deb72139d14315df3bc2771,http://doi.acm.org/10.1145/2671188.2749358
+32a440720ee988b7b41de204b2910775171ee12c,https://doi.org/10.1109/ICIP.2011.6116351
+3251f40ed1113d592c61d2017e67beca66e678bb,https://doi.org/10.1007/978-3-319-65172-9_17
+356a144d2aa5cc5e74d178dae3963003871aa8a1,https://doi.org/10.1007/978-3-319-27671-7_41
+359edbaa9cf56857dd5c7c94aaef77003ba8b860,https://doi.org/10.1007/978-3-319-02714-2
+35d90beea6b4dca8d949aae93f86cf53da72971f,https://doi.org/10.1109/ICIP.2011.6116672
+35ccc836df60cd99c731412fe44156c7fd057b99,https://doi.org/10.1109/ICCIS.2017.8274819
+3598d10d7d4f2b543afa8bcf6b2c34a3696ef155,https://doi.org/10.1109/SPAC.2017.8304347
+359b4a4c6cb58c8ab5e8eaaed0e8562c8c43a0f9,https://doi.org/10.1007/s10044-014-0377-7
+35d272877b178aa97c678e3fcbb619ff512af4c2,https://doi.org/10.1109/SMC.2017.8122743
+35b3dc0e961a15a7a60b95490a989f91680acc7c,http://doi.ieeecomputersociety.org/10.1109/TDSC.2016.2550459
+35d42f4e7a1d898bc8e2d052c38e1106f3e80188,https://doi.org/10.1109/BTAS.2015.7358765
+69a9cf9bc8e585782824666fa3fb5ce5cf07cef2,https://doi.org/10.1007/s11390-017-1738-7
+699b8250fb93b3fa64b2fc8f59fef036e172564d,https://doi.org/10.1109/ICMLA.2016.0147
+69064c7b349bf6e7f4a802f4fd0da676c1bd1d8b,https://doi.org/10.1016/j.patcog.2014.06.016
+69ba86f7aac7b7be0ac41d990f5cd38400158f96,https://doi.org/10.1109/TNNLS.2015.2504724
+69ad67e204fb3763d4c222a6c3d05d6725b638ed,http://doi.ieeecomputersociety.org/10.1109/ICMEW.2014.6890538
+69b2a7533e38c2c8c9a0891a728abb423ad2c7e7,https://doi.org/10.1016/j.imavis.2013.03.003
+3c8db0d86a6aa51b64ec09c7d25a721adcdfb7a3,https://doi.org/10.1016/j.imavis.2015.06.009
+3c1b73509cc09200e96ab9cfb28ebfd9d1d6aa9a,https://doi.org/10.1109/LSP.2016.2639341
+3c7825dcf5a027bd07eb0fe4cce23910b89cf050,http://doi.acm.org/10.1145/2987378
+3c086601ce0bac61047b5b931b253bd4035e1e7a,https://doi.org/10.1109/ICIP.2015.7350897
+3cbd3124b1b4f95fcdf53abd358d7ceec7861dda,http://doi.acm.org/10.1145/3019612.3019641
+3c09d15b3e78f38618b60388ec9402e616fc6f8e,https://doi.org/10.1109/IJCNN.2010.5596793
+3ce96f03874d42345c0727edc78b6949b20b4a11,https://doi.org/10.1007/s11042-015-2630-5
+3c18fb8ff0f5003fefa8e9dc9bebaf88908d255c,https://doi.org/10.1109/ICIP.2014.7025145
+3c6542295cf7fe362d7d629ac10670bf30cdabce,https://doi.org/10.1109/DICTA.2015.7371264
+3ce37af3ac0ed2eba08267a3605730b2e0433da5,https://doi.org/10.1109/TIP.2016.2609811
+3cd22b5b81a0172d608ff14be71b755d1f68c201,https://doi.org/10.1109/ACCESS.2018.2812725
+3cc2a2eaaacbf96c6b9abc1cf91bfefabf6fcfdd,https://doi.org/10.1109/TCSVT.2014.2317887
+3ca6adc90aae5912baa376863807191ffd56b34e,https://doi.org/10.1109/LSP.2014.2316918
+5642bafa7955b69f05c11230151cd59fcbe43b8e,https://doi.org/10.1007/s11760-012-0404-3
+56fb30b24e7277b47d366ca2c491749eee4d6bb1,https://doi.org/10.1109/ICAPR.2015.7050658
+56bcc89fb1e05d21a8b7b880c6b4df79271ceca5,https://doi.org/10.1007/s11760-013-0441-6
+56e25358ebfaf8a8b3c7c33ed007e24f026065d0,https://doi.org/10.1007/s10994-015-5541-9
+568ced900cbf7437c9e87b60a17e16f0c1e0c442,https://doi.org/10.1109/CCECE.2012.6335026
+5613cb13ab381c8a8b81181ac786255705691626,https://doi.org/10.1109/VCIP.2015.7457876
+56fa0872ed73f7acfbfe83677fecb2dbc6eaa2fe,https://doi.org/10.1007/s11554-007-0031-3
+569988e19ab36582d4bd0ec98e344cbacf177f45,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2014.2347960
+56f57786516dcc8ea3c0ffe877c1363bfb9981d2,https://doi.org/10.1109/CBMI.2014.6849823
+565f7c767e6b150ebda491e04e6b1de759fda2d4,https://doi.org/10.1016/j.patcog.2016.11.023
+51f626540860ad75b68206025a45466a6d087aa6,https://doi.org/10.1109/ICIP.2017.8296595
+51b770e6b2af994ffc8793f59b24a9f619033a3a,https://doi.org/10.1109/ICDSC.2011.6042899
+516f8728ad1d4f9f2701a2b5385f8c8e71b9d356,https://doi.org/10.1109/ACCESS.2017.2745903
+5101368f986aa9837fdb3a71cb4299dff6f6325d,https://doi.org/10.1109/ICIP.2008.4712155
+5180c98815d7034e753a14ef6f54583f115da3aa,http://doi.ieeecomputersociety.org/10.1109/iV.2017.40
+3d2c932f4f2693a87a0b855048e60f142214f475,http://doi.ieeecomputersociety.org/10.1109/CSE.2014.354
+3d1959048eba5495e765a80c8e0bbd3d65b3d544,https://doi.org/10.1016/j.neucom.2016.07.038
+3d2c89676fcc9d64aaed38718146055152d22b39,https://doi.org/10.1109/ACPR.2013.10
+3d4b76fe73ea16400d62d0d776b3f43cc5ecf72b,https://doi.org/10.1109/TIFS.2015.2512561
+3d1f976db6495e2bb654115b939b863d13dd3d05,https://doi.org/10.1007/s11042-015-2581-x
+3dfbd17bd9caf7bd1d908ff469dec2b61e8a9548,https://doi.org/10.1109/ITSC.2015.252
+3d4d3f70352dc833e454a5756d682f27eca46e5d,http://doi.ieeecomputersociety.org/10.1109/FG.2017.32
+3d0b2da6169d38b56c58fe5f13342cf965992ece,https://doi.org/10.1109/ICIP.2016.7532909
+3d89f9b4da3d6fb1fdb33dea7592b5992069a096,https://doi.org/10.1109/CISP-BMEI.2017.8302003
+3d9e44d8f8bc2663192c7ce668ccbbb084e466e4,http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019505
+3d6f59e0f0e16d01b9c588a53d3b6b3b984e991e,http://doi.ieeecomputersociety.org/10.1109/FG.2017.117
+5810ce61fda464d4de2769bd899e12727bee0382,https://doi.org/10.1109/IJCNN.2016.7727484
+58d43e32660446669ff54f29658961fe8bb6cc72,https://doi.org/10.1109/ISBI.2017.7950504
+583e0d218e1e7aaf9763a5493e7c18c2b8dd7464,http://doi.acm.org/10.1145/2988240.2988243
+58684a925693a0e3e4bb1dd2ebe604885be034d2,https://doi.org/10.1109/ICASSP.2008.4517869
+58483028445bf6b2d1ad6e4b1382939587513fe1,http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247763
+5865b6d83ba6dbbf9167f1481e9339c2ef1d1f6b,https://doi.org/10.1109/ICPR.2016.7900278
+58eb9174211d58af76023ce33ee05769de57236c,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2016.2636827
+58d0c140597aa658345230615fb34e2c750d164c,http://doi.acm.org/10.1145/3098954.3098969
+5811944e93a1f3e35ece7a70a43a3de95c69b5ab,https://doi.org/10.1109/BTAS.2016.7791163
+58df849378fbcfb6b1a8ebddfbe4caa450226b9d,https://doi.org/10.1109/ICIP.2017.8296770
+58e7dbbb58416b785b4a1733bf611f8106511aca,http://doi.ieeecomputersociety.org/10.1109/ACII.2017.8273597
+673541a8cb1aa3ac63a288523ba71aec2a38280e,http://doi.ieeecomputersociety.org/10.1109/ICME.2016.7552971
+67214e8d2f83eb41c14bfc86698eb6620e72e87c,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.263
+67e6ddce6fea17bb2b171c949ee224936d36c0d1,https://doi.org/10.1109/ICIP.2008.4712157
+0b58b3a5f153f653c138257426bf8d572ae35a67,https://doi.org/10.1109/SMC.2016.7844481
+0b3144cdc9d6d5a1498d6178db20d1c49fb64de9,http://doi.acm.org/10.1145/1322192.1322203
+0bab5213911c19c40e936b08d2f8fba01e286b85,https://doi.org/10.1109/BigMM.2017.81
+0b8839945259ec764ef0fad47471f34db39f40c3,https://doi.org/10.1109/DESEC.2017.8073838
+0be418e63d111e3b94813875f75909e4dc27d13a,https://doi.org/10.1109/ICB.2016.7550057
+0bf1f999a16461a730dd80e3a187d0675c216292,http://doi.ieeecomputersociety.org/10.1109/CW.2017.26
+0be015e2f9a1d2acebc3afb6e0f6948dd2f9d23d,https://doi.org/10.1007/s12193-013-0133-0
+93d903d2e48d6a8ad3e3d2aff2e57622efe649cd,https://doi.org/10.1109/ICIP.2016.7532432
+935924ddb5992c11f3202bf995183130ad83d07b,https://doi.org/10.1117/1.JEI.24.2.023015
+93e1e195f294c463f4832c4686775bf386b3de39,https://doi.org/10.1109/TIP.2015.2490551
+93108f1548e8766621565bdb780455023349d2b2,https://doi.org/10.1109/ICIP.2010.5653914
+939f9fa056f8be445da19b43da64bd2405851a43,https://doi.org/10.1109/ICSMC.2007.4413713
+939d28859c8bd2cca2d692901e174cfd599dac74,https://doi.org/10.1109/WOCC.2016.7506582
+9378ead3a09bc9f89fb711e2746facf399dd942e,https://doi.org/10.1109/TCSVT.2010.2045817
+93978ba84c8e95ff82e8b5960eab64e54ca36296,http://doi.acm.org/10.1145/3136755.3136806
+934efd61b20f5b8b151a2df7cd373f0b387c02b0,https://doi.org/10.5220/0004673003290336
+93eb3963bc20e28af26c53ef3bce1e76b15e3209,https://doi.org/10.1109/ICIP.2017.8296992
+945ef646679b6c575d3bbef9c6fc0a9629ac1b62,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477689
+947cdeb52f694fb1c87fc16836f8877cd83dc652,https://doi.org/10.1109/SMAP.2017.8022671
+946b4d840b026d91608758d04f2763e9b981234e,http://doi.acm.org/10.1145/2388676.2388792
+942f6eb2ec56809430c2243a71d03cc975d0a673,https://doi.org/10.1109/BigMM.2017.64
+942b89d8d17e89e58c82453de2bfcbbeb09adc81,https://doi.org/10.1016/j.patcog.2016.02.019
+94b729f9d9171e7c4489995e6e1cb134c8521f4e,http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2016.055
+948f35344e6e063ffc35f10c547d5dd9204dee4e,https://doi.org/10.1016/j.eswa.2017.07.037
+940e5c45511b63f609568dce2ad61437c5e39683,https://doi.org/10.1109/TIP.2015.2390976
+0eed55ea9f401f25e1474cdbaf09367f44b4f490,https://doi.org/10.1016/j.neucom.2013.05.032
+0ea05bbc0b0c8b7df10f16e9429ef90177bf94fa,https://doi.org/10.1163/016918610X538534
+0e05b365af662bc6744106a7cdf5e77c9900e967,https://doi.org/10.1007/s11042-014-2234-5
+0ee83ed9bedc0cec5c3368144df0b6f4ee76ddff,http://doi.ieeecomputersociety.org/10.1109/CGIV.2007.40
+0e37d70794d5ccfef8b4cc22b4203245f33eec6e,https://doi.org/10.1109/ICIP.2010.5653034
+0e8a28511d8484ad220d3e8dde39220c74fab14b,https://doi.org/10.1109/TNNLS.2015.2477826
+0e454686f83284ced2ffc5740829552a032671a3,https://doi.org/10.1109/IJCNN.2015.7280802
+0e5557a0cc58194ad53fab5dd6f4d4195d19ce4e,https://doi.org/10.1109/TMM.2015.2500730
+0ed4b4d6d1a0c49c4eb619aab36db559b620d99f,https://doi.org/10.1016/j.neucom.2015.11.115
+0ef20991e0ecc7dc3f6e0e5fd6ee93c4970206f3,https://doi.org/10.1109/ICIP.2015.7351013
+0e2d956790d3b8ab18cee8df6c949504ee78ad42,https://doi.org/10.1109/IVCNZ.2013.6727024
+0e4baf74dfccef7a99c6954bb0968a2e35315c1f,https://doi.org/10.1109/SIU.2012.6204517
+0ed96cc68b1b61e9eb4096f67d3dcab9169148b9,http://doi.acm.org/10.1145/2663204.2666279
+0e4fa61871755b5548a5c970c8103f7b2ada24f3,http://doi.ieeecomputersociety.org/10.1109/SITIS.2015.19
+0e02dadab802128f6155e099135d03ca6b72f42c,http://doi.ieeecomputersociety.org/10.1109/TKDE.2014.2365793
+0e6f422c3f79c552c0c3d7eda0145aed8680f0ea,https://doi.org/10.1016/j.patrec.2012.09.008
+608b01c70f0d1166c10c3829c411424d9ef550e7,https://doi.org/10.1109/CISP-BMEI.2017.8301920
+606dff86a34c67c79d93f1e536487847a5bb7002,https://doi.org/10.1109/WACV.2011.5711538
+607aebe7568407421e8ffc7b23a5fda52650ad93,https://doi.org/10.1109/ISBA.2016.7477237
+609c35a6fa80af8b2e4ce46b1b16ec36578fd07f,https://doi.org/10.1155/2014/950349
+602f772c69e4a1a65de00443c30d51fdd47a80aa,https://doi.org/10.1109/IISA.2013.6623705
+609d81ddf393164581b3e3bf11609a712ac47522,https://doi.org/10.1109/APSIPA.2017.8282300
+603231c507bb98cc8807b6cbe2c860f79e8f6645,https://doi.org/10.1109/EUSIPCO.2015.7362819
+60284c37249532fe7ff6b14834a2ae4d2a7fda02,https://doi.org/10.1109/SIU.2016.7495971
+6014eeb333998c2b2929657d233ebbcb1c3412c9,http://doi.acm.org/10.1145/2647868.2656406
+34546ef7e6148d9a1fb42cfab5f0ce11c92c760a,https://doi.org/10.1016/j.jvcir.2015.09.005
+34c2ea3c7e794215588c58adf0eaad6dc267d082,http://doi.acm.org/10.1145/3136755.3143005
+34c1e9a6166f4732d1738db803467f7abc47ba87,https://doi.org/10.1109/WACV.2017.137
+344c0917c8d9e13c6b3546da8695332f86b57bd3,https://doi.org/10.1109/ICIP.2017.8296715
+349c909abf937ef0a5a12c28a28e98500598834b,http://doi.ieeecomputersociety.org/10.1109/ICMEW.2014.6890672
+34dd83115195676e7a8b008eb0e9abe84b330b32,https://doi.org/10.1007/s00371-014-0931-8
+5a259f2f5337435f841d39dada832ab24e7b3325,http://doi.acm.org/10.1145/2964284.2984059
+5af06815baa4b8f53adc9dc22f6eb3f6f1ad8ff8,https://doi.org/10.1186/s13640-017-0178-1
+5a0ae814be58d319dfc9fd98b058a2476801201c,https://doi.org/10.1007/s00521-012-1124-x
+5feee69ed183954fa76c58735daa7dd3549e434d,https://doi.org/10.1109/ICIP.2008.4711697
+5fc97d6cb5af21ed196e44f22cee31ce8c51ef13,http://doi.acm.org/10.1145/2742060.2743769
+5f7094ba898a248e1e6b37e3d9fb795e59131cdc,http://doi.ieeecomputersociety.org/10.1109/ICMEW.2017.8026246
+5fb9944b18f5a4a6d20778816290ed647f5e3853,http://doi.acm.org/10.1145/3080538.3080540
+5f1cd82343f4bd6972f674d50aecb453d06f04ad,http://doi.acm.org/10.1145/3125739.3125756
+5f4219118556d2c627137827a617cf4e26242a6e,https://doi.org/10.1109/TMM.2017.2751143
+5fa6f72d3fe16f9160d221e28da35c1e67a5d951,http://doi.acm.org/10.1145/3061639.3062182
+5fb59cf5b31a80d8c70d91660092ef86494be577,https://doi.org/10.1109/CISP-BMEI.2017.8301923
+5fce9d893a40c4e0f2ae335b2e68bfd02f1cb2c6,https://doi.org/10.1109/ICTAI.2012.40
+5f448ab700528888019542e6fea1d1e0db6c35f2,https://doi.org/10.1109/LSP.2016.2533721
+5f9dc3919fb088eb84accb1e490921a134232466,http://doi.ieeecomputersociety.org/10.1109/WACV.2007.49
+33c2131cc85c0f0fef0f15ac18f28312347d9ba4,https://doi.org/10.1016/j.neucom.2010.06.024
+33b915476f798ca18ae80183bf40aea4aaf57d1e,https://doi.org/10.1109/TIP.2013.2271548
+332d773b70f2f6fb725d49f314f57b8f8349a067,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.220
+33b61be191e63b0c9974be708180275c9d5b3057,https://doi.org/10.1109/ICRA.2011.5979705
+33bbf01413910bca26ed287112d32fe88c1cc0df,https://doi.org/10.1109/ICIP.2014.7026204
+331d6ace8d59fa211e5bc84a93fdc65695238c69,https://doi.org/10.1007/s10115-017-1115-4
+05184f01e66d7139530729b281da74db35a178d2,http://ieeexplore.ieee.org/document/6460470/
+052fb35f731680d9d4e7d89c8f70f14173efb015,http://doi.acm.org/10.1145/2893487
+05785cb0dcaace54801aa486d4f8fdad3245b27a,https://doi.org/10.1109/ICPR.2016.7899760
+053ee4a4793f54b02dfabde5436fd7ee479e79eb,http://doi.acm.org/10.1145/3160504.3160507
+052c5ef6b20bf3e88bc955b6b2e86571be08ba64,https://doi.org/10.1109/TIFS.2011.2170068
+0561bed18b6278434deae562d646e8adad72e75d,https://doi.org/10.1016/j.neucom.2014.09.052
+0553c6b9ee3f7d24f80e204d758c94a9d6b375d2,https://doi.org/10.1109/ICIP.2004.1419764
+055cd8173536031e189628c879a2acad6cf2a5d0,https://doi.org/10.1109/BTAS.2017.8272740
+05c5134125a333855e8d25500bf97a31496c9b3f,http://doi.acm.org/10.1145/3132515.3132517
+05a116cb6e220f96837e4418de4aa8e39839c996,http://doi.ieeecomputersociety.org/10.1109/T-AFFC.2012.30
+050e51268b0fb03033428ac777ccfef2db752ab3,https://doi.org/10.1109/DICTA.2007.4426834
+052cec9fdbfe12ccd02688f3b7f538c0d73555b3,https://doi.org/10.1109/ICIP.2016.7533172
+9d1cebed7672210f9c411c5ba422a931980da833,http://doi.ieeecomputersociety.org/10.1109/ISM.2016.0078
+9da63f089b8ee23120bfa8b4d9d9c8f605f421fc,http://doi.acm.org/10.1145/2072298.2072043
+9d4692e243e25eb465a0480376beb60a5d2f0f13,https://doi.org/10.1109/ICCE.2016.7430617
+9d01eca806e0f98c5b3c9a865cec1bd8c78e0f0c,http://doi.acm.org/10.1145/3136755.3137032
+9df86395c11565afa8683f6f0a9ca005485c5589,https://doi.org/10.1007/s00530-014-0400-2
+9c686b318cb7774b6da5e2c712743a5a6cafa423,https://doi.org/10.1016/j.neuroimage.2015.12.036
+9cda3e56cec21bd8f91f7acfcefc04ac10973966,https://doi.org/10.1109/IWBF.2016.7449688
+9ce4541d21ee3511bf3dc55bc3cd01222194d95a,https://doi.org/10.1016/j.cviu.2017.05.008
+9ce97efc1d520dadaa0d114192ca789f23442727,http://doi.acm.org/10.1145/2597627
+9c81d436b300494bc88d4de3ac3ec3cc9c43c161,https://doi.org/10.1007/s11042-017-5019-9
+9cd4f72d33d1cedc89870b4f4421d496aa702897,https://doi.org/10.1117/1.JEI.22.2.023010
+9cb7b3b14fd01cc2ed76784ab76304132dab6ff3,https://doi.org/10.1109/ICIP.2015.7351174
+02e668f9b75f4a526c6fdf7268c8c1936d8e6f09,https://doi.org/10.1142/S0218001411008968
+028e237cb539b01ec72c244f57fdcfb65bbe53d4,http://doi.ieeecomputersociety.org/10.1109/CIS.2010.65
+026e96c3c4751e1583bfe78b8c28bdfe854c4988,https://doi.org/10.1109/ICIP.2017.8296442
+0247998a1c045e601dc4d65c53282b5e655be62b,https://doi.org/10.1109/ITSC.2017.8317782
+021469757d626a39639e260492eea7d3e8563820,https://doi.org/10.1007/b116723
+02a92b79391ddac0acef4f665b396f7f39ca2972,https://doi.org/10.1016/j.patcog.2016.10.021
+a4bb791b135bdc721c8fcc5bdef612ca654d7377,https://doi.org/10.1109/BTAS.2017.8272703
+a4725a5b43e7c36d9e30028dff66958f892254a0,http://doi.acm.org/10.1145/2663204.2666271
+a4543226f6592786e9c38752440d9659993d3cb3,http://doi.ieeecomputersociety.org/10.1109/FG.2017.112
+a4e75766ef93b43608c463c233b8646439ce2415,https://doi.org/10.1109/ICCVW.2011.6130492
+a317083d9aac4062e77aa0854513383c87e47ece,https://doi.org/10.1016/j.patcog.2015.06.003
+a35ed55dc330d470be2f610f4822f5152fcac4e1,https://doi.org/10.1109/ISBA.2015.7126369
+a324d61c79fe2e240e080f0dab358aa72dd002b3,https://doi.org/10.1016/j.patcog.2016.02.005
+a3add3268c26876eb76decdf5d7dd78a0d5cf304,https://doi.org/10.1016/j.specom.2017.07.003
+a3ed0f15824802359e05d9777cacd5488dfa7dba,http://doi.acm.org/10.1145/2851581.2892282
+a3bf6129d1ae136709063a5639eafd8018f50feb,http://doi.ieeecomputersociety.org/10.1109/ICCI-CC.2017.8109741
+a38dd439209b0913b14b1c3c71143457d8cf9b78,https://doi.org/10.1109/IJCNN.2015.7280803
+b5ae8b69677fb962421fe7072f1e842e71f3bea5,http://doi.ieeecomputersociety.org/10.1109/ACII.2017.8273641
+b5979489e11edd76607c219a8bdc83ba4a88ab38,https://doi.org/10.1109/ACCESS.2017.2778011
+b5bda4e1374acc7414107cde529ad8b3263fae4b,https://doi.org/10.1007/s11370-010-0066-3
+b54fe193b6faf228e5ffc4b88818d6aa234b5bb9,http://doi.acm.org/10.1145/2964284.2967287
+b5690409be6c4e98bd37181d41121adfef218537,https://doi.org/10.1109/ICIP.2008.4711920
+b58d381f9f953bfe24915246b65da872aa94f9aa,https://doi.org/10.1109/SMAP.2013.13
+b5f79df712ad535d88ae784a617a30c02e0551ca,https://doi.org/10.1109/LSP.2015.2480758
+b50edfea790f86373407a964b4255bf8e436d377,http://doi.acm.org/10.1145/3136755.3143008
+b299c292b84aeb4f080a8b39677a8e0d07d51b27,http://doi.ieeecomputersociety.org/10.1109/ICDM.2015.23
+b2add9fad0bcf7bf0660f99f389672cdf7cc6a70,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.226
+b2ae5c496fe01bb2e2dee107f75b82c6a2a23374,http://doi.ieeecomputersociety.org/10.1109/FG.2017.116
+b208f2fc776097e98b41a4ff71c18b393e0a0018,http://doi.ieeecomputersociety.org/10.1109/AVSS.2003.1217900
+b259f57f41f4b3b5b7ca29c5acb6f42186bbcf23,https://doi.org/10.1109/SMC.2017.8122808
+b2f9e0497901d22b05b9699b0ea8147861c2e2cc,https://doi.org/10.1007/978-3-319-70353-4_3
+b209608a534957ec61e7a8f4b9d08286ae3d1d7f,https://doi.org/10.1111/j.1468-0394.2011.00589.x
+b22317a0bbbcc79425f7c8a871b2bf211ba2e9c4,https://doi.org/10.1109/ACCESS.2018.2805861
+b21bf45cd3aeaec3440eeca09a1c5a5ee3d24a3a,https://doi.org/10.1080/10798587.2014.934592
+b234d429c9ea682e54fca52f4b889b3170f65ffc,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2012.22
+b2ddea9c71cd73fa63e09e8121bc7a098fae70b4,https://doi.org/10.1109/ISCCSP.2012.6217849
+b262a2a543971e10fcbfc7f65f46115ae895d69e,https://doi.org/10.1109/DICTA.2015.7371266
+b2cb335ded99b10f37002d09753bd5a6ea522ef1,https://doi.org/10.1109/ISBA.2017.7947679
+d9c0310203179d5328c4f1475fa4d68c5f0c7324,http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI-T.2017.11
+d98a36081a434451184fa4becb59bf5ec55f3a1e,https://doi.org/10.1016/j.neucom.2016.09.110
+d9072e6b7999bc2d5750eb58c67a643f38d176d6,https://doi.org/10.1109/LSP.2009.2027636
+d92084e376a795d3943df577d3b3f3b7d12eeae5,http://doi.ieeecomputersociety.org/10.1109/FG.2017.85
+d963bdff2ce5212fa585a83ca8fad96875bc0057,https://doi.org/10.1016/j.neucom.2016.03.091
+d983dda8b03ed60fa3afafe5c50f1d9a495f260b,https://doi.org/10.1016/j.patcog.2007.03.020
+d9e34af95c21c0e114b61abccbc653480b370c3b,https://doi.org/10.1016/j.patcog.2005.10.020
+d91a5589fd870bf62b7e4979d9d47e8acf6c655d,http://doi.acm.org/10.1145/2382336.2382343
+d9d7a4b64b13ed1bce89d3cbbabe62e78d70b3fb,http://doi.ieeecomputersociety.org/10.1109/ACII.2013.16
+d9eed86e53ce5f7cba379fe77bbefb42e83c0d88,https://doi.org/10.1109/TIP.2017.2764262
+d9b4b49378fcd77dcd5e755975b99ed4c7962f17,https://doi.org/10.1109/TIP.2015.2473105
+d91f9e8cbf271004ef1a293401197a10a26ccd1b,https://doi.org/10.1109/SOCPAR.2015.7492801
+ace1e0f50fe39eb9a42586f841d53980c6f04b11,http://doi.ieeecomputersociety.org/10.1109/SMARTCOMP.2014.7043849
+acab402d706dbde4bea4b7df52812681011f435e,https://doi.org/10.1109/HIS.2012.6421377
+acd4280453b995cb071c33f7c9db5760432f4279,https://doi.org/10.1007/s00138-018-0907-1
+ac48ecbc7c3c1a7eab08820845d47d6ce197707c,https://doi.org/10.1109/TIP.2017.2681841
+ac37285f2f5ccf99e9054735a36465ee35a6afdd,https://doi.org/10.1109/ISCAS.2006.1693880
+ad08426ca57da2be0e9f8c1f673e491582edb896,http://doi.ieeecomputersociety.org/10.1109/TKDE.2013.98
+adad7446e371d27fdaee39475856e2058f3045e5,https://doi.org/10.1109/ISCAS.2013.6572295
+ad6cc071b2585e4bdb6233b7ad8d63e12538537d,https://doi.org/10.1007/s10462-010-9172-z
+ad4d1ecf5c5473c050e11f6876ce148de1c8920a,https://doi.org/10.1109/IJCNN.2017.7965886
+ad9ba7eade9d4299159512d6d5d07d7d3d26ae58,https://doi.org/10.1007/s11063-012-9252-y
+ad8bd7016132a2f98ff1f41dac695285e71cc4b1,https://doi.org/10.1109/CISP-BMEI.2017.8301964
+add6d96fc018986f51a1aac47eae9ee3fc62fb66,http://doi.acm.org/10.1145/3009977.3010074
+ad5a35a251e07628dd035c68e44a64c53652be6b,https://doi.org/10.1016/j.patcog.2016.12.024
+ad7b6d2e8d66f720cc83323a0700c25006d49609,https://doi.org/10.1109/TIP.2009.2028255
+adb040081974369c46b943e9f75be4e405623102,http://doi.ieeecomputersociety.org/10.1109/PACCS.2009.191
+ad339a5fdaab95f3c8aad83b60ceba8d76107fa2,https://doi.org/10.1023/B:VISI.0000013090.39095.d5
+ada56c9ceef50aa5159f1f8aa45ca2040d1ed15c,https://doi.org/10.1109/TIFS.2017.2680246
+ad1679295a5e5ebe7ad05ea1502bce961ec68057,http://doi.ieeecomputersociety.org/10.1109/ACII.2015.7344631
+adf9998214598469f7a097bc50de4c23784f2a5a,http://doi.ieeecomputersociety.org/10.1109/ICPR.2014.745
+ad50f6899103eff0ee4504e539c38eb965fd1309,https://doi.org/10.1109/IJCNN.2010.5596374
+bbc21d6b7c6e807c6886d237a04b501158ca6bb8,https://doi.org/10.1109/TMM.2016.2523421
+bb070c019c0885232f114c7dca970d2afd9cd828,https://doi.org/10.1109/DICTA.2014.7008089
+bbc47f421ab161f22f2699ee7bbb7fc8aec1cb7b,https://doi.org/10.1109/IJCNN.2017.7966271
+bb3698df3b4f40c0b7cc523d26ffb8c5276d5a1c,https://doi.org/10.1109/ICDSP.2016.7868528
+bb83d5c7c17832d1eef14aa5d303d9dd65748956,http://doi.acm.org/10.1145/3139513.3139514
+bbc8ccd3f62615e3c0ce2c3aee5e4a223d215bbd,https://doi.org/10.1007/s11042-015-2497-5
+d7c87f4ca39f79d93c954ffacac32bc6eb527e2c,https://doi.org/10.1007/978-3-642-15696-0_57
+d75bd05865224a1341731da66b8d812a7924d6f6,https://doi.org/10.1109/TSMCB.2012.2217127
+d79530e1745b33f3b771d0b38d090b40afc04191,https://doi.org/10.1007/s11042-015-2485-9
+d7bd37920a3a4a4d681151131e23a839695c8d5b,https://doi.org/10.1109/ICRA.2011.5979870
+d7b7253f7d8b397d9d74057e1e72ed9c58e2ba6d,https://doi.org/10.1109/TII.2013.2271914
+d723ebf3288126fa8cbb10ba7e2a6308aede857c,https://doi.org/10.1117/12.968586
+d7a84db2a1bf7b97657b0250f354f249394dd700,https://doi.org/10.1109/ICIP.2010.5653518
+d05759932001aa6f1f71e7dc261c4716f57a5397,https://doi.org/10.1109/ISBA.2015.7126365
+d046030f7138e5a2dbe2b3eec1b948ad8c787538,https://doi.org/10.1109/ICIP.2009.5413447
+d0b67ec62086b55f00dc461ab58dc87b85388b2b,https://doi.org/10.1109/ICIP.2014.7026206
+d0a8889f694422614bf3ecccd69aa1d4f7822606,https://doi.org/10.1007/978-0-85729-997-0_22
+d0f9143f6f43a39bff47daf8c596681581db72ea,https://doi.org/10.1007/s11042-017-5241-5
+d0b7d3f9a59034d44e7cd1b434cfd27136a7c029,https://doi.org/10.1109/INCoS.2013.143
+d0d75a7116a76ccd98a3aeb6f6fff10ba91de1c1,https://doi.org/10.1109/TIP.2015.2502144
+d09fd7e0bb5d997963cfef45452724416b2bb052,https://doi.org/10.1109/EMEIT.2011.6023179
+d0dd1364411a130448517ba532728d5c2fe78ed9,https://doi.org/10.1109/ISCAS.2016.7527183
+be51854ef513362bc236b85dd6f0e2c2da51614b,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.298
+be6bd94322dd0ecfc8ea99eb7f40a9a14dd3471f,https://doi.org/10.1109/UIC-ATC.2013.32
+be40014beffaa9faacee12bb3412969f98b6a43d,http://doi.ieeecomputersociety.org/10.1109/SNPD.2007.454
+be0a0e563445119b82d664d370e646e53e69a4c5,https://doi.org/10.1016/j.eswa.2017.05.037
+b3050dc48600acf2f75edf1f580a1f9e9cb3c14a,https://doi.org/10.1007/s00138-013-0584-z
+b388bf63c79e429dafee16c62b2732bcbea0d026,https://doi.org/10.1109/ICIP.2016.7533051
+b351575e3eab724d62d0703e24ecae55025eef00,https://doi.org/10.1007/s10209-014-0369-9
+b34fdab6864782ce60fd90d09f5d886bd83f84f5,https://doi.org/10.1002/cpe.3766
+b36a80d15c3e48870ea6118b855055cc34307658,https://doi.org/10.1109/ICPR.2014.17
+b3e60bb5627312b72c99c5ef18aa41bcc1d21aea,https://doi.org/10.1109/SPAC.2014.6982690
+dfb8a04a80d4b0794c0679d797cb90ec101e162c,http://doi.ieeecomputersociety.org/10.1109/AVSS.2014.6918665
+dfbbe8100fcd70322a431bd5d2c2d52a65fd4bbd,http://doi.acm.org/10.1145/2818346.2823313
+df550cb749858648209707bec5410431ea95e027,https://doi.org/10.1109/TCYB.2015.2433926
+df7ff512e8324894d20103fd8ab5da650e4d86db,http://doi.acm.org/10.1145/2043674.2043709
+dff38cac0a1004037024f0ed2a72f76f4e49318b,https://doi.org/10.1109/TNNLS.2015.2495268
+df7af280771a6c8302b75ed0a14ffe7854cca679,http://doi.ieeecomputersociety.org/10.1109/ICMEW.2017.8026293
+da1477b4a65ae5a013e646b57e004f0cd60619a2,https://doi.org/10.1109/ICB.2012.6199764
+da2b2be4c33e221c7f417875a6c5c74043b1b227,https://doi.org/10.1109/BTAS.2017.8272712
+dab795b562c7cc270c9099b925d685bea0abe82a,http://doi.ieeecomputersociety.org/10.1109/TKDE.2014.2382599
+dac6e9d708a9757f848409f25df99c5a561c863c,https://doi.org/10.1109/LSP.2014.2334656
+da928ac611e4e14e454e0b69dfbf697f7a09fb38,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477718
+dae9d0a9b77366f0cd52e38847e47691ee97bc1f,https://doi.org/10.1007/s11760-015-0822-0
+b472f91390781611d4e197564b0016d9643a5518,http://doi.acm.org/10.1145/2382336.2382345
+b47a3c909ee9b099854619054fd00e200b944aa9,http://doi.ieeecomputersociety.org/10.1109/WACV.2015.77
+b42b535fcd0d9bd41a6594a910ea4623e907ceb9,https://doi.org/10.1109/ICTAI.2012.153
+b44f03b5fa8c6275238c2d13345652e6ff7e6ea9,https://doi.org/10.1109/GlobalSIP.2017.8309138
+a216f7863fc6ab15e2bb7a538dfe00924e1da0ab,http://doi.ieeecomputersociety.org/10.1109/FG.2015.7163087
+a2646865d7c3d7fb346cf714caf146de2ea0e68f,https://doi.org/10.1109/SMC.2016.7844390
+a200885bf6bfa0493d85e7617e65cdabe30a2dab,https://doi.org/10.1109/ICIP.2015.7351272
+a2cc3193ed56ef4cedaaf4402c844df28edb5639,https://doi.org/10.1016/j.patrec.2012.01.005
+a2af07176a38fe844b0e2fdf4abae65472628b38,https://doi.org/10.1109/ICIP.2014.7026060
+a2b76ab614d92f5e71312b530f0b6281d0c500f7,https://doi.org/10.1007/s10898-014-0231-x
+a5eb36f1e77245dfc9e5c0c03998529331e4c89b,https://doi.org/10.1109/BTAS.2014.6996222
+a53d13b9110cddb2a5f38b9d7ed69d328e3c6db9,https://doi.org/10.1109/TIP.2015.2481327
+a5b6a3234e15343d2e5417cff46c0a5f0943521e,https://doi.org/10.1109/TNNLS.2014.2321420
+a5b9c6aa52f91092b5a8ab04ed1f7b60c0ea5260,http://doi.ieeecomputersociety.org/10.1109/WI-IATW.2006.88
+a5d4cc596446517dfaa4d92276a12d5e1c0a284c,https://doi.org/10.1016/j.patrec.2009.06.002
+a5d76710dc15ebc7d8b4dc976604315f1e2fc3ba,http://doi.ieeecomputersociety.org/10.1109/ICMLA.2011.117
+a53f988d16f5828c961553e8efd38fed15e70bcc,https://doi.org/10.1109/BTAS.2015.7358787
+a52a69bf304d49fba6eac6a73c5169834c77042d,https://doi.org/10.1109/LSP.2017.2789251
+bdf5434648356ce22bdbf81d2951e4bb00228e4d,http://doi.ieeecomputersociety.org/10.1109/SNPD.2007.415
+bd26faef48080b5af294b19139c804ffec70825e,https://doi.org/10.1007/s11390-015-1526-1
+bdd203bcd3c41c336c5635fb026a78279d75b4be,https://doi.org/10.1109/ICPR.2016.7899761
+bd9e0b6a90b51cc19b65f51dacd08ce1a7ccaac5,https://doi.org/10.1109/VSMM.2014.7136653
+bd25c4ad7471580ed9787eae041b80a3c4fe97bb,https://doi.org/10.1016/j.sigpro.2010.01.019
+bd66dc891270d858de3adf97d42ed714860ae94d,https://doi.org/10.1109/ACPR.2015.7486598
+bd74c3ca2ff03396109ac2d1131708636bd0d4d3,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.228
+d119443de1d75cad384d897c2ed5a7b9c1661d98,https://doi.org/10.1109/ICIP.2010.5650873
+d1b5b3e4b803dc4e50c5b80c1bc69c6d98751698,https://doi.org/10.1109/LSP.2017.2661983
+d1184939e06dbc3b495c883c53b684c6d6aa9e48,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477669
+d1dd80d77655876fb45b9420fe72444c303b219e,https://doi.org/10.1109/FG.2011.5771371
+d12bea587989fc78b47584470fd8f689b6ab81d2,https://doi.org/10.1109/TIP.2013.2246523
+d1bd956a8523629ed4e2533b01272f22cea534c6,https://doi.org/10.1016/j.patrec.2010.01.021
+d60e3eef429ed2a51bbd806125fa31f5bea072a4,https://doi.org/10.1109/HIS.2013.6920481
+d6e3bd948aae43f7654ea1d9e89d88f20d8cf25f,https://doi.org/10.1109/ACPR.2013.98
+d691440030394c2e00a2ab47aba4f8b5fca5f25a,https://doi.org/10.1109/ICIP.2016.7532921
+d6bdc70d259b38bbeb3a78db064232b4b4acc88f,http://doi.ieeecomputersociety.org/10.1109/FG.2017.27
+d63bd06340dd35590a22222509e455c49165ee13,https://doi.org/10.1109/IJCNN.2016.7727234
+d6a5eb4377e2a67420778eab61b5a89046307bae,http://doi.ieeecomputersociety.org/10.1109/CRV.2014.37
+d628aabf1a666a875e77c3d3fee857cd25891947,https://doi.org/10.1109/SMC.2016.7844663
+d6791b98353aa113d79f6fb96335aa6c7ea3b759,https://doi.org/10.1109/TNNLS.2017.2648122
+bcf2710d46941695e421226372397c9544994214,https://doi.org/10.1109/ICNC.2015.7378076
+bc66685acc64fa3c425c0ee6c443d3fa87db7364,https://doi.org/10.1109/TMM.2013.2279658
+bccb35704cdd3f2765b1a3f0296d1bff3be019c1,https://doi.org/10.1109/ICMLA.2016.0145
+bcead1a92744e76c38caaa13159de4abfb81b1d0,https://doi.org/10.1109/ICIP.2014.7025310
+bca39960ba46dc3193defe0b286ee0bea4424041,https://doi.org/10.1016/j.patrec.2009.05.018
+bc6a7390135bf127b93b90a21b1fdebbfb56ad30,https://doi.org/10.1109/TIFS.2017.2766039
+ae73f771d0e429a74b04a6784b1b46dfe98f53e4,http://doi.ieeecomputersociety.org/10.1109/ICPR.2014.326
+ae425a2654a1064c2eda29b08a492c8d5aab27a2,https://doi.org/10.23919/MVA.2017.7986845
+ae89e464576209b1082da38e0cee7aeabd03d932,https://doi.org/10.1007/s00521-005-0017-7
+ae7604b1840753e9c2e1ab7a97e02f91a9d81860,https://doi.org/10.1007/s10586-016-0535-3
+aeb36fac7516753a14c3c690f352de78e70f8c6e,http://doi.ieeecomputersociety.org/10.1109/T-AFFC.2013.13
+aea977a3b5556957ed5fb3ef21685ee84921eaa3,https://doi.org/10.1007/s12193-017-0256-9
+aed6af12148b43e4a24ee6e2bc3604ca59bd99a5,https://doi.org/10.1109/TIP.2017.2717505
+ae8240095c9cca2c395f173fece2f46277b94929,https://doi.org/10.1016/j.neucom.2017.06.045
+ae96fc36c89e5c6c3c433c1163c25db1359e13ea,https://doi.org/10.1007/s10489-013-0485-x
+d8c9bad8d07ae4196027dfb8343b9d9aefb130ff,https://doi.org/10.1007/s00138-017-0848-0
+d8b99eada922bd2ce4e20dc09c61a0e3cc640a62,https://doi.org/10.1109/IJCNN.2014.6889675
+d878a67b2ef6a0a5dec72db15291f12419040ab1,https://doi.org/10.1109/IPTA.2016.7821012
+d8e5d94c3c8688f0ca0ee656c79847c7df04c77d,https://doi.org/10.1007/s12193-015-0187-2
+d855791bc23b4aa8e751d6a4e2ae7f5566a991e8,http://doi.acm.org/10.1145/3012941
+d8288322f32ee4501cef5a9b667e5bb79ebd7018,https://doi.org/10.1016/j.patcog.2011.12.018
+d8c9ce0bd5e4b6d1465402a760845e23af5ac259,https://doi.org/10.1109/ITSC.2015.380
+ab7923968660d04434271559c4634790dc68c58e,https://doi.org/10.1109/ICIP.2015.7351111
+abf0aa1d8869d87f4ef62e2da058ccfb4bf46d18,https://doi.org/10.1007/s11042-015-2536-2
+abfba1dc9a9991897acd0e0d3d4ef9d4aef4151c,https://doi.org/10.1109/FUZZ-IEEE.2014.6891864
+ab68837d09986c592dcab7d08ee6dfb40e02916f,https://doi.org/10.1007/978-3-319-11289-3_23
+aba9acb4a607071af10684f2cfbdefa0507a4e9a,https://doi.org/10.1016/j.patcog.2016.06.010
+ab703224e3d6718bc28f7b9987eb6a5e5cce3b01,https://doi.org/10.1631/FITEE.1500235
+abe4c1d6b964c4f5443b0334a44f0b03dd1909f4,https://doi.org/10.1109/IJCNN.2017.7965950
+ab2c07c9867243fad2d66fa6aeabfb780433f319,http://doi.acm.org/10.1145/2967878.2967887
+ab00ea1aa2f81fbe139b4632ec3682dfb7312ef0,http://doi.ieeecomputersociety.org/10.1109/WACV.2014.6835994
+abbc6dcbd032ff80e0535850f1bc27c4610b0d45,https://doi.org/10.1109/ICIP.2015.7350983
+abf573864b8fbc0f1c491ca60b60527a3e75f0f5,https://doi.org/10.1007/s11042-014-2204-y
+e52272f92fa553687f1ac068605f1de929efafc2,https://doi.org/10.1016/j.engappai.2017.06.003
+e585dc6c810264d9f07e38c412379734a920714e,http://doi.acm.org/10.1145/2531923.2531926
+e51f1ee5535017e10a5f77100ff892509ec6b221,https://doi.org/10.1109/ICSMC.2007.4413825
+e57108607d94aa158eb22ae50540ae6080e48d4b,http://doi.ieeecomputersociety.org/10.1109/ICMI.2002.1167051
+e5c687c8c84f1cdb9d9fbc9b6ff7518ff4d71056,https://doi.org/10.1109/TNN.2011.2170220
+e57ce6244ec696ff9aa42d6af7f09eed176153a8,https://doi.org/10.1109/ICIP.2015.7351449
+e50ec6b6d1c189edc127eb403c41a64f34fc0a6c,http://doi.ieeecomputersociety.org/10.1109/ICME.2014.6890187
+e51927b125640bfc47bbf1aa00c3c026748c75bd,http://doi.acm.org/10.1145/2647868.2655015
+e55f7250f3b8ee722814f8809620a851c31e5b0e,https://doi.org/10.3182/20130902-3-CN-3020.00030
+e5fbaeddbf98c667ec7c5575bda2158a36b55409,http://doi.ieeecomputersociety.org/10.1109/EMS.2011.25
+e57e1dce81e888eb07054923602e35bfb5ef3eb8,https://doi.org/10.1109/IROS.2012.6385544
+e546572f8205570de4518bcf8d0345465e51d7a0,https://doi.org/10.1109/ICIP.2015.7351318
+e27b2cabdfdd6bf3ffb3ebce1b4c55adb1e80c8f,http://doi.ieeecomputersociety.org/10.1109/ICPR.2010.225
+e26a7e343fe109e2b52d1eeea5b02dae836f3502,https://doi.org/10.1109/ACCESS.2017.2676238
+e2b3aae594035e58f72125e313e92c7c4cc9d5bb,https://doi.org/10.1007/s00138-014-0597-2
+e2f78d2f75a807b89a13115a206da4661361fa71,https://doi.org/10.1109/TMM.2017.2696825
+f41d7f891a1fc4569fe2df66e67f277a1adef229,https://doi.org/10.1109/ICIP.2015.7351552
+f4411787688ca40466ee059ec64bf56d746733c1,https://doi.org/10.1007/s12652-012-0107-1
+f402e088dddfaad7667bd4def26092d05f247206,https://doi.org/10.1109/TITS.2015.2475721
+f4465454811acb2021a46d84d94fc88e2dda00a6,https://doi.org/10.1007/s11042-007-0184-x
+f41e80f941a45b5880f4c88e5bf721872db3400f,http://doi.ieeecomputersociety.org/10.1109/IC3.2017.8284359
+f4fc77660665ae58993065c6a336367e9a6c85f7,https://doi.org/10.1016/j.patcog.2012.12.009
+f4003cbbff3b3d008aa64c76fed163c10d9c68bd,https://doi.org/10.1016/j.neucom.2016.08.055
+f449c85b8ba5fa67ead341c7ad4ec396f4ab2dd6,http://doi.ieeecomputersociety.org/10.1109/TKDE.2015.2448547
+f423d8be5e13d9ef979debd3baf0a1b2e1d3682f,https://doi.org/10.1016/j.imavis.2015.11.004
+f486624efa750d718a670fba3c7f21b1c84ebaeb,https://doi.org/10.1109/TCYB.2016.2581861
+f49aebe58d30241f12c1d7d9f4e04b6e524d7a45,https://doi.org/10.1109/ICB.2016.7550074
+eb3c45e78acee0824c8f7d997c6104d74e7213a8,http://doi.ieeecomputersociety.org/10.1109/iThings/CPSCom.2011.116
+eb38f20eaa1b849cabec99815883390f84daf279,https://doi.org/10.1016/j.patcog.2008.11.026
+eb9867f5efc98d3203ce1037f9a8814b0d15d0aa,https://doi.org/10.1109/ICIP.2014.7026008
+eb02daee558e483427ebcf5d1f142f6443a6de6b,http://doi.acm.org/10.1145/2911996.2912019
+ebc2a3e8a510c625353637e8e8f07bd34410228f,https://doi.org/10.1109/TIP.2015.2502485
+eb5c1e526fe2d17778c68f60c874c3da0129fabd,https://doi.org/10.1109/VCIP.2015.7457856
+ebce3f5c1801511de9e2e14465482260ba5933cc,http://doi.acm.org/10.1145/3126594.3126640
+eb240521d008d582af37f0497f12c51f4bab16c8,https://doi.org/10.1023/A:1012365806338
+ebb3d5c70bedf2287f9b26ac0031004f8f617b97,https://doi.org/10.1109/MSP.2017.2764116
+ebeb0546efeab2be404c41a94f586c9107952bc3,http://doi.acm.org/10.1145/2733373.2806290
+ebfdb4842c69177b65022f00d3d038d645f3260b,http://doi.ieeecomputersociety.org/10.1109/CVPR.2006.154
+eb87151fd2796ff5b4bbcf1906d41d53ac6c5595,https://doi.org/10.1109/ICPR.2016.7899719
+c7cd490e43ee4ff81e8f86f790063695369c2830,https://doi.org/10.1109/VCIP.2016.7805472
+c7b58827b2d07ece676271ae0425e369e3bd2310,https://doi.org/10.1142/S0218001415560042
+c0270a57ad78da6c3982a4034ffa195b9e932fda,http://doi.ieeecomputersociety.org/10.1109/FG.2017.131
+c0f9fae059745e50658d9605bd8875fc3a2d0b4b,http://doi.ieeecomputersociety.org/10.1109/BIGCOMP.2014.6741422
+c0945953506a3d531331caf6c2b2a6d027e319f0,https://doi.org/10.1002/cav.49
+c06b13d0ec3f5c43e2782cd22542588e233733c3,https://doi.org/10.1016/j.cviu.2016.02.001
+c0b02be66a5a1907e8cfb8117de50f80b90a65a8,http://doi.acm.org/10.1145/2808492.2808523
+eefecac463ebfc0694b9831e842b574f3954fed6,http://doi.ieeecomputersociety.org/10.1109/SNPD.2013.15
+eedb2c34c36017b9c5aa6ce8bff2ab152e713cee,https://doi.org/10.1007/s00521-008-0225-z
+ee6e4324123b99d94a7a23d9bddf026f39903693,https://doi.org/10.1109/ISMICT.2013.6521709
+eef432868e85b95a7d9d9c7b8c461637052318ca,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2013.236
+eef0be751e9aca7776d83f25c8ffdc1a18201fd8,https://doi.org/10.1016/j.patcog.2016.10.015
+ee2217f9d22d6a18aaf97f05768035c38305d1fa,https://doi.org/10.1109/APSIPA.2015.7415501
+eed05da2c0ab7d2b0a3c665a5368efa81b185099,https://doi.org/10.1016/j.neucom.2014.05.020
+eeaeca3a601d65d2d978bf3da43ab42fa5e08ed2,https://doi.org/10.1109/FSKD.2016.7603398
+eefdb69ac2c461e7791603d0f8c02ff3c8600adc,https://doi.org/10.1016/j.jvcir.2017.02.007
+ee65cee5151928c63d3ef36fcbb582fabb2b6d2c,https://doi.org/10.1109/LSP.2016.2602538
+c98def5f9d0c6ae519fe0aeebe5378f65b14e496,https://doi.org/10.1117/12.2064730
+c92e36689ef561df726a7ae861d9c166c3934908,https://doi.org/10.1109/ICPR.2016.7900140
+c907104680ad53bdc673f2648d713e4d26335825,http://doi.acm.org/10.1145/3077286.3077304
+c9c2de3628be7e249722b12911bebad84b567ce6,https://doi.org/10.1016/j.patcog.2017.06.028
+c9be1001706bcdd8b35fa9cae733c592e90c7ec3,http://doi.ieeecomputersociety.org/10.1109/DICTA.2010.54
+c9527df51e63b56c61cbf16f83d1a3c5c2c82499,http://doi.acm.org/10.1145/2072298.2072311
+c9832564d5dc601113b4d80e5a05ede6fee9f7dd,https://doi.org/10.1109/ISBA.2017.7947687
+c90427085909029afd2af01d1967e80b78e01b88,https://doi.org/10.1109/ACCESS.2017.2753830
+fcd3d557863e71dd5ce8bcf918adbe22ec59e62f,http://doi.acm.org/10.1145/2502081.2502148
+fc8990088e0f1f017540900bc3f5a4996192ff05,https://doi.org/10.1109/ICIP.2017.8296314
+fcb97ede372c5bddde7a61924ac2fd29788c82ce,https://doi.org/10.1109/TSMCC.2012.2192727
+fc5538e60952f86fff22571c334a403619c742c3,http://ieeexplore.ieee.org/document/6460202/
+fc970d7694b1d2438dd101a146d2e4f29087963e,http://doi.ieeecomputersociety.org/10.1109/FG.2017.86
+fcb276874cd932c8f6204f767157420500c64bd0,https://doi.org/10.1007/978-3-319-04960-1_3
+fdd19fee07f2404952e629cc7f7ffaac14febe01,https://doi.org/10.1109/CISP-BMEI.2016.7852754
+fdbc602a749ef070a7ac11c78dc8d468c0b60154,https://doi.org/10.1049/iet-ipr.2015.0519
+fddca9e7d892a97073ada88eec39e03e44b8c46a,http://doi.ieeecomputersociety.org/10.1109/ICPR.2014.305
+fd38163654a0551ed7f4e442851508106e6105d9,https://doi.org/10.1109/ICNSC.2008.4525311
+f28d549feffd414f38147d5e0460883fb487e2d3,https://doi.org/10.1007/s10462-011-9273-3
+f25aa838fb44087668206bf3d556d31ffd75235d,http://doi.acm.org/10.1145/2911996.2912038
+f2d15482e7055dd5f54cf4a8a8f60d8e75af7edf,https://doi.org/10.1109/ICIP.2011.6115736
+f2cc459ada3abd9d8aa82e92710676973aeff275,http://ieeexplore.ieee.org/document/5967185/
+f27fd2a1bc229c773238f1912db94991b8bf389a,https://doi.org/10.1109/IVCNZ.2016.7804414
+f28ef0a61a45a8b9cd03aa0ca81863e1d54a31d1,https://doi.org/10.1109/VCIP.2016.7805483
+f2004fff215a17ac132310882610ddafe25ba153,http://doi.ieeecomputersociety.org/10.1109/AICCSA.2017.124
+f231e9408da20498ba51d93459b3fcdb7b666efb,https://doi.org/10.1016/j.micpro.2012.01.002
+f5a95f857496db376d69f7ac844d1f56e3577b75,https://doi.org/10.1007/s12193-012-0107-7
+f531ce18befc03489f647560ad3e5639566b39dc,http://doi.ieeecomputersociety.org/10.1109/ACOMP.2015.9
+f545b121b9612707339dfdc40eca32def5e60430,http://doi.ieeecomputersociety.org/10.1109/DICTA.2010.33
+f58f30932e3464fc808e539897efa4ee4e7ac59f,https://doi.org/10.1109/DICTA.2016.7797023
+f557df59cd088ffb8e27506d8612d062407e96f4,https://doi.org/10.1007/s00521-014-1810-y
+e3a8f18e507d9f2b537ec3c3fcc1b874b8ccfc24,http://doi.ieeecomputersociety.org/10.1109/MMUL.2016.27
+cf4c1099bef189838877c8785812bc9baa5441ed,https://doi.org/10.1109/ICPR.2016.7899862
+cf6c59d359466c41643017d2c212125aa0ee84b2,http://doi.ieeecomputersociety.org/10.1109/ICME.2016.7552983
+cf7a4442a6aad0e08d4aade8ec379c44f84bca8a,http://doi.acm.org/10.1145/1873951.1874054
+cf784156547c3be146706e2763c1a52d939d1722,https://doi.org/10.1007/s11042-017-5038-6
+cfa40560fa74b2fb5c26bdd6ea7c610ba5130e2f,https://doi.org/10.1109/TIFS.2013.2286265
+cf185d0d8fcad2c7f0a28b7906353d4eca5a098b,https://doi.org/10.1186/s13640-017-0190-5
+cf54e9776d799aa183d7466094525251d66389a4,https://doi.org/10.1109/ICCE-Berlin.2017.8210589
+cf6851c24f489dabff0238e01554edea6aa0fc7c,https://doi.org/10.1109/ICSMC.2011.6083637
+cfba667644508853844c45bfe5d0b8a2ffb756d3,https://doi.org/10.1109/ISBA.2018.8311455
+ca0185529706df92745e656639179675c717d8d5,https://doi.org/10.1504/IJCVR.2014.065571
+cae41c3d5508f57421faf672ee1bea0da4be66e0,https://doi.org/10.1109/ICPR.2016.7900298
+ca447d6479554b27b4afbd0fd599b2ed39f2c335,https://doi.org/10.1109/ICPR.2014.459
+ca0804050cf9d7e3ed311f9be9c7f829e5e6a003,http://doi.ieeecomputersociety.org/10.1109/ICPR.2004.1333904
+ca458f189c1167e42d3a5aaf81efc92a4c008976,https://doi.org/10.1109/TIP.2012.2202678
+ca8f23d9b9a40016eaf0467a3df46720ac718e1d,https://doi.org/10.1109/ICASSP.2015.7178214
+cacce7f4ce74e3269f5555aa6fd83e48baaf9c96,http://doi.acm.org/10.1145/2632165
+ca60d007af691558de377cab5e865b5373d80a44,http://doi.ieeecomputersociety.org/10.1109/ACII.2017.8273630
+cab3c6069387461c3a9e5d77defe9a84fe9c9032,https://doi.org/10.1016/j.neucom.2016.12.056
+ca37933b6297cdca211aa7250cbe6b59f8be40e5,http://doi.acm.org/10.1145/3155133.3155207
+e41246837c25d629ca0fad74643fb9eb8bf38009,https://doi.org/10.1109/ICSIPA.2011.6144064
+e4d53e7f4c2052940841abc08f9574655f3f7fb4,http://doi.acm.org/10.1145/3078971.3079039
+e4df98e4b45a598661a47a0a8900065716dafd6d,http://doi.ieeecomputersociety.org/10.1109/WI-IAT.2015.219
+e4ad82afc563b783475ed45e9f2cd4c9e2a53e83,https://doi.org/10.1109/AICCSA.2016.7945716
+e47e8fa44decf9adbcdb02f8a64b802fe33b29ef,https://doi.org/10.1109/TIP.2017.2782366
+e42f3c27391821f9873539fc3da125b83bffd5a2,https://doi.org/10.1109/HPCS.2010.5547096
+e4b825bf9d5df47e01e8d7829371d05208fc272d,http://doi.acm.org/10.1145/3055635.3056618
+e4e07f5f201c6986e93ddb42dcf11a43c339ea2e,https://doi.org/10.1109/BTAS.2017.8272722
+e4c3587392d477b7594086c6f28a00a826abf004,https://doi.org/10.1109/ICIP.2017.8296998
+fef6f1e04fa64f2f26ac9f01cd143dd19e549790,http://doi.acm.org/10.1145/3123266.3123451
+fe556c18b7ab65ceb57e1dd054a2ca21cefe153c,http://doi.ieeecomputersociety.org/10.1109/WACV.2015.145
+fed8cc533037d7d925df572a440fd89f34d9c1fd,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.194
+fefaa892f1f3ff78db4da55391f4a76d6536c49a,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2497689
+fe14d8177cbdb7e5b4085302e6e044f7a4c19cb2,https://doi.org/10.1109/ICSMC.2012.6377834
+fe5d6c65e51386f4d36f7434fe6fcd9494fe9361,https://doi.org/10.1109/ACCESS.2017.2730281
+c83d142a47babe84e8c4addafa9e2bb9e9b757a5,https://doi.org/10.1109/MLSP.2012.6349762
+c833c2fb73decde1ad5b5432d16af9c7bee1c165,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2008.143
+c8fb8872203ee694d95da47a1f9929ac27186d87,https://doi.org/10.1109/ICIP.2005.1530305
+c8fb8994190c1aa03c5c54c0af64c2c5c99139b4,https://doi.org/10.1007/s00138-016-0794-2
+c84991fe3bf0635e326a05e34b11ccaf74d233dc,https://doi.org/10.1016/j.neucom.2016.08.069
+c8bc8c99acd009e4d27ddd8d9a6e0b899d48543e,https://doi.org/10.1109/IROS.2012.6386178
+c81b27932069e6c7016bfcaa5e861b99ac617934,http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019469
+c872d6310f2079db0cee0e69cc96da1470055225,https://doi.org/10.1007/978-3-319-46675-0_68
+fb3aaf18ea07b30d1836e7cf2ab9fa898627fe93,https://doi.org/10.1109/ACCESS.2017.2784096
+fb1b6138aeb081adf853316c0d83ef4c5626a7fa,https://doi.org/10.1109/ICIP.2017.8296302
+fb7bf10cbc583db5d5eee945aa633fcb968e01ad,https://doi.org/10.1007/s00521-012-0962-x
+fb915bcc1623cdf999c0e95992c0e0cf85e64d8e,http://doi.ieeecomputersociety.org/10.1109/iThings.2014.83
+fb557b79157a6dda15f3abdeb01a3308528f71f2,http://doi.ieeecomputersociety.org/10.1109/ICNC.2009.310
+fb1627ed224bf7b1e3d80c097316ed7703951df2,https://doi.org/10.1109/VCIP.2017.8305094
+fb3ff56ab12bd250caf8254eca30cd97984a949a,https://doi.org/10.3103/S0146411617010072
+fb2bd6c2959a4f811b712840e599f695dad2967e,https://doi.org/10.1109/ISPA.2015.7306038
+fba386ac63fe87ee5a0cf64bf4fb90324b657d61,https://doi.org/10.1109/ICIP.2015.7351752
+ed9de242a23ad546902e1d5ec022dbb029cc2282,https://doi.org/10.1109/ICASSP.2015.7178138
+edbddf8c176d6e914f0babe64ad56c051597d415,https://doi.org/10.1109/TMM.2016.2644866
+ed94e7689cdae87891f08428596dec2a2dc6a002,https://doi.org/10.1109/CAMSAP.2017.8313130
+ed273b5434013dcdb9029c1a9f1718da494a23a2,https://doi.org/10.1109/LSP.2018.2810106
+ed0d8997a4b7b80a7cd3592e98bdbe5c3aab0cee,https://doi.org/10.1007/s11042-014-2345-z
+ed779cc4f026f6ac22f5ef0c34126138e1ebc8b2,https://doi.org/10.1007/978-981-10-3005-5_57
+ed70d1a9435c0b32c0c75c1a062f4f07556f7016,https://doi.org/10.1109/ICIP.2015.7350774
+ed82f10e5bfe1825b9fa5379a1d0017b96fa1ebf,http://doi.ieeecomputersociety.org/10.1109/ICEBE.2017.36
+ed023651e31cdbcaa5ef2ee1d71ddbc2906c2f76,https://doi.org/10.1109/LSP.2010.2093600
+c1a70d63d1667abfb1f6267f3564110d55c79c0d,https://doi.org/10.1007/s00138-013-0488-y
+c138c76809b8da9e5822fb0ae38457e5d75287e0,https://doi.org/10.1109/TIP.2014.2378017
+c1581b5175994e33549b8e6d07b4ea0baf7fe517,https://doi.org/10.1109/IJCNN.2011.6033478
+c1173b8d8efb8c2d989ce0e51fe21f6b0b8d1478,https://doi.org/10.1109/TCYB.2016.2535122
+c1f05b723e53ac4eb1133249b445c0011d42ca79,https://doi.org/10.1162/neco_a_00990
+c1fb854d9a04b842ff38bd844b50115e33113539,https://doi.org/10.1007/s11042-016-3883-3
+c17c7b201cfd0bcd75441afeaa734544c6ca3416,https://doi.org/10.1109/TCSVT.2016.2587389
+c12034ca237ee330dd25843f2d05a6e1cfde1767,http://doi.ieeecomputersociety.org/10.1109/ICCV.2013.298
+c64502696438b4c9f9e12e64daaf7605f62ce3f0,http://doi.ieeecomputersociety.org/10.1109/WKDD.2009.195
+c65cfc9d3568c586faf18611c4124f6b7c0c1a13,https://doi.org/10.1109/ICACCI.2014.6968322
+c648d2394be3ff0c0ee5360787ff3777a3881b02,https://doi.org/10.1080/01449290903353047
+c65d2ee433ae095652abe3860eeafe6082c636c6,http://doi.ieeecomputersociety.org/10.1109/FG.2013.6553714
+c6bbb56a26222bdb8ce7dd829cff38b67d4b03cd,http://doi.acm.org/10.1145/2043674.2043677
+c675534be881e59a78a5986b8fb4e649ddd2abbe,https://doi.org/10.1109/ICIP.2017.8296548
+c60601bdb5465d8270fdf444e5d8aeccab744e29,http://doi.ieeecomputersociety.org/10.1109/ICME.2010.5583363
+ec6a2093059fd6eada9944212f64a659881abb95,https://doi.org/10.1016/j.patcog.2016.02.022
+ec89f2307e29cc4222b887eb0619e0b697cf110d,https://doi.org/10.1109/TIP.2009.2027361
+ec1a57e609eda72b4eb60155fac12db1da31f6c0,https://doi.org/10.1007/11744085_41
+ec28217290897a059348dcdf287540a2e2c68204,https://doi.org/10.1504/IJBM.2015.070928
+eca706b4d77708452bdad1c98a23e4e88ce941ab,https://doi.org/10.1142/S0218001416550144
+ec39e9c21d6e2576f21936b1ecc1574dadaf291e,https://doi.org/10.1109/WACV.2017.130
+ecdd83002f69c2ccc644d07abb44dd939542d89d,https://doi.org/10.1016/j.neucom.2015.07.011
+4e8f301dbedc9063831da1306b294f2bd5b10477,https://doi.org/10.1109/BIOSIG.2016.7736919
+4efd58102ff46b7435c9ec6d4fc3dd21d93b15b4,https://doi.org/10.1109/TIFS.2017.2788002
+4e1d89149fc4aa057a8becce2d730ec6afd60efa,https://doi.org/10.1109/ICSMC.2009.5346047
+4ea63435d7b58d41a5cbcdd34812201f302ca061,https://doi.org/10.1109/ICIP.2014.7025066
+4e6e5cb93e7e564bc426b5b27888d55101504c50,https://doi.org/10.1109/ICPR.2016.7900299
+4e343c66c5fe7426132869d552f0f205d1bc5307,https://doi.org/10.1109/ICPR.2014.452
+4e1258db62e4762fd8647b250fda9c3567f86eb8,http://doi.ieeecomputersociety.org/10.1109/CRV.2013.17
+4ee94572ae1d9c090fe81baa7236c7efbe1ca5b4,https://doi.org/10.1109/DICTA.2017.8227494
+4eeccbbb98de4f2e992600482fd6b881ace014bb,http://doi.acm.org/10.1145/2964284.2967240
+4e581831d24fd90b0b5228b9136e76fa3e8f8279,https://doi.org/10.1109/TIP.2014.2303648
+4eb8030b31ff86bdcb063403eef24e53b9ad4329,http://doi.acm.org/10.1145/2993148.2997640
+4ed40e6bb66dfa38a75d864d804d175a26b6c6f6,http://doi.ieeecomputersociety.org/10.1109/CRV.2011.41
+204f1cf56794bb23f9516b5f225a6ae00d3d30b8,https://doi.org/10.1109/JSYST.2015.2418680
+20b405d658b7bb88d176653758384e2e3e367039,https://doi.org/10.1109/IJCNN.2012.6252677
+20eabf10e9591443de95b726d90cda8efa7e53bb,https://doi.org/10.1007/s11390-017-1740-0
+205f035ec90a7fa50fd04fdca390ce83c0eea958,http://doi.acm.org/10.1145/3131287
+189e5a2fa51ed471c0e7227d82dffb52736070d8,https://doi.org/10.1109/ICIP.2017.8296995
+18bfda16116e76c2b21eb2b54494506cbb25e243,https://doi.org/10.1109/TIFS.2010.2051544
+18d3532298fb7b8fb418453107f786178ca82e4a,https://doi.org/10.1109/TIFS.2017.2668221
+184dba921b932143d196c833310dee6884fa4a0a,https://doi.org/10.1109/SIU.2017.7960393
+18dd3867d68187519097c84b7be1da71771d01a3,http://doi.acm.org/10.1145/2448556.2448563
+18145b0b13aa477eeabef9ceec4299b60e87c563,https://doi.org/10.1007/s11042-011-0834-x
+187f3ee3bc50a1f2471edc80d707e4fa1cac5b0b,https://doi.org/10.1109/LSP.2015.2437883
+1831800ef8b1f262c92209f1ee16567105da35d6,https://doi.org/10.1016/j.sigpro.2014.01.010
+1890470d07a090e7b762091c7b9670b5c2e1c348,http://doi.ieeecomputersociety.org/10.1109/CRV.2016.20
+27e0684fa5b57715162ac6c58a6ea283c7db1719,https://doi.org/10.1109/ICARCV.2004.1468857
+27812db1d2f68611cc284d65d11818082e572008,https://doi.org/10.1109/MIPRO.2016.7522323
+27e5b7ae3506a0f7472ee9089cd2472442e71c14,https://doi.org/10.1007/s00521-015-1834-y
+27aa23d7a05368a6b5e3d95627f9bab34284e5c4,https://doi.org/10.1109/IJCNN.2012.6252705
+27a586a435efdcecb151c275947fe5b5b21cf59b,https://doi.org/10.1007/s12559-017-9530-0
+279459cbbc5c6db4802e9c737cc72a612d76f7fc,https://doi.org/10.1109/SSCI.2017.8285296
+272e487dfa32f241b622ac625f42eae783b7d9aa,https://doi.org/10.1109/ICSIPA.2015.7412207
+4b9b30066a05bdeb0e05025402668499ebf99a6b,https://doi.org/10.1109/ISPACS.2012.6473448
+4b8c736524d548472d0725c971ee29240ae683f6,http://doi.ieeecomputersociety.org/10.1109/AUTOID.2005.31
+4b7f21b48c7e0dc7334e36108f558d54642c17c0,https://doi.org/10.1109/WACV.2017.106
+4ba2f445fcbbad464f107b036c57aa807ac5c0c2,https://doi.org/10.1109/TCSVT.2014.2367357
+4b94f531c203743a9f7f1e9dd009cdbee22ea197,https://doi.org/10.1109/ICSMC.2005.1571393
+4b9c47856f8314ecbe4d0efc65278c2ededb2738,https://doi.org/10.1109/LSP.2012.2188890
+1176a74fb9351ac2de81c198c4861d78e58f172d,https://doi.org/10.1016/j.patrec.2011.03.023
+11ba01ce7d606bab5c2d7e998c6d94325521b8a0,https://doi.org/10.1109/ICIP.2015.7350911
+110919f803740912e02bb7e1424373d325f558a9,http://doi.acm.org/10.1145/3123266.3123421
+11e6cf1cbb33d67a3e3c87dcaf7031d6654bc0de,http://doi.acm.org/10.1145/2522968.2522978
+113cd9e5a4081ce5a0585107951a0d36456ce7a8,https://doi.org/10.1109/ICSMC.2006.384939
+11c2d40fc63ecd88febadd8a9cac9521a6b7de66,https://doi.org/10.1109/ICSIPA.2011.6144081
+1195f0bf8f745ba69da915203bcd79589b94aec5,https://doi.org/10.1016/j.procs.2010.11.004
+11f8d0a54e55c5e6537eef431cd548fa292ef90b,https://doi.org/10.1016/j.neucom.2017.05.042
+110359824a0e3b6480102b108372793265a24a86,https://doi.org/10.1016/j.image.2016.03.011
+1125760c14ea6182b85a09bf3f5bad1bdad43ef5,https://doi.org/10.1109/CVPR.2004.286
+11a6593e6e35f95ebeb5233897d1d8bcad6f9c87,https://doi.org/10.1007/s11063-017-9615-5
+11d73f4f19077e6806d05dc7ecd17fbeb15bdf39,http://doi.ieeecomputersociety.org/10.1109/FG.2017.28
+1135a818b756b057104e45d976546970ba84e612,http://doi.ieeecomputersociety.org/10.1109/FG.2017.118
+7d8798e7430dcc68fcdbd93053c884fc44978906,http://doi.acm.org/10.1145/2506364.2506369
+7d61b70d922d20c52a4e629b09465076af71ddfd,https://doi.org/10.1007/s10044-011-0258-2
+7d7b036ed01765c9473d695f029142128d442aaa,https://doi.org/10.1109/TIP.2018.2791180
+7dc498d45f9fcb97acee552c6f587b65d5122c35,https://doi.org/10.1109/ICIP.2015.7351618
+7de8a8b437ec7a18e395be9bf7c8f2d502025cc6,https://doi.org/10.1109/SIU.2017.7960528
+298c2be98370de8af538c06c957ce35d00e93af8,https://doi.org/10.1109/IPTA.2016.7820988
+29322b9a3744afaa5fc986b805d9edb6ff5ea9fe,https://doi.org/10.1109/TNNLS.2011.2178037
+2945cc9e821ab87fa17afc8802f3858435d1264c,https://doi.org/10.1109/ICPR.2016.7899839
+2960500033eb31777ed1af1fcb133dcab1b4a857,http://doi.acm.org/10.1145/3005467.3005471
+29f298dd5f806c99951cb434834bc8dcc765df18,https://doi.org/10.1109/ICPR.2016.7899837
+293d69d042fe9bc4fea256c61915978ddaf7cc92,https://doi.org/10.1007/978-981-10-7302-1_6
+29fd98f096fc9d507cd5ee7d692600b1feaf7ed1,http://doi.acm.org/10.1145/2988257.2988270
+7c8909da44e89a78fe88e815c83a4ced34f99149,http://doi.ieeecomputersociety.org/10.1109/ICPR.2010.326
+7c457c9a658327af6f6490729b4cab1239c22005,https://doi.org/10.1109/ACCESS.2017.2672829
+7c13fa0c742123a6a927771ce67da270492b588c,http://doi.acm.org/10.1145/3152114
+163ba5a998973f9ead6be0ca873aed5934d5022e,https://doi.org/10.1109/ACPR.2013.53
+16b0c171fb094f677fcdf78bbb9aaef0d5404942,https://doi.org/10.1109/TIP.2017.2733739
+1617f56c86bf8ea61de62062a97961d23fcf03d3,https://doi.org/10.1007/s11390-015-1540-3
+1672becb287ae3eaece3e216ba37677ed045db55,https://doi.org/10.1016/j.eswa.2015.10.047
+16eaa26a84468b27e559215db01c53286808ec2a,https://doi.org/10.1007/s11263-015-0859-0
+16c1b592d85d13f1ba4eff0afb4441bb78650785,https://doi.org/10.1109/TIP.2017.2685343
+163d0e6ea8c8b88b4383a4eaa740870e2458b9b0,https://doi.org/10.1007/978-3-319-71928-3_18
+16fc82d44188eb49a151bd5836a29911b3bfabcb,https://doi.org/10.1007/978-981-10-7302-1_50
+42441f1fee81c8fd42a74504df21b3226a648739,https://doi.org/10.1007/s11554-008-0072-2
+4268ae436db79c4eee8bc06e9475caff3ff70d57,http://doi.ieeecomputersociety.org/10.1109/FG.2017.146
+42fff5b37006009c2dbfab63c0375c7c7d7d8ee3,https://doi.org/10.1007/s11042-014-2228-3
+42a5dc91852c8c14ed5f4c3b451c9dc98348bc02,http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2016.021
+425ea5656c7cf57f14781bafed51182b2e6da65f,https://doi.org/10.1109/TIP.2017.2718187
+427bec487c330e7e34cc2c8fc2d6558690421ea0,http://doi.ieeecomputersociety.org/10.1109/ISCSCT.2008.352
+4215b34597d8ce1e8985afa8043400caf0ec7230,http://doi.ieeecomputersociety.org/10.1109/WI-IAT.2012.71
+89e31777f221ddb3bc9940d7f520c8114c4148a2,https://doi.org/10.1007/s11063-012-9224-2
+897aa4aaa474fed41233faec9b70b802aea5fdea,https://doi.org/10.1142/S0218001414560126
+89272b78b651038ff4d294b9ccca0018d2c9033b,https://doi.org/10.1109/ICPR.2014.777
+89497854eada7e32f06aa8f3c0ceedc0e91ecfef,https://doi.org/10.1109/TIP.2017.2784571
+891b31be76e2baa83745f24c2e2013851dc83cbb,https://doi.org/10.1109/TSMCB.2009.2018137
+892400017e5c93611dc8361e7749135520d66f25,https://doi.org/10.1109/ICARCV.2010.5707394
+898ff1bafee2a6fb3c848ad07f6f292416b5f07d,https://doi.org/10.1109/TIP.2016.2518867
+454bf5b99607b4418e931092476ad1798ce5efa4,https://doi.org/10.1155/2011/790598
+45877ff4694576f59c2a9ca45aa65f935378492a,http://doi.ieeecomputersociety.org/10.1109/IIH-MSP.2009.38
+4551194408383b12db19a22cca5db0f185cced5c,https://doi.org/10.1109/TNNLS.2014.2341634
+45e043dffc57a9070f483ac4aec2c5cd2cec22cb,http://doi.acm.org/10.1145/3130977
+452ea180cf4d08d7500fc4bc046fd7141fd3d112,https://doi.org/10.1109/BTAS.2012.6374569
+45edb29fb7eed5a52040300e1fd3cd53f1bdb429,https://doi.org/10.1109/ICIP.2015.7351570
+4512b87d68458d9ba0956c0f74b60371b6c69df4,https://doi.org/10.1109/TIP.2017.2708504
+4500888fd4db5d7c453617ee2b0047cedccf2a27,http://doi.acm.org/10.1145/2647750
+4562ea84ebfc8d9864e943ed9e44d35997bbdf43,http://doi.ieeecomputersociety.org/10.1109/FG.2017.19
+459eb3cfd9b52a0d416571e4bc4e75f979f4b901,https://doi.org/10.1109/ROBIO.2015.7418998
+453bf941f77234cb5abfda4e015b2b337cea4f17,https://doi.org/10.1007/s11042-014-2340-4
+1fd7a17a6c630a122c1a3d1c0668d14c0c375de0,https://doi.org/10.1109/CIST.2016.7805097
+1f41bf5e8b8562ac7ef0013f4d0cf1c9e1a431f9,https://doi.org/10.1109/IJCNN.2017.7965955
+1f8656e2254e353a91cceb08b33c25643a1b1fb7,https://doi.org/10.1109/LSP.2017.2736542
+1f02bf412a82ad99fe99dc3cfb3adec9dd41eabb,https://doi.org/10.1007/s11760-016-1052-9
+1f5725a4a2eb6cdaefccbc20dccadf893936df12,https://doi.org/10.1109/CCST.2012.6393544
+1fcb905e4505a781fb0b375eb470f5661e38ae39,http://doi.acm.org/10.1145/3123266.3123450
+874da338c01fb7a87d605fcde6c52835eee03d5e,http://doi.ieeecomputersociety.org/10.1109/ICAPR.2009.20
+87806c51dc8c1077953178367dcf5c75c553ce34,https://doi.org/10.1109/ICMLA.2015.146
+87ee56feefdb39938cda7f872e784d9d986713af,http://dl.acm.org/citation.cfm?id=3022247
+87552622efd0e85c2a71d4d2590e53d45f021dbf,https://doi.org/10.1109/ICIP.2016.7532435
+872ff48a3acfbf96376fd048348372f5137615e4,https://doi.org/10.1007/s41095-016-0051-7
+8706c3d49d1136035f298041f03bb70dc074f24d,http://doi.ieeecomputersociety.org/10.1109/DICTA.2008.12
+876583a059154def7a4bc503b21542f80859affd,https://doi.org/10.1109/IWBF.2016.7449697
+80677676b127b67938c8db06a15d87f5dd4bd7f1,https://doi.org/10.1007/s11760-014-0623-x
+80f72b26c6571aee2ff04704bc7fd1a69bfa0b3f,https://doi.org/10.1016/j.patcog.2016.12.029
+8027a9093f9007200e8e69e05616778a910f4a5f,https://doi.org/10.1109/ICB.2013.6612997
+805a0f4b99f162ac4db0ef6e0456138c8d498c3a,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2015.2465373
+803803b5c2c61046d63674f85ecf0123f9d2c4b8,https://doi.org/10.1049/iet-bmt.2013.0089
+80d42f74ee9bf03f3790c8d0f5a307deffe0b3b7,https://doi.org/10.1109/TNNLS.2016.2522431
+80aa455068018c63237c902001b58844fcc6f160,https://doi.org/10.1109/FG.2011.5771327
+80a5afeb6968c7e736adc48bd4d5ec5b45b13f71,https://doi.org/10.1007/978-3-319-15762-7
+74cec83ee694b5d0e07d5d0bacd0aa48a80776aa,https://doi.org/10.1109/ISCAS.2013.6572506
+745d49a2ff70450113f07124c2c5263105125f58,https://doi.org/10.1109/ICPR.2016.7899972
+745e74ae84e1b2b8690d07db523531642023d6c4,https://doi.org/10.1109/FSKD.2016.7603417
+747dc0add50b86f5ba9e3e7315943d520e08f9eb,http://doi.ieeecomputersociety.org/10.1109/FG.2017.78
+74d3ff8324e02503c18fb2566ed29e2e22ce0d1b,http://doi.ieeecomputersociety.org/10.1109/IAS.2009.266
+1ab19e516b318ed6ab64822efe9b2328836107a4,https://doi.org/10.1109/TIP.2010.2083674
+1ab4fdcd431286a2fe9538cb9a9e3c67016fa98a,https://doi.org/10.1007/s11042-013-1754-8
+1a0e1ba4408d12f8a28049da0ff8cad4f91690d5,https://doi.org/10.1007/s12559-016-9445-1
+1ad5cb4c1eec5a9666b5dbbb6fab43576d0935db,https://doi.org/10.1109/ICIP.2016.7533026
+1a47f12a2490f6775c0ad863ac856de27f5b3e03,https://doi.org/10.1016/j.sigpro.2014.11.010
+1a8d40bcfb087591cc221086440d9891749d47b8,https://doi.org/10.1109/ICCE.2012.6161859
+1afef6b389bd727c566cd6fbcd99adefe4c0cf32,https://doi.org/10.1109/ICB.2016.7550087
+1aeef2ab062c27e0dbba481047e818d4c471ca57,https://doi.org/10.1109/ICACCI.2015.7275860
+1addc5c1fa80086d1ed58f71a9315ad13bd87ca2,https://doi.org/10.1007/s10044-012-0279-5
+1a40c2a2d17c52c8b9d20648647d0886e30a60fa,https://doi.org/10.1109/ICPR.2016.7900283
+1a03dcc811131b0b702bd5a75c54ed26cd27151a,https://doi.org/10.1007/s11760-015-0810-4
+1ad780e02edf155c09ea84251289a054b671b98a,https://doi.org/10.1109/ICNIDC.2012.6418787
+287de191c49a3caa38ad7594093045dfba1eb420,https://doi.org/10.23919/MVA.2017.7986829
+281b91c35a1af97b1405bc724a04e2be6e24971b,https://doi.org/10.1109/ICMLC.2010.5580557
+28d55935cc36df297fe21b98b4e2b07b5720612e,https://doi.org/10.1109/CISS.2016.7460569
+28a45770faf256f294ce3bbd5de25c6d5700976e,https://doi.org/10.1109/ICDSP.2016.7868531
+283d381c5c2ba243013b1c4f5e3b29eb906fa823,http://doi.ieeecomputersociety.org/10.1109/ICCV.2005.222
+2884ff0d58a66d42371b548526d685760e514043,https://doi.org/10.1109/ICIP.2015.7351242
+17768efd76a681902a33994da4d3163262bf657f,https://doi.org/10.1007/s12559-017-9472-6
+176d9121e4e645344de4706dfb345ad456bfb84a,https://doi.org/10.1117/1.JEI.24.2.023009
+17189cfedbdbd219849b8e7f8cf0293d49465f9c,http://doi.acm.org/10.1145/2393347.2396505
+170aa0f16cd655fdd4d087f5e9c99518949a1b5c,https://doi.org/10.1007/s11263-007-0074-8
+179545c1fc645cb2ad9b31a30f48352d541876ff,https://doi.org/10.1109/IJCNN.2007.4371116
+17de5a9ce09f4834629cd76b8526071a956c9c6d,https://doi.org/10.1007/978-3-319-68063-7_8
+1723227710869a111079be7d61ae3df48604e653,https://doi.org/10.1109/INISTA.2014.6873606
+178b37392b2c6f1a167ebc1a5baa5f2f5916e4c4,https://doi.org/10.1007/s11042-013-1578-6
+17d03da4db3bb89537d644b682b2a091d563af4a,https://doi.org/10.1109/TNN.2010.2050600
+7b1ca9a74ab7fbfc32a69e8313ca2f2d78ac6c35,http://doi.ieeecomputersociety.org/10.1109/ICSC.2017.61
+7bc1e7d000ab517161a83b1fedf353e619516ddf,http://doi.ieeecomputersociety.org/10.1109/WACV.2014.6836068
+7b618a699b79c1272f6c83101917ad021a58d96b,https://doi.org/10.1007/s11042-014-1986-2
+7bd37e6721d198c555bf41a2d633c4f0a5aeecc1,https://doi.org/10.1109/ACPR.2013.58
+7b455cbb320684f78cd8f2443f14ecf5f50426db,http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2017.33
+8f3675e979629ca9cee9436d37763f546edb8d40,https://doi.org/10.1109/SIU.2017.7960446
+8fee7b38358815e443f8316fa18768d76dba12e3,http://doi.acm.org/10.1145/2063576.2063676
+8fe5feeaa72eddc62e7e65665c98e5cb0acffa87,https://doi.org/10.1007/s12193-015-0209-0
+8f73af52d87c94d0bd43242462fd68d974eda331,https://doi.org/10.1109/ICB.2013.6613009
+8f99f7ccb85af6d4b9e015a9b215c529126e7844,https://doi.org/10.1109/ROMAN.2017.8172359
+8f051647bd8d23482c6c3866c0ce1959b8bd40f6,https://doi.org/10.1016/j.asoc.2017.04.041
+8f713e3c5b6b166c213e00a3873f750fb5939c9a,https://doi.org/10.1109/EUSIPCO.2015.7362563
+8fc36452a49cb0fd43d986da56f84b375a05b4c1,http://doi.acm.org/10.1145/2542355.2542388
+8aff9c8a0e17be91f55328e5be5e94aea5227a35,https://doi.org/10.1109/TNNLS.2012.2191620
+8a1e95b82d8cf27e0034e127091396efd4c8bd9e,https://doi.org/10.1109/IGARSS.2016.7729015
+8a2210bedeb1468f223c08eea4ad15a48d3bc894,http://doi.acm.org/10.1145/2513383.2513438
+8a2bedaa38abf173823944f0de2c84f5b2549609,https://doi.org/10.1109/TNNLS.2016.2573644
+8ab465c1a131ee4bee6ac0a0b19dfe68f5dcdcc4,http://doi.ieeecomputersociety.org/10.1109/CSSE.2008.575
+7ebfa8f1c92ac213ff35fa27287dee94ae5735a1,https://doi.org/10.1109/TMM.2016.2614429
+7e456e94f3080c761f858264428ee4c91cd187b2,http://ieeexplore.ieee.org/document/6460899/
+7e48711c627edf90e9b232f2cbc0e3576c8f2f2a,https://doi.org/10.1007/s11760-015-0777-1
+10e2f2ad1dedec6066e063cb2098b089b35905a8,http://doi.acm.org/10.1145/3052930
+10df1d4b278da991848fb71b572f687bd189c10e,https://doi.org/10.1109/ICPR.2016.7899739
+104ee18b513b52386f871e959c1f9e5072604e93,https://doi.org/10.1109/GlobalSIP.2017.8309189
+10f4bbf87a44bab3d79e330e486c897e95f5f33f,https://doi.org/10.1109/TIFS.2012.2186292
+1071dde48a77f81c35ad5f0ca90a9daedb54e893,http://ieeexplore.ieee.org/document/7881657/
+1050cd9bf281d0b7367c03d931e6e0b4fc08ccd3,http://doi.ieeecomputersociety.org/10.1109/SMARTCOMP.2014.7043872
+1966bddc083886a9b547e1817fe6abc352a00ec3,http://doi.acm.org/10.1145/2733373.2806312
+19705579b8e7d955092ef54a22f95f557a455338,https://doi.org/10.1109/ICIP.2014.7025277
+1979e270093b343d62e97816eeed956062e155a0,https://doi.org/10.1016/j.micpro.2005.07.003
+194f5d3c240d06575403c9a422a0ebc86d43b91e,https://doi.org/10.1007/s11042-015-2580-y
+197efbef17f92e5cb5076961b6cd9f59e88ffd9a,https://doi.org/10.1109/ICMLA.2017.00-59
+19bbecead81e34b94111a2f584cf55db9a80e60c,http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6248025
+195b61470720c7faa523e10e68d0c8d8f27d7c7a,http://doi.ieeecomputersociety.org/10.1109/CVPR.2011.5995618
+1902288256839539aeb5feb3e1699b963a15aa1a,https://doi.org/10.1109/IJCNN.2016.7727435
+19c82eacd77b35f57ac8815b979716e08e3339ca,http://doi.ieeecomputersociety.org/10.1109/ICITCS.2015.7292981
+191b70fdd6678ef9a00fd63710c70b022d075362,https://doi.org/10.1109/ICIP.2003.1247347
+4c141534210df53e58352f30bab558a077fec3c6,https://doi.org/10.1109/TMM.2016.2557722
+4c19690889fb3a12ec03e65bae6f5f20420b4ba4,https://doi.org/10.1049/iet-ipr.2015.0699
+4c6886c489e93ccab5a1124555a6f3e5b0104464,https://doi.org/10.1109/ICIP.2017.8296921
+4c648fe9b7bfd25236164333beb51ed364a73253,http://doi.acm.org/10.1145/3038924
+4c0846bcfa64d9e810802c5b7ef0f8b43523fe54,http://doi.ieeecomputersociety.org/10.1109/TKDE.2014.2324594
+4c71b0cdb6b80889b976e8eb4457942bd4dd7b66,https://doi.org/10.1109/TIP.2014.2387379
+4cec3e5776090852bef015a8bbe74fed862aa2dd,https://doi.org/10.1109/TSP.2013.2271479
+4c842fbd4c032dd4d931eb6ff1eaa2a13450b7af,https://doi.org/10.1016/j.imavis.2014.06.004
+268c4bb54902433bf00d11391178a162e5d674c9,https://doi.org/10.1109/CVPRW.2010.5543261
+261a80216dda39b127d2b7497c068ec7e0fdf183,https://doi.org/10.1109/TCSVT.2013.2265571
+26ebe98753acec806b7281d085110c06d9cd1e16,http://doi.ieeecomputersociety.org/10.1109/FG.2017.22
+26973cf1552250f402c82e9a4445f03fe6757b58,http://doi.acm.org/10.1145/3126686.3130239
+2601b679fdd637f3cd978753ae2f15e8759dd267,https://doi.org/10.1109/ICIP.2015.7351306
+262cdbc57ecf5c18756046c0d8b9aa7eb10e3b19,http://dl.acm.org/citation.cfm?id=3007694
+26b9d546a4e64c1d759c67cd134120f98a43c2a6,https://doi.org/10.1109/ICMLA.2012.120
+26bbe76d1ae9e05da75b0507510b92e7e6308c73,https://doi.org/10.1007/s00371-014-1049-8
+26949c1ba7f55f0c389000aa234238bf01a32d3b,https://doi.org/10.1109/ICIP.2017.8296814
+26a5136ee4502500fb50cd5ade814aad45422771,https://doi.org/10.1142/S0218001413560028
+26727dc7347e3338d22e8cf6092e3a3c7568d763,http://doi.ieeecomputersociety.org/10.1109/FG.2015.7163088
+2138ccf78dcf428c22951cc066a11ba397f6fcef,https://doi.org/10.1109/BHI.2012.6211519
+21bd60919e2e182a29af455353141ba4907b1b41,https://doi.org/10.1109/ACCESS.2018.2798573
+21cbf46c6adfb3a44ed2b30ff0b21a8391c18b13,https://doi.org/10.1109/VCIP.2017.8305137
+21f5f65e832c5472d6d08f6ee280d65ff0202e29,https://doi.org/10.1007/978-3-319-70353-4_44
+218139e5262cb4f012cd2e119074aa59b89ebc32,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.265
+217aa3aa0b3d9f6f394b5d26f03418187d775596,http://doi.acm.org/10.1145/3123266.3123298
+2149d49c84a83848d6051867290d9c8bfcef0edb,https://doi.org/10.1109/TIFS.2017.2746062
+4dbfbe5fd96c9efc8c3c2fd54406b62979482678,https://doi.org/10.1016/j.jvcir.2014.07.007
+4d1f77d9418a212c61a3c75c04a5b3884f6441ba,https://doi.org/10.1109/TIP.2017.2788196
+4d4736173a5e72c266e52f3a43bdcb2b58f237a2,https://doi.org/10.1109/ISSPA.2012.6310583
+4d6d6369664a49f6992f65af4148cefef95055bc,https://doi.org/10.1109/ICIP.2014.7025407
+75858dbee2c248a60741fbc64dcad4f8b63d51cb,https://doi.org/10.1109/TIP.2015.2460464
+7535e3995deb84a879dc13857e2bc0796a2f7ce2,https://doi.org/10.1007/s10618-010-0207-5
+75d7ba926ef1cc2adab6c5019afbb2f69a5ca27d,https://doi.org/10.1007/s00521-012-1042-y
+75b51140d08acdc7f0af11b0ffa1edb40ebbd059,https://doi.org/10.1007/s00521-010-0381-9
+754626bd5fb06fee5e10962fdfeddd495513e84b,https://doi.org/10.1109/SIU.2017.7960646
+751fb994b2c553dc843774a5620bfcab8bc657fd,https://doi.org/10.1007/978-3-319-67180-2_47
+753a277c1632dd61233c488cc55d648de3caaaa3,https://doi.org/10.1016/j.patcog.2011.02.013
+81a4397d5108f6582813febc9ddbeff905474120,https://doi.org/10.1109/ICPR.2016.7899883
+812d3f6975f4cb87e9905ef18696c5c779227634,https://doi.org/10.1186/s13640-016-0151-4
+8184a92e1ccc7fdeb4a198b226feb325c63d6870,https://doi.org/10.1109/ICCE.2017.7889290
+8185be0689442db83813b49e215bf30870017459,https://doi.org/10.1109/TNNLS.2013.2293418
+81b8a6cabcd6451b21d5b44e69b0a355d9229cc4,https://doi.org/10.1109/ICDSP.2017.8096137
+81d81a2060366f29fd100f793c11acf000bd2a7f,https://doi.org/10.1007/11795131_112
+81af86e3d343a40ce06a3927b6aa8c8853f6811a,http://doi.acm.org/10.1145/3009977.3009996
+81c21f4aafab39b7f5965829ec9e0f828d6a6182,https://doi.org/10.1109/BTAS.2015.7358744
+81d232e1f432db7de67baf4f30f240c62d1a9055,https://doi.org/10.1109/ICIP.2017.8296405
+86fa086d02f424705bbea53943390f009191740a,https://doi.org/10.1109/ICIP.2015.7351651
+865d4ce1751ff3c0a8eb41077a9aa7bd94603c47,https://doi.org/10.1007/s12193-015-0210-7
+86597fe787e0bdd05935d25158790727257a40bd,http://doi.ieeecomputersociety.org/10.1109/3DV.2016.72
+86afb1e38a96f2ac00e792ef353a971fd13c8474,https://doi.org/10.1109/BigData.2016.7840742
+8686b15802529ff8aea50995ef14079681788110,https://doi.org/10.1109/TNNLS.2014.2376936
+864d50327a88d1ff588601bf14139299ced2356f,https://doi.org/10.1109/FSKD.2016.7603151
+8697ccb156982d40e88fda7fbf4297fa5171f24d,http://doi.ieeecomputersociety.org/10.1109/WI-IAT.2011.101
+86881ce8f80adea201304ca6bb3aa413d94e9dd0,https://doi.org/10.1109/ICIP.2017.8297133
+8605e8f5d84b8325b1a81d968c296a5a5d741f31,https://doi.org/10.1016/j.patcog.2017.04.010
+72345fed8d068229e50f9ea694c4babfd23244a0,http://doi.acm.org/10.1145/2632856.2632937
+728b1b2a86a7ffda402e7ec1a97cd1988dcde868,https://doi.org/10.1016/j.procs.2016.04.083
+72a3bb0fb490355a926c5a689e12268bff9ff842,https://doi.org/10.1109/ICIP.2006.312862
+7234468db46b37e2027ab2978c67b48b8581f796,https://doi.org/10.1109/ACPR.2015.7486464
+72119cb98f9502ec639de317dccea57fd4b9ee55,https://doi.org/10.1109/GlobalSIP.2015.7418230
+72d110df78a7931f5f2beaa29f1eb528cf0995d3,https://doi.org/10.1007/s11517-015-1346-z
+440b94b1624ca516b07e72ea8b3488072adc5e26,https://doi.org/10.1109/ITSC.2015.153
+44b827df6c433ca49bcf44f9f3ebfdc0774ee952,https://doi.org/10.1109/LSP.2017.2726105
+44c278cbecd6c1123bfa5df92e0bda156895fa48,https://doi.org/10.1109/ICPR.2014.316
+4490b8d8ab2ac693c670751d4c2bff0a56d7393d,https://doi.org/10.1007/s11063-017-9648-9
+4492914df003d690e5ff3cb3e0e0509a51f7753e,http://doi.ieeecomputersociety.org/10.1109/ICCI-CC.2014.6921443
+44834929e56f2a8f16844fde519039d647006216,http://doi.acm.org/10.1145/1460096.1460150
+44389d8e20cf9f1a8453f4ba033e03cff9bdfcbb,https://doi.org/10.1016/j.neucom.2017.07.052
+445e3ba7eabcc55b5d24f951b029196b47830684,https://doi.org/10.1109/TMM.2016.2591508
+2a92bda6dbd5cce5894f7d370d798c07fa8783f4,https://doi.org/10.1109/TIFS.2014.2359587
+2afde207bd6f2e5fa20f3cf81940b18cc14e7dbb,https://doi.org/10.1109/TIP.2013.2255300
+2a98b850139b911df5a336d6ebf33be7819ae122,https://doi.org/10.1109/ICIP.2015.7350806
+2ae2e29c3e9cc2d94a26da5730df7845de0d631b,https://doi.org/10.1109/TCSVT.2011.2129670
+2a6783ae51d7ee781d584ef9a3eb8ab1997d0489,https://doi.org/10.1109/CVPRW.2010.5543608
+2a98351aef0eec1003bd5524933aed8d3f303927,https://doi.org/10.1109/CIRA.2007.382901
+2a41388040141ef6b016c100ef833a2a73ab8b42,https://doi.org/10.1016/j.neucom.2017.03.033
+2a79bd36c56fd1634ca0f8089fe8aa9343eb92ce,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2006.104
+2a84f7934365f05b6707ea0ac225210f78e547af,https://doi.org/10.1109/ICPR.2016.7899690
+2adffdffa16475ae71bb2adcf65840f01f1e53f7,https://doi.org/10.1049/iet-cvi.2014.0094
+2a4984fb48c175d1e42c6460c5f00963da9f26b6,https://doi.org/10.1109/MIPRO.2015.7160445
+2a7e6a1b2638550370a47f2f6f6e38e76fe9ac13,http://doi.acm.org/10.1145/3090311
+2f8ef56c1007a02cdc016219553479d6b7e097fb,https://doi.org/10.1007/978-3-642-14834-7_2
+2fd007088a75916d0bf50c493d94f950bf55c5e6,https://doi.org/10.1007/978-981-10-7302-1_1
+2f43b614607163abf41dfe5d17ef6749a1b61304,https://doi.org/10.1109/TIFS.2014.2361479
+2f1b521c29ab075a0cd9bbf56ba26ee13d5e4d76,https://doi.org/10.1109/ACPR.2015.7486607
+2f5b51af8053cf82ab52bbfd46b56999222ec21c,https://doi.org/10.1109/ICPR.2014.788
+2f841ff062053f38725030aa1b77db903dad1efb,https://doi.org/10.1109/ICRA.2014.6907748
+2facf3e85240042a02f289a0d40fee376c478d0f,https://doi.org/10.1109/BTAS.2010.5634544
+2f61d91033a06dd904ff9d1765d57e5b4d7f57a6,https://doi.org/10.1109/ICIP.2016.7532953
+2f160a6526ebf10773680dadaba44b006bcec2cb,https://doi.org/10.1016/j.neucom.2012.03.007
+2f17c0514bb71e0ca20780d71ea0d50ff0da4938,http://doi.acm.org/10.1145/1943403.1943490
+43261920d2615f135d6e72b333fe55d3f2659145,http://doi.acm.org/10.1145/3136273.3136301
+4349f17ec319ac8b25c14c2ec8c35f374b958066,https://doi.org/10.1109/THMS.2017.2681425
+43cbe3522f356fbf07b1ff0def73756391dc3454,https://doi.org/10.1109/WIFS.2011.6123140
+4398afa0aeb5749a12772f2d81ca688066636019,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2015.2496320
+4344ba6e33faaa616d01248368e66799548ca48b,https://doi.org/10.1007/s10044-015-0474-2
+43fe03ec1acb6ea9d05d2b22eeddb2631bd30437,https://doi.org/10.1109/ICIP.2017.8296394
+43bb2b58f906262035ef61e41768375bc8d99ae3,https://doi.org/10.1016/j.procs.2016.04.072
+4328933890f5a89ad0af69990926d8484f403e4b,http://doi.acm.org/10.1145/2072298.2071993
+434f1442533754b3098afd4e24abf1e3792b24db,https://doi.org/10.1109/CBMI.2015.7153627
+43eb03f95adc0df61af2c3b12a913c725b08d4f5,http://doi.ieeecomputersociety.org/10.1109/ICDMW.2011.101
+88780bd55615c58d9bacc4d66fc2198e603a1714,https://doi.org/10.1109/EMBC.2016.7590730
+8879083463a471898ff9ed9403b84db277be5bf6,https://doi.org/10.1016/j.patcog.2016.08.031
+884a9ce87d4d2338cb97bf4c8df3cdb079a87d5e,https://doi.org/10.1109/SMC.2016.7844717
+88ed558bff3600f5354963d1abe762309f66111e,https://doi.org/10.1109/TIFS.2015.2393553
+88399c7fa890f1252178cd5e4979971509bd904f,https://doi.org/10.1142/S0219878906000915
+8845c03bee88fdd2f400ed2bddba038366c82abe,http://doi.ieeecomputersociety.org/10.1109/TCBB.2011.135
+8882d39edae556a351b6445e7324ec2c473cadb1,https://doi.org/10.1109/TIP.2017.2755766
+88c21e06ed44da518a7e346fce416efedc771704,https://doi.org/10.1109/ICIP.2015.7351455
+9f5e22fbc22e1b0a61bcd75202d299232e68de5d,https://doi.org/10.1109/IJCNN.2016.7727391
+9fab78015e6e91ba7241a923222acd6c576c6e27,http://doi.ieeecomputersociety.org/10.1109/ICSS.2016.10
+9f3c9e41f46df9c94d714b1f080dafad6b4de1de,https://doi.org/10.1109/ICT.2017.7998260
+9f428db0d3cf26b9b929dd333a0445bcc7514cdf,https://doi.org/10.1016/j.cviu.2010.11.015
+9fd1b8abbad25cb38f0c009288fb5db0fc862db6,https://doi.org/10.1109/ICASSP.2003.1199147
+9fbcf40b0649c03ba0f38f940c34e7e6c9e04c03,https://doi.org/10.1007/s10044-006-0033-y
+6b44543571fe69f088be577d0c383ffc65eceb2a,http://doi.ieeecomputersociety.org/10.1109/EST.2012.24
+6b0a2f9ab9b134d66a325525ea5d90ad546fe2b7,https://doi.org/10.1109/IJCNN.2016.7727803
+6bacd4347f67ec60a69e24ed7cc0ac8073004e6f,https://doi.org/10.1109/VCIP.2014.7051528
+6ba6045e4b404c44f9b4dfce2d946019f0e85a72,https://doi.org/10.1109/ICPR.2016.7899962
+6b8329730b2e13178a577b878631735a1cd58a71,http://doi.ieeecomputersociety.org/10.1109/FiCloud.2015.78
+07dc9f3b34284cc915dea7575f40ef0c04338126,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2016.2537337
+070c8ee3876c06f9a65693e536d61097ace40417,https://doi.org/10.1109/ACPR.2013.161
+0733ec1953f6c774eb3a723618e1268586b46359,https://doi.org/10.1109/TMM.2006.870737
+0750c796467b6ef60b0caff5fb199337d54d431e,https://doi.org/10.1109/ICMLC.2016.7873015
+0701b01bc99bf3b64050690ceadb58a8800e81ed,https://doi.org/10.1007/s11042-015-3107-2
+076c97826df63f70d55ea11f0b7ae47a7ad81ad3,http://doi.ieeecomputersociety.org/10.1109/SITIS.2011.40
+38e7f3fe450b126367ec358be9b4cc04e82fa8c7,https://doi.org/10.1109/TIP.2014.2351265
+3888d7a40f3cea5e4a851c8ca97a2d7810a62867,https://doi.org/10.1109/CCECE.2016.7726684
+383ff2d66fecdc2fd02a31ac1fa392f48e578296,https://doi.org/10.1016/j.cviu.2015.07.005
+387b54cf6c186c12d83f95df6bd458c5eb1254ee,https://doi.org/10.1109/VCIP.2017.8305123
+3826e47f0572ab4d0fe34f0ed6a49aa8303e0428,https://doi.org/10.1109/ACPR.2013.66
+383e64d9ef1fca9de677ac82486b4df42e96e861,http://doi.ieeecomputersociety.org/10.1109/DSC.2017.78
+38345264a9ca188c4facffe6e18a7e6865fb2966,http://doi.ieeecomputersociety.org/10.1109/BIBM.2017.8217969
+008528d5e27919ee95c311266041e4fb1711c254,https://doi.org/10.1007/s13735-015-0092-1
+00d4c2db10f3a32d505d7b8adc7179e421443dec,https://doi.org/10.1109/GlobalSIP.2014.7032080
+00049f989067d082f7f8d0581608ad5441d09f8b,https://doi.org/10.1109/LSP.2016.2555480
+003ba2001bd2614d309d6ec15e9e2cbe86db03a1,https://doi.org/10.1109/ISCAS.2005.1465264
+00eccc565b64f34ad53bf67dfaf44ffa3645adff,http://doi.ieeecomputersociety.org/10.1109/ICMEW.2013.6618328
+00301c250d667700276b1e573640ff2fd7be574d,https://doi.org/10.1109/BTAS.2014.6996242
+00a38ebce124879738b04ffc1536018e75399193,https://doi.org/10.1109/BTAS.2017.8272766
+009bf86913f1c366d9391bf236867d84d12fa20c,https://doi.org/10.1109/CVPRW.2010.5544620
+0034e37a0faf0f71395245b266aacbf5412f190a,https://doi.org/10.1109/TMM.2014.2355134
+6e9de9c3af3258dd18142e9bef2977b7ce153bd5,https://doi.org/10.1007/978-3-319-48881-3
+6e2041a9b5d840b0c3e4195241cd110640b1f5f3,https://doi.org/10.1007/s10044-013-0349-3
+6e7ffd67329ca6027357a133437505bc56044e65,https://doi.org/10.1109/IJCNN.2014.6889754
+6ec275755f8776b620d0a4550be0e65caf2bc87a,https://doi.org/10.1109/IS.2016.7737496
+9ab963e473829739475b9e47514f454ab467a5af,http://doi.ieeecomputersociety.org/10.1109/FG.2017.33
+9abf6d56a7d336bc58f4e3328d2ee807032589f1,https://doi.org/10.1109/CEC.2017.7969500
+9abab00de61dd722b3ad1b8fa9bffd0001763f8b,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2420563
+9ab126760f68071a78cabe006cf92995d6427025,https://doi.org/10.1007/s11042-013-1703-6
+9a84588fe7e758cfbe7062686a648fab787fc32f,https://doi.org/10.1007/s11042-014-2333-3
+9aade3d26996ce7ef6d657130464504b8d812534,https://doi.org/10.1109/TNNLS.2016.2618340
+9aba281955117eb4a7aed36775f55f27e4dde42f,http://doi.ieeecomputersociety.org/10.1109/AFGR.2000.840635
+36bb5cca0f6a75be8e66f58cba214b90982ee52f,http://doi.ieeecomputersociety.org/10.1109/FG.2017.73
+36219a3196aac2bd149bc786f083957a6e6da125,https://doi.org/10.1016/j.jvcir.2015.12.003
+3690af0af51a067750f664c08e48b486d1cd476d,http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2012.41
+36486944b4feeb88c0499fecd253c5a53034a23f,https://doi.org/10.1109/CISP-BMEI.2017.8301986
+36b23007420b98f368d092bab196a8f3cbcf6f93,http://doi.ieeecomputersociety.org/10.1109/ICNC.2009.106
+36b13627ee8a5a8cd04645213aabfa917bbd32f5,https://doi.org/10.1109/TCSVT.2016.2602812
+363f540dc82ba8620262a04a67cfd6d3c85b0582,http://doi.ieeecomputersociety.org/10.1109/WIAMIS.2009.5031445
+36bb93c4f381adca267191811abb8cc7812363f9,https://doi.org/10.1109/CISP-BMEI.2017.8301987
+5c91fc106cfe9d57a9b149c1af29ca84d403fc7e,https://doi.org/10.1109/TCSVT.2015.2452782
+5c3eb40b06543f00b2345f3291619a870672c450,http://doi.ieeecomputersociety.org/10.1109/ICPR.2006.539
+5c19c4c6a663fe185a739a5f50cef6a12a4635a1,https://doi.org/10.1016/j.imavis.2012.08.016
+5c526ee00ec0e80ba9678fee5134dae3f497ff08,https://doi.org/10.1109/TCE.2010.5606299
+5c4f9260762a450892856b189df240f25b5ed333,https://doi.org/10.1109/TIP.2017.2651396
+09f9409430bba2afb84aa8214dbbb43bfd4cf056,https://doi.org/10.1109/TNN.2006.883012
+0974677f59e78649a40f0a1d85735410d21b906a,https://doi.org/10.1109/ISCAS.2017.8050798
+0931bef0a9c8c153184a1f9c286cf4883cbe99b6,https://doi.org/10.1007/s12193-015-0203-6
+09138ad5ad1aeef381f825481d1b4f6b345c438c,https://doi.org/10.1109/IIH-MSP.2012.41
+096ffc1ea5493242ba0c113178dab0c096412f81,http://doi.acm.org/10.1145/3123266.3123441
+092dd7cb6c9b415eb83afb104fa63d7d4290ac33,https://doi.org/10.1109/SPLIM.2016.7528409
+5dbb2d556f2e63a783a695a517f5deb11aafd7ea,https://doi.org/10.1109/ICB.2015.7139079
+5dd57b7e0e82a33420c054da7ea3f435d49e910e,https://doi.org/10.1007/s10851-014-0493-4
+5df17c81c266cf2ebb0778e48e825905e161a8d9,https://doi.org/10.1109/TMM.2016.2520091
+5da98f7590c08e83889f3cec7b0304b3610abf42,https://doi.org/10.1016/j.eswa.2017.07.018
+5d9f468a2841ea2f27bbe3ef2c6fe531d444be68,https://doi.org/10.1109/GlobalSIP.2017.8309167
+5ddfd3d372f7679518db8fd763d5f8bc5899ed67,https://doi.org/10.1109/ICPR.2014.797
+31ba7f5e09a2f0fe9cf7ea95314723206dcb6059,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.300
+3150e329e01be31ba08b6d76fc46b0da88a5ddeb,http://doi.acm.org/10.1145/2927006.2927012
+310fe4e6cb6d090f7817de4c1034e35567b56e34,https://doi.org/10.1109/ICPR.2014.313
+31697737707d7f661cbc6785b76cf9a79fee3ccd,http://doi.ieeecomputersociety.org/10.1109/FG.2017.100
+31a36014354ee7c89aa6d94e656db77922b180a5,http://doi.acm.org/10.1145/2304496.2304509
+31ffc95167a2010ce7aab23db7d5fc7ec439f5fb,https://doi.org/10.1109/TNNLS.2017.2651169
+31ba9d0bfaa2a44bae039e5625eb580afd962892,https://doi.org/10.1016/j.cviu.2016.03.014
+314c4c95694ff12b3419733db387476346969932,http://dl.acm.org/citation.cfm?id=3007672
+31f905d40a4ac3c16c91d5be8427762fa91277f1,https://doi.org/10.1109/TIP.2017.2704661
+91167aceafbc9c1560381b33c8adbc32a417231b,https://doi.org/10.1109/TCSVT.2009.2020337
+915ff2bedfa0b73eded2e2e08b17f861c0e82a58,https://doi.org/10.1109/UEMCON.2017.8249000
+919bdc161485615d5ee571b1585c1eb0539822c8,http://ieeexplore.ieee.org/document/6460332/
+9101363521de0ec1cf50349da701996e4d1148c8,http://doi.ieeecomputersociety.org/10.1109/ICIAP.2007.28
+919cb6160db66a8fe0b84cb7f171aded48a13632,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2014.2327978
+9166f46aa3e58befaefd3537e5a11b31ebeea4d0,https://doi.org/10.1109/ICIP.2015.7351505
+91d0e8610348ef4d5d4975e6de99bb2d429af778,http://doi.ieeecomputersociety.org/10.1109/ICIG.2011.61
+913961d716a4102d3428224f999295f12438399f,https://doi.org/10.1016/j.patcog.2014.01.016
+913062218c7498b2617bb9d7821fe1201659c5cc,https://doi.org/10.1109/ICMLA.2012.178
+918fc4c77a436b8a588f63b2b37420b7868fbbf8,https://doi.org/10.1016/j.inffus.2015.03.005
+655e94eccddbe1b1662432c1237e61cf13a7d57b,http://doi.ieeecomputersociety.org/10.1109/ISIP.2008.147
+6554ca3187b3cbe5d1221592eb546dfc11aac14b,http://doi.acm.org/10.1145/2501643.2501647
+65475ce4430fb524675ebab6bcb570dfa07e0041,https://doi.org/10.1109/ISR.2013.6695696
+65869cc5ef00d581c637ae8ea6ca02ae4bb2b996,http://doi.ieeecomputersociety.org/10.1109/ICDM.2007.65
+659dc6aa517645a118b79f0f0273e46ab7b53cd9,https://doi.org/10.1109/ACPR.2015.7486608
+65fc8393610fceec665726fe4e48f00dc90f55fb,https://doi.org/10.1109/CYBConf.2013.6617455
+6256b47342f080c62acd106095cf164df2be6020,https://doi.org/10.1007/978-3-319-24702-1_6
+62648f91e38b0e8f69dded13b9858bd3a86bb6ed,http://doi.acm.org/10.1145/2647868.2655016
+628f9c1454b85ff528a60cd8e43ec7874cf17931,http://doi.acm.org/10.1145/2993148.2993193
+62e834114b58a58a2ea2d7b6dd7b0ce657a64317,https://doi.org/10.1109/SMC.2014.6973987
+62e61f9f7445e8dec336415ac0c7e677f9f5f7c1,https://doi.org/10.1142/S0219467814500065
+6267dbeb54889be5bdb50c338a7c6ef82287084c,https://doi.org/10.1109/ICMLC.2010.5580567
+963a004e208ce4bd26fa79a570af61d31651b3c3,https://doi.org/10.1016/j.jvlc.2009.01.011
+9635493998ad60764d7bbf883351af57a668d159,https://doi.org/10.1109/IJCNN.2017.7966005
+96a8f115df9e2c938453282feb7d7b9fde6f4f95,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2016.2593719
+965c4a8087ae208c08e58aaf630ad412ac8ce6e2,http://doi.ieeecomputersociety.org/10.1109/ACII.2013.100
+96b1f2bde46fe4f6cc637398a6a71e8454291a6e,https://doi.org/10.1109/TIP.2010.2073476
+96fbadc5fa1393d59ce0b8fd3d71aebc1fe35b40,https://doi.org/10.1109/ICIP.2016.7532959
+9652f154f4ae7807bdaff32d3222cc0c485a6762,https://doi.org/10.1007/s00138-016-0760-z
+96d34c1a749e74af0050004162d9dc5132098a79,https://doi.org/10.1109/TNN.2005.844909
+96e0b67f34208b85bd90aecffdb92bc5134befc8,https://doi.org/10.1016/j.patcog.2007.10.002
+3a9fbd05aaab081189a8eea6f23ed730fa6db03c,https://doi.org/10.1109/ICASSP.2013.6638305
+3aebaaf888cba25be25097173d0b3af73d9ce7f9,http://doi.ieeecomputersociety.org/10.1109/MMUL.2014.49
+3a1c40eced07d59a3ea7acda94fa833c493909c1,http://doi.ieeecomputersociety.org/10.1109/FG.2017.111
+3ad56aed164190e1124abea4a3c4e1e868b07dee,https://doi.org/10.1016/j.patcog.2015.12.016
+3a0425c25beea6c4c546771adaf5d2ced4954e0d,https://link.springer.com/book/10.1007/978-3-319-58347-1
+54058859a2ddf4ecfc0fe7ccbea7bb5f29d9201d,https://doi.org/10.1007/978-3-319-50832-0_36
+548233d67f859491e50c5c343d7d77a7531d4221,https://doi.org/10.1007/s11042-007-0176-x
+5491478ae2c58af21389ed3af21babd362511a8e,http://doi.acm.org/10.1145/2949035.2949048
+54e988bc0764073a5db2955705d4bfa8365b7fa9,http://doi.acm.org/10.1145/2522848.2531749
+98856ab9dc0eab6dccde514ab50c823684f0855c,https://doi.org/10.1109/TIFS.2012.2191962
+982ede05154c1afdcf6fc623ba45186a34f4b9f2,https://doi.org/10.1109/TMM.2017.2659221
+982d4f1dee188f662a4b5616a045d69fc5c21b54,https://doi.org/10.1109/IJCNN.2016.7727859
+985bbe1d47b843fa0b974b4db91be23f218d1ce7,https://doi.org/10.1007/978-3-319-68121-4
+988849863c3a45bcedacf8bd5beae3cc9210ce28,http://doi.ieeecomputersociety.org/10.1109/TPDS.2016.2539164
+98c5dc00bd21a39df1d4411641329bdd6928de8a,http://doi.ieeecomputersociety.org/10.1109/CVPR.2011.5995447
+5364e58ba1f4cdfcffb247c2421e8f56a75fad8d,https://doi.org/10.1109/VCIP.2017.8305113
+539cb169fb65a5542c84f42efcd5d2d925e87ebb,https://doi.org/10.1109/ICB.2015.7139098
+5375a3344017d9502ebb4170325435de3da1fa16,https://doi.org/10.1007/978-3-642-37444-9
+5304cd17f9d6391bf31276e4419100f17d4423b2,https://doi.org/10.1109/ICIP.2012.6466930
+53873fe7bbd5a2d171e2b1babc9cacaad6cabe45,https://doi.org/10.1109/TCYB.2015.2417211
+534159e498e9cc61ea10917347637a59af38142d,https://doi.org/10.1016/j.neucom.2016.01.126
+53509017a25ac074b5010bb1cdba293cdf399e9b,http://doi.ieeecomputersociety.org/10.1109/AVSS.2012.41
+539f55c0e2501c1d86791c8b54b225d9b3187b9c,https://doi.org/10.1109/TIP.2017.2738560
+539ffd51f18404e1ef83371488cf5a27cd16d064,https://doi.org/10.1049/iet-ipr.2014.0733
+5305bfdff39ae74d2958ba28d42c16495ce2ff86,https://doi.org/10.1109/DICTA.2014.7008128
+3f2a44dcf0ba3fc72b24c7f09bb08e25797398c1,https://doi.org/10.1109/IJCNN.2017.7966210
+3fa628e7cff0b1dad3f15de98f99b0fdb09df834,http://doi.ieeecomputersociety.org/10.1109/ICME.2013.6607603
+3ffbc912de7bad720c995385e1fdc439b1046148,http://doi.ieeecomputersociety.org/10.1109/IIH-MSP.2008.347
+3fe3d6ff7e5320f4395571131708ecaef6ef4550,https://doi.org/10.1109/SITIS.2016.60
+3f88ea8cf2eade325b0f32832561483185db5c10,https://doi.org/10.1109/TIP.2017.2721838
+3f4711c315d156a972af37fe23642dc970a60acf,https://doi.org/10.1109/IJCNN.2008.4634393
+3ff418ac82df0b5c2f09f3571557e8a4b500a62c,https://doi.org/10.1007/s11554-007-0039-8
+3fc173805ed43602eebb7f64eea4d60c0386c612,http://doi.ieeecomputersociety.org/10.1109/CyberC.2015.94
+30cc1ddd7a9b4878cca7783a59086bdc49dc4044,https://doi.org/10.1007/s11042-015-2599-0
+30a4b4ef252cb509b58834e7c40862124c737b61,https://doi.org/10.1142/S0218001416560061
+3060ac37dec4633ef69e7bc63488548ab3511f61,https://doi.org/10.1007/s00521-018-3358-8
+30044dd951133187cb8b57e53a22cf9306fa7612,https://doi.org/10.1109/WACV.2017.52
+30188b836f2fa82209d7afbf0e4d0ee29c6b9a87,https://doi.org/10.1109/TIP.2013.2249077
+3080026f2f0846d520bd5bacb0cb2acea0ffe16b,https://doi.org/10.1109/BTAS.2017.8272690
+30cace74a7d51e9a928287e25bcefb968c49f331,http://doi.ieeecomputersociety.org/10.1109/ACII.2015.7344634
+5ee0103048e1ce46e34a04c45ff2c2c31529b466,https://doi.org/10.1109/ICIP.2015.7350886
+5e8de234b20f98f467581f6666f1ed90fd2a81be,http://doi.acm.org/10.1145/2647868.2655042
+5e87f5076952cd442718d6b4addce905bae1a1a4,https://doi.org/10.1109/ICMLC.2016.7872938
+5e19d7307ea67799eb830d5ce971f893e2b8a9ca,https://doi.org/10.1007/s11063-012-9214-4
+5e0b691e9e5812dd3cb120a8d77619a45aa8e4c4,https://doi.org/10.1109/ICIP.2016.7532567
+5ed5e534c8defd683909200c1dc31692942b7b5f,http://doi.acm.org/10.1145/2983926
+5e62b2ab6fd3886e673fd5cbee160a5bee414507,http://doi.ieeecomputersociety.org/10.1109/SITIS.2015.31
+5e806d8fa48216041fe719309534e3fa903f7b5b,https://doi.org/10.1109/BTAS.2010.5634501
+5efdf48ca56b78e34dc2f2f0ce107a25793d3fc2,http://doi.ieeecomputersociety.org/10.1109/TVCG.2016.2641442
+5bed2453a5b0c54a4a4a294f29c9658658a9881e,https://doi.org/10.1109/TIP.2015.2451173
+5b64584d6b01e66dfd0b6025b2552db1447ccdeb,https://doi.org/10.1109/BTAS.2017.8272697
+5bfad0355cdb62b22970777d140ea388a7057d4c,https://doi.org/10.1016/j.patcog.2011.05.006
+5b4bbba68053d67d12bd3789286e8a9be88f7b9d,https://doi.org/10.1109/ICSMC.2008.4811353
+37c5e3b6175db9eaadee425dc51bc7ce05b69a4e,https://doi.org/10.1007/s00521-013-1387-x
+3769e65690e424808361e3eebfdec8ab91908aa9,http://doi.acm.org/10.1145/2647868.2655035
+37f25732397864b739714aac001ea1574d813b0d,https://doi.org/10.1016/j.ijar.2017.09.002
+373c4d6af0ee233f0d669c3955c3a3ef2a009638,https://doi.org/10.1109/APSIPA.2015.7415420
+0874734e2af06883599ed449532a015738a1e779,https://doi.org/10.1007/s10115-013-0702-2
+0821028073981f9bd2dba2ad2557b25403fe7d7d,http://doi.acm.org/10.1145/2733373.2806318
+08872d801f134e41753601e85971769b28314ca2,http://doi.acm.org/10.1145/2683483.2683560
+080ab68a898a3703feead145e2c38361ae84a0a8,https://doi.org/10.1109/TIFS.2014.2343833
+6d5f876a73799cc628e4ad2d9cfcd88091272342,https://doi.org/10.1109/TSMCC.2005.848193
+6da3ff4250103369f4a6a39c8fb982438a97525c,https://doi.org/10.1109/THMS.2015.2404913
+6dd8d8be00376ac760dc92f9c5f20520872c5355,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2417578
+6d67a7fd9a4fa99624721f37b077c71dad675805,https://doi.org/10.1007/s12193-015-0202-7
+6d4c64ca6936f868d793e1b164ddaf19243c19a7,https://doi.org/10.1109/TNNLS.2015.2499273
+01729cb766b1016bac217a6a6cf24bbde19f56c8,https://doi.org/10.1109/CBMI.2010.5529888
+013d0acff1e5410fd9f6e15520d16f4ea02f03f6,https://doi.org/10.1109/TMM.2015.2477681
+01e14d8ffd6767336d50c2b817a7b7744903e567,http://doi.ieeecomputersociety.org/10.1109/FG.2017.128
+0133d1fe8a3138871075cd742c761a3de93a42ec,https://doi.org/10.1109/ICDSP.2015.7251932
+016194dbcd538ab5a129ef1bcff3c6e073db63f9,https://doi.org/10.1007/s10462-012-9334-2
+01f0a4e1442a7804e1fe95798eff777d08e42014,https://doi.org/10.1016/j.knosys.2017.09.005
+01e27c91c7cef926389f913d12410725e7dd35ab,https://doi.org/10.1007/s11760-017-1140-5
+067fe74aec42cb82b92cf6742c7cfb4a65f16951,http://doi.acm.org/10.1145/2601434
+06a799ad89a2a45aee685b9e892805e3e0251770,https://doi.org/10.1007/978-3-319-42147-6
+060f67c8a0de8fee9c1732b63ab40627993f93d0,https://doi.org/10.1007/978-3-642-33564-8
+06c956d4aac65752672ce4bd5a379f10a7fd6148,https://doi.org/10.1109/LSP.2017.2749763
+0629bc2b12245195af989e21573369329b7ef2b7,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2016.2553038
+6c6f0e806e4e286f3b18b934f42c72b67030ce17,https://doi.org/10.1109/FG.2011.5771345
+6c28b3550f57262889fe101e5d027912eb39564e,https://doi.org/10.1109/LSP.2014.2338911
+6c0ad77af4c0850bd01bb118e175ecc313476f27,http://doi.acm.org/10.1145/3009977.3010026
+6ca7a82ec1c51417c4f0b8eebddb53a73a3874b1,http://doi.acm.org/10.1145/2708463.2709059
+39c8ed5213882d4dbc74332245ffe201882c5de1,https://doi.org/10.1109/ICASSP.2013.6638045
+396b2963f0403109d92a4d4f26205f279ea79d2c,https://doi.org/10.1109/TSMCB.2005.845399
+397022a4460750c762dbb0aaebcacc829dee8002,https://doi.org/10.1109/TIFS.2013.2258152
+39acf4bb06b889686ca17fd8c89887a3cec26554,http://www.springerlink.com/index/10.1007/s10044-004-0223-4
+39c10888a470b92b917788c57a6fd154c97b421c,https://doi.org/10.1109/VCIP.2017.8305036
+39d0de660e2116f32088ce07c3376759d0fdaff5,https://doi.org/10.1109/ICPR.2016.7900043
+39d6339a39151b5f88ec2d7acc38fe0618d71b5f,https://doi.org/10.1109/MMSP.2013.6659285
+3980dadd27933d99b2f576c3b36fe0d22ffc4746,https://doi.org/10.1109/ROBIO.2017.8324597
+3960882a7a1cd19dfb711e35a5fc1843ed9002e7,http://doi.acm.org/10.1145/2487575.2487701
+398558817e05e8de184cc4c247d4ea51ab9d4d58,https://doi.org/10.1109/ICPR.2014.14
+993934822a42e70dd35fb366693d847164ca15ff,https://doi.org/10.1109/ICME.2009.5202753
+99a1180c3d39532efecfc5fa251d6893375c91a1,https://doi.org/10.1109/ICARCV.2012.6485394
+99e0c03686f7bc9d7add6cff39a941a047c3600a,https://doi.org/10.1109/ACCESS.2017.2712788
+99b8a24aacaa53fa3f8a7e48734037c7b16f1c40,https://doi.org/10.1109/ACCESS.2017.2752176
+998542e5e3882bb0ce563d390b1e1bff5460e80c,https://doi.org/10.1109/AFGR.2008.4813471
+992e4119d885f866cb715f4fbf0250449ce0db05,https://doi.org/10.1007/s00138-015-0674-1
+9989eda2f5392cfe1f789bb0f6213a46d92d1302,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477584
+997c7ebf467c579b55859315c5a7f15c1df43432,http://doi.ieeecomputersociety.org/10.1109/FG.2017.141
+993374c1c9d58a3dec28160188ff6ac1227d02f5,https://doi.org/10.1109/ICARCV.2016.7838650
+99cd84a62edb2bda2fc2fdc362a72413941f6aa4,http://doi.ieeecomputersociety.org/10.1109/FG.2017.109
+5278b7a6f1178bf5f90cd3388908925edff5ad46,https://doi.org/10.1007/s11704-015-4291-y
+520782f07474616879f94aae0d9d1fff48910254,https://doi.org/10.1016/j.neucom.2014.11.038
+5217ab9b723158b3ba2235e807d165e72fd33007,http://doi.acm.org/10.1145/2043674.2043710
+524c25217a6f1ed17f47871e947a5581d775fa56,https://doi.org/10.1117/12.2030875
+52e270ca8f5b53eabfe00a21850a17b5cc10f6d5,https://doi.org/10.1109/ROBIO.2013.6739643
+5226296884b3e151ce317a37f94827dbda0b9d16,https://doi.org/10.1109/IWBF.2016.7449690
+5213549200bccec57232fc3ff788ddf1043af7b3,http://doi.acm.org/10.1145/2601097.2601204
+526c79c6ce39882310b814b7918449d48662e2a9,https://doi.org/10.1109/ICASSP.2005.1416338
+524f6dc7441a3899ea8eb5d3e0d5d70e50ba566a,http://doi.acm.org/10.1145/2797143.2797165
+52b102620fff029b80b3193bec147fe6afd6f42e,http://dl.acm.org/citation.cfm?id=3028863
+5551a03353f571b552125dd4ee57301b69a10c46,https://doi.org/10.1016/j.neucom.2015.09.083
+55c46ae1154ed310610bdf5f6d9e7023d14c7eb4,http://doi.acm.org/10.1145/1027933.1028013
+55ee484f9cbd62111512485e3c1c3eadbf2e15c0,http://doi.ieeecomputersociety.org/10.1109/FG.2017.25
+559645d2447004355c83737a19c9a811b45780f1,https://doi.org/10.1109/ICB.2015.7139114
+550351edcfd59d3666984771f5248d95548f465a,https://doi.org/10.1109/TIP.2014.2327805
+5594beb2b314f5433bd7581f64bdbc58f2933dc4,https://doi.org/10.1016/j.neucom.2016.12.013
+55fdff2881d43050a8c51c7fdc094dbfbbe6fa46,https://doi.org/10.1109/ICB.2016.7550064
+553a605243b77a76c1ed4c1ad4f9a43ff45e391b,https://doi.org/10.1109/CISP-BMEI.2017.8302001
+557115454c1b8e6eaf8dbb65122c5b00dc713d51,https://doi.org/10.1109/LSP.2011.2140370
+55266ddbe9d5366e8cd1b0b645971cad6d12157a,https://doi.org/10.1109/SIU.2017.7960368
+556875fb04ed6043620d7ca04dfe3d8b3a9284f5,https://doi.org/10.1109/ICPR.2014.437
+9745a7f38c9bba9d2fd076813fc9ab7a128a3e19,http://doi.acm.org/10.1145/2393347.2396335
+97f3d35d3567cd3d973c4c435cdd6832461b7c3c,http://doi.ieeecomputersociety.org/10.1109/FG.2017.75
+97c554fbcf783d554c4f6c2f3fcc0a0f9dba0759,http://doi.ieeecomputersociety.org/10.1109/ISM.2016.0085
+9776a9f3c59907f45baaeda4b8907dcdac98aef1,https://doi.org/10.1109/CISP-BMEI.2017.8301924
+97c59db934ff85c60c460a4591106682b5ab9caa,https://doi.org/10.1109/BTAS.2012.6374568
+978b32ff990d636f7e2050bb05b8df7dfcbb42a1,https://doi.org/10.1109/BTAS.2014.6996270
+9729930ab0f9cbcd07f1105bc69c540330cda50a,https://doi.org/10.1109/ACCESS.2017.2749331
+9790ec6042fb2665c7d9369bf28566b0ce75a936,http://doi.acm.org/10.1145/3056540.3056546
+973022a1f9e30a624f5e8f7158b5bbb114f4af32,http://doi.acm.org/10.1145/3011077.3011138
+9774430006f1ed017156b17f3cf669071e398c58,https://doi.org/10.1109/SMC.2013.513
+9753ee59db115e1e84a7c045f2234a3f63f255b1,http://doi.ieeecomputersociety.org/10.1109/ACII.2015.7344683
+9771e04f48d8a1d7ae262539de8924117a04c20d,http://doi.ieeecomputersociety.org/10.1109/CGIV.2007.70
+63c74794aedb40dd6b1650352a2da7a968180302,https://doi.org/10.1016/j.neucom.2016.09.015
+637b31157386efbde61505365c0720545248fbae,https://doi.org/10.1109/BTAS.2017.8272721
+6345c0062885b82ccb760c738a9ab7fdce8cd577,https://doi.org/10.1109/EMBC.2016.7590729
+635d2696aa597a278dd6563f079be06aa76a33c0,https://doi.org/10.1109/ICIP.2016.7532429
+636c786d4e4ac530ac85e3883a2f2cf469e45fe2,https://doi.org/10.1016/j.neucom.2016.12.043
+6343bc0013343b6a5f96154f02d18dcd36a3f74c,https://doi.org/10.1007/s11042-014-2083-2
+0fc5c6f06e40014a56f492172f44c073d269e95c,https://doi.org/10.1108/17563781311301490
+0fb45e704ef3ca1f9c70e7be3fb93b53714ed8b5,http://doi.ieeecomputersociety.org/10.1109/FG.2017.142
+0fee3b9191dc1cef21f54232a23530cd8169d3b2,https://doi.org/10.1109/ICDM.2016.0050
+0f2461a265be997c962fa562ae48378fb964b7b4,https://doi.org/10.1109/BigData.2016.7841028
+0f22b89341d162a7a0ebaa3c622d9731e5551064,http://doi.ieeecomputersociety.org/10.1109/AIPR.2011.6176352
+0fdc3cbf92027cb1200f3f94927bef017d7325ae,https://doi.org/10.1109/BTAS.2015.7358771
+0f29bc5d8458358d74dc8c4fd6968b4182dd71d2,https://doi.org/10.1109/ICIP.2016.7532637
+0f1cb558b32c516e2b6919fea0f97a307aaa9091,https://doi.org/10.1007/s41095-017-0091-7
+0fcf04fda0bea5265b73c85d2cc2f7f70416537b,https://doi.org/10.1109/TCSVT.2015.2409012
+0f64e26d6dd6f1c99fe2050887fac26cafe9ed60,https://doi.org/10.1109/MCI.2016.2627668
+0a4a8768c1ed419baebe1c420bd9051760875cbe,https://doi.org/10.1109/EUSIPCO.2016.7760451
+0a5b2e642683ff20b6f0cee16a32a68ba0099908,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2012.6239342
+0aebe97a92f590bdf21cdadfddec8061c682cdb2,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2017.2695183
+0a0b9a9ff827065e4ff11022b0e417ddf1d3734e,http://dl.acm.org/citation.cfm?id=2935856
+0a451fc7d2c6b3509d213c210ae880645edf90ed,https://doi.org/10.1109/IJCNN.2014.6889591
+0abfb5b89e9546f8a5c569ab35b39b888e7cea46,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2011.68
+0ac2e8bd5a77d83bae9b49daab2c6f321e9b7a4e,https://doi.org/10.1109/SCIS-ISIS.2016.0166
+642417f2bb1ff98989e0a0aa855253fed1fffe04,https://doi.org/10.1117/12.2004255
+6440d6c7081efe4538a1c75e93144f3d142feb41,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.280
+6486b36c6f7fd7675257d26e896223a02a1881d9,https://doi.org/10.1109/THMS.2014.2376874
+647b2e162e9c476728172f62463a8547d245cde3,https://doi.org/10.1109/ICPR.2016.7899898
+64e216c128164f56bc91a33c18ab461647384869,http://doi.ieeecomputersociety.org/10.1109/AVSS.2016.7738017
+6489ad111fee8224b34f99d1bcfb5122786508cd,https://doi.org/10.1109/ICIP.2014.7025280
+64a08beb073f62d2ce44e25c4f887de9208625a4,https://doi.org/10.1080/09540090701725557
+64e82b42e1c41250bdf9eb952686631287cfd410,https://doi.org/10.1111/cgf.12760
+64b9ad39d115f3e375bde4f70fb8fdef5d681df8,https://doi.org/10.1109/ICB.2016.7550088
+64fd48fae4d859583c4a031b51ce76ecb5de614c,https://doi.org/10.1109/ICARCV.2008.4795556
+64ba203c8cfc631d5f3f20419880523155fbeeb2,http://doi.acm.org/10.1145/3009977.3010008
+90ddf1aabf1c73b5fc45254a2de46e53a0bde857,https://doi.org/10.1109/ROBIO.2015.7418917
+907bb6c2b292e6db74fad5c0b7a7f1cc2a4d4224,https://doi.org/10.1016/j.patcog.2014.07.010
+9048732c8591a92a1f4f589b520a733f07578f80,https://doi.org/10.1109/CISP-BMEI.2017.8301921
+9055b155cbabdce3b98e16e5ac9c0edf00f9552f,http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78
+902cc7dd4ecfb2b6750905ef08bceeed24e1eeeb,https://doi.org/10.1016/j.patcog.2016.03.002
+90eb66e75381cce7146b3953a2ae479a7beec539,http://doi.ieeecomputersociety.org/10.1109/AIPR.2015.7444542
+90ae02da16b750a9fd43f8a38440f848309c2fe0,https://doi.org/10.1007/s10044-015-0499-6
+9026ee8a89ecfa6bd2688a4943eee027e3fc4b0f,http://doi.ieeecomputersociety.org/10.1109/CGIV.2011.28
+90c4a6c6f790dbcef9a29c9a755458be09e319b6,http://doi.acm.org/10.1145/2964284.2967242
+9026eb610916ec4ce77f0d7d543b7c2482ba4173,https://doi.org/10.1016/j.patrec.2012.03.006
+90c4deaa538da42b9b044d7b68c3692cced66036,http://doi.ieeecomputersociety.org/10.1109/SITIS.2007.89
+bf30477f4bd70a585588528355b7418d2f37953e,https://doi.org/10.1109/ICPR.2016.7900280
+bf1e0545785b05b47caa3ffe7d16982769986f38,https://doi.org/10.1016/j.asoc.2010.12.002
+bf0836e5c10add0b13005990ba019a9c4b744b06,https://doi.org/10.1109/TCE.2009.5373791
+bf4f79fd31493648d80d0a4a8da5edeeaba74055,http://doi.acm.org/10.1145/2783258.2783280
+bf00071a7c4c559022272ca5d39e07f727ebb479,https://doi.org/10.1109/MMSP.2016.7813388
+bf2eb77e9b795a4a0a38ed4b1c8dd4b2c9a74317,https://doi.org/10.1007/978-3-319-69900-4_70
+bf1ebcaad91c2c0ed35544159415b3ad388cc7a9,https://doi.org/10.1007/s11042-015-2665-7
+d37ca68742b2999667faf464f78d2fbf81e0cb07,https://doi.org/10.1007/978-3-319-25417-3_76
+d3a3d15a32644beffaac4322b9f165ed51cfd99b,https://doi.org/10.1109/SIU.2016.7496197
+d42dbc995318e2936714c65c028700bfd3633049,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477592
+d4331a8dd47b03433f8390da2eaa618751861c64,https://doi.org/10.1109/TIP.2012.2192125
+d4353952a408e1eae8c27a45cc358976d38dde00,https://doi.org/10.1007/s00138-014-0594-5
+d4ccc4f18a824af08649657660e60b67c6868d9c,https://doi.org/10.1142/S021800141655020X
+d40c16285d762f7a1c862b8ac05a0fdb24af1202,https://doi.org/10.1109/BESC.2017.8256378
+d4ec62efcc631fa720dfaa1cbc5692b39e649008,https://doi.org/10.1109/ICDM.2016.0026
+d4fb26f5528b9a1f04ea773cc2b920e01fc0edd4,https://doi.org/10.1109/TSMCB.2009.2032155
+d4fba386caca1b5b2ee35ee5310b5fce50b2b1c3,https://doi.org/10.23919/MVA.2017.7986886
+bab2f4949a38a712a78aafbc0a3c392227c65f56,https://doi.org/10.1109/CISP-BMEI.2017.8302191
+ba30cc9d8bac724dafc0aea247159cc7e7105784,http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019360
+ba931c3f90dd40a5db4301a8f0c71779a23043d6,https://doi.org/10.1109/ICPR.2014.136
+a07f78124f83eef1ed3a6f54ba982664ae7ca82a,http://ieeexplore.ieee.org/document/6460481/
+a0d5990eb150cdcb1c8b2967e6a4fe7a5d85063b,https://doi.org/10.1109/ICIP.2017.8296805
+a094e52771baabe4ab37ef7853f9a4f534227457,https://doi.org/10.1109/TITS.2016.2551298
+a0f6196d27a39cde2dbf62c08d89cbe489600bb0,https://doi.org/10.1016/j.cose.2016.03.007
+a006cd95c14de399706c5709b86ac17fce93fcba,https://doi.org/10.1109/ICPR.2014.343
+a7c066e636b8953481b4a8d8ff25a43a96dd348f,https://doi.org/10.1109/ATSIP.2017.8075517
+a76e57c1b2e385b68ffdf7609802d71244804c1d,https://doi.org/10.1016/j.patrec.2016.05.027
+a7da7e5a6a4b53bf8736c470ff8381a654e8c965,https://doi.org/10.1007/s13042-011-0045-9
+a7a3ec1128f920066c25cb86fbc33445ce613919,https://doi.org/10.1109/VCIP.2017.8305115
+a71bd4b94f67a71bc5c3563884bb9d12134ee46a,https://doi.org/10.1016/j.asoc.2015.05.006
+a735c6330430c0ff0752d117c54281b1396b16bf,https://doi.org/10.1109/SMC.2014.6974118
+a73405038fdc0d8bf986539ef755a80ebd341e97,https://doi.org/10.1109/TIP.2017.2698918
+a713a01971e73d0c3118d0409dc7699a24f521d6,https://doi.org/10.1109/SSCI.2017.8285381
+a7f188a7161b6605d58e48b2537c18a69bd2446f,https://doi.org/10.1109/PIMRC.2011.6139898
+a76969df111f9ee9f0b898b51ad23a721d289bdc,https://doi.org/10.1109/ICMLA.2015.185
+a75de488eaacb1dafffbe667465390f101498aaf,http://doi.ieeecomputersociety.org/10.1109/FG.2017.47
+b839bc95794dc65340b6e5fea098fa6e6ea5e430,https://doi.org/10.1109/WACVW.2017.8
+b8e5800dfc590f82a0f7eedefce9abebf8088d12,https://doi.org/10.1109/DCC.2017.87
+b86c49c6e3117ea116ec2d8174fa957f83502e89,https://doi.org/10.1109/CIT/IUCC/DASC/PICOM.2015.149
+b85d0aef3ee2883daca2835a469f5756917e76b7,https://doi.org/10.1007/s41095-015-0015-3
+b856d8d6bff745bb1b4beb67e4b821fc20073840,https://doi.org/10.1109/ICMLC.2016.7872935
+b84dde74dddf6a3281a0b22c68999942d2722919,http://dl.acm.org/citation.cfm?id=2910703
+b8a16fcb65a8cee8dd32310a03fe36b5dff9266a,https://doi.org/10.1109/SIU.2014.6830473
+b8b9cef0938975c5b640b7ada4e3dea6c06d64e9,http://doi.ieeecomputersociety.org/10.1109/FG.2017.119
+b85d953de16eecaecccaa8fad4081bd6abda9b1b,https://doi.org/10.1016/j.neuroimage.2015.12.020
+b84f164dbccb16da75a61323adaca730f528edde,https://doi.org/10.1109/TIP.2013.2237914
+b8bcf9c773da1c5ee76db4bf750c9ff5d159f1a0,http://doi.acm.org/10.1145/2911996.2911999
+b8978a5251b6e341a1171e4fd9177aec1432dd3a,https://doi.org/10.1016/j.image.2016.04.004
+b8f64a94f536b46ef34a0223272e02f9be785ef9,https://doi.org/10.1109/EMBC.2012.6346590
+b1891010a0722117c57e98809e1f2b26cd8e9ee3,http://doi.acm.org/10.1145/2330784.2331026
+b1efefcc9a5d30be90776571a6cc0071f3679753,https://doi.org/10.1109/ROBIO.2016.7866471
+b1bb517bd87a1212174033fc786b2237844b04e6,https://doi.org/10.1016/j.neucom.2015.03.078
+b1534888673e6119f324082246016d28eba249aa,https://doi.org/10.1109/MMSP.2017.8122229
+b13b101b6197048710e82f044ad2eda6b93affd8,http://doi.ieeecomputersociety.org/10.1109/ARTCom.2009.91
+ddd0f1c53f76d7fc20e11b7e33bbdc0437516d2b,https://doi.org/10.1109/ICDSP.2016.7868598
+dd031dbf634103ff3c58ce87aa74ec6921b2e21d,http://doi.ieeecomputersociety.org/10.1109/ACII.2015.7344664
+dd8a851f2a0c63bb97e33aaff1841695f601c863,https://doi.org/10.1109/BTAS.2014.6996260
+ddd9d7cb809589b701fba9f326d7cf998a63b14f,http://doi.acm.org/10.1145/2647868.2654992
+ddf577e8b7c86b1122c1bc90cba79f641d2b33fa,http://doi.acm.org/10.1145/3013971.3014026
+dd715a98dab34437ad05758b20cc640c2cdc5715,https://doi.org/10.1007/s41095-017-0082-8
+dcb50e1f439d1f9b14ae85866f4542e51b830a07,https://doi.org/10.1109/FSKD.2012.6234354
+dcea30602c4e0b7525a1bf4088620128d4cbb800,https://doi.org/10.1109/VCIP.2013.6706430
+dcdece0d0ee382e2f388dcd7f5bd9721bb7354d6,https://doi.org/10.1109/TCYB.2014.2311033
+dc2f16f967eac710cb9b7553093e9c977e5b761d,https://doi.org/10.1109/ICPR.2016.7900141
+dc84d3f29c52e6d296b5d457962c02074aa75d0f,https://doi.org/10.1109/TIP.2016.2580939
+dca2bb023b076de1ccd0c6b8d71faeb3fccb3978,http://doi.acm.org/10.1145/3152118
+b69e7e2a7705a58a0e3f1b80ae542907b89ce02e,https://doi.org/10.1007/s11042-015-2614-5
+b6259115b819424de53bb92f64cc459dcb649f31,http://doi.ieeecomputersociety.org/10.1109/AVSS.2017.8078466
+b68452e28951bf8db5f1193eca3a8fd9e2d0d7ef,https://doi.org/10.1109/ICACCI.2015.7275752
+b6ac33d2c470077fa8dcbfe9b113beccfbd739f8,http://doi.acm.org/10.1145/2509896.2509905
+b65b51c796ed667c4c7914bf12b1926fd6bbaa0c,https://doi.org/10.1016/j.neuroimage.2013.05.108
+b6a23f72007cb40223d7e1e1cc47e466716de945,https://doi.org/10.1109/CVPRW.2010.5544598
+b6c00e51590c48a48fae51385b3534c4d282f76c,https://doi.org/10.1109/TIFS.2015.2427778
+b631f3c212aab45d73ddc119f1f7d00c3c502a72,https://doi.org/10.1109/TIFS.2009.2035976
+b63b6ed78b39166d87d4c56f8890873aa65976a2,https://doi.org/10.1109/ICRA.2011.5979953
+a92e24c8c53e31fc444a13bd75b434b7207c58f1,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2014.2317711
+a9756ca629f73dc8f84ee97cfa8b34b8207392dc,https://doi.org/10.1109/ICIP.2017.8296542
+a9cecfbc47a39fa0158a5f6fd883e0e5ac2aa134,https://doi.org/10.1142/S0218001405004071
+a9af0dc1e7a724464d4b9d174c9cf2441e34d487,https://doi.org/10.1142/S0219691316500351
+a9506c60ec48056087ee3e10d28ff7774fbbd553,https://doi.org/10.1109/TCSVT.2014.2376136
+a9d1d00d6897ae23c9a7e9fb75a3c7417a6730a4,https://doi.org/10.1049/iet-ipr.2016.1074
+a9426cb98c8aedf79ea19839643a7cf1e435aeaa,https://doi.org/10.1109/GlobalSIP.2016.7905998
+d5f8827fc7d66643bf018d5636e81ed41026b61a,http://doi.ieeecomputersociety.org/10.1109/FG.2017.36
+d569c3e62f471aa75ed53e631ec05c1a3d594595,https://doi.org/10.1109/NNSP.2002.1030072
+d5b445c5716952be02172ca4d40c44f4f04067fa,https://doi.org/10.1109/ICICS.2011.6173537
+d57ce0ff4acb2910c2d1afee2ebb7aa1e72a4584,https://doi.org/10.1109/CVPRW.2010.5543816
+d5c66a48bc0a324750db3d295803f47f6060043d,http://doi.ieeecomputersociety.org/10.1109/AVSS.2006.109
+d58fce50e9028dfc12cb2e7964f83d3b28bcc2fc,http://doi.ieeecomputersociety.org/10.1109/FG.2017.101
+d5dc78eae7a3cb5c953c89376e06531d39b34836,https://doi.org/10.1007/s00521-009-0242-6
+d2d9612d3d67582d0cd7c1833599b88d84288fab,https://doi.org/10.1049/iet-cvi.2015.0222
+d2a415365f997c8fe2dbdd4e06ceab2e654172f6,http://doi.acm.org/10.1145/2425333.2425361
+d2bad850d30973a61b1a7d7dc582241a41e5c326,http://doi.ieeecomputersociety.org/10.1109/ICICIC.2006.12
+d2baa43471d959075fc4c93485643cbd009797fd,http://doi.ieeecomputersociety.org/10.1109/MM.2017.4241350
+d2598c088b0664c084413796f39697c6f821d56e,https://doi.org/10.1109/VCIP.2016.7805451
+d2fac640086ba89271ad7c1ebf36239ecd64605e,http://ieeexplore.ieee.org/document/6460449/
+d2b3166b8a6a3e6e7bc116257e718e4fe94a0638,https://doi.org/10.1007/s00521-010-0411-7
+aa7c72f874951ff7ca3769439f2f39b7cfd4b202,https://doi.org/10.1109/JPROC.2009.2032355
+aaf2436bc63a58d18192b71cc8100768e2f8a6cb,http://doi.ieeecomputersociety.org/10.1109/ICDIP.2009.77
+aad6fc5bd7631d2e68b7a5a01ac5d578899c43e5,http://doi.ieeecomputersociety.org/10.1109/FGR.2006.80
+aa892fe17c06e2b18db2b12314499a741e755df7,https://doi.org/10.1109/IJCNN.2017.7966089
+aab9a617be6e5507beb457b1e6c2e5b046f9cff0,https://doi.org/10.1109/ICIP.2008.4712153
+aa4af9b3811db6a30e1c7cc1ebf079078c1ee152,http://doi.acm.org/10.1145/3129416.3129451
+aad7b12936e0ced60bc0be95e8670b60b5d5ce20,https://doi.org/10.1109/URAI.2013.6677383
+aa90a466a2ff7781c36e7da7df0013aa5b117510,http://doi.ieeecomputersociety.org/10.1109/AICCSA.2017.159
+aa8341cb5d8f0b95f619d9949131ed5c896d6470,http://doi.ieeecomputersociety.org/10.1109/IIH-MSP.2007.403
+aaec8141d57d29aa3cedf1baec9633180ddb7a3d,http://doi.ieeecomputersociety.org/10.1109/ICME.2016.7552916
+aae31f092fadd09a843e1ca62af52dc15fc33c56,http://doi.ieeecomputersociety.org/10.1109/ACII.2017.8273609
+affa61d044daa1a7d43a6803a743eab47c89c45d,https://doi.org/10.1109/TNNLS.2015.2405574
+afba76d0fe40e1be381182aec822431e20de8153,https://doi.org/10.1007/s00521-014-1768-9
+af12a79892bd030c19dfea392f7a7ccb0e7ebb72,http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247972
+afdbbc5c84eb4e535c7c478b5227c0138b57af64,http://doi.ieeecomputersociety.org/10.1109/TMC.2016.2593919
+af2d30fdb8c611dc5b883b90311d873e336fc534,https://doi.org/10.1109/ISCAS.2017.8050275
+af3e6e20de06b03c33f8e85eced74c2d096730ea,https://doi.org/10.1109/CISP-BMEI.2017.8301972
+af7553d833886663550ce83b087a592a04b36419,https://doi.org/10.1109/TIFS.2015.2390138
+af8e22ef8c405f9cc9ad26314cb7a9e7d3d4eec2,https://doi.org/10.1007/s00521-014-1569-1
+af97e792827438ddea1d5900960571939fc0533e,https://doi.org/10.1109/ICSMC.2005.1571460
+af97a51f56cd6b793cf96692931a8d1ddbe4e3cc,https://doi.org/10.1109/ICPR.2014.57
+b749ca71c60904d7dad6fc8fa142bf81f6e56a62,https://doi.org/10.1109/TIP.2013.2292560
+b7128e0fe18dcb42e8a2ac5cf6794f64a8e37bd0,https://doi.org/10.1109/SERA.2017.7965717
+b7845e0b0ce17cde7db37d5524ef2a61dee3e540,https://doi.org/10.1109/ICPR.2016.7899608
+b75eecc879da38138bf3ace9195ae1613fb6e3cc,https://doi.org/10.1007/s10278-015-9808-2
+b7b8e7813fbc12849f2daba5cab604abd8cbaab6,https://doi.org/10.1109/ICCE.2014.6775938
+b7a0c70a320c1ac3e92f4bf0b50a7d8ceb757c41,https://doi.org/10.1109/IJCNN.2016.7727203
+b784bb1d2b2720dac8d4b92851a8d6360c35b0b2,https://doi.org/10.1109/ICDM.2016.0041
+b728e7db6e5559a77dc59381bfb8df96d482a721,http://doi.ieeecomputersociety.org/10.1109/ICIG.2011.28
+b7fa06b76f4b9263567875b2988fb7bbc753e69f,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2469282
+b7043048b4ba748c9c6317b6d8206192c34f57ff,https://doi.org/10.1109/ICIP.2016.7533061
+db3984b143c59584a32d762d712d21c0e8cf38b8,https://doi.org/10.1109/SMC.2015.324
+dbcfefa92edab8d1ffe8bc1cc66ad80fb13d2b6a,https://doi.org/10.1007/s00521-010-0519-9
+dbf2d2ca28582031be6d16519ab887248f5e8ad8,https://doi.org/10.1109/TMM.2015.2410135
+dbfe62c02b544b48354fac741d90eb4edf815db5,https://doi.org/10.1109/SITIS.2016.43
+dbc3ab8c9f564f038e7779b87900c4a0426f3dd1,http://doi.acm.org/10.1145/1386352.1386401
+a8faeef97e2a00eddfb17a44d4892c179a7cc277,https://doi.org/10.1109/FG.2011.5771459
+a8c62833f5e57d4cd060d6b5f0f9cfe486ee6825,http://doi.ieeecomputersociety.org/10.1109/CSIE.2009.808
+a8fd23934e5039bb818b8d1c47ccb540ce2c253c,https://doi.org/10.1007/s11760-015-0808-y
+a8f1fc34089c4f2bc618a122be71c25813cae354,https://doi.org/10.1142/S0219467816500194
+de048065ea2c5b3e306e2c963533df055e7dfcaa,https://doi.org/10.1109/LSP.2016.2598878
+ded8252fc6df715753e75ba7b7fee518361266ef,https://doi.org/10.1109/SIU.2012.6204837
+de79437f74e8e3b266afc664decf4e6e4bdf34d7,https://doi.org/10.1109/IVCNZ.2016.7804415
+de8657e9eab0296ac062c60a6e10339ccf173ec1,http://doi.ieeecomputersociety.org/10.1109/BRACIS.2014.51
+dea409847d52bb0ad54bf586cb0482a29a584a7e,http://doi.ieeecomputersociety.org/10.1109/ISM.2009.115
+de0ee491d2747a6f3d171f813fe6f5cdb3a27fd6,https://doi.org/10.1002/cpe.3850
+dec5b11b01f35f72adb41d2be26b9b95870c5c00,http://ieeexplore.ieee.org/document/7071948/
+deb89950939ae9847f0a1a4bb198e6dbfed62778,https://doi.org/10.1109/LSP.2016.2543019
+de878384f00b6ce1caa66ac01735fb4b63ad0279,https://doi.org/10.1049/iet-ipr.2014.0670
+defd44b02a1532f47bdd8c8f2375e3df64ac5d79,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.139
+b05943b05ef45e8ea8278e8f0870f23db5c83b23,https://doi.org/10.1109/ROBIO.2010.5723349
+b084ad222c1fc9409d355d8e54ac3d1e86f2ca18,https://doi.org/10.1016/j.neucom.2017.04.001
+b0358af78b7c5ee7adc883ef513bbcc84a18a02b,https://doi.org/10.1109/WACV.2017.10
+b0f59b71f86f18495b9f4de7c5dbbebed4ae1607,https://doi.org/10.1016/j.neucom.2015.04.085
+a63ec22e84106685c15c869aeb157aa48259e855,https://doi.org/10.1142/S0219691312500294
+a6e75b4ccc793a58ef0f6dbe990633f7658c7241,https://doi.org/10.1016/j.cviu.2016.10.007
+a62997208fec1b2fbca6557198eb7bc9340b2409,https://doi.org/10.1109/HPCC.and.EUC.2013.241
+a6ab23f67d85da26592055c0eac4c34f05c26519,http://doi.ieeecomputersociety.org/10.1109/ICTAI.2006.15
+a6793de9a01afe47ffbb516cc32f66625f313231,http://doi.acm.org/10.1145/2939672.2939853
+b944cc4241d195b1609a7a9d87fce0e9ba1498bc,https://doi.org/10.1109/TSP.2011.2179539
+b934f730a81c071dbfc08eb4c360d6fca2daa08f,http://doi.ieeecomputersociety.org/10.1109/ICME.2015.7177496
+b98e7a8f605c21e25ac5e32bfb1851a01f30081b,http://doi.acm.org/10.1145/2393347.2396303
+b9d68dbeb8e5fdc5984b49a317ea6798b378e5ae,http://doi.acm.org/10.1145/2733373.2807962
+b972683d702a65d3ee7a25bc931a5890d1072b6b,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2017.2669035
+b910590a0eb191d03e1aedb3d55c905129e92e6b,http://doi.acm.org/10.1145/2808492.2808570
+a180dc9766490416246e7fbafadca14a3c500a46,https://doi.org/10.1016/S0167-8655(03)00112-0
+a100595c66f84c3ddd3da8d362a53f7a82f6e3eb,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.46
+a1cda8e30ce35445e4f51b47ab65b775f75c9f18,https://doi.org/10.1109/ISBA.2018.8311462
+a13a27e65c88b6cb4a414fd4f6bca780751a59db,https://doi.org/10.1109/SMC.2016.7844934
+a1cecbb759c266133084d98747d022c1e638340d,http://doi.acm.org/10.1145/2670473.2670501
+a11ce3c9b78bf3f868b1467b620219ff651fe125,http://doi.acm.org/10.1145/2911996.2912073
+a192845a7695bdb372cccf008e6590a14ed82761,https://doi.org/10.1109/TIP.2014.2321495
+a119844792fd9157dec87e3937685c8319cac62f,https://doi.org/10.1109/APSIPA.2015.7415395
+ef7b8f73e95faa7a747e0b04363fced0a38d33b0,https://doi.org/10.1109/ICIP.2017.8297028
+ef35c30529df914a6975af62aca1b9428f678e9f,https://doi.org/10.1007/s00138-016-0817-z
+ef3a0b454370991a9c18ac7bfd228cf15ad53da0,https://doi.org/10.1109/ICNC.2010.5582886
+c3c463a9ee464bb610423b7203300a83a166b500,https://doi.org/10.1109/ICIP.2014.7025069
+c3390711f5ce6f5f0728ef88c54148bf9d8783a2,https://doi.org/10.1016/j.engappai.2015.03.016
+c3e53788370341afe426f2216bed452cbbdaf117,http://doi.ieeecomputersociety.org/10.1109/ATNAC.2017.8215436
+c3a53b308c7a75c66759cbfdf52359d9be4f552b,http://doi.ieeecomputersociety.org/10.1109/ISPAN-FCST-ISCC.2017.16
+c36f3cabeddce0263c944e9fe4afd510b5bae816,https://doi.org/10.1109/DICTA.2017.8227399
+c4b00e86841db3fced2a5d8ac65f80d0d3bbe352,http://doi.ieeecomputersociety.org/10.1109/AIPR.2004.4
+c41a3c31972cf0c1be6b6895f3bf97181773fcfb,https://doi.org/10.1109/ICPR.2014.103
+c4ca092972abb74ee1c20b7cae6e69c654479e2c,https://doi.org/10.1109/ICIP.2016.7532960
+c444c4dab97dd6d6696f56c1cacda051dde60448,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2013.37
+c459014131cbcd85f5bd5c0a89115b5cc1512be9,http://doi.ieeecomputersociety.org/10.1109/AUTOID.2005.23
+c49075ead6eb07ede5ada4fe372899bd0cfb83ac,https://doi.org/10.1109/ICSPCS.2015.7391782
+c4541802086461420afb1ecb5bb8ccd5962a9f02,https://doi.org/10.1109/TSMCB.2009.2029076
+c4d439fe07a65b735d0c8604bd5fdaea13f6b072,http://doi.acm.org/10.1145/2671188.2749294
+c4d0d09115a0df856cdb389fbccb20f62b07b14e,https://doi.org/10.1109/ICIP.2012.6466925
+ea227e47b8a1e8f55983c34a17a81e5d3fa11cfd,https://doi.org/10.1109/ICIP.2017.8296549
+ea8fa68b74ffefbe79a3576d7e4ae4365a1346ff,http://doi.ieeecomputersociety.org/10.1109/FG.2017.113
+ea8d217231d4380071132ce37bf997164b60ec44,https://doi.org/10.1109/SIU.2016.7496031
+ea2b3efd4d317ebaffaf7dc8c62db5ff1eab0e1b,https://doi.org/10.1109/FRUCT-ISPIT.2016.7561522
+ea026456729f0ec54c697198e1fd089310de4ae2,https://doi.org/10.1109/CIBIM.2013.6607917
+ea86b75427f845f04e96bdaadfc0d67b3f460005,https://doi.org/10.1109/ICIP.2016.7532686
+ea5c9d5438cde6d907431c28c2f1f35e02b64b33,https://doi.org/10.1109/SPAC.2017.8304257
+e12b2c468850acb456b0097d5535fc6a0d34efe3,https://doi.org/10.1016/j.neucom.2011.03.009
+e1c50cf0c08d70ff90cf515894b2b360b2bc788b,https://doi.org/10.1109/ICSMC.2007.4414085
+e10cbd049ac2f5cc8af9eb8e587b3408ad4bb111,https://doi.org/10.1117/1.JEI.24.5.053028
+e111624fb4c5dc60b9e8223abfbf7c4196d34b21,http://doi.ieeecomputersociety.org/10.1109/BIBM.2016.7822814
+e101bab97bce2733222db9cfbb92a82779966508,https://doi.org/10.1109/TCYB.2016.2549639
+e14b046a564604508ea8e3369e7e9f612e148511,https://doi.org/10.1007/978-3-642-17829-0_4
+e198a7b9e61dd19c620e454aaa81ae8f7377ade0,https://doi.org/10.1109/CVPRW.2010.5543611
+e1449be4951ba7519945cd1ad50656c3516113da,https://doi.org/10.1109/TCSVT.2016.2603535
+cd2bf0e1d19babe51eaa94cbc24b223e9c048ad6,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2016.2581168
+cde7901c0945683d0c677b1bb415786e4f6081e6,http://doi.ieeecomputersociety.org/10.1109/IRI.2015.44
+cdf2c8752f1070b0385a94c7bf22e8b54cac521b,https://doi.org/10.1007/s11265-010-0541-2
+cd33b3ca8d7f00c1738c41b2071a3164ba42ea61,https://doi.org/10.1142/S0218213008003832
+cdf0dc4e06d56259f6c621741b1ada5c88963c6d,https://doi.org/10.1109/ICIP.2014.7025061
+cd85f71907f1c27349947690b48bfb84e44a3db0,https://doi.org/10.1007/978-981-10-4840-1
+cdfa7dccbc9e9d466f8a5847004973a33c7fcc89,https://doi.org/10.1109/TIFS.2013.2263498
+cd3b713722ccb1e2ae3b050837ca296b2a2dd82a,https://doi.org/10.1016/j.jvcir.2016.07.015
+cd74d606e76ecddee75279679d9770cdc0b49861,https://doi.org/10.1109/TIP.2014.2365725
+cc1b093cfb97475faabab414878fa7e4a2d97cd7,http://doi.ieeecomputersociety.org/10.1109/ICALT.2017.141
+ccca2263786429b1b3572886ce6a2bea8f0dfb26,https://doi.org/10.1007/s10044-014-0388-4
+cc7c63473c5bef5ae09f26b2258691d9ffdd5f93,https://doi.org/10.1109/ICMLA.2012.17
+cc44f1d99b17a049a8186ec04c6a1ecf1906c3c8,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2012.87
+ccb95192001b07bb25fc924587f9682b0df3de8e,https://doi.org/10.1109/ICACCI.2016.7732123
+cc70fb1ab585378c79a2ab94776723e597afe379,https://doi.org/10.1109/ICIP.2017.8297067
+cc6d3ccc9e3dd0a43313a714316c8783cd879572,https://doi.org/10.1109/ICIP.2017.8296802
+cc9a61a30afdb8a5bc7088e1cef814b53dc4fc66,https://doi.org/10.1142/s0218213015400199
+cc5edaa1b0e91bc3577547fc30ea094aa2722bf0,https://doi.org/10.1109/CICARE.2014.7007832
+cce2f036d0c5f47c25e459b2f2c49fa992595654,http://doi.ieeecomputersociety.org/10.1109/DICTA.2010.93
+cca476114c48871d05537abb303061de5ab010d6,https://doi.org/10.15439/2016F472
+cc1ed45b02d7fffb42a0fd8cffe5f11792b6ea74,https://doi.org/10.1109/SIU.2016.7495874
+ccebd3bf069f5c73ea2ccc5791976f894bc6023d,https://doi.org/10.1109/ICPR.2016.7900186
+cceec87bad847b9b87178bde8ce5cce6bf1a8e99,https://doi.org/10.1109/RIISS.2014.7009163
+ccfebdf7917cb50b5fcd56fb837f841a2246a149,https://doi.org/10.1109/ICIP.2015.7351065
+e6f3707a75d760c8590292b54bc8a48582da2cd4,https://doi.org/10.1007/s11760-012-0410-5
+e6c491fb6a57c9a7c2d71522a1a066be2e681c84,https://doi.org/10.1016/j.imavis.2016.06.002
+e6d46d923f201da644ae8d8bd04721dd9ac0e73d,https://doi.org/10.1109/ISBA.2016.7477226
+e6c834c816b5366875cf3060ccc20e16f19a9fc6,https://doi.org/10.1109/BTAS.2016.7791185
+e66a6ae542907d6a0ebc45da60a62d3eecf17839,https://doi.org/10.1109/EUVIP.2014.7018366
+e66b4aa85524f493dafde8c75176ac0afad5b79c,https://doi.org/10.1109/SSCI.2017.8285219
+e6d6d1b0a8b414160f67142fc18e1321fe3f1c49,https://doi.org/10.1109/FSKD.2015.7382037
+e69a765d033ef6ea55c57ca41c146b27964c5cf2,https://doi.org/10.1109/ISCAS.2017.8050764
+f9fb7979af4233c2dd14813da94ec7c38ce9232a,http://doi.acm.org/10.1145/3131902
+f9752fd07b14505d0438bc3e14b23d7f0fe7f48b,http://doi.ieeecomputersociety.org/10.1109/ICMLA.2009.114
+f95321f4348cfacc52084aae2a19127d74426047,https://doi.org/10.1109/ICMLC.2013.6890897
+f925879459848a3eeb0035fe206c4645e3f20d42,http://doi.acm.org/10.1145/3025453.3025472
+f0dac9a55443aa39fd9832bdff202a579b835e88,https://doi.org/10.1109/JSTSP.2016.2543681
+f0a9d69028edd1a39147848ad1116ca308d7491e,https://doi.org/10.1007/11573548_11
+f09d5b6433f63d7403df5650893b78cdcf7319b3,https://doi.org/10.1109/AFGR.2008.4813384
+f0b4f5104571020206b2d5e606c4d70f496983f9,https://doi.org/10.1109/FUZZ-IEEE.2014.6891674
+f7911b9ff58d07d19c68f4a30f40621f63c0f385,http://dl.acm.org/citation.cfm?id=3007693
+f762afd65f3b680330e390f88d4cc39485345a01,http://doi.ieeecomputersociety.org/10.1109/ACIIW.2017.8272606
+f702a6cf6bc5e4cf53ea72baa4fc9d80cdbbae93,https://doi.org/10.1109/TCSVT.2007.903317
+f73174cfcc5c329b63f19fffdd706e1df4cc9e20,http://doi.ieeecomputersociety.org/10.1109/FIT.2015.13
+f7bebb2d5ef7c9bd38808b8e615756efafc2a1e7,https://doi.org/10.1109/ICIP.2012.6467434
+f79e4ba09402adab54d2efadd1c4bfe4e20c5da5,https://doi.org/10.1109/ICIP.2017.8296364
+e83e5960c2aabab654e1545eb419ef64c25800d5,https://doi.org/10.1016/j.neunet.2016.08.011
+e8951cc76af80da43e3528fe6d984071f17f57e7,https://doi.org/10.1109/WACVW.2017.9
+e8c051d9e7eb8891b23cde6cbfad203011318a4f,http://doi.acm.org/10.1145/3013971.3014015
+e88988f4696e7e2925ed96467fde4314bfa95eff,https://doi.org/10.1016/j.neucom.2015.01.076
+e82a0976db908e6f074b926f58223ac685533c65,https://doi.org/10.1007/s11042-015-2848-2
+e865908ed5e5d7469b412b081ca8abd738c72121,https://doi.org/10.1109/TIP.2016.2621667
+e8c6853135856515fc88fff7c55737a292b0a15b,http://doi.ieeecomputersociety.org/10.1109/FG.2017.46
+fa54ab106c7f6dbd3c004cea4ef74ea580cf50bf,http://doi.ieeecomputersociety.org/10.1109/SITIS.2009.18
+faf19885431cb39360158982c3a1127f6090a1f6,https://doi.org/10.1109/BTAS.2015.7358768
+fa72e39971855dff6beb8174b5fa654e0ab7d324,https://doi.org/10.1007/s11042-013-1793-1
+faa46ef96493b04694555738100d9f983915cf9b,https://doi.org/10.1007/s10489-015-0735-1
+fa08b52dda21ccf71ebc91bc0c4d206ac0aa3719,https://doi.org/10.1109/TIM.2015.2415012
+fadbb3a447d697d52771e237173b80782caaa936,https://doi.org/10.1007/s00530-012-0290-0
+fa9610c2dc7e2a79e0096ac033b11508d8ae7ed7,https://doi.org/10.1109/FSKD.2016.7603418
+fa5ab4b1b45bf22ce7b194c20c724946de2f2dd4,https://doi.org/10.1109/TIP.2015.2421437
+ff82825a04a654ca70e6d460c8d88080ee4a7fcc,http://doi.acm.org/10.1145/2683483.2683533
+ff9e042cccbed7e350a25b7d806cd17fb79dfdf9,https://doi.org/10.1007/s11760-016-0882-9
+fff31548617f208cd5ae5c32917afd48abc4ff6a,http://doi.acm.org/10.1145/3139295.3139309
+ff3859917d4121f47de0d46922a103c78514fcab,https://doi.org/10.1109/ICB.2016.7550050
+ff402bd06c9c4e94aa47ad80ccc4455efa869af3,http://doi.ieeecomputersociety.org/10.1109/ICPR.2004.1334120
+ff42ec628b0980909bbb84225d0c4f8d9ac51e03,https://doi.org/10.1109/TCSVT.2008.2005799
+ffea4184a0b24807b5f4ed87f9a985c2a27027d9,https://doi.org/10.1007/s00530-012-0297-6
+ff8db3810f927506f3aa594d66d5e8658f3cf4d5,http://doi.acm.org/10.1145/3078971.3079026
+ffea2b26e422c1009afa7e200a43b31a1fae86a9,https://doi.org/10.1007/s00500-009-0441-1
+ffb1cb0f9fd65247f02c92cfcb152590a5d68741,https://doi.org/10.1109/CISS.2012.6310782
+ff3f128f5addc6ce6b41f19f3d679282bbdaa2ee,http://doi.acm.org/10.1145/2903220.2903255
+ff0617d750fa49416514c1363824b8f61baf8fb5,https://doi.org/10.1587/elex.7.1125
+c570d1247e337f91e555c3be0e8c8a5aba539d9f,https://doi.org/10.1007/s11042-012-1352-1
+c586463b8dbedce2bfce3ee90517085a9d9e2e13,http://doi.ieeecomputersociety.org/10.1109/IIH-MSP.2006.9
+c5eba789aeb41904aa1b03fad1dc7cea5d0cd3b6,https://doi.org/10.1109/BTAS.2017.8272773
+c5022fbeb65b70f6fe11694575b8ad1b53412a0d,https://doi.org/10.1109/ICIP.2005.1530209
+c5c56e9c884ac4070880ac481909bb6b621d2a3f,http://doi.ieeecomputersociety.org/10.1109/ICCV.2011.6126466
+c553f0334fcadf43607925733685adef81fbe406,https://doi.org/10.1109/ICSIPA.2017.8120636
+c58ece1a3fa23608f022e424ec5a93cddda31308,https://doi.org/10.1109/JSYST.2014.2325957
+c59a9151cef054984607b7253ef189c12122a625,https://doi.org/10.1007/s00138-016-0791-5
+c59b62864a6d86eead075c88137a87070a984550,https://doi.org/10.1109/IVCNZ.2015.7761546
+c5437496932dcb9d33519a120821da755951e1a9,http://doi.acm.org/10.1145/2487575.2487604
+c2b10909a0dd068b8e377a55b0a1827c8319118a,https://doi.org/10.1109/TCYB.2016.2565898
+c270aff2b066ee354b4fe7e958a40a37f7bfca45,https://doi.org/10.1109/WCSP.2017.8170910
+c252bc84356ed69ccf53507752135b6e98de8db4,https://doi.org/10.1016/j.neucom.2015.02.067
+c291f0e29871c8b9509d1a2876c3e305839ad4ac,https://doi.org/10.1109/ICARCV.2014.7064432
+c244c3c797574048d6931b6714ebac64d820dbb3,http://doi.acm.org/10.1145/2808492.2808500
+c222f8079c246ead285894c47bdbb2dfc7741044,https://doi.org/10.1109/ICIP.2015.7351631
+c2be82ed0db509087b08423c8cf39ab3c36549c3,http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019363
+c23bd1917badd27093c8284bd324332b8c45bfcf,https://doi.org/10.1109/IJCNN.2010.5596316
+c2474202d56bb80663e7bece5924245978425fc1,https://doi.org/10.1109/ICIP.2016.7532771
+c2422c975d9f9b62fbb19738e5ce5e818a6e1752,https://doi.org/10.1109/TNNLS.2015.2481006
+c2dc29e0db76122dfed075c3b9ee48503b027809,https://doi.org/10.1109/ICIP.2016.7532632
+f6b4811c5e7111485e2c9cc5bf63f8ac80f3e2d7,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2016.2569436
+f6311d6b3f4d3bd192d866d2e898c30eea37d7d5,http://ieeexplore.ieee.org/document/6460511/
+f63b3b8388bc4dcd4a0330402af37a59ce37e4f3,https://doi.org/10.1109/SIU.2013.6531214
+f6ebfa0cb3865c316f9072ded26725fd9881e73e,http://doi.ieeecomputersociety.org/10.1109/FGR.2006.109
+f6e6b4d0b7c16112dcb71ff502033a2187b1ec9b,https://doi.org/10.1109/TMM.2015.2476657
+f6511d8156058737ec5354c66ef6fdcf035d714d,http://doi.ieeecomputersociety.org/10.1109/BWCCA.2014.115
+f652cb159a2cf2745aabcbf6a7beed4415e79e34,http://doi.acm.org/10.1145/1460096.1460119
+f6dabb4d91bf7389f3af219d486d4e67cec18c17,https://doi.org/10.1016/j.compeleceng.2014.08.010
+e95895262f66f7c5e47dd46a70110d89c3b4c203,https://doi.org/10.1016/j.neucom.2016.09.023
+e957d0673af7454dbf0a14813201b0e2570577e9,https://doi.org/10.1109/ICPR.2016.7899699
+e95c5aaa72e72761b05f00fad6aec11c3e2f8d0f,http://doi.acm.org/10.1145/2791405.2791505
+e9cebf627c204c6949dcc077d04c57eb66b2c038,https://doi.org/10.1109/SIU.2013.6531371
+e9b731f00d16a10a31ceea446b2baa38719a31f1,https://doi.org/10.1109/ICSMC.2012.6378271
+e9d1b3767c06c896f89690deea7a95401ae4582b,https://doi.org/10.1109/VCIP.2016.7805565
+e9d77a85bc2fa672cc1bd10258c896c8d89b41e8,https://doi.org/10.1109/ICTAI.2012.25
+e908ce44fa94bb7ecf2a8b70cb5ec0b1a00b311a,http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019548
+e94168c35be1d4b4d2aaf42ef892e64a3874ed8c,https://doi.org/10.1109/TSMCB.2008.2010715
+e96ce25d11296fce4e2ecc2da03bd207dc118724,https://doi.org/10.1007/s00138-007-0095-x
+e98551055bdcf8e25e07f4ffdbf39d0a4a57bffc,https://doi.org/10.1109/ICPR.2014.440
+e9331ae2a887c02e0a908ebae2810a681aedee29,https://doi.org/10.1016/j.image.2011.05.003
+f1e44e64957397d167d13f8f551cae99e5c16c75,https://doi.org/10.1007/s11042-013-1548-z
+f17d8f14651c123d39e13a39dc79b7eb3659fe68,https://doi.org/10.1007/s11042-013-1803-3
+f1da4d705571312b244ebfd2b450692fd875cd1f,https://doi.org/10.1109/TIP.2014.2322446
+f1d6da83dcf71eda45a56a86c5ae13e7f45a8536,https://doi.org/10.1109/ACCESS.2017.2737544
+f18ff597bbfca10f84d017ac5e1ef0de6d7ad66c,http://doi.ieeecomputersociety.org/10.1109/SNPD.2016.7515888
+f1061b2b5b7ca32edd5aa486aecc63a0972c84f3,https://doi.org/10.1109/TIP.2017.2760512
+f180cb7111e9a6ba7cfe0b251c0c35daaef4f517,https://doi.org/10.1109/TIP.2015.2417502
+f19bf8b5c1860cd81b5339804d5db9e791085aa7,https://doi.org/10.1109/SMC.2017.8122640
+f14403d9d5fbc4c6e8aeb7505b5d887c50bad8a4,https://doi.org/10.1109/ICIP.2012.6467433
+f1af714b92372c8e606485a3982eab2f16772ad8,http://ieeexplore.ieee.org/document/5617662/
+e7436b8e68bb7139b823a7572af3decd96241e78,https://doi.org/10.1109/ROBIO.2011.6181560
+e7144f5c19848e037bb96e225d1cfd961f82bd9f,http://doi.ieeecomputersociety.org/10.1109/FG.2017.126
+e73b1137099368dd7909d203b80c3d5164885e44,http://doi.ieeecomputersociety.org/10.1109/FSKD.2008.116
+e73f2839fc232c03e9f027c78bc419ee15810fe8,https://doi.org/10.1109/ICIP.2017.8296413
+e71c15f5650a59755619b2a62fa93ac922151fd6,http://doi.ieeecomputersociety.org/10.1109/AUTOID.2005.22
+e74a2159f0f7afb35c7318a6e035bc31b8e69634,http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019503
+e790a2538579c8e2ef9b314962ab26197d6664c6,https://doi.org/10.1109/ICIP.2016.7532915
+e7e8c0bbee09b5af6f7df1de8f0f26da992737c4,https://doi.org/10.1109/IJCNN.2011.6033417
+e7b7df786cf5960d55cbac4e696ca37b7cee8dcd,https://doi.org/10.1109/IJCNN.2012.6252728
+cba090a5bfae7dd8a60a973259f0870ed68c4dd3,http://doi.ieeecomputersociety.org/10.1109/ISM.2017.22
+cb4d3d1b8fbb6df71a184dd8f00f89f84fa8373b,http://doi.ieeecomputersociety.org/10.1109/IJCNN.2009.5179002
+cb992fe67f0d4025e876161bfd2dda467eaec741,https://doi.org/10.1109/IPTA.2015.7367144
+cbc2de9b919bc63590b6ee2dfd9dda134af45286,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477561
+cbf3e848c5d2130dd640d9bd546403b8d78ce0f9,https://doi.org/10.1109/IJCNN.2012.6252385
+cbe1df2213a88eafc5dcaf55264f2523fe3ec981,http://doi.ieeecomputersociety.org/10.1109/WACV.2015.34
+cb4d8cef8cec9406b1121180d47c14dfef373882,https://doi.org/10.1109/ICPR.2014.301
+cb7a743b9811d20682c13c4ee7b791ff01c62155,https://doi.org/10.1109/MMSP.2015.7340789
+cb9921d5fc4ffa50be537332e111f03d74622442,https://doi.org/10.1007/978-3-319-46654-5_79
+cbaa17be8c22e219a9c656559e028867dfb2c2ed,https://doi.org/10.1109/ICIP.2016.7532636
+cb160c5c2a0b34aba7b0f39f5dda6aca8135f880,https://doi.org/10.1109/SIU.2016.7496023
+f839ae810338e3b12c8e2f8db6ce4d725738d2d9,http://doi.ieeecomputersociety.org/10.1109/FG.2017.115
+f888c165f45febf3d17b8604a99a2f684d689cbc,http://doi.ieeecomputersociety.org/10.1109/CIT.2004.1357196
+f812347d46035d786de40c165a158160bb2988f0,https://doi.org/10.1007/s10339-016-0765-6
+f856532a729bd337fae1eb7dbe55129ae7788f45,http://doi.ieeecomputersociety.org/10.1109/ARTCom.2009.26
+f88ce52c5042f9f200405f58dbe94b4e82cf0d34,https://doi.org/10.1109/TNNLS.2015.2508025
+f8fe1b57347cdcbea755722bf1ae85c4b26f3e5c,https://doi.org/10.1007/s00138-016-0790-6
+f86c6942a7e187c41dd0714531efd2be828e18ad,https://doi.org/10.1109/VCIP.2016.7805514
+f834c50e249c9796eb7f03da7459b71205dc0737,https://doi.org/10.1109/TIP.2011.2166974
+cead57f2f7f7b733f4524c4b5a7ba7f271749b5f,http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2017.46
+cefaad8241bceb24827a71bf7c2556e458e57faa,https://doi.org/10.1109/TIP.2013.2264676
+ce3304119ba6391cb6bb25c4b3dff79164df9ac6,https://doi.org/10.1016/j.imavis.2016.03.004
+ce8db0fe11e7c96d08de561506f9f8f399dabbb2,https://doi.org/10.1109/ICIP.2015.7351677
+ce11b2d7905d2955c4282db5b68482edb846f29f,http://doi.acm.org/10.1145/3126686.3126705
+ce30ddb5ceaddc0e7d308880a45c135287573d0e,https://doi.org/10.1109/ICSMC.2012.6378304
+e0fe68c92fefa80992f4861b0c45a3fbec7cf1c9,http://doi.ieeecomputersociety.org/10.1109/ACII.2015.7344671
+e084b0e477ee07d78c32c3696ea22c94f5fdfbec,https://doi.org/10.1109/ICIP.2013.6738565
+e0cc2a9fe6b5086c55fdbf0021aca3dc1a77a1ca,http://doi.ieeecomputersociety.org/10.1109/BLISS.2008.25
+e0ab926cd48a47a8c7b16e27583421141f71f6df,https://doi.org/10.1109/HPCSim.2016.7568383
+e0423788eb91772de9d708a17799179cf3230d63,http://doi.acm.org/10.1145/3093241.3093277
+e03f69bad7e6537794a50a99da807c9df4ff5186,http://doi.acm.org/10.1145/2708463.2709060
+e0793fd343aa63b5f366c8ace61b9c5489c51a4d,http://doi.ieeecomputersociety.org/10.1109/CRV.2016.46
+465faf9974a60da00950be977f3bc2fc3e56f5d2,http://doi.ieeecomputersociety.org/10.1109/ACII.2017.8273631
+46b2ecef197b465abc43e0e017543b1af61921ac,https://doi.org/10.1109/ICPR.2016.7899652
+464ef1b3dcbe84099c904b6f9e9281c5f6fd75eb,https://doi.org/10.1109/TIP.2014.2359765
+4672513d0dbc398719d66bba36183f6e2b78947b,https://doi.org/10.1016/j.ipm.2015.05.007
+46c1af268d4b3c61a0a12be091ca008a3a60e4cd,https://doi.org/10.1007/s11042-016-3592-y
+2cf3564d7421b661e84251d280d159d4b3ebb336,https://doi.org/10.1109/BTAS.2014.6996287
+2c6ab32a03c4862ee3e2bc02e7e74745cd523ad2,https://doi.org/10.1109/IC3.2013.6612218
+2ca10da4b59b406533ad1dc7740156e01782658f,https://doi.org/10.1109/SIU.2016.7496207
+2cd426f10178bd95fef3dede69ae7b67e73bb70c,https://doi.org/10.1109/ROBIO.2016.7866457
+2c06781ba75d51f5246d65d1acf66ab182e9bde6,https://doi.org/10.1016/j.imavis.2016.11.002
+2ce84465b9759166effc7302c2f5339766cc523d,https://doi.org/10.1109/VCIP.2015.7457830
+798e58c181f3ba3aecbe41acd1881860c5e2df3a,https://doi.org/10.1109/TNNLS.2012.2237038
+7935f644c8044c0d3b81e2842e5ecc3672698bbb,https://doi.org/10.1109/ICIP.2011.6116258
+79fd4baca5f840d6534a053b22e0029948b9075e,https://doi.org/10.1109/ISDA.2012.6416647
+2d94dfa9c8f6708e071ef38d58f9f9bcb374cd84,https://doi.org/10.1109/CVPRW.2011.5981817
+2debdb6a772312788251cc3bd1cb7cc8a6072214,https://doi.org/10.1142/S0218001415560157
+2d411826cd7865638b65e1b5f92043c245f009f9,http://doi.acm.org/10.1145/2733373.2806239
+2d79dece7890121469f515a6e773ba0251fc2d98,https://doi.org/10.1109/ICIP.2017.8296756
+2df4d0c06f4f68060cecbbb8e2088d9c6b20d04f,https://doi.org/10.1109/ICIP.2014.7026056
+2d2fb01f761d21a459cfb34935bc47ab45a9913b,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2014.2346515
+41e5d92b13d36da61287c7ffd77ee71de9eb2942,https://doi.org/10.1016/j.asoc.2016.12.033
+41781474d834c079e8fafea154d7916b77991b15,http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2017.60
+417c2fa930bb7078fdf10cb85c503bd5270b9dc2,https://doi.org/10.1109/ICSIPA.2015.7412169
+414fdfe5f2e4f32a59bf15062b6e524cbf970637,https://doi.org/10.1109/TIFS.2014.2361028
+83b54b8c97dc14e302dad191327407ec0d5fb4a6,https://doi.org/10.1109/ICIP.2017.8296913
+8383faea09b4b4bef8117a1da897495ebd68691b,https://doi.org/10.1109/TCYB.2015.2493538
+838dad9d1d68d29be280d92e69410eaac40084bc,https://doi.org/10.1109/HPCSim.2014.6903749
+83d50257eb4c0aa8d16d27bf2ee8d0614fd63bf6,http://doi.ieeecomputersociety.org/10.1109/FG.2015.7284834
+83bce0907937f09f5ccde26c361d52fe55fc8979,http://doi.acm.org/10.1145/2993148.2993185
+1b8541ec28564db66a08185510c8b300fa4dc793,https://doi.org/10.1109/LSP.2015.2499778
+1b211f8221162ce7ef212956b637b50e30ad48f4,https://doi.org/10.1109/ICIP.2016.7532925
+1b6c65442f2b572fb6c8fc9a7d5ae49a8e6d32ab,http://doi.ieeecomputersociety.org/10.1109/ICPR.2006.537
+1b29f23f3517ac5bbe9bf5e80cda741b61bb9b12,https://doi.org/10.1016/j.patcog.2017.01.007
+1b4b3d0ce900996a6da8928e16370e21d15ed83e,https://doi.org/10.1109/BigDataService.2017.38
+1b9976fea3c1cf13f0a102a884f027d9d80a14b3,https://doi.org/10.1109/ROMAN.2014.6926354
+1ba9d12f24ac04f0309e8ff9b0162c6e18d97dc3,http://doi.acm.org/10.1145/2964284.2984061
+1bcb1c6d6cebc9737f9933fcefbf3da8a612f994,https://doi.org/10.1016/j.jvcir.2017.10.008
+1bd8ab47177997acb3b0cca4b6a801e6e6ec3eac,https://doi.org/10.1109/ICIP.2014.7025273
+1b2d9a1c067f692dd48991beff03cd62b9faebf2,https://doi.org/10.1109/ICIP.2011.6116302
+7782627fa2e545276996ff9e9a1686ac496df081,http://doi.acm.org/10.1145/2663204.2666276
+771a6a80dd08212d83a4e976522e1ce108881401,https://doi.org/10.1109/IPTA.2016.7820979
+77223849321d57a03e0571a08e71eba06e38834a,http://doi.ieeecomputersociety.org/10.1109/EMS.2011.20
+77c5437107f8138d48cb7e10b2b286fa51473678,https://doi.org/10.1109/URAI.2016.7734005
+77c3574a020757769b2ca807ff4b95a88eaa2a37,https://doi.org/10.1109/MSP.2015.2410783
+77cea27494499dd162221d1476bf70a87391790a,https://doi.org/10.1109/VCIP.2015.7457930
+77816b9567d5fed1f6085f33e1ddbcc73af2010e,https://doi.org/10.1109/MRA.2012.2201574
+778c1e95b6ea4ccf89067b83364036ab08797256,https://doi.org/10.1109/TIFS.2012.2224866
+7753e3b9e158289cbaa22203166424ca9c229f68,http://doi.ieeecomputersociety.org/10.1109/ICDM.2014.29
+77869f274d4be4d4b4c438dbe7dff4baed521bd8,https://doi.org/10.1109/TIP.2016.2551362
+773ce00841a23d32727aa1f54c29865fefd4ce02,http://doi.ieeecomputersociety.org/10.1109/AIPR.2006.24
+772474b5b0c90629f4d9c223fd9c1ef45e1b1e66,https://doi.org/10.1109/BTAS.2017.8272716
+480858e55abdbc07ca47b7dc10204613fdd9783c,https://doi.org/10.1109/ICPR.2014.786
+48a6a1c6a0ac5f2b7912b3ccb40b0c07f62ddfdf,https://doi.org/10.1016/j.imavis.2015.12.003
+489b7e12a420eff0d585f3f866e76b838c2cd275,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477675
+48dcf45a1e38adbb9826594f7ffaa5e95ef78395,https://doi.org/10.1109/VCIP.2017.8305111
+48db8bf18e2f6f19e07e88384be855c8b7ea0ead,http://doi.acm.org/10.1145/2964284.2967225
+4848a48a2b8bacd2092e87961cd86818da8e7151,https://doi.org/10.1109/VCIP.2017.8305080
+48255c9e1d6e1d030728d33a71699757e337be08,https://doi.org/10.1109/ISSNIP.2013.6529832
+48906f609446afcdaacbe1d65770d7a6165a8eee,https://doi.org/10.1007/s12559-017-9482-4
+486f5e85944404a1b57333443070b0b8c588c262,http://doi.ieeecomputersociety.org/10.1109/IRI.2014.7051957
+7049187c5155d9652747413ce1ebc8dbb209fd69,https://doi.org/10.1109/ICPR.2016.7899808
+70769def1284fe88fd57a477cde8a9c9a3dff13f,https://doi.org/10.1016/j.neucom.2006.10.036
+70341f61dfe2b92d8607814b52dfd0863a94310e,http://doi.ieeecomputersociety.org/10.1109/AVSS.2015.7301750
+70444627cb765a67a2efba17b0f4b81ce1fc20ff,https://doi.org/10.1109/TNNLS.2016.2609434
+70516aede32cf0dbc539abd9416c44faafc868bd,https://doi.org/10.1109/MICAI.2013.16
+7081958a390d3033f5f33e22bbfec7055ea8d601,https://doi.org/10.1109/MCI.2015.2437318
+70d8bda4aafb0272ac4b93cd43e2448446b8e94d,https://doi.org/10.1109/ICMLC.2010.5580938
+705e086bb666d129a6969882cfa49282116a638e,https://doi.org/10.1109/TNNLS.2014.2376963
+70d0bffa288e317bc62376f4f577c5bd7712e521,https://doi.org/10.1049/iet-cvi.2012.0094
+1e2770ce52d581d9a39642b40bfa827e3abf7ea2,http://doi.acm.org/10.1145/2425333.2425362
+1eb48895d86404251aa21323e5a811c19f9a55f9,http://doi.ieeecomputersociety.org/10.1109/CIS.2015.22
+1e8fd77d4717e9cb6079e10771dd2ed772098cb3,http://doi.ieeecomputersociety.org/10.1109/ICMEW.2016.7574681
+1e62ca5845a6f0492574a5da049e9b43dbeadb1b,https://doi.org/10.1109/LSP.2016.2637400
+1e344b99583b782e3eaf152cdfa15f217b781181,http://doi.acm.org/10.1145/2499788.2499789
+1eb9c859ff7537182a25556635954bcd11830822,https://doi.org/10.1109/ICDSP.2015.7252004
+1ef6ad9e1742d0b2588deaf506ef83b894fb9956,https://doi.org/10.1007/s12193-016-0213-z
+1ed617d14dbc53b20287d3405b14c68d8dad3965,https://doi.org/10.1109/TCYB.2016.2582918
+1ed49161e58559be399ce7092569c19ddd39ca0b,https://doi.org/10.1109/ICPR.2016.7899973
+1eb1fdc5c933d2483ba1acbfa8c457fae87e71e5,https://doi.org/10.1109/ICPR.2016.7899945
+1ea4347def5868c622d7ce57cbe171fa68207e2b,https://doi.org/10.1007/978-3-642-41181-6_23
+84f3c4937cd006888b82f2eb78e884f2247f0c4e,https://doi.org/10.1109/CCNC.2012.6181097
+84be18c7683417786c13d59026f30daeed8bd8c9,https://doi.org/10.1007/s00138-016-0755-9
+84f86f8c559a38752ddfb417e58f98e1f8402f17,http://doi.ieeecomputersociety.org/10.1109/EST.2013.10
+844e3e6992c98e53b45e4eb88368d0d6e27fc1d6,https://doi.org/10.1109/ICIP.2014.7026057
+84ae55603bffda40c225fe93029d39f04793e01f,https://doi.org/10.1109/ICB.2016.7550066
+84ec0983adb8821f0655f83b8ce47f36896ca9ee,https://doi.org/10.1109/SMC.2017.8122985
+4aa27c1f8118dbb39809a0f79a28c0cbc3ede276,http://doi.acm.org/10.1145/2683483.2683530
+4a03f07397c5d32463750facf010c532f45233a5,http://doi.ieeecomputersociety.org/10.1109/T-AFFC.2012.32
+4aea1213bdb5aa6c74b99fca1afc72d8a99503c6,https://doi.org/10.1109/ICDIM.2010.5664688
+4a7e5a0f6a0df8f5ed25ef356cd67745cd854bea,https://doi.org/10.1007/978-3-642-14922-1_68
+243cd27dce38fd756a840b397c28ad21cfb78897,https://doi.org/10.1049/iet-ipr.2013.0003
+24b5ea4e262e22768813e7b6581f60e4ab9a8de7,https://doi.org/10.1109/TIFS.2018.2807791
+244293024aebbb0ff42a7cf2ba49b1164697a127,https://doi.org/10.1109/BTAS.2016.7791187
+24eeb748a5e431510381ec7c8253bcb70eff8526,https://doi.org/10.1109/TIP.2017.2746270
+2400c4994655c4dd59f919c4d6e9640f57f2009f,https://doi.org/10.1109/IPTA.2015.7367096
+24e82eaf3257e761d6ca0ffcc2cbca30dfca82e9,https://doi.org/10.1109/GlobalSIP.2016.7906030
+24b637c98b22cd932f74acfeecdb50533abea9ae,https://doi.org/10.1109/TIP.2015.2492819
+24205a60cbf1cc12d7e0a9d44ed3c2ea64ed7852,http://doi.ieeecomputersociety.org/10.1109/FG.2017.30
+24e42e6889314099549583c7e19b1cb4cc995226,https://doi.org/10.1109/ACPR.2011.6166646
+24f3dfeb95bdecdc604d630acdfcafa1dc7c9124,http://doi.acm.org/10.1145/2994258.2994270
+245d98726674297208e76308c3a11ce3fc43bee2,https://doi.org/10.1007/s11042-015-2699-x
+2348f1fa2940b01ec90e023fac8cc96812189774,http://doi.ieeecomputersociety.org/10.1109/EWDTS.2017.8110157
+2360ecf058393141ead1ca6b587efa2461e120e4,https://doi.org/10.1007/s00138-017-0895-6
+235a347cb96ef22bf35b4cf37e2b4ee5cde9df77,http://doi.ieeecomputersociety.org/10.1109/DICTA.2008.13
+23ecc496eaa238ac884e6bae5763f6138a9c90a3,https://doi.org/10.1109/ICB.2016.7550085
+2336de3a81dada63eb00ea82f7570c4069342fb5,http://doi.acm.org/10.1145/2361407.2361428
+235bebe7d0db37e6727dfa1246663be34027d96b,https://doi.org/10.1109/NAFIPS.2016.7851625
+2340d810c515dc0c9fd319f598fa8012dc0368a0,https://doi.org/10.1109/AFGR.2008.4813420
+23675cb2180aac466944df0edda4677a77c455cd,http://doi.ieeecomputersociety.org/10.1109/IIH-MSP.2009.142
+4ffd744a5f079c2d65f36e3ee0979b978f522a13,http://doi.ieeecomputersociety.org/10.1109/SITIS.2009.15
+4fbc0189252ed4fe8f9cffd3ea0ebbb0c621e3ef,https://doi.org/10.1049/iet-cvi.2012.0127
+4f742c09ce12859b20deaa372c8f1575acfc99c9,https://doi.org/10.1016/j.neucom.2017.01.020
+4f03ba35440436cfa06a2ed2a571fea01cb36598,https://doi.org/10.1109/SPAC.2017.8304260
+4fac09969ee80d485876e3198c7177181c600a4a,http://doi.ieeecomputersociety.org/10.1109/CRV.2015.32
+4f3b652c75b1d7cf4997e0baaef2067b61e3a79b,http://doi.ieeecomputersociety.org/10.1109/ICME.2016.7552910
+8dd3f05071fd70fb1c349460b526b0e69dcc65bf,https://doi.org/10.1109/TIP.2017.2726010
+8d3e95c31c93548b8c71dbeee2e9f7180067a888,https://doi.org/10.1109/ICPR.2016.7899841
+8db9188e5137e167bffb3ee974732c1fe5f7a7dc,https://doi.org/10.1109/TIP.2016.2612885
+8db609d84190b905913eb2f17f4e558c6e982208,http://doi.ieeecomputersociety.org/10.1109/ICIG.2011.182
+15ef449ac443c494ceeea8a9c425043f4079522e,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477583
+157647b0968d95f9288b27d6d9179a8e1ef5c970,https://doi.org/10.1049/iet-bmt.2014.0086
+15ef65fd68d61f3d47326e358c446b0f054f093a,https://doi.org/10.1109/MLSP.2017.8168180
+1584edf8106e8f697f19b726e011b9717de0e4db,https://doi.org/10.1049/iet-cvi.2015.0350
+15a9f812e781cf85c283f7cf2aa2928b370329c5,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2469281
+158aa18c724107587bcc4137252d0ba10debf417,https://doi.org/10.1109/ACSSC.2016.7869522
+159b1e3c3ed0982061dae3cc8ab7d9b149a0cdb1,https://doi.org/10.1109/TIP.2017.2694226
+152683f3ac99f829b476ea1b1b976dec6e17b911,https://doi.org/10.1109/MIXDES.2016.7529773
+159caaa56c2291bedbd41d12af5546a7725c58d4,https://doi.org/10.1109/ICIP.2016.7532910
+15fbb5fc3bdd692a6b2dd737cce7f39f7c89a25c,https://doi.org/10.1109/TMM.2011.2167317
+15e12d5c4d80a2b6f4d957a3ffd130564e9bab3a,https://doi.org/10.5220/0004736505740580
+1277b1b8b609a18b94e4907d76a117c9783a5373,http://doi.ieeecomputersociety.org/10.1109/ASONAM.2016.7752438
+12c4ba96eaa37586f07be0d82b2e99964048dcb5,https://doi.org/10.1109/LSP.2017.2694460
+122f52fadd4854cf6c9287013520eced3c91e71a,https://doi.org/10.1109/TIP.2016.2515987
+1280b35e4a20036fcfd82ee09f45a3fca190276f,http://doi.ieeecomputersociety.org/10.1109/iThings-GreenCom-CPSCom-SmartData.2017.166
+1252727e8096f48096ef89483d30c3a74500dd15,https://doi.org/10.1007/s00138-016-0746-x
+126204b377029feb500e9b081136e7a9010e3b6b,http://doi.ieeecomputersociety.org/10.1109/ICDMW.2010.50
+126076774da192d4d3f4efcd1accc719ee5f9683,https://doi.org/10.1109/SIU.2012.6204774
+120b9c271c3a4ea0ad12bbc71054664d4d460bc3,https://doi.org/10.1109/DICTA.2015.7371259
+12b533f7c6847616393591dcfe4793cfe9c4bb17,https://doi.org/10.1109/TIFS.2017.2765519
+8cd9475a3a1b2bcccf2034ce8f4fe691c57a4889,http://doi.ieeecomputersociety.org/10.1109/FG.2017.138
+8cffe360a05085d4bcba111a3a3cd113d96c0369,http://doi.ieeecomputersociety.org/10.1109/ICCV.2011.6126248
+8c85ef961826575bc2c2f4da7784bc3bfcf8b188,https://doi.org/10.1109/ICIP.2015.7350871
+8c50869b745fc094a4fb1b27861934c3c14d7199,https://doi.org/10.1109/EMBC.2016.7591826
+8cedb92694845854f3ad0daf6c9adb6b81c293de,http://doi.acm.org/10.1145/1839379.1839431
+8c3f7bd8ae50337dd812b370ce4c4ea9375a9f58,https://doi.org/10.1109/ICIP.2014.7025276
+8c37bd06e1a637c6f249dcd1d2c4bc9589ae24b3,https://doi.org/10.1007/11608288_28
+8c2b663f8be1702ed3e377b5e6e85921fe7c6389,https://doi.org/10.1109/IPTA.2016.7821006
+8cd0855ca967ce47b0225b58bbadd38d8b1b41a1,https://doi.org/10.1109/TIP.2017.2721106
+8c048be9dd2b601808b893b5d3d51f00907bdee0,https://doi.org/10.1631/FITEE.1600041
+85785ae222c6a9e01830d73a120cdac75d0b838a,https://doi.org/10.1007/978-3-319-11782-9
+85567174a61b5b526e95cd148da018fa2a041d43,https://doi.org/10.1109/TMM.2016.2515367
+8576d0031f2b0fe1a0f93dd454e73d48d98a4c63,http://doi.acm.org/10.1145/2522848.2531743
+8598d31c7ca9c8f5bb433409af5e472a75037b4d,https://doi.org/10.1109/JPROC.2008.916364
+85f27ec70474fe93f32864dd03c1d0f321979100,https://doi.org/10.1109/IJCNN.2014.6889381
+85f7f03b79d03da5fae3a7f79d9aac228a635166,https://doi.org/10.1109/WACV.2009.5403085
+85205914a99374fa87e004735fe67fc6aec29d36,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2392774
+1ddea58d04e29069b583ac95bc0ae9bebb0bed07,https://doi.org/10.1109/KSE.2015.50
+1dabb080e3e968633f4b3774f19192f8378f5b67,https://doi.org/10.1109/ICPR.2016.7899664
+1d10010ea7af43d59e1909d27e4e0e987264c667,https://doi.org/10.1016/j.neunet.2004.06.006
+1dae2f492d3ca2351349a73df6ee8a99b05ffc30,https://doi.org/10.1137/110842570
+1da1299088a6bf28167c58bbd46ca247de41eb3c,https://doi.org/10.1109/ICASSP.2002.5745055
+71d786fdb563bdec6ca0bbf69eba8e3f37c48c6f,https://doi.org/10.1109/SMC.2016.7844680
+710c3aaffef29730ffd909a63798e9185f488327,https://doi.org/10.1109/ICPR.2016.7900095
+71a9d7cf8cf1e206cb5fa18795f5ab7588c61aba,https://doi.org/10.1109/TIM.2011.2141270
+71e95c3a31dceabe9cde9f117615be8bf8f6d40e,https://doi.org/10.1109/ICIP.2010.5653024
+71f07c95a2b039cc21854c602f29e5be053f2aba,https://doi.org/10.1007/s00138-010-0250-7
+7123e510dea783035b02f6c35e35a1a09677c5ab,https://doi.org/10.1109/ICPR.2016.7900297
+715d3eb3665f46cd2fab74d35578a72aafbad799,http://doi.ieeecomputersociety.org/10.1109/WI-IAT.2013.118
+7177649ece5506b315cb73c36098baac1681b8d2,http://doi.ieeecomputersociety.org/10.1109/FG.2017.130
+71d68af11df855f886b511e4fc1635c1e9e789b0,https://doi.org/10.1109/TCSVT.2011.2133210
+71bbda43b97e8dc8b67b2bde3c873fa6aacd439f,https://doi.org/10.1016/j.patcog.2015.09.012
+7196b3832065aec49859c61318037b0c8c12363a,https://doi.org/10.1007/s11432-014-5151-3
+71f9861df104b90399dc15e12bbb14cd03f16e0b,http://doi.ieeecomputersociety.org/10.1109/CGIV.2009.7
+7644b3a0871b8e0e7e1cdf06099e295f1e5fbdf7,https://doi.org/10.1007/s11063-015-9464-z
+76669f166ddd3fb830dbaacb3daa875cfedc24d9,https://doi.org/10.1109/ICPR.2016.7899840
+76dff7008d9b8bf44ec5348f294d5518877c6182,https://doi.org/10.1016/j.imavis.2014.09.004
+76640cb1a683a479ce2e0d6681d821ff39126d63,https://doi.org/10.1109/IJCNN.2011.6033408
+76a52ebfc5afd547f8b73430ec81456cf25ddd69,http://doi.ieeecomputersociety.org/10.1109/AIPR.2014.7041914
+76d1c6c6b67e67ced1f19a89a5034dafc9599f25,http://doi.acm.org/10.1145/2590296.2590315
+761304bbd259a9e419a2518193e1ff1face9fd2d,https://doi.org/10.1007/978-3-642-33885-4_57
+1ca1b4f787712ede215030d22a0eea41534a601e,https://doi.org/10.1109/CVPRW.2010.5543609
+1cb0c11620bde2734c1a428c789158ffff0d6c7b,http://doi.ieeecomputersociety.org/10.1109/BigMM.2016.62
+1c5a5d58a92c161e9ba27e2dfe490e7caaee1ff5,http://doi.ieeecomputersociety.org/10.1109/FG.2015.7163119
+1ce29d6b820ed4a24da27b76ffd9605d5b3b10b5,https://doi.org/10.1016/j.imavis.2015.01.007
+1cfe8c1d341dbf8cc43040b37ca3552385adb10b,http://doi.acm.org/10.1145/2461466.2461473
+82e1692467969940a6d6ac40eae606b8b4981f7e,https://doi.org/10.1109/ICMEW.2012.56
+8274069feeff6392b6c5d45d8bfaaacd36daedad,http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019312
+826015d9ade1637b3fcbeca071e3137d3ac1ef56,https://doi.org/10.1109/WACV.2017.84
+828d7553a45eb0c3132e406105732a254369eb4d,https://doi.org/10.1016/j.neunet.2017.09.001
+82953e7b3d28ccd1534eedbb6de7984c59d38cd4,https://doi.org/10.1109/TNNLS.2014.2356856
+8229f2735a0db0ad41f4d7252129311f06959907,https://doi.org/10.1109/TIP.2011.2106794
+82dad0941a7cada11d2e2f2359293fe5fabf913f,https://doi.org/10.1109/ICIP.2017.8296810
+493bc7071e35e7428336a515d1d26020a5fb9015,https://doi.org/10.1109/ACSSC.2013.6810420
+4958c06da5581fd0b4904d3bf0ee09958ecdba5b,https://doi.org/10.1016/j.knosys.2016.12.005
+4909ed22b1310f1c6f2005be5ce3349e3259ff6a,https://doi.org/10.1109/ROBIO.2009.4913106
+49e4f05fa98f63510de76e7abd8856ff8db0f38d,http://doi.ieeecomputersociety.org/10.1109/FG.2017.110
+4932b929a2e09ddebedcb1abe8c62f269e7d4e33,https://doi.org/10.1109/SIU.2016.7496076
+492116d16a39eb54454c7ffb1754cea27ad3a171,http://doi.acm.org/10.1145/3132525.3134823
+496f3d14cf466f054d395a3c71fa2cd6a3dda61d,http://doi.acm.org/10.1145/3009977.3010055
+49fdafef327069516d887d8e69b5e96c983c3dd0,https://doi.org/10.1109/DICTA.2017.8227433
+496d62741e8baf3859c24bb22eaccd3043322126,http://doi.ieeecomputersociety.org/10.1109/TKDE.2017.2728531
+49fe4f387ac7e5852a78b327ec42cc7300c5f8e0,https://doi.org/10.1007/s11042-014-2055-6
+4033ac52dba394e390a86cd149b9838f1d7834b5,https://doi.org/10.1109/ICMLC.2012.6359009
+4014d74e8f5ea4d76c2c1add81d0c88d6e342478,http://doi.acm.org/10.1145/3136755.3143010
+4014e8c1a1b49ad2b9b2c45c328ec9f1fd56f676,https://doi.org/10.1109/IJCNN.2017.7966191
+4097fef623185557bb1842501cfdc97f812fc66d,http://doi.acm.org/10.1145/3126686.3126755
+40dd736c803720890d6bfc1e083f6050e35d8f7a,http://doi.acm.org/10.1145/3139958.3140055
+40f06e5c052d34190832b8c963b462ade739cbf0,https://doi.org/10.1109/ICNC.2010.5583821
+405cf40f3ce74210f7e9862b2b828ce002b409ed,https://doi.org/10.1109/IJCNN.2017.7966244
+407a26fff7fac195b74de9fcb556005e8785a4e9,http://doi.ieeecomputersociety.org/10.1109/FG.2017.29
+2e36b63fdf1353425a57a0665b0d0274efe92963,http://doi.acm.org/10.1145/3152771.3156179
+2e5d173ee0d1d7f88c335ade6a7b879b2d987ab4,https://doi.org/10.1109/ICASSP.2015.7178367
+2e535b8cd02c2f767670ba47a43ad449fa1faad7,https://doi.org/10.1109/MSP.2017.2740460
+2ed7d95588200c8c738c7dd61b8338538e04ea30,https://doi.org/10.1109/ICIP.2010.5654063
+2ee1ba1c3d4797fdae46d3d5f01db7ef5903dadd,https://doi.org/10.1016/j.neucom.2015.07.031
+2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d,https://doi.org/10.1109/CVPRW.2011.5981801
+2ef1b1b5ed732634e005df779fd9b21da0ffe60c,https://doi.org/10.1016/j.image.2017.03.012
+2e5b160892b70a1e846aa9dcdf132b8011937ec6,https://doi.org/10.1109/LSP.2017.2689921
+2e27667421a7eeab278e0b761db4d2c725683c3f,https://doi.org/10.1007/s11042-013-1815-z
+2e6776cd582c015b46faf616f29c98ce9cff51a2,https://doi.org/10.1109/TNN.2005.860849
+2e12c5ea432004de566684b29a8e148126ef5b70,https://doi.org/10.1007/s12193-015-0204-5
+2b286ed9f36240e1d11b585d65133db84b52122c,http://doi.acm.org/10.1145/3130800.3130837
+2babf665198a91932a4ce557f627c28e7e8f31f2,http://doi.acm.org/10.1145/3009977.3010004
+2b300985a507533db3ec9bd38ade16a32345968e,https://doi.org/10.1007/s11042-015-3070-y
+2b5005c2abf2d9a8c16afa50306b6959dfc72275,https://doi.org/10.1109/ICARCV.2010.5707216
+2b0d14dbd079b3d78631117b1304d6c1579e1940,https://doi.org/10.1007/s11063-016-9524-z
+2b43100a13811b33cc9f905fa1334bfd8b1873ba,https://doi.org/10.1109/IVCNZ.2015.7761564
+2b2924af7ec219bd1fadcbd2c57014ed54efec86,http://doi.ieeecomputersociety.org/10.1109/SSIAI.2014.6806053
+2be9284d531b8c573a4c39503ca50606446041a3,https://doi.org/10.1109/ICIP.2005.1530004
+2be24e8a3f2b89bdaccd02521eff3b7bb917003e,http://doi.ieeecomputersociety.org/10.1109/WACV.2015.96
+47cd161546c59ab1e05f8841b82e985f72e5ddcb,https://doi.org/10.1109/ICIP.2017.8296552
+47109343e502a4097cb7efee54bc5fbb14598c05,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.182
+4786638ffb3b2fb385cec80720cc6e7c3588b773,https://doi.org/10.1007/s11042-015-2598-1
+471bef061653366ba66a7ac4f29268e8444f146e,https://doi.org/10.1109/SMC.2015.524
+47fb74785fbd8870c2e819fc91d04b9d9722386f,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.161
+47d07217c501644d63adfec740346f244abaaae8,https://doi.org/10.1016/j.patcog.2016.05.017
+78b457f8b1ba4fbd1c50c32ec1f02f4f58764ad7,http://doi.ieeecomputersociety.org/10.1109/WACV.2015.99
+78d4d861c766af2a8da8855bece5da4e6eed2e1c,http://doi.acm.org/10.1145/3129416.3129455
+78e1798c3077f4f8a4df04ca35cd73f82e9a38f3,http://ieeexplore.ieee.org/document/6460640/
+78f244dc2a171944836a89874b8f60e9fe80865d,http://doi.ieeecomputersociety.org/10.1109/ICIG.2011.181
+780c8a795baca1ba4cb4956cded877dd3d1ca313,http://doi.ieeecomputersociety.org/10.1109/ISSPIT.2013.6781879
+789b8fff223b0db0fe3babf46ea98b1d5197f0c0,https://doi.org/10.1002/ima.20245
+785eeac2e236a85a45b4e0356c0745279c31e089,https://doi.org/10.1109/TIFS.2014.2359543
+7813d405450013bbdb0b3a917319d5964a89484a,https://doi.org/10.1109/WACV.2017.62
+789a43f51e0a3814327dab4299e4eda8165a5748,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.189
+782eee555067b2d6d24db87775e1ded5fb047491,https://doi.org/10.1109/MMSP.2008.4665158
+8be60114634caa0eff8566f3252cb9a1b7d5ef10,http://doi.ieeecomputersociety.org/10.1109/ICME.2014.6890133
+8b4124bb68e5b3e6b8b77888beae7350dc594a40,https://doi.org/10.1109/ICSMC.2005.1571395
+8bf945166305eb8e304a9471c591139b3b01a1e1,https://doi.org/10.1109/ACCESS.2017.2756451
+8b1fa60b9164b60d1ca2705611fab063505a3ef5,http://doi.ieeecomputersociety.org/10.1109/ICMEW.2013.6618337
+8b3c867e67b263d7a0577a112173a64009a3b4ba,https://doi.org/10.1109/ICIP.2010.5652374
+8b1f697d81de1245c283b4f8f055b9b76badfa66,https://doi.org/10.1142/S0218126616500171
+13907865a97afde053d7bb7134d58a7bbc12043c,https://doi.org/10.1016/j.patcog.2014.05.001
+134cea33099cafc6615e57437e29d7c3906a2b48,http://doi.ieeecomputersociety.org/10.1109/ICETET.2010.80
+136aae348c7ebc6fd9df970b0657241983075795,https://doi.org/10.1109/ICIP.2015.7351542
+13f065d4e6dfe2a130bd64d73eee97d10d9f7d33,https://doi.org/10.1109/DICTA.2015.7371222
+13901473a12061f080b9d54219f16db7d406e769,https://doi.org/10.1109/TIP.2012.2222895
+7f9be0e08784835de0f8bc3a82fcca02b3721dc1,https://doi.org/10.1109/IJCNN.2014.6889744
+7f415aee0137acab659c664eb1dff15f7b726bdd,https://doi.org/10.1109/TCSVT.2014.2302522
+7f5346a169c9784ca79aca5d95ae8bf2ebab58e3,https://doi.org/10.1109/ICIP.2015.7351304
+7f4040b482d16354d5938c1d1b926b544652bf5b,http://doi.acm.org/10.1145/2502081.2502115
+7f8d2d7eaa03132caefe0f3b126b5b369a712c9d,http://doi.ieeecomputersociety.org/10.1109/ACHI.2009.33
+7fa00c81f7c2d8da1551334b0e7bc3d7fd43130c,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2014.2353635
+7fcd03407c084023606c901e8933746b80d2ad57,https://doi.org/10.1109/BTAS.2017.8272694
+7f8cef6ba2f059e465b1b23057a6dbb23fba1c63,https://doi.org/10.1109/TCSVT.2016.2539541
+7f1078a2ebfa23a58adb050084d9034bd48a8a99,https://doi.org/10.1007/s00371-015-1169-9
+7a595800b490ff437ab06fe7612a678d5fe2b57d,https://doi.org/10.1109/MMSP.2009.5293285
+7a09e8f65bd85d4c79f0ae90d4e2685869a9894f,https://doi.org/10.1109/TMM.2016.2551698
+7a6e3ed956f71b20c41fbec008b1fa8dacad31a6,http://doi.ieeecomputersociety.org/10.1109/FG.2015.7163117
+7a91617ec959acedc5ec8b65e55b9490b76ab871,https://doi.org/10.1109/RAIT.2012.6194481
+7a666a91a47da0d371a9ba288912673bcd5881e4,https://doi.org/10.1016/j.patrec.2009.05.011
+7adfc2f854e2ea45c29d22d6e2dcccdd527f46a8,https://doi.org/10.1007/s00138-015-0677-y
+7a94936ce558627afde4d5b439ec15c59dbcdaa4,https://doi.org/10.1007/s11263-013-0665-5
+14d7bce17265738f10f48987bb7bffb3eafc676e,http://ieeexplore.ieee.org/document/7514504/
+143571c2fc9b1b69d3172f8a35b8fad50bc8202a,https://doi.org/10.1016/j.neucom.2014.07.066
+142e233adceed9171f718a214a7eba8497af4324,https://doi.org/10.1109/IJCNN.2014.6889504
+14efb131bed66f1874dd96170f714def8db45d90,http://doi.acm.org/10.1145/2818346.2830585
+14ae16e9911f6504d994503989db34d2d1cb2cd4,https://doi.org/10.1007/s11042-013-1616-4
+14bdd23ea8f4f6d7f4c193e5cbb0622362e12ae1,https://doi.org/10.1109/TIP.2006.884932
+1473e6f2d250307f0421f1e2ea68b6485d3bd481,https://doi.org/10.1109/IJCNN.2016.7727333
+8e9b92a805d1ce0bf4e0c04133d26e28db036e6a,https://doi.org/10.1109/DICTA.2017.8227428
+8ef465ff12ee1d2be2a99d1c628117a4ce890a6b,https://doi.org/10.1016/j.camwa.2010.08.082
+8e55486aa456cae7f04fe922689b3e99a0e409fe,http://doi.acm.org/10.1145/3123266.3123342
+8ebe2df4d82af79f0f082ced70f3a73d7fb93b66,https://doi.org/10.1109/URAI.2015.7358851
+8e272978dd1500ce6e4c2ef5e91d4332078ff757,https://doi.org/10.1007/11848035_5
+8e8a6623b4abd2452779c43f3c2085488dfcb323,http://doi.acm.org/10.1145/2993148.2997630
+8e21399bb102e993edd82b003c306a068a2474da,https://doi.org/10.1109/ICIP.2013.6738758
+22c06284a908d8ad0994ad52119773a034eed7ee,http://doi.acm.org/10.1145/2964284.2967236
+2238dddb76499b19035641d97711cf30d899dadb,https://doi.org/10.1109/SIU.2016.7496098
+22894c7a84984bd4822dcfe7c76a74673a242c36,http://doi.acm.org/10.1145/2993148.2997634
+22a10d8d2a2cb9055557a3b335d6706100890afb,https://doi.org/10.1109/SIU.2016.7496121
+22ccd537857aca1ee4b961f081f07c58d42a7f32,https://doi.org/10.1109/DICTA.2015.7371260
+22d5aeb25bb034f6ae2fc50b5cdd9934a85d6505,http://doi.acm.org/10.1145/2808469.2810102
+22dbdace88c8f4bda2843ed421e3708ec0744237,https://doi.org/10.1016/j.cviu.2013.12.010
+259ddd3c618feec51576baac7eaaf80ea924b791,https://doi.org/10.1007/s11257-007-9039-4
+254964096e523d5e48e03390ce440c9af337d200,http://dl.acm.org/citation.cfm?id=3005378
+250b73ec5a4f78b7b4ea3aba65c27fc1352154d5,https://doi.org/10.1109/TIP.2015.2463223
+256b46b12ab47283e6ada05fad6a2b501de35323,https://doi.org/10.1109/ICPR.2016.7900275
+252f202bfb14d363a969fce19df2972b83fa7ec0,http://doi.ieeecomputersociety.org/10.1109/FG.2017.120
+25bcd5aa3bbe56c992547fba683418655b46fc4a,https://doi.org/10.1016/j.eswa.2017.03.030
+2546dc7e2c2390233de16502413fe1097ecf3fb5,https://doi.org/10.1016/j.patrec.2011.01.009
+258b3b1df82186dd76064ef86b28555e91389b73,https://doi.org/10.1109/ACCESS.2017.2739822
diff --git a/scraper/reports/misc/db_paper_pdf-1.csv b/scraper/reports/misc/db_paper_pdf-1.csv
new file mode 100644
index 00000000..810fada9
--- /dev/null
+++ b/scraper/reports/misc/db_paper_pdf-1.csv
@@ -0,0 +1,1639 @@
+40b86ce698be51e36884edcc8937998979cd02ec,http://www.cs.bilkent.edu.tr/~duygulu/papers/SIU2006-face.pdf
+e465f596d73f3d2523dbf8334d29eb93a35f6da0,http://pdfs.semanticscholar.org/e465/f596d73f3d2523dbf8334d29eb93a35f6da0.pdf
+29f4ac49fbd6ddc82b1bb697820100f50fa98ab6,http://dhoiem.cs.illinois.edu/publications/acvhl2010_annotation_ian.pdf
+ceb763d6657a07b47e48e8a2956bcfdf2cf10818,http://pdfs.semanticscholar.org/ceb7/63d6657a07b47e48e8a2956bcfdf2cf10818.pdf
+24b37016fee57057cf403fe2fc3dda78476a8262,http://pdfs.semanticscholar.org/24b3/7016fee57057cf403fe2fc3dda78476a8262.pdf
+235d5620d05bb7710f5c4fa6fceead0eb670dec5,http://pdfs.semanticscholar.org/7497/50d81dbd4d9fdcc9c1728b797dbb538a8747.pdf
+13f6ab2f245b4a871720b95045c41a4204626814,http://pdfs.semanticscholar.org/9d74/382b6c4209c49de7c2b0fab7b34483ba0ddb.pdf
+107fc60a6c7d58a6e2d8572ad8c19cc321a9ef53,http://pdfs.semanticscholar.org/65ef/8706ae8c4e22d491550f5fff052ca3f5db21.pdf
+4d6c3a3f9410ca35eb3389ec7088f5e2c16ec3ea,http://www.researchgate.net/profile/Roland_Goecke/publication/221429947_Static_facial_expression_analysis_in_tough_conditions_Data_evaluation_protocol_and_benchmark/links/0fcfd50e81697312d6000000.pdf
+e1e6e6792e92f7110e26e27e80e0c30ec36ac9c2,http://pdfs.semanticscholar.org/e1e6/e6792e92f7110e26e27e80e0c30ec36ac9c2.pdf
+0db43ed25d63d801ce745fe04ca3e8b363bf3147,http://pdfs.semanticscholar.org/0db4/3ed25d63d801ce745fe04ca3e8b363bf3147.pdf
+5d33a10752af9ea30993139ac6e3a323992a5831,http://web.engr.illinois.edu/~iendres2/publications/cvpr2010_att.pdf
+7c2ec6f4ab3eae86e0c1b4f586e9c158fb1d719d,http://pdfs.semanticscholar.org/7c2e/c6f4ab3eae86e0c1b4f586e9c158fb1d719d.pdf
+c74aba9a096379b3dbe1ff95e7af5db45c0fd680,http://pdfs.semanticscholar.org/c74a/ba9a096379b3dbe1ff95e7af5db45c0fd680.pdf
+03af8cf40283ff30f1da3637b024319d0c79bdf0,https://www.researchgate.net/profile/Gary_Mckeown/publication/224251574_The_Belfast_Induced_Natural_Emotion_Database/links/0fcfd510a6b4384822000000.pdf
+190b3caa2e1a229aa68fd6b1a360afba6f50fde4,http://pdfs.semanticscholar.org/190b/3caa2e1a229aa68fd6b1a360afba6f50fde4.pdf
+b599f323ee17f12bf251aba928b19a09bfbb13bb,http://pdfs.semanticscholar.org/b599/f323ee17f12bf251aba928b19a09bfbb13bb.pdf
+587c48ec417be8b0334fa39075b3bfd66cc29dbe,http://pdfs.semanticscholar.org/ff91/95f99a1a28ced431362f5363c9a5da47a37b.pdf
+271df16f789bd2122f0268c3e2fa46bc0cb5f195,http://users.eecs.northwestern.edu/~mya671/mypapers/CVPR11_Yuan_Yang_Wu.pdf
+11269e98f072095ff94676d3dad34658f4876e0e,http://www.me.cs.scitec.kobe-u.ac.jp/~takigu/pdf/2015/ACII2015_submission_70.pdf
+a03cfd5c0059825c87d51f5dbf12f8a76fe9ff60,http://pdfs.semanticscholar.org/ac3b/033fd24913c31778cd4cb2d013239315d7a9.pdf
+713594c18978b965be87651bb553c28f8501df0a,http://pdfs.semanticscholar.org/fbfc/a34d52422cf8eac9d92d68dd16f95db5ef36.pdf
+fcd3d69b418d56ae6800a421c8b89ef363418665,http://pdfs.semanticscholar.org/fcd3/d69b418d56ae6800a421c8b89ef363418665.pdf
+4223666d1b0b1a60c74b14c2980069905088edc6,http://pdfs.semanticscholar.org/4223/666d1b0b1a60c74b14c2980069905088edc6.pdf
+9ed4ad41cbad645e7109e146ef6df73f774cd75d,http://pdfs.semanticscholar.org/a83e/175ad5b2066e207f5d2ec830ae05bac266b9.pdf
+5cfbeae360398de9e20e4165485837bd42b93217,http://pdfs.semanticscholar.org/5cfb/eae360398de9e20e4165485837bd42b93217.pdf
+2c92839418a64728438c351a42f6dc5ad0c6e686,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Masi_Pose-Aware_Face_Recognition_CVPR_2016_paper.pdf
+9028fbbd1727215010a5e09bc5758492211dec19,http://pdfs.semanticscholar.org/9028/fbbd1727215010a5e09bc5758492211dec19.pdf
+5ac80e0b94200ee3ecd58a618fe6afd077be0a00,http://pdfs.semanticscholar.org/5ac8/0e0b94200ee3ecd58a618fe6afd077be0a00.pdf
+a75dfb5a839f0eb4b613d150f54a418b7812aa90,https://arxiv.org/pdf/1708.02314v1.pdf
+1cbd3f96524ca2258fd2d5c504c7ea8da7fb1d16,http://pdfs.semanticscholar.org/1cbd/3f96524ca2258fd2d5c504c7ea8da7fb1d16.pdf
+b33e8db8ccabdfc49211e46d78d09b14557d4cba,http://pdfs.semanticscholar.org/b33e/8db8ccabdfc49211e46d78d09b14557d4cba.pdf
+c91103e6612fa7e664ccbc3ed1b0b5deac865b02,http://pdfs.semanticscholar.org/c911/03e6612fa7e664ccbc3ed1b0b5deac865b02.pdf
+18dfc2434a95f149a6cbb583cca69a98c9de9887,http://pdfs.semanticscholar.org/18df/c2434a95f149a6cbb583cca69a98c9de9887.pdf
+4350bb360797a4ade4faf616ed2ac8e27315968e,http://www.merl.com/publications/docs/TR2006-058.pdf
+4fbef7ce1809d102215453c34bf22b5f9f9aab26,http://pdfs.semanticscholar.org/4fbe/f7ce1809d102215453c34bf22b5f9f9aab26.pdf
+a5625cfe16d72bd00e987857d68eb4d8fc3ce4fb,http://pdfs.semanticscholar.org/a562/5cfe16d72bd00e987857d68eb4d8fc3ce4fb.pdf
+d28d697b578867500632b35b1b19d3d76698f4a9,http://pdfs.semanticscholar.org/d28d/697b578867500632b35b1b19d3d76698f4a9.pdf
+4511e09ee26044cb46073a8c2f6e1e0fbabe33e8,http://pdfs.semanticscholar.org/4511/e09ee26044cb46073a8c2f6e1e0fbabe33e8.pdf
+4fcd19b0cc386215b8bd0c466e42934e5baaa4b7,https://arxiv.org/pdf/1510.00562v1.pdf
+78a4cabf0afc94da123e299df5b32550cd638939,http://pdfs.semanticscholar.org/78a4/cabf0afc94da123e299df5b32550cd638939.pdf
+dac2103843adc40191e48ee7f35b6d86a02ef019,http://www.chennaisunday.com/2015DOTNET/Unsupervised%20Celebrity%20Face%20Naming%20in%20Web%20Videos.pdf
+24aac045f1e1a4c13a58eab4c7618dccd4c0e671,https://arxiv.org/pdf/1706.04124v1.pdf
+08c1f8f0e69c0e2692a2d51040ef6364fb263a40,http://pdfs.semanticscholar.org/0b20/0cf032430d74fd612601cc59d5af5608ceb4.pdf
+6a1beb34a2dfcdf36ae3c16811f1aef6e64abff2,http://pdfs.semanticscholar.org/6a1b/eb34a2dfcdf36ae3c16811f1aef6e64abff2.pdf
+3f14b504c2b37a0e8119fbda0eff52efb2eb2461,https://ibug.doc.ic.ac.uk/media/uploads/documents/eleftheriadis_tip_2016.pdf
+41a6196f88beced105d8bc48dd54d5494cc156fb,http://toc.proceedings.com/25848webtoc.pdf
+fbb6ee4f736519f7231830a8e337b263e91f06fe,http://pdfs.semanticscholar.org/fbb6/ee4f736519f7231830a8e337b263e91f06fe.pdf
+2d3482dcff69c7417c7b933f22de606a0e8e42d4,http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf
+2aaa6969c03f435b3ea8431574a91a0843bd320b,http://pdfs.semanticscholar.org/2aaa/6969c03f435b3ea8431574a91a0843bd320b.pdf
+2564848f094f7c1cd5e599aa907947b10b5c7df2,http://prr.hec.gov.pk/Thesis/252S.pdf
+0b85b50b6ff03a7886c702ceabad9ab8c8748fdc,http://pdfs.semanticscholar.org/0b85/b50b6ff03a7886c702ceabad9ab8c8748fdc.pdf
+0470b0ab569fac5bbe385fa5565036739d4c37f8,https://hal.inria.fr/inria-00321048/file/verbeek08cvpr.pdf
+b87b0fa1ac0aad0ca563844daecaeecb2df8debf,http://users.cs.cf.ac.uk/Paul.Rosin/resources/papers/portraits-CAe.pdf
+e16efd2ae73a325b7571a456618bfa682b51aef8,http://pdfs.semanticscholar.org/e16e/fd2ae73a325b7571a456618bfa682b51aef8.pdf
+aba770a7c45e82b2f9de6ea2a12738722566a149,http://pure.qub.ac.uk/portal/files/49719304/Face_Recognition_in_the_Scrambled.pdf
+0c53ef79bb8e5ba4e6a8ebad6d453ecf3672926d,https://arxiv.org/pdf/1609.00153v1.pdf
+023ed32ac3ea6029f09b8c582efbe3866de7d00a,http://pdfs.semanticscholar.org/023e/d32ac3ea6029f09b8c582efbe3866de7d00a.pdf
+580054294ca761500ada71f7d5a78acb0e622f19,http://www.jdl.ac.cn/project/faceId/paperreading/Paper/hhan_20090305_TIP2008_FaceRelighting.pdf
+72a00953f3f60a792de019a948174bf680cd6c9f,http://pdfs.semanticscholar.org/72a0/0953f3f60a792de019a948174bf680cd6c9f.pdf
+87bb183d8be0c2b4cfceb9ee158fee4bbf3e19fd,http://pdfs.semanticscholar.org/87bb/183d8be0c2b4cfceb9ee158fee4bbf3e19fd.pdf
+3e69ed088f588f6ecb30969bc6e4dbfacb35133e,http://pdfs.semanticscholar.org/3e69/ed088f588f6ecb30969bc6e4dbfacb35133e.pdf
+fb084b1fe52017b3898c871514cffcc2bdb40b73,http://pdfs.semanticscholar.org/fb08/4b1fe52017b3898c871514cffcc2bdb40b73.pdf
+62f60039a95692baaeaae79a013c7f545e2a6c3d,http://www.researchgate.net/profile/G_Boato/publication/242336498_Identify_computer_generated_characters_by_analysing_facial_expressions_variation/links/0f3175360a34547478000000.pdf
+dd0760bda44d4e222c0a54d41681f97b3270122b,http://pdfs.semanticscholar.org/dd07/60bda44d4e222c0a54d41681f97b3270122b.pdf
+046a694bbb3669f2ff705c6c706ca3af95db798c,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Xiong_Conditional_Convolutional_Neural_ICCV_2015_paper.pdf
+0c12cbb9b9740dfa2816b8e5cde69c2f5a715c58,http://openaccess.thecvf.com/content_cvpr_2017/papers/Zhao_Memory-Augmented_Attribute_Manipulation_CVPR_2017_paper.pdf
+41aa8c1c90d74f2653ef4b3a2e02ac473af61e47,http://pdfs.semanticscholar.org/41aa/8c1c90d74f2653ef4b3a2e02ac473af61e47.pdf
+40a9f3d73c622cceee5e3d6ca8faa56ed6ebef60,http://www.site.uottawa.ca/~wslee/publication/CCECE2006.pdf
+6ec004e4c1171c4c4858eec7c927f567684b80bc,http://www.researchgate.net/profile/Bongnam_Kang/publication/221292310_The_POSTECH_face_database_(PF07)_and_performance_evaluation/links/00463531e60efa5310000000.pdf
+f935225e7811858fe9ef6b5fd3fdd59aec9abd1a,http://pdfs.semanticscholar.org/f935/225e7811858fe9ef6b5fd3fdd59aec9abd1a.pdf
+4e490cf3cf26fe46507bb55a548c403b9c685ba0,http://labnic.unige.ch/nic/papers/SJ_DG_SD_KND_IC_MIV_DS_PV_KRS_IEEETransac11.pdf
+5fa1724a79a9f7090c54925f6ac52f1697d6b570,http://pdfs.semanticscholar.org/5fa1/724a79a9f7090c54925f6ac52f1697d6b570.pdf
+0786a6d5ce6db8a68cef05bb5f5b84ec1b0c2cde,http://vipl.ict.ac.cn/sites/default/files/papers/files/2008_ACMMM_cxliu_Naming%20Faces%20in%20Broadcast%20News%20Video%20by%20Image%20Google.pdf
+3946b8f862ecae64582ef0912ca2aa6d3f6f84dc,http://pdfs.semanticscholar.org/3946/b8f862ecae64582ef0912ca2aa6d3f6f84dc.pdf
+2d88e7922d9f046ace0234f9f96f570ee848a5b5,http://pdfs.semanticscholar.org/2d88/e7922d9f046ace0234f9f96f570ee848a5b5.pdf
+66886f5af67b22d14177119520bd9c9f39cdd2e6,http://pdfs.semanticscholar.org/6688/6f5af67b22d14177119520bd9c9f39cdd2e6.pdf
+280d59fa99ead5929ebcde85407bba34b1fcfb59,http://mirlab.org/conference_papers/International_Conference/ICASSP%202016/pdfs/0002662.pdf
+2bbe89f61a8d6d4d6e39fdcaf8c185f110a01c78,http://www3.ntu.edu.sg/home/wanggang/TIFS15.pdf
+e4aeaf1af68a40907fda752559e45dc7afc2de67,http://pdfs.semanticscholar.org/e4ae/af1af68a40907fda752559e45dc7afc2de67.pdf
+6e782073a013ce3dbc5b9b56087fd0300c510f67,http://pdfs.semanticscholar.org/6e78/2073a013ce3dbc5b9b56087fd0300c510f67.pdf
+a0f94e9400938cbd05c4b60b06d9ed58c3458303,http://people.ee.duke.edu/~lcarin/Hoey_Little07.pdf
+070ab604c3ced2c23cce2259043446c5ee342fd6,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR_2007/data/papers/workshops/Biometrics/papers/24-p75.pdf
+b6c53891dff24caa1f2e690552a1a5921554f994,http://pdfs.semanticscholar.org/b6c5/3891dff24caa1f2e690552a1a5921554f994.pdf
+31ef5419e026ef57ff20de537d82fe3cfa9ee741,http://pdfs.semanticscholar.org/9a10/78b6e3810c95fc4b87154ad62c0f133caebb.pdf
+433a6d6d2a3ed8a6502982dccc992f91d665b9b3,http://pdfs.semanticscholar.org/433a/6d6d2a3ed8a6502982dccc992f91d665b9b3.pdf
+28bcf31f794dc27f73eb248e5a1b2c3294b3ec9d,http://pdfs.semanticscholar.org/28bc/f31f794dc27f73eb248e5a1b2c3294b3ec9d.pdf
+57a1466c5985fe7594a91d46588d969007210581,https://www.wjscheirer.com/projects/unconstrained-face/amfg_2010_poster.pdf
+133da0d8c7719a219537f4a11c915bf74c320da7,http://pdfs.semanticscholar.org/4f4f/920eb43399d8d05b42808e45b56bdd36a929.pdf
+1d1a7ef193b958f9074f4f236060a5f5e7642fc1,http://pdfs.semanticscholar.org/db40/804914afbb7f8279ca9a4f52e0ade695f19e.pdf
+a503eb91c0bce3a83bf6f524545888524b29b166,http://pdfs.semanticscholar.org/a503/eb91c0bce3a83bf6f524545888524b29b166.pdf
+b3f7c772acc8bc42291e09f7a2b081024a172564,http://pdfs.semanticscholar.org/b3f7/c772acc8bc42291e09f7a2b081024a172564.pdf
+080c204edff49bf85b335d3d416c5e734a861151,http://pdfs.semanticscholar.org/d3d1/09d81dd0911dfde259b6878d737e50c834eb.pdf
+4fa0d73b8ba114578744c2ebaf610d2ca9694f45,http://pdfs.semanticscholar.org/4fa0/d73b8ba114578744c2ebaf610d2ca9694f45.pdf
+5c8672c0d2f28fd5d2d2c4b9818fcff43fb01a48,http://pdfs.semanticscholar.org/5c86/72c0d2f28fd5d2d2c4b9818fcff43fb01a48.pdf
+621f656fedda378ceaa9c0096ebb1556a42e5e0f,http://sibgrapi.sid.inpe.br/col/sid.inpe.br/sibgrapi/2016/07.19.17.24/doc/PID4367205.pdf?ibiurl.language=en
+411503a304a661b0c04c2b446a6e43e4a70942dc,http://www0.cs.ucl.ac.uk/staff/s.prince/Papers/CRV2010FaceClustFinal.pdf
+5c36d8bb0815fd4ff5daa8351df4a7e2d1b32934,http://www.istc-cc.cmu.edu/publications/papers/2016/GeePS-cui-eurosys16.pdf
+1677d29a108a1c0f27a6a630e74856e7bddcb70d,http://pdfs.semanticscholar.org/1677/d29a108a1c0f27a6a630e74856e7bddcb70d.pdf
+30b103d59f8460d80bb9eac0aa09aaa56c98494f,http://pdfs.semanticscholar.org/30b1/03d59f8460d80bb9eac0aa09aaa56c98494f.pdf
+0278acdc8632f463232e961563e177aa8c6d6833,http://www.pitt.edu/~jeffcohn/biblio/TPAMI2547397%20FINAL.pdf
+4faded442b506ad0f200a608a69c039e92eaff11,http://pdfs.semanticscholar.org/4fad/ed442b506ad0f200a608a69c039e92eaff11.pdf
+3d0379688518cc0e8f896e30815d0b5e8452d4cd,http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/WorkShops/data/papers/007.pdf
+846c028643e60fefc86bae13bebd27341b87c4d1,http://pdfs.semanticscholar.org/a06f/510ee0f206abc4c44a2b68455d88a1748427.pdf
+3896c62af5b65d7ba9e52f87505841341bb3e8df,http://pdfs.semanticscholar.org/3896/c62af5b65d7ba9e52f87505841341bb3e8df.pdf
+38f7f3c72e582e116f6f079ec9ae738894785b96,http://pdfs.semanticscholar.org/38f7/f3c72e582e116f6f079ec9ae738894785b96.pdf
+1b70bbf7cdfc692873ce98dd3c0e191580a1b041,http://pdfs.semanticscholar.org/1b70/bbf7cdfc692873ce98dd3c0e191580a1b041.pdf
+b755505bdd5af078e06427d34b6ac2530ba69b12,http://www.cse.msu.edu/rgroups/biometrics/Publications/Face/Maengetal_NIFaceRecognitionDistance_IJCB11.pdf
+d8bf148899f09a0aad18a196ce729384a4464e2b,http://pdfs.semanticscholar.org/d8bf/148899f09a0aad18a196ce729384a4464e2b.pdf
+a6e21438695dbc3a184d33b6cf5064ddf655a9ba,http://pdfs.semanticscholar.org/b673/ffe63c5d0723009042f0f922f19f093b7e34.pdf
+0323b618d3a4c24bdda4f42361e19a2a7d497da5,http://www.ecse.rpi.edu/homepages/qji/Papers/Simultaneous%20Paper_TIP_Revised_V4_email.pdf
+550858b7f5efaca2ebed8f3969cb89017bdb739f,http://pdfs.semanticscholar.org/5508/58b7f5efaca2ebed8f3969cb89017bdb739f.pdf
+111a9645ad0108ad472b2f3b243ed3d942e7ff16,http://pdfs.semanticscholar.org/111a/9645ad0108ad472b2f3b243ed3d942e7ff16.pdf
+44f48a4b1ef94a9104d063e53bf88a69ff0f55f3,http://pdfs.semanticscholar.org/44f4/8a4b1ef94a9104d063e53bf88a69ff0f55f3.pdf
+167736556bea7fd57cfabc692ec4ae40c445f144,http://pdfs.semanticscholar.org/1677/36556bea7fd57cfabc692ec4ae40c445f144.pdf
+1a878e4667fe55170252e3f41d38ddf85c87fcaf,http://pdfs.semanticscholar.org/1a87/8e4667fe55170252e3f41d38ddf85c87fcaf.pdf
+b4d7ca26deb83cec1922a6964c1193e8dd7270e7,http://pdfs.semanticscholar.org/b4d7/ca26deb83cec1922a6964c1193e8dd7270e7.pdf
+5aad5e7390211267f3511ffa75c69febe3b84cc7,http://pdfs.semanticscholar.org/5aad/5e7390211267f3511ffa75c69febe3b84cc7.pdf
+687e17db5043661f8921fb86f215e9ca2264d4d2,http://www.ece.northwestern.edu/~ganghua/publication/ICCV09a.pdf
+66d512342355fb77a4450decc89977efe7e55fa2,http://pdfs.semanticscholar.org/66d5/12342355fb77a4450decc89977efe7e55fa2.pdf
+5a34a9bb264a2594c02b5f46b038aa1ec3389072,http://www.mpi-inf.mpg.de/fileadmin/inf/d2/akata/TPAMI2487986.pdf
+0726a45eb129eed88915aa5a86df2af16a09bcc1,http://www.ri.cmu.edu/pub_files/2016/7/root-compressed.pdf
+530ce1097d0681a0f9d3ce877c5ba31617b1d709,https://pdfs.semanticscholar.org/530c/e1097d0681a0f9d3ce877c5ba31617b1d709.pdf
+bc2852fa0a002e683aad3fb0db5523d1190d0ca5,http://pdfs.semanticscholar.org/bc28/52fa0a002e683aad3fb0db5523d1190d0ca5.pdf
+e00d4e4ba25fff3583b180db078ef962bf7d6824,http://pdfs.semanticscholar.org/e00d/4e4ba25fff3583b180db078ef962bf7d6824.pdf
+4e6c17966efae956133bf8f22edeffc24a0470c1,http://pdfs.semanticscholar.org/4e6c/17966efae956133bf8f22edeffc24a0470c1.pdf
+ae4390873485c9432899977499c3bf17886fa149,http://pdfs.semanticscholar.org/ae43/90873485c9432899977499c3bf17886fa149.pdf
+1394ca71fc52db972366602a6643dc3e65ee8726,https://www.cl.cam.ac.uk/~tb346/pub/papers/icmi2016EmoReact.pdf
+968b983fa9967ff82e0798a5967920188a3590a8,http://pdfs.semanticscholar.org/968b/983fa9967ff82e0798a5967920188a3590a8.pdf
+6261eb75066f779e75b02209fbd3d0f02d3e1e45,http://pdfs.semanticscholar.org/6261/eb75066f779e75b02209fbd3d0f02d3e1e45.pdf
+e496d6be415038de1636bbe8202cac9c1cea9dbe,http://pdfs.semanticscholar.org/e496/d6be415038de1636bbe8202cac9c1cea9dbe.pdf
+174f46eccb5852c1f979d8c386e3805f7942bace,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Kae_The_Shape-Time_Random_2014_CVPR_paper.pdf
+9949ac42f39aeb7534b3478a21a31bc37fe2ffe3,http://pdfs.semanticscholar.org/9949/ac42f39aeb7534b3478a21a31bc37fe2ffe3.pdf
+f74917fc0e55f4f5682909dcf6929abd19d33e2e,http://pdfs.semanticscholar.org/f749/17fc0e55f4f5682909dcf6929abd19d33e2e.pdf
+9103148dd87e6ff9fba28509f3b265e1873166c9,http://pdfs.semanticscholar.org/9103/148dd87e6ff9fba28509f3b265e1873166c9.pdf
+86a8b3d0f753cb49ac3250fa14d277983e30a4b7,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2013/W09/papers/Zhang_Exploiting_Unlabeled_Ages_2013_CVPR_paper.pdf
+a9be20954e9177d8b2bc39747acdea4f5496f394,http://acsweb.ucsd.edu/~yuw176/report/cvpr_2016.pdf
+4aabd6db4594212019c9af89b3e66f39f3108aac,http://pdfs.semanticscholar.org/4aab/d6db4594212019c9af89b3e66f39f3108aac.pdf
+174930cac7174257515a189cd3ecfdd80ee7dd54,https://arxiv.org/pdf/1502.02766v3.pdf
+a14db48785d41cd57d4eac75949a6b79fc684e70,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Barkan_Fast_High_Dimensional_2013_ICCV_paper.pdf
+25d514d26ecbc147becf4117512523412e1f060b,http://www.iab-rubric.org/papers/2015_ICB_CrowdVideoFaceDataset.pdf
+0be80da851a17dd33f1e6ffdd7d90a1dc7475b96,http://pdfs.semanticscholar.org/0be8/0da851a17dd33f1e6ffdd7d90a1dc7475b96.pdf
+3b092733f428b12f1f920638f868ed1e8663fe57,http://www.math.jhu.edu/~data/RamaPapers/PerformanceBounds.pdf
+1329bcac5ebd0b08ce33ae1af384bd3e7a0deaca,http://pdfs.semanticscholar.org/e5c5/e5531aaa661c223088454572de11d2f266c3.pdf
+d50751da2997e7ebc89244c88a4d0d18405e8507,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553713.pdf
+f8f2d2910ce8b81cb4bbf84239f9229888158b34,http://pdfs.semanticscholar.org/f8f2/d2910ce8b81cb4bbf84239f9229888158b34.pdf
+488d3e32d046232680cc0ba80ce3879f92f35cac,http://pdfs.semanticscholar.org/488d/3e32d046232680cc0ba80ce3879f92f35cac.pdf
+fea0a5ed1bc83dd1b545a5d75db2e37a69489ac9,http://pdfs.semanticscholar.org/fea0/a5ed1bc83dd1b545a5d75db2e37a69489ac9.pdf
+307a810d1bf6f747b1bd697a8a642afbd649613d,http://pdfs.semanticscholar.org/307a/810d1bf6f747b1bd697a8a642afbd649613d.pdf
+23172f9a397f13ae1ecb5793efd81b6aba9b4537,http://pdfs.semanticscholar.org/2317/2f9a397f13ae1ecb5793efd81b6aba9b4537.pdf
+57f7d8c6ec690bd436e70d7761bc5f46e993be4c,https://opus.lib.uts.edu.au/bitstream/10453/10785/3/2009001878_Du.pdf
+45215e330a4251801877070c85c81f42c2da60fb,http://pdfs.semanticscholar.org/4521/5e330a4251801877070c85c81f42c2da60fb.pdf
+b32631f456397462b3530757f3a73a2ccc362342,http://pdfs.semanticscholar.org/b326/31f456397462b3530757f3a73a2ccc362342.pdf
+2878b06f3c416c98496aad6fc2ddf68d2de5b8f6,http://www.cs.fsu.edu/~liux/research/pub/papers/Wu-Two-Stage-CVIU-2008.pdf
+38787338ba659f0bfbeba11ec5b7748ffdbb1c3d,http://www.eurasip.org/Proceedings/Eusipco/Eusipco2005/defevent/papers/cr1885.pdf
+41de109bca9343691f1d5720df864cdbeeecd9d0,http://pdfs.semanticscholar.org/41de/109bca9343691f1d5720df864cdbeeecd9d0.pdf
+e4bc529ced68fae154e125c72af5381b1185f34e,http://pdfs.semanticscholar.org/e4bc/529ced68fae154e125c72af5381b1185f34e.pdf
+3acb6b3e3f09f528c88d5dd765fee6131de931ea,http://vislab.ucr.edu/PUBLICATIONS/pubs/Journal%20and%20Conference%20Papers/after10-1-1997/Conference/2017/novelRepresentation.pdf
+a93781e6db8c03668f277676d901905ef44ae49f,http://pdfs.semanticscholar.org/a937/81e6db8c03668f277676d901905ef44ae49f.pdf
+4c4236b62302957052f1bbfbd34dbf71ac1650ec,http://www.eurecom.fr/en/publication/3397/download/mm-publi-3397.pdf
+3d143cfab13ecd9c485f19d988242e7240660c86,http://pdfs.semanticscholar.org/3d14/3cfab13ecd9c485f19d988242e7240660c86.pdf
+b185f0a39384ceb3c4923196aeed6d68830a069f,http://pdfs.semanticscholar.org/b185/f0a39384ceb3c4923196aeed6d68830a069f.pdf
+29f27448e8dd843e1c4d2a78e01caeaea3f46a2d,http://pdfs.semanticscholar.org/29f2/7448e8dd843e1c4d2a78e01caeaea3f46a2d.pdf
+6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4,http://arxiv.org/pdf/1411.7766v2.pdf
+3039627fa612c184228b0bed0a8c03c7f754748c,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Wu_Robust_Regression_on_2015_CVPR_paper.pdf
+11fe6d45aa2b33c2ec10d9786a71c15ec4d3dca8,http://elderlab.apps01.yorku.ca/wp-content/uploads/2016/12/PrincePAMI08.pdf
+df8da144a695269e159fb0120bf5355a558f4b02,http://pdfs.semanticscholar.org/df8d/a144a695269e159fb0120bf5355a558f4b02.pdf
+06fb92e110d077c27d401d2f9483964cd0615284,http://www.cs.sunysb.edu/~ial/content/papers/2009/wang_pami09.pdf
+1134a6be0f469ff2c8caab266bbdacf482f32179,http://pdfs.semanticscholar.org/1134/a6be0f469ff2c8caab266bbdacf482f32179.pdf
+6d4b5444c45880517213a2fdcdb6f17064b3fa91,http://pdfs.semanticscholar.org/6d4b/5444c45880517213a2fdcdb6f17064b3fa91.pdf
+9821669a989a3df9d598c1b4332d17ae8e35e294,http://pdfs.semanticscholar.org/9821/669a989a3df9d598c1b4332d17ae8e35e294.pdf
+1586871a1ddfe031b885b94efdbff647cf03eff1,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w18/papers/Ginosar_A_Century_of_ICCV_2015_paper.pdf
+205f3d654b7d28d00d15b034a8c5b2a8740bd8b6,https://www.researchgate.net/profile/Ya_Su4/publication/51686551_Discriminant_learning_through_multiple_principal_angles_for_visual_recognition/links/00b495253b0057832b000000.pdf
+02c38fa9a8ada6040ef21de17daf8d5e5cdc60c7,http://members.e-inclusion.crim.ca/files/articles/CRV_2006.pdf
+4a5592ae1f5e9fa83d9fa17451c8ab49608421e4,http://sergioescalera.com/wp-content/uploads/2015/08/cha11g-lopezATS.pdf
+1be0ce87bb5ba35fa2b45506ad997deef6d6a0a8,http://pdfs.semanticscholar.org/b1c8/4ab7cc0c85e8aa8be4c0ec32bad225c9c630.pdf
+d03265ea9200a993af857b473c6bf12a095ca178,http://pdfs.semanticscholar.org/d032/65ea9200a993af857b473c6bf12a095ca178.pdf
+a14ae81609d09fed217aa12a4df9466553db4859,http://homepages.dcc.ufmg.br/~william/papers/paper_2011_TIP.pdf
+b249f10a30907a80f2a73582f696bc35ba4db9e2,http://pdfs.semanticscholar.org/f06d/6161eef9325285b32356e1c4b5527479eb9b.pdf
+db428d03e3dfd98624c23e0462817ad17ef14493,http://pdfs.semanticscholar.org/db42/8d03e3dfd98624c23e0462817ad17ef14493.pdf
+5ae970294aaba5e0225122552c019eb56f20af74,http://pdfs.semanticscholar.org/5ae9/70294aaba5e0225122552c019eb56f20af74.pdf
+90c2d4d9569866a0b930e91713ad1da01c2a6846,http://pdfs.semanticscholar.org/90c2/d4d9569866a0b930e91713ad1da01c2a6846.pdf
+26c884829897b3035702800937d4d15fef7010e4,http://pdfs.semanticscholar.org/9200/10cc55d2658e04b01783118b59b7d90420c6.pdf
+6a657995b02bc9dee130701138ea45183c18f4ae,http://pdfs.semanticscholar.org/6a65/7995b02bc9dee130701138ea45183c18f4ae.pdf
+3c8da376576938160cbed956ece838682fa50e9f,http://shodhganga.inflibnet.ac.in/bitstream/10603/49167/11/11_chapter%204.pdf
+8d71872d5877c575a52f71ad445c7e5124a4b174,http://pdfs.semanticscholar.org/8d71/872d5877c575a52f71ad445c7e5124a4b174.pdf
+33aff42530c2fd134553d397bf572c048db12c28,http://openaccess.thecvf.com/content_iccv_2015/papers/Ruiz_From_Emotions_to_ICCV_2015_paper.pdf
+89c51f73ec5ebd1c2a9000123deaf628acf3cdd8,http://pdfs.semanticscholar.org/89c5/1f73ec5ebd1c2a9000123deaf628acf3cdd8.pdf
+0172867f4c712b33168d9da79c6d3859b198ed4c,http://www.cin.ufpe.br/~rps/Artigos/Expression%20and%20Illumination%20Invariant%20Preprocessing%20Technique%20for%20Face%20Recognition.pdf
+5c435c4bc9c9667f968f891e207d241c3e45757a,http://pdfs.semanticscholar.org/eb6a/13c8a607dfc535e5f31b7c8843335674644c.pdf
+28f5138d63e4acafca49a94ae1dc44f7e9d84827,http://pdfs.semanticscholar.org/28f5/138d63e4acafca49a94ae1dc44f7e9d84827.pdf
+6821113166b030d2123c3cd793dd63d2c909a110,http://pdfs.semanticscholar.org/6821/113166b030d2123c3cd793dd63d2c909a110.pdf
+e379e73e11868abb1728c3acdc77e2c51673eb0d,http://pdfs.semanticscholar.org/e379/e73e11868abb1728c3acdc77e2c51673eb0d.pdf
+66b9d954dd8204c3a970d86d91dd4ea0eb12db47,http://pdfs.semanticscholar.org/f3ec/7e58da49f39b807ff1c98d0bf574ef5f0720.pdf
+68cf263a17862e4dd3547f7ecc863b2dc53320d8,http://pdfs.semanticscholar.org/68cf/263a17862e4dd3547f7ecc863b2dc53320d8.pdf
+a8748a79e8d37e395354ba7a8b3038468cb37e1f,http://www.cv-foundation.org/openaccess/content_cvpr_2016_workshops/w9/papers/Reale_Seeing_the_Forest_CVPR_2016_paper.pdf
+7d73adcee255469aadc5e926066f71c93f51a1a5,http://mirlab.org/conference_papers/International_Conference/ICASSP%202016/pdfs/0001283.pdf
+5ca14fa73da37855bfa880b549483ee2aba26669,http://pdfs.semanticscholar.org/5ca1/4fa73da37855bfa880b549483ee2aba26669.pdf
+54f442c7fa4603f1814ebd8eba912a00dceb5cb2,http://pdfs.semanticscholar.org/54f4/42c7fa4603f1814ebd8eba912a00dceb5cb2.pdf
+01379c50c392c104694ccb871a4b6a36d514f102,http://sse.tongji.edu.cn/hyli/Publications/icmla2010.pdf
+56c0b225fd57cfe173e5206a4bb0ce153bfecc29,http://www.sfu.ca/~wya16/ProfileFG08.pdf
+3504907a2e3c81d78e9dfe71c93ac145b1318f9c,https://arxiv.org/pdf/1605.02686v3.pdf
+80277fb3a8a981933533cf478245f262652a33b5,http://pdfs.semanticscholar.org/8027/7fb3a8a981933533cf478245f262652a33b5.pdf
+056294ff40584cdce81702b948f88cebd731a93e,https://arxiv.org/pdf/1506.08438v3.pdf
+353a89c277cca3e3e4e8c6a199ae3442cdad59b5,http://pdfs.semanticscholar.org/353a/89c277cca3e3e4e8c6a199ae3442cdad59b5.pdf
+66e6f08873325d37e0ec20a4769ce881e04e964e,http://www.cc.gatech.edu/~hays/papers/attribute_ijcv.pdf
+1c4ceae745fe812d8251fda7aad03210448ae25e,http://pdfs.semanticscholar.org/98d3/6d12cf6f2da181a9c1fb9d652ceaa57eb7bb.pdf
+834b15762f97b4da11a2d851840123dbeee51d33,http://pdfs.semanticscholar.org/834b/15762f97b4da11a2d851840123dbeee51d33.pdf
+927ad0dceacce2bb482b96f42f2fe2ad1873f37a,http://pdfs.semanticscholar.org/927a/d0dceacce2bb482b96f42f2fe2ad1873f37a.pdf
+bd8f3fef958ebed5576792078f84c43999b1b207,http://pdfs.semanticscholar.org/bd8f/3fef958ebed5576792078f84c43999b1b207.pdf
+5bde1718253ec28a753a892b0ba82d8e553b6bf3,http://pdfs.semanticscholar.org/5bde/1718253ec28a753a892b0ba82d8e553b6bf3.pdf
+214db8a5872f7be48cdb8876e0233efecdcb6061,http://users.eecs.northwestern.edu/~mya671/mypapers/ICCV13_Zhang_Yang_Wang_Lin_Tian.pdf
+62e913431bcef5983955e9ca160b91bb19d9de42,http://pdfs.semanticscholar.org/62e9/13431bcef5983955e9ca160b91bb19d9de42.pdf
+9cbb6e42a35f26cf1d19f4875cd7f6953f10b95d,http://pdfs.semanticscholar.org/9cbb/6e42a35f26cf1d19f4875cd7f6953f10b95d.pdf
+3327e21b46434f6441018922ef31bddba6cc8176,http://www.metaio.com/fileadmin/upload/research_files/paper/ISMAR2014_Real-Time_Illumination_Estimation_from_Faces_for_Coherent_Rendering_paper.pdf
+0b6a5200c33434cbfa9bf24ba482f6e06bf5fff7,http://pdfs.semanticscholar.org/0b6a/5200c33434cbfa9bf24ba482f6e06bf5fff7.pdf
+28312c3a47c1be3a67365700744d3d6665b86f22,http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf
+98b2f21db344b8b9f7747feaf86f92558595990c,http://pdfs.semanticscholar.org/b9f0/29075a36f15202f0d213fe222dcf237fe65f.pdf
+cd436f05fb4aeeda5d1085f2fe0384526571a46e,http://pdfs.semanticscholar.org/cd43/6f05fb4aeeda5d1085f2fe0384526571a46e.pdf
+9a276c72acdb83660557489114a494b86a39f6ff,http://pdfs.semanticscholar.org/9a27/6c72acdb83660557489114a494b86a39f6ff.pdf
+47dabb566f2bdd6b3e4fa7efc941824d8b923a13,http://pdfs.semanticscholar.org/47da/bb566f2bdd6b3e4fa7efc941824d8b923a13.pdf
+d6cf3cab269877c58a16be011b74e07838d957c2,http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0162.pdf
+2ff9618ea521df3c916abc88e7c85220d9f0ff06,http://pdfs.semanticscholar.org/bb08/f64565ee68e868dcab904cada9646dd5f676.pdf
+c178a86f4c120eca3850a4915134fff44cbccb48,http://pdfs.semanticscholar.org/c178/a86f4c120eca3850a4915134fff44cbccb48.pdf
+2d4b9fe3854ccce24040074c461d0c516c46baf4,https://arxiv.org/pdf/1704.04671v1.pdf
+c6096986b4d6c374ab2d20031e026b581e7bf7e9,http://pdfs.semanticscholar.org/c609/6986b4d6c374ab2d20031e026b581e7bf7e9.pdf
+237fa91c8e8098a0d44f32ce259ff0487aec02cf,http://ira.lib.polyu.edu.hk/bitstream/10397/241/1/SMCB_C_36_4_06_B.pdf
+966e36f15b05ef8436afecf57a97b73d6dcada94,http://pdfs.semanticscholar.org/966e/36f15b05ef8436afecf57a97b73d6dcada94.pdf
+1ef4815f41fa3a9217a8a8af12cc385f6ed137e1,https://www.d2.mpi-inf.mpg.de/sites/default/files/wood2015_iccv.pdf
+a2eb90e334575d9b435c01de4f4bf42d2464effc,http://poseidon.csd.auth.gr/papers/PUBLISHED/CONFERENCE/pdf/Buciu04b.pdf
+c10b0a6ba98aa95d740a0d60e150ffd77c7895ad,http://pdfs.semanticscholar.org/c10b/0a6ba98aa95d740a0d60e150ffd77c7895ad.pdf
+d448d67c6371f9abf533ea0f894ef2f022b12503,http://pdfs.semanticscholar.org/d448/d67c6371f9abf533ea0f894ef2f022b12503.pdf
+68003e92a41d12647806d477dd7d20e4dcde1354,http://pdfs.semanticscholar.org/db86/41ed047da4a90d53414edfe126c845141d69.pdf
+fcf8bb1bf2b7e3f71fb337ca3fcf3d9cf18daa46,http://pdfs.semanticscholar.org/fcf8/bb1bf2b7e3f71fb337ca3fcf3d9cf18daa46.pdf
+3c57e28a4eb463d532ea2b0b1ba4b426ead8d9a0,http://pdfs.semanticscholar.org/73cc/fdedbd7d72a147925727ba1932f9488cfde3.pdf
+09f58353e48780c707cf24a0074e4d353da18934,http://www.cse.msu.edu/rgroups/biometrics/Publications/Face/BestrowdenBishtKlontzJain_CrowdsourcingHumanPeformance_IJCB2014.pdf
+4c523db33c56759255b2c58c024eb6112542014e,http://www0.cs.ucl.ac.uk/staff/P.Li/publication/ICCV09JaniaAghajanian.pdf
+a546fd229f99d7fe3cf634234e04bae920a2ec33,http://pdfs.semanticscholar.org/a546/fd229f99d7fe3cf634234e04bae920a2ec33.pdf
+75da1df4ed319926c544eefe17ec8d720feef8c0,http://pdfs.semanticscholar.org/75da/1df4ed319926c544eefe17ec8d720feef8c0.pdf
+29631ca6cff21c9199c70bcdbbcd5f812d331a96,http://pdfs.semanticscholar.org/2963/1ca6cff21c9199c70bcdbbcd5f812d331a96.pdf
+635158d2da146e9de559d2742a2fa234e06b52db,http://www.openu.ac.il/home/hassner/projects/cnn_emotions/LeviHassnerICMI15.pdf
+7a65fc9e78eff3ab6062707deaadde024d2fad40,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w11/papers/Zhu_A_Study_on_ICCV_2015_paper.pdf
+062d67af7677db086ef35186dc936b4511f155d7,http://openaccess.thecvf.com/content_cvpr_2016/papers/Chang_They_Are_Not_CVPR_2016_paper.pdf
+434d6726229c0f556841fad20391c18316806f73,https://arxiv.org/pdf/1704.03114v2.pdf
+06fe63b34fcc8ff68b72b5835c4245d3f9b8a016,http://chechiklab.biu.ac.il/~gal/Papers/Mesnil_MachineLearning2013_objects_and_their_parts.pdf
+8adb2fcab20dab5232099becbd640e9c4b6a905a,http://pdfs.semanticscholar.org/d0d1/50a51c46cfb3bdd9d5fb570018c6534b57ff.pdf
+05a312478618418a2efb0a014b45acf3663562d7,http://people.ee.duke.edu/~lcarin/AccelGibbs.pdf
+cd4c047f4d4df7937aff8fc76f4bae7718004f40,http://pdfs.semanticscholar.org/cd4c/047f4d4df7937aff8fc76f4bae7718004f40.pdf
+43fb9efa79178cb6f481387b7c6e9b0ca3761da8,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W08/papers/Katti_Mixture_of_Parts_2015_CVPR_paper.pdf
+09fa54f1ab7aaa83124d2415bfc6eb51e4b1f081,http://acberg.com/papers/street2shop.pdf
+df054fa8ee6bb7d2a50909939d90ef417c73604c,http://pdfs.semanticscholar.org/df05/4fa8ee6bb7d2a50909939d90ef417c73604c.pdf
+3cc3cf57326eceb5f20a02aefae17108e8c8ab57,http://pdfs.semanticscholar.org/3cc3/cf57326eceb5f20a02aefae17108e8c8ab57.pdf
+3107316f243233d45e3c7e5972517d1ed4991f91,https://arxiv.org/pdf/1703.10155v1.pdf
+bb6bf94bffc37ef2970410e74a6b6dc44a7f4feb,http://pdfs.semanticscholar.org/bb6b/f94bffc37ef2970410e74a6b6dc44a7f4feb.pdf
+ae85c822c6aec8b0f67762c625a73a5d08f5060d,http://tamaraberg.com/papers/yamaguchi2014retrieving.pdf
+15252b7af081761bb00535aac6bd1987391f9b79,http://cvsp.cs.ntua.gr/publications/confr/KoutrasMaragos_EyeGaze_ICIP15.pdf
+951f21a5671a4cd14b1ef1728dfe305bda72366f,http://pdfs.semanticscholar.org/951f/21a5671a4cd14b1ef1728dfe305bda72366f.pdf
+bb557f4af797cae9205d5c159f1e2fdfe2d8b096,http://pdfs.semanticscholar.org/bb55/7f4af797cae9205d5c159f1e2fdfe2d8b096.pdf
+c42a8969cd76e9f54d43f7f4dd8f9b08da566c5f,http://pdfs.semanticscholar.org/c42a/8969cd76e9f54d43f7f4dd8f9b08da566c5f.pdf
+3f957142ef66f2921e7c8c7eadc8e548dccc1327,https://ibug.doc.ic.ac.uk/media/uploads/documents/combined_model_lda_&_svms.pdf
+293ade202109c7f23637589a637bdaed06dc37c9,http://pdfs.semanticscholar.org/293a/de202109c7f23637589a637bdaed06dc37c9.pdf
+64153df77fe137b7c6f820a58f0bdb4b3b1a879b,http://pdfs.semanticscholar.org/6415/3df77fe137b7c6f820a58f0bdb4b3b1a879b.pdf
+486a82f50835ea888fbc5c6babf3cf8e8b9807bc,http://pdfs.semanticscholar.org/486a/82f50835ea888fbc5c6babf3cf8e8b9807bc.pdf
+0708059e3bedbea1cbfae1c8cd6b7259d4b56b5b,http://www.cs.tut.fi/~iosifidi/files/conference/2016_EUSIPCO_GRMCSVM.pdf?dl=0
+280bc9751593897091015aaf2cab39805768b463,http://pdfs.semanticscholar.org/280b/c9751593897091015aaf2cab39805768b463.pdf
+919d0e681c4ef687bf0b89fe7c0615221e9a1d30,http://pdfs.semanticscholar.org/919d/0e681c4ef687bf0b89fe7c0615221e9a1d30.pdf
+f6c70635241968a6d5fd5e03cde6907022091d64,http://pdfs.semanticscholar.org/f6c7/0635241968a6d5fd5e03cde6907022091d64.pdf
+016800413ebd1a87730a5cf828e197f43a08f4b3,http://arxiv.org/pdf/1605.00743v1.pdf
+42350e28d11e33641775bef4c7b41a2c3437e4fd,http://mmlab.ie.cuhk.edu.hk/archive/2007/IP07_face02.pdf
+8f8c0243816f16a21dea1c20b5c81bc223088594,http://pdfs.semanticscholar.org/8f8c/0243816f16a21dea1c20b5c81bc223088594.pdf
+1dff919e51c262c22630955972968f38ba385d8a,http://pdfs.semanticscholar.org/1dff/919e51c262c22630955972968f38ba385d8a.pdf
+68a3f12382003bc714c51c85fb6d0557dcb15467,http://research.microsoft.com/pubs/217884/ZitnickSent2SceneICCV13.pdf
+84bc3ca61fc63b47ec3a1a6566ab8dcefb3d0015,http://www.cvip.louisville.edu/wwwcvip/research/publications/Pub_Pdf/2012/BTAS%20144.pdf
+08f6ad0a3e75b715852f825d12b6f28883f5ca05,http://www.cse.msu.edu/biometrics/Publications/Face/JainKlarePark_FaceRecognition_ChallengesinForensics_FG11.pdf
+0c36c988acc9ec239953ff1b3931799af388ef70,http://pdfs.semanticscholar.org/0c36/c988acc9ec239953ff1b3931799af388ef70.pdf
+ce85d953086294d989c09ae5c41af795d098d5b2,http://mmlab.ie.cuhk.edu.hk/archive/2007/NN07_feature.pdf
+36df81e82ea5c1e5edac40b60b374979a43668a5,http://www.robots.ox.ac.uk/~vgg/publications/2012/Parkhi12b/parkhi12b.pdf
+aca75c032cfb0b2eb4c0ae56f3d060d8875e43f9,http://pdfs.semanticscholar.org/aca7/5c032cfb0b2eb4c0ae56f3d060d8875e43f9.pdf
+00a967cb2d18e1394226ad37930524a31351f6cf,https://arxiv.org/pdf/1611.05377v1.pdf
+48cfc5789c246c6ad88ff841701204fc9d6577ed,http://pdfs.semanticscholar.org/48cf/c5789c246c6ad88ff841701204fc9d6577ed.pdf
+37f2e03c7cbec9ffc35eac51578e7e8fdfee3d4e,http://www.cse.iitm.ac.in/~amittal/wacv2015_review.pdf
+c207fd762728f3da4cddcfcf8bf19669809ab284,http://pdfs.semanticscholar.org/c207/fd762728f3da4cddcfcf8bf19669809ab284.pdf
+4bd3de97b256b96556d19a5db71dda519934fd53,http://openaccess.thecvf.com/content_cvpr_2016/papers/Wen_Latent_Factor_Guided_CVPR_2016_paper.pdf
+f8a5bc2bd26790d474a1f6cc246b2ba0bcde9464,http://pdfs.semanticscholar.org/f8a5/bc2bd26790d474a1f6cc246b2ba0bcde9464.pdf
+202d8d93b7b747cdbd6e24e5a919640f8d16298a,http://pdfs.semanticscholar.org/202d/8d93b7b747cdbd6e24e5a919640f8d16298a.pdf
+570308801ff9614191cfbfd7da88d41fb441b423,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Chu_Unsupervised_Synchrony_Discovery_ICCV_2015_paper.pdf
+c0ca6b992cbe46ea3003f4e9b48f4ef57e5fb774,http://pdfs.semanticscholar.org/c0ca/6b992cbe46ea3003f4e9b48f4ef57e5fb774.pdf
+4e7ed13e541b8ed868480375785005d33530e06d,http://arxiv.org/pdf/1603.07388v1.pdf
+a6496553fb9ab9ca5d69eb45af1bdf0b60ed86dc,http://pdfs.semanticscholar.org/a649/6553fb9ab9ca5d69eb45af1bdf0b60ed86dc.pdf
+b747fcad32484dfbe29530a15776d0df5688a7db,http://pdfs.semanticscholar.org/b747/fcad32484dfbe29530a15776d0df5688a7db.pdf
+5b7cb9b97c425b52b2e6f41ba8028836029c4432,http://www.cis.pku.edu.cn/faculty/vision/zlin/Publications/2014-CVPR-SMR.pdf
+1b5875dbebc76fec87e72cee7a5263d325a77376,http://arxiv.org/pdf/1603.00560v2.pdf
+d41c11ebcb06c82b7055e2964914b9af417abfb2,http://pdfs.semanticscholar.org/d41c/11ebcb06c82b7055e2964914b9af417abfb2.pdf
+697b0b9630213ca08a1ae1d459fabc13325bdcbb,http://pdfs.semanticscholar.org/697b/0b9630213ca08a1ae1d459fabc13325bdcbb.pdf
+4fc7a540efb24bea338f82c8bdc64c214744a3de,http://www.researchgate.net/profile/Touradj_Ebrahimi/publication/41083907_Object-based_Tag_Propagation_for_Semi-automatic_Annotation_of_Images/links/02e7e515b3de45cd50000000.pdf
+b9f2a755940353549e55690437eb7e13ea226bbf,http://pdfs.semanticscholar.org/b9f2/a755940353549e55690437eb7e13ea226bbf.pdf
+ccf43c62e4bf76b6a48ff588ef7ed51e87ddf50b,http://pdfs.semanticscholar.org/ccf4/3c62e4bf76b6a48ff588ef7ed51e87ddf50b.pdf
+b2c25af8a8e191c000f6a55d5f85cf60794c2709,http://pdfs.semanticscholar.org/b2c2/5af8a8e191c000f6a55d5f85cf60794c2709.pdf
+5bc0a89f4f73523967050374ed34d7bc89e4d9e1,http://pdfs.semanticscholar.org/5bc0/a89f4f73523967050374ed34d7bc89e4d9e1.pdf
+6d4e3616d0b27957c4107ae877dc0dd4504b69ab,http://pdfs.semanticscholar.org/6d4e/3616d0b27957c4107ae877dc0dd4504b69ab.pdf
+6eaf446dec00536858548fe7cc66025b70ce20eb,http://pdfs.semanticscholar.org/6eaf/446dec00536858548fe7cc66025b70ce20eb.pdf
+2f7fc778e3dec2300b4081ba2a1e52f669094fcd,http://pdfs.semanticscholar.org/2f7f/c778e3dec2300b4081ba2a1e52f669094fcd.pdf
+c0d5c3aab87d6e8dd3241db1d931470c15b9e39d,http://pdfs.semanticscholar.org/facb/edfe90956c720f70aab14767b5e25dcc6478.pdf
+47bf7a8779c68009ea56a7c20e455ccdf0e3a8fa,http://pdfs.semanticscholar.org/d948/50abdd272a402cd2f00e5b85311d87c75b16.pdf
+65293ecf6a4c5ab037a2afb4a9a1def95e194e5f,http://pdfs.semanticscholar.org/6529/3ecf6a4c5ab037a2afb4a9a1def95e194e5f.pdf
+21ec41a6ee3c655cf54c6db659d56480fc76e742,http://www.liacs.nl/home/mlew/ivc2007.emotion.pdf
+559795d3f3b096ceddc03720ba62d79d50eae300,http://www3.nd.edu/~kwb/BarrBowyerFlynnTIFS_2014.pdf
+2b64a8c1f584389b611198d47a750f5d74234426,http://pdfs.semanticscholar.org/fb11/6f00320a37d80ec32561d1ab9b795c943202.pdf
+3ce2ecf3d6ace8d80303daf67345be6ec33b3a93,http://pdfs.semanticscholar.org/3ce2/ecf3d6ace8d80303daf67345be6ec33b3a93.pdf
+14c0f9dc9373bea1e27b11fa0594c86c9e632c8d,http://openaccess.thecvf.com/content_iccv_2015/papers/Dang_Adaptive_Exponential_Smoothing_ICCV_2015_paper.pdf
+3b2a2357b12cf0a5c99c8bc06ef7b46e40dd888e,http://pdfs.semanticscholar.org/5141/cf2e59fb2ec9bb489b9c1832447d3cd93110.pdf
+d82b93f848d5442f82154a6011d26df8a9cd00e7,http://pdfs.semanticscholar.org/d82b/93f848d5442f82154a6011d26df8a9cd00e7.pdf
+c3b3636080b9931ac802e2dd28b7b684d6cf4f8b,http://pdfs.semanticscholar.org/c3b3/636080b9931ac802e2dd28b7b684d6cf4f8b.pdf
+0b642f6d48a51df64502462372a38c50df2051b1,https://infoscience.epfl.ch/record/231128/files/Le_ICMI_2017.pdf
+d785fcf71cb22f9c33473cba35f075c1f0f06ffc,http://research.cs.rutgers.edu/~linzhong/PDF/Lin_cvpr2012.pdf
+24d376e4d580fb28fd66bc5e7681f1a8db3b6b78,http://pdfs.semanticscholar.org/24d3/76e4d580fb28fd66bc5e7681f1a8db3b6b78.pdf
+53c8cbc4a3a3752a74f79b74370ed8aeed97db85,http://pdfs.semanticscholar.org/53c8/cbc4a3a3752a74f79b74370ed8aeed97db85.pdf
+27a299b834a18e45d73e0bf784bbb5b304c197b3,http://ai.stanford.edu/~vigneshr/cvpr_13/cvpr13_social_roles.pdf
+a1b1442198f29072e907ed8cb02a064493737158,http://affect.media.mit.edu/pdfs/12.McDuff-etal-Crowdsourcing-TAC.pdf
+1c65f3b3c70e1ea89114f955624d7adab620a013,http://pdfs.semanticscholar.org/ef34/cc2a26e88abd6a03d1a831c750440c6147d2.pdf
+8160b3b5f07deaa104769a2abb7017e9c031f1c1,http://www.aiia.csd.auth.gr/EN/cor_baayen/Exploiting_Discriminant_Information_in_NMF_for_FFV.pdf
+f0f501e1e8726148d18e70c8e9f6feea9360d119,http://pdfs.semanticscholar.org/f0f5/01e1e8726148d18e70c8e9f6feea9360d119.pdf
+81dd68de9d88c49db1ae509dbc66c7a82809c026,http://atvs.ii.uam.es/files/2004_SPM_Biometrics_Ortega.pdf
+f43eeb578e0ca48abfd43397bbd15825f94302e4,http://pdfs.semanticscholar.org/f43e/eb578e0ca48abfd43397bbd15825f94302e4.pdf
+c398684270543e97e3194674d9cce20acaef3db3,http://pdfs.semanticscholar.org/c398/684270543e97e3194674d9cce20acaef3db3.pdf
+f3015be0f9dbc1a55b6f3dc388d97bb566ff94fe,http://pdfs.semanticscholar.org/f301/5be0f9dbc1a55b6f3dc388d97bb566ff94fe.pdf
+1329206dbdb0a2b9e23102e1340c17bd2b2adcf5,http://pdfs.semanticscholar.org/a2f4/06c8babac96b2108c530974c4d3132106d42.pdf
+36b40c75a3e53c633c4afb5a9309d10e12c292c7,https://pdfs.semanticscholar.org/36b4/0c75a3e53c633c4afb5a9309d10e12c292c7.pdf
+02e43d9ca736802d72824892c864e8cfde13718e,https://qmro.qmul.ac.uk/xmlui/bitstream/handle/123456789/10075/shi%20Transferring%20a%20semantic%20representation%202015%20Accepted.pdf?sequence=1
+1c1a98df3d0d5e2034ea723994bdc85af45934db,http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W11/papers/Jaiswal_Guided_Unsupervised_Learning_2013_ICCV_paper.pdf
+0d538084f664b4b7c0e11899d08da31aead87c32,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Zhang_Deformable_Part_Descriptors_2013_ICCV_paper.pdf
+3a2fc58222870d8bed62442c00341e8c0a39ec87,http://pdfs.semanticscholar.org/3a2f/c58222870d8bed62442c00341e8c0a39ec87.pdf
+438e7999c937b94f0f6384dbeaa3febff6d283b6,https://arxiv.org/pdf/1705.02402v2.pdf
+c40c23e4afc81c8b119ea361e5582aa3adecb157,http://pdfs.semanticscholar.org/c40c/23e4afc81c8b119ea361e5582aa3adecb157.pdf
+03b03f5a301b2ff88ab3bb4969f54fd9a35c7271,http://pdfs.semanticscholar.org/03b0/3f5a301b2ff88ab3bb4969f54fd9a35c7271.pdf
+da15344a4c10b91d6ee2e9356a48cb3a0eac6a97,http://pdfs.semanticscholar.org/da15/344a4c10b91d6ee2e9356a48cb3a0eac6a97.pdf
+181045164df86c72923906aed93d7f2f987bce6c,http://pdfs.semanticscholar.org/1810/45164df86c72923906aed93d7f2f987bce6c.pdf
+1e58d7e5277288176456c66f6b1433c41ca77415,http://pdfs.semanticscholar.org/1e58/d7e5277288176456c66f6b1433c41ca77415.pdf
+034addac4637121e953511301ef3a3226a9e75fd,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Parikh_Implied_Feedback_Learning_2013_ICCV_paper.pdf
+a2d9c9ed29bbc2619d5e03320e48b45c15155195,http://pdfs.semanticscholar.org/a2d9/c9ed29bbc2619d5e03320e48b45c15155195.pdf
+459960be65dd04317dd325af5b7cbb883d822ee4,http://pdfs.semanticscholar.org/876c/c40c6c470f39fbda48dd394d0a9d5f6b147d.pdf
+45f3bf505f1ce9cc600c867b1fb2aa5edd5feed8,http://www.doc.ic.ac.uk/~maja/VukadinovicPantic-SMC05-FINAL.pdf
+d6102a7ddb19a185019fd2112d2f29d9258f6dec,http://pdfs.semanticscholar.org/d610/2a7ddb19a185019fd2112d2f29d9258f6dec.pdf
+adaf2b138094981edd615dbfc4b7787693dbc396,http://pdfs.semanticscholar.org/adaf/2b138094981edd615dbfc4b7787693dbc396.pdf
+0f829fee12e86f980a581480a9e0cefccb59e2c5,http://www.cs.columbia.edu/~liujx09/posters/birdpart_poster.pdf
+14b69626b64106bff20e17cf8681790254d1e81c,http://pdfs.semanticscholar.org/14b6/9626b64106bff20e17cf8681790254d1e81c.pdf
+0a64f4fec592662316764283575d05913eb2135b,http://pdfs.semanticscholar.org/0a64/f4fec592662316764283575d05913eb2135b.pdf
+26d407b911d1234e8e3601e586b49316f0818c95,https://arxiv.org/pdf/1709.00965v1.pdf
+2eb9f1dbea71bdc57821dedbb587ff04f3a25f07,http://pdfs.semanticscholar.org/2eb9/f1dbea71bdc57821dedbb587ff04f3a25f07.pdf
+8d1adf0ac74e901a94f05eca2f684528129a630a,http://www.denniscodd.com/dotnet-ieee/Facial%20Expression%20Recognition%20Using%20Facial.pdf
+346166da1a49e531923294300a731167e1436d5b,http://lear.inrialpes.fr/people/mpederso/papers/3DV14.pdf
+cbe859d151466315a050a6925d54a8d3dbad591f,http://homes.di.unimi.it/~boccignone/GiuseppeBoccignone_webpage/Stochastic_files/Euvip2010.pdf
+ac2e44622efbbab525d4301c83cb4d5d7f6f0e55,http://openaccess.thecvf.com/content_cvpr_2016/papers/Booth_A_3D_Morphable_CVPR_2016_paper.pdf
+503db524b9a99220d430e741c44cd9c91ce1ddf8,http://pdfs.semanticscholar.org/503d/b524b9a99220d430e741c44cd9c91ce1ddf8.pdf
+c696c9bbe27434cb6279223a79b17535cd6e88c8,http://pdfs.semanticscholar.org/c696/c9bbe27434cb6279223a79b17535cd6e88c8.pdf
+1f745215cda3a9f00a65166bd744e4ec35644b02,http://www.eurecom.fr/en/publication/4044/download/mm-publi-4044.pdf
+56359d2b4508cc267d185c1d6d310a1c4c2cc8c2,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Li_Shape_Driven_Kernel_2015_CVPR_paper.pdf
+738c187d55745aac18d5fb5f6cc9e3568cd2d217,http://www-ee.ccny.cuny.edu/wwwn/yltian/Publications/ICMR130-2015.pdf
+0178929595f505ef7655272cc2c339d7ed0b9507,http://pdfs.semanticscholar.org/7d84/151beccef17f71b3eeaca59ebc690561ab73.pdf
+6ed738ff03fd9042965abdfaa3ed8322de15c116,https://dr.ntu.edu.sg/bitstream/handle/10220/39690/kmeap_icdm2014.pdf?isAllowed=y&sequence=1
+56a653fea5c2a7e45246613049fb16b1d204fc96,http://ieeeprojectsmadurai.com/matlab2016base/Quaternion%20Collaborative%20and%20Sparse%20Representation.pdf
+184750382fe9b722e78d22a543e852a6290b3f70,http://pdfs.semanticscholar.org/1847/50382fe9b722e78d22a543e852a6290b3f70.pdf
+297d3df0cf84d24f7efea44f87c090c7d9be4bed,http://pdfs.semanticscholar.org/297d/3df0cf84d24f7efea44f87c090c7d9be4bed.pdf
+40dab43abef32deaf875c2652133ea1e2c089223,http://pdfs.semanticscholar.org/40da/b43abef32deaf875c2652133ea1e2c089223.pdf
+113e5678ed8c0af2b100245057976baf82fcb907,http://www.humansensing.cs.cmu.edu/sites/default/files/4Jeni_Metrics.pdf
+2d35a07c4fa03d78d5b622ab703ea44850de8d39,http://www.cs.sunysb.edu/~vislab/papers/Zhang2005cgi.pdf
+2fda461869f84a9298a0e93ef280f79b9fb76f94,https://www.cl.cam.ac.uk/research/rainbow/projects/openface/wacv2016.pdf
+306957285fea4ce11a14641c3497d01b46095989,http://pdfs.semanticscholar.org/3069/57285fea4ce11a14641c3497d01b46095989.pdf
+a3017bb14a507abcf8446b56243cfddd6cdb542b,http://pdfs.semanticscholar.org/a301/7bb14a507abcf8446b56243cfddd6cdb542b.pdf
+00e3957212517a252258baef833833921dd308d4,http://www.yugangjiang.info/publication/17MM-PersonAttribute.pdf
+46a4551a6d53a3cd10474ef3945f546f45ef76ee,http://cvrr.ucsd.edu/publications/2014/TawariTrivedi_IV2014.pdf
+833f6ab858f26b848f0d747de502127406f06417,http://mediatum.ub.tum.de/doc/980054/157447.pdf
+449808b7aa9ee6b13ad1a21d9f058efaa400639a,http://www.jdl.ac.cn/doc/2008/Recovering%203D%20Facial%20Shape%20via%20Coupled%202D-3D%20Space%20Learning.pdf
+b5857b5bd6cb72508a166304f909ddc94afe53e3,http://pdfs.semanticscholar.org/b585/7b5bd6cb72508a166304f909ddc94afe53e3.pdf
+a0f193c86e3dd7e0020c0de3ec1e24eaff343ce4,http://pdfs.semanticscholar.org/a0f1/93c86e3dd7e0020c0de3ec1e24eaff343ce4.pdf
+3270b2672077cc345f188500902eaf7809799466,http://pdfs.semanticscholar.org/3270/b2672077cc345f188500902eaf7809799466.pdf
+56ae6d94fc6097ec4ca861f0daa87941d1c10b70,http://pdfs.semanticscholar.org/56ae/6d94fc6097ec4ca861f0daa87941d1c10b70.pdf
+a7e1327bd76945a315f2869bfae1ce55bb94d165,http://pdfs.semanticscholar.org/a7e1/327bd76945a315f2869bfae1ce55bb94d165.pdf
+391b86cf16c2702dcc4beee55a6dd6d3bd7cf27b,http://dayongwang.info/pdf/2014-MM.pdf
+46f3b113838e4680caa5fc8bda6e9ae0d35a038c,http://pdfs.semanticscholar.org/46f3/b113838e4680caa5fc8bda6e9ae0d35a038c.pdf
+5fac62a3de11125fc363877ba347122529b5aa50,http://openaccess.thecvf.com/content_ICCV_2017/papers/Saha_AMTnet_Action-Micro-Tube_Regression_ICCV_2017_paper.pdf
+9b318098f3660b453fbdb7a579778ab5e9118c4c,http://humansensing.cs.cmu.edu/sites/default/files/07471506.pdf
+0e652a99761d2664f28f8931fee5b1d6b78c2a82,http://pdfs.semanticscholar.org/0e65/2a99761d2664f28f8931fee5b1d6b78c2a82.pdf
+69de532d93ad8099f4d4902c4cad28db958adfea,http://pdfs.semanticscholar.org/e6bc/c30d2be78797e0e2506567bc0f09b8eae21a.pdf
+624e9d9d3d941bab6aaccdd93432fc45cac28d4b,https://arxiv.org/pdf/1505.00296v1.pdf
+9fa1be81d31fba07a1bde0275b9d35c528f4d0b8,http://pdfs.semanticscholar.org/9fa1/be81d31fba07a1bde0275b9d35c528f4d0b8.pdf
+8ed051be31309a71b75e584bc812b71a0344a019,http://www.vision.caltech.edu/~bart/Publications/2007/BartUllmanMBE.pdf
+c10a15e52c85654db9c9343ae1dd892a2ac4a279,http://www.cs.utexas.edu/~grauman/papers/ijcv-sungju.pdf
+62694828c716af44c300f9ec0c3236e98770d7cf,http://pdfs.semanticscholar.org/6269/4828c716af44c300f9ec0c3236e98770d7cf.pdf
+2525f336af31178b836e27f8c60056e18f1455d2,http://eeeweba.ntu.edu.sg/computervision/Research%20Papers/2017/TEMPORALLY%20ENHANCED%20IMAGE%20OBJECT%20PROPOSALS%20FOR%20VIDEOS.pdf
+66029f1be1a5cee9a4e3e24ed8fcb65d5d293720,http://pdfs.semanticscholar.org/6602/9f1be1a5cee9a4e3e24ed8fcb65d5d293720.pdf
+b1df214e0f1c5065f53054195cd15012e660490a,http://pdfs.semanticscholar.org/b1df/214e0f1c5065f53054195cd15012e660490a.pdf
+1d79ec93a9feba817c75c31604c3f8df346eabe8,https://www.researchgate.net/profile/Manjunath_Aradhya/publication/254461422_The_study_of_different_similarity_measure_techniques_in_recognition_of_handwritten_characters/links/0046352049dae0d044000000.pdf
+20767ca3b932cbc7b8112db21980d7b9b3ea43a3,http://pdfs.semanticscholar.org/2076/7ca3b932cbc7b8112db21980d7b9b3ea43a3.pdf
+c62c910264658709e9bf0e769e011e7944c45c90,http://pdfs.semanticscholar.org/c62c/910264658709e9bf0e769e011e7944c45c90.pdf
+adf7ccb81b8515a2d05fd3b4c7ce5adf5377d9be,http://pdfs.semanticscholar.org/adf7/ccb81b8515a2d05fd3b4c7ce5adf5377d9be.pdf
+edef98d2b021464576d8d28690d29f5431fd5828,http://pdfs.semanticscholar.org/edef/98d2b021464576d8d28690d29f5431fd5828.pdf
+1462bc73834e070201acd6e3eaddd23ce3c1a114,http://pdfs.semanticscholar.org/1462/bc73834e070201acd6e3eaddd23ce3c1a114.pdf
+a83fc450c124b7e640adc762e95e3bb6b423b310,http://pdfs.semanticscholar.org/b908/edadad58c604a1e4b431f69ac8ded350589a.pdf
+b54c477885d53a27039c81f028e710ca54c83f11,http://coewww.rutgers.edu/riul/research/papers/pdf/skmspami.pdf
+84e6669b47670f9f4f49c0085311dce0e178b685,http://pdfs.semanticscholar.org/84e6/669b47670f9f4f49c0085311dce0e178b685.pdf
+1b150248d856f95da8316da868532a4286b9d58e,http://pdfs.semanticscholar.org/6724/41000751d58396790f4c993419d70f6af3f4.pdf
+b18858ad6ec88d8b443dffd3e944e653178bc28b,http://pdfs.semanticscholar.org/b188/58ad6ec88d8b443dffd3e944e653178bc28b.pdf
+497bf2df484906e5430aa3045cf04a40c9225f94,http://pdfs.semanticscholar.org/497b/f2df484906e5430aa3045cf04a40c9225f94.pdf
+0077cd8f97cafd2b389783858a6e4ab7887b0b6b,http://pdfs.semanticscholar.org/b971/266b29fcecf1d5efe1c4dcdc2355cb188ab0.pdf
+2fdce3228d384456ea9faff108b9c6d0cf39e7c7,http://pdfs.semanticscholar.org/2fdc/e3228d384456ea9faff108b9c6d0cf39e7c7.pdf
+21d9d0deed16f0ad62a4865e9acf0686f4f15492,http://amp.ece.cmu.edu/people/Andy/Andy_files/cvpr09.pdf
+a88640045d13fc0207ac816b0bb532e42bcccf36,http://pdfs.semanticscholar.org/a886/40045d13fc0207ac816b0bb532e42bcccf36.pdf
+1ed6c7e02b4b3ef76f74dd04b2b6050faa6e2177,http://pdfs.semanticscholar.org/6433/c412149382418ccd8aa966aa92973af41671.pdf
+051f03bc25ec633592aa2ff5db1d416b705eac6c,http://www.cse.msu.edu/biometrics/Publications/Face/LiaoJain_PartialFR_AlignmentFreeApproach_ICJB11.pdf
+d6bfa9026a563ca109d088bdb0252ccf33b76bc6,http://pdfs.semanticscholar.org/d6bf/a9026a563ca109d088bdb0252ccf33b76bc6.pdf
+1ca8c09abb73a02519d8db77e4fe107acfc589b6,http://sci.pitt.edu/wp-content/uploads/2018/03/111_Zhang.pdf
+117f164f416ea68e8b88a3005e55a39dbdf32ce4,http://www.cs.toronto.edu/~fidler/papers/fashionCVPR15.pdf
+28d7029cfb73bcb4ad1997f3779c183972a406b4,https://arxiv.org/pdf/1705.00322v1.pdf
+0359f7357ea8191206b9da45298902de9f054c92,http://arxiv.org/pdf/1511.04110v1.pdf
+cc8bf03b3f5800ac23e1a833447c421440d92197,https://pdfs.semanticscholar.org/cc8b/f03b3f5800ac23e1a833447c421440d92197.pdf
+74b0095944c6e29837c208307a67116ebe1231c8,http://web.eecs.umich.edu/~hero/Preprints/EuclideanK-Nearest.pdf
+7aafeb9aab48fb2c34bed4b86755ac71e3f00338,http://pdfs.semanticscholar.org/7aaf/eb9aab48fb2c34bed4b86755ac71e3f00338.pdf
+16f940b4b5da79072d64a77692a876627092d39c,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR_2009_WS/data/papers/10/10.pdf
+06262d14323f9e499b7c6e2a3dec76ad9877ba04,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Juranek_Real-Time_Pose_Estimation_ICCV_2015_paper.pdf
+82d781b7b6b7c8c992e0cb13f7ec3989c8eafb3d,http://pdfs.semanticscholar.org/82d7/81b7b6b7c8c992e0cb13f7ec3989c8eafb3d.pdf
+9d42df42132c3d76e3447ea61e900d3a6271f5fe,http://pdfs.semanticscholar.org/9d42/df42132c3d76e3447ea61e900d3a6271f5fe.pdf
+ab1dfcd96654af0bf6e805ffa2de0f55a73c025d,http://pdfs.semanticscholar.org/ab1d/fcd96654af0bf6e805ffa2de0f55a73c025d.pdf
+3d94f81cf4c3a7307e1a976dc6cb7bf38068a381,http://faculty.ucmerced.edu/mhyang/papers/tip17_age.pdf
+488e475eeb3bb39a145f23ede197cd3620f1d98a,http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf
+8cb3f421b55c78e56c8a1c1d96f23335ebd4a5bf,http://pdfs.semanticscholar.org/8cb3/f421b55c78e56c8a1c1d96f23335ebd4a5bf.pdf
+c3638b026c7f80a2199b5ae89c8fcbedfc0bd8af,http://pdfs.semanticscholar.org/c363/8b026c7f80a2199b5ae89c8fcbedfc0bd8af.pdf
+3af1a375c7c1decbcf5c3a29774e165cafce390c,https://www.cbica.upenn.edu/sbia/papers/540.pdf
+2c848cc514293414d916c0e5931baf1e8583eabc,http://pdfs.semanticscholar.org/2c84/8cc514293414d916c0e5931baf1e8583eabc.pdf
+54bb25a213944b08298e4e2de54f2ddea890954a,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Moschoglou_AgeDB_The_First_CVPR_2017_paper.pdf
+86614c2d2f6ebcb9c600d4aef85fd6bf6eab6663,http://pdfs.semanticscholar.org/8661/4c2d2f6ebcb9c600d4aef85fd6bf6eab6663.pdf
+d65b82b862cf1dbba3dee6541358f69849004f30,http://pdfs.semanticscholar.org/d65b/82b862cf1dbba3dee6541358f69849004f30.pdf
+861b12f405c464b3ffa2af7408bff0698c6c9bf0,http://pdfs.semanticscholar.org/861b/12f405c464b3ffa2af7408bff0698c6c9bf0.pdf
+31f1e711fcf82c855f27396f181bf5e565a2f58d,http://www.rci.rutgers.edu/~vmp93/Conference_pub/Age_iccv2015.pdf
+9f4078773c8ea3f37951bf617dbce1d4b3795839,http://pdfs.semanticscholar.org/9f40/78773c8ea3f37951bf617dbce1d4b3795839.pdf
+43aa40eaa59244c233f83d81f86e12eba8d74b59,http://pdfs.semanticscholar.org/43aa/40eaa59244c233f83d81f86e12eba8d74b59.pdf
+faca1c97ac2df9d972c0766a296efcf101aaf969,http://pdfs.semanticscholar.org/faca/1c97ac2df9d972c0766a296efcf101aaf969.pdf
+ebd5df2b4105ba04cef4ca334fcb9bfd6ea0430c,http://pdfs.semanticscholar.org/f727/b58b84ccd8e7ed51a90ccc913d704b451191.pdf
+7701952e405c3d8a0947e2a309de281aa76bd3f4,http://isl.ira.uka.de/~stiefel/papers/IEE_SIU_2LDA.pdf
+5e28673a930131b1ee50d11f69573c17db8fff3e,http://pdfs.semanticscholar.org/f28d/fadba11bd3489d008827d9b1a539b34b50df.pdf
+9abd35b37a49ee1295e8197aac59bde802a934f3,http://pdfs.semanticscholar.org/9abd/35b37a49ee1295e8197aac59bde802a934f3.pdf
+3505c9b0a9631539e34663310aefe9b05ac02727,https://ibug.doc.ic.ac.uk/media/uploads/documents/pid4666647.pdf
+8b19efa16a9e73125ab973429eb769d0ad5a8208,http://pdfs.semanticscholar.org/8b19/efa16a9e73125ab973429eb769d0ad5a8208.pdf
+69a68f9cf874c69e2232f47808016c2736b90c35,http://personal.ie.cuhk.edu.hk/~ccloy/files/cvpr_2016_imbalanced.pdf
+ab427f0c7d4b0eb22c045392107509451165b2ba,http://cs.uky.edu/~zach/assets/papers/li2012learning.pdf
+2f882ceaaf110046e63123b495212d7d4e99f33d,http://pdfs.semanticscholar.org/2f88/2ceaaf110046e63123b495212d7d4e99f33d.pdf
+43e99b76ca8e31765d4571d609679a689afdc99e,http://openaccess.thecvf.com/content_ICCV_2017/papers/Yu_Learning_Dense_Facial_ICCV_2017_paper.pdf
+89cabb60aa369486a1ebe586dbe09e3557615ef8,http://pdfs.semanticscholar.org/89ca/bb60aa369486a1ebe586dbe09e3557615ef8.pdf
+419a6fca4c8d73a1e43003edc3f6b610174c41d2,http://www.robots.newcastle.edu.au/~chalup/chalup_publications/p058_preprint.pdf
+cb1b5e8b35609e470ce519303915236b907b13b6,http://dforte.ece.ufl.edu/Domenic_files/IJCB.pdf
+0058cbe110933f73c21fa6cc9ae0cd23e974a9c7,http://pdfs.semanticscholar.org/0058/cbe110933f73c21fa6cc9ae0cd23e974a9c7.pdf
+e4d0e87d0bd6ead4ccd39fc5b6c62287560bac5b,https://www.ecse.rpi.edu/~cvrl/Publication/pdf/Liu2013.pdf
+1eec03527703114d15e98ef9e55bee5d6eeba736,http://pdfs.semanticscholar.org/1eec/03527703114d15e98ef9e55bee5d6eeba736.pdf
+44078d0daed8b13114cffb15b368acc467f96351,http://arxiv.org/pdf/1604.05417v1.pdf
+2cd7821fcf5fae53a185624f7eeda007434ae037,http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf
+0dccc881cb9b474186a01fd60eb3a3e061fa6546,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2B_104_ext.pdf
+bcee40c25e8819955263b89a433c735f82755a03,http://pdfs.semanticscholar.org/bcee/40c25e8819955263b89a433c735f82755a03.pdf
+558fc9a2bce3d3993a9c1f41b6c7f290cefcf92f,http://pdfs.semanticscholar.org/558f/c9a2bce3d3993a9c1f41b6c7f290cefcf92f.pdf
+f2c568fe945e5743635c13fe5535af157b1903d1,http://pdfs.semanticscholar.org/f2c5/68fe945e5743635c13fe5535af157b1903d1.pdf
+b5160e95192340c848370f5092602cad8a4050cd,http://pdfs.semanticscholar.org/dd71/dc78e75f0de27263d508b3a8b29921cfea03.pdf
+5f64a2a9b6b3d410dd60dc2af4a58a428c5d85f9,http://pdfs.semanticscholar.org/e1dd/1c4de149c6b05eedd1728d57a18a074b9b2a.pdf
+b73d9e1af36aabb81353f29c40ecdcbdf731dbed,http://pdfs.semanticscholar.org/b73d/9e1af36aabb81353f29c40ecdcbdf731dbed.pdf
+4c6233765b5f83333f6c675d3389bbbf503805e3,https://perceptual.mpi-inf.mpg.de/files/2015/03/Yan_Vis13.pdf
+02d650d8a3a9daaba523433fbe93705df0a7f4b1,http://pdfs.semanticscholar.org/02d6/50d8a3a9daaba523433fbe93705df0a7f4b1.pdf
+4a2062ba576ca9e9a73b6aa6e8aac07f4d9344b9,https://arxiv.org/pdf/1608.01866v1.pdf
+3c4f6d24b55b1fd3c5b85c70308d544faef3f69a,http://pdfs.semanticscholar.org/3c4f/6d24b55b1fd3c5b85c70308d544faef3f69a.pdf
+5aa57a12444dbde0f5645bd9bcec8cb2f573c6a0,http://pdfs.semanticscholar.org/c173/fa4456941b9c40d53d656b8ad84d24c16ec3.pdf
+964a3196d44f0fefa7de3403849d22bbafa73886,http://pdfs.semanticscholar.org/964a/3196d44f0fefa7de3403849d22bbafa73886.pdf
+cc589c499dcf323fe4a143bbef0074c3e31f9b60,http://www.cs.binghamton.edu/~lijun/Research/3DFE/Yin_FGR06_a.pdf
+34c594abba9bb7e5813cfae830e2c4db78cf138c,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/3B_047_ext.pdf
+3b1260d78885e872cf2223f2c6f3d6f6ea254204,http://pdfs.semanticscholar.org/3b12/60d78885e872cf2223f2c6f3d6f6ea254204.pdf
+1630e839bc23811e340bdadad3c55b6723db361d,http://pdfs.semanticscholar.org/9fc9/f22e9e28eab53d426e9d848c0d7dcd2c2459.pdf
+cd6c2ae00157e3fb6ab56379843280eb4cbb01b4,http://www.umiacs.umd.edu/~yzyang/paper/ICRA_2013_Multi.pdf
+0fc254272db096a9305c760164520ad9914f4c9e,https://arxiv.org/pdf/1601.06087v1.pdf
+2be1e2f2b7208fdf7a379da37a2097cfe52bc196,http://www2.cvl.isy.liu.se/Education/Graduate/artikelklubb/aryananda_icra09.pdf
+af8fe1b602452cf7fc9ecea0fd4508ed4149834e,http://pdfs.semanticscholar.org/af8f/e1b602452cf7fc9ecea0fd4508ed4149834e.pdf
+8c6b9c9c26ead75ce549a57c4fd0a12b46142848,http://pdfs.semanticscholar.org/97fc/47ba1427b0e50cd815b8b1657fea6fb9e25a.pdf
+0c2875bb47db3698dbbb3304aca47066978897a4,http://slazebni.cs.illinois.edu/publications/iccv17_situation.pdf
+41000c3a3344676513ef4bfcd392d14c7a9a7599,http://pdfs.semanticscholar.org/d3ba/9ed56e9ddb73f0e0f2bea3fd3920db30f42e.pdf
+c1ff88493721af1940df0d00bcfeefaa14f1711f,http://pdfs.semanticscholar.org/c1ff/88493721af1940df0d00bcfeefaa14f1711f.pdf
+ab0f9bc35b777eaefff735cb0dd0663f0c34ad31,http://faculty.ucmerced.edu/snewsam/papers/Yang_ICPR14_SemiSupervisedLearning.pdf
+4f36c14d1453fc9d6481b09c5a09e91d8d9ee47a,http://pdfs.semanticscholar.org/4f36/c14d1453fc9d6481b09c5a09e91d8d9ee47a.pdf
+29e96ec163cb12cd5bd33bdf3d32181c136abaf9,http://pdfs.semanticscholar.org/29e9/6ec163cb12cd5bd33bdf3d32181c136abaf9.pdf
+5c6de2d9f93b90034f07860ae485a2accf529285,http://pdfs.semanticscholar.org/5c6d/e2d9f93b90034f07860ae485a2accf529285.pdf
+ac6c3b3e92ff5fbcd8f7967696c7aae134bea209,http://pdfs.semanticscholar.org/ac6c/3b3e92ff5fbcd8f7967696c7aae134bea209.pdf
+3d5a1be4c1595b4805a35414dfb55716e3bf80d8,http://pdfs.semanticscholar.org/9e8e/bf5447fcd5b2ba4cdd53253f0049dacb2985.pdf
+de398bd8b7b57a3362c0c677ba8bf9f1d8ade583,http://www.cs.wayne.edu/~mdong/TMM16.pdf
+64cf86ba3b23d3074961b485c16ecb99584401de,http://pdfs.semanticscholar.org/b54a/54a2f33c24123c6943597462ef02928ec99f.pdf
+a40f8881a36bc01f3ae356b3e57eac84e989eef0,http://pdfs.semanticscholar.org/a40f/8881a36bc01f3ae356b3e57eac84e989eef0.pdf
+b7426836ca364603ccab0e533891d8ac54cf2429,http://pdfs.semanticscholar.org/b742/6836ca364603ccab0e533891d8ac54cf2429.pdf
+5397c34a5e396658fa57e3ca0065a2878c3cced7,http://www.iis.sinica.edu.tw/papers/song/5959-F.pdf
+d59404354f84ad98fa809fd1295608bf3d658bdc,http://pdfs.semanticscholar.org/d594/04354f84ad98fa809fd1295608bf3d658bdc.pdf
+05318a267226f6d855d83e9338eaa9e718b2a8dd,https://fruct.org/publications/fruct16/files/Khr.pdf
+6dd5dbb6735846b214be72983e323726ef77c7a9,http://pdfs.semanticscholar.org/6dd5/dbb6735846b214be72983e323726ef77c7a9.pdf
+c660500b49f097e3af67bb14667de30d67db88e3,http://pdfs.semanticscholar.org/c660/500b49f097e3af67bb14667de30d67db88e3.pdf
+b03b4d8b4190361ed2de66fcbb6fda0c9a0a7d89,http://pdfs.semanticscholar.org/b03b/4d8b4190361ed2de66fcbb6fda0c9a0a7d89.pdf
+989332c5f1b22604d6bb1f78e606cb6b1f694e1a,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Wang_Recurrent_Face_Aging_CVPR_2016_paper.pdf
+b6f758be954d34817d4ebaa22b30c63a4b8ddb35,https://arxiv.org/pdf/1703.04835v1.pdf
+3f848d6424f3d666a1b6dd405a48a35a797dd147,http://pdfs.semanticscholar.org/4f69/233cd6f0b56833c9395528aa007b63158a1d.pdf
+2afdda6fb85732d830cea242c1ff84497cd5f3cb,http://www.iis.sinica.edu.tw/papers/song/11489-F.pdf
+4ff4c27e47b0aa80d6383427642bb8ee9d01c0ac,http://www.ai.rug.nl/~mwiering/GROUP/ARTICLES/CNN_Gender_Recognition.pdf
+b073313325b6482e22032e259d7311fb9615356c,http://alumni.cs.ucr.edu/~hli/paper/hli05tumor.pdf
+223ec77652c268b98c298327d42aacea8f3ce23f,http://pdfs.semanticscholar.org/223e/c77652c268b98c298327d42aacea8f3ce23f.pdf
+1ebdfceebad642299e573a8995bc5ed1fad173e3,http://pdfs.semanticscholar.org/1ebd/fceebad642299e573a8995bc5ed1fad173e3.pdf
+d7312149a6b773d1d97c0c2b847609c07b5255ec,http://pdfs.semanticscholar.org/d731/2149a6b773d1d97c0c2b847609c07b5255ec.pdf
+be57d2aaab615ec8bc1dd2dba8bee41a4d038b85,https://www.cl.cam.ac.uk/~mmam3/pub/a19-mahmoud.pdf
+2faa09413162b0a7629db93fbb27eda5aeac54ca,http://www.nist.gov/customcf/get_pdf.cfm?pub_id=905048
+346dbc7484a1d930e7cc44276c29d134ad76dc3f,http://pdfs.semanticscholar.org/346d/bc7484a1d930e7cc44276c29d134ad76dc3f.pdf
+44f23600671473c3ddb65a308ca97657bc92e527,http://arxiv.org/pdf/1604.06573v2.pdf
+0f395a49ff6cbc7e796656040dbf446a40e300aa,http://pdfs.semanticscholar.org/0f39/5a49ff6cbc7e796656040dbf446a40e300aa.pdf
+c574c72b5ef1759b7fd41cf19a9dcd67e5473739,http://pdfs.semanticscholar.org/c574/c72b5ef1759b7fd41cf19a9dcd67e5473739.pdf
+dbb0a527612c828d43bcb9a9c41f1bf7110b1dc8,http://pdfs.semanticscholar.org/dbb0/a527612c828d43bcb9a9c41f1bf7110b1dc8.pdf
+4a6fcf714f663618657effc341ae5961784504c7,http://www.cs.tut.fi/~iosifidi/files/journal/2016_TIFS_ACSKDA.pdf?dl=0
+b446bcd7fb78adfe346cf7a01a38e4f43760f363,http://pdfs.semanticscholar.org/b446/bcd7fb78adfe346cf7a01a38e4f43760f363.pdf
+c7c03324833ba262eeaada0349afa1b5990c1ea7,http://pdfs.semanticscholar.org/c7c0/3324833ba262eeaada0349afa1b5990c1ea7.pdf
+089513ca240c6d672c79a46fa94a92cde28bd567,http://pdfs.semanticscholar.org/0895/13ca240c6d672c79a46fa94a92cde28bd567.pdf
+6d10beb027fd7213dd4bccf2427e223662e20b7d,http://pdfs.semanticscholar.org/6d10/beb027fd7213dd4bccf2427e223662e20b7d.pdf
+a35d3ba191137224576f312353e1e0267e6699a1,http://pdfs.semanticscholar.org/a35d/3ba191137224576f312353e1e0267e6699a1.pdf
+191674c64f89c1b5cba19732869aa48c38698c84,http://pdfs.semanticscholar.org/1916/74c64f89c1b5cba19732869aa48c38698c84.pdf
+0be764800507d2e683b3fb6576086e37e56059d1,http://pdfs.semanticscholar.org/0be7/64800507d2e683b3fb6576086e37e56059d1.pdf
+22e189a813529a8f43ad76b318207d9a4b6de71a,http://openaccess.thecvf.com/content_ICCV_2017/papers/Felsen_What_Will_Happen_ICCV_2017_paper.pdf
+1389ba6c3ff34cdf452ede130c738f37dca7e8cb,http://pdfs.semanticscholar.org/1389/ba6c3ff34cdf452ede130c738f37dca7e8cb.pdf
+3986161c20c08fb4b9b791b57198b012519ea58b,http://pdfs.semanticscholar.org/3986/161c20c08fb4b9b791b57198b012519ea58b.pdf
+1ef4aac0ebc34e76123f848c256840d89ff728d0,http://www.openu.ac.il/home/hassner/projects/augmented_faces/Masietal2017rapid.pdf
+b52c0faba5e1dc578a3c32a7f5cfb6fb87be06ad,http://pdfs.semanticscholar.org/b52c/0faba5e1dc578a3c32a7f5cfb6fb87be06ad.pdf
+33f2b44742cc828347ccc5ec488200c25838b664,http://pdfs.semanticscholar.org/33f2/b44742cc828347ccc5ec488200c25838b664.pdf
+e6c8f5067ec2ad6af33745312b45fab03e7e038b,http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/1297.pdf
+4467a1ae8ddf0bc0e970c18a0cdd67eb83c8fd6f,http://pdfs.semanticscholar.org/4467/a1ae8ddf0bc0e970c18a0cdd67eb83c8fd6f.pdf
+46a29a5026142c91e5655454aa2c2f122561db7f,http://vipl.ict.ac.cn/sites/default/files/papers/files/2011_FG_sxli_Margin%20Emphasized%20Metric%20Learning%20and%20Its%20Application%20to%20Gabor%20Feature%20Based%20Face%20Recognition.pdf
+0ad4a814b30e096ad0e027e458981f812c835aa0,http://arxiv.org/pdf/1602.01827v1.pdf
+b5da4943c348a6b4c934c2ea7330afaf1d655e79,http://pdfs.semanticscholar.org/b5da/4943c348a6b4c934c2ea7330afaf1d655e79.pdf
+7726a6ab26a1654d34ec04c0b7b3dd80c5f84e0d,https://graphics.ethz.ch/Downloads/Publications/Papers/2013/Zun13a/Zun13a.pdf
+01dc1e03f39901e212bdf291209b7686266aeb13,http://arxiv.org/pdf/1604.07279v1.pdf
+7754b708d6258fb8279aa5667ce805e9f925dfd0,https://www.ecse.rpi.edu/~qji/Papers/PAMI_AU.pdf
+9d3aa3b7d392fad596b067b13b9e42443bbc377c,http://pdfs.semanticscholar.org/9d3a/a3b7d392fad596b067b13b9e42443bbc377c.pdf
+2331df8ca9f29320dd3a33ce68a539953fa87ff5,http://faculty.ucmerced.edu/mhyang/papers/aaai02.pdf
+1198572784788a6d2c44c149886d4e42858d49e4,http://pdfs.semanticscholar.org/1198/572784788a6d2c44c149886d4e42858d49e4.pdf
+20e504782951e0c2979d9aec88c76334f7505393,https://arxiv.org/pdf/1612.08534v1.pdf
+6e379f2d34e14efd85ae51875a4fa7d7ae63a662,http://pdfs.semanticscholar.org/6e37/9f2d34e14efd85ae51875a4fa7d7ae63a662.pdf
+0b9ce839b3c77762fff947e60a0eb7ebbf261e84,http://pdfs.semanticscholar.org/0b9c/e839b3c77762fff947e60a0eb7ebbf261e84.pdf
+98a660c15c821ea6d49a61c5061cd88e26c18c65,http://pdfs.semanticscholar.org/98a6/60c15c821ea6d49a61c5061cd88e26c18c65.pdf
+6f75697a86d23d12a14be5466a41e5a7ffb79fad,https://www.computer.org/csdl/proceedings/icis/2016/0806/00/07550861.pdf
+e8b2a98f87b7b2593b4a046464c1ec63bfd13b51,http://pdfs.semanticscholar.org/e8b2/a98f87b7b2593b4a046464c1ec63bfd13b51.pdf
+b239a756f22201c2780e46754d06a82f108c1d03,http://www.rci.rutgers.edu/~vmp93/Conference_pub/Fusion_FG_camera_ready.pdf
+3dd4d719b2185f7c7f92cc97f3b5a65990fcd5dd,http://pdfs.semanticscholar.org/3dd4/d719b2185f7c7f92cc97f3b5a65990fcd5dd.pdf
+ac820d67b313c38b9add05abef8891426edd5afb,http://pdfs.semanticscholar.org/da4e/76b789f7ea8ed6c6d26858ac8a12bb1413fe.pdf
+0d735e7552af0d1dcd856a8740401916e54b7eee,http://pdfs.semanticscholar.org/915f/f5da6658e800eb7ec1c8f3f26281e18d3cbf.pdf
+0653dcdff992ad980cd5ea5bc557efb6e2a53ba1,http://pdfs.semanticscholar.org/0653/dcdff992ad980cd5ea5bc557efb6e2a53ba1.pdf
+0eac652139f7ab44ff1051584b59f2dc1757f53b,http://pdfs.semanticscholar.org/0eac/652139f7ab44ff1051584b59f2dc1757f53b.pdf
+1a4b6ee6cd846ef5e3030a6ae59f026e5f50eda6,http://pdfs.semanticscholar.org/1a4b/6ee6cd846ef5e3030a6ae59f026e5f50eda6.pdf
+06e7e99c1fdb1da60bc3ec0e2a5563d05b63fe32,http://www.cs.utexas.edu/~grauman/papers/whittle-search-supp-cvpr2012.pdf
+7f44f8a5fd48b2d70cc2f344b4d1e7095f4f1fe5,http://www.cs.cmu.edu/~epxing/papers/2015/Zhao_Xing_IJCV15.pdf
+a57ee5a8fb7618004dd1def8e14ef97aadaaeef5,http://pdfs.semanticscholar.org/f1f5/b603dd34ec26939517348d77df10992798f0.pdf
+20532b1f80b509f2332b6cfc0126c0f80f438f10,https://arxiv.org/pdf/1509.03248v1.pdf
+6a184f111d26787703f05ce1507eef5705fdda83,http://pdfs.semanticscholar.org/6a18/4f111d26787703f05ce1507eef5705fdda83.pdf
+82ccd62f70e669ec770daf11d9611cab0a13047e,http://www.csse.uwa.edu.au/~ajmal/papers/Farshid_DICTA2013.pdf
+0c75c7c54eec85e962b1720755381cdca3f57dfb,https://webpages.uncc.edu/~szhang16/paper/PAMI_face_landmark.pdf
+6ee2ea416382d659a0dddc7a88fc093accc2f8ee,https://pdfs.semanticscholar.org/6ee2/ea416382d659a0dddc7a88fc093accc2f8ee.pdf
+4d2975445007405f8cdcd74b7fd1dd547066f9b8,http://pdfs.semanticscholar.org/4d29/75445007405f8cdcd74b7fd1dd547066f9b8.pdf
+8fb611aca3bd8a3a0527ac0f38561a5a9a5b8483,http://pdfs.semanticscholar.org/8fb6/11aca3bd8a3a0527ac0f38561a5a9a5b8483.pdf
+056d5d942084428e97c374bb188efc386791e36d,http://pdfs.semanticscholar.org/056d/5d942084428e97c374bb188efc386791e36d.pdf
+ffd81d784549ee51a9b0b7b8aaf20d5581031b74,http://pdfs.semanticscholar.org/ffd8/1d784549ee51a9b0b7b8aaf20d5581031b74.pdf
+8d646ac6e5473398d668c1e35e3daa964d9eb0f6,http://pdfs.semanticscholar.org/8d64/6ac6e5473398d668c1e35e3daa964d9eb0f6.pdf
+2e20ed644e7d6e04dd7ab70084f1bf28f93f75e9,http://pdfs.semanticscholar.org/71f1/c8d39e1fbf1083a4616a3496f5c397a2daf5.pdf
+8b6fded4d08bf0b7c56966b60562ee096af1f0c4,http://pdfs.semanticscholar.org/8b6f/ded4d08bf0b7c56966b60562ee096af1f0c4.pdf
+970c0d6c0fd2ebe7c5921a45bc70f6345c844ff3,http://pdfs.semanticscholar.org/970c/0d6c0fd2ebe7c5921a45bc70f6345c844ff3.pdf
+9cd6a81a519545bf8aa9023f6e879521f85d4cd1,http://pdfs.semanticscholar.org/9cd6/a81a519545bf8aa9023f6e879521f85d4cd1.pdf
+177bc509dd0c7b8d388bb47403f28d6228c14b5c,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Sun_Deep_Learning_Face_2014_CVPR_paper.pdf
+a3ebacd8bcbc7ddbd5753935496e22a0f74dcf7b,http://pdfs.semanticscholar.org/a3eb/acd8bcbc7ddbd5753935496e22a0f74dcf7b.pdf
+266ed43dcea2e7db9f968b164ca08897539ca8dd,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Duong_Beyond_Principal_Components_2015_CVPR_paper.pdf
+703890b7a50d6535900a5883e8d2a6813ead3a03,http://pdfs.semanticscholar.org/7038/90b7a50d6535900a5883e8d2a6813ead3a03.pdf
+b29b42f7ab8d25d244bfc1413a8d608cbdc51855,http://pdfs.semanticscholar.org/b29b/42f7ab8d25d244bfc1413a8d608cbdc51855.pdf
+1fcdc113a5df2f45a1f4b3249c041d942a3a730b,http://vipl.ict.ac.cn/homepage/CVPR15Metric/ref/Reconstruction-Based%20Metric%20Learning%20for%20Unconstrained%20Face%20Verification_TIFS2015.pdf
+72282287f25c5419dc6fd9e89ec9d86d660dc0b5,https://arxiv.org/pdf/1609.07495v1.pdf
+4b3eaedac75ac419c2609e131ea9377ba8c3d4b8,https://teresaproject.eu/wp-content/uploads/2015/07/kossaifi_tzimiro_pantic_icip_2014.pdf
+3e3f305dac4fbb813e60ac778d6929012b4b745a,http://pdfs.semanticscholar.org/3e3f/305dac4fbb813e60ac778d6929012b4b745a.pdf
+090e4713bcccff52dcd0c01169591affd2af7e76,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Shao_What_Do_You_2013_ICCV_paper.pdf
+76d9f5623d3a478677d3f519c6e061813e58e833,http://pdfs.semanticscholar.org/76d9/f5623d3a478677d3f519c6e061813e58e833.pdf
+16820ccfb626dcdc893cc7735784aed9f63cbb70,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W12/papers/Azarmehr_Real-Time_Embedded_Age_2015_CVPR_paper.pdf
+7eb85bcb372261bad707c05e496a09609e27fdb3,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2014/W17/papers/Sathyanarayana_A_Compute-Efficient_Algorithm_2014_CVPR_paper.pdf
+d1dae2993bdbb2667d1439ff538ac928c0a593dc,http://pdfs.semanticscholar.org/d1da/e2993bdbb2667d1439ff538ac928c0a593dc.pdf
+62f0d8446adee6a5e8102053a63a61af07ac4098,http://www.vision.cs.chubu.ac.jp/MPRG/C_group/C072_yamashita2015.pdf
+6e97a99b2879634ecae962ddb8af7c1a0a653a82,http://pdfs.semanticscholar.org/7d37/7ba82df9cba0959cb910288415e568007792.pdf
+e328d19027297ac796aae2470e438fe0bd334449,http://pdfs.semanticscholar.org/e328/d19027297ac796aae2470e438fe0bd334449.pdf
+94aa8a3787385b13ee7c4fdd2b2b2a574ffcbd81,http://ibug.doc.ic.ac.uk/media/uploads/documents/p148-cheng.pdf
+24e6a28c133b7539a57896393a79d43dba46e0f6,http://arxiv.org/pdf/1605.02057v2.pdf
+34b7e826db49a16773e8747bc8dfa48e344e425d,http://www.comp.leeds.ac.uk/me/Publications/cvpr09_bsl.pdf
+68a04a3ae2086986877fee2c82ae68e3631d0356,http://pdfs.semanticscholar.org/68a0/4a3ae2086986877fee2c82ae68e3631d0356.pdf
+5ca23ceb0636dfc34c114d4af7276a588e0e8dac,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_SMC_2009/PDFs/116.pdf
+56f812661c3248ed28859d3b2b39e033b04ae6ae,http://www.cs.wisc.edu/~gdguo/myPapersOnWeb/CIVR08.pdf
+800cbbe16be0f7cb921842d54967c9a94eaa2a65,http://pdfs.semanticscholar.org/800c/bbe16be0f7cb921842d54967c9a94eaa2a65.pdf
+1513949773e3a47e11ab87d9a429864716aba42d,http://pdfs.semanticscholar.org/1513/949773e3a47e11ab87d9a429864716aba42d.pdf
+158e32579e38c29b26dfd33bf93e772e6211e188,http://pdfs.semanticscholar.org/158e/32579e38c29b26dfd33bf93e772e6211e188.pdf
+2be8e06bc3a4662d0e4f5bcfea45631b8beca4d0,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2B_028_ext.pdf
+29756b6b16d7b06ea211f21cdaeacad94533e8b4,http://pdfs.semanticscholar.org/2975/6b6b16d7b06ea211f21cdaeacad94533e8b4.pdf
+b3c60b642a1c64699ed069e3740a0edeabf1922c,http://pdfs.semanticscholar.org/b3c6/0b642a1c64699ed069e3740a0edeabf1922c.pdf
+8d2c43759e221f39ab1b4bf70d6891ffd19fb8da,https://www.researchgate.net/profile/Zhang_Pinzheng/publication/224711010_An_Automatic_Facial_Expression_Recognition_Approach_Based_on_Confusion-Crossed_Support_Vector_Machine_Tree/links/54658c630cf2052b509f3391.pdf
+66f02fbcad13c6ee5b421be2fc72485aaaf6fcb5,http://pdfs.semanticscholar.org/66f0/2fbcad13c6ee5b421be2fc72485aaaf6fcb5.pdf
+81b2a541d6c42679e946a5281b4b9dc603bc171c,http://pdfs.semanticscholar.org/81b2/a541d6c42679e946a5281b4b9dc603bc171c.pdf
+031055c241b92d66b6984643eb9e05fd605f24e2,https://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Cinbis_Multi-fold_MIL_Training_2014_CVPR_paper.pdf
+7f3a73babe733520112c0199ff8d26ddfc7038a0,http://pdfs.semanticscholar.org/7f3a/73babe733520112c0199ff8d26ddfc7038a0.pdf
+62d1a31b8acd2141d3a994f2d2ec7a3baf0e6dc4,http://pdfs.semanticscholar.org/62d1/a31b8acd2141d3a994f2d2ec7a3baf0e6dc4.pdf
+7f8d44e7fd2605d580683e47bb185de7f9ea9e28,http://pdfs.semanticscholar.org/c84f/88b2a764ddcc22c4971827d58024b6017496.pdf
+31a38fd2d9d4f34d2b54318021209fe5565b8f7f,http://www.umiacs.umd.edu/~huytho/papers/HoChellappa_TIP2013.pdf
+93971a49ef6cc88a139420349a1dfd85fb5d3f5c,http://pdfs.semanticscholar.org/9397/1a49ef6cc88a139420349a1dfd85fb5d3f5c.pdf
+0a1138276c52c734b67b30de0bf3f76b0351f097,https://ibug.doc.ic.ac.uk/media/uploads/documents/georgakis_dica.pdf
+27961bc8173ac84fdbecacd01e5ed6f7ed92d4bd,http://www.cse.msu.edu/rgroups/biometrics/Publications/Face/NiinumaHanJain_MultiviewFaceRecognition_PoseRegularization_BTAS13.pdf
+6a806978ca5cd593d0ccd8b3711b6ef2a163d810,http://pdfs.semanticscholar.org/6a80/6978ca5cd593d0ccd8b3711b6ef2a163d810.pdf
+80193dd633513c2d756c3f568ffa0ebc1bb5213e,http://pdfs.semanticscholar.org/a3d8/8154a1253338b45f950bcf9cbe91ba5271ee.pdf
+26f03693c50eb50a42c9117f107af488865f3dc1,http://pdfs.semanticscholar.org/26f0/3693c50eb50a42c9117f107af488865f3dc1.pdf
+57893403f543db75d1f4e7355283bdca11f3ab1b,http://www.doc.ic.ac.uk/~maja/PAMI-KoelstraEtAl-accepted.pdf
+27cccf992f54966feb2ab4831fab628334c742d8,http://pdfs.semanticscholar.org/27cc/cf992f54966feb2ab4831fab628334c742d8.pdf
+9458c518a6e2d40fb1d6ca1066d6a0c73e1d6b73,http://www.vision.ee.ethz.ch/~zzhiwu/papers/COX-Face-DB-TIP-final.pdf
+08a98822739bb8e6b1388c266938e10eaa01d903,http://homes.cs.washington.edu/~yoshi/papers/SensorSift_ACSAC_2012.pdf
+38a2661b6b995a3c4d69e7d5160b7596f89ce0e6,http://www.cs.colostate.edu/~draper/papers/zhang_ijcb14.pdf
+6ae75eaa7e9f1379338eae94fbb43664bb3c898a,https://www.researchgate.net/profile/Beom_Seok_Oh/publication/254016039_Fusion_of_structured_projections_for_cancelable_face_identity_verification/links/559156c108ae15962d8e145e.pdf?origin=publication_detail
+38861d0d3a0292c1f54153b303b0d791cbba1d50,http://pdfs.semanticscholar.org/3886/1d0d3a0292c1f54153b303b0d791cbba1d50.pdf
+978a219e07daa046244821b341631c41f91daccd,http://pdfs.semanticscholar.org/e2b9/f8b66d3f9080ccb14f058cf4798cb4d89241.pdf
+47eba2f95679e106e463e8296c1f61f6ddfe815b,https://www.csie.ntu.edu.tw/~cyy/publications/papers/Shih2017DCF.pdf
+4c078c2919c7bdc26ca2238fa1a79e0331898b56,http://pdfs.semanticscholar.org/4c07/8c2919c7bdc26ca2238fa1a79e0331898b56.pdf
+b55d0c9a022874fb78653a0004998a66f8242cad,http://pdfs.semanticscholar.org/b55d/0c9a022874fb78653a0004998a66f8242cad.pdf
+83fd5c23204147844a0528c21e645b757edd7af9,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W05/papers/Bulan_USDOT_Number_Localization_2015_CVPR_paper.pdf
+303517dfc327c3004ae866a6a340f16bab2ee3e3,http://pdfs.semanticscholar.org/3035/17dfc327c3004ae866a6a340f16bab2ee3e3.pdf
+06466276c4955257b15eff78ebc576662100f740,http://cmlab.csie.ntu.edu.tw/~sirius42/papers/sigir12.pdf
+1679943d22d60639b4670eba86665371295f52c3,http://pdfs.semanticscholar.org/1679/943d22d60639b4670eba86665371295f52c3.pdf
+7f82f8a416170e259b217186c9e38a9b05cb3eb4,http://pdfs.semanticscholar.org/7f82/f8a416170e259b217186c9e38a9b05cb3eb4.pdf
+7711a7404f1f1ac3a0107203936e6332f50ac30c,http://pdfs.semanticscholar.org/7711/a7404f1f1ac3a0107203936e6332f50ac30c.pdf
+66533107f9abdc7d1cb8f8795025fc7e78eb1122,http://pdfs.semanticscholar.org/6653/3107f9abdc7d1cb8f8795025fc7e78eb1122.pdf
+556545eec370b9d300fc044a1aa63fc44fd79b0f,http://www.cs.cmu.edu/~dhoiem/publications/cvpr2010_gangwang.pdf
+59be98f54bb4ed7a2984dc6a3c84b52d1caf44eb,http://www.ccvcl.org/~wei/pdf/CNNExpRecog_CamReady.pdf
+141eab5f7e164e4ef40dd7bc19df9c31bd200c5e,http://www.jdl.ac.cn/doc/2006/Local%20Linear%20Regression%20(LLR)%20for%20Pose%20Invariant%20Face%20Recognition.pdf
+0694b05cbc3ef5d1c5069a4bfb932a5a7b4d5ff0,http://pdfs.semanticscholar.org/0694/b05cbc3ef5d1c5069a4bfb932a5a7b4d5ff0.pdf
+1e9f1bbb751fe538dde9f612f60eb946747defaa,http://pdfs.semanticscholar.org/1e9f/1bbb751fe538dde9f612f60eb946747defaa.pdf
+60ce4a9602c27ad17a1366165033fe5e0cf68078,http://pdfs.semanticscholar.org/60ce/4a9602c27ad17a1366165033fe5e0cf68078.pdf
+df0e280cae018cebd5b16ad701ad101265c369fa,http://pdfs.semanticscholar.org/df0e/280cae018cebd5b16ad701ad101265c369fa.pdf
+f78863f4e7c4c57744715abe524ae4256be884a9,http://pdfs.semanticscholar.org/f788/63f4e7c4c57744715abe524ae4256be884a9.pdf
+5df376748fe5ccd87a724ef31d4fdb579dab693f,http://pdfs.semanticscholar.org/5df3/76748fe5ccd87a724ef31d4fdb579dab693f.pdf
+d59f18fcb07648381aa5232842eabba1db52383e,http://pdfs.semanticscholar.org/d59f/18fcb07648381aa5232842eabba1db52383e.pdf
+499f1d647d938235e9186d968b7bb2ab20f2726d,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Xiong_Face_Recognition_via_2013_ICCV_paper.pdf
+23d55061f7baf2ffa1c847d356d8f76d78ebc8c1,https://ipsjcva.springeropen.com/track/pdf/10.1186/s41074-017-0033-4?site=ipsjcva.springeropen.com
+d3c004125c71942846a9b32ae565c5216c068d1e,http://pdfs.semanticscholar.org/d3c0/04125c71942846a9b32ae565c5216c068d1e.pdf
+d0a21f94de312a0ff31657fd103d6b29db823caa,http://pdfs.semanticscholar.org/d0a2/1f94de312a0ff31657fd103d6b29db823caa.pdf
+443acd268126c777bc7194e185bec0984c3d1ae7,https://eprints.soton.ac.uk/402985/1/icpr-16.pdf
+8c8525e626c8857a4c6c385de34ffea31e7e41d1,http://arxiv.org/pdf/1505.07922.pdf
+38d8ff137ff753f04689e6b76119a44588e143f3,http://pdfs.semanticscholar.org/38d8/ff137ff753f04689e6b76119a44588e143f3.pdf
+ec05078be14a11157ac0e1c6b430ac886124589b,http://pdfs.semanticscholar.org/ec05/078be14a11157ac0e1c6b430ac886124589b.pdf
+52887969107956d59e1218abb84a1f834a314578,http://www.cmlab.csie.ntu.edu.tw/~yanying/paper/chen13travel.pdf
+2f489bd9bfb61a7d7165a2f05c03377a00072477,http://pdfs.semanticscholar.org/2f48/9bd9bfb61a7d7165a2f05c03377a00072477.pdf
+9c373438285101d47ab9332cdb0df6534e3b93d1,http://pdfs.semanticscholar.org/9c37/3438285101d47ab9332cdb0df6534e3b93d1.pdf
+6324fada2fb00bd55e7ff594cf1c41c918813030,http://pdfs.semanticscholar.org/6324/fada2fb00bd55e7ff594cf1c41c918813030.pdf
+1a7a17c4f97c68d68fbeefee1751d349b83eb14a,http://pdfs.semanticscholar.org/1a7a/17c4f97c68d68fbeefee1751d349b83eb14a.pdf
+4df3143922bcdf7db78eb91e6b5359d6ada004d2,http://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf
+2098983dd521e78746b3b3fa35a22eb2fa630299,http://pdfs.semanticscholar.org/2098/983dd521e78746b3b3fa35a22eb2fa630299.pdf
+05e3acc8afabc86109d8da4594f3c059cf5d561f,https://www.cs.rochester.edu/u/cxu22/p/cvpr2016_a2s2_poster.pdf
+0163d847307fae508d8f40ad193ee542c1e051b4,http://www.alessandrobergamo.com/data/compact_descriptors_supplementary.pdf
+4f7967158b257e86d66bdabfdc556c697d917d24,http://pdfs.semanticscholar.org/4f79/67158b257e86d66bdabfdc556c697d917d24.pdf
+25337690fed69033ef1ce6944e5b78c4f06ffb81,http://pdfs.semanticscholar.org/2533/7690fed69033ef1ce6944e5b78c4f06ffb81.pdf
+74156a11c2997517061df5629be78428e1f09cbd,http://cvrr.ucsd.edu/publications/2016/MartinRangeshTrivediICPR2016.pdf
+19666b9eefcbf764df7c1f5b6938031bcf777191,https://arxiv.org/pdf/1212.3913v4.pdf
+51224ed7519e71346076060092462e3d59ca3ab9,http://www.iis.ee.ic.ac.uk/ComputerVision/docs/pubs/Chao_TM_2014.pdf
+4e444db884b5272f3a41e4b68dc0d453d4ec1f4c,http://pdfs.semanticscholar.org/4e44/4db884b5272f3a41e4b68dc0d453d4ec1f4c.pdf
+6f26ab7edd971148723d9b4dc8ddf71b36be9bf7,http://pdfs.semanticscholar.org/6f26/ab7edd971148723d9b4dc8ddf71b36be9bf7.pdf
+27218ff58c3f0e7d7779fba3bb465d746749ed7c,http://pdfs.semanticscholar.org/2721/8ff58c3f0e7d7779fba3bb465d746749ed7c.pdf
+1459d4d16088379c3748322ab0835f50300d9a38,https://arxiv.org/pdf/1605.04039v1.pdf
+12d8730da5aab242795bdff17b30b6e0bac82998,http://pdfs.semanticscholar.org/12d8/730da5aab242795bdff17b30b6e0bac82998.pdf
+5c624382057b55e46af4dc4c055a33c90e8bf08a,http://www.researchgate.net/profile/Ngoc_Son_Vu/publication/224114972_Illumination-robust_face_recognition_using_retina_modeling/links/0fcfd507f06292b0a5000000.pdf
+176e5abddb87d029f85f60d1bbff67c66500e8c3,http://www.researchgate.net/profile/Tony_Han3/publication/220930104_Efficient_Facial_Attribute_Recognition_with_a_Spatial_Codebook/links/0046351affdf1f0d96000000.pdf
+9fc04a13eef99851136eadff52e98eb9caac919d,http://pdfs.semanticscholar.org/9fc0/4a13eef99851136eadff52e98eb9caac919d.pdf
+86c053c162c08bc3fe093cc10398b9e64367a100,http://pdfs.semanticscholar.org/86c0/53c162c08bc3fe093cc10398b9e64367a100.pdf
+c5be0feacec2860982fbbb4404cf98c654142489,http://pdfs.semanticscholar.org/c5be/0feacec2860982fbbb4404cf98c654142489.pdf
+4dd2be07b4f0393995b57196f8fc79d666b3aec5,http://mirlab.org/conference_papers/International_Conference/ICASSP%202014/papers/p3572-lee.pdf
+9d941a99e6578b41e4e32d57ece580c10d578b22,http://pdfs.semanticscholar.org/9d94/1a99e6578b41e4e32d57ece580c10d578b22.pdf
+6cefb70f4668ee6c0bf0c18ea36fd49dd60e8365,http://pdfs.semanticscholar.org/6cef/b70f4668ee6c0bf0c18ea36fd49dd60e8365.pdf
+2bab44d3a4c5ca79fb8f87abfef4456d326a0445,http://www.mirlab.org/conference_papers/International_Conference/ACM%202005/docs/mir25.pdf
+20a16efb03c366fa4180659c2b2a0c5024c679da,http://pdfs.semanticscholar.org/20a1/6efb03c366fa4180659c2b2a0c5024c679da.pdf
+60737db62fb5fab742371709485e4b2ddf64b7b2,http://dbgroup.cs.tsinghua.edu.cn/ligl/papers/p307-weng.pdf
+25f1f195c0efd84c221b62d1256a8625cb4b450c,http://www.ee.oulu.fi/~gyzhao/Papers/2007/04284844-ICME.pdf
+a79704c1ce7bf10c8753a8f51437ccbc61947d03,http://www.eecs.qmul.ac.uk/~cfshan/papers/shan-etal-icip05.pdf
+441bf5f7fe7d1a3939d8b200eca9b4bb619449a9,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W08/papers/Sundararajan_Head_Pose_Estimation_2015_CVPR_paper.pdf
+c2c3ff1778ed9c33c6e613417832505d33513c55,http://pdfs.semanticscholar.org/c2c3/ff1778ed9c33c6e613417832505d33513c55.pdf
+8e378ef01171b33c59c17ff5798f30293fe30686,http://pdfs.semanticscholar.org/8e37/8ef01171b33c59c17ff5798f30293fe30686.pdf
+1e799047e294267087ec1e2c385fac67074ee5c8,http://pdfs.semanticscholar.org/1e79/9047e294267087ec1e2c385fac67074ee5c8.pdf
+2e3d081c8f0e10f138314c4d2c11064a981c1327,http://arxiv.org/pdf/1603.06015v1.pdf
+51cc78bc719d7ff2956b645e2fb61bab59843d2b,http://pdfs.semanticscholar.org/51cc/78bc719d7ff2956b645e2fb61bab59843d2b.pdf
+3f63f9aaec8ba1fa801d131e3680900680f14139,http://dspace.nitrkl.ac.in/dspace/bitstream/2080/2288/1/4a.pdf
+0b6616f3ebff461e4b6c68205fcef1dae43e2a1a,http://pdfs.semanticscholar.org/0b66/16f3ebff461e4b6c68205fcef1dae43e2a1a.pdf
+33c3702b0eee6fc26fc49f79f9133f3dd7fa3f13,http://pdfs.semanticscholar.org/33c3/702b0eee6fc26fc49f79f9133f3dd7fa3f13.pdf
+27846b464369095f4909f093d11ed481277c8bba,http://pdfs.semanticscholar.org/2784/6b464369095f4909f093d11ed481277c8bba.pdf
+6b9aa288ce7740ec5ce9826c66d059ddcfd8dba9,http://pdfs.semanticscholar.org/6b9a/a288ce7740ec5ce9826c66d059ddcfd8dba9.pdf
+50eb2ee977f0f53ab4b39edc4be6b760a2b05f96,http://ajbasweb.com/old/ajbas/2017/April/1-11.pdf
+28a900a07c7cbce6b6297e4030be3229e094a950,http://pdfs.semanticscholar.org/28a9/00a07c7cbce6b6297e4030be3229e094a950.pdf
+4bbbee93519a4254736167b31be69ee1e537f942,https://arxiv.org/pdf/1611.05125v2.pdf
+5e97a1095f2811e0bc188f52380ea7c9c460c896,http://web.eecs.utk.edu/~rguo1/FacialParsing.pdf
+131e395c94999c55c53afead65d81be61cd349a4,http://pdfs.semanticscholar.org/2c3f/aeaf0fe103e1e6cb8c2116728e2a5c7b7f29.pdf
+6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c,http://openaccess.thecvf.com/content_cvpr_2016/papers/Niu_Ordinal_Regression_With_CVPR_2016_paper.pdf
+265af79627a3d7ccf64e9fe51c10e5268fee2aae,http://media.cs.tsinghua.edu.cn/~cvg/publications/ENGLISH%20JOURNAL%20PAPERS/A%20Mixture%20of%20Transformed%20Hidden%20Markov%20Models%20for%20Elastic%20Motion%20Estimation.pdf
+290136947fd44879d914085ee51d8a4f433765fa,http://www.cse.msu.edu/biometrics/Publications/Face/KlareJain_TaxonomyFacialFeatures_BTAS10.pdf
+9ed943f143d2deaac2efc9cf414b3092ed482610,http://www.jaist.ac.jp/~chen-fan/publication/ism2014-07032993.pdf
+6691dfa1a83a04fdc0177d8d70e3df79f606b10f,http://pdfs.semanticscholar.org/6691/dfa1a83a04fdc0177d8d70e3df79f606b10f.pdf
+918b72a47b7f378bde0ba29c908babf6dab6f833,http://pdfs.semanticscholar.org/918b/72a47b7f378bde0ba29c908babf6dab6f833.pdf
+c32c8bfadda8f44d40c6cd9058a4016ab1c27499,http://pdfs.semanticscholar.org/c32c/8bfadda8f44d40c6cd9058a4016ab1c27499.pdf
+58fa85ed57e661df93ca4cdb27d210afe5d2cdcd,http://www.dgcv.nii.ac.jp/Publications/Papers/2016/ICPR2016a.pdf
+f1748303cc02424704b3a35595610890229567f9,http://pdfs.semanticscholar.org/f174/8303cc02424704b3a35595610890229567f9.pdf
+17b46e2dad927836c689d6787ddb3387c6159ece,http://cs.uky.edu/~jacobs/papers/greenwell2014faceattributes.pdf
+60d765f2c0a1a674b68bee845f6c02741a49b44e,http://pdfs.semanticscholar.org/60d7/65f2c0a1a674b68bee845f6c02741a49b44e.pdf
+01c09acf0c046296643de4c8b55a9330e9c8a419,http://pdfs.semanticscholar.org/01c0/9acf0c046296643de4c8b55a9330e9c8a419.pdf
+ff7bc7a6d493e01ec8fa2b889bcaf6349101676e,http://pdfs.semanticscholar.org/ff7b/c7a6d493e01ec8fa2b889bcaf6349101676e.pdf
+9c4cc11d0df2de42d6593f5284cfdf3f05da402a,http://pdfs.semanticscholar.org/ce1a/f0e944260efced743f371ba0cb06878582b6.pdf
+fe961cbe4be0a35becd2d722f9f364ec3c26bd34,http://pdfs.semanticscholar.org/fe96/1cbe4be0a35becd2d722f9f364ec3c26bd34.pdf
+81fc86e86980a32c47410f0ba7b17665048141ec,http://pdfs.semanticscholar.org/81fc/86e86980a32c47410f0ba7b17665048141ec.pdf
+6ee8a94ccba10062172e5b31ee097c846821a822,http://pdfs.semanticscholar.org/6ee8/a94ccba10062172e5b31ee097c846821a822.pdf
+35490b021dcdec12882870a31dce9a687205ab5c,http://www.ecse.rpi.edu/homepages/qji/Papers/BN_learning_CVPR08.pdf
+1cee993dc42626caf5dbc26c0a7790ca6571d01a,http://www.iri.upc.edu/people/fmoreno/Publications/2005/pdf/Moreno_siggraphsketch2005.pdf
+1a9337d70a87d0e30966ecd1d7a9b0bbc7be161f,http://pdfs.semanticscholar.org/1a93/37d70a87d0e30966ecd1d7a9b0bbc7be161f.pdf
+6d66c98009018ac1512047e6bdfb525c35683b16,http://pdfs.semanticscholar.org/6d66/c98009018ac1512047e6bdfb525c35683b16.pdf
+d24dafe10ec43ac8fb98715b0e0bd8e479985260,http://pdfs.semanticscholar.org/d24d/afe10ec43ac8fb98715b0e0bd8e479985260.pdf
+429c3588ce54468090cc2cf56c9b328b549a86dc,http://pdfs.semanticscholar.org/429c/3588ce54468090cc2cf56c9b328b549a86dc.pdf
+0517d08da7550241fb2afb283fc05d37fce5d7b7,http://pdfs.semanticscholar.org/0517/d08da7550241fb2afb283fc05d37fce5d7b7.pdf
+0055c7f32fa6d4b1ad586d5211a7afb030ca08cc,http://pdfs.semanticscholar.org/0055/c7f32fa6d4b1ad586d5211a7afb030ca08cc.pdf
+40ee38d7ff2871761663d8634c3a4970ed1dc058,http://pdfs.semanticscholar.org/40ee/38d7ff2871761663d8634c3a4970ed1dc058.pdf
+b16580d27bbf4e17053f2f91bc1d0be12045e00b,http://pdfs.semanticscholar.org/b165/80d27bbf4e17053f2f91bc1d0be12045e00b.pdf
+04c2cda00e5536f4b1508cbd80041e9552880e67,http://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf
+1bc23c771688109bed9fd295ce82d7e702726327,http://pdfs.semanticscholar.org/1bc2/3c771688109bed9fd295ce82d7e702726327.pdf
+b03446a2de01126e6a06eb5d526df277fa36099f,http://pdfs.semanticscholar.org/b034/46a2de01126e6a06eb5d526df277fa36099f.pdf
+e3144f39f473e238374dd4005c8b83e19764ae9e,http://pdfs.semanticscholar.org/f42d/ca4a4426e5873a981712102aa961be34539a.pdf
+df51dfe55912d30fc2f792561e9e0c2b43179089,http://lib-arxiv-008.serverfarm.cornell.edu/pdf/1512.06009.pdf
+7bfe085c10761f5b0cc7f907bdafe1ff577223e0,http://pdfs.semanticscholar.org/c32b/aaa307da7376bcb5dfef7bb985c06d032a0f.pdf
+240d5390af19bb43761f112b0209771f19bfb696,http://pdfs.semanticscholar.org/4e10/0973f1540312df3465a087597018a7892310.pdf
+a0d6390dd28d802152f207940c7716fe5fae8760,http://pdfs.semanticscholar.org/a0d6/390dd28d802152f207940c7716fe5fae8760.pdf
+47a2727bd60e43f3253247b6d6f63faf2b67c54b,http://openaccess.thecvf.com/content_cvpr_2016/papers/Fu_Semi-Supervised_Vocabulary-Informed_Learning_CVPR_2016_paper.pdf
+77b1db2281292372c38926cc4aca32ef056011dc,http://pdfs.semanticscholar.org/77b1/db2281292372c38926cc4aca32ef056011dc.pdf
+a0021e3bbf942a88e13b67d83db7cf52e013abfd,http://pdfs.semanticscholar.org/a002/1e3bbf942a88e13b67d83db7cf52e013abfd.pdf
+7644d90efef157e61fe4d773d8a3b0bad5feccec,http://pdfs.semanticscholar.org/7644/d90efef157e61fe4d773d8a3b0bad5feccec.pdf
+7d41b67a641426cb8c0f659f0ba74cdb60e7159a,http://eprints.soton.ac.uk/389641/1/isba-16-camera.pdf
+6dd2a0f9ca8a5fee12edec1485c0699770b4cfdf,http://pdfs.semanticscholar.org/6dd2/a0f9ca8a5fee12edec1485c0699770b4cfdf.pdf
+356b431d4f7a2a0a38cf971c84568207dcdbf189,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Xiong_Recognize_Complex_Events_2015_CVPR_paper.pdf
+b1429e4d3dd3412e92a37d2f9e0721ea719a9b9e,http://www.hamedkiani.com/uploads/5/1/8/8/51882963/176.pdf
+9c1860de6d6e991a45325c997bf9651c8a9d716f,http://pdfs.semanticscholar.org/d02c/54192dbd0798b43231efe1159d6b4375ad36.pdf
+96f4a1dd1146064d1586ebe86293d02e8480d181,http://pdfs.semanticscholar.org/96f4/a1dd1146064d1586ebe86293d02e8480d181.pdf
+8aae23847e1beb4a6d51881750ce36822ca7ed0b,http://pdfs.semanticscholar.org/8aae/23847e1beb4a6d51881750ce36822ca7ed0b.pdf
+3152e89963b8a4028c4abf6e1dc19e91c4c5a8f4,http://pdfs.semanticscholar.org/3152/e89963b8a4028c4abf6e1dc19e91c4c5a8f4.pdf
+28b9d92baea72ec665c54d9d32743cf7bc0912a7,http://pdfs.semanticscholar.org/a7f8/b6bf6aa7a12773ad9bcf1d040d4d74d12493.pdf
+366595171c9f4696ec5eef7c3686114fd3f116ad,http://pdfs.semanticscholar.org/3665/95171c9f4696ec5eef7c3686114fd3f116ad.pdf
+23ebbbba11c6ca785b0589543bf5675883283a57,https://pdfs.semanticscholar.org/23eb/bbba11c6ca785b0589543bf5675883283a57.pdf
+0066caed1238de95a431d836d8e6e551b3cde391,http://humansensing.cs.cmu.edu/sites/default/files/7de_la_torre_frade_fernando_2007_3.pdf
+9b0489f2d5739213ef8c3e2e18739c4353c3a3b7,http://pdfs.semanticscholar.org/9b04/89f2d5739213ef8c3e2e18739c4353c3a3b7.pdf
+3c63fa505a44902f13698ec10d7f259b1d0878ee,http://www.ece.ucr.edu/~amitrc/publications/TMM2015.pdf
+293193d24d5c4d2975e836034bbb2329b71c4fe7,http://pdfs.semanticscholar.org/2931/93d24d5c4d2975e836034bbb2329b71c4fe7.pdf
+9391618c09a51f72a1c30b2e890f4fac1f595ebd,http://pdfs.semanticscholar.org/9391/618c09a51f72a1c30b2e890f4fac1f595ebd.pdf
+4b4106614c1d553365bad75d7866bff0de6056ed,http://pdfs.semanticscholar.org/4b41/06614c1d553365bad75d7866bff0de6056ed.pdf
+ac12ba5bf81de83991210b4cd95b4ad048317681,http://pdfs.semanticscholar.org/ac12/ba5bf81de83991210b4cd95b4ad048317681.pdf
+5ec94adc9e0f282597f943ea9f4502a2a34ecfc2,http://pdfs.semanticscholar.org/5ec9/4adc9e0f282597f943ea9f4502a2a34ecfc2.pdf
+9a42c519f0aaa68debbe9df00b090ca446d25bc4,http://pdfs.semanticscholar.org/9a42/c519f0aaa68debbe9df00b090ca446d25bc4.pdf
+4cdae53cebaeeebc3d07cf6cd36fecb2946f3e56,https://arxiv.org/pdf/1612.00523v1.pdf
+4a484d97e402ed0365d6cf162f5a60a4d8000ea0,http://pdfs.semanticscholar.org/4a48/4d97e402ed0365d6cf162f5a60a4d8000ea0.pdf
+4e4e8fc9bbee816e5c751d13f0d9218380d74b8f,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553711.pdf
+0744af11a025e9c072ef6ad102af208e79cc6f44,https://www.researchgate.net/profile/Pascal_Frossard/publication/233799235_Learning_Smooth_Pattern_Transformation_Manifolds/links/00463533951057e9bb000000.pdf
+01e12be4097fa8c94cabeef0ad61498c8e7762f2,http://pdfs.semanticscholar.org/10bf/f1957b8a4adce86efd10596186d905976c16.pdf
+6308e9c991125ee6734baa3ec93c697211237df8,http://www.ifp.illinois.edu/~jyang29/papers/ICME-SSR.pdf
+18c72175ddbb7d5956d180b65a96005c100f6014,http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf
+389334e9a0d84bc54bcd5b94b4ce4c5d9d6a2f26,http://pdfs.semanticscholar.org/3893/34e9a0d84bc54bcd5b94b4ce4c5d9d6a2f26.pdf
+1255afbf86423c171349e874b3ac297de19f00cd,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_SSCI_2015/data/7560a203.pdf
+4ef0a6817a7736c5641dc52cbc62737e2e063420,http://pdfs.semanticscholar.org/4ef0/a6817a7736c5641dc52cbc62737e2e063420.pdf
+2d93a9aa8bed51d0d1b940c73ac32c046ebf1eb8,http://pdfs.semanticscholar.org/2d93/a9aa8bed51d0d1b940c73ac32c046ebf1eb8.pdf
+64782a2bc5da11b1b18ca20cecf7bdc26a538d68,http://pdfs.semanticscholar.org/6478/2a2bc5da11b1b18ca20cecf7bdc26a538d68.pdf
+3802c97f925cb03bac91d9db13d8b777dfd29dcc,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Martins_Non-Parametric_Bayesian_Constrained_2014_CVPR_paper.pdf
+7085d21f483743007cc6a8e3fa01d8bdf592ad33,http://www.meeting.edu.cn/meeting/UploadPapers/1282699022328.pdf
+316d51aaa37891d730ffded7b9d42946abea837f,http://pdfs.semanticscholar.org/9f00/3a5e727b99f792e600b93b6458b9cda3f0a5.pdf
+2d3c17ced03e4b6c4b014490fe3d40c62d02e914,http://pdfs.semanticscholar.org/2d3c/17ced03e4b6c4b014490fe3d40c62d02e914.pdf
+57c59011614c43f51a509e10717e47505c776389,http://users.cecs.anu.edu.au/~basura/papers/CVPR_2017_Workshop.pdf
+27a0a7837f9114143717fc63294a6500565294c2,http://pdfs.semanticscholar.org/27a0/a7837f9114143717fc63294a6500565294c2.pdf
+a51d5c2f8db48a42446cc4f1718c75ac9303cb7a,http://pdfs.semanticscholar.org/a51d/5c2f8db48a42446cc4f1718c75ac9303cb7a.pdf
+aaeb8b634bb96a372b972f63ec1dc4db62e7b62a,http://pdfs.semanticscholar.org/aaeb/8b634bb96a372b972f63ec1dc4db62e7b62a.pdf
+aa52910c8f95e91e9fc96a1aefd406ffa66d797d,http://pdfs.semanticscholar.org/aa52/910c8f95e91e9fc96a1aefd406ffa66d797d.pdf
+3b410ae97e4564bc19d6c37bc44ada2dcd608552,http://pdfs.semanticscholar.org/3b41/0ae97e4564bc19d6c37bc44ada2dcd608552.pdf
+ddaa8add8528857712424fd57179e5db6885df7c,http://pdfs.semanticscholar.org/ff63/a8e8e462d15c9d59ac66025a043d3c299aea.pdf
+2a9b398d358cf04dc608a298d36d305659e8f607,http://www.pitt.edu/~jeffcohn/biblio/MahoorFG2011.pdf
+7643861bb492bf303b25d0306462f8fb7dc29878,https://www-i6.informatik.rwth-aachen.de/publications/download/991/Hanselmann-FG-2015.pdf
+052880031be0a760a5b606b2ad3d22f237e8af70,http://pdfs.semanticscholar.org/0528/80031be0a760a5b606b2ad3d22f237e8af70.pdf
+7c4c442e9c04c6b98cd2aa221e9d7be15efd8663,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Wang_Classifier_Learning_With_2015_CVPR_paper.pdf
+396a19e29853f31736ca171a3f40c506ef418a9f,http://pdfs.semanticscholar.org/396a/19e29853f31736ca171a3f40c506ef418a9f.pdf
+14fdec563788af3202ce71c021dd8b300ae33051,http://pdfs.semanticscholar.org/14fd/ec563788af3202ce71c021dd8b300ae33051.pdf
+0ccc535d12ad2142a8310d957cc468bbe4c63647,http://arxiv.org/pdf/1510.03979v1.pdf
+590628a9584e500f3e7f349ba7e2046c8c273fcf,http://pdfs.semanticscholar.org/6893/c573d7abd3847d6ea2f0e79b6924ca124372.pdf
+4534d78f8beb8aad409f7bfcd857ec7f19247715,http://pdfs.semanticscholar.org/4534/d78f8beb8aad409f7bfcd857ec7f19247715.pdf
+00f0ed04defec19b4843b5b16557d8d0ccc5bb42,http://pdfs.semanticscholar.org/00f0/ed04defec19b4843b5b16557d8d0ccc5bb42.pdf
+00d931eccab929be33caea207547989ae7c1ef39,http://pdfs.semanticscholar.org/00d9/31eccab929be33caea207547989ae7c1ef39.pdf
+65817963194702f059bae07eadbf6486f18f4a0a,http://arxiv.org/pdf/1505.04141v2.pdf
+a0fb5b079dd1ee5ac6ac575fe29f4418fdb0e670,http://webhost.uoradea.ro/ibuciu/ISCAS2006_Buciu.pdf
+4d9a02d080636e9666c4d1cc438b9893391ec6c7,http://www.iainm.com/iainm/Publications_files/2010_The%20Extended.pdf
+70db3a0d2ca8a797153cc68506b8650908cb0ada,http://pdfs.semanticscholar.org/70db/3a0d2ca8a797153cc68506b8650908cb0ada.pdf
+55138c2b127ebdcc508503112bf1d1eeb5395604,http://pdfs.semanticscholar.org/7815/368a8f6474910d3faf798198ff9dae836360.pdf
+62e0380a86e92709fe2c64e6a71ed94d152c6643,http://vislab.ucr.edu/PUBLICATIONS/pubs/Journal%20and%20Conference%20Papers/after10-1-1997/Conference/2012/Facial%20emotion%20recognition%20with%20expression%20energy12.pdf
+7bce4f4e85a3bfcd6bfb3b173b2769b064fce0ed,http://pdfs.semanticscholar.org/7bce/4f4e85a3bfcd6bfb3b173b2769b064fce0ed.pdf
+adf5caca605e07ee40a3b3408f7c7c92a09b0f70,http://pdfs.semanticscholar.org/adf5/caca605e07ee40a3b3408f7c7c92a09b0f70.pdf
+8c7f4c11b0c9e8edf62a0f5e6cf0dd9d2da431fa,http://pdfs.semanticscholar.org/8c7f/4c11b0c9e8edf62a0f5e6cf0dd9d2da431fa.pdf
+0aa8a0203e5f406feb1815f9b3dd49907f5fd05b,http://www.iti.gr/~bmezaris/publications/spl11_preprint.pdf
+490a217a4e9a30563f3a4442a7d04f0ea34442c8,http://pdfs.semanticscholar.org/490a/217a4e9a30563f3a4442a7d04f0ea34442c8.pdf
+4ff11512e4fde3d1a109546d9c61a963d4391add,http://pdfs.semanticscholar.org/4ff1/1512e4fde3d1a109546d9c61a963d4391add.pdf
+59eefa01c067a33a0b9bad31c882e2710748ea24,http://pdfs.semanticscholar.org/59ee/fa01c067a33a0b9bad31c882e2710748ea24.pdf
+5850aab97e1709b45ac26bb7d205e2accc798a87,http://pdfs.semanticscholar.org/5850/aab97e1709b45ac26bb7d205e2accc798a87.pdf
+3a76e9fc2e89bdd10a9818f7249fbf61d216efc4,http://openaccess.thecvf.com/content_ICCV_2017/papers/Nagpal_Face_Sketch_Matching_ICCV_2017_paper.pdf
+8ee62f7d59aa949b4a943453824e03f4ce19e500,http://arxiv.org/pdf/1603.09732v1.pdf
+64e75f53ff3991099c3fb72ceca55b76544374e5,http://pdfs.semanticscholar.org/eb48/804eefe4c61f62178d2a83a9ae0097091897.pdf
+1bdfb3deae6e6c0df6537efcd1d7edcb4d7a96e9,http://pdfs.semanticscholar.org/1bdf/b3deae6e6c0df6537efcd1d7edcb4d7a96e9.pdf
+09cf3f1764ab1029f3a7d57b70ae5d5954486d69,http://pdfs.semanticscholar.org/09cf/3f1764ab1029f3a7d57b70ae5d5954486d69.pdf
+daba8f0717f3f47c272f018d0a466a205eba6395,https://pdfs.semanticscholar.org/daba/8f0717f3f47c272f018d0a466a205eba6395.pdf
+1f9ae272bb4151817866511bd970bffb22981a49,http://pdfs.semanticscholar.org/1f9a/e272bb4151817866511bd970bffb22981a49.pdf
+9b246c88a0435fd9f6d10dc88f47a1944dd8f89e,http://pdfs.semanticscholar.org/ffe3/a5a7c0faebd1719f7c77b5f7e05cae61a9ad.pdf
+52f23e1a386c87b0dab8bfdf9694c781cd0a3984,http://pdfs.semanticscholar.org/52f2/3e1a386c87b0dab8bfdf9694c781cd0a3984.pdf
+230527d37421c28b7387c54e203deda64564e1b7,http://pdfs.semanticscholar.org/2305/27d37421c28b7387c54e203deda64564e1b7.pdf
+83e093a07efcf795db5e3aa3576531d61557dd0d,http://pdfs.semanticscholar.org/83e0/93a07efcf795db5e3aa3576531d61557dd0d.pdf
+40b10e330a5511a6a45f42c8b86da222504c717f,http://pdfs.semanticscholar.org/40b1/0e330a5511a6a45f42c8b86da222504c717f.pdf
+9990e0b05f34b586ffccdc89de2f8b0e5d427067,http://pdfs.semanticscholar.org/9990/e0b05f34b586ffccdc89de2f8b0e5d427067.pdf
+0e7c70321462694757511a1776f53d629a1b38f3,http://pdfs.semanticscholar.org/0e7c/70321462694757511a1776f53d629a1b38f3.pdf
+1989a1f9ce18d8c2a0cee3196fe6fa363aab80c2,http://www.es.ele.tue.nl/~sander/publications/icme16.pdf
+2dced31a14401d465cd115902bf8f508d79de076,http://pdfs.semanticscholar.org/2dce/d31a14401d465cd115902bf8f508d79de076.pdf
+ab1900b5d7cf3317d17193e9327d57b97e24d2fc,http://pdfs.semanticscholar.org/ab19/00b5d7cf3317d17193e9327d57b97e24d2fc.pdf
+04250e037dce3a438d8f49a4400566457190f4e2,http://pdfs.semanticscholar.org/0425/0e037dce3a438d8f49a4400566457190f4e2.pdf
+2679e4f84c5e773cae31cef158eb358af475e22f,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w6/papers/Liu_Adaptive_Deep_Metric_CVPR_2017_paper.pdf
+c5a561c662fc2b195ff80d2655cc5a13a44ffd2d,http://www.cs.toronto.edu/~suzanne/papers/JamiesonEtAlPAMI.pdf
+c8db8764f9d8f5d44e739bbcb663fbfc0a40fb3d,http://pdfs.semanticscholar.org/c8db/8764f9d8f5d44e739bbcb663fbfc0a40fb3d.pdf
+d33b26794ea6d744bba7110d2d4365b752d7246f,http://pdfs.semanticscholar.org/d33b/26794ea6d744bba7110d2d4365b752d7246f.pdf
+59d45281707b85a33d6f50c6ac6b148eedd71a25,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Cheng_Rank_Minimization_across_2013_ICCV_paper.pdf
+0d467adaf936b112f570970c5210bdb3c626a717,http://pdfs.semanticscholar.org/0d46/7adaf936b112f570970c5210bdb3c626a717.pdf
+4d49c6cff198cccb21f4fa35fd75cbe99cfcbf27,http://pdfs.semanticscholar.org/4d49/c6cff198cccb21f4fa35fd75cbe99cfcbf27.pdf
+556b9aaf1bc15c928718bc46322d70c691111158,https://www.ecse.rpi.edu/~cvrl/lwh/myPublications/ICPR08_BNlearning_camera.pdf
+047f6afa87f48de7e32e14229844d1587185ce45,http://pdfs.semanticscholar.org/047f/6afa87f48de7e32e14229844d1587185ce45.pdf
+32b8c9fd4e3f44c371960eb0074b42515f318ee7,http://pdfs.semanticscholar.org/32b8/c9fd4e3f44c371960eb0074b42515f318ee7.pdf
+12cb3bf6abf63d190f849880b1703ccc183692fe,http://pdfs.semanticscholar.org/12cb/3bf6abf63d190f849880b1703ccc183692fe.pdf
+1d0128b9f96f4c11c034d41581f23eb4b4dd7780,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Snape_Automatic_Construction_Of_2015_CVPR_paper.pdf
+407bb798ab153bf6156ba2956f8cf93256b6910a,http://pdfs.semanticscholar.org/407b/b798ab153bf6156ba2956f8cf93256b6910a.pdf
+6fe2efbcb860767f6bb271edbb48640adbd806c3,https://eprints.soton.ac.uk/359808/1/version9.pdf
+fa24bf887d3b3f6f58f8305dcd076f0ccc30272a,http://pdfs.semanticscholar.org/fa24/bf887d3b3f6f58f8305dcd076f0ccc30272a.pdf
+7a85b3ab0efb6b6fcb034ce13145156ee9d10598,http://pdfs.semanticscholar.org/7a85/b3ab0efb6b6fcb034ce13145156ee9d10598.pdf
+793e7f1ba18848908da30cbad14323b0389fd2a8,http://pdfs.semanticscholar.org/793e/7f1ba18848908da30cbad14323b0389fd2a8.pdf
+ae1de0359f4ed53918824271c888b7b36b8a5d41,http://pdfs.semanticscholar.org/ae1d/e0359f4ed53918824271c888b7b36b8a5d41.pdf
+59f325e63f21b95d2b4e2700c461f0136aecc171,http://nlpr-web.ia.ac.cn/2011papers/gjhy/gh13.pdf
+9758f3fd94239a8d974217fe12599f88fb413f3d,http://pdfs.semanticscholar.org/9758/f3fd94239a8d974217fe12599f88fb413f3d.pdf
+69d29012d17cdf0a2e59546ccbbe46fa49afcd68,https://arxiv.org/pdf/1404.6818v1.pdf
+c30982d6d9bbe470a760c168002ed9d66e1718a2,http://facstaff.elon.edu/sspurlock/papers/spurlock15_head_pose.pdf
+162dfd0d2c9f3621d600e8a3790745395ab25ebc,http://cse.seu.edu.cn/people/xgeng/LDL/resource/cvpr14a.pdf
+2241eda10b76efd84f3c05bdd836619b4a3df97e,http://arxiv.org/pdf/1506.01342v5.pdf
+2988f24908e912259d7a34c84b0edaf7ea50e2b3,http://pdfs.semanticscholar.org/a779/e9432c3b6bfdcdbb1827757c3b8bf7c3aa4a.pdf
+3ebce6710135d1f9b652815e59323858a7c60025,http://pdfs.semanticscholar.org/3ebc/e6710135d1f9b652815e59323858a7c60025.pdf
+c590c6c171392e9f66aab1bce337470c43b48f39,http://pdfs.semanticscholar.org/c590/c6c171392e9f66aab1bce337470c43b48f39.pdf
+ae89b7748d25878c4dc17bdaa39dd63e9d442a0d,http://hal.inria.fr/docs/00/87/00/59/PDF/Ozerov_et_al_ICIP_2013.pdf
+3be027448ad49a79816cd21dcfcce5f4e1cec8a8,http://www.cs.utexas.edu/~grauman/papers/kovashka_iccv2011.pdf
+081286ede247c5789081502a700b378b6223f94b,http://pdfs.semanticscholar.org/0812/86ede247c5789081502a700b378b6223f94b.pdf
+02cc96ad997102b7c55e177ac876db3b91b4e72c,http://www.micc.unifi.it/wp-content/uploads/2015/12/2015_museum-visitors-dataset.pdf
+4307e8f33f9e6c07c8fc2aeafc30b22836649d8c,http://pdfs.semanticscholar.org/ebff/0956c07185f7bb4e4ee5c7cc0aaa74aca05e.pdf
+52885fa403efbab5ef21274282edd98b9ca70cbf,http://www.aiia.csd.auth.gr/EN/cor_baayen/Discriminant_Graph_Structures_FER.pdf
+2bb2ba7c96d40e269fc6a2d5384c739ff9fa16eb,http://jmcauley.ucsd.edu/data/amazon/sigir_draft.pdf
+0b835284b8f1f45f87b0ce004a4ad2aca1d9e153,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w16/papers/Kapadia_Cartooning_for_Enhanced_CVPR_2017_paper.pdf
+87309bdb2b9d1fb8916303e3866eca6e3452c27d,http://pdfs.semanticscholar.org/8730/9bdb2b9d1fb8916303e3866eca6e3452c27d.pdf
+5f6ab4543cc38f23d0339e3037a952df7bcf696b,http://www.public.asu.edu/~bli24/Papers/ICPR2016_video2vec.pdf
+05a7be10fa9af8fb33ae2b5b72d108415519a698,http://jankautz.com/publications/MMFusion4Video_ACMM16.pdf
+0c377fcbc3bbd35386b6ed4768beda7b5111eec6,http://www.ecse.rpi.edu/~qji/Papers/face_exp_pami.pdf
+4cd0da974af9356027a31b8485a34a24b57b8b90,https://arxiv.org/pdf/1703.00862v2.pdf
+6d8eef8f8d6cd8436c55018e6ca5c5907b31ac19,http://pdfs.semanticscholar.org/6d8e/ef8f8d6cd8436c55018e6ca5c5907b31ac19.pdf
+bff77a3b80f40cefe79550bf9e220fb82a74c084,http://pdfs.semanticscholar.org/bff7/7a3b80f40cefe79550bf9e220fb82a74c084.pdf
+02567fd428a675ca91a0c6786f47f3e35881bcbd,https://arxiv.org/pdf/1611.01731.pdf
+788a7b59ea72e23ef4f86dc9abb4450efefeca41,http://eprints.eemcs.utwente.nl/26840/01/Pantic_Robust_Statistical_Face_Frontalization.pdf
+3fe4109ded039ac9d58eb9f5baa5327af30ad8b6,http://www.cvc.uab.cat/~ahernandez/files/CVPR2010STGRABCUT.pdf
+58db008b204d0c3c6744f280e8367b4057173259,http://pdfs.semanticscholar.org/58db/008b204d0c3c6744f280e8367b4057173259.pdf
+7fc3442c8b4c96300ad3e860ee0310edb086de94,http://pdfs.semanticscholar.org/82f3/b7cacc15e026fd3a7639091d54162f6ae064.pdf
+3bebb79f8f49aa11dd4f6d60d903172db02bf4f3,http://hct.ece.ubc.ca/publications/pdf/oleinikov-etal-wacv2014.pdf
+dd033d4886f2e687b82d893a2c14dae02962ea70,http://pdfs.semanticscholar.org/dd03/3d4886f2e687b82d893a2c14dae02962ea70.pdf
+69b18d62330711bfd7f01a45f97aaec71e9ea6a5,http://pdfs.semanticscholar.org/69b1/8d62330711bfd7f01a45f97aaec71e9ea6a5.pdf
+ad37d01c4787d169daff7da52e80e2018aab6358,http://ibug.doc.ic.ac.uk/media/uploads/documents/bidirectional_newton_aam.pdf
+e4a1b46b5c639d433d21b34b788df8d81b518729,http://pdfs.semanticscholar.org/e4a1/b46b5c639d433d21b34b788df8d81b518729.pdf
+eb6ee56e085ebf473da990d032a4249437a3e462,http://www-scf.usc.edu/~chuntinh/doc/Age_Gender_Classification_APSIPA_2017.pdf
+bbf1396eb826b3826c5a800975047beabde2f0de,http://pdfs.semanticscholar.org/bbf1/396eb826b3826c5a800975047beabde2f0de.pdf
+1dbbec4ad8429788e16e9f3a79a80549a0d7ac7b,http://pdfs.semanticscholar.org/9d44/ef9e28d7722c388091ec4c1fa7c05f085e53.pdf
+680d662c30739521f5c4b76845cb341dce010735,http://people.cs.umass.edu/~smaji/papers/maji15part.pdf
+0ca66283f4fb7dbc682f789fcf6d6732006befd5,http://pdfs.semanticscholar.org/0ca6/6283f4fb7dbc682f789fcf6d6732006befd5.pdf
+25d3e122fec578a14226dc7c007fb1f05ddf97f7,https://ibug.doc.ic.ac.uk/media/uploads/documents/pdf17.pdf
+f0681fc08f4d7198dcde803d69ca62f09f3db6c5,http://pdfs.semanticscholar.org/f068/1fc08f4d7198dcde803d69ca62f09f3db6c5.pdf
+92c4636962b719542deb984bd2bf75af405b574c,http://www.umiacs.umd.edu/~arijit/projects/Active_clustering/active_clustering_ijcv.pdf
+213a579af9e4f57f071b884aa872651372b661fd,http://www.robots.ox.ac.uk/~vgg/publications/2013/Charles13a/charles13a.pdf
+7a9ef21a7f59a47ce53b1dff2dd49a8289bb5098,http://pdfs.semanticscholar.org/7a9e/f21a7f59a47ce53b1dff2dd49a8289bb5098.pdf
+023be757b1769ecb0db810c95c010310d7daf00b,http://pdfs.semanticscholar.org/023b/e757b1769ecb0db810c95c010310d7daf00b.pdf
+941166547968081463398c9eb041f00eb04304f7,http://people.duke.edu/~qq3/pub/ExpressionDictionary_TIP.pdf
+d83ae5926b05894fcda0bc89bdc621e4f21272da,http://pdfs.semanticscholar.org/d83a/e5926b05894fcda0bc89bdc621e4f21272da.pdf
+faa29975169ba3bbb954e518bc9814a5819876f6,http://pdfs.semanticscholar.org/faa2/9975169ba3bbb954e518bc9814a5819876f6.pdf
+40205181ed1406a6f101c5e38c5b4b9b583d06bc,http://pdfs.semanticscholar.org/4020/5181ed1406a6f101c5e38c5b4b9b583d06bc.pdf
+68996c28bc050158f025a17908eb4bc805c3ee55,https://www.researchgate.net/profile/M_Yeasin/publication/4082331_From_facial_expression_to_level_of_interest_a_spatio-temporal_approach/links/54983d0a0cf2519f5a1dda62.pdf
+6d91da37627c05150cb40cac323ca12a91965759,http://pdfs.semanticscholar.org/6d91/da37627c05150cb40cac323ca12a91965759.pdf
+4b3f425274b0c2297d136f8833a31866db2f2aec,https://arxiv.org/pdf/1705.01567v2.pdf
+3765c26362ad1095dfe6744c6d52494ea106a42c,http://www.vision.ee.ethz.ch/~tquack/gammeter_quack_iccv2009.pdf
+ffc5a9610df0341369aa75c0331ef021de0a02a9,http://pdfs.semanticscholar.org/ffc5/a9610df0341369aa75c0331ef021de0a02a9.pdf
+69fb98e11df56b5d7ec7d45442af274889e4be52,http://pdfs.semanticscholar.org/69fb/98e11df56b5d7ec7d45442af274889e4be52.pdf
+35e4b6c20756cd6388a3c0012b58acee14ffa604,http://pdfs.semanticscholar.org/35e4/b6c20756cd6388a3c0012b58acee14ffa604.pdf
+8d91f06af4ef65193f3943005922f25dbb483ee4,http://pdfs.semanticscholar.org/8d91/f06af4ef65193f3943005922f25dbb483ee4.pdf
+5778d49c8d8d127351eee35047b8d0dc90defe85,http://pdfs.semanticscholar.org/ec31/6c1c182de9d7fe73c7fbbc1a121a7e43c100.pdf
+7e507370124a2ac66fb7a228d75be032ddd083cc,http://pdfs.semanticscholar.org/8992/4d7418df1380044af9ab706a019418952141.pdf
+7f59657c883f77dc26393c2f9ed3d19bdf51137b,http://pdfs.semanticscholar.org/7f59/657c883f77dc26393c2f9ed3d19bdf51137b.pdf
+35f1bcff4552632419742bbb6e1927ef5e998eb4,https://arxiv.org/pdf/1703.02521v1.pdf
+10f66f6550d74b817a3fdcef7fdeba13ccdba51c,http://pdfs.semanticscholar.org/10f6/6f6550d74b817a3fdcef7fdeba13ccdba51c.pdf
+e0dc6f1b740479098c1d397a7bc0962991b5e294,http://pdfs.semanticscholar.org/e0dc/6f1b740479098c1d397a7bc0962991b5e294.pdf
+0f65c91d0ed218eaa7137a0f6ad2f2d731cf8dab,http://arxiv.org/pdf/1401.5311v1.pdf
+8b8728edc536020bc4871dc66b26a191f6658f7c,http://pdfs.semanticscholar.org/8b87/28edc536020bc4871dc66b26a191f6658f7c.pdf
+1439bf9ba7ff97df9a2da6dae4784e68794da184,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2013/W16/papers/Ptucha_LGE-KSVD_Flexible_Dictionary_2013_CVPR_paper.pdf
+d6fb606e538763282e3942a5fb45c696ba38aee6,https://pdfs.semanticscholar.org/d6fb/606e538763282e3942a5fb45c696ba38aee6.pdf
+2642810e6c74d900f653f9a800c0e6a14ca2e1c7,http://openaccess.thecvf.com/content_iccv_2015/papers/Liu_Projection_Bank_From_ICCV_2015_paper.pdf
+2aa2b312da1554a7f3e48f71f2fce7ade6d5bf40,http://www.cl.cam.ac.uk/~pr10/publications/fg17.pdf
+748e72af01ba4ee742df65e9c030cacec88ce506,http://pdfs.semanticscholar.org/748e/72af01ba4ee742df65e9c030cacec88ce506.pdf
+c9424d64b12a4abe0af201e7b641409e182babab,http://pdfs.semanticscholar.org/c942/4d64b12a4abe0af201e7b641409e182babab.pdf
+b43b6551ecc556557b63edb8b0dc39901ed0343b,http://pdfs.semanticscholar.org/b43b/6551ecc556557b63edb8b0dc39901ed0343b.pdf
+642c66df8d0085d97dc5179f735eed82abf110d0,http://research.microsoft.com/users/leizhang/Paper/CVPR05-Shuicheng-Coupled.pdf
+1768909f779869c0e83d53f6c91764f41c338ab5,http://arxiv.org/pdf/1506.08959v1.pdf
+bcac3a870501c5510df80c2a5631f371f2f6f74a,http://pdfs.semanticscholar.org/bcac/3a870501c5510df80c2a5631f371f2f6f74a.pdf
+89e7d23e0c6a1d636f2da68aaef58efee36b718b,http://pdfs.semanticscholar.org/89e7/d23e0c6a1d636f2da68aaef58efee36b718b.pdf
+8bf243817112ac0aa1348b40a065bb0b735cdb9c,http://pdfs.semanticscholar.org/8bf2/43817112ac0aa1348b40a065bb0b735cdb9c.pdf
+958c599a6f01678513849637bec5dc5dba592394,http://pdfs.semanticscholar.org/958c/599a6f01678513849637bec5dc5dba592394.pdf
+6448d23f317babb8d5a327f92e199aaa45f0efdc,http://pdfs.semanticscholar.org/6448/d23f317babb8d5a327f92e199aaa45f0efdc.pdf
+0c5ddfa02982dcad47704888b271997c4de0674b,http://pdfs.semanticscholar.org/0c5d/dfa02982dcad47704888b271997c4de0674b.pdf
+1922ad4978ab92ce0d23acc4c7441a8812f157e5,http://personal.ie.cuhk.edu.hk/~ccloy/files/cvpr_2015_alignment.pdf
+861c650f403834163a2c27467a50713ceca37a3e,http://personal.stevens.edu/~hli18/data/papers/PEPICCV2013_CameraReady.pdf
+2aea27352406a2066ddae5fad6f3f13afdc90be9,http://arxiv.org/pdf/1507.05699v4.pdf
+832e1d128059dd5ed5fa5a0b0f021a025903f9d5,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Dapogny_Pairwise_Conditional_Random_ICCV_2015_paper.pdf
+9110c589c6e78daf4affd8e318d843dc750fb71a,http://pdfs.semanticscholar.org/9110/c589c6e78daf4affd8e318d843dc750fb71a.pdf
+5e0e516226413ea1e973f1a24e2fdedde98e7ec0,http://pdfs.semanticscholar.org/74ce/97da57ec848db660ee69dec709f226c74f43.pdf
+2e3c893ac11e1a566971f64ae30ac4a1f36f5bb5,http://pdfs.semanticscholar.org/cb94/9e849b20ddc157aaf648dca1e8c71463c288.pdf
+0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e,http://pdfs.semanticscholar.org/0d2d/d4fc016cb6a517d8fb43a7cc3ff62964832e.pdf
+04522dc16114c88dfb0ebd3b95050fdbd4193b90,http://www.svcl.ucsd.edu/publications/conference/2005/crv05/FES.pdf
+c1dfabe36a4db26bf378417985a6aacb0f769735,http://pdfs.semanticscholar.org/c1df/abe36a4db26bf378417985a6aacb0f769735.pdf
+6ed22b934e382c6f72402747d51aa50994cfd97b,http://www.ifp.illinois.edu/~jyang29/papers/WACV16-Expression.pdf
+2b0102d77d3d3f9bc55420d862075934f5c85bec,http://openaccess.thecvf.com/content_cvpr_2016/papers/Shao_Slicing_Convolutional_Neural_CVPR_2016_paper.pdf
+99726ad232cef837f37914b63de70d8c5101f4e2,http://pdfs.semanticscholar.org/9972/6ad232cef837f37914b63de70d8c5101f4e2.pdf
+435642641312364e45f4989fac0901b205c49d53,http://pdfs.semanticscholar.org/4356/42641312364e45f4989fac0901b205c49d53.pdf
+b9cedd1960d5c025be55ade0a0aa81b75a6efa61,http://pdfs.semanticscholar.org/b9ce/dd1960d5c025be55ade0a0aa81b75a6efa61.pdf
+1be18a701d5af2d8088db3e6aaa5b9b1d54b6fd3,http://pdfs.semanticscholar.org/1be1/8a701d5af2d8088db3e6aaa5b9b1d54b6fd3.pdf
+961a5d5750f18e91e28a767b3cb234a77aac8305,http://pdfs.semanticscholar.org/961a/5d5750f18e91e28a767b3cb234a77aac8305.pdf
+036c41d67b49e5b0a578a401eb31e5f46b3624e0,http://www.infomus.org/Events/proceedings/ACII2015/papers/Main_Conference/M2_Poster/Poster_Teaser_5/ACII2015_submission_19.pdf
+3dda181be266950ba1280b61eb63ac11777029f9,http://pdfs.semanticscholar.org/3dda/181be266950ba1280b61eb63ac11777029f9.pdf
+a947c21a15fb0a02378c36271e1addf6b6e110eb,http://www.researchgate.net/profile/Bryan_Conroy/publication/220734216_The_grouped_two-sided_orthogonal_Procrustes_problem/links/02e7e52541c3f27987000000.pdf
+6d207360148ec3991b70952315cb3f1e8899e977,http://www.researchgate.net/profile/Edwin_Hancock/publication/224649584_Estimating_Cast_Shadows_using_SFS_and_Class-based_Surface_Completion/links/004635239fd1ed7ac5000000.pdf
+63b29886577a37032c7e32d8899a6f69b11a90de,http://pdfs.semanticscholar.org/63b2/9886577a37032c7e32d8899a6f69b11a90de.pdf
+3773e5d195f796b0b7df1fca6e0d1466ad84b5e7,http://pdfs.semanticscholar.org/3773/e5d195f796b0b7df1fca6e0d1466ad84b5e7.pdf
+8bb21b1f8d6952d77cae95b4e0b8964c9e0201b0,http://pdfs.semanticscholar.org/8bb2/1b1f8d6952d77cae95b4e0b8964c9e0201b0.pdf
+7fc5b6130e9d474dfb49d9612b6aa0297d481c8e,http://pdfs.semanticscholar.org/7fc5/b6130e9d474dfb49d9612b6aa0297d481c8e.pdf
+32ecbbd76fdce249f9109594eee2d52a1cafdfc7,http://pdfs.semanticscholar.org/32ec/bbd76fdce249f9109594eee2d52a1cafdfc7.pdf
+fdf533eeb1306ba418b09210387833bdf27bb756,http://pdfs.semanticscholar.org/fdf5/33eeb1306ba418b09210387833bdf27bb756.pdf
+34a41ec648d082270697b9ee264f0baf4ffb5c8d,http://pdfs.semanticscholar.org/34a4/1ec648d082270697b9ee264f0baf4ffb5c8d.pdf
+9e1c3b8b1653337094c1b9dba389e8533bc885b0,http://pdfs.semanticscholar.org/9e1c/3b8b1653337094c1b9dba389e8533bc885b0.pdf
+47f5f740e225281c02c8a2ae809be201458a854f,http://pdfs.semanticscholar.org/5241/ad03e9276d4acd1c51eaa7f44e2d04d07b68.pdf
+71e6a46b32a8163c9eda69e1badcee6348f1f56a,http://pdfs.semanticscholar.org/71e6/a46b32a8163c9eda69e1badcee6348f1f56a.pdf
+790aa543151312aef3f7102d64ea699a1d15cb29,http://arxiv.org/pdf/1607.06290v1.pdf
+37b6d6577541ed991435eaf899a2f82fdd72c790,http://pdfs.semanticscholar.org/37b6/d6577541ed991435eaf899a2f82fdd72c790.pdf
+6a8a3c604591e7dd4346611c14dbef0c8ce9ba54,http://pdfs.semanticscholar.org/6a8a/3c604591e7dd4346611c14dbef0c8ce9ba54.pdf
+7cf8a841aad5b7bdbea46a7bb820790e9ce12d0b,http://pdfs.semanticscholar.org/7cf8/a841aad5b7bdbea46a7bb820790e9ce12d0b.pdf
+63eefc775bcd8ccad343433fc7a1dd8e1e5ee796,http://www.lv-nus.org/papers%5C2008%5C2008_J_6.pdf
+4542273a157bfd4740645a6129d1784d1df775d2,http://pdfs.semanticscholar.org/4542/273a157bfd4740645a6129d1784d1df775d2.pdf
+52bf00df3b970e017e4e2f8079202460f1c0e1bd,http://pdfs.semanticscholar.org/52bf/00df3b970e017e4e2f8079202460f1c0e1bd.pdf
+e1f6e2651b7294951b5eab5d2322336af1f676dc,http://pdfs.semanticscholar.org/e1f6/e2651b7294951b5eab5d2322336af1f676dc.pdf
+a6db73f10084ce6a4186363ea9d7475a9a658a11,http://pdfs.semanticscholar.org/afce/ebbea6e9130cf22142206c19a19cda226b13.pdf
+bc910ca355277359130da841a589a36446616262,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Huang_Conditional_High-Order_Boltzmann_ICCV_2015_paper.pdf
+c0ee89dc2dad76147780f96294de9e421348c1f4,http://pdfs.semanticscholar.org/c0ee/89dc2dad76147780f96294de9e421348c1f4.pdf
+8a54f8fcaeeede72641d4b3701bab1fe3c2f730a,http://pdfs.semanticscholar.org/acf8/b9607ca39f20b9b1956b8761b37f14eb4284.pdf
+38183fe28add21693729ddeaf3c8a90a2d5caea3,https://arxiv.org/pdf/1706.09876v1.pdf
+e3bb83684817c7815f5005561a85c23942b1f46b,http://pdfs.semanticscholar.org/e3bb/83684817c7815f5005561a85c23942b1f46b.pdf
+2cdd5b50a67e4615cb0892beaac12664ec53b81f,http://people.eecs.berkeley.edu/~junyanz/projects/mirrormirror/mirrormirror_small.pdf
+92115b620c7f653c847f43b6c4ff0470c8e55dab,http://pdfs.semanticscholar.org/a77c/798d06060ece81c620458e4586819e75ae15.pdf
+e4e95b8bca585a15f13ef1ab4f48a884cd6ecfcc,http://pdfs.semanticscholar.org/e4e9/5b8bca585a15f13ef1ab4f48a884cd6ecfcc.pdf
+f8ed5f2c71e1a647a82677df24e70cc46d2f12a8,http://pdfs.semanticscholar.org/f8ed/5f2c71e1a647a82677df24e70cc46d2f12a8.pdf
+0830c9b9f207007d5e07f5269ffba003235e4eff,http://pdfs.semanticscholar.org/cf2e/1ebb9609f46af6de0c15b4f48d03e37e54ba.pdf
+dd2f6a1ba3650075245a422319d86002e1e87808,http://pdfs.semanticscholar.org/dd2f/6a1ba3650075245a422319d86002e1e87808.pdf
+37619564574856c6184005830deda4310d3ca580,http://arxiv.org/pdf/1508.04389v1.pdf
+4682fee7dc045aea7177d7f3bfe344aabf153bd5,http://www.cs.utexas.edu/~cv-fall2012/slides/elad-paper.pdf
+171d8a39b9e3d21231004f7008397d5056ff23af,http://openaccess.thecvf.com/content_cvpr_2017/papers/Wu_Simultaneous_Facial_Landmark_CVPR_2017_paper.pdf
+48c41ffab7ff19d24e8df3092f0b5812c1d3fb6e,http://www.iri.upc.edu/files/scidoc/1938-Multi-Modal-Embedding-for-Main-Product-Detection-in-Fashion.pdf
+6fa7a1c8a858157deee3b582099e5e234798bb4a,http://biometrics.nist.gov/cs_links/ibpc2014/presentations/14_wednesday_gentric_IBPC14_morpho.pdf
+37eb666b7eb225ffdafc6f318639bea7f0ba9a24,http://pdfs.semanticscholar.org/37eb/666b7eb225ffdafc6f318639bea7f0ba9a24.pdf
+91a1945b9c40af4944a6cdcfe59a0999de4f650a,http://ccbr2017.org/ccbr%20PPT/95%E5%8F%B7%E8%AE%BA%E6%96%87-%E7%94%B3%E6%99%9A%E9%9C%9E%20wanxiahen-ccbr.pdf
+d3d71a110f26872c69cf25df70043f7615edcf92,https://www.cise.ufl.edu/~dihong/assets/07094272.pdf
+585260468d023ffc95f0e539c3fa87254c28510b,http://pdfs.semanticscholar.org/5852/60468d023ffc95f0e539c3fa87254c28510b.pdf
+5ea9cba00f74d2e113a10c484ebe4b5780493964,http://pdfs.semanticscholar.org/5ea9/cba00f74d2e113a10c484ebe4b5780493964.pdf
+97540905e4a9fdf425989a794f024776f28a3fa9,http://pdfs.semanticscholar.org/cc5a/1bf68ba00c20415e43684c6f75ce3fbc176c.pdf
+5366573e96a1dadfcd4fd592f83017e378a0e185,http://pdfs.semanticscholar.org/5366/573e96a1dadfcd4fd592f83017e378a0e185.pdf
+0aae88cf63090ea5b2c80cd014ef4837bcbaadd8,http://pdfs.semanticscholar.org/0aae/88cf63090ea5b2c80cd014ef4837bcbaadd8.pdf
+0ec67c69e0975cfcbd8ba787cc0889aec4cc5399,http://pdfs.semanticscholar.org/1af3/6a1fc18328e2a0310bc4208ef35ba882bdc1.pdf
+2910fcd11fafee3f9339387929221f4fc1160973,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2013/W16/papers/Becker_Evaluating_Open-Universe_Face_2013_CVPR_paper.pdf
+e73b9b16adcf4339ff4d6723e61502489c50c2d9,http://pdfs.semanticscholar.org/e73b/9b16adcf4339ff4d6723e61502489c50c2d9.pdf
+b4d694961d3cde43ccef7d8fcf1061fe0d8f97f3,http://cs.adelaide.edu.au/~javen/pub/ShiLiShe10.pdf
+68f9cb5ee129e2b9477faf01181cd7e3099d1824,http://pdfs.semanticscholar.org/68f9/cb5ee129e2b9477faf01181cd7e3099d1824.pdf
+74325f3d9aea3a810fe4eab8863d1a48c099de11,http://pdfs.semanticscholar.org/7432/5f3d9aea3a810fe4eab8863d1a48c099de11.pdf
+6eb1b5935b0613a41b72fd9e7e53a3c0b32651e9,http://pdfs.semanticscholar.org/6eb1/b5935b0613a41b72fd9e7e53a3c0b32651e9.pdf
+935a7793cbb8f102924fa34fce1049727de865c2,https://ivi.fnwi.uva.nl/isis/publications/2015/AlnajarICIP20015/AlnajarICIP20015.pdf
+6888f3402039a36028d0a7e2c3df6db94f5cb9bb,http://pdfs.semanticscholar.org/6888/f3402039a36028d0a7e2c3df6db94f5cb9bb.pdf
+a25106a76af723ba9b09308a7dcf4f76d9283589,http://pdfs.semanticscholar.org/a251/06a76af723ba9b09308a7dcf4f76d9283589.pdf
+ea6f5c8e12513dbaca6bbdff495ef2975b8001bd,http://pdfs.semanticscholar.org/ea6f/5c8e12513dbaca6bbdff495ef2975b8001bd.pdf
+097104fc731a15fad07479f4f2c4be2e071054a2,http://pdfs.semanticscholar.org/dbad/94c3506a342f55f54388e162e8481ae8b184.pdf
+3d1af6c531ebcb4321607bcef8d9dc6aa9f0dc5a,http://www.cse.msu.edu/~rossarun/BiometricsTextBook/Papers/Security/Teoh_BioHash_PAMI06.pdf
+33ba256d59aefe27735a30b51caf0554e5e3a1df,http://pdfs.semanticscholar.org/33ba/256d59aefe27735a30b51caf0554e5e3a1df.pdf
+4ed54d5093d240cc3644e4212f162a11ae7d1e3b,http://pdfs.semanticscholar.org/4ed5/4d5093d240cc3644e4212f162a11ae7d1e3b.pdf
+42dc36550912bc40f7faa195c60ff6ffc04e7cd6,http://pdfs.semanticscholar.org/42dc/36550912bc40f7faa195c60ff6ffc04e7cd6.pdf
+beb49072f5ba79ed24750108c593e8982715498e,http://pdfs.semanticscholar.org/beb4/9072f5ba79ed24750108c593e8982715498e.pdf
+a7267bc781a4e3e79213bb9c4925dd551ea1f5c4,http://pdfs.semanticscholar.org/a726/7bc781a4e3e79213bb9c4925dd551ea1f5c4.pdf
+994b52bf884c71a28b4f5be4eda6baaacad1beee,http://www.yugangjiang.info/publication/BIGMM15-summit-invited.pdf
+4d15254f6f31356963cc70319ce416d28d8924a3,http://pdfs.semanticscholar.org/4d15/254f6f31356963cc70319ce416d28d8924a3.pdf
+2cdd9e445e7259117b995516025fcfc02fa7eebb,http://hub.hku.hk/bitstream/10722/61208/1/Content.pdf
+e200c3f2849d56e08056484f3b6183aa43c0f13a,http://pdfs.semanticscholar.org/e200/c3f2849d56e08056484f3b6183aa43c0f13a.pdf
+4d9c02567e7b9e065108eb83ea3f03fcff880462,http://www.cv-foundation.org/openaccess/content_cvpr_2016_workshops/w28/papers/Peng_Towards_Facial_Expression_CVPR_2016_paper.pdf
+ed04e161c953d345bcf5b910991d7566f7c486f7,http://pdfs.semanticscholar.org/ed04/e161c953d345bcf5b910991d7566f7c486f7.pdf
+0332ae32aeaf8fdd8cae59a608dc8ea14c6e3136,https://ibug.doc.ic.ac.uk/media/uploads/documents/booth2017large.pdf
+97f9c3bdb4668f3e140ded2da33fe704fc81f3ea,http://pdfs.semanticscholar.org/97f9/c3bdb4668f3e140ded2da33fe704fc81f3ea.pdf
+6515fe829d0b31a5e1f4dc2970a78684237f6edb,http://pdfs.semanticscholar.org/6515/fe829d0b31a5e1f4dc2970a78684237f6edb.pdf
+14ce7635ff18318e7094417d0f92acbec6669f1c,http://www.cs.tau.ac.il/~wolf/papers/deepface_11_01_2013.pdf
+096eb8b4b977aaf274c271058feff14c99d46af3,http://www.dtic.mil/dtic/tr/fulltext/u2/a585819.pdf
+ca54d0a128b96b150baef392bf7e498793a6371f,http://pdfs.semanticscholar.org/ca54/d0a128b96b150baef392bf7e498793a6371f.pdf
+43836d69f00275ba2f3d135f0ca9cf88d1209a87,https://ipsjcva.springeropen.com/track/pdf/10.1186/s41074-017-0030-7?site=ipsjcva.springeropen.com
+b3154d981eca98416074538e091778cbc031ca29,http://pdfs.semanticscholar.org/b315/4d981eca98416074538e091778cbc031ca29.pdf
+24bf94f8090daf9bda56d54e42009067839b20df,https://www.computer.org/csdl/trans/tp/2015/06/06940284.pdf
+ada73060c0813d957576be471756fa7190d1e72d,http://pdfs.semanticscholar.org/ada7/3060c0813d957576be471756fa7190d1e72d.pdf
+d9a1dd762383213741de4c1c1fd9fccf44e6480d,http://pdfs.semanticscholar.org/d9a1/dd762383213741de4c1c1fd9fccf44e6480d.pdf
+5f5906168235613c81ad2129e2431a0e5ef2b6e4,https://arxiv.org/pdf/1601.00199v1.pdf
+aece472ba64007f2e86300cc3486c84597f02ec7,http://doras.dcu.ie/439/1/ieee_smap_2007.pdf
+02c993d361dddba9737d79e7251feca026288c9c,http://eprints.eemcs.utwente.nl/26377/01/Automatic_player_detection_and_recognition_in_images_using_AdaBoost.pdf
+16395b40e19cbc6d5b82543039ffff2a06363845,https://arxiv.org/pdf/1605.03222v1.pdf
+498fd231d7983433dac37f3c97fb1eafcf065268,http://pdfs.semanticscholar.org/498f/d231d7983433dac37f3c97fb1eafcf065268.pdf
+a46086e210c98dcb6cb9a211286ef906c580f4e8,http://pdfs.semanticscholar.org/dc94/43e3ae2fe70282b1b30e3eda3717b58c0808.pdf
+b5667d087aafcf6b91f3c77aa90cee1ac185f8f1,http://www-ee.ccny.cuny.edu/wwwn/yltian/Publications/ICIP17.pdf
+ff398e7b6584d9a692e70c2170b4eecaddd78357,http://pdfs.semanticscholar.org/ff39/8e7b6584d9a692e70c2170b4eecaddd78357.pdf
+2e8a0cc071017845ee6f67bd0633b8167a47abed,https://arxiv.org/pdf/1303.6021v1.pdf
+7bf0a1aa1d0228a51d24c0c3a83eceb937a6ae25,http://pdfs.semanticscholar.org/7bf0/a1aa1d0228a51d24c0c3a83eceb937a6ae25.pdf
+3538d2b5f7ab393387ce138611ffa325b6400774,http://pdfs.semanticscholar.org/3538/d2b5f7ab393387ce138611ffa325b6400774.pdf
+070de852bc6eb275d7ca3a9cdde8f6be8795d1a3,http://www.cs.bath.ac.uk/~dpc/D3DFACS/ICCV_final_2011.pdf
+e8686663aec64f4414eba6a0f821ab9eb9f93e38,http://pdfs.semanticscholar.org/e868/6663aec64f4414eba6a0f821ab9eb9f93e38.pdf
+06d7ef72fae1be206070b9119fb6b61ce4699587,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Zafeiriou_On_One-Shot_Similarity_2013_ICCV_paper.pdf
+26a89701f4d41806ce8dbc8ca00d901b68442d45,http://pdfs.semanticscholar.org/b7d8/fea52643236bd9b0dd7eec5f1cde248d10f6.pdf
+205e4d6e0de81c7dd6c83b737ffdd4519f4f7ffa,http://poseidon.csd.auth.gr/papers/PUBLISHED/CONFERENCE/pdf/2009/Vretos_2009_ICIP.pdf
+d1881993c446ea693bbf7f7d6e750798bf958900,http://pdfs.semanticscholar.org/d188/1993c446ea693bbf7f7d6e750798bf958900.pdf
+0cd8895b4a8f16618686f622522726991ca2a324,http://pdfs.semanticscholar.org/0cd8/895b4a8f16618686f622522726991ca2a324.pdf
+cda4fb9df653b5721ad4fe8b4a88468a410e55ec,http://pdfs.semanticscholar.org/cda4/fb9df653b5721ad4fe8b4a88468a410e55ec.pdf
+8886b21f97c114a23b24dc7025bbf42885adc3a7,http://researchprofiles.herts.ac.uk/portal/files/10195320/UH_eval_deid_face_final.pdf
+78f438ed17f08bfe71dfb205ac447ce0561250c6,http://pdfs.semanticscholar.org/78f4/38ed17f08bfe71dfb205ac447ce0561250c6.pdf
+cb9092fe74ea6a5b2bb56e9226f1c88f96094388,http://pdfs.semanticscholar.org/cb90/92fe74ea6a5b2bb56e9226f1c88f96094388.pdf
+5495e224ac7b45b9edc5cfeabbb754d8a40a879b,http://pdfs.semanticscholar.org/5495/e224ac7b45b9edc5cfeabbb754d8a40a879b.pdf
+176fc31a686fb70d73f1fa354bf043ad236f7aa3,http://www.cs.brown.edu/~black/Papers/ofevaltr.pdf
+c7685fdbee2d96ef056a89ab4fa43df5aeae7ba7,http://staff.science.uva.nl/~nicu/publications/SMC04.pdf
+6e12ba518816cbc2d987200c461dc907fd19f533,http://pdfs.semanticscholar.org/6e12/ba518816cbc2d987200c461dc907fd19f533.pdf
+be07f2950771d318a78d2b64de340394f7d6b717,http://pdfs.semanticscholar.org/be07/f2950771d318a78d2b64de340394f7d6b717.pdf
+8f5ce25e6e1047e1bf5b782d045e1dac29ca747e,http://poseidon.csd.auth.gr/papers/PUBLISHED/JOURNAL/pdf/Kotsia07b.pdf
+0ced7b814ec3bb9aebe0fcf0cac3d78f36361eae,http://pdfs.semanticscholar.org/0ced/7b814ec3bb9aebe0fcf0cac3d78f36361eae.pdf
+8e461978359b056d1b4770508e7a567dbed49776,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Sikka_LOMo_Latent_Ordinal_CVPR_2016_paper.pdf
+00dc942f23f2d52ab8c8b76b6016d9deed8c468d,http://pdfs.semanticscholar.org/00dc/942f23f2d52ab8c8b76b6016d9deed8c468d.pdf
+68a2ee5c5b76b6feeb3170aaff09b1566ec2cdf5,http://pdfs.semanticscholar.org/68a2/ee5c5b76b6feeb3170aaff09b1566ec2cdf5.pdf
+437a720c6f6fc1959ba95e48e487eb3767b4e508,http://pdfs.semanticscholar.org/d4f0/960c6587379ad7df7928c256776e25952c60.pdf
+0ac664519b2b8abfb8966dafe60d093037275573,http://face.cs.kit.edu/download/publications/supplemental_material.pdf
+2d84c0d96332bb4fbd8acced98e726aabbf15591,http://pdfs.semanticscholar.org/2d84/c0d96332bb4fbd8acced98e726aabbf15591.pdf
+86ed5b9121c02bcf26900913f2b5ea58ba23508f,http://openaccess.thecvf.com/content_cvpr_2016/papers/Wang_Actions__Transformations_CVPR_2016_paper.pdf
+74408cfd748ad5553cba8ab64e5f83da14875ae8,http://pdfs.semanticscholar.org/7440/8cfd748ad5553cba8ab64e5f83da14875ae8.pdf
+3b9b200e76a35178da940279d566bbb7dfebb787,http://pdfs.semanticscholar.org/3b9b/200e76a35178da940279d566bbb7dfebb787.pdf
+6a5fe819d2b72b6ca6565a0de117c2b3be448b02,http://pdfs.semanticscholar.org/6a5f/e819d2b72b6ca6565a0de117c2b3be448b02.pdf
+8c4ea76e67a2a99339a8c4decd877fe0aa2d8e82,http://pdfs.semanticscholar.org/e09e/aa666f354d4262d5ff4cf4ef54a960561bbe.pdf
+3f4c262d836b2867a53eefb959057350bf7219c9,http://pdfs.semanticscholar.org/3f4c/262d836b2867a53eefb959057350bf7219c9.pdf
+fffa2943808509fdbd2fc817cc5366752e57664a,http://pdfs.semanticscholar.org/fffa/2943808509fdbd2fc817cc5366752e57664a.pdf
+9c9ef6a46fb6395702fad622f03ceeffbada06e5,http://pdfs.semanticscholar.org/f1e3/d1d26e39f98608037b195761f61fa7532925.pdf
+7f97a36a5a634c30de5a8e8b2d1c812ca9f971ae,http://pdfs.semanticscholar.org/7f97/a36a5a634c30de5a8e8b2d1c812ca9f971ae.pdf
+23aba7b878544004b5dfa64f649697d9f082b0cf,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W02/papers/Wang_Locality-Constrained_Discriminative_Learning_2015_CVPR_paper.pdf
+e1c59e00458b4dee3f0e683ed265735f33187f77,http://pdfs.semanticscholar.org/e1c5/9e00458b4dee3f0e683ed265735f33187f77.pdf
+5aad56cfa2bac5d6635df4184047e809f8fecca2,http://chenlab.ece.cornell.edu/people/Amir/publications/picture_password.pdf
+f9ccfe000092121a2016639732cdb368378256d5,http://pdfs.semanticscholar.org/f9cc/fe000092121a2016639732cdb368378256d5.pdf
+7859667ed6c05a467dfc8a322ecd0f5e2337db56,http://pdfs.semanticscholar.org/7859/667ed6c05a467dfc8a322ecd0f5e2337db56.pdf
+60cdcf75e97e88638ec973f468598ae7f75c59b4,http://www.cse.cuhk.edu.hk/~lyu/paper_pdf/tmm08face.pdf
+0969e0dc05fca21ff572ada75cb4b703c8212e80,http://pdfs.semanticscholar.org/0969/e0dc05fca21ff572ada75cb4b703c8212e80.pdf
+d588dd4f305cdea37add2e9bb3d769df98efe880,http://pdfs.semanticscholar.org/d588/dd4f305cdea37add2e9bb3d769df98efe880.pdf
+57b052cf826b24739cd7749b632f85f4b7bcf90b,http://pdfs.semanticscholar.org/57b0/52cf826b24739cd7749b632f85f4b7bcf90b.pdf
+1f9b2f70c24a567207752989c5bd4907442a9d0f,http://pdfs.semanticscholar.org/1f9b/2f70c24a567207752989c5bd4907442a9d0f.pdf
+3db75962857a602cae65f60f202d311eb4627b41,https://pdfs.semanticscholar.org/3db7/5962857a602cae65f60f202d311eb4627b41.pdf
+ed28e8367fcb7df7e51963add9e2d85b46e2d5d6,http://pdfs.semanticscholar.org/ed28/e8367fcb7df7e51963add9e2d85b46e2d5d6.pdf
+00214fe1319113e6649435cae386019235474789,http://pdfs.semanticscholar.org/0021/4fe1319113e6649435cae386019235474789.pdf
+d67dcaf6e44afd30c5602172c4eec1e484fc7fb7,http://pdfs.semanticscholar.org/d67d/caf6e44afd30c5602172c4eec1e484fc7fb7.pdf
+91d2fe6fdf180e8427c65ffb3d895bf9f0ec4fa0,http://pdfs.semanticscholar.org/94c3/624c54f8f070a9dc82a41cbf7a888fe8f477.pdf
+013909077ad843eb6df7a3e8e290cfd5575999d2,http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf
+90b11e095c807a23f517d94523a4da6ae6b12c76,https://arxiv.org/pdf/1609.08475v1.pdf
+08a1fc55d03e4a73cad447e5c9ec79a6630f3e2d,http://pdfs.semanticscholar.org/b680/2fb123c594a9fd621ae576651201fcc4329a.pdf
+500b92578e4deff98ce20e6017124e6d2053b451,http://eprints.eemcs.utwente.nl/25818/01/Pantic_Incremental_Face_Alignment_in_the_Wild.pdf
+0ae9cc6a06cfd03d95eee4eca9ed77b818b59cb7,http://pdfs.semanticscholar.org/0ae9/cc6a06cfd03d95eee4eca9ed77b818b59cb7.pdf
+656ef752b363a24f84cc1aeba91e4fa3d5dd66ba,http://pdfs.semanticscholar.org/656e/f752b363a24f84cc1aeba91e4fa3d5dd66ba.pdf
+053b263b4a4ccc6f9097ad28ebf39c2957254dfb,http://pdfs.semanticscholar.org/7a49/4b4489408ec3adea15817978ecd2e733f5fe.pdf
+a6ebe013b639f0f79def4c219f585b8a012be04f,http://pdfs.semanticscholar.org/a6eb/e013b639f0f79def4c219f585b8a012be04f.pdf
+59690814e916d1c0e7aa9190678ba847cbd0046f,http://figment.cse.usf.edu/~sfefilat/data/papers/ThBCT8.7.pdf
+0b87d91fbda61cdea79a4b4dcdcb6d579f063884,http://pdfs.semanticscholar.org/0b87/d91fbda61cdea79a4b4dcdcb6d579f063884.pdf
+17035089959a14fe644ab1d3b160586c67327db2,http://pdfs.semanticscholar.org/1703/5089959a14fe644ab1d3b160586c67327db2.pdf
+bbe1332b4d83986542f5db359aee1fd9b9ba9967,http://pdfs.semanticscholar.org/bbe1/332b4d83986542f5db359aee1fd9b9ba9967.pdf
+bd07d1f68486052b7e4429dccecdb8deab1924db,http://pdfs.semanticscholar.org/bd07/d1f68486052b7e4429dccecdb8deab1924db.pdf
+0a511058edae582e8327e8b9d469588c25152dc6,http://pdfs.semanticscholar.org/0a51/1058edae582e8327e8b9d469588c25152dc6.pdf
+29156e4fe317b61cdcc87b0226e6f09e416909e0,http://pdfs.semanticscholar.org/b880/78d284c9f77172dd23970522856a7042c961.pdf
+b8dba0504d6b4b557d51a6cf4de5507141db60cf,http://pdfs.semanticscholar.org/b8db/a0504d6b4b557d51a6cf4de5507141db60cf.pdf
+38215c283ce4bf2c8edd597ab21410f99dc9b094,https://pure.qub.ac.uk/portal/files/9746839/IEEE_Transactions_on_Affective_Computing_2012_McKeown.pdf
+055530f7f771bb1d5f352e2758d1242408d34e4d,http://pdfs.semanticscholar.org/0555/30f7f771bb1d5f352e2758d1242408d34e4d.pdf
+247cab87b133bd0f4f9e8ce5e7fc682be6340eac,http://pdfs.semanticscholar.org/247c/ab87b133bd0f4f9e8ce5e7fc682be6340eac.pdf
+3411ef1ff5ad11e45106f7863e8c7faf563f4ee1,http://pdfs.semanticscholar.org/3411/ef1ff5ad11e45106f7863e8c7faf563f4ee1.pdf
+7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22,http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf
+92fada7564d572b72fd3be09ea3c39373df3e27c,http://pdfs.semanticscholar.org/b8a4/f51a85fb801e1a5f04c213725d60133233a0.pdf
+2717998d89d34f45a1cca8b663b26d8bf10608a9,http://wangzheallen.github.io/papers/ZhangWWQW_CVPR16.pdf
+0faee699eccb2da6cf4307ded67ba8434368257b,http://pdfs.semanticscholar.org/2396/5bd9b557b04b2c81a35ee5c16951c0e420f3.pdf
+842d82081f4b27ca2d4bc05c6c7e389378f0c7b8,http://pdfs.semanticscholar.org/842d/82081f4b27ca2d4bc05c6c7e389378f0c7b8.pdf
+2489a839d0a761ef8520393a7e412c36f5f26324,https://cs.adelaide.edu.au/~tjchin/lib/exe/fetch.php?media=eccv2014_hypergraph.pdf
+53d78c8dbac7c9be8eb148c6a9e1d672f1dd72f9,http://pdfs.semanticscholar.org/53d7/8c8dbac7c9be8eb148c6a9e1d672f1dd72f9.pdf
+35e87e06cf19908855a16ede8c79a0d3d7687b5c,http://pdfs.semanticscholar.org/35e8/7e06cf19908855a16ede8c79a0d3d7687b5c.pdf
+78fdf2b98cf6380623b0e20b0005a452e736181e,http://pdfs.semanticscholar.org/78fd/f2b98cf6380623b0e20b0005a452e736181e.pdf
+6f0d3610c4ee7b67e9d435d48bc98167761251e8,http://www.cs.washington.edu/homes/wufei/papers/IJCNN.pdf
+5ef3e7a2c8d2876f3c77c5df2bbaea8a777051a7,http://cbl.uh.edu/pub_files/ISBA-2016.pdf
+7af38f6dcfbe1cd89f2307776bcaa09c54c30a8b,http://pdfs.semanticscholar.org/7af3/8f6dcfbe1cd89f2307776bcaa09c54c30a8b.pdf
+1b41d4ffb601d48d7a07dbbae01343f4eb8cc38c,https://arxiv.org/pdf/1608.00486v3.pdf
+15728d6fd5c9fc20b40364b733228caf63558c31,http://pdfs.semanticscholar.org/1572/8d6fd5c9fc20b40364b733228caf63558c31.pdf
+3fde656343d3fd4223e08e0bc835552bff4bda40,http://pdfs.semanticscholar.org/3fde/656343d3fd4223e08e0bc835552bff4bda40.pdf
+0562fc7eca23d47096472a1d42f5d4d086e21871,http://pdfs.semanticscholar.org/0562/fc7eca23d47096472a1d42f5d4d086e21871.pdf
+956c634343e49319a5e3cba4f2bd2360bdcbc075,http://www.cse.ust.hk/~jamesk/papers/tsmc06.pdf
+7e1ea2679a110241ed0dd38ff45cd4dfeb7a8e83,http://pdfs.semanticscholar.org/7e1e/a2679a110241ed0dd38ff45cd4dfeb7a8e83.pdf
+430c4d7ad76e51d83bbd7ec9d3f856043f054915,http://pdfs.semanticscholar.org/5176/899c80b3d4b3b8be34d35549f95bf2d55e7d.pdf
+14e759cb019aaf812d6ac049fde54f40c4ed1468,http://pdfs.semanticscholar.org/14e7/59cb019aaf812d6ac049fde54f40c4ed1468.pdf
+6f2dc51d607f491dbe6338711c073620c85351ac,http://pdfs.semanticscholar.org/6f2d/c51d607f491dbe6338711c073620c85351ac.pdf
+28d99dc2d673d62118658f8375b414e5192eac6f,http://www.cs.wayne.edu/~mdong/cvpr17.pdf
+8c13f2900264b5cf65591e65f11e3f4a35408b48,http://cvhci.ira.uka.de/~stiefel/papers/Ekenel_Local_Appearance.pdf
+1e41a3fdaac9f306c0ef0a978ae050d884d77d2a,http://www.cs.huji.ac.il/~daphna/course/CoursePapers/SerreEtAl%20PAMI2007.pdf
+10550ee13855bd7403946032354b0cd92a10d0aa,http://www.public.asu.edu/~chaitali/confpapers/neuromorphic_dac12.pdf
+3ee7a8107a805370b296a53e355d111118e96b7c,http://pdfs.semanticscholar.org/3ee7/a8107a805370b296a53e355d111118e96b7c.pdf
+12692fbe915e6bb1c80733519371bbb90ae07539,http://pdfs.semanticscholar.org/50ef/4817a6e50a2ec525d6e417d05d2400983c11.pdf
+2c7c3a74da960cc76c00965bd3e343958464da45,http://pdfs.semanticscholar.org/2c7c/3a74da960cc76c00965bd3e343958464da45.pdf
+2d1f86e2c7ba81392c8914edbc079ac64d29b666,https://arxiv.org/pdf/1702.04471v1.pdf
+7862f646d640cbf9f88e5ba94a7d642e2a552ec9,http://pdfs.semanticscholar.org/7862/f646d640cbf9f88e5ba94a7d642e2a552ec9.pdf
+2b5cb5466eecb131f06a8100dcaf0c7a0e30d391,https://www.cse.iitb.ac.in/~sharat/icvgip.org/icvgip2010/papers/53.sethuram.134.pdf
+7f6061c83dc36633911e4d726a497cdc1f31e58a,http://pdfs.semanticscholar.org/7f60/61c83dc36633911e4d726a497cdc1f31e58a.pdf
+89c84628b6f63554eec13830851a5d03d740261a,http://pdfs.semanticscholar.org/89c8/4628b6f63554eec13830851a5d03d740261a.pdf
+102b968d836177f9c436141e382915a4f8549276,https://ibug.doc.ic.ac.uk/media/uploads/documents/PanticEtAl-ACM-MM05-Proc.pdf
+45c31cde87258414f33412b3b12fc5bec7cb3ba9,http://pdfs.semanticscholar.org/45c3/1cde87258414f33412b3b12fc5bec7cb3ba9.pdf
+2b42f83a720bd4156113ba5350add2df2673daf0,http://pdfs.semanticscholar.org/2b42/f83a720bd4156113ba5350add2df2673daf0.pdf
+37c8514df89337f34421dc27b86d0eb45b660a5e,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w25/papers/Uricar_Facial_Landmark_Tracking_ICCV_2015_paper.pdf
+a52d9e9daf2cb26b31bf2902f78774bd31c0dd88,http://pdfs.semanticscholar.org/a52d/9e9daf2cb26b31bf2902f78774bd31c0dd88.pdf
+aadf4b077880ae5eee5dd298ab9e79a1b0114555,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W08/papers/Presti_Using_Hankel_Matrices_2015_CVPR_paper.pdf
+eed1dd2a5959647896e73d129272cb7c3a2e145c,http://s3.amazonaws.com/kvaccaro.com/documents/UIST16.pdf
+d5fa9d98c8da54a57abf353767a927d662b7f026,http://pdfs.semanticscholar.org/f15e/9712b8731e1f5fd9566aca513edda910b5b8.pdf
+c1d2d12ade031d57f8d6a0333cbe8a772d752e01,http://pdfs.semanticscholar.org/c1d2/d12ade031d57f8d6a0333cbe8a772d752e01.pdf
+db1f48a7e11174d4a724a4edb3a0f1571d649670,http://pdfs.semanticscholar.org/db1f/48a7e11174d4a724a4edb3a0f1571d649670.pdf
+0ed1c1589ed284f0314ed2aeb3a9bbc760dcdeb5,http://ca.cs.cmu.edu/sites/default/files/9MMED_CVPR12.pdf
+41cfc9edbf36754746991c2a1e9a47c0d129d105,https://www.cs.princeton.edu/~ohad/papers/FriedShechtmanGoldmanFinkelstein_SIGGRAPH2016.pdf
+47fdbd64edd7d348713253cf362a9c21f98e4296,http://www.vision.cs.chubu.ac.jp/MPRG/C_group/C071_yamashita2015.pdf
+6f6b4e2885ea1d9bea1bb2ed388b099a5a6d9b81,http://www.vision.ee.ethz.ch/en/publications/papers/proceedings/eth_biwi_01293.pdf
+366d20f8fd25b4fe4f7dc95068abc6c6cabe1194,http://arxiv.org/pdf/1605.05411v1.pdf
+43bb20ccfda7b111850743a80a5929792cb031f0,http://pdfs.semanticscholar.org/43bb/20ccfda7b111850743a80a5929792cb031f0.pdf
+bd236913cfe07896e171ece9bda62c18b8c8197e,http://pdfs.semanticscholar.org/bd23/6913cfe07896e171ece9bda62c18b8c8197e.pdf
+5157dde17a69f12c51186ffc20a0a6c6847f1a29,http://arxiv.org/pdf/1505.04373v2.pdf
+ce56be1acffda599dec6cc2af2b35600488846c9,http://pdfs.semanticscholar.org/ce56/be1acffda599dec6cc2af2b35600488846c9.pdf
+f074e86e003d5b7a3b6e1780d9c323598d93f3bc,http://pdfs.semanticscholar.org/f074/e86e003d5b7a3b6e1780d9c323598d93f3bc.pdf
+9d8ff782f68547cf72b7f3f3beda9dc3e8ecfce6,http://pdfs.semanticscholar.org/9d8f/f782f68547cf72b7f3f3beda9dc3e8ecfce6.pdf
+1be498d4bbc30c3bfd0029114c784bc2114d67c0,http://www.openu.ac.il/home/hassner/Adience/EidingerEnbarHassner_tifs.pdf
+191d30e7e7360d565b0c1e2814b5bcbd86a11d41,http://homepages.rpi.edu/~wuy9/DiscriminativeDeepFaceShape/DiscriminativeDeepFaceShape_IJCV.pdf
+31182c5ffc8c5d8772b6db01ec98144cd6e4e897,http://pdfs.semanticscholar.org/3118/2c5ffc8c5d8772b6db01ec98144cd6e4e897.pdf
+862d17895fe822f7111e737cbcdd042ba04377e8,http://pdfs.semanticscholar.org/862d/17895fe822f7111e737cbcdd042ba04377e8.pdf
+634541661d976c4b82d590ef6d1f3457d2857b19,http://pdfs.semanticscholar.org/6345/41661d976c4b82d590ef6d1f3457d2857b19.pdf
+f913bb65b62b0a6391ffa8f59b1d5527b7eba948,http://pdfs.semanticscholar.org/f913/bb65b62b0a6391ffa8f59b1d5527b7eba948.pdf
+5b73b7b335f33cda2d0662a8e9520f357b65f3ac,http://www.iis.sinica.edu.tw/papers/song/16795-F.pdf
+4b02387c2db968a70b69d98da3c443f139099e91,http://pdfs.semanticscholar.org/4b02/387c2db968a70b69d98da3c443f139099e91.pdf
+fd9feb21b3d1fab470ff82e3f03efce6a0e67a1f,http://pdfs.semanticscholar.org/fd9f/eb21b3d1fab470ff82e3f03efce6a0e67a1f.pdf
+5b6ecbf5f1eecfe1a9074d31fe2fb030d75d9a79,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2014/W01/papers/Yang_Improving_3D_Face_2014_CVPR_paper.pdf
+8f5facdc0a2a79283864aad03edc702e2a400346,http://pdfs.semanticscholar.org/8f5f/acdc0a2a79283864aad03edc702e2a400346.pdf
+31c34a5b42a640b824fa4e3d6187e3675226143e,http://pdfs.semanticscholar.org/31c3/4a5b42a640b824fa4e3d6187e3675226143e.pdf
+2b3ceb40dced78a824cf67054959e250aeaa573b,http://pdfs.semanticscholar.org/7493/4a2b65538f42701e15f7f532437db2beead2.pdf
+ae71f69f1db840e0aa17f8c814316f0bd0f6fbbf,http://pdfs.semanticscholar.org/ae71/f69f1db840e0aa17f8c814316f0bd0f6fbbf.pdf
+55966926e7c28b1eee1c7eb7a0b11b10605a1af0,http://pdfs.semanticscholar.org/baa8/bdeb5aa545af5b5f43efaf9dda08490da0bc.pdf
+25728e08b0ee482ee6ced79c74d4735bb5478e29,http://pdfs.semanticscholar.org/2572/8e08b0ee482ee6ced79c74d4735bb5478e29.pdf
+a3f684930c5c45fcb56a2b407d26b63879120cbf,http://pdfs.semanticscholar.org/a3f6/84930c5c45fcb56a2b407d26b63879120cbf.pdf
+e69ac130e3c7267cce5e1e3d9508ff76eb0e0eef,http://pdfs.semanticscholar.org/e69a/c130e3c7267cce5e1e3d9508ff76eb0e0eef.pdf
+b62571691a23836b35719fc457e093b0db187956,http://pdfs.semanticscholar.org/b625/71691a23836b35719fc457e093b0db187956.pdf
+1d21e5beef23eecff6fff7d4edc16247f0fd984a,http://pdfs.semanticscholar.org/1d21/e5beef23eecff6fff7d4edc16247f0fd984a.pdf
+47ca2df3d657d7938d7253bed673505a6a819661,http://pdfs.semanticscholar.org/47ca/2df3d657d7938d7253bed673505a6a819661.pdf
+377c6563f97e76a4dc836a0bd23d7673492b1aae,http://pdfs.semanticscholar.org/377c/6563f97e76a4dc836a0bd23d7673492b1aae.pdf
+4270460b8bc5299bd6eaf821d5685c6442ea179a,http://www.cs.technion.ac.il/~ron/PAPERS/BronBronBrucKimIJCV09.pdf
+7862d40da0d4e33cd6f5c71bbdb47377e4c6b95a,https://arxiv.org/pdf/1709.07598v1.pdf
+1e5a1619fe5586e5ded2c7a845e73f22960bbf5a,https://arxiv.org/pdf/1509.04783v1.pdf
+2a3e19d7c54cba3805115497c69069dd5a91da65,http://pdfs.semanticscholar.org/2a3e/19d7c54cba3805115497c69069dd5a91da65.pdf
+321c8ba38db118d8b02c0ba209be709e6792a2c7,http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W11/papers/Yan_Learn_to_Combine_2013_ICCV_paper.pdf
+3bcd72be6fbc1a11492df3d36f6d51696fd6bdad,http://pdfs.semanticscholar.org/3bcd/72be6fbc1a11492df3d36f6d51696fd6bdad.pdf
+3fd90098551bf88c7509521adf1c0ba9b5dfeb57,http://pub.ist.ac.at/~chl/papers/lampert-pami2013.pdf
+15cd05baa849ab058b99a966c54d2f0bf82e7885,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1A_031_ext.pdf
+881066ec43bcf7476479a4146568414e419da804,http://pdfs.semanticscholar.org/8810/66ec43bcf7476479a4146568414e419da804.pdf
+2ad0ee93d029e790ebb50574f403a09854b65b7e,http://vision.cornell.edu/se3/wp-content/uploads/2014/09/pami05.pdf
+01bef320b83ac4405b3fc5b1cff788c124109fb9,http://pdfs.semanticscholar.org/49e4/37cc5b673c49b942e304607a0050dcc82dae.pdf
+1a45ddaf43bcd49d261abb4a27977a952b5fff12,http://pdfs.semanticscholar.org/1a45/ddaf43bcd49d261abb4a27977a952b5fff12.pdf
+e0638e0628021712ac76e3472663ccc17bd8838c,http://pdfs.semanticscholar.org/e063/8e0628021712ac76e3472663ccc17bd8838c.pdf
+aca273a9350b10b6e2ef84f0e3a327255207d0f5,http://pdfs.semanticscholar.org/efb2/4d35d8f6a46e1ff3800a2481bc7e681e255e.pdf
+08e24f9df3d55364290d626b23f3d42b4772efb6,http://poseidon.csd.auth.gr/papers/PUBLISHED/CONFERENCE/pdf/Buciu06c.pdf
+c6608fdd919f2bc4f8d7412bab287527dcbcf505,http://pdfs.semanticscholar.org/c660/8fdd919f2bc4f8d7412bab287527dcbcf505.pdf
+2f184c6e2c31d23ef083c881de36b9b9b6997ce9,http://pdfs.semanticscholar.org/2f18/4c6e2c31d23ef083c881de36b9b9b6997ce9.pdf
+df2494da8efa44d70c27abf23f73387318cf1ca8,http://pdfs.semanticscholar.org/df24/94da8efa44d70c27abf23f73387318cf1ca8.pdf
+7b63ed54345d8c06523f6b03c41a09b5c8f227e2,http://research.iaun.ac.ir/pd/pourghassem/pdfs/PaperC_1187.pdf
+14b87359f6874ff9b8ee234b18b418e57e75b762,http://pdfs.semanticscholar.org/1b62/6c14544f249cd52ef86a4efc17f3d3834003.pdf
+11408af8861fb0a977412e58c1a23d61b8df458c,http://www.me.cs.scitec.kobe-u.ac.jp/~takigu/pdf/2014/0265.pdf
+1f89439524e87a6514f4fbe7ed34bda4fd1ce286,http://pdfs.semanticscholar.org/1f89/439524e87a6514f4fbe7ed34bda4fd1ce286.pdf
+1c147261f5ab1b8ee0a54021a3168fa191096df8,http://pdfs.semanticscholar.org/1c14/7261f5ab1b8ee0a54021a3168fa191096df8.pdf
+40389b941a6901c190fb74e95dc170166fd7639d,http://pdfs.semanticscholar.org/56f7/dad4d6d98292061a2c1e399d9a0ecfbbbde3.pdf
+621ed006945e9438910b5aa4f6214888dea3d791,http://figment.cse.usf.edu/~sfefilat/data/papers/ThAT9.20.pdf
+7a9c317734acaf4b9bd8e07dd99221c457b94171,http://pdfs.semanticscholar.org/7a9c/317734acaf4b9bd8e07dd99221c457b94171.pdf
+19d583bf8c5533d1261ccdc068fdc3ef53b9ffb9,https://arxiv.org/pdf/1503.03832v2.pdf
+27d709f7b67204e1e5e05fe2cfac629afa21699d,http://pdfs.semanticscholar.org/2b88/db4294f11b0516a537b8720fcf416be80dbf.pdf
+f7c50d2be9fba0e4527fd9fbe3095e9d9a94fdd3,http://pdfs.semanticscholar.org/f7c5/0d2be9fba0e4527fd9fbe3095e9d9a94fdd3.pdf
+2e6cfeba49d327de21ae3186532e56cadeb57c02,http://openaccess.thecvf.com/content_ICCV_2017/papers/Wang_Real_Time_Eye_ICCV_2017_paper.pdf
+a7d23c699a5ae4ad9b8a5cbb8c38e5c3b5f5fb51,http://pdfs.semanticscholar.org/a7d2/3c699a5ae4ad9b8a5cbb8c38e5c3b5f5fb51.pdf
+c95cd36779fcbe45e3831ffcd3314e19c85defc5,https://arxiv.org/pdf/1703.04853v1.pdf
+dbe0e533d715f8543bcf197f3b8e5cffa969dfc0,http://pdfs.semanticscholar.org/dbe0/e533d715f8543bcf197f3b8e5cffa969dfc0.pdf
+45c340c8e79077a5340387cfff8ed7615efa20fd,http://pdfs.semanticscholar.org/45c3/40c8e79077a5340387cfff8ed7615efa20fd.pdf
+a4a0b5f08198f6d7ea2d1e81bd97fea21afe3fc3,http://pdfs.semanticscholar.org/a4a0/b5f08198f6d7ea2d1e81bd97fea21afe3fc3.pdf
+4300fa1221beb9dc81a496cd2f645c990a7ede53,http://pdfs.semanticscholar.org/da71/87e56b6da1b9c993d9a096d2f2b9d80fb14c.pdf
+0d06b3a4132d8a2effed115a89617e0a702c957a,http://arxiv.org/pdf/1605.08680v1.pdf
+b5f4e617ac3fc4700ec8129fcd0dcf5f71722923,http://pdfs.semanticscholar.org/c4dd/f94ed445bad0793cd4ba2813506d02221ec0.pdf
+397aeaea61ecdaa005b09198942381a7a11cd129,http://pdfs.semanticscholar.org/e30b/df82a358587f7d27ee4ea0b34762328c2a8d.pdf
+d6ca3dc01de060871839d5536e8112b551a7f9ff,https://arxiv.org/pdf/1802.08310v1.pdf
+294bd7eb5dc24052237669cdd7b4675144e22306,http://pdfs.semanticscholar.org/294b/d7eb5dc24052237669cdd7b4675144e22306.pdf
+061e29eae705f318eee703b9e17dc0989547ba0c,http://pdfs.semanticscholar.org/061e/29eae705f318eee703b9e17dc0989547ba0c.pdf
+12003a7d65c4f98fb57587fd0e764b44d0d10125,http://luks.fe.uni-lj.si/en/staff/simond/publications/Dobrisek2015.pdf
+e6dc1200a31defda100b2e5ddb27fb7ecbbd4acd,http://citeseerx.ist.psu.edu/viewdoc/download;jsessionid=8BA80DE8A35C6665EB6C19D582E5689F?doi=10.1.1.227.7824&rep=rep1&type=pdf
+8699268ee81a7472a0807c1d3b1db0d0ab05f40d,http://pdfs.semanticscholar.org/8699/268ee81a7472a0807c1d3b1db0d0ab05f40d.pdf
+61ffedd8a70a78332c2bbdc9feba6c3d1fd4f1b8,http://pdfs.semanticscholar.org/61ff/edd8a70a78332c2bbdc9feba6c3d1fd4f1b8.pdf
+06400a24526dd9d131dfc1459fce5e5189b7baec,http://www.vision.ee.ethz.ch/en/publications/papers/proceedings/eth_biwi_01054.pdf
+64f9519f20acdf703984f02e05fd23f5e2451977,http://arxiv.org/pdf/1509.01343v1.pdf
+378ae5ca649f023003021f5a63e393da3a4e47f0,http://vision.ucsd.edu/~carolina/files/galleguillos_cvpr10.pdf
+36018404263b9bb44d1fddaddd9ee9af9d46e560,http://pdfs.semanticscholar.org/3601/8404263b9bb44d1fddaddd9ee9af9d46e560.pdf
+449b1b91029e84dab14b80852e35387a9275870e,https://pdfs.semanticscholar.org/608c/da0c14c3d134d9d18dd38f9682b23c31d367.pdf
+21a2f67b21905ff6e0afa762937427e92dc5aa0b,http://pdfs.semanticscholar.org/21a2/f67b21905ff6e0afa762937427e92dc5aa0b.pdf
+a57b37549edba625f5955759e259e52eb0af8773,http://learning.cs.toronto.edu/~hinton/absps/ranzato_cvpr2011.pdf
+3630324c2af04fd90f8668f9ee9709604fe980fd,http://www.yugangjiang.info/publication/TCSVT-Shu.pdf
+0ba64f4157d80720883a96a73e8d6a5f5b9f1d9b,http://pdfs.semanticscholar.org/84b7/e2138a3701432c33ea70a1297328cd814ab5.pdf
+38a9ca2c49a77b540be52377784b9f734e0417e4,http://homepages.dcc.ufmg.br/~william/papers/paper_2011_IJCB_Faces.pdf
+0ebc50b6e4b01eb5eba5279ce547c838890b1418,http://pdfs.semanticscholar.org/0ebc/50b6e4b01eb5eba5279ce547c838890b1418.pdf
+1742e6c347037d5d4ccbdf5c7a27dfbf0afedb91,http://www1.i2r.a-star.edu.sg/~htang/Unified_Framework_for_Subspace_Clustering-TNNLS.pdf
+7897c8a9361b427f7b07249d21eb9315db189496,https://arxiv.org/pdf/1102.2743v2.pdf
+24496e4acfb8840616b2960b0e2c80cc4c9e5a87,http://ai2-s2-pdfs.s3.amazonaws.com/2449/6e4acfb8840616b2960b0e2c80cc4c9e5a87.pdf
+fae83b145e5eeda8327de9f19df286edfaf5e60c,http://pdfs.semanticscholar.org/fae8/3b145e5eeda8327de9f19df286edfaf5e60c.pdf
+abeda55a7be0bbe25a25139fb9a3d823215d7536,http://pdfs.semanticscholar.org/abed/a55a7be0bbe25a25139fb9a3d823215d7536.pdf
+b6052dc718c72f2506cfd9d29422642ecf3992ef,http://pdfs.semanticscholar.org/b605/2dc718c72f2506cfd9d29422642ecf3992ef.pdf
+42df75080e14d32332b39ee5d91e83da8a914e34,http://www.imlab.tw/wp-content/uploads/2015/11/Illumination-Compensation-Using-Oriented-Local-Histogram-Equalization-and-its-Application-to-Face-Recognition.pdf
+f7452a12f9bd927398e036ea6ede02da79097e6e,http://pdfs.semanticscholar.org/f745/2a12f9bd927398e036ea6ede02da79097e6e.pdf
+c50d73557be96907f88b59cfbd1ab1b2fd696d41,http://pdfs.semanticscholar.org/c50d/73557be96907f88b59cfbd1ab1b2fd696d41.pdf
+ae753fd46a744725424690d22d0d00fb05e53350,http://pdfs.semanticscholar.org/ae75/3fd46a744725424690d22d0d00fb05e53350.pdf
+bb489e4de6f9b835d70ab46217f11e32887931a2,http://conteudo.icmc.usp.br/pessoas/moacir/p17sibgrapi-tutorial/2017-SIBGRAPI_Tutorial-Survey_Paper-Deep_Learning_for_Computer_Vision.pdf
+adfaf01773c8af859faa5a9f40fb3aa9770a8aa7,http://pdfs.semanticscholar.org/adfa/f01773c8af859faa5a9f40fb3aa9770a8aa7.pdf
+948af4b04b4a9ae4bff2777ffbcb29d5bfeeb494,http://pdfs.semanticscholar.org/948a/f4b04b4a9ae4bff2777ffbcb29d5bfeeb494.pdf
+f8c94afd478821681a1565d463fc305337b02779,http://pdfs.semanticscholar.org/f8c9/4afd478821681a1565d463fc305337b02779.pdf
+4a4da3d1bbf10f15b448577e75112bac4861620a,http://pdfs.semanticscholar.org/4a4d/a3d1bbf10f15b448577e75112bac4861620a.pdf
+ff1f45bdad41d8b35435098041e009627e60d208,http://pdfs.semanticscholar.org/ff1f/45bdad41d8b35435098041e009627e60d208.pdf
+03c48d8376990cff9f541d542ef834728a2fcda2,http://dvmmweb.cs.columbia.edu/files/dvmm_scnn_paper.pdf
+a472d59cff9d822f15f326a874e666be09b70cfd,http://pdfs.semanticscholar.org/a472/d59cff9d822f15f326a874e666be09b70cfd.pdf
+527dda77a3864d88b35e017d542cb612f275a4ec,https://arxiv.org/pdf/1709.00531v1.pdf
+f19ab817dd1ef64ee94e94689b0daae0f686e849,http://pdfs.semanticscholar.org/f19a/b817dd1ef64ee94e94689b0daae0f686e849.pdf
+ae4e2c81c8a8354c93c4b21442c26773352935dd,http://pdfs.semanticscholar.org/ae4e/2c81c8a8354c93c4b21442c26773352935dd.pdf
+529b1f33aed49dbe025a99ac1d211c777ad881ec,https://teresaproject.eu/wp-content/uploads/2015/07/kossaifi_bidirectional_icip.pdf
+3cd9b0a61bdfa1bb8a0a1bf0369515a76ecd06e3,http://pdfs.semanticscholar.org/51f7/3cfcc6d671bd99b5c3c512ff9b7bb959f33b.pdf
+d00c335fbb542bc628642c1db36791eae24e02b7,http://pdfs.semanticscholar.org/d00c/335fbb542bc628642c1db36791eae24e02b7.pdf
+5d09d5257139b563bd3149cfd5e6f9eae3c34776,http://pdfs.semanticscholar.org/5d09/d5257139b563bd3149cfd5e6f9eae3c34776.pdf
+6cd96f2b63c6b6f33f15c0ea366e6003f512a951,http://pdfs.semanticscholar.org/6cd9/6f2b63c6b6f33f15c0ea366e6003f512a951.pdf
+14e428f2ff3dc5cf96e5742eedb156c1ea12ece1,http://www.univ-soukahras.dz/eprints/2014-150-03190.pdf
+34b3b14b4b7bfd149a0bd63749f416e1f2fc0c4c,http://pdfs.semanticscholar.org/9e97/360b519d9912ded55618ccbb000d74d8e35c.pdf
+11b89011298e193d9e6a1d99302221c1d8645bda,http://openaccess.thecvf.com/content_iccv_2015/papers/Gao_Structured_Feature_Selection_ICCV_2015_paper.pdf
+6582f4ec2815d2106957215ca2fa298396dde274,http://mi.eng.cam.ac.uk/~cipolla/publications/article/2007-PAMI-face-sets.pdf
+06f146dfcde10915d6284981b6b84b85da75acd4,http://cmlab.csie.ntu.edu.tw/~sirius42/papers/tmm12.pdf
+4ea4116f57c5d5033569690871ba294dc3649ea5,http://pdfs.semanticscholar.org/4ea4/116f57c5d5033569690871ba294dc3649ea5.pdf
+661da40b838806a7effcb42d63a9624fcd684976,http://pdfs.semanticscholar.org/661d/a40b838806a7effcb42d63a9624fcd684976.pdf
+fd615118fb290a8e3883e1f75390de8a6c68bfde,http://pdfs.semanticscholar.org/fd61/5118fb290a8e3883e1f75390de8a6c68bfde.pdf
+3d0f9a3031bee4b89fab703ff1f1d6170493dc01,http://pdfs.semanticscholar.org/3d0f/9a3031bee4b89fab703ff1f1d6170493dc01.pdf
+2c8743089d9c7df04883405a31b5fbe494f175b4,http://srl.informatik.uni-freiburg.de/publicationsdir/linderICRA15.pdf
+e4c2f8e4aace8cb851cb74478a63d9111ca550ae,http://pdfs.semanticscholar.org/e4c2/f8e4aace8cb851cb74478a63d9111ca550ae.pdf
+322b7a4ce006e4d14748dd064e80ffba573ebcd7,http://cheonji.kaist.ac.kr/pdfsrc/ic/2008_KHAn_ROMAN.pdf
+4de757faa69c1632066391158648f8611889d862,http://pdfs.semanticscholar.org/4de7/57faa69c1632066391158648f8611889d862.pdf
+4fd29e5f4b7186e349ba34ea30738af7860cf21f,https://arxiv.org/pdf/1506.02588v1.pdf
+5922e26c9eaaee92d1d70eae36275bb226ecdb2e,http://pdfs.semanticscholar.org/5922/e26c9eaaee92d1d70eae36275bb226ecdb2e.pdf
+3f4798c7701da044bdb7feb61ebdbd1d53df5cfe,http://sip.unige.ch/articles/2015/2015.EUSIPCO.Vector.quantization.pdf
+5c473cfda1d7c384724fbb139dfe8cb39f79f626,http://www.cs.zju.edu.cn/~gpan/publication/2012-PAA-face-expression-onlinefirst.pdf
+4541c9b4b7e6f7a232bdd62ae653ba5ec0f8bbf6,http://pdfs.semanticscholar.org/4541/c9b4b7e6f7a232bdd62ae653ba5ec0f8bbf6.pdf
+ab87dfccb1818bdf0b41d732da1f9335b43b74ae,http://pdfs.semanticscholar.org/ab87/dfccb1818bdf0b41d732da1f9335b43b74ae.pdf
+10195a163ab6348eef37213a46f60a3d87f289c5,https://www.research-collection.ethz.ch/bitstream/handle/20.500.11850/156130/eth-50296-01.pdf
+5bae9822d703c585a61575dced83fa2f4dea1c6d,http://pdfs.semanticscholar.org/5bae/9822d703c585a61575dced83fa2f4dea1c6d.pdf
+fd96432675911a702b8a4ce857b7c8619498bf9f,http://pdfs.semanticscholar.org/fd96/432675911a702b8a4ce857b7c8619498bf9f.pdf
+7071cd1ee46db4bc1824c4fd62d36f6d13cad08a,http://pdfs.semanticscholar.org/7071/cd1ee46db4bc1824c4fd62d36f6d13cad08a.pdf
+c3418f866a86dfd947c2b548cbdeac8ca5783c15,http://pdfs.semanticscholar.org/c341/8f866a86dfd947c2b548cbdeac8ca5783c15.pdf
+1128a4f57148cec96c0ef4ae3b5a0fbf07efbad9,https://www.microsoft.com/en-us/research/wp-content/uploads/2016/08/icmr038-liA.pdf
+11fdff97f4511ae3d3691cfdeec5a19fa04db6ef,http://mclab.eic.hust.edu.cn/UpLoadFiles/Papers/SCA_TIP2016.pdf
+48f0055295be7b175a06df5bc6fa5c6b69725785,http://pdfs.semanticscholar.org/48f0/055295be7b175a06df5bc6fa5c6b69725785.pdf
+9ef2b2db11ed117521424c275c3ce1b5c696b9b3,http://pdfs.semanticscholar.org/c31b/dd00734807938dcfd8a12375bd9ffa556985.pdf
+4f773c8e7ca98ece9894ba3a22823127a70c6e6c,http://pdfs.semanticscholar.org/4f77/3c8e7ca98ece9894ba3a22823127a70c6e6c.pdf
+302c9c105d49c1348b8f1d8cc47bead70e2acf08,http://pdfs.semanticscholar.org/302c/9c105d49c1348b8f1d8cc47bead70e2acf08.pdf
+439647914236431c858535a2354988dde042ef4d,http://eecs.qmul.ac.uk/~jason/Research/PreprintVersion/Face%20Illumination%20Normalization%20on%20Large%20and%20Small%20Scale%20Features.pdf
+519f4eb5fe15a25a46f1a49e2632b12a3b18c94d,https://www.cise.ufl.edu/~arunava/papers/pami-abrdf.pdf
+0568fc777081cbe6de95b653644fec7b766537b2,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Liu_Learning_Expressionlets_on_2014_CVPR_paper.pdf
+5bb684dfe64171b77df06ba68997fd1e8daffbe1,http://pdfs.semanticscholar.org/f096/9403b5dfa54445d911aedd88ab25b0b6cd99.pdf
+bcc172a1051be261afacdd5313619881cbe0f676,http://www.mirlab.org/conference_papers/International_Conference/ICASSP%202017/pdfs/0002197.pdf
+e4e3faa47bb567491eaeaebb2213bf0e1db989e1,http://pdfs.semanticscholar.org/e4e3/faa47bb567491eaeaebb2213bf0e1db989e1.pdf
+5cb83eba8d265afd4eac49eb6b91cdae47def26d,http://www.kresttechnology.com/krest-academic-projects/krest-major-projects/ECE/B-Tech%20Papers/21.pdf
+7480d8739eb7ab97c12c14e75658e5444b852e9f,http://pdfs.semanticscholar.org/cfe4/b03951be323394e6749f6a30b2ac9b924479.pdf
+b59c8b44a568587bc1b61d130f0ca2f7a2ae3b88,http://pdfs.semanticscholar.org/b59c/8b44a568587bc1b61d130f0ca2f7a2ae3b88.pdf
+7f6599e674a33ed64549cd512ad75bdbd28c7f6c,http://pdfs.semanticscholar.org/7f65/99e674a33ed64549cd512ad75bdbd28c7f6c.pdf
+4b7c110987c1d89109355b04f8597ce427a7cd72,http://pdfs.semanticscholar.org/4b7c/110987c1d89109355b04f8597ce427a7cd72.pdf
+521482c2089c62a59996425603d8264832998403,http://pdfs.semanticscholar.org/5214/82c2089c62a59996425603d8264832998403.pdf
+cf5c9b521c958b84bb63bea9d5cbb522845e4ba7,http://pdfs.semanticscholar.org/cf5c/9b521c958b84bb63bea9d5cbb522845e4ba7.pdf
+711bb5f63139ee7a9b9aef21533f959671a7d80e,http://pdfs.semanticscholar.org/711b/b5f63139ee7a9b9aef21533f959671a7d80e.pdf
+a090d61bfb2c3f380c01c0774ea17929998e0c96,http://iitlab.bit.edu.cn/mcislab/~jiayunde/pdf/CVPR2012_BrickIllumDimension.pdf
+1a40092b493c6b8840257ab7f96051d1a4dbfeb2,http://web.cse.msu.edu/~liuxm/publication/Safdarnejad_Liu_Udpa_Andrus_Wood_Craven_FG2015.pdf
+783f3fccde99931bb900dce91357a6268afecc52,http://pdfs.semanticscholar.org/d1ea/f2cc9dfc6cdbc5468ef2152c46e9111a3f3b.pdf
+1c3073b57000f9b6dbf1c5681c52d17c55d60fd7,http://pdfs.semanticscholar.org/1c30/73b57000f9b6dbf1c5681c52d17c55d60fd7.pdf
+ab8fb278db4405f7db08fa59404d9dd22d38bc83,http://pdfs.semanticscholar.org/ab8f/b278db4405f7db08fa59404d9dd22d38bc83.pdf
+2df4d05119fe3fbf1f8112b3ad901c33728b498a,http://pdfs.semanticscholar.org/891b/10c4b3b92ca30c9b93170ec9abd71f6099c4.pdf
+9696b172d66e402a2e9d0a8d2b3f204ad8b98cc4,http://pdfs.semanticscholar.org/9696/b172d66e402a2e9d0a8d2b3f204ad8b98cc4.pdf
+0c435e7f49f3e1534af0829b7461deb891cf540a,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Wang_Capturing_Global_Semantic_2013_ICCV_paper.pdf
+5aadd85e2a77e482d44ac2a215c1f21e4a30d91b,http://pdfs.semanticscholar.org/5aad/d85e2a77e482d44ac2a215c1f21e4a30d91b.pdf
+efd28eabebb9815e34031316624e7f095c7dfcfe,http://pdfs.semanticscholar.org/efd2/8eabebb9815e34031316624e7f095c7dfcfe.pdf
+31b58ced31f22eab10bd3ee2d9174e7c14c27c01,http://pdfs.semanticscholar.org/31b5/8ced31f22eab10bd3ee2d9174e7c14c27c01.pdf
+b3c398da38d529b907b0bac7ec586c81b851708f,http://www.cbsr.ia.ac.cn/publications/Stan/WHT-FG2004.pdf
+4ac4e8d17132f2d9812a0088594d262a9a0d339b,http://pdfs.semanticscholar.org/4ac4/e8d17132f2d9812a0088594d262a9a0d339b.pdf
+32c9ebd2685f522821eddfc19c7c91fd6b3caf22,http://pdfs.semanticscholar.org/32c9/ebd2685f522821eddfc19c7c91fd6b3caf22.pdf
+148eb413bede35487198ce7851997bf8721ea2d6,http://pdfs.semanticscholar.org/148e/b413bede35487198ce7851997bf8721ea2d6.pdf
+287795991fad3c61d6058352879c7d7ae1fdd2b6,http://pdfs.semanticscholar.org/2877/95991fad3c61d6058352879c7d7ae1fdd2b6.pdf
+273b0511588ab0a81809a9e75ab3bd93d6a0f1e3,http://arxiv.org/pdf/1604.04334v1.pdf
+b20cfbb2348984b4e25b6b9174f3c7b65b6aed9e,http://pdfs.semanticscholar.org/b20c/fbb2348984b4e25b6b9174f3c7b65b6aed9e.pdf
+1a031378cf1d2b9088a200d9715d87db8a1bf041,http://pdfs.semanticscholar.org/1a03/1378cf1d2b9088a200d9715d87db8a1bf041.pdf
+926c67a611824bc5ba67db11db9c05626e79de96,http://www.ee.columbia.edu/ln/dvmm/publications/09/xu_ebsl.pdf
+9686dcf40e6fdc4152f38bd12b929bcd4f3bbbcc,http://pdfs.semanticscholar.org/9686/dcf40e6fdc4152f38bd12b929bcd4f3bbbcc.pdf
+03adcf58d947a412f3904a79f2ab51cfdf0e838a,http://pdfs.semanticscholar.org/03ad/cf58d947a412f3904a79f2ab51cfdf0e838a.pdf
+98519f3f615e7900578bc064a8fb4e5f429f3689,http://pdfs.semanticscholar.org/9851/9f3f615e7900578bc064a8fb4e5f429f3689.pdf
+8b30259a8ab07394d4dac971f3d3bd633beac811,http://pdfs.semanticscholar.org/8b30/259a8ab07394d4dac971f3d3bd633beac811.pdf
+016cbf0878db5c40566c1fbc237686fbad666a33,http://pdfs.semanticscholar.org/5a07/986f0a202eafbd1f1574fe2c3ae6abe2281f.pdf
+d68dbb71b34dfe98dee0680198a23d3b53056394,http://pdfs.semanticscholar.org/d68d/bb71b34dfe98dee0680198a23d3b53056394.pdf
+8b2e3805b37c18618b74b243e7a6098018556559,http://pdfs.semanticscholar.org/8b2e/3805b37c18618b74b243e7a6098018556559.pdf
+011e6146995d5d63c852bd776f782cc6f6e11b7b,http://openaccess.thecvf.com/content_cvpr_2016/papers/Zhuang_Fast_Training_of_CVPR_2016_paper.pdf
+179e566a2c1a2a48aa3d0028209c11ebe7d6740e,http://homepages.rpi.edu/~wuy9/EyeDetectionDBM/DeepFeaturesEyeDetection.pdf
+dcb44fc19c1949b1eda9abe998935d567498467d,http://pdfs.semanticscholar.org/dcb4/4fc19c1949b1eda9abe998935d567498467d.pdf
+7ce03597b703a3b6754d1adac5fbc98536994e8f,http://pdfs.semanticscholar.org/7ce0/3597b703a3b6754d1adac5fbc98536994e8f.pdf
+40217a8c60e0a7d1735d4f631171aa6ed146e719,http://pdfs.semanticscholar.org/4021/7a8c60e0a7d1735d4f631171aa6ed146e719.pdf
+47e3029a3d4cf0a9b0e96252c3dc1f646e750b14,http://mmi.tudelft.nl/pub/dragos/_CompSysTech07.pdf
+57b8b28f8748d998951b5a863ff1bfd7ca4ae6a5,http://pdfs.semanticscholar.org/57b8/b28f8748d998951b5a863ff1bfd7ca4ae6a5.pdf
+40ca925befa1f7e039f0cd40d57dbef6007b4416,https://arxiv.org/pdf/1706.07567v1.pdf
+2a65d7d5336b377b7f5a98855767dd48fa516c0f,https://mug.ee.auth.gr/wp-content/uploads/fsLDA.pdf
+0c1d85a197a1f5b7376652a485523e616a406273,http://openaccess.thecvf.com/content_cvpr_2017/papers/Hayat_Joint_Registration_and_CVPR_2017_paper.pdf
+7c1e1c767f7911a390d49bed4f73952df8445936,http://cmp.felk.cvut.cz/~zimmerk/zimmermann-TPAMI-2014.pdf
+a000149e83b09d17e18ed9184155be140ae1266e,http://pdfs.semanticscholar.org/a000/149e83b09d17e18ed9184155be140ae1266e.pdf
+5d233e6f23b1c306cf62af49ce66faac2078f967,http://pdfs.semanticscholar.org/5d23/3e6f23b1c306cf62af49ce66faac2078f967.pdf
+3028690d00bd95f20842d4aec84dc96de1db6e59,http://pdfs.semanticscholar.org/775f/9b8bc0ff151ee62b5e777f0aa9b09484ef8a.pdf
+5fb5d9389e2a2a4302c81bcfc068a4c8d4efe70c,http://pdfs.semanticscholar.org/5fb5/d9389e2a2a4302c81bcfc068a4c8d4efe70c.pdf
+fc516a492cf09aaf1d319c8ff112c77cfb55a0e5,http://pdfs.semanticscholar.org/fc51/6a492cf09aaf1d319c8ff112c77cfb55a0e5.pdf
+6342a4c54835c1e14159495373ab18b4233d2d9b,http://pdfs.semanticscholar.org/6342/a4c54835c1e14159495373ab18b4233d2d9b.pdf
+98a120802aef324599e8b9014decfeb2236a78a3,http://nyunetworks.com/Pubs/butler-chi16.pdf
+118ca3b2e7c08094e2a50137b1548ada7935e505,http://pdfs.semanticscholar.org/dc5c/273198b16dc615888256da74758f4a4b128b.pdf
+5e6ba16cddd1797853d8898de52c1f1f44a73279,http://pdfs.semanticscholar.org/5e6b/a16cddd1797853d8898de52c1f1f44a73279.pdf
+4688787d064e59023a304f7c9af950d192ddd33e,http://www.cse.msu.edu/~liuxm/publication/Roth_Liu_Ross_Metaxas_TIFS.pdf
+75e9a141b85d902224f849ea61ab135ae98e7bfb,http://pdfs.semanticscholar.org/d1a5/0fffd1c9cf033943636b9e18172ed68582b1.pdf
+4c170a0dcc8de75587dae21ca508dab2f9343974,http://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf
+361c9ba853c7d69058ddc0f32cdbe94fbc2166d5,http://pdfs.semanticscholar.org/361c/9ba853c7d69058ddc0f32cdbe94fbc2166d5.pdf
+b7eead8586ffe069edd190956bd338d82c69f880,http://pdfs.semanticscholar.org/b7ee/ad8586ffe069edd190956bd338d82c69f880.pdf
+11f17191bf74c80ad0b16b9f404df6d03f7c8814,http://pdfs.semanticscholar.org/11f5/c82e3a39b9c8b91370ef7286a748c19b658a.pdf
+40b0fced8bc45f548ca7f79922e62478d2043220,http://pdfs.semanticscholar.org/40b0/fced8bc45f548ca7f79922e62478d2043220.pdf
+4140498e96a5ff3ba816d13daf148fffb9a2be3f,http://multicomp.cs.cmu.edu/wp-content/uploads/2017/10/2017_FG_Li_Constrained.pdf
+e7b6887cd06d0c1aa4902335f7893d7640aef823,http://pdfs.semanticscholar.org/e7b6/887cd06d0c1aa4902335f7893d7640aef823.pdf
+76fd801981fd69ff1b18319c450cb80c4bc78959,http://pdfs.semanticscholar.org/76fd/801981fd69ff1b18319c450cb80c4bc78959.pdf
+703c9c8f20860a1b1be63e6df1622b2021b003ca,http://openaccess.thecvf.com/content_ICCV_2017/papers/Kobayashi_Flip-Invariant_Motion_Representation_ICCV_2017_paper.pdf
+2ca43325a5dbde91af90bf850b83b0984587b3cc,http://pdfs.semanticscholar.org/2ca4/3325a5dbde91af90bf850b83b0984587b3cc.pdf
+029b53f32079063047097fa59cfc788b2b550c4b,http://pdfs.semanticscholar.org/b71c/73fcae520f6a5cdbce18c813633fb3d66342.pdf
+951368a1a8b3c5cd286726050b8bdf75a80f7c37,https://vision.cornell.edu/se3/wp-content/uploads/2014/09/osb_iccv09_cam.pdf
+e48fb3ee27eef1e503d7ba07df8eb1524c47f4a6,http://pdfs.semanticscholar.org/e48f/b3ee27eef1e503d7ba07df8eb1524c47f4a6.pdf
+c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8,http://pdfs.semanticscholar.org/c652/6dd3060d63a6c90e8b7ff340383c4e0e0dd8.pdf
+0c60eebe10b56dbffe66bb3812793dd514865935,http://arxiv.org/pdf/1502.07209.pdf
+13719bbb4bb8bbe0cbcdad009243a926d93be433,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w6/papers/Tian_Deep_LDA-Pruned_Nets_CVPR_2017_paper.pdf
+59bfeac0635d3f1f4891106ae0262b81841b06e4,http://pdfs.semanticscholar.org/59bf/eac0635d3f1f4891106ae0262b81841b06e4.pdf
+193ec7bb21321fcf43bbe42233aed06dbdecbc5c,http://pdfs.semanticscholar.org/d40e/f2ca85d8b7540948677c2ed07f1f3677cfdd.pdf
+44fbbaea6271e47ace47c27701ed05e15da8f7cf,http://pdfs.semanticscholar.org/44fb/baea6271e47ace47c27701ed05e15da8f7cf.pdf
+7e9df45ece7843fe050033c81014cc30b3a8903a,http://pdfs.semanticscholar.org/7e9d/f45ece7843fe050033c81014cc30b3a8903a.pdf
+4362368dae29cc66a47114d5ffeaf0534bf0159c,http://pdfs.semanticscholar.org/4362/368dae29cc66a47114d5ffeaf0534bf0159c.pdf
+4a64758786e3f49fc13781304197591ffbd69a6e,http://vicos.fri.uni-lj.si/alesl/files/2008/05/fidlerpami06.pdf
+99001ac9fdaf7649c0d0bd8d2078719bafd216d9,http://people.ee.duke.edu/~lcarin/TPAMI_2007_General_tensor_analysis.pdf
+17aa78bd4331ef490f24bdd4d4cd21d22a18c09c,http://pdfs.semanticscholar.org/17aa/78bd4331ef490f24bdd4d4cd21d22a18c09c.pdf
+a4f37cfdde3af723336205b361aefc9eca688f5c,http://pdfs.semanticscholar.org/a4f3/7cfdde3af723336205b361aefc9eca688f5c.pdf
+230c4a30f439700355b268e5f57d15851bcbf41f,http://arxiv.org/pdf/1509.01509v2.pdf
+0290523cabea481e3e147b84dcaab1ef7a914612,http://pdfs.semanticscholar.org/0290/523cabea481e3e147b84dcaab1ef7a914612.pdf
+5e821cb036010bef259046a96fe26e681f20266e,https://pdfs.semanticscholar.org/d7e6/d52748c5ed386a90118fa385647c55954ab9.pdf
+1c1f957d85b59d23163583c421755869f248ceef,http://homepages.rpi.edu/~wuy9/ICCV15/FLD_iccv15.pdf
+39ce143238ea1066edf0389d284208431b53b802,http://pdfs.semanticscholar.org/39ce/143238ea1066edf0389d284208431b53b802.pdf
+d522c162bd03e935b1417f2e564d1357e98826d2,http://pdfs.semanticscholar.org/d522/c162bd03e935b1417f2e564d1357e98826d2.pdf
+22f656d0f8426c84a33a267977f511f127bfd7f3,https://arxiv.org/pdf/1609.06426v2.pdf
+2c61a9e26557dd0fe824909adeadf22a6a0d86b0,http://pdfs.semanticscholar.org/f117/3a4c5e3501323b37c1ae9a6d7dd8a236eab8.pdf
+501096cca4d0b3d1ef407844642e39cd2ff86b37,http://pdfs.semanticscholar.org/5010/96cca4d0b3d1ef407844642e39cd2ff86b37.pdf
+fc20149dfdff5fdf020647b57e8a09c06e11434b,http://pdfs.semanticscholar.org/fc20/149dfdff5fdf020647b57e8a09c06e11434b.pdf
+1e64b2d2f0a8a608d0d9d913c4baee6973995952,http://sergioescalera.com/wp-content/uploads/2017/06/FG_presentation.pdf
+4967b0acc50995aa4b28e576c404dc85fefb0601,http://pdfs.semanticscholar.org/4967/b0acc50995aa4b28e576c404dc85fefb0601.pdf
+50e47857b11bfd3d420f6eafb155199f4b41f6d7,http://pdfs.semanticscholar.org/50e4/7857b11bfd3d420f6eafb155199f4b41f6d7.pdf
+31a2fb63a3fc67da9932474cda078c9ac43f85c5,http://www.researchgate.net/profile/Sadeep_Jayasumana2/publication/269040853_Kernel_Methods_on_Riemannian_Manifolds_with_Gaussian_RBF_Kernels/links/54858a6a0cf283750c37264b.pdf
+122ee00cc25c0137cab2c510494cee98bd504e9f,http://pdfs.semanticscholar.org/122e/e00cc25c0137cab2c510494cee98bd504e9f.pdf
+28c9198d30447ffe9c96176805c1cd81615d98c8,http://pdfs.semanticscholar.org/28c9/198d30447ffe9c96176805c1cd81615d98c8.pdf
+08fbe3187f31b828a38811cc8dc7ca17933b91e9,http://www.merl.com/publications/docs/TR2011-084.pdf
+74f643579949ccd566f2638b85374e7a6857a9fc,http://www4.comp.polyu.edu.hk/~cslzhang/paper/conf/ICPR/MBP%20ICPR10(Revise%20final).pdf
+4be03fd3a76b07125cd39777a6875ee59d9889bd,http://homes.esat.kuleuven.be/~tuytelaa/Tuytelaars-BeyondConceptSearch-WIAMIS12.pdf
+1742ffea0e1051b37f22773613f10f69d2e4ed2c,http://pdfs.semanticscholar.org/1742/ffea0e1051b37f22773613f10f69d2e4ed2c.pdf
+584909d2220b52c0d037e8761d80cb22f516773f,http://www.cs.tau.ac.il/~nachumd/papers/OFTA.pdf
+2cfc28a96b57e0817cc9624a5d553b3aafba56f3,https://web.njit.edu/~borcea/papers/ieee-sarnoff16.pdf
+6f1a784ebb8df0689361afe26a2e5f7a1f4c66ca,https://www.computer.org/web/csdl/index/-/csdl/proceedings/fg/2013/5545/00/06553757.pdf
+e6d689054e87ad3b8fbbb70714d48712ad84dc1c,http://pdfs.semanticscholar.org/e6d6/89054e87ad3b8fbbb70714d48712ad84dc1c.pdf
+0fae5d9d2764a8d6ea691b9835d497dd680bbccd,http://pdfs.semanticscholar.org/0fae/5d9d2764a8d6ea691b9835d497dd680bbccd.pdf
+1dc6c0ad19b41e5190fc9fe50e3ae27f49f18fa2,http://www.researchgate.net/profile/Stefano_Alletto/publication/265611795_Head_Pose_Estimation_in_First-Person_Camera_Views/links/5416b5ef0cf2788c4b35e14b.pdf
+42e0127a3fd6a96048e0bc7aab6d0ae88ba00fb0,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553734.pdf
+6c690af9701f35cd3c2f6c8d160b8891ad85822a,http://www.umiacs.umd.edu/~fyang/papers/iccv15.pdf
+2d84e30c61281d3d7cdd11676683d6e66a68aea6,http://pdfs.semanticscholar.org/2d84/e30c61281d3d7cdd11676683d6e66a68aea6.pdf
+22143664860c6356d3de3556ddebe3652f9c912a,http://pdfs.semanticscholar.org/2214/3664860c6356d3de3556ddebe3652f9c912a.pdf
+9ac82909d76b4c902e5dde5838130de6ce838c16,http://pdfs.semanticscholar.org/9ac8/2909d76b4c902e5dde5838130de6ce838c16.pdf
+a1dd806b8f4f418d01960e22fb950fe7a56c18f1,https://www.cc.gatech.edu/~parikh/Publications/ParikhGrauman_CVPR2011_nameable.pdf
+566038a3c2867894a08125efe41ef0a40824a090,http://mirlab.org/conference_papers/international_conference/icassp%202009/pdfs/0001945.pdf
+787c1bb6d1f2341c5909a0d6d7314bced96f4681,http://pdfs.semanticscholar.org/787c/1bb6d1f2341c5909a0d6d7314bced96f4681.pdf
+00f1e5e954f9eb7ffde3ca74009a8c3c27358b58,http://www.vision.caltech.edu/holub/public_html/Papers/PDF/holub_et_al_face_clustering.pdf
+52012b4ecb78f6b4b9ea496be98bcfe0944353cd,http://pdfs.semanticscholar.org/5201/2b4ecb78f6b4b9ea496be98bcfe0944353cd.pdf
+9d8fd639a7aeab0dd1bc6eef9d11540199fd6fe2,http://pdfs.semanticscholar.org/9d8f/d639a7aeab0dd1bc6eef9d11540199fd6fe2.pdf
+1e1e66783f51a206509b0a427e68b3f6e40a27c8,http://pdfs.semanticscholar.org/1e1e/66783f51a206509b0a427e68b3f6e40a27c8.pdf
+6e173ad91b288418c290aa8891193873933423b3,http://pdfs.semanticscholar.org/eb3b/021406fe5a5002535b392cac60832aa8f162.pdf
+4abaebe5137d40c9fcb72711cdefdf13d9fc3e62,http://pdfs.semanticscholar.org/4aba/ebe5137d40c9fcb72711cdefdf13d9fc3e62.pdf
+88850b73449973a34fefe491f8836293fc208580,http://pdfs.semanticscholar.org/8885/0b73449973a34fefe491f8836293fc208580.pdf
+1943c6bf8df8a64bd539a5cd6d4e68785eb590c2,http://ccs.njit.edu/inst/source/02MDDM08.pdf
+3be7b7eb11714e6191dd301a696c734e8d07435f,http://pdfs.semanticscholar.org/3be7/b7eb11714e6191dd301a696c734e8d07435f.pdf
+be86d88ecb4192eaf512f29c461e684eb6c35257,http://pdfs.semanticscholar.org/be86/d88ecb4192eaf512f29c461e684eb6c35257.pdf
+0d087aaa6e2753099789cd9943495fbbd08437c0,http://pdfs.semanticscholar.org/beab/b0d9d30871d517c5d915cf852f7f5293f52f.pdf
+dae420b776957e6b8cf5fbbacd7bc0ec226b3e2e,http://pdfs.semanticscholar.org/dae4/20b776957e6b8cf5fbbacd7bc0ec226b3e2e.pdf
+009cd18ff06ff91c8c9a08a91d2516b264eee48e,http://pdfs.semanticscholar.org/009c/d18ff06ff91c8c9a08a91d2516b264eee48e.pdf
+972ef9ddd9059079bdec17abc8b33039ed25c99c,http://pdfs.semanticscholar.org/972e/f9ddd9059079bdec17abc8b33039ed25c99c.pdf
+9d36c81b27e67c515df661913a54a797cd1260bb,http://pdfs.semanticscholar.org/9d36/c81b27e67c515df661913a54a797cd1260bb.pdf
+05bcc5235721fd6a465a63774d28720bacc60858,http://www.site.uottawa.ca/~fshi098/papers/Gradient_Boundary_Histograms_for_Action_Recognition.pdf
+434bf475addfb580707208618f99c8be0c55cf95,http://pdfs.semanticscholar.org/8cea/404e8a5c4c11064923e5a6c023a0ae594a5a.pdf
+22dabd4f092e7f3bdaf352edd925ecc59821e168,http://dro.deakin.edu.au/eserv/DU:30044576/venkatesh-exploitingside-2008.pdf
+ddf099f0e0631da4a6396a17829160301796151c,http://pdfs.semanticscholar.org/ddf0/99f0e0631da4a6396a17829160301796151c.pdf
+5d197c8cd34473eb6cde6b65ced1be82a3a1ed14,http://cdn.intechopen.com/pdfs/20590/InTech-A_face_image_database_for_evaluating_out_of_focus_blur.pdf
+189b1859f77ddc08027e1e0f92275341e5c0fdc6,http://pdfs.semanticscholar.org/189b/1859f77ddc08027e1e0f92275341e5c0fdc6.pdf
+ccbfc004e29b3aceea091056b0ec536e8ea7c47e,http://research.microsoft.com/~yqxu/papers/IEEE%20ICIP2005.pdf
+38d56ddcea01ce99902dd75ad162213cbe4eaab7,http://pdfs.semanticscholar.org/38d5/6ddcea01ce99902dd75ad162213cbe4eaab7.pdf
+cbd004d4c5e3b64321dc1a8f05fa5d64500389c2,http://www.researchgate.net/profile/Wen_Li38/publication/261711227_POSE-ROBUST_REPRESENTATION_FOR_FACE_VERIFICATION_IN_UNCONSTRAINED_VIDEOS/links/00b7d53535ed96428c000000.pdf
+a29a22878e1881d6cbf6acff2d0b209c8d3f778b,http://pdfs.semanticscholar.org/a29a/22878e1881d6cbf6acff2d0b209c8d3f778b.pdf
+7c953868cd51f596300c8231192d57c9c514ae17,http://courses.cs.washington.edu/courses/cse590v/13au/CVPR13_FaceDetection.pdf
+73fd7e74457e0606704c5c3d3462549f1b2de1ad,http://pdfs.semanticscholar.org/73fd/7e74457e0606704c5c3d3462549f1b2de1ad.pdf
+ff44d8938c52cfdca48c80f8e1618bbcbf91cb2a,http://pdfs.semanticscholar.org/ff44/d8938c52cfdca48c80f8e1618bbcbf91cb2a.pdf
+a74251efa970b92925b89eeef50a5e37d9281ad0,http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf
+8f3e3f0f97844d3bfd9e9ec566ac7a54f6931b09,http://pdfs.semanticscholar.org/8f3e/3f0f97844d3bfd9e9ec566ac7a54f6931b09.pdf
+45513d0f2f5c0dac5b61f9ff76c7e46cce62f402,http://pdfs.semanticscholar.org/4551/3d0f2f5c0dac5b61f9ff76c7e46cce62f402.pdf
+3bd56f4cf8a36dd2d754704bcb71415dcbc0a165,http://www.humansensing.cs.cmu.edu/sites/default/files/4robustreg.pdf
+1fd3dbb6e910708fa85c8a86e17ba0b6fef5617c,http://pdfs.semanticscholar.org/1fd3/dbb6e910708fa85c8a86e17ba0b6fef5617c.pdf
+49e85869fa2cbb31e2fd761951d0cdfa741d95f3,http://studentnet.cs.manchester.ac.uk/pgt/COMP61021/reference/adaptive-manifold-learning.pdf
+29b86534d4b334b670914038c801987e18eb5532,http://www.cs.toronto.edu/~makarand/papers/ICVGIP2014.pdf
+610a4451423ad7f82916c736cd8adb86a5a64c59,http://pdfs.semanticscholar.org/610a/4451423ad7f82916c736cd8adb86a5a64c59.pdf
+3026722b4cbe9223eda6ff2822140172e44ed4b1,http://chenlab.ece.cornell.edu/people/Andy/Andy_files/GallagherICCV09Demographics.pdf
+02e39f23e08c2cb24d188bf0ca34141f3cc72d47,http://luks.fe.uni-lj.si/sl/osebje/vitomir/pub/ICASSP2010.pdf
+c94b3a05f6f41d015d524169972ae8fd52871b67,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Yan_The_Fastest_Deformable_2014_CVPR_paper.pdf
+d61e794ec22a4d4882181da17316438b5b24890f,http://pdfs.semanticscholar.org/d61e/794ec22a4d4882181da17316438b5b24890f.pdf
+3e4f84ce00027723bdfdb21156c9003168bc1c80,http://www.eurasip.org/Proceedings/Eusipco/Eusipco2011/papers/1569427521.pdf
+4866a5d6d7a40a26f038fc743e16345c064e9842,http://pdfs.semanticscholar.org/4866/a5d6d7a40a26f038fc743e16345c064e9842.pdf
+110c55b440b7c6a1692da9d8ee52389e43f6e76e,http://cs.brown.edu/people/ls/Publications/wacv2015dai_supplement.pdf
+26a72e9dd444d2861298d9df9df9f7d147186bcd,https://engineering.purdue.edu/~qobi/papers/mvap2016.pdf
+9f094341bea610a10346f072bf865cb550a1f1c1,http://zhiweizhu.com/papers/FIVR_MobileDevice_2009.pdf
+b1c5581f631dba78927aae4f86a839f43646220c,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553753.pdf
+9e8637a5419fec97f162153569ec4fc53579c21e,http://pdfs.semanticscholar.org/9e86/37a5419fec97f162153569ec4fc53579c21e.pdf
+124538b3db791e30e1b62f81d4101be435ee12ef,http://pdfs.semanticscholar.org/1245/38b3db791e30e1b62f81d4101be435ee12ef.pdf
+0b9d3a0c61ee498f8ed54aaa22d3c4e72aa56f40,http://www.researchgate.net/profile/Mark_Billinghurst/publication/221209697_A_Quadratic_Deformation_Model_for_Facial_Expression_Recognition/links/00b4952464de6e125e000000.pdf
+59e2037f5079794cb9128c7f0900a568ced14c2a,https://arxiv.org/pdf/1704.02231v1.pdf
+436d80cc1b52365ed7b2477c0b385b6fbbb51d3b,http://pdfs.semanticscholar.org/436d/80cc1b52365ed7b2477c0b385b6fbbb51d3b.pdf
+4aa286914f17cd8cefa0320e41800a99c142a1cd,http://www.vbettadapura.com/egocentric/food/Food-Bettadapura15.pdf
+59319c128c8ac3c88b4ab81088efe8ae9c458e07,http://pdfs.semanticscholar.org/5931/9c128c8ac3c88b4ab81088efe8ae9c458e07.pdf
+2bae810500388dd595f4ebe992c36e1443b048d2,http://pdfs.semanticscholar.org/2bae/810500388dd595f4ebe992c36e1443b048d2.pdf
+31e57fa83ac60c03d884774d2b515813493977b9,http://pdfs.semanticscholar.org/31e5/7fa83ac60c03d884774d2b515813493977b9.pdf
+3b80bf5a69a1b0089192d73fa3ace2fbb52a4ad5,http://pdfs.semanticscholar.org/3b80/bf5a69a1b0089192d73fa3ace2fbb52a4ad5.pdf
+83b4899d2899dd6a8d956eda3c4b89f27f1cd308,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_ICIP_2007/pdfs/0100377.pdf
+912a6a97af390d009773452814a401e258b77640,http://pdfs.semanticscholar.org/912a/6a97af390d009773452814a401e258b77640.pdf
+2d79d338c114ece1d97cde1aa06ab4cf17d38254,http://crcv.ucf.edu/papers/cvpr2016/Borji_CVPR2016.pdf
+e9fcd15bcb0f65565138dda292e0c71ef25ea8bb,http://pdfs.semanticscholar.org/e9fc/d15bcb0f65565138dda292e0c71ef25ea8bb.pdf
+75259a613285bdb339556ae30897cb7e628209fa,http://openaccess.thecvf.com/content_iccv_2015/papers/Kodirov_Unsupervised_Domain_Adaptation_ICCV_2015_paper.pdf
+a5e5094a1e052fa44f539b0d62b54ef03c78bf6a,http://pdfs.semanticscholar.org/a5e5/094a1e052fa44f539b0d62b54ef03c78bf6a.pdf
+17027a05c1414c9a06a1c5046899abf382a1142d,http://www.cs.cmu.edu/~rahuls/pub/cvpr2015-alionment-rahuls.pdf
+f93606d362fcbe62550d0bf1b3edeb7be684b000,http://pdfs.semanticscholar.org/f936/06d362fcbe62550d0bf1b3edeb7be684b000.pdf
+2b632f090c09435d089ff76220fd31fd314838ae,http://openaccess.thecvf.com/content_ICCV_2017_workshops/papers/w23/Hajibabaei_Early_Adaptation_of_ICCV_2017_paper.pdf
+d893f75206b122973cdbf2532f506912ccd6fbe0,http://pdfs.semanticscholar.org/d893/f75206b122973cdbf2532f506912ccd6fbe0.pdf
+51eba481dac6b229a7490f650dff7b17ce05df73,http://grail.cs.washington.edu/wp-content/uploads/2016/09/yatskar2016srv.pdf
+ac9a331327cceda4e23f9873f387c9fd161fad76,http://pdfs.semanticscholar.org/ac9a/331327cceda4e23f9873f387c9fd161fad76.pdf
+ff061f7e46a6213d15ac2eb2c49d9d3003612e49,http://pdfs.semanticscholar.org/ff06/1f7e46a6213d15ac2eb2c49d9d3003612e49.pdf
+760ba44792a383acd9ca8bef45765d11c55b48d4,http://class-specific.com/csf/papers/aes_tut.pdf
+09c586624ec65d7ef2d4d8d321e98f61698dcfe2,http://www.seas.upenn.edu/~timothee/papers/cvpr_2010_supplement.pdf
+9c781f7fd5d8168ddae1ce5bb4a77e3ca12b40b6,http://pdfs.semanticscholar.org/9c78/1f7fd5d8168ddae1ce5bb4a77e3ca12b40b6.pdf
+7c45b5824645ba6d96beec17ca8ecfb22dfcdd7f,http://pdfs.semanticscholar.org/7c45/b5824645ba6d96beec17ca8ecfb22dfcdd7f.pdf
+459e840ec58ef5ffcee60f49a94424eb503e8982,http://pdfs.semanticscholar.org/459e/840ec58ef5ffcee60f49a94424eb503e8982.pdf
+1473a233465ea664031d985e10e21de927314c94,http://pdfs.semanticscholar.org/e985/0501e707f8783172ecacfe0cd29159abda34.pdf
+23aef683f60cb8af239b0906c45d11dac352fb4e,http://pdfs.semanticscholar.org/b6cd/e64dcf864e457a83b72b7742fd19984a7552.pdf
+b5c749f98710c19b6c41062c60fb605e1ef4312a,http://www.yugangjiang.info/publication/icmr15-eval2stream.pdf
+3d6943f1573f992d6897489b73ec46df983d776c,http://pdfs.semanticscholar.org/757d/223b8db29e4cfba9530c7f942304c78cfee1.pdf
+5de5848dc3fc35e40420ffec70a407e4770e3a8d,http://pdfs.semanticscholar.org/5de5/848dc3fc35e40420ffec70a407e4770e3a8d.pdf
+19d4855f064f0d53cb851e9342025bd8503922e2,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989d468.pdf
+073eaa49ccde15b62425cda1d9feab0fea03a842,http://pdfs.semanticscholar.org/073e/aa49ccde15b62425cda1d9feab0fea03a842.pdf
+af54dd5da722e104740f9b6f261df9d4688a9712,http://pdfs.semanticscholar.org/af54/dd5da722e104740f9b6f261df9d4688a9712.pdf
+b56f3a7c50bfcd113d0ba84e6aa41189e262d7ae,http://pdfs.semanticscholar.org/b6d3/c8322d8e6a0212456cf38c6ef59c13d062dd.pdf
+2e0addeffba4be98a6ad0460453fbab52616b139,http://pdfs.semanticscholar.org/3cd7/8b1f43ead1226554f450bafcb8fbe208b5f0.pdf
+7a0fb972e524cb9115cae655e24f2ae0cfe448e0,http://pdfs.semanticscholar.org/7a0f/b972e524cb9115cae655e24f2ae0cfe448e0.pdf
+098a1ccc13b8d6409aa333c8a1079b2c9824705b,http://people.cs.pitt.edu/~kovashka/ut/pivots-kovashka-iccv2013.pdf
+f3fcaae2ea3e998395a1443c87544f203890ae15,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553791.pdf
+13188a88bbf83a18dd4964e3f89d0bc0a4d3a0bd,http://pdfs.semanticscholar.org/1318/8a88bbf83a18dd4964e3f89d0bc0a4d3a0bd.pdf
+062d0813815c2b9864cd9bb4f5a1dc2c580e0d90,https://infoscience.epfl.ch/record/230310/files/AliakbarianEtAlICCV17.pdf?version=1
+28c0cb56e7f97046d6f3463378d084e9ea90a89a,http://www.robots.ox.ac.uk/~vgg/publications/2005/Arandjelovic05a/arandjelovic05a.pdf
+c32383330df27625592134edd72d69bb6b5cff5c,http://www.iis.sinica.edu.tw/papers/song/13690-F.pdf
+1b1173a3fb33f9dfaf8d8cc36eb0bf35e364913d,http://www.pitt.edu/~jeffcohn/biblio/dicta2010.pdf
+bbf01aa347982592b3e4c9e4f433e05d30e71305,https://pdfs.semanticscholar.org/bbf0/1aa347982592b3e4c9e4f433e05d30e71305.pdf
+2e86402b354516d0a8392f75430156d629ca6281,https://arxiv.org/pdf/1604.03628v2.pdf
+1ad97cce5fa8e9c2e001f53f6f3202bddcefba22,http://files.is.tue.mpg.de/black/papers/RGA2014.pdf
+49570b41bd9574bd9c600e24b269d945c645b7bd,http://pdfs.semanticscholar.org/4957/0b41bd9574bd9c600e24b269d945c645b7bd.pdf
+e5737ffc4e74374b0c799b65afdbf0304ff344cb,http://pdfs.semanticscholar.org/e573/7ffc4e74374b0c799b65afdbf0304ff344cb.pdf
+ad6c7cc5c0f4ab273fef105ff3761d2c08609a20,https://people.cs.clemson.edu/~jzwang/1701863/mm2016/p1405-huo-ACM%20MM-Jing%20HUO-2016-10-19.pdf
+28b061b5c7f88f48ca5839bc8f1c1bdb1e6adc68,https://www.cc.gatech.edu/~parikh/Publications/annoyance_prediction_CVPR2014.pdf
+1e07500b00fcd0f65cf30a11f9023f74fe8ce65c,http://vijaychan.github.io/Publications/2015%20ICIP%20-%20Whole%20Subspace%20Discriminant%20Analysis%20for%20Face%20Recognition.pdf
+0b183f5260667c16ef6f640e5da50272c36d599b,http://pdfs.semanticscholar.org/0b18/3f5260667c16ef6f640e5da50272c36d599b.pdf
+a15c728d008801f5ffc7898568097bbeac8270a4,http://pdfs.semanticscholar.org/a15c/728d008801f5ffc7898568097bbeac8270a4.pdf
+27173d0b9bb5ce3a75d05e4dbd8f063375f24bb5,http://pdfs.semanticscholar.org/2717/3d0b9bb5ce3a75d05e4dbd8f063375f24bb5.pdf
+ac21c8aceea6b9495574f8f9d916e571e2fc497f,http://pdfs.semanticscholar.org/ac21/c8aceea6b9495574f8f9d916e571e2fc497f.pdf
+529e2ce6fb362bfce02d6d9a9e5de635bde81191,http://image.sciencenet.cn/olddata/kexue.com.cn/upload/blog/file/2011/1/20111721232398113.pdf
+6bb630dfa797168e6627d972560c3d438f71ea99,http://arxiv.org/pdf/1609.03056v1.pdf
+7f1f3d7b1a4e7fc895b77cb23b1119a6f13e4d3a,http://pdfs.semanticscholar.org/7f1f/3d7b1a4e7fc895b77cb23b1119a6f13e4d3a.pdf
+4136a4c4b24c9c386d00e5ef5dffdd31ca7aea2c,http://www.ifp.illinois.edu/~dagli/papers/ICME07.pdf
+89bc311df99ad0127383a9149d1684dfd8a5aa34,http://pdfs.semanticscholar.org/89bc/311df99ad0127383a9149d1684dfd8a5aa34.pdf
+8b10383ef569ea0029a2c4a60cc2d8c87391b4db,http://pdfs.semanticscholar.org/fe2d/20dca6dcedc7944cc2d9fea76de6cbb9d90c.pdf
+71b376dbfa43a62d19ae614c87dd0b5f1312c966,http://www.cs.cmu.edu/~ltrutoiu/pdfs/FG2013_trutoiu.pdf
+0c20fd90d867fe1be2459223a3cb1a69fa3d44bf,http://pdfs.semanticscholar.org/0c20/fd90d867fe1be2459223a3cb1a69fa3d44bf.pdf
+4df889b10a13021928007ef32dc3f38548e5ee56,http://ww2.cs.fsu.edu/~ywu/PDF-files/IJCNN.pdf
+1384a83e557b96883a6bffdb8433517ec52d0bea,http://pdfs.semanticscholar.org/6be6/392550222ca07ba4c47931bffaedace72d24.pdf
+205b34b6035aa7b23d89f1aed2850b1d3780de35,http://mirlab.org/conference_papers/International_Conference/ICASSP%202014/papers/p504-jiang.pdf
+06bad0cdda63e3fd054e7b334a5d8a46d8542817,http://vision.cs.utexas.edu/projects/featuresharing/0975.pdf
+02467703b6e087799e04e321bea3a4c354c5487d,http://biometrics.cse.msu.edu/Publications/Face/AdamsAllenMillerKalkaJain_CVPRWB2016_GRPR.pdf
+5c8ae37d532c7bb8d7f00dfde84df4ba63f46297,http://pdfs.semanticscholar.org/5c8a/e37d532c7bb8d7f00dfde84df4ba63f46297.pdf
+46196735a201185db3a6d8f6e473baf05ba7b68f,http://pdfs.semanticscholar.org/4619/6735a201185db3a6d8f6e473baf05ba7b68f.pdf
+92c2dd6b3ac9227fce0a960093ca30678bceb364,https://aran.library.nuigalway.ie/bitstream/handle/10379/1350/On%20color%20texture%20normalization%20for%20active%20appearance%20models.pdf?isAllowed=y&sequence=1
+3b2d5585af59480531616fe970cb265bbdf63f5b,http://pdfs.semanticscholar.org/3b2d/5585af59480531616fe970cb265bbdf63f5b.pdf
+1bddad4dc0dfa8efa402aa5d18c29304a5760f12,https://www.researchgate.net/profile/Iickho_Song/publication/254062033_Complexity-Reduced_Scheme_for_Feature_Extraction_With_Linear_Discriminant_Analysis/links/53d694ce0cf228d363ea69d5.pdf
+649eb674fc963ce25e4e8ce53ac7ee20500fb0e3,http://chenlab.ece.cornell.edu/Publication/Kuan-Chuan/WACV16.pdf
+512befa10b9b704c9368c2fbffe0dc3efb1ba1bf,http://pdfs.semanticscholar.org/bd42/e0a6a1082e8c197a7b0a9b710434cd7c5a47.pdf
+86b985b285c0982046650e8d9cf09565a939e4f9,http://pdfs.semanticscholar.org/86b9/85b285c0982046650e8d9cf09565a939e4f9.pdf
+0580edbd7865414c62a36da9504d1169dea78d6f,https://arxiv.org/pdf/1611.04251v1.pdf
+794ddb1f3b7598985d4d289b5b0664be736a50c4,http://pdfs.semanticscholar.org/794d/db1f3b7598985d4d289b5b0664be736a50c4.pdf
+0fad544edfc2cd2a127436a2126bab7ad31ec333,http://citeseerx.ist.psu.edu/viewdoc/download;jsessionid=7D609FEFFC36336C4A45ECA3B56C336A?doi=10.1.1.476.9590&rep=rep1&type=pdf
+d231a81b38fde73bdbf13cfec57d6652f8546c3c,http://pdfs.semanticscholar.org/d231/a81b38fde73bdbf13cfec57d6652f8546c3c.pdf
+dba493caf6647214c8c58967a8251641c2bda4c2,http://pdfs.semanticscholar.org/dba4/93caf6647214c8c58967a8251641c2bda4c2.pdf
+4d356f347ab6647fb3e8ed8c2154dbd359e479ed,http://www.researchgate.net/profile/Anna_Esposito/publication/225441684_Extracting_and_Associating_Meta-features_for_Understanding_Peoples_Emotional_Behaviour_Face_and_Speech/links/02e7e52bed3a1b106e000000.pdf
+1a41831a3d7b0e0df688fb6d4f861176cef97136,http://pdfs.semanticscholar.org/1fae/8f87f83bb707c4b38c23e93ae2bcb900b962.pdf
+7f36dd9ead29649ed389306790faf3b390dc0aa2,http://pdfs.semanticscholar.org/7f36/dd9ead29649ed389306790faf3b390dc0aa2.pdf
+55804f85613b8584d5002a5b0ddfe86b0d0e3325,http://pdfs.semanticscholar.org/ba13/b161aa8e6f6cb511592016058882d976a898.pdf
+241d2c517dbc0e22d7b8698e06ace67de5f26fdf,http://pdfs.semanticscholar.org/bfc3/546fa119443fdcbac3a5723647c2ba0007ac.pdf
+4157e45f616233a0874f54a59c3df001b9646cd7,http://pdfs.semanticscholar.org/4157/e45f616233a0874f54a59c3df001b9646cd7.pdf
+37105ca0bc1f11fcc7c6b7946603f3d572571d76,http://vipl.ict.ac.cn/sites/default/files/papers/files/2012_TIST_dmzhai_Multi-view%20metric%20learning%20with%20global%20consistency%20and%20local%20smoothness.pdf
+a30869c5d4052ed1da8675128651e17f97b87918,http://pdfs.semanticscholar.org/a308/69c5d4052ed1da8675128651e17f97b87918.pdf
+729a9d35bc291cc7117b924219bef89a864ce62c,http://pdfs.semanticscholar.org/729a/9d35bc291cc7117b924219bef89a864ce62c.pdf
+d72973a72b5d891a4c2d873daeb1bc274b48cddf,http://pdfs.semanticscholar.org/d729/73a72b5d891a4c2d873daeb1bc274b48cddf.pdf
+abce06a96a7c3095bfc36eed8779d89263769b85,http://ai.pku.edu.cn/aiwebsite/research.files/collected%20papers%20-%20others/Analyzing%20Asymmetry%20Biometric%20in%20the%20Frequency%20Domain%20for%20Face%20Recognition.pdf
+b171f9e4245b52ff96790cf4f8d23e822c260780,http://pdfs.semanticscholar.org/b171/f9e4245b52ff96790cf4f8d23e822c260780.pdf
+30180f66d5b4b7c0367e4b43e2b55367b72d6d2a,http://www.robots.ox.ac.uk/~vgg/publications/2017/Crosswhite17/crosswhite17.pdf
+07ac2e342db42589322b28ef291c2702f4a793a8,http://www.cs.illinois.edu/homes/dhoiem/publications/cvpr2009_santosh_context.pdf
+6eaeac9ae2a1697fa0aa8e394edc64f32762f578,http://pdfs.semanticscholar.org/6eae/ac9ae2a1697fa0aa8e394edc64f32762f578.pdf
+85674b1b6007634f362cbe9b921912b697c0a32c,http://pdfs.semanticscholar.org/8567/4b1b6007634f362cbe9b921912b697c0a32c.pdf
+1bc214c39536c940b12c3a2a6b78cafcbfddb59a,http://pdfs.semanticscholar.org/1bc2/14c39536c940b12c3a2a6b78cafcbfddb59a.pdf
+2c285dadfa6c07d392ee411d0213648a8a1cf68f,http://www.contrib.andrew.cmu.edu/~yzhiding/ICMI15.pdf
+e6865b000cf4d4e84c3fe895b7ddfc65a9c4aaec,http://pdfs.semanticscholar.org/e686/5b000cf4d4e84c3fe895b7ddfc65a9c4aaec.pdf
+7f533bd8f32525e2934a66a5b57d9143d7a89ee1,http://pdfs.semanticscholar.org/7f53/3bd8f32525e2934a66a5b57d9143d7a89ee1.pdf
+73f467b4358ac1cafb57f58e902c1cab5b15c590,http://pdfs.semanticscholar.org/73f4/67b4358ac1cafb57f58e902c1cab5b15c590.pdf
+55079a93b7d1eb789193d7fcdcf614e6829fad0f,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w10/papers/Conde_Efficient_and_Robust_ICCV_2015_paper.pdf
+13b1b18b9cfa6c8c44addb9a81fe10b0e89db32a,http://www.sfu.ca/~smuralid/papers/thesis.pdf
+2d05e768c64628c034db858b7154c6cbd580b2d5,http://pdfs.semanticscholar.org/2d05/e768c64628c034db858b7154c6cbd580b2d5.pdf
+aae0e417bbfba701a1183d3d92cc7ad550ee59c3,https://staff.fnwi.uva.nl/th.gevers/pub/GeversTIP12-3.pdf
+a255a54b8758050ea1632bf5a88a201cd72656e1,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2013/W16/papers/Tamersoy_Nonparametric_Facial_Feature_2013_CVPR_paper.pdf
+333e7ad7f915d8ee3bb43a93ea167d6026aa3c22,http://www.eurecom.fr/en/publication/4277/download/mm-publi-4277.pdf
+5e16f10f2d667d17c029622b9278b6b0a206d394,http://pdfs.semanticscholar.org/5e16/f10f2d667d17c029622b9278b6b0a206d394.pdf
+1270044a3fa1a469ec2f4f3bd364754f58a1cb56,http://pdfs.semanticscholar.org/1270/044a3fa1a469ec2f4f3bd364754f58a1cb56.pdf
+1d846934503e2bd7b8ea63b2eafe00e29507f06a,http://www.iipl.fudan.edu.cn/~zhangjp/literatures/MLF/manifold%20learning/20fa.pdf
+ad784332cc37720f03df1c576e442c9c828a587a,http://pdfs.semanticscholar.org/ad78/4332cc37720f03df1c576e442c9c828a587a.pdf
+f5149fb6b455a73734f1252a96a9ce5caa95ae02,http://openaccess.thecvf.com/content_cvpr_2017/papers/Zhang_Low-Rank-Sparse_Subspace_Representation_CVPR_2017_paper.pdf
+915d4a0fb523249ecbc88eb62cb150a60cf60fa0,http://pdfs.semanticscholar.org/915d/4a0fb523249ecbc88eb62cb150a60cf60fa0.pdf
+4b321065f6a45e55cb7f9d7b1055e8ac04713b41,http://pdfs.semanticscholar.org/4b32/1065f6a45e55cb7f9d7b1055e8ac04713b41.pdf
+0fabb4a40f2e3a2502cd935e54e090a304006c1c,http://arxiv.org/pdf/1202.4207v2.pdf
+02239ae5e922075a354169f75f684cad8fdfd5ab,http://ai2-website.s3.amazonaws.com/publications/CVPR_2017_Situation.pdf
+9dcc6dde8d9f132577290d92a1e76b5decc6d755,http://pdfs.semanticscholar.org/a36a/3cd13c59777b6b07e41c4026e55b55e8096f.pdf
+426913f890f07a5d79e6c23b83cd928ffc00e494,http://www2012.wwwconference.org/proceedings/proceedings/p939.pdf
+439ca6ded75dffa5ddea203dde5e621dc4a88c3e,http://research.cs.rutgers.edu/~hxp1/rc_images/hai_facetrack_icpr2016.pdf
+5bb53fb36a47b355e9a6962257dd465cd7ad6827,http://pdfs.semanticscholar.org/5bb5/3fb36a47b355e9a6962257dd465cd7ad6827.pdf
+91883dabc11245e393786d85941fb99a6248c1fb,http://pdfs.semanticscholar.org/9188/3dabc11245e393786d85941fb99a6248c1fb.pdf
+9cadd166893f1b8aaecb27280a0915e6694441f5,http://pdfs.semanticscholar.org/9cad/d166893f1b8aaecb27280a0915e6694441f5.pdf
+e59813940c5c83b1ce63f3f451d03d34d2f68082,http://pdfs.semanticscholar.org/e598/13940c5c83b1ce63f3f451d03d34d2f68082.pdf
+6156eaad00aad74c90cbcfd822fa0c9bd4eb14c2,http://pdfs.semanticscholar.org/6156/eaad00aad74c90cbcfd822fa0c9bd4eb14c2.pdf
+6b1b43d58faed7b457b1d4e8c16f5f7e7d819239,http://pdfs.semanticscholar.org/6b1b/43d58faed7b457b1d4e8c16f5f7e7d819239.pdf
+1de8f38c35f14a27831130060810cf9471a62b45,http://www.psy.miami.edu/faculty/dmessinger/c_c/rsrcs/rdgs/emot/Unsupervised_Discovery.IJCompVis.2017.pdf
+0c54e9ac43d2d3bab1543c43ee137fc47b77276e,http://pdfs.semanticscholar.org/0c54/e9ac43d2d3bab1543c43ee137fc47b77276e.pdf
+2c258eec8e4da9e65018f116b237f7e2e0b2ad17,http://openaccess.thecvf.com/content_cvpr_2017/papers/Qiu_Deep_Quantization_Encoding_CVPR_2017_paper.pdf
+dd8ad6ce8701d4b09be460a6cf058fcd5318c700,https://www.researchgate.net/profile/Daniel_Riccio/publication/260652311_Robust_Face_Recognition_for_Uncontrolled_Pose_and_Illumination_Changes/links/5402f4450cf23d9765a55fbc.pdf
+1885acea0d24e7b953485f78ec57b2f04e946eaf,http://openaccess.thecvf.com/content_ICCV_2017_workshops/papers/w36/Xiong_Combining_Local_and_ICCV_2017_paper.pdf
+d35c82588645b94ce3f629a0b98f6a531e4022a3,http://pdfs.semanticscholar.org/d35c/82588645b94ce3f629a0b98f6a531e4022a3.pdf
+ed08ac6da6f8ead590b390b1d14e8a9b97370794,http://pdfs.semanticscholar.org/ed08/ac6da6f8ead590b390b1d14e8a9b97370794.pdf
+718824256b4461d62d192ab9399cfc477d3660b4,http://pdfs.semanticscholar.org/7188/24256b4461d62d192ab9399cfc477d3660b4.pdf
+c81ee278d27423fd16c1a114dcae486687ee27ff,http://pdfs.semanticscholar.org/c81e/e278d27423fd16c1a114dcae486687ee27ff.pdf
+155199d7f10218e29ddaee36ebe611c95cae68c4,http://pdfs.semanticscholar.org/1551/99d7f10218e29ddaee36ebe611c95cae68c4.pdf
+50e45e9c55c9e79aaae43aff7d9e2f079a2d787b,http://pdfs.semanticscholar.org/50e4/5e9c55c9e79aaae43aff7d9e2f079a2d787b.pdf
+4568063b7efb66801e67856b3f572069e774ad33,http://www.dbs.ifi.lmu.de/~yu_k/cvpr11_0712.pdf
+8fd9c22b00bd8c0bcdbd182e17694046f245335f,http://pdfs.semanticscholar.org/8fd9/c22b00bd8c0bcdbd182e17694046f245335f.pdf
+125d82fee1b9fbcc616622b0977f3d06771fc152,http://www.ee.cuhk.edu.hk/~xgwang/papers/luoWTcvpr12.pdf
+78df7d3fdd5c32f037fb5cc2a7c104ac1743d74e,http://arxiv.org/pdf/1503.01224.pdf
+362ba8317aba71c78dafca023be60fb71320381d,http://pdfs.semanticscholar.org/362b/a8317aba71c78dafca023be60fb71320381d.pdf
+a70e36daf934092f40a338d61e0fe27be633f577,http://pdfs.semanticscholar.org/a70e/36daf934092f40a338d61e0fe27be633f577.pdf
+85188c77f3b2de3a45f7d4f709b6ea79e36bd0d9,http://pdfs.semanticscholar.org/8518/8c77f3b2de3a45f7d4f709b6ea79e36bd0d9.pdf
+29c1f733a80c1e07acfdd228b7bcfb136c1dff98,http://pdfs.semanticscholar.org/29c1/f733a80c1e07acfdd228b7bcfb136c1dff98.pdf
+f1d090fcea63d9f9e835c49352a3cd576ec899c1,http://pdfs.semanticscholar.org/f1d0/90fcea63d9f9e835c49352a3cd576ec899c1.pdf
+923ede53b0842619831e94c7150e0fc4104e62f7,http://mirlab.org/conference_papers/International_Conference/ICASSP%202016/pdfs/0001293.pdf
+38682c7b19831e5d4f58e9bce9716f9c2c29c4e7,http://pdfs.semanticscholar.org/3868/2c7b19831e5d4f58e9bce9716f9c2c29c4e7.pdf
+00b29e319ff8b3a521b1320cb8ab5e39d7f42281,http://pdfs.semanticscholar.org/8007/b8afa13869d2a7c681db8bd7c2e7df1ef02d.pdf
+58b8588c01196070674ceabe5366b20f73c2912d,http://www.cse.ust.hk/~qnature/pdf/ICDM2015.pdf
+42f6f5454dda99d8989f9814989efd50fe807ee8,http://pdfs.semanticscholar.org/42f6/f5454dda99d8989f9814989efd50fe807ee8.pdf
+1f94734847c15fa1da68d4222973950d6b683c9e,https://arxiv.org/pdf/1512.02895v1.pdf
+b19e83eda4a602abc5a8ef57467c5f47f493848d,http://www.cs.jhu.edu/~hwang/papers/SPL10.pdf
+27ee8482c376ef282d5eb2e673ab042f5ded99d7,http://sylvain.legallou.fr/Fichiers/p_ICARCV06_NewNormalization_LeGallou.pdf
+70580ed8bc482cad66e059e838e4a779081d1648,http://pdfs.semanticscholar.org/7058/0ed8bc482cad66e059e838e4a779081d1648.pdf
+0b7d1386df0cf957690f0fe330160723633d2305,http://www.cs.rpi.edu/~magdon/ps/conference/AccentICMLA2009.pdf
+7dd578878e84337d6d0f5eb593f22cabeacbb94c,http://pdfs.semanticscholar.org/7dd5/78878e84337d6d0f5eb593f22cabeacbb94c.pdf
+652aac54a3caf6570b1c10c993a5af7fa2ef31ff,http://pdfs.semanticscholar.org/652a/ac54a3caf6570b1c10c993a5af7fa2ef31ff.pdf
+3e207c05f438a8cef7dd30b62d9e2c997ddc0d3f,http://pdfs.semanticscholar.org/bca7/c0a8c5b0503a4ee43f3561f540918071aaa3.pdf
+190d8bd39c50b37b27b17ac1213e6dde105b21b8,https://dr.ntu.edu.sg/bitstream/handle/10220/18955/fp518-wang.pdf?isAllowed=y&sequence=1
+48f211a9764f2bf6d6dda4a467008eda5680837a,http://www.lv-nus.org/papers/2011/iccv2011-occupation.pdf
+a6ffe238eaf8632b4a8a6f718c8917e7f3261546,http://pdfs.semanticscholar.org/a6ff/e238eaf8632b4a8a6f718c8917e7f3261546.pdf
+a2429cc2ccbabda891cc5ae340b24ad06fcdbed5,http://pdfs.semanticscholar.org/e12a/0f0bca1624965386ac9cf95f711c90441553.pdf
+1056347fc5e8cd86c875a2747b5f84fd570ba232,http://arxiv.org/pdf/1607.06408v1.pdf
+171ca25bc2cdfc79cad63933bcdd420d35a541ab,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Alnajar_Calibration-Free_Gaze_Estimation_2013_ICCV_paper.pdf
+16286fb0f14f6a7a1acc10fcd28b3ac43f12f3eb,http://pdfs.semanticscholar.org/1628/6fb0f14f6a7a1acc10fcd28b3ac43f12f3eb.pdf
+4cf3419dbf83a76ccac11828ca57b46bbbe54e0a,https://www.researchgate.net/profile/Muhammad_Sharif9/publication/224173583_Illumination_normalization_preprocessing_for_face_recognition/links/02e7e51a47972ae996000000.pdf
+77fb9e36196d7bb2b505340b6b94ba552a58b01b,http://pdfs.semanticscholar.org/77fb/9e36196d7bb2b505340b6b94ba552a58b01b.pdf
+9264b390aa00521f9bd01095ba0ba4b42bf84d7e,http://pdfs.semanticscholar.org/9264/b390aa00521f9bd01095ba0ba4b42bf84d7e.pdf
+76b9fe32d763e9abd75b427df413706c4170b95c,http://pdfs.semanticscholar.org/76b9/fe32d763e9abd75b427df413706c4170b95c.pdf
+bffbd04ee5c837cd919b946fecf01897b2d2d432,http://pdfs.semanticscholar.org/bffb/d04ee5c837cd919b946fecf01897b2d2d432.pdf
+23a8d02389805854cf41c9e5fa56c66ee4160ce3,http://www.advancedsourcecode.com/influencelow10.pdf
+1fe121925668743762ce9f6e157081e087171f4c,https://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W02/papers/Ylioinas_Unsupervised_Learning_of_2015_CVPR_paper.pdf
+2c34bf897bad780e124d5539099405c28f3279ac,http://pdfs.semanticscholar.org/2c34/bf897bad780e124d5539099405c28f3279ac.pdf
+d28d32af7ef9889ef9cb877345a90ea85e70f7f1,http://multicomp.cs.cmu.edu/wp-content/uploads/2017/10/2017_FG_Kim_Local.pdf
+6341274aca0c2977c3e1575378f4f2126aa9b050,http://arxiv.org/pdf/1609.03536v1.pdf
+dd8d53e67668067fd290eb500d7dfab5b6f730dd,http://mmlab.ie.cuhk.edu.hk/archive/2007/IFS07_subspace.pdf
+5b6bed112e722c0629bcce778770d1b28e42fc96,http://pdfs.semanticscholar.org/5b6b/ed112e722c0629bcce778770d1b28e42fc96.pdf
+2983efadb1f2980ab5ef20175f488f77b6f059d7,http://pdfs.semanticscholar.org/2983/efadb1f2980ab5ef20175f488f77b6f059d7.pdf
+c29e33fbd078d9a8ab7adbc74b03d4f830714cd0,http://research.microsoft.com/en-us/um/people/leizhang/Paper/FG04-Longbin.pdf
+304a306d2a55ea41c2355bd9310e332fa76b3cb0,http://pdfs.semanticscholar.org/95da/2d1137637e89da8b7a16e0dc6168cfceb693.pdf
+19e0cc41b9f89492b6b8c2a8a58d01b8242ce00b,http://pdfs.semanticscholar.org/4088/3844c1ceab95cb92498a92bfdf45beaa288e.pdf
+4e0e49c280acbff8ae394b2443fcff1afb9bdce6,http://pdfs.semanticscholar.org/4e0e/49c280acbff8ae394b2443fcff1afb9bdce6.pdf
+09dd01e19b247a33162d71f07491781bdf4bfd00,http://pdfs.semanticscholar.org/5991/0d557b54566ec97280480daca02685f21907.pdf
diff --git a/scraper/reports/misc/db_paper_pdf-2.csv b/scraper/reports/misc/db_paper_pdf-2.csv
new file mode 100644
index 00000000..0adc7ca6
--- /dev/null
+++ b/scraper/reports/misc/db_paper_pdf-2.csv
@@ -0,0 +1,1639 @@
+b018fa5cb9793e260b8844ae155bd06380988584,http://pdfs.semanticscholar.org/b018/fa5cb9793e260b8844ae155bd06380988584.pdf
+39f525f3a0475e6bbfbe781ae3a74aca5b401125,http://pdfs.semanticscholar.org/39f5/25f3a0475e6bbfbe781ae3a74aca5b401125.pdf
+0052de4885916cf6949a6904d02336e59d98544c,https://rd.springer.com/content/pdf/10.1007/s10994-005-3561-6.pdf
+6974449ce544dc208b8cc88b606b03d95c8fd368,https://ibug.doc.ic.ac.uk/media/uploads/documents/martinezvalstar-pami_final.pdf
+751b26e7791b29e4e53ab915bfd263f96f531f56,http://affect.media.mit.edu/pdfs/12.Hernandez-Hoque-Drevo-Picard-MoodMeter-Ubicomp.pdf
+551fa37e8d6d03b89d195a5c00c74cc52ff1c67a,http://pdfs.semanticscholar.org/551f/a37e8d6d03b89d195a5c00c74cc52ff1c67a.pdf
+2b339ece73e3787f445c5b92078e8f82c9b1c522,http://pdfs.semanticscholar.org/7a2e/e06aaa3f342937225272951c0b6dd4309a7a.pdf
+b613b30a7cbe76700855479a8d25164fa7b6b9f1,http://www.cs.ucf.edu/~kienhua/classes/COP6731/Reading/AffectiveComputing.pdf
+a2b9cee7a3866eb2db53a7d81afda72051fe9732,http://pdfs.semanticscholar.org/a2b9/cee7a3866eb2db53a7d81afda72051fe9732.pdf
+21626caa46cbf2ae9e43dbc0c8e789b3dbb420f1,http://www.eecs.qmul.ac.uk/~jason/Research/PreprintVersion/Transductive%20VIS-NIR%20Face%20Matching.pdf
+2f9c173ccd8c1e6b88d7fb95d6679838bc9ca51d,http://arxiv.org/pdf/1604.02917v1.pdf
+042825549296ea419d95fcf0b5e71f72070a5f0d,http://eprints.pascal-network.org/archive/00008397/01/paper.pdf
+858901405086056361f8f1839c2f3d65fc86a748,http://pdfs.semanticscholar.org/8589/01405086056361f8f1839c2f3d65fc86a748.pdf
+ebb1c29145d31c4afa3c9be7f023155832776cd3,http://pdfs.semanticscholar.org/ebb1/c29145d31c4afa3c9be7f023155832776cd3.pdf
+98c2053e0c31fab5bcb9ce5386335b647160cc09,https://smartech.gatech.edu/bitstream/handle/1853/45502/GT-CS-12-10.pdf
+5fc664202208aaf01c9b62da5dfdcd71fdadab29,http://pdfs.semanticscholar.org/5fc6/64202208aaf01c9b62da5dfdcd71fdadab29.pdf
+2d25045ec63f9132371841c0beccd801d3733908,http://pdfs.semanticscholar.org/2d25/045ec63f9132371841c0beccd801d3733908.pdf
+3ea8a6dc79d79319f7ad90d663558c664cf298d4,http://pdfs.semanticscholar.org/3ea8/a6dc79d79319f7ad90d663558c664cf298d4.pdf
+621ff353960d5d9320242f39f85921f72be69dc8,http://www.research.rutgers.edu/~xiangyu/paper/FG_2013.pdf
+32d8e555441c47fc27249940991f80502cb70bd5,https://arxiv.org/pdf/1709.07886v1.pdf
+5e7cb894307f36651bdd055a85fdf1e182b7db30,http://pdfs.semanticscholar.org/5e7c/b894307f36651bdd055a85fdf1e182b7db30.pdf
+ec22eaa00f41a7f8e45ed833812d1ac44ee1174e,http://pdfs.semanticscholar.org/ec22/eaa00f41a7f8e45ed833812d1ac44ee1174e.pdf
+794c0dc199f0bf778e2d40ce8e1969d4069ffa7b,http://hcil2.cs.umd.edu/trs/2011-17/2011-17.pdf
+f9784db8ff805439f0a6b6e15aeaf892dba47ca0,http://pdfs.semanticscholar.org/f978/4db8ff805439f0a6b6e15aeaf892dba47ca0.pdf
+2ed3ce5cf9e262bcc48a6bd998e7fb70cf8a971c,http://pdfs.semanticscholar.org/6abe/c94e0af01d9706d73dfd91fd76139c7d99e0.pdf
+9af1cf562377b307580ca214ecd2c556e20df000,http://pdfs.semanticscholar.org/9af1/cf562377b307580ca214ecd2c556e20df000.pdf
+309e17e6223e13b1f76b5b0eaa123b96ef22f51b,https://static.aminer.org/pdf/PDF/000/337/771/image_synthesis_and_face_recognition_based_on_d_face_model.pdf
+4934d44aa89b6d871eb6709dd1d1eebf16f3aaf1,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Luo_A_Deep_Sum-Product_2013_ICCV_paper.pdf
+2bbbbe1873ad2800954058c749a00f30fe61ab17,http://pdfs.semanticscholar.org/2bbb/be1873ad2800954058c749a00f30fe61ab17.pdf
+c2fa83e8a428c03c74148d91f60468089b80c328,http://pdfs.semanticscholar.org/c2fa/83e8a428c03c74148d91f60468089b80c328.pdf
+8bf647fed40bdc9e35560021636dfb892a46720e,https://arxiv.org/pdf/1612.04061v1.pdf
+c1fc70e0952f6a7587b84bf3366d2e57fc572fd7,http://pdfs.semanticscholar.org/c1fc/70e0952f6a7587b84bf3366d2e57fc572fd7.pdf
+14ff9c89f00dacc8e0c13c94f9fadcd90e4e604d,https://www.comp.nus.edu.sg/~tsim/documents/cascade-cf-landmarks.pdf
+3506518d616343d3083f4fe257a5ee36b376b9e1,http://disi.unitn.it/~zen/data/icmi14_personalized.pdf
+27dafedccd7b049e87efed72cabaa32ec00fdd45,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2A_074.pdf
+18166432309000d9a5873f989b39c72a682932f5,http://pdfs.semanticscholar.org/1816/6432309000d9a5873f989b39c72a682932f5.pdf
+60970e124aa5fb964c9a2a5d48cd6eee769c73ef,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Tierney_Subspace_Clustering_for_2014_CVPR_paper.pdf
+1e213b03e1b8a6067bf37503904491e98b9e42df,http://figment.cse.usf.edu/~sfefilat/data/papers/TuAT10.9.pdf
+1fe990ca6df273de10583860933d106298655ec8,http://pdfs.semanticscholar.org/1fe9/90ca6df273de10583860933d106298655ec8.pdf
+2921719b57544cfe5d0a1614d5ae81710ba804fa,http://pdfs.semanticscholar.org/2921/719b57544cfe5d0a1614d5ae81710ba804fa.pdf
+779ad364cae60ca57af593c83851360c0f52c7bf,http://pdfs.semanticscholar.org/779a/d364cae60ca57af593c83851360c0f52c7bf.pdf
+6ae96f68187f1cdb9472104b5431ec66f4b2470f,http://pdfs.semanticscholar.org/6ae9/6f68187f1cdb9472104b5431ec66f4b2470f.pdf
+0d760e7d762fa449737ad51431f3ff938d6803fe,https://arxiv.org/pdf/1705.05922v1.pdf
+c5d13e42071813a0a9dd809d54268712eba7883f,http://www.csis.pace.edu/~ctappert/dps/2013BTAS/Papers/Paper%2016/PID2891229.pdf
+a065080353d18809b2597246bb0b48316234c29a,http://pdfs.semanticscholar.org/a065/080353d18809b2597246bb0b48316234c29a.pdf
+5feb1341a49dd7a597f4195004fe9b59f67e6707,http://pdfs.semanticscholar.org/5feb/1341a49dd7a597f4195004fe9b59f67e6707.pdf
+7a7f2403e3cc7207e76475e8f27a501c21320a44,http://www.apsipa2013.org/wp-content/uploads/2013/05/395_Emotion-recognition-Wu-2928773.pdf
+3f7723ab51417b85aa909e739fc4c43c64bf3e84,http://pdfs.semanticscholar.org/3f77/23ab51417b85aa909e739fc4c43c64bf3e84.pdf
+5028c0decfc8dd623c50b102424b93a8e9f2e390,http://pdfs.semanticscholar.org/5028/c0decfc8dd623c50b102424b93a8e9f2e390.pdf
+0ea7b7fff090c707684fd4dc13e0a8f39b300a97,https://arxiv.org/pdf/1711.06055v1.pdf
+ae9257f3be9f815db8d72819332372ac59c1316b,http://pdfs.semanticscholar.org/ae92/57f3be9f815db8d72819332372ac59c1316b.pdf
+353b6c1f431feac6edde12b2dde7e6e702455abd,http://pdfs.semanticscholar.org/8835/c80f8ad8ebd05771a9bce5a8637efbc4c8e3.pdf
+1af52c853ff1d0ddb8265727c1d70d81b4f9b3a9,http://pdfs.semanticscholar.org/1af5/2c853ff1d0ddb8265727c1d70d81b4f9b3a9.pdf
+a5bf83f99f71e3840f651fbeef9f334d8e75fd75,http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/1927.pdf
+c1f07ec629be1c6fe562af0e34b04c54e238dcd1,http://pdfs.semanticscholar.org/c1f0/7ec629be1c6fe562af0e34b04c54e238dcd1.pdf
+258a2dad71cb47c71f408fa0611a4864532f5eba,http://pdfs.semanticscholar.org/258a/2dad71cb47c71f408fa0611a4864532f5eba.pdf
+9eeada49fc2cba846b4dad1012ba8a7ee78a8bb7,http://pdfs.semanticscholar.org/9eea/da49fc2cba846b4dad1012ba8a7ee78a8bb7.pdf
+8323af714efe9a3cadb31b309fcc2c36c8acba8f,http://pdfs.semanticscholar.org/8323/af714efe9a3cadb31b309fcc2c36c8acba8f.pdf
+a6f81619158d9caeaa0863738ab400b9ba2d77c2,http://pdfs.semanticscholar.org/a6f8/1619158d9caeaa0863738ab400b9ba2d77c2.pdf
+87147418f863e3d8ff8c97db0b42695a1c28195b,http://pdfs.semanticscholar.org/8714/7418f863e3d8ff8c97db0b42695a1c28195b.pdf
+bf3f8726f2121f58b99b9e7287f7fbbb7ab6b5f5,https://ubicomp-mental-health.github.io/papers/2017/perception-syeda.pdf
+3be8964cef223698e587b4f71fc0c72c2eeef8cf,https://www.researchgate.net/profile/Mohammad_Reza_Mohammadi3/publication/264394830_Simultaneous_recognition_of_facial_expression_and_identity_via_sparse_representation/links/53df5c5b0cf2a76fb6682872.pdf?origin=publication_list
+2597b0dccdf3d89eaffd32e202570b1fbbedd1d6,http://pdfs.semanticscholar.org/26f3/03ae1912c16f08523a7d8db926e35114e8f0.pdf
+15ee80e86e75bf1413dc38f521b9142b28fe02d1,https://arxiv.org/pdf/1612.05322v1.pdf
+0f533bc9fdfb75a3680d71c84f906bbd59ee48f1,http://www.iis.sinica.edu.tw/papers/song/11837-F.pdf
+084bd02d171e36458f108f07265386f22b34a1ae,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Ren_Face_Alignment_at_2014_CVPR_paper.pdf
+11a210835b87ccb4989e9ba31e7559bb7a9fd292,http://profdoc.um.ac.ir/articles/a/1020638.pdf
+519a724426b5d9ad384d38aaf2a4632d3824f243,http://pdfs.semanticscholar.org/519a/724426b5d9ad384d38aaf2a4632d3824f243.pdf
+0b3a146c474166bba71e645452b3a8276ac05998,http://pdfs.semanticscholar.org/c6e5/17eb85bc6c68dff5d3fadb2d817e839c966b.pdf
+19e62a56b6772bbd37dfc6b8f948e260dbb474f5,http://pdfs.semanticscholar.org/19e6/2a56b6772bbd37dfc6b8f948e260dbb474f5.pdf
+ab8f9a6bd8f582501c6b41c0e7179546e21c5e91,http://pdfs.semanticscholar.org/ab8f/9a6bd8f582501c6b41c0e7179546e21c5e91.pdf
+c5765590c294146a8e3c9987d394c0990ab6a35b,http://media.cs.tsinghua.edu.cn/~imagevision/papers/%5B2012%5D084_P1B-31-cvpr2012-wan.pdf
+030ef31b51bd4c8d0d8f4a9a32b80b9192fe4c3f,http://pdfs.semanticscholar.org/030e/f31b51bd4c8d0d8f4a9a32b80b9192fe4c3f.pdf
+d50a40f2d24363809a9ac57cf7fbb630644af0e5,http://pdfs.semanticscholar.org/d50a/40f2d24363809a9ac57cf7fbb630644af0e5.pdf
+7c2c9b083817f7a779d819afee383599d2e97ed8,http://pdfs.semanticscholar.org/bcad/d9c086ccd2f217da25f9550b06a429d53011.pdf
+6a16b91b2db0a3164f62bfd956530a4206b23fea,http://pdfs.semanticscholar.org/6a16/b91b2db0a3164f62bfd956530a4206b23fea.pdf
+2b435ee691718d0b55d057d9be4c3dbb8a81526e,http://pdfs.semanticscholar.org/43ef/472c2c09d1ae2f2e5fc35d6d3ab7578658b4.pdf
+48853c25dc75481b0c77f408a8a76383287ebe2a,http://qil.uh.edu/qil/websitecontent/pdf/2015-45.pdf
+0b0958493e43ca9c131315bcfb9a171d52ecbb8a,http://pdfs.semanticscholar.org/0b09/58493e43ca9c131315bcfb9a171d52ecbb8a.pdf
+a967426ec9b761a989997d6a213d890fc34c5fe3,http://vision.ucsd.edu/sites/default/files/043-wacv.pdf
+898a66979c7e8b53a10fd58ac51fbfdb6e6e6e7c,http://pdfs.semanticscholar.org/898a/66979c7e8b53a10fd58ac51fbfdb6e6e6e7c.pdf
+8a336e9a4c42384d4c505c53fb8628a040f2468e,http://pdfs.semanticscholar.org/8a33/6e9a4c42384d4c505c53fb8628a040f2468e.pdf
+bc866c2ced533252f29cf2111dd71a6d1724bd49,http://pdfs.semanticscholar.org/bc86/6c2ced533252f29cf2111dd71a6d1724bd49.pdf
+8913a5b7ed91c5f6dec95349fbc6919deee4fc75,https://people.eecs.berkeley.edu/~pabbeel/papers/2014-ICRA-BigBIRD.pdf
+6b6493551017819a3d1f12bbf922a8a8c8cc2a03,http://pdfs.semanticscholar.org/6b64/93551017819a3d1f12bbf922a8a8c8cc2a03.pdf
+9117fd5695582961a456bd72b157d4386ca6a174,http://pdfs.semanticscholar.org/9117/fd5695582961a456bd72b157d4386ca6a174.pdf
+2d31ab536b3c8a05de0d24e0257ca4433d5a7c75,http://tamaraberg.com/papers/xray.pdf
+9b07084c074ba3710fee59ed749c001ae70aa408,http://pdfs.semanticscholar.org/9b07/084c074ba3710fee59ed749c001ae70aa408.pdf
+4c81c76f799c48c33bb63b9369d013f51eaf5ada,https://www.cmpe.boun.edu.tr/~salah/kaya17chalearn.pdf
+e378ce25579f3676ca50c8f6454e92a886b9e4d7,http://openaccess.thecvf.com/content_ICCV_2017/papers/Liu_Robust_Video_Super-Resolution_ICCV_2017_paper.pdf
+1d0dd20b9220d5c2e697888e23a8d9163c7c814b,http://pdfs.semanticscholar.org/1d0d/d20b9220d5c2e697888e23a8d9163c7c814b.pdf
+97d1d561362a8b6beb0fdbee28f3862fb48f1380,http://pages.cs.wisc.edu/~gdguo/myPapersOnWeb/PAMI10Guo.pdf
+6eba25166fe461dc388805cc2452d49f5d1cdadd,http://pdfs.semanticscholar.org/6eba/25166fe461dc388805cc2452d49f5d1cdadd.pdf
+31aa7c992692b74f17ddec665cd862faaeafd673,http://www.researchgate.net/profile/Shinichi_Satoh/publication/221657297_Unsupervised_face_annotation_by_mining_the_web/links/0912f510a04034844d000000.pdf
+d0eb3fd1b1750242f3bb39ce9ac27fc8cc7c5af0,http://pdfs.semanticscholar.org/d0eb/3fd1b1750242f3bb39ce9ac27fc8cc7c5af0.pdf
+32a40c43a9bc1f1c1ed10be3b9f10609d7e0cb6b,http://pdfs.semanticscholar.org/32a4/0c43a9bc1f1c1ed10be3b9f10609d7e0cb6b.pdf
+e726acda15d41b992b5a41feabd43617fab6dc23,http://pdfs.semanticscholar.org/e726/acda15d41b992b5a41feabd43617fab6dc23.pdf
+3f5cf3771446da44d48f1d5ca2121c52975bb3d3,http://pdfs.semanticscholar.org/3f5c/f3771446da44d48f1d5ca2121c52975bb3d3.pdf
+6e94c579097922f4bc659dd5d6c6238a428c4d22,http://pdfs.semanticscholar.org/6e94/c579097922f4bc659dd5d6c6238a428c4d22.pdf
+8395cf3535a6628c3bdc9b8d0171568d551f5ff0,http://pdfs.semanticscholar.org/8395/cf3535a6628c3bdc9b8d0171568d551f5ff0.pdf
+b41374f4f31906cf1a73c7adda6c50a78b4eb498,http://isp.uv.es/papers/Laparra11.pdf
+4d7e1eb5d1afecb4e238ba05d4f7f487dff96c11,http://mirlab.org/conference_papers/International_Conference/ICASSP%202017/pdfs/0002352.pdf
+1e7ae86a78a9b4860aa720fb0fd0bdc199b092c3,http://pdfs.semanticscholar.org/1e7a/e86a78a9b4860aa720fb0fd0bdc199b092c3.pdf
+8d3fbdb9783716c1832a0b7ab1da6390c2869c14,http://pdfs.semanticscholar.org/ae81/6e7e0077fe94f1e62629647dc04263a970b5.pdf
+0831a511435fd7d21e0cceddb4a532c35700a622,http://pdfs.semanticscholar.org/0831/a511435fd7d21e0cceddb4a532c35700a622.pdf
+11367581c308f4ba6a32aac1b4a7cdb32cd63137,https://pdfs.semanticscholar.org/82c3/367ca6fc95e705aa8f2270265d82e9d8eedd.pdf
+333aa36e80f1a7fa29cf069d81d4d2e12679bc67,http://pdfs.semanticscholar.org/333a/a36e80f1a7fa29cf069d81d4d2e12679bc67.pdf
+303a7099c01530fa0beb197eb1305b574168b653,http://openaccess.thecvf.com/content_cvpr_2016/papers/Zhang_Occlusion-Free_Face_Alignment_CVPR_2016_paper.pdf
+f0ae807627f81acb63eb5837c75a1e895a92c376,http://pdfs.semanticscholar.org/f0ae/807627f81acb63eb5837c75a1e895a92c376.pdf
+b7f7a4df251ff26aca83d66d6b479f1dc6cd1085,http://pdfs.semanticscholar.org/b7f7/a4df251ff26aca83d66d6b479f1dc6cd1085.pdf
+0113b302a49de15a1d41ca4750191979ad756d2f,http://www.cecs.uci.edu/~papers/icme06/pdfs/0000537.pdf
+faead8f2eb54c7bc33bc7d0569adc7a4c2ec4c3b,http://pdfs.semanticscholar.org/faea/d8f2eb54c7bc33bc7d0569adc7a4c2ec4c3b.pdf
+5a5f0287484f0d480fed1ce585dbf729586f0edc,http://www.researchgate.net/profile/Mohammad_Mahoor/publication/248703363_DISFA_A_Spontaneous_Facial_Action_Intensity_Database/links/0c960520903b2b8153000000.pdf
+910524c0d0fe062bf806bb545627bf2c9a236a03,http://pdfs.semanticscholar.org/9105/24c0d0fe062bf806bb545627bf2c9a236a03.pdf
+1824b1ccace464ba275ccc86619feaa89018c0ad,http://www.csc.kth.se/~vahidk/papers/KazemiCVPR14.pdf
+71f36c8e17a5c080fab31fce1ffea9551fc49e47,http://openaccess.thecvf.com/content_cvpr_2014/papers/Zhang_Predicting_Failures_of_2014_CVPR_paper.pdf
+5b719410e7829c98c074bc2947697fac3b505b64,http://pdfs.semanticscholar.org/ecec/d5c8b2472364fd7816033e8355215e34bb1b.pdf
+daa02cf195818cbf651ef81941a233727f71591f,http://pdfs.semanticscholar.org/daa0/2cf195818cbf651ef81941a233727f71591f.pdf
+75bf3b6109d7a685236c8589f8ead7d769ea863f,http://pdfs.semanticscholar.org/75bf/3b6109d7a685236c8589f8ead7d769ea863f.pdf
+5bf70c1afdf4c16fd88687b4cf15580fd2f26102,http://pdfs.semanticscholar.org/5bf7/0c1afdf4c16fd88687b4cf15580fd2f26102.pdf
+20a88cc454a03d62c3368aa1f5bdffa73523827b,http://pdfs.semanticscholar.org/d620/7593c39255ac8ce7536e5958a99f52d6bb60.pdf
+42afe6d016e52c99e2c0d876052ade9c192d91e7,https://ibug.doc.ic.ac.uk/media/uploads/documents/ValstarEtAl-ICMI2006-FINAL.pdf
+e0bfcf965b402f3f209f26ae20ee88bc4d0002ab,http://pdfs.semanticscholar.org/e0bf/cf965b402f3f209f26ae20ee88bc4d0002ab.pdf
+732686d799d760ccca8ad47b49a8308b1ab381fb,http://pdfs.semanticscholar.org/7326/86d799d760ccca8ad47b49a8308b1ab381fb.pdf
+36c473fc0bf3cee5fdd49a13cf122de8be736977,http://pdfs.semanticscholar.org/bc6c/051b66ecadac7bb3e6ace66665e42875d790.pdf
+0abf67e7bd470d9eb656ea2508beae13ca173198,http://www.cs.cmu.edu/~kkitani/pdf/MFK-CVPR16.pdf
+0b242d5123f79defd5f775d49d8a7047ad3153bc,http://pdfs.semanticscholar.org/84db/c0010ae4f5206d689cf9f5bb176d18990bcd.pdf
+2495ebdcb6da8d8c2e82cf57fcaab0ec003d571d,http://eprints.pascal-network.org/archive/00002118/01/russell06.pdf
+1fc249ec69b3e23856b42a4e591c59ac60d77118,http://cbl.uh.edu/pub_files/IJCB-2017-XX.pdf
+feb6e267923868bff6e2108603d00fdfd65251ca,http://pdfs.semanticscholar.org/feb6/e267923868bff6e2108603d00fdfd65251ca.pdf
+346c9100b2fab35b162d7779002c974da5f069ee,http://cmlab.csie.ntu.edu.tw/~yanying/paper/p651-lei.pdf
+56e4dead93a63490e6c8402a3c7adc493c230da5,http://pdfs.semanticscholar.org/56e4/dead93a63490e6c8402a3c7adc493c230da5.pdf
+855184c789bca7a56bb223089516d1358823db0b,http://pdfs.semanticscholar.org/8551/84c789bca7a56bb223089516d1358823db0b.pdf
+44a3ec27f92c344a15deb8e5dc3a5b3797505c06,http://pdfs.semanticscholar.org/44a3/ec27f92c344a15deb8e5dc3a5b3797505c06.pdf
+397085122a5cade71ef6c19f657c609f0a4f7473,http://pdfs.semanticscholar.org/db11/4901d09a07ab66bffa6986bc81303e133ae1.pdf
+cf875336d5a196ce0981e2e2ae9602580f3f6243,http://pdfs.semanticscholar.org/cf87/5336d5a196ce0981e2e2ae9602580f3f6243.pdf
+a87e37d43d4c47bef8992ace408de0f872739efc,http://pdfs.semanticscholar.org/a87e/37d43d4c47bef8992ace408de0f872739efc.pdf
+29921072d8628544114f68bdf84deaf20a8c8f91,https://arxiv.org/pdf/1610.03670v4.pdf
+e1ab3b9dee2da20078464f4ad8deb523b5b1792e,http://pdfs.semanticscholar.org/e1ab/3b9dee2da20078464f4ad8deb523b5b1792e.pdf
+28fe6e785b32afdcd2c366c9240a661091b850cf,http://pdfs.semanticscholar.org/28fe/6e785b32afdcd2c366c9240a661091b850cf.pdf
+89002a64e96a82486220b1d5c3f060654b24ef2a,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Peng_PIEFA_Personalized_Incremental_ICCV_2015_paper.pdf
+89de30a75d3258816c2d4d5a733d2bef894b66b9,https://www.computer.org/csdl/trans/tp/2015/06/06915721.pdf
+2f8ef26bfecaaa102a55b752860dbb92f1a11dc6,http://pdfs.semanticscholar.org/2f8e/f26bfecaaa102a55b752860dbb92f1a11dc6.pdf
+17a85799c59c13f07d4b4d7cf9d7c7986475d01c,http://pdfs.semanticscholar.org/17a8/5799c59c13f07d4b4d7cf9d7c7986475d01c.pdf
+9b6d0b3fbf7d07a7bb0d86290f97058aa6153179,http://pdfs.semanticscholar.org/9b6d/0b3fbf7d07a7bb0d86290f97058aa6153179.pdf
+72160aae43cd9b2c3aae5574acc0d00ea0993b9e,http://pdfs.semanticscholar.org/7216/0aae43cd9b2c3aae5574acc0d00ea0993b9e.pdf
+d5b0e73b584be507198b6665bcddeba92b62e1e5,http://pdfs.semanticscholar.org/d5b0/e73b584be507198b6665bcddeba92b62e1e5.pdf
+e9e40e588f8e6510fa5537e0c9e083ceed5d07ad,http://pdfs.semanticscholar.org/e9e4/0e588f8e6510fa5537e0c9e083ceed5d07ad.pdf
+245f8ec4373e0a6c1cae36cd6fed5a2babed1386,http://pdfs.semanticscholar.org/245f/8ec4373e0a6c1cae36cd6fed5a2babed1386.pdf
+0b78fd881d0f402fd9b773249af65819e48ad36d,http://mirlab.org/conference_papers/International_Conference/ISCSLP%202008/pdfs/281.pdf
+7e0c75ce731131e613544e1a85ae0f2c28ee4c1f,http://pdfs.semanticscholar.org/7e0c/75ce731131e613544e1a85ae0f2c28ee4c1f.pdf
+b3330adb131fb4b6ebbfacce56f1aec2a61e0869,http://pdfs.semanticscholar.org/b333/0adb131fb4b6ebbfacce56f1aec2a61e0869.pdf
+e42998bbebddeeb4b2bedf5da23fa5c4efc976fa,http://pdfs.semanticscholar.org/e429/98bbebddeeb4b2bedf5da23fa5c4efc976fa.pdf
+936c7406de1dfdd22493785fc5d1e5614c6c2882,http://pdfs.semanticscholar.org/9d5e/1395e1ace37d9d5b7ce6854d518e7f128e79.pdf
+dfabe7ef245ca68185f4fcc96a08602ee1afb3f7,http://pdfs.semanticscholar.org/dfab/e7ef245ca68185f4fcc96a08602ee1afb3f7.pdf
+60bffecd79193d05742e5ab8550a5f89accd8488,http://pdfs.semanticscholar.org/60bf/fecd79193d05742e5ab8550a5f89accd8488.pdf
+1888bf50fd140767352158c0ad5748b501563833,http://pdfs.semanticscholar.org/1888/bf50fd140767352158c0ad5748b501563833.pdf
+0a2ddf88bd1a6c093aad87a8c7f4150bfcf27112,http://pdfs.semanticscholar.org/0a2d/df88bd1a6c093aad87a8c7f4150bfcf27112.pdf
+071099a4c3eed464388c8d1bff7b0538c7322422,http://arxiv.org/pdf/1601.02487v1.pdf
+e4391993f5270bdbc621b8d01702f626fba36fc2,http://pdfs.semanticscholar.org/e439/1993f5270bdbc621b8d01702f626fba36fc2.pdf
+3046baea53360a8c5653f09f0a31581da384202e,http://pdfs.semanticscholar.org/3046/baea53360a8c5653f09f0a31581da384202e.pdf
+500fbe18afd44312738cab91b4689c12b4e0eeee,http://www.maia.ub.es/~sergio/linked/ijcnn_age_and_cultural_2015.pdf
+5121f42de7cb9e41f93646e087df82b573b23311,http://pdfs.semanticscholar.org/5121/f42de7cb9e41f93646e087df82b573b23311.pdf
+9d357bbf014289fb5f64183c32aa64dc0bd9f454,http://pdfs.semanticscholar.org/9d35/7bbf014289fb5f64183c32aa64dc0bd9f454.pdf
+157eb982da8fe1da4c9e07b4d89f2e806ae4ceb6,http://www.merl.com/publications/docs/TR2012-043.pdf
+3dbfd2fdbd28e4518e2ae05de8374057307e97b3,http://pdfs.semanticscholar.org/3dbf/d2fdbd28e4518e2ae05de8374057307e97b3.pdf
+5be3cc1650c918da1c38690812f74573e66b1d32,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Sandeep_Relative_Parts_Distinctive_2014_CVPR_paper.pdf
+82d2af2ffa106160a183371946e466021876870d,http://pdfs.semanticscholar.org/82d2/af2ffa106160a183371946e466021876870d.pdf
+3167f415a861f19747ab5e749e78000179d685bc,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_ICCV_2009/contents/pdf/iccv2009_131.pdf
+04661729f0ff6afe4b4d6223f18d0da1d479accf,https://arxiv.org/pdf/1509.06451v1.pdf
+3f22a4383c55ceaafe7d3cfed1b9ef910559d639,http://pdfs.semanticscholar.org/3f22/a4383c55ceaafe7d3cfed1b9ef910559d639.pdf
+e35b09879a7df814b2be14d9102c4508e4db458b,http://pdfs.semanticscholar.org/e35b/09879a7df814b2be14d9102c4508e4db458b.pdf
+514a74aefb0b6a71933013155bcde7308cad2b46,http://pdfs.semanticscholar.org/514a/74aefb0b6a71933013155bcde7308cad2b46.pdf
+78a4eb59ec98994bebcf3a5edf9e1d34970c45f6,https://graphics.stanford.edu/papers/ib-relighting/ib-relighting.pdf
+dfb6aa168177d4685420fcb184def0aa7db7cddb,http://pdfs.semanticscholar.org/dfb6/aa168177d4685420fcb184def0aa7db7cddb.pdf
+e4bf70e818e507b54f7d94856fecc42cc9e0f73d,http://pdfs.semanticscholar.org/e4bf/70e818e507b54f7d94856fecc42cc9e0f73d.pdf
+a6d621a5aae983a6996849db5e6bc63fe0a234af,http://mplab.ucsd.edu/~ksikka/pain_icmi14.pdf
+c8292aa152a962763185e12fd7391a1d6df60d07,http://pdfs.semanticscholar.org/c829/2aa152a962763185e12fd7391a1d6df60d07.pdf
+350da18d8f7455b0e2920bc4ac228764f8fac292,http://pdfs.semanticscholar.org/b1b1/19c94c8bf94da5c9974db537e356e4f80c67.pdf
+28de411a5b3eb8411e7bcb0003c426aa91f33e97,http://pdfs.semanticscholar.org/28de/411a5b3eb8411e7bcb0003c426aa91f33e97.pdf
+39f03d1dfd94e6f06c1565d7d1bb14ab0eee03bc,http://openaccess.thecvf.com/content_iccv_2015/papers/Lu_Simultaneous_Local_Binary_ICCV_2015_paper.pdf
+6a3a07deadcaaab42a0689fbe5879b5dfc3ede52,http://pdfs.semanticscholar.org/6a3a/07deadcaaab42a0689fbe5879b5dfc3ede52.pdf
+af278274e4bda66f38fd296cfa5c07804fbc26ee,http://pdfs.semanticscholar.org/af27/8274e4bda66f38fd296cfa5c07804fbc26ee.pdf
+14761b89152aa1fc280a33ea4d77b723df4e3864,http://pdfs.semanticscholar.org/1476/1b89152aa1fc280a33ea4d77b723df4e3864.pdf
+a3a6a6a2eb1d32b4dead9e702824375ee76e3ce7,http://pdfs.semanticscholar.org/a3a6/a6a2eb1d32b4dead9e702824375ee76e3ce7.pdf
+1ea8085fe1c79d12adffb02bd157b54d799568e4,http://pdfs.semanticscholar.org/1ea8/085fe1c79d12adffb02bd157b54d799568e4.pdf
+28f311b16e4fe4cc0ff6560aae3bbd0cb6782966,http://pdfs.semanticscholar.org/4d59/7318188a9c7f7a78dadbe5b8f8385c1e1356.pdf
+7405ed035d1a4b9787b78e5566340a98fe4b63a0,http://pdfs.semanticscholar.org/7405/ed035d1a4b9787b78e5566340a98fe4b63a0.pdf
+4d530a4629671939d9ded1f294b0183b56a513ef,http://pdfs.semanticscholar.org/4d53/0a4629671939d9ded1f294b0183b56a513ef.pdf
+907475a4febf3f1d4089a3e775ea018fbec895fe,http://pdfs.semanticscholar.org/9074/75a4febf3f1d4089a3e775ea018fbec895fe.pdf
+e0b71d3c7d551684bd334af5b3671df7053a529d,http://mplab.ucsd.edu/~jake/locality.pdf
+5a8ca0cfad32f04449099e2e3f3e3a1c8f6541c0,http://pdfs.semanticscholar.org/5a8c/a0cfad32f04449099e2e3f3e3a1c8f6541c0.pdf
+1938d85feafdaa8a65cb9c379c9a81a0b0dcd3c4,http://www4.comp.polyu.edu.hk/~cslzhang/paper/MBC_TIFS_final.pdf
+1fd6004345245daf101c98935387e6ef651cbb55,http://pdfs.semanticscholar.org/1fd6/004345245daf101c98935387e6ef651cbb55.pdf
+c28461e266fe0f03c0f9a9525a266aa3050229f0,http://pdfs.semanticscholar.org/c284/61e266fe0f03c0f9a9525a266aa3050229f0.pdf
+4c87aafa779747828054cffee3125fcea332364d,http://pdfs.semanticscholar.org/4c87/aafa779747828054cffee3125fcea332364d.pdf
+e0e4910d575c4a8309f2069b38b99c972dbedc57,http://eprints.pascal-network.org/archive/00009548/01/PoseDetectRandomizedCascades.pdf
+63d8d69e90e79806a062cb8654ad78327c8957bb,http://pdfs.semanticscholar.org/63d8/d69e90e79806a062cb8654ad78327c8957bb.pdf
+ea218cebea2228b360680cb85ca133e8c2972e56,http://pdfs.semanticscholar.org/ea21/8cebea2228b360680cb85ca133e8c2972e56.pdf
+8862a573a42bbaedd392e9e634c1ccbfd177a01d,https://arxiv.org/pdf/1605.06764v1.pdf
+ceeb67bf53ffab1395c36f1141b516f893bada27,http://pdfs.semanticscholar.org/ceeb/67bf53ffab1395c36f1141b516f893bada27.pdf
+beab10d1bdb0c95b2f880a81a747f6dd17caa9c2,http://pdfs.semanticscholar.org/beab/10d1bdb0c95b2f880a81a747f6dd17caa9c2.pdf
+af0a8199328d4c806574866f419d1962def9305a,http://ttic.uchicago.edu/~smaji/papers/mr07mms.pdf
+ccfcbf0eda6df876f0170bdb4d7b4ab4e7676f18,http://ibug.doc.ic.ac.uk/media/uploads/documents/taud.pdf
+8dbe79830713925affc48d0afa04ed567c54724b,http://pdfs.semanticscholar.org/8dbe/79830713925affc48d0afa04ed567c54724b.pdf
+73fbdd57270b9f91f2e24989178e264f2d2eb7ae,http://mirlab.org/conference_papers/International_Conference/ICASSP%202012/pdfs/0001945.pdf
+1e0add381031245b1d5129b482853ee738b498e1,http://eprints.pascal-network.org/archive/00001829/01/CVPR05_Romdhani.pdf
+981449cdd5b820268c0876477419cba50d5d1316,http://pdfs.semanticscholar.org/9814/49cdd5b820268c0876477419cba50d5d1316.pdf
+1f8304f4b51033d2671147b33bb4e51b9a1e16fe,http://pdfs.semanticscholar.org/1f83/04f4b51033d2671147b33bb4e51b9a1e16fe.pdf
+15f3d47b48a7bcbe877f596cb2cfa76e798c6452,http://pdfs.semanticscholar.org/15f3/d47b48a7bcbe877f596cb2cfa76e798c6452.pdf
+e0dedb6fc4d370f4399bf7d67e234dc44deb4333,http://pdfs.semanticscholar.org/e0de/db6fc4d370f4399bf7d67e234dc44deb4333.pdf
+8e0ad1ccddc7ec73916eddd2b7bbc0019d8a7958,http://pdfs.semanticscholar.org/bff6/c3acd48f34c671c48fae9b3fdf60f5d7b363.pdf
+0e21c9e5755c3dab6d8079d738d1188b03128a31,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Wu_Constrained_Clustering_and_2013_CVPR_paper.pdf
+0faeec0d1c51623a511adb779dabb1e721a6309b,http://pdfs.semanticscholar.org/a075/782ea38167658fe28986755adddba7369b4f.pdf
+06850b60e33baa4ea9473811d58c0d5015da079e,http://pdfs.semanticscholar.org/4cff/901521af06d6a0c98c9dce253296dd88b496.pdf
+982f5c625d6ad0dac25d7acbce4dabfb35dd7f23,http://pdfs.semanticscholar.org/982f/5c625d6ad0dac25d7acbce4dabfb35dd7f23.pdf
+475e16577be1bfc0dd1f74f67bb651abd6d63524,http://pdfs.semanticscholar.org/475e/16577be1bfc0dd1f74f67bb651abd6d63524.pdf
+2450c618cca4cbd9b8cdbdb05bb57d67e63069b1,http://liris.cnrs.fr/Documents/Liris-6127.pdf
+4c1ce6bced30f5114f135cacf1a37b69bb709ea1,http://imag.pub.ro/common/staff/cflorea/papers/nlp_eye_MVA_site.pdf
+377a1be5113f38297716c4bb951ebef7a93f949a,http://www.cris.ucr.edu/IGERT/Presentation2013/CruzAbstract.pdf
+1d6068631a379adbcff5860ca2311b790df3a70f,http://pdfs.semanticscholar.org/c322/b1b998ec8f1892b29a1ebcbdc2f62e644cf1.pdf
+294d1fa4e1315e1cf7cc50be2370d24cc6363a41,http://pdfs.semanticscholar.org/294d/1fa4e1315e1cf7cc50be2370d24cc6363a41.pdf
+c7c5f0fe1fcaf3787c7f78f7dc62f3497dcfdf3c,http://pdfs.semanticscholar.org/c7c5/f0fe1fcaf3787c7f78f7dc62f3497dcfdf3c.pdf
+77d31d2ec25df44781d999d6ff980183093fb3de,http://openaccess.thecvf.com/content_cvpr_2016/supplemental/Littwin_The_Multiverse_Loss_2016_CVPR_supplemental.pdf
+582edc19f2b1ab2ac6883426f147196c8306685a,http://pdfs.semanticscholar.org/be6c/db7b181e73f546d43cf2ab6bc7181d7d619b.pdf
+cfbb2d32586b58f5681e459afd236380acd86e28,http://www.professeurs.polymtl.ca/christopher.pal/2011/ROSE.v2.5.pdf
+ddf55fc9cf57dabf4eccbf9daab52108df5b69aa,http://pdfs.semanticscholar.org/ddf5/5fc9cf57dabf4eccbf9daab52108df5b69aa.pdf
+05e96d76ed4a044d8e54ef44dac004f796572f1a,http://www.cs.ucsb.edu/~mturk/595/papers/BRONSTEIN.pdf
+004a1bb1a2c93b4f379468cca6b6cfc6d8746cc4,http://pdfs.semanticscholar.org/004a/1bb1a2c93b4f379468cca6b6cfc6d8746cc4.pdf
+00b08d22abc85361e1c781d969a1b09b97bc7010,http://www.umariqbal.info/uploads/1/4/8/3/14837880/visapp_2014.pdf
+03f98c175b4230960ac347b1100fbfc10c100d0c,http://courses.cs.washington.edu/courses/cse590v/13au/intraface.pdf
+b2e6944bebab8e018f71f802607e6e9164ad3537,http://pdfs.semanticscholar.org/b2e6/944bebab8e018f71f802607e6e9164ad3537.pdf
+044ba70e6744e80c6a09fa63ed6822ae241386f2,http://pdfs.semanticscholar.org/044b/a70e6744e80c6a09fa63ed6822ae241386f2.pdf
+beb3fd2da7f8f3b0c3ebceaa2150a0e65736d1a2,http://pdfs.semanticscholar.org/beb3/fd2da7f8f3b0c3ebceaa2150a0e65736d1a2.pdf
+004e3292885463f97a70e1f511dc476289451ed5,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Law_Quadruplet-Wise_Image_Similarity_2013_ICCV_paper.pdf
+046865a5f822346c77e2865668ec014ec3282033,http://www.csie.ntu.edu.tw/~winston/papers/chen12discovering.pdf
+140c95e53c619eac594d70f6369f518adfea12ef,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_089_ext.pdf
+2911e7f0fb6803851b0eddf8067a6fc06e8eadd6,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Jung_Joint_Fine-Tuning_in_ICCV_2015_paper.pdf
+362a70b6e7d55a777feb7b9fc8bc4d40a57cde8c,http://mirlab.org/conference_papers/International_Conference/ICASSP%202016/pdfs/0002792.pdf
+6eb1e006b7758b636a569ca9e15aafd038d2c1b1,http://pdfs.semanticscholar.org/6eb1/e006b7758b636a569ca9e15aafd038d2c1b1.pdf
+8ea30ade85880b94b74b56a9bac013585cb4c34b,http://www.eurecom.fr/fr/publication/1392/download/mm-perrfl-040517.pdf
+82d79658805f6c1aedf7b0b88b47b9555584d7ae,http://cheonji.kaist.ac.kr/pdfsrc/ic/2008_KHAn_IROS.pdf
+a5c8fc1ca4f06a344b53dc81ebc6d87f54896722,http://pdfs.semanticscholar.org/a5c8/fc1ca4f06a344b53dc81ebc6d87f54896722.pdf
+51ed4c92cab9336a2ac41fa8e0293c2f5f9bf3b6,http://pdfs.semanticscholar.org/51ed/4c92cab9336a2ac41fa8e0293c2f5f9bf3b6.pdf
+28be652db01273289499bc6e56379ca0237506c0,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/3B_018_ext.pdf
+575141e42740564f64d9be8ab88d495192f5b3bc,http://pdfs.semanticscholar.org/5751/41e42740564f64d9be8ab88d495192f5b3bc.pdf
+074af31bd9caa61fea3c4216731420bd7c08b96a,http://www.umiacs.umd.edu/~jhchoi/paper/cvprw2012_sfv.pdf
+3f7cf52fb5bf7b622dce17bb9dfe747ce4a65b96,http://poseidon.csd.auth.gr/papers/PUBLISHED/JOURNAL/pdf/2014/MM02014.pdf
+91811203c2511e919b047ebc86edad87d985a4fa,http://pdfs.semanticscholar.org/9181/1203c2511e919b047ebc86edad87d985a4fa.pdf
+bf8a520533f401347e2f55da17383a3e567ef6d8,http://pdfs.semanticscholar.org/bf8a/520533f401347e2f55da17383a3e567ef6d8.pdf
+e74816bc0803460e20edbd30a44ab857b06e288e,http://pdfs.semanticscholar.org/e748/16bc0803460e20edbd30a44ab857b06e288e.pdf
+487df616e981557c8e1201829a1d0ec1ecb7d275,http://www.citi.sinica.edu.tw/papers/yu.tsao/4293-F.pdf
+0ec1673609256b1e457f41ede5f21f05de0c054f,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989d025.pdf
+247a6b0e97b9447850780fe8dbc4f94252251133,http://www.busim.ee.boun.edu.tr/~sankur/SankurFolder/Conf_Arman_CVPR2010.pdf
+1d5aad4f7fae6d414ffb212cec1f7ac876de48bf,http://biometrics.cse.msu.edu/Publications/Face/WangJain_FaceRetriever_ICB15.pdf
+66af2afd4c598c2841dbfd1053bf0c386579234e,http://www.ics.uci.edu/~dvk/pub/J17_IJMIR14_Liyan.pdf
+5517b28795d7a68777c9f3b2b46845dcdb425b2c,http://pdfs.semanticscholar.org/5517/b28795d7a68777c9f3b2b46845dcdb425b2c.pdf
+ff8315c1a0587563510195356c9153729b533c5b,http://vislab.ucr.edu/PUBLICATIONS/pubs/Journal%20and%20Conference%20Papers/after10-1-1997/Journals/2014/Zapping%20IndexUsing%20Smile%20to%20MeasureAdvertisement14.pdf
+b13bf657ca6d34d0df90e7ae739c94a7efc30dc3,http://pdfs.semanticscholar.org/b13b/f657ca6d34d0df90e7ae739c94a7efc30dc3.pdf
+2b7ef95822a4d577021df16607bf7b4a4514eb4b,http://pdfs.semanticscholar.org/b596/9178f843bfaecd0026d04c41e79bcb9edab5.pdf
+8f8a5be9dc16d73664285a29993af7dc6a598c83,http://pdfs.semanticscholar.org/8f8a/5be9dc16d73664285a29993af7dc6a598c83.pdf
+949699d0b865ef35b36f11564f9a4396f5c9cddb,http://pdfs.semanticscholar.org/9496/99d0b865ef35b36f11564f9a4396f5c9cddb.pdf
+2b1327a51412646fcf96aa16329f6f74b42aba89,http://pdfs.semanticscholar.org/8296/cb7fea317fcd0a7ff6b7e4486ab869a7231e.pdf
+6eece104e430829741677cadc1dfacd0e058d60f,http://pdfs.semanticscholar.org/7a42/6d0b98c8f52d61f9d89cd7be5ab6119f0a4a.pdf
+329d58e8fb30f1bf09acb2f556c9c2f3e768b15c,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Wu_Leveraging_Intra_and_CVPR_2017_paper.pdf
+9755554b13103df634f9b1ef50a147dd02eab02f,https://arxiv.org/pdf/1610.00134v1.pdf
+0a11b82aa207d43d1b4c0452007e9388a786be12,http://pdfs.semanticscholar.org/0a11/b82aa207d43d1b4c0452007e9388a786be12.pdf
+27f8b01e628f20ebfcb58d14ea40573d351bbaad,http://pdfs.semanticscholar.org/27f8/b01e628f20ebfcb58d14ea40573d351bbaad.pdf
+ee6b503ab512a293e3088fdd7a1c893a77902acb,http://pdfs.semanticscholar.org/ee6b/503ab512a293e3088fdd7a1c893a77902acb.pdf
+2ec7d6a04c8c72cc194d7eab7456f73dfa501c8c,http://pdfs.semanticscholar.org/2ec7/d6a04c8c72cc194d7eab7456f73dfa501c8c.pdf
+add50a7d882eb38e35fe70d11cb40b1f0059c96f,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1A_086_ext.pdf
+3d1a6a5fd5915e0efb953ede5af0b23debd1fc7f,http://pdfs.semanticscholar.org/3d1a/6a5fd5915e0efb953ede5af0b23debd1fc7f.pdf
+4068574b8678a117d9a434360e9c12fe6232dae0,http://www.visionmeetscognition.org/fpic2014/Camera_Ready/Paper%2031.pdf
+f869601ae682e6116daebefb77d92e7c5dd2cb15,http://pdfs.semanticscholar.org/f869/601ae682e6116daebefb77d92e7c5dd2cb15.pdf
+76673de6d81bedd6b6be68953858c5f1aa467e61,http://pdfs.semanticscholar.org/8883/2abb9082af6a1395e1b9bd3d4c1b46d00616.pdf
+53cfe4817ac2eecbe4e286709a9140a5fe729b35,http://www.cv.iit.nrc.ca/VI/fpiv04/pdf/17fa.pdf
+243e9d490fe98d139003bb8dc95683b366866c57,http://pdfs.semanticscholar.org/243e/9d490fe98d139003bb8dc95683b366866c57.pdf
+6d4103762e159130b32335cbf8893ee4dca26859,http://homepage.tudelft.nl/19j49/Publications_files/cogn_proc.pdf
+c02847a04a99a5a6e784ab580907278ee3c12653,http://pdfs.semanticscholar.org/c028/47a04a99a5a6e784ab580907278ee3c12653.pdf
+05c91e8a29483ced50c5f2d869617b80f7dacdd9,http://www.cs.rochester.edu/~mehoque/Publications/2013/13.Hoque-etal-MACH-UbiComp.pdf
+b11bb6bd63ee6f246d278dd4edccfbe470263803,http://pdfs.semanticscholar.org/b11b/b6bd63ee6f246d278dd4edccfbe470263803.pdf
+58b0be2db0aeda2edb641273fe52946a24a714c3,http://www.cs.ucsb.edu/~daniel/publications/conferences/wacv09/VaqueroWACV09.pdf
+0f940d2cdfefc78c92ec6e533a6098985f47a377,https://www.ecse.rpi.edu/~cvrl/chenj/Expression_v6_submit.pdf
+baaaf73ec28226d60d923bc639f3c7d507345635,http://pdfs.semanticscholar.org/baaa/f73ec28226d60d923bc639f3c7d507345635.pdf
+198b6beb53e0e61357825d57938719f614685f75,http://pdfs.semanticscholar.org/198b/6beb53e0e61357825d57938719f614685f75.pdf
+c614450c9b1d89d5fda23a54dbf6a27a4b821ac0,http://pdfs.semanticscholar.org/c614/450c9b1d89d5fda23a54dbf6a27a4b821ac0.pdf
+e0c081a007435e0c64e208e9918ca727e2c1c44e,http://pdfs.semanticscholar.org/e0c0/81a007435e0c64e208e9918ca727e2c1c44e.pdf
+271e2856e332634eccc5e80ba6fa9bbccf61f1be,http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/WorkShops/data/papers/176.pdf
+507c9672e3673ed419075848b4b85899623ea4b0,http://pdfs.semanticscholar.org/507c/9672e3673ed419075848b4b85899623ea4b0.pdf
+057d5f66a873ec80f8ae2603f937b671030035e6,http://cs.stanford.edu/~roozbeh/papers/Mottaghi16cvpr_a.pdf
+883006c0f76cf348a5f8339bfcb649a3e46e2690,http://mplab.ucsd.edu/~marni/pubs/Sikka_FG2013.pdf
+751970d4fb6f61d1b94ca82682984fd03c74f127,http://pdfs.semanticscholar.org/7519/70d4fb6f61d1b94ca82682984fd03c74f127.pdf
+3d42e17266475e5d34a32103d879b13de2366561,http://pdfs.semanticscholar.org/7450/7306832bd71884365ed81e1cc7866e47c399.pdf
+0d3882b22da23497e5de8b7750b71f3a4b0aac6b,http://pdfs.semanticscholar.org/0d38/82b22da23497e5de8b7750b71f3a4b0aac6b.pdf
+fa08a4da5f2fa39632d90ce3a2e1688d147ece61,http://pdfs.semanticscholar.org/fa08/a4da5f2fa39632d90ce3a2e1688d147ece61.pdf
+286812ade95e6f1543193918e14ba84e5f8e852e,http://pdfs.semanticscholar.org/9b1d/a39168a7196c2f9c85e9b3d17debff04c988.pdf
+75d2ecbbcc934563dff6b39821605dc6f2d5ffcc,http://pdfs.semanticscholar.org/75d2/ecbbcc934563dff6b39821605dc6f2d5ffcc.pdf
+656a59954de3c9fcf82ffcef926af6ade2f3fdb5,http://pdfs.semanticscholar.org/656a/59954de3c9fcf82ffcef926af6ade2f3fdb5.pdf
+291f527598c589fb0519f890f1beb2749082ddfd,http://pdfs.semanticscholar.org/3215/ceb94227451a958bcf6b1205c710d17e53f5.pdf
+6f9824c5cb5ac08760b08e374031cbdabc953bae,https://eprints.soton.ac.uk/397973/1/PID4351119.pdf
+6ecd4025b7b5f4894c990614a9a65e3a1ac347b2,http://pdfs.semanticscholar.org/6ecd/4025b7b5f4894c990614a9a65e3a1ac347b2.pdf
+2dbde64ca75e7986a0fa6181b6940263bcd70684,http://www.micc.unifi.it/wp-content/uploads/2016/01/2014_pose_independent.pdf
+9f6d04ce617d24c8001a9a31f11a594bd6fe3510,http://pdfs.semanticscholar.org/9f6d/04ce617d24c8001a9a31f11a594bd6fe3510.pdf
+99ced8f36d66dce20d121f3a29f52d8b27a1da6c,http://pdfs.semanticscholar.org/99ce/d8f36d66dce20d121f3a29f52d8b27a1da6c.pdf
+ad8540379884ec03327076b562b63bc47e64a2c7,http://pdfs.semanticscholar.org/ad85/40379884ec03327076b562b63bc47e64a2c7.pdf
+044d9a8c61383312cdafbcc44b9d00d650b21c70,https://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_iccv_2013_300_w.pdf
+94e259345e82fa3015a381d6e91ec6cded3971b4,http://pdfs.semanticscholar.org/94e2/59345e82fa3015a381d6e91ec6cded3971b4.pdf
+b88ceded6467e9b286f048bb1b17be5998a077bd,http://pdfs.semanticscholar.org/b88c/eded6467e9b286f048bb1b17be5998a077bd.pdf
+7f268f29d2c8f58cea4946536f5e2325777fa8fa,http://pdfs.semanticscholar.org/7f26/8f29d2c8f58cea4946536f5e2325777fa8fa.pdf
+7ca337735ec4c99284e7c98f8d61fb901dbc9015,http://vision.psych.umn.edu/users/schrater/Papers/Veeretal05.pdf
+4015e8195db6edb0ef8520709ca9cb2c46f29be7,http://pdfs.semanticscholar.org/4015/e8195db6edb0ef8520709ca9cb2c46f29be7.pdf
+c180f22a9af4a2f47a917fd8f15121412f2d0901,http://pdfs.semanticscholar.org/c180/f22a9af4a2f47a917fd8f15121412f2d0901.pdf
+29908288392a9326d7a2996c6cd6b3e6cb137265,http://people.cs.ubc.ca/~pcarbo/ijcvss.pdf
+c8829013bbfb19ccb731bd54c1a885c245b6c7d7,http://pdfs.semanticscholar.org/c882/9013bbfb19ccb731bd54c1a885c245b6c7d7.pdf
+416b559402d0f3e2b785074fcee989d44d82b8e5,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Cai_Multi-View_Super_Vector_2014_CVPR_paper.pdf
+153c8715f491272b06dc93add038fae62846f498,http://pdfs.semanticscholar.org/153c/8715f491272b06dc93add038fae62846f498.pdf
+f6ca29516cce3fa346673a2aec550d8e671929a6,http://pdfs.semanticscholar.org/f6ca/29516cce3fa346673a2aec550d8e671929a6.pdf
+153e5cddb79ac31154737b3e025b4fb639b3c9e7,http://pdfs.semanticscholar.org/d9f5/9178ef2d91c98e0f3108fe273cdc6c6590f4.pdf
+33402ee078a61c7d019b1543bb11cc127c2462d2,http://users.cecs.anu.edu.au/~sgould/papers/cvpr17-ooo.pdf
+40a74eea514b389b480d6fe8b359cb6ad31b644a,http://pdfs.semanticscholar.org/7ac4/2be6c1f01ccc42b28c0bfa77856cc75b65a2.pdf
+c2e03efd8c5217188ab685e73cc2e52c54835d1a,http://web.eecs.utk.edu/~ataalimi/wp-content/uploads/2016/09/Deep-Tree-structured-Face-A-Unified-Representation-for-Multi-task-Facial.pdf
+05ad478ca69b935c1bba755ac1a2a90be6679129,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Turakhia_Attribute_Dominance_What_2013_ICCV_paper.pdf
+f86ddd6561f522d115614c93520faad122eb3b56,http://pdfs.semanticscholar.org/f86d/dd6561f522d115614c93520faad122eb3b56.pdf
+cfa572cd6ba8dfc2ee8ac3cc7be19b3abff1a8a2,https://www.computer.org/csdl/trans/ta/2017/03/07420600.pdf
+f909d04c809013b930bafca12c0f9a8192df9d92,http://pdfs.semanticscholar.org/f909/d04c809013b930bafca12c0f9a8192df9d92.pdf
+36c2db5ff76864d289781f93cbb3e6351f11984c,http://www.eurasip.org/Proceedings/Eusipco/Eusipco2009/contents/papers/1569187194.pdf
+4bd088ba3f42aa1e43ae33b1988264465a643a1f,http://pdfs.semanticscholar.org/4bd0/88ba3f42aa1e43ae33b1988264465a643a1f.pdf
+a15d9d2ed035f21e13b688a78412cb7b5a04c469,http://pdfs.semanticscholar.org/a15d/9d2ed035f21e13b688a78412cb7b5a04c469.pdf
+43ae4867d058453e9abce760ff0f9427789bab3a,https://infoscience.epfl.ch/record/207780/files/tnnls_graph_embedding.pdf
+cdb1d32bc5c1a9bb0d9a5b9c9222401eab3e9ca0,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhang_Functional_Faces_Groupwise_CVPR_2016_paper.pdf
+62374b9e0e814e672db75c2c00f0023f58ef442c,http://pdfs.semanticscholar.org/6237/4b9e0e814e672db75c2c00f0023f58ef442c.pdf
+0b2966101fa617b90510e145ed52226e79351072,http://www.cs.umanitoba.ca/~ywang/papers/icpr16_videotext.pdf
+22df6b6c87d26f51c0ccf3d4dddad07ce839deb0,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Yu_Fast_Action_Proposals_2015_CVPR_paper.pdf
+e48e94959c4ce799fc61f3f4aa8a209c00be8d7f,http://pdfs.semanticscholar.org/e48e/94959c4ce799fc61f3f4aa8a209c00be8d7f.pdf
+22ec256400e53cee35f999244fb9ba6ba11c1d06,http://pdfs.semanticscholar.org/2dbd/f0093228eee11ce9ef17365055dada756413.pdf
+eb716dd3dbd0f04e6d89f1703b9975cad62ffb09,http://pdfs.semanticscholar.org/eb71/6dd3dbd0f04e6d89f1703b9975cad62ffb09.pdf
+01c7a778cde86ad1b89909ea809d55230e569390,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Siyahjani_A_Supervised_Low-Rank_ICCV_2015_paper.pdf
+46e86cdb674440f61b6658ef3e84fea95ea51fb4,http://pdfs.semanticscholar.org/c075/e79a832d36e5b4c76b0f07c3b9d5f3be43e0.pdf
+676a136f5978783f75b5edbb38e8bb588e8efbbe,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2B_084_ext.pdf
+2c203050a6cca0a0bff80e574bda16a8c46fe9c2,http://pdfs.semanticscholar.org/608f/43ee003c7c2e7f170336fda7a00cccd06311.pdf
+80840df0802399838fe5725cce829e1b417d7a2e,http://pdfs.semanticscholar.org/8084/0df0802399838fe5725cce829e1b417d7a2e.pdf
+57911d7f347dde0398f964e0c7ed8fdd0a882449,http://amp.ece.cmu.edu/people/Andy/Andy_files/1424CVPR08Gallagher.pdf
+01b4b32c5ef945426b0396d32d2a12c69c282e29,http://pdfs.semanticscholar.org/1510/bfa3a31ccf47e0241d3528aeda4871597a0f.pdf
+a1e97c4043d5cc9896dc60ae7ca135782d89e5fc,http://pdfs.semanticscholar.org/a1e9/7c4043d5cc9896dc60ae7ca135782d89e5fc.pdf
+1d19c6857e798943cd0ecd110a7a0d514c671fec,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w2/papers/Khorrami_Do_Deep_Neural_ICCV_2015_paper.pdf
+301b0da87027d6472b98361729faecf6e1d5e5f6,http://pdfs.semanticscholar.org/301b/0da87027d6472b98361729faecf6e1d5e5f6.pdf
+641f34deb3bdd123c6b6e7b917519c3e56010cb7,https://pdfs.semanticscholar.org/878d/68c5d016a0a63f328d72adda6b135432b66d.pdf
+10e70a34d56258d10f468f8252a7762950830d2b,http://intechweb.org/downloadpdf.php?id=5889
+26437fb289cd7caeb3834361f0cc933a02267766,http://pdfs.semanticscholar.org/2643/7fb289cd7caeb3834361f0cc933a02267766.pdf
+50d961508ec192197f78b898ff5d44dc004ef26d,http://pdfs.semanticscholar.org/50d9/61508ec192197f78b898ff5d44dc004ef26d.pdf
+8d4f0517eae232913bf27f516101a75da3249d15,http://pdfs.semanticscholar.org/8d4f/0517eae232913bf27f516101a75da3249d15.pdf
+87e5b4d95f95a0975e855cf5ad402db7a3c64ff5,http://www.researchgate.net/profile/Paul_Bodesheim/publication/269314560_Local_Novelty_Detection_in_Multi-class_Recognition_Problems/links/5486c2420cf289302e2c35eb.pdf
+48fea82b247641c79e1994f4ac24cad6b6275972,http://wan.poly.edu/KDD2012/docs/p1469.pdf
+14e8dbc0db89ef722c3c198ae19bde58138e88bf,http://ascl.cis.fiu.edu/uploads/1/3/4/2/13423859/amini-lisetti-acii-2013-final.pdf
+140438a77a771a8fb656b39a78ff488066eb6b50,http://homes.cs.washington.edu/~neeraj/base/publications/base/papers/nk_cvpr2011_faceparts.pdf
+0badf61e8d3b26a0d8b60fe94ba5c606718daf0b,http://pdfs.semanticscholar.org/0bad/f61e8d3b26a0d8b60fe94ba5c606718daf0b.pdf
+9ac15845defcd0d6b611ecd609c740d41f0c341d,http://pdfs.semanticscholar.org/9ac1/5845defcd0d6b611ecd609c740d41f0c341d.pdf
+2cdde47c27a8ecd391cbb6b2dea64b73282c7491,http://pdfs.semanticscholar.org/2cdd/e47c27a8ecd391cbb6b2dea64b73282c7491.pdf
+34108098e1a378bc15a5824812bdf2229b938678,http://pdfs.semanticscholar.org/3410/8098e1a378bc15a5824812bdf2229b938678.pdf
+1eb4ea011a3122dc7ef3447e10c1dad5b69b0642,http://pdfs.semanticscholar.org/1eb4/ea011a3122dc7ef3447e10c1dad5b69b0642.pdf
+3399f8f0dff8fcf001b711174d29c9d4fde89379,http://pdfs.semanticscholar.org/3399/f8f0dff8fcf001b711174d29c9d4fde89379.pdf
+6f08885b980049be95a991f6213ee49bbf05c48d,http://pdfs.semanticscholar.org/6f08/885b980049be95a991f6213ee49bbf05c48d.pdf
+02bd665196bd50c4ecf05d6852a4b9ba027cd9d0,http://arxiv.org/pdf/1310.2880v6.pdf
+15e27f968458bf99dd34e402b900ac7b34b1d575,http://mirlab.org/conference_papers/International_Conference/ICASSP%202014/papers/p8362-mahanta.pdf
+238fc68b2e0ef9f5ec043d081451902573992a03,http://www.cbsr.ia.ac.cn/users/zlei/papers/ChuanxianRen-ELGOF-TCYB.pdf
+7d7be6172fc2884e1da22d1e96d5899a29831ad2,http://pdfs.semanticscholar.org/7d7b/e6172fc2884e1da22d1e96d5899a29831ad2.pdf
+43a03cbe8b704f31046a5aba05153eb3d6de4142,http://pdfs.semanticscholar.org/9594/3329cd6922a869dd6d58ef01e9492879034c.pdf
+17cf838720f7892dbe567129dcf3f7a982e0b56e,http://pdfs.semanticscholar.org/6e0a/a9926e484e08b31fdeb85b73d1ae65ba47d6.pdf
+0fd1715da386d454b3d6571cf6d06477479f54fc,http://pdfs.semanticscholar.org/0fd1/715da386d454b3d6571cf6d06477479f54fc.pdf
+9b928c0c7f5e47b4480cb9bfdf3d5b7a29dfd493,http://www.ifp.illinois.edu/~jyang29/papers/JRR_ICCV11.pdf
+e6ee36444038de5885473693fb206f49c1369138,http://pdfs.semanticscholar.org/e6ee/36444038de5885473693fb206f49c1369138.pdf
+33a1a049d15e22befc7ddefdd3ae719ced8394bf,http://pdfs.semanticscholar.org/33a1/a049d15e22befc7ddefdd3ae719ced8394bf.pdf
+cef841f27535c0865278ee9a4bc8ee113b4fb9f3,http://pdfs.semanticscholar.org/cef8/41f27535c0865278ee9a4bc8ee113b4fb9f3.pdf
+0394040749195937e535af4dda134206aa830258,http://web.eecs.umich.edu/~hero/Preprints/sp_mlsi_submitted_revised2.pdf
+10ce3a4724557d47df8f768670bfdd5cd5738f95,http://pdfs.semanticscholar.org/10ce/3a4724557d47df8f768670bfdd5cd5738f95.pdf
+31aa20911cc7a2b556e7d273f0bdd5a2f0671e0a,http://pdfs.semanticscholar.org/31aa/20911cc7a2b556e7d273f0bdd5a2f0671e0a.pdf
+63d8110ac76f57b3ba8a5947bc6bdbb86f25a342,http://pdfs.semanticscholar.org/63d8/110ac76f57b3ba8a5947bc6bdbb86f25a342.pdf
+0641dbee7202d07b6c78a39eecd312c17607412e,http://users.cecs.anu.edu.au/~hongdong/JiZhongLiSalzmannICIP14.pdf
+43476cbf2a109f8381b398e7a1ddd794b29a9a16,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Cao_A_Practical_Transfer_2013_ICCV_paper.pdf
+9cfb3a68fb10a59ec2a6de1b24799bf9154a8fd1,http://pdfs.semanticscholar.org/9cfb/3a68fb10a59ec2a6de1b24799bf9154a8fd1.pdf
+13bda03fc8984d5943ed8d02e49a779d27c84114,http://www-ljk.imag.fr/Publications/Basilic/com.lmc.publi.PUBLI_Inproceedings@13730f58c78_1669a2e/cevikalp-cvpr12.pdf
+6e60536c847ac25dba4c1c071e0355e5537fe061,http://www.cfar.umd.edu/~fer/postscript/CV_and_NLP.pdf
+9a0c7a4652c49a177460b5d2fbbe1b2e6535e50a,http://arxiv.org/pdf/1602.01940v1.pdf
+8518b501425f2975ea6dcbf1e693d41e73d0b0af,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Zhang_Relative_Hidden_Markov_2013_CVPR_paper.pdf
+a6b5ffb5b406abfda2509cae66cdcf56b4bb3837,http://pdfs.semanticscholar.org/bce2/02717ce134b317b39f0a18151659d643875b.pdf
+47a003e6bbfc5bf04a099ca53c67ddfdbea71315,http://www.researchgate.net/profile/Andrzej_Drygajlo/publication/228669241_Q-stack_aging_model_for_face_verification/links/09e4150f7ffb6d3946000000.pdf
+741485741734a99e933dd0302f457158c6842adf,http://pdfs.semanticscholar.org/7414/85741734a99e933dd0302f457158c6842adf.pdf
+e78394213ae07b682ce40dc600352f674aa4cb05,http://pdfs.semanticscholar.org/e783/94213ae07b682ce40dc600352f674aa4cb05.pdf
+a3dc109b1dff3846f5a2cc1fe2448230a76ad83f,http://pdfs.semanticscholar.org/a3dc/109b1dff3846f5a2cc1fe2448230a76ad83f.pdf
+9a1a9dd3c471bba17e5ce80a53e52fcaaad4373e,http://pdfs.semanticscholar.org/9a1a/9dd3c471bba17e5ce80a53e52fcaaad4373e.pdf
+106732a010b1baf13c61d0994552aee8336f8c85,http://arxiv.org/pdf/1509.04186v2.pdf
+31b05f65405534a696a847dd19c621b7b8588263,https://arxiv.org/pdf/1611.01484v1.pdf
+2251a88fbccb0228d6d846b60ac3eeabe468e0f1,http://pdfs.semanticscholar.org/2251/a88fbccb0228d6d846b60ac3eeabe468e0f1.pdf
+3fb3c7dd12561e9443ac301f5527d539b1f4574e,http://www.research.rutgers.edu/~shaoting/paper/ICCV13.pdf
+02fda07735bdf84554c193811ba4267c24fe2e4a,http://www.cbsr.ia.ac.cn/Li%20Group/papers/Li-IR-Face-PAMI-07.pdf
+2cf5f2091f9c2d9ab97086756c47cd11522a6ef3,http://pdfs.semanticscholar.org/2cf5/f2091f9c2d9ab97086756c47cd11522a6ef3.pdf
+8e94ed0d7606408a0833e69c3185d6dcbe22bbbe,http://www.wjscheirer.com/papers/wjs_wacv2012_eyes.pdf
+0b3786a3a0ea7ec08f01636124c183dbee8f625f,http://www.cs.uiuc.edu/homes/dhoiem/publications/pami2012_FlickrSimilaritiesSIKMA_Gang.pdf
+27b1670e1b91ab983b7b1ecfe9eb5e6ba951e0ba,http://pdfs.semanticscholar.org/27b1/670e1b91ab983b7b1ecfe9eb5e6ba951e0ba.pdf
+66e9fb4c2860eb4a15f713096020962553696e12,http://pdfs.semanticscholar.org/d42f/8e7283b20b89f55f8d36efcb1d8e2b774167.pdf
+ffc9d6a5f353e5aec3116a10cf685294979c63d9,http://pdfs.semanticscholar.org/ffc9/d6a5f353e5aec3116a10cf685294979c63d9.pdf
+0b174d4a67805b8796bfe86cd69a967d357ba9b6,http://pdfs.semanticscholar.org/0b17/4d4a67805b8796bfe86cd69a967d357ba9b6.pdf
+03fc466fdbc8a2efb6e3046fcc80e7cb7e86dc20,https://ias.in.tum.de/_media/spezial/bib/mayer08arealtime.pdf
+086131159999d79adf6b31c1e604b18809e70ba8,http://vinereactor.org/icpr2016.pdf
+58628e64e61bd2776a2a7258012eabe3c79ca90c,http://pdfs.semanticscholar.org/5862/8e64e61bd2776a2a7258012eabe3c79ca90c.pdf
+539ae0920815eb248939165dd5d1b0188ff7dca2,http://www.ele.puc-rio.br/~visao/Topicos/Prince%20and%20Helder%202007%20Probabilistic%20linear%20discriminant%20analysis.pdf
+718d3137adba9e3078fa1f698020b666449f3336,http://pdfs.semanticscholar.org/718d/3137adba9e3078fa1f698020b666449f3336.pdf
+71b07c537a9e188b850192131bfe31ef206a39a0,http://pdfs.semanticscholar.org/71b0/7c537a9e188b850192131bfe31ef206a39a0.pdf
+ccdea57234d38c7831f1e9231efcb6352c801c55,http://pdfs.semanticscholar.org/ccde/a57234d38c7831f1e9231efcb6352c801c55.pdf
+67b79c2336b9a2efbfc805b9a6912a0959e392a9,https://www.researchgate.net/profile/Engin_Erzin/publication/220716898_RANSAC-Based_Training_Data_Selection_on_Spectral_Features_for_Emotion_Recognition_from_Spontaneous_Speech/links/0912f5089705e67f21000000.pdf
+146a7ecc7e34b85276dd0275c337eff6ba6ef8c0,https://arxiv.org/pdf/1611.06158v1.pdf
+4b4ecc1cb7f048235605975ab37bb694d69f63e5,http://pdfs.semanticscholar.org/4b4e/cc1cb7f048235605975ab37bb694d69f63e5.pdf
+fde0180735699ea31f6c001c71eae507848b190f,http://pdfs.semanticscholar.org/fde0/180735699ea31f6c001c71eae507848b190f.pdf
+f2ad9b43bac8c2bae9dea694f6a4e44c760e63da,http://pdfs.semanticscholar.org/f2ad/9b43bac8c2bae9dea694f6a4e44c760e63da.pdf
+1a46d3a9bc1e4aff0ccac6403b49a13c8a89fc1d,http://www.dabi.temple.edu/~hbling/publication/oria-12-final.pdf
+08cb294a08365e36dd7ed4167b1fd04f847651a9,http://pdfs.semanticscholar.org/f75f/56bb1dcf721449f2fcc3634265f1e08e012c.pdf
+ee461d060da58d6053d2f4988b54eff8655ecede,http://pdfs.semanticscholar.org/ee46/1d060da58d6053d2f4988b54eff8655ecede.pdf
+5b6f0a508c1f4097dd8dced751df46230450b01a,http://www.eecs.berkeley.edu/Pubs/TechRpts/2010/EECS-2010-174.pdf
+20a432a065a06f088d96965f43d0055675f0a6c1,http://pdfs.semanticscholar.org/20a4/32a065a06f088d96965f43d0055675f0a6c1.pdf
+3d0c21d4780489bd624a74b07e28c16175df6355,http://pdfs.semanticscholar.org/3d0c/21d4780489bd624a74b07e28c16175df6355.pdf
+20b437dc4fc44c17f131713ffcbb4a8bd672ef00,http://pdfs.semanticscholar.org/20b4/37dc4fc44c17f131713ffcbb4a8bd672ef00.pdf
+ee92d36d72075048a7c8b2af5cc1720c7bace6dd,http://pdfs.semanticscholar.org/ee92/d36d72075048a7c8b2af5cc1720c7bace6dd.pdf
+ca606186715e84d270fc9052af8500fe23befbda,http://www.amirtahmasbi.com/publications_repository/SDA_ICSPS2010.pdf
+aa127e6b2dc0aaccfb85e93e8b557f83ebee816b,http://pdfs.semanticscholar.org/aa12/7e6b2dc0aaccfb85e93e8b557f83ebee816b.pdf
+18a849b1f336e3c3b7c0ee311c9ccde582d7214f,http://pdfs.semanticscholar.org/18a8/49b1f336e3c3b7c0ee311c9ccde582d7214f.pdf
+227b18fab568472bf14f9665cedfb95ed33e5fce,https://arxiv.org/pdf/1308.0271v2.pdf
+b5930275813a7e7a1510035a58dd7ba7612943bc,http://pdfs.semanticscholar.org/b593/0275813a7e7a1510035a58dd7ba7612943bc.pdf
+7d94fd5b0ca25dd23b2e36a2efee93244648a27b,http://pdfs.semanticscholar.org/7d94/fd5b0ca25dd23b2e36a2efee93244648a27b.pdf
+a35dd69d63bac6f3296e0f1d148708cfa4ba80f6,http://pdfs.semanticscholar.org/a35d/d69d63bac6f3296e0f1d148708cfa4ba80f6.pdf
+26ad6ceb07a1dc265d405e47a36570cb69b2ace6,http://pdfs.semanticscholar.org/26ad/6ceb07a1dc265d405e47a36570cb69b2ace6.pdf
+55b4b1168c734eeb42882082bd131206dbfedd5b,http://pdfs.semanticscholar.org/76fd/f16bcc2cb260b9e6b2880c8fe128533bc2c6.pdf
+956e9b69b3366ed3e1670609b53ba4a7088b8b7e,http://pdfs.semanticscholar.org/956e/9b69b3366ed3e1670609b53ba4a7088b8b7e.pdf
+f7de943aa75406fe5568fdbb08133ce0f9a765d4,http://pdfs.semanticscholar.org/f7de/943aa75406fe5568fdbb08133ce0f9a765d4.pdf
+1369e9f174760ea592a94177dbcab9ed29be1649,http://geza.kzoo.edu/~erdi/IJCNN2013/HTMLFiles/PDFs/P393-1401.pdf
+beb4546ae95f79235c5f3c0e9cc301b5d6fc9374,http://pdfs.semanticscholar.org/beb4/546ae95f79235c5f3c0e9cc301b5d6fc9374.pdf
+4c6e1840451e1f86af3ef1cb551259cb259493ba,http://pdfs.semanticscholar.org/4c6e/1840451e1f86af3ef1cb551259cb259493ba.pdf
+fdb33141005ca1b208a725796732ab10a9c37d75,http://pdfs.semanticscholar.org/fdb3/3141005ca1b208a725796732ab10a9c37d75.pdf
+5d01283474b73a46d80745ad0cc0c4da14aae194,http://pdfs.semanticscholar.org/5d01/283474b73a46d80745ad0cc0c4da14aae194.pdf
+1c17450c4d616e1e1eece248c42eba4f87de9e0d,http://pdfs.semanticscholar.org/d269/39a00a8d3964de612cd3faa86764343d5622.pdf
+cebfafea92ed51b74a8d27c730efdacd65572c40,http://biometrics.cse.msu.edu/Publications/Face/LuJainColbry_Matching2.5DFaceScans_PAMI06.pdf
+937ffb1c303e0595317873eda5ce85b1a17f9943,https://ivi.fnwi.uva.nl/isis/publications/2010/DibekliogluICM2010/DibekliogluICM2010.pdf
+809ea255d144cff780300440d0f22c96e98abd53,http://pdfs.semanticscholar.org/809e/a255d144cff780300440d0f22c96e98abd53.pdf
+8f3e120b030e6c1d035cb7bd9c22f6cc75782025,http://pdfs.semanticscholar.org/8f3e/120b030e6c1d035cb7bd9c22f6cc75782025.pdf
+367a786cfe930455cd3f6bd2492c304d38f6f488,http://pdfs.semanticscholar.org/367a/786cfe930455cd3f6bd2492c304d38f6f488.pdf
+0e7f277538142fb50ce2dd9179cffdc36b794054,http://nb.vse.cz/~svatek/mdm08.pdf
+581e920ddb6ecfc2a313a3aa6fed3d933b917ab0,http://pdfs.semanticscholar.org/581e/920ddb6ecfc2a313a3aa6fed3d933b917ab0.pdf
+d2cd9a7f19600370bce3ea29aba97d949fe0ceb9,http://pdfs.semanticscholar.org/d2cd/9a7f19600370bce3ea29aba97d949fe0ceb9.pdf
+404042a1dcfde338cf24bc2742c57c0fb1f48359,http://pdfs.semanticscholar.org/4040/42a1dcfde338cf24bc2742c57c0fb1f48359.pdf
+b750b3d8c34d4e57ecdafcd5ae8a15d7fa50bc24,http://grid.hust.edu.cn/xbliu/papers/ICDM09.pdf
+79617903c5cb56697f2e738e1463b9654e2d68ed,http://hal.cse.msu.edu/pdfs/papers/2013-mmcf-tip.pdf
+b03d6e268cde7380e090ddaea889c75f64560891,http://pdfs.semanticscholar.org/b03d/6e268cde7380e090ddaea889c75f64560891.pdf
+19f076998ba757602c8fec04ce6a4ca674de0e25,http://pdfs.semanticscholar.org/19f0/76998ba757602c8fec04ce6a4ca674de0e25.pdf
+d73d2c9a6cef79052f9236e825058d5d9cdc1321,http://pdfs.semanticscholar.org/d73d/2c9a6cef79052f9236e825058d5d9cdc1321.pdf
+401e6b9ada571603b67377b336786801f5b54eee,http://pdfs.semanticscholar.org/401e/6b9ada571603b67377b336786801f5b54eee.pdf
+86b51bd0c80eecd6acce9fc538f284b2ded5bcdd,http://pdfs.semanticscholar.org/86b5/1bd0c80eecd6acce9fc538f284b2ded5bcdd.pdf
+05f4d907ee2102d4c63a3dc337db7244c570d067,http://pdfs.semanticscholar.org/3c52/2c9707eb795e0dba69202f1ec946a9072661.pdf
+3dabf7d853769cfc4986aec443cc8b6699136ed0,http://pdfs.semanticscholar.org/3dab/f7d853769cfc4986aec443cc8b6699136ed0.pdf
+6e1802874ead801a7e1072aa870681aa2f555f35,http://www.cs.yale.edu/homes/hw5/WebContent/ICASSP07_Yan.pdf
+3a591a9b5c6d4c62963d7374d58c1ae79e3a4039,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2014/W04/papers/Artan_Driver_Cell_Phone_2014_CVPR_paper.pdf
+261c3e30bae8b8bdc83541ffa9331b52fcf015e6,http://pdfs.semanticscholar.org/a751/04bc7dbaaf549d89f163560525031b49df38.pdf
+469ee1b00f7bbfe17c698ccded6f48be398f2a44,http://pdfs.semanticscholar.org/469e/e1b00f7bbfe17c698ccded6f48be398f2a44.pdf
+1fe59275142844ce3ade9e2aed900378dd025880,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w25/papers/Xiao_Facial_Landmark_Detection_ICCV_2015_paper.pdf
+5ea165d2bbd305dc125415487ef061bce75dac7d,http://www.ece.northwestern.edu/~zli/new_home/MyPublications/conf/ICME2009-human-act-apd-final.pdf
+539287d8967cdeb3ef60d60157ee93e8724efcac,http://pdfs.semanticscholar.org/e5ae/05a05eefbf416eb2e13ec080f1a166dde735.pdf
+4439746eeb7c7328beba3f3ef47dc67fbb52bcb3,http://pdfs.semanticscholar.org/4439/746eeb7c7328beba3f3ef47dc67fbb52bcb3.pdf
+c82c147c4f13e79ad49ef7456473d86881428b89,http://pdfs.semanticscholar.org/c82c/147c4f13e79ad49ef7456473d86881428b89.pdf
+07da958db2e561cc7c24e334b543d49084dd1809,https://infoscience.epfl.ch/record/117525/files/Classification.pdf?version=1
+bd6099429bb7bf248b1fd6a1739e744512660d55,http://pdfs.semanticscholar.org/bd60/99429bb7bf248b1fd6a1739e744512660d55.pdf
+17fad2cc826d2223e882c9fda0715fcd5475acf3,http://pdfs.semanticscholar.org/8f64/def1fe17e2711405d66898a578e3b20da29e.pdf
+7d50df03d0c8a26eaaeaef47de68691f9ac73701,http://media-lab.engr.ccny.cuny.edu/Paper/2011/HCBA11.pdf
+46072f872eee3413f9d05482be6446f6b96b6c09,http://pdfs.semanticscholar.org/4607/2f872eee3413f9d05482be6446f6b96b6c09.pdf
+176bd61cc843d0ed6aa5af83c22e3feb13b89fe1,http://pdfs.semanticscholar.org/648b/f64ff77aeccf761b83dd85143a6eb832b258.pdf
+ee418372b0038bd3b8ae82bd1518d5c01a33a7ec,http://pdfs.semanticscholar.org/ee41/8372b0038bd3b8ae82bd1518d5c01a33a7ec.pdf
+6459f1e67e1ea701b8f96177214583b0349ed964,http://vision.ece.ucsb.edu/publications/karthik_icip2011.pdf
+0c05f60998628884a9ac60116453f1a91bcd9dda,http://pdfs.semanticscholar.org/7b19/80d4ac1730fd0145202a8cb125bf05d96f01.pdf
+00d9d88bb1bdca35663946a76d807fff3dc1c15f,http://arxiv.org/pdf/1604.04842v1.pdf
+847e07387142c1bcc65035109ccce681ef88362c,http://pdfs.semanticscholar.org/847e/07387142c1bcc65035109ccce681ef88362c.pdf
+1bad8a9640cdbc4fe7de12685651f44c4cff35ce,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2013/W08/papers/Gourgari_THETIS_Three_Dimensional_2013_CVPR_paper.pdf
+f19777e37321f79e34462fc4c416bd56772031bf,http://pdfs.semanticscholar.org/f197/77e37321f79e34462fc4c416bd56772031bf.pdf
+e3a6e9ddbbfc4c5160082338d46808cea839848a,http://pdfs.semanticscholar.org/f5d0/2300271ab0f32f10bfbba5562c0fa83c5727.pdf
+8eb9aa6349db3dd1b724266fcd5fc39a83da022a,http://www.hcii-lab.net/2009/%5BICIP%202009%5D%20A%20Novel%20feature%20extraction%20using%20PHOG%20for%20Smile%20Recognition.pdf
+a955033ca6716bf9957b362b77092592461664b4,http://pdfs.semanticscholar.org/a955/033ca6716bf9957b362b77092592461664b4.pdf
+2cc4ae2e864321cdab13c90144d4810464b24275,http://pdfs.semanticscholar.org/f3d2/c66630176cbb1409ebacd2dac4b30d8e3145.pdf
+98fb3890c565f1d32049a524ec425ceda1da5c24,http://pdfs.semanticscholar.org/98fb/3890c565f1d32049a524ec425ceda1da5c24.pdf
+071135dfb342bff884ddb9a4d8af0e70055c22a1,http://pdfs.semanticscholar.org/0711/35dfb342bff884ddb9a4d8af0e70055c22a1.pdf
+ebedc841a2c1b3a9ab7357de833101648281ff0e,http://pdfs.semanticscholar.org/ebed/c841a2c1b3a9ab7357de833101648281ff0e.pdf
+b234cd7788a7f7fa410653ad2bafef5de7d5ad29,http://pdfs.semanticscholar.org/b234/cd7788a7f7fa410653ad2bafef5de7d5ad29.pdf
+4d16337cc0431cd43043dfef839ce5f0717c3483,http://pdfs.semanticscholar.org/4d16/337cc0431cd43043dfef839ce5f0717c3483.pdf
+3cb488a3b71f221a8616716a1fc2b951dd0de549,http://cse.seu.edu.cn/people/xgeng/LDL/resource/icpr14.pdf
+5e80e2ffb264b89d1e2c468fbc1b9174f0e27f43,http://www.cs.cmu.edu/~juny/Prof/papers/acmmm04a-jyang.pdf
+0af33f6b5fcbc5e718f24591b030250c6eec027a,http://pdfs.semanticscholar.org/fa2c/96273027ff92f98109dbcef5b65f34b36627.pdf
+31bb49ba7df94b88add9e3c2db72a4a98927bb05,http://pdfs.semanticscholar.org/31bb/49ba7df94b88add9e3c2db72a4a98927bb05.pdf
+3e685704b140180d48142d1727080d2fb9e52163,http://pdfs.semanticscholar.org/3e68/5704b140180d48142d1727080d2fb9e52163.pdf
+ad9cb522cc257e3c5d7f896fe6a526f6583ce46f,http://pdfs.semanticscholar.org/ad9c/b522cc257e3c5d7f896fe6a526f6583ce46f.pdf
+44c9b5c55ca27a4313daf3760a3f24a440ce17ad,http://pdfs.semanticscholar.org/44c9/b5c55ca27a4313daf3760a3f24a440ce17ad.pdf
+1aef6f7d2e3565f29125a4871cd60c4d86c48361,http://pdfs.semanticscholar.org/1aef/6f7d2e3565f29125a4871cd60c4d86c48361.pdf
+4e8c608fc4b8198f13f8a68b9c1a0780f6f50105,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Yang_How_Related_Exemplars_2013_ICCV_paper.pdf
+3a60678ad2b862fa7c27b11f04c93c010cc6c430,http://ibug.doc.ic.ac.uk/media/uploads/documents/taffcsi-2010-11-0112-2.pdf
+aac934f2eed758d4a27562dae4e9c5415ff4cdb7,http://pdfs.semanticscholar.org/aac9/34f2eed758d4a27562dae4e9c5415ff4cdb7.pdf
+4858d014bb5119a199448fcd36746c413e60f295,http://pdfs.semanticscholar.org/4858/d014bb5119a199448fcd36746c413e60f295.pdf
+1e94cc91c5293c8fc89204d4b881552e5b2ce672,http://pdfs.semanticscholar.org/5893/7d427ff36e1470b18120245148355047e4ea.pdf
+385750bcf95036c808d63db0e0b14768463ff4c6,http://pdfs.semanticscholar.org/3857/50bcf95036c808d63db0e0b14768463ff4c6.pdf
+0742d051caebf8a5d452c03c5d55dfb02f84baab,http://research.cs.tamu.edu/keyser/Papers/CGI05Blur-JonesBW.pdf?origin=publication_detail
+8e8e3f2e66494b9b6782fb9e3f52aeb8e1b0d125,https://www.wjscheirer.com/papers/wjs_btas2012_smt.pdf
+0d14261e69a4ad4140ce17c1d1cea76af6546056,http://pdfs.semanticscholar.org/0d14/261e69a4ad4140ce17c1d1cea76af6546056.pdf
+60d4cef56efd2f5452362d4d9ac1ae05afa970d1,http://pdfs.semanticscholar.org/60d4/cef56efd2f5452362d4d9ac1ae05afa970d1.pdf
+5bb87c7462c6c1ec5d60bde169c3a785ba5ea48f,http://pdfs.semanticscholar.org/7589/58f2340ba46c6708b73d5427985d5623a512.pdf
+056ba488898a1a1b32daec7a45e0d550e0c51ae4,http://pdfs.semanticscholar.org/056b/a488898a1a1b32daec7a45e0d550e0c51ae4.pdf
+587f81ae87b42c18c565694c694439c65557d6d5,http://pdfs.semanticscholar.org/aeff/403079022683b233decda556a6aee3225065.pdf
+40bb090a4e303f11168dce33ed992f51afe02ff7,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Deng_Marginal_Loss_for_CVPR_2017_paper.pdf
+831d661d657d97a07894da8639a048c430c5536d,http://www.cv-foundation.org/openaccess/content_cvpr_2016_workshops/w4/papers/Zhu_Weakly_Supervised_Facial_CVPR_2016_paper.pdf
+bd8e2d27987be9e13af2aef378754f89ab20ce10,http://pdfs.semanticscholar.org/bd8e/2d27987be9e13af2aef378754f89ab20ce10.pdf
+b8a829b30381106b806066d40dd372045d49178d,http://gavrila.net/tits15.pdf
+8813368c6c14552539137aba2b6f8c55f561b75f,https://arxiv.org/pdf/1607.05427v1.pdf
+db36e682501582d1c7b903422993cf8d70bb0b42,http://pdfs.semanticscholar.org/db36/e682501582d1c7b903422993cf8d70bb0b42.pdf
+20c2a5166206e7ffbb11a23387b9c5edf42b5230,http://pdfs.semanticscholar.org/aff0/51003a43736001aeb76e08cb86ce67d6c70d.pdf
+08ff81f3f00f8f68b8abd910248b25a126a4dfa4,https://research-information.bristol.ac.uk/files/74279764/Ioannis_Pitas_Symmetric_Subspace_Learning_for_Image_Analysis_2014.pdf
+496074fcbeefd88664b7bd945012ca22615d812e,http://pdfs.semanticscholar.org/4960/74fcbeefd88664b7bd945012ca22615d812e.pdf
+0a9345ea6e488fb936e26a9ba70b0640d3730ba7,http://www1.ece.neu.edu/~yuewu/files/2016/p52-jiang.pdf
+1886b6d9c303135c5fbdc33e5f401e7fc4da6da4,https://arxiv.org/pdf/1610.01119v1.pdf
+97e569159d5658760eb00ca9cb662e6882d2ab0e,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989c291.pdf
+5a87bc1eae2ec715a67db4603be3d1bb8e53ace2,http://pdfs.semanticscholar.org/5a87/bc1eae2ec715a67db4603be3d1bb8e53ace2.pdf
+b1665e1ddf9253dcaebecb48ac09a7ab4095a83e,http://pdfs.semanticscholar.org/b166/5e1ddf9253dcaebecb48ac09a7ab4095a83e.pdf
+2c2786ea6386f2d611fc9dbf209362699b104f83,http://pdfs.semanticscholar.org/2c27/86ea6386f2d611fc9dbf209362699b104f83.pdf
+96e0cfcd81cdeb8282e29ef9ec9962b125f379b0,http://megaface.cs.washington.edu/KemelmacherMegaFaceCVPR16.pdf
+09111da0aedb231c8484601444296c50ca0b5388,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553737.pdf
+325b048ecd5b4d14dce32f92bff093cd744aa7f8,http://pdfs.semanticscholar.org/325b/048ecd5b4d14dce32f92bff093cd744aa7f8.pdf
+4ba38262fe20fab3e4c80215147b498f83843b93,http://pdfs.semanticscholar.org/f2af/967e28c12de9d957c08ffbc7a982e4ccea1e.pdf
+04f0292d9a062634623516edd01d92595f03bd3f,http://www.cs.nott.ac.uk/~mfv/Documents/emotiw2013_cameraready.pdf
+170a5f5da9ac9187f1c88f21a88d35db38b4111a,https://arxiv.org/pdf/1611.08563v3.pdf
+745b42050a68a294e9300228e09b5748d2d20b81,http://pdfs.semanticscholar.org/745b/42050a68a294e9300228e09b5748d2d20b81.pdf
+86c5478f21c4a9f9de71b5ffa90f2a483ba5c497,http://pdfs.semanticscholar.org/86c5/478f21c4a9f9de71b5ffa90f2a483ba5c497.pdf
+bef503cdfe38e7940141f70524ee8df4afd4f954,https://pdfs.semanticscholar.org/bef5/03cdfe38e7940141f70524ee8df4afd4f954.pdf
+384945abd53f6a6af51faf254ba8ef0f0fb3f338,http://pdfs.semanticscholar.org/b42c/4b804d69a031aac797346acc337f486e4a09.pdf
+59031a35b0727925f8c47c3b2194224323489d68,http://www4.comp.polyu.edu.hk/~cslzhang/paper/conf/ICCV13/SVDL.pdf
+1f35a65eab258f042edb8e1d4d5fff34f00a85bd,http://www.seattle.intel-research.net/~xren/publication/xren_cvpr08_casablanca.pdf
+4c822785c29ceaf67a0de9c699716c94fefbd37d,http://openaccess.thecvf.com/content_cvpr_2016/papers/Zhu_A_Key_Volume_CVPR_2016_paper.pdf
+d40cd10f0f3e64fd9b0c2728089e10e72bea9616,http://pdfs.semanticscholar.org/d40c/d10f0f3e64fd9b0c2728089e10e72bea9616.pdf
+bd2d7c7f0145028e85c102fe52655c2b6c26aeb5,http://rogerioferis.com/publications/FerisICMR2014.pdf
+03b99f5abe0e977ff4c902412c5cb832977cf18e,http://pdfs.semanticscholar.org/03b9/9f5abe0e977ff4c902412c5cb832977cf18e.pdf
+7f57e9939560562727344c1c987416285ef76cda,http://people.cs.vt.edu/~gangwang/class/cs6604/papers/face.pdf
+82be2ede6b7613286b80c3e2afe3b5353f322bed,http://www.eecs.berkeley.edu/~jiayq/papers/iccv11_mm.pdf
+aa331fe378056b6d6031bb8fe6676e035ed60d6d,http://pdfs.semanticscholar.org/aa33/1fe378056b6d6031bb8fe6676e035ed60d6d.pdf
+316e67550fbf0ba54f103b5924e6537712f06bee,http://lear.inrialpes.fr/pubs/2010/GVS10/slides.pdf
+dcc38db6c885444694f515d683bbb50521ff3990,http://pdfs.semanticscholar.org/dcc3/8db6c885444694f515d683bbb50521ff3990.pdf
+fb4545782d9df65d484009558e1824538030bbb1,http://pdfs.semanticscholar.org/fb45/45782d9df65d484009558e1824538030bbb1.pdf
+4f77a37753c03886ca9c9349723ec3bbfe4ee967,http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W11/papers/Hasan_Localizing_Facial_Keypoints_2013_ICCV_paper.pdf
+29ce6b54a87432dc8371f3761a9568eb3c5593b0,https://kar.kent.ac.uk/43222/1/Yatie_EST2013_vfinal.pdf
+447a5e1caf847952d2bb526ab2fb75898466d1bc,http://pdfs.semanticscholar.org/447a/5e1caf847952d2bb526ab2fb75898466d1bc.pdf
+c03f48e211ac81c3867c0e787bea3192fcfe323e,http://pdfs.semanticscholar.org/c03f/48e211ac81c3867c0e787bea3192fcfe323e.pdf
+67c3c1194ee72c54bc011b5768e153a035068c43,http://pdfs.semanticscholar.org/67c3/c1194ee72c54bc011b5768e153a035068c43.pdf
+a59cdc49185689f3f9efdf7ee261c78f9c180789,http://pdfs.semanticscholar.org/a59c/dc49185689f3f9efdf7ee261c78f9c180789.pdf
+14ba910c46d659871843b31d5be6cba59843a8b8,http://www.crcv.ucf.edu/papers/cvpr2013/ortiz_vfr_trailers.pdf
+35f084ddee49072fdb6e0e2e6344ce50c02457ef,https://dash.harvard.edu/bitstream/handle/1/4238979/Lee_Bilinear.pdf?sequence=2
+9ab463d117219ed51f602ff0ddbd3414217e3166,http://pdfs.semanticscholar.org/d965/43e8ab524108cae8c12d3a65a54a295deae6.pdf
+f4373f5631329f77d85182ec2df6730cbd4686a9,http://pdfs.semanticscholar.org/f437/3f5631329f77d85182ec2df6730cbd4686a9.pdf
+3cb64217ca2127445270000141cfa2959c84d9e7,http://staff.estem-uc.edu.au/roland/files/2009/05/Joshi_Goecke_Parker_Breakspear_FG2013_CanBodyExpressionsContributeToAutomaticDepressionAnalysis.pdf
+1b3b01513f99d13973e631c87ffa43904cd8a821,http://pdfs.semanticscholar.org/1b3b/01513f99d13973e631c87ffa43904cd8a821.pdf
+05270b68547a2cd5bda302779cfc5dda876ae538,http://www.cs.sfu.ca/~mori/courses/cmpt882/fall05/papers/laplacianfaces.pdf
+321bd4d5d80abb1bae675a48583f872af3919172,http://pdfs.semanticscholar.org/321b/d4d5d80abb1bae675a48583f872af3919172.pdf
+ea85378a6549bb9eb9bcc13e31aa6a61b655a9af,http://pdfs.semanticscholar.org/ea85/378a6549bb9eb9bcc13e31aa6a61b655a9af.pdf
+a3a2f3803bf403262b56ce88d130af15e984fff0,http://pdfs.semanticscholar.org/e538/e1f6557d2920b449249606f909b665fbb924.pdf
+3ba8f8b6bfb36465018430ffaef10d2caf3cfa7e,http://www.chennaisunday.com/IEEE%202013%20Dotnet%20Basepaper/Local%20Directional%20Number%20Pattern%20for%20Face%20Analysis%20Face%20and%20Expression%20Recognition.pdf
+2a14b6d9f688714dc60876816c4b7cf763c029a9,http://tamaraberg.com/papers/wacv2016_combining.pdf
+abc1ef570bb2d7ea92cbe69e101eefa9a53e1d72,http://pdfs.semanticscholar.org/abc1/ef570bb2d7ea92cbe69e101eefa9a53e1d72.pdf
+9b7974d9ad19bb4ba1ea147c55e629ad7927c5d7,http://pdfs.semanticscholar.org/9b79/74d9ad19bb4ba1ea147c55e629ad7927c5d7.pdf
+1b794b944fd462a2742b6c2f8021fecc663004c9,https://www.ecse.rpi.edu/~cvrl/wuy/HierarchicalShape/CVPR14_facialfeaturedetection_cameraready.pdf
+1610d2d4947c03a89c0fda506a74ba1ae2bc54c2,http://research.cs.rutgers.edu/~hxp1/rc_images/hai_facetrackextreme_3dv2016.pdf
+176a3e9e118712251124c1347516a92d5e315297,http://eprints.pascal-network.org/archive/00008997/01/ICMR11.pdf
+d03baf17dff5177d07d94f05f5791779adf3cd5f,http://pdfs.semanticscholar.org/d03b/af17dff5177d07d94f05f5791779adf3cd5f.pdf
+34863ecc50722f0972e23ec117f80afcfe1411a9,http://nlpr-web.ia.ac.cn/2010papers/kz/gh3.pdf
+9958942a0b7832e0774708a832d8b7d1a5d287ae,https://engineering.purdue.edu/~bouman/publications/pdf/tip29.pdf
+88bee9733e96958444dc9e6bef191baba4fa6efa,http://homepages.dcc.ufmg.br/~william/papers/paper_2014_SIBGRAPI.pdf
+3991223b1dc3b87883cec7af97cf56534178f74a,http://www.ics.uci.edu/~dvk/pub/ICMR13_dvk.pdf
+05e03c48f32bd89c8a15ba82891f40f1cfdc7562,http://files.is.tue.mpg.de/black/papers/rgapami.pdf
+0cb7e4c2f6355c73bfc8e6d5cdfad26f3fde0baf,http://pdfs.semanticscholar.org/0cb7/e4c2f6355c73bfc8e6d5cdfad26f3fde0baf.pdf
+5c92355b2808621d237a89dc7b3faa5cdb990ab5,http://www.researchgate.net/profile/Brian_Lovell2/publication/236124723_Dynamic_Amelioration_of_Resolution_Mismatches_for_Local_Feature_Based_Identity_Inference/links/0fcfd50741a027e848000000.pdf
+13141284f1a7e1fe255f5c2b22c09e32f0a4d465,http://www.micc.unifi.it/pernici/index_files/ALIEN_final.pdf
+0b50e223ad4d9465bb92dbf17a7b79eccdb997fb,http://users.eecs.northwestern.edu/~ganghua/publication/CVPR08a.pdf
+36b9f46c12240898bafa10b0026a3fb5239f72f3,https://arxiv.org/pdf/1702.05573v1.pdf
+74eae724ef197f2822fb7f3029c63014625ce1ca,http://pdfs.semanticscholar.org/74ea/e724ef197f2822fb7f3029c63014625ce1ca.pdf
+0a6a818b634cca4eb75a37bfd23b5c5c21331b12,http://hal.cse.msu.edu/pdfs/papers/wacv-2015.pdf
+5c3dce55c61ee86073575ac75cc882a215cb49e6,http://pdfs.semanticscholar.org/8d93/b33c38a26b97442b2f160e75212739c60bc5.pdf
+59a35b63cf845ebf0ba31c290423e24eb822d245,http://biometrics.cse.msu.edu/Publications/Face/Klumetal_FaceSketchID_TIFS2014.pdf
+442f09ddb5bb7ba4e824c0795e37cad754967208,http://pdfs.semanticscholar.org/8c29/513c2621c26ac8491bb763674db475fe58c6.pdf
+5456166e3bfe78a353df988897ec0bd66cee937f,http://pdfs.semanticscholar.org/5456/166e3bfe78a353df988897ec0bd66cee937f.pdf
+a3eab933e1b3db1a7377a119573ff38e780ea6a3,http://mirlab.org/conference_papers/International_Conference/ICASSP%202010/pdfs/0000838.pdf
+5fa932be4d30cad13ea3f3e863572372b915bec8,http://pdfs.semanticscholar.org/5fa9/32be4d30cad13ea3f3e863572372b915bec8.pdf
+43f6953804964037ff91a4f45d5b5d2f8edfe4d5,http://ias.cs.tum.edu/_media/spezial/bib/riaz09fit.pdf
+8d712cef3a5a8a7b1619fb841a191bebc2a17f15,http://pdfs.semanticscholar.org/8d71/2cef3a5a8a7b1619fb841a191bebc2a17f15.pdf
+19d3b02185ad36fb0b792f2a15a027c58ac91e8e,http://pdfs.semanticscholar.org/19d3/b02185ad36fb0b792f2a15a027c58ac91e8e.pdf
+bcb99d5150d792001a7d33031a3bd1b77bea706b,http://pdfs.semanticscholar.org/bcb9/9d5150d792001a7d33031a3bd1b77bea706b.pdf
+0334cc0374d9ead3dc69db4816d08c917316c6c4,http://pdfs.semanticscholar.org/0334/cc0374d9ead3dc69db4816d08c917316c6c4.pdf
+9636c7d3643fc598dacb83d71f199f1d2cc34415,http://pdfs.semanticscholar.org/9636/c7d3643fc598dacb83d71f199f1d2cc34415.pdf
+25e2d3122d4926edaab56a576925ae7a88d68a77,http://pdfs.semanticscholar.org/25e2/d3122d4926edaab56a576925ae7a88d68a77.pdf
+22a7f1aebdb57eecd64be2a1f03aef25f9b0e9a7,http://pdfs.semanticscholar.org/22a7/f1aebdb57eecd64be2a1f03aef25f9b0e9a7.pdf
+a54e0f2983e0b5af6eaafd4d3467b655a3de52f4,http://pdfs.semanticscholar.org/a54e/0f2983e0b5af6eaafd4d3467b655a3de52f4.pdf
+cbcf5da9f09b12f53d656446fd43bc6df4b2fa48,http://pdfs.semanticscholar.org/cbcf/5da9f09b12f53d656446fd43bc6df4b2fa48.pdf
+4ae291b070ad7940b3c9d3cb10e8c05955c9e269,http://www.cl.cam.ac.uk/~pr10/publications/icmi14.pdf
+4f591e243a8f38ee3152300bbf42899ac5aae0a5,http://pdfs.semanticscholar.org/4f59/1e243a8f38ee3152300bbf42899ac5aae0a5.pdf
+676f9eabf4cfc1fd625228c83ff72f6499c67926,http://pdfs.semanticscholar.org/676f/9eabf4cfc1fd625228c83ff72f6499c67926.pdf
+285472527c5dc1c620d9644849e7519766c2d655,http://lear.inrialpes.fr/people/mpederso/papers/ICCV15_Parts.pdf
+e510f2412999399149d8635a83eca89c338a99a1,http://pdfs.semanticscholar.org/e510/f2412999399149d8635a83eca89c338a99a1.pdf
+0a87d781fe2ae2e700237ddd00314dbc10b1429c,http://pdfs.semanticscholar.org/0a87/d781fe2ae2e700237ddd00314dbc10b1429c.pdf
+68c5238994e3f654adea0ccd8bca29f2a24087fc,http://web.fsktm.um.edu.my/~cschan/doc/ICIP2013.pdf
+acc548285f362e6b08c2b876b628efceceeb813e,http://pdfs.semanticscholar.org/acc5/48285f362e6b08c2b876b628efceceeb813e.pdf
+053c2f592a7f153e5f3746aa5ab58b62f2cf1d21,http://pdfs.semanticscholar.org/053c/2f592a7f153e5f3746aa5ab58b62f2cf1d21.pdf
+89945b7cd614310ebae05b8deed0533a9998d212,http://pdfs.semanticscholar.org/8994/5b7cd614310ebae05b8deed0533a9998d212.pdf
+a0061dae94d916f60a5a5373088f665a1b54f673,http://pdfs.semanticscholar.org/a006/1dae94d916f60a5a5373088f665a1b54f673.pdf
+6a7e464464f70afea78552c8386f4d2763ea1d9c,http://pdfs.semanticscholar.org/6a7e/464464f70afea78552c8386f4d2763ea1d9c.pdf
+900207b3bc3a4e5244cae9838643a9685a84fee0,http://pdfs.semanticscholar.org/9002/07b3bc3a4e5244cae9838643a9685a84fee0.pdf
+82a4a35b2bae3e5c51f4d24ea5908c52973bd5be,http://pdfs.semanticscholar.org/82a4/a35b2bae3e5c51f4d24ea5908c52973bd5be.pdf
+0ee661a1b6bbfadb5a482ec643573de53a9adf5e,http://epubs.surrey.ac.uk/812523/1/yunlian_TIFS2014.pdf
+352c53e56c52a49d33dcdbec5690c2ba604b07d0,http://www.cs.huji.ac.il/~zweiga/Alons_Zweig_Hompage/Homepage_files/Zweig_ICCV7.pdf
+60a006bdfe5b8bf3243404fae8a5f4a9d58fa892,http://alumni.cs.ucr.edu/~mkafai/papers/Paper_bwild.pdf
+cd3005753012409361aba17f3f766e33e3a7320d,http://pdfs.semanticscholar.org/cd30/05753012409361aba17f3f766e33e3a7320d.pdf
+067126ce1f1a205f98e33db7a3b77b7aec7fb45a,http://pdfs.semanticscholar.org/0671/26ce1f1a205f98e33db7a3b77b7aec7fb45a.pdf
+3f540faf85e1f8de6ce04fb37e556700b67e4ad3,http://pdfs.semanticscholar.org/3f54/0faf85e1f8de6ce04fb37e556700b67e4ad3.pdf
+0ec0fc9ed165c40b1ef4a99e944abd8aa4e38056,http://pdfs.semanticscholar.org/0ec0/fc9ed165c40b1ef4a99e944abd8aa4e38056.pdf
+5c2a7518fb26a37139cebff76753d83e4da25159,http://pdfs.semanticscholar.org/5c2a/7518fb26a37139cebff76753d83e4da25159.pdf
+edbb8cce0b813d3291cae4088914ad3199736aa0,http://pdfs.semanticscholar.org/edbb/8cce0b813d3291cae4088914ad3199736aa0.pdf
+69a9da55bd20ce4b83e1680fbc6be2c976067631,http://pdfs.semanticscholar.org/a9b4/d257d16e876302e3318ade42fcb2ab9ffdf9.pdf
+211c42a567e02987a6f89b89527de3bf4d2e9f90,http://www.cs.dartmouth.edu/~dutran/papers/ijcv16_preprint.pdf
+09f853ce12f7361c4b50c494df7ce3b9fad1d221,http://files.is.tue.mpg.de/jgall/download/jgall_RFdepthFace_ijcv12.pdf
+368d59cf1733af511ed8abbcbeb4fb47afd4da1c,http://pdfs.semanticscholar.org/368d/59cf1733af511ed8abbcbeb4fb47afd4da1c.pdf
+03b98b4a2c0b7cc7dae7724b5fe623a43eaf877b,http://pdfs.semanticscholar.org/03b9/8b4a2c0b7cc7dae7724b5fe623a43eaf877b.pdf
+131bfa2ae6a04fd3b921ccb82b1c3f18a400a9c1,http://pdfs.semanticscholar.org/131b/fa2ae6a04fd3b921ccb82b1c3f18a400a9c1.pdf
+d3e04963ff42284c721f2bc6a90b7a9e20f0242f,http://pdfs.semanticscholar.org/d3e0/4963ff42284c721f2bc6a90b7a9e20f0242f.pdf
+8b7191a2b8ab3ba97423b979da6ffc39cb53f46b,http://www.eurecom.fr/fr/publication/3472/download/mm-publi-3472.pdf
+66dcd855a6772d2731b45cfdd75f084327b055c2,http://pdfs.semanticscholar.org/66dc/d855a6772d2731b45cfdd75f084327b055c2.pdf
+421955c6d2f7a5ffafaf154a329a525e21bbd6d3,http://pdfs.semanticscholar.org/ea6c/4d71fafe4352e7c3aa2237f77af0c4050cef.pdf
+4a14a321a9b5101b14ed5ad6aa7636e757909a7c,http://openaccess.thecvf.com/content_iccv_2015/papers/Li_Learning_Semi-Supervised_Representation_ICCV_2015_paper.pdf
+d46e793b945c4f391031656357625e902c4405e8,http://140.118.9.222/publications/journal/faceoff.pdf
+532f7ec8e0c8f7331417dd4a45dc2e8930874066,http://mirlab.org/conference_papers/International_Conference/ICASSP%202014/papers/p6060-zoidi.pdf
+303065c44cf847849d04da16b8b1d9a120cef73a,http://pdfs.semanticscholar.org/3030/65c44cf847849d04da16b8b1d9a120cef73a.pdf
+9a6da02db99fcc0690d7ffdc15340b125726ab95,http://vision.ucla.edu/~vedaldi/assets/pubs/vedaldi07boosting.pdf
+af62621816fbbe7582a7d237ebae1a4d68fcf97d,http://pdfs.semanticscholar.org/af62/621816fbbe7582a7d237ebae1a4d68fcf97d.pdf
+c54f9f33382f9f656ec0e97d3004df614ec56434,http://pdfs.semanticscholar.org/c54f/9f33382f9f656ec0e97d3004df614ec56434.pdf
+becd5fd62f6301226b8e150e1a5ec3180f748ff8,http://pdfs.semanticscholar.org/becd/5fd62f6301226b8e150e1a5ec3180f748ff8.pdf
+760a712f570f7a618d9385c0cee7e4d0d6a78ed2,http://pdfs.semanticscholar.org/760a/712f570f7a618d9385c0cee7e4d0d6a78ed2.pdf
+292c6b743ff50757b8230395c4a001f210283a34,https://labicvl.github.io/docs/pubs/Oscar_VISAPP_2014.pdf
+f52efc206432a0cb860155c6d92c7bab962757de,http://pdfs.semanticscholar.org/f52e/fc206432a0cb860155c6d92c7bab962757de.pdf
+c7e4c7be0d37013de07b6d829a3bf73e1b95ad4e,http://pdfs.semanticscholar.org/c7e4/c7be0d37013de07b6d829a3bf73e1b95ad4e.pdf
+1b79628af96eb3ad64dbb859dae64f31a09027d5,http://pdfs.semanticscholar.org/1b79/628af96eb3ad64dbb859dae64f31a09027d5.pdf
+3bc376f29bc169279105d33f59642568de36f17f,http://www.dip.ee.uct.ac.za/~nicolls/publish/sm14-visapp.pdf
+0a6d344112b5af7d1abbd712f83c0d70105211d0,http://www.cl.cam.ac.uk/~tb346/pub/papers/iccv2013.pdf
+0903bb001c263e3c9a40f430116d1e629eaa616f,http://pdfs.semanticscholar.org/0903/bb001c263e3c9a40f430116d1e629eaa616f.pdf
+ee18e29a2b998eddb7f6663bb07891bfc7262248,http://or.nsfc.gov.cn/bitstream/00001903-5/13750/1/1000007562815.pdf
+1750db78b7394b8fb6f6f949d68f7c24d28d934f,https://www3.nd.edu/~kwb/Bharati_Singh_Vatsa_Bowyer_TIFS_2016.pdf
+d082f35534932dfa1b034499fc603f299645862d,http://pdfs.semanticscholar.org/d082/f35534932dfa1b034499fc603f299645862d.pdf
+aff92784567095ee526a705e21be4f42226bbaab,http://pdfs.semanticscholar.org/aff9/2784567095ee526a705e21be4f42226bbaab.pdf
+75fd9acf5e5b7ed17c658cc84090c4659e5de01d,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/2B_035_ext.pdf
+52c59f9f4993c8248dd3d2d28a4946f1068bcbbe,http://blogs.bu.edu/joewang/files/2013/06/allerton_2011_v2.pdf
+0dbf4232fcbd52eb4599dc0760b18fcc1e9546e9,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553740.pdf
+57101b29680208cfedf041d13198299e2d396314,http://pdfs.semanticscholar.org/5710/1b29680208cfedf041d13198299e2d396314.pdf
+b013cce42dd769db754a57351d49b7410b8e82ad,http://tlab.princeton.edu/publication_files/Rojas%20et%20al%20IEEE%202010.pdf
+29a013b2faace976f2c532533bd6ab4178ccd348,http://or.nsfc.gov.cn/bitstream/00001903-5/94894/1/1000006589627.pdf
+5b89744d2ac9021f468b3ffd32edf9c00ed7fed7,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Bi_Beyond_Mahalanobis_Metric_2015_CVPR_paper.pdf
+102e374347698fe5404e1d83f441630b1abf62d9,https://infoscience.epfl.ch/record/209965/files/TBME-preprint-infoscience.pdf
+69c2ac04693d53251500557316c854a625af84ee,http://pdfs.semanticscholar.org/dc97/ceb1faf945e780a92be651b022a82e3bff5a.pdf
+8dce38840e6cf5ab3e0d1b26e401f8143d2a6bff,http://publications.idiap.ch/downloads/papers/2017/Le_CBMI_2017.pdf
+48a9241edda07252c1aadca09875fabcfee32871,https://arxiv.org/pdf/1611.08657v5.pdf
+3661a34f302883c759b9fa2ce03de0c7173d2bb2,http://pdfs.semanticscholar.org/fd6d/14fb0bbca58e924c504d7dc57cb7f8d3707e.pdf
+1149c6ac37ae2310fe6be1feb6e7e18336552d95,http://pdfs.semanticscholar.org/1149/c6ac37ae2310fe6be1feb6e7e18336552d95.pdf
+25c108a56e4cb757b62911639a40e9caf07f1b4f,https://arxiv.org/pdf/1707.09531v2.pdf
+1b300a7858ab7870d36622a51b0549b1936572d4,http://www.ee.oulu.fi/research/mvmp/mvg/files/pdf/Yimo-TIP2016.pdf
+82bef8481207de9970c4dc8b1d0e17dced706352,http://pdfs.semanticscholar.org/82be/f8481207de9970c4dc8b1d0e17dced706352.pdf
+aa8ef6ba6587c8a771ec4f91a0dd9099e96f6d52,http://www.gtti.it/gtti13/Presentazioni_GTTI13/25_Giugno/Sessioni_Scientifiche_Short_Presentation/Piacenza.pdf
+0d8415a56660d3969449e77095be46ef0254a448,http://www.lv-nus.org/papers/2004/2004_C_6.pdf
+3c97c32ff575989ef2869f86d89c63005fc11ba9,http://people.cs.umass.edu/~hzjiang/pubs/face_det_fg_2017.pdf
+ffaad0204f4af763e3390a2f6053c0e9875376be,http://pdfs.semanticscholar.org/ffaa/d0204f4af763e3390a2f6053c0e9875376be.pdf
+855bfc17e90ec1b240efba9100fb760c068a8efa,http://pdfs.semanticscholar.org/855b/fc17e90ec1b240efba9100fb760c068a8efa.pdf
+e43cc682453cf3874785584fca813665878adaa7,http://pdfs.semanticscholar.org/e43c/c682453cf3874785584fca813665878adaa7.pdf
+42ded74d4858bea1070dadb08b037115d9d15db5,http://pdfs.semanticscholar.org/42de/d74d4858bea1070dadb08b037115d9d15db5.pdf
+49a7949fabcdf01bbae1c2eb38946ee99f491857,http://pdfs.semanticscholar.org/49a7/949fabcdf01bbae1c2eb38946ee99f491857.pdf
+2288696b6558b7397bdebe3aed77bedec7b9c0a9,http://pdfs.semanticscholar.org/2288/696b6558b7397bdebe3aed77bedec7b9c0a9.pdf
+854dbb4a0048007a49df84e3f56124d387588d99,http://pdfs.semanticscholar.org/854d/bb4a0048007a49df84e3f56124d387588d99.pdf
+c58b7466f2855ffdcff1bebfad6b6a027b8c5ee1,http://pdfs.semanticscholar.org/d6f1/42f5ddcb027e7b346eb20703abbf5cc4e883.pdf
+d05513c754966801f26e446db174b7f2595805ba,http://pdfs.semanticscholar.org/d055/13c754966801f26e446db174b7f2595805ba.pdf
+54bae57ed37ce50e859cbc4d94d70cc3a84189d5,http://pdfs.semanticscholar.org/af65/4a7ec15168b16382bd604889ea07a967dac6.pdf
+1f05473c587e2a3b587f51eb808695a1c10bc153,http://pdfs.semanticscholar.org/7246/bbdf4c125d9d216e560c87c58a8613bd2602.pdf
+4e5dc3b397484326a4348ccceb88acf309960e86,http://pdfs.semanticscholar.org/4e5d/c3b397484326a4348ccceb88acf309960e86.pdf
+39b22bcbd452d5fea02a9ee63a56c16400af2b83,http://www.uoguelph.ca/~gwtaylor/publications/gwtaylor_crv2014.pdf
+352110778d2cc2e7110f0bf773398812fd905eb1,http://www.ca.cs.cmu.edu/sites/default/files/complete_14.pdf
+2fda164863a06a92d3a910b96eef927269aeb730,http://www.cs.utexas.edu/~grauman/courses/spring2007/395T/papers/berg_names_and_faces.pdf
+01beab8f8293a30cf48f52caea6ca0fb721c8489,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553729.pdf
+a6634ff2f9c480e94ed8c01d64c9eb70e0d98487,http://pdfs.semanticscholar.org/a663/4ff2f9c480e94ed8c01d64c9eb70e0d98487.pdf
+334ac2a459190b41923be57744aa6989f9a54a51,http://pdfs.semanticscholar.org/334a/c2a459190b41923be57744aa6989f9a54a51.pdf
+42c9394ca1caaa36f535721fa9a64b2c8d4e0dee,http://pdfs.semanticscholar.org/5d2d/208fc245bb49148bffb3076b0660b98b4466.pdf
+19eb486dcfa1963c6404a9f146c378fc7ae3a1df,https://pdfs.semanticscholar.org/3b4d/bd7be0b5b0df2e0c61a977974b1fc78ad3e5.pdf
+46b7ee97d7dfbd61cc3745e8dfdd81a15ab5c1d4,http://ibug.doc.ic.ac.uk/media/uploads/documents/3d_local_features.pdf
+02820c1491b10a1ff486fed32c269e4077c36551,https://arxiv.org/pdf/1610.07930v1.pdf
+b3658514a0729694d86a8b89c875a66cde20480c,http://pdfs.semanticscholar.org/b365/8514a0729694d86a8b89c875a66cde20480c.pdf
+b1444b3bf15eec84f6d9a2ade7989bb980ea7bd1,http://pdfs.semanticscholar.org/b144/4b3bf15eec84f6d9a2ade7989bb980ea7bd1.pdf
+34bb11bad04c13efd575224a5b4e58b9249370f3,http://cs.nju.edu.cn/wujx/paper/CVPR2014_Action.pdf
+cc7e66f2ba9ac0c639c80c65534ce6031997acd7,http://pdfs.semanticscholar.org/cc7e/66f2ba9ac0c639c80c65534ce6031997acd7.pdf
+ebabd1f7bc0274fec88a3dabaf115d3e226f198f,http://pdfs.semanticscholar.org/ebab/d1f7bc0274fec88a3dabaf115d3e226f198f.pdf
+2594a77a3f0dd5073f79ba620e2f287804cec630,https://arxiv.org/pdf/1702.06925v1.pdf
+e9bb045e702ee38e566ce46cc1312ed25cb59ea7,http://pdfs.semanticscholar.org/e9bb/045e702ee38e566ce46cc1312ed25cb59ea7.pdf
+4cfa8755fe23a8a0b19909fa4dec54ce6c1bd2f7,https://arxiv.org/pdf/1611.09956v1.pdf
+5d185d82832acd430981ffed3de055db34e3c653,http://pdfs.semanticscholar.org/fc70/92e72a2bae6f60266147e0fb587b1771699a.pdf
+ba816806adad2030e1939450226c8647105e101c,http://pdfs.semanticscholar.org/ba81/6806adad2030e1939450226c8647105e101c.pdf
+3c03d95084ccbe7bf44b6d54151625c68f6e74d0,http://pdfs.semanticscholar.org/3c03/d95084ccbe7bf44b6d54151625c68f6e74d0.pdf
+0486214fb58ee9a04edfe7d6a74c6d0f661a7668,http://conradsanderson.id.au/pdfs/wong_face_selection_cvpr_biometrics_2011.pdf
+1564bf0a268662df752b68bee5addc4b08868739,https://arxiv.org/pdf/1605.04129v2.pdf
+29f0414c5d566716a229ab4c5794eaf9304d78b6,http://pdfs.semanticscholar.org/29f0/414c5d566716a229ab4c5794eaf9304d78b6.pdf
+ac75c662568cbb7308400cc002469a14ff25edfd,http://www.dsp.toronto.edu/juwei/Publication/JuweiICIP04v2.pdf
+9d66de2a59ec20ca00a618481498a5320ad38481,http://www.cs.iit.edu/~xli/paper/Conf/POP-ICDCS15.pdf
+7c7b0550ec41e97fcfc635feffe2e53624471c59,http://cvrr.ucsd.edu/publications/2014/headhandeye.pdf
+2c17d36bab56083293456fe14ceff5497cc97d75,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhu_Unconstrained_Face_Alignment_CVPR_2016_paper.pdf
+395a91d49e9283e1bf2d61a75c3dc846b347ea74,http://cake.fiu.edu/Publications/Reza+al-13-OV.On-demand_Virtual_Health.IEEE.downloaded.pdf
+a7191958e806fce2505a057196ccb01ea763b6ea,http://pdfs.semanticscholar.org/a719/1958e806fce2505a057196ccb01ea763b6ea.pdf
+17670b60dcfb5cbf8fdae0b266e18cf995f6014c,https://arxiv.org/pdf/1606.02254v1.pdf
+48a417cfeba06feb4c7ab30f06c57ffbc288d0b5,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Chen_Robust_Dictionary_Learning_2013_ICCV_paper.pdf
+7c7ab59a82b766929defd7146fd039b89d67e984,https://www.microsoft.com/en-us/research/wp-content/uploads/2016/11/wacv2014_ChaZhang.pdf
+24f9248f01df3020351347c2a3f632e01de72090,http://www.cs.utexas.edu/users/bwaters/publications/papers/luong-wacv2013.pdf
+eff87ecafed67cc6fc4f661cb077fed5440994bb,http://pdfs.semanticscholar.org/eff8/7ecafed67cc6fc4f661cb077fed5440994bb.pdf
+781c2553c4ed2a3147bbf78ad57ef9d0aeb6c7ed,https://ivi.fnwi.uva.nl/isis/publications/2017/JainIJCV2017/JainIJCV2017.pdf
+133f01aec1534604d184d56de866a4bd531dac87,http://www.cs.tau.ac.il/~wolf/papers/jpatchlbp.pdf
+4d8ce7669d0346f63b20393ffaa438493e7adfec,http://pdfs.semanticscholar.org/4d8c/e7669d0346f63b20393ffaa438493e7adfec.pdf
+72f4aaf7e2e3f215cd8762ce283988220f182a5b,http://pdfs.semanticscholar.org/72f4/aaf7e2e3f215cd8762ce283988220f182a5b.pdf
+0cbc4dcf2aa76191bbf641358d6cecf38f644325,http://pdfs.semanticscholar.org/0cbc/4dcf2aa76191bbf641358d6cecf38f644325.pdf
+e476cbcb7c1de73a7bcaeab5d0d59b8b3c4c1cbf,https://pdfs.semanticscholar.org/e476/cbcb7c1de73a7bcaeab5d0d59b8b3c4c1cbf.pdf
+07e639abf1621ceff27c9e3f548fadfa2052c912,http://pdfs.semanticscholar.org/07e6/39abf1621ceff27c9e3f548fadfa2052c912.pdf
+0f0241124d6092a0bb56259ac091467c2c6938ca,http://mm.cs.uec.ac.jp/kitaha-a/research/maw2008.pdf?origin=publication_detail
+05f3d1e9fb254b275354ca69018e9ed321dd8755,http://pdfs.semanticscholar.org/05f3/d1e9fb254b275354ca69018e9ed321dd8755.pdf
+57f5711ca7ee5c7110b7d6d12c611d27af37875f,http://pdfs.semanticscholar.org/57f5/711ca7ee5c7110b7d6d12c611d27af37875f.pdf
+929bd1d11d4f9cbc638779fbaf958f0efb82e603,http://pdfs.semanticscholar.org/929b/d1d11d4f9cbc638779fbaf958f0efb82e603.pdf
+6080f26675e44f692dd722b61905af71c5260af8,https://arxiv.org/pdf/1603.05073v1.pdf
+11c04c4f0c234a72f94222efede9b38ba6b2306c,http://www.ece.northwestern.edu/~zli/new_home/MyPublications/conf/ACMMM08-action-recog.pdf
+481fb0a74528fa7706669a5cce6a212ac46eaea3,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Chen_Recognizing_RGB_Images_2014_CVPR_paper.pdf
+051a84f0e39126c1ebeeb379a405816d5d06604d,http://static.springer.com/sgw/documents/1348632/application/pdf/Cognitive+Computation_Biometric+Recognition+Performing+in+a+Bioinspired+System.pdf
+32575ffa69d85bbc6aef5b21d73e809b37bf376d,http://www.sce.carleton.ca/faculty/adler//publications/2006/youmaran-adler-bcc2006-quality.pdf
+bac11ce0fb3e12c466f7ebfb6d036a9fe62628ea,http://pdfs.semanticscholar.org/bac1/1ce0fb3e12c466f7ebfb6d036a9fe62628ea.pdf
+2cac70f9c8140a12b6a55cef834a3d7504200b62,http://www.eng.auburn.edu/~reevesj/Classes/ELEC6970-latex/posters/baposterex1.pdf
+2ef328e035b2b5501ceddc0052615d4cebac6f1f,http://mi.eng.cam.ac.uk/~ss965/semantic_transform.pdf
+b871d1b8495025ff8a6255514ed39f7765415935,http://pdfs.semanticscholar.org/b871/d1b8495025ff8a6255514ed39f7765415935.pdf
+b3b4a7e29b9186e00d2948a1d706ee1605fe5811,http://pdfs.semanticscholar.org/b3b4/a7e29b9186e00d2948a1d706ee1605fe5811.pdf
+8a40b6c75dd6392ee0d3af73cdfc46f59337efa9,http://pdfs.semanticscholar.org/f656/f6682655180162b67042d9d37c4d57c49238.pdf
+c78fdd080df01fff400a32fb4cc932621926021f,http://pdfs.semanticscholar.org/c78f/dd080df01fff400a32fb4cc932621926021f.pdf
+06ad99f19cf9cb4a40741a789e4acbf4433c19ae,http://pdfs.semanticscholar.org/06ad/99f19cf9cb4a40741a789e4acbf4433c19ae.pdf
+7e8016bef2c180238f00eecc6a50eac473f3f138,http://pdfs.semanticscholar.org/7e80/16bef2c180238f00eecc6a50eac473f3f138.pdf
+3fdfd6fa7a1cc9142de1f53e4ac7c2a7ac64c2e3,http://pdfs.semanticscholar.org/3fdf/d6fa7a1cc9142de1f53e4ac7c2a7ac64c2e3.pdf
+17fa1c2a24ba8f731c8b21f1244463bc4b465681,http://pdfs.semanticscholar.org/d5ba/a722b1bca1f95e4e1fad968b2b74ec1ecc7f.pdf
+6643a7feebd0479916d94fb9186e403a4e5f7cbf,http://pdfs.semanticscholar.org/6643/a7feebd0479916d94fb9186e403a4e5f7cbf.pdf
+ec12f805a48004a90e0057c7b844d8119cb21b4a,http://pdfs.semanticscholar.org/ec12/f805a48004a90e0057c7b844d8119cb21b4a.pdf
+e013c650c7c6b480a1b692bedb663947cd9d260f,http://www.nlpr.ia.ac.cn/2013papers/gjkw/gk25.pdf
+3a804cbf004f6d4e0b041873290ac8e07082b61f,http://pdfs.semanticscholar.org/5ce8/e665a6512c09f15d8528ce6bece1f6a4d138.pdf
+dce3dff9216d63c4a77a2fcb0ec1adf6d2489394,http://pdfs.semanticscholar.org/dce3/dff9216d63c4a77a2fcb0ec1adf6d2489394.pdf
+c3a3f7758bccbead7c9713cb8517889ea6d04687,http://pdfs.semanticscholar.org/c3a3/f7758bccbead7c9713cb8517889ea6d04687.pdf
+69526cdf6abbfc4bcd39616acde544568326d856,http://speech.iiit.ac.in/svlpubs/article/SaoA.K.Yegna2007.pdf
+8dc9de0c7324d098b537639c8214543f55392a6b,http://www.diva-portal.org/smash/get/diva2:280081/FULLTEXT01.pdf
+ca0363d29e790f80f924cedaf93cb42308365b3d,http://poseidon.csd.auth.gr/papers/PUBLISHED/JOURNAL/pdf/Kotsia07a.pdf
+4205cb47ba4d3c0f21840633bcd49349d1dc02c1,http://www.utdallas.edu/~cxc123730/ICIP_2017.pdf
+0e36ada8cb9c91f07c9dcaf196d036564e117536,http://pdfs.semanticscholar.org/d0d5/aa7f797113c825053f4c4fd3772dc3601139.pdf
+d61578468d267c2d50672077918c1cda9b91429b,http://pdfs.semanticscholar.org/d615/78468d267c2d50672077918c1cda9b91429b.pdf
+9ea73660fccc4da51c7bc6eb6eedabcce7b5cead,http://pdfs.semanticscholar.org/9ea7/3660fccc4da51c7bc6eb6eedabcce7b5cead.pdf
+51f311f724883218bcc511b0403b9a7745b9d40e,https://www.researchgate.net/profile/Xiangwei_Kong/publication/221190737_Biometrics-based_identifiers_for_digital_identity_management/links/00b7d51ca1f2a78c74000000.pdf
+1e8394cc9fe7c2392aa36fb4878faf7e78bbf2de,https://arxiv.org/pdf/1410.3748v1.pdf
+14fb3283d4e37760b7dc044a1e2906e3cbf4d23a,http://crcv.ucf.edu/courses/CAP6412/Spring2013/papers/felix_yu_attribute_cvpr2012.pdf
+4f028efe6708fc252851eee4a14292b7ce79d378,http://pdfs.semanticscholar.org/ae17/aca92b4710efb00e3180a46e56e463ae2a6f.pdf
+6339e9385ae3609cb22f6b87175c7e6850f2c05b,http://vision.ucmerced.edu/papers/Yang_WACV12_EstimatingTheSpatialExtent.pdf
+240eb0b34872c431ecf9df504671281f59e7da37,http://www.ece.cmu.edu/~dbatra/publications/assets/cutout_tags_iv2009_small.pdf
+8509abbde2f4b42dc26a45cafddcccb2d370712f,http://pdfs.semanticscholar.org/ad9a/169042d887c33cfcec2716a453a0d3abcb0c.pdf
+58cb1414095f5eb6a8c6843326a6653403a0ee17,http://pdfs.semanticscholar.org/58cb/1414095f5eb6a8c6843326a6653403a0ee17.pdf
+fe5df5fe0e4745d224636a9ae196649176028990,http://pdfs.semanticscholar.org/fe5d/f5fe0e4745d224636a9ae196649176028990.pdf
+c65e4ffa2c07a37b0bb7781ca4ec2ed7542f18e3,http://pdfs.semanticscholar.org/c65e/4ffa2c07a37b0bb7781ca4ec2ed7542f18e3.pdf
+00d0b01d6a5f12216e078001b7c49225d2495b21,http://graphics.cs.uh.edu/publication/pub/2009_TVCJ_faceilluminationtransfer.pdf
+956317de62bd3024d4ea5a62effe8d6623a64e53,https://research-repository.griffith.edu.au/bitstream/handle/10072/17889/47024_1.pdf;jsessionid=2146D7EB83BAD65DE653E0056477D61A?sequence=1
+1a65cc5b2abde1754b8c9b1d932a68519bcb1ada,http://pdfs.semanticscholar.org/e4ae/821e234c281aed6ba629c130be7c8eac4a31.pdf
+cf54a133c89f730adc5ea12c3ac646971120781c,http://pdfs.semanticscholar.org/cf54/a133c89f730adc5ea12c3ac646971120781c.pdf
+236a4f38f79a4dcc2183e99b568f472cf45d27f4,https://jurie.users.greyc.fr/papers/moosman-nowak-jurie-pami08.pdf
+c7f752eea91bf5495a4f6e6a67f14800ec246d08,http://pdfs.semanticscholar.org/c7f7/52eea91bf5495a4f6e6a67f14800ec246d08.pdf
+42765c170c14bd58e7200b09b2e1e17911eed42b,http://pdfs.semanticscholar.org/4276/5c170c14bd58e7200b09b2e1e17911eed42b.pdf
+65d7f95fcbabcc3cdafc0ad38e81d1f473bb6220,http://pdfs.semanticscholar.org/65d7/f95fcbabcc3cdafc0ad38e81d1f473bb6220.pdf
+1171e8a96ffb15fdb265aaba02be014a38137ad5,http://hal.cse.msu.edu/pdfs/papers/pdm-tifs-2015.pdf
+cf09e2cb82961128302b99a34bff91ec7d198c7c,http://pdfs.semanticscholar.org/cf09/e2cb82961128302b99a34bff91ec7d198c7c.pdf
+8a0d10a7909b252d0e11bf32a7f9edd0c9a8030b,http://www.cs.unc.edu/~lazebnik/research/fall07/animals_on_the_web.pdf
+032a1c95388fb5c6e6016dd8597149be40bc9d4d,http://people.eecs.berkeley.edu/~gkioxari/ActionTubes/action_tubes.pdf
+5d7f8eb73b6a84eb1d27d1138965eb7aef7ba5cf,https://www.cl.cam.ac.uk/~hg410/SariyanidiEtAl-RobustRegistration-TIP2016.pdf
+4d3c4c3fe8742821242368e87cd72da0bd7d3783,http://www.ee.cuhk.edu.hk/~xgwang/papers/sunWTiccv13.pdf
+416364cfdbc131d6544582e552daf25f585c557d,http://www.dcs.qmw.ac.uk/~sgg/papers/Zalewski_Gong_FG04.pdf
+1a2b3fa1b933042687eb3d27ea0a3fcb67b66b43,http://pdfs.semanticscholar.org/676c/0fc58b6a0108326024f708e30d76cadbae58.pdf
+66a2c229ac82e38f1b7c77a786d8cf0d7e369598,http://pdfs.semanticscholar.org/66a2/c229ac82e38f1b7c77a786d8cf0d7e369598.pdf
+341ed69a6e5d7a89ff897c72c1456f50cfb23c96,http://pdfs.semanticscholar.org/cd7f/26c430363f90e530824446b3a4c85cfb94e5.pdf
+36e8ef2e5d52a78dddf0002e03918b101dcdb326,http://www.milbo.org/stasm-files/multiview-active-shape-models-with-sift-for-300w.pdf
+23e75f5ce7e73714b63f036d6247fa0172d97cb6,http://pdfs.semanticscholar.org/23e7/5f5ce7e73714b63f036d6247fa0172d97cb6.pdf
+831226405bb255527e9127b84e8eaedd7eb8e9f9,http://pdfs.semanticscholar.org/8312/26405bb255527e9127b84e8eaedd7eb8e9f9.pdf
+0dd72887465046b0f8fc655793c6eaaac9c03a3d,http://pdfs.semanticscholar.org/e112/df5539821a00dfa818617bf95f901f016763.pdf
+6de18708218988b0558f6c2f27050bb4659155e4,https://arxiv.org/pdf/1611.05216v1.pdf
+a66d89357ada66d98d242c124e1e8d96ac9b37a0,http://pdfs.semanticscholar.org/a66d/89357ada66d98d242c124e1e8d96ac9b37a0.pdf
+4026dc62475d2ff2876557fc2b0445be898cd380,http://pdfs.semanticscholar.org/4026/dc62475d2ff2876557fc2b0445be898cd380.pdf
+0773c320713dae62848fceac5a0ac346ba224eca,http://eudl.eu/pdf/10.4108/icst.intetain.2015.259444
+029317f260b3303c20dd58e8404a665c7c5e7339,http://www.nlpr.ia.ac.cn/2009papers/gjkw/gk32.pdf?origin=publication_detail
+0515e43c92e4e52254a14660718a9e498bd61cf5,http://pdfs.semanticscholar.org/3a78/5f86c2109fe1ff242dcb26211abfb9b0a870.pdf
+8878871ec2763f912102eeaff4b5a2febfc22fbe,http://www.ee.columbia.edu/~wliu/TIP15_action.pdf
+2d990b04c2bd61d3b7b922b8eed33aeeeb7b9359,http://pdfs.semanticscholar.org/2d99/0b04c2bd61d3b7b922b8eed33aeeeb7b9359.pdf
+75b833dde2e76c5de5912db3444d62c4131d15dc,http://www.researchgate.net/profile/Vassilios_Solachidis/publication/4303365_A_Face_Tracker_Trajectories_Clustering_Using_Mutual_Information/links/09e4150ca146dba69c000000.pdf
+e5b301ee349ba8e96ea6c71782295c4f06be6c31,http://pdfs.semanticscholar.org/e5b3/01ee349ba8e96ea6c71782295c4f06be6c31.pdf
+b216040f110d2549f61e3f5a7261cab128cab361,http://pdfs.semanticscholar.org/b216/040f110d2549f61e3f5a7261cab128cab361.pdf
+0021f46bda27ea105d722d19690f5564f2b8869e,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhao_Deep_Region_and_CVPR_2016_paper.pdf
+0f92e9121e9c0addc35eedbbd25d0a1faf3ab529,http://pdfs.semanticscholar.org/0f92/e9121e9c0addc35eedbbd25d0a1faf3ab529.pdf
+40c8cffd5aac68f59324733416b6b2959cb668fd,https://arxiv.org/pdf/1701.08341v1.pdf
+19da9f3532c2e525bf92668198b8afec14f9efea,http://pdfs.semanticscholar.org/19da/9f3532c2e525bf92668198b8afec14f9efea.pdf
+6c705285c554985ecfe1117e854e1fe1323f8c21,http://pdfs.semanticscholar.org/6c70/5285c554985ecfe1117e854e1fe1323f8c21.pdf
+c0723e0e154a33faa6ff959d084aebf07770ffaf,http://pdfs.semanticscholar.org/c072/3e0e154a33faa6ff959d084aebf07770ffaf.pdf
+8309e8f27f3fb6f2ac1b4343a4ad7db09fb8f0ff,http://pdfs.semanticscholar.org/8309/e8f27f3fb6f2ac1b4343a4ad7db09fb8f0ff.pdf
+39b5f6d6f8d8127b2b97ea1a4987732c0db6f9df,https://pdfs.semanticscholar.org/39b5/f6d6f8d8127b2b97ea1a4987732c0db6f9df.pdf
+21765df4c0224afcc25eb780bef654cbe6f0bc3a,http://ci2cv.net/media/papers/2013_ICCV_Kiani.pdf
+0db8e6eb861ed9a70305c1839eaef34f2c85bbaf,https://arxiv.org/pdf/1704.06244v1.pdf
+5a86842ab586de9d62d5badb2ad8f4f01eada885,http://pdfs.semanticscholar.org/5a86/842ab586de9d62d5badb2ad8f4f01eada885.pdf
+0b605b40d4fef23baa5d21ead11f522d7af1df06,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989a819.pdf
+d78077a7aa8a302d4a6a09fb9737ab489ae169a6,http://pdfs.semanticscholar.org/d780/77a7aa8a302d4a6a09fb9737ab489ae169a6.pdf
+defa8774d3c6ad46d4db4959d8510b44751361d8,http://pdfs.semanticscholar.org/defa/8774d3c6ad46d4db4959d8510b44751361d8.pdf
+126535430845361cd7a3a6f317797fe6e53f5a3b,http://pdfs.semanticscholar.org/1265/35430845361cd7a3a6f317797fe6e53f5a3b.pdf
+621e8882c41cdaf03a2c4a986a6404f0272ba511,http://conradsanderson.id.au/pdfs/wong_ijcnn_2012.pdf
+312afff739d1e0fcd3410adf78be1c66b3480396,http://pdfs.semanticscholar.org/312a/fff739d1e0fcd3410adf78be1c66b3480396.pdf
+6b35b15ceba2f26cf949f23347ec95bbbf7bed64,http://pdfs.semanticscholar.org/6b35/b15ceba2f26cf949f23347ec95bbbf7bed64.pdf
+181708b09bde7f4904f8fd92b3668d76e7aff527,http://mplab.ucsd.edu/~ksikka/emotiw14.pdf
+7a6d9f89e0925a220fe3dfba4f0d2745f8be6c9a,http://www.faceplusplus.com/wp-content/uploads/2014/11/Learning-Compact-Face-Representation-Packing-a-Face-into-an-int32.pdf
+35c973dba6e1225196566200cfafa150dd231fa8,http://pdfs.semanticscholar.org/8af7/72ea2389b555c0b193624add6a1c5a49ff24.pdf
+8f6263e4d3775757e804796e104631c7a2bb8679,http://pdfs.semanticscholar.org/8f62/63e4d3775757e804796e104631c7a2bb8679.pdf
+cadba72aa3e95d6dcf0acac828401ddda7ed8924,http://pdfs.semanticscholar.org/cadb/a72aa3e95d6dcf0acac828401ddda7ed8924.pdf
+44fb4dcf88eb482e2ab79fd4540caf941613b970,http://www.researchgate.net/profile/Masashi_Sugiyama/publication/220930547_Perceived_Age_Estimation_under_Lighting_Condition_Change_by_Covariate_Shift_Adaptation/links/0fcfd5122b4d406edd000000.pdf
+31d60b2af2c0e172c1a6a124718e99075818c408,http://pdfs.semanticscholar.org/31d6/0b2af2c0e172c1a6a124718e99075818c408.pdf
+2661f38aaa0ceb424c70a6258f7695c28b97238a,http://mplab.ucsd.edu/wordpress/wp-content/uploads/multilayer2012.pdf
+03104f9e0586e43611f648af1132064cadc5cc07,http://pdfs.semanticscholar.org/51c0/2f135d6c960b1141bde539059a279f9beb78.pdf
+4a0f98d7dbc31497106d4f652968c708f7da6692,http://arxiv.org/pdf/1605.05258v1.pdf
+08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7,http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf
+c03e01717b2d93f04cce9b5fd2dcfd1143bcc180,http://pdfs.semanticscholar.org/c03e/01717b2d93f04cce9b5fd2dcfd1143bcc180.pdf
+a8638a07465fe388ae5da0e8a68e62a4ee322d68,http://pdfs.semanticscholar.org/a863/8a07465fe388ae5da0e8a68e62a4ee322d68.pdf
+b503f481120e69b62e076dcccf334ee50559451e,http://pdfs.semanticscholar.org/b503/f481120e69b62e076dcccf334ee50559451e.pdf
+47e8db3d9adb79a87c8c02b88f432f911eb45dc5,http://pdfs.semanticscholar.org/5f99/63990ab7dd888ab33393f712f8d5c1463348.pdf
+c0a8c0e6ccf9882969ba0eda0b898affa015437b,http://stanford.edu/~verroios/papers/waldo.pdf
+2dd6c988b279d89ab5fb5155baba65ce4ce53c1e,http://pdfs.semanticscholar.org/2dd6/c988b279d89ab5fb5155baba65ce4ce53c1e.pdf
+7c6dbaebfe14878f3aee400d1378d90d61373921,http://pdfs.semanticscholar.org/7c6d/baebfe14878f3aee400d1378d90d61373921.pdf
+4042bbb4e74e0934f4afbedbe92dd3e37336b2f4,http://pdfs.semanticscholar.org/b35a/6b2f335c28696eb78a02e0b30ee59a3e3fd2.pdf
+b8084d5e193633462e56f897f3d81b2832b72dff,http://pdfs.semanticscholar.org/b808/4d5e193633462e56f897f3d81b2832b72dff.pdf
+e6540d70e5ffeed9f447602ea3455c7f0b38113e,http://pdfs.semanticscholar.org/e654/0d70e5ffeed9f447602ea3455c7f0b38113e.pdf
+2d748f8ee023a5b1fbd50294d176981ded4ad4ee,http://pdfs.semanticscholar.org/2d74/8f8ee023a5b1fbd50294d176981ded4ad4ee.pdf
+81831ed8e5b304e9d28d2d8524d952b12b4cbf55,http://pdfs.semanticscholar.org/8183/1ed8e5b304e9d28d2d8524d952b12b4cbf55.pdf
+33ae696546eed070717192d393f75a1583cd8e2c,https://arxiv.org/pdf/1708.08508v2.pdf
+d4c7d1a7a03adb2338704d2be7467495f2eb6c7b,http://pdfs.semanticscholar.org/d4c7/d1a7a03adb2338704d2be7467495f2eb6c7b.pdf
+7a061e7eab865fc8d2ef00e029b7070719ad2e9a,http://cvrr.ucsd.edu/ece285/papers/from_WI13/Ramanan_IJCV2013.pdf
+f24e379e942e134d41c4acec444ecf02b9d0d3a9,http://pdfs.semanticscholar.org/f24e/379e942e134d41c4acec444ecf02b9d0d3a9.pdf
+cb08f679f2cb29c7aa972d66fe9e9996c8dfae00,http://pdfs.semanticscholar.org/cb08/f679f2cb29c7aa972d66fe9e9996c8dfae00.pdf
+2e1fd8d57425b727fd850d7710d38194fa6e2654,http://www.cs.toronto.edu/~afsaneh/JamiesonEtAl2007.pdf
+b9cad920a00fc0e997fc24396872e03f13c0bb9c,http://www.ic.unicamp.br/~rocha/pub/papers/2011-icip-spoofing-detection.pdf
+2b8dfbd7cae8f412c6c943ab48c795514d53c4a7,http://mirlab.org/conference_papers/International_Conference/ICASSP%202014/papers/p529-bordei.pdf
+5e0eb34aeb2b58000726540336771053ecd335fc,http://ies.anthropomatik.kit.edu/ies/download/publ/ies_2016_herrmann_low_quality.pdf
+523b2cbc48decfabffb66ecaeced4fe6a6f2ac78,https://arxiv.org/pdf/1708.09126v1.pdf
+90298f9f80ebe03cb8b158fd724551ad711d4e71,http://pdfs.semanticscholar.org/9029/8f9f80ebe03cb8b158fd724551ad711d4e71.pdf
+8ad0d8cf4bcb5c7eccf09f23c8b7d25439c4ae2b,http://pdfs.semanticscholar.org/8ad0/d8cf4bcb5c7eccf09f23c8b7d25439c4ae2b.pdf
+50614ff325f0c8ca20f99efc55d65a8d4cc768cd,http://www.genizah.org/professionalPapers/IJCinGeniza.pdf
+2edc6df161f6aadbef9c12408bdb367e72c3c967,http://www.infomus.org/Events/proceedings/ICMI2014/icmi/p514.pdf
+31ace8c9d0e4550a233b904a0e2aabefcc90b0e3,http://pdfs.semanticscholar.org/31ac/e8c9d0e4550a233b904a0e2aabefcc90b0e3.pdf
+370b5757a5379b15e30d619e4d3fb9e8e13f3256,http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf
+45f858f9e8d7713f60f52618e54089ba68dfcd6d,http://openaccess.thecvf.com/content_ICCV_2017/papers/Sigurdsson_What_Actions_Are_ICCV_2017_paper.pdf
+f9d1f12070e5267afc60828002137af949ff1544,http://pdfs.semanticscholar.org/f9d1/f12070e5267afc60828002137af949ff1544.pdf
+244b57cc4a00076efd5f913cc2833138087e1258,http://pdfs.semanticscholar.org/dfa8/d0afc548a8086902412fb0eae0fcf881ed8a.pdf
+3cd8ab6bb4b038454861a36d5396f4787a21cc68,http://pdfs.semanticscholar.org/3cd8/ab6bb4b038454861a36d5396f4787a21cc68.pdf
+094357c1a2ba3fda22aa6dd9e496530d784e1721,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Wang_A_Unified_Probabilistic_2013_ICCV_paper.pdf
+4b6387e608afa83ac8d855de2c9b0ae3b86f31cc,http://www.researchgate.net/profile/Heng_Yang3/publication/263813517_Face_Sketch_Landmarks_Localization_in_the_Wild/links/53d3dd3b0cf220632f3ce8b3.pdf
+0cccf576050f493c8b8fec9ee0238277c0cfd69a,http://pdfs.semanticscholar.org/0ccc/f576050f493c8b8fec9ee0238277c0cfd69a.pdf
+2d23fa205acca9c21e3e1a04674f1e5a9528550e,http://pdfs.semanticscholar.org/2d23/fa205acca9c21e3e1a04674f1e5a9528550e.pdf
+3958db5769c927cfc2a9e4d1ee33ecfba86fe054,http://homes.cs.washington.edu/~neeraj/base/base/papers/nk_pami2011_faceattrs.pdf
+a51882cfd0706512bf50e12c0a7dd0775285030d,http://pdfs.semanticscholar.org/a518/82cfd0706512bf50e12c0a7dd0775285030d.pdf
+df5fe0c195eea34ddc8d80efedb25f1b9034d07d,http://www.andrew.cmu.edu/user/kseshadr/BTAS_2009_Paper_IEEE.pdf
+d44ca9e7690b88e813021e67b855d871cdb5022f,http://pdfs.semanticscholar.org/d44c/a9e7690b88e813021e67b855d871cdb5022f.pdf
+06526c52a999fdb0a9fd76e84f9795a69480cecf,http://pdfs.semanticscholar.org/0652/6c52a999fdb0a9fd76e84f9795a69480cecf.pdf
+9326d1390e8601e2efc3c4032152844483038f3f,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2014/W01/papers/Hsu_Landmark_Based_Facial_2014_CVPR_paper.pdf
+b69b239217d4e9a20fe4fe1417bf26c94ded9af9,http://pdfs.semanticscholar.org/b69b/239217d4e9a20fe4fe1417bf26c94ded9af9.pdf
+2a02355c1155f2d2e0cf7a8e197e0d0075437b19,http://pdfs.semanticscholar.org/cf2c/58a5efea263a878815e25148b1c6954a0cbe.pdf
+4a9d906935c9de019c61aedc10b77ee10e3aec63,http://openaccess.thecvf.com/content_cvpr_2016/papers/Gupta_Cross_Modal_Distillation_CVPR_2016_paper.pdf
+78436256ff8f2e448b28e854ebec5e8d8306cf21,http://pdfs.semanticscholar.org/7843/6256ff8f2e448b28e854ebec5e8d8306cf21.pdf
+4da735d2ed0deeb0cae4a9d4394449275e316df2,http://cvrr.ucsd.edu/publications/2016/0406.pdf
+4ab10174a4f98f7e2da7cf6ccfeb9bc64c8e7da8,http://pdfs.semanticscholar.org/4ab1/0174a4f98f7e2da7cf6ccfeb9bc64c8e7da8.pdf
+1b6394178dbc31d0867f0b44686d224a19d61cf4,http://pdfs.semanticscholar.org/ca8e/5419fd570f19643425b24da801283b706fc1.pdf
+d7d166aee5369b79ea2d71a6edd73b7599597aaa,http://pdfs.semanticscholar.org/d7d1/66aee5369b79ea2d71a6edd73b7599597aaa.pdf
+0ba402af3b8682e2aa89f76bd823ddffdf89fa0a,http://pdfs.semanticscholar.org/c0d8/4377168c554cb8e83099bed940091fe49dec.pdf
+156cd2a0e2c378e4c3649a1d046cd080d3338bca,http://pdfs.semanticscholar.org/156c/d2a0e2c378e4c3649a1d046cd080d3338bca.pdf
+541bccf19086755f8b5f57fd15177dc49e77d675,http://pdfs.semanticscholar.org/541b/ccf19086755f8b5f57fd15177dc49e77d675.pdf
+6c6bb85a08b0bdc50cf8f98408d790ccdb418798,http://pdfs.semanticscholar.org/6c6b/b85a08b0bdc50cf8f98408d790ccdb418798.pdf
+fcbf808bdf140442cddf0710defb2766c2d25c30,http://pdfs.semanticscholar.org/fcbf/808bdf140442cddf0710defb2766c2d25c30.pdf
+d78373de773c2271a10b89466fe1858c3cab677f,http://pdfs.semanticscholar.org/d783/73de773c2271a10b89466fe1858c3cab677f.pdf
+6aefe7460e1540438ffa63f7757c4750c844764d,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Nascimento_Non-rigid_Segmentation_using_2014_CVPR_paper.pdf
+133900a0e7450979c9491951a5f1c2a403a180f0,http://rlair.cs.ucr.edu/papers/docs/socgroup.pdf
+6e9a8a34ab5b7cdc12ea52d94e3462225af2c32c,http://www.cv-foundation.org/openaccess/content_cvpr_2016_workshops/w28/papers/Kim_Fusing_Aligned_and_CVPR_2016_paper.pdf
+2dd5f1d69e0e8a95a10f3f07f2c0c7fa172994b3,http://pdfs.semanticscholar.org/ca31/53a726d8c212a7fd92f696c7e00a3ae3b31f.pdf
+5a4c6246758c522f68e75491eb65eafda375b701,http://www.mirlab.org/conference_papers/International_Conference/ICASSP%202010/pdfs/0001118.pdf
+841a5de1d71a0b51957d9be9d9bebed33fb5d9fa,http://mx.nthu.edu.tw/~tsunghan/papers/journal%20papers/TIP_PCANet.pdf
+288dbc40c027af002298b38954d648fddd4e2fd3,http://pdfs.semanticscholar.org/288d/bc40c027af002298b38954d648fddd4e2fd3.pdf
+0d746111135c2e7f91443869003d05cde3044beb,https://arxiv.org/pdf/1603.09364v1.pdf
+f2b13946d42a50fa36a2c6d20d28de2234aba3b4,http://npl.mcgill.ca/Papers/Adaptive%20Facial%20Expression%20Recognition%20Using%20Inter-modal%20top-down%20context.pdf
+085b5f9fd49432edab29e2c64f2a427fbce97f67,https://staff.fnwi.uva.nl/m.jain/pub/jain-objects-actions-cvpr2015.pdf
+bf03f0fe8f3ba5b118bdcbb935bacb62989ecb11,http://pdfs.semanticscholar.org/bf03/f0fe8f3ba5b118bdcbb935bacb62989ecb11.pdf
+8d0243b8b663ca0ab7cbe613e3b886a5d1c8c152,http://pdfs.semanticscholar.org/8d02/43b8b663ca0ab7cbe613e3b886a5d1c8c152.pdf
+1a6c9ef99bf0ab9835a91fe5f1760d98a0606243,http://pdfs.semanticscholar.org/57ce/705f08ae7256b16eac2b8b40ae0c88d6cf23.pdf
+7ab930146f4b5946ec59459f8473c700bcc89233,http://pdfs.semanticscholar.org/7ab9/30146f4b5946ec59459f8473c700bcc89233.pdf
+6b17b219bd1a718b5cd63427032d93c603fcf24f,http://pdfs.semanticscholar.org/6b17/b219bd1a718b5cd63427032d93c603fcf24f.pdf
+28bc378a6b76142df8762cd3f80f737ca2b79208,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Vedaldi_Understanding_Objects_in_2014_CVPR_paper.pdf
+fba464cb8e3eff455fe80e8fb6d3547768efba2f,http://pdfs.semanticscholar.org/fba4/64cb8e3eff455fe80e8fb6d3547768efba2f.pdf
+25bf288b2d896f3c9dab7e7c3e9f9302e7d6806b,http://pdfs.semanticscholar.org/25bf/288b2d896f3c9dab7e7c3e9f9302e7d6806b.pdf
+3727ac3d50e31a394b200029b2c350073c1b69e3,http://arxiv.org/pdf/1605.03639v2.pdf
+8c81705e5e4a1e2068a5bd518adc6955d49ae434,http://pdfs.semanticscholar.org/8c81/705e5e4a1e2068a5bd518adc6955d49ae434.pdf
+82f8652c2059187b944ce65e87bacb6b765521f6,http://pdfs.semanticscholar.org/82f8/652c2059187b944ce65e87bacb6b765521f6.pdf
+37007af698b990a3ea8592b11d264b14d39c843f,http://acberg.com/papers/dcmsvm.pdf
+464de30d3310123644ab81a1f0adc51598586fd2,http://pdfs.semanticscholar.org/464d/e30d3310123644ab81a1f0adc51598586fd2.pdf
+0dbacb4fd069462841ebb26e1454b4d147cd8e98,http://poseidon.csd.auth.gr/papers/PUBLISHED/CONFERENCE/pdf/Nikitidis11c.pdf
+3b38c06caf54f301847db0dd622a6622c3843957,http://pdfs.semanticscholar.org/3b38/c06caf54f301847db0dd622a6622c3843957.pdf
+4223917177405eaa6bdedca061eb28f7b440ed8e,http://pdfs.semanticscholar.org/4223/917177405eaa6bdedca061eb28f7b440ed8e.pdf
+3cc46bf79fb9225cf308815c7d41c8dd5625cc29,http://poseidon.csd.auth.gr/papers/PUBLISHED/CONFERENCE/pdf/2016/Pantraki2016.pdf
+d1f58798db460996501f224fff6cceada08f59f9,http://pdfs.semanticscholar.org/d1f5/8798db460996501f224fff6cceada08f59f9.pdf
+f45d6a7bdb6741242da6192d18c97ac39e6308db,http://media.cs.tsinghua.edu.cn/~imagevision/papers/%5B2008%5D%5Bfg%5DPerson-Specific%20Face%20Recognition%20in%20Unconstrained%20Environments%20a%20Combination%20of%20Offline%20and%20Online%20Learning.pdf
+3393459600368be2c4c9878a3f65a57dcc0c2cfa,http://pdfs.semanticscholar.org/3393/459600368be2c4c9878a3f65a57dcc0c2cfa.pdf
+13fd0a4d06f30a665fc0f6938cea6572f3b496f7,http://pdfs.semanticscholar.org/13fd/0a4d06f30a665fc0f6938cea6572f3b496f7.pdf
+33792bb27ef392973e951ca5a5a3be4a22a0d0c6,http://plaza.ufl.edu/xsshi2015/paper_list/TPAMI2016.pdf
+f2e9494d0dca9fb6b274107032781d435a508de6,http://pdfs.semanticscholar.org/f2e9/494d0dca9fb6b274107032781d435a508de6.pdf
+d29eec5e047560627c16803029d2eb8a4e61da75,http://pdfs.semanticscholar.org/d29e/ec5e047560627c16803029d2eb8a4e61da75.pdf
+0c741fa0966ba3ee4fc326e919bf2f9456d0cd74,http://cs.nju.edu.cn/_upload/tpl/01/0b/267/template267/zhouzh.files/publication/aaai10LLD.pdf
+552c55c71bccfc6de7ce1343a1cd12208e9a63b3,https://ivi.fnwi.uva.nl/isis/publications/2008/ValentiCVPR2008/ValentiCVPR2008.pdf
+275b3cb7c780c663eabbf4d6c6cbc8fe24287c70,https://www.researchgate.net/profile/Bisan_Alsalibi/publication/280839254_The_Impact_of_Bio-Inspired_Approaches_Toward_the_Advancement_of_Face_Recognition/links/55c8ce4608aeca747d67062e.pdf?origin=publication_list
+88e2574af83db7281c2064e5194c7d5dfa649846,http://pdfs.semanticscholar.org/88e2/574af83db7281c2064e5194c7d5dfa649846.pdf
+46e866f58419ff4259c65e8256c1d4f14927b2c6,http://pdfs.semanticscholar.org/f03d/cfd956cf4404ec9f0c7fb451479d72a63e03.pdf
+5334ac0a6438483890d5eef64f6db93f44aacdf4,http://pdfs.semanticscholar.org/5334/ac0a6438483890d5eef64f6db93f44aacdf4.pdf
+7d53678ef6009a68009d62cd07c020706a2deac3,http://pdfs.semanticscholar.org/7d53/678ef6009a68009d62cd07c020706a2deac3.pdf
+23120f9b39e59bbac4438bf4a8a7889431ae8adb,http://pdfs.semanticscholar.org/2312/0f9b39e59bbac4438bf4a8a7889431ae8adb.pdf
+f5aee1529b98136194ef80961ba1a6de646645fe,http://pdfs.semanticscholar.org/f5ae/e1529b98136194ef80961ba1a6de646645fe.pdf
+919d3067bce76009ce07b070a13728f549ebba49,http://pdfs.semanticscholar.org/919d/3067bce76009ce07b070a13728f549ebba49.pdf
+071af21377cc76d5c05100a745fb13cb2e40500f,http://pdfs.semanticscholar.org/071a/f21377cc76d5c05100a745fb13cb2e40500f.pdf
+07d986b1005593eda1aeb3b1d24078db864f8f6a,http://pdfs.semanticscholar.org/07d9/86b1005593eda1aeb3b1d24078db864f8f6a.pdf
+63cf5fc2ee05eb9c6613043f585dba48c5561192,http://pdfs.semanticscholar.org/63cf/5fc2ee05eb9c6613043f585dba48c5561192.pdf
+55eb7ec9b9740f6c69d6e62062a24bfa091bbb0c,http://pdfs.semanticscholar.org/55eb/7ec9b9740f6c69d6e62062a24bfa091bbb0c.pdf
+2c8f24f859bbbc4193d4d83645ef467bcf25adc2,http://romisatriawahono.net/lecture/rm/survey/machine%20learning/Frenay%20-%20Classification%20in%20the%20Presence%20of%20Label%20Noise%20-%202014.pdf
+0fb8317a8bf5feaf297af8e9b94c50c5ed0e8277,http://pdfs.semanticscholar.org/0fb8/317a8bf5feaf297af8e9b94c50c5ed0e8277.pdf
+6d8e3f3a83514381f890ab7cd2a1f1c5be597b69,http://pdfs.semanticscholar.org/aeb1/83983f4ae1ea9e01005f5d546480190e0345.pdf
+1bcbf2a4500d27d036e0f9d36d7af71c72f8ab61,http://mplab.ucsd.edu/~marni/pubs/Bartlett_CVPR05.pdf
+2a6bba2e81d5fb3c0fd0e6b757cf50ba7bf8e924,http://pdfs.semanticscholar.org/2a6b/ba2e81d5fb3c0fd0e6b757cf50ba7bf8e924.pdf
+39ce2232452c0cd459e32a19c1abe2a2648d0c3f,http://pdfs.semanticscholar.org/4fac/61d638cf7a1ab995e2ee9a02d3672b12d2ca.pdf
+06f39834e870278243dda826658319be2d5d8ded,http://www.public.asu.edu/~bli24/Papers/ICIP2016_video.pdf
+cacd51221c592012bf2d9e4894178c1c1fa307ca,http://pdfs.semanticscholar.org/cacd/51221c592012bf2d9e4894178c1c1fa307ca.pdf
+a158c1e2993ac90a90326881dd5cb0996c20d4f3,http://pdfs.semanticscholar.org/a158/c1e2993ac90a90326881dd5cb0996c20d4f3.pdf
+e315959d6e806c8fbfc91f072c322fb26ce0862b,http://pdfs.semanticscholar.org/e315/959d6e806c8fbfc91f072c322fb26ce0862b.pdf
+7fa3d4be12e692a47b991c0b3d3eba3a31de4d05,http://pdfs.semanticscholar.org/d5a4/c2757619a1f2c8d9a879e6f26f539a4a18f2.pdf
+3634b4dd263c0f330245c086ce646c9bb748cd6b,https://arxiv.org/pdf/1504.00983v2.pdf
+a2bd81be79edfa8dcfde79173b0a895682d62329,http://pdfs.semanticscholar.org/a2bd/81be79edfa8dcfde79173b0a895682d62329.pdf
+f0a3f12469fa55ad0d40c21212d18c02be0d1264,http://pdfs.semanticscholar.org/f0a3/f12469fa55ad0d40c21212d18c02be0d1264.pdf
+315a90543d60a5b6c5d1716fe9076736f0e90d24,https://www.computer.org/web/csdl/index/-/csdl/proceedings/fg/2013/5545/00/06553721.pdf
+0209389b8369aaa2a08830ac3b2036d4901ba1f1,https://arxiv.org/pdf/1612.01202v2.pdf
+b2cd92d930ed9b8d3f9dfcfff733f8384aa93de8,http://pdfs.semanticscholar.org/b2cd/92d930ed9b8d3f9dfcfff733f8384aa93de8.pdf
+72bf9c5787d7ff56a1697a3389f11d14654b4fcf,http://pdfs.semanticscholar.org/7910/a98a1fe9f4bec4c0dc4dc3476e9405b1930d.pdf
+047d7cf4301cae3d318468fe03a1c4ce43b086ed,http://webee.technion.ac.il/~yoav/publications/Delforge_taslp14R2.pdf
+ce5eac297174c17311ee28bda534faaa1d559bae,http://pdfs.semanticscholar.org/ce5e/ac297174c17311ee28bda534faaa1d559bae.pdf
+14d72dc9f78d65534c68c3ed57305f14bd4b5753,http://openaccess.thecvf.com/content_ICCV_2017/papers/Yan_Exploiting_Multi-Grain_Ranking_ICCV_2017_paper.pdf
+11aa527c01e61ec3a7a67eef8d7ffe9d9ce63f1d,http://pdfs.semanticscholar.org/11aa/527c01e61ec3a7a67eef8d7ffe9d9ce63f1d.pdf
+2f04ba0f74df046b0080ca78e56898bd4847898b,https://arxiv.org/pdf/1407.4023v2.pdf
+09b0ef3248ff8f1a05b8704a1b4cf64951575be9,https://arxiv.org/pdf/1511.06783v1.pdf
+6e198f6cc4199e1c4173944e3df6f39a302cf787,http://pdfs.semanticscholar.org/6e19/8f6cc4199e1c4173944e3df6f39a302cf787.pdf
+4526992d4de4da2c5fae7a5ceaad6b65441adf9d,http://pdfs.semanticscholar.org/4526/992d4de4da2c5fae7a5ceaad6b65441adf9d.pdf
+1791f790b99471fc48b7e9ec361dc505955ea8b1,http://pdfs.semanticscholar.org/6fea/599d7b9fc72350d6e0947d3baaf44edc561b.pdf
+33695e0779e67c7722449e9a3e2e55fde64cfd99,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/2B_064_ext.pdf
+5615d6045301ecbc5be35e46cab711f676aadf3a,https://arxiv.org/pdf/1705.10420v1.pdf
+ea2ee5c53747878f30f6d9c576fd09d388ab0e2b,http://pdfs.semanticscholar.org/ea2e/e5c53747878f30f6d9c576fd09d388ab0e2b.pdf
+716d6c2eb8a0d8089baf2087ce9fcd668cd0d4c0,http://pdfs.semanticscholar.org/ec7f/c7bf79204166f78c27e870b620205751fff6.pdf
+237eba4822744a9eabb121fe7b50fd2057bf744c,http://pdfs.semanticscholar.org/ba2a/65bef17d9db7366fe8c1344ca918ba50b99a.pdf
+9c25e89c80b10919865b9c8c80aed98d223ca0c6,http://pdfs.semanticscholar.org/9c25/e89c80b10919865b9c8c80aed98d223ca0c6.pdf
+3b557c4fd6775afc80c2cf7c8b16edde125b270e,https://arxiv.org/pdf/1602.02999v1.pdf
+2f348a2ad3ba390ee178d400be0f09a0479ae17b,http://www.csee.wvu.edu/~richas/ML-Papers/Gabor-Based%20Kernel%20PCA.pdf
+86f8e6310d114bb24deb971e8bc7089df6ac3b57,http://ftp.ncbi.nlm.nih.gov/pub/pmc/84/69/40101_2015_Article_46.PMC4350291.pdf
+36cf96fe11a2c1ea4d999a7f86ffef6eea7b5958,http://www.iab-rubric.org/papers/RGBD-Face.pdf
+81695fbbbea2972d7ab1bfb1f3a6a0dbd3475c0f,http://pdfs.semanticscholar.org/8169/5fbbbea2972d7ab1bfb1f3a6a0dbd3475c0f.pdf
+0e5dad0fe99aed6978c6c6c95dc49c6dca601e6a,http://www.openu.ac.il/home/hassner/projects/LATCH/LATCH.pdf
+19296e129c70b332a8c0a67af8990f2f4d4f44d1,http://lear.inrialpes.fr/pubs/2009/GVS09/supplmat.pdf
+66810438bfb52367e3f6f62c24f5bc127cf92e56,http://pdfs.semanticscholar.org/6681/0438bfb52367e3f6f62c24f5bc127cf92e56.pdf
+50eb75dfece76ed9119ec543e04386dfc95dfd13,https://lirias.kuleuven.be/bitstream/123456789/197359/1/boiy-learningVisual.pdf
+82cd5a5fec8a27887a35f1ecec684ec55eefad73,http://www.researchgate.net/profile/Giuseppe_Boccignone/publication/265793480_Using_Sparse_Coding_for_Landmark_Localization_in_Facial_Expressions/links/541bf80b0cf241a65a0ba53a.pdf
+600025c9a13ff09c6d8b606a286a79c823d89db8,http://pdfs.semanticscholar.org/6000/25c9a13ff09c6d8b606a286a79c823d89db8.pdf
+d30050cfd16b29e43ed2024ae74787ac0bbcf2f7,http://pdfs.semanticscholar.org/d300/50cfd16b29e43ed2024ae74787ac0bbcf2f7.pdf
+d5ab6aa15dad26a6ace5ab83ce62b7467a18a88e,http://pdfs.semanticscholar.org/d5ab/6aa15dad26a6ace5ab83ce62b7467a18a88e.pdf
+ada42b99f882ba69d70fff68c9ccbaff642d5189,http://pdfs.semanticscholar.org/ba11/4dfdd12b0f4323a8f28cd2bd770dfa74673e.pdf
+20e505cef6d40f896e9508e623bfc01aa1ec3120,http://pdfs.semanticscholar.org/20e5/05cef6d40f896e9508e623bfc01aa1ec3120.pdf
+2e1b1969ded4d63b69a5ec854350c0f74dc4de36,http://pdfs.semanticscholar.org/2e1b/1969ded4d63b69a5ec854350c0f74dc4de36.pdf
+03bd58a96f635059d4bf1a3c0755213a51478f12,https://arxiv.org/pdf/1401.7413v2.pdf
+ebf204e0a3e137b6c24e271b0d55fa49a6c52b41,http://pdfs.semanticscholar.org/ebf2/04e0a3e137b6c24e271b0d55fa49a6c52b41.pdf
+445461a34adc4bcdccac2e3c374f5921c93750f8,https://arxiv.org/pdf/1306.1913v1.pdf
+fc1e37fb16006b62848def92a51434fc74a2431a,http://pdfs.semanticscholar.org/fc1e/37fb16006b62848def92a51434fc74a2431a.pdf
+339937141ffb547af8e746718fbf2365cc1570c8,http://pdfs.semanticscholar.org/3399/37141ffb547af8e746718fbf2365cc1570c8.pdf
+955e2a39f51c0b6f967199942d77625009e580f9,http://pdfs.semanticscholar.org/955e/2a39f51c0b6f967199942d77625009e580f9.pdf
+4d6462fb78db88afff44561d06dd52227190689c,http://pdfs.semanticscholar.org/4d64/62fb78db88afff44561d06dd52227190689c.pdf
+014143aa16604ec3f334c1407ceaa496d2ed726e,http://www.cs.cmu.edu/~har/cvpr2008-manifold.pdf
+392425be1c9d9c2ee6da45de9df7bef0d278e85f,http://pdfs.semanticscholar.org/3924/25be1c9d9c2ee6da45de9df7bef0d278e85f.pdf
+3abc833f4d689f37cc8a28f47fb42e32deaa4b17,http://www.cs.virginia.edu/~vicente/files/ijcv_bigdata.pdf
+5160569ca88171d5fa257582d161e9063c8f898d,http://infoscience.epfl.ch/record/83324/files/heusch-AFGR-2006.pdf
+1b60b8e70859d5c85ac90510b370b501c5728620,http://pdfs.semanticscholar.org/1b60/b8e70859d5c85ac90510b370b501c5728620.pdf
+5b0ebb8430a04d9259b321fc3c1cc1090b8e600e,http://www.openu.ac.il/home/hassner/projects/Ossk/WolfHassnerTaigman_ICCV09.pdf
+026a9cfe3135b7b62279bc08e2fb97e0e9fad5c4,http://perso.telecom-paristech.fr/~sahbi/jstars2017.pdf
+4f0d9200647042e41dea71c35eb59e598e6018a7,http://pdfs.semanticscholar.org/4f0d/9200647042e41dea71c35eb59e598e6018a7.pdf
+feeb0fd0e254f38b38fe5c1022e84aa43d63f7cc,http://pdfs.semanticscholar.org/feeb/0fd0e254f38b38fe5c1022e84aa43d63f7cc.pdf
+371f40f6d32ece05cc879b6954db408b3d4edaf3,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2B_100_ext.pdf
+14bca107bb25c4dce89210049bf39ecd55f18568,http://pdfs.semanticscholar.org/6f56/b0fada68f36d78cf20148fd13de8bce8a93d.pdf
+348a16b10d140861ece327886b85d96cce95711e,http://pdfs.semanticscholar.org/348a/16b10d140861ece327886b85d96cce95711e.pdf
+83ca4cca9b28ae58f461b5a192e08dffdc1c76f3,http://infoscience.epfl.ch/record/200407/files/icip1024-cam-ready.pdf
+612075999e82596f3b42a80e6996712cc52880a3,https://www.etsmtl.ca/Unites-de-recherche/LIVIA/Recherche-et-innovation/Publications/Publications-2017/PID4875389.pdf
+b6c047ab10dd86b1443b088029ffe05d79bbe257,http://pdfs.semanticscholar.org/b6c0/47ab10dd86b1443b088029ffe05d79bbe257.pdf
+5666ed763698295e41564efda627767ee55cc943,http://i.cs.hku.hk/~kykwong/publications/zkuang_ijcv15.pdf
+69063f7e0a60ad6ce16a877bc8f11b59e5f7348e,http://openaccess.thecvf.com/content_iccv_2015/papers/Anwar_Class-Specific_Image_Deblurring_ICCV_2015_paper.pdf
+2b84630680e2c906f8d7ac528e2eb32c99ef203a,http://disi.unitn.it/~zen/data/acmmm14_zen3_orlando.pdf
+804b4c1b553d9d7bae70d55bf8767c603c1a09e3,http://mirlab.org/conference_papers/International_Conference/ICASSP%202016/pdfs/0001831.pdf
+f4ebbeb77249d1136c355f5bae30f02961b9a359,http://pdfs.semanticscholar.org/f4eb/beb77249d1136c355f5bae30f02961b9a359.pdf
+985cd420c00d2f53965faf63358e8c13d1951fa8,http://pdfs.semanticscholar.org/985c/d420c00d2f53965faf63358e8c13d1951fa8.pdf
+74ba4ab407b90592ffdf884a20e10006d2223015,http://pdfs.semanticscholar.org/74ba/4ab407b90592ffdf884a20e10006d2223015.pdf
+3852968082a16db8be19b4cb04fb44820ae823d4,https://infoscience.epfl.ch/record/230240/files/1701.01821.pdf
+1a8ccc23ed73db64748e31c61c69fe23c48a2bb1,http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W11/papers/Zhou_Extensive_Facial_Landmark_2013_ICCV_paper.pdf
+70e79d7b64f5540d309465620b0dab19d9520df1,http://pdfs.semanticscholar.org/70e7/9d7b64f5540d309465620b0dab19d9520df1.pdf
+7002d6fc3e0453320da5c863a70dbb598415e7aa,http://www.cris.ucr.edu/IGERT/papers/SongfanAbstract.pdf
+334e65b31ad51b1c1f84ce12ef235096395f1ca7,http://pdfs.semanticscholar.org/334e/65b31ad51b1c1f84ce12ef235096395f1ca7.pdf
+1db23a0547700ca233aef9cfae2081cd8c5a04d7,http://pdfs.semanticscholar.org/1db2/3a0547700ca233aef9cfae2081cd8c5a04d7.pdf
+a6b1d79bc334c74cde199e26a7ef4c189e9acd46,http://pdfs.semanticscholar.org/a6b1/d79bc334c74cde199e26a7ef4c189e9acd46.pdf
+b8caf1b1bc3d7a26a91574b493c502d2128791f6,http://pdfs.semanticscholar.org/b8ca/f1b1bc3d7a26a91574b493c502d2128791f6.pdf
+77037a22c9b8169930d74d2ce6f50f1a999c1221,https://ueaeprints.uea.ac.uk/64308/1/Accepted_manuscript.pdf
+8d2c0c9155a1ed49ba576ac0446ec67725468d87,http://media.cs.tsinghua.edu.cn/~cvg/publications/ENGLISH%20CONFERENCE%20PAPERS/A%20Study%20of%20Two%20Image%20Representations%20for%20Head%20Pose%20Estimation.pdf
+7dcd3f58aa75f7ae96fdac9b1c2332a4f0b2dbd3,https://www.researchgate.net/profile/Symeon_Nikitidis/publication/221122322_Facial_expression_recognition_using_clustering_discriminant_Non-negative_Matrix_Factorization/links/54fee98e0cf2eaf210b4506c.pdf
+47aeb3b82f54b5ae8142b4bdda7b614433e69b9a,http://pdfs.semanticscholar.org/5d06/437656dd94616d7d87260d5eb77513ded30f.pdf
+0a82860d11fcbf12628724333f1e7ada8f3cd255,http://pdfs.semanticscholar.org/0a82/860d11fcbf12628724333f1e7ada8f3cd255.pdf
+d79f9ada35e4410cd255db39d7cc557017f8111a,http://pdfs.semanticscholar.org/d79f/9ada35e4410cd255db39d7cc557017f8111a.pdf
+9887ab220254859ffc7354d5189083a87c9bca6e,http://pdfs.semanticscholar.org/9887/ab220254859ffc7354d5189083a87c9bca6e.pdf
+2609079d682998da2bc4315b55a29bafe4df414e,http://www.iab-rubric.org/papers/ICIP-13-camready.pdf
+08e995c080a566fe59884a527b72e13844b6f176,http://pdfs.semanticscholar.org/08e9/95c080a566fe59884a527b72e13844b6f176.pdf
+47d4838087a7ac2b995f3c5eba02ecdd2c28ba14,http://pdfs.semanticscholar.org/b2b5/35118c5c4dfcc96f547274cdc05dde629976.pdf
+1c93b48abdd3ef1021599095a1a5ab5e0e020dd5,http://www.stat.ucla.edu/~sczhu/papers/PAMI_FaceAging.pdf
+1d97735bb0f0434dde552a96e1844b064af08f62,http://www.apsipa.org/proceedings_2015/pdf/290.pdf
+56e03f8fcd16332f764352ba6e72c9c5092cac0f,http://www.cs.utexas.edu/~ssi/DHE.pdf
+2ad29b2921aba7738c51d9025b342a0ec770c6ea,http://arxiv.org/pdf/1510.02781v1.pdf
+677477e6d2ba5b99633aee3d60e77026fb0b9306,http://pdfs.semanticscholar.org/d105/b9b31106495f58fb951cfdbf64787ee89ab2.pdf
+1c30bb689a40a895bd089e55e0cad746e343d1e2,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Tran_Learning_Spatiotemporal_Features_ICCV_2015_paper.pdf
+adc4bc7639d5f1c5ead8728882e2390339d061ed,https://www.researchgate.net/profile/Fanbo_Meng2/publication/224144294_Emotional_Audio-Visual_Speech_Synthesis_Based_on_PAD/links/00b49538fd61d3280d000000.pdf?origin=publication_list
+51dc127f29d1bb076d97f515dca4cc42dda3d25b,http://pdfs.semanticscholar.org/7a1d/4a9ef5944217ee19aa642471b4746aaa2576.pdf
+91e57667b6fad7a996b24367119f4b22b6892eca,http://pdfs.semanticscholar.org/91e5/7667b6fad7a996b24367119f4b22b6892eca.pdf
+0235b2d2ae306b7755483ac4f564044f46387648,http://pdfs.semanticscholar.org/0235/b2d2ae306b7755483ac4f564044f46387648.pdf
+264a84f4d27cd4bca94270620907cffcb889075c,https://arxiv.org/pdf/1612.06615v1.pdf
+467b602a67cfd7c347fe7ce74c02b38c4bb1f332,http://pdfs.semanticscholar.org/467b/602a67cfd7c347fe7ce74c02b38c4bb1f332.pdf
+169076ffe5e7a2310e98087ef7da25aceb12b62d,http://pdfs.semanticscholar.org/1690/76ffe5e7a2310e98087ef7da25aceb12b62d.pdf
+232b6e2391c064d483546b9ee3aafe0ba48ca519,https://ibug.doc.ic.ac.uk/media/uploads/documents/tzimiro_pantic_iccv2013.pdf
+1451e7b11e66c86104f9391b80d9fb422fb11c01,http://pdfs.semanticscholar.org/1451/e7b11e66c86104f9391b80d9fb422fb11c01.pdf
+2e8eb9dc07deb5142a99bc861e0b6295574d1fbd,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Hejrati_Analysis_by_Synthesis_2014_CVPR_paper.pdf
+a75ee7f4c4130ef36d21582d5758f953dba03a01,http://pdfs.semanticscholar.org/a75e/e7f4c4130ef36d21582d5758f953dba03a01.pdf
+5d485501f9c2030ab33f97972aa7585d3a0d59a7,http://pdfs.semanticscholar.org/5d48/5501f9c2030ab33f97972aa7585d3a0d59a7.pdf
+49e1aa3ecda55465641b2c2acc6583b32f3f1fc6,http://pdfs.semanticscholar.org/49e1/aa3ecda55465641b2c2acc6583b32f3f1fc6.pdf
+0059b3dfc7056f26de1eabaafd1ad542e34c2c2e,http://pdfs.semanticscholar.org/0059/b3dfc7056f26de1eabaafd1ad542e34c2c2e.pdf
+896f4d87257abd0f628c1ffbbfdac38c86a56f50,http://pdfs.semanticscholar.org/cf5c/c511c7fd556aaf113de02fc88d7ba10928b0.pdf
+0ad8149318912b5449085187eb3521786a37bc78,http://arxiv.org/pdf/1604.02975v1.pdf
+9e8d87dc5d8a6dd832716a3f358c1cdbfa97074c,https://people.csail.mit.edu/khosla/papers/www2014_khosla.pdf
+b1a3b19700b8738b4510eecf78a35ff38406df22,http://pdfs.semanticscholar.org/b1a3/b19700b8738b4510eecf78a35ff38406df22.pdf
+8bfada57140aa1aa22a575e960c2a71140083293,http://pdfs.semanticscholar.org/8bfa/da57140aa1aa22a575e960c2a71140083293.pdf
+46c87fded035c97f35bb991fdec45634d15f9df2,https://arxiv.org/pdf/1707.09145v1.pdf
+f740bac1484f2f2c70777db6d2a11cf4280081d6,http://pdfs.semanticscholar.org/f740/bac1484f2f2c70777db6d2a11cf4280081d6.pdf
+e27c92255d7ccd1860b5fb71c5b1277c1648ed1e,http://pdfs.semanticscholar.org/e27c/92255d7ccd1860b5fb71c5b1277c1648ed1e.pdf
+f6abecc1f48f6ec6eede4143af33cc936f14d0d0,http://pdfs.semanticscholar.org/f6ab/ecc1f48f6ec6eede4143af33cc936f14d0d0.pdf
+df80fed59ffdf751a20af317f265848fe6bfb9c9,http://ivg.au.tsinghua.edu.cn/paper/2017_Learning%20deep%20sharable%20and%20structural%20detectors%20for%20face%20alignment.pdf
+2e0e056ed5927a4dc6e5c633715beb762628aeb0,http://pdfs.semanticscholar.org/2e0e/056ed5927a4dc6e5c633715beb762628aeb0.pdf
+10e0e6f1ec00b20bc78a5453a00c792f1334b016,http://pdfs.semanticscholar.org/672f/ae3da801b2a0d2bad65afdbbbf1b2320623e.pdf
+11a2ef92b6238055cf3f6dcac0ff49b7b803aee3,http://cs.adelaide.edu.au/~carneiro/publications/mainSPL.pdf
+0559fb9f5e8627fecc026c8ee6f7ad30e54ee929,http://pdfs.semanticscholar.org/0559/fb9f5e8627fecc026c8ee6f7ad30e54ee929.pdf
+a2d04db895dd17f2a8291b300a63604842c06d09,http://www4.comp.polyu.edu.hk/~csdct/Publications/2006/TCSVT.pdf
+f9e0209dc9e72d64b290d0622c1c1662aa2cc771,http://pdfs.semanticscholar.org/f9e0/209dc9e72d64b290d0622c1c1662aa2cc771.pdf
+03701e66eda54d5ab1dc36a3a6d165389be0ce79,http://www.eem.anadolu.edu.tr/atalaybarkan/EEM%20405%20(K)/icerik/improved%20pcr.pdf
+871f5f1114949e3ddb1bca0982086cc806ce84a8,http://www.vision.ee.ethz.ch/en/publications/papers/proceedings/eth_biwi_01169.pdf
+3463f12ad434d256cd5f94c1c1bfd2dd6df36947,http://pdfs.semanticscholar.org/3463/f12ad434d256cd5f94c1c1bfd2dd6df36947.pdf
+5ebb247963d2d898d420f1f4a2486102a9d05aa9,http://bcmi.sjtu.edu.cn/~zhzhang/papers/nncw.pdf
+72a5e181ee8f71b0b153369963ff9bfec1c6b5b0,http://pdfs.semanticscholar.org/72a5/e181ee8f71b0b153369963ff9bfec1c6b5b0.pdf
+3cb0ef5aabc7eb4dd8d32a129cb12b3081ef264f,http://pdfs.semanticscholar.org/3cb0/ef5aabc7eb4dd8d32a129cb12b3081ef264f.pdf
+80c8d143e7f61761f39baec5b6dfb8faeb814be9,http://pdfs.semanticscholar.org/80c8/d143e7f61761f39baec5b6dfb8faeb814be9.pdf
+5b9d9f5a59c48bc8dd409a1bd5abf1d642463d65,http://pdfs.semanticscholar.org/5b9d/9f5a59c48bc8dd409a1bd5abf1d642463d65.pdf
+f26097a1a479fb6f32b27a93f8f32609cfe30fdc,http://pdfs.semanticscholar.org/f260/97a1a479fb6f32b27a93f8f32609cfe30fdc.pdf
+95f12d27c3b4914e0668a268360948bce92f7db3,http://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf
+ce691a37060944c136d2795e10ed7ba751cd8394,http://pdfs.semanticscholar.org/ce69/1a37060944c136d2795e10ed7ba751cd8394.pdf
+47382cb7f501188a81bb2e10cfd7aed20285f376,http://pdfs.semanticscholar.org/4738/2cb7f501188a81bb2e10cfd7aed20285f376.pdf
+283d226e346ac3e7685dd9a4ba8ae55ee4f2fe43,http://pdfs.semanticscholar.org/283d/226e346ac3e7685dd9a4ba8ae55ee4f2fe43.pdf
+c0cdaeccff78f49f4604a6d263dc6eb1bb8707d5,http://pdfs.semanticscholar.org/c0cd/aeccff78f49f4604a6d263dc6eb1bb8707d5.pdf
+19dd371e1649ab55a46f4b98890d6937a411ec5d,http://www.ll.mit.edu/mission/cybersec/publications/publication-files/full_papers/2011_11_17_DagliC_HST_FP.pdf
+28cd46a078e8fad370b1aba34762a874374513a5,http://pdfs.semanticscholar.org/28cd/46a078e8fad370b1aba34762a874374513a5.pdf
+c4dcf41506c23aa45c33a0a5e51b5b9f8990e8ad,http://pdfs.semanticscholar.org/c4dc/f41506c23aa45c33a0a5e51b5b9f8990e8ad.pdf
+3b9c08381282e65649cd87dfae6a01fe6abea79b,http://pdfs.semanticscholar.org/3b9c/08381282e65649cd87dfae6a01fe6abea79b.pdf
+c88ce5ef33d5e544224ab50162d9883ff6429aa3,http://pdfs.semanticscholar.org/c88c/e5ef33d5e544224ab50162d9883ff6429aa3.pdf
+e171fba00d88710e78e181c3e807c2fdffc6798a,http://pdfs.semanticscholar.org/e171/fba00d88710e78e181c3e807c2fdffc6798a.pdf
+27aadf6e7441bf40675874df1cf4bb7e2dffdd9e,http://www1.icsi.berkeley.edu/~farrell/birdlets/iccv11-camera-ready.pdf
+c71f36c9376d444075de15b1102b4974481be84d,http://pdfs.semanticscholar.org/c71f/36c9376d444075de15b1102b4974481be84d.pdf
+36ce0b68a01b4c96af6ad8c26e55e5a30446f360,http://liris.cnrs.fr/Documents/Liris-6963.pdf
+25c19d8c85462b3b0926820ee5a92fc55b81c35a,http://www.brl.ntt.co.jp/people/kumano/papers/Kumano.IJCV2009.pdf
+1dacc2f4890431d867a038fd81c111d639cf4d7e,http://pdfs.semanticscholar.org/1dac/c2f4890431d867a038fd81c111d639cf4d7e.pdf
+2bf08d4cb8d1201a9866ee7c4852bfcbf8f8e7f1,http://mplab.ucsd.edu/~jake/haar.pdf
+0ba0f000baf877bc00a9e144b88fa6d373db2708,http://pdfs.semanticscholar.org/0ba0/f000baf877bc00a9e144b88fa6d373db2708.pdf
+b56530be665b0e65933adec4cc5ed05840c37fc4,http://kobus.ca/research/publications/07/cvpr-07-region-www.pdf
+c043f8924717a3023a869777d4c9bee33e607fb5,http://pdfs.semanticscholar.org/c043/f8924717a3023a869777d4c9bee33e607fb5.pdf
+60b3601d70f5cdcfef9934b24bcb3cc4dde663e7,http://pdfs.semanticscholar.org/60b3/601d70f5cdcfef9934b24bcb3cc4dde663e7.pdf
+d394bd9fbaad1f421df8a49347d4b3fca307db83,http://www.eecs.qmul.ac.uk/~sgg/papers/ShanEtAl_AVSS05.pdf
+aefc7c708269b874182a5c877fb6dae06da210d4,http://pdfs.semanticscholar.org/f6f4/60d4a4a5b4c077ab3ac7a972f52af17a4241.pdf
+683ec608442617d11200cfbcd816e86ce9ec0899,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Chen_Dual_Linear_Regression_2014_CVPR_paper.pdf
+60643bdab1c6261576e6610ea64ea0c0b200a28d,http://pdfs.semanticscholar.org/6064/3bdab1c6261576e6610ea64ea0c0b200a28d.pdf
+5394d42fd27b7e14bd875ec71f31fdd2fcc8f923,http://pdfs.semanticscholar.org/5394/d42fd27b7e14bd875ec71f31fdd2fcc8f923.pdf
+0861f86fb65aa915fbfbe918b28aabf31ffba364,http://pdfs.semanticscholar.org/0861/f86fb65aa915fbfbe918b28aabf31ffba364.pdf
+80be8624771104ff4838dcba9629bacfe6b3ea09,http://www.ifp.illinois.edu/~moulin/Papers/ECCV14-jiwen.pdf
+bcfeac1e5c31d83f1ed92a0783501244dde5a471,http://pdfs.semanticscholar.org/bcfe/ac1e5c31d83f1ed92a0783501244dde5a471.pdf
+09ce14b84af2dc2f76ae1cf227356fa0ba337d07,http://grail.cs.washington.edu/3dfaces/paper.pdf
+1467c4ab821c3b340abe05a1b13a19318ebbce98,http://pdfs.semanticscholar.org/1467/c4ab821c3b340abe05a1b13a19318ebbce98.pdf
+38bbca5f94d4494494860c5fe8ca8862dcf9676e,http://pdfs.semanticscholar.org/c322/b770d2c7d9e70d196577bf0ae6b05205ebd7.pdf
+3b37d95d2855c8db64bd6b1ee5659f87fce36881,http://pdfs.semanticscholar.org/3b37/d95d2855c8db64bd6b1ee5659f87fce36881.pdf
+74618fb4ce8ce0209db85cc6069fe64b1f268ff4,https://ir.canterbury.ac.nz/bitstream/handle/10092/6229/12636740_Y10_ICCSIT.pdf?isAllowed=y&sequence=1
+2ea78e128bec30fb1a623c55ad5d55bb99190bd2,http://pdfs.semanticscholar.org/2ea7/8e128bec30fb1a623c55ad5d55bb99190bd2.pdf
+167f07b9d2babb8920acfa320ab04ee2758b5db6,http://eprints.pascal-network.org/archive/00008391/01/paper_express.pdf
+4dd71a097e6b3cd379d8c802460667ee0cbc8463,http://www.dgcv.nii.ac.jp/Publications/Papers/2015/BWILD2015.pdf
+0c4659b35ec2518914da924e692deb37e96d6206,https://cs.uwaterloo.ca/~jhoey/teaching/cs793/papers/OrchardTIP10.pdf
+66886997988358847615375ba7d6e9eb0f1bb27f,https://pdfs.semanticscholar.org/6688/6997988358847615375ba7d6e9eb0f1bb27f.pdf
+3d36f941d8ec613bb25e80fb8f4c160c1a2848df,https://arxiv.org/pdf/1502.02410v1.pdf
+24cb375a998f4af278998f8dee1d33603057e525,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1A_016_ext.pdf
+0d0b880e2b531c45ee8227166a489bf35a528cb9,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Zhang_Structure_Preserving_Object_2013_CVPR_paper.pdf
+1a85956154c170daf7f15f32f29281269028ff69,http://ibug.doc.ic.ac.uk/media/uploads/documents/active_pictorial_structures.pdf
+aac101dd321e6d2199d8c0b48c543b541c181b66,http://pdfs.semanticscholar.org/aac1/01dd321e6d2199d8c0b48c543b541c181b66.pdf
+60824ee635777b4ee30fcc2485ef1e103b8e7af9,http://www.ee.surrey.ac.uk/CVSSP/Publications/papers/Feng-TIP-2015.pdf
+bc12715a1ddf1a540dab06bf3ac4f3a32a26b135,http://pdfs.semanticscholar.org/bc12/715a1ddf1a540dab06bf3ac4f3a32a26b135.pdf
+6097ea6fd21a5f86a10a52e6e4dd5b78a436d5bf,http://arxiv.org/pdf/1512.05300v3.pdf
+a32d4195f7752a715469ad99cb1e6ebc1a099de6,http://pdfs.semanticscholar.org/a32d/4195f7752a715469ad99cb1e6ebc1a099de6.pdf
+8796f2d54afb0e5c924101f54d469a1d54d5775d,http://pdfs.semanticscholar.org/8796/f2d54afb0e5c924101f54d469a1d54d5775d.pdf
+f519723238701849f1160d5a9cedebd31017da89,http://pdfs.semanticscholar.org/f519/723238701849f1160d5a9cedebd31017da89.pdf
+5fa0e6da81acece7026ac1bc6dcdbd8b204a5f0a,http://pdfs.semanticscholar.org/5fa0/e6da81acece7026ac1bc6dcdbd8b204a5f0a.pdf
+120bcc9879d953de7b2ecfbcd301f72f3a96fb87,http://www.cs.colostate.edu/~vision/pasc/docs/fg2015videoEvalPreprint.pdf
+d122d66c51606a8157a461b9d7eb8b6af3d819b0,http://pdfs.semanticscholar.org/d122/d66c51606a8157a461b9d7eb8b6af3d819b0.pdf
+0a3863a0915256082aee613ba6dab6ede962cdcd,http://pdfs.semanticscholar.org/0a38/63a0915256082aee613ba6dab6ede962cdcd.pdf
+5bfc32d9457f43d2488583167af4f3175fdcdc03,http://pdfs.semanticscholar.org/5bfc/32d9457f43d2488583167af4f3175fdcdc03.pdf
+03167776e17bde31b50f294403f97ee068515578,http://pdfs.semanticscholar.org/0316/7776e17bde31b50f294403f97ee068515578.pdf
+60040e4eae81ab6974ce12f1c789e0c05be00303,http://pdfs.semanticscholar.org/6004/0e4eae81ab6974ce12f1c789e0c05be00303.pdf
+b9cedd09bdae827dacb138d6b054449d5346caf1,http://www.cs.colostate.edu/~lui/Papers/BTAS09LUIa.pdf
+94ac3008bf6be6be6b0f5140a0bea738d4c75579,http://pdfs.semanticscholar.org/94ac/3008bf6be6be6b0f5140a0bea738d4c75579.pdf
+ade1034d5daec9e3eba1d39ae3f33ebbe3e8e9a7,http://pdfs.semanticscholar.org/ade1/034d5daec9e3eba1d39ae3f33ebbe3e8e9a7.pdf
+3830047081ef4bc787f16edf5b244cb2793f75e5,https://www.cs.drexel.edu/~kon/publication/GSchwartz_CPCV13_slides.pdf
+09750c9bbb074bbc4eb66586b20822d1812cdb20,http://mirlab.org/conference_papers/International_Conference/ICASSP%202012/pdfs/0001385.pdf
+05d80c59c6fcc4652cfc38ed63d4c13e2211d944,http://static.googleusercontent.com/external_content/untrusted_dlcp/research.google.com/en/us/pubs/archive/35389.pdf
+2ee8900bbde5d3c81b7ed4725710ed46cc7e91cd,http://research.microsoft.com/users/byzhang/publications/20-81_01.pdf
+bec31269632c17206deb90cd74367d1e6586f75f,http://pdfs.semanticscholar.org/bec3/1269632c17206deb90cd74367d1e6586f75f.pdf
+9825aa96f204c335ec23c2b872855ce0c98f9046,http://pdfs.semanticscholar.org/9825/aa96f204c335ec23c2b872855ce0c98f9046.pdf
+91067f298e1ece33c47df65236853704f6700a0b,http://pdfs.semanticscholar.org/9106/7f298e1ece33c47df65236853704f6700a0b.pdf
+6581c5b17db7006f4cc3575d04bfc6546854a785,http://pdfs.semanticscholar.org/6581/c5b17db7006f4cc3575d04bfc6546854a785.pdf
+25e05a1ea19d5baf5e642c2a43cca19c5cbb60f8,http://arxiv.org/pdf/1408.6027v2.pdf
+ec0104286c96707f57df26b4f0a4f49b774c486b,http://www.cs.newpaltz.edu/~lik/publications/Mingxing-Duan-IEEE-TIFS-2018.pdf
+4d90bab42806d082e3d8729067122a35bbc15e8d,http://pdfs.semanticscholar.org/4d90/bab42806d082e3d8729067122a35bbc15e8d.pdf
+564d4ee76c0511bc395dfc8ef8e3b3867fc34a6d,http://bcmi.sjtu.edu.cn/~pengyong/Pub2015/CCECE2015.pdf
+26ac607a101492bc86fd81a141311066cfe9e2b5,http://www.eecs.qmul.ac.uk/~hy300/papers/YangPatrasiccv2013.pdf
+4ed2d7ecb34a13e12474f75d803547ad2ad811b2,http://openaccess.thecvf.com/content_ICCV_2017/papers/Yang_Common_Action_Discovery_ICCV_2017_paper.pdf
+b656abc4d1e9c8dc699906b70d6fcd609fae8182,http://pdfs.semanticscholar.org/b656/abc4d1e9c8dc699906b70d6fcd609fae8182.pdf
+2eb37a3f362cffdcf5882a94a20a1212dfed25d9,http://pdfs.semanticscholar.org/2eb3/7a3f362cffdcf5882a94a20a1212dfed25d9.pdf
+433bb1eaa3751519c2e5f17f47f8532322abbe6d,http://pdfs.semanticscholar.org/433b/b1eaa3751519c2e5f17f47f8532322abbe6d.pdf
+a46283e90bcdc0ee35c680411942c90df130f448,http://pdfs.semanticscholar.org/a462/83e90bcdc0ee35c680411942c90df130f448.pdf
+0573f3d2754df3a717368a6cbcd940e105d67f0b,http://cs.anu.edu.au/few/EmotiW_icmi_draft_ver_1_0.pdf
+4b3dd18882ff2738aa867b60febd2b35ab34dffc,http://pdfs.semanticscholar.org/4b3d/d18882ff2738aa867b60febd2b35ab34dffc.pdf
+d794ffece3533567d838f1bd7f442afee13148fd,http://pdfs.semanticscholar.org/d794/ffece3533567d838f1bd7f442afee13148fd.pdf
+16892074764386b74b6040fe8d6946b67a246a0b,http://pdfs.semanticscholar.org/5f92/7118a5634790fe660fea91aea163b7065ae2.pdf
+9e42d44c07fbd800f830b4e83d81bdb9d106ed6b,http://openaccess.thecvf.com/content_ICCV_2017/papers/Rao_Learning_Discriminative_Aggregation_ICCV_2017_paper.pdf
+96578785836d7416bf2e9c154f687eed8f93b1e4,http://pdfs.semanticscholar.org/9657/8785836d7416bf2e9c154f687eed8f93b1e4.pdf
+86b105c3619a433b6f9632adcf9b253ff98aee87,http://www.cecs.uci.edu/~papers/icme06/pdfs/0001013.pdf
+5b86c36e3eb59c347b81125d5dd57dd2a2c377a9,http://pdfs.semanticscholar.org/5b86/c36e3eb59c347b81125d5dd57dd2a2c377a9.pdf
+a27735e4cbb108db4a52ef9033e3a19f4dc0e5fa,http://pdfs.semanticscholar.org/d965/50536f2ff505f62aec841b3656d940e7f1cf.pdf
+217a21d60bb777d15cd9328970cab563d70b5d23,http://www.cise.ufl.edu/~dihong/assets/iccv2013.pdf
+8d6c4af9d4c01ff47fe0be48155174158a9a5e08,http://pdfs.semanticscholar.org/8d6c/4af9d4c01ff47fe0be48155174158a9a5e08.pdf
+4c815f367213cc0fb8c61773cd04a5ca8be2c959,http://mirlab.org/conference_papers/International_Conference/ICASSP%202010/pdfs/0002470.pdf
+1c80bc91c74d4984e6422e7b0856cf3cf28df1fb,http://refbase.cvc.uab.es/files/xrv2014d.pdf
+486840f4f524e97f692a7f6b42cd19019ee71533,https://arxiv.org/pdf/1703.08388v2.pdf
+9e5c2d85a1caed701b68ddf6f239f3ff941bb707,http://pdfs.semanticscholar.org/ada4/4aa744f9703cacfcd0028372a2b1684a45a3.pdf
+19af008599fb17bbd9b12288c44f310881df951c,http://pdfs.semanticscholar.org/19af/008599fb17bbd9b12288c44f310881df951c.pdf
+8f60c343f76913c509ce623467bf086935bcadac,http://pdfs.semanticscholar.org/8f60/c343f76913c509ce623467bf086935bcadac.pdf
+ac1d97a465b7cc56204af5f2df0d54f819eef8a6,http://pdfs.semanticscholar.org/ac1d/97a465b7cc56204af5f2df0d54f819eef8a6.pdf
+a820941eaf03077d68536732a4d5f28d94b5864a,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Zhang_Leveraging_Datasets_With_ICCV_2015_paper.pdf
+abac0fa75281c9a0690bf67586280ed145682422,http://pdfs.semanticscholar.org/abac/0fa75281c9a0690bf67586280ed145682422.pdf
+4e7ebf3c4c0c4ecc48348a769dd6ae1ebac3bf1b,http://pdfs.semanticscholar.org/4e7e/bf3c4c0c4ecc48348a769dd6ae1ebac3bf1b.pdf
+8bfec7afcf5015017406fc04c43c1f43eb723631,http://www.umiacs.umd.edu/users/pvishalm/Journal_pub/DCS_TAC_2013.pdf
+448ed201f6fceaa6533d88b0b29da3f36235e131,http://pdfs.semanticscholar.org/aa6a/0b92c60187c7fa9923b1c8433ec99a495df7.pdf
+20be15dac7d8a5ba4688bf206ad24cab57d532d6,http://pdfs.semanticscholar.org/20be/15dac7d8a5ba4688bf206ad24cab57d532d6.pdf
+afc7092987f0d05f5685e9332d83c4b27612f964,http://ci2cv.net/media/papers/2011_AFGR_Chew.pdf
+c17a332e59f03b77921942d487b4b102b1ee73b6,http://pdfs.semanticscholar.org/c17a/332e59f03b77921942d487b4b102b1ee73b6.pdf
+c5f1ae9f46dc44624591db3d5e9f90a6a8391111,http://poseidon.csd.auth.gr/papers/PUBLISHED/CONFERENCE/pdf/Buciu_ICPR_2004.pdf
+1a96d54c326d19e32bed00642a177ea439341fa2,http://vc.cs.nthu.edu.tw/home/paper/codfiles/tychiu/200808151557/Principal_Component_Analysis_Based_on_L1-Norm_Maximization.pdf
+a949b8700ca6ba96ee40f75dfee1410c5bbdb3db,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Haase_Instance-weighted_Transfer_Learning_2014_CVPR_paper.pdf
+3dd906bc0947e56d2b7bf9530b11351bbdff2358,http://pdfs.semanticscholar.org/c57a/070724b48962935ff46ab1384d919e1d1089.pdf
+35f03f5cbcc21a9c36c84e858eeb15c5d6722309,http://www.ee.columbia.edu/ln/dvmm/publications/16/ACMMMVP_VAH_2016.pdf
+3c374cb8e730b64dacb9fbf6eb67f5987c7de3c8,http://pdfs.semanticscholar.org/3c37/4cb8e730b64dacb9fbf6eb67f5987c7de3c8.pdf
+f8ec92f6d009b588ddfbb47a518dd5e73855547d,http://pdfs.semanticscholar.org/f8ec/92f6d009b588ddfbb47a518dd5e73855547d.pdf
+01c8d7a3460422412fba04e7ee14c4f6cdff9ad7,http://pdfs.semanticscholar.org/01c8/d7a3460422412fba04e7ee14c4f6cdff9ad7.pdf
+fd4ac1da699885f71970588f84316589b7d8317b,http://pdfs.semanticscholar.org/fd4a/c1da699885f71970588f84316589b7d8317b.pdf
+4f6adc53798d9da26369bea5a0d91ed5e1314df2,http://pdfs.semanticscholar.org/4f6a/dc53798d9da26369bea5a0d91ed5e1314df2.pdf
+ff5dd6f96e108d8233220cc262bc282229c1a582,http://pdfs.semanticscholar.org/ff5d/d6f96e108d8233220cc262bc282229c1a582.pdf
+7792fbc59f3eafc709323cdb63852c5d3a4b23e9,http://pdfs.semanticscholar.org/7792/fbc59f3eafc709323cdb63852c5d3a4b23e9.pdf
+064cd41d323441209ce1484a9bba02a22b625088,http://www.ri.cmu.edu/pub_files/2013/6/stm_final.pdf
+6742c0a26315d7354ab6b1fa62a5fffaea06da14,http://pdfs.semanticscholar.org/ae08/778d8003933a02fd90a49b2e5f67ba56ad8d.pdf
+980266ad6807531fea94252e8f2b771c20e173b3,http://pdfs.semanticscholar.org/9802/66ad6807531fea94252e8f2b771c20e173b3.pdf
+4686bdcee01520ed6a769943f112b2471e436208,http://ipsjcva.springeropen.com/track/pdf/10.1186/s41074-017-0024-5?site=ipsjcva.springeropen.com
+f4c01fc79c7ead67899f6fe7b79dd1ad249f71b0,http://pdfs.semanticscholar.org/f4c0/1fc79c7ead67899f6fe7b79dd1ad249f71b0.pdf
+e30dc2abac4ecc48aa51863858f6f60c7afdf82a,http://pdfs.semanticscholar.org/e30d/c2abac4ecc48aa51863858f6f60c7afdf82a.pdf
+d647099e571f9af3a1762f895fd8c99760a3916e,http://cbim.rutgers.edu/dmdocuments/CVPR10_Peng_Yang.pdf
+eefb8768f60c17d76fe156b55b8a00555eb40f4d,http://pdfs.semanticscholar.org/eefb/8768f60c17d76fe156b55b8a00555eb40f4d.pdf
+59420fd595ae745ad62c26ae55a754b97170b01f,http://pdfs.semanticscholar.org/5942/0fd595ae745ad62c26ae55a754b97170b01f.pdf
+d50c6d22449cc9170ab868b42f8c72f8d31f9b6c,http://pdfs.semanticscholar.org/d50c/6d22449cc9170ab868b42f8c72f8d31f9b6c.pdf
+833fa04463d90aab4a9fe2870d480f0b40df446e,http://static.cs.brown.edu/~gen/pub_papers/SUN_Attribute_Database_CVPR2012.pdf
+f06b015bb19bd3c39ac5b1e4320566f8d83a0c84,http://pdfs.semanticscholar.org/f06b/015bb19bd3c39ac5b1e4320566f8d83a0c84.pdf
+f83dd9ff002a40228bbe3427419b272ab9d5c9e4,http://pdfs.semanticscholar.org/f83d/d9ff002a40228bbe3427419b272ab9d5c9e4.pdf
+0b3f354e6796ef7416bf6dde9e0779b2fcfabed2,http://pdfs.semanticscholar.org/fd60/5d123a0f777716f798f258fbbcd73d75fa8b.pdf
+85041e48b51a2c498f22850ce7228df4e2263372,http://pdfs.semanticscholar.org/8504/1e48b51a2c498f22850ce7228df4e2263372.pdf
+21e828071249d25e2edaca0596e27dcd63237346,http://research.microsoft.com/pubs/122158/cvpr2010.pdf
+98127346920bdce9773aba6a2ffc8590b9558a4a,http://disi.unitn.it/~duta/pubs/MTAP2017_Duta.pdf
+64d5772f44efe32eb24c9968a3085bc0786bfca7,http://pdfs.semanticscholar.org/64d5/772f44efe32eb24c9968a3085bc0786bfca7.pdf
+0319332ded894bf1afe43f174f5aa405b49305f0,http://pdfs.semanticscholar.org/0319/332ded894bf1afe43f174f5aa405b49305f0.pdf
+2f13dd8c82f8efb25057de1517746373e05b04c4,http://www.cfar.umd.edu/~rama/Publications/Ni_ICIP.pdf
+12cd96a419b1bd14cc40942b94d9c4dffe5094d2,http://pdfs.semanticscholar.org/12cd/96a419b1bd14cc40942b94d9c4dffe5094d2.pdf
+8a1ed5e23231e86216c9bdd62419c3b05f1e0b4d,http://pdfs.semanticscholar.org/8a1e/d5e23231e86216c9bdd62419c3b05f1e0b4d.pdf
+74e869bc7c99093a5ff9f8cfc3f533ccf1b135d8,http://pdfs.semanticscholar.org/74e8/69bc7c99093a5ff9f8cfc3f533ccf1b135d8.pdf
+808b685d09912cbef4a009e74e10476304b4cccf,http://pdfs.semanticscholar.org/808b/685d09912cbef4a009e74e10476304b4cccf.pdf
+10ab1b48b2a55ec9e2920a5397febd84906a7769,http://pdfs.semanticscholar.org/10ab/1b48b2a55ec9e2920a5397febd84906a7769.pdf
+4805f41c4f8cfb932b011dfdd7f8907152590d1a,http://www.affectiva.com/wp-content/uploads/2014/09/From_Dials_to_Facial_Coding_Automated_Detection_of_Spontaneous_Facial_Expressions_fo.pdf
+49f70f707c2e030fe16059635df85c7625b5dc7e,http://pdfs.semanticscholar.org/55b7/59b3e94088488334e3af2d17710c5e1fce4b.pdf
+7be60f8c34a16f30735518d240a01972f3530e00,http://www.cs.utexas.edu/~suyog/expression_recog.pdf
+eb9312458f84a366e98bd0a2265747aaed40b1a6,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_ICIP_2007/pdfs/0400473.pdf
+470dbd3238b857f349ebf0efab0d2d6e9779073a,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/3B_062_ext.pdf
+6fa3857faba887ed048a9e355b3b8642c6aab1d8,http://pdfs.semanticscholar.org/6fa3/857faba887ed048a9e355b3b8642c6aab1d8.pdf
+769461ff717d987482b28b32b1e2a6e46570e3ff,http://pdfs.semanticscholar.org/7694/61ff717d987482b28b32b1e2a6e46570e3ff.pdf
+ac51d9ddbd462d023ec60818bac6cdae83b66992,http://pdfs.semanticscholar.org/ac51/d9ddbd462d023ec60818bac6cdae83b66992.pdf
+554b9478fd285f2317214396e0ccd81309963efd,http://pdfs.semanticscholar.org/554b/9478fd285f2317214396e0ccd81309963efd.pdf
+533bfb82c54f261e6a2b7ed7d31a2fd679c56d18,http://biometrics.cse.msu.edu/Publications/Face/BestRowdenetal_UnconstrainedFaceRecognition_TechReport_MSU-CSE-14-1.pdf
+a125bc55bdf4bec7484111eea9ae537be314ec62,http://pdfs.semanticscholar.org/a125/bc55bdf4bec7484111eea9ae537be314ec62.pdf
+870433ba89d8cab1656e57ac78f1c26f4998edfb,https://arxiv.org/pdf/1612.04904v1.pdf
+6043006467fb3fd1e9783928d8040ee1f1db1f3a,https://www.microsoft.com/en-us/research/wp-content/uploads/2016/10/CVPR10_FaceReco.pdf
+7f6cd03e3b7b63fca7170e317b3bb072ec9889e0,http://pdfs.semanticscholar.org/7f6c/d03e3b7b63fca7170e317b3bb072ec9889e0.pdf
+4b89cf7197922ee9418ae93896586c990e0d2867,http://www.cs.cmu.edu/~ftorre/paper1.pdf
+b7b461f82c911f2596b310e2b18dd0da1d5d4491,http://mirlab.org/conference_papers/International_Conference/ICASSP%202014/papers/p2961-wang.pdf
+2042aed660796b14925db17c0a8b9fbdd7f3ebac,http://pdfs.semanticscholar.org/4a19/fd2eb09976128e33bd8f9411972146ac6c41.pdf
+3fb26f3abcf0d287243646426cd5ddeee33624d4,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Qin_Joint_Training_of_CVPR_2016_paper.pdf
+8e4808e71c9b9f852dc9558d7ef41566639137f3,http://pdfs.semanticscholar.org/8e48/08e71c9b9f852dc9558d7ef41566639137f3.pdf
+2b10a07c35c453144f22e8c539bf9a23695e85fc,http://pdfs.semanticscholar.org/2b10/a07c35c453144f22e8c539bf9a23695e85fc.pdf
+a6e43b73f9f87588783988333997a81b4487e2d5,http://pdfs.semanticscholar.org/a6e4/3b73f9f87588783988333997a81b4487e2d5.pdf
+52258ec5ec73ce30ca8bc215539c017d279517cf,http://pdfs.semanticscholar.org/5225/8ec5ec73ce30ca8bc215539c017d279517cf.pdf
+5dfebcb7bfefb1af1cfef61a151abfe98a7e7cfa,http://vision.ucsd.edu/sites/default/files/cwah_cvpr2013_unfamiliar.pdf
+2af620e17d0ed67d9ccbca624250989ce372e255,http://www.alessandrobergamo.com/data/bt_cvpr12.pdf
+0b9db62b26b811e8c24eb9edc37901a4b79a897f,https://eng.ucmerced.edu/people/cyang35/CVPR13/cvpr13_hallucination.pdf
+36fe39ed69a5c7ff9650fd5f4fe950b5880760b0,http://pdfs.semanticscholar.org/36fe/39ed69a5c7ff9650fd5f4fe950b5880760b0.pdf
+72ecaff8b57023f9fbf8b5b2588f3c7019010ca7,http://pdfs.semanticscholar.org/72ec/aff8b57023f9fbf8b5b2588f3c7019010ca7.pdf
+6b7f7817b2e5a7e7d409af2254a903fc0d6e02b6,http://pdfs.semanticscholar.org/6b7f/7817b2e5a7e7d409af2254a903fc0d6e02b6.pdf
+2baec98c19804bf19b480a9a0aa814078e28bb3d,http://eprints.eemcs.utwente.nl/26841/01/Pantic_Multi-conditional_Latent_Variable_Model.pdf
+90d735cffd84e8f2ae4d0c9493590f3a7d99daf1,http://pdfs.semanticscholar.org/90d7/35cffd84e8f2ae4d0c9493590f3a7d99daf1.pdf
+521b625eebea73b5deb171a350e3709a4910eebf,https://arxiv.org/pdf/1604.06397v1.pdf
+4353d0dcaf450743e9eddd2aeedee4d01a1be78b,http://pdfs.semanticscholar.org/4353/d0dcaf450743e9eddd2aeedee4d01a1be78b.pdf
+f77c9bf5beec7c975584e8087aae8d679664a1eb,http://pdfs.semanticscholar.org/f77c/9bf5beec7c975584e8087aae8d679664a1eb.pdf
+1c5d7d02a26aa052ecc47d301de4929083e5d320,https://www.ll.mit.edu/news/avec2014_mitll.pdf
+0c6e29d82a5a080dc1db9eeabbd7d1529e78a3dc,http://pdfs.semanticscholar.org/0c6e/29d82a5a080dc1db9eeabbd7d1529e78a3dc.pdf
+c3fb2399eb4bcec22723715556e31c44d086e054,http://mirlab.org/conference_papers/International_Conference/ICASSP%202014/papers/p499-srinivasan.pdf
+0f32df6ae76402b98b0823339bd115d33d3ec0a0,http://perceptual.mpi-inf.mpg.de/files/2015/07/Mueller15_ACII.pdf
+bf4825474673246ae855979034c8ffdb12c80a98,http://pdfs.semanticscholar.org/bf48/25474673246ae855979034c8ffdb12c80a98.pdf
+7a84368ebb1a20cc0882237a4947efc81c56c0c0,https://ibug.doc.ic.ac.uk/media/uploads/documents/iccv_final.pdf
+446a99fdedd5bb32d4970842b3ce0fc4f5e5fa03,http://www.isir.upmc.fr/files/2014ACTI3172.pdf
+1d3dd9aba79a53390317ec1e0b7cd742cba43132,http://www.cise.ufl.edu/~dihong/assets/Gong_A_Maximum_Entropy_2015_CVPR_paper.pdf
+90a754f597958a2717862fbaa313f67b25083bf9,http://pdfs.semanticscholar.org/90a7/54f597958a2717862fbaa313f67b25083bf9.pdf
+68f69e6c6c66cfde3d02237a6918c9d1ee678e1b,http://www.cs.fiu.edu/~chens/PDF/ISM09_Pruning.pdf
+1b4bc7447f500af2601c5233879afc057a5876d8,https://www.ecse.rpi.edu/~cvrl/Publication/pdf/Wang2015g.pdf
+cffebdf88e406c27b892857d1520cb2d7ccda573,http://pdfs.semanticscholar.org/cffe/bdf88e406c27b892857d1520cb2d7ccda573.pdf
+dbab6ac1a9516c360cdbfd5f3239a351a64adde7,http://pdfs.semanticscholar.org/dbab/6ac1a9516c360cdbfd5f3239a351a64adde7.pdf
+9e0285debd4b0ba7769b389181bd3e0fd7a02af6,http://pdfs.semanticscholar.org/9e02/85debd4b0ba7769b389181bd3e0fd7a02af6.pdf
+e5342233141a1d3858ed99ccd8ca0fead519f58b,http://pdfs.semanticscholar.org/e534/2233141a1d3858ed99ccd8ca0fead519f58b.pdf
+705a24f4e1766a44bbba7cf335f74229ed443c7b,http://web.ing.puc.cl/~asoto/papers/Maturana-09.pdf
+ac6a9f80d850b544a2cbfdde7002ad5e25c05ac6,http://eprints.whiterose.ac.uk/104654/9/07289412.pdf
+41d9a240b711ff76c5448d4bf4df840cc5dad5fc,https://arxiv.org/pdf/1206.2627v2.pdf
+862f2d84b4230d64ddb3e48967ad417089f2c291,http://www.umiacs.umd.edu/users/pvishalm/Conference_pub/ICIP14_landmarks.pdf
+0c93cb1af3bba1bd90a03e921ff2d55acf35c01f,http://www.researchgate.net/profile/Mohammed_Bennamoun/publication/220928947_Robust_Regression_for_Face_Recognition/links/542157f20cf203f155c65a23.pdf
+9d757c0fede931b1c6ac344f67767533043cba14,http://pdfs.semanticscholar.org/9d75/7c0fede931b1c6ac344f67767533043cba14.pdf
+12055b8f82d5411f9ad196b60698d76fbd07ac1e,https://zhzhanp.github.io/papers/TCSVT2014.pdf
+5c02bd53c0a6eb361972e8a4df60cdb30c6e3930,http://arxiv.org/pdf/1303.4893v2.pdf
+a3a97bb5131e7e67316b649bbc2432aaa1a6556e,http://pdfs.semanticscholar.org/a3a9/7bb5131e7e67316b649bbc2432aaa1a6556e.pdf
+84fe5b4ac805af63206012d29523a1e033bc827e,http://pdfs.semanticscholar.org/84fe/5b4ac805af63206012d29523a1e033bc827e.pdf
+91835984eaeb538606972de47c372c5fcfe8b6aa,http://www.cse.ust.hk/~qnature/pdf/IEEESMC2015.pdf
+fc45e44dd50915957e498186618f7a499953c6be,http://www.pami.sjtu.edu.cn/people/wangxh/Gabor%20Filter/Quaternion%20Correlation%20Filters%20for%20Face%20Recognition%20in%20Wavelet%20Domain.pdf
+4698a599425c3a6bae1c698456029519f8f2befe,http://pdfs.semanticscholar.org/4698/a599425c3a6bae1c698456029519f8f2befe.pdf
+1d696a1beb42515ab16f3a9f6f72584a41492a03,http://www.ee.cuhk.edu.hk/~xgwang/papers/sunWTcvpr15.pdf
+167ea1631476e8f9332cef98cf470cb3d4847bc6,http://www.kevinjing.com/visual_search_at_pinterest.pdf
+143f7a51058b743a0d43026a523d9bbbc1ae43a8,http://www.researchgate.net/profile/Shinichi_Satoh/publication/221368838_An_efficient_method_for_face_retrieval_from_large_video_datasets/links/0912f510a0404c605f000000.pdf
+9931c6b050e723f5b2a189dd38c81322ac0511de,http://pdfs.semanticscholar.org/9931/c6b050e723f5b2a189dd38c81322ac0511de.pdf
+2271d554787fdad561fafc6e9f742eea94d35518,http://pdfs.semanticscholar.org/2271/d554787fdad561fafc6e9f742eea94d35518.pdf
+56f86bef26209c85f2ef66ec23b6803d12ca6cd6,https://arxiv.org/pdf/1710.00307v1.pdf
+00f7f7b72a92939c36e2ef9be97397d8796ee07c,http://pdfs.semanticscholar.org/00f7/f7b72a92939c36e2ef9be97397d8796ee07c.pdf
+877100f430b72c5d60de199603ab5c65f611ce17,http://pdfs.semanticscholar.org/8771/00f430b72c5d60de199603ab5c65f611ce17.pdf
+aa0c30bd923774add6e2f27ac74acd197b9110f2,http://research.gold.ac.uk/20200/1/dplda.pdf
+79581c364cefe53bff6bdd224acd4f4bbc43d6d4,http://pdfs.semanticscholar.org/7958/1c364cefe53bff6bdd224acd4f4bbc43d6d4.pdf
+4b605e6a9362485bfe69950432fa1f896e7d19bf,http://biometrics.cse.msu.edu/Publications/Face/BlantonAllenMillerKalkaJain_CVPRWB2016_HID.pdf
+ad08c97a511091e0f59fc6a383615c0cc704f44a,http://pdfs.semanticscholar.org/ad08/c97a511091e0f59fc6a383615c0cc704f44a.pdf
+060820f110a72cbf02c14a6d1085bd6e1d994f6a,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/3B_113_ext.pdf
+59bece468ed98397d54865715f40af30221aa08c,https://bib.irb.hr/datoteka/833608.BiForD2016_11.pdf
+0f9bf5d8f9087fcba419379600b86ae9e9940013,http://pdfs.semanticscholar.org/0f9b/f5d8f9087fcba419379600b86ae9e9940013.pdf
+bc871497626afb469d25c4975aa657159269aefe,http://ir.ia.ac.cn/bitstream/173211/10560/1/Adaptive%20Learning%20Algorithm%20for%20Pattern%20Classification.pdf
+8b74252625c91375f55cbdd2e6415e752a281d10,http://epubs.surrey.ac.uk/813060/1/camgoz2016icprw.pdf
+d24d3370b2e7d254e999140024d8a7bddf701502,https://www.researchgate.net/profile/Thang_Hoang2/publication/252047382_SVM_classifier_based_face_detection_system_using_BDIP_and_BVLC_moments/links/53f0b8be0cf2711e0c431012.pdf
+0b4c4ea4a133b9eab46b217e22bda4d9d13559e6,http://www.micc.unifi.it/wp-content/uploads/2015/12/2015_morph_random_forests.pdf
+853bd61bc48a431b9b1c7cab10c603830c488e39,http://pdfs.semanticscholar.org/b8a2/0ed7e74325da76d7183d1ab77b082a92b447.pdf
+07c90e85ac0f74b977babe245dea0f0abcf177e3,http://pdfs.semanticscholar.org/07c9/0e85ac0f74b977babe245dea0f0abcf177e3.pdf
+5da740682f080a70a30dc46b0fc66616884463ec,http://pdfs.semanticscholar.org/5da7/40682f080a70a30dc46b0fc66616884463ec.pdf
+42ecfc3221c2e1377e6ff849afb705ecd056b6ff,http://pdfs.semanticscholar.org/42ec/fc3221c2e1377e6ff849afb705ecd056b6ff.pdf
+55a158f4e7c38fe281d06ae45eb456e05516af50,http://pdfs.semanticscholar.org/55a1/58f4e7c38fe281d06ae45eb456e05516af50.pdf
+6e0a05d87b3cc7e16b4b2870ca24cf5e806c0a94,http://pdfs.semanticscholar.org/6e0a/05d87b3cc7e16b4b2870ca24cf5e806c0a94.pdf
+4c8ef4f98c6c8d340b011cfa0bb65a9377107970,http://pdfs.semanticscholar.org/4c8e/f4f98c6c8d340b011cfa0bb65a9377107970.pdf
+7c3e09e0bd992d3f4670ffacb4ec3a911141c51f,http://pdfs.semanticscholar.org/7c3e/09e0bd992d3f4670ffacb4ec3a911141c51f.pdf
+ce6d60b69eb95477596535227958109e07c61e1e,http://www.rci.rutgers.edu/~vmp93/Conference_pub/BTAS_2015_FVFF_JunCheng_Chen.pdf
+43010792bf5cdb536a95fba16b8841c534ded316,https://www.comp.nus.edu.sg/~tsim/documents/general-face-motion.pdf
+4d0b3921345ae373a4e04f068867181647d57d7d,http://people.cs.pitt.edu/~kovashka/murrugarra_llerena_kovashka_wacv2017_slides.pdf
+834f5ab0cb374b13a6e19198d550e7a32901a4b2,http://pdfs.semanticscholar.org/834f/5ab0cb374b13a6e19198d550e7a32901a4b2.pdf
+1a3eee980a2252bb092666cf15dd1301fa84860e,https://www.uv.es/vista/vistavalencia/papers/ICIP09_GPCA.pdf
+1ac2882559a4ff552a1a9956ebeadb035cb6df5b,http://www.pitt.edu/~jeffcohn/biblio/TrainData.pdf
+62c435bc714f13a373926e3b1914786592ed1fef,http://assistech.iitd.ernet.in/mavi-embedded-device.pdf
+2f2406551c693d616a840719ae1e6ea448e2f5d3,http://biometrics.cse.msu.edu/Presentations/CharlesOtto_ICB13_AgeEstimationFaceImages_HumanVsMachinePerformance.pdf
+2ac21d663c25d11cda48381fb204a37a47d2a574,http://pdfs.semanticscholar.org/2ac2/1d663c25d11cda48381fb204a37a47d2a574.pdf
+d5e1173dcb2a51b483f86694889b015d55094634,http://pdfs.semanticscholar.org/d5e1/173dcb2a51b483f86694889b015d55094634.pdf
+1a41e5d93f1ef5b23b95b7163f5f9aedbe661394,http://pdfs.semanticscholar.org/1a41/e5d93f1ef5b23b95b7163f5f9aedbe661394.pdf
+2d080662a1653f523321974a57518e7cb67ecb41,http://pdfs.semanticscholar.org/2d08/0662a1653f523321974a57518e7cb67ecb41.pdf
+17d01f34dfe2136b404e8d7f59cebfb467b72b26,http://pdfs.semanticscholar.org/4cfb/51d3b8478d7e63ba2661385337abf94d2c48.pdf
+83ac942d71ba908c8d76fc68de6173151f012b38,http://pdfs.semanticscholar.org/83ac/942d71ba908c8d76fc68de6173151f012b38.pdf
+0431e8a01bae556c0d8b2b431e334f7395dd803a,https://people.cs.umass.edu/~smaji/papers/localized-wacv15.pdf
+286adff6eff2f53e84fe5b4d4eb25837b46cae23,http://pdfs.semanticscholar.org/b17e/61972e674f8f734bd428cb882a9bb797abe2.pdf
+2c883977e4292806739041cf8409b2f6df171aee,http://pdfs.semanticscholar.org/c5fb/ef530eb28d4f787990e0b962a6a68e420e49.pdf
+7c42371bae54050dbbf7ded1e7a9b4109a23a482,http://pdfs.semanticscholar.org/7c42/371bae54050dbbf7ded1e7a9b4109a23a482.pdf
+00a3cfe3ce35a7ffb8214f6db15366f4e79761e3,http://engineering.cae.cn/fitee/fileup/2095-9184/SUPPL/20150414135701.pdf
+96e731e82b817c95d4ce48b9e6b08d2394937cf8,http://arxiv.org/pdf/1508.01722v2.pdf
+d0d7671c816ed7f37b16be86fa792a1b29ddd79b,http://pdfs.semanticscholar.org/d0d7/671c816ed7f37b16be86fa792a1b29ddd79b.pdf
+3e04feb0b6392f94554f6d18e24fadba1a28b65f,http://pdfs.semanticscholar.org/b72c/5119c0aafa64f32e8e773638b5738f31b33c.pdf
+57ebeff9273dea933e2a75c306849baf43081a8c,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Sun_Deep_Convolutional_Network_2013_CVPR_paper.pdf
+0a4f3a423a37588fde9a2db71f114b293fc09c50,http://pdfs.semanticscholar.org/0a4f/3a423a37588fde9a2db71f114b293fc09c50.pdf
+57fd229097e4822292d19329a17ceb013b2cb648,http://pdfs.semanticscholar.org/57fd/229097e4822292d19329a17ceb013b2cb648.pdf
+176f26a6a8e04567ea71677b99e9818f8a8819d0,http://pdfs.semanticscholar.org/176f/26a6a8e04567ea71677b99e9818f8a8819d0.pdf
+473366f025c4a6e0783e6174ca914f9cb328fe70,http://pdfs.semanticscholar.org/f021/cbfa5f3483889c3980b62c6cec329c8c5aec.pdf
+7ed2c84fdfc7d658968221d78e745dfd1def6332,http://pdfs.semanticscholar.org/7ed2/c84fdfc7d658968221d78e745dfd1def6332.pdf
+f355e54ca94a2d8bbc598e06e414a876eb62ef99,http://pdfs.semanticscholar.org/f355/e54ca94a2d8bbc598e06e414a876eb62ef99.pdf
+936227f7483938097cc1cdd3032016df54dbd5b6,http://pdfs.semanticscholar.org/9362/27f7483938097cc1cdd3032016df54dbd5b6.pdf
+13940d0cc90dbf854a58f92d533ce7053aac024a,http://pdfs.semanticscholar.org/949c/a8a6997aba88a162a36d48047f35ba8d0aab.pdf
+a56c1331750bf3ac33ee07004e083310a1e63ddc,http://pdfs.semanticscholar.org/de99/1e4c18c21b3cdf6389b439c88709d62f4252.pdf
+9854145f2f64d52aac23c0301f4bb6657e32e562,http://www.ucsp.edu.pe/sibgrapi2013/eproceedings/technical/114953_2.pdf
+65b1760d9b1541241c6c0222cc4ee9df078b593a,http://pdfs.semanticscholar.org/65b1/760d9b1541241c6c0222cc4ee9df078b593a.pdf
+47f8b3b3f249830b6e17888df4810f3d189daac1,http://pdfs.semanticscholar.org/fd44/c0c238fe90d6ca61864010abd94768fcde0c.pdf
+78d645d5b426247e9c8f359694080186681f57db,http://pdfs.semanticscholar.org/78d6/45d5b426247e9c8f359694080186681f57db.pdf
+a5ade88747fa5769c9c92ffde9b7196ff085a9eb,http://face.cs.kit.edu/download/publications/gehrig-emotiw2013.pdf
+e726174d516605f80ff359e71f68b6e8e6ec6d5d,http://pdfs.semanticscholar.org/e726/174d516605f80ff359e71f68b6e8e6ec6d5d.pdf
+04dcdb7cb0d3c462bdefdd05508edfcff5a6d315,http://pdfs.semanticscholar.org/04dc/db7cb0d3c462bdefdd05508edfcff5a6d315.pdf
+1246534c3104da030fdb9e041819257e0d57dcbf,http://home.isr.uc.pt/~joaoluis/papers/cvpr2015_2.pdf
+3312eb79e025b885afe986be8189446ba356a507,http://pdfs.semanticscholar.org/6007/292075f8a8538fa6f4c3d7a8676a595ab1f4.pdf
+a8e75978a5335fd3deb04572bb6ca43dbfad4738,http://pdfs.semanticscholar.org/a8e7/5978a5335fd3deb04572bb6ca43dbfad4738.pdf
+5c7adde982efb24c3786fa2d1f65f40a64e2afbf,http://pdfs.semanticscholar.org/bd40/dee4f2bbb0e512575cc96a0e3a7918a0ce42.pdf
+f66f3d1e6e33cb9e9b3315d3374cd5f121144213,http://pdfs.semanticscholar.org/f66f/3d1e6e33cb9e9b3315d3374cd5f121144213.pdf
+05a0d04693b2a51a8131d195c68ad9f5818b2ce1,http://pdfs.semanticscholar.org/05a0/d04693b2a51a8131d195c68ad9f5818b2ce1.pdf
+58bb77dff5f6ee0fb5ab7f5079a5e788276184cc,https://ram-lab.com/papers/2016/rcar_lyp_192.pdf
+87dd3fd36bccbe1d5f1484ac05f1848b51c6eab5,http://pdfs.semanticscholar.org/87dd/3fd36bccbe1d5f1484ac05f1848b51c6eab5.pdf
+630d1728435a529d0b0bfecb0e7e335f8ea2596d,http://pdfs.semanticscholar.org/aa67/719e839d035e4d67e4434794b6cccaf091d6.pdf
+2fa057a20a2b4a4f344988fee0a49fce85b0dc33,http://next.comp.nus.edu.sg/sites/default/files/publication-attachments/eHeritage.pdf
+6cddc7e24c0581c50adef92d01bb3c73d8b80b41,http://users.soe.ucsc.edu/~milanfar/publications/journal/TIFS_Final.pdf
+3c0bbfe664fb083644301c67c04a7f1331d9515f,http://pdfs.semanticscholar.org/3c0b/bfe664fb083644301c67c04a7f1331d9515f.pdf
+20cfb4136c1a984a330a2a9664fcdadc2228b0bc,http://www.eecs.harvard.edu/~htk/publication/2015-amfg-chen-comiter-kung-mcdanel.pdf
+17579791ead67262fcfb62ed8765e115fb5eca6f,http://pdfs.semanticscholar.org/1757/9791ead67262fcfb62ed8765e115fb5eca6f.pdf
+edd7504be47ebc28b0d608502ca78c0aea6a65a2,http://pdfs.semanticscholar.org/edd7/504be47ebc28b0d608502ca78c0aea6a65a2.pdf
+15affdcef4bb9d78b2d3de23c9459ee5b7a43fcb,http://feiwang03.googlepages.com/CVPRposter.pdf
+bb22104d2128e323051fb58a6fe1b3d24a9e9a46,http://pdfs.semanticscholar.org/bb22/104d2128e323051fb58a6fe1b3d24a9e9a46.pdf
+08d2f655361335bdd6c1c901642981e650dff5ec,http://dro.deakin.edu.au/eserv/DU:30058435/arandjelovic-automaticcastlisting-2006.pdf
+2cf9088e9faa81872b355a4ea0a9fae46d3c8a08,http://www.cvg.unibe.ch/tpapadhimitri/tech.pdf
+13841d54c55bd74964d877b4b517fa94650d9b65,http://www98.griffith.edu.au/dspace/bitstream/handle/10072/30001/60226_1.pdf?sequence=1
+276dbb667a66c23545534caa80be483222db7769,http://pdfs.semanticscholar.org/276d/bb667a66c23545534caa80be483222db7769.pdf
+d0ac9913a3b1784f94446db2f1fb4cf3afda151f,http://pdfs.semanticscholar.org/d0ac/9913a3b1784f94446db2f1fb4cf3afda151f.pdf
+cc9057d2762e077c53e381f90884595677eceafa,http://pdfs.semanticscholar.org/cc90/57d2762e077c53e381f90884595677eceafa.pdf
+009a18d04a5e3ec23f8ffcfc940402fd8ec9488f,http://pdfs.semanticscholar.org/009a/18d04a5e3ec23f8ffcfc940402fd8ec9488f.pdf
+98142103c311b67eeca12127aad9229d56b4a9ff,http://pdfs.semanticscholar.org/9814/2103c311b67eeca12127aad9229d56b4a9ff.pdf
+205af28b4fcd6b569d0241bb6b255edb325965a4,http://pdfs.semanticscholar.org/205a/f28b4fcd6b569d0241bb6b255edb325965a4.pdf
+0c3f7272a68c8e0aa6b92d132d1bf8541c062141,http://pdfs.semanticscholar.org/0c3f/7272a68c8e0aa6b92d132d1bf8541c062141.pdf
+a4c430b7d849a8f23713dc283794d8c1782198b2,http://pdfs.semanticscholar.org/a4c4/30b7d849a8f23713dc283794d8c1782198b2.pdf
+fe7e3cc1f3412bbbf37d277eeb3b17b8b21d71d5,http://pdfs.semanticscholar.org/fe7e/3cc1f3412bbbf37d277eeb3b17b8b21d71d5.pdf
+749382d19bfe9fb8d0c5e94d0c9b0a63ab531cb7,http://pdfs.semanticscholar.org/7493/82d19bfe9fb8d0c5e94d0c9b0a63ab531cb7.pdf
+8557914593e8540fcdd9b11aef076f68d41d3b4b,http://elwilber.com/papers/ecodes-2014.pdf
+1a167e10fe57f6d6eff0bb9e45c94924d9347a3e,http://disi.unitn.it/~duta/pubs/ICPR2016_Duta.pdf
+93cbb3b3e40321c4990c36f89a63534b506b6daf,http://www.cs.wisc.edu/~gdguo/myPapersOnWeb/IEEESMC05Guo.pdf
+451c42da244edcb1088e3c09d0f14c064ed9077e,https://ibug.doc.ic.ac.uk/media/uploads/documents/sdnmf_conf.pdf
+57ee3a8b0cafe211d1e9b477d210bb78b9d43bc1,http://www.cs.toronto.edu/~rfm/pubs/morphBM.pdf
+679b7fa9e74b2aa7892eaea580def6ed4332a228,http://pdfs.semanticscholar.org/679b/7fa9e74b2aa7892eaea580def6ed4332a228.pdf
+050a3346e44ca720a54afbf57d56b1ee45ffbe49,https://www.d2.mpi-inf.mpg.de/sites/default/files/cvpr16.pdf
+a9286519e12675302b1d7d2fe0ca3cc4dc7d17f6,http://pdfs.semanticscholar.org/a928/6519e12675302b1d7d2fe0ca3cc4dc7d17f6.pdf
+80a6bb337b8fdc17bffb8038f3b1467d01204375,http://pdfs.semanticscholar.org/80a6/bb337b8fdc17bffb8038f3b1467d01204375.pdf
+7ad1638f7d76c7e885bc84cd694c60f109f02159,https://www.researchgate.net/profile/Wen-Jing_Yan/publication/236120483_Face_Recognition_and_Micro-expression_Recognition_Based_on_Discriminant_Tensor_Subspace_Analysis_Plus_Extreme_Learning_Machine/links/0deec51adcddd72a4f000000.pdf
+77a9b1856ebbc9a6170ee4c572a515d6db062cef,http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/1291.pdf
+38cc2f1c13420170c7adac30f9dfac69b297fb76,http://pdfs.semanticscholar.org/38cc/2f1c13420170c7adac30f9dfac69b297fb76.pdf
+8d8461ed57b81e05cc46be8e83260cd68a2ebb4d,http://pdfs.semanticscholar.org/8d84/61ed57b81e05cc46be8e83260cd68a2ebb4d.pdf
+044fdb693a8d96a61a9b2622dd1737ce8e5ff4fa,http://www.ee.oulu.fi/mvg/files/pdf/pdf_740.pdf
+86d1fbaecd02b44309383830e6d985dc09e786aa,http://feng-xu.com/papers/ExpressionSynthesis_CVPR.pdf
+04bb3fa0824d255b01e9db4946ead9f856cc0b59,http://pdfs.semanticscholar.org/c1de/db5ac05c955e53d7ef1f6367fb7badea49b1.pdf
+59d8fa6fd91cdb72cd0fa74c04016d79ef5a752b,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Zafeiriou_The_Menpo_Facial_CVPR_2017_paper.pdf
+1b589016fbabe607a1fb7ce0c265442be9caf3a9,http://pdfs.semanticscholar.org/5efe/b55fe3f03cd16aa0c268d74a5ad2e03170cf.pdf
+8210fd10ef1de44265632589f8fc28bc439a57e6,http://www.ytzhang.net/files/publications/2015-tifs-sup-ae.pdf
+16c884be18016cc07aec0ef7e914622a1a9fb59d,http://pdfs.semanticscholar.org/16c8/84be18016cc07aec0ef7e914622a1a9fb59d.pdf
+55e18e0dde592258882134d2dceeb86122b366ab,http://pdfs.semanticscholar.org/f863/ba982068d676084032146e8053d4791114e9.pdf
+795ea140df2c3d29753f40ccc4952ef24f46576c,http://pdfs.semanticscholar.org/795e/a140df2c3d29753f40ccc4952ef24f46576c.pdf
+61f1b14f04d2fa1d8a556adbdf93050b4637f44b,http://www.caam.rice.edu/~wy1/paperfiles/T.Chen%20W.Yin%20X.Zhou%20D.Comaniciu%20T.Huang%20-%20Total%20variation%20models%20for%20variable%20lighting%20face%20recognition.pdf
+08d55271589f989d90a7edce3345f78f2468a7e0,https://arxiv.org/pdf/1704.03373v1.pdf
+c2c5206f6a539b02f5d5a19bdb3a90584f7e6ba4,http://pdfs.semanticscholar.org/c2c5/206f6a539b02f5d5a19bdb3a90584f7e6ba4.pdf
+19878141fbb3117d411599b1a74a44fc3daf296d,http://pdfs.semanticscholar.org/1987/8141fbb3117d411599b1a74a44fc3daf296d.pdf
+eb7b387a3a006609b89ca5ed0e6b3a1d5ecb5e5a,http://pdfs.semanticscholar.org/eb7b/387a3a006609b89ca5ed0e6b3a1d5ecb5e5a.pdf
+a6583c8daa7927eedb3e892a60fc88bdfe89a486,http://pdfs.semanticscholar.org/a658/3c8daa7927eedb3e892a60fc88bdfe89a486.pdf
+055de0519da7fdf27add848e691087e0af166637,http://pdfs.semanticscholar.org/d3f9/cf3fb66326e456587acb18cf3196d1e314c7.pdf
+fc5bdb98ff97581d7c1e5eb2d24d3f10714aa192,http://pdfs.semanticscholar.org/fc5b/db98ff97581d7c1e5eb2d24d3f10714aa192.pdf
+0115f260069e2e501850a14845feb400142e2443,http://pdfs.semanticscholar.org/0115/f260069e2e501850a14845feb400142e2443.pdf
+a0848d7b1bb43f4b4f1b4016e58c830f40944817,http://lhncbc.nlm.nih.gov/system/files/pub8893.pdf
+40a1935753cf91f29ffe25f6c9dde2dc49bf2a3a,http://pdfs.semanticscholar.org/cea3/8a329e98900923e9c962b0d58bf8e15405d6.pdf
+3cd5da596060819e2b156e8b3a28331ef633036b,http://pdfs.semanticscholar.org/3cd5/da596060819e2b156e8b3a28331ef633036b.pdf
+1e917fe7462445996837934a7e46eeec14ebc65f,http://pdfs.semanticscholar.org/1e91/7fe7462445996837934a7e46eeec14ebc65f.pdf
+365f67fe670bf55dc9ccdcd6888115264b2a2c56,http://pdfs.semanticscholar.org/f431/d3d7a0323bf1150420c826dade2093a7dfa1.pdf
+0e49a23fafa4b2e2ac097292acf00298458932b4,http://pdfs.semanticscholar.org/0e49/a23fafa4b2e2ac097292acf00298458932b4.pdf
+9c7444c6949427994b430787a153d5cceff46d5c,http://pdfs.semanticscholar.org/9c74/44c6949427994b430787a153d5cceff46d5c.pdf
+80bd795930837330e3ced199f5b9b75398336b87,http://pdfs.semanticscholar.org/80bd/795930837330e3ced199f5b9b75398336b87.pdf
+d8722ffbca906a685abe57f3b7b9c1b542adfa0c,http://pdfs.semanticscholar.org/d872/2ffbca906a685abe57f3b7b9c1b542adfa0c.pdf
+5db075a308350c083c3fa6722af4c9765c4b8fef,http://pdfs.semanticscholar.org/5db0/75a308350c083c3fa6722af4c9765c4b8fef.pdf
+0ac442bb570b086d04c4d51a8410fcbfd0b1779d,http://www.nec-labs.com/uploads/images/Department-Images/MediaAnalytics/papers/cvpr16_warpnet.pdf
+9f8ebf149aed8a0eda5c3375c9947c6b26eb7873,http://www.cais.ntu.edu.sg/~chhoi/paper_pdf/fp21-wang.pdf
+61084a25ebe736e8f6d7a6e53b2c20d9723c4608,http://pdfs.semanticscholar.org/6108/4a25ebe736e8f6d7a6e53b2c20d9723c4608.pdf
+d94d7ff6f46ad5cab5c20e6ac14c1de333711a0c,http://mirlab.org/conference_papers/International_Conference/ICASSP%202017/pdfs/0003031.pdf
+ff60d4601adabe04214c67e12253ea3359f4e082,http://pdfs.semanticscholar.org/ff60/d4601adabe04214c67e12253ea3359f4e082.pdf
+1e8eee51fd3bf7a9570d6ee6aa9a09454254689d,http://www.cse.msu.edu/rgroups/biometrics/Publications/Face/WangOttoJain_FaceSearchAtScale_TPAMI.pdf
+c3bcc4ee9e81ce9c5c0845f34e9992872a8defc0,http://pdfs.semanticscholar.org/c3bc/c4ee9e81ce9c5c0845f34e9992872a8defc0.pdf
+3af130e2fd41143d5fc49503830bbd7bafd01f8b,http://pdfs.semanticscholar.org/db76/002794c12e5febc30510de58b54bb9344ea9.pdf
+4d423acc78273b75134e2afd1777ba6d3a398973,http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf
+afef2b1d35fb807f422cfec0a370f7d08d4651d1,http://www.researchgate.net/profile/Dong_Yi3/publication/228853254_A_robust_eye_localization_method_for_low_quality_face_images/links/0912f509c4d7ec1630000000.pdf
+33ad23377eaead8955ed1c2b087a5e536fecf44e,http://vis-www.cs.umass.edu/papers/gloc_cvpr13.pdf
+282a3ee79a08486f0619caf0ada210f5c3572367,http://pdfs.semanticscholar.org/282a/3ee79a08486f0619caf0ada210f5c3572367.pdf
+dff838ba0567ef0a6c8fbfff9837ea484314efc6,http://pdfs.semanticscholar.org/dff8/38ba0567ef0a6c8fbfff9837ea484314efc6.pdf
+a0dc68c546e0fc72eb0d9ca822cf0c9ccb4b4c4f,http://www.cs.columbia.edu/~neeraj/base/papers/nk_ijcb2011_fusion.pdf
+e52be9a083e621d9ed29c8e9914451a6a327ff59,http://pdfs.semanticscholar.org/e52b/e9a083e621d9ed29c8e9914451a6a327ff59.pdf
+5b6d05ce368e69485cb08dd97903075e7f517aed,http://pdfs.semanticscholar.org/5b6d/05ce368e69485cb08dd97903075e7f517aed.pdf
+17ded725602b4329b1c494bfa41527482bf83a6f,http://pdfs.semanticscholar.org/cb10/434a5d68ffbe9ed0498771192564ecae8894.pdf
+0ff23392e1cb62a600d10bb462d7a1f171f579d0,http://www.umiacs.umd.edu/~jhchoi/paper/icpr2014_slide.pdf
+0e8760fc198a7e7c9f4193478c0e0700950a86cd,http://pdfs.semanticscholar.org/0e87/60fc198a7e7c9f4193478c0e0700950a86cd.pdf
+19fb5e5207b4a964e5ab50d421e2549ce472baa8,http://mmi.tudelft.nl/sites/default/files/e-FEDCompSys14final.pdf
+9963c73b03e4649959f021ef6f4fb1eac0b617d2,http://eeeweba.ntu.edu.sg/computervision/Research%20Papers/2017/Person%20Re-identification%20Using%20Multiple%20Egocentric%20Views.pdf
+574751dbb53777101502419127ba8209562c4758,http://pdfs.semanticscholar.org/5747/51dbb53777101502419127ba8209562c4758.pdf
+3f4bfa4e3655ef392eb5ad609d31c05f29826b45,http://pdfs.semanticscholar.org/3f4b/fa4e3655ef392eb5ad609d31c05f29826b45.pdf
+c32fb755856c21a238857b77d7548f18e05f482d,http://pdfs.semanticscholar.org/c32f/b755856c21a238857b77d7548f18e05f482d.pdf
+2be0ab87dc8f4005c37c523f712dd033c0685827,http://www3.ntu.edu.sg/home/EXDJiang/ICIP2013_4.pdf
+bd0201b32e7eca7818468f2b5cb1fb4374de75b9,http://pdfs.semanticscholar.org/bd02/01b32e7eca7818468f2b5cb1fb4374de75b9.pdf
+45efd6c2dd4ca19eed38ceeb7c2c5568231451e1,http://pdfs.semanticscholar.org/45ef/d6c2dd4ca19eed38ceeb7c2c5568231451e1.pdf
+01d23cbac762b0e46251f5dbde08f49f2d13b9f8,http://pdfs.semanticscholar.org/01d2/3cbac762b0e46251f5dbde08f49f2d13b9f8.pdf
+1e6ed6ca8209340573a5e907a6e2e546a3bf2d28,http://arxiv.org/pdf/1607.01450v1.pdf
+cfd8c66e71e98410f564babeb1c5fd6f77182c55,http://pdfs.semanticscholar.org/cfd8/c66e71e98410f564babeb1c5fd6f77182c55.pdf
+1bd50926079e68a6e32dc4412e9d5abe331daefb,https://pdfs.semanticscholar.org/544d/6cd24db5adad8453033e0cc1aa7d3d6224ab.pdf
+1b27ca161d2e1d4dd7d22b1247acee5c53db5104,http://pdfs.semanticscholar.org/1b27/ca161d2e1d4dd7d22b1247acee5c53db5104.pdf
+458677de7910a5455283a2be99f776a834449f61,http://pdfs.semanticscholar.org/4586/77de7910a5455283a2be99f776a834449f61.pdf
+8a09668efc95eafd6c3056ff1f0fbc43bb5774db,http://sist.sysu.edu.cn/~zhwshi/Research/PreprintVersion/Robust%20Principal%20Component%20Analysis%20Based%20on%20Maximum%20Correntropy%20Criterion.pdf
+01733018a79aa447a27f269a1b9a58cd5f39603e,http://vc.sce.ntu.edu.sg/index_files/Semi-supervised%20Bilinear%20Subspace%20Learning.pdf
+399a2c23bd2592ebe20aa35a8ea37d07c14199da,http://pdfs.semanticscholar.org/399a/2c23bd2592ebe20aa35a8ea37d07c14199da.pdf
+471befc1b5167fcfbf5280aa7f908eff0489c72b,http://poseidon.csd.auth.gr/papers/PUBLISHED/JOURNAL/pdf/Goudelis07a.pdf
+f558af209dd4c48e4b2f551b01065a6435c3ef33,http://pdfs.semanticscholar.org/f558/af209dd4c48e4b2f551b01065a6435c3ef33.pdf
+07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1,http://www.vast.uccs.edu/~tboult/PAPERS/BTAS13-Sapkota-Boult-UCCSFaceDB.pdf
+060034b59275c13746413ca9c67d6304cba50da6,http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W14/papers/Murthy_Ordered_Trajectories_for_2013_ICCV_paper.pdf
+8bbbdff11e88327816cad3c565f4ab1bb3ee20db,https://eprints.soton.ac.uk/410731/1/FG_soton_paper.pdf
+025720574ef67672c44ba9e7065a83a5d6075c36,http://pdfs.semanticscholar.org/915f/dd2fdc7880074bd1c1d596f7e7d19ab34e8f.pdf
+816bd8a7f91824097f098e4f3e0f4b69f481689d,http://www.vision.ee.ethz.ch/en/publications/papers/proceedings/eth_biwi_00334.pdf
+b4362cd87ad219790800127ddd366cc465606a78,http://pdfs.semanticscholar.org/b436/2cd87ad219790800127ddd366cc465606a78.pdf
+aeeea6eec2f063c006c13be865cec0c350244e5b,http://pdfs.semanticscholar.org/aeee/a6eec2f063c006c13be865cec0c350244e5b.pdf
+d708ce7103a992634b1b4e87612815f03ba3ab24,http://pdfs.semanticscholar.org/d708/ce7103a992634b1b4e87612815f03ba3ab24.pdf
+4217473596b978f13a211cdf47b7d3f6588c785f,http://biometrics.cse.msu.edu/Publications/Face/OttoKlareJain_EfficientApproachClusteringFaceImages_ICB15.pdf
+780557daaa39a445b24c41f637d5fc9b216a0621,http://www.ee.columbia.edu/ln/dvmm/publications/15/EventNetDemo.pdf
+72a87f509817b3369f2accd7024b2e4b30a1f588,http://hal.inria.fr/docs/00/75/05/89/PDF/paa2010last.pdf
+41c97af4801ac302f09902aeec2af17b481563ab,http://eeeweba.ntu.edu.sg/computervision/Research%20Papers/2016/Collaborative%20Multi-View%20Metric%20Learning%20for%20Visual%20Classification.pdf
+76ce3d35d9370f0e2e27cfd29ea0941f1462895f,http://pdfs.semanticscholar.org/76ce/3d35d9370f0e2e27cfd29ea0941f1462895f.pdf
+0c79a39a870d9b56dc00d5252d2a1bfeb4c295f1,http://faculty.iiit.ac.in/~anoop/papers/Vijay2014Face.pdf
+94498fae459167841e8b2f4b911493fc3c7da22f,http://www-ee.ccny.cuny.edu/www/web/yltian/Publications/cvpr2016_ROF.pdf
+14fa27234fa2112014eda23da16af606db7f3637,http://pdfs.semanticscholar.org/14fa/27234fa2112014eda23da16af606db7f3637.pdf
+df674dc0fc813c2a6d539e892bfc74f9a761fbc8,http://pdfs.semanticscholar.org/df67/4dc0fc813c2a6d539e892bfc74f9a761fbc8.pdf
+6c8c7065d1041146a3604cbe15c6207f486021ba,http://pdfs.semanticscholar.org/6c8c/7065d1041146a3604cbe15c6207f486021ba.pdf
+747d5fe667519acea1bee3df5cf94d9d6f874f20,http://pdfs.semanticscholar.org/747d/5fe667519acea1bee3df5cf94d9d6f874f20.pdf
+76e2d7621019bd45a5851740bd2742afdcf62837,http://pdfs.semanticscholar.org/76e2/d7621019bd45a5851740bd2742afdcf62837.pdf
+329394480fc5e9e96de4250cc1a2b060c3677c94,https://arxiv.org/pdf/1604.08826v1.pdf
+45dbf1b6fbc7fdae09e2a1928b18fbfff331a979,http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0854.pdf
+3edb0fa2d6b0f1984e8e2c523c558cb026b2a983,http://cs.nju.edu.cn/zhouzh/zhouzh.files/publication/tpami07.pdf
+026b5b8062e5a8d86c541cfa976f8eee97b30ab8,http://www.iab-rubric.org/papers/deeplearningvideo-CR.pdf
+56c700693b63e3da3b985777da6d9256e2e0dc21,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1A_079.pdf
+28b5b5f20ad584e560cd9fb4d81b0a22279b2e7b,http://pdfs.semanticscholar.org/28b5/b5f20ad584e560cd9fb4d81b0a22279b2e7b.pdf
+2a35d20b2c0a045ea84723f328321c18be6f555c,http://pdfs.semanticscholar.org/d1be/cba3c460892453939f9f3639d8beddf2a133.pdf
+ce933821661a0139a329e6c8243e335bfa1022b1,http://pdfs.semanticscholar.org/ce93/3821661a0139a329e6c8243e335bfa1022b1.pdf
+776835eb176ed4655d6e6c308ab203126194c41e,http://pdfs.semanticscholar.org/7768/35eb176ed4655d6e6c308ab203126194c41e.pdf
+af13c355a2a14bb74847aedeafe990db3fc9cbd4,http://publications.idiap.ch/downloads/papers/2015/Chavez-Martinez_MUM2015_2015.pdf
+23fc83c8cfff14a16df7ca497661264fc54ed746,http://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf
+5721216f2163d026e90d7cd9942aeb4bebc92334,http://pdfs.semanticscholar.org/5721/216f2163d026e90d7cd9942aeb4bebc92334.pdf
+2d0363a3ebda56d91d704d5ff5458a527775b609,http://pdfs.semanticscholar.org/2e07/a4c0f87ac078fcccf057d109f9387f4703a9.pdf
+05e658fed4a1ce877199a4ce1a8f8cf6f449a890,http://pdfs.semanticscholar.org/05e6/58fed4a1ce877199a4ce1a8f8cf6f449a890.pdf
+19808134b780b342e21f54b60095b181dfc7a600,http://www.openu.ac.il/home/hassner/projects/siftscales/HassneretalTPAMI16.pdf
+37ce1d3a6415d6fc1760964e2a04174c24208173,http://www.cse.msu.edu/~liuxm/publication/Jourabloo_Liu_ICCV2015.pdf
+36a3a96ef54000a0cd63de867a5eb7e84396de09,http://www.cs.toronto.edu/~guerzhoy/oriviz/crv17.pdf
+0d3bb75852098b25d90f31d2f48fd0cb4944702b,http://stefan.winklerbros.net/Publications/icip2014a.pdf
+78f79c83b50ff94d3e922bed392737b47f93aa06,http://mplab.ucsd.edu/wp-content/uploads/2011-LittlewortEtAl-FG-CERT.pdf
+b73fdae232270404f96754329a1a18768974d3f6,http://pdfs.semanticscholar.org/b73f/dae232270404f96754329a1a18768974d3f6.pdf
+0e986f51fe45b00633de9fd0c94d082d2be51406,http://vision.ics.uci.edu/papers/ZhuR_CVPR_2012/ZhuR_CVPR_2012.pdf
+097f674aa9e91135151c480734dda54af5bc4240,http://pdfs.semanticscholar.org/097f/674aa9e91135151c480734dda54af5bc4240.pdf
+3dc522a6576c3475e4a166377cbbf4ba389c041f,http://pdfs.semanticscholar.org/3dc5/22a6576c3475e4a166377cbbf4ba389c041f.pdf
+2c4b96f6c1a520e75eb37c6ee8b844332bc0435c,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w12/papers/Leo_Automatic_Emotion_Recognition_ICCV_2015_paper.pdf
+10fcbf30723033a5046db791fec2d3d286e34daa,http://pdfs.semanticscholar.org/10fc/bf30723033a5046db791fec2d3d286e34daa.pdf
+21258aa3c48437a2831191b71cd069c05fb84cf7,http://pdfs.semanticscholar.org/2125/8aa3c48437a2831191b71cd069c05fb84cf7.pdf
+c05441dd1bc418fb912a6fafa84c0659a6850bf0,http://pdfs.semanticscholar.org/c054/41dd1bc418fb912a6fafa84c0659a6850bf0.pdf
+85639cefb8f8deab7017ce92717674d6178d43cc,http://pdfs.semanticscholar.org/8563/9cefb8f8deab7017ce92717674d6178d43cc.pdf
+142e5b4492bc83b36191be4445ef0b8b770bf4b0,http://pdfs.semanticscholar.org/142e/5b4492bc83b36191be4445ef0b8b770bf4b0.pdf
+10e12d11cb98ffa5ae82343f8904cfe321ae8004,http://pdfs.semanticscholar.org/10e1/2d11cb98ffa5ae82343f8904cfe321ae8004.pdf
+06f8aa1f436a33014e9883153b93581eea8c5c70,http://pdfs.semanticscholar.org/8926/471921ff608f70c6c81777782974a91086ae.pdf
+4a1a5316e85528f4ff7a5f76699dfa8c70f6cc5c,http://pdfs.semanticscholar.org/4a1a/5316e85528f4ff7a5f76699dfa8c70f6cc5c.pdf
+30c5d2ec584e7b8273af6915aab420fc23ff2761,http://imi.ntu.edu.sg/IMIGraduatePrograms/IMIResearchSeminars/Documents/29_April_2014/REN_Jianfeng_29_April_2014.pdf
+82f4e8f053d20be64d9318529af9fadd2e3547ef,http://pdfs.semanticscholar.org/82f4/e8f053d20be64d9318529af9fadd2e3547ef.pdf
+624496296af19243d5f05e7505fd927db02fd0ce,http://ibug.doc.ic.ac.uk/media/uploads/documents/tzimiro_pantic_cvpr_2014.pdf
+10e704c82616fb5d9c48e0e68ee86d4f83789d96,http://pdfs.semanticscholar.org/10e7/04c82616fb5d9c48e0e68ee86d4f83789d96.pdf
+a2fbaa0b849ecc74f34ebb36d1442d63212b29d2,http://pdfs.semanticscholar.org/a2fb/aa0b849ecc74f34ebb36d1442d63212b29d2.pdf
+28e0ed749ebe7eb778cb13853c1456cb6817a166,http://pdfs.semanticscholar.org/28e0/ed749ebe7eb778cb13853c1456cb6817a166.pdf
+7b43326477795a772c08aee750d3e433f00f20be,http://pdfs.semanticscholar.org/7b43/326477795a772c08aee750d3e433f00f20be.pdf
+334d6c71b6bce8dfbd376c4203004bd4464c2099,http://pdfs.semanticscholar.org/ebbf/a07476257e1b7f4e259b29531a12eab575bd.pdf
+808656563eea17470159e6540b05fe6f7ae58c2b,http://www.researchgate.net/profile/Songul_Varli_Albayrak/publication/235248598_Classification_with_Emotional_Faces_via_a_Robust_Sparse_Classifier/links/0912f510a44fb84bef000000.pdf
+1e5ca4183929929a4e6f09b1e1d54823b8217b8e,http://pdfs.semanticscholar.org/1e5c/a4183929929a4e6f09b1e1d54823b8217b8e.pdf
+84e4b7469f9c4b6c9e73733fa28788730fd30379,http://pdfs.semanticscholar.org/84e4/b7469f9c4b6c9e73733fa28788730fd30379.pdf
+9606b1c88b891d433927b1f841dce44b8d3af066,http://pdfs.semanticscholar.org/9606/b1c88b891d433927b1f841dce44b8d3af066.pdf
+ec7cd3fff8bdbbe7005bc8d6b7f6b87d72aac2d9,http://www.mmp.rwth-aachen.de/publications/pdf/rafi_chalearn2015.pdf
+76dc11b2f141314343d1601635f721fdeef86fdb,http://pdfs.semanticscholar.org/8d19/1804f5b260807dac107b89a5837ac15857aa.pdf
+6fed504da4e192fe4c2d452754d23d3db4a4e5e3,http://pdfs.semanticscholar.org/85ee/d639f7367c794a6d8b38619697af3efaacfe.pdf
+a34d75da87525d1192bda240b7675349ee85c123,http://pdfs.semanticscholar.org/a34d/75da87525d1192bda240b7675349ee85c123.pdf
+6332a99e1680db72ae1145d65fa0cccb37256828,http://pdfs.semanticscholar.org/6332/a99e1680db72ae1145d65fa0cccb37256828.pdf
+2f16459e2e24dc91b3b4cac7c6294387d4a0eacf,http://pdfs.semanticscholar.org/2f16/459e2e24dc91b3b4cac7c6294387d4a0eacf.pdf
+6889d649c6bbd9c0042fadec6c813f8e894ac6cc,http://pdfs.semanticscholar.org/6889/d649c6bbd9c0042fadec6c813f8e894ac6cc.pdf
+d280bcbb387b1d548173917ae82cb6944e3ceca6,https://cse.sc.edu/~mengz/papers/ICIP2014.pdf
+375435fb0da220a65ac9e82275a880e1b9f0a557,https://ibug.doc.ic.ac.uk/media/uploads/documents/tpami_alignment.pdf
+3933416f88c36023a0cba63940eb92f5cef8001a,http://pdfs.semanticscholar.org/3933/416f88c36023a0cba63940eb92f5cef8001a.pdf
+740e095a65524d569244947f6eea3aefa3cca526,http://pdfs.semanticscholar.org/740e/095a65524d569244947f6eea3aefa3cca526.pdf
+07ea3dd22d1ecc013b6649c9846d67f2bf697008,http://pdfs.semanticscholar.org/07ea/3dd22d1ecc013b6649c9846d67f2bf697008.pdf
+3e687d5ace90c407186602de1a7727167461194a,http://pdfs.semanticscholar.org/3e68/7d5ace90c407186602de1a7727167461194a.pdf
+063a3be18cc27ba825bdfb821772f9f59038c207,http://eprints.whiterose.ac.uk/125231/1/kaiser_et_al_17.pdf
+2e1415a814ae9abace5550e4893e13bd988c7ba1,http://pdfs.semanticscholar.org/2e14/15a814ae9abace5550e4893e13bd988c7ba1.pdf
+225fb9181545f8750061c7693661b62d715dc542,http://pdfs.semanticscholar.org/c592/e408d95c838bced90b79640bead7c226fe64.pdf
+4a1d640f5e25bb60bb2347d36009718249ce9230,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Xing_Towards_Multi-view_and_2014_CVPR_paper.pdf
+fd7b6c77b46420c27725757553fcd1fb24ea29a8,http://pdfs.semanticscholar.org/fd7b/6c77b46420c27725757553fcd1fb24ea29a8.pdf
+98c548a4be0d3b62971e75259d7514feab14f884,http://pdfs.semanticscholar.org/98c5/48a4be0d3b62971e75259d7514feab14f884.pdf
+5b59e6b980d2447b2f3042bd811906694e4b0843,https://bib.irb.hr/datoteka/832723.PID4276755.pdf
+22e2066acfb795ac4db3f97d2ac176d6ca41836c,http://pdfs.semanticscholar.org/26f5/3a1abb47b1f0ea1f213dc7811257775dc6e6.pdf
+5dc056fe911a3e34a932513abe637076250d96da,http://www.vision.ee.ethz.ch/~gfanelli/pubs/cvpr12.pdf
+218b2c5c9d011eb4432be4728b54e39f366354c1,http://infolab.stanford.edu/~wangz/project/imsearch/ALIP/TIP13/sawant.pdf
+00616b487d4094805107bb766da1c234c3c75e73,http://vision.ucmerced.edu/papers/Newsam_ACMGIS_2008.pdf
+7e00fb79576fe213853aeea39a6bc51df9fdca16,http://www.ics.ele.tue.nl/~tbasten/papers/AVSS2015_final.pdf
+7de386bf2a1b2436c836c0cc1f1f23fccb24aad6,http://pdfs.semanticscholar.org/7de3/86bf2a1b2436c836c0cc1f1f23fccb24aad6.pdf
+4c8e5fc0877d066516bb63e6c31eb1b8b5f967eb,http://pdfs.semanticscholar.org/4c8e/5fc0877d066516bb63e6c31eb1b8b5f967eb.pdf
+081cb09791e7ff33c5d86fd39db00b2f29653fa8,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR_2009_WS/data/papers/09/22.pdf
+20a0b23741824a17c577376fdd0cf40101af5880,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Weinzaepfel_Learning_to_Track_ICCV_2015_paper.pdf
+aed321909bb87c81121c841b21d31509d6c78f69,http://pdfs.semanticscholar.org/aed3/21909bb87c81121c841b21d31509d6c78f69.pdf
+4571626d4d71c0d11928eb99a3c8b10955a74afe,http://pdfs.semanticscholar.org/4571/626d4d71c0d11928eb99a3c8b10955a74afe.pdf
+131178dad3c056458e0400bed7ee1a36de1b2918,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Deng_Visual_Reranking_through_2013_ICCV_paper.pdf
+081a431107eb38812b74a8cd036ca5e97235b499,http://webhost.uoradea.ro/ibuciu/IEEE_TNN_2008.pdf
+2cdc40f20b70ca44d9fd8e7716080ee05ca7924a,http://pdfs.semanticscholar.org/2cdc/40f20b70ca44d9fd8e7716080ee05ca7924a.pdf
+aa912375eaf50439bec23de615aa8a31a3395ad3,http://pdfs.semanticscholar.org/aa91/2375eaf50439bec23de615aa8a31a3395ad3.pdf
+7373c4a23684e2613f441f2236ed02e3f9942dd4,https://dr.ntu.edu.sg/bitstream/handle/10220/18012/Feature%20Extraction%20through%20Binary%20Pattern%20of%20Phase%20Congruency%20for%20Facial%20Expression%20Recognition.pdf?isAllowed=y&sequence=1
+0ce8a45a77e797e9d52604c29f4c1e227f604080,http://pdfs.semanticscholar.org/0ce8/a45a77e797e9d52604c29f4c1e227f604080.pdf
+cd9666858f6c211e13aa80589d75373fd06f6246,http://pdfs.semanticscholar.org/cd96/66858f6c211e13aa80589d75373fd06f6246.pdf
+d04d5692461d208dd5f079b98082eda887b62323,http://www.cbsr.ia.ac.cn/users/zlei/papers/ICB2015/ZLEI-ICB-15.pdf
+ce9a61bcba6decba72f91497085807bface02daf,http://www.jdl.ac.cn/user/sgshan/pub/FG04_Qing_LY.pdf
+0447bdb71490c24dd9c865e187824dee5813a676,http://pdfs.semanticscholar.org/0447/bdb71490c24dd9c865e187824dee5813a676.pdf
+68c4a1d438ea1c6dfba92e3aee08d48f8e7f7090,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w11/papers/Liu_AgeNet_Deeply_Learned_ICCV_2015_paper.pdf
+52c91fcf996af72d191520d659af44e310f86ef9,http://pdfs.semanticscholar.org/52c9/1fcf996af72d191520d659af44e310f86ef9.pdf
+1648cf24c042122af2f429641ba9599a2187d605,http://www.eurecom.fr/en/publication/5333/download/sec-publi-5333.pdf
+1afdedba774f6689eb07e048056f7844c9083be9,http://ibug.doc.ic.ac.uk/media/uploads/documents/sandbach2013markov.pdf
+5a7520380d9960ff3b4f5f0fe526a00f63791e99,http://arxiv.org/pdf/1512.00932v1.pdf
+c1482491f553726a8349337351692627a04d5dbe,http://pdfs.semanticscholar.org/c148/2491f553726a8349337351692627a04d5dbe.pdf
+c4b58ceafdf4cf55586b036b9eb4d6d3d9ecd9c4,http://www.serc.iisc.ernet.in/~venky/Papers/Action_Recognition_CD_ISSNIP14.pdf
+c37a971f7a57f7345fdc479fa329d9b425ee02be,http://pdfs.semanticscholar.org/c37a/971f7a57f7345fdc479fa329d9b425ee02be.pdf
+18a9f3d855bd7728ed4f988675fa9405b5478845,http://pdfs.semanticscholar.org/18a9/f3d855bd7728ed4f988675fa9405b5478845.pdf
+4188bd3ef976ea0dec24a2512b44d7673fd4ad26,http://ibug.doc.ic.ac.uk/media/uploads/documents/ieee_tip2010.pdf
+22e678d3e915218a7c09af0d1602e73080658bb7,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR_2009_WS/data/papers/04/13.pdf
+9d06d43e883930ddb3aa6fe57c6a865425f28d44,http://pdfs.semanticscholar.org/dd08/039eb271af93810ba392728ff481d8ce7496.pdf
+47541d04ec24662c0be438531527323d983e958e,http://pdfs.semanticscholar.org/4754/1d04ec24662c0be438531527323d983e958e.pdf
+0952ac6ce94c98049d518d29c18d136b1f04b0c0,http://pdfs.semanticscholar.org/0952/ac6ce94c98049d518d29c18d136b1f04b0c0.pdf
+f214bcc6ecc3309e2efefdc21062441328ff6081,http://pdfs.semanticscholar.org/f214/bcc6ecc3309e2efefdc21062441328ff6081.pdf
+0f53ab8b6c428127753281dd77cf94bdb889b624,https://www.researchgate.net/profile/Dian_Tjondronegoro/publication/224257559_Toward_a_more_robust_facial_expression_recognition_in_occluded_images_using_randomly_sampled_Gabor_based_templates/links/00b7d51f84babec8ad000000.pdf
+84f904a71bee129a1cf00dc97f6cdbe1011657e6,http://pdfs.semanticscholar.org/84f9/04a71bee129a1cf00dc97f6cdbe1011657e6.pdf
+40fb4e8932fb6a8fef0dddfdda57a3e142c3e823,http://gavrila.net/Publications/cvpr08.pdf
+3aa9c8c65ce63eb41580ba27d47babb1100df8a3,http://www.csb.uncw.edu/mscsis/complete/pdf/VandeventerJason_Final.pdf
+39f7878f447df7703f2c4ddeeffd7eb0e21f6cd4,http://dev.pubs.doc.ic.ac.uk/Pantic-CVPR05/Pantic-CVPR05.pdf
+d84a48f7d242d73b32a9286f9b148f5575acf227,http://pdfs.semanticscholar.org/d84a/48f7d242d73b32a9286f9b148f5575acf227.pdf
+de15af84b1257211a11889b6c2adf0a2bcf59b42,http://pdfs.semanticscholar.org/de15/af84b1257211a11889b6c2adf0a2bcf59b42.pdf
+5287d8fef49b80b8d500583c07e935c7f9798933,http://pdfs.semanticscholar.org/8e65/13b642dcd5dc0fb60173dd0da1d8440eba8d.pdf
+63a2e2155193dc2da9764ae7380cdbd044ff2b94,http://pdfs.semanticscholar.org/a8fb/2c65a23d1e75c4923c36fdd6e3d2a4b3d8f7.pdf
+632441c9324cd29489cee3da773a9064a46ae26b,http://pdfs.semanticscholar.org/6324/41c9324cd29489cee3da773a9064a46ae26b.pdf
+2c3430e0cbe6c8d7be3316a88a5c13a50e90021d,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Wang_Multi-feature_Spectral_Clustering_2014_CVPR_paper.pdf
+a611c978e05d7feab01fb8a37737996ad6e88bd9,http://cbl.uh.edu/pub_files/3_Benchmarking3DPoseEstimationForFaceRecognition_ICPR2014_v8.pdf
+c1e76c6b643b287f621135ee0c27a9c481a99054,http://pdfs.semanticscholar.org/c1e7/6c6b643b287f621135ee0c27a9c481a99054.pdf
+43b8b5eeb4869372ef896ca2d1e6010552cdc4d4,http://pdfs.semanticscholar.org/43b8/b5eeb4869372ef896ca2d1e6010552cdc4d4.pdf
+088aabe3da627432fdccf5077969e3f6402f0a80,http://pdfs.semanticscholar.org/088a/abe3da627432fdccf5077969e3f6402f0a80.pdf
+4b0a2937f64df66cadee459a32ad7ae6e9fd7ed2,https://arxiv.org/pdf/1705.07750v3.pdf
+b689d344502419f656d482bd186a5ee6b0140891,http://pdfs.semanticscholar.org/b689/d344502419f656d482bd186a5ee6b0140891.pdf
+09718bf335b926907ded5cb4c94784fd20e5ccd8,http://parnec.nuaa.edu.cn/papers/journal/2005/xtan-TNN05.pdf
+0ba449e312894bca0d16348f3aef41ca01872383,http://pdfs.semanticscholar.org/0ba4/49e312894bca0d16348f3aef41ca01872383.pdf
+0db36bf08140d53807595b6313201a7339470cfe,http://www.cfar.umd.edu/~rama/Publications/Shroff_CVPR_2010.pdf
+0cf7741e1fdb11a77cdf39b4dda8c65a62af4f23,http://vipl.ict.ac.cn/sites/default/files/papers/files/2013_TIP_mnkan_Learning%20Prototype%20Hyperplanes%20for%20Face%20Verification%20in%20the%20Wild.pdf
+3bd50e33220af76ffc32a7e57688e248843b7f25,http://staff.estem-uc.edu.au/roland/files/2009/05/Ramana_Murthy_Goecke_DICTA2014_TheInfluenceOfTemporalInformationOnHumanActionRecognitionWithLargeNumberOfClasses.pdf
+b13a882e6168afc4058fe14cc075c7e41434f43e,http://pdfs.semanticscholar.org/b13a/882e6168afc4058fe14cc075c7e41434f43e.pdf
+0ec2049a1dd7ae14c7a4c22c5bcd38472214f44d,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Wang_Fast_Subspace_Search_2013_ICCV_paper.pdf
+357963a46dfc150670061dbc23da6ba7d6da786e,http://pdfs.semanticscholar.org/3579/63a46dfc150670061dbc23da6ba7d6da786e.pdf
+b89862f38fff416d2fcda389f5c59daba56241db,http://pdfs.semanticscholar.org/b898/62f38fff416d2fcda389f5c59daba56241db.pdf
+db82f9101f64d396a86fc2bd05b352e433d88d02,http://pdfs.semanticscholar.org/db82/f9101f64d396a86fc2bd05b352e433d88d02.pdf
+c089c7d8d1413b54f59fc410d88e215902e51638,http://nlpr-web.ia.ac.cn/2011papers/gjhy/gh122.pdf
+8820d1d3fa73cde623662d92ecf2e3faf1e3f328,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w2/papers/Victor_Continuous_Video_to_CVPR_2017_paper.pdf
+0e78af9bd0f9a0ce4ceb5f09f24bc4e4823bd698,http://pdfs.semanticscholar.org/0e78/af9bd0f9a0ce4ceb5f09f24bc4e4823bd698.pdf
+0e677f2b798f5c1f7143ba983467321a7851565a,http://www.cse.iitk.ac.in/users/rahulaaj/papers/BillyYL.pdf
+6a4ebd91c4d380e21da0efb2dee276897f56467a,http://ibug.doc.ic.ac.uk/media/uploads/documents/07025044.pdf
+e4df83b7424842ff5864c10fa55d38eae1c45fac,http://pdfs.semanticscholar.org/e4df/83b7424842ff5864c10fa55d38eae1c45fac.pdf
+151481703aa8352dc78e2577f0601782b8c41b34,http://pdfs.semanticscholar.org/943c/f990952712673320b011e1e8092fad65eedd.pdf
+8a3bb63925ac2cdf7f9ecf43f71d65e210416e17,https://www.math.uh.edu/~dlabate/ShearFace_ICPR2014.pdf
+6d618657fa5a584d805b562302fe1090957194ba,http://pdfs.semanticscholar.org/6d61/8657fa5a584d805b562302fe1090957194ba.pdf
+33e20449aa40488c6d4b430a48edf5c4b43afdab,http://mplab.ucsd.edu/wordpress/wp-content/uploads/EngagementRecognitionFinal.pdf
+d511e903a882658c9f6f930d6dd183007f508eda,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553766.pdf
+988d1295ec32ce41d06e7cf928f14a3ee079a11e,http://pdfs.semanticscholar.org/988d/1295ec32ce41d06e7cf928f14a3ee079a11e.pdf
+3e4b38b0574e740dcbd8f8c5dfe05dbfb2a92c07,http://pdfs.semanticscholar.org/accb/d6cd5dd649137a7c57ad6ef99232759f7544.pdf
+9c1305383ce2c108421e9f5e75f092eaa4a5aa3c,http://pdfs.semanticscholar.org/9c13/05383ce2c108421e9f5e75f092eaa4a5aa3c.pdf
+70a69569ba61f3585cd90c70ca5832e838fa1584,http://pdfs.semanticscholar.org/70a6/9569ba61f3585cd90c70ca5832e838fa1584.pdf
+492f41e800c52614c5519f830e72561db205e86c,http://openaccess.thecvf.com/content_cvpr_2017/papers/Lv_A_Deep_Regression_CVPR_2017_paper.pdf
+6f84e61f33564e5188136474f9570b1652a0606f,https://arxiv.org/pdf/1708.00284v1.pdf
+93747de3d40376761d1ef83ffa72ec38cd385833,http://pdfs.semanticscholar.org/9374/7de3d40376761d1ef83ffa72ec38cd385833.pdf
+c4f1fcd0a5cdaad8b920ee8188a8557b6086c1a4,https://vision.cornell.edu/se3/wp-content/uploads/2015/02/ijcv2014.pdf
+bb06ef67a49849c169781657be0bb717587990e0,http://www.eurasip.org/Proceedings/Eusipco/Eusipco2017/papers/1570342773.pdf
+021a19e240f0ae0554eff814e838e1e396be6572,http://ci2cv.net/static/papers/2009_ICCV_Saragih_2.pdf
+6e3a181bf388dd503c83dc324561701b19d37df1,http://pdfs.semanticscholar.org/9d91/213394fb411743b11bae74cf22f0ffca9191.pdf
+21f3c5b173503185c1e02a3eb4e76e13d7e9c5bc,http://pdfs.semanticscholar.org/21f3/c5b173503185c1e02a3eb4e76e13d7e9c5bc.pdf
+6d7a32f594d46f4087b71e2a2bb66a4b25da5e30,http://pdfs.semanticscholar.org/6d7a/32f594d46f4087b71e2a2bb66a4b25da5e30.pdf
+d7d9c1fa77f3a3b3c2eedbeb02e8e7e49c955a2f,http://pdfs.semanticscholar.org/d7d9/c1fa77f3a3b3c2eedbeb02e8e7e49c955a2f.pdf
+6479b61ea89e9d474ffdefa71f068fbcde22cc44,http://pdfs.semanticscholar.org/6479/b61ea89e9d474ffdefa71f068fbcde22cc44.pdf
+f35a493afa78a671b9d2392c69642dcc3dd2cdc2,http://pdfs.semanticscholar.org/f35a/493afa78a671b9d2392c69642dcc3dd2cdc2.pdf
+889bc64c7da8e2a85ae6af320ae10e05c4cd6ce7,http://mmlab.ie.cuhk.edu.hk/archive/2007/IFS07_face.pdf
+2af2b74c3462ccff3a6881ff7cf4f321b3242fa9,http://yugangjiang.info/publication/JCST-nameface.pdf
+d03e4e938bcbc25aa0feb83d8a0830f9cd3eb3ea,http://pdfs.semanticscholar.org/d03e/4e938bcbc25aa0feb83d8a0830f9cd3eb3ea.pdf
+f6742010372210d06e531e7df7df9c01a185e241,http://pdfs.semanticscholar.org/f674/2010372210d06e531e7df7df9c01a185e241.pdf
+28aa89b2c827e5dd65969a5930a0520fdd4a3dc7,http://pdfs.semanticscholar.org/28aa/89b2c827e5dd65969a5930a0520fdd4a3dc7.pdf
+6a67e6fbbd9bcd3f724fe9e6cecc9d48d1b6ad4d,http://pdfs.semanticscholar.org/6a67/e6fbbd9bcd3f724fe9e6cecc9d48d1b6ad4d.pdf
+100105d6c97b23059f7aa70589ead2f61969fbc3,http://www.rci.rutgers.edu/~vmp93/Conference_pub/WACV2016_CFP.pdf
+2d294c58b2afb529b26c49d3c92293431f5f98d0,https://ibug.doc.ic.ac.uk/media/uploads/documents/mmpp_journal.pdf
+86b6de59f17187f6c238853810e01596d37f63cd,http://pdfs.semanticscholar.org/86b6/de59f17187f6c238853810e01596d37f63cd.pdf
+ba8a99d35aee2c4e5e8a40abfdd37813bfdd0906,http://pdfs.semanticscholar.org/ba8a/99d35aee2c4e5e8a40abfdd37813bfdd0906.pdf
+51528cdce7a92835657c0a616c0806594de7513b,http://pdfs.semanticscholar.org/5152/8cdce7a92835657c0a616c0806594de7513b.pdf
+da5bfddcfe703ca60c930e79d6df302920ab9465,http://pdfs.semanticscholar.org/da5b/fddcfe703ca60c930e79d6df302920ab9465.pdf
+d9ef1a80738bbdd35655c320761f95ee609b8f49,http://pdfs.semanticscholar.org/d9ef/1a80738bbdd35655c320761f95ee609b8f49.pdf
+5506a1a1e1255353fde05d9188cb2adc20553af5,http://pdfs.semanticscholar.org/ff69/cb49c8cb86d0afadbcfa0baa607d7065965a.pdf
diff --git a/scraper/reports/misc/db_paper_pdf-3.csv b/scraper/reports/misc/db_paper_pdf-3.csv
new file mode 100644
index 00000000..93605c7b
--- /dev/null
+++ b/scraper/reports/misc/db_paper_pdf-3.csv
@@ -0,0 +1,1639 @@
+292eba47ef77495d2613373642b8372d03f7062b,http://pdfs.semanticscholar.org/292e/ba47ef77495d2613373642b8372d03f7062b.pdf
+fab2fc6882872746498b362825184c0fb7d810e4,http://pdfs.semanticscholar.org/fab2/fc6882872746498b362825184c0fb7d810e4.pdf
+690d669115ad6fabd53e0562de95e35f1078dfbb,http://pdfs.semanticscholar.org/690d/669115ad6fabd53e0562de95e35f1078dfbb.pdf
+e64b683e32525643a9ddb6b6af8b0472ef5b6a37,http://pdfs.semanticscholar.org/e64b/683e32525643a9ddb6b6af8b0472ef5b6a37.pdf
+1d6c09019149be2dc84b0c067595f782a5d17316,http://pdfs.semanticscholar.org/3e27/b747e272c2ab778df92ea802d30af15e43d6.pdf
+192723085945c1d44bdd47e516c716169c06b7c0,http://vislab.ucr.edu/PUBLICATIONS/pubs/Journal%20and%20Conference%20Papers/after10-1-1997/Journals/2014/VisionandAttentionTheoryBasedSampling14.pdf
+d074b33afd95074d90360095b6ecd8bc4e5bb6a2,http://pdfs.semanticscholar.org/d074/b33afd95074d90360095b6ecd8bc4e5bb6a2.pdf
+5b01d4338734aefb16ee82c4c59763d3abc008e6,http://pdfs.semanticscholar.org/5b01/d4338734aefb16ee82c4c59763d3abc008e6.pdf
+810f5606a4769fc3dd99611acf805596fb79223d,http://pdfs.semanticscholar.org/810f/5606a4769fc3dd99611acf805596fb79223d.pdf
+c035c193eed5d72c7f187f0bc880a17d217dada0,http://pdfs.semanticscholar.org/c035/c193eed5d72c7f187f0bc880a17d217dada0.pdf
+10f2b8188c745d43c1580f5ee6de71ad8d538b4d,http://staff.eng.bahcesehir.edu.tr/~cigdemeroglu/papers/international_conference_papers/2015_EmotiW.pdf
+43ed518e466ff13118385f4e5d039ae4d1c000fb,https://arxiv.org/pdf/1505.01350v1.pdf
+4bfce41cc72be315770861a15e467aa027d91641,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Branson_Active_Annotation_Translation_2014_CVPR_paper.pdf
+11ddf5e47854e4e6109762835d2ce086bbdfbc5b,http://eprints.pascal-network.org/archive/00008322/01/schroff11.pdf
+ebb9d53668205c5797045ba130df18842e3eadef,http://pdfs.semanticscholar.org/ebb9/d53668205c5797045ba130df18842e3eadef.pdf
+1b635f494eff2e5501607ebe55eda7bdfa8263b8,http://pdfs.semanticscholar.org/1b63/5f494eff2e5501607ebe55eda7bdfa8263b8.pdf
+af6e351d58dba0962d6eb1baf4c9a776eb73533f,http://pdfs.semanticscholar.org/af6e/351d58dba0962d6eb1baf4c9a776eb73533f.pdf
+31625522950e82ad4dffef7ed0df00fdd2401436,http://pdfs.semanticscholar.org/3162/5522950e82ad4dffef7ed0df00fdd2401436.pdf
+566a39d753c494f57b4464d6bde61bf3593f7ceb,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2013/W01/papers/Hassner_A_Critical_Review_2013_CVPR_paper.pdf
+e10a257f1daf279e55f17f273a1b557141953ce2,http://pdfs.semanticscholar.org/e10a/257f1daf279e55f17f273a1b557141953ce2.pdf
+88ad82e6f2264f75f7783232ba9185a2f931a5d1,http://pdfs.semanticscholar.org/88ad/82e6f2264f75f7783232ba9185a2f931a5d1.pdf
+340d1a9852747b03061e5358a8d12055136599b0,http://pdfs.semanticscholar.org/340d/1a9852747b03061e5358a8d12055136599b0.pdf
+70c2c2d2b7e34ff533a8477eff9763be196cd03a,http://iplab.dmi.unict.it/sites/default/files/_9.pdf
+17cf6195fd2dfa42670dc7ada476e67b381b8f69,http://pdfs.semanticscholar.org/17cf/6195fd2dfa42670dc7ada476e67b381b8f69.pdf
+747fddd7345b60da121fc13c5440a18039b912e6,http://pdfs.semanticscholar.org/747f/ddd7345b60da121fc13c5440a18039b912e6.pdf
+0d6b28691e1aa2a17ffaa98b9b38ac3140fb3306,http://pdfs.semanticscholar.org/0d6b/28691e1aa2a17ffaa98b9b38ac3140fb3306.pdf
+14b016c7a87d142f4b9a0e6dc470dcfc073af517,http://ws680.nist.gov/publication/get_pdf.cfm?pub_id=918912
+29479bb4fe8c04695e6f5ae59901d15f8da6124b,http://www.mirlab.org/conference_papers/International_Conference/ACM%202005/docs/mm31.pdf
+670531f3925c1ee6921f1550a988a034db727c3b,http://neerajkumar.org/base/papers/nk_www2014_photorecall.pdf
+08ae100805d7406bf56226e9c3c218d3f9774d19,http://pdfs.semanticscholar.org/08ae/100805d7406bf56226e9c3c218d3f9774d19.pdf
+228558a2a38a6937e3c7b1775144fea290d65d6c,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Smith_Nonparametric_Context_Modeling_2014_CVPR_paper.pdf
+c5421a18583f629b49ca20577022f201692c4f5d,http://pdfs.semanticscholar.org/c542/1a18583f629b49ca20577022f201692c4f5d.pdf
+23d5b2dccd48a17e743d3a5a4d596111a2f16c41,http://pdfs.semanticscholar.org/8cda/dc4d5e7e4fe6a0dbe15611f6fc8b7c0f103e.pdf
+0efdd82a4753a8309ff0a3c22106c570d8a84c20,http://pdfs.semanticscholar.org/0efd/d82a4753a8309ff0a3c22106c570d8a84c20.pdf
+857ad04fca2740b016f0066b152bd1fa1171483f,http://pdfs.semanticscholar.org/857a/d04fca2740b016f0066b152bd1fa1171483f.pdf
+c92da368a6a886211dc759fe7b1b777a64d8b682,http://pdfs.semanticscholar.org/c92d/a368a6a886211dc759fe7b1b777a64d8b682.pdf
+cefd9936e91885ba7af9364d50470f6cb54315a4,http://pdfs.semanticscholar.org/cefd/9936e91885ba7af9364d50470f6cb54315a4.pdf
+94a7c97d1e3eb5dbfb20b180780451486597a9be,http://pdfs.semanticscholar.org/94a7/c97d1e3eb5dbfb20b180780451486597a9be.pdf
+dbb7f37fb9b41d1aa862aaf2d2e721a470fd2c57,http://pdfs.semanticscholar.org/dbb7/f37fb9b41d1aa862aaf2d2e721a470fd2c57.pdf
+b3f3d6be11ace907c804c2d916830c85643e468d,http://pdfs.semanticscholar.org/b3f3/d6be11ace907c804c2d916830c85643e468d.pdf
+c5366f412f2e8e78280afcccc544156f63b516e3,http://lep.unige.ch/system/files/biblio/2012_Valstar_MetaAnalysisGEMEP-FERA.pdf
+a6d7cf29f333ea3d2aeac67cde39a73898e270b7,http://pdfs.semanticscholar.org/a6d7/cf29f333ea3d2aeac67cde39a73898e270b7.pdf
+01125e3c68edb420b8d884ff53fb38d9fbe4f2b8,http://openaccess.thecvf.com/content_ICCV_2017/papers/Jackson_Large_Pose_3D_ICCV_2017_paper.pdf
+14b162c2581aea1c0ffe84e7e9273ab075820f52,http://pdfs.semanticscholar.org/4b87/c72e53f19e29f2ccf4d24f9432ebbafcf1a8.pdf
+162c33a2ec8ece0dc96e42d5a86dc3fedcf8cd5e,https://research-information.bristol.ac.uk/files/75922781/Ioannis_Pitas_Large_scale_classification_by_an_approximate_least_squares_one_class_support_vector_machine_ensemble_2015.pdf
+cc2eaa182f33defbb33d69e9547630aab7ed9c9c,http://pdfs.semanticscholar.org/ce2e/e807a63bbdffa530c80915b04d11a7f29a21.pdf
+6fda12c43b53c679629473806c2510d84358478f,http://pdfs.semanticscholar.org/6fda/12c43b53c679629473806c2510d84358478f.pdf
+054756fa720bdcf1d320ad7a353e54ca53d4d3af,http://www.stat.ucla.edu/~yuille/Pubs15/JianyuWangSemanticCVPR2015%20(1).pdf
+fe9c460d5ca625402aa4d6dd308d15a40e1010fa,http://pdfs.semanticscholar.org/fe9c/460d5ca625402aa4d6dd308d15a40e1010fa.pdf
+0b0eb562d7341231c3f82a65cf51943194add0bb,http://pdfs.semanticscholar.org/0b0e/b562d7341231c3f82a65cf51943194add0bb.pdf
+75859ac30f5444f0d9acfeff618444ae280d661d,http://www.cse.msu.edu/rgroups/biometrics/Publications/SecureBiometrics/NagarNandakumarJain_MultibiometricCryptosystems_TIFS11.pdf
+016f49a54b79ec787e701cc8c7d0280273f9b1ef,http://poseidon.csd.auth.gr/papers/PUBLISHED/CONFERENCE/pdf/Kotropoulos06a.pdf
+9b474d6e81e3b94e0c7881210e249689139b3e04,http://pdfs.semanticscholar.org/a43c/c0c2f1d0e29cf1ee88f3bde4289a94b70409.pdf
+6a0368b4e132f4aa3bbdeada8d894396f201358a,http://pdfs.semanticscholar.org/6a03/68b4e132f4aa3bbdeada8d894396f201358a.pdf
+6dd052df6b0e89d394192f7f2af4a3e3b8f89875,http://pdfs.semanticscholar.org/6dd0/52df6b0e89d394192f7f2af4a3e3b8f89875.pdf
+0f81b0fa8df5bf3fcfa10f20120540342a0c92e5,https://arxiv.org/pdf/1501.05152v1.pdf
+47d3b923730746bfaabaab29a35634c5f72c3f04,http://pdfs.semanticscholar.org/47d3/b923730746bfaabaab29a35634c5f72c3f04.pdf
+13a994d489c15d440c1238fc1ac37dad06dd928c,http://pdfs.semanticscholar.org/13a9/94d489c15d440c1238fc1ac37dad06dd928c.pdf
+5c820e47981d21c9dddde8d2f8020146e600368f,http://pdfs.semanticscholar.org/5c82/0e47981d21c9dddde8d2f8020146e600368f.pdf
+78216cd51e6e1cc014b83e27e7e78631ad44b899,http://www.ami-lab.org/uploads/Publications/Conference/WP4/Tracking%20facial%20features%20under%20occlusions%20and%20recognizing%20facial%20expressions%20in%20sign%20language.pdf
+ab6776f500ed1ab23b7789599f3a6153cdac84f7,http://pdfs.semanticscholar.org/ab67/76f500ed1ab23b7789599f3a6153cdac84f7.pdf
+1a6c3c37c2e62b21ebc0f3533686dde4d0103b3f,http://pdfs.semanticscholar.org/1a6c/3c37c2e62b21ebc0f3533686dde4d0103b3f.pdf
+ba29ba8ec180690fca702ad5d516c3e43a7f0bb8,http://pdfs.semanticscholar.org/ba29/ba8ec180690fca702ad5d516c3e43a7f0bb8.pdf
+b261439b5cde39ec52d932a222450df085eb5a91,http://pdfs.semanticscholar.org/b261/439b5cde39ec52d932a222450df085eb5a91.pdf
+93675f86d03256f9a010033d3c4c842a732bf661,http://pdfs.semanticscholar.org/9367/5f86d03256f9a010033d3c4c842a732bf661.pdf
+91e507d2d8375bf474f6ffa87788aa3e742333ce,http://pdfs.semanticscholar.org/91e5/07d2d8375bf474f6ffa87788aa3e742333ce.pdf
+4e1836914bbcf94dc00e604b24b1b0d6d7b61e66,http://pdfs.semanticscholar.org/4e18/36914bbcf94dc00e604b24b1b0d6d7b61e66.pdf
+12c713166c46ac87f452e0ae383d04fb44fe4eb2,http://pdfs.semanticscholar.org/98dc/a90e43c7592ef81cf84445d73c8baa719686.pdf
+93721023dd6423ab06ff7a491d01bdfe83db7754,http://pdfs.semanticscholar.org/9372/1023dd6423ab06ff7a491d01bdfe83db7754.pdf
+7fb6bc6c920ca574677f0d3a40c5c377a095885b,http://www.cs.bris.ac.uk/Publications/Papers/2000124.pdf
+edc5a0a8b9fc6ae0e8d8091a2391767f645095d9,http://www.es.mdh.se/pdf_publications/3948.pdf
+a695c2240382e362262db72017ceae0365d63f8f,http://www3.nd.edu/~kwb/AggarwalBiswasFlynnBowyerWACV_2012.pdf
+9a7858eda9b40b16002c6003b6db19828f94a6c6,https://www1.icsi.berkeley.edu/~twke/pdfs/pubs/mooney_icip2017.pdf
+14b66748d7c8f3752dca23991254fca81b6ee86c,http://pdfs.semanticscholar.org/4e92/a8dcfd802c3248d56ba16d2613dceacaef59.pdf
+33ec047f1084e290c8a6f516bc75345b6bcf02a0,https://www.researchgate.net/profile/Peter_Corcoran/publication/220168274_Smart_Cameras_2D_Affine_Models_for_Determining_Subject_Facial_Expressions/links/02bfe5118f52d3d59d000000.pdf
+d1082eff91e8009bf2ce933ac87649c686205195,http://epubs.surrey.ac.uk/807279/1/ML_Akyuz_Windeatt_Raymond.pdf
+9d61b0beb3c5903fc3032655dc0fd834ec0b2af3,http://pdfs.semanticscholar.org/c5ac/a3f653e2e8a58888492524fc1480608457b7.pdf
+4ca1fcfd7650eeb0ac8d51cff31b70717cdddfdd,http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/1563.pdf
+0d5824e14593bcb349d636d255ba274f98bbb88f,http://www.researchgate.net/profile/Claus_Neubauer/publication/224716248_A_Variational_Bayesian_Approach_for_Classification_with_Corrupted_Inputs/links/00b7d52dd1f690da64000000.pdf
+dc7df544d7c186723d754e2e7b7217d38a12fcf7,http://pdfs.semanticscholar.org/dc7d/f544d7c186723d754e2e7b7217d38a12fcf7.pdf
+22bebedc1a5f3556cb4f577bdbe032299a2865e8,http://pdfs.semanticscholar.org/22be/bedc1a5f3556cb4f577bdbe032299a2865e8.pdf
+71e56f2aebeb3c4bb3687b104815e09bb4364102,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Guo_Video_Co-segmentation_for_2013_ICCV_paper.pdf
+b446cf353744a4b640af88d1848a1b958169c9f2,https://www.computer.org/web/csdl/index/-/csdl/proceedings/fg/2013/5545/00/06553744.pdf
+0f4cfcaca8d61b1f895aa8c508d34ad89456948e,http://signal.ee.bilkent.edu.tr/defevent/abstract/a2051.pdf
+16bce9f940bb01aa5ec961892cc021d4664eb9e4,http://www.cise.ufl.edu/~dihong/assets/TIST-2014-10-0214.R2.pdf
+8fa3478aaf8e1f94e849d7ffbd12146946badaba,http://pdfs.semanticscholar.org/8fa3/478aaf8e1f94e849d7ffbd12146946badaba.pdf
+14811696e75ce09fd84b75fdd0569c241ae02f12,https://jurie.users.greyc.fr/papers/cvpr08-cevikalp.pdf
+19e7bdf8310f9038e1a9cf412b8dd2c77ff64c54,http://cvrr.ucsd.edu/publications/2006/McCallTrivedi_v4hci_cvpr2006.pdf
+814b05113ba0397d236736f94c01e85bb034c833,http://pdfs.semanticscholar.org/814b/05113ba0397d236736f94c01e85bb034c833.pdf
+14418ae9a6a8de2b428acb2c00064da129632f3e,http://fanyix.cs.ucdavis.edu/project/discovery/files/ext_abstract.pdf
+56c2fb2438f32529aec604e6fc3b06a595ddbfcc,http://pdfs.semanticscholar.org/56c2/fb2438f32529aec604e6fc3b06a595ddbfcc.pdf
+3a0a839012575ba455f2b84c2d043a35133285f9,http://pdfs.semanticscholar.org/76a1/dca3a9c2b0229c1b12c95752dcf40dc95a11.pdf
+278e1441a77fbeebb22c45932d76c557e5663197,http://sist.sysu.edu.cn/~zhwshi/research/preprintversion/two-stage%20nonnegative%20sparse%20representation%20for%20large-scale%20face%20recognition.pdf
+5040f7f261872a30eec88788f98326395a44db03,http://pdfs.semanticscholar.org/5040/f7f261872a30eec88788f98326395a44db03.pdf
+c5844de3fdf5e0069d08e235514863c8ef900eb7,http://pdfs.semanticscholar.org/c584/4de3fdf5e0069d08e235514863c8ef900eb7.pdf
+d9318c7259e394b3060b424eb6feca0f71219179,http://biometrics.cse.msu.edu/Publications/Face/ParkJainFaceSoftBio_TIFS10.pdf
+09628e9116e7890bc65ebeabaaa5f607c9847bae,https://arxiv.org/pdf/1704.03039.pdf
+2b1129efcbafa61da1d660de3b5c84b646540311,http://www.researchgate.net/profile/Haizhou_Ai/publication/221368891_Distributing_expressional_faces_in_2-D_emotional_space/links/546b431f0cf20dedafd52906.pdf
+bba281fe9c309afe4e5cc7d61d7cff1413b29558,http://pdfs.semanticscholar.org/bba2/81fe9c309afe4e5cc7d61d7cff1413b29558.pdf
+1f24cef78d1de5aa1eefaf344244dcd1972797e8,http://openaccess.thecvf.com/content_cvpr_2017/papers/Zhou_Outlier-Robust_Tensor_PCA_CVPR_2017_paper.pdf
+fc68c5a3ab80d2d31e6fd4865a7ff2b4ab66ca9f,http://pdfs.semanticscholar.org/fc68/c5a3ab80d2d31e6fd4865a7ff2b4ab66ca9f.pdf
+4118b4fc7d61068b9b448fd499876d139baeec81,http://www.cs.utexas.edu/~ssi/TKDE2010.pdf
+91e58c39608c6eb97b314b0c581ddaf7daac075e,http://pdfs.semanticscholar.org/91e5/8c39608c6eb97b314b0c581ddaf7daac075e.pdf
+baa0fe4d0ac0c7b664d4c4dd00b318b6d4e09143,http://pdfs.semanticscholar.org/baa0/fe4d0ac0c7b664d4c4dd00b318b6d4e09143.pdf
+88fd4d1d0f4014f2b2e343c83d8c7e46d198cc79,http://www.mirlab.org/conference_papers/International_Conference/ICASSP%202016/pdfs/0002697.pdf
+0182d090478be67241392df90212d6cd0fb659e6,http://www.cs.utexas.edu/~grauman/papers/localized_attributes_cvpr2012.pdf
+96faccdddef887673d6007fed8ff2574580cae1f,http://pdfs.semanticscholar.org/96fa/ccdddef887673d6007fed8ff2574580cae1f.pdf
+33403e9b4bbd913ae9adafc6751b52debbd45b0e,http://pdfs.semanticscholar.org/3340/3e9b4bbd913ae9adafc6751b52debbd45b0e.pdf
+22fdd8d65463f520f054bf4f6d2d216b54fc5677,http://pdfs.semanticscholar.org/22fd/d8d65463f520f054bf4f6d2d216b54fc5677.pdf
+8983485996d5d9d162e70d66399047c5d01ac451,https://arxiv.org/pdf/1602.04868v1.pdf
+3ca5d3b8f5f071148cb50f22955fd8c1c1992719,http://pdfs.semanticscholar.org/3ca5/d3b8f5f071148cb50f22955fd8c1c1992719.pdf
+03f4c0fe190e5e451d51310bca61c704b39dcac8,http://pdfs.semanticscholar.org/03f4/c0fe190e5e451d51310bca61c704b39dcac8.pdf
+420782499f38c1d114aabde7b8a8104c9e40a974,http://openaccess.thecvf.com/content_cvpr_2016/papers/Simo-Serra_Fashion_Style_in_CVPR_2016_paper.pdf
+6c9266aa77ea01b9d26a98a483b56e9e8b80eeba,https://www.researchgate.net/profile/Stefano_Tubaro/publication/224641232_Mixed_2D-3D_Information_for_Pose_Estimation_and_Face_Recognition/links/00b7d5178477f30fb3000000.pdf
+75cd81d2513b7e41ac971be08bbb25c63c37029a,http://pdfs.semanticscholar.org/75cd/81d2513b7e41ac971be08bbb25c63c37029a.pdf
+c32f04ccde4f11f8717189f056209eb091075254,http://pdfs.semanticscholar.org/c32f/04ccde4f11f8717189f056209eb091075254.pdf
+cb669c1d1e17c2a54d78711fa6a9f556b83f1987,http://satoh-lab.ex.nii.ac.jp/users/ledduy/pub/Ngo-RobustFaceTrackFindingUnsingTrackedPoints.pdf
+1d1caaa2312390260f7d20ad5f1736099818d358,https://eprints.soton.ac.uk/271401/1/paperOnIEEEexplore.pdf
+a308077e98a611a977e1e85b5a6073f1a9bae6f0,http://pdfs.semanticscholar.org/a308/077e98a611a977e1e85b5a6073f1a9bae6f0.pdf
+0cdb49142f742f5edb293eb9261f8243aee36e12,https://arxiv.org/pdf/1303.2783v1.pdf
+2ef51b57c4a3743ac33e47e0dc6a40b0afcdd522,http://pdfs.semanticscholar.org/2ef5/1b57c4a3743ac33e47e0dc6a40b0afcdd522.pdf
+02431ed90700d5cfe4e3d3a20f1e97de3e131569,http://www.di.ens.fr/~bojanowski/papers/bojanowski13finding.pdf
+1afd481036d57320bf52d784a22dcb07b1ca95e2,http://pdfs.semanticscholar.org/e206/144fc1dee7f10079facf3b6a3d5d2bf5f8db.pdf
+e2d265f606cd25f1fd72e5ee8b8f4c5127b764df,http://pdfs.semanticscholar.org/e2d2/65f606cd25f1fd72e5ee8b8f4c5127b764df.pdf
+6bcee7dba5ed67b3f9926d2ae49f9a54dee64643,http://pdfs.semanticscholar.org/6bce/e7dba5ed67b3f9926d2ae49f9a54dee64643.pdf
+182f3aa4b02248ff9c0f9816432a56d3c8880706,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Quan_Sparse_Coding_for_CVPR_2016_paper.pdf
+22646cf884cc7093b0db2c1731bd52f43682eaa8,http://pdfs.semanticscholar.org/2264/6cf884cc7093b0db2c1731bd52f43682eaa8.pdf
+10a285260e822b49023c4324d0fbbca7df8e128b,https://staff.fnwi.uva.nl/m.jain/pub/jain-objects2action-iccv2015.pdf
+a0dfb8aae58bd757b801e2dcb717a094013bc178,http://pdfs.semanticscholar.org/a0df/b8aae58bd757b801e2dcb717a094013bc178.pdf
+7ee53d931668fbed1021839db4210a06e4f33190,http://crcv.ucf.edu/projects/videolocalization_images/CVPR16_Waqas_AL.pdf
+c822bd0a005efe4ec1fea74de534900a9aa6fb93,http://pdfs.semanticscholar.org/c822/bd0a005efe4ec1fea74de534900a9aa6fb93.pdf
+e13360cda1ebd6fa5c3f3386c0862f292e4dbee4,http://pdfs.semanticscholar.org/e133/60cda1ebd6fa5c3f3386c0862f292e4dbee4.pdf
+4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7,http://pdfs.semanticscholar.org/d8ca/e259c1c5bba0c096f480dc7322bbaebfac1a.pdf
+306127c3197eb5544ab1e1bf8279a01e0df26120,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Huang_Sparse_Coding_and_CVPR_2016_paper.pdf
+20ebbcb6157efaacf7a1ceb99f2f3e2fdf1384e6,http://pdfs.semanticscholar.org/ee89/f47ebfbebed7d6793a6774356ba63398f0d0.pdf
+2be9144a1e66de127192b01907c862381f4011d1,http://www1.cs.columbia.edu/~belhumeur/conference/eye-iccv05.pdf
+a0e7f8771c7d83e502d52c276748a33bae3d5f81,http://pdfs.semanticscholar.org/a0e7/f8771c7d83e502d52c276748a33bae3d5f81.pdf
+2ee817981e02c4709d65870c140665ed25b005cc,http://www.umiacs.umd.edu/users/rama/Publications/Patel_ICARCV_2010.pdf
+86b69b3718b9350c9d2008880ce88cd035828432,http://pdfs.semanticscholar.org/86b6/9b3718b9350c9d2008880ce88cd035828432.pdf
+5d44c675addcb6e74cbc5a9c48df0d754bdbcd98,http://pdfs.semanticscholar.org/9bc0/1fa9400c231e41e6a72ec509d76ca797207c.pdf
+cd687ddbd89a832f51d5510c478942800a3e6854,http://pdfs.semanticscholar.org/cd68/7ddbd89a832f51d5510c478942800a3e6854.pdf
+2465fc22e03faf030e5a319479a95ef1dfc46e14,https://www.fruct.org/publications/fruct20/files/Bel.pdf
+5253c94f955146ba7d3566196e49fe2edea1c8f4,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Kemelmacher-Shlizerman_Internet_Based_Morphable_2013_ICCV_paper.pdf
+8cc07ae9510854ec6e79190cc150f9f1fe98a238,http://pdfs.semanticscholar.org/8cc0/7ae9510854ec6e79190cc150f9f1fe98a238.pdf
+1b0a071450c419138432c033f722027ec88846ea,http://cvrr.ucsd.edu/publications/2016/YuenMartinTrivediITSC2016.pdf
+47506951d2dc7c4bb4d2d33dd25b67a767e56680,http://www.ll.mit.edu/mission/cybersec/publications/publication-files/full_papers/2015_04_15_BradyJ_IEEEHST_FP.pdf
+2e0f5e72ad893b049f971bc99b67ebf254e194f7,http://pdfs.semanticscholar.org/2e0f/5e72ad893b049f971bc99b67ebf254e194f7.pdf
+fc23a386c2189f221b25dbd0bb34fcd26ccf60fa,http://pdfs.semanticscholar.org/fc23/a386c2189f221b25dbd0bb34fcd26ccf60fa.pdf
+87bee0e68dfc86b714f0107860d600fffdaf7996,http://mi.informatik.uni-siegen.de/publications/piotraschke_autoreconst_cvpr16.pdf
+ae936628e78db4edb8e66853f59433b8cc83594f,http://pdfs.semanticscholar.org/ae93/6628e78db4edb8e66853f59433b8cc83594f.pdf
+1c6be6874e150898d9db984dd546e9e85c85724e,http://research.microsoft.com/~szli/papers/WHT-CVPR2004.pdf
+77c53ec6ea448db4dad586e002a395c4a47ecf66,http://pdfs.semanticscholar.org/77c5/3ec6ea448db4dad586e002a395c4a47ecf66.pdf
+97032b13f1371c8a813802ade7558e816d25c73f,http://pdfs.semanticscholar.org/9703/2b13f1371c8a813802ade7558e816d25c73f.pdf
+53698b91709112e5bb71eeeae94607db2aefc57c,http://pdfs.semanticscholar.org/5369/8b91709112e5bb71eeeae94607db2aefc57c.pdf
+66aad5b42b7dda077a492e5b2c7837a2a808c2fa,http://pdfs.semanticscholar.org/66aa/d5b42b7dda077a492e5b2c7837a2a808c2fa.pdf
+6bca0d1f46b0f7546ad4846e89b6b842d538ee4e,http://pdfs.semanticscholar.org/6bca/0d1f46b0f7546ad4846e89b6b842d538ee4e.pdf
+97b8249914e6b4f8757d22da51e8347995a40637,http://rogerioferis.com/VisualRecognitionAndSearch2014/material/papers/FerisTransMultimedia2012.pdf
+6a38c575733b0f7118970238e8f9b480522a2dbc,http://pdfs.semanticscholar.org/fbee/265a61fd5ec15a6ed8f490a8fd8d3359506e.pdf
+37d6f0eb074d207b53885bd2eb78ccc8a04be597,http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf
+3feb69531653e83d0986a0643e4a6210a088e3e5,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR_2007/data/papers/workshops/SLAM2007/papers/10-1569042275.pdf
+fdca08416bdadda91ae977db7d503e8610dd744f,http://pdfs.semanticscholar.org/fdca/08416bdadda91ae977db7d503e8610dd744f.pdf
+564035f1b8f06e9bb061255f40e3139fa57ea879,http://pdfs.semanticscholar.org/fcbf/61524a3d775947ea8bcef46d1b0a9cce7bfb.pdf
+830e5b1043227fe189b3f93619ef4c58868758a7,http://pdfs.semanticscholar.org/830e/5b1043227fe189b3f93619ef4c58868758a7.pdf
+84b4eb66ad75a74f77299f1ecb6aa6305362e8cd,https://www.researchgate.net/profile/Joao_Carvalho8/publication/4285113_A_Learning-based_Eye_Detector_Coupled_with_Eye_Candidate_Filtering_and_PCA_Features/links/0f31752d6b19aa31ec000000.pdf
+ae5bb02599244d6d88c4fe466a7fdd80aeb91af4,http://pdfs.semanticscholar.org/ae5b/b02599244d6d88c4fe466a7fdd80aeb91af4.pdf
+8ba67f45fbb1ce47a90df38f21834db37c840079,http://www.cmlab.csie.ntu.edu.tw/~yanying/paper/dsp006-chen.pdf
+a3f1db123ce1818971a57330d82901683d7c2b67,http://pdfs.semanticscholar.org/a3f1/db123ce1818971a57330d82901683d7c2b67.pdf
+aa577652ce4dad3ca3dde44f881972ae6e1acce7,http://pdfs.semanticscholar.org/aa57/7652ce4dad3ca3dde44f881972ae6e1acce7.pdf
+ee7093e91466b81d13f4d6933bcee48e4ee63a16,http://pdfs.semanticscholar.org/ee70/93e91466b81d13f4d6933bcee48e4ee63a16.pdf
+0af48a45e723f99b712a8ce97d7826002fe4d5a5,http://vision.seas.harvard.edu/papers/WideAngle_PAMI2013.pdf
+208a2c50edb5271a050fa9f29d3870f891daa4dc,http://pdfs.semanticscholar.org/c17c/55f43af5db44b6a4c17932aa3d7031985749.pdf
+560e0e58d0059259ddf86fcec1fa7975dee6a868,http://www.cs.tau.ac.il/~wolf/papers/lvfw.pdf
+55c81f15c89dc8f6eedab124ba4ccab18cf38327,http://pdfs.semanticscholar.org/d31e/258f6af40f457c27ce118986ea157673c9c4.pdf
+3bd1d41a656c8159305ba2aa395f68f41ab84f31,http://pdfs.semanticscholar.org/3bd1/d41a656c8159305ba2aa395f68f41ab84f31.pdf
+18d5b0d421332c9321920b07e0e8ac4a240e5f1f,http://pdfs.semanticscholar.org/18d5/b0d421332c9321920b07e0e8ac4a240e5f1f.pdf
+d9739d1b4478b0bf379fe755b3ce5abd8c668f89,http://pdfs.semanticscholar.org/d973/9d1b4478b0bf379fe755b3ce5abd8c668f89.pdf
+a44590528b18059b00d24ece4670668e86378a79,http://pdfs.semanticscholar.org/a445/90528b18059b00d24ece4670668e86378a79.pdf
+2d83ba2d43306e3c0587ef16f327d59bf4888dc3,http://www.cs.colby.edu/courses/S16/cs365/papers/karpath-deepVideo-CVPR14.pdf
+68d4056765c27fbcac233794857b7f5b8a6a82bf,http://pdfs.semanticscholar.org/68d4/056765c27fbcac233794857b7f5b8a6a82bf.pdf
+3cc3e01ac1369a0d1aa88fedda61d3c99a98b890,http://mi.eng.cam.ac.uk/~bdrs2/papers/mita_pami08.pdf
+102b27922e9bd56667303f986404f0e1243b68ab,https://applied-informatics-j.springeropen.com/track/pdf/10.1186/s40535-017-0042-5?site=applied-informatics-j.springeropen.com
+7e1c419065fdb9cf2a31aa4b5d0c0e03f7afd54e,http://jpinfotech.org/wp-content/plugins/infotech/file/upload/pdf/8962Face-Sketch-Synthesis-via-Sparse-Representation-Based-Greedy-Search-pdf.pdf
+739d400cb6fb730b894182b29171faaae79e3f01,http://pdfs.semanticscholar.org/739d/400cb6fb730b894182b29171faaae79e3f01.pdf
+04ff69aa20da4eeccdabbe127e3641b8e6502ec0,http://www.cv-foundation.org/openaccess/content_cvpr_2016_workshops/w28/papers/Peng_Sequential_Face_Alignment_CVPR_2016_paper.pdf
+75908b6460eb0781130ed0aa94585be25a584996,http://pdfs.semanticscholar.org/7590/8b6460eb0781130ed0aa94585be25a584996.pdf
+e9ed17fd8bf1f3d343198e206a4a7e0561ad7e66,http://pdfs.semanticscholar.org/e9ed/17fd8bf1f3d343198e206a4a7e0561ad7e66.pdf
+06d93a40365da90f30a624f15bf22a90d9cfe6bb,http://pdfs.semanticscholar.org/6940/40e59bffd860640e45c54ca7b093630caa39.pdf
+0229829e9a1eed5769a2b5eccddcaa7cd9460b92,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1A_098_ext.pdf
+142dcfc3c62b1f30a13f1f49c608be3e62033042,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Tsai_Adaptive_Region_Pooling_2015_CVPR_paper.pdf
+4cb8a691a15e050756640c0a35880cdd418e2b87,http://www.vision.caltech.edu/~bart/Publications/2004/BartUllmanClassBasedMatching.pdf
+f16a605abb5857c39a10709bd9f9d14cdaa7918f,http://pdfs.semanticscholar.org/f16a/605abb5857c39a10709bd9f9d14cdaa7918f.pdf
+3b9d94752f8488106b2c007e11c193f35d941e92,http://pdfs.semanticscholar.org/3b9d/94752f8488106b2c007e11c193f35d941e92.pdf
+11cc0774365b0cc0d3fa1313bef3d32c345507b1,http://pdfs.semanticscholar.org/11cc/0774365b0cc0d3fa1313bef3d32c345507b1.pdf
+1d7ecdcb63b20efb68bcc6fd99b1c24aa6508de9,https://web.stanford.edu/~bgirod/pdfs/ChenHuizhongTransPAMISep2014.pdf
+48319e611f0daaa758ed5dcf5a6496b4c6ef45f2,http://pdfs.semanticscholar.org/4831/9e611f0daaa758ed5dcf5a6496b4c6ef45f2.pdf
+516d0d9eb08825809e4618ca73a0697137ebabd5,http://web.engr.oregonstate.edu/~sinisa/talks/cvpr16_multimodal_oral.pdf
+cc3c273bb213240515147e8be68c50f7ea22777c,http://pdfs.semanticscholar.org/cc3c/273bb213240515147e8be68c50f7ea22777c.pdf
+ff46c41e9ea139d499dd349e78d7cc8be19f936c,http://pdfs.semanticscholar.org/ff46/c41e9ea139d499dd349e78d7cc8be19f936c.pdf
+5c2e264d6ac253693469bd190f323622c457ca05,http://vislab.ucr.edu/PUBLICATIONS/pubs/Journal%20and%20Conference%20Papers/after10-1-1997/Conference/2013/Improving%20large%20scale%20image%20retrieval%20using%20multi-level%20features13.pdf
+20a3ce81e7ddc1a121f4b13e439c4cbfb01adfba,http://pdfs.semanticscholar.org/e805/bc872e18277c7cbfce82206cf1667cce22cc.pdf
+14fdce01c958043140e3af0a7f274517b235adf3,http://pdfs.semanticscholar.org/14fd/ce01c958043140e3af0a7f274517b235adf3.pdf
+ce6f459462ea9419ca5adcc549d1d10e616c0213,http://pdfs.semanticscholar.org/ce6f/459462ea9419ca5adcc549d1d10e616c0213.pdf
+6f5151c7446552fd6a611bf6263f14e729805ec7,http://pdfs.semanticscholar.org/6f51/51c7446552fd6a611bf6263f14e729805ec7.pdf
+1177977134f6663fff0137f11b81be9c64c1f424,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_003.pdf
+122f51cee489ba4da5ab65064457fbe104713526,http://www.speakit.cn/Group/file/2015_LongShortTerm_ACMAVEC@MM15_EI.pdf
+370e0d9b89518a6b317a9f54f18d5398895a7046,http://pdfs.semanticscholar.org/370e/0d9b89518a6b317a9f54f18d5398895a7046.pdf
+dfd934ae448a1b8947d404b01303951b79b13801,http://pdfs.semanticscholar.org/dfd9/34ae448a1b8947d404b01303951b79b13801.pdf
+44f65e3304bdde4be04823fd7ca770c1c05c2cef,http://pdfs.semanticscholar.org/44f6/5e3304bdde4be04823fd7ca770c1c05c2cef.pdf
+63d865c66faaba68018defee0daf201db8ca79ed,http://pdfs.semanticscholar.org/63d8/65c66faaba68018defee0daf201db8ca79ed.pdf
+8af411697e73f6cfe691fe502d4bfb42510b4835,http://pdfs.semanticscholar.org/8af4/11697e73f6cfe691fe502d4bfb42510b4835.pdf
+b0de0892d2092c8c70aa22500fed31aa7eb4dd3f,http://arxiv.org/pdf/1504.05524.pdf
+18636347b8741d321980e8f91a44ee054b051574,http://biometrics.cse.msu.edu/Publications/SoftBiometrics/JainParkFacemarks_ICIP09.pdf
+1c530de1a94ac70bf9086e39af1712ea8d2d2781,http://pdfs.semanticscholar.org/1c53/0de1a94ac70bf9086e39af1712ea8d2d2781.pdf
+2d164f88a579ba53e06b601d39959aaaae9016b7,http://pdfs.semanticscholar.org/a666/2bf767df8f8a5bcb655142ac0fb7c4f524f1.pdf
+1828b1b0f5395b163fef087a72df0605249300c2,http://pdfs.semanticscholar.org/8b18/66a150521bfa18c3e6ec633e1acc79683749.pdf
+22043cbd2b70cb8195d8d0500460ddc00ddb1a62,http://uir.ulster.ac.uk/37137/2/Separability-Oriented%20Subclass%20Discriminant%20Analysis.pdf
+6fbb179a4ad39790f4558dd32316b9f2818cd106,http://pdfs.semanticscholar.org/6fbb/179a4ad39790f4558dd32316b9f2818cd106.pdf
+81da427270c100241c07143885ba3051ec4a2ecb,http://pdfs.semanticscholar.org/81da/427270c100241c07143885ba3051ec4a2ecb.pdf
+9329523dc0bd4e2896d5f63cf2440f21b7a16f16,http://pdfs.semanticscholar.org/d853/107e81c3db4a7909b599bff82ab1c48772af.pdf
+df71a00071d5a949f9c31371c2e5ee8b478e7dc8,http://studentlife.cs.dartmouth.edu/facelogging.pdf
+7b9b3794f79f87ca8a048d86954e0a72a5f97758,http://pdfs.semanticscholar.org/7b9b/3794f79f87ca8a048d86954e0a72a5f97758.pdf
+b84b7b035c574727e4c30889e973423fe15560d7,http://pdfs.semanticscholar.org/b84b/7b035c574727e4c30889e973423fe15560d7.pdf
+d83d2fb5403c823287f5889b44c1971f049a1c93,http://pdfs.semanticscholar.org/d83d/2fb5403c823287f5889b44c1971f049a1c93.pdf
+68bf7fc874c2db44d0446cdbb1e05f19c2239282,http://pdfs.semanticscholar.org/68bf/7fc874c2db44d0446cdbb1e05f19c2239282.pdf
+d5b5c63c5611d7b911bc1f7e161a0863a34d44ea,http://pdfs.semanticscholar.org/d5b5/c63c5611d7b911bc1f7e161a0863a34d44ea.pdf
+6859b891a079a30ef16f01ba8b85dc45bd22c352,http://pdfs.semanticscholar.org/6859/b891a079a30ef16f01ba8b85dc45bd22c352.pdf
+50c0de2cccf7084a81debad5fdb34a9139496da0,http://pdfs.semanticscholar.org/50c0/de2cccf7084a81debad5fdb34a9139496da0.pdf
+3a4f522fa9d2c37aeaed232b39fcbe1b64495134,http://ijireeice.com/upload/2016/may-16/IJIREEICE%20101.pdf
+00e9011f58a561500a2910a4013e6334627dee60,http://library.utia.cas.cz/separaty/2008/RO/somol-facial%20expression%20recognition%20using%20angle-related%20information%20from%20facial%20meshes.pdf
+3b15a48ffe3c6b3f2518a7c395280a11a5f58ab0,http://pdfs.semanticscholar.org/3b15/a48ffe3c6b3f2518a7c395280a11a5f58ab0.pdf
+75503aff70a61ff4810e85838a214be484a674ba,https://www.ri.cmu.edu/pub_files/2012/0/Improved-Facial-Expression.pdf
+141768ab49a5a9f5adcf0cf7e43a23471a7e5d82,http://arxiv.org/pdf/1405.0085v1.pdf
+03a8f53058127798bc2bc0245d21e78354f6c93b,http://www.robots.ox.ac.uk/~vgg/rg/slides/additiveclassifiers.pdf
+019e471667c72b5b3728b4a9ba9fe301a7426fb2,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2A_012.pdf
+56e885b9094391f7d55023a71a09822b38b26447,http://pdfs.semanticscholar.org/56e8/85b9094391f7d55023a71a09822b38b26447.pdf
+a02f0aad91c2d88b49c443e1e39c3acfc067a705,http://www.cs.columbia.edu/~wfan/PAPERS/SMC10cher.pdf
+dcce3d7e8d59041e84fcdf4418702fb0f8e35043,http://www.cfar.umd.edu/~rama/Conf.pdf-files/zhou04cvpr-10.pdf
+c44c84540db1c38ace232ef34b03bda1c81ba039,http://pdfs.semanticscholar.org/c44c/84540db1c38ace232ef34b03bda1c81ba039.pdf
+4aa8db1a3379f00db2403bba7dade5d6e258b9e9,http://pdfs.semanticscholar.org/4aa8/db1a3379f00db2403bba7dade5d6e258b9e9.pdf
+28e1668d7b61ce21bf306009a62b06593f1819e3,http://pdfs.semanticscholar.org/28e1/668d7b61ce21bf306009a62b06593f1819e3.pdf
+3a92de0a4a0ef4f88e1647633f1fbb13cd6a3c95,http://impca.cs.curtin.edu.au/pubs/2007/conferences/an_liu_venkatesh_cvpr07.pdf
+29d3ed0537e9ef62fd9ccffeeb72c1beb049e1ea,http://www.umiacs.umd.edu/~nshroff/DomainAdapt.pdf
+bfb98423941e51e3cd067cb085ebfa3087f3bfbe,http://pdfs.semanticscholar.org/bfb9/8423941e51e3cd067cb085ebfa3087f3bfbe.pdf
+8e0ede53dc94a4bfcf1238869bf1113f2a37b667,http://www.ri.cmu.edu/pub_files/2015/6/jpml_final.pdf
+39ecdbad173e45964ffe589b9ced9f1ebfe2d44e,http://measuringbehavior.org/files/ProceedingsPDF(website)/Gonzalez_FullPaper3.4.pdf
+034c2ed71c31cb0d984d66c7ca753ef2cb6196ca,http://pdfs.semanticscholar.org/034c/2ed71c31cb0d984d66c7ca753ef2cb6196ca.pdf
+25c3cdbde7054fbc647d8be0d746373e7b64d150,http://openaccess.thecvf.com/content_cvpr_2016/papers/Ouyang_ForgetMeNot_Memory-Aware_Forensic_CVPR_2016_paper.pdf
+c8adbe00b5661ab9b3726d01c6842c0d72c8d997,http://pdfs.semanticscholar.org/c8ad/be00b5661ab9b3726d01c6842c0d72c8d997.pdf
+d4ebf0a4f48275ecd8dbc2840b2a31cc07bd676d,http://pdfs.semanticscholar.org/d4eb/f0a4f48275ecd8dbc2840b2a31cc07bd676d.pdf
+472ba8dd4ec72b34e85e733bccebb115811fd726,http://pdfs.semanticscholar.org/472b/a8dd4ec72b34e85e733bccebb115811fd726.pdf
+c5468665d98ce7349d38afb620adbf51757ab86f,http://pdfs.semanticscholar.org/c546/8665d98ce7349d38afb620adbf51757ab86f.pdf
+137aa2f891d474fce1e7a1d1e9b3aefe21e22b34,http://www.csis.pace.edu/~ctappert/dps/2013BTAS/Papers/Paper%20139/PID2859389.pdf
+207798603e3089a1c807c93e5f36f7767055ec06,http://www1.se.cuhk.edu.hk/~hccl/publications/pub/2012_APSIPA_FacialExpression.pdf
+60a20d5023f2bcc241eb9e187b4ddece695c2b9b,http://pdfs.semanticscholar.org/60a2/0d5023f2bcc241eb9e187b4ddece695c2b9b.pdf
+b5d7c5aba7b1ededdf61700ca9d8591c65e84e88,http://pdfs.semanticscholar.org/b5d7/c5aba7b1ededdf61700ca9d8591c65e84e88.pdf
+ac9dfbeb58d591b5aea13d13a83b1e23e7ef1fea,http://pdfs.semanticscholar.org/ac9d/fbeb58d591b5aea13d13a83b1e23e7ef1fea.pdf
+2ff9ffedfc59422a8c7dac418a02d1415eec92f1,http://pdfs.semanticscholar.org/6e3b/778ad384101f792284b42844518f620143aa.pdf
+5d479f77ecccfac9f47d91544fd67df642dfab3c,http://pdfs.semanticscholar.org/7880/c21bb0de02cd4db095e011ac7aff47b35ee8.pdf
+63340c00896d76f4b728dbef85674d7ea8d5ab26,https://www.comp.nus.edu.sg/~tsim/documents/fkt-dsa-pami-published.pdf
+038ce930a02d38fb30d15aac654ec95640fe5cb0,http://www.robots.ox.ac.uk/~tvg/publications/2013/BVGFacialFeatureTrackerMobile.pdf
+9bcfadd22b2c84a717c56a2725971b6d49d3a804,http://pdfs.semanticscholar.org/9bcf/add22b2c84a717c56a2725971b6d49d3a804.pdf
+8f9f599c05a844206b1bd4947d0524234940803d,http://pdfs.semanticscholar.org/8f9f/599c05a844206b1bd4947d0524234940803d.pdf
+5083c6be0f8c85815ead5368882b584e4dfab4d1,http://pdfs.semanticscholar.org/5083/c6be0f8c85815ead5368882b584e4dfab4d1.pdf
+270e5266a1f6e76954dedbc2caf6ff61a5fbf8d0,http://pdfs.semanticscholar.org/270e/5266a1f6e76954dedbc2caf6ff61a5fbf8d0.pdf
+380dd0ddd5d69adc52defc095570d1c22952f5cc,http://pdfs.semanticscholar.org/380d/d0ddd5d69adc52defc095570d1c22952f5cc.pdf
+2a171f8d14b6b8735001a11c217af9587d095848,http://openaccess.thecvf.com/content_iccv_2015/papers/Zhang_Learning_Social_Relation_ICCV_2015_paper.pdf
+8b547b87fd95c8ff6a74f89a2b072b60ec0a3351,http://pdfs.semanticscholar.org/8b54/7b87fd95c8ff6a74f89a2b072b60ec0a3351.pdf
+35308a3fd49d4f33bdbd35fefee39e39fe6b30b7,https://biblio.ugent.be/publication/7238034/file/7238038.pdf
+33ac7fd3a622da23308f21b0c4986ae8a86ecd2b,http://pdfs.semanticscholar.org/33ac/7fd3a622da23308f21b0c4986ae8a86ecd2b.pdf
+f963967e52a5fd97fa3ebd679fd098c3cb70340e,http://pdfs.semanticscholar.org/f963/967e52a5fd97fa3ebd679fd098c3cb70340e.pdf
+39dc2ce4cce737e78010642048b6ed1b71e8ac2f,http://www.mirlab.org/conference_papers/International_Conference/ICME%202004/html/papers/P59890.pdf
+7553fba5c7f73098524fbb58ca534a65f08e91e7,http://pdfs.semanticscholar.org/7553/fba5c7f73098524fbb58ca534a65f08e91e7.pdf
+969dd8bc1179c047523d257516ade5d831d701ad,http://pdfs.semanticscholar.org/969d/d8bc1179c047523d257516ade5d831d701ad.pdf
+195df1106f4d7aff0e9cb609358abbf80f54a716,https://arxiv.org/pdf/1511.02917v1.pdf
+778c9f88839eb26129427e1b8633caa4bd4d275e,http://www.cs.berkeley.edu/~nzhang/papers/cvpr12_ppk.pdf
+7ad7897740e701eae455457ea74ac10f8b307bed,http://pdfs.semanticscholar.org/7ad7/897740e701eae455457ea74ac10f8b307bed.pdf
+4180978dbcd09162d166f7449136cb0b320adf1f,http://pdfs.semanticscholar.org/4180/978dbcd09162d166f7449136cb0b320adf1f.pdf
+6be0ab66c31023762e26d309a4a9d0096f72a7f0,http://pdfs.semanticscholar.org/6be0/ab66c31023762e26d309a4a9d0096f72a7f0.pdf
+67a50752358d5d287c2b55e7a45cc39be47bf7d0,http://pdfs.semanticscholar.org/67a5/0752358d5d287c2b55e7a45cc39be47bf7d0.pdf
+06a9ed612c8da85cb0ebb17fbe87f5a137541603,http://pdfs.semanticscholar.org/06a9/ed612c8da85cb0ebb17fbe87f5a137541603.pdf
+3998c5aa6be58cce8cb65a64cb168864093a9a3e,http://cvrr.ucsd.edu/publications/2014/HeadHand.pdf
+27c9ddb72360f4cd0f715cd7ea82fa399af91f11,http://pdfs.semanticscholar.org/27c9/ddb72360f4cd0f715cd7ea82fa399af91f11.pdf
+4b6be933057d939ddfa665501568ec4704fabb39,http://pdfs.semanticscholar.org/59c4/c6ba21354675401a173eb6c70500b99571cd.pdf
+4fc936102e2b5247473ea2dd94c514e320375abb,http://pdfs.semanticscholar.org/4fc9/36102e2b5247473ea2dd94c514e320375abb.pdf
+fc2bad3544c7c8dc7cd182f54888baf99ed75e53,http://pdfs.semanticscholar.org/fc2b/ad3544c7c8dc7cd182f54888baf99ed75e53.pdf
+061c84a4143e859a7caf6e6d283dfb30c23ee56e,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2B_008_ext.pdf
+24de12df6953151ef5cd0379e205eb0f57ff9d1f,http://www.researchgate.net/profile/Sebastian_Ventura/publication/270337594_A_Tutorial_on_Multi-Label_Learning/links/54bcd8460cf253b50e2d697b.pdf?origin=publication_list
+c41de506423e301ef2a10ea6f984e9e19ba091b4,http://www.ee.columbia.edu/ln/dvmm/publications/14/felixyu_llp_mm2014.pdf
+e6178de1ef15a6a973aad2791ce5fbabc2cb8ae5,http://pdfs.semanticscholar.org/e617/8de1ef15a6a973aad2791ce5fbabc2cb8ae5.pdf
+a1f40bcfadbeee66f67ab0755dd3037c030a7450,http://www.researchgate.net/profile/Jiansheng_Chen/publication/265016758_Face_Image_Quality_Assessment_Based_on_Learning_to_Rank/links/546d662d0cf2193b94c5852b.pdf
+3d62b2f9cef997fc37099305dabff356d39ed477,http://pdfs.semanticscholar.org/3d62/b2f9cef997fc37099305dabff356d39ed477.pdf
+c418a3441f992fea523926f837f4bfb742548c16,http://pdfs.semanticscholar.org/c418/a3441f992fea523926f837f4bfb742548c16.pdf
+42e3dac0df30d754c7c7dab9e1bb94990034a90d,https://arxiv.org/pdf/1311.5591v2.pdf
+a60907b7ee346b567972074e3e03c82f64d7ea30,http://pdfs.semanticscholar.org/a609/07b7ee346b567972074e3e03c82f64d7ea30.pdf
+14e949f5754f9e5160e8bfa3f1364dd92c2bb8d6,http://pdfs.semanticscholar.org/4b76/694ff2efb302074adf1ba6052d643177abd1.pdf
+7636f94ddce79f3dea375c56fbdaaa0f4d9854aa,http://pdfs.semanticscholar.org/7636/f94ddce79f3dea375c56fbdaaa0f4d9854aa.pdf
+402f6db00251a15d1d92507887b17e1c50feebca,http://pdfs.semanticscholar.org/402f/6db00251a15d1d92507887b17e1c50feebca.pdf
+473cbc5ec2609175041e1410bc6602b187d03b23,http://pdfs.semanticscholar.org/473c/bc5ec2609175041e1410bc6602b187d03b23.pdf
+d350a9390f0818703f886138da27bf8967fe8f51,http://mi.informatik.uni-siegen.de/publications/shahlaei_icip2016.pdf
+0037bff7be6d463785d4e5b2671da664cd7ef746,http://pdfs.semanticscholar.org/0037/bff7be6d463785d4e5b2671da664cd7ef746.pdf
+33f7e78950455c37236b31a6318194cfb2c302a4,http://pdfs.semanticscholar.org/33f7/e78950455c37236b31a6318194cfb2c302a4.pdf
+32728e1eb1da13686b69cc0bd7cce55a5c963cdd,http://pdfs.semanticscholar.org/3272/8e1eb1da13686b69cc0bd7cce55a5c963cdd.pdf
+193debca0be1c38dabc42dc772513e6653fd91d8,http://ibug.doc.ic.ac.uk/media/uploads/documents/trigeorgis2016mnemonic.pdf
+3fbd68d1268922ee50c92b28bd23ca6669ff87e5,http://pdfs.semanticscholar.org/f563/6a8021c09870c350e7505c87625fe1681bd4.pdf
+c32cd207855e301e6d1d9ddd3633c949630c793a,http://pdfs.semanticscholar.org/c32c/d207855e301e6d1d9ddd3633c949630c793a.pdf
+571f493c0ade12bbe960cfefc04b0e4607d8d4b2,http://pdfs.semanticscholar.org/571f/493c0ade12bbe960cfefc04b0e4607d8d4b2.pdf
+2e68190ebda2db8fb690e378fa213319ca915cf8,http://pdfs.semanticscholar.org/a705/804fa2e97ce23619b4f43da1b75fb138296d.pdf
+50f0c495a214b8d57892d43110728e54e413d47d,http://pdfs.semanticscholar.org/50f0/c495a214b8d57892d43110728e54e413d47d.pdf
+f6fa97fbfa07691bc9ff28caf93d0998a767a5c1,http://pdfs.semanticscholar.org/f6fa/97fbfa07691bc9ff28caf93d0998a767a5c1.pdf
+4bb03b27bc625e53d8d444c0ba3ee235d2f17e86,http://www.cs.utexas.edu/~grauman/papers/hwang_cvpr2010.pdf
+29d414bfde0dfb1478b2bdf67617597dd2d57fc6,http://pdfs.semanticscholar.org/29d4/14bfde0dfb1478b2bdf67617597dd2d57fc6.pdf
+365866dc937529c3079a962408bffaa9b87c1f06,http://pdfs.semanticscholar.org/3658/66dc937529c3079a962408bffaa9b87c1f06.pdf
+27eb7a6e1fb6b42516041def6fe64bd028b7614d,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zafeiriou_Joint_Unsupervised_Deformable_CVPR_2016_paper.pdf
+7577a1ddf9195513a5c976887ad806d1386bb1e9,http://pdfs.semanticscholar.org/7577/a1ddf9195513a5c976887ad806d1386bb1e9.pdf
+132527383890565d18f1b7ad50d76dfad2f14972,http://pdfs.semanticscholar.org/1325/27383890565d18f1b7ad50d76dfad2f14972.pdf
+0e50fe28229fea45527000b876eb4068abd6ed8c,http://pdfs.semanticscholar.org/0e50/fe28229fea45527000b876eb4068abd6ed8c.pdf
+5aafca76dbbbbaefd82f5f0265776afb5320dafe,http://pdfs.semanticscholar.org/5aaf/ca76dbbbbaefd82f5f0265776afb5320dafe.pdf
+9441253b638373a0027a5b4324b4ee5f0dffd670,http://pdfs.semanticscholar.org/9441/253b638373a0027a5b4324b4ee5f0dffd670.pdf
+4e4d034caa72dce6fca115e77c74ace826884c66,http://pdfs.semanticscholar.org/4e4d/034caa72dce6fca115e77c74ace826884c66.pdf
+0cf7da0df64557a4774100f6fde898bc4a3c4840,https://www2.eecs.berkeley.edu/Research/Projects/CS/vision/shape/berg-cvpr05.pdf
+b88d5e12089f6f598b8c72ebeffefc102cad1fc0,http://www.cv-foundation.org/openaccess/content_cvpr_2016_workshops/w24/papers/Wang_Robust_2DPCA_and_CVPR_2016_paper.pdf
+69eb6c91788e7c359ddd3500d01fb73433ce2e65,http://pdfs.semanticscholar.org/69eb/6c91788e7c359ddd3500d01fb73433ce2e65.pdf
+48174c414cfce7f1d71c4401d2b3d49ba91c5338,http://pdfs.semanticscholar.org/4817/4c414cfce7f1d71c4401d2b3d49ba91c5338.pdf
+ddea3c352f5041fb34433b635399711a90fde0e8,http://pdfs.semanticscholar.org/fc6b/2eb9253f33197b1ba8a045525487a16e8756.pdf
+4aeb87c11fb3a8ad603311c4650040fd3c088832,http://pdfs.semanticscholar.org/4aeb/87c11fb3a8ad603311c4650040fd3c088832.pdf
+5050807e90a925120cbc3a9cd13431b98965f4b9,http://pdfs.semanticscholar.org/5050/807e90a925120cbc3a9cd13431b98965f4b9.pdf
+60efdb2e204b2be6701a8e168983fa666feac1be,http://www.vision.ee.ethz.ch/en/publications/papers/articles/eth_biwi_01387.pdf
+8147ee02ec5ff3a585dddcd000974896cb2edc53,http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2012aePAMI.pdf
+c903af0d69edacf8d1bff3bfd85b9470f6c4c243,http://pdfs.semanticscholar.org/c903/af0d69edacf8d1bff3bfd85b9470f6c4c243.pdf
+2a7bca56e2539c8cf1ae4e9da521879b7951872d,http://pdfs.semanticscholar.org/2a7b/ca56e2539c8cf1ae4e9da521879b7951872d.pdf
+0f4eb63402a4f3bae8f396e12133684fb760def1,http://pdfs.semanticscholar.org/8c4e/b15de264af9f92a93d6e89d36295c5c4bf37.pdf
+63f2d1a64737afa1608588b9651b1e4207e82d1c,http://staff.estem-uc.edu.au/roland/files/2009/05/Rajagopalan_Goecke_ICIP2014_DetectingSelf-StimulatoryBehavioursForAutismDiagnosis.pdf
+a9adb6dcccab2d45828e11a6f152530ba8066de6,http://pdfs.semanticscholar.org/a9ad/b6dcccab2d45828e11a6f152530ba8066de6.pdf
+aea4128ba18689ff1af27b90c111bbd34013f8d5,http://pdfs.semanticscholar.org/aea4/128ba18689ff1af27b90c111bbd34013f8d5.pdf
+56e6f472090030a6f172a3e2f46ef9daf6cad757,http://pdfs.semanticscholar.org/56e6/f472090030a6f172a3e2f46ef9daf6cad757.pdf
+a3d78bc94d99fdec9f44a7aa40c175d5a106f0b9,http://pdfs.semanticscholar.org/a3d7/8bc94d99fdec9f44a7aa40c175d5a106f0b9.pdf
+89d7cc9bbcd2fdc4f4434d153ecb83764242227b,http://pdfs.semanticscholar.org/89d7/cc9bbcd2fdc4f4434d153ecb83764242227b.pdf
+113c22eed8383c74fe6b218743395532e2897e71,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Sapp_MODEC_Multimodal_Decomposable_2013_CVPR_paper.pdf
+f3f77b803b375f0c63971b59d0906cb700ea24ed,http://pdfs.semanticscholar.org/f3f7/7b803b375f0c63971b59d0906cb700ea24ed.pdf
+f7093b138fd31956e30d411a7043741dcb8ca4aa,http://pdfs.semanticscholar.org/f709/3b138fd31956e30d411a7043741dcb8ca4aa.pdf
+97865d31b5e771cf4162bc9eae7de6991ceb8bbf,http://pdfs.semanticscholar.org/9786/5d31b5e771cf4162bc9eae7de6991ceb8bbf.pdf
+b7f05d0771da64192f73bdb2535925b0e238d233,http://pdfs.semanticscholar.org/b7f0/5d0771da64192f73bdb2535925b0e238d233.pdf
+75ebe1e0ae9d42732e31948e2e9c03d680235c39,http://pdfs.semanticscholar.org/75eb/e1e0ae9d42732e31948e2e9c03d680235c39.pdf
+ba99c37a9220e08e1186f21cab11956d3f4fccc2,https://arxiv.org/pdf/1609.08677v1.pdf
+ec54000c6c0e660dd99051bdbd7aed2988e27ab8,http://pdfs.semanticscholar.org/ec54/000c6c0e660dd99051bdbd7aed2988e27ab8.pdf
+392d35bb359a3b61cca1360272a65690a97a2b3f,http://pdfs.semanticscholar.org/9cc1/0842f7701bfb92725b4dda4df391b0b341e3.pdf
+501eda2d04b1db717b7834800d74dacb7df58f91,http://pdfs.semanticscholar.org/501e/da2d04b1db717b7834800d74dacb7df58f91.pdf
+dd0a334b767e0065c730873a95312a89ef7d1c03,http://pdfs.semanticscholar.org/dd0a/334b767e0065c730873a95312a89ef7d1c03.pdf
+91b1a59b9e0e7f4db0828bf36654b84ba53b0557,http://www.kresttechnology.com/krest-academic-projects/krest-mtech-projects/ECE/MTech%20DSP%202015-16/MTech%20DSP%20BasePaper%202015-16/50.pdf
+072db5ba5b375d439ba6dbb6427c63cd7da6e940,http://users.ece.cmu.edu/~juefeix/tip_2014_felix.pdf
+4c1528bab3142ec957700ab502531e1a67e7f2f6,http://www.researchgate.net/profile/Xiaohua_Xie/publication/220932399_Restoration_of_a_Frontal_Illuminated_Face_Image_Based_on_KPCA/links/00b49522adfc6b1435000000.pdf
+d671a210990f67eba9b2d3dda8c2cb91575b4a7a,http://pdfs.semanticscholar.org/d671/a210990f67eba9b2d3dda8c2cb91575b4a7a.pdf
+2969f822b118637af29d8a3a0811ede2751897b5,http://iip.ict.ac.cn/sites/default/files/publication/2013_ICCV_xwzhao_Cascaded%20Shape%20Space%20Pruning%20for%20Robust%20Facial%20Landmark%20Detection.pdf
+76d939f73a327bf1087d91daa6a7824681d76ea1,http://pdfs.semanticscholar.org/76d9/39f73a327bf1087d91daa6a7824681d76ea1.pdf
+d95e6185f82e3ef3880a98122522eca8c8c3f34e,http://bbs.utdallas.edu/facelab/docs/4_05_otoole-pami.pdf
+3d0ef9bfd08a9252db6acfece3b83f3aa58b4cae,http://perso.telecom-paristech.fr/~chollet/Biblio/Articles/Domaines/BIOMET/Face/Kumar/CoreFaceCVPR04.pdf
+098fa9b4c3f7fb41c7a178d36f5dbb50a3ffa377,http://oui.csail.mit.edu/camera_readys/13.pdf
+0717b47ab84b848de37dbefd81cf8bf512b544ac,http://pdfs.semanticscholar.org/0717/b47ab84b848de37dbefd81cf8bf512b544ac.pdf
+6aa61d28750629febe257d1cb69379e14c66c67f,http://pdfs.semanticscholar.org/6aa6/1d28750629febe257d1cb69379e14c66c67f.pdf
+2f7e9b45255c9029d2ae97bbb004d6072e70fa79,http://pdfs.semanticscholar.org/2f7e/9b45255c9029d2ae97bbb004d6072e70fa79.pdf
+3a95eea0543cf05670e9ae28092a114e3dc3ab5c,https://arxiv.org/pdf/1209.0841v7.pdf
+8d4f12ed7b5a0eb3aa55c10154d9f1197a0d84f3,http://vision.ucsd.edu/~pdollar/files/papers/DollarCVPR10pose.pdf
+d140c5add2cddd4a572f07358d666fe00e8f4fe1,http://pdfs.semanticscholar.org/d140/c5add2cddd4a572f07358d666fe00e8f4fe1.pdf
+df2841a1d2a21a0fc6f14fe53b6124519f3812f9,http://pdfs.semanticscholar.org/df28/41a1d2a21a0fc6f14fe53b6124519f3812f9.pdf
+72450d7e5cbe79b05839c30a4f0284af5aa80053,http://pdfs.semanticscholar.org/7245/0d7e5cbe79b05839c30a4f0284af5aa80053.pdf
+7d3f6dd220bec883a44596ddec9b1f0ed4f6aca2,http://maths.dur.ac.uk/users/kasper.peeters/pdf/face_recognition/PCA/Togneri2010LinearRegressionFaceRecognition.pdf
+d00787e215bd74d32d80a6c115c4789214da5edb,http://pdfs.semanticscholar.org/d007/87e215bd74d32d80a6c115c4789214da5edb.pdf
+a5c04f2ad6a1f7c50b6aa5b1b71c36af76af06be,http://pdfs.semanticscholar.org/d788/2e6bd512b190e47be944dc9b58b612f12581.pdf
+133dd0f23e52c4e7bf254e8849ac6f8b17fcd22d,http://www.stat.ucla.edu/~caiming/pubs/1402.1783v2.pdf
+843e6f1e226480e8a6872d8fd7b7b2cd74b637a4,http://pdfs.semanticscholar.org/843e/6f1e226480e8a6872d8fd7b7b2cd74b637a4.pdf
+727ecf8c839c9b5f7b6c7afffe219e8b270e7e15,http://pdfs.semanticscholar.org/727e/cf8c839c9b5f7b6c7afffe219e8b270e7e15.pdf
+00ebc3fa871933265711558fa9486057937c416e,http://pdfs.semanticscholar.org/00eb/c3fa871933265711558fa9486057937c416e.pdf
+0145dc4505041bf39efa70ea6d95cf392cfe7f19,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/2B_046_ext.pdf
+7ff42ee09c9b1a508080837a3dc2ea780a1a839b,http://pdfs.semanticscholar.org/7ff4/2ee09c9b1a508080837a3dc2ea780a1a839b.pdf
+2a0efb1c17fbe78470acf01e4601a75735a805cc,http://pdfs.semanticscholar.org/2a0e/fb1c17fbe78470acf01e4601a75735a805cc.pdf
+7fa2605676c589a7d1a90d759f8d7832940118b5,http://www.ces.clemson.edu/~stb/publications/willimon_clothing_classification_icra2013.pdf
+bbcb4920b312da201bf4d2359383fb4ee3b17ed9,http://pdfs.semanticscholar.org/bbcb/4920b312da201bf4d2359383fb4ee3b17ed9.pdf
+37278ffce3a0fe2c2bbf6232e805dd3f5267eba3,http://arxiv.org/pdf/1602.04504v1.pdf
+14e9158daf17985ccbb15c9cd31cf457e5551990,http://pdfs.semanticscholar.org/14e9/158daf17985ccbb15c9cd31cf457e5551990.pdf
+7a8c2743db1749c2d9f16f62ee633574c1176e34,http://pdfs.semanticscholar.org/7a8c/2743db1749c2d9f16f62ee633574c1176e34.pdf
+d4c2d26523f577e2d72fc80109e2540c887255c8,http://pdfs.semanticscholar.org/d4c2/d26523f577e2d72fc80109e2540c887255c8.pdf
+4d21a2866cfd1f0fb2a223aab9eecfdec963059a,http://pdfs.semanticscholar.org/ddb3/5264ae7a74811bf8eb63d0eca7b7db07a4b1.pdf
+0294f992f8dfd8748703f953925f9aee14e1b2a2,http://pdfs.semanticscholar.org/0294/f992f8dfd8748703f953925f9aee14e1b2a2.pdf
+c46a4db7247d26aceafed3e4f38ce52d54361817,http://pdfs.semanticscholar.org/c46a/4db7247d26aceafed3e4f38ce52d54361817.pdf
+cac8bb0e393474b9fb3b810c61efdbc2e2c25c29,http://pdfs.semanticscholar.org/cac8/bb0e393474b9fb3b810c61efdbc2e2c25c29.pdf
+16d9b983796ffcd151bdb8e75fc7eb2e31230809,http://pdfs.semanticscholar.org/16d9/b983796ffcd151bdb8e75fc7eb2e31230809.pdf
+133f42368e63928dc860cce7618f30ee186d328c,http://pdfs.semanticscholar.org/50bd/1c76a5051db0b13fd76e7a633884ad49d5a8.pdf
+29fc4de6b680733e9447240b42db13d5832e408f,http://pdfs.semanticscholar.org/29fc/4de6b680733e9447240b42db13d5832e408f.pdf
+e5799fd239531644ad9270f49a3961d7540ce358,http://chenlab.ece.cornell.edu/people/ruogu/publications/ICIP13_Kinship.pdf
+53e081f5af505374c3b8491e9c4470fe77fe7934,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Hsieh_Unconstrained_Realtime_Facial_2015_CVPR_paper.pdf
+473031328c58b7461753e81251379331467f7a69,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W09/papers/Wang_Exploring_Fisher_Vector_2015_CVPR_paper.pdf
+1ee27c66fabde8ffe90bd2f4ccee5835f8dedbb9,http://pdfs.semanticscholar.org/1ee2/7c66fabde8ffe90bd2f4ccee5835f8dedbb9.pdf
+400e6c777d5894db2f6538c8ebd1124352b1c064,http://www.ee.ucr.edu/~lan/papers/FG13.pdf
+41b997f6cec7a6a773cd09f174cb6d2f036b36cd,http://pdfs.semanticscholar.org/41b9/97f6cec7a6a773cd09f174cb6d2f036b36cd.pdf
+24959d1a9c9faf29238163b6bcaf523e2b05a053,http://pdfs.semanticscholar.org/2495/9d1a9c9faf29238163b6bcaf523e2b05a053.pdf
+22ec8af0f0e5469e40592d29e28cfbdf1154c666,http://pdfs.semanticscholar.org/aa07/2c823da778a2b8bf1fc79141b3b228a14e99.pdf
+488375ae857a424febed7c0347cc9590989f01f7,http://pdfs.semanticscholar.org/4883/75ae857a424febed7c0347cc9590989f01f7.pdf
+50ff21e595e0ebe51ae808a2da3b7940549f4035,http://export.arxiv.org/pdf/1710.02985
+9294739e24e1929794330067b84f7eafd286e1c8,http://pdfs.semanticscholar.org/9294/739e24e1929794330067b84f7eafd286e1c8.pdf
+8855d6161d7e5b35f6c59e15b94db9fa5bbf2912,http://pdfs.semanticscholar.org/8855/d6161d7e5b35f6c59e15b94db9fa5bbf2912.pdf
+160259f98a6ec4ec3e3557de5e6ac5fa7f2e7f2b,https://infoscience.epfl.ch/record/207802/files/Discriminant-multilabel-Yuce.pdf
+2f59f28a1ca3130d413e8e8b59fb30d50ac020e2,http://pralab.diee.unica.it/sites/default/files/Satta_ICPR2014.pdf
+0334a8862634988cc684dacd4279c5c0d03704da,https://arxiv.org/pdf/1609.06591v1.pdf
+6bb95a0f3668cd36407c85899b71c9fe44bf9573,http://pdfs.semanticscholar.org/6bb9/5a0f3668cd36407c85899b71c9fe44bf9573.pdf
+7ba0bf9323c2d79300f1a433ff8b4fe0a00ad889,http://pdfs.semanticscholar.org/c67c/5780cb9870b70b78e4c82da4f92c7bb2592d.pdf
+2cf92ee60f719098acc3aae3981cedc47fa726b3,http://eksl.isi.edu/files/papers/sinjini_2007_1172280675.pdf
+539ca9db570b5e43be0576bb250e1ba7a727d640,http://pdfs.semanticscholar.org/539c/a9db570b5e43be0576bb250e1ba7a727d640.pdf
+cfd933f71f4a69625390819b7645598867900eab,http://pdfs.semanticscholar.org/cfd9/33f71f4a69625390819b7645598867900eab.pdf
+31afdb6fa95ded37e5871587df38976fdb8c0d67,http://www3.ntu.edu.sg/home/EXDJiang/ICASSP15.pdf
+69ff40fd5ce7c3e6db95a2b63d763edd8db3a102,http://pdfs.semanticscholar.org/69ff/40fd5ce7c3e6db95a2b63d763edd8db3a102.pdf
+2f8183b549ec51b67f7dad717f0db6bf342c9d02,http://www.wisdom.weizmann.ac.il/~ronen/papers/Kemelmacher%20Basri%20-%203D%20Face%20Reconstruction%20from%20a%20Single%20Image%20Using%20a%20Single%20Reference%20Face%20Shape.pdf
+5a029a0b0ae8ae7fc9043f0711b7c0d442bfd372,http://pdfs.semanticscholar.org/5a02/9a0b0ae8ae7fc9043f0711b7c0d442bfd372.pdf
+87f285782d755eb85d8922840e67ed9602cfd6b9,http://pdfs.semanticscholar.org/87f2/85782d755eb85d8922840e67ed9602cfd6b9.pdf
+1e19ea6e7f1c04a18c952ce29386252485e4031e,http://pdfs.semanticscholar.org/1e19/ea6e7f1c04a18c952ce29386252485e4031e.pdf
+f437b3884a9e5fab66740ca2a6f1f3a5724385ea,http://pdfs.semanticscholar.org/f437/b3884a9e5fab66740ca2a6f1f3a5724385ea.pdf
+28d06fd508d6f14cd15f251518b36da17909b79e,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Chen_Whats_in_a_2013_CVPR_paper.pdf
+dbaf89ca98dda2c99157c46abd136ace5bdc33b3,http://pdfs.semanticscholar.org/dbaf/89ca98dda2c99157c46abd136ace5bdc33b3.pdf
+e8f0f9b74db6794830baa2cab48d99d8724e8cb6,http://pdfs.semanticscholar.org/e8f0/f9b74db6794830baa2cab48d99d8724e8cb6.pdf
+aaa4c625f5f9b65c7f3df5c7bfe8a6595d0195a5,http://pdfs.semanticscholar.org/aaa4/c625f5f9b65c7f3df5c7bfe8a6595d0195a5.pdf
+2654ef92491cebeef0997fd4b599ac903e48d07a,http://www.ee.oulu.fi/~gyzhao/Papers/2008/Facial%20Expression%20Recognition%20from%20Near-Infrared%20Video%20Sequences.pdf
+3a2a37ca2bdc82bba4c8e80b45d9f038fe697c7d,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Vahdat_Handling_Uncertain_Tags_2013_ICCV_paper.pdf
+7ad77b6e727795a12fdacd1f328f4f904471233f,https://ueaeprints.uea.ac.uk/65008/1/Accepted_manuscript.pdf
+7e18b5f5b678aebc8df6246716bf63ea5d8d714e,http://pdfs.semanticscholar.org/7e18/b5f5b678aebc8df6246716bf63ea5d8d714e.pdf
+360d66e210f7011423364327b7eccdf758b5fdd2,http://www.eurasip.org/Proceedings/Eusipco/Eusipco2009/contents/papers/1569190652.pdf
+24c442ac3f6802296d71b1a1914b5d44e48b4f29,http://vision.caltech.edu/~xpburgos/papers/ICCVW15%20Burgos-Artizzu.pdf
+e9f1cdd9ea95810efed306a338de9e0de25990a0,http://pdfs.semanticscholar.org/e9f1/cdd9ea95810efed306a338de9e0de25990a0.pdf
+81bfe562e42f2eab3ae117c46c2e07b3d142dade,http://pdfs.semanticscholar.org/81bf/e562e42f2eab3ae117c46c2e07b3d142dade.pdf
+66a9935e958a779a3a2267c85ecb69fbbb75b8dc,http://pdfs.semanticscholar.org/66a9/935e958a779a3a2267c85ecb69fbbb75b8dc.pdf
+45fbeed124a8956477dbfc862c758a2ee2681278,http://pdfs.semanticscholar.org/fb2a/66f842ca2577d9ea8a8300b555b71bd9cee8.pdf
+1b69b860e22278a6f482507b8ce879082dd00c44,http://www.cs.utexas.edu/~chaoyeh/cvpr_2014_Inferring_Analogous_Attributes.pdf
+d115c4a66d765fef596b0b171febca334cea15b5,http://pdfs.semanticscholar.org/d115/c4a66d765fef596b0b171febca334cea15b5.pdf
+439ac8edfa1e7cbc65474cab544a5b8c4c65d5db,http://pdfs.semanticscholar.org/439a/c8edfa1e7cbc65474cab544a5b8c4c65d5db.pdf
+fb5280b80edcf088f9dd1da769463d48e7b08390,http://pdfs.semanticscholar.org/fb52/80b80edcf088f9dd1da769463d48e7b08390.pdf
+499f2b005e960a145619305814a4e9aa6a1bba6a,http://pdfs.semanticscholar.org/499f/2b005e960a145619305814a4e9aa6a1bba6a.pdf
+dced05d28f353be971ea2c14517e85bc457405f3,http://pdfs.semanticscholar.org/dced/05d28f353be971ea2c14517e85bc457405f3.pdf
+7fd700f4a010d765c506841de9884df394c1de1c,http://www.kyb.tuebingen.mpg.de/publications/attachments/CVPR2008-Blaschko_5069%5B0%5D.pdf
+29c7dfbbba7a74e9aafb6a6919629b0a7f576530,http://pdfs.semanticscholar.org/29c7/dfbbba7a74e9aafb6a6919629b0a7f576530.pdf
+8384e104796488fa2667c355dd15b65d6d5ff957,http://pdfs.semanticscholar.org/feea/803c1eaedc825509e24a8c1279ffe0251d9d.pdf
+c23153aade9be0c941390909c5d1aad8924821db,http://pdfs.semanticscholar.org/c231/53aade9be0c941390909c5d1aad8924821db.pdf
+d4b88be6ce77164f5eea1ed2b16b985c0670463a,http://pdfs.semanticscholar.org/d4b8/8be6ce77164f5eea1ed2b16b985c0670463a.pdf
+c220f457ad0b28886f8b3ef41f012dd0236cd91a,http://pdfs.semanticscholar.org/c220/f457ad0b28886f8b3ef41f012dd0236cd91a.pdf
+1bc9aaa41c08bbd0c01dd5d7d7ebf3e48ae78113,http://pdfs.semanticscholar.org/1bc9/aaa41c08bbd0c01dd5d7d7ebf3e48ae78113.pdf
+88f2952535df5859c8f60026f08b71976f8e19ec,http://pdfs.semanticscholar.org/88f2/952535df5859c8f60026f08b71976f8e19ec.pdf
+68f89c1ee75a018c8eff86e15b1d2383c250529b,http://pdfs.semanticscholar.org/68f8/9c1ee75a018c8eff86e15b1d2383c250529b.pdf
+4b04247c7f22410681b6aab053d9655cf7f3f888,http://pdfs.semanticscholar.org/60e5/0494dc26bd30e3c49b93ca85d0f79bf5c53f.pdf
+2af19b5ff2ca428fa42ef4b85ddbb576b5d9a5cc,http://pdfs.semanticscholar.org/2af1/9b5ff2ca428fa42ef4b85ddbb576b5d9a5cc.pdf
+384f972c81c52fe36849600728865ea50a0c4670,http://pdfs.semanticscholar.org/dad7/3d70b4fa77d67c5c02e3ecba21c52ab9a386.pdf
+22f94c43dd8b203f073f782d91e701108909690b,http://pdfs.semanticscholar.org/22f9/4c43dd8b203f073f782d91e701108909690b.pdf
+74875368649f52f74bfc4355689b85a724c3db47,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Yan_Object_Detection_by_2015_CVPR_paper.pdf
+03dba79518434ba4a937b2980fbdc8bafc048b36,http://people.ee.duke.edu/~jh313/resource/TRAIT.pdf
+f5af4e9086b0c3aee942cb93ece5820bdc9c9748,http://pdfs.semanticscholar.org/f5af/4e9086b0c3aee942cb93ece5820bdc9c9748.pdf
+2f78e471d2ec66057b7b718fab8bfd8e5183d8f4,http://pdfs.semanticscholar.org/2f78/e471d2ec66057b7b718fab8bfd8e5183d8f4.pdf
+6769cfbd85329e4815bb1332b118b01119975a95,http://pdfs.semanticscholar.org/6769/cfbd85329e4815bb1332b118b01119975a95.pdf
+087002ab569e35432cdeb8e63b2c94f1abc53ea9,http://sergioescalera.com/wp-content/uploads/2015/07/CVPR2015MoeslundSlides.pdf
+c00f402b9cfc3f8dd2c74d6b3552acbd1f358301,http://pdfs.semanticscholar.org/c00f/402b9cfc3f8dd2c74d6b3552acbd1f358301.pdf
+5dd496e58cfedfc11b4b43c4ffe44ac72493bf55,http://pdfs.semanticscholar.org/5dd4/96e58cfedfc11b4b43c4ffe44ac72493bf55.pdf
+3af8d38469fb21368ee947d53746ea68cd64eeae,http://pdfs.semanticscholar.org/3af8/d38469fb21368ee947d53746ea68cd64eeae.pdf
+7f23a4bb0c777dd72cca7665a5f370ac7980217e,http://pdfs.semanticscholar.org/ce70/fecc7150816e081b422cbc157bd9019cdf25.pdf
+287900f41dd880802aa57f602e4094a8a9e5ae56,https://www.comp.nus.edu.sg/~tsim/documents/cross-expression.pdf
+7fc76446d2b11fc0479df6e285723ceb4244d4ef,http://pdfs.semanticscholar.org/7fc7/6446d2b11fc0479df6e285723ceb4244d4ef.pdf
+4cac9eda716a0addb73bd7ffea2a5fb0e6ec2367,http://pdfs.semanticscholar.org/4cac/9eda716a0addb73bd7ffea2a5fb0e6ec2367.pdf
+411ee9236095f8f5ca3b9ef18fd3381c1c68c4b8,http://pdfs.semanticscholar.org/411e/e9236095f8f5ca3b9ef18fd3381c1c68c4b8.pdf
+b81cae2927598253da37954fb36a2549c5405cdb,http://pdfs.semanticscholar.org/d892/753827950a227179b691e6df85820ab7c417.pdf
+0b84f07af44f964817675ad961def8a51406dd2e,https://arxiv.org/pdf/1604.02531v2.pdf
+7b3b7769c3ccbdf7c7e2c73db13a4d32bf93d21f,http://cvrr.ucsd.edu/publications/2012/Martin_AutoUI2012.pdf
+757e4cb981e807d83539d9982ad325331cb59b16,http://pdfs.semanticscholar.org/757e/4cb981e807d83539d9982ad325331cb59b16.pdf
+f67a73c9dd1e05bfc51219e70536dbb49158f7bc,http://pdfs.semanticscholar.org/f67a/73c9dd1e05bfc51219e70536dbb49158f7bc.pdf
+fa398c6d6bd03df839dce7b59e04f473bc0ed660,https://www.researchgate.net/profile/Sujata_Pandey/publication/4308761_A_Novel_Approach_for_Face_Recognition_Using_DCT_Coefficients_Re-scaling_for_Illumination_Normalization/links/004635211c385bb7e3000000.pdf
+83b7578e2d9fa60d33d9336be334f6f2cc4f218f,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_101_ext.pdf
+13d9da779138af990d761ef84556e3e5c1e0eb94,http://www.cs.berkeley.edu/~malik/papers/ferencz-learnedmiller-malik08.pdf
+3dcebd4a1d66313dcd043f71162d677761b07a0d,http://cvhci.ira.uka.de/download/publications/2008/siu2008_lbp.pdf
+3f0e0739677eb53a9d16feafc2d9a881b9677b63,http://pdfs.semanticscholar.org/d309/e414f0d6e56e7ba45736d28ee58ae2bad478.pdf
+0c30f6303dc1ff6d05c7cee4f8952b74b9533928,http://humansensing.cs.cmu.edu/sites/default/files/15parda.pdf
+0b2277a0609565c30a8ee3e7e193ce7f79ab48b0,http://ivg.au.tsinghua.edu.cn/paper/2012_Cost-sensitive%20semi-supervised%20discriminant%20analysis%20for%20face%20recognition.pdf
+8ec82da82416bb8da8cdf2140c740e1574eaf84f,http://pdfs.semanticscholar.org/8ec8/2da82416bb8da8cdf2140c740e1574eaf84f.pdf
+477811ff147f99b21e3c28309abff1304106dbbe,http://pdfs.semanticscholar.org/f0f8/23511188d8c10b67512d23eb9cb7f3dd2f9a.pdf
+68d40176e878ebffbc01ffb0556e8cb2756dd9e9,http://pdfs.semanticscholar.org/68d4/0176e878ebffbc01ffb0556e8cb2756dd9e9.pdf
+0fe96806c009e8d095205e8f954d41b2b9fd5dcf,http://pdfs.semanticscholar.org/51be/ffe5f96ccb6b64057a540a7874185ccad8d7.pdf
+0e1983e9d0e8cb4cbffef7af06f6bc8e3f191a64,http://mirlab.org/conference_papers/International_Conference/ACM%202005/docs/mm1039.pdf
+7d8c2d29deb80ceed3c8568100376195ce0914cb,https://arxiv.org/pdf/1708.01988v1.pdf
+3edc43e336be075dca77c7e173b555b6c14274d8,http://pdfs.semanticscholar.org/3edc/43e336be075dca77c7e173b555b6c14274d8.pdf
+0e5dcc6ae52625fd0637c6bba46a973e46d58b9c,http://pdfs.semanticscholar.org/0e5d/cc6ae52625fd0637c6bba46a973e46d58b9c.pdf
+304b1f14ca6a37552dbfac443f3d5b36dbe1a451,http://pdfs.semanticscholar.org/304b/1f14ca6a37552dbfac443f3d5b36dbe1a451.pdf
+2e091b311ac48c18aaedbb5117e94213f1dbb529,http://pdfs.semanticscholar.org/b1a1/a049f1d78f6e3d072236237c467292ccd537.pdf
+b5402c03a02b059b76be829330d38db8e921e4b5,http://pdfs.semanticscholar.org/b540/2c03a02b059b76be829330d38db8e921e4b5.pdf
+1862cb5728990f189fa91c67028f6d77b5ac94f6,http://lvdmaaten.github.io/publications/papers/CVPR_2014.pdf
+8b2704a5218a6ef70e553eaf0a463bd55129b69d,http://pdfs.semanticscholar.org/8b27/04a5218a6ef70e553eaf0a463bd55129b69d.pdf
+8f89aed13cb3555b56fccd715753f9ea72f27f05,http://pdfs.semanticscholar.org/8f89/aed13cb3555b56fccd715753f9ea72f27f05.pdf
+1dc241ee162db246882f366644171c11f7aed96d,http://pdfs.semanticscholar.org/1dc2/41ee162db246882f366644171c11f7aed96d.pdf
+197eaa59a003a4c7cc77c1abe0f99d942f716942,http://www.lv-nus.org/papers%5C2009%5C2009_mm_age.pdf
+6412d8bbcc01f595a2982d6141e4b93e7e982d0f,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w6/papers/Kang_Deep_Convolutional_Neural_CVPR_2017_paper.pdf
+143bee9120bcd7df29a0f2ad6f0f0abfb23977b8,http://pdfs.semanticscholar.org/143b/ee9120bcd7df29a0f2ad6f0f0abfb23977b8.pdf
+132f88626f6760d769c95984212ed0915790b625,http://pdfs.semanticscholar.org/132f/88626f6760d769c95984212ed0915790b625.pdf
+1962e4c9f60864b96c49d85eb897141486e9f6d1,http://www.patternrecognition.cn/~zhongjin/2011/2011Lai_NCP.pdf
+3042d3727b2f80453ff5378b4b3043abb2d685a1,http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0219.pdf
+9863dd1e2a3d3b4910a91176ac0f2fee5eb3b5e1,http://xm2vtsdb.ee.surrey.ac.uk/CVSSP/Publications/papers/kim-ieee-2006.pdf
+4adca62f888226d3a16654ca499bf2a7d3d11b71,http://pdfs.semanticscholar.org/5525/119941f6710fcde85cf71cc2ca25484e78c6.pdf
+413a184b584dc2b669fbe731ace1e48b22945443,http://www.vision.ee.ethz.ch/en/publications/papers/articles/eth_biwi_00911.pdf
+22717ad3ad1dfcbb0fd2f866da63abbde9af0b09,http://pdfs.semanticscholar.org/2271/7ad3ad1dfcbb0fd2f866da63abbde9af0b09.pdf
+7ab7befcd319d55d26c1e4b7b9560da5763906f3,http://www.researchgate.net/profile/Lee_Ping-Han/publication/236160185_Facial_Trait_Code/links/0c96051e26825bd65a000000.pdf
+4b71d1ff7e589b94e0f97271c052699157e6dc4a,http://pdfs.semanticscholar.org/4b71/d1ff7e589b94e0f97271c052699157e6dc4a.pdf
+52a9f957f776c8b3d913cfcd20452b9e31c27845,http://pdfs.semanticscholar.org/52a9/f957f776c8b3d913cfcd20452b9e31c27845.pdf
+721e5ba3383b05a78ef1dfe85bf38efa7e2d611d,http://pdfs.semanticscholar.org/74f1/9d0986c9d39aabb359abaa2a87a248a48deb.pdf
+a3c8c7da177cd08978b2ad613c1d5cb89e0de741,http://pdfs.semanticscholar.org/a3c8/c7da177cd08978b2ad613c1d5cb89e0de741.pdf
+616d3d6d82dbc2697d150e879996d878ef74faef,https://www.ll.mit.edu/mission/cybersec/publications/publication-files/full_papers/2016_Khorrami_ICIP_FP.pdf
+3e51d634faacf58e7903750f17111d0d172a0bf1,http://www.eurasip.org/Proceedings/Eusipco/Eusipco2014/HTML/papers/1569924869.pdf
+ca83053d9a790319b11a04eac5ab412e7fcab914,http://pdfs.semanticscholar.org/ca83/053d9a790319b11a04eac5ab412e7fcab914.pdf
+3083d2c6d4f456e01cbb72930dc2207af98a6244,http://pdfs.semanticscholar.org/3083/d2c6d4f456e01cbb72930dc2207af98a6244.pdf
+a784a0d1cea26f18626682ab108ce2c9221d1e53,http://openaccess.thecvf.com/content_ICCV_2017/papers/Agustsson_Anchored_Regression_Networks_ICCV_2017_paper.pdf
+05ea7930ae26165e7e51ff11b91c7aa8d7722002,http://www.stat.ucla.edu/~sczhu/papers/PAMI_car_occlusion_AOG.pdf
+e3917d6935586b90baae18d938295e5b089b5c62,http://www.iti.gr/files/tip05tsalakanidou.pdf
+81e366ed1834a8d01c4457eccae4d57d169cb932,http://www-public.int-edu.eu/~horain/Publications/Wesierski%20ICCV_2013.pdf
+8e33183a0ed7141aa4fa9d87ef3be334727c76c0,http://pdfs.semanticscholar.org/8e33/183a0ed7141aa4fa9d87ef3be334727c76c0.pdf
+0dfa460a35f7cab4705726b6367557b9f7842c65,https://arxiv.org/pdf/1504.01561v1.pdf
+5dcf78de4d3d867d0fd4a3105f0defae2234b9cb,http://pdfs.semanticscholar.org/5dcf/78de4d3d867d0fd4a3105f0defae2234b9cb.pdf
+33554ff9d1d3b32f67020598320d3d761d7ec81f,http://pdfs.semanticscholar.org/3355/4ff9d1d3b32f67020598320d3d761d7ec81f.pdf
+41aa209e9d294d370357434f310d49b2b0baebeb,https://arxiv.org/pdf/1605.05440v1.pdf
+63488398f397b55552f484409b86d812dacde99a,http://pdfs.semanticscholar.org/6348/8398f397b55552f484409b86d812dacde99a.pdf
+1c2724243b27a18a2302f12dea79d9a1d4460e35,http://read.pudn.com/downloads157/doc/697237/kfd/Fisher+Kernel%20criterion%20for%20discriminant%20analysis.pdf
+2c0acaec54ab2585ff807e18b6b9550c44651eab,http://pdfs.semanticscholar.org/2c0a/caec54ab2585ff807e18b6b9550c44651eab.pdf
+19841b721bfe31899e238982a22257287b9be66a,http://pdfs.semanticscholar.org/1984/1b721bfe31899e238982a22257287b9be66a.pdf
+d6c7092111a8619ed7a6b01b00c5f75949f137bf,http://pdfs.semanticscholar.org/d6c7/092111a8619ed7a6b01b00c5f75949f137bf.pdf
+310da8bd81c963bd510bf9aaa4d028a643555c84,http://www.cs.sunysb.edu/~ial/content/papers/2005/Zhang2005cvpr2.pdf
+78a11b7d2d7e1b19d92d2afd51bd3624eca86c3c,http://pdfs.semanticscholar.org/78a1/1b7d2d7e1b19d92d2afd51bd3624eca86c3c.pdf
+345bea5f7d42926f857f395c371118a00382447f,http://grail.cs.washington.edu/wp-content/uploads/2016/09/kemelmacher2016tp.pdf
+9be94fa0330dd493f127d51e4ef7f9fd64613cfc,http://pdfs.semanticscholar.org/9be9/4fa0330dd493f127d51e4ef7f9fd64613cfc.pdf
+bafb8812817db7445fe0e1362410a372578ec1fc,http://www.cin.ufpe.br/~rps/Artigos/Image-Quality-Based%20Adaptive%20Face%20Recognition.pdf
+0c7f27d23a162d4f3896325d147f412c40160b52,http://pdfs.semanticscholar.org/0c7f/27d23a162d4f3896325d147f412c40160b52.pdf
+5860cf0f24f2ec3f8cbc39292976eed52ba2eafd,http://pdfs.semanticscholar.org/5860/cf0f24f2ec3f8cbc39292976eed52ba2eafd.pdf
+a694180a683f7f4361042c61648aa97d222602db,http://www.iab-rubric.org/papers/ICB16-Autoscat.pdf
+50ccc98d9ce06160cdf92aaf470b8f4edbd8b899,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W08/papers/Qu_Towards_Robust_Cascaded_2015_CVPR_paper.pdf
+b375db63742f8a67c2a7d663f23774aedccc84e5,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2014/W17/papers/Conti_Brain-inspired_Classroom_Occupancy_2014_CVPR_paper.pdf
+56f231fc40424ed9a7c93cbc9f5a99d022e1d242,http://pdfs.semanticscholar.org/d060/f2f3641c6a89ade021eea749414a5c6b443f.pdf
+6e93fd7400585f5df57b5343699cb7cda20cfcc2,http://pdfs.semanticscholar.org/a52f/4d315adf0aa60ba284fd4caf22485625cedf.pdf
+75fcbb01bc7e53e9de89cb1857a527f97ea532ce,http://pdfs.semanticscholar.org/75fc/bb01bc7e53e9de89cb1857a527f97ea532ce.pdf
+2d38fd1df95f5025e2cee5bc439ba92b369a93df,http://pdfs.semanticscholar.org/2d38/fd1df95f5025e2cee5bc439ba92b369a93df.pdf
+dc2e805d0038f9d1b3d1bc79192f1d90f6091ecb,http://pdfs.semanticscholar.org/dc2e/805d0038f9d1b3d1bc79192f1d90f6091ecb.pdf
+7fce5769a7d9c69248178989a99d1231daa4fce9,http://pdfs.semanticscholar.org/7fce/5769a7d9c69248178989a99d1231daa4fce9.pdf
+fc798314994bf94d1cde8d615ba4d5e61b6268b6,http://pdfs.semanticscholar.org/fc79/8314994bf94d1cde8d615ba4d5e61b6268b6.pdf
+5b6593a6497868a0d19312952d2b753232414c23,http://pdfs.semanticscholar.org/5b65/93a6497868a0d19312952d2b753232414c23.pdf
+ef2a5a26448636570986d5cda8376da83d96ef87,http://pdfs.semanticscholar.org/ef2a/5a26448636570986d5cda8376da83d96ef87.pdf
+543f21d81bbea89f901dfcc01f4e332a9af6682d,http://pdfs.semanticscholar.org/543f/21d81bbea89f901dfcc01f4e332a9af6682d.pdf
+8323529cf37f955fb3fc6674af6e708374006a28,http://researcher.ibm.com/researcher/files/us-smiyaza/FPIV04.pdf
+2a4153655ad1169d482e22c468d67f3bc2c49f12,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhu_Face_Alignment_Across_CVPR_2016_paper.pdf
+8c9c8111e18f8798a612e7386e88536dfe26455e,http://pdfs.semanticscholar.org/8c9c/8111e18f8798a612e7386e88536dfe26455e.pdf
+3bfb9ba4b74b2b952868f590ff2f164de0c7d402,http://qil.uh.edu/qil/websitecontent/pdf/2015-8.pdf
+11ac88aebe0230e743c7ea2c2a76b5d4acbfecd0,http://pdfs.semanticscholar.org/11ac/88aebe0230e743c7ea2c2a76b5d4acbfecd0.pdf
+852ff0d410a25ebb7936043a05efe2469c699e4b,http://pdfs.semanticscholar.org/852f/f0d410a25ebb7936043a05efe2469c699e4b.pdf
+5fba1b179ac80fee80548a0795d3f72b1b6e49cd,http://pdfs.semanticscholar.org/fe88/e30cfca9161b598ea8a26985df5832259924.pdf
+2162654cb02bcd10794ae7e7d610c011ce0fb51b,http://www.jdl.ac.cn/doc/2011/201511610103648366_%E5%88%98%E8%B4%A4%E6%98%8E.pdf
+b133b2d7df9b848253b9d75e2ca5c68e21eba008,http://pdfs.semanticscholar.org/c2c1/ab9eac2907e15618d80f5ce0c9b60f2c36cc.pdf
+134aad8153ab78345b2581efac2fe175a3084154,http://www.cs.utexas.edu/~ai-lab/pubs/vijayanarasimhan_grauman_cvpr2008.pdf
+161eb88031f382e6a1d630cd9a1b9c4bc6b47652,http://arxiv.org/pdf/1505.04026v1.pdf
+2a88541448be2eb1b953ac2c0c54da240b47dd8a,http://pdfs.semanticscholar.org/2c44/0d01738a2fed3e3bd6520471acacb6c96e3b.pdf
+3fa738ab3c79eacdbfafa4c9950ef74f115a3d84,http://pdfs.semanticscholar.org/3fa7/38ab3c79eacdbfafa4c9950ef74f115a3d84.pdf
+d930ec59b87004fd172721f6684963e00137745f,http://pdfs.semanticscholar.org/d930/ec59b87004fd172721f6684963e00137745f.pdf
+d4001826cc6171c821281e2771af3a36dd01ffc0,http://pdfs.semanticscholar.org/d400/1826cc6171c821281e2771af3a36dd01ffc0.pdf
+185360fe1d024a3313042805ee201a75eac50131,http://cvit.iiit.ac.in/papers/deidentTCSVT2k11.pdf
+1a9a192b700c080c7887e5862c1ec578012f9ed1,http://pdfs.semanticscholar.org/1a9a/192b700c080c7887e5862c1ec578012f9ed1.pdf
+34c8de02a5064e27760d33b861b7e47161592e65,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w14/papers/Han_Video_Action_Recognition_CVPR_2017_paper.pdf
+763158cef9d1e4041f24fce4cf9d6a3b7a7f08ff,http://pdfs.semanticscholar.org/7631/58cef9d1e4041f24fce4cf9d6a3b7a7f08ff.pdf
+01cc8a712e67384f9ef9f30580b7415bfd71e980,http://pdfs.semanticscholar.org/01cc/8a712e67384f9ef9f30580b7415bfd71e980.pdf
+744db9bd550bf5e109d44c2edabffec28c867b91,http://pdfs.semanticscholar.org/744d/b9bd550bf5e109d44c2edabffec28c867b91.pdf
+65bba9fba03e420c96ec432a2a82521ddd848c09,http://pdfs.semanticscholar.org/65bb/a9fba03e420c96ec432a2a82521ddd848c09.pdf
+3d24b386d003bee176a942c26336dbe8f427aadd,https://arxiv.org/pdf/1611.09967v1.pdf
+21104bcf07ef0269ab133471a3200b9bf94b2948,http://www.cs.utexas.edu/~grauman/papers/liang-cvpr2014.pdf
+3a846704ef4792dd329a5c7a2cb8b330ab6b8b4e,http://www.wjscheirer.com/papers/wjs_cswb2010_grab.pdf
+5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725,http://pdfs.semanticscholar.org/63b2/f5348af0f969dfc2afb4977732393c6459ec.pdf
+5859774103306113707db02fe2dd3ac9f91f1b9e,http://www.wisdom.weizmann.ac.il/~shimon/papers/IJCV29_98.pdf
+04f55f81bbd879773e2b8df9c6b7c1d324bc72d8,http://pdfs.semanticscholar.org/04f5/5f81bbd879773e2b8df9c6b7c1d324bc72d8.pdf
+5e59193a0fc22a0c37301fb05b198dd96df94266,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Dessein_Example-Based_Modeling_of_ICCV_2015_paper.pdf
+8c955f3827a27e92b6858497284a9559d2d0623a,http://pdfs.semanticscholar.org/8c95/5f3827a27e92b6858497284a9559d2d0623a.pdf
+c6ea6fee4823b511eecf41f6c2574a0728055baf,http://pdfs.semanticscholar.org/c6ea/6fee4823b511eecf41f6c2574a0728055baf.pdf
+d185f4f05c587e23c0119f2cdfac8ea335197ac0,http://pdfs.semanticscholar.org/d185/f4f05c587e23c0119f2cdfac8ea335197ac0.pdf
+4e4fa167d772f34dfffc374e021ab3044566afc3,http://pdfs.semanticscholar.org/4e4f/a167d772f34dfffc374e021ab3044566afc3.pdf
+0601416ade6707c689b44a5bb67dab58d5c27814,http://pdfs.semanticscholar.org/0601/416ade6707c689b44a5bb67dab58d5c27814.pdf
+0c069a870367b54dd06d0da63b1e3a900a257298,http://pdfs.semanticscholar.org/cdb8/36785579a4ea3d0eff26dbba8cf845a347d2.pdf
+b5cd8151f9354ee38b73be1d1457d28e39d3c2c6,http://pdfs.semanticscholar.org/b5cd/8151f9354ee38b73be1d1457d28e39d3c2c6.pdf
+71fd29c2ae9cc9e4f959268674b6b563c06d9480,http://pdfs.semanticscholar.org/71fd/29c2ae9cc9e4f959268674b6b563c06d9480.pdf
+48729e4de8aa478ee5eeeb08a72a446b0f5367d5,http://faculty.ucmerced.edu/mhyang/papers/icip14_cfh.pdf
+21b16df93f0fab4864816f35ccb3207778a51952,http://sibgrapi.sid.inpe.br/col/sid.inpe.br/sibgrapi/2015/06.18.19.06/doc/PID3766353.pdf
+94b9c0a6515913bad345f0940ee233cdf82fffe1,http://pdfs.semanticscholar.org/94b9/c0a6515913bad345f0940ee233cdf82fffe1.pdf
+8bf57dc0dd45ed969ad9690033d44af24fd18e05,http://pdfs.semanticscholar.org/8bf5/7dc0dd45ed969ad9690033d44af24fd18e05.pdf
+6c2b392b32b2fd0fe364b20c496fcf869eac0a98,http://www3.ntu.edu.sg/home/EXDJiang/JiangX.D.-MVA-13.pdf
+26433d86b9c215b5a6871c70197ff4081d63054a,https://www.researchgate.net/profile/WL_Woo/publication/221093080_Multimodal_biometric_fusion_at_feature_level_Face_and_palmprint/links/0fcfd5134b4f62c892000000.pdf
+2050847bc7a1a0453891f03aeeb4643e360fde7d,https://cvhci.anthropomatik.kit.edu/~mtapaswi/papers/ICMR2015.pdf
+0be2245b2b016de1dcce75ffb3371a5e4b1e731b,http://pdfs.semanticscholar.org/0be2/245b2b016de1dcce75ffb3371a5e4b1e731b.pdf
+39150acac6ce7fba56d54248f9c0badbfaeef0ea,http://pdfs.semanticscholar.org/3915/0acac6ce7fba56d54248f9c0badbfaeef0ea.pdf
+7b9961094d3e664fc76b12211f06e12c47a7e77d,http://pdfs.semanticscholar.org/7b99/61094d3e664fc76b12211f06e12c47a7e77d.pdf
+922838dd98d599d1d229cc73896d55e7a769aa7c,http://www.cs.umass.edu/~elm/papers/HuangCVPR12.pdf
+78f08cc9f845dc112f892a67e279a8366663e26d,http://pdfs.semanticscholar.org/78f0/8cc9f845dc112f892a67e279a8366663e26d.pdf
+0729628db4bb99f1f70dd6cb2353d7b76a9fce47,http://pdfs.semanticscholar.org/f02a/dc21a307d32c1145f4ade65504b016b0faac.pdf
+02601d184d79742c7cd0c0ed80e846d95def052e,http://arxiv.org/pdf/1503.00488v3.pdf
+ad75330953d9aacc05b5ca1a50c4fed3e7ca1e21,http://www.science.uva.nl/~asalah/dibeklioglu11design.pdf
+214ac8196d8061981bef271b37a279526aab5024,http://pdfs.semanticscholar.org/214a/c8196d8061981bef271b37a279526aab5024.pdf
+4b28de1ebf6b6cb2479b9176fab50add6ed75b78,http://vision.ucsd.edu/sites/default/files/cvpr05a.pdf
+3fdcc1e2ebcf236e8bb4a6ce7baf2db817f30001,http://pdfs.semanticscholar.org/4032/8c9de5a0a90a8c24e80db7924f0281b46484.pdf
+31c0968fb5f587918f1c49bf7fa51453b3e89cf7,http://pdfs.semanticscholar.org/31c0/968fb5f587918f1c49bf7fa51453b3e89cf7.pdf
+6180bc0816b1776ca4b32ced8ea45c3c9ce56b47,http://pdfs.semanticscholar.org/793e/92ed3f89c8636c8ca1175c1183ba812da245.pdf
+5ea9063b44b56d9c1942b8484572790dff82731e,https://ibug.doc.ic.ac.uk/media/uploads/documents/mlsp_2007_kotsia.pdf
+2a0623ae989f2236f5e1fe3db25ab708f5d02955,http://pdfs.semanticscholar.org/2a06/23ae989f2236f5e1fe3db25ab708f5d02955.pdf
+3d68cedd80babfbb04ab197a0b69054e3c196cd9,http://www.cim.mcgill.ca/~mrl/pubs/malika/Meghjani09_Masters_Thesis.pdf
+25695abfe51209798f3b68fb42cfad7a96356f1f,http://pdfs.semanticscholar.org/2569/5abfe51209798f3b68fb42cfad7a96356f1f.pdf
+a8583e80a455507a0f146143abeb35e769d25e4e,http://pdfs.semanticscholar.org/a858/3e80a455507a0f146143abeb35e769d25e4e.pdf
+5da139fc43216c86d779938d1c219b950dd82a4c,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_ICIP_2007/pdfs/0200205.pdf
+3a2cf589f5e11ca886417b72c2592975ff1d8472,http://pdfs.semanticscholar.org/3a2c/f589f5e11ca886417b72c2592975ff1d8472.pdf
+90fb58eeb32f15f795030c112f5a9b1655ba3624,http://pdfs.semanticscholar.org/90fb/58eeb32f15f795030c112f5a9b1655ba3624.pdf
+08c18b2f57c8e6a3bfe462e599a6e1ce03005876,http://ca.cs.cmu.edu/sites/default/files/8uca_final_revision.pdf
+8a3c5507237957d013a0fe0f082cab7f757af6ee,http://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf
+0435a34e93b8dda459de49b499dd71dbb478dc18,http://pdfs.semanticscholar.org/0435/a34e93b8dda459de49b499dd71dbb478dc18.pdf
+251281d9cbd207038efbde0515f4077541967239,http://staff.estem-uc.edu.au/roland/files/2009/05/Ramana-Murthy_Radwan_Goecke_ICIP2014_DenseBodyPartTrajectoriesForHumanActionRecognition.pdf
+60c699b9ec71f7dcbc06fa4fd98eeb08e915eb09,https://arxiv.org/pdf/1706.03947v1.pdf
+38f06a75eb0519ae1d4582a86ef4730cc8fb8d7f,http://pdfs.semanticscholar.org/e9a4/1f856a474aa346491fe76151869e3f548172.pdf
+0716e1ad868f5f446b1c367721418ffadfcf0519,http://pdfs.semanticscholar.org/6e05/5db22fbddb524ccb0006145db7944d1ed31c.pdf
+2f16baddac6af536451b3216b02d3480fc361ef4,http://cs.nyu.edu/~fergus/teaching/vision/10_facerec.pdf
+0b8b8776684009e537b9e2c0d87dbd56708ddcb4,http://pdfs.semanticscholar.org/0b8b/8776684009e537b9e2c0d87dbd56708ddcb4.pdf
+bd9157331104a0708aa4f8ae79b7651a5be797c6,http://pdfs.semanticscholar.org/bd91/57331104a0708aa4f8ae79b7651a5be797c6.pdf
+9993f1a7cfb5b0078f339b9a6bfa341da76a3168,http://pdfs.semanticscholar.org/9993/f1a7cfb5b0078f339b9a6bfa341da76a3168.pdf
+22137ce9c01a8fdebf92ef35407a5a5d18730dde,http://pdfs.semanticscholar.org/2213/7ce9c01a8fdebf92ef35407a5a5d18730dde.pdf
+732e8d8f5717f8802426e1b9debc18a8361c1782,http://pdfs.semanticscholar.org/732e/8d8f5717f8802426e1b9debc18a8361c1782.pdf
+e39a0834122e08ba28e7b411db896d0fdbbad9ba,http://www.ece.ualberta.ca/~djoseph/publications/journal/TPAMI_2012.pdf
+08d40ee6e1c0060d3b706b6b627e03d4b123377a,http://pdfs.semanticscholar.org/3daa/fe6389d877fe15d8823cdf5ac15fd919676f.pdf
+670637d0303a863c1548d5b19f705860a23e285c,https://classes.cs.uoregon.edu/16F/cis607photo/faces.pdf
+c92bb26238f6e30196b0c4a737d8847e61cfb7d4,http://pdfs.semanticscholar.org/c92b/b26238f6e30196b0c4a737d8847e61cfb7d4.pdf
+831fbef657cc5e1bbf298ce6aad6b62f00a5b5d9,http://pdfs.semanticscholar.org/831f/bef657cc5e1bbf298ce6aad6b62f00a5b5d9.pdf
+1ecb56e7c06a380b3ce582af3a629f6ef0104457,http://pdfs.semanticscholar.org/1ecb/56e7c06a380b3ce582af3a629f6ef0104457.pdf
+4b507a161af8a7dd41e909798b9230f4ac779315,http://pdfs.semanticscholar.org/5202/4d271f516c7d0dfa73009bf7537549ef74f7.pdf
+6fa0c206873dcc5812f7ea74a48bb4bf4b273494,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2014/W03/papers/Suk_Real-time_Mobile_Facial_2014_CVPR_paper.pdf
+36ea75e14b69bed454fde6076ea6b85ed87fbb14,http://pdfs.semanticscholar.org/36ea/75e14b69bed454fde6076ea6b85ed87fbb14.pdf
+6ee64c19efa89f955011531cde03822c2d1787b8,http://pdfs.semanticscholar.org/6ee6/4c19efa89f955011531cde03822c2d1787b8.pdf
+2d244d70ed1a2ba03d152189f1f90ff2b4f16a79,http://pdfs.semanticscholar.org/2d24/4d70ed1a2ba03d152189f1f90ff2b4f16a79.pdf
+6ab8f2081b1420a6214a6c127e5828c14979d414,http://pdfs.semanticscholar.org/6ab8/f2081b1420a6214a6c127e5828c14979d414.pdf
+22dada4a7ba85625824489375184ba1c3f7f0c8f,http://arxiv.org/pdf/1506.02328v1.pdf
+5bcc8ef74efbb959407adfda15a01dad8fcf1648,http://pdfs.semanticscholar.org/5bcc/8ef74efbb959407adfda15a01dad8fcf1648.pdf
+4f298d6d0c8870acdbf94fe473ebf6814681bd1f,http://pdfs.semanticscholar.org/9979/b794d0bd06a1959a6b169f2cf32ba8ba376b.pdf
+4cb0e0c0e9b92e457f2c546dc25b9a4ff87ff819,http://dayongwang.info/pdf/2012-CIKM.pdf
+0aa405447a8797e509521f0570e4679a42fdac9b,http://mplab.ucsd.edu/~jake/AISeminar26Sep2011.pdf
+24cf9fe9045f50c732fc9c602358af89ae40a9f7,http://pdfs.semanticscholar.org/b3e7/4cbe27454e32b4b35014af831783d3480ad5.pdf
+367f2668b215e32aff9d5122ce1f1207c20336c8,http://pdfs.semanticscholar.org/367f/2668b215e32aff9d5122ce1f1207c20336c8.pdf
+41b38da2f4137c957537908f9cb70cbd2fac8bc1,https://arxiv.org/pdf/1701.01879v1.pdf
+37179032085e710d1d62a1ba2e9c1f63bb4dde91,http://eprints.soton.ac.uk/363288/1/tome%20tifs.pdf
+7c119e6bdada2882baca232da76c35ae9b5277f8,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_SMC_2009/PDFs/1070.pdf
+a33f20773b46283ea72412f9b4473a8f8ad751ae,http://pdfs.semanticscholar.org/a33f/20773b46283ea72412f9b4473a8f8ad751ae.pdf
+3abe50d0a806a9f5a5626f60f590632a6d87f0c4,http://vis.uky.edu/~gravity/publications/2008/Estimating_Xinyu.pdf
+026e4ee480475e63ae68570d73388f8dfd4b4cde,http://pdfs.semanticscholar.org/026e/4ee480475e63ae68570d73388f8dfd4b4cde.pdf
+085ceda1c65caf11762b3452f87660703f914782,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Jourabloo_Large-Pose_Face_Alignment_CVPR_2016_paper.pdf
+1fefb2f8dd1efcdb57d5c2966d81f9ab22c1c58d,http://pdfs.semanticscholar.org/1fef/b2f8dd1efcdb57d5c2966d81f9ab22c1c58d.pdf
+839a2155995acc0a053a326e283be12068b35cb8,http://pdfs.semanticscholar.org/839a/2155995acc0a053a326e283be12068b35cb8.pdf
+b7c5f885114186284c51e863b58292583047a8b4,http://pdfs.semanticscholar.org/b7c5/f885114186284c51e863b58292583047a8b4.pdf
+d9327b9621a97244d351b5b93e057f159f24a21e,http://www.cil.pku.edu.cn/publications/papers/CS2010gusuicheng.pdf
+8cb55413f1c5b6bda943697bba1dc0f8fc880d28,http://cvhci.anthropomatik.kit.edu/~stiefel/papers/ICCV07_031.pdf
+226a5ff790b969593596a52b55b3718dcdd7bb7f,https://www.cise.ufl.edu/~jho/papers/IEEE06.pdf
+4c6daffd092d02574efbf746d086e6dc0d3b1e91,http://pdfs.semanticscholar.org/4c6d/affd092d02574efbf746d086e6dc0d3b1e91.pdf
+7c61d21446679776f7bdc7afd13aedc96f9acac1,http://pdfs.semanticscholar.org/e199/9cee8e6d717ad1181ae9e17c366e152e805e.pdf
+0daf696253a1b42d2c9d23f1008b32c65a9e4c1e,http://ca.cs.cmu.edu/sites/default/files/132010_CVPR_AU_Long.pdf
+32c20afb5c91ed7cdbafb76408c3a62b38dd9160,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Hassner_Viewing_Real-World_Faces_2013_ICCV_paper.pdf
+00075519a794ea546b2ca3ca105e2f65e2f5f471,http://pdfs.semanticscholar.org/0007/5519a794ea546b2ca3ca105e2f65e2f5f471.pdf
+9820920d4544173e97228cb4ab8b71ecf4548475,http://pdfs.semanticscholar.org/9820/920d4544173e97228cb4ab8b71ecf4548475.pdf
+4414a328466db1e8ab9651bf4e0f9f1fe1a163e4,http://www.eurasip.org/Proceedings/Eusipco/Eusipco2010/Contents/papers/1569290719.pdf
+433d2d5528d1401a402f2c1db40b933c494f11ba,https://www.researchgate.net/profile/Xudong_Jiang3/publication/4248964_Face_Recognition_Based_on_Discriminant_Evaluation_in_the_Whole_Space/links/0046351ef2d1c48d55000000.pdf
+5c124b57699be19cd4eb4e1da285b4a8c84fc80d,http://www.iis.ee.ic.ac.uk/icvl/doc/cvpr14_xiaowei.pdf
+3107085973617bbfc434c6cb82c87f2a952021b7,http://pdfs.semanticscholar.org/cee6/6bd89d1e25355e78573220adcd017a2d97d8.pdf
+976e0264bb57786952a987d4456850e274714fb8,https://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Dehghan_Improving_Semantic_Concept_2014_CVPR_paper.pdf
+435dc062d565ce87c6c20a5f49430eb9a4b573c4,http://pdfs.semanticscholar.org/435d/c062d565ce87c6c20a5f49430eb9a4b573c4.pdf
+d57dca4413ad4f33c97ae06a5a7fc86dc5a75f8b,http://iplab.dmi.unict.it/sites/default/files/_11.pdf
+e4c3d5d43cb62ac5b57d74d55925bdf76205e306,http://pdfs.semanticscholar.org/e4c3/d5d43cb62ac5b57d74d55925bdf76205e306.pdf
+83fd2d2d5ad6e4e153672c9b6d1a3785f754b60e,http://pdfs.semanticscholar.org/83fd/2d2d5ad6e4e153672c9b6d1a3785f754b60e.pdf
+2b0ff4b82bac85c4f980c40b3dc4fde05d3cc23f,http://pdfs.semanticscholar.org/2b0f/f4b82bac85c4f980c40b3dc4fde05d3cc23f.pdf
+0e73d2b0f943cf8559da7f5002414ccc26bc77cd,https://people.cs.umass.edu/~smaji/presentations/similarity-poster-cvpr14.pdf
+a703d51c200724517f099ee10885286ddbd8b587,http://pdfs.semanticscholar.org/a703/d51c200724517f099ee10885286ddbd8b587.pdf
+5892f8367639e9c1e3cf27fdf6c09bb3247651ed,http://pdfs.semanticscholar.org/5892/f8367639e9c1e3cf27fdf6c09bb3247651ed.pdf
+924b14a9e36d0523a267293c6d149bca83e73f3b,http://pdfs.semanticscholar.org/924b/14a9e36d0523a267293c6d149bca83e73f3b.pdf
+63ce37da6c0c789099307337bb913e1104473854,http://pdfs.semanticscholar.org/63ce/37da6c0c789099307337bb913e1104473854.pdf
+0394e684bd0a94fc2ff09d2baef8059c2652ffb0,http://www.ee.oulu.fi/research/mvmp/mvg/files/pdf/TIP2522378.pdf
+1ca815327e62c70f4ee619a836e05183ef629567,http://www.humansensing.cs.cmu.edu/sites/default/files/Xiong_Global_Supervised_Descent_2015_CVPR_paper.pdf
+d46b4e6871fc9974542215f001e92e3035aa08d9,http://pdfs.semanticscholar.org/d46b/4e6871fc9974542215f001e92e3035aa08d9.pdf
+0aa74ad36064906e165ac4b79dec298911a7a4db,http://pdfs.semanticscholar.org/7645/11b63b0eeba9f3dfe1e5ec9ff261cdc59d25.pdf
+2784d9212dee2f8a660814f4b85ba564ec333720,http://people.cs.umass.edu/~elm/papers/cvpr2010_imagetrans.pdf
+4f9958946ad9fc71c2299847e9ff16741401c591,http://pdfs.semanticscholar.org/4f99/58946ad9fc71c2299847e9ff16741401c591.pdf
+7003d903d5e88351d649b90d378f3fc5f211282b,http://pdfs.semanticscholar.org/7003/d903d5e88351d649b90d378f3fc5f211282b.pdf
+429d4848d03d2243cc6a1b03695406a6de1a7abd,http://pdfs.semanticscholar.org/429d/4848d03d2243cc6a1b03695406a6de1a7abd.pdf
+5502dfe47ac26e60e0fb25fc0f810cae6f5173c0,http://pdfs.semanticscholar.org/5502/dfe47ac26e60e0fb25fc0f810cae6f5173c0.pdf
+4adb97b096b700af9a58d00e45a2f980136fcbb5,http://pdfs.semanticscholar.org/9ea2/23c070ec9a00f4cb5ca0de35d098eb9a8e32.pdf
+7808937b46acad36e43c30ae4e9f3fd57462853d,http://ttic.uchicago.edu/~smaji/papers/attributes-iccv11.pdf
+ea96bc017fb56593a59149e10d5f14011a3744a0,http://pdfs.semanticscholar.org/ea96/bc017fb56593a59149e10d5f14011a3744a0.pdf
+40a5b32e261dc5ccc1b5df5d5338b7d3fe10370d,http://pdfs.semanticscholar.org/40a5/b32e261dc5ccc1b5df5d5338b7d3fe10370d.pdf
+568cff415e7e1bebd4769c4a628b90db293c1717,http://pdfs.semanticscholar.org/568c/ff415e7e1bebd4769c4a628b90db293c1717.pdf
+55cc90968e5e6ed413dd607af2a850ac2f54e378,http://pdfs.semanticscholar.org/55cc/90968e5e6ed413dd607af2a850ac2f54e378.pdf
+a01f9461bc8cf8fe40c26d223ab1abea5d8e2812,http://pdfs.semanticscholar.org/a01f/9461bc8cf8fe40c26d223ab1abea5d8e2812.pdf
+614a7c42aae8946c7ad4c36b53290860f6256441,https://arxiv.org/pdf/1604.02878.pdf
+95f26d1c80217706c00b6b4b605a448032b93b75,http://pdfs.semanticscholar.org/95f2/6d1c80217706c00b6b4b605a448032b93b75.pdf
+48734cb558b271d5809286447ff105fd2e9a6850,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w41/papers/Mahoor_Facial_Expression_Recognition_CVPR_2017_paper.pdf
+bc98027b331c090448492eb9e0b9721e812fac84,http://pdfs.semanticscholar.org/bc98/027b331c090448492eb9e0b9721e812fac84.pdf
+9d839dfc9b6a274e7c193039dfa7166d3c07040b,http://www.vision.ee.ethz.ch/en/publications/papers/proceedings/eth_biwi_00869.pdf
+27c6cd568d0623d549439edc98f6b92528d39bfe,http://openaccess.thecvf.com/content_iccv_2015/papers/Hsu_Regressive_Tree_Structured_ICCV_2015_paper.pdf
+4657d87aebd652a5920ed255dca993353575f441,http://pdfs.semanticscholar.org/4657/d87aebd652a5920ed255dca993353575f441.pdf
+d06c8e3c266fbae4026d122ec9bd6c911fcdf51d,http://pdfs.semanticscholar.org/d06c/8e3c266fbae4026d122ec9bd6c911fcdf51d.pdf
+9547a7bce2b85ef159b2d7c1b73dea82827a449f,http://tdlc.ucsd.edu/research/publications/Wu_Bartlett_Movellan_Facial_Expression_2010.pdf
+a2b54f4d73bdb80854aa78f0c5aca3d8b56b571d,http://pdfs.semanticscholar.org/a2b5/4f4d73bdb80854aa78f0c5aca3d8b56b571d.pdf
+6af65e2a1eba6bd62843e7bf717b4ccc91bce2b8,http://pdfs.semanticscholar.org/6af6/5e2a1eba6bd62843e7bf717b4ccc91bce2b8.pdf
+1a1118cd4339553ad0544a0a131512aee50cf7de,http://pdfs.semanticscholar.org/1a11/18cd4339553ad0544a0a131512aee50cf7de.pdf
+60496b400e70acfbbf5f2f35b4a49de2a90701b5,http://pdfs.semanticscholar.org/6049/6b400e70acfbbf5f2f35b4a49de2a90701b5.pdf
+2020e8c0be8fa00d773fd99b6da55029a6a83e3d,http://pdfs.semanticscholar.org/9ca3/806dd01f8aded02e88c7022716b7fef46423.pdf
+153f5ad54dd101f7f9c2ae17e96c69fe84aa9de4,http://pdfs.semanticscholar.org/153f/5ad54dd101f7f9c2ae17e96c69fe84aa9de4.pdf
+4e97b53926d997f451139f74ec1601bbef125599,http://pdfs.semanticscholar.org/4e97/b53926d997f451139f74ec1601bbef125599.pdf
+6f35b6e2fa54a3e7aaff8eaf37019244a2d39ed3,http://www.ifp.uiuc.edu/~iracohen/publications/CohenSebeMS05.pdf
+79fa57dedafddd3f3720ca26eb41c82086bfb332,http://www.cis.pku.edu.cn/vision/Visual&Robot/publication/doc/IROS05_wu.pdf
+291265db88023e92bb8c8e6390438e5da148e8f5,http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf
+f2c30594d917ea915028668bc2a481371a72a14d,http://pdfs.semanticscholar.org/f2c3/0594d917ea915028668bc2a481371a72a14d.pdf
+c65a394118d34beda5dd01ae0df163c3db88fceb,http://pdfs.semanticscholar.org/c65a/394118d34beda5dd01ae0df163c3db88fceb.pdf
+ce5e50467e43e3178cbd86cfc3348e3f577c4489,https://www.computer.org/csdl/proceedings/avss/2013/9999/00/06636683.pdf
+80135ed7e34ac1dcc7f858f880edc699a920bf53,http://pdfs.semanticscholar.org/8013/5ed7e34ac1dcc7f858f880edc699a920bf53.pdf
+50d15cb17144344bb1879c0a5de7207471b9ff74,http://pdfs.semanticscholar.org/50d1/5cb17144344bb1879c0a5de7207471b9ff74.pdf
+51c3050fb509ca685de3d9ac2e965f0de1fb21cc,http://www.cs.toronto.edu/~law/publications/CVPR/2014/fantope_regularization.pdf
+b340f275518aa5dd2c3663eed951045a5b8b0ab1,http://www.eecs.qmul.ac.uk/~sgg/papers/GongShanXiang_ACM_ICMI2007.pdf
+392c3cabe516c0108b478152902a9eee94f4c81e,http://pdfs.semanticscholar.org/392c/3cabe516c0108b478152902a9eee94f4c81e.pdf
+29e793271370c1f9f5ac03d7b1e70d1efa10577c,http://pdfs.semanticscholar.org/29e7/93271370c1f9f5ac03d7b1e70d1efa10577c.pdf
+0bc0f9178999e5c2f23a45325fa50300961e0226,http://pdfs.semanticscholar.org/0bc0/f9178999e5c2f23a45325fa50300961e0226.pdf
+91df860368cbcebebd83d59ae1670c0f47de171d,http://pdfs.semanticscholar.org/91df/860368cbcebebd83d59ae1670c0f47de171d.pdf
+1f41a96589c5b5cee4a55fc7c2ce33e1854b09d6,http://www.cse.msu.edu/~liuxm/publication/Han_Otto_Liu_Jain_TPAMI14.pdf
+3d948e4813a6856e5b8b54c20e50cc5050e66abe,http://pdfs.semanticscholar.org/3d94/8e4813a6856e5b8b54c20e50cc5050e66abe.pdf
+995d55fdf5b6fe7fb630c93a424700d4bc566104,http://openaccess.thecvf.com/content_iccv_2015/papers/Nilsson_The_One_Triangle_ICCV_2015_paper.pdf
+c980443ca996402de4b5e5424f872acda0368831,http://homepage.tudelft.nl/19j49/Publications_files/Final_CVPR10.pdf
+405b43f4a52f70336ac1db36d5fa654600e9e643,http://pdfs.semanticscholar.org/405b/43f4a52f70336ac1db36d5fa654600e9e643.pdf
+11b3877df0213271676fa8aa347046fd4b1a99ad,http://pdfs.semanticscholar.org/11b3/877df0213271676fa8aa347046fd4b1a99ad.pdf
+ce3f3088d0c0bf236638014a299a28e492069753,http://pdfs.semanticscholar.org/ce3f/3088d0c0bf236638014a299a28e492069753.pdf
+0fd3a7ee228bbc3dd4a111dae04952a1ee58a8cd,http://media.cs.tsinghua.edu.cn/~ahz/papers/%5B2011%5D%5Bacpr%5Dwang%20nan.pdf
+2b507f659b341ed0f23106446de8e4322f4a3f7e,http://pdfs.semanticscholar.org/2b50/7f659b341ed0f23106446de8e4322f4a3f7e.pdf
+2cae619d0209c338dc94593892a787ee712d9db0,http://vis-www.cs.umass.edu/papers/cvpr08shrf.pdf
+9d55ec73cab779403cd933e6eb557fb04892b634,http://pdfs.semanticscholar.org/9d55/ec73cab779403cd933e6eb557fb04892b634.pdf
+7f2a4cd506fe84dee26c0fb41848cb219305173f,http://pdfs.semanticscholar.org/7f2a/4cd506fe84dee26c0fb41848cb219305173f.pdf
+3c78b642289d6a15b0fb8a7010a1fb829beceee2,http://pdfs.semanticscholar.org/3c78/b642289d6a15b0fb8a7010a1fb829beceee2.pdf
+7aa4c16a8e1481629f16167dea313fe9256abb42,http://mirlab.org/conference_papers/International_Conference/ICASSP%202017/pdfs/0002981.pdf
+07de8371ad4901356145722aa29abaeafd0986b9,http://pdfs.semanticscholar.org/07de/8371ad4901356145722aa29abaeafd0986b9.pdf
+bbc4b376ebd296fb9848b857527a72c82828fc52,http://pdfs.semanticscholar.org/bbc4/b376ebd296fb9848b857527a72c82828fc52.pdf
+f05ad40246656a977cf321c8299158435e3f3b61,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Lu_Face_Recognition_Using_2013_ICCV_paper.pdf
+1a7a2221fed183b6431e29a014539e45d95f0804,http://www.cs.colostate.edu/~vision/publications/Bolme2007b.pdf
+ded41c9b027c8a7f4800e61b7cfb793edaeb2817,http://pdfs.semanticscholar.org/ded4/1c9b027c8a7f4800e61b7cfb793edaeb2817.pdf
+738a985fba44f9f5acd516e07d0d9578f2ffaa4e,http://pdfs.semanticscholar.org/738a/985fba44f9f5acd516e07d0d9578f2ffaa4e.pdf
+146bbf00298ee1caecde3d74e59a2b8773d2c0fc,http://pdfs.semanticscholar.org/146b/bf00298ee1caecde3d74e59a2b8773d2c0fc.pdf
+902114feaf33deac209225c210bbdecbd9ef33b1,http://pdfs.semanticscholar.org/b5b0/8aaf56df40260abea890813503003485bda3.pdf
+c9f588d295437009994ddaabb64fd4e4c499b294,http://pdfs.semanticscholar.org/c9f5/88d295437009994ddaabb64fd4e4c499b294.pdf
+b75cee96293c11fe77ab733fc1147950abbe16f9,http://pdfs.semanticscholar.org/e1a6/16674f63dd54b495d06cf1b7bd59f4cb772e.pdf
+8fbec9105d346cd23d48536eb20c80b7c2bbbe30,http://conradsanderson.id.au/reading_group/Barr_Effectiveness_Face_WACV_2014.pdf
+169618b8dc9b348694a31c6e9e17b989735b4d39,http://vllab.ucmerced.edu/hylee/publication/ICCV17_OPN.pdf
+4f0bf2508ae801aee082b37f684085adf0d06d23,http://pdfs.semanticscholar.org/4f0b/f2508ae801aee082b37f684085adf0d06d23.pdf
+dad7b8be074d7ea6c3f970bd18884d496cbb0f91,http://pdfs.semanticscholar.org/dad7/b8be074d7ea6c3f970bd18884d496cbb0f91.pdf
+601834a4150e9af028df90535ab61d812c45082c,http://pdfs.semanticscholar.org/6018/34a4150e9af028df90535ab61d812c45082c.pdf
+02e133aacde6d0977bca01ffe971c79097097b7f,http://pdfs.semanticscholar.org/02e1/33aacde6d0977bca01ffe971c79097097b7f.pdf
+47638197d83a8f8174cdddc44a2c7101fa8301b7,http://grail.cs.washington.edu/wp-content/uploads/2015/08/saleh2013oad.pdf
+d8896861126b7fd5d2ceb6fed8505a6dff83414f,http://pdfs.semanticscholar.org/d889/6861126b7fd5d2ceb6fed8505a6dff83414f.pdf
+4be774af78f5bf55f7b7f654f9042b6e288b64bd,http://pdfs.semanticscholar.org/4be7/74af78f5bf55f7b7f654f9042b6e288b64bd.pdf
+2914e8c62f0432f598251fae060447f98141e935,http://pdfs.semanticscholar.org/2914/e8c62f0432f598251fae060447f98141e935.pdf
+57d37ad025b5796457eee7392d2038910988655a,http://pdfs.semanticscholar.org/57d3/7ad025b5796457eee7392d2038910988655a.pdf
+2b773fe8f0246536c9c40671dfa307e98bf365ad,http://pdfs.semanticscholar.org/2b77/3fe8f0246536c9c40671dfa307e98bf365ad.pdf
+017ce398e1eb9f2eed82d0b22fb1c21d3bcf9637,http://pdfs.semanticscholar.org/017c/e398e1eb9f2eed82d0b22fb1c21d3bcf9637.pdf
+ad247138e751cefa3bb891c2fe69805da9c293d7,http://pdfs.semanticscholar.org/ad24/7138e751cefa3bb891c2fe69805da9c293d7.pdf
+7d9fe410f24142d2057695ee1d6015fb1d347d4a,http://pdfs.semanticscholar.org/7d9f/e410f24142d2057695ee1d6015fb1d347d4a.pdf
+7d306512b545df98243f87cb8173df83b4672b18,http://pdfs.semanticscholar.org/7d30/6512b545df98243f87cb8173df83b4672b18.pdf
+0a68747d001aba014acd3b6ec83ba9534946a0da,http://staff.estem-uc.edu.au/roland/files/2009/05/Dhall_Goecke_Gedeon_TAC2015_AutomaticGroupHappinessIntensityAnalysis.pdf
+47b508abdaa5661fe14c13e8eb21935b8940126b,http://pdfs.semanticscholar.org/47b5/08abdaa5661fe14c13e8eb21935b8940126b.pdf
+13afc4f8d08f766479577db2083f9632544c7ea6,https://cs.anu.edu.au/few/KSikka_EmotiW.pdf
+7171b46d233810df57eaba44ccd8eabd0ad1f53a,http://pdfs.semanticscholar.org/7171/b46d233810df57eaba44ccd8eabd0ad1f53a.pdf
+0a5ffc55b584da7918c2650f9d8602675d256023,http://pdfs.semanticscholar.org/0a5f/fc55b584da7918c2650f9d8602675d256023.pdf
+51683eac8bbcd2944f811d9074a74d09d395c7f3,http://pdfs.semanticscholar.org/5168/3eac8bbcd2944f811d9074a74d09d395c7f3.pdf
+4e8168fbaa615009d1618a9d6552bfad809309e9,http://pdfs.semanticscholar.org/4e81/68fbaa615009d1618a9d6552bfad809309e9.pdf
+2db05ef11041447dbc735362db68b04e562c1e35,http://www.cs.berkeley.edu/~daf/eccv-sft.pdf
+5239001571bc64de3e61be0be8985860f08d7e7e,http://pdfs.semanticscholar.org/5239/001571bc64de3e61be0be8985860f08d7e7e.pdf
+7f205b9fca7e66ac80758c4d6caabe148deb8581,http://pdfs.semanticscholar.org/7f20/5b9fca7e66ac80758c4d6caabe148deb8581.pdf
+8877e0b2dc3d2e8538c0cfee86b4e8657499a7c4,http://www.apsipa.org/proceedings_2013/papers/280_automatic-facial-hsu-2931731.pdf
+3624ca25f09f3acbcf4d3a4c40b9e45a29c22b94,http://pdfs.semanticscholar.org/3624/ca25f09f3acbcf4d3a4c40b9e45a29c22b94.pdf
+1659a8b91c3f428f1ba6aeba69660f2c9d0a85c6,http://www.dcs.gla.ac.uk/~vincia/papers/shortsurvey.pdf
+a75edf8124f5b52690c08ff35b0c7eb8355fe950,http://pdfs.semanticscholar.org/a75e/df8124f5b52690c08ff35b0c7eb8355fe950.pdf
+4377b03bbee1f2cf99950019a8d4111f8de9c34a,http://www.umiacs.umd.edu/~morariu/publications/LiSelectiveEncoderICCV15.pdf
+72e10a2a7a65db7ecdc7d9bd3b95a4160fab4114,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/2B_094_ext.pdf
+3b64efa817fd609d525c7244a0e00f98feacc8b4,https://arxiv.org/pdf/1502.04383v3.pdf
+3f204a413d9c8c16f146c306c8d96b91839fed0c,http://www.menpo.org/pages/paper/Menpo_ACM_MM_2014.pdf
+13c250fb740cb5616aeb474869db6ab11560e2a6,http://pdfs.semanticscholar.org/13c2/50fb740cb5616aeb474869db6ab11560e2a6.pdf
+2e157e8b57f679c2f1b8e16d6e934f52312f08f6,http://pdfs.semanticscholar.org/2e15/7e8b57f679c2f1b8e16d6e934f52312f08f6.pdf
+c472436764a30278337aca9681eee456bee95c34,http://pdfs.semanticscholar.org/c472/436764a30278337aca9681eee456bee95c34.pdf
+c8a4b4fe5ff2ace9ab9171a9a24064b5a91207a3,http://www.isir.upmc.fr/files/2013ACTI2846.pdf
+95aef5184b89daebd0c820c8102f331ea7cae1ad,http://www.dia.fi.upm.es/~pcr/publications/paa2008.pdf
+70f189798c8b9f2b31c8b5566a5cf3107050b349,http://www.cs.colostate.edu/~vision/pasc/docs/pasc2013_NISTIR_061013.pdf
+61f93ed515b3bfac822deed348d9e21d5dffe373,http://dvmmweb.cs.columbia.edu/files/set_hash_wacv17.pdf
+01c9dc5c677aaa980f92c4680229db482d5860db,https://pages.iai.uni-bonn.de/gall_juergen/download/jgall_actiondetect_cvpr16.pdf
+57bf9888f0dfcc41c5ed5d4b1c2787afab72145a,http://pdfs.semanticscholar.org/57bf/9888f0dfcc41c5ed5d4b1c2787afab72145a.pdf
+d1959ba4637739dcc6cc6995e10fd41fd6604713,http://pdfs.semanticscholar.org/d195/9ba4637739dcc6cc6995e10fd41fd6604713.pdf
+42cc9ea3da1277b1f19dff3d8007c6cbc0bb9830,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w11/papers/Saxena_Coordinated_Local_Metric_ICCV_2015_paper.pdf
+e3657ab4129a7570230ff25ae7fbaccb4ba9950c,http://pdfs.semanticscholar.org/e365/7ab4129a7570230ff25ae7fbaccb4ba9950c.pdf
+4cc681239c8fda3fb04ba7ac6a1b9d85b68af31d,http://pdfs.semanticscholar.org/56a6/77c889e0e2c9f68ab8ca42a7e63acf986229.pdf
+3137a3fedf23717c411483c7b4bd2ed646258401,https://www.tugraz.at/fileadmin/user_upload/Institute/ICG/Documents/lrs/pubs/koestinger_iccv_13.pdf
+cd596a2682d74bdfa7b7160dd070b598975e89d9,http://pdfs.semanticscholar.org/cd59/6a2682d74bdfa7b7160dd070b598975e89d9.pdf
+21bd9374c211749104232db33f0f71eab4df35d5,http://www.eurecom.fr/en/publication/5184/download/sec-publi-5184.pdf
+a458b319f5a2763ff9c6dc959eefa77673c56671,http://people.tamu.edu/~amir.tahmasbi/publications/Fisher_ICCEA2010.pdf
+081fb4e97d6bb357506d1b125153111b673cc128,http://pdfs.semanticscholar.org/081f/b4e97d6bb357506d1b125153111b673cc128.pdf
+a52581a7b48138d7124afc7ccfcf8ec3b48359d0,http://pdfs.semanticscholar.org/a525/81a7b48138d7124afc7ccfcf8ec3b48359d0.pdf
+622daa25b5e6af69f0dac3a3eaf4050aa0860396,http://pdfs.semanticscholar.org/af52/4ffcedaa50cff30607e6ad8e270ad0d7bf71.pdf
+867e709a298024a3c9777145e037e239385c0129,http://pdfs.semanticscholar.org/867e/709a298024a3c9777145e037e239385c0129.pdf
+6257a622ed6bd1b8759ae837b50580657e676192,http://pdfs.semanticscholar.org/b8d8/501595f38974e001a66752dc7098db13dfec.pdf
+4d0ef449de476631a8d107c8ec225628a67c87f9,http://www.wjscheirer.com/papers/wjs_btas2010b_photohead.pdf
+650bfe7acc3f03eb4ba91d9f93da8ef0ae8ba772,http://pdfs.semanticscholar.org/650b/fe7acc3f03eb4ba91d9f93da8ef0ae8ba772.pdf
+a8035ca71af8cc68b3e0ac9190a89fed50c92332,http://pdfs.semanticscholar.org/a803/5ca71af8cc68b3e0ac9190a89fed50c92332.pdf
+5c717afc5a9a8ccb1767d87b79851de8d3016294,http://mirlab.org/conference_papers/International_Conference/ICASSP%202012/pdfs/0001845.pdf
+79b669abf65c2ca323098cf3f19fa7bdd837ff31,http://dro.deakin.edu.au/eserv/DU:30044585/venkatesh-efficienttensor-2008.pdf
+bf0f0eb0fb31ee498da4ae2ca9b467f730ea9103,http://pdfs.semanticscholar.org/bf0f/0eb0fb31ee498da4ae2ca9b467f730ea9103.pdf
+10e7dd3bbbfbc25661213155e0de1a9f043461a2,http://pdfs.semanticscholar.org/eb9c/24686d2d8a65894e6d708c6107724f2b6c04.pdf
+405526dfc79de98f5bf3c97bf4aa9a287700f15d,http://pdfs.semanticscholar.org/8a6c/57fcd99a77982ec754e0b97fd67519ccb60c.pdf
+259706f1fd85e2e900e757d2656ca289363e74aa,http://pdfs.semanticscholar.org/6f98/3e8f26066f2ea486f6653b87154360d948ca.pdf
+68e9c837431f2ba59741b55004df60235e50994d,http://pdfs.semanticscholar.org/68e9/c837431f2ba59741b55004df60235e50994d.pdf
+8a91ad8c46ca8f4310a442d99b98c80fb8f7625f,http://vislab.isr.ist.utl.pt/wp-content/uploads/2016/02/2015_TIP.pdf
+1862bfca2f105fddfc79941c90baea7db45b8b16,http://vision.cs.utexas.edu/projects/rationales/rationales.pdf
+0b79356e58a0df1d0efcf428d0c7c4651afa140d,http://pdfs.semanticscholar.org/7725/05d940a31ca237563cfb2d5c05c62742993f.pdf
+7cee802e083c5e1731ee50e731f23c9b12da7d36,http://pdfs.semanticscholar.org/7cee/802e083c5e1731ee50e731f23c9b12da7d36.pdf
+100428708e4884300e4c1ac1f84cbb16e7644ccf,http://www.math.uh.edu/~dlabate/ICASSP_2014.pdf
+3419af6331e4099504255a38de6f6b7b3b1e5c14,http://pdfs.semanticscholar.org/3419/af6331e4099504255a38de6f6b7b3b1e5c14.pdf
+5d88702cdc879396b8b2cc674e233895de99666b,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w11/papers/Liu_Exploiting_Feature_Hierarchies_ICCV_2015_paper.pdf
+1e8eec6fc0e4538e21909ab6037c228547a678ba,http://pdfs.semanticscholar.org/1e8e/ec6fc0e4538e21909ab6037c228547a678ba.pdf
+2f53b97f0de2194d588bc7fb920b89cd7bcf7663,http://pdfs.semanticscholar.org/2f53/b97f0de2194d588bc7fb920b89cd7bcf7663.pdf
+089b5e8eb549723020b908e8eb19479ba39812f5,http://www.face-recognition-challenge.com/RobustnessOfDCNN-preprint.pdf
+58081cb20d397ce80f638d38ed80b3384af76869,http://pdfs.semanticscholar.org/5808/1cb20d397ce80f638d38ed80b3384af76869.pdf
+88bef50410cea3c749c61ed68808fcff84840c37,https://ibug.doc.ic.ac.uk/media/uploads/documents/tzimiropoulos2011sparse.pdf
+e475e857b2f5574eb626e7e01be47b416deff268,http://pdfs.semanticscholar.org/e475/e857b2f5574eb626e7e01be47b416deff268.pdf
+656aeb92e4f0e280576cbac57d4abbfe6f9439ea,http://pdfs.semanticscholar.org/656a/eb92e4f0e280576cbac57d4abbfe6f9439ea.pdf
+bd13f50b8997d0733169ceba39b6eb1bda3eb1aa,http://pdfs.semanticscholar.org/bd13/f50b8997d0733169ceba39b6eb1bda3eb1aa.pdf
+2d146cc0908c931d87f6e6e5d08b117c30a69b8d,http://www.cs.cityu.edu.hk/~yihong/download/TSMC.pdf
+861802ac19653a7831b314cd751fd8e89494ab12,http://btpwpdf.ijoy365.com/time-of-flight-and-depth-imaging-marcin-63540537.pdf
+7c0a6824b556696ad7bdc6623d742687655852db,http://2010.telfor.rs/files/radovi//TELFOR2010_05_35.pdf
+499343a2fd9421dca608d206e25e53be84489f44,http://pdfs.semanticscholar.org/4993/43a2fd9421dca608d206e25e53be84489f44.pdf
+f3d9e347eadcf0d21cb0e92710bc906b22f2b3e7,http://pdfs.semanticscholar.org/f3d9/e347eadcf0d21cb0e92710bc906b22f2b3e7.pdf
+33030c23f6e25e30b140615bb190d5e1632c3d3b,http://pdfs.semanticscholar.org/3303/0c23f6e25e30b140615bb190d5e1632c3d3b.pdf
+0eff410cd6a93d0e37048e236f62e209bc4383d1,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_ICRA_2010/data/papers/0516.pdf
+3e3a87eb24628ab075a3d2bde3abfd185591aa4c,http://pdfs.semanticscholar.org/3e3a/87eb24628ab075a3d2bde3abfd185591aa4c.pdf
+729dbe38538fbf2664bc79847601f00593474b05,http://pdfs.semanticscholar.org/729d/be38538fbf2664bc79847601f00593474b05.pdf
+fcd77f3ca6b40aad6edbd1dab9681d201f85f365,http://pdfs.semanticscholar.org/fcd7/7f3ca6b40aad6edbd1dab9681d201f85f365.pdf
+09b80d8eea809529b08a8b0ff3417950c048d474,http://openaccess.thecvf.com/content_cvpr_2013/papers/Choi_Adding_Unlabeled_Samples_2013_CVPR_paper.pdf
+726b8aba2095eef076922351e9d3a724bb71cb51,http://pdfs.semanticscholar.org/d06b/cb2d46342ee011e652990edf290a0876b502.pdf
+03264e2e2709d06059dd79582a5cc791cbef94b1,http://pdfs.semanticscholar.org/0326/4e2e2709d06059dd79582a5cc791cbef94b1.pdf
+090ff8f992dc71a1125636c1adffc0634155b450,http://pdfs.semanticscholar.org/090f/f8f992dc71a1125636c1adffc0634155b450.pdf
+1939168a275013d9bc1afaefc418684caf99ba66,http://research.microsoft.com/en-us/um/people/jiansun/papers/CVPR11_FaceAPModel.pdf
+cad52d74c1a21043f851ae14c924ac689e197d1f,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2014/W16/papers/Alletto_From_Ego_to_2014_CVPR_paper.pdf
+5058a7ec68c32984c33f357ebaee96c59e269425,http://pdfs.semanticscholar.org/5058/a7ec68c32984c33f357ebaee96c59e269425.pdf
+8dffbb6d75877d7d9b4dcde7665888b5675deee1,http://pdfs.semanticscholar.org/8dff/bb6d75877d7d9b4dcde7665888b5675deee1.pdf
+c34e48d637705ffb52360c2afb6b03efdeb680bf,http://pdfs.semanticscholar.org/c34e/48d637705ffb52360c2afb6b03efdeb680bf.pdf
+466184b10fb7ce9857e6b5bd6b4e5003e09a0b16,http://pdfs.semanticscholar.org/a42f/433e500661589e567340fe7f7d761d1f14df.pdf
+511b06c26b0628175c66ab70dd4c1a4c0c19aee9,http://pdfs.semanticscholar.org/511b/06c26b0628175c66ab70dd4c1a4c0c19aee9.pdf
+e0ed0e2d189ff73701ec72e167d44df4eb6e864d,http://pdfs.semanticscholar.org/e0ed/0e2d189ff73701ec72e167d44df4eb6e864d.pdf
+06c2dfe1568266ad99368fc75edf79585e29095f,http://ibug.doc.ic.ac.uk/media/uploads/documents/joan_cvpr2014.pdf
+d687fa99586a9ad229284229f20a157ba2d41aea,http://pdfs.semanticscholar.org/d687/fa99586a9ad229284229f20a157ba2d41aea.pdf
+c8e84cdff569dd09f8d31e9f9ba3218dee65e961,http://pdfs.semanticscholar.org/c8e8/4cdff569dd09f8d31e9f9ba3218dee65e961.pdf
+4563b46d42079242f06567b3f2e2f7a80cb3befe,http://vims.cis.udel.edu/publications/VADANA_BeFIT2011.pdf
+9d24179aa33a94c8c61f314203bf9e906d6b64de,http://www.decom.ufop.br/sibgrapi2012/eproceedings/technical/ts9/102146_3.pdf
+826c66bd182b54fea3617192a242de1e4f16d020,http://www.mirlab.org/conference_papers/International_Conference/ICASSP%202017/pdfs/0001602.pdf
+841bf196ee0086c805bd5d1d0bddfadc87e424ec,http://pdfs.semanticscholar.org/841b/f196ee0086c805bd5d1d0bddfadc87e424ec.pdf
+bb69f750ccec9624f6dabd334251def2bbddf166,http://research.microsoft.com/en-us/um/people/leizhang/Paper/FG04-Yuxiao.pdf
+e0d878cc095eaae220ad1f681b33d7d61eb5e425,http://pdfs.semanticscholar.org/e0d8/78cc095eaae220ad1f681b33d7d61eb5e425.pdf
+3a04eb72aa64760dccd73e68a3b2301822e4cdc3,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Peng_Scalable_Sparse_Subspace_2013_CVPR_paper.pdf
+cf86616b5a35d5ee777585196736dfafbb9853b5,http://www.research.rutgers.edu/~linzhong/PDF/TC_Facial.pdf
+37ef18d71c1ca71c0a33fc625ef439391926bfbb,http://pdfs.semanticscholar.org/37ef/18d71c1ca71c0a33fc625ef439391926bfbb.pdf
+14d4c019c3eac3c3fa888cb8c184f31457eced02,http://pdfs.semanticscholar.org/14d4/c019c3eac3c3fa888cb8c184f31457eced02.pdf
+351158e4481e3197bd63acdafd73a5df8336143b,http://pdfs.semanticscholar.org/3511/58e4481e3197bd63acdafd73a5df8336143b.pdf
+f75852386e563ca580a48b18420e446be45fcf8d,http://pdfs.semanticscholar.org/f758/52386e563ca580a48b18420e446be45fcf8d.pdf
+066d71fcd997033dce4ca58df924397dfe0b5fd1,http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf
+006f283a50d325840433f4cf6d15876d475bba77,http://lvdmaaten.github.io/publications/papers/TPAMI_2014.pdf
+02e628e99f9a1b295458cb453c09863ea1641b67,http://pdfs.semanticscholar.org/02e6/28e99f9a1b295458cb453c09863ea1641b67.pdf
+26af867977f90342c9648ccf7e30f94470d40a73,http://pdfs.semanticscholar.org/26af/867977f90342c9648ccf7e30f94470d40a73.pdf
+082ad50ac59fc694ba4369d0f9b87430553b11db,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553696.pdf
+ff01bc3f49130d436fca24b987b7e3beedfa404d,http://pdfs.semanticscholar.org/ff01/bc3f49130d436fca24b987b7e3beedfa404d.pdf
+34ccdec6c3f1edeeecae6a8f92e8bdb290ce40fd,http://pdfs.semanticscholar.org/34cc/dec6c3f1edeeecae6a8f92e8bdb290ce40fd.pdf
+493ec9e567c5587c4cbeb5f08ca47408ca2d6571,http://pdfs.semanticscholar.org/493e/c9e567c5587c4cbeb5f08ca47408ca2d6571.pdf
+10d334a98c1e2a9e96c6c3713aadd42a557abb8b,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Shi_Scene_Text_Recognition_2013_CVPR_paper.pdf
+8ee5b1c9fb0bded3578113c738060290403ed472,https://infoscience.epfl.ch/record/200452/files/wacv2014-RGE.pdf
+70569810e46f476515fce80a602a210f8d9a2b95,http://www.cv-foundation.org/openaccess/content_cvpr_2016_workshops/w18/papers/Antipov_Apparent_Age_Estimation_CVPR_2016_paper.pdf
+25127c2d9f14d36f03d200a65de8446f6a0e3bd6,http://pdfs.semanticscholar.org/2512/7c2d9f14d36f03d200a65de8446f6a0e3bd6.pdf
+030c82b87e3cdc5ba35c443a93ff4a9d21c2bc2f,http://www.cfar.umd.edu/~shaohua/papers/zhou07tpami_gps.pdf
+38192a0f9261d9727b119e294a65f2e25f72d7e6,http://pdfs.semanticscholar.org/3819/2a0f9261d9727b119e294a65f2e25f72d7e6.pdf
+3918b425bb9259ddff9eca33e5d47bde46bd40aa,http://pdfs.semanticscholar.org/3918/b425bb9259ddff9eca33e5d47bde46bd40aa.pdf
+3fac7c60136a67b320fc1c132fde45205cd2ac66,http://pdfs.semanticscholar.org/3fac/7c60136a67b320fc1c132fde45205cd2ac66.pdf
+2f5ae4d6cd240ec7bc3f8ada47030e8439125df2,http://users.eecs.northwestern.edu/~xsh835/CVPR14_ExemplarFaceDetection.pdf
+84dcf04802743d9907b5b3ae28b19cbbacd97981,http://pdfs.semanticscholar.org/84dc/f04802743d9907b5b3ae28b19cbbacd97981.pdf
+8c643e1a61f3f563ec382c1e450f4b2b28122614,http://www.cvip.louisville.edu/wwwcvip/research/publications/Pub_Pdf/2012/BTAS147.pdf
+c2e6daebb95c9dfc741af67464c98f1039127627,http://pdfs.semanticscholar.org/c2e6/daebb95c9dfc741af67464c98f1039127627.pdf
+15cf7bdc36ec901596c56d04c934596cf7b43115,http://pdfs.semanticscholar.org/15cf/7bdc36ec901596c56d04c934596cf7b43115.pdf
+18cd79f3c93b74d856bff6da92bfc87be1109f80,http://pdfs.semanticscholar.org/18cd/79f3c93b74d856bff6da92bfc87be1109f80.pdf
+0339459a5b5439d38acd9c40a0c5fea178ba52fb,http://pdfs.semanticscholar.org/0339/459a5b5439d38acd9c40a0c5fea178ba52fb.pdf
+70bf1769d2d5737fc82de72c24adbb7882d2effd,http://pdfs.semanticscholar.org/70bf/1769d2d5737fc82de72c24adbb7882d2effd.pdf
+6d2ca1ddacccc8c865112bd1fbf8b931c2ee8e75,http://hoques.com/Publications/2015/2015-ubicomp_rocspeak_Fung-etal.pdf
+37ba12271d09d219dd1a8283bc0b4659faf3a6c6,http://www.eecs.qmul.ac.uk/~sgg/papers/LayneEtAl_ARTERMIS2013.pdf
+b6a01cd4572b5f2f3a82732ef07d7296ab0161d3,http://pdfs.semanticscholar.org/b6a0/1cd4572b5f2f3a82732ef07d7296ab0161d3.pdf
+d56fe69cbfd08525f20679ffc50707b738b88031,http://pdfs.semanticscholar.org/d56f/e69cbfd08525f20679ffc50707b738b88031.pdf
+7e3367b9b97f291835cfd0385f45c75ff84f4dc5,https://infoscience.epfl.ch/record/182226/files/fg2013.pdf
+0bc53b338c52fc635687b7a6c1e7c2b7191f42e5,http://pdfs.semanticscholar.org/a32a/8d6d4c3b4d69544763be48ffa7cb0d7f2f23.pdf
+74de03923a069ffc0fb79e492ee447299401001f,http://pdfs.semanticscholar.org/74de/03923a069ffc0fb79e492ee447299401001f.pdf
+79dd787b2877cf9ce08762d702589543bda373be,http://fipa.cs.kit.edu/befit/workshop2011/pdf/slides/jianguo_li-slides.pdf
+0c8a0a81481ceb304bd7796e12f5d5fa869ee448,http://pdfs.semanticscholar.org/0c8a/0a81481ceb304bd7796e12f5d5fa869ee448.pdf
+03d9ccce3e1b4d42d234dba1856a9e1b28977640,http://pdfs.semanticscholar.org/03d9/ccce3e1b4d42d234dba1856a9e1b28977640.pdf
+8b1db0894a23c4d6535b5adf28692f795559be90,http://pdfs.semanticscholar.org/8b1d/b0894a23c4d6535b5adf28692f795559be90.pdf
+34d484b47af705e303fc6987413dc0180f5f04a9,http://pdfs.semanticscholar.org/34d4/84b47af705e303fc6987413dc0180f5f04a9.pdf
+12ccfc188de0b40c84d6a427999239c6a379cd66,http://pdfs.semanticscholar.org/12cc/fc188de0b40c84d6a427999239c6a379cd66.pdf
+f96bdd1e2a940030fb0a89abbe6c69b8d7f6f0c1,http://pdfs.semanticscholar.org/f96b/dd1e2a940030fb0a89abbe6c69b8d7f6f0c1.pdf
+2b4d092d70efc13790d0c737c916b89952d4d8c7,http://pdfs.semanticscholar.org/2b4d/092d70efc13790d0c737c916b89952d4d8c7.pdf
+5b9d41e2985fa815c0f38a2563cca4311ce82954,http://www.iti.gr/files/3dpvt04tsalakanidou.pdf
+40cd062438c280c76110e7a3a0b2cf5ef675052c,http://pdfs.semanticscholar.org/40cd/062438c280c76110e7a3a0b2cf5ef675052c.pdf
+939123cf21dc9189a03671484c734091b240183e,http://publications.idiap.ch/downloads/papers/2015/Erdogmus_MMSP_2015.pdf
+6409b8879c7e61acf3ca17bcc62f49edca627d4c,http://pdfs.semanticscholar.org/6409/b8879c7e61acf3ca17bcc62f49edca627d4c.pdf
+199c2df5f2847f685796c2523221c6436f022464,https://static.aminer.org/pdf/PDF/000/322/051/self_quotient_image_for_face_recognition.pdf
+887b7676a4efde616d13f38fcbfe322a791d1413,http://pdfs.semanticscholar.org/b4a0/cff84c35f75bcdb7aec3a0b1395edd15189b.pdf
+7dffe7498c67e9451db2d04bb8408f376ae86992,http://pdfs.semanticscholar.org/7dff/e7498c67e9451db2d04bb8408f376ae86992.pdf
+35f921def890210dda4b72247849ad7ba7d35250,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Zhou_Exemplar-Based_Graph_Matching_2013_ICCV_paper.pdf
+134db6ca13f808a848321d3998e4fe4cdc52fbc2,http://ibug.doc.ic.ac.uk/media/uploads/documents/PanticPatras-SMCB-2005-FINAL.pdf
+82b43bc9213230af9db17322301cbdf81e2ce8cc,http://pdfs.semanticscholar.org/82b4/3bc9213230af9db17322301cbdf81e2ce8cc.pdf
+d963e640d0bf74120f147329228c3c272764932b,http://pdfs.semanticscholar.org/d963/e640d0bf74120f147329228c3c272764932b.pdf
+1aa766bbd49bac8484e2545c20788d0f86e73ec2,http://inside.mines.edu/~jpaone/papers/IV15_BaselineFaceDetection_SHRP2NDS.pdf
+bbe949c06dc4872c7976950b655788555fe513b8,http://www.quaero.org/media/files/bibliographie/ekenel_automaticfrequency.pdf
+bbfe0527e277e0213aafe068113d719b2e62b09c,http://pdfs.semanticscholar.org/bbfe/0527e277e0213aafe068113d719b2e62b09c.pdf
+7966146d72f9953330556baa04be746d18702047,http://pdfs.semanticscholar.org/7966/146d72f9953330556baa04be746d18702047.pdf
+8c66378df977606d332fc3b0047989e890a6ac76,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/2B_078_ext.pdf
+374a0df2aa63b26737ee89b6c7df01e59b4d8531,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Yuan_Temporal_Action_Localization_CVPR_2016_paper.pdf
+bebb8a97b2940a4e5f6e9d3caf6d71af21585eda,http://pdfs.semanticscholar.org/bebb/8a97b2940a4e5f6e9d3caf6d71af21585eda.pdf
+06a6347ac14fd0c6bb3ad8190cbe9cdfa5d59efc,https://www.cs.umd.edu/sites/default/files/scholarly_papers/Biswas_1.pdf
+224d0eee53c2aa5d426d2c9b7fa5d843a47cf1db,http://www.ifp.illinois.edu/~jyang29/papers/CVPR13-PEM.pdf
+30cd39388b5c1aae7d8153c0ab9d54b61b474ffe,http://pdfs.semanticscholar.org/3be8/f1f7501978287af8d7ebfac5963216698249.pdf
+081189493ca339ca49b1913a12122af8bb431984,http://pdfs.semanticscholar.org/0811/89493ca339ca49b1913a12122af8bb431984.pdf
+3daf1191d43e21a8302d98567630b0e2025913b0,http://pdfs.semanticscholar.org/3daf/1191d43e21a8302d98567630b0e2025913b0.pdf
+0d781b943bff6a3b62a79e2c8daf7f4d4d6431ad,https://cs.uwaterloo.ca/~jhoey/papers/DhallICMI16.pdf
+39c48309b930396a5a8903fdfe781d3e40d415d0,http://www.ri.cmu.edu/pub_files/2017/5/ant_low.pdf
+3b1aaac41fc7847dd8a6a66d29d8881f75c91ad5,http://www.rci.rutgers.edu/~vmp93/Journal_pub/T-pami_openset.pdf
+b417b90fa0c288bbaab1aceb8ebc7ec1d3f33172,https://arxiv.org/pdf/1802.00237v1.pdf
+03baf00a3d00887dd7c828c333d4a29f3aacd5f5,http://pdfs.semanticscholar.org/03ba/f00a3d00887dd7c828c333d4a29f3aacd5f5.pdf
+04644c97784700c449f2c885cb4cab86447f0bd4,http://www.seekdl.org/upload/files/20131209_014911.pdf
+0e1a18576a7d3b40fe961ef42885101f4e2630f8,http://pdfs.semanticscholar.org/0e1a/18576a7d3b40fe961ef42885101f4e2630f8.pdf
+8c7bceba769762126fd3dae78d622908bb83c3d3,http://qil.uh.edu/qil/websitecontent/pdf/2015-33.pdf
+531fd9be964d18ba7970bd1ca6c3b9dc91b8d2ab,http://pdfs.semanticscholar.org/531f/d9be964d18ba7970bd1ca6c3b9dc91b8d2ab.pdf
+db93049981abca0a281918b8d0655572922553de,http://www.cs.odu.edu/~sji/papers/pdf/Ji_TKDE08.pdf
+38cbb500823057613494bacd0078aa0e57b30af8,https://ibug.doc.ic.ac.uk/media/uploads/documents/08014986.pdf
+de8381903c579a4fed609dff3e52a1dc51154951,http://pdfs.semanticscholar.org/de83/81903c579a4fed609dff3e52a1dc51154951.pdf
+73c5bab5c664afa96b1c147ff21439135c7d968b,http://uclab.khu.ac.kr/resources/publication/C_109.pdf
+5c5e1f367e8768a9fb0f1b2f9dbfa060a22e75c0,http://www.cs.ucr.edu/~mkafai/papers/Paper_tifs2014.pdf
+82c303cf4852ad18116a2eea31e2291325bc19c3,http://pdfs.semanticscholar.org/82c3/03cf4852ad18116a2eea31e2291325bc19c3.pdf
+03f14159718cb495ca50786f278f8518c0d8c8c9,http://www.acscrg.com/iccsce/2015/wp-content/uploads/2015/11/The-Latest-Schedule-23-Nov-2015.pdf
+258a8c6710a9b0c2dc3818333ec035730062b1a5,http://pdfs.semanticscholar.org/258a/8c6710a9b0c2dc3818333ec035730062b1a5.pdf
+100641ed8a5472536dde53c1f50fa2dd2d4e9be9,https://filebox.ece.vt.edu/~parikh/Publications/Parikh_hum_mac_com_Allerton_2013.pdf
+171389529df11cc5a8b1fbbe659813f8c3be024d,http://pdfs.semanticscholar.org/1713/89529df11cc5a8b1fbbe659813f8c3be024d.pdf
+29f0a868644462aa7ebc21f4510d4209932a1b8c,http://yamdrok.stanford.edu/crowd/icmr.pdf
+6bcfcc4a0af2bf2729b5bc38f500cfaab2e653f0,http://www.cv-foundation.org/openaccess/content_cvpr_2016_workshops/w28/papers/Afshar_Facial_Expression_Recognition_CVPR_2016_paper.pdf
+fdfaf46910012c7cdf72bba12e802a318b5bef5a,http://pdfs.semanticscholar.org/fdfa/f46910012c7cdf72bba12e802a318b5bef5a.pdf
+1a140d9265df8cf50a3cd69074db7e20dc060d14,http://pdfs.semanticscholar.org/1a14/0d9265df8cf50a3cd69074db7e20dc060d14.pdf
+8c6c0783d90e4591a407a239bf6684960b72f34e,http://pdfs.semanticscholar.org/8c6c/0783d90e4591a407a239bf6684960b72f34e.pdf
+3328674d71a18ed649e828963a0edb54348ee598,http://ai.pku.edu.cn/application/files/1415/1124/8089/A_face_and_palmprint_recognition_approach_based_on_discriminant_DCT_feature_extraction.pdf
+14014a1bdeb5d63563b68b52593e3ac1e3ce7312,http://pdfs.semanticscholar.org/1401/4a1bdeb5d63563b68b52593e3ac1e3ce7312.pdf
+eacba5e8fbafb1302866c0860fc260a2bdfff232,http://pdfs.semanticscholar.org/eacb/a5e8fbafb1302866c0860fc260a2bdfff232.pdf
+49659fb64b1d47fdd569e41a8a6da6aa76612903,http://pdfs.semanticscholar.org/4965/9fb64b1d47fdd569e41a8a6da6aa76612903.pdf
+42e155ea109eae773dadf74d713485be83fca105,http://www.eurasip.org/Proceedings/Eusipco/Eusipco2014/HTML/papers/1569924805.pdf
+38679355d4cfea3a791005f211aa16e76b2eaa8d,http://hub.hku.hk/bitstream/10722/127357/1/Content.pdf
+18c6c92c39c8a5a2bb8b5673f339d3c26b8dcaae,http://pdfs.semanticscholar.org/18c6/c92c39c8a5a2bb8b5673f339d3c26b8dcaae.pdf
+b235b4ccd01a204b95f7408bed7a10e080623d2e,http://pdfs.semanticscholar.org/b235/b4ccd01a204b95f7408bed7a10e080623d2e.pdf
+5b0bf1063b694e4b1575bb428edb4f3451d9bf04,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w25/papers/Yang_Facial_Shape_Tracking_ICCV_2015_paper.pdf
+13db9466d2ddf3c30b0fd66db8bfe6289e880802,http://pdfs.semanticscholar.org/13db/9466d2ddf3c30b0fd66db8bfe6289e880802.pdf
+3f623bb0c9c766a5ac612df248f4a59288e4d29f,http://pdfs.semanticscholar.org/3f62/3bb0c9c766a5ac612df248f4a59288e4d29f.pdf
+adce9902dca7f4e8a9b9cf6686ec6a7c0f2a0ba6,http://openaccess.thecvf.com/content_iccv_2015/papers/Li_Two_Birds_One_ICCV_2015_paper.pdf
+7dda2eb0054eb1aeda576ed2b27a84ddf09b07d4,http://pdfs.semanticscholar.org/7dda/2eb0054eb1aeda576ed2b27a84ddf09b07d4.pdf
+221252be5d5be3b3e53b3bbbe7a9930d9d8cad69,http://pdfs.semanticscholar.org/2212/52be5d5be3b3e53b3bbbe7a9930d9d8cad69.pdf
+9f65319b8a33c8ec11da2f034731d928bf92e29d,http://pdfs.semanticscholar.org/9f65/319b8a33c8ec11da2f034731d928bf92e29d.pdf
+0241513eeb4320d7848364e9a7ef134a69cbfd55,http://videolectures.net/site/normal_dl/tag=71121/cvpr2010_yang_stis_01.v1.pdf
+6462ef39ca88f538405616239471a8ea17d76259,http://pdfs.semanticscholar.org/6462/ef39ca88f538405616239471a8ea17d76259.pdf
+4276eb27e2e4fc3e0ceb769eca75e3c73b7f2e99,http://pdfs.semanticscholar.org/4276/eb27e2e4fc3e0ceb769eca75e3c73b7f2e99.pdf
+7492c611b1df6bce895bee6ba33737e7fc7f60a6,https://ibug.doc.ic.ac.uk/media/uploads/documents/zafeiriou_the_3d_menpo_iccv_2017_paper.pdf
+1f8e44593eb335c2253d0f22f7f9dc1025af8c0d,https://qmro.qmul.ac.uk/xmlui/bitstream/handle/123456789/22607/Patras%20Fine-tuning%20regression%202014%20Accepted.pdf?sequence=1
+424259e9e917c037208125ccc1a02f8276afb667,http://arxiv.org/pdf/1604.06433v1.pdf
+63213d080a43660ac59ea12e3c35e6953f6d7ce8,https://arxiv.org/pdf/1704.02895v1.pdf
+7c95449a5712aac7e8c9a66d131f83a038bb7caa,http://pdfs.semanticscholar.org/7c95/449a5712aac7e8c9a66d131f83a038bb7caa.pdf
+cef6cffd7ad15e7fa5632269ef154d32eaf057af,http://pdfs.semanticscholar.org/cef6/cffd7ad15e7fa5632269ef154d32eaf057af.pdf
+d3424761e06a8f5f3c1f042f1f1163a469872129,http://pdfs.semanticscholar.org/d342/4761e06a8f5f3c1f042f1f1163a469872129.pdf
+248db911e3a6a63ecd5ff6b7397a5d48ac15e77a,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Matthews_Enriching_Texture_Analysis_2013_CVPR_paper.pdf
+6502cf30c088c6c7c4b2a05b7777b032c9dde7cd,http://vipl.ict.ac.cn/homepage/CVPR15Metric/ref/Learning%20compact%20binary%20face%20descriptor%20for%20face%20recognition_PAMI2015.pdf
+0a23d374c6cf71a65e845569230420362fe4903a,http://mplab.ucsd.edu/~ksikka/in_the_wild.pdf
+cc91001f9d299ad70deb6453d55b2c0b967f8c0d,http://pdfs.semanticscholar.org/cc91/001f9d299ad70deb6453d55b2c0b967f8c0d.pdf
+c068263bb09968fe69c053906279b16532b778f4,http://www.researchgate.net/profile/Mahdi_Bejani/publication/257435889_Audiovisual_emotion_recognition_using_ANOVA_feature_selection_method_and_multi-classifier_neural_networks/links/0c960529aee6234edd000000.pdf
+09b43b59879d59493df2a93c216746f2cf50f4ac,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1A_036_ext.pdf
+d46fda4b49bbc219e37ef6191053d4327e66c74b,http://pdfs.semanticscholar.org/d46f/da4b49bbc219e37ef6191053d4327e66c74b.pdf
+267c6e8af71bab68547d17966adfaab3b4711e6b,http://pdfs.semanticscholar.org/3097/60122ce6215876c013b2b0211f1df8239df5.pdf
+75e5ba7621935b57b2be7bf4a10cad66a9c445b9,http://pdfs.semanticscholar.org/75e5/ba7621935b57b2be7bf4a10cad66a9c445b9.pdf
+121fe33daf55758219e53249cf8bcb0eb2b4db4b,http://pdfs.semanticscholar.org/121f/e33daf55758219e53249cf8bcb0eb2b4db4b.pdf
+363ca0a3f908859b1b55c2ff77cc900957653748,http://pdfs.semanticscholar.org/363c/a0a3f908859b1b55c2ff77cc900957653748.pdf
+16671b2dc89367ce4ed2a9c241246a0cec9ec10e,http://www.bsp.brain.riken.jp/publications/2010/PAMI-clustering-He-cichocki.pdf
+8f08b2101d43b1c0829678d6a824f0f045d57da5,http://pdfs.semanticscholar.org/b93b/f0a7e449cfd0db91a83284d9eba25a6094d8.pdf
+c0d1d9a585ef961f1c8e6a1e922822811181615c,http://pdfs.semanticscholar.org/c0d1/d9a585ef961f1c8e6a1e922822811181615c.pdf
+1ef5ce743a44d8a454dbfc2657e1e2e2d025e366,http://pdfs.semanticscholar.org/1ef5/ce743a44d8a454dbfc2657e1e2e2d025e366.pdf
+0ea38a5ba0c8739d1196da5d20efb13406bb6550,https://filebox.ece.vt.edu/~parikh/Publications/ParikhGrauman_ICCV2011_relative.pdf
+63cff99eff0c38b633c8a3a2fec8269869f81850,http://pdfs.semanticscholar.org/63cf/f99eff0c38b633c8a3a2fec8269869f81850.pdf
+3bc776eb1f4e2776f98189e17f0d5a78bb755ef4,http://pdfs.semanticscholar.org/3bc7/76eb1f4e2776f98189e17f0d5a78bb755ef4.pdf
+0181fec8e42d82bfb03dc8b82381bb329de00631,http://users.isy.liu.se/en/cvl/zografos/publications/CVPR2013.pdf
+cda8fd9dd8b485e6854b1733d2294f69666c66f7,http://eeeweba.ntu.edu.sg/computervision/Research%20Papers/2014/Activity%20Recognition%20in%20Unconstrained%20RGB-D%20Video%20using%203D%20Trajectories.pdf
+14070478b8f0d84e5597c3e67c30af91b5c3a917,http://pdfs.semanticscholar.org/f0a5/f885aa14ac2bbb3cc8e4c7530f2449b2f160.pdf
+dbd5e9691cab2c515b50dda3d0832bea6eef79f2,http://pdfs.semanticscholar.org/dbd5/e9691cab2c515b50dda3d0832bea6eef79f2.pdf
+86e1bdbfd13b9ed137e4c4b8b459a3980eb257f6,http://pdfs.semanticscholar.org/86e1/bdbfd13b9ed137e4c4b8b459a3980eb257f6.pdf
+960ad662c2bb454d69006492cc3f52d1550de55d,http://www.research.att.com/~yifanhu/PUB/gmap_cga.pdf
+35a39c7da14b1d288c0f9201374b307f667d63a3,http://media.au.tsinghua.edu.cn/liuyebin_files/TMM.pdf
+0a85bdff552615643dd74646ac881862a7c7072d,https://fbcdn-dragon-a.akamaihd.net/hphotos-ak-xpa1/t39.2365-6/10000000_1672336992989417_1391274031_n/Beyond_Frontal_Faces_Improving_Person_Recognition_Using_Multiple_Cues.pdf
+961939e96eed6620b1752721ab520745ac5329c6,http://www.cs.umd.edu/~gaurav/research/frgcWorkshop.pdf
+5f676d6eca4c72d1a3f3acf5a4081c29140650fb,http://www.cs.ucr.edu/~mkafai/papers/Paper_fg.pdf
+076d3fc800d882445c11b9af466c3af7d2afc64f,http://slsp.kaist.ac.kr/paperdata/Face_attribute_classification.pdf
+4acd683b5f91589002e6f50885df51f48bc985f4,http://www.albany.edu/faculty/mchang2/files/2015_09_ICIP_Darpa.pdf
+9306f61c7c3bdcdcb257cd437ca59df8e599e326,http://www.umiacs.umd.edu/~pvishalm/Conference_pub/ACPR2011_v2.pdf
+eeb6d084f9906c53ec8da8c34583105ab5ab8284,http://pdfs.semanticscholar.org/eeb6/d084f9906c53ec8da8c34583105ab5ab8284.pdf
+7ebd323ddfe3b6de8368c4682db6d0db7b70df62,http://pdfs.semanticscholar.org/7ebd/323ddfe3b6de8368c4682db6d0db7b70df62.pdf
+688754568623f62032820546ae3b9ca458ed0870,http://pdfs.semanticscholar.org/d6c2/108259edf97fabcbe608766a6baa98ac893d.pdf
+1ec98785ac91808455b753d4bc00441d8572c416,https://www.cl.cam.ac.uk/~tb346/pub/papers/fg2017_curriculum.pdf
+57f8e1f461ab25614f5fe51a83601710142f8e88,http://pdfs.semanticscholar.org/57f8/e1f461ab25614f5fe51a83601710142f8e88.pdf
+3b02aaccc9f063ae696c9d28bb06a8cd84b2abb8,http://pdfs.semanticscholar.org/3b02/aaccc9f063ae696c9d28bb06a8cd84b2abb8.pdf
+24e099e77ae7bae3df2bebdc0ee4e00acca71250,https://qmro.qmul.ac.uk/xmlui/bitstream/handle/123456789/22467/Yang%20Robust%20Face%20Alignment%20Under%20Occlusion%20via%20Regional%20Predictive%20Power%20Estimation%202015%20Accepted.pdf?sequence=1
+24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd,http://www.researchgate.net/profile/Wei_Quan3/publication/221430048_High-resolution_comprehensive_3-D_dynamic_database_for_facial_articulation_analysis/links/0deec534309495805d000000.pdf
+743e582c3e70c6ec07094887ce8dae7248b970ad,http://pdfs.semanticscholar.org/743e/582c3e70c6ec07094887ce8dae7248b970ad.pdf
+5b5962bdb75c72848c1fb4b34c113ff6101b5a87,http://research.microsoft.com/en-us/um/people/leizhang/paper/TMM2011_Xiao.pdf
+0595d18e8d8c9fb7689f636341d8a55cc15b3e6a,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_102.pdf
+282503fa0285240ef42b5b4c74ae0590fe169211,http://pdfs.semanticscholar.org/2825/03fa0285240ef42b5b4c74ae0590fe169211.pdf
+2cb5db4df50921d276ad9e7186119a276324e465,http://cbcl.mit.edu/projects/cbcl/publications/ps/Leibo_Liao_Poggio_VISAPP_2014.pdf
+26ec75b8ad066b36f814379a79ad57089c82c079,http://www.seas.upenn.edu/~bensapp/papers/ambig-tech-report-2009.pdf
+48463a119f67ff2c43b7c38f0a722a32f590dfeb,http://pdfs.semanticscholar.org/4846/3a119f67ff2c43b7c38f0a722a32f590dfeb.pdf
+ae18ccb35a1a5d7b22f2a5760f706b1c11bf39a9,http://pdfs.semanticscholar.org/ae18/ccb35a1a5d7b22f2a5760f706b1c11bf39a9.pdf
+e8c9dcbf56714db53063b9c367e3e44300141ff6,http://faculty.virginia.edu/humandynamicslab/pubs/BrickHunterCohn-ACII2009.pdf
+35ec9b8811f2d755c7ad377bdc29741b55b09356,http://pdfs.semanticscholar.org/35ec/9b8811f2d755c7ad377bdc29741b55b09356.pdf
+30aa681ab80a830c3890090b0da3f1e786bd66ff,https://arxiv.org/pdf/1708.02337v1.pdf
+0f1cbe4e26d584c82008ccef9fb1e4669b82de1f,http://figment.cse.usf.edu/~sfefilat/data/papers/MoBT9.24.pdf
+4a2d54ea1da851151d43b38652b7ea30cdb6dfb2,http://pdfs.semanticscholar.org/4a2d/54ea1da851151d43b38652b7ea30cdb6dfb2.pdf
+004d5491f673cd76150f43b0a0429214f5bfd823,http://www.cais.ntu.edu.sg/~chhoi/paper_pdf/fp130-wang.pdf
+c5935b92bd23fd25cae20222c7c2abc9f4caa770,http://openaccess.thecvf.com/content_cvpr_2017/papers/Feichtenhofer_Spatiotemporal_Multiplier_Networks_CVPR_2017_paper.pdf
+4f0d5cbcd30fef3978b9691c2e736daed2f841c1,http://www.ics.uci.edu/~dramanan/papers/localdist_journal.pdf
+a378fc39128107815a9a68b0b07cffaa1ed32d1f,http://pdfs.semanticscholar.org/a378/fc39128107815a9a68b0b07cffaa1ed32d1f.pdf
+b3b532e8ea6304446b1623e83b0b9a96968f926c,http://pdfs.semanticscholar.org/b3b5/32e8ea6304446b1623e83b0b9a96968f926c.pdf
+d9810786fccee5f5affaef59bc58d2282718af9b,http://pdfs.semanticscholar.org/d981/0786fccee5f5affaef59bc58d2282718af9b.pdf
+488a61e0a1c3768affdcd3c694706e5bb17ae548,http://pdfs.semanticscholar.org/916b/f08e66c3dd11bec809dd8cbe384e8860bb66.pdf
+6b089627a4ea24bff193611e68390d1a4c3b3644,http://publications.idiap.ch/downloads/reports/2012/Wallace_Idiap-RR-03-2012.pdf
+3b7f6035a113b560760c5e8000540fc46f91fed5,http://www.vision.ee.ethz.ch/~zzhiwu/posters/ICCV13_Poster_ZhiwuHuang_v2.0.pdf
+0f0366070b46972fcb2976775b45681e62a94a26,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2014/W01/papers/Bendale_Reliable_Posterior_Probability_2014_CVPR_paper.pdf
+1be785355ae29e32d85d86285bb8f90ea83171df,http://staff.estem-uc.edu.au/roland/files/2009/05/Sharma_Dhall_Gedeon_Goecke_ACII2013_ModelingStressUsingThermalFacialPatterns_ASpatio-TemporalApproach.pdf
+6bb0425baac448297fbd29a00e9c9b9926ce8870,http://pdfs.semanticscholar.org/6bb0/425baac448297fbd29a00e9c9b9926ce8870.pdf
+521cfbc1949289a7ffc3ff90af7c55adeb43db2a,http://pdfs.semanticscholar.org/521c/fbc1949289a7ffc3ff90af7c55adeb43db2a.pdf
+90b7619eabe94731722ae884d0802256462457dc,https://arxiv.org/pdf/1511.09319v1.pdf
+fafe69a00565895c7d57ad09ef44ce9ddd5a6caa,http://pdfs.semanticscholar.org/fafe/69a00565895c7d57ad09ef44ce9ddd5a6caa.pdf
+439ec47725ae4a3660e509d32828599a495559bf,http://pdfs.semanticscholar.org/439e/c47725ae4a3660e509d32828599a495559bf.pdf
+a52c72cd8538c62156aaa4d7e5c54946be53b9bb,http://pdfs.semanticscholar.org/a52c/72cd8538c62156aaa4d7e5c54946be53b9bb.pdf
+41ab4939db641fa4d327071ae9bb0df4a612dc89,http://pdfs.semanticscholar.org/41ab/4939db641fa4d327071ae9bb0df4a612dc89.pdf
+0f112e49240f67a2bd5aaf46f74a924129f03912,http://www.cse.msu.edu/biometrics/Publications/Face/ParkTongJain_AgeInvariantFaceRecognition_PAMI10.pdf
+4ae59d2a28abd76e6d9fb53c9e7ece833dce7733,http://pdfs.semanticscholar.org/4ae5/9d2a28abd76e6d9fb53c9e7ece833dce7733.pdf
+774cbb45968607a027ae4729077734db000a1ec5,http://pdfs.semanticscholar.org/774c/bb45968607a027ae4729077734db000a1ec5.pdf
+7384c39a2d084c93566b98bc4d81532b5ad55892,http://pdfs.semanticscholar.org/d0a5/0940a1bf951adaf22bd1fc72ea861b606cdb.pdf
+0aa9872daf2876db8d8e5d6197c1ce0f8efee4b7,http://pdfs.semanticscholar.org/0aa9/872daf2876db8d8e5d6197c1ce0f8efee4b7.pdf
+b9c9c7ef82f31614c4b9226e92ab45de4394c5f6,http://pdfs.semanticscholar.org/b9c9/c7ef82f31614c4b9226e92ab45de4394c5f6.pdf
+0b51197109813d921835cb9c4153b9d1e12a9b34,http://pdfs.semanticscholar.org/0b51/197109813d921835cb9c4153b9d1e12a9b34.pdf
+759a3b3821d9f0e08e0b0a62c8b693230afc3f8d,http://homes.cs.washington.edu/~neeraj/projects/faceverification/base/papers/nk_iccv2009_attrs.pdf
+61542874efb0b4c125389793d8131f9f99995671,http://pdfs.semanticscholar.org/6154/2874efb0b4c125389793d8131f9f99995671.pdf
+bb750b4c485bc90a47d4b2f723be4e4b74229f7a,http://pdfs.semanticscholar.org/bb75/0b4c485bc90a47d4b2f723be4e4b74229f7a.pdf
+5865e824e3d8560e07840dd5f75cfe9bf68f9d96,http://pdfs.semanticscholar.org/5865/e824e3d8560e07840dd5f75cfe9bf68f9d96.pdf
+512b4c8f0f3fb23445c0c2dab768bcd848fa8392,http://pdfs.semanticscholar.org/b85d/ac54bfa985137b3b071593b986ac92f32bed.pdf
+973e3d9bc0879210c9fad145a902afca07370b86,http://pdfs.semanticscholar.org/973e/3d9bc0879210c9fad145a902afca07370b86.pdf
+3c11a1f2bd4b9ce70f699fb6ad6398171a8ad3bd,http://pdfs.semanticscholar.org/3c11/a1f2bd4b9ce70f699fb6ad6398171a8ad3bd.pdf
+25885e9292957feb89dcb4a30e77218ffe7b9868,http://pdfs.semanticscholar.org/2588/5e9292957feb89dcb4a30e77218ffe7b9868.pdf
+32f7e1d7fa62b48bedc3fcfc9d18fccc4074d347,https://arxiv.org/pdf/1410.1606v2.pdf
+30b74e60ec11c0ebc4e640637d56d85872dd17ce,http://pdfs.semanticscholar.org/c810/9382eea8f3fc49b3e6ed13d36eb95a06d0ed.pdf
+1b90507f02967ff143fce993a5abbfba173b1ed0,http://mrl.cs.vsb.cz/publications/fusek_ipta_2014.pdf
+0509c442550571907258f07aad9da9d00b1e468b,https://pdfs.semanticscholar.org/0509/c442550571907258f07aad9da9d00b1e468b.pdf
+2d71e0464a55ef2f424017ce91a6bcc6fd83f6c3,http://pdfs.semanticscholar.org/77c1/56969e3b7fbc86432c5238a95679d25ac579.pdf
+334166a942acb15ccc4517cefde751a381512605,http://pdfs.semanticscholar.org/3341/66a942acb15ccc4517cefde751a381512605.pdf
+3983637022992a329f1d721bed246ae76bc934f7,http://www.cs.umd.edu/~djacobs/pubs_files/SlantCVPRFinal.pdf
+6aa43f673cc42ed2fa351cbc188408b724cb8d50,http://pdfs.semanticscholar.org/6aa4/3f673cc42ed2fa351cbc188408b724cb8d50.pdf
+064b797aa1da2000640e437cacb97256444dee82,http://pdfs.semanticscholar.org/064b/797aa1da2000640e437cacb97256444dee82.pdf
+593234ba1d2e16a887207bf65d6b55bbc7ea2247,http://pdfs.semanticscholar.org/73c4/47ea9f75b0ffbdd35c957aed88fe80b2ac07.pdf
+d961617db4e95382ba869a7603006edc4d66ac3b,http://pdfs.semanticscholar.org/d961/617db4e95382ba869a7603006edc4d66ac3b.pdf
+288d2704205d9ca68660b9f3a8fda17e18329c13,http://arxiv.org/pdf/1601.04153v2.pdf
+f68f20868a6c46c2150ca70f412dc4b53e6a03c2,http://pdfs.semanticscholar.org/f68f/20868a6c46c2150ca70f412dc4b53e6a03c2.pdf
+7a1ce696e260899688cb705f243adf73c679f0d9,http://www.cse.msu.edu/~rossarun/pubs/SwearingenRossLabelPropagation_BIOSIG2016.pdf
+e5eb7fa8c9a812d402facfe8e4672670541ed108,http://pdfs.semanticscholar.org/e5eb/7fa8c9a812d402facfe8e4672670541ed108.pdf
+3b408a3ca6fb39b0fda4d77e6a9679003b2dc9ab,http://pdfs.semanticscholar.org/3b40/8a3ca6fb39b0fda4d77e6a9679003b2dc9ab.pdf
+f781e50caa43be13c5ceb13f4ccc2abc7d1507c5,http://pdfs.semanticscholar.org/f781/e50caa43be13c5ceb13f4ccc2abc7d1507c5.pdf
+27169761aeab311a428a9dd964c7e34950a62a6b,http://academicjournals.org/article/article1380818227_Mostayed%20et%20al.pdf
+1fd2ed45fb3ba77f10c83f0eef3b66955645dfe0,http://pdfs.semanticscholar.org/d91a/de2712c65f45ed8b917414829ecb24c3c183.pdf
+b37f57edab685dba5c23de00e4fa032a3a6e8841,http://pdfs.semanticscholar.org/b37f/57edab685dba5c23de00e4fa032a3a6e8841.pdf
+5042b358705e8d8e8b0655d07f751be6a1565482,http://pdfs.semanticscholar.org/5042/b358705e8d8e8b0655d07f751be6a1565482.pdf
+90ad0daa279c3e30b360f9fe9371293d68f4cebf,http://pdfs.semanticscholar.org/90ad/0daa279c3e30b360f9fe9371293d68f4cebf.pdf
+9eb86327c82b76d77fee3fd72e2d9eff03bbe5e0,http://pdfs.semanticscholar.org/9eb8/6327c82b76d77fee3fd72e2d9eff03bbe5e0.pdf
+0ef96d97365899af797628e80f8d1020c4c7e431,http://media.adelaide.edu.au/acvt/Publications/2006/2006-Improving%20the%20Speed%20of%20Kernel%20PCA%20on%20Large%20Scale%20Datasets.pdf
+390f3d7cdf1ce127ecca65afa2e24c563e9db93b,https://arxiv.org/pdf/1408.3967v2.pdf
+d850aff9d10a01ad5f1d8a1b489fbb3998d0d80e,http://pdfs.semanticscholar.org/d850/aff9d10a01ad5f1d8a1b489fbb3998d0d80e.pdf
+474b461cd12c6d1a2fbd67184362631681defa9e,http://toc.proceedings.com/24478webtoc.pdf
+c4934d9f9c41dbc46f4173aad2775432fe02e0e6,http://pdfs.semanticscholar.org/c493/4d9f9c41dbc46f4173aad2775432fe02e0e6.pdf
+d6a9ea9b40a7377c91c705f4c7f206a669a9eea2,http://pdfs.semanticscholar.org/d6a9/ea9b40a7377c91c705f4c7f206a669a9eea2.pdf
+e0765de5cabe7e287582532456d7f4815acd74c1,http://pdfs.semanticscholar.org/e076/5de5cabe7e287582532456d7f4815acd74c1.pdf
+3403cb92192dc6b2943d8dbfa8212cc65880159e,http://pdfs.semanticscholar.org/3403/cb92192dc6b2943d8dbfa8212cc65880159e.pdf
+0c5afb209b647456e99ce42a6d9d177764f9a0dd,http://pdfs.semanticscholar.org/49ee/5e1f1cfa45aa105e4120e6b7fb5b14cc2877.pdf
+6afed8dc29bc568b58778f066dc44146cad5366c,http://pdfs.semanticscholar.org/6afe/d8dc29bc568b58778f066dc44146cad5366c.pdf
+0019925779bff96448f0c75492717e4473f88377,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w3/papers/Reale_Deep_Heterogeneous_Face_CVPR_2017_paper.pdf
+67ba3524e135c1375c74fe53ebb03684754aae56,http://www.mirlab.org/conference_papers/International_Conference/ICASSP%202017/pdfs/0001767.pdf
+0d1d9a603b08649264f6e3b6d5a66bf1e1ac39d2,http://pdfs.semanticscholar.org/0d1d/9a603b08649264f6e3b6d5a66bf1e1ac39d2.pdf
+4c5b38ac5d60ab0272145a5a4d50872c7b89fe1b,https://opus.lib.uts.edu.au/bitstream/10453/43339/1/APSIPA_ASC_2015_submission_313.pdf
+a7c39a4e9977a85673892b714fc9441c959bf078,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR_2007/data/papers/workshops/Biometrics/papers/06-p71.pdf
+0ba99a709cd34654ac296418a4f41a9543928149,https://pdfs.semanticscholar.org/0ba9/9a709cd34654ac296418a4f41a9543928149.pdf
+0b02bfa5f3a238716a83aebceb0e75d22c549975,http://pdfs.semanticscholar.org/0b02/bfa5f3a238716a83aebceb0e75d22c549975.pdf
+182470fd0c18d0c5979dff75d089f1da176ceeeb,https://repositori.upf.edu/bitstream/handle/10230/27207/dominguez_MARMI16_mult.pdf?isAllowed=y&sequence=1
+4159663f0b292fd8cc7411929be9d669bb98b386,http://www.researchgate.net/profile/Pradeep_Khosla/publication/224752362_Cancelable_biometric_filters_for_face_recognition/links/00b4952ade904b0db4000000.pdf
+44dd150b9020b2253107b4a4af3644f0a51718a3,http://www.andrew.cmu.edu/user/kseshadr/TIFS_2012_Paper_Final_Submission.pdf
+a56b0f76919aabe8b768f5fbaeca412276365aa2,http://www.mingzhao.org/Publications/ZM_2006_FG_3DReconstruction.pdf
+324b9369a1457213ec7a5a12fe77c0ee9aef1ad4,http://research.nvidia.com/sites/default/files/pubs/2017-07_Dynamic-Facial-Analysis/rnnface.pdf
+1fa3948af1c338f9ae200038c45adadd2b39a3e4,http://pdfs.semanticscholar.org/7655/4182b4b0f3301afe8cfbc96a9d289b75254f.pdf
+858ddff549ae0a3094c747fb1f26aa72821374ec,https://arxiv.org/pdf/1606.03237v1.pdf
+b1301c722886b6028d11e4c2084ee96466218be4,http://pdfs.semanticscholar.org/b130/1c722886b6028d11e4c2084ee96466218be4.pdf
+a3a34c1b876002e0393038fcf2bcb00821737105,http://pdfs.semanticscholar.org/a3a3/4c1b876002e0393038fcf2bcb00821737105.pdf
+256ef946b4cecd8889df8d799d0c9175ae986af9,https://pdfs.semanticscholar.org/cd73/8347673151b378f447119fe2665f5c8c2215.pdf
+4b5eeea5dd8bd69331bd4bd4c66098b125888dea,http://pdfs.semanticscholar.org/4b5e/eea5dd8bd69331bd4bd4c66098b125888dea.pdf
+2742a61d32053761bcc14bd6c32365bfcdbefe35,http://pdfs.semanticscholar.org/ee39/96dc3f451f480134e1a468c32762d688c51b.pdf
+a9791544baa14520379d47afd02e2e7353df87e5,http://pdfs.semanticscholar.org/a979/1544baa14520379d47afd02e2e7353df87e5.pdf
+b191aa2c5b8ece06c221c3a4a0914e8157a16129,http://pdfs.semanticscholar.org/b191/aa2c5b8ece06c221c3a4a0914e8157a16129.pdf
+6f0900a7fe8a774a1977c5f0a500b2898bcbe149,http://pdfs.semanticscholar.org/6f09/00a7fe8a774a1977c5f0a500b2898bcbe149.pdf
+0754e769eb613fd3968b6e267a301728f52358be,http://www.umiacs.umd.edu/~cteo/public-shared/ICRA2012_ActionObjects_preprint.pdf
+1033ca56c7e88d8b3e80546848826f572c4cd63e,http://alumni.cs.ucsb.edu/~daniel/publications/conferences/fg11/DattaFerisVaqueroFG2011.pdf
+afe9cfba90d4b1dbd7db1cf60faf91f24d12b286,http://pdfs.semanticscholar.org/afe9/cfba90d4b1dbd7db1cf60faf91f24d12b286.pdf
+2ae139b247057c02cda352f6661f46f7feb38e45,http://www.iro.umontreal.ca/~memisevr/pubs/icmi_emotiw.pdf
+81e11e33fc5785090e2d459da3ac3d3db5e43f65,http://pdfs.semanticscholar.org/81e1/1e33fc5785090e2d459da3ac3d3db5e43f65.pdf
+04c5268d7a4e3819344825e72167332240a69717,http://longwood.cs.ucf.edu/~vision/papers/cvpr2008/7.pdf
+07a472ea4b5a28b93678a2dcf89028b086e481a2,http://pdfs.semanticscholar.org/07a4/72ea4b5a28b93678a2dcf89028b086e481a2.pdf
+06959f9cf3226179fa1b05efade843b7844fb2bc,http://www.researchgate.net/profile/Fei_Wu2/publication/4090506_Relevant_linear_feature_extraction_using_side-information_and_unlabeled_data/links/549062220cf214269f2668c9.pdf
+968f472477a8afbadb5d92ff1b9c7fdc89f0c009,http://pdfs.semanticscholar.org/968f/472477a8afbadb5d92ff1b9c7fdc89f0c009.pdf
+46538b0d841654a0934e4c75ccd659f6c5309b72,http://pdfs.semanticscholar.org/4653/8b0d841654a0934e4c75ccd659f6c5309b72.pdf
+41f8477a6be9cd992a674d84062108c68b7a9520,http://pdfs.semanticscholar.org/41f8/477a6be9cd992a674d84062108c68b7a9520.pdf
+b730908bc1f80b711c031f3ea459e4de09a3d324,http://ibug.doc.ic.ac.uk/media/uploads/documents/tifs_aoms.pdf
+7ae0212d6bf8a067b468f2a78054c64ea6a577ce,http://pdfs.semanticscholar.org/7ae0/212d6bf8a067b468f2a78054c64ea6a577ce.pdf
+30c96cc041bafa4f480b7b1eb5c45999701fe066,http://vislab.ucr.edu/PUBLICATIONS/pubs/Journal%20and%20Conference%20Papers/after10-1-1997/Journals/2014/DiscreteCosineTransformLocality-SensitiveHashes14.pdf
+7d1688ce0b48096e05a66ead80e9270260cb8082,http://openaccess.thecvf.com/content_ICCV_2017_workshops/papers/w44/Saxen_Real_vs._Fake_ICCV_2017_paper.pdf
+7bdcd85efd1e3ce14b7934ff642b76f017419751,http://www.cbsr.ia.ac.cn/users/zlei/papers/Lei-DFD-PAMI-14.pdf
+a4cc626da29ac48f9b4ed6ceb63081f6a4b304a2,http://pdfs.semanticscholar.org/a4cc/626da29ac48f9b4ed6ceb63081f6a4b304a2.pdf
+b07582d1a59a9c6f029d0d8328414c7bef64dca0,http://pdfs.semanticscholar.org/b075/82d1a59a9c6f029d0d8328414c7bef64dca0.pdf
+6a2b83c4ae18651f1a3496e48a35b0cd7a2196df,http://openaccess.thecvf.com/content_iccv_2015/papers/Song_Top_Rank_Supervised_ICCV_2015_paper.pdf
+3cd5b1d71c1d6a50fcc986589f2d0026c68d9803,http://www.openu.ac.il/home/hassner/projects/siftscales/OnSiftsAndTheirScales-CVPR12.pdf
+0c59071ddd33849bd431165bc2d21bbe165a81e0,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Oh_Person_Recognition_in_ICCV_2015_paper.pdf
+505e55d0be8e48b30067fb132f05a91650666c41,http://pdfs.semanticscholar.org/505e/55d0be8e48b30067fb132f05a91650666c41.pdf
+b51b4ef97238940aaa4f43b20a861eaf66f67253,http://pdfs.semanticscholar.org/b51b/4ef97238940aaa4f43b20a861eaf66f67253.pdf
+816eff5e92a6326a8ab50c4c50450a6d02047b5e,http://pdfs.semanticscholar.org/816e/ff5e92a6326a8ab50c4c50450a6d02047b5e.pdf
+43776d1bfa531e66d5e9826ff5529345b792def7,http://cvrr.ucsd.edu/scmartin/presentation/DriveAnalysisByLookingIn-ITSC2015-NDS.pdf
+2e19371a2d797ab9929b99c80d80f01a1fbf9479,http://pdfs.semanticscholar.org/2e19/371a2d797ab9929b99c80d80f01a1fbf9479.pdf
+01c948d2b73abe8be1ac128a6439c1081ebca95a,http://mla.sdu.edu.cn/PeopleInfo/lixuzhou/A%20hybrid%20biometric%20identification%20framework%20for%20high%20security%20applications.pdf
+187d4d9ba8e10245a34f72be96dd9d0fb393b1aa,http://pdfs.semanticscholar.org/187d/4d9ba8e10245a34f72be96dd9d0fb393b1aa.pdf
+537d8c4c53604fd419918ec90d6ef28d045311d0,https://arxiv.org/pdf/1704.08821v1.pdf
+27883967d3dac734c207074eed966e83afccb8c3,http://www.ee.cuhk.edu.hk/~xgwang/papers/gaoGZHW.pdf
+2574860616d7ffa653eb002bbaca53686bc71cdd,http://pdfs.semanticscholar.org/e01d/f3e6faffad3f304f6c40b133ae1dcf326662.pdf
+bd78a853df61d03b7133aea58e45cd27d464c3cf,http://pdfs.semanticscholar.org/bd78/a853df61d03b7133aea58e45cd27d464c3cf.pdf
+d912b8d88d63a2f0cb5d58164e7414bfa6b41dfa,http://pdfs.semanticscholar.org/d912/b8d88d63a2f0cb5d58164e7414bfa6b41dfa.pdf
+3f12701449a82a5e01845001afab3580b92da858,http://pdfs.semanticscholar.org/e4f5/2f5e116f0cc486d033e4b8fc737944343db7.pdf
+5a93f9084e59cb9730a498ff602a8c8703e5d8a5,http://pdfs.semanticscholar.org/5a93/f9084e59cb9730a498ff602a8c8703e5d8a5.pdf
+4d01d78544ae0de3075304ff0efa51a077c903b7,http://pdfs.semanticscholar.org/8f82/71d557ae862866c692e556f610ab45dcc399.pdf
+0273414ba7d56ab9ff894959b9d46e4b2fef7fd0,http://pdfs.semanticscholar.org/3ae9/29d33dd1e6acdf6c907a1115e5a21f6cb076.pdf
+4e93a8a47473bf57e24aec048cb870ab366a43d6,http://pdfs.semanticscholar.org/4e93/a8a47473bf57e24aec048cb870ab366a43d6.pdf
+406431d2286a50205a71f04e0b311ba858fc7b6c,http://pdfs.semanticscholar.org/4064/31d2286a50205a71f04e0b311ba858fc7b6c.pdf
+0cb2dd5f178e3a297a0c33068961018659d0f443,http://www.vislab.ucr.edu/Biometrics2017/program_slides/Noblis_CVPRW_IJBB.pdf
+c0ff7dc0d575658bf402719c12b676a34271dfcd,http://pdfs.semanticscholar.org/c0ff/7dc0d575658bf402719c12b676a34271dfcd.pdf
+05b8673d810fadf888c62b7e6c7185355ffa4121,https://nannanwang.github.io/My_Papers/IJCV2013.pdf
+965f8bb9a467ce9538dec6bef57438964976d6d9,http://www4.comp.polyu.edu.hk/~csajaykr/myhome/papers/ISBA2016.pdf
+7c349932a3d083466da58ab1674129600b12b81c,http://pdfs.semanticscholar.org/7c34/9932a3d083466da58ab1674129600b12b81c.pdf
+765b2cb322646c52e20417c3b44b81f89860ff71,http://cg.cs.tsinghua.edu.cn/papers/TVCG_2013_poseshop.pdf
+aafb271684a52a0b23debb3a5793eb618940c5dd,http://pdfs.semanticscholar.org/aafb/271684a52a0b23debb3a5793eb618940c5dd.pdf
+3fefc856a47726d19a9f1441168480cee6e9f5bb,http://pdfs.semanticscholar.org/e0e6/bf37d374f9c5cb2461ea87190e234c466d63.pdf
+11f7f939b6fcce51bdd8f3e5ecbcf5b59a0108f5,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Caseiro_Rolling_Riemannian_Manifolds_2013_CVPR_paper.pdf
+07d95be4922670ef2f8b11997e0c00eb643f3fca,http://eprints.eemcs.utwente.nl/26833/01/Pantic_The_First_Facial_Landmark_Tracking_in-the-Wild_Challenge.pdf
+5145e42dc46845f3aeb8307452765ba8dc59d2da,http://pdcat13.csie.ntust.edu.tw/download/papers/P10003.pdf
+591a737c158be7b131121d87d9d81b471c400dba,http://affect.media.mit.edu/pdfs/10.McDuff-etal-Affect-2010.pdf
+fcbec158e6a4ace3d4311b26195482b8388f0ee9,http://pdfs.semanticscholar.org/fcbe/c158e6a4ace3d4311b26195482b8388f0ee9.pdf
+b4ee1b468bf7397caa7396cfee2ab5f5ed6f2807,http://pdfs.semanticscholar.org/f269/c3573b39d26a5ad0754edb67a46ef57816c7.pdf
+706236308e1c8d8b8ba7749869c6b9c25fa9f957,http://affect.media.mit.edu/pdfs/11.McDuff-etal-Crowdsourced-2011.pdf
+3ada7640b1c525056e6fcd37eea26cd638815cd6,http://pdfs.semanticscholar.org/3ada/7640b1c525056e6fcd37eea26cd638815cd6.pdf
+b40290a694075868e0daef77303f2c4ca1c43269,http://pdfs.semanticscholar.org/b402/90a694075868e0daef77303f2c4ca1c43269.pdf
+9f499948121abb47b31ca904030243e924585d5f,http://pdfs.semanticscholar.org/9f49/9948121abb47b31ca904030243e924585d5f.pdf
+d142e74c6a7457e77237cf2a3ded4e20f8894e1a,http://pdfs.semanticscholar.org/d142/e74c6a7457e77237cf2a3ded4e20f8894e1a.pdf
+131130f105661a47e0ffb85c2fe21595785f948a,http://pdfs.semanticscholar.org/1311/30f105661a47e0ffb85c2fe21595785f948a.pdf
+b51e3d59d1bcbc023f39cec233f38510819a2cf9,http://pdfs.semanticscholar.org/b51e/3d59d1bcbc023f39cec233f38510819a2cf9.pdf
+2f95340b01cfa48b867f336185e89acfedfa4d92,https://www2.informatik.uni-hamburg.de/wtm/ps/Hamester_IJCNN2015.pdf
+8d42a24d570ad8f1e869a665da855628fcb1378f,http://pdfs.semanticscholar.org/8d42/a24d570ad8f1e869a665da855628fcb1378f.pdf
+efd308393b573e5410455960fe551160e1525f49,http://pdfs.semanticscholar.org/efd3/08393b573e5410455960fe551160e1525f49.pdf
+45e616093a92e5f1e61a7c6037d5f637aa8964af,http://www.cs.toronto.edu/~byang/papers/malf_fg15.pdf
+0fba39bf12486c7684fd3d51322e3f0577d3e4e8,http://vision.ucsd.edu/~pdollar/files/papers/BabenkoICCV07boom.pdf
+02a98118ce990942432c0147ff3c0de756b4b76a,http://eprints.pascal-network.org/archive/00005029/01/LaptevMarszalekSchmidRozenfeld-CVPR08-HumanActions.pdf
+457cf73263d80a1a1338dc750ce9a50313745d1d,http://pdfs.semanticscholar.org/457c/f73263d80a1a1338dc750ce9a50313745d1d.pdf
+eee8a37a12506ff5df72c402ccc3d59216321346,http://pdfs.semanticscholar.org/eee8/a37a12506ff5df72c402ccc3d59216321346.pdf
+eb526174fa071345ff7b1fad1fad240cd943a6d7,http://pdfs.semanticscholar.org/eb52/6174fa071345ff7b1fad1fad240cd943a6d7.pdf
+aebb9649bc38e878baef082b518fa68f5cda23a5,http://pdfs.semanticscholar.org/aebb/9649bc38e878baef082b518fa68f5cda23a5.pdf
+7c9622ad1d8971cd74cc9e838753911fe27ccac4,http://pdfs.semanticscholar.org/7c96/22ad1d8971cd74cc9e838753911fe27ccac4.pdf
+9a4c45e5c6e4f616771a7325629d167a38508691,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W02/papers/Mostafa_A_Facial_Features_2015_CVPR_paper.pdf
+4793f11fbca4a7dba898b9fff68f70d868e2497c,http://pdfs.semanticscholar.org/4793/f11fbca4a7dba898b9fff68f70d868e2497c.pdf
+15b07dae17f184c8e6efbc9d2b58526d8e8dc9d4,https://arxiv.org/pdf/1707.07196v1.pdf
+90c4f15f1203a3a8a5bf307f8641ba54172ead30,http://pdfs.semanticscholar.org/90c4/f15f1203a3a8a5bf307f8641ba54172ead30.pdf
+79cdc8c786c535366cafeced1f3bdeb18ff04e66,http://www.researchgate.net/profile/Ziga_Spiclin/publication/221795259_Groupwise_registration_of_multimodal_images_by_an_efficient_joint_entropy_minimization_scheme/links/0deec520dd49e7bc24000000.pdf
+3a0ea368d7606030a94eb5527a12e6789f727994,http://pdfs.semanticscholar.org/c7ca/eb8ecb6a38bdd65ddd25aca4fdd79203ddef.pdf
+f8ddb2cac276812c25021b5b79bf720e97063b1e,http://www.eecs.qmul.ac.uk/~sgg/papers/ShanEtAl_HCI2006.pdf
+a1ee0176a9c71863d812fe012b5c6b9c15f9aa8a,http://pdfs.semanticscholar.org/a1ee/0176a9c71863d812fe012b5c6b9c15f9aa8a.pdf
+86904aee566716d9bef508aa9f0255dc18be3960,http://pdfs.semanticscholar.org/8690/4aee566716d9bef508aa9f0255dc18be3960.pdf
+326613b5528b7806d6a06f43211800b54f34965e,http://mplab.ucsd.edu/wp-content/uploads/cvpr2008/conference/data/papers/377.pdf
+16e95a907b016951da7c9327927bb039534151da,http://pdfs.semanticscholar.org/16e9/5a907b016951da7c9327927bb039534151da.pdf
+54756f824befa3f0c2af404db0122f5b5bbf16e0,http://pdfs.semanticscholar.org/5475/6f824befa3f0c2af404db0122f5b5bbf16e0.pdf
+2ebc35d196cd975e1ccbc8e98694f20d7f52faf3,http://pdfs.semanticscholar.org/2ebc/35d196cd975e1ccbc8e98694f20d7f52faf3.pdf
+0acf23485ded5cb9cd249d1e4972119239227ddb,http://pdfs.semanticscholar.org/507e/2bad4851f04a686ae6e964e15bbef28583e9.pdf
+32925200665a1bbb4fc8131cd192cb34c2d7d9e3,http://pdfs.semanticscholar.org/3292/5200665a1bbb4fc8131cd192cb34c2d7d9e3.pdf
+4bc9a767d7e63c5b94614ebdc24a8775603b15c9,http://pdfs.semanticscholar.org/4bc9/a767d7e63c5b94614ebdc24a8775603b15c9.pdf
+b8378ab83bc165bc0e3692f2ce593dcc713df34a,http://cmp.felk.cvut.cz/ftp/articles/cech/Cech-ICPR-2014.pdf
+3b3482e735698819a6a28dcac84912ec01a9eb8a,http://vislab.ee.ucr.edu/PUBLICATIONS/pubs/Journal%20and%20Conference%20Papers/after10-1-1997/Conference/2003/Individual%20Recognition%20Using%20Gait%20Energy%20Image03.pdf
+1ce4587e27e2cf8ba5947d3be7a37b4d1317fbee,https://arxiv.org/pdf/1611.00142v2.pdf
+dc5cde7e4554db012d39fc41ac8580f4f6774045,http://pdfs.semanticscholar.org/dc5c/de7e4554db012d39fc41ac8580f4f6774045.pdf
+bd9eb65d9f0df3379ef96e5491533326e9dde315,http://pdfs.semanticscholar.org/bd9e/b65d9f0df3379ef96e5491533326e9dde315.pdf
+44eb4d128b60485377e74ffb5facc0bf4ddeb022,https://pdfs.semanticscholar.org/44eb/4d128b60485377e74ffb5facc0bf4ddeb022.pdf
+3d9db1cacf9c3bb7af57b8112787b59f45927355,http://pdfs.semanticscholar.org/3d9d/b1cacf9c3bb7af57b8112787b59f45927355.pdf
+0ed0e48b245f2d459baa3d2779bfc18fee04145b,http://pdfs.semanticscholar.org/0ed0/e48b245f2d459baa3d2779bfc18fee04145b.pdf
+795aa8064b34c4bf4acdd8be3f1e5d06da5a7756,http://pdfs.semanticscholar.org/795a/a8064b34c4bf4acdd8be3f1e5d06da5a7756.pdf
+016a8ed8f6ba49bc669dbd44de4ff31a79963078,http://www.jdl.ac.cn/user/sgshan/pub/icassp04_qing.pdf
+2965d092ed72822432c547830fa557794ae7e27b,http://pdfs.semanticscholar.org/f038/9424ab8c27e01843931fcbef7e3ca997e891.pdf
+d5375f51eeb0c6eff71d6c6ad73e11e9353c1f12,http://pdfs.semanticscholar.org/d537/5f51eeb0c6eff71d6c6ad73e11e9353c1f12.pdf
+27c66b87e0fbb39f68ddb783d11b5b7e807c76e8,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w14/papers/Rodriguez_Fast_Simplex-HMM_for_CVPR_2017_paper.pdf
+f02f0f6fcd56a9b1407045de6634df15c60a85cd,http://pdfs.semanticscholar.org/f02f/0f6fcd56a9b1407045de6634df15c60a85cd.pdf
+ae0765ebdffffd6e6cc33c7705df33b7e8478627,http://pdfs.semanticscholar.org/ae07/65ebdffffd6e6cc33c7705df33b7e8478627.pdf
+050eda213ce29da7212db4e85f948b812a215660,http://pdfs.semanticscholar.org/b598/4a1044d72224f99e959746a452fc1927a257.pdf
+c27f64eaf48e88758f650e38fa4e043c16580d26,http://pdfs.semanticscholar.org/c27f/64eaf48e88758f650e38fa4e043c16580d26.pdf
+d22785eae6b7503cb16402514fd5bd9571511654,http://pdfs.semanticscholar.org/d227/85eae6b7503cb16402514fd5bd9571511654.pdf
+7ed6ff077422f156932fde320e6b3bd66f8ffbcb,http://pdfs.semanticscholar.org/7ed6/ff077422f156932fde320e6b3bd66f8ffbcb.pdf
+caaa6e8e83abb97c78ff9b813b849d5ab56b5050,http://digital.cs.usu.edu/~xqi/Promotion/JSPL.FaceRecognition.14.pdf
+518edcd112991a1717856841c1a03dd94a250090,http://pdfs.semanticscholar.org/518e/dcd112991a1717856841c1a03dd94a250090.pdf
+1176c886afbd8685ecf0094450a02eb96b950f71,http://pdfs.semanticscholar.org/1176/c886afbd8685ecf0094450a02eb96b950f71.pdf
+8de2dbe2b03be8a99628ffa000ac78f8b66a1028,http://pdfs.semanticscholar.org/8de2/dbe2b03be8a99628ffa000ac78f8b66a1028.pdf
+374c7a2898180723f3f3980cbcb31c8e8eb5d7af,http://poseidon.csd.auth.gr/papers/PUBLISHED/CONFERENCE/pdf/Kotsia07a.pdf
+dda35768681f74dafd02a667dac2e6101926a279,http://www.cim.mcgill.ca/~clark/vmrl/web-content/papers/jjclark_icip_2014.pdf
+620e1dbf88069408b008347cd563e16aeeebeb83,http://pdfs.semanticscholar.org/620e/1dbf88069408b008347cd563e16aeeebeb83.pdf
+c74b1643a108939c6ba42ae4de55cb05b2191be5,http://pdfs.semanticscholar.org/c74b/1643a108939c6ba42ae4de55cb05b2191be5.pdf
+e76798bddd0f12ae03de26b7c7743c008d505215,http://pdfs.semanticscholar.org/e767/98bddd0f12ae03de26b7c7743c008d505215.pdf
+2f598922f81e65c1f3ffbd8c2456d2e9dcd7124a,http://pdfs.semanticscholar.org/464c/21d54339c3f6e624ce026fef53b19c1edd86.pdf
+14a5feadd4209d21fa308e7a942967ea7c13b7b6,http://mirlab.org/conference_papers/International_Conference/ICASSP%202012/pdfs/0001025.pdf
+078d507703fc0ac4bf8ca758be101e75ea286c80,http://pdfs.semanticscholar.org/078d/507703fc0ac4bf8ca758be101e75ea286c80.pdf
+fab83bf8d7cab8fe069796b33d2a6bd70c8cefc6,http://pdfs.semanticscholar.org/fab8/3bf8d7cab8fe069796b33d2a6bd70c8cefc6.pdf
+6daccf3d15c617873954bb75de26f6b6b0a42772,http://arts.buaa.edu.cn/papers/Learning%20Templates%20for%20Artistic%20Portrait%20Lighting%20Analysis.pdf
+bab88235a30e179a6804f506004468aa8c28ce4f,http://pdfs.semanticscholar.org/bab8/8235a30e179a6804f506004468aa8c28ce4f.pdf
+17d5e5c9a9ee4cf85dfbb9d9322968a6329c3735,http://pdfs.semanticscholar.org/17d5/e5c9a9ee4cf85dfbb9d9322968a6329c3735.pdf
+daf05febbe8406a480306683e46eb5676843c424,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Feng_Robust_Subspace_Segmentation_2014_CVPR_paper.pdf
+aecb15e3e9191eb135bdba2426967bfac3f068db,http://www.cvip.uofl.edu/wwwcvip/research/publications/Pub_Pdf/2010/3D%20Face%20Rcovery%20From%20Intensities_2010.pdf
+17738b0972571e7b4ae471d1b2dccea5ce057511,http://dayongwang.info/pdf/2011-MM.pdf
+b7cf7bb574b2369f4d7ebc3866b461634147041a,http://www.patternrecognition.cn/~zhongjin/2012/2012_yinjun_NCA.pdf
+2e475f1d496456831599ce86d8bbbdada8ee57ed,http://www.l3s.de/~siersdorfer/sources/2015/www2015groupsourcing.pdf
+1450296fb936d666f2f11454cc8f0108e2306741,http://pdfs.semanticscholar.org/1450/296fb936d666f2f11454cc8f0108e2306741.pdf
+19c0069f075b5b2d8ac48ad28a7409179bd08b86,http://people.csail.mit.edu/torralba/publications/iccv2013_khosla.pdf
+fbf196d83a41d57dfe577b3a54b1b7fa06666e3b,http://pdfs.semanticscholar.org/fbf1/96d83a41d57dfe577b3a54b1b7fa06666e3b.pdf
+265e76285e18587065a1e28246971f003c5267f3,http://cortex.informatik.tu-ilmenau.de/~wilhelm/wilhelm-soave-2004a.pdf
+bf1e0279a13903e1d43f8562aaf41444afca4fdc,http://pdfs.semanticscholar.org/bf1e/0279a13903e1d43f8562aaf41444afca4fdc.pdf
+0df0d1adea39a5bef318b74faa37de7f3e00b452,https://scalable.mpi-inf.mpg.de/files/2015/09/zhang_CVPR15.pdf
+744d23991a2c48d146781405e299e9b3cc14b731,http://www.cise.ufl.edu/~dihong/assets/LPS2016.pdf
+685f8df14776457c1c324b0619c39b3872df617b,http://pdfs.semanticscholar.org/685f/8df14776457c1c324b0619c39b3872df617b.pdf
+1a5b39a4b29afc5d2a3cd49087ae23c6838eca2b,http://www.l3s.de/~siersdorfer/sources/2014/mtgame-2014.pdf
+cc38942825d3a2c9ee8583c153d2c56c607e61a7,http://pdfs.semanticscholar.org/cc38/942825d3a2c9ee8583c153d2c56c607e61a7.pdf
+a608c5f8fd42af6e9bd332ab516c8c2af7063c61,http://mcl.usc.edu/wp-content/uploads/2016/01/Liu-TIFS-2015-10.pdf
+06aab105d55c88bd2baa058dc51fa54580746424,http://www4.comp.polyu.edu.hk/~cslzhang/paper/ISCRC_TIFS.pdf
+9e4b052844d154c3431120ec27e78813b637b4fc,http://pdfs.semanticscholar.org/9e4b/052844d154c3431120ec27e78813b637b4fc.pdf
+3802da31c6d33d71b839e260f4022ec4fbd88e2d,http://pdfs.semanticscholar.org/3802/da31c6d33d71b839e260f4022ec4fbd88e2d.pdf
+4836b084a583d2e794eb6a94982ea30d7990f663,http://pdfs.semanticscholar.org/4836/b084a583d2e794eb6a94982ea30d7990f663.pdf
+09733129161ca7d65cf56a7ad63c17f493386027,http://pdfs.semanticscholar.org/0973/3129161ca7d65cf56a7ad63c17f493386027.pdf
+46551095a2cc4976d6be0165c31c37b0c5638719,http://staff.estem-uc.edu.au/roland/wp-content/uploads/file/roland/publications/Journal/JMUI/joshi_goecke_alghowinem_dhall_wagner_epps_parker_breakspear_JMUI2013_MultimodalAssistiveTechnologiesForDepressionDiagnosisAndMonitoring.pdf
+67c703a864aab47eba80b94d1935e6d244e00bcb,http://pdfs.semanticscholar.org/67c7/03a864aab47eba80b94d1935e6d244e00bcb.pdf
+bc15a2fd09df7046e7e8c7c5b054d7f06c3cefe9,https://arxiv.org/pdf/1801.08329v1.pdf
+0d88ab0250748410a1bc990b67ab2efb370ade5d,http://signal.ee.bilkent.edu.tr/defevent/abstract/a1795.pdf
+bd8b7599acf53e3053aa27cfd522764e28474e57,http://www.jdl.ac.cn/doc/2009/iccv09_Learning%20Long%20Term%20Face%20Aging%20Patterns%20from%20Partially%20Dense%20Aging%20Databases.pdf
+4b60e45b6803e2e155f25a2270a28be9f8bec130,http://www.cs.washington.edu/ai/Mobile_Robotics/postscripts/attribute-objects-icra-2013.pdf
+3cfbe1f100619a932ba7e2f068cd4c41505c9f58,http://pdfs.semanticscholar.org/3cfb/e1f100619a932ba7e2f068cd4c41505c9f58.pdf
+bd572e9cbec095bcf5700cb7cd73d1cdc2fe02f4,http://pdfs.semanticscholar.org/bd57/2e9cbec095bcf5700cb7cd73d1cdc2fe02f4.pdf
+23ba9e462151a4bf9dfc3be5d8b12dbcfb7fe4c3,http://pdfs.semanticscholar.org/23ba/9e462151a4bf9dfc3be5d8b12dbcfb7fe4c3.pdf
+177d1e7bbea4318d379f46d8d17720ecef3086ac,http://pdfs.semanticscholar.org/177d/1e7bbea4318d379f46d8d17720ecef3086ac.pdf
+b017963d83b3edf71e1673d7ffdec13a6d350a87,http://pdfs.semanticscholar.org/b017/963d83b3edf71e1673d7ffdec13a6d350a87.pdf
+10f17534dba06af1ddab96c4188a9c98a020a459,http://www.cs.umass.edu/~mccallum/papers/peoplelda-iccv07.pdf
+274f87ad659cd90382ef38f7c6fafc4fc7f0d74d,http://www.deepkernel.com/Papers/mm2014.pdf
+6eddea1d991e81c1c3024a6cea422bc59b10a1dc,http://pdfs.semanticscholar.org/6edd/ea1d991e81c1c3024a6cea422bc59b10a1dc.pdf
+5f871838710a6b408cf647aacb3b198983719c31,http://www.jdl.ac.cn/user/xlchen/Paper/TIP07b.pdf
+0c167008408c301935bade9536084a527527ec74,http://www.micc.unifi.it/publications/2006/BDN06/bertini_nunziati-mm06.pdf
+0b20f75dbb0823766d8c7b04030670ef7147ccdd,http://pdfs.semanticscholar.org/0b20/f75dbb0823766d8c7b04030670ef7147ccdd.pdf
+03c1fc9c3339813ed81ad0de540132f9f695a0f8,http://pdfs.semanticscholar.org/03c1/fc9c3339813ed81ad0de540132f9f695a0f8.pdf
+ea482bf1e2b5b44c520fc77eab288caf8b3f367a,http://pdfs.semanticscholar.org/ea48/2bf1e2b5b44c520fc77eab288caf8b3f367a.pdf
+ffcbedb92e76fbab083bb2c57d846a2a96b5ae30,http://pdfs.semanticscholar.org/ffcb/edb92e76fbab083bb2c57d846a2a96b5ae30.pdf
+9e9052256442f4e254663ea55c87303c85310df9,http://pdfs.semanticscholar.org/9e90/52256442f4e254663ea55c87303c85310df9.pdf
+db227f72bb13a5acca549fab0dc76bce1fb3b948,http://pdfs.semanticscholar.org/e83d/6fd4502d6d31134ffddb80b6d5c752cf3123.pdf
+59c9d416f7b3d33141cc94567925a447d0662d80,http://pdfs.semanticscholar.org/59c9/d416f7b3d33141cc94567925a447d0662d80.pdf
+44aeda8493ad0d44ca1304756cc0126a2720f07b,http://pdfs.semanticscholar.org/afbb/c0ea429ba0f5cf7790d23fc40d7d5342a53c.pdf
+50ce3f8744c219871fbdcab1342d49d589f2626b,http://www.public.asu.edu/~jye02/Publications/Papers/AML_cvpr07.pdf
+7c45339253841b6f0efb28c75f2c898c79dfd038,http://vis-www.cs.umass.edu/papers/iccv07alignment.pdf
+d5afd7b76f1391321a1340a19ba63eec9e0f9833,http://pdfs.semanticscholar.org/d5af/d7b76f1391321a1340a19ba63eec9e0f9833.pdf
+a538b05ebb01a40323997629e171c91aa28b8e2f,http://pdfs.semanticscholar.org/a538/b05ebb01a40323997629e171c91aa28b8e2f.pdf
+217de4ff802d4904d3f90d2e24a29371307942fe,http://www.cs.columbia.edu/~tberg/papers/poof-cvpr13.pdf
+4b519e2e88ccd45718b0fc65bfd82ebe103902f7,http://biometrics.cse.msu.edu/Publications/Face/LiParkJain_DiscriminativeModelAgeInvariantFR_TIFS11.pdf
+6ef1996563835b4dfb7fda1d14abe01c8bd24a05,http://hera.inf-cv.uni-jena.de:6680/pdf/Goering14:NPT
+050fdbd2e1aa8b1a09ed42b2e5cc24d4fe8c7371,http://www.springer.com/cda/content/document/cda_downloaddocument/9783319587707-t1.pdf?SGWID=0-0-45-1607395-p180855259
+40f127fa4459a69a9a21884ee93d286e99b54c5f,http://graphics.tu-bs.de/media/publications/stengel2013resolution.pdf
+19c0c7835dba1a319b59359adaa738f0410263e8,http://www.svcl.ucsd.edu/publications/journal/2009/pami09-fs.pdf
+2f0e5a4b0ef89dd2cf55a4ef65b5c78101c8bfa1,http://pdfs.semanticscholar.org/f39c/e446b7c76d24cc63df7837cb3be0ee235df2.pdf
+00d94b35ffd6cabfb70b9a1d220b6823ae9154ee,https://arxiv.org/pdf/1503.07989v1.pdf
+d3b73e06d19da6b457924269bb208878160059da,http://pdfs.semanticscholar.org/d3b7/3e06d19da6b457924269bb208878160059da.pdf
+dedabf9afe2ae4a1ace1279150e5f1d495e565da,http://www.citi.sinica.edu.tw/papers/ycwang/4156-F.pdf
+66330846a03dcc10f36b6db9adf3b4d32e7a3127,http://pdfs.semanticscholar.org/6633/0846a03dcc10f36b6db9adf3b4d32e7a3127.pdf
+8411fe1142935a86b819f065cd1f879f16e77401,http://pdfs.semanticscholar.org/8411/fe1142935a86b819f065cd1f879f16e77401.pdf
+b64cfb39840969b1c769e336a05a30e7f9efcd61,http://pdfs.semanticscholar.org/fde2/b8943eb429d35e649c56ce95658b44c49243.pdf
+bc704680b5032eadf78c4e49f548ba14040965bf,http://pdfs.semanticscholar.org/ccbc/c676546a43cd4b714f0c85cbd493f9c61396.pdf
+58823377757e7dc92f3b70a973be697651089756,http://pdfs.semanticscholar.org/fa88/52e5b7849adf8e96a103ca67e4ca60bdf244.pdf
+88f7a3d6f0521803ca59fde45601e94c3a34a403,http://pdfs.semanticscholar.org/88f7/a3d6f0521803ca59fde45601e94c3a34a403.pdf
+a8c8a96b78e7b8e0d4a4a422fcb083e53ad06531,http://pdfs.semanticscholar.org/a8c8/a96b78e7b8e0d4a4a422fcb083e53ad06531.pdf
+e50ee29ca12028cb903cd498bb9cacd41bd5ce3a,http://pdfs.semanticscholar.org/e50e/e29ca12028cb903cd498bb9cacd41bd5ce3a.pdf
+0081e2188c8f34fcea3e23c49fb3e17883b33551,http://pdfs.semanticscholar.org/0081/e2188c8f34fcea3e23c49fb3e17883b33551.pdf
+210b98394c3be96e7fd75d3eb11a391da1b3a6ca,http://pdfs.semanticscholar.org/210b/98394c3be96e7fd75d3eb11a391da1b3a6ca.pdf
+2ad7cef781f98fd66101fa4a78e012369d064830,http://arxiv.org/pdf/1603.05474v1.pdf
+a9eb6e436cfcbded5a9f4b82f6b914c7f390adbd,http://pdfs.semanticscholar.org/a9eb/6e436cfcbded5a9f4b82f6b914c7f390adbd.pdf
+9b000ccc04a2605f6aab867097ebf7001a52b459,http://pdfs.semanticscholar.org/9b00/0ccc04a2605f6aab867097ebf7001a52b459.pdf
+a50b4d404576695be7cd4194a064f0602806f3c4,http://pdfs.semanticscholar.org/a50b/4d404576695be7cd4194a064f0602806f3c4.pdf
+19746957aa0d800d550da246a025ad44409cdb03,http://pdfs.semanticscholar.org/1974/6957aa0d800d550da246a025ad44409cdb03.pdf
+5f57a1a3a1e5364792b35e8f5f259f92ad561c1f,http://pdfs.semanticscholar.org/5f57/a1a3a1e5364792b35e8f5f259f92ad561c1f.pdf
+4919663c62174a9bc0cc7f60da8f96974b397ad2,https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/EBIF_5-2-2010_v_5.pdf
+07f31bef7a7035792e3791473b3c58d03928abbf,http://videolectures.net/site/normal_dl/tag=977248/fgconference2015_phillips_biometric_samples_01.pdf
+15f70a0ad8903017250927595ae2096d8b263090,http://pdfs.semanticscholar.org/15f7/0a0ad8903017250927595ae2096d8b263090.pdf
+63c022198cf9f084fe4a94aa6b240687f21d8b41,http://pdfs.semanticscholar.org/63c0/22198cf9f084fe4a94aa6b240687f21d8b41.pdf
+11a47a91471f40af5cf00449954474fd6e9f7694,http://pdfs.semanticscholar.org/11a4/7a91471f40af5cf00449954474fd6e9f7694.pdf
+23fd653b094c7e4591a95506416a72aeb50a32b5,http://pdfs.semanticscholar.org/8a92/17f540845a7d11d24f2d76c0b752ca439457.pdf
+5789f8420d8f15e7772580ec373112f864627c4b,http://openaccess.thecvf.com/content_ICCV_2017/papers/Schneider_Efficient_Global_Illumination_ICCV_2017_paper.pdf
+4dd6d511a8bbc4d9965d22d79ae6714ba48c8e41,http://pdfs.semanticscholar.org/4dd6/d511a8bbc4d9965d22d79ae6714ba48c8e41.pdf
+677ebde61ba3936b805357e27fce06c44513a455,http://pdfs.semanticscholar.org/677e/bde61ba3936b805357e27fce06c44513a455.pdf
+b2e5df82c55295912194ec73f0dca346f7c113f6,http://pdfs.semanticscholar.org/b2e5/df82c55295912194ec73f0dca346f7c113f6.pdf
+f47404424270f6a20ba1ba8c2211adfba032f405,http://pdfs.semanticscholar.org/f474/04424270f6a20ba1ba8c2211adfba032f405.pdf
+0e3840ea3227851aaf4633133dd3cbf9bbe89e5b,http://pdfs.semanticscholar.org/8d59/98cd984e7cce307da7d46f155f9db99c6590.pdf
+6b18628cc8829c3bf851ea3ee3bcff8543391819,http://engineering.cae.cn/fitee/fileup/2095-9184/SUPPL/20151221082702_2.pdf
+1130c38e88108cf68b92ecc61a9fc5aeee8557c9,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2A_058.pdf
+48910f9b6ccc40226cd4f105ed5291571271b39e,http://pdfs.semanticscholar.org/4891/0f9b6ccc40226cd4f105ed5291571271b39e.pdf
+36fc4120fc0638b97c23f97b53e2184107c52233,http://pdfs.semanticscholar.org/36fc/4120fc0638b97c23f97b53e2184107c52233.pdf
+04616814f1aabe3799f8ab67101fbaf9fd115ae4,http://pdfs.semanticscholar.org/0461/6814f1aabe3799f8ab67101fbaf9fd115ae4.pdf
+0aeb5020003e0c89219031b51bd30ff1bceea363,http://www.ee.cuhk.edu.hk/~xgwang/papers/sunWTarxiv15.pdf
+447d8893a4bdc29fa1214e53499ffe67b28a6db5,http://pdfs.semanticscholar.org/447d/8893a4bdc29fa1214e53499ffe67b28a6db5.pdf
+574705812f7c0e776ad5006ae5e61d9b071eebdb,http://pdfs.semanticscholar.org/5747/05812f7c0e776ad5006ae5e61d9b071eebdb.pdf
+3dfd94d3fad7e17f52a8ae815eb9cc5471172bc0,http://pdfs.semanticscholar.org/3dfd/94d3fad7e17f52a8ae815eb9cc5471172bc0.pdf
+3b4fd2aec3e721742f11d1ed4fa3f0a86d988a10,http://nms.csail.mit.edu/papers/sen060-chenA.pdf
+c758b9c82b603904ba8806e6193c5fefa57e9613,http://pdfs.semanticscholar.org/c758/b9c82b603904ba8806e6193c5fefa57e9613.pdf
+10b06d05b8b3a2c925b951a6d1d5919f536ffed4,http://gamesstudio.org/chek/wp-content/uploads/2014/01/interactivity_befaced.pdf
+6bfb0f8dd1a2c0b44347f09006dc991b8a08559c,https://www.computer.org/web/csdl/index/-/csdl/proceedings/fg/2013/5545/00/06553724.pdf
+68c17aa1ecbff0787709be74d1d98d9efd78f410,http://pdfs.semanticscholar.org/68c1/7aa1ecbff0787709be74d1d98d9efd78f410.pdf
+e6f20e7431172c68f7fce0d4595100445a06c117,http://pdfs.semanticscholar.org/e6f2/0e7431172c68f7fce0d4595100445a06c117.pdf
+732e4016225280b485c557a119ec50cffb8fee98,http://pdfs.semanticscholar.org/732e/4016225280b485c557a119ec50cffb8fee98.pdf
+18206e1b988389eaab86ef8c852662accf3c3663,http://pdfs.semanticscholar.org/d13e/5b4249cfe9672672eb573d15e7dc0a235e04.pdf
+16572c545384174f8136d761d2b0866e968120a8,http://pdfs.semanticscholar.org/1657/2c545384174f8136d761d2b0866e968120a8.pdf
+f4210309f29d4bbfea9642ecadfb6cf9581ccec7,http://pdfs.semanticscholar.org/f421/0309f29d4bbfea9642ecadfb6cf9581ccec7.pdf
+8efda5708bbcf658d4f567e3866e3549fe045bbb,http://pdfs.semanticscholar.org/8efd/a5708bbcf658d4f567e3866e3549fe045bbb.pdf
+a956ff50ca958a3619b476d16525c6c3d17ca264,http://ce.sharif.edu/~amiryanj/downloads/novel_bidirectional_nn_for_face_recognition.pdf
+46ae4d593d89b72e1a479a91806c39095cd96615,http://www.idiap.ch/~odobez/publications/GayKhouryMeignierOdobezDeleglise-FaceNaming-ICIP-2014.pdf
+72a55554b816b66a865a1ec1b4a5b17b5d3ba784,http://vislab.ucr.edu/Biometrics16/CVPRW_Vizilter.pdf
+d915e634aec40d7ee00cbea96d735d3e69602f1a,http://pdfs.semanticscholar.org/d915/e634aec40d7ee00cbea96d735d3e69602f1a.pdf
+a2bcfba155c990f64ffb44c0a1bb53f994b68a15,http://ibug.doc.ic.ac.uk/media/uploads/documents/cvprw_photoface.pdf
+c9e955cb9709f16faeb0c840f4dae92eb875450a,http://pdfs.semanticscholar.org/c9e9/55cb9709f16faeb0c840f4dae92eb875450a.pdf
+a55efc4a6f273c5895b5e4c5009eabf8e5ed0d6a,http://cvrr.ucsd.edu/publications/2014/TawariMartinTrivedi_IEEETITS2014.pdf
+6d97e69bbba5d1f5c353f9a514d62aff63bc0fb1,http://disi.unitn.it/~sebe/publications/MIR03.pdf
+19868a469dc25ee0db00947e06c804b88ea94fd0,http://pdfs.semanticscholar.org/1986/8a469dc25ee0db00947e06c804b88ea94fd0.pdf
+46f32991ebb6235509a6d297928947a8c483f29e,http://pdfs.semanticscholar.org/46f3/2991ebb6235509a6d297928947a8c483f29e.pdf
+c5fe40875358a286594b77fa23285fcfb7bda68e,http://pdfs.semanticscholar.org/edd1/cfb1caff16f80d807ff0821883ae855950c5.pdf
+22264e60f1dfbc7d0b52549d1de560993dd96e46,http://arxiv.org/pdf/1608.01471v1.pdf
+8f92cccacf2c84f5d69db3597a7c2670d93be781,http://www.eurasip.org/Proceedings/Eusipco/Eusipco2006/papers/1568982203.pdf
+0b8c92463f8f5087696681fb62dad003c308ebe2,https://www.iiitd.edu.in/~richa/papers/BTAS10-Sketch.pdf
+d861c658db2fd03558f44c265c328b53e492383a,http://www.cs.washington.edu/research/VACE/Multimedia/Jia_EMBC2014_final.pdf
+fac5a9a18157962cff38df6d4ae69f8a7da1cfa8,http://www.cs.sunysb.edu/~vislab/papers/01580481.pdf
+8149c30a86e1a7db4b11965fe209fe0b75446a8c,http://www.cfar.umd.edu/~kale/ICVGIP2012.pdf
+100da509d4fa74afc6e86a49352751d365fceee5,http://vision.ucsd.edu/sites/default/files/iccv2011_20q_parts_final.pdf
+e43ea078749d1f9b8254e0c3df4c51ba2f4eebd5,http://pdfs.semanticscholar.org/e43e/a078749d1f9b8254e0c3df4c51ba2f4eebd5.pdf
+e475deadd1e284428b5e6efd8fe0e6a5b83b9dcd,http://pdfs.semanticscholar.org/e475/deadd1e284428b5e6efd8fe0e6a5b83b9dcd.pdf
+daa52dd09b61ee94945655f0dde216cce0ebd505,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Yonetani_Recognizing_Micro-Actions_and_CVPR_2016_paper.pdf
+4d625677469be99e0a765a750f88cfb85c522cce,http://pdfs.semanticscholar.org/cccc/378e98218bbedfd93da956e4a07b9971b928.pdf
+1565721ebdbd2518224f54388ed4f6b21ebd26f3,http://cmp.felk.cvut.cz/ftp/articles/franc/Cevilkalp-FaceDetector-FG2013.pdf
+3b470b76045745c0ef5321e0f1e0e6a4b1821339,http://pdfs.semanticscholar.org/8e72/fa02f2d90ba31f31e0a7aa96a6d3e10a66fc.pdf
+476f177b026830f7b31e94bdb23b7a415578f9a4,http://vision.ece.ucsb.edu/sites/vision.ece.ucsb.edu/files/publications/karthikeyan_icip2012_subspace_final.pdf
+1da83903c8d476c64c14d6851c85060411830129,http://pdfs.semanticscholar.org/90c3/b003b85bd60ae06630bcef6abc03c3b1ef96.pdf
+0de91641f37b0a81a892e4c914b46d05d33fd36e,https://ibug.doc.ic.ac.uk/media/uploads/documents/raps.pdf
+a4a5ad6f1cc489427ac1021da7d7b70fa9a770f2,http://pdfs.semanticscholar.org/a4a5/ad6f1cc489427ac1021da7d7b70fa9a770f2.pdf
+9d896605fbf93315b68d4ee03be0770077f84e40,http://pdfs.semanticscholar.org/9d89/6605fbf93315b68d4ee03be0770077f84e40.pdf
+8f992ed6686710164005c20ab16cef6c6ad8d0ea,http://sist.sysu.edu.cn/~zhwshi/Research/PreprintVersion/Half-quadratic%20based%20Iterative%20Minimization%20for%20Robust%20Sparse%20Representation.pdf
+a660390654498dff2470667b64ea656668c98ecc,https://pdfs.semanticscholar.org/b42a/97fb47bcd6bfa72e130c08960a77ee96f9ab.pdf
+01d2cf5398c2b3e0f4fc8e8318a4492c95a0b242,http://webee.technion.ac.il/~lihi/Publications/10-ANS-PAMI.pdf
+8320dbdd3e4712cca813451cd94a909527652d63,http://pdfs.semanticscholar.org/d921/1df11080fa5eb0dc1d62fb683b10c055673a.pdf
+51348e24d2199b06273e7b65ae5f3fc764a2efc7,http://pdfs.semanticscholar.org/c4b4/cbc801a4430be5fdd16ae34c68f53f772582.pdf
+2f0b8579829b3d4efdbc03c96821e33d7cc65e1d,http://thoth.inrialpes.fr/people/mpederso/papers/cvpr14-facial.pdf
+1e058b3af90d475bf53b3f977bab6f4d9269e6e8,http://pdfs.semanticscholar.org/30b9/7c36bcb99e857cd78fc55e2600d7851dc117.pdf
+1d58d83ee4f57351b6f3624ac7e727c944c0eb8d,http://parnec.nuaa.edu.cn/xtan/paper/amfg07_talk.pdf
+528069963f0bd0861f380f53270c96c269a3ea1c,http://pdfs.semanticscholar.org/5280/69963f0bd0861f380f53270c96c269a3ea1c.pdf
+4c8581246ed4d90c942a23ed7c0e007221fa684d,http://welcome.isr.ist.utl.pt/img/pdfs/3439_14-ICIPb.pdf
+108b2581e07c6b7ca235717c749d45a1fa15bb24,http://www.cs.umd.edu/~djacobs/pubs_files/TPAMI_Proofs.pdf
+a9fc23d612e848250d5b675e064dba98f05ad0d9,http://pdfs.semanticscholar.org/a9fc/23d612e848250d5b675e064dba98f05ad0d9.pdf
+23fdbef123bcda0f07d940c72f3b15704fd49a98,http://pdfs.semanticscholar.org/23fd/bef123bcda0f07d940c72f3b15704fd49a98.pdf
+b2a0e5873c1a8f9a53a199eecae4bdf505816ecb,http://pdfs.semanticscholar.org/b2a0/e5873c1a8f9a53a199eecae4bdf505816ecb.pdf
+e793f8644c94b81b7a0f89395937a7f8ad428a89,http://pdfs.semanticscholar.org/e793/f8644c94b81b7a0f89395937a7f8ad428a89.pdf
+21ef129c063bad970b309a24a6a18cbcdfb3aff5,http://pdfs.semanticscholar.org/21ef/129c063bad970b309a24a6a18cbcdfb3aff5.pdf
+6ab33fa51467595f18a7a22f1d356323876f8262,http://www.iis.sinica.edu.tw/~kuangyu/OHRank_files/0523.pdf
+0fdcfb4197136ced766d538b9f505729a15f0daf,https://arxiv.org/pdf/0907.5321v2.pdf
+fec6648b4154fc7e0892c74f98898f0b51036dfe,http://pdfs.semanticscholar.org/fec6/648b4154fc7e0892c74f98898f0b51036dfe.pdf
+666300af8ffb8c903223f32f1fcc5c4674e2430b,http://pdfs.semanticscholar.org/6663/00af8ffb8c903223f32f1fcc5c4674e2430b.pdf
+8de06a584955f04f399c10f09f2eed77722f6b1c,http://pdfs.semanticscholar.org/8de0/6a584955f04f399c10f09f2eed77722f6b1c.pdf
+f4f9697f2519f1fe725ee7e3788119ed217dca34,http://pdfs.semanticscholar.org/f4f9/697f2519f1fe725ee7e3788119ed217dca34.pdf
+5bdd9f807eec399bb42972a33b83afc8b607c05c,http://www.umiacs.umd.edu/~pvishalm/Journal_pub/SPM_DA_v9.pdf
+549c719c4429812dff4d02753d2db11dd490b2ae,http://openaccess.thecvf.com/content_cvpr_2017/papers/Real_YouTube-BoundingBoxes_A_Large_CVPR_2017_paper.pdf
+1606b1475e125bba1b2d87bcf1e33b06f42c5f0d,http://users.eecs.northwestern.edu/~xsh835/CVPR2015_CasCNN.pdf
+badcfb7d4e2ef0d3e332a19a3f93d59b4f85668e,http://pdfs.semanticscholar.org/badc/fb7d4e2ef0d3e332a19a3f93d59b4f85668e.pdf
+324f39fb5673ec2296d90142cf9a909e595d82cf,http://pdfs.semanticscholar.org/324f/39fb5673ec2296d90142cf9a909e595d82cf.pdf
+2f5e057e35a97278a9d824545d7196c301072ebf,http://vision.ics.uci.edu/papers/ZhuAR_CVPR_2014/ZhuAR_CVPR_2014.pdf
+4a3d96b2a53114da4be3880f652a6eef3f3cc035,https://www.micc.unifi.it/wp-content/uploads/2018/01/07932891.pdf
+a57b92ed2d8aa5b41fe513c3e98cbf83b7141741,http://pdfs.semanticscholar.org/a57b/92ed2d8aa5b41fe513c3e98cbf83b7141741.pdf
+b5cd9e5d81d14868f1a86ca4f3fab079f63a366d,https://ivi.fnwi.uva.nl/isis/publications/2016/AgharwalWCACV2016/AgharwalWCACV2016.pdf
+5f344a4ef7edfd87c5c4bc531833774c3ed23542,http://pdfs.semanticscholar.org/5f34/4a4ef7edfd87c5c4bc531833774c3ed23542.pdf
+1b7ae509c8637f3c123cf6151a3089e6b8a0d5b2,http://pdfs.semanticscholar.org/3a2f/aa145c5fe63ab906568a29fa4100220e03d9.pdf
+65b737e5cc4a565011a895c460ed8fd07b333600,http://pdfs.semanticscholar.org/7574/f999d2325803f88c4915ba8f304cccc232d1.pdf
+94f74c6314ffd02db581e8e887b5fd81ce288dbf,http://pdfs.semanticscholar.org/94f7/4c6314ffd02db581e8e887b5fd81ce288dbf.pdf
+2179afa1cb4bd6d6ff0ca8df580ae511f59d99a3,http://pdfs.semanticscholar.org/f9f4/9f8347db35e721672955c3e24f60574553c0.pdf
+611961abc4dfc02b67edd8124abb08c449f5280a,http://pdfs.semanticscholar.org/6119/61abc4dfc02b67edd8124abb08c449f5280a.pdf
+b87db5ac17312db60e26394f9e3e1a51647cca66,http://pdfs.semanticscholar.org/b87d/b5ac17312db60e26394f9e3e1a51647cca66.pdf
+2fa1fc116731b2b5bb97f06d2ac494cb2b2fe475,http://pdfs.semanticscholar.org/2fa1/fc116731b2b5bb97f06d2ac494cb2b2fe475.pdf
+be8c517406528edc47c4ec0222e2a603950c2762,http://pdfs.semanticscholar.org/be8c/517406528edc47c4ec0222e2a603950c2762.pdf
+4abd49538d04ea5c7e6d31701b57ea17bc349412,http://resources.mpi-inf.mpg.de/publications/D2/2015/rohrbach15ijcv.pdf
+6f288a12033fa895fb0e9ec3219f3115904f24de,https://arxiv.org/pdf/1511.05204v1.pdf
+a7a6eb53bee5e2224f2ecd56a14e3a5a717e55b9,http://pdfs.semanticscholar.org/a7a6/eb53bee5e2224f2ecd56a14e3a5a717e55b9.pdf
+b5fc4f9ad751c3784eaf740880a1db14843a85ba,http://pdfs.semanticscholar.org/b5fc/4f9ad751c3784eaf740880a1db14843a85ba.pdf
+cc96eab1e55e771e417b758119ce5d7ef1722b43,http://pdfs.semanticscholar.org/cc96/eab1e55e771e417b758119ce5d7ef1722b43.pdf
+992ebd81eb448d1eef846bfc416fc929beb7d28b,http://pdfs.semanticscholar.org/992e/bd81eb448d1eef846bfc416fc929beb7d28b.pdf
+768c332650a44dee02f3d1d2be1debfa90a3946c,http://mmlab.ie.cuhk.edu.hk/archive/2004/CVPR04_Face3.pdf
+55b9b1c1c5487f5f62b44340104a9c4cc2ed7c96,http://pdfs.semanticscholar.org/55b9/b1c1c5487f5f62b44340104a9c4cc2ed7c96.pdf
+dfa80e52b0489bc2585339ad3351626dee1a8395,http://pdfs.semanticscholar.org/dfa8/0e52b0489bc2585339ad3351626dee1a8395.pdf
+a8affc2819f7a722a41bb913dea9149ee0e23a1f,http://robotics.szpku.edu.cn/c/publication/paper/ICIP2014-gaoyuan1.pdf
+41971dfbf404abeb8cf73fea29dc37b9aae12439,http://pdfs.semanticscholar.org/4197/1dfbf404abeb8cf73fea29dc37b9aae12439.pdf
+28b26597a7237f9ea6a9255cde4e17ee18122904,http://pdfs.semanticscholar.org/28b2/6597a7237f9ea6a9255cde4e17ee18122904.pdf
+5c4ce36063dd3496a5926afd301e562899ff53ea,http://pdfs.semanticscholar.org/5c4c/e36063dd3496a5926afd301e562899ff53ea.pdf
+007250c2dce81dd839a55f9108677b4f13f2640a,http://pdfs.semanticscholar.org/0db7/735e7adbe6e34dd058af31e278033040ab18.pdf
+65126e0b1161fc8212643b8ff39c1d71d262fbc1,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Ghiasi_Occlusion_Coherence_Localizing_2014_CVPR_paper.pdf
+466a5add15bb5f91e0cfd29a55f5fb159a7980e5,http://pdfs.semanticscholar.org/466a/5add15bb5f91e0cfd29a55f5fb159a7980e5.pdf
+2dd2c7602d7f4a0b78494ac23ee1e28ff489be88,https://www.tugraz.at/fileadmin/user_upload/Institute/ICG/Documents/lrs/pubs/koestinger_cvpr_2012.pdf
+fe108803ee97badfa2a4abb80f27fa86afd9aad9,http://pdfs.semanticscholar.org/fe10/8803ee97badfa2a4abb80f27fa86afd9aad9.pdf
+7c36afc9828379de97f226e131390af719dbc18d,http://www.cs.cornell.edu/~chenxiawu/papers/ufna.pdf
+3c47022955c3274250630b042b53d3de2df8eeda,http://research.microsoft.com/en-us/um/people/leizhang/paper/cvpr05-shuicheng-discriminant.pdf
+1921e0a97904bdf61e17a165ab159443414308ed,http://pdfs.semanticscholar.org/1921/e0a97904bdf61e17a165ab159443414308ed.pdf
+cb84229e005645e8623a866d3d7956c197f85e11,http://pdfs.semanticscholar.org/cb84/229e005645e8623a866d3d7956c197f85e11.pdf
+969fd48e1a668ab5d3c6a80a3d2aeab77067c6ce,http://pdfs.semanticscholar.org/969f/d48e1a668ab5d3c6a80a3d2aeab77067c6ce.pdf
+6c27eccf8c4b22510395baf9f0d0acc3ee547862,http://pdfs.semanticscholar.org/6c27/eccf8c4b22510395baf9f0d0acc3ee547862.pdf
+12150d8b51a2158e574e006d4fbdd3f3d01edc93,https://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ss16/DeepEnd2EndV2V.pdf
+0ce3a786aed896d128f5efdf78733cc675970854,http://pdfs.semanticscholar.org/3689/2b6bb4848a9c21158b8eded7f14a6654dd7e.pdf
+8cb403c733a5f23aefa6f583a17cf9b972e35c90,http://pdfs.semanticscholar.org/e4ca/1fa70823c4350888607df470248be0ed4c56.pdf
+14318d2b5f2cf731134a6964d8193ad761d86942,http://pdfs.semanticscholar.org/1431/8d2b5f2cf731134a6964d8193ad761d86942.pdf
+438b88fe40a6f9b5dcf08e64e27b2719940995e0,http://www.csd.uwo.ca/~olga/Courses/Fall2006/StudentPapers/ferenczMillerMalikICCV05.pdf
+51c7c5dfda47647aef2797ac3103cf0e108fdfb4,http://pdfs.semanticscholar.org/51c7/c5dfda47647aef2797ac3103cf0e108fdfb4.pdf
+9a3535cabf5d0f662bff1d897fb5b777a412d82e,http://pdfs.semanticscholar.org/9a35/35cabf5d0f662bff1d897fb5b777a412d82e.pdf
+c5c379a807e02cab2e57de45699ababe8d13fb6d,http://pdfs.semanticscholar.org/c5c3/79a807e02cab2e57de45699ababe8d13fb6d.pdf
+15e0b9ba3389a7394c6a1d267b6e06f8758ab82b,https://ipsjcva.springeropen.com/track/pdf/10.1186/s41074-017-0035-2?site=ipsjcva.springeropen.com
+d8b568392970b68794a55c090c4dd2d7f90909d2,http://pdfs.semanticscholar.org/d8b5/68392970b68794a55c090c4dd2d7f90909d2.pdf
+a4876b7493d8110d4be720942a0f98c2d116d2a0,http://pdfs.semanticscholar.org/a487/6b7493d8110d4be720942a0f98c2d116d2a0.pdf
+5f1dcaff475ef18a2ecec0e114a9849a0a8002b9,http://pdfs.semanticscholar.org/5f1d/caff475ef18a2ecec0e114a9849a0a8002b9.pdf
+192235f5a9e4c9d6a28ec0d333e36f294b32f764,http://www.andrew.cmu.edu/user/sjayasur/iccv.pdf
+887745c282edf9af40d38425d5fdc9b3fe139c08,https://arxiv.org/pdf/1407.2987v1.pdf
+4622b82a8aff4ac1e87b01d2708a333380b5913b,http://www.cbsr.ia.ac.cn/users/zlei/papers/ICB2015/Zhu-ICB-15.pdf
+e4c81c56966a763e021938be392718686ba9135e,http://pdfs.semanticscholar.org/e4c8/1c56966a763e021938be392718686ba9135e.pdf
+250ebcd1a8da31f0071d07954eea4426bb80644c,http://pdfs.semanticscholar.org/2e26/8598d9c2fd9757ba43f7967e57b8a2a871f4.pdf
+e82360682c4da11f136f3fccb73a31d7fd195694,http://pdfs.semanticscholar.org/e823/60682c4da11f136f3fccb73a31d7fd195694.pdf
+62a30f1b149843860938de6dd6d1874954de24b7,http://mmlab.ie.cuhk.edu.hk/archive/2009/09_fast_algorithm.pdf
+b52886610eda6265a2c1aaf04ce209c047432b6d,http://infolab.stanford.edu/~wangz/project/imsearch/Aesthetics/TAC16/xu.pdf
+98af221afd64a23e82c40fd28d25210c352e41b7,http://pdfs.semanticscholar.org/d2fb/a31b394ea016b57f45bead77534fd8f7fbfa.pdf
+660b73b0f39d4e644bf13a1745d6ee74424d4a16,http://pdfs.semanticscholar.org/660b/73b0f39d4e644bf13a1745d6ee74424d4a16.pdf
+7f9260c00a86a0d53df14469f1fa10e318ee2a3c,http://www.cse.msu.edu/~stockman/Book/projects.html/F06Docs/Papers/daugemanIrisICIP02.pdf
+b85580ff2d8d8be0a2c40863f04269df4cd766d9,http://pdfs.semanticscholar.org/b855/80ff2d8d8be0a2c40863f04269df4cd766d9.pdf
+1048c753e9488daa2441c50577fe5fdba5aa5d7c,http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/473.pdf
+f69de2b6770f0a8de6d3ec1a65cb7996b3c99317,http://pdfs.semanticscholar.org/f69d/e2b6770f0a8de6d3ec1a65cb7996b3c99317.pdf
+322c063e97cd26f75191ae908f09a41c534eba90,https://jurie.users.greyc.fr/papers/12_SEMATR_IJCV.pdf
+1de690714f143a8eb0d6be35d98390257a3f4a47,http://www.cs.fsu.edu/~liux/research/publications/papers/waring-liu-face-detection-smcb-2005.pdf
+3ec05713a1eed6fa9b57fef718f369f68bbbe09f,http://pdfs.semanticscholar.org/3ec0/5713a1eed6fa9b57fef718f369f68bbbe09f.pdf
+dd600e7d6e4443ebe87ab864d62e2f4316431293,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553774.pdf
+2c811b647a6aac924920c06e607e9e8d4b8d872d,http://pdfs.semanticscholar.org/2c81/1b647a6aac924920c06e607e9e8d4b8d872d.pdf
+90dd2a53236b058c79763459b9d8a7ba5e58c4f1,http://pdfs.semanticscholar.org/90dd/2a53236b058c79763459b9d8a7ba5e58c4f1.pdf
+442d3aeca486de787de10bc41bfeb0b42c81803f,http://pdfs.semanticscholar.org/442d/3aeca486de787de10bc41bfeb0b42c81803f.pdf
+d78fbd11f12cbc194e8ede761d292dc2c02d38a2,http://pdfs.semanticscholar.org/d78f/bd11f12cbc194e8ede761d292dc2c02d38a2.pdf
+3c1f5580a66c9624c77f27ab8e4cf0d1b3d9d171,http://research.microsoft.com/en-us/um/people/luyuan/paper/skyfinder_siggraph09.pdf
+6afeb764ee97fbdedfa8f66810dfc22feae3fa1f,http://pdfs.semanticscholar.org/928c/dc2049462f66460dc30aef5aaaa15e427d12.pdf
+f8015e31d1421f6aee5e17fc3907070b8e0a5e59,http://pdfs.semanticscholar.org/f801/5e31d1421f6aee5e17fc3907070b8e0a5e59.pdf
+c53352a4239568cc915ad968aff51c49924a3072,http://pdfs.semanticscholar.org/c533/52a4239568cc915ad968aff51c49924a3072.pdf
+1f2d12531a1421bafafe71b3ad53cb080917b1a7,http://pdfs.semanticscholar.org/1f2d/12531a1421bafafe71b3ad53cb080917b1a7.pdf
+a702fc36f0644a958c08de169b763b9927c175eb,http://www.apsipa.org/proceedings_2013/papers/170_PID2935307.pdf
+abb396490ba8b112f10fbb20a0a8ce69737cd492,http://pdfs.semanticscholar.org/abb3/96490ba8b112f10fbb20a0a8ce69737cd492.pdf
+51a8dabe4dae157aeffa5e1790702d31368b9161,http://pdfs.semanticscholar.org/5621/adae20c1bc781a36c43a9ddbe5475ea4b6e8.pdf
+9b93406f3678cf0f16451140ea18be04784faeee,http://pdfs.semanticscholar.org/9b93/406f3678cf0f16451140ea18be04784faeee.pdf
+63c109946ffd401ee1195ed28f2fb87c2159e63d,http://pdfs.semanticscholar.org/63c1/09946ffd401ee1195ed28f2fb87c2159e63d.pdf
+24f1e2b7a48c2c88c9e44de27dc3eefd563f6d39,http://openaccess.thecvf.com/content_ICCV_2017/papers/Benitez-Quiroz_Recognition_of_Action_ICCV_2017_paper.pdf
+466f80b066215e85da63e6f30e276f1a9d7c843b,http://cbl.uh.edu/pub_files/07961802.pdf
+195d331c958f2da3431f37a344559f9bce09c0f7,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/2B_066_ext.pdf
+54aacc196ffe49b3450059fccdf7cd3bb6f6f3c3,http://www.cs.toronto.edu/~vnair/iccv11.pdf
+1eba6fc35a027134aa8997413647b49685f6fbd1,https://ubicomp-mental-health.github.io/papers/voss-glass.pdf
+5b693cb3bedaa2f1e84161a4261df9b3f8e77353,http://pdfs.semanticscholar.org/5b69/3cb3bedaa2f1e84161a4261df9b3f8e77353.pdf
+e8410c4cd1689829c15bd1f34995eb3bd4321069,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553731.pdf
+17e563af203d469c456bb975f3f88a741e43fb71,https://cvhci.anthropomatik.kit.edu/~mhaurile/papers/WACV2016.pdf
+91d513af1f667f64c9afc55ea1f45b0be7ba08d4,http://pdfs.semanticscholar.org/91d5/13af1f667f64c9afc55ea1f45b0be7ba08d4.pdf
+aac39ca161dfc52aade063901f02f56d01a1693c,http://pdfs.semanticscholar.org/aac3/9ca161dfc52aade063901f02f56d01a1693c.pdf
+920a92900fbff22fdaaef4b128ca3ca8e8d54c3e,http://pdfs.semanticscholar.org/920a/92900fbff22fdaaef4b128ca3ca8e8d54c3e.pdf
+03e88bf3c5ddd44ebf0e580d4bd63072566613ad,http://pdfs.semanticscholar.org/03e8/8bf3c5ddd44ebf0e580d4bd63072566613ad.pdf
+541f1436c8ffef1118a0121088584ddbfd3a0a8a,http://crcv.ucf.edu/ICCV13-Action-Workshop/index.files/NotebookPapers13/A%20Spatio-Temporal%20Feature%20based%20on%20Triangulation%20of%20Dense%20SURF.pdf
+778bff335ae1b77fd7ec67404f71a1446624331b,http://pdfs.semanticscholar.org/778b/ff335ae1b77fd7ec67404f71a1446624331b.pdf
+9b164cef4b4ad93e89f7c1aada81ae7af802f3a4,http://pdfs.semanticscholar.org/9b16/4cef4b4ad93e89f7c1aada81ae7af802f3a4.pdf
+1ae642a8d756c6aa7bc049c5c89d5072d8749637,http://www.cs.umd.edu/~behjat/papers/ICMR14_poster.pdf
+d0e895a272d684a91c1b1b1af29747f92919d823,http://pdfs.semanticscholar.org/d0e8/95a272d684a91c1b1b1af29747f92919d823.pdf
+ad5a1621190d18dd429930ab5125c849ce7e4506,http://www.cs.csub.edu/~acruz/papers/10.1109-ICIP.2014.7025275.pdf
+f4aed1314b2d38fd8f1b9d2bc154295bbd45f523,http://pdfs.semanticscholar.org/f4ae/d1314b2d38fd8f1b9d2bc154295bbd45f523.pdf
+0a325d70cc381b136a8f4e471b406cda6d27668c,http://pdfs.semanticscholar.org/0a32/5d70cc381b136a8f4e471b406cda6d27668c.pdf
+a481e394f58f2d6e998aa320dad35c0d0e15d43c,http://www.cs.colostate.edu/~draper/papers/wigness_wacv14.pdf
+7698ba9fd1f49157ca2666a93311afbf1ff4e66c,http://www.ics.uci.edu/~dramanan/papers/dpm_acm.pdf
+32d555faaaa0a6f6f9dfc9263e4dba75a38c3193,http://pdfs.semanticscholar.org/e119/eeee5025235c6f8dacc7c1812c0c52d595b9.pdf
+68bf34e383092eb827dd6a61e9b362fcba36a83a,http://pdfs.semanticscholar.org/68bf/34e383092eb827dd6a61e9b362fcba36a83a.pdf
+7735f63e5790006cb3d989c8c19910e40200abfc,http://pdfs.semanticscholar.org/7735/f63e5790006cb3d989c8c19910e40200abfc.pdf
+209324c152fa8fab9f3553ccb62b693b5b10fb4d,http://pdfs.semanticscholar.org/2093/24c152fa8fab9f3553ccb62b693b5b10fb4d.pdf
+90cb074a19c5e7d92a1c0d328a1ade1295f4f311,http://pdfs.semanticscholar.org/90cb/074a19c5e7d92a1c0d328a1ade1295f4f311.pdf
+02dd0af998c3473d85bdd1f77254ebd71e6158c6,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Wang_PPP_Joint_Pointwise_CVPR_2016_paper.pdf
+465d5bb11912005f0a4f0569c6524981df18a7de,http://pdfs.semanticscholar.org/465d/5bb11912005f0a4f0569c6524981df18a7de.pdf
+64cf1cda80a23ed6fc1c8e66065614ef7bdeadf3,http://www0.cs.ucl.ac.uk/staff/s.prince/Papers/PAMI_LIV.pdf
+19994e667d908bc0aacfb663ab0a2bb5ad16b221,http://pdfs.semanticscholar.org/65b1/70e5ec86f5fc500fd5cbd7bfe7b2ec4ef045.pdf
+19a9f658ea14701502d169dc086651b1d9b2a8ea,http://www.cbsr.ia.ac.cn/users/zlei/papers/JJYan-FG2013.pdf
+d8f0bda19a345fac81a1d560d7db73f2b4868836,http://pdfs.semanticscholar.org/d8f0/bda19a345fac81a1d560d7db73f2b4868836.pdf
+655d9ba828eeff47c600240e0327c3102b9aba7c,http://cs.gmu.edu/~carlotta/publications/kpools.pdf
+9626bcb3fc7c7df2c5a423ae8d0a046b2f69180c,http://pdfs.semanticscholar.org/9626/bcb3fc7c7df2c5a423ae8d0a046b2f69180c.pdf
+88c6d4b73bd36e7b5a72f3c61536c8c93f8d2320,http://pdfs.semanticscholar.org/88c6/d4b73bd36e7b5a72f3c61536c8c93f8d2320.pdf
+2836d68c86f29bb87537ea6066d508fde838ad71,http://arxiv.org/pdf/1510.06503v1.pdf
+63a6c256ec2cf2e0e0c9a43a085f5bc94af84265,http://www.cs.tau.ac.il/~wolf/papers/complexity-multiverse-networks.pdf
+6f957df9a7d3fc4eeba53086d3d154fc61ae88df,http://pdfs.semanticscholar.org/6f95/7df9a7d3fc4eeba53086d3d154fc61ae88df.pdf
+17045163860fc7c38a0f7d575f3e44aaa5fa40d7,http://pdfs.semanticscholar.org/38b9/57e2b5ec0ea852d22d1481ef924fbf7f72e2.pdf
+4b74f2d56cd0dda6f459319fec29559291c61bff,http://pdfs.semanticscholar.org/96d1/e2686725f69b38b510a75b716caf3a48b3e2.pdf
+25b2811118ed73c64682544fe78023bb8242c709,http://www.researchgate.net/profile/Xueyin_Lin/publication/4193803_Kernel-based_multifactor_analysis_for_image_synthesis_and_recognition/links/00b7d51a9fd4fb9962000000.pdf
+3df8cc0384814c3fb05c44e494ced947a7d43f36,http://openaccess.thecvf.com/content_ICCV_2017/papers/Walker_The_Pose_Knows_ICCV_2017_paper.pdf
+8812aef6bdac056b00525f0642702ecf8d57790b,http://pdfs.semanticscholar.org/8812/aef6bdac056b00525f0642702ecf8d57790b.pdf
+231a6d2ee1cc76f7e0c5912a530912f766e0b459,http://pdfs.semanticscholar.org/231a/6d2ee1cc76f7e0c5912a530912f766e0b459.pdf
+666939690c564641b864eed0d60a410b31e49f80,http://pdfs.semanticscholar.org/6669/39690c564641b864eed0d60a410b31e49f80.pdf
+ecca2a2b84ea01ea425b8d2d9f376f15a295a7f5,http://smie2.sysu.edu.cn/~wcd/Papers/2013_TPAMI_Wang_MEAP.pdf
+1fbde67e87890e5d45864e66edb86136fbdbe20e,http://www.openu.ac.il/home/hassner/data/ASLAN/Papers/ASLAN_TPAMI12.pdf
+ad6745dd793073f81abd1f3246ba4102046da022,http://pdfs.semanticscholar.org/ad67/45dd793073f81abd1f3246ba4102046da022.pdf
+7ffc5c58e5b61ac7c45d8e6ed076248051ebea34,http://repository.lib.polyu.edu.hk/jspui/bitstream/10397/238/1/SMCB_C_34_5_04.pdf
+2912c3ea67678a1052d7d5cbe734a6ad90fc360e,http://pdfs.semanticscholar.org/2912/c3ea67678a1052d7d5cbe734a6ad90fc360e.pdf
+a5ae7fe2bb268adf0c1cd8e3377f478fca5e4529,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W08/papers/Sikka_Exemplar_Hidden_Markov_2015_CVPR_paper.pdf
+3f0c51989c516a7c5dee7dec4d7fb474ae6c28d9,https://arxiv.org/pdf/1611.06638.pdf
+6f7ce89aa3e01045fcd7f1c1635af7a09811a1fe,http://mirlab.org/conference_papers/International_Conference/ICASSP%202012/pdfs/0000937.pdf
+1e1d7cbbef67e9e042a3a0a9a1bcefcc4a9adacf,http://personal.stevens.edu/~hli18//data/papers/CVPR2016_CameraReady.pdf
+49820ae612b3c0590a8a78a725f4f378cb605cd1,http://pdfs.semanticscholar.org/4982/0ae612b3c0590a8a78a725f4f378cb605cd1.pdf
+22ad2c8c0f4d6aa4328b38d894b814ec22579761,http://nichol.as/papers/Gallagher/Clothing%20Cosegmentation%20for%20Recognizing%20People.pdf
+ce54e891e956d5b502a834ad131616786897dc91,http://pdfs.semanticscholar.org/ce54/e891e956d5b502a834ad131616786897dc91.pdf
+11dc744736a30a189f88fa81be589be0b865c9fa,http://openaccess.thecvf.com/content_iccv_2015/papers/Liang_A_Unified_Multiplicative_ICCV_2015_paper.pdf
+1667a77db764e03a87a3fd167d88b060ef47bb56,http://pdfs.semanticscholar.org/1667/a77db764e03a87a3fd167d88b060ef47bb56.pdf
+32df63d395b5462a8a4a3c3574ae7916b0cd4d1d,http://www.ppgia.pucpr.br/~alekoe/Papers/ALEKOE-FacialExpression-ICASSP2011.pdf
+d9c4586269a142faee309973e2ce8cde27bda718,http://pdfs.semanticscholar.org/d9c4/586269a142faee309973e2ce8cde27bda718.pdf
+aa94f214bb3e14842e4056fdef834a51aecef39c,http://pdfs.semanticscholar.org/aa94/f214bb3e14842e4056fdef834a51aecef39c.pdf
+04470861408d14cc860f24e73d93b3bb476492d0,http://pdfs.semanticscholar.org/0447/0861408d14cc860f24e73d93b3bb476492d0.pdf
+0ad90118b4c91637ee165f53d557da7141c3fde0,http://pdfs.semanticscholar.org/0ad9/0118b4c91637ee165f53d557da7141c3fde0.pdf
+c466ad258d6262c8ce7796681f564fec9c2b143d,http://pdfs.semanticscholar.org/c466/ad258d6262c8ce7796681f564fec9c2b143d.pdf
+982fed5c11e76dfef766ad9ff081bfa25e62415a,https://pdfs.semanticscholar.org/c7fa/d91ba4e33f64d584c928b1200327815f09e6.pdf
+9513503867b29b10223f17c86e47034371b6eb4f,http://pdfs.semanticscholar.org/9513/503867b29b10223f17c86e47034371b6eb4f.pdf
+b506aa23949b6d1f0c868ad03aaaeb5e5f7f6b57,http://pdfs.semanticscholar.org/b506/aa23949b6d1f0c868ad03aaaeb5e5f7f6b57.pdf
+5003754070f3a87ab94a2abb077c899fcaf936a6,http://pdfs.semanticscholar.org/5003/754070f3a87ab94a2abb077c899fcaf936a6.pdf
+17370f848801871deeed22af152489e39b6e1454,http://mml.citi.sinica.edu.tw/papers/ICME_2015_Wei.pdf
+1576ed0f3926c6ce65e0ca770475bca6adcfdbb4,http://openaccess.thecvf.com/content_cvpr_workshops_2015/W09/papers/Bagheri_Keep_it_Accurate_2015_CVPR_paper.pdf
+59cdafed4eeb8ff7c9bb2d4ecd0edeb8a361ffc1,http://pdfs.semanticscholar.org/59cd/afed4eeb8ff7c9bb2d4ecd0edeb8a361ffc1.pdf
+6ce23cf4f440021b7b05aa3c1c2700cc7560b557,http://pdfs.semanticscholar.org/6ce2/3cf4f440021b7b05aa3c1c2700cc7560b557.pdf
+5ce2cb4c76b0cdffe135cf24b9cda7ae841c8d49,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhao_Facial_Expression_Intensity_CVPR_2016_paper.pdf
+0728f788107122d76dfafa4fb0c45c20dcf523ca,http://arxiv.org/pdf/1505.04427v1.pdf
+0cbe059c181278a373292a6af1667c54911e7925,http://pdfs.semanticscholar.org/ea4e/15a4cf256599d11291040ad5e487f55ae514.pdf
+7d98dcd15e28bcc57c9c59b7401fa4a5fdaa632b,http://pdfs.semanticscholar.org/7d98/dcd15e28bcc57c9c59b7401fa4a5fdaa632b.pdf
+dc77287bb1fcf64358767dc5b5a8a79ed9abaa53,http://pdfs.semanticscholar.org/dc77/287bb1fcf64358767dc5b5a8a79ed9abaa53.pdf
+5173a20304ea7baa6bfe97944a5c7a69ea72530f,http://pdfs.semanticscholar.org/5173/a20304ea7baa6bfe97944a5c7a69ea72530f.pdf
+1d3e01d5e2721dcfafe5a3b39c54ee1c980350bb,http://research.microsoft.com/en-us/um/people/jiansun/papers/CVPR12_FaceAlignRegression.pdf
+9d60ad72bde7b62be3be0c30c09b7d03f9710c5f,http://pdfs.semanticscholar.org/9d60/ad72bde7b62be3be0c30c09b7d03f9710c5f.pdf
+89f4bcbfeb29966ab969682eae235066a89fc151,http://www.ee.surrey.ac.uk/CVSSP/Publications/papers/short-fgr-2004.pdf
+78fede85d6595e7a0939095821121f8bfae05da6,http://pdfs.semanticscholar.org/78fe/de85d6595e7a0939095821121f8bfae05da6.pdf
+d5f751d31a9d2d754d0d136d5b02c24b28fb94a0,http://www.researchgate.net/profile/Marie-Francine_Moens/publication/220634584_Naming_People_in_News_Videos_with_Label_Propagation/links/0a85e52ecd01912489000000.pdf
+cba45a87fc6cf12b3b0b6f57ba1a5282ef7fee7a,http://pdfs.semanticscholar.org/cba4/5a87fc6cf12b3b0b6f57ba1a5282ef7fee7a.pdf
+c49aed65fcf9ded15c44f9cbb4b161f851c6fa88,http://pdfs.semanticscholar.org/c49a/ed65fcf9ded15c44f9cbb4b161f851c6fa88.pdf
+08ee541925e4f7f376538bc289503dd80399536f,http://pdfs.semanticscholar.org/08ee/541925e4f7f376538bc289503dd80399536f.pdf
+23c3eb6ad8e5f18f672f187a6e9e9b0d94042970,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/3B_095_ext.pdf
+bb451dc2420e1a090c4796c19716f93a9ef867c9,http://pdfs.semanticscholar.org/bb45/1dc2420e1a090c4796c19716f93a9ef867c9.pdf
+e3e2c106ccbd668fb9fca851498c662add257036,http://www.vast.uccs.edu/~tboult/PAPERS/BTAS13-Sapkota-et-al-Ensembles.pdf
+8aed6ec62cfccb4dba0c19ee000e6334ec585d70,http://pdfs.semanticscholar.org/8aed/6ec62cfccb4dba0c19ee000e6334ec585d70.pdf
+08f1e9e14775757298afd9039f46ec56e80677f9,http://pdfs.semanticscholar.org/08f1/e9e14775757298afd9039f46ec56e80677f9.pdf
+faeefc5da67421ecd71d400f1505cfacb990119c,http://pdfs.semanticscholar.org/faee/fc5da67421ecd71d400f1505cfacb990119c.pdf
+23086a13b83d1b408b98346cf44f3e11920b404d,http://pdfs.semanticscholar.org/2308/6a13b83d1b408b98346cf44f3e11920b404d.pdf
+2d98a1cb0d1a37c79a7ebcb727066f9ccc781703,https://arxiv.org/pdf/1706.07525v1.pdf
+c6ffa09c4a6cacbbd3c41c8ae7a728b0de6e10b6,http://pdfs.semanticscholar.org/c6ff/a09c4a6cacbbd3c41c8ae7a728b0de6e10b6.pdf
+580e48d3e7fe1ae0ceed2137976139852b1755df,http://pdfs.semanticscholar.org/580e/48d3e7fe1ae0ceed2137976139852b1755df.pdf
+9282239846d79a29392aa71fc24880651826af72,http://pdfs.semanticscholar.org/9282/239846d79a29392aa71fc24880651826af72.pdf
+34b42bcf84d79e30e26413f1589a9cf4b37076f9,http://pdfs.semanticscholar.org/34b4/2bcf84d79e30e26413f1589a9cf4b37076f9.pdf
+10ca2e03ff995023a701e6d8d128455c6e8db030,http://pdfs.semanticscholar.org/a941/e5f8778cbac75e21172985a0575b51ea819b.pdf
+7fd6bb30ad5d7eb3078efbb85f94d2d60e701115,http://pdfs.semanticscholar.org/7fd6/bb30ad5d7eb3078efbb85f94d2d60e701115.pdf
+5d5cd6fa5c41eb9d3d2bab3359b3e5eb60ae194e,http://pdfs.semanticscholar.org/5d5c/d6fa5c41eb9d3d2bab3359b3e5eb60ae194e.pdf
+59e75aad529b8001afc7e194e21668425119b864,http://pdfs.semanticscholar.org/59e7/5aad529b8001afc7e194e21668425119b864.pdf
+bcc5cbbb540ee66dc8b9a3453b506e895d8395de,http://pdfs.semanticscholar.org/bcc5/cbbb540ee66dc8b9a3453b506e895d8395de.pdf
+b6145d3268032da70edc9cfececa1f9ffa4e3f11,http://cnl.salk.edu/~zhafed/papers/fr_IJCV_2001.pdf
+468c8f09d2ad8b558b65d11ec5ad49208c4da2f2,http://www.public.asu.edu/~bli24/Papers/ICPR2016_MSR-CNN.pdf
+5185f2a40836a754baaa7419a1abdd1e7ffaf2ad,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR_2007/data/papers/workshops/SLAM2007/papers/11-1569042551.pdf
+0bf3513d18ec37efb1d2c7934a837dabafe9d091,http://pdfs.semanticscholar.org/14ff/c760c1655524fc2a035357ad354664b5af5e.pdf
+054738ce39920975b8dcc97e01b3b6cc0d0bdf32,http://ita.ucsd.edu/workshop/16/files/paper/paper_2663.pdf
+0d902541c26f03ff95221e0e71d67c39e094a61d,https://arxiv.org/pdf/1506.05085v1.pdf
+13be4f13dac6c9a93f969f823c4b8c88f607a8c4,http://www1.ece.neu.edu/~yuewu/files/2016/p242-robinson.pdf
+0697bd81844d54064d992d3229162fe8afcd82cb,http://pdfs.semanticscholar.org/0697/bd81844d54064d992d3229162fe8afcd82cb.pdf
+7fcfd72ba6bc14bbb90b31fe14c2c77a8b220ab2,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/He_Robust_FEC-CNN_A_CVPR_2017_paper.pdf
+a06b6d30e2b31dc600f622ab15afe5e2929581a7,https://ibug.doc.ic.ac.uk/media/uploads/documents/2209.pdf
+6226f2ea345f5f4716ac4ddca6715a47162d5b92,http://pdfs.semanticscholar.org/6226/f2ea345f5f4716ac4ddca6715a47162d5b92.pdf
+e1f790bbedcba3134277f545e56946bc6ffce48d,http://pdfs.semanticscholar.org/e1f7/90bbedcba3134277f545e56946bc6ffce48d.pdf
+2559b15f8d4a57694a0a33bdc4ac95c479a3c79a,http://vision.ucsd.edu/~carolina/files/mklmnn.pdf
+159e792096756b1ec02ec7a980d5ef26b434ff78,http://pdfs.semanticscholar.org/159e/792096756b1ec02ec7a980d5ef26b434ff78.pdf
+3df7401906ae315e6aef3b4f13126de64b894a54,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR_2008/data/papers/067.pdf
+8000c4f278e9af4d087c0d0895fff7012c5e3d78,https://www.cse.ust.hk/~yuzhangcse/papers/Zhang_Yeung_CVPR10.pdf
+d3b550e587379c481392fb07f2cbbe11728cf7a6,http://pdfs.semanticscholar.org/d3b5/50e587379c481392fb07f2cbbe11728cf7a6.pdf
+0d3068b352c3733c9e1cc75e449bf7df1f7b10a4,http://users.cecs.anu.edu.au/~adhall/Dhall_ACII_DC_2013.pdf
+079edd5cf7968ac4759dfe72af2042cf6e990efc,http://pdfs.semanticscholar.org/079e/dd5cf7968ac4759dfe72af2042cf6e990efc.pdf
+a8117a4733cce9148c35fb6888962f665ae65b1e,http://pdfs.semanticscholar.org/a811/7a4733cce9148c35fb6888962f665ae65b1e.pdf
+0a79d0ba1a4876086e64fc0041ece5f0de90fbea,http://pdfs.semanticscholar.org/0a79/d0ba1a4876086e64fc0041ece5f0de90fbea.pdf
+0363e93d49d2a3dbe057cc7754825ebf30f0f816,http://nichol.as/papers/Everingham/Identifying%20individuals%20in%20video%20by%20combining%20generative.pdf
+b7740dba37a3cbd5c832a8deb9a710a28966486a,http://pdfs.semanticscholar.org/b774/0dba37a3cbd5c832a8deb9a710a28966486a.pdf
+318a81acdd15a0ab2f706b5f53ee9d4d5d86237f,http://pdfs.semanticscholar.org/318a/81acdd15a0ab2f706b5f53ee9d4d5d86237f.pdf
+6577c76395896dd4d352f7b1ee8b705b1a45fa90,http://ai.stanford.edu/~kdtang/papers/icip10_kinship.pdf
+2e98329fdec27d4b3b9b894687e7d1352d828b1d,http://pdfs.semanticscholar.org/2e98/329fdec27d4b3b9b894687e7d1352d828b1d.pdf
+72c0c8deb9ea6f59fde4f5043bff67366b86bd66,http://pdfs.semanticscholar.org/72c0/c8deb9ea6f59fde4f5043bff67366b86bd66.pdf
+82e66c4832386cafcec16b92ac88088ffd1a1bc9,http://pdfs.semanticscholar.org/82e6/6c4832386cafcec16b92ac88088ffd1a1bc9.pdf
+b4f4b0d39fd10baec34d3412d53515f1a4605222,http://pdfs.semanticscholar.org/eaae/d23a2d94feb2f1c3ff22a25777c7a78f3141.pdf
+3146fabd5631a7d1387327918b184103d06c2211,http://www.cv-foundation.org/openaccess/content_cvpr_2016_workshops/w18/papers/Jeni_Person-Independent_3D_Gaze_CVPR_2016_paper.pdf
+55bc7abcef8266d76667896bbc652d081d00f797,http://www.cse.msu.edu/~rossarun/pubs/ChenCosmeticsGenderAge_VISAPP2014.pdf
+404776aa18031828f3d5dbceed39907f038a47fe,http://pdfs.semanticscholar.org/4047/76aa18031828f3d5dbceed39907f038a47fe.pdf
+9893865afdb1de55fdd21e5d86bbdb5daa5fa3d5,http://pdfs.semanticscholar.org/9893/865afdb1de55fdd21e5d86bbdb5daa5fa3d5.pdf
+03f7041515d8a6dcb9170763d4f6debd50202c2b,http://biometrics.cse.msu.edu/Publications/Face/OttoWangJain_ClusteringMillionsOfFacesByIdentity_TPAMI17.pdf
+2cbb4a2f8fd2ddac86f8804fd7ffacd830a66b58,http://www.openu.ac.il/home/hassner/projects/cnn_agegender/CNN_AgeGenderEstimation.pdf
+d22b378fb4ef241d8d210202893518d08e0bb213,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Zhang_Random_Faces_Guided_2013_ICCV_paper.pdf
+2f2aa67c5d6dbfaf218c104184a8c807e8b29286,http://sesame.comp.nus.edu.sg/components/com_flexicontent/uploads/lekhaicon13.pdf
+2d072cd43de8d17ce3198fae4469c498f97c6277,http://www.patrikhuber.ch/files/RCRC_SPL_2015.pdf
+855882a5943fc12fa9c0e8439c482e055b4b46f3,http://humansensing.cs.cmu.edu/papers/Automated.pdf
+1ce3a91214c94ed05f15343490981ec7cc810016,http://grail.cs.washington.edu/photobios/paper.pdf
+acb83d68345fe9a6eb9840c6e1ff0e41fa373229,http://pdfs.semanticscholar.org/acb8/3d68345fe9a6eb9840c6e1ff0e41fa373229.pdf
+0b5bd3ce90bf732801642b9f55a781e7de7fdde0,http://pdfs.semanticscholar.org/0b5b/d3ce90bf732801642b9f55a781e7de7fdde0.pdf
+4ea53e76246afae94758c1528002808374b75cfa,http://pdfs.semanticscholar.org/4ea5/3e76246afae94758c1528002808374b75cfa.pdf
+126214ef0dcef2b456cb413905fa13160c73ec8e,http://infoscience.epfl.ch/record/125056/files/MHFE_fg08.pdf
+455204fa201e9936b42756d362f62700597874c4,http://pdfs.semanticscholar.org/4552/04fa201e9936b42756d362f62700597874c4.pdf
+2bcec23ac1486f4106a3aa588b6589e9299aba70,http://pdfs.semanticscholar.org/2bce/c23ac1486f4106a3aa588b6589e9299aba70.pdf
+561ae67de137e75e9642ab3512d3749b34484310,http://pdfs.semanticscholar.org/561a/e67de137e75e9642ab3512d3749b34484310.pdf
+999289b0ef76c4c6daa16a4f42df056bf3d68377,http://pdfs.semanticscholar.org/9992/89b0ef76c4c6daa16a4f42df056bf3d68377.pdf
+3f57c3fc2d9d4a230ccb57eed1d4f0b56062d4d5,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2013/W16/papers/Hsu_Face_Recognition_across_2013_CVPR_paper.pdf
+24f1febcdf56cd74cb19d08010b6eb5e7c81c362,http://www.umiacs.umd.edu/~cteo/public-shared/language_robotsMethods_PerMIS2012.pdf
+ba2bbef34f05551291410103e3de9e82fdf9dddd,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Guo_A_Study_on_2014_CVPR_paper.pdf
+31835472821c7e3090abb42e57c38f7043dc3636,http://pdfs.semanticscholar.org/3183/5472821c7e3090abb42e57c38f7043dc3636.pdf
+1cad5d682393ffbb00fd26231532d36132582bb4,http://pdfs.semanticscholar.org/1cad/5d682393ffbb00fd26231532d36132582bb4.pdf
+49dd4b359f8014e85ed7c106e7848049f852a304,http://pdfs.semanticscholar.org/49dd/4b359f8014e85ed7c106e7848049f852a304.pdf
+9e5acdda54481104aaf19974dca6382ed5ff21ed,http://pdfs.semanticscholar.org/dd52/0f2ebcf8034cb168ab4e82acec9a69fe0188.pdf
+9c1cdb795fd771003da4378f9a0585730d1c3784,http://pdfs.semanticscholar.org/9c1c/db795fd771003da4378f9a0585730d1c3784.pdf
diff --git a/scraper/reports/misc/db_paper_pdf.csv b/scraper/reports/misc/db_paper_pdf.csv
new file mode 100644
index 00000000..5547d808
--- /dev/null
+++ b/scraper/reports/misc/db_paper_pdf.csv
@@ -0,0 +1,4917 @@
+611961abc4dfc02b67edd8124abb08c449f5280a,http://pdfs.semanticscholar.org/6119/61abc4dfc02b67edd8124abb08c449f5280a.pdf
+610a4451423ad7f82916c736cd8adb86a5a64c59,http://pdfs.semanticscholar.org/610a/4451423ad7f82916c736cd8adb86a5a64c59.pdf
+6156eaad00aad74c90cbcfd822fa0c9bd4eb14c2,http://pdfs.semanticscholar.org/6156/eaad00aad74c90cbcfd822fa0c9bd4eb14c2.pdf
+61ffedd8a70a78332c2bbdc9feba6c3d1fd4f1b8,http://pdfs.semanticscholar.org/61ff/edd8a70a78332c2bbdc9feba6c3d1fd4f1b8.pdf
+61084a25ebe736e8f6d7a6e53b2c20d9723c4608,http://pdfs.semanticscholar.org/6108/4a25ebe736e8f6d7a6e53b2c20d9723c4608.pdf
+61542874efb0b4c125389793d8131f9f99995671,http://pdfs.semanticscholar.org/6154/2874efb0b4c125389793d8131f9f99995671.pdf
+61f93ed515b3bfac822deed348d9e21d5dffe373,http://dvmmweb.cs.columbia.edu/files/set_hash_wacv17.pdf
+6180bc0816b1776ca4b32ced8ea45c3c9ce56b47,http://pdfs.semanticscholar.org/793e/92ed3f89c8636c8ca1175c1183ba812da245.pdf
+61f1b14f04d2fa1d8a556adbdf93050b4637f44b,http://www.caam.rice.edu/~wy1/paperfiles/T.Chen%20W.Yin%20X.Zhou%20D.Comaniciu%20T.Huang%20-%20Total%20variation%20models%20for%20variable%20lighting%20face%20recognition.pdf
+612075999e82596f3b42a80e6996712cc52880a3,https://www.etsmtl.ca/Unites-de-recherche/LIVIA/Recherche-et-innovation/Publications/Publications-2017/PID4875389.pdf
+614a7c42aae8946c7ad4c36b53290860f6256441,https://arxiv.org/pdf/1604.02878.pdf
+616d3d6d82dbc2697d150e879996d878ef74faef,https://www.ll.mit.edu/mission/cybersec/publications/publication-files/full_papers/2016_Khorrami_ICIP_FP.pdf
+0d746111135c2e7f91443869003d05cde3044beb,https://arxiv.org/pdf/1603.09364v1.pdf
+0d88ab0250748410a1bc990b67ab2efb370ade5d,http://signal.ee.bilkent.edu.tr/defevent/abstract/a1795.pdf
+0db43ed25d63d801ce745fe04ca3e8b363bf3147,http://pdfs.semanticscholar.org/0db4/3ed25d63d801ce745fe04ca3e8b363bf3147.pdf
+0daf696253a1b42d2c9d23f1008b32c65a9e4c1e,http://ca.cs.cmu.edu/sites/default/files/132010_CVPR_AU_Long.pdf
+0d538084f664b4b7c0e11899d08da31aead87c32,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Zhang_Deformable_Part_Descriptors_2013_ICCV_paper.pdf
+0dccc881cb9b474186a01fd60eb3a3e061fa6546,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2B_104_ext.pdf
+0d467adaf936b112f570970c5210bdb3c626a717,http://pdfs.semanticscholar.org/0d46/7adaf936b112f570970c5210bdb3c626a717.pdf
+0d6b28691e1aa2a17ffaa98b9b38ac3140fb3306,http://pdfs.semanticscholar.org/0d6b/28691e1aa2a17ffaa98b9b38ac3140fb3306.pdf
+0de91641f37b0a81a892e4c914b46d05d33fd36e,https://ibug.doc.ic.ac.uk/media/uploads/documents/raps.pdf
+0df0d1adea39a5bef318b74faa37de7f3e00b452,https://scalable.mpi-inf.mpg.de/files/2015/09/zhang_CVPR15.pdf
+0d3bb75852098b25d90f31d2f48fd0cb4944702b,http://stefan.winklerbros.net/Publications/icip2014a.pdf
+0db8e6eb861ed9a70305c1839eaef34f2c85bbaf,https://arxiv.org/pdf/1704.06244v1.pdf
+0d902541c26f03ff95221e0e71d67c39e094a61d,https://arxiv.org/pdf/1506.05085v1.pdf
+0d0b880e2b531c45ee8227166a489bf35a528cb9,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Zhang_Structure_Preserving_Object_2013_CVPR_paper.pdf
+0d3882b22da23497e5de8b7750b71f3a4b0aac6b,http://pdfs.semanticscholar.org/0d38/82b22da23497e5de8b7750b71f3a4b0aac6b.pdf
+0dbf4232fcbd52eb4599dc0760b18fcc1e9546e9,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553740.pdf
+0d760e7d762fa449737ad51431f3ff938d6803fe,https://arxiv.org/pdf/1705.05922v1.pdf
+0d3068b352c3733c9e1cc75e449bf7df1f7b10a4,http://users.cecs.anu.edu.au/~adhall/Dhall_ACII_DC_2013.pdf
+0dd72887465046b0f8fc655793c6eaaac9c03a3d,http://pdfs.semanticscholar.org/e112/df5539821a00dfa818617bf95f901f016763.pdf
+0d087aaa6e2753099789cd9943495fbbd08437c0,http://pdfs.semanticscholar.org/beab/b0d9d30871d517c5d915cf852f7f5293f52f.pdf
+0d5824e14593bcb349d636d255ba274f98bbb88f,http://www.researchgate.net/profile/Claus_Neubauer/publication/224716248_A_Variational_Bayesian_Approach_for_Classification_with_Corrupted_Inputs/links/00b7d52dd1f690da64000000.pdf
+0d8415a56660d3969449e77095be46ef0254a448,http://www.lv-nus.org/papers/2004/2004_C_6.pdf
+0dfa460a35f7cab4705726b6367557b9f7842c65,https://arxiv.org/pdf/1504.01561v1.pdf
+0d14261e69a4ad4140ce17c1d1cea76af6546056,http://pdfs.semanticscholar.org/0d14/261e69a4ad4140ce17c1d1cea76af6546056.pdf
+0dbacb4fd069462841ebb26e1454b4d147cd8e98,http://poseidon.csd.auth.gr/papers/PUBLISHED/CONFERENCE/pdf/Nikitidis11c.pdf
+0db36bf08140d53807595b6313201a7339470cfe,http://www.cfar.umd.edu/~rama/Publications/Shroff_CVPR_2010.pdf
+0d781b943bff6a3b62a79e2c8daf7f4d4d6431ad,https://cs.uwaterloo.ca/~jhoey/papers/DhallICMI16.pdf
+0d735e7552af0d1dcd856a8740401916e54b7eee,http://pdfs.semanticscholar.org/915f/f5da6658e800eb7ec1c8f3f26281e18d3cbf.pdf
+0d06b3a4132d8a2effed115a89617e0a702c957a,http://arxiv.org/pdf/1605.08680v1.pdf
+0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e,http://pdfs.semanticscholar.org/0d2d/d4fc016cb6a517d8fb43a7cc3ff62964832e.pdf
+0d1d9a603b08649264f6e3b6d5a66bf1e1ac39d2,http://pdfs.semanticscholar.org/0d1d/9a603b08649264f6e3b6d5a66bf1e1ac39d2.pdf
+951368a1a8b3c5cd286726050b8bdf75a80f7c37,https://vision.cornell.edu/se3/wp-content/uploads/2014/09/osb_iccv09_cam.pdf
+956e9b69b3366ed3e1670609b53ba4a7088b8b7e,http://pdfs.semanticscholar.org/956e/9b69b3366ed3e1670609b53ba4a7088b8b7e.pdf
+956317de62bd3024d4ea5a62effe8d6623a64e53,https://research-repository.griffith.edu.au/bitstream/handle/10072/17889/47024_1.pdf;jsessionid=2146D7EB83BAD65DE653E0056477D61A?sequence=1
+951f21a5671a4cd14b1ef1728dfe305bda72366f,http://pdfs.semanticscholar.org/951f/21a5671a4cd14b1ef1728dfe305bda72366f.pdf
+95f26d1c80217706c00b6b4b605a448032b93b75,http://pdfs.semanticscholar.org/95f2/6d1c80217706c00b6b4b605a448032b93b75.pdf
+95f12d27c3b4914e0668a268360948bce92f7db3,http://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf
+95aef5184b89daebd0c820c8102f331ea7cae1ad,http://www.dia.fi.upm.es/~pcr/publications/paa2008.pdf
+9547a7bce2b85ef159b2d7c1b73dea82827a449f,http://tdlc.ucsd.edu/research/publications/Wu_Bartlett_Movellan_Facial_Expression_2010.pdf
+9513503867b29b10223f17c86e47034371b6eb4f,http://pdfs.semanticscholar.org/9513/503867b29b10223f17c86e47034371b6eb4f.pdf
+955e2a39f51c0b6f967199942d77625009e580f9,http://pdfs.semanticscholar.org/955e/2a39f51c0b6f967199942d77625009e580f9.pdf
+956c634343e49319a5e3cba4f2bd2360bdcbc075,http://www.cse.ust.hk/~jamesk/papers/tsmc06.pdf
+958c599a6f01678513849637bec5dc5dba592394,http://pdfs.semanticscholar.org/958c/599a6f01678513849637bec5dc5dba592394.pdf
+59be98f54bb4ed7a2984dc6a3c84b52d1caf44eb,http://www.ccvcl.org/~wei/pdf/CNNExpRecog_CamReady.pdf
+591a737c158be7b131121d87d9d81b471c400dba,http://affect.media.mit.edu/pdfs/10.McDuff-etal-Affect-2010.pdf
+59690814e916d1c0e7aa9190678ba847cbd0046f,http://figment.cse.usf.edu/~sfefilat/data/papers/ThBCT8.7.pdf
+59bfeac0635d3f1f4891106ae0262b81841b06e4,http://pdfs.semanticscholar.org/59bf/eac0635d3f1f4891106ae0262b81841b06e4.pdf
+59cdafed4eeb8ff7c9bb2d4ecd0edeb8a361ffc1,http://pdfs.semanticscholar.org/59cd/afed4eeb8ff7c9bb2d4ecd0edeb8a361ffc1.pdf
+590628a9584e500f3e7f349ba7e2046c8c273fcf,http://pdfs.semanticscholar.org/6893/c573d7abd3847d6ea2f0e79b6924ca124372.pdf
+593234ba1d2e16a887207bf65d6b55bbc7ea2247,http://pdfs.semanticscholar.org/73c4/47ea9f75b0ffbdd35c957aed88fe80b2ac07.pdf
+59eefa01c067a33a0b9bad31c882e2710748ea24,http://pdfs.semanticscholar.org/59ee/fa01c067a33a0b9bad31c882e2710748ea24.pdf
+59e2037f5079794cb9128c7f0900a568ced14c2a,https://arxiv.org/pdf/1704.02231v1.pdf
+59c9d416f7b3d33141cc94567925a447d0662d80,http://pdfs.semanticscholar.org/59c9/d416f7b3d33141cc94567925a447d0662d80.pdf
+59bece468ed98397d54865715f40af30221aa08c,https://bib.irb.hr/datoteka/833608.BiForD2016_11.pdf
+59a35b63cf845ebf0ba31c290423e24eb822d245,http://biometrics.cse.msu.edu/Publications/Face/Klumetal_FaceSketchID_TIFS2014.pdf
+59f325e63f21b95d2b4e2700c461f0136aecc171,http://nlpr-web.ia.ac.cn/2011papers/gjhy/gh13.pdf
+59420fd595ae745ad62c26ae55a754b97170b01f,http://pdfs.semanticscholar.org/5942/0fd595ae745ad62c26ae55a754b97170b01f.pdf
+5922e26c9eaaee92d1d70eae36275bb226ecdb2e,http://pdfs.semanticscholar.org/5922/e26c9eaaee92d1d70eae36275bb226ecdb2e.pdf
+59d8fa6fd91cdb72cd0fa74c04016d79ef5a752b,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Zafeiriou_The_Menpo_Facial_CVPR_2017_paper.pdf
+59e75aad529b8001afc7e194e21668425119b864,http://pdfs.semanticscholar.org/59e7/5aad529b8001afc7e194e21668425119b864.pdf
+59d45281707b85a33d6f50c6ac6b148eedd71a25,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Cheng_Rank_Minimization_across_2013_ICCV_paper.pdf
+59319c128c8ac3c88b4ab81088efe8ae9c458e07,http://pdfs.semanticscholar.org/5931/9c128c8ac3c88b4ab81088efe8ae9c458e07.pdf
+59031a35b0727925f8c47c3b2194224323489d68,http://www4.comp.polyu.edu.hk/~cslzhang/paper/conf/ICCV13/SVDL.pdf
+926c67a611824bc5ba67db11db9c05626e79de96,http://www.ee.columbia.edu/ln/dvmm/publications/09/xu_ebsl.pdf
+923ede53b0842619831e94c7150e0fc4104e62f7,http://mirlab.org/conference_papers/International_Conference/ICASSP%202016/pdfs/0001293.pdf
+9264b390aa00521f9bd01095ba0ba4b42bf84d7e,http://pdfs.semanticscholar.org/9264/b390aa00521f9bd01095ba0ba4b42bf84d7e.pdf
+920a92900fbff22fdaaef4b128ca3ca8e8d54c3e,http://pdfs.semanticscholar.org/920a/92900fbff22fdaaef4b128ca3ca8e8d54c3e.pdf
+924b14a9e36d0523a267293c6d149bca83e73f3b,http://pdfs.semanticscholar.org/924b/14a9e36d0523a267293c6d149bca83e73f3b.pdf
+9282239846d79a29392aa71fc24880651826af72,http://pdfs.semanticscholar.org/9282/239846d79a29392aa71fc24880651826af72.pdf
+92115b620c7f653c847f43b6c4ff0470c8e55dab,http://pdfs.semanticscholar.org/a77c/798d06060ece81c620458e4586819e75ae15.pdf
+92c4636962b719542deb984bd2bf75af405b574c,http://www.umiacs.umd.edu/~arijit/projects/Active_clustering/active_clustering_ijcv.pdf
+92c2dd6b3ac9227fce0a960093ca30678bceb364,https://aran.library.nuigalway.ie/bitstream/handle/10379/1350/On%20color%20texture%20normalization%20for%20active%20appearance%20models.pdf?isAllowed=y&sequence=1
+922838dd98d599d1d229cc73896d55e7a769aa7c,http://www.cs.umass.edu/~elm/papers/HuangCVPR12.pdf
+9294739e24e1929794330067b84f7eafd286e1c8,http://pdfs.semanticscholar.org/9294/739e24e1929794330067b84f7eafd286e1c8.pdf
+92fada7564d572b72fd3be09ea3c39373df3e27c,http://pdfs.semanticscholar.org/b8a4/f51a85fb801e1a5f04c213725d60133233a0.pdf
+927ad0dceacce2bb482b96f42f2fe2ad1873f37a,http://pdfs.semanticscholar.org/927a/d0dceacce2bb482b96f42f2fe2ad1873f37a.pdf
+929bd1d11d4f9cbc638779fbaf958f0efb82e603,http://pdfs.semanticscholar.org/929b/d1d11d4f9cbc638779fbaf958f0efb82e603.pdf
+0c741fa0966ba3ee4fc326e919bf2f9456d0cd74,http://cs.nju.edu.cn/_upload/tpl/01/0b/267/template267/zhouzh.files/publication/aaai10LLD.pdf
+0c435e7f49f3e1534af0829b7461deb891cf540a,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Wang_Capturing_Global_Semantic_2013_ICCV_paper.pdf
+0cb7e4c2f6355c73bfc8e6d5cdfad26f3fde0baf,http://pdfs.semanticscholar.org/0cb7/e4c2f6355c73bfc8e6d5cdfad26f3fde0baf.pdf
+0c30f6303dc1ff6d05c7cee4f8952b74b9533928,http://humansensing.cs.cmu.edu/sites/default/files/15parda.pdf
+0ccc535d12ad2142a8310d957cc468bbe4c63647,http://arxiv.org/pdf/1510.03979v1.pdf
+0c8a0a81481ceb304bd7796e12f5d5fa869ee448,http://pdfs.semanticscholar.org/0c8a/0a81481ceb304bd7796e12f5d5fa869ee448.pdf
+0c36c988acc9ec239953ff1b3931799af388ef70,http://pdfs.semanticscholar.org/0c36/c988acc9ec239953ff1b3931799af388ef70.pdf
+0c5ddfa02982dcad47704888b271997c4de0674b,http://pdfs.semanticscholar.org/0c5d/dfa02982dcad47704888b271997c4de0674b.pdf
+0c79a39a870d9b56dc00d5252d2a1bfeb4c295f1,http://faculty.iiit.ac.in/~anoop/papers/Vijay2014Face.pdf
+0cccf576050f493c8b8fec9ee0238277c0cfd69a,http://pdfs.semanticscholar.org/0ccc/f576050f493c8b8fec9ee0238277c0cfd69a.pdf
+0cdb49142f742f5edb293eb9261f8243aee36e12,https://arxiv.org/pdf/1303.2783v1.pdf
+0c069a870367b54dd06d0da63b1e3a900a257298,http://pdfs.semanticscholar.org/cdb8/36785579a4ea3d0eff26dbba8cf845a347d2.pdf
+0c75c7c54eec85e962b1720755381cdca3f57dfb,https://webpages.uncc.edu/~szhang16/paper/PAMI_face_landmark.pdf
+0c167008408c301935bade9536084a527527ec74,http://www.micc.unifi.it/publications/2006/BDN06/bertini_nunziati-mm06.pdf
+0c1d85a197a1f5b7376652a485523e616a406273,http://openaccess.thecvf.com/content_cvpr_2017/papers/Hayat_Joint_Registration_and_CVPR_2017_paper.pdf
+0ca66283f4fb7dbc682f789fcf6d6732006befd5,http://pdfs.semanticscholar.org/0ca6/6283f4fb7dbc682f789fcf6d6732006befd5.pdf
+0c7f27d23a162d4f3896325d147f412c40160b52,http://pdfs.semanticscholar.org/0c7f/27d23a162d4f3896325d147f412c40160b52.pdf
+0c20fd90d867fe1be2459223a3cb1a69fa3d44bf,http://pdfs.semanticscholar.org/0c20/fd90d867fe1be2459223a3cb1a69fa3d44bf.pdf
+0c2875bb47db3698dbbb3304aca47066978897a4,http://slazebni.cs.illinois.edu/publications/iccv17_situation.pdf
+0c3f7272a68c8e0aa6b92d132d1bf8541c062141,http://pdfs.semanticscholar.org/0c3f/7272a68c8e0aa6b92d132d1bf8541c062141.pdf
+0cbc4dcf2aa76191bbf641358d6cecf38f644325,http://pdfs.semanticscholar.org/0cbc/4dcf2aa76191bbf641358d6cecf38f644325.pdf
+0ce8a45a77e797e9d52604c29f4c1e227f604080,http://pdfs.semanticscholar.org/0ce8/a45a77e797e9d52604c29f4c1e227f604080.pdf
+0ce3a786aed896d128f5efdf78733cc675970854,http://pdfs.semanticscholar.org/3689/2b6bb4848a9c21158b8eded7f14a6654dd7e.pdf
+0c93cb1af3bba1bd90a03e921ff2d55acf35c01f,http://www.researchgate.net/profile/Mohammed_Bennamoun/publication/220928947_Robust_Regression_for_Face_Recognition/links/542157f20cf203f155c65a23.pdf
+0cf7741e1fdb11a77cdf39b4dda8c65a62af4f23,http://vipl.ict.ac.cn/sites/default/files/papers/files/2013_TIP_mnkan_Learning%20Prototype%20Hyperplanes%20for%20Face%20Verification%20in%20the%20Wild.pdf
+0c54e9ac43d2d3bab1543c43ee137fc47b77276e,http://pdfs.semanticscholar.org/0c54/e9ac43d2d3bab1543c43ee137fc47b77276e.pdf
+0c5afb209b647456e99ce42a6d9d177764f9a0dd,http://pdfs.semanticscholar.org/49ee/5e1f1cfa45aa105e4120e6b7fb5b14cc2877.pdf
+0c59071ddd33849bd431165bc2d21bbe165a81e0,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Oh_Person_Recognition_in_ICCV_2015_paper.pdf
+0c377fcbc3bbd35386b6ed4768beda7b5111eec6,http://www.ecse.rpi.edu/~qji/Papers/face_exp_pami.pdf
+0c12cbb9b9740dfa2816b8e5cde69c2f5a715c58,http://openaccess.thecvf.com/content_cvpr_2017/papers/Zhao_Memory-Augmented_Attribute_Manipulation_CVPR_2017_paper.pdf
+0cb2dd5f178e3a297a0c33068961018659d0f443,http://www.vislab.ucr.edu/Biometrics2017/program_slides/Noblis_CVPRW_IJBB.pdf
+0cd8895b4a8f16618686f622522726991ca2a324,http://pdfs.semanticscholar.org/0cd8/895b4a8f16618686f622522726991ca2a324.pdf
+0cf7da0df64557a4774100f6fde898bc4a3c4840,https://www2.eecs.berkeley.edu/Research/Projects/CS/vision/shape/berg-cvpr05.pdf
+0cbe059c181278a373292a6af1667c54911e7925,http://pdfs.semanticscholar.org/ea4e/15a4cf256599d11291040ad5e487f55ae514.pdf
+0c4659b35ec2518914da924e692deb37e96d6206,https://cs.uwaterloo.ca/~jhoey/teaching/cs793/papers/OrchardTIP10.pdf
+0c6e29d82a5a080dc1db9eeabbd7d1529e78a3dc,http://pdfs.semanticscholar.org/0c6e/29d82a5a080dc1db9eeabbd7d1529e78a3dc.pdf
+0ced7b814ec3bb9aebe0fcf0cac3d78f36361eae,http://pdfs.semanticscholar.org/0ced/7b814ec3bb9aebe0fcf0cac3d78f36361eae.pdf
+0c53ef79bb8e5ba4e6a8ebad6d453ecf3672926d,https://arxiv.org/pdf/1609.00153v1.pdf
+0c60eebe10b56dbffe66bb3812793dd514865935,http://arxiv.org/pdf/1502.07209.pdf
+0c05f60998628884a9ac60116453f1a91bcd9dda,http://pdfs.semanticscholar.org/7b19/80d4ac1730fd0145202a8cb125bf05d96f01.pdf
+660b73b0f39d4e644bf13a1745d6ee74424d4a16,http://pdfs.semanticscholar.org/660b/73b0f39d4e644bf13a1745d6ee74424d4a16.pdf
+66d512342355fb77a4450decc89977efe7e55fa2,http://pdfs.semanticscholar.org/66d5/12342355fb77a4450decc89977efe7e55fa2.pdf
+66aad5b42b7dda077a492e5b2c7837a2a808c2fa,http://pdfs.semanticscholar.org/66aa/d5b42b7dda077a492e5b2c7837a2a808c2fa.pdf
+66b9d954dd8204c3a970d86d91dd4ea0eb12db47,http://pdfs.semanticscholar.org/f3ec/7e58da49f39b807ff1c98d0bf574ef5f0720.pdf
+6643a7feebd0479916d94fb9186e403a4e5f7cbf,http://pdfs.semanticscholar.org/6643/a7feebd0479916d94fb9186e403a4e5f7cbf.pdf
+66dcd855a6772d2731b45cfdd75f084327b055c2,http://pdfs.semanticscholar.org/66dc/d855a6772d2731b45cfdd75f084327b055c2.pdf
+666939690c564641b864eed0d60a410b31e49f80,http://pdfs.semanticscholar.org/6669/39690c564641b864eed0d60a410b31e49f80.pdf
+66330846a03dcc10f36b6db9adf3b4d32e7a3127,http://pdfs.semanticscholar.org/6633/0846a03dcc10f36b6db9adf3b4d32e7a3127.pdf
+6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c,http://openaccess.thecvf.com/content_cvpr_2016/papers/Niu_Ordinal_Regression_With_CVPR_2016_paper.pdf
+666300af8ffb8c903223f32f1fcc5c4674e2430b,http://pdfs.semanticscholar.org/6663/00af8ffb8c903223f32f1fcc5c4674e2430b.pdf
+66029f1be1a5cee9a4e3e24ed8fcb65d5d293720,http://pdfs.semanticscholar.org/6602/9f1be1a5cee9a4e3e24ed8fcb65d5d293720.pdf
+6691dfa1a83a04fdc0177d8d70e3df79f606b10f,http://pdfs.semanticscholar.org/6691/dfa1a83a04fdc0177d8d70e3df79f606b10f.pdf
+66a2c229ac82e38f1b7c77a786d8cf0d7e369598,http://pdfs.semanticscholar.org/66a2/c229ac82e38f1b7c77a786d8cf0d7e369598.pdf
+66886997988358847615375ba7d6e9eb0f1bb27f,https://pdfs.semanticscholar.org/6688/6997988358847615375ba7d6e9eb0f1bb27f.pdf
+66a9935e958a779a3a2267c85ecb69fbbb75b8dc,http://pdfs.semanticscholar.org/66a9/935e958a779a3a2267c85ecb69fbbb75b8dc.pdf
+66533107f9abdc7d1cb8f8795025fc7e78eb1122,http://pdfs.semanticscholar.org/6653/3107f9abdc7d1cb8f8795025fc7e78eb1122.pdf
+66810438bfb52367e3f6f62c24f5bc127cf92e56,http://pdfs.semanticscholar.org/6681/0438bfb52367e3f6f62c24f5bc127cf92e56.pdf
+66af2afd4c598c2841dbfd1053bf0c386579234e,http://www.ics.uci.edu/~dvk/pub/J17_IJMIR14_Liyan.pdf
+66f02fbcad13c6ee5b421be2fc72485aaaf6fcb5,http://pdfs.semanticscholar.org/66f0/2fbcad13c6ee5b421be2fc72485aaaf6fcb5.pdf
+66e9fb4c2860eb4a15f713096020962553696e12,http://pdfs.semanticscholar.org/d42f/8e7283b20b89f55f8d36efcb1d8e2b774167.pdf
+66e6f08873325d37e0ec20a4769ce881e04e964e,http://www.cc.gatech.edu/~hays/papers/attribute_ijcv.pdf
+661da40b838806a7effcb42d63a9624fcd684976,http://pdfs.semanticscholar.org/661d/a40b838806a7effcb42d63a9624fcd684976.pdf
+66886f5af67b22d14177119520bd9c9f39cdd2e6,http://pdfs.semanticscholar.org/6688/6f5af67b22d14177119520bd9c9f39cdd2e6.pdf
+3edb0fa2d6b0f1984e8e2c523c558cb026b2a983,http://cs.nju.edu.cn/zhouzh/zhouzh.files/publication/tpami07.pdf
+3e69ed088f588f6ecb30969bc6e4dbfacb35133e,http://pdfs.semanticscholar.org/3e69/ed088f588f6ecb30969bc6e4dbfacb35133e.pdf
+3e4b38b0574e740dcbd8f8c5dfe05dbfb2a92c07,http://pdfs.semanticscholar.org/accb/d6cd5dd649137a7c57ad6ef99232759f7544.pdf
+3ee7a8107a805370b296a53e355d111118e96b7c,http://pdfs.semanticscholar.org/3ee7/a8107a805370b296a53e355d111118e96b7c.pdf
+3ebce6710135d1f9b652815e59323858a7c60025,http://pdfs.semanticscholar.org/3ebc/e6710135d1f9b652815e59323858a7c60025.pdf
+3ec05713a1eed6fa9b57fef718f369f68bbbe09f,http://pdfs.semanticscholar.org/3ec0/5713a1eed6fa9b57fef718f369f68bbbe09f.pdf
+3e3f305dac4fbb813e60ac778d6929012b4b745a,http://pdfs.semanticscholar.org/3e3f/305dac4fbb813e60ac778d6929012b4b745a.pdf
+3ea8a6dc79d79319f7ad90d663558c664cf298d4,http://pdfs.semanticscholar.org/3ea8/a6dc79d79319f7ad90d663558c664cf298d4.pdf
+3e4f84ce00027723bdfdb21156c9003168bc1c80,http://www.eurasip.org/Proceedings/Eusipco/Eusipco2011/papers/1569427521.pdf
+3e04feb0b6392f94554f6d18e24fadba1a28b65f,http://pdfs.semanticscholar.org/b72c/5119c0aafa64f32e8e773638b5738f31b33c.pdf
+3e685704b140180d48142d1727080d2fb9e52163,http://pdfs.semanticscholar.org/3e68/5704b140180d48142d1727080d2fb9e52163.pdf
+3e51d634faacf58e7903750f17111d0d172a0bf1,http://www.eurasip.org/Proceedings/Eusipco/Eusipco2014/HTML/papers/1569924869.pdf
+3e687d5ace90c407186602de1a7727167461194a,http://pdfs.semanticscholar.org/3e68/7d5ace90c407186602de1a7727167461194a.pdf
+3e3a87eb24628ab075a3d2bde3abfd185591aa4c,http://pdfs.semanticscholar.org/3e3a/87eb24628ab075a3d2bde3abfd185591aa4c.pdf
+3edc43e336be075dca77c7e173b555b6c14274d8,http://pdfs.semanticscholar.org/3edc/43e336be075dca77c7e173b555b6c14274d8.pdf
+3e207c05f438a8cef7dd30b62d9e2c997ddc0d3f,http://pdfs.semanticscholar.org/bca7/c0a8c5b0503a4ee43f3561f540918071aaa3.pdf
+5040f7f261872a30eec88788f98326395a44db03,http://pdfs.semanticscholar.org/5040/f7f261872a30eec88788f98326395a44db03.pdf
+50f0c495a214b8d57892d43110728e54e413d47d,http://pdfs.semanticscholar.org/50f0/c495a214b8d57892d43110728e54e413d47d.pdf
+501096cca4d0b3d1ef407844642e39cd2ff86b37,http://pdfs.semanticscholar.org/5010/96cca4d0b3d1ef407844642e39cd2ff86b37.pdf
+500fbe18afd44312738cab91b4689c12b4e0eeee,http://www.maia.ub.es/~sergio/linked/ijcnn_age_and_cultural_2015.pdf
+501eda2d04b1db717b7834800d74dacb7df58f91,http://pdfs.semanticscholar.org/501e/da2d04b1db717b7834800d74dacb7df58f91.pdf
+5083c6be0f8c85815ead5368882b584e4dfab4d1,http://pdfs.semanticscholar.org/5083/c6be0f8c85815ead5368882b584e4dfab4d1.pdf
+50ce3f8744c219871fbdcab1342d49d589f2626b,http://www.public.asu.edu/~jye02/Publications/Papers/AML_cvpr07.pdf
+500b92578e4deff98ce20e6017124e6d2053b451,http://eprints.eemcs.utwente.nl/25818/01/Pantic_Incremental_Face_Alignment_in_the_Wild.pdf
+5058a7ec68c32984c33f357ebaee96c59e269425,http://pdfs.semanticscholar.org/5058/a7ec68c32984c33f357ebaee96c59e269425.pdf
+50ff21e595e0ebe51ae808a2da3b7940549f4035,http://export.arxiv.org/pdf/1710.02985
+5042b358705e8d8e8b0655d07f751be6a1565482,http://pdfs.semanticscholar.org/5042/b358705e8d8e8b0655d07f751be6a1565482.pdf
+50e47857b11bfd3d420f6eafb155199f4b41f6d7,http://pdfs.semanticscholar.org/50e4/7857b11bfd3d420f6eafb155199f4b41f6d7.pdf
+50614ff325f0c8ca20f99efc55d65a8d4cc768cd,http://www.genizah.org/professionalPapers/IJCinGeniza.pdf
+50eb75dfece76ed9119ec543e04386dfc95dfd13,https://lirias.kuleuven.be/bitstream/123456789/197359/1/boiy-learningVisual.pdf
+5050807e90a925120cbc3a9cd13431b98965f4b9,http://pdfs.semanticscholar.org/5050/807e90a925120cbc3a9cd13431b98965f4b9.pdf
+50eb2ee977f0f53ab4b39edc4be6b760a2b05f96,http://ajbasweb.com/old/ajbas/2017/April/1-11.pdf
+50e45e9c55c9e79aaae43aff7d9e2f079a2d787b,http://pdfs.semanticscholar.org/50e4/5e9c55c9e79aaae43aff7d9e2f079a2d787b.pdf
+5003754070f3a87ab94a2abb077c899fcaf936a6,http://pdfs.semanticscholar.org/5003/754070f3a87ab94a2abb077c899fcaf936a6.pdf
+503db524b9a99220d430e741c44cd9c91ce1ddf8,http://pdfs.semanticscholar.org/503d/b524b9a99220d430e741c44cd9c91ce1ddf8.pdf
+50d15cb17144344bb1879c0a5de7207471b9ff74,http://pdfs.semanticscholar.org/50d1/5cb17144344bb1879c0a5de7207471b9ff74.pdf
+50d961508ec192197f78b898ff5d44dc004ef26d,http://pdfs.semanticscholar.org/50d9/61508ec192197f78b898ff5d44dc004ef26d.pdf
+50ccc98d9ce06160cdf92aaf470b8f4edbd8b899,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W08/papers/Qu_Towards_Robust_Cascaded_2015_CVPR_paper.pdf
+5028c0decfc8dd623c50b102424b93a8e9f2e390,http://pdfs.semanticscholar.org/5028/c0decfc8dd623c50b102424b93a8e9f2e390.pdf
+505e55d0be8e48b30067fb132f05a91650666c41,http://pdfs.semanticscholar.org/505e/55d0be8e48b30067fb132f05a91650666c41.pdf
+507c9672e3673ed419075848b4b85899623ea4b0,http://pdfs.semanticscholar.org/507c/9672e3673ed419075848b4b85899623ea4b0.pdf
+50c0de2cccf7084a81debad5fdb34a9139496da0,http://pdfs.semanticscholar.org/50c0/de2cccf7084a81debad5fdb34a9139496da0.pdf
+680d662c30739521f5c4b76845cb341dce010735,http://people.cs.umass.edu/~smaji/papers/maji15part.pdf
+68f89c1ee75a018c8eff86e15b1d2383c250529b,http://pdfs.semanticscholar.org/68f8/9c1ee75a018c8eff86e15b1d2383c250529b.pdf
+68a2ee5c5b76b6feeb3170aaff09b1566ec2cdf5,http://pdfs.semanticscholar.org/68a2/ee5c5b76b6feeb3170aaff09b1566ec2cdf5.pdf
+68a3f12382003bc714c51c85fb6d0557dcb15467,http://research.microsoft.com/pubs/217884/ZitnickSent2SceneICCV13.pdf
+6859b891a079a30ef16f01ba8b85dc45bd22c352,http://pdfs.semanticscholar.org/6859/b891a079a30ef16f01ba8b85dc45bd22c352.pdf
+68003e92a41d12647806d477dd7d20e4dcde1354,http://pdfs.semanticscholar.org/db86/41ed047da4a90d53414edfe126c845141d69.pdf
+68d4056765c27fbcac233794857b7f5b8a6a82bf,http://pdfs.semanticscholar.org/68d4/056765c27fbcac233794857b7f5b8a6a82bf.pdf
+68996c28bc050158f025a17908eb4bc805c3ee55,https://www.researchgate.net/profile/M_Yeasin/publication/4082331_From_facial_expression_to_level_of_interest_a_spatio-temporal_approach/links/54983d0a0cf2519f5a1dda62.pdf
+68c5238994e3f654adea0ccd8bca29f2a24087fc,http://web.fsktm.um.edu.my/~cschan/doc/ICIP2013.pdf
+68bf7fc874c2db44d0446cdbb1e05f19c2239282,http://pdfs.semanticscholar.org/68bf/7fc874c2db44d0446cdbb1e05f19c2239282.pdf
+68cf263a17862e4dd3547f7ecc863b2dc53320d8,http://pdfs.semanticscholar.org/68cf/263a17862e4dd3547f7ecc863b2dc53320d8.pdf
+68e9c837431f2ba59741b55004df60235e50994d,http://pdfs.semanticscholar.org/68e9/c837431f2ba59741b55004df60235e50994d.pdf
+685f8df14776457c1c324b0619c39b3872df617b,http://pdfs.semanticscholar.org/685f/8df14776457c1c324b0619c39b3872df617b.pdf
+687e17db5043661f8921fb86f215e9ca2264d4d2,http://www.ece.northwestern.edu/~ganghua/publication/ICCV09a.pdf
+688754568623f62032820546ae3b9ca458ed0870,http://pdfs.semanticscholar.org/d6c2/108259edf97fabcbe608766a6baa98ac893d.pdf
+68f9cb5ee129e2b9477faf01181cd7e3099d1824,http://pdfs.semanticscholar.org/68f9/cb5ee129e2b9477faf01181cd7e3099d1824.pdf
+68bf34e383092eb827dd6a61e9b362fcba36a83a,http://pdfs.semanticscholar.org/68bf/34e383092eb827dd6a61e9b362fcba36a83a.pdf
+68d40176e878ebffbc01ffb0556e8cb2756dd9e9,http://pdfs.semanticscholar.org/68d4/0176e878ebffbc01ffb0556e8cb2756dd9e9.pdf
+68c4a1d438ea1c6dfba92e3aee08d48f8e7f7090,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w11/papers/Liu_AgeNet_Deeply_Learned_ICCV_2015_paper.pdf
+6889d649c6bbd9c0042fadec6c813f8e894ac6cc,http://pdfs.semanticscholar.org/6889/d649c6bbd9c0042fadec6c813f8e894ac6cc.pdf
+68f69e6c6c66cfde3d02237a6918c9d1ee678e1b,http://www.cs.fiu.edu/~chens/PDF/ISM09_Pruning.pdf
+683ec608442617d11200cfbcd816e86ce9ec0899,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Chen_Dual_Linear_Regression_2014_CVPR_paper.pdf
+68c17aa1ecbff0787709be74d1d98d9efd78f410,http://pdfs.semanticscholar.org/68c1/7aa1ecbff0787709be74d1d98d9efd78f410.pdf
+6821113166b030d2123c3cd793dd63d2c909a110,http://pdfs.semanticscholar.org/6821/113166b030d2123c3cd793dd63d2c909a110.pdf
+68a04a3ae2086986877fee2c82ae68e3631d0356,http://pdfs.semanticscholar.org/68a0/4a3ae2086986877fee2c82ae68e3631d0356.pdf
+6888f3402039a36028d0a7e2c3df6db94f5cb9bb,http://pdfs.semanticscholar.org/6888/f3402039a36028d0a7e2c3df6db94f5cb9bb.pdf
+57f5711ca7ee5c7110b7d6d12c611d27af37875f,http://pdfs.semanticscholar.org/57f5/711ca7ee5c7110b7d6d12c611d27af37875f.pdf
+570308801ff9614191cfbfd7da88d41fb441b423,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Chu_Unsupervised_Synchrony_Discovery_ICCV_2015_paper.pdf
+57bf9888f0dfcc41c5ed5d4b1c2787afab72145a,http://pdfs.semanticscholar.org/57bf/9888f0dfcc41c5ed5d4b1c2787afab72145a.pdf
+57ebeff9273dea933e2a75c306849baf43081a8c,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Sun_Deep_Convolutional_Network_2013_CVPR_paper.pdf
+574751dbb53777101502419127ba8209562c4758,http://pdfs.semanticscholar.org/5747/51dbb53777101502419127ba8209562c4758.pdf
+5778d49c8d8d127351eee35047b8d0dc90defe85,http://pdfs.semanticscholar.org/ec31/6c1c182de9d7fe73c7fbbc1a121a7e43c100.pdf
+57ee3a8b0cafe211d1e9b477d210bb78b9d43bc1,http://www.cs.toronto.edu/~rfm/pubs/morphBM.pdf
+57fd229097e4822292d19329a17ceb013b2cb648,http://pdfs.semanticscholar.org/57fd/229097e4822292d19329a17ceb013b2cb648.pdf
+57c59011614c43f51a509e10717e47505c776389,http://users.cecs.anu.edu.au/~basura/papers/CVPR_2017_Workshop.pdf
+57b8b28f8748d998951b5a863ff1bfd7ca4ae6a5,http://pdfs.semanticscholar.org/57b8/b28f8748d998951b5a863ff1bfd7ca4ae6a5.pdf
+57101b29680208cfedf041d13198299e2d396314,http://pdfs.semanticscholar.org/5710/1b29680208cfedf041d13198299e2d396314.pdf
+57893403f543db75d1f4e7355283bdca11f3ab1b,http://www.doc.ic.ac.uk/~maja/PAMI-KoelstraEtAl-accepted.pdf
+571f493c0ade12bbe960cfefc04b0e4607d8d4b2,http://pdfs.semanticscholar.org/571f/493c0ade12bbe960cfefc04b0e4607d8d4b2.pdf
+57f8e1f461ab25614f5fe51a83601710142f8e88,http://pdfs.semanticscholar.org/57f8/e1f461ab25614f5fe51a83601710142f8e88.pdf
+57a1466c5985fe7594a91d46588d969007210581,https://www.wjscheirer.com/projects/unconstrained-face/amfg_2010_poster.pdf
+5721216f2163d026e90d7cd9942aeb4bebc92334,http://pdfs.semanticscholar.org/5721/216f2163d026e90d7cd9942aeb4bebc92334.pdf
+575141e42740564f64d9be8ab88d495192f5b3bc,http://pdfs.semanticscholar.org/5751/41e42740564f64d9be8ab88d495192f5b3bc.pdf
+57911d7f347dde0398f964e0c7ed8fdd0a882449,http://amp.ece.cmu.edu/people/Andy/Andy_files/1424CVPR08Gallagher.pdf
+5789f8420d8f15e7772580ec373112f864627c4b,http://openaccess.thecvf.com/content_ICCV_2017/papers/Schneider_Efficient_Global_Illumination_ICCV_2017_paper.pdf
+574705812f7c0e776ad5006ae5e61d9b071eebdb,http://pdfs.semanticscholar.org/5747/05812f7c0e776ad5006ae5e61d9b071eebdb.pdf
+5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725,http://pdfs.semanticscholar.org/63b2/f5348af0f969dfc2afb4977732393c6459ec.pdf
+57b052cf826b24739cd7749b632f85f4b7bcf90b,http://pdfs.semanticscholar.org/57b0/52cf826b24739cd7749b632f85f4b7bcf90b.pdf
+57d37ad025b5796457eee7392d2038910988655a,http://pdfs.semanticscholar.org/57d3/7ad025b5796457eee7392d2038910988655a.pdf
+57f7d8c6ec690bd436e70d7761bc5f46e993be4c,https://opus.lib.uts.edu.au/bitstream/10453/10785/3/2009001878_Du.pdf
+3b1260d78885e872cf2223f2c6f3d6f6ea254204,http://pdfs.semanticscholar.org/3b12/60d78885e872cf2223f2c6f3d6f6ea254204.pdf
+3b1aaac41fc7847dd8a6a66d29d8881f75c91ad5,http://www.rci.rutgers.edu/~vmp93/Journal_pub/T-pami_openset.pdf
+3b092733f428b12f1f920638f868ed1e8663fe57,http://www.math.jhu.edu/~data/RamaPapers/PerformanceBounds.pdf
+3b2d5585af59480531616fe970cb265bbdf63f5b,http://pdfs.semanticscholar.org/3b2d/5585af59480531616fe970cb265bbdf63f5b.pdf
+3b64efa817fd609d525c7244a0e00f98feacc8b4,https://arxiv.org/pdf/1502.04383v3.pdf
+3bc776eb1f4e2776f98189e17f0d5a78bb755ef4,http://pdfs.semanticscholar.org/3bc7/76eb1f4e2776f98189e17f0d5a78bb755ef4.pdf
+3b7f6035a113b560760c5e8000540fc46f91fed5,http://www.vision.ee.ethz.ch/~zzhiwu/posters/ICCV13_Poster_ZhiwuHuang_v2.0.pdf
+3b2a2357b12cf0a5c99c8bc06ef7b46e40dd888e,http://pdfs.semanticscholar.org/5141/cf2e59fb2ec9bb489b9c1832447d3cd93110.pdf
+3bd1d41a656c8159305ba2aa395f68f41ab84f31,http://pdfs.semanticscholar.org/3bd1/d41a656c8159305ba2aa395f68f41ab84f31.pdf
+3bcd72be6fbc1a11492df3d36f6d51696fd6bdad,http://pdfs.semanticscholar.org/3bcd/72be6fbc1a11492df3d36f6d51696fd6bdad.pdf
+3b9c08381282e65649cd87dfae6a01fe6abea79b,http://pdfs.semanticscholar.org/3b9c/08381282e65649cd87dfae6a01fe6abea79b.pdf
+3b4fd2aec3e721742f11d1ed4fa3f0a86d988a10,http://nms.csail.mit.edu/papers/sen060-chenA.pdf
+3bc376f29bc169279105d33f59642568de36f17f,http://www.dip.ee.uct.ac.za/~nicolls/publish/sm14-visapp.pdf
+3b38c06caf54f301847db0dd622a6622c3843957,http://pdfs.semanticscholar.org/3b38/c06caf54f301847db0dd622a6622c3843957.pdf
+3b15a48ffe3c6b3f2518a7c395280a11a5f58ab0,http://pdfs.semanticscholar.org/3b15/a48ffe3c6b3f2518a7c395280a11a5f58ab0.pdf
+3b9b200e76a35178da940279d566bbb7dfebb787,http://pdfs.semanticscholar.org/3b9b/200e76a35178da940279d566bbb7dfebb787.pdf
+3be8964cef223698e587b4f71fc0c72c2eeef8cf,https://www.researchgate.net/profile/Mohammad_Reza_Mohammadi3/publication/264394830_Simultaneous_recognition_of_facial_expression_and_identity_via_sparse_representation/links/53df5c5b0cf2a76fb6682872.pdf?origin=publication_list
+3b408a3ca6fb39b0fda4d77e6a9679003b2dc9ab,http://pdfs.semanticscholar.org/3b40/8a3ca6fb39b0fda4d77e6a9679003b2dc9ab.pdf
+3b02aaccc9f063ae696c9d28bb06a8cd84b2abb8,http://pdfs.semanticscholar.org/3b02/aaccc9f063ae696c9d28bb06a8cd84b2abb8.pdf
+3ba8f8b6bfb36465018430ffaef10d2caf3cfa7e,http://www.chennaisunday.com/IEEE%202013%20Dotnet%20Basepaper/Local%20Directional%20Number%20Pattern%20for%20Face%20Analysis%20Face%20and%20Expression%20Recognition.pdf
+3b80bf5a69a1b0089192d73fa3ace2fbb52a4ad5,http://pdfs.semanticscholar.org/3b80/bf5a69a1b0089192d73fa3ace2fbb52a4ad5.pdf
+3b9d94752f8488106b2c007e11c193f35d941e92,http://pdfs.semanticscholar.org/3b9d/94752f8488106b2c007e11c193f35d941e92.pdf
+3bebb79f8f49aa11dd4f6d60d903172db02bf4f3,http://hct.ece.ubc.ca/publications/pdf/oleinikov-etal-wacv2014.pdf
+3b557c4fd6775afc80c2cf7c8b16edde125b270e,https://arxiv.org/pdf/1602.02999v1.pdf
+3b3482e735698819a6a28dcac84912ec01a9eb8a,http://vislab.ee.ucr.edu/PUBLICATIONS/pubs/Journal%20and%20Conference%20Papers/after10-1-1997/Conference/2003/Individual%20Recognition%20Using%20Gait%20Energy%20Image03.pdf
+3b37d95d2855c8db64bd6b1ee5659f87fce36881,http://pdfs.semanticscholar.org/3b37/d95d2855c8db64bd6b1ee5659f87fce36881.pdf
+3bfb9ba4b74b2b952868f590ff2f164de0c7d402,http://qil.uh.edu/qil/websitecontent/pdf/2015-8.pdf
+3be7b7eb11714e6191dd301a696c734e8d07435f,http://pdfs.semanticscholar.org/3be7/b7eb11714e6191dd301a696c734e8d07435f.pdf
+3bd50e33220af76ffc32a7e57688e248843b7f25,http://staff.estem-uc.edu.au/roland/files/2009/05/Ramana_Murthy_Goecke_DICTA2014_TheInfluenceOfTemporalInformationOnHumanActionRecognitionWithLargeNumberOfClasses.pdf
+3be027448ad49a79816cd21dcfcce5f4e1cec8a8,http://www.cs.utexas.edu/~grauman/papers/kovashka_iccv2011.pdf
+3bd56f4cf8a36dd2d754704bcb71415dcbc0a165,http://www.humansensing.cs.cmu.edu/sites/default/files/4robustreg.pdf
+3b410ae97e4564bc19d6c37bc44ada2dcd608552,http://pdfs.semanticscholar.org/3b41/0ae97e4564bc19d6c37bc44ada2dcd608552.pdf
+3b470b76045745c0ef5321e0f1e0e6a4b1821339,http://pdfs.semanticscholar.org/8e72/fa02f2d90ba31f31e0a7aa96a6d3e10a66fc.pdf
+6fa7a1c8a858157deee3b582099e5e234798bb4a,http://biometrics.nist.gov/cs_links/ibpc2014/presentations/14_wednesday_gentric_IBPC14_morpho.pdf
+6f288a12033fa895fb0e9ec3219f3115904f24de,https://arxiv.org/pdf/1511.05204v1.pdf
+6fa0c206873dcc5812f7ea74a48bb4bf4b273494,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2014/W03/papers/Suk_Real-time_Mobile_Facial_2014_CVPR_paper.pdf
+6f9824c5cb5ac08760b08e374031cbdabc953bae,https://eprints.soton.ac.uk/397973/1/PID4351119.pdf
+6f2dc51d607f491dbe6338711c073620c85351ac,http://pdfs.semanticscholar.org/6f2d/c51d607f491dbe6338711c073620c85351ac.pdf
+6fed504da4e192fe4c2d452754d23d3db4a4e5e3,http://pdfs.semanticscholar.org/85ee/d639f7367c794a6d8b38619697af3efaacfe.pdf
+6f957df9a7d3fc4eeba53086d3d154fc61ae88df,http://pdfs.semanticscholar.org/6f95/7df9a7d3fc4eeba53086d3d154fc61ae88df.pdf
+6f0d3610c4ee7b67e9d435d48bc98167761251e8,http://www.cs.washington.edu/homes/wufei/papers/IJCNN.pdf
+6f1a784ebb8df0689361afe26a2e5f7a1f4c66ca,https://www.computer.org/web/csdl/index/-/csdl/proceedings/fg/2013/5545/00/06553757.pdf
+6f26ab7edd971148723d9b4dc8ddf71b36be9bf7,http://pdfs.semanticscholar.org/6f26/ab7edd971148723d9b4dc8ddf71b36be9bf7.pdf
+6f75697a86d23d12a14be5466a41e5a7ffb79fad,https://www.computer.org/csdl/proceedings/icis/2016/0806/00/07550861.pdf
+6f6b4e2885ea1d9bea1bb2ed388b099a5a6d9b81,http://www.vision.ee.ethz.ch/en/publications/papers/proceedings/eth_biwi_01293.pdf
+6f08885b980049be95a991f6213ee49bbf05c48d,http://pdfs.semanticscholar.org/6f08/885b980049be95a991f6213ee49bbf05c48d.pdf
+6f0900a7fe8a774a1977c5f0a500b2898bcbe149,http://pdfs.semanticscholar.org/6f09/00a7fe8a774a1977c5f0a500b2898bcbe149.pdf
+6fbb179a4ad39790f4558dd32316b9f2818cd106,http://pdfs.semanticscholar.org/6fbb/179a4ad39790f4558dd32316b9f2818cd106.pdf
+6f84e61f33564e5188136474f9570b1652a0606f,https://arxiv.org/pdf/1708.00284v1.pdf
+6f35b6e2fa54a3e7aaff8eaf37019244a2d39ed3,http://www.ifp.uiuc.edu/~iracohen/publications/CohenSebeMS05.pdf
+6fa3857faba887ed048a9e355b3b8642c6aab1d8,http://pdfs.semanticscholar.org/6fa3/857faba887ed048a9e355b3b8642c6aab1d8.pdf
+6fda12c43b53c679629473806c2510d84358478f,http://pdfs.semanticscholar.org/6fda/12c43b53c679629473806c2510d84358478f.pdf
+6f7ce89aa3e01045fcd7f1c1635af7a09811a1fe,http://mirlab.org/conference_papers/International_Conference/ICASSP%202012/pdfs/0000937.pdf
+6fe2efbcb860767f6bb271edbb48640adbd806c3,https://eprints.soton.ac.uk/359808/1/version9.pdf
+6f5151c7446552fd6a611bf6263f14e729805ec7,http://pdfs.semanticscholar.org/6f51/51c7446552fd6a611bf6263f14e729805ec7.pdf
+030c82b87e3cdc5ba35c443a93ff4a9d21c2bc2f,http://www.cfar.umd.edu/~shaohua/papers/zhou07tpami_gps.pdf
+03d9ccce3e1b4d42d234dba1856a9e1b28977640,http://pdfs.semanticscholar.org/03d9/ccce3e1b4d42d234dba1856a9e1b28977640.pdf
+036c41d67b49e5b0a578a401eb31e5f46b3624e0,http://www.infomus.org/Events/proceedings/ACII2015/papers/Main_Conference/M2_Poster/Poster_Teaser_5/ACII2015_submission_19.pdf
+03b03f5a301b2ff88ab3bb4969f54fd9a35c7271,http://pdfs.semanticscholar.org/03b0/3f5a301b2ff88ab3bb4969f54fd9a35c7271.pdf
+03f7041515d8a6dcb9170763d4f6debd50202c2b,http://biometrics.cse.msu.edu/Publications/Face/OttoWangJain_ClusteringMillionsOfFacesByIdentity_TPAMI17.pdf
+03b99f5abe0e977ff4c902412c5cb832977cf18e,http://pdfs.semanticscholar.org/03b9/9f5abe0e977ff4c902412c5cb832977cf18e.pdf
+038ce930a02d38fb30d15aac654ec95640fe5cb0,http://www.robots.ox.ac.uk/~tvg/publications/2013/BVGFacialFeatureTrackerMobile.pdf
+03167776e17bde31b50f294403f97ee068515578,http://pdfs.semanticscholar.org/0316/7776e17bde31b50f294403f97ee068515578.pdf
+0334a8862634988cc684dacd4279c5c0d03704da,https://arxiv.org/pdf/1609.06591v1.pdf
+03c1fc9c3339813ed81ad0de540132f9f695a0f8,http://pdfs.semanticscholar.org/03c1/fc9c3339813ed81ad0de540132f9f695a0f8.pdf
+0339459a5b5439d38acd9c40a0c5fea178ba52fb,http://pdfs.semanticscholar.org/0339/459a5b5439d38acd9c40a0c5fea178ba52fb.pdf
+030ef31b51bd4c8d0d8f4a9a32b80b9192fe4c3f,http://pdfs.semanticscholar.org/030e/f31b51bd4c8d0d8f4a9a32b80b9192fe4c3f.pdf
+03f98c175b4230960ac347b1100fbfc10c100d0c,http://courses.cs.washington.edu/courses/cse590v/13au/intraface.pdf
+0323b618d3a4c24bdda4f42361e19a2a7d497da5,http://www.ecse.rpi.edu/homepages/qji/Papers/Simultaneous%20Paper_TIP_Revised_V4_email.pdf
+03264e2e2709d06059dd79582a5cc791cbef94b1,http://pdfs.semanticscholar.org/0326/4e2e2709d06059dd79582a5cc791cbef94b1.pdf
+03dba79518434ba4a937b2980fbdc8bafc048b36,http://people.ee.duke.edu/~jh313/resource/TRAIT.pdf
+03a8f53058127798bc2bc0245d21e78354f6c93b,http://www.robots.ox.ac.uk/~vgg/rg/slides/additiveclassifiers.pdf
+03fc466fdbc8a2efb6e3046fcc80e7cb7e86dc20,https://ias.in.tum.de/_media/spezial/bib/mayer08arealtime.pdf
+0363e93d49d2a3dbe057cc7754825ebf30f0f816,http://nichol.as/papers/Everingham/Identifying%20individuals%20in%20video%20by%20combining%20generative.pdf
+03b98b4a2c0b7cc7dae7724b5fe623a43eaf877b,http://pdfs.semanticscholar.org/03b9/8b4a2c0b7cc7dae7724b5fe623a43eaf877b.pdf
+03adcf58d947a412f3904a79f2ab51cfdf0e838a,http://pdfs.semanticscholar.org/03ad/cf58d947a412f3904a79f2ab51cfdf0e838a.pdf
+03104f9e0586e43611f648af1132064cadc5cc07,http://pdfs.semanticscholar.org/51c0/2f135d6c960b1141bde539059a279f9beb78.pdf
+03f14159718cb495ca50786f278f8518c0d8c8c9,http://www.acscrg.com/iccsce/2015/wp-content/uploads/2015/11/The-Latest-Schedule-23-Nov-2015.pdf
+0394040749195937e535af4dda134206aa830258,http://web.eecs.umich.edu/~hero/Preprints/sp_mlsi_submitted_revised2.pdf
+0334cc0374d9ead3dc69db4816d08c917316c6c4,http://pdfs.semanticscholar.org/0334/cc0374d9ead3dc69db4816d08c917316c6c4.pdf
+03c48d8376990cff9f541d542ef834728a2fcda2,http://dvmmweb.cs.columbia.edu/files/dvmm_scnn_paper.pdf
+0319332ded894bf1afe43f174f5aa405b49305f0,http://pdfs.semanticscholar.org/0319/332ded894bf1afe43f174f5aa405b49305f0.pdf
+03af8cf40283ff30f1da3637b024319d0c79bdf0,https://www.researchgate.net/profile/Gary_Mckeown/publication/224251574_The_Belfast_Induced_Natural_Emotion_Database/links/0fcfd510a6b4384822000000.pdf
+03baf00a3d00887dd7c828c333d4a29f3aacd5f5,http://pdfs.semanticscholar.org/03ba/f00a3d00887dd7c828c333d4a29f3aacd5f5.pdf
+0359f7357ea8191206b9da45298902de9f054c92,http://arxiv.org/pdf/1511.04110v1.pdf
+0394e684bd0a94fc2ff09d2baef8059c2652ffb0,http://www.ee.oulu.fi/research/mvmp/mvg/files/pdf/TIP2522378.pdf
+03e88bf3c5ddd44ebf0e580d4bd63072566613ad,http://pdfs.semanticscholar.org/03e8/8bf3c5ddd44ebf0e580d4bd63072566613ad.pdf
+03f4c0fe190e5e451d51310bca61c704b39dcac8,http://pdfs.semanticscholar.org/03f4/c0fe190e5e451d51310bca61c704b39dcac8.pdf
+03bd58a96f635059d4bf1a3c0755213a51478f12,https://arxiv.org/pdf/1401.7413v2.pdf
+031055c241b92d66b6984643eb9e05fd605f24e2,https://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Cinbis_Multi-fold_MIL_Training_2014_CVPR_paper.pdf
+0332ae32aeaf8fdd8cae59a608dc8ea14c6e3136,https://ibug.doc.ic.ac.uk/media/uploads/documents/booth2017large.pdf
+032a1c95388fb5c6e6016dd8597149be40bc9d4d,http://people.eecs.berkeley.edu/~gkioxari/ActionTubes/action_tubes.pdf
+034addac4637121e953511301ef3a3226a9e75fd,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Parikh_Implied_Feedback_Learning_2013_ICCV_paper.pdf
+03701e66eda54d5ab1dc36a3a6d165389be0ce79,http://www.eem.anadolu.edu.tr/atalaybarkan/EEM%20405%20(K)/icerik/improved%20pcr.pdf
+034c2ed71c31cb0d984d66c7ca753ef2cb6196ca,http://pdfs.semanticscholar.org/034c/2ed71c31cb0d984d66c7ca753ef2cb6196ca.pdf
+9b318098f3660b453fbdb7a579778ab5e9118c4c,http://humansensing.cs.cmu.edu/sites/default/files/07471506.pdf
+9be94fa0330dd493f127d51e4ef7f9fd64613cfc,http://pdfs.semanticscholar.org/9be9/4fa0330dd493f127d51e4ef7f9fd64613cfc.pdf
+9b000ccc04a2605f6aab867097ebf7001a52b459,http://pdfs.semanticscholar.org/9b00/0ccc04a2605f6aab867097ebf7001a52b459.pdf
+9b0489f2d5739213ef8c3e2e18739c4353c3a3b7,http://pdfs.semanticscholar.org/9b04/89f2d5739213ef8c3e2e18739c4353c3a3b7.pdf
+9b474d6e81e3b94e0c7881210e249689139b3e04,http://pdfs.semanticscholar.org/a43c/c0c2f1d0e29cf1ee88f3bde4289a94b70409.pdf
+9b928c0c7f5e47b4480cb9bfdf3d5b7a29dfd493,http://www.ifp.illinois.edu/~jyang29/papers/JRR_ICCV11.pdf
+9bcfadd22b2c84a717c56a2725971b6d49d3a804,http://pdfs.semanticscholar.org/9bcf/add22b2c84a717c56a2725971b6d49d3a804.pdf
+9b07084c074ba3710fee59ed749c001ae70aa408,http://pdfs.semanticscholar.org/9b07/084c074ba3710fee59ed749c001ae70aa408.pdf
+9b246c88a0435fd9f6d10dc88f47a1944dd8f89e,http://pdfs.semanticscholar.org/ffe3/a5a7c0faebd1719f7c77b5f7e05cae61a9ad.pdf
+9b164cef4b4ad93e89f7c1aada81ae7af802f3a4,http://pdfs.semanticscholar.org/9b16/4cef4b4ad93e89f7c1aada81ae7af802f3a4.pdf
+9b93406f3678cf0f16451140ea18be04784faeee,http://pdfs.semanticscholar.org/9b93/406f3678cf0f16451140ea18be04784faeee.pdf
+9b7974d9ad19bb4ba1ea147c55e629ad7927c5d7,http://pdfs.semanticscholar.org/9b79/74d9ad19bb4ba1ea147c55e629ad7927c5d7.pdf
+9b6d0b3fbf7d07a7bb0d86290f97058aa6153179,http://pdfs.semanticscholar.org/9b6d/0b3fbf7d07a7bb0d86290f97058aa6153179.pdf
+9e8637a5419fec97f162153569ec4fc53579c21e,http://pdfs.semanticscholar.org/9e86/37a5419fec97f162153569ec4fc53579c21e.pdf
+9e4b052844d154c3431120ec27e78813b637b4fc,http://pdfs.semanticscholar.org/9e4b/052844d154c3431120ec27e78813b637b4fc.pdf
+9e42d44c07fbd800f830b4e83d81bdb9d106ed6b,http://openaccess.thecvf.com/content_ICCV_2017/papers/Rao_Learning_Discriminative_Aggregation_ICCV_2017_paper.pdf
+9eb86327c82b76d77fee3fd72e2d9eff03bbe5e0,http://pdfs.semanticscholar.org/9eb8/6327c82b76d77fee3fd72e2d9eff03bbe5e0.pdf
+9ea73660fccc4da51c7bc6eb6eedabcce7b5cead,http://pdfs.semanticscholar.org/9ea7/3660fccc4da51c7bc6eb6eedabcce7b5cead.pdf
+9e9052256442f4e254663ea55c87303c85310df9,http://pdfs.semanticscholar.org/9e90/52256442f4e254663ea55c87303c85310df9.pdf
+9eeada49fc2cba846b4dad1012ba8a7ee78a8bb7,http://pdfs.semanticscholar.org/9eea/da49fc2cba846b4dad1012ba8a7ee78a8bb7.pdf
+9ef2b2db11ed117521424c275c3ce1b5c696b9b3,http://pdfs.semanticscholar.org/c31b/dd00734807938dcfd8a12375bd9ffa556985.pdf
+9e5acdda54481104aaf19974dca6382ed5ff21ed,http://pdfs.semanticscholar.org/dd52/0f2ebcf8034cb168ab4e82acec9a69fe0188.pdf
+9ed943f143d2deaac2efc9cf414b3092ed482610,http://www.jaist.ac.jp/~chen-fan/publication/ism2014-07032993.pdf
+9e1c3b8b1653337094c1b9dba389e8533bc885b0,http://pdfs.semanticscholar.org/9e1c/3b8b1653337094c1b9dba389e8533bc885b0.pdf
+9e0285debd4b0ba7769b389181bd3e0fd7a02af6,http://pdfs.semanticscholar.org/9e02/85debd4b0ba7769b389181bd3e0fd7a02af6.pdf
+9ed4ad41cbad645e7109e146ef6df73f774cd75d,http://pdfs.semanticscholar.org/a83e/175ad5b2066e207f5d2ec830ae05bac266b9.pdf
+9e8d87dc5d8a6dd832716a3f358c1cdbfa97074c,https://people.csail.mit.edu/khosla/papers/www2014_khosla.pdf
+9e5c2d85a1caed701b68ddf6f239f3ff941bb707,http://pdfs.semanticscholar.org/ada4/4aa744f9703cacfcd0028372a2b1684a45a3.pdf
+044d9a8c61383312cdafbcc44b9d00d650b21c70,https://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_iccv_2013_300_w.pdf
+04bb3fa0824d255b01e9db4946ead9f856cc0b59,http://pdfs.semanticscholar.org/c1de/db5ac05c955e53d7ef1f6367fb7badea49b1.pdf
+04f0292d9a062634623516edd01d92595f03bd3f,http://www.cs.nott.ac.uk/~mfv/Documents/emotiw2013_cameraready.pdf
+047f6afa87f48de7e32e14229844d1587185ce45,http://pdfs.semanticscholar.org/047f/6afa87f48de7e32e14229844d1587185ce45.pdf
+04522dc16114c88dfb0ebd3b95050fdbd4193b90,http://www.svcl.ucsd.edu/publications/conference/2005/crv05/FES.pdf
+04470861408d14cc860f24e73d93b3bb476492d0,http://pdfs.semanticscholar.org/0447/0861408d14cc860f24e73d93b3bb476492d0.pdf
+0486214fb58ee9a04edfe7d6a74c6d0f661a7668,http://conradsanderson.id.au/pdfs/wong_face_selection_cvpr_biometrics_2011.pdf
+0447bdb71490c24dd9c865e187824dee5813a676,http://pdfs.semanticscholar.org/0447/bdb71490c24dd9c865e187824dee5813a676.pdf
+0435a34e93b8dda459de49b499dd71dbb478dc18,http://pdfs.semanticscholar.org/0435/a34e93b8dda459de49b499dd71dbb478dc18.pdf
+044ba70e6744e80c6a09fa63ed6822ae241386f2,http://pdfs.semanticscholar.org/044b/a70e6744e80c6a09fa63ed6822ae241386f2.pdf
+04661729f0ff6afe4b4d6223f18d0da1d479accf,https://arxiv.org/pdf/1509.06451v1.pdf
+04dcdb7cb0d3c462bdefdd05508edfcff5a6d315,http://pdfs.semanticscholar.org/04dc/db7cb0d3c462bdefdd05508edfcff5a6d315.pdf
+044fdb693a8d96a61a9b2622dd1737ce8e5ff4fa,http://www.ee.oulu.fi/mvg/files/pdf/pdf_740.pdf
+04f55f81bbd879773e2b8df9c6b7c1d324bc72d8,http://pdfs.semanticscholar.org/04f5/5f81bbd879773e2b8df9c6b7c1d324bc72d8.pdf
+04250e037dce3a438d8f49a4400566457190f4e2,http://pdfs.semanticscholar.org/0425/0e037dce3a438d8f49a4400566457190f4e2.pdf
+0431e8a01bae556c0d8b2b431e334f7395dd803a,https://people.cs.umass.edu/~smaji/papers/localized-wacv15.pdf
+04616814f1aabe3799f8ab67101fbaf9fd115ae4,http://pdfs.semanticscholar.org/0461/6814f1aabe3799f8ab67101fbaf9fd115ae4.pdf
+04c5268d7a4e3819344825e72167332240a69717,http://longwood.cs.ucf.edu/~vision/papers/cvpr2008/7.pdf
+04c2cda00e5536f4b1508cbd80041e9552880e67,http://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf
+04644c97784700c449f2c885cb4cab86447f0bd4,http://www.seekdl.org/upload/files/20131209_014911.pdf
+04ff69aa20da4eeccdabbe127e3641b8e6502ec0,http://www.cv-foundation.org/openaccess/content_cvpr_2016_workshops/w28/papers/Peng_Sequential_Face_Alignment_CVPR_2016_paper.pdf
+046a694bbb3669f2ff705c6c706ca3af95db798c,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Xiong_Conditional_Convolutional_Neural_ICCV_2015_paper.pdf
+047d7cf4301cae3d318468fe03a1c4ce43b086ed,http://webee.technion.ac.il/~yoav/publications/Delforge_taslp14R2.pdf
+046865a5f822346c77e2865668ec014ec3282033,http://www.csie.ntu.edu.tw/~winston/papers/chen12discovering.pdf
+042825549296ea419d95fcf0b5e71f72070a5f0d,http://eprints.pascal-network.org/archive/00008397/01/paper.pdf
+0470b0ab569fac5bbe385fa5565036739d4c37f8,https://hal.inria.fr/inria-00321048/file/verbeek08cvpr.pdf
+6a3a07deadcaaab42a0689fbe5879b5dfc3ede52,http://pdfs.semanticscholar.org/6a3a/07deadcaaab42a0689fbe5879b5dfc3ede52.pdf
+6a67e6fbbd9bcd3f724fe9e6cecc9d48d1b6ad4d,http://pdfs.semanticscholar.org/6a67/e6fbbd9bcd3f724fe9e6cecc9d48d1b6ad4d.pdf
+6afed8dc29bc568b58778f066dc44146cad5366c,http://pdfs.semanticscholar.org/6afe/d8dc29bc568b58778f066dc44146cad5366c.pdf
+6a184f111d26787703f05ce1507eef5705fdda83,http://pdfs.semanticscholar.org/6a18/4f111d26787703f05ce1507eef5705fdda83.pdf
+6a16b91b2db0a3164f62bfd956530a4206b23fea,http://pdfs.semanticscholar.org/6a16/b91b2db0a3164f62bfd956530a4206b23fea.pdf
+6a806978ca5cd593d0ccd8b3711b6ef2a163d810,http://pdfs.semanticscholar.org/6a80/6978ca5cd593d0ccd8b3711b6ef2a163d810.pdf
+6a8a3c604591e7dd4346611c14dbef0c8ce9ba54,http://pdfs.semanticscholar.org/6a8a/3c604591e7dd4346611c14dbef0c8ce9ba54.pdf
+6aa43f673cc42ed2fa351cbc188408b724cb8d50,http://pdfs.semanticscholar.org/6aa4/3f673cc42ed2fa351cbc188408b724cb8d50.pdf
+6a2b83c4ae18651f1a3496e48a35b0cd7a2196df,http://openaccess.thecvf.com/content_iccv_2015/papers/Song_Top_Rank_Supervised_ICCV_2015_paper.pdf
+6a5fe819d2b72b6ca6565a0de117c2b3be448b02,http://pdfs.semanticscholar.org/6a5f/e819d2b72b6ca6565a0de117c2b3be448b02.pdf
+6afeb764ee97fbdedfa8f66810dfc22feae3fa1f,http://pdfs.semanticscholar.org/928c/dc2049462f66460dc30aef5aaaa15e427d12.pdf
+6aa61d28750629febe257d1cb69379e14c66c67f,http://pdfs.semanticscholar.org/6aa6/1d28750629febe257d1cb69379e14c66c67f.pdf
+6ae96f68187f1cdb9472104b5431ec66f4b2470f,http://pdfs.semanticscholar.org/6ae9/6f68187f1cdb9472104b5431ec66f4b2470f.pdf
+6af65e2a1eba6bd62843e7bf717b4ccc91bce2b8,http://pdfs.semanticscholar.org/6af6/5e2a1eba6bd62843e7bf717b4ccc91bce2b8.pdf
+6a657995b02bc9dee130701138ea45183c18f4ae,http://pdfs.semanticscholar.org/6a65/7995b02bc9dee130701138ea45183c18f4ae.pdf
+6a0368b4e132f4aa3bbdeada8d894396f201358a,http://pdfs.semanticscholar.org/6a03/68b4e132f4aa3bbdeada8d894396f201358a.pdf
+6ab33fa51467595f18a7a22f1d356323876f8262,http://www.iis.sinica.edu.tw/~kuangyu/OHRank_files/0523.pdf
+6ae75eaa7e9f1379338eae94fbb43664bb3c898a,https://www.researchgate.net/profile/Beom_Seok_Oh/publication/254016039_Fusion_of_structured_projections_for_cancelable_face_identity_verification/links/559156c108ae15962d8e145e.pdf?origin=publication_detail
+6aefe7460e1540438ffa63f7757c4750c844764d,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Nascimento_Non-rigid_Segmentation_using_2014_CVPR_paper.pdf
+6ab8f2081b1420a6214a6c127e5828c14979d414,http://pdfs.semanticscholar.org/6ab8/f2081b1420a6214a6c127e5828c14979d414.pdf
+6a38c575733b0f7118970238e8f9b480522a2dbc,http://pdfs.semanticscholar.org/fbee/265a61fd5ec15a6ed8f490a8fd8d3359506e.pdf
+6a4ebd91c4d380e21da0efb2dee276897f56467a,http://ibug.doc.ic.ac.uk/media/uploads/documents/07025044.pdf
+6a1beb34a2dfcdf36ae3c16811f1aef6e64abff2,http://pdfs.semanticscholar.org/6a1b/eb34a2dfcdf36ae3c16811f1aef6e64abff2.pdf
+6a7e464464f70afea78552c8386f4d2763ea1d9c,http://pdfs.semanticscholar.org/6a7e/464464f70afea78552c8386f4d2763ea1d9c.pdf
+32925200665a1bbb4fc8131cd192cb34c2d7d9e3,http://pdfs.semanticscholar.org/3292/5200665a1bbb4fc8131cd192cb34c2d7d9e3.pdf
+322c063e97cd26f75191ae908f09a41c534eba90,https://jurie.users.greyc.fr/papers/12_SEMATR_IJCV.pdf
+325b048ecd5b4d14dce32f92bff093cd744aa7f8,http://pdfs.semanticscholar.org/325b/048ecd5b4d14dce32f92bff093cd744aa7f8.pdf
+32f7e1d7fa62b48bedc3fcfc9d18fccc4074d347,https://arxiv.org/pdf/1410.1606v2.pdf
+32d8e555441c47fc27249940991f80502cb70bd5,https://arxiv.org/pdf/1709.07886v1.pdf
+32d555faaaa0a6f6f9dfc9263e4dba75a38c3193,http://pdfs.semanticscholar.org/e119/eeee5025235c6f8dacc7c1812c0c52d595b9.pdf
+324f39fb5673ec2296d90142cf9a909e595d82cf,http://pdfs.semanticscholar.org/324f/39fb5673ec2296d90142cf9a909e595d82cf.pdf
+321bd4d5d80abb1bae675a48583f872af3919172,http://pdfs.semanticscholar.org/321b/d4d5d80abb1bae675a48583f872af3919172.pdf
+32b8c9fd4e3f44c371960eb0074b42515f318ee7,http://pdfs.semanticscholar.org/32b8/c9fd4e3f44c371960eb0074b42515f318ee7.pdf
+32575ffa69d85bbc6aef5b21d73e809b37bf376d,http://www.sce.carleton.ca/faculty/adler//publications/2006/youmaran-adler-bcc2006-quality.pdf
+32ecbbd76fdce249f9109594eee2d52a1cafdfc7,http://pdfs.semanticscholar.org/32ec/bbd76fdce249f9109594eee2d52a1cafdfc7.pdf
+32c20afb5c91ed7cdbafb76408c3a62b38dd9160,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Hassner_Viewing_Real-World_Faces_2013_ICCV_paper.pdf
+32a40c43a9bc1f1c1ed10be3b9f10609d7e0cb6b,http://pdfs.semanticscholar.org/32a4/0c43a9bc1f1c1ed10be3b9f10609d7e0cb6b.pdf
+329394480fc5e9e96de4250cc1a2b060c3677c94,https://arxiv.org/pdf/1604.08826v1.pdf
+326613b5528b7806d6a06f43211800b54f34965e,http://mplab.ucsd.edu/wp-content/uploads/cvpr2008/conference/data/papers/377.pdf
+32728e1eb1da13686b69cc0bd7cce55a5c963cdd,http://pdfs.semanticscholar.org/3272/8e1eb1da13686b69cc0bd7cce55a5c963cdd.pdf
+32c9ebd2685f522821eddfc19c7c91fd6b3caf22,http://pdfs.semanticscholar.org/32c9/ebd2685f522821eddfc19c7c91fd6b3caf22.pdf
+322b7a4ce006e4d14748dd064e80ffba573ebcd7,http://cheonji.kaist.ac.kr/pdfsrc/ic/2008_KHAn_ROMAN.pdf
+3270b2672077cc345f188500902eaf7809799466,http://pdfs.semanticscholar.org/3270/b2672077cc345f188500902eaf7809799466.pdf
+321c8ba38db118d8b02c0ba209be709e6792a2c7,http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W11/papers/Yan_Learn_to_Combine_2013_ICCV_paper.pdf
+324b9369a1457213ec7a5a12fe77c0ee9aef1ad4,http://research.nvidia.com/sites/default/files/pubs/2017-07_Dynamic-Facial-Analysis/rnnface.pdf
+329d58e8fb30f1bf09acb2f556c9c2f3e768b15c,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Wu_Leveraging_Intra_and_CVPR_2017_paper.pdf
+32df63d395b5462a8a4a3c3574ae7916b0cd4d1d,http://www.ppgia.pucpr.br/~alekoe/Papers/ALEKOE-FacialExpression-ICASSP2011.pdf
+35308a3fd49d4f33bdbd35fefee39e39fe6b30b7,https://biblio.ugent.be/publication/7238034/file/7238038.pdf
+353b6c1f431feac6edde12b2dde7e6e702455abd,http://pdfs.semanticscholar.org/8835/c80f8ad8ebd05771a9bce5a8637efbc4c8e3.pdf
+350da18d8f7455b0e2920bc4ac228764f8fac292,http://pdfs.semanticscholar.org/b1b1/19c94c8bf94da5c9974db537e356e4f80c67.pdf
+3538d2b5f7ab393387ce138611ffa325b6400774,http://pdfs.semanticscholar.org/3538/d2b5f7ab393387ce138611ffa325b6400774.pdf
+3504907a2e3c81d78e9dfe71c93ac145b1318f9c,https://arxiv.org/pdf/1605.02686v3.pdf
+35f03f5cbcc21a9c36c84e858eeb15c5d6722309,http://www.ee.columbia.edu/ln/dvmm/publications/16/ACMMMVP_VAH_2016.pdf
+35e4b6c20756cd6388a3c0012b58acee14ffa604,http://pdfs.semanticscholar.org/35e4/b6c20756cd6388a3c0012b58acee14ffa604.pdf
+356b431d4f7a2a0a38cf971c84568207dcdbf189,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Xiong_Recognize_Complex_Events_2015_CVPR_paper.pdf
+35f921def890210dda4b72247849ad7ba7d35250,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Zhou_Exemplar-Based_Graph_Matching_2013_ICCV_paper.pdf
+357963a46dfc150670061dbc23da6ba7d6da786e,http://pdfs.semanticscholar.org/3579/63a46dfc150670061dbc23da6ba7d6da786e.pdf
+35ec9b8811f2d755c7ad377bdc29741b55b09356,http://pdfs.semanticscholar.org/35ec/9b8811f2d755c7ad377bdc29741b55b09356.pdf
+35f1bcff4552632419742bbb6e1927ef5e998eb4,https://arxiv.org/pdf/1703.02521v1.pdf
+35c973dba6e1225196566200cfafa150dd231fa8,http://pdfs.semanticscholar.org/8af7/72ea2389b555c0b193624add6a1c5a49ff24.pdf
+35a39c7da14b1d288c0f9201374b307f667d63a3,http://media.au.tsinghua.edu.cn/liuyebin_files/TMM.pdf
+35f084ddee49072fdb6e0e2e6344ce50c02457ef,https://dash.harvard.edu/bitstream/handle/1/4238979/Lee_Bilinear.pdf?sequence=2
+352c53e56c52a49d33dcdbec5690c2ba604b07d0,http://www.cs.huji.ac.il/~zweiga/Alons_Zweig_Hompage/Homepage_files/Zweig_ICCV7.pdf
+3505c9b0a9631539e34663310aefe9b05ac02727,https://ibug.doc.ic.ac.uk/media/uploads/documents/pid4666647.pdf
+3506518d616343d3083f4fe257a5ee36b376b9e1,http://disi.unitn.it/~zen/data/icmi14_personalized.pdf
+353a89c277cca3e3e4e8c6a199ae3442cdad59b5,http://pdfs.semanticscholar.org/353a/89c277cca3e3e4e8c6a199ae3442cdad59b5.pdf
+35e87e06cf19908855a16ede8c79a0d3d7687b5c,http://pdfs.semanticscholar.org/35e8/7e06cf19908855a16ede8c79a0d3d7687b5c.pdf
+352110778d2cc2e7110f0bf773398812fd905eb1,http://www.ca.cs.cmu.edu/sites/default/files/complete_14.pdf
+351158e4481e3197bd63acdafd73a5df8336143b,http://pdfs.semanticscholar.org/3511/58e4481e3197bd63acdafd73a5df8336143b.pdf
+35490b021dcdec12882870a31dce9a687205ab5c,http://www.ecse.rpi.edu/homepages/qji/Papers/BN_learning_CVPR08.pdf
+697b0b9630213ca08a1ae1d459fabc13325bdcbb,http://pdfs.semanticscholar.org/697b/0b9630213ca08a1ae1d459fabc13325bdcbb.pdf
+69ff40fd5ce7c3e6db95a2b63d763edd8db3a102,http://pdfs.semanticscholar.org/69ff/40fd5ce7c3e6db95a2b63d763edd8db3a102.pdf
+69d29012d17cdf0a2e59546ccbbe46fa49afcd68,https://arxiv.org/pdf/1404.6818v1.pdf
+69a68f9cf874c69e2232f47808016c2736b90c35,http://personal.ie.cuhk.edu.hk/~ccloy/files/cvpr_2016_imbalanced.pdf
+69de532d93ad8099f4d4902c4cad28db958adfea,http://pdfs.semanticscholar.org/e6bc/c30d2be78797e0e2506567bc0f09b8eae21a.pdf
+69b18d62330711bfd7f01a45f97aaec71e9ea6a5,http://pdfs.semanticscholar.org/69b1/8d62330711bfd7f01a45f97aaec71e9ea6a5.pdf
+69526cdf6abbfc4bcd39616acde544568326d856,http://speech.iiit.ac.in/svlpubs/article/SaoA.K.Yegna2007.pdf
+690d669115ad6fabd53e0562de95e35f1078dfbb,http://pdfs.semanticscholar.org/690d/669115ad6fabd53e0562de95e35f1078dfbb.pdf
+69eb6c91788e7c359ddd3500d01fb73433ce2e65,http://pdfs.semanticscholar.org/69eb/6c91788e7c359ddd3500d01fb73433ce2e65.pdf
+69063f7e0a60ad6ce16a877bc8f11b59e5f7348e,http://openaccess.thecvf.com/content_iccv_2015/papers/Anwar_Class-Specific_Image_Deblurring_ICCV_2015_paper.pdf
+69a9da55bd20ce4b83e1680fbc6be2c976067631,http://pdfs.semanticscholar.org/a9b4/d257d16e876302e3318ade42fcb2ab9ffdf9.pdf
+69c2ac04693d53251500557316c854a625af84ee,http://pdfs.semanticscholar.org/dc97/ceb1faf945e780a92be651b022a82e3bff5a.pdf
+6974449ce544dc208b8cc88b606b03d95c8fd368,https://ibug.doc.ic.ac.uk/media/uploads/documents/martinezvalstar-pami_final.pdf
+69fb98e11df56b5d7ec7d45442af274889e4be52,http://pdfs.semanticscholar.org/69fb/98e11df56b5d7ec7d45442af274889e4be52.pdf
+3c78b642289d6a15b0fb8a7010a1fb829beceee2,http://pdfs.semanticscholar.org/3c78/b642289d6a15b0fb8a7010a1fb829beceee2.pdf
+3cc3cf57326eceb5f20a02aefae17108e8c8ab57,http://pdfs.semanticscholar.org/3cc3/cf57326eceb5f20a02aefae17108e8c8ab57.pdf
+3c1f5580a66c9624c77f27ab8e4cf0d1b3d9d171,http://research.microsoft.com/en-us/um/people/luyuan/paper/skyfinder_siggraph09.pdf
+3c63fa505a44902f13698ec10d7f259b1d0878ee,http://www.ece.ucr.edu/~amitrc/publications/TMM2015.pdf
+3cb488a3b71f221a8616716a1fc2b951dd0de549,http://cse.seu.edu.cn/people/xgeng/LDL/resource/icpr14.pdf
+3cfbe1f100619a932ba7e2f068cd4c41505c9f58,http://pdfs.semanticscholar.org/3cfb/e1f100619a932ba7e2f068cd4c41505c9f58.pdf
+3c03d95084ccbe7bf44b6d54151625c68f6e74d0,http://pdfs.semanticscholar.org/3c03/d95084ccbe7bf44b6d54151625c68f6e74d0.pdf
+3c57e28a4eb463d532ea2b0b1ba4b426ead8d9a0,http://pdfs.semanticscholar.org/73cc/fdedbd7d72a147925727ba1932f9488cfde3.pdf
+3cd9b0a61bdfa1bb8a0a1bf0369515a76ecd06e3,http://pdfs.semanticscholar.org/51f7/3cfcc6d671bd99b5c3c512ff9b7bb959f33b.pdf
+3c97c32ff575989ef2869f86d89c63005fc11ba9,http://people.cs.umass.edu/~hzjiang/pubs/face_det_fg_2017.pdf
+3c47022955c3274250630b042b53d3de2df8eeda,http://research.microsoft.com/en-us/um/people/leizhang/paper/cvpr05-shuicheng-discriminant.pdf
+3cd5b1d71c1d6a50fcc986589f2d0026c68d9803,http://www.openu.ac.il/home/hassner/projects/siftscales/OnSiftsAndTheirScales-CVPR12.pdf
+3ce2ecf3d6ace8d80303daf67345be6ec33b3a93,http://pdfs.semanticscholar.org/3ce2/ecf3d6ace8d80303daf67345be6ec33b3a93.pdf
+3c374cb8e730b64dacb9fbf6eb67f5987c7de3c8,http://pdfs.semanticscholar.org/3c37/4cb8e730b64dacb9fbf6eb67f5987c7de3c8.pdf
+3c0bbfe664fb083644301c67c04a7f1331d9515f,http://pdfs.semanticscholar.org/3c0b/bfe664fb083644301c67c04a7f1331d9515f.pdf
+3cc3e01ac1369a0d1aa88fedda61d3c99a98b890,http://mi.eng.cam.ac.uk/~bdrs2/papers/mita_pami08.pdf
+3c4f6d24b55b1fd3c5b85c70308d544faef3f69a,http://pdfs.semanticscholar.org/3c4f/6d24b55b1fd3c5b85c70308d544faef3f69a.pdf
+3cb0ef5aabc7eb4dd8d32a129cb12b3081ef264f,http://pdfs.semanticscholar.org/3cb0/ef5aabc7eb4dd8d32a129cb12b3081ef264f.pdf
+3cb64217ca2127445270000141cfa2959c84d9e7,http://staff.estem-uc.edu.au/roland/files/2009/05/Joshi_Goecke_Parker_Breakspear_FG2013_CanBodyExpressionsContributeToAutomaticDepressionAnalysis.pdf
+3c11a1f2bd4b9ce70f699fb6ad6398171a8ad3bd,http://pdfs.semanticscholar.org/3c11/a1f2bd4b9ce70f699fb6ad6398171a8ad3bd.pdf
+3cd8ab6bb4b038454861a36d5396f4787a21cc68,http://pdfs.semanticscholar.org/3cd8/ab6bb4b038454861a36d5396f4787a21cc68.pdf
+3cd5da596060819e2b156e8b3a28331ef633036b,http://pdfs.semanticscholar.org/3cd5/da596060819e2b156e8b3a28331ef633036b.pdf
+3ca5d3b8f5f071148cb50f22955fd8c1c1992719,http://pdfs.semanticscholar.org/3ca5/d3b8f5f071148cb50f22955fd8c1c1992719.pdf
+3cc46bf79fb9225cf308815c7d41c8dd5625cc29,http://poseidon.csd.auth.gr/papers/PUBLISHED/CONFERENCE/pdf/2016/Pantraki2016.pdf
+3c8da376576938160cbed956ece838682fa50e9f,http://shodhganga.inflibnet.ac.in/bitstream/10603/49167/11/11_chapter%204.pdf
+56e4dead93a63490e6c8402a3c7adc493c230da5,http://pdfs.semanticscholar.org/56e4/dead93a63490e6c8402a3c7adc493c230da5.pdf
+56e885b9094391f7d55023a71a09822b38b26447,http://pdfs.semanticscholar.org/56e8/85b9094391f7d55023a71a09822b38b26447.pdf
+56c700693b63e3da3b985777da6d9256e2e0dc21,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1A_079.pdf
+56359d2b4508cc267d185c1d6d310a1c4c2cc8c2,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Li_Shape_Driven_Kernel_2015_CVPR_paper.pdf
+56e6f472090030a6f172a3e2f46ef9daf6cad757,http://pdfs.semanticscholar.org/56e6/f472090030a6f172a3e2f46ef9daf6cad757.pdf
+56e03f8fcd16332f764352ba6e72c9c5092cac0f,http://www.cs.utexas.edu/~ssi/DHE.pdf
+564d4ee76c0511bc395dfc8ef8e3b3867fc34a6d,http://bcmi.sjtu.edu.cn/~pengyong/Pub2015/CCECE2015.pdf
+56a653fea5c2a7e45246613049fb16b1d204fc96,http://ieeeprojectsmadurai.com/matlab2016base/Quaternion%20Collaborative%20and%20Sparse%20Representation.pdf
+56f86bef26209c85f2ef66ec23b6803d12ca6cd6,https://arxiv.org/pdf/1710.00307v1.pdf
+5666ed763698295e41564efda627767ee55cc943,http://i.cs.hku.hk/~kykwong/publications/zkuang_ijcv15.pdf
+566a39d753c494f57b4464d6bde61bf3593f7ceb,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2013/W01/papers/Hassner_A_Critical_Review_2013_CVPR_paper.pdf
+56c2fb2438f32529aec604e6fc3b06a595ddbfcc,http://pdfs.semanticscholar.org/56c2/fb2438f32529aec604e6fc3b06a595ddbfcc.pdf
+56f231fc40424ed9a7c93cbc9f5a99d022e1d242,http://pdfs.semanticscholar.org/d060/f2f3641c6a89ade021eea749414a5c6b443f.pdf
+5615d6045301ecbc5be35e46cab711f676aadf3a,https://arxiv.org/pdf/1705.10420v1.pdf
+561ae67de137e75e9642ab3512d3749b34484310,http://pdfs.semanticscholar.org/561a/e67de137e75e9642ab3512d3749b34484310.pdf
+568cff415e7e1bebd4769c4a628b90db293c1717,http://pdfs.semanticscholar.org/568c/ff415e7e1bebd4769c4a628b90db293c1717.pdf
+564035f1b8f06e9bb061255f40e3139fa57ea879,http://pdfs.semanticscholar.org/fcbf/61524a3d775947ea8bcef46d1b0a9cce7bfb.pdf
+560e0e58d0059259ddf86fcec1fa7975dee6a868,http://www.cs.tau.ac.il/~wolf/papers/lvfw.pdf
+56c0b225fd57cfe173e5206a4bb0ce153bfecc29,http://www.sfu.ca/~wya16/ProfileFG08.pdf
+566038a3c2867894a08125efe41ef0a40824a090,http://mirlab.org/conference_papers/international_conference/icassp%202009/pdfs/0001945.pdf
+56ae6d94fc6097ec4ca861f0daa87941d1c10b70,http://pdfs.semanticscholar.org/56ae/6d94fc6097ec4ca861f0daa87941d1c10b70.pdf
+56f812661c3248ed28859d3b2b39e033b04ae6ae,http://www.cs.wisc.edu/~gdguo/myPapersOnWeb/CIVR08.pdf
+512befa10b9b704c9368c2fbffe0dc3efb1ba1bf,http://pdfs.semanticscholar.org/bd42/e0a6a1082e8c197a7b0a9b710434cd7c5a47.pdf
+5145e42dc46845f3aeb8307452765ba8dc59d2da,http://pdcat13.csie.ntust.edu.tw/download/papers/P10003.pdf
+51c3050fb509ca685de3d9ac2e965f0de1fb21cc,http://www.cs.toronto.edu/~law/publications/CVPR/2014/fantope_regularization.pdf
+516d0d9eb08825809e4618ca73a0697137ebabd5,http://web.engr.oregonstate.edu/~sinisa/talks/cvpr16_multimodal_oral.pdf
+519a724426b5d9ad384d38aaf2a4632d3824f243,http://pdfs.semanticscholar.org/519a/724426b5d9ad384d38aaf2a4632d3824f243.pdf
+51c7c5dfda47647aef2797ac3103cf0e108fdfb4,http://pdfs.semanticscholar.org/51c7/c5dfda47647aef2797ac3103cf0e108fdfb4.pdf
+519f4eb5fe15a25a46f1a49e2632b12a3b18c94d,https://www.cise.ufl.edu/~arunava/papers/pami-abrdf.pdf
+518edcd112991a1717856841c1a03dd94a250090,http://pdfs.semanticscholar.org/518e/dcd112991a1717856841c1a03dd94a250090.pdf
+51683eac8bbcd2944f811d9074a74d09d395c7f3,http://pdfs.semanticscholar.org/5168/3eac8bbcd2944f811d9074a74d09d395c7f3.pdf
+51cc78bc719d7ff2956b645e2fb61bab59843d2b,http://pdfs.semanticscholar.org/51cc/78bc719d7ff2956b645e2fb61bab59843d2b.pdf
+511b06c26b0628175c66ab70dd4c1a4c0c19aee9,http://pdfs.semanticscholar.org/511b/06c26b0628175c66ab70dd4c1a4c0c19aee9.pdf
+51528cdce7a92835657c0a616c0806594de7513b,http://pdfs.semanticscholar.org/5152/8cdce7a92835657c0a616c0806594de7513b.pdf
+514a74aefb0b6a71933013155bcde7308cad2b46,http://pdfs.semanticscholar.org/514a/74aefb0b6a71933013155bcde7308cad2b46.pdf
+51a8dabe4dae157aeffa5e1790702d31368b9161,http://pdfs.semanticscholar.org/5621/adae20c1bc781a36c43a9ddbe5475ea4b6e8.pdf
+51224ed7519e71346076060092462e3d59ca3ab9,http://www.iis.ee.ic.ac.uk/ComputerVision/docs/pubs/Chao_TM_2014.pdf
+512b4c8f0f3fb23445c0c2dab768bcd848fa8392,http://pdfs.semanticscholar.org/b85d/ac54bfa985137b3b071593b986ac92f32bed.pdf
+51eba481dac6b229a7490f650dff7b17ce05df73,http://grail.cs.washington.edu/wp-content/uploads/2016/09/yatskar2016srv.pdf
+51348e24d2199b06273e7b65ae5f3fc764a2efc7,http://pdfs.semanticscholar.org/c4b4/cbc801a4430be5fdd16ae34c68f53f772582.pdf
+5173a20304ea7baa6bfe97944a5c7a69ea72530f,http://pdfs.semanticscholar.org/5173/a20304ea7baa6bfe97944a5c7a69ea72530f.pdf
+51ed4c92cab9336a2ac41fa8e0293c2f5f9bf3b6,http://pdfs.semanticscholar.org/51ed/4c92cab9336a2ac41fa8e0293c2f5f9bf3b6.pdf
+51f311f724883218bcc511b0403b9a7745b9d40e,https://www.researchgate.net/profile/Xiangwei_Kong/publication/221190737_Biometrics-based_identifiers_for_digital_identity_management/links/00b7d51ca1f2a78c74000000.pdf
+5121f42de7cb9e41f93646e087df82b573b23311,http://pdfs.semanticscholar.org/5121/f42de7cb9e41f93646e087df82b573b23311.pdf
+5185f2a40836a754baaa7419a1abdd1e7ffaf2ad,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR_2007/data/papers/workshops/SLAM2007/papers/11-1569042551.pdf
+5160569ca88171d5fa257582d161e9063c8f898d,http://infoscience.epfl.ch/record/83324/files/heusch-AFGR-2006.pdf
+5157dde17a69f12c51186ffc20a0a6c6847f1a29,http://arxiv.org/pdf/1505.04373v2.pdf
+51dc127f29d1bb076d97f515dca4cc42dda3d25b,http://pdfs.semanticscholar.org/7a1d/4a9ef5944217ee19aa642471b4746aaa2576.pdf
+3d143cfab13ecd9c485f19d988242e7240660c86,http://pdfs.semanticscholar.org/3d14/3cfab13ecd9c485f19d988242e7240660c86.pdf
+3dabf7d853769cfc4986aec443cc8b6699136ed0,http://pdfs.semanticscholar.org/3dab/f7d853769cfc4986aec443cc8b6699136ed0.pdf
+3db75962857a602cae65f60f202d311eb4627b41,https://pdfs.semanticscholar.org/3db7/5962857a602cae65f60f202d311eb4627b41.pdf
+3daf1191d43e21a8302d98567630b0e2025913b0,http://pdfs.semanticscholar.org/3daf/1191d43e21a8302d98567630b0e2025913b0.pdf
+3d36f941d8ec613bb25e80fb8f4c160c1a2848df,https://arxiv.org/pdf/1502.02410v1.pdf
+3d5a1be4c1595b4805a35414dfb55716e3bf80d8,http://pdfs.semanticscholar.org/9e8e/bf5447fcd5b2ba4cdd53253f0049dacb2985.pdf
+3d62b2f9cef997fc37099305dabff356d39ed477,http://pdfs.semanticscholar.org/3d62/b2f9cef997fc37099305dabff356d39ed477.pdf
+3dc522a6576c3475e4a166377cbbf4ba389c041f,http://pdfs.semanticscholar.org/3dc5/22a6576c3475e4a166377cbbf4ba389c041f.pdf
+3dd4d719b2185f7c7f92cc97f3b5a65990fcd5dd,http://pdfs.semanticscholar.org/3dd4/d719b2185f7c7f92cc97f3b5a65990fcd5dd.pdf
+3d0ef9bfd08a9252db6acfece3b83f3aa58b4cae,http://perso.telecom-paristech.fr/~chollet/Biblio/Articles/Domaines/BIOMET/Face/Kumar/CoreFaceCVPR04.pdf
+3d1a6a5fd5915e0efb953ede5af0b23debd1fc7f,http://pdfs.semanticscholar.org/3d1a/6a5fd5915e0efb953ede5af0b23debd1fc7f.pdf
+3d0379688518cc0e8f896e30815d0b5e8452d4cd,http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/WorkShops/data/papers/007.pdf
+3dda181be266950ba1280b61eb63ac11777029f9,http://pdfs.semanticscholar.org/3dda/181be266950ba1280b61eb63ac11777029f9.pdf
+3d24b386d003bee176a942c26336dbe8f427aadd,https://arxiv.org/pdf/1611.09967v1.pdf
+3dcebd4a1d66313dcd043f71162d677761b07a0d,http://cvhci.ira.uka.de/download/publications/2008/siu2008_lbp.pdf
+3d0f9a3031bee4b89fab703ff1f1d6170493dc01,http://pdfs.semanticscholar.org/3d0f/9a3031bee4b89fab703ff1f1d6170493dc01.pdf
+3d0c21d4780489bd624a74b07e28c16175df6355,http://pdfs.semanticscholar.org/3d0c/21d4780489bd624a74b07e28c16175df6355.pdf
+3df8cc0384814c3fb05c44e494ced947a7d43f36,http://openaccess.thecvf.com/content_ICCV_2017/papers/Walker_The_Pose_Knows_ICCV_2017_paper.pdf
+3d42e17266475e5d34a32103d879b13de2366561,http://pdfs.semanticscholar.org/7450/7306832bd71884365ed81e1cc7866e47c399.pdf
+3dd906bc0947e56d2b7bf9530b11351bbdff2358,http://pdfs.semanticscholar.org/c57a/070724b48962935ff46ab1384d919e1d1089.pdf
+3dfd94d3fad7e17f52a8ae815eb9cc5471172bc0,http://pdfs.semanticscholar.org/3dfd/94d3fad7e17f52a8ae815eb9cc5471172bc0.pdf
+3dbfd2fdbd28e4518e2ae05de8374057307e97b3,http://pdfs.semanticscholar.org/3dbf/d2fdbd28e4518e2ae05de8374057307e97b3.pdf
+3df7401906ae315e6aef3b4f13126de64b894a54,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR_2008/data/papers/067.pdf
+3d68cedd80babfbb04ab197a0b69054e3c196cd9,http://www.cim.mcgill.ca/~mrl/pubs/malika/Meghjani09_Masters_Thesis.pdf
+3d1af6c531ebcb4321607bcef8d9dc6aa9f0dc5a,http://www.cse.msu.edu/~rossarun/BiometricsTextBook/Papers/Security/Teoh_BioHash_PAMI06.pdf
+3d6943f1573f992d6897489b73ec46df983d776c,http://pdfs.semanticscholar.org/757d/223b8db29e4cfba9530c7f942304c78cfee1.pdf
+3d948e4813a6856e5b8b54c20e50cc5050e66abe,http://pdfs.semanticscholar.org/3d94/8e4813a6856e5b8b54c20e50cc5050e66abe.pdf
+3d94f81cf4c3a7307e1a976dc6cb7bf38068a381,http://faculty.ucmerced.edu/mhyang/papers/tip17_age.pdf
+3d9db1cacf9c3bb7af57b8112787b59f45927355,http://pdfs.semanticscholar.org/3d9d/b1cacf9c3bb7af57b8112787b59f45927355.pdf
+582edc19f2b1ab2ac6883426f147196c8306685a,http://pdfs.semanticscholar.org/be6c/db7b181e73f546d43cf2ab6bc7181d7d619b.pdf
+5859774103306113707db02fe2dd3ac9f91f1b9e,http://www.wisdom.weizmann.ac.il/~shimon/papers/IJCV29_98.pdf
+5892f8367639e9c1e3cf27fdf6c09bb3247651ed,http://pdfs.semanticscholar.org/5892/f8367639e9c1e3cf27fdf6c09bb3247651ed.pdf
+5850aab97e1709b45ac26bb7d205e2accc798a87,http://pdfs.semanticscholar.org/5850/aab97e1709b45ac26bb7d205e2accc798a87.pdf
+587f81ae87b42c18c565694c694439c65557d6d5,http://pdfs.semanticscholar.org/aeff/403079022683b233decda556a6aee3225065.pdf
+580054294ca761500ada71f7d5a78acb0e622f19,http://www.jdl.ac.cn/project/faceId/paperreading/Paper/hhan_20090305_TIP2008_FaceRelighting.pdf
+587c48ec417be8b0334fa39075b3bfd66cc29dbe,http://pdfs.semanticscholar.org/ff91/95f99a1a28ced431362f5363c9a5da47a37b.pdf
+58081cb20d397ce80f638d38ed80b3384af76869,http://pdfs.semanticscholar.org/5808/1cb20d397ce80f638d38ed80b3384af76869.pdf
+581e920ddb6ecfc2a313a3aa6fed3d933b917ab0,http://pdfs.semanticscholar.org/581e/920ddb6ecfc2a313a3aa6fed3d933b917ab0.pdf
+58fa85ed57e661df93ca4cdb27d210afe5d2cdcd,http://www.dgcv.nii.ac.jp/Publications/Papers/2016/ICPR2016a.pdf
+5860cf0f24f2ec3f8cbc39292976eed52ba2eafd,http://pdfs.semanticscholar.org/5860/cf0f24f2ec3f8cbc39292976eed52ba2eafd.pdf
+584909d2220b52c0d037e8761d80cb22f516773f,http://www.cs.tau.ac.il/~nachumd/papers/OFTA.pdf
+58823377757e7dc92f3b70a973be697651089756,http://pdfs.semanticscholar.org/fa88/52e5b7849adf8e96a103ca67e4ca60bdf244.pdf
+580e48d3e7fe1ae0ceed2137976139852b1755df,http://pdfs.semanticscholar.org/580e/48d3e7fe1ae0ceed2137976139852b1755df.pdf
+5865e824e3d8560e07840dd5f75cfe9bf68f9d96,http://pdfs.semanticscholar.org/5865/e824e3d8560e07840dd5f75cfe9bf68f9d96.pdf
+58bb77dff5f6ee0fb5ab7f5079a5e788276184cc,https://ram-lab.com/papers/2016/rcar_lyp_192.pdf
+58b8588c01196070674ceabe5366b20f73c2912d,http://www.cse.ust.hk/~qnature/pdf/ICDM2015.pdf
+58b0be2db0aeda2edb641273fe52946a24a714c3,http://www.cs.ucsb.edu/~daniel/publications/conferences/wacv09/VaqueroWACV09.pdf
+585260468d023ffc95f0e539c3fa87254c28510b,http://pdfs.semanticscholar.org/5852/60468d023ffc95f0e539c3fa87254c28510b.pdf
+58cb1414095f5eb6a8c6843326a6653403a0ee17,http://pdfs.semanticscholar.org/58cb/1414095f5eb6a8c6843326a6653403a0ee17.pdf
+58db008b204d0c3c6744f280e8367b4057173259,http://pdfs.semanticscholar.org/58db/008b204d0c3c6744f280e8367b4057173259.pdf
+58628e64e61bd2776a2a7258012eabe3c79ca90c,http://pdfs.semanticscholar.org/5862/8e64e61bd2776a2a7258012eabe3c79ca90c.pdf
+676a136f5978783f75b5edbb38e8bb588e8efbbe,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2B_084_ext.pdf
+676f9eabf4cfc1fd625228c83ff72f6499c67926,http://pdfs.semanticscholar.org/676f/9eabf4cfc1fd625228c83ff72f6499c67926.pdf
+677477e6d2ba5b99633aee3d60e77026fb0b9306,http://pdfs.semanticscholar.org/d105/b9b31106495f58fb951cfdbf64787ee89ab2.pdf
+670531f3925c1ee6921f1550a988a034db727c3b,http://neerajkumar.org/base/papers/nk_www2014_photorecall.pdf
+679b7fa9e74b2aa7892eaea580def6ed4332a228,http://pdfs.semanticscholar.org/679b/7fa9e74b2aa7892eaea580def6ed4332a228.pdf
+670637d0303a863c1548d5b19f705860a23e285c,https://classes.cs.uoregon.edu/16F/cis607photo/faces.pdf
+67b79c2336b9a2efbfc805b9a6912a0959e392a9,https://www.researchgate.net/profile/Engin_Erzin/publication/220716898_RANSAC-Based_Training_Data_Selection_on_Spectral_Features_for_Emotion_Recognition_from_Spontaneous_Speech/links/0912f5089705e67f21000000.pdf
+6742c0a26315d7354ab6b1fa62a5fffaea06da14,http://pdfs.semanticscholar.org/ae08/778d8003933a02fd90a49b2e5f67ba56ad8d.pdf
+67a50752358d5d287c2b55e7a45cc39be47bf7d0,http://pdfs.semanticscholar.org/67a5/0752358d5d287c2b55e7a45cc39be47bf7d0.pdf
+67c3c1194ee72c54bc011b5768e153a035068c43,http://pdfs.semanticscholar.org/67c3/c1194ee72c54bc011b5768e153a035068c43.pdf
+67c703a864aab47eba80b94d1935e6d244e00bcb,http://pdfs.semanticscholar.org/67c7/03a864aab47eba80b94d1935e6d244e00bcb.pdf
+677ebde61ba3936b805357e27fce06c44513a455,http://pdfs.semanticscholar.org/677e/bde61ba3936b805357e27fce06c44513a455.pdf
+67ba3524e135c1375c74fe53ebb03684754aae56,http://www.mirlab.org/conference_papers/International_Conference/ICASSP%202017/pdfs/0001767.pdf
+6769cfbd85329e4815bb1332b118b01119975a95,http://pdfs.semanticscholar.org/6769/cfbd85329e4815bb1332b118b01119975a95.pdf
+0bc53b338c52fc635687b7a6c1e7c2b7191f42e5,http://pdfs.semanticscholar.org/a32a/8d6d4c3b4d69544763be48ffa7cb0d7f2f23.pdf
+0b2277a0609565c30a8ee3e7e193ce7f79ab48b0,http://ivg.au.tsinghua.edu.cn/paper/2012_Cost-sensitive%20semi-supervised%20discriminant%20analysis%20for%20face%20recognition.pdf
+0b9ce839b3c77762fff947e60a0eb7ebbf261e84,http://pdfs.semanticscholar.org/0b9c/e839b3c77762fff947e60a0eb7ebbf261e84.pdf
+0b8b8776684009e537b9e2c0d87dbd56708ddcb4,http://pdfs.semanticscholar.org/0b8b/8776684009e537b9e2c0d87dbd56708ddcb4.pdf
+0ba64f4157d80720883a96a73e8d6a5f5b9f1d9b,http://pdfs.semanticscholar.org/84b7/e2138a3701432c33ea70a1297328cd814ab5.pdf
+0b6a5200c33434cbfa9bf24ba482f6e06bf5fff7,http://pdfs.semanticscholar.org/0b6a/5200c33434cbfa9bf24ba482f6e06bf5fff7.pdf
+0b605b40d4fef23baa5d21ead11f522d7af1df06,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989a819.pdf
+0b0eb562d7341231c3f82a65cf51943194add0bb,http://pdfs.semanticscholar.org/0b0e/b562d7341231c3f82a65cf51943194add0bb.pdf
+0b3a146c474166bba71e645452b3a8276ac05998,http://pdfs.semanticscholar.org/c6e5/17eb85bc6c68dff5d3fadb2d817e839c966b.pdf
+0b78fd881d0f402fd9b773249af65819e48ad36d,http://mirlab.org/conference_papers/International_Conference/ISCSLP%202008/pdfs/281.pdf
+0b835284b8f1f45f87b0ce004a4ad2aca1d9e153,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w16/papers/Kapadia_Cartooning_for_Enhanced_CVPR_2017_paper.pdf
+0b5bd3ce90bf732801642b9f55a781e7de7fdde0,http://pdfs.semanticscholar.org/0b5b/d3ce90bf732801642b9f55a781e7de7fdde0.pdf
+0b0958493e43ca9c131315bcfb9a171d52ecbb8a,http://pdfs.semanticscholar.org/0b09/58493e43ca9c131315bcfb9a171d52ecbb8a.pdf
+0b51197109813d921835cb9c4153b9d1e12a9b34,http://pdfs.semanticscholar.org/0b51/197109813d921835cb9c4153b9d1e12a9b34.pdf
+0bf3513d18ec37efb1d2c7934a837dabafe9d091,http://pdfs.semanticscholar.org/14ff/c760c1655524fc2a035357ad354664b5af5e.pdf
+0b20f75dbb0823766d8c7b04030670ef7147ccdd,http://pdfs.semanticscholar.org/0b20/f75dbb0823766d8c7b04030670ef7147ccdd.pdf
+0b174d4a67805b8796bfe86cd69a967d357ba9b6,http://pdfs.semanticscholar.org/0b17/4d4a67805b8796bfe86cd69a967d357ba9b6.pdf
+0ba449e312894bca0d16348f3aef41ca01872383,http://pdfs.semanticscholar.org/0ba4/49e312894bca0d16348f3aef41ca01872383.pdf
+0b87d91fbda61cdea79a4b4dcdcb6d579f063884,http://pdfs.semanticscholar.org/0b87/d91fbda61cdea79a4b4dcdcb6d579f063884.pdf
+0be2245b2b016de1dcce75ffb3371a5e4b1e731b,http://pdfs.semanticscholar.org/0be2/245b2b016de1dcce75ffb3371a5e4b1e731b.pdf
+0b79356e58a0df1d0efcf428d0c7c4651afa140d,http://pdfs.semanticscholar.org/7725/05d940a31ca237563cfb2d5c05c62742993f.pdf
+0b85b50b6ff03a7886c702ceabad9ab8c8748fdc,http://pdfs.semanticscholar.org/0b85/b50b6ff03a7886c702ceabad9ab8c8748fdc.pdf
+0b84f07af44f964817675ad961def8a51406dd2e,https://arxiv.org/pdf/1604.02531v2.pdf
+0b242d5123f79defd5f775d49d8a7047ad3153bc,http://pdfs.semanticscholar.org/84db/c0010ae4f5206d689cf9f5bb176d18990bcd.pdf
+0b3786a3a0ea7ec08f01636124c183dbee8f625f,http://www.cs.uiuc.edu/homes/dhoiem/publications/pami2012_FlickrSimilaritiesSIKMA_Gang.pdf
+0b50e223ad4d9465bb92dbf17a7b79eccdb997fb,http://users.eecs.northwestern.edu/~ganghua/publication/CVPR08a.pdf
+0badf61e8d3b26a0d8b60fe94ba5c606718daf0b,http://pdfs.semanticscholar.org/0bad/f61e8d3b26a0d8b60fe94ba5c606718daf0b.pdf
+0b02bfa5f3a238716a83aebceb0e75d22c549975,http://pdfs.semanticscholar.org/0b02/bfa5f3a238716a83aebceb0e75d22c549975.pdf
+0b2966101fa617b90510e145ed52226e79351072,http://www.cs.umanitoba.ca/~ywang/papers/icpr16_videotext.pdf
+0ba0f000baf877bc00a9e144b88fa6d373db2708,http://pdfs.semanticscholar.org/0ba0/f000baf877bc00a9e144b88fa6d373db2708.pdf
+0be80da851a17dd33f1e6ffdd7d90a1dc7475b96,http://pdfs.semanticscholar.org/0be8/0da851a17dd33f1e6ffdd7d90a1dc7475b96.pdf
+0b183f5260667c16ef6f640e5da50272c36d599b,http://pdfs.semanticscholar.org/0b18/3f5260667c16ef6f640e5da50272c36d599b.pdf
+0b4c4ea4a133b9eab46b217e22bda4d9d13559e6,http://www.micc.unifi.it/wp-content/uploads/2015/12/2015_morph_random_forests.pdf
+0b9db62b26b811e8c24eb9edc37901a4b79a897f,https://eng.ucmerced.edu/people/cyang35/CVPR13/cvpr13_hallucination.pdf
+0ba99a709cd34654ac296418a4f41a9543928149,https://pdfs.semanticscholar.org/0ba9/9a709cd34654ac296418a4f41a9543928149.pdf
+0be764800507d2e683b3fb6576086e37e56059d1,http://pdfs.semanticscholar.org/0be7/64800507d2e683b3fb6576086e37e56059d1.pdf
+0b642f6d48a51df64502462372a38c50df2051b1,https://infoscience.epfl.ch/record/231128/files/Le_ICMI_2017.pdf
+0b7d1386df0cf957690f0fe330160723633d2305,http://www.cs.rpi.edu/~magdon/ps/conference/AccentICMLA2009.pdf
+0b6616f3ebff461e4b6c68205fcef1dae43e2a1a,http://pdfs.semanticscholar.org/0b66/16f3ebff461e4b6c68205fcef1dae43e2a1a.pdf
+0b8c92463f8f5087696681fb62dad003c308ebe2,https://www.iiitd.edu.in/~richa/papers/BTAS10-Sketch.pdf
+0bc0f9178999e5c2f23a45325fa50300961e0226,http://pdfs.semanticscholar.org/0bc0/f9178999e5c2f23a45325fa50300961e0226.pdf
+0ba402af3b8682e2aa89f76bd823ddffdf89fa0a,http://pdfs.semanticscholar.org/c0d8/4377168c554cb8e83099bed940091fe49dec.pdf
+0b3f354e6796ef7416bf6dde9e0779b2fcfabed2,http://pdfs.semanticscholar.org/fd60/5d123a0f777716f798f258fbbcd73d75fa8b.pdf
+0b9d3a0c61ee498f8ed54aaa22d3c4e72aa56f40,http://www.researchgate.net/profile/Mark_Billinghurst/publication/221209697_A_Quadratic_Deformation_Model_for_Facial_Expression_Recognition/links/00b4952464de6e125e000000.pdf
+9391618c09a51f72a1c30b2e890f4fac1f595ebd,http://pdfs.semanticscholar.org/9391/618c09a51f72a1c30b2e890f4fac1f595ebd.pdf
+93675f86d03256f9a010033d3c4c842a732bf661,http://pdfs.semanticscholar.org/9367/5f86d03256f9a010033d3c4c842a732bf661.pdf
+935a7793cbb8f102924fa34fce1049727de865c2,https://ivi.fnwi.uva.nl/isis/publications/2015/AlnajarICIP20015/AlnajarICIP20015.pdf
+9326d1390e8601e2efc3c4032152844483038f3f,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2014/W01/papers/Hsu_Landmark_Based_Facial_2014_CVPR_paper.pdf
+93747de3d40376761d1ef83ffa72ec38cd385833,http://pdfs.semanticscholar.org/9374/7de3d40376761d1ef83ffa72ec38cd385833.pdf
+936c7406de1dfdd22493785fc5d1e5614c6c2882,http://pdfs.semanticscholar.org/9d5e/1395e1ace37d9d5b7ce6854d518e7f128e79.pdf
+93721023dd6423ab06ff7a491d01bdfe83db7754,http://pdfs.semanticscholar.org/9372/1023dd6423ab06ff7a491d01bdfe83db7754.pdf
+93971a49ef6cc88a139420349a1dfd85fb5d3f5c,http://pdfs.semanticscholar.org/9397/1a49ef6cc88a139420349a1dfd85fb5d3f5c.pdf
+93cbb3b3e40321c4990c36f89a63534b506b6daf,http://www.cs.wisc.edu/~gdguo/myPapersOnWeb/IEEESMC05Guo.pdf
+937ffb1c303e0595317873eda5ce85b1a17f9943,https://ivi.fnwi.uva.nl/isis/publications/2010/DibekliogluICM2010/DibekliogluICM2010.pdf
+9329523dc0bd4e2896d5f63cf2440f21b7a16f16,http://pdfs.semanticscholar.org/d853/107e81c3db4a7909b599bff82ab1c48772af.pdf
+9306f61c7c3bdcdcb257cd437ca59df8e599e326,http://www.umiacs.umd.edu/~pvishalm/Conference_pub/ACPR2011_v2.pdf
+936227f7483938097cc1cdd3032016df54dbd5b6,http://pdfs.semanticscholar.org/9362/27f7483938097cc1cdd3032016df54dbd5b6.pdf
+939123cf21dc9189a03671484c734091b240183e,http://publications.idiap.ch/downloads/papers/2015/Erdogmus_MMSP_2015.pdf
+94b9c0a6515913bad345f0940ee233cdf82fffe1,http://pdfs.semanticscholar.org/94b9/c0a6515913bad345f0940ee233cdf82fffe1.pdf
+94498fae459167841e8b2f4b911493fc3c7da22f,http://www-ee.ccny.cuny.edu/www/web/yltian/Publications/cvpr2016_ROF.pdf
+94a7c97d1e3eb5dbfb20b180780451486597a9be,http://pdfs.semanticscholar.org/94a7/c97d1e3eb5dbfb20b180780451486597a9be.pdf
+9458c518a6e2d40fb1d6ca1066d6a0c73e1d6b73,http://www.vision.ee.ethz.ch/~zzhiwu/papers/COX-Face-DB-TIP-final.pdf
+948af4b04b4a9ae4bff2777ffbcb29d5bfeeb494,http://pdfs.semanticscholar.org/948a/f4b04b4a9ae4bff2777ffbcb29d5bfeeb494.pdf
+94aa8a3787385b13ee7c4fdd2b2b2a574ffcbd81,http://ibug.doc.ic.ac.uk/media/uploads/documents/p148-cheng.pdf
+94f74c6314ffd02db581e8e887b5fd81ce288dbf,http://pdfs.semanticscholar.org/94f7/4c6314ffd02db581e8e887b5fd81ce288dbf.pdf
+941166547968081463398c9eb041f00eb04304f7,http://people.duke.edu/~qq3/pub/ExpressionDictionary_TIP.pdf
+9441253b638373a0027a5b4324b4ee5f0dffd670,http://pdfs.semanticscholar.org/9441/253b638373a0027a5b4324b4ee5f0dffd670.pdf
+949699d0b865ef35b36f11564f9a4396f5c9cddb,http://pdfs.semanticscholar.org/9496/99d0b865ef35b36f11564f9a4396f5c9cddb.pdf
+94ac3008bf6be6be6b0f5140a0bea738d4c75579,http://pdfs.semanticscholar.org/94ac/3008bf6be6be6b0f5140a0bea738d4c75579.pdf
+94e259345e82fa3015a381d6e91ec6cded3971b4,http://pdfs.semanticscholar.org/94e2/59345e82fa3015a381d6e91ec6cded3971b4.pdf
+0efdd82a4753a8309ff0a3c22106c570d8a84c20,http://pdfs.semanticscholar.org/0efd/d82a4753a8309ff0a3c22106c570d8a84c20.pdf
+0e5dcc6ae52625fd0637c6bba46a973e46d58b9c,http://pdfs.semanticscholar.org/0e5d/cc6ae52625fd0637c6bba46a973e46d58b9c.pdf
+0e73d2b0f943cf8559da7f5002414ccc26bc77cd,https://people.cs.umass.edu/~smaji/presentations/similarity-poster-cvpr14.pdf
+0ed0e48b245f2d459baa3d2779bfc18fee04145b,http://pdfs.semanticscholar.org/0ed0/e48b245f2d459baa3d2779bfc18fee04145b.pdf
+0eac652139f7ab44ff1051584b59f2dc1757f53b,http://pdfs.semanticscholar.org/0eac/652139f7ab44ff1051584b59f2dc1757f53b.pdf
+0ef96d97365899af797628e80f8d1020c4c7e431,http://media.adelaide.edu.au/acvt/Publications/2006/2006-Improving%20the%20Speed%20of%20Kernel%20PCA%20on%20Large%20Scale%20Datasets.pdf
+0e7f277538142fb50ce2dd9179cffdc36b794054,http://nb.vse.cz/~svatek/mdm08.pdf
+0e8760fc198a7e7c9f4193478c0e0700950a86cd,http://pdfs.semanticscholar.org/0e87/60fc198a7e7c9f4193478c0e0700950a86cd.pdf
+0ec0fc9ed165c40b1ef4a99e944abd8aa4e38056,http://pdfs.semanticscholar.org/0ec0/fc9ed165c40b1ef4a99e944abd8aa4e38056.pdf
+0e652a99761d2664f28f8931fee5b1d6b78c2a82,http://pdfs.semanticscholar.org/0e65/2a99761d2664f28f8931fee5b1d6b78c2a82.pdf
+0e50fe28229fea45527000b876eb4068abd6ed8c,http://pdfs.semanticscholar.org/0e50/fe28229fea45527000b876eb4068abd6ed8c.pdf
+0eff410cd6a93d0e37048e236f62e209bc4383d1,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_ICRA_2010/data/papers/0516.pdf
+0ea7b7fff090c707684fd4dc13e0a8f39b300a97,https://arxiv.org/pdf/1711.06055v1.pdf
+0ee661a1b6bbfadb5a482ec643573de53a9adf5e,http://epubs.surrey.ac.uk/812523/1/yunlian_TIFS2014.pdf
+0e36ada8cb9c91f07c9dcaf196d036564e117536,http://pdfs.semanticscholar.org/d0d5/aa7f797113c825053f4c4fd3772dc3601139.pdf
+0e986f51fe45b00633de9fd0c94d082d2be51406,http://vision.ics.uci.edu/papers/ZhuR_CVPR_2012/ZhuR_CVPR_2012.pdf
+0ebc50b6e4b01eb5eba5279ce547c838890b1418,http://pdfs.semanticscholar.org/0ebc/50b6e4b01eb5eba5279ce547c838890b1418.pdf
+0e49a23fafa4b2e2ac097292acf00298458932b4,http://pdfs.semanticscholar.org/0e49/a23fafa4b2e2ac097292acf00298458932b4.pdf
+0ec1673609256b1e457f41ede5f21f05de0c054f,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989d025.pdf
+0e3840ea3227851aaf4633133dd3cbf9bbe89e5b,http://pdfs.semanticscholar.org/8d59/98cd984e7cce307da7d46f155f9db99c6590.pdf
+0e5dad0fe99aed6978c6c6c95dc49c6dca601e6a,http://www.openu.ac.il/home/hassner/projects/LATCH/LATCH.pdf
+0ea38a5ba0c8739d1196da5d20efb13406bb6550,https://filebox.ece.vt.edu/~parikh/Publications/ParikhGrauman_ICCV2011_relative.pdf
+0e21c9e5755c3dab6d8079d738d1188b03128a31,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Wu_Constrained_Clustering_and_2013_CVPR_paper.pdf
+0e677f2b798f5c1f7143ba983467321a7851565a,http://www.cse.iitk.ac.in/users/rahulaaj/papers/BillyYL.pdf
+0e78af9bd0f9a0ce4ceb5f09f24bc4e4823bd698,http://pdfs.semanticscholar.org/0e78/af9bd0f9a0ce4ceb5f09f24bc4e4823bd698.pdf
+0ed1c1589ed284f0314ed2aeb3a9bbc760dcdeb5,http://ca.cs.cmu.edu/sites/default/files/9MMED_CVPR12.pdf
+0e7c70321462694757511a1776f53d629a1b38f3,http://pdfs.semanticscholar.org/0e7c/70321462694757511a1776f53d629a1b38f3.pdf
+0ec2049a1dd7ae14c7a4c22c5bcd38472214f44d,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Wang_Fast_Subspace_Search_2013_ICCV_paper.pdf
+0ec67c69e0975cfcbd8ba787cc0889aec4cc5399,http://pdfs.semanticscholar.org/1af3/6a1fc18328e2a0310bc4208ef35ba882bdc1.pdf
+0e1983e9d0e8cb4cbffef7af06f6bc8e3f191a64,http://mirlab.org/conference_papers/International_Conference/ACM%202005/docs/mm1039.pdf
+0e1a18576a7d3b40fe961ef42885101f4e2630f8,http://pdfs.semanticscholar.org/0e1a/18576a7d3b40fe961ef42885101f4e2630f8.pdf
+6080f26675e44f692dd722b61905af71c5260af8,https://arxiv.org/pdf/1603.05073v1.pdf
+60a006bdfe5b8bf3243404fae8a5f4a9d58fa892,http://alumni.cs.ucr.edu/~mkafai/papers/Paper_bwild.pdf
+6043006467fb3fd1e9783928d8040ee1f1db1f3a,https://www.microsoft.com/en-us/research/wp-content/uploads/2016/10/CVPR10_FaceReco.pdf
+600025c9a13ff09c6d8b606a286a79c823d89db8,http://pdfs.semanticscholar.org/6000/25c9a13ff09c6d8b606a286a79c823d89db8.pdf
+60d765f2c0a1a674b68bee845f6c02741a49b44e,http://pdfs.semanticscholar.org/60d7/65f2c0a1a674b68bee845f6c02741a49b44e.pdf
+60d4cef56efd2f5452362d4d9ac1ae05afa970d1,http://pdfs.semanticscholar.org/60d4/cef56efd2f5452362d4d9ac1ae05afa970d1.pdf
+60ce4a9602c27ad17a1366165033fe5e0cf68078,http://pdfs.semanticscholar.org/60ce/4a9602c27ad17a1366165033fe5e0cf68078.pdf
+6097ea6fd21a5f86a10a52e6e4dd5b78a436d5bf,http://arxiv.org/pdf/1512.05300v3.pdf
+60c699b9ec71f7dcbc06fa4fd98eeb08e915eb09,https://arxiv.org/pdf/1706.03947v1.pdf
+60970e124aa5fb964c9a2a5d48cd6eee769c73ef,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Tierney_Subspace_Clustering_for_2014_CVPR_paper.pdf
+60efdb2e204b2be6701a8e168983fa666feac1be,http://www.vision.ee.ethz.ch/en/publications/papers/articles/eth_biwi_01387.pdf
+60824ee635777b4ee30fcc2485ef1e103b8e7af9,http://www.ee.surrey.ac.uk/CVSSP/Publications/papers/Feng-TIP-2015.pdf
+60643bdab1c6261576e6610ea64ea0c0b200a28d,http://pdfs.semanticscholar.org/6064/3bdab1c6261576e6610ea64ea0c0b200a28d.pdf
+60a20d5023f2bcc241eb9e187b4ddece695c2b9b,http://pdfs.semanticscholar.org/60a2/0d5023f2bcc241eb9e187b4ddece695c2b9b.pdf
+60cdcf75e97e88638ec973f468598ae7f75c59b4,http://www.cse.cuhk.edu.hk/~lyu/paper_pdf/tmm08face.pdf
+60040e4eae81ab6974ce12f1c789e0c05be00303,http://pdfs.semanticscholar.org/6004/0e4eae81ab6974ce12f1c789e0c05be00303.pdf
+60b3601d70f5cdcfef9934b24bcb3cc4dde663e7,http://pdfs.semanticscholar.org/60b3/601d70f5cdcfef9934b24bcb3cc4dde663e7.pdf
+60737db62fb5fab742371709485e4b2ddf64b7b2,http://dbgroup.cs.tsinghua.edu.cn/ligl/papers/p307-weng.pdf
+60496b400e70acfbbf5f2f35b4a49de2a90701b5,http://pdfs.semanticscholar.org/6049/6b400e70acfbbf5f2f35b4a49de2a90701b5.pdf
+60bffecd79193d05742e5ab8550a5f89accd8488,http://pdfs.semanticscholar.org/60bf/fecd79193d05742e5ab8550a5f89accd8488.pdf
+601834a4150e9af028df90535ab61d812c45082c,http://pdfs.semanticscholar.org/6018/34a4150e9af028df90535ab61d812c45082c.pdf
+346dbc7484a1d930e7cc44276c29d134ad76dc3f,http://pdfs.semanticscholar.org/346d/bc7484a1d930e7cc44276c29d134ad76dc3f.pdf
+34a41ec648d082270697b9ee264f0baf4ffb5c8d,http://pdfs.semanticscholar.org/34a4/1ec648d082270697b9ee264f0baf4ffb5c8d.pdf
+34b3b14b4b7bfd149a0bd63749f416e1f2fc0c4c,http://pdfs.semanticscholar.org/9e97/360b519d9912ded55618ccbb000d74d8e35c.pdf
+34bb11bad04c13efd575224a5b4e58b9249370f3,http://cs.nju.edu.cn/wujx/paper/CVPR2014_Action.pdf
+3411ef1ff5ad11e45106f7863e8c7faf563f4ee1,http://pdfs.semanticscholar.org/3411/ef1ff5ad11e45106f7863e8c7faf563f4ee1.pdf
+34d484b47af705e303fc6987413dc0180f5f04a9,http://pdfs.semanticscholar.org/34d4/84b47af705e303fc6987413dc0180f5f04a9.pdf
+346166da1a49e531923294300a731167e1436d5b,http://lear.inrialpes.fr/people/mpederso/papers/3DV14.pdf
+345bea5f7d42926f857f395c371118a00382447f,http://grail.cs.washington.edu/wp-content/uploads/2016/09/kemelmacher2016tp.pdf
+3403cb92192dc6b2943d8dbfa8212cc65880159e,http://pdfs.semanticscholar.org/3403/cb92192dc6b2943d8dbfa8212cc65880159e.pdf
+3463f12ad434d256cd5f94c1c1bfd2dd6df36947,http://pdfs.semanticscholar.org/3463/f12ad434d256cd5f94c1c1bfd2dd6df36947.pdf
+346c9100b2fab35b162d7779002c974da5f069ee,http://cmlab.csie.ntu.edu.tw/~yanying/paper/p651-lei.pdf
+34863ecc50722f0972e23ec117f80afcfe1411a9,http://nlpr-web.ia.ac.cn/2010papers/kz/gh3.pdf
+34b7e826db49a16773e8747bc8dfa48e344e425d,http://www.comp.leeds.ac.uk/me/Publications/cvpr09_bsl.pdf
+34c594abba9bb7e5813cfae830e2c4db78cf138c,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/3B_047_ext.pdf
+34108098e1a378bc15a5824812bdf2229b938678,http://pdfs.semanticscholar.org/3410/8098e1a378bc15a5824812bdf2229b938678.pdf
+341ed69a6e5d7a89ff897c72c1456f50cfb23c96,http://pdfs.semanticscholar.org/cd7f/26c430363f90e530824446b3a4c85cfb94e5.pdf
+348a16b10d140861ece327886b85d96cce95711e,http://pdfs.semanticscholar.org/348a/16b10d140861ece327886b85d96cce95711e.pdf
+3419af6331e4099504255a38de6f6b7b3b1e5c14,http://pdfs.semanticscholar.org/3419/af6331e4099504255a38de6f6b7b3b1e5c14.pdf
+34c8de02a5064e27760d33b861b7e47161592e65,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w14/papers/Han_Video_Action_Recognition_CVPR_2017_paper.pdf
+340d1a9852747b03061e5358a8d12055136599b0,http://pdfs.semanticscholar.org/340d/1a9852747b03061e5358a8d12055136599b0.pdf
+34ccdec6c3f1edeeecae6a8f92e8bdb290ce40fd,http://pdfs.semanticscholar.org/34cc/dec6c3f1edeeecae6a8f92e8bdb290ce40fd.pdf
+34b42bcf84d79e30e26413f1589a9cf4b37076f9,http://pdfs.semanticscholar.org/34b4/2bcf84d79e30e26413f1589a9cf4b37076f9.pdf
+5aafca76dbbbbaefd82f5f0265776afb5320dafe,http://pdfs.semanticscholar.org/5aaf/ca76dbbbbaefd82f5f0265776afb5320dafe.pdf
+5a93f9084e59cb9730a498ff602a8c8703e5d8a5,http://pdfs.semanticscholar.org/5a93/f9084e59cb9730a498ff602a8c8703e5d8a5.pdf
+5a87bc1eae2ec715a67db4603be3d1bb8e53ace2,http://pdfs.semanticscholar.org/5a87/bc1eae2ec715a67db4603be3d1bb8e53ace2.pdf
+5aad56cfa2bac5d6635df4184047e809f8fecca2,http://chenlab.ece.cornell.edu/people/Amir/publications/picture_password.pdf
+5a8ca0cfad32f04449099e2e3f3e3a1c8f6541c0,http://pdfs.semanticscholar.org/5a8c/a0cfad32f04449099e2e3f3e3a1c8f6541c0.pdf
+5ac80e0b94200ee3ecd58a618fe6afd077be0a00,http://pdfs.semanticscholar.org/5ac8/0e0b94200ee3ecd58a618fe6afd077be0a00.pdf
+5a5f0287484f0d480fed1ce585dbf729586f0edc,http://www.researchgate.net/profile/Mohammad_Mahoor/publication/248703363_DISFA_A_Spontaneous_Facial_Action_Intensity_Database/links/0c960520903b2b8153000000.pdf
+5aadd85e2a77e482d44ac2a215c1f21e4a30d91b,http://pdfs.semanticscholar.org/5aad/d85e2a77e482d44ac2a215c1f21e4a30d91b.pdf
+5a34a9bb264a2594c02b5f46b038aa1ec3389072,http://www.mpi-inf.mpg.de/fileadmin/inf/d2/akata/TPAMI2487986.pdf
+5a4c6246758c522f68e75491eb65eafda375b701,http://www.mirlab.org/conference_papers/International_Conference/ICASSP%202010/pdfs/0001118.pdf
+5aad5e7390211267f3511ffa75c69febe3b84cc7,http://pdfs.semanticscholar.org/5aad/5e7390211267f3511ffa75c69febe3b84cc7.pdf
+5a029a0b0ae8ae7fc9043f0711b7c0d442bfd372,http://pdfs.semanticscholar.org/5a02/9a0b0ae8ae7fc9043f0711b7c0d442bfd372.pdf
+5ae970294aaba5e0225122552c019eb56f20af74,http://pdfs.semanticscholar.org/5ae9/70294aaba5e0225122552c019eb56f20af74.pdf
+5a86842ab586de9d62d5badb2ad8f4f01eada885,http://pdfs.semanticscholar.org/5a86/842ab586de9d62d5badb2ad8f4f01eada885.pdf
+5aa57a12444dbde0f5645bd9bcec8cb2f573c6a0,http://pdfs.semanticscholar.org/c173/fa4456941b9c40d53d656b8ad84d24c16ec3.pdf
+5a7520380d9960ff3b4f5f0fe526a00f63791e99,http://arxiv.org/pdf/1512.00932v1.pdf
+5f871838710a6b408cf647aacb3b198983719c31,http://www.jdl.ac.cn/user/xlchen/Paper/TIP07b.pdf
+5f64a2a9b6b3d410dd60dc2af4a58a428c5d85f9,http://pdfs.semanticscholar.org/e1dd/1c4de149c6b05eedd1728d57a18a074b9b2a.pdf
+5f344a4ef7edfd87c5c4bc531833774c3ed23542,http://pdfs.semanticscholar.org/5f34/4a4ef7edfd87c5c4bc531833774c3ed23542.pdf
+5f6ab4543cc38f23d0339e3037a952df7bcf696b,http://www.public.asu.edu/~bli24/Papers/ICPR2016_video2vec.pdf
+5fa0e6da81acece7026ac1bc6dcdbd8b204a5f0a,http://pdfs.semanticscholar.org/5fa0/e6da81acece7026ac1bc6dcdbd8b204a5f0a.pdf
+5feb1341a49dd7a597f4195004fe9b59f67e6707,http://pdfs.semanticscholar.org/5feb/1341a49dd7a597f4195004fe9b59f67e6707.pdf
+5f57a1a3a1e5364792b35e8f5f259f92ad561c1f,http://pdfs.semanticscholar.org/5f57/a1a3a1e5364792b35e8f5f259f92ad561c1f.pdf
+5fa932be4d30cad13ea3f3e863572372b915bec8,http://pdfs.semanticscholar.org/5fa9/32be4d30cad13ea3f3e863572372b915bec8.pdf
+5f5906168235613c81ad2129e2431a0e5ef2b6e4,https://arxiv.org/pdf/1601.00199v1.pdf
+5fb5d9389e2a2a4302c81bcfc068a4c8d4efe70c,http://pdfs.semanticscholar.org/5fb5/d9389e2a2a4302c81bcfc068a4c8d4efe70c.pdf
+5f1dcaff475ef18a2ecec0e114a9849a0a8002b9,http://pdfs.semanticscholar.org/5f1d/caff475ef18a2ecec0e114a9849a0a8002b9.pdf
+5f676d6eca4c72d1a3f3acf5a4081c29140650fb,http://www.cs.ucr.edu/~mkafai/papers/Paper_fg.pdf
+5fc664202208aaf01c9b62da5dfdcd71fdadab29,http://pdfs.semanticscholar.org/5fc6/64202208aaf01c9b62da5dfdcd71fdadab29.pdf
+5fac62a3de11125fc363877ba347122529b5aa50,http://openaccess.thecvf.com/content_ICCV_2017/papers/Saha_AMTnet_Action-Micro-Tube_Regression_ICCV_2017_paper.pdf
+5fa1724a79a9f7090c54925f6ac52f1697d6b570,http://pdfs.semanticscholar.org/5fa1/724a79a9f7090c54925f6ac52f1697d6b570.pdf
+5fba1b179ac80fee80548a0795d3f72b1b6e49cd,http://pdfs.semanticscholar.org/fe88/e30cfca9161b598ea8a26985df5832259924.pdf
+33f7e78950455c37236b31a6318194cfb2c302a4,http://pdfs.semanticscholar.org/33f7/e78950455c37236b31a6318194cfb2c302a4.pdf
+33ac7fd3a622da23308f21b0c4986ae8a86ecd2b,http://pdfs.semanticscholar.org/33ac/7fd3a622da23308f21b0c4986ae8a86ecd2b.pdf
+33030c23f6e25e30b140615bb190d5e1632c3d3b,http://pdfs.semanticscholar.org/3303/0c23f6e25e30b140615bb190d5e1632c3d3b.pdf
+33ba256d59aefe27735a30b51caf0554e5e3a1df,http://pdfs.semanticscholar.org/33ba/256d59aefe27735a30b51caf0554e5e3a1df.pdf
+33c3702b0eee6fc26fc49f79f9133f3dd7fa3f13,http://pdfs.semanticscholar.org/33c3/702b0eee6fc26fc49f79f9133f3dd7fa3f13.pdf
+33aff42530c2fd134553d397bf572c048db12c28,http://openaccess.thecvf.com/content_iccv_2015/papers/Ruiz_From_Emotions_to_ICCV_2015_paper.pdf
+33a1a049d15e22befc7ddefdd3ae719ced8394bf,http://pdfs.semanticscholar.org/33a1/a049d15e22befc7ddefdd3ae719ced8394bf.pdf
+33ec047f1084e290c8a6f516bc75345b6bcf02a0,https://www.researchgate.net/profile/Peter_Corcoran/publication/220168274_Smart_Cameras_2D_Affine_Models_for_Determining_Subject_Facial_Expressions/links/02bfe5118f52d3d59d000000.pdf
+334e65b31ad51b1c1f84ce12ef235096395f1ca7,http://pdfs.semanticscholar.org/334e/65b31ad51b1c1f84ce12ef235096395f1ca7.pdf
+3399f8f0dff8fcf001b711174d29c9d4fde89379,http://pdfs.semanticscholar.org/3399/f8f0dff8fcf001b711174d29c9d4fde89379.pdf
+333aa36e80f1a7fa29cf069d81d4d2e12679bc67,http://pdfs.semanticscholar.org/333a/a36e80f1a7fa29cf069d81d4d2e12679bc67.pdf
+3312eb79e025b885afe986be8189446ba356a507,http://pdfs.semanticscholar.org/6007/292075f8a8538fa6f4c3d7a8676a595ab1f4.pdf
+33792bb27ef392973e951ca5a5a3be4a22a0d0c6,http://plaza.ufl.edu/xsshi2015/paper_list/TPAMI2016.pdf
+3328674d71a18ed649e828963a0edb54348ee598,http://ai.pku.edu.cn/application/files/1415/1124/8089/A_face_and_palmprint_recognition_approach_based_on_discriminant_DCT_feature_extraction.pdf
+339937141ffb547af8e746718fbf2365cc1570c8,http://pdfs.semanticscholar.org/3399/37141ffb547af8e746718fbf2365cc1570c8.pdf
+33402ee078a61c7d019b1543bb11cc127c2462d2,http://users.cecs.anu.edu.au/~sgould/papers/cvpr17-ooo.pdf
+33ae696546eed070717192d393f75a1583cd8e2c,https://arxiv.org/pdf/1708.08508v2.pdf
+33554ff9d1d3b32f67020598320d3d761d7ec81f,http://pdfs.semanticscholar.org/3355/4ff9d1d3b32f67020598320d3d761d7ec81f.pdf
+33f2b44742cc828347ccc5ec488200c25838b664,http://pdfs.semanticscholar.org/33f2/b44742cc828347ccc5ec488200c25838b664.pdf
+3393459600368be2c4c9878a3f65a57dcc0c2cfa,http://pdfs.semanticscholar.org/3393/459600368be2c4c9878a3f65a57dcc0c2cfa.pdf
+3327e21b46434f6441018922ef31bddba6cc8176,http://www.metaio.com/fileadmin/upload/research_files/paper/ISMAR2014_Real-Time_Illumination_Estimation_from_Faces_for_Coherent_Rendering_paper.pdf
+334d6c71b6bce8dfbd376c4203004bd4464c2099,http://pdfs.semanticscholar.org/ebbf/a07476257e1b7f4e259b29531a12eab575bd.pdf
+33695e0779e67c7722449e9a3e2e55fde64cfd99,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/2B_064_ext.pdf
+334ac2a459190b41923be57744aa6989f9a54a51,http://pdfs.semanticscholar.org/334a/c2a459190b41923be57744aa6989f9a54a51.pdf
+33e20449aa40488c6d4b430a48edf5c4b43afdab,http://mplab.ucsd.edu/wordpress/wp-content/uploads/EngagementRecognitionFinal.pdf
+333e7ad7f915d8ee3bb43a93ea167d6026aa3c22,http://www.eurecom.fr/en/publication/4277/download/mm-publi-4277.pdf
+334166a942acb15ccc4517cefde751a381512605,http://pdfs.semanticscholar.org/3341/66a942acb15ccc4517cefde751a381512605.pdf
+33403e9b4bbd913ae9adafc6751b52debbd45b0e,http://pdfs.semanticscholar.org/3340/3e9b4bbd913ae9adafc6751b52debbd45b0e.pdf
+33ad23377eaead8955ed1c2b087a5e536fecf44e,http://vis-www.cs.umass.edu/papers/gloc_cvpr13.pdf
+053b263b4a4ccc6f9097ad28ebf39c2957254dfb,http://pdfs.semanticscholar.org/7a49/4b4489408ec3adea15817978ecd2e733f5fe.pdf
+054756fa720bdcf1d320ad7a353e54ca53d4d3af,http://www.stat.ucla.edu/~yuille/Pubs15/JianyuWangSemanticCVPR2015%20(1).pdf
+05b8673d810fadf888c62b7e6c7185355ffa4121,https://nannanwang.github.io/My_Papers/IJCV2013.pdf
+056d5d942084428e97c374bb188efc386791e36d,http://pdfs.semanticscholar.org/056d/5d942084428e97c374bb188efc386791e36d.pdf
+05e658fed4a1ce877199a4ce1a8f8cf6f449a890,http://pdfs.semanticscholar.org/05e6/58fed4a1ce877199a4ce1a8f8cf6f449a890.pdf
+05ad478ca69b935c1bba755ac1a2a90be6679129,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Turakhia_Attribute_Dominance_What_2013_ICCV_paper.pdf
+0595d18e8d8c9fb7689f636341d8a55cc15b3e6a,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_102.pdf
+0573f3d2754df3a717368a6cbcd940e105d67f0b,http://cs.anu.edu.au/few/EmotiW_icmi_draft_ver_1_0.pdf
+05a0d04693b2a51a8131d195c68ad9f5818b2ce1,http://pdfs.semanticscholar.org/05a0/d04693b2a51a8131d195c68ad9f5818b2ce1.pdf
+0562fc7eca23d47096472a1d42f5d4d086e21871,http://pdfs.semanticscholar.org/0562/fc7eca23d47096472a1d42f5d4d086e21871.pdf
+054738ce39920975b8dcc97e01b3b6cc0d0bdf32,http://ita.ucsd.edu/workshop/16/files/paper/paper_2663.pdf
+05bcc5235721fd6a465a63774d28720bacc60858,http://www.site.uottawa.ca/~fshi098/papers/Gradient_Boundary_Histograms_for_Action_Recognition.pdf
+05e03c48f32bd89c8a15ba82891f40f1cfdc7562,http://files.is.tue.mpg.de/black/papers/rgapami.pdf
+05a312478618418a2efb0a014b45acf3663562d7,http://people.ee.duke.edu/~lcarin/AccelGibbs.pdf
+056ba488898a1a1b32daec7a45e0d550e0c51ae4,http://pdfs.semanticscholar.org/056b/a488898a1a1b32daec7a45e0d550e0c51ae4.pdf
+050fdbd2e1aa8b1a09ed42b2e5cc24d4fe8c7371,http://www.springer.com/cda/content/document/cda_downloaddocument/9783319587707-t1.pdf?SGWID=0-0-45-1607395-p180855259
+0509c442550571907258f07aad9da9d00b1e468b,https://pdfs.semanticscholar.org/0509/c442550571907258f07aad9da9d00b1e468b.pdf
+056294ff40584cdce81702b948f88cebd731a93e,https://arxiv.org/pdf/1506.08438v3.pdf
+052880031be0a760a5b606b2ad3d22f237e8af70,http://pdfs.semanticscholar.org/0528/80031be0a760a5b606b2ad3d22f237e8af70.pdf
+055de0519da7fdf27add848e691087e0af166637,http://pdfs.semanticscholar.org/d3f9/cf3fb66326e456587acb18cf3196d1e314c7.pdf
+0515e43c92e4e52254a14660718a9e498bd61cf5,http://pdfs.semanticscholar.org/3a78/5f86c2109fe1ff242dcb26211abfb9b0a870.pdf
+053c2f592a7f153e5f3746aa5ab58b62f2cf1d21,http://pdfs.semanticscholar.org/053c/2f592a7f153e5f3746aa5ab58b62f2cf1d21.pdf
+0568fc777081cbe6de95b653644fec7b766537b2,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Liu_Learning_Expressionlets_on_2014_CVPR_paper.pdf
+05d80c59c6fcc4652cfc38ed63d4c13e2211d944,http://static.googleusercontent.com/external_content/untrusted_dlcp/research.google.com/en/us/pubs/archive/35389.pdf
+05ea7930ae26165e7e51ff11b91c7aa8d7722002,http://www.stat.ucla.edu/~sczhu/papers/PAMI_car_occlusion_AOG.pdf
+055530f7f771bb1d5f352e2758d1242408d34e4d,http://pdfs.semanticscholar.org/0555/30f7f771bb1d5f352e2758d1242408d34e4d.pdf
+050eda213ce29da7212db4e85f948b812a215660,http://pdfs.semanticscholar.org/b598/4a1044d72224f99e959746a452fc1927a257.pdf
+051a84f0e39126c1ebeeb379a405816d5d06604d,http://static.springer.com/sgw/documents/1348632/application/pdf/Cognitive+Computation_Biometric+Recognition+Performing+in+a+Bioinspired+System.pdf
+05e3acc8afabc86109d8da4594f3c059cf5d561f,https://www.cs.rochester.edu/u/cxu22/p/cvpr2016_a2s2_poster.pdf
+05f4d907ee2102d4c63a3dc337db7244c570d067,http://pdfs.semanticscholar.org/3c52/2c9707eb795e0dba69202f1ec946a9072661.pdf
+0559fb9f5e8627fecc026c8ee6f7ad30e54ee929,http://pdfs.semanticscholar.org/0559/fb9f5e8627fecc026c8ee6f7ad30e54ee929.pdf
+05a7be10fa9af8fb33ae2b5b72d108415519a698,http://jankautz.com/publications/MMFusion4Video_ACMM16.pdf
+05318a267226f6d855d83e9338eaa9e718b2a8dd,https://fruct.org/publications/fruct16/files/Khr.pdf
+057d5f66a873ec80f8ae2603f937b671030035e6,http://cs.stanford.edu/~roozbeh/papers/Mottaghi16cvpr_a.pdf
+05c91e8a29483ced50c5f2d869617b80f7dacdd9,http://www.cs.rochester.edu/~mehoque/Publications/2013/13.Hoque-etal-MACH-UbiComp.pdf
+0580edbd7865414c62a36da9504d1169dea78d6f,https://arxiv.org/pdf/1611.04251v1.pdf
+050a3346e44ca720a54afbf57d56b1ee45ffbe49,https://www.d2.mpi-inf.mpg.de/sites/default/files/cvpr16.pdf
+0517d08da7550241fb2afb283fc05d37fce5d7b7,http://pdfs.semanticscholar.org/0517/d08da7550241fb2afb283fc05d37fce5d7b7.pdf
+05f3d1e9fb254b275354ca69018e9ed321dd8755,http://pdfs.semanticscholar.org/05f3/d1e9fb254b275354ca69018e9ed321dd8755.pdf
+05e96d76ed4a044d8e54ef44dac004f796572f1a,http://www.cs.ucsb.edu/~mturk/595/papers/BRONSTEIN.pdf
+051f03bc25ec633592aa2ff5db1d416b705eac6c,http://www.cse.msu.edu/biometrics/Publications/Face/LiaoJain_PartialFR_AlignmentFreeApproach_ICJB11.pdf
+05270b68547a2cd5bda302779cfc5dda876ae538,http://www.cs.sfu.ca/~mori/courses/cmpt882/fall05/papers/laplacianfaces.pdf
+9d8ff782f68547cf72b7f3f3beda9dc3e8ecfce6,http://pdfs.semanticscholar.org/9d8f/f782f68547cf72b7f3f3beda9dc3e8ecfce6.pdf
+9d42df42132c3d76e3447ea61e900d3a6271f5fe,http://pdfs.semanticscholar.org/9d42/df42132c3d76e3447ea61e900d3a6271f5fe.pdf
+9d55ec73cab779403cd933e6eb557fb04892b634,http://pdfs.semanticscholar.org/9d55/ec73cab779403cd933e6eb557fb04892b634.pdf
+9d8fd639a7aeab0dd1bc6eef9d11540199fd6fe2,http://pdfs.semanticscholar.org/9d8f/d639a7aeab0dd1bc6eef9d11540199fd6fe2.pdf
+9d357bbf014289fb5f64183c32aa64dc0bd9f454,http://pdfs.semanticscholar.org/9d35/7bbf014289fb5f64183c32aa64dc0bd9f454.pdf
+9d66de2a59ec20ca00a618481498a5320ad38481,http://www.cs.iit.edu/~xli/paper/Conf/POP-ICDCS15.pdf
+9d839dfc9b6a274e7c193039dfa7166d3c07040b,http://www.vision.ee.ethz.ch/en/publications/papers/proceedings/eth_biwi_00869.pdf
+9dcc6dde8d9f132577290d92a1e76b5decc6d755,http://pdfs.semanticscholar.org/a36a/3cd13c59777b6b07e41c4026e55b55e8096f.pdf
+9d36c81b27e67c515df661913a54a797cd1260bb,http://pdfs.semanticscholar.org/9d36/c81b27e67c515df661913a54a797cd1260bb.pdf
+9d757c0fede931b1c6ac344f67767533043cba14,http://pdfs.semanticscholar.org/9d75/7c0fede931b1c6ac344f67767533043cba14.pdf
+9d941a99e6578b41e4e32d57ece580c10d578b22,http://pdfs.semanticscholar.org/9d94/1a99e6578b41e4e32d57ece580c10d578b22.pdf
+9d60ad72bde7b62be3be0c30c09b7d03f9710c5f,http://pdfs.semanticscholar.org/9d60/ad72bde7b62be3be0c30c09b7d03f9710c5f.pdf
+9d896605fbf93315b68d4ee03be0770077f84e40,http://pdfs.semanticscholar.org/9d89/6605fbf93315b68d4ee03be0770077f84e40.pdf
+9d61b0beb3c5903fc3032655dc0fd834ec0b2af3,http://pdfs.semanticscholar.org/c5ac/a3f653e2e8a58888492524fc1480608457b7.pdf
+9d24179aa33a94c8c61f314203bf9e906d6b64de,http://www.decom.ufop.br/sibgrapi2012/eproceedings/technical/ts9/102146_3.pdf
+9d3aa3b7d392fad596b067b13b9e42443bbc377c,http://pdfs.semanticscholar.org/9d3a/a3b7d392fad596b067b13b9e42443bbc377c.pdf
+9d06d43e883930ddb3aa6fe57c6a865425f28d44,http://pdfs.semanticscholar.org/dd08/039eb271af93810ba392728ff481d8ce7496.pdf
+9c1305383ce2c108421e9f5e75f092eaa4a5aa3c,http://pdfs.semanticscholar.org/9c13/05383ce2c108421e9f5e75f092eaa4a5aa3c.pdf
+9cfb3a68fb10a59ec2a6de1b24799bf9154a8fd1,http://pdfs.semanticscholar.org/9cfb/3a68fb10a59ec2a6de1b24799bf9154a8fd1.pdf
+9c1860de6d6e991a45325c997bf9651c8a9d716f,http://pdfs.semanticscholar.org/d02c/54192dbd0798b43231efe1159d6b4375ad36.pdf
+9c9ef6a46fb6395702fad622f03ceeffbada06e5,http://pdfs.semanticscholar.org/f1e3/d1d26e39f98608037b195761f61fa7532925.pdf
+9c1cdb795fd771003da4378f9a0585730d1c3784,http://pdfs.semanticscholar.org/9c1c/db795fd771003da4378f9a0585730d1c3784.pdf
+9c25e89c80b10919865b9c8c80aed98d223ca0c6,http://pdfs.semanticscholar.org/9c25/e89c80b10919865b9c8c80aed98d223ca0c6.pdf
+9c7444c6949427994b430787a153d5cceff46d5c,http://pdfs.semanticscholar.org/9c74/44c6949427994b430787a153d5cceff46d5c.pdf
+9c781f7fd5d8168ddae1ce5bb4a77e3ca12b40b6,http://pdfs.semanticscholar.org/9c78/1f7fd5d8168ddae1ce5bb4a77e3ca12b40b6.pdf
+9c373438285101d47ab9332cdb0df6534e3b93d1,http://pdfs.semanticscholar.org/9c37/3438285101d47ab9332cdb0df6534e3b93d1.pdf
+9cbb6e42a35f26cf1d19f4875cd7f6953f10b95d,http://pdfs.semanticscholar.org/9cbb/6e42a35f26cf1d19f4875cd7f6953f10b95d.pdf
+9c4cc11d0df2de42d6593f5284cfdf3f05da402a,http://pdfs.semanticscholar.org/ce1a/f0e944260efced743f371ba0cb06878582b6.pdf
+9cd6a81a519545bf8aa9023f6e879521f85d4cd1,http://pdfs.semanticscholar.org/9cd6/a81a519545bf8aa9023f6e879521f85d4cd1.pdf
+9cadd166893f1b8aaecb27280a0915e6694441f5,http://pdfs.semanticscholar.org/9cad/d166893f1b8aaecb27280a0915e6694441f5.pdf
+02601d184d79742c7cd0c0ed80e846d95def052e,http://arxiv.org/pdf/1503.00488v3.pdf
+02cc96ad997102b7c55e177ac876db3b91b4e72c,http://www.micc.unifi.it/wp-content/uploads/2015/12/2015_museum-visitors-dataset.pdf
+02a98118ce990942432c0147ff3c0de756b4b76a,http://eprints.pascal-network.org/archive/00005029/01/LaptevMarszalekSchmidRozenfeld-CVPR08-HumanActions.pdf
+02e43d9ca736802d72824892c864e8cfde13718e,https://qmro.qmul.ac.uk/xmlui/bitstream/handle/123456789/10075/shi%20Transferring%20a%20semantic%20representation%202015%20Accepted.pdf?sequence=1
+02fda07735bdf84554c193811ba4267c24fe2e4a,http://www.cbsr.ia.ac.cn/Li%20Group/papers/Li-IR-Face-PAMI-07.pdf
+02431ed90700d5cfe4e3d3a20f1e97de3e131569,http://www.di.ens.fr/~bojanowski/papers/bojanowski13finding.pdf
+023ed32ac3ea6029f09b8c582efbe3866de7d00a,http://pdfs.semanticscholar.org/023e/d32ac3ea6029f09b8c582efbe3866de7d00a.pdf
+0241513eeb4320d7848364e9a7ef134a69cbfd55,http://videolectures.net/site/normal_dl/tag=71121/cvpr2010_yang_stis_01.v1.pdf
+02dd0af998c3473d85bdd1f77254ebd71e6158c6,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Wang_PPP_Joint_Pointwise_CVPR_2016_paper.pdf
+0290523cabea481e3e147b84dcaab1ef7a914612,http://pdfs.semanticscholar.org/0290/523cabea481e3e147b84dcaab1ef7a914612.pdf
+0229829e9a1eed5769a2b5eccddcaa7cd9460b92,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1A_098_ext.pdf
+025720574ef67672c44ba9e7065a83a5d6075c36,http://pdfs.semanticscholar.org/915f/dd2fdc7880074bd1c1d596f7e7d19ab34e8f.pdf
+029317f260b3303c20dd58e8404a665c7c5e7339,http://www.nlpr.ia.ac.cn/2009papers/gjkw/gk32.pdf?origin=publication_detail
+026e4ee480475e63ae68570d73388f8dfd4b4cde,http://pdfs.semanticscholar.org/026e/4ee480475e63ae68570d73388f8dfd4b4cde.pdf
+02e628e99f9a1b295458cb453c09863ea1641b67,http://pdfs.semanticscholar.org/02e6/28e99f9a1b295458cb453c09863ea1641b67.pdf
+0273414ba7d56ab9ff894959b9d46e4b2fef7fd0,http://pdfs.semanticscholar.org/3ae9/29d33dd1e6acdf6c907a1115e5a21f6cb076.pdf
+02e133aacde6d0977bca01ffe971c79097097b7f,http://pdfs.semanticscholar.org/02e1/33aacde6d0977bca01ffe971c79097097b7f.pdf
+02567fd428a675ca91a0c6786f47f3e35881bcbd,https://arxiv.org/pdf/1611.01731.pdf
+029b53f32079063047097fa59cfc788b2b550c4b,http://pdfs.semanticscholar.org/b71c/73fcae520f6a5cdbce18c813633fb3d66342.pdf
+02bd665196bd50c4ecf05d6852a4b9ba027cd9d0,http://arxiv.org/pdf/1310.2880v6.pdf
+02c38fa9a8ada6040ef21de17daf8d5e5cdc60c7,http://members.e-inclusion.crim.ca/files/articles/CRV_2006.pdf
+021a19e240f0ae0554eff814e838e1e396be6572,http://ci2cv.net/static/papers/2009_ICCV_Saragih_2.pdf
+026b5b8062e5a8d86c541cfa976f8eee97b30ab8,http://www.iab-rubric.org/papers/deeplearningvideo-CR.pdf
+0235b2d2ae306b7755483ac4f564044f46387648,http://pdfs.semanticscholar.org/0235/b2d2ae306b7755483ac4f564044f46387648.pdf
+02467703b6e087799e04e321bea3a4c354c5487d,http://biometrics.cse.msu.edu/Publications/Face/AdamsAllenMillerKalkaJain_CVPRWB2016_GRPR.pdf
+02e39f23e08c2cb24d188bf0ca34141f3cc72d47,http://luks.fe.uni-lj.si/sl/osebje/vitomir/pub/ICASSP2010.pdf
+023be757b1769ecb0db810c95c010310d7daf00b,http://pdfs.semanticscholar.org/023b/e757b1769ecb0db810c95c010310d7daf00b.pdf
+026a9cfe3135b7b62279bc08e2fb97e0e9fad5c4,http://perso.telecom-paristech.fr/~sahbi/jstars2017.pdf
+0278acdc8632f463232e961563e177aa8c6d6833,http://www.pitt.edu/~jeffcohn/biblio/TPAMI2547397%20FINAL.pdf
+0209389b8369aaa2a08830ac3b2036d4901ba1f1,https://arxiv.org/pdf/1612.01202v2.pdf
+02c993d361dddba9737d79e7251feca026288c9c,http://eprints.eemcs.utwente.nl/26377/01/Automatic_player_detection_and_recognition_in_images_using_AdaBoost.pdf
+02239ae5e922075a354169f75f684cad8fdfd5ab,http://ai2-website.s3.amazonaws.com/publications/CVPR_2017_Situation.pdf
+02d650d8a3a9daaba523433fbe93705df0a7f4b1,http://pdfs.semanticscholar.org/02d6/50d8a3a9daaba523433fbe93705df0a7f4b1.pdf
+0294f992f8dfd8748703f953925f9aee14e1b2a2,http://pdfs.semanticscholar.org/0294/f992f8dfd8748703f953925f9aee14e1b2a2.pdf
+02820c1491b10a1ff486fed32c269e4077c36551,https://arxiv.org/pdf/1610.07930v1.pdf
+a458b319f5a2763ff9c6dc959eefa77673c56671,http://people.tamu.edu/~amir.tahmasbi/publications/Fisher_ICCEA2010.pdf
+a46283e90bcdc0ee35c680411942c90df130f448,http://pdfs.semanticscholar.org/a462/83e90bcdc0ee35c680411942c90df130f448.pdf
+a4a5ad6f1cc489427ac1021da7d7b70fa9a770f2,http://pdfs.semanticscholar.org/a4a5/ad6f1cc489427ac1021da7d7b70fa9a770f2.pdf
+a4876b7493d8110d4be720942a0f98c2d116d2a0,http://pdfs.semanticscholar.org/a487/6b7493d8110d4be720942a0f98c2d116d2a0.pdf
+a40f8881a36bc01f3ae356b3e57eac84e989eef0,http://pdfs.semanticscholar.org/a40f/8881a36bc01f3ae356b3e57eac84e989eef0.pdf
+a4a0b5f08198f6d7ea2d1e81bd97fea21afe3fc3,http://pdfs.semanticscholar.org/a4a0/b5f08198f6d7ea2d1e81bd97fea21afe3fc3.pdf
+a46086e210c98dcb6cb9a211286ef906c580f4e8,http://pdfs.semanticscholar.org/dc94/43e3ae2fe70282b1b30e3eda3717b58c0808.pdf
+a44590528b18059b00d24ece4670668e86378a79,http://pdfs.semanticscholar.org/a445/90528b18059b00d24ece4670668e86378a79.pdf
+a472d59cff9d822f15f326a874e666be09b70cfd,http://pdfs.semanticscholar.org/a472/d59cff9d822f15f326a874e666be09b70cfd.pdf
+a4c430b7d849a8f23713dc283794d8c1782198b2,http://pdfs.semanticscholar.org/a4c4/30b7d849a8f23713dc283794d8c1782198b2.pdf
+a4cc626da29ac48f9b4ed6ceb63081f6a4b304a2,http://pdfs.semanticscholar.org/a4cc/626da29ac48f9b4ed6ceb63081f6a4b304a2.pdf
+a4f37cfdde3af723336205b361aefc9eca688f5c,http://pdfs.semanticscholar.org/a4f3/7cfdde3af723336205b361aefc9eca688f5c.pdf
+a481e394f58f2d6e998aa320dad35c0d0e15d43c,http://www.cs.colostate.edu/~draper/papers/wigness_wacv14.pdf
+a30869c5d4052ed1da8675128651e17f97b87918,http://pdfs.semanticscholar.org/a308/69c5d4052ed1da8675128651e17f97b87918.pdf
+a3ebacd8bcbc7ddbd5753935496e22a0f74dcf7b,http://pdfs.semanticscholar.org/a3eb/acd8bcbc7ddbd5753935496e22a0f74dcf7b.pdf
+a3017bb14a507abcf8446b56243cfddd6cdb542b,http://pdfs.semanticscholar.org/a301/7bb14a507abcf8446b56243cfddd6cdb542b.pdf
+a3c8c7da177cd08978b2ad613c1d5cb89e0de741,http://pdfs.semanticscholar.org/a3c8/c7da177cd08978b2ad613c1d5cb89e0de741.pdf
+a378fc39128107815a9a68b0b07cffaa1ed32d1f,http://pdfs.semanticscholar.org/a378/fc39128107815a9a68b0b07cffaa1ed32d1f.pdf
+a34d75da87525d1192bda240b7675349ee85c123,http://pdfs.semanticscholar.org/a34d/75da87525d1192bda240b7675349ee85c123.pdf
+a3dc109b1dff3846f5a2cc1fe2448230a76ad83f,http://pdfs.semanticscholar.org/a3dc/109b1dff3846f5a2cc1fe2448230a76ad83f.pdf
+a3f684930c5c45fcb56a2b407d26b63879120cbf,http://pdfs.semanticscholar.org/a3f6/84930c5c45fcb56a2b407d26b63879120cbf.pdf
+a33f20773b46283ea72412f9b4473a8f8ad751ae,http://pdfs.semanticscholar.org/a33f/20773b46283ea72412f9b4473a8f8ad751ae.pdf
+a3a6a6a2eb1d32b4dead9e702824375ee76e3ce7,http://pdfs.semanticscholar.org/a3a6/a6a2eb1d32b4dead9e702824375ee76e3ce7.pdf
+a32d4195f7752a715469ad99cb1e6ebc1a099de6,http://pdfs.semanticscholar.org/a32d/4195f7752a715469ad99cb1e6ebc1a099de6.pdf
+a3d78bc94d99fdec9f44a7aa40c175d5a106f0b9,http://pdfs.semanticscholar.org/a3d7/8bc94d99fdec9f44a7aa40c175d5a106f0b9.pdf
+a3eab933e1b3db1a7377a119573ff38e780ea6a3,http://mirlab.org/conference_papers/International_Conference/ICASSP%202010/pdfs/0000838.pdf
+a308077e98a611a977e1e85b5a6073f1a9bae6f0,http://pdfs.semanticscholar.org/a308/077e98a611a977e1e85b5a6073f1a9bae6f0.pdf
+a35dd69d63bac6f3296e0f1d148708cfa4ba80f6,http://pdfs.semanticscholar.org/a35d/d69d63bac6f3296e0f1d148708cfa4ba80f6.pdf
+a3a34c1b876002e0393038fcf2bcb00821737105,http://pdfs.semanticscholar.org/a3a3/4c1b876002e0393038fcf2bcb00821737105.pdf
+a3f1db123ce1818971a57330d82901683d7c2b67,http://pdfs.semanticscholar.org/a3f1/db123ce1818971a57330d82901683d7c2b67.pdf
+a3a97bb5131e7e67316b649bbc2432aaa1a6556e,http://pdfs.semanticscholar.org/a3a9/7bb5131e7e67316b649bbc2432aaa1a6556e.pdf
+a35d3ba191137224576f312353e1e0267e6699a1,http://pdfs.semanticscholar.org/a35d/3ba191137224576f312353e1e0267e6699a1.pdf
+a3a2f3803bf403262b56ce88d130af15e984fff0,http://pdfs.semanticscholar.org/e538/e1f6557d2920b449249606f909b665fbb924.pdf
+b56f3a7c50bfcd113d0ba84e6aa41189e262d7ae,http://pdfs.semanticscholar.org/b6d3/c8322d8e6a0212456cf38c6ef59c13d062dd.pdf
+b5cd9e5d81d14868f1a86ca4f3fab079f63a366d,https://ivi.fnwi.uva.nl/isis/publications/2016/AgharwalWCACV2016/AgharwalWCACV2016.pdf
+b5cd8151f9354ee38b73be1d1457d28e39d3c2c6,http://pdfs.semanticscholar.org/b5cd/8151f9354ee38b73be1d1457d28e39d3c2c6.pdf
+b5fc4f9ad751c3784eaf740880a1db14843a85ba,http://pdfs.semanticscholar.org/b5fc/4f9ad751c3784eaf740880a1db14843a85ba.pdf
+b506aa23949b6d1f0c868ad03aaaeb5e5f7f6b57,http://pdfs.semanticscholar.org/b506/aa23949b6d1f0c868ad03aaaeb5e5f7f6b57.pdf
+b599f323ee17f12bf251aba928b19a09bfbb13bb,http://pdfs.semanticscholar.org/b599/f323ee17f12bf251aba928b19a09bfbb13bb.pdf
+b5da4943c348a6b4c934c2ea7330afaf1d655e79,http://pdfs.semanticscholar.org/b5da/4943c348a6b4c934c2ea7330afaf1d655e79.pdf
+b5402c03a02b059b76be829330d38db8e921e4b5,http://pdfs.semanticscholar.org/b540/2c03a02b059b76be829330d38db8e921e4b5.pdf
+b5160e95192340c848370f5092602cad8a4050cd,http://pdfs.semanticscholar.org/dd71/dc78e75f0de27263d508b3a8b29921cfea03.pdf
+b52c0faba5e1dc578a3c32a7f5cfb6fb87be06ad,http://pdfs.semanticscholar.org/b52c/0faba5e1dc578a3c32a7f5cfb6fb87be06ad.pdf
+b56530be665b0e65933adec4cc5ed05840c37fc4,http://kobus.ca/research/publications/07/cvpr-07-region-www.pdf
+b5f4e617ac3fc4700ec8129fcd0dcf5f71722923,http://pdfs.semanticscholar.org/c4dd/f94ed445bad0793cd4ba2813506d02221ec0.pdf
+b52886610eda6265a2c1aaf04ce209c047432b6d,http://infolab.stanford.edu/~wangz/project/imsearch/Aesthetics/TAC16/xu.pdf
+b51b4ef97238940aaa4f43b20a861eaf66f67253,http://pdfs.semanticscholar.org/b51b/4ef97238940aaa4f43b20a861eaf66f67253.pdf
+b5d7c5aba7b1ededdf61700ca9d8591c65e84e88,http://pdfs.semanticscholar.org/b5d7/c5aba7b1ededdf61700ca9d8591c65e84e88.pdf
+b5c749f98710c19b6c41062c60fb605e1ef4312a,http://www.yugangjiang.info/publication/icmr15-eval2stream.pdf
+b5667d087aafcf6b91f3c77aa90cee1ac185f8f1,http://www-ee.ccny.cuny.edu/wwwn/yltian/Publications/ICIP17.pdf
+b5857b5bd6cb72508a166304f909ddc94afe53e3,http://pdfs.semanticscholar.org/b585/7b5bd6cb72508a166304f909ddc94afe53e3.pdf
+b51e3d59d1bcbc023f39cec233f38510819a2cf9,http://pdfs.semanticscholar.org/b51e/3d59d1bcbc023f39cec233f38510819a2cf9.pdf
+b54c477885d53a27039c81f028e710ca54c83f11,http://coewww.rutgers.edu/riul/research/papers/pdf/skmspami.pdf
+b503f481120e69b62e076dcccf334ee50559451e,http://pdfs.semanticscholar.org/b503/f481120e69b62e076dcccf334ee50559451e.pdf
+b55d0c9a022874fb78653a0004998a66f8242cad,http://pdfs.semanticscholar.org/b55d/0c9a022874fb78653a0004998a66f8242cad.pdf
+b5930275813a7e7a1510035a58dd7ba7612943bc,http://pdfs.semanticscholar.org/b593/0275813a7e7a1510035a58dd7ba7612943bc.pdf
+b59c8b44a568587bc1b61d130f0ca2f7a2ae3b88,http://pdfs.semanticscholar.org/b59c/8b44a568587bc1b61d130f0ca2f7a2ae3b88.pdf
+b249f10a30907a80f2a73582f696bc35ba4db9e2,http://pdfs.semanticscholar.org/f06d/6161eef9325285b32356e1c4b5527479eb9b.pdf
+b2a0e5873c1a8f9a53a199eecae4bdf505816ecb,http://pdfs.semanticscholar.org/b2a0/e5873c1a8f9a53a199eecae4bdf505816ecb.pdf
+b2cd92d930ed9b8d3f9dfcfff733f8384aa93de8,http://pdfs.semanticscholar.org/b2cd/92d930ed9b8d3f9dfcfff733f8384aa93de8.pdf
+b216040f110d2549f61e3f5a7261cab128cab361,http://pdfs.semanticscholar.org/b216/040f110d2549f61e3f5a7261cab128cab361.pdf
+b261439b5cde39ec52d932a222450df085eb5a91,http://pdfs.semanticscholar.org/b261/439b5cde39ec52d932a222450df085eb5a91.pdf
+b234cd7788a7f7fa410653ad2bafef5de7d5ad29,http://pdfs.semanticscholar.org/b234/cd7788a7f7fa410653ad2bafef5de7d5ad29.pdf
+b235b4ccd01a204b95f7408bed7a10e080623d2e,http://pdfs.semanticscholar.org/b235/b4ccd01a204b95f7408bed7a10e080623d2e.pdf
+b29b42f7ab8d25d244bfc1413a8d608cbdc51855,http://pdfs.semanticscholar.org/b29b/42f7ab8d25d244bfc1413a8d608cbdc51855.pdf
+b2e5df82c55295912194ec73f0dca346f7c113f6,http://pdfs.semanticscholar.org/b2e5/df82c55295912194ec73f0dca346f7c113f6.pdf
+b2e6944bebab8e018f71f802607e6e9164ad3537,http://pdfs.semanticscholar.org/b2e6/944bebab8e018f71f802607e6e9164ad3537.pdf
+b2c25af8a8e191c000f6a55d5f85cf60794c2709,http://pdfs.semanticscholar.org/b2c2/5af8a8e191c000f6a55d5f85cf60794c2709.pdf
+b239a756f22201c2780e46754d06a82f108c1d03,http://www.rci.rutgers.edu/~vmp93/Conference_pub/Fusion_FG_camera_ready.pdf
+b20cfbb2348984b4e25b6b9174f3c7b65b6aed9e,http://pdfs.semanticscholar.org/b20c/fbb2348984b4e25b6b9174f3c7b65b6aed9e.pdf
+d961617db4e95382ba869a7603006edc4d66ac3b,http://pdfs.semanticscholar.org/d961/617db4e95382ba869a7603006edc4d66ac3b.pdf
+d9810786fccee5f5affaef59bc58d2282718af9b,http://pdfs.semanticscholar.org/d981/0786fccee5f5affaef59bc58d2282718af9b.pdf
+d94d7ff6f46ad5cab5c20e6ac14c1de333711a0c,http://mirlab.org/conference_papers/International_Conference/ICASSP%202017/pdfs/0003031.pdf
+d930ec59b87004fd172721f6684963e00137745f,http://pdfs.semanticscholar.org/d930/ec59b87004fd172721f6684963e00137745f.pdf
+d9739d1b4478b0bf379fe755b3ce5abd8c668f89,http://pdfs.semanticscholar.org/d973/9d1b4478b0bf379fe755b3ce5abd8c668f89.pdf
+d9c4586269a142faee309973e2ce8cde27bda718,http://pdfs.semanticscholar.org/d9c4/586269a142faee309973e2ce8cde27bda718.pdf
+d912b8d88d63a2f0cb5d58164e7414bfa6b41dfa,http://pdfs.semanticscholar.org/d912/b8d88d63a2f0cb5d58164e7414bfa6b41dfa.pdf
+d9318c7259e394b3060b424eb6feca0f71219179,http://biometrics.cse.msu.edu/Publications/Face/ParkJainFaceSoftBio_TIFS10.pdf
+d9a1dd762383213741de4c1c1fd9fccf44e6480d,http://pdfs.semanticscholar.org/d9a1/dd762383213741de4c1c1fd9fccf44e6480d.pdf
+d963e640d0bf74120f147329228c3c272764932b,http://pdfs.semanticscholar.org/d963/e640d0bf74120f147329228c3c272764932b.pdf
+d95e6185f82e3ef3880a98122522eca8c8c3f34e,http://bbs.utdallas.edu/facelab/docs/4_05_otoole-pami.pdf
+d9ef1a80738bbdd35655c320761f95ee609b8f49,http://pdfs.semanticscholar.org/d9ef/1a80738bbdd35655c320761f95ee609b8f49.pdf
+d9327b9621a97244d351b5b93e057f159f24a21e,http://www.cil.pku.edu.cn/publications/papers/CS2010gusuicheng.pdf
+d915e634aec40d7ee00cbea96d735d3e69602f1a,http://pdfs.semanticscholar.org/d915/e634aec40d7ee00cbea96d735d3e69602f1a.pdf
+ac1d97a465b7cc56204af5f2df0d54f819eef8a6,http://pdfs.semanticscholar.org/ac1d/97a465b7cc56204af5f2df0d54f819eef8a6.pdf
+ac2e44622efbbab525d4301c83cb4d5d7f6f0e55,http://openaccess.thecvf.com/content_cvpr_2016/papers/Booth_A_3D_Morphable_CVPR_2016_paper.pdf
+ac6c3b3e92ff5fbcd8f7967696c7aae134bea209,http://pdfs.semanticscholar.org/ac6c/3b3e92ff5fbcd8f7967696c7aae134bea209.pdf
+ac21c8aceea6b9495574f8f9d916e571e2fc497f,http://pdfs.semanticscholar.org/ac21/c8aceea6b9495574f8f9d916e571e2fc497f.pdf
+ac6a9f80d850b544a2cbfdde7002ad5e25c05ac6,http://eprints.whiterose.ac.uk/104654/9/07289412.pdf
+aca273a9350b10b6e2ef84f0e3a327255207d0f5,http://pdfs.semanticscholar.org/efb2/4d35d8f6a46e1ff3800a2481bc7e681e255e.pdf
+aca75c032cfb0b2eb4c0ae56f3d060d8875e43f9,http://pdfs.semanticscholar.org/aca7/5c032cfb0b2eb4c0ae56f3d060d8875e43f9.pdf
+ac51d9ddbd462d023ec60818bac6cdae83b66992,http://pdfs.semanticscholar.org/ac51/d9ddbd462d023ec60818bac6cdae83b66992.pdf
+acc548285f362e6b08c2b876b628efceceeb813e,http://pdfs.semanticscholar.org/acc5/48285f362e6b08c2b876b628efceceeb813e.pdf
+ac820d67b313c38b9add05abef8891426edd5afb,http://pdfs.semanticscholar.org/da4e/76b789f7ea8ed6c6d26858ac8a12bb1413fe.pdf
+ac9a331327cceda4e23f9873f387c9fd161fad76,http://pdfs.semanticscholar.org/ac9a/331327cceda4e23f9873f387c9fd161fad76.pdf
+ac12ba5bf81de83991210b4cd95b4ad048317681,http://pdfs.semanticscholar.org/ac12/ba5bf81de83991210b4cd95b4ad048317681.pdf
+ac75c662568cbb7308400cc002469a14ff25edfd,http://www.dsp.toronto.edu/juwei/Publication/JuweiICIP04v2.pdf
+ac9dfbeb58d591b5aea13d13a83b1e23e7ef1fea,http://pdfs.semanticscholar.org/ac9d/fbeb58d591b5aea13d13a83b1e23e7ef1fea.pdf
+acb83d68345fe9a6eb9840c6e1ff0e41fa373229,http://pdfs.semanticscholar.org/acb8/3d68345fe9a6eb9840c6e1ff0e41fa373229.pdf
+ade1034d5daec9e3eba1d39ae3f33ebbe3e8e9a7,http://pdfs.semanticscholar.org/ade1/034d5daec9e3eba1d39ae3f33ebbe3e8e9a7.pdf
+ad8540379884ec03327076b562b63bc47e64a2c7,http://pdfs.semanticscholar.org/ad85/40379884ec03327076b562b63bc47e64a2c7.pdf
+adce9902dca7f4e8a9b9cf6686ec6a7c0f2a0ba6,http://openaccess.thecvf.com/content_iccv_2015/papers/Li_Two_Birds_One_ICCV_2015_paper.pdf
+adf7ccb81b8515a2d05fd3b4c7ce5adf5377d9be,http://pdfs.semanticscholar.org/adf7/ccb81b8515a2d05fd3b4c7ce5adf5377d9be.pdf
+ada73060c0813d957576be471756fa7190d1e72d,http://pdfs.semanticscholar.org/ada7/3060c0813d957576be471756fa7190d1e72d.pdf
+add50a7d882eb38e35fe70d11cb40b1f0059c96f,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1A_086_ext.pdf
+ad784332cc37720f03df1c576e442c9c828a587a,http://pdfs.semanticscholar.org/ad78/4332cc37720f03df1c576e442c9c828a587a.pdf
+ada42b99f882ba69d70fff68c9ccbaff642d5189,http://pdfs.semanticscholar.org/ba11/4dfdd12b0f4323a8f28cd2bd770dfa74673e.pdf
+ad6c7cc5c0f4ab273fef105ff3761d2c08609a20,https://people.cs.clemson.edu/~jzwang/1701863/mm2016/p1405-huo-ACM%20MM-Jing%20HUO-2016-10-19.pdf
+adfaf01773c8af859faa5a9f40fb3aa9770a8aa7,http://pdfs.semanticscholar.org/adfa/f01773c8af859faa5a9f40fb3aa9770a8aa7.pdf
+adf5caca605e07ee40a3b3408f7c7c92a09b0f70,http://pdfs.semanticscholar.org/adf5/caca605e07ee40a3b3408f7c7c92a09b0f70.pdf
+adaf2b138094981edd615dbfc4b7787693dbc396,http://pdfs.semanticscholar.org/adaf/2b138094981edd615dbfc4b7787693dbc396.pdf
+adc4bc7639d5f1c5ead8728882e2390339d061ed,https://www.researchgate.net/profile/Fanbo_Meng2/publication/224144294_Emotional_Audio-Visual_Speech_Synthesis_Based_on_PAD/links/00b49538fd61d3280d000000.pdf?origin=publication_list
+ad6745dd793073f81abd1f3246ba4102046da022,http://pdfs.semanticscholar.org/ad67/45dd793073f81abd1f3246ba4102046da022.pdf
+ad9cb522cc257e3c5d7f896fe6a526f6583ce46f,http://pdfs.semanticscholar.org/ad9c/b522cc257e3c5d7f896fe6a526f6583ce46f.pdf
+ad08c97a511091e0f59fc6a383615c0cc704f44a,http://pdfs.semanticscholar.org/ad08/c97a511091e0f59fc6a383615c0cc704f44a.pdf
+ad5a1621190d18dd429930ab5125c849ce7e4506,http://www.cs.csub.edu/~acruz/papers/10.1109-ICIP.2014.7025275.pdf
+ad37d01c4787d169daff7da52e80e2018aab6358,http://ibug.doc.ic.ac.uk/media/uploads/documents/bidirectional_newton_aam.pdf
+ad247138e751cefa3bb891c2fe69805da9c293d7,http://pdfs.semanticscholar.org/ad24/7138e751cefa3bb891c2fe69805da9c293d7.pdf
+ad75330953d9aacc05b5ca1a50c4fed3e7ca1e21,http://www.science.uva.nl/~asalah/dibeklioglu11design.pdf
+bbc4b376ebd296fb9848b857527a72c82828fc52,http://pdfs.semanticscholar.org/bbc4/b376ebd296fb9848b857527a72c82828fc52.pdf
+bb489e4de6f9b835d70ab46217f11e32887931a2,http://conteudo.icmc.usp.br/pessoas/moacir/p17sibgrapi-tutorial/2017-SIBGRAPI_Tutorial-Survey_Paper-Deep_Learning_for_Computer_Vision.pdf
+bba281fe9c309afe4e5cc7d61d7cff1413b29558,http://pdfs.semanticscholar.org/bba2/81fe9c309afe4e5cc7d61d7cff1413b29558.pdf
+bb557f4af797cae9205d5c159f1e2fdfe2d8b096,http://pdfs.semanticscholar.org/bb55/7f4af797cae9205d5c159f1e2fdfe2d8b096.pdf
+bb06ef67a49849c169781657be0bb717587990e0,http://www.eurasip.org/Proceedings/Eusipco/Eusipco2017/papers/1570342773.pdf
+bb22104d2128e323051fb58a6fe1b3d24a9e9a46,http://pdfs.semanticscholar.org/bb22/104d2128e323051fb58a6fe1b3d24a9e9a46.pdf
+bbe1332b4d83986542f5db359aee1fd9b9ba9967,http://pdfs.semanticscholar.org/bbe1/332b4d83986542f5db359aee1fd9b9ba9967.pdf
+bbe949c06dc4872c7976950b655788555fe513b8,http://www.quaero.org/media/files/bibliographie/ekenel_automaticfrequency.pdf
+bbcb4920b312da201bf4d2359383fb4ee3b17ed9,http://pdfs.semanticscholar.org/bbcb/4920b312da201bf4d2359383fb4ee3b17ed9.pdf
+bb6bf94bffc37ef2970410e74a6b6dc44a7f4feb,http://pdfs.semanticscholar.org/bb6b/f94bffc37ef2970410e74a6b6dc44a7f4feb.pdf
+bbf01aa347982592b3e4c9e4f433e05d30e71305,https://pdfs.semanticscholar.org/bbf0/1aa347982592b3e4c9e4f433e05d30e71305.pdf
+bbfe0527e277e0213aafe068113d719b2e62b09c,http://pdfs.semanticscholar.org/bbfe/0527e277e0213aafe068113d719b2e62b09c.pdf
+bbf1396eb826b3826c5a800975047beabde2f0de,http://pdfs.semanticscholar.org/bbf1/396eb826b3826c5a800975047beabde2f0de.pdf
+bb451dc2420e1a090c4796c19716f93a9ef867c9,http://pdfs.semanticscholar.org/bb45/1dc2420e1a090c4796c19716f93a9ef867c9.pdf
+bb69f750ccec9624f6dabd334251def2bbddf166,http://research.microsoft.com/en-us/um/people/leizhang/Paper/FG04-Yuxiao.pdf
+bb750b4c485bc90a47d4b2f723be4e4b74229f7a,http://pdfs.semanticscholar.org/bb75/0b4c485bc90a47d4b2f723be4e4b74229f7a.pdf
+d73d2c9a6cef79052f9236e825058d5d9cdc1321,http://pdfs.semanticscholar.org/d73d/2c9a6cef79052f9236e825058d5d9cdc1321.pdf
+d794ffece3533567d838f1bd7f442afee13148fd,http://pdfs.semanticscholar.org/d794/ffece3533567d838f1bd7f442afee13148fd.pdf
+d78077a7aa8a302d4a6a09fb9737ab489ae169a6,http://pdfs.semanticscholar.org/d780/77a7aa8a302d4a6a09fb9737ab489ae169a6.pdf
+d7312149a6b773d1d97c0c2b847609c07b5255ec,http://pdfs.semanticscholar.org/d731/2149a6b773d1d97c0c2b847609c07b5255ec.pdf
+d7d9c1fa77f3a3b3c2eedbeb02e8e7e49c955a2f,http://pdfs.semanticscholar.org/d7d9/c1fa77f3a3b3c2eedbeb02e8e7e49c955a2f.pdf
+d708ce7103a992634b1b4e87612815f03ba3ab24,http://pdfs.semanticscholar.org/d708/ce7103a992634b1b4e87612815f03ba3ab24.pdf
+d785fcf71cb22f9c33473cba35f075c1f0f06ffc,http://research.cs.rutgers.edu/~linzhong/PDF/Lin_cvpr2012.pdf
+d78373de773c2271a10b89466fe1858c3cab677f,http://pdfs.semanticscholar.org/d783/73de773c2271a10b89466fe1858c3cab677f.pdf
+d78fbd11f12cbc194e8ede761d292dc2c02d38a2,http://pdfs.semanticscholar.org/d78f/bd11f12cbc194e8ede761d292dc2c02d38a2.pdf
+d72973a72b5d891a4c2d873daeb1bc274b48cddf,http://pdfs.semanticscholar.org/d729/73a72b5d891a4c2d873daeb1bc274b48cddf.pdf
+d7d166aee5369b79ea2d71a6edd73b7599597aaa,http://pdfs.semanticscholar.org/d7d1/66aee5369b79ea2d71a6edd73b7599597aaa.pdf
+d79f9ada35e4410cd255db39d7cc557017f8111a,http://pdfs.semanticscholar.org/d79f/9ada35e4410cd255db39d7cc557017f8111a.pdf
+d0e895a272d684a91c1b1b1af29747f92919d823,http://pdfs.semanticscholar.org/d0e8/95a272d684a91c1b1b1af29747f92919d823.pdf
+d082f35534932dfa1b034499fc603f299645862d,http://pdfs.semanticscholar.org/d082/f35534932dfa1b034499fc603f299645862d.pdf
+d03265ea9200a993af857b473c6bf12a095ca178,http://pdfs.semanticscholar.org/d032/65ea9200a993af857b473c6bf12a095ca178.pdf
+d0ac9913a3b1784f94446db2f1fb4cf3afda151f,http://pdfs.semanticscholar.org/d0ac/9913a3b1784f94446db2f1fb4cf3afda151f.pdf
+d0eb3fd1b1750242f3bb39ce9ac27fc8cc7c5af0,http://pdfs.semanticscholar.org/d0eb/3fd1b1750242f3bb39ce9ac27fc8cc7c5af0.pdf
+d00c335fbb542bc628642c1db36791eae24e02b7,http://pdfs.semanticscholar.org/d00c/335fbb542bc628642c1db36791eae24e02b7.pdf
+d06c8e3c266fbae4026d122ec9bd6c911fcdf51d,http://pdfs.semanticscholar.org/d06c/8e3c266fbae4026d122ec9bd6c911fcdf51d.pdf
+d074b33afd95074d90360095b6ecd8bc4e5bb6a2,http://pdfs.semanticscholar.org/d074/b33afd95074d90360095b6ecd8bc4e5bb6a2.pdf
+d04d5692461d208dd5f079b98082eda887b62323,http://www.cbsr.ia.ac.cn/users/zlei/papers/ICB2015/ZLEI-ICB-15.pdf
+d05513c754966801f26e446db174b7f2595805ba,http://pdfs.semanticscholar.org/d055/13c754966801f26e446db174b7f2595805ba.pdf
+d03baf17dff5177d07d94f05f5791779adf3cd5f,http://pdfs.semanticscholar.org/d03b/af17dff5177d07d94f05f5791779adf3cd5f.pdf
+d0a21f94de312a0ff31657fd103d6b29db823caa,http://pdfs.semanticscholar.org/d0a2/1f94de312a0ff31657fd103d6b29db823caa.pdf
+d03e4e938bcbc25aa0feb83d8a0830f9cd3eb3ea,http://pdfs.semanticscholar.org/d03e/4e938bcbc25aa0feb83d8a0830f9cd3eb3ea.pdf
+d0d7671c816ed7f37b16be86fa792a1b29ddd79b,http://pdfs.semanticscholar.org/d0d7/671c816ed7f37b16be86fa792a1b29ddd79b.pdf
+d00787e215bd74d32d80a6c115c4789214da5edb,http://pdfs.semanticscholar.org/d007/87e215bd74d32d80a6c115c4789214da5edb.pdf
+be8c517406528edc47c4ec0222e2a603950c2762,http://pdfs.semanticscholar.org/be8c/517406528edc47c4ec0222e2a603950c2762.pdf
+beb3fd2da7f8f3b0c3ebceaa2150a0e65736d1a2,http://pdfs.semanticscholar.org/beb3/fd2da7f8f3b0c3ebceaa2150a0e65736d1a2.pdf
+be86d88ecb4192eaf512f29c461e684eb6c35257,http://pdfs.semanticscholar.org/be86/d88ecb4192eaf512f29c461e684eb6c35257.pdf
+beb49072f5ba79ed24750108c593e8982715498e,http://pdfs.semanticscholar.org/beb4/9072f5ba79ed24750108c593e8982715498e.pdf
+becd5fd62f6301226b8e150e1a5ec3180f748ff8,http://pdfs.semanticscholar.org/becd/5fd62f6301226b8e150e1a5ec3180f748ff8.pdf
+bebb8a97b2940a4e5f6e9d3caf6d71af21585eda,http://pdfs.semanticscholar.org/bebb/8a97b2940a4e5f6e9d3caf6d71af21585eda.pdf
+be07f2950771d318a78d2b64de340394f7d6b717,http://pdfs.semanticscholar.org/be07/f2950771d318a78d2b64de340394f7d6b717.pdf
+beb4546ae95f79235c5f3c0e9cc301b5d6fc9374,http://pdfs.semanticscholar.org/beb4/546ae95f79235c5f3c0e9cc301b5d6fc9374.pdf
+bec31269632c17206deb90cd74367d1e6586f75f,http://pdfs.semanticscholar.org/bec3/1269632c17206deb90cd74367d1e6586f75f.pdf
+be57d2aaab615ec8bc1dd2dba8bee41a4d038b85,https://www.cl.cam.ac.uk/~mmam3/pub/a19-mahmoud.pdf
+bef503cdfe38e7940141f70524ee8df4afd4f954,https://pdfs.semanticscholar.org/bef5/03cdfe38e7940141f70524ee8df4afd4f954.pdf
+beab10d1bdb0c95b2f880a81a747f6dd17caa9c2,http://pdfs.semanticscholar.org/beab/10d1bdb0c95b2f880a81a747f6dd17caa9c2.pdf
+b3b532e8ea6304446b1623e83b0b9a96968f926c,http://pdfs.semanticscholar.org/b3b5/32e8ea6304446b1623e83b0b9a96968f926c.pdf
+b37f57edab685dba5c23de00e4fa032a3a6e8841,http://pdfs.semanticscholar.org/b37f/57edab685dba5c23de00e4fa032a3a6e8841.pdf
+b3154d981eca98416074538e091778cbc031ca29,http://pdfs.semanticscholar.org/b315/4d981eca98416074538e091778cbc031ca29.pdf
+b340f275518aa5dd2c3663eed951045a5b8b0ab1,http://www.eecs.qmul.ac.uk/~sgg/papers/GongShanXiang_ACM_ICMI2007.pdf
+b375db63742f8a67c2a7d663f23774aedccc84e5,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2014/W17/papers/Conti_Brain-inspired_Classroom_Occupancy_2014_CVPR_paper.pdf
+b3330adb131fb4b6ebbfacce56f1aec2a61e0869,http://pdfs.semanticscholar.org/b333/0adb131fb4b6ebbfacce56f1aec2a61e0869.pdf
+b3c60b642a1c64699ed069e3740a0edeabf1922c,http://pdfs.semanticscholar.org/b3c6/0b642a1c64699ed069e3740a0edeabf1922c.pdf
+b3f3d6be11ace907c804c2d916830c85643e468d,http://pdfs.semanticscholar.org/b3f3/d6be11ace907c804c2d916830c85643e468d.pdf
+b3f7c772acc8bc42291e09f7a2b081024a172564,http://pdfs.semanticscholar.org/b3f7/c772acc8bc42291e09f7a2b081024a172564.pdf
+b3c398da38d529b907b0bac7ec586c81b851708f,http://www.cbsr.ia.ac.cn/publications/Stan/WHT-FG2004.pdf
+b3658514a0729694d86a8b89c875a66cde20480c,http://pdfs.semanticscholar.org/b365/8514a0729694d86a8b89c875a66cde20480c.pdf
+b3b4a7e29b9186e00d2948a1d706ee1605fe5811,http://pdfs.semanticscholar.org/b3b4/a7e29b9186e00d2948a1d706ee1605fe5811.pdf
+b32631f456397462b3530757f3a73a2ccc362342,http://pdfs.semanticscholar.org/b326/31f456397462b3530757f3a73a2ccc362342.pdf
+b33e8db8ccabdfc49211e46d78d09b14557d4cba,http://pdfs.semanticscholar.org/b33e/8db8ccabdfc49211e46d78d09b14557d4cba.pdf
+df8da144a695269e159fb0120bf5355a558f4b02,http://pdfs.semanticscholar.org/df8d/a144a695269e159fb0120bf5355a558f4b02.pdf
+dfd934ae448a1b8947d404b01303951b79b13801,http://pdfs.semanticscholar.org/dfd9/34ae448a1b8947d404b01303951b79b13801.pdf
+df0e280cae018cebd5b16ad701ad101265c369fa,http://pdfs.semanticscholar.org/df0e/280cae018cebd5b16ad701ad101265c369fa.pdf
+dfabe7ef245ca68185f4fcc96a08602ee1afb3f7,http://pdfs.semanticscholar.org/dfab/e7ef245ca68185f4fcc96a08602ee1afb3f7.pdf
+df51dfe55912d30fc2f792561e9e0c2b43179089,http://lib-arxiv-008.serverfarm.cornell.edu/pdf/1512.06009.pdf
+df054fa8ee6bb7d2a50909939d90ef417c73604c,http://pdfs.semanticscholar.org/df05/4fa8ee6bb7d2a50909939d90ef417c73604c.pdf
+df80fed59ffdf751a20af317f265848fe6bfb9c9,http://ivg.au.tsinghua.edu.cn/paper/2017_Learning%20deep%20sharable%20and%20structural%20detectors%20for%20face%20alignment.pdf
+dff838ba0567ef0a6c8fbfff9837ea484314efc6,http://pdfs.semanticscholar.org/dff8/38ba0567ef0a6c8fbfff9837ea484314efc6.pdf
+dfa80e52b0489bc2585339ad3351626dee1a8395,http://pdfs.semanticscholar.org/dfa8/0e52b0489bc2585339ad3351626dee1a8395.pdf
+df71a00071d5a949f9c31371c2e5ee8b478e7dc8,http://studentlife.cs.dartmouth.edu/facelogging.pdf
+dfb6aa168177d4685420fcb184def0aa7db7cddb,http://pdfs.semanticscholar.org/dfb6/aa168177d4685420fcb184def0aa7db7cddb.pdf
+df2841a1d2a21a0fc6f14fe53b6124519f3812f9,http://pdfs.semanticscholar.org/df28/41a1d2a21a0fc6f14fe53b6124519f3812f9.pdf
+df5fe0c195eea34ddc8d80efedb25f1b9034d07d,http://www.andrew.cmu.edu/user/kseshadr/BTAS_2009_Paper_IEEE.pdf
+df2494da8efa44d70c27abf23f73387318cf1ca8,http://pdfs.semanticscholar.org/df24/94da8efa44d70c27abf23f73387318cf1ca8.pdf
+df674dc0fc813c2a6d539e892bfc74f9a761fbc8,http://pdfs.semanticscholar.org/df67/4dc0fc813c2a6d539e892bfc74f9a761fbc8.pdf
+dad7b8be074d7ea6c3f970bd18884d496cbb0f91,http://pdfs.semanticscholar.org/dad7/b8be074d7ea6c3f970bd18884d496cbb0f91.pdf
+daf05febbe8406a480306683e46eb5676843c424,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Feng_Robust_Subspace_Segmentation_2014_CVPR_paper.pdf
+da15344a4c10b91d6ee2e9356a48cb3a0eac6a97,http://pdfs.semanticscholar.org/da15/344a4c10b91d6ee2e9356a48cb3a0eac6a97.pdf
+da5bfddcfe703ca60c930e79d6df302920ab9465,http://pdfs.semanticscholar.org/da5b/fddcfe703ca60c930e79d6df302920ab9465.pdf
+dac2103843adc40191e48ee7f35b6d86a02ef019,http://www.chennaisunday.com/2015DOTNET/Unsupervised%20Celebrity%20Face%20Naming%20in%20Web%20Videos.pdf
+dae420b776957e6b8cf5fbbacd7bc0ec226b3e2e,http://pdfs.semanticscholar.org/dae4/20b776957e6b8cf5fbbacd7bc0ec226b3e2e.pdf
+daa02cf195818cbf651ef81941a233727f71591f,http://pdfs.semanticscholar.org/daa0/2cf195818cbf651ef81941a233727f71591f.pdf
+daa52dd09b61ee94945655f0dde216cce0ebd505,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Yonetani_Recognizing_Micro-Actions_and_CVPR_2016_paper.pdf
+daba8f0717f3f47c272f018d0a466a205eba6395,https://pdfs.semanticscholar.org/daba/8f0717f3f47c272f018d0a466a205eba6395.pdf
+b4d694961d3cde43ccef7d8fcf1061fe0d8f97f3,http://cs.adelaide.edu.au/~javen/pub/ShiLiShe10.pdf
+b4ee1b468bf7397caa7396cfee2ab5f5ed6f2807,http://pdfs.semanticscholar.org/f269/c3573b39d26a5ad0754edb67a46ef57816c7.pdf
+b446bcd7fb78adfe346cf7a01a38e4f43760f363,http://pdfs.semanticscholar.org/b446/bcd7fb78adfe346cf7a01a38e4f43760f363.pdf
+b417b90fa0c288bbaab1aceb8ebc7ec1d3f33172,https://arxiv.org/pdf/1802.00237v1.pdf
+b446cf353744a4b640af88d1848a1b958169c9f2,https://www.computer.org/web/csdl/index/-/csdl/proceedings/fg/2013/5545/00/06553744.pdf
+b41374f4f31906cf1a73c7adda6c50a78b4eb498,http://isp.uv.es/papers/Laparra11.pdf
+b4d7ca26deb83cec1922a6964c1193e8dd7270e7,http://pdfs.semanticscholar.org/b4d7/ca26deb83cec1922a6964c1193e8dd7270e7.pdf
+b40290a694075868e0daef77303f2c4ca1c43269,http://pdfs.semanticscholar.org/b402/90a694075868e0daef77303f2c4ca1c43269.pdf
+b4362cd87ad219790800127ddd366cc465606a78,http://pdfs.semanticscholar.org/b436/2cd87ad219790800127ddd366cc465606a78.pdf
+b4f4b0d39fd10baec34d3412d53515f1a4605222,http://pdfs.semanticscholar.org/eaae/d23a2d94feb2f1c3ff22a25777c7a78f3141.pdf
+b43b6551ecc556557b63edb8b0dc39901ed0343b,http://pdfs.semanticscholar.org/b43b/6551ecc556557b63edb8b0dc39901ed0343b.pdf
+a255a54b8758050ea1632bf5a88a201cd72656e1,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2013/W16/papers/Tamersoy_Nonparametric_Facial_Feature_2013_CVPR_paper.pdf
+a2b9cee7a3866eb2db53a7d81afda72051fe9732,http://pdfs.semanticscholar.org/a2b9/cee7a3866eb2db53a7d81afda72051fe9732.pdf
+a2d04db895dd17f2a8291b300a63604842c06d09,http://www4.comp.polyu.edu.hk/~csdct/Publications/2006/TCSVT.pdf
+a2bd81be79edfa8dcfde79173b0a895682d62329,http://pdfs.semanticscholar.org/a2bd/81be79edfa8dcfde79173b0a895682d62329.pdf
+a2eb90e334575d9b435c01de4f4bf42d2464effc,http://poseidon.csd.auth.gr/papers/PUBLISHED/CONFERENCE/pdf/Buciu04b.pdf
+a25106a76af723ba9b09308a7dcf4f76d9283589,http://pdfs.semanticscholar.org/a251/06a76af723ba9b09308a7dcf4f76d9283589.pdf
+a2d9c9ed29bbc2619d5e03320e48b45c15155195,http://pdfs.semanticscholar.org/a2d9/c9ed29bbc2619d5e03320e48b45c15155195.pdf
+a29a22878e1881d6cbf6acff2d0b209c8d3f778b,http://pdfs.semanticscholar.org/a29a/22878e1881d6cbf6acff2d0b209c8d3f778b.pdf
+a2429cc2ccbabda891cc5ae340b24ad06fcdbed5,http://pdfs.semanticscholar.org/e12a/0f0bca1624965386ac9cf95f711c90441553.pdf
+a2b54f4d73bdb80854aa78f0c5aca3d8b56b571d,http://pdfs.semanticscholar.org/a2b5/4f4d73bdb80854aa78f0c5aca3d8b56b571d.pdf
+a27735e4cbb108db4a52ef9033e3a19f4dc0e5fa,http://pdfs.semanticscholar.org/d965/50536f2ff505f62aec841b3656d940e7f1cf.pdf
+a2bcfba155c990f64ffb44c0a1bb53f994b68a15,http://ibug.doc.ic.ac.uk/media/uploads/documents/cvprw_photoface.pdf
+a2fbaa0b849ecc74f34ebb36d1442d63212b29d2,http://pdfs.semanticscholar.org/a2fb/aa0b849ecc74f34ebb36d1442d63212b29d2.pdf
+a50b4d404576695be7cd4194a064f0602806f3c4,http://pdfs.semanticscholar.org/a50b/4d404576695be7cd4194a064f0602806f3c4.pdf
+a59cdc49185689f3f9efdf7ee261c78f9c180789,http://pdfs.semanticscholar.org/a59c/dc49185689f3f9efdf7ee261c78f9c180789.pdf
+a5e5094a1e052fa44f539b0d62b54ef03c78bf6a,http://pdfs.semanticscholar.org/a5e5/094a1e052fa44f539b0d62b54ef03c78bf6a.pdf
+a52c72cd8538c62156aaa4d7e5c54946be53b9bb,http://pdfs.semanticscholar.org/a52c/72cd8538c62156aaa4d7e5c54946be53b9bb.pdf
+a5c8fc1ca4f06a344b53dc81ebc6d87f54896722,http://pdfs.semanticscholar.org/a5c8/fc1ca4f06a344b53dc81ebc6d87f54896722.pdf
+a5ade88747fa5769c9c92ffde9b7196ff085a9eb,http://face.cs.kit.edu/download/publications/gehrig-emotiw2013.pdf
+a56c1331750bf3ac33ee07004e083310a1e63ddc,http://pdfs.semanticscholar.org/de99/1e4c18c21b3cdf6389b439c88709d62f4252.pdf
+a56b0f76919aabe8b768f5fbaeca412276365aa2,http://www.mingzhao.org/Publications/ZM_2006_FG_3DReconstruction.pdf
+a54e0f2983e0b5af6eaafd4d3467b655a3de52f4,http://pdfs.semanticscholar.org/a54e/0f2983e0b5af6eaafd4d3467b655a3de52f4.pdf
+a5625cfe16d72bd00e987857d68eb4d8fc3ce4fb,http://pdfs.semanticscholar.org/a562/5cfe16d72bd00e987857d68eb4d8fc3ce4fb.pdf
+a5bf83f99f71e3840f651fbeef9f334d8e75fd75,http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/1927.pdf
+a546fd229f99d7fe3cf634234e04bae920a2ec33,http://pdfs.semanticscholar.org/a546/fd229f99d7fe3cf634234e04bae920a2ec33.pdf
+a538b05ebb01a40323997629e171c91aa28b8e2f,http://pdfs.semanticscholar.org/a538/b05ebb01a40323997629e171c91aa28b8e2f.pdf
+a57ee5a8fb7618004dd1def8e14ef97aadaaeef5,http://pdfs.semanticscholar.org/f1f5/b603dd34ec26939517348d77df10992798f0.pdf
+a57b37549edba625f5955759e259e52eb0af8773,http://learning.cs.toronto.edu/~hinton/absps/ranzato_cvpr2011.pdf
+a5ae7fe2bb268adf0c1cd8e3377f478fca5e4529,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W08/papers/Sikka_Exemplar_Hidden_Markov_2015_CVPR_paper.pdf
+a55efc4a6f273c5895b5e4c5009eabf8e5ed0d6a,http://cvrr.ucsd.edu/publications/2014/TawariMartinTrivedi_IEEETITS2014.pdf
+a51d5c2f8db48a42446cc4f1718c75ac9303cb7a,http://pdfs.semanticscholar.org/a51d/5c2f8db48a42446cc4f1718c75ac9303cb7a.pdf
+a57b92ed2d8aa5b41fe513c3e98cbf83b7141741,http://pdfs.semanticscholar.org/a57b/92ed2d8aa5b41fe513c3e98cbf83b7141741.pdf
+a52d9e9daf2cb26b31bf2902f78774bd31c0dd88,http://pdfs.semanticscholar.org/a52d/9e9daf2cb26b31bf2902f78774bd31c0dd88.pdf
+a51882cfd0706512bf50e12c0a7dd0775285030d,http://pdfs.semanticscholar.org/a518/82cfd0706512bf50e12c0a7dd0775285030d.pdf
+a5c04f2ad6a1f7c50b6aa5b1b71c36af76af06be,http://pdfs.semanticscholar.org/d788/2e6bd512b190e47be944dc9b58b612f12581.pdf
+a503eb91c0bce3a83bf6f524545888524b29b166,http://pdfs.semanticscholar.org/a503/eb91c0bce3a83bf6f524545888524b29b166.pdf
+a52581a7b48138d7124afc7ccfcf8ec3b48359d0,http://pdfs.semanticscholar.org/a525/81a7b48138d7124afc7ccfcf8ec3b48359d0.pdf
+bd572e9cbec095bcf5700cb7cd73d1cdc2fe02f4,http://pdfs.semanticscholar.org/bd57/2e9cbec095bcf5700cb7cd73d1cdc2fe02f4.pdf
+bd6099429bb7bf248b1fd6a1739e744512660d55,http://pdfs.semanticscholar.org/bd60/99429bb7bf248b1fd6a1739e744512660d55.pdf
+bd8f3fef958ebed5576792078f84c43999b1b207,http://pdfs.semanticscholar.org/bd8f/3fef958ebed5576792078f84c43999b1b207.pdf
+bd9eb65d9f0df3379ef96e5491533326e9dde315,http://pdfs.semanticscholar.org/bd9e/b65d9f0df3379ef96e5491533326e9dde315.pdf
+bd07d1f68486052b7e4429dccecdb8deab1924db,http://pdfs.semanticscholar.org/bd07/d1f68486052b7e4429dccecdb8deab1924db.pdf
+bd0201b32e7eca7818468f2b5cb1fb4374de75b9,http://pdfs.semanticscholar.org/bd02/01b32e7eca7818468f2b5cb1fb4374de75b9.pdf
+bd8e2d27987be9e13af2aef378754f89ab20ce10,http://pdfs.semanticscholar.org/bd8e/2d27987be9e13af2aef378754f89ab20ce10.pdf
+bd236913cfe07896e171ece9bda62c18b8c8197e,http://pdfs.semanticscholar.org/bd23/6913cfe07896e171ece9bda62c18b8c8197e.pdf
+bd13f50b8997d0733169ceba39b6eb1bda3eb1aa,http://pdfs.semanticscholar.org/bd13/f50b8997d0733169ceba39b6eb1bda3eb1aa.pdf
+bd8b7599acf53e3053aa27cfd522764e28474e57,http://www.jdl.ac.cn/doc/2009/iccv09_Learning%20Long%20Term%20Face%20Aging%20Patterns%20from%20Partially%20Dense%20Aging%20Databases.pdf
+bd78a853df61d03b7133aea58e45cd27d464c3cf,http://pdfs.semanticscholar.org/bd78/a853df61d03b7133aea58e45cd27d464c3cf.pdf
+bd2d7c7f0145028e85c102fe52655c2b6c26aeb5,http://rogerioferis.com/publications/FerisICMR2014.pdf
+bd9157331104a0708aa4f8ae79b7651a5be797c6,http://pdfs.semanticscholar.org/bd91/57331104a0708aa4f8ae79b7651a5be797c6.pdf
+d185f4f05c587e23c0119f2cdfac8ea335197ac0,http://pdfs.semanticscholar.org/d185/f4f05c587e23c0119f2cdfac8ea335197ac0.pdf
+d140c5add2cddd4a572f07358d666fe00e8f4fe1,http://pdfs.semanticscholar.org/d140/c5add2cddd4a572f07358d666fe00e8f4fe1.pdf
+d1dae2993bdbb2667d1439ff538ac928c0a593dc,http://pdfs.semanticscholar.org/d1da/e2993bdbb2667d1439ff538ac928c0a593dc.pdf
+d1f58798db460996501f224fff6cceada08f59f9,http://pdfs.semanticscholar.org/d1f5/8798db460996501f224fff6cceada08f59f9.pdf
+d115c4a66d765fef596b0b171febca334cea15b5,http://pdfs.semanticscholar.org/d115/c4a66d765fef596b0b171febca334cea15b5.pdf
+d122d66c51606a8157a461b9d7eb8b6af3d819b0,http://pdfs.semanticscholar.org/d122/d66c51606a8157a461b9d7eb8b6af3d819b0.pdf
+d142e74c6a7457e77237cf2a3ded4e20f8894e1a,http://pdfs.semanticscholar.org/d142/e74c6a7457e77237cf2a3ded4e20f8894e1a.pdf
+d1082eff91e8009bf2ce933ac87649c686205195,http://epubs.surrey.ac.uk/807279/1/ML_Akyuz_Windeatt_Raymond.pdf
+d1959ba4637739dcc6cc6995e10fd41fd6604713,http://pdfs.semanticscholar.org/d195/9ba4637739dcc6cc6995e10fd41fd6604713.pdf
+d1881993c446ea693bbf7f7d6e750798bf958900,http://pdfs.semanticscholar.org/d188/1993c446ea693bbf7f7d6e750798bf958900.pdf
+d6cf3cab269877c58a16be011b74e07838d957c2,http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0162.pdf
+d61578468d267c2d50672077918c1cda9b91429b,http://pdfs.semanticscholar.org/d615/78468d267c2d50672077918c1cda9b91429b.pdf
+d687fa99586a9ad229284229f20a157ba2d41aea,http://pdfs.semanticscholar.org/d687/fa99586a9ad229284229f20a157ba2d41aea.pdf
+d647099e571f9af3a1762f895fd8c99760a3916e,http://cbim.rutgers.edu/dmdocuments/CVPR10_Peng_Yang.pdf
+d6a9ea9b40a7377c91c705f4c7f206a669a9eea2,http://pdfs.semanticscholar.org/d6a9/ea9b40a7377c91c705f4c7f206a669a9eea2.pdf
+d6ca3dc01de060871839d5536e8112b551a7f9ff,https://arxiv.org/pdf/1802.08310v1.pdf
+d671a210990f67eba9b2d3dda8c2cb91575b4a7a,http://pdfs.semanticscholar.org/d671/a210990f67eba9b2d3dda8c2cb91575b4a7a.pdf
+d61e794ec22a4d4882181da17316438b5b24890f,http://pdfs.semanticscholar.org/d61e/794ec22a4d4882181da17316438b5b24890f.pdf
+d65b82b862cf1dbba3dee6541358f69849004f30,http://pdfs.semanticscholar.org/d65b/82b862cf1dbba3dee6541358f69849004f30.pdf
+d6102a7ddb19a185019fd2112d2f29d9258f6dec,http://pdfs.semanticscholar.org/d610/2a7ddb19a185019fd2112d2f29d9258f6dec.pdf
+d6bfa9026a563ca109d088bdb0252ccf33b76bc6,http://pdfs.semanticscholar.org/d6bf/a9026a563ca109d088bdb0252ccf33b76bc6.pdf
+d67dcaf6e44afd30c5602172c4eec1e484fc7fb7,http://pdfs.semanticscholar.org/d67d/caf6e44afd30c5602172c4eec1e484fc7fb7.pdf
+d6c7092111a8619ed7a6b01b00c5f75949f137bf,http://pdfs.semanticscholar.org/d6c7/092111a8619ed7a6b01b00c5f75949f137bf.pdf
+d68dbb71b34dfe98dee0680198a23d3b53056394,http://pdfs.semanticscholar.org/d68d/bb71b34dfe98dee0680198a23d3b53056394.pdf
+d6fb606e538763282e3942a5fb45c696ba38aee6,https://pdfs.semanticscholar.org/d6fb/606e538763282e3942a5fb45c696ba38aee6.pdf
+bcee40c25e8819955263b89a433c735f82755a03,http://pdfs.semanticscholar.org/bcee/40c25e8819955263b89a433c735f82755a03.pdf
+bc704680b5032eadf78c4e49f548ba14040965bf,http://pdfs.semanticscholar.org/ccbc/c676546a43cd4b714f0c85cbd493f9c61396.pdf
+bc15a2fd09df7046e7e8c7c5b054d7f06c3cefe9,https://arxiv.org/pdf/1801.08329v1.pdf
+bcc172a1051be261afacdd5313619881cbe0f676,http://www.mirlab.org/conference_papers/International_Conference/ICASSP%202017/pdfs/0002197.pdf
+bcfeac1e5c31d83f1ed92a0783501244dde5a471,http://pdfs.semanticscholar.org/bcfe/ac1e5c31d83f1ed92a0783501244dde5a471.pdf
+bc12715a1ddf1a540dab06bf3ac4f3a32a26b135,http://pdfs.semanticscholar.org/bc12/715a1ddf1a540dab06bf3ac4f3a32a26b135.pdf
+bc910ca355277359130da841a589a36446616262,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Huang_Conditional_High-Order_Boltzmann_ICCV_2015_paper.pdf
+bcc5cbbb540ee66dc8b9a3453b506e895d8395de,http://pdfs.semanticscholar.org/bcc5/cbbb540ee66dc8b9a3453b506e895d8395de.pdf
+bc871497626afb469d25c4975aa657159269aefe,http://ir.ia.ac.cn/bitstream/173211/10560/1/Adaptive%20Learning%20Algorithm%20for%20Pattern%20Classification.pdf
+bc2852fa0a002e683aad3fb0db5523d1190d0ca5,http://pdfs.semanticscholar.org/bc28/52fa0a002e683aad3fb0db5523d1190d0ca5.pdf
+bc866c2ced533252f29cf2111dd71a6d1724bd49,http://pdfs.semanticscholar.org/bc86/6c2ced533252f29cf2111dd71a6d1724bd49.pdf
+bcb99d5150d792001a7d33031a3bd1b77bea706b,http://pdfs.semanticscholar.org/bcb9/9d5150d792001a7d33031a3bd1b77bea706b.pdf
+bc98027b331c090448492eb9e0b9721e812fac84,http://pdfs.semanticscholar.org/bc98/027b331c090448492eb9e0b9721e812fac84.pdf
+bcac3a870501c5510df80c2a5631f371f2f6f74a,http://pdfs.semanticscholar.org/bcac/3a870501c5510df80c2a5631f371f2f6f74a.pdf
+aed321909bb87c81121c841b21d31509d6c78f69,http://pdfs.semanticscholar.org/aed3/21909bb87c81121c841b21d31509d6c78f69.pdf
+aecb15e3e9191eb135bdba2426967bfac3f068db,http://www.cvip.uofl.edu/wwwcvip/research/publications/Pub_Pdf/2010/3D%20Face%20Rcovery%20From%20Intensities_2010.pdf
+ae936628e78db4edb8e66853f59433b8cc83594f,http://pdfs.semanticscholar.org/ae93/6628e78db4edb8e66853f59433b8cc83594f.pdf
+ae0765ebdffffd6e6cc33c7705df33b7e8478627,http://pdfs.semanticscholar.org/ae07/65ebdffffd6e6cc33c7705df33b7e8478627.pdf
+aefc7c708269b874182a5c877fb6dae06da210d4,http://pdfs.semanticscholar.org/f6f4/60d4a4a5b4c077ab3ac7a972f52af17a4241.pdf
+aebb9649bc38e878baef082b518fa68f5cda23a5,http://pdfs.semanticscholar.org/aebb/9649bc38e878baef082b518fa68f5cda23a5.pdf
+aece472ba64007f2e86300cc3486c84597f02ec7,http://doras.dcu.ie/439/1/ieee_smap_2007.pdf
+ae5bb02599244d6d88c4fe466a7fdd80aeb91af4,http://pdfs.semanticscholar.org/ae5b/b02599244d6d88c4fe466a7fdd80aeb91af4.pdf
+ae18ccb35a1a5d7b22f2a5760f706b1c11bf39a9,http://pdfs.semanticscholar.org/ae18/ccb35a1a5d7b22f2a5760f706b1c11bf39a9.pdf
+aeeea6eec2f063c006c13be865cec0c350244e5b,http://pdfs.semanticscholar.org/aeee/a6eec2f063c006c13be865cec0c350244e5b.pdf
+ae9257f3be9f815db8d72819332372ac59c1316b,http://pdfs.semanticscholar.org/ae92/57f3be9f815db8d72819332372ac59c1316b.pdf
+ae89b7748d25878c4dc17bdaa39dd63e9d442a0d,http://hal.inria.fr/docs/00/87/00/59/PDF/Ozerov_et_al_ICIP_2013.pdf
+ae1de0359f4ed53918824271c888b7b36b8a5d41,http://pdfs.semanticscholar.org/ae1d/e0359f4ed53918824271c888b7b36b8a5d41.pdf
+ae4390873485c9432899977499c3bf17886fa149,http://pdfs.semanticscholar.org/ae43/90873485c9432899977499c3bf17886fa149.pdf
+ae753fd46a744725424690d22d0d00fb05e53350,http://pdfs.semanticscholar.org/ae75/3fd46a744725424690d22d0d00fb05e53350.pdf
+aea4128ba18689ff1af27b90c111bbd34013f8d5,http://pdfs.semanticscholar.org/aea4/128ba18689ff1af27b90c111bbd34013f8d5.pdf
+ae4e2c81c8a8354c93c4b21442c26773352935dd,http://pdfs.semanticscholar.org/ae4e/2c81c8a8354c93c4b21442c26773352935dd.pdf
+ae85c822c6aec8b0f67762c625a73a5d08f5060d,http://tamaraberg.com/papers/yamaguchi2014retrieving.pdf
+ae71f69f1db840e0aa17f8c814316f0bd0f6fbbf,http://pdfs.semanticscholar.org/ae71/f69f1db840e0aa17f8c814316f0bd0f6fbbf.pdf
+d893f75206b122973cdbf2532f506912ccd6fbe0,http://pdfs.semanticscholar.org/d893/f75206b122973cdbf2532f506912ccd6fbe0.pdf
+d861c658db2fd03558f44c265c328b53e492383a,http://www.cs.washington.edu/research/VACE/Multimedia/Jia_EMBC2014_final.pdf
+d84a48f7d242d73b32a9286f9b148f5575acf227,http://pdfs.semanticscholar.org/d84a/48f7d242d73b32a9286f9b148f5575acf227.pdf
+d8f0bda19a345fac81a1d560d7db73f2b4868836,http://pdfs.semanticscholar.org/d8f0/bda19a345fac81a1d560d7db73f2b4868836.pdf
+d82b93f848d5442f82154a6011d26df8a9cd00e7,http://pdfs.semanticscholar.org/d82b/93f848d5442f82154a6011d26df8a9cd00e7.pdf
+d8722ffbca906a685abe57f3b7b9c1b542adfa0c,http://pdfs.semanticscholar.org/d872/2ffbca906a685abe57f3b7b9c1b542adfa0c.pdf
+d8896861126b7fd5d2ceb6fed8505a6dff83414f,http://pdfs.semanticscholar.org/d889/6861126b7fd5d2ceb6fed8505a6dff83414f.pdf
+d83d2fb5403c823287f5889b44c1971f049a1c93,http://pdfs.semanticscholar.org/d83d/2fb5403c823287f5889b44c1971f049a1c93.pdf
+d8b568392970b68794a55c090c4dd2d7f90909d2,http://pdfs.semanticscholar.org/d8b5/68392970b68794a55c090c4dd2d7f90909d2.pdf
+d83ae5926b05894fcda0bc89bdc621e4f21272da,http://pdfs.semanticscholar.org/d83a/e5926b05894fcda0bc89bdc621e4f21272da.pdf
+d8bf148899f09a0aad18a196ce729384a4464e2b,http://pdfs.semanticscholar.org/d8bf/148899f09a0aad18a196ce729384a4464e2b.pdf
+d850aff9d10a01ad5f1d8a1b489fbb3998d0d80e,http://pdfs.semanticscholar.org/d850/aff9d10a01ad5f1d8a1b489fbb3998d0d80e.pdf
+ab8f9a6bd8f582501c6b41c0e7179546e21c5e91,http://pdfs.semanticscholar.org/ab8f/9a6bd8f582501c6b41c0e7179546e21c5e91.pdf
+abce06a96a7c3095bfc36eed8779d89263769b85,http://ai.pku.edu.cn/aiwebsite/research.files/collected%20papers%20-%20others/Analyzing%20Asymmetry%20Biometric%20in%20the%20Frequency%20Domain%20for%20Face%20Recognition.pdf
+aba770a7c45e82b2f9de6ea2a12738722566a149,http://pure.qub.ac.uk/portal/files/49719304/Face_Recognition_in_the_Scrambled.pdf
+ab0f9bc35b777eaefff735cb0dd0663f0c34ad31,http://faculty.ucmerced.edu/snewsam/papers/Yang_ICPR14_SemiSupervisedLearning.pdf
+abb396490ba8b112f10fbb20a0a8ce69737cd492,http://pdfs.semanticscholar.org/abb3/96490ba8b112f10fbb20a0a8ce69737cd492.pdf
+abac0fa75281c9a0690bf67586280ed145682422,http://pdfs.semanticscholar.org/abac/0fa75281c9a0690bf67586280ed145682422.pdf
+ab6776f500ed1ab23b7789599f3a6153cdac84f7,http://pdfs.semanticscholar.org/ab67/76f500ed1ab23b7789599f3a6153cdac84f7.pdf
+ab87dfccb1818bdf0b41d732da1f9335b43b74ae,http://pdfs.semanticscholar.org/ab87/dfccb1818bdf0b41d732da1f9335b43b74ae.pdf
+abc1ef570bb2d7ea92cbe69e101eefa9a53e1d72,http://pdfs.semanticscholar.org/abc1/ef570bb2d7ea92cbe69e101eefa9a53e1d72.pdf
+ab1dfcd96654af0bf6e805ffa2de0f55a73c025d,http://pdfs.semanticscholar.org/ab1d/fcd96654af0bf6e805ffa2de0f55a73c025d.pdf
+abeda55a7be0bbe25a25139fb9a3d823215d7536,http://pdfs.semanticscholar.org/abed/a55a7be0bbe25a25139fb9a3d823215d7536.pdf
+ab427f0c7d4b0eb22c045392107509451165b2ba,http://cs.uky.edu/~zach/assets/papers/li2012learning.pdf
+ab1900b5d7cf3317d17193e9327d57b97e24d2fc,http://pdfs.semanticscholar.org/ab19/00b5d7cf3317d17193e9327d57b97e24d2fc.pdf
+ab8fb278db4405f7db08fa59404d9dd22d38bc83,http://pdfs.semanticscholar.org/ab8f/b278db4405f7db08fa59404d9dd22d38bc83.pdf
+e5737ffc4e74374b0c799b65afdbf0304ff344cb,http://pdfs.semanticscholar.org/e573/7ffc4e74374b0c799b65afdbf0304ff344cb.pdf
+e50ee29ca12028cb903cd498bb9cacd41bd5ce3a,http://pdfs.semanticscholar.org/e50e/e29ca12028cb903cd498bb9cacd41bd5ce3a.pdf
+e510f2412999399149d8635a83eca89c338a99a1,http://pdfs.semanticscholar.org/e510/f2412999399149d8635a83eca89c338a99a1.pdf
+e59813940c5c83b1ce63f3f451d03d34d2f68082,http://pdfs.semanticscholar.org/e598/13940c5c83b1ce63f3f451d03d34d2f68082.pdf
+e5b301ee349ba8e96ea6c71782295c4f06be6c31,http://pdfs.semanticscholar.org/e5b3/01ee349ba8e96ea6c71782295c4f06be6c31.pdf
+e5342233141a1d3858ed99ccd8ca0fead519f58b,http://pdfs.semanticscholar.org/e534/2233141a1d3858ed99ccd8ca0fead519f58b.pdf
+e52be9a083e621d9ed29c8e9914451a6a327ff59,http://pdfs.semanticscholar.org/e52b/e9a083e621d9ed29c8e9914451a6a327ff59.pdf
+e5799fd239531644ad9270f49a3961d7540ce358,http://chenlab.ece.cornell.edu/people/ruogu/publications/ICIP13_Kinship.pdf
+e5eb7fa8c9a812d402facfe8e4672670541ed108,http://pdfs.semanticscholar.org/e5eb/7fa8c9a812d402facfe8e4672670541ed108.pdf
+e27c92255d7ccd1860b5fb71c5b1277c1648ed1e,http://pdfs.semanticscholar.org/e27c/92255d7ccd1860b5fb71c5b1277c1648ed1e.pdf
+e200c3f2849d56e08056484f3b6183aa43c0f13a,http://pdfs.semanticscholar.org/e200/c3f2849d56e08056484f3b6183aa43c0f13a.pdf
+e2d265f606cd25f1fd72e5ee8b8f4c5127b764df,http://pdfs.semanticscholar.org/e2d2/65f606cd25f1fd72e5ee8b8f4c5127b764df.pdf
+f45d6a7bdb6741242da6192d18c97ac39e6308db,http://media.cs.tsinghua.edu.cn/~imagevision/papers/%5B2008%5D%5Bfg%5DPerson-Specific%20Face%20Recognition%20in%20Unconstrained%20Environments%20a%20Combination%20of%20Offline%20and%20Online%20Learning.pdf
+f437b3884a9e5fab66740ca2a6f1f3a5724385ea,http://pdfs.semanticscholar.org/f437/b3884a9e5fab66740ca2a6f1f3a5724385ea.pdf
+f43eeb578e0ca48abfd43397bbd15825f94302e4,http://pdfs.semanticscholar.org/f43e/eb578e0ca48abfd43397bbd15825f94302e4.pdf
+f4f9697f2519f1fe725ee7e3788119ed217dca34,http://pdfs.semanticscholar.org/f4f9/697f2519f1fe725ee7e3788119ed217dca34.pdf
+f4c01fc79c7ead67899f6fe7b79dd1ad249f71b0,http://pdfs.semanticscholar.org/f4c0/1fc79c7ead67899f6fe7b79dd1ad249f71b0.pdf
+f4373f5631329f77d85182ec2df6730cbd4686a9,http://pdfs.semanticscholar.org/f437/3f5631329f77d85182ec2df6730cbd4686a9.pdf
+f4210309f29d4bbfea9642ecadfb6cf9581ccec7,http://pdfs.semanticscholar.org/f421/0309f29d4bbfea9642ecadfb6cf9581ccec7.pdf
+f47404424270f6a20ba1ba8c2211adfba032f405,http://pdfs.semanticscholar.org/f474/04424270f6a20ba1ba8c2211adfba032f405.pdf
+f4ebbeb77249d1136c355f5bae30f02961b9a359,http://pdfs.semanticscholar.org/f4eb/beb77249d1136c355f5bae30f02961b9a359.pdf
+f4aed1314b2d38fd8f1b9d2bc154295bbd45f523,http://pdfs.semanticscholar.org/f4ae/d1314b2d38fd8f1b9d2bc154295bbd45f523.pdf
+f3fcaae2ea3e998395a1443c87544f203890ae15,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553791.pdf
+f3015be0f9dbc1a55b6f3dc388d97bb566ff94fe,http://pdfs.semanticscholar.org/f301/5be0f9dbc1a55b6f3dc388d97bb566ff94fe.pdf
+f3d9e347eadcf0d21cb0e92710bc906b22f2b3e7,http://pdfs.semanticscholar.org/f3d9/e347eadcf0d21cb0e92710bc906b22f2b3e7.pdf
+f3f77b803b375f0c63971b59d0906cb700ea24ed,http://pdfs.semanticscholar.org/f3f7/7b803b375f0c63971b59d0906cb700ea24ed.pdf
+f355e54ca94a2d8bbc598e06e414a876eb62ef99,http://pdfs.semanticscholar.org/f355/e54ca94a2d8bbc598e06e414a876eb62ef99.pdf
+f35a493afa78a671b9d2392c69642dcc3dd2cdc2,http://pdfs.semanticscholar.org/f35a/493afa78a671b9d2392c69642dcc3dd2cdc2.pdf
+ebedc841a2c1b3a9ab7357de833101648281ff0e,http://pdfs.semanticscholar.org/ebed/c841a2c1b3a9ab7357de833101648281ff0e.pdf
+eb526174fa071345ff7b1fad1fad240cd943a6d7,http://pdfs.semanticscholar.org/eb52/6174fa071345ff7b1fad1fad240cd943a6d7.pdf
+eb6ee56e085ebf473da990d032a4249437a3e462,http://www-scf.usc.edu/~chuntinh/doc/Age_Gender_Classification_APSIPA_2017.pdf
+ebb1c29145d31c4afa3c9be7f023155832776cd3,http://pdfs.semanticscholar.org/ebb1/c29145d31c4afa3c9be7f023155832776cd3.pdf
+eb9312458f84a366e98bd0a2265747aaed40b1a6,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_ICIP_2007/pdfs/0400473.pdf
+eb716dd3dbd0f04e6d89f1703b9975cad62ffb09,http://pdfs.semanticscholar.org/eb71/6dd3dbd0f04e6d89f1703b9975cad62ffb09.pdf
+ebabd1f7bc0274fec88a3dabaf115d3e226f198f,http://pdfs.semanticscholar.org/ebab/d1f7bc0274fec88a3dabaf115d3e226f198f.pdf
+ebb9d53668205c5797045ba130df18842e3eadef,http://pdfs.semanticscholar.org/ebb9/d53668205c5797045ba130df18842e3eadef.pdf
+eb7b387a3a006609b89ca5ed0e6b3a1d5ecb5e5a,http://pdfs.semanticscholar.org/eb7b/387a3a006609b89ca5ed0e6b3a1d5ecb5e5a.pdf
+ebd5df2b4105ba04cef4ca334fcb9bfd6ea0430c,http://pdfs.semanticscholar.org/f727/b58b84ccd8e7ed51a90ccc913d704b451191.pdf
+ebf204e0a3e137b6c24e271b0d55fa49a6c52b41,http://pdfs.semanticscholar.org/ebf2/04e0a3e137b6c24e271b0d55fa49a6c52b41.pdf
+c71f36c9376d444075de15b1102b4974481be84d,http://pdfs.semanticscholar.org/c71f/36c9376d444075de15b1102b4974481be84d.pdf
+c7e4c7be0d37013de07b6d829a3bf73e1b95ad4e,http://pdfs.semanticscholar.org/c7e4/c7be0d37013de07b6d829a3bf73e1b95ad4e.pdf
+c74aba9a096379b3dbe1ff95e7af5db45c0fd680,http://pdfs.semanticscholar.org/c74a/ba9a096379b3dbe1ff95e7af5db45c0fd680.pdf
+c7c5f0fe1fcaf3787c7f78f7dc62f3497dcfdf3c,http://pdfs.semanticscholar.org/c7c5/f0fe1fcaf3787c7f78f7dc62f3497dcfdf3c.pdf
+c7685fdbee2d96ef056a89ab4fa43df5aeae7ba7,http://staff.science.uva.nl/~nicu/publications/SMC04.pdf
+c7f752eea91bf5495a4f6e6a67f14800ec246d08,http://pdfs.semanticscholar.org/c7f7/52eea91bf5495a4f6e6a67f14800ec246d08.pdf
+c758b9c82b603904ba8806e6193c5fefa57e9613,http://pdfs.semanticscholar.org/c758/b9c82b603904ba8806e6193c5fefa57e9613.pdf
+c7c03324833ba262eeaada0349afa1b5990c1ea7,http://pdfs.semanticscholar.org/c7c0/3324833ba262eeaada0349afa1b5990c1ea7.pdf
+c78fdd080df01fff400a32fb4cc932621926021f,http://pdfs.semanticscholar.org/c78f/dd080df01fff400a32fb4cc932621926021f.pdf
+c74b1643a108939c6ba42ae4de55cb05b2191be5,http://pdfs.semanticscholar.org/c74b/1643a108939c6ba42ae4de55cb05b2191be5.pdf
+c0723e0e154a33faa6ff959d084aebf07770ffaf,http://pdfs.semanticscholar.org/c072/3e0e154a33faa6ff959d084aebf07770ffaf.pdf
+c03f48e211ac81c3867c0e787bea3192fcfe323e,http://pdfs.semanticscholar.org/c03f/48e211ac81c3867c0e787bea3192fcfe323e.pdf
+c043f8924717a3023a869777d4c9bee33e607fb5,http://pdfs.semanticscholar.org/c043/f8924717a3023a869777d4c9bee33e607fb5.pdf
+c03e01717b2d93f04cce9b5fd2dcfd1143bcc180,http://pdfs.semanticscholar.org/c03e/01717b2d93f04cce9b5fd2dcfd1143bcc180.pdf
+c0ff7dc0d575658bf402719c12b676a34271dfcd,http://pdfs.semanticscholar.org/c0ff/7dc0d575658bf402719c12b676a34271dfcd.pdf
+c02847a04a99a5a6e784ab580907278ee3c12653,http://pdfs.semanticscholar.org/c028/47a04a99a5a6e784ab580907278ee3c12653.pdf
+c035c193eed5d72c7f187f0bc880a17d217dada0,http://pdfs.semanticscholar.org/c035/c193eed5d72c7f187f0bc880a17d217dada0.pdf
+c0d1d9a585ef961f1c8e6a1e922822811181615c,http://pdfs.semanticscholar.org/c0d1/d9a585ef961f1c8e6a1e922822811181615c.pdf
+c0a8c0e6ccf9882969ba0eda0b898affa015437b,http://stanford.edu/~verroios/papers/waldo.pdf
+c0cdaeccff78f49f4604a6d263dc6eb1bb8707d5,http://pdfs.semanticscholar.org/c0cd/aeccff78f49f4604a6d263dc6eb1bb8707d5.pdf
+c00f402b9cfc3f8dd2c74d6b3552acbd1f358301,http://pdfs.semanticscholar.org/c00f/402b9cfc3f8dd2c74d6b3552acbd1f358301.pdf
+c089c7d8d1413b54f59fc410d88e215902e51638,http://nlpr-web.ia.ac.cn/2011papers/gjhy/gh122.pdf
+c068263bb09968fe69c053906279b16532b778f4,http://www.researchgate.net/profile/Mahdi_Bejani/publication/257435889_Audiovisual_emotion_recognition_using_ANOVA_feature_selection_method_and_multi-classifier_neural_networks/links/0c960529aee6234edd000000.pdf
+c0ee89dc2dad76147780f96294de9e421348c1f4,http://pdfs.semanticscholar.org/c0ee/89dc2dad76147780f96294de9e421348c1f4.pdf
+c0ca6b992cbe46ea3003f4e9b48f4ef57e5fb774,http://pdfs.semanticscholar.org/c0ca/6b992cbe46ea3003f4e9b48f4ef57e5fb774.pdf
+c0d5c3aab87d6e8dd3241db1d931470c15b9e39d,http://pdfs.semanticscholar.org/facb/edfe90956c720f70aab14767b5e25dcc6478.pdf
+c05441dd1bc418fb912a6fafa84c0659a6850bf0,http://pdfs.semanticscholar.org/c054/41dd1bc418fb912a6fafa84c0659a6850bf0.pdf
+eee8a37a12506ff5df72c402ccc3d59216321346,http://pdfs.semanticscholar.org/eee8/a37a12506ff5df72c402ccc3d59216321346.pdf
+ee6b503ab512a293e3088fdd7a1c893a77902acb,http://pdfs.semanticscholar.org/ee6b/503ab512a293e3088fdd7a1c893a77902acb.pdf
+ee18e29a2b998eddb7f6663bb07891bfc7262248,http://or.nsfc.gov.cn/bitstream/00001903-5/13750/1/1000007562815.pdf
+eeb6d084f9906c53ec8da8c34583105ab5ab8284,http://pdfs.semanticscholar.org/eeb6/d084f9906c53ec8da8c34583105ab5ab8284.pdf
+ee7093e91466b81d13f4d6933bcee48e4ee63a16,http://pdfs.semanticscholar.org/ee70/93e91466b81d13f4d6933bcee48e4ee63a16.pdf
+ee461d060da58d6053d2f4988b54eff8655ecede,http://pdfs.semanticscholar.org/ee46/1d060da58d6053d2f4988b54eff8655ecede.pdf
+eefb8768f60c17d76fe156b55b8a00555eb40f4d,http://pdfs.semanticscholar.org/eefb/8768f60c17d76fe156b55b8a00555eb40f4d.pdf
+eed1dd2a5959647896e73d129272cb7c3a2e145c,http://s3.amazonaws.com/kvaccaro.com/documents/UIST16.pdf
+ee92d36d72075048a7c8b2af5cc1720c7bace6dd,http://pdfs.semanticscholar.org/ee92/d36d72075048a7c8b2af5cc1720c7bace6dd.pdf
+ee418372b0038bd3b8ae82bd1518d5c01a33a7ec,http://pdfs.semanticscholar.org/ee41/8372b0038bd3b8ae82bd1518d5c01a33a7ec.pdf
+c94b3a05f6f41d015d524169972ae8fd52871b67,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Yan_The_Fastest_Deformable_2014_CVPR_paper.pdf
+c9424d64b12a4abe0af201e7b641409e182babab,http://pdfs.semanticscholar.org/c942/4d64b12a4abe0af201e7b641409e182babab.pdf
+c91103e6612fa7e664ccbc3ed1b0b5deac865b02,http://pdfs.semanticscholar.org/c911/03e6612fa7e664ccbc3ed1b0b5deac865b02.pdf
+c903af0d69edacf8d1bff3bfd85b9470f6c4c243,http://pdfs.semanticscholar.org/c903/af0d69edacf8d1bff3bfd85b9470f6c4c243.pdf
+c95cd36779fcbe45e3831ffcd3314e19c85defc5,https://arxiv.org/pdf/1703.04853v1.pdf
+c9e955cb9709f16faeb0c840f4dae92eb875450a,http://pdfs.semanticscholar.org/c9e9/55cb9709f16faeb0c840f4dae92eb875450a.pdf
+c92bb26238f6e30196b0c4a737d8847e61cfb7d4,http://pdfs.semanticscholar.org/c92b/b26238f6e30196b0c4a737d8847e61cfb7d4.pdf
+c980443ca996402de4b5e5424f872acda0368831,http://homepage.tudelft.nl/19j49/Publications_files/Final_CVPR10.pdf
+c9f588d295437009994ddaabb64fd4e4c499b294,http://pdfs.semanticscholar.org/c9f5/88d295437009994ddaabb64fd4e4c499b294.pdf
+c92da368a6a886211dc759fe7b1b777a64d8b682,http://pdfs.semanticscholar.org/c92d/a368a6a886211dc759fe7b1b777a64d8b682.pdf
+fc1e37fb16006b62848def92a51434fc74a2431a,http://pdfs.semanticscholar.org/fc1e/37fb16006b62848def92a51434fc74a2431a.pdf
+fc5bdb98ff97581d7c1e5eb2d24d3f10714aa192,http://pdfs.semanticscholar.org/fc5b/db98ff97581d7c1e5eb2d24d3f10714aa192.pdf
+fc20149dfdff5fdf020647b57e8a09c06e11434b,http://pdfs.semanticscholar.org/fc20/149dfdff5fdf020647b57e8a09c06e11434b.pdf
+fc516a492cf09aaf1d319c8ff112c77cfb55a0e5,http://pdfs.semanticscholar.org/fc51/6a492cf09aaf1d319c8ff112c77cfb55a0e5.pdf
+fcbec158e6a4ace3d4311b26195482b8388f0ee9,http://pdfs.semanticscholar.org/fcbe/c158e6a4ace3d4311b26195482b8388f0ee9.pdf
+fcd3d69b418d56ae6800a421c8b89ef363418665,http://pdfs.semanticscholar.org/fcd3/d69b418d56ae6800a421c8b89ef363418665.pdf
+fcd77f3ca6b40aad6edbd1dab9681d201f85f365,http://pdfs.semanticscholar.org/fcd7/7f3ca6b40aad6edbd1dab9681d201f85f365.pdf
+fc798314994bf94d1cde8d615ba4d5e61b6268b6,http://pdfs.semanticscholar.org/fc79/8314994bf94d1cde8d615ba4d5e61b6268b6.pdf
+fc45e44dd50915957e498186618f7a499953c6be,http://www.pami.sjtu.edu.cn/people/wangxh/Gabor%20Filter/Quaternion%20Correlation%20Filters%20for%20Face%20Recognition%20in%20Wavelet%20Domain.pdf
+fc23a386c2189f221b25dbd0bb34fcd26ccf60fa,http://pdfs.semanticscholar.org/fc23/a386c2189f221b25dbd0bb34fcd26ccf60fa.pdf
+fc68c5a3ab80d2d31e6fd4865a7ff2b4ab66ca9f,http://pdfs.semanticscholar.org/fc68/c5a3ab80d2d31e6fd4865a7ff2b4ab66ca9f.pdf
+fc2bad3544c7c8dc7cd182f54888baf99ed75e53,http://pdfs.semanticscholar.org/fc2b/ad3544c7c8dc7cd182f54888baf99ed75e53.pdf
+fcf8bb1bf2b7e3f71fb337ca3fcf3d9cf18daa46,http://pdfs.semanticscholar.org/fcf8/bb1bf2b7e3f71fb337ca3fcf3d9cf18daa46.pdf
+fcbf808bdf140442cddf0710defb2766c2d25c30,http://pdfs.semanticscholar.org/fcbf/808bdf140442cddf0710defb2766c2d25c30.pdf
+fd4ac1da699885f71970588f84316589b7d8317b,http://pdfs.semanticscholar.org/fd4a/c1da699885f71970588f84316589b7d8317b.pdf
+fdf533eeb1306ba418b09210387833bdf27bb756,http://pdfs.semanticscholar.org/fdf5/33eeb1306ba418b09210387833bdf27bb756.pdf
+fdfaf46910012c7cdf72bba12e802a318b5bef5a,http://pdfs.semanticscholar.org/fdfa/f46910012c7cdf72bba12e802a318b5bef5a.pdf
+fd9feb21b3d1fab470ff82e3f03efce6a0e67a1f,http://pdfs.semanticscholar.org/fd9f/eb21b3d1fab470ff82e3f03efce6a0e67a1f.pdf
+fdca08416bdadda91ae977db7d503e8610dd744f,http://pdfs.semanticscholar.org/fdca/08416bdadda91ae977db7d503e8610dd744f.pdf
+fd96432675911a702b8a4ce857b7c8619498bf9f,http://pdfs.semanticscholar.org/fd96/432675911a702b8a4ce857b7c8619498bf9f.pdf
+fd7b6c77b46420c27725757553fcd1fb24ea29a8,http://pdfs.semanticscholar.org/fd7b/6c77b46420c27725757553fcd1fb24ea29a8.pdf
+fdb33141005ca1b208a725796732ab10a9c37d75,http://pdfs.semanticscholar.org/fdb3/3141005ca1b208a725796732ab10a9c37d75.pdf
+fde0180735699ea31f6c001c71eae507848b190f,http://pdfs.semanticscholar.org/fde0/180735699ea31f6c001c71eae507848b190f.pdf
+fd615118fb290a8e3883e1f75390de8a6c68bfde,http://pdfs.semanticscholar.org/fd61/5118fb290a8e3883e1f75390de8a6c68bfde.pdf
+f24e379e942e134d41c4acec444ecf02b9d0d3a9,http://pdfs.semanticscholar.org/f24e/379e942e134d41c4acec444ecf02b9d0d3a9.pdf
+f2b13946d42a50fa36a2c6d20d28de2234aba3b4,http://npl.mcgill.ca/Papers/Adaptive%20Facial%20Expression%20Recognition%20Using%20Inter-modal%20top-down%20context.pdf
+f2c30594d917ea915028668bc2a481371a72a14d,http://pdfs.semanticscholar.org/f2c3/0594d917ea915028668bc2a481371a72a14d.pdf
+f2ad9b43bac8c2bae9dea694f6a4e44c760e63da,http://pdfs.semanticscholar.org/f2ad/9b43bac8c2bae9dea694f6a4e44c760e63da.pdf
+f2e9494d0dca9fb6b274107032781d435a508de6,http://pdfs.semanticscholar.org/f2e9/494d0dca9fb6b274107032781d435a508de6.pdf
+f2c568fe945e5743635c13fe5535af157b1903d1,http://pdfs.semanticscholar.org/f2c5/68fe945e5743635c13fe5535af157b1903d1.pdf
+f26097a1a479fb6f32b27a93f8f32609cfe30fdc,http://pdfs.semanticscholar.org/f260/97a1a479fb6f32b27a93f8f32609cfe30fdc.pdf
+f214bcc6ecc3309e2efefdc21062441328ff6081,http://pdfs.semanticscholar.org/f214/bcc6ecc3309e2efefdc21062441328ff6081.pdf
+f5149fb6b455a73734f1252a96a9ce5caa95ae02,http://openaccess.thecvf.com/content_cvpr_2017/papers/Zhang_Low-Rank-Sparse_Subspace_Representation_CVPR_2017_paper.pdf
+f5af4e9086b0c3aee942cb93ece5820bdc9c9748,http://pdfs.semanticscholar.org/f5af/4e9086b0c3aee942cb93ece5820bdc9c9748.pdf
+f5aee1529b98136194ef80961ba1a6de646645fe,http://pdfs.semanticscholar.org/f5ae/e1529b98136194ef80961ba1a6de646645fe.pdf
+f52efc206432a0cb860155c6d92c7bab962757de,http://pdfs.semanticscholar.org/f52e/fc206432a0cb860155c6d92c7bab962757de.pdf
+f519723238701849f1160d5a9cedebd31017da89,http://pdfs.semanticscholar.org/f519/723238701849f1160d5a9cedebd31017da89.pdf
+f558af209dd4c48e4b2f551b01065a6435c3ef33,http://pdfs.semanticscholar.org/f558/af209dd4c48e4b2f551b01065a6435c3ef33.pdf
+e378ce25579f3676ca50c8f6454e92a886b9e4d7,http://openaccess.thecvf.com/content_ICCV_2017/papers/Liu_Robust_Video_Super-Resolution_ICCV_2017_paper.pdf
+e35b09879a7df814b2be14d9102c4508e4db458b,http://pdfs.semanticscholar.org/e35b/09879a7df814b2be14d9102c4508e4db458b.pdf
+e3657ab4129a7570230ff25ae7fbaccb4ba9950c,http://pdfs.semanticscholar.org/e365/7ab4129a7570230ff25ae7fbaccb4ba9950c.pdf
+e315959d6e806c8fbfc91f072c322fb26ce0862b,http://pdfs.semanticscholar.org/e315/959d6e806c8fbfc91f072c322fb26ce0862b.pdf
+e39a0834122e08ba28e7b411db896d0fdbbad9ba,http://www.ece.ualberta.ca/~djoseph/publications/journal/TPAMI_2012.pdf
+e3bb83684817c7815f5005561a85c23942b1f46b,http://pdfs.semanticscholar.org/e3bb/83684817c7815f5005561a85c23942b1f46b.pdf
+e30dc2abac4ecc48aa51863858f6f60c7afdf82a,http://pdfs.semanticscholar.org/e30d/c2abac4ecc48aa51863858f6f60c7afdf82a.pdf
+e3e2c106ccbd668fb9fca851498c662add257036,http://www.vast.uccs.edu/~tboult/PAPERS/BTAS13-Sapkota-et-al-Ensembles.pdf
+e379e73e11868abb1728c3acdc77e2c51673eb0d,http://pdfs.semanticscholar.org/e379/e73e11868abb1728c3acdc77e2c51673eb0d.pdf
+e3a6e9ddbbfc4c5160082338d46808cea839848a,http://pdfs.semanticscholar.org/f5d0/2300271ab0f32f10bfbba5562c0fa83c5727.pdf
+e3917d6935586b90baae18d938295e5b089b5c62,http://www.iti.gr/files/tip05tsalakanidou.pdf
+e328d19027297ac796aae2470e438fe0bd334449,http://pdfs.semanticscholar.org/e328/d19027297ac796aae2470e438fe0bd334449.pdf
+e3144f39f473e238374dd4005c8b83e19764ae9e,http://pdfs.semanticscholar.org/f42d/ca4a4426e5873a981712102aa961be34539a.pdf
+cffebdf88e406c27b892857d1520cb2d7ccda573,http://pdfs.semanticscholar.org/cffe/bdf88e406c27b892857d1520cb2d7ccda573.pdf
+cfa572cd6ba8dfc2ee8ac3cc7be19b3abff1a8a2,https://www.computer.org/csdl/trans/ta/2017/03/07420600.pdf
+cfd933f71f4a69625390819b7645598867900eab,http://pdfs.semanticscholar.org/cfd9/33f71f4a69625390819b7645598867900eab.pdf
+cf875336d5a196ce0981e2e2ae9602580f3f6243,http://pdfs.semanticscholar.org/cf87/5336d5a196ce0981e2e2ae9602580f3f6243.pdf
+cfd8c66e71e98410f564babeb1c5fd6f77182c55,http://pdfs.semanticscholar.org/cfd8/c66e71e98410f564babeb1c5fd6f77182c55.pdf
+cf54a133c89f730adc5ea12c3ac646971120781c,http://pdfs.semanticscholar.org/cf54/a133c89f730adc5ea12c3ac646971120781c.pdf
+cfbb2d32586b58f5681e459afd236380acd86e28,http://www.professeurs.polymtl.ca/christopher.pal/2011/ROSE.v2.5.pdf
+cf5c9b521c958b84bb63bea9d5cbb522845e4ba7,http://pdfs.semanticscholar.org/cf5c/9b521c958b84bb63bea9d5cbb522845e4ba7.pdf
+cf09e2cb82961128302b99a34bff91ec7d198c7c,http://pdfs.semanticscholar.org/cf09/e2cb82961128302b99a34bff91ec7d198c7c.pdf
+cf86616b5a35d5ee777585196736dfafbb9853b5,http://www.research.rutgers.edu/~linzhong/PDF/TC_Facial.pdf
+cacd51221c592012bf2d9e4894178c1c1fa307ca,http://pdfs.semanticscholar.org/cacd/51221c592012bf2d9e4894178c1c1fa307ca.pdf
+ca0363d29e790f80f924cedaf93cb42308365b3d,http://poseidon.csd.auth.gr/papers/PUBLISHED/JOURNAL/pdf/Kotsia07a.pdf
+cad52d74c1a21043f851ae14c924ac689e197d1f,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2014/W16/papers/Alletto_From_Ego_to_2014_CVPR_paper.pdf
+cac8bb0e393474b9fb3b810c61efdbc2e2c25c29,http://pdfs.semanticscholar.org/cac8/bb0e393474b9fb3b810c61efdbc2e2c25c29.pdf
+caaa6e8e83abb97c78ff9b813b849d5ab56b5050,http://digital.cs.usu.edu/~xqi/Promotion/JSPL.FaceRecognition.14.pdf
+ca54d0a128b96b150baef392bf7e498793a6371f,http://pdfs.semanticscholar.org/ca54/d0a128b96b150baef392bf7e498793a6371f.pdf
+ca83053d9a790319b11a04eac5ab412e7fcab914,http://pdfs.semanticscholar.org/ca83/053d9a790319b11a04eac5ab412e7fcab914.pdf
+cadba72aa3e95d6dcf0acac828401ddda7ed8924,http://pdfs.semanticscholar.org/cadb/a72aa3e95d6dcf0acac828401ddda7ed8924.pdf
+ca606186715e84d270fc9052af8500fe23befbda,http://www.amirtahmasbi.com/publications_repository/SDA_ICSPS2010.pdf
+e48fb3ee27eef1e503d7ba07df8eb1524c47f4a6,http://pdfs.semanticscholar.org/e48f/b3ee27eef1e503d7ba07df8eb1524c47f4a6.pdf
+e4bf70e818e507b54f7d94856fecc42cc9e0f73d,http://pdfs.semanticscholar.org/e4bf/70e818e507b54f7d94856fecc42cc9e0f73d.pdf
+e4bc529ced68fae154e125c72af5381b1185f34e,http://pdfs.semanticscholar.org/e4bc/529ced68fae154e125c72af5381b1185f34e.pdf
+e465f596d73f3d2523dbf8334d29eb93a35f6da0,http://pdfs.semanticscholar.org/e465/f596d73f3d2523dbf8334d29eb93a35f6da0.pdf
+e4aeaf1af68a40907fda752559e45dc7afc2de67,http://pdfs.semanticscholar.org/e4ae/af1af68a40907fda752559e45dc7afc2de67.pdf
+e4c3d5d43cb62ac5b57d74d55925bdf76205e306,http://pdfs.semanticscholar.org/e4c3/d5d43cb62ac5b57d74d55925bdf76205e306.pdf
+e42998bbebddeeb4b2bedf5da23fa5c4efc976fa,http://pdfs.semanticscholar.org/e429/98bbebddeeb4b2bedf5da23fa5c4efc976fa.pdf
+e4a1b46b5c639d433d21b34b788df8d81b518729,http://pdfs.semanticscholar.org/e4a1/b46b5c639d433d21b34b788df8d81b518729.pdf
+e4c81c56966a763e021938be392718686ba9135e,http://pdfs.semanticscholar.org/e4c8/1c56966a763e021938be392718686ba9135e.pdf
+e4e95b8bca585a15f13ef1ab4f48a884cd6ecfcc,http://pdfs.semanticscholar.org/e4e9/5b8bca585a15f13ef1ab4f48a884cd6ecfcc.pdf
+e4df83b7424842ff5864c10fa55d38eae1c45fac,http://pdfs.semanticscholar.org/e4df/83b7424842ff5864c10fa55d38eae1c45fac.pdf
+e4e3faa47bb567491eaeaebb2213bf0e1db989e1,http://pdfs.semanticscholar.org/e4e3/faa47bb567491eaeaebb2213bf0e1db989e1.pdf
+e43ea078749d1f9b8254e0c3df4c51ba2f4eebd5,http://pdfs.semanticscholar.org/e43e/a078749d1f9b8254e0c3df4c51ba2f4eebd5.pdf
+e476cbcb7c1de73a7bcaeab5d0d59b8b3c4c1cbf,https://pdfs.semanticscholar.org/e476/cbcb7c1de73a7bcaeab5d0d59b8b3c4c1cbf.pdf
+e4c2f8e4aace8cb851cb74478a63d9111ca550ae,http://pdfs.semanticscholar.org/e4c2/f8e4aace8cb851cb74478a63d9111ca550ae.pdf
+e475e857b2f5574eb626e7e01be47b416deff268,http://pdfs.semanticscholar.org/e475/e857b2f5574eb626e7e01be47b416deff268.pdf
+e4391993f5270bdbc621b8d01702f626fba36fc2,http://pdfs.semanticscholar.org/e439/1993f5270bdbc621b8d01702f626fba36fc2.pdf
+e475deadd1e284428b5e6efd8fe0e6a5b83b9dcd,http://pdfs.semanticscholar.org/e475/deadd1e284428b5e6efd8fe0e6a5b83b9dcd.pdf
+e4d0e87d0bd6ead4ccd39fc5b6c62287560bac5b,https://www.ecse.rpi.edu/~cvrl/Publication/pdf/Liu2013.pdf
+e48e94959c4ce799fc61f3f4aa8a209c00be8d7f,http://pdfs.semanticscholar.org/e48e/94959c4ce799fc61f3f4aa8a209c00be8d7f.pdf
+e496d6be415038de1636bbe8202cac9c1cea9dbe,http://pdfs.semanticscholar.org/e496/d6be415038de1636bbe8202cac9c1cea9dbe.pdf
+e43cc682453cf3874785584fca813665878adaa7,http://pdfs.semanticscholar.org/e43c/c682453cf3874785584fca813665878adaa7.pdf
+fec6648b4154fc7e0892c74f98898f0b51036dfe,http://pdfs.semanticscholar.org/fec6/648b4154fc7e0892c74f98898f0b51036dfe.pdf
+fea0a5ed1bc83dd1b545a5d75db2e37a69489ac9,http://pdfs.semanticscholar.org/fea0/a5ed1bc83dd1b545a5d75db2e37a69489ac9.pdf
+fe9c460d5ca625402aa4d6dd308d15a40e1010fa,http://pdfs.semanticscholar.org/fe9c/460d5ca625402aa4d6dd308d15a40e1010fa.pdf
+fe7e3cc1f3412bbbf37d277eeb3b17b8b21d71d5,http://pdfs.semanticscholar.org/fe7e/3cc1f3412bbbf37d277eeb3b17b8b21d71d5.pdf
+fe5df5fe0e4745d224636a9ae196649176028990,http://pdfs.semanticscholar.org/fe5d/f5fe0e4745d224636a9ae196649176028990.pdf
+fe961cbe4be0a35becd2d722f9f364ec3c26bd34,http://pdfs.semanticscholar.org/fe96/1cbe4be0a35becd2d722f9f364ec3c26bd34.pdf
+feb6e267923868bff6e2108603d00fdfd65251ca,http://pdfs.semanticscholar.org/feb6/e267923868bff6e2108603d00fdfd65251ca.pdf
+feeb0fd0e254f38b38fe5c1022e84aa43d63f7cc,http://pdfs.semanticscholar.org/feeb/0fd0e254f38b38fe5c1022e84aa43d63f7cc.pdf
+fe108803ee97badfa2a4abb80f27fa86afd9aad9,http://pdfs.semanticscholar.org/fe10/8803ee97badfa2a4abb80f27fa86afd9aad9.pdf
+c8db8764f9d8f5d44e739bbcb663fbfc0a40fb3d,http://pdfs.semanticscholar.org/c8db/8764f9d8f5d44e739bbcb663fbfc0a40fb3d.pdf
+c8a4b4fe5ff2ace9ab9171a9a24064b5a91207a3,http://www.isir.upmc.fr/files/2013ACTI2846.pdf
+c8292aa152a962763185e12fd7391a1d6df60d07,http://pdfs.semanticscholar.org/c829/2aa152a962763185e12fd7391a1d6df60d07.pdf
+c82c147c4f13e79ad49ef7456473d86881428b89,http://pdfs.semanticscholar.org/c82c/147c4f13e79ad49ef7456473d86881428b89.pdf
+c8e84cdff569dd09f8d31e9f9ba3218dee65e961,http://pdfs.semanticscholar.org/c8e8/4cdff569dd09f8d31e9f9ba3218dee65e961.pdf
+c8829013bbfb19ccb731bd54c1a885c245b6c7d7,http://pdfs.semanticscholar.org/c882/9013bbfb19ccb731bd54c1a885c245b6c7d7.pdf
+c81ee278d27423fd16c1a114dcae486687ee27ff,http://pdfs.semanticscholar.org/c81e/e278d27423fd16c1a114dcae486687ee27ff.pdf
+c88ce5ef33d5e544224ab50162d9883ff6429aa3,http://pdfs.semanticscholar.org/c88c/e5ef33d5e544224ab50162d9883ff6429aa3.pdf
+c822bd0a005efe4ec1fea74de534900a9aa6fb93,http://pdfs.semanticscholar.org/c822/bd0a005efe4ec1fea74de534900a9aa6fb93.pdf
+c8adbe00b5661ab9b3726d01c6842c0d72c8d997,http://pdfs.semanticscholar.org/c8ad/be00b5661ab9b3726d01c6842c0d72c8d997.pdf
+fb4545782d9df65d484009558e1824538030bbb1,http://pdfs.semanticscholar.org/fb45/45782d9df65d484009558e1824538030bbb1.pdf
+fbf196d83a41d57dfe577b3a54b1b7fa06666e3b,http://pdfs.semanticscholar.org/fbf1/96d83a41d57dfe577b3a54b1b7fa06666e3b.pdf
+fbb6ee4f736519f7231830a8e337b263e91f06fe,http://pdfs.semanticscholar.org/fbb6/ee4f736519f7231830a8e337b263e91f06fe.pdf
+fb5280b80edcf088f9dd1da769463d48e7b08390,http://pdfs.semanticscholar.org/fb52/80b80edcf088f9dd1da769463d48e7b08390.pdf
+fba464cb8e3eff455fe80e8fb6d3547768efba2f,http://pdfs.semanticscholar.org/fba4/64cb8e3eff455fe80e8fb6d3547768efba2f.pdf
+fb084b1fe52017b3898c871514cffcc2bdb40b73,http://pdfs.semanticscholar.org/fb08/4b1fe52017b3898c871514cffcc2bdb40b73.pdf
+ed28e8367fcb7df7e51963add9e2d85b46e2d5d6,http://pdfs.semanticscholar.org/ed28/e8367fcb7df7e51963add9e2d85b46e2d5d6.pdf
+ed08ac6da6f8ead590b390b1d14e8a9b97370794,http://pdfs.semanticscholar.org/ed08/ac6da6f8ead590b390b1d14e8a9b97370794.pdf
+edef98d2b021464576d8d28690d29f5431fd5828,http://pdfs.semanticscholar.org/edef/98d2b021464576d8d28690d29f5431fd5828.pdf
+edc5a0a8b9fc6ae0e8d8091a2391767f645095d9,http://www.es.mdh.se/pdf_publications/3948.pdf
+ed04e161c953d345bcf5b910991d7566f7c486f7,http://pdfs.semanticscholar.org/ed04/e161c953d345bcf5b910991d7566f7c486f7.pdf
+edd7504be47ebc28b0d608502ca78c0aea6a65a2,http://pdfs.semanticscholar.org/edd7/504be47ebc28b0d608502ca78c0aea6a65a2.pdf
+edbb8cce0b813d3291cae4088914ad3199736aa0,http://pdfs.semanticscholar.org/edbb/8cce0b813d3291cae4088914ad3199736aa0.pdf
+c178a86f4c120eca3850a4915134fff44cbccb48,http://pdfs.semanticscholar.org/c178/a86f4c120eca3850a4915134fff44cbccb48.pdf
+c1d2d12ade031d57f8d6a0333cbe8a772d752e01,http://pdfs.semanticscholar.org/c1d2/d12ade031d57f8d6a0333cbe8a772d752e01.pdf
+c180f22a9af4a2f47a917fd8f15121412f2d0901,http://pdfs.semanticscholar.org/c180/f22a9af4a2f47a917fd8f15121412f2d0901.pdf
+c1f07ec629be1c6fe562af0e34b04c54e238dcd1,http://pdfs.semanticscholar.org/c1f0/7ec629be1c6fe562af0e34b04c54e238dcd1.pdf
+c10a15e52c85654db9c9343ae1dd892a2ac4a279,http://www.cs.utexas.edu/~grauman/papers/ijcv-sungju.pdf
+c1fc70e0952f6a7587b84bf3366d2e57fc572fd7,http://pdfs.semanticscholar.org/c1fc/70e0952f6a7587b84bf3366d2e57fc572fd7.pdf
+c1dfabe36a4db26bf378417985a6aacb0f769735,http://pdfs.semanticscholar.org/c1df/abe36a4db26bf378417985a6aacb0f769735.pdf
+c1482491f553726a8349337351692627a04d5dbe,http://pdfs.semanticscholar.org/c148/2491f553726a8349337351692627a04d5dbe.pdf
+c1ff88493721af1940df0d00bcfeefaa14f1711f,http://pdfs.semanticscholar.org/c1ff/88493721af1940df0d00bcfeefaa14f1711f.pdf
+c17a332e59f03b77921942d487b4b102b1ee73b6,http://pdfs.semanticscholar.org/c17a/332e59f03b77921942d487b4b102b1ee73b6.pdf
+c1e76c6b643b287f621135ee0c27a9c481a99054,http://pdfs.semanticscholar.org/c1e7/6c6b643b287f621135ee0c27a9c481a99054.pdf
+c10b0a6ba98aa95d740a0d60e150ffd77c7895ad,http://pdfs.semanticscholar.org/c10b/0a6ba98aa95d740a0d60e150ffd77c7895ad.pdf
+c696c9bbe27434cb6279223a79b17535cd6e88c8,http://pdfs.semanticscholar.org/c696/c9bbe27434cb6279223a79b17535cd6e88c8.pdf
+c65e4ffa2c07a37b0bb7781ca4ec2ed7542f18e3,http://pdfs.semanticscholar.org/c65e/4ffa2c07a37b0bb7781ca4ec2ed7542f18e3.pdf
+c614450c9b1d89d5fda23a54dbf6a27a4b821ac0,http://pdfs.semanticscholar.org/c614/450c9b1d89d5fda23a54dbf6a27a4b821ac0.pdf
+c6096986b4d6c374ab2d20031e026b581e7bf7e9,http://pdfs.semanticscholar.org/c609/6986b4d6c374ab2d20031e026b581e7bf7e9.pdf
+c6608fdd919f2bc4f8d7412bab287527dcbcf505,http://pdfs.semanticscholar.org/c660/8fdd919f2bc4f8d7412bab287527dcbcf505.pdf
+c6ea6fee4823b511eecf41f6c2574a0728055baf,http://pdfs.semanticscholar.org/c6ea/6fee4823b511eecf41f6c2574a0728055baf.pdf
+c62c910264658709e9bf0e769e011e7944c45c90,http://pdfs.semanticscholar.org/c62c/910264658709e9bf0e769e011e7944c45c90.pdf
+c660500b49f097e3af67bb14667de30d67db88e3,http://pdfs.semanticscholar.org/c660/500b49f097e3af67bb14667de30d67db88e3.pdf
+c6ffa09c4a6cacbbd3c41c8ae7a728b0de6e10b6,http://pdfs.semanticscholar.org/c6ff/a09c4a6cacbbd3c41c8ae7a728b0de6e10b6.pdf
+c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8,http://pdfs.semanticscholar.org/c652/6dd3060d63a6c90e8b7ff340383c4e0e0dd8.pdf
+c65a394118d34beda5dd01ae0df163c3db88fceb,http://pdfs.semanticscholar.org/c65a/394118d34beda5dd01ae0df163c3db88fceb.pdf
+ec12f805a48004a90e0057c7b844d8119cb21b4a,http://pdfs.semanticscholar.org/ec12/f805a48004a90e0057c7b844d8119cb21b4a.pdf
+ec22eaa00f41a7f8e45ed833812d1ac44ee1174e,http://pdfs.semanticscholar.org/ec22/eaa00f41a7f8e45ed833812d1ac44ee1174e.pdf
+ec54000c6c0e660dd99051bdbd7aed2988e27ab8,http://pdfs.semanticscholar.org/ec54/000c6c0e660dd99051bdbd7aed2988e27ab8.pdf
+ec0104286c96707f57df26b4f0a4f49b774c486b,http://www.cs.newpaltz.edu/~lik/publications/Mingxing-Duan-IEEE-TIFS-2018.pdf
+ecca2a2b84ea01ea425b8d2d9f376f15a295a7f5,http://smie2.sysu.edu.cn/~wcd/Papers/2013_TPAMI_Wang_MEAP.pdf
+ec7cd3fff8bdbbe7005bc8d6b7f6b87d72aac2d9,http://www.mmp.rwth-aachen.de/publications/pdf/rafi_chalearn2015.pdf
+ec05078be14a11157ac0e1c6b430ac886124589b,http://pdfs.semanticscholar.org/ec05/078be14a11157ac0e1c6b430ac886124589b.pdf
+4e7ed13e541b8ed868480375785005d33530e06d,http://arxiv.org/pdf/1603.07388v1.pdf
+4e490cf3cf26fe46507bb55a548c403b9c685ba0,http://labnic.unige.ch/nic/papers/SJ_DG_SD_KND_IC_MIV_DS_PV_KRS_IEEETransac11.pdf
+4e5dc3b397484326a4348ccceb88acf309960e86,http://pdfs.semanticscholar.org/4e5d/c3b397484326a4348ccceb88acf309960e86.pdf
+4e6c17966efae956133bf8f22edeffc24a0470c1,http://pdfs.semanticscholar.org/4e6c/17966efae956133bf8f22edeffc24a0470c1.pdf
+4e1836914bbcf94dc00e604b24b1b0d6d7b61e66,http://pdfs.semanticscholar.org/4e18/36914bbcf94dc00e604b24b1b0d6d7b61e66.pdf
+4e4fa167d772f34dfffc374e021ab3044566afc3,http://pdfs.semanticscholar.org/4e4f/a167d772f34dfffc374e021ab3044566afc3.pdf
+4ed54d5093d240cc3644e4212f162a11ae7d1e3b,http://pdfs.semanticscholar.org/4ed5/4d5093d240cc3644e4212f162a11ae7d1e3b.pdf
+4e8c608fc4b8198f13f8a68b9c1a0780f6f50105,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Yang_How_Related_Exemplars_2013_ICCV_paper.pdf
+4ea53e76246afae94758c1528002808374b75cfa,http://pdfs.semanticscholar.org/4ea5/3e76246afae94758c1528002808374b75cfa.pdf
+4ed2d7ecb34a13e12474f75d803547ad2ad811b2,http://openaccess.thecvf.com/content_ICCV_2017/papers/Yang_Common_Action_Discovery_ICCV_2017_paper.pdf
+4e97b53926d997f451139f74ec1601bbef125599,http://pdfs.semanticscholar.org/4e97/b53926d997f451139f74ec1601bbef125599.pdf
+4e93a8a47473bf57e24aec048cb870ab366a43d6,http://pdfs.semanticscholar.org/4e93/a8a47473bf57e24aec048cb870ab366a43d6.pdf
+4e8168fbaa615009d1618a9d6552bfad809309e9,http://pdfs.semanticscholar.org/4e81/68fbaa615009d1618a9d6552bfad809309e9.pdf
+4ea4116f57c5d5033569690871ba294dc3649ea5,http://pdfs.semanticscholar.org/4ea4/116f57c5d5033569690871ba294dc3649ea5.pdf
+4e444db884b5272f3a41e4b68dc0d453d4ec1f4c,http://pdfs.semanticscholar.org/4e44/4db884b5272f3a41e4b68dc0d453d4ec1f4c.pdf
+4ef0a6817a7736c5641dc52cbc62737e2e063420,http://pdfs.semanticscholar.org/4ef0/a6817a7736c5641dc52cbc62737e2e063420.pdf
+4e4d034caa72dce6fca115e77c74ace826884c66,http://pdfs.semanticscholar.org/4e4d/034caa72dce6fca115e77c74ace826884c66.pdf
+4e7ebf3c4c0c4ecc48348a769dd6ae1ebac3bf1b,http://pdfs.semanticscholar.org/4e7e/bf3c4c0c4ecc48348a769dd6ae1ebac3bf1b.pdf
+4e0e49c280acbff8ae394b2443fcff1afb9bdce6,http://pdfs.semanticscholar.org/4e0e/49c280acbff8ae394b2443fcff1afb9bdce6.pdf
+4e4e8fc9bbee816e5c751d13f0d9218380d74b8f,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553711.pdf
+20a88cc454a03d62c3368aa1f5bdffa73523827b,http://pdfs.semanticscholar.org/d620/7593c39255ac8ce7536e5958a99f52d6bb60.pdf
+20a432a065a06f088d96965f43d0055675f0a6c1,http://pdfs.semanticscholar.org/20a4/32a065a06f088d96965f43d0055675f0a6c1.pdf
+20a3ce81e7ddc1a121f4b13e439c4cbfb01adfba,http://pdfs.semanticscholar.org/e805/bc872e18277c7cbfce82206cf1667cce22cc.pdf
+20e504782951e0c2979d9aec88c76334f7505393,https://arxiv.org/pdf/1612.08534v1.pdf
+209324c152fa8fab9f3553ccb62b693b5b10fb4d,http://pdfs.semanticscholar.org/2093/24c152fa8fab9f3553ccb62b693b5b10fb4d.pdf
+2050847bc7a1a0453891f03aeeb4643e360fde7d,https://cvhci.anthropomatik.kit.edu/~mtapaswi/papers/ICMR2015.pdf
+205f3d654b7d28d00d15b034a8c5b2a8740bd8b6,https://www.researchgate.net/profile/Ya_Su4/publication/51686551_Discriminant_learning_through_multiple_principal_angles_for_visual_recognition/links/00b495253b0057832b000000.pdf
+202d8d93b7b747cdbd6e24e5a919640f8d16298a,http://pdfs.semanticscholar.org/202d/8d93b7b747cdbd6e24e5a919640f8d16298a.pdf
+20767ca3b932cbc7b8112db21980d7b9b3ea43a3,http://pdfs.semanticscholar.org/2076/7ca3b932cbc7b8112db21980d7b9b3ea43a3.pdf
+20a16efb03c366fa4180659c2b2a0c5024c679da,http://pdfs.semanticscholar.org/20a1/6efb03c366fa4180659c2b2a0c5024c679da.pdf
+205b34b6035aa7b23d89f1aed2850b1d3780de35,http://mirlab.org/conference_papers/International_Conference/ICASSP%202014/papers/p504-jiang.pdf
+20c2a5166206e7ffbb11a23387b9c5edf42b5230,http://pdfs.semanticscholar.org/aff0/51003a43736001aeb76e08cb86ce67d6c70d.pdf
+20e505cef6d40f896e9508e623bfc01aa1ec3120,http://pdfs.semanticscholar.org/20e5/05cef6d40f896e9508e623bfc01aa1ec3120.pdf
+205e4d6e0de81c7dd6c83b737ffdd4519f4f7ffa,http://poseidon.csd.auth.gr/papers/PUBLISHED/CONFERENCE/pdf/2009/Vretos_2009_ICIP.pdf
+2098983dd521e78746b3b3fa35a22eb2fa630299,http://pdfs.semanticscholar.org/2098/983dd521e78746b3b3fa35a22eb2fa630299.pdf
+20b437dc4fc44c17f131713ffcbb4a8bd672ef00,http://pdfs.semanticscholar.org/20b4/37dc4fc44c17f131713ffcbb4a8bd672ef00.pdf
+208a2c50edb5271a050fa9f29d3870f891daa4dc,http://pdfs.semanticscholar.org/c17c/55f43af5db44b6a4c17932aa3d7031985749.pdf
+207798603e3089a1c807c93e5f36f7767055ec06,http://www1.se.cuhk.edu.hk/~hccl/publications/pub/2012_APSIPA_FacialExpression.pdf
+20be15dac7d8a5ba4688bf206ad24cab57d532d6,http://pdfs.semanticscholar.org/20be/15dac7d8a5ba4688bf206ad24cab57d532d6.pdf
+2042aed660796b14925db17c0a8b9fbdd7f3ebac,http://pdfs.semanticscholar.org/4a19/fd2eb09976128e33bd8f9411972146ac6c41.pdf
+20ebbcb6157efaacf7a1ceb99f2f3e2fdf1384e6,http://pdfs.semanticscholar.org/ee89/f47ebfbebed7d6793a6774356ba63398f0d0.pdf
+20532b1f80b509f2332b6cfc0126c0f80f438f10,https://arxiv.org/pdf/1509.03248v1.pdf
+205af28b4fcd6b569d0241bb6b255edb325965a4,http://pdfs.semanticscholar.org/205a/f28b4fcd6b569d0241bb6b255edb325965a4.pdf
+20cfb4136c1a984a330a2a9664fcdadc2228b0bc,http://www.eecs.harvard.edu/~htk/publication/2015-amfg-chen-comiter-kung-mcdanel.pdf
+2020e8c0be8fa00d773fd99b6da55029a6a83e3d,http://pdfs.semanticscholar.org/9ca3/806dd01f8aded02e88c7022716b7fef46423.pdf
+20a0b23741824a17c577376fdd0cf40101af5880,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Weinzaepfel_Learning_to_Track_ICCV_2015_paper.pdf
+18c72175ddbb7d5956d180b65a96005c100f6014,http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf
+18636347b8741d321980e8f91a44ee054b051574,http://biometrics.cse.msu.edu/Publications/SoftBiometrics/JainParkFacemarks_ICIP09.pdf
+18206e1b988389eaab86ef8c852662accf3c3663,http://pdfs.semanticscholar.org/d13e/5b4249cfe9672672eb573d15e7dc0a235e04.pdf
+189b1859f77ddc08027e1e0f92275341e5c0fdc6,http://pdfs.semanticscholar.org/189b/1859f77ddc08027e1e0f92275341e5c0fdc6.pdf
+18a9f3d855bd7728ed4f988675fa9405b5478845,http://pdfs.semanticscholar.org/18a9/f3d855bd7728ed4f988675fa9405b5478845.pdf
+181045164df86c72923906aed93d7f2f987bce6c,http://pdfs.semanticscholar.org/1810/45164df86c72923906aed93d7f2f987bce6c.pdf
+18166432309000d9a5873f989b39c72a682932f5,http://pdfs.semanticscholar.org/1816/6432309000d9a5873f989b39c72a682932f5.pdf
+18d5b0d421332c9321920b07e0e8ac4a240e5f1f,http://pdfs.semanticscholar.org/18d5/b0d421332c9321920b07e0e8ac4a240e5f1f.pdf
+18c6c92c39c8a5a2bb8b5673f339d3c26b8dcaae,http://pdfs.semanticscholar.org/18c6/c92c39c8a5a2bb8b5673f339d3c26b8dcaae.pdf
+1885acea0d24e7b953485f78ec57b2f04e946eaf,http://openaccess.thecvf.com/content_ICCV_2017_workshops/papers/w36/Xiong_Combining_Local_and_ICCV_2017_paper.pdf
+184750382fe9b722e78d22a543e852a6290b3f70,http://pdfs.semanticscholar.org/1847/50382fe9b722e78d22a543e852a6290b3f70.pdf
+18a849b1f336e3c3b7c0ee311c9ccde582d7214f,http://pdfs.semanticscholar.org/18a8/49b1f336e3c3b7c0ee311c9ccde582d7214f.pdf
+18cd79f3c93b74d856bff6da92bfc87be1109f80,http://pdfs.semanticscholar.org/18cd/79f3c93b74d856bff6da92bfc87be1109f80.pdf
+182470fd0c18d0c5979dff75d089f1da176ceeeb,https://repositori.upf.edu/bitstream/handle/10230/27207/dominguez_MARMI16_mult.pdf?isAllowed=y&sequence=1
+1862cb5728990f189fa91c67028f6d77b5ac94f6,http://lvdmaaten.github.io/publications/papers/CVPR_2014.pdf
+1862bfca2f105fddfc79941c90baea7db45b8b16,http://vision.cs.utexas.edu/projects/rationales/rationales.pdf
+1886b6d9c303135c5fbdc33e5f401e7fc4da6da4,https://arxiv.org/pdf/1610.01119v1.pdf
+1888bf50fd140767352158c0ad5748b501563833,http://pdfs.semanticscholar.org/1888/bf50fd140767352158c0ad5748b501563833.pdf
+187d4d9ba8e10245a34f72be96dd9d0fb393b1aa,http://pdfs.semanticscholar.org/187d/4d9ba8e10245a34f72be96dd9d0fb393b1aa.pdf
+182f3aa4b02248ff9c0f9816432a56d3c8880706,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Quan_Sparse_Coding_for_CVPR_2016_paper.pdf
+1828b1b0f5395b163fef087a72df0605249300c2,http://pdfs.semanticscholar.org/8b18/66a150521bfa18c3e6ec633e1acc79683749.pdf
+185360fe1d024a3313042805ee201a75eac50131,http://cvit.iiit.ac.in/papers/deidentTCSVT2k11.pdf
+1824b1ccace464ba275ccc86619feaa89018c0ad,http://www.csc.kth.se/~vahidk/papers/KazemiCVPR14.pdf
+18dfc2434a95f149a6cbb583cca69a98c9de9887,http://pdfs.semanticscholar.org/18df/c2434a95f149a6cbb583cca69a98c9de9887.pdf
+181708b09bde7f4904f8fd92b3668d76e7aff527,http://mplab.ucsd.edu/~ksikka/emotiw14.pdf
+271e2856e332634eccc5e80ba6fa9bbccf61f1be,http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/WorkShops/data/papers/176.pdf
+27846b464369095f4909f093d11ed481277c8bba,http://pdfs.semanticscholar.org/2784/6b464369095f4909f093d11ed481277c8bba.pdf
+27eb7a6e1fb6b42516041def6fe64bd028b7614d,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zafeiriou_Joint_Unsupervised_Deformable_CVPR_2016_paper.pdf
+2717998d89d34f45a1cca8b663b26d8bf10608a9,http://wangzheallen.github.io/papers/ZhangWWQW_CVPR16.pdf
+27c66b87e0fbb39f68ddb783d11b5b7e807c76e8,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w14/papers/Rodriguez_Fast_Simplex-HMM_for_CVPR_2017_paper.pdf
+27a0a7837f9114143717fc63294a6500565294c2,http://pdfs.semanticscholar.org/27a0/a7837f9114143717fc63294a6500565294c2.pdf
+27aadf6e7441bf40675874df1cf4bb7e2dffdd9e,http://www1.icsi.berkeley.edu/~farrell/birdlets/iccv11-camera-ready.pdf
+27d709f7b67204e1e5e05fe2cfac629afa21699d,http://pdfs.semanticscholar.org/2b88/db4294f11b0516a537b8720fcf416be80dbf.pdf
+27c9ddb72360f4cd0f715cd7ea82fa399af91f11,http://pdfs.semanticscholar.org/27c9/ddb72360f4cd0f715cd7ea82fa399af91f11.pdf
+271df16f789bd2122f0268c3e2fa46bc0cb5f195,http://users.eecs.northwestern.edu/~mya671/mypapers/CVPR11_Yuan_Yang_Wu.pdf
+27218ff58c3f0e7d7779fba3bb465d746749ed7c,http://pdfs.semanticscholar.org/2721/8ff58c3f0e7d7779fba3bb465d746749ed7c.pdf
+276dbb667a66c23545534caa80be483222db7769,http://pdfs.semanticscholar.org/276d/bb667a66c23545534caa80be483222db7769.pdf
+27c6cd568d0623d549439edc98f6b92528d39bfe,http://openaccess.thecvf.com/content_iccv_2015/papers/Hsu_Regressive_Tree_Structured_ICCV_2015_paper.pdf
+273b0511588ab0a81809a9e75ab3bd93d6a0f1e3,http://arxiv.org/pdf/1604.04334v1.pdf
+27169761aeab311a428a9dd964c7e34950a62a6b,http://academicjournals.org/article/article1380818227_Mostayed%20et%20al.pdf
+27961bc8173ac84fdbecacd01e5ed6f7ed92d4bd,http://www.cse.msu.edu/rgroups/biometrics/Publications/Face/NiinumaHanJain_MultiviewFaceRecognition_PoseRegularization_BTAS13.pdf
+27173d0b9bb5ce3a75d05e4dbd8f063375f24bb5,http://pdfs.semanticscholar.org/2717/3d0b9bb5ce3a75d05e4dbd8f063375f24bb5.pdf
+2784d9212dee2f8a660814f4b85ba564ec333720,http://people.cs.umass.edu/~elm/papers/cvpr2010_imagetrans.pdf
+275b3cb7c780c663eabbf4d6c6cbc8fe24287c70,https://www.researchgate.net/profile/Bisan_Alsalibi/publication/280839254_The_Impact_of_Bio-Inspired_Approaches_Toward_the_Advancement_of_Face_Recognition/links/55c8ce4608aeca747d67062e.pdf?origin=publication_list
+278e1441a77fbeebb22c45932d76c557e5663197,http://sist.sysu.edu.cn/~zhwshi/research/preprintversion/two-stage%20nonnegative%20sparse%20representation%20for%20large-scale%20face%20recognition.pdf
+27cccf992f54966feb2ab4831fab628334c742d8,http://pdfs.semanticscholar.org/27cc/cf992f54966feb2ab4831fab628334c742d8.pdf
+27883967d3dac734c207074eed966e83afccb8c3,http://www.ee.cuhk.edu.hk/~xgwang/papers/gaoGZHW.pdf
+270e5266a1f6e76954dedbc2caf6ff61a5fbf8d0,http://pdfs.semanticscholar.org/270e/5266a1f6e76954dedbc2caf6ff61a5fbf8d0.pdf
+27f8b01e628f20ebfcb58d14ea40573d351bbaad,http://pdfs.semanticscholar.org/27f8/b01e628f20ebfcb58d14ea40573d351bbaad.pdf
+2742a61d32053761bcc14bd6c32365bfcdbefe35,http://pdfs.semanticscholar.org/ee39/96dc3f451f480134e1a468c32762d688c51b.pdf
+27dafedccd7b049e87efed72cabaa32ec00fdd45,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2A_074.pdf
+27a299b834a18e45d73e0bf784bbb5b304c197b3,http://ai.stanford.edu/~vigneshr/cvpr_13/cvpr13_social_roles.pdf
+27b1670e1b91ab983b7b1ecfe9eb5e6ba951e0ba,http://pdfs.semanticscholar.org/27b1/670e1b91ab983b7b1ecfe9eb5e6ba951e0ba.pdf
+274f87ad659cd90382ef38f7c6fafc4fc7f0d74d,http://www.deepkernel.com/Papers/mm2014.pdf
+27ee8482c376ef282d5eb2e673ab042f5ded99d7,http://sylvain.legallou.fr/Fichiers/p_ICARCV06_NewNormalization_LeGallou.pdf
+4b28de1ebf6b6cb2479b9176fab50add6ed75b78,http://vision.ucsd.edu/sites/default/files/cvpr05a.pdf
+4b4106614c1d553365bad75d7866bff0de6056ed,http://pdfs.semanticscholar.org/4b41/06614c1d553365bad75d7866bff0de6056ed.pdf
+4bb03b27bc625e53d8d444c0ba3ee235d2f17e86,http://www.cs.utexas.edu/~grauman/papers/hwang_cvpr2010.pdf
+4b89cf7197922ee9418ae93896586c990e0d2867,http://www.cs.cmu.edu/~ftorre/paper1.pdf
+4bc9a767d7e63c5b94614ebdc24a8775603b15c9,http://pdfs.semanticscholar.org/4bc9/a767d7e63c5b94614ebdc24a8775603b15c9.pdf
+4b519e2e88ccd45718b0fc65bfd82ebe103902f7,http://biometrics.cse.msu.edu/Publications/Face/LiParkJain_DiscriminativeModelAgeInvariantFR_TIFS11.pdf
+4b3f425274b0c2297d136f8833a31866db2f2aec,https://arxiv.org/pdf/1705.01567v2.pdf
+4b7c110987c1d89109355b04f8597ce427a7cd72,http://pdfs.semanticscholar.org/4b7c/110987c1d89109355b04f8597ce427a7cd72.pdf
+4bd088ba3f42aa1e43ae33b1988264465a643a1f,http://pdfs.semanticscholar.org/4bd0/88ba3f42aa1e43ae33b1988264465a643a1f.pdf
+4bfce41cc72be315770861a15e467aa027d91641,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Branson_Active_Annotation_Translation_2014_CVPR_paper.pdf
+4bd3de97b256b96556d19a5db71dda519934fd53,http://openaccess.thecvf.com/content_cvpr_2016/papers/Wen_Latent_Factor_Guided_CVPR_2016_paper.pdf
+4b04247c7f22410681b6aab053d9655cf7f3f888,http://pdfs.semanticscholar.org/60e5/0494dc26bd30e3c49b93ca85d0f79bf5c53f.pdf
+4b6387e608afa83ac8d855de2c9b0ae3b86f31cc,http://www.researchgate.net/profile/Heng_Yang3/publication/263813517_Face_Sketch_Landmarks_Localization_in_the_Wild/links/53d3dd3b0cf220632f3ce8b3.pdf
+4b60e45b6803e2e155f25a2270a28be9f8bec130,http://www.cs.washington.edu/ai/Mobile_Robotics/postscripts/attribute-objects-icra-2013.pdf
+4b5eeea5dd8bd69331bd4bd4c66098b125888dea,http://pdfs.semanticscholar.org/4b5e/eea5dd8bd69331bd4bd4c66098b125888dea.pdf
+4bbbee93519a4254736167b31be69ee1e537f942,https://arxiv.org/pdf/1611.05125v2.pdf
+4b74f2d56cd0dda6f459319fec29559291c61bff,http://pdfs.semanticscholar.org/96d1/e2686725f69b38b510a75b716caf3a48b3e2.pdf
+4ba38262fe20fab3e4c80215147b498f83843b93,http://pdfs.semanticscholar.org/f2af/967e28c12de9d957c08ffbc7a982e4ccea1e.pdf
+4b3eaedac75ac419c2609e131ea9377ba8c3d4b8,https://teresaproject.eu/wp-content/uploads/2015/07/kossaifi_tzimiro_pantic_icip_2014.pdf
+4b507a161af8a7dd41e909798b9230f4ac779315,http://pdfs.semanticscholar.org/5202/4d271f516c7d0dfa73009bf7537549ef74f7.pdf
+4b02387c2db968a70b69d98da3c443f139099e91,http://pdfs.semanticscholar.org/4b02/387c2db968a70b69d98da3c443f139099e91.pdf
+4b6be933057d939ddfa665501568ec4704fabb39,http://pdfs.semanticscholar.org/59c4/c6ba21354675401a173eb6c70500b99571cd.pdf
+4b71d1ff7e589b94e0f97271c052699157e6dc4a,http://pdfs.semanticscholar.org/4b71/d1ff7e589b94e0f97271c052699157e6dc4a.pdf
+4b0a2937f64df66cadee459a32ad7ae6e9fd7ed2,https://arxiv.org/pdf/1705.07750v3.pdf
+4b4ecc1cb7f048235605975ab37bb694d69f63e5,http://pdfs.semanticscholar.org/4b4e/cc1cb7f048235605975ab37bb694d69f63e5.pdf
+4be03fd3a76b07125cd39777a6875ee59d9889bd,http://homes.esat.kuleuven.be/~tuytelaa/Tuytelaars-BeyondConceptSearch-WIAMIS12.pdf
+4be774af78f5bf55f7b7f654f9042b6e288b64bd,http://pdfs.semanticscholar.org/4be7/74af78f5bf55f7b7f654f9042b6e288b64bd.pdf
+4b321065f6a45e55cb7f9d7b1055e8ac04713b41,http://pdfs.semanticscholar.org/4b32/1065f6a45e55cb7f9d7b1055e8ac04713b41.pdf
+4b605e6a9362485bfe69950432fa1f896e7d19bf,http://biometrics.cse.msu.edu/Publications/Face/BlantonAllenMillerKalkaJain_CVPRWB2016_HID.pdf
+4b3dd18882ff2738aa867b60febd2b35ab34dffc,http://pdfs.semanticscholar.org/4b3d/d18882ff2738aa867b60febd2b35ab34dffc.pdf
+11a2ef92b6238055cf3f6dcac0ff49b7b803aee3,http://cs.adelaide.edu.au/~carneiro/publications/mainSPL.pdf
+11dc744736a30a189f88fa81be589be0b865c9fa,http://openaccess.thecvf.com/content_iccv_2015/papers/Liang_A_Unified_Multiplicative_ICCV_2015_paper.pdf
+1171e8a96ffb15fdb265aaba02be014a38137ad5,http://hal.cse.msu.edu/pdfs/papers/pdm-tifs-2015.pdf
+11a210835b87ccb4989e9ba31e7559bb7a9fd292,http://profdoc.um.ac.ir/articles/a/1020638.pdf
+118ca3b2e7c08094e2a50137b1548ada7935e505,http://pdfs.semanticscholar.org/dc5c/273198b16dc615888256da74758f4a4b128b.pdf
+11aa527c01e61ec3a7a67eef8d7ffe9d9ce63f1d,http://pdfs.semanticscholar.org/11aa/527c01e61ec3a7a67eef8d7ffe9d9ce63f1d.pdf
+11ddf5e47854e4e6109762835d2ce086bbdfbc5b,http://eprints.pascal-network.org/archive/00008322/01/schroff11.pdf
+113c22eed8383c74fe6b218743395532e2897e71,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Sapp_MODEC_Multimodal_Decomposable_2013_CVPR_paper.pdf
+110c55b440b7c6a1692da9d8ee52389e43f6e76e,http://cs.brown.edu/people/ls/Publications/wacv2015dai_supplement.pdf
+11408af8861fb0a977412e58c1a23d61b8df458c,http://www.me.cs.scitec.kobe-u.ac.jp/~takigu/pdf/2014/0265.pdf
+11cc0774365b0cc0d3fa1313bef3d32c345507b1,http://pdfs.semanticscholar.org/11cc/0774365b0cc0d3fa1313bef3d32c345507b1.pdf
+11f7f939b6fcce51bdd8f3e5ecbcf5b59a0108f5,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Caseiro_Rolling_Riemannian_Manifolds_2013_CVPR_paper.pdf
+11269e98f072095ff94676d3dad34658f4876e0e,http://www.me.cs.scitec.kobe-u.ac.jp/~takigu/pdf/2015/ACII2015_submission_70.pdf
+1176c886afbd8685ecf0094450a02eb96b950f71,http://pdfs.semanticscholar.org/1176/c886afbd8685ecf0094450a02eb96b950f71.pdf
+113e5678ed8c0af2b100245057976baf82fcb907,http://www.humansensing.cs.cmu.edu/sites/default/files/4Jeni_Metrics.pdf
+11c04c4f0c234a72f94222efede9b38ba6b2306c,http://www.ece.northwestern.edu/~zli/new_home/MyPublications/conf/ACMMM08-action-recog.pdf
+1128a4f57148cec96c0ef4ae3b5a0fbf07efbad9,https://www.microsoft.com/en-us/research/wp-content/uploads/2016/08/icmr038-liA.pdf
+1149c6ac37ae2310fe6be1feb6e7e18336552d95,http://pdfs.semanticscholar.org/1149/c6ac37ae2310fe6be1feb6e7e18336552d95.pdf
+11f17191bf74c80ad0b16b9f404df6d03f7c8814,http://pdfs.semanticscholar.org/11f5/c82e3a39b9c8b91370ef7286a748c19b658a.pdf
+11367581c308f4ba6a32aac1b4a7cdb32cd63137,https://pdfs.semanticscholar.org/82c3/367ca6fc95e705aa8f2270265d82e9d8eedd.pdf
+11a47a91471f40af5cf00449954474fd6e9f7694,http://pdfs.semanticscholar.org/11a4/7a91471f40af5cf00449954474fd6e9f7694.pdf
+11fdff97f4511ae3d3691cfdeec5a19fa04db6ef,http://mclab.eic.hust.edu.cn/UpLoadFiles/Papers/SCA_TIP2016.pdf
+1198572784788a6d2c44c149886d4e42858d49e4,http://pdfs.semanticscholar.org/1198/572784788a6d2c44c149886d4e42858d49e4.pdf
+11fe6d45aa2b33c2ec10d9786a71c15ec4d3dca8,http://elderlab.apps01.yorku.ca/wp-content/uploads/2016/12/PrincePAMI08.pdf
+1134a6be0f469ff2c8caab266bbdacf482f32179,http://pdfs.semanticscholar.org/1134/a6be0f469ff2c8caab266bbdacf482f32179.pdf
+11b3877df0213271676fa8aa347046fd4b1a99ad,http://pdfs.semanticscholar.org/11b3/877df0213271676fa8aa347046fd4b1a99ad.pdf
+1130c38e88108cf68b92ecc61a9fc5aeee8557c9,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2A_058.pdf
+11b89011298e193d9e6a1d99302221c1d8645bda,http://openaccess.thecvf.com/content_iccv_2015/papers/Gao_Structured_Feature_Selection_ICCV_2015_paper.pdf
+111a9645ad0108ad472b2f3b243ed3d942e7ff16,http://pdfs.semanticscholar.org/111a/9645ad0108ad472b2f3b243ed3d942e7ff16.pdf
+1177977134f6663fff0137f11b81be9c64c1f424,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_003.pdf
+11ac88aebe0230e743c7ea2c2a76b5d4acbfecd0,http://pdfs.semanticscholar.org/11ac/88aebe0230e743c7ea2c2a76b5d4acbfecd0.pdf
+117f164f416ea68e8b88a3005e55a39dbdf32ce4,http://www.cs.toronto.edu/~fidler/papers/fashionCVPR15.pdf
+7dda2eb0054eb1aeda576ed2b27a84ddf09b07d4,http://pdfs.semanticscholar.org/7dda/2eb0054eb1aeda576ed2b27a84ddf09b07d4.pdf
+7d94fd5b0ca25dd23b2e36a2efee93244648a27b,http://pdfs.semanticscholar.org/7d94/fd5b0ca25dd23b2e36a2efee93244648a27b.pdf
+7d8c2d29deb80ceed3c8568100376195ce0914cb,https://arxiv.org/pdf/1708.01988v1.pdf
+7d50df03d0c8a26eaaeaef47de68691f9ac73701,http://media-lab.engr.ccny.cuny.edu/Paper/2011/HCBA11.pdf
+7d306512b545df98243f87cb8173df83b4672b18,http://pdfs.semanticscholar.org/7d30/6512b545df98243f87cb8173df83b4672b18.pdf
+7d98dcd15e28bcc57c9c59b7401fa4a5fdaa632b,http://pdfs.semanticscholar.org/7d98/dcd15e28bcc57c9c59b7401fa4a5fdaa632b.pdf
+7d41b67a641426cb8c0f659f0ba74cdb60e7159a,http://eprints.soton.ac.uk/389641/1/isba-16-camera.pdf
+7d1688ce0b48096e05a66ead80e9270260cb8082,http://openaccess.thecvf.com/content_ICCV_2017_workshops/papers/w44/Saxen_Real_vs._Fake_ICCV_2017_paper.pdf
+7d53678ef6009a68009d62cd07c020706a2deac3,http://pdfs.semanticscholar.org/7d53/678ef6009a68009d62cd07c020706a2deac3.pdf
+7d7be6172fc2884e1da22d1e96d5899a29831ad2,http://pdfs.semanticscholar.org/7d7b/e6172fc2884e1da22d1e96d5899a29831ad2.pdf
+7dcd3f58aa75f7ae96fdac9b1c2332a4f0b2dbd3,https://www.researchgate.net/profile/Symeon_Nikitidis/publication/221122322_Facial_expression_recognition_using_clustering_discriminant_Non-negative_Matrix_Factorization/links/54fee98e0cf2eaf210b4506c.pdf
+7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22,http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf
+7d73adcee255469aadc5e926066f71c93f51a1a5,http://mirlab.org/conference_papers/International_Conference/ICASSP%202016/pdfs/0001283.pdf
+7d9fe410f24142d2057695ee1d6015fb1d347d4a,http://pdfs.semanticscholar.org/7d9f/e410f24142d2057695ee1d6015fb1d347d4a.pdf
+7dd578878e84337d6d0f5eb593f22cabeacbb94c,http://pdfs.semanticscholar.org/7dd5/78878e84337d6d0f5eb593f22cabeacbb94c.pdf
+7dffe7498c67e9451db2d04bb8408f376ae86992,http://pdfs.semanticscholar.org/7dff/e7498c67e9451db2d04bb8408f376ae86992.pdf
+7d3f6dd220bec883a44596ddec9b1f0ed4f6aca2,http://maths.dur.ac.uk/users/kasper.peeters/pdf/face_recognition/PCA/Togneri2010LinearRegressionFaceRecognition.pdf
+7de386bf2a1b2436c836c0cc1f1f23fccb24aad6,http://pdfs.semanticscholar.org/7de3/86bf2a1b2436c836c0cc1f1f23fccb24aad6.pdf
+29ce6b54a87432dc8371f3761a9568eb3c5593b0,https://kar.kent.ac.uk/43222/1/Yatie_EST2013_vfinal.pdf
+2914e8c62f0432f598251fae060447f98141e935,http://pdfs.semanticscholar.org/2914/e8c62f0432f598251fae060447f98141e935.pdf
+292eba47ef77495d2613373642b8372d03f7062b,http://pdfs.semanticscholar.org/292e/ba47ef77495d2613373642b8372d03f7062b.pdf
+29e96ec163cb12cd5bd33bdf3d32181c136abaf9,http://pdfs.semanticscholar.org/29e9/6ec163cb12cd5bd33bdf3d32181c136abaf9.pdf
+29e793271370c1f9f5ac03d7b1e70d1efa10577c,http://pdfs.semanticscholar.org/29e7/93271370c1f9f5ac03d7b1e70d1efa10577c.pdf
+29d3ed0537e9ef62fd9ccffeeb72c1beb049e1ea,http://www.umiacs.umd.edu/~nshroff/DomainAdapt.pdf
+29c7dfbbba7a74e9aafb6a6919629b0a7f576530,http://pdfs.semanticscholar.org/29c7/dfbbba7a74e9aafb6a6919629b0a7f576530.pdf
+292c6b743ff50757b8230395c4a001f210283a34,https://labicvl.github.io/docs/pubs/Oscar_VISAPP_2014.pdf
+29fc4de6b680733e9447240b42db13d5832e408f,http://pdfs.semanticscholar.org/29fc/4de6b680733e9447240b42db13d5832e408f.pdf
+29c1f733a80c1e07acfdd228b7bcfb136c1dff98,http://pdfs.semanticscholar.org/29c1/f733a80c1e07acfdd228b7bcfb136c1dff98.pdf
+29f0a868644462aa7ebc21f4510d4209932a1b8c,http://yamdrok.stanford.edu/crowd/icmr.pdf
+29f27448e8dd843e1c4d2a78e01caeaea3f46a2d,http://pdfs.semanticscholar.org/29f2/7448e8dd843e1c4d2a78e01caeaea3f46a2d.pdf
+294d1fa4e1315e1cf7cc50be2370d24cc6363a41,http://pdfs.semanticscholar.org/294d/1fa4e1315e1cf7cc50be2370d24cc6363a41.pdf
+29d414bfde0dfb1478b2bdf67617597dd2d57fc6,http://pdfs.semanticscholar.org/29d4/14bfde0dfb1478b2bdf67617597dd2d57fc6.pdf
+2912c3ea67678a1052d7d5cbe734a6ad90fc360e,http://pdfs.semanticscholar.org/2912/c3ea67678a1052d7d5cbe734a6ad90fc360e.pdf
+29f4ac49fbd6ddc82b1bb697820100f50fa98ab6,http://dhoiem.cs.illinois.edu/publications/acvhl2010_annotation_ian.pdf
+2910fcd11fafee3f9339387929221f4fc1160973,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2013/W16/papers/Becker_Evaluating_Open-Universe_Face_2013_CVPR_paper.pdf
+29479bb4fe8c04695e6f5ae59901d15f8da6124b,http://www.mirlab.org/conference_papers/International_Conference/ACM%202005/docs/mm31.pdf
+290136947fd44879d914085ee51d8a4f433765fa,http://www.cse.msu.edu/biometrics/Publications/Face/KlareJain_TaxonomyFacialFeatures_BTAS10.pdf
+291f527598c589fb0519f890f1beb2749082ddfd,http://pdfs.semanticscholar.org/3215/ceb94227451a958bcf6b1205c710d17e53f5.pdf
+291265db88023e92bb8c8e6390438e5da148e8f5,http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf
+297d3df0cf84d24f7efea44f87c090c7d9be4bed,http://pdfs.semanticscholar.org/297d/3df0cf84d24f7efea44f87c090c7d9be4bed.pdf
+29b86534d4b334b670914038c801987e18eb5532,http://www.cs.toronto.edu/~makarand/papers/ICVGIP2014.pdf
+29631ca6cff21c9199c70bcdbbcd5f812d331a96,http://pdfs.semanticscholar.org/2963/1ca6cff21c9199c70bcdbbcd5f812d331a96.pdf
+2965d092ed72822432c547830fa557794ae7e27b,http://pdfs.semanticscholar.org/f038/9424ab8c27e01843931fcbef7e3ca997e891.pdf
+2983efadb1f2980ab5ef20175f488f77b6f059d7,http://pdfs.semanticscholar.org/2983/efadb1f2980ab5ef20175f488f77b6f059d7.pdf
+2911e7f0fb6803851b0eddf8067a6fc06e8eadd6,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Jung_Joint_Fine-Tuning_in_ICCV_2015_paper.pdf
+2921719b57544cfe5d0a1614d5ae81710ba804fa,http://pdfs.semanticscholar.org/2921/719b57544cfe5d0a1614d5ae81710ba804fa.pdf
+29a013b2faace976f2c532533bd6ab4178ccd348,http://or.nsfc.gov.cn/bitstream/00001903-5/94894/1/1000006589627.pdf
+29921072d8628544114f68bdf84deaf20a8c8f91,https://arxiv.org/pdf/1610.03670v4.pdf
+2969f822b118637af29d8a3a0811ede2751897b5,http://iip.ict.ac.cn/sites/default/files/publication/2013_ICCV_xwzhao_Cascaded%20Shape%20Space%20Pruning%20for%20Robust%20Facial%20Landmark%20Detection.pdf
+29756b6b16d7b06ea211f21cdaeacad94533e8b4,http://pdfs.semanticscholar.org/2975/6b6b16d7b06ea211f21cdaeacad94533e8b4.pdf
+293193d24d5c4d2975e836034bbb2329b71c4fe7,http://pdfs.semanticscholar.org/2931/93d24d5c4d2975e836034bbb2329b71c4fe7.pdf
+294bd7eb5dc24052237669cdd7b4675144e22306,http://pdfs.semanticscholar.org/294b/d7eb5dc24052237669cdd7b4675144e22306.pdf
+2988f24908e912259d7a34c84b0edaf7ea50e2b3,http://pdfs.semanticscholar.org/a779/e9432c3b6bfdcdbb1827757c3b8bf7c3aa4a.pdf
+29156e4fe317b61cdcc87b0226e6f09e416909e0,http://pdfs.semanticscholar.org/b880/78d284c9f77172dd23970522856a7042c961.pdf
+29f0414c5d566716a229ab4c5794eaf9304d78b6,http://pdfs.semanticscholar.org/29f0/414c5d566716a229ab4c5794eaf9304d78b6.pdf
+29908288392a9326d7a2996c6cd6b3e6cb137265,http://people.cs.ubc.ca/~pcarbo/ijcvss.pdf
+293ade202109c7f23637589a637bdaed06dc37c9,http://pdfs.semanticscholar.org/293a/de202109c7f23637589a637bdaed06dc37c9.pdf
+7c61d21446679776f7bdc7afd13aedc96f9acac1,http://pdfs.semanticscholar.org/e199/9cee8e6d717ad1181ae9e17c366e152e805e.pdf
+7cee802e083c5e1731ee50e731f23c9b12da7d36,http://pdfs.semanticscholar.org/7cee/802e083c5e1731ee50e731f23c9b12da7d36.pdf
+7c7ab59a82b766929defd7146fd039b89d67e984,https://www.microsoft.com/en-us/research/wp-content/uploads/2016/11/wacv2014_ChaZhang.pdf
+7ca337735ec4c99284e7c98f8d61fb901dbc9015,http://vision.psych.umn.edu/users/schrater/Papers/Veeretal05.pdf
+7c45b5824645ba6d96beec17ca8ecfb22dfcdd7f,http://pdfs.semanticscholar.org/7c45/b5824645ba6d96beec17ca8ecfb22dfcdd7f.pdf
+7c0a6824b556696ad7bdc6623d742687655852db,http://2010.telfor.rs/files/radovi//TELFOR2010_05_35.pdf
+7c95449a5712aac7e8c9a66d131f83a038bb7caa,http://pdfs.semanticscholar.org/7c95/449a5712aac7e8c9a66d131f83a038bb7caa.pdf
+7c4c442e9c04c6b98cd2aa221e9d7be15efd8663,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Wang_Classifier_Learning_With_2015_CVPR_paper.pdf
+7c3e09e0bd992d3f4670ffacb4ec3a911141c51f,http://pdfs.semanticscholar.org/7c3e/09e0bd992d3f4670ffacb4ec3a911141c51f.pdf
+7c2ec6f4ab3eae86e0c1b4f586e9c158fb1d719d,http://pdfs.semanticscholar.org/7c2e/c6f4ab3eae86e0c1b4f586e9c158fb1d719d.pdf
+7cf8a841aad5b7bdbea46a7bb820790e9ce12d0b,http://pdfs.semanticscholar.org/7cf8/a841aad5b7bdbea46a7bb820790e9ce12d0b.pdf
+7c9622ad1d8971cd74cc9e838753911fe27ccac4,http://pdfs.semanticscholar.org/7c96/22ad1d8971cd74cc9e838753911fe27ccac4.pdf
+7c2c9b083817f7a779d819afee383599d2e97ed8,http://pdfs.semanticscholar.org/bcad/d9c086ccd2f217da25f9550b06a429d53011.pdf
+7c45339253841b6f0efb28c75f2c898c79dfd038,http://vis-www.cs.umass.edu/papers/iccv07alignment.pdf
+7c7b0550ec41e97fcfc635feffe2e53624471c59,http://cvrr.ucsd.edu/publications/2014/headhandeye.pdf
+7ce03597b703a3b6754d1adac5fbc98536994e8f,http://pdfs.semanticscholar.org/7ce0/3597b703a3b6754d1adac5fbc98536994e8f.pdf
+7c36afc9828379de97f226e131390af719dbc18d,http://www.cs.cornell.edu/~chenxiawu/papers/ufna.pdf
+7c119e6bdada2882baca232da76c35ae9b5277f8,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_SMC_2009/PDFs/1070.pdf
+7c42371bae54050dbbf7ded1e7a9b4109a23a482,http://pdfs.semanticscholar.org/7c42/371bae54050dbbf7ded1e7a9b4109a23a482.pdf
+7c953868cd51f596300c8231192d57c9c514ae17,http://courses.cs.washington.edu/courses/cse590v/13au/CVPR13_FaceDetection.pdf
+7c6dbaebfe14878f3aee400d1378d90d61373921,http://pdfs.semanticscholar.org/7c6d/baebfe14878f3aee400d1378d90d61373921.pdf
+7c1e1c767f7911a390d49bed4f73952df8445936,http://cmp.felk.cvut.cz/~zimmerk/zimmermann-TPAMI-2014.pdf
+7c349932a3d083466da58ab1674129600b12b81c,http://pdfs.semanticscholar.org/7c34/9932a3d083466da58ab1674129600b12b81c.pdf
+1648cf24c042122af2f429641ba9599a2187d605,http://www.eurecom.fr/en/publication/5333/download/sec-publi-5333.pdf
+160259f98a6ec4ec3e3557de5e6ac5fa7f2e7f2b,https://infoscience.epfl.ch/record/207802/files/Discriminant-multilabel-Yuce.pdf
+16671b2dc89367ce4ed2a9c241246a0cec9ec10e,http://www.bsp.brain.riken.jp/publications/2010/PAMI-clustering-He-cichocki.pdf
+16bce9f940bb01aa5ec961892cc021d4664eb9e4,http://www.cise.ufl.edu/~dihong/assets/TIST-2014-10-0214.R2.pdf
+16892074764386b74b6040fe8d6946b67a246a0b,http://pdfs.semanticscholar.org/5f92/7118a5634790fe660fea91aea163b7065ae2.pdf
+16395b40e19cbc6d5b82543039ffff2a06363845,https://arxiv.org/pdf/1605.03222v1.pdf
+1677d29a108a1c0f27a6a630e74856e7bddcb70d,http://pdfs.semanticscholar.org/1677/d29a108a1c0f27a6a630e74856e7bddcb70d.pdf
+16c884be18016cc07aec0ef7e914622a1a9fb59d,http://pdfs.semanticscholar.org/16c8/84be18016cc07aec0ef7e914622a1a9fb59d.pdf
+162dfd0d2c9f3621d600e8a3790745395ab25ebc,http://cse.seu.edu.cn/people/xgeng/LDL/resource/cvpr14a.pdf
+1606b1475e125bba1b2d87bcf1e33b06f42c5f0d,http://users.eecs.northwestern.edu/~xsh835/CVPR2015_CasCNN.pdf
+16f940b4b5da79072d64a77692a876627092d39c,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR_2009_WS/data/papers/10/10.pdf
+16572c545384174f8136d761d2b0866e968120a8,http://pdfs.semanticscholar.org/1657/2c545384174f8136d761d2b0866e968120a8.pdf
+16820ccfb626dcdc893cc7735784aed9f63cbb70,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W12/papers/Azarmehr_Real-Time_Embedded_Age_2015_CVPR_paper.pdf
+1630e839bc23811e340bdadad3c55b6723db361d,http://pdfs.semanticscholar.org/9fc9/f22e9e28eab53d426e9d848c0d7dcd2c2459.pdf
+167f07b9d2babb8920acfa320ab04ee2758b5db6,http://eprints.pascal-network.org/archive/00008391/01/paper_express.pdf
+16286fb0f14f6a7a1acc10fcd28b3ac43f12f3eb,http://pdfs.semanticscholar.org/1628/6fb0f14f6a7a1acc10fcd28b3ac43f12f3eb.pdf
+1667a77db764e03a87a3fd167d88b060ef47bb56,http://pdfs.semanticscholar.org/1667/a77db764e03a87a3fd167d88b060ef47bb56.pdf
+169618b8dc9b348694a31c6e9e17b989735b4d39,http://vllab.ucmerced.edu/hylee/publication/ICCV17_OPN.pdf
+16e95a907b016951da7c9327927bb039534151da,http://pdfs.semanticscholar.org/16e9/5a907b016951da7c9327927bb039534151da.pdf
+16d9b983796ffcd151bdb8e75fc7eb2e31230809,http://pdfs.semanticscholar.org/16d9/b983796ffcd151bdb8e75fc7eb2e31230809.pdf
+1679943d22d60639b4670eba86665371295f52c3,http://pdfs.semanticscholar.org/1679/943d22d60639b4670eba86665371295f52c3.pdf
+162c33a2ec8ece0dc96e42d5a86dc3fedcf8cd5e,https://research-information.bristol.ac.uk/files/75922781/Ioannis_Pitas_Large_scale_classification_by_an_approximate_least_squares_one_class_support_vector_machine_ensemble_2015.pdf
+1610d2d4947c03a89c0fda506a74ba1ae2bc54c2,http://research.cs.rutgers.edu/~hxp1/rc_images/hai_facetrackextreme_3dv2016.pdf
+1659a8b91c3f428f1ba6aeba69660f2c9d0a85c6,http://www.dcs.gla.ac.uk/~vincia/papers/shortsurvey.pdf
+169076ffe5e7a2310e98087ef7da25aceb12b62d,http://pdfs.semanticscholar.org/1690/76ffe5e7a2310e98087ef7da25aceb12b62d.pdf
+167736556bea7fd57cfabc692ec4ae40c445f144,http://pdfs.semanticscholar.org/1677/36556bea7fd57cfabc692ec4ae40c445f144.pdf
+167ea1631476e8f9332cef98cf470cb3d4847bc6,http://www.kevinjing.com/visual_search_at_pinterest.pdf
+161eb88031f382e6a1d630cd9a1b9c4bc6b47652,http://arxiv.org/pdf/1505.04026v1.pdf
+420782499f38c1d114aabde7b8a8104c9e40a974,http://openaccess.thecvf.com/content_cvpr_2016/papers/Simo-Serra_Fashion_Style_in_CVPR_2016_paper.pdf
+42e3dac0df30d754c7c7dab9e1bb94990034a90d,https://arxiv.org/pdf/1311.5591v2.pdf
+4217473596b978f13a211cdf47b7d3f6588c785f,http://biometrics.cse.msu.edu/Publications/Face/OttoKlareJain_EfficientApproachClusteringFaceImages_ICB15.pdf
+4223666d1b0b1a60c74b14c2980069905088edc6,http://pdfs.semanticscholar.org/4223/666d1b0b1a60c74b14c2980069905088edc6.pdf
+42afe6d016e52c99e2c0d876052ade9c192d91e7,https://ibug.doc.ic.ac.uk/media/uploads/documents/ValstarEtAl-ICMI2006-FINAL.pdf
+42765c170c14bd58e7200b09b2e1e17911eed42b,http://pdfs.semanticscholar.org/4276/5c170c14bd58e7200b09b2e1e17911eed42b.pdf
+429c3588ce54468090cc2cf56c9b328b549a86dc,http://pdfs.semanticscholar.org/429c/3588ce54468090cc2cf56c9b328b549a86dc.pdf
+42cc9ea3da1277b1f19dff3d8007c6cbc0bb9830,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w11/papers/Saxena_Coordinated_Local_Metric_ICCV_2015_paper.pdf
+42350e28d11e33641775bef4c7b41a2c3437e4fd,http://mmlab.ie.cuhk.edu.hk/archive/2007/IP07_face02.pdf
+42e155ea109eae773dadf74d713485be83fca105,http://www.eurasip.org/Proceedings/Eusipco/Eusipco2014/HTML/papers/1569924805.pdf
+426913f890f07a5d79e6c23b83cd928ffc00e494,http://www2012.wwwconference.org/proceedings/proceedings/p939.pdf
+4223917177405eaa6bdedca061eb28f7b440ed8e,http://pdfs.semanticscholar.org/4223/917177405eaa6bdedca061eb28f7b440ed8e.pdf
+42c9394ca1caaa36f535721fa9a64b2c8d4e0dee,http://pdfs.semanticscholar.org/5d2d/208fc245bb49148bffb3076b0660b98b4466.pdf
+4270460b8bc5299bd6eaf821d5685c6442ea179a,http://www.cs.technion.ac.il/~ron/PAPERS/BronBronBrucKimIJCV09.pdf
+4205cb47ba4d3c0f21840633bcd49349d1dc02c1,http://www.utdallas.edu/~cxc123730/ICIP_2017.pdf
+42ded74d4858bea1070dadb08b037115d9d15db5,http://pdfs.semanticscholar.org/42de/d74d4858bea1070dadb08b037115d9d15db5.pdf
+42f6f5454dda99d8989f9814989efd50fe807ee8,http://pdfs.semanticscholar.org/42f6/f5454dda99d8989f9814989efd50fe807ee8.pdf
+429d4848d03d2243cc6a1b03695406a6de1a7abd,http://pdfs.semanticscholar.org/429d/4848d03d2243cc6a1b03695406a6de1a7abd.pdf
+42dc36550912bc40f7faa195c60ff6ffc04e7cd6,http://pdfs.semanticscholar.org/42dc/36550912bc40f7faa195c60ff6ffc04e7cd6.pdf
+424259e9e917c037208125ccc1a02f8276afb667,http://arxiv.org/pdf/1604.06433v1.pdf
+42ecfc3221c2e1377e6ff849afb705ecd056b6ff,http://pdfs.semanticscholar.org/42ec/fc3221c2e1377e6ff849afb705ecd056b6ff.pdf
+421955c6d2f7a5ffafaf154a329a525e21bbd6d3,http://pdfs.semanticscholar.org/ea6c/4d71fafe4352e7c3aa2237f77af0c4050cef.pdf
+42e0127a3fd6a96048e0bc7aab6d0ae88ba00fb0,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553734.pdf
+42df75080e14d32332b39ee5d91e83da8a914e34,http://www.imlab.tw/wp-content/uploads/2015/11/Illumination-Compensation-Using-Oriented-Local-Histogram-Equalization-and-its-Application-to-Face-Recognition.pdf
+4276eb27e2e4fc3e0ceb769eca75e3c73b7f2e99,http://pdfs.semanticscholar.org/4276/eb27e2e4fc3e0ceb769eca75e3c73b7f2e99.pdf
+89945b7cd614310ebae05b8deed0533a9998d212,http://pdfs.semanticscholar.org/8994/5b7cd614310ebae05b8deed0533a9998d212.pdf
+89de30a75d3258816c2d4d5a733d2bef894b66b9,https://www.computer.org/csdl/trans/tp/2015/06/06915721.pdf
+89002a64e96a82486220b1d5c3f060654b24ef2a,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Peng_PIEFA_Personalized_Incremental_ICCV_2015_paper.pdf
+89c84628b6f63554eec13830851a5d03d740261a,http://pdfs.semanticscholar.org/89c8/4628b6f63554eec13830851a5d03d740261a.pdf
+89c51f73ec5ebd1c2a9000123deaf628acf3cdd8,http://pdfs.semanticscholar.org/89c5/1f73ec5ebd1c2a9000123deaf628acf3cdd8.pdf
+89e7d23e0c6a1d636f2da68aaef58efee36b718b,http://pdfs.semanticscholar.org/89e7/d23e0c6a1d636f2da68aaef58efee36b718b.pdf
+89f4bcbfeb29966ab969682eae235066a89fc151,http://www.ee.surrey.ac.uk/CVSSP/Publications/papers/short-fgr-2004.pdf
+8913a5b7ed91c5f6dec95349fbc6919deee4fc75,https://people.eecs.berkeley.edu/~pabbeel/papers/2014-ICRA-BigBIRD.pdf
+89cabb60aa369486a1ebe586dbe09e3557615ef8,http://pdfs.semanticscholar.org/89ca/bb60aa369486a1ebe586dbe09e3557615ef8.pdf
+8983485996d5d9d162e70d66399047c5d01ac451,https://arxiv.org/pdf/1602.04868v1.pdf
+89bc311df99ad0127383a9149d1684dfd8a5aa34,http://pdfs.semanticscholar.org/89bc/311df99ad0127383a9149d1684dfd8a5aa34.pdf
+898a66979c7e8b53a10fd58ac51fbfdb6e6e6e7c,http://pdfs.semanticscholar.org/898a/66979c7e8b53a10fd58ac51fbfdb6e6e6e7c.pdf
+89d7cc9bbcd2fdc4f4434d153ecb83764242227b,http://pdfs.semanticscholar.org/89d7/cc9bbcd2fdc4f4434d153ecb83764242227b.pdf
+896f4d87257abd0f628c1ffbbfdac38c86a56f50,http://pdfs.semanticscholar.org/cf5c/c511c7fd556aaf113de02fc88d7ba10928b0.pdf
+45c340c8e79077a5340387cfff8ed7615efa20fd,http://pdfs.semanticscholar.org/45c3/40c8e79077a5340387cfff8ed7615efa20fd.pdf
+45dbf1b6fbc7fdae09e2a1928b18fbfff331a979,http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0854.pdf
+455204fa201e9936b42756d362f62700597874c4,http://pdfs.semanticscholar.org/4552/04fa201e9936b42756d362f62700597874c4.pdf
+4541c9b4b7e6f7a232bdd62ae653ba5ec0f8bbf6,http://pdfs.semanticscholar.org/4541/c9b4b7e6f7a232bdd62ae653ba5ec0f8bbf6.pdf
+459960be65dd04317dd325af5b7cbb883d822ee4,http://pdfs.semanticscholar.org/876c/c40c6c470f39fbda48dd394d0a9d5f6b147d.pdf
+45f858f9e8d7713f60f52618e54089ba68dfcd6d,http://openaccess.thecvf.com/content_ICCV_2017/papers/Sigurdsson_What_Actions_Are_ICCV_2017_paper.pdf
+45215e330a4251801877070c85c81f42c2da60fb,http://pdfs.semanticscholar.org/4521/5e330a4251801877070c85c81f42c2da60fb.pdf
+457cf73263d80a1a1338dc750ce9a50313745d1d,http://pdfs.semanticscholar.org/457c/f73263d80a1a1338dc750ce9a50313745d1d.pdf
+4526992d4de4da2c5fae7a5ceaad6b65441adf9d,http://pdfs.semanticscholar.org/4526/992d4de4da2c5fae7a5ceaad6b65441adf9d.pdf
+45e616093a92e5f1e61a7c6037d5f637aa8964af,http://www.cs.toronto.edu/~byang/papers/malf_fg15.pdf
+45efd6c2dd4ca19eed38ceeb7c2c5568231451e1,http://pdfs.semanticscholar.org/45ef/d6c2dd4ca19eed38ceeb7c2c5568231451e1.pdf
+45f3bf505f1ce9cc600c867b1fb2aa5edd5feed8,http://www.doc.ic.ac.uk/~maja/VukadinovicPantic-SMC05-FINAL.pdf
+4571626d4d71c0d11928eb99a3c8b10955a74afe,http://pdfs.semanticscholar.org/4571/626d4d71c0d11928eb99a3c8b10955a74afe.pdf
+4534d78f8beb8aad409f7bfcd857ec7f19247715,http://pdfs.semanticscholar.org/4534/d78f8beb8aad409f7bfcd857ec7f19247715.pdf
+4563b46d42079242f06567b3f2e2f7a80cb3befe,http://vims.cis.udel.edu/publications/VADANA_BeFIT2011.pdf
+459e840ec58ef5ffcee60f49a94424eb503e8982,http://pdfs.semanticscholar.org/459e/840ec58ef5ffcee60f49a94424eb503e8982.pdf
+45fbeed124a8956477dbfc862c758a2ee2681278,http://pdfs.semanticscholar.org/fb2a/66f842ca2577d9ea8a8300b555b71bd9cee8.pdf
+451c42da244edcb1088e3c09d0f14c064ed9077e,https://ibug.doc.ic.ac.uk/media/uploads/documents/sdnmf_conf.pdf
+4568063b7efb66801e67856b3f572069e774ad33,http://www.dbs.ifi.lmu.de/~yu_k/cvpr11_0712.pdf
+45c31cde87258414f33412b3b12fc5bec7cb3ba9,http://pdfs.semanticscholar.org/45c3/1cde87258414f33412b3b12fc5bec7cb3ba9.pdf
+4542273a157bfd4740645a6129d1784d1df775d2,http://pdfs.semanticscholar.org/4542/273a157bfd4740645a6129d1784d1df775d2.pdf
+4511e09ee26044cb46073a8c2f6e1e0fbabe33e8,http://pdfs.semanticscholar.org/4511/e09ee26044cb46073a8c2f6e1e0fbabe33e8.pdf
+45513d0f2f5c0dac5b61f9ff76c7e46cce62f402,http://pdfs.semanticscholar.org/4551/3d0f2f5c0dac5b61f9ff76c7e46cce62f402.pdf
+458677de7910a5455283a2be99f776a834449f61,http://pdfs.semanticscholar.org/4586/77de7910a5455283a2be99f776a834449f61.pdf
+1f9b2f70c24a567207752989c5bd4907442a9d0f,http://pdfs.semanticscholar.org/1f9b/2f70c24a567207752989c5bd4907442a9d0f.pdf
+1f05473c587e2a3b587f51eb808695a1c10bc153,http://pdfs.semanticscholar.org/7246/bbdf4c125d9d216e560c87c58a8613bd2602.pdf
+1fa3948af1c338f9ae200038c45adadd2b39a3e4,http://pdfs.semanticscholar.org/7655/4182b4b0f3301afe8cfbc96a9d289b75254f.pdf
+1f8304f4b51033d2671147b33bb4e51b9a1e16fe,http://pdfs.semanticscholar.org/1f83/04f4b51033d2671147b33bb4e51b9a1e16fe.pdf
+1f89439524e87a6514f4fbe7ed34bda4fd1ce286,http://pdfs.semanticscholar.org/1f89/439524e87a6514f4fbe7ed34bda4fd1ce286.pdf
+1f9ae272bb4151817866511bd970bffb22981a49,http://pdfs.semanticscholar.org/1f9a/e272bb4151817866511bd970bffb22981a49.pdf
+1fd6004345245daf101c98935387e6ef651cbb55,http://pdfs.semanticscholar.org/1fd6/004345245daf101c98935387e6ef651cbb55.pdf
+1fc249ec69b3e23856b42a4e591c59ac60d77118,http://cbl.uh.edu/pub_files/IJCB-2017-XX.pdf
+1fbde67e87890e5d45864e66edb86136fbdbe20e,http://www.openu.ac.il/home/hassner/data/ASLAN/Papers/ASLAN_TPAMI12.pdf
+1f41a96589c5b5cee4a55fc7c2ce33e1854b09d6,http://www.cse.msu.edu/~liuxm/publication/Han_Otto_Liu_Jain_TPAMI14.pdf
+1fcdc113a5df2f45a1f4b3249c041d942a3a730b,http://vipl.ict.ac.cn/homepage/CVPR15Metric/ref/Reconstruction-Based%20Metric%20Learning%20for%20Unconstrained%20Face%20Verification_TIFS2015.pdf
+1fd2ed45fb3ba77f10c83f0eef3b66955645dfe0,http://pdfs.semanticscholar.org/d91a/de2712c65f45ed8b917414829ecb24c3c183.pdf
+1fe59275142844ce3ade9e2aed900378dd025880,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w25/papers/Xiao_Facial_Landmark_Detection_ICCV_2015_paper.pdf
+1f2d12531a1421bafafe71b3ad53cb080917b1a7,http://pdfs.semanticscholar.org/1f2d/12531a1421bafafe71b3ad53cb080917b1a7.pdf
+1f35a65eab258f042edb8e1d4d5fff34f00a85bd,http://www.seattle.intel-research.net/~xren/publication/xren_cvpr08_casablanca.pdf
+1fe121925668743762ce9f6e157081e087171f4c,https://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W02/papers/Ylioinas_Unsupervised_Learning_of_2015_CVPR_paper.pdf
+1fefb2f8dd1efcdb57d5c2966d81f9ab22c1c58d,http://pdfs.semanticscholar.org/1fef/b2f8dd1efcdb57d5c2966d81f9ab22c1c58d.pdf
+1f8e44593eb335c2253d0f22f7f9dc1025af8c0d,https://qmro.qmul.ac.uk/xmlui/bitstream/handle/123456789/22607/Patras%20Fine-tuning%20regression%202014%20Accepted.pdf?sequence=1
+1f94734847c15fa1da68d4222973950d6b683c9e,https://arxiv.org/pdf/1512.02895v1.pdf
+1f745215cda3a9f00a65166bd744e4ec35644b02,http://www.eurecom.fr/en/publication/4044/download/mm-publi-4044.pdf
+1fd3dbb6e910708fa85c8a86e17ba0b6fef5617c,http://pdfs.semanticscholar.org/1fd3/dbb6e910708fa85c8a86e17ba0b6fef5617c.pdf
+1f24cef78d1de5aa1eefaf344244dcd1972797e8,http://openaccess.thecvf.com/content_cvpr_2017/papers/Zhou_Outlier-Robust_Tensor_PCA_CVPR_2017_paper.pdf
+1fe990ca6df273de10583860933d106298655ec8,http://pdfs.semanticscholar.org/1fe9/90ca6df273de10583860933d106298655ec8.pdf
+73f467b4358ac1cafb57f58e902c1cab5b15c590,http://pdfs.semanticscholar.org/73f4/67b4358ac1cafb57f58e902c1cab5b15c590.pdf
+732e8d8f5717f8802426e1b9debc18a8361c1782,http://pdfs.semanticscholar.org/732e/8d8f5717f8802426e1b9debc18a8361c1782.pdf
+7384c39a2d084c93566b98bc4d81532b5ad55892,http://pdfs.semanticscholar.org/d0a5/0940a1bf951adaf22bd1fc72ea861b606cdb.pdf
+739d400cb6fb730b894182b29171faaae79e3f01,http://pdfs.semanticscholar.org/739d/400cb6fb730b894182b29171faaae79e3f01.pdf
+732e4016225280b485c557a119ec50cffb8fee98,http://pdfs.semanticscholar.org/732e/4016225280b485c557a119ec50cffb8fee98.pdf
+7373c4a23684e2613f441f2236ed02e3f9942dd4,https://dr.ntu.edu.sg/bitstream/handle/10220/18012/Feature%20Extraction%20through%20Binary%20Pattern%20of%20Phase%20Congruency%20for%20Facial%20Expression%20Recognition.pdf?isAllowed=y&sequence=1
+732686d799d760ccca8ad47b49a8308b1ab381fb,http://pdfs.semanticscholar.org/7326/86d799d760ccca8ad47b49a8308b1ab381fb.pdf
+73fbdd57270b9f91f2e24989178e264f2d2eb7ae,http://mirlab.org/conference_papers/International_Conference/ICASSP%202012/pdfs/0001945.pdf
+738c187d55745aac18d5fb5f6cc9e3568cd2d217,http://www-ee.ccny.cuny.edu/wwwn/yltian/Publications/ICMR130-2015.pdf
+738a985fba44f9f5acd516e07d0d9578f2ffaa4e,http://pdfs.semanticscholar.org/738a/985fba44f9f5acd516e07d0d9578f2ffaa4e.pdf
+73fd7e74457e0606704c5c3d3462549f1b2de1ad,http://pdfs.semanticscholar.org/73fd/7e74457e0606704c5c3d3462549f1b2de1ad.pdf
+73c5bab5c664afa96b1c147ff21439135c7d968b,http://uclab.khu.ac.kr/resources/publication/C_109.pdf
+877100f430b72c5d60de199603ab5c65f611ce17,http://pdfs.semanticscholar.org/8771/00f430b72c5d60de199603ab5c65f611ce17.pdf
+87e5b4d95f95a0975e855cf5ad402db7a3c64ff5,http://www.researchgate.net/profile/Paul_Bodesheim/publication/269314560_Local_Novelty_Detection_in_Multi-class_Recognition_Problems/links/5486c2420cf289302e2c35eb.pdf
+870433ba89d8cab1656e57ac78f1c26f4998edfb,https://arxiv.org/pdf/1612.04904v1.pdf
+8796f2d54afb0e5c924101f54d469a1d54d5775d,http://pdfs.semanticscholar.org/8796/f2d54afb0e5c924101f54d469a1d54d5775d.pdf
+87f285782d755eb85d8922840e67ed9602cfd6b9,http://pdfs.semanticscholar.org/87f2/85782d755eb85d8922840e67ed9602cfd6b9.pdf
+871f5f1114949e3ddb1bca0982086cc806ce84a8,http://www.vision.ee.ethz.ch/en/publications/papers/proceedings/eth_biwi_01169.pdf
+87bee0e68dfc86b714f0107860d600fffdaf7996,http://mi.informatik.uni-siegen.de/publications/piotraschke_autoreconst_cvpr16.pdf
+87309bdb2b9d1fb8916303e3866eca6e3452c27d,http://pdfs.semanticscholar.org/8730/9bdb2b9d1fb8916303e3866eca6e3452c27d.pdf
+87147418f863e3d8ff8c97db0b42695a1c28195b,http://pdfs.semanticscholar.org/8714/7418f863e3d8ff8c97db0b42695a1c28195b.pdf
+87dd3fd36bccbe1d5f1484ac05f1848b51c6eab5,http://pdfs.semanticscholar.org/87dd/3fd36bccbe1d5f1484ac05f1848b51c6eab5.pdf
+87bb183d8be0c2b4cfceb9ee158fee4bbf3e19fd,http://pdfs.semanticscholar.org/87bb/183d8be0c2b4cfceb9ee158fee4bbf3e19fd.pdf
+80193dd633513c2d756c3f568ffa0ebc1bb5213e,http://pdfs.semanticscholar.org/a3d8/8154a1253338b45f950bcf9cbe91ba5271ee.pdf
+808b685d09912cbef4a009e74e10476304b4cccf,http://pdfs.semanticscholar.org/808b/685d09912cbef4a009e74e10476304b4cccf.pdf
+804b4c1b553d9d7bae70d55bf8767c603c1a09e3,http://mirlab.org/conference_papers/International_Conference/ICASSP%202016/pdfs/0001831.pdf
+800cbbe16be0f7cb921842d54967c9a94eaa2a65,http://pdfs.semanticscholar.org/800c/bbe16be0f7cb921842d54967c9a94eaa2a65.pdf
+808656563eea17470159e6540b05fe6f7ae58c2b,http://www.researchgate.net/profile/Songul_Varli_Albayrak/publication/235248598_Classification_with_Emotional_Faces_via_a_Robust_Sparse_Classifier/links/0912f510a44fb84bef000000.pdf
+80135ed7e34ac1dcc7f858f880edc699a920bf53,http://pdfs.semanticscholar.org/8013/5ed7e34ac1dcc7f858f880edc699a920bf53.pdf
+80277fb3a8a981933533cf478245f262652a33b5,http://pdfs.semanticscholar.org/8027/7fb3a8a981933533cf478245f262652a33b5.pdf
+80840df0802399838fe5725cce829e1b417d7a2e,http://pdfs.semanticscholar.org/8084/0df0802399838fe5725cce829e1b417d7a2e.pdf
+80c8d143e7f61761f39baec5b6dfb8faeb814be9,http://pdfs.semanticscholar.org/80c8/d143e7f61761f39baec5b6dfb8faeb814be9.pdf
+809ea255d144cff780300440d0f22c96e98abd53,http://pdfs.semanticscholar.org/809e/a255d144cff780300440d0f22c96e98abd53.pdf
+80a6bb337b8fdc17bffb8038f3b1467d01204375,http://pdfs.semanticscholar.org/80a6/bb337b8fdc17bffb8038f3b1467d01204375.pdf
+80be8624771104ff4838dcba9629bacfe6b3ea09,http://www.ifp.illinois.edu/~moulin/Papers/ECCV14-jiwen.pdf
+8000c4f278e9af4d087c0d0895fff7012c5e3d78,https://www.cse.ust.hk/~yuzhangcse/papers/Zhang_Yeung_CVPR10.pdf
+80bd795930837330e3ced199f5b9b75398336b87,http://pdfs.semanticscholar.org/80bd/795930837330e3ced199f5b9b75398336b87.pdf
+74de03923a069ffc0fb79e492ee447299401001f,http://pdfs.semanticscholar.org/74de/03923a069ffc0fb79e492ee447299401001f.pdf
+74f643579949ccd566f2638b85374e7a6857a9fc,http://www4.comp.polyu.edu.hk/~cslzhang/paper/conf/ICPR/MBP%20ICPR10(Revise%20final).pdf
+74408cfd748ad5553cba8ab64e5f83da14875ae8,http://pdfs.semanticscholar.org/7440/8cfd748ad5553cba8ab64e5f83da14875ae8.pdf
+747fddd7345b60da121fc13c5440a18039b912e6,http://pdfs.semanticscholar.org/747f/ddd7345b60da121fc13c5440a18039b912e6.pdf
+747d5fe667519acea1bee3df5cf94d9d6f874f20,http://pdfs.semanticscholar.org/747d/5fe667519acea1bee3df5cf94d9d6f874f20.pdf
+740e095a65524d569244947f6eea3aefa3cca526,http://pdfs.semanticscholar.org/740e/095a65524d569244947f6eea3aefa3cca526.pdf
+74e869bc7c99093a5ff9f8cfc3f533ccf1b135d8,http://pdfs.semanticscholar.org/74e8/69bc7c99093a5ff9f8cfc3f533ccf1b135d8.pdf
+741485741734a99e933dd0302f457158c6842adf,http://pdfs.semanticscholar.org/7414/85741734a99e933dd0302f457158c6842adf.pdf
+743e582c3e70c6ec07094887ce8dae7248b970ad,http://pdfs.semanticscholar.org/743e/582c3e70c6ec07094887ce8dae7248b970ad.pdf
+74b0095944c6e29837c208307a67116ebe1231c8,http://web.eecs.umich.edu/~hero/Preprints/EuclideanK-Nearest.pdf
+74156a11c2997517061df5629be78428e1f09cbd,http://cvrr.ucsd.edu/publications/2016/MartinRangeshTrivediICPR2016.pdf
+748e72af01ba4ee742df65e9c030cacec88ce506,http://pdfs.semanticscholar.org/748e/72af01ba4ee742df65e9c030cacec88ce506.pdf
+745b42050a68a294e9300228e09b5748d2d20b81,http://pdfs.semanticscholar.org/745b/42050a68a294e9300228e09b5748d2d20b81.pdf
+749382d19bfe9fb8d0c5e94d0c9b0a63ab531cb7,http://pdfs.semanticscholar.org/7493/82d19bfe9fb8d0c5e94d0c9b0a63ab531cb7.pdf
+74618fb4ce8ce0209db85cc6069fe64b1f268ff4,https://ir.canterbury.ac.nz/bitstream/handle/10092/6229/12636740_Y10_ICCSIT.pdf?isAllowed=y&sequence=1
+74875368649f52f74bfc4355689b85a724c3db47,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Yan_Object_Detection_by_2015_CVPR_paper.pdf
+7492c611b1df6bce895bee6ba33737e7fc7f60a6,https://ibug.doc.ic.ac.uk/media/uploads/documents/zafeiriou_the_3d_menpo_iccv_2017_paper.pdf
+74eae724ef197f2822fb7f3029c63014625ce1ca,http://pdfs.semanticscholar.org/74ea/e724ef197f2822fb7f3029c63014625ce1ca.pdf
+7480d8739eb7ab97c12c14e75658e5444b852e9f,http://pdfs.semanticscholar.org/cfe4/b03951be323394e6749f6a30b2ac9b924479.pdf
+74ba4ab407b90592ffdf884a20e10006d2223015,http://pdfs.semanticscholar.org/74ba/4ab407b90592ffdf884a20e10006d2223015.pdf
+7405ed035d1a4b9787b78e5566340a98fe4b63a0,http://pdfs.semanticscholar.org/7405/ed035d1a4b9787b78e5566340a98fe4b63a0.pdf
+744db9bd550bf5e109d44c2edabffec28c867b91,http://pdfs.semanticscholar.org/744d/b9bd550bf5e109d44c2edabffec28c867b91.pdf
+74325f3d9aea3a810fe4eab8863d1a48c099de11,http://pdfs.semanticscholar.org/7432/5f3d9aea3a810fe4eab8863d1a48c099de11.pdf
+744d23991a2c48d146781405e299e9b3cc14b731,http://www.cise.ufl.edu/~dihong/assets/LPS2016.pdf
+1a45ddaf43bcd49d261abb4a27977a952b5fff12,http://pdfs.semanticscholar.org/1a45/ddaf43bcd49d261abb4a27977a952b5fff12.pdf
+1a41e5d93f1ef5b23b95b7163f5f9aedbe661394,http://pdfs.semanticscholar.org/1a41/e5d93f1ef5b23b95b7163f5f9aedbe661394.pdf
+1a65cc5b2abde1754b8c9b1d932a68519bcb1ada,http://pdfs.semanticscholar.org/e4ae/821e234c281aed6ba629c130be7c8eac4a31.pdf
+1aa766bbd49bac8484e2545c20788d0f86e73ec2,http://inside.mines.edu/~jpaone/papers/IV15_BaselineFaceDetection_SHRP2NDS.pdf
+1a46d3a9bc1e4aff0ccac6403b49a13c8a89fc1d,http://www.dabi.temple.edu/~hbling/publication/oria-12-final.pdf
+1a878e4667fe55170252e3f41d38ddf85c87fcaf,http://pdfs.semanticscholar.org/1a87/8e4667fe55170252e3f41d38ddf85c87fcaf.pdf
+1a41831a3d7b0e0df688fb6d4f861176cef97136,http://pdfs.semanticscholar.org/1fae/8f87f83bb707c4b38c23e93ae2bcb900b962.pdf
+1ac2882559a4ff552a1a9956ebeadb035cb6df5b,http://www.pitt.edu/~jeffcohn/biblio/TrainData.pdf
+1a7a17c4f97c68d68fbeefee1751d349b83eb14a,http://pdfs.semanticscholar.org/1a7a/17c4f97c68d68fbeefee1751d349b83eb14a.pdf
+1aef6f7d2e3565f29125a4871cd60c4d86c48361,http://pdfs.semanticscholar.org/1aef/6f7d2e3565f29125a4871cd60c4d86c48361.pdf
+1a6c3c37c2e62b21ebc0f3533686dde4d0103b3f,http://pdfs.semanticscholar.org/1a6c/3c37c2e62b21ebc0f3533686dde4d0103b3f.pdf
+1a167e10fe57f6d6eff0bb9e45c94924d9347a3e,http://disi.unitn.it/~duta/pubs/ICPR2016_Duta.pdf
+1a3eee980a2252bb092666cf15dd1301fa84860e,https://www.uv.es/vista/vistavalencia/papers/ICIP09_GPCA.pdf
+1a140d9265df8cf50a3cd69074db7e20dc060d14,http://pdfs.semanticscholar.org/1a14/0d9265df8cf50a3cd69074db7e20dc060d14.pdf
+1a85956154c170daf7f15f32f29281269028ff69,http://ibug.doc.ic.ac.uk/media/uploads/documents/active_pictorial_structures.pdf
+1a031378cf1d2b9088a200d9715d87db8a1bf041,http://pdfs.semanticscholar.org/1a03/1378cf1d2b9088a200d9715d87db8a1bf041.pdf
+1a96d54c326d19e32bed00642a177ea439341fa2,http://vc.cs.nthu.edu.tw/home/paper/codfiles/tychiu/200808151557/Principal_Component_Analysis_Based_on_L1-Norm_Maximization.pdf
+1afd481036d57320bf52d784a22dcb07b1ca95e2,http://pdfs.semanticscholar.org/e206/144fc1dee7f10079facf3b6a3d5d2bf5f8db.pdf
+1a9337d70a87d0e30966ecd1d7a9b0bbc7be161f,http://pdfs.semanticscholar.org/1a93/37d70a87d0e30966ecd1d7a9b0bbc7be161f.pdf
+1ae642a8d756c6aa7bc049c5c89d5072d8749637,http://www.cs.umd.edu/~behjat/papers/ICMR14_poster.pdf
+1a4b6ee6cd846ef5e3030a6ae59f026e5f50eda6,http://pdfs.semanticscholar.org/1a4b/6ee6cd846ef5e3030a6ae59f026e5f50eda6.pdf
+1a9a192b700c080c7887e5862c1ec578012f9ed1,http://pdfs.semanticscholar.org/1a9a/192b700c080c7887e5862c1ec578012f9ed1.pdf
+1af52c853ff1d0ddb8265727c1d70d81b4f9b3a9,http://pdfs.semanticscholar.org/1af5/2c853ff1d0ddb8265727c1d70d81b4f9b3a9.pdf
+1a8ccc23ed73db64748e31c61c69fe23c48a2bb1,http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W11/papers/Zhou_Extensive_Facial_Landmark_2013_ICCV_paper.pdf
+1a40092b493c6b8840257ab7f96051d1a4dbfeb2,http://web.cse.msu.edu/~liuxm/publication/Safdarnejad_Liu_Udpa_Andrus_Wood_Craven_FG2015.pdf
+1ad97cce5fa8e9c2e001f53f6f3202bddcefba22,http://files.is.tue.mpg.de/black/papers/RGA2014.pdf
+1a1118cd4339553ad0544a0a131512aee50cf7de,http://pdfs.semanticscholar.org/1a11/18cd4339553ad0544a0a131512aee50cf7de.pdf
+1a6c9ef99bf0ab9835a91fe5f1760d98a0606243,http://pdfs.semanticscholar.org/57ce/705f08ae7256b16eac2b8b40ae0c88d6cf23.pdf
+1afdedba774f6689eb07e048056f7844c9083be9,http://ibug.doc.ic.ac.uk/media/uploads/documents/sandbach2013markov.pdf
+1a2b3fa1b933042687eb3d27ea0a3fcb67b66b43,http://pdfs.semanticscholar.org/676c/0fc58b6a0108326024f708e30d76cadbae58.pdf
+1a7a2221fed183b6431e29a014539e45d95f0804,http://www.cs.colostate.edu/~vision/publications/Bolme2007b.pdf
+1a5b39a4b29afc5d2a3cd49087ae23c6838eca2b,http://www.l3s.de/~siersdorfer/sources/2014/mtgame-2014.pdf
+2878b06f3c416c98496aad6fc2ddf68d2de5b8f6,http://www.cs.fsu.edu/~liux/research/pub/papers/Wu-Two-Stage-CVIU-2008.pdf
+287795991fad3c61d6058352879c7d7ae1fdd2b6,http://pdfs.semanticscholar.org/2877/95991fad3c61d6058352879c7d7ae1fdd2b6.pdf
+28a900a07c7cbce6b6297e4030be3229e094a950,http://pdfs.semanticscholar.org/28a9/00a07c7cbce6b6297e4030be3229e094a950.pdf
+282503fa0285240ef42b5b4c74ae0590fe169211,http://pdfs.semanticscholar.org/2825/03fa0285240ef42b5b4c74ae0590fe169211.pdf
+28e0ed749ebe7eb778cb13853c1456cb6817a166,http://pdfs.semanticscholar.org/28e0/ed749ebe7eb778cb13853c1456cb6817a166.pdf
+28b9d92baea72ec665c54d9d32743cf7bc0912a7,http://pdfs.semanticscholar.org/a7f8/b6bf6aa7a12773ad9bcf1d040d4d74d12493.pdf
+283d226e346ac3e7685dd9a4ba8ae55ee4f2fe43,http://pdfs.semanticscholar.org/283d/226e346ac3e7685dd9a4ba8ae55ee4f2fe43.pdf
+28d7029cfb73bcb4ad1997f3779c183972a406b4,https://arxiv.org/pdf/1705.00322v1.pdf
+280d59fa99ead5929ebcde85407bba34b1fcfb59,http://mirlab.org/conference_papers/International_Conference/ICASSP%202016/pdfs/0002662.pdf
+28f5138d63e4acafca49a94ae1dc44f7e9d84827,http://pdfs.semanticscholar.org/28f5/138d63e4acafca49a94ae1dc44f7e9d84827.pdf
+28e1668d7b61ce21bf306009a62b06593f1819e3,http://pdfs.semanticscholar.org/28e1/668d7b61ce21bf306009a62b06593f1819e3.pdf
+28cd46a078e8fad370b1aba34762a874374513a5,http://pdfs.semanticscholar.org/28cd/46a078e8fad370b1aba34762a874374513a5.pdf
+286adff6eff2f53e84fe5b4d4eb25837b46cae23,http://pdfs.semanticscholar.org/b17e/61972e674f8f734bd428cb882a9bb797abe2.pdf
+286812ade95e6f1543193918e14ba84e5f8e852e,http://pdfs.semanticscholar.org/9b1d/a39168a7196c2f9c85e9b3d17debff04c988.pdf
+282a3ee79a08486f0619caf0ada210f5c3572367,http://pdfs.semanticscholar.org/282a/3ee79a08486f0619caf0ada210f5c3572367.pdf
+288dbc40c027af002298b38954d648fddd4e2fd3,http://pdfs.semanticscholar.org/288d/bc40c027af002298b38954d648fddd4e2fd3.pdf
+28f311b16e4fe4cc0ff6560aae3bbd0cb6782966,http://pdfs.semanticscholar.org/4d59/7318188a9c7f7a78dadbe5b8f8385c1e1356.pdf
+28312c3a47c1be3a67365700744d3d6665b86f22,http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf
+28d06fd508d6f14cd15f251518b36da17909b79e,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Chen_Whats_in_a_2013_CVPR_paper.pdf
+28b5b5f20ad584e560cd9fb4d81b0a22279b2e7b,http://pdfs.semanticscholar.org/28b5/b5f20ad584e560cd9fb4d81b0a22279b2e7b.pdf
+28bc378a6b76142df8762cd3f80f737ca2b79208,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Vedaldi_Understanding_Objects_in_2014_CVPR_paper.pdf
+287900f41dd880802aa57f602e4094a8a9e5ae56,https://www.comp.nus.edu.sg/~tsim/documents/cross-expression.pdf
+28c0cb56e7f97046d6f3463378d084e9ea90a89a,http://www.robots.ox.ac.uk/~vgg/publications/2005/Arandjelovic05a/arandjelovic05a.pdf
+28be652db01273289499bc6e56379ca0237506c0,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/3B_018_ext.pdf
+28bcf31f794dc27f73eb248e5a1b2c3294b3ec9d,http://pdfs.semanticscholar.org/28bc/f31f794dc27f73eb248e5a1b2c3294b3ec9d.pdf
+2836d68c86f29bb87537ea6066d508fde838ad71,http://arxiv.org/pdf/1510.06503v1.pdf
+28de411a5b3eb8411e7bcb0003c426aa91f33e97,http://pdfs.semanticscholar.org/28de/411a5b3eb8411e7bcb0003c426aa91f33e97.pdf
+28b26597a7237f9ea6a9255cde4e17ee18122904,http://pdfs.semanticscholar.org/28b2/6597a7237f9ea6a9255cde4e17ee18122904.pdf
+28fe6e785b32afdcd2c366c9240a661091b850cf,http://pdfs.semanticscholar.org/28fe/6e785b32afdcd2c366c9240a661091b850cf.pdf
+28c9198d30447ffe9c96176805c1cd81615d98c8,http://pdfs.semanticscholar.org/28c9/198d30447ffe9c96176805c1cd81615d98c8.pdf
+28d99dc2d673d62118658f8375b414e5192eac6f,http://www.cs.wayne.edu/~mdong/cvpr17.pdf
+280bc9751593897091015aaf2cab39805768b463,http://pdfs.semanticscholar.org/280b/c9751593897091015aaf2cab39805768b463.pdf
+28aa89b2c827e5dd65969a5930a0520fdd4a3dc7,http://pdfs.semanticscholar.org/28aa/89b2c827e5dd65969a5930a0520fdd4a3dc7.pdf
+28b061b5c7f88f48ca5839bc8f1c1bdb1e6adc68,https://www.cc.gatech.edu/~parikh/Publications/annoyance_prediction_CVPR2014.pdf
+285472527c5dc1c620d9644849e7519766c2d655,http://lear.inrialpes.fr/people/mpederso/papers/ICCV15_Parts.pdf
+288d2704205d9ca68660b9f3a8fda17e18329c13,http://arxiv.org/pdf/1601.04153v2.pdf
+17b46e2dad927836c689d6787ddb3387c6159ece,http://cs.uky.edu/~jacobs/papers/greenwell2014faceattributes.pdf
+176a3e9e118712251124c1347516a92d5e315297,http://eprints.pascal-network.org/archive/00008997/01/ICMR11.pdf
+17a85799c59c13f07d4b4d7cf9d7c7986475d01c,http://pdfs.semanticscholar.org/17a8/5799c59c13f07d4b4d7cf9d7c7986475d01c.pdf
+1768909f779869c0e83d53f6c91764f41c338ab5,http://arxiv.org/pdf/1506.08959v1.pdf
+171ca25bc2cdfc79cad63933bcdd420d35a541ab,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Alnajar_Calibration-Free_Gaze_Estimation_2013_ICCV_paper.pdf
+176bd61cc843d0ed6aa5af83c22e3feb13b89fe1,http://pdfs.semanticscholar.org/648b/f64ff77aeccf761b83dd85143a6eb832b258.pdf
+17d01f34dfe2136b404e8d7f59cebfb467b72b26,http://pdfs.semanticscholar.org/4cfb/51d3b8478d7e63ba2661385337abf94d2c48.pdf
+176f26a6a8e04567ea71677b99e9818f8a8819d0,http://pdfs.semanticscholar.org/176f/26a6a8e04567ea71677b99e9818f8a8819d0.pdf
+17cf838720f7892dbe567129dcf3f7a982e0b56e,http://pdfs.semanticscholar.org/6e0a/a9926e484e08b31fdeb85b73d1ae65ba47d6.pdf
+17035089959a14fe644ab1d3b160586c67327db2,http://pdfs.semanticscholar.org/1703/5089959a14fe644ab1d3b160586c67327db2.pdf
+17370f848801871deeed22af152489e39b6e1454,http://mml.citi.sinica.edu.tw/papers/ICME_2015_Wei.pdf
+17fa1c2a24ba8f731c8b21f1244463bc4b465681,http://pdfs.semanticscholar.org/d5ba/a722b1bca1f95e4e1fad968b2b74ec1ecc7f.pdf
+179e566a2c1a2a48aa3d0028209c11ebe7d6740e,http://homepages.rpi.edu/~wuy9/EyeDetectionDBM/DeepFeaturesEyeDetection.pdf
+17579791ead67262fcfb62ed8765e115fb5eca6f,http://pdfs.semanticscholar.org/1757/9791ead67262fcfb62ed8765e115fb5eca6f.pdf
+177d1e7bbea4318d379f46d8d17720ecef3086ac,http://pdfs.semanticscholar.org/177d/1e7bbea4318d379f46d8d17720ecef3086ac.pdf
+17aa78bd4331ef490f24bdd4d4cd21d22a18c09c,http://pdfs.semanticscholar.org/17aa/78bd4331ef490f24bdd4d4cd21d22a18c09c.pdf
+170a5f5da9ac9187f1c88f21a88d35db38b4111a,https://arxiv.org/pdf/1611.08563v3.pdf
+176fc31a686fb70d73f1fa354bf043ad236f7aa3,http://www.cs.brown.edu/~black/Papers/ofevaltr.pdf
+1742e6c347037d5d4ccbdf5c7a27dfbf0afedb91,http://www1.i2r.a-star.edu.sg/~htang/Unified_Framework_for_Subspace_Clustering-TNNLS.pdf
+1742ffea0e1051b37f22773613f10f69d2e4ed2c,http://pdfs.semanticscholar.org/1742/ffea0e1051b37f22773613f10f69d2e4ed2c.pdf
+1791f790b99471fc48b7e9ec361dc505955ea8b1,http://pdfs.semanticscholar.org/6fea/599d7b9fc72350d6e0947d3baaf44edc561b.pdf
+171d8a39b9e3d21231004f7008397d5056ff23af,http://openaccess.thecvf.com/content_cvpr_2017/papers/Wu_Simultaneous_Facial_Landmark_CVPR_2017_paper.pdf
+17045163860fc7c38a0f7d575f3e44aaa5fa40d7,http://pdfs.semanticscholar.org/38b9/57e2b5ec0ea852d22d1481ef924fbf7f72e2.pdf
+176e5abddb87d029f85f60d1bbff67c66500e8c3,http://www.researchgate.net/profile/Tony_Han3/publication/220930104_Efficient_Facial_Attribute_Recognition_with_a_Spatial_Codebook/links/0046351affdf1f0d96000000.pdf
+174930cac7174257515a189cd3ecfdd80ee7dd54,https://arxiv.org/pdf/1502.02766v3.pdf
+17fad2cc826d2223e882c9fda0715fcd5475acf3,http://pdfs.semanticscholar.org/8f64/def1fe17e2711405d66898a578e3b20da29e.pdf
+17e563af203d469c456bb975f3f88a741e43fb71,https://cvhci.anthropomatik.kit.edu/~mhaurile/papers/WACV2016.pdf
+171389529df11cc5a8b1fbbe659813f8c3be024d,http://pdfs.semanticscholar.org/1713/89529df11cc5a8b1fbbe659813f8c3be024d.pdf
+17d5e5c9a9ee4cf85dfbb9d9322968a6329c3735,http://pdfs.semanticscholar.org/17d5/e5c9a9ee4cf85dfbb9d9322968a6329c3735.pdf
+1750db78b7394b8fb6f6f949d68f7c24d28d934f,https://www3.nd.edu/~kwb/Bharati_Singh_Vatsa_Bowyer_TIFS_2016.pdf
+17cf6195fd2dfa42670dc7ada476e67b381b8f69,http://pdfs.semanticscholar.org/17cf/6195fd2dfa42670dc7ada476e67b381b8f69.pdf
+174f46eccb5852c1f979d8c386e3805f7942bace,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Kae_The_Shape-Time_Random_2014_CVPR_paper.pdf
+17670b60dcfb5cbf8fdae0b266e18cf995f6014c,https://arxiv.org/pdf/1606.02254v1.pdf
+17027a05c1414c9a06a1c5046899abf382a1142d,http://www.cs.cmu.edu/~rahuls/pub/cvpr2015-alionment-rahuls.pdf
+17ded725602b4329b1c494bfa41527482bf83a6f,http://pdfs.semanticscholar.org/cb10/434a5d68ffbe9ed0498771192564ecae8894.pdf
+17738b0972571e7b4ae471d1b2dccea5ce057511,http://dayongwang.info/pdf/2011-MM.pdf
+177bc509dd0c7b8d388bb47403f28d6228c14b5c,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Sun_Deep_Learning_Face_2014_CVPR_paper.pdf
+7ba0bf9323c2d79300f1a433ff8b4fe0a00ad889,http://pdfs.semanticscholar.org/c67c/5780cb9870b70b78e4c82da4f92c7bb2592d.pdf
+7b63ed54345d8c06523f6b03c41a09b5c8f227e2,http://research.iaun.ac.ir/pd/pourghassem/pdfs/PaperC_1187.pdf
+7bf0a1aa1d0228a51d24c0c3a83eceb937a6ae25,http://pdfs.semanticscholar.org/7bf0/a1aa1d0228a51d24c0c3a83eceb937a6ae25.pdf
+7b9961094d3e664fc76b12211f06e12c47a7e77d,http://pdfs.semanticscholar.org/7b99/61094d3e664fc76b12211f06e12c47a7e77d.pdf
+7bfe085c10761f5b0cc7f907bdafe1ff577223e0,http://pdfs.semanticscholar.org/c32b/aaa307da7376bcb5dfef7bb985c06d032a0f.pdf
+7b43326477795a772c08aee750d3e433f00f20be,http://pdfs.semanticscholar.org/7b43/326477795a772c08aee750d3e433f00f20be.pdf
+7b9b3794f79f87ca8a048d86954e0a72a5f97758,http://pdfs.semanticscholar.org/7b9b/3794f79f87ca8a048d86954e0a72a5f97758.pdf
+7bce4f4e85a3bfcd6bfb3b173b2769b064fce0ed,http://pdfs.semanticscholar.org/7bce/4f4e85a3bfcd6bfb3b173b2769b064fce0ed.pdf
+7be60f8c34a16f30735518d240a01972f3530e00,http://www.cs.utexas.edu/~suyog/expression_recog.pdf
+7bdcd85efd1e3ce14b7934ff642b76f017419751,http://www.cbsr.ia.ac.cn/users/zlei/papers/Lei-DFD-PAMI-14.pdf
+7b3b7769c3ccbdf7c7e2c73db13a4d32bf93d21f,http://cvrr.ucsd.edu/publications/2012/Martin_AutoUI2012.pdf
+8f3e120b030e6c1d035cb7bd9c22f6cc75782025,http://pdfs.semanticscholar.org/8f3e/120b030e6c1d035cb7bd9c22f6cc75782025.pdf
+8fb611aca3bd8a3a0527ac0f38561a5a9a5b8483,http://pdfs.semanticscholar.org/8fb6/11aca3bd8a3a0527ac0f38561a5a9a5b8483.pdf
+8fa3478aaf8e1f94e849d7ffbd12146946badaba,http://pdfs.semanticscholar.org/8fa3/478aaf8e1f94e849d7ffbd12146946badaba.pdf
+8f8c0243816f16a21dea1c20b5c81bc223088594,http://pdfs.semanticscholar.org/8f8c/0243816f16a21dea1c20b5c81bc223088594.pdf
+8f08b2101d43b1c0829678d6a824f0f045d57da5,http://pdfs.semanticscholar.org/b93b/f0a7e449cfd0db91a83284d9eba25a6094d8.pdf
+8f992ed6686710164005c20ab16cef6c6ad8d0ea,http://sist.sysu.edu.cn/~zhwshi/Research/PreprintVersion/Half-quadratic%20based%20Iterative%20Minimization%20for%20Robust%20Sparse%20Representation.pdf
+8fbec9105d346cd23d48536eb20c80b7c2bbbe30,http://conradsanderson.id.au/reading_group/Barr_Effectiveness_Face_WACV_2014.pdf
+8f3e3f0f97844d3bfd9e9ec566ac7a54f6931b09,http://pdfs.semanticscholar.org/8f3e/3f0f97844d3bfd9e9ec566ac7a54f6931b09.pdf
+8f8a5be9dc16d73664285a29993af7dc6a598c83,http://pdfs.semanticscholar.org/8f8a/5be9dc16d73664285a29993af7dc6a598c83.pdf
+8f5ce25e6e1047e1bf5b782d045e1dac29ca747e,http://poseidon.csd.auth.gr/papers/PUBLISHED/JOURNAL/pdf/Kotsia07b.pdf
+8f89aed13cb3555b56fccd715753f9ea72f27f05,http://pdfs.semanticscholar.org/8f89/aed13cb3555b56fccd715753f9ea72f27f05.pdf
+8f92cccacf2c84f5d69db3597a7c2670d93be781,http://www.eurasip.org/Proceedings/Eusipco/Eusipco2006/papers/1568982203.pdf
+8f6263e4d3775757e804796e104631c7a2bb8679,http://pdfs.semanticscholar.org/8f62/63e4d3775757e804796e104631c7a2bb8679.pdf
+8f9f599c05a844206b1bd4947d0524234940803d,http://pdfs.semanticscholar.org/8f9f/599c05a844206b1bd4947d0524234940803d.pdf
+8f60c343f76913c509ce623467bf086935bcadac,http://pdfs.semanticscholar.org/8f60/c343f76913c509ce623467bf086935bcadac.pdf
+8fd9c22b00bd8c0bcdbd182e17694046f245335f,http://pdfs.semanticscholar.org/8fd9/c22b00bd8c0bcdbd182e17694046f245335f.pdf
+8f5facdc0a2a79283864aad03edc702e2a400346,http://pdfs.semanticscholar.org/8f5f/acdc0a2a79283864aad03edc702e2a400346.pdf
+8a09668efc95eafd6c3056ff1f0fbc43bb5774db,http://sist.sysu.edu.cn/~zhwshi/Research/PreprintVersion/Robust%20Principal%20Component%20Analysis%20Based%20on%20Maximum%20Correntropy%20Criterion.pdf
+8a3c5507237957d013a0fe0f082cab7f757af6ee,http://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf
+8af411697e73f6cfe691fe502d4bfb42510b4835,http://pdfs.semanticscholar.org/8af4/11697e73f6cfe691fe502d4bfb42510b4835.pdf
+8a1ed5e23231e86216c9bdd62419c3b05f1e0b4d,http://pdfs.semanticscholar.org/8a1e/d5e23231e86216c9bdd62419c3b05f1e0b4d.pdf
+8a54f8fcaeeede72641d4b3701bab1fe3c2f730a,http://pdfs.semanticscholar.org/acf8/b9607ca39f20b9b1956b8761b37f14eb4284.pdf
+8aae23847e1beb4a6d51881750ce36822ca7ed0b,http://pdfs.semanticscholar.org/8aae/23847e1beb4a6d51881750ce36822ca7ed0b.pdf
+8a40b6c75dd6392ee0d3af73cdfc46f59337efa9,http://pdfs.semanticscholar.org/f656/f6682655180162b67042d9d37c4d57c49238.pdf
+8a3bb63925ac2cdf7f9ecf43f71d65e210416e17,https://www.math.uh.edu/~dlabate/ShearFace_ICPR2014.pdf
+8ad0d8cf4bcb5c7eccf09f23c8b7d25439c4ae2b,http://pdfs.semanticscholar.org/8ad0/d8cf4bcb5c7eccf09f23c8b7d25439c4ae2b.pdf
+8adb2fcab20dab5232099becbd640e9c4b6a905a,http://pdfs.semanticscholar.org/d0d1/50a51c46cfb3bdd9d5fb570018c6534b57ff.pdf
+8a0d10a7909b252d0e11bf32a7f9edd0c9a8030b,http://www.cs.unc.edu/~lazebnik/research/fall07/animals_on_the_web.pdf
+8a91ad8c46ca8f4310a442d99b98c80fb8f7625f,http://vislab.isr.ist.utl.pt/wp-content/uploads/2016/02/2015_TIP.pdf
+8aed6ec62cfccb4dba0c19ee000e6334ec585d70,http://pdfs.semanticscholar.org/8aed/6ec62cfccb4dba0c19ee000e6334ec585d70.pdf
+8a336e9a4c42384d4c505c53fb8628a040f2468e,http://pdfs.semanticscholar.org/8a33/6e9a4c42384d4c505c53fb8628a040f2468e.pdf
+7e1c419065fdb9cf2a31aa4b5d0c0e03f7afd54e,http://jpinfotech.org/wp-content/plugins/infotech/file/upload/pdf/8962Face-Sketch-Synthesis-via-Sparse-Representation-Based-Greedy-Search-pdf.pdf
+7e8016bef2c180238f00eecc6a50eac473f3f138,http://pdfs.semanticscholar.org/7e80/16bef2c180238f00eecc6a50eac473f3f138.pdf
+7ed2c84fdfc7d658968221d78e745dfd1def6332,http://pdfs.semanticscholar.org/7ed2/c84fdfc7d658968221d78e745dfd1def6332.pdf
+7e3367b9b97f291835cfd0385f45c75ff84f4dc5,https://infoscience.epfl.ch/record/182226/files/fg2013.pdf
+7e00fb79576fe213853aeea39a6bc51df9fdca16,http://www.ics.ele.tue.nl/~tbasten/papers/AVSS2015_final.pdf
+7ee53d931668fbed1021839db4210a06e4f33190,http://crcv.ucf.edu/projects/videolocalization_images/CVPR16_Waqas_AL.pdf
+7e18b5f5b678aebc8df6246716bf63ea5d8d714e,http://pdfs.semanticscholar.org/7e18/b5f5b678aebc8df6246716bf63ea5d8d714e.pdf
+7e9df45ece7843fe050033c81014cc30b3a8903a,http://pdfs.semanticscholar.org/7e9d/f45ece7843fe050033c81014cc30b3a8903a.pdf
+7ebd323ddfe3b6de8368c4682db6d0db7b70df62,http://pdfs.semanticscholar.org/7ebd/323ddfe3b6de8368c4682db6d0db7b70df62.pdf
+7eb85bcb372261bad707c05e496a09609e27fdb3,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2014/W17/papers/Sathyanarayana_A_Compute-Efficient_Algorithm_2014_CVPR_paper.pdf
+7ed6ff077422f156932fde320e6b3bd66f8ffbcb,http://pdfs.semanticscholar.org/7ed6/ff077422f156932fde320e6b3bd66f8ffbcb.pdf
+7e0c75ce731131e613544e1a85ae0f2c28ee4c1f,http://pdfs.semanticscholar.org/7e0c/75ce731131e613544e1a85ae0f2c28ee4c1f.pdf
+7e1ea2679a110241ed0dd38ff45cd4dfeb7a8e83,http://pdfs.semanticscholar.org/7e1e/a2679a110241ed0dd38ff45cd4dfeb7a8e83.pdf
+7e507370124a2ac66fb7a228d75be032ddd083cc,http://pdfs.semanticscholar.org/8992/4d7418df1380044af9ab706a019418952141.pdf
+1056347fc5e8cd86c875a2747b5f84fd570ba232,http://arxiv.org/pdf/1607.06408v1.pdf
+10550ee13855bd7403946032354b0cd92a10d0aa,http://www.public.asu.edu/~chaitali/confpapers/neuromorphic_dac12.pdf
+10e12d11cb98ffa5ae82343f8904cfe321ae8004,http://pdfs.semanticscholar.org/10e1/2d11cb98ffa5ae82343f8904cfe321ae8004.pdf
+10e7dd3bbbfbc25661213155e0de1a9f043461a2,http://pdfs.semanticscholar.org/eb9c/24686d2d8a65894e6d708c6107724f2b6c04.pdf
+10a285260e822b49023c4324d0fbbca7df8e128b,https://staff.fnwi.uva.nl/m.jain/pub/jain-objects2action-iccv2015.pdf
+100105d6c97b23059f7aa70589ead2f61969fbc3,http://www.rci.rutgers.edu/~vmp93/Conference_pub/WACV2016_CFP.pdf
+100da509d4fa74afc6e86a49352751d365fceee5,http://vision.ucsd.edu/sites/default/files/iccv2011_20q_parts_final.pdf
+10ab1b48b2a55ec9e2920a5397febd84906a7769,http://pdfs.semanticscholar.org/10ab/1b48b2a55ec9e2920a5397febd84906a7769.pdf
+10ce3a4724557d47df8f768670bfdd5cd5738f95,http://pdfs.semanticscholar.org/10ce/3a4724557d47df8f768670bfdd5cd5738f95.pdf
+100428708e4884300e4c1ac1f84cbb16e7644ccf,http://www.math.uh.edu/~dlabate/ICASSP_2014.pdf
+102e374347698fe5404e1d83f441630b1abf62d9,https://infoscience.epfl.ch/record/209965/files/TBME-preprint-infoscience.pdf
+1033ca56c7e88d8b3e80546848826f572c4cd63e,http://alumni.cs.ucsb.edu/~daniel/publications/conferences/fg11/DattaFerisVaqueroFG2011.pdf
+10f17534dba06af1ddab96c4188a9c98a020a459,http://www.cs.umass.edu/~mccallum/papers/peoplelda-iccv07.pdf
+10e0e6f1ec00b20bc78a5453a00c792f1334b016,http://pdfs.semanticscholar.org/672f/ae3da801b2a0d2bad65afdbbbf1b2320623e.pdf
+102b968d836177f9c436141e382915a4f8549276,https://ibug.doc.ic.ac.uk/media/uploads/documents/PanticEtAl-ACM-MM05-Proc.pdf
+100641ed8a5472536dde53c1f50fa2dd2d4e9be9,https://filebox.ece.vt.edu/~parikh/Publications/Parikh_hum_mac_com_Allerton_2013.pdf
+10195a163ab6348eef37213a46f60a3d87f289c5,https://www.research-collection.ethz.ch/bitstream/handle/20.500.11850/156130/eth-50296-01.pdf
+10b06d05b8b3a2c925b951a6d1d5919f536ffed4,http://gamesstudio.org/chek/wp-content/uploads/2014/01/interactivity_befaced.pdf
+10e704c82616fb5d9c48e0e68ee86d4f83789d96,http://pdfs.semanticscholar.org/10e7/04c82616fb5d9c48e0e68ee86d4f83789d96.pdf
+10f2b8188c745d43c1580f5ee6de71ad8d538b4d,http://staff.eng.bahcesehir.edu.tr/~cigdemeroglu/papers/international_conference_papers/2015_EmotiW.pdf
+106732a010b1baf13c61d0994552aee8336f8c85,http://arxiv.org/pdf/1509.04186v2.pdf
+10e70a34d56258d10f468f8252a7762950830d2b,http://intechweb.org/downloadpdf.php?id=5889
+102b27922e9bd56667303f986404f0e1243b68ab,https://applied-informatics-j.springeropen.com/track/pdf/10.1186/s40535-017-0042-5?site=applied-informatics-j.springeropen.com
+10fcbf30723033a5046db791fec2d3d286e34daa,http://pdfs.semanticscholar.org/10fc/bf30723033a5046db791fec2d3d286e34daa.pdf
+108b2581e07c6b7ca235717c749d45a1fa15bb24,http://www.cs.umd.edu/~djacobs/pubs_files/TPAMI_Proofs.pdf
+10d334a98c1e2a9e96c6c3713aadd42a557abb8b,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Shi_Scene_Text_Recognition_2013_CVPR_paper.pdf
+10f66f6550d74b817a3fdcef7fdeba13ccdba51c,http://pdfs.semanticscholar.org/10f6/6f6550d74b817a3fdcef7fdeba13ccdba51c.pdf
+107fc60a6c7d58a6e2d8572ad8c19cc321a9ef53,http://pdfs.semanticscholar.org/65ef/8706ae8c4e22d491550f5fff052ca3f5db21.pdf
+1048c753e9488daa2441c50577fe5fdba5aa5d7c,http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/473.pdf
+10ca2e03ff995023a701e6d8d128455c6e8db030,http://pdfs.semanticscholar.org/a941/e5f8778cbac75e21172985a0575b51ea819b.pdf
+1921e0a97904bdf61e17a165ab159443414308ed,http://pdfs.semanticscholar.org/1921/e0a97904bdf61e17a165ab159443414308ed.pdf
+19dd371e1649ab55a46f4b98890d6937a411ec5d,http://www.ll.mit.edu/mission/cybersec/publications/publication-files/full_papers/2011_11_17_DagliC_HST_FP.pdf
+19841b721bfe31899e238982a22257287b9be66a,http://pdfs.semanticscholar.org/1984/1b721bfe31899e238982a22257287b9be66a.pdf
+19746957aa0d800d550da246a025ad44409cdb03,http://pdfs.semanticscholar.org/1974/6957aa0d800d550da246a025ad44409cdb03.pdf
+1922ad4978ab92ce0d23acc4c7441a8812f157e5,http://personal.ie.cuhk.edu.hk/~ccloy/files/cvpr_2015_alignment.pdf
+19e62a56b6772bbd37dfc6b8f948e260dbb474f5,http://pdfs.semanticscholar.org/19e6/2a56b6772bbd37dfc6b8f948e260dbb474f5.pdf
+192723085945c1d44bdd47e516c716169c06b7c0,http://vislab.ucr.edu/PUBLICATIONS/pubs/Journal%20and%20Conference%20Papers/after10-1-1997/Journals/2014/VisionandAttentionTheoryBasedSampling14.pdf
+1943c6bf8df8a64bd539a5cd6d4e68785eb590c2,http://ccs.njit.edu/inst/source/02MDDM08.pdf
+19fb5e5207b4a964e5ab50d421e2549ce472baa8,http://mmi.tudelft.nl/sites/default/files/e-FEDCompSys14final.pdf
+1989a1f9ce18d8c2a0cee3196fe6fa363aab80c2,http://www.es.ele.tue.nl/~sander/publications/icme16.pdf
+1962e4c9f60864b96c49d85eb897141486e9f6d1,http://www.patternrecognition.cn/~zhongjin/2011/2011Lai_NCP.pdf
+195df1106f4d7aff0e9cb609358abbf80f54a716,https://arxiv.org/pdf/1511.02917v1.pdf
+193debca0be1c38dabc42dc772513e6653fd91d8,http://ibug.doc.ic.ac.uk/media/uploads/documents/trigeorgis2016mnemonic.pdf
+191674c64f89c1b5cba19732869aa48c38698c84,http://pdfs.semanticscholar.org/1916/74c64f89c1b5cba19732869aa48c38698c84.pdf
+190d8bd39c50b37b27b17ac1213e6dde105b21b8,https://dr.ntu.edu.sg/bitstream/handle/10220/18955/fp518-wang.pdf?isAllowed=y&sequence=1
+19af008599fb17bbd9b12288c44f310881df951c,http://pdfs.semanticscholar.org/19af/008599fb17bbd9b12288c44f310881df951c.pdf
+19296e129c70b332a8c0a67af8990f2f4d4f44d1,http://lear.inrialpes.fr/pubs/2009/GVS09/supplmat.pdf
+19666b9eefcbf764df7c1f5b6938031bcf777191,https://arxiv.org/pdf/1212.3913v4.pdf
+198b6beb53e0e61357825d57938719f614685f75,http://pdfs.semanticscholar.org/198b/6beb53e0e61357825d57938719f614685f75.pdf
+1939168a275013d9bc1afaefc418684caf99ba66,http://research.microsoft.com/en-us/um/people/jiansun/papers/CVPR11_FaceAPModel.pdf
+190b3caa2e1a229aa68fd6b1a360afba6f50fde4,http://pdfs.semanticscholar.org/190b/3caa2e1a229aa68fd6b1a360afba6f50fde4.pdf
+19e0cc41b9f89492b6b8c2a8a58d01b8242ce00b,http://pdfs.semanticscholar.org/4088/3844c1ceab95cb92498a92bfdf45beaa288e.pdf
+19e7bdf8310f9038e1a9cf412b8dd2c77ff64c54,http://cvrr.ucsd.edu/publications/2006/McCallTrivedi_v4hci_cvpr2006.pdf
+1938d85feafdaa8a65cb9c379c9a81a0b0dcd3c4,http://www4.comp.polyu.edu.hk/~cslzhang/paper/MBC_TIFS_final.pdf
+195d331c958f2da3431f37a344559f9bce09c0f7,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/2B_066_ext.pdf
+199c2df5f2847f685796c2523221c6436f022464,https://static.aminer.org/pdf/PDF/000/322/051/self_quotient_image_for_face_recognition.pdf
+19c0069f075b5b2d8ac48ad28a7409179bd08b86,http://people.csail.mit.edu/torralba/publications/iccv2013_khosla.pdf
+19c0c7835dba1a319b59359adaa738f0410263e8,http://www.svcl.ucsd.edu/publications/journal/2009/pami09-fs.pdf
+19808134b780b342e21f54b60095b181dfc7a600,http://www.openu.ac.il/home/hassner/projects/siftscales/HassneretalTPAMI16.pdf
+19d583bf8c5533d1261ccdc068fdc3ef53b9ffb9,https://arxiv.org/pdf/1503.03832v2.pdf
+19a9f658ea14701502d169dc086651b1d9b2a8ea,http://www.cbsr.ia.ac.cn/users/zlei/papers/JJYan-FG2013.pdf
+19d4855f064f0d53cb851e9342025bd8503922e2,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989d468.pdf
+19d3b02185ad36fb0b792f2a15a027c58ac91e8e,http://pdfs.semanticscholar.org/19d3/b02185ad36fb0b792f2a15a027c58ac91e8e.pdf
+193ec7bb21321fcf43bbe42233aed06dbdecbc5c,http://pdfs.semanticscholar.org/d40e/f2ca85d8b7540948677c2ed07f1f3677cfdd.pdf
+19da9f3532c2e525bf92668198b8afec14f9efea,http://pdfs.semanticscholar.org/19da/9f3532c2e525bf92668198b8afec14f9efea.pdf
+19868a469dc25ee0db00947e06c804b88ea94fd0,http://pdfs.semanticscholar.org/1986/8a469dc25ee0db00947e06c804b88ea94fd0.pdf
+192235f5a9e4c9d6a28ec0d333e36f294b32f764,http://www.andrew.cmu.edu/user/sjayasur/iccv.pdf
+197eaa59a003a4c7cc77c1abe0f99d942f716942,http://www.lv-nus.org/papers%5C2009%5C2009_mm_age.pdf
+19878141fbb3117d411599b1a74a44fc3daf296d,http://pdfs.semanticscholar.org/1987/8141fbb3117d411599b1a74a44fc3daf296d.pdf
+19f076998ba757602c8fec04ce6a4ca674de0e25,http://pdfs.semanticscholar.org/19f0/76998ba757602c8fec04ce6a4ca674de0e25.pdf
+191d30e7e7360d565b0c1e2814b5bcbd86a11d41,http://homepages.rpi.edu/~wuy9/DiscriminativeDeepFaceShape/DiscriminativeDeepFaceShape_IJCV.pdf
+19994e667d908bc0aacfb663ab0a2bb5ad16b221,http://pdfs.semanticscholar.org/65b1/70e5ec86f5fc500fd5cbd7bfe7b2ec4ef045.pdf
+19eb486dcfa1963c6404a9f146c378fc7ae3a1df,https://pdfs.semanticscholar.org/3b4d/bd7be0b5b0df2e0c61a977974b1fc78ad3e5.pdf
+4c6daffd092d02574efbf746d086e6dc0d3b1e91,http://pdfs.semanticscholar.org/4c6d/affd092d02574efbf746d086e6dc0d3b1e91.pdf
+4cb8a691a15e050756640c0a35880cdd418e2b87,http://www.vision.caltech.edu/~bart/Publications/2004/BartUllmanClassBasedMatching.pdf
+4c8581246ed4d90c942a23ed7c0e007221fa684d,http://welcome.isr.ist.utl.pt/img/pdfs/3439_14-ICIPb.pdf
+4ca1fcfd7650eeb0ac8d51cff31b70717cdddfdd,http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/1563.pdf
+4cc681239c8fda3fb04ba7ac6a1b9d85b68af31d,http://pdfs.semanticscholar.org/56a6/77c889e0e2c9f68ab8ca42a7e63acf986229.pdf
+4c6e1840451e1f86af3ef1cb551259cb259493ba,http://pdfs.semanticscholar.org/4c6e/1840451e1f86af3ef1cb551259cb259493ba.pdf
+4cf3419dbf83a76ccac11828ca57b46bbbe54e0a,https://www.researchgate.net/profile/Muhammad_Sharif9/publication/224173583_Illumination_normalization_preprocessing_for_face_recognition/links/02e7e51a47972ae996000000.pdf
+4c87aafa779747828054cffee3125fcea332364d,http://pdfs.semanticscholar.org/4c87/aafa779747828054cffee3125fcea332364d.pdf
+4cdae53cebaeeebc3d07cf6cd36fecb2946f3e56,https://arxiv.org/pdf/1612.00523v1.pdf
+4c8e5fc0877d066516bb63e6c31eb1b8b5f967eb,http://pdfs.semanticscholar.org/4c8e/5fc0877d066516bb63e6c31eb1b8b5f967eb.pdf
+4cb0e0c0e9b92e457f2c546dc25b9a4ff87ff819,http://dayongwang.info/pdf/2012-CIKM.pdf
+4c8ef4f98c6c8d340b011cfa0bb65a9377107970,http://pdfs.semanticscholar.org/4c8e/f4f98c6c8d340b011cfa0bb65a9377107970.pdf
+4c822785c29ceaf67a0de9c699716c94fefbd37d,http://openaccess.thecvf.com/content_cvpr_2016/papers/Zhu_A_Key_Volume_CVPR_2016_paper.pdf
+4c815f367213cc0fb8c61773cd04a5ca8be2c959,http://mirlab.org/conference_papers/International_Conference/ICASSP%202010/pdfs/0002470.pdf
+4c1528bab3142ec957700ab502531e1a67e7f2f6,http://www.researchgate.net/profile/Xiaohua_Xie/publication/220932399_Restoration_of_a_Frontal_Illuminated_Face_Image_Based_on_KPCA/links/00b49522adfc6b1435000000.pdf
+4c6233765b5f83333f6c675d3389bbbf503805e3,https://perceptual.mpi-inf.mpg.de/files/2015/03/Yan_Vis13.pdf
+4c078c2919c7bdc26ca2238fa1a79e0331898b56,http://pdfs.semanticscholar.org/4c07/8c2919c7bdc26ca2238fa1a79e0331898b56.pdf
+4cfa8755fe23a8a0b19909fa4dec54ce6c1bd2f7,https://arxiv.org/pdf/1611.09956v1.pdf
+4cac9eda716a0addb73bd7ffea2a5fb0e6ec2367,http://pdfs.semanticscholar.org/4cac/9eda716a0addb73bd7ffea2a5fb0e6ec2367.pdf
+4c4236b62302957052f1bbfbd34dbf71ac1650ec,http://www.eurecom.fr/en/publication/3397/download/mm-publi-3397.pdf
+4cd0da974af9356027a31b8485a34a24b57b8b90,https://arxiv.org/pdf/1703.00862v2.pdf
+4c170a0dcc8de75587dae21ca508dab2f9343974,http://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf
+4c81c76f799c48c33bb63b9369d013f51eaf5ada,https://www.cmpe.boun.edu.tr/~salah/kaya17chalearn.pdf
+4c1ce6bced30f5114f135cacf1a37b69bb709ea1,http://imag.pub.ro/common/staff/cflorea/papers/nlp_eye_MVA_site.pdf
+4c5b38ac5d60ab0272145a5a4d50872c7b89fe1b,https://opus.lib.uts.edu.au/bitstream/10453/43339/1/APSIPA_ASC_2015_submission_313.pdf
+4c523db33c56759255b2c58c024eb6112542014e,http://www0.cs.ucl.ac.uk/staff/P.Li/publication/ICCV09JaniaAghajanian.pdf
+261c3e30bae8b8bdc83541ffa9331b52fcf015e6,http://pdfs.semanticscholar.org/a751/04bc7dbaaf549d89f163560525031b49df38.pdf
+26f03693c50eb50a42c9117f107af488865f3dc1,http://pdfs.semanticscholar.org/26f0/3693c50eb50a42c9117f107af488865f3dc1.pdf
+2661f38aaa0ceb424c70a6258f7695c28b97238a,http://mplab.ucsd.edu/wordpress/wp-content/uploads/multilayer2012.pdf
+2609079d682998da2bc4315b55a29bafe4df414e,http://www.iab-rubric.org/papers/ICIP-13-camready.pdf
+264a84f4d27cd4bca94270620907cffcb889075c,https://arxiv.org/pdf/1612.06615v1.pdf
+26d407b911d1234e8e3601e586b49316f0818c95,https://arxiv.org/pdf/1709.00965v1.pdf
+26a72e9dd444d2861298d9df9df9f7d147186bcd,https://engineering.purdue.edu/~qobi/papers/mvap2016.pdf
+26433d86b9c215b5a6871c70197ff4081d63054a,https://www.researchgate.net/profile/WL_Woo/publication/221093080_Multimodal_biometric_fusion_at_feature_level_Face_and_palmprint/links/0fcfd5134b4f62c892000000.pdf
+265af79627a3d7ccf64e9fe51c10e5268fee2aae,http://media.cs.tsinghua.edu.cn/~cvg/publications/ENGLISH%20JOURNAL%20PAPERS/A%20Mixture%20of%20Transformed%20Hidden%20Markov%20Models%20for%20Elastic%20Motion%20Estimation.pdf
+267c6e8af71bab68547d17966adfaab3b4711e6b,http://pdfs.semanticscholar.org/3097/60122ce6215876c013b2b0211f1df8239df5.pdf
+26af867977f90342c9648ccf7e30f94470d40a73,http://pdfs.semanticscholar.org/26af/867977f90342c9648ccf7e30f94470d40a73.pdf
+26a89701f4d41806ce8dbc8ca00d901b68442d45,http://pdfs.semanticscholar.org/b7d8/fea52643236bd9b0dd7eec5f1cde248d10f6.pdf
+26c884829897b3035702800937d4d15fef7010e4,http://pdfs.semanticscholar.org/9200/10cc55d2658e04b01783118b59b7d90420c6.pdf
+266ed43dcea2e7db9f968b164ca08897539ca8dd,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Duong_Beyond_Principal_Components_2015_CVPR_paper.pdf
+26ad6ceb07a1dc265d405e47a36570cb69b2ace6,http://pdfs.semanticscholar.org/26ad/6ceb07a1dc265d405e47a36570cb69b2ace6.pdf
+26ec75b8ad066b36f814379a79ad57089c82c079,http://www.seas.upenn.edu/~bensapp/papers/ambig-tech-report-2009.pdf
+2642810e6c74d900f653f9a800c0e6a14ca2e1c7,http://openaccess.thecvf.com/content_iccv_2015/papers/Liu_Projection_Bank_From_ICCV_2015_paper.pdf
+26437fb289cd7caeb3834361f0cc933a02267766,http://pdfs.semanticscholar.org/2643/7fb289cd7caeb3834361f0cc933a02267766.pdf
+2654ef92491cebeef0997fd4b599ac903e48d07a,http://www.ee.oulu.fi/~gyzhao/Papers/2008/Facial%20Expression%20Recognition%20from%20Near-Infrared%20Video%20Sequences.pdf
+2679e4f84c5e773cae31cef158eb358af475e22f,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w6/papers/Liu_Adaptive_Deep_Metric_CVPR_2017_paper.pdf
+265e76285e18587065a1e28246971f003c5267f3,http://cortex.informatik.tu-ilmenau.de/~wilhelm/wilhelm-soave-2004a.pdf
+26ac607a101492bc86fd81a141311066cfe9e2b5,http://www.eecs.qmul.ac.uk/~hy300/papers/YangPatrasiccv2013.pdf
+21ef129c063bad970b309a24a6a18cbcdfb3aff5,http://pdfs.semanticscholar.org/21ef/129c063bad970b309a24a6a18cbcdfb3aff5.pdf
+218b2c5c9d011eb4432be4728b54e39f366354c1,http://infolab.stanford.edu/~wangz/project/imsearch/ALIP/TIP13/sawant.pdf
+217a21d60bb777d15cd9328970cab563d70b5d23,http://www.cise.ufl.edu/~dihong/assets/iccv2013.pdf
+21e828071249d25e2edaca0596e27dcd63237346,http://research.microsoft.com/pubs/122158/cvpr2010.pdf
+21a2f67b21905ff6e0afa762937427e92dc5aa0b,http://pdfs.semanticscholar.org/21a2/f67b21905ff6e0afa762937427e92dc5aa0b.pdf
+2179afa1cb4bd6d6ff0ca8df580ae511f59d99a3,http://pdfs.semanticscholar.org/f9f4/9f8347db35e721672955c3e24f60574553c0.pdf
+2162654cb02bcd10794ae7e7d610c011ce0fb51b,http://www.jdl.ac.cn/doc/2011/201511610103648366_%E5%88%98%E8%B4%A4%E6%98%8E.pdf
+21258aa3c48437a2831191b71cd069c05fb84cf7,http://pdfs.semanticscholar.org/2125/8aa3c48437a2831191b71cd069c05fb84cf7.pdf
+211c42a567e02987a6f89b89527de3bf4d2e9f90,http://www.cs.dartmouth.edu/~dutran/papers/ijcv16_preprint.pdf
+21f3c5b173503185c1e02a3eb4e76e13d7e9c5bc,http://pdfs.semanticscholar.org/21f3/c5b173503185c1e02a3eb4e76e13d7e9c5bc.pdf
+21bd9374c211749104232db33f0f71eab4df35d5,http://www.eurecom.fr/en/publication/5184/download/sec-publi-5184.pdf
+214db8a5872f7be48cdb8876e0233efecdcb6061,http://users.eecs.northwestern.edu/~mya671/mypapers/ICCV13_Zhang_Yang_Wang_Lin_Tian.pdf
+21104bcf07ef0269ab133471a3200b9bf94b2948,http://www.cs.utexas.edu/~grauman/papers/liang-cvpr2014.pdf
+21d9d0deed16f0ad62a4865e9acf0686f4f15492,http://amp.ece.cmu.edu/people/Andy/Andy_files/cvpr09.pdf
+214ac8196d8061981bef271b37a279526aab5024,http://pdfs.semanticscholar.org/214a/c8196d8061981bef271b37a279526aab5024.pdf
+213a579af9e4f57f071b884aa872651372b661fd,http://www.robots.ox.ac.uk/~vgg/publications/2013/Charles13a/charles13a.pdf
+21626caa46cbf2ae9e43dbc0c8e789b3dbb420f1,http://www.eecs.qmul.ac.uk/~jason/Research/PreprintVersion/Transductive%20VIS-NIR%20Face%20Matching.pdf
+21ec41a6ee3c655cf54c6db659d56480fc76e742,http://www.liacs.nl/home/mlew/ivc2007.emotion.pdf
+217de4ff802d4904d3f90d2e24a29371307942fe,http://www.cs.columbia.edu/~tberg/papers/poof-cvpr13.pdf
+210b98394c3be96e7fd75d3eb11a391da1b3a6ca,http://pdfs.semanticscholar.org/210b/98394c3be96e7fd75d3eb11a391da1b3a6ca.pdf
+21765df4c0224afcc25eb780bef654cbe6f0bc3a,http://ci2cv.net/media/papers/2013_ICCV_Kiani.pdf
+21b16df93f0fab4864816f35ccb3207778a51952,http://sibgrapi.sid.inpe.br/col/sid.inpe.br/sibgrapi/2015/06.18.19.06/doc/PID3766353.pdf
+4d49c6cff198cccb21f4fa35fd75cbe99cfcbf27,http://pdfs.semanticscholar.org/4d49/c6cff198cccb21f4fa35fd75cbe99cfcbf27.pdf
+4d625677469be99e0a765a750f88cfb85c522cce,http://pdfs.semanticscholar.org/cccc/378e98218bbedfd93da956e4a07b9971b928.pdf
+4d6c3a3f9410ca35eb3389ec7088f5e2c16ec3ea,http://www.researchgate.net/profile/Roland_Goecke/publication/221429947_Static_facial_expression_analysis_in_tough_conditions_Data_evaluation_protocol_and_benchmark/links/0fcfd50e81697312d6000000.pdf
+4da735d2ed0deeb0cae4a9d4394449275e316df2,http://cvrr.ucsd.edu/publications/2016/0406.pdf
+4d15254f6f31356963cc70319ce416d28d8924a3,http://pdfs.semanticscholar.org/4d15/254f6f31356963cc70319ce416d28d8924a3.pdf
+4d530a4629671939d9ded1f294b0183b56a513ef,http://pdfs.semanticscholar.org/4d53/0a4629671939d9ded1f294b0183b56a513ef.pdf
+4d2975445007405f8cdcd74b7fd1dd547066f9b8,http://pdfs.semanticscholar.org/4d29/75445007405f8cdcd74b7fd1dd547066f9b8.pdf
+4df889b10a13021928007ef32dc3f38548e5ee56,http://ww2.cs.fsu.edu/~ywu/PDF-files/IJCNN.pdf
+4d6462fb78db88afff44561d06dd52227190689c,http://pdfs.semanticscholar.org/4d64/62fb78db88afff44561d06dd52227190689c.pdf
+4d423acc78273b75134e2afd1777ba6d3a398973,http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf
+4dd6d511a8bbc4d9965d22d79ae6714ba48c8e41,http://pdfs.semanticscholar.org/4dd6/d511a8bbc4d9965d22d79ae6714ba48c8e41.pdf
+4de757faa69c1632066391158648f8611889d862,http://pdfs.semanticscholar.org/4de7/57faa69c1632066391158648f8611889d862.pdf
+4dd71a097e6b3cd379d8c802460667ee0cbc8463,http://www.dgcv.nii.ac.jp/Publications/Papers/2015/BWILD2015.pdf
+4d9a02d080636e9666c4d1cc438b9893391ec6c7,http://www.iainm.com/iainm/Publications_files/2010_The%20Extended.pdf
+4d9c02567e7b9e065108eb83ea3f03fcff880462,http://www.cv-foundation.org/openaccess/content_cvpr_2016_workshops/w28/papers/Peng_Towards_Facial_Expression_CVPR_2016_paper.pdf
+4d7e1eb5d1afecb4e238ba05d4f7f487dff96c11,http://mirlab.org/conference_papers/International_Conference/ICASSP%202017/pdfs/0002352.pdf
+4d90bab42806d082e3d8729067122a35bbc15e8d,http://pdfs.semanticscholar.org/4d90/bab42806d082e3d8729067122a35bbc15e8d.pdf
+4d3c4c3fe8742821242368e87cd72da0bd7d3783,http://www.ee.cuhk.edu.hk/~xgwang/papers/sunWTiccv13.pdf
+4d01d78544ae0de3075304ff0efa51a077c903b7,http://pdfs.semanticscholar.org/8f82/71d557ae862866c692e556f610ab45dcc399.pdf
+4dd2be07b4f0393995b57196f8fc79d666b3aec5,http://mirlab.org/conference_papers/International_Conference/ICASSP%202014/papers/p3572-lee.pdf
+4d356f347ab6647fb3e8ed8c2154dbd359e479ed,http://www.researchgate.net/profile/Anna_Esposito/publication/225441684_Extracting_and_Associating_Meta-features_for_Understanding_Peoples_Emotional_Behaviour_Face_and_Speech/links/02e7e52bed3a1b106e000000.pdf
+4d8ce7669d0346f63b20393ffaa438493e7adfec,http://pdfs.semanticscholar.org/4d8c/e7669d0346f63b20393ffaa438493e7adfec.pdf
+4d21a2866cfd1f0fb2a223aab9eecfdec963059a,http://pdfs.semanticscholar.org/ddb3/5264ae7a74811bf8eb63d0eca7b7db07a4b1.pdf
+4d16337cc0431cd43043dfef839ce5f0717c3483,http://pdfs.semanticscholar.org/4d16/337cc0431cd43043dfef839ce5f0717c3483.pdf
+4d0b3921345ae373a4e04f068867181647d57d7d,http://people.cs.pitt.edu/~kovashka/murrugarra_llerena_kovashka_wacv2017_slides.pdf
+4d0ef449de476631a8d107c8ec225628a67c87f9,http://www.wjscheirer.com/papers/wjs_btas2010b_photohead.pdf
+4df3143922bcdf7db78eb91e6b5359d6ada004d2,http://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf
+75fcbb01bc7e53e9de89cb1857a527f97ea532ce,http://pdfs.semanticscholar.org/75fc/bb01bc7e53e9de89cb1857a527f97ea532ce.pdf
+7577a1ddf9195513a5c976887ad806d1386bb1e9,http://pdfs.semanticscholar.org/7577/a1ddf9195513a5c976887ad806d1386bb1e9.pdf
+757e4cb981e807d83539d9982ad325331cb59b16,http://pdfs.semanticscholar.org/757e/4cb981e807d83539d9982ad325331cb59b16.pdf
+75e9a141b85d902224f849ea61ab135ae98e7bfb,http://pdfs.semanticscholar.org/d1a5/0fffd1c9cf033943636b9e18172ed68582b1.pdf
+75b833dde2e76c5de5912db3444d62c4131d15dc,http://www.researchgate.net/profile/Vassilios_Solachidis/publication/4303365_A_Face_Tracker_Trajectories_Clustering_Using_Mutual_Information/links/09e4150ca146dba69c000000.pdf
+75503aff70a61ff4810e85838a214be484a674ba,https://www.ri.cmu.edu/pub_files/2012/0/Improved-Facial-Expression.pdf
+75fd9acf5e5b7ed17c658cc84090c4659e5de01d,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/2B_035_ext.pdf
+75908b6460eb0781130ed0aa94585be25a584996,http://pdfs.semanticscholar.org/7590/8b6460eb0781130ed0aa94585be25a584996.pdf
+75cd81d2513b7e41ac971be08bbb25c63c37029a,http://pdfs.semanticscholar.org/75cd/81d2513b7e41ac971be08bbb25c63c37029a.pdf
+75bf3b6109d7a685236c8589f8ead7d769ea863f,http://pdfs.semanticscholar.org/75bf/3b6109d7a685236c8589f8ead7d769ea863f.pdf
+751970d4fb6f61d1b94ca82682984fd03c74f127,http://pdfs.semanticscholar.org/7519/70d4fb6f61d1b94ca82682984fd03c74f127.pdf
+759a3b3821d9f0e08e0b0a62c8b693230afc3f8d,http://homes.cs.washington.edu/~neeraj/projects/faceverification/base/papers/nk_iccv2009_attrs.pdf
+75ebe1e0ae9d42732e31948e2e9c03d680235c39,http://pdfs.semanticscholar.org/75eb/e1e0ae9d42732e31948e2e9c03d680235c39.pdf
+75e5ba7621935b57b2be7bf4a10cad66a9c445b9,http://pdfs.semanticscholar.org/75e5/ba7621935b57b2be7bf4a10cad66a9c445b9.pdf
+75859ac30f5444f0d9acfeff618444ae280d661d,http://www.cse.msu.edu/rgroups/biometrics/Publications/SecureBiometrics/NagarNandakumarJain_MultibiometricCryptosystems_TIFS11.pdf
+7553fba5c7f73098524fbb58ca534a65f08e91e7,http://pdfs.semanticscholar.org/7553/fba5c7f73098524fbb58ca534a65f08e91e7.pdf
+751b26e7791b29e4e53ab915bfd263f96f531f56,http://affect.media.mit.edu/pdfs/12.Hernandez-Hoque-Drevo-Picard-MoodMeter-Ubicomp.pdf
+75da1df4ed319926c544eefe17ec8d720feef8c0,http://pdfs.semanticscholar.org/75da/1df4ed319926c544eefe17ec8d720feef8c0.pdf
+75259a613285bdb339556ae30897cb7e628209fa,http://openaccess.thecvf.com/content_iccv_2015/papers/Kodirov_Unsupervised_Domain_Adaptation_ICCV_2015_paper.pdf
+75d2ecbbcc934563dff6b39821605dc6f2d5ffcc,http://pdfs.semanticscholar.org/75d2/ecbbcc934563dff6b39821605dc6f2d5ffcc.pdf
+81bfe562e42f2eab3ae117c46c2e07b3d142dade,http://pdfs.semanticscholar.org/81bf/e562e42f2eab3ae117c46c2e07b3d142dade.pdf
+81695fbbbea2972d7ab1bfb1f3a6a0dbd3475c0f,http://pdfs.semanticscholar.org/8169/5fbbbea2972d7ab1bfb1f3a6a0dbd3475c0f.pdf
+8147ee02ec5ff3a585dddcd000974896cb2edc53,http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2012aePAMI.pdf
+814b05113ba0397d236736f94c01e85bb034c833,http://pdfs.semanticscholar.org/814b/05113ba0397d236736f94c01e85bb034c833.pdf
+816bd8a7f91824097f098e4f3e0f4b69f481689d,http://www.vision.ee.ethz.ch/en/publications/papers/proceedings/eth_biwi_00334.pdf
+81831ed8e5b304e9d28d2d8524d952b12b4cbf55,http://pdfs.semanticscholar.org/8183/1ed8e5b304e9d28d2d8524d952b12b4cbf55.pdf
+81b2a541d6c42679e946a5281b4b9dc603bc171c,http://pdfs.semanticscholar.org/81b2/a541d6c42679e946a5281b4b9dc603bc171c.pdf
+81e11e33fc5785090e2d459da3ac3d3db5e43f65,http://pdfs.semanticscholar.org/81e1/1e33fc5785090e2d459da3ac3d3db5e43f65.pdf
+81e366ed1834a8d01c4457eccae4d57d169cb932,http://www-public.int-edu.eu/~horain/Publications/Wesierski%20ICCV_2013.pdf
+81fc86e86980a32c47410f0ba7b17665048141ec,http://pdfs.semanticscholar.org/81fc/86e86980a32c47410f0ba7b17665048141ec.pdf
+8160b3b5f07deaa104769a2abb7017e9c031f1c1,http://www.aiia.csd.auth.gr/EN/cor_baayen/Exploiting_Discriminant_Information_in_NMF_for_FFV.pdf
+816eff5e92a6326a8ab50c4c50450a6d02047b5e,http://pdfs.semanticscholar.org/816e/ff5e92a6326a8ab50c4c50450a6d02047b5e.pdf
+8149c30a86e1a7db4b11965fe209fe0b75446a8c,http://www.cfar.umd.edu/~kale/ICVGIP2012.pdf
+81dd68de9d88c49db1ae509dbc66c7a82809c026,http://atvs.ii.uam.es/files/2004_SPM_Biometrics_Ortega.pdf
+81da427270c100241c07143885ba3051ec4a2ecb,http://pdfs.semanticscholar.org/81da/427270c100241c07143885ba3051ec4a2ecb.pdf
+810f5606a4769fc3dd99611acf805596fb79223d,http://pdfs.semanticscholar.org/810f/5606a4769fc3dd99611acf805596fb79223d.pdf
+861c650f403834163a2c27467a50713ceca37a3e,http://personal.stevens.edu/~hli18/data/papers/PEPICCV2013_CameraReady.pdf
+86614c2d2f6ebcb9c600d4aef85fd6bf6eab6663,http://pdfs.semanticscholar.org/8661/4c2d2f6ebcb9c600d4aef85fd6bf6eab6663.pdf
+86b69b3718b9350c9d2008880ce88cd035828432,http://pdfs.semanticscholar.org/86b6/9b3718b9350c9d2008880ce88cd035828432.pdf
+86904aee566716d9bef508aa9f0255dc18be3960,http://pdfs.semanticscholar.org/8690/4aee566716d9bef508aa9f0255dc18be3960.pdf
+867e709a298024a3c9777145e037e239385c0129,http://pdfs.semanticscholar.org/867e/709a298024a3c9777145e037e239385c0129.pdf
+86c5478f21c4a9f9de71b5ffa90f2a483ba5c497,http://pdfs.semanticscholar.org/86c5/478f21c4a9f9de71b5ffa90f2a483ba5c497.pdf
+86c053c162c08bc3fe093cc10398b9e64367a100,http://pdfs.semanticscholar.org/86c0/53c162c08bc3fe093cc10398b9e64367a100.pdf
+86b985b285c0982046650e8d9cf09565a939e4f9,http://pdfs.semanticscholar.org/86b9/85b285c0982046650e8d9cf09565a939e4f9.pdf
+861802ac19653a7831b314cd751fd8e89494ab12,http://btpwpdf.ijoy365.com/time-of-flight-and-depth-imaging-marcin-63540537.pdf
+86ed5b9121c02bcf26900913f2b5ea58ba23508f,http://openaccess.thecvf.com/content_cvpr_2016/papers/Wang_Actions__Transformations_CVPR_2016_paper.pdf
+861b12f405c464b3ffa2af7408bff0698c6c9bf0,http://pdfs.semanticscholar.org/861b/12f405c464b3ffa2af7408bff0698c6c9bf0.pdf
+862d17895fe822f7111e737cbcdd042ba04377e8,http://pdfs.semanticscholar.org/862d/17895fe822f7111e737cbcdd042ba04377e8.pdf
+86e1bdbfd13b9ed137e4c4b8b459a3980eb257f6,http://pdfs.semanticscholar.org/86e1/bdbfd13b9ed137e4c4b8b459a3980eb257f6.pdf
+86b6de59f17187f6c238853810e01596d37f63cd,http://pdfs.semanticscholar.org/86b6/de59f17187f6c238853810e01596d37f63cd.pdf
+86b105c3619a433b6f9632adcf9b253ff98aee87,http://www.cecs.uci.edu/~papers/icme06/pdfs/0001013.pdf
+862f2d84b4230d64ddb3e48967ad417089f2c291,http://www.umiacs.umd.edu/users/pvishalm/Conference_pub/ICIP14_landmarks.pdf
+86d1fbaecd02b44309383830e6d985dc09e786aa,http://feng-xu.com/papers/ExpressionSynthesis_CVPR.pdf
+86a8b3d0f753cb49ac3250fa14d277983e30a4b7,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2013/W09/papers/Zhang_Exploiting_Unlabeled_Ages_2013_CVPR_paper.pdf
+86b51bd0c80eecd6acce9fc538f284b2ded5bcdd,http://pdfs.semanticscholar.org/86b5/1bd0c80eecd6acce9fc538f284b2ded5bcdd.pdf
+8699268ee81a7472a0807c1d3b1db0d0ab05f40d,http://pdfs.semanticscholar.org/8699/268ee81a7472a0807c1d3b1db0d0ab05f40d.pdf
+86f8e6310d114bb24deb971e8bc7089df6ac3b57,http://ftp.ncbi.nlm.nih.gov/pub/pmc/84/69/40101_2015_Article_46.PMC4350291.pdf
+72282287f25c5419dc6fd9e89ec9d86d660dc0b5,https://arxiv.org/pdf/1609.07495v1.pdf
+72a87f509817b3369f2accd7024b2e4b30a1f588,http://hal.inria.fr/docs/00/75/05/89/PDF/paa2010last.pdf
+72a00953f3f60a792de019a948174bf680cd6c9f,http://pdfs.semanticscholar.org/72a0/0953f3f60a792de019a948174bf680cd6c9f.pdf
+726b8aba2095eef076922351e9d3a724bb71cb51,http://pdfs.semanticscholar.org/d06b/cb2d46342ee011e652990edf290a0876b502.pdf
+727ecf8c839c9b5f7b6c7afffe219e8b270e7e15,http://pdfs.semanticscholar.org/727e/cf8c839c9b5f7b6c7afffe219e8b270e7e15.pdf
+72a5e181ee8f71b0b153369963ff9bfec1c6b5b0,http://pdfs.semanticscholar.org/72a5/e181ee8f71b0b153369963ff9bfec1c6b5b0.pdf
+72ecaff8b57023f9fbf8b5b2588f3c7019010ca7,http://pdfs.semanticscholar.org/72ec/aff8b57023f9fbf8b5b2588f3c7019010ca7.pdf
+729dbe38538fbf2664bc79847601f00593474b05,http://pdfs.semanticscholar.org/729d/be38538fbf2664bc79847601f00593474b05.pdf
+729a9d35bc291cc7117b924219bef89a864ce62c,http://pdfs.semanticscholar.org/729a/9d35bc291cc7117b924219bef89a864ce62c.pdf
+72e10a2a7a65db7ecdc7d9bd3b95a4160fab4114,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/2B_094_ext.pdf
+72160aae43cd9b2c3aae5574acc0d00ea0993b9e,http://pdfs.semanticscholar.org/7216/0aae43cd9b2c3aae5574acc0d00ea0993b9e.pdf
+72c0c8deb9ea6f59fde4f5043bff67366b86bd66,http://pdfs.semanticscholar.org/72c0/c8deb9ea6f59fde4f5043bff67366b86bd66.pdf
+721e5ba3383b05a78ef1dfe85bf38efa7e2d611d,http://pdfs.semanticscholar.org/74f1/9d0986c9d39aabb359abaa2a87a248a48deb.pdf
+72f4aaf7e2e3f215cd8762ce283988220f182a5b,http://pdfs.semanticscholar.org/72f4/aaf7e2e3f215cd8762ce283988220f182a5b.pdf
+72a55554b816b66a865a1ec1b4a5b17b5d3ba784,http://vislab.ucr.edu/Biometrics16/CVPRW_Vizilter.pdf
+72450d7e5cbe79b05839c30a4f0284af5aa80053,http://pdfs.semanticscholar.org/7245/0d7e5cbe79b05839c30a4f0284af5aa80053.pdf
+72bf9c5787d7ff56a1697a3389f11d14654b4fcf,http://pdfs.semanticscholar.org/7910/a98a1fe9f4bec4c0dc4dc3476e9405b1930d.pdf
+445461a34adc4bcdccac2e3c374f5921c93750f8,https://arxiv.org/pdf/1306.1913v1.pdf
+4414a328466db1e8ab9651bf4e0f9f1fe1a163e4,http://www.eurasip.org/Proceedings/Eusipco/Eusipco2010/Contents/papers/1569290719.pdf
+442f09ddb5bb7ba4e824c0795e37cad754967208,http://pdfs.semanticscholar.org/8c29/513c2621c26ac8491bb763674db475fe58c6.pdf
+443acd268126c777bc7194e185bec0984c3d1ae7,https://eprints.soton.ac.uk/402985/1/icpr-16.pdf
+442d3aeca486de787de10bc41bfeb0b42c81803f,http://pdfs.semanticscholar.org/442d/3aeca486de787de10bc41bfeb0b42c81803f.pdf
+44f23600671473c3ddb65a308ca97657bc92e527,http://arxiv.org/pdf/1604.06573v2.pdf
+4439746eeb7c7328beba3f3ef47dc67fbb52bcb3,http://pdfs.semanticscholar.org/4439/746eeb7c7328beba3f3ef47dc67fbb52bcb3.pdf
+446a99fdedd5bb32d4970842b3ce0fc4f5e5fa03,http://www.isir.upmc.fr/files/2014ACTI3172.pdf
+4467a1ae8ddf0bc0e970c18a0cdd67eb83c8fd6f,http://pdfs.semanticscholar.org/4467/a1ae8ddf0bc0e970c18a0cdd67eb83c8fd6f.pdf
+44f48a4b1ef94a9104d063e53bf88a69ff0f55f3,http://pdfs.semanticscholar.org/44f4/8a4b1ef94a9104d063e53bf88a69ff0f55f3.pdf
+44a3ec27f92c344a15deb8e5dc3a5b3797505c06,http://pdfs.semanticscholar.org/44a3/ec27f92c344a15deb8e5dc3a5b3797505c06.pdf
+44aeda8493ad0d44ca1304756cc0126a2720f07b,http://pdfs.semanticscholar.org/afbb/c0ea429ba0f5cf7790d23fc40d7d5342a53c.pdf
+449b1b91029e84dab14b80852e35387a9275870e,https://pdfs.semanticscholar.org/608c/da0c14c3d134d9d18dd38f9682b23c31d367.pdf
+44078d0daed8b13114cffb15b368acc467f96351,http://arxiv.org/pdf/1604.05417v1.pdf
+44c9b5c55ca27a4313daf3760a3f24a440ce17ad,http://pdfs.semanticscholar.org/44c9/b5c55ca27a4313daf3760a3f24a440ce17ad.pdf
+44dd150b9020b2253107b4a4af3644f0a51718a3,http://www.andrew.cmu.edu/user/kseshadr/TIFS_2012_Paper_Final_Submission.pdf
+447d8893a4bdc29fa1214e53499ffe67b28a6db5,http://pdfs.semanticscholar.org/447d/8893a4bdc29fa1214e53499ffe67b28a6db5.pdf
+44f65e3304bdde4be04823fd7ca770c1c05c2cef,http://pdfs.semanticscholar.org/44f6/5e3304bdde4be04823fd7ca770c1c05c2cef.pdf
+44fbbaea6271e47ace47c27701ed05e15da8f7cf,http://pdfs.semanticscholar.org/44fb/baea6271e47ace47c27701ed05e15da8f7cf.pdf
+44fb4dcf88eb482e2ab79fd4540caf941613b970,http://www.researchgate.net/profile/Masashi_Sugiyama/publication/220930547_Perceived_Age_Estimation_under_Lighting_Condition_Change_by_Covariate_Shift_Adaptation/links/0fcfd5122b4d406edd000000.pdf
+44eb4d128b60485377e74ffb5facc0bf4ddeb022,https://pdfs.semanticscholar.org/44eb/4d128b60485377e74ffb5facc0bf4ddeb022.pdf
+448ed201f6fceaa6533d88b0b29da3f36235e131,http://pdfs.semanticscholar.org/aa6a/0b92c60187c7fa9923b1c8433ec99a495df7.pdf
+441bf5f7fe7d1a3939d8b200eca9b4bb619449a9,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W08/papers/Sundararajan_Head_Pose_Estimation_2015_CVPR_paper.pdf
+447a5e1caf847952d2bb526ab2fb75898466d1bc,http://pdfs.semanticscholar.org/447a/5e1caf847952d2bb526ab2fb75898466d1bc.pdf
+449808b7aa9ee6b13ad1a21d9f058efaa400639a,http://www.jdl.ac.cn/doc/2008/Recovering%203D%20Facial%20Shape%20via%20Coupled%202D-3D%20Space%20Learning.pdf
+2a7bca56e2539c8cf1ae4e9da521879b7951872d,http://pdfs.semanticscholar.org/2a7b/ca56e2539c8cf1ae4e9da521879b7951872d.pdf
+2a65d7d5336b377b7f5a98855767dd48fa516c0f,https://mug.ee.auth.gr/wp-content/uploads/fsLDA.pdf
+2af2b74c3462ccff3a6881ff7cf4f321b3242fa9,http://yugangjiang.info/publication/JCST-nameface.pdf
+2aaa6969c03f435b3ea8431574a91a0843bd320b,http://pdfs.semanticscholar.org/2aaa/6969c03f435b3ea8431574a91a0843bd320b.pdf
+2af620e17d0ed67d9ccbca624250989ce372e255,http://www.alessandrobergamo.com/data/bt_cvpr12.pdf
+2a35d20b2c0a045ea84723f328321c18be6f555c,http://pdfs.semanticscholar.org/d1be/cba3c460892453939f9f3639d8beddf2a133.pdf
+2ad7cef781f98fd66101fa4a78e012369d064830,http://arxiv.org/pdf/1603.05474v1.pdf
+2ad29b2921aba7738c51d9025b342a0ec770c6ea,http://arxiv.org/pdf/1510.02781v1.pdf
+2a9b398d358cf04dc608a298d36d305659e8f607,http://www.pitt.edu/~jeffcohn/biblio/MahoorFG2011.pdf
+2a0efb1c17fbe78470acf01e4601a75735a805cc,http://pdfs.semanticscholar.org/2a0e/fb1c17fbe78470acf01e4601a75735a805cc.pdf
+2a6bba2e81d5fb3c0fd0e6b757cf50ba7bf8e924,http://pdfs.semanticscholar.org/2a6b/ba2e81d5fb3c0fd0e6b757cf50ba7bf8e924.pdf
+2ac21d663c25d11cda48381fb204a37a47d2a574,http://pdfs.semanticscholar.org/2ac2/1d663c25d11cda48381fb204a37a47d2a574.pdf
+2a4153655ad1169d482e22c468d67f3bc2c49f12,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhu_Face_Alignment_Across_CVPR_2016_paper.pdf
+2aa2b312da1554a7f3e48f71f2fce7ade6d5bf40,http://www.cl.cam.ac.uk/~pr10/publications/fg17.pdf
+2ae139b247057c02cda352f6661f46f7feb38e45,http://www.iro.umontreal.ca/~memisevr/pubs/icmi_emotiw.pdf
+2a3e19d7c54cba3805115497c69069dd5a91da65,http://pdfs.semanticscholar.org/2a3e/19d7c54cba3805115497c69069dd5a91da65.pdf
+2af19b5ff2ca428fa42ef4b85ddbb576b5d9a5cc,http://pdfs.semanticscholar.org/2af1/9b5ff2ca428fa42ef4b85ddbb576b5d9a5cc.pdf
+2a14b6d9f688714dc60876816c4b7cf763c029a9,http://tamaraberg.com/papers/wacv2016_combining.pdf
+2a88541448be2eb1b953ac2c0c54da240b47dd8a,http://pdfs.semanticscholar.org/2c44/0d01738a2fed3e3bd6520471acacb6c96e3b.pdf
+2a02355c1155f2d2e0cf7a8e197e0d0075437b19,http://pdfs.semanticscholar.org/cf2c/58a5efea263a878815e25148b1c6954a0cbe.pdf
+2a171f8d14b6b8735001a11c217af9587d095848,http://openaccess.thecvf.com/content_iccv_2015/papers/Zhang_Learning_Social_Relation_ICCV_2015_paper.pdf
+2aea27352406a2066ddae5fad6f3f13afdc90be9,http://arxiv.org/pdf/1507.05699v4.pdf
+2a0623ae989f2236f5e1fe3db25ab708f5d02955,http://pdfs.semanticscholar.org/2a06/23ae989f2236f5e1fe3db25ab708f5d02955.pdf
+2ad0ee93d029e790ebb50574f403a09854b65b7e,http://vision.cornell.edu/se3/wp-content/uploads/2014/09/pami05.pdf
+2afdda6fb85732d830cea242c1ff84497cd5f3cb,http://www.iis.sinica.edu.tw/papers/song/11489-F.pdf
+2ff9618ea521df3c916abc88e7c85220d9f0ff06,http://pdfs.semanticscholar.org/bb08/f64565ee68e868dcab904cada9646dd5f676.pdf
+2fda461869f84a9298a0e93ef280f79b9fb76f94,https://www.cl.cam.ac.uk/research/rainbow/projects/openface/wacv2016.pdf
+2ff9ffedfc59422a8c7dac418a02d1415eec92f1,http://pdfs.semanticscholar.org/6e3b/778ad384101f792284b42844518f620143aa.pdf
+2fdce3228d384456ea9faff108b9c6d0cf39e7c7,http://pdfs.semanticscholar.org/2fdc/e3228d384456ea9faff108b9c6d0cf39e7c7.pdf
+2f7e9b45255c9029d2ae97bbb004d6072e70fa79,http://pdfs.semanticscholar.org/2f7e/9b45255c9029d2ae97bbb004d6072e70fa79.pdf
+2f53b97f0de2194d588bc7fb920b89cd7bcf7663,http://pdfs.semanticscholar.org/2f53/b97f0de2194d588bc7fb920b89cd7bcf7663.pdf
+2f16baddac6af536451b3216b02d3480fc361ef4,http://cs.nyu.edu/~fergus/teaching/vision/10_facerec.pdf
+2f489bd9bfb61a7d7165a2f05c03377a00072477,http://pdfs.semanticscholar.org/2f48/9bd9bfb61a7d7165a2f05c03377a00072477.pdf
+2f2aa67c5d6dbfaf218c104184a8c807e8b29286,http://sesame.comp.nus.edu.sg/components/com_flexicontent/uploads/lekhaicon13.pdf
+2f16459e2e24dc91b3b4cac7c6294387d4a0eacf,http://pdfs.semanticscholar.org/2f16/459e2e24dc91b3b4cac7c6294387d4a0eacf.pdf
+2f0b8579829b3d4efdbc03c96821e33d7cc65e1d,http://thoth.inrialpes.fr/people/mpederso/papers/cvpr14-facial.pdf
+2f59f28a1ca3130d413e8e8b59fb30d50ac020e2,http://pralab.diee.unica.it/sites/default/files/Satta_ICPR2014.pdf
+2f78e471d2ec66057b7b718fab8bfd8e5183d8f4,http://pdfs.semanticscholar.org/2f78/e471d2ec66057b7b718fab8bfd8e5183d8f4.pdf
+2fda164863a06a92d3a910b96eef927269aeb730,http://www.cs.utexas.edu/~grauman/courses/spring2007/395T/papers/berg_names_and_faces.pdf
+2fa057a20a2b4a4f344988fee0a49fce85b0dc33,http://next.comp.nus.edu.sg/sites/default/files/publication-attachments/eHeritage.pdf
+2f8ef26bfecaaa102a55b752860dbb92f1a11dc6,http://pdfs.semanticscholar.org/2f8e/f26bfecaaa102a55b752860dbb92f1a11dc6.pdf
+2f5ae4d6cd240ec7bc3f8ada47030e8439125df2,http://users.eecs.northwestern.edu/~xsh835/CVPR14_ExemplarFaceDetection.pdf
+2f184c6e2c31d23ef083c881de36b9b9b6997ce9,http://pdfs.semanticscholar.org/2f18/4c6e2c31d23ef083c881de36b9b9b6997ce9.pdf
+2f348a2ad3ba390ee178d400be0f09a0479ae17b,http://www.csee.wvu.edu/~richas/ML-Papers/Gabor-Based%20Kernel%20PCA.pdf
+2f9c173ccd8c1e6b88d7fb95d6679838bc9ca51d,http://arxiv.org/pdf/1604.02917v1.pdf
+2f598922f81e65c1f3ffbd8c2456d2e9dcd7124a,http://pdfs.semanticscholar.org/464c/21d54339c3f6e624ce026fef53b19c1edd86.pdf
+2f8183b549ec51b67f7dad717f0db6bf342c9d02,http://www.wisdom.weizmann.ac.il/~ronen/papers/Kemelmacher%20Basri%20-%203D%20Face%20Reconstruction%20from%20a%20Single%20Image%20Using%20a%20Single%20Reference%20Face%20Shape.pdf
+2f13dd8c82f8efb25057de1517746373e05b04c4,http://www.cfar.umd.edu/~rama/Publications/Ni_ICIP.pdf
+2fa1fc116731b2b5bb97f06d2ac494cb2b2fe475,http://pdfs.semanticscholar.org/2fa1/fc116731b2b5bb97f06d2ac494cb2b2fe475.pdf
+2f2406551c693d616a840719ae1e6ea448e2f5d3,http://biometrics.cse.msu.edu/Presentations/CharlesOtto_ICB13_AgeEstimationFaceImages_HumanVsMachinePerformance.pdf
+2f882ceaaf110046e63123b495212d7d4e99f33d,http://pdfs.semanticscholar.org/2f88/2ceaaf110046e63123b495212d7d4e99f33d.pdf
+2f95340b01cfa48b867f336185e89acfedfa4d92,https://www2.informatik.uni-hamburg.de/wtm/ps/Hamester_IJCNN2015.pdf
+2f7fc778e3dec2300b4081ba2a1e52f669094fcd,http://pdfs.semanticscholar.org/2f7f/c778e3dec2300b4081ba2a1e52f669094fcd.pdf
+2f0e5a4b0ef89dd2cf55a4ef65b5c78101c8bfa1,http://pdfs.semanticscholar.org/f39c/e446b7c76d24cc63df7837cb3be0ee235df2.pdf
+2faa09413162b0a7629db93fbb27eda5aeac54ca,http://www.nist.gov/customcf/get_pdf.cfm?pub_id=905048
+2f5e057e35a97278a9d824545d7196c301072ebf,http://vision.ics.uci.edu/papers/ZhuAR_CVPR_2014/ZhuAR_CVPR_2014.pdf
+2f04ba0f74df046b0080ca78e56898bd4847898b,https://arxiv.org/pdf/1407.4023v2.pdf
+433bb1eaa3751519c2e5f17f47f8532322abbe6d,http://pdfs.semanticscholar.org/433b/b1eaa3751519c2e5f17f47f8532322abbe6d.pdf
+4300fa1221beb9dc81a496cd2f645c990a7ede53,http://pdfs.semanticscholar.org/da71/87e56b6da1b9c993d9a096d2f2b9d80fb14c.pdf
+43010792bf5cdb536a95fba16b8841c534ded316,https://www.comp.nus.edu.sg/~tsim/documents/general-face-motion.pdf
+43bb20ccfda7b111850743a80a5929792cb031f0,http://pdfs.semanticscholar.org/43bb/20ccfda7b111850743a80a5929792cb031f0.pdf
+439ac8edfa1e7cbc65474cab544a5b8c4c65d5db,http://pdfs.semanticscholar.org/439a/c8edfa1e7cbc65474cab544a5b8c4c65d5db.pdf
+43f6953804964037ff91a4f45d5b5d2f8edfe4d5,http://ias.cs.tum.edu/_media/spezial/bib/riaz09fit.pdf
+439ec47725ae4a3660e509d32828599a495559bf,http://pdfs.semanticscholar.org/439e/c47725ae4a3660e509d32828599a495559bf.pdf
+43e99b76ca8e31765d4571d609679a689afdc99e,http://openaccess.thecvf.com/content_ICCV_2017/papers/Yu_Learning_Dense_Facial_ICCV_2017_paper.pdf
+4377b03bbee1f2cf99950019a8d4111f8de9c34a,http://www.umiacs.umd.edu/~morariu/publications/LiSelectiveEncoderICCV15.pdf
+43a03cbe8b704f31046a5aba05153eb3d6de4142,http://pdfs.semanticscholar.org/9594/3329cd6922a869dd6d58ef01e9492879034c.pdf
+434bf475addfb580707208618f99c8be0c55cf95,http://pdfs.semanticscholar.org/8cea/404e8a5c4c11064923e5a6c023a0ae594a5a.pdf
+43836d69f00275ba2f3d135f0ca9cf88d1209a87,https://ipsjcva.springeropen.com/track/pdf/10.1186/s41074-017-0030-7?site=ipsjcva.springeropen.com
+4307e8f33f9e6c07c8fc2aeafc30b22836649d8c,http://pdfs.semanticscholar.org/ebff/0956c07185f7bb4e4ee5c7cc0aaa74aca05e.pdf
+435642641312364e45f4989fac0901b205c49d53,http://pdfs.semanticscholar.org/4356/42641312364e45f4989fac0901b205c49d53.pdf
+43aa40eaa59244c233f83d81f86e12eba8d74b59,http://pdfs.semanticscholar.org/43aa/40eaa59244c233f83d81f86e12eba8d74b59.pdf
+4362368dae29cc66a47114d5ffeaf0534bf0159c,http://pdfs.semanticscholar.org/4362/368dae29cc66a47114d5ffeaf0534bf0159c.pdf
+4350bb360797a4ade4faf616ed2ac8e27315968e,http://www.merl.com/publications/docs/TR2006-058.pdf
+43476cbf2a109f8381b398e7a1ddd794b29a9a16,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Cao_A_Practical_Transfer_2013_ICCV_paper.pdf
+4353d0dcaf450743e9eddd2aeedee4d01a1be78b,http://pdfs.semanticscholar.org/4353/d0dcaf450743e9eddd2aeedee4d01a1be78b.pdf
+433d2d5528d1401a402f2c1db40b933c494f11ba,https://www.researchgate.net/profile/Xudong_Jiang3/publication/4248964_Face_Recognition_Based_on_Discriminant_Evaluation_in_the_Whole_Space/links/0046351ef2d1c48d55000000.pdf
+437a720c6f6fc1959ba95e48e487eb3767b4e508,http://pdfs.semanticscholar.org/d4f0/960c6587379ad7df7928c256776e25952c60.pdf
+436d80cc1b52365ed7b2477c0b385b6fbbb51d3b,http://pdfs.semanticscholar.org/436d/80cc1b52365ed7b2477c0b385b6fbbb51d3b.pdf
+434d6726229c0f556841fad20391c18316806f73,https://arxiv.org/pdf/1704.03114v2.pdf
+43b8b5eeb4869372ef896ca2d1e6010552cdc4d4,http://pdfs.semanticscholar.org/43b8/b5eeb4869372ef896ca2d1e6010552cdc4d4.pdf
+43ae4867d058453e9abce760ff0f9427789bab3a,https://infoscience.epfl.ch/record/207780/files/tnnls_graph_embedding.pdf
+435dc062d565ce87c6c20a5f49430eb9a4b573c4,http://pdfs.semanticscholar.org/435d/c062d565ce87c6c20a5f49430eb9a4b573c4.pdf
+430c4d7ad76e51d83bbd7ec9d3f856043f054915,http://pdfs.semanticscholar.org/5176/899c80b3d4b3b8be34d35549f95bf2d55e7d.pdf
+438b88fe40a6f9b5dcf08e64e27b2719940995e0,http://www.csd.uwo.ca/~olga/Courses/Fall2006/StudentPapers/ferenczMillerMalikICCV05.pdf
+433a6d6d2a3ed8a6502982dccc992f91d665b9b3,http://pdfs.semanticscholar.org/433a/6d6d2a3ed8a6502982dccc992f91d665b9b3.pdf
+438e7999c937b94f0f6384dbeaa3febff6d283b6,https://arxiv.org/pdf/1705.02402v2.pdf
+43776d1bfa531e66d5e9826ff5529345b792def7,http://cvrr.ucsd.edu/scmartin/presentation/DriveAnalysisByLookingIn-ITSC2015-NDS.pdf
+43fb9efa79178cb6f481387b7c6e9b0ca3761da8,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W08/papers/Katti_Mixture_of_Parts_2015_CVPR_paper.pdf
+43ed518e466ff13118385f4e5d039ae4d1c000fb,https://arxiv.org/pdf/1505.01350v1.pdf
+439647914236431c858535a2354988dde042ef4d,http://eecs.qmul.ac.uk/~jason/Research/PreprintVersion/Face%20Illumination%20Normalization%20on%20Large%20and%20Small%20Scale%20Features.pdf
+439ca6ded75dffa5ddea203dde5e621dc4a88c3e,http://research.cs.rutgers.edu/~hxp1/rc_images/hai_facetrack_icpr2016.pdf
+8877e0b2dc3d2e8538c0cfee86b4e8657499a7c4,http://www.apsipa.org/proceedings_2013/papers/280_automatic-facial-hsu-2931731.pdf
+88c6d4b73bd36e7b5a72f3c61536c8c93f8d2320,http://pdfs.semanticscholar.org/88c6/d4b73bd36e7b5a72f3c61536c8c93f8d2320.pdf
+88ad82e6f2264f75f7783232ba9185a2f931a5d1,http://pdfs.semanticscholar.org/88ad/82e6f2264f75f7783232ba9185a2f931a5d1.pdf
+8886b21f97c114a23b24dc7025bbf42885adc3a7,http://researchprofiles.herts.ac.uk/portal/files/10195320/UH_eval_deid_face_final.pdf
+889bc64c7da8e2a85ae6af320ae10e05c4cd6ce7,http://mmlab.ie.cuhk.edu.hk/archive/2007/IFS07_face.pdf
+88f7a3d6f0521803ca59fde45601e94c3a34a403,http://pdfs.semanticscholar.org/88f7/a3d6f0521803ca59fde45601e94c3a34a403.pdf
+8812aef6bdac056b00525f0642702ecf8d57790b,http://pdfs.semanticscholar.org/8812/aef6bdac056b00525f0642702ecf8d57790b.pdf
+881066ec43bcf7476479a4146568414e419da804,http://pdfs.semanticscholar.org/8810/66ec43bcf7476479a4146568414e419da804.pdf
+8813368c6c14552539137aba2b6f8c55f561b75f,https://arxiv.org/pdf/1607.05427v1.pdf
+88e2574af83db7281c2064e5194c7d5dfa649846,http://pdfs.semanticscholar.org/88e2/574af83db7281c2064e5194c7d5dfa649846.pdf
+88bef50410cea3c749c61ed68808fcff84840c37,https://ibug.doc.ic.ac.uk/media/uploads/documents/tzimiropoulos2011sparse.pdf
+883006c0f76cf348a5f8339bfcb649a3e46e2690,http://mplab.ucsd.edu/~marni/pubs/Sikka_FG2013.pdf
+88850b73449973a34fefe491f8836293fc208580,http://pdfs.semanticscholar.org/8885/0b73449973a34fefe491f8836293fc208580.pdf
+8820d1d3fa73cde623662d92ecf2e3faf1e3f328,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w2/papers/Victor_Continuous_Video_to_CVPR_2017_paper.pdf
+88f2952535df5859c8f60026f08b71976f8e19ec,http://pdfs.semanticscholar.org/88f2/952535df5859c8f60026f08b71976f8e19ec.pdf
+8862a573a42bbaedd392e9e634c1ccbfd177a01d,https://arxiv.org/pdf/1605.06764v1.pdf
+887b7676a4efde616d13f38fcbfe322a791d1413,http://pdfs.semanticscholar.org/b4a0/cff84c35f75bcdb7aec3a0b1395edd15189b.pdf
+8878871ec2763f912102eeaff4b5a2febfc22fbe,http://www.ee.columbia.edu/~wliu/TIP15_action.pdf
+8855d6161d7e5b35f6c59e15b94db9fa5bbf2912,http://pdfs.semanticscholar.org/8855/d6161d7e5b35f6c59e15b94db9fa5bbf2912.pdf
+88bee9733e96958444dc9e6bef191baba4fa6efa,http://homepages.dcc.ufmg.br/~william/papers/paper_2014_SIBGRAPI.pdf
+88fd4d1d0f4014f2b2e343c83d8c7e46d198cc79,http://www.mirlab.org/conference_papers/International_Conference/ICASSP%202016/pdfs/0002697.pdf
+887745c282edf9af40d38425d5fdc9b3fe139c08,https://arxiv.org/pdf/1407.2987v1.pdf
+9f8ebf149aed8a0eda5c3375c9947c6b26eb7873,http://www.cais.ntu.edu.sg/~chhoi/paper_pdf/fp21-wang.pdf
+9f6d04ce617d24c8001a9a31f11a594bd6fe3510,http://pdfs.semanticscholar.org/9f6d/04ce617d24c8001a9a31f11a594bd6fe3510.pdf
+9f499948121abb47b31ca904030243e924585d5f,http://pdfs.semanticscholar.org/9f49/9948121abb47b31ca904030243e924585d5f.pdf
+9fc04a13eef99851136eadff52e98eb9caac919d,http://pdfs.semanticscholar.org/9fc0/4a13eef99851136eadff52e98eb9caac919d.pdf
+9f4078773c8ea3f37951bf617dbce1d4b3795839,http://pdfs.semanticscholar.org/9f40/78773c8ea3f37951bf617dbce1d4b3795839.pdf
+9f65319b8a33c8ec11da2f034731d928bf92e29d,http://pdfs.semanticscholar.org/9f65/319b8a33c8ec11da2f034731d928bf92e29d.pdf
+9fa1be81d31fba07a1bde0275b9d35c528f4d0b8,http://pdfs.semanticscholar.org/9fa1/be81d31fba07a1bde0275b9d35c528f4d0b8.pdf
+9f094341bea610a10346f072bf865cb550a1f1c1,http://zhiweizhu.com/papers/FIVR_MobileDevice_2009.pdf
+6b9aa288ce7740ec5ce9826c66d059ddcfd8dba9,http://pdfs.semanticscholar.org/6b9a/a288ce7740ec5ce9826c66d059ddcfd8dba9.pdf
+6bfb0f8dd1a2c0b44347f09006dc991b8a08559c,https://www.computer.org/web/csdl/index/-/csdl/proceedings/fg/2013/5545/00/06553724.pdf
+6bcfcc4a0af2bf2729b5bc38f500cfaab2e653f0,http://www.cv-foundation.org/openaccess/content_cvpr_2016_workshops/w28/papers/Afshar_Facial_Expression_Recognition_CVPR_2016_paper.pdf
+6bca0d1f46b0f7546ad4846e89b6b842d538ee4e,http://pdfs.semanticscholar.org/6bca/0d1f46b0f7546ad4846e89b6b842d538ee4e.pdf
+6b089627a4ea24bff193611e68390d1a4c3b3644,http://publications.idiap.ch/downloads/reports/2012/Wallace_Idiap-RR-03-2012.pdf
+6be0ab66c31023762e26d309a4a9d0096f72a7f0,http://pdfs.semanticscholar.org/6be0/ab66c31023762e26d309a4a9d0096f72a7f0.pdf
+6bcee7dba5ed67b3f9926d2ae49f9a54dee64643,http://pdfs.semanticscholar.org/6bce/e7dba5ed67b3f9926d2ae49f9a54dee64643.pdf
+6b18628cc8829c3bf851ea3ee3bcff8543391819,http://engineering.cae.cn/fitee/fileup/2095-9184/SUPPL/20151221082702_2.pdf
+6b7f7817b2e5a7e7d409af2254a903fc0d6e02b6,http://pdfs.semanticscholar.org/6b7f/7817b2e5a7e7d409af2254a903fc0d6e02b6.pdf
+6bb95a0f3668cd36407c85899b71c9fe44bf9573,http://pdfs.semanticscholar.org/6bb9/5a0f3668cd36407c85899b71c9fe44bf9573.pdf
+6b1b43d58faed7b457b1d4e8c16f5f7e7d819239,http://pdfs.semanticscholar.org/6b1b/43d58faed7b457b1d4e8c16f5f7e7d819239.pdf
+6bb0425baac448297fbd29a00e9c9b9926ce8870,http://pdfs.semanticscholar.org/6bb0/425baac448297fbd29a00e9c9b9926ce8870.pdf
+6b35b15ceba2f26cf949f23347ec95bbbf7bed64,http://pdfs.semanticscholar.org/6b35/b15ceba2f26cf949f23347ec95bbbf7bed64.pdf
+6b6493551017819a3d1f12bbf922a8a8c8cc2a03,http://pdfs.semanticscholar.org/6b64/93551017819a3d1f12bbf922a8a8c8cc2a03.pdf
+6b17b219bd1a718b5cd63427032d93c603fcf24f,http://pdfs.semanticscholar.org/6b17/b219bd1a718b5cd63427032d93c603fcf24f.pdf
+6bb630dfa797168e6627d972560c3d438f71ea99,http://arxiv.org/pdf/1609.03056v1.pdf
+0729628db4bb99f1f70dd6cb2353d7b76a9fce47,http://pdfs.semanticscholar.org/f02a/dc21a307d32c1145f4ade65504b016b0faac.pdf
+0728f788107122d76dfafa4fb0c45c20dcf523ca,http://arxiv.org/pdf/1505.04427v1.pdf
+07c90e85ac0f74b977babe245dea0f0abcf177e3,http://pdfs.semanticscholar.org/07c9/0e85ac0f74b977babe245dea0f0abcf177e3.pdf
+07ea3dd22d1ecc013b6649c9846d67f2bf697008,http://pdfs.semanticscholar.org/07ea/3dd22d1ecc013b6649c9846d67f2bf697008.pdf
+071099a4c3eed464388c8d1bff7b0538c7322422,http://arxiv.org/pdf/1601.02487v1.pdf
+07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1,http://www.vast.uccs.edu/~tboult/PAPERS/BTAS13-Sapkota-Boult-UCCSFaceDB.pdf
+076d3fc800d882445c11b9af466c3af7d2afc64f,http://slsp.kaist.ac.kr/paperdata/Face_attribute_classification.pdf
+07ac2e342db42589322b28ef291c2702f4a793a8,http://www.cs.illinois.edu/homes/dhoiem/publications/cvpr2009_santosh_context.pdf
+071af21377cc76d5c05100a745fb13cb2e40500f,http://pdfs.semanticscholar.org/071a/f21377cc76d5c05100a745fb13cb2e40500f.pdf
+070ab604c3ced2c23cce2259043446c5ee342fd6,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR_2007/data/papers/workshops/Biometrics/papers/24-p75.pdf
+0786a6d5ce6db8a68cef05bb5f5b84ec1b0c2cde,http://vipl.ict.ac.cn/sites/default/files/papers/files/2008_ACMMM_cxliu_Naming%20Faces%20in%20Broadcast%20News%20Video%20by%20Image%20Google.pdf
+071135dfb342bff884ddb9a4d8af0e70055c22a1,http://pdfs.semanticscholar.org/0711/35dfb342bff884ddb9a4d8af0e70055c22a1.pdf
+0754e769eb613fd3968b6e267a301728f52358be,http://www.umiacs.umd.edu/~cteo/public-shared/ICRA2012_ActionObjects_preprint.pdf
+0773c320713dae62848fceac5a0ac346ba224eca,http://eudl.eu/pdf/10.4108/icst.intetain.2015.259444
+070de852bc6eb275d7ca3a9cdde8f6be8795d1a3,http://www.cs.bath.ac.uk/~dpc/D3DFACS/ICCV_final_2011.pdf
+079edd5cf7968ac4759dfe72af2042cf6e990efc,http://pdfs.semanticscholar.org/079e/dd5cf7968ac4759dfe72af2042cf6e990efc.pdf
+072db5ba5b375d439ba6dbb6427c63cd7da6e940,http://users.ece.cmu.edu/~juefeix/tip_2014_felix.pdf
+0744af11a025e9c072ef6ad102af208e79cc6f44,https://www.researchgate.net/profile/Pascal_Frossard/publication/233799235_Learning_Smooth_Pattern_Transformation_Manifolds/links/00463533951057e9bb000000.pdf
+07a472ea4b5a28b93678a2dcf89028b086e481a2,http://pdfs.semanticscholar.org/07a4/72ea4b5a28b93678a2dcf89028b086e481a2.pdf
+0717b47ab84b848de37dbefd81cf8bf512b544ac,http://pdfs.semanticscholar.org/0717/b47ab84b848de37dbefd81cf8bf512b544ac.pdf
+0708059e3bedbea1cbfae1c8cd6b7259d4b56b5b,http://www.cs.tut.fi/~iosifidi/files/conference/2016_EUSIPCO_GRMCSVM.pdf?dl=0
+074af31bd9caa61fea3c4216731420bd7c08b96a,http://www.umiacs.umd.edu/~jhchoi/paper/cvprw2012_sfv.pdf
+078d507703fc0ac4bf8ca758be101e75ea286c80,http://pdfs.semanticscholar.org/078d/507703fc0ac4bf8ca758be101e75ea286c80.pdf
+0716e1ad868f5f446b1c367721418ffadfcf0519,http://pdfs.semanticscholar.org/6e05/5db22fbddb524ccb0006145db7944d1ed31c.pdf
+073eaa49ccde15b62425cda1d9feab0fea03a842,http://pdfs.semanticscholar.org/073e/aa49ccde15b62425cda1d9feab0fea03a842.pdf
+07d95be4922670ef2f8b11997e0c00eb643f3fca,http://eprints.eemcs.utwente.nl/26833/01/Pantic_The_First_Facial_Landmark_Tracking_in-the-Wild_Challenge.pdf
+07f31bef7a7035792e3791473b3c58d03928abbf,http://videolectures.net/site/normal_dl/tag=977248/fgconference2015_phillips_biometric_samples_01.pdf
+0726a45eb129eed88915aa5a86df2af16a09bcc1,http://www.ri.cmu.edu/pub_files/2016/7/root-compressed.pdf
+07de8371ad4901356145722aa29abaeafd0986b9,http://pdfs.semanticscholar.org/07de/8371ad4901356145722aa29abaeafd0986b9.pdf
+07e639abf1621ceff27c9e3f548fadfa2052c912,http://pdfs.semanticscholar.org/07e6/39abf1621ceff27c9e3f548fadfa2052c912.pdf
+07da958db2e561cc7c24e334b543d49084dd1809,https://infoscience.epfl.ch/record/117525/files/Classification.pdf?version=1
+0742d051caebf8a5d452c03c5d55dfb02f84baab,http://research.cs.tamu.edu/keyser/Papers/CGI05Blur-JonesBW.pdf?origin=publication_detail
+07d986b1005593eda1aeb3b1d24078db864f8f6a,http://pdfs.semanticscholar.org/07d9/86b1005593eda1aeb3b1d24078db864f8f6a.pdf
+38d56ddcea01ce99902dd75ad162213cbe4eaab7,http://pdfs.semanticscholar.org/38d5/6ddcea01ce99902dd75ad162213cbe4eaab7.pdf
+389334e9a0d84bc54bcd5b94b4ce4c5d9d6a2f26,http://pdfs.semanticscholar.org/3893/34e9a0d84bc54bcd5b94b4ce4c5d9d6a2f26.pdf
+38f7f3c72e582e116f6f079ec9ae738894785b96,http://pdfs.semanticscholar.org/38f7/f3c72e582e116f6f079ec9ae738894785b96.pdf
+380dd0ddd5d69adc52defc095570d1c22952f5cc,http://pdfs.semanticscholar.org/380d/d0ddd5d69adc52defc095570d1c22952f5cc.pdf
+38679355d4cfea3a791005f211aa16e76b2eaa8d,http://hub.hku.hk/bitstream/10722/127357/1/Content.pdf
+3802c97f925cb03bac91d9db13d8b777dfd29dcc,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Martins_Non-Parametric_Bayesian_Constrained_2014_CVPR_paper.pdf
+38a2661b6b995a3c4d69e7d5160b7596f89ce0e6,http://www.cs.colostate.edu/~draper/papers/zhang_ijcb14.pdf
+38682c7b19831e5d4f58e9bce9716f9c2c29c4e7,http://pdfs.semanticscholar.org/3868/2c7b19831e5d4f58e9bce9716f9c2c29c4e7.pdf
+38787338ba659f0bfbeba11ec5b7748ffdbb1c3d,http://www.eurasip.org/Proceedings/Eusipco/Eusipco2005/defevent/papers/cr1885.pdf
+385750bcf95036c808d63db0e0b14768463ff4c6,http://pdfs.semanticscholar.org/3857/50bcf95036c808d63db0e0b14768463ff4c6.pdf
+3852968082a16db8be19b4cb04fb44820ae823d4,https://infoscience.epfl.ch/record/230240/files/1701.01821.pdf
+38cc2f1c13420170c7adac30f9dfac69b297fb76,http://pdfs.semanticscholar.org/38cc/2f1c13420170c7adac30f9dfac69b297fb76.pdf
+38cbb500823057613494bacd0078aa0e57b30af8,https://ibug.doc.ic.ac.uk/media/uploads/documents/08014986.pdf
+384f972c81c52fe36849600728865ea50a0c4670,http://pdfs.semanticscholar.org/dad7/3d70b4fa77d67c5c02e3ecba21c52ab9a386.pdf
+38f06a75eb0519ae1d4582a86ef4730cc8fb8d7f,http://pdfs.semanticscholar.org/e9a4/1f856a474aa346491fe76151869e3f548172.pdf
+384945abd53f6a6af51faf254ba8ef0f0fb3f338,http://pdfs.semanticscholar.org/b42c/4b804d69a031aac797346acc337f486e4a09.pdf
+38215c283ce4bf2c8edd597ab21410f99dc9b094,https://pure.qub.ac.uk/portal/files/9746839/IEEE_Transactions_on_Affective_Computing_2012_McKeown.pdf
+38861d0d3a0292c1f54153b303b0d791cbba1d50,http://pdfs.semanticscholar.org/3886/1d0d3a0292c1f54153b303b0d791cbba1d50.pdf
+3830047081ef4bc787f16edf5b244cb2793f75e5,https://www.cs.drexel.edu/~kon/publication/GSchwartz_CPCV13_slides.pdf
+38d8ff137ff753f04689e6b76119a44588e143f3,http://pdfs.semanticscholar.org/38d8/ff137ff753f04689e6b76119a44588e143f3.pdf
+3896c62af5b65d7ba9e52f87505841341bb3e8df,http://pdfs.semanticscholar.org/3896/c62af5b65d7ba9e52f87505841341bb3e8df.pdf
+38192a0f9261d9727b119e294a65f2e25f72d7e6,http://pdfs.semanticscholar.org/3819/2a0f9261d9727b119e294a65f2e25f72d7e6.pdf
+38bbca5f94d4494494860c5fe8ca8862dcf9676e,http://pdfs.semanticscholar.org/c322/b770d2c7d9e70d196577bf0ae6b05205ebd7.pdf
+38183fe28add21693729ddeaf3c8a90a2d5caea3,https://arxiv.org/pdf/1706.09876v1.pdf
+38a9ca2c49a77b540be52377784b9f734e0417e4,http://homepages.dcc.ufmg.br/~william/papers/paper_2011_IJCB_Faces.pdf
+3802da31c6d33d71b839e260f4022ec4fbd88e2d,http://pdfs.semanticscholar.org/3802/da31c6d33d71b839e260f4022ec4fbd88e2d.pdf
+00f7f7b72a92939c36e2ef9be97397d8796ee07c,http://pdfs.semanticscholar.org/00f7/f7b72a92939c36e2ef9be97397d8796ee07c.pdf
+0021f46bda27ea105d722d19690f5564f2b8869e,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhao_Deep_Region_and_CVPR_2016_paper.pdf
+0081e2188c8f34fcea3e23c49fb3e17883b33551,http://pdfs.semanticscholar.org/0081/e2188c8f34fcea3e23c49fb3e17883b33551.pdf
+00dc942f23f2d52ab8c8b76b6016d9deed8c468d,http://pdfs.semanticscholar.org/00dc/942f23f2d52ab8c8b76b6016d9deed8c468d.pdf
+0077cd8f97cafd2b389783858a6e4ab7887b0b6b,http://pdfs.semanticscholar.org/b971/266b29fcecf1d5efe1c4dcdc2355cb188ab0.pdf
+0055c7f32fa6d4b1ad586d5211a7afb030ca08cc,http://pdfs.semanticscholar.org/0055/c7f32fa6d4b1ad586d5211a7afb030ca08cc.pdf
+009cd18ff06ff91c8c9a08a91d2516b264eee48e,http://pdfs.semanticscholar.org/009c/d18ff06ff91c8c9a08a91d2516b264eee48e.pdf
+00214fe1319113e6649435cae386019235474789,http://pdfs.semanticscholar.org/0021/4fe1319113e6649435cae386019235474789.pdf
+004e3292885463f97a70e1f511dc476289451ed5,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Law_Quadruplet-Wise_Image_Similarity_2013_ICCV_paper.pdf
+00b08d22abc85361e1c781d969a1b09b97bc7010,http://www.umariqbal.info/uploads/1/4/8/3/14837880/visapp_2014.pdf
+004d5491f673cd76150f43b0a0429214f5bfd823,http://www.cais.ntu.edu.sg/~chhoi/paper_pdf/fp130-wang.pdf
+007250c2dce81dd839a55f9108677b4f13f2640a,http://pdfs.semanticscholar.org/0db7/735e7adbe6e34dd058af31e278033040ab18.pdf
+00e3957212517a252258baef833833921dd308d4,http://www.yugangjiang.info/publication/17MM-PersonAttribute.pdf
+00616b487d4094805107bb766da1c234c3c75e73,http://vision.ucmerced.edu/papers/Newsam_ACMGIS_2008.pdf
+00f0ed04defec19b4843b5b16557d8d0ccc5bb42,http://pdfs.semanticscholar.org/00f0/ed04defec19b4843b5b16557d8d0ccc5bb42.pdf
+0037bff7be6d463785d4e5b2671da664cd7ef746,http://pdfs.semanticscholar.org/0037/bff7be6d463785d4e5b2671da664cd7ef746.pdf
+009a18d04a5e3ec23f8ffcfc940402fd8ec9488f,http://pdfs.semanticscholar.org/009a/18d04a5e3ec23f8ffcfc940402fd8ec9488f.pdf
+0066caed1238de95a431d836d8e6e551b3cde391,http://humansensing.cs.cmu.edu/sites/default/files/7de_la_torre_frade_fernando_2007_3.pdf
+00075519a794ea546b2ca3ca105e2f65e2f5f471,http://pdfs.semanticscholar.org/0007/5519a794ea546b2ca3ca105e2f65e2f5f471.pdf
+0019925779bff96448f0c75492717e4473f88377,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w3/papers/Reale_Deep_Heterogeneous_Face_CVPR_2017_paper.pdf
+00e9011f58a561500a2910a4013e6334627dee60,http://library.utia.cas.cz/separaty/2008/RO/somol-facial%20expression%20recognition%20using%20angle-related%20information%20from%20facial%20meshes.pdf
+00d9d88bb1bdca35663946a76d807fff3dc1c15f,http://arxiv.org/pdf/1604.04842v1.pdf
+00a967cb2d18e1394226ad37930524a31351f6cf,https://arxiv.org/pdf/1611.05377v1.pdf
+00f1e5e954f9eb7ffde3ca74009a8c3c27358b58,http://www.vision.caltech.edu/holub/public_html/Papers/PDF/holub_et_al_face_clustering.pdf
+00a3cfe3ce35a7ffb8214f6db15366f4e79761e3,http://engineering.cae.cn/fitee/fileup/2095-9184/SUPPL/20150414135701.pdf
+0058cbe110933f73c21fa6cc9ae0cd23e974a9c7,http://pdfs.semanticscholar.org/0058/cbe110933f73c21fa6cc9ae0cd23e974a9c7.pdf
+004a1bb1a2c93b4f379468cca6b6cfc6d8746cc4,http://pdfs.semanticscholar.org/004a/1bb1a2c93b4f379468cca6b6cfc6d8746cc4.pdf
+00d94b35ffd6cabfb70b9a1d220b6823ae9154ee,https://arxiv.org/pdf/1503.07989v1.pdf
+00ebc3fa871933265711558fa9486057937c416e,http://pdfs.semanticscholar.org/00eb/c3fa871933265711558fa9486057937c416e.pdf
+006f283a50d325840433f4cf6d15876d475bba77,http://lvdmaaten.github.io/publications/papers/TPAMI_2014.pdf
+00b29e319ff8b3a521b1320cb8ab5e39d7f42281,http://pdfs.semanticscholar.org/8007/b8afa13869d2a7c681db8bd7c2e7df1ef02d.pdf
+00d931eccab929be33caea207547989ae7c1ef39,http://pdfs.semanticscholar.org/00d9/31eccab929be33caea207547989ae7c1ef39.pdf
+0059b3dfc7056f26de1eabaafd1ad542e34c2c2e,http://pdfs.semanticscholar.org/0059/b3dfc7056f26de1eabaafd1ad542e34c2c2e.pdf
+0052de4885916cf6949a6904d02336e59d98544c,https://rd.springer.com/content/pdf/10.1007/s10994-005-3561-6.pdf
+00d0b01d6a5f12216e078001b7c49225d2495b21,http://graphics.cs.uh.edu/publication/pub/2009_TVCJ_faceilluminationtransfer.pdf
+6e60536c847ac25dba4c1c071e0355e5537fe061,http://www.cfar.umd.edu/~fer/postscript/CV_and_NLP.pdf
+6e198f6cc4199e1c4173944e3df6f39a302cf787,http://pdfs.semanticscholar.org/6e19/8f6cc4199e1c4173944e3df6f39a302cf787.pdf
+6eaf446dec00536858548fe7cc66025b70ce20eb,http://pdfs.semanticscholar.org/6eaf/446dec00536858548fe7cc66025b70ce20eb.pdf
+6e173ad91b288418c290aa8891193873933423b3,http://pdfs.semanticscholar.org/eb3b/021406fe5a5002535b392cac60832aa8f162.pdf
+6eba25166fe461dc388805cc2452d49f5d1cdadd,http://pdfs.semanticscholar.org/6eba/25166fe461dc388805cc2452d49f5d1cdadd.pdf
+6ed738ff03fd9042965abdfaa3ed8322de15c116,https://dr.ntu.edu.sg/bitstream/handle/10220/39690/kmeap_icdm2014.pdf?isAllowed=y&sequence=1
+6ecd4025b7b5f4894c990614a9a65e3a1ac347b2,http://pdfs.semanticscholar.org/6ecd/4025b7b5f4894c990614a9a65e3a1ac347b2.pdf
+6eddea1d991e81c1c3024a6cea422bc59b10a1dc,http://pdfs.semanticscholar.org/6edd/ea1d991e81c1c3024a6cea422bc59b10a1dc.pdf
+6eaeac9ae2a1697fa0aa8e394edc64f32762f578,http://pdfs.semanticscholar.org/6eae/ac9ae2a1697fa0aa8e394edc64f32762f578.pdf
+6ee2ea416382d659a0dddc7a88fc093accc2f8ee,https://pdfs.semanticscholar.org/6ee2/ea416382d659a0dddc7a88fc093accc2f8ee.pdf
+6e97a99b2879634ecae962ddb8af7c1a0a653a82,http://pdfs.semanticscholar.org/7d37/7ba82df9cba0959cb910288415e568007792.pdf
+6e9a8a34ab5b7cdc12ea52d94e3462225af2c32c,http://www.cv-foundation.org/openaccess/content_cvpr_2016_workshops/w28/papers/Kim_Fusing_Aligned_and_CVPR_2016_paper.pdf
+6ec004e4c1171c4c4858eec7c927f567684b80bc,http://www.researchgate.net/profile/Bongnam_Kang/publication/221292310_The_POSTECH_face_database_(PF07)_and_performance_evaluation/links/00463531e60efa5310000000.pdf
+6e3a181bf388dd503c83dc324561701b19d37df1,http://pdfs.semanticscholar.org/9d91/213394fb411743b11bae74cf22f0ffca9191.pdf
+6ef1996563835b4dfb7fda1d14abe01c8bd24a05,http://hera.inf-cv.uni-jena.de:6680/pdf/Goering14:NPT
+6ee8a94ccba10062172e5b31ee097c846821a822,http://pdfs.semanticscholar.org/6ee8/a94ccba10062172e5b31ee097c846821a822.pdf
+6ee64c19efa89f955011531cde03822c2d1787b8,http://pdfs.semanticscholar.org/6ee6/4c19efa89f955011531cde03822c2d1787b8.pdf
+6e94c579097922f4bc659dd5d6c6238a428c4d22,http://pdfs.semanticscholar.org/6e94/c579097922f4bc659dd5d6c6238a428c4d22.pdf
+6e379f2d34e14efd85ae51875a4fa7d7ae63a662,http://pdfs.semanticscholar.org/6e37/9f2d34e14efd85ae51875a4fa7d7ae63a662.pdf
+6eb1e006b7758b636a569ca9e15aafd038d2c1b1,http://pdfs.semanticscholar.org/6eb1/e006b7758b636a569ca9e15aafd038d2c1b1.pdf
+6eece104e430829741677cadc1dfacd0e058d60f,http://pdfs.semanticscholar.org/7a42/6d0b98c8f52d61f9d89cd7be5ab6119f0a4a.pdf
+6e0a05d87b3cc7e16b4b2870ca24cf5e806c0a94,http://pdfs.semanticscholar.org/6e0a/05d87b3cc7e16b4b2870ca24cf5e806c0a94.pdf
+6e1802874ead801a7e1072aa870681aa2f555f35,http://www.cs.yale.edu/homes/hw5/WebContent/ICASSP07_Yan.pdf
+6ed22b934e382c6f72402747d51aa50994cfd97b,http://www.ifp.illinois.edu/~jyang29/papers/WACV16-Expression.pdf
+6e93fd7400585f5df57b5343699cb7cda20cfcc2,http://pdfs.semanticscholar.org/a52f/4d315adf0aa60ba284fd4caf22485625cedf.pdf
+6eb1b5935b0613a41b72fd9e7e53a3c0b32651e9,http://pdfs.semanticscholar.org/6eb1/b5935b0613a41b72fd9e7e53a3c0b32651e9.pdf
+6e12ba518816cbc2d987200c461dc907fd19f533,http://pdfs.semanticscholar.org/6e12/ba518816cbc2d987200c461dc907fd19f533.pdf
+6e782073a013ce3dbc5b9b56087fd0300c510f67,http://pdfs.semanticscholar.org/6e78/2073a013ce3dbc5b9b56087fd0300c510f67.pdf
+9ab463d117219ed51f602ff0ddbd3414217e3166,http://pdfs.semanticscholar.org/d965/43e8ab524108cae8c12d3a65a54a295deae6.pdf
+9ac82909d76b4c902e5dde5838130de6ce838c16,http://pdfs.semanticscholar.org/9ac8/2909d76b4c902e5dde5838130de6ce838c16.pdf
+9a0c7a4652c49a177460b5d2fbbe1b2e6535e50a,http://arxiv.org/pdf/1602.01940v1.pdf
+9ac15845defcd0d6b611ecd609c740d41f0c341d,http://pdfs.semanticscholar.org/9ac1/5845defcd0d6b611ecd609c740d41f0c341d.pdf
+9af1cf562377b307580ca214ecd2c556e20df000,http://pdfs.semanticscholar.org/9af1/cf562377b307580ca214ecd2c556e20df000.pdf
+9a4c45e5c6e4f616771a7325629d167a38508691,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W02/papers/Mostafa_A_Facial_Features_2015_CVPR_paper.pdf
+9a7858eda9b40b16002c6003b6db19828f94a6c6,https://www1.icsi.berkeley.edu/~twke/pdfs/pubs/mooney_icip2017.pdf
+9a3535cabf5d0f662bff1d897fb5b777a412d82e,http://pdfs.semanticscholar.org/9a35/35cabf5d0f662bff1d897fb5b777a412d82e.pdf
+9abd35b37a49ee1295e8197aac59bde802a934f3,http://pdfs.semanticscholar.org/9abd/35b37a49ee1295e8197aac59bde802a934f3.pdf
+9a276c72acdb83660557489114a494b86a39f6ff,http://pdfs.semanticscholar.org/9a27/6c72acdb83660557489114a494b86a39f6ff.pdf
+9a1a9dd3c471bba17e5ce80a53e52fcaaad4373e,http://pdfs.semanticscholar.org/9a1a/9dd3c471bba17e5ce80a53e52fcaaad4373e.pdf
+9a6da02db99fcc0690d7ffdc15340b125726ab95,http://vision.ucla.edu/~vedaldi/assets/pubs/vedaldi07boosting.pdf
+9a42c519f0aaa68debbe9df00b090ca446d25bc4,http://pdfs.semanticscholar.org/9a42/c519f0aaa68debbe9df00b090ca446d25bc4.pdf
+36b40c75a3e53c633c4afb5a9309d10e12c292c7,https://pdfs.semanticscholar.org/36b4/0c75a3e53c633c4afb5a9309d10e12c292c7.pdf
+363ca0a3f908859b1b55c2ff77cc900957653748,http://pdfs.semanticscholar.org/363c/a0a3f908859b1b55c2ff77cc900957653748.pdf
+365f67fe670bf55dc9ccdcd6888115264b2a2c56,http://pdfs.semanticscholar.org/f431/d3d7a0323bf1150420c826dade2093a7dfa1.pdf
+36fe39ed69a5c7ff9650fd5f4fe950b5880760b0,http://pdfs.semanticscholar.org/36fe/39ed69a5c7ff9650fd5f4fe950b5880760b0.pdf
+36a3a96ef54000a0cd63de867a5eb7e84396de09,http://www.cs.toronto.edu/~guerzhoy/oriviz/crv17.pdf
+36fc4120fc0638b97c23f97b53e2184107c52233,http://pdfs.semanticscholar.org/36fc/4120fc0638b97c23f97b53e2184107c52233.pdf
+36ce0b68a01b4c96af6ad8c26e55e5a30446f360,http://liris.cnrs.fr/Documents/Liris-6963.pdf
+360d66e210f7011423364327b7eccdf758b5fdd2,http://www.eurasip.org/Proceedings/Eusipco/Eusipco2009/contents/papers/1569190652.pdf
+365866dc937529c3079a962408bffaa9b87c1f06,http://pdfs.semanticscholar.org/3658/66dc937529c3079a962408bffaa9b87c1f06.pdf
+361c9ba853c7d69058ddc0f32cdbe94fbc2166d5,http://pdfs.semanticscholar.org/361c/9ba853c7d69058ddc0f32cdbe94fbc2166d5.pdf
+362a70b6e7d55a777feb7b9fc8bc4d40a57cde8c,http://mirlab.org/conference_papers/International_Conference/ICASSP%202016/pdfs/0002792.pdf
+36ea75e14b69bed454fde6076ea6b85ed87fbb14,http://pdfs.semanticscholar.org/36ea/75e14b69bed454fde6076ea6b85ed87fbb14.pdf
+36df81e82ea5c1e5edac40b60b374979a43668a5,http://www.robots.ox.ac.uk/~vgg/publications/2012/Parkhi12b/parkhi12b.pdf
+366d20f8fd25b4fe4f7dc95068abc6c6cabe1194,http://arxiv.org/pdf/1605.05411v1.pdf
+3630324c2af04fd90f8668f9ee9709604fe980fd,http://www.yugangjiang.info/publication/TCSVT-Shu.pdf
+362ba8317aba71c78dafca023be60fb71320381d,http://pdfs.semanticscholar.org/362b/a8317aba71c78dafca023be60fb71320381d.pdf
+36cf96fe11a2c1ea4d999a7f86ffef6eea7b5958,http://www.iab-rubric.org/papers/RGBD-Face.pdf
+36e8ef2e5d52a78dddf0002e03918b101dcdb326,http://www.milbo.org/stasm-files/multiview-active-shape-models-with-sift-for-300w.pdf
+36018404263b9bb44d1fddaddd9ee9af9d46e560,http://pdfs.semanticscholar.org/3601/8404263b9bb44d1fddaddd9ee9af9d46e560.pdf
+367f2668b215e32aff9d5122ce1f1207c20336c8,http://pdfs.semanticscholar.org/367f/2668b215e32aff9d5122ce1f1207c20336c8.pdf
+36c2db5ff76864d289781f93cbb3e6351f11984c,http://www.eurasip.org/Proceedings/Eusipco/Eusipco2009/contents/papers/1569187194.pdf
+3624ca25f09f3acbcf4d3a4c40b9e45a29c22b94,http://pdfs.semanticscholar.org/3624/ca25f09f3acbcf4d3a4c40b9e45a29c22b94.pdf
+3661a34f302883c759b9fa2ce03de0c7173d2bb2,http://pdfs.semanticscholar.org/fd6d/14fb0bbca58e924c504d7dc57cb7f8d3707e.pdf
+36c473fc0bf3cee5fdd49a13cf122de8be736977,http://pdfs.semanticscholar.org/bc6c/051b66ecadac7bb3e6ace66665e42875d790.pdf
+368d59cf1733af511ed8abbcbeb4fb47afd4da1c,http://pdfs.semanticscholar.org/368d/59cf1733af511ed8abbcbeb4fb47afd4da1c.pdf
+366595171c9f4696ec5eef7c3686114fd3f116ad,http://pdfs.semanticscholar.org/3665/95171c9f4696ec5eef7c3686114fd3f116ad.pdf
+36b9f46c12240898bafa10b0026a3fb5239f72f3,https://arxiv.org/pdf/1702.05573v1.pdf
+3634b4dd263c0f330245c086ce646c9bb748cd6b,https://arxiv.org/pdf/1504.00983v2.pdf
+367a786cfe930455cd3f6bd2492c304d38f6f488,http://pdfs.semanticscholar.org/367a/786cfe930455cd3f6bd2492c304d38f6f488.pdf
+5c4ce36063dd3496a5926afd301e562899ff53ea,http://pdfs.semanticscholar.org/5c4c/e36063dd3496a5926afd301e562899ff53ea.pdf
+5c6de2d9f93b90034f07860ae485a2accf529285,http://pdfs.semanticscholar.org/5c6d/e2d9f93b90034f07860ae485a2accf529285.pdf
+5c624382057b55e46af4dc4c055a33c90e8bf08a,http://www.researchgate.net/profile/Ngoc_Son_Vu/publication/224114972_Illumination-robust_face_recognition_using_retina_modeling/links/0fcfd507f06292b0a5000000.pdf
+5ca23ceb0636dfc34c114d4af7276a588e0e8dac,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_SMC_2009/PDFs/116.pdf
+5c2a7518fb26a37139cebff76753d83e4da25159,http://pdfs.semanticscholar.org/5c2a/7518fb26a37139cebff76753d83e4da25159.pdf
+5cb83eba8d265afd4eac49eb6b91cdae47def26d,http://www.kresttechnology.com/krest-academic-projects/krest-major-projects/ECE/B-Tech%20Papers/21.pdf
+5c8672c0d2f28fd5d2d2c4b9818fcff43fb01a48,http://pdfs.semanticscholar.org/5c86/72c0d2f28fd5d2d2c4b9818fcff43fb01a48.pdf
+5c3dce55c61ee86073575ac75cc882a215cb49e6,http://pdfs.semanticscholar.org/8d93/b33c38a26b97442b2f160e75212739c60bc5.pdf
+5c2e264d6ac253693469bd190f323622c457ca05,http://vislab.ucr.edu/PUBLICATIONS/pubs/Journal%20and%20Conference%20Papers/after10-1-1997/Conference/2013/Improving%20large%20scale%20image%20retrieval%20using%20multi-level%20features13.pdf
+5c473cfda1d7c384724fbb139dfe8cb39f79f626,http://www.cs.zju.edu.cn/~gpan/publication/2012-PAA-face-expression-onlinefirst.pdf
+5c820e47981d21c9dddde8d2f8020146e600368f,http://pdfs.semanticscholar.org/5c82/0e47981d21c9dddde8d2f8020146e600368f.pdf
+5c5e1f367e8768a9fb0f1b2f9dbfa060a22e75c0,http://www.cs.ucr.edu/~mkafai/papers/Paper_tifs2014.pdf
+5c124b57699be19cd4eb4e1da285b4a8c84fc80d,http://www.iis.ee.ic.ac.uk/icvl/doc/cvpr14_xiaowei.pdf
+5c435c4bc9c9667f968f891e207d241c3e45757a,http://pdfs.semanticscholar.org/eb6a/13c8a607dfc535e5f31b7c8843335674644c.pdf
+5c7adde982efb24c3786fa2d1f65f40a64e2afbf,http://pdfs.semanticscholar.org/bd40/dee4f2bbb0e512575cc96a0e3a7918a0ce42.pdf
+5c36d8bb0815fd4ff5daa8351df4a7e2d1b32934,http://www.istc-cc.cmu.edu/publications/papers/2016/GeePS-cui-eurosys16.pdf
+5cfbeae360398de9e20e4165485837bd42b93217,http://pdfs.semanticscholar.org/5cfb/eae360398de9e20e4165485837bd42b93217.pdf
+5ca14fa73da37855bfa880b549483ee2aba26669,http://pdfs.semanticscholar.org/5ca1/4fa73da37855bfa880b549483ee2aba26669.pdf
+5c92355b2808621d237a89dc7b3faa5cdb990ab5,http://www.researchgate.net/profile/Brian_Lovell2/publication/236124723_Dynamic_Amelioration_of_Resolution_Mismatches_for_Local_Feature_Based_Identity_Inference/links/0fcfd50741a027e848000000.pdf
+5c02bd53c0a6eb361972e8a4df60cdb30c6e3930,http://arxiv.org/pdf/1303.4893v2.pdf
+5c8ae37d532c7bb8d7f00dfde84df4ba63f46297,http://pdfs.semanticscholar.org/5c8a/e37d532c7bb8d7f00dfde84df4ba63f46297.pdf
+5c717afc5a9a8ccb1767d87b79851de8d3016294,http://mirlab.org/conference_papers/International_Conference/ICASSP%202012/pdfs/0001845.pdf
+5ce2cb4c76b0cdffe135cf24b9cda7ae841c8d49,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhao_Facial_Expression_Intensity_CVPR_2016_paper.pdf
+09b80d8eea809529b08a8b0ff3417950c048d474,http://openaccess.thecvf.com/content_cvpr_2013/papers/Choi_Adding_Unlabeled_Samples_2013_CVPR_paper.pdf
+09f58353e48780c707cf24a0074e4d353da18934,http://www.cse.msu.edu/rgroups/biometrics/Publications/Face/BestrowdenBishtKlontzJain_CrowdsourcingHumanPeformance_IJCB2014.pdf
+096eb8b4b977aaf274c271058feff14c99d46af3,http://www.dtic.mil/dtic/tr/fulltext/u2/a585819.pdf
+0952ac6ce94c98049d518d29c18d136b1f04b0c0,http://pdfs.semanticscholar.org/0952/ac6ce94c98049d518d29c18d136b1f04b0c0.pdf
+0969e0dc05fca21ff572ada75cb4b703c8212e80,http://pdfs.semanticscholar.org/0969/e0dc05fca21ff572ada75cb4b703c8212e80.pdf
+09dd01e19b247a33162d71f07491781bdf4bfd00,http://pdfs.semanticscholar.org/5991/0d557b54566ec97280480daca02685f21907.pdf
+09cf3f1764ab1029f3a7d57b70ae5d5954486d69,http://pdfs.semanticscholar.org/09cf/3f1764ab1029f3a7d57b70ae5d5954486d69.pdf
+09fa54f1ab7aaa83124d2415bfc6eb51e4b1f081,http://acberg.com/papers/street2shop.pdf
+09628e9116e7890bc65ebeabaaa5f607c9847bae,https://arxiv.org/pdf/1704.03039.pdf
+09733129161ca7d65cf56a7ad63c17f493386027,http://pdfs.semanticscholar.org/0973/3129161ca7d65cf56a7ad63c17f493386027.pdf
+09c586624ec65d7ef2d4d8d321e98f61698dcfe2,http://www.seas.upenn.edu/~timothee/papers/cvpr_2010_supplement.pdf
+09718bf335b926907ded5cb4c94784fd20e5ccd8,http://parnec.nuaa.edu.cn/papers/journal/2005/xtan-TNN05.pdf
+098a1ccc13b8d6409aa333c8a1079b2c9824705b,http://people.cs.pitt.edu/~kovashka/ut/pivots-kovashka-iccv2013.pdf
+0903bb001c263e3c9a40f430116d1e629eaa616f,http://pdfs.semanticscholar.org/0903/bb001c263e3c9a40f430116d1e629eaa616f.pdf
+090ff8f992dc71a1125636c1adffc0634155b450,http://pdfs.semanticscholar.org/090f/f8f992dc71a1125636c1adffc0634155b450.pdf
+09b43b59879d59493df2a93c216746f2cf50f4ac,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1A_036_ext.pdf
+098fa9b4c3f7fb41c7a178d36f5dbb50a3ffa377,http://oui.csail.mit.edu/camera_readys/13.pdf
+09b0ef3248ff8f1a05b8704a1b4cf64951575be9,https://arxiv.org/pdf/1511.06783v1.pdf
+097104fc731a15fad07479f4f2c4be2e071054a2,http://pdfs.semanticscholar.org/dbad/94c3506a342f55f54388e162e8481ae8b184.pdf
+094357c1a2ba3fda22aa6dd9e496530d784e1721,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Wang_A_Unified_Probabilistic_2013_ICCV_paper.pdf
+09f853ce12f7361c4b50c494df7ce3b9fad1d221,http://files.is.tue.mpg.de/jgall/download/jgall_RFdepthFace_ijcv12.pdf
+09111da0aedb231c8484601444296c50ca0b5388,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553737.pdf
+09750c9bbb074bbc4eb66586b20822d1812cdb20,http://mirlab.org/conference_papers/International_Conference/ICASSP%202012/pdfs/0001385.pdf
+09ce14b84af2dc2f76ae1cf227356fa0ba337d07,http://grail.cs.washington.edu/3dfaces/paper.pdf
+090e4713bcccff52dcd0c01169591affd2af7e76,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Shao_What_Do_You_2013_ICCV_paper.pdf
+097f674aa9e91135151c480734dda54af5bc4240,http://pdfs.semanticscholar.org/097f/674aa9e91135151c480734dda54af5bc4240.pdf
+5d485501f9c2030ab33f97972aa7585d3a0d59a7,http://pdfs.semanticscholar.org/5d48/5501f9c2030ab33f97972aa7585d3a0d59a7.pdf
+5da740682f080a70a30dc46b0fc66616884463ec,http://pdfs.semanticscholar.org/5da7/40682f080a70a30dc46b0fc66616884463ec.pdf
+5de5848dc3fc35e40420ffec70a407e4770e3a8d,http://pdfs.semanticscholar.org/5de5/848dc3fc35e40420ffec70a407e4770e3a8d.pdf
+5da139fc43216c86d779938d1c219b950dd82a4c,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_ICIP_2007/pdfs/0200205.pdf
+5d33a10752af9ea30993139ac6e3a323992a5831,http://web.engr.illinois.edu/~iendres2/publications/cvpr2010_att.pdf
+5dc056fe911a3e34a932513abe637076250d96da,http://www.vision.ee.ethz.ch/~gfanelli/pubs/cvpr12.pdf
+5d185d82832acd430981ffed3de055db34e3c653,http://pdfs.semanticscholar.org/fc70/92e72a2bae6f60266147e0fb587b1771699a.pdf
+5d233e6f23b1c306cf62af49ce66faac2078f967,http://pdfs.semanticscholar.org/5d23/3e6f23b1c306cf62af49ce66faac2078f967.pdf
+5dd496e58cfedfc11b4b43c4ffe44ac72493bf55,http://pdfs.semanticscholar.org/5dd4/96e58cfedfc11b4b43c4ffe44ac72493bf55.pdf
+5db075a308350c083c3fa6722af4c9765c4b8fef,http://pdfs.semanticscholar.org/5db0/75a308350c083c3fa6722af4c9765c4b8fef.pdf
+5d7f8eb73b6a84eb1d27d1138965eb7aef7ba5cf,https://www.cl.cam.ac.uk/~hg410/SariyanidiEtAl-RobustRegistration-TIP2016.pdf
+5dcf78de4d3d867d0fd4a3105f0defae2234b9cb,http://pdfs.semanticscholar.org/5dcf/78de4d3d867d0fd4a3105f0defae2234b9cb.pdf
+5dfebcb7bfefb1af1cfef61a151abfe98a7e7cfa,http://vision.ucsd.edu/sites/default/files/cwah_cvpr2013_unfamiliar.pdf
+5d88702cdc879396b8b2cc674e233895de99666b,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w11/papers/Liu_Exploiting_Feature_Hierarchies_ICCV_2015_paper.pdf
+5d44c675addcb6e74cbc5a9c48df0d754bdbcd98,http://pdfs.semanticscholar.org/9bc0/1fa9400c231e41e6a72ec509d76ca797207c.pdf
+5d5cd6fa5c41eb9d3d2bab3359b3e5eb60ae194e,http://pdfs.semanticscholar.org/5d5c/d6fa5c41eb9d3d2bab3359b3e5eb60ae194e.pdf
+5d09d5257139b563bd3149cfd5e6f9eae3c34776,http://pdfs.semanticscholar.org/5d09/d5257139b563bd3149cfd5e6f9eae3c34776.pdf
+5d479f77ecccfac9f47d91544fd67df642dfab3c,http://pdfs.semanticscholar.org/7880/c21bb0de02cd4db095e011ac7aff47b35ee8.pdf
+5d01283474b73a46d80745ad0cc0c4da14aae194,http://pdfs.semanticscholar.org/5d01/283474b73a46d80745ad0cc0c4da14aae194.pdf
+5d197c8cd34473eb6cde6b65ced1be82a3a1ed14,http://cdn.intechopen.com/pdfs/20590/InTech-A_face_image_database_for_evaluating_out_of_focus_blur.pdf
+5df376748fe5ccd87a724ef31d4fdb579dab693f,http://pdfs.semanticscholar.org/5df3/76748fe5ccd87a724ef31d4fdb579dab693f.pdf
+31aa20911cc7a2b556e7d273f0bdd5a2f0671e0a,http://pdfs.semanticscholar.org/31aa/20911cc7a2b556e7d273f0bdd5a2f0671e0a.pdf
+31b05f65405534a696a847dd19c621b7b8588263,https://arxiv.org/pdf/1611.01484v1.pdf
+31625522950e82ad4dffef7ed0df00fdd2401436,http://pdfs.semanticscholar.org/3162/5522950e82ad4dffef7ed0df00fdd2401436.pdf
+3167f415a861f19747ab5e749e78000179d685bc,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_ICCV_2009/contents/pdf/iccv2009_131.pdf
+3107316f243233d45e3c7e5972517d1ed4991f91,https://arxiv.org/pdf/1703.10155v1.pdf
+31c0968fb5f587918f1c49bf7fa51453b3e89cf7,http://pdfs.semanticscholar.org/31c0/968fb5f587918f1c49bf7fa51453b3e89cf7.pdf
+31e57fa83ac60c03d884774d2b515813493977b9,http://pdfs.semanticscholar.org/31e5/7fa83ac60c03d884774d2b515813493977b9.pdf
+31a2fb63a3fc67da9932474cda078c9ac43f85c5,http://www.researchgate.net/profile/Sadeep_Jayasumana2/publication/269040853_Kernel_Methods_on_Riemannian_Manifolds_with_Gaussian_RBF_Kernels/links/54858a6a0cf283750c37264b.pdf
+3137a3fedf23717c411483c7b4bd2ed646258401,https://www.tugraz.at/fileadmin/user_upload/Institute/ICG/Documents/lrs/pubs/koestinger_iccv_13.pdf
+31c34a5b42a640b824fa4e3d6187e3675226143e,http://pdfs.semanticscholar.org/31c3/4a5b42a640b824fa4e3d6187e3675226143e.pdf
+316e67550fbf0ba54f103b5924e6537712f06bee,http://lear.inrialpes.fr/pubs/2010/GVS10/slides.pdf
+31ef5419e026ef57ff20de537d82fe3cfa9ee741,http://pdfs.semanticscholar.org/9a10/78b6e3810c95fc4b87154ad62c0f133caebb.pdf
+310da8bd81c963bd510bf9aaa4d028a643555c84,http://www.cs.sunysb.edu/~ial/content/papers/2005/Zhang2005cvpr2.pdf
+31b58ced31f22eab10bd3ee2d9174e7c14c27c01,http://pdfs.semanticscholar.org/31b5/8ced31f22eab10bd3ee2d9174e7c14c27c01.pdf
+31835472821c7e3090abb42e57c38f7043dc3636,http://pdfs.semanticscholar.org/3183/5472821c7e3090abb42e57c38f7043dc3636.pdf
+31a38fd2d9d4f34d2b54318021209fe5565b8f7f,http://www.umiacs.umd.edu/~huytho/papers/HoChellappa_TIP2013.pdf
+31aa7c992692b74f17ddec665cd862faaeafd673,http://www.researchgate.net/profile/Shinichi_Satoh/publication/221657297_Unsupervised_face_annotation_by_mining_the_web/links/0912f510a04034844d000000.pdf
+3152e89963b8a4028c4abf6e1dc19e91c4c5a8f4,http://pdfs.semanticscholar.org/3152/e89963b8a4028c4abf6e1dc19e91c4c5a8f4.pdf
+318a81acdd15a0ab2f706b5f53ee9d4d5d86237f,http://pdfs.semanticscholar.org/318a/81acdd15a0ab2f706b5f53ee9d4d5d86237f.pdf
+31ace8c9d0e4550a233b904a0e2aabefcc90b0e3,http://pdfs.semanticscholar.org/31ac/e8c9d0e4550a233b904a0e2aabefcc90b0e3.pdf
+316d51aaa37891d730ffded7b9d42946abea837f,http://pdfs.semanticscholar.org/9f00/3a5e727b99f792e600b93b6458b9cda3f0a5.pdf
+31afdb6fa95ded37e5871587df38976fdb8c0d67,http://www3.ntu.edu.sg/home/EXDJiang/ICASSP15.pdf
+31d60b2af2c0e172c1a6a124718e99075818c408,http://pdfs.semanticscholar.org/31d6/0b2af2c0e172c1a6a124718e99075818c408.pdf
+31f1e711fcf82c855f27396f181bf5e565a2f58d,http://www.rci.rutgers.edu/~vmp93/Conference_pub/Age_iccv2015.pdf
+312afff739d1e0fcd3410adf78be1c66b3480396,http://pdfs.semanticscholar.org/312a/fff739d1e0fcd3410adf78be1c66b3480396.pdf
+315a90543d60a5b6c5d1716fe9076736f0e90d24,https://www.computer.org/web/csdl/index/-/csdl/proceedings/fg/2013/5545/00/06553721.pdf
+3107085973617bbfc434c6cb82c87f2a952021b7,http://pdfs.semanticscholar.org/cee6/6bd89d1e25355e78573220adcd017a2d97d8.pdf
+31182c5ffc8c5d8772b6db01ec98144cd6e4e897,http://pdfs.semanticscholar.org/3118/2c5ffc8c5d8772b6db01ec98144cd6e4e897.pdf
+31bb49ba7df94b88add9e3c2db72a4a98927bb05,http://pdfs.semanticscholar.org/31bb/49ba7df94b88add9e3c2db72a4a98927bb05.pdf
+3146fabd5631a7d1387327918b184103d06c2211,http://www.cv-foundation.org/openaccess/content_cvpr_2016_workshops/w18/papers/Jeni_Person-Independent_3D_Gaze_CVPR_2016_paper.pdf
+91811203c2511e919b047ebc86edad87d985a4fa,http://pdfs.semanticscholar.org/9181/1203c2511e919b047ebc86edad87d985a4fa.pdf
+910524c0d0fe062bf806bb545627bf2c9a236a03,http://pdfs.semanticscholar.org/9105/24c0d0fe062bf806bb545627bf2c9a236a03.pdf
+9117fd5695582961a456bd72b157d4386ca6a174,http://pdfs.semanticscholar.org/9117/fd5695582961a456bd72b157d4386ca6a174.pdf
+91df860368cbcebebd83d59ae1670c0f47de171d,http://pdfs.semanticscholar.org/91df/860368cbcebebd83d59ae1670c0f47de171d.pdf
+91067f298e1ece33c47df65236853704f6700a0b,http://pdfs.semanticscholar.org/9106/7f298e1ece33c47df65236853704f6700a0b.pdf
+91a1945b9c40af4944a6cdcfe59a0999de4f650a,http://ccbr2017.org/ccbr%20PPT/95%E5%8F%B7%E8%AE%BA%E6%96%87-%E7%94%B3%E6%99%9A%E9%9C%9E%20wanxiahen-ccbr.pdf
+919d3067bce76009ce07b070a13728f549ebba49,http://pdfs.semanticscholar.org/919d/3067bce76009ce07b070a13728f549ebba49.pdf
+9110c589c6e78daf4affd8e318d843dc750fb71a,http://pdfs.semanticscholar.org/9110/c589c6e78daf4affd8e318d843dc750fb71a.pdf
+91e57667b6fad7a996b24367119f4b22b6892eca,http://pdfs.semanticscholar.org/91e5/7667b6fad7a996b24367119f4b22b6892eca.pdf
+91883dabc11245e393786d85941fb99a6248c1fb,http://pdfs.semanticscholar.org/9188/3dabc11245e393786d85941fb99a6248c1fb.pdf
+91b1a59b9e0e7f4db0828bf36654b84ba53b0557,http://www.kresttechnology.com/krest-academic-projects/krest-mtech-projects/ECE/MTech%20DSP%202015-16/MTech%20DSP%20BasePaper%202015-16/50.pdf
+919d0e681c4ef687bf0b89fe7c0615221e9a1d30,http://pdfs.semanticscholar.org/919d/0e681c4ef687bf0b89fe7c0615221e9a1d30.pdf
+912a6a97af390d009773452814a401e258b77640,http://pdfs.semanticscholar.org/912a/6a97af390d009773452814a401e258b77640.pdf
+91d513af1f667f64c9afc55ea1f45b0be7ba08d4,http://pdfs.semanticscholar.org/91d5/13af1f667f64c9afc55ea1f45b0be7ba08d4.pdf
+91e507d2d8375bf474f6ffa87788aa3e742333ce,http://pdfs.semanticscholar.org/91e5/07d2d8375bf474f6ffa87788aa3e742333ce.pdf
+918b72a47b7f378bde0ba29c908babf6dab6f833,http://pdfs.semanticscholar.org/918b/72a47b7f378bde0ba29c908babf6dab6f833.pdf
+91e58c39608c6eb97b314b0c581ddaf7daac075e,http://pdfs.semanticscholar.org/91e5/8c39608c6eb97b314b0c581ddaf7daac075e.pdf
+91d2fe6fdf180e8427c65ffb3d895bf9f0ec4fa0,http://pdfs.semanticscholar.org/94c3/624c54f8f070a9dc82a41cbf7a888fe8f477.pdf
+91835984eaeb538606972de47c372c5fcfe8b6aa,http://www.cse.ust.hk/~qnature/pdf/IEEESMC2015.pdf
+9103148dd87e6ff9fba28509f3b265e1873166c9,http://pdfs.semanticscholar.org/9103/148dd87e6ff9fba28509f3b265e1873166c9.pdf
+915d4a0fb523249ecbc88eb62cb150a60cf60fa0,http://pdfs.semanticscholar.org/915d/4a0fb523249ecbc88eb62cb150a60cf60fa0.pdf
+65126e0b1161fc8212643b8ff39c1d71d262fbc1,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Ghiasi_Occlusion_Coherence_Localizing_2014_CVPR_paper.pdf
+65b737e5cc4a565011a895c460ed8fd07b333600,http://pdfs.semanticscholar.org/7574/f999d2325803f88c4915ba8f304cccc232d1.pdf
+6582f4ec2815d2106957215ca2fa298396dde274,http://mi.eng.cam.ac.uk/~cipolla/publications/article/2007-PAMI-face-sets.pdf
+65b1760d9b1541241c6c0222cc4ee9df078b593a,http://pdfs.semanticscholar.org/65b1/760d9b1541241c6c0222cc4ee9df078b593a.pdf
+65d7f95fcbabcc3cdafc0ad38e81d1f473bb6220,http://pdfs.semanticscholar.org/65d7/f95fcbabcc3cdafc0ad38e81d1f473bb6220.pdf
+65bba9fba03e420c96ec432a2a82521ddd848c09,http://pdfs.semanticscholar.org/65bb/a9fba03e420c96ec432a2a82521ddd848c09.pdf
+655d9ba828eeff47c600240e0327c3102b9aba7c,http://cs.gmu.edu/~carlotta/publications/kpools.pdf
+656a59954de3c9fcf82ffcef926af6ade2f3fdb5,http://pdfs.semanticscholar.org/656a/59954de3c9fcf82ffcef926af6ade2f3fdb5.pdf
+652aac54a3caf6570b1c10c993a5af7fa2ef31ff,http://pdfs.semanticscholar.org/652a/ac54a3caf6570b1c10c993a5af7fa2ef31ff.pdf
+656ef752b363a24f84cc1aeba91e4fa3d5dd66ba,http://pdfs.semanticscholar.org/656e/f752b363a24f84cc1aeba91e4fa3d5dd66ba.pdf
+656aeb92e4f0e280576cbac57d4abbfe6f9439ea,http://pdfs.semanticscholar.org/656a/eb92e4f0e280576cbac57d4abbfe6f9439ea.pdf
+6502cf30c088c6c7c4b2a05b7777b032c9dde7cd,http://vipl.ict.ac.cn/homepage/CVPR15Metric/ref/Learning%20compact%20binary%20face%20descriptor%20for%20face%20recognition_PAMI2015.pdf
+6577c76395896dd4d352f7b1ee8b705b1a45fa90,http://ai.stanford.edu/~kdtang/papers/icip10_kinship.pdf
+650bfe7acc3f03eb4ba91d9f93da8ef0ae8ba772,http://pdfs.semanticscholar.org/650b/fe7acc3f03eb4ba91d9f93da8ef0ae8ba772.pdf
+65293ecf6a4c5ab037a2afb4a9a1def95e194e5f,http://pdfs.semanticscholar.org/6529/3ecf6a4c5ab037a2afb4a9a1def95e194e5f.pdf
+65817963194702f059bae07eadbf6486f18f4a0a,http://arxiv.org/pdf/1505.04141v2.pdf
+6581c5b17db7006f4cc3575d04bfc6546854a785,http://pdfs.semanticscholar.org/6581/c5b17db7006f4cc3575d04bfc6546854a785.pdf
+6515fe829d0b31a5e1f4dc2970a78684237f6edb,http://pdfs.semanticscholar.org/6515/fe829d0b31a5e1f4dc2970a78684237f6edb.pdf
+62d1a31b8acd2141d3a994f2d2ec7a3baf0e6dc4,http://pdfs.semanticscholar.org/62d1/a31b8acd2141d3a994f2d2ec7a3baf0e6dc4.pdf
+62694828c716af44c300f9ec0c3236e98770d7cf,http://pdfs.semanticscholar.org/6269/4828c716af44c300f9ec0c3236e98770d7cf.pdf
+6261eb75066f779e75b02209fbd3d0f02d3e1e45,http://pdfs.semanticscholar.org/6261/eb75066f779e75b02209fbd3d0f02d3e1e45.pdf
+622daa25b5e6af69f0dac3a3eaf4050aa0860396,http://pdfs.semanticscholar.org/af52/4ffcedaa50cff30607e6ad8e270ad0d7bf71.pdf
+62f0d8446adee6a5e8102053a63a61af07ac4098,http://www.vision.cs.chubu.ac.jp/MPRG/C_group/C072_yamashita2015.pdf
+62f60039a95692baaeaae79a013c7f545e2a6c3d,http://www.researchgate.net/profile/G_Boato/publication/242336498_Identify_computer_generated_characters_by_analysing_facial_expressions_variation/links/0f3175360a34547478000000.pdf
+62374b9e0e814e672db75c2c00f0023f58ef442c,http://pdfs.semanticscholar.org/6237/4b9e0e814e672db75c2c00f0023f58ef442c.pdf
+6257a622ed6bd1b8759ae837b50580657e676192,http://pdfs.semanticscholar.org/b8d8/501595f38974e001a66752dc7098db13dfec.pdf
+6226f2ea345f5f4716ac4ddca6715a47162d5b92,http://pdfs.semanticscholar.org/6226/f2ea345f5f4716ac4ddca6715a47162d5b92.pdf
+62e913431bcef5983955e9ca160b91bb19d9de42,http://pdfs.semanticscholar.org/62e9/13431bcef5983955e9ca160b91bb19d9de42.pdf
+62c435bc714f13a373926e3b1914786592ed1fef,http://assistech.iitd.ernet.in/mavi-embedded-device.pdf
+624e9d9d3d941bab6aaccdd93432fc45cac28d4b,https://arxiv.org/pdf/1505.00296v1.pdf
+620e1dbf88069408b008347cd563e16aeeebeb83,http://pdfs.semanticscholar.org/620e/1dbf88069408b008347cd563e16aeeebeb83.pdf
+624496296af19243d5f05e7505fd927db02fd0ce,http://ibug.doc.ic.ac.uk/media/uploads/documents/tzimiro_pantic_cvpr_2014.pdf
+621ed006945e9438910b5aa4f6214888dea3d791,http://figment.cse.usf.edu/~sfefilat/data/papers/ThAT9.20.pdf
+621ff353960d5d9320242f39f85921f72be69dc8,http://www.research.rutgers.edu/~xiangyu/paper/FG_2013.pdf
+62a30f1b149843860938de6dd6d1874954de24b7,http://mmlab.ie.cuhk.edu.hk/archive/2009/09_fast_algorithm.pdf
+621e8882c41cdaf03a2c4a986a6404f0272ba511,http://conradsanderson.id.au/pdfs/wong_ijcnn_2012.pdf
+62e0380a86e92709fe2c64e6a71ed94d152c6643,http://vislab.ucr.edu/PUBLICATIONS/pubs/Journal%20and%20Conference%20Papers/after10-1-1997/Conference/2012/Facial%20emotion%20recognition%20with%20expression%20energy12.pdf
+621f656fedda378ceaa9c0096ebb1556a42e5e0f,http://sibgrapi.sid.inpe.br/col/sid.inpe.br/sibgrapi/2016/07.19.17.24/doc/PID4367205.pdf?ibiurl.language=en
+965f8bb9a467ce9538dec6bef57438964976d6d9,http://www4.comp.polyu.edu.hk/~csajaykr/myhome/papers/ISBA2016.pdf
+961a5d5750f18e91e28a767b3cb234a77aac8305,http://pdfs.semanticscholar.org/961a/5d5750f18e91e28a767b3cb234a77aac8305.pdf
+9626bcb3fc7c7df2c5a423ae8d0a046b2f69180c,http://pdfs.semanticscholar.org/9626/bcb3fc7c7df2c5a423ae8d0a046b2f69180c.pdf
+968b983fa9967ff82e0798a5967920188a3590a8,http://pdfs.semanticscholar.org/968b/983fa9967ff82e0798a5967920188a3590a8.pdf
+969fd48e1a668ab5d3c6a80a3d2aeab77067c6ce,http://pdfs.semanticscholar.org/969f/d48e1a668ab5d3c6a80a3d2aeab77067c6ce.pdf
+96faccdddef887673d6007fed8ff2574580cae1f,http://pdfs.semanticscholar.org/96fa/ccdddef887673d6007fed8ff2574580cae1f.pdf
+961939e96eed6620b1752721ab520745ac5329c6,http://www.cs.umd.edu/~gaurav/research/frgcWorkshop.pdf
+960ad662c2bb454d69006492cc3f52d1550de55d,http://www.research.att.com/~yifanhu/PUB/gmap_cga.pdf
+9696b172d66e402a2e9d0a8d2b3f204ad8b98cc4,http://pdfs.semanticscholar.org/9696/b172d66e402a2e9d0a8d2b3f204ad8b98cc4.pdf
+964a3196d44f0fefa7de3403849d22bbafa73886,http://pdfs.semanticscholar.org/964a/3196d44f0fefa7de3403849d22bbafa73886.pdf
+96f4a1dd1146064d1586ebe86293d02e8480d181,http://pdfs.semanticscholar.org/96f4/a1dd1146064d1586ebe86293d02e8480d181.pdf
+9606b1c88b891d433927b1f841dce44b8d3af066,http://pdfs.semanticscholar.org/9606/b1c88b891d433927b1f841dce44b8d3af066.pdf
+966e36f15b05ef8436afecf57a97b73d6dcada94,http://pdfs.semanticscholar.org/966e/36f15b05ef8436afecf57a97b73d6dcada94.pdf
+969dd8bc1179c047523d257516ade5d831d701ad,http://pdfs.semanticscholar.org/969d/d8bc1179c047523d257516ade5d831d701ad.pdf
+96578785836d7416bf2e9c154f687eed8f93b1e4,http://pdfs.semanticscholar.org/9657/8785836d7416bf2e9c154f687eed8f93b1e4.pdf
+96e0cfcd81cdeb8282e29ef9ec9962b125f379b0,http://megaface.cs.washington.edu/KemelmacherMegaFaceCVPR16.pdf
+968f472477a8afbadb5d92ff1b9c7fdc89f0c009,http://pdfs.semanticscholar.org/968f/472477a8afbadb5d92ff1b9c7fdc89f0c009.pdf
+96e731e82b817c95d4ce48b9e6b08d2394937cf8,http://arxiv.org/pdf/1508.01722v2.pdf
+9686dcf40e6fdc4152f38bd12b929bcd4f3bbbcc,http://pdfs.semanticscholar.org/9686/dcf40e6fdc4152f38bd12b929bcd4f3bbbcc.pdf
+9636c7d3643fc598dacb83d71f199f1d2cc34415,http://pdfs.semanticscholar.org/9636/c7d3643fc598dacb83d71f199f1d2cc34415.pdf
+3abe50d0a806a9f5a5626f60f590632a6d87f0c4,http://vis.uky.edu/~gravity/publications/2008/Estimating_Xinyu.pdf
+3af8d38469fb21368ee947d53746ea68cd64eeae,http://pdfs.semanticscholar.org/3af8/d38469fb21368ee947d53746ea68cd64eeae.pdf
+3a2fc58222870d8bed62442c00341e8c0a39ec87,http://pdfs.semanticscholar.org/3a2f/c58222870d8bed62442c00341e8c0a39ec87.pdf
+3a76e9fc2e89bdd10a9818f7249fbf61d216efc4,http://openaccess.thecvf.com/content_ICCV_2017/papers/Nagpal_Face_Sketch_Matching_ICCV_2017_paper.pdf
+3a92de0a4a0ef4f88e1647633f1fbb13cd6a3c95,http://impca.cs.curtin.edu.au/pubs/2007/conferences/an_liu_venkatesh_cvpr07.pdf
+3a0ea368d7606030a94eb5527a12e6789f727994,http://pdfs.semanticscholar.org/c7ca/eb8ecb6a38bdd65ddd25aca4fdd79203ddef.pdf
+3a804cbf004f6d4e0b041873290ac8e07082b61f,http://pdfs.semanticscholar.org/5ce8/e665a6512c09f15d8528ce6bece1f6a4d138.pdf
+3a04eb72aa64760dccd73e68a3b2301822e4cdc3,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Peng_Scalable_Sparse_Subspace_2013_CVPR_paper.pdf
+3af130e2fd41143d5fc49503830bbd7bafd01f8b,http://pdfs.semanticscholar.org/db76/002794c12e5febc30510de58b54bb9344ea9.pdf
+3a2cf589f5e11ca886417b72c2592975ff1d8472,http://pdfs.semanticscholar.org/3a2c/f589f5e11ca886417b72c2592975ff1d8472.pdf
+3ada7640b1c525056e6fcd37eea26cd638815cd6,http://pdfs.semanticscholar.org/3ada/7640b1c525056e6fcd37eea26cd638815cd6.pdf
+3abc833f4d689f37cc8a28f47fb42e32deaa4b17,http://www.cs.virginia.edu/~vicente/files/ijcv_bigdata.pdf
+3acb6b3e3f09f528c88d5dd765fee6131de931ea,http://vislab.ucr.edu/PUBLICATIONS/pubs/Journal%20and%20Conference%20Papers/after10-1-1997/Conference/2017/novelRepresentation.pdf
+3a60678ad2b862fa7c27b11f04c93c010cc6c430,http://ibug.doc.ic.ac.uk/media/uploads/documents/taffcsi-2010-11-0112-2.pdf
+3a591a9b5c6d4c62963d7374d58c1ae79e3a4039,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2014/W04/papers/Artan_Driver_Cell_Phone_2014_CVPR_paper.pdf
+3aa9c8c65ce63eb41580ba27d47babb1100df8a3,http://www.csb.uncw.edu/mscsis/complete/pdf/VandeventerJason_Final.pdf
+3a0a839012575ba455f2b84c2d043a35133285f9,http://pdfs.semanticscholar.org/76a1/dca3a9c2b0229c1b12c95752dcf40dc95a11.pdf
+3af1a375c7c1decbcf5c3a29774e165cafce390c,https://www.cbica.upenn.edu/sbia/papers/540.pdf
+3a846704ef4792dd329a5c7a2cb8b330ab6b8b4e,http://www.wjscheirer.com/papers/wjs_cswb2010_grab.pdf
+3a2a37ca2bdc82bba4c8e80b45d9f038fe697c7d,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Vahdat_Handling_Uncertain_Tags_2013_ICCV_paper.pdf
+3a95eea0543cf05670e9ae28092a114e3dc3ab5c,https://arxiv.org/pdf/1209.0841v7.pdf
+3a4f522fa9d2c37aeaed232b39fcbe1b64495134,http://ijireeice.com/upload/2016/may-16/IJIREEICE%20101.pdf
+54bb25a213944b08298e4e2de54f2ddea890954a,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Moschoglou_AgeDB_The_First_CVPR_2017_paper.pdf
+54bae57ed37ce50e859cbc4d94d70cc3a84189d5,http://pdfs.semanticscholar.org/af65/4a7ec15168b16382bd604889ea07a967dac6.pdf
+54f442c7fa4603f1814ebd8eba912a00dceb5cb2,http://pdfs.semanticscholar.org/54f4/42c7fa4603f1814ebd8eba912a00dceb5cb2.pdf
+543f21d81bbea89f901dfcc01f4e332a9af6682d,http://pdfs.semanticscholar.org/543f/21d81bbea89f901dfcc01f4e332a9af6682d.pdf
+5456166e3bfe78a353df988897ec0bd66cee937f,http://pdfs.semanticscholar.org/5456/166e3bfe78a353df988897ec0bd66cee937f.pdf
+541f1436c8ffef1118a0121088584ddbfd3a0a8a,http://crcv.ucf.edu/ICCV13-Action-Workshop/index.files/NotebookPapers13/A%20Spatio-Temporal%20Feature%20based%20on%20Triangulation%20of%20Dense%20SURF.pdf
+54aacc196ffe49b3450059fccdf7cd3bb6f6f3c3,http://www.cs.toronto.edu/~vnair/iccv11.pdf
+541bccf19086755f8b5f57fd15177dc49e77d675,http://pdfs.semanticscholar.org/541b/ccf19086755f8b5f57fd15177dc49e77d675.pdf
+5495e224ac7b45b9edc5cfeabbb754d8a40a879b,http://pdfs.semanticscholar.org/5495/e224ac7b45b9edc5cfeabbb754d8a40a879b.pdf
+54756f824befa3f0c2af404db0122f5b5bbf16e0,http://pdfs.semanticscholar.org/5475/6f824befa3f0c2af404db0122f5b5bbf16e0.pdf
+549c719c4429812dff4d02753d2db11dd490b2ae,http://openaccess.thecvf.com/content_cvpr_2017/papers/Real_YouTube-BoundingBoxes_A_Large_CVPR_2017_paper.pdf
+98b2f21db344b8b9f7747feaf86f92558595990c,http://pdfs.semanticscholar.org/b9f0/29075a36f15202f0d213fe222dcf237fe65f.pdf
+98142103c311b67eeca12127aad9229d56b4a9ff,http://pdfs.semanticscholar.org/9814/2103c311b67eeca12127aad9229d56b4a9ff.pdf
+9820920d4544173e97228cb4ab8b71ecf4548475,http://pdfs.semanticscholar.org/9820/920d4544173e97228cb4ab8b71ecf4548475.pdf
+989332c5f1b22604d6bb1f78e606cb6b1f694e1a,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Wang_Recurrent_Face_Aging_CVPR_2016_paper.pdf
+982f5c625d6ad0dac25d7acbce4dabfb35dd7f23,http://pdfs.semanticscholar.org/982f/5c625d6ad0dac25d7acbce4dabfb35dd7f23.pdf
+98af221afd64a23e82c40fd28d25210c352e41b7,http://pdfs.semanticscholar.org/d2fb/a31b394ea016b57f45bead77534fd8f7fbfa.pdf
+9893865afdb1de55fdd21e5d86bbdb5daa5fa3d5,http://pdfs.semanticscholar.org/9893/865afdb1de55fdd21e5d86bbdb5daa5fa3d5.pdf
+988d1295ec32ce41d06e7cf928f14a3ee079a11e,http://pdfs.semanticscholar.org/988d/1295ec32ce41d06e7cf928f14a3ee079a11e.pdf
+98a120802aef324599e8b9014decfeb2236a78a3,http://nyunetworks.com/Pubs/butler-chi16.pdf
+98c548a4be0d3b62971e75259d7514feab14f884,http://pdfs.semanticscholar.org/98c5/48a4be0d3b62971e75259d7514feab14f884.pdf
+9887ab220254859ffc7354d5189083a87c9bca6e,http://pdfs.semanticscholar.org/9887/ab220254859ffc7354d5189083a87c9bca6e.pdf
+985cd420c00d2f53965faf63358e8c13d1951fa8,http://pdfs.semanticscholar.org/985c/d420c00d2f53965faf63358e8c13d1951fa8.pdf
+981449cdd5b820268c0876477419cba50d5d1316,http://pdfs.semanticscholar.org/9814/49cdd5b820268c0876477419cba50d5d1316.pdf
+9863dd1e2a3d3b4910a91176ac0f2fee5eb3b5e1,http://xm2vtsdb.ee.surrey.ac.uk/CVSSP/Publications/papers/kim-ieee-2006.pdf
+9821669a989a3df9d598c1b4332d17ae8e35e294,http://pdfs.semanticscholar.org/9821/669a989a3df9d598c1b4332d17ae8e35e294.pdf
+9854145f2f64d52aac23c0301f4bb6657e32e562,http://www.ucsp.edu.pe/sibgrapi2013/eproceedings/technical/114953_2.pdf
+98c2053e0c31fab5bcb9ce5386335b647160cc09,https://smartech.gatech.edu/bitstream/handle/1853/45502/GT-CS-12-10.pdf
+98127346920bdce9773aba6a2ffc8590b9558a4a,http://disi.unitn.it/~duta/pubs/MTAP2017_Duta.pdf
+98a660c15c821ea6d49a61c5061cd88e26c18c65,http://pdfs.semanticscholar.org/98a6/60c15c821ea6d49a61c5061cd88e26c18c65.pdf
+982fed5c11e76dfef766ad9ff081bfa25e62415a,https://pdfs.semanticscholar.org/c7fa/d91ba4e33f64d584c928b1200327815f09e6.pdf
+98fb3890c565f1d32049a524ec425ceda1da5c24,http://pdfs.semanticscholar.org/98fb/3890c565f1d32049a524ec425ceda1da5c24.pdf
+98519f3f615e7900578bc064a8fb4e5f429f3689,http://pdfs.semanticscholar.org/9851/9f3f615e7900578bc064a8fb4e5f429f3689.pdf
+9825aa96f204c335ec23c2b872855ce0c98f9046,http://pdfs.semanticscholar.org/9825/aa96f204c335ec23c2b872855ce0c98f9046.pdf
+980266ad6807531fea94252e8f2b771c20e173b3,http://pdfs.semanticscholar.org/9802/66ad6807531fea94252e8f2b771c20e173b3.pdf
+53d78c8dbac7c9be8eb148c6a9e1d672f1dd72f9,http://pdfs.semanticscholar.org/53d7/8c8dbac7c9be8eb148c6a9e1d672f1dd72f9.pdf
+53cfe4817ac2eecbe4e286709a9140a5fe729b35,http://www.cv.iit.nrc.ca/VI/fpiv04/pdf/17fa.pdf
+5334ac0a6438483890d5eef64f6db93f44aacdf4,http://pdfs.semanticscholar.org/5334/ac0a6438483890d5eef64f6db93f44aacdf4.pdf
+53e081f5af505374c3b8491e9c4470fe77fe7934,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Hsieh_Unconstrained_Realtime_Facial_2015_CVPR_paper.pdf
+53698b91709112e5bb71eeeae94607db2aefc57c,http://pdfs.semanticscholar.org/5369/8b91709112e5bb71eeeae94607db2aefc57c.pdf
+531fd9be964d18ba7970bd1ca6c3b9dc91b8d2ab,http://pdfs.semanticscholar.org/531f/d9be964d18ba7970bd1ca6c3b9dc91b8d2ab.pdf
+5394d42fd27b7e14bd875ec71f31fdd2fcc8f923,http://pdfs.semanticscholar.org/5394/d42fd27b7e14bd875ec71f31fdd2fcc8f923.pdf
+5397c34a5e396658fa57e3ca0065a2878c3cced7,http://www.iis.sinica.edu.tw/papers/song/5959-F.pdf
+539ca9db570b5e43be0576bb250e1ba7a727d640,http://pdfs.semanticscholar.org/539c/a9db570b5e43be0576bb250e1ba7a727d640.pdf
+539287d8967cdeb3ef60d60157ee93e8724efcac,http://pdfs.semanticscholar.org/e5ae/05a05eefbf416eb2e13ec080f1a166dde735.pdf
+532f7ec8e0c8f7331417dd4a45dc2e8930874066,http://mirlab.org/conference_papers/International_Conference/ICASSP%202014/papers/p6060-zoidi.pdf
+53c8cbc4a3a3752a74f79b74370ed8aeed97db85,http://pdfs.semanticscholar.org/53c8/cbc4a3a3752a74f79b74370ed8aeed97db85.pdf
+5366573e96a1dadfcd4fd592f83017e378a0e185,http://pdfs.semanticscholar.org/5366/573e96a1dadfcd4fd592f83017e378a0e185.pdf
+533bfb82c54f261e6a2b7ed7d31a2fd679c56d18,http://biometrics.cse.msu.edu/Publications/Face/BestRowdenetal_UnconstrainedFaceRecognition_TechReport_MSU-CSE-14-1.pdf
+539ae0920815eb248939165dd5d1b0188ff7dca2,http://www.ele.puc-rio.br/~visao/Topicos/Prince%20and%20Helder%202007%20Probabilistic%20linear%20discriminant%20analysis.pdf
+537d8c4c53604fd419918ec90d6ef28d045311d0,https://arxiv.org/pdf/1704.08821v1.pdf
+530ce1097d0681a0f9d3ce877c5ba31617b1d709,https://pdfs.semanticscholar.org/530c/e1097d0681a0f9d3ce877c5ba31617b1d709.pdf
+3fbd68d1268922ee50c92b28bd23ca6669ff87e5,http://pdfs.semanticscholar.org/f563/6a8021c09870c350e7505c87625fe1681bd4.pdf
+3fe4109ded039ac9d58eb9f5baa5327af30ad8b6,http://www.cvc.uab.cat/~ahernandez/files/CVPR2010STGRABCUT.pdf
+3f22a4383c55ceaafe7d3cfed1b9ef910559d639,http://pdfs.semanticscholar.org/3f22/a4383c55ceaafe7d3cfed1b9ef910559d639.pdf
+3fefc856a47726d19a9f1441168480cee6e9f5bb,http://pdfs.semanticscholar.org/e0e6/bf37d374f9c5cb2461ea87190e234c466d63.pdf
+3fdcc1e2ebcf236e8bb4a6ce7baf2db817f30001,http://pdfs.semanticscholar.org/4032/8c9de5a0a90a8c24e80db7924f0281b46484.pdf
+3f7cf52fb5bf7b622dce17bb9dfe747ce4a65b96,http://poseidon.csd.auth.gr/papers/PUBLISHED/JOURNAL/pdf/2014/MM02014.pdf
+3f0c51989c516a7c5dee7dec4d7fb474ae6c28d9,https://arxiv.org/pdf/1611.06638.pdf
+3f848d6424f3d666a1b6dd405a48a35a797dd147,http://pdfs.semanticscholar.org/4f69/233cd6f0b56833c9395528aa007b63158a1d.pdf
+3fa738ab3c79eacdbfafa4c9950ef74f115a3d84,http://pdfs.semanticscholar.org/3fa7/38ab3c79eacdbfafa4c9950ef74f115a3d84.pdf
+3fb26f3abcf0d287243646426cd5ddeee33624d4,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Qin_Joint_Training_of_CVPR_2016_paper.pdf
+3f57c3fc2d9d4a230ccb57eed1d4f0b56062d4d5,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2013/W16/papers/Hsu_Face_Recognition_across_2013_CVPR_paper.pdf
+3feb69531653e83d0986a0643e4a6210a088e3e5,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR_2007/data/papers/workshops/SLAM2007/papers/10-1569042275.pdf
+3f12701449a82a5e01845001afab3580b92da858,http://pdfs.semanticscholar.org/e4f5/2f5e116f0cc486d033e4b8fc737944343db7.pdf
+3f204a413d9c8c16f146c306c8d96b91839fed0c,http://www.menpo.org/pages/paper/Menpo_ACM_MM_2014.pdf
+3fde656343d3fd4223e08e0bc835552bff4bda40,http://pdfs.semanticscholar.org/3fde/656343d3fd4223e08e0bc835552bff4bda40.pdf
+3f957142ef66f2921e7c8c7eadc8e548dccc1327,https://ibug.doc.ic.ac.uk/media/uploads/documents/combined_model_lda_&_svms.pdf
+3fdfd6fa7a1cc9142de1f53e4ac7c2a7ac64c2e3,http://pdfs.semanticscholar.org/3fdf/d6fa7a1cc9142de1f53e4ac7c2a7ac64c2e3.pdf
+3f540faf85e1f8de6ce04fb37e556700b67e4ad3,http://pdfs.semanticscholar.org/3f54/0faf85e1f8de6ce04fb37e556700b67e4ad3.pdf
+3fb3c7dd12561e9443ac301f5527d539b1f4574e,http://www.research.rutgers.edu/~shaoting/paper/ICCV13.pdf
+3f4bfa4e3655ef392eb5ad609d31c05f29826b45,http://pdfs.semanticscholar.org/3f4b/fa4e3655ef392eb5ad609d31c05f29826b45.pdf
+3f5cf3771446da44d48f1d5ca2121c52975bb3d3,http://pdfs.semanticscholar.org/3f5c/f3771446da44d48f1d5ca2121c52975bb3d3.pdf
+3f14b504c2b37a0e8119fbda0eff52efb2eb2461,https://ibug.doc.ic.ac.uk/media/uploads/documents/eleftheriadis_tip_2016.pdf
+3fac7c60136a67b320fc1c132fde45205cd2ac66,http://pdfs.semanticscholar.org/3fac/7c60136a67b320fc1c132fde45205cd2ac66.pdf
+3fd90098551bf88c7509521adf1c0ba9b5dfeb57,http://pub.ist.ac.at/~chl/papers/lampert-pami2013.pdf
+3f623bb0c9c766a5ac612df248f4a59288e4d29f,http://pdfs.semanticscholar.org/3f62/3bb0c9c766a5ac612df248f4a59288e4d29f.pdf
+3f4798c7701da044bdb7feb61ebdbd1d53df5cfe,http://sip.unige.ch/articles/2015/2015.EUSIPCO.Vector.quantization.pdf
+3f4c262d836b2867a53eefb959057350bf7219c9,http://pdfs.semanticscholar.org/3f4c/262d836b2867a53eefb959057350bf7219c9.pdf
+3f7723ab51417b85aa909e739fc4c43c64bf3e84,http://pdfs.semanticscholar.org/3f77/23ab51417b85aa909e739fc4c43c64bf3e84.pdf
+3f63f9aaec8ba1fa801d131e3680900680f14139,http://dspace.nitrkl.ac.in/dspace/bitstream/2080/2288/1/4a.pdf
+3f0e0739677eb53a9d16feafc2d9a881b9677b63,http://pdfs.semanticscholar.org/d309/e414f0d6e56e7ba45736d28ee58ae2bad478.pdf
+3039627fa612c184228b0bed0a8c03c7f754748c,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Wu_Robust_Regression_on_2015_CVPR_paper.pdf
+303065c44cf847849d04da16b8b1d9a120cef73a,http://pdfs.semanticscholar.org/3030/65c44cf847849d04da16b8b1d9a120cef73a.pdf
+303a7099c01530fa0beb197eb1305b574168b653,http://openaccess.thecvf.com/content_cvpr_2016/papers/Zhang_Occlusion-Free_Face_Alignment_CVPR_2016_paper.pdf
+30aa681ab80a830c3890090b0da3f1e786bd66ff,https://arxiv.org/pdf/1708.02337v1.pdf
+30cd39388b5c1aae7d8153c0ab9d54b61b474ffe,http://pdfs.semanticscholar.org/3be8/f1f7501978287af8d7ebfac5963216698249.pdf
+303517dfc327c3004ae866a6a340f16bab2ee3e3,http://pdfs.semanticscholar.org/3035/17dfc327c3004ae866a6a340f16bab2ee3e3.pdf
+309e17e6223e13b1f76b5b0eaa123b96ef22f51b,https://static.aminer.org/pdf/PDF/000/337/771/image_synthesis_and_face_recognition_based_on_d_face_model.pdf
+3046baea53360a8c5653f09f0a31581da384202e,http://pdfs.semanticscholar.org/3046/baea53360a8c5653f09f0a31581da384202e.pdf
+3026722b4cbe9223eda6ff2822140172e44ed4b1,http://chenlab.ece.cornell.edu/people/Andy/Andy_files/GallagherICCV09Demographics.pdf
+3028690d00bd95f20842d4aec84dc96de1db6e59,http://pdfs.semanticscholar.org/775f/9b8bc0ff151ee62b5e777f0aa9b09484ef8a.pdf
+30c96cc041bafa4f480b7b1eb5c45999701fe066,http://vislab.ucr.edu/PUBLICATIONS/pubs/Journal%20and%20Conference%20Papers/after10-1-1997/Journals/2014/DiscreteCosineTransformLocality-SensitiveHashes14.pdf
+306957285fea4ce11a14641c3497d01b46095989,http://pdfs.semanticscholar.org/3069/57285fea4ce11a14641c3497d01b46095989.pdf
+304b1f14ca6a37552dbfac443f3d5b36dbe1a451,http://pdfs.semanticscholar.org/304b/1f14ca6a37552dbfac443f3d5b36dbe1a451.pdf
+306127c3197eb5544ab1e1bf8279a01e0df26120,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Huang_Sparse_Coding_and_CVPR_2016_paper.pdf
+307a810d1bf6f747b1bd697a8a642afbd649613d,http://pdfs.semanticscholar.org/307a/810d1bf6f747b1bd697a8a642afbd649613d.pdf
+30180f66d5b4b7c0367e4b43e2b55367b72d6d2a,http://www.robots.ox.ac.uk/~vgg/publications/2017/Crosswhite17/crosswhite17.pdf
+30c5d2ec584e7b8273af6915aab420fc23ff2761,http://imi.ntu.edu.sg/IMIGraduatePrograms/IMIResearchSeminars/Documents/29_April_2014/REN_Jianfeng_29_April_2014.pdf
+3083d2c6d4f456e01cbb72930dc2207af98a6244,http://pdfs.semanticscholar.org/3083/d2c6d4f456e01cbb72930dc2207af98a6244.pdf
+302c9c105d49c1348b8f1d8cc47bead70e2acf08,http://pdfs.semanticscholar.org/302c/9c105d49c1348b8f1d8cc47bead70e2acf08.pdf
+30b74e60ec11c0ebc4e640637d56d85872dd17ce,http://pdfs.semanticscholar.org/c810/9382eea8f3fc49b3e6ed13d36eb95a06d0ed.pdf
+304a306d2a55ea41c2355bd9310e332fa76b3cb0,http://pdfs.semanticscholar.org/95da/2d1137637e89da8b7a16e0dc6168cfceb693.pdf
+3042d3727b2f80453ff5378b4b3043abb2d685a1,http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0219.pdf
+301b0da87027d6472b98361729faecf6e1d5e5f6,http://pdfs.semanticscholar.org/301b/0da87027d6472b98361729faecf6e1d5e5f6.pdf
+30b103d59f8460d80bb9eac0aa09aaa56c98494f,http://pdfs.semanticscholar.org/30b1/03d59f8460d80bb9eac0aa09aaa56c98494f.pdf
+5e97a1095f2811e0bc188f52380ea7c9c460c896,http://web.eecs.utk.edu/~rguo1/FacialParsing.pdf
+5e59193a0fc22a0c37301fb05b198dd96df94266,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Dessein_Example-Based_Modeling_of_ICCV_2015_paper.pdf
+5e0eb34aeb2b58000726540336771053ecd335fc,http://ies.anthropomatik.kit.edu/ies/download/publ/ies_2016_herrmann_low_quality.pdf
+5ebb247963d2d898d420f1f4a2486102a9d05aa9,http://bcmi.sjtu.edu.cn/~zhzhang/papers/nncw.pdf
+5e28673a930131b1ee50d11f69573c17db8fff3e,http://pdfs.semanticscholar.org/f28d/fadba11bd3489d008827d9b1a539b34b50df.pdf
+5ea9063b44b56d9c1942b8484572790dff82731e,https://ibug.doc.ic.ac.uk/media/uploads/documents/mlsp_2007_kotsia.pdf
+5e16f10f2d667d17c029622b9278b6b0a206d394,http://pdfs.semanticscholar.org/5e16/f10f2d667d17c029622b9278b6b0a206d394.pdf
+5ef3e7a2c8d2876f3c77c5df2bbaea8a777051a7,http://cbl.uh.edu/pub_files/ISBA-2016.pdf
+5ea165d2bbd305dc125415487ef061bce75dac7d,http://www.ece.northwestern.edu/~zli/new_home/MyPublications/conf/ICME2009-human-act-apd-final.pdf
+5e6ba16cddd1797853d8898de52c1f1f44a73279,http://pdfs.semanticscholar.org/5e6b/a16cddd1797853d8898de52c1f1f44a73279.pdf
+5ea9cba00f74d2e113a10c484ebe4b5780493964,http://pdfs.semanticscholar.org/5ea9/cba00f74d2e113a10c484ebe4b5780493964.pdf
+5e80e2ffb264b89d1e2c468fbc1b9174f0e27f43,http://www.cs.cmu.edu/~juny/Prof/papers/acmmm04a-jyang.pdf
+5ec94adc9e0f282597f943ea9f4502a2a34ecfc2,http://pdfs.semanticscholar.org/5ec9/4adc9e0f282597f943ea9f4502a2a34ecfc2.pdf
+5e0e516226413ea1e973f1a24e2fdedde98e7ec0,http://pdfs.semanticscholar.org/74ce/97da57ec848db660ee69dec709f226c74f43.pdf
+5e821cb036010bef259046a96fe26e681f20266e,https://pdfs.semanticscholar.org/d7e6/d52748c5ed386a90118fa385647c55954ab9.pdf
+5e7cb894307f36651bdd055a85fdf1e182b7db30,http://pdfs.semanticscholar.org/5e7c/b894307f36651bdd055a85fdf1e182b7db30.pdf
+5b693cb3bedaa2f1e84161a4261df9b3f8e77353,http://pdfs.semanticscholar.org/5b69/3cb3bedaa2f1e84161a4261df9b3f8e77353.pdf
+5b73b7b335f33cda2d0662a8e9520f357b65f3ac,http://www.iis.sinica.edu.tw/papers/song/16795-F.pdf
+5b6d05ce368e69485cb08dd97903075e7f517aed,http://pdfs.semanticscholar.org/5b6d/05ce368e69485cb08dd97903075e7f517aed.pdf
+5b0bf1063b694e4b1575bb428edb4f3451d9bf04,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w25/papers/Yang_Facial_Shape_Tracking_ICCV_2015_paper.pdf
+5b59e6b980d2447b2f3042bd811906694e4b0843,https://bib.irb.hr/datoteka/832723.PID4276755.pdf
+5bb53fb36a47b355e9a6962257dd465cd7ad6827,http://pdfs.semanticscholar.org/5bb5/3fb36a47b355e9a6962257dd465cd7ad6827.pdf
+5b89744d2ac9021f468b3ffd32edf9c00ed7fed7,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Bi_Beyond_Mahalanobis_Metric_2015_CVPR_paper.pdf
+5bfc32d9457f43d2488583167af4f3175fdcdc03,http://pdfs.semanticscholar.org/5bfc/32d9457f43d2488583167af4f3175fdcdc03.pdf
+5b7cb9b97c425b52b2e6f41ba8028836029c4432,http://www.cis.pku.edu.cn/faculty/vision/zlin/Publications/2014-CVPR-SMR.pdf
+5b6f0a508c1f4097dd8dced751df46230450b01a,http://www.eecs.berkeley.edu/Pubs/TechRpts/2010/EECS-2010-174.pdf
+5b9d41e2985fa815c0f38a2563cca4311ce82954,http://www.iti.gr/files/3dpvt04tsalakanidou.pdf
+5b6593a6497868a0d19312952d2b753232414c23,http://pdfs.semanticscholar.org/5b65/93a6497868a0d19312952d2b753232414c23.pdf
+5bb684dfe64171b77df06ba68997fd1e8daffbe1,http://pdfs.semanticscholar.org/f096/9403b5dfa54445d911aedd88ab25b0b6cd99.pdf
+5b719410e7829c98c074bc2947697fac3b505b64,http://pdfs.semanticscholar.org/ecec/d5c8b2472364fd7816033e8355215e34bb1b.pdf
+5bae9822d703c585a61575dced83fa2f4dea1c6d,http://pdfs.semanticscholar.org/5bae/9822d703c585a61575dced83fa2f4dea1c6d.pdf
+5bb87c7462c6c1ec5d60bde169c3a785ba5ea48f,http://pdfs.semanticscholar.org/7589/58f2340ba46c6708b73d5427985d5623a512.pdf
+5b9d9f5a59c48bc8dd409a1bd5abf1d642463d65,http://pdfs.semanticscholar.org/5b9d/9f5a59c48bc8dd409a1bd5abf1d642463d65.pdf
+5bf70c1afdf4c16fd88687b4cf15580fd2f26102,http://pdfs.semanticscholar.org/5bf7/0c1afdf4c16fd88687b4cf15580fd2f26102.pdf
+5b5962bdb75c72848c1fb4b34c113ff6101b5a87,http://research.microsoft.com/en-us/um/people/leizhang/paper/TMM2011_Xiao.pdf
+5bcc8ef74efbb959407adfda15a01dad8fcf1648,http://pdfs.semanticscholar.org/5bcc/8ef74efbb959407adfda15a01dad8fcf1648.pdf
+5b01d4338734aefb16ee82c4c59763d3abc008e6,http://pdfs.semanticscholar.org/5b01/d4338734aefb16ee82c4c59763d3abc008e6.pdf
+5bdd9f807eec399bb42972a33b83afc8b607c05c,http://www.umiacs.umd.edu/~pvishalm/Journal_pub/SPM_DA_v9.pdf
+5b6ecbf5f1eecfe1a9074d31fe2fb030d75d9a79,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2014/W01/papers/Yang_Improving_3D_Face_2014_CVPR_paper.pdf
+5b86c36e3eb59c347b81125d5dd57dd2a2c377a9,http://pdfs.semanticscholar.org/5b86/c36e3eb59c347b81125d5dd57dd2a2c377a9.pdf
+5be3cc1650c918da1c38690812f74573e66b1d32,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Sandeep_Relative_Parts_Distinctive_2014_CVPR_paper.pdf
+5bc0a89f4f73523967050374ed34d7bc89e4d9e1,http://pdfs.semanticscholar.org/5bc0/a89f4f73523967050374ed34d7bc89e4d9e1.pdf
+5b6bed112e722c0629bcce778770d1b28e42fc96,http://pdfs.semanticscholar.org/5b6b/ed112e722c0629bcce778770d1b28e42fc96.pdf
+5bde1718253ec28a753a892b0ba82d8e553b6bf3,http://pdfs.semanticscholar.org/5bde/1718253ec28a753a892b0ba82d8e553b6bf3.pdf
+5b0ebb8430a04d9259b321fc3c1cc1090b8e600e,http://www.openu.ac.il/home/hassner/projects/Ossk/WolfHassnerTaigman_ICCV09.pdf
+37c8514df89337f34421dc27b86d0eb45b660a5e,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w25/papers/Uricar_Facial_Landmark_Tracking_ICCV_2015_paper.pdf
+371f40f6d32ece05cc879b6954db408b3d4edaf3,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2B_100_ext.pdf
+374c7a2898180723f3f3980cbcb31c8e8eb5d7af,http://poseidon.csd.auth.gr/papers/PUBLISHED/CONFERENCE/pdf/Kotsia07a.pdf
+37007af698b990a3ea8592b11d264b14d39c843f,http://acberg.com/papers/dcmsvm.pdf
+374a0df2aa63b26737ee89b6c7df01e59b4d8531,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Yuan_Temporal_Action_Localization_CVPR_2016_paper.pdf
+378ae5ca649f023003021f5a63e393da3a4e47f0,http://vision.ucsd.edu/~carolina/files/galleguillos_cvpr10.pdf
+37619564574856c6184005830deda4310d3ca580,http://arxiv.org/pdf/1508.04389v1.pdf
+37ce1d3a6415d6fc1760964e2a04174c24208173,http://www.cse.msu.edu/~liuxm/publication/Jourabloo_Liu_ICCV2015.pdf
+3765c26362ad1095dfe6744c6d52494ea106a42c,http://www.vision.ee.ethz.ch/~tquack/gammeter_quack_iccv2009.pdf
+37179032085e710d1d62a1ba2e9c1f63bb4dde91,http://eprints.soton.ac.uk/363288/1/tome%20tifs.pdf
+3727ac3d50e31a394b200029b2c350073c1b69e3,http://arxiv.org/pdf/1605.03639v2.pdf
+37f2e03c7cbec9ffc35eac51578e7e8fdfee3d4e,http://www.cse.iitm.ac.in/~amittal/wacv2015_review.pdf
+37278ffce3a0fe2c2bbf6232e805dd3f5267eba3,http://arxiv.org/pdf/1602.04504v1.pdf
+377a1be5113f38297716c4bb951ebef7a93f949a,http://www.cris.ucr.edu/IGERT/Presentation2013/CruzAbstract.pdf
+377c6563f97e76a4dc836a0bd23d7673492b1aae,http://pdfs.semanticscholar.org/377c/6563f97e76a4dc836a0bd23d7673492b1aae.pdf
+370e0d9b89518a6b317a9f54f18d5398895a7046,http://pdfs.semanticscholar.org/370e/0d9b89518a6b317a9f54f18d5398895a7046.pdf
+37105ca0bc1f11fcc7c6b7946603f3d572571d76,http://vipl.ict.ac.cn/sites/default/files/papers/files/2012_TIST_dmzhai_Multi-view%20metric%20learning%20with%20global%20consistency%20and%20local%20smoothness.pdf
+37ba12271d09d219dd1a8283bc0b4659faf3a6c6,http://www.eecs.qmul.ac.uk/~sgg/papers/LayneEtAl_ARTERMIS2013.pdf
+3773e5d195f796b0b7df1fca6e0d1466ad84b5e7,http://pdfs.semanticscholar.org/3773/e5d195f796b0b7df1fca6e0d1466ad84b5e7.pdf
+37eb666b7eb225ffdafc6f318639bea7f0ba9a24,http://pdfs.semanticscholar.org/37eb/666b7eb225ffdafc6f318639bea7f0ba9a24.pdf
+375435fb0da220a65ac9e82275a880e1b9f0a557,https://ibug.doc.ic.ac.uk/media/uploads/documents/tpami_alignment.pdf
+37b6d6577541ed991435eaf899a2f82fdd72c790,http://pdfs.semanticscholar.org/37b6/d6577541ed991435eaf899a2f82fdd72c790.pdf
+37d6f0eb074d207b53885bd2eb78ccc8a04be597,http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf
+37ef18d71c1ca71c0a33fc625ef439391926bfbb,http://pdfs.semanticscholar.org/37ef/18d71c1ca71c0a33fc625ef439391926bfbb.pdf
+370b5757a5379b15e30d619e4d3fb9e8e13f3256,http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf
+081189493ca339ca49b1913a12122af8bb431984,http://pdfs.semanticscholar.org/0811/89493ca339ca49b1913a12122af8bb431984.pdf
+08ee541925e4f7f376538bc289503dd80399536f,http://pdfs.semanticscholar.org/08ee/541925e4f7f376538bc289503dd80399536f.pdf
+08d2f655361335bdd6c1c901642981e650dff5ec,http://dro.deakin.edu.au/eserv/DU:30058435/arandjelovic-automaticcastlisting-2006.pdf
+08fbe3187f31b828a38811cc8dc7ca17933b91e9,http://www.merl.com/publications/docs/TR2011-084.pdf
+08ae100805d7406bf56226e9c3c218d3f9774d19,http://pdfs.semanticscholar.org/08ae/100805d7406bf56226e9c3c218d3f9774d19.pdf
+085b5f9fd49432edab29e2c64f2a427fbce97f67,https://staff.fnwi.uva.nl/m.jain/pub/jain-objects-actions-cvpr2015.pdf
+08c18b2f57c8e6a3bfe462e599a6e1ce03005876,http://ca.cs.cmu.edu/sites/default/files/8uca_final_revision.pdf
+08f6ad0a3e75b715852f825d12b6f28883f5ca05,http://www.cse.msu.edu/biometrics/Publications/Face/JainKlarePark_FaceRecognition_ChallengesinForensics_FG11.pdf
+08ff81f3f00f8f68b8abd910248b25a126a4dfa4,https://research-information.bristol.ac.uk/files/74279764/Ioannis_Pitas_Symmetric_Subspace_Learning_for_Image_Analysis_2014.pdf
+081a431107eb38812b74a8cd036ca5e97235b499,http://webhost.uoradea.ro/ibuciu/IEEE_TNN_2008.pdf
+084bd02d171e36458f108f07265386f22b34a1ae,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Ren_Face_Alignment_at_2014_CVPR_paper.pdf
+081cb09791e7ff33c5d86fd39db00b2f29653fa8,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR_2009_WS/data/papers/09/22.pdf
+086131159999d79adf6b31c1e604b18809e70ba8,http://vinereactor.org/icpr2016.pdf
+0831a511435fd7d21e0cceddb4a532c35700a622,http://pdfs.semanticscholar.org/0831/a511435fd7d21e0cceddb4a532c35700a622.pdf
+0861f86fb65aa915fbfbe918b28aabf31ffba364,http://pdfs.semanticscholar.org/0861/f86fb65aa915fbfbe918b28aabf31ffba364.pdf
+089513ca240c6d672c79a46fa94a92cde28bd567,http://pdfs.semanticscholar.org/0895/13ca240c6d672c79a46fa94a92cde28bd567.pdf
+089b5e8eb549723020b908e8eb19479ba39812f5,http://www.face-recognition-challenge.com/RobustnessOfDCNN-preprint.pdf
+080c204edff49bf85b335d3d416c5e734a861151,http://pdfs.semanticscholar.org/d3d1/09d81dd0911dfde259b6878d737e50c834eb.pdf
+08a1fc55d03e4a73cad447e5c9ec79a6630f3e2d,http://pdfs.semanticscholar.org/b680/2fb123c594a9fd621ae576651201fcc4329a.pdf
+08d40ee6e1c0060d3b706b6b627e03d4b123377a,http://pdfs.semanticscholar.org/3daa/fe6389d877fe15d8823cdf5ac15fd919676f.pdf
+08c1f8f0e69c0e2692a2d51040ef6364fb263a40,http://pdfs.semanticscholar.org/0b20/0cf032430d74fd612601cc59d5af5608ceb4.pdf
+088aabe3da627432fdccf5077969e3f6402f0a80,http://pdfs.semanticscholar.org/088a/abe3da627432fdccf5077969e3f6402f0a80.pdf
+087002ab569e35432cdeb8e63b2c94f1abc53ea9,http://sergioescalera.com/wp-content/uploads/2015/07/CVPR2015MoeslundSlides.pdf
+08cb294a08365e36dd7ed4167b1fd04f847651a9,http://pdfs.semanticscholar.org/f75f/56bb1dcf721449f2fcc3634265f1e08e012c.pdf
+081286ede247c5789081502a700b378b6223f94b,http://pdfs.semanticscholar.org/0812/86ede247c5789081502a700b378b6223f94b.pdf
+08e995c080a566fe59884a527b72e13844b6f176,http://pdfs.semanticscholar.org/08e9/95c080a566fe59884a527b72e13844b6f176.pdf
+08e24f9df3d55364290d626b23f3d42b4772efb6,http://poseidon.csd.auth.gr/papers/PUBLISHED/CONFERENCE/pdf/Buciu06c.pdf
+085ceda1c65caf11762b3452f87660703f914782,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Jourabloo_Large-Pose_Face_Alignment_CVPR_2016_paper.pdf
+0830c9b9f207007d5e07f5269ffba003235e4eff,http://pdfs.semanticscholar.org/cf2e/1ebb9609f46af6de0c15b4f48d03e37e54ba.pdf
+08d55271589f989d90a7edce3345f78f2468a7e0,https://arxiv.org/pdf/1704.03373v1.pdf
+081fb4e97d6bb357506d1b125153111b673cc128,http://pdfs.semanticscholar.org/081f/b4e97d6bb357506d1b125153111b673cc128.pdf
+08a98822739bb8e6b1388c266938e10eaa01d903,http://homes.cs.washington.edu/~yoshi/papers/SensorSift_ACSAC_2012.pdf
+08f1e9e14775757298afd9039f46ec56e80677f9,http://pdfs.semanticscholar.org/08f1/e9e14775757298afd9039f46ec56e80677f9.pdf
+08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7,http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf
+082ad50ac59fc694ba4369d0f9b87430553b11db,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553696.pdf
+6dd052df6b0e89d394192f7f2af4a3e3b8f89875,http://pdfs.semanticscholar.org/6dd0/52df6b0e89d394192f7f2af4a3e3b8f89875.pdf
+6d7a32f594d46f4087b71e2a2bb66a4b25da5e30,http://pdfs.semanticscholar.org/6d7a/32f594d46f4087b71e2a2bb66a4b25da5e30.pdf
+6dd5dbb6735846b214be72983e323726ef77c7a9,http://pdfs.semanticscholar.org/6dd5/dbb6735846b214be72983e323726ef77c7a9.pdf
+6d10beb027fd7213dd4bccf2427e223662e20b7d,http://pdfs.semanticscholar.org/6d10/beb027fd7213dd4bccf2427e223662e20b7d.pdf
+6d2ca1ddacccc8c865112bd1fbf8b931c2ee8e75,http://hoques.com/Publications/2015/2015-ubicomp_rocspeak_Fung-etal.pdf
+6d207360148ec3991b70952315cb3f1e8899e977,http://www.researchgate.net/profile/Edwin_Hancock/publication/224649584_Estimating_Cast_Shadows_using_SFS_and_Class-based_Surface_Completion/links/004635239fd1ed7ac5000000.pdf
+6de18708218988b0558f6c2f27050bb4659155e4,https://arxiv.org/pdf/1611.05216v1.pdf
+6d97e69bbba5d1f5c353f9a514d62aff63bc0fb1,http://disi.unitn.it/~sebe/publications/MIR03.pdf
+6d91da37627c05150cb40cac323ca12a91965759,http://pdfs.semanticscholar.org/6d91/da37627c05150cb40cac323ca12a91965759.pdf
+6dd2a0f9ca8a5fee12edec1485c0699770b4cfdf,http://pdfs.semanticscholar.org/6dd2/a0f9ca8a5fee12edec1485c0699770b4cfdf.pdf
+6d4b5444c45880517213a2fdcdb6f17064b3fa91,http://pdfs.semanticscholar.org/6d4b/5444c45880517213a2fdcdb6f17064b3fa91.pdf
+6d4e3616d0b27957c4107ae877dc0dd4504b69ab,http://pdfs.semanticscholar.org/6d4e/3616d0b27957c4107ae877dc0dd4504b69ab.pdf
+6daccf3d15c617873954bb75de26f6b6b0a42772,http://arts.buaa.edu.cn/papers/Learning%20Templates%20for%20Artistic%20Portrait%20Lighting%20Analysis.pdf
+6d8e3f3a83514381f890ab7cd2a1f1c5be597b69,http://pdfs.semanticscholar.org/aeb1/83983f4ae1ea9e01005f5d546480190e0345.pdf
+6d8eef8f8d6cd8436c55018e6ca5c5907b31ac19,http://pdfs.semanticscholar.org/6d8e/ef8f8d6cd8436c55018e6ca5c5907b31ac19.pdf
+6d4103762e159130b32335cbf8893ee4dca26859,http://homepage.tudelft.nl/19j49/Publications_files/cogn_proc.pdf
+6d618657fa5a584d805b562302fe1090957194ba,http://pdfs.semanticscholar.org/6d61/8657fa5a584d805b562302fe1090957194ba.pdf
+6d66c98009018ac1512047e6bdfb525c35683b16,http://pdfs.semanticscholar.org/6d66/c98009018ac1512047e6bdfb525c35683b16.pdf
+016cbf0878db5c40566c1fbc237686fbad666a33,http://pdfs.semanticscholar.org/5a07/986f0a202eafbd1f1574fe2c3ae6abe2281f.pdf
+016800413ebd1a87730a5cf828e197f43a08f4b3,http://arxiv.org/pdf/1605.00743v1.pdf
+0172867f4c712b33168d9da79c6d3859b198ed4c,http://www.cin.ufpe.br/~rps/Artigos/Expression%20and%20Illumination%20Invariant%20Preprocessing%20Technique%20for%20Face%20Recognition.pdf
+0145dc4505041bf39efa70ea6d95cf392cfe7f19,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/2B_046_ext.pdf
+01bef320b83ac4405b3fc5b1cff788c124109fb9,http://pdfs.semanticscholar.org/49e4/37cc5b673c49b942e304607a0050dcc82dae.pdf
+01c9dc5c677aaa980f92c4680229db482d5860db,https://pages.iai.uni-bonn.de/gall_juergen/download/jgall_actiondetect_cvpr16.pdf
+013909077ad843eb6df7a3e8e290cfd5575999d2,http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf
+01d2cf5398c2b3e0f4fc8e8318a4492c95a0b242,http://webee.technion.ac.il/~lihi/Publications/10-ANS-PAMI.pdf
+01c7a778cde86ad1b89909ea809d55230e569390,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Siyahjani_A_Supervised_Low-Rank_ICCV_2015_paper.pdf
+01c8d7a3460422412fba04e7ee14c4f6cdff9ad7,http://pdfs.semanticscholar.org/01c8/d7a3460422412fba04e7ee14c4f6cdff9ad7.pdf
+0115f260069e2e501850a14845feb400142e2443,http://pdfs.semanticscholar.org/0115/f260069e2e501850a14845feb400142e2443.pdf
+01cc8a712e67384f9ef9f30580b7415bfd71e980,http://pdfs.semanticscholar.org/01cc/8a712e67384f9ef9f30580b7415bfd71e980.pdf
+01e12be4097fa8c94cabeef0ad61498c8e7762f2,http://pdfs.semanticscholar.org/10bf/f1957b8a4adce86efd10596186d905976c16.pdf
+0163d847307fae508d8f40ad193ee542c1e051b4,http://www.alessandrobergamo.com/data/compact_descriptors_supplementary.pdf
+01dc1e03f39901e212bdf291209b7686266aeb13,http://arxiv.org/pdf/1604.07279v1.pdf
+016f49a54b79ec787e701cc8c7d0280273f9b1ef,http://poseidon.csd.auth.gr/papers/PUBLISHED/CONFERENCE/pdf/Kotropoulos06a.pdf
+017ce398e1eb9f2eed82d0b22fb1c21d3bcf9637,http://pdfs.semanticscholar.org/017c/e398e1eb9f2eed82d0b22fb1c21d3bcf9637.pdf
+01125e3c68edb420b8d884ff53fb38d9fbe4f2b8,http://openaccess.thecvf.com/content_ICCV_2017/papers/Jackson_Large_Pose_3D_ICCV_2017_paper.pdf
+01c09acf0c046296643de4c8b55a9330e9c8a419,http://pdfs.semanticscholar.org/01c0/9acf0c046296643de4c8b55a9330e9c8a419.pdf
+01d23cbac762b0e46251f5dbde08f49f2d13b9f8,http://pdfs.semanticscholar.org/01d2/3cbac762b0e46251f5dbde08f49f2d13b9f8.pdf
+014143aa16604ec3f334c1407ceaa496d2ed726e,http://www.cs.cmu.edu/~har/cvpr2008-manifold.pdf
+011e6146995d5d63c852bd776f782cc6f6e11b7b,http://openaccess.thecvf.com/content_cvpr_2016/papers/Zhuang_Fast_Training_of_CVPR_2016_paper.pdf
+0182d090478be67241392df90212d6cd0fb659e6,http://www.cs.utexas.edu/~grauman/papers/localized_attributes_cvpr2012.pdf
+016a8ed8f6ba49bc669dbd44de4ff31a79963078,http://www.jdl.ac.cn/user/sgshan/pub/icassp04_qing.pdf
+01beab8f8293a30cf48f52caea6ca0fb721c8489,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553729.pdf
+0178929595f505ef7655272cc2c339d7ed0b9507,http://pdfs.semanticscholar.org/7d84/151beccef17f71b3eeaca59ebc690561ab73.pdf
+0181fec8e42d82bfb03dc8b82381bb329de00631,http://users.isy.liu.se/en/cvl/zografos/publications/CVPR2013.pdf
+01b4b32c5ef945426b0396d32d2a12c69c282e29,http://pdfs.semanticscholar.org/1510/bfa3a31ccf47e0241d3528aeda4871597a0f.pdf
+0113b302a49de15a1d41ca4750191979ad756d2f,http://www.cecs.uci.edu/~papers/icme06/pdfs/0000537.pdf
+01379c50c392c104694ccb871a4b6a36d514f102,http://sse.tongji.edu.cn/hyli/Publications/icmla2010.pdf
+01c948d2b73abe8be1ac128a6439c1081ebca95a,http://mla.sdu.edu.cn/PeopleInfo/lixuzhou/A%20hybrid%20biometric%20identification%20framework%20for%20high%20security%20applications.pdf
+01733018a79aa447a27f269a1b9a58cd5f39603e,http://vc.sce.ntu.edu.sg/index_files/Semi-supervised%20Bilinear%20Subspace%20Learning.pdf
+019e471667c72b5b3728b4a9ba9fe301a7426fb2,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2A_012.pdf
+0601416ade6707c689b44a5bb67dab58d5c27814,http://pdfs.semanticscholar.org/0601/416ade6707c689b44a5bb67dab58d5c27814.pdf
+064b797aa1da2000640e437cacb97256444dee82,http://pdfs.semanticscholar.org/064b/797aa1da2000640e437cacb97256444dee82.pdf
+06f146dfcde10915d6284981b6b84b85da75acd4,http://cmlab.csie.ntu.edu.tw/~sirius42/papers/tmm12.pdf
+067126ce1f1a205f98e33db7a3b77b7aec7fb45a,http://pdfs.semanticscholar.org/0671/26ce1f1a205f98e33db7a3b77b7aec7fb45a.pdf
+06466276c4955257b15eff78ebc576662100f740,http://cmlab.csie.ntu.edu.tw/~sirius42/papers/sigir12.pdf
+0697bd81844d54064d992d3229162fe8afcd82cb,http://pdfs.semanticscholar.org/0697/bd81844d54064d992d3229162fe8afcd82cb.pdf
+06f8aa1f436a33014e9883153b93581eea8c5c70,http://pdfs.semanticscholar.org/8926/471921ff608f70c6c81777782974a91086ae.pdf
+061c84a4143e859a7caf6e6d283dfb30c23ee56e,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2B_008_ext.pdf
+06d93a40365da90f30a624f15bf22a90d9cfe6bb,http://pdfs.semanticscholar.org/6940/40e59bffd860640e45c54ca7b093630caa39.pdf
+061e29eae705f318eee703b9e17dc0989547ba0c,http://pdfs.semanticscholar.org/061e/29eae705f318eee703b9e17dc0989547ba0c.pdf
+06850b60e33baa4ea9473811d58c0d5015da079e,http://pdfs.semanticscholar.org/4cff/901521af06d6a0c98c9dce253296dd88b496.pdf
+06e7e99c1fdb1da60bc3ec0e2a5563d05b63fe32,http://www.cs.utexas.edu/~grauman/papers/whittle-search-supp-cvpr2012.pdf
+06a6347ac14fd0c6bb3ad8190cbe9cdfa5d59efc,https://www.cs.umd.edu/sites/default/files/scholarly_papers/Biswas_1.pdf
+066d71fcd997033dce4ca58df924397dfe0b5fd1,http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf
+06526c52a999fdb0a9fd76e84f9795a69480cecf,http://pdfs.semanticscholar.org/0652/6c52a999fdb0a9fd76e84f9795a69480cecf.pdf
+06bad0cdda63e3fd054e7b334a5d8a46d8542817,http://vision.cs.utexas.edu/projects/featuresharing/0975.pdf
+06fe63b34fcc8ff68b72b5835c4245d3f9b8a016,http://chechiklab.biu.ac.il/~gal/Papers/Mesnil_MachineLearning2013_objects_and_their_parts.pdf
+06aab105d55c88bd2baa058dc51fa54580746424,http://www4.comp.polyu.edu.hk/~cslzhang/paper/ISCRC_TIFS.pdf
+0641dbee7202d07b6c78a39eecd312c17607412e,http://users.cecs.anu.edu.au/~hongdong/JiZhongLiSalzmannICIP14.pdf
+06262d14323f9e499b7c6e2a3dec76ad9877ba04,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Juranek_Real-Time_Pose_Estimation_ICCV_2015_paper.pdf
+06400a24526dd9d131dfc1459fce5e5189b7baec,http://www.vision.ee.ethz.ch/en/publications/papers/proceedings/eth_biwi_01054.pdf
+062d67af7677db086ef35186dc936b4511f155d7,http://openaccess.thecvf.com/content_cvpr_2016/papers/Chang_They_Are_Not_CVPR_2016_paper.pdf
+0694b05cbc3ef5d1c5069a4bfb932a5a7b4d5ff0,http://pdfs.semanticscholar.org/0694/b05cbc3ef5d1c5069a4bfb932a5a7b4d5ff0.pdf
+060034b59275c13746413ca9c67d6304cba50da6,http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W14/papers/Murthy_Ordered_Trajectories_for_2013_ICCV_paper.pdf
+060820f110a72cbf02c14a6d1085bd6e1d994f6a,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/3B_113_ext.pdf
+0653dcdff992ad980cd5ea5bc557efb6e2a53ba1,http://pdfs.semanticscholar.org/0653/dcdff992ad980cd5ea5bc557efb6e2a53ba1.pdf
+063a3be18cc27ba825bdfb821772f9f59038c207,http://eprints.whiterose.ac.uk/125231/1/kaiser_et_al_17.pdf
+064cd41d323441209ce1484a9bba02a22b625088,http://www.ri.cmu.edu/pub_files/2013/6/stm_final.pdf
+06c2dfe1568266ad99368fc75edf79585e29095f,http://ibug.doc.ic.ac.uk/media/uploads/documents/joan_cvpr2014.pdf
+06f39834e870278243dda826658319be2d5d8ded,http://www.public.asu.edu/~bli24/Papers/ICIP2016_video.pdf
+06d7ef72fae1be206070b9119fb6b61ce4699587,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Zafeiriou_On_One-Shot_Similarity_2013_ICCV_paper.pdf
+062d0813815c2b9864cd9bb4f5a1dc2c580e0d90,https://infoscience.epfl.ch/record/230310/files/AliakbarianEtAlICCV17.pdf?version=1
+06a9ed612c8da85cb0ebb17fbe87f5a137541603,http://pdfs.semanticscholar.org/06a9/ed612c8da85cb0ebb17fbe87f5a137541603.pdf
+06959f9cf3226179fa1b05efade843b7844fb2bc,http://www.researchgate.net/profile/Fei_Wu2/publication/4090506_Relevant_linear_feature_extraction_using_side-information_and_unlabeled_data/links/549062220cf214269f2668c9.pdf
+06ad99f19cf9cb4a40741a789e4acbf4433c19ae,http://pdfs.semanticscholar.org/06ad/99f19cf9cb4a40741a789e4acbf4433c19ae.pdf
+06fb92e110d077c27d401d2f9483964cd0615284,http://www.cs.sunysb.edu/~ial/content/papers/2009/wang_pami09.pdf
+6c27eccf8c4b22510395baf9f0d0acc3ee547862,http://pdfs.semanticscholar.org/6c27/eccf8c4b22510395baf9f0d0acc3ee547862.pdf
+6cefb70f4668ee6c0bf0c18ea36fd49dd60e8365,http://pdfs.semanticscholar.org/6cef/b70f4668ee6c0bf0c18ea36fd49dd60e8365.pdf
+6c690af9701f35cd3c2f6c8d160b8891ad85822a,http://www.umiacs.umd.edu/~fyang/papers/iccv15.pdf
+6ce23cf4f440021b7b05aa3c1c2700cc7560b557,http://pdfs.semanticscholar.org/6ce2/3cf4f440021b7b05aa3c1c2700cc7560b557.pdf
+6c9266aa77ea01b9d26a98a483b56e9e8b80eeba,https://www.researchgate.net/profile/Stefano_Tubaro/publication/224641232_Mixed_2D-3D_Information_for_Pose_Estimation_and_Face_Recognition/links/00b7d5178477f30fb3000000.pdf
+6c2b392b32b2fd0fe364b20c496fcf869eac0a98,http://www3.ntu.edu.sg/home/EXDJiang/JiangX.D.-MVA-13.pdf
+6c6bb85a08b0bdc50cf8f98408d790ccdb418798,http://pdfs.semanticscholar.org/6c6b/b85a08b0bdc50cf8f98408d790ccdb418798.pdf
+6c705285c554985ecfe1117e854e1fe1323f8c21,http://pdfs.semanticscholar.org/6c70/5285c554985ecfe1117e854e1fe1323f8c21.pdf
+6cddc7e24c0581c50adef92d01bb3c73d8b80b41,http://users.soe.ucsc.edu/~milanfar/publications/journal/TIFS_Final.pdf
+6cd96f2b63c6b6f33f15c0ea366e6003f512a951,http://pdfs.semanticscholar.org/6cd9/6f2b63c6b6f33f15c0ea366e6003f512a951.pdf
+6c8c7065d1041146a3604cbe15c6207f486021ba,http://pdfs.semanticscholar.org/6c8c/7065d1041146a3604cbe15c6207f486021ba.pdf
+390f3d7cdf1ce127ecca65afa2e24c563e9db93b,https://arxiv.org/pdf/1408.3967v2.pdf
+391b86cf16c2702dcc4beee55a6dd6d3bd7cf27b,http://dayongwang.info/pdf/2014-MM.pdf
+395a91d49e9283e1bf2d61a75c3dc846b347ea74,http://cake.fiu.edu/Publications/Reza+al-13-OV.On-demand_Virtual_Health.IEEE.downloaded.pdf
+3918b425bb9259ddff9eca33e5d47bde46bd40aa,http://pdfs.semanticscholar.org/3918/b425bb9259ddff9eca33e5d47bde46bd40aa.pdf
+39ce143238ea1066edf0389d284208431b53b802,http://pdfs.semanticscholar.org/39ce/143238ea1066edf0389d284208431b53b802.pdf
+39ce2232452c0cd459e32a19c1abe2a2648d0c3f,http://pdfs.semanticscholar.org/4fac/61d638cf7a1ab995e2ee9a02d3672b12d2ca.pdf
+39f7878f447df7703f2c4ddeeffd7eb0e21f6cd4,http://dev.pubs.doc.ic.ac.uk/Pantic-CVPR05/Pantic-CVPR05.pdf
+3998c5aa6be58cce8cb65a64cb168864093a9a3e,http://cvrr.ucsd.edu/publications/2014/HeadHand.pdf
+39dc2ce4cce737e78010642048b6ed1b71e8ac2f,http://www.mirlab.org/conference_papers/International_Conference/ICME%202004/html/papers/P59890.pdf
+397aeaea61ecdaa005b09198942381a7a11cd129,http://pdfs.semanticscholar.org/e30b/df82a358587f7d27ee4ea0b34762328c2a8d.pdf
+3991223b1dc3b87883cec7af97cf56534178f74a,http://www.ics.uci.edu/~dvk/pub/ICMR13_dvk.pdf
+39b22bcbd452d5fea02a9ee63a56c16400af2b83,http://www.uoguelph.ca/~gwtaylor/publications/gwtaylor_crv2014.pdf
+399a2c23bd2592ebe20aa35a8ea37d07c14199da,http://pdfs.semanticscholar.org/399a/2c23bd2592ebe20aa35a8ea37d07c14199da.pdf
+396a19e29853f31736ca171a3f40c506ef418a9f,http://pdfs.semanticscholar.org/396a/19e29853f31736ca171a3f40c506ef418a9f.pdf
+392d35bb359a3b61cca1360272a65690a97a2b3f,http://pdfs.semanticscholar.org/9cc1/0842f7701bfb92725b4dda4df391b0b341e3.pdf
+397085122a5cade71ef6c19f657c609f0a4f7473,http://pdfs.semanticscholar.org/db11/4901d09a07ab66bffa6986bc81303e133ae1.pdf
+39c48309b930396a5a8903fdfe781d3e40d415d0,http://www.ri.cmu.edu/pub_files/2017/5/ant_low.pdf
+3986161c20c08fb4b9b791b57198b012519ea58b,http://pdfs.semanticscholar.org/3986/161c20c08fb4b9b791b57198b012519ea58b.pdf
+392425be1c9d9c2ee6da45de9df7bef0d278e85f,http://pdfs.semanticscholar.org/3924/25be1c9d9c2ee6da45de9df7bef0d278e85f.pdf
+392c3cabe516c0108b478152902a9eee94f4c81e,http://pdfs.semanticscholar.org/392c/3cabe516c0108b478152902a9eee94f4c81e.pdf
+39f525f3a0475e6bbfbe781ae3a74aca5b401125,http://pdfs.semanticscholar.org/39f5/25f3a0475e6bbfbe781ae3a74aca5b401125.pdf
+3946b8f862ecae64582ef0912ca2aa6d3f6f84dc,http://pdfs.semanticscholar.org/3946/b8f862ecae64582ef0912ca2aa6d3f6f84dc.pdf
+3933416f88c36023a0cba63940eb92f5cef8001a,http://pdfs.semanticscholar.org/3933/416f88c36023a0cba63940eb92f5cef8001a.pdf
+39150acac6ce7fba56d54248f9c0badbfaeef0ea,http://pdfs.semanticscholar.org/3915/0acac6ce7fba56d54248f9c0badbfaeef0ea.pdf
+39f03d1dfd94e6f06c1565d7d1bb14ab0eee03bc,http://openaccess.thecvf.com/content_iccv_2015/papers/Lu_Simultaneous_Local_Binary_ICCV_2015_paper.pdf
+3983637022992a329f1d721bed246ae76bc934f7,http://www.cs.umd.edu/~djacobs/pubs_files/SlantCVPRFinal.pdf
+3958db5769c927cfc2a9e4d1ee33ecfba86fe054,http://homes.cs.washington.edu/~neeraj/base/base/papers/nk_pami2011_faceattrs.pdf
+39ecdbad173e45964ffe589b9ced9f1ebfe2d44e,http://measuringbehavior.org/files/ProceedingsPDF(website)/Gonzalez_FullPaper3.4.pdf
+39b5f6d6f8d8127b2b97ea1a4987732c0db6f9df,https://pdfs.semanticscholar.org/39b5/f6d6f8d8127b2b97ea1a4987732c0db6f9df.pdf
+99ced8f36d66dce20d121f3a29f52d8b27a1da6c,http://pdfs.semanticscholar.org/99ce/d8f36d66dce20d121f3a29f52d8b27a1da6c.pdf
+9949ac42f39aeb7534b3478a21a31bc37fe2ffe3,http://pdfs.semanticscholar.org/9949/ac42f39aeb7534b3478a21a31bc37fe2ffe3.pdf
+999289b0ef76c4c6daa16a4f42df056bf3d68377,http://pdfs.semanticscholar.org/9992/89b0ef76c4c6daa16a4f42df056bf3d68377.pdf
+9958942a0b7832e0774708a832d8b7d1a5d287ae,https://engineering.purdue.edu/~bouman/publications/pdf/tip29.pdf
+995d55fdf5b6fe7fb630c93a424700d4bc566104,http://openaccess.thecvf.com/content_iccv_2015/papers/Nilsson_The_One_Triangle_ICCV_2015_paper.pdf
+99726ad232cef837f37914b63de70d8c5101f4e2,http://pdfs.semanticscholar.org/9972/6ad232cef837f37914b63de70d8c5101f4e2.pdf
+9931c6b050e723f5b2a189dd38c81322ac0511de,http://pdfs.semanticscholar.org/9931/c6b050e723f5b2a189dd38c81322ac0511de.pdf
+994b52bf884c71a28b4f5be4eda6baaacad1beee,http://www.yugangjiang.info/publication/BIGMM15-summit-invited.pdf
+9963c73b03e4649959f021ef6f4fb1eac0b617d2,http://eeeweba.ntu.edu.sg/computervision/Research%20Papers/2017/Person%20Re-identification%20Using%20Multiple%20Egocentric%20Views.pdf
+99001ac9fdaf7649c0d0bd8d2078719bafd216d9,http://people.ee.duke.edu/~lcarin/TPAMI_2007_General_tensor_analysis.pdf
+9993f1a7cfb5b0078f339b9a6bfa341da76a3168,http://pdfs.semanticscholar.org/9993/f1a7cfb5b0078f339b9a6bfa341da76a3168.pdf
+992ebd81eb448d1eef846bfc416fc929beb7d28b,http://pdfs.semanticscholar.org/992e/bd81eb448d1eef846bfc416fc929beb7d28b.pdf
+9990e0b05f34b586ffccdc89de2f8b0e5d427067,http://pdfs.semanticscholar.org/9990/e0b05f34b586ffccdc89de2f8b0e5d427067.pdf
+52012b4ecb78f6b4b9ea496be98bcfe0944353cd,http://pdfs.semanticscholar.org/5201/2b4ecb78f6b4b9ea496be98bcfe0944353cd.pdf
+521cfbc1949289a7ffc3ff90af7c55adeb43db2a,http://pdfs.semanticscholar.org/521c/fbc1949289a7ffc3ff90af7c55adeb43db2a.pdf
+529e2ce6fb362bfce02d6d9a9e5de635bde81191,http://image.sciencenet.cn/olddata/kexue.com.cn/upload/blog/file/2011/1/20111721232398113.pdf
+52887969107956d59e1218abb84a1f834a314578,http://www.cmlab.csie.ntu.edu.tw/~yanying/paper/chen13travel.pdf
+521482c2089c62a59996425603d8264832998403,http://pdfs.semanticscholar.org/5214/82c2089c62a59996425603d8264832998403.pdf
+521b625eebea73b5deb171a350e3709a4910eebf,https://arxiv.org/pdf/1604.06397v1.pdf
+52258ec5ec73ce30ca8bc215539c017d279517cf,http://pdfs.semanticscholar.org/5225/8ec5ec73ce30ca8bc215539c017d279517cf.pdf
+5253c94f955146ba7d3566196e49fe2edea1c8f4,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Kemelmacher-Shlizerman_Internet_Based_Morphable_2013_ICCV_paper.pdf
+527dda77a3864d88b35e017d542cb612f275a4ec,https://arxiv.org/pdf/1709.00531v1.pdf
+529b1f33aed49dbe025a99ac1d211c777ad881ec,https://teresaproject.eu/wp-content/uploads/2015/07/kossaifi_bidirectional_icip.pdf
+523b2cbc48decfabffb66ecaeced4fe6a6f2ac78,https://arxiv.org/pdf/1708.09126v1.pdf
+5287d8fef49b80b8d500583c07e935c7f9798933,http://pdfs.semanticscholar.org/8e65/13b642dcd5dc0fb60173dd0da1d8440eba8d.pdf
+52c59f9f4993c8248dd3d2d28a4946f1068bcbbe,http://blogs.bu.edu/joewang/files/2013/06/allerton_2011_v2.pdf
+52bf00df3b970e017e4e2f8079202460f1c0e1bd,http://pdfs.semanticscholar.org/52bf/00df3b970e017e4e2f8079202460f1c0e1bd.pdf
+52c91fcf996af72d191520d659af44e310f86ef9,http://pdfs.semanticscholar.org/52c9/1fcf996af72d191520d659af44e310f86ef9.pdf
+52a9f957f776c8b3d913cfcd20452b9e31c27845,http://pdfs.semanticscholar.org/52a9/f957f776c8b3d913cfcd20452b9e31c27845.pdf
+52885fa403efbab5ef21274282edd98b9ca70cbf,http://www.aiia.csd.auth.gr/EN/cor_baayen/Discriminant_Graph_Structures_FER.pdf
+52f23e1a386c87b0dab8bfdf9694c781cd0a3984,http://pdfs.semanticscholar.org/52f2/3e1a386c87b0dab8bfdf9694c781cd0a3984.pdf
+528069963f0bd0861f380f53270c96c269a3ea1c,http://pdfs.semanticscholar.org/5280/69963f0bd0861f380f53270c96c269a3ea1c.pdf
+5239001571bc64de3e61be0be8985860f08d7e7e,http://pdfs.semanticscholar.org/5239/001571bc64de3e61be0be8985860f08d7e7e.pdf
+556b9aaf1bc15c928718bc46322d70c691111158,https://www.ecse.rpi.edu/~cvrl/lwh/myPublications/ICPR08_BNlearning_camera.pdf
+550858b7f5efaca2ebed8f3969cb89017bdb739f,http://pdfs.semanticscholar.org/5508/58b7f5efaca2ebed8f3969cb89017bdb739f.pdf
+554b9478fd285f2317214396e0ccd81309963efd,http://pdfs.semanticscholar.org/554b/9478fd285f2317214396e0ccd81309963efd.pdf
+55cc90968e5e6ed413dd607af2a850ac2f54e378,http://pdfs.semanticscholar.org/55cc/90968e5e6ed413dd607af2a850ac2f54e378.pdf
+559795d3f3b096ceddc03720ba62d79d50eae300,http://www3.nd.edu/~kwb/BarrBowyerFlynnTIFS_2014.pdf
+558fc9a2bce3d3993a9c1f41b6c7f290cefcf92f,http://pdfs.semanticscholar.org/558f/c9a2bce3d3993a9c1f41b6c7f290cefcf92f.pdf
+55138c2b127ebdcc508503112bf1d1eeb5395604,http://pdfs.semanticscholar.org/7815/368a8f6474910d3faf798198ff9dae836360.pdf
+5502dfe47ac26e60e0fb25fc0f810cae6f5173c0,http://pdfs.semanticscholar.org/5502/dfe47ac26e60e0fb25fc0f810cae6f5173c0.pdf
+55e18e0dde592258882134d2dceeb86122b366ab,http://pdfs.semanticscholar.org/f863/ba982068d676084032146e8053d4791114e9.pdf
+556545eec370b9d300fc044a1aa63fc44fd79b0f,http://www.cs.cmu.edu/~dhoiem/publications/cvpr2010_gangwang.pdf
+55a158f4e7c38fe281d06ae45eb456e05516af50,http://pdfs.semanticscholar.org/55a1/58f4e7c38fe281d06ae45eb456e05516af50.pdf
+5506a1a1e1255353fde05d9188cb2adc20553af5,http://pdfs.semanticscholar.org/ff69/cb49c8cb86d0afadbcfa0baa607d7065965a.pdf
+55966926e7c28b1eee1c7eb7a0b11b10605a1af0,http://pdfs.semanticscholar.org/baa8/bdeb5aa545af5b5f43efaf9dda08490da0bc.pdf
+552c55c71bccfc6de7ce1343a1cd12208e9a63b3,https://ivi.fnwi.uva.nl/isis/publications/2008/ValentiCVPR2008/ValentiCVPR2008.pdf
+5517b28795d7a68777c9f3b2b46845dcdb425b2c,http://pdfs.semanticscholar.org/5517/b28795d7a68777c9f3b2b46845dcdb425b2c.pdf
+55c81f15c89dc8f6eedab124ba4ccab18cf38327,http://pdfs.semanticscholar.org/d31e/258f6af40f457c27ce118986ea157673c9c4.pdf
+55bc7abcef8266d76667896bbc652d081d00f797,http://www.cse.msu.edu/~rossarun/pubs/ChenCosmeticsGenderAge_VISAPP2014.pdf
+55b4b1168c734eeb42882082bd131206dbfedd5b,http://pdfs.semanticscholar.org/76fd/f16bcc2cb260b9e6b2880c8fe128533bc2c6.pdf
+55079a93b7d1eb789193d7fcdcf614e6829fad0f,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w10/papers/Conde_Efficient_and_Robust_ICCV_2015_paper.pdf
+55804f85613b8584d5002a5b0ddfe86b0d0e3325,http://pdfs.semanticscholar.org/ba13/b161aa8e6f6cb511592016058882d976a898.pdf
+551fa37e8d6d03b89d195a5c00c74cc52ff1c67a,http://pdfs.semanticscholar.org/551f/a37e8d6d03b89d195a5c00c74cc52ff1c67a.pdf
+55eb7ec9b9740f6c69d6e62062a24bfa091bbb0c,http://pdfs.semanticscholar.org/55eb/7ec9b9740f6c69d6e62062a24bfa091bbb0c.pdf
+55b9b1c1c5487f5f62b44340104a9c4cc2ed7c96,http://pdfs.semanticscholar.org/55b9/b1c1c5487f5f62b44340104a9c4cc2ed7c96.pdf
+973e3d9bc0879210c9fad145a902afca07370b86,http://pdfs.semanticscholar.org/973e/3d9bc0879210c9fad145a902afca07370b86.pdf
+970c0d6c0fd2ebe7c5921a45bc70f6345c844ff3,http://pdfs.semanticscholar.org/970c/0d6c0fd2ebe7c5921a45bc70f6345c844ff3.pdf
+97b8249914e6b4f8757d22da51e8347995a40637,http://rogerioferis.com/VisualRecognitionAndSearch2014/material/papers/FerisTransMultimedia2012.pdf
+972ef9ddd9059079bdec17abc8b33039ed25c99c,http://pdfs.semanticscholar.org/972e/f9ddd9059079bdec17abc8b33039ed25c99c.pdf
+97032b13f1371c8a813802ade7558e816d25c73f,http://pdfs.semanticscholar.org/9703/2b13f1371c8a813802ade7558e816d25c73f.pdf
+978a219e07daa046244821b341631c41f91daccd,http://pdfs.semanticscholar.org/e2b9/f8b66d3f9080ccb14f058cf4798cb4d89241.pdf
+976e0264bb57786952a987d4456850e274714fb8,https://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Dehghan_Improving_Semantic_Concept_2014_CVPR_paper.pdf
+9758f3fd94239a8d974217fe12599f88fb413f3d,http://pdfs.semanticscholar.org/9758/f3fd94239a8d974217fe12599f88fb413f3d.pdf
+97f9c3bdb4668f3e140ded2da33fe704fc81f3ea,http://pdfs.semanticscholar.org/97f9/c3bdb4668f3e140ded2da33fe704fc81f3ea.pdf
+97e569159d5658760eb00ca9cb662e6882d2ab0e,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989c291.pdf
+97d1d561362a8b6beb0fdbee28f3862fb48f1380,http://pages.cs.wisc.edu/~gdguo/myPapersOnWeb/PAMI10Guo.pdf
+97540905e4a9fdf425989a794f024776f28a3fa9,http://pdfs.semanticscholar.org/cc5a/1bf68ba00c20415e43684c6f75ce3fbc176c.pdf
+97865d31b5e771cf4162bc9eae7de6991ceb8bbf,http://pdfs.semanticscholar.org/9786/5d31b5e771cf4162bc9eae7de6991ceb8bbf.pdf
+9755554b13103df634f9b1ef50a147dd02eab02f,https://arxiv.org/pdf/1610.00134v1.pdf
+635158d2da146e9de559d2742a2fa234e06b52db,http://www.openu.ac.il/home/hassner/projects/cnn_emotions/LeviHassnerICMI15.pdf
+63d8110ac76f57b3ba8a5947bc6bdbb86f25a342,http://pdfs.semanticscholar.org/63d8/110ac76f57b3ba8a5947bc6bdbb86f25a342.pdf
+63f2d1a64737afa1608588b9651b1e4207e82d1c,http://staff.estem-uc.edu.au/roland/files/2009/05/Rajagopalan_Goecke_ICIP2014_DetectingSelf-StimulatoryBehavioursForAutismDiagnosis.pdf
+63cf5fc2ee05eb9c6613043f585dba48c5561192,http://pdfs.semanticscholar.org/63cf/5fc2ee05eb9c6613043f585dba48c5561192.pdf
+6339e9385ae3609cb22f6b87175c7e6850f2c05b,http://vision.ucmerced.edu/papers/Yang_WACV12_EstimatingTheSpatialExtent.pdf
+6324fada2fb00bd55e7ff594cf1c41c918813030,http://pdfs.semanticscholar.org/6324/fada2fb00bd55e7ff594cf1c41c918813030.pdf
+6308e9c991125ee6734baa3ec93c697211237df8,http://www.ifp.illinois.edu/~jyang29/papers/ICME-SSR.pdf
+6342a4c54835c1e14159495373ab18b4233d2d9b,http://pdfs.semanticscholar.org/6342/a4c54835c1e14159495373ab18b4233d2d9b.pdf
+63d8d69e90e79806a062cb8654ad78327c8957bb,http://pdfs.semanticscholar.org/63d8/d69e90e79806a062cb8654ad78327c8957bb.pdf
+63c109946ffd401ee1195ed28f2fb87c2159e63d,http://pdfs.semanticscholar.org/63c1/09946ffd401ee1195ed28f2fb87c2159e63d.pdf
+63b29886577a37032c7e32d8899a6f69b11a90de,http://pdfs.semanticscholar.org/63b2/9886577a37032c7e32d8899a6f69b11a90de.pdf
+63a6c256ec2cf2e0e0c9a43a085f5bc94af84265,http://www.cs.tau.ac.il/~wolf/papers/complexity-multiverse-networks.pdf
+63213d080a43660ac59ea12e3c35e6953f6d7ce8,https://arxiv.org/pdf/1704.02895v1.pdf
+630d1728435a529d0b0bfecb0e7e335f8ea2596d,http://pdfs.semanticscholar.org/aa67/719e839d035e4d67e4434794b6cccaf091d6.pdf
+63eefc775bcd8ccad343433fc7a1dd8e1e5ee796,http://www.lv-nus.org/papers%5C2008%5C2008_J_6.pdf
+63340c00896d76f4b728dbef85674d7ea8d5ab26,https://www.comp.nus.edu.sg/~tsim/documents/fkt-dsa-pami-published.pdf
+63ce37da6c0c789099307337bb913e1104473854,http://pdfs.semanticscholar.org/63ce/37da6c0c789099307337bb913e1104473854.pdf
+63a2e2155193dc2da9764ae7380cdbd044ff2b94,http://pdfs.semanticscholar.org/a8fb/2c65a23d1e75c4923c36fdd6e3d2a4b3d8f7.pdf
+63d865c66faaba68018defee0daf201db8ca79ed,http://pdfs.semanticscholar.org/63d8/65c66faaba68018defee0daf201db8ca79ed.pdf
+63cff99eff0c38b633c8a3a2fec8269869f81850,http://pdfs.semanticscholar.org/63cf/f99eff0c38b633c8a3a2fec8269869f81850.pdf
+634541661d976c4b82d590ef6d1f3457d2857b19,http://pdfs.semanticscholar.org/6345/41661d976c4b82d590ef6d1f3457d2857b19.pdf
+6332a99e1680db72ae1145d65fa0cccb37256828,http://pdfs.semanticscholar.org/6332/a99e1680db72ae1145d65fa0cccb37256828.pdf
+63488398f397b55552f484409b86d812dacde99a,http://pdfs.semanticscholar.org/6348/8398f397b55552f484409b86d812dacde99a.pdf
+6341274aca0c2977c3e1575378f4f2126aa9b050,http://arxiv.org/pdf/1609.03536v1.pdf
+63c022198cf9f084fe4a94aa6b240687f21d8b41,http://pdfs.semanticscholar.org/63c0/22198cf9f084fe4a94aa6b240687f21d8b41.pdf
+632441c9324cd29489cee3da773a9064a46ae26b,http://pdfs.semanticscholar.org/6324/41c9324cd29489cee3da773a9064a46ae26b.pdf
+0f65c91d0ed218eaa7137a0f6ad2f2d731cf8dab,http://arxiv.org/pdf/1401.5311v1.pdf
+0f112e49240f67a2bd5aaf46f74a924129f03912,http://www.cse.msu.edu/biometrics/Publications/Face/ParkTongJain_AgeInvariantFaceRecognition_PAMI10.pdf
+0fc254272db096a9305c760164520ad9914f4c9e,https://arxiv.org/pdf/1601.06087v1.pdf
+0fae5d9d2764a8d6ea691b9835d497dd680bbccd,http://pdfs.semanticscholar.org/0fae/5d9d2764a8d6ea691b9835d497dd680bbccd.pdf
+0f4cfcaca8d61b1f895aa8c508d34ad89456948e,http://signal.ee.bilkent.edu.tr/defevent/abstract/a2051.pdf
+0fdcfb4197136ced766d538b9f505729a15f0daf,https://arxiv.org/pdf/0907.5321v2.pdf
+0fad544edfc2cd2a127436a2126bab7ad31ec333,http://citeseerx.ist.psu.edu/viewdoc/download;jsessionid=7D609FEFFC36336C4A45ECA3B56C336A?doi=10.1.1.476.9590&rep=rep1&type=pdf
+0f32df6ae76402b98b0823339bd115d33d3ec0a0,http://perceptual.mpi-inf.mpg.de/files/2015/07/Mueller15_ACII.pdf
+0fd1715da386d454b3d6571cf6d06477479f54fc,http://pdfs.semanticscholar.org/0fd1/715da386d454b3d6571cf6d06477479f54fc.pdf
+0f9bf5d8f9087fcba419379600b86ae9e9940013,http://pdfs.semanticscholar.org/0f9b/f5d8f9087fcba419379600b86ae9e9940013.pdf
+0f829fee12e86f980a581480a9e0cefccb59e2c5,http://www.cs.columbia.edu/~liujx09/posters/birdpart_poster.pdf
+0faee699eccb2da6cf4307ded67ba8434368257b,http://pdfs.semanticscholar.org/2396/5bd9b557b04b2c81a35ee5c16951c0e420f3.pdf
+0fabb4a40f2e3a2502cd935e54e090a304006c1c,http://arxiv.org/pdf/1202.4207v2.pdf
+0f92e9121e9c0addc35eedbbd25d0a1faf3ab529,http://pdfs.semanticscholar.org/0f92/e9121e9c0addc35eedbbd25d0a1faf3ab529.pdf
+0f0366070b46972fcb2976775b45681e62a94a26,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2014/W01/papers/Bendale_Reliable_Posterior_Probability_2014_CVPR_paper.pdf
+0ff23392e1cb62a600d10bb462d7a1f171f579d0,http://www.umiacs.umd.edu/~jhchoi/paper/icpr2014_slide.pdf
+0fd3a7ee228bbc3dd4a111dae04952a1ee58a8cd,http://media.cs.tsinghua.edu.cn/~ahz/papers/%5B2011%5D%5Bacpr%5Dwang%20nan.pdf
+0f533bc9fdfb75a3680d71c84f906bbd59ee48f1,http://www.iis.sinica.edu.tw/papers/song/11837-F.pdf
+0f53ab8b6c428127753281dd77cf94bdb889b624,https://www.researchgate.net/profile/Dian_Tjondronegoro/publication/224257559_Toward_a_more_robust_facial_expression_recognition_in_occluded_images_using_randomly_sampled_Gabor_based_templates/links/00b7d51f84babec8ad000000.pdf
+0f4eb63402a4f3bae8f396e12133684fb760def1,http://pdfs.semanticscholar.org/8c4e/b15de264af9f92a93d6e89d36295c5c4bf37.pdf
+0fba39bf12486c7684fd3d51322e3f0577d3e4e8,http://vision.ucsd.edu/~pdollar/files/papers/BabenkoICCV07boom.pdf
+0f395a49ff6cbc7e796656040dbf446a40e300aa,http://pdfs.semanticscholar.org/0f39/5a49ff6cbc7e796656040dbf446a40e300aa.pdf
+0fb8317a8bf5feaf297af8e9b94c50c5ed0e8277,http://pdfs.semanticscholar.org/0fb8/317a8bf5feaf297af8e9b94c50c5ed0e8277.pdf
+0fe96806c009e8d095205e8f954d41b2b9fd5dcf,http://pdfs.semanticscholar.org/51be/ffe5f96ccb6b64057a540a7874185ccad8d7.pdf
+0f1cbe4e26d584c82008ccef9fb1e4669b82de1f,http://figment.cse.usf.edu/~sfefilat/data/papers/MoBT9.24.pdf
+0f940d2cdfefc78c92ec6e533a6098985f47a377,https://www.ecse.rpi.edu/~cvrl/chenj/Expression_v6_submit.pdf
+0faeec0d1c51623a511adb779dabb1e721a6309b,http://pdfs.semanticscholar.org/a075/782ea38167658fe28986755adddba7369b4f.pdf
+0f81b0fa8df5bf3fcfa10f20120540342a0c92e5,https://arxiv.org/pdf/1501.05152v1.pdf
+0f0241124d6092a0bb56259ac091467c2c6938ca,http://mm.cs.uec.ac.jp/kitaha-a/research/maw2008.pdf?origin=publication_detail
+0a6d344112b5af7d1abbd712f83c0d70105211d0,http://www.cl.cam.ac.uk/~tb346/pub/papers/iccv2013.pdf
+0a64f4fec592662316764283575d05913eb2135b,http://pdfs.semanticscholar.org/0a64/f4fec592662316764283575d05913eb2135b.pdf
+0a2ddf88bd1a6c093aad87a8c7f4150bfcf27112,http://pdfs.semanticscholar.org/0a2d/df88bd1a6c093aad87a8c7f4150bfcf27112.pdf
+0a5ffc55b584da7918c2650f9d8602675d256023,http://pdfs.semanticscholar.org/0a5f/fc55b584da7918c2650f9d8602675d256023.pdf
+0aeb5020003e0c89219031b51bd30ff1bceea363,http://www.ee.cuhk.edu.hk/~xgwang/papers/sunWTarxiv15.pdf
+0a511058edae582e8327e8b9d469588c25152dc6,http://pdfs.semanticscholar.org/0a51/1058edae582e8327e8b9d469588c25152dc6.pdf
+0a4f3a423a37588fde9a2db71f114b293fc09c50,http://pdfs.semanticscholar.org/0a4f/3a423a37588fde9a2db71f114b293fc09c50.pdf
+0aa74ad36064906e165ac4b79dec298911a7a4db,http://pdfs.semanticscholar.org/7645/11b63b0eeba9f3dfe1e5ec9ff261cdc59d25.pdf
+0abf67e7bd470d9eb656ea2508beae13ca173198,http://www.cs.cmu.edu/~kkitani/pdf/MFK-CVPR16.pdf
+0af33f6b5fcbc5e718f24591b030250c6eec027a,http://pdfs.semanticscholar.org/fa2c/96273027ff92f98109dbcef5b65f34b36627.pdf
+0a3863a0915256082aee613ba6dab6ede962cdcd,http://pdfs.semanticscholar.org/0a38/63a0915256082aee613ba6dab6ede962cdcd.pdf
+0a85bdff552615643dd74646ac881862a7c7072d,https://fbcdn-dragon-a.akamaihd.net/hphotos-ak-xpa1/t39.2365-6/10000000_1672336992989417_1391274031_n/Beyond_Frontal_Faces_Improving_Person_Recognition_Using_Multiple_Cues.pdf
+0a325d70cc381b136a8f4e471b406cda6d27668c,http://pdfs.semanticscholar.org/0a32/5d70cc381b136a8f4e471b406cda6d27668c.pdf
+0ad8149318912b5449085187eb3521786a37bc78,http://arxiv.org/pdf/1604.02975v1.pdf
+0aa9872daf2876db8d8e5d6197c1ce0f8efee4b7,http://pdfs.semanticscholar.org/0aa9/872daf2876db8d8e5d6197c1ce0f8efee4b7.pdf
+0aae88cf63090ea5b2c80cd014ef4837bcbaadd8,http://pdfs.semanticscholar.org/0aae/88cf63090ea5b2c80cd014ef4837bcbaadd8.pdf
+0a87d781fe2ae2e700237ddd00314dbc10b1429c,http://pdfs.semanticscholar.org/0a87/d781fe2ae2e700237ddd00314dbc10b1429c.pdf
+0ad90118b4c91637ee165f53d557da7141c3fde0,http://pdfs.semanticscholar.org/0ad9/0118b4c91637ee165f53d557da7141c3fde0.pdf
+0a82860d11fcbf12628724333f1e7ada8f3cd255,http://pdfs.semanticscholar.org/0a82/860d11fcbf12628724333f1e7ada8f3cd255.pdf
+0a23d374c6cf71a65e845569230420362fe4903a,http://mplab.ucsd.edu/~ksikka/in_the_wild.pdf
+0a6a818b634cca4eb75a37bfd23b5c5c21331b12,http://hal.cse.msu.edu/pdfs/papers/wacv-2015.pdf
+0ac442bb570b086d04c4d51a8410fcbfd0b1779d,http://www.nec-labs.com/uploads/images/Department-Images/MediaAnalytics/papers/cvpr16_warpnet.pdf
+0af48a45e723f99b712a8ce97d7826002fe4d5a5,http://vision.seas.harvard.edu/papers/WideAngle_PAMI2013.pdf
+0aa8a0203e5f406feb1815f9b3dd49907f5fd05b,http://www.iti.gr/~bmezaris/publications/spl11_preprint.pdf
+0a68747d001aba014acd3b6ec83ba9534946a0da,http://staff.estem-uc.edu.au/roland/files/2009/05/Dhall_Goecke_Gedeon_TAC2015_AutomaticGroupHappinessIntensityAnalysis.pdf
+0ac664519b2b8abfb8966dafe60d093037275573,http://face.cs.kit.edu/download/publications/supplemental_material.pdf
+0a9345ea6e488fb936e26a9ba70b0640d3730ba7,http://www1.ece.neu.edu/~yuewu/files/2016/p52-jiang.pdf
+0a79d0ba1a4876086e64fc0041ece5f0de90fbea,http://pdfs.semanticscholar.org/0a79/d0ba1a4876086e64fc0041ece5f0de90fbea.pdf
+0a11b82aa207d43d1b4c0452007e9388a786be12,http://pdfs.semanticscholar.org/0a11/b82aa207d43d1b4c0452007e9388a786be12.pdf
+0a1138276c52c734b67b30de0bf3f76b0351f097,https://ibug.doc.ic.ac.uk/media/uploads/documents/georgakis_dica.pdf
+0aa405447a8797e509521f0570e4679a42fdac9b,http://mplab.ucsd.edu/~jake/AISeminar26Sep2011.pdf
+0ae9cc6a06cfd03d95eee4eca9ed77b818b59cb7,http://pdfs.semanticscholar.org/0ae9/cc6a06cfd03d95eee4eca9ed77b818b59cb7.pdf
+0acf23485ded5cb9cd249d1e4972119239227ddb,http://pdfs.semanticscholar.org/507e/2bad4851f04a686ae6e964e15bbef28583e9.pdf
+0ad4a814b30e096ad0e027e458981f812c835aa0,http://arxiv.org/pdf/1602.01827v1.pdf
+6448d23f317babb8d5a327f92e199aaa45f0efdc,http://pdfs.semanticscholar.org/6448/d23f317babb8d5a327f92e199aaa45f0efdc.pdf
+6412d8bbcc01f595a2982d6141e4b93e7e982d0f,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w6/papers/Kang_Deep_Convolutional_Neural_CVPR_2017_paper.pdf
+6409b8879c7e61acf3ca17bcc62f49edca627d4c,http://pdfs.semanticscholar.org/6409/b8879c7e61acf3ca17bcc62f49edca627d4c.pdf
+64153df77fe137b7c6f820a58f0bdb4b3b1a879b,http://pdfs.semanticscholar.org/6415/3df77fe137b7c6f820a58f0bdb4b3b1a879b.pdf
+649eb674fc963ce25e4e8ce53ac7ee20500fb0e3,http://chenlab.ece.cornell.edu/Publication/Kuan-Chuan/WACV16.pdf
+642c66df8d0085d97dc5179f735eed82abf110d0,http://research.microsoft.com/users/leizhang/Paper/CVPR05-Shuicheng-Coupled.pdf
+6459f1e67e1ea701b8f96177214583b0349ed964,http://vision.ece.ucsb.edu/publications/karthik_icip2011.pdf
+64cf86ba3b23d3074961b485c16ecb99584401de,http://pdfs.semanticscholar.org/b54a/54a2f33c24123c6943597462ef02928ec99f.pdf
+6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4,http://arxiv.org/pdf/1411.7766v2.pdf
+64cf1cda80a23ed6fc1c8e66065614ef7bdeadf3,http://www0.cs.ucl.ac.uk/staff/s.prince/Papers/PAMI_LIV.pdf
+6479b61ea89e9d474ffdefa71f068fbcde22cc44,http://pdfs.semanticscholar.org/6479/b61ea89e9d474ffdefa71f068fbcde22cc44.pdf
+64e75f53ff3991099c3fb72ceca55b76544374e5,http://pdfs.semanticscholar.org/eb48/804eefe4c61f62178d2a83a9ae0097091897.pdf
+64f9519f20acdf703984f02e05fd23f5e2451977,http://arxiv.org/pdf/1509.01343v1.pdf
+641f34deb3bdd123c6b6e7b917519c3e56010cb7,https://pdfs.semanticscholar.org/878d/68c5d016a0a63f328d72adda6b135432b66d.pdf
+64782a2bc5da11b1b18ca20cecf7bdc26a538d68,http://pdfs.semanticscholar.org/6478/2a2bc5da11b1b18ca20cecf7bdc26a538d68.pdf
+6462ef39ca88f538405616239471a8ea17d76259,http://pdfs.semanticscholar.org/6462/ef39ca88f538405616239471a8ea17d76259.pdf
+64d5772f44efe32eb24c9968a3085bc0786bfca7,http://pdfs.semanticscholar.org/64d5/772f44efe32eb24c9968a3085bc0786bfca7.pdf
+90d735cffd84e8f2ae4d0c9493590f3a7d99daf1,http://pdfs.semanticscholar.org/90d7/35cffd84e8f2ae4d0c9493590f3a7d99daf1.pdf
+90298f9f80ebe03cb8b158fd724551ad711d4e71,http://pdfs.semanticscholar.org/9029/8f9f80ebe03cb8b158fd724551ad711d4e71.pdf
+900207b3bc3a4e5244cae9838643a9685a84fee0,http://pdfs.semanticscholar.org/9002/07b3bc3a4e5244cae9838643a9685a84fee0.pdf
+90fb58eeb32f15f795030c112f5a9b1655ba3624,http://pdfs.semanticscholar.org/90fb/58eeb32f15f795030c112f5a9b1655ba3624.pdf
+90b7619eabe94731722ae884d0802256462457dc,https://arxiv.org/pdf/1511.09319v1.pdf
+90c4f15f1203a3a8a5bf307f8641ba54172ead30,http://pdfs.semanticscholar.org/90c4/f15f1203a3a8a5bf307f8641ba54172ead30.pdf
+902114feaf33deac209225c210bbdecbd9ef33b1,http://pdfs.semanticscholar.org/b5b0/8aaf56df40260abea890813503003485bda3.pdf
+90ad0daa279c3e30b360f9fe9371293d68f4cebf,http://pdfs.semanticscholar.org/90ad/0daa279c3e30b360f9fe9371293d68f4cebf.pdf
+90a754f597958a2717862fbaa313f67b25083bf9,http://pdfs.semanticscholar.org/90a7/54f597958a2717862fbaa313f67b25083bf9.pdf
+90dd2a53236b058c79763459b9d8a7ba5e58c4f1,http://pdfs.semanticscholar.org/90dd/2a53236b058c79763459b9d8a7ba5e58c4f1.pdf
+90cb074a19c5e7d92a1c0d328a1ade1295f4f311,http://pdfs.semanticscholar.org/90cb/074a19c5e7d92a1c0d328a1ade1295f4f311.pdf
+90b11e095c807a23f517d94523a4da6ae6b12c76,https://arxiv.org/pdf/1609.08475v1.pdf
+90c2d4d9569866a0b930e91713ad1da01c2a6846,http://pdfs.semanticscholar.org/90c2/d4d9569866a0b930e91713ad1da01c2a6846.pdf
+907475a4febf3f1d4089a3e775ea018fbec895fe,http://pdfs.semanticscholar.org/9074/75a4febf3f1d4089a3e775ea018fbec895fe.pdf
+9028fbbd1727215010a5e09bc5758492211dec19,http://pdfs.semanticscholar.org/9028/fbbd1727215010a5e09bc5758492211dec19.pdf
+bff77a3b80f40cefe79550bf9e220fb82a74c084,http://pdfs.semanticscholar.org/bff7/7a3b80f40cefe79550bf9e220fb82a74c084.pdf
+bf03f0fe8f3ba5b118bdcbb935bacb62989ecb11,http://pdfs.semanticscholar.org/bf03/f0fe8f3ba5b118bdcbb935bacb62989ecb11.pdf
+bf1e0279a13903e1d43f8562aaf41444afca4fdc,http://pdfs.semanticscholar.org/bf1e/0279a13903e1d43f8562aaf41444afca4fdc.pdf
+bf0f0eb0fb31ee498da4ae2ca9b467f730ea9103,http://pdfs.semanticscholar.org/bf0f/0eb0fb31ee498da4ae2ca9b467f730ea9103.pdf
+bf3f8726f2121f58b99b9e7287f7fbbb7ab6b5f5,https://ubicomp-mental-health.github.io/papers/2017/perception-syeda.pdf
+bf4825474673246ae855979034c8ffdb12c80a98,http://pdfs.semanticscholar.org/bf48/25474673246ae855979034c8ffdb12c80a98.pdf
+bf8a520533f401347e2f55da17383a3e567ef6d8,http://pdfs.semanticscholar.org/bf8a/520533f401347e2f55da17383a3e567ef6d8.pdf
+bfb98423941e51e3cd067cb085ebfa3087f3bfbe,http://pdfs.semanticscholar.org/bfb9/8423941e51e3cd067cb085ebfa3087f3bfbe.pdf
+bffbd04ee5c837cd919b946fecf01897b2d2d432,http://pdfs.semanticscholar.org/bffb/d04ee5c837cd919b946fecf01897b2d2d432.pdf
+d3424761e06a8f5f3c1f042f1f1163a469872129,http://pdfs.semanticscholar.org/d342/4761e06a8f5f3c1f042f1f1163a469872129.pdf
+d33b26794ea6d744bba7110d2d4365b752d7246f,http://pdfs.semanticscholar.org/d33b/26794ea6d744bba7110d2d4365b752d7246f.pdf
+d3b73e06d19da6b457924269bb208878160059da,http://pdfs.semanticscholar.org/d3b7/3e06d19da6b457924269bb208878160059da.pdf
+d3e04963ff42284c721f2bc6a90b7a9e20f0242f,http://pdfs.semanticscholar.org/d3e0/4963ff42284c721f2bc6a90b7a9e20f0242f.pdf
+d3d71a110f26872c69cf25df70043f7615edcf92,https://www.cise.ufl.edu/~dihong/assets/07094272.pdf
+d35c82588645b94ce3f629a0b98f6a531e4022a3,http://pdfs.semanticscholar.org/d35c/82588645b94ce3f629a0b98f6a531e4022a3.pdf
+d394bd9fbaad1f421df8a49347d4b3fca307db83,http://www.eecs.qmul.ac.uk/~sgg/papers/ShanEtAl_AVSS05.pdf
+d3b550e587379c481392fb07f2cbbe11728cf7a6,http://pdfs.semanticscholar.org/d3b5/50e587379c481392fb07f2cbbe11728cf7a6.pdf
+d30050cfd16b29e43ed2024ae74787ac0bbcf2f7,http://pdfs.semanticscholar.org/d300/50cfd16b29e43ed2024ae74787ac0bbcf2f7.pdf
+d3c004125c71942846a9b32ae565c5216c068d1e,http://pdfs.semanticscholar.org/d3c0/04125c71942846a9b32ae565c5216c068d1e.pdf
+d350a9390f0818703f886138da27bf8967fe8f51,http://mi.informatik.uni-siegen.de/publications/shahlaei_icip2016.pdf
+d41c11ebcb06c82b7055e2964914b9af417abfb2,http://pdfs.semanticscholar.org/d41c/11ebcb06c82b7055e2964914b9af417abfb2.pdf
+d46fda4b49bbc219e37ef6191053d4327e66c74b,http://pdfs.semanticscholar.org/d46f/da4b49bbc219e37ef6191053d4327e66c74b.pdf
+d448d67c6371f9abf533ea0f894ef2f022b12503,http://pdfs.semanticscholar.org/d448/d67c6371f9abf533ea0f894ef2f022b12503.pdf
+d4c7d1a7a03adb2338704d2be7467495f2eb6c7b,http://pdfs.semanticscholar.org/d4c7/d1a7a03adb2338704d2be7467495f2eb6c7b.pdf
+d4001826cc6171c821281e2771af3a36dd01ffc0,http://pdfs.semanticscholar.org/d400/1826cc6171c821281e2771af3a36dd01ffc0.pdf
+d46b4e6871fc9974542215f001e92e3035aa08d9,http://pdfs.semanticscholar.org/d46b/4e6871fc9974542215f001e92e3035aa08d9.pdf
+d40cd10f0f3e64fd9b0c2728089e10e72bea9616,http://pdfs.semanticscholar.org/d40c/d10f0f3e64fd9b0c2728089e10e72bea9616.pdf
+d4ebf0a4f48275ecd8dbc2840b2a31cc07bd676d,http://pdfs.semanticscholar.org/d4eb/f0a4f48275ecd8dbc2840b2a31cc07bd676d.pdf
+d46e793b945c4f391031656357625e902c4405e8,http://140.118.9.222/publications/journal/faceoff.pdf
+d4c2d26523f577e2d72fc80109e2540c887255c8,http://pdfs.semanticscholar.org/d4c2/d26523f577e2d72fc80109e2540c887255c8.pdf
+d4b88be6ce77164f5eea1ed2b16b985c0670463a,http://pdfs.semanticscholar.org/d4b8/8be6ce77164f5eea1ed2b16b985c0670463a.pdf
+d44ca9e7690b88e813021e67b855d871cdb5022f,http://pdfs.semanticscholar.org/d44c/a9e7690b88e813021e67b855d871cdb5022f.pdf
+baaaf73ec28226d60d923bc639f3c7d507345635,http://pdfs.semanticscholar.org/baaa/f73ec28226d60d923bc639f3c7d507345635.pdf
+ba2bbef34f05551291410103e3de9e82fdf9dddd,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Guo_A_Study_on_2014_CVPR_paper.pdf
+bafb8812817db7445fe0e1362410a372578ec1fc,http://www.cin.ufpe.br/~rps/Artigos/Image-Quality-Based%20Adaptive%20Face%20Recognition.pdf
+baa0fe4d0ac0c7b664d4c4dd00b318b6d4e09143,http://pdfs.semanticscholar.org/baa0/fe4d0ac0c7b664d4c4dd00b318b6d4e09143.pdf
+ba99c37a9220e08e1186f21cab11956d3f4fccc2,https://arxiv.org/pdf/1609.08677v1.pdf
+ba816806adad2030e1939450226c8647105e101c,http://pdfs.semanticscholar.org/ba81/6806adad2030e1939450226c8647105e101c.pdf
+badcfb7d4e2ef0d3e332a19a3f93d59b4f85668e,http://pdfs.semanticscholar.org/badc/fb7d4e2ef0d3e332a19a3f93d59b4f85668e.pdf
+ba8a99d35aee2c4e5e8a40abfdd37813bfdd0906,http://pdfs.semanticscholar.org/ba8a/99d35aee2c4e5e8a40abfdd37813bfdd0906.pdf
+bac11ce0fb3e12c466f7ebfb6d036a9fe62628ea,http://pdfs.semanticscholar.org/bac1/1ce0fb3e12c466f7ebfb6d036a9fe62628ea.pdf
+ba29ba8ec180690fca702ad5d516c3e43a7f0bb8,http://pdfs.semanticscholar.org/ba29/ba8ec180690fca702ad5d516c3e43a7f0bb8.pdf
+bab88235a30e179a6804f506004468aa8c28ce4f,http://pdfs.semanticscholar.org/bab8/8235a30e179a6804f506004468aa8c28ce4f.pdf
+a065080353d18809b2597246bb0b48316234c29a,http://pdfs.semanticscholar.org/a065/080353d18809b2597246bb0b48316234c29a.pdf
+a0f94e9400938cbd05c4b60b06d9ed58c3458303,http://people.ee.duke.edu/~lcarin/Hoey_Little07.pdf
+a0f193c86e3dd7e0020c0de3ec1e24eaff343ce4,http://pdfs.semanticscholar.org/a0f1/93c86e3dd7e0020c0de3ec1e24eaff343ce4.pdf
+a0dc68c546e0fc72eb0d9ca822cf0c9ccb4b4c4f,http://www.cs.columbia.edu/~neeraj/base/papers/nk_ijcb2011_fusion.pdf
+a0021e3bbf942a88e13b67d83db7cf52e013abfd,http://pdfs.semanticscholar.org/a002/1e3bbf942a88e13b67d83db7cf52e013abfd.pdf
+a0d6390dd28d802152f207940c7716fe5fae8760,http://pdfs.semanticscholar.org/a0d6/390dd28d802152f207940c7716fe5fae8760.pdf
+a0fb5b079dd1ee5ac6ac575fe29f4418fdb0e670,http://webhost.uoradea.ro/ibuciu/ISCAS2006_Buciu.pdf
+a02f0aad91c2d88b49c443e1e39c3acfc067a705,http://www.cs.columbia.edu/~wfan/PAPERS/SMC10cher.pdf
+a0dfb8aae58bd757b801e2dcb717a094013bc178,http://pdfs.semanticscholar.org/a0df/b8aae58bd757b801e2dcb717a094013bc178.pdf
+a03cfd5c0059825c87d51f5dbf12f8a76fe9ff60,http://pdfs.semanticscholar.org/ac3b/033fd24913c31778cd4cb2d013239315d7a9.pdf
+a06b6d30e2b31dc600f622ab15afe5e2929581a7,https://ibug.doc.ic.ac.uk/media/uploads/documents/2209.pdf
+a090d61bfb2c3f380c01c0774ea17929998e0c96,http://iitlab.bit.edu.cn/mcislab/~jiayunde/pdf/CVPR2012_BrickIllumDimension.pdf
+a0e7f8771c7d83e502d52c276748a33bae3d5f81,http://pdfs.semanticscholar.org/a0e7/f8771c7d83e502d52c276748a33bae3d5f81.pdf
+a0061dae94d916f60a5a5373088f665a1b54f673,http://pdfs.semanticscholar.org/a006/1dae94d916f60a5a5373088f665a1b54f673.pdf
+a0848d7b1bb43f4b4f1b4016e58c830f40944817,http://lhncbc.nlm.nih.gov/system/files/pub8893.pdf
+a000149e83b09d17e18ed9184155be140ae1266e,http://pdfs.semanticscholar.org/a000/149e83b09d17e18ed9184155be140ae1266e.pdf
+a01f9461bc8cf8fe40c26d223ab1abea5d8e2812,http://pdfs.semanticscholar.org/a01f/9461bc8cf8fe40c26d223ab1abea5d8e2812.pdf
+a702fc36f0644a958c08de169b763b9927c175eb,http://www.apsipa.org/proceedings_2013/papers/170_PID2935307.pdf
+a7267bc781a4e3e79213bb9c4925dd551ea1f5c4,http://pdfs.semanticscholar.org/a726/7bc781a4e3e79213bb9c4925dd551ea1f5c4.pdf
+a784a0d1cea26f18626682ab108ce2c9221d1e53,http://openaccess.thecvf.com/content_ICCV_2017/papers/Agustsson_Anchored_Regression_Networks_ICCV_2017_paper.pdf
+a74251efa970b92925b89eeef50a5e37d9281ad0,http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf
+a7d23c699a5ae4ad9b8a5cbb8c38e5c3b5f5fb51,http://pdfs.semanticscholar.org/a7d2/3c699a5ae4ad9b8a5cbb8c38e5c3b5f5fb51.pdf
+a70e36daf934092f40a338d61e0fe27be633f577,http://pdfs.semanticscholar.org/a70e/36daf934092f40a338d61e0fe27be633f577.pdf
+a7191958e806fce2505a057196ccb01ea763b6ea,http://pdfs.semanticscholar.org/a719/1958e806fce2505a057196ccb01ea763b6ea.pdf
+a7e1327bd76945a315f2869bfae1ce55bb94d165,http://pdfs.semanticscholar.org/a7e1/327bd76945a315f2869bfae1ce55bb94d165.pdf
+a7a6eb53bee5e2224f2ecd56a14e3a5a717e55b9,http://pdfs.semanticscholar.org/a7a6/eb53bee5e2224f2ecd56a14e3a5a717e55b9.pdf
+a79704c1ce7bf10c8753a8f51437ccbc61947d03,http://www.eecs.qmul.ac.uk/~cfshan/papers/shan-etal-icip05.pdf
+a7c39a4e9977a85673892b714fc9441c959bf078,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR_2007/data/papers/workshops/Biometrics/papers/06-p71.pdf
+a75edf8124f5b52690c08ff35b0c7eb8355fe950,http://pdfs.semanticscholar.org/a75e/df8124f5b52690c08ff35b0c7eb8355fe950.pdf
+a75ee7f4c4130ef36d21582d5758f953dba03a01,http://pdfs.semanticscholar.org/a75e/e7f4c4130ef36d21582d5758f953dba03a01.pdf
+a703d51c200724517f099ee10885286ddbd8b587,http://pdfs.semanticscholar.org/a703/d51c200724517f099ee10885286ddbd8b587.pdf
+a75dfb5a839f0eb4b613d150f54a418b7812aa90,https://arxiv.org/pdf/1708.02314v1.pdf
+b88ceded6467e9b286f048bb1b17be5998a077bd,http://pdfs.semanticscholar.org/b88c/eded6467e9b286f048bb1b17be5998a077bd.pdf
+b871d1b8495025ff8a6255514ed39f7765415935,http://pdfs.semanticscholar.org/b871/d1b8495025ff8a6255514ed39f7765415935.pdf
+b88d5e12089f6f598b8c72ebeffefc102cad1fc0,http://www.cv-foundation.org/openaccess/content_cvpr_2016_workshops/w24/papers/Wang_Robust_2DPCA_and_CVPR_2016_paper.pdf
+b84b7b035c574727e4c30889e973423fe15560d7,http://pdfs.semanticscholar.org/b84b/7b035c574727e4c30889e973423fe15560d7.pdf
+b8dba0504d6b4b557d51a6cf4de5507141db60cf,http://pdfs.semanticscholar.org/b8db/a0504d6b4b557d51a6cf4de5507141db60cf.pdf
+b89862f38fff416d2fcda389f5c59daba56241db,http://pdfs.semanticscholar.org/b898/62f38fff416d2fcda389f5c59daba56241db.pdf
+b8caf1b1bc3d7a26a91574b493c502d2128791f6,http://pdfs.semanticscholar.org/b8ca/f1b1bc3d7a26a91574b493c502d2128791f6.pdf
+b8084d5e193633462e56f897f3d81b2832b72dff,http://pdfs.semanticscholar.org/b808/4d5e193633462e56f897f3d81b2832b72dff.pdf
+b8378ab83bc165bc0e3692f2ce593dcc713df34a,http://cmp.felk.cvut.cz/ftp/articles/cech/Cech-ICPR-2014.pdf
+b85580ff2d8d8be0a2c40863f04269df4cd766d9,http://pdfs.semanticscholar.org/b855/80ff2d8d8be0a2c40863f04269df4cd766d9.pdf
+b87b0fa1ac0aad0ca563844daecaeecb2df8debf,http://users.cs.cf.ac.uk/Paul.Rosin/resources/papers/portraits-CAe.pdf
+b87db5ac17312db60e26394f9e3e1a51647cca66,http://pdfs.semanticscholar.org/b87d/b5ac17312db60e26394f9e3e1a51647cca66.pdf
+b81cae2927598253da37954fb36a2549c5405cdb,http://pdfs.semanticscholar.org/d892/753827950a227179b691e6df85820ab7c417.pdf
+b8a829b30381106b806066d40dd372045d49178d,http://gavrila.net/tits15.pdf
+b191aa2c5b8ece06c221c3a4a0914e8157a16129,http://pdfs.semanticscholar.org/b191/aa2c5b8ece06c221c3a4a0914e8157a16129.pdf
+b13bf657ca6d34d0df90e7ae739c94a7efc30dc3,http://pdfs.semanticscholar.org/b13b/f657ca6d34d0df90e7ae739c94a7efc30dc3.pdf
+b13a882e6168afc4058fe14cc075c7e41434f43e,http://pdfs.semanticscholar.org/b13a/882e6168afc4058fe14cc075c7e41434f43e.pdf
+b1665e1ddf9253dcaebecb48ac09a7ab4095a83e,http://pdfs.semanticscholar.org/b166/5e1ddf9253dcaebecb48ac09a7ab4095a83e.pdf
+b16580d27bbf4e17053f2f91bc1d0be12045e00b,http://pdfs.semanticscholar.org/b165/80d27bbf4e17053f2f91bc1d0be12045e00b.pdf
+b11bb6bd63ee6f246d278dd4edccfbe470263803,http://pdfs.semanticscholar.org/b11b/b6bd63ee6f246d278dd4edccfbe470263803.pdf
+b171f9e4245b52ff96790cf4f8d23e822c260780,http://pdfs.semanticscholar.org/b171/f9e4245b52ff96790cf4f8d23e822c260780.pdf
+b1a3b19700b8738b4510eecf78a35ff38406df22,http://pdfs.semanticscholar.org/b1a3/b19700b8738b4510eecf78a35ff38406df22.pdf
+b1301c722886b6028d11e4c2084ee96466218be4,http://pdfs.semanticscholar.org/b130/1c722886b6028d11e4c2084ee96466218be4.pdf
+b1c5581f631dba78927aae4f86a839f43646220c,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553753.pdf
+b18858ad6ec88d8b443dffd3e944e653178bc28b,http://pdfs.semanticscholar.org/b188/58ad6ec88d8b443dffd3e944e653178bc28b.pdf
+b1444b3bf15eec84f6d9a2ade7989bb980ea7bd1,http://pdfs.semanticscholar.org/b144/4b3bf15eec84f6d9a2ade7989bb980ea7bd1.pdf
+b133b2d7df9b848253b9d75e2ca5c68e21eba008,http://pdfs.semanticscholar.org/c2c1/ab9eac2907e15618d80f5ce0c9b60f2c36cc.pdf
+b1df214e0f1c5065f53054195cd15012e660490a,http://pdfs.semanticscholar.org/b1df/214e0f1c5065f53054195cd15012e660490a.pdf
+b185f0a39384ceb3c4923196aeed6d68830a069f,http://pdfs.semanticscholar.org/b185/f0a39384ceb3c4923196aeed6d68830a069f.pdf
+b19e83eda4a602abc5a8ef57467c5f47f493848d,http://www.cs.jhu.edu/~hwang/papers/SPL10.pdf
+b1429e4d3dd3412e92a37d2f9e0721ea719a9b9e,http://www.hamedkiani.com/uploads/5/1/8/8/51882963/176.pdf
+ddf55fc9cf57dabf4eccbf9daab52108df5b69aa,http://pdfs.semanticscholar.org/ddf5/5fc9cf57dabf4eccbf9daab52108df5b69aa.pdf
+dda35768681f74dafd02a667dac2e6101926a279,http://www.cim.mcgill.ca/~clark/vmrl/web-content/papers/jjclark_icip_2014.pdf
+dd0760bda44d4e222c0a54d41681f97b3270122b,http://pdfs.semanticscholar.org/dd07/60bda44d4e222c0a54d41681f97b3270122b.pdf
+ddea3c352f5041fb34433b635399711a90fde0e8,http://pdfs.semanticscholar.org/fc6b/2eb9253f33197b1ba8a045525487a16e8756.pdf
+dd033d4886f2e687b82d893a2c14dae02962ea70,http://pdfs.semanticscholar.org/dd03/3d4886f2e687b82d893a2c14dae02962ea70.pdf
+ddf099f0e0631da4a6396a17829160301796151c,http://pdfs.semanticscholar.org/ddf0/99f0e0631da4a6396a17829160301796151c.pdf
+dd0a334b767e0065c730873a95312a89ef7d1c03,http://pdfs.semanticscholar.org/dd0a/334b767e0065c730873a95312a89ef7d1c03.pdf
+dd8ad6ce8701d4b09be460a6cf058fcd5318c700,https://www.researchgate.net/profile/Daniel_Riccio/publication/260652311_Robust_Face_Recognition_for_Uncontrolled_Pose_and_Illumination_Changes/links/5402f4450cf23d9765a55fbc.pdf
+dd2f6a1ba3650075245a422319d86002e1e87808,http://pdfs.semanticscholar.org/dd2f/6a1ba3650075245a422319d86002e1e87808.pdf
+ddaa8add8528857712424fd57179e5db6885df7c,http://pdfs.semanticscholar.org/ff63/a8e8e462d15c9d59ac66025a043d3c299aea.pdf
+dd8d53e67668067fd290eb500d7dfab5b6f730dd,http://mmlab.ie.cuhk.edu.hk/archive/2007/IFS07_subspace.pdf
+dd600e7d6e4443ebe87ab864d62e2f4316431293,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553774.pdf
+dcb44fc19c1949b1eda9abe998935d567498467d,http://pdfs.semanticscholar.org/dcb4/4fc19c1949b1eda9abe998935d567498467d.pdf
+dcc38db6c885444694f515d683bbb50521ff3990,http://pdfs.semanticscholar.org/dcc3/8db6c885444694f515d683bbb50521ff3990.pdf
+dc5cde7e4554db012d39fc41ac8580f4f6774045,http://pdfs.semanticscholar.org/dc5c/de7e4554db012d39fc41ac8580f4f6774045.pdf
+dc7df544d7c186723d754e2e7b7217d38a12fcf7,http://pdfs.semanticscholar.org/dc7d/f544d7c186723d754e2e7b7217d38a12fcf7.pdf
+dc77287bb1fcf64358767dc5b5a8a79ed9abaa53,http://pdfs.semanticscholar.org/dc77/287bb1fcf64358767dc5b5a8a79ed9abaa53.pdf
+dc2e805d0038f9d1b3d1bc79192f1d90f6091ecb,http://pdfs.semanticscholar.org/dc2e/805d0038f9d1b3d1bc79192f1d90f6091ecb.pdf
+dced05d28f353be971ea2c14517e85bc457405f3,http://pdfs.semanticscholar.org/dced/05d28f353be971ea2c14517e85bc457405f3.pdf
+dcce3d7e8d59041e84fcdf4418702fb0f8e35043,http://www.cfar.umd.edu/~rama/Conf.pdf-files/zhou04cvpr-10.pdf
+dce3dff9216d63c4a77a2fcb0ec1adf6d2489394,http://pdfs.semanticscholar.org/dce3/dff9216d63c4a77a2fcb0ec1adf6d2489394.pdf
+b6f758be954d34817d4ebaa22b30c63a4b8ddb35,https://arxiv.org/pdf/1703.04835v1.pdf
+b62571691a23836b35719fc457e093b0db187956,http://pdfs.semanticscholar.org/b625/71691a23836b35719fc457e093b0db187956.pdf
+b69b239217d4e9a20fe4fe1417bf26c94ded9af9,http://pdfs.semanticscholar.org/b69b/239217d4e9a20fe4fe1417bf26c94ded9af9.pdf
+b6c047ab10dd86b1443b088029ffe05d79bbe257,http://pdfs.semanticscholar.org/b6c0/47ab10dd86b1443b088029ffe05d79bbe257.pdf
+b6052dc718c72f2506cfd9d29422642ecf3992ef,http://pdfs.semanticscholar.org/b605/2dc718c72f2506cfd9d29422642ecf3992ef.pdf
+b6145d3268032da70edc9cfececa1f9ffa4e3f11,http://cnl.salk.edu/~zhafed/papers/fr_IJCV_2001.pdf
+b6c53891dff24caa1f2e690552a1a5921554f994,http://pdfs.semanticscholar.org/b6c5/3891dff24caa1f2e690552a1a5921554f994.pdf
+b613b30a7cbe76700855479a8d25164fa7b6b9f1,http://www.cs.ucf.edu/~kienhua/classes/COP6731/Reading/AffectiveComputing.pdf
+b64cfb39840969b1c769e336a05a30e7f9efcd61,http://pdfs.semanticscholar.org/fde2/b8943eb429d35e649c56ce95658b44c49243.pdf
+b689d344502419f656d482bd186a5ee6b0140891,http://pdfs.semanticscholar.org/b689/d344502419f656d482bd186a5ee6b0140891.pdf
+b656abc4d1e9c8dc699906b70d6fcd609fae8182,http://pdfs.semanticscholar.org/b656/abc4d1e9c8dc699906b70d6fcd609fae8182.pdf
+b6a01cd4572b5f2f3a82732ef07d7296ab0161d3,http://pdfs.semanticscholar.org/b6a0/1cd4572b5f2f3a82732ef07d7296ab0161d3.pdf
+a9791544baa14520379d47afd02e2e7353df87e5,http://pdfs.semanticscholar.org/a979/1544baa14520379d47afd02e2e7353df87e5.pdf
+a9eb6e436cfcbded5a9f4b82f6b914c7f390adbd,http://pdfs.semanticscholar.org/a9eb/6e436cfcbded5a9f4b82f6b914c7f390adbd.pdf
+a955033ca6716bf9957b362b77092592461664b4,http://pdfs.semanticscholar.org/a955/033ca6716bf9957b362b77092592461664b4.pdf
+a956ff50ca958a3619b476d16525c6c3d17ca264,http://ce.sharif.edu/~amiryanj/downloads/novel_bidirectional_nn_for_face_recognition.pdf
+a93781e6db8c03668f277676d901905ef44ae49f,http://pdfs.semanticscholar.org/a937/81e6db8c03668f277676d901905ef44ae49f.pdf
+a947c21a15fb0a02378c36271e1addf6b6e110eb,http://www.researchgate.net/profile/Bryan_Conroy/publication/220734216_The_grouped_two-sided_orthogonal_Procrustes_problem/links/02e7e52541c3f27987000000.pdf
+a9fc23d612e848250d5b675e064dba98f05ad0d9,http://pdfs.semanticscholar.org/a9fc/23d612e848250d5b675e064dba98f05ad0d9.pdf
+a9adb6dcccab2d45828e11a6f152530ba8066de6,http://pdfs.semanticscholar.org/a9ad/b6dcccab2d45828e11a6f152530ba8066de6.pdf
+a967426ec9b761a989997d6a213d890fc34c5fe3,http://vision.ucsd.edu/sites/default/files/043-wacv.pdf
+a9286519e12675302b1d7d2fe0ca3cc4dc7d17f6,http://pdfs.semanticscholar.org/a928/6519e12675302b1d7d2fe0ca3cc4dc7d17f6.pdf
+a949b8700ca6ba96ee40f75dfee1410c5bbdb3db,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Haase_Instance-weighted_Transfer_Learning_2014_CVPR_paper.pdf
+a9be20954e9177d8b2bc39747acdea4f5496f394,http://acsweb.ucsd.edu/~yuw176/report/cvpr_2016.pdf
+d5afd7b76f1391321a1340a19ba63eec9e0f9833,http://pdfs.semanticscholar.org/d5af/d7b76f1391321a1340a19ba63eec9e0f9833.pdf
+d5375f51eeb0c6eff71d6c6ad73e11e9353c1f12,http://pdfs.semanticscholar.org/d537/5f51eeb0c6eff71d6c6ad73e11e9353c1f12.pdf
+d50c6d22449cc9170ab868b42f8c72f8d31f9b6c,http://pdfs.semanticscholar.org/d50c/6d22449cc9170ab868b42f8c72f8d31f9b6c.pdf
+d522c162bd03e935b1417f2e564d1357e98826d2,http://pdfs.semanticscholar.org/d522/c162bd03e935b1417f2e564d1357e98826d2.pdf
+d59f18fcb07648381aa5232842eabba1db52383e,http://pdfs.semanticscholar.org/d59f/18fcb07648381aa5232842eabba1db52383e.pdf
+d5fa9d98c8da54a57abf353767a927d662b7f026,http://pdfs.semanticscholar.org/f15e/9712b8731e1f5fd9566aca513edda910b5b8.pdf
+d588dd4f305cdea37add2e9bb3d769df98efe880,http://pdfs.semanticscholar.org/d588/dd4f305cdea37add2e9bb3d769df98efe880.pdf
+d5f751d31a9d2d754d0d136d5b02c24b28fb94a0,http://www.researchgate.net/profile/Marie-Francine_Moens/publication/220634584_Naming_People_in_News_Videos_with_Label_Propagation/links/0a85e52ecd01912489000000.pdf
+d5ab6aa15dad26a6ace5ab83ce62b7467a18a88e,http://pdfs.semanticscholar.org/d5ab/6aa15dad26a6ace5ab83ce62b7467a18a88e.pdf
+d5b0e73b584be507198b6665bcddeba92b62e1e5,http://pdfs.semanticscholar.org/d5b0/e73b584be507198b6665bcddeba92b62e1e5.pdf
+d56fe69cbfd08525f20679ffc50707b738b88031,http://pdfs.semanticscholar.org/d56f/e69cbfd08525f20679ffc50707b738b88031.pdf
+d50751da2997e7ebc89244c88a4d0d18405e8507,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553713.pdf
+d511e903a882658c9f6f930d6dd183007f508eda,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553766.pdf
+d50a40f2d24363809a9ac57cf7fbb630644af0e5,http://pdfs.semanticscholar.org/d50a/40f2d24363809a9ac57cf7fbb630644af0e5.pdf
+d5b5c63c5611d7b911bc1f7e161a0863a34d44ea,http://pdfs.semanticscholar.org/d5b5/c63c5611d7b911bc1f7e161a0863a34d44ea.pdf
+d59404354f84ad98fa809fd1295608bf3d658bdc,http://pdfs.semanticscholar.org/d594/04354f84ad98fa809fd1295608bf3d658bdc.pdf
+d57dca4413ad4f33c97ae06a5a7fc86dc5a75f8b,http://iplab.dmi.unict.it/sites/default/files/_11.pdf
+d5e1173dcb2a51b483f86694889b015d55094634,http://pdfs.semanticscholar.org/d5e1/173dcb2a51b483f86694889b015d55094634.pdf
+d28d32af7ef9889ef9cb877345a90ea85e70f7f1,http://multicomp.cs.cmu.edu/wp-content/uploads/2017/10/2017_FG_Kim_Local.pdf
+d28d697b578867500632b35b1b19d3d76698f4a9,http://pdfs.semanticscholar.org/d28d/697b578867500632b35b1b19d3d76698f4a9.pdf
+d231a81b38fde73bdbf13cfec57d6652f8546c3c,http://pdfs.semanticscholar.org/d231/a81b38fde73bdbf13cfec57d6652f8546c3c.pdf
+d22785eae6b7503cb16402514fd5bd9571511654,http://pdfs.semanticscholar.org/d227/85eae6b7503cb16402514fd5bd9571511654.pdf
+d24dafe10ec43ac8fb98715b0e0bd8e479985260,http://pdfs.semanticscholar.org/d24d/afe10ec43ac8fb98715b0e0bd8e479985260.pdf
+d29eec5e047560627c16803029d2eb8a4e61da75,http://pdfs.semanticscholar.org/d29e/ec5e047560627c16803029d2eb8a4e61da75.pdf
+d280bcbb387b1d548173917ae82cb6944e3ceca6,https://cse.sc.edu/~mengz/papers/ICIP2014.pdf
+d24d3370b2e7d254e999140024d8a7bddf701502,https://www.researchgate.net/profile/Thang_Hoang2/publication/252047382_SVM_classifier_based_face_detection_system_using_BDIP_and_BVLC_moments/links/53f0b8be0cf2711e0c431012.pdf
+d2cd9a7f19600370bce3ea29aba97d949fe0ceb9,http://pdfs.semanticscholar.org/d2cd/9a7f19600370bce3ea29aba97d949fe0ceb9.pdf
+d22b378fb4ef241d8d210202893518d08e0bb213,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Zhang_Random_Faces_Guided_2013_ICCV_paper.pdf
+aac39ca161dfc52aade063901f02f56d01a1693c,http://pdfs.semanticscholar.org/aac3/9ca161dfc52aade063901f02f56d01a1693c.pdf
+aadf4b077880ae5eee5dd298ab9e79a1b0114555,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W08/papers/Presti_Using_Hankel_Matrices_2015_CVPR_paper.pdf
+aa127e6b2dc0aaccfb85e93e8b557f83ebee816b,http://pdfs.semanticscholar.org/aa12/7e6b2dc0aaccfb85e93e8b557f83ebee816b.pdf
+aafb271684a52a0b23debb3a5793eb618940c5dd,http://pdfs.semanticscholar.org/aafb/271684a52a0b23debb3a5793eb618940c5dd.pdf
+aa8ef6ba6587c8a771ec4f91a0dd9099e96f6d52,http://www.gtti.it/gtti13/Presentazioni_GTTI13/25_Giugno/Sessioni_Scientifiche_Short_Presentation/Piacenza.pdf
+aa912375eaf50439bec23de615aa8a31a3395ad3,http://pdfs.semanticscholar.org/aa91/2375eaf50439bec23de615aa8a31a3395ad3.pdf
+aa52910c8f95e91e9fc96a1aefd406ffa66d797d,http://pdfs.semanticscholar.org/aa52/910c8f95e91e9fc96a1aefd406ffa66d797d.pdf
+aaeb8b634bb96a372b972f63ec1dc4db62e7b62a,http://pdfs.semanticscholar.org/aaeb/8b634bb96a372b972f63ec1dc4db62e7b62a.pdf
+aa0c30bd923774add6e2f27ac74acd197b9110f2,http://research.gold.ac.uk/20200/1/dplda.pdf
+aaa4c625f5f9b65c7f3df5c7bfe8a6595d0195a5,http://pdfs.semanticscholar.org/aaa4/c625f5f9b65c7f3df5c7bfe8a6595d0195a5.pdf
+aac934f2eed758d4a27562dae4e9c5415ff4cdb7,http://pdfs.semanticscholar.org/aac9/34f2eed758d4a27562dae4e9c5415ff4cdb7.pdf
+aa331fe378056b6d6031bb8fe6676e035ed60d6d,http://pdfs.semanticscholar.org/aa33/1fe378056b6d6031bb8fe6676e035ed60d6d.pdf
+aae0e417bbfba701a1183d3d92cc7ad550ee59c3,https://staff.fnwi.uva.nl/th.gevers/pub/GeversTIP12-3.pdf
+aa577652ce4dad3ca3dde44f881972ae6e1acce7,http://pdfs.semanticscholar.org/aa57/7652ce4dad3ca3dde44f881972ae6e1acce7.pdf
+aa94f214bb3e14842e4056fdef834a51aecef39c,http://pdfs.semanticscholar.org/aa94/f214bb3e14842e4056fdef834a51aecef39c.pdf
+aac101dd321e6d2199d8c0b48c543b541c181b66,http://pdfs.semanticscholar.org/aac1/01dd321e6d2199d8c0b48c543b541c181b66.pdf
+af8fe1b602452cf7fc9ecea0fd4508ed4149834e,http://pdfs.semanticscholar.org/af8f/e1b602452cf7fc9ecea0fd4508ed4149834e.pdf
+af6e351d58dba0962d6eb1baf4c9a776eb73533f,http://pdfs.semanticscholar.org/af6e/351d58dba0962d6eb1baf4c9a776eb73533f.pdf
+aff92784567095ee526a705e21be4f42226bbaab,http://pdfs.semanticscholar.org/aff9/2784567095ee526a705e21be4f42226bbaab.pdf
+af13c355a2a14bb74847aedeafe990db3fc9cbd4,http://publications.idiap.ch/downloads/papers/2015/Chavez-Martinez_MUM2015_2015.pdf
+af0a8199328d4c806574866f419d1962def9305a,http://ttic.uchicago.edu/~smaji/papers/mr07mms.pdf
+af62621816fbbe7582a7d237ebae1a4d68fcf97d,http://pdfs.semanticscholar.org/af62/621816fbbe7582a7d237ebae1a4d68fcf97d.pdf
+af54dd5da722e104740f9b6f261df9d4688a9712,http://pdfs.semanticscholar.org/af54/dd5da722e104740f9b6f261df9d4688a9712.pdf
+afe9cfba90d4b1dbd7db1cf60faf91f24d12b286,http://pdfs.semanticscholar.org/afe9/cfba90d4b1dbd7db1cf60faf91f24d12b286.pdf
+af278274e4bda66f38fd296cfa5c07804fbc26ee,http://pdfs.semanticscholar.org/af27/8274e4bda66f38fd296cfa5c07804fbc26ee.pdf
+afef2b1d35fb807f422cfec0a370f7d08d4651d1,http://www.researchgate.net/profile/Dong_Yi3/publication/228853254_A_robust_eye_localization_method_for_low_quality_face_images/links/0912f509c4d7ec1630000000.pdf
+afc7092987f0d05f5685e9332d83c4b27612f964,http://ci2cv.net/media/papers/2011_AFGR_Chew.pdf
+b730908bc1f80b711c031f3ea459e4de09a3d324,http://ibug.doc.ic.ac.uk/media/uploads/documents/tifs_aoms.pdf
+b7426836ca364603ccab0e533891d8ac54cf2429,http://pdfs.semanticscholar.org/b742/6836ca364603ccab0e533891d8ac54cf2429.pdf
+b7cf7bb574b2369f4d7ebc3866b461634147041a,http://www.patternrecognition.cn/~zhongjin/2012/2012_yinjun_NCA.pdf
+b750b3d8c34d4e57ecdafcd5ae8a15d7fa50bc24,http://grid.hust.edu.cn/xbliu/papers/ICDM09.pdf
+b7eead8586ffe069edd190956bd338d82c69f880,http://pdfs.semanticscholar.org/b7ee/ad8586ffe069edd190956bd338d82c69f880.pdf
+b75cee96293c11fe77ab733fc1147950abbe16f9,http://pdfs.semanticscholar.org/e1a6/16674f63dd54b495d06cf1b7bd59f4cb772e.pdf
+b7f05d0771da64192f73bdb2535925b0e238d233,http://pdfs.semanticscholar.org/b7f0/5d0771da64192f73bdb2535925b0e238d233.pdf
+b755505bdd5af078e06427d34b6ac2530ba69b12,http://www.cse.msu.edu/rgroups/biometrics/Publications/Face/Maengetal_NIFaceRecognitionDistance_IJCB11.pdf
+b7b461f82c911f2596b310e2b18dd0da1d5d4491,http://mirlab.org/conference_papers/International_Conference/ICASSP%202014/papers/p2961-wang.pdf
+b7740dba37a3cbd5c832a8deb9a710a28966486a,http://pdfs.semanticscholar.org/b774/0dba37a3cbd5c832a8deb9a710a28966486a.pdf
+b73fdae232270404f96754329a1a18768974d3f6,http://pdfs.semanticscholar.org/b73f/dae232270404f96754329a1a18768974d3f6.pdf
+b7c5f885114186284c51e863b58292583047a8b4,http://pdfs.semanticscholar.org/b7c5/f885114186284c51e863b58292583047a8b4.pdf
+b73d9e1af36aabb81353f29c40ecdcbdf731dbed,http://pdfs.semanticscholar.org/b73d/9e1af36aabb81353f29c40ecdcbdf731dbed.pdf
+b747fcad32484dfbe29530a15776d0df5688a7db,http://pdfs.semanticscholar.org/b747/fcad32484dfbe29530a15776d0df5688a7db.pdf
+b7f7a4df251ff26aca83d66d6b479f1dc6cd1085,http://pdfs.semanticscholar.org/b7f7/a4df251ff26aca83d66d6b479f1dc6cd1085.pdf
+db1f48a7e11174d4a724a4edb3a0f1571d649670,http://pdfs.semanticscholar.org/db1f/48a7e11174d4a724a4edb3a0f1571d649670.pdf
+db227f72bb13a5acca549fab0dc76bce1fb3b948,http://pdfs.semanticscholar.org/e83d/6fd4502d6d31134ffddb80b6d5c752cf3123.pdf
+dbaf89ca98dda2c99157c46abd136ace5bdc33b3,http://pdfs.semanticscholar.org/dbaf/89ca98dda2c99157c46abd136ace5bdc33b3.pdf
+dbab6ac1a9516c360cdbfd5f3239a351a64adde7,http://pdfs.semanticscholar.org/dbab/6ac1a9516c360cdbfd5f3239a351a64adde7.pdf
+dbb0a527612c828d43bcb9a9c41f1bf7110b1dc8,http://pdfs.semanticscholar.org/dbb0/a527612c828d43bcb9a9c41f1bf7110b1dc8.pdf
+db93049981abca0a281918b8d0655572922553de,http://www.cs.odu.edu/~sji/papers/pdf/Ji_TKDE08.pdf
+dba493caf6647214c8c58967a8251641c2bda4c2,http://pdfs.semanticscholar.org/dba4/93caf6647214c8c58967a8251641c2bda4c2.pdf
+dbb7f37fb9b41d1aa862aaf2d2e721a470fd2c57,http://pdfs.semanticscholar.org/dbb7/f37fb9b41d1aa862aaf2d2e721a470fd2c57.pdf
+db36e682501582d1c7b903422993cf8d70bb0b42,http://pdfs.semanticscholar.org/db36/e682501582d1c7b903422993cf8d70bb0b42.pdf
+dbe0e533d715f8543bcf197f3b8e5cffa969dfc0,http://pdfs.semanticscholar.org/dbe0/e533d715f8543bcf197f3b8e5cffa969dfc0.pdf
+dbd5e9691cab2c515b50dda3d0832bea6eef79f2,http://pdfs.semanticscholar.org/dbd5/e9691cab2c515b50dda3d0832bea6eef79f2.pdf
+db82f9101f64d396a86fc2bd05b352e433d88d02,http://pdfs.semanticscholar.org/db82/f9101f64d396a86fc2bd05b352e433d88d02.pdf
+db428d03e3dfd98624c23e0462817ad17ef14493,http://pdfs.semanticscholar.org/db42/8d03e3dfd98624c23e0462817ad17ef14493.pdf
+a83fc450c124b7e640adc762e95e3bb6b423b310,http://pdfs.semanticscholar.org/b908/edadad58c604a1e4b431f69ac8ded350589a.pdf
+a8117a4733cce9148c35fb6888962f665ae65b1e,http://pdfs.semanticscholar.org/a811/7a4733cce9148c35fb6888962f665ae65b1e.pdf
+a820941eaf03077d68536732a4d5f28d94b5864a,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Zhang_Leveraging_Datasets_With_ICCV_2015_paper.pdf
+a8affc2819f7a722a41bb913dea9149ee0e23a1f,http://robotics.szpku.edu.cn/c/publication/paper/ICIP2014-gaoyuan1.pdf
+a8035ca71af8cc68b3e0ac9190a89fed50c92332,http://pdfs.semanticscholar.org/a803/5ca71af8cc68b3e0ac9190a89fed50c92332.pdf
+a88640045d13fc0207ac816b0bb532e42bcccf36,http://pdfs.semanticscholar.org/a886/40045d13fc0207ac816b0bb532e42bcccf36.pdf
+a8638a07465fe388ae5da0e8a68e62a4ee322d68,http://pdfs.semanticscholar.org/a863/8a07465fe388ae5da0e8a68e62a4ee322d68.pdf
+a8e75978a5335fd3deb04572bb6ca43dbfad4738,http://pdfs.semanticscholar.org/a8e7/5978a5335fd3deb04572bb6ca43dbfad4738.pdf
+a8583e80a455507a0f146143abeb35e769d25e4e,http://pdfs.semanticscholar.org/a858/3e80a455507a0f146143abeb35e769d25e4e.pdf
+a87e37d43d4c47bef8992ace408de0f872739efc,http://pdfs.semanticscholar.org/a87e/37d43d4c47bef8992ace408de0f872739efc.pdf
+a8c8a96b78e7b8e0d4a4a422fcb083e53ad06531,http://pdfs.semanticscholar.org/a8c8/a96b78e7b8e0d4a4a422fcb083e53ad06531.pdf
+a8748a79e8d37e395354ba7a8b3038468cb37e1f,http://www.cv-foundation.org/openaccess/content_cvpr_2016_workshops/w9/papers/Reale_Seeing_the_Forest_CVPR_2016_paper.pdf
+de8381903c579a4fed609dff3e52a1dc51154951,http://pdfs.semanticscholar.org/de83/81903c579a4fed609dff3e52a1dc51154951.pdf
+de15af84b1257211a11889b6c2adf0a2bcf59b42,http://pdfs.semanticscholar.org/de15/af84b1257211a11889b6c2adf0a2bcf59b42.pdf
+dedabf9afe2ae4a1ace1279150e5f1d495e565da,http://www.citi.sinica.edu.tw/papers/ycwang/4156-F.pdf
+de398bd8b7b57a3362c0c677ba8bf9f1d8ade583,http://www.cs.wayne.edu/~mdong/TMM16.pdf
+ded41c9b027c8a7f4800e61b7cfb793edaeb2817,http://pdfs.semanticscholar.org/ded4/1c9b027c8a7f4800e61b7cfb793edaeb2817.pdf
+defa8774d3c6ad46d4db4959d8510b44751361d8,http://pdfs.semanticscholar.org/defa/8774d3c6ad46d4db4959d8510b44751361d8.pdf
+b03b4d8b4190361ed2de66fcbb6fda0c9a0a7d89,http://pdfs.semanticscholar.org/b03b/4d8b4190361ed2de66fcbb6fda0c9a0a7d89.pdf
+b013cce42dd769db754a57351d49b7410b8e82ad,http://tlab.princeton.edu/publication_files/Rojas%20et%20al%20IEEE%202010.pdf
+b07582d1a59a9c6f029d0d8328414c7bef64dca0,http://pdfs.semanticscholar.org/b075/82d1a59a9c6f029d0d8328414c7bef64dca0.pdf
+b017963d83b3edf71e1673d7ffdec13a6d350a87,http://pdfs.semanticscholar.org/b017/963d83b3edf71e1673d7ffdec13a6d350a87.pdf
+b03d6e268cde7380e090ddaea889c75f64560891,http://pdfs.semanticscholar.org/b03d/6e268cde7380e090ddaea889c75f64560891.pdf
+b03446a2de01126e6a06eb5d526df277fa36099f,http://pdfs.semanticscholar.org/b034/46a2de01126e6a06eb5d526df277fa36099f.pdf
+b0de0892d2092c8c70aa22500fed31aa7eb4dd3f,http://arxiv.org/pdf/1504.05524.pdf
+b018fa5cb9793e260b8844ae155bd06380988584,http://pdfs.semanticscholar.org/b018/fa5cb9793e260b8844ae155bd06380988584.pdf
+b073313325b6482e22032e259d7311fb9615356c,http://alumni.cs.ucr.edu/~hli/paper/hli05tumor.pdf
+a6f81619158d9caeaa0863738ab400b9ba2d77c2,http://pdfs.semanticscholar.org/a6f8/1619158d9caeaa0863738ab400b9ba2d77c2.pdf
+a6d621a5aae983a6996849db5e6bc63fe0a234af,http://mplab.ucsd.edu/~ksikka/pain_icmi14.pdf
+a695c2240382e362262db72017ceae0365d63f8f,http://www3.nd.edu/~kwb/AggarwalBiswasFlynnBowyerWACV_2012.pdf
+a66d89357ada66d98d242c124e1e8d96ac9b37a0,http://pdfs.semanticscholar.org/a66d/89357ada66d98d242c124e1e8d96ac9b37a0.pdf
+a6d7cf29f333ea3d2aeac67cde39a73898e270b7,http://pdfs.semanticscholar.org/a6d7/cf29f333ea3d2aeac67cde39a73898e270b7.pdf
+a611c978e05d7feab01fb8a37737996ad6e88bd9,http://cbl.uh.edu/pub_files/3_Benchmarking3DPoseEstimationForFaceRecognition_ICPR2014_v8.pdf
+a608c5f8fd42af6e9bd332ab516c8c2af7063c61,http://mcl.usc.edu/wp-content/uploads/2016/01/Liu-TIFS-2015-10.pdf
+a6ffe238eaf8632b4a8a6f718c8917e7f3261546,http://pdfs.semanticscholar.org/a6ff/e238eaf8632b4a8a6f718c8917e7f3261546.pdf
+a6583c8daa7927eedb3e892a60fc88bdfe89a486,http://pdfs.semanticscholar.org/a658/3c8daa7927eedb3e892a60fc88bdfe89a486.pdf
+a660390654498dff2470667b64ea656668c98ecc,https://pdfs.semanticscholar.org/b42a/97fb47bcd6bfa72e130c08960a77ee96f9ab.pdf
+a60907b7ee346b567972074e3e03c82f64d7ea30,http://pdfs.semanticscholar.org/a609/07b7ee346b567972074e3e03c82f64d7ea30.pdf
+a6e43b73f9f87588783988333997a81b4487e2d5,http://pdfs.semanticscholar.org/a6e4/3b73f9f87588783988333997a81b4487e2d5.pdf
+a6496553fb9ab9ca5d69eb45af1bdf0b60ed86dc,http://pdfs.semanticscholar.org/a649/6553fb9ab9ca5d69eb45af1bdf0b60ed86dc.pdf
+a6b5ffb5b406abfda2509cae66cdcf56b4bb3837,http://pdfs.semanticscholar.org/bce2/02717ce134b317b39f0a18151659d643875b.pdf
+a694180a683f7f4361042c61648aa97d222602db,http://www.iab-rubric.org/papers/ICB16-Autoscat.pdf
+a6db73f10084ce6a4186363ea9d7475a9a658a11,http://pdfs.semanticscholar.org/afce/ebbea6e9130cf22142206c19a19cda226b13.pdf
+a6634ff2f9c480e94ed8c01d64c9eb70e0d98487,http://pdfs.semanticscholar.org/a663/4ff2f9c480e94ed8c01d64c9eb70e0d98487.pdf
+a6b1d79bc334c74cde199e26a7ef4c189e9acd46,http://pdfs.semanticscholar.org/a6b1/d79bc334c74cde199e26a7ef4c189e9acd46.pdf
+a6ebe013b639f0f79def4c219f585b8a012be04f,http://pdfs.semanticscholar.org/a6eb/e013b639f0f79def4c219f585b8a012be04f.pdf
+a6e21438695dbc3a184d33b6cf5064ddf655a9ba,http://pdfs.semanticscholar.org/b673/ffe63c5d0723009042f0f922f19f093b7e34.pdf
+b9cedd09bdae827dacb138d6b054449d5346caf1,http://www.cs.colostate.edu/~lui/Papers/BTAS09LUIa.pdf
+b9cad920a00fc0e997fc24396872e03f13c0bb9c,http://www.ic.unicamp.br/~rocha/pub/papers/2011-icip-spoofing-detection.pdf
+b9c9c7ef82f31614c4b9226e92ab45de4394c5f6,http://pdfs.semanticscholar.org/b9c9/c7ef82f31614c4b9226e92ab45de4394c5f6.pdf
+b9f2a755940353549e55690437eb7e13ea226bbf,http://pdfs.semanticscholar.org/b9f2/a755940353549e55690437eb7e13ea226bbf.pdf
+b9cedd1960d5c025be55ade0a0aa81b75a6efa61,http://pdfs.semanticscholar.org/b9ce/dd1960d5c025be55ade0a0aa81b75a6efa61.pdf
+a1dd806b8f4f418d01960e22fb950fe7a56c18f1,https://www.cc.gatech.edu/~parikh/Publications/ParikhGrauman_CVPR2011_nameable.pdf
+a158c1e2993ac90a90326881dd5cb0996c20d4f3,http://pdfs.semanticscholar.org/a158/c1e2993ac90a90326881dd5cb0996c20d4f3.pdf
+a15d9d2ed035f21e13b688a78412cb7b5a04c469,http://pdfs.semanticscholar.org/a15d/9d2ed035f21e13b688a78412cb7b5a04c469.pdf
+a1b1442198f29072e907ed8cb02a064493737158,http://affect.media.mit.edu/pdfs/12.McDuff-etal-Crowdsourcing-TAC.pdf
+a14db48785d41cd57d4eac75949a6b79fc684e70,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Barkan_Fast_High_Dimensional_2013_ICCV_paper.pdf
+a15c728d008801f5ffc7898568097bbeac8270a4,http://pdfs.semanticscholar.org/a15c/728d008801f5ffc7898568097bbeac8270a4.pdf
+a125bc55bdf4bec7484111eea9ae537be314ec62,http://pdfs.semanticscholar.org/a125/bc55bdf4bec7484111eea9ae537be314ec62.pdf
+a14ae81609d09fed217aa12a4df9466553db4859,http://homepages.dcc.ufmg.br/~william/papers/paper_2011_TIP.pdf
+a1ee0176a9c71863d812fe012b5c6b9c15f9aa8a,http://pdfs.semanticscholar.org/a1ee/0176a9c71863d812fe012b5c6b9c15f9aa8a.pdf
+a1f40bcfadbeee66f67ab0755dd3037c030a7450,http://www.researchgate.net/profile/Jiansheng_Chen/publication/265016758_Face_Image_Quality_Assessment_Based_on_Learning_to_Rank/links/546d662d0cf2193b94c5852b.pdf
+a1e97c4043d5cc9896dc60ae7ca135782d89e5fc,http://pdfs.semanticscholar.org/a1e9/7c4043d5cc9896dc60ae7ca135782d89e5fc.pdf
+efd308393b573e5410455960fe551160e1525f49,http://pdfs.semanticscholar.org/efd3/08393b573e5410455960fe551160e1525f49.pdf
+efd28eabebb9815e34031316624e7f095c7dfcfe,http://pdfs.semanticscholar.org/efd2/8eabebb9815e34031316624e7f095c7dfcfe.pdf
+eff87ecafed67cc6fc4f661cb077fed5440994bb,http://pdfs.semanticscholar.org/eff8/7ecafed67cc6fc4f661cb077fed5440994bb.pdf
+ef2a5a26448636570986d5cda8376da83d96ef87,http://pdfs.semanticscholar.org/ef2a/5a26448636570986d5cda8376da83d96ef87.pdf
+c32fb755856c21a238857b77d7548f18e05f482d,http://pdfs.semanticscholar.org/c32f/b755856c21a238857b77d7548f18e05f482d.pdf
+c34e48d637705ffb52360c2afb6b03efdeb680bf,http://pdfs.semanticscholar.org/c34e/48d637705ffb52360c2afb6b03efdeb680bf.pdf
+c3b3636080b9931ac802e2dd28b7b684d6cf4f8b,http://pdfs.semanticscholar.org/c3b3/636080b9931ac802e2dd28b7b684d6cf4f8b.pdf
+c398684270543e97e3194674d9cce20acaef3db3,http://pdfs.semanticscholar.org/c398/684270543e97e3194674d9cce20acaef3db3.pdf
+c3418f866a86dfd947c2b548cbdeac8ca5783c15,http://pdfs.semanticscholar.org/c341/8f866a86dfd947c2b548cbdeac8ca5783c15.pdf
+c3bcc4ee9e81ce9c5c0845f34e9992872a8defc0,http://pdfs.semanticscholar.org/c3bc/c4ee9e81ce9c5c0845f34e9992872a8defc0.pdf
+c32383330df27625592134edd72d69bb6b5cff5c,http://www.iis.sinica.edu.tw/papers/song/13690-F.pdf
+c3a3f7758bccbead7c9713cb8517889ea6d04687,http://pdfs.semanticscholar.org/c3a3/f7758bccbead7c9713cb8517889ea6d04687.pdf
+c32f04ccde4f11f8717189f056209eb091075254,http://pdfs.semanticscholar.org/c32f/04ccde4f11f8717189f056209eb091075254.pdf
+c30982d6d9bbe470a760c168002ed9d66e1718a2,http://facstaff.elon.edu/sspurlock/papers/spurlock15_head_pose.pdf
+c32cd207855e301e6d1d9ddd3633c949630c793a,http://pdfs.semanticscholar.org/c32c/d207855e301e6d1d9ddd3633c949630c793a.pdf
+c37a971f7a57f7345fdc479fa329d9b425ee02be,http://pdfs.semanticscholar.org/c37a/971f7a57f7345fdc479fa329d9b425ee02be.pdf
+c3638b026c7f80a2199b5ae89c8fcbedfc0bd8af,http://pdfs.semanticscholar.org/c363/8b026c7f80a2199b5ae89c8fcbedfc0bd8af.pdf
+c32c8bfadda8f44d40c6cd9058a4016ab1c27499,http://pdfs.semanticscholar.org/c32c/8bfadda8f44d40c6cd9058a4016ab1c27499.pdf
+c3fb2399eb4bcec22723715556e31c44d086e054,http://mirlab.org/conference_papers/International_Conference/ICASSP%202014/papers/p499-srinivasan.pdf
+c418a3441f992fea523926f837f4bfb742548c16,http://pdfs.semanticscholar.org/c418/a3441f992fea523926f837f4bfb742548c16.pdf
+c4b58ceafdf4cf55586b036b9eb4d6d3d9ecd9c4,http://www.serc.iisc.ernet.in/~venky/Papers/Action_Recognition_CD_ISSNIP14.pdf
+c44c84540db1c38ace232ef34b03bda1c81ba039,http://pdfs.semanticscholar.org/c44c/84540db1c38ace232ef34b03bda1c81ba039.pdf
+c4f1fcd0a5cdaad8b920ee8188a8557b6086c1a4,https://vision.cornell.edu/se3/wp-content/uploads/2015/02/ijcv2014.pdf
+c46a4db7247d26aceafed3e4f38ce52d54361817,http://pdfs.semanticscholar.org/c46a/4db7247d26aceafed3e4f38ce52d54361817.pdf
+c4dcf41506c23aa45c33a0a5e51b5b9f8990e8ad,http://pdfs.semanticscholar.org/c4dc/f41506c23aa45c33a0a5e51b5b9f8990e8ad.pdf
+c42a8969cd76e9f54d43f7f4dd8f9b08da566c5f,http://pdfs.semanticscholar.org/c42a/8969cd76e9f54d43f7f4dd8f9b08da566c5f.pdf
+c41de506423e301ef2a10ea6f984e9e19ba091b4,http://www.ee.columbia.edu/ln/dvmm/publications/14/felixyu_llp_mm2014.pdf
+c4934d9f9c41dbc46f4173aad2775432fe02e0e6,http://pdfs.semanticscholar.org/c493/4d9f9c41dbc46f4173aad2775432fe02e0e6.pdf
+c40c23e4afc81c8b119ea361e5582aa3adecb157,http://pdfs.semanticscholar.org/c40c/23e4afc81c8b119ea361e5582aa3adecb157.pdf
+c49aed65fcf9ded15c44f9cbb4b161f851c6fa88,http://pdfs.semanticscholar.org/c49a/ed65fcf9ded15c44f9cbb4b161f851c6fa88.pdf
+c472436764a30278337aca9681eee456bee95c34,http://pdfs.semanticscholar.org/c472/436764a30278337aca9681eee456bee95c34.pdf
+c466ad258d6262c8ce7796681f564fec9c2b143d,http://pdfs.semanticscholar.org/c466/ad258d6262c8ce7796681f564fec9c2b143d.pdf
+eacba5e8fbafb1302866c0860fc260a2bdfff232,http://pdfs.semanticscholar.org/eacb/a5e8fbafb1302866c0860fc260a2bdfff232.pdf
+ea482bf1e2b5b44c520fc77eab288caf8b3f367a,http://pdfs.semanticscholar.org/ea48/2bf1e2b5b44c520fc77eab288caf8b3f367a.pdf
+ea6f5c8e12513dbaca6bbdff495ef2975b8001bd,http://pdfs.semanticscholar.org/ea6f/5c8e12513dbaca6bbdff495ef2975b8001bd.pdf
+ea85378a6549bb9eb9bcc13e31aa6a61b655a9af,http://pdfs.semanticscholar.org/ea85/378a6549bb9eb9bcc13e31aa6a61b655a9af.pdf
+ea2ee5c53747878f30f6d9c576fd09d388ab0e2b,http://pdfs.semanticscholar.org/ea2e/e5c53747878f30f6d9c576fd09d388ab0e2b.pdf
+ea218cebea2228b360680cb85ca133e8c2972e56,http://pdfs.semanticscholar.org/ea21/8cebea2228b360680cb85ca133e8c2972e56.pdf
+ea96bc017fb56593a59149e10d5f14011a3744a0,http://pdfs.semanticscholar.org/ea96/bc017fb56593a59149e10d5f14011a3744a0.pdf
+e10a257f1daf279e55f17f273a1b557141953ce2,http://pdfs.semanticscholar.org/e10a/257f1daf279e55f17f273a1b557141953ce2.pdf
+e171fba00d88710e78e181c3e807c2fdffc6798a,http://pdfs.semanticscholar.org/e171/fba00d88710e78e181c3e807c2fdffc6798a.pdf
+e1c59e00458b4dee3f0e683ed265735f33187f77,http://pdfs.semanticscholar.org/e1c5/9e00458b4dee3f0e683ed265735f33187f77.pdf
+e1f790bbedcba3134277f545e56946bc6ffce48d,http://pdfs.semanticscholar.org/e1f7/90bbedcba3134277f545e56946bc6ffce48d.pdf
+e1ab3b9dee2da20078464f4ad8deb523b5b1792e,http://pdfs.semanticscholar.org/e1ab/3b9dee2da20078464f4ad8deb523b5b1792e.pdf
+e16efd2ae73a325b7571a456618bfa682b51aef8,http://pdfs.semanticscholar.org/e16e/fd2ae73a325b7571a456618bfa682b51aef8.pdf
+e13360cda1ebd6fa5c3f3386c0862f292e4dbee4,http://pdfs.semanticscholar.org/e133/60cda1ebd6fa5c3f3386c0862f292e4dbee4.pdf
+e1f6e2651b7294951b5eab5d2322336af1f676dc,http://pdfs.semanticscholar.org/e1f6/e2651b7294951b5eab5d2322336af1f676dc.pdf
+e1e6e6792e92f7110e26e27e80e0c30ec36ac9c2,http://pdfs.semanticscholar.org/e1e6/e6792e92f7110e26e27e80e0c30ec36ac9c2.pdf
+cd9666858f6c211e13aa80589d75373fd06f6246,http://pdfs.semanticscholar.org/cd96/66858f6c211e13aa80589d75373fd06f6246.pdf
+cd4c047f4d4df7937aff8fc76f4bae7718004f40,http://pdfs.semanticscholar.org/cd4c/047f4d4df7937aff8fc76f4bae7718004f40.pdf
+cd6c2ae00157e3fb6ab56379843280eb4cbb01b4,http://www.umiacs.umd.edu/~yzyang/paper/ICRA_2013_Multi.pdf
+cd596a2682d74bdfa7b7160dd070b598975e89d9,http://pdfs.semanticscholar.org/cd59/6a2682d74bdfa7b7160dd070b598975e89d9.pdf
+cdb1d32bc5c1a9bb0d9a5b9c9222401eab3e9ca0,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhang_Functional_Faces_Groupwise_CVPR_2016_paper.pdf
+cda8fd9dd8b485e6854b1733d2294f69666c66f7,http://eeeweba.ntu.edu.sg/computervision/Research%20Papers/2014/Activity%20Recognition%20in%20Unconstrained%20RGB-D%20Video%20using%203D%20Trajectories.pdf
+cda4fb9df653b5721ad4fe8b4a88468a410e55ec,http://pdfs.semanticscholar.org/cda4/fb9df653b5721ad4fe8b4a88468a410e55ec.pdf
+cd3005753012409361aba17f3f766e33e3a7320d,http://pdfs.semanticscholar.org/cd30/05753012409361aba17f3f766e33e3a7320d.pdf
+cd687ddbd89a832f51d5510c478942800a3e6854,http://pdfs.semanticscholar.org/cd68/7ddbd89a832f51d5510c478942800a3e6854.pdf
+cd436f05fb4aeeda5d1085f2fe0384526571a46e,http://pdfs.semanticscholar.org/cd43/6f05fb4aeeda5d1085f2fe0384526571a46e.pdf
+cc589c499dcf323fe4a143bbef0074c3e31f9b60,http://www.cs.binghamton.edu/~lijun/Research/3DFE/Yin_FGR06_a.pdf
+ccfcbf0eda6df876f0170bdb4d7b4ab4e7676f18,http://ibug.doc.ic.ac.uk/media/uploads/documents/taud.pdf
+cc2eaa182f33defbb33d69e9547630aab7ed9c9c,http://pdfs.semanticscholar.org/ce2e/e807a63bbdffa530c80915b04d11a7f29a21.pdf
+ccbfc004e29b3aceea091056b0ec536e8ea7c47e,http://research.microsoft.com/~yqxu/papers/IEEE%20ICIP2005.pdf
+ccdea57234d38c7831f1e9231efcb6352c801c55,http://pdfs.semanticscholar.org/ccde/a57234d38c7831f1e9231efcb6352c801c55.pdf
+cc38942825d3a2c9ee8583c153d2c56c607e61a7,http://pdfs.semanticscholar.org/cc38/942825d3a2c9ee8583c153d2c56c607e61a7.pdf
+cc3c273bb213240515147e8be68c50f7ea22777c,http://pdfs.semanticscholar.org/cc3c/273bb213240515147e8be68c50f7ea22777c.pdf
+ccf43c62e4bf76b6a48ff588ef7ed51e87ddf50b,http://pdfs.semanticscholar.org/ccf4/3c62e4bf76b6a48ff588ef7ed51e87ddf50b.pdf
+cc8bf03b3f5800ac23e1a833447c421440d92197,https://pdfs.semanticscholar.org/cc8b/f03b3f5800ac23e1a833447c421440d92197.pdf
+cc91001f9d299ad70deb6453d55b2c0b967f8c0d,http://pdfs.semanticscholar.org/cc91/001f9d299ad70deb6453d55b2c0b967f8c0d.pdf
+cc96eab1e55e771e417b758119ce5d7ef1722b43,http://pdfs.semanticscholar.org/cc96/eab1e55e771e417b758119ce5d7ef1722b43.pdf
+cc7e66f2ba9ac0c639c80c65534ce6031997acd7,http://pdfs.semanticscholar.org/cc7e/66f2ba9ac0c639c80c65534ce6031997acd7.pdf
+cc9057d2762e077c53e381f90884595677eceafa,http://pdfs.semanticscholar.org/cc90/57d2762e077c53e381f90884595677eceafa.pdf
+e64b683e32525643a9ddb6b6af8b0472ef5b6a37,http://pdfs.semanticscholar.org/e64b/683e32525643a9ddb6b6af8b0472ef5b6a37.pdf
+e69ac130e3c7267cce5e1e3d9508ff76eb0e0eef,http://pdfs.semanticscholar.org/e69a/c130e3c7267cce5e1e3d9508ff76eb0e0eef.pdf
+e6865b000cf4d4e84c3fe895b7ddfc65a9c4aaec,http://pdfs.semanticscholar.org/e686/5b000cf4d4e84c3fe895b7ddfc65a9c4aaec.pdf
+e6d689054e87ad3b8fbbb70714d48712ad84dc1c,http://pdfs.semanticscholar.org/e6d6/89054e87ad3b8fbbb70714d48712ad84dc1c.pdf
+e6dc1200a31defda100b2e5ddb27fb7ecbbd4acd,http://citeseerx.ist.psu.edu/viewdoc/download;jsessionid=8BA80DE8A35C6665EB6C19D582E5689F?doi=10.1.1.227.7824&rep=rep1&type=pdf
+e6f20e7431172c68f7fce0d4595100445a06c117,http://pdfs.semanticscholar.org/e6f2/0e7431172c68f7fce0d4595100445a06c117.pdf
+e6540d70e5ffeed9f447602ea3455c7f0b38113e,http://pdfs.semanticscholar.org/e654/0d70e5ffeed9f447602ea3455c7f0b38113e.pdf
+e6ee36444038de5885473693fb206f49c1369138,http://pdfs.semanticscholar.org/e6ee/36444038de5885473693fb206f49c1369138.pdf
+e6178de1ef15a6a973aad2791ce5fbabc2cb8ae5,http://pdfs.semanticscholar.org/e617/8de1ef15a6a973aad2791ce5fbabc2cb8ae5.pdf
+e6c8f5067ec2ad6af33745312b45fab03e7e038b,http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/1297.pdf
+f913bb65b62b0a6391ffa8f59b1d5527b7eba948,http://pdfs.semanticscholar.org/f913/bb65b62b0a6391ffa8f59b1d5527b7eba948.pdf
+f9784db8ff805439f0a6b6e15aeaf892dba47ca0,http://pdfs.semanticscholar.org/f978/4db8ff805439f0a6b6e15aeaf892dba47ca0.pdf
+f935225e7811858fe9ef6b5fd3fdd59aec9abd1a,http://pdfs.semanticscholar.org/f935/225e7811858fe9ef6b5fd3fdd59aec9abd1a.pdf
+f963967e52a5fd97fa3ebd679fd098c3cb70340e,http://pdfs.semanticscholar.org/f963/967e52a5fd97fa3ebd679fd098c3cb70340e.pdf
+f9e0209dc9e72d64b290d0622c1c1662aa2cc771,http://pdfs.semanticscholar.org/f9e0/209dc9e72d64b290d0622c1c1662aa2cc771.pdf
+f96bdd1e2a940030fb0a89abbe6c69b8d7f6f0c1,http://pdfs.semanticscholar.org/f96b/dd1e2a940030fb0a89abbe6c69b8d7f6f0c1.pdf
+f93606d362fcbe62550d0bf1b3edeb7be684b000,http://pdfs.semanticscholar.org/f936/06d362fcbe62550d0bf1b3edeb7be684b000.pdf
+f909d04c809013b930bafca12c0f9a8192df9d92,http://pdfs.semanticscholar.org/f909/d04c809013b930bafca12c0f9a8192df9d92.pdf
+f9d1f12070e5267afc60828002137af949ff1544,http://pdfs.semanticscholar.org/f9d1/f12070e5267afc60828002137af949ff1544.pdf
+f9ccfe000092121a2016639732cdb368378256d5,http://pdfs.semanticscholar.org/f9cc/fe000092121a2016639732cdb368378256d5.pdf
+f02f0f6fcd56a9b1407045de6634df15c60a85cd,http://pdfs.semanticscholar.org/f02f/0f6fcd56a9b1407045de6634df15c60a85cd.pdf
+f0ae807627f81acb63eb5837c75a1e895a92c376,http://pdfs.semanticscholar.org/f0ae/807627f81acb63eb5837c75a1e895a92c376.pdf
+f074e86e003d5b7a3b6e1780d9c323598d93f3bc,http://pdfs.semanticscholar.org/f074/e86e003d5b7a3b6e1780d9c323598d93f3bc.pdf
+f0681fc08f4d7198dcde803d69ca62f09f3db6c5,http://pdfs.semanticscholar.org/f068/1fc08f4d7198dcde803d69ca62f09f3db6c5.pdf
+f0f501e1e8726148d18e70c8e9f6feea9360d119,http://pdfs.semanticscholar.org/f0f5/01e1e8726148d18e70c8e9f6feea9360d119.pdf
+f06b015bb19bd3c39ac5b1e4320566f8d83a0c84,http://pdfs.semanticscholar.org/f06b/015bb19bd3c39ac5b1e4320566f8d83a0c84.pdf
+f0a3f12469fa55ad0d40c21212d18c02be0d1264,http://pdfs.semanticscholar.org/f0a3/f12469fa55ad0d40c21212d18c02be0d1264.pdf
+f05ad40246656a977cf321c8299158435e3f3b61,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Lu_Face_Recognition_Using_2013_ICCV_paper.pdf
+f781e50caa43be13c5ceb13f4ccc2abc7d1507c5,http://pdfs.semanticscholar.org/f781/e50caa43be13c5ceb13f4ccc2abc7d1507c5.pdf
+f74917fc0e55f4f5682909dcf6929abd19d33e2e,http://pdfs.semanticscholar.org/f749/17fc0e55f4f5682909dcf6929abd19d33e2e.pdf
+f740bac1484f2f2c70777db6d2a11cf4280081d6,http://pdfs.semanticscholar.org/f740/bac1484f2f2c70777db6d2a11cf4280081d6.pdf
+f7452a12f9bd927398e036ea6ede02da79097e6e,http://pdfs.semanticscholar.org/f745/2a12f9bd927398e036ea6ede02da79097e6e.pdf
+f7093b138fd31956e30d411a7043741dcb8ca4aa,http://pdfs.semanticscholar.org/f709/3b138fd31956e30d411a7043741dcb8ca4aa.pdf
+f7de943aa75406fe5568fdbb08133ce0f9a765d4,http://pdfs.semanticscholar.org/f7de/943aa75406fe5568fdbb08133ce0f9a765d4.pdf
+f75852386e563ca580a48b18420e446be45fcf8d,http://pdfs.semanticscholar.org/f758/52386e563ca580a48b18420e446be45fcf8d.pdf
+f7c50d2be9fba0e4527fd9fbe3095e9d9a94fdd3,http://pdfs.semanticscholar.org/f7c5/0d2be9fba0e4527fd9fbe3095e9d9a94fdd3.pdf
+f78863f4e7c4c57744715abe524ae4256be884a9,http://pdfs.semanticscholar.org/f788/63f4e7c4c57744715abe524ae4256be884a9.pdf
+f77c9bf5beec7c975584e8087aae8d679664a1eb,http://pdfs.semanticscholar.org/f77c/9bf5beec7c975584e8087aae8d679664a1eb.pdf
+e8686663aec64f4414eba6a0f821ab9eb9f93e38,http://pdfs.semanticscholar.org/e868/6663aec64f4414eba6a0f821ab9eb9f93e38.pdf
+e82360682c4da11f136f3fccb73a31d7fd195694,http://pdfs.semanticscholar.org/e823/60682c4da11f136f3fccb73a31d7fd195694.pdf
+e8410c4cd1689829c15bd1f34995eb3bd4321069,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553731.pdf
+e8f0f9b74db6794830baa2cab48d99d8724e8cb6,http://pdfs.semanticscholar.org/e8f0/f9b74db6794830baa2cab48d99d8724e8cb6.pdf
+e8b2a98f87b7b2593b4a046464c1ec63bfd13b51,http://pdfs.semanticscholar.org/e8b2/a98f87b7b2593b4a046464c1ec63bfd13b51.pdf
+e8c9dcbf56714db53063b9c367e3e44300141ff6,http://faculty.virginia.edu/humandynamicslab/pubs/BrickHunterCohn-ACII2009.pdf
+fac5a9a18157962cff38df6d4ae69f8a7da1cfa8,http://www.cs.sunysb.edu/~vislab/papers/01580481.pdf
+fab83bf8d7cab8fe069796b33d2a6bd70c8cefc6,http://pdfs.semanticscholar.org/fab8/3bf8d7cab8fe069796b33d2a6bd70c8cefc6.pdf
+faeefc5da67421ecd71d400f1505cfacb990119c,http://pdfs.semanticscholar.org/faee/fc5da67421ecd71d400f1505cfacb990119c.pdf
+fa08a4da5f2fa39632d90ce3a2e1688d147ece61,http://pdfs.semanticscholar.org/fa08/a4da5f2fa39632d90ce3a2e1688d147ece61.pdf
+fab2fc6882872746498b362825184c0fb7d810e4,http://pdfs.semanticscholar.org/fab2/fc6882872746498b362825184c0fb7d810e4.pdf
+faead8f2eb54c7bc33bc7d0569adc7a4c2ec4c3b,http://pdfs.semanticscholar.org/faea/d8f2eb54c7bc33bc7d0569adc7a4c2ec4c3b.pdf
+fa24bf887d3b3f6f58f8305dcd076f0ccc30272a,http://pdfs.semanticscholar.org/fa24/bf887d3b3f6f58f8305dcd076f0ccc30272a.pdf
+faa29975169ba3bbb954e518bc9814a5819876f6,http://pdfs.semanticscholar.org/faa2/9975169ba3bbb954e518bc9814a5819876f6.pdf
+fafe69a00565895c7d57ad09ef44ce9ddd5a6caa,http://pdfs.semanticscholar.org/fafe/69a00565895c7d57ad09ef44ce9ddd5a6caa.pdf
+faca1c97ac2df9d972c0766a296efcf101aaf969,http://pdfs.semanticscholar.org/faca/1c97ac2df9d972c0766a296efcf101aaf969.pdf
+fa398c6d6bd03df839dce7b59e04f473bc0ed660,https://www.researchgate.net/profile/Sujata_Pandey/publication/4308761_A_Novel_Approach_for_Face_Recognition_Using_DCT_Coefficients_Re-scaling_for_Illumination_Normalization/links/004635211c385bb7e3000000.pdf
+fae83b145e5eeda8327de9f19df286edfaf5e60c,http://pdfs.semanticscholar.org/fae8/3b145e5eeda8327de9f19df286edfaf5e60c.pdf
+ff8315c1a0587563510195356c9153729b533c5b,http://vislab.ucr.edu/PUBLICATIONS/pubs/Journal%20and%20Conference%20Papers/after10-1-1997/Journals/2014/Zapping%20IndexUsing%20Smile%20to%20MeasureAdvertisement14.pdf
+ff44d8938c52cfdca48c80f8e1618bbcbf91cb2a,http://pdfs.semanticscholar.org/ff44/d8938c52cfdca48c80f8e1618bbcbf91cb2a.pdf
+ff398e7b6584d9a692e70c2170b4eecaddd78357,http://pdfs.semanticscholar.org/ff39/8e7b6584d9a692e70c2170b4eecaddd78357.pdf
+ffc5a9610df0341369aa75c0331ef021de0a02a9,http://pdfs.semanticscholar.org/ffc5/a9610df0341369aa75c0331ef021de0a02a9.pdf
+ffd81d784549ee51a9b0b7b8aaf20d5581031b74,http://pdfs.semanticscholar.org/ffd8/1d784549ee51a9b0b7b8aaf20d5581031b74.pdf
+ff01bc3f49130d436fca24b987b7e3beedfa404d,http://pdfs.semanticscholar.org/ff01/bc3f49130d436fca24b987b7e3beedfa404d.pdf
+ff061f7e46a6213d15ac2eb2c49d9d3003612e49,http://pdfs.semanticscholar.org/ff06/1f7e46a6213d15ac2eb2c49d9d3003612e49.pdf
+ff1f45bdad41d8b35435098041e009627e60d208,http://pdfs.semanticscholar.org/ff1f/45bdad41d8b35435098041e009627e60d208.pdf
+ff60d4601adabe04214c67e12253ea3359f4e082,http://pdfs.semanticscholar.org/ff60/d4601adabe04214c67e12253ea3359f4e082.pdf
+ffc9d6a5f353e5aec3116a10cf685294979c63d9,http://pdfs.semanticscholar.org/ffc9/d6a5f353e5aec3116a10cf685294979c63d9.pdf
+ffaad0204f4af763e3390a2f6053c0e9875376be,http://pdfs.semanticscholar.org/ffaa/d0204f4af763e3390a2f6053c0e9875376be.pdf
+ffcbedb92e76fbab083bb2c57d846a2a96b5ae30,http://pdfs.semanticscholar.org/ffcb/edb92e76fbab083bb2c57d846a2a96b5ae30.pdf
+ff7bc7a6d493e01ec8fa2b889bcaf6349101676e,http://pdfs.semanticscholar.org/ff7b/c7a6d493e01ec8fa2b889bcaf6349101676e.pdf
+fffa2943808509fdbd2fc817cc5366752e57664a,http://pdfs.semanticscholar.org/fffa/2943808509fdbd2fc817cc5366752e57664a.pdf
+ff46c41e9ea139d499dd349e78d7cc8be19f936c,http://pdfs.semanticscholar.org/ff46/c41e9ea139d499dd349e78d7cc8be19f936c.pdf
+ff5dd6f96e108d8233220cc262bc282229c1a582,http://pdfs.semanticscholar.org/ff5d/d6f96e108d8233220cc262bc282229c1a582.pdf
+c5468665d98ce7349d38afb620adbf51757ab86f,http://pdfs.semanticscholar.org/c546/8665d98ce7349d38afb620adbf51757ab86f.pdf
+c5d13e42071813a0a9dd809d54268712eba7883f,http://www.csis.pace.edu/~ctappert/dps/2013BTAS/Papers/Paper%2016/PID2891229.pdf
+c50d73557be96907f88b59cfbd1ab1b2fd696d41,http://pdfs.semanticscholar.org/c50d/73557be96907f88b59cfbd1ab1b2fd696d41.pdf
+c54f9f33382f9f656ec0e97d3004df614ec56434,http://pdfs.semanticscholar.org/c54f/9f33382f9f656ec0e97d3004df614ec56434.pdf
+c574c72b5ef1759b7fd41cf19a9dcd67e5473739,http://pdfs.semanticscholar.org/c574/c72b5ef1759b7fd41cf19a9dcd67e5473739.pdf
+c5a561c662fc2b195ff80d2655cc5a13a44ffd2d,http://www.cs.toronto.edu/~suzanne/papers/JamiesonEtAlPAMI.pdf
+c5366f412f2e8e78280afcccc544156f63b516e3,http://lep.unige.ch/system/files/biblio/2012_Valstar_MetaAnalysisGEMEP-FERA.pdf
+c5fe40875358a286594b77fa23285fcfb7bda68e,http://pdfs.semanticscholar.org/edd1/cfb1caff16f80d807ff0821883ae855950c5.pdf
+c5c379a807e02cab2e57de45699ababe8d13fb6d,http://pdfs.semanticscholar.org/c5c3/79a807e02cab2e57de45699ababe8d13fb6d.pdf
+c5935b92bd23fd25cae20222c7c2abc9f4caa770,http://openaccess.thecvf.com/content_cvpr_2017/papers/Feichtenhofer_Spatiotemporal_Multiplier_Networks_CVPR_2017_paper.pdf
+c5421a18583f629b49ca20577022f201692c4f5d,http://pdfs.semanticscholar.org/c542/1a18583f629b49ca20577022f201692c4f5d.pdf
+c5be0feacec2860982fbbb4404cf98c654142489,http://pdfs.semanticscholar.org/c5be/0feacec2860982fbbb4404cf98c654142489.pdf
+c5844de3fdf5e0069d08e235514863c8ef900eb7,http://pdfs.semanticscholar.org/c584/4de3fdf5e0069d08e235514863c8ef900eb7.pdf
+c58b7466f2855ffdcff1bebfad6b6a027b8c5ee1,http://pdfs.semanticscholar.org/d6f1/42f5ddcb027e7b346eb20703abbf5cc4e883.pdf
+c590c6c171392e9f66aab1bce337470c43b48f39,http://pdfs.semanticscholar.org/c590/c6c171392e9f66aab1bce337470c43b48f39.pdf
+c5f1ae9f46dc44624591db3d5e9f90a6a8391111,http://poseidon.csd.auth.gr/papers/PUBLISHED/CONFERENCE/pdf/Buciu_ICPR_2004.pdf
+c53352a4239568cc915ad968aff51c49924a3072,http://pdfs.semanticscholar.org/c533/52a4239568cc915ad968aff51c49924a3072.pdf
+c5765590c294146a8e3c9987d394c0990ab6a35b,http://media.cs.tsinghua.edu.cn/~imagevision/papers/%5B2012%5D084_P1B-31-cvpr2012-wan.pdf
+c2c5206f6a539b02f5d5a19bdb3a90584f7e6ba4,http://pdfs.semanticscholar.org/c2c5/206f6a539b02f5d5a19bdb3a90584f7e6ba4.pdf
+c2fa83e8a428c03c74148d91f60468089b80c328,http://pdfs.semanticscholar.org/c2fa/83e8a428c03c74148d91f60468089b80c328.pdf
+c2c3ff1778ed9c33c6e613417832505d33513c55,http://pdfs.semanticscholar.org/c2c3/ff1778ed9c33c6e613417832505d33513c55.pdf
+c27f64eaf48e88758f650e38fa4e043c16580d26,http://pdfs.semanticscholar.org/c27f/64eaf48e88758f650e38fa4e043c16580d26.pdf
+c23153aade9be0c941390909c5d1aad8924821db,http://pdfs.semanticscholar.org/c231/53aade9be0c941390909c5d1aad8924821db.pdf
+c207fd762728f3da4cddcfcf8bf19669809ab284,http://pdfs.semanticscholar.org/c207/fd762728f3da4cddcfcf8bf19669809ab284.pdf
+c220f457ad0b28886f8b3ef41f012dd0236cd91a,http://pdfs.semanticscholar.org/c220/f457ad0b28886f8b3ef41f012dd0236cd91a.pdf
+c2e03efd8c5217188ab685e73cc2e52c54835d1a,http://web.eecs.utk.edu/~ataalimi/wp-content/uploads/2016/09/Deep-Tree-structured-Face-A-Unified-Representation-for-Multi-task-Facial.pdf
+c28461e266fe0f03c0f9a9525a266aa3050229f0,http://pdfs.semanticscholar.org/c284/61e266fe0f03c0f9a9525a266aa3050229f0.pdf
+c29e33fbd078d9a8ab7adbc74b03d4f830714cd0,http://research.microsoft.com/en-us/um/people/leizhang/Paper/FG04-Longbin.pdf
+c2e6daebb95c9dfc741af67464c98f1039127627,http://pdfs.semanticscholar.org/c2e6/daebb95c9dfc741af67464c98f1039127627.pdf
+f6742010372210d06e531e7df7df9c01a185e241,http://pdfs.semanticscholar.org/f674/2010372210d06e531e7df7df9c01a185e241.pdf
+f69de2b6770f0a8de6d3ec1a65cb7996b3c99317,http://pdfs.semanticscholar.org/f69d/e2b6770f0a8de6d3ec1a65cb7996b3c99317.pdf
+f6ca29516cce3fa346673a2aec550d8e671929a6,http://pdfs.semanticscholar.org/f6ca/29516cce3fa346673a2aec550d8e671929a6.pdf
+f67a73c9dd1e05bfc51219e70536dbb49158f7bc,http://pdfs.semanticscholar.org/f67a/73c9dd1e05bfc51219e70536dbb49158f7bc.pdf
+f6c70635241968a6d5fd5e03cde6907022091d64,http://pdfs.semanticscholar.org/f6c7/0635241968a6d5fd5e03cde6907022091d64.pdf
+f66f3d1e6e33cb9e9b3315d3374cd5f121144213,http://pdfs.semanticscholar.org/f66f/3d1e6e33cb9e9b3315d3374cd5f121144213.pdf
+f6abecc1f48f6ec6eede4143af33cc936f14d0d0,http://pdfs.semanticscholar.org/f6ab/ecc1f48f6ec6eede4143af33cc936f14d0d0.pdf
+f6fa97fbfa07691bc9ff28caf93d0998a767a5c1,http://pdfs.semanticscholar.org/f6fa/97fbfa07691bc9ff28caf93d0998a767a5c1.pdf
+f68f20868a6c46c2150ca70f412dc4b53e6a03c2,http://pdfs.semanticscholar.org/f68f/20868a6c46c2150ca70f412dc4b53e6a03c2.pdf
+e9ed17fd8bf1f3d343198e206a4a7e0561ad7e66,http://pdfs.semanticscholar.org/e9ed/17fd8bf1f3d343198e206a4a7e0561ad7e66.pdf
+e9e40e588f8e6510fa5537e0c9e083ceed5d07ad,http://pdfs.semanticscholar.org/e9e4/0e588f8e6510fa5537e0c9e083ceed5d07ad.pdf
+e9bb045e702ee38e566ce46cc1312ed25cb59ea7,http://pdfs.semanticscholar.org/e9bb/045e702ee38e566ce46cc1312ed25cb59ea7.pdf
+e9fcd15bcb0f65565138dda292e0c71ef25ea8bb,http://pdfs.semanticscholar.org/e9fc/d15bcb0f65565138dda292e0c71ef25ea8bb.pdf
+e9f1cdd9ea95810efed306a338de9e0de25990a0,http://pdfs.semanticscholar.org/e9f1/cdd9ea95810efed306a338de9e0de25990a0.pdf
+f16a605abb5857c39a10709bd9f9d14cdaa7918f,http://pdfs.semanticscholar.org/f16a/605abb5857c39a10709bd9f9d14cdaa7918f.pdf
+f1748303cc02424704b3a35595610890229567f9,http://pdfs.semanticscholar.org/f174/8303cc02424704b3a35595610890229567f9.pdf
+f1d090fcea63d9f9e835c49352a3cd576ec899c1,http://pdfs.semanticscholar.org/f1d0/90fcea63d9f9e835c49352a3cd576ec899c1.pdf
+f19777e37321f79e34462fc4c416bd56772031bf,http://pdfs.semanticscholar.org/f197/77e37321f79e34462fc4c416bd56772031bf.pdf
+f19ab817dd1ef64ee94e94689b0daae0f686e849,http://pdfs.semanticscholar.org/f19a/b817dd1ef64ee94e94689b0daae0f686e849.pdf
+e76798bddd0f12ae03de26b7c7743c008d505215,http://pdfs.semanticscholar.org/e767/98bddd0f12ae03de26b7c7743c008d505215.pdf
+e793f8644c94b81b7a0f89395937a7f8ad428a89,http://pdfs.semanticscholar.org/e793/f8644c94b81b7a0f89395937a7f8ad428a89.pdf
+e726174d516605f80ff359e71f68b6e8e6ec6d5d,http://pdfs.semanticscholar.org/e726/174d516605f80ff359e71f68b6e8e6ec6d5d.pdf
+e78394213ae07b682ce40dc600352f674aa4cb05,http://pdfs.semanticscholar.org/e783/94213ae07b682ce40dc600352f674aa4cb05.pdf
+e726acda15d41b992b5a41feabd43617fab6dc23,http://pdfs.semanticscholar.org/e726/acda15d41b992b5a41feabd43617fab6dc23.pdf
+e74816bc0803460e20edbd30a44ab857b06e288e,http://pdfs.semanticscholar.org/e748/16bc0803460e20edbd30a44ab857b06e288e.pdf
+e7b6887cd06d0c1aa4902335f7893d7640aef823,http://pdfs.semanticscholar.org/e7b6/887cd06d0c1aa4902335f7893d7640aef823.pdf
+e73b9b16adcf4339ff4d6723e61502489c50c2d9,http://pdfs.semanticscholar.org/e73b/9b16adcf4339ff4d6723e61502489c50c2d9.pdf
+cb669c1d1e17c2a54d78711fa6a9f556b83f1987,http://satoh-lab.ex.nii.ac.jp/users/ledduy/pub/Ngo-RobustFaceTrackFindingUnsingTrackedPoints.pdf
+cbcf5da9f09b12f53d656446fd43bc6df4b2fa48,http://pdfs.semanticscholar.org/cbcf/5da9f09b12f53d656446fd43bc6df4b2fa48.pdf
+cba45a87fc6cf12b3b0b6f57ba1a5282ef7fee7a,http://pdfs.semanticscholar.org/cba4/5a87fc6cf12b3b0b6f57ba1a5282ef7fee7a.pdf
+cb9092fe74ea6a5b2bb56e9226f1c88f96094388,http://pdfs.semanticscholar.org/cb90/92fe74ea6a5b2bb56e9226f1c88f96094388.pdf
+cbd004d4c5e3b64321dc1a8f05fa5d64500389c2,http://www.researchgate.net/profile/Wen_Li38/publication/261711227_POSE-ROBUST_REPRESENTATION_FOR_FACE_VERIFICATION_IN_UNCONSTRAINED_VIDEOS/links/00b7d53535ed96428c000000.pdf
+cb08f679f2cb29c7aa972d66fe9e9996c8dfae00,http://pdfs.semanticscholar.org/cb08/f679f2cb29c7aa972d66fe9e9996c8dfae00.pdf
+cb84229e005645e8623a866d3d7956c197f85e11,http://pdfs.semanticscholar.org/cb84/229e005645e8623a866d3d7956c197f85e11.pdf
+cb1b5e8b35609e470ce519303915236b907b13b6,http://dforte.ece.ufl.edu/Domenic_files/IJCB.pdf
+cbe859d151466315a050a6925d54a8d3dbad591f,http://homes.di.unimi.it/~boccignone/GiuseppeBoccignone_webpage/Stochastic_files/Euvip2010.pdf
+f86ddd6561f522d115614c93520faad122eb3b56,http://pdfs.semanticscholar.org/f86d/dd6561f522d115614c93520faad122eb3b56.pdf
+f8015e31d1421f6aee5e17fc3907070b8e0a5e59,http://pdfs.semanticscholar.org/f801/5e31d1421f6aee5e17fc3907070b8e0a5e59.pdf
+f83dd9ff002a40228bbe3427419b272ab9d5c9e4,http://pdfs.semanticscholar.org/f83d/d9ff002a40228bbe3427419b272ab9d5c9e4.pdf
+f8c94afd478821681a1565d463fc305337b02779,http://pdfs.semanticscholar.org/f8c9/4afd478821681a1565d463fc305337b02779.pdf
+f8f2d2910ce8b81cb4bbf84239f9229888158b34,http://pdfs.semanticscholar.org/f8f2/d2910ce8b81cb4bbf84239f9229888158b34.pdf
+f8ec92f6d009b588ddfbb47a518dd5e73855547d,http://pdfs.semanticscholar.org/f8ec/92f6d009b588ddfbb47a518dd5e73855547d.pdf
+f869601ae682e6116daebefb77d92e7c5dd2cb15,http://pdfs.semanticscholar.org/f869/601ae682e6116daebefb77d92e7c5dd2cb15.pdf
+f8ddb2cac276812c25021b5b79bf720e97063b1e,http://www.eecs.qmul.ac.uk/~sgg/papers/ShanEtAl_HCI2006.pdf
+f8ed5f2c71e1a647a82677df24e70cc46d2f12a8,http://pdfs.semanticscholar.org/f8ed/5f2c71e1a647a82677df24e70cc46d2f12a8.pdf
+f8a5bc2bd26790d474a1f6cc246b2ba0bcde9464,http://pdfs.semanticscholar.org/f8a5/bc2bd26790d474a1f6cc246b2ba0bcde9464.pdf
+cef841f27535c0865278ee9a4bc8ee113b4fb9f3,http://pdfs.semanticscholar.org/cef8/41f27535c0865278ee9a4bc8ee113b4fb9f3.pdf
+ce6d60b69eb95477596535227958109e07c61e1e,http://www.rci.rutgers.edu/~vmp93/Conference_pub/BTAS_2015_FVFF_JunCheng_Chen.pdf
+ceb763d6657a07b47e48e8a2956bcfdf2cf10818,http://pdfs.semanticscholar.org/ceb7/63d6657a07b47e48e8a2956bcfdf2cf10818.pdf
+cefd9936e91885ba7af9364d50470f6cb54315a4,http://pdfs.semanticscholar.org/cefd/9936e91885ba7af9364d50470f6cb54315a4.pdf
+ce85d953086294d989c09ae5c41af795d098d5b2,http://mmlab.ie.cuhk.edu.hk/archive/2007/NN07_feature.pdf
+ce5eac297174c17311ee28bda534faaa1d559bae,http://pdfs.semanticscholar.org/ce5e/ac297174c17311ee28bda534faaa1d559bae.pdf
+ce5e50467e43e3178cbd86cfc3348e3f577c4489,https://www.computer.org/csdl/proceedings/avss/2013/9999/00/06636683.pdf
+ce691a37060944c136d2795e10ed7ba751cd8394,http://pdfs.semanticscholar.org/ce69/1a37060944c136d2795e10ed7ba751cd8394.pdf
+ce3f3088d0c0bf236638014a299a28e492069753,http://pdfs.semanticscholar.org/ce3f/3088d0c0bf236638014a299a28e492069753.pdf
+ceeb67bf53ffab1395c36f1141b516f893bada27,http://pdfs.semanticscholar.org/ceeb/67bf53ffab1395c36f1141b516f893bada27.pdf
+ce9a61bcba6decba72f91497085807bface02daf,http://www.jdl.ac.cn/user/sgshan/pub/FG04_Qing_LY.pdf
+cef6cffd7ad15e7fa5632269ef154d32eaf057af,http://pdfs.semanticscholar.org/cef6/cffd7ad15e7fa5632269ef154d32eaf057af.pdf
+cebfafea92ed51b74a8d27c730efdacd65572c40,http://biometrics.cse.msu.edu/Publications/Face/LuJainColbry_Matching2.5DFaceScans_PAMI06.pdf
+ce56be1acffda599dec6cc2af2b35600488846c9,http://pdfs.semanticscholar.org/ce56/be1acffda599dec6cc2af2b35600488846c9.pdf
+ce54e891e956d5b502a834ad131616786897dc91,http://pdfs.semanticscholar.org/ce54/e891e956d5b502a834ad131616786897dc91.pdf
+ce6f459462ea9419ca5adcc549d1d10e616c0213,http://pdfs.semanticscholar.org/ce6f/459462ea9419ca5adcc549d1d10e616c0213.pdf
+ce933821661a0139a329e6c8243e335bfa1022b1,http://pdfs.semanticscholar.org/ce93/3821661a0139a329e6c8243e335bfa1022b1.pdf
+e0b71d3c7d551684bd334af5b3671df7053a529d,http://mplab.ucsd.edu/~jake/locality.pdf
+e0e4910d575c4a8309f2069b38b99c972dbedc57,http://eprints.pascal-network.org/archive/00009548/01/PoseDetectRandomizedCascades.pdf
+e0dedb6fc4d370f4399bf7d67e234dc44deb4333,http://pdfs.semanticscholar.org/e0de/db6fc4d370f4399bf7d67e234dc44deb4333.pdf
+e0638e0628021712ac76e3472663ccc17bd8838c,http://pdfs.semanticscholar.org/e063/8e0628021712ac76e3472663ccc17bd8838c.pdf
+e0c081a007435e0c64e208e9918ca727e2c1c44e,http://pdfs.semanticscholar.org/e0c0/81a007435e0c64e208e9918ca727e2c1c44e.pdf
+e0d878cc095eaae220ad1f681b33d7d61eb5e425,http://pdfs.semanticscholar.org/e0d8/78cc095eaae220ad1f681b33d7d61eb5e425.pdf
+e00d4e4ba25fff3583b180db078ef962bf7d6824,http://pdfs.semanticscholar.org/e00d/4e4ba25fff3583b180db078ef962bf7d6824.pdf
+e0bfcf965b402f3f209f26ae20ee88bc4d0002ab,http://pdfs.semanticscholar.org/e0bf/cf965b402f3f209f26ae20ee88bc4d0002ab.pdf
+e0ed0e2d189ff73701ec72e167d44df4eb6e864d,http://pdfs.semanticscholar.org/e0ed/0e2d189ff73701ec72e167d44df4eb6e864d.pdf
+e0765de5cabe7e287582532456d7f4815acd74c1,http://pdfs.semanticscholar.org/e076/5de5cabe7e287582532456d7f4815acd74c1.pdf
+e013c650c7c6b480a1b692bedb663947cd9d260f,http://www.nlpr.ia.ac.cn/2013papers/gjkw/gk25.pdf
+e0dc6f1b740479098c1d397a7bc0962991b5e294,http://pdfs.semanticscholar.org/e0dc/6f1b740479098c1d397a7bc0962991b5e294.pdf
+468c8f09d2ad8b558b65d11ec5ad49208c4da2f2,http://www.public.asu.edu/~bli24/Papers/ICPR2016_MSR-CNN.pdf
+46a4551a6d53a3cd10474ef3945f546f45ef76ee,http://cvrr.ucsd.edu/publications/2014/TawariTrivedi_IV2014.pdf
+4686bdcee01520ed6a769943f112b2471e436208,http://ipsjcva.springeropen.com/track/pdf/10.1186/s41074-017-0024-5?site=ipsjcva.springeropen.com
+4688787d064e59023a304f7c9af950d192ddd33e,http://www.cse.msu.edu/~liuxm/publication/Roth_Liu_Ross_Metaxas_TIFS.pdf
+466184b10fb7ce9857e6b5bd6b4e5003e09a0b16,http://pdfs.semanticscholar.org/a42f/433e500661589e567340fe7f7d761d1f14df.pdf
+46e86cdb674440f61b6658ef3e84fea95ea51fb4,http://pdfs.semanticscholar.org/c075/e79a832d36e5b4c76b0f07c3b9d5f3be43e0.pdf
+46b7ee97d7dfbd61cc3745e8dfdd81a15ab5c1d4,http://ibug.doc.ic.ac.uk/media/uploads/documents/3d_local_features.pdf
+46ae4d593d89b72e1a479a91806c39095cd96615,http://www.idiap.ch/~odobez/publications/GayKhouryMeignierOdobezDeleglise-FaceNaming-ICIP-2014.pdf
+467b602a67cfd7c347fe7ce74c02b38c4bb1f332,http://pdfs.semanticscholar.org/467b/602a67cfd7c347fe7ce74c02b38c4bb1f332.pdf
+466f80b066215e85da63e6f30e276f1a9d7c843b,http://cbl.uh.edu/pub_files/07961802.pdf
+464de30d3310123644ab81a1f0adc51598586fd2,http://pdfs.semanticscholar.org/464d/e30d3310123644ab81a1f0adc51598586fd2.pdf
+466a5add15bb5f91e0cfd29a55f5fb159a7980e5,http://pdfs.semanticscholar.org/466a/5add15bb5f91e0cfd29a55f5fb159a7980e5.pdf
+46f3b113838e4680caa5fc8bda6e9ae0d35a038c,http://pdfs.semanticscholar.org/46f3/b113838e4680caa5fc8bda6e9ae0d35a038c.pdf
+465d5bb11912005f0a4f0569c6524981df18a7de,http://pdfs.semanticscholar.org/465d/5bb11912005f0a4f0569c6524981df18a7de.pdf
+46c87fded035c97f35bb991fdec45634d15f9df2,https://arxiv.org/pdf/1707.09145v1.pdf
+46f32991ebb6235509a6d297928947a8c483f29e,http://pdfs.semanticscholar.org/46f3/2991ebb6235509a6d297928947a8c483f29e.pdf
+46551095a2cc4976d6be0165c31c37b0c5638719,http://staff.estem-uc.edu.au/roland/wp-content/uploads/file/roland/publications/Journal/JMUI/joshi_goecke_alghowinem_dhall_wagner_epps_parker_breakspear_JMUI2013_MultimodalAssistiveTechnologiesForDepressionDiagnosisAndMonitoring.pdf
+46538b0d841654a0934e4c75ccd659f6c5309b72,http://pdfs.semanticscholar.org/4653/8b0d841654a0934e4c75ccd659f6c5309b72.pdf
+46a29a5026142c91e5655454aa2c2f122561db7f,http://vipl.ict.ac.cn/sites/default/files/papers/files/2011_FG_sxli_Margin%20Emphasized%20Metric%20Learning%20and%20Its%20Application%20to%20Gabor%20Feature%20Based%20Face%20Recognition.pdf
+469ee1b00f7bbfe17c698ccded6f48be398f2a44,http://pdfs.semanticscholar.org/469e/e1b00f7bbfe17c698ccded6f48be398f2a44.pdf
+46196735a201185db3a6d8f6e473baf05ba7b68f,http://pdfs.semanticscholar.org/4619/6735a201185db3a6d8f6e473baf05ba7b68f.pdf
+4682fee7dc045aea7177d7f3bfe344aabf153bd5,http://www.cs.utexas.edu/~cv-fall2012/slides/elad-paper.pdf
+4657d87aebd652a5920ed255dca993353575f441,http://pdfs.semanticscholar.org/4657/d87aebd652a5920ed255dca993353575f441.pdf
+4622b82a8aff4ac1e87b01d2708a333380b5913b,http://www.cbsr.ia.ac.cn/users/zlei/papers/ICB2015/Zhu-ICB-15.pdf
+46e866f58419ff4259c65e8256c1d4f14927b2c6,http://pdfs.semanticscholar.org/f03d/cfd956cf4404ec9f0c7fb451479d72a63e03.pdf
+46072f872eee3413f9d05482be6446f6b96b6c09,http://pdfs.semanticscholar.org/4607/2f872eee3413f9d05482be6446f6b96b6c09.pdf
+4698a599425c3a6bae1c698456029519f8f2befe,http://pdfs.semanticscholar.org/4698/a599425c3a6bae1c698456029519f8f2befe.pdf
+2cf92ee60f719098acc3aae3981cedc47fa726b3,http://eksl.isi.edu/files/papers/sinjini_2007_1172280675.pdf
+2c258eec8e4da9e65018f116b237f7e2e0b2ad17,http://openaccess.thecvf.com/content_cvpr_2017/papers/Qiu_Deep_Quantization_Encoding_CVPR_2017_paper.pdf
+2cbb4a2f8fd2ddac86f8804fd7ffacd830a66b58,http://www.openu.ac.il/home/hassner/projects/cnn_agegender/CNN_AgeGenderEstimation.pdf
+2c8743089d9c7df04883405a31b5fbe494f175b4,http://srl.informatik.uni-freiburg.de/publicationsdir/linderICRA15.pdf
+2c61a9e26557dd0fe824909adeadf22a6a0d86b0,http://pdfs.semanticscholar.org/f117/3a4c5e3501323b37c1ae9a6d7dd8a236eab8.pdf
+2c34bf897bad780e124d5539099405c28f3279ac,http://pdfs.semanticscholar.org/2c34/bf897bad780e124d5539099405c28f3279ac.pdf
+2c203050a6cca0a0bff80e574bda16a8c46fe9c2,http://pdfs.semanticscholar.org/608f/43ee003c7c2e7f170336fda7a00cccd06311.pdf
+2cc4ae2e864321cdab13c90144d4810464b24275,http://pdfs.semanticscholar.org/f3d2/c66630176cbb1409ebacd2dac4b30d8e3145.pdf
+2cb5db4df50921d276ad9e7186119a276324e465,http://cbcl.mit.edu/projects/cbcl/publications/ps/Leibo_Liao_Poggio_VISAPP_2014.pdf
+2c3430e0cbe6c8d7be3316a88a5c13a50e90021d,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Wang_Multi-feature_Spectral_Clustering_2014_CVPR_paper.pdf
+2c2786ea6386f2d611fc9dbf209362699b104f83,http://pdfs.semanticscholar.org/2c27/86ea6386f2d611fc9dbf209362699b104f83.pdf
+2c92839418a64728438c351a42f6dc5ad0c6e686,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Masi_Pose-Aware_Face_Recognition_CVPR_2016_paper.pdf
+2c848cc514293414d916c0e5931baf1e8583eabc,http://pdfs.semanticscholar.org/2c84/8cc514293414d916c0e5931baf1e8583eabc.pdf
+2c883977e4292806739041cf8409b2f6df171aee,http://pdfs.semanticscholar.org/c5fb/ef530eb28d4f787990e0b962a6a68e420e49.pdf
+2cdd9e445e7259117b995516025fcfc02fa7eebb,http://hub.hku.hk/bitstream/10722/61208/1/Content.pdf
+2cf9088e9faa81872b355a4ea0a9fae46d3c8a08,http://www.cvg.unibe.ch/tpapadhimitri/tech.pdf
+2cdc40f20b70ca44d9fd8e7716080ee05ca7924a,http://pdfs.semanticscholar.org/2cdc/40f20b70ca44d9fd8e7716080ee05ca7924a.pdf
+2cac70f9c8140a12b6a55cef834a3d7504200b62,http://www.eng.auburn.edu/~reevesj/Classes/ELEC6970-latex/posters/baposterex1.pdf
+2c8f24f859bbbc4193d4d83645ef467bcf25adc2,http://romisatriawahono.net/lecture/rm/survey/machine%20learning/Frenay%20-%20Classification%20in%20the%20Presence%20of%20Label%20Noise%20-%202014.pdf
+2ca43325a5dbde91af90bf850b83b0984587b3cc,http://pdfs.semanticscholar.org/2ca4/3325a5dbde91af90bf850b83b0984587b3cc.pdf
+2cfc28a96b57e0817cc9624a5d553b3aafba56f3,https://web.njit.edu/~borcea/papers/ieee-sarnoff16.pdf
+2cdd5b50a67e4615cb0892beaac12664ec53b81f,http://people.eecs.berkeley.edu/~junyanz/projects/mirrormirror/mirrormirror_small.pdf
+2cae619d0209c338dc94593892a787ee712d9db0,http://vis-www.cs.umass.edu/papers/cvpr08shrf.pdf
+2c0acaec54ab2585ff807e18b6b9550c44651eab,http://pdfs.semanticscholar.org/2c0a/caec54ab2585ff807e18b6b9550c44651eab.pdf
+2c811b647a6aac924920c06e607e9e8d4b8d872d,http://pdfs.semanticscholar.org/2c81/1b647a6aac924920c06e607e9e8d4b8d872d.pdf
+2cdde47c27a8ecd391cbb6b2dea64b73282c7491,http://pdfs.semanticscholar.org/2cdd/e47c27a8ecd391cbb6b2dea64b73282c7491.pdf
+2c7c3a74da960cc76c00965bd3e343958464da45,http://pdfs.semanticscholar.org/2c7c/3a74da960cc76c00965bd3e343958464da45.pdf
+2cf5f2091f9c2d9ab97086756c47cd11522a6ef3,http://pdfs.semanticscholar.org/2cf5/f2091f9c2d9ab97086756c47cd11522a6ef3.pdf
+2c285dadfa6c07d392ee411d0213648a8a1cf68f,http://www.contrib.andrew.cmu.edu/~yzhiding/ICMI15.pdf
+2c17d36bab56083293456fe14ceff5497cc97d75,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhu_Unconstrained_Face_Alignment_CVPR_2016_paper.pdf
+2c4b96f6c1a520e75eb37c6ee8b844332bc0435c,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w12/papers/Leo_Automatic_Emotion_Recognition_ICCV_2015_paper.pdf
+2cd7821fcf5fae53a185624f7eeda007434ae037,http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf
+79581c364cefe53bff6bdd224acd4f4bbc43d6d4,http://pdfs.semanticscholar.org/7958/1c364cefe53bff6bdd224acd4f4bbc43d6d4.pdf
+794ddb1f3b7598985d4d289b5b0664be736a50c4,http://pdfs.semanticscholar.org/794d/db1f3b7598985d4d289b5b0664be736a50c4.pdf
+790aa543151312aef3f7102d64ea699a1d15cb29,http://arxiv.org/pdf/1607.06290v1.pdf
+795aa8064b34c4bf4acdd8be3f1e5d06da5a7756,http://pdfs.semanticscholar.org/795a/a8064b34c4bf4acdd8be3f1e5d06da5a7756.pdf
+79617903c5cb56697f2e738e1463b9654e2d68ed,http://hal.cse.msu.edu/pdfs/papers/2013-mmcf-tip.pdf
+795ea140df2c3d29753f40ccc4952ef24f46576c,http://pdfs.semanticscholar.org/795e/a140df2c3d29753f40ccc4952ef24f46576c.pdf
+79b669abf65c2ca323098cf3f19fa7bdd837ff31,http://dro.deakin.edu.au/eserv/DU:30044585/venkatesh-efficienttensor-2008.pdf
+794c0dc199f0bf778e2d40ce8e1969d4069ffa7b,http://hcil2.cs.umd.edu/trs/2011-17/2011-17.pdf
+79dd787b2877cf9ce08762d702589543bda373be,http://fipa.cs.kit.edu/befit/workshop2011/pdf/slides/jianguo_li-slides.pdf
+7966146d72f9953330556baa04be746d18702047,http://pdfs.semanticscholar.org/7966/146d72f9953330556baa04be746d18702047.pdf
+79fa57dedafddd3f3720ca26eb41c82086bfb332,http://www.cis.pku.edu.cn/vision/Visual&Robot/publication/doc/IROS05_wu.pdf
+79cdc8c786c535366cafeced1f3bdeb18ff04e66,http://www.researchgate.net/profile/Ziga_Spiclin/publication/221795259_Groupwise_registration_of_multimodal_images_by_an_efficient_joint_entropy_minimization_scheme/links/0deec520dd49e7bc24000000.pdf
+793e7f1ba18848908da30cbad14323b0389fd2a8,http://pdfs.semanticscholar.org/793e/7f1ba18848908da30cbad14323b0389fd2a8.pdf
+2d990b04c2bd61d3b7b922b8eed33aeeeb7b9359,http://pdfs.semanticscholar.org/2d99/0b04c2bd61d3b7b922b8eed33aeeeb7b9359.pdf
+2d25045ec63f9132371841c0beccd801d3733908,http://pdfs.semanticscholar.org/2d25/045ec63f9132371841c0beccd801d3733908.pdf
+2dd6c988b279d89ab5fb5155baba65ce4ce53c1e,http://pdfs.semanticscholar.org/2dd6/c988b279d89ab5fb5155baba65ce4ce53c1e.pdf
+2db05ef11041447dbc735362db68b04e562c1e35,http://www.cs.berkeley.edu/~daf/eccv-sft.pdf
+2d080662a1653f523321974a57518e7cb67ecb41,http://pdfs.semanticscholar.org/2d08/0662a1653f523321974a57518e7cb67ecb41.pdf
+2d4b9fe3854ccce24040074c461d0c516c46baf4,https://arxiv.org/pdf/1704.04671v1.pdf
+2d294c58b2afb529b26c49d3c92293431f5f98d0,https://ibug.doc.ic.ac.uk/media/uploads/documents/mmpp_journal.pdf
+2d1f86e2c7ba81392c8914edbc079ac64d29b666,https://arxiv.org/pdf/1702.04471v1.pdf
+2d164f88a579ba53e06b601d39959aaaae9016b7,http://pdfs.semanticscholar.org/a666/2bf767df8f8a5bcb655142ac0fb7c4f524f1.pdf
+2d23fa205acca9c21e3e1a04674f1e5a9528550e,http://pdfs.semanticscholar.org/2d23/fa205acca9c21e3e1a04674f1e5a9528550e.pdf
+2d244d70ed1a2ba03d152189f1f90ff2b4f16a79,http://pdfs.semanticscholar.org/2d24/4d70ed1a2ba03d152189f1f90ff2b4f16a79.pdf
+2d88e7922d9f046ace0234f9f96f570ee848a5b5,http://pdfs.semanticscholar.org/2d88/e7922d9f046ace0234f9f96f570ee848a5b5.pdf
+2d31ab536b3c8a05de0d24e0257ca4433d5a7c75,http://tamaraberg.com/papers/xray.pdf
+2dbde64ca75e7986a0fa6181b6940263bcd70684,http://www.micc.unifi.it/wp-content/uploads/2016/01/2014_pose_independent.pdf
+2d146cc0908c931d87f6e6e5d08b117c30a69b8d,http://www.cs.cityu.edu.hk/~yihong/download/TSMC.pdf
+2d0363a3ebda56d91d704d5ff5458a527775b609,http://pdfs.semanticscholar.org/2e07/a4c0f87ac078fcccf057d109f9387f4703a9.pdf
+2d93a9aa8bed51d0d1b940c73ac32c046ebf1eb8,http://pdfs.semanticscholar.org/2d93/a9aa8bed51d0d1b940c73ac32c046ebf1eb8.pdf
+2dd2c7602d7f4a0b78494ac23ee1e28ff489be88,https://www.tugraz.at/fileadmin/user_upload/Institute/ICG/Documents/lrs/pubs/koestinger_cvpr_2012.pdf
+2d84e30c61281d3d7cdd11676683d6e66a68aea6,http://pdfs.semanticscholar.org/2d84/e30c61281d3d7cdd11676683d6e66a68aea6.pdf
+2d98a1cb0d1a37c79a7ebcb727066f9ccc781703,https://arxiv.org/pdf/1706.07525v1.pdf
+2dced31a14401d465cd115902bf8f508d79de076,http://pdfs.semanticscholar.org/2dce/d31a14401d465cd115902bf8f508d79de076.pdf
+2d05e768c64628c034db858b7154c6cbd580b2d5,http://pdfs.semanticscholar.org/2d05/e768c64628c034db858b7154c6cbd580b2d5.pdf
+2d072cd43de8d17ce3198fae4469c498f97c6277,http://www.patrikhuber.ch/files/RCRC_SPL_2015.pdf
+2d35a07c4fa03d78d5b622ab703ea44850de8d39,http://www.cs.sunysb.edu/~vislab/papers/Zhang2005cgi.pdf
+2dd5f1d69e0e8a95a10f3f07f2c0c7fa172994b3,http://pdfs.semanticscholar.org/ca31/53a726d8c212a7fd92f696c7e00a3ae3b31f.pdf
+2d71e0464a55ef2f424017ce91a6bcc6fd83f6c3,http://pdfs.semanticscholar.org/77c1/56969e3b7fbc86432c5238a95679d25ac579.pdf
+2d38fd1df95f5025e2cee5bc439ba92b369a93df,http://pdfs.semanticscholar.org/2d38/fd1df95f5025e2cee5bc439ba92b369a93df.pdf
+2d83ba2d43306e3c0587ef16f327d59bf4888dc3,http://www.cs.colby.edu/courses/S16/cs365/papers/karpath-deepVideo-CVPR14.pdf
+2d84c0d96332bb4fbd8acced98e726aabbf15591,http://pdfs.semanticscholar.org/2d84/c0d96332bb4fbd8acced98e726aabbf15591.pdf
+2d79d338c114ece1d97cde1aa06ab4cf17d38254,http://crcv.ucf.edu/papers/cvpr2016/Borji_CVPR2016.pdf
+2df4d05119fe3fbf1f8112b3ad901c33728b498a,http://pdfs.semanticscholar.org/891b/10c4b3b92ca30c9b93170ec9abd71f6099c4.pdf
+2d3482dcff69c7417c7b933f22de606a0e8e42d4,http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf
+2d748f8ee023a5b1fbd50294d176981ded4ad4ee,http://pdfs.semanticscholar.org/2d74/8f8ee023a5b1fbd50294d176981ded4ad4ee.pdf
+2d3c17ced03e4b6c4b014490fe3d40c62d02e914,http://pdfs.semanticscholar.org/2d3c/17ced03e4b6c4b014490fe3d40c62d02e914.pdf
+4188bd3ef976ea0dec24a2512b44d7673fd4ad26,http://ibug.doc.ic.ac.uk/media/uploads/documents/ieee_tip2010.pdf
+416b559402d0f3e2b785074fcee989d44d82b8e5,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Cai_Multi-View_Super_Vector_2014_CVPR_paper.pdf
+416364cfdbc131d6544582e552daf25f585c557d,http://www.dcs.qmw.ac.uk/~sgg/papers/Zalewski_Gong_FG04.pdf
+41b38da2f4137c957537908f9cb70cbd2fac8bc1,https://arxiv.org/pdf/1701.01879v1.pdf
+41cfc9edbf36754746991c2a1e9a47c0d129d105,https://www.cs.princeton.edu/~ohad/papers/FriedShechtmanGoldmanFinkelstein_SIGGRAPH2016.pdf
+41000c3a3344676513ef4bfcd392d14c7a9a7599,http://pdfs.semanticscholar.org/d3ba/9ed56e9ddb73f0e0f2bea3fd3920db30f42e.pdf
+411ee9236095f8f5ca3b9ef18fd3381c1c68c4b8,http://pdfs.semanticscholar.org/411e/e9236095f8f5ca3b9ef18fd3381c1c68c4b8.pdf
+4159663f0b292fd8cc7411929be9d669bb98b386,http://www.researchgate.net/profile/Pradeep_Khosla/publication/224752362_Cancelable_biometric_filters_for_face_recognition/links/00b4952ade904b0db4000000.pdf
+4140498e96a5ff3ba816d13daf148fffb9a2be3f,http://multicomp.cs.cmu.edu/wp-content/uploads/2017/10/2017_FG_Li_Constrained.pdf
+41f8477a6be9cd992a674d84062108c68b7a9520,http://pdfs.semanticscholar.org/41f8/477a6be9cd992a674d84062108c68b7a9520.pdf
+411503a304a661b0c04c2b446a6e43e4a70942dc,http://www0.cs.ucl.ac.uk/staff/s.prince/Papers/CRV2010FaceClustFinal.pdf
+41aa8c1c90d74f2653ef4b3a2e02ac473af61e47,http://pdfs.semanticscholar.org/41aa/8c1c90d74f2653ef4b3a2e02ac473af61e47.pdf
+41ab4939db641fa4d327071ae9bb0df4a612dc89,http://pdfs.semanticscholar.org/41ab/4939db641fa4d327071ae9bb0df4a612dc89.pdf
+41971dfbf404abeb8cf73fea29dc37b9aae12439,http://pdfs.semanticscholar.org/4197/1dfbf404abeb8cf73fea29dc37b9aae12439.pdf
+4157e45f616233a0874f54a59c3df001b9646cd7,http://pdfs.semanticscholar.org/4157/e45f616233a0874f54a59c3df001b9646cd7.pdf
+41a6196f88beced105d8bc48dd54d5494cc156fb,http://toc.proceedings.com/25848webtoc.pdf
+41de109bca9343691f1d5720df864cdbeeecd9d0,http://pdfs.semanticscholar.org/41de/109bca9343691f1d5720df864cdbeeecd9d0.pdf
+41d9a240b711ff76c5448d4bf4df840cc5dad5fc,https://arxiv.org/pdf/1206.2627v2.pdf
+419a6fca4c8d73a1e43003edc3f6b610174c41d2,http://www.robots.newcastle.edu.au/~chalup/chalup_publications/p058_preprint.pdf
+41c97af4801ac302f09902aeec2af17b481563ab,http://eeeweba.ntu.edu.sg/computervision/Research%20Papers/2016/Collaborative%20Multi-View%20Metric%20Learning%20for%20Visual%20Classification.pdf
+4136a4c4b24c9c386d00e5ef5dffdd31ca7aea2c,http://www.ifp.illinois.edu/~dagli/papers/ICME07.pdf
+4180978dbcd09162d166f7449136cb0b320adf1f,http://pdfs.semanticscholar.org/4180/978dbcd09162d166f7449136cb0b320adf1f.pdf
+41b997f6cec7a6a773cd09f174cb6d2f036b36cd,http://pdfs.semanticscholar.org/41b9/97f6cec7a6a773cd09f174cb6d2f036b36cd.pdf
+41aa209e9d294d370357434f310d49b2b0baebeb,https://arxiv.org/pdf/1605.05440v1.pdf
+4118b4fc7d61068b9b448fd499876d139baeec81,http://www.cs.utexas.edu/~ssi/TKDE2010.pdf
+413a184b584dc2b669fbe731ace1e48b22945443,http://www.vision.ee.ethz.ch/en/publications/papers/articles/eth_biwi_00911.pdf
+83b7578e2d9fa60d33d9336be334f6f2cc4f218f,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_101_ext.pdf
+839a2155995acc0a053a326e283be12068b35cb8,http://pdfs.semanticscholar.org/839a/2155995acc0a053a326e283be12068b35cb8.pdf
+83fd2d2d5ad6e4e153672c9b6d1a3785f754b60e,http://pdfs.semanticscholar.org/83fd/2d2d5ad6e4e153672c9b6d1a3785f754b60e.pdf
+83ca4cca9b28ae58f461b5a192e08dffdc1c76f3,http://infoscience.epfl.ch/record/200407/files/icip1024-cam-ready.pdf
+831fbef657cc5e1bbf298ce6aad6b62f00a5b5d9,http://pdfs.semanticscholar.org/831f/bef657cc5e1bbf298ce6aad6b62f00a5b5d9.pdf
+832e1d128059dd5ed5fa5a0b0f021a025903f9d5,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Dapogny_Pairwise_Conditional_Random_ICCV_2015_paper.pdf
+83e093a07efcf795db5e3aa3576531d61557dd0d,http://pdfs.semanticscholar.org/83e0/93a07efcf795db5e3aa3576531d61557dd0d.pdf
+831d661d657d97a07894da8639a048c430c5536d,http://www.cv-foundation.org/openaccess/content_cvpr_2016_workshops/w4/papers/Zhu_Weakly_Supervised_Facial_CVPR_2016_paper.pdf
+83b4899d2899dd6a8d956eda3c4b89f27f1cd308,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_ICIP_2007/pdfs/0100377.pdf
+830e5b1043227fe189b3f93619ef4c58868758a7,http://pdfs.semanticscholar.org/830e/5b1043227fe189b3f93619ef4c58868758a7.pdf
+8323af714efe9a3cadb31b309fcc2c36c8acba8f,http://pdfs.semanticscholar.org/8323/af714efe9a3cadb31b309fcc2c36c8acba8f.pdf
+831226405bb255527e9127b84e8eaedd7eb8e9f9,http://pdfs.semanticscholar.org/8312/26405bb255527e9127b84e8eaedd7eb8e9f9.pdf
+83fd5c23204147844a0528c21e645b757edd7af9,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W05/papers/Bulan_USDOT_Number_Localization_2015_CVPR_paper.pdf
+8384e104796488fa2667c355dd15b65d6d5ff957,http://pdfs.semanticscholar.org/feea/803c1eaedc825509e24a8c1279ffe0251d9d.pdf
+8323529cf37f955fb3fc6674af6e708374006a28,http://researcher.ibm.com/researcher/files/us-smiyaza/FPIV04.pdf
+8395cf3535a6628c3bdc9b8d0171568d551f5ff0,http://pdfs.semanticscholar.org/8395/cf3535a6628c3bdc9b8d0171568d551f5ff0.pdf
+83ac942d71ba908c8d76fc68de6173151f012b38,http://pdfs.semanticscholar.org/83ac/942d71ba908c8d76fc68de6173151f012b38.pdf
+834f5ab0cb374b13a6e19198d550e7a32901a4b2,http://pdfs.semanticscholar.org/834f/5ab0cb374b13a6e19198d550e7a32901a4b2.pdf
+8320dbdd3e4712cca813451cd94a909527652d63,http://pdfs.semanticscholar.org/d921/1df11080fa5eb0dc1d62fb683b10c055673a.pdf
+834b15762f97b4da11a2d851840123dbeee51d33,http://pdfs.semanticscholar.org/834b/15762f97b4da11a2d851840123dbeee51d33.pdf
+833fa04463d90aab4a9fe2870d480f0b40df446e,http://static.cs.brown.edu/~gen/pub_papers/SUN_Attribute_Database_CVPR2012.pdf
+833f6ab858f26b848f0d747de502127406f06417,http://mediatum.ub.tum.de/doc/980054/157447.pdf
+8309e8f27f3fb6f2ac1b4343a4ad7db09fb8f0ff,http://pdfs.semanticscholar.org/8309/e8f27f3fb6f2ac1b4343a4ad7db09fb8f0ff.pdf
+1b635f494eff2e5501607ebe55eda7bdfa8263b8,http://pdfs.semanticscholar.org/1b63/5f494eff2e5501607ebe55eda7bdfa8263b8.pdf
+1b6394178dbc31d0867f0b44686d224a19d61cf4,http://pdfs.semanticscholar.org/ca8e/5419fd570f19643425b24da801283b706fc1.pdf
+1bd50926079e68a6e32dc4412e9d5abe331daefb,https://pdfs.semanticscholar.org/544d/6cd24db5adad8453033e0cc1aa7d3d6224ab.pdf
+1b150248d856f95da8316da868532a4286b9d58e,http://pdfs.semanticscholar.org/6724/41000751d58396790f4c993419d70f6af3f4.pdf
+1be498d4bbc30c3bfd0029114c784bc2114d67c0,http://www.openu.ac.il/home/hassner/Adience/EidingerEnbarHassner_tifs.pdf
+1be785355ae29e32d85d86285bb8f90ea83171df,http://staff.estem-uc.edu.au/roland/files/2009/05/Sharma_Dhall_Gedeon_Goecke_ACII2013_ModelingStressUsingThermalFacialPatterns_ASpatio-TemporalApproach.pdf
+1b5875dbebc76fec87e72cee7a5263d325a77376,http://arxiv.org/pdf/1603.00560v2.pdf
+1bdfb3deae6e6c0df6537efcd1d7edcb4d7a96e9,http://pdfs.semanticscholar.org/1bdf/b3deae6e6c0df6537efcd1d7edcb4d7a96e9.pdf
+1b300a7858ab7870d36622a51b0549b1936572d4,http://www.ee.oulu.fi/research/mvmp/mvg/files/pdf/Yimo-TIP2016.pdf
+1b90507f02967ff143fce993a5abbfba173b1ed0,http://mrl.cs.vsb.cz/publications/fusek_ipta_2014.pdf
+1b794b944fd462a2742b6c2f8021fecc663004c9,https://www.ecse.rpi.edu/~cvrl/wuy/HierarchicalShape/CVPR14_facialfeaturedetection_cameraready.pdf
+1b7ae509c8637f3c123cf6151a3089e6b8a0d5b2,http://pdfs.semanticscholar.org/3a2f/aa145c5fe63ab906568a29fa4100220e03d9.pdf
+1b41d4ffb601d48d7a07dbbae01343f4eb8cc38c,https://arxiv.org/pdf/1608.00486v3.pdf
+1b1173a3fb33f9dfaf8d8cc36eb0bf35e364913d,http://www.pitt.edu/~jeffcohn/biblio/dicta2010.pdf
+1b0a071450c419138432c033f722027ec88846ea,http://cvrr.ucsd.edu/publications/2016/YuenMartinTrivediITSC2016.pdf
+1b60b8e70859d5c85ac90510b370b501c5728620,http://pdfs.semanticscholar.org/1b60/b8e70859d5c85ac90510b370b501c5728620.pdf
+1b3b01513f99d13973e631c87ffa43904cd8a821,http://pdfs.semanticscholar.org/1b3b/01513f99d13973e631c87ffa43904cd8a821.pdf
+1bc214c39536c940b12c3a2a6b78cafcbfddb59a,http://pdfs.semanticscholar.org/1bc2/14c39536c940b12c3a2a6b78cafcbfddb59a.pdf
+1bc9aaa41c08bbd0c01dd5d7d7ebf3e48ae78113,http://pdfs.semanticscholar.org/1bc9/aaa41c08bbd0c01dd5d7d7ebf3e48ae78113.pdf
+1be18a701d5af2d8088db3e6aaa5b9b1d54b6fd3,http://pdfs.semanticscholar.org/1be1/8a701d5af2d8088db3e6aaa5b9b1d54b6fd3.pdf
+1b79628af96eb3ad64dbb859dae64f31a09027d5,http://pdfs.semanticscholar.org/1b79/628af96eb3ad64dbb859dae64f31a09027d5.pdf
+1bcbf2a4500d27d036e0f9d36d7af71c72f8ab61,http://mplab.ucsd.edu/~marni/pubs/Bartlett_CVPR05.pdf
+1bddad4dc0dfa8efa402aa5d18c29304a5760f12,https://www.researchgate.net/profile/Iickho_Song/publication/254062033_Complexity-Reduced_Scheme_for_Feature_Extraction_With_Linear_Discriminant_Analysis/links/53d694ce0cf228d363ea69d5.pdf
+1b70bbf7cdfc692873ce98dd3c0e191580a1b041,http://pdfs.semanticscholar.org/1b70/bbf7cdfc692873ce98dd3c0e191580a1b041.pdf
+1bc23c771688109bed9fd295ce82d7e702726327,http://pdfs.semanticscholar.org/1bc2/3c771688109bed9fd295ce82d7e702726327.pdf
+1bad8a9640cdbc4fe7de12685651f44c4cff35ce,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2013/W08/papers/Gourgari_THETIS_Three_Dimensional_2013_CVPR_paper.pdf
+1b589016fbabe607a1fb7ce0c265442be9caf3a9,http://pdfs.semanticscholar.org/5efe/b55fe3f03cd16aa0c268d74a5ad2e03170cf.pdf
+1be0ce87bb5ba35fa2b45506ad997deef6d6a0a8,http://pdfs.semanticscholar.org/b1c8/4ab7cc0c85e8aa8be4c0ec32bad225c9c630.pdf
+1b4bc7447f500af2601c5233879afc057a5876d8,https://www.ecse.rpi.edu/~cvrl/Publication/pdf/Wang2015g.pdf
+1b27ca161d2e1d4dd7d22b1247acee5c53db5104,http://pdfs.semanticscholar.org/1b27/ca161d2e1d4dd7d22b1247acee5c53db5104.pdf
+1b69b860e22278a6f482507b8ce879082dd00c44,http://www.cs.utexas.edu/~chaoyeh/cvpr_2014_Inferring_Analogous_Attributes.pdf
+7711a7404f1f1ac3a0107203936e6332f50ac30c,http://pdfs.semanticscholar.org/7711/a7404f1f1ac3a0107203936e6332f50ac30c.pdf
+7701952e405c3d8a0947e2a309de281aa76bd3f4,http://isl.ira.uka.de/~stiefel/papers/IEE_SIU_2LDA.pdf
+778c9f88839eb26129427e1b8633caa4bd4d275e,http://www.cs.berkeley.edu/~nzhang/papers/cvpr12_ppk.pdf
+7735f63e5790006cb3d989c8c19910e40200abfc,http://pdfs.semanticscholar.org/7735/f63e5790006cb3d989c8c19910e40200abfc.pdf
+77b1db2281292372c38926cc4aca32ef056011dc,http://pdfs.semanticscholar.org/77b1/db2281292372c38926cc4aca32ef056011dc.pdf
+776835eb176ed4655d6e6c308ab203126194c41e,http://pdfs.semanticscholar.org/7768/35eb176ed4655d6e6c308ab203126194c41e.pdf
+77c53ec6ea448db4dad586e002a395c4a47ecf66,http://pdfs.semanticscholar.org/77c5/3ec6ea448db4dad586e002a395c4a47ecf66.pdf
+778bff335ae1b77fd7ec67404f71a1446624331b,http://pdfs.semanticscholar.org/778b/ff335ae1b77fd7ec67404f71a1446624331b.pdf
+7726a6ab26a1654d34ec04c0b7b3dd80c5f84e0d,https://graphics.ethz.ch/Downloads/Publications/Papers/2013/Zun13a/Zun13a.pdf
+774cbb45968607a027ae4729077734db000a1ec5,http://pdfs.semanticscholar.org/774c/bb45968607a027ae4729077734db000a1ec5.pdf
+7754b708d6258fb8279aa5667ce805e9f925dfd0,https://www.ecse.rpi.edu/~qji/Papers/PAMI_AU.pdf
+77037a22c9b8169930d74d2ce6f50f1a999c1221,https://ueaeprints.uea.ac.uk/64308/1/Accepted_manuscript.pdf
+779ad364cae60ca57af593c83851360c0f52c7bf,http://pdfs.semanticscholar.org/779a/d364cae60ca57af593c83851360c0f52c7bf.pdf
+77a9b1856ebbc9a6170ee4c572a515d6db062cef,http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/1291.pdf
+7792fbc59f3eafc709323cdb63852c5d3a4b23e9,http://pdfs.semanticscholar.org/7792/fbc59f3eafc709323cdb63852c5d3a4b23e9.pdf
+77d31d2ec25df44781d999d6ff980183093fb3de,http://openaccess.thecvf.com/content_cvpr_2016/supplemental/Littwin_The_Multiverse_Loss_2016_CVPR_supplemental.pdf
+77fb9e36196d7bb2b505340b6b94ba552a58b01b,http://pdfs.semanticscholar.org/77fb/9e36196d7bb2b505340b6b94ba552a58b01b.pdf
+486840f4f524e97f692a7f6b42cd19019ee71533,https://arxiv.org/pdf/1703.08388v2.pdf
+48463a119f67ff2c43b7c38f0a722a32f590dfeb,http://pdfs.semanticscholar.org/4846/3a119f67ff2c43b7c38f0a722a32f590dfeb.pdf
+488d3e32d046232680cc0ba80ce3879f92f35cac,http://pdfs.semanticscholar.org/488d/3e32d046232680cc0ba80ce3879f92f35cac.pdf
+486a82f50835ea888fbc5c6babf3cf8e8b9807bc,http://pdfs.semanticscholar.org/486a/82f50835ea888fbc5c6babf3cf8e8b9807bc.pdf
+48fea82b247641c79e1994f4ac24cad6b6275972,http://wan.poly.edu/KDD2012/docs/p1469.pdf
+48734cb558b271d5809286447ff105fd2e9a6850,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w41/papers/Mahoor_Facial_Expression_Recognition_CVPR_2017_paper.pdf
+48a417cfeba06feb4c7ab30f06c57ffbc288d0b5,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Chen_Robust_Dictionary_Learning_2013_ICCV_paper.pdf
+48853c25dc75481b0c77f408a8a76383287ebe2a,http://qil.uh.edu/qil/websitecontent/pdf/2015-45.pdf
+48c41ffab7ff19d24e8df3092f0b5812c1d3fb6e,http://www.iri.upc.edu/files/scidoc/1938-Multi-Modal-Embedding-for-Main-Product-Detection-in-Fashion.pdf
+488a61e0a1c3768affdcd3c694706e5bb17ae548,http://pdfs.semanticscholar.org/916b/f08e66c3dd11bec809dd8cbe384e8860bb66.pdf
+48910f9b6ccc40226cd4f105ed5291571271b39e,http://pdfs.semanticscholar.org/4891/0f9b6ccc40226cd4f105ed5291571271b39e.pdf
+48a9241edda07252c1aadca09875fabcfee32871,https://arxiv.org/pdf/1611.08657v5.pdf
+48f0055295be7b175a06df5bc6fa5c6b69725785,http://pdfs.semanticscholar.org/48f0/055295be7b175a06df5bc6fa5c6b69725785.pdf
+48729e4de8aa478ee5eeeb08a72a446b0f5367d5,http://faculty.ucmerced.edu/mhyang/papers/icip14_cfh.pdf
+48174c414cfce7f1d71c4401d2b3d49ba91c5338,http://pdfs.semanticscholar.org/4817/4c414cfce7f1d71c4401d2b3d49ba91c5338.pdf
+488375ae857a424febed7c0347cc9590989f01f7,http://pdfs.semanticscholar.org/4883/75ae857a424febed7c0347cc9590989f01f7.pdf
+4836b084a583d2e794eb6a94982ea30d7990f663,http://pdfs.semanticscholar.org/4836/b084a583d2e794eb6a94982ea30d7990f663.pdf
+4866a5d6d7a40a26f038fc743e16345c064e9842,http://pdfs.semanticscholar.org/4866/a5d6d7a40a26f038fc743e16345c064e9842.pdf
+4805f41c4f8cfb932b011dfdd7f8907152590d1a,http://www.affectiva.com/wp-content/uploads/2014/09/From_Dials_to_Facial_Coding_Automated_Detection_of_Spontaneous_Facial_Expressions_fo.pdf
+488e475eeb3bb39a145f23ede197cd3620f1d98a,http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf
+487df616e981557c8e1201829a1d0ec1ecb7d275,http://www.citi.sinica.edu.tw/papers/yu.tsao/4293-F.pdf
+48f211a9764f2bf6d6dda4a467008eda5680837a,http://www.lv-nus.org/papers/2011/iccv2011-occupation.pdf
+4858d014bb5119a199448fcd36746c413e60f295,http://pdfs.semanticscholar.org/4858/d014bb5119a199448fcd36746c413e60f295.pdf
+48319e611f0daaa758ed5dcf5a6496b4c6ef45f2,http://pdfs.semanticscholar.org/4831/9e611f0daaa758ed5dcf5a6496b4c6ef45f2.pdf
+48cfc5789c246c6ad88ff841701204fc9d6577ed,http://pdfs.semanticscholar.org/48cf/c5789c246c6ad88ff841701204fc9d6577ed.pdf
+481fb0a74528fa7706669a5cce6a212ac46eaea3,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Chen_Recognizing_RGB_Images_2014_CVPR_paper.pdf
+70f189798c8b9f2b31c8b5566a5cf3107050b349,http://www.cs.colostate.edu/~vision/pasc/docs/pasc2013_NISTIR_061013.pdf
+70580ed8bc482cad66e059e838e4a779081d1648,http://pdfs.semanticscholar.org/7058/0ed8bc482cad66e059e838e4a779081d1648.pdf
+703890b7a50d6535900a5883e8d2a6813ead3a03,http://pdfs.semanticscholar.org/7038/90b7a50d6535900a5883e8d2a6813ead3a03.pdf
+70db3a0d2ca8a797153cc68506b8650908cb0ada,http://pdfs.semanticscholar.org/70db/3a0d2ca8a797153cc68506b8650908cb0ada.pdf
+706236308e1c8d8b8ba7749869c6b9c25fa9f957,http://affect.media.mit.edu/pdfs/11.McDuff-etal-Crowdsourced-2011.pdf
+7002d6fc3e0453320da5c863a70dbb598415e7aa,http://www.cris.ucr.edu/IGERT/papers/SongfanAbstract.pdf
+7071cd1ee46db4bc1824c4fd62d36f6d13cad08a,http://pdfs.semanticscholar.org/7071/cd1ee46db4bc1824c4fd62d36f6d13cad08a.pdf
+70c2c2d2b7e34ff533a8477eff9763be196cd03a,http://iplab.dmi.unict.it/sites/default/files/_9.pdf
+70569810e46f476515fce80a602a210f8d9a2b95,http://www.cv-foundation.org/openaccess/content_cvpr_2016_workshops/w18/papers/Antipov_Apparent_Age_Estimation_CVPR_2016_paper.pdf
+705a24f4e1766a44bbba7cf335f74229ed443c7b,http://web.ing.puc.cl/~asoto/papers/Maturana-09.pdf
+70e79d7b64f5540d309465620b0dab19d9520df1,http://pdfs.semanticscholar.org/70e7/9d7b64f5540d309465620b0dab19d9520df1.pdf
+7003d903d5e88351d649b90d378f3fc5f211282b,http://pdfs.semanticscholar.org/7003/d903d5e88351d649b90d378f3fc5f211282b.pdf
+703c9c8f20860a1b1be63e6df1622b2021b003ca,http://openaccess.thecvf.com/content_ICCV_2017/papers/Kobayashi_Flip-Invariant_Motion_Representation_ICCV_2017_paper.pdf
+70a69569ba61f3585cd90c70ca5832e838fa1584,http://pdfs.semanticscholar.org/70a6/9569ba61f3585cd90c70ca5832e838fa1584.pdf
+7085d21f483743007cc6a8e3fa01d8bdf592ad33,http://www.meeting.edu.cn/meeting/UploadPapers/1282699022328.pdf
+70bf1769d2d5737fc82de72c24adbb7882d2effd,http://pdfs.semanticscholar.org/70bf/1769d2d5737fc82de72c24adbb7882d2effd.pdf
+1e5ca4183929929a4e6f09b1e1d54823b8217b8e,http://pdfs.semanticscholar.org/1e5c/a4183929929a4e6f09b1e1d54823b8217b8e.pdf
+1e058b3af90d475bf53b3f977bab6f4d9269e6e8,http://pdfs.semanticscholar.org/30b9/7c36bcb99e857cd78fc55e2600d7851dc117.pdf
+1e799047e294267087ec1e2c385fac67074ee5c8,http://pdfs.semanticscholar.org/1e79/9047e294267087ec1e2c385fac67074ee5c8.pdf
+1ef4815f41fa3a9217a8a8af12cc385f6ed137e1,https://www.d2.mpi-inf.mpg.de/sites/default/files/wood2015_iccv.pdf
+1eb4ea011a3122dc7ef3447e10c1dad5b69b0642,http://pdfs.semanticscholar.org/1eb4/ea011a3122dc7ef3447e10c1dad5b69b0642.pdf
+1e7ae86a78a9b4860aa720fb0fd0bdc199b092c3,http://pdfs.semanticscholar.org/1e7a/e86a78a9b4860aa720fb0fd0bdc199b092c3.pdf
+1e8eee51fd3bf7a9570d6ee6aa9a09454254689d,http://www.cse.msu.edu/rgroups/biometrics/Publications/Face/WangOttoJain_FaceSearchAtScale_TPAMI.pdf
+1ea8085fe1c79d12adffb02bd157b54d799568e4,http://pdfs.semanticscholar.org/1ea8/085fe1c79d12adffb02bd157b54d799568e4.pdf
+1ebdfceebad642299e573a8995bc5ed1fad173e3,http://pdfs.semanticscholar.org/1ebd/fceebad642299e573a8995bc5ed1fad173e3.pdf
+1eec03527703114d15e98ef9e55bee5d6eeba736,http://pdfs.semanticscholar.org/1eec/03527703114d15e98ef9e55bee5d6eeba736.pdf
+1e07500b00fcd0f65cf30a11f9023f74fe8ce65c,http://vijaychan.github.io/Publications/2015%20ICIP%20-%20Whole%20Subspace%20Discriminant%20Analysis%20for%20Face%20Recognition.pdf
+1e19ea6e7f1c04a18c952ce29386252485e4031e,http://pdfs.semanticscholar.org/1e19/ea6e7f1c04a18c952ce29386252485e4031e.pdf
+1ec98785ac91808455b753d4bc00441d8572c416,https://www.cl.cam.ac.uk/~tb346/pub/papers/fg2017_curriculum.pdf
+1ed6c7e02b4b3ef76f74dd04b2b6050faa6e2177,http://pdfs.semanticscholar.org/6433/c412149382418ccd8aa966aa92973af41671.pdf
+1eba6fc35a027134aa8997413647b49685f6fbd1,https://ubicomp-mental-health.github.io/papers/voss-glass.pdf
+1e1d7cbbef67e9e042a3a0a9a1bcefcc4a9adacf,http://personal.stevens.edu/~hli18//data/papers/CVPR2016_CameraReady.pdf
+1ef5ce743a44d8a454dbfc2657e1e2e2d025e366,http://pdfs.semanticscholar.org/1ef5/ce743a44d8a454dbfc2657e1e2e2d025e366.pdf
+1e58d7e5277288176456c66f6b1433c41ca77415,http://pdfs.semanticscholar.org/1e58/d7e5277288176456c66f6b1433c41ca77415.pdf
+1e5a1619fe5586e5ded2c7a845e73f22960bbf5a,https://arxiv.org/pdf/1509.04783v1.pdf
+1e213b03e1b8a6067bf37503904491e98b9e42df,http://figment.cse.usf.edu/~sfefilat/data/papers/TuAT10.9.pdf
+1e9f1bbb751fe538dde9f612f60eb946747defaa,http://pdfs.semanticscholar.org/1e9f/1bbb751fe538dde9f612f60eb946747defaa.pdf
+1e917fe7462445996837934a7e46eeec14ebc65f,http://pdfs.semanticscholar.org/1e91/7fe7462445996837934a7e46eeec14ebc65f.pdf
+1e8394cc9fe7c2392aa36fb4878faf7e78bbf2de,https://arxiv.org/pdf/1410.3748v1.pdf
+1ef4aac0ebc34e76123f848c256840d89ff728d0,http://www.openu.ac.il/home/hassner/projects/augmented_faces/Masietal2017rapid.pdf
+1ecb56e7c06a380b3ce582af3a629f6ef0104457,http://pdfs.semanticscholar.org/1ecb/56e7c06a380b3ce582af3a629f6ef0104457.pdf
+1e64b2d2f0a8a608d0d9d913c4baee6973995952,http://sergioescalera.com/wp-content/uploads/2017/06/FG_presentation.pdf
+1ee27c66fabde8ffe90bd2f4ccee5835f8dedbb9,http://pdfs.semanticscholar.org/1ee2/7c66fabde8ffe90bd2f4ccee5835f8dedbb9.pdf
+1e41a3fdaac9f306c0ef0a978ae050d884d77d2a,http://www.cs.huji.ac.il/~daphna/course/CoursePapers/SerreEtAl%20PAMI2007.pdf
+1e94cc91c5293c8fc89204d4b881552e5b2ce672,http://pdfs.semanticscholar.org/5893/7d427ff36e1470b18120245148355047e4ea.pdf
+1e1e66783f51a206509b0a427e68b3f6e40a27c8,http://pdfs.semanticscholar.org/1e1e/66783f51a206509b0a427e68b3f6e40a27c8.pdf
+1e0add381031245b1d5129b482853ee738b498e1,http://eprints.pascal-network.org/archive/00001829/01/CVPR05_Romdhani.pdf
+1e8eec6fc0e4538e21909ab6037c228547a678ba,http://pdfs.semanticscholar.org/1e8e/ec6fc0e4538e21909ab6037c228547a678ba.pdf
+1e6ed6ca8209340573a5e907a6e2e546a3bf2d28,http://arxiv.org/pdf/1607.01450v1.pdf
+84fe5b4ac805af63206012d29523a1e033bc827e,http://pdfs.semanticscholar.org/84fe/5b4ac805af63206012d29523a1e033bc827e.pdf
+84e4b7469f9c4b6c9e73733fa28788730fd30379,http://pdfs.semanticscholar.org/84e4/b7469f9c4b6c9e73733fa28788730fd30379.pdf
+84dcf04802743d9907b5b3ae28b19cbbacd97981,http://pdfs.semanticscholar.org/84dc/f04802743d9907b5b3ae28b19cbbacd97981.pdf
+841bf196ee0086c805bd5d1d0bddfadc87e424ec,http://pdfs.semanticscholar.org/841b/f196ee0086c805bd5d1d0bddfadc87e424ec.pdf
+842d82081f4b27ca2d4bc05c6c7e389378f0c7b8,http://pdfs.semanticscholar.org/842d/82081f4b27ca2d4bc05c6c7e389378f0c7b8.pdf
+841a5de1d71a0b51957d9be9d9bebed33fb5d9fa,http://mx.nthu.edu.tw/~tsunghan/papers/journal%20papers/TIP_PCANet.pdf
+84e6669b47670f9f4f49c0085311dce0e178b685,http://pdfs.semanticscholar.org/84e6/669b47670f9f4f49c0085311dce0e178b685.pdf
+84bc3ca61fc63b47ec3a1a6566ab8dcefb3d0015,http://www.cvip.louisville.edu/wwwcvip/research/publications/Pub_Pdf/2012/BTAS%20144.pdf
+847e07387142c1bcc65035109ccce681ef88362c,http://pdfs.semanticscholar.org/847e/07387142c1bcc65035109ccce681ef88362c.pdf
+8411fe1142935a86b819f065cd1f879f16e77401,http://pdfs.semanticscholar.org/8411/fe1142935a86b819f065cd1f879f16e77401.pdf
+843e6f1e226480e8a6872d8fd7b7b2cd74b637a4,http://pdfs.semanticscholar.org/843e/6f1e226480e8a6872d8fd7b7b2cd74b637a4.pdf
+84f904a71bee129a1cf00dc97f6cdbe1011657e6,http://pdfs.semanticscholar.org/84f9/04a71bee129a1cf00dc97f6cdbe1011657e6.pdf
+84b4eb66ad75a74f77299f1ecb6aa6305362e8cd,https://www.researchgate.net/profile/Joao_Carvalho8/publication/4285113_A_Learning-based_Eye_Detector_Coupled_with_Eye_Candidate_Filtering_and_PCA_Features/links/0f31752d6b19aa31ec000000.pdf
+846c028643e60fefc86bae13bebd27341b87c4d1,http://pdfs.semanticscholar.org/a06f/510ee0f206abc4c44a2b68455d88a1748427.pdf
+4a14a321a9b5101b14ed5ad6aa7636e757909a7c,http://openaccess.thecvf.com/content_iccv_2015/papers/Li_Learning_Semi-Supervised_Representation_ICCV_2015_paper.pdf
+4adca62f888226d3a16654ca499bf2a7d3d11b71,http://pdfs.semanticscholar.org/5525/119941f6710fcde85cf71cc2ca25484e78c6.pdf
+4aa286914f17cd8cefa0320e41800a99c142a1cd,http://www.vbettadapura.com/egocentric/food/Food-Bettadapura15.pdf
+4a9d906935c9de019c61aedc10b77ee10e3aec63,http://openaccess.thecvf.com/content_cvpr_2016/papers/Gupta_Cross_Modal_Distillation_CVPR_2016_paper.pdf
+4a2d54ea1da851151d43b38652b7ea30cdb6dfb2,http://pdfs.semanticscholar.org/4a2d/54ea1da851151d43b38652b7ea30cdb6dfb2.pdf
+4ae59d2a28abd76e6d9fb53c9e7ece833dce7733,http://pdfs.semanticscholar.org/4ae5/9d2a28abd76e6d9fb53c9e7ece833dce7733.pdf
+4ab10174a4f98f7e2da7cf6ccfeb9bc64c8e7da8,http://pdfs.semanticscholar.org/4ab1/0174a4f98f7e2da7cf6ccfeb9bc64c8e7da8.pdf
+4a484d97e402ed0365d6cf162f5a60a4d8000ea0,http://pdfs.semanticscholar.org/4a48/4d97e402ed0365d6cf162f5a60a4d8000ea0.pdf
+4a64758786e3f49fc13781304197591ffbd69a6e,http://vicos.fri.uni-lj.si/alesl/files/2008/05/fidlerpami06.pdf
+4a4da3d1bbf10f15b448577e75112bac4861620a,http://pdfs.semanticscholar.org/4a4d/a3d1bbf10f15b448577e75112bac4861620a.pdf
+4abd49538d04ea5c7e6d31701b57ea17bc349412,http://resources.mpi-inf.mpg.de/publications/D2/2015/rohrbach15ijcv.pdf
+4a0f98d7dbc31497106d4f652968c708f7da6692,http://arxiv.org/pdf/1605.05258v1.pdf
+4aabd6db4594212019c9af89b3e66f39f3108aac,http://pdfs.semanticscholar.org/4aab/d6db4594212019c9af89b3e66f39f3108aac.pdf
+4adb97b096b700af9a58d00e45a2f980136fcbb5,http://pdfs.semanticscholar.org/9ea2/23c070ec9a00f4cb5ca0de35d098eb9a8e32.pdf
+4a5592ae1f5e9fa83d9fa17451c8ab49608421e4,http://sergioescalera.com/wp-content/uploads/2015/08/cha11g-lopezATS.pdf
+4a1a5316e85528f4ff7a5f76699dfa8c70f6cc5c,http://pdfs.semanticscholar.org/4a1a/5316e85528f4ff7a5f76699dfa8c70f6cc5c.pdf
+4ae291b070ad7940b3c9d3cb10e8c05955c9e269,http://www.cl.cam.ac.uk/~pr10/publications/icmi14.pdf
+4aa8db1a3379f00db2403bba7dade5d6e258b9e9,http://pdfs.semanticscholar.org/4aa8/db1a3379f00db2403bba7dade5d6e258b9e9.pdf
+4a2062ba576ca9e9a73b6aa6e8aac07f4d9344b9,https://arxiv.org/pdf/1608.01866v1.pdf
+4ac4e8d17132f2d9812a0088594d262a9a0d339b,http://pdfs.semanticscholar.org/4ac4/e8d17132f2d9812a0088594d262a9a0d339b.pdf
+4abaebe5137d40c9fcb72711cdefdf13d9fc3e62,http://pdfs.semanticscholar.org/4aba/ebe5137d40c9fcb72711cdefdf13d9fc3e62.pdf
+4acd683b5f91589002e6f50885df51f48bc985f4,http://www.albany.edu/faculty/mchang2/files/2015_09_ICIP_Darpa.pdf
+4a1d640f5e25bb60bb2347d36009718249ce9230,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Xing_Towards_Multi-view_and_2014_CVPR_paper.pdf
+4aeb87c11fb3a8ad603311c4650040fd3c088832,http://pdfs.semanticscholar.org/4aeb/87c11fb3a8ad603311c4650040fd3c088832.pdf
+4a3d96b2a53114da4be3880f652a6eef3f3cc035,https://www.micc.unifi.it/wp-content/uploads/2018/01/07932891.pdf
+4a6fcf714f663618657effc341ae5961784504c7,http://www.cs.tut.fi/~iosifidi/files/journal/2016_TIFS_ACSKDA.pdf?dl=0
+24b37016fee57057cf403fe2fc3dda78476a8262,http://pdfs.semanticscholar.org/24b3/7016fee57057cf403fe2fc3dda78476a8262.pdf
+24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd,http://www.researchgate.net/profile/Wei_Quan3/publication/221430048_High-resolution_comprehensive_3-D_dynamic_database_for_facial_articulation_analysis/links/0deec534309495805d000000.pdf
+24c442ac3f6802296d71b1a1914b5d44e48b4f29,http://vision.caltech.edu/~xpburgos/papers/ICCVW15%20Burgos-Artizzu.pdf
+247cab87b133bd0f4f9e8ce5e7fc682be6340eac,http://pdfs.semanticscholar.org/247c/ab87b133bd0f4f9e8ce5e7fc682be6340eac.pdf
+245f8ec4373e0a6c1cae36cd6fed5a2babed1386,http://pdfs.semanticscholar.org/245f/8ec4373e0a6c1cae36cd6fed5a2babed1386.pdf
+24cb375a998f4af278998f8dee1d33603057e525,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1A_016_ext.pdf
+24aac045f1e1a4c13a58eab4c7618dccd4c0e671,https://arxiv.org/pdf/1706.04124v1.pdf
+240d5390af19bb43761f112b0209771f19bfb696,http://pdfs.semanticscholar.org/4e10/0973f1540312df3465a087597018a7892310.pdf
+24de12df6953151ef5cd0379e205eb0f57ff9d1f,http://www.researchgate.net/profile/Sebastian_Ventura/publication/270337594_A_Tutorial_on_Multi-Label_Learning/links/54bcd8460cf253b50e2d697b.pdf?origin=publication_list
+24f9248f01df3020351347c2a3f632e01de72090,http://www.cs.utexas.edu/users/bwaters/publications/papers/luong-wacv2013.pdf
+24e099e77ae7bae3df2bebdc0ee4e00acca71250,https://qmro.qmul.ac.uk/xmlui/bitstream/handle/123456789/22467/Yang%20Robust%20Face%20Alignment%20Under%20Occlusion%20via%20Regional%20Predictive%20Power%20Estimation%202015%20Accepted.pdf?sequence=1
+24959d1a9c9faf29238163b6bcaf523e2b05a053,http://pdfs.semanticscholar.org/2495/9d1a9c9faf29238163b6bcaf523e2b05a053.pdf
+24f1febcdf56cd74cb19d08010b6eb5e7c81c362,http://www.umiacs.umd.edu/~cteo/public-shared/language_robotsMethods_PerMIS2012.pdf
+2450c618cca4cbd9b8cdbdb05bb57d67e63069b1,http://liris.cnrs.fr/Documents/Liris-6127.pdf
+24496e4acfb8840616b2960b0e2c80cc4c9e5a87,http://ai2-s2-pdfs.s3.amazonaws.com/2449/6e4acfb8840616b2960b0e2c80cc4c9e5a87.pdf
+244b57cc4a00076efd5f913cc2833138087e1258,http://pdfs.semanticscholar.org/dfa8/d0afc548a8086902412fb0eae0fcf881ed8a.pdf
+24cf9fe9045f50c732fc9c602358af89ae40a9f7,http://pdfs.semanticscholar.org/b3e7/4cbe27454e32b4b35014af831783d3480ad5.pdf
+241d2c517dbc0e22d7b8698e06ace67de5f26fdf,http://pdfs.semanticscholar.org/bfc3/546fa119443fdcbac3a5723647c2ba0007ac.pdf
+24e6a28c133b7539a57896393a79d43dba46e0f6,http://arxiv.org/pdf/1605.02057v2.pdf
+248db911e3a6a63ecd5ff6b7397a5d48ac15e77a,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Matthews_Enriching_Texture_Analysis_2013_CVPR_paper.pdf
+24d376e4d580fb28fd66bc5e7681f1a8db3b6b78,http://pdfs.semanticscholar.org/24d3/76e4d580fb28fd66bc5e7681f1a8db3b6b78.pdf
+24f1e2b7a48c2c88c9e44de27dc3eefd563f6d39,http://openaccess.thecvf.com/content_ICCV_2017/papers/Benitez-Quiroz_Recognition_of_Action_ICCV_2017_paper.pdf
+2489a839d0a761ef8520393a7e412c36f5f26324,https://cs.adelaide.edu.au/~tjchin/lib/exe/fetch.php?media=eccv2014_hypergraph.pdf
+243e9d490fe98d139003bb8dc95683b366866c57,http://pdfs.semanticscholar.org/243e/9d490fe98d139003bb8dc95683b366866c57.pdf
+2465fc22e03faf030e5a319479a95ef1dfc46e14,https://www.fruct.org/publications/fruct20/files/Bel.pdf
+2495ebdcb6da8d8c2e82cf57fcaab0ec003d571d,http://eprints.pascal-network.org/archive/00002118/01/russell06.pdf
+247a6b0e97b9447850780fe8dbc4f94252251133,http://www.busim.ee.boun.edu.tr/~sankur/SankurFolder/Conf_Arman_CVPR2010.pdf
+24bf94f8090daf9bda56d54e42009067839b20df,https://www.computer.org/csdl/trans/tp/2015/06/06940284.pdf
+240eb0b34872c431ecf9df504671281f59e7da37,http://www.ece.cmu.edu/~dbatra/publications/assets/cutout_tags_iv2009_small.pdf
+230527d37421c28b7387c54e203deda64564e1b7,http://pdfs.semanticscholar.org/2305/27d37421c28b7387c54e203deda64564e1b7.pdf
+23fdbef123bcda0f07d940c72f3b15704fd49a98,http://pdfs.semanticscholar.org/23fd/bef123bcda0f07d940c72f3b15704fd49a98.pdf
+23ebbbba11c6ca785b0589543bf5675883283a57,https://pdfs.semanticscholar.org/23eb/bbba11c6ca785b0589543bf5675883283a57.pdf
+23aef683f60cb8af239b0906c45d11dac352fb4e,http://pdfs.semanticscholar.org/b6cd/e64dcf864e457a83b72b7742fd19984a7552.pdf
+235d5620d05bb7710f5c4fa6fceead0eb670dec5,http://pdfs.semanticscholar.org/7497/50d81dbd4d9fdcc9c1728b797dbb538a8747.pdf
+23fd653b094c7e4591a95506416a72aeb50a32b5,http://pdfs.semanticscholar.org/8a92/17f540845a7d11d24f2d76c0b752ca439457.pdf
+23172f9a397f13ae1ecb5793efd81b6aba9b4537,http://pdfs.semanticscholar.org/2317/2f9a397f13ae1ecb5793efd81b6aba9b4537.pdf
+231a6d2ee1cc76f7e0c5912a530912f766e0b459,http://pdfs.semanticscholar.org/231a/6d2ee1cc76f7e0c5912a530912f766e0b459.pdf
+236a4f38f79a4dcc2183e99b568f472cf45d27f4,https://jurie.users.greyc.fr/papers/moosman-nowak-jurie-pami08.pdf
+230c4a30f439700355b268e5f57d15851bcbf41f,http://arxiv.org/pdf/1509.01509v2.pdf
+237fa91c8e8098a0d44f32ce259ff0487aec02cf,http://ira.lib.polyu.edu.hk/bitstream/10397/241/1/SMCB_C_36_4_06_B.pdf
+23d5b2dccd48a17e743d3a5a4d596111a2f16c41,http://pdfs.semanticscholar.org/8cda/dc4d5e7e4fe6a0dbe15611f6fc8b7c0f103e.pdf
+23fc83c8cfff14a16df7ca497661264fc54ed746,http://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf
+2331df8ca9f29320dd3a33ce68a539953fa87ff5,http://faculty.ucmerced.edu/mhyang/papers/aaai02.pdf
+232b6e2391c064d483546b9ee3aafe0ba48ca519,https://ibug.doc.ic.ac.uk/media/uploads/documents/tzimiro_pantic_iccv2013.pdf
+23ba9e462151a4bf9dfc3be5d8b12dbcfb7fe4c3,http://pdfs.semanticscholar.org/23ba/9e462151a4bf9dfc3be5d8b12dbcfb7fe4c3.pdf
+237eba4822744a9eabb121fe7b50fd2057bf744c,http://pdfs.semanticscholar.org/ba2a/65bef17d9db7366fe8c1344ca918ba50b99a.pdf
+23086a13b83d1b408b98346cf44f3e11920b404d,http://pdfs.semanticscholar.org/2308/6a13b83d1b408b98346cf44f3e11920b404d.pdf
+238fc68b2e0ef9f5ec043d081451902573992a03,http://www.cbsr.ia.ac.cn/users/zlei/papers/ChuanxianRen-ELGOF-TCYB.pdf
+23e75f5ce7e73714b63f036d6247fa0172d97cb6,http://pdfs.semanticscholar.org/23e7/5f5ce7e73714b63f036d6247fa0172d97cb6.pdf
+23aba7b878544004b5dfa64f649697d9f082b0cf,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W02/papers/Wang_Locality-Constrained_Discriminative_Learning_2015_CVPR_paper.pdf
+23120f9b39e59bbac4438bf4a8a7889431ae8adb,http://pdfs.semanticscholar.org/2312/0f9b39e59bbac4438bf4a8a7889431ae8adb.pdf
+23d55061f7baf2ffa1c847d356d8f76d78ebc8c1,https://ipsjcva.springeropen.com/track/pdf/10.1186/s41074-017-0033-4?site=ipsjcva.springeropen.com
+23c3eb6ad8e5f18f672f187a6e9e9b0d94042970,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/3B_095_ext.pdf
+23a8d02389805854cf41c9e5fa56c66ee4160ce3,http://www.advancedsourcecode.com/influencelow10.pdf
+4fd29e5f4b7186e349ba34ea30738af7860cf21f,https://arxiv.org/pdf/1506.02588v1.pdf
+4f0d9200647042e41dea71c35eb59e598e6018a7,http://pdfs.semanticscholar.org/4f0d/9200647042e41dea71c35eb59e598e6018a7.pdf
+4faded442b506ad0f200a608a69c039e92eaff11,http://pdfs.semanticscholar.org/4fad/ed442b506ad0f200a608a69c039e92eaff11.pdf
+4f7967158b257e86d66bdabfdc556c697d917d24,http://pdfs.semanticscholar.org/4f79/67158b257e86d66bdabfdc556c697d917d24.pdf
+4fc7a540efb24bea338f82c8bdc64c214744a3de,http://www.researchgate.net/profile/Touradj_Ebrahimi/publication/41083907_Object-based_Tag_Propagation_for_Semi-automatic_Annotation_of_Images/links/02e7e515b3de45cd50000000.pdf
+4fc936102e2b5247473ea2dd94c514e320375abb,http://pdfs.semanticscholar.org/4fc9/36102e2b5247473ea2dd94c514e320375abb.pdf
+4f298d6d0c8870acdbf94fe473ebf6814681bd1f,http://pdfs.semanticscholar.org/9979/b794d0bd06a1959a6b169f2cf32ba8ba376b.pdf
+4f6adc53798d9da26369bea5a0d91ed5e1314df2,http://pdfs.semanticscholar.org/4f6a/dc53798d9da26369bea5a0d91ed5e1314df2.pdf
+4fbef7ce1809d102215453c34bf22b5f9f9aab26,http://pdfs.semanticscholar.org/4fbe/f7ce1809d102215453c34bf22b5f9f9aab26.pdf
+4fa0d73b8ba114578744c2ebaf610d2ca9694f45,http://pdfs.semanticscholar.org/4fa0/d73b8ba114578744c2ebaf610d2ca9694f45.pdf
+4fcd19b0cc386215b8bd0c466e42934e5baaa4b7,https://arxiv.org/pdf/1510.00562v1.pdf
+4f591e243a8f38ee3152300bbf42899ac5aae0a5,http://pdfs.semanticscholar.org/4f59/1e243a8f38ee3152300bbf42899ac5aae0a5.pdf
+4f9958946ad9fc71c2299847e9ff16741401c591,http://pdfs.semanticscholar.org/4f99/58946ad9fc71c2299847e9ff16741401c591.pdf
+4f773c8e7ca98ece9894ba3a22823127a70c6e6c,http://pdfs.semanticscholar.org/4f77/3c8e7ca98ece9894ba3a22823127a70c6e6c.pdf
+4ff11512e4fde3d1a109546d9c61a963d4391add,http://pdfs.semanticscholar.org/4ff1/1512e4fde3d1a109546d9c61a963d4391add.pdf
+4f028efe6708fc252851eee4a14292b7ce79d378,http://pdfs.semanticscholar.org/ae17/aca92b4710efb00e3180a46e56e463ae2a6f.pdf
+4f0bf2508ae801aee082b37f684085adf0d06d23,http://pdfs.semanticscholar.org/4f0b/f2508ae801aee082b37f684085adf0d06d23.pdf
+4ff4c27e47b0aa80d6383427642bb8ee9d01c0ac,http://www.ai.rug.nl/~mwiering/GROUP/ARTICLES/CNN_Gender_Recognition.pdf
+4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7,http://pdfs.semanticscholar.org/d8ca/e259c1c5bba0c096f480dc7322bbaebfac1a.pdf
+4f0d5cbcd30fef3978b9691c2e736daed2f841c1,http://www.ics.uci.edu/~dramanan/papers/localdist_journal.pdf
+4f77a37753c03886ca9c9349723ec3bbfe4ee967,http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W11/papers/Hasan_Localizing_Facial_Keypoints_2013_ICCV_paper.pdf
+4f36c14d1453fc9d6481b09c5a09e91d8d9ee47a,http://pdfs.semanticscholar.org/4f36/c14d1453fc9d6481b09c5a09e91d8d9ee47a.pdf
+8d71872d5877c575a52f71ad445c7e5124a4b174,http://pdfs.semanticscholar.org/8d71/872d5877c575a52f71ad445c7e5124a4b174.pdf
+8de06a584955f04f399c10f09f2eed77722f6b1c,http://pdfs.semanticscholar.org/8de0/6a584955f04f399c10f09f2eed77722f6b1c.pdf
+8d4f0517eae232913bf27f516101a75da3249d15,http://pdfs.semanticscholar.org/8d4f/0517eae232913bf27f516101a75da3249d15.pdf
+8de2dbe2b03be8a99628ffa000ac78f8b66a1028,http://pdfs.semanticscholar.org/8de2/dbe2b03be8a99628ffa000ac78f8b66a1028.pdf
+8d3fbdb9783716c1832a0b7ab1da6390c2869c14,http://pdfs.semanticscholar.org/ae81/6e7e0077fe94f1e62629647dc04263a970b5.pdf
+8d42a24d570ad8f1e869a665da855628fcb1378f,http://pdfs.semanticscholar.org/8d42/a24d570ad8f1e869a665da855628fcb1378f.pdf
+8d8461ed57b81e05cc46be8e83260cd68a2ebb4d,http://pdfs.semanticscholar.org/8d84/61ed57b81e05cc46be8e83260cd68a2ebb4d.pdf
+8d4f12ed7b5a0eb3aa55c10154d9f1197a0d84f3,http://vision.ucsd.edu/~pdollar/files/papers/DollarCVPR10pose.pdf
+8d2c0c9155a1ed49ba576ac0446ec67725468d87,http://media.cs.tsinghua.edu.cn/~cvg/publications/ENGLISH%20CONFERENCE%20PAPERS/A%20Study%20of%20Two%20Image%20Representations%20for%20Head%20Pose%20Estimation.pdf
+8d0243b8b663ca0ab7cbe613e3b886a5d1c8c152,http://pdfs.semanticscholar.org/8d02/43b8b663ca0ab7cbe613e3b886a5d1c8c152.pdf
+8d6c4af9d4c01ff47fe0be48155174158a9a5e08,http://pdfs.semanticscholar.org/8d6c/4af9d4c01ff47fe0be48155174158a9a5e08.pdf
+8d2c43759e221f39ab1b4bf70d6891ffd19fb8da,https://www.researchgate.net/profile/Zhang_Pinzheng/publication/224711010_An_Automatic_Facial_Expression_Recognition_Approach_Based_on_Confusion-Crossed_Support_Vector_Machine_Tree/links/54658c630cf2052b509f3391.pdf
+8dbe79830713925affc48d0afa04ed567c54724b,http://pdfs.semanticscholar.org/8dbe/79830713925affc48d0afa04ed567c54724b.pdf
+8d1adf0ac74e901a94f05eca2f684528129a630a,http://www.denniscodd.com/dotnet-ieee/Facial%20Expression%20Recognition%20Using%20Facial.pdf
+8d91f06af4ef65193f3943005922f25dbb483ee4,http://pdfs.semanticscholar.org/8d91/f06af4ef65193f3943005922f25dbb483ee4.pdf
+8dc9de0c7324d098b537639c8214543f55392a6b,http://www.diva-portal.org/smash/get/diva2:280081/FULLTEXT01.pdf
+8d712cef3a5a8a7b1619fb841a191bebc2a17f15,http://pdfs.semanticscholar.org/8d71/2cef3a5a8a7b1619fb841a191bebc2a17f15.pdf
+8d646ac6e5473398d668c1e35e3daa964d9eb0f6,http://pdfs.semanticscholar.org/8d64/6ac6e5473398d668c1e35e3daa964d9eb0f6.pdf
+8dffbb6d75877d7d9b4dcde7665888b5675deee1,http://pdfs.semanticscholar.org/8dff/bb6d75877d7d9b4dcde7665888b5675deee1.pdf
+8dce38840e6cf5ab3e0d1b26e401f8143d2a6bff,http://publications.idiap.ch/downloads/papers/2017/Le_CBMI_2017.pdf
+153f5ad54dd101f7f9c2ae17e96c69fe84aa9de4,http://pdfs.semanticscholar.org/153f/5ad54dd101f7f9c2ae17e96c69fe84aa9de4.pdf
+155199d7f10218e29ddaee36ebe611c95cae68c4,http://pdfs.semanticscholar.org/1551/99d7f10218e29ddaee36ebe611c95cae68c4.pdf
+15cd05baa849ab058b99a966c54d2f0bf82e7885,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1A_031_ext.pdf
+15affdcef4bb9d78b2d3de23c9459ee5b7a43fcb,http://feiwang03.googlepages.com/CVPRposter.pdf
+159e792096756b1ec02ec7a980d5ef26b434ff78,http://pdfs.semanticscholar.org/159e/792096756b1ec02ec7a980d5ef26b434ff78.pdf
+153e5cddb79ac31154737b3e025b4fb639b3c9e7,http://pdfs.semanticscholar.org/d9f5/9178ef2d91c98e0f3108fe273cdc6c6590f4.pdf
+1586871a1ddfe031b885b94efdbff647cf03eff1,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w18/papers/Ginosar_A_Century_of_ICCV_2015_paper.pdf
+15b07dae17f184c8e6efbc9d2b58526d8e8dc9d4,https://arxiv.org/pdf/1707.07196v1.pdf
+15cf7bdc36ec901596c56d04c934596cf7b43115,http://pdfs.semanticscholar.org/15cf/7bdc36ec901596c56d04c934596cf7b43115.pdf
+1576ed0f3926c6ce65e0ca770475bca6adcfdbb4,http://openaccess.thecvf.com/content_cvpr_workshops_2015/W09/papers/Bagheri_Keep_it_Accurate_2015_CVPR_paper.pdf
+156cd2a0e2c378e4c3649a1d046cd080d3338bca,http://pdfs.semanticscholar.org/156c/d2a0e2c378e4c3649a1d046cd080d3338bca.pdf
+157eb982da8fe1da4c9e07b4d89f2e806ae4ceb6,http://www.merl.com/publications/docs/TR2012-043.pdf
+15e0b9ba3389a7394c6a1d267b6e06f8758ab82b,https://ipsjcva.springeropen.com/track/pdf/10.1186/s41074-017-0035-2?site=ipsjcva.springeropen.com
+151481703aa8352dc78e2577f0601782b8c41b34,http://pdfs.semanticscholar.org/943c/f990952712673320b011e1e8092fad65eedd.pdf
+1565721ebdbd2518224f54388ed4f6b21ebd26f3,http://cmp.felk.cvut.cz/ftp/articles/franc/Cevilkalp-FaceDetector-FG2013.pdf
+15f3d47b48a7bcbe877f596cb2cfa76e798c6452,http://pdfs.semanticscholar.org/15f3/d47b48a7bcbe877f596cb2cfa76e798c6452.pdf
+15728d6fd5c9fc20b40364b733228caf63558c31,http://pdfs.semanticscholar.org/1572/8d6fd5c9fc20b40364b733228caf63558c31.pdf
+15252b7af081761bb00535aac6bd1987391f9b79,http://cvsp.cs.ntua.gr/publications/confr/KoutrasMaragos_EyeGaze_ICIP15.pdf
+1513949773e3a47e11ab87d9a429864716aba42d,http://pdfs.semanticscholar.org/1513/949773e3a47e11ab87d9a429864716aba42d.pdf
+15ee80e86e75bf1413dc38f521b9142b28fe02d1,https://arxiv.org/pdf/1612.05322v1.pdf
+153c8715f491272b06dc93add038fae62846f498,http://pdfs.semanticscholar.org/153c/8715f491272b06dc93add038fae62846f498.pdf
+15e27f968458bf99dd34e402b900ac7b34b1d575,http://mirlab.org/conference_papers/International_Conference/ICASSP%202014/papers/p8362-mahanta.pdf
+15f70a0ad8903017250927595ae2096d8b263090,http://pdfs.semanticscholar.org/15f7/0a0ad8903017250927595ae2096d8b263090.pdf
+1564bf0a268662df752b68bee5addc4b08868739,https://arxiv.org/pdf/1605.04129v2.pdf
+158e32579e38c29b26dfd33bf93e772e6211e188,http://pdfs.semanticscholar.org/158e/32579e38c29b26dfd33bf93e772e6211e188.pdf
+122f51cee489ba4da5ab65064457fbe104713526,http://www.speakit.cn/Group/file/2015_LongShortTerm_ACMAVEC@MM15_EI.pdf
+125d82fee1b9fbcc616622b0977f3d06771fc152,http://www.ee.cuhk.edu.hk/~xgwang/papers/luoWTcvpr12.pdf
+1255afbf86423c171349e874b3ac297de19f00cd,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_SSCI_2015/data/7560a203.pdf
+126535430845361cd7a3a6f317797fe6e53f5a3b,http://pdfs.semanticscholar.org/1265/35430845361cd7a3a6f317797fe6e53f5a3b.pdf
+122ee00cc25c0137cab2c510494cee98bd504e9f,http://pdfs.semanticscholar.org/122e/e00cc25c0137cab2c510494cee98bd504e9f.pdf
+121fe33daf55758219e53249cf8bcb0eb2b4db4b,http://pdfs.semanticscholar.org/121f/e33daf55758219e53249cf8bcb0eb2b4db4b.pdf
+120bcc9879d953de7b2ecfbcd301f72f3a96fb87,http://www.cs.colostate.edu/~vision/pasc/docs/fg2015videoEvalPreprint.pdf
+12cb3bf6abf63d190f849880b1703ccc183692fe,http://pdfs.semanticscholar.org/12cb/3bf6abf63d190f849880b1703ccc183692fe.pdf
+1246534c3104da030fdb9e041819257e0d57dcbf,http://home.isr.uc.pt/~joaoluis/papers/cvpr2015_2.pdf
+12cd96a419b1bd14cc40942b94d9c4dffe5094d2,http://pdfs.semanticscholar.org/12cd/96a419b1bd14cc40942b94d9c4dffe5094d2.pdf
+12055b8f82d5411f9ad196b60698d76fbd07ac1e,https://zhzhanp.github.io/papers/TCSVT2014.pdf
+126214ef0dcef2b456cb413905fa13160c73ec8e,http://infoscience.epfl.ch/record/125056/files/MHFE_fg08.pdf
+12692fbe915e6bb1c80733519371bbb90ae07539,http://pdfs.semanticscholar.org/50ef/4817a6e50a2ec525d6e417d05d2400983c11.pdf
+12ccfc188de0b40c84d6a427999239c6a379cd66,http://pdfs.semanticscholar.org/12cc/fc188de0b40c84d6a427999239c6a379cd66.pdf
+12c713166c46ac87f452e0ae383d04fb44fe4eb2,http://pdfs.semanticscholar.org/98dc/a90e43c7592ef81cf84445d73c8baa719686.pdf
+1270044a3fa1a469ec2f4f3bd364754f58a1cb56,http://pdfs.semanticscholar.org/1270/044a3fa1a469ec2f4f3bd364754f58a1cb56.pdf
+12150d8b51a2158e574e006d4fbdd3f3d01edc93,https://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ss16/DeepEnd2EndV2V.pdf
+12003a7d65c4f98fb57587fd0e764b44d0d10125,http://luks.fe.uni-lj.si/en/staff/simond/publications/Dobrisek2015.pdf
+124538b3db791e30e1b62f81d4101be435ee12ef,http://pdfs.semanticscholar.org/1245/38b3db791e30e1b62f81d4101be435ee12ef.pdf
+12d8730da5aab242795bdff17b30b6e0bac82998,http://pdfs.semanticscholar.org/12d8/730da5aab242795bdff17b30b6e0bac82998.pdf
+8c643e1a61f3f563ec382c1e450f4b2b28122614,http://www.cvip.louisville.edu/wwwcvip/research/publications/Pub_Pdf/2012/BTAS147.pdf
+8c13f2900264b5cf65591e65f11e3f4a35408b48,http://cvhci.ira.uka.de/~stiefel/papers/Ekenel_Local_Appearance.pdf
+8cb3f421b55c78e56c8a1c1d96f23335ebd4a5bf,http://pdfs.semanticscholar.org/8cb3/f421b55c78e56c8a1c1d96f23335ebd4a5bf.pdf
+8c955f3827a27e92b6858497284a9559d2d0623a,http://pdfs.semanticscholar.org/8c95/5f3827a27e92b6858497284a9559d2d0623a.pdf
+8c8525e626c8857a4c6c385de34ffea31e7e41d1,http://arxiv.org/pdf/1505.07922.pdf
+8c66378df977606d332fc3b0047989e890a6ac76,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/2B_078_ext.pdf
+8c9c8111e18f8798a612e7386e88536dfe26455e,http://pdfs.semanticscholar.org/8c9c/8111e18f8798a612e7386e88536dfe26455e.pdf
+8c7f4c11b0c9e8edf62a0f5e6cf0dd9d2da431fa,http://pdfs.semanticscholar.org/8c7f/4c11b0c9e8edf62a0f5e6cf0dd9d2da431fa.pdf
+8c81705e5e4a1e2068a5bd518adc6955d49ae434,http://pdfs.semanticscholar.org/8c81/705e5e4a1e2068a5bd518adc6955d49ae434.pdf
+8cb403c733a5f23aefa6f583a17cf9b972e35c90,http://pdfs.semanticscholar.org/e4ca/1fa70823c4350888607df470248be0ed4c56.pdf
+8c6b9c9c26ead75ce549a57c4fd0a12b46142848,http://pdfs.semanticscholar.org/97fc/47ba1427b0e50cd815b8b1657fea6fb9e25a.pdf
+8c4ea76e67a2a99339a8c4decd877fe0aa2d8e82,http://pdfs.semanticscholar.org/e09e/aa666f354d4262d5ff4cf4ef54a960561bbe.pdf
+8c7bceba769762126fd3dae78d622908bb83c3d3,http://qil.uh.edu/qil/websitecontent/pdf/2015-33.pdf
+8c6c0783d90e4591a407a239bf6684960b72f34e,http://pdfs.semanticscholar.org/8c6c/0783d90e4591a407a239bf6684960b72f34e.pdf
+8cb55413f1c5b6bda943697bba1dc0f8fc880d28,http://cvhci.anthropomatik.kit.edu/~stiefel/papers/ICCV07_031.pdf
+8cc07ae9510854ec6e79190cc150f9f1fe98a238,http://pdfs.semanticscholar.org/8cc0/7ae9510854ec6e79190cc150f9f1fe98a238.pdf
+8509abbde2f4b42dc26a45cafddcccb2d370712f,http://pdfs.semanticscholar.org/ad9a/169042d887c33cfcec2716a453a0d3abcb0c.pdf
+855bfc17e90ec1b240efba9100fb760c068a8efa,http://pdfs.semanticscholar.org/855b/fc17e90ec1b240efba9100fb760c068a8efa.pdf
+858ddff549ae0a3094c747fb1f26aa72821374ec,https://arxiv.org/pdf/1606.03237v1.pdf
+85041e48b51a2c498f22850ce7228df4e2263372,http://pdfs.semanticscholar.org/8504/1e48b51a2c498f22850ce7228df4e2263372.pdf
+857ad04fca2740b016f0066b152bd1fa1171483f,http://pdfs.semanticscholar.org/857a/d04fca2740b016f0066b152bd1fa1171483f.pdf
+858901405086056361f8f1839c2f3d65fc86a748,http://pdfs.semanticscholar.org/8589/01405086056361f8f1839c2f3d65fc86a748.pdf
+85188c77f3b2de3a45f7d4f709b6ea79e36bd0d9,http://pdfs.semanticscholar.org/8518/8c77f3b2de3a45f7d4f709b6ea79e36bd0d9.pdf
+855882a5943fc12fa9c0e8439c482e055b4b46f3,http://humansensing.cs.cmu.edu/papers/Automated.pdf
+8518b501425f2975ea6dcbf1e693d41e73d0b0af,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Zhang_Relative_Hidden_Markov_2013_CVPR_paper.pdf
+8557914593e8540fcdd9b11aef076f68d41d3b4b,http://elwilber.com/papers/ecodes-2014.pdf
+855184c789bca7a56bb223089516d1358823db0b,http://pdfs.semanticscholar.org/8551/84c789bca7a56bb223089516d1358823db0b.pdf
+853bd61bc48a431b9b1c7cab10c603830c488e39,http://pdfs.semanticscholar.org/b8a2/0ed7e74325da76d7183d1ab77b082a92b447.pdf
+85639cefb8f8deab7017ce92717674d6178d43cc,http://pdfs.semanticscholar.org/8563/9cefb8f8deab7017ce92717674d6178d43cc.pdf
+854dbb4a0048007a49df84e3f56124d387588d99,http://pdfs.semanticscholar.org/854d/bb4a0048007a49df84e3f56124d387588d99.pdf
+85674b1b6007634f362cbe9b921912b697c0a32c,http://pdfs.semanticscholar.org/8567/4b1b6007634f362cbe9b921912b697c0a32c.pdf
+852ff0d410a25ebb7936043a05efe2469c699e4b,http://pdfs.semanticscholar.org/852f/f0d410a25ebb7936043a05efe2469c699e4b.pdf
+1d21e5beef23eecff6fff7d4edc16247f0fd984a,http://pdfs.semanticscholar.org/1d21/e5beef23eecff6fff7d4edc16247f0fd984a.pdf
+1dbbec4ad8429788e16e9f3a79a80549a0d7ac7b,http://pdfs.semanticscholar.org/9d44/ef9e28d7722c388091ec4c1fa7c05f085e53.pdf
+1d7ecdcb63b20efb68bcc6fd99b1c24aa6508de9,https://web.stanford.edu/~bgirod/pdfs/ChenHuizhongTransPAMISep2014.pdf
+1d846934503e2bd7b8ea63b2eafe00e29507f06a,http://www.iipl.fudan.edu.cn/~zhangjp/literatures/MLF/manifold%20learning/20fa.pdf
+1d19c6857e798943cd0ecd110a7a0d514c671fec,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w2/papers/Khorrami_Do_Deep_Neural_ICCV_2015_paper.pdf
+1d1a7ef193b958f9074f4f236060a5f5e7642fc1,http://pdfs.semanticscholar.org/db40/804914afbb7f8279ca9a4f52e0ade695f19e.pdf
+1d696a1beb42515ab16f3a9f6f72584a41492a03,http://www.ee.cuhk.edu.hk/~xgwang/papers/sunWTcvpr15.pdf
+1d1caaa2312390260f7d20ad5f1736099818d358,https://eprints.soton.ac.uk/271401/1/paperOnIEEEexplore.pdf
+1dc241ee162db246882f366644171c11f7aed96d,http://pdfs.semanticscholar.org/1dc2/41ee162db246882f366644171c11f7aed96d.pdf
+1d0128b9f96f4c11c034d41581f23eb4b4dd7780,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Snape_Automatic_Construction_Of_2015_CVPR_paper.pdf
+1d79ec93a9feba817c75c31604c3f8df346eabe8,https://www.researchgate.net/profile/Manjunath_Aradhya/publication/254461422_The_study_of_different_similarity_measure_techniques_in_recognition_of_handwritten_characters/links/0046352049dae0d044000000.pdf
+1d3dd9aba79a53390317ec1e0b7cd742cba43132,http://www.cise.ufl.edu/~dihong/assets/Gong_A_Maximum_Entropy_2015_CVPR_paper.pdf
+1d0dd20b9220d5c2e697888e23a8d9163c7c814b,http://pdfs.semanticscholar.org/1d0d/d20b9220d5c2e697888e23a8d9163c7c814b.pdf
+1d5aad4f7fae6d414ffb212cec1f7ac876de48bf,http://biometrics.cse.msu.edu/Publications/Face/WangJain_FaceRetriever_ICB15.pdf
+1db23a0547700ca233aef9cfae2081cd8c5a04d7,http://pdfs.semanticscholar.org/1db2/3a0547700ca233aef9cfae2081cd8c5a04d7.pdf
+1d97735bb0f0434dde552a96e1844b064af08f62,http://www.apsipa.org/proceedings_2015/pdf/290.pdf
+1d3e01d5e2721dcfafe5a3b39c54ee1c980350bb,http://research.microsoft.com/en-us/um/people/jiansun/papers/CVPR12_FaceAlignRegression.pdf
+1dff919e51c262c22630955972968f38ba385d8a,http://pdfs.semanticscholar.org/1dff/919e51c262c22630955972968f38ba385d8a.pdf
+1de8f38c35f14a27831130060810cf9471a62b45,http://www.psy.miami.edu/faculty/dmessinger/c_c/rsrcs/rdgs/emot/Unsupervised_Discovery.IJCompVis.2017.pdf
+1da83903c8d476c64c14d6851c85060411830129,http://pdfs.semanticscholar.org/90c3/b003b85bd60ae06630bcef6abc03c3b1ef96.pdf
+1d6068631a379adbcff5860ca2311b790df3a70f,http://pdfs.semanticscholar.org/c322/b1b998ec8f1892b29a1ebcbdc2f62e644cf1.pdf
+1dacc2f4890431d867a038fd81c111d639cf4d7e,http://pdfs.semanticscholar.org/1dac/c2f4890431d867a038fd81c111d639cf4d7e.pdf
+1dc6c0ad19b41e5190fc9fe50e3ae27f49f18fa2,http://www.researchgate.net/profile/Stefano_Alletto/publication/265611795_Head_Pose_Estimation_in_First-Person_Camera_Views/links/5416b5ef0cf2788c4b35e14b.pdf
+1de690714f143a8eb0d6be35d98390257a3f4a47,http://www.cs.fsu.edu/~liux/research/publications/papers/waring-liu-face-detection-smcb-2005.pdf
+1d6c09019149be2dc84b0c067595f782a5d17316,http://pdfs.semanticscholar.org/3e27/b747e272c2ab778df92ea802d30af15e43d6.pdf
+1d58d83ee4f57351b6f3624ac7e727c944c0eb8d,http://parnec.nuaa.edu.cn/xtan/paper/amfg07_talk.pdf
+71b376dbfa43a62d19ae614c87dd0b5f1312c966,http://www.cs.cmu.edu/~ltrutoiu/pdfs/FG2013_trutoiu.pdf
+71b07c537a9e188b850192131bfe31ef206a39a0,http://pdfs.semanticscholar.org/71b0/7c537a9e188b850192131bfe31ef206a39a0.pdf
+71fd29c2ae9cc9e4f959268674b6b563c06d9480,http://pdfs.semanticscholar.org/71fd/29c2ae9cc9e4f959268674b6b563c06d9480.pdf
+71f36c8e17a5c080fab31fce1ffea9551fc49e47,http://openaccess.thecvf.com/content_cvpr_2014/papers/Zhang_Predicting_Failures_of_2014_CVPR_paper.pdf
+71e6a46b32a8163c9eda69e1badcee6348f1f56a,http://pdfs.semanticscholar.org/71e6/a46b32a8163c9eda69e1badcee6348f1f56a.pdf
+713594c18978b965be87651bb553c28f8501df0a,http://pdfs.semanticscholar.org/fbfc/a34d52422cf8eac9d92d68dd16f95db5ef36.pdf
+718824256b4461d62d192ab9399cfc477d3660b4,http://pdfs.semanticscholar.org/7188/24256b4461d62d192ab9399cfc477d3660b4.pdf
+718d3137adba9e3078fa1f698020b666449f3336,http://pdfs.semanticscholar.org/718d/3137adba9e3078fa1f698020b666449f3336.pdf
+716d6c2eb8a0d8089baf2087ce9fcd668cd0d4c0,http://pdfs.semanticscholar.org/ec7f/c7bf79204166f78c27e870b620205751fff6.pdf
+7171b46d233810df57eaba44ccd8eabd0ad1f53a,http://pdfs.semanticscholar.org/7171/b46d233810df57eaba44ccd8eabd0ad1f53a.pdf
+71e56f2aebeb3c4bb3687b104815e09bb4364102,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Guo_Video_Co-segmentation_for_2013_ICCV_paper.pdf
+711bb5f63139ee7a9b9aef21533f959671a7d80e,http://pdfs.semanticscholar.org/711b/b5f63139ee7a9b9aef21533f959671a7d80e.pdf
+76fd801981fd69ff1b18319c450cb80c4bc78959,http://pdfs.semanticscholar.org/76fd/801981fd69ff1b18319c450cb80c4bc78959.pdf
+76dc11b2f141314343d1601635f721fdeef86fdb,http://pdfs.semanticscholar.org/8d19/1804f5b260807dac107b89a5837ac15857aa.pdf
+76673de6d81bedd6b6be68953858c5f1aa467e61,http://pdfs.semanticscholar.org/8883/2abb9082af6a1395e1b9bd3d4c1b46d00616.pdf
+7643861bb492bf303b25d0306462f8fb7dc29878,https://www-i6.informatik.rwth-aachen.de/publications/download/991/Hanselmann-FG-2015.pdf
+760a712f570f7a618d9385c0cee7e4d0d6a78ed2,http://pdfs.semanticscholar.org/760a/712f570f7a618d9385c0cee7e4d0d6a78ed2.pdf
+7698ba9fd1f49157ca2666a93311afbf1ff4e66c,http://www.ics.uci.edu/~dramanan/papers/dpm_acm.pdf
+76ce3d35d9370f0e2e27cfd29ea0941f1462895f,http://pdfs.semanticscholar.org/76ce/3d35d9370f0e2e27cfd29ea0941f1462895f.pdf
+76b9fe32d763e9abd75b427df413706c4170b95c,http://pdfs.semanticscholar.org/76b9/fe32d763e9abd75b427df413706c4170b95c.pdf
+768c332650a44dee02f3d1d2be1debfa90a3946c,http://mmlab.ie.cuhk.edu.hk/archive/2004/CVPR04_Face3.pdf
+769461ff717d987482b28b32b1e2a6e46570e3ff,http://pdfs.semanticscholar.org/7694/61ff717d987482b28b32b1e2a6e46570e3ff.pdf
+76d9f5623d3a478677d3f519c6e061813e58e833,http://pdfs.semanticscholar.org/76d9/f5623d3a478677d3f519c6e061813e58e833.pdf
+76e2d7621019bd45a5851740bd2742afdcf62837,http://pdfs.semanticscholar.org/76e2/d7621019bd45a5851740bd2742afdcf62837.pdf
+765b2cb322646c52e20417c3b44b81f89860ff71,http://cg.cs.tsinghua.edu.cn/papers/TVCG_2013_poseshop.pdf
+7644d90efef157e61fe4d773d8a3b0bad5feccec,http://pdfs.semanticscholar.org/7644/d90efef157e61fe4d773d8a3b0bad5feccec.pdf
+763158cef9d1e4041f24fce4cf9d6a3b7a7f08ff,http://pdfs.semanticscholar.org/7631/58cef9d1e4041f24fce4cf9d6a3b7a7f08ff.pdf
+76d939f73a327bf1087d91daa6a7824681d76ea1,http://pdfs.semanticscholar.org/76d9/39f73a327bf1087d91daa6a7824681d76ea1.pdf
+760ba44792a383acd9ca8bef45765d11c55b48d4,http://class-specific.com/csf/papers/aes_tut.pdf
+7636f94ddce79f3dea375c56fbdaaa0f4d9854aa,http://pdfs.semanticscholar.org/7636/f94ddce79f3dea375c56fbdaaa0f4d9854aa.pdf
+1c80bc91c74d4984e6422e7b0856cf3cf28df1fb,http://refbase.cvc.uab.es/files/xrv2014d.pdf
+1ce3a91214c94ed05f15343490981ec7cc810016,http://grail.cs.washington.edu/photobios/paper.pdf
+1c2724243b27a18a2302f12dea79d9a1d4460e35,http://read.pudn.com/downloads157/doc/697237/kfd/Fisher+Kernel%20criterion%20for%20discriminant%20analysis.pdf
+1ca8c09abb73a02519d8db77e4fe107acfc589b6,http://sci.pitt.edu/wp-content/uploads/2018/03/111_Zhang.pdf
+1ce4587e27e2cf8ba5947d3be7a37b4d1317fbee,https://arxiv.org/pdf/1611.00142v2.pdf
+1c30bb689a40a895bd089e55e0cad746e343d1e2,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Tran_Learning_Spatiotemporal_Features_ICCV_2015_paper.pdf
+1c4ceae745fe812d8251fda7aad03210448ae25e,http://pdfs.semanticscholar.org/98d3/6d12cf6f2da181a9c1fb9d652ceaa57eb7bb.pdf
+1c3073b57000f9b6dbf1c5681c52d17c55d60fd7,http://pdfs.semanticscholar.org/1c30/73b57000f9b6dbf1c5681c52d17c55d60fd7.pdf
+1cee993dc42626caf5dbc26c0a7790ca6571d01a,http://www.iri.upc.edu/people/fmoreno/Publications/2005/pdf/Moreno_siggraphsketch2005.pdf
+1c147261f5ab1b8ee0a54021a3168fa191096df8,http://pdfs.semanticscholar.org/1c14/7261f5ab1b8ee0a54021a3168fa191096df8.pdf
+1c5d7d02a26aa052ecc47d301de4929083e5d320,https://www.ll.mit.edu/news/avec2014_mitll.pdf
+1c17450c4d616e1e1eece248c42eba4f87de9e0d,http://pdfs.semanticscholar.org/d269/39a00a8d3964de612cd3faa86764343d5622.pdf
+1c93b48abdd3ef1021599095a1a5ab5e0e020dd5,http://www.stat.ucla.edu/~sczhu/papers/PAMI_FaceAging.pdf
+1c1f957d85b59d23163583c421755869f248ceef,http://homepages.rpi.edu/~wuy9/ICCV15/FLD_iccv15.pdf
+1cbd3f96524ca2258fd2d5c504c7ea8da7fb1d16,http://pdfs.semanticscholar.org/1cbd/3f96524ca2258fd2d5c504c7ea8da7fb1d16.pdf
+1cad5d682393ffbb00fd26231532d36132582bb4,http://pdfs.semanticscholar.org/1cad/5d682393ffbb00fd26231532d36132582bb4.pdf
+1c1a98df3d0d5e2034ea723994bdc85af45934db,http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W11/papers/Jaiswal_Guided_Unsupervised_Learning_2013_ICCV_paper.pdf
+1ca815327e62c70f4ee619a836e05183ef629567,http://www.humansensing.cs.cmu.edu/sites/default/files/Xiong_Global_Supervised_Descent_2015_CVPR_paper.pdf
+1c6be6874e150898d9db984dd546e9e85c85724e,http://research.microsoft.com/~szli/papers/WHT-CVPR2004.pdf
+1c65f3b3c70e1ea89114f955624d7adab620a013,http://pdfs.semanticscholar.org/ef34/cc2a26e88abd6a03d1a831c750440c6147d2.pdf
+1c530de1a94ac70bf9086e39af1712ea8d2d2781,http://pdfs.semanticscholar.org/1c53/0de1a94ac70bf9086e39af1712ea8d2d2781.pdf
+82f8652c2059187b944ce65e87bacb6b765521f6,http://pdfs.semanticscholar.org/82f8/652c2059187b944ce65e87bacb6b765521f6.pdf
+82bef8481207de9970c4dc8b1d0e17dced706352,http://pdfs.semanticscholar.org/82be/f8481207de9970c4dc8b1d0e17dced706352.pdf
+82d2af2ffa106160a183371946e466021876870d,http://pdfs.semanticscholar.org/82d2/af2ffa106160a183371946e466021876870d.pdf
+82be2ede6b7613286b80c3e2afe3b5353f322bed,http://www.eecs.berkeley.edu/~jiayq/papers/iccv11_mm.pdf
+82ccd62f70e669ec770daf11d9611cab0a13047e,http://www.csse.uwa.edu.au/~ajmal/papers/Farshid_DICTA2013.pdf
+82c303cf4852ad18116a2eea31e2291325bc19c3,http://pdfs.semanticscholar.org/82c3/03cf4852ad18116a2eea31e2291325bc19c3.pdf
+8210fd10ef1de44265632589f8fc28bc439a57e6,http://www.ytzhang.net/files/publications/2015-tifs-sup-ae.pdf
+82a4a35b2bae3e5c51f4d24ea5908c52973bd5be,http://pdfs.semanticscholar.org/82a4/a35b2bae3e5c51f4d24ea5908c52973bd5be.pdf
+82cd5a5fec8a27887a35f1ecec684ec55eefad73,http://www.researchgate.net/profile/Giuseppe_Boccignone/publication/265793480_Using_Sparse_Coding_for_Landmark_Localization_in_Facial_Expressions/links/541bf80b0cf241a65a0ba53a.pdf
+82f4e8f053d20be64d9318529af9fadd2e3547ef,http://pdfs.semanticscholar.org/82f4/e8f053d20be64d9318529af9fadd2e3547ef.pdf
+82b43bc9213230af9db17322301cbdf81e2ce8cc,http://pdfs.semanticscholar.org/82b4/3bc9213230af9db17322301cbdf81e2ce8cc.pdf
+82d781b7b6b7c8c992e0cb13f7ec3989c8eafb3d,http://pdfs.semanticscholar.org/82d7/81b7b6b7c8c992e0cb13f7ec3989c8eafb3d.pdf
+82e66c4832386cafcec16b92ac88088ffd1a1bc9,http://pdfs.semanticscholar.org/82e6/6c4832386cafcec16b92ac88088ffd1a1bc9.pdf
+82d79658805f6c1aedf7b0b88b47b9555584d7ae,http://cheonji.kaist.ac.kr/pdfsrc/ic/2008_KHAn_IROS.pdf
+826c66bd182b54fea3617192a242de1e4f16d020,http://www.mirlab.org/conference_papers/International_Conference/ICASSP%202017/pdfs/0001602.pdf
+499f1d647d938235e9186d968b7bb2ab20f2726d,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Xiong_Face_Recognition_via_2013_ICCV_paper.pdf
+4919663c62174a9bc0cc7f60da8f96974b397ad2,https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/EBIF_5-2-2010_v_5.pdf
+49f70f707c2e030fe16059635df85c7625b5dc7e,http://pdfs.semanticscholar.org/55b7/59b3e94088488334e3af2d17710c5e1fce4b.pdf
+4967b0acc50995aa4b28e576c404dc85fefb0601,http://pdfs.semanticscholar.org/4967/b0acc50995aa4b28e576c404dc85fefb0601.pdf
+49820ae612b3c0590a8a78a725f4f378cb605cd1,http://pdfs.semanticscholar.org/4982/0ae612b3c0590a8a78a725f4f378cb605cd1.pdf
+49dd4b359f8014e85ed7c106e7848049f852a304,http://pdfs.semanticscholar.org/49dd/4b359f8014e85ed7c106e7848049f852a304.pdf
+49e85869fa2cbb31e2fd761951d0cdfa741d95f3,http://studentnet.cs.manchester.ac.uk/pgt/COMP61021/reference/adaptive-manifold-learning.pdf
+49659fb64b1d47fdd569e41a8a6da6aa76612903,http://pdfs.semanticscholar.org/4965/9fb64b1d47fdd569e41a8a6da6aa76612903.pdf
+490a217a4e9a30563f3a4442a7d04f0ea34442c8,http://pdfs.semanticscholar.org/490a/217a4e9a30563f3a4442a7d04f0ea34442c8.pdf
+49a7949fabcdf01bbae1c2eb38946ee99f491857,http://pdfs.semanticscholar.org/49a7/949fabcdf01bbae1c2eb38946ee99f491857.pdf
+4934d44aa89b6d871eb6709dd1d1eebf16f3aaf1,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Luo_A_Deep_Sum-Product_2013_ICCV_paper.pdf
+499343a2fd9421dca608d206e25e53be84489f44,http://pdfs.semanticscholar.org/4993/43a2fd9421dca608d206e25e53be84489f44.pdf
+498fd231d7983433dac37f3c97fb1eafcf065268,http://pdfs.semanticscholar.org/498f/d231d7983433dac37f3c97fb1eafcf065268.pdf
+49e1aa3ecda55465641b2c2acc6583b32f3f1fc6,http://pdfs.semanticscholar.org/49e1/aa3ecda55465641b2c2acc6583b32f3f1fc6.pdf
+499f2b005e960a145619305814a4e9aa6a1bba6a,http://pdfs.semanticscholar.org/499f/2b005e960a145619305814a4e9aa6a1bba6a.pdf
+497bf2df484906e5430aa3045cf04a40c9225f94,http://pdfs.semanticscholar.org/497b/f2df484906e5430aa3045cf04a40c9225f94.pdf
+492f41e800c52614c5519f830e72561db205e86c,http://openaccess.thecvf.com/content_cvpr_2017/papers/Lv_A_Deep_Regression_CVPR_2017_paper.pdf
+493ec9e567c5587c4cbeb5f08ca47408ca2d6571,http://pdfs.semanticscholar.org/493e/c9e567c5587c4cbeb5f08ca47408ca2d6571.pdf
+49570b41bd9574bd9c600e24b269d945c645b7bd,http://pdfs.semanticscholar.org/4957/0b41bd9574bd9c600e24b269d945c645b7bd.pdf
+496074fcbeefd88664b7bd945012ca22615d812e,http://pdfs.semanticscholar.org/4960/74fcbeefd88664b7bd945012ca22615d812e.pdf
+40205181ed1406a6f101c5e38c5b4b9b583d06bc,http://pdfs.semanticscholar.org/4020/5181ed1406a6f101c5e38c5b4b9b583d06bc.pdf
+40dab43abef32deaf875c2652133ea1e2c089223,http://pdfs.semanticscholar.org/40da/b43abef32deaf875c2652133ea1e2c089223.pdf
+40b0fced8bc45f548ca7f79922e62478d2043220,http://pdfs.semanticscholar.org/40b0/fced8bc45f548ca7f79922e62478d2043220.pdf
+405b43f4a52f70336ac1db36d5fa654600e9e643,http://pdfs.semanticscholar.org/405b/43f4a52f70336ac1db36d5fa654600e9e643.pdf
+40b86ce698be51e36884edcc8937998979cd02ec,http://www.cs.bilkent.edu.tr/~duygulu/papers/SIU2006-face.pdf
+40a74eea514b389b480d6fe8b359cb6ad31b644a,http://pdfs.semanticscholar.org/7ac4/2be6c1f01ccc42b28c0bfa77856cc75b65a2.pdf
+40ee38d7ff2871761663d8634c3a4970ed1dc058,http://pdfs.semanticscholar.org/40ee/38d7ff2871761663d8634c3a4970ed1dc058.pdf
+402f6db00251a15d1d92507887b17e1c50feebca,http://pdfs.semanticscholar.org/402f/6db00251a15d1d92507887b17e1c50feebca.pdf
+404042a1dcfde338cf24bc2742c57c0fb1f48359,http://pdfs.semanticscholar.org/4040/42a1dcfde338cf24bc2742c57c0fb1f48359.pdf
+4015e8195db6edb0ef8520709ca9cb2c46f29be7,http://pdfs.semanticscholar.org/4015/e8195db6edb0ef8520709ca9cb2c46f29be7.pdf
+404776aa18031828f3d5dbceed39907f038a47fe,http://pdfs.semanticscholar.org/4047/76aa18031828f3d5dbceed39907f038a47fe.pdf
+407bb798ab153bf6156ba2956f8cf93256b6910a,http://pdfs.semanticscholar.org/407b/b798ab153bf6156ba2956f8cf93256b6910a.pdf
+400e6c777d5894db2f6538c8ebd1124352b1c064,http://www.ee.ucr.edu/~lan/papers/FG13.pdf
+40fb4e8932fb6a8fef0dddfdda57a3e142c3e823,http://gavrila.net/Publications/cvpr08.pdf
+405526dfc79de98f5bf3c97bf4aa9a287700f15d,http://pdfs.semanticscholar.org/8a6c/57fcd99a77982ec754e0b97fd67519ccb60c.pdf
+40cd062438c280c76110e7a3a0b2cf5ef675052c,http://pdfs.semanticscholar.org/40cd/062438c280c76110e7a3a0b2cf5ef675052c.pdf
+40a5b32e261dc5ccc1b5df5d5338b7d3fe10370d,http://pdfs.semanticscholar.org/40a5/b32e261dc5ccc1b5df5d5338b7d3fe10370d.pdf
+40a1935753cf91f29ffe25f6c9dde2dc49bf2a3a,http://pdfs.semanticscholar.org/cea3/8a329e98900923e9c962b0d58bf8e15405d6.pdf
+40a9f3d73c622cceee5e3d6ca8faa56ed6ebef60,http://www.site.uottawa.ca/~wslee/publication/CCECE2006.pdf
+40389b941a6901c190fb74e95dc170166fd7639d,http://pdfs.semanticscholar.org/56f7/dad4d6d98292061a2c1e399d9a0ecfbbbde3.pdf
+4068574b8678a117d9a434360e9c12fe6232dae0,http://www.visionmeetscognition.org/fpic2014/Camera_Ready/Paper%2031.pdf
+40c8cffd5aac68f59324733416b6b2959cb668fd,https://arxiv.org/pdf/1701.08341v1.pdf
+40b10e330a5511a6a45f42c8b86da222504c717f,http://pdfs.semanticscholar.org/40b1/0e330a5511a6a45f42c8b86da222504c717f.pdf
+40bb090a4e303f11168dce33ed992f51afe02ff7,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Deng_Marginal_Loss_for_CVPR_2017_paper.pdf
+40ca925befa1f7e039f0cd40d57dbef6007b4416,https://arxiv.org/pdf/1706.07567v1.pdf
+4042bbb4e74e0934f4afbedbe92dd3e37336b2f4,http://pdfs.semanticscholar.org/b35a/6b2f335c28696eb78a02e0b30ee59a3e3fd2.pdf
+4026dc62475d2ff2876557fc2b0445be898cd380,http://pdfs.semanticscholar.org/4026/dc62475d2ff2876557fc2b0445be898cd380.pdf
+40f127fa4459a69a9a21884ee93d286e99b54c5f,http://graphics.tu-bs.de/media/publications/stengel2013resolution.pdf
+401e6b9ada571603b67377b336786801f5b54eee,http://pdfs.semanticscholar.org/401e/6b9ada571603b67377b336786801f5b54eee.pdf
+406431d2286a50205a71f04e0b311ba858fc7b6c,http://pdfs.semanticscholar.org/4064/31d2286a50205a71f04e0b311ba858fc7b6c.pdf
+40217a8c60e0a7d1735d4f631171aa6ed146e719,http://pdfs.semanticscholar.org/4021/7a8c60e0a7d1735d4f631171aa6ed146e719.pdf
+2e20ed644e7d6e04dd7ab70084f1bf28f93f75e9,http://pdfs.semanticscholar.org/71f1/c8d39e1fbf1083a4616a3496f5c397a2daf5.pdf
+2eb37a3f362cffdcf5882a94a20a1212dfed25d9,http://pdfs.semanticscholar.org/2eb3/7a3f362cffdcf5882a94a20a1212dfed25d9.pdf
+2e0addeffba4be98a6ad0460453fbab52616b139,http://pdfs.semanticscholar.org/3cd7/8b1f43ead1226554f450bafcb8fbe208b5f0.pdf
+2e091b311ac48c18aaedbb5117e94213f1dbb529,http://pdfs.semanticscholar.org/b1a1/a049f1d78f6e3d072236237c467292ccd537.pdf
+2e1415a814ae9abace5550e4893e13bd988c7ba1,http://pdfs.semanticscholar.org/2e14/15a814ae9abace5550e4893e13bd988c7ba1.pdf
+2e0e056ed5927a4dc6e5c633715beb762628aeb0,http://pdfs.semanticscholar.org/2e0e/056ed5927a4dc6e5c633715beb762628aeb0.pdf
+2e8a0cc071017845ee6f67bd0633b8167a47abed,https://arxiv.org/pdf/1303.6021v1.pdf
+2e68190ebda2db8fb690e378fa213319ca915cf8,http://pdfs.semanticscholar.org/a705/804fa2e97ce23619b4f43da1b75fb138296d.pdf
+2e157e8b57f679c2f1b8e16d6e934f52312f08f6,http://pdfs.semanticscholar.org/2e15/7e8b57f679c2f1b8e16d6e934f52312f08f6.pdf
+2ee8900bbde5d3c81b7ed4725710ed46cc7e91cd,http://research.microsoft.com/users/byzhang/publications/20-81_01.pdf
+2e475f1d496456831599ce86d8bbbdada8ee57ed,http://www.l3s.de/~siersdorfer/sources/2015/www2015groupsourcing.pdf
+2ef51b57c4a3743ac33e47e0dc6a40b0afcdd522,http://pdfs.semanticscholar.org/2ef5/1b57c4a3743ac33e47e0dc6a40b0afcdd522.pdf
+2e6cfeba49d327de21ae3186532e56cadeb57c02,http://openaccess.thecvf.com/content_ICCV_2017/papers/Wang_Real_Time_Eye_ICCV_2017_paper.pdf
+2ee817981e02c4709d65870c140665ed25b005cc,http://www.umiacs.umd.edu/users/rama/Publications/Patel_ICARCV_2010.pdf
+2e98329fdec27d4b3b9b894687e7d1352d828b1d,http://pdfs.semanticscholar.org/2e98/329fdec27d4b3b9b894687e7d1352d828b1d.pdf
+2e19371a2d797ab9929b99c80d80f01a1fbf9479,http://pdfs.semanticscholar.org/2e19/371a2d797ab9929b99c80d80f01a1fbf9479.pdf
+2ebc35d196cd975e1ccbc8e98694f20d7f52faf3,http://pdfs.semanticscholar.org/2ebc/35d196cd975e1ccbc8e98694f20d7f52faf3.pdf
+2e3d081c8f0e10f138314c4d2c11064a981c1327,http://arxiv.org/pdf/1603.06015v1.pdf
+2ef328e035b2b5501ceddc0052615d4cebac6f1f,http://mi.eng.cam.ac.uk/~ss965/semantic_transform.pdf
+2e86402b354516d0a8392f75430156d629ca6281,https://arxiv.org/pdf/1604.03628v2.pdf
+2ea78e128bec30fb1a623c55ad5d55bb99190bd2,http://pdfs.semanticscholar.org/2ea7/8e128bec30fb1a623c55ad5d55bb99190bd2.pdf
+2e8eb9dc07deb5142a99bc861e0b6295574d1fbd,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Hejrati_Analysis_by_Synthesis_2014_CVPR_paper.pdf
+2e0f5e72ad893b049f971bc99b67ebf254e194f7,http://pdfs.semanticscholar.org/2e0f/5e72ad893b049f971bc99b67ebf254e194f7.pdf
+2e3c893ac11e1a566971f64ae30ac4a1f36f5bb5,http://pdfs.semanticscholar.org/cb94/9e849b20ddc157aaf648dca1e8c71463c288.pdf
+2ed3ce5cf9e262bcc48a6bd998e7fb70cf8a971c,http://pdfs.semanticscholar.org/6abe/c94e0af01d9706d73dfd91fd76139c7d99e0.pdf
+2edc6df161f6aadbef9c12408bdb367e72c3c967,http://www.infomus.org/Events/proceedings/ICMI2014/icmi/p514.pdf
+2ec7d6a04c8c72cc194d7eab7456f73dfa501c8c,http://pdfs.semanticscholar.org/2ec7/d6a04c8c72cc194d7eab7456f73dfa501c8c.pdf
+2eb9f1dbea71bdc57821dedbb587ff04f3a25f07,http://pdfs.semanticscholar.org/2eb9/f1dbea71bdc57821dedbb587ff04f3a25f07.pdf
+2e1fd8d57425b727fd850d7710d38194fa6e2654,http://www.cs.toronto.edu/~afsaneh/JamiesonEtAl2007.pdf
+2e1b1969ded4d63b69a5ec854350c0f74dc4de36,http://pdfs.semanticscholar.org/2e1b/1969ded4d63b69a5ec854350c0f74dc4de36.pdf
+2be0ab87dc8f4005c37c523f712dd033c0685827,http://www3.ntu.edu.sg/home/EXDJiang/ICIP2013_4.pdf
+2bb2ba7c96d40e269fc6a2d5384c739ff9fa16eb,http://jmcauley.ucsd.edu/data/amazon/sigir_draft.pdf
+2bbe89f61a8d6d4d6e39fdcaf8c185f110a01c78,http://www3.ntu.edu.sg/home/wanggang/TIFS15.pdf
+2b339ece73e3787f445c5b92078e8f82c9b1c522,http://pdfs.semanticscholar.org/7a2e/e06aaa3f342937225272951c0b6dd4309a7a.pdf
+2b4d092d70efc13790d0c737c916b89952d4d8c7,http://pdfs.semanticscholar.org/2b4d/092d70efc13790d0c737c916b89952d4d8c7.pdf
+2b0ff4b82bac85c4f980c40b3dc4fde05d3cc23f,http://pdfs.semanticscholar.org/2b0f/f4b82bac85c4f980c40b3dc4fde05d3cc23f.pdf
+2b3ceb40dced78a824cf67054959e250aeaa573b,http://pdfs.semanticscholar.org/7493/4a2b65538f42701e15f7f532437db2beead2.pdf
+2be8e06bc3a4662d0e4f5bcfea45631b8beca4d0,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2B_028_ext.pdf
+2bcec23ac1486f4106a3aa588b6589e9299aba70,http://pdfs.semanticscholar.org/2bce/c23ac1486f4106a3aa588b6589e9299aba70.pdf
+2b773fe8f0246536c9c40671dfa307e98bf365ad,http://pdfs.semanticscholar.org/2b77/3fe8f0246536c9c40671dfa307e98bf365ad.pdf
+2bf08d4cb8d1201a9866ee7c4852bfcbf8f8e7f1,http://mplab.ucsd.edu/~jake/haar.pdf
+2be9144a1e66de127192b01907c862381f4011d1,http://www1.cs.columbia.edu/~belhumeur/conference/eye-iccv05.pdf
+2bab44d3a4c5ca79fb8f87abfef4456d326a0445,http://www.mirlab.org/conference_papers/International_Conference/ACM%202005/docs/mir25.pdf
+2b0102d77d3d3f9bc55420d862075934f5c85bec,http://openaccess.thecvf.com/content_cvpr_2016/papers/Shao_Slicing_Convolutional_Neural_CVPR_2016_paper.pdf
+2b435ee691718d0b55d057d9be4c3dbb8a81526e,http://pdfs.semanticscholar.org/43ef/472c2c09d1ae2f2e5fc35d6d3ab7578658b4.pdf
+2b1327a51412646fcf96aa16329f6f74b42aba89,http://pdfs.semanticscholar.org/8296/cb7fea317fcd0a7ff6b7e4486ab869a7231e.pdf
+2be1e2f2b7208fdf7a379da37a2097cfe52bc196,http://www2.cvl.isy.liu.se/Education/Graduate/artikelklubb/aryananda_icra09.pdf
+2b5cb5466eecb131f06a8100dcaf0c7a0e30d391,https://www.cse.iitb.ac.in/~sharat/icvgip.org/icvgip2010/papers/53.sethuram.134.pdf
+2b64a8c1f584389b611198d47a750f5d74234426,http://pdfs.semanticscholar.org/fb11/6f00320a37d80ec32561d1ab9b795c943202.pdf
+2b632f090c09435d089ff76220fd31fd314838ae,http://openaccess.thecvf.com/content_ICCV_2017_workshops/papers/w23/Hajibabaei_Early_Adaptation_of_ICCV_2017_paper.pdf
+2b10a07c35c453144f22e8c539bf9a23695e85fc,http://pdfs.semanticscholar.org/2b10/a07c35c453144f22e8c539bf9a23695e85fc.pdf
+2b84630680e2c906f8d7ac528e2eb32c99ef203a,http://disi.unitn.it/~zen/data/acmmm14_zen3_orlando.pdf
+2b507f659b341ed0f23106446de8e4322f4a3f7e,http://pdfs.semanticscholar.org/2b50/7f659b341ed0f23106446de8e4322f4a3f7e.pdf
+2b7ef95822a4d577021df16607bf7b4a4514eb4b,http://pdfs.semanticscholar.org/b596/9178f843bfaecd0026d04c41e79bcb9edab5.pdf
+2b8dfbd7cae8f412c6c943ab48c795514d53c4a7,http://mirlab.org/conference_papers/International_Conference/ICASSP%202014/papers/p529-bordei.pdf
+2b1129efcbafa61da1d660de3b5c84b646540311,http://www.researchgate.net/profile/Haizhou_Ai/publication/221368891_Distributing_expressional_faces_in_2-D_emotional_space/links/546b431f0cf20dedafd52906.pdf
+2bae810500388dd595f4ebe992c36e1443b048d2,http://pdfs.semanticscholar.org/2bae/810500388dd595f4ebe992c36e1443b048d2.pdf
+2b42f83a720bd4156113ba5350add2df2673daf0,http://pdfs.semanticscholar.org/2b42/f83a720bd4156113ba5350add2df2673daf0.pdf
+2bbbbe1873ad2800954058c749a00f30fe61ab17,http://pdfs.semanticscholar.org/2bbb/be1873ad2800954058c749a00f30fe61ab17.pdf
+2baec98c19804bf19b480a9a0aa814078e28bb3d,http://eprints.eemcs.utwente.nl/26841/01/Pantic_Multi-conditional_Latent_Variable_Model.pdf
+47fdbd64edd7d348713253cf362a9c21f98e4296,http://www.vision.cs.chubu.ac.jp/MPRG/C_group/C071_yamashita2015.pdf
+47382cb7f501188a81bb2e10cfd7aed20285f376,http://pdfs.semanticscholar.org/4738/2cb7f501188a81bb2e10cfd7aed20285f376.pdf
+473366f025c4a6e0783e6174ca914f9cb328fe70,http://pdfs.semanticscholar.org/f021/cbfa5f3483889c3980b62c6cec329c8c5aec.pdf
+4793f11fbca4a7dba898b9fff68f70d868e2497c,http://pdfs.semanticscholar.org/4793/f11fbca4a7dba898b9fff68f70d868e2497c.pdf
+470dbd3238b857f349ebf0efab0d2d6e9779073a,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/3B_062_ext.pdf
+473031328c58b7461753e81251379331467f7a69,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W09/papers/Wang_Exploring_Fisher_Vector_2015_CVPR_paper.pdf
+47638197d83a8f8174cdddc44a2c7101fa8301b7,http://grail.cs.washington.edu/wp-content/uploads/2015/08/saleh2013oad.pdf
+47541d04ec24662c0be438531527323d983e958e,http://pdfs.semanticscholar.org/4754/1d04ec24662c0be438531527323d983e958e.pdf
+476f177b026830f7b31e94bdb23b7a415578f9a4,http://vision.ece.ucsb.edu/sites/vision.ece.ucsb.edu/files/publications/karthikeyan_icip2012_subspace_final.pdf
+474b461cd12c6d1a2fbd67184362631681defa9e,http://toc.proceedings.com/24478webtoc.pdf
+472ba8dd4ec72b34e85e733bccebb115811fd726,http://pdfs.semanticscholar.org/472b/a8dd4ec72b34e85e733bccebb115811fd726.pdf
+47ca2df3d657d7938d7253bed673505a6a819661,http://pdfs.semanticscholar.org/47ca/2df3d657d7938d7253bed673505a6a819661.pdf
+47d4838087a7ac2b995f3c5eba02ecdd2c28ba14,http://pdfs.semanticscholar.org/b2b5/35118c5c4dfcc96f547274cdc05dde629976.pdf
+47eba2f95679e106e463e8296c1f61f6ddfe815b,https://www.csie.ntu.edu.tw/~cyy/publications/papers/Shih2017DCF.pdf
+47a2727bd60e43f3253247b6d6f63faf2b67c54b,http://openaccess.thecvf.com/content_cvpr_2016/papers/Fu_Semi-Supervised_Vocabulary-Informed_Learning_CVPR_2016_paper.pdf
+47d3b923730746bfaabaab29a35634c5f72c3f04,http://pdfs.semanticscholar.org/47d3/b923730746bfaabaab29a35634c5f72c3f04.pdf
+47e3029a3d4cf0a9b0e96252c3dc1f646e750b14,http://mmi.tudelft.nl/pub/dragos/_CompSysTech07.pdf
+475e16577be1bfc0dd1f74f67bb651abd6d63524,http://pdfs.semanticscholar.org/475e/16577be1bfc0dd1f74f67bb651abd6d63524.pdf
+471befc1b5167fcfbf5280aa7f908eff0489c72b,http://poseidon.csd.auth.gr/papers/PUBLISHED/JOURNAL/pdf/Goudelis07a.pdf
+47f8b3b3f249830b6e17888df4810f3d189daac1,http://pdfs.semanticscholar.org/fd44/c0c238fe90d6ca61864010abd94768fcde0c.pdf
+47e8db3d9adb79a87c8c02b88f432f911eb45dc5,http://pdfs.semanticscholar.org/5f99/63990ab7dd888ab33393f712f8d5c1463348.pdf
+47aeb3b82f54b5ae8142b4bdda7b614433e69b9a,http://pdfs.semanticscholar.org/5d06/437656dd94616d7d87260d5eb77513ded30f.pdf
+47dabb566f2bdd6b3e4fa7efc941824d8b923a13,http://pdfs.semanticscholar.org/47da/bb566f2bdd6b3e4fa7efc941824d8b923a13.pdf
+47f5f740e225281c02c8a2ae809be201458a854f,http://pdfs.semanticscholar.org/5241/ad03e9276d4acd1c51eaa7f44e2d04d07b68.pdf
+47bf7a8779c68009ea56a7c20e455ccdf0e3a8fa,http://pdfs.semanticscholar.org/d948/50abdd272a402cd2f00e5b85311d87c75b16.pdf
+47a003e6bbfc5bf04a099ca53c67ddfdbea71315,http://www.researchgate.net/profile/Andrzej_Drygajlo/publication/228669241_Q-stack_aging_model_for_face_verification/links/09e4150f7ffb6d3946000000.pdf
+47b508abdaa5661fe14c13e8eb21935b8940126b,http://pdfs.semanticscholar.org/47b5/08abdaa5661fe14c13e8eb21935b8940126b.pdf
+477811ff147f99b21e3c28309abff1304106dbbe,http://pdfs.semanticscholar.org/f0f8/23511188d8c10b67512d23eb9cb7f3dd2f9a.pdf
+47506951d2dc7c4bb4d2d33dd25b67a767e56680,http://www.ll.mit.edu/mission/cybersec/publications/publication-files/full_papers/2015_04_15_BradyJ_IEEEHST_FP.pdf
+473cbc5ec2609175041e1410bc6602b187d03b23,http://pdfs.semanticscholar.org/473c/bc5ec2609175041e1410bc6602b187d03b23.pdf
+78216cd51e6e1cc014b83e27e7e78631ad44b899,http://www.ami-lab.org/uploads/Publications/Conference/WP4/Tracking%20facial%20features%20under%20occlusions%20and%20recognizing%20facial%20expressions%20in%20sign%20language.pdf
+78a4cabf0afc94da123e299df5b32550cd638939,http://pdfs.semanticscholar.org/78a4/cabf0afc94da123e299df5b32550cd638939.pdf
+78f08cc9f845dc112f892a67e279a8366663e26d,http://pdfs.semanticscholar.org/78f0/8cc9f845dc112f892a67e279a8366663e26d.pdf
+78d645d5b426247e9c8f359694080186681f57db,http://pdfs.semanticscholar.org/78d6/45d5b426247e9c8f359694080186681f57db.pdf
+7862d40da0d4e33cd6f5c71bbdb47377e4c6b95a,https://arxiv.org/pdf/1709.07598v1.pdf
+783f3fccde99931bb900dce91357a6268afecc52,http://pdfs.semanticscholar.org/d1ea/f2cc9dfc6cdbc5468ef2152c46e9111a3f3b.pdf
+7897c8a9361b427f7b07249d21eb9315db189496,https://arxiv.org/pdf/1102.2743v2.pdf
+7859667ed6c05a467dfc8a322ecd0f5e2337db56,http://pdfs.semanticscholar.org/7859/667ed6c05a467dfc8a322ecd0f5e2337db56.pdf
+78436256ff8f2e448b28e854ebec5e8d8306cf21,http://pdfs.semanticscholar.org/7843/6256ff8f2e448b28e854ebec5e8d8306cf21.pdf
+78f438ed17f08bfe71dfb205ac447ce0561250c6,http://pdfs.semanticscholar.org/78f4/38ed17f08bfe71dfb205ac447ce0561250c6.pdf
+78f79c83b50ff94d3e922bed392737b47f93aa06,http://mplab.ucsd.edu/wp-content/uploads/2011-LittlewortEtAl-FG-CERT.pdf
+78fede85d6595e7a0939095821121f8bfae05da6,http://pdfs.semanticscholar.org/78fe/de85d6595e7a0939095821121f8bfae05da6.pdf
+7862f646d640cbf9f88e5ba94a7d642e2a552ec9,http://pdfs.semanticscholar.org/7862/f646d640cbf9f88e5ba94a7d642e2a552ec9.pdf
+78a11b7d2d7e1b19d92d2afd51bd3624eca86c3c,http://pdfs.semanticscholar.org/78a1/1b7d2d7e1b19d92d2afd51bd3624eca86c3c.pdf
+78a4eb59ec98994bebcf3a5edf9e1d34970c45f6,https://graphics.stanford.edu/papers/ib-relighting/ib-relighting.pdf
+781c2553c4ed2a3147bbf78ad57ef9d0aeb6c7ed,https://ivi.fnwi.uva.nl/isis/publications/2017/JainIJCV2017/JainIJCV2017.pdf
+78df7d3fdd5c32f037fb5cc2a7c104ac1743d74e,http://arxiv.org/pdf/1503.01224.pdf
+780557daaa39a445b24c41f637d5fc9b216a0621,http://www.ee.columbia.edu/ln/dvmm/publications/15/EventNetDemo.pdf
+78fdf2b98cf6380623b0e20b0005a452e736181e,http://pdfs.semanticscholar.org/78fd/f2b98cf6380623b0e20b0005a452e736181e.pdf
+788a7b59ea72e23ef4f86dc9abb4450efefeca41,http://eprints.eemcs.utwente.nl/26840/01/Pantic_Robust_Statistical_Face_Frontalization.pdf
+787c1bb6d1f2341c5909a0d6d7314bced96f4681,http://pdfs.semanticscholar.org/787c/1bb6d1f2341c5909a0d6d7314bced96f4681.pdf
+7808937b46acad36e43c30ae4e9f3fd57462853d,http://ttic.uchicago.edu/~smaji/papers/attributes-iccv11.pdf
+8ba67f45fbb1ce47a90df38f21834db37c840079,http://www.cmlab.csie.ntu.edu.tw/~yanying/paper/dsp006-chen.pdf
+8b547b87fd95c8ff6a74f89a2b072b60ec0a3351,http://pdfs.semanticscholar.org/8b54/7b87fd95c8ff6a74f89a2b072b60ec0a3351.pdf
+8b7191a2b8ab3ba97423b979da6ffc39cb53f46b,http://www.eurecom.fr/fr/publication/3472/download/mm-publi-3472.pdf
+8bf57dc0dd45ed969ad9690033d44af24fd18e05,http://pdfs.semanticscholar.org/8bf5/7dc0dd45ed969ad9690033d44af24fd18e05.pdf
+8bf243817112ac0aa1348b40a065bb0b735cdb9c,http://pdfs.semanticscholar.org/8bf2/43817112ac0aa1348b40a065bb0b735cdb9c.pdf
+8bfada57140aa1aa22a575e960c2a71140083293,http://pdfs.semanticscholar.org/8bfa/da57140aa1aa22a575e960c2a71140083293.pdf
+8b8728edc536020bc4871dc66b26a191f6658f7c,http://pdfs.semanticscholar.org/8b87/28edc536020bc4871dc66b26a191f6658f7c.pdf
+8bbbdff11e88327816cad3c565f4ab1bb3ee20db,https://eprints.soton.ac.uk/410731/1/FG_soton_paper.pdf
+8b10383ef569ea0029a2c4a60cc2d8c87391b4db,http://pdfs.semanticscholar.org/fe2d/20dca6dcedc7944cc2d9fea76de6cbb9d90c.pdf
+8bfec7afcf5015017406fc04c43c1f43eb723631,http://www.umiacs.umd.edu/users/pvishalm/Journal_pub/DCS_TAC_2013.pdf
+8b30259a8ab07394d4dac971f3d3bd633beac811,http://pdfs.semanticscholar.org/8b30/259a8ab07394d4dac971f3d3bd633beac811.pdf
+8b19efa16a9e73125ab973429eb769d0ad5a8208,http://pdfs.semanticscholar.org/8b19/efa16a9e73125ab973429eb769d0ad5a8208.pdf
+8b6fded4d08bf0b7c56966b60562ee096af1f0c4,http://pdfs.semanticscholar.org/8b6f/ded4d08bf0b7c56966b60562ee096af1f0c4.pdf
+8bf647fed40bdc9e35560021636dfb892a46720e,https://arxiv.org/pdf/1612.04061v1.pdf
+8b2704a5218a6ef70e553eaf0a463bd55129b69d,http://pdfs.semanticscholar.org/8b27/04a5218a6ef70e553eaf0a463bd55129b69d.pdf
+8bb21b1f8d6952d77cae95b4e0b8964c9e0201b0,http://pdfs.semanticscholar.org/8bb2/1b1f8d6952d77cae95b4e0b8964c9e0201b0.pdf
+8b1db0894a23c4d6535b5adf28692f795559be90,http://pdfs.semanticscholar.org/8b1d/b0894a23c4d6535b5adf28692f795559be90.pdf
+8b2e3805b37c18618b74b243e7a6098018556559,http://pdfs.semanticscholar.org/8b2e/3805b37c18618b74b243e7a6098018556559.pdf
+8b74252625c91375f55cbdd2e6415e752a281d10,http://epubs.surrey.ac.uk/813060/1/camgoz2016icprw.pdf
+133f42368e63928dc860cce7618f30ee186d328c,http://pdfs.semanticscholar.org/50bd/1c76a5051db0b13fd76e7a633884ad49d5a8.pdf
+134aad8153ab78345b2581efac2fe175a3084154,http://www.cs.utexas.edu/~ai-lab/pubs/vijayanarasimhan_grauman_cvpr2008.pdf
+13719bbb4bb8bbe0cbcdad009243a926d93be433,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w6/papers/Tian_Deep_LDA-Pruned_Nets_CVPR_2017_paper.pdf
+134db6ca13f808a848321d3998e4fe4cdc52fbc2,http://ibug.doc.ic.ac.uk/media/uploads/documents/PanticPatras-SMCB-2005-FINAL.pdf
+133dd0f23e52c4e7bf254e8849ac6f8b17fcd22d,http://www.stat.ucla.edu/~caiming/pubs/1402.1783v2.pdf
+1329206dbdb0a2b9e23102e1340c17bd2b2adcf5,http://pdfs.semanticscholar.org/a2f4/06c8babac96b2108c530974c4d3132106d42.pdf
+1369e9f174760ea592a94177dbcab9ed29be1649,http://geza.kzoo.edu/~erdi/IJCNN2013/HTMLFiles/PDFs/P393-1401.pdf
+133900a0e7450979c9491951a5f1c2a403a180f0,http://rlair.cs.ucr.edu/papers/docs/socgroup.pdf
+13bda03fc8984d5943ed8d02e49a779d27c84114,http://www-ljk.imag.fr/Publications/Basilic/com.lmc.publi.PUBLI_Inproceedings@13730f58c78_1669a2e/cevikalp-cvpr12.pdf
+13db9466d2ddf3c30b0fd66db8bfe6289e880802,http://pdfs.semanticscholar.org/13db/9466d2ddf3c30b0fd66db8bfe6289e880802.pdf
+13a994d489c15d440c1238fc1ac37dad06dd928c,http://pdfs.semanticscholar.org/13a9/94d489c15d440c1238fc1ac37dad06dd928c.pdf
+131178dad3c056458e0400bed7ee1a36de1b2918,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Deng_Visual_Reranking_through_2013_ICCV_paper.pdf
+13141284f1a7e1fe255f5c2b22c09e32f0a4d465,http://www.micc.unifi.it/pernici/index_files/ALIEN_final.pdf
+132527383890565d18f1b7ad50d76dfad2f14972,http://pdfs.semanticscholar.org/1325/27383890565d18f1b7ad50d76dfad2f14972.pdf
+1394ca71fc52db972366602a6643dc3e65ee8726,https://www.cl.cam.ac.uk/~tb346/pub/papers/icmi2016EmoReact.pdf
+137aa2f891d474fce1e7a1d1e9b3aefe21e22b34,http://www.csis.pace.edu/~ctappert/dps/2013BTAS/Papers/Paper%20139/PID2859389.pdf
+13b1b18b9cfa6c8c44addb9a81fe10b0e89db32a,http://www.sfu.ca/~smuralid/papers/thesis.pdf
+131130f105661a47e0ffb85c2fe21595785f948a,http://pdfs.semanticscholar.org/1311/30f105661a47e0ffb85c2fe21595785f948a.pdf
+1329bcac5ebd0b08ce33ae1af384bd3e7a0deaca,http://pdfs.semanticscholar.org/e5c5/e5531aaa661c223088454572de11d2f266c3.pdf
+133da0d8c7719a219537f4a11c915bf74c320da7,http://pdfs.semanticscholar.org/4f4f/920eb43399d8d05b42808e45b56bdd36a929.pdf
+13c250fb740cb5616aeb474869db6ab11560e2a6,http://pdfs.semanticscholar.org/13c2/50fb740cb5616aeb474869db6ab11560e2a6.pdf
+13940d0cc90dbf854a58f92d533ce7053aac024a,http://pdfs.semanticscholar.org/949c/a8a6997aba88a162a36d48047f35ba8d0aab.pdf
+133f01aec1534604d184d56de866a4bd531dac87,http://www.cs.tau.ac.il/~wolf/papers/jpatchlbp.pdf
+131bfa2ae6a04fd3b921ccb82b1c3f18a400a9c1,http://pdfs.semanticscholar.org/131b/fa2ae6a04fd3b921ccb82b1c3f18a400a9c1.pdf
+13841d54c55bd74964d877b4b517fa94650d9b65,http://www98.griffith.edu.au/dspace/bitstream/handle/10072/30001/60226_1.pdf?sequence=1
+1389ba6c3ff34cdf452ede130c738f37dca7e8cb,http://pdfs.semanticscholar.org/1389/ba6c3ff34cdf452ede130c738f37dca7e8cb.pdf
+131e395c94999c55c53afead65d81be61cd349a4,http://pdfs.semanticscholar.org/2c3f/aeaf0fe103e1e6cb8c2116728e2a5c7b7f29.pdf
+1384a83e557b96883a6bffdb8433517ec52d0bea,http://pdfs.semanticscholar.org/6be6/392550222ca07ba4c47931bffaedace72d24.pdf
+13fd0a4d06f30a665fc0f6938cea6572f3b496f7,http://pdfs.semanticscholar.org/13fd/0a4d06f30a665fc0f6938cea6572f3b496f7.pdf
+132f88626f6760d769c95984212ed0915790b625,http://pdfs.semanticscholar.org/132f/88626f6760d769c95984212ed0915790b625.pdf
+13f6ab2f245b4a871720b95045c41a4204626814,http://pdfs.semanticscholar.org/9d74/382b6c4209c49de7c2b0fab7b34483ba0ddb.pdf
+13be4f13dac6c9a93f969f823c4b8c88f607a8c4,http://www1.ece.neu.edu/~yuewu/files/2016/p242-robinson.pdf
+13afc4f8d08f766479577db2083f9632544c7ea6,https://cs.anu.edu.au/few/KSikka_EmotiW.pdf
+13188a88bbf83a18dd4964e3f89d0bc0a4d3a0bd,http://pdfs.semanticscholar.org/1318/8a88bbf83a18dd4964e3f89d0bc0a4d3a0bd.pdf
+13d9da779138af990d761ef84556e3e5c1e0eb94,http://www.cs.berkeley.edu/~malik/papers/ferencz-learnedmiller-malik08.pdf
+7f57e9939560562727344c1c987416285ef76cda,http://people.cs.vt.edu/~gangwang/class/cs6604/papers/face.pdf
+7fc5b6130e9d474dfb49d9612b6aa0297d481c8e,http://pdfs.semanticscholar.org/7fc5/b6130e9d474dfb49d9612b6aa0297d481c8e.pdf
+7fce5769a7d9c69248178989a99d1231daa4fce9,http://pdfs.semanticscholar.org/7fce/5769a7d9c69248178989a99d1231daa4fce9.pdf
+7fa2605676c589a7d1a90d759f8d7832940118b5,http://www.ces.clemson.edu/~stb/publications/willimon_clothing_classification_icra2013.pdf
+7ff42ee09c9b1a508080837a3dc2ea780a1a839b,http://pdfs.semanticscholar.org/7ff4/2ee09c9b1a508080837a3dc2ea780a1a839b.pdf
+7f533bd8f32525e2934a66a5b57d9143d7a89ee1,http://pdfs.semanticscholar.org/7f53/3bd8f32525e2934a66a5b57d9143d7a89ee1.pdf
+7f44f8a5fd48b2d70cc2f344b4d1e7095f4f1fe5,http://www.cs.cmu.edu/~epxing/papers/2015/Zhao_Xing_IJCV15.pdf
+7f6061c83dc36633911e4d726a497cdc1f31e58a,http://pdfs.semanticscholar.org/7f60/61c83dc36633911e4d726a497cdc1f31e58a.pdf
+7fa3d4be12e692a47b991c0b3d3eba3a31de4d05,http://pdfs.semanticscholar.org/d5a4/c2757619a1f2c8d9a879e6f26f539a4a18f2.pdf
+7f82f8a416170e259b217186c9e38a9b05cb3eb4,http://pdfs.semanticscholar.org/7f82/f8a416170e259b217186c9e38a9b05cb3eb4.pdf
+7f36dd9ead29649ed389306790faf3b390dc0aa2,http://pdfs.semanticscholar.org/7f36/dd9ead29649ed389306790faf3b390dc0aa2.pdf
+7f6cd03e3b7b63fca7170e317b3bb072ec9889e0,http://pdfs.semanticscholar.org/7f6c/d03e3b7b63fca7170e317b3bb072ec9889e0.pdf
+7f6599e674a33ed64549cd512ad75bdbd28c7f6c,http://pdfs.semanticscholar.org/7f65/99e674a33ed64549cd512ad75bdbd28c7f6c.pdf
+7f9260c00a86a0d53df14469f1fa10e318ee2a3c,http://www.cse.msu.edu/~stockman/Book/projects.html/F06Docs/Papers/daugemanIrisICIP02.pdf
+7f97a36a5a634c30de5a8e8b2d1c812ca9f971ae,http://pdfs.semanticscholar.org/7f97/a36a5a634c30de5a8e8b2d1c812ca9f971ae.pdf
+7f2a4cd506fe84dee26c0fb41848cb219305173f,http://pdfs.semanticscholar.org/7f2a/4cd506fe84dee26c0fb41848cb219305173f.pdf
+7fd700f4a010d765c506841de9884df394c1de1c,http://www.kyb.tuebingen.mpg.de/publications/attachments/CVPR2008-Blaschko_5069%5B0%5D.pdf
+7f59657c883f77dc26393c2f9ed3d19bdf51137b,http://pdfs.semanticscholar.org/7f59/657c883f77dc26393c2f9ed3d19bdf51137b.pdf
+7ffc5c58e5b61ac7c45d8e6ed076248051ebea34,http://repository.lib.polyu.edu.hk/jspui/bitstream/10397/238/1/SMCB_C_34_5_04.pdf
+7f23a4bb0c777dd72cca7665a5f370ac7980217e,http://pdfs.semanticscholar.org/ce70/fecc7150816e081b422cbc157bd9019cdf25.pdf
+7fb6bc6c920ca574677f0d3a40c5c377a095885b,http://www.cs.bris.ac.uk/Publications/Papers/2000124.pdf
+7f268f29d2c8f58cea4946536f5e2325777fa8fa,http://pdfs.semanticscholar.org/7f26/8f29d2c8f58cea4946536f5e2325777fa8fa.pdf
+7fc3442c8b4c96300ad3e860ee0310edb086de94,http://pdfs.semanticscholar.org/82f3/b7cacc15e026fd3a7639091d54162f6ae064.pdf
+7f3a73babe733520112c0199ff8d26ddfc7038a0,http://pdfs.semanticscholar.org/7f3a/73babe733520112c0199ff8d26ddfc7038a0.pdf
+7f8d44e7fd2605d580683e47bb185de7f9ea9e28,http://pdfs.semanticscholar.org/c84f/88b2a764ddcc22c4971827d58024b6017496.pdf
+7f1f3d7b1a4e7fc895b77cb23b1119a6f13e4d3a,http://pdfs.semanticscholar.org/7f1f/3d7b1a4e7fc895b77cb23b1119a6f13e4d3a.pdf
+7fcfd72ba6bc14bbb90b31fe14c2c77a8b220ab2,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/He_Robust_FEC-CNN_A_CVPR_2017_paper.pdf
+7f205b9fca7e66ac80758c4d6caabe148deb8581,http://pdfs.semanticscholar.org/7f20/5b9fca7e66ac80758c4d6caabe148deb8581.pdf
+7fd6bb30ad5d7eb3078efbb85f94d2d60e701115,http://pdfs.semanticscholar.org/7fd6/bb30ad5d7eb3078efbb85f94d2d60e701115.pdf
+7fc76446d2b11fc0479df6e285723ceb4244d4ef,http://pdfs.semanticscholar.org/7fc7/6446d2b11fc0479df6e285723ceb4244d4ef.pdf
+7a9ef21a7f59a47ce53b1dff2dd49a8289bb5098,http://pdfs.semanticscholar.org/7a9e/f21a7f59a47ce53b1dff2dd49a8289bb5098.pdf
+7af38f6dcfbe1cd89f2307776bcaa09c54c30a8b,http://pdfs.semanticscholar.org/7af3/8f6dcfbe1cd89f2307776bcaa09c54c30a8b.pdf
+7ae0212d6bf8a067b468f2a78054c64ea6a577ce,http://pdfs.semanticscholar.org/7ae0/212d6bf8a067b468f2a78054c64ea6a577ce.pdf
+7a9c317734acaf4b9bd8e07dd99221c457b94171,http://pdfs.semanticscholar.org/7a9c/317734acaf4b9bd8e07dd99221c457b94171.pdf
+7a0fb972e524cb9115cae655e24f2ae0cfe448e0,http://pdfs.semanticscholar.org/7a0f/b972e524cb9115cae655e24f2ae0cfe448e0.pdf
+7ad77b6e727795a12fdacd1f328f4f904471233f,https://ueaeprints.uea.ac.uk/65008/1/Accepted_manuscript.pdf
+7a7f2403e3cc7207e76475e8f27a501c21320a44,http://www.apsipa2013.org/wp-content/uploads/2013/05/395_Emotion-recognition-Wu-2928773.pdf
+7aafeb9aab48fb2c34bed4b86755ac71e3f00338,http://pdfs.semanticscholar.org/7aaf/eb9aab48fb2c34bed4b86755ac71e3f00338.pdf
+7a84368ebb1a20cc0882237a4947efc81c56c0c0,https://ibug.doc.ic.ac.uk/media/uploads/documents/iccv_final.pdf
+7aa4c16a8e1481629f16167dea313fe9256abb42,http://mirlab.org/conference_papers/International_Conference/ICASSP%202017/pdfs/0002981.pdf
+7ad1638f7d76c7e885bc84cd694c60f109f02159,https://www.researchgate.net/profile/Wen-Jing_Yan/publication/236120483_Face_Recognition_and_Micro-expression_Recognition_Based_on_Discriminant_Tensor_Subspace_Analysis_Plus_Extreme_Learning_Machine/links/0deec51adcddd72a4f000000.pdf
+7a6d9f89e0925a220fe3dfba4f0d2745f8be6c9a,http://www.faceplusplus.com/wp-content/uploads/2014/11/Learning-Compact-Face-Representation-Packing-a-Face-into-an-int32.pdf
+7a85b3ab0efb6b6fcb034ce13145156ee9d10598,http://pdfs.semanticscholar.org/7a85/b3ab0efb6b6fcb034ce13145156ee9d10598.pdf
+7ab930146f4b5946ec59459f8473c700bcc89233,http://pdfs.semanticscholar.org/7ab9/30146f4b5946ec59459f8473c700bcc89233.pdf
+7a65fc9e78eff3ab6062707deaadde024d2fad40,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w11/papers/Zhu_A_Study_on_ICCV_2015_paper.pdf
+7ad7897740e701eae455457ea74ac10f8b307bed,http://pdfs.semanticscholar.org/7ad7/897740e701eae455457ea74ac10f8b307bed.pdf
+7a1ce696e260899688cb705f243adf73c679f0d9,http://www.cse.msu.edu/~rossarun/pubs/SwearingenRossLabelPropagation_BIOSIG2016.pdf
+7a061e7eab865fc8d2ef00e029b7070719ad2e9a,http://cvrr.ucsd.edu/ece285/papers/from_WI13/Ramanan_IJCV2013.pdf
+7ab7befcd319d55d26c1e4b7b9560da5763906f3,http://www.researchgate.net/profile/Lee_Ping-Han/publication/236160185_Facial_Trait_Code/links/0c96051e26825bd65a000000.pdf
+7a8c2743db1749c2d9f16f62ee633574c1176e34,http://pdfs.semanticscholar.org/7a8c/2743db1749c2d9f16f62ee633574c1176e34.pdf
+1451e7b11e66c86104f9391b80d9fb422fb11c01,http://pdfs.semanticscholar.org/1451/e7b11e66c86104f9391b80d9fb422fb11c01.pdf
+14761b89152aa1fc280a33ea4d77b723df4e3864,http://pdfs.semanticscholar.org/1476/1b89152aa1fc280a33ea4d77b723df4e3864.pdf
+14b87359f6874ff9b8ee234b18b418e57e75b762,http://pdfs.semanticscholar.org/1b62/6c14544f249cd52ef86a4efc17f3d3834003.pdf
+14fdec563788af3202ce71c021dd8b300ae33051,http://pdfs.semanticscholar.org/14fd/ec563788af3202ce71c021dd8b300ae33051.pdf
+142e5b4492bc83b36191be4445ef0b8b770bf4b0,http://pdfs.semanticscholar.org/142e/5b4492bc83b36191be4445ef0b8b770bf4b0.pdf
+14b016c7a87d142f4b9a0e6dc470dcfc073af517,http://ws680.nist.gov/publication/get_pdf.cfm?pub_id=918912
+14b66748d7c8f3752dca23991254fca81b6ee86c,http://pdfs.semanticscholar.org/4e92/a8dcfd802c3248d56ba16d2613dceacaef59.pdf
+14e8dbc0db89ef722c3c198ae19bde58138e88bf,http://ascl.cis.fiu.edu/uploads/1/3/4/2/13423859/amini-lisetti-acii-2013-final.pdf
+14fa27234fa2112014eda23da16af606db7f3637,http://pdfs.semanticscholar.org/14fa/27234fa2112014eda23da16af606db7f3637.pdf
+1459d4d16088379c3748322ab0835f50300d9a38,https://arxiv.org/pdf/1605.04039v1.pdf
+14e949f5754f9e5160e8bfa3f1364dd92c2bb8d6,http://pdfs.semanticscholar.org/4b76/694ff2efb302074adf1ba6052d643177abd1.pdf
+146bbf00298ee1caecde3d74e59a2b8773d2c0fc,http://pdfs.semanticscholar.org/146b/bf00298ee1caecde3d74e59a2b8773d2c0fc.pdf
+14e9158daf17985ccbb15c9cd31cf457e5551990,http://pdfs.semanticscholar.org/14e9/158daf17985ccbb15c9cd31cf457e5551990.pdf
+14ce7635ff18318e7094417d0f92acbec6669f1c,http://www.cs.tau.ac.il/~wolf/papers/deepface_11_01_2013.pdf
+143f7a51058b743a0d43026a523d9bbbc1ae43a8,http://www.researchgate.net/profile/Shinichi_Satoh/publication/221368838_An_efficient_method_for_face_retrieval_from_large_video_datasets/links/0912f510a0404c605f000000.pdf
+14d4c019c3eac3c3fa888cb8c184f31457eced02,http://pdfs.semanticscholar.org/14d4/c019c3eac3c3fa888cb8c184f31457eced02.pdf
+1450296fb936d666f2f11454cc8f0108e2306741,http://pdfs.semanticscholar.org/1450/296fb936d666f2f11454cc8f0108e2306741.pdf
+140438a77a771a8fb656b39a78ff488066eb6b50,http://homes.cs.washington.edu/~neeraj/base/publications/base/papers/nk_cvpr2011_faceparts.pdf
+143bee9120bcd7df29a0f2ad6f0f0abfb23977b8,http://pdfs.semanticscholar.org/143b/ee9120bcd7df29a0f2ad6f0f0abfb23977b8.pdf
+14d72dc9f78d65534c68c3ed57305f14bd4b5753,http://openaccess.thecvf.com/content_ICCV_2017/papers/Yan_Exploiting_Multi-Grain_Ranking_ICCV_2017_paper.pdf
+14b162c2581aea1c0ffe84e7e9273ab075820f52,http://pdfs.semanticscholar.org/4b87/c72e53f19e29f2ccf4d24f9432ebbafcf1a8.pdf
+14ff9c89f00dacc8e0c13c94f9fadcd90e4e604d,https://www.comp.nus.edu.sg/~tsim/documents/cascade-cf-landmarks.pdf
+14fdce01c958043140e3af0a7f274517b235adf3,http://pdfs.semanticscholar.org/14fd/ce01c958043140e3af0a7f274517b235adf3.pdf
+14b69626b64106bff20e17cf8681790254d1e81c,http://pdfs.semanticscholar.org/14b6/9626b64106bff20e17cf8681790254d1e81c.pdf
+14070478b8f0d84e5597c3e67c30af91b5c3a917,http://pdfs.semanticscholar.org/f0a5/f885aa14ac2bbb3cc8e4c7530f2449b2f160.pdf
+14fb3283d4e37760b7dc044a1e2906e3cbf4d23a,http://crcv.ucf.edu/courses/CAP6412/Spring2013/papers/felix_yu_attribute_cvpr2012.pdf
+14811696e75ce09fd84b75fdd0569c241ae02f12,https://jurie.users.greyc.fr/papers/cvpr08-cevikalp.pdf
+141eab5f7e164e4ef40dd7bc19df9c31bd200c5e,http://www.jdl.ac.cn/doc/2006/Local%20Linear%20Regression%20(LLR)%20for%20Pose%20Invariant%20Face%20Recognition.pdf
+14e759cb019aaf812d6ac049fde54f40c4ed1468,http://pdfs.semanticscholar.org/14e7/59cb019aaf812d6ac049fde54f40c4ed1468.pdf
+146a7ecc7e34b85276dd0275c337eff6ba6ef8c0,https://arxiv.org/pdf/1611.06158v1.pdf
+148eb413bede35487198ce7851997bf8721ea2d6,http://pdfs.semanticscholar.org/148e/b413bede35487198ce7851997bf8721ea2d6.pdf
+1462bc73834e070201acd6e3eaddd23ce3c1a114,http://pdfs.semanticscholar.org/1462/bc73834e070201acd6e3eaddd23ce3c1a114.pdf
+14014a1bdeb5d63563b68b52593e3ac1e3ce7312,http://pdfs.semanticscholar.org/1401/4a1bdeb5d63563b68b52593e3ac1e3ce7312.pdf
+1473a233465ea664031d985e10e21de927314c94,http://pdfs.semanticscholar.org/e985/0501e707f8783172ecacfe0cd29159abda34.pdf
+140c95e53c619eac594d70f6369f518adfea12ef,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_089_ext.pdf
+14418ae9a6a8de2b428acb2c00064da129632f3e,http://fanyix.cs.ucdavis.edu/project/discovery/files/ext_abstract.pdf
+14ba910c46d659871843b31d5be6cba59843a8b8,http://www.crcv.ucf.edu/papers/cvpr2013/ortiz_vfr_trailers.pdf
+1467c4ab821c3b340abe05a1b13a19318ebbce98,http://pdfs.semanticscholar.org/1467/c4ab821c3b340abe05a1b13a19318ebbce98.pdf
+14318d2b5f2cf731134a6964d8193ad761d86942,http://pdfs.semanticscholar.org/1431/8d2b5f2cf731134a6964d8193ad761d86942.pdf
+142dcfc3c62b1f30a13f1f49c608be3e62033042,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Tsai_Adaptive_Region_Pooling_2015_CVPR_paper.pdf
+14c0f9dc9373bea1e27b11fa0594c86c9e632c8d,http://openaccess.thecvf.com/content_iccv_2015/papers/Dang_Adaptive_Exponential_Smoothing_ICCV_2015_paper.pdf
+1439bf9ba7ff97df9a2da6dae4784e68794da184,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2013/W16/papers/Ptucha_LGE-KSVD_Flexible_Dictionary_2013_CVPR_paper.pdf
+141768ab49a5a9f5adcf0cf7e43a23471a7e5d82,http://arxiv.org/pdf/1405.0085v1.pdf
+14e428f2ff3dc5cf96e5742eedb156c1ea12ece1,http://www.univ-soukahras.dz/eprints/2014-150-03190.pdf
+14bca107bb25c4dce89210049bf39ecd55f18568,http://pdfs.semanticscholar.org/6f56/b0fada68f36d78cf20148fd13de8bce8a93d.pdf
+14a5feadd4209d21fa308e7a942967ea7c13b7b6,http://mirlab.org/conference_papers/International_Conference/ICASSP%202012/pdfs/0001025.pdf
+8ec82da82416bb8da8cdf2140c740e1574eaf84f,http://pdfs.semanticscholar.org/8ec8/2da82416bb8da8cdf2140c740e1574eaf84f.pdf
+8ee62f7d59aa949b4a943453824e03f4ce19e500,http://arxiv.org/pdf/1603.09732v1.pdf
+8e0ede53dc94a4bfcf1238869bf1113f2a37b667,http://www.ri.cmu.edu/pub_files/2015/6/jpml_final.pdf
+8e33183a0ed7141aa4fa9d87ef3be334727c76c0,http://pdfs.semanticscholar.org/8e33/183a0ed7141aa4fa9d87ef3be334727c76c0.pdf
+8e94ed0d7606408a0833e69c3185d6dcbe22bbbe,http://www.wjscheirer.com/papers/wjs_wacv2012_eyes.pdf
+8eb9aa6349db3dd1b724266fcd5fc39a83da022a,http://www.hcii-lab.net/2009/%5BICIP%202009%5D%20A%20Novel%20feature%20extraction%20using%20PHOG%20for%20Smile%20Recognition.pdf
+8e461978359b056d1b4770508e7a567dbed49776,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Sikka_LOMo_Latent_Ordinal_CVPR_2016_paper.pdf
+8e4808e71c9b9f852dc9558d7ef41566639137f3,http://pdfs.semanticscholar.org/8e48/08e71c9b9f852dc9558d7ef41566639137f3.pdf
+8ea30ade85880b94b74b56a9bac013585cb4c34b,http://www.eurecom.fr/fr/publication/1392/download/mm-perrfl-040517.pdf
+8e0ad1ccddc7ec73916eddd2b7bbc0019d8a7958,http://pdfs.semanticscholar.org/bff6/c3acd48f34c671c48fae9b3fdf60f5d7b363.pdf
+8e8e3f2e66494b9b6782fb9e3f52aeb8e1b0d125,https://www.wjscheirer.com/papers/wjs_btas2012_smt.pdf
+8e378ef01171b33c59c17ff5798f30293fe30686,http://pdfs.semanticscholar.org/8e37/8ef01171b33c59c17ff5798f30293fe30686.pdf
+8ed051be31309a71b75e584bc812b71a0344a019,http://www.vision.caltech.edu/~bart/Publications/2007/BartUllmanMBE.pdf
+8ee5b1c9fb0bded3578113c738060290403ed472,https://infoscience.epfl.ch/record/200452/files/wacv2014-RGE.pdf
+8efda5708bbcf658d4f567e3866e3549fe045bbb,http://pdfs.semanticscholar.org/8efd/a5708bbcf658d4f567e3866e3549fe045bbb.pdf
+225fb9181545f8750061c7693661b62d715dc542,http://pdfs.semanticscholar.org/c592/e408d95c838bced90b79640bead7c226fe64.pdf
+22043cbd2b70cb8195d8d0500460ddc00ddb1a62,http://uir.ulster.ac.uk/37137/2/Separability-Oriented%20Subclass%20Discriminant%20Analysis.pdf
+22137ce9c01a8fdebf92ef35407a5a5d18730dde,http://pdfs.semanticscholar.org/2213/7ce9c01a8fdebf92ef35407a5a5d18730dde.pdf
+22e2066acfb795ac4db3f97d2ac176d6ca41836c,http://pdfs.semanticscholar.org/26f5/3a1abb47b1f0ea1f213dc7811257775dc6e6.pdf
+22717ad3ad1dfcbb0fd2f866da63abbde9af0b09,http://pdfs.semanticscholar.org/2271/7ad3ad1dfcbb0fd2f866da63abbde9af0b09.pdf
+224d0eee53c2aa5d426d2c9b7fa5d843a47cf1db,http://www.ifp.illinois.edu/~jyang29/papers/CVPR13-PEM.pdf
+2288696b6558b7397bdebe3aed77bedec7b9c0a9,http://pdfs.semanticscholar.org/2288/696b6558b7397bdebe3aed77bedec7b9c0a9.pdf
+22bebedc1a5f3556cb4f577bdbe032299a2865e8,http://pdfs.semanticscholar.org/22be/bedc1a5f3556cb4f577bdbe032299a2865e8.pdf
+22264e60f1dfbc7d0b52549d1de560993dd96e46,http://arxiv.org/pdf/1608.01471v1.pdf
+22dada4a7ba85625824489375184ba1c3f7f0c8f,http://arxiv.org/pdf/1506.02328v1.pdf
+221252be5d5be3b3e53b3bbbe7a9930d9d8cad69,http://pdfs.semanticscholar.org/2212/52be5d5be3b3e53b3bbbe7a9930d9d8cad69.pdf
+223ec77652c268b98c298327d42aacea8f3ce23f,http://pdfs.semanticscholar.org/223e/c77652c268b98c298327d42aacea8f3ce23f.pdf
+22df6b6c87d26f51c0ccf3d4dddad07ce839deb0,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Yu_Fast_Action_Proposals_2015_CVPR_paper.pdf
+228558a2a38a6937e3c7b1775144fea290d65d6c,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Smith_Nonparametric_Context_Modeling_2014_CVPR_paper.pdf
+22fdd8d65463f520f054bf4f6d2d216b54fc5677,http://pdfs.semanticscholar.org/22fd/d8d65463f520f054bf4f6d2d216b54fc5677.pdf
+2251a88fbccb0228d6d846b60ac3eeabe468e0f1,http://pdfs.semanticscholar.org/2251/a88fbccb0228d6d846b60ac3eeabe468e0f1.pdf
+22e678d3e915218a7c09af0d1602e73080658bb7,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR_2009_WS/data/papers/04/13.pdf
+22ad2c8c0f4d6aa4328b38d894b814ec22579761,http://nichol.as/papers/Gallagher/Clothing%20Cosegmentation%20for%20Recognizing%20People.pdf
+226a5ff790b969593596a52b55b3718dcdd7bb7f,https://www.cise.ufl.edu/~jho/papers/IEEE06.pdf
+227b18fab568472bf14f9665cedfb95ed33e5fce,https://arxiv.org/pdf/1308.0271v2.pdf
+2241eda10b76efd84f3c05bdd836619b4a3df97e,http://arxiv.org/pdf/1506.01342v5.pdf
+22646cf884cc7093b0db2c1731bd52f43682eaa8,http://pdfs.semanticscholar.org/2264/6cf884cc7093b0db2c1731bd52f43682eaa8.pdf
+22f94c43dd8b203f073f782d91e701108909690b,http://pdfs.semanticscholar.org/22f9/4c43dd8b203f073f782d91e701108909690b.pdf
+22dabd4f092e7f3bdaf352edd925ecc59821e168,http://dro.deakin.edu.au/eserv/DU:30044576/venkatesh-exploitingside-2008.pdf
+22f656d0f8426c84a33a267977f511f127bfd7f3,https://arxiv.org/pdf/1609.06426v2.pdf
+22143664860c6356d3de3556ddebe3652f9c912a,http://pdfs.semanticscholar.org/2214/3664860c6356d3de3556ddebe3652f9c912a.pdf
+2271d554787fdad561fafc6e9f742eea94d35518,http://pdfs.semanticscholar.org/2271/d554787fdad561fafc6e9f742eea94d35518.pdf
+22ec256400e53cee35f999244fb9ba6ba11c1d06,http://pdfs.semanticscholar.org/2dbd/f0093228eee11ce9ef17365055dada756413.pdf
+22ec8af0f0e5469e40592d29e28cfbdf1154c666,http://pdfs.semanticscholar.org/aa07/2c823da778a2b8bf1fc79141b3b228a14e99.pdf
+22a7f1aebdb57eecd64be2a1f03aef25f9b0e9a7,http://pdfs.semanticscholar.org/22a7/f1aebdb57eecd64be2a1f03aef25f9b0e9a7.pdf
+22e189a813529a8f43ad76b318207d9a4b6de71a,http://openaccess.thecvf.com/content_ICCV_2017/papers/Felsen_What_Will_Happen_ICCV_2017_paper.pdf
+25d514d26ecbc147becf4117512523412e1f060b,http://www.iab-rubric.org/papers/2015_ICB_CrowdVideoFaceDataset.pdf
+25c19d8c85462b3b0926820ee5a92fc55b81c35a,http://www.brl.ntt.co.jp/people/kumano/papers/Kumano.IJCV2009.pdf
+258a8c6710a9b0c2dc3818333ec035730062b1a5,http://pdfs.semanticscholar.org/258a/8c6710a9b0c2dc3818333ec035730062b1a5.pdf
+25695abfe51209798f3b68fb42cfad7a96356f1f,http://pdfs.semanticscholar.org/2569/5abfe51209798f3b68fb42cfad7a96356f1f.pdf
+250ebcd1a8da31f0071d07954eea4426bb80644c,http://pdfs.semanticscholar.org/2e26/8598d9c2fd9757ba43f7967e57b8a2a871f4.pdf
+2525f336af31178b836e27f8c60056e18f1455d2,http://eeeweba.ntu.edu.sg/computervision/Research%20Papers/2017/TEMPORALLY%20ENHANCED%20IMAGE%20OBJECT%20PROPOSALS%20FOR%20VIDEOS.pdf
+25337690fed69033ef1ce6944e5b78c4f06ffb81,http://pdfs.semanticscholar.org/2533/7690fed69033ef1ce6944e5b78c4f06ffb81.pdf
+25c3cdbde7054fbc647d8be0d746373e7b64d150,http://openaccess.thecvf.com/content_cvpr_2016/papers/Ouyang_ForgetMeNot_Memory-Aware_Forensic_CVPR_2016_paper.pdf
+25bf288b2d896f3c9dab7e7c3e9f9302e7d6806b,http://pdfs.semanticscholar.org/25bf/288b2d896f3c9dab7e7c3e9f9302e7d6806b.pdf
+251281d9cbd207038efbde0515f4077541967239,http://staff.estem-uc.edu.au/roland/files/2009/05/Ramana-Murthy_Radwan_Goecke_ICIP2014_DenseBodyPartTrajectoriesForHumanActionRecognition.pdf
+25d3e122fec578a14226dc7c007fb1f05ddf97f7,https://ibug.doc.ic.ac.uk/media/uploads/documents/pdf17.pdf
+2597b0dccdf3d89eaffd32e202570b1fbbedd1d6,http://pdfs.semanticscholar.org/26f3/03ae1912c16f08523a7d8db926e35114e8f0.pdf
+25c108a56e4cb757b62911639a40e9caf07f1b4f,https://arxiv.org/pdf/1707.09531v2.pdf
+2594a77a3f0dd5073f79ba620e2f287804cec630,https://arxiv.org/pdf/1702.06925v1.pdf
+25e2d3122d4926edaab56a576925ae7a88d68a77,http://pdfs.semanticscholar.org/25e2/d3122d4926edaab56a576925ae7a88d68a77.pdf
+25e05a1ea19d5baf5e642c2a43cca19c5cbb60f8,http://arxiv.org/pdf/1408.6027v2.pdf
+2559b15f8d4a57694a0a33bdc4ac95c479a3c79a,http://vision.ucsd.edu/~carolina/files/mklmnn.pdf
+256ef946b4cecd8889df8d799d0c9175ae986af9,https://pdfs.semanticscholar.org/cd73/8347673151b378f447119fe2665f5c8c2215.pdf
+2574860616d7ffa653eb002bbaca53686bc71cdd,http://pdfs.semanticscholar.org/e01d/f3e6faffad3f304f6c40b133ae1dcf326662.pdf
+2564848f094f7c1cd5e599aa907947b10b5c7df2,http://prr.hec.gov.pk/Thesis/252S.pdf
+25f1f195c0efd84c221b62d1256a8625cb4b450c,http://www.ee.oulu.fi/~gyzhao/Papers/2007/04284844-ICME.pdf
+25885e9292957feb89dcb4a30e77218ffe7b9868,http://pdfs.semanticscholar.org/2588/5e9292957feb89dcb4a30e77218ffe7b9868.pdf
+259706f1fd85e2e900e757d2656ca289363e74aa,http://pdfs.semanticscholar.org/6f98/3e8f26066f2ea486f6653b87154360d948ca.pdf
+25b2811118ed73c64682544fe78023bb8242c709,http://www.researchgate.net/profile/Xueyin_Lin/publication/4193803_Kernel-based_multifactor_analysis_for_image_synthesis_and_recognition/links/00b7d51a9fd4fb9962000000.pdf
+25728e08b0ee482ee6ced79c74d4735bb5478e29,http://pdfs.semanticscholar.org/2572/8e08b0ee482ee6ced79c74d4735bb5478e29.pdf
+258a2dad71cb47c71f408fa0611a4864532f5eba,http://pdfs.semanticscholar.org/258a/2dad71cb47c71f408fa0611a4864532f5eba.pdf
+25127c2d9f14d36f03d200a65de8446f6a0e3bd6,http://pdfs.semanticscholar.org/2512/7c2d9f14d36f03d200a65de8446f6a0e3bd6.pdf
diff --git a/scraper/reports/misc/db_paper_pdf_list.csv b/scraper/reports/misc/db_paper_pdf_list.csv
new file mode 100644
index 00000000..e8a675d3
--- /dev/null
+++ b/scraper/reports/misc/db_paper_pdf_list.csv
@@ -0,0 +1,7615 @@
+Paper ID,PDF URL,IEEE URL,DOI URL,Extra URL
+611961abc4dfc02b67edd8124abb08c449f5280a,http://pdfs.semanticscholar.org/6119/61abc4dfc02b67edd8124abb08c449f5280a.pdf,,https://doi.org/10.5244/C.29.60,http://www.bmva.org/bmvc/2015/papers/paper060/abstract060.pdf
+61831364ddc8db869618f1c7f0ad35ab2ab6bcf7,,,https://doi.org/10.1109/ICIP.2013.6738496,
+61a3c45c9f802f9d5fa8d94fee811e203bac6487,,,https://doi.org/10.1109/TIFS.2016.2567318,
+6144af24ce06af7d8cdd606e79cea5d6e73e2135,,,,
+6159908dec4bc2c1102f416f8a52a31bf3e666a4,,,https://doi.org/10.1109/ICIP.2012.6467431,
+610a4451423ad7f82916c736cd8adb86a5a64c59,http://pdfs.semanticscholar.org/610a/4451423ad7f82916c736cd8adb86a5a64c59.pdf,,,http://www.ijarcsse.com/docs/papers/Volume_4/11_November2014/V4I11-0296.pdf
+6196f4be3b28684f6528b8687adccbdf9ac5c67c,,,,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.267
+6156eaad00aad74c90cbcfd822fa0c9bd4eb14c2,http://pdfs.semanticscholar.org/6156/eaad00aad74c90cbcfd822fa0c9bd4eb14c2.pdf,,https://doi.org/10.1007/978-3-642-33868-7_33,https://pdfs.semanticscholar.org/6156/eaad00aad74c90cbcfd822fa0c9bd4eb14c2.pdf
+612b8eda338fcde9400ea93779741282fe4132d6,,,,
+61ffedd8a70a78332c2bbdc9feba6c3d1fd4f1b8,http://pdfs.semanticscholar.org/61ff/edd8a70a78332c2bbdc9feba6c3d1fd4f1b8.pdf,,,http://www.ece.rice.edu/~eld1/pubs/Dyer_JMLR13.pdf
+61084a25ebe736e8f6d7a6e53b2c20d9723c4608,http://pdfs.semanticscholar.org/6108/4a25ebe736e8f6d7a6e53b2c20d9723c4608.pdf,,https://doi.org/10.1016/j.cviu.2013.09.004,http://briancbecker.com/files/downloads/publications/Ortiz_CVIU13.pdf
+61542874efb0b4c125389793d8131f9f99995671,http://pdfs.semanticscholar.org/6154/2874efb0b4c125389793d8131f9f99995671.pdf,,,https://arxiv.org/pdf/1802.02531v1.pdf
+61f93ed515b3bfac822deed348d9e21d5dffe373,http://dvmmweb.cs.columbia.edu/files/set_hash_wacv17.pdf,,https://doi.org/10.1109/WACV.2017.143,http://www.ee.columbia.edu/ln/dvmm/publications/17/set_hash_wacv17.pdf
+6180bc0816b1776ca4b32ced8ea45c3c9ce56b47,http://pdfs.semanticscholar.org/793e/92ed3f89c8636c8ca1175c1183ba812da245.pdf,,,https://cloudfront.escholarship.org/dist/prd/content/qt5hq130q6/qt5hq130q6.pdf
+61f1b14f04d2fa1d8a556adbdf93050b4637f44b,http://www.caam.rice.edu/~wy1/paperfiles/T.Chen%20W.Yin%20X.Zhou%20D.Comaniciu%20T.Huang%20-%20Total%20variation%20models%20for%20variable%20lighting%20face%20recognition.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2006.195
+612075999e82596f3b42a80e6996712cc52880a3,https://www.etsmtl.ca/Unites-de-recherche/LIVIA/Recherche-et-innovation/Publications/Publications-2017/PID4875389.pdf,,,http://doi.ieeecomputersociety.org/10.1109/AVSS.2017.8078554
+614a7c42aae8946c7ad4c36b53290860f6256441,https://arxiv.org/pdf/1604.02878.pdf,,https://doi.org/10.1109/LSP.2016.2603342,https://zhzhanp.github.io/papers/SPL2016.pdf
+61b22b1016bf13aca8d2e57c4e5e004d423f4865,,,https://doi.org/10.1109/TCYB.2016.2526630,
+616d3d6d82dbc2697d150e879996d878ef74faef,https://www.ll.mit.edu/mission/cybersec/publications/publication-files/full_papers/2016_Khorrami_ICIP_FP.pdf,,https://doi.org/10.1109/ICIP.2016.7532431,http://arxiv.org/pdf/1602.07377v2.pdf
+61bc124537f414f6fcb4d1ff476681b5a0ee222a,,,,http://doi.ieeecomputersociety.org/10.1109/WIW.2016.043
+0d90c992dd08bfb06df50ab5c5c77ce83061e830,,,https://doi.org/10.1109/UIC-ATC.2013.85,
+0d746111135c2e7f91443869003d05cde3044beb,https://arxiv.org/pdf/1603.09364v1.pdf,,https://doi.org/10.1109/ICIP.2016.7532908,http://arxiv.org/abs/1603.09364
+0d7fcdb99dc0d65b510f2b0b09d3d3cfed390261,,,https://doi.org/10.1109/IJCB.2011.6117508,
+0d6d9c4b5dd282b8f29cd3c200df02a00141f0a9,,,https://doi.org/10.1109/SIU.2014.6830193,
+0d88ab0250748410a1bc990b67ab2efb370ade5d,http://signal.ee.bilkent.edu.tr/defevent/abstract/a1795.pdf,http://ieeexplore.ieee.org/document/7078366/,,http://signal.ee.bilkent.edu.tr/defevent/papers/cr1795.pdf
+0db43ed25d63d801ce745fe04ca3e8b363bf3147,http://pdfs.semanticscholar.org/0db4/3ed25d63d801ce745fe04ca3e8b363bf3147.pdf,,,http://arxiv.org/abs/1207.3538
+0daf696253a1b42d2c9d23f1008b32c65a9e4c1e,http://ca.cs.cmu.edu/sites/default/files/132010_CVPR_AU_Long.pdf,,,http://www.humansensing.cs.cmu.edu/projects/seg_au/2010_cvpr_au.pdf
+0d538084f664b4b7c0e11899d08da31aead87c32,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Zhang_Deformable_Part_Descriptors_2013_ICCV_paper.pdf,,,https://people.eecs.berkeley.edu/~nzhang/papers/dpd_poster_Ning.pdf
+0dccc881cb9b474186a01fd60eb3a3e061fa6546,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2B_104_ext.pdf,,https://doi.org/10.1109/CVPR.2015.7299058,http://arxiv.org/abs/1411.7964
+0d467adaf936b112f570970c5210bdb3c626a717,http://pdfs.semanticscholar.org/0d46/7adaf936b112f570970c5210bdb3c626a717.pdf,,,https://lmb.informatik.uni-freiburg.de/Publications/2016/IMKDB16/FlowNet_2_0_Supplemental__arXiv.pdf
+0d6b28691e1aa2a17ffaa98b9b38ac3140fb3306,http://pdfs.semanticscholar.org/0d6b/28691e1aa2a17ffaa98b9b38ac3140fb3306.pdf,,,http://www.ijcsit.com/docs/Volume%206/vol6issue01/ijcsit2015060131.pdf
+0d9815f62498db21f06ee0a9cc8b166acc93888e,,,https://doi.org/10.1016/j.neucom.2007.12.018,
+0d8cec1b3f9b6e25d9d31eeb54d8894a1f2ef84f,,,https://doi.org/10.1109/LSP.2018.2810121,
+0de91641f37b0a81a892e4c914b46d05d33fd36e,https://ibug.doc.ic.ac.uk/media/uploads/documents/raps.pdf,,,http://eprints.eemcs.utwente.nl/25816/01/Pantic_RAPS_Robust_and_Efficient_Automatic_Construction.pdf
+0d3ff34d8490a9a53de1aac1dea70172cb02e013,,,https://doi.org/10.1109/ICPR.2014.542,
+0df0d1adea39a5bef318b74faa37de7f3e00b452,https://scalable.mpi-inf.mpg.de/files/2015/09/zhang_CVPR15.pdf,,,https://arxiv.org/pdf/1504.02863v1.pdf
+0deea943ac4dc1be822c02f97d0c6c97e201ba8d,,,,
+0de1450369cb57e77ef61cd334c3192226e2b4c2,,,https://doi.org/10.1109/BTAS.2017.8272747,
+0d3bb75852098b25d90f31d2f48fd0cb4944702b,http://stefan.winklerbros.net/Publications/icip2014a.pdf,,https://doi.org/10.1109/ICIP.2014.7025068,http://stefan.winkler.net/Publications/icip2014a.pdf
+0d7652652c742149d925c4fb5c851f7c17382ab8,,,https://doi.org/10.1016/j.neucom.2015.05.057,
+0da3c329ae14a4032b3ba38d4ea808cf6d115c4a,,,https://doi.org/10.1007/s00138-015-0709-7,
+0db8e6eb861ed9a70305c1839eaef34f2c85bbaf,https://arxiv.org/pdf/1704.06244v1.pdf,,,http://www.nec-labs.com/uploads/images/Department-Images/MediaAnalytics/papers/iccv17_face_frontalization.pdf
+0d902541c26f03ff95221e0e71d67c39e094a61d,https://arxiv.org/pdf/1506.05085v1.pdf,,https://doi.org/10.1109/TNNLS.2017.2651018,http://arxiv.org/pdf/1506.05085v2.pdf
+0d75c7d9a00f859cffe7d0bd78dd35d0b4bc7fa6,,,https://doi.org/10.1109/LSP.2005.863661,
+0db1207563a66343cc7cb7b54356c767fc8b876c,,,,
+0d0b880e2b531c45ee8227166a489bf35a528cb9,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Zhang_Structure_Preserving_Object_2013_CVPR_paper.pdf,,,http://www.cs.ucf.edu/courses/cap6412/spr2014/papers/SPOT-CVPR2013.pdf
+0d3882b22da23497e5de8b7750b71f3a4b0aac6b,http://pdfs.semanticscholar.org/0d38/82b22da23497e5de8b7750b71f3a4b0aac6b.pdf,,,http://www.affective-science.org/pubs/2010/Barrett_Kensinger_2010.pdf
+0dbf4232fcbd52eb4599dc0760b18fcc1e9546e9,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553740.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2013.6553740
+0d760e7d762fa449737ad51431f3ff938d6803fe,https://arxiv.org/pdf/1705.05922v1.pdf,,,https://vision.cornell.edu/se3/wp-content/uploads/2017/07/LCDet_CVPRW.pdf
+0d3068b352c3733c9e1cc75e449bf7df1f7b10a4,http://users.cecs.anu.edu.au/~adhall/Dhall_ACII_DC_2013.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ACII.2013.111
+0dd72887465046b0f8fc655793c6eaaac9c03a3d,http://pdfs.semanticscholar.org/e112/df5539821a00dfa818617bf95f901f016763.pdf,,https://doi.org/10.1007/978-3-319-16811-1_6,https://pdfs.semanticscholar.org/0dd7/2887465046b0f8fc655793c6eaaac9c03a3d.pdf
+0d087aaa6e2753099789cd9943495fbbd08437c0,http://pdfs.semanticscholar.org/beab/b0d9d30871d517c5d915cf852f7f5293f52f.pdf,,,http://arxiv.org/abs/1712.00311
+0d98750028ea7b84b86e6fec3e67d61e4f690d09,,,https://doi.org/10.1109/ACSSC.2015.7421092,
+0d5824e14593bcb349d636d255ba274f98bbb88f,http://www.researchgate.net/profile/Claus_Neubauer/publication/224716248_A_Variational_Bayesian_Approach_for_Classification_with_Corrupted_Inputs/links/00b7d52dd1f690da64000000.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2007.383102
+0d8415a56660d3969449e77095be46ef0254a448,http://www.lv-nus.org/papers/2004/2004_C_6.pdf,,https://doi.org/10.1109/TCSVT.2007.893837,
+0dfa460a35f7cab4705726b6367557b9f7842c65,https://arxiv.org/pdf/1504.01561v1.pdf,,,http://doi.acm.org/10.1145/2733373.2806222
+0db371a6bc8794557b1bffc308814f53470e885a,,,https://doi.org/10.1007/s13042-015-0380-3,
+0d3b167b52e9f0bf509e3af003ea320e6070b665,,,,
+0d14261e69a4ad4140ce17c1d1cea76af6546056,http://pdfs.semanticscholar.org/0d14/261e69a4ad4140ce17c1d1cea76af6546056.pdf,,https://doi.org/10.1007/978-3-642-17289-2_13,https://personalpages.manchester.ac.uk/staff/timothy.f.cootes/Projects/Toyota/Downloads/Caunce_ISVC10.pdf
+0dbacb4fd069462841ebb26e1454b4d147cd8e98,http://poseidon.csd.auth.gr/papers/PUBLISHED/CONFERENCE/pdf/Nikitidis11c.pdf,,https://doi.org/10.1109/ACPR.2011.6166712,http://ibug.doc.ic.ac.uk/media/uploads/documents/acpr2011.pdf
+0db36bf08140d53807595b6313201a7339470cfe,http://www.cfar.umd.edu/~rama/Publications/Shroff_CVPR_2010.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2010.5539864
+0d781b943bff6a3b62a79e2c8daf7f4d4d6431ad,https://cs.uwaterloo.ca/~jhoey/papers/DhallICMI16.pdf,,,http://doi.acm.org/10.1145/2993148.2997638
+0d735e7552af0d1dcd856a8740401916e54b7eee,http://pdfs.semanticscholar.org/915f/f5da6658e800eb7ec1c8f3f26281e18d3cbf.pdf,,,http://ivizlab.sfu.ca/arya/Papers/Others/Neural%20Network%20That%20Categorizes%20Facial%20Expressions.pdf
+0d06b3a4132d8a2effed115a89617e0a702c957a,http://arxiv.org/pdf/1605.08680v1.pdf,,https://doi.org/10.1109/IJCNN.2016.7727496,https://arxiv.org/pdf/1605.08680v1.pdf
+0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e,http://pdfs.semanticscholar.org/0d2d/d4fc016cb6a517d8fb43a7cc3ff62964832e.pdf,,https://doi.org/10.1016/j.patrec.2017.03.006,https://arxiv.org/pdf/1602.06149v1.pdf
+0d4d8ce029deead6f2ce7075047aa645299ddd41,,,,
+0d1d9a603b08649264f6e3b6d5a66bf1e1ac39d2,http://pdfs.semanticscholar.org/0d1d/9a603b08649264f6e3b6d5a66bf1e1ac39d2.pdf,,,http://digitalcommons.unl.edu/cgi/viewcontent.cgi?article=1339&context=usarmyresearch
+951368a1a8b3c5cd286726050b8bdf75a80f7c37,https://vision.cornell.edu/se3/wp-content/uploads/2014/09/osb_iccv09_cam.pdf,,,http://vision.cornell.edu/se3/wp-content/uploads/2014/09/osb_iccv09_cam.pdf
+95f1790da3d0a4a5310a050512ce355b3c5aac86,,,https://doi.org/10.1109/ICIP.2016.7533142,
+956e9b69b3366ed3e1670609b53ba4a7088b8b7e,http://pdfs.semanticscholar.org/956e/9b69b3366ed3e1670609b53ba4a7088b8b7e.pdf,,,http://yqsong.googlepages.com/article.pdf
+956317de62bd3024d4ea5a62effe8d6623a64e53,https://research-repository.griffith.edu.au/bitstream/handle/10072/17889/47024_1.pdf;jsessionid=2146D7EB83BAD65DE653E0056477D61A?sequence=1,,https://doi.org/10.1109/DICTA.2007.4426825,
+95023e3505263fac60b1759975f33090275768f3,,,,http://doi.acm.org/10.1145/2856767.2856770
+951f21a5671a4cd14b1ef1728dfe305bda72366f,http://pdfs.semanticscholar.org/951f/21a5671a4cd14b1ef1728dfe305bda72366f.pdf,,,https://www.ijsr.net/archive/v3i11/T0NUMTQ4Mjg=.pdf
+95f26d1c80217706c00b6b4b605a448032b93b75,http://pdfs.semanticscholar.org/95f2/6d1c80217706c00b6b4b605a448032b93b75.pdf,,,http://www.researchgate.net/profile/Jin-Xing_Liu/publication/230646031_New_robust_face_recognition_methods_based_on_linear_regression/links/004635249518e588c8000000.pdf
+95f12d27c3b4914e0668a268360948bce92f7db3,http://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf,,https://doi.org/10.1007/978-3-642-33712-3_49,http://www.ifp.illinois.edu/~vuongle2/helen/eccv2012_helen_final.pdf
+95aef5184b89daebd0c820c8102f331ea7cae1ad,http://www.dia.fi.upm.es/~pcr/publications/paa2008.pdf,,https://doi.org/10.1007/s10044-007-0084-8,http://www.springerlink.com/content/q075h33723m475k1/fulltext.pdf
+952138ae6534fad573dca0e6b221cdf042a36412,,,,http://doi.ieeecomputersociety.org/10.1109/DICTA.2005.38
+9547a7bce2b85ef159b2d7c1b73dea82827a449f,http://tdlc.ucsd.edu/research/publications/Wu_Bartlett_Movellan_Facial_Expression_2010.pdf,,https://doi.org/10.1109/CVPRW.2010.5543267,http://mplab.ucsd.edu/~marni/pubs/Wu_CVPR_2010.pdf
+9513503867b29b10223f17c86e47034371b6eb4f,http://pdfs.semanticscholar.org/9513/503867b29b10223f17c86e47034371b6eb4f.pdf,,https://doi.org/10.1007/978-3-642-10520-3_105,https://users.isy.liu.se/en/cvl/zografos/publications/ISVC09.pdf
+955e2a39f51c0b6f967199942d77625009e580f9,http://pdfs.semanticscholar.org/955e/2a39f51c0b6f967199942d77625009e580f9.pdf,,,http://www.cs.bilkent.edu.tr/tech-reports/2010/BU-CE-1012.pdf
+956c634343e49319a5e3cba4f2bd2360bdcbc075,http://www.cse.ust.hk/~jamesk/papers/tsmc06.pdf,,,http://www.csee.wvu.edu/~richas/ML-Papers/incremental%20PCA.pdf
+950bf95da60fd4e77d5159254fed906d5ed5fbcb,,,,http://doi.ieeecomputersociety.org/10.1109/MMUL.2014.24
+95d0cd902ff0fa253b6757ba3c8e09ce25b494cc,,,,
+9590b09c34fffda08c8f54faffa379e478f84b04,,,https://doi.org/10.1109/TNNLS.2013.2275170,
+95008358a631a10ee3c24bfa2bf0c39d136a916e,,,,
+958c599a6f01678513849637bec5dc5dba592394,http://pdfs.semanticscholar.org/958c/599a6f01678513849637bec5dc5dba592394.pdf,,,https://arxiv.org/pdf/1710.07455v1.pdf
+95e7cf27a8ee62b63ed9d1ecb02a7016e9a680a6,,,https://doi.org/10.1007/s11063-013-9322-9,
+59be98f54bb4ed7a2984dc6a3c84b52d1caf44eb,http://www.ccvcl.org/~wei/pdf/CNNExpRecog_CamReady.pdf,,https://doi.org/10.1109/MVA.2015.7153185,http://visionlab.engr.ccny.cuny.edu/ccvcl/assets/publications/140/paper/CNNExpRecog-MVA15_0029_Final.pdf
+591a737c158be7b131121d87d9d81b471c400dba,http://affect.media.mit.edu/pdfs/10.McDuff-etal-Affect-2010.pdf,,https://doi.org/10.1109/CVPRW.2010.5543833,
+59690814e916d1c0e7aa9190678ba847cbd0046f,http://figment.cse.usf.edu/~sfefilat/data/papers/ThBCT8.7.pdf,,https://doi.org/10.1109/ICPR.2008.4761296,
+59bfeac0635d3f1f4891106ae0262b81841b06e4,http://pdfs.semanticscholar.org/59bf/eac0635d3f1f4891106ae0262b81841b06e4.pdf,,,http://www.cse.ucsc.edu/~milanfar/publications/journal/TIFS_Revised.pdf
+59cdafed4eeb8ff7c9bb2d4ecd0edeb8a361ffc1,http://pdfs.semanticscholar.org/59cd/afed4eeb8ff7c9bb2d4ecd0edeb8a361ffc1.pdf,,,http://rspublication.com/ijca/JUNE12/37.pdf
+590628a9584e500f3e7f349ba7e2046c8c273fcf,http://pdfs.semanticscholar.org/6893/c573d7abd3847d6ea2f0e79b6924ca124372.pdf,,,http://arxiv.org/pdf/1603.06059v3.pdf
+593234ba1d2e16a887207bf65d6b55bbc7ea2247,http://pdfs.semanticscholar.org/73c4/47ea9f75b0ffbdd35c957aed88fe80b2ac07.pdf,,https://doi.org/10.1007/978-3-642-35749-7_2,http://www.researchgate.net/profile/Marcus_Rohrbach/publication/260106935_Combining_Language_Sources_and_Robust_Semantic_Relatedness_for_Attribute-Based_Knowledge_Transfer/links/54fb60070cf270426d0dcbfe.pdf
+5957936195c10521dadc9b90ca9b159eb1fc4871,,,https://doi.org/10.1109/TCE.2016.7838098,
+59fe66eeb06d1a7e1496a85f7ffc7b37512cd7e5,,,,http://doi.ieeecomputersociety.org/10.1109/ICME.2016.7552862
+59eefa01c067a33a0b9bad31c882e2710748ea24,http://pdfs.semanticscholar.org/59ee/fa01c067a33a0b9bad31c882e2710748ea24.pdf,,,https://arxiv.org/pdf/1708.09580v1.pdf
+59e2037f5079794cb9128c7f0900a568ced14c2a,https://arxiv.org/pdf/1704.02231v1.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.71
+592370b4c7b58a2a141e507f3a2cc5bbd247a62e,,,https://doi.org/10.1109/IJCNN.2017.7965911,
+59b6ff409ae6f57525faff4b369af85c37a8dd80,,,,http://doi.ieeecomputersociety.org/10.1109/ISM.2017.28
+59f788c69c2ce520fd6f0b80d01aca72f7f8d859,,,,
+59c9d416f7b3d33141cc94567925a447d0662d80,http://pdfs.semanticscholar.org/59c9/d416f7b3d33141cc94567925a447d0662d80.pdf,,,http://people.mpi-inf.mpg.de/~skaraev/master_thesis.pdf
+59bece468ed98397d54865715f40af30221aa08c,https://bib.irb.hr/datoteka/833608.BiForD2016_11.pdf,,https://doi.org/10.1109/MIPRO.2016.7522352,
+59a35b63cf845ebf0ba31c290423e24eb822d245,http://biometrics.cse.msu.edu/Publications/Face/Klumetal_FaceSketchID_TIFS2014.pdf,,https://doi.org/10.1109/TIFS.2014.2360825,http://biometrics.cse.msu.edu/Publications/Face/Klumetal_FaceSketchID_TechReport_MSU-CSE-14-6.pdf
+59f325e63f21b95d2b4e2700c461f0136aecc171,http://nlpr-web.ia.ac.cn/2011papers/gjhy/gh13.pdf,,https://doi.org/10.1109/ICIP.2011.6116296,http://www.cbsr.ia.ac.cn/users/scliao/papers/Kang-ICIP-2011-KernelSparseLBP.pdf
+59420fd595ae745ad62c26ae55a754b97170b01f,http://pdfs.semanticscholar.org/5942/0fd595ae745ad62c26ae55a754b97170b01f.pdf,,https://doi.org/10.1007/978-3-642-35749-7_5,http://cs.stanford.edu/groups/vision/pdf/LiSuLimFeiFei_ECCV2010.pdf
+59c21f5a24d0b408d528054b016915236bb85bf2,,,,
+5981c309bd0ffd849c51b1d8a2ccc481a8ec2f5c,,,https://doi.org/10.1109/ICT.2017.7998256,
+5922e26c9eaaee92d1d70eae36275bb226ecdb2e,http://pdfs.semanticscholar.org/5922/e26c9eaaee92d1d70eae36275bb226ecdb2e.pdf,,https://doi.org/10.3233/978-1-61499-578-4-153,http://www.uv.es/grimo/publications/ccia2015b.pdf
+59d8fa6fd91cdb72cd0fa74c04016d79ef5a752b,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Zafeiriou_The_Menpo_Facial_CVPR_2017_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2017.263
+5951e9e13ff99f97f301a336f24a14d80459c659,,,https://doi.org/10.1016/j.neucom.2017.09.009,
+59e75aad529b8001afc7e194e21668425119b864,http://pdfs.semanticscholar.org/59e7/5aad529b8001afc7e194e21668425119b864.pdf,,https://doi.org/10.1007/978-3-642-15552-9_55,http://www.cs.drexel.edu/~kon/publication/GOxholm_ECCV10_preprint.pdf
+5990c2e78394388e8a81a4b52baf35c13b22d2c9,,,,
+594ec0a7839885169c65133cfe50164d4cc74b5c,,,,
+59d45281707b85a33d6f50c6ac6b148eedd71a25,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Cheng_Rank_Minimization_across_2013_ICCV_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2013.77
+59319c128c8ac3c88b4ab81088efe8ae9c458e07,http://pdfs.semanticscholar.org/5931/9c128c8ac3c88b4ab81088efe8ae9c458e07.pdf,,,https://arxiv.org/pdf/1603.04550v1.pdf
+59031a35b0727925f8c47c3b2194224323489d68,http://www4.comp.polyu.edu.hk/~cslzhang/paper/conf/ICCV13/SVDL.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2013.91
+926c67a611824bc5ba67db11db9c05626e79de96,http://www.ee.columbia.edu/ln/dvmm/publications/09/xu_ebsl.pdf,,,http://research.microsoft.com/en-us/UM/people/stevelin/papers/pami09xu.pdf
+923ede53b0842619831e94c7150e0fc4104e62f7,http://mirlab.org/conference_papers/International_Conference/ICASSP%202016/pdfs/0001293.pdf,,https://doi.org/10.1109/ICASSP.2016.7471885,
+9227c1a5b26556b9c34015b3ea5f9ae5f50e9b23,,,https://doi.org/10.1109/FCV.2015.7103729,
+9264b390aa00521f9bd01095ba0ba4b42bf84d7e,http://pdfs.semanticscholar.org/9264/b390aa00521f9bd01095ba0ba4b42bf84d7e.pdf,,https://doi.org/10.1007/978-3-642-33715-4_16,http://web.unbc.ca/~chenl/papers-new/ECCVPaper1208.pdf
+920a92900fbff22fdaaef4b128ca3ca8e8d54c3e,http://pdfs.semanticscholar.org/920a/92900fbff22fdaaef4b128ca3ca8e8d54c3e.pdf,,,http://infoscience.epfl.ch/record/163450/files/ParametricPTM.pdf
+9215d36c501d6ee57d74c1eeb1475efd800d92d3,,,,
+9277f1c5161bb41d4ed808c83d53509c8a1a2bdd,,,,
+924b14a9e36d0523a267293c6d149bca83e73f3b,http://pdfs.semanticscholar.org/924b/14a9e36d0523a267293c6d149bca83e73f3b.pdf,,,http://www.psychnology.org/File/PNJ5(2)/PSYCHNOLOGY_JOURNAL_5_2_AOYAMA.pdf
+9282239846d79a29392aa71fc24880651826af72,http://pdfs.semanticscholar.org/9282/239846d79a29392aa71fc24880651826af72.pdf,,https://doi.org/10.1186/1687-5281-2014-14,http://ibug.doc.ic.ac.uk/media/uploads/documents/antonakos_classification_2014.pdf
+92115b620c7f653c847f43b6c4ff0470c8e55dab,http://pdfs.semanticscholar.org/a77c/798d06060ece81c620458e4586819e75ae15.pdf,,https://doi.org/10.1007/978-3-319-10602-1_27,https://lmb.informatik.uni-freiburg.de/Publications/2014/DB14/ECCV_BenjaminDrayer.pdf
+92c4636962b719542deb984bd2bf75af405b574c,http://www.umiacs.umd.edu/~arijit/projects/Active_clustering/active_clustering_ijcv.pdf,,https://doi.org/10.1007/s11263-013-0680-6,http://www.umiacs.umd.edu/~arijit/active_clustering_ijcv.pdf
+92c2dd6b3ac9227fce0a960093ca30678bceb364,https://aran.library.nuigalway.ie/bitstream/handle/10379/1350/On%20color%20texture%20normalization%20for%20active%20appearance%20models.pdf?isAllowed=y&sequence=1,,https://doi.org/10.1109/TIP.2009.2017163,http://www.researchgate.net/profile/Peter_Corcoran/publication/24311132_On_color_texture_normalization_for_active_appearance_models/links/02bfe5118f5300f4dc000000.pdf
+9255d3b2bfee4aaae349f68e67c76a077d2d07ad,,,https://doi.org/10.1109/TIP.2017.2713041,
+922838dd98d599d1d229cc73896d55e7a769aa7c,http://www.cs.umass.edu/~elm/papers/HuangCVPR12.pdf,,,http://cs.umass.edu/~elm/papers/HuangCVPR12.pdf
+9294739e24e1929794330067b84f7eafd286e1c8,http://pdfs.semanticscholar.org/9294/739e24e1929794330067b84f7eafd286e1c8.pdf,,https://doi.org/10.1007/11573548_2,https://static.aminer.org/pdf/PDF/000/018/706/expression_recognition_using_elastic_graph_matching.pdf
+92fada7564d572b72fd3be09ea3c39373df3e27c,http://pdfs.semanticscholar.org/b8a4/f51a85fb801e1a5f04c213725d60133233a0.pdf,,https://doi.org/10.1016/j.patrec.2004.05.013,http://www.busim.ee.boun.edu.tr/~sankur/SankurFolder/Patrec_ICA_Face.pdf
+927ad0dceacce2bb482b96f42f2fe2ad1873f37a,http://pdfs.semanticscholar.org/927a/d0dceacce2bb482b96f42f2fe2ad1873f37a.pdf,,,http://cdn.intechopen.com/pdfs/10204/InTech-Interest_point_based_face_recognition_system.pdf
+92de9a54515f4ac8cc8e4e6b0dfab20e5e6bb09d,,,https://doi.org/10.1109/ICIP.2016.7533062,
+929bd1d11d4f9cbc638779fbaf958f0efb82e603,http://pdfs.semanticscholar.org/929b/d1d11d4f9cbc638779fbaf958f0efb82e603.pdf,,https://doi.org/10.1007/978-3-642-17534-3_72,http://www.researchgate.net/profile/Dian_Tjondronegoro/publication/221139535_Improving_the_Performance_of_Facial_Expression_Recognition_Using_Dynamic_Subtle_and_Regional_Features/links/00b7d51f84badba76c000000.pdf
+9213a415d798426c8d84efc6d2a69a2cbfa2af84,,,https://doi.org/10.1016/j.cviu.2013.03.008,
+0c741fa0966ba3ee4fc326e919bf2f9456d0cd74,http://cs.nju.edu.cn/_upload/tpl/01/0b/267/template267/zhouzh.files/publication/aaai10LLD.pdf,,,http://www.aaai.org/ocs/index.php/AAAI/AAAI10/paper/download/1626/2018
+0c435e7f49f3e1534af0829b7461deb891cf540a,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Wang_Capturing_Global_Semantic_2013_ICCV_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2013.410
+0cb7e4c2f6355c73bfc8e6d5cdfad26f3fde0baf,http://pdfs.semanticscholar.org/0cb7/e4c2f6355c73bfc8e6d5cdfad26f3fde0baf.pdf,,,http://aircconline.com/ijaia/V5N3/5314ijaia01.pdf
+0c30f6303dc1ff6d05c7cee4f8952b74b9533928,http://humansensing.cs.cmu.edu/sites/default/files/15parda.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2010.5539925
+0c378c8dcf707145e1e840a9951519d4176a301f,,,https://doi.org/10.1109/ICARCV.2010.5707434,
+0ccc535d12ad2142a8310d957cc468bbe4c63647,http://arxiv.org/pdf/1510.03979v1.pdf,,,http://wangzheallen.github.io/papers/WangWGQ_ChaLearnLAP15_slide.pdf
+0c8a0a81481ceb304bd7796e12f5d5fa869ee448,http://pdfs.semanticscholar.org/0c8a/0a81481ceb304bd7796e12f5d5fa869ee448.pdf,,https://doi.org/10.5391/IJFIS.2010.10.2.095,http://ocean.kisti.re.kr/downfile/volume/kfis/E1FLA5/2010/v10n2/E1FLA5_2010_v10n2_95.pdf
+0c36c988acc9ec239953ff1b3931799af388ef70,http://pdfs.semanticscholar.org/0c36/c988acc9ec239953ff1b3931799af388ef70.pdf,,,https://arxiv.org/pdf/1802.02142v1.pdf
+0c5ddfa02982dcad47704888b271997c4de0674b,http://pdfs.semanticscholar.org/0c5d/dfa02982dcad47704888b271997c4de0674b.pdf,,,http://drum.lib.umd.edu/bitstream/handle/1903/11949/Gopalan_umd_0117E_12506.pdf?isAllowed=y&sequence=1
+0c79a39a870d9b56dc00d5252d2a1bfeb4c295f1,http://faculty.iiit.ac.in/~anoop/papers/Vijay2014Face.pdf,,https://doi.org/10.1109/ICPR.2014.61,
+0cccf576050f493c8b8fec9ee0238277c0cfd69a,http://pdfs.semanticscholar.org/0ccc/f576050f493c8b8fec9ee0238277c0cfd69a.pdf,,,https://arxiv.org/pdf/1704.01358v1.pdf
+0cdb49142f742f5edb293eb9261f8243aee36e12,https://arxiv.org/pdf/1303.2783v1.pdf,,,http://conradsanderson.id.au/pdfs/sanderson_salient_local_descriptors_avss_2012.pdf
+0c069a870367b54dd06d0da63b1e3a900a257298,http://pdfs.semanticscholar.org/cdb8/36785579a4ea3d0eff26dbba8cf845a347d2.pdf,,https://doi.org/10.1007/978-3-642-21738-8_2,http://hal.inria.fr/docs/00/60/96/81/PDF/mrbm_heess.pdf
+0c75c7c54eec85e962b1720755381cdca3f57dfb,https://webpages.uncc.edu/~szhang16/paper/PAMI_face_landmark.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2509999
+0c65226edb466204189b5aec8f1033542e2c17aa,,,https://doi.org/10.1109/ICIP.2017.8296997,
+0c167008408c301935bade9536084a527527ec74,http://www.micc.unifi.it/publications/2006/BDN06/bertini_nunziati-mm06.pdf,,,http://doi.acm.org/10.1145/1180639.1180778
+0c1d85a197a1f5b7376652a485523e616a406273,http://openaccess.thecvf.com/content_cvpr_2017/papers/Hayat_Joint_Registration_and_CVPR_2017_paper.pdf,,,http://staff.estem-uc.edu.au/munawar/files/2017/05/CVPR17.pdf
+0ca66283f4fb7dbc682f789fcf6d6732006befd5,http://pdfs.semanticscholar.org/0ca6/6283f4fb7dbc682f789fcf6d6732006befd5.pdf,,,http://www.rci.rutgers.edu/~tw268/pubs/spie2015_activeDL.pdf
+0c7f27d23a162d4f3896325d147f412c40160b52,http://pdfs.semanticscholar.org/0c7f/27d23a162d4f3896325d147f412c40160b52.pdf,,,http://www.cs.cmu.edu/~ILIM/publications/PDFs/N-THESIS03.pdf
+0c20fd90d867fe1be2459223a3cb1a69fa3d44bf,http://pdfs.semanticscholar.org/0c20/fd90d867fe1be2459223a3cb1a69fa3d44bf.pdf,,https://doi.org/10.1007/978-3-642-40602-7_11,http://gravis.cs.unibas.ch/publications/2013/GCPR-Schoenborn2013.pdf
+0c247ac797a5d4035469abc3f9a0a2ccba49f4d8,,,https://doi.org/10.1109/ICMLC.2011.6016715,
+0cf1287c8fd41dcef4ac03ebeab20482f02dce20,,,https://doi.org/10.1109/MSN.2016.032,
+0c2370e156a4eb8d84a5fdb049c5a894c3431f1c,,,https://doi.org/10.1109/CIBIM.2014.7015437,
+0c2875bb47db3698dbbb3304aca47066978897a4,http://slazebni.cs.illinois.edu/publications/iccv17_situation.pdf,,,https://arxiv.org/pdf/1703.06233v2.pdf
+0c3f7272a68c8e0aa6b92d132d1bf8541c062141,http://pdfs.semanticscholar.org/0c3f/7272a68c8e0aa6b92d132d1bf8541c062141.pdf,,,https://pdfs.semanticscholar.org/0c3f/7272a68c8e0aa6b92d132d1bf8541c062141.pdf
+0cbc4dcf2aa76191bbf641358d6cecf38f644325,http://pdfs.semanticscholar.org/0cbc/4dcf2aa76191bbf641358d6cecf38f644325.pdf,,,http://niclane.org/pubs/visage.pdf
+0c0db39cac8cb76b52cfdbe10bde1c53d68d202f,,,,http://doi.acm.org/10.1145/3123266.3123334
+0c1314d98bb6b99af00817644c1803dbc0fb5ff5,,,,http://doi.ieeecomputersociety.org/10.1109/BigMM.2015.29
+0ce8a45a77e797e9d52604c29f4c1e227f604080,http://pdfs.semanticscholar.org/0ce8/a45a77e797e9d52604c29f4c1e227f604080.pdf,,,http://airccse.org/journal/ijcseit/papers/3613ijcseit01.pdf
+0ce3a786aed896d128f5efdf78733cc675970854,http://pdfs.semanticscholar.org/3689/2b6bb4848a9c21158b8eded7f14a6654dd7e.pdf,,https://doi.org/10.1007/978-3-319-10593-2_9,http://luchaochao.me/papers/LearnedBayesian.pdf
+0cc96359b1edba28d33fe9e663079c5674744672,,,,
+0c93cb1af3bba1bd90a03e921ff2d55acf35c01f,http://www.researchgate.net/profile/Mohammed_Bennamoun/publication/220928947_Robust_Regression_for_Face_Recognition/links/542157f20cf203f155c65a23.pdf,,https://doi.org/10.1016/j.patcog.2011.07.003,https://www.researchgate.net/profile/Mohammed_Bennamoun/publication/220928947_Robust_Regression_for_Face_Recognition/links/542157f20cf203f155c65a23.pdf
+0cf7741e1fdb11a77cdf39b4dda8c65a62af4f23,http://vipl.ict.ac.cn/sites/default/files/papers/files/2013_TIP_mnkan_Learning%20Prototype%20Hyperplanes%20for%20Face%20Verification%20in%20the%20Wild.pdf,,https://doi.org/10.1109/TIP.2013.2256918,
+0c54e9ac43d2d3bab1543c43ee137fc47b77276e,http://pdfs.semanticscholar.org/0c54/e9ac43d2d3bab1543c43ee137fc47b77276e.pdf,,https://doi.org/10.1016/j.image.2016.06.004,http://arxiv.org/abs/1606.02792
+0c5afb209b647456e99ce42a6d9d177764f9a0dd,http://pdfs.semanticscholar.org/49ee/5e1f1cfa45aa105e4120e6b7fb5b14cc2877.pdf,,,http://www.pitt.edu/~jeffcohn/biblio/fulltext/2001/Tian_Recognizing.pdf
+0c59071ddd33849bd431165bc2d21bbe165a81e0,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Oh_Person_Recognition_in_ICCV_2015_paper.pdf,,,http://arxiv.org/abs/1509.03502
+0c377fcbc3bbd35386b6ed4768beda7b5111eec6,http://www.ecse.rpi.edu/~qji/Papers/face_exp_pami.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2008.293
+0c12cbb9b9740dfa2816b8e5cde69c2f5a715c58,http://openaccess.thecvf.com/content_cvpr_2017/papers/Zhao_Memory-Augmented_Attribute_Manipulation_CVPR_2017_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.652
+0cb2dd5f178e3a297a0c33068961018659d0f443,http://www.vislab.ucr.edu/Biometrics2017/program_slides/Noblis_CVPRW_IJBB.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2017.87
+0cd8895b4a8f16618686f622522726991ca2a324,http://pdfs.semanticscholar.org/0cd8/895b4a8f16618686f622522726991ca2a324.pdf,,https://doi.org/10.1007/11864349_65,https://infoscience.epfl.ch/record/91005/files/Antonini2006_1495.pdf
+0cf7da0df64557a4774100f6fde898bc4a3c4840,https://www2.eecs.berkeley.edu/Research/Projects/CS/vision/shape/berg-cvpr05.pdf,,,http://www.cs.cmu.edu/~efros/courses/LBMV07/Papers/berg-cvpr-05.pdf
+0cbe059c181278a373292a6af1667c54911e7925,http://pdfs.semanticscholar.org/ea4e/15a4cf256599d11291040ad5e487f55ae514.pdf,,https://doi.org/10.1049/iet-cvi.2015.0296,https://arxiv.org/pdf/1508.04028v1.pdf
+0c4659b35ec2518914da924e692deb37e96d6206,https://cs.uwaterloo.ca/~jhoey/teaching/cs793/papers/OrchardTIP10.pdf,,https://doi.org/10.1109/TIP.2009.2039371,https://cs.uwaterloo.ca/~mannr/papers/OrchardMann2010.pdf
+0c6e29d82a5a080dc1db9eeabbd7d1529e78a3dc,http://pdfs.semanticscholar.org/0c6e/29d82a5a080dc1db9eeabbd7d1529e78a3dc.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2003.1211408
+0c66d6162695ecbfc248074f58ced10d70a359ac,,,,
+0ced7b814ec3bb9aebe0fcf0cac3d78f36361eae,http://pdfs.semanticscholar.org/0ced/7b814ec3bb9aebe0fcf0cac3d78f36361eae.pdf,,,http://ijcsmc.com/docs/papers/January2017/V6I1201739.pdf
+0c6a18b0cee01038eb1f9373c369835b236373ae,,,https://doi.org/10.1007/s11042-017-4359-9,
+0c53ef79bb8e5ba4e6a8ebad6d453ecf3672926d,https://arxiv.org/pdf/1609.00153v1.pdf,,https://doi.org/10.1109/TIP.2017.2666739,http://www.vision.ee.ethz.ch/en/publications/papers/articles/eth_biwi_01377.pdf
+0c60eebe10b56dbffe66bb3812793dd514865935,http://arxiv.org/pdf/1502.07209.pdf,,,http://www.yugangjiang.info/publication/15VideoDNN-arXiv.pdf
+0c05f60998628884a9ac60116453f1a91bcd9dda,http://pdfs.semanticscholar.org/7b19/80d4ac1730fd0145202a8cb125bf05d96f01.pdf,,,http://arxiv.org/abs/1610.05377
+660b73b0f39d4e644bf13a1745d6ee74424d4a16,http://pdfs.semanticscholar.org/660b/73b0f39d4e644bf13a1745d6ee74424d4a16.pdf,,,https://cdn.intechopen.com/pdfs-wm/17177.pdf
+66d512342355fb77a4450decc89977efe7e55fa2,http://pdfs.semanticscholar.org/66d5/12342355fb77a4450decc89977efe7e55fa2.pdf,,,https://openreview.net/pdf?id=SJzmJEq6W
+66aad5b42b7dda077a492e5b2c7837a2a808c2fa,http://pdfs.semanticscholar.org/66aa/d5b42b7dda077a492e5b2c7837a2a808c2fa.pdf,,https://doi.org/10.1007/11608288_20,http://www.researchgate.net/profile/Zhong_Jin3/publication/221383625_A_Novel_PCA-Based_Bayes_Classifier_and_Face_Analysis/links/02bfe5102998036ea9000000.pdf
+66b9d954dd8204c3a970d86d91dd4ea0eb12db47,http://pdfs.semanticscholar.org/f3ec/7e58da49f39b807ff1c98d0bf574ef5f0720.pdf,,,http://www.ri.cmu.edu/pub_files/pub3/tian_ying_li_2002_1/tian_ying_li_2002_1.pdf
+6643a7feebd0479916d94fb9186e403a4e5f7cbf,http://pdfs.semanticscholar.org/6643/a7feebd0479916d94fb9186e403a4e5f7cbf.pdf,,,http://staffhome.ecm.uwa.edu.au/~00053650/papers/chapter8-3Dface.pdf
+66ec085c362f698b40d6e0e7b10629462280c062,,,https://doi.org/10.1109/ICARCV.2004.1468855,
+66dcd855a6772d2731b45cfdd75f084327b055c2,http://pdfs.semanticscholar.org/66dc/d855a6772d2731b45cfdd75f084327b055c2.pdf,,,https://arxiv.org/pdf/1801.06445v1.pdf
+66ebb070ea8de63afa11cc856fe2754ea39a93ff,,,,
+661c78a0e2b63cbdb9c20dcf89854ba029b6bc87,,,https://doi.org/10.1109/ICIP.2014.7025093,
+666939690c564641b864eed0d60a410b31e49f80,http://pdfs.semanticscholar.org/6669/39690c564641b864eed0d60a410b31e49f80.pdf,,https://doi.org/10.1007/978-3-319-16865-4_16,http://research.microsoft.com/en-US/people/xjwang/accv2014finalpaper.pdf
+66330846a03dcc10f36b6db9adf3b4d32e7a3127,http://pdfs.semanticscholar.org/6633/0846a03dcc10f36b6db9adf3b4d32e7a3127.pdf,,,http://www.aifb.kit.edu/images/8/87/ECML-PKDD-Doctoral-Camera-Ready.pdf
+6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c,http://openaccess.thecvf.com/content_cvpr_2016/papers/Niu_Ordinal_Regression_With_CVPR_2016_paper.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Niu_Ordinal_Regression_With_CVPR_2016_paper.pdf
+666300af8ffb8c903223f32f1fcc5c4674e2430b,http://pdfs.semanticscholar.org/6663/00af8ffb8c903223f32f1fcc5c4674e2430b.pdf,,,http://arxiv.org/abs/1703.07920
+66029f1be1a5cee9a4e3e24ed8fcb65d5d293720,http://pdfs.semanticscholar.org/6602/9f1be1a5cee9a4e3e24ed8fcb65d5d293720.pdf,,,http://www.bmva.org/bmvc/2010/conference/paper58/paper58.pdf
+6691dfa1a83a04fdc0177d8d70e3df79f606b10f,http://pdfs.semanticscholar.org/6691/dfa1a83a04fdc0177d8d70e3df79f606b10f.pdf,,,http://www.cbsr.ia.ac.cn/Li%20Group/papers/FaceLighting.pdf
+66f4d7c381bd1798703977de2e38b696c6641b77,,,https://doi.org/10.1109/FSKD.2015.7382360,
+6688b2b1c1162bc00047075005ec5c7fca7219fd,,,https://doi.org/10.1109/SACI.2013.6608958,
+66a2c229ac82e38f1b7c77a786d8cf0d7e369598,http://pdfs.semanticscholar.org/66a2/c229ac82e38f1b7c77a786d8cf0d7e369598.pdf,,,http://arxiv.org/abs/1604.08524
+66886997988358847615375ba7d6e9eb0f1bb27f,https://pdfs.semanticscholar.org/6688/6997988358847615375ba7d6e9eb0f1bb27f.pdf,,https://doi.org/10.1109/TCYB.2014.2376934,http://www.kinfacew.com/papers/PDFL_TCYB15.pdf
+66a9935e958a779a3a2267c85ecb69fbbb75b8dc,http://pdfs.semanticscholar.org/66a9/935e958a779a3a2267c85ecb69fbbb75b8dc.pdf,,,http://arxiv.org/abs/1503.03004
+66533107f9abdc7d1cb8f8795025fc7e78eb1122,http://pdfs.semanticscholar.org/6653/3107f9abdc7d1cb8f8795025fc7e78eb1122.pdf,,https://doi.org/10.1109/ROBOT.2001.933187,http://www.researchgate.net/profile/Daejin_Kim2/publication/3902643_Visual_servoing_for_a_user's_mouth_with_effective_intention_reading_in_a_wheelchair-based_robotic_arm/links/00b4951f2a22fa3894000000.pdf
+66810438bfb52367e3f6f62c24f5bc127cf92e56,http://pdfs.semanticscholar.org/6681/0438bfb52367e3f6f62c24f5bc127cf92e56.pdf,,https://doi.org/10.4304/jmm.9.1.83-91,http://ojs.academypublisher.com/index.php/jmm/article/download/jmm09018391/8389
+66af2afd4c598c2841dbfd1053bf0c386579234e,http://www.ics.uci.edu/~dvk/pub/J17_IJMIR14_Liyan.pdf,,https://doi.org/10.1007/s13735-014-0052-1,
+66f02fbcad13c6ee5b421be2fc72485aaaf6fcb5,http://pdfs.semanticscholar.org/66f0/2fbcad13c6ee5b421be2fc72485aaaf6fcb5.pdf,,,https://www.cs.rit.edu/~reu/products/2016/papers/P5_AliyaGangjiTrevorWaldenetal_2017HAAI(AAAI)_paper.pdf
+66e9fb4c2860eb4a15f713096020962553696e12,http://pdfs.semanticscholar.org/d42f/8e7283b20b89f55f8d36efcb1d8e2b774167.pdf,,,https://arxiv.org/pdf/1706.09308v1.pdf
+66e6f08873325d37e0ec20a4769ce881e04e964e,http://www.cc.gatech.edu/~hays/papers/attribute_ijcv.pdf,,https://doi.org/10.1007/s11263-013-0695-z,https://cs.brown.edu/people/gen/pub_papers/sun_attributes_ijcv.pdf
+661da40b838806a7effcb42d63a9624fcd684976,http://pdfs.semanticscholar.org/661d/a40b838806a7effcb42d63a9624fcd684976.pdf,,,http://hrcak.srce.hr/file/89702
+66886f5af67b22d14177119520bd9c9f39cdd2e6,http://pdfs.semanticscholar.org/6688/6f5af67b22d14177119520bd9c9f39cdd2e6.pdf,,,http://www.bmva.org/bmvc/2016/papers/paper098/index.html
+6622776d1696e79223f999af51e3086ba075dbd1,,,,http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019454
+3edb0fa2d6b0f1984e8e2c523c558cb026b2a983,http://cs.nju.edu.cn/zhouzh/zhouzh.files/publication/tpami07.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2007.70733
+3e69ed088f588f6ecb30969bc6e4dbfacb35133e,http://pdfs.semanticscholar.org/3e69/ed088f588f6ecb30969bc6e4dbfacb35133e.pdf,,,http://searchdl.org/public/journals/2011/IJIT/1/2/165.pdf
+3e01f2fefe219bfeb112f1d82e76ebba4c0e2aac,,,,http://doi.ieeecomputersociety.org/10.1109/WACV.2014.6836097
+3e4b38b0574e740dcbd8f8c5dfe05dbfb2a92c07,http://pdfs.semanticscholar.org/accb/d6cd5dd649137a7c57ad6ef99232759f7544.pdf,,,http://www.ee.oulu.fi/mvg/files/pdf/pdf_545.pdf
+3ebb0209d5e99b22c67e425a67a959f4db8d1f47,,,https://doi.org/10.1109/ICDAR.2017.173,
+3ee7a8107a805370b296a53e355d111118e96b7c,http://pdfs.semanticscholar.org/3ee7/a8107a805370b296a53e355d111118e96b7c.pdf,,,http://people.ee.duke.edu/~lcarin/gLASSO5.pdf
+3ebce6710135d1f9b652815e59323858a7c60025,http://pdfs.semanticscholar.org/3ebc/e6710135d1f9b652815e59323858a7c60025.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2001.990537
+3ec05713a1eed6fa9b57fef718f369f68bbbe09f,http://pdfs.semanticscholar.org/3ec0/5713a1eed6fa9b57fef718f369f68bbbe09f.pdf,,https://doi.org/10.1016/j.patrec.2016.01.025,https://core.ac.uk/download/pdf/34657700.pdf
+3e0035b447d0d4e11ceda45936c898256f321382,,,https://doi.org/10.1109/BMEI.2014.7002762,
+3e3f305dac4fbb813e60ac778d6929012b4b745a,http://pdfs.semanticscholar.org/3e3f/305dac4fbb813e60ac778d6929012b4b745a.pdf,,,https://arxiv.org/pdf/1405.7545v1.pdf
+3e1190655cc7c1159944d88bdbe591b53f48d761,,,https://doi.org/10.1007/s10489-013-0464-2,
+3ea8a6dc79d79319f7ad90d663558c664cf298d4,http://pdfs.semanticscholar.org/3ea8/a6dc79d79319f7ad90d663558c664cf298d4.pdf,,,http://www.ifp.uiuc.edu/~iracohen/publications/IraCohenMSThesis.pdf
+3e4f84ce00027723bdfdb21156c9003168bc1c80,http://www.eurasip.org/Proceedings/Eusipco/Eusipco2011/papers/1569427521.pdf,http://ieeexplore.ieee.org/document/7074232/,,http://www.eurecom.fr/en/publication/3408/download/mm-publi-3408.pdf
+3e04feb0b6392f94554f6d18e24fadba1a28b65f,http://pdfs.semanticscholar.org/b72c/5119c0aafa64f32e8e773638b5738f31b33c.pdf,,,http://webhost.uoradea.ro/ibuciu/buciu-pitas-chapter.pdf
+3ed46ef5344927a30d71089ae203c9a9e35e4977,,,,
+3e452ca67e17e4173ec8dfbd4a2b803ad2ee5a48,,,,http://doi.ieeecomputersociety.org/10.1109/WF-IoT.2016.7845505
+3e685704b140180d48142d1727080d2fb9e52163,http://pdfs.semanticscholar.org/3e68/5704b140180d48142d1727080d2fb9e52163.pdf,,,http://arxiv.org/abs/1705.04641
+3e51d634faacf58e7903750f17111d0d172a0bf1,http://www.eurasip.org/Proceedings/Eusipco/Eusipco2014/HTML/papers/1569924869.pdf,http://ieeexplore.ieee.org/document/6952589/,,
+3ec860cfbd5d953f29c43c4e926d3647e532c8b0,,,https://doi.org/10.1109/TCSVT.2008.924108,
+3e0377af0087b9b836bf6d95bc1c7085dfde4897,,,,http://doi.acm.org/10.1145/2671188.2749320
+3e7070323bca6106f19bea4c97ef67bd6249cb5d,,,,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477448
+3e03d19b950edadc74ca047dec86227282eccf71,,,https://doi.org/10.1109/ACCESS.2017.2777003,
+3e687d5ace90c407186602de1a7727167461194a,http://pdfs.semanticscholar.org/3e68/7d5ace90c407186602de1a7727167461194a.pdf,,,http://iris.sel.eesc.usp.br/wvc/Anais_WVC2012/pdf/97140.pdf
+3e3a87eb24628ab075a3d2bde3abfd185591aa4c,http://pdfs.semanticscholar.org/3e3a/87eb24628ab075a3d2bde3abfd185591aa4c.pdf,,,https://www.elen.ucl.ac.be/Proceedings/esann/esannpdf/es2011-27.pdf
+3edc43e336be075dca77c7e173b555b6c14274d8,http://pdfs.semanticscholar.org/3edc/43e336be075dca77c7e173b555b6c14274d8.pdf,,https://doi.org/10.1016/j.jvcir.2010.10.008,http://www.cs.ccu.edu.tw/~wtchu/papers/2011JVCI-chu.pdf
+3e207c05f438a8cef7dd30b62d9e2c997ddc0d3f,http://pdfs.semanticscholar.org/bca7/c0a8c5b0503a4ee43f3561f540918071aaa3.pdf,,,http://calvin.inf.ed.ac.uk/wp-content/uploads/Publications/gonzalez18cvpr.pdf
+5040f7f261872a30eec88788f98326395a44db03,http://pdfs.semanticscholar.org/5040/f7f261872a30eec88788f98326395a44db03.pdf,,,http://ibug.doc.ic.ac.uk/media/uploads/documents/bmvc_final.pdf
+50f0c495a214b8d57892d43110728e54e413d47d,http://pdfs.semanticscholar.org/50f0/c495a214b8d57892d43110728e54e413d47d.pdf,,,http://jmlr.org/papers/volume13/brunner12a/brunner12a.pdf
+505e5fe9e897ddbddcf4edab8c8a97d5e56e9d8d,,,,
+501096cca4d0b3d1ef407844642e39cd2ff86b37,http://pdfs.semanticscholar.org/5010/96cca4d0b3d1ef407844642e39cd2ff86b37.pdf,,https://doi.org/10.1007/978-3-642-16687-7_58,https://pdfs.semanticscholar.org/5010/96cca4d0b3d1ef407844642e39cd2ff86b37.pdf
+500fbe18afd44312738cab91b4689c12b4e0eeee,http://www.maia.ub.es/~sergio/linked/ijcnn_age_and_cultural_2015.pdf,,https://doi.org/10.1109/IJCNN.2015.7280614,
+501eda2d04b1db717b7834800d74dacb7df58f91,http://pdfs.semanticscholar.org/501e/da2d04b1db717b7834800d74dacb7df58f91.pdf,,,https://estudogeral.sib.uc.pt/bitstream/10316/40573/1/Discriminative%20Sparse%20Representation%20for%20Expression%20Recognition%20in%20Natural%20Images.pdf
+5083c6be0f8c85815ead5368882b584e4dfab4d1,http://pdfs.semanticscholar.org/5083/c6be0f8c85815ead5368882b584e4dfab4d1.pdf,,,http://www.pitt.edu/~jeffcohn/biblio/hac.pdf
+50ce3f8744c219871fbdcab1342d49d589f2626b,http://www.public.asu.edu/~jye02/Publications/Papers/AML_cvpr07.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2007.383103
+500b92578e4deff98ce20e6017124e6d2053b451,http://eprints.eemcs.utwente.nl/25818/01/Pantic_Incremental_Face_Alignment_in_the_Wild.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Asthana_Incremental_Face_Alignment_2014_CVPR_paper.pdf
+503b6a490c2b24b9d2aaf642a0fdaf797a8cdb99,,,https://doi.org/10.1109/ACCESS.2017.2733718,
+504d2675da7a56a36386568ee668938df6d82bbe,,,https://doi.org/10.1109/TCSVT.2016.2539604,
+5058a7ec68c32984c33f357ebaee96c59e269425,http://pdfs.semanticscholar.org/5058/a7ec68c32984c33f357ebaee96c59e269425.pdf,,https://doi.org/10.1007/978-3-319-13737-7_12,http://www.hertasecurity.com/wp-content/uploads/2014/09/ICPRw14_CR_send1.pdf
+50ff21e595e0ebe51ae808a2da3b7940549f4035,http://export.arxiv.org/pdf/1710.02985,,https://doi.org/10.1109/ACCESS.2017.2761849,http://arxiv.org/abs/1710.02985
+5042b358705e8d8e8b0655d07f751be6a1565482,http://pdfs.semanticscholar.org/5042/b358705e8d8e8b0655d07f751be6a1565482.pdf,,,http://www.ermt.net/docs/papers/Volume_4/8_August2015/V4N8-162.pdf
+50e47857b11bfd3d420f6eafb155199f4b41f6d7,http://pdfs.semanticscholar.org/50e4/7857b11bfd3d420f6eafb155199f4b41f6d7.pdf,,,http://ij3c.ncuteecs.org/volume/paperfile/2-1/IJ3C_8.pdf
+50614ff325f0c8ca20f99efc55d65a8d4cc768cd,http://www.genizah.org/professionalPapers/IJCinGeniza.pdf,,https://doi.org/10.1007/s11263-010-0389-8,http://www.cs.tau.ac.il/~wolf/papers/genizahijcv.pdf
+50eb75dfece76ed9119ec543e04386dfc95dfd13,https://lirias.kuleuven.be/bitstream/123456789/197359/1/boiy-learningVisual.pdf,,,http://doi.ieeecomputersociety.org/10.1109/DEXA.2008.59
+5050807e90a925120cbc3a9cd13431b98965f4b9,http://pdfs.semanticscholar.org/5050/807e90a925120cbc3a9cd13431b98965f4b9.pdf,,https://doi.org/10.1007/978-3-642-33885-4_7,http://cs-people.bu.edu/shugaoma/eccv2012_PnA_shugao.pdf
+502d30c5eac92c7db587d85d080343fbd9bc469e,,,https://doi.org/10.1109/TIFS.2016.2538744,
+50333790dd98c052dfafe1f9bf7bf8b4fc9530ba,,,https://doi.org/10.1109/ICIP.2015.7351001,
+5039834df68600a24e7e8eefb6ba44a5124e67fc,,,https://doi.org/10.1109/ICIP.2013.6738761,
+50eb2ee977f0f53ab4b39edc4be6b760a2b05f96,http://ajbasweb.com/old/ajbas/2017/April/1-11.pdf,,,
+50e45e9c55c9e79aaae43aff7d9e2f079a2d787b,http://pdfs.semanticscholar.org/50e4/5e9c55c9e79aaae43aff7d9e2f079a2d787b.pdf,,,
+5003754070f3a87ab94a2abb077c899fcaf936a6,http://pdfs.semanticscholar.org/5003/754070f3a87ab94a2abb077c899fcaf936a6.pdf,,,https://www.cs.umd.edu/sites/default/files/scholarly_papers/HyunjongCho.pdf
+503c0b83c64878eddec6f71798b7877f2ae1967e,,,,
+501076313de90aca7848e0249e7f0e7283d669a1,,,https://doi.org/10.1109/SOCPAR.2014.7007987,
+503db524b9a99220d430e741c44cd9c91ce1ddf8,http://pdfs.semanticscholar.org/503d/b524b9a99220d430e741c44cd9c91ce1ddf8.pdf,,,https://arxiv.org/pdf/1703.09913v2.pdf
+50d15cb17144344bb1879c0a5de7207471b9ff74,http://pdfs.semanticscholar.org/50d1/5cb17144344bb1879c0a5de7207471b9ff74.pdf,,,http://vision.cs.utexas.edu/projects/resistshare/book_chapter.pdf
+5039b2081eb3c8efbf9e96fd27775731f38f6fc7,,,,
+50d961508ec192197f78b898ff5d44dc004ef26d,http://pdfs.semanticscholar.org/50d9/61508ec192197f78b898ff5d44dc004ef26d.pdf,,,http://airccse.org/journal/jcsit/1109s5.pdf
+50ccc98d9ce06160cdf92aaf470b8f4edbd8b899,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W08/papers/Qu_Towards_Robust_Cascaded_2015_CVPR_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2015.7301348
+5028c0decfc8dd623c50b102424b93a8e9f2e390,http://pdfs.semanticscholar.org/5028/c0decfc8dd623c50b102424b93a8e9f2e390.pdf,,,https://openreview.net/pdf?id=SJkXfE5xx
+505e55d0be8e48b30067fb132f05a91650666c41,http://pdfs.semanticscholar.org/505e/55d0be8e48b30067fb132f05a91650666c41.pdf,,,http://www.eurecom.fr/util/publidownload.fr.htm?id=1290
+507c9672e3673ed419075848b4b85899623ea4b0,http://pdfs.semanticscholar.org/507c/9672e3673ed419075848b4b85899623ea4b0.pdf,,,http://face.cs.kit.edu/download/thesis/da-hesse.pdf
+50c0de2cccf7084a81debad5fdb34a9139496da0,http://pdfs.semanticscholar.org/50c0/de2cccf7084a81debad5fdb34a9139496da0.pdf,,https://doi.org/10.3389/fict.2016.00027,
+681d222f91b12b00e9a4217b80beaa11d032f540,,,https://doi.org/10.1007/s10044-015-0493-z,
+680d662c30739521f5c4b76845cb341dce010735,http://people.cs.umass.edu/~smaji/papers/maji15part.pdf,,https://doi.org/10.1007/s11263-014-0716-6,https://people.cs.umass.edu/~smaji/papers/maji15part.pdf
+68f89c1ee75a018c8eff86e15b1d2383c250529b,http://pdfs.semanticscholar.org/68f8/9c1ee75a018c8eff86e15b1d2383c250529b.pdf,,,http://www.clsp.jhu.edu/vfsrv/workshops/ws10/documents/loavhat2010report.pdf
+68a2ee5c5b76b6feeb3170aaff09b1566ec2cdf5,http://pdfs.semanticscholar.org/68a2/ee5c5b76b6feeb3170aaff09b1566ec2cdf5.pdf,,,http://www.enggjournals.com/ijcse/doc/IJCSE13-05-10-039.pdf
+68a3f12382003bc714c51c85fb6d0557dcb15467,http://research.microsoft.com/pubs/217884/ZitnickSent2SceneICCV13.pdf,,,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Zitnick_Learning_the_Visual_2013_ICCV_paper.pdf
+6859b891a079a30ef16f01ba8b85dc45bd22c352,http://pdfs.semanticscholar.org/6859/b891a079a30ef16f01ba8b85dc45bd22c352.pdf,,,http://www.ijetae.com/files/Volume4Issue10/IJETAE_1014_67.pdf
+68003e92a41d12647806d477dd7d20e4dcde1354,http://pdfs.semanticscholar.org/db86/41ed047da4a90d53414edfe126c845141d69.pdf,,,http://ictactjournals.in/paper/IJIVP_V4_I2_Paper_4_695_701.pdf
+68d4056765c27fbcac233794857b7f5b8a6a82bf,http://pdfs.semanticscholar.org/68d4/056765c27fbcac233794857b7f5b8a6a82bf.pdf,,https://doi.org/10.1007/978-3-540-76631-5_72,http://biblioteca.cinvestav.mx/indicadores/texto_completo/cinvestav/2007/138375_1.pdf
+68c5b4d9ce2a0c75ba515870923a4bd1b7d8f9b5,,,https://doi.org/10.1109/CISP-BMEI.2017.8301919,
+68996c28bc050158f025a17908eb4bc805c3ee55,https://www.researchgate.net/profile/M_Yeasin/publication/4082331_From_facial_expression_to_level_of_interest_a_spatio-temporal_approach/links/54983d0a0cf2519f5a1dda62.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2004.112
+68c5238994e3f654adea0ccd8bca29f2a24087fc,http://web.fsktm.um.edu.my/~cschan/doc/ICIP2013.pdf,,https://doi.org/10.1109/ICIP.2013.6738885,
+68bf7fc874c2db44d0446cdbb1e05f19c2239282,http://pdfs.semanticscholar.org/68bf/7fc874c2db44d0446cdbb1e05f19c2239282.pdf,,https://doi.org/10.1016/j.procs.2015.05.352,http://www.cs.tut.fi/~iosifidi/files/conference/2015_ICCS_FKMC.pdf?dl=0
+68cf263a17862e4dd3547f7ecc863b2dc53320d8,http://pdfs.semanticscholar.org/68cf/263a17862e4dd3547f7ecc863b2dc53320d8.pdf,,https://doi.org/10.1016/j.patcog.2012.11.022,https://pdfs.semanticscholar.org/68cf/263a17862e4dd3547f7ecc863b2dc53320d8.pdf
+68e9c837431f2ba59741b55004df60235e50994d,http://pdfs.semanticscholar.org/68e9/c837431f2ba59741b55004df60235e50994d.pdf,,,https://export.arxiv.org/pdf/1709.05256
+685f8df14776457c1c324b0619c39b3872df617b,http://pdfs.semanticscholar.org/685f/8df14776457c1c324b0619c39b3872df617b.pdf,,,http://liu.diva-portal.org/smash/get/diva2:931705/FULLTEXT01.pdf
+68eb6e0e3660009e8a046bff15cef6fe87d46477,,,https://doi.org/10.1109/ICIP.2017.8296999,
+68d70d49ae5476181f3ceb4bc1caf493127b08b1,,,,
+687e17db5043661f8921fb86f215e9ca2264d4d2,http://www.ece.northwestern.edu/~ganghua/publication/ICCV09a.pdf,,https://doi.org/10.1109/ICCV.2009.5459457,http://users.eecs.northwestern.edu/~ganghua/publication/ICCV09a.pdf
+688754568623f62032820546ae3b9ca458ed0870,http://pdfs.semanticscholar.org/d6c2/108259edf97fabcbe608766a6baa98ac893d.pdf,,,http://biorxiv.org/content/biorxiv/early/2016/09/27/077784.full.pdf
+68070526920b387bfb91e4753d57d8e07fac51ee,,,,
+68f9cb5ee129e2b9477faf01181cd7e3099d1824,http://pdfs.semanticscholar.org/68f9/cb5ee129e2b9477faf01181cd7e3099d1824.pdf,,,http://www.cs.toronto.edu/~aliyari/papers/mvap.pdf
+68e6cfb0d7423d3fae579919046639c8e2d04ad7,,,https://doi.org/10.1109/ICB.2016.7550058,
+68bf34e383092eb827dd6a61e9b362fcba36a83a,http://pdfs.semanticscholar.org/68bf/34e383092eb827dd6a61e9b362fcba36a83a.pdf,,,http://repository.lib.polyu.edu.hk/jspui/bitstream/10397/7112/2/b27472413_ir.pdf
+68d40176e878ebffbc01ffb0556e8cb2756dd9e9,http://pdfs.semanticscholar.org/68d4/0176e878ebffbc01ffb0556e8cb2756dd9e9.pdf,,,http://www.ijera.com/special_issue/Humming%20Bird_March_2014/Version%20%201/AA0105.pdf
+68c4a1d438ea1c6dfba92e3aee08d48f8e7f7090,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w11/papers/Liu_AgeNet_Deeply_Learned_ICCV_2015_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2015.42
+6889d649c6bbd9c0042fadec6c813f8e894ac6cc,http://pdfs.semanticscholar.org/6889/d649c6bbd9c0042fadec6c813f8e894ac6cc.pdf,,,http://drops.dagstuhl.de/opus/volltexte/2009/2035/
+68f69e6c6c66cfde3d02237a6918c9d1ee678e1b,http://www.cs.fiu.edu/~chens/PDF/ISM09_Pruning.pdf,,,http://users.cis.fiu.edu/~chens/PDF/ISM09_Pruning.pdf
+6813208b94ffa1052760d318169307d1d1c2438e,,,,http://doi.acm.org/10.1145/2818346.2830582
+68f19f06f49aa98b676fc6e315b25e23a1efb1f0,,,https://doi.org/10.1109/ICIP.2015.7351080,
+683ec608442617d11200cfbcd816e86ce9ec0899,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Chen_Dual_Linear_Regression_2014_CVPR_paper.pdf,,,https://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Chen_Dual_Linear_Regression_2014_CVPR_paper.pdf
+68c17aa1ecbff0787709be74d1d98d9efd78f410,http://pdfs.semanticscholar.org/68c1/7aa1ecbff0787709be74d1d98d9efd78f410.pdf,,,http://www.cec.uchile.cl/~clperez/papers/Gender%20Classification%20From%20Face%20Images%20Using%20Mutual%20Information%20and%20Feature%20Fusion.pdf
+6821113166b030d2123c3cd793dd63d2c909a110,http://pdfs.semanticscholar.org/6821/113166b030d2123c3cd793dd63d2c909a110.pdf,,,http://studiainformatica.polsl.pl/index.php/SI/article/download/718/678
+68d566ed4041a7519acb87753036610bd64dcc09,,,https://doi.org/10.1007/s11390-013-1347-z,
+68021c333559ab95ca10e0dbbcc8a4840c31e157,,,https://doi.org/10.1109/ICPR.2016.7900281,
+681399aa0ea4cbffd9ab22bf17661d6df4047349,,,,http://doi.ieeecomputersociety.org/10.1109/CISIS.2012.207
+68a04a3ae2086986877fee2c82ae68e3631d0356,http://pdfs.semanticscholar.org/68a0/4a3ae2086986877fee2c82ae68e3631d0356.pdf,,,http://mi.eng.cam.ac.uk/~cipolla/publications/article/2007_PAMI_paper2.pdf
+6888f3402039a36028d0a7e2c3df6db94f5cb9bb,http://pdfs.semanticscholar.org/6888/f3402039a36028d0a7e2c3df6db94f5cb9bb.pdf,,,https://openreview.net/pdf?id=SJOl4DlCZ
+57f5711ca7ee5c7110b7d6d12c611d27af37875f,http://pdfs.semanticscholar.org/57f5/711ca7ee5c7110b7d6d12c611d27af37875f.pdf,,,http://ethos.bl.uk/OrderDetails.do?uin=uk.bl.ethos.435214
+570308801ff9614191cfbfd7da88d41fb441b423,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Chu_Unsupervised_Synchrony_Discovery_ICCV_2015_paper.pdf,,,http://ca.cs.cmu.edu/sites/default/files/usd_final.pdf
+57b7325b8027745b130490c8f736445c407f4c4c,,,,http://doi.ieeecomputersociety.org/10.1109/WI-IAT.2012.27
+57bf9888f0dfcc41c5ed5d4b1c2787afab72145a,http://pdfs.semanticscholar.org/57bf/9888f0dfcc41c5ed5d4b1c2787afab72145a.pdf,,,http://ocean.kisti.re.kr/downfile/volume/etri/HJTODO/2010/v32n5/HJTODO_2010_v32n5_784.pdf
+5798055e11e25c404b1b0027bc9331bcc6e00555,,,,http://doi.acm.org/10.1145/2393347.2396357
+57eeaceb14a01a2560d0b90d38205e512dcca691,,,https://doi.org/10.1109/TIP.2017.2778563,
+57ebeff9273dea933e2a75c306849baf43081a8c,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Sun_Deep_Convolutional_Network_2013_CVPR_paper.pdf,,,http://www.iipl.fudan.edu.cn/~hdwu/liu_cvpr_01.pdf
+574751dbb53777101502419127ba8209562c4758,http://pdfs.semanticscholar.org/5747/51dbb53777101502419127ba8209562c4758.pdf,,https://doi.org/10.1016/j.ins.2012.09.008,http://ochoa.pc.cs.cmu.edu/wschu/papers/doc/ins13-gender.pdf
+5763b09ebca9a756b4adebf74d6d7de27e80e298,,,https://doi.org/10.1109/BTAS.2013.6712738,
+5778d49c8d8d127351eee35047b8d0dc90defe85,http://pdfs.semanticscholar.org/ec31/6c1c182de9d7fe73c7fbbc1a121a7e43c100.pdf,,https://doi.org/10.1007/978-3-319-16817-3_21,https://www.cl.cam.ac.uk/~hg410/SariyanidiEtAl-ACCV2014.pdf
+57034dc2d16ff1cbef24a61c0a415580820f9a15,,,,
+57f4e54a63ef95596dbc743f391c3fff461f278b,,,https://doi.org/10.1109/ICMEW.2012.86,
+57ee3a8b0cafe211d1e9b477d210bb78b9d43bc1,http://www.cs.toronto.edu/~rfm/pubs/morphBM.pdf,,,http://learning.cs.toronto.edu/~hinton/absps/morphBM.pdf
+57fd229097e4822292d19329a17ceb013b2cb648,http://pdfs.semanticscholar.org/57fd/229097e4822292d19329a17ceb013b2cb648.pdf,,,https://www.ijcai.org/Proceedings/16/Papers/288.pdf
+57c59011614c43f51a509e10717e47505c776389,http://users.cecs.anu.edu.au/~basura/papers/CVPR_2017_Workshop.pdf,,,https://arxiv.org/pdf/1612.00558v2.pdf
+57ca530e9acb63487e8591cb6efb89473aa1e5b4,,,https://doi.org/10.1109/TIP.2014.2356292,
+57b8b28f8748d998951b5a863ff1bfd7ca4ae6a5,http://pdfs.semanticscholar.org/57b8/b28f8748d998951b5a863ff1bfd7ca4ae6a5.pdf,,,https://www-users.cs.york.ac.uk/~wsmith/papers/CGFsupp.pdf
+57101b29680208cfedf041d13198299e2d396314,http://pdfs.semanticscholar.org/5710/1b29680208cfedf041d13198299e2d396314.pdf,,,https://www.psychologie.uni-freiburg.de/Members/domes/domesoxteyetrack2012
+578117ff493d691166fefc52fd61bad70d8752a9,,,https://doi.org/10.1109/CCST.2016.7815707,
+576d0fea5a1ae9ce22996e726787c49023fc7522,,,,
+57893403f543db75d1f4e7355283bdca11f3ab1b,http://www.doc.ic.ac.uk/~maja/PAMI-KoelstraEtAl-accepted.pdf,,,http://eprints.eemcs.utwente.nl/19457/01/pantic_a_dynamic_texture-based_approach.pdf
+571f493c0ade12bbe960cfefc04b0e4607d8d4b2,http://pdfs.semanticscholar.org/571f/493c0ade12bbe960cfefc04b0e4607d8d4b2.pdf,,,http://ijrsset.org/pdfs/v3-i2/4.pdf
+57f8e1f461ab25614f5fe51a83601710142f8e88,http://pdfs.semanticscholar.org/57f8/e1f461ab25614f5fe51a83601710142f8e88.pdf,,,
+57a1466c5985fe7594a91d46588d969007210581,https://www.wjscheirer.com/projects/unconstrained-face/amfg_2010_poster.pdf,,https://doi.org/10.1109/CVPRW.2010.5543603,http://www.wjscheirer.com/projects/unconstrained-face/amfg_2010_poster.pdf
+57ba4b6de23a6fc9d45ff052ed2563e5de00b968,,,https://doi.org/10.1109/ICIP.2017.8296993,
+5721216f2163d026e90d7cd9942aeb4bebc92334,http://pdfs.semanticscholar.org/5721/216f2163d026e90d7cd9942aeb4bebc92334.pdf,,,http://arxiv.org/abs/1612.05038
+5721cd4b898f0e7df8de1e0215f630af94656be9,,,,http://doi.acm.org/10.1145/3095140.3095164
+575141e42740564f64d9be8ab88d495192f5b3bc,http://pdfs.semanticscholar.org/5751/41e42740564f64d9be8ab88d495192f5b3bc.pdf,,https://doi.org/10.1007/978-3-319-46654-5_21,http://www.cbsr.ia.ac.cn/users/zlei/papers/Liu-Age-CCBR2016.pdf
+57911d7f347dde0398f964e0c7ed8fdd0a882449,http://amp.ece.cmu.edu/people/Andy/Andy_files/1424CVPR08Gallagher.pdf,,,http://chenlab.ece.cornell.edu/people/Andy/publications/Andy_files/1424CVPR08Gallagher.pdf
+5789f8420d8f15e7772580ec373112f864627c4b,http://openaccess.thecvf.com/content_ICCV_2017/papers/Schneider_Efficient_Global_Illumination_ICCV_2017_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2017.417
+574705812f7c0e776ad5006ae5e61d9b071eebdb,http://pdfs.semanticscholar.org/5747/05812f7c0e776ad5006ae5e61d9b071eebdb.pdf,,,http://ijcsmc.com/docs/papers/May2014/V3I5201499a75.pdf
+5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725,http://pdfs.semanticscholar.org/63b2/f5348af0f969dfc2afb4977732393c6459ec.pdf,,https://doi.org/10.5244/C.27.58,http://www.vision.caltech.edu/~xpburgos/papers/BMVC13%20Burgos-Artizzu%20abstract.pdf
+57c270a9f468f7129643852945cf3562cbb76e07,,,https://doi.org/10.1016/j.imavis.2016.07.004,
+57b052cf826b24739cd7749b632f85f4b7bcf90b,http://pdfs.semanticscholar.org/57b0/52cf826b24739cd7749b632f85f4b7bcf90b.pdf,,https://doi.org/10.1007/978-3-319-54193-8_9,http://120.24.71.152/wp-content/themes/twentytwelve/pub_pdf/Fast%20Fashion%20Guided%20Clothing%20Image%20Retrieval.pdf
+57d37ad025b5796457eee7392d2038910988655a,http://pdfs.semanticscholar.org/57d3/7ad025b5796457eee7392d2038910988655a.pdf,,,http://www.cs.huji.ac.il/~daphna/theses/Dagan_Eshar_2009.pdf
+57f7d8c6ec690bd436e70d7761bc5f46e993be4c,https://opus.lib.uts.edu.au/bitstream/10453/10785/3/2009001878_Du.pdf,,https://doi.org/10.1109/WACV.2009.5403081,
+57de1a09db680e0b4878ceda68d626ae4e44ccfe,,,https://doi.org/10.1016/j.neucom.2014.10.111,
+57dc55edade7074f0b32db02939c00f4da8fe3a6,,,https://doi.org/10.1109/TITS.2014.2313371,
+3b1260d78885e872cf2223f2c6f3d6f6ea254204,http://pdfs.semanticscholar.org/3b12/60d78885e872cf2223f2c6f3d6f6ea254204.pdf,,,http://www.cse.msu.edu/biometrics/Publications/Face/Choietal_FaceTrackingRecognitionDistancePTZCameraSystem.pdf
+3b1aaac41fc7847dd8a6a66d29d8881f75c91ad5,http://www.rci.rutgers.edu/~vmp93/Journal_pub/T-pami_openset.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2016.2613924
+3ba74755c530347f14ec8261996dd9eae896e383,,,https://doi.org/10.1109/JSSC.2017.2767705,
+3b092733f428b12f1f920638f868ed1e8663fe57,http://www.math.jhu.edu/~data/RamaPapers/PerformanceBounds.pdf,,https://doi.org/10.1109/ICPR.2016.7900188,
+3b8c830b200f1df8ef705de37cbfe83945a3d307,,,https://doi.org/10.1007/s00138-017-0887-6,
+3b2d5585af59480531616fe970cb265bbdf63f5b,http://pdfs.semanticscholar.org/3b2d/5585af59480531616fe970cb265bbdf63f5b.pdf,,,http://wscg.zcu.cz/wscg2008/Papers_2008/journal/!_WSCG2008_Journal_final.zip
+3bf673a1f620015cb8b5106b85c7168431bb48ff,,,,
+3bcdb430b373fc0fafec93bdcd8125db338b20e4,,,,
+3b64efa817fd609d525c7244a0e00f98feacc8b4,https://arxiv.org/pdf/1502.04383v3.pdf,,,https://arxiv.org/pdf/1502.04383v2.pdf
+3bf690a6e2751b23bd8ae65c2ad133b249840bf9,,,,
+3bc776eb1f4e2776f98189e17f0d5a78bb755ef4,http://pdfs.semanticscholar.org/3bc7/76eb1f4e2776f98189e17f0d5a78bb755ef4.pdf,,,http://drum.lib.umd.edu/bitstream/handle/1903/7318/umi-umd-4722.pdf?isAllowed=y&sequence=1
+3b7f6035a113b560760c5e8000540fc46f91fed5,http://www.vision.ee.ethz.ch/~zzhiwu/posters/ICCV13_Poster_ZhiwuHuang_v2.0.pdf,,,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Huang_Coupling_Alignments_with_2013_ICCV_paper.pdf
+3b2a2357b12cf0a5c99c8bc06ef7b46e40dd888e,http://pdfs.semanticscholar.org/5141/cf2e59fb2ec9bb489b9c1832447d3cd93110.pdf,,,https://arxiv.org/pdf/1706.00893v1.pdf
+3bd1d41a656c8159305ba2aa395f68f41ab84f31,http://pdfs.semanticscholar.org/3bd1/d41a656c8159305ba2aa395f68f41ab84f31.pdf,,https://doi.org/10.1007/978-3-319-18458-6_4,https://eprints.soton.ac.uk/379573/1/arcomem.pdf
+3bcd72be6fbc1a11492df3d36f6d51696fd6bdad,http://pdfs.semanticscholar.org/3bcd/72be6fbc1a11492df3d36f6d51696fd6bdad.pdf,,https://doi.org/10.1007/978-3-319-46475-6_22,http://arxiv.org/abs/1611.08663
+3b9c08381282e65649cd87dfae6a01fe6abea79b,http://pdfs.semanticscholar.org/3b9c/08381282e65649cd87dfae6a01fe6abea79b.pdf,,,https://arxiv.org/pdf/1608.00797v1.pdf
+3b4fd2aec3e721742f11d1ed4fa3f0a86d988a10,http://nms.csail.mit.edu/papers/sen060-chenA.pdf,,,http://inat.lcs.mit.edu/papers/sen060-chenA.pdf
+3bdaf59665e6effe323a1b61308bcac2da4c1b73,,,https://doi.org/10.1109/ROMAN.2012.6343736,
+3bc376f29bc169279105d33f59642568de36f17f,http://www.dip.ee.uct.ac.za/~nicolls/publish/sm14-visapp.pdf,,https://doi.org/10.5220/0004680003800387,http://www.milbo.org/stasm-files/active-shape-models-with-sift-and-mars.pdf
+3b38c06caf54f301847db0dd622a6622c3843957,http://pdfs.semanticscholar.org/3b38/c06caf54f301847db0dd622a6622c3843957.pdf,,,
+3b15a48ffe3c6b3f2518a7c395280a11a5f58ab0,http://pdfs.semanticscholar.org/3b15/a48ffe3c6b3f2518a7c395280a11a5f58ab0.pdf,,,http://d-nb.info/1008035602
+3b9b200e76a35178da940279d566bbb7dfebb787,http://pdfs.semanticscholar.org/3b9b/200e76a35178da940279d566bbb7dfebb787.pdf,,,https://arxiv.org/pdf/1711.10103v1.pdf
+3bfa630a6dc6d1ca98e7b43c90dd9e8b98e361d6,,,https://doi.org/10.1109/ICIP.2015.7351140,
+3be8964cef223698e587b4f71fc0c72c2eeef8cf,https://www.researchgate.net/profile/Mohammad_Reza_Mohammadi3/publication/264394830_Simultaneous_recognition_of_facial_expression_and_identity_via_sparse_representation/links/53df5c5b0cf2a76fb6682872.pdf?origin=publication_list,,,https://www.researchgate.net/profile/Mohammad_Reza_Mohammadi3/publication/264394830_Simultaneous_recognition_of_facial_expression_and_identity_via_sparse_representation/links/53df5c5b0cf2a76fb6682872.pdf
+3b408a3ca6fb39b0fda4d77e6a9679003b2dc9ab,http://pdfs.semanticscholar.org/3b40/8a3ca6fb39b0fda4d77e6a9679003b2dc9ab.pdf,,,https://arxiv.org/pdf/1703.08338v1.pdf
+3b02aaccc9f063ae696c9d28bb06a8cd84b2abb8,http://pdfs.semanticscholar.org/3b02/aaccc9f063ae696c9d28bb06a8cd84b2abb8.pdf,,,http://arxiv.org/pdf/1608.07444v1.pdf
+3ba8f8b6bfb36465018430ffaef10d2caf3cfa7e,http://www.chennaisunday.com/IEEE%202013%20Dotnet%20Basepaper/Local%20Directional%20Number%20Pattern%20for%20Face%20Analysis%20Face%20and%20Expression%20Recognition.pdf,,https://doi.org/10.1109/TIP.2012.2235848,https://pdfs.semanticscholar.org/3ba8/f8b6bfb36465018430ffaef10d2caf3cfa7e.pdf
+3b80bf5a69a1b0089192d73fa3ace2fbb52a4ad5,http://pdfs.semanticscholar.org/3b80/bf5a69a1b0089192d73fa3ace2fbb52a4ad5.pdf,,https://doi.org/10.1016/j.jvlc.2015.01.001,http://ksiresearchorg.ipage.com/seke/dms14paper/paper22.pdf
+3b9d94752f8488106b2c007e11c193f35d941e92,http://pdfs.semanticscholar.org/3b9d/94752f8488106b2c007e11c193f35d941e92.pdf,,,http://www.research.att.com/export/sites/att_labs/techdocs/TD_101048.pdf
+3bebb79f8f49aa11dd4f6d60d903172db02bf4f3,http://hct.ece.ubc.ca/publications/pdf/oleinikov-etal-wacv2014.pdf,,,http://www.cs.ubc.ca/~little/links/papers/openvl.pdf
+3b557c4fd6775afc80c2cf7c8b16edde125b270e,https://arxiv.org/pdf/1602.02999v1.pdf,,https://doi.org/10.1109/ICARCV.2016.7838675,http://arxiv.org/pdf/1602.02999v1.pdf
+3b75681f0162752865d85befd8b15e7d954ebfe6,,,https://doi.org/10.1109/CLEI.2014.6965097,
+3b3482e735698819a6a28dcac84912ec01a9eb8a,http://vislab.ee.ucr.edu/PUBLICATIONS/pubs/Journal%20and%20Conference%20Papers/after10-1-1997/Conference/2003/Individual%20Recognition%20Using%20Gait%20Energy%20Image03.pdf,,,http://vision.lbl.gov/People/han/tpami06.pdf
+3b37d95d2855c8db64bd6b1ee5659f87fce36881,http://pdfs.semanticscholar.org/3b37/d95d2855c8db64bd6b1ee5659f87fce36881.pdf,,,https://arxiv.org/pdf/1710.07735v2.pdf
+3bfb9ba4b74b2b952868f590ff2f164de0c7d402,http://qil.uh.edu/qil/websitecontent/pdf/2015-8.pdf,,https://doi.org/10.1109/TCYB.2013.2291196,http://web.mit.edu/gevang/www/papers/Zhao_MinimizingIllumination3D2DFR_ieeeSMCB14/Zhao_MinimizingIllumination3D2DFR_ieeeSMCB14.pdf
+3b64b8be33887e77e6def4c385985e43e2c15eea,,,https://doi.org/10.1109/TIP.2016.2576278,
+3be7b7eb11714e6191dd301a696c734e8d07435f,http://pdfs.semanticscholar.org/3be7/b7eb11714e6191dd301a696c734e8d07435f.pdf,,,http://www.cs.unc.edu/~megha/user_interest_profiling_icme2015.pdf
+3bd50e33220af76ffc32a7e57688e248843b7f25,http://staff.estem-uc.edu.au/roland/files/2009/05/Ramana_Murthy_Goecke_DICTA2014_TheInfluenceOfTemporalInformationOnHumanActionRecognitionWithLargeNumberOfClasses.pdf,,https://doi.org/10.1109/DICTA.2014.7008131,
+3be027448ad49a79816cd21dcfcce5f4e1cec8a8,http://www.cs.utexas.edu/~grauman/papers/kovashka_iccv2011.pdf,,,http://vision.cs.utexas.edu/attributes_active/adriana_iccv11_poster.pdf
+3bd56f4cf8a36dd2d754704bcb71415dcbc0a165,http://www.humansensing.cs.cmu.edu/sites/default/files/4robustreg.pdf,,https://doi.org/10.1007/978-3-642-33765-9_44,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2448091
+3b410ae97e4564bc19d6c37bc44ada2dcd608552,http://pdfs.semanticscholar.org/3b41/0ae97e4564bc19d6c37bc44ada2dcd608552.pdf,,https://doi.org/10.1007/3-540-44887-X_87,ftp://ftp.idiap.ch/pub/reports/2003/czyz_2003_avbpa.ps.gz
+3b470b76045745c0ef5321e0f1e0e6a4b1821339,http://pdfs.semanticscholar.org/8e72/fa02f2d90ba31f31e0a7aa96a6d3e10a66fc.pdf,,https://doi.org/10.1007/978-3-319-10593-2_8,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ECCV_2014/papers/8692/86920105.pdf
+6f6ce988a13ac08071a0e3349f80b7c8adc7a49d,,,,
+6f5309d8cc76d3d300b72745887addd2a2480ba8,,,,
+6f9026627fb31d4cfb08dbcc4ab852945dc42252,,,,
+6f74c3885b684e52096497b811692bd766071530,,,https://doi.org/10.1016/j.neucom.2013.06.013,
+6f68c49106b66a5bd71ba118273b4c5c64b6619f,,,,http://doi.ieeecomputersociety.org/10.1109/TKDE.2007.190720
+6fa7a1c8a858157deee3b582099e5e234798bb4a,http://biometrics.nist.gov/cs_links/ibpc2014/presentations/14_wednesday_gentric_IBPC14_morpho.pdf,,,http://doi.acm.org/10.1145/2501105.2501107
+6ffdbac58e15e0ff084310b0a804520ad4bd013e,,,https://doi.org/10.1049/iet-bmt.2015.0078,
+6f288a12033fa895fb0e9ec3219f3115904f24de,https://arxiv.org/pdf/1511.05204v1.pdf,,https://doi.org/10.1109/TIP.2016.2615424,http://lib-arxiv-008.serverfarm.cornell.edu/pdf/1511.05204.pdf
+6fa0c206873dcc5812f7ea74a48bb4bf4b273494,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2014/W03/papers/Suk_Real-time_Mobile_Facial_2014_CVPR_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2014.25
+6f9824c5cb5ac08760b08e374031cbdabc953bae,https://eprints.soton.ac.uk/397973/1/PID4351119.pdf,,https://doi.org/10.1109/BTAS.2016.7791206,
+6f22324fab61fbc5df1aac2c0c9c497e0a7db608,,,https://doi.org/10.1109/ICB.2013.6612990,
+6f16f4bd01aeefdd03d6783beacb7de118f5af8a,,,https://doi.org/10.1109/VCIP.2013.6706330,
+6f2dc51d607f491dbe6338711c073620c85351ac,http://pdfs.semanticscholar.org/6f2d/c51d607f491dbe6338711c073620c85351ac.pdf,,https://doi.org/10.1016/j.neucom.2015.07.134,http://www.ee.oulu.fi/research/mvmp/mvg/files/pdf/2Pool_nc_20150720.pdf
+6fed504da4e192fe4c2d452754d23d3db4a4e5e3,http://pdfs.semanticscholar.org/85ee/d639f7367c794a6d8b38619697af3efaacfe.pdf,,,https://arxiv.org/pdf/1702.06890v1.pdf
+6f957df9a7d3fc4eeba53086d3d154fc61ae88df,http://pdfs.semanticscholar.org/6f95/7df9a7d3fc4eeba53086d3d154fc61ae88df.pdf,,,https://tel.archives-ouvertes.fr/file/index/docid/185084/filename/2007.09.02_These_HM.pdf
+6f0d3610c4ee7b67e9d435d48bc98167761251e8,http://www.cs.washington.edu/homes/wufei/papers/IJCNN.pdf,,,
+6f0caff7c6de636486ff4ae913953f2a6078a0ab,,,,http://doi.ieeecomputersociety.org/10.1109/ICME.2010.5583081
+6ff0f804b8412a50ae2beea5cd020c94a5de5764,,,,http://doi.acm.org/10.1145/1877972.1877994
+6f1a784ebb8df0689361afe26a2e5f7a1f4c66ca,https://www.computer.org/web/csdl/index/-/csdl/proceedings/fg/2013/5545/00/06553757.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2013.6553757
+6f26ab7edd971148723d9b4dc8ddf71b36be9bf7,http://pdfs.semanticscholar.org/6f26/ab7edd971148723d9b4dc8ddf71b36be9bf7.pdf,,,ftp://ftp.ncbi.nlm.nih.gov/pub/pmc/37/9f/PLoS_One_2011_Mar_24_6(3)_e17481.tar.gz
+6f75697a86d23d12a14be5466a41e5a7ffb79fad,https://www.computer.org/csdl/proceedings/icis/2016/0806/00/07550861.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICIS.2016.7550861
+6fdf2f4f7ae589af6016305a17d460617d9ef345,,,https://doi.org/10.1109/ICIP.2015.7350767,
+6f48e5e258da11e6ba45eeabe65a5698f17e58ef,,,https://doi.org/10.1109/ICASSP.2013.6637968,
+6f6b4e2885ea1d9bea1bb2ed388b099a5a6d9b81,http://www.vision.ee.ethz.ch/en/publications/papers/proceedings/eth_biwi_01293.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2016_workshops/w18/papers/Uricar_Structured_Output_SVM_CVPR_2016_paper.pdf
+6f08885b980049be95a991f6213ee49bbf05c48d,http://pdfs.semanticscholar.org/6f08/885b980049be95a991f6213ee49bbf05c48d.pdf,,,http://www.isir.upmc.fr/files/2013ACLI2838.pdf
+6f0900a7fe8a774a1977c5f0a500b2898bcbe149,http://pdfs.semanticscholar.org/6f09/00a7fe8a774a1977c5f0a500b2898bcbe149.pdf,,,http://ijcsi.org/papers/7-3-10-18-27.pdf
+6fbb179a4ad39790f4558dd32316b9f2818cd106,http://pdfs.semanticscholar.org/6fbb/179a4ad39790f4558dd32316b9f2818cd106.pdf,,,http://arxiv.org/abs/1603.06655
+6f84e61f33564e5188136474f9570b1652a0606f,https://arxiv.org/pdf/1708.00284v1.pdf,,,https://arxiv.org/pdf/1708.00284v2.pdf
+6f35b6e2fa54a3e7aaff8eaf37019244a2d39ed3,http://www.ifp.uiuc.edu/~iracohen/publications/CohenSebeMS05.pdf,,https://doi.org/10.1007/s00530-005-0177-4,http://www.ifp.illinois.edu/~iracohen/publications/CohenSebeMS05.pdf
+6fa3857faba887ed048a9e355b3b8642c6aab1d8,http://pdfs.semanticscholar.org/6fa3/857faba887ed048a9e355b3b8642c6aab1d8.pdf,,https://doi.org/10.1007/978-3-319-28501-6_11,https://infoscience.epfl.ch/record/217475/files/Gunther_SPRINGER_2016.pdf
+6fda12c43b53c679629473806c2510d84358478f,http://pdfs.semanticscholar.org/6fda/12c43b53c679629473806c2510d84358478f.pdf,,,http://academians.org/Media/Default/Articles/June2011/paper2.pdf
+6f8cffd9904415c8fa3a1e650ac143867a04f40a,,,https://doi.org/10.1016/j.neucom.2015.01.099,
+6f7ce89aa3e01045fcd7f1c1635af7a09811a1fe,http://mirlab.org/conference_papers/International_Conference/ICASSP%202012/pdfs/0000937.pdf,,https://doi.org/10.1109/ICASSP.2012.6288039,
+6fe2efbcb860767f6bb271edbb48640adbd806c3,https://eprints.soton.ac.uk/359808/1/version9.pdf,,,http://eprints.soton.ac.uk/359808/1/version9.pdf
+6f5151c7446552fd6a611bf6263f14e729805ec7,http://pdfs.semanticscholar.org/6f51/51c7446552fd6a611bf6263f14e729805ec7.pdf,,,http://xm2vtsdb.ee.surrey.ac.uk/CVSSP/Publications/papers/Smith-SUEMA-2010.pdf
+030c82b87e3cdc5ba35c443a93ff4a9d21c2bc2f,http://www.cfar.umd.edu/~shaohua/papers/zhou07tpami_gps.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2007.25
+03d9ccce3e1b4d42d234dba1856a9e1b28977640,http://pdfs.semanticscholar.org/03d9/ccce3e1b4d42d234dba1856a9e1b28977640.pdf,,,http://ibug.doc.ic.ac.uk/media/uploads/documents/egpaper_final_1.pdf
+036c41d67b49e5b0a578a401eb31e5f46b3624e0,http://www.infomus.org/Events/proceedings/ACII2015/papers/Main_Conference/M2_Poster/Poster_Teaser_5/ACII2015_submission_19.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ACII.2015.7344639
+03b03f5a301b2ff88ab3bb4969f54fd9a35c7271,http://pdfs.semanticscholar.org/03b0/3f5a301b2ff88ab3bb4969f54fd9a35c7271.pdf,,,http://arxiv.org/abs/1707.06923
+03f7041515d8a6dcb9170763d4f6debd50202c2b,http://biometrics.cse.msu.edu/Publications/Face/OttoWangJain_ClusteringMillionsOfFacesByIdentity_TPAMI17.pdf,,,http://arxiv.org/abs/1604.00989
+0387b32d0ebd034dc778972367e7d4194223785d,,,,http://doi.acm.org/10.1145/2522848.2531740
+03b99f5abe0e977ff4c902412c5cb832977cf18e,http://pdfs.semanticscholar.org/03b9/9f5abe0e977ff4c902412c5cb832977cf18e.pdf,,https://doi.org/10.5244/C.27.39,http://www.robots.ox.ac.uk/~vgg/publications/2013/Crowley13/crowley13.pdf
+038ce930a02d38fb30d15aac654ec95640fe5cb0,http://www.robots.ox.ac.uk/~tvg/publications/2013/BVGFacialFeatureTrackerMobile.pdf,,,http://cms.brookes.ac.uk/research/visiongroup/publications/2013/BVGFacialFeatureTrackerMobile.pdf
+03167776e17bde31b50f294403f97ee068515578,http://pdfs.semanticscholar.org/0316/7776e17bde31b50f294403f97ee068515578.pdf,,,http://www-ee.ccny.cuny.edu/www/web/yltian/Publications/FR-Handbook.pdf
+0334a8862634988cc684dacd4279c5c0d03704da,https://arxiv.org/pdf/1609.06591v1.pdf,,,http://arxiv.org/pdf/1609.06591v2.pdf
+03c1fc9c3339813ed81ad0de540132f9f695a0f8,http://pdfs.semanticscholar.org/03c1/fc9c3339813ed81ad0de540132f9f695a0f8.pdf,,,https://dam-prod.media.mit.edu/x/2018/02/06/Gender%20Shades%20Intersectional%20Accuracy%20Disparities.pdf
+03333e7ec198208c13627066bc76b0367f5e270f,,,https://doi.org/10.1109/IJCNN.2017.7966100,
+03e1480f1de2ffbd85655d68aae63a01685c5862,,,https://doi.org/10.1109/ICPR.2014.771,
+0339459a5b5439d38acd9c40a0c5fea178ba52fb,http://pdfs.semanticscholar.org/0339/459a5b5439d38acd9c40a0c5fea178ba52fb.pdf,,,http://mmi.tudelft.nl/pub/dragos/Multimodal%20recognition%20of%20emotions%20in%20car%20environments.pdf
+030ef31b51bd4c8d0d8f4a9a32b80b9192fe4c3f,http://pdfs.semanticscholar.org/030e/f31b51bd4c8d0d8f4a9a32b80b9192fe4c3f.pdf,,,http://www.jneurosci.org/content/jneuro/35/34/11936.full.pdf
+03f98c175b4230960ac347b1100fbfc10c100d0c,http://courses.cs.washington.edu/courses/cse590v/13au/intraface.pdf,,,http://www.humansensing.cs.cmu.edu/sites/default/files/6main.pdf
+0341405252c80ff029a0d0065ca46d0ade943b03,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.40
+0323b618d3a4c24bdda4f42361e19a2a7d497da5,http://www.ecse.rpi.edu/homepages/qji/Papers/Simultaneous%20Paper_TIP_Revised_V4_email.pdf,,https://doi.org/10.1109/TIP.2013.2253477,http://www.ecse.rpi.edu/~qji/Papers/TIP_Exp_AU_Fea.pdf
+03264e2e2709d06059dd79582a5cc791cbef94b1,http://pdfs.semanticscholar.org/0326/4e2e2709d06059dd79582a5cc791cbef94b1.pdf,,,https://arxiv.org/pdf/1604.08865v2.pdf
+03dba79518434ba4a937b2980fbdc8bafc048b36,http://people.ee.duke.edu/~jh313/resource/TRAIT.pdf,,https://doi.org/10.1109/TSP.2015.2500889,http://arxiv.org/abs/1507.04230
+03a8f53058127798bc2bc0245d21e78354f6c93b,http://www.robots.ox.ac.uk/~vgg/rg/slides/additiveclassifiers.pdf,,https://doi.org/10.1109/ICCV.2009.5459203,http://people.cs.umass.edu/~smaji/papers/additive-iccv09.pdf
+03fc466fdbc8a2efb6e3046fcc80e7cb7e86dc20,https://ias.in.tum.de/_media/spezial/bib/mayer08arealtime.pdf,,https://doi.org/10.1109/AFGR.2008.4813440,http://ias.cs.tum.edu/_media/spezial/bib/mayer08arealtime.pdf
+03babadaaa7e71d4b65203e27e8957db649155c6,,,https://doi.org/10.1109/TIP.2017.2725578,
+0363e93d49d2a3dbe057cc7754825ebf30f0f816,http://nichol.as/papers/Everingham/Identifying%20individuals%20in%20video%20by%20combining%20generative.pdf,,,http://www.robots.ox.ac.uk/~vgg/publications/papers/everingham05.ps.gz
+03b98b4a2c0b7cc7dae7724b5fe623a43eaf877b,http://pdfs.semanticscholar.org/03b9/8b4a2c0b7cc7dae7724b5fe623a43eaf877b.pdf,,,http://affect.media.mit.edu/pdfs/11.McDuff-etal-Acume-2011.pdf
+0329d9be8ab1e3a1d5e4b9e7db5af5bbcc64e36f,,,,
+03adcf58d947a412f3904a79f2ab51cfdf0e838a,http://pdfs.semanticscholar.org/03ad/cf58d947a412f3904a79f2ab51cfdf0e838a.pdf,,,http://www.stupros.com/site/postconcept/video_based_face_recognition.pdf
+03104f9e0586e43611f648af1132064cadc5cc07,http://pdfs.semanticscholar.org/51c0/2f135d6c960b1141bde539059a279f9beb78.pdf,,https://doi.org/10.1016/j.knosys.2017.02.031,https://arxiv.org/pdf/1403.2330v3.pdf
+03f14159718cb495ca50786f278f8518c0d8c8c9,http://www.acscrg.com/iccsce/2015/wp-content/uploads/2015/11/The-Latest-Schedule-23-Nov-2015.pdf,,https://doi.org/10.1109/ICCSCE.2015.7482159,
+0394040749195937e535af4dda134206aa830258,http://web.eecs.umich.edu/~hero/Preprints/sp_mlsi_submitted_revised2.pdf,,https://doi.org/10.1109/TSP.2004.831130,http://web.eecs.umich.edu/~hero/Preprints/GeodesicEntropicGraphs.pdf
+0334cc0374d9ead3dc69db4816d08c917316c6c4,http://pdfs.semanticscholar.org/0334/cc0374d9ead3dc69db4816d08c917316c6c4.pdf,,,https://arxiv.org/pdf/1708.02412v1.pdf
+03c48d8376990cff9f541d542ef834728a2fcda2,http://dvmmweb.cs.columbia.edu/files/dvmm_scnn_paper.pdf,,,http://openaccess.thecvf.com/content_cvpr_2016/papers/Shou_Temporal_Action_Localization_CVPR_2016_paper.pdf
+0319332ded894bf1afe43f174f5aa405b49305f0,http://pdfs.semanticscholar.org/0319/332ded894bf1afe43f174f5aa405b49305f0.pdf,,https://doi.org/10.1007/978-3-642-41184-7_62,http://www.math.uh.edu/~dlabate/ICIAP_2013.pdf
+03af8cf40283ff30f1da3637b024319d0c79bdf0,https://www.researchgate.net/profile/Gary_Mckeown/publication/224251574_The_Belfast_Induced_Natural_Emotion_Database/links/0fcfd510a6b4384822000000.pdf,,,http://doi.ieeecomputersociety.org/10.1109/T-AFFC.2011.26
+03d1d0a665e358863ff4de9ee7d78f64edd7e756,,,,
+0343f9401b98de36be957a30209fef45dd684270,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2015.7163134
+03baf00a3d00887dd7c828c333d4a29f3aacd5f5,http://pdfs.semanticscholar.org/03ba/f00a3d00887dd7c828c333d4a29f3aacd5f5.pdf,,,http://i-rep.emu.edu.tr:8080/jspui/bitstream/11129/1686/1/YurtkanKamil.pdf
+0359f7357ea8191206b9da45298902de9f054c92,http://arxiv.org/pdf/1511.04110v1.pdf,,,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477450
+0394e684bd0a94fc2ff09d2baef8059c2652ffb0,http://www.ee.oulu.fi/research/mvmp/mvg/files/pdf/TIP2522378.pdf,,https://doi.org/10.1109/TIP.2016.2522378,http://www.ee.cuhk.edu.hk/~xgwang/papers/liuLFGWPtip16.pdf
+03e88bf3c5ddd44ebf0e580d4bd63072566613ad,http://pdfs.semanticscholar.org/03e8/8bf3c5ddd44ebf0e580d4bd63072566613ad.pdf,,,https://arxiv.org/pdf/1709.06126v1.pdf
+03f4c0fe190e5e451d51310bca61c704b39dcac8,http://pdfs.semanticscholar.org/03f4/c0fe190e5e451d51310bca61c704b39dcac8.pdf,,https://doi.org/10.1007/s12652-016-0406-z,http://speakit.cn/Group/file/2016_CHEAVD_AIHC_SCI-Ya%20Li.pdf
+03bd58a96f635059d4bf1a3c0755213a51478f12,https://arxiv.org/pdf/1401.7413v2.pdf,,https://doi.org/10.1109/TIP.2014.2380155,http://www.cis.pku.edu.cn/faculty/vision/zlin/Publications/2015-TIP-SmoothedLRR.pdf
+031055c241b92d66b6984643eb9e05fd605f24e2,https://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Cinbis_Multi-fold_MIL_Training_2014_CVPR_paper.pdf,,,https://hal.inria.fr/hal-00975746/PDF/paper.pdf
+0332ae32aeaf8fdd8cae59a608dc8ea14c6e3136,https://ibug.doc.ic.ac.uk/media/uploads/documents/booth2017large.pdf,,https://doi.org/10.1007/s11263-017-1009-7,https://ibug.doc.ic.ac.uk/media/uploads/documents/ijcv-16_(1).pdf
+032a1c95388fb5c6e6016dd8597149be40bc9d4d,http://people.eecs.berkeley.edu/~gkioxari/ActionTubes/action_tubes.pdf,,,https://people.eecs.berkeley.edu/~gkioxari/ActionTubes/action_tubes.pdf
+034addac4637121e953511301ef3a3226a9e75fd,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Parikh_Implied_Feedback_Learning_2013_ICCV_paper.pdf,,,http://www.cs.utexas.edu/~grauman/papers/ParikhGrauman_ICCV_2013_nuances.pdf
+03701e66eda54d5ab1dc36a3a6d165389be0ce79,http://www.eem.anadolu.edu.tr/atalaybarkan/EEM%20405%20(K)/icerik/improved%20pcr.pdf,,https://doi.org/10.1109/LSP.2012.2185492,
+034c2ed71c31cb0d984d66c7ca753ef2cb6196ca,http://pdfs.semanticscholar.org/034c/2ed71c31cb0d984d66c7ca753ef2cb6196ca.pdf,,https://doi.org/10.1016/j.patcog.2017.03.034,https://panzhous.github.io/assets/pdf/2017-PR-FE-PDE.pdf
+9b318098f3660b453fbdb7a579778ab5e9118c4c,http://humansensing.cs.cmu.edu/sites/default/files/07471506.pdf,,https://doi.org/10.1109/TIP.2016.2570550,http://www.humansensing.cs.cmu.edu/sites/default/files/07471506.pdf
+9b78ce9fdac30864d1694a56328b3c8a96cccef5,,,https://doi.org/10.1089/cpb.2004.7.635,
+9be94fa0330dd493f127d51e4ef7f9fd64613cfc,http://pdfs.semanticscholar.org/9be9/4fa0330dd493f127d51e4ef7f9fd64613cfc.pdf,,https://doi.org/10.1049/iet-bmt.2015.0008,http://eprints.eemcs.utwente.nl/26381/01/M_K_2015_IET_Biometrics.pdf
+9b9ccd4954cf9dd605d49e9c3504224d06725ab7,,,,
+9bd3cafa16a411815f8f87ed3eb3cafefc25e5a3,,,https://doi.org/10.1109/ICPR.2016.7899782,
+9b000ccc04a2605f6aab867097ebf7001a52b459,http://pdfs.semanticscholar.org/9b00/0ccc04a2605f6aab867097ebf7001a52b459.pdf,,,http://arxiv.org/abs/1603.00944
+9b0489f2d5739213ef8c3e2e18739c4353c3a3b7,http://pdfs.semanticscholar.org/9b04/89f2d5739213ef8c3e2e18739c4353c3a3b7.pdf,,,https://arxiv.org/pdf/1801.06665v1.pdf
+9b474d6e81e3b94e0c7881210e249689139b3e04,http://pdfs.semanticscholar.org/a43c/c0c2f1d0e29cf1ee88f3bde4289a94b70409.pdf,,,https://www.inf.ufes.br/~claudine/Papers/2009_InTech_De_Souza.pdf
+9b928c0c7f5e47b4480cb9bfdf3d5b7a29dfd493,http://www.ifp.illinois.edu/~jyang29/papers/JRR_ICCV11.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2011.6126315
+9b43897c551b134852bda113355f340e605ad4e7,,,,
+9b8830655d4a5a837e3ffe835d14d6d71932a4f2,,,https://doi.org/10.1109/TSMCB.2011.2169452,
+9bcfadd22b2c84a717c56a2725971b6d49d3a804,http://pdfs.semanticscholar.org/9bcf/add22b2c84a717c56a2725971b6d49d3a804.pdf,,,http://hmi.ewi.utwente.nl/verslagen/capita-selecta/CS-Maat-Mark-ter.pdf
+9ba358281f2946cba12fff266019193a2b059590,,,,http://doi.ieeecomputersociety.org/10.1109/ISM.2008.27
+9b42fb48d5ac70b6ca5382f50e71ed8bf3a84710,,,,
+9b07084c074ba3710fee59ed749c001ae70aa408,http://pdfs.semanticscholar.org/9b07/084c074ba3710fee59ed749c001ae70aa408.pdf,,,http://cbcsl.ece.ohio-state.edu/CDPS698535_Martinez_REV.pdf
+9b246c88a0435fd9f6d10dc88f47a1944dd8f89e,http://pdfs.semanticscholar.org/ffe3/a5a7c0faebd1719f7c77b5f7e05cae61a9ad.pdf,,,http://cs.dartmouth.edu/~lorenzo/Papers/btf-nips11.pdf
+9b4d2cd2e5edbf5c8efddbdcce1db9a02a853534,,,https://doi.org/10.1016/j.neucom.2016.02.063,
+9b164cef4b4ad93e89f7c1aada81ae7af802f3a4,http://pdfs.semanticscholar.org/9b16/4cef4b4ad93e89f7c1aada81ae7af802f3a4.pdf,,,http://www.isca.in/IJMS/Archive/v2/i1/4.ISCA-RJRS-2012-355.pdf
+9b93406f3678cf0f16451140ea18be04784faeee,http://pdfs.semanticscholar.org/9b93/406f3678cf0f16451140ea18be04784faeee.pdf,,https://doi.org/10.1007/978-3-642-33786-4_18,http://people.csail.mit.edu/celiu/pdfs/ECCV12-ImageHallucination.pdf
+9b9a1f18749e969c8f246894e59c62ae86b079be,,,,
+9b1a70d6771547cbcf6ba646f8775614c0162aca,,,https://doi.org/10.1016/j.patrec.2016.11.005,
+9b7974d9ad19bb4ba1ea147c55e629ad7927c5d7,http://pdfs.semanticscholar.org/9b79/74d9ad19bb4ba1ea147c55e629ad7927c5d7.pdf,,,http://bcmi.sjtu.edu.cn/~duruofei/papers/ses2012.pdf
+9b1c218a55ead45296bfd7ad315aaeff1ae9983e,,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2481396
+9b6d0b3fbf7d07a7bb0d86290f97058aa6153179,http://pdfs.semanticscholar.org/9b6d/0b3fbf7d07a7bb0d86290f97058aa6153179.pdf,,,http://crcv.ucf.edu/ICCV13-Action-Workshop/index.files/NotebookPapers13/Paper%2031%20(Supplementary).pdf
+9bc289a32bb5ab54b7a178b7234799f32e0568ce,,,,
+9e8637a5419fec97f162153569ec4fc53579c21e,http://pdfs.semanticscholar.org/9e86/37a5419fec97f162153569ec4fc53579c21e.pdf,,https://doi.org/10.1007/978-3-319-11599-3_16,https://www.dasec.h-da.de/wp-content/uploads/2014/07/PflugBuch-CPR-NordSec2014.pdf
+9e8382aa1de8f2012fd013d3b39838c6dad8fb4d,,,,http://doi.acm.org/10.1145/3123266.3123349
+9e4b052844d154c3431120ec27e78813b637b4fc,http://pdfs.semanticscholar.org/9e4b/052844d154c3431120ec27e78813b637b4fc.pdf,,,http://jad.shahroodut.ac.ir/article_147_65fecf9ec71ccfdba7f9ba1bf07251b1.pdf
+9e42d44c07fbd800f830b4e83d81bdb9d106ed6b,http://openaccess.thecvf.com/content_ICCV_2017/papers/Rao_Learning_Discriminative_Aggregation_ICCV_2017_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2017.408
+9eb86327c82b76d77fee3fd72e2d9eff03bbe5e0,http://pdfs.semanticscholar.org/9eb8/6327c82b76d77fee3fd72e2d9eff03bbe5e0.pdf,,,http://arxiv.org/abs/1710.08585
+9ea73660fccc4da51c7bc6eb6eedabcce7b5cead,http://pdfs.semanticscholar.org/9ea7/3660fccc4da51c7bc6eb6eedabcce7b5cead.pdf,,,http://www.isca-speech.org/archive/slam_2014/slm4_009.html
+9e5690cdb4dfa30d98dff653be459e1c270cde7f,,,https://doi.org/10.1109/ICIP.2017.8297080,
+9e9052256442f4e254663ea55c87303c85310df9,http://pdfs.semanticscholar.org/9e90/52256442f4e254663ea55c87303c85310df9.pdf,,,http://ijarcet.org/wp-content/uploads/IJARCET-VOL-4-ISSUE-10-3772-3774.pdf
+9eeada49fc2cba846b4dad1012ba8a7ee78a8bb7,http://pdfs.semanticscholar.org/9eea/da49fc2cba846b4dad1012ba8a7ee78a8bb7.pdf,,,http://www.matlabi.ir/wp-content/uploads/bank_papers/g_paper/g105_Matlabi.ir_A%20New%20Facial%20Expression%20Recognition%20Method%20Based%20on%20Local%20Gabor%20Filter%20Bank%20and%20PCA%20plus%20LDA.pdf
+9ef2b2db11ed117521424c275c3ce1b5c696b9b3,http://pdfs.semanticscholar.org/c31b/dd00734807938dcfd8a12375bd9ffa556985.pdf,,https://doi.org/10.1007/978-3-319-46454-1_50,http://arxiv.org/pdf/1511.04404v1.pdf
+9e5acdda54481104aaf19974dca6382ed5ff21ed,http://pdfs.semanticscholar.org/dd52/0f2ebcf8034cb168ab4e82acec9a69fe0188.pdf,,,http://www.sis.uta.fi/cs/reports/dsarja/D-2008-9.pdf
+9e5809122c0880183c7e42c7edd997f92de6d81e,,,,http://doi.acm.org/10.1145/2451176.2451209
+9e7646b7e9e89be525cda1385cc1351cc28a896e,,,,http://doi.ieeecomputersociety.org/10.1109/TMC.2017.2702634
+9ed943f143d2deaac2efc9cf414b3092ed482610,http://www.jaist.ac.jp/~chen-fan/publication/ism2014-07032993.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ISM.2014.48
+9e1c3b8b1653337094c1b9dba389e8533bc885b0,http://pdfs.semanticscholar.org/9e1c/3b8b1653337094c1b9dba389e8533bc885b0.pdf,,https://doi.org/10.1007/978-3-540-74549-5_49,http://media.cs.tsinghua.edu.cn/~imagevision/papers/ICB07_demographic.pdf
+9e99f818b37d44ec6aac345fb2c5356d83d511c7,,,https://doi.org/10.1109/ISSPA.2012.6310540,
+9eaa967d19fc66010b7ade7d94eaf7971a1957f3,,,https://doi.org/10.1109/IWCIA.2013.6624793,
+9eb13f8e8d948146bfbae1260e505ba209c7fdc1,,,https://doi.org/10.1109/AFGR.2008.4813404,
+9e28243f047cc9f62a946bf87abedb65b0da0f0a,,,https://doi.org/10.1109/ICMLA.2013.141,
+9e0285debd4b0ba7769b389181bd3e0fd7a02af6,http://pdfs.semanticscholar.org/9e02/85debd4b0ba7769b389181bd3e0fd7a02af6.pdf,,https://doi.org/10.1007/978-3-319-54187-7_21,http://www.vision.ee.ethz.ch/en/publications/papers/proceedings/eth_biwi_01314.pdf
+9ed4ad41cbad645e7109e146ef6df73f774cd75d,http://pdfs.semanticscholar.org/a83e/175ad5b2066e207f5d2ec830ae05bac266b9.pdf,,https://doi.org/10.5244/C.27.77,http://www.bmva.org/bmvc/2013/Papers/paper0077/paper0077.pdf
+9ef06cc958af2274afd193a1dca705c08234bcd3,,,https://doi.org/10.1109/ICIP.2014.7026207,
+9e60614fd57afe381ae42c6ee0b18f32f60bb493,,,https://doi.org/10.1109/ICIP.2015.7351544,
+9e8d87dc5d8a6dd832716a3f358c1cdbfa97074c,https://people.csail.mit.edu/khosla/papers/www2014_khosla.pdf,,,http://people.csail.mit.edu/khosla/papers/www2014_khosla.pdf
+9e5c2d85a1caed701b68ddf6f239f3ff941bb707,http://pdfs.semanticscholar.org/ada4/4aa744f9703cacfcd0028372a2b1684a45a3.pdf,,,http://www.researchgate.net/profile/Ayseguel_Ucar/publication/258104959_A._Uar_Facial_Expression_Recognition_Based_on_Significant_Face_Components_Using_Steerable_Pyramid_Transform_2013_International_conference_on_Image_Processing_Computer_Vision_and_Pattern_Recognition_687-692_Las_Vegas_USA_22-25_July_2013/links/0deec526f5fb5bfdfb000000.pdf
+044d9a8c61383312cdafbcc44b9d00d650b21c70,https://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_iccv_2013_300_w.pdf,,,http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W11/papers/Sagonas_300_Faces_in-the-Wild_2013_ICCV_paper.pdf
+04bb3fa0824d255b01e9db4946ead9f856cc0b59,http://pdfs.semanticscholar.org/c1de/db5ac05c955e53d7ef1f6367fb7badea49b1.pdf,,,http://arxiv.org/abs/1708.07972
+04f0292d9a062634623516edd01d92595f03bd3f,http://www.cs.nott.ac.uk/~mfv/Documents/emotiw2013_cameraready.pdf,,,http://doi.acm.org/10.1145/2522848.2531742
+0486eb243d167ab4b197b682e9eff9684b273df4,,,,
+047f6afa87f48de7e32e14229844d1587185ce45,http://pdfs.semanticscholar.org/047f/6afa87f48de7e32e14229844d1587185ce45.pdf,,https://doi.org/10.1007/978-3-319-07998-1_59,http://mrl.cs.vsb.cz/publications/fusek_icisp_2014.pdf
+049186d674173ebb76496f9ecee55e17ed1ca41b,,,https://doi.org/10.1109/ACCESS.2017.2724763,
+04522dc16114c88dfb0ebd3b95050fdbd4193b90,http://www.svcl.ucsd.edu/publications/conference/2005/crv05/FES.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CRV.2005.53
+04470861408d14cc860f24e73d93b3bb476492d0,http://pdfs.semanticscholar.org/0447/0861408d14cc860f24e73d93b3bb476492d0.pdf,,,http://waset.org/publications/4963/face-recognition-using-features-combination-and-a-new-non-linear-kernel
+0486214fb58ee9a04edfe7d6a74c6d0f661a7668,http://conradsanderson.id.au/pdfs/wong_face_selection_cvpr_biometrics_2011.pdf,,https://doi.org/10.1109/CVPRW.2011.5981881,https://www.researchgate.net/profile/Brian_Lovell2/publication/224253078_Patch-based_probabilistic_image_quality_assessment_for_face_selection_and_improved_video-based_face_recognition/links/0c960522665933c95d000000.pdf?origin=publication_list
+0447bdb71490c24dd9c865e187824dee5813a676,http://pdfs.semanticscholar.org/0447/bdb71490c24dd9c865e187824dee5813a676.pdf,,,http://www.vis.uky.edu/~cheung/doc/bmvc08.pdf
+0435a34e93b8dda459de49b499dd71dbb478dc18,http://pdfs.semanticscholar.org/0435/a34e93b8dda459de49b499dd71dbb478dc18.pdf,,,https://arxiv.org/pdf/1803.05719v1.pdf
+044ba70e6744e80c6a09fa63ed6822ae241386f2,http://pdfs.semanticscholar.org/044b/a70e6744e80c6a09fa63ed6822ae241386f2.pdf,,,https://arxiv.org/pdf/1709.09269v1.pdf
+045e83272db5e92aa4dc8bdfee908534c2608711,,,,http://doi.ieeecomputersociety.org/10.1109/ICCABS.2016.7802775
+046770df59c49c7ca9a1a4c268176ede2aa89e37,,,,
+047d3cb2a6a9628b28cac077b97d95b04ca9044c,,,https://doi.org/10.1109/FG.2011.5771332,
+04661729f0ff6afe4b4d6223f18d0da1d479accf,https://arxiv.org/pdf/1509.06451v1.pdf,,,http://arxiv.org/abs/1509.06451
+041b51a81a977b5c64682c55414ad8d165c1f2ce,,,https://doi.org/10.1109/TCE.2014.7027339,
+04f56dc5abee683b1e00cbb493d031d303c815fd,,,,http://doi.acm.org/10.1145/2808492.2808557
+04dcdb7cb0d3c462bdefdd05508edfcff5a6d315,http://pdfs.semanticscholar.org/04dc/db7cb0d3c462bdefdd05508edfcff5a6d315.pdf,,,http://diposit.ub.edu/dspace/bitstream/2445/67591/1/ADRIANA_ROMERO_PhD_THESIS.pdf
+044fdb693a8d96a61a9b2622dd1737ce8e5ff4fa,http://www.ee.oulu.fi/mvg/files/pdf/pdf_740.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2007.1110
+04f55f81bbd879773e2b8df9c6b7c1d324bc72d8,http://pdfs.semanticscholar.org/04f5/5f81bbd879773e2b8df9c6b7c1d324bc72d8.pdf,,,http://arxiv.org/pdf/1403.1327v1.pdf
+04c07ecaf5e962ac847059ece3ae7b6962b4e5c4,,,,http://doi.acm.org/10.1145/2993148.2997631
+04250e037dce3a438d8f49a4400566457190f4e2,http://pdfs.semanticscholar.org/0425/0e037dce3a438d8f49a4400566457190f4e2.pdf,,https://doi.org/10.1016/S0031-3203(00)00162-X,http://www.cs.cmu.edu/~hyu/rjpr.pdf
+0431e8a01bae556c0d8b2b431e334f7395dd803a,https://people.cs.umass.edu/~smaji/papers/localized-wacv15.pdf,,,http://vision.cs.utexas.edu/hmcv2014/wah_etal_hmcv2014.pdf
+04616814f1aabe3799f8ab67101fbaf9fd115ae4,http://pdfs.semanticscholar.org/0461/6814f1aabe3799f8ab67101fbaf9fd115ae4.pdf,,,http://hal.inria.fr/docs/00/76/76/99/PDF/thesis.pdf
+04c5268d7a4e3819344825e72167332240a69717,http://longwood.cs.ucf.edu/~vision/papers/cvpr2008/7.pdf,,,http://www.cs.ucf.edu/~vision/papers/cvpr2008/7.pdf
+04c2cda00e5536f4b1508cbd80041e9552880e67,http://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf,,https://doi.org/10.1007/978-3-319-10590-1_31,http://tamaraberg.com/papers/hipster_eccv14.pdf
+04644c97784700c449f2c885cb4cab86447f0bd4,http://www.seekdl.org/upload/files/20131209_014911.pdf,,,
+04ff69aa20da4eeccdabbe127e3641b8e6502ec0,http://www.cv-foundation.org/openaccess/content_cvpr_2016_workshops/w28/papers/Peng_Sequential_Face_Alignment_CVPR_2016_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2016.194
+047ce307ad0c871bc2c9a5c1e4649cefae2ba50d,,,https://doi.org/10.1109/ICRA.2012.6224587,
+046a694bbb3669f2ff705c6c706ca3af95db798c,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Xiong_Conditional_Convolutional_Neural_ICCV_2015_paper.pdf,,,http://openaccess.thecvf.com/content_iccv_2015/papers/Xiong_Conditional_Convolutional_Neural_ICCV_2015_paper.pdf
+047d7cf4301cae3d318468fe03a1c4ce43b086ed,http://webee.technion.ac.il/~yoav/publications/Delforge_taslp14R2.pdf,,https://doi.org/10.1109/TASLP.2015.2405475,http://arxiv.org/pdf/1408.2700v4.pdf
+045275adac94cced8a898a815293700401e9955f,,,https://doi.org/10.1007/s00138-012-0447-z,
+046865a5f822346c77e2865668ec014ec3282033,http://www.csie.ntu.edu.tw/~winston/papers/chen12discovering.pdf,,,http://www.cmlab.csie.ntu.edu.tw/~yanying/paper/fp060-chen.pdf
+042825549296ea419d95fcf0b5e71f72070a5f0d,http://eprints.pascal-network.org/archive/00008397/01/paper.pdf,,https://doi.org/10.1007/s11263-011-0447-x,http://www.vision.ee.ethz.ch/en/publications/papers/articles/eth_biwi_01053.pdf
+0470b0ab569fac5bbe385fa5565036739d4c37f8,https://hal.inria.fr/inria-00321048/file/verbeek08cvpr.pdf,,,http://lear.inrialpes.fr/pubs/2008/GMVS08/GMVS08.pdf
+6a3a07deadcaaab42a0689fbe5879b5dfc3ede52,http://pdfs.semanticscholar.org/6a3a/07deadcaaab42a0689fbe5879b5dfc3ede52.pdf,,,http://arxiv.org/abs/1704.04081
+6a67e6fbbd9bcd3f724fe9e6cecc9d48d1b6ad4d,http://pdfs.semanticscholar.org/6a67/e6fbbd9bcd3f724fe9e6cecc9d48d1b6ad4d.pdf,,,https://arxiv.org/pdf/1705.05512v1.pdf
+6a3fa483c64e72d9c96663ff031446a2bdb6b2eb,,,https://doi.org/10.1016/j.patcog.2017.02.003,
+6afed8dc29bc568b58778f066dc44146cad5366c,http://pdfs.semanticscholar.org/6afe/d8dc29bc568b58778f066dc44146cad5366c.pdf,,,http://eprints.pascal-network.org/archive/00000376/01/slcv04.pdf
+6a184f111d26787703f05ce1507eef5705fdda83,http://pdfs.semanticscholar.org/6a18/4f111d26787703f05ce1507eef5705fdda83.pdf,,,http://centaur.reading.ac.uk/66125/1/1-s2.0-S1878929315301067-main.pdf
+6a16b91b2db0a3164f62bfd956530a4206b23fea,http://pdfs.semanticscholar.org/6a16/b91b2db0a3164f62bfd956530a4206b23fea.pdf,,,https://www.cpe.ku.ac.th/~jeab/papers/chinnawat_JCSSE2009.pdf
+6a806978ca5cd593d0ccd8b3711b6ef2a163d810,http://pdfs.semanticscholar.org/6a80/6978ca5cd593d0ccd8b3711b6ef2a163d810.pdf,,https://doi.org/10.1007/978-3-642-23687-7_45,http://www.isir.upmc.fr/files/2011ACTI2001.pdf
+6a38e4bb35673a73f041e34d3f2db7067482a9b5,,,,http://doi.acm.org/10.1145/2663204.2666277
+6a8a3c604591e7dd4346611c14dbef0c8ce9ba54,http://pdfs.semanticscholar.org/6a8a/3c604591e7dd4346611c14dbef0c8ce9ba54.pdf,,,http://www.science.uva.nl/research/publications/2010/DibekliogluEISWMI2010/report03.pdf
+6aa43f673cc42ed2fa351cbc188408b724cb8d50,http://pdfs.semanticscholar.org/6aa4/3f673cc42ed2fa351cbc188408b724cb8d50.pdf,,,http://arxiv.org/abs/1712.09915
+6a2b83c4ae18651f1a3496e48a35b0cd7a2196df,http://openaccess.thecvf.com/content_iccv_2015/papers/Song_Top_Rank_Supervised_ICCV_2015_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.223
+6afe1f668eea8dfdd43f0780634073ed4545af23,,,https://doi.org/10.1007/s11042-017-4962-9,
+6a5fe819d2b72b6ca6565a0de117c2b3be448b02,http://pdfs.semanticscholar.org/6a5f/e819d2b72b6ca6565a0de117c2b3be448b02.pdf,,,http://www.aaai.org/ocs/index.php/AAAI/AAAI13/paper/download/6471/7181
+6afeb764ee97fbdedfa8f66810dfc22feae3fa1f,http://pdfs.semanticscholar.org/928c/dc2049462f66460dc30aef5aaaa15e427d12.pdf,,,http://jmlr.org/proceedings/papers/v32/zhao14.html
+6a527eeb0b2480109fe987ed7eb671e0d847fca8,,,https://doi.org/10.1007/978-3-319-28515-3,
+6aa61d28750629febe257d1cb69379e14c66c67f,http://pdfs.semanticscholar.org/6aa6/1d28750629febe257d1cb69379e14c66c67f.pdf,,,http://www.kyb.tuebingen.mpg.de/publications/pdfs/pdf2302.pdf
+6adecb82edbf84a0097ff623428f4f1936e31de0,,,https://doi.org/10.1007/s11760-011-0246-4,
+6ae96f68187f1cdb9472104b5431ec66f4b2470f,http://pdfs.semanticscholar.org/6ae9/6f68187f1cdb9472104b5431ec66f4b2470f.pdf,,,http://repository.cmu.edu/cgi/viewcontent.cgi?article=1158&context=hsshonors
+6af65e2a1eba6bd62843e7bf717b4ccc91bce2b8,http://pdfs.semanticscholar.org/6af6/5e2a1eba6bd62843e7bf717b4ccc91bce2b8.pdf,,https://doi.org/10.1007/978-3-642-40705-5_10,http://www.researchgate.net/profile/Hefeng_Yin/publication/259005850_A_New_Weighted_Sparse_Representation_Based_on_MSLBP_and_Its_Application_to_Face_Recognition/links/02e7e529b45ff5ab4a000000.pdf
+6a657995b02bc9dee130701138ea45183c18f4ae,http://pdfs.semanticscholar.org/6a65/7995b02bc9dee130701138ea45183c18f4ae.pdf,,https://doi.org/10.1142/S021969130400041X,http://www.pitt.edu/~emotion/fulltext/2004/Cohn_Timing.pdf
+6a0368b4e132f4aa3bbdeada8d894396f201358a,http://pdfs.semanticscholar.org/6a03/68b4e132f4aa3bbdeada8d894396f201358a.pdf,,https://doi.org/10.1007/978-3-642-37331-2_19,http://mc.eistar.net/UpLoadFiles/Papers/%5B33%5D%202012%20ACCV%20Wangxinggang.pdf
+6afccf6c6cebfaa0579a23e7cc7737837b090f0f,,,,
+6ab33fa51467595f18a7a22f1d356323876f8262,http://www.iis.sinica.edu.tw/~kuangyu/OHRank_files/0523.pdf,,,http://www.iis.sinica.edu.tw/papers/song/12038-F.pdf
+6ae75eaa7e9f1379338eae94fbb43664bb3c898a,https://www.researchgate.net/profile/Beom_Seok_Oh/publication/254016039_Fusion_of_structured_projections_for_cancelable_face_identity_verification/links/559156c108ae15962d8e145e.pdf?origin=publication_detail,,https://doi.org/10.1109/IJCB.2011.6117588,
+6a26893ed63830d00f6d011679d1b1ed2d8466a9,,,,
+6aefe7460e1540438ffa63f7757c4750c844764d,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Nascimento_Non-rigid_Segmentation_using_2014_CVPR_paper.pdf,,,http://cs.adelaide.edu.au/~carneiro/publications/CVPR2014_TPS.pdf
+6aa0a47f4b986870370c622be51f00f3a1b9d364,,,https://doi.org/10.1109/TIP.2012.2192285,
+6ab8f2081b1420a6214a6c127e5828c14979d414,http://pdfs.semanticscholar.org/6ab8/f2081b1420a6214a6c127e5828c14979d414.pdf,,,http://face.cs.kit.edu/download/publications/Analysis_of_Local_Appearance_Biometrics_Ekenel_H_2.pdf
+6a38c575733b0f7118970238e8f9b480522a2dbc,http://pdfs.semanticscholar.org/fbee/265a61fd5ec15a6ed8f490a8fd8d3359506e.pdf,,,https://arxiv.org/pdf/1412.5083v3.pdf
+6a4ebd91c4d380e21da0efb2dee276897f56467a,http://ibug.doc.ic.ac.uk/media/uploads/documents/07025044.pdf,,https://doi.org/10.1109/ICIP.2014.7025044,https://ibug.doc.ic.ac.uk/media/uploads/documents/antonakos2014hog.pdf
+6ad5ac867c5ca56e0edaece153269d989b383b59,,,https://doi.org/10.1109/CISP-BMEI.2016.7852723,
+6a1beb34a2dfcdf36ae3c16811f1aef6e64abff2,http://pdfs.semanticscholar.org/6a1b/eb34a2dfcdf36ae3c16811f1aef6e64abff2.pdf,,,http://psych.nyu.edu/vanbavel/lab/documents/Park.etal.2012.Emotion.pdf
+6a7e464464f70afea78552c8386f4d2763ea1d9c,http://pdfs.semanticscholar.org/6a7e/464464f70afea78552c8386f4d2763ea1d9c.pdf,,,http://inpressco.com/wp-content/uploads/2014/06/Paper1361901-1907.pdf
+32925200665a1bbb4fc8131cd192cb34c2d7d9e3,http://pdfs.semanticscholar.org/3292/5200665a1bbb4fc8131cd192cb34c2d7d9e3.pdf,,,http://www.mva-org.jp/Proceedings/2009CD/papers/03-09.pdf
+322c063e97cd26f75191ae908f09a41c534eba90,https://jurie.users.greyc.fr/papers/12_SEMATR_IJCV.pdf,,https://doi.org/10.1007/s11263-012-0529-4,https://hal.archives-ouvertes.fr/hal-00805996/document
+325b048ecd5b4d14dce32f92bff093cd744aa7f8,http://pdfs.semanticscholar.org/325b/048ecd5b4d14dce32f92bff093cd744aa7f8.pdf,,,http://chenlab.ece.cornell.edu/people/Andy/publications/Andy_files/2670CVPR08Gallagher.pdf
+32f7e1d7fa62b48bedc3fcfc9d18fccc4074d347,https://arxiv.org/pdf/1410.1606v2.pdf,,https://doi.org/10.1109/ICASSP.2015.7178684,http://arxiv.org/abs/1410.1606
+32d8e555441c47fc27249940991f80502cb70bd5,https://arxiv.org/pdf/1709.07886v1.pdf,,,http://doi.acm.org/10.1145/3133956.3134077
+32d555faaaa0a6f6f9dfc9263e4dba75a38c3193,http://pdfs.semanticscholar.org/e119/eeee5025235c6f8dacc7c1812c0c52d595b9.pdf,,https://doi.org/10.1016/j.patcog.2015.09.024,http://www.comp.hkbu.edu.hk/~ymc/papers/journal/PR_5528_publication_version.pdf
+324f39fb5673ec2296d90142cf9a909e595d82cf,http://pdfs.semanticscholar.org/324f/39fb5673ec2296d90142cf9a909e595d82cf.pdf,,,http://www.maths.tcd.ie/EMIS/journals/HOA/MPE/Volume2011/864540.pdf
+321db1059032b828b223ca30f3304257f0c41e4c,,,https://doi.org/10.1109/ICACCI.2015.7275951,
+321bd4d5d80abb1bae675a48583f872af3919172,http://pdfs.semanticscholar.org/321b/d4d5d80abb1bae675a48583f872af3919172.pdf,,https://doi.org/10.1186/s13640-016-0152-3,https://www.springeropen.com/track/pdf/10.1186/s13640-016-0152-3?site=jivp-eurasipjournals.springeropen.com
+32b8c9fd4e3f44c371960eb0074b42515f318ee7,http://pdfs.semanticscholar.org/32b8/c9fd4e3f44c371960eb0074b42515f318ee7.pdf,,,https://arxiv.org/pdf/1707.00823v1.pdf
+32575ffa69d85bbc6aef5b21d73e809b37bf376d,http://www.sce.carleton.ca/faculty/adler//publications/2006/youmaran-adler-bcc2006-quality.pdf,,,http://www.sce.carleton.ca/faculty/adler/publications/2006/youmaran-adler-bcc2006-quality.pdf
+32ecbbd76fdce249f9109594eee2d52a1cafdfc7,http://pdfs.semanticscholar.org/32ec/bbd76fdce249f9109594eee2d52a1cafdfc7.pdf,,,http://arxiv.org/pdf/1609.01366v1.pdf
+32b76220ed3a76310e3be72dab4e7d2db34aa490,,,https://doi.org/10.1109/SMC.2014.6974364,
+32bab8fe6db08c9d1e906be8a9c7e8cf7a0f0b99,,,,http://doi.ieeecomputersociety.org/10.1109/CIS.2007.196
+32c20afb5c91ed7cdbafb76408c3a62b38dd9160,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Hassner_Viewing_Real-World_Faces_2013_ICCV_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2013.448
+327ae6742cca4a6a684a632b0d160dd84d0d8632,,,https://doi.org/10.1007/s10851-015-0629-1,
+32a40c43a9bc1f1c1ed10be3b9f10609d7e0cb6b,http://pdfs.semanticscholar.org/32a4/0c43a9bc1f1c1ed10be3b9f10609d7e0cb6b.pdf,,https://doi.org/10.1007/978-3-642-15552-9_23,http://vipl.ict.ac.cn/sites/default/files/papers/files/2010_ECCV_hhan_Lighting%20Aware%20Preprocessing%20for%20Face%20Recognition%20across%20Varying%20Illumination.pdf
+32c5c65db2af9691f8bb749c953c978959329f8f,,,https://doi.org/10.1109/ICIP.2015.7351469,
+329394480fc5e9e96de4250cc1a2b060c3677c94,https://arxiv.org/pdf/1604.08826v1.pdf,,,http://arxiv.org/abs/1604.08826
+326613b5528b7806d6a06f43211800b54f34965e,http://mplab.ucsd.edu/wp-content/uploads/cvpr2008/conference/data/papers/377.pdf,,https://doi.org/10.1109/ICME.2007.4284848,http://doi.ieeecomputersociety.org/10.1109/CVPR.2008.4587717
+322488c4000c686e9bfb7514ccdeacae33e53358,,,,http://doi.acm.org/10.1145/2671188.2749301
+32728e1eb1da13686b69cc0bd7cce55a5c963cdd,http://pdfs.semanticscholar.org/3272/8e1eb1da13686b69cc0bd7cce55a5c963cdd.pdf,,,http://jist.ir/WebUsers/jist/UploadFiles/OK/13951025102194610-F.pdf
+32c9ebd2685f522821eddfc19c7c91fd6b3caf22,http://pdfs.semanticscholar.org/32c9/ebd2685f522821eddfc19c7c91fd6b3caf22.pdf,,https://doi.org/10.1007/978-3-642-33715-4_24,http://mx.nthu.edu.tw/~tsunghan/papers/conference%20papers/Finding%20Correspondence%20from%20Multiple%20Images%20via%20Sparse%20and%20Low-Rank%20Decomposition.pdf
+322b7a4ce006e4d14748dd064e80ffba573ebcd7,http://cheonji.kaist.ac.kr/pdfsrc/ic/2008_KHAn_ROMAN.pdf,,https://doi.org/10.1109/ROMAN.2008.4600644,
+3270b2672077cc345f188500902eaf7809799466,http://pdfs.semanticscholar.org/3270/b2672077cc345f188500902eaf7809799466.pdf,,,http://biometrics.cse.msu.edu/Publications/Thesis/Reserved/KarthikNandakumar_MultibiometricSystems_PhD08.pdf
+32dfd4545c87d9820cc92ca912c7d490794a81d6,,,https://doi.org/10.1007/978-3-319-50551-0,
+32adde2e33f4344900829c557c8533f8f0979f10,,,,
+321c8ba38db118d8b02c0ba209be709e6792a2c7,http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W11/papers/Yan_Learn_to_Combine_2013_ICCV_paper.pdf,,,
+324b9369a1457213ec7a5a12fe77c0ee9aef1ad4,http://research.nvidia.com/sites/default/files/pubs/2017-07_Dynamic-Facial-Analysis/rnnface.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.167
+328da943e22adef5957c08b6909bda09d931a350,,,https://doi.org/10.1109/ICARCV.2008.4795605,
+3288e16c62a215254e2ed7c39675482b356c3bef,,,https://doi.org/10.1109/SACI.2016.7507341,
+329b2781007604652deb72139d14315df3bc2771,,,,http://doi.acm.org/10.1145/2671188.2749358
+32a440720ee988b7b41de204b2910775171ee12c,,,https://doi.org/10.1109/ICIP.2011.6116351,
+3251f40ed1113d592c61d2017e67beca66e678bb,,,https://doi.org/10.1007/978-3-319-65172-9_17,
+329d58e8fb30f1bf09acb2f556c9c2f3e768b15c,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Wu_Leveraging_Intra_and_CVPR_2017_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2017.261
+32df63d395b5462a8a4a3c3574ae7916b0cd4d1d,http://www.ppgia.pucpr.br/~alekoe/Papers/ALEKOE-FacialExpression-ICASSP2011.pdf,,https://doi.org/10.1109/ICASSP.2011.5946775,http://mirlab.org/conference_papers/International_Conference/ICASSP%202011/pdfs/0001489.pdf
+35308a3fd49d4f33bdbd35fefee39e39fe6b30b7,https://biblio.ugent.be/publication/7238034/file/7238038.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2015.7163123
+353b6c1f431feac6edde12b2dde7e6e702455abd,http://pdfs.semanticscholar.org/8835/c80f8ad8ebd05771a9bce5a8637efbc4c8e3.pdf,,https://doi.org/10.1007/978-3-642-33718-5_59,http://www4.comp.polyu.edu.hk/~cslzhang/paper/conf/MPCRC_eccv12.pdf
+350da18d8f7455b0e2920bc4ac228764f8fac292,http://pdfs.semanticscholar.org/b1b1/19c94c8bf94da5c9974db537e356e4f80c67.pdf,,,http://www-ee.ccny.cuny.edu/www/web/yltian/Publications/Neutralface.pdf
+3538d2b5f7ab393387ce138611ffa325b6400774,http://pdfs.semanticscholar.org/3538/d2b5f7ab393387ce138611ffa325b6400774.pdf,,https://doi.org/10.1109/ICASSP.2003.1202342,http://read.pudn.com/downloads159/doc/fileformat/713634/01202342.pdf
+3519241c9ac13ca43e533844e2d3644d162dde22,,,,
+3504907a2e3c81d78e9dfe71c93ac145b1318f9c,https://arxiv.org/pdf/1605.02686v3.pdf,,https://doi.org/10.1007/s11263-017-1029-3,http://arxiv.org/pdf/1605.02686v2.pdf
+35f03f5cbcc21a9c36c84e858eeb15c5d6722309,http://www.ee.columbia.edu/ln/dvmm/publications/16/ACMMMVP_VAH_2016.pdf,,,http://doi.acm.org/10.1145/2964284.2970929
+35e4b6c20756cd6388a3c0012b58acee14ffa604,http://pdfs.semanticscholar.org/35e4/b6c20756cd6388a3c0012b58acee14ffa604.pdf,,https://doi.org/10.1007/978-3-642-33275-3_9,https://acceda.ulpgc.es:8443/bitstream/10553/15085/5/C082_LNCS_CIARP12_postprint.pdf
+356b431d4f7a2a0a38cf971c84568207dcdbf189,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Xiong_Recognize_Complex_Events_2015_CVPR_paper.pdf,,https://doi.org/10.1109/CVPR.2015.7298768,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/1B_054_ext.pdf
+356a144d2aa5cc5e74d178dae3963003871aa8a1,,,https://doi.org/10.1007/978-3-319-27671-7_41,
+359edbaa9cf56857dd5c7c94aaef77003ba8b860,,,https://doi.org/10.1007/978-3-319-02714-2,
+35d90beea6b4dca8d949aae93f86cf53da72971f,,,https://doi.org/10.1109/ICIP.2011.6116672,
+35f921def890210dda4b72247849ad7ba7d35250,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Zhou_Exemplar-Based_Graph_Matching_2013_ICCV_paper.pdf,,,http://www.ri.cmu.edu/pub_files/2013/12/2013_ICCV_EGM.pdf
+357963a46dfc150670061dbc23da6ba7d6da786e,http://pdfs.semanticscholar.org/3579/63a46dfc150670061dbc23da6ba7d6da786e.pdf,,,https://arxiv.org/pdf/1803.11521v1.pdf
+35ccc836df60cd99c731412fe44156c7fd057b99,,,https://doi.org/10.1109/ICCIS.2017.8274819,
+35ec9b8811f2d755c7ad377bdc29741b55b09356,http://pdfs.semanticscholar.org/35ec/9b8811f2d755c7ad377bdc29741b55b09356.pdf,,,http://ljk.imag.fr/membres/Bill.Triggs/events/iccv03/cdrom/iccv03/0059_romdhani.pdf
+35f1bcff4552632419742bbb6e1927ef5e998eb4,https://arxiv.org/pdf/1703.02521v1.pdf,,,http://cs.stanford.edu/groups/vision/pdf/huang2017cvpr.pdf
+355746e6e1770cfcc2e91479f8134c854a77ff96,,,,
+35c973dba6e1225196566200cfafa150dd231fa8,http://pdfs.semanticscholar.org/8af7/72ea2389b555c0b193624add6a1c5a49ff24.pdf,,https://doi.org/10.1016/j.imavis.2010.12.001,http://research.sabanciuniv.edu/16482/1/cosar_IVC11.pdf
+35a39c7da14b1d288c0f9201374b307f667d63a3,http://media.au.tsinghua.edu.cn/liuyebin_files/TMM.pdf,,https://doi.org/10.1109/TMM.2013.2293064,http://media.au.tsinghua.edu.cn/kaili/TMM2014.pdf
+35f084ddee49072fdb6e0e2e6344ce50c02457ef,https://dash.harvard.edu/bitstream/handle/1/4238979/Lee_Bilinear.pdf?sequence=2,,,http://vcg.seas.harvard.edu/files/pfister/files/iccv05_0.pdf
+352c53e56c52a49d33dcdbec5690c2ba604b07d0,http://www.cs.huji.ac.il/~zweiga/Alons_Zweig_Hompage/Homepage_files/Zweig_ICCV7.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2007.4409064
+3598d10d7d4f2b543afa8bcf6b2c34a3696ef155,,,https://doi.org/10.1109/SPAC.2017.8304347,
+3505c9b0a9631539e34663310aefe9b05ac02727,https://ibug.doc.ic.ac.uk/media/uploads/documents/pid4666647.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.24
+3506518d616343d3083f4fe257a5ee36b376b9e1,http://disi.unitn.it/~zen/data/icmi14_personalized.pdf,,,http://doi.acm.org/10.1145/2663204.2663247
+359b4a4c6cb58c8ab5e8eaaed0e8562c8c43a0f9,,,https://doi.org/10.1007/s10044-014-0377-7,
+353a89c277cca3e3e4e8c6a199ae3442cdad59b5,http://pdfs.semanticscholar.org/353a/89c277cca3e3e4e8c6a199ae3442cdad59b5.pdf,,,http://drum.lib.umd.edu/bitstream/handle/1903/16629/Sharma_umd_0117E_16103.pdf?isAllowed=y&sequence=1
+35e87e06cf19908855a16ede8c79a0d3d7687b5c,http://pdfs.semanticscholar.org/35e8/7e06cf19908855a16ede8c79a0d3d7687b5c.pdf,,,http://manzaramesh.in/Publication/All%20Research%20Paper/Strategies%20for%20Multi-View%20Face%20Recognition%20for%20Identification%20of%20Human%20Faces-%20A%20Review_Pritesh%20G%20Shah.pdf
+352110778d2cc2e7110f0bf773398812fd905eb1,http://www.ca.cs.cmu.edu/sites/default/files/complete_14.pdf,,,http://humansensing.cs.cmu.edu/papers/complete_14.pdf
+35d272877b178aa97c678e3fcbb619ff512af4c2,,,https://doi.org/10.1109/SMC.2017.8122743,
+35b3dc0e961a15a7a60b95490a989f91680acc7c,,,,http://doi.ieeecomputersociety.org/10.1109/TDSC.2016.2550459
+351158e4481e3197bd63acdafd73a5df8336143b,http://pdfs.semanticscholar.org/3511/58e4481e3197bd63acdafd73a5df8336143b.pdf,,,http://www.www2015.it/documents/proceedings/companion/p893.pdf
+359e8703fd6ca8172a645c5b5a45b1d2b30b1d14,,,,
+35d42f4e7a1d898bc8e2d052c38e1106f3e80188,,,https://doi.org/10.1109/BTAS.2015.7358765,
+35490b021dcdec12882870a31dce9a687205ab5c,http://www.ecse.rpi.edu/homepages/qji/Papers/BN_learning_CVPR08.pdf,,,http://mplab.ucsd.edu/wp-content/uploads/cvpr2008/conference/data/papers/028.pdf
+35683a325c4fa02e9335dccbca9b67e2b55b87ec,,,,
+69a9cf9bc8e585782824666fa3fb5ce5cf07cef2,,,https://doi.org/10.1007/s11390-017-1738-7,
+699b8250fb93b3fa64b2fc8f59fef036e172564d,,,https://doi.org/10.1109/ICMLA.2016.0147,
+697b0b9630213ca08a1ae1d459fabc13325bdcbb,http://pdfs.semanticscholar.org/697b/0b9630213ca08a1ae1d459fabc13325bdcbb.pdf,,,http://www.bmva.org/bmvc/2016/papers/paper029/index.html
+69064c7b349bf6e7f4a802f4fd0da676c1bd1d8b,,,https://doi.org/10.1016/j.patcog.2014.06.016,
+69ff40fd5ce7c3e6db95a2b63d763edd8db3a102,http://pdfs.semanticscholar.org/69ff/40fd5ce7c3e6db95a2b63d763edd8db3a102.pdf,,,http://vision.gyte.edu.tr/publications/2012/merve_HumanAgeEstimationviaGeometricandTexturalFeatures.pdf
+69ba86f7aac7b7be0ac41d990f5cd38400158f96,,,https://doi.org/10.1109/TNNLS.2015.2504724,
+69d29012d17cdf0a2e59546ccbbe46fa49afcd68,https://arxiv.org/pdf/1404.6818v1.pdf,,https://doi.org/10.1109/ISIT.2014.6875384,https://www.nari.ee.ethz.ch/commth/pubs/files/isit2014_dim_red.pdf
+6909cd34a1eceba2140e2c02a842cefcecf33645,,,,
+69a68f9cf874c69e2232f47808016c2736b90c35,http://personal.ie.cuhk.edu.hk/~ccloy/files/cvpr_2016_imbalanced.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Huang_Learning_Deep_Representation_CVPR_2016_paper.pdf
+69ad67e204fb3763d4c222a6c3d05d6725b638ed,,,,http://doi.ieeecomputersociety.org/10.1109/ICMEW.2014.6890538
+69de532d93ad8099f4d4902c4cad28db958adfea,http://pdfs.semanticscholar.org/e6bc/c30d2be78797e0e2506567bc0f09b8eae21a.pdf,,,https://arxiv.org/pdf/1711.07246v2.pdf
+694bdadb720d4237b701a5c8c10417843ed89c6f,,,,
+69b18d62330711bfd7f01a45f97aaec71e9ea6a5,http://pdfs.semanticscholar.org/69b1/8d62330711bfd7f01a45f97aaec71e9ea6a5.pdf,,https://doi.org/10.1371/journal.pcbi.1005115,
+69526cdf6abbfc4bcd39616acde544568326d856,http://speech.iiit.ac.in/svlpubs/article/SaoA.K.Yegna2007.pdf,,https://doi.org/10.1109/TIFS.2007.902920,
+69b2a7533e38c2c8c9a0891a728abb423ad2c7e7,,,https://doi.org/10.1016/j.imavis.2013.03.003,
+690d669115ad6fabd53e0562de95e35f1078dfbb,http://pdfs.semanticscholar.org/690d/669115ad6fabd53e0562de95e35f1078dfbb.pdf,,,"http://www.ece.rice.edu/~av21/Documents/2011/Progressive%20versus%20Random%20Projections%20for%20Compressive%20Capture%20of%20Images,.pdf"
+69eb6c91788e7c359ddd3500d01fb73433ce2e65,http://pdfs.semanticscholar.org/69eb/6c91788e7c359ddd3500d01fb73433ce2e65.pdf,,,https://www.cc.gatech.edu/projects/up/publications/CAMGRAPH.pdf
+69063f7e0a60ad6ce16a877bc8f11b59e5f7348e,http://openaccess.thecvf.com/content_iccv_2015/papers/Anwar_Class-Specific_Image_Deblurring_ICCV_2015_paper.pdf,,,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Anwar_Class-Specific_Image_Deblurring_ICCV_2015_paper.pdf
+69a9da55bd20ce4b83e1680fbc6be2c976067631,http://pdfs.semanticscholar.org/a9b4/d257d16e876302e3318ade42fcb2ab9ffdf9.pdf,,https://doi.org/10.5244/C.25.22,http://eprints.pascal-network.org/archive/00008324/01/marin11bmvc.pdf
+69c2ac04693d53251500557316c854a625af84ee,http://pdfs.semanticscholar.org/dc97/ceb1faf945e780a92be651b022a82e3bff5a.pdf,,https://doi.org/10.1016/j.patrec.2015.12.013,http://www.cse.msu.edu/~rossarun/pubs/JainNandakumarRoss_50Years_PRL2016.pdf
+6974449ce544dc208b8cc88b606b03d95c8fd368,https://ibug.doc.ic.ac.uk/media/uploads/documents/martinezvalstar-pami_final.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2012.205
+69a77cb816a31c65699cd11c4a3b1b82ae44e903,,,,
+69fb98e11df56b5d7ec7d45442af274889e4be52,http://pdfs.semanticscholar.org/69fb/98e11df56b5d7ec7d45442af274889e4be52.pdf,,,http://arxiv.org/pdf/1512.06498v2.pdf
+3c78b642289d6a15b0fb8a7010a1fb829beceee2,http://pdfs.semanticscholar.org/3c78/b642289d6a15b0fb8a7010a1fb829beceee2.pdf,,https://doi.org/10.4304/jmm.1.6.10-21,http://www.cs.bris.ac.uk/Publications/Papers/2000698.pdf
+3c8db0d86a6aa51b64ec09c7d25a721adcdfb7a3,,,https://doi.org/10.1016/j.imavis.2015.06.009,
+3c1b73509cc09200e96ab9cfb28ebfd9d1d6aa9a,,,https://doi.org/10.1109/LSP.2016.2639341,
+3cc3cf57326eceb5f20a02aefae17108e8c8ab57,http://pdfs.semanticscholar.org/3cc3/cf57326eceb5f20a02aefae17108e8c8ab57.pdf,,,http://vision.ece.ucsb.edu/publications/gelasca_BIOW08.pdf
+3c1f5580a66c9624c77f27ab8e4cf0d1b3d9d171,http://research.microsoft.com/en-us/um/people/luyuan/paper/skyfinder_siggraph09.pdf,,,http://research.microsoft.com/en-us/um/people/jiansun/papers/skyfinder_siggraph09.pdf
+3c63fa505a44902f13698ec10d7f259b1d0878ee,http://www.ece.ucr.edu/~amitrc/publications/TMM2015.pdf,,https://doi.org/10.1109/TMM.2015.2477242,https://pdfs.semanticscholar.org/64e7/df0652f4c47482fd7ef49f011b7188d441fa.pdf
+3cb488a3b71f221a8616716a1fc2b951dd0de549,http://cse.seu.edu.cn/people/xgeng/LDL/resource/icpr14.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICPR.2014.764
+3cfbe1f100619a932ba7e2f068cd4c41505c9f58,http://pdfs.semanticscholar.org/3cfb/e1f100619a932ba7e2f068cd4c41505c9f58.pdf,,https://doi.org/10.1007/978-3-642-20217-9_2,https://www.researchgate.net/profile/Rodrigo_Verschae/publication/220797244_A_Realistic_Simulation_Tool_for_Testing_Face_Recognition_Systems_under_Real-World_Conditions/links/00b7d5144e5c428da2000000.pdf
+3c03d95084ccbe7bf44b6d54151625c68f6e74d0,http://pdfs.semanticscholar.org/3c03/d95084ccbe7bf44b6d54151625c68f6e74d0.pdf,,https://doi.org/10.1016/j.patrec.2010.12.001,http://www.cbsr.ia.ac.cn/users/zlei/papers/ZLEI-CCLDA-PRL-11.pdf
+3c57e28a4eb463d532ea2b0b1ba4b426ead8d9a0,http://pdfs.semanticscholar.org/73cc/fdedbd7d72a147925727ba1932f9488cfde3.pdf,,,https://arxiv.org/pdf/1609.00408v1.pdf
+3cd9b0a61bdfa1bb8a0a1bf0369515a76ecd06e3,http://pdfs.semanticscholar.org/51f7/3cfcc6d671bd99b5c3c512ff9b7bb959f33b.pdf,,,http://dl.acm.org/citation.cfm?id=2188386
+3c97c32ff575989ef2869f86d89c63005fc11ba9,http://people.cs.umass.edu/~hzjiang/pubs/face_det_fg_2017.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.82
+3c7825dcf5a027bd07eb0fe4cce23910b89cf050,,,,http://doi.acm.org/10.1145/2987378
+3c4106f2c670362f620b33ad7715ab6fd3eb2458,,,,
+3c086601ce0bac61047b5b931b253bd4035e1e7a,,,https://doi.org/10.1109/ICIP.2015.7350897,
+3cbd3124b1b4f95fcdf53abd358d7ceec7861dda,,,,http://doi.acm.org/10.1145/3019612.3019641
+3c47022955c3274250630b042b53d3de2df8eeda,http://research.microsoft.com/en-us/um/people/leizhang/paper/cvpr05-shuicheng-discriminant.pdf,,,http://mmlab.ie.cuhk.edu.hk/2005/01467312.pdf
+3cd5b1d71c1d6a50fcc986589f2d0026c68d9803,http://www.openu.ac.il/home/hassner/projects/siftscales/OnSiftsAndTheirScales-CVPR12.pdf,,,http://webee.technion.ac.il/people/lihi/Publications/12-CVPR-OnSiftsAndTheirScales.pdf
+3ce2ecf3d6ace8d80303daf67345be6ec33b3a93,http://pdfs.semanticscholar.org/3ce2/ecf3d6ace8d80303daf67345be6ec33b3a93.pdf,,https://doi.org/10.1016/j.ijar.2007.02.003,http://tcts.fpms.ac.be/~couvreur/papers/ijar2007_paper.pdf
+3c09d15b3e78f38618b60388ec9402e616fc6f8e,,,https://doi.org/10.1109/IJCNN.2010.5596793,
+3c374cb8e730b64dacb9fbf6eb67f5987c7de3c8,http://pdfs.semanticscholar.org/3c37/4cb8e730b64dacb9fbf6eb67f5987c7de3c8.pdf,,,http://ceur-ws.org/Vol-693/paper2.pdf
+3c0bbfe664fb083644301c67c04a7f1331d9515f,http://pdfs.semanticscholar.org/3c0b/bfe664fb083644301c67c04a7f1331d9515f.pdf,,,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ECCV_2014/workshops/w24/dibeklioglu.pdf
+3ce96f03874d42345c0727edc78b6949b20b4a11,,,https://doi.org/10.1007/s11042-015-2630-5,
+3cc3e01ac1369a0d1aa88fedda61d3c99a98b890,http://mi.eng.cam.ac.uk/~bdrs2/papers/mita_pami08.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2007.70767
+3c4f6d24b55b1fd3c5b85c70308d544faef3f69a,http://pdfs.semanticscholar.org/3c4f/6d24b55b1fd3c5b85c70308d544faef3f69a.pdf,,,http://arxiv.org/abs/1703.02952
+3cb0ef5aabc7eb4dd8d32a129cb12b3081ef264f,http://pdfs.semanticscholar.org/3cb0/ef5aabc7eb4dd8d32a129cb12b3081ef264f.pdf,,,http://www-ee.ccny.cuny.edu/www/web/yltian/Publications/AMFG03.pdf
+3cb64217ca2127445270000141cfa2959c84d9e7,http://staff.estem-uc.edu.au/roland/files/2009/05/Joshi_Goecke_Parker_Breakspear_FG2013_CanBodyExpressionsContributeToAutomaticDepressionAnalysis.pdf,,,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553796.pdf
+3c2b6282811c3077b7807d84068e6a879d163854,,,,
+3c11a1f2bd4b9ce70f699fb6ad6398171a8ad3bd,http://pdfs.semanticscholar.org/3c11/a1f2bd4b9ce70f699fb6ad6398171a8ad3bd.pdf,,,http://www.mirlabs.org/ijcisim/regular_papers_2009/IJCISIM_Vol_2_Paper_26.pdf
+3c18fb8ff0f5003fefa8e9dc9bebaf88908d255c,,,https://doi.org/10.1109/ICIP.2014.7025145,
+3cd8ab6bb4b038454861a36d5396f4787a21cc68,http://pdfs.semanticscholar.org/3cd8/ab6bb4b038454861a36d5396f4787a21cc68.pdf,,,http://journal.iis.sinica.edu.tw/paper/1/140379-3.pdf?cd=12E58A4F52B293826
+3cc2d6ace4cf0bc3a6c4df5ca8da892275ca201f,,,,
+3c6542295cf7fe362d7d629ac10670bf30cdabce,,,https://doi.org/10.1109/DICTA.2015.7371264,
+3ce37af3ac0ed2eba08267a3605730b2e0433da5,,,https://doi.org/10.1109/TIP.2016.2609811,
+3cd5da596060819e2b156e8b3a28331ef633036b,http://pdfs.semanticscholar.org/3cd5/da596060819e2b156e8b3a28331ef633036b.pdf,,,https://www.sciencedirect.com/science/article/pii/S0042698915001662
+3ca5d3b8f5f071148cb50f22955fd8c1c1992719,http://pdfs.semanticscholar.org/3ca5/d3b8f5f071148cb50f22955fd8c1c1992719.pdf,,,https://arxiv.org/pdf/1707.02353v1.pdf
+3cd22b5b81a0172d608ff14be71b755d1f68c201,,,https://doi.org/10.1109/ACCESS.2018.2812725,
+3cc2a2eaaacbf96c6b9abc1cf91bfefabf6fcfdd,,,https://doi.org/10.1109/TCSVT.2014.2317887,
+3cc46bf79fb9225cf308815c7d41c8dd5625cc29,http://poseidon.csd.auth.gr/papers/PUBLISHED/CONFERENCE/pdf/2016/Pantraki2016.pdf,,https://doi.org/10.1109/IWBF.2016.7449694,
+3ca6adc90aae5912baa376863807191ffd56b34e,,,https://doi.org/10.1109/LSP.2014.2316918,
+3c8da376576938160cbed956ece838682fa50e9f,http://shodhganga.inflibnet.ac.in/bitstream/10603/49167/11/11_chapter%204.pdf,,https://doi.org/10.1109/BTAS.2014.6996266,
+56e4dead93a63490e6c8402a3c7adc493c230da5,http://pdfs.semanticscholar.org/56e4/dead93a63490e6c8402a3c7adc493c230da5.pdf,,,http://www.hrpub.org/download/201309/wjcat.2013.010204.pdf
+56e885b9094391f7d55023a71a09822b38b26447,http://pdfs.semanticscholar.org/56e8/85b9094391f7d55023a71a09822b38b26447.pdf,,,https://arxiv.org/pdf/1709.06508v1.pdf
+56c700693b63e3da3b985777da6d9256e2e0dc21,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1A_079.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/1A_079_ext.pdf
+56359d2b4508cc267d185c1d6d310a1c4c2cc8c2,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Li_Shape_Driven_Kernel_2015_CVPR_paper.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1A_025.pdf
+5642bafa7955b69f05c11230151cd59fcbe43b8e,,,https://doi.org/10.1007/s11760-012-0404-3,
+56e6f472090030a6f172a3e2f46ef9daf6cad757,http://pdfs.semanticscholar.org/56e6/f472090030a6f172a3e2f46ef9daf6cad757.pdf,,,
+56e03f8fcd16332f764352ba6e72c9c5092cac0f,http://www.cs.utexas.edu/~ssi/DHE.pdf,,https://doi.org/10.1109/ICASSP.2010.5495241,http://mirlab.org/conference_papers/International_Conference/ICASSP%202010/pdfs/0005586.pdf
+561bbc758f995894f43351b4267abf9748890705,,,,
+564d4ee76c0511bc395dfc8ef8e3b3867fc34a6d,http://bcmi.sjtu.edu.cn/~pengyong/Pub2015/CCECE2015.pdf,,https://doi.org/10.1109/CCECE.2015.7129176,
+56a653fea5c2a7e45246613049fb16b1d204fc96,http://ieeeprojectsmadurai.com/matlab2016base/Quaternion%20Collaborative%20and%20Sparse%20Representation.pdf,,https://doi.org/10.1109/TIP.2016.2567077,
+56f86bef26209c85f2ef66ec23b6803d12ca6cd6,https://arxiv.org/pdf/1710.00307v1.pdf,,,http://arxiv.org/abs/1710.00307
+5666ed763698295e41564efda627767ee55cc943,http://i.cs.hku.hk/~kykwong/publications/zkuang_ijcv15.pdf,,https://doi.org/10.1007/s11263-014-0783-8,
+56fb30b24e7277b47d366ca2c491749eee4d6bb1,,,https://doi.org/10.1109/ICAPR.2015.7050658,
+566a39d753c494f57b4464d6bde61bf3593f7ceb,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2013/W01/papers/Hassner_A_Critical_Review_2013_CVPR_paper.pdf,,,http://www.openu.ac.il//home/hassner/Events/ACTS13/papers/ACTS13Hassner.pdf
+56bcc89fb1e05d21a8b7b880c6b4df79271ceca5,,,https://doi.org/10.1007/s11760-013-0441-6,
+56c2fb2438f32529aec604e6fc3b06a595ddbfcc,http://pdfs.semanticscholar.org/56c2/fb2438f32529aec604e6fc3b06a595ddbfcc.pdf,,,http://ceur-ws.org/Vol-1584/paper21.pdf
+56e25358ebfaf8a8b3c7c33ed007e24f026065d0,,,https://doi.org/10.1007/s10994-015-5541-9,
+56f231fc40424ed9a7c93cbc9f5a99d022e1d242,http://pdfs.semanticscholar.org/d060/f2f3641c6a89ade021eea749414a5c6b443f.pdf,,https://doi.org/10.1007/978-3-319-54187-7_14,http://shuaizhou.me/papers/ACCV2016_age.pdf
+5615d6045301ecbc5be35e46cab711f676aadf3a,https://arxiv.org/pdf/1705.10420v1.pdf,,https://doi.org/10.1007/s11263-017-1030-x,http://arxiv.org/abs/1705.10420
+568ced900cbf7437c9e87b60a17e16f0c1e0c442,,,https://doi.org/10.1109/CCECE.2012.6335026,
+561ae67de137e75e9642ab3512d3749b34484310,http://pdfs.semanticscholar.org/561a/e67de137e75e9642ab3512d3749b34484310.pdf,,,https://arxiv.org/pdf/1801.07637v1.pdf
+568cff415e7e1bebd4769c4a628b90db293c1717,http://pdfs.semanticscholar.org/568c/ff415e7e1bebd4769c4a628b90db293c1717.pdf,,,http://www.aaai.org/ocs/index.php/AAAI/AAAI16/paper/view/12259
+565590af15af3d02f0b592b2e201e36708e4fe50,,,,
+564035f1b8f06e9bb061255f40e3139fa57ea879,http://pdfs.semanticscholar.org/fcbf/61524a3d775947ea8bcef46d1b0a9cce7bfb.pdf,,https://doi.org/10.4304/jmm.1.6.22-35,http://www.academypublisher.com/jmm/vol01/no06/jmm01062235.pdf
+5613cb13ab381c8a8b81181ac786255705691626,,,https://doi.org/10.1109/VCIP.2015.7457876,
+560e0e58d0059259ddf86fcec1fa7975dee6a868,http://www.cs.tau.ac.il/~wolf/papers/lvfw.pdf,,,http://www.wisdom.weizmann.ac.il/~hassner/projects/ytfaces/ytfaces.pdf
+56c0b225fd57cfe173e5206a4bb0ce153bfecc29,http://www.sfu.ca/~wya16/ProfileFG08.pdf,,https://doi.org/10.1109/AFGR.2008.4813370,
+566038a3c2867894a08125efe41ef0a40824a090,http://mirlab.org/conference_papers/international_conference/icassp%202009/pdfs/0001945.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICASSP.2009.4959991
+56fa0872ed73f7acfbfe83677fecb2dbc6eaa2fe,,,https://doi.org/10.1007/s11554-007-0031-3,
+569988e19ab36582d4bd0ec98e344cbacf177f45,,,,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2014.2347960
+56f57786516dcc8ea3c0ffe877c1363bfb9981d2,,,https://doi.org/10.1109/CBMI.2014.6849823,
+56ae6d94fc6097ec4ca861f0daa87941d1c10b70,http://pdfs.semanticscholar.org/56ae/6d94fc6097ec4ca861f0daa87941d1c10b70.pdf,,https://doi.org/10.1007/978-3-319-10590-1_21,http://www.vision.caltech.edu/~xpburgos/papers/ECCV14%20Burgos-Artizzu.pdf
+565f7c767e6b150ebda491e04e6b1de759fda2d4,,,https://doi.org/10.1016/j.patcog.2016.11.023,
+56f812661c3248ed28859d3b2b39e033b04ae6ae,http://www.cs.wisc.edu/~gdguo/myPapersOnWeb/CIVR08.pdf,,,http://www.ifp.illinois.edu/~cao4/papers/fuyunCIVR08.pdf
+512befa10b9b704c9368c2fbffe0dc3efb1ba1bf,http://pdfs.semanticscholar.org/bd42/e0a6a1082e8c197a7b0a9b710434cd7c5a47.pdf,,,http://cseweb.ucsd.edu/~gary/pubs/Dailey-CrossCultural.pdf
+5145e42dc46845f3aeb8307452765ba8dc59d2da,http://pdcat13.csie.ntust.edu.tw/download/papers/P10003.pdf,,,
+51c3050fb509ca685de3d9ac2e965f0de1fb21cc,http://www.cs.toronto.edu/~law/publications/CVPR/2014/fantope_regularization.pdf,,,http://www-poleia.lip6.fr/~lawm/publications/CVPR/2014/fantope_regularization.pdf
+516d0d9eb08825809e4618ca73a0697137ebabd5,http://web.engr.oregonstate.edu/~sinisa/talks/cvpr16_multimodal_oral.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.333
+519a724426b5d9ad384d38aaf2a4632d3824f243,http://pdfs.semanticscholar.org/519a/724426b5d9ad384d38aaf2a4632d3824f243.pdf,,https://doi.org/10.5244/C.23.2,http://www.bmva.org/bmvc/2009/Papers/Paper106/Abstract106.pdf
+51c7c5dfda47647aef2797ac3103cf0e108fdfb4,http://pdfs.semanticscholar.org/51c7/c5dfda47647aef2797ac3103cf0e108fdfb4.pdf,,,http://www.cs.utexas.edu/~quark/vision_project/report.pdf
+519f4eb5fe15a25a46f1a49e2632b12a3b18c94d,https://www.cise.ufl.edu/~arunava/papers/pami-abrdf.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.67
+518edcd112991a1717856841c1a03dd94a250090,http://pdfs.semanticscholar.org/518e/dcd112991a1717856841c1a03dd94a250090.pdf,,,http://www.ece.rice.edu/~eld1/pubs/Dyer_MSThesis.pdf
+51f626540860ad75b68206025a45466a6d087aa6,,,https://doi.org/10.1109/ICIP.2017.8296595,
+516f784f145390e22cb4607cb525175ff4c7109b,,,,
+51683eac8bbcd2944f811d9074a74d09d395c7f3,http://pdfs.semanticscholar.org/5168/3eac8bbcd2944f811d9074a74d09d395c7f3.pdf,,,http://www.ri.cmu.edu/pub_files/2017/2/w_chu_robotics_2017.pdf
+51cc78bc719d7ff2956b645e2fb61bab59843d2b,http://pdfs.semanticscholar.org/51cc/78bc719d7ff2956b645e2fb61bab59843d2b.pdf,,https://doi.org/10.1007/11573548_35,https://static.aminer.org/pdf/PDF/000/290/979/implementation_of_embedded_system_for_intelligent_image_recognition_and_processing.pdf
+516a556aa1019052f6a162ca9c1a345f553f7f25,,,,
+511b06c26b0628175c66ab70dd4c1a4c0c19aee9,http://pdfs.semanticscholar.org/511b/06c26b0628175c66ab70dd4c1a4c0c19aee9.pdf,,,http://ijergs.org/files/documents/FACE-8.pdf
+51528cdce7a92835657c0a616c0806594de7513b,http://pdfs.semanticscholar.org/5152/8cdce7a92835657c0a616c0806594de7513b.pdf,,https://doi.org/10.5244/C.29.95,http://www.bmva.org/bmvc/2015/papers/paper095/abstract095.pdf
+514a74aefb0b6a71933013155bcde7308cad2b46,http://pdfs.semanticscholar.org/514a/74aefb0b6a71933013155bcde7308cad2b46.pdf,,,http://www.ece.cmu.edu/research/publications/2007/CMU-ECE-2007-029.pdf
+51b770e6b2af994ffc8793f59b24a9f619033a3a,,,https://doi.org/10.1109/ICDSC.2011.6042899,
+51a8dabe4dae157aeffa5e1790702d31368b9161,http://pdfs.semanticscholar.org/5621/adae20c1bc781a36c43a9ddbe5475ea4b6e8.pdf,,https://doi.org/10.1142/S0218001405004186,http://159.226.42.3/doc/2005/Face%20Recognition%20under%20Generic%20Illumination%20Based%20on%20Harmonic%20Relighting.pdf
+51e87b14f39f44a9f2866d5cc6440e7496ed1298,,,,
+51224ed7519e71346076060092462e3d59ca3ab9,http://www.iis.ee.ic.ac.uk/ComputerVision/docs/pubs/Chao_TM_2014.pdf,,,https://labicvl.github.io/docs/pubs/Chao_TM_2014.pdf
+516f8728ad1d4f9f2701a2b5385f8c8e71b9d356,,,https://doi.org/10.1109/ACCESS.2017.2745903,
+512b4c8f0f3fb23445c0c2dab768bcd848fa8392,http://pdfs.semanticscholar.org/b85d/ac54bfa985137b3b071593b986ac92f32bed.pdf,,,http://up.jiaeee.org/_file_3b41b7edd75fcdb001522eb58ee9714a_4.pdf
+51eba481dac6b229a7490f650dff7b17ce05df73,http://grail.cs.washington.edu/wp-content/uploads/2016/09/yatskar2016srv.pdf,,,https://homes.cs.washington.edu/~ali/papers/SituationRecognition.pdf
+51348e24d2199b06273e7b65ae5f3fc764a2efc7,http://pdfs.semanticscholar.org/c4b4/cbc801a4430be5fdd16ae34c68f53f772582.pdf,,,http://arxiv.org/abs/1307.7852
+5173a20304ea7baa6bfe97944a5c7a69ea72530f,http://pdfs.semanticscholar.org/5173/a20304ea7baa6bfe97944a5c7a69ea72530f.pdf,,https://doi.org/10.3390/s131012830,http://www.mdpi.com/1424-8220/13/10/12830/pdf
+51ed4c92cab9336a2ac41fa8e0293c2f5f9bf3b6,http://pdfs.semanticscholar.org/51ed/4c92cab9336a2ac41fa8e0293c2f5f9bf3b6.pdf,,,"http://cai.type.sk/content/2003/2/a-survey-of-face-detection,-extraction-and-recognition/1217.pdf"
+51f311f724883218bcc511b0403b9a7745b9d40e,https://www.researchgate.net/profile/Xiangwei_Kong/publication/221190737_Biometrics-based_identifiers_for_digital_identity_management/links/00b7d51ca1f2a78c74000000.pdf,,,http://www.cerias.purdue.edu/ssl/techreports-ssl/2009-02.pdf
+5101368f986aa9837fdb3a71cb4299dff6f6325d,,,https://doi.org/10.1109/ICIP.2008.4712155,
+5121f42de7cb9e41f93646e087df82b573b23311,http://pdfs.semanticscholar.org/5121/f42de7cb9e41f93646e087df82b573b23311.pdf,,,https://arxiv.org/pdf/1803.04347v1.pdf
+5180c98815d7034e753a14ef6f54583f115da3aa,,,,http://doi.ieeecomputersociety.org/10.1109/iV.2017.40
+5185f2a40836a754baaa7419a1abdd1e7ffaf2ad,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR_2007/data/papers/workshops/SLAM2007/papers/11-1569042551.pdf,,,http://www.ifp.illinois.edu/~dagli/papers/CVPRSLAM2.pdf
+5160569ca88171d5fa257582d161e9063c8f898d,http://infoscience.epfl.ch/record/83324/files/heusch-AFGR-2006.pdf,,,http://www.idiap.ch/~rodrig/publications/pdf/heusch-idiap-rr-05-76.pdf
+5157dde17a69f12c51186ffc20a0a6c6847f1a29,http://arxiv.org/pdf/1505.04373v2.pdf,,https://doi.org/10.1109/TNNLS.2016.2607757,http://arxiv.org/pdf/1505.04373v1.pdf
+51dc127f29d1bb076d97f515dca4cc42dda3d25b,http://pdfs.semanticscholar.org/7a1d/4a9ef5944217ee19aa642471b4746aaa2576.pdf,,https://doi.org/10.1007/978-3-642-24600-5_24,http://www.cl.cam.ac.uk/~mmam3/pub/ACII2011.pdf
+3d2d439ead6e32877ce40e5568e62dee4a877836,,,,
+3d143cfab13ecd9c485f19d988242e7240660c86,http://pdfs.semanticscholar.org/3d14/3cfab13ecd9c485f19d988242e7240660c86.pdf,,https://doi.org/10.1007/978-3-319-16817-3_14,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ACCV_2014/pages/PDF/646.pdf
+3d2c932f4f2693a87a0b855048e60f142214f475,,,,http://doi.ieeecomputersociety.org/10.1109/CSE.2014.354
+3d1959048eba5495e765a80c8e0bbd3d65b3d544,,,https://doi.org/10.1016/j.neucom.2016.07.038,
+3dabf7d853769cfc4986aec443cc8b6699136ed0,http://pdfs.semanticscholar.org/3dab/f7d853769cfc4986aec443cc8b6699136ed0.pdf,,https://doi.org/10.1007/978-3-540-70872-8_1,http://tdlc.ucsd.edu/publications/2008-2009/data_mining.pdf
+3db75962857a602cae65f60f202d311eb4627b41,https://pdfs.semanticscholar.org/3db7/5962857a602cae65f60f202d311eb4627b41.pdf,,https://doi.org/10.1109/ICPR.2014.272,http://www.cripac.ia.ac.cn/People/lwang/M-MCG_EN/research/PHH-ICPR14/PHH2014ICPR.pdf
+3daf1191d43e21a8302d98567630b0e2025913b0,http://pdfs.semanticscholar.org/3daf/1191d43e21a8302d98567630b0e2025913b0.pdf,,,https://arxiv.org/pdf/1803.05181v2.pdf
+3d2c89676fcc9d64aaed38718146055152d22b39,,,https://doi.org/10.1109/ACPR.2013.10,
+3d36f941d8ec613bb25e80fb8f4c160c1a2848df,https://arxiv.org/pdf/1502.02410v1.pdf,,https://doi.org/10.1109/TIP.2016.2520368,http://arxiv.org/abs/1502.02410
+3d5a1be4c1595b4805a35414dfb55716e3bf80d8,http://pdfs.semanticscholar.org/9e8e/bf5447fcd5b2ba4cdd53253f0049dacb2985.pdf,,,https://arxiv.org/pdf/1704.00389v2.pdf
+3d62b2f9cef997fc37099305dabff356d39ed477,http://pdfs.semanticscholar.org/3d62/b2f9cef997fc37099305dabff356d39ed477.pdf,,,https://arxiv.org/pdf/1708.02734v1.pdf
+3dc522a6576c3475e4a166377cbbf4ba389c041f,http://pdfs.semanticscholar.org/3dc5/22a6576c3475e4a166377cbbf4ba389c041f.pdf,,,https://arxiv.org/pdf/1707.06642v1.pdf
+3dd4d719b2185f7c7f92cc97f3b5a65990fcd5dd,http://pdfs.semanticscholar.org/3dd4/d719b2185f7c7f92cc97f3b5a65990fcd5dd.pdf,,https://doi.org/10.1007/978-3-319-23234-8_54,http://arxiv.org/pdf/1507.03811v1.pdf
+3d4b76fe73ea16400d62d0d776b3f43cc5ecf72b,,,https://doi.org/10.1109/TIFS.2015.2512561,
+3d0ef9bfd08a9252db6acfece3b83f3aa58b4cae,http://perso.telecom-paristech.fr/~chollet/Biblio/Articles/Domaines/BIOMET/Face/Kumar/CoreFaceCVPR04.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2004.1
+3d1f976db6495e2bb654115b939b863d13dd3d05,,,https://doi.org/10.1007/s11042-015-2581-x,
+3d1a6a5fd5915e0efb953ede5af0b23debd1fc7f,http://pdfs.semanticscholar.org/3d1a/6a5fd5915e0efb953ede5af0b23debd1fc7f.pdf,,,"http://paspk.org/wp-content/uploads/proceedings/52,%20No.1/26edde54Bimodal%20Human.pdf"
+3dce3bb30f0c19121a71e0bfe1d418f855cb13ce,,,,
+3d0379688518cc0e8f896e30815d0b5e8452d4cd,http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/WorkShops/data/papers/007.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2008.4562956
+3dfbd17bd9caf7bd1d908ff469dec2b61e8a9548,,,https://doi.org/10.1109/ITSC.2015.252,
+3dda181be266950ba1280b61eb63ac11777029f9,http://pdfs.semanticscholar.org/3dda/181be266950ba1280b61eb63ac11777029f9.pdf,,,https://arxiv.org/pdf/1712.09757v1.pdf
+3de5dc06f5d089dee111e048c7174a834f1363c1,,,,
+3d4d3f70352dc833e454a5756d682f27eca46e5d,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.32
+3d24b386d003bee176a942c26336dbe8f427aadd,https://arxiv.org/pdf/1611.09967v1.pdf,,,http://openaccess.thecvf.com/content_cvpr_2017/papers/Li_Sequential_Person_Recognition_CVPR_2017_paper.pdf
+3dcebd4a1d66313dcd043f71162d677761b07a0d,http://cvhci.ira.uka.de/download/publications/2008/siu2008_lbp.pdf,,,http://face.cs.kit.edu/download/publications/siu2008_lbp.pdf
+3d0f9a3031bee4b89fab703ff1f1d6170493dc01,http://pdfs.semanticscholar.org/3d0f/9a3031bee4b89fab703ff1f1d6170493dc01.pdf,,https://doi.org/10.1007/978-3-540-74549-5_17,https://rd.springer.com/content/pdf/10.1007/978-3-540-74549-5_17.pdf
+3d0c21d4780489bd624a74b07e28c16175df6355,http://pdfs.semanticscholar.org/3d0c/21d4780489bd624a74b07e28c16175df6355.pdf,,https://doi.org/10.1007/978-3-319-54427-4_32,http://pesona.mmu.edu.my/~johnsee/research/papers/files/deeporshallow_accvw16.pdf
+3df8cc0384814c3fb05c44e494ced947a7d43f36,http://openaccess.thecvf.com/content_ICCV_2017/papers/Walker_The_Pose_Knows_ICCV_2017_paper.pdf,,,https://arxiv.org/pdf/1705.00053v1.pdf
+3d42e17266475e5d34a32103d879b13de2366561,http://pdfs.semanticscholar.org/7450/7306832bd71884365ed81e1cc7866e47c399.pdf,,,http://doi.ieeecomputersociety.org/10.1109/AFGR.2000.840645
+3dd906bc0947e56d2b7bf9530b11351bbdff2358,http://pdfs.semanticscholar.org/c57a/070724b48962935ff46ab1384d919e1d1089.pdf,,https://doi.org/10.1016/j.cviu.2016.10.018,http://arxiv.org/pdf/1604.06182v1.pdf
+3dfd94d3fad7e17f52a8ae815eb9cc5471172bc0,http://pdfs.semanticscholar.org/3dfd/94d3fad7e17f52a8ae815eb9cc5471172bc0.pdf,,,https://arxiv.org/pdf/1803.03827v1.pdf
+3dbfd2fdbd28e4518e2ae05de8374057307e97b3,http://pdfs.semanticscholar.org/3dbf/d2fdbd28e4518e2ae05de8374057307e97b3.pdf,,https://doi.org/10.1007/978-3-642-29139-5_7,http://www.researchgate.net/profile/Penousal_Machado/publication/232590119_Improving_Face_Detection/links/0fcfd5086be0106729000000.pdf
+3d0b2da6169d38b56c58fe5f13342cf965992ece,,,https://doi.org/10.1109/ICIP.2016.7532909,
+3df7401906ae315e6aef3b4f13126de64b894a54,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR_2008/data/papers/067.pdf,,,http://mplab.ucsd.edu/wp-content/uploads/cvpr2008/conference/data/papers/067.pdf
+3d68cedd80babfbb04ab197a0b69054e3c196cd9,http://www.cim.mcgill.ca/~mrl/pubs/malika/Meghjani09_Masters_Thesis.pdf,,https://doi.org/10.1109/WACV.2009.5403035,http://www.cim.mcgill.edu/~mrl/pubs/malika/Meghjani09_Masters_Thesis.pdf
+3d89f9b4da3d6fb1fdb33dea7592b5992069a096,,,https://doi.org/10.1109/CISP-BMEI.2017.8302003,
+3d1af6c531ebcb4321607bcef8d9dc6aa9f0dc5a,http://www.cse.msu.edu/~rossarun/BiometricsTextBook/Papers/Security/Teoh_BioHash_PAMI06.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2006.250
+3d6943f1573f992d6897489b73ec46df983d776c,http://pdfs.semanticscholar.org/757d/223b8db29e4cfba9530c7f942304c78cfee1.pdf,,,http://repository.cmu.edu/cgi/viewcontent.cgi?article=1506&context=dissertations
+3d948e4813a6856e5b8b54c20e50cc5050e66abe,http://pdfs.semanticscholar.org/3d94/8e4813a6856e5b8b54c20e50cc5050e66abe.pdf,,https://doi.org/10.1007/978-3-642-18405-5_8,http://www1.i2r.a-star.edu.sg/~ttng/papers/gao_iwdw10.pdf
+3d94f81cf4c3a7307e1a976dc6cb7bf38068a381,http://faculty.ucmerced.edu/mhyang/papers/tip17_age.pdf,,https://doi.org/10.1109/TIP.2017.2655445,
+3d9db1cacf9c3bb7af57b8112787b59f45927355,http://pdfs.semanticscholar.org/3d9d/b1cacf9c3bb7af57b8112787b59f45927355.pdf,,https://doi.org/10.3389/fict.2016.00011,
+3d9e44d8f8bc2663192c7ce668ccbbb084e466e4,,,,http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019505
+3d6f59e0f0e16d01b9c588a53d3b6b3b984e991e,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.117
+582edc19f2b1ab2ac6883426f147196c8306685a,http://pdfs.semanticscholar.org/be6c/db7b181e73f546d43cf2ab6bc7181d7d619b.pdf,,https://doi.org/10.1007/978-3-319-46454-1_35,http://www.openu.ac.il/home/hassner/projects/augmented_faces/Masietal2016really.pdf
+5859774103306113707db02fe2dd3ac9f91f1b9e,http://www.wisdom.weizmann.ac.il/~shimon/papers/IJCV29_98.pdf,,https://doi.org/10.1023/A:1008088813977,
+5892f8367639e9c1e3cf27fdf6c09bb3247651ed,http://pdfs.semanticscholar.org/5892/f8367639e9c1e3cf27fdf6c09bb3247651ed.pdf,,,http://www.cs.uh.edu/~abagherj/publications/Bagherjeiran07Estimating.pdf
+5810ce61fda464d4de2769bd899e12727bee0382,,,https://doi.org/10.1109/IJCNN.2016.7727484,
+58d43e32660446669ff54f29658961fe8bb6cc72,,,https://doi.org/10.1109/ISBI.2017.7950504,
+583e0d218e1e7aaf9763a5493e7c18c2b8dd7464,,,,http://doi.acm.org/10.1145/2988240.2988243
+5850aab97e1709b45ac26bb7d205e2accc798a87,http://pdfs.semanticscholar.org/5850/aab97e1709b45ac26bb7d205e2accc798a87.pdf,,https://doi.org/10.1016/j.patcog.2015.04.012,http://www.ee.cuhk.edu.hk/~lma/welcome_files/lma_PR_2015.pdf
+58684a925693a0e3e4bb1dd2ebe604885be034d2,,,https://doi.org/10.1109/ICASSP.2008.4517869,
+587f81ae87b42c18c565694c694439c65557d6d5,http://pdfs.semanticscholar.org/aeff/403079022683b233decda556a6aee3225065.pdf,,,http://arxiv.org/abs/1701.01876
+580054294ca761500ada71f7d5a78acb0e622f19,http://www.jdl.ac.cn/project/faceId/paperreading/Paper/hhan_20090305_TIP2008_FaceRelighting.pdf,,https://doi.org/10.1109/TIP.2008.925390,
+58778fafdc43f5d5b973c57843b13c6d2f05cf68,,,,
+58483028445bf6b2d1ad6e4b1382939587513fe1,,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247763
+587c48ec417be8b0334fa39075b3bfd66cc29dbe,http://pdfs.semanticscholar.org/ff91/95f99a1a28ced431362f5363c9a5da47a37b.pdf,,,https://whitneylab.berkeley.edu/PDFs/Xia_JOV.pdf
+58081cb20d397ce80f638d38ed80b3384af76869,http://pdfs.semanticscholar.org/5808/1cb20d397ce80f638d38ed80b3384af76869.pdf,,,https://arxiv.org/pdf/1711.11200v1.pdf
+581e920ddb6ecfc2a313a3aa6fed3d933b917ab0,http://pdfs.semanticscholar.org/581e/920ddb6ecfc2a313a3aa6fed3d933b917ab0.pdf,,,http://www.ti.uni-tuebingen.de/uploads/tx_timitarbeiter/etel2017-classroom_camera-ready_01.pdf
+58fa85ed57e661df93ca4cdb27d210afe5d2cdcd,http://www.dgcv.nii.ac.jp/Publications/Papers/2016/ICPR2016a.pdf,,https://doi.org/10.1109/ICPR.2016.7900279,
+5865b6d83ba6dbbf9167f1481e9339c2ef1d1f6b,,,https://doi.org/10.1109/ICPR.2016.7900278,
+5860cf0f24f2ec3f8cbc39292976eed52ba2eafd,http://pdfs.semanticscholar.org/5860/cf0f24f2ec3f8cbc39292976eed52ba2eafd.pdf,,,http://www.serialsjournals.com/serialjournalmanager/pdf/1329982460.pdf
+584909d2220b52c0d037e8761d80cb22f516773f,http://www.cs.tau.ac.il/~nachumd/papers/OFTA.pdf,,,http://www.cs.tau.ac.il/~wolf/papers/ofta-online-version.pdf
+58eb9174211d58af76023ce33ee05769de57236c,,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2016.2636827
+58d0c140597aa658345230615fb34e2c750d164c,,,,http://doi.acm.org/10.1145/3098954.3098969
+5811944e93a1f3e35ece7a70a43a3de95c69b5ab,,,https://doi.org/10.1109/BTAS.2016.7791163,http://arxiv.org/abs/1604.08865
+58823377757e7dc92f3b70a973be697651089756,http://pdfs.semanticscholar.org/fa88/52e5b7849adf8e96a103ca67e4ca60bdf244.pdf,,,https://www.cl.cam.ac.uk/~tb346/pub/thesis/phd_thesis.pdf
+580e48d3e7fe1ae0ceed2137976139852b1755df,http://pdfs.semanticscholar.org/580e/48d3e7fe1ae0ceed2137976139852b1755df.pdf,,,http://d-scholarship.pitt.edu/9210/1/main-file-etd-08202002-162757.pdf
+58ca110261680a70480eb0fd5d6f609c6689323f,,,,
+5865e824e3d8560e07840dd5f75cfe9bf68f9d96,http://pdfs.semanticscholar.org/5865/e824e3d8560e07840dd5f75cfe9bf68f9d96.pdf,,,
+58bb77dff5f6ee0fb5ab7f5079a5e788276184cc,https://ram-lab.com/papers/2016/rcar_lyp_192.pdf,,https://doi.org/10.1109/RCAR.2016.7784056,
+58ec93d804ceec167963d7ca1f6955a652b331aa,,,,
+58df849378fbcfb6b1a8ebddfbe4caa450226b9d,,,https://doi.org/10.1109/ICIP.2017.8296770,
+58b8588c01196070674ceabe5366b20f73c2912d,http://www.cse.ust.hk/~qnature/pdf/ICDM2015.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICDM.2015.52
+58b0be2db0aeda2edb641273fe52946a24a714c3,http://www.cs.ucsb.edu/~daniel/publications/conferences/wacv09/VaqueroWACV09.pdf,,https://doi.org/10.1109/WACV.2009.5403131,http://alumni.cs.ucsb.edu/~daniel/publications/conferences/wacv09/VaqueroWACV09.pdf
+585260468d023ffc95f0e539c3fa87254c28510b,http://pdfs.semanticscholar.org/5852/60468d023ffc95f0e539c3fa87254c28510b.pdf,,,http://arxiv.org/abs/1610.00889
+58cb1414095f5eb6a8c6843326a6653403a0ee17,http://pdfs.semanticscholar.org/58cb/1414095f5eb6a8c6843326a6653403a0ee17.pdf,,https://doi.org/10.1016/j.patrec.2006.04.003,http://www.ee.iitm.ac.in/~raju/journals/j25.pdf
+58db008b204d0c3c6744f280e8367b4057173259,http://pdfs.semanticscholar.org/58db/008b204d0c3c6744f280e8367b4057173259.pdf,,,http://inpressco.com/wp-content/uploads/2012/06/Paper8270-2781.pdf
+58628e64e61bd2776a2a7258012eabe3c79ca90c,http://pdfs.semanticscholar.org/5862/8e64e61bd2776a2a7258012eabe3c79ca90c.pdf,,,https://www.cs.swarthmore.edu/~meeden/cs81/f17/papers/mitchell17.pdf
+58e7dbbb58416b785b4a1733bf611f8106511aca,,,,http://doi.ieeecomputersociety.org/10.1109/ACII.2017.8273597
+676a136f5978783f75b5edbb38e8bb588e8efbbe,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2B_084_ext.pdf,,https://doi.org/10.1109/CVPR.2015.7299038,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/2B_084_ext.pdf
+676f9eabf4cfc1fd625228c83ff72f6499c67926,http://pdfs.semanticscholar.org/676f/9eabf4cfc1fd625228c83ff72f6499c67926.pdf,,,http://arxiv.org/abs/1704.08328
+677477e6d2ba5b99633aee3d60e77026fb0b9306,http://pdfs.semanticscholar.org/d105/b9b31106495f58fb951cfdbf64787ee89ab2.pdf,,,https://export.arxiv.org/pdf/1704.07863
+670531f3925c1ee6921f1550a988a034db727c3b,http://neerajkumar.org/base/papers/nk_www2014_photorecall.pdf,,https://doi.org/10.1007/978-3-319-25781-5_17,http://doi.acm.org/10.1145/2567948.2577360
+679b7fa9e74b2aa7892eaea580def6ed4332a228,http://pdfs.semanticscholar.org/679b/7fa9e74b2aa7892eaea580def6ed4332a228.pdf,,,http://disi.unitn.it/~sebe/publications/affective11.pdf
+673541a8cb1aa3ac63a288523ba71aec2a38280e,,,,http://doi.ieeecomputersociety.org/10.1109/ICME.2016.7552971
+67a99c92166d77db02f6cc059f1aeddc32580d4b,,,,
+67ae7ee9557cb486d5e1129b9b24466ffb8c4766,,,,
+670637d0303a863c1548d5b19f705860a23e285c,https://classes.cs.uoregon.edu/16F/cis607photo/faces.pdf,,,http://www.cs.columbia.edu/~belhumeur/journal/siggraph08.pdf
+67b79c2336b9a2efbfc805b9a6912a0959e392a9,https://www.researchgate.net/profile/Engin_Erzin/publication/220716898_RANSAC-Based_Training_Data_Selection_on_Spectral_Features_for_Emotion_Recognition_from_Spontaneous_Speech/links/0912f5089705e67f21000000.pdf,,,http://www.researchgate.net/profile/Engin_Erzin/publication/220716898_RANSAC-Based_Training_Data_Selection_on_Spectral_Features_for_Emotion_Recognition_from_Spontaneous_Speech/links/0912f5089705e67f21000000.pdf
+6742c0a26315d7354ab6b1fa62a5fffaea06da14,http://pdfs.semanticscholar.org/ae08/778d8003933a02fd90a49b2e5f67ba56ad8d.pdf,,,http://arxiv.org/abs/1708.06703
+67a50752358d5d287c2b55e7a45cc39be47bf7d0,http://pdfs.semanticscholar.org/67a5/0752358d5d287c2b55e7a45cc39be47bf7d0.pdf,,,http://ftp.ncbi.nlm.nih.gov/pub/pmc/e6/70/pone.0117902.PMC4351105.pdf
+67c3c1194ee72c54bc011b5768e153a035068c43,http://pdfs.semanticscholar.org/67c3/c1194ee72c54bc011b5768e153a035068c43.pdf,,,http://hdl.handle.net/1721.1/37896
+67c703a864aab47eba80b94d1935e6d244e00bcb,http://pdfs.semanticscholar.org/67c7/03a864aab47eba80b94d1935e6d244e00bcb.pdf,,,http://thesai.org/Downloads/Volume7No6/Paper_32-Face_Retrieval_Based_on_Local_Binary_Pattern_and_Its_Variants.pdf
+67d7022462c98e6c5de9f2254b46f0b8d3b92089,,,,
+67214e8d2f83eb41c14bfc86698eb6620e72e87c,,,,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.263
+67e6ddce6fea17bb2b171c949ee224936d36c0d1,,,https://doi.org/10.1109/ICIP.2008.4712157,
+677ebde61ba3936b805357e27fce06c44513a455,http://pdfs.semanticscholar.org/677e/bde61ba3936b805357e27fce06c44513a455.pdf,,,http://conf-scoop.org/ACV-2014/3.Z.Chi_ACV.pdf
+67ba3524e135c1375c74fe53ebb03684754aae56,http://www.mirlab.org/conference_papers/International_Conference/ICASSP%202017/pdfs/0001767.pdf,,https://doi.org/10.1109/ICASSP.2017.7952460,
+6769cfbd85329e4815bb1332b118b01119975a95,http://pdfs.semanticscholar.org/6769/cfbd85329e4815bb1332b118b01119975a95.pdf,,,http://www0.cs.ucl.ac.uk/staff/s.prince/Papers/BMVC2006TiedFactorAnalysis.pdf
+0bc53b338c52fc635687b7a6c1e7c2b7191f42e5,http://pdfs.semanticscholar.org/a32a/8d6d4c3b4d69544763be48ffa7cb0d7f2f23.pdf,,,http://wrap.warwick.ac.uk/80626/7/WRAP_zhang-bhalerao-bmvc-2016_.pdf
+0b0c2d9db83b4f002f23f4a20cfc5a3d10295372,,,,
+0b2277a0609565c30a8ee3e7e193ce7f79ab48b0,http://ivg.au.tsinghua.edu.cn/paper/2012_Cost-sensitive%20semi-supervised%20discriminant%20analysis%20for%20face%20recognition.pdf,,https://doi.org/10.1109/TIFS.2012.2188389,
+0b58b3a5f153f653c138257426bf8d572ae35a67,,,https://doi.org/10.1109/SMC.2016.7844481,
+0b3144cdc9d6d5a1498d6178db20d1c49fb64de9,,,,http://doi.acm.org/10.1145/1322192.1322203
+0b9ce839b3c77762fff947e60a0eb7ebbf261e84,http://pdfs.semanticscholar.org/0b9c/e839b3c77762fff947e60a0eb7ebbf261e84.pdf,,,http://www.cmeri.res.in/rnd/srlab/cvision/All%20papers/7%20.pdf
+0b8b8776684009e537b9e2c0d87dbd56708ddcb4,http://pdfs.semanticscholar.org/0b8b/8776684009e537b9e2c0d87dbd56708ddcb4.pdf,,,https://arxiv.org/pdf/1709.03675v1.pdf
+0ba64f4157d80720883a96a73e8d6a5f5b9f1d9b,http://pdfs.semanticscholar.org/84b7/e2138a3701432c33ea70a1297328cd814ab5.pdf,,,https://arxiv.org/pdf/1803.06542v2.pdf
+0b6a5200c33434cbfa9bf24ba482f6e06bf5fff7,http://pdfs.semanticscholar.org/0b6a/5200c33434cbfa9bf24ba482f6e06bf5fff7.pdf,,,http://arxiv.org/abs/1605.09612
+0bab5213911c19c40e936b08d2f8fba01e286b85,,,https://doi.org/10.1109/BigMM.2017.81,
+0b605b40d4fef23baa5d21ead11f522d7af1df06,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989a819.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Akata_Label-Embedding_for_Attribute-Based_2013_CVPR_paper.pdf
+0b0e679e6d3abe3adc8525d4fee49b388ccfdf9a,,,,
+0b0eb562d7341231c3f82a65cf51943194add0bb,http://pdfs.semanticscholar.org/0b0e/b562d7341231c3f82a65cf51943194add0bb.pdf,,,https://www.researchgate.net/profile/Mohsen_Ardabilian/publication/254762552_Facial_Image_Analysis_Based_on_Local_Binary_Patterns_A_Survey/links/54366c050cf2dc341db30747.pdf?origin=publication_list
+0b3a146c474166bba71e645452b3a8276ac05998,http://pdfs.semanticscholar.org/c6e5/17eb85bc6c68dff5d3fadb2d817e839c966b.pdf,,,http://papers.nips.cc/paper/2708-whos-in-the-picture.pdf
+0b78fd881d0f402fd9b773249af65819e48ad36d,http://mirlab.org/conference_papers/International_Conference/ISCSLP%202008/pdfs/281.pdf,,https://doi.org/10.1109/CHINSL.2008.ECP.82,http://www.isca-speech.org/archive_open/archive_papers/iscslp2008/281.pdf
+0b835284b8f1f45f87b0ce004a4ad2aca1d9e153,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w16/papers/Kapadia_Cartooning_for_Enhanced_CVPR_2017_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2017.175
+0b8839945259ec764ef0fad47471f34db39f40c3,,,https://doi.org/10.1109/DESEC.2017.8073838,
+0b5bd3ce90bf732801642b9f55a781e7de7fdde0,http://pdfs.semanticscholar.org/0b5b/d3ce90bf732801642b9f55a781e7de7fdde0.pdf,,https://doi.org/10.1016/j.patrec.2011.01.004,http://lms.ctl.cyut.edu.tw/sysdata/26/21126/doc/555f4d2e898da8b3/attach/994939.pdf
+0b0958493e43ca9c131315bcfb9a171d52ecbb8a,http://pdfs.semanticscholar.org/0b09/58493e43ca9c131315bcfb9a171d52ecbb8a.pdf,,,https://sbelharbi.github.io/publications/2015/belharbiCAP2015.pdf
+0be418e63d111e3b94813875f75909e4dc27d13a,,,https://doi.org/10.1109/ICB.2016.7550057,
+0b51197109813d921835cb9c4153b9d1e12a9b34,http://pdfs.semanticscholar.org/0b51/197109813d921835cb9c4153b9d1e12a9b34.pdf,,,https://newtraell.cs.uchicago.edu/files/ms_paper/liwenz.pdf
+0bf3513d18ec37efb1d2c7934a837dabafe9d091,http://pdfs.semanticscholar.org/14ff/c760c1655524fc2a035357ad354664b5af5e.pdf,,,http://www1.i2r.a-star.edu.sg/~htang/AAAI2015_TRR.pdf
+0bf1f999a16461a730dd80e3a187d0675c216292,,,,http://doi.ieeecomputersociety.org/10.1109/CW.2017.26
+0b20f75dbb0823766d8c7b04030670ef7147ccdd,http://pdfs.semanticscholar.org/0b20/f75dbb0823766d8c7b04030670ef7147ccdd.pdf,,,http://arxiv.org/abs/1201.5946
+0be49fc1e0c9a6a50e449015945dd1cf92ccd07e,,,,
+0b174d4a67805b8796bfe86cd69a967d357ba9b6,http://pdfs.semanticscholar.org/0b17/4d4a67805b8796bfe86cd69a967d357ba9b6.pdf,,,http://www.isca.in/rjrs/archive/v3/i4/10.ISCA-RJRS-2013-216.pdf
+0ba449e312894bca0d16348f3aef41ca01872383,http://pdfs.semanticscholar.org/0ba4/49e312894bca0d16348f3aef41ca01872383.pdf,,,https://arxiv.org/pdf/1705.06884v1.pdf
+0b878d553f359b38753c6ea27d7acf500a90da15,,,,
+0b87d91fbda61cdea79a4b4dcdcb6d579f063884,http://pdfs.semanticscholar.org/0b87/d91fbda61cdea79a4b4dcdcb6d579f063884.pdf,,,http://benthamopen.com/contents/pdf/TOAUTOCJ/TOAUTOCJ-7-569.pdf
+0be2245b2b016de1dcce75ffb3371a5e4b1e731b,http://pdfs.semanticscholar.org/0be2/245b2b016de1dcce75ffb3371a5e4b1e731b.pdf,,https://doi.org/10.1007/11840817_45,http://poseidon.csd.auth.gr/papers/PUBLISHED/CONFERENCE/pdf/Moschou06a.pdf
+0b79356e58a0df1d0efcf428d0c7c4651afa140d,http://pdfs.semanticscholar.org/7725/05d940a31ca237563cfb2d5c05c62742993f.pdf,,,http://www.merl.com/papers/docs/TR99-13.pdf
+0be015e2f9a1d2acebc3afb6e0f6948dd2f9d23d,,,https://doi.org/10.1007/s12193-013-0133-0,
+0b85b50b6ff03a7886c702ceabad9ab8c8748fdc,http://pdfs.semanticscholar.org/0b85/b50b6ff03a7886c702ceabad9ab8c8748fdc.pdf,,,http://jov.arvojournals.org/pdfaccess.ashx?url=/data/journals/jov/933483/jov-11-3-17.pdf
+0b84f07af44f964817675ad961def8a51406dd2e,https://arxiv.org/pdf/1604.02531v2.pdf,,,https://arxiv.org/pdf/1604.02531v1.pdf
+0b242d5123f79defd5f775d49d8a7047ad3153bc,http://pdfs.semanticscholar.org/84db/c0010ae4f5206d689cf9f5bb176d18990bcd.pdf,,,https://arxiv.org/pdf/1510.05067v4.pdf
+0b3786a3a0ea7ec08f01636124c183dbee8f625f,http://www.cs.uiuc.edu/homes/dhoiem/publications/pami2012_FlickrSimilaritiesSIKMA_Gang.pdf,,,http://www.cs.illinois.edu/homes/dhoiem/publications/pami2012_FlickrSimilaritiesSIKMA_Gang.pdf
+0b50e223ad4d9465bb92dbf17a7b79eccdb997fb,http://users.eecs.northwestern.edu/~ganghua/publication/CVPR08a.pdf,,,http://projectsweb.cs.washington.edu/research/insects/CVPR2009/3D_data/randproj_facerecog.pdf
+0badf61e8d3b26a0d8b60fe94ba5c606718daf0b,http://pdfs.semanticscholar.org/0bad/f61e8d3b26a0d8b60fe94ba5c606718daf0b.pdf,,,http://tjfeonline.com/admin/archive/5008.04.20161460097855.pdf
+0b02bfa5f3a238716a83aebceb0e75d22c549975,http://pdfs.semanticscholar.org/0b02/bfa5f3a238716a83aebceb0e75d22c549975.pdf,,,http://www.cv.tu-berlin.de/fileadmin/fg140/Learning_Probabilistic.pdf
+0b2966101fa617b90510e145ed52226e79351072,http://www.cs.umanitoba.ca/~ywang/papers/icpr16_videotext.pdf,,https://doi.org/10.1109/ICPR.2016.7899903,
+0ba0f000baf877bc00a9e144b88fa6d373db2708,http://pdfs.semanticscholar.org/0ba0/f000baf877bc00a9e144b88fa6d373db2708.pdf,,,http://onlinepresent.org/proceedings/vol17_2013/29.pdf
+0be80da851a17dd33f1e6ffdd7d90a1dc7475b96,http://pdfs.semanticscholar.org/0be8/0da851a17dd33f1e6ffdd7d90a1dc7475b96.pdf,,https://doi.org/10.1155/2016/7696035,
+0b183f5260667c16ef6f640e5da50272c36d599b,http://pdfs.semanticscholar.org/0b18/3f5260667c16ef6f640e5da50272c36d599b.pdf,,https://doi.org/10.1007/978-3-319-10593-2_10,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ECCV_2014/papers/8692/86920135.pdf
+0b4c4ea4a133b9eab46b217e22bda4d9d13559e6,http://www.micc.unifi.it/wp-content/uploads/2015/12/2015_morph_random_forests.pdf,,,http://www.micc.unifi.it/publications/2015/DKBD15/PID3774829.pdf
+0b9db62b26b811e8c24eb9edc37901a4b79a897f,https://eng.ucmerced.edu/people/cyang35/CVPR13/cvpr13_hallucination.pdf,,,http://faculty.ucmerced.edu/mhyang/papers/cvpr13_hallucination_sup.pdf
+0ba99a709cd34654ac296418a4f41a9543928149,https://pdfs.semanticscholar.org/0ba9/9a709cd34654ac296418a4f41a9543928149.pdf,,https://doi.org/10.1109/TIP.2010.2049235,http://vc.sce.ntu.edu.sg/index_files/TIPClustering_double.pdf
+0be764800507d2e683b3fb6576086e37e56059d1,http://pdfs.semanticscholar.org/0be7/64800507d2e683b3fb6576086e37e56059d1.pdf,,,http://dukespace.lib.duke.edu/dspace/bitstream/handle/10161/12206/Huang_duke_0066D_13412.pdf?sequence=1
+0b80fdb5b78422efdb3cdb840c78630de0af61f3,,,,
+0b642f6d48a51df64502462372a38c50df2051b1,https://infoscience.epfl.ch/record/231128/files/Le_ICMI_2017.pdf,,,http://doi.acm.org/10.1145/3136755.3136800
+0b7d1386df0cf957690f0fe330160723633d2305,http://www.cs.rpi.edu/~magdon/ps/conference/AccentICMLA2009.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICMLA.2009.133
+0b6616f3ebff461e4b6c68205fcef1dae43e2a1a,http://pdfs.semanticscholar.org/0b66/16f3ebff461e4b6c68205fcef1dae43e2a1a.pdf,,,https://arxiv.org/pdf/1312.4384v1.pdf
+0b8c92463f8f5087696681fb62dad003c308ebe2,https://www.iiitd.edu.in/~richa/papers/BTAS10-Sketch.pdf,,https://doi.org/10.1109/BTAS.2010.5634507,
+0bc0f9178999e5c2f23a45325fa50300961e0226,http://pdfs.semanticscholar.org/0bc0/f9178999e5c2f23a45325fa50300961e0226.pdf,,,http://cs229.stanford.edu/proj2010/RaoThiagarajan-RecognizingFacialExpressionsFromVideosUsingDeepBeliefNetworks.pdf
+0ba402af3b8682e2aa89f76bd823ddffdf89fa0a,http://pdfs.semanticscholar.org/c0d8/4377168c554cb8e83099bed940091fe49dec.pdf,,,https://arxiv.org/pdf/1611.05916v2.pdf
+0b3f354e6796ef7416bf6dde9e0779b2fcfabed2,http://pdfs.semanticscholar.org/fd60/5d123a0f777716f798f258fbbcd73d75fa8b.pdf,,,http://scholar.lib.vt.edu/theses/available/etd-04022005-235756/unrestricted/CJonesDissertation-ColorFaceRecognition.pdf
+0b9d3a0c61ee498f8ed54aaa22d3c4e72aa56f40,http://www.researchgate.net/profile/Mark_Billinghurst/publication/221209697_A_Quadratic_Deformation_Model_for_Facial_Expression_Recognition/links/00b4952464de6e125e000000.pdf,,,https://www.researchgate.net/profile/Ramakrishnan_Mukundan/publication/221209697_A_Quadratic_Deformation_Model_for_Facial_Expression_Recognition/links/09e41510929d5d3c66000000.pdf?origin=publication_list
+937e89cdf056358d1d5befe334a0e1f497f7d643,,,,
+9391618c09a51f72a1c30b2e890f4fac1f595ebd,http://pdfs.semanticscholar.org/9391/618c09a51f72a1c30b2e890f4fac1f595ebd.pdf,,,https://arxiv.org/pdf/1503.08843v1.pdf
+93675f86d03256f9a010033d3c4c842a732bf661,http://pdfs.semanticscholar.org/9367/5f86d03256f9a010033d3c4c842a732bf661.pdf,,,http://hal.archives-ouvertes.fr/docs/00/46/03/28/PDF/Thesis_Tao_XU.pdf
+935a7793cbb8f102924fa34fce1049727de865c2,https://ivi.fnwi.uva.nl/isis/publications/2015/AlnajarICIP20015/AlnajarICIP20015.pdf,,https://doi.org/10.1109/ICIP.2015.7351554,
+9326d1390e8601e2efc3c4032152844483038f3f,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2014/W01/papers/Hsu_Landmark_Based_Facial_2014_CVPR_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2014.11
+93b7ee9842114bc15202ff97941892aa848c0716,,,,
+93747de3d40376761d1ef83ffa72ec38cd385833,http://pdfs.semanticscholar.org/9374/7de3d40376761d1ef83ffa72ec38cd385833.pdf,,,http://www.jeffreysanchezburks.com/blog/wp-content/uploads/Team-members-emotional-displays.pdf
+93d903d2e48d6a8ad3e3d2aff2e57622efe649cd,,,https://doi.org/10.1109/ICIP.2016.7532432,
+936c7406de1dfdd22493785fc5d1e5614c6c2882,http://pdfs.semanticscholar.org/9d5e/1395e1ace37d9d5b7ce6854d518e7f128e79.pdf,,,http://tamaraberg.com/papers/desctext.pdf
+93721023dd6423ab06ff7a491d01bdfe83db7754,http://pdfs.semanticscholar.org/9372/1023dd6423ab06ff7a491d01bdfe83db7754.pdf,,,http://liris.cnrs.fr/Documents/Liris-6081.pdf
+93115b81d1efc1f6d2788972bdb89908764890b6,,,,
+93971a49ef6cc88a139420349a1dfd85fb5d3f5c,http://pdfs.semanticscholar.org/9397/1a49ef6cc88a139420349a1dfd85fb5d3f5c.pdf,,,http://publications.idiap.ch/downloads/papers/2014/ElShafey_EABRA_2014.pdf
+935924ddb5992c11f3202bf995183130ad83d07b,,,https://doi.org/10.1117/1.JEI.24.2.023015,
+93d74b1315a09f568027b6d8b3068ef048d17889,,,,
+93cbb3b3e40321c4990c36f89a63534b506b6daf,http://www.cs.wisc.edu/~gdguo/myPapersOnWeb/IEEESMC05Guo.pdf,,https://doi.org/10.1109/TSMCB.2005.846658,http://ftp.cs.wisc.edu/computer-vision/repository/PDF/guo.2005.smc.pdf
+93e1e195f294c463f4832c4686775bf386b3de39,,,https://doi.org/10.1109/TIP.2015.2490551,
+93108f1548e8766621565bdb780455023349d2b2,,,https://doi.org/10.1109/ICIP.2010.5653914,
+937ffb1c303e0595317873eda5ce85b1a17f9943,https://ivi.fnwi.uva.nl/isis/publications/2010/DibekliogluICM2010/DibekliogluICM2010.pdf,,,http://www.science.uva.nl/research/publications/2010/DibekliogluICM2010/mmshc23317_dibeklioglu.pdf
+931f99bc6865d3d0c80c15d5b1c05338dfe98982,,,,
+939f9fa056f8be445da19b43da64bd2405851a43,,,https://doi.org/10.1109/ICSMC.2007.4413713,
+934647c80f484340adecc74ac7141ed0b1d21c2f,,,,
+9329523dc0bd4e2896d5f63cf2440f21b7a16f16,http://pdfs.semanticscholar.org/d853/107e81c3db4a7909b599bff82ab1c48772af.pdf,,,http://arxiv.org/abs/1610.01854
+939d28859c8bd2cca2d692901e174cfd599dac74,,,https://doi.org/10.1109/WOCC.2016.7506582,
+9306f61c7c3bdcdcb257cd437ca59df8e599e326,http://www.umiacs.umd.edu/~pvishalm/Conference_pub/ACPR2011_v2.pdf,,https://doi.org/10.1109/ACPR.2011.6166711,http://www.rci.rutgers.edu/~vmp93/Conference_pub/ACPR2011_v2.pdf
+9378ead3a09bc9f89fb711e2746facf399dd942e,,,https://doi.org/10.1109/TCSVT.2010.2045817,
+93978ba84c8e95ff82e8b5960eab64e54ca36296,,,,http://doi.acm.org/10.1145/3136755.3136806
+93e451f71245f8e5ba346a48de2d09c0bccc3c22,,,,
+934efd61b20f5b8b151a2df7cd373f0b387c02b0,,,https://doi.org/10.5220/0004673003290336,https://hal.inria.fr/hal-00925436/document
+936227f7483938097cc1cdd3032016df54dbd5b6,http://pdfs.semanticscholar.org/9362/27f7483938097cc1cdd3032016df54dbd5b6.pdf,,,http://arxiv.org/abs/1608.07639
+93eb3963bc20e28af26c53ef3bce1e76b15e3209,,,https://doi.org/10.1109/ICIP.2017.8296992,
+939123cf21dc9189a03671484c734091b240183e,http://publications.idiap.ch/downloads/papers/2015/Erdogmus_MMSP_2015.pdf,,https://doi.org/10.1109/MMSP.2014.6958797,http://infoscience.epfl.ch/record/213064/files/Erdogmus_MMSP_2015.pdf
+93d11da02205bbc5ae68e521e421f70a4b74a7f7,,,,
+94b9c0a6515913bad345f0940ee233cdf82fffe1,http://pdfs.semanticscholar.org/94b9/c0a6515913bad345f0940ee233cdf82fffe1.pdf,,,http://www.ijsr.net/archive/v3i12/U1VCMTQ3MzM=.pdf
+94498fae459167841e8b2f4b911493fc3c7da22f,http://www-ee.ccny.cuny.edu/www/web/yltian/Publications/cvpr2016_ROF.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2016.142
+94a7c97d1e3eb5dbfb20b180780451486597a9be,http://pdfs.semanticscholar.org/94a7/c97d1e3eb5dbfb20b180780451486597a9be.pdf,,https://doi.org/10.1016/j.imavis.2016.05.004,http://www.rci.rutgers.edu/~vmp93/Journal_pub/attrspaper_final.pdf
+945ef646679b6c575d3bbef9c6fc0a9629ac1b62,,,,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477689
+94b60008e5f576f46bd3c385398cf2ecbb16f499,,,,
+9458c518a6e2d40fb1d6ca1066d6a0c73e1d6b73,http://www.vision.ee.ethz.ch/~zzhiwu/papers/COX-Face-DB-TIP-final.pdf,,https://doi.org/10.1109/TIP.2015.2493448,http://vipl.ict.ac.cn/sites/default/files/people/attach/Revision_COX_TIP_v3.0_1.pdf
+947cdeb52f694fb1c87fc16836f8877cd83dc652,,,https://doi.org/10.1109/SMAP.2017.8022671,
+948af4b04b4a9ae4bff2777ffbcb29d5bfeeb494,http://pdfs.semanticscholar.org/948a/f4b04b4a9ae4bff2777ffbcb29d5bfeeb494.pdf,,,https://pdfs.semanticscholar.org/948a/f4b04b4a9ae4bff2777ffbcb29d5bfeeb494.pdf
+946b4d840b026d91608758d04f2763e9b981234e,,,,http://doi.acm.org/10.1145/2388676.2388792
+942f6eb2ec56809430c2243a71d03cc975d0a673,,,https://doi.org/10.1109/BigMM.2017.64,
+942b89d8d17e89e58c82453de2bfcbbeb09adc81,,,https://doi.org/10.1016/j.patcog.2016.02.019,
+94aa8a3787385b13ee7c4fdd2b2b2a574ffcbd81,http://ibug.doc.ic.ac.uk/media/uploads/documents/p148-cheng.pdf,,,https://ibug.doc.ic.ac.uk/media/uploads/documents/p148-cheng.pdf
+949fff3b0a73c81e7ff3d47caf7fbf9c664bcc70,,,,
+94f74c6314ffd02db581e8e887b5fd81ce288dbf,http://pdfs.semanticscholar.org/94f7/4c6314ffd02db581e8e887b5fd81ce288dbf.pdf,,,https://arxiv.org/pdf/1511.02683v3.pdf
+941166547968081463398c9eb041f00eb04304f7,http://people.duke.edu/~qq3/pub/ExpressionDictionary_TIP.pdf,,https://doi.org/10.1109/TIP.2014.2331141,https://www.cs.umd.edu/~qiu/pub/ExpressionDictionary_TIP.pdf
+9441253b638373a0027a5b4324b4ee5f0dffd670,http://pdfs.semanticscholar.org/9441/253b638373a0027a5b4324b4ee5f0dffd670.pdf,,,http://arxiv.org/pdf/1312.7511v1.pdf
+949699d0b865ef35b36f11564f9a4396f5c9cddb,http://pdfs.semanticscholar.org/9496/99d0b865ef35b36f11564f9a4396f5c9cddb.pdf,,,http://www.kyb.tuebingen.mpg.de/fileadmin/user_upload/files/publications/SchWalCunChi2006_3872%5B0%5D.pdf
+94ac3008bf6be6be6b0f5140a0bea738d4c75579,http://pdfs.semanticscholar.org/94ac/3008bf6be6be6b0f5140a0bea738d4c75579.pdf,,,https://arxiv.org/pdf/1712.01670v1.pdf
+94e259345e82fa3015a381d6e91ec6cded3971b4,http://pdfs.semanticscholar.org/94e2/59345e82fa3015a381d6e91ec6cded3971b4.pdf,,https://doi.org/10.1007/11612704_61,http://www.am.sanken.osaka-u.ac.jp/~mukaigaw/papers/ACCV2006-PL.pdf
+94b729f9d9171e7c4489995e6e1cb134c8521f4e,,,,http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2016.055
+948f35344e6e063ffc35f10c547d5dd9204dee4e,,,https://doi.org/10.1016/j.eswa.2017.07.037,
+940e5c45511b63f609568dce2ad61437c5e39683,,,https://doi.org/10.1109/TIP.2015.2390976,
+0eed55ea9f401f25e1474cdbaf09367f44b4f490,,,https://doi.org/10.1016/j.neucom.2013.05.032,
+0ea05bbc0b0c8b7df10f16e9429ef90177bf94fa,,,https://doi.org/10.1163/016918610X538534,
+0efdd82a4753a8309ff0a3c22106c570d8a84c20,http://pdfs.semanticscholar.org/0efd/d82a4753a8309ff0a3c22106c570d8a84c20.pdf,,,https://labicvl.github.io/docs/pubs/Hwang_WIAMIS_2004.pdf
+0e05b365af662bc6744106a7cdf5e77c9900e967,,,https://doi.org/10.1007/s11042-014-2234-5,
+0e5dcc6ae52625fd0637c6bba46a973e46d58b9c,http://pdfs.semanticscholar.org/0e5d/cc6ae52625fd0637c6bba46a973e46d58b9c.pdf,,,http://humansensing.cs.cmu.edu/papers/parda_pr.pdf
+0ee83ed9bedc0cec5c3368144df0b6f4ee76ddff,,,,http://doi.ieeecomputersociety.org/10.1109/CGIV.2007.40
+0e73d2b0f943cf8559da7f5002414ccc26bc77cd,https://people.cs.umass.edu/~smaji/presentations/similarity-poster-cvpr14.pdf,,,http://vision.cornell.edu/se3/wp-content/uploads/2014/09/cvpr2014_similarity.pdf
+0e37d70794d5ccfef8b4cc22b4203245f33eec6e,,,https://doi.org/10.1109/ICIP.2010.5653034,
+0e8a28511d8484ad220d3e8dde39220c74fab14b,,,https://doi.org/10.1109/TNNLS.2015.2477826,
+0e454686f83284ced2ffc5740829552a032671a3,,,https://doi.org/10.1109/IJCNN.2015.7280802,
+0e5557a0cc58194ad53fab5dd6f4d4195d19ce4e,,,https://doi.org/10.1109/TMM.2015.2500730,
+0ed0e48b245f2d459baa3d2779bfc18fee04145b,http://pdfs.semanticscholar.org/0ed0/e48b245f2d459baa3d2779bfc18fee04145b.pdf,,https://doi.org/10.1137/1.9781611972771.73,http://parnec.nuaa.edu.cn/papers/conference/2007/dqzhang-SDM07.pdf
+0ea6ee0931f2dc51b0440dfa197433faacd53010,,,,
+0ed4b4d6d1a0c49c4eb619aab36db559b620d99f,,,https://doi.org/10.1016/j.neucom.2015.11.115,
+0eac652139f7ab44ff1051584b59f2dc1757f53b,http://pdfs.semanticscholar.org/0eac/652139f7ab44ff1051584b59f2dc1757f53b.pdf,,,http://arxiv.org/abs/1611.01584
+0ef96d97365899af797628e80f8d1020c4c7e431,http://media.adelaide.edu.au/acvt/Publications/2006/2006-Improving%20the%20Speed%20of%20Kernel%20PCA%20on%20Large%20Scale%20Datasets.pdf,,,http://www1.i2r.a-star.edu.sg/~tjchin/chin_avss06.pdf
+0e7f277538142fb50ce2dd9179cffdc36b794054,http://nb.vse.cz/~svatek/mdm08.pdf,,,
+0e8760fc198a7e7c9f4193478c0e0700950a86cd,http://pdfs.semanticscholar.org/0e87/60fc198a7e7c9f4193478c0e0700950a86cd.pdf,,,https://arxiv.org/pdf/1802.01777v2.pdf
+0ec0fc9ed165c40b1ef4a99e944abd8aa4e38056,http://pdfs.semanticscholar.org/0ec0/fc9ed165c40b1ef4a99e944abd8aa4e38056.pdf,,,http://thescipub.com/PDF/crpsp.2015.22.30.pdf
+0e652a99761d2664f28f8931fee5b1d6b78c2a82,http://pdfs.semanticscholar.org/0e65/2a99761d2664f28f8931fee5b1d6b78c2a82.pdf,,,https://arxiv.org/pdf/1209.5111v1.pdf
+0e50fe28229fea45527000b876eb4068abd6ed8c,http://pdfs.semanticscholar.org/0e50/fe28229fea45527000b876eb4068abd6ed8c.pdf,,https://doi.org/10.24963/ijcai.2017/409,https://www.ijcai.org/proceedings/2017/0409.pdf
+0eff410cd6a93d0e37048e236f62e209bc4383d1,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_ICRA_2010/data/papers/0516.pdf,,https://doi.org/10.1109/ROBOT.2010.5509146,
+0ea7b7fff090c707684fd4dc13e0a8f39b300a97,https://arxiv.org/pdf/1711.06055v1.pdf,,,http://doi.acm.org/10.1145/3123266.3123438
+0ef20991e0ecc7dc3f6e0e5fd6ee93c4970206f3,,,https://doi.org/10.1109/ICIP.2015.7351013,
+0ee661a1b6bbfadb5a482ec643573de53a9adf5e,http://epubs.surrey.ac.uk/812523/1/yunlian_TIFS2014.pdf,,https://doi.org/10.1109/TIFS.2014.2362007,
+0e36ada8cb9c91f07c9dcaf196d036564e117536,http://pdfs.semanticscholar.org/d0d5/aa7f797113c825053f4c4fd3772dc3601139.pdf,,,http://ai2-website.s3.amazonaws.com/publications/much_ado.pdf
+0e986f51fe45b00633de9fd0c94d082d2be51406,http://vision.ics.uci.edu/papers/ZhuR_CVPR_2012/ZhuR_CVPR_2012.pdf,,,http://www.ics.uci.edu/~dramanan/papers/face_2012.pdf
+0ebc50b6e4b01eb5eba5279ce547c838890b1418,http://pdfs.semanticscholar.org/0ebc/50b6e4b01eb5eba5279ce547c838890b1418.pdf,,,http://www.aaai.org/ocs/index.php/AAAI/AAAI14/paper/view/8225
+0e49a23fafa4b2e2ac097292acf00298458932b4,http://pdfs.semanticscholar.org/0e49/a23fafa4b2e2ac097292acf00298458932b4.pdf,,,http://www.uav.ro/applications/se/journal/index.php/TAMCS/article/download/70/51/
+0ec1673609256b1e457f41ede5f21f05de0c054f,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989d025.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2013.389
+0eb077a3e227b19f032f980d3a3206e4ae15e429,,,,
+0e3840ea3227851aaf4633133dd3cbf9bbe89e5b,http://pdfs.semanticscholar.org/8d59/98cd984e7cce307da7d46f155f9db99c6590.pdf,,,http://arxiv.org/abs/1701.02664
+0e2d956790d3b8ab18cee8df6c949504ee78ad42,,,https://doi.org/10.1109/IVCNZ.2013.6727024,
+0e5dad0fe99aed6978c6c6c95dc49c6dca601e6a,http://www.openu.ac.il/home/hassner/projects/LATCH/LATCH.pdf,,,https://arxiv.org/pdf/1501.03719v1.pdf
+0ea38a5ba0c8739d1196da5d20efb13406bb6550,https://filebox.ece.vt.edu/~parikh/Publications/ParikhGrauman_ICCV2011_relative.pdf,,,http://filebox.ece.vt.edu/~parikh/Publications/ParikhGrauman_ICCV2011_relative.pdf
+0e21c9e5755c3dab6d8079d738d1188b03128a31,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Wu_Constrained_Clustering_and_2013_CVPR_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2013.450
+0e4baf74dfccef7a99c6954bb0968a2e35315c1f,,,https://doi.org/10.1109/SIU.2012.6204517,
+0ed96cc68b1b61e9eb4096f67d3dcab9169148b9,,,,http://doi.acm.org/10.1145/2663204.2666279
+0e4fa61871755b5548a5c970c8103f7b2ada24f3,,,,http://doi.ieeecomputersociety.org/10.1109/SITIS.2015.19
+0e677f2b798f5c1f7143ba983467321a7851565a,http://www.cse.iitk.ac.in/users/rahulaaj/papers/BillyYL.pdf,,,https://pdfs.semanticscholar.org/cb0b/07abcddc5d88cb9d4aa212799a21a2e35508.pdf
+0e78af9bd0f9a0ce4ceb5f09f24bc4e4823bd698,http://pdfs.semanticscholar.org/0e78/af9bd0f9a0ce4ceb5f09f24bc4e4823bd698.pdf,,,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ACCV_2014/pages/PDF/698.pdf
+0ed1c1589ed284f0314ed2aeb3a9bbc760dcdeb5,http://ca.cs.cmu.edu/sites/default/files/9MMED_CVPR12.pdf,,https://doi.org/10.1007/s11263-013-0683-3,http://humansensing.cs.cmu.edu/sites/default/files/MMED_IJCV14.pdf
+0e02dadab802128f6155e099135d03ca6b72f42c,,,,http://doi.ieeecomputersociety.org/10.1109/TKDE.2014.2365793
+0e7c70321462694757511a1776f53d629a1b38f3,http://pdfs.semanticscholar.org/0e7c/70321462694757511a1776f53d629a1b38f3.pdf,,,http://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.1136.pdf
+0ec2049a1dd7ae14c7a4c22c5bcd38472214f44d,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Wang_Fast_Subspace_Search_2013_ICCV_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2013.345
+0ec67c69e0975cfcbd8ba787cc0889aec4cc5399,http://pdfs.semanticscholar.org/1af3/6a1fc18328e2a0310bc4208ef35ba882bdc1.pdf,,https://doi.org/10.5244/C.12.56,http://personalpages.manchester.ac.uk/staff/timothy.f.cootes/Papers/walker_bmvc98.pdf
+0e6f422c3f79c552c0c3d7eda0145aed8680f0ea,,,https://doi.org/10.1016/j.patrec.2012.09.008,
+0e1983e9d0e8cb4cbffef7af06f6bc8e3f191a64,http://mirlab.org/conference_papers/International_Conference/ACM%202005/docs/mm1039.pdf,,https://doi.org/10.1007/11612032_50,http://doi.acm.org/10.1145/1101149.1101364
+0e1a18576a7d3b40fe961ef42885101f4e2630f8,http://pdfs.semanticscholar.org/0e1a/18576a7d3b40fe961ef42885101f4e2630f8.pdf,,,http://www.robots.ox.ac.uk/~vgg/publications/papers/everingham05a.pdf
+608b01c70f0d1166c10c3829c411424d9ef550e7,,,https://doi.org/10.1109/CISP-BMEI.2017.8301920,
+6080f26675e44f692dd722b61905af71c5260af8,https://arxiv.org/pdf/1603.05073v1.pdf,,https://doi.org/10.1109/IJCNN.2016.7727584,http://arxiv.org/pdf/1603.05073v1.pdf
+60a006bdfe5b8bf3243404fae8a5f4a9d58fa892,http://alumni.cs.ucr.edu/~mkafai/papers/Paper_bwild.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2015.7284836
+606dff86a34c67c79d93f1e536487847a5bb7002,,,https://doi.org/10.1109/WACV.2011.5711538,
+6043006467fb3fd1e9783928d8040ee1f1db1f3a,https://www.microsoft.com/en-us/research/wp-content/uploads/2016/10/CVPR10_FaceReco.pdf,,,http://research.microsoft.com/en-us/um/people/jiansun/papers/CVPR10_FaceReco.pdf
+600025c9a13ff09c6d8b606a286a79c823d89db8,http://pdfs.semanticscholar.org/6000/25c9a13ff09c6d8b606a286a79c823d89db8.pdf,,,http://airccse.org/journal/mlaij/papers/1114mlaij06.pdf
+60d765f2c0a1a674b68bee845f6c02741a49b44e,http://pdfs.semanticscholar.org/60d7/65f2c0a1a674b68bee845f6c02741a49b44e.pdf,,https://doi.org/10.1016/j.patrec.2005.09.026,https://www.researchgate.net/profile/Kin_Man_Lam/publication/220645679_An_efficient_illumination_normalization_method_for_face_recognition/links/0c96052f1d90e2224a000000.pdf
+600075a1009b8692480726c9cff5246484a22ec8,,,,
+607aebe7568407421e8ffc7b23a5fda52650ad93,,,https://doi.org/10.1109/ISBA.2016.7477237,
+60d4cef56efd2f5452362d4d9ac1ae05afa970d1,http://pdfs.semanticscholar.org/60d4/cef56efd2f5452362d4d9ac1ae05afa970d1.pdf,,,http://www.jmlr.org/proceedings/papers/v48/fernando16.pdf
+60ce4a9602c27ad17a1366165033fe5e0cf68078,http://pdfs.semanticscholar.org/60ce/4a9602c27ad17a1366165033fe5e0cf68078.pdf,,,http://atvs.ii.uam.es/audias/files/2015_JFO_FaceRegionFusion_Tome.pdf
+6097ea6fd21a5f86a10a52e6e4dd5b78a436d5bf,http://arxiv.org/pdf/1512.05300v3.pdf,,,https://arxiv.org/pdf/1512.05300v5.pdf
+60c699b9ec71f7dcbc06fa4fd98eeb08e915eb09,https://arxiv.org/pdf/1706.03947v1.pdf,,https://doi.org/10.1109/VCIP.2017.8305029,http://arxiv.org/abs/1706.03947
+60970e124aa5fb964c9a2a5d48cd6eee769c73ef,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Tierney_Subspace_Clustering_for_2014_CVPR_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2014.134
+60efdb2e204b2be6701a8e168983fa666feac1be,http://www.vision.ee.ethz.ch/en/publications/papers/articles/eth_biwi_01387.pdf,,https://doi.org/10.1007/s11263-017-1043-5,
+60824ee635777b4ee30fcc2485ef1e103b8e7af9,http://www.ee.surrey.ac.uk/CVSSP/Publications/papers/Feng-TIP-2015.pdf,,https://doi.org/10.1109/TIP.2015.2446944,http://epubs.surrey.ac.uk/808177/1/Feng-TIP-2015.pdf
+60643bdab1c6261576e6610ea64ea0c0b200a28d,http://pdfs.semanticscholar.org/6064/3bdab1c6261576e6610ea64ea0c0b200a28d.pdf,,https://doi.org/10.1016/j.jvcir.2014.08.006,https://dr.ntu.edu.sg/bitstream/handle/10220/24578/Multi-Manifold%20Metric%20Learning%20for%20Face%20Recognition.pdf?isAllowed=y&sequence=1
+60f980b1f146d659f8f8f0b4755ae2d5df64ca8d,,,,
+609c35a6fa80af8b2e4ce46b1b16ec36578fd07f,,,https://doi.org/10.1155/2014/950349,
+602f772c69e4a1a65de00443c30d51fdd47a80aa,,,https://doi.org/10.1109/IISA.2013.6623705,
+60a20d5023f2bcc241eb9e187b4ddece695c2b9b,http://pdfs.semanticscholar.org/60a2/0d5023f2bcc241eb9e187b4ddece695c2b9b.pdf,,https://doi.org/10.1007/978-3-319-22482-4_32,https://www.gol.ei.tum.de/fileadmin/w00bhl/www/preprints/wei-lvaica15.pdf
+609d81ddf393164581b3e3bf11609a712ac47522,,,https://doi.org/10.1109/APSIPA.2017.8282300,
+60cdcf75e97e88638ec973f468598ae7f75c59b4,http://www.cse.cuhk.edu.hk/~lyu/paper_pdf/tmm08face.pdf,,https://doi.org/10.1109/TMM.2007.911245,http://www.cais.ntu.edu.sg/~chhoi/paper_pdf/TMM08TKFD.pdf
+60040e4eae81ab6974ce12f1c789e0c05be00303,http://pdfs.semanticscholar.org/6004/0e4eae81ab6974ce12f1c789e0c05be00303.pdf,,,http://www.utdallas.edu/~yonas.tadesse/data/GFEAD.pdf
+60b3601d70f5cdcfef9934b24bcb3cc4dde663e7,http://pdfs.semanticscholar.org/60b3/601d70f5cdcfef9934b24bcb3cc4dde663e7.pdf,,,http://personalpages.manchester.ac.uk/staff/hujun.yin/pubs/BGCP_PAMI_rev_23rd_9.pdf
+60bdff71e241f9afc411221bd20aaebb4608576b,,,,
+60737db62fb5fab742371709485e4b2ddf64b7b2,http://dbgroup.cs.tsinghua.edu.cn/ligl/papers/p307-weng.pdf,,,http://doi.acm.org/10.1145/3132847.3132891
+603231c507bb98cc8807b6cbe2c860f79e8f6645,,,https://doi.org/10.1109/EUSIPCO.2015.7362819,
+60284c37249532fe7ff6b14834a2ae4d2a7fda02,,,https://doi.org/10.1109/SIU.2016.7495971,
+601655a17ca199ef674079482c9b37cdf8e094a9,,,,
+60496b400e70acfbbf5f2f35b4a49de2a90701b5,http://pdfs.semanticscholar.org/6049/6b400e70acfbbf5f2f35b4a49de2a90701b5.pdf,,https://doi.org/10.1007/978-3-540-74958-5_40,http://graphics.cs.msu.su/en/publications/text/ecml2007vb.pdf
+60bffecd79193d05742e5ab8550a5f89accd8488,http://pdfs.semanticscholar.org/60bf/fecd79193d05742e5ab8550a5f89accd8488.pdf,,,https://ed-galilee.univ-paris13.fr/wp-content/uploads/Sujet-L2TI-Emmanuel-Viennet.pdf
+601834a4150e9af028df90535ab61d812c45082c,http://pdfs.semanticscholar.org/6018/34a4150e9af028df90535ab61d812c45082c.pdf,,,http://arxiv.org/abs/1609.08345
+6014eeb333998c2b2929657d233ebbcb1c3412c9,,,,http://doi.acm.org/10.1145/2647868.2656406
+34546ef7e6148d9a1fb42cfab5f0ce11c92c760a,,,https://doi.org/10.1016/j.jvcir.2015.09.005,
+346dbc7484a1d930e7cc44276c29d134ad76dc3f,http://pdfs.semanticscholar.org/346d/bc7484a1d930e7cc44276c29d134ad76dc3f.pdf,,,http://rifters.com/real/articles/Network_Redies_et_al_2007.pdf
+34a41ec648d082270697b9ee264f0baf4ffb5c8d,http://pdfs.semanticscholar.org/34a4/1ec648d082270697b9ee264f0baf4ffb5c8d.pdf,,https://doi.org/10.1016/j.imavis.2013.10.002,http://www.cs.zju.edu.cn/people/wangdh/papers/IVC.pdf?doi=10.1016/j.imavis.2013.10.002&domain=f
+34b3b14b4b7bfd149a0bd63749f416e1f2fc0c4c,http://pdfs.semanticscholar.org/9e97/360b519d9912ded55618ccbb000d74d8e35c.pdf,,,http://www-nlpir.nist.gov/projects/tvpubs/tv13.papers/axes.pdf
+34c2ea3c7e794215588c58adf0eaad6dc267d082,,,,http://doi.acm.org/10.1145/3136755.3143005
+34bb11bad04c13efd575224a5b4e58b9249370f3,http://cs.nju.edu.cn/wujx/paper/CVPR2014_Action.pdf,,,https://www.researchgate.net/profile/Weiyao_Lin/publication/266913116_Towards_Good_Practices_for_Action_Video_Encoding/links/543f65ee0cf2e76f02245267.pdf?origin=publication_list
+3411ef1ff5ad11e45106f7863e8c7faf563f4ee1,http://pdfs.semanticscholar.org/3411/ef1ff5ad11e45106f7863e8c7faf563f4ee1.pdf,,https://doi.org/10.1007/978-3-319-10590-1_37,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ECCV_2014/papers/8689/86890569.pdf
+34d484b47af705e303fc6987413dc0180f5f04a9,http://pdfs.semanticscholar.org/34d4/84b47af705e303fc6987413dc0180f5f04a9.pdf,,,http://www.cs.cmu.edu/~ftorre/nsf_grant_v2.pdf
+34c1e9a6166f4732d1738db803467f7abc47ba87,,,https://doi.org/10.1109/WACV.2017.137,
+346166da1a49e531923294300a731167e1436d5b,http://lear.inrialpes.fr/people/mpederso/papers/3DV14.pdf,,,http://thoth.inrialpes.fr/people/mpederso/papers/3DV14.pdf
+34301fbf4624139a40176dbde6f5954b2df6de7b,,,,
+345bea5f7d42926f857f395c371118a00382447f,http://grail.cs.washington.edu/wp-content/uploads/2016/09/kemelmacher2016tp.pdf,,,http://doi.acm.org/10.1145/2897824.2925871
+349434653429733f5f49fe0e160027d994cef115,,,,
+3403cb92192dc6b2943d8dbfa8212cc65880159e,http://pdfs.semanticscholar.org/3403/cb92192dc6b2943d8dbfa8212cc65880159e.pdf,,,http://www.wiau.man.ac.uk/~knw/bmvc99.ps
+3463f12ad434d256cd5f94c1c1bfd2dd6df36947,http://pdfs.semanticscholar.org/3463/f12ad434d256cd5f94c1c1bfd2dd6df36947.pdf,,https://doi.org/10.3390/s17040712,http://www.mdpi.com/1424-8220/17/4/712/pdf
+346c9100b2fab35b162d7779002c974da5f069ee,http://cmlab.csie.ntu.edu.tw/~yanying/paper/p651-lei.pdf,,,http://doi.acm.org/10.1145/2072298.2072410
+34863ecc50722f0972e23ec117f80afcfe1411a9,http://nlpr-web.ia.ac.cn/2010papers/kz/gh3.pdf,,,http://www.nlpr.ia.ac.cn/2010papers/kz/gh3.pdf
+34b7e826db49a16773e8747bc8dfa48e344e425d,http://www.comp.leeds.ac.uk/me/Publications/cvpr09_bsl.pdf,,,http://eprints.pascal-network.org/archive/00005401/01/buehler09.pdf
+34c594abba9bb7e5813cfae830e2c4db78cf138c,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/3B_047_ext.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/3B_047.pdf
+34108098e1a378bc15a5824812bdf2229b938678,http://pdfs.semanticscholar.org/3410/8098e1a378bc15a5824812bdf2229b938678.pdf,,https://doi.org/10.1007/978-3-319-16817-3_18,http://www.eecs.berkeley.edu/~stellayu/publication/doc/2014codeTransferACCV.pdf
+341ed69a6e5d7a89ff897c72c1456f50cfb23c96,http://pdfs.semanticscholar.org/cd7f/26c430363f90e530824446b3a4c85cfb94e5.pdf,,,http://arxiv.org/abs/1702.04280
+348a16b10d140861ece327886b85d96cce95711e,http://pdfs.semanticscholar.org/348a/16b10d140861ece327886b85d96cce95711e.pdf,,,
+3419af6331e4099504255a38de6f6b7b3b1e5c14,http://pdfs.semanticscholar.org/3419/af6331e4099504255a38de6f6b7b3b1e5c14.pdf,,,http://web.stanford.edu/class/ee368/Project_07/reports/ee368group07.pdf
+344c0917c8d9e13c6b3546da8695332f86b57bd3,,,https://doi.org/10.1109/ICIP.2017.8296715,
+34c8de02a5064e27760d33b861b7e47161592e65,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w14/papers/Han_Video_Action_Recognition_CVPR_2017_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2017.162
+340d1a9852747b03061e5358a8d12055136599b0,http://pdfs.semanticscholar.org/340d/1a9852747b03061e5358a8d12055136599b0.pdf,,,http://www.iaeng.org/IJCS/issues_v36/issue_2/IJCS_36_2_08.pdf
+349c909abf937ef0a5a12c28a28e98500598834b,,,,http://doi.ieeecomputersociety.org/10.1109/ICMEW.2014.6890672
+34ccdec6c3f1edeeecae6a8f92e8bdb290ce40fd,http://pdfs.semanticscholar.org/34cc/dec6c3f1edeeecae6a8f92e8bdb290ce40fd.pdf,,,http://www.ijcai.org/Abstract/16/642
+34b42bcf84d79e30e26413f1589a9cf4b37076f9,http://pdfs.semanticscholar.org/34b4/2bcf84d79e30e26413f1589a9cf4b37076f9.pdf,,,http://papers.nips.cc/paper/4400-learning-sparse-representations-of-high-dimensional-data-on-large-scale-dictionaries
+3409aa0ae519ee18043e347e60d85e53e452650a,,,,
+34dd83115195676e7a8b008eb0e9abe84b330b32,,,https://doi.org/10.1007/s00371-014-0931-8,
+5aafca76dbbbbaefd82f5f0265776afb5320dafe,http://pdfs.semanticscholar.org/5aaf/ca76dbbbbaefd82f5f0265776afb5320dafe.pdf,,https://doi.org/10.1016/j.imavis.2015.07.002,http://ibug.doc.ic.ac.uk/media/uploads/documents/jorozco_imavis_2015.pdf
+5a93f9084e59cb9730a498ff602a8c8703e5d8a5,http://pdfs.semanticscholar.org/5a93/f9084e59cb9730a498ff602a8c8703e5d8a5.pdf,,https://doi.org/10.5244/C.26.99,https://hal.archives-ouvertes.fr/hal-00806104/document
+5a259f2f5337435f841d39dada832ab24e7b3325,,,,http://doi.acm.org/10.1145/2964284.2984059
+5a87bc1eae2ec715a67db4603be3d1bb8e53ace2,http://pdfs.semanticscholar.org/5a87/bc1eae2ec715a67db4603be3d1bb8e53ace2.pdf,,,http://www.researchgate.net/profile/Monson_Hayes/publication/221364120_A_Novel_Convergence_Scheme_for_Active_Appearance_Models/links/0deec53509be947258000000.pdf
+5aad56cfa2bac5d6635df4184047e809f8fecca2,http://chenlab.ece.cornell.edu/people/Amir/publications/picture_password.pdf,,https://doi.org/10.1109/ICIP.2013.6738916,
+5a8ca0cfad32f04449099e2e3f3e3a1c8f6541c0,http://pdfs.semanticscholar.org/5a8c/a0cfad32f04449099e2e3f3e3a1c8f6541c0.pdf,,,https://core.ac.uk/download/pdf/82038403.pdf
+5ac80e0b94200ee3ecd58a618fe6afd077be0a00,http://pdfs.semanticscholar.org/5ac8/0e0b94200ee3ecd58a618fe6afd077be0a00.pdf,,,http://arxiv.org/abs/1606.00822
+5a5f0287484f0d480fed1ce585dbf729586f0edc,http://www.researchgate.net/profile/Mohammad_Mahoor/publication/248703363_DISFA_A_Spontaneous_Facial_Action_Intensity_Database/links/0c960520903b2b8153000000.pdf,,,http://mohammadmahoor.com/wp-content/uploads/2017/06/DiSFA_Paper_andAppendix_Final_OneColumn1-1.pdf
+5aadd85e2a77e482d44ac2a215c1f21e4a30d91b,http://pdfs.semanticscholar.org/5aad/d85e2a77e482d44ac2a215c1f21e4a30d91b.pdf,,,http://www.wseas.us/e-library/conferences/2009/cambridge/ISPRA/ISPRA42.pdf
+5a34a9bb264a2594c02b5f46b038aa1ec3389072,http://www.mpi-inf.mpg.de/fileadmin/inf/d2/akata/TPAMI2487986.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2487986
+5af06815baa4b8f53adc9dc22f6eb3f6f1ad8ff8,,,https://doi.org/10.1186/s13640-017-0178-1,
+5a5511dd059d732e60c62ef817532689f4e0ab46,,,,
+5a10d74c7fc3294f76d771df413fe0b0b35f2ab5,,,,
+5a4c6246758c522f68e75491eb65eafda375b701,http://www.mirlab.org/conference_papers/International_Conference/ICASSP%202010/pdfs/0001118.pdf,,https://doi.org/10.1109/ICASSP.2010.5495357,http://mirlab.org/conference_papers/International_Conference/ICASSP%202010/pdfs/0001118.pdf
+5aad5e7390211267f3511ffa75c69febe3b84cc7,http://pdfs.semanticscholar.org/5aad/5e7390211267f3511ffa75c69febe3b84cc7.pdf,,,https://arxiv.org/pdf/1507.04760v1.pdf
+5a029a0b0ae8ae7fc9043f0711b7c0d442bfd372,http://pdfs.semanticscholar.org/5a02/9a0b0ae8ae7fc9043f0711b7c0d442bfd372.pdf,,,http://arxiv.org/abs/1710.08310
+5a4881bfcb4ae49229f39320197c2d01b2fbf1f5,,,,
+5ab96ace21bf54625f3d18ea11801f540519bd3a,,,,
+5ae970294aaba5e0225122552c019eb56f20af74,http://pdfs.semanticscholar.org/5ae9/70294aaba5e0225122552c019eb56f20af74.pdf,,,http://www.ijcee.org/papers/866-TD018.pdf
+5a86842ab586de9d62d5badb2ad8f4f01eada885,http://pdfs.semanticscholar.org/5a86/842ab586de9d62d5badb2ad8f4f01eada885.pdf,,,http://pnrsolution.org/Datacenter/Vol3/Issue3/105.pdf
+5aa57a12444dbde0f5645bd9bcec8cb2f573c6a0,http://pdfs.semanticscholar.org/c173/fa4456941b9c40d53d656b8ad84d24c16ec3.pdf,,,http://www.ccis2k.org/iajit/index.php?Itemid=346&id=91&option=com_content&task=blogcategory
+5a7520380d9960ff3b4f5f0fe526a00f63791e99,http://arxiv.org/pdf/1512.00932v1.pdf,,,https://arxiv.org/pdf/1512.00932v2.pdf
+5a0ae814be58d319dfc9fd98b058a2476801201c,,,https://doi.org/10.1007/s00521-012-1124-x,
+5f871838710a6b408cf647aacb3b198983719c31,http://www.jdl.ac.cn/user/xlchen/Paper/TIP07b.pdf,,https://doi.org/10.1109/TIP.2007.899195,
+5f64a2a9b6b3d410dd60dc2af4a58a428c5d85f9,http://pdfs.semanticscholar.org/e1dd/1c4de149c6b05eedd1728d57a18a074b9b2a.pdf,,,http://arxiv.org/abs/1711.09822
+5f344a4ef7edfd87c5c4bc531833774c3ed23542,http://pdfs.semanticscholar.org/5f34/4a4ef7edfd87c5c4bc531833774c3ed23542.pdf,,,http://www.ifp.uiuc.edu/~iracohen/publications/IraCohenThesis.pdf
+5f6ab4543cc38f23d0339e3037a952df7bcf696b,http://www.public.asu.edu/~bli24/Papers/ICPR2016_video2vec.pdf,,https://doi.org/10.1109/ICPR.2016.7899735,
+5feee69ed183954fa76c58735daa7dd3549e434d,,,https://doi.org/10.1109/ICIP.2008.4711697,
+5fa0e6da81acece7026ac1bc6dcdbd8b204a5f0a,http://pdfs.semanticscholar.org/5fa0/e6da81acece7026ac1bc6dcdbd8b204a5f0a.pdf,,https://doi.org/10.1016/j.patrec.2008.01.003,http://cse.cnu.ac.kr/~cheonghee/papers/multi4.pdf
+5fc97d6cb5af21ed196e44f22cee31ce8c51ef13,,,,http://doi.acm.org/10.1145/2742060.2743769
+5f7094ba898a248e1e6b37e3d9fb795e59131cdc,,,,http://doi.ieeecomputersociety.org/10.1109/ICMEW.2017.8026246
+5feb1341a49dd7a597f4195004fe9b59f67e6707,http://pdfs.semanticscholar.org/5feb/1341a49dd7a597f4195004fe9b59f67e6707.pdf,,,https://arxiv.org/pdf/1801.10312v1.pdf
+5f57a1a3a1e5364792b35e8f5f259f92ad561c1f,http://pdfs.semanticscholar.org/5f57/a1a3a1e5364792b35e8f5f259f92ad561c1f.pdf,,,http://arxiv.org/pdf/1512.00130v1.pdf
+5fa932be4d30cad13ea3f3e863572372b915bec8,http://pdfs.semanticscholar.org/5fa9/32be4d30cad13ea3f3e863572372b915bec8.pdf,,https://doi.org/10.1016/j.patrec.2011.11.028,http://www.nlpr.ia.ac.cn/2012papers/gjkw/gk39.pdf
+5fb9944b18f5a4a6d20778816290ed647f5e3853,,,,http://doi.acm.org/10.1145/3080538.3080540
+5f1cd82343f4bd6972f674d50aecb453d06f04ad,,,,http://doi.acm.org/10.1145/3125739.3125756
+5f5906168235613c81ad2129e2431a0e5ef2b6e4,https://arxiv.org/pdf/1601.00199v1.pdf,,https://doi.org/10.1007/s11263-016-0916-3,http://arxiv.org/pdf/1601.00199v1.pdf
+5fb5d9389e2a2a4302c81bcfc068a4c8d4efe70c,http://pdfs.semanticscholar.org/5fb5/d9389e2a2a4302c81bcfc068a4c8d4efe70c.pdf,,https://doi.org/10.1007/978-3-319-54427-4_29,http://www.vision.cs.chubu.ac.jp/MPRG/C_group/C082_fukui2016.pdf
+5f1dcaff475ef18a2ecec0e114a9849a0a8002b9,http://pdfs.semanticscholar.org/5f1d/caff475ef18a2ecec0e114a9849a0a8002b9.pdf,,https://doi.org/10.1016/j.parco.2015.07.002,http://www.math.uci.edu/~jxin/Parallel-CVG-July2015.pdf
+5f4219118556d2c627137827a617cf4e26242a6e,,,https://doi.org/10.1109/TMM.2017.2751143,
+5f676d6eca4c72d1a3f3acf5a4081c29140650fb,http://www.cs.ucr.edu/~mkafai/papers/Paper_fg.pdf,,,http://vislab.ucr.edu/PUBLICATIONS/pubs/Journal%20and%20Conference%20Papers/after10-1-1997/Conference/2015/To%20Skip%20or%20not%20to%20Skip%20A%20Dataset%20of%20Spontaneous%20Affective%20Response%20of%20Online%20Advertising%20(SARA)%20for%20Audience%20Behavior%20Analysis.pdf
+5fa6f72d3fe16f9160d221e28da35c1e67a5d951,,,,http://doi.acm.org/10.1145/3061639.3062182
+5fb59cf5b31a80d8c70d91660092ef86494be577,,,https://doi.org/10.1109/CISP-BMEI.2017.8301923,
+5f01f14ca354266106d8aa1b07c45e8c9ac3e273,,,,
+5fc664202208aaf01c9b62da5dfdcd71fdadab29,http://pdfs.semanticscholar.org/5fc6/64202208aaf01c9b62da5dfdcd71fdadab29.pdf,,,https://arxiv.org/pdf/1504.05308v1.pdf
+5fce9d893a40c4e0f2ae335b2e68bfd02f1cb2c6,,,https://doi.org/10.1109/ICTAI.2012.40,
+5fac62a3de11125fc363877ba347122529b5aa50,http://openaccess.thecvf.com/content_ICCV_2017/papers/Saha_AMTnet_Action-Micro-Tube_Regression_ICCV_2017_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2017.473
+5f448ab700528888019542e6fea1d1e0db6c35f2,,,https://doi.org/10.1109/LSP.2016.2533721,
+5fa1724a79a9f7090c54925f6ac52f1697d6b570,http://pdfs.semanticscholar.org/5fa1/724a79a9f7090c54925f6ac52f1697d6b570.pdf,,,http://www.aclweb.org/anthology/W/W16/W16-3807.pdf
+5fba1b179ac80fee80548a0795d3f72b1b6e49cd,http://pdfs.semanticscholar.org/fe88/e30cfca9161b598ea8a26985df5832259924.pdf,,,https://www.usenix.org/system/files/conference/usenixsecurity16/sec16_paper_xu.pdf
+5f9dc3919fb088eb84accb1e490921a134232466,,,,http://doi.ieeecomputersociety.org/10.1109/WACV.2007.49
+337b5f0e70e04349da17e8069936e2260390aca0,,,,
+33f7e78950455c37236b31a6318194cfb2c302a4,http://pdfs.semanticscholar.org/33f7/e78950455c37236b31a6318194cfb2c302a4.pdf,,https://doi.org/10.1007/978-3-319-10593-2_30,http://cs-people.bu.edu/hekun/papers/posedet_eccv14_camready2.pdf
+33bba39be70f21e13769a10dbf96689aa4d3ecc6,,,,
+33ac7fd3a622da23308f21b0c4986ae8a86ecd2b,http://pdfs.semanticscholar.org/33ac/7fd3a622da23308f21b0c4986ae8a86ecd2b.pdf,,,http://www.aaai.org/ocs/index.php/FLAIRS/FLAIRS12/paper/download/4481/4789
+33c2131cc85c0f0fef0f15ac18f28312347d9ba4,,,https://doi.org/10.1016/j.neucom.2010.06.024,
+33030c23f6e25e30b140615bb190d5e1632c3d3b,http://pdfs.semanticscholar.org/3303/0c23f6e25e30b140615bb190d5e1632c3d3b.pdf,,,http://tamaraberg.com/papers/workshopwhitepaper.pdf
+33ba256d59aefe27735a30b51caf0554e5e3a1df,http://pdfs.semanticscholar.org/33ba/256d59aefe27735a30b51caf0554e5e3a1df.pdf,,,http://www.ijcai.org/Proceedings/13/Papers/234.pdf
+33c3702b0eee6fc26fc49f79f9133f3dd7fa3f13,http://pdfs.semanticscholar.org/33c3/702b0eee6fc26fc49f79f9133f3dd7fa3f13.pdf,,,https://ibug.doc.ic.ac.uk/media/uploads/documents/rudovic-o-2013-phd-thesis.pdf
+33aff42530c2fd134553d397bf572c048db12c28,http://openaccess.thecvf.com/content_iccv_2015/papers/Ruiz_From_Emotions_to_ICCV_2015_paper.pdf,,,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Ruiz_From_Emotions_to_ICCV_2015_paper.pdf
+33a1a049d15e22befc7ddefdd3ae719ced8394bf,http://pdfs.semanticscholar.org/33a1/a049d15e22befc7ddefdd3ae719ced8394bf.pdf,,,http://www.academypublisher.com/ijrte/vol02/no01/ijrte0201179182.pdf
+33ec047f1084e290c8a6f516bc75345b6bcf02a0,https://www.researchgate.net/profile/Peter_Corcoran/publication/220168274_Smart_Cameras_2D_Affine_Models_for_Determining_Subject_Facial_Expressions/links/02bfe5118f52d3d59d000000.pdf,,https://doi.org/10.1109/TCE.2010.5505930,http://www.researchgate.net/profile/Peter_Corcoran/publication/220168274_Smart_Cameras_2D_Affine_Models_for_Determining_Subject_Facial_Expressions/links/02bfe5118f52d3d59d000000.pdf
+334e65b31ad51b1c1f84ce12ef235096395f1ca7,http://pdfs.semanticscholar.org/334e/65b31ad51b1c1f84ce12ef235096395f1ca7.pdf,,,http://www.decf.berkeley.edu/~wendyju/restricted/Emotion.pdf
+3399f8f0dff8fcf001b711174d29c9d4fde89379,http://pdfs.semanticscholar.org/3399/f8f0dff8fcf001b711174d29c9d4fde89379.pdf,,,https://arxiv.org/pdf/1706.01061v1.pdf
+33b915476f798ca18ae80183bf40aea4aaf57d1e,,,https://doi.org/10.1109/TIP.2013.2271548,
+333aa36e80f1a7fa29cf069d81d4d2e12679bc67,http://pdfs.semanticscholar.org/333a/a36e80f1a7fa29cf069d81d4d2e12679bc67.pdf,,https://doi.org/10.1007/978-3-319-48881-3_59,https://s3-us-west-1.amazonaws.com/disneyresearch/wp-content/uploads/20161014182443/Suggesting-Sounds-for-Images-from-Video-Collections-Paper.pdf
+3312eb79e025b885afe986be8189446ba356a507,http://pdfs.semanticscholar.org/6007/292075f8a8538fa6f4c3d7a8676a595ab1f4.pdf,,https://doi.org/10.1007/978-3-319-46454-1_2,https://arxiv.org/pdf/1603.07027v2.pdf
+33792bb27ef392973e951ca5a5a3be4a22a0d0c6,http://plaza.ufl.edu/xsshi2015/paper_list/TPAMI2016.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2501810
+3328674d71a18ed649e828963a0edb54348ee598,http://ai.pku.edu.cn/application/files/1415/1124/8089/A_face_and_palmprint_recognition_approach_based_on_discriminant_DCT_feature_extraction.pdf,,https://doi.org/10.1109/TSMCB.2004.837586,http://ai.pku.edu.cn/aiwebsite/research.files/collected%20papers%20-%20palmprint/A%20face%20and%20palmprint%20recognition%20approach%20based%20on%20discriminant%20DCT%20feature%20extraction.pdf
+339937141ffb547af8e746718fbf2365cc1570c8,http://pdfs.semanticscholar.org/3399/37141ffb547af8e746718fbf2365cc1570c8.pdf,,,http://cs231n.stanford.edu/reports/2016/pdfs/022_Report.pdf
+33402ee078a61c7d019b1543bb11cc127c2462d2,http://users.cecs.anu.edu.au/~sgould/papers/cvpr17-ooo.pdf,,,http://openaccess.thecvf.com/content_cvpr_2017/papers/Fernando_Self-Supervised_Video_Representation_CVPR_2017_paper.pdf
+33ae696546eed070717192d393f75a1583cd8e2c,https://arxiv.org/pdf/1708.08508v2.pdf,,https://doi.org/10.1109/BTAS.2017.8272730,https://arxiv.org/pdf/1708.08508v1.pdf
+33554ff9d1d3b32f67020598320d3d761d7ec81f,http://pdfs.semanticscholar.org/3355/4ff9d1d3b32f67020598320d3d761d7ec81f.pdf,,,http://arxiv.org/abs/1702.06086
+33f2b44742cc828347ccc5ec488200c25838b664,http://pdfs.semanticscholar.org/33f2/b44742cc828347ccc5ec488200c25838b664.pdf,,,http://arxiv.org/pdf/1511.02126v1.pdf
+332d773b70f2f6fb725d49f314f57b8f8349a067,,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.220
+3393459600368be2c4c9878a3f65a57dcc0c2cfa,http://pdfs.semanticscholar.org/3393/459600368be2c4c9878a3f65a57dcc0c2cfa.pdf,,https://doi.org/10.1007/978-3-319-16811-1_2,http://users.eecs.northwestern.edu/~xsh835/EigenPepACCV2014.pdf
+3327e21b46434f6441018922ef31bddba6cc8176,http://www.metaio.com/fileadmin/upload/research_files/paper/ISMAR2014_Real-Time_Illumination_Estimation_from_Faces_for_Coherent_Rendering_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ISMAR.2014.6948416
+334d6c71b6bce8dfbd376c4203004bd4464c2099,http://pdfs.semanticscholar.org/ebbf/a07476257e1b7f4e259b29531a12eab575bd.pdf,,https://doi.org/10.1007/978-3-319-46466-4_43,http://www.csl.cornell.edu/~studer/papers/16ECCV-BCR.pdf
+33695e0779e67c7722449e9a3e2e55fde64cfd99,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/2B_064_ext.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2B_064_ext.pdf
+334ac2a459190b41923be57744aa6989f9a54a51,http://pdfs.semanticscholar.org/334a/c2a459190b41923be57744aa6989f9a54a51.pdf,,,https://cs.brown.edu/people/rebecca/rmason_naacl2012.pdf
+33b61be191e63b0c9974be708180275c9d5b3057,,,https://doi.org/10.1109/ICRA.2011.5979705,
+33e20449aa40488c6d4b430a48edf5c4b43afdab,http://mplab.ucsd.edu/wordpress/wp-content/uploads/EngagementRecognitionFinal.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2014.2316163
+333e7ad7f915d8ee3bb43a93ea167d6026aa3c22,http://www.eurecom.fr/en/publication/4277/download/mm-publi-4277.pdf,,https://doi.org/10.1109/TIFS.2014.2309851,
+334166a942acb15ccc4517cefde751a381512605,http://pdfs.semanticscholar.org/3341/66a942acb15ccc4517cefde751a381512605.pdf,,,https://www.irjet.net/archives/V4/i10/IRJET-V4I1014.pdf
+33403e9b4bbd913ae9adafc6751b52debbd45b0e,http://pdfs.semanticscholar.org/3340/3e9b4bbd913ae9adafc6751b52debbd45b0e.pdf,,,http://www.itr-rescue.org/pubs/upload/358_McCall2005.pdf
+33ad23377eaead8955ed1c2b087a5e536fecf44e,http://vis-www.cs.umass.edu/papers/gloc_cvpr13.pdf,,,http://web.eecs.umich.edu/~honglak/cvpr13_gloc.pdf
+33bbf01413910bca26ed287112d32fe88c1cc0df,,,https://doi.org/10.1109/ICIP.2014.7026204,
+331d6ace8d59fa211e5bc84a93fdc65695238c69,,,https://doi.org/10.1007/s10115-017-1115-4,
+053b263b4a4ccc6f9097ad28ebf39c2957254dfb,http://pdfs.semanticscholar.org/7a49/4b4489408ec3adea15817978ecd2e733f5fe.pdf,,,https://arxiv.org/pdf/1404.3291v1.pdf
+05184f01e66d7139530729b281da74db35a178d2,,http://ieeexplore.ieee.org/document/6460470/,,
+054756fa720bdcf1d320ad7a353e54ca53d4d3af,http://www.stat.ucla.edu/~yuille/Pubs15/JianyuWangSemanticCVPR2015%20(1).pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_074_ext.pdf
+05b8673d810fadf888c62b7e6c7185355ffa4121,https://nannanwang.github.io/My_Papers/IJCV2013.pdf,,https://doi.org/10.1007/s11263-013-0645-9,https://pdfs.semanticscholar.org/05b8/673d810fadf888c62b7e6c7185355ffa4121.pdf
+056d5d942084428e97c374bb188efc386791e36d,http://pdfs.semanticscholar.org/056d/5d942084428e97c374bb188efc386791e36d.pdf,,https://doi.org/10.1007/978-3-319-46466-4_7,http://arxiv.org/pdf/1603.03968v1.pdf
+05e658fed4a1ce877199a4ce1a8f8cf6f449a890,http://pdfs.semanticscholar.org/05e6/58fed4a1ce877199a4ce1a8f8cf6f449a890.pdf,,,http://drum.lib.umd.edu/bitstream/handle/1903/17349/Zheng_umd_0117E_16726.pdf?isAllowed=y&sequence=1
+05ad478ca69b935c1bba755ac1a2a90be6679129,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Turakhia_Attribute_Dominance_What_2013_ICCV_paper.pdf,,,https://www.cc.gatech.edu/~parikh/Publications/TurakhiaParikh_attribute_dominance_ICCV_2013.pdf
+0595d18e8d8c9fb7689f636341d8a55cc15b3e6a,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_102.pdf,,https://doi.org/10.1109/TIP.2017.2746993,http://vipl.ict.ac.cn/resources/codes/code/Discriminant%20Analysis%20on%20Riemannian%20Manifold%20of%20Gaussian%20Distributions%20for%20Face%20Recognition%20with%20Image%20Sets.pdf
+0573f3d2754df3a717368a6cbcd940e105d67f0b,http://cs.anu.edu.au/few/EmotiW_icmi_draft_ver_1_0.pdf,,,http://doi.acm.org/10.1145/2522848.2531739
+05a0d04693b2a51a8131d195c68ad9f5818b2ce1,http://pdfs.semanticscholar.org/05a0/d04693b2a51a8131d195c68ad9f5818b2ce1.pdf,,,http://arxiv.org/abs/1706.00631
+0562fc7eca23d47096472a1d42f5d4d086e21871,http://pdfs.semanticscholar.org/0562/fc7eca23d47096472a1d42f5d4d086e21871.pdf,,,https://arxiv.org/pdf/1712.08416v1.pdf
+052fb35f731680d9d4e7d89c8f70f14173efb015,,,,http://doi.acm.org/10.1145/2893487
+054738ce39920975b8dcc97e01b3b6cc0d0bdf32,http://ita.ucsd.edu/workshop/16/files/paper/paper_2663.pdf,,https://doi.org/10.1109/ITA.2016.7888183,https://arxiv.org/pdf/1601.07883v1.pdf
+05bcc5235721fd6a465a63774d28720bacc60858,http://www.site.uottawa.ca/~fshi098/papers/Gradient_Boundary_Histograms_for_Action_Recognition.pdf,,,http://doi.ieeecomputersociety.org/10.1109/WACV.2015.152
+05e03c48f32bd89c8a15ba82891f40f1cfdc7562,http://files.is.tue.mpg.de/black/papers/rgapami.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2511743
+05a312478618418a2efb0a014b45acf3663562d7,http://people.ee.duke.edu/~lcarin/AccelGibbs.pdf,,,http://people.csail.mit.edu/finale/papers/finale_icml09.pdf
+05785cb0dcaace54801aa486d4f8fdad3245b27a,,,https://doi.org/10.1109/ICPR.2016.7899760,
+056ba488898a1a1b32daec7a45e0d550e0c51ae4,http://pdfs.semanticscholar.org/056b/a488898a1a1b32daec7a45e0d550e0c51ae4.pdf,,https://doi.org/10.1007/978-3-319-46484-8_39,http://arxiv.org/pdf/1608.01137v1.pdf
+053ee4a4793f54b02dfabde5436fd7ee479e79eb,,,,http://doi.acm.org/10.1145/3160504.3160507
+050fdbd2e1aa8b1a09ed42b2e5cc24d4fe8c7371,http://www.springer.com/cda/content/document/cda_downloaddocument/9783319587707-t1.pdf?SGWID=0-0-45-1607395-p180855259,,https://doi.org/10.1007/978-3-319-58771-4_1,
+0509c442550571907258f07aad9da9d00b1e468b,https://pdfs.semanticscholar.org/0509/c442550571907258f07aad9da9d00b1e468b.pdf,,https://doi.org/10.1109/IVCNZ.2013.6727016,http://www.researchgate.net/profile/Rodney_Nielsen/publication/267211830_On_multi-task_learning_for_facial_action_unit_detection/links/549449910cf20c4f741ecf30.pdf
+056294ff40584cdce81702b948f88cebd731a93e,https://arxiv.org/pdf/1506.08438v3.pdf,,,http://openaccess.thecvf.com/content_iccv_2015/papers/Sener_Unsupervised_Semantic_Parsing_ICCV_2015_paper.pdf
+052880031be0a760a5b606b2ad3d22f237e8af70,http://pdfs.semanticscholar.org/0528/80031be0a760a5b606b2ad3d22f237e8af70.pdf,,,http://arxiv.org/pdf/1607.00442v1.pdf
+05c974b9fde42f87e28458fb7febf7a05f2dfd18,,,,
+055de0519da7fdf27add848e691087e0af166637,http://pdfs.semanticscholar.org/d3f9/cf3fb66326e456587acb18cf3196d1e314c7.pdf,,https://doi.org/10.1007/978-3-319-10593-2_12,http://ibug.doc.ic.ac.uk/media/uploads/documents/eccv_2014.pdf
+0515e43c92e4e52254a14660718a9e498bd61cf5,http://pdfs.semanticscholar.org/3a78/5f86c2109fe1ff242dcb26211abfb9b0a870.pdf,,,http://research.sabanciuniv.edu/6565/1/vural_DSPinCars07.pdf
+053c2f592a7f153e5f3746aa5ab58b62f2cf1d21,http://pdfs.semanticscholar.org/053c/2f592a7f153e5f3746aa5ab58b62f2cf1d21.pdf,,,http://oaji.net/articles/2014/489-1392872280.pdf
+052c5ef6b20bf3e88bc955b6b2e86571be08ba64,,,https://doi.org/10.1109/TIFS.2011.2170068,
+0568fc777081cbe6de95b653644fec7b766537b2,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Liu_Learning_Expressionlets_on_2014_CVPR_paper.pdf,,,http://vipl.ict.ac.cn/resources/codes/code/2014_CVPR_Learning%20Expressionlets%20on%20Spatio-temporal%20Manifold%20for%20Dynamic%20Facial%20Expression%20Recognition.pdf
+0561bed18b6278434deae562d646e8adad72e75d,,,https://doi.org/10.1016/j.neucom.2014.09.052,
+05d80c59c6fcc4652cfc38ed63d4c13e2211d944,http://static.googleusercontent.com/external_content/untrusted_dlcp/research.google.com/en/us/pubs/archive/35389.pdf,,,http://www-2.cs.cmu.edu/~skumar/nys_col_ICML.pdf
+05ea7930ae26165e7e51ff11b91c7aa8d7722002,http://www.stat.ucla.edu/~sczhu/papers/PAMI_car_occlusion_AOG.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2497699
+055530f7f771bb1d5f352e2758d1242408d34e4d,http://pdfs.semanticscholar.org/0555/30f7f771bb1d5f352e2758d1242408d34e4d.pdf,,,http://worldcomp-proceedings.com/proc/p2014/IPC7095.pdf
+05287cbad6093deffe9a0fdb9115605595dfeaf0,,,,
+050eda213ce29da7212db4e85f948b812a215660,http://pdfs.semanticscholar.org/b598/4a1044d72224f99e959746a452fc1927a257.pdf,,,http://www-dev.ri.cmu.edu:8080/pub_files/pub3/sim_terence_2001_2/sim_terence_2001_2.pdf
+051a84f0e39126c1ebeeb379a405816d5d06604d,http://static.springer.com/sgw/documents/1348632/application/pdf/Cognitive+Computation_Biometric+Recognition+Performing+in+a+Bioinspired+System.pdf,,https://doi.org/10.1007/s12559-009-9018-7,http://static.springer.com/sgw/documents/1348632/application/pdf/Cognitive%20Computation_Biometric%20Recognition%20Performing%20in%20a%20Bioinspired%20System.pdf
+05e3acc8afabc86109d8da4594f3c059cf5d561f,https://www.cs.rochester.edu/u/cxu22/p/cvpr2016_a2s2_poster.pdf,,,http://www.cs.rochester.edu/u/cxu22/p/cvpr2016_a2s2_paper.pdf
+05f4d907ee2102d4c63a3dc337db7244c570d067,http://pdfs.semanticscholar.org/3c52/2c9707eb795e0dba69202f1ec946a9072661.pdf,,https://doi.org/10.1016/j.patcog.2006.03.013,http://cs.nju.edu.cn/zhouzh/zhouzh.files/publication/pr06b.pdf
+0559fb9f5e8627fecc026c8ee6f7ad30e54ee929,http://pdfs.semanticscholar.org/0559/fb9f5e8627fecc026c8ee6f7ad30e54ee929.pdf,,,http://cdn.intechopen.com/pdfs/14644/InTech-Facial_expression_recognition.pdf
+05a7be10fa9af8fb33ae2b5b72d108415519a698,http://jankautz.com/publications/MMFusion4Video_ACMM16.pdf,,,http://doi.acm.org/10.1145/2964284.2964297
+0553c6b9ee3f7d24f80e204d758c94a9d6b375d2,,,https://doi.org/10.1109/ICIP.2004.1419764,
+05318a267226f6d855d83e9338eaa9e718b2a8dd,https://fruct.org/publications/fruct16/files/Khr.pdf,,https://doi.org/10.1109/FRUCT.2014.7000917,http://fruct.org/publications/fruct16/files/Khr.pdf
+057d5f66a873ec80f8ae2603f937b671030035e6,http://cs.stanford.edu/~roozbeh/papers/Mottaghi16cvpr_a.pdf,,,https://homes.cs.washington.edu/~hessam/uploads/files/N3.pdf
+055cd8173536031e189628c879a2acad6cf2a5d0,,,https://doi.org/10.1109/BTAS.2017.8272740,
+05c91e8a29483ced50c5f2d869617b80f7dacdd9,http://www.cs.rochester.edu/~mehoque/Publications/2013/13.Hoque-etal-MACH-UbiComp.pdf,,,http://www.cs.rochester.edu/u/www/u/mehoque/Publications/13.Hoque-etal-MACH-UbiComp.pdf
+05c5134125a333855e8d25500bf97a31496c9b3f,,,,http://doi.acm.org/10.1145/3132515.3132517
+0580edbd7865414c62a36da9504d1169dea78d6f,https://arxiv.org/pdf/1611.04251v1.pdf,,https://doi.org/10.1109/ROMAN.2016.7745199,http://arxiv.org/abs/1611.04251
+050a3346e44ca720a54afbf57d56b1ee45ffbe49,https://www.d2.mpi-inf.mpg.de/sites/default/files/cvpr16.pdf,,,http://openaccess.thecvf.com/content_cvpr_2016/papers/Akata_Multi-Cue_Zero-Shot_Learning_CVPR_2016_paper.pdf
+05a116cb6e220f96837e4418de4aa8e39839c996,,,,http://doi.ieeecomputersociety.org/10.1109/T-AFFC.2012.30
+050e51268b0fb03033428ac777ccfef2db752ab3,,,https://doi.org/10.1109/DICTA.2007.4426834,
+0517d08da7550241fb2afb283fc05d37fce5d7b7,http://pdfs.semanticscholar.org/0517/d08da7550241fb2afb283fc05d37fce5d7b7.pdf,,,http://www.sensorsportal.com/HTML/DIGEST/june_2013/P_1224.pdf
+052cec9fdbfe12ccd02688f3b7f538c0d73555b3,,,https://doi.org/10.1109/ICIP.2016.7533172,
+05f3d1e9fb254b275354ca69018e9ed321dd8755,http://pdfs.semanticscholar.org/05f3/d1e9fb254b275354ca69018e9ed321dd8755.pdf,,,http://arxiv.org/abs/1110.0264
+05e96d76ed4a044d8e54ef44dac004f796572f1a,http://www.cs.ucsb.edu/~mturk/595/papers/BRONSTEIN.pdf,,https://doi.org/10.1007/978-0-387-78414-4_2,http://www.cs.technion.ac.il/~bron/publications/BroBroKimIJCV05.pdf
+051f03bc25ec633592aa2ff5db1d416b705eac6c,http://www.cse.msu.edu/biometrics/Publications/Face/LiaoJain_PartialFR_AlignmentFreeApproach_ICJB11.pdf,,https://doi.org/10.1109/IJCB.2011.6117573,http://biometrics.cse.msu.edu/Publications/Face/LiaoJain_PartialFR_AlignmentFreeApproach_ICJB11.pdf
+05270b68547a2cd5bda302779cfc5dda876ae538,http://www.cs.sfu.ca/~mori/courses/cmpt882/fall05/papers/laplacianfaces.pdf,,,http://www.cs.uchicago.edu/~niyogi/papersps/Laplacianface.pdf
+9d1cebed7672210f9c411c5ba422a931980da833,,,,http://doi.ieeecomputersociety.org/10.1109/ISM.2016.0078
+9da63f089b8ee23120bfa8b4d9d9c8f605f421fc,,,,http://doi.acm.org/10.1145/2072298.2072043
+9d8ff782f68547cf72b7f3f3beda9dc3e8ecfce6,http://pdfs.semanticscholar.org/9d8f/f782f68547cf72b7f3f3beda9dc3e8ecfce6.pdf,,https://doi.org/10.1142/S0218001412500024,https://maxwell.ict.griffith.edu.au/spl/publications/papers/ImpPseudoinverse_IJPRAI12.pdf
+9d42df42132c3d76e3447ea61e900d3a6271f5fe,http://pdfs.semanticscholar.org/9d42/df42132c3d76e3447ea61e900d3a6271f5fe.pdf,,,http://research.ijcaonline.org/icaccthpa2014/number4/icaccthpa6045.pdf
+9d55ec73cab779403cd933e6eb557fb04892b634,http://pdfs.semanticscholar.org/9d55/ec73cab779403cd933e6eb557fb04892b634.pdf,,,https://arxiv.org/pdf/1512.06337v1.pdf
+9d8fd639a7aeab0dd1bc6eef9d11540199fd6fe2,http://pdfs.semanticscholar.org/9d8f/d639a7aeab0dd1bc6eef9d11540199fd6fe2.pdf,,,https://openreview.net/pdf?id=HkWTqLsIz
+9d357bbf014289fb5f64183c32aa64dc0bd9f454,http://pdfs.semanticscholar.org/9d35/7bbf014289fb5f64183c32aa64dc0bd9f454.pdf,,https://doi.org/10.1007/3-540-47979-1_1,http://mi.informatik.uni-siegen.de/publications/romdhani_eccv02.pdf
+9d66de2a59ec20ca00a618481498a5320ad38481,http://www.cs.iit.edu/~xli/paper/Conf/POP-ICDCS15.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICDCS.2015.39
+9d839dfc9b6a274e7c193039dfa7166d3c07040b,http://www.vision.ee.ethz.ch/en/publications/papers/proceedings/eth_biwi_00869.pdf,,https://doi.org/10.1109/ICCVW.2011.6130218,http://www.vision.ee.ethz.ch/publications/papers/proceedings/eth_biwi_00869.pdf
+9d4692e243e25eb465a0480376beb60a5d2f0f13,,,https://doi.org/10.1109/ICCE.2016.7430617,
+9d01eca806e0f98c5b3c9a865cec1bd8c78e0f0c,,,,http://doi.acm.org/10.1145/3136755.3137032
+9dcc6dde8d9f132577290d92a1e76b5decc6d755,http://pdfs.semanticscholar.org/a36a/3cd13c59777b6b07e41c4026e55b55e8096f.pdf,,,http://tmt.unze.ba/zbornik/TMT2012/108-TMT12-125.pdf
+9d36c81b27e67c515df661913a54a797cd1260bb,http://pdfs.semanticscholar.org/9d36/c81b27e67c515df661913a54a797cd1260bb.pdf,,,http://www.ijera.com/papers/Vol2_issue1/DX21787792.pdf
+9d757c0fede931b1c6ac344f67767533043cba14,http://pdfs.semanticscholar.org/9d75/7c0fede931b1c6ac344f67767533043cba14.pdf,,,http://www.ijcsit.com/docs/Volume%206/vol6issue04/ijcsit20150604151.pdf
+9d941a99e6578b41e4e32d57ece580c10d578b22,http://pdfs.semanticscholar.org/9d94/1a99e6578b41e4e32d57ece580c10d578b22.pdf,,https://doi.org/10.3390/s150204326,http://www.mdpi.com/1424-8220/15/2/4326/pdf
+9df86395c11565afa8683f6f0a9ca005485c5589,,,https://doi.org/10.1007/s00530-014-0400-2,
+9d60ad72bde7b62be3be0c30c09b7d03f9710c5f,http://pdfs.semanticscholar.org/9d60/ad72bde7b62be3be0c30c09b7d03f9710c5f.pdf,,,http://ijcsit.com/docs/Volume%205/vol5issue06/ijcsit2014050676.pdf
+9d896605fbf93315b68d4ee03be0770077f84e40,http://pdfs.semanticscholar.org/9d89/6605fbf93315b68d4ee03be0770077f84e40.pdf,,,http://www.cs.sunysb.edu/~ychoi/Papers/cvpr11_generation.pdf
+9d61b0beb3c5903fc3032655dc0fd834ec0b2af3,http://pdfs.semanticscholar.org/c5ac/a3f653e2e8a58888492524fc1480608457b7.pdf,,,http://citeseerx.ist.psu.edu/viewdoc/download;jsessionid=4A55C18E69EA86A0718B3F59562E4F9D?doi=10.1.1.105.2608&rep=rep1&type=pdf
+9dbd098975069d01efe7f5ddfb3dae6b6695be0d,,,,
+9d24179aa33a94c8c61f314203bf9e906d6b64de,http://www.decom.ufop.br/sibgrapi2012/eproceedings/technical/ts9/102146_3.pdf,,,http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2012.45
+9d3aa3b7d392fad596b067b13b9e42443bbc377c,http://pdfs.semanticscholar.org/9d3a/a3b7d392fad596b067b13b9e42443bbc377c.pdf,,,http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-475/ArtIBio/15-pp-142-149-309.pdf
+9d06d43e883930ddb3aa6fe57c6a865425f28d44,http://pdfs.semanticscholar.org/dd08/039eb271af93810ba392728ff481d8ce7496.pdf,,,http://faculty.ucmerced.edu/mhyang/papers/cvpr03a.pdf
+9c686b318cb7774b6da5e2c712743a5a6cafa423,,,https://doi.org/10.1016/j.neuroimage.2015.12.036,
+9cda3e56cec21bd8f91f7acfcefc04ac10973966,,,https://doi.org/10.1109/IWBF.2016.7449688,
+9ce4541d21ee3511bf3dc55bc3cd01222194d95a,,,https://doi.org/10.1016/j.cviu.2017.05.008,
+9ce97efc1d520dadaa0d114192ca789f23442727,,,,http://doi.acm.org/10.1145/2597627
+9c1305383ce2c108421e9f5e75f092eaa4a5aa3c,http://pdfs.semanticscholar.org/9c13/05383ce2c108421e9f5e75f092eaa4a5aa3c.pdf,,,http://ceur-ws.org/Vol-379/paper2.pdf
+9cfb3a68fb10a59ec2a6de1b24799bf9154a8fd1,http://pdfs.semanticscholar.org/9cfb/3a68fb10a59ec2a6de1b24799bf9154a8fd1.pdf,,,http://research-repository.uwa.edu.au/files/11544841/THESIS_MASTER_BY_RESEARCH_MEHDIZADEH_Maryam_2016.pdf
+9c81d436b300494bc88d4de3ac3ec3cc9c43c161,,,https://doi.org/10.1007/s11042-017-5019-9,
+9c1860de6d6e991a45325c997bf9651c8a9d716f,http://pdfs.semanticscholar.org/d02c/54192dbd0798b43231efe1159d6b4375ad36.pdf,,https://doi.org/10.1016/j.eswa.2010.10.015,http://edi-info.ir/files/3D-reconstruction-and-face-recognition-using-kernel-based-ICA-and-neural-networks.pdf
+9c9ef6a46fb6395702fad622f03ceeffbada06e5,http://pdfs.semanticscholar.org/f1e3/d1d26e39f98608037b195761f61fa7532925.pdf,,https://doi.org/10.1111/j.1467-8659.2004.00799.x,http://domino.mpi-inf.mpg.de/intranet/ag4/ag4publ.nsf/0/CEA089FA1CCE94B7C1256F9D003AF9EB/$file/ExchangingFacesInImages.pdf
+9cd4f72d33d1cedc89870b4f4421d496aa702897,,,https://doi.org/10.1117/1.JEI.22.2.023010,
+9c1cdb795fd771003da4378f9a0585730d1c3784,http://pdfs.semanticscholar.org/9c1c/db795fd771003da4378f9a0585730d1c3784.pdf,,https://doi.org/10.1007/978-3-319-10605-2_37,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ECCV_2014/papers/8690/86900568.pdf
+9cb7b3b14fd01cc2ed76784ab76304132dab6ff3,,,https://doi.org/10.1109/ICIP.2015.7351174,
+9ca93ad6200bfa9dd814ac64bfb1044c3a0c01ce,,,,
+9c25e89c80b10919865b9c8c80aed98d223ca0c6,http://pdfs.semanticscholar.org/9c25/e89c80b10919865b9c8c80aed98d223ca0c6.pdf,,,http://www.iiisci.org/Journal/CV$/sci/pdfs/SA224OU15.pdf
+9ce2fd6ae16b339886d0ce237faae811230c8ce6,,,,
+9c7444c6949427994b430787a153d5cceff46d5c,http://pdfs.semanticscholar.org/9c74/44c6949427994b430787a153d5cceff46d5c.pdf,,,http://www.scipub.org/fulltext/jcs/jcs511801-810.pdf
+9ccaa13a577b20e88420d0a4b8c9545d5560261d,,,,
+9c781f7fd5d8168ddae1ce5bb4a77e3ca12b40b6,http://pdfs.semanticscholar.org/9c78/1f7fd5d8168ddae1ce5bb4a77e3ca12b40b6.pdf,,,https://www.irjet.net/archives/V3/i7/IRJET-V3I758.pdf
+9c373438285101d47ab9332cdb0df6534e3b93d1,http://pdfs.semanticscholar.org/9c37/3438285101d47ab9332cdb0df6534e3b93d1.pdf,,,http://arxiv.org/abs/1312.6024
+9cbb6e42a35f26cf1d19f4875cd7f6953f10b95d,http://pdfs.semanticscholar.org/9cbb/6e42a35f26cf1d19f4875cd7f6953f10b95d.pdf,,https://doi.org/10.1007/978-3-319-54526-4_38,http://www.me.cs.scitec.kobe-u.ac.jp/~takigu/pdf/2016/W12_02.pdf
+9c4521dd25628b517dac3656410242b83b91e1e0,,,,
+9c4cc11d0df2de42d6593f5284cfdf3f05da402a,http://pdfs.semanticscholar.org/ce1a/f0e944260efced743f371ba0cb06878582b6.pdf,,,http://www.cs.njit.edu/~liu/papers/icpr98.pdf
+9cd6a81a519545bf8aa9023f6e879521f85d4cd1,http://pdfs.semanticscholar.org/9cd6/a81a519545bf8aa9023f6e879521f85d4cd1.pdf,,,https://www.cs.umd.edu/~qiu/pub/1308.0275v1.pdf
+9cadd166893f1b8aaecb27280a0915e6694441f5,http://pdfs.semanticscholar.org/9cad/d166893f1b8aaecb27280a0915e6694441f5.pdf,,,http://naturalspublishing.com/files/published/529w55wf727af3.pdf
+02e668f9b75f4a526c6fdf7268c8c1936d8e6f09,,,https://doi.org/10.1142/S0218001411008968,
+02601d184d79742c7cd0c0ed80e846d95def052e,http://arxiv.org/pdf/1503.00488v3.pdf,,,http://export.arxiv.org/pdf/1503.00488v1
+02cc96ad997102b7c55e177ac876db3b91b4e72c,http://www.micc.unifi.it/wp-content/uploads/2015/12/2015_museum-visitors-dataset.pdf,,,http://www.micc.unifi.it/publications/2015/BLSKD15/egpaper_for_review.pdf
+02a98118ce990942432c0147ff3c0de756b4b76a,http://eprints.pascal-network.org/archive/00005029/01/LaptevMarszalekSchmidRozenfeld-CVPR08-HumanActions.pdf,,,http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/416.pdf
+02e43d9ca736802d72824892c864e8cfde13718e,https://qmro.qmul.ac.uk/xmlui/bitstream/handle/123456789/10075/shi%20Transferring%20a%20semantic%20representation%202015%20Accepted.pdf?sequence=1,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2015.7299046
+02fda07735bdf84554c193811ba4267c24fe2e4a,http://www.cbsr.ia.ac.cn/Li%20Group/papers/Li-IR-Face-PAMI-07.pdf,,,http://www.nlpr.ia.ac.cn/2007papers/gjkw/gk6.pdf
+02431ed90700d5cfe4e3d3a20f1e97de3e131569,http://www.di.ens.fr/~bojanowski/papers/bojanowski13finding.pdf,,,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Bojanowski_Finding_Actors_and_2013_ICCV_paper.pdf
+023ed32ac3ea6029f09b8c582efbe3866de7d00a,http://pdfs.semanticscholar.org/023e/d32ac3ea6029f09b8c582efbe3866de7d00a.pdf,,,http://cyber.felk.cvut.cz/teaching/radaUIB/disertace_Antoniuk.pdf
+0241513eeb4320d7848364e9a7ef134a69cbfd55,http://videolectures.net/site/normal_dl/tag=71121/cvpr2010_yang_stis_01.v1.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2010.5539958
+02dd0af998c3473d85bdd1f77254ebd71e6158c6,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Wang_PPP_Joint_Pointwise_CVPR_2016_paper.pdf,,,http://www.public.asu.edu/~bli24/Papers/CVPR2016PPP.pdf
+0290523cabea481e3e147b84dcaab1ef7a914612,http://pdfs.semanticscholar.org/0290/523cabea481e3e147b84dcaab1ef7a914612.pdf,,,https://openreview.net/pdf?id=S1ybBMw-W
+0229829e9a1eed5769a2b5eccddcaa7cd9460b92,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1A_098_ext.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/1A_098_ext.pdf
+025720574ef67672c44ba9e7065a83a5d6075c36,http://pdfs.semanticscholar.org/915f/dd2fdc7880074bd1c1d596f7e7d19ab34e8f.pdf,,,http://machinelearning.wustl.edu/mlpapers/paper_files/icml2015_srivastava15.pdf
+029317f260b3303c20dd58e8404a665c7c5e7339,http://www.nlpr.ia.ac.cn/2009papers/gjkw/gk32.pdf?origin=publication_detail,,https://doi.org/10.1109/TMM.2009.2030629,
+026e4ee480475e63ae68570d73388f8dfd4b4cde,http://pdfs.semanticscholar.org/026e/4ee480475e63ae68570d73388f8dfd4b4cde.pdf,,,https://arxiv.org/pdf/1711.09728v1.pdf
+02e628e99f9a1b295458cb453c09863ea1641b67,http://pdfs.semanticscholar.org/02e6/28e99f9a1b295458cb453c09863ea1641b67.pdf,,https://doi.org/10.1007/978-3-319-48881-3_43,https://www.adrianbulat.com/downloads/ECCV-W16/two-stage-3D-pose-eccvw16.pdf
+0273414ba7d56ab9ff894959b9d46e4b2fef7fd0,http://pdfs.semanticscholar.org/3ae9/29d33dd1e6acdf6c907a1115e5a21f6cb076.pdf,,,https://arxiv.org/pdf/1611.09942v1.pdf
+028e237cb539b01ec72c244f57fdcfb65bbe53d4,,,,http://doi.ieeecomputersociety.org/10.1109/CIS.2010.65
+02e133aacde6d0977bca01ffe971c79097097b7f,http://pdfs.semanticscholar.org/02e1/33aacde6d0977bca01ffe971c79097097b7f.pdf,,,https://arxiv.org/pdf/1606.02492v4.pdf
+0296ca8ffceef73d774dfd171447ff3ce2e764aa,,,,
+02567fd428a675ca91a0c6786f47f3e35881bcbd,https://arxiv.org/pdf/1611.01731.pdf,,https://doi.org/10.1109/TIP.2017.2689998,https://arxiv.org/pdf/1611.01731v1.pdf
+029b53f32079063047097fa59cfc788b2b550c4b,http://pdfs.semanticscholar.org/b71c/73fcae520f6a5cdbce18c813633fb3d66342.pdf,,https://doi.org/10.1007/978-3-319-10593-2_39,https://www.cl.cam.ac.uk/~tb346/pub/papers/eccv2014.pdf
+026e96c3c4751e1583bfe78b8c28bdfe854c4988,,,https://doi.org/10.1109/ICIP.2017.8296442,
+02bd665196bd50c4ecf05d6852a4b9ba027cd9d0,http://arxiv.org/pdf/1310.2880v6.pdf,,,https://arxiv.org/pdf/1310.2880v7.pdf
+0247998a1c045e601dc4d65c53282b5e655be62b,,,https://doi.org/10.1109/ITSC.2017.8317782,
+02c38fa9a8ada6040ef21de17daf8d5e5cdc60c7,http://members.e-inclusion.crim.ca/files/articles/CRV_2006.pdf,,,https://www.crim.ca/Publications/2006/documents/plein_texte/VIS_GagLals_CRV06.pdf
+021a19e240f0ae0554eff814e838e1e396be6572,http://ci2cv.net/static/papers/2009_ICCV_Saragih_2.pdf,,https://doi.org/10.1109/ICCV.2009.5459377,http://ri.cmu.edu/pub_files/2009/9/CameraReady-6.pdf
+026b5b8062e5a8d86c541cfa976f8eee97b30ab8,http://www.iab-rubric.org/papers/deeplearningvideo-CR.pdf,,https://doi.org/10.1109/BTAS.2014.6996299,
+0235b2d2ae306b7755483ac4f564044f46387648,http://pdfs.semanticscholar.org/0235/b2d2ae306b7755483ac4f564044f46387648.pdf,,https://doi.org/10.1007/978-3-319-16181-5_59,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ECCV_2014/workshops/w16/W16-12.pdf
+02467703b6e087799e04e321bea3a4c354c5487d,http://biometrics.cse.msu.edu/Publications/Face/AdamsAllenMillerKalkaJain_CVPRWB2016_GRPR.pdf,,,http://www.cse.msu.edu/rgroups/biometrics/Publications/Face/AdamsAllenMillerKalkaJain_CVPRWB2016_GRPR.pdf
+02e39f23e08c2cb24d188bf0ca34141f3cc72d47,http://luks.fe.uni-lj.si/sl/osebje/vitomir/pub/ICASSP2010.pdf,,https://doi.org/10.1109/ICASSP.2010.5495203,http://mirlab.org/conference_papers/International_Conference/ICASSP%202010/pdfs/0000846.pdf
+023be757b1769ecb0db810c95c010310d7daf00b,http://pdfs.semanticscholar.org/023b/e757b1769ecb0db810c95c010310d7daf00b.pdf,,https://doi.org/10.5244/C.29.130,https://arxiv.org/pdf/1507.03148v2.pdf
+021469757d626a39639e260492eea7d3e8563820,,,https://doi.org/10.1007/b116723,
+026a9cfe3135b7b62279bc08e2fb97e0e9fad5c4,http://perso.telecom-paristech.fr/~sahbi/jstars2017.pdf,,,
+0278acdc8632f463232e961563e177aa8c6d6833,http://www.pitt.edu/~jeffcohn/biblio/TPAMI2547397%20FINAL.pdf,,,http://ca.cs.cmu.edu/sites/default/files/07442563.pdf
+02a92b79391ddac0acef4f665b396f7f39ca2972,,,https://doi.org/10.1016/j.patcog.2016.10.021,
+0209389b8369aaa2a08830ac3b2036d4901ba1f1,https://arxiv.org/pdf/1612.01202v2.pdf,,,https://arxiv.org/pdf/1803.02188v1.pdf
+02c993d361dddba9737d79e7251feca026288c9c,http://eprints.eemcs.utwente.nl/26377/01/Automatic_player_detection_and_recognition_in_images_using_AdaBoost.pdf,,,
+02239ae5e922075a354169f75f684cad8fdfd5ab,http://ai2-website.s3.amazonaws.com/publications/CVPR_2017_Situation.pdf,,,https://arxiv.org/pdf/1612.00901v1.pdf
+02c2a29a4695eab7a8f859bf8697a5ca9f910d70,,,,
+02d650d8a3a9daaba523433fbe93705df0a7f4b1,http://pdfs.semanticscholar.org/02d6/50d8a3a9daaba523433fbe93705df0a7f4b1.pdf,,https://doi.org/10.1007/978-3-642-33868-7_19,http://www.cse.msu.edu/biometrics/Publications/Face/OttoHanJain_HowDoesAgingAffectFacialComponents_ECCV12.pdf
+022ec7d1642727b2cc3d9a9d7999ca84a280443f,,,,
+0294f992f8dfd8748703f953925f9aee14e1b2a2,http://pdfs.semanticscholar.org/0294/f992f8dfd8748703f953925f9aee14e1b2a2.pdf,,https://doi.org/10.1007/978-3-319-16634-6_2,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ACCV_2014/pages/workshop12/pdffiles/w12-o8.pdf
+02820c1491b10a1ff486fed32c269e4077c36551,https://arxiv.org/pdf/1610.07930v1.pdf,,https://doi.org/10.1109/BTAS.2016.7791155,http://arxiv.org/abs/1610.07930
+a458b319f5a2763ff9c6dc959eefa77673c56671,http://people.tamu.edu/~amir.tahmasbi/publications/Fisher_ICCEA2010.pdf,,,http://www.amirtahmasbi.com/publications_repository/Fisher_ICCEA2010.pdf
+a42209dbfe6d2005295d790456ddb2138302cbe5,,,,
+a4bb791b135bdc721c8fcc5bdef612ca654d7377,,,https://doi.org/10.1109/BTAS.2017.8272703,
+a4725a5b43e7c36d9e30028dff66958f892254a0,,,,http://doi.acm.org/10.1145/2663204.2666271
+a46283e90bcdc0ee35c680411942c90df130f448,http://pdfs.semanticscholar.org/a462/83e90bcdc0ee35c680411942c90df130f448.pdf,,https://doi.org/10.1016/j.neucom.2012.06.031,https://pdfs.semanticscholar.org/a462/83e90bcdc0ee35c680411942c90df130f448.pdf
+a4543226f6592786e9c38752440d9659993d3cb3,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.112
+a4a5ad6f1cc489427ac1021da7d7b70fa9a770f2,http://pdfs.semanticscholar.org/a4a5/ad6f1cc489427ac1021da7d7b70fa9a770f2.pdf,,https://doi.org/10.1186/s13640-017-0235-9,https://jivp-eurasipjournals.springeropen.com/track/pdf/10.1186/s13640-017-0235-9?site=jivp-eurasipjournals.springeropen.com
+a422f2d0212f54807ff678f209293a27c7791ec5,,,,
+a484243027b19b57b5063ad2e4b414e1d383d3e8,,,,
+a4876b7493d8110d4be720942a0f98c2d116d2a0,http://pdfs.semanticscholar.org/a487/6b7493d8110d4be720942a0f98c2d116d2a0.pdf,,,http://arxiv.org/abs/1603.06829
+a40f8881a36bc01f3ae356b3e57eac84e989eef0,http://pdfs.semanticscholar.org/a40f/8881a36bc01f3ae356b3e57eac84e989eef0.pdf,,,https://arxiv.org/pdf/1703.03305v1.pdf
+a4a0b5f08198f6d7ea2d1e81bd97fea21afe3fc3,http://pdfs.semanticscholar.org/a4a0/b5f08198f6d7ea2d1e81bd97fea21afe3fc3.pdf,,,https://repository.tudelft.nl/islandora/object/uuid:04a446a8-546c-455d-8344-948c7e3cdff5/datastream/OBJ/download
+a4e75766ef93b43608c463c233b8646439ce2415,,,https://doi.org/10.1109/ICCVW.2011.6130492,
+a46086e210c98dcb6cb9a211286ef906c580f4e8,http://pdfs.semanticscholar.org/dc94/43e3ae2fe70282b1b30e3eda3717b58c0808.pdf,,,https://arxiv.org/pdf/1509.06086v1.pdf
+a44590528b18059b00d24ece4670668e86378a79,http://pdfs.semanticscholar.org/a445/90528b18059b00d24ece4670668e86378a79.pdf,,,https://arxiv.org/pdf/1803.07226v1.pdf
+a472d59cff9d822f15f326a874e666be09b70cfd,http://pdfs.semanticscholar.org/a472/d59cff9d822f15f326a874e666be09b70cfd.pdf,,,http://cs.stanford.edu/groups/vision/documents/Tang_PhD_thesis_2015.pdf
+a4c430b7d849a8f23713dc283794d8c1782198b2,http://pdfs.semanticscholar.org/a4c4/30b7d849a8f23713dc283794d8c1782198b2.pdf,,,http://www.andrew.cmu.edu/user/avemula1/docs/pgm.pdf
+a4cc626da29ac48f9b4ed6ceb63081f6a4b304a2,http://pdfs.semanticscholar.org/a4cc/626da29ac48f9b4ed6ceb63081f6a4b304a2.pdf,,https://doi.org/10.1016/j.patcog.2015.04.014,http://arxiv.org/abs/1410.4673
+a4f37cfdde3af723336205b361aefc9eca688f5c,http://pdfs.semanticscholar.org/a4f3/7cfdde3af723336205b361aefc9eca688f5c.pdf,,,http://www.face-rec.org/journals-books/Delac_Grgic_Bartlett_Recent_Advances_in_Face_Recognition.pdf
+a481e394f58f2d6e998aa320dad35c0d0e15d43c,http://www.cs.colostate.edu/~draper/papers/wigness_wacv14.pdf,,,http://doi.ieeecomputersociety.org/10.1109/WACV.2014.6836093
+a317083d9aac4062e77aa0854513383c87e47ece,,,https://doi.org/10.1016/j.patcog.2015.06.003,
+a30869c5d4052ed1da8675128651e17f97b87918,http://pdfs.semanticscholar.org/a308/69c5d4052ed1da8675128651e17f97b87918.pdf,,,http://www.cs.utexas.edu/~grauman/papers/aron-preprint-book-springer16.pdf.pdf
+a3ebacd8bcbc7ddbd5753935496e22a0f74dcf7b,http://pdfs.semanticscholar.org/a3eb/acd8bcbc7ddbd5753935496e22a0f74dcf7b.pdf,,,https://engineering.purdue.edu/ASL4GUP/ASL4GUP_Proceedings.pdf
+a35ed55dc330d470be2f610f4822f5152fcac4e1,,,https://doi.org/10.1109/ISBA.2015.7126369,
+a324d61c79fe2e240e080f0dab358aa72dd002b3,,,https://doi.org/10.1016/j.patcog.2016.02.005,
+a3017bb14a507abcf8446b56243cfddd6cdb542b,http://pdfs.semanticscholar.org/a301/7bb14a507abcf8446b56243cfddd6cdb542b.pdf,,,http://waset.org/publications/5662/face-localization-and-recognition-in-varied-expressions-and-illumination
+a3c8c7da177cd08978b2ad613c1d5cb89e0de741,http://pdfs.semanticscholar.org/a3c8/c7da177cd08978b2ad613c1d5cb89e0de741.pdf,,https://doi.org/10.1007/978-3-319-11755-3_47,http://perso.telecom-paristech.fr/~bloch/papers/proceedings/ICIAR2014-Henrique.pdf
+a378fc39128107815a9a68b0b07cffaa1ed32d1f,http://pdfs.semanticscholar.org/a378/fc39128107815a9a68b0b07cffaa1ed32d1f.pdf,,,http://www.researchgate.net/profile/Jordi_Vitria/publication/3974356_Determining_a_suitable_metric_when_using_non-negative_matrix_factorization/links/0c96052b0153b55bf8000000.pdf
+a3add3268c26876eb76decdf5d7dd78a0d5cf304,,,https://doi.org/10.1016/j.specom.2017.07.003,
+a3f689fa5d71bdc7e19a959ac5d0f995e8e56493,,,,
+a34d75da87525d1192bda240b7675349ee85c123,http://pdfs.semanticscholar.org/a34d/75da87525d1192bda240b7675349ee85c123.pdf,,,http://arxiv.org/abs/1501.04690
+a3dc109b1dff3846f5a2cc1fe2448230a76ad83f,http://pdfs.semanticscholar.org/a3dc/109b1dff3846f5a2cc1fe2448230a76ad83f.pdf,,,http://ijcsmc.com/docs/papers/April2015/V4I4201599a11.pdf
+a3f684930c5c45fcb56a2b407d26b63879120cbf,http://pdfs.semanticscholar.org/a3f6/84930c5c45fcb56a2b407d26b63879120cbf.pdf,,,http://crcv.ucf.edu/ICCV13-Action-Workshop/index.files/NotebookPapers13/Paper%2021%20(Supplementary).pdf
+a3ed0f15824802359e05d9777cacd5488dfa7dba,,,,http://doi.acm.org/10.1145/2851581.2892282
+a33f20773b46283ea72412f9b4473a8f8ad751ae,http://pdfs.semanticscholar.org/a33f/20773b46283ea72412f9b4473a8f8ad751ae.pdf,,,https://www.cbica.upenn.edu/sbia/Birkan.Tunc/icerik/belgeler/birkan_phd.pdf
+a35849af340f80791c4a901ec2f2bbbac06660f5,,,,
+a3bf6129d1ae136709063a5639eafd8018f50feb,,,,http://doi.ieeecomputersociety.org/10.1109/ICCI-CC.2017.8109741
+a3775b3a0e78b890d9ca79b0aabd982551474a88,,,,
+a3a6a6a2eb1d32b4dead9e702824375ee76e3ce7,http://pdfs.semanticscholar.org/a3a6/a6a2eb1d32b4dead9e702824375ee76e3ce7.pdf,,https://doi.org/10.1007/978-3-319-02714-2_12,http://infoscience.epfl.ch/record/188196/files/hbu_yuceetal.pdf
+a32d4195f7752a715469ad99cb1e6ebc1a099de6,http://pdfs.semanticscholar.org/a32d/4195f7752a715469ad99cb1e6ebc1a099de6.pdf,,,
+a3d78bc94d99fdec9f44a7aa40c175d5a106f0b9,http://pdfs.semanticscholar.org/a3d7/8bc94d99fdec9f44a7aa40c175d5a106f0b9.pdf,,,http://www.seas.upenn.edu/~cse400/CSE400_2010_2011/CIS401_final_rep/8.pdf
+a3eab933e1b3db1a7377a119573ff38e780ea6a3,http://mirlab.org/conference_papers/International_Conference/ICASSP%202010/pdfs/0000838.pdf,,https://doi.org/10.1109/ICASSP.2010.5494903,
+a308077e98a611a977e1e85b5a6073f1a9bae6f0,http://pdfs.semanticscholar.org/a308/077e98a611a977e1e85b5a6073f1a9bae6f0.pdf,,,http://ftp.ncbi.nlm.nih.gov/pub/pmc/97/97/TSWJ2014-810368.PMC4037632.pdf
+a35dd69d63bac6f3296e0f1d148708cfa4ba80f6,http://pdfs.semanticscholar.org/a35d/d69d63bac6f3296e0f1d148708cfa4ba80f6.pdf,,,http://arxiv.org/abs/1603.08321
+a3a34c1b876002e0393038fcf2bcb00821737105,http://pdfs.semanticscholar.org/a3a3/4c1b876002e0393038fcf2bcb00821737105.pdf,,,http://doi.ieeecomputersociety.org/10.1109/AFGR.2002.1004155
+a3f1db123ce1818971a57330d82901683d7c2b67,http://pdfs.semanticscholar.org/a3f1/db123ce1818971a57330d82901683d7c2b67.pdf,,,http://www.eecs.berkeley.edu/Pubs/TechRpts/2012/EECS-2012-52.pdf
+a3a97bb5131e7e67316b649bbc2432aaa1a6556e,http://pdfs.semanticscholar.org/a3a9/7bb5131e7e67316b649bbc2432aaa1a6556e.pdf,,,http://www.bumc.bu.edu/anatneuro/files/2013/05/Ross_Stern_CABN2013inpress1.pdf
+a38dd439209b0913b14b1c3c71143457d8cf9b78,,,https://doi.org/10.1109/IJCNN.2015.7280803,
+a35d3ba191137224576f312353e1e0267e6699a1,http://pdfs.semanticscholar.org/a35d/3ba191137224576f312353e1e0267e6699a1.pdf,,,http://www2.hh.se/staff/josef/public/publications/garcia04spm.pdf
+a3a2f3803bf403262b56ce88d130af15e984fff0,http://pdfs.semanticscholar.org/e538/e1f6557d2920b449249606f909b665fbb924.pdf,,https://doi.org/10.1007/978-3-540-88682-2_53,http://media.cs.tsinghua.edu.cn/~imagevision/papers/eccv08.pdf
+b56f3a7c50bfcd113d0ba84e6aa41189e262d7ae,http://pdfs.semanticscholar.org/b6d3/c8322d8e6a0212456cf38c6ef59c13d062dd.pdf,,,http://pages.ucsd.edu/~ztu/publication/cogsci14_motion.pdf
+b5cd9e5d81d14868f1a86ca4f3fab079f63a366d,https://ivi.fnwi.uva.nl/isis/publications/2016/AgharwalWCACV2016/AgharwalWCACV2016.pdf,,,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477706
+b5ae8b69677fb962421fe7072f1e842e71f3bea5,,,,http://doi.ieeecomputersociety.org/10.1109/ACII.2017.8273641
+b5cd8151f9354ee38b73be1d1457d28e39d3c2c6,http://pdfs.semanticscholar.org/b5cd/8151f9354ee38b73be1d1457d28e39d3c2c6.pdf,,,http://digitalassets.lib.berkeley.edu/techreports/ucb/text/EECS-2006-77.pdf
+b5fc4f9ad751c3784eaf740880a1db14843a85ba,http://pdfs.semanticscholar.org/b5fc/4f9ad751c3784eaf740880a1db14843a85ba.pdf,,https://doi.org/10.1007/s11760-007-0016-5,http://www.researchgate.net/profile/B_Kumar8/publication/220437304_Significance_of_image_representation_for_face_verification/links/54a97f8e0cf256bf8bb95d00.pdf
+b5979489e11edd76607c219a8bdc83ba4a88ab38,,,https://doi.org/10.1109/ACCESS.2017.2778011,
+b506aa23949b6d1f0c868ad03aaaeb5e5f7f6b57,http://pdfs.semanticscholar.org/b506/aa23949b6d1f0c868ad03aaaeb5e5f7f6b57.pdf,,,https://www.base-search.net/Record/3b04a8f520744937ae3394bccbac1d98399bc9af219cd06cb618e7be7340943d
+b5bda4e1374acc7414107cde529ad8b3263fae4b,,,https://doi.org/10.1007/s11370-010-0066-3,
+b54fe193b6faf228e5ffc4b88818d6aa234b5bb9,,,,http://doi.acm.org/10.1145/2964284.2967287
+b599f323ee17f12bf251aba928b19a09bfbb13bb,http://pdfs.semanticscholar.org/b599/f323ee17f12bf251aba928b19a09bfbb13bb.pdf,,,http://ial.eecs.ucf.edu/pdf/ReyMS2015.pdf
+b5690409be6c4e98bd37181d41121adfef218537,,,https://doi.org/10.1109/ICIP.2008.4711920,
+b58d381f9f953bfe24915246b65da872aa94f9aa,,,https://doi.org/10.1109/SMAP.2013.13,
+b5da4943c348a6b4c934c2ea7330afaf1d655e79,http://pdfs.semanticscholar.org/b5da/4943c348a6b4c934c2ea7330afaf1d655e79.pdf,,,https://arxiv.org/pdf/1803.06598v1.pdf
+b5402c03a02b059b76be829330d38db8e921e4b5,http://pdfs.semanticscholar.org/b540/2c03a02b059b76be829330d38db8e921e4b5.pdf,,,http://www.lifesciencesite.com/lsj/life0601/12_life0601_61_66_Hybridized.pdf
+b5160e95192340c848370f5092602cad8a4050cd,http://pdfs.semanticscholar.org/dd71/dc78e75f0de27263d508b3a8b29921cfea03.pdf,,,https://arxiv.org/pdf/1710.05112v2.pdf
+b5f79df712ad535d88ae784a617a30c02e0551ca,,,https://doi.org/10.1109/LSP.2015.2480758,
+b52c0faba5e1dc578a3c32a7f5cfb6fb87be06ad,http://pdfs.semanticscholar.org/b52c/0faba5e1dc578a3c32a7f5cfb6fb87be06ad.pdf,,,http://www.redalyc.org/pdf/474/47436895009.pdf
+b56530be665b0e65933adec4cc5ed05840c37fc4,http://kobus.ca/research/publications/07/cvpr-07-region-www.pdf,,,http://www.researchgate.net/profile/Kobus_Barnard/publication/224716273_Reducing_correspondence_ambiguity_in_loosely_labeled_training_data/links/00b4951ed48b4a77dd000000.pdf
+b5f4e617ac3fc4700ec8129fcd0dcf5f71722923,http://pdfs.semanticscholar.org/c4dd/f94ed445bad0793cd4ba2813506d02221ec0.pdf,,,http://bokertov.cs.aue.auc.dk/krueger02fg_c.ps.gz
+b52886610eda6265a2c1aaf04ce209c047432b6d,http://infolab.stanford.edu/~wangz/project/imsearch/Aesthetics/TAC16/xu.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2016.2518162
+b51b4ef97238940aaa4f43b20a861eaf66f67253,http://pdfs.semanticscholar.org/b51b/4ef97238940aaa4f43b20a861eaf66f67253.pdf,,https://doi.org/10.1155/2009/184618,http://amp.ece.cmu.edu/Publication/Devi/ParikhChen_hSO_journal_2008.pdf
+b5d7c5aba7b1ededdf61700ca9d8591c65e84e88,http://pdfs.semanticscholar.org/b5d7/c5aba7b1ededdf61700ca9d8591c65e84e88.pdf,,,http://www.isca-speech.org/archive/interspeech_2010/i10_0901.html
+b5c749f98710c19b6c41062c60fb605e1ef4312a,http://www.yugangjiang.info/publication/icmr15-eval2stream.pdf,,,http://arxiv.org/abs/1504.01920
+b5667d087aafcf6b91f3c77aa90cee1ac185f8f1,http://www-ee.ccny.cuny.edu/wwwn/yltian/Publications/ICIP17.pdf,,https://doi.org/10.1109/ICIP.2017.8296599,
+b55f256bbd2e1a41ce6bfcd892dee12f5bcd7cb3,,,,
+b5857b5bd6cb72508a166304f909ddc94afe53e3,http://pdfs.semanticscholar.org/b585/7b5bd6cb72508a166304f909ddc94afe53e3.pdf,,,http://ceur-ws.org/Vol-1436/Paper68.pdf
+b51e3d59d1bcbc023f39cec233f38510819a2cf9,http://pdfs.semanticscholar.org/b51e/3d59d1bcbc023f39cec233f38510819a2cf9.pdf,,,http://arxiv.org/abs/1311.4082
+b503793943a17d2f569685cd17e86b5b4fffe3fd,,,,
+b5efe2e53aa417367314c1a907d0fe8053c71ecd,,,,
+b54c477885d53a27039c81f028e710ca54c83f11,http://coewww.rutgers.edu/riul/research/papers/pdf/skmspami.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2013.190
+b503f481120e69b62e076dcccf334ee50559451e,http://pdfs.semanticscholar.org/b503/f481120e69b62e076dcccf334ee50559451e.pdf,,https://doi.org/10.1007/978-3-319-16631-5_49,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ACCV_2014/pages/workshop15/pdffiles/w15-p7.pdf
+b50edfea790f86373407a964b4255bf8e436d377,,,,http://doi.acm.org/10.1145/3136755.3143008
+b55d0c9a022874fb78653a0004998a66f8242cad,http://pdfs.semanticscholar.org/b55d/0c9a022874fb78653a0004998a66f8242cad.pdf,,,http://ocean.kisti.re.kr/downfile/volume/etri/HJTODO/2013/v35n6/HJTODO_2013_v35n6_1021.pdf
+b5930275813a7e7a1510035a58dd7ba7612943bc,http://pdfs.semanticscholar.org/b593/0275813a7e7a1510035a58dd7ba7612943bc.pdf,,,http://www.iis.sinica.edu.tw/page/jise/2010/201007_23.pdf
+b59c8b44a568587bc1b61d130f0ca2f7a2ae3b88,http://pdfs.semanticscholar.org/b59c/8b44a568587bc1b61d130f0ca2f7a2ae3b88.pdf,,https://doi.org/10.1007/978-3-319-47665-0_10,http://iva2016.ict.usc.edu/wp-content/uploads/Papers/100110106.pdf
+b249f10a30907a80f2a73582f696bc35ba4db9e2,http://pdfs.semanticscholar.org/f06d/6161eef9325285b32356e1c4b5527479eb9b.pdf,,,http://arxiv.org/pdf/1601.03945v1.pdf
+b2a0e5873c1a8f9a53a199eecae4bdf505816ecb,http://pdfs.semanticscholar.org/b2a0/e5873c1a8f9a53a199eecae4bdf505816ecb.pdf,,,https://arxiv.org/pdf/1711.11566v1.pdf
+b299c292b84aeb4f080a8b39677a8e0d07d51b27,,,,http://doi.ieeecomputersociety.org/10.1109/ICDM.2015.23
+b2cd92d930ed9b8d3f9dfcfff733f8384aa93de8,http://pdfs.semanticscholar.org/b2cd/92d930ed9b8d3f9dfcfff733f8384aa93de8.pdf,,,https://arxiv.org/pdf/1603.01249v3.pdf
+b216040f110d2549f61e3f5a7261cab128cab361,http://pdfs.semanticscholar.org/b216/040f110d2549f61e3f5a7261cab128cab361.pdf,,,https://www.jstage.jst.go.jp/article/transinf/E100.D/11/E100.D_2017EDL8124/_pdf/-char/en
+b261439b5cde39ec52d932a222450df085eb5a91,http://pdfs.semanticscholar.org/b261/439b5cde39ec52d932a222450df085eb5a91.pdf,,,http://www.ijcttjournal.org/2015/Volume24/number-2/IJCTT-V24P114.pdf
+b234cd7788a7f7fa410653ad2bafef5de7d5ad29,http://pdfs.semanticscholar.org/b234/cd7788a7f7fa410653ad2bafef5de7d5ad29.pdf,,https://doi.org/10.1007/978-3-319-16628-5_6,http://ci2cv.net/media/papers/2014_ACCV_Fagg.pdf
+b2749caec0094e186d3ee850151c899b8508f47a,,,,
+b2add9fad0bcf7bf0660f99f389672cdf7cc6a70,,,,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.226
+b2ae5c496fe01bb2e2dee107f75b82c6a2a23374,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.116
+b208f2fc776097e98b41a4ff71c18b393e0a0018,,,,http://doi.ieeecomputersociety.org/10.1109/AVSS.2003.1217900
+b235b4ccd01a204b95f7408bed7a10e080623d2e,http://pdfs.semanticscholar.org/b235/b4ccd01a204b95f7408bed7a10e080623d2e.pdf,,,http://ijcai.org/Abstract/15/516
+b259f57f41f4b3b5b7ca29c5acb6f42186bbcf23,,,https://doi.org/10.1109/SMC.2017.8122808,
+b2f9e0497901d22b05b9699b0ea8147861c2e2cc,,,https://doi.org/10.1007/978-3-319-70353-4_3,
+b209608a534957ec61e7a8f4b9d08286ae3d1d7f,,,https://doi.org/10.1111/j.1468-0394.2011.00589.x,
+b29b42f7ab8d25d244bfc1413a8d608cbdc51855,http://pdfs.semanticscholar.org/b29b/42f7ab8d25d244bfc1413a8d608cbdc51855.pdf,,,https://arxiv.org/pdf/1702.02719v1.pdf
+b22317a0bbbcc79425f7c8a871b2bf211ba2e9c4,,,https://doi.org/10.1109/ACCESS.2018.2805861,
+b21bf45cd3aeaec3440eeca09a1c5a5ee3d24a3a,,,https://doi.org/10.1080/10798587.2014.934592,
+b2e5df82c55295912194ec73f0dca346f7c113f6,http://pdfs.semanticscholar.org/b2e5/df82c55295912194ec73f0dca346f7c113f6.pdf,,,http://wangzheallen.github.io/papers/THUMOS.pdf
+b2e6944bebab8e018f71f802607e6e9164ad3537,http://pdfs.semanticscholar.org/b2e6/944bebab8e018f71f802607e6e9164ad3537.pdf,,,http://ijcai.org/papers15/Papers/IJCAI15-514.pdf
+b2c25af8a8e191c000f6a55d5f85cf60794c2709,http://pdfs.semanticscholar.org/b2c2/5af8a8e191c000f6a55d5f85cf60794c2709.pdf,,https://doi.org/10.1007/s11760-015-0832-y,http://poseidon.csd.auth.gr/papers/PUBLISHED/JOURNAL/pdf/2015/A_Novel_Dimensionality_SIVP.pdf
+b239a756f22201c2780e46754d06a82f108c1d03,http://www.rci.rutgers.edu/~vmp93/Conference_pub/Fusion_FG_camera_ready.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2015.7163146
+b234d429c9ea682e54fca52f4b889b3170f65ffc,,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2012.22
+b2ddea9c71cd73fa63e09e8121bc7a098fae70b4,,,https://doi.org/10.1109/ISCCSP.2012.6217849,
+b20cfbb2348984b4e25b6b9174f3c7b65b6aed9e,http://pdfs.semanticscholar.org/b20c/fbb2348984b4e25b6b9174f3c7b65b6aed9e.pdf,,https://doi.org/10.1007/978-3-319-54187-7_22,http://vision.cs.tut.fi/data/publications/accv2016_cory.pdf
+b262a2a543971e10fcbfc7f65f46115ae895d69e,,,https://doi.org/10.1109/DICTA.2015.7371266,
+b2cb335ded99b10f37002d09753bd5a6ea522ef1,,,https://doi.org/10.1109/ISBA.2017.7947679,
+d91fd82332a0db1bb4a8ac563f406098cfe9c4bb,,,,
+d9c0310203179d5328c4f1475fa4d68c5f0c7324,,,,http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI-T.2017.11
+d961617db4e95382ba869a7603006edc4d66ac3b,http://pdfs.semanticscholar.org/d961/617db4e95382ba869a7603006edc4d66ac3b.pdf,,,http://crcv.ucf.edu/ICCV13-Action-Workshop/index.files/NotebookPapers13/Paper%2037%20(Supplementary).pdf
+d9810786fccee5f5affaef59bc58d2282718af9b,http://pdfs.semanticscholar.org/d981/0786fccee5f5affaef59bc58d2282718af9b.pdf,,,http://www.cse.msu.edu/~rossarun/pubs/JillelaMSThesis_Fall2009.pdf
+d94d7ff6f46ad5cab5c20e6ac14c1de333711a0c,http://mirlab.org/conference_papers/International_Conference/ICASSP%202017/pdfs/0003031.pdf,,https://doi.org/10.1109/ICASSP.2017.7952713,
+d98a36081a434451184fa4becb59bf5ec55f3a1e,,,https://doi.org/10.1016/j.neucom.2016.09.110,
+d930ec59b87004fd172721f6684963e00137745f,http://pdfs.semanticscholar.org/d930/ec59b87004fd172721f6684963e00137745f.pdf,,,http://infoscience.epfl.ch/record/90995/files/CruzMota2006_1530.pdf
+d9739d1b4478b0bf379fe755b3ce5abd8c668f89,http://pdfs.semanticscholar.org/d973/9d1b4478b0bf379fe755b3ce5abd8c668f89.pdf,,https://doi.org/10.1117/1.JEI.22.3.033033,http://www.ino.it/home/cosimo/public/JEI2013.pdf
+d9c4586269a142faee309973e2ce8cde27bda718,http://pdfs.semanticscholar.org/d9c4/586269a142faee309973e2ce8cde27bda718.pdf,,,http://arxiv.org/abs/1612.02534
+d912b8d88d63a2f0cb5d58164e7414bfa6b41dfa,http://pdfs.semanticscholar.org/d912/b8d88d63a2f0cb5d58164e7414bfa6b41dfa.pdf,,,http://www.u-bourgogne.fr/SITIS/05/download/Proceedings/Files/f044.pdf
+d9072e6b7999bc2d5750eb58c67a643f38d176d6,,,https://doi.org/10.1109/LSP.2009.2027636,
+d903292dc4e752f6a3bf2abe668d17a2575044d4,,,,
+d92084e376a795d3943df577d3b3f3b7d12eeae5,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.85
+d9318c7259e394b3060b424eb6feca0f71219179,http://biometrics.cse.msu.edu/Publications/Face/ParkJainFaceSoftBio_TIFS10.pdf,,https://doi.org/10.1109/TIFS.2010.2049842,http://www.cse.msu.edu/biometrics/Publications/Face/ParkJainFaceSoftBio_TIFS10.pdf
+d963bdff2ce5212fa585a83ca8fad96875bc0057,,,https://doi.org/10.1016/j.neucom.2016.03.091,
+d9a1dd762383213741de4c1c1fd9fccf44e6480d,http://pdfs.semanticscholar.org/d9a1/dd762383213741de4c1c1fd9fccf44e6480d.pdf,,https://doi.org/10.1016/j.patcog.2003.07.005,
+d963e640d0bf74120f147329228c3c272764932b,http://pdfs.semanticscholar.org/d963/e640d0bf74120f147329228c3c272764932b.pdf,,,http://www.sersc.org/journals/IJAST/vol64/1.pdf
+d983dda8b03ed60fa3afafe5c50f1d9a495f260b,,,https://doi.org/10.1016/j.patcog.2007.03.020,
+d9e34af95c21c0e114b61abccbc653480b370c3b,,,https://doi.org/10.1016/j.patcog.2005.10.020,
+d930c3d92a075d3f3dd9f5ea1a8f04e0d659b22b,,,,
+d91a5589fd870bf62b7e4979d9d47e8acf6c655d,,,,http://doi.acm.org/10.1145/2382336.2382343
+d95e6185f82e3ef3880a98122522eca8c8c3f34e,http://bbs.utdallas.edu/facelab/docs/4_05_otoole-pami.pdf,,,http://www.utdallas.edu/~herve/abdi-pami2005.pdf
+d9d7a4b64b13ed1bce89d3cbbabe62e78d70b3fb,,,,http://doi.ieeecomputersociety.org/10.1109/ACII.2013.16
+d9ef1a80738bbdd35655c320761f95ee609b8f49,http://pdfs.semanticscholar.org/d9ef/1a80738bbdd35655c320761f95ee609b8f49.pdf,,,http://ijarcsse.com/docs/papers/Volume_5/4_April2015/V5I4-0617.pdf
+d9eed86e53ce5f7cba379fe77bbefb42e83c0d88,,,https://doi.org/10.1109/TIP.2017.2764262,
+d9b4b49378fcd77dcd5e755975b99ed4c7962f17,,,https://doi.org/10.1109/TIP.2015.2473105,
+d975a535cbf3e0a502a30ff7ad037241f9b798ae,,,,
+d99743ab1760b09b1bb88bc6e1dc5b9d0e48baac,,,,
+d91f9e8cbf271004ef1a293401197a10a26ccd1b,,,https://doi.org/10.1109/SOCPAR.2015.7492801,
+d9327b9621a97244d351b5b93e057f159f24a21e,http://www.cil.pku.edu.cn/publications/papers/CS2010gusuicheng.pdf,,https://doi.org/10.1007/s11432-010-4099-1,
+d915e634aec40d7ee00cbea96d735d3e69602f1a,http://pdfs.semanticscholar.org/d915/e634aec40d7ee00cbea96d735d3e69602f1a.pdf,,,http://cs231n.stanford.edu/reports/ken_final_report.pdf
+ac1d97a465b7cc56204af5f2df0d54f819eef8a6,http://pdfs.semanticscholar.org/ac1d/97a465b7cc56204af5f2df0d54f819eef8a6.pdf,,,http://www.ic.unicamp.br/~rocha/pub/papers/a-look-at-eye-detection-for-unconstrained-environments.pdf
+ac2e44622efbbab525d4301c83cb4d5d7f6f0e55,http://openaccess.thecvf.com/content_cvpr_2016/papers/Booth_A_3D_Morphable_CVPR_2016_paper.pdf,,,http://ibug.doc.ic.ac.uk/media/uploads/documents/0002.pdf
+ac6c3b3e92ff5fbcd8f7967696c7aae134bea209,http://pdfs.semanticscholar.org/ac6c/3b3e92ff5fbcd8f7967696c7aae134bea209.pdf,,https://doi.org/10.1007/978-3-319-46454-1_37,http://personal.ie.cuhk.edu.hk/~ccloy/files/eccv_2016_hallucination.pdf
+ac21c8aceea6b9495574f8f9d916e571e2fc497f,http://pdfs.semanticscholar.org/ac21/c8aceea6b9495574f8f9d916e571e2fc497f.pdf,,,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ACCV_2014/pages/workshop16/pdffiles/w16-p9.pdf
+ace1e0f50fe39eb9a42586f841d53980c6f04b11,,,,http://doi.ieeecomputersociety.org/10.1109/SMARTCOMP.2014.7043849
+ac6a9f80d850b544a2cbfdde7002ad5e25c05ac6,http://eprints.whiterose.ac.uk/104654/9/07289412.pdf,,https://doi.org/10.1109/TFUZZ.2015.2486803,
+aca273a9350b10b6e2ef84f0e3a327255207d0f5,http://pdfs.semanticscholar.org/efb2/4d35d8f6a46e1ff3800a2481bc7e681e255e.pdf,,https://doi.org/10.1016/j.patrec.2015.08.006,http://vbn.aau.dk/files/219488771/on_softbiometrics_preprint.pdf
+aca75c032cfb0b2eb4c0ae56f3d060d8875e43f9,http://pdfs.semanticscholar.org/aca7/5c032cfb0b2eb4c0ae56f3d060d8875e43f9.pdf,,,http://www.ijcai.org/Proceedings/13/Papers/207.pdf
+acab402d706dbde4bea4b7df52812681011f435e,,,https://doi.org/10.1109/HIS.2012.6421377,
+ac98e7c570eb4a9db23f85164010f94afba1251e,,,,
+ac51d9ddbd462d023ec60818bac6cdae83b66992,http://pdfs.semanticscholar.org/ac51/d9ddbd462d023ec60818bac6cdae83b66992.pdf,,https://doi.org/10.1155/2015/709072,http://ftp.ncbi.nlm.nih.gov/pub/pmc/67/f1/CIN2015-709072.PMC4609417.pdf
+acc548285f362e6b08c2b876b628efceceeb813e,http://pdfs.semanticscholar.org/acc5/48285f362e6b08c2b876b628efceceeb813e.pdf,,https://doi.org/10.1155/2014/427826,
+ac820d67b313c38b9add05abef8891426edd5afb,http://pdfs.semanticscholar.org/da4e/76b789f7ea8ed6c6d26858ac8a12bb1413fe.pdf,,https://doi.org/10.1016/j.patcog.2014.11.016,http://web.fsktm.um.edu.my/~cschan/doc/PR2015.pdf
+ac9a331327cceda4e23f9873f387c9fd161fad76,http://pdfs.semanticscholar.org/ac9a/331327cceda4e23f9873f387c9fd161fad76.pdf,,,http://arxiv.org/abs/1709.01664
+acd4280453b995cb071c33f7c9db5760432f4279,,,https://doi.org/10.1007/s00138-018-0907-1,
+ac48ecbc7c3c1a7eab08820845d47d6ce197707c,,,https://doi.org/10.1109/TIP.2017.2681841,
+ac12ba5bf81de83991210b4cd95b4ad048317681,http://pdfs.semanticscholar.org/ac12/ba5bf81de83991210b4cd95b4ad048317681.pdf,,https://doi.org/10.1007/978-3-319-49409-8_30,https://www.cmpe.boun.edu.tr/~salah/gurpinar16combining.pdf
+ac37285f2f5ccf99e9054735a36465ee35a6afdd,,,https://doi.org/10.1109/ISCAS.2006.1693880,
+ac75c662568cbb7308400cc002469a14ff25edfd,http://www.dsp.toronto.edu/juwei/Publication/JuweiICIP04v2.pdf,,https://doi.org/10.1109/ICIP.2004.1418690,http://www.dsp.toronto.edu/~kostas/Publications2008/pub/proceed/113.pdf
+ac9dfbeb58d591b5aea13d13a83b1e23e7ef1fea,http://pdfs.semanticscholar.org/ac9d/fbeb58d591b5aea13d13a83b1e23e7ef1fea.pdf,,,http://luks.fe.uni-lj.si/sl/osebje/vitomir/pub/InTech.pdf
+acde297810059ca632ef3f7c002b63b40cb8796f,,,,
+acb83d68345fe9a6eb9840c6e1ff0e41fa373229,http://pdfs.semanticscholar.org/acb8/3d68345fe9a6eb9840c6e1ff0e41fa373229.pdf,,,http://class.inrialpes.fr/pub/blaschko-phd09.pdf
+ac206a97e981df4514dcae28442beaea31845f35,,,,
+ade1034d5daec9e3eba1d39ae3f33ebbe3e8e9a7,http://pdfs.semanticscholar.org/ade1/034d5daec9e3eba1d39ae3f33ebbe3e8e9a7.pdf,,,http://www.enterface.net/enterface05/docs/results/reports/project2.pdf
+ad8540379884ec03327076b562b63bc47e64a2c7,http://pdfs.semanticscholar.org/ad85/40379884ec03327076b562b63bc47e64a2c7.pdf,,https://doi.org/10.1504/IJBIC.2013.055092,http://www.researchgate.net/profile/Md_Jan_Nordin/publication/249315811_Bee_royalty_offspring_algorithm_for_improvement_of_facial_expressions_classification_model/links/00b4951e4d046dddcd000000.pdf
+ad08426ca57da2be0e9f8c1f673e491582edb896,,,,http://doi.ieeecomputersociety.org/10.1109/TKDE.2013.98
+adce9902dca7f4e8a9b9cf6686ec6a7c0f2a0ba6,http://openaccess.thecvf.com/content_iccv_2015/papers/Li_Two_Birds_One_ICCV_2015_paper.pdf,,,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Li_Two_Birds_One_ICCV_2015_paper.pdf
+ad27d13d163757b65110f98a0e7dd7f5bc8c8030,,,,
+adf7ccb81b8515a2d05fd3b4c7ce5adf5377d9be,http://pdfs.semanticscholar.org/adf7/ccb81b8515a2d05fd3b4c7ce5adf5377d9be.pdf,,https://doi.org/10.24348/coria.2015.27,http://coria2015.lip6.fr/wp-content/uploads/2015/03/27.pdf
+adad7446e371d27fdaee39475856e2058f3045e5,,,https://doi.org/10.1109/ISCAS.2013.6572295,
+ada73060c0813d957576be471756fa7190d1e72d,http://pdfs.semanticscholar.org/ada7/3060c0813d957576be471756fa7190d1e72d.pdf,,,http://arxiv.org/abs/1610.05402
+add50a7d882eb38e35fe70d11cb40b1f0059c96f,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1A_086_ext.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2015.7298679
+ad6cc071b2585e4bdb6233b7ad8d63e12538537d,,,https://doi.org/10.1007/s10462-010-9172-z,
+ad4d1ecf5c5473c050e11f6876ce148de1c8920a,,,https://doi.org/10.1109/IJCNN.2017.7965886,
+ad784332cc37720f03df1c576e442c9c828a587a,http://pdfs.semanticscholar.org/ad78/4332cc37720f03df1c576e442c9c828a587a.pdf,,https://doi.org/10.1002/ima.10047,http://www.jdl.ac.cn/user/sgshan/pub/Shan-IJIST-2003.pdf
+ad9ba7eade9d4299159512d6d5d07d7d3d26ae58,,,https://doi.org/10.1007/s11063-012-9252-y,
+ad8bd7016132a2f98ff1f41dac695285e71cc4b1,,,https://doi.org/10.1109/CISP-BMEI.2017.8301964,
+ada42b99f882ba69d70fff68c9ccbaff642d5189,http://pdfs.semanticscholar.org/ba11/4dfdd12b0f4323a8f28cd2bd770dfa74673e.pdf,,,http://www.robots.ox.ac.uk/~vgg/presentations/Schroff_Thesis_Talk_onlineVersion.pdf
+add6d96fc018986f51a1aac47eae9ee3fc62fb66,,,,http://doi.acm.org/10.1145/3009977.3010074
+ad6c7cc5c0f4ab273fef105ff3761d2c08609a20,https://people.cs.clemson.edu/~jzwang/1701863/mm2016/p1405-huo-ACM%20MM-Jing%20HUO-2016-10-19.pdf,,,http://doi.acm.org/10.1145/2964284.2964311
+adfaf01773c8af859faa5a9f40fb3aa9770a8aa7,http://pdfs.semanticscholar.org/adfa/f01773c8af859faa5a9f40fb3aa9770a8aa7.pdf,,,http://cs.stanford.edu/groups/vision/documents/Deng_PhD_thesis_2012.pdf
+ad5a35a251e07628dd035c68e44a64c53652be6b,,,https://doi.org/10.1016/j.patcog.2016.12.024,
+adf5caca605e07ee40a3b3408f7c7c92a09b0f70,http://pdfs.semanticscholar.org/adf5/caca605e07ee40a3b3408f7c7c92a09b0f70.pdf,,https://doi.org/10.1007/11539117_17,
+adaf2b138094981edd615dbfc4b7787693dbc396,http://pdfs.semanticscholar.org/adaf/2b138094981edd615dbfc4b7787693dbc396.pdf,,,http://ethos.bl.uk/OrderDetails.do?uin=uk.bl.ethos.442367
+adc4bc7639d5f1c5ead8728882e2390339d061ed,https://www.researchgate.net/profile/Fanbo_Meng2/publication/224144294_Emotional_Audio-Visual_Speech_Synthesis_Based_on_PAD/links/00b49538fd61d3280d000000.pdf?origin=publication_list,,https://doi.org/10.1109/TASL.2010.2052246,
+ad6745dd793073f81abd1f3246ba4102046da022,http://pdfs.semanticscholar.org/ad67/45dd793073f81abd1f3246ba4102046da022.pdf,,https://doi.org/10.1016/j.patcog.2016.10.022,https://www.ecse.rpi.edu/~cvrl/Publication/pdf/Wu2016b.pdf
+adf31283550ff810540bad0edd2c8878ac252b20,,,,
+ad7b6d2e8d66f720cc83323a0700c25006d49609,,,https://doi.org/10.1109/TIP.2009.2028255,
+adb040081974369c46b943e9f75be4e405623102,,,,http://doi.ieeecomputersociety.org/10.1109/PACCS.2009.191
+ad9cb522cc257e3c5d7f896fe6a526f6583ce46f,http://pdfs.semanticscholar.org/ad9c/b522cc257e3c5d7f896fe6a526f6583ce46f.pdf,,,https://tspace.library.utoronto.ca/bitstream/1807/25221/1/Christopher%20Wang.pdf
+ad339a5fdaab95f3c8aad83b60ceba8d76107fa2,,,https://doi.org/10.1023/B:VISI.0000013090.39095.d5,
+ad624331dc5f8dc3a72b1d5baf69634b2f345656,,,,
+ad08c97a511091e0f59fc6a383615c0cc704f44a,http://pdfs.semanticscholar.org/ad08/c97a511091e0f59fc6a383615c0cc704f44a.pdf,,,http://ewic.bcs.org/upload/pdf/ewic_hci12_wip_paper10.pdf
+ad5a1621190d18dd429930ab5125c849ce7e4506,http://www.cs.csub.edu/~acruz/papers/10.1109-ICIP.2014.7025275.pdf,,https://doi.org/10.1109/ICIP.2014.7025275,
+ad37d01c4787d169daff7da52e80e2018aab6358,http://ibug.doc.ic.ac.uk/media/uploads/documents/bidirectional_newton_aam.pdf,,https://doi.org/10.1109/TIP.2016.2642828,http://ibug.doc.ic.ac.uk/media/uploads/documents/newton_and_bidirectional_aam.pdf
+ada56c9ceef50aa5159f1f8aa45ca2040d1ed15c,,,https://doi.org/10.1109/TIFS.2017.2680246,
+ad247138e751cefa3bb891c2fe69805da9c293d7,http://pdfs.semanticscholar.org/ad24/7138e751cefa3bb891c2fe69805da9c293d7.pdf,,,http://article.sciencepublishinggroup.com/pdf/10.11648.j.ajnc.20150404.12.pdf
+ad75330953d9aacc05b5ca1a50c4fed3e7ca1e21,http://www.science.uva.nl/~asalah/dibeklioglu11design.pdf,,https://doi.org/10.1007/s12193-011-0057-5,https://staff.fnwi.uva.nl/th.gevers/pub/Hamdi2011.pdf
+ad1679295a5e5ebe7ad05ea1502bce961ec68057,,,,http://doi.ieeecomputersociety.org/10.1109/ACII.2015.7344631
+adf9998214598469f7a097bc50de4c23784f2a5a,,,,http://doi.ieeecomputersociety.org/10.1109/ICPR.2014.745
+ad50f6899103eff0ee4504e539c38eb965fd1309,,,https://doi.org/10.1109/IJCNN.2010.5596374,
+bbc21d6b7c6e807c6886d237a04b501158ca6bb8,,,https://doi.org/10.1109/TMM.2016.2523421,
+bbc4b376ebd296fb9848b857527a72c82828fc52,http://pdfs.semanticscholar.org/bbc4/b376ebd296fb9848b857527a72c82828fc52.pdf,,,https://www.cs.umd.edu/sites/default/files/scholarly_papers/EmilyHand.pdf
+bb489e4de6f9b835d70ab46217f11e32887931a2,http://conteudo.icmc.usp.br/pessoas/moacir/p17sibgrapi-tutorial/2017-SIBGRAPI_Tutorial-Survey_Paper-Deep_Learning_for_Computer_Vision.pdf,,,http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI-T.2017.12
+bba281fe9c309afe4e5cc7d61d7cff1413b29558,http://pdfs.semanticscholar.org/bba2/81fe9c309afe4e5cc7d61d7cff1413b29558.pdf,,,
+bb557f4af797cae9205d5c159f1e2fdfe2d8b096,http://pdfs.semanticscholar.org/bb55/7f4af797cae9205d5c159f1e2fdfe2d8b096.pdf,,https://doi.org/10.1016/j.patcog.2015.02.020,http://poseidon.csd.auth.gr/papers/PUBLISHED/JOURNAL/pdf/2015/8_N_Tsapanos_ADATKKMCBD.pdf
+bb070c019c0885232f114c7dca970d2afd9cd828,,,https://doi.org/10.1109/DICTA.2014.7008089,
+bb06ef67a49849c169781657be0bb717587990e0,http://www.eurasip.org/Proceedings/Eusipco/Eusipco2017/papers/1570342773.pdf,,https://doi.org/10.23919/EUSIPCO.2017.8081357,
+bb22104d2128e323051fb58a6fe1b3d24a9e9a46,http://pdfs.semanticscholar.org/bb22/104d2128e323051fb58a6fe1b3d24a9e9a46.pdf,,https://doi.org/10.1007/978-3-540-76390-1_61,http://www.iis.sinica.edu.tw/papers/song/4971-F.pdf
+bbf20adb59b7461e0d040e665bf64ae5f478eda0,,,,
+bbc47f421ab161f22f2699ee7bbb7fc8aec1cb7b,,,https://doi.org/10.1109/IJCNN.2017.7966271,
+bbe1332b4d83986542f5db359aee1fd9b9ba9967,http://pdfs.semanticscholar.org/bbe1/332b4d83986542f5db359aee1fd9b9ba9967.pdf,,https://doi.org/10.1016/j.patcog.2017.10.030,http://arxiv.org/abs/1703.05530
+bbe949c06dc4872c7976950b655788555fe513b8,http://www.quaero.org/media/files/bibliographie/ekenel_automaticfrequency.pdf,,,https://cvhci.anthropomatik.kit.edu/~hgao/publications/icpr2010_Hazim.pdf
+bbcb4920b312da201bf4d2359383fb4ee3b17ed9,http://pdfs.semanticscholar.org/bbcb/4920b312da201bf4d2359383fb4ee3b17ed9.pdf,,,http://journals.plos.org/plosone/article/file?id=10.1371/journal.pone.0159945&type=printable
+bb6bf94bffc37ef2970410e74a6b6dc44a7f4feb,http://pdfs.semanticscholar.org/bb6b/f94bffc37ef2970410e74a6b6dc44a7f4feb.pdf,,,http://openaccess.thecvf.com/content_ICCV_2017/supplemental/Li_Situation_Recognition_With_ICCV_2017_supplemental.pdf
+bb3698df3b4f40c0b7cc523d26ffb8c5276d5a1c,,,https://doi.org/10.1109/ICDSP.2016.7868528,
+bb83d5c7c17832d1eef14aa5d303d9dd65748956,,,,http://doi.acm.org/10.1145/3139513.3139514
+bbc8ccd3f62615e3c0ce2c3aee5e4a223d215bbd,,,https://doi.org/10.1007/s11042-015-2497-5,
+bbf01aa347982592b3e4c9e4f433e05d30e71305,https://pdfs.semanticscholar.org/bbf0/1aa347982592b3e4c9e4f433e05d30e71305.pdf,,https://doi.org/10.1109/ICIP.2013.6738760,http://koasas.kaist.ac.kr/bitstream/10203/188066/1/79087.pdf
+bbfe0527e277e0213aafe068113d719b2e62b09c,http://pdfs.semanticscholar.org/bbfe/0527e277e0213aafe068113d719b2e62b09c.pdf,,https://doi.org/10.1007/978-3-642-33718-5_13,http://www.umiacs.umd.edu/~kanazawa/papers/eccv2012_dog_final.pdf
+bbf1396eb826b3826c5a800975047beabde2f0de,http://pdfs.semanticscholar.org/bbf1/396eb826b3826c5a800975047beabde2f0de.pdf,,https://doi.org/10.1016/j.cviu.2004.01.002,http://vicos.fri.uni-lj.si/alesl/files/2008/05/bischofcviu04.pdf
+bb451dc2420e1a090c4796c19716f93a9ef867c9,http://pdfs.semanticscholar.org/bb45/1dc2420e1a090c4796c19716f93a9ef867c9.pdf,,,http://research.ijcaonline.org/volume104/number5/pxc3899123.pdf
+bb69f750ccec9624f6dabd334251def2bbddf166,http://research.microsoft.com/en-us/um/people/leizhang/Paper/FG04-Yuxiao.pdf,,,http://doi.ieeecomputersociety.org/10.1109/AFGR.2004.1301639
+bb750b4c485bc90a47d4b2f723be4e4b74229f7a,http://pdfs.semanticscholar.org/bb75/0b4c485bc90a47d4b2f723be4e4b74229f7a.pdf,,,http://carol.wins.uva.nl/~nicu/publications/book.pdf
+d73d2c9a6cef79052f9236e825058d5d9cdc1321,http://pdfs.semanticscholar.org/d73d/2c9a6cef79052f9236e825058d5d9cdc1321.pdf,,,http://www.eurecom.fr/en/publication/4335/download/mm-publi-4335.pdf
+d794ffece3533567d838f1bd7f442afee13148fd,http://pdfs.semanticscholar.org/d794/ffece3533567d838f1bd7f442afee13148fd.pdf,,https://doi.org/10.1007/978-3-319-16628-5_2,http://img.cs.uec.ac.jp/pub/conf14/141105dohang_0.pdf
+d7c87f4ca39f79d93c954ffacac32bc6eb527e2c,,,https://doi.org/10.1007/978-3-642-15696-0_57,
+d75bd05865224a1341731da66b8d812a7924d6f6,,,https://doi.org/10.1109/TSMCB.2012.2217127,
+d78077a7aa8a302d4a6a09fb9737ab489ae169a6,http://pdfs.semanticscholar.org/d780/77a7aa8a302d4a6a09fb9737ab489ae169a6.pdf,,https://doi.org/10.1016/j.patcog.2017.03.010,http://arxiv.org/abs/1506.00481
+d7ecfb6108a379a0abf76bf3105b4c9baca8f84f,,,,
+d7312149a6b773d1d97c0c2b847609c07b5255ec,http://pdfs.semanticscholar.org/d731/2149a6b773d1d97c0c2b847609c07b5255ec.pdf,,,http://s3.amazonaws.com/kvaccaro.com/documents/aaai17.pdf
+d79530e1745b33f3b771d0b38d090b40afc04191,,,https://doi.org/10.1007/s11042-015-2485-9,
+d778c46657a974e6e87df82b7ee2ced8e5c6f151,,,,
+d7d9c1fa77f3a3b3c2eedbeb02e8e7e49c955a2f,http://pdfs.semanticscholar.org/d7d9/c1fa77f3a3b3c2eedbeb02e8e7e49c955a2f.pdf,,,http://arxiv.org/abs/1702.00583
+d708ce7103a992634b1b4e87612815f03ba3ab24,http://pdfs.semanticscholar.org/d708/ce7103a992634b1b4e87612815f03ba3ab24.pdf,,,http://www.yugangjiang.info/publication/TPAMI17-supplementary.pdf
+d7bd37920a3a4a4d681151131e23a839695c8d5b,,,https://doi.org/10.1109/ICRA.2011.5979870,
+d7b7253f7d8b397d9d74057e1e72ed9c58e2ba6d,,,https://doi.org/10.1109/TII.2013.2271914,
+d785fcf71cb22f9c33473cba35f075c1f0f06ffc,http://research.cs.rutgers.edu/~linzhong/PDF/Lin_cvpr2012.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247974
+d723ebf3288126fa8cbb10ba7e2a6308aede857c,,,https://doi.org/10.1117/12.968586,
+d78373de773c2271a10b89466fe1858c3cab677f,http://pdfs.semanticscholar.org/d783/73de773c2271a10b89466fe1858c3cab677f.pdf,,https://doi.org/10.1016/j.imavis.2016.08.014,http://arxiv.org/abs/1503.07706
+d7a84db2a1bf7b97657b0250f354f249394dd700,,,https://doi.org/10.1109/ICIP.2010.5653518,
+d78fbd11f12cbc194e8ede761d292dc2c02d38a2,http://pdfs.semanticscholar.org/d78f/bd11f12cbc194e8ede761d292dc2c02d38a2.pdf,,,http://thesai.org/Downloads/Volume8No10/Paper_3-Enhancing_Gray_Scale_Images_for_Face_Detection.pdf
+d72973a72b5d891a4c2d873daeb1bc274b48cddf,http://pdfs.semanticscholar.org/d729/73a72b5d891a4c2d873daeb1bc274b48cddf.pdf,,,http://www.wseas.org/multimedia/journals/information/2013/045709-158.pdf
+d7b4d741b1dd4fb3f278efa5fdf2a5d8523caa0e,,,,
+d7d166aee5369b79ea2d71a6edd73b7599597aaa,http://pdfs.semanticscholar.org/d7d1/66aee5369b79ea2d71a6edd73b7599597aaa.pdf,,,https://arxiv.org/pdf/1803.05657v1.pdf
+d79f9ada35e4410cd255db39d7cc557017f8111a,http://pdfs.semanticscholar.org/d79f/9ada35e4410cd255db39d7cc557017f8111a.pdf,,,http://www.jemr.org/download/pictures/be/0nj39rd90voe5y7vsyg6wrwzsxjh4h/bengoechea_et_al_petmei_jemr_2014.pdf
+d05759932001aa6f1f71e7dc261c4716f57a5397,,,https://doi.org/10.1109/ISBA.2015.7126365,
+d0e895a272d684a91c1b1b1af29747f92919d823,http://pdfs.semanticscholar.org/d0e8/95a272d684a91c1b1b1af29747f92919d823.pdf,,,http://cs-people.bu.edu/sbargal/ipcv_2012.pdf
+d082f35534932dfa1b034499fc603f299645862d,http://pdfs.semanticscholar.org/d082/f35534932dfa1b034499fc603f299645862d.pdf,,,http://crcv.ucf.edu/papers/theses/Ortiz.pdf
+d03265ea9200a993af857b473c6bf12a095ca178,http://pdfs.semanticscholar.org/d032/65ea9200a993af857b473c6bf12a095ca178.pdf,,https://doi.org/10.1117/1.JEI.24.3.033013,http://www.famt.net/up/uppaper/2015120843996089.pdf
+d0ac9913a3b1784f94446db2f1fb4cf3afda151f,http://pdfs.semanticscholar.org/d0ac/9913a3b1784f94446db2f1fb4cf3afda151f.pdf,,,http://arxiv.org/pdf/1607.04780v1.pdf
+d046030f7138e5a2dbe2b3eec1b948ad8c787538,,,https://doi.org/10.1109/ICIP.2009.5413447,
+d0eb3fd1b1750242f3bb39ce9ac27fc8cc7c5af0,http://pdfs.semanticscholar.org/d0eb/3fd1b1750242f3bb39ce9ac27fc8cc7c5af0.pdf,,https://doi.org/10.1016/j.patrec.2015.11.011,http://www.eurecom.fr/en/publication/4768/download/mm-publi-4768.pdf
+d0296efc3c532269aaa7e8f856f5d1807af847fb,,,,
+d0b67ec62086b55f00dc461ab58dc87b85388b2b,,,https://doi.org/10.1109/ICIP.2014.7026206,
+d00c335fbb542bc628642c1db36791eae24e02b7,http://pdfs.semanticscholar.org/d00c/335fbb542bc628642c1db36791eae24e02b7.pdf,,https://doi.org/10.3390/s18020456,
+d06c8e3c266fbae4026d122ec9bd6c911fcdf51d,http://pdfs.semanticscholar.org/d06c/8e3c266fbae4026d122ec9bd6c911fcdf51d.pdf,,,
+d074b33afd95074d90360095b6ecd8bc4e5bb6a2,http://pdfs.semanticscholar.org/d074/b33afd95074d90360095b6ecd8bc4e5bb6a2.pdf,,https://doi.org/10.1142/S0219843608001303,http://www.lsr.ei.tum.de/fileadmin/publications/bauer-2008-ijhr.pdf
+d04d5692461d208dd5f079b98082eda887b62323,http://www.cbsr.ia.ac.cn/users/zlei/papers/ICB2015/ZLEI-ICB-15.pdf,,https://doi.org/10.1109/ICB.2015.7139113,
+d0a8889f694422614bf3ecccd69aa1d4f7822606,,,https://doi.org/10.1007/978-0-85729-997-0_22,
+d0f9143f6f43a39bff47daf8c596681581db72ea,,,https://doi.org/10.1007/s11042-017-5241-5,
+d0b7d3f9a59034d44e7cd1b434cfd27136a7c029,,,https://doi.org/10.1109/INCoS.2013.143,
+d05513c754966801f26e446db174b7f2595805ba,http://pdfs.semanticscholar.org/d055/13c754966801f26e446db174b7f2595805ba.pdf,,https://doi.org/10.1007/978-3-319-16634-6_14,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ACCV_2014/pages/workshop12/pdffiles/w12-o1.pdf
+d0d75a7116a76ccd98a3aeb6f6fff10ba91de1c1,,,https://doi.org/10.1109/TIP.2015.2502144,
+d03baf17dff5177d07d94f05f5791779adf3cd5f,http://pdfs.semanticscholar.org/d03b/af17dff5177d07d94f05f5791779adf3cd5f.pdf,,https://doi.org/10.1016/j.eswa.2008.08.001,http://cgit.nutn.edu.tw:8080/cgit/PaperDL/KYC_100705123805.PDF
+d09fd7e0bb5d997963cfef45452724416b2bb052,,,https://doi.org/10.1109/EMEIT.2011.6023179,
+d0a21f94de312a0ff31657fd103d6b29db823caa,http://pdfs.semanticscholar.org/d0a2/1f94de312a0ff31657fd103d6b29db823caa.pdf,,https://doi.org/10.1007/978-0-85729-997-0_19,http://www.ca.cs.cmu.edu/sites/default/files/9fea.pdf
+d03e4e938bcbc25aa0feb83d8a0830f9cd3eb3ea,http://pdfs.semanticscholar.org/d03e/4e938bcbc25aa0feb83d8a0830f9cd3eb3ea.pdf,,https://doi.org/10.1007/978-3-642-15549-9_23,https://www.researchgate.net/profile/Ngoc_Son_Vu/publication/226170268_Face_Recognition_with_Patterns_of_Oriented_Edge_Magnitudes/links/0fcfd507f0628f03e4000000.pdf
+d0dd1364411a130448517ba532728d5c2fe78ed9,,,https://doi.org/10.1109/ISCAS.2016.7527183,
+d0d7671c816ed7f37b16be86fa792a1b29ddd79b,http://pdfs.semanticscholar.org/d0d7/671c816ed7f37b16be86fa792a1b29ddd79b.pdf,,,http://www.aaai.org/ocs/index.php/AAAI/AAAI15/paper/view/9929
+d00787e215bd74d32d80a6c115c4789214da5edb,http://pdfs.semanticscholar.org/d007/87e215bd74d32d80a6c115c4789214da5edb.pdf,,,http://gip.cs.technion.ac.il/projects/uploads/52_preport_7.pdf
+be8c517406528edc47c4ec0222e2a603950c2762,http://pdfs.semanticscholar.org/be8c/517406528edc47c4ec0222e2a603950c2762.pdf,,,http://www.pitt.edu/~jeffcohn/biblio/Cohn&Ekman2005.pdf
+beb3fd2da7f8f3b0c3ebceaa2150a0e65736d1a2,http://pdfs.semanticscholar.org/beb3/fd2da7f8f3b0c3ebceaa2150a0e65736d1a2.pdf,,,http://ijrte.academypublisher.com/vol01/no01/ijrte0101318322.pdf
+be86d88ecb4192eaf512f29c461e684eb6c35257,http://pdfs.semanticscholar.org/be86/d88ecb4192eaf512f29c461e684eb6c35257.pdf,,https://doi.org/10.1007/978-3-642-15549-9_48,http://acberg.com/papers/berg_attributediscovery_eccv2010.pdf
+beb49072f5ba79ed24750108c593e8982715498e,http://pdfs.semanticscholar.org/beb4/9072f5ba79ed24750108c593e8982715498e.pdf,,,https://arxiv.org/pdf/1705.04932v1.pdf
+becd5fd62f6301226b8e150e1a5ec3180f748ff8,http://pdfs.semanticscholar.org/becd/5fd62f6301226b8e150e1a5ec3180f748ff8.pdf,,https://doi.org/10.1007/978-3-642-33765-9_24,http://perception.csl.illinois.edu/recognition/Files/ECCV2012_Jia_Face_CameraReady.pdf
+bebb8a97b2940a4e5f6e9d3caf6d71af21585eda,http://pdfs.semanticscholar.org/bebb/8a97b2940a4e5f6e9d3caf6d71af21585eda.pdf,,,https://www.researchgate.net/profile/Xueyin_Lin/publication/3974383_Mapping_emotional_status_to_facial_expressions/links/00b7d51a9fd50b8e43000000.pdf
+be07f2950771d318a78d2b64de340394f7d6b717,http://pdfs.semanticscholar.org/be07/f2950771d318a78d2b64de340394f7d6b717.pdf,,,http://www.djamelbouchaffra.com/Papers/3D-HMM.pdf
+be51854ef513362bc236b85dd6f0e2c2da51614b,,,,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.298
+be068ce0d5284dbd2c4c8ba4a31a41da2f794193,,,,
+be6bd94322dd0ecfc8ea99eb7f40a9a14dd3471f,,,https://doi.org/10.1109/UIC-ATC.2013.32,
+beb4546ae95f79235c5f3c0e9cc301b5d6fc9374,http://pdfs.semanticscholar.org/beb4/546ae95f79235c5f3c0e9cc301b5d6fc9374.pdf,,,http://www.ai.rug.nl/~mwiering/GROUP/ARTICLES/expression_recognition.pdf
+be02c2ea2b54d8fa30e2528f91a801ecf9f2185c,,,,
+bec31269632c17206deb90cd74367d1e6586f75f,http://pdfs.semanticscholar.org/bec3/1269632c17206deb90cd74367d1e6586f75f.pdf,,,https://arxiv.org/pdf/1706.08690v1.pdf
+be57d2aaab615ec8bc1dd2dba8bee41a4d038b85,https://www.cl.cam.ac.uk/~mmam3/pub/a19-mahmoud.pdf,,,http://www.cl.cam.ac.uk/~mmam3/pub/a19-mahmoud.pdf
+be40014beffaa9faacee12bb3412969f98b6a43d,,,,http://doi.ieeecomputersociety.org/10.1109/SNPD.2007.454
+bef503cdfe38e7940141f70524ee8df4afd4f954,https://pdfs.semanticscholar.org/bef5/03cdfe38e7940141f70524ee8df4afd4f954.pdf,,https://doi.org/10.1007/s00138-011-0349-5,http://link.springer.com/content/pdf/10.1007/s00138-011-0349-5.pdf
+be0a0e563445119b82d664d370e646e53e69a4c5,,,https://doi.org/10.1016/j.eswa.2017.05.037,
+beab10d1bdb0c95b2f880a81a747f6dd17caa9c2,http://pdfs.semanticscholar.org/beab/10d1bdb0c95b2f880a81a747f6dd17caa9c2.pdf,,,https://arxiv.org/pdf/1711.09515v1.pdf
+b3b532e8ea6304446b1623e83b0b9a96968f926c,http://pdfs.semanticscholar.org/b3b5/32e8ea6304446b1623e83b0b9a96968f926c.pdf,,,http://arxiv.org/abs/1611.05215
+b37f57edab685dba5c23de00e4fa032a3a6e8841,http://pdfs.semanticscholar.org/b37f/57edab685dba5c23de00e4fa032a3a6e8841.pdf,,https://doi.org/10.1117/12.2228606,http://www.cbi.gatech.edu/fpv2016/abstracts/egocentric_social_interaction_abstract.pdf
+b3154d981eca98416074538e091778cbc031ca29,http://pdfs.semanticscholar.org/b315/4d981eca98416074538e091778cbc031ca29.pdf,,https://doi.org/10.1007/978-3-642-27355-1_50,https://pdfs.semanticscholar.org/b315/4d981eca98416074538e091778cbc031ca29.pdf
+b340f275518aa5dd2c3663eed951045a5b8b0ab1,http://www.eecs.qmul.ac.uk/~sgg/papers/GongShanXiang_ACM_ICMI2007.pdf,,,http://doi.acm.org/10.1145/1322192.1322199
+b3050dc48600acf2f75edf1f580a1f9e9cb3c14a,,,https://doi.org/10.1007/s00138-013-0584-z,
+b388bf63c79e429dafee16c62b2732bcbea0d026,,,https://doi.org/10.1109/ICIP.2016.7533051,
+b351575e3eab724d62d0703e24ecae55025eef00,,,https://doi.org/10.1007/s10209-014-0369-9,
+b34fdab6864782ce60fd90d09f5d886bd83f84f5,,,https://doi.org/10.1002/cpe.3766,
+b375db63742f8a67c2a7d663f23774aedccc84e5,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2014/W17/papers/Conti_Brain-inspired_Classroom_Occupancy_2014_CVPR_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2014.95
+b3330adb131fb4b6ebbfacce56f1aec2a61e0869,http://pdfs.semanticscholar.org/b333/0adb131fb4b6ebbfacce56f1aec2a61e0869.pdf,,,http://www.enggjournals.com/ijet/docs/IJET13-05-02-267.pdf
+b36a80d15c3e48870ea6118b855055cc34307658,,,https://doi.org/10.1109/ICPR.2014.17,
+b3c60b642a1c64699ed069e3740a0edeabf1922c,http://pdfs.semanticscholar.org/b3c6/0b642a1c64699ed069e3740a0edeabf1922c.pdf,,,https://arxiv.org/pdf/1502.00046v1.pdf
+b3f3d6be11ace907c804c2d916830c85643e468d,http://pdfs.semanticscholar.org/b3f3/d6be11ace907c804c2d916830c85643e468d.pdf,,,https://www.irit.fr/publis/LILAC/Theses_et_habilitations/2010_Nguyen_PhD_logic_for_trust_ralated_emotions.pdf
+b3067deb3110e3a7566c032ac0c1e1608668ef3d,,,,
+b3f7c772acc8bc42291e09f7a2b081024a172564,http://pdfs.semanticscholar.org/b3f7/c772acc8bc42291e09f7a2b081024a172564.pdf,,,http://www.ijmer.com/papers/Vol3_Issue5/DW3532253229.pdf
+b3c398da38d529b907b0bac7ec586c81b851708f,http://www.cbsr.ia.ac.cn/publications/Stan/WHT-FG2004.pdf,,,http://doi.ieeecomputersociety.org/10.1109/AFGR.2004.1301635
+b3658514a0729694d86a8b89c875a66cde20480c,http://pdfs.semanticscholar.org/b365/8514a0729694d86a8b89c875a66cde20480c.pdf,,https://doi.org/10.1007/978-3-642-15819-3_63,http://poseidon.csd.auth.gr/papers/PUBLISHED/CONFERENCE/pdf/Bolis10a.pdf
+b3e60bb5627312b72c99c5ef18aa41bcc1d21aea,,,https://doi.org/10.1109/SPAC.2014.6982690,
+b3b4a7e29b9186e00d2948a1d706ee1605fe5811,http://pdfs.semanticscholar.org/b3b4/a7e29b9186e00d2948a1d706ee1605fe5811.pdf,,,http://www.nit.eu/czasopisma/JTIT/2010/4/19.pdf
+b32631f456397462b3530757f3a73a2ccc362342,http://pdfs.semanticscholar.org/b326/31f456397462b3530757f3a73a2ccc362342.pdf,,https://doi.org/10.24963/ijcai.2017/428,http://www.ijcai.org/proceedings/2017/0428.pdf
+b33e8db8ccabdfc49211e46d78d09b14557d4cba,http://pdfs.semanticscholar.org/b33e/8db8ccabdfc49211e46d78d09b14557d4cba.pdf,,,http://www.vbettadapura.com/files/FaceExpressionRecSurvey.pdf
+df8da144a695269e159fb0120bf5355a558f4b02,http://pdfs.semanticscholar.org/df8d/a144a695269e159fb0120bf5355a558f4b02.pdf,,,http://research.ijcaonline.org/icrtet/number3/icrtet1328.pdf
+dfd934ae448a1b8947d404b01303951b79b13801,http://pdfs.semanticscholar.org/dfd9/34ae448a1b8947d404b01303951b79b13801.pdf,,,http://eprints.bournemouth.ac.uk/23013/1/Facial%20Features%20In%20Learning%20New%20Faces.pdf
+df0e280cae018cebd5b16ad701ad101265c369fa,http://pdfs.semanticscholar.org/df0e/280cae018cebd5b16ad701ad101265c369fa.pdf,,,http://arxiv.org/pdf/1509.02470v1.pdf
+dfb8a04a80d4b0794c0679d797cb90ec101e162c,,,,http://doi.ieeecomputersociety.org/10.1109/AVSS.2014.6918665
+df1a10668eaad727ec3fdf0d5df405bbe29392c9,,,,
+df2899462e04559c024a773d91f6e06c262e136b,,,,
+dfbbe8100fcd70322a431bd5d2c2d52a65fd4bbd,,,,http://doi.acm.org/10.1145/2818346.2823313
+dfabe7ef245ca68185f4fcc96a08602ee1afb3f7,http://pdfs.semanticscholar.org/dfab/e7ef245ca68185f4fcc96a08602ee1afb3f7.pdf,,https://doi.org/10.1016/j.patcog.2016.10.026,http://ivg.au.tsinghua.edu.cn/paper/2017_Group-aware%20deep%20feature%20learning%20for%20facial%20age%20estimation.pdf
+df51dfe55912d30fc2f792561e9e0c2b43179089,http://lib-arxiv-008.serverfarm.cornell.edu/pdf/1512.06009.pdf,,https://doi.org/10.1109/TIP.2017.2717181,https://arxiv.org/pdf/1512.06009v1.pdf
+df550cb749858648209707bec5410431ea95e027,,,https://doi.org/10.1109/TCYB.2015.2433926,
+df054fa8ee6bb7d2a50909939d90ef417c73604c,http://pdfs.semanticscholar.org/df05/4fa8ee6bb7d2a50909939d90ef417c73604c.pdf,,https://doi.org/10.5220/0006626103510358,http://av.dfki.de/~pagani/papers/Selim2018_VISAPP.pdf
+df80fed59ffdf751a20af317f265848fe6bfb9c9,http://ivg.au.tsinghua.edu.cn/paper/2017_Learning%20deep%20sharable%20and%20structural%20detectors%20for%20face%20alignment.pdf,,https://doi.org/10.1109/TIP.2017.2657118,
+df7ff512e8324894d20103fd8ab5da650e4d86db,,,,http://doi.acm.org/10.1145/2043674.2043709
+dff38cac0a1004037024f0ed2a72f76f4e49318b,,,https://doi.org/10.1109/TNNLS.2015.2495268,
+dff838ba0567ef0a6c8fbfff9837ea484314efc6,http://pdfs.semanticscholar.org/dff8/38ba0567ef0a6c8fbfff9837ea484314efc6.pdf,,,https://studentnet.cs.manchester.ac.uk/resources/library/thesis_abstracts/ProjProgReptsMSc14/ChaparroAlvarez-GermanAlfonso-ProgressReport.pdf
+df7af280771a6c8302b75ed0a14ffe7854cca679,,,,http://doi.ieeecomputersociety.org/10.1109/ICMEW.2017.8026293
+dfa80e52b0489bc2585339ad3351626dee1a8395,http://pdfs.semanticscholar.org/dfa8/0e52b0489bc2585339ad3351626dee1a8395.pdf,,,https://arxiv.org/pdf/1709.06391v1.pdf
+df71a00071d5a949f9c31371c2e5ee8b478e7dc8,http://studentlife.cs.dartmouth.edu/facelogging.pdf,,,https://www.cs.dartmouth.edu/~xia/papers/mcss15-facelogging.pdf
+dfb6aa168177d4685420fcb184def0aa7db7cddb,http://pdfs.semanticscholar.org/dfb6/aa168177d4685420fcb184def0aa7db7cddb.pdf,,,http://cemrweb.cemr.wvu.edu/~sherbo/Publications/conference%20publications/The%20Effect%20of%20Lighting%20DirectionCondition%20on%20the%20Performance%20of%20Face%20Recognition%20Algorithms%20(SPIE%20Fahmy%20ElSherbeeny%20ety%20al%202005).pdf
+df2841a1d2a21a0fc6f14fe53b6124519f3812f9,http://pdfs.semanticscholar.org/df28/41a1d2a21a0fc6f14fe53b6124519f3812f9.pdf,,,https://cs.brown.edu/research/pubs/theses/ugrad/2012/changpinyo.pdf
+df5fe0c195eea34ddc8d80efedb25f1b9034d07d,http://www.andrew.cmu.edu/user/kseshadr/BTAS_2009_Paper_IEEE.pdf,,,http://www.andrew.cmu.edu/user/kseshadr/BTAS_2009_Paper_Camera_Ready.pdf
+df2494da8efa44d70c27abf23f73387318cf1ca8,http://pdfs.semanticscholar.org/df24/94da8efa44d70c27abf23f73387318cf1ca8.pdf,,,
+df674dc0fc813c2a6d539e892bfc74f9a761fbc8,http://pdfs.semanticscholar.org/df67/4dc0fc813c2a6d539e892bfc74f9a761fbc8.pdf,,,http://www.iosrjournals.org/iosr-jce/papers/Vol10-issue6/D01062129.pdf
+da1477b4a65ae5a013e646b57e004f0cd60619a2,,,https://doi.org/10.1109/ICB.2012.6199764,
+dae144d7b02aab7338b15d561ea18854df563cd4,,,,
+dad7b8be074d7ea6c3f970bd18884d496cbb0f91,http://pdfs.semanticscholar.org/dad7/b8be074d7ea6c3f970bd18884d496cbb0f91.pdf,,https://doi.org/10.1007/978-3-319-23234-8_51,http://pralab.diee.unica.it/sites/default/files/demontis15-iciap.pdf
+daf05febbe8406a480306683e46eb5676843c424,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Feng_Robust_Subspace_Segmentation_2014_CVPR_paper.pdf,,,http://www.cis.pku.edu.cn/faculty/vision/zlin/Publications/2014-CVPR-Block-Diagonal.pdf
+da2b2be4c33e221c7f417875a6c5c74043b1b227,,,https://doi.org/10.1109/BTAS.2017.8272712,
+da15344a4c10b91d6ee2e9356a48cb3a0eac6a97,http://pdfs.semanticscholar.org/da15/344a4c10b91d6ee2e9356a48cb3a0eac6a97.pdf,,https://doi.org/10.1016/j.comcom.2016.03.010,http://anrg.usc.edu/www/papers/Mano_ComCom_2016.pdf
+dab795b562c7cc270c9099b925d685bea0abe82a,,,,http://doi.ieeecomputersociety.org/10.1109/TKDE.2014.2382599
+dac6e9d708a9757f848409f25df99c5a561c863c,,,https://doi.org/10.1109/LSP.2014.2334656,
+da5bfddcfe703ca60c930e79d6df302920ab9465,http://pdfs.semanticscholar.org/da5b/fddcfe703ca60c930e79d6df302920ab9465.pdf,,https://doi.org/10.1016/j.imavis.2007.11.004,http://poseidon.csd.auth.gr/papers/PUBLISHED/JOURNAL/pdf/2008/Kotsia_2008_Journal.pdf
+da928ac611e4e14e454e0b69dfbf697f7a09fb38,,,,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477718
+dac2103843adc40191e48ee7f35b6d86a02ef019,http://www.chennaisunday.com/2015DOTNET/Unsupervised%20Celebrity%20Face%20Naming%20in%20Web%20Videos.pdf,,https://doi.org/10.1109/TMM.2015.2419452,
+dae420b776957e6b8cf5fbbacd7bc0ec226b3e2e,http://pdfs.semanticscholar.org/dae4/20b776957e6b8cf5fbbacd7bc0ec226b3e2e.pdf,,,http://www.sfb588.uni-karlsruhe.de/publikationen/2006/P3_Grimm_ISYC06.pdf
+da54a3d6dc5827abba96edf5ec1e6791ad05760b,,,,
+daa02cf195818cbf651ef81941a233727f71591f,http://pdfs.semanticscholar.org/daa0/2cf195818cbf651ef81941a233727f71591f.pdf,,,http://bite.edi.lv/wp-content/uploads/2015/04/ICIPCE2015_Nikisins_paper.pdf
+daa52dd09b61ee94945655f0dde216cce0ebd505,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Yonetani_Recognizing_Micro-Actions_and_CVPR_2016_paper.pdf,,,http://openaccess.thecvf.com/content_cvpr_2016/papers/Yonetani_Recognizing_Micro-Actions_and_CVPR_2016_paper.pdf
+daba8f0717f3f47c272f018d0a466a205eba6395,https://pdfs.semanticscholar.org/daba/8f0717f3f47c272f018d0a466a205eba6395.pdf,,https://doi.org/10.1007/s11263-014-0750-4,http://staff.ustc.edu.cn/~lszhuang/Doc/2014-IJCV-Gao.pdf
+dab51ce14f59d552c0fc5c13b37ca64cae8d0164,,,,
+dae9d0a9b77366f0cd52e38847e47691ee97bc1f,,,https://doi.org/10.1007/s11760-015-0822-0,
+b4d694961d3cde43ccef7d8fcf1061fe0d8f97f3,http://cs.adelaide.edu.au/~javen/pub/ShiLiShe10.pdf,,,http://eprints.pascal-network.org/archive/00007331/01/ShiLiShe10.pdf
+b4ee1b468bf7397caa7396cfee2ab5f5ed6f2807,http://pdfs.semanticscholar.org/f269/c3573b39d26a5ad0754edb67a46ef57816c7.pdf,,,http://arxiv.org/abs/1608.08041
+b44ca5bb74b27d196f281b6741c645f425ff65c1,,,,
+b446bcd7fb78adfe346cf7a01a38e4f43760f363,http://pdfs.semanticscholar.org/b446/bcd7fb78adfe346cf7a01a38e4f43760f363.pdf,,,http://biometrics.cse.msu.edu/Publications/Face/Debetal_LongitudinalStudyOfChildFaceRecognition_ICB2018.pdf
+b417b90fa0c288bbaab1aceb8ebc7ec1d3f33172,https://arxiv.org/pdf/1802.00237v1.pdf,,,http://liusi-group.com/pdf/faceaging-acmmm2017.pdf
+b446cf353744a4b640af88d1848a1b958169c9f2,https://www.computer.org/web/csdl/index/-/csdl/proceedings/fg/2013/5545/00/06553744.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2013.6553744
+b41374f4f31906cf1a73c7adda6c50a78b4eb498,http://isp.uv.es/papers/Laparra11.pdf,,https://doi.org/10.1109/TNN.2011.2106511,http://www.uv.es/gcamps/papers/Laparra11.pdf
+b472f91390781611d4e197564b0016d9643a5518,,,,http://doi.acm.org/10.1145/2382336.2382345
+b4d7ca26deb83cec1922a6964c1193e8dd7270e7,http://pdfs.semanticscholar.org/b4d7/ca26deb83cec1922a6964c1193e8dd7270e7.pdf,,,https://arxiv.org/pdf/1802.02774v1.pdf
+b40290a694075868e0daef77303f2c4ca1c43269,http://pdfs.semanticscholar.org/b402/90a694075868e0daef77303f2c4ca1c43269.pdf,,,http://media.cs.tsinghua.edu.cn/~ahz/papers/%5B2014%5DaasHairModel.pdf
+b4362cd87ad219790800127ddd366cc465606a78,http://pdfs.semanticscholar.org/b436/2cd87ad219790800127ddd366cc465606a78.pdf,,https://doi.org/10.3390/s151026756,http://www.mdpi.com/1424-8220/15/10/26756/pdf
+b47a3c909ee9b099854619054fd00e200b944aa9,,,,http://doi.ieeecomputersociety.org/10.1109/WACV.2015.77
+b42b535fcd0d9bd41a6594a910ea4623e907ceb9,,,https://doi.org/10.1109/ICTAI.2012.153,
+b44f03b5fa8c6275238c2d13345652e6ff7e6ea9,,,https://doi.org/10.1109/GlobalSIP.2017.8309138,
+b4f4b0d39fd10baec34d3412d53515f1a4605222,http://pdfs.semanticscholar.org/eaae/d23a2d94feb2f1c3ff22a25777c7a78f3141.pdf,,https://doi.org/10.1007/978-3-642-15561-1_2,http://www.cs.cmu.edu/~afarhadi/papers/sentence.pdf
+b43b6551ecc556557b63edb8b0dc39901ed0343b,http://pdfs.semanticscholar.org/b43b/6551ecc556557b63edb8b0dc39901ed0343b.pdf,,https://doi.org/10.1109/ICIP.2003.1246815,http://poseidon.csd.auth.gr/papers/PUBLISHED/CONFERENCE/pdf/Buciu_ICIP_2003.pdf
+a255a54b8758050ea1632bf5a88a201cd72656e1,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2013/W16/papers/Tamersoy_Nonparametric_Facial_Feature_2013_CVPR_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2013.160
+a2b9cee7a3866eb2db53a7d81afda72051fe9732,http://pdfs.semanticscholar.org/a2b9/cee7a3866eb2db53a7d81afda72051fe9732.pdf,,,http://apps.cs.utexas.edu/tech_reports/reports/tr/TR-2038.pdf
+a2d04db895dd17f2a8291b300a63604842c06d09,http://www4.comp.polyu.edu.hk/~csdct/Publications/2006/TCSVT.pdf,,https://doi.org/10.1109/TCSVT.2006.877418,http://eprints.bbk.ac.uk/451/1/Binder1.pdf
+a2bd81be79edfa8dcfde79173b0a895682d62329,http://pdfs.semanticscholar.org/a2bd/81be79edfa8dcfde79173b0a895682d62329.pdf,,,https://arxiv.org/pdf/1801.00712v1.pdf
+a216f7863fc6ab15e2bb7a538dfe00924e1da0ab,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2015.7163087
+a2646865d7c3d7fb346cf714caf146de2ea0e68f,,,https://doi.org/10.1109/SMC.2016.7844390,
+a2eb90e334575d9b435c01de4f4bf42d2464effc,http://poseidon.csd.auth.gr/papers/PUBLISHED/CONFERENCE/pdf/Buciu04b.pdf,,,
+a25106a76af723ba9b09308a7dcf4f76d9283589,http://pdfs.semanticscholar.org/a251/06a76af723ba9b09308a7dcf4f76d9283589.pdf,,,http://www.ijcsmc.com/docs/papers/April2014/V3I4201429.pdf
+a200885bf6bfa0493d85e7617e65cdabe30a2dab,,,https://doi.org/10.1109/ICIP.2015.7351272,
+a2d9c9ed29bbc2619d5e03320e48b45c15155195,http://pdfs.semanticscholar.org/a2d9/c9ed29bbc2619d5e03320e48b45c15155195.pdf,,https://doi.org/10.1016/j.cviu.2013.11.002,http://www.researchgate.net/profile/Taner_Eskil/publication/259142970_Facial_Expression_Recognition_Based_on_Anatomy/links/0c96052f37f9490839000000.pdf
+a29a22878e1881d6cbf6acff2d0b209c8d3f778b,http://pdfs.semanticscholar.org/a29a/22878e1881d6cbf6acff2d0b209c8d3f778b.pdf,,https://doi.org/10.1007/978-3-642-37444-9_46,http://vipl.ict.ac.cn/sites/default/files/papers/files/2012_ACCV_zwhuang_Benchmarking%20Still-to-Video%20Face%20Recognition%20via%20Partial%20and%20Local%20Linear%20Discriminant%20Analysis%20on%20COX-S2V%20Dataset.pdf
+a2cc3193ed56ef4cedaaf4402c844df28edb5639,,,https://doi.org/10.1016/j.patrec.2012.01.005,
+a2429cc2ccbabda891cc5ae340b24ad06fcdbed5,http://pdfs.semanticscholar.org/e12a/0f0bca1624965386ac9cf95f711c90441553.pdf,,,https://mindmodeling.org/cogsci2014/papers/485/
+a2b54f4d73bdb80854aa78f0c5aca3d8b56b571d,http://pdfs.semanticscholar.org/a2b5/4f4d73bdb80854aa78f0c5aca3d8b56b571d.pdf,,,http://www.jsnc.caltech.edu/2001/Proceedings/Smith-E.pdf
+a27735e4cbb108db4a52ef9033e3a19f4dc0e5fa,http://pdfs.semanticscholar.org/d965/50536f2ff505f62aec841b3656d940e7f1cf.pdf,,,https://arxiv.org/pdf/1605.09526v4.pdf
+a26379d9993073d51611588c36f12db2b4ecb39a,,,,
+a2002279c36255c2c78cf5ec0c42cbfe32fe011f,,,,
+a2af07176a38fe844b0e2fdf4abae65472628b38,,,https://doi.org/10.1109/ICIP.2014.7026060,
+a2bcfba155c990f64ffb44c0a1bb53f994b68a15,http://ibug.doc.ic.ac.uk/media/uploads/documents/cvprw_photoface.pdf,,https://doi.org/10.1109/CVPRW.2011.5981840,https://ibug.doc.ic.ac.uk/media/uploads/documents/cvprw_photoface.pdf
+a2b76ab614d92f5e71312b530f0b6281d0c500f7,,,https://doi.org/10.1007/s10898-014-0231-x,
+a2fbaa0b849ecc74f34ebb36d1442d63212b29d2,http://pdfs.semanticscholar.org/a2fb/aa0b849ecc74f34ebb36d1442d63212b29d2.pdf,,,http://www.ijarcsse.com/docs/papers/Volume_5/6_June2015/V5I6-0501.pdf
+a2136b13aa0bb4ea4e7fa99a6c657b11dffff563,,,,
+a50b4d404576695be7cd4194a064f0602806f3c4,http://pdfs.semanticscholar.org/a50b/4d404576695be7cd4194a064f0602806f3c4.pdf,,https://doi.org/10.5244/C.20.7,http://www.dia.fi.upm.es/~pcr/publications/bmvc2006.pdf
+a55c0810e6c84f8e51953c0d8fd9971696d205f0,,,,
+a59cdc49185689f3f9efdf7ee261c78f9c180789,http://pdfs.semanticscholar.org/a59c/dc49185689f3f9efdf7ee261c78f9c180789.pdf,,,http://journal.iis.sinica.edu.tw/paper/1/150146-2.pdf?cd=607100EADB21EFEC9
+a5e5094a1e052fa44f539b0d62b54ef03c78bf6a,http://pdfs.semanticscholar.org/a5e5/094a1e052fa44f539b0d62b54ef03c78bf6a.pdf,,,http://vision.soic.indiana.edu/bright-and-dark-workshop-2017/Detection_without_Recognition_for_Redaction.pdf
+a52c72cd8538c62156aaa4d7e5c54946be53b9bb,http://pdfs.semanticscholar.org/a52c/72cd8538c62156aaa4d7e5c54946be53b9bb.pdf,,https://doi.org/10.1016/j.patcog.2013.11.025,http://cvrc.ece.utexas.edu/Publications/Shaohuawan_PR2014.pdf
+a5f200d52b588030c76dcc38c504f65d772a1f5e,,,,
+a5eb36f1e77245dfc9e5c0c03998529331e4c89b,,,https://doi.org/10.1109/BTAS.2014.6996222,
+a5c8fc1ca4f06a344b53dc81ebc6d87f54896722,http://pdfs.semanticscholar.org/a5c8/fc1ca4f06a344b53dc81ebc6d87f54896722.pdf,,,https://arxiv.org/pdf/1705.04282v1.pdf
+a5ade88747fa5769c9c92ffde9b7196ff085a9eb,http://face.cs.kit.edu/download/publications/gehrig-emotiw2013.pdf,,,http://doi.acm.org/10.1145/2531923.2531924
+a56c1331750bf3ac33ee07004e083310a1e63ddc,http://pdfs.semanticscholar.org/de99/1e4c18c21b3cdf6389b439c88709d62f4252.pdf,,https://doi.org/10.1137/130936166,http://www.columbia.edu/~jw2966/papers/SZW13-SJIS.pdf
+a53d13b9110cddb2a5f38b9d7ed69d328e3c6db9,,,https://doi.org/10.1109/TIP.2015.2481327,
+a56b0f76919aabe8b768f5fbaeca412276365aa2,http://www.mingzhao.org/Publications/ZM_2006_FG_3DReconstruction.pdf,,,http://www.researchgate.net/profile/Terence_Sim/publication/4232748_Morphable_face_reconstruction_with_multiple_images/links/00b7d52a6b3ec06ee0000000.pdf
+a5b6a3234e15343d2e5417cff46c0a5f0943521e,,,https://doi.org/10.1109/TNNLS.2014.2321420,
+a5ae44070857aa00e54ea80394a04fda412b335c,,,,
+a54e0f2983e0b5af6eaafd4d3467b655a3de52f4,http://pdfs.semanticscholar.org/a54e/0f2983e0b5af6eaafd4d3467b655a3de52f4.pdf,,,http://ww1.ucmss.com/books/LFS/CSREA2006/ICA3925.pdf
+a5625cfe16d72bd00e987857d68eb4d8fc3ce4fb,http://pdfs.semanticscholar.org/a562/5cfe16d72bd00e987857d68eb4d8fc3ce4fb.pdf,,https://doi.org/10.1007/978-3-319-54427-4_31,https://luannd.github.io/papers/ACCV2016.pdf
+a5bf83f99f71e3840f651fbeef9f334d8e75fd75,http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/1927.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2009.5206584
+a5b9c6aa52f91092b5a8ab04ed1f7b60c0ea5260,,,,http://doi.ieeecomputersociety.org/10.1109/WI-IATW.2006.88
+a5d4cc596446517dfaa4d92276a12d5e1c0a284c,,,https://doi.org/10.1016/j.patrec.2009.06.002,
+a5d76710dc15ebc7d8b4dc976604315f1e2fc3ba,,,,http://doi.ieeecomputersociety.org/10.1109/ICMLA.2011.117
+a546fd229f99d7fe3cf634234e04bae920a2ec33,http://pdfs.semanticscholar.org/a546/fd229f99d7fe3cf634234e04bae920a2ec33.pdf,,,http://journals.plos.org/plosone/article/file?id=10.1371/journal.pone.0120448&type=printable
+a538b05ebb01a40323997629e171c91aa28b8e2f,http://pdfs.semanticscholar.org/a538/b05ebb01a40323997629e171c91aa28b8e2f.pdf,,,http://learning.cs.toronto.edu/~hinton/absps/reluICML.pdf
+a513977bcd8cecd2ed1836bf91b31a80a1ebe27b,,,,
+a57ee5a8fb7618004dd1def8e14ef97aadaaeef5,http://pdfs.semanticscholar.org/f1f5/b603dd34ec26939517348d77df10992798f0.pdf,,,https://infoscience.epfl.ch/record/140745/files/OLEN.pdf
+a57b37549edba625f5955759e259e52eb0af8773,http://learning.cs.toronto.edu/~hinton/absps/ranzato_cvpr2011.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2011.5995710
+a5ae7fe2bb268adf0c1cd8e3377f478fca5e4529,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W08/papers/Sikka_Exemplar_Hidden_Markov_2015_CVPR_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2015.7301350
+a5e436bb88ff28c68f981308faefd6eee48b9c8b,,,,
+a55efc4a6f273c5895b5e4c5009eabf8e5ed0d6a,http://cvrr.ucsd.edu/publications/2014/TawariMartinTrivedi_IEEETITS2014.pdf,,https://doi.org/10.1109/TITS.2014.2300870,
+a591639bfcabc4091ff556364074c58521159ff9,,,,
+a51d5c2f8db48a42446cc4f1718c75ac9303cb7a,http://pdfs.semanticscholar.org/a51d/5c2f8db48a42446cc4f1718c75ac9303cb7a.pdf,,,http://www.lrec-conf.org/proceedings/lrec2016/pdf/591_Paper.pdf
+a57b92ed2d8aa5b41fe513c3e98cbf83b7141741,http://pdfs.semanticscholar.org/a57b/92ed2d8aa5b41fe513c3e98cbf83b7141741.pdf,,,https://infoscience.epfl.ch/record/87190/files/Sorci2005_1414.pdf
+a53f988d16f5828c961553e8efd38fed15e70bcc,,,https://doi.org/10.1109/BTAS.2015.7358787,
+a52d9e9daf2cb26b31bf2902f78774bd31c0dd88,http://pdfs.semanticscholar.org/a52d/9e9daf2cb26b31bf2902f78774bd31c0dd88.pdf,,,https://www2.eecs.berkeley.edu/Pubs/TechRpts/2016/EECS-2016-97.pdf
+a51882cfd0706512bf50e12c0a7dd0775285030d,http://pdfs.semanticscholar.org/a518/82cfd0706512bf50e12c0a7dd0775285030d.pdf,,https://doi.org/10.1007/978-3-319-16808-1_15,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ACCV_2014/pages/PDF/696.pdf
+a5c04f2ad6a1f7c50b6aa5b1b71c36af76af06be,http://pdfs.semanticscholar.org/d788/2e6bd512b190e47be944dc9b58b612f12581.pdf,,https://doi.org/10.1007/978-3-540-75773-3_13,http://ibug.doc.ic.ac.uk/media/uploads/documents/ICCV-HCI07_ValstarPantic-FINAL.pdf
+a503eb91c0bce3a83bf6f524545888524b29b166,http://pdfs.semanticscholar.org/a503/eb91c0bce3a83bf6f524545888524b29b166.pdf,,,http://arxiv.org/abs/1801.09086
+a575009c1c25e27cdba8cc2c6930759a5416f37d,,,,
+a52a69bf304d49fba6eac6a73c5169834c77042d,,,https://doi.org/10.1109/LSP.2017.2789251,
+a52581a7b48138d7124afc7ccfcf8ec3b48359d0,http://pdfs.semanticscholar.org/a525/81a7b48138d7124afc7ccfcf8ec3b48359d0.pdf,,,http://www.jdl.ac.cn/doc/2006/Pose%20and%20Illumination%20Invariant%20Face%20Recognition%20Based%20on%203D%20Face%20Reconstruction.pdf
+bdf5434648356ce22bdbf81d2951e4bb00228e4d,,,,http://doi.ieeecomputersociety.org/10.1109/SNPD.2007.415
+bd572e9cbec095bcf5700cb7cd73d1cdc2fe02f4,http://pdfs.semanticscholar.org/bd57/2e9cbec095bcf5700cb7cd73d1cdc2fe02f4.pdf,,https://doi.org/10.1155/2018/7068349,
+bd6099429bb7bf248b1fd6a1739e744512660d55,http://pdfs.semanticscholar.org/bd60/99429bb7bf248b1fd6a1739e744512660d55.pdf,,,http://www.jmlr.org/papers/volume11/zhang10b/zhang10b.pdf
+bdfcc45cfa495939789b73eec7e6e98a4d7e3f41,,,,
+bd63d56bebbc5d7babc7c47cedcb11b8e3ad199c,,,,
+bd8f3fef958ebed5576792078f84c43999b1b207,http://pdfs.semanticscholar.org/bd8f/3fef958ebed5576792078f84c43999b1b207.pdf,,,http://ceur-ws.org/Vol-1391/120-CR.pdf
+bd9eb65d9f0df3379ef96e5491533326e9dde315,http://pdfs.semanticscholar.org/bd9e/b65d9f0df3379ef96e5491533326e9dde315.pdf,,,https://arxiv.org/pdf/1712.00108v1.pdf
+bd07d1f68486052b7e4429dccecdb8deab1924db,http://pdfs.semanticscholar.org/bd07/d1f68486052b7e4429dccecdb8deab1924db.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICME.2003.1221304
+bd0201b32e7eca7818468f2b5cb1fb4374de75b9,http://pdfs.semanticscholar.org/bd02/01b32e7eca7818468f2b5cb1fb4374de75b9.pdf,,,https://www.irjet.net/archives/V2/i2/Irjet-v2i276.pdf
+bd8e2d27987be9e13af2aef378754f89ab20ce10,http://pdfs.semanticscholar.org/bd8e/2d27987be9e13af2aef378754f89ab20ce10.pdf,,https://doi.org/10.1016/j.patrec.2014.11.004,http://bksy.zju.edu.cn/attachments/tlxjxj/2016-10/99999-1477633998-1097578.pdf
+bd236913cfe07896e171ece9bda62c18b8c8197e,http://pdfs.semanticscholar.org/bd23/6913cfe07896e171ece9bda62c18b8c8197e.pdf,,,http://arxiv.org/abs/1612.00986
+bd26faef48080b5af294b19139c804ffec70825e,,,https://doi.org/10.1007/s11390-015-1526-1,
+bdd203bcd3c41c336c5635fb026a78279d75b4be,,,https://doi.org/10.1109/ICPR.2016.7899761,
+bd13f50b8997d0733169ceba39b6eb1bda3eb1aa,http://pdfs.semanticscholar.org/bd13/f50b8997d0733169ceba39b6eb1bda3eb1aa.pdf,,,https://arxiv.org/pdf/1506.08347v2.pdf
+bd8b7599acf53e3053aa27cfd522764e28474e57,http://www.jdl.ac.cn/doc/2009/iccv09_Learning%20Long%20Term%20Face%20Aging%20Patterns%20from%20Partially%20Dense%20Aging%20Databases.pdf,,https://doi.org/10.1109/ICCV.2009.5459181,
+bd9e0b6a90b51cc19b65f51dacd08ce1a7ccaac5,,,https://doi.org/10.1109/VSMM.2014.7136653,
+bd78a853df61d03b7133aea58e45cd27d464c3cf,http://pdfs.semanticscholar.org/bd78/a853df61d03b7133aea58e45cd27d464c3cf.pdf,,,http://www.ijcsit.com/docs/Volume%204/Vol4Issue6/ijcsit2013040628.pdf
+bd2d7c7f0145028e85c102fe52655c2b6c26aeb5,http://rogerioferis.com/publications/FerisICMR2014.pdf,,,http://doi.acm.org/10.1145/2578726.2578732
+bd25c4ad7471580ed9787eae041b80a3c4fe97bb,,,https://doi.org/10.1016/j.sigpro.2010.01.019,
+bd9157331104a0708aa4f8ae79b7651a5be797c6,http://pdfs.semanticscholar.org/bd91/57331104a0708aa4f8ae79b7651a5be797c6.pdf,,,https://arxiv.org/pdf/1712.09374v1.pdf
+bd66dc891270d858de3adf97d42ed714860ae94d,,,https://doi.org/10.1109/ACPR.2015.7486598,
+bd74c3ca2ff03396109ac2d1131708636bd0d4d3,,,,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.228
+d119443de1d75cad384d897c2ed5a7b9c1661d98,,,https://doi.org/10.1109/ICIP.2010.5650873,
+d185f4f05c587e23c0119f2cdfac8ea335197ac0,http://pdfs.semanticscholar.org/d185/f4f05c587e23c0119f2cdfac8ea335197ac0.pdf,,,https://www.researchgate.net/profile/Michael_Lyons3/publication/220013373_Facial_Expression_Analysis_Modeling_and_Synthesis/links/0912f51006f4029ccc000000.pdf
+d140c5add2cddd4a572f07358d666fe00e8f4fe1,http://pdfs.semanticscholar.org/d140/c5add2cddd4a572f07358d666fe00e8f4fe1.pdf,,https://doi.org/10.1007/978-3-319-16178-5_19,http://ibug.doc.ic.ac.uk/media/uploads/documents/paper_1.pdf
+d1dae2993bdbb2667d1439ff538ac928c0a593dc,http://pdfs.semanticscholar.org/d1da/e2993bdbb2667d1439ff538ac928c0a593dc.pdf,,,http://www.periyaruniversity.ac.in/ijcii/issue/Vol3No1June2013/IJCII%203-1-93.pdf
+d1b5b3e4b803dc4e50c5b80c1bc69c6d98751698,,,https://doi.org/10.1109/LSP.2017.2661983,
+d1775eb9d8898a9f66c28bb92b648c3174caec18,,,,
+d1184939e06dbc3b495c883c53b684c6d6aa9e48,,,,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477669
+d1f58798db460996501f224fff6cceada08f59f9,http://pdfs.semanticscholar.org/d1f5/8798db460996501f224fff6cceada08f59f9.pdf,,,https://www.base-search.net/Record/fbecc46e223bba0d9f59521acb84a78488bbde1783da614b7f26af0046189b1d
+d115c4a66d765fef596b0b171febca334cea15b5,http://pdfs.semanticscholar.org/d115/c4a66d765fef596b0b171febca334cea15b5.pdf,,https://doi.org/10.1007/978-3-319-48680-2_31,http://csvision.swan.ac.uk/uploads/Site/Publication/acivs16jd.pdf
+d1dd80d77655876fb45b9420fe72444c303b219e,,,https://doi.org/10.1109/FG.2011.5771371,
+d122d66c51606a8157a461b9d7eb8b6af3d819b0,http://pdfs.semanticscholar.org/d122/d66c51606a8157a461b9d7eb8b6af3d819b0.pdf,,,http://ijariie.com/AdminUploadPdf/Interpretation_and_Recognition_of_Dynamic_Facial_Movement_from_the_Image_Or_Video_ijariie6240.pdf
+d142e74c6a7457e77237cf2a3ded4e20f8894e1a,http://pdfs.semanticscholar.org/d142/e74c6a7457e77237cf2a3ded4e20f8894e1a.pdf,,,http://airccj.org/CSCP/vol7/csit76404.pdf
+d12bea587989fc78b47584470fd8f689b6ab81d2,,,https://doi.org/10.1109/TIP.2013.2246523,
+d1bd956a8523629ed4e2533b01272f22cea534c6,,,https://doi.org/10.1016/j.patrec.2010.01.021,
+d1082eff91e8009bf2ce933ac87649c686205195,http://epubs.surrey.ac.uk/807279/1/ML_Akyuz_Windeatt_Raymond.pdf,,https://doi.org/10.1007/s10994-014-5477-5,
+d1959ba4637739dcc6cc6995e10fd41fd6604713,http://pdfs.semanticscholar.org/d195/9ba4637739dcc6cc6995e10fd41fd6604713.pdf,,,http://scholarworks.rit.edu/cgi/viewcontent.cgi?article=10689&context=theses
+d1881993c446ea693bbf7f7d6e750798bf958900,http://pdfs.semanticscholar.org/d188/1993c446ea693bbf7f7d6e750798bf958900.pdf,,,https://arxiv.org/pdf/1706.04488v1.pdf
+d18cca5e90884020e748e7fe2d13398d3cbd14fb,,,,
+d11d0151618987ce00a88ceda55d35f0bb89122e,,,,
+d60e3eef429ed2a51bbd806125fa31f5bea072a4,,,https://doi.org/10.1109/HIS.2013.6920481,
+d6e3bd948aae43f7654ea1d9e89d88f20d8cf25f,,,https://doi.org/10.1109/ACPR.2013.98,
+d6cf3cab269877c58a16be011b74e07838d957c2,http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0162.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2009.5206830
+d61578468d267c2d50672077918c1cda9b91429b,http://pdfs.semanticscholar.org/d615/78468d267c2d50672077918c1cda9b91429b.pdf,,,http://ijcsmc.com/docs/papers/September2014/V3I9201452.pdf
+d687fa99586a9ad229284229f20a157ba2d41aea,http://pdfs.semanticscholar.org/d687/fa99586a9ad229284229f20a157ba2d41aea.pdf,,,http://file.scirp.org/pdf/JILSA_2013052209085035.pdf
+d6687d30a264974de234c48ac25616a112736f61,,,,
+d691440030394c2e00a2ab47aba4f8b5fca5f25a,,,https://doi.org/10.1109/ICIP.2016.7532921,
+d647099e571f9af3a1762f895fd8c99760a3916e,http://cbim.rutgers.edu/dmdocuments/CVPR10_Peng_Yang.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2010.5539978
+d6bdc70d259b38bbeb3a78db064232b4b4acc88f,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.27
+d6a9ea9b40a7377c91c705f4c7f206a669a9eea2,http://pdfs.semanticscholar.org/d6a9/ea9b40a7377c91c705f4c7f206a669a9eea2.pdf,,,https://www.base-search.net/Record/896c745aa5c994a4220032c0e15e0c3900c3d5edfa64e874781acea070176566
+d63bd06340dd35590a22222509e455c49165ee13,,,https://doi.org/10.1109/IJCNN.2016.7727234,
+d6ca3dc01de060871839d5536e8112b551a7f9ff,https://arxiv.org/pdf/1802.08310v1.pdf,,https://doi.org/10.1109/BigData.2017.8258154,http://arxiv.org/abs/1802.08310
+d671a210990f67eba9b2d3dda8c2cb91575b4a7a,http://pdfs.semanticscholar.org/d671/a210990f67eba9b2d3dda8c2cb91575b4a7a.pdf,,,http://www.cvc.uab.es/~petia/2008/Piero%20cvcrd'10PierluigiCasale.pdf
+d6a5eb4377e2a67420778eab61b5a89046307bae,,,,http://doi.ieeecomputersociety.org/10.1109/CRV.2014.37
+d61e794ec22a4d4882181da17316438b5b24890f,http://pdfs.semanticscholar.org/d61e/794ec22a4d4882181da17316438b5b24890f.pdf,,,http://www4.comp.polyu.edu.hk/~csajaykr/myhome/papers/ICDP2016.pdf
+d628aabf1a666a875e77c3d3fee857cd25891947,,,https://doi.org/10.1109/SMC.2016.7844663,
+d65b82b862cf1dbba3dee6541358f69849004f30,http://pdfs.semanticscholar.org/d65b/82b862cf1dbba3dee6541358f69849004f30.pdf,,https://doi.org/10.1016/j.cviu.2010.12.008,https://ibug.doc.ic.ac.uk/media/uploads/documents/2_5d_egm_cviu.pdf
+d6102a7ddb19a185019fd2112d2f29d9258f6dec,http://pdfs.semanticscholar.org/d610/2a7ddb19a185019fd2112d2f29d9258f6dec.pdf,,https://doi.org/10.24963/ijcai.2017/520,http://www.ijcai.org/proceedings/2017/0520.pdf
+d6791b98353aa113d79f6fb96335aa6c7ea3b759,,,https://doi.org/10.1109/TNNLS.2017.2648122,
+d6bfa9026a563ca109d088bdb0252ccf33b76bc6,http://pdfs.semanticscholar.org/d6bf/a9026a563ca109d088bdb0252ccf33b76bc6.pdf,,,http://www.cs.berkeley.edu/~akar/IITK_website/cs676/project/report.pdf
+d67dcaf6e44afd30c5602172c4eec1e484fc7fb7,http://pdfs.semanticscholar.org/d67d/caf6e44afd30c5602172c4eec1e484fc7fb7.pdf,,https://doi.org/10.1007/978-3-642-17277-9_8,https://www.researchgate.net/profile/Amnart_Petpon/publication/220844647_Illumination_Normalization_for_Robust_Face_Recognition_Using_Discrete_Wavelet_Transform/links/09e41508e3554471ac000000.pdf
+d6c7092111a8619ed7a6b01b00c5f75949f137bf,http://pdfs.semanticscholar.org/d6c7/092111a8619ed7a6b01b00c5f75949f137bf.pdf,,,http://www.ijcsi.org/papers/IJCSI-10-1-3-9-14.pdf
+d68dbb71b34dfe98dee0680198a23d3b53056394,http://pdfs.semanticscholar.org/d68d/bb71b34dfe98dee0680198a23d3b53056394.pdf,,,http://cvrr.ucsd.edu/publications/2015/Martin_CVPRW_WiCV_2015.pdf
+d6639263381c929ebc579a541045a85aa21680f8,,,,
+d6fb606e538763282e3942a5fb45c696ba38aee6,https://pdfs.semanticscholar.org/d6fb/606e538763282e3942a5fb45c696ba38aee6.pdf,,,http://web4.cs.ucl.ac.uk/uclic/people/n.berthouze/KleinsmithBerthouze12.pdf
+bcee40c25e8819955263b89a433c735f82755a03,http://pdfs.semanticscholar.org/bcee/40c25e8819955263b89a433c735f82755a03.pdf,,https://doi.org/10.1007/978-3-319-20681-3_48,https://kt54.host.cs.st-andrews.ac.uk/Papers/hci2015.pdf
+bcf2710d46941695e421226372397c9544994214,,,https://doi.org/10.1109/ICNC.2015.7378076,
+bc704680b5032eadf78c4e49f548ba14040965bf,http://pdfs.semanticscholar.org/ccbc/c676546a43cd4b714f0c85cbd493f9c61396.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.44
+bc66685acc64fa3c425c0ee6c443d3fa87db7364,,,https://doi.org/10.1109/TMM.2013.2279658,
+bccb35704cdd3f2765b1a3f0296d1bff3be019c1,,,https://doi.org/10.1109/ICMLA.2016.0145,
+bc15a2fd09df7046e7e8c7c5b054d7f06c3cefe9,https://arxiv.org/pdf/1801.08329v1.pdf,,,http://arxiv.org/abs/1801.08329
+bcead1a92744e76c38caaa13159de4abfb81b1d0,,,https://doi.org/10.1109/ICIP.2014.7025310,
+bcc172a1051be261afacdd5313619881cbe0f676,http://www.mirlab.org/conference_papers/International_Conference/ICASSP%202017/pdfs/0002197.pdf,,https://doi.org/10.1109/ICASSP.2017.7952546,
+bcfeac1e5c31d83f1ed92a0783501244dde5a471,http://pdfs.semanticscholar.org/bcfe/ac1e5c31d83f1ed92a0783501244dde5a471.pdf,,https://doi.org/10.1016/j.patcog.2012.06.024,http://mi.eng.cam.ac.uk/~cipolla/publications/article/2013-PR-face-recognition.pdf
+bc12715a1ddf1a540dab06bf3ac4f3a32a26b135,http://pdfs.semanticscholar.org/bc12/715a1ddf1a540dab06bf3ac4f3a32a26b135.pdf,,,http://arxiv.org/abs/1704.02781
+bc9ae4b87888202bfa174ec4e8caee1a087ab994,,,,
+bc910ca355277359130da841a589a36446616262,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Huang_Conditional_High-Order_Boltzmann_ICCV_2015_paper.pdf,,https://doi.org/10.1109/ICCV.2015.485,http://openaccess.thecvf.com/content_iccv_2015/papers/Huang_Conditional_High-Order_Boltzmann_ICCV_2015_paper.pdf
+bcd162862b6d3a56b474039b2588a8f948d59fe0,,,,
+bcc5cbbb540ee66dc8b9a3453b506e895d8395de,http://pdfs.semanticscholar.org/bcc5/cbbb540ee66dc8b9a3453b506e895d8395de.pdf,,https://doi.org/10.1007/978-3-319-16817-3_20,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ACCV_2014/pages/PDF/518.pdf
+bc871497626afb469d25c4975aa657159269aefe,http://ir.ia.ac.cn/bitstream/173211/10560/1/Adaptive%20Learning%20Algorithm%20for%20Pattern%20Classification.pdf,,,
+bc955487a0b8d2fae3f2f44320389a12ae28f0f5,,,,
+bc2852fa0a002e683aad3fb0db5523d1190d0ca5,http://pdfs.semanticscholar.org/bc28/52fa0a002e683aad3fb0db5523d1190d0ca5.pdf,,,https://arxiv.org/pdf/1702.04455v1.pdf
+bcb79e3ac69508060c8cba105f6a8622eb929ab1,,,,
+bc866c2ced533252f29cf2111dd71a6d1724bd49,http://pdfs.semanticscholar.org/bc86/6c2ced533252f29cf2111dd71a6d1724bd49.pdf,,https://doi.org/10.3390/s141019561,http://ftp.ncbi.nlm.nih.gov/pub/pmc/d2/6d/sensors-14-19561.PMC4239878.pdf
+bcb99d5150d792001a7d33031a3bd1b77bea706b,http://pdfs.semanticscholar.org/bcb9/9d5150d792001a7d33031a3bd1b77bea706b.pdf,,https://doi.org/10.1016/j.patrec.2016.01.002,http://arxiv.org/pdf/1509.05366v1.pdf
+bc98027b331c090448492eb9e0b9721e812fac84,http://pdfs.semanticscholar.org/bc98/027b331c090448492eb9e0b9721e812fac84.pdf,,,http://file.scirp.org/pdf/JILSA20120400003_32881282.pdf
+bca39960ba46dc3193defe0b286ee0bea4424041,,,https://doi.org/10.1016/j.patrec.2009.05.018,
+bc6a7390135bf127b93b90a21b1fdebbfb56ad30,,,https://doi.org/10.1109/TIFS.2017.2766039,
+bcac3a870501c5510df80c2a5631f371f2f6f74a,http://pdfs.semanticscholar.org/bcac/3a870501c5510df80c2a5631f371f2f6f74a.pdf,,,https://eng.ucmerced.edu/people/cyang35/CVPR13/cvpr13_hallucination_v12.pdf
+ae73f771d0e429a74b04a6784b1b46dfe98f53e4,,,,http://doi.ieeecomputersociety.org/10.1109/ICPR.2014.326
+aed321909bb87c81121c841b21d31509d6c78f69,http://pdfs.semanticscholar.org/aed3/21909bb87c81121c841b21d31509d6c78f69.pdf,,,http://www.sersc.org/journals/IJHIT/vol9_no11_2016/22.pdf
+aecb15e3e9191eb135bdba2426967bfac3f068db,http://www.cvip.uofl.edu/wwwcvip/research/publications/Pub_Pdf/2010/3D%20Face%20Rcovery%20From%20Intensities_2010.pdf,,https://doi.org/10.1109/ICIP.2010.5648990,http://www.cvip.louisville.edu/wwwcvip/research/publications/Pub_Pdf/2010/3D%20Face%20Rcovery%20From%20Intensities_2010.pdf
+ae936628e78db4edb8e66853f59433b8cc83594f,http://pdfs.semanticscholar.org/ae93/6628e78db4edb8e66853f59433b8cc83594f.pdf,,,http://arxiv.org/abs/1406.4444
+ae0765ebdffffd6e6cc33c7705df33b7e8478627,http://pdfs.semanticscholar.org/ae07/65ebdffffd6e6cc33c7705df33b7e8478627.pdf,,,https://arxiv.org/pdf/1711.08624v1.pdf
+ae425a2654a1064c2eda29b08a492c8d5aab27a2,,,https://doi.org/10.23919/MVA.2017.7986845,
+aefc7c708269b874182a5c877fb6dae06da210d4,http://pdfs.semanticscholar.org/f6f4/60d4a4a5b4c077ab3ac7a972f52af17a4241.pdf,,,http://papers.nips.cc/paper/4730-deep-learning-of-invariant-features-via-simulated-fixations-in-video
+aebb9649bc38e878baef082b518fa68f5cda23a5,http://pdfs.semanticscholar.org/aebb/9649bc38e878baef082b518fa68f5cda23a5.pdf,,,http://www.iaeng.org/publication/IMECS2011/IMECS2011_pp563-568.pdf
+ae89e464576209b1082da38e0cee7aeabd03d932,,,https://doi.org/10.1007/s00521-005-0017-7,
+aece472ba64007f2e86300cc3486c84597f02ec7,http://doras.dcu.ie/439/1/ieee_smap_2007.pdf,,,http://doi.ieeecomputersociety.org/10.1109/SMAP.2007.43
+ae7604b1840753e9c2e1ab7a97e02f91a9d81860,,,https://doi.org/10.1007/s10586-016-0535-3,
+ae5bb02599244d6d88c4fe466a7fdd80aeb91af4,http://pdfs.semanticscholar.org/ae5b/b02599244d6d88c4fe466a7fdd80aeb91af4.pdf,,,http://www.cs.colostate.edu/evalfacerec/papers/iccv784public.pdf
+ae18ccb35a1a5d7b22f2a5760f706b1c11bf39a9,http://pdfs.semanticscholar.org/ae18/ccb35a1a5d7b22f2a5760f706b1c11bf39a9.pdf,,,http://cecas.clemson.edu/~stb/students/willimon_phd_dissertation.pdf
+aeeea6eec2f063c006c13be865cec0c350244e5b,http://pdfs.semanticscholar.org/aeee/a6eec2f063c006c13be865cec0c350244e5b.pdf,,,http://ibug.doc.ic.ac.uk/media/uploads/documents/EMOTION-2010-ValstarPantic-CAMERA.pdf
+aeb36fac7516753a14c3c690f352de78e70f8c6e,,,,http://doi.ieeecomputersociety.org/10.1109/T-AFFC.2013.13
+ae9257f3be9f815db8d72819332372ac59c1316b,http://pdfs.semanticscholar.org/ae92/57f3be9f815db8d72819332372ac59c1316b.pdf,,,https://www.researchgate.net/profile/Jeffrey_Cohn/publication/7868102_Deciphering_the_enigmatic_face_the_importance_of_facial_dynamics_in_interpreting_subtle_facial_expressions/links/09e4151226a7a402c6000000.pdf
+ae89b7748d25878c4dc17bdaa39dd63e9d442a0d,http://hal.inria.fr/docs/00/87/00/59/PDF/Ozerov_et_al_ICIP_2013.pdf,,https://doi.org/10.1109/ICIP.2013.6738618,http://hal.archives-ouvertes.fr/docs/00/87/00/59/PDF/Ozerov_et_al_ICIP_2013.pdf
+ae1de0359f4ed53918824271c888b7b36b8a5d41,http://pdfs.semanticscholar.org/ae1d/e0359f4ed53918824271c888b7b36b8a5d41.pdf,,,http://www.cs.rug.nl/~alext/PAPERS/VISAPP13/finpaint.pdf
+ae4390873485c9432899977499c3bf17886fa149,http://pdfs.semanticscholar.org/ae43/90873485c9432899977499c3bf17886fa149.pdf,,,http://airccj.org/CSCP/vol6/csit65804.pdf
+ae753fd46a744725424690d22d0d00fb05e53350,http://pdfs.semanticscholar.org/ae75/3fd46a744725424690d22d0d00fb05e53350.pdf,,,http://chenlab.ece.cornell.edu/people/Andy/publications/ECCV2012_ClothingAttributes.pdf
+aea977a3b5556957ed5fb3ef21685ee84921eaa3,,,https://doi.org/10.1007/s12193-017-0256-9,
+aed6af12148b43e4a24ee6e2bc3604ca59bd99a5,,,https://doi.org/10.1109/TIP.2017.2717505,
+aea4128ba18689ff1af27b90c111bbd34013f8d5,http://pdfs.semanticscholar.org/aea4/128ba18689ff1af27b90c111bbd34013f8d5.pdf,,https://doi.org/10.1007/978-3-319-10605-2_40,http://ss.sysu.edu.cn/~py/papers/ECCV-KSP.pdf
+ae8240095c9cca2c395f173fece2f46277b94929,,,https://doi.org/10.1016/j.neucom.2017.06.045,
+ae4e2c81c8a8354c93c4b21442c26773352935dd,http://pdfs.semanticscholar.org/ae4e/2c81c8a8354c93c4b21442c26773352935dd.pdf,,https://doi.org/10.1016/j.patrec.2014.12.003,http://poseidon.csd.auth.gr/papers/PUBLISHED/JOURNAL/pdf/2015/1_A_Iosifidis_PRL_onKELM.pdf
+ae85c822c6aec8b0f67762c625a73a5d08f5060d,http://tamaraberg.com/papers/yamaguchi2014retrieving.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2014.2353624
+ae62c0a4b74ce672e8103dbf6d344d82c59f216c,,,,
+ae96fc36c89e5c6c3c433c1163c25db1359e13ea,,,https://doi.org/10.1007/s10489-013-0485-x,
+ae71f69f1db840e0aa17f8c814316f0bd0f6fbbf,http://pdfs.semanticscholar.org/ae71/f69f1db840e0aa17f8c814316f0bd0f6fbbf.pdf,,https://doi.org/10.1016/j.chb.2017.02.029,http://orca-mwe.cf.ac.uk/98713/1/Fagerstr%C3%B8m_etal_2017.pdf
+d8c9bad8d07ae4196027dfb8343b9d9aefb130ff,,,https://doi.org/10.1007/s00138-017-0848-0,
+d893f75206b122973cdbf2532f506912ccd6fbe0,http://pdfs.semanticscholar.org/d893/f75206b122973cdbf2532f506912ccd6fbe0.pdf,,,http://www.enggjournals.com/ijcse/doc/IJCSE11-03-01-125.pdf
+d861c658db2fd03558f44c265c328b53e492383a,http://www.cs.washington.edu/research/VACE/Multimedia/Jia_EMBC2014_final.pdf,,https://doi.org/10.1109/EMBC.2014.6943699,http://homes.cs.washington.edu/~shapiro/Multimedia/Jia_EMBC2014_final.pdf
+d85813b58e10a35703df3a8acf41aafe4b6e1dd2,,,,
+d8b99eada922bd2ce4e20dc09c61a0e3cc640a62,,,https://doi.org/10.1109/IJCNN.2014.6889675,
+d878a67b2ef6a0a5dec72db15291f12419040ab1,,,https://doi.org/10.1109/IPTA.2016.7821012,
+d84a48f7d242d73b32a9286f9b148f5575acf227,http://pdfs.semanticscholar.org/d84a/48f7d242d73b32a9286f9b148f5575acf227.pdf,,,https://arxiv.org/pdf/1801.08390v1.pdf
+d8e5d94c3c8688f0ca0ee656c79847c7df04c77d,,,https://doi.org/10.1007/s12193-015-0187-2,
+d8f0bda19a345fac81a1d560d7db73f2b4868836,http://pdfs.semanticscholar.org/d8f0/bda19a345fac81a1d560d7db73f2b4868836.pdf,,,http://www.ee.ucr.edu/~amitrc/THESIS/thesis-hasan.pdf
+d82b93f848d5442f82154a6011d26df8a9cd00e7,http://pdfs.semanticscholar.org/d82b/93f848d5442f82154a6011d26df8a9cd00e7.pdf,,,http://interscience.in/ijic_vol1iss3/62-67.pdf
+d8f72f50cbe6e0fa4025bc990b7e8a52cc6bbad9,,,,
+d8722ffbca906a685abe57f3b7b9c1b542adfa0c,http://pdfs.semanticscholar.org/d872/2ffbca906a685abe57f3b7b9c1b542adfa0c.pdf,,,http://mattijs.tijsepijs.nl/publications/ghijsen04.pdf
+d8896861126b7fd5d2ceb6fed8505a6dff83414f,http://pdfs.semanticscholar.org/d889/6861126b7fd5d2ceb6fed8505a6dff83414f.pdf,,https://doi.org/10.5220/0005308303920399,http://www.ai.rug.nl/~mwiering/GROUP/ARTICLES/VISAPP_2015_Rotation.pdf
+d855791bc23b4aa8e751d6a4e2ae7f5566a991e8,,,,http://doi.acm.org/10.1145/3012941
+d83d2fb5403c823287f5889b44c1971f049a1c93,http://pdfs.semanticscholar.org/d83d/2fb5403c823287f5889b44c1971f049a1c93.pdf,,,https://www2.bc.edu/~russeljm/publications/Sick%20Face%202013.pdf
+d8288322f32ee4501cef5a9b667e5bb79ebd7018,,,https://doi.org/10.1016/j.patcog.2011.12.018,
+d8b568392970b68794a55c090c4dd2d7f90909d2,http://pdfs.semanticscholar.org/d8b5/68392970b68794a55c090c4dd2d7f90909d2.pdf,,,http://www.ece.cmu.edu/research/publications/2005/CMU-ECE-2005-007.pdf
+d83ae5926b05894fcda0bc89bdc621e4f21272da,http://pdfs.semanticscholar.org/d83a/e5926b05894fcda0bc89bdc621e4f21272da.pdf,,,http://www.cs.utexas.edu/~grauman/research/theses/MS-JoshKelle2017.pdf
+d84e3254e3c4f4c17484643b8c3abdf5b0dbb761,,,,
+d8bf148899f09a0aad18a196ce729384a4464e2b,http://pdfs.semanticscholar.org/d8bf/148899f09a0aad18a196ce729384a4464e2b.pdf,,,https://rucore.libraries.rutgers.edu/rutgers-lib/34083/PDF/1/
+d83db03f8eae6dba91ce044c640c6b35ccf541f3,,,,
+d850aff9d10a01ad5f1d8a1b489fbb3998d0d80e,http://pdfs.semanticscholar.org/d850/aff9d10a01ad5f1d8a1b489fbb3998d0d80e.pdf,,,http://vision.ics.uci.edu/papers/Ghiasi_THESIS_2016/Ghiasi_THESIS_2016.pdf
+d8c9ce0bd5e4b6d1465402a760845e23af5ac259,,,https://doi.org/10.1109/ITSC.2015.380,
+ab7923968660d04434271559c4634790dc68c58e,,,https://doi.org/10.1109/ICIP.2015.7351111,
+abf0aa1d8869d87f4ef62e2da058ccfb4bf46d18,,,https://doi.org/10.1007/s11042-015-2536-2,
+ab8f9a6bd8f582501c6b41c0e7179546e21c5e91,http://pdfs.semanticscholar.org/ab8f/9a6bd8f582501c6b41c0e7179546e21c5e91.pdf,,,http://www.soe.ucsc.edu/~milanfar/publications/journal/FaceVerification_LFW_Sep8.pdf
+abfba1dc9a9991897acd0e0d3d4ef9d4aef4151c,,,https://doi.org/10.1109/FUZZ-IEEE.2014.6891864,
+ab68837d09986c592dcab7d08ee6dfb40e02916f,,,https://doi.org/10.1007/978-3-319-11289-3_23,
+abce06a96a7c3095bfc36eed8779d89263769b85,http://ai.pku.edu.cn/aiwebsite/research.files/collected%20papers%20-%20others/Analyzing%20Asymmetry%20Biometric%20in%20the%20Frequency%20Domain%20for%20Face%20Recognition.pdf,,https://doi.org/10.1109/ICASSP.2005.1415564,https://pdfs.semanticscholar.org/abce/06a96a7c3095bfc36eed8779d89263769b85.pdf
+aba9acb4a607071af10684f2cfbdefa0507a4e9a,,,https://doi.org/10.1016/j.patcog.2016.06.010,
+aba770a7c45e82b2f9de6ea2a12738722566a149,http://pure.qub.ac.uk/portal/files/49719304/Face_Recognition_in_the_Scrambled.pdf,,https://doi.org/10.1109/TIFS.2016.2555792,https://pure.qub.ac.uk/portal/files/49719304/Face_Recognition_in_the_Scrambled.pdf
+ab0f9bc35b777eaefff735cb0dd0663f0c34ad31,http://faculty.ucmerced.edu/snewsam/papers/Yang_ICPR14_SemiSupervisedLearning.pdf,,https://doi.org/10.1109/ICPR.2014.696,
+abb396490ba8b112f10fbb20a0a8ce69737cd492,http://pdfs.semanticscholar.org/abb3/96490ba8b112f10fbb20a0a8ce69737cd492.pdf,,https://doi.org/10.1007/978-3-642-01793-3_13,https://www.researchgate.net/profile/Zhiming_Liu10/publication/221383547_Robust_Face_Recognition_Using_Color_Information/links/542c32850cf27e39fa9338cf.pdf
+ab703224e3d6718bc28f7b9987eb6a5e5cce3b01,,,https://doi.org/10.1631/FITEE.1500235,
+abac0fa75281c9a0690bf67586280ed145682422,http://pdfs.semanticscholar.org/abac/0fa75281c9a0690bf67586280ed145682422.pdf,,,http://www.cs.columbia.edu/~neeraj/base/papers/nk_phd_thesis2011.pdf
+abe4c1d6b964c4f5443b0334a44f0b03dd1909f4,,,https://doi.org/10.1109/IJCNN.2017.7965950,
+ab6776f500ed1ab23b7789599f3a6153cdac84f7,http://pdfs.semanticscholar.org/ab67/76f500ed1ab23b7789599f3a6153cdac84f7.pdf,,,http://www.ijser.org/researchpaper/A-Survey-on-Various-Facial-Expression-Techniques.pdf
+ab2c07c9867243fad2d66fa6aeabfb780433f319,,,,http://doi.acm.org/10.1145/2967878.2967887
+ab540c5be9f7ef688d3cd76765fcb794b92531fb,,,,
+ab87dfccb1818bdf0b41d732da1f9335b43b74ae,http://pdfs.semanticscholar.org/ab87/dfccb1818bdf0b41d732da1f9335b43b74ae.pdf,,,http://arxiv.org/pdf/1406.1943.pdf
+abc1ef570bb2d7ea92cbe69e101eefa9a53e1d72,http://pdfs.semanticscholar.org/abc1/ef570bb2d7ea92cbe69e101eefa9a53e1d72.pdf,,https://doi.org/10.3166/ria.31.11-39,https://perso.telecom-paristech.fr/bloch/papers/RIA2017_YANG.pdf
+ab00ea1aa2f81fbe139b4632ec3682dfb7312ef0,,,,http://doi.ieeecomputersociety.org/10.1109/WACV.2014.6835994
+abbc6dcbd032ff80e0535850f1bc27c4610b0d45,,,https://doi.org/10.1109/ICIP.2015.7350983,
+ab1dfcd96654af0bf6e805ffa2de0f55a73c025d,http://pdfs.semanticscholar.org/ab1d/fcd96654af0bf6e805ffa2de0f55a73c025d.pdf,,https://doi.org/10.1016/j.dsp.2010.03.004,http://www.researchgate.net/profile/Zahir_Hussain3/publication/223065453_Higher_order_orthogonal_moments_for_invariant_facial_expression_recognition/links/53ed365b0cf26b9b7dc4644c.pdf
+abeda55a7be0bbe25a25139fb9a3d823215d7536,http://pdfs.semanticscholar.org/abed/a55a7be0bbe25a25139fb9a3d823215d7536.pdf,,,http://arxiv.org/abs/1604.08164
+abf573864b8fbc0f1c491ca60b60527a3e75f0f5,,,https://doi.org/10.1007/s11042-014-2204-y,
+ab427f0c7d4b0eb22c045392107509451165b2ba,http://cs.uky.edu/~zach/assets/papers/li2012learning.pdf,,https://doi.org/10.1109/ICIP.2012.6467426,
+ab1900b5d7cf3317d17193e9327d57b97e24d2fc,http://pdfs.semanticscholar.org/ab19/00b5d7cf3317d17193e9327d57b97e24d2fc.pdf,,https://doi.org/10.1016/j.sigpro.2011.04.020,http://nlab.ee.tokushima-u.ac.jp/nishio/Pub-Data/PAPER/P088.pdf?origin=publication_detail
+ab133af7ec2726f712dd049213e6a27449d28c78,,,,
+ab8fb278db4405f7db08fa59404d9dd22d38bc83,http://pdfs.semanticscholar.org/ab8f/b278db4405f7db08fa59404d9dd22d38bc83.pdf,,,http://vision.unige.ch/publications/postscript/2011/Soleymani_thesis_2011.pdf
+e52272f92fa553687f1ac068605f1de929efafc2,,,https://doi.org/10.1016/j.engappai.2017.06.003,
+e5737ffc4e74374b0c799b65afdbf0304ff344cb,http://pdfs.semanticscholar.org/e573/7ffc4e74374b0c799b65afdbf0304ff344cb.pdf,,https://doi.org/10.1016/j.patcog.2013.05.009,http://cs.nju.edu.cn/zhouzh/zhouzh.files/publication/pr13eye.pdf
+e585dc6c810264d9f07e38c412379734a920714e,,,,http://doi.acm.org/10.1145/2531923.2531926
+e51f1ee5535017e10a5f77100ff892509ec6b221,,,https://doi.org/10.1109/ICSMC.2007.4413825,
+e50ee29ca12028cb903cd498bb9cacd41bd5ce3a,http://pdfs.semanticscholar.org/e50e/e29ca12028cb903cd498bb9cacd41bd5ce3a.pdf,,https://doi.org/10.1016/j.patcog.2014.07.013,http://www.researchgate.net/profile/Jaeik_Jo/publication/266563101_Single-view-based_3D_facial_reconstruction_method_robust_against_pose_variations/links/543c56be0cf20af5cfbf5c7c.pdf
+e510f2412999399149d8635a83eca89c338a99a1,http://pdfs.semanticscholar.org/e510/f2412999399149d8635a83eca89c338a99a1.pdf,,,http://www.sciencepubco.com/index.php/JACST/article/download/484/414
+e57108607d94aa158eb22ae50540ae6080e48d4b,,,,http://doi.ieeecomputersociety.org/10.1109/ICMI.2002.1167051
+e577484e5c3ecc6f073faf124468c8ae2f827a0f,,,,
+e59813940c5c83b1ce63f3f451d03d34d2f68082,http://pdfs.semanticscholar.org/e598/13940c5c83b1ce63f3f451d03d34d2f68082.pdf,,https://doi.org/10.1155/2008/542918,http://ro.uow.edu.au/cgi/viewcontent.cgi?article=9458&context=infopapers
+e5b301ee349ba8e96ea6c71782295c4f06be6c31,http://pdfs.semanticscholar.org/e5b3/01ee349ba8e96ea6c71782295c4f06be6c31.pdf,,,http://research.microsoft.com/pubs/196173/paper44_hotos13_han_philipose.pdf
+e5e9e7cae71b13aabb30f6fe1f97cd153400be6c,,,,
+e5c687c8c84f1cdb9d9fbc9b6ff7518ff4d71056,,,https://doi.org/10.1109/TNN.2011.2170220,
+e51e94cc3c74adf0cccfac3a8035a10016ce8a3b,,,,
+e57ce6244ec696ff9aa42d6af7f09eed176153a8,,,https://doi.org/10.1109/ICIP.2015.7351449,
+e5342233141a1d3858ed99ccd8ca0fead519f58b,http://pdfs.semanticscholar.org/e534/2233141a1d3858ed99ccd8ca0fead519f58b.pdf,,,http://ijarcsee.org/index.php/IJARCSEE/article/download/324/290
+e50ec6b6d1c189edc127eb403c41a64f34fc0a6c,,,,http://doi.ieeecomputersociety.org/10.1109/ICME.2014.6890187
+e52be9a083e621d9ed29c8e9914451a6a327ff59,http://pdfs.semanticscholar.org/e52b/e9a083e621d9ed29c8e9914451a6a327ff59.pdf,,,https://pure.uva.nl/ws/files/955352/147507_salah10affective.pdf
+e51927b125640bfc47bbf1aa00c3c026748c75bd,,,,http://doi.acm.org/10.1145/2647868.2655015
+e55f7250f3b8ee722814f8809620a851c31e5b0e,,,https://doi.org/10.3182/20130902-3-CN-3020.00030,
+e5fbaeddbf98c667ec7c5575bda2158a36b55409,,,,http://doi.ieeecomputersociety.org/10.1109/EMS.2011.25
+e57e1dce81e888eb07054923602e35bfb5ef3eb8,,,https://doi.org/10.1109/IROS.2012.6385544,
+e546572f8205570de4518bcf8d0345465e51d7a0,,,https://doi.org/10.1109/ICIP.2015.7351318,
+e5799fd239531644ad9270f49a3961d7540ce358,http://chenlab.ece.cornell.edu/people/ruogu/publications/ICIP13_Kinship.pdf,,https://doi.org/10.1109/ICIP.2013.6738614,http://users.cis.fiu.edu/~rfang/publications/ICIP13_Kinship.pdf
+e5eb7fa8c9a812d402facfe8e4672670541ed108,http://pdfs.semanticscholar.org/e5eb/7fa8c9a812d402facfe8e4672670541ed108.pdf,,https://doi.org/10.4304/jmm.6.5.404-415,http://ojs.academypublisher.com/index.php/jmm/article/download/jmm0605404415/3824
+e27b2cabdfdd6bf3ffb3ebce1b4c55adb1e80c8f,,,,http://doi.ieeecomputersociety.org/10.1109/ICPR.2010.225
+e27c92255d7ccd1860b5fb71c5b1277c1648ed1e,http://pdfs.semanticscholar.org/e27c/92255d7ccd1860b5fb71c5b1277c1648ed1e.pdf,,https://doi.org/10.1016/j.patrec.2017.10.027,http://arxiv.org/abs/1710.10695
+e2f91b21f3755914c193a546ba8718acf81c845b,,,,
+e23bc755f7e161d524fcc33b7d927d67dd4a5e76,,,,
+e200c3f2849d56e08056484f3b6183aa43c0f13a,http://pdfs.semanticscholar.org/e200/c3f2849d56e08056484f3b6183aa43c0f13a.pdf,,https://doi.org/10.1016/j.patcog.2013.07.017,http://www.cnel.ufl.edu/files/1376330205.pdf
+e26a7e343fe109e2b52d1eeea5b02dae836f3502,,,https://doi.org/10.1109/ACCESS.2017.2676238,
+e2b3aae594035e58f72125e313e92c7c4cc9d5bb,,,https://doi.org/10.1007/s00138-014-0597-2,
+e2d265f606cd25f1fd72e5ee8b8f4c5127b764df,http://pdfs.semanticscholar.org/e2d2/65f606cd25f1fd72e5ee8b8f4c5127b764df.pdf,,,https://arxiv.org/pdf/1802.08362v1.pdf
+e2f78d2f75a807b89a13115a206da4661361fa71,,,https://doi.org/10.1109/TMM.2017.2696825,
+f45d6a7bdb6741242da6192d18c97ac39e6308db,http://media.cs.tsinghua.edu.cn/~imagevision/papers/%5B2008%5D%5Bfg%5DPerson-Specific%20Face%20Recognition%20in%20Unconstrained%20Environments%20a%20Combination%20of%20Offline%20and%20Online%20Learning.pdf,,https://doi.org/10.1109/AFGR.2008.4813353,http://media.cs.tsinghua.edu.cn/~ahz/papers/%5B2008%5D%5Bfg%5DPerson-Specific%20Face%20Recognition%20in%20Unconstrained%20Environments%20a%20Combination%20of%20Offline%20and%20Online%20Learning.pdf
+f437b3884a9e5fab66740ca2a6f1f3a5724385ea,http://pdfs.semanticscholar.org/f437/b3884a9e5fab66740ca2a6f1f3a5724385ea.pdf,,https://doi.org/10.1109/ICIP.2002.1037956,http://anadolu.sdsu.edu/abut/biometrics/darpa.pdf
+f41d7f891a1fc4569fe2df66e67f277a1adef229,,,https://doi.org/10.1109/ICIP.2015.7351552,
+f4411787688ca40466ee059ec64bf56d746733c1,,,https://doi.org/10.1007/s12652-012-0107-1,
+f402e088dddfaad7667bd4def26092d05f247206,,,https://doi.org/10.1109/TITS.2015.2475721,
+f43eeb578e0ca48abfd43397bbd15825f94302e4,http://pdfs.semanticscholar.org/f43e/eb578e0ca48abfd43397bbd15825f94302e4.pdf,,,http://www.med.upenn.edu/uep/user_documents/dfd5.pdf
+f4465454811acb2021a46d84d94fc88e2dda00a6,,,https://doi.org/10.1007/s11042-007-0184-x,
+f4f9697f2519f1fe725ee7e3788119ed217dca34,http://pdfs.semanticscholar.org/f4f9/697f2519f1fe725ee7e3788119ed217dca34.pdf,,,https://www.cc.gatech.edu/~irfan/p/2017-Deeb-Swihart-SELLCSCI.pdf
+f4c01fc79c7ead67899f6fe7b79dd1ad249f71b0,http://pdfs.semanticscholar.org/f4c0/1fc79c7ead67899f6fe7b79dd1ad249f71b0.pdf,,https://doi.org/10.1016/j.cviu.2010.12.006,http://www.researchgate.net/profile/Shervin_Arashloo/publication/220135084_Pose-invariant_face_recognition_by_matching_on_multi-resolution_MRFs_linked_by_supercoupling_transform/links/00b4953bd3b1f3b991000000.pdf
+f4373f5631329f77d85182ec2df6730cbd4686a9,http://pdfs.semanticscholar.org/f437/3f5631329f77d85182ec2df6730cbd4686a9.pdf,,,https://arxiv.org/pdf/1712.01661v1.pdf
+f4210309f29d4bbfea9642ecadfb6cf9581ccec7,http://pdfs.semanticscholar.org/f421/0309f29d4bbfea9642ecadfb6cf9581ccec7.pdf,,,http://mediatum.ub.tum.de/doc/1238173/130433.pdf
+f41e80f941a45b5880f4c88e5bf721872db3400f,,,,http://doi.ieeecomputersociety.org/10.1109/IC3.2017.8284359
+f4c32b8bcf753033835c14a66e9c04b06bf086a3,,,,
+f4fc77660665ae58993065c6a336367e9a6c85f7,,,https://doi.org/10.1016/j.patcog.2012.12.009,
+f47404424270f6a20ba1ba8c2211adfba032f405,http://pdfs.semanticscholar.org/f474/04424270f6a20ba1ba8c2211adfba032f405.pdf,,,http://www.ijetae.com/files/Volume2Issue5/IJETAE_0512_41.pdf
+f4ebbeb77249d1136c355f5bae30f02961b9a359,http://pdfs.semanticscholar.org/f4eb/beb77249d1136c355f5bae30f02961b9a359.pdf,,,http://www.cs.cmu.edu/afs/cs.cmu.edu/usr/mitchell/ftp/pubs/fgvc.pdf
+f4aed1314b2d38fd8f1b9d2bc154295bbd45f523,http://pdfs.semanticscholar.org/f4ae/d1314b2d38fd8f1b9d2bc154295bbd45f523.pdf,,,https://arxiv.org/pdf/1709.04744v1.pdf
+f4003cbbff3b3d008aa64c76fed163c10d9c68bd,,,https://doi.org/10.1016/j.neucom.2016.08.055,
+f449c85b8ba5fa67ead341c7ad4ec396f4ab2dd6,,,,http://doi.ieeecomputersociety.org/10.1109/TKDE.2015.2448547
+f423d8be5e13d9ef979debd3baf0a1b2e1d3682f,,,https://doi.org/10.1016/j.imavis.2015.11.004,
+f486624efa750d718a670fba3c7f21b1c84ebaeb,,,https://doi.org/10.1109/TCYB.2016.2581861,
+f49aebe58d30241f12c1d7d9f4e04b6e524d7a45,,,https://doi.org/10.1109/ICB.2016.7550074,
+f3fcaae2ea3e998395a1443c87544f203890ae15,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553791.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2013.6553791
+f3015be0f9dbc1a55b6f3dc388d97bb566ff94fe,http://pdfs.semanticscholar.org/f301/5be0f9dbc1a55b6f3dc388d97bb566ff94fe.pdf,,https://doi.org/10.1007/978-3-642-35136-5_5,http://www.researchgate.net/profile/Xiaohua_Xie/publication/256453652_A_Study_on_the_Effective_Approach_to_Illumination-Invariant_Face_Recognition_Based_on_a_Single_Image/links/00b49522ae66fb26e3000000.pdf
+f3d9e347eadcf0d21cb0e92710bc906b22f2b3e7,http://pdfs.semanticscholar.org/f3d9/e347eadcf0d21cb0e92710bc906b22f2b3e7.pdf,,,http://sibgrapi.sid.inpe.br/col/sid.inpe.br/sibgrapi/2016/09.13.14.02/doc/nose_pose_camera_ready.pdf?choice=briefTitleAuthorMisc&languagebutton=en&metadatarepository=sid.inpe.br/sibgrapi/2016/09.13.14.02.37&requiredmirror=sid.inpe.br/banon/2001/03.30.15.38.24&searchmirror=sid.inpe.br/banon/2001/03.30.15.38.24&searchsite=sibgrapi.sid.inpe.br:80
+f3f77b803b375f0c63971b59d0906cb700ea24ed,http://pdfs.semanticscholar.org/f3f7/7b803b375f0c63971b59d0906cb700ea24ed.pdf,,,http://www.aece.ro/archive/2009/3/2009_3_12.pdf
+f355e54ca94a2d8bbc598e06e414a876eb62ef99,http://pdfs.semanticscholar.org/f355/e54ca94a2d8bbc598e06e414a876eb62ef99.pdf,,https://doi.org/10.1016/j.imavis.2016.09.001,https://arxiv.org/pdf/1409.5114v2.pdf
+f345a05353f5784b64eefb7785661cc0be519521,,,,
+f3e005e567f16fa55c54b4c1b17f4538d799c7de,,,,
+f3b84a03985de3890b400b68e2a92c0a00afd9d0,,,,
+f35a493afa78a671b9d2392c69642dcc3dd2cdc2,http://pdfs.semanticscholar.org/f35a/493afa78a671b9d2392c69642dcc3dd2cdc2.pdf,,https://doi.org/10.1007/978-3-319-46493-0_16,https://arxiv.org/pdf/1607.07262v1.pdf
+ebedc841a2c1b3a9ab7357de833101648281ff0e,http://pdfs.semanticscholar.org/ebed/c841a2c1b3a9ab7357de833101648281ff0e.pdf,,https://doi.org/10.1016/j.imavis.2015.01.004,http://ibug.doc.ic.ac.uk/media/uploads/documents/1-s2.0-s0262885615000116-main.pdf
+eb3c45e78acee0824c8f7d997c6104d74e7213a8,,,,http://doi.ieeecomputersociety.org/10.1109/iThings/CPSCom.2011.116
+eb38f20eaa1b849cabec99815883390f84daf279,,,https://doi.org/10.1016/j.patcog.2008.11.026,
+eb526174fa071345ff7b1fad1fad240cd943a6d7,http://pdfs.semanticscholar.org/eb52/6174fa071345ff7b1fad1fad240cd943a6d7.pdf,,https://doi.org/10.1049/iet-bmt.2017.0079,http://publications.idiap.ch/downloads/papers/2017/Mohammadi_IETBIOMETRICS_2017.pdf
+eb9867f5efc98d3203ce1037f9a8814b0d15d0aa,,,https://doi.org/10.1109/ICIP.2014.7026008,
+eb6ee56e085ebf473da990d032a4249437a3e462,http://www-scf.usc.edu/~chuntinh/doc/Age_Gender_Classification_APSIPA_2017.pdf,,https://doi.org/10.1109/APSIPA.2017.8282221,
+ebb1c29145d31c4afa3c9be7f023155832776cd3,http://pdfs.semanticscholar.org/ebb1/c29145d31c4afa3c9be7f023155832776cd3.pdf,,,http://cg.cs.tsinghua.edu.cn/people/~Yongjin/PLOS_ONE_2014.pdf
+eb309b11fd2b8d28cbaf7a72a49df14630ed696a,,,,
+eb9312458f84a366e98bd0a2265747aaed40b1a6,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_ICIP_2007/pdfs/0400473.pdf,,https://doi.org/10.1109/ICIP.2007.4380057,
+eb716dd3dbd0f04e6d89f1703b9975cad62ffb09,http://pdfs.semanticscholar.org/eb71/6dd3dbd0f04e6d89f1703b9975cad62ffb09.pdf,,,http://www.cs.utexas.edu/~grauman/research/theses/PHD-YongJaeLee.pdf
+eb02daee558e483427ebcf5d1f142f6443a6de6b,,,,http://doi.acm.org/10.1145/2911996.2912019
+ebc2a3e8a510c625353637e8e8f07bd34410228f,,,https://doi.org/10.1109/TIP.2015.2502485,
+ebabd1f7bc0274fec88a3dabaf115d3e226f198f,http://pdfs.semanticscholar.org/ebab/d1f7bc0274fec88a3dabaf115d3e226f198f.pdf,,https://doi.org/10.1007/978-3-319-54526-4_12,http://slsp.kaist.ac.kr/paperdata/Driver%20drowsiness%20detection.pdf
+eb5c1e526fe2d17778c68f60c874c3da0129fabd,,,https://doi.org/10.1109/VCIP.2015.7457856,
+ebce3f5c1801511de9e2e14465482260ba5933cc,,,,http://doi.acm.org/10.1145/3126594.3126640
+ebb9d53668205c5797045ba130df18842e3eadef,http://pdfs.semanticscholar.org/ebb9/d53668205c5797045ba130df18842e3eadef.pdf,,,https://arxiv.org/pdf/1710.08518v1.pdf
+eb240521d008d582af37f0497f12c51f4bab16c8,,,https://doi.org/10.1023/A:1012365806338,
+ebb3d5c70bedf2287f9b26ac0031004f8f617b97,,,https://doi.org/10.1109/MSP.2017.2764116,
+ebeb0546efeab2be404c41a94f586c9107952bc3,,,,http://doi.acm.org/10.1145/2733373.2806290
+eb86c6642040944abc997848a32e631d1f25a2f5,,,,
+eb7b387a3a006609b89ca5ed0e6b3a1d5ecb5e5a,http://pdfs.semanticscholar.org/eb7b/387a3a006609b89ca5ed0e6b3a1d5ecb5e5a.pdf,,,
+ebd5df2b4105ba04cef4ca334fcb9bfd6ea0430c,http://pdfs.semanticscholar.org/f727/b58b84ccd8e7ed51a90ccc913d704b451191.pdf,,,https://arxiv.org/pdf/1403.6888v2.pdf
+eb9bcf9e3f8856c92e7720b63b7e846df37de0c3,,,,
+ebfdb4842c69177b65022f00d3d038d645f3260b,,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2006.154
+eb48170a6e1e020f002a6a0a808c1934d5c760b8,,,,
+ebf204e0a3e137b6c24e271b0d55fa49a6c52b41,http://pdfs.semanticscholar.org/ebf2/04e0a3e137b6c24e271b0d55fa49a6c52b41.pdf,,,http://liu.diva-portal.org/smash/get/diva2:1071737/FULLTEXT01.pdf
+eb87151fd2796ff5b4bbcf1906d41d53ac6c5595,,,https://doi.org/10.1109/ICPR.2016.7899719,
+c71f36c9376d444075de15b1102b4974481be84d,http://pdfs.semanticscholar.org/c71f/36c9376d444075de15b1102b4974481be84d.pdf,,,http://ethos.bl.uk/OrderDetails.do?uin=uk.bl.ethos.538649
+c7e4c7be0d37013de07b6d829a3bf73e1b95ad4e,http://pdfs.semanticscholar.org/c7e4/c7be0d37013de07b6d829a3bf73e1b95ad4e.pdf,,,http://airccse.org/journal/jma/5513ijma05.pdf
+c7cd490e43ee4ff81e8f86f790063695369c2830,,,https://doi.org/10.1109/VCIP.2016.7805472,
+c7b58827b2d07ece676271ae0425e369e3bd2310,,,https://doi.org/10.1142/S0218001415560042,
+c74aba9a096379b3dbe1ff95e7af5db45c0fd680,http://pdfs.semanticscholar.org/c74a/ba9a096379b3dbe1ff95e7af5db45c0fd680.pdf,,,http://www3.cis.fiu.edu/conferences/mipr09/Uploaded_Paper/39_Khademi_Neuro-fuzzy%20analysis%20of%20FAUs2.pdf
+c7c5f0fe1fcaf3787c7f78f7dc62f3497dcfdf3c,http://pdfs.semanticscholar.org/c7c5/f0fe1fcaf3787c7f78f7dc62f3497dcfdf3c.pdf,,,http://aisel.aisnet.org/pacis2014/325
+c7685fdbee2d96ef056a89ab4fa43df5aeae7ba7,http://staff.science.uva.nl/~nicu/publications/SMC04.pdf,,https://doi.org/10.1109/ICSMC.2004.1398369,
+c7f752eea91bf5495a4f6e6a67f14800ec246d08,http://pdfs.semanticscholar.org/c7f7/52eea91bf5495a4f6e6a67f14800ec246d08.pdf,,,http://studentnet.cs.manchester.ac.uk/resources/library/thesis_abstracts/MSc15/FullText/Rodrigues-CrefedaFaviola-diss.pdf
+c758b9c82b603904ba8806e6193c5fefa57e9613,http://pdfs.semanticscholar.org/c758/b9c82b603904ba8806e6193c5fefa57e9613.pdf,,https://doi.org/10.1007/978-3-319-49409-8_40,http://adas.cvc.uab.es/task-cv2016/papers/0014.pdf
+c73199c180e5c01a5d53c19b8e079b0f6d07d618,,,,
+c7c03324833ba262eeaada0349afa1b5990c1ea7,http://pdfs.semanticscholar.org/c7c0/3324833ba262eeaada0349afa1b5990c1ea7.pdf,,https://doi.org/10.1007/978-3-319-16634-6_31,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ACCV_2014/pages/workshop14/pdffiles/w14-p7.pdf
+c78fdd080df01fff400a32fb4cc932621926021f,http://pdfs.semanticscholar.org/c78f/dd080df01fff400a32fb4cc932621926021f.pdf,,,http://www.jsoftware.us/index.php?a=show&c=index&catid=86&id=1261&m=content
+c74b1643a108939c6ba42ae4de55cb05b2191be5,http://pdfs.semanticscholar.org/c74b/1643a108939c6ba42ae4de55cb05b2191be5.pdf,,,http://xm2vtsdb.ee.surrey.ac.uk/CVSSP/Publications/papers/WangZK-ICARN-2008.pdf
+c0723e0e154a33faa6ff959d084aebf07770ffaf,http://pdfs.semanticscholar.org/c072/3e0e154a33faa6ff959d084aebf07770ffaf.pdf,,https://doi.org/10.1007/978-3-540-76390-1_76,http://www.murase.nuie.nagoya-u.ac.jp/seikaweb/paper/2007/E07-conference-ttakahashi-2.pdf
+c03f48e211ac81c3867c0e787bea3192fcfe323e,http://pdfs.semanticscholar.org/c03f/48e211ac81c3867c0e787bea3192fcfe323e.pdf,,https://doi.org/10.21437/Interspeech.2016-1071,http://www.isca-speech.org/archive/Interspeech_2016/pdfs/1071.PDF
+c043f8924717a3023a869777d4c9bee33e607fb5,http://pdfs.semanticscholar.org/c043/f8924717a3023a869777d4c9bee33e607fb5.pdf,,,ftp://ftp.ncbi.nlm.nih.gov/pub/pmc/28/13/PLoS_One_2010_Mar_22_5(3)_e9790.tar.gz
+c03e01717b2d93f04cce9b5fd2dcfd1143bcc180,http://pdfs.semanticscholar.org/c03e/01717b2d93f04cce9b5fd2dcfd1143bcc180.pdf,,https://doi.org/10.1007/978-3-642-37331-2_48,http://vipl.ict.ac.cn/sites/default/files/papers/files/2012_ACCV_xwzhao_Locality-constrained%20Active%20Appearance%20Model.pdf
+c051ea35a0d490c00e2b3b0a42eb6b7682d8e947,,,,
+c0270a57ad78da6c3982a4034ffa195b9e932fda,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.131
+c0f9fae059745e50658d9605bd8875fc3a2d0b4b,,,,http://doi.ieeecomputersociety.org/10.1109/BIGCOMP.2014.6741422
+c0ff7dc0d575658bf402719c12b676a34271dfcd,http://pdfs.semanticscholar.org/c0ff/7dc0d575658bf402719c12b676a34271dfcd.pdf,,https://doi.org/10.1007/978-3-540-74260-9_36,http://www.cs.toronto.edu/~aliyari/papers/ICIAR.pdf
+c02847a04a99a5a6e784ab580907278ee3c12653,http://pdfs.semanticscholar.org/c028/47a04a99a5a6e784ab580907278ee3c12653.pdf,,,http://web.engr.oregonstate.edu/~sinisa/students/Theses/MS_ChenyuWang.pdf
+c035c193eed5d72c7f187f0bc880a17d217dada0,http://pdfs.semanticscholar.org/c035/c193eed5d72c7f187f0bc880a17d217dada0.pdf,,,http://www.cse.msu.edu/~rossarun/pubs/ChenLGGP_SPIE2013.pdf
+c0d1d9a585ef961f1c8e6a1e922822811181615c,http://pdfs.semanticscholar.org/c0d1/d9a585ef961f1c8e6a1e922822811181615c.pdf,,,https://www.sciencedirect.com/science/article/pii/S0925492717300835
+c0945953506a3d531331caf6c2b2a6d027e319f0,,,https://doi.org/10.1002/cav.49,
+c06b13d0ec3f5c43e2782cd22542588e233733c3,,,https://doi.org/10.1016/j.cviu.2016.02.001,
+c0a8c0e6ccf9882969ba0eda0b898affa015437b,http://stanford.edu/~verroios/papers/waldo.pdf,,,http://ilpubs.stanford.edu:8090/1137/1/ERMultiItemTechRep.pdf
+c0cdaeccff78f49f4604a6d263dc6eb1bb8707d5,http://pdfs.semanticscholar.org/c0cd/aeccff78f49f4604a6d263dc6eb1bb8707d5.pdf,,,http://worldcomp-proceedings.com/proc/p2016/IPC6061.pdf
+c0b02be66a5a1907e8cfb8117de50f80b90a65a8,,,,http://doi.acm.org/10.1145/2808492.2808523
+c00f402b9cfc3f8dd2c74d6b3552acbd1f358301,http://pdfs.semanticscholar.org/c00f/402b9cfc3f8dd2c74d6b3552acbd1f358301.pdf,,,http://arxiv.org/pdf/1608.00207v1.pdf
+c089c7d8d1413b54f59fc410d88e215902e51638,http://nlpr-web.ia.ac.cn/2011papers/gjhy/gh122.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2011.5995681
+c04843867ebbba4c3cac4febf9c500ba28ae66fc,,,,
+c068263bb09968fe69c053906279b16532b778f4,http://www.researchgate.net/profile/Mahdi_Bejani/publication/257435889_Audiovisual_emotion_recognition_using_ANOVA_feature_selection_method_and_multi-classifier_neural_networks/links/0c960529aee6234edd000000.pdf,,https://doi.org/10.1007/s00521-012-1228-3,https://www.researchgate.net/profile/Mahdi_Bejani/publication/257435889_Audiovisual_emotion_recognition_using_ANOVA_feature_selection_method_and_multi-classifier_neural_networks/links/0c960529aee6234edd000000.pdf
+c0ee89dc2dad76147780f96294de9e421348c1f4,http://pdfs.semanticscholar.org/c0ee/89dc2dad76147780f96294de9e421348c1f4.pdf,,,https://peerj.com/articles/1502.pdf
+c0ca6b992cbe46ea3003f4e9b48f4ef57e5fb774,http://pdfs.semanticscholar.org/c0ca/6b992cbe46ea3003f4e9b48f4ef57e5fb774.pdf,,,http://crcv.ucf.edu/ICCV13-Action-Workshop/index.files/NotebookPapers13/Paper%2036%20(Supplementary).pdf
+c007ee91452b6c99c351b149cb8673f945bf0dd4,,,,
+c0d5c3aab87d6e8dd3241db1d931470c15b9e39d,http://pdfs.semanticscholar.org/facb/edfe90956c720f70aab14767b5e25dcc6478.pdf,,https://doi.org/10.1016/j.cviu.2016.03.013,http://www.vision.ee.ethz.ch/en/publications/papers/articles/eth_biwi_01301.pdf
+c05441dd1bc418fb912a6fafa84c0659a6850bf0,http://pdfs.semanticscholar.org/c054/41dd1bc418fb912a6fafa84c0659a6850bf0.pdf,,https://doi.org/10.1049/iet-cvi.2014.0200,http://digital.cs.usu.edu/~xqi/Promotion/IETCV.FR.14.pdf
+ee46e391288dd3bc3e71cb47715a83dacb9d2907,,,,
+ee72673c0394d0fff2b3d8372d8a9401867b8e13,,,,
+ee897a827bfc03e4682fb77018c27ec29a063d2c,,,,
+eee8a37a12506ff5df72c402ccc3d59216321346,http://pdfs.semanticscholar.org/eee8/a37a12506ff5df72c402ccc3d59216321346.pdf,,,http://nl.ijs.si/isjt08/IS-LTC08-Proceedings.pdf
+eefecac463ebfc0694b9831e842b574f3954fed6,,,,http://doi.ieeecomputersociety.org/10.1109/SNPD.2013.15
+eedb2c34c36017b9c5aa6ce8bff2ab152e713cee,,,https://doi.org/10.1007/s00521-008-0225-z,
+ee6e4324123b99d94a7a23d9bddf026f39903693,,,https://doi.org/10.1109/ISMICT.2013.6521709,
+ee5fe44871f5e36998a2fdfb20a511374cdd3877,,,,
+ee03ed3a8a9a8b6bf35dda832c34160e62893f92,,,,
+eef432868e85b95a7d9d9c7b8c461637052318ca,,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2013.236
+eef0be751e9aca7776d83f25c8ffdc1a18201fd8,,,https://doi.org/10.1016/j.patcog.2016.10.015,
+ee6b503ab512a293e3088fdd7a1c893a77902acb,http://pdfs.semanticscholar.org/ee6b/503ab512a293e3088fdd7a1c893a77902acb.pdf,,,http://ijcai.org/papers13/Papers/IJCAI13-407.pdf
+ee2217f9d22d6a18aaf97f05768035c38305d1fa,,,https://doi.org/10.1109/APSIPA.2015.7415501,
+ee18e29a2b998eddb7f6663bb07891bfc7262248,http://or.nsfc.gov.cn/bitstream/00001903-5/13750/1/1000007562815.pdf,,https://doi.org/10.1109/TNN.2011.2152852,https://www.researchgate.net/profile/Zizhu_Fan/publication/224242477_Local_Linear_Discriminant_Analysis_Framework_Using_Sample_Neighbors/links/0fcfd50a0e920c6043000000.pdf
+eef725f4130ee326954e84e5f4ddf487da63c94e,,,,
+eeb6d084f9906c53ec8da8c34583105ab5ab8284,http://pdfs.semanticscholar.org/eeb6/d084f9906c53ec8da8c34583105ab5ab8284.pdf,,,http://cdn.intechweb.org/pdfs/6067.pdf
+eefe8bd6384f565d2e42881f1f9a468d1672989d,,,,
+ee7093e91466b81d13f4d6933bcee48e4ee63a16,http://pdfs.semanticscholar.org/ee70/93e91466b81d13f4d6933bcee48e4ee63a16.pdf,,https://doi.org/10.1007/978-3-319-16634-6_44,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ACCV_2014/pages/workshop16/pdffiles/w16-p10.pdf
+ee461d060da58d6053d2f4988b54eff8655ecede,http://pdfs.semanticscholar.org/ee46/1d060da58d6053d2f4988b54eff8655ecede.pdf,,https://doi.org/10.1016/S0031-3203(98)00066-1,http://www.dcs.qmw.ac.uk/~sgg/papers/pr98.ps.gz
+eed05da2c0ab7d2b0a3c665a5368efa81b185099,,,https://doi.org/10.1016/j.neucom.2014.05.020,
+eefb8768f60c17d76fe156b55b8a00555eb40f4d,http://pdfs.semanticscholar.org/eefb/8768f60c17d76fe156b55b8a00555eb40f4d.pdf,,,http://www.cameronmusco.com/personal_site/pdfs/subspace_scores.pdf
+eeaeca3a601d65d2d978bf3da43ab42fa5e08ed2,,,https://doi.org/10.1109/FSKD.2016.7603398,
+eefdb69ac2c461e7791603d0f8c02ff3c8600adc,,,https://doi.org/10.1016/j.jvcir.2017.02.007,
+ee65cee5151928c63d3ef36fcbb582fabb2b6d2c,,,https://doi.org/10.1109/LSP.2016.2602538,
+eed1dd2a5959647896e73d129272cb7c3a2e145c,http://s3.amazonaws.com/kvaccaro.com/documents/UIST16.pdf,,,http://doi.acm.org/10.1145/2984511.2984573
+ee92d36d72075048a7c8b2af5cc1720c7bace6dd,http://pdfs.semanticscholar.org/ee92/d36d72075048a7c8b2af5cc1720c7bace6dd.pdf,,https://doi.org/10.1109/ICIP.2002.1039897,http://amp.ece.cmu.edu/Publication/Deepak/icip2002_deepak.pdf
+ee418372b0038bd3b8ae82bd1518d5c01a33a7ec,http://pdfs.semanticscholar.org/ee41/8372b0038bd3b8ae82bd1518d5c01a33a7ec.pdf,,,http://cseweb.ucsd.edu/~jmcauley/cse255/reports/wi15/Yuen_Kevan.pdf
+ee744ea13a0bbeba5de85ca3c75c9749054835e7,,,,
+ee6f9a0f6eb5b615a36acc1444f4df1359cc2a63,,,,
+c94b3a05f6f41d015d524169972ae8fd52871b67,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Yan_The_Fastest_Deformable_2014_CVPR_paper.pdf,,,http://www.cbsr.ia.ac.cn/users/jjyan/Fastest_DPM.pdf
+c9424d64b12a4abe0af201e7b641409e182babab,http://pdfs.semanticscholar.org/c942/4d64b12a4abe0af201e7b641409e182babab.pdf,,https://doi.org/10.3390/a9040088,http://www.mdpi.com/1999-4893/9/4/88/pdf
+c91103e6612fa7e664ccbc3ed1b0b5deac865b02,http://pdfs.semanticscholar.org/c911/03e6612fa7e664ccbc3ed1b0b5deac865b02.pdf,,https://doi.org/10.1007/978-3-642-24085-0_60,http://www.researchgate.net/profile/Roberto_DAmbrosio/publication/221356704_Automatic_Facial_Expression_Recognition_Using_Statistical-Like_Moments/links/0912f50ebffed591b4000000.pdf
+c903af0d69edacf8d1bff3bfd85b9470f6c4c243,http://pdfs.semanticscholar.org/c903/af0d69edacf8d1bff3bfd85b9470f6c4c243.pdf,,https://doi.org/10.1016/j.patcog.2016.03.018,http://www.cs.tut.fi/~iosifidi/files/journal/2016_PR_NystromNPT.pdf?dl=0
+c95cd36779fcbe45e3831ffcd3314e19c85defc5,https://arxiv.org/pdf/1703.04853v1.pdf,,https://doi.org/10.1109/ICIP.2017.8296448,http://arxiv.org/abs/1703.04853
+c98def5f9d0c6ae519fe0aeebe5378f65b14e496,,,https://doi.org/10.1117/12.2064730,
+c92e36689ef561df726a7ae861d9c166c3934908,,,https://doi.org/10.1109/ICPR.2016.7900140,
+c9e955cb9709f16faeb0c840f4dae92eb875450a,http://pdfs.semanticscholar.org/c9e9/55cb9709f16faeb0c840f4dae92eb875450a.pdf,,https://doi.org/10.1007/11552499_38,http://www.peihuali.org/36870334.pdf
+c907104680ad53bdc673f2648d713e4d26335825,,,,http://doi.acm.org/10.1145/3077286.3077304
+c9c2de3628be7e249722b12911bebad84b567ce6,,,https://doi.org/10.1016/j.patcog.2017.06.028,
+c9be1001706bcdd8b35fa9cae733c592e90c7ec3,,,,http://doi.ieeecomputersociety.org/10.1109/DICTA.2010.54
+c92bb26238f6e30196b0c4a737d8847e61cfb7d4,http://pdfs.semanticscholar.org/c92b/b26238f6e30196b0c4a737d8847e61cfb7d4.pdf,,,https://arxiv.org/pdf/1803.01555v1.pdf
+c9527df51e63b56c61cbf16f83d1a3c5c2c82499,,,,http://doi.acm.org/10.1145/2072298.2072311
+c980443ca996402de4b5e5424f872acda0368831,http://homepage.tudelft.nl/19j49/Publications_files/Final_CVPR10.pdf,,https://doi.org/10.1109/CVPRW.2010.5543270,http://homepage.tudelft.nl/19j49/Software_files/Final_CVPR10.pdf
+c9832564d5dc601113b4d80e5a05ede6fee9f7dd,,,https://doi.org/10.1109/ISBA.2017.7947687,
+c9f588d295437009994ddaabb64fd4e4c499b294,http://pdfs.semanticscholar.org/c9f5/88d295437009994ddaabb64fd4e4c499b294.pdf,,,http://www.aaai.org/ocs/index.php/WS/AAAIW13/paper/download/7043/6741
+c92da368a6a886211dc759fe7b1b777a64d8b682,http://pdfs.semanticscholar.org/c92d/a368a6a886211dc759fe7b1b777a64d8b682.pdf,,,http://www.ijsat.com/admin/download/11-01-02-017.pdf
+c90427085909029afd2af01d1967e80b78e01b88,,,https://doi.org/10.1109/ACCESS.2017.2753830,
+fc1e37fb16006b62848def92a51434fc74a2431a,http://pdfs.semanticscholar.org/fc1e/37fb16006b62848def92a51434fc74a2431a.pdf,,,https://arxiv.org/pdf/1803.08450v1.pdf
+fcd3d557863e71dd5ce8bcf918adbe22ec59e62f,,,,http://doi.acm.org/10.1145/2502081.2502148
+fc5bdb98ff97581d7c1e5eb2d24d3f10714aa192,http://pdfs.semanticscholar.org/fc5b/db98ff97581d7c1e5eb2d24d3f10714aa192.pdf,,,http://arxiv.org/abs/1503.07274
+fc20149dfdff5fdf020647b57e8a09c06e11434b,http://pdfs.semanticscholar.org/fc20/149dfdff5fdf020647b57e8a09c06e11434b.pdf,,,http://dl.acm.org/citation.cfm?id=1314538
+fc516a492cf09aaf1d319c8ff112c77cfb55a0e5,http://pdfs.semanticscholar.org/fc51/6a492cf09aaf1d319c8ff112c77cfb55a0e5.pdf,,,http://ceur-ws.org/Vol-1957/CoSeCiVi17_paper_2.pdf
+fcbec158e6a4ace3d4311b26195482b8388f0ee9,http://pdfs.semanticscholar.org/fcbe/c158e6a4ace3d4311b26195482b8388f0ee9.pdf,,,http://www.cfar.umd.edu/~shaohua/papers/zhou05hivp.pdf
+fcd945eb1cf5f87eefa444660dbdf94f5bb0092e,,,,
+fcd3d69b418d56ae6800a421c8b89ef363418665,http://pdfs.semanticscholar.org/fcd3/d69b418d56ae6800a421c8b89ef363418665.pdf,,,http://bilgin.esme.org/Portals/0/PhD/EffectsOfAgingOverFacialFeatureAnalysisAndFaceRecognition.pdf
+fcd77f3ca6b40aad6edbd1dab9681d201f85f365,http://pdfs.semanticscholar.org/fcd7/7f3ca6b40aad6edbd1dab9681d201f85f365.pdf,,,https://homes.cs.washington.edu/~yoshi/papers/theses/miro-enev-dissertation.pdf
+fc00d634797c5378ca9a441c2d4ce88761d3c7eb,,,,
+fc798314994bf94d1cde8d615ba4d5e61b6268b6,http://pdfs.semanticscholar.org/fc79/8314994bf94d1cde8d615ba4d5e61b6268b6.pdf,,,http://www.cse.msu.edu/biometrics/Publications/Thesis/UnsangFaceRec_PhD09.pdf
+fc45e44dd50915957e498186618f7a499953c6be,http://www.pami.sjtu.edu.cn/people/wangxh/Gabor%20Filter/Quaternion%20Correlation%20Filters%20for%20Face%20Recognition%20in%20Wavelet%20Domain.pdf,,https://doi.org/10.1109/ICASSP.2005.1415347,http://www.perso.telecom-paristech.fr/~chollet/Biblio/Articles/Domaines/BIOMET/Face/Kumar/Quaternion01415347.pdf
+fc23a386c2189f221b25dbd0bb34fcd26ccf60fa,http://pdfs.semanticscholar.org/fc23/a386c2189f221b25dbd0bb34fcd26ccf60fa.pdf,,https://doi.org/10.1007/978-3-642-15555-0_12,http://www.cs.sfu.ca/~mori/research/papers/wang_eccv10.pdf
+fc68c5a3ab80d2d31e6fd4865a7ff2b4ab66ca9f,http://pdfs.semanticscholar.org/fc68/c5a3ab80d2d31e6fd4865a7ff2b4ab66ca9f.pdf,,https://doi.org/10.1007/978-3-319-18422-7_52,http://kio.eti.pg.gda.pl/publications/files/BDAS2015_preprint.pdf
+fc8990088e0f1f017540900bc3f5a4996192ff05,,,https://doi.org/10.1109/ICIP.2017.8296314,
+fcb97ede372c5bddde7a61924ac2fd29788c82ce,,,https://doi.org/10.1109/TSMCC.2012.2192727,
+fc2bad3544c7c8dc7cd182f54888baf99ed75e53,http://pdfs.semanticscholar.org/fc2b/ad3544c7c8dc7cd182f54888baf99ed75e53.pdf,,https://doi.org/10.1007/978-3-642-40602-7_32,http://lrs.icg.tugraz.at/pubs/koestinger_dagm_13.pdf
+fc5538e60952f86fff22571c334a403619c742c3,,http://ieeexplore.ieee.org/document/6460202/,,
+fc970d7694b1d2438dd101a146d2e4f29087963e,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.86
+fcf8bb1bf2b7e3f71fb337ca3fcf3d9cf18daa46,http://pdfs.semanticscholar.org/fcf8/bb1bf2b7e3f71fb337ca3fcf3d9cf18daa46.pdf,,,http://arxiv.org/abs/1102.2748
+fcbf808bdf140442cddf0710defb2766c2d25c30,http://pdfs.semanticscholar.org/fcbf/808bdf140442cddf0710defb2766c2d25c30.pdf,,,https://arxiv.org/pdf/1605.03324v1.pdf
+fcb276874cd932c8f6204f767157420500c64bd0,,,https://doi.org/10.1007/978-3-319-04960-1_3,
+fd4ac1da699885f71970588f84316589b7d8317b,http://pdfs.semanticscholar.org/fd4a/c1da699885f71970588f84316589b7d8317b.pdf,,,http://arxiv.org/abs/1405.0601
+fdf533eeb1306ba418b09210387833bdf27bb756,http://pdfs.semanticscholar.org/fdf5/33eeb1306ba418b09210387833bdf27bb756.pdf,,,http://machinelearning.wustl.edu/mlpapers/paper_files/AISTATS2012_Romera-ParedesABP12.pdf
+fdfaf46910012c7cdf72bba12e802a318b5bef5a,http://pdfs.semanticscholar.org/fdfa/f46910012c7cdf72bba12e802a318b5bef5a.pdf,,,http://www.ee.ucr.edu/~amitrc/publications/spm2015.pdf
+fdd19fee07f2404952e629cc7f7ffaac14febe01,,,https://doi.org/10.1109/CISP-BMEI.2016.7852754,
+fd9feb21b3d1fab470ff82e3f03efce6a0e67a1f,http://pdfs.semanticscholar.org/fd9f/eb21b3d1fab470ff82e3f03efce6a0e67a1f.pdf,,,http://essay.utwente.nl/71570/1/Hillerstrom_MA_SCS.pdf
+fdca08416bdadda91ae977db7d503e8610dd744f,http://pdfs.semanticscholar.org/fdca/08416bdadda91ae977db7d503e8610dd744f.pdf,,,http://cms.ieis.tue.nl/ksera/documents/KSERA_D3.1.pdf
+fdbc602a749ef070a7ac11c78dc8d468c0b60154,,,https://doi.org/10.1049/iet-ipr.2015.0519,
+fdd80b2139ff1b9becb17badd053b9a4a6a243f2,,,,
+fd126e36337999640a0b623611b5fec8de390d46,,,,
+fd96432675911a702b8a4ce857b7c8619498bf9f,http://pdfs.semanticscholar.org/fd96/432675911a702b8a4ce857b7c8619498bf9f.pdf,,,https://arxiv.org/pdf/1707.09364v1.pdf
+fd7b6c77b46420c27725757553fcd1fb24ea29a8,http://pdfs.semanticscholar.org/fd7b/6c77b46420c27725757553fcd1fb24ea29a8.pdf,,,http://www.cs.dartmouth.edu/reports/TR2013-726.pdf
+fdb33141005ca1b208a725796732ab10a9c37d75,http://pdfs.semanticscholar.org/fdb3/3141005ca1b208a725796732ab10a9c37d75.pdf,,https://doi.org/10.1515/amcs-2016-0032,https://rua.ua.es/dspace/bitstream/10045/56228/1/2016_Pujol_etal_AMCS.pdf
+fd60166c2619c0db5e5159a3dfe9068aa4f1b32f,,,,
+fddca9e7d892a97073ada88eec39e03e44b8c46a,,,,http://doi.ieeecomputersociety.org/10.1109/ICPR.2014.305
+fde0180735699ea31f6c001c71eae507848b190f,http://pdfs.semanticscholar.org/fde0/180735699ea31f6c001c71eae507848b190f.pdf,,,http://research.ijcaonline.org/volume76/number3/pxc3890645.pdf
+fd649233d62bf43d589818fbb41295e2d0669aeb,,,,
+fd615118fb290a8e3883e1f75390de8a6c68bfde,http://pdfs.semanticscholar.org/fd61/5118fb290a8e3883e1f75390de8a6c68bfde.pdf,,https://doi.org/10.1007/978-3-642-33712-3_4,http://pages.cs.wisc.edu/~lizhang/projects/joint-align/SmithECCV12.pdf
+fd38163654a0551ed7f4e442851508106e6105d9,,,https://doi.org/10.1109/ICNSC.2008.4525311,
+f28d549feffd414f38147d5e0460883fb487e2d3,,,https://doi.org/10.1007/s10462-011-9273-3,
+f20ed84abcb1223f351a576ef10dfda9f277326b,,,,
+f24e379e942e134d41c4acec444ecf02b9d0d3a9,http://pdfs.semanticscholar.org/f24e/379e942e134d41c4acec444ecf02b9d0d3a9.pdf,,,http://www.cis.temple.edu/~latecki/Papers/faceImageMV2012.pdf
+f2b13946d42a50fa36a2c6d20d28de2234aba3b4,http://npl.mcgill.ca/Papers/Adaptive%20Facial%20Expression%20Recognition%20Using%20Inter-modal%20top-down%20context.pdf,,,http://doi.acm.org/10.1145/2070481.2070488
+f2c30594d917ea915028668bc2a481371a72a14d,http://pdfs.semanticscholar.org/f2c3/0594d917ea915028668bc2a481371a72a14d.pdf,,,http://grail.cs.washington.edu/theses/SimonPhD.pdf
+f25aa838fb44087668206bf3d556d31ffd75235d,,,,http://doi.acm.org/10.1145/2911996.2912038
+f2ad9b43bac8c2bae9dea694f6a4e44c760e63da,http://pdfs.semanticscholar.org/f2ad/9b43bac8c2bae9dea694f6a4e44c760e63da.pdf,,https://doi.org/10.1007/11427445_22,http://cs.ndsu.edu/~dxu/publications/MultiIlluminationStudy.pdf
+f2e9494d0dca9fb6b274107032781d435a508de6,http://pdfs.semanticscholar.org/f2e9/494d0dca9fb6b274107032781d435a508de6.pdf,,,http://drum.lib.umd.edu/bitstream/handle/1903/1800/umi-umd-1782.pdf?isAllowed=y&sequence=1
+f2d15482e7055dd5f54cf4a8a8f60d8e75af7edf,,,https://doi.org/10.1109/ICIP.2011.6115736,
+f2c568fe945e5743635c13fe5535af157b1903d1,http://pdfs.semanticscholar.org/f2c5/68fe945e5743635c13fe5535af157b1903d1.pdf,,,
+f2cc459ada3abd9d8aa82e92710676973aeff275,,http://ieeexplore.ieee.org/document/5967185/,,
+f27fd2a1bc229c773238f1912db94991b8bf389a,,,https://doi.org/10.1109/IVCNZ.2016.7804414,
+f2abeb1a8dd32afb9a78856db38e115046afeb34,,,,
+f26097a1a479fb6f32b27a93f8f32609cfe30fdc,http://pdfs.semanticscholar.org/f260/97a1a479fb6f32b27a93f8f32609cfe30fdc.pdf,,https://doi.org/10.1016/j.patcog.2016.10.034,http://arxiv.org/abs/1610.04957
+f28ef0a61a45a8b9cd03aa0ca81863e1d54a31d1,,,https://doi.org/10.1109/VCIP.2016.7805483,
+f2004fff215a17ac132310882610ddafe25ba153,,,,http://doi.ieeecomputersociety.org/10.1109/AICCSA.2017.124
+f2f731feb9d376ac50b3347a93e73a0d6528cdd9,,,,
+f214bcc6ecc3309e2efefdc21062441328ff6081,http://pdfs.semanticscholar.org/f214/bcc6ecc3309e2efefdc21062441328ff6081.pdf,,https://doi.org/10.1016/j.csl.2012.12.005,http://www.mee.tcd.ie/~sigmedia/pmwiki/uploads/Main.Publications/kelly_sv_score_age_quality.pdf
+f231e9408da20498ba51d93459b3fcdb7b666efb,,,https://doi.org/10.1016/j.micpro.2012.01.002,
+f5149fb6b455a73734f1252a96a9ce5caa95ae02,http://openaccess.thecvf.com/content_cvpr_2017/papers/Zhang_Low-Rank-Sparse_Subspace_Representation_CVPR_2017_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.317
+f5a95f857496db376d69f7ac844d1f56e3577b75,,,https://doi.org/10.1007/s12193-012-0107-7,
+f5af4e9086b0c3aee942cb93ece5820bdc9c9748,http://pdfs.semanticscholar.org/f5af/4e9086b0c3aee942cb93ece5820bdc9c9748.pdf,,,http://doras.dcu.ie/594/1/thesis-saman.pdf
+f5aee1529b98136194ef80961ba1a6de646645fe,http://pdfs.semanticscholar.org/f5ae/e1529b98136194ef80961ba1a6de646645fe.pdf,,,http://www.robots.ox.ac.uk/~vgg/publications/2013/Simonyan13c/simonyan13c.pdf
+f510071fd7fdc6926e3958ebb85518bcfea17f89,,,,
+f52efc206432a0cb860155c6d92c7bab962757de,http://pdfs.semanticscholar.org/f52e/fc206432a0cb860155c6d92c7bab962757de.pdf,,,http://www.metaverselab.org/pub/paper2/xiong_mugshot.pdf
+f519723238701849f1160d5a9cedebd31017da89,http://pdfs.semanticscholar.org/f519/723238701849f1160d5a9cedebd31017da89.pdf,,,http://www.eurecom.fr/fr/publication/5023/download/sec-publi-5023.pdf
+f531ce18befc03489f647560ad3e5639566b39dc,,,,http://doi.ieeecomputersociety.org/10.1109/ACOMP.2015.9
+f545b121b9612707339dfdc40eca32def5e60430,,,,http://doi.ieeecomputersociety.org/10.1109/DICTA.2010.33
+f58f30932e3464fc808e539897efa4ee4e7ac59f,,,https://doi.org/10.1109/DICTA.2016.7797023,
+f557df59cd088ffb8e27506d8612d062407e96f4,,,https://doi.org/10.1007/s00521-014-1810-y,
+f56c407f918cf89ffa2ec3c51c383d53510c10e1,,,,
+f558af209dd4c48e4b2f551b01065a6435c3ef33,http://pdfs.semanticscholar.org/f558/af209dd4c48e4b2f551b01065a6435c3ef33.pdf,,,http://www.ijetcse.com/wp-content/plugins/ijetcse/file/upload/docx/574AN-ENHANCED-ATTRIBUTE-RERANKING-DESIGN-FOR-WEB-IMAGE-SEARCH-pdf.pdf
+f5acfc4c017447ea94c9d9cb19a9f1fcd4aa51e6,,,,
+e378ce25579f3676ca50c8f6454e92a886b9e4d7,http://openaccess.thecvf.com/content_ICCV_2017/papers/Liu_Robust_Video_Super-Resolution_ICCV_2017_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2017.274
+e35b09879a7df814b2be14d9102c4508e4db458b,http://pdfs.semanticscholar.org/e35b/09879a7df814b2be14d9102c4508e4db458b.pdf,,,https://arxiv.org/pdf/1310.4217.pdf
+e3657ab4129a7570230ff25ae7fbaccb4ba9950c,http://pdfs.semanticscholar.org/e365/7ab4129a7570230ff25ae7fbaccb4ba9950c.pdf,,,https://ibug.doc.ic.ac.uk/media/uploads/documents/final-tpami-rjive.pdf
+e315959d6e806c8fbfc91f072c322fb26ce0862b,http://pdfs.semanticscholar.org/e315/959d6e806c8fbfc91f072c322fb26ce0862b.pdf,,,http://www.ijsce.org/attachments/File/Vol-1_Issue-6/F0333121611.pdf
+e3a8f18e507d9f2b537ec3c3fcc1b874b8ccfc24,,,,http://doi.ieeecomputersociety.org/10.1109/MMUL.2016.27
+e39a0834122e08ba28e7b411db896d0fdbbad9ba,http://www.ece.ualberta.ca/~djoseph/publications/journal/TPAMI_2012.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2011.249
+e3a70f8ee84af6372b482c0b8b6e8e553dd0e1e5,,,,
+e3bb83684817c7815f5005561a85c23942b1f46b,http://pdfs.semanticscholar.org/e3bb/83684817c7815f5005561a85c23942b1f46b.pdf,,,http://www.cedar.buffalo.edu/~govind/CSE717/papers/Face%20Verification%20using%20Correlation%20Filters.pdf
+e30dc2abac4ecc48aa51863858f6f60c7afdf82a,http://pdfs.semanticscholar.org/e30d/c2abac4ecc48aa51863858f6f60c7afdf82a.pdf,,https://doi.org/10.5220/0004934405550562,http://users.ics.forth.gr/~ggian/publications/conferences/2014%20HealthInf%20Facial%20Signs%20and%20Psycho-physical%20Status%20Estimation%20for%20Well-being.pdf
+e3e2c106ccbd668fb9fca851498c662add257036,http://www.vast.uccs.edu/~tboult/PAPERS/BTAS13-Sapkota-et-al-Ensembles.pdf,,https://doi.org/10.1109/BTAS.2013.6712723,http://vast.uccs.edu/~tboult/PAPERS/BTAS13-Sapkota-et-al-Ensembles.pdf
+e379e73e11868abb1728c3acdc77e2c51673eb0d,http://pdfs.semanticscholar.org/e379/e73e11868abb1728c3acdc77e2c51673eb0d.pdf,,,http://www.ri.cmu.edu/pub_files/pub4/gross_ralph_2005_1/gross_ralph_2005_1.pdf
+e3a6e9ddbbfc4c5160082338d46808cea839848a,http://pdfs.semanticscholar.org/f5d0/2300271ab0f32f10bfbba5562c0fa83c5727.pdf,,https://doi.org/10.1007/978-3-319-46723-8_37,http://vision.stanford.edu/pdf/pusiol2016miccai.pdf
+e3917d6935586b90baae18d938295e5b089b5c62,http://www.iti.gr/files/tip05tsalakanidou.pdf,,https://doi.org/10.1109/TIP.2004.840714,
+e328d19027297ac796aae2470e438fe0bd334449,http://pdfs.semanticscholar.org/e328/d19027297ac796aae2470e438fe0bd334449.pdf,,https://doi.org/10.1007/978-3-319-54427-4_26,http://pesona.mmu.edu.my/~johnsee/research/papers/files/auto_accvw16.pdf
+e3144f39f473e238374dd4005c8b83e19764ae9e,http://pdfs.semanticscholar.org/f42d/ca4a4426e5873a981712102aa961be34539a.pdf,,,http://arxiv.org/abs/1612.03777
+e309715b7865b9aa3027b7eb6fef9fb75a0cba28,,,,
+cf4c1099bef189838877c8785812bc9baa5441ed,,,https://doi.org/10.1109/ICPR.2016.7899862,
+cf98565a19ec05a63dbaf650660b7c3f72de7b2b,,,,
+cf6c59d359466c41643017d2c212125aa0ee84b2,,,,http://doi.ieeecomputersociety.org/10.1109/ICME.2016.7552983
+cffebdf88e406c27b892857d1520cb2d7ccda573,http://pdfs.semanticscholar.org/cffe/bdf88e406c27b892857d1520cb2d7ccda573.pdf,,,http://www.cs.stanford.edu/people/asaxena/papers/ozan_sener_phdthesis_cornell_2016.pdf
+cfa572cd6ba8dfc2ee8ac3cc7be19b3abff1a8a2,https://www.computer.org/csdl/trans/ta/2017/03/07420600.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2016.2535291
+cfd933f71f4a69625390819b7645598867900eab,http://pdfs.semanticscholar.org/cfd9/33f71f4a69625390819b7645598867900eab.pdf,,,http://www.ijteee.org/final-print/mar2015/Person-Authentication-Using-Face-And-Palm-Vein-A-Survey-Of-Recognition-And-Fusion-Techniques.pdf
+cfdbcb796d028b073cdf7b91162384cd1c14e621,,,,
+cf875336d5a196ce0981e2e2ae9602580f3f6243,http://pdfs.semanticscholar.org/cf87/5336d5a196ce0981e2e2ae9602580f3f6243.pdf,,,http://www.bartneck.de/wp-content/uploads/2008/02/affectivedesignreader01.pdf
+cfd8c66e71e98410f564babeb1c5fd6f77182c55,http://pdfs.semanticscholar.org/cfd8/c66e71e98410f564babeb1c5fd6f77182c55.pdf,,,http://www-ee.ccny.cuny.edu/www/web/yltian/Publications/CoarseHeadPose.pdf
+cf54a133c89f730adc5ea12c3ac646971120781c,http://pdfs.semanticscholar.org/cf54/a133c89f730adc5ea12c3ac646971120781c.pdf,,https://doi.org/10.1016/j.image.2016.11.003,https://web.cs.hacettepe.edu.tr/~erkut/publications/Learning-Dynamic-Saliency.pdf
+cfbb2d32586b58f5681e459afd236380acd86e28,http://www.professeurs.polymtl.ca/christopher.pal/2011/ROSE.v2.5.pdf,,https://doi.org/10.1109/ROSE.2011.6058545,
+cf7a4442a6aad0e08d4aade8ec379c44f84bca8a,,,,http://doi.acm.org/10.1145/1873951.1874054
+cf5c9b521c958b84bb63bea9d5cbb522845e4ba7,http://pdfs.semanticscholar.org/cf5c/9b521c958b84bb63bea9d5cbb522845e4ba7.pdf,,,http://arxiv.org/abs/1511.06627
+cf784156547c3be146706e2763c1a52d939d1722,,,https://doi.org/10.1007/s11042-017-5038-6,
+cfa40560fa74b2fb5c26bdd6ea7c610ba5130e2f,,,https://doi.org/10.1109/TIFS.2013.2286265,
+cf09e2cb82961128302b99a34bff91ec7d198c7c,http://pdfs.semanticscholar.org/cf09/e2cb82961128302b99a34bff91ec7d198c7c.pdf,,,https://www.csie.ntu.edu.tw/~fuh/personal/OfficeEntranceControlwithFaceRecognition.pdf
+cf185d0d8fcad2c7f0a28b7906353d4eca5a098b,,,https://doi.org/10.1186/s13640-017-0190-5,
+cf54e9776d799aa183d7466094525251d66389a4,,,https://doi.org/10.1109/ICCE-Berlin.2017.8210589,
+cf6851c24f489dabff0238e01554edea6aa0fc7c,,,https://doi.org/10.1109/ICSMC.2011.6083637,
+cf86616b5a35d5ee777585196736dfafbb9853b5,http://www.research.rutgers.edu/~linzhong/PDF/TC_Facial.pdf,,https://doi.org/10.1109/TCYB.2014.2354351,
+cfba667644508853844c45bfe5d0b8a2ffb756d3,,,https://doi.org/10.1109/ISBA.2018.8311455,
+ca0185529706df92745e656639179675c717d8d5,,,https://doi.org/10.1504/IJCVR.2014.065571,
+cae41c3d5508f57421faf672ee1bea0da4be66e0,,,https://doi.org/10.1109/ICPR.2016.7900298,
+cacd51221c592012bf2d9e4894178c1c1fa307ca,http://pdfs.semanticscholar.org/cacd/51221c592012bf2d9e4894178c1c1fa307ca.pdf,,,http://www.ijeit.com/Vol%204/Issue%2011/IJEIT1412201505_34.pdf
+ca0363d29e790f80f924cedaf93cb42308365b3d,http://poseidon.csd.auth.gr/papers/PUBLISHED/JOURNAL/pdf/Kotsia07a.pdf,,https://doi.org/10.1109/TIP.2006.884954,http://www.eecs.qmul.ac.uk/~ioannisp/pubs/ecopies/kotsia_tip.pdf
+ca447d6479554b27b4afbd0fd599b2ed39f2c335,,,https://doi.org/10.1109/ICPR.2014.459,
+cad2bd940e7580490da9cc739e597d029e166504,,,,
+ca9adaf5702a7eb9b69be98128e0cae7d6252f8b,,,,
+cad52d74c1a21043f851ae14c924ac689e197d1f,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2014/W16/papers/Alletto_From_Ego_to_2014_CVPR_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2014.91
+cac8bb0e393474b9fb3b810c61efdbc2e2c25c29,http://pdfs.semanticscholar.org/cac8/bb0e393474b9fb3b810c61efdbc2e2c25c29.pdf,,,http://www.roboticsproceedings.org/rss07/p30.html
+cadab913f699adceebbd0f0abacb19d5f1deda84,,,,
+ca0804050cf9d7e3ed311f9be9c7f829e5e6a003,,,,http://doi.ieeecomputersociety.org/10.1109/ICPR.2004.1333904
+caaa6e8e83abb97c78ff9b813b849d5ab56b5050,http://digital.cs.usu.edu/~xqi/Promotion/JSPL.FaceRecognition.14.pdf,,https://doi.org/10.1109/LSP.2014.2343213,
+ca458f189c1167e42d3a5aaf81efc92a4c008976,,,https://doi.org/10.1109/TIP.2012.2202678,
+ca8f23d9b9a40016eaf0467a3df46720ac718e1d,,,https://doi.org/10.1109/ICASSP.2015.7178214,
+ca54d0a128b96b150baef392bf7e498793a6371f,http://pdfs.semanticscholar.org/ca54/d0a128b96b150baef392bf7e498793a6371f.pdf,,https://doi.org/10.1007/978-3-319-16634-6_40,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ACCV_2014/pages/workshop16/pdffiles/w16-p14.pdf
+ca83053d9a790319b11a04eac5ab412e7fcab914,http://pdfs.semanticscholar.org/ca83/053d9a790319b11a04eac5ab412e7fcab914.pdf,,https://doi.org/10.1016/j.imavis.2014.02.006,http://gec.di.uminho.pt/psantos/Publications_ficheiros/IMAVIS2014-FaceRecognition.pdf
+ca3e88d87e1344d076c964ea89d91a75c417f5ee,,,,
+cadba72aa3e95d6dcf0acac828401ddda7ed8924,http://pdfs.semanticscholar.org/cadb/a72aa3e95d6dcf0acac828401ddda7ed8924.pdf,,,http://doc.rero.ch/record/5526/files/1_these_NagelJL.pdf
+cacce7f4ce74e3269f5555aa6fd83e48baaf9c96,,,,http://doi.acm.org/10.1145/2632165
+ca60d007af691558de377cab5e865b5373d80a44,,,,http://doi.ieeecomputersociety.org/10.1109/ACII.2017.8273630
+cab3c6069387461c3a9e5d77defe9a84fe9c9032,,,https://doi.org/10.1016/j.neucom.2016.12.056,
+ca606186715e84d270fc9052af8500fe23befbda,http://www.amirtahmasbi.com/publications_repository/SDA_ICSPS2010.pdf,,,http://people.tamu.edu/~amir.tahmasbi/publications/SDA_ICSPS2010.pdf
+ca37933b6297cdca211aa7250cbe6b59f8be40e5,,,,http://doi.acm.org/10.1145/3155133.3155207
+e48fb3ee27eef1e503d7ba07df8eb1524c47f4a6,http://pdfs.semanticscholar.org/e48f/b3ee27eef1e503d7ba07df8eb1524c47f4a6.pdf,,,http://www.ece.cmu.edu/~casasent/rohit_face_illumination_SPIE05_final_dec2812.pdf
+e4bf70e818e507b54f7d94856fecc42cc9e0f73d,http://pdfs.semanticscholar.org/e4bf/70e818e507b54f7d94856fecc42cc9e0f73d.pdf,,,http://esatjournals.net/ijret/2016v05/i04/IJRET20160504070.pdf
+e480f8c00dfe217653c2569d0eec6e2ffa836d59,,,,
+e41246837c25d629ca0fad74643fb9eb8bf38009,,,https://doi.org/10.1109/ICSIPA.2011.6144064,
+e4bc529ced68fae154e125c72af5381b1185f34e,http://pdfs.semanticscholar.org/e4bc/529ced68fae154e125c72af5381b1185f34e.pdf,,,https://www.cc.gatech.edu/grads/a/aedwards/proposal_document.pdf
+e465f596d73f3d2523dbf8334d29eb93a35f6da0,http://pdfs.semanticscholar.org/e465/f596d73f3d2523dbf8334d29eb93a35f6da0.pdf,,,https://arxiv.org/pdf/1704.06729v1.pdf
+e4aeaf1af68a40907fda752559e45dc7afc2de67,http://pdfs.semanticscholar.org/e4ae/af1af68a40907fda752559e45dc7afc2de67.pdf,,,https://arxiv.org/pdf/1803.02504v1.pdf
+e43a18384695ae0acc820171236a39811ec2cd58,,,,
+e4c3d5d43cb62ac5b57d74d55925bdf76205e306,http://pdfs.semanticscholar.org/e4c3/d5d43cb62ac5b57d74d55925bdf76205e306.pdf,,,https://arxiv.org/pdf/1804.02051v1.pdf
+e40df008fd0e5fd169840bf7d72a951411d13c59,,,,
+e42998bbebddeeb4b2bedf5da23fa5c4efc976fa,http://pdfs.semanticscholar.org/e429/98bbebddeeb4b2bedf5da23fa5c4efc976fa.pdf,,https://doi.org/10.1007/978-3-642-37431-9_50,https://ibug.doc.ic.ac.uk/media/uploads/documents/accv2012finalpaper.pdf
+e4a1b46b5c639d433d21b34b788df8d81b518729,http://pdfs.semanticscholar.org/e4a1/b46b5c639d433d21b34b788df8d81b518729.pdf,,,https://arxiv.org/pdf/1801.07580v1.pdf
+e4d53e7f4c2052940841abc08f9574655f3f7fb4,,,,http://doi.acm.org/10.1145/3078971.3079039
+e4c81c56966a763e021938be392718686ba9135e,http://pdfs.semanticscholar.org/e4c8/1c56966a763e021938be392718686ba9135e.pdf,,,https://cdn.intechopen.com/pdfs-wm/39298.pdf
+e4e95b8bca585a15f13ef1ab4f48a884cd6ecfcc,http://pdfs.semanticscholar.org/e4e9/5b8bca585a15f13ef1ab4f48a884cd6ecfcc.pdf,,,https://www.researchgate.net/profile/Aytul_Ercil/publication/228354283_Face_recognition_with_independent_component-based_super-resolution/links/0fcfd50efc99268ee5000000.pdf
+e4df83b7424842ff5864c10fa55d38eae1c45fac,http://pdfs.semanticscholar.org/e4df/83b7424842ff5864c10fa55d38eae1c45fac.pdf,,,https://www.researchgate.net/profile/EK_Wong/publication/40892077_Locally_Linear_Discriminate_Embedding_for_Face_Recognition/links/0a85e53b4d2549f617000000.pdf
+e4df98e4b45a598661a47a0a8900065716dafd6d,,,,http://doi.ieeecomputersociety.org/10.1109/WI-IAT.2015.219
+e4ad82afc563b783475ed45e9f2cd4c9e2a53e83,,,https://doi.org/10.1109/AICCSA.2016.7945716,
+e4e3faa47bb567491eaeaebb2213bf0e1db989e1,http://pdfs.semanticscholar.org/e4e3/faa47bb567491eaeaebb2213bf0e1db989e1.pdf,,,https://www.ijcai.org/Proceedings/16/Papers/323.pdf
+e43ea078749d1f9b8254e0c3df4c51ba2f4eebd5,http://pdfs.semanticscholar.org/e43e/a078749d1f9b8254e0c3df4c51ba2f4eebd5.pdf,,,http://e-university.tu-sofia.bg/e-publ/files/2301_ICEST2015_NN_ID_all.pdf
+e47e8fa44decf9adbcdb02f8a64b802fe33b29ef,,,https://doi.org/10.1109/TIP.2017.2782366,
+e476cbcb7c1de73a7bcaeab5d0d59b8b3c4c1cbf,https://pdfs.semanticscholar.org/e476/cbcb7c1de73a7bcaeab5d0d59b8b3c4c1cbf.pdf,,https://doi.org/10.1109/TNNLS.2013.2245340,http://www4.comp.polyu.edu.hk/~cslzhang/paper/RKR-TNN.pdf
+e4c2f8e4aace8cb851cb74478a63d9111ca550ae,http://pdfs.semanticscholar.org/e4c2/f8e4aace8cb851cb74478a63d9111ca550ae.pdf,,,https://arxiv.org/pdf/1802.03583v1.pdf
+e475e857b2f5574eb626e7e01be47b416deff268,http://pdfs.semanticscholar.org/e475/e857b2f5574eb626e7e01be47b416deff268.pdf,,,http://www.naun.org/main/NAUN/fuzzy/2017/a062017-077.pdf
+e4391993f5270bdbc621b8d01702f626fba36fc2,http://pdfs.semanticscholar.org/e439/1993f5270bdbc621b8d01702f626fba36fc2.pdf,,https://doi.org/10.1007/978-3-642-38886-6_31,http://hal.inria.fr/docs/00/83/95/27/PDF/79440319.pdf
+e42f3c27391821f9873539fc3da125b83bffd5a2,,,https://doi.org/10.1109/HPCS.2010.5547096,
+e475deadd1e284428b5e6efd8fe0e6a5b83b9dcd,http://pdfs.semanticscholar.org/e475/deadd1e284428b5e6efd8fe0e6a5b83b9dcd.pdf,,,https://arxiv.org/pdf/1803.07385v1.pdf
+e4b825bf9d5df47e01e8d7829371d05208fc272d,,,,http://doi.acm.org/10.1145/3055635.3056618
+e4d0e87d0bd6ead4ccd39fc5b6c62287560bac5b,https://www.ecse.rpi.edu/~cvrl/Publication/pdf/Liu2013.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2013.6553767
+e48e94959c4ce799fc61f3f4aa8a209c00be8d7f,http://pdfs.semanticscholar.org/e48e/94959c4ce799fc61f3f4aa8a209c00be8d7f.pdf,,,
+e4e07f5f201c6986e93ddb42dcf11a43c339ea2e,,,https://doi.org/10.1109/BTAS.2017.8272722,
+e496d6be415038de1636bbe8202cac9c1cea9dbe,http://pdfs.semanticscholar.org/e496/d6be415038de1636bbe8202cac9c1cea9dbe.pdf,,,http://ceur-ws.org/Vol-2061/paper3.pdf
+e43cc682453cf3874785584fca813665878adaa7,http://pdfs.semanticscholar.org/e43c/c682453cf3874785584fca813665878adaa7.pdf,,,http://www.ijecs.in/issue/v3-i10/63%20ijecs.pdf
+e4c3587392d477b7594086c6f28a00a826abf004,,,https://doi.org/10.1109/ICIP.2017.8296998,
+fec6648b4154fc7e0892c74f98898f0b51036dfe,http://pdfs.semanticscholar.org/fec6/648b4154fc7e0892c74f98898f0b51036dfe.pdf,,,http://www.cse.cuhk.edu.hk/~lyu/student/mphil/fung/fung-thesis.pdf
+fea0a5ed1bc83dd1b545a5d75db2e37a69489ac9,http://pdfs.semanticscholar.org/fea0/a5ed1bc83dd1b545a5d75db2e37a69489ac9.pdf,,https://doi.org/10.5220/0005861302430250,https://biblio.ugent.be/publication/8165069/file/8165102.pdf
+fe9c460d5ca625402aa4d6dd308d15a40e1010fa,http://pdfs.semanticscholar.org/fe9c/460d5ca625402aa4d6dd308d15a40e1010fa.pdf,,https://doi.org/10.1007/978-3-540-24842-2_5,http://www.informatik.uni-ulm.de/ni/staff/HNeumann/publicationsYear/PDFs/CONFERENCES/ADS04SchweigerEtAl-LNCS3068preprint.pdf
+fef6f1e04fa64f2f26ac9f01cd143dd19e549790,,,,http://doi.acm.org/10.1145/3123266.3123451
+fe6fefe5f2f8c97ed9a27f3171fc0afb62d5495e,,,,
+fe556c18b7ab65ceb57e1dd054a2ca21cefe153c,,,,http://doi.ieeecomputersociety.org/10.1109/WACV.2015.145
+fe7e3cc1f3412bbbf37d277eeb3b17b8b21d71d5,http://pdfs.semanticscholar.org/fe7e/3cc1f3412bbbf37d277eeb3b17b8b21d71d5.pdf,,,http://www.iosrjournals.org/iosr-jvlsi/papers/vol6-issue2/Version-1/J0602014753.pdf
+fe5df5fe0e4745d224636a9ae196649176028990,http://pdfs.semanticscholar.org/fe5d/f5fe0e4745d224636a9ae196649176028990.pdf,,,http://scholarworks.umass.edu/cgi/viewcontent.cgi?article=1285&context=open_access_dissertations
+fe9d9c298d2e0c72408668fcff996e4bf58cc6c6,,,,
+fed8cc533037d7d925df572a440fd89f34d9c1fd,,,,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.194
+fe961cbe4be0a35becd2d722f9f364ec3c26bd34,http://pdfs.semanticscholar.org/fe96/1cbe4be0a35becd2d722f9f364ec3c26bd34.pdf,,,http://paul.rutgers.edu/~jl1322/papers/LRECW2014_neidle.pdf
+feb6e267923868bff6e2108603d00fdfd65251ca,http://pdfs.semanticscholar.org/feb6/e267923868bff6e2108603d00fdfd65251ca.pdf,,https://doi.org/10.1142/S0218213012500297,http://www.cs.unr.edu/~bebis/IJAIT13_Categorization.pdf
+feb0bd4ad219dc5005da84561b97ae53f4207440,,,,
+feeb0fd0e254f38b38fe5c1022e84aa43d63f7cc,http://pdfs.semanticscholar.org/feeb/0fd0e254f38b38fe5c1022e84aa43d63f7cc.pdf,,,http://www.eurecom.fr/en/publication/3413/download/mm-publi-3413.pdf
+fe97d46c34630d14235132a95fb2d2ed7b2c4663,,,,
+fe108803ee97badfa2a4abb80f27fa86afd9aad9,http://pdfs.semanticscholar.org/fe10/8803ee97badfa2a4abb80f27fa86afd9aad9.pdf,,https://doi.org/10.1016/j.patcog.2011.02.011,http://www.csie.kuas.edu.tw/~jcchen/pdf/Kernel%20discriminant%20transformation%20for%20image%20set-based%20face%20recognition.pdf
+fefaa892f1f3ff78db4da55391f4a76d6536c49a,,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2497689
+fe14d8177cbdb7e5b4085302e6e044f7a4c19cb2,,,https://doi.org/10.1109/ICSMC.2012.6377834,
+fe5d6c65e51386f4d36f7434fe6fcd9494fe9361,,,https://doi.org/10.1109/ACCESS.2017.2730281,
+c8db8764f9d8f5d44e739bbcb663fbfc0a40fb3d,http://pdfs.semanticscholar.org/c8db/8764f9d8f5d44e739bbcb663fbfc0a40fb3d.pdf,,,https://d-nb.info/1021938211/34
+c8a4b4fe5ff2ace9ab9171a9a24064b5a91207a3,http://www.isir.upmc.fr/files/2013ACTI2846.pdf,,https://doi.org/10.1109/ICIP.2013.6738613,
+c83d142a47babe84e8c4addafa9e2bb9e9b757a5,,,https://doi.org/10.1109/MLSP.2012.6349762,
+c8292aa152a962763185e12fd7391a1d6df60d07,http://pdfs.semanticscholar.org/c829/2aa152a962763185e12fd7391a1d6df60d07.pdf,,https://doi.org/10.1007/978-3-642-41939-3_50,https://vision.cornell.edu/se3/wp-content/uploads/2014/09/camera_distance_from_face_images.pdf
+c82c147c4f13e79ad49ef7456473d86881428b89,http://pdfs.semanticscholar.org/c82c/147c4f13e79ad49ef7456473d86881428b89.pdf,,https://doi.org/10.2197/ipsjtcva.7.104,https://www.jstage.jst.go.jp/article/ipsjtcva/7/0/7_104/_pdf/-char/en
+c833c2fb73decde1ad5b5432d16af9c7bee1c165,,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2008.143
+c8e84cdff569dd09f8d31e9f9ba3218dee65e961,http://pdfs.semanticscholar.org/c8e8/4cdff569dd09f8d31e9f9ba3218dee65e961.pdf,,,http://www.rci.rutgers.edu/~vmp93/Journal_pub/JOSA_A_v2.pdf
+c8829013bbfb19ccb731bd54c1a885c245b6c7d7,http://pdfs.semanticscholar.org/c882/9013bbfb19ccb731bd54c1a885c245b6c7d7.pdf,,,http://www.cs.ucl.ac.uk/staff/V.Zografos/DICTA2005.pdf
+c8fb8872203ee694d95da47a1f9929ac27186d87,,,https://doi.org/10.1109/ICIP.2005.1530305,
+c8fb8994190c1aa03c5c54c0af64c2c5c99139b4,,,https://doi.org/10.1007/s00138-016-0794-2,
+c81ee278d27423fd16c1a114dcae486687ee27ff,http://pdfs.semanticscholar.org/c81e/e278d27423fd16c1a114dcae486687ee27ff.pdf,,,http://www.ijcsit.com/docs/Volume%206/vol6issue03/ijcsit20150603232.pdf
+c8b9217ee36aebb9735e525b718490dc27c8c1cb,,,,
+c84991fe3bf0635e326a05e34b11ccaf74d233dc,,,https://doi.org/10.1016/j.neucom.2016.08.069,
+c8bc8c99acd009e4d27ddd8d9a6e0b899d48543e,,,https://doi.org/10.1109/IROS.2012.6386178,
+c81b27932069e6c7016bfcaa5e861b99ac617934,,,,http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019469
+c88ce5ef33d5e544224ab50162d9883ff6429aa3,http://pdfs.semanticscholar.org/c88c/e5ef33d5e544224ab50162d9883ff6429aa3.pdf,,https://doi.org/10.4018/IJCVIP.2017040102,https://lhncbc.nlm.nih.gov/system/files/pub9530.pdf
+c87c07d44633eca2cc1d11d2d967fc66eb8de871,,,,
+c822bd0a005efe4ec1fea74de534900a9aa6fb93,http://pdfs.semanticscholar.org/c822/bd0a005efe4ec1fea74de534900a9aa6fb93.pdf,,,https://static.aminer.org/pdf/PDF/000/312/281/face_recognition_committee_machines_dynamic_vs_static_structures.pdf
+c872d6310f2079db0cee0e69cc96da1470055225,,,https://doi.org/10.1007/978-3-319-46675-0_68,
+c8adbe00b5661ab9b3726d01c6842c0d72c8d997,http://pdfs.semanticscholar.org/c8ad/be00b5661ab9b3726d01c6842c0d72c8d997.pdf,,https://doi.org/10.1007/978-3-319-54427-4_25,https://arxiv.org/pdf/1609.09018v1.pdf
+fb4545782d9df65d484009558e1824538030bbb1,http://pdfs.semanticscholar.org/fb45/45782d9df65d484009558e1824538030bbb1.pdf,,,https://drum.lib.umd.edu/bitstream/handle/1903/11691/Farrell_umd_0117E_12105.pdf?isAllowed=y&sequence=1
+fbf196d83a41d57dfe577b3a54b1b7fa06666e3b,http://pdfs.semanticscholar.org/fbf1/96d83a41d57dfe577b3a54b1b7fa06666e3b.pdf,,,http://crcv.ucf.edu/THUMOS14/papers/Bogazici%20University.pdf
+fb3aaf18ea07b30d1836e7cf2ab9fa898627fe93,,,https://doi.org/10.1109/ACCESS.2017.2784096,
+fbb6ee4f736519f7231830a8e337b263e91f06fe,http://pdfs.semanticscholar.org/fbb6/ee4f736519f7231830a8e337b263e91f06fe.pdf,,https://doi.org/10.1007/978-3-319-20801-5_23,http://vip.uwaterloo.ca/files/publications/bchwyl_illumination_face_detect.pdf
+fb1b6138aeb081adf853316c0d83ef4c5626a7fa,,,https://doi.org/10.1109/ICIP.2017.8296302,
+fb7bf10cbc583db5d5eee945aa633fcb968e01ad,,,https://doi.org/10.1007/s00521-012-0962-x,
+fb915bcc1623cdf999c0e95992c0e0cf85e64d8e,,,,http://doi.ieeecomputersociety.org/10.1109/iThings.2014.83
+fb557b79157a6dda15f3abdeb01a3308528f71f2,,,,http://doi.ieeecomputersociety.org/10.1109/ICNC.2009.310
+fb5280b80edcf088f9dd1da769463d48e7b08390,http://pdfs.semanticscholar.org/fb52/80b80edcf088f9dd1da769463d48e7b08390.pdf,,https://doi.org/10.1016/j.ins.2013.06.006,http://www.researchgate.net/profile/Marko_Tkalcic/publication/259038024_The_impact_of_weak_ground_truth_and_facial_expressiveness_on_affect_detection_accuracy_from_time-continuous_videos_of_facial_expressions/links/00b49529caac28b327000000.pdf
+fb3da9b47460eedf857e386a562cc5348d78d544,,,,
+fb1627ed224bf7b1e3d80c097316ed7703951df2,,,https://doi.org/10.1109/VCIP.2017.8305094,
+fb3ff56ab12bd250caf8254eca30cd97984a949a,,,https://doi.org/10.3103/S0146411617010072,
+fb0f5e06048c0274c2a4056e353fa31f5790e381,,,,
+fb2bd6c2959a4f811b712840e599f695dad2967e,,,https://doi.org/10.1109/ISPA.2015.7306038,
+fbc53ab5697ee6f4f270153dbdee2d93cfda7b5f,,,,
+fba464cb8e3eff455fe80e8fb6d3547768efba2f,http://pdfs.semanticscholar.org/fba4/64cb8e3eff455fe80e8fb6d3547768efba2f.pdf,,,https://www.ijeas.org/download_data/IJEAS0302003.pdf
+fb0774049f2f34be194592822c74e2f2e603dea8,,,,
+fba386ac63fe87ee5a0cf64bf4fb90324b657d61,,,https://doi.org/10.1109/ICIP.2015.7351752,
+fb228b214e28af26f77cc1195d03c9d851b78ec6,,,,
+fb084b1fe52017b3898c871514cffcc2bdb40b73,http://pdfs.semanticscholar.org/fb08/4b1fe52017b3898c871514cffcc2bdb40b73.pdf,,,http://ftp.ncbi.nlm.nih.gov/pub/pmc/97/d9/pone.0122200.PMC4408076.pdf
+ed9de242a23ad546902e1d5ec022dbb029cc2282,,,https://doi.org/10.1109/ICASSP.2015.7178138,
+ed28e8367fcb7df7e51963add9e2d85b46e2d5d6,http://pdfs.semanticscholar.org/ed28/e8367fcb7df7e51963add9e2d85b46e2d5d6.pdf,,,http://www.ascent-journals.com/IJERIA/Vol9No3/3-T.SYED%20AKHEEL-131.pdf
+edbddf8c176d6e914f0babe64ad56c051597d415,,,https://doi.org/10.1109/TMM.2016.2644866,
+ed94e7689cdae87891f08428596dec2a2dc6a002,,,https://doi.org/10.1109/CAMSAP.2017.8313130,
+ed0d8ca1701247b22516ffb1b47f28554b167608,,,,
+ed273b5434013dcdb9029c1a9f1718da494a23a2,,,https://doi.org/10.1109/LSP.2018.2810106,
+ed0d8997a4b7b80a7cd3592e98bdbe5c3aab0cee,,,https://doi.org/10.1007/s11042-014-2345-z,
+ed779cc4f026f6ac22f5ef0c34126138e1ebc8b2,,,https://doi.org/10.1007/978-981-10-3005-5_57,
+ed08ac6da6f8ead590b390b1d14e8a9b97370794,http://pdfs.semanticscholar.org/ed08/ac6da6f8ead590b390b1d14e8a9b97370794.pdf,,,http://www.ijircce.com/upload/2015/september/52_5_An.pdf
+ed70d1a9435c0b32c0c75c1a062f4f07556f7016,,,https://doi.org/10.1109/ICIP.2015.7350774,
+edef98d2b021464576d8d28690d29f5431fd5828,http://pdfs.semanticscholar.org/edef/98d2b021464576d8d28690d29f5431fd5828.pdf,,,https://arxiv.org/pdf/1802.02438v1.pdf
+edc5a0a8b9fc6ae0e8d8091a2391767f645095d9,http://www.es.mdh.se/pdf_publications/3948.pdf,,https://doi.org/10.1109/ITSC.2015.424,
+ed04e161c953d345bcf5b910991d7566f7c486f7,http://pdfs.semanticscholar.org/ed04/e161c953d345bcf5b910991d7566f7c486f7.pdf,,,http://www.lsr.ei.tum.de/fileadmin/publications/sosnowski/AISB_2010_Sosnowski_Mayer.pdf
+edd7504be47ebc28b0d608502ca78c0aea6a65a2,http://pdfs.semanticscholar.org/edd7/504be47ebc28b0d608502ca78c0aea6a65a2.pdf,,https://doi.org/10.1007/978-3-319-66709-6_11,https://arxiv.org/pdf/1706.08807v1.pdf
+ed82f10e5bfe1825b9fa5379a1d0017b96fa1ebf,,,,http://doi.ieeecomputersociety.org/10.1109/ICEBE.2017.36
+edbb8cce0b813d3291cae4088914ad3199736aa0,http://pdfs.semanticscholar.org/edbb/8cce0b813d3291cae4088914ad3199736aa0.pdf,,,http://www.aaai.org/ocs/index.php/AAAI/AAAI11/paper/view/3435
+ed023651e31cdbcaa5ef2ee1d71ddbc2906c2f76,,,https://doi.org/10.1109/LSP.2010.2093600,
+edcb662834aae8878a209c769ed664f8bd48b751,,,,
+c178a86f4c120eca3850a4915134fff44cbccb48,http://pdfs.semanticscholar.org/c178/a86f4c120eca3850a4915134fff44cbccb48.pdf,,,http://waset.org/publications/16147/normalization-discriminant-independent-component-analysis
+c1d2d12ade031d57f8d6a0333cbe8a772d752e01,http://pdfs.semanticscholar.org/c1d2/d12ade031d57f8d6a0333cbe8a772d752e01.pdf,,,http://j-mi.org/contents_file/contents_files/loader/0/Article/166/file/default/JMI2010B-5.pdf
+c180f22a9af4a2f47a917fd8f15121412f2d0901,http://pdfs.semanticscholar.org/c180/f22a9af4a2f47a917fd8f15121412f2d0901.pdf,,https://doi.org/10.1007/11679363_117,http://www.jaist.ac.jp/~chen-fan/publication/ica2006.pdf
+c1a16ee838d977160821951e7264af4b2e7c8265,,,,
+c1f07ec629be1c6fe562af0e34b04c54e238dcd1,http://pdfs.semanticscholar.org/c1f0/7ec629be1c6fe562af0e34b04c54e238dcd1.pdf,,,https://pdfs.semanticscholar.org/c1f0/7ec629be1c6fe562af0e34b04c54e238dcd1.pdf
+c1a70d63d1667abfb1f6267f3564110d55c79c0d,,,https://doi.org/10.1007/s00138-013-0488-y,
+c138c76809b8da9e5822fb0ae38457e5d75287e0,,,https://doi.org/10.1109/TIP.2014.2378017,
+c1581b5175994e33549b8e6d07b4ea0baf7fe517,,,https://doi.org/10.1109/IJCNN.2011.6033478,
+c1173b8d8efb8c2d989ce0e51fe21f6b0b8d1478,,,https://doi.org/10.1109/TCYB.2016.2535122,
+c1f05b723e53ac4eb1133249b445c0011d42ca79,,,https://doi.org/10.1162/neco_a_00990,
+c10a15e52c85654db9c9343ae1dd892a2ac4a279,http://www.cs.utexas.edu/~grauman/papers/ijcv-sungju.pdf,,https://doi.org/10.1007/s11263-011-0494-3,
+c1fb854d9a04b842ff38bd844b50115e33113539,,,https://doi.org/10.1007/s11042-016-3883-3,
+c1fc70e0952f6a7587b84bf3366d2e57fc572fd7,http://pdfs.semanticscholar.org/c1fc/70e0952f6a7587b84bf3366d2e57fc572fd7.pdf,,https://doi.org/10.1016/j.patcog.2015.09.017,https://arxiv.org/pdf/1509.05536v1.pdf
+c1c253a822f984de73f02d6a29c8c7cadc8f090c,,,,
+c17c7b201cfd0bcd75441afeaa734544c6ca3416,,,https://doi.org/10.1109/TCSVT.2016.2587389,
+c1dfabe36a4db26bf378417985a6aacb0f769735,http://pdfs.semanticscholar.org/c1df/abe36a4db26bf378417985a6aacb0f769735.pdf,,,http://www-ee.ccny.cuny.edu/www/web/yltian/Publications/NWPJ-201109-50.pdf
+c13211a15abd3ca187ef36b9f816891f901ba788,,,,
+c1482491f553726a8349337351692627a04d5dbe,http://pdfs.semanticscholar.org/c148/2491f553726a8349337351692627a04d5dbe.pdf,,https://doi.org/10.1007/978-3-319-67217-5_25,https://arxiv.org/pdf/1702.00048v1.pdf
+c12034ca237ee330dd25843f2d05a6e1cfde1767,,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2013.298
+c1ff88493721af1940df0d00bcfeefaa14f1711f,http://pdfs.semanticscholar.org/c1ff/88493721af1940df0d00bcfeefaa14f1711f.pdf,,,http://www.cs.cmu.edu/~ftorre/sr_paper.pdf
+c18d537037caf399c4fabfdec896c376675af58a,,,,
+c17a332e59f03b77921942d487b4b102b1ee73b6,http://pdfs.semanticscholar.org/c17a/332e59f03b77921942d487b4b102b1ee73b6.pdf,,,https://perceptual.mpi-inf.mpg.de/files/2016/01/wood16_etra.pdf
+c1e76c6b643b287f621135ee0c27a9c481a99054,http://pdfs.semanticscholar.org/c1e7/6c6b643b287f621135ee0c27a9c481a99054.pdf,,https://doi.org/10.1016/j.procs.2016.07.009,http://www.tina-vision.net/~pab/download_files/procedia_CompSci_miua_2016_bromiley.pdf
+c10b0a6ba98aa95d740a0d60e150ffd77c7895ad,http://pdfs.semanticscholar.org/c10b/0a6ba98aa95d740a0d60e150ffd77c7895ad.pdf,,,https://www-i6.informatik.rwth-aachen.de/publications/download/1060/HanselmannHaraldYanShenNeyHermann--DeepFisherFaces--2017.pdf
+c696c9bbe27434cb6279223a79b17535cd6e88c8,http://pdfs.semanticscholar.org/c696/c9bbe27434cb6279223a79b17535cd6e88c8.pdf,,,http://www.icis.ntu.edu.sg/scs-ijit/119/119_11.pdf
+c65e4ffa2c07a37b0bb7781ca4ec2ed7542f18e3,http://pdfs.semanticscholar.org/c65e/4ffa2c07a37b0bb7781ca4ec2ed7542f18e3.pdf,,,http://www.prasa.org/proceedings/2010/prasa2010-45.pdf
+c614450c9b1d89d5fda23a54dbf6a27a4b821ac0,http://pdfs.semanticscholar.org/c614/450c9b1d89d5fda23a54dbf6a27a4b821ac0.pdf,,,http://www.scielo.br/pdf/babt/v60/1516-8913-babt-60-e17160480.pdf
+c69a66a8b9c71d6c3c19980969550090af854b89,,,,
+c6096986b4d6c374ab2d20031e026b581e7bf7e9,http://pdfs.semanticscholar.org/c609/6986b4d6c374ab2d20031e026b581e7bf7e9.pdf,,,http://chenlab.ece.cornell.edu/Publication/Andy/GallagherThesis.pdf
+c6608fdd919f2bc4f8d7412bab287527dcbcf505,http://pdfs.semanticscholar.org/c660/8fdd919f2bc4f8d7412bab287527dcbcf505.pdf,,,https://urresearch.rochester.edu/fileDownloadForInstitutionalItem.action?itemFileId=167632&itemId=30953
+c64502696438b4c9f9e12e64daaf7605f62ce3f0,,,,http://doi.ieeecomputersociety.org/10.1109/WKDD.2009.195
+c6a4b23ead2dab3d5dc02a5916d4c383f0c53007,,,,
+c65cfc9d3568c586faf18611c4124f6b7c0c1a13,,,https://doi.org/10.1109/ICACCI.2014.6968322,
+c6ea6fee4823b511eecf41f6c2574a0728055baf,http://pdfs.semanticscholar.org/c6ea/6fee4823b511eecf41f6c2574a0728055baf.pdf,,,https://arxiv.org/pdf/1802.00278v1.pdf
+c648d2394be3ff0c0ee5360787ff3777a3881b02,,,https://doi.org/10.1080/01449290903353047,
+c62c910264658709e9bf0e769e011e7944c45c90,http://pdfs.semanticscholar.org/c62c/910264658709e9bf0e769e011e7944c45c90.pdf,,,https://arxiv.org/pdf/1706.04717v1.pdf
+c66ecbae0f2bfa7cdbf5082fb8f0567878b4a599,,,,
+c65d2ee433ae095652abe3860eeafe6082c636c6,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2013.6553714
+c660500b49f097e3af67bb14667de30d67db88e3,http://pdfs.semanticscholar.org/c660/500b49f097e3af67bb14667de30d67db88e3.pdf,,https://doi.org/10.1016/S1077-3142(03)00078-X,https://www.researchgate.net/profile/Yanxi_Liu/publication/222529129_Facial_asymmetry_quantification_for_expression_invariant_human_identification/links/00b49525783de59f21000000.pdf
+c6ffa09c4a6cacbbd3c41c8ae7a728b0de6e10b6,http://pdfs.semanticscholar.org/c6ff/a09c4a6cacbbd3c41c8ae7a728b0de6e10b6.pdf,,https://doi.org/10.1016/j.patcog.2008.05.014,https://www.researchgate.net/profile/Kun_Hong_Liu/publication/222418323_Feature_extraction_using_constrained_maximum_variance_mapping/links/545331860cf26d5090a38868.pdf
+c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8,http://pdfs.semanticscholar.org/c652/6dd3060d63a6c90e8b7ff340383c4e0e0dd8.pdf,,,http://ftp.ncbi.nlm.nih.gov/pub/pmc/99/98/srep24746.PMC4838853.pdf
+c6bbb56a26222bdb8ce7dd829cff38b67d4b03cd,,,,http://doi.acm.org/10.1145/2043674.2043677
+c675534be881e59a78a5986b8fb4e649ddd2abbe,,,https://doi.org/10.1109/ICIP.2017.8296548,
+c65a394118d34beda5dd01ae0df163c3db88fceb,http://pdfs.semanticscholar.org/c65a/394118d34beda5dd01ae0df163c3db88fceb.pdf,,https://doi.org/10.1007/978-3-540-78646-7_53,http://class.inrialpes.fr/pub/207-deschacht-ecir08.pdf
+c60601bdb5465d8270fdf444e5d8aeccab744e29,,,,http://doi.ieeecomputersociety.org/10.1109/ICME.2010.5583363
+ec6a2093059fd6eada9944212f64a659881abb95,,,https://doi.org/10.1016/j.patcog.2016.02.022,
+ec89f2307e29cc4222b887eb0619e0b697cf110d,,,https://doi.org/10.1109/TIP.2009.2027361,
+ece3407b15d7d2dcf37cfe9b8fc87542a2c1162d,,,,
+ec40df721a80c62d4a768fe29b58d86b1a07f435,,,,
+ec12f805a48004a90e0057c7b844d8119cb21b4a,http://pdfs.semanticscholar.org/ec12/f805a48004a90e0057c7b844d8119cb21b4a.pdf,,https://doi.org/10.1007/978-3-319-11752-2_40,http://mrl.cs.vsb.cz/publications/fusek_dagm_2014.pdf
+ec1a57e609eda72b4eb60155fac12db1da31f6c0,,,https://doi.org/10.1007/11744085_41,
+eccd9acba3f6a605053dbde7f0890836e52aa085,,,,
+ec22eaa00f41a7f8e45ed833812d1ac44ee1174e,http://pdfs.semanticscholar.org/ec22/eaa00f41a7f8e45ed833812d1ac44ee1174e.pdf,,https://doi.org/10.1016/j.patrec.2014.06.009,https://dr.ntu.edu.sg/bitstream/handle/10220/39591/A%20novel%20phase%20congruency%20based%20descriptor%20for%20dynamic%20facial%20expression%20analysis.pdf;sequence=1
+ec28217290897a059348dcdf287540a2e2c68204,,,https://doi.org/10.1504/IJBM.2015.070928,
+ec54000c6c0e660dd99051bdbd7aed2988e27ab8,http://pdfs.semanticscholar.org/ec54/000c6c0e660dd99051bdbd7aed2988e27ab8.pdf,,,http://gtav.upc.edu/en/publications/papers/2005/two-in-one-joint-pose-estimation-and-face-recognition-with-p2ca
+ecfb93de88394a244896bfe6ee7bf39fb250b820,,,,
+eca706b4d77708452bdad1c98a23e4e88ce941ab,,,https://doi.org/10.1142/S0218001416550144,
+ec39e9c21d6e2576f21936b1ecc1574dadaf291e,,,https://doi.org/10.1109/WACV.2017.130,
+ec44510ca9c0093c5eb860128d17506614168bcf,,,,
+ec0104286c96707f57df26b4f0a4f49b774c486b,http://www.cs.newpaltz.edu/~lik/publications/Mingxing-Duan-IEEE-TIFS-2018.pdf,,https://doi.org/10.1109/TIFS.2017.2766583,
+ecca2a2b84ea01ea425b8d2d9f376f15a295a7f5,http://smie2.sysu.edu.cn/~wcd/Papers/2013_TPAMI_Wang_MEAP.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2013.28
+ec7cd3fff8bdbbe7005bc8d6b7f6b87d72aac2d9,http://www.mmp.rwth-aachen.de/publications/pdf/rafi_chalearn2015.pdf,,,http://www.vision.rwth-aachen.de/publications/pdf/rafi_chalearn2015.pdf
+ecdd83002f69c2ccc644d07abb44dd939542d89d,,,https://doi.org/10.1016/j.neucom.2015.07.011,
+ecfa56b38ac2b58428d59c9b630b1437a9ff8278,,,,
+ec05078be14a11157ac0e1c6b430ac886124589b,http://pdfs.semanticscholar.org/ec05/078be14a11157ac0e1c6b430ac886124589b.pdf,,,https://arxiv.org/pdf/1802.08726v1.pdf
+4e7ed13e541b8ed868480375785005d33530e06d,http://arxiv.org/pdf/1603.07388v1.pdf,,,https://arxiv.org/pdf/1603.07388v1.pdf
+4e490cf3cf26fe46507bb55a548c403b9c685ba0,http://labnic.unige.ch/nic/papers/SJ_DG_SD_KND_IC_MIV_DS_PV_KRS_IEEETransac11.pdf,,,https://medweb4.unige.ch/labnic/papers/SJ_DG_SD_KND_IC_MIV_DS_PV_KRS_IEEETransac11.pdf
+4e8f301dbedc9063831da1306b294f2bd5b10477,,,https://doi.org/10.1109/BIOSIG.2016.7736919,
+4e94e7412d180da5a646f6a360e75ba2128f93aa,,,,
+4e5dc3b397484326a4348ccceb88acf309960e86,http://pdfs.semanticscholar.org/4e5d/c3b397484326a4348ccceb88acf309960e86.pdf,,,
+4efd58102ff46b7435c9ec6d4fc3dd21d93b15b4,,,https://doi.org/10.1109/TIFS.2017.2788002,
+4e6c17966efae956133bf8f22edeffc24a0470c1,http://pdfs.semanticscholar.org/4e6c/17966efae956133bf8f22edeffc24a0470c1.pdf,,https://doi.org/10.1007/978-3-319-46654-5_3,https://davidsonic.github.io/index/ccbr2016.pdf
+4e1836914bbcf94dc00e604b24b1b0d6d7b61e66,http://pdfs.semanticscholar.org/4e18/36914bbcf94dc00e604b24b1b0d6d7b61e66.pdf,,https://doi.org/10.1007/978-3-642-17691-3_29,http://www.ee.oulu.fi/~gyzhao/Papers/2010/ACVIS_Huang.pdf
+4e1d89149fc4aa057a8becce2d730ec6afd60efa,,,https://doi.org/10.1109/ICSMC.2009.5346047,
+4e4fa167d772f34dfffc374e021ab3044566afc3,http://pdfs.semanticscholar.org/4e4f/a167d772f34dfffc374e021ab3044566afc3.pdf,,,https://pdfs.semanticscholar.org/4e4f/a167d772f34dfffc374e021ab3044566afc3.pdf
+4ed54d5093d240cc3644e4212f162a11ae7d1e3b,http://pdfs.semanticscholar.org/4ed5/4d5093d240cc3644e4212f162a11ae7d1e3b.pdf,,https://doi.org/10.1007/978-3-540-69321-5_49,http://www.cs.toronto.edu/~sven/Papers/dagm2008.pdf
+4e8c608fc4b8198f13f8a68b9c1a0780f6f50105,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Yang_How_Related_Exemplars_2013_ICCV_paper.pdf,,,http://www.cs.cmu.edu/~yiyang/related_1015.pdf
+4ea53e76246afae94758c1528002808374b75cfa,http://pdfs.semanticscholar.org/4ea5/3e76246afae94758c1528002808374b75cfa.pdf,,,http://www.luawms.edu.pk/lujstvolume4/lujst48.pdf
+4ed2d7ecb34a13e12474f75d803547ad2ad811b2,http://openaccess.thecvf.com/content_ICCV_2017/papers/Yang_Common_Action_Discovery_ICCV_2017_paper.pdf,,,http://eeeweba.ntu.edu.sg/computervision/Research%20Papers/2017/Common%20Action%20Discovery%20and%20Localization%20in%20Unconstrained%20Videos.pdf
+4e97b53926d997f451139f74ec1601bbef125599,http://pdfs.semanticscholar.org/4e97/b53926d997f451139f74ec1601bbef125599.pdf,,,https://arxiv.org/pdf/1602.03220v1.pdf
+4e93a8a47473bf57e24aec048cb870ab366a43d6,http://pdfs.semanticscholar.org/4e93/a8a47473bf57e24aec048cb870ab366a43d6.pdf,,,http://web.cse.msu.edu/~liuxm/publication/PR_biometric.pdf
+4e5760521356745548246b1cb74c8d69675d9923,,,,
+4e8168fbaa615009d1618a9d6552bfad809309e9,http://pdfs.semanticscholar.org/4e81/68fbaa615009d1618a9d6552bfad809309e9.pdf,,,http://arxiv.org/abs/1611.01751
+4ecfd4273b5418fd0f3121eaefda0a4c48f6aaf0,,,,
+4ea63435d7b58d41a5cbcdd34812201f302ca061,,,https://doi.org/10.1109/ICIP.2014.7025066,
+4e626b2502ee042cf4d7425a8e7a228789b23856,,,,
+4ea4116f57c5d5033569690871ba294dc3649ea5,http://pdfs.semanticscholar.org/4ea4/116f57c5d5033569690871ba294dc3649ea5.pdf,,https://doi.org/10.1007/978-3-642-01793-3_19,http://media.cs.tsinghua.edu.cn/~imagevision/papers/Multi-View%20Face%20Alignment%20Using%203D%20Shape%20Model%20for%20View%20Estimation.pdf
+4e444db884b5272f3a41e4b68dc0d453d4ec1f4c,http://pdfs.semanticscholar.org/4e44/4db884b5272f3a41e4b68dc0d453d4ec1f4c.pdf,,,http://arxiv.org/abs/1706.04589
+4e5c1284c3ca475d1b5715b1e7f6ca4c9902d28d,,,,
+4e6e5cb93e7e564bc426b5b27888d55101504c50,,,https://doi.org/10.1109/ICPR.2016.7900299,
+4e343c66c5fe7426132869d552f0f205d1bc5307,,,https://doi.org/10.1109/ICPR.2014.452,
+4e1258db62e4762fd8647b250fda9c3567f86eb8,,,,http://doi.ieeecomputersociety.org/10.1109/CRV.2013.17
+4ef0a6817a7736c5641dc52cbc62737e2e063420,http://pdfs.semanticscholar.org/4ef0/a6817a7736c5641dc52cbc62737e2e063420.pdf,,,http://www.accentsjournals.org/PaperDirectory/Journal/IJACR/2014/12/6.pdf
+4e4d034caa72dce6fca115e77c74ace826884c66,http://pdfs.semanticscholar.org/4e4d/034caa72dce6fca115e77c74ace826884c66.pdf,,,
+4ee94572ae1d9c090fe81baa7236c7efbe1ca5b4,,,https://doi.org/10.1109/DICTA.2017.8227494,
+4eeccbbb98de4f2e992600482fd6b881ace014bb,,,,http://doi.acm.org/10.1145/2964284.2967240
+4e7ebf3c4c0c4ecc48348a769dd6ae1ebac3bf1b,http://pdfs.semanticscholar.org/4e7e/bf3c4c0c4ecc48348a769dd6ae1ebac3bf1b.pdf,,https://doi.org/10.1016/j.imavis.2012.07.003,http://www.doc.ic.ac.uk/~kb709/Bousmalis-ADA-IVCJ2013.pdf
+4e0e49c280acbff8ae394b2443fcff1afb9bdce6,http://pdfs.semanticscholar.org/4e0e/49c280acbff8ae394b2443fcff1afb9bdce6.pdf,,https://doi.org/10.1007/978-3-319-59147-6_23,http://arxiv.org/abs/1603.01006
+4e581831d24fd90b0b5228b9136e76fa3e8f8279,,,https://doi.org/10.1109/TIP.2014.2303648,
+4eb8030b31ff86bdcb063403eef24e53b9ad4329,,,,http://doi.acm.org/10.1145/2993148.2997640
+4e3b71b1aa6b6cb7aa55843d2214441f0076fe69,,,,
+4e4e8fc9bbee816e5c751d13f0d9218380d74b8f,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553711.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2013.6553711
+4ed40e6bb66dfa38a75d864d804d175a26b6c6f6,,,,http://doi.ieeecomputersociety.org/10.1109/CRV.2011.41
+20a88cc454a03d62c3368aa1f5bdffa73523827b,http://pdfs.semanticscholar.org/d620/7593c39255ac8ce7536e5958a99f52d6bb60.pdf,,https://doi.org/10.1016/j.patcog.2006.06.030,http://repository.ust.hk/ir/bitstream/1783.1-2970/1/yeung.pr2007a1.pdf
+20a432a065a06f088d96965f43d0055675f0a6c1,http://pdfs.semanticscholar.org/20a4/32a065a06f088d96965f43d0055675f0a6c1.pdf,,https://doi.org/10.1007/978-3-319-44781-0_10,https://www2.informatik.uni-hamburg.de/wtm/ps/Hinz_ICANN_2016.pdf
+20a3ce81e7ddc1a121f4b13e439c4cbfb01adfba,http://pdfs.semanticscholar.org/e805/bc872e18277c7cbfce82206cf1667cce22cc.pdf,,https://doi.org/10.1007/978-3-319-64689-3_20,http://av.dfki.de/~pagani/papers/Selim2017_CAIP.pdf
+20da3ec27d221973c681ed8713f3e00ff10fef6b,,,,
+20e504782951e0c2979d9aec88c76334f7505393,https://arxiv.org/pdf/1612.08534v1.pdf,,https://doi.org/10.1109/TIP.2017.2771408,http://arxiv.org/abs/1612.08534
+209324c152fa8fab9f3553ccb62b693b5b10fb4d,http://pdfs.semanticscholar.org/2093/24c152fa8fab9f3553ccb62b693b5b10fb4d.pdf,,,http://ai.stanford.edu/~ranjaykrishna/papers/thesis.pdf
+203009d3608bdc31ffc3991a0310b9e98b630c4d,,,,
+2050847bc7a1a0453891f03aeeb4643e360fde7d,https://cvhci.anthropomatik.kit.edu/~mtapaswi/papers/ICMR2015.pdf,,,https://cvhci.anthropomatik.kit.edu/~mtapaswi/papers/posters/ICMR2015.pdf
+205f3d654b7d28d00d15b034a8c5b2a8740bd8b6,https://www.researchgate.net/profile/Ya_Su4/publication/51686551_Discriminant_learning_through_multiple_principal_angles_for_visual_recognition/links/00b495253b0057832b000000.pdf,,https://doi.org/10.1109/TIP.2011.2169972,
+2045fe2f21c30f364d6e699ea0bf0ea21d7f460e,,,,
+202d8d93b7b747cdbd6e24e5a919640f8d16298a,http://pdfs.semanticscholar.org/202d/8d93b7b747cdbd6e24e5a919640f8d16298a.pdf,,https://doi.org/10.1007/978-3-642-19530-3_16,http://cmpe.bilgi.edu.tr/wp-content/uploads/2013/10/ebs_publications_bioid022.pdf
+20767ca3b932cbc7b8112db21980d7b9b3ea43a3,http://pdfs.semanticscholar.org/2076/7ca3b932cbc7b8112db21980d7b9b3ea43a3.pdf,,,http://www.aaai.org/ocs/index.php/AAAI/AAAI16/paper/view/12499
+20a16efb03c366fa4180659c2b2a0c5024c679da,http://pdfs.semanticscholar.org/20a1/6efb03c366fa4180659c2b2a0c5024c679da.pdf,,,http://arxiv.org/abs/1410.6880
+205b34b6035aa7b23d89f1aed2850b1d3780de35,http://mirlab.org/conference_papers/International_Conference/ICASSP%202014/papers/p504-jiang.pdf,,https://doi.org/10.1109/ICASSP.2014.6853647,
+20c2a5166206e7ffbb11a23387b9c5edf42b5230,http://pdfs.semanticscholar.org/aff0/51003a43736001aeb76e08cb86ce67d6c70d.pdf,,https://doi.org/10.1016/j.specom.2015.09.008,http://kuppl.ku.edu/sites/kuppl.ku.edu/files/docs/Tang%20Hannah%20Jongman%20Sereno%20Wang%20Harmarneh%20visible%20clear%20plain%20Speech%20Comm%202015.pdf
+20e505cef6d40f896e9508e623bfc01aa1ec3120,http://pdfs.semanticscholar.org/20e5/05cef6d40f896e9508e623bfc01aa1ec3120.pdf,,,http://world-comp.org/p2011/IPC3297.pdf
+2042f1cacea262ec924f74994e49d5e87d9d0445,,,,
+205e4d6e0de81c7dd6c83b737ffdd4519f4f7ffa,http://poseidon.csd.auth.gr/papers/PUBLISHED/CONFERENCE/pdf/2009/Vretos_2009_ICIP.pdf,,https://doi.org/10.1109/ICIP.2009.5413959,
+2063222c5ce0dd233fa3056ddc245fca26bd5cf2,,,,
+2098983dd521e78746b3b3fa35a22eb2fa630299,http://pdfs.semanticscholar.org/2098/983dd521e78746b3b3fa35a22eb2fa630299.pdf,,,http://arxiv.org/abs/1704.06925
+204f1cf56794bb23f9516b5f225a6ae00d3d30b8,,,https://doi.org/10.1109/JSYST.2015.2418680,
+20b437dc4fc44c17f131713ffcbb4a8bd672ef00,http://pdfs.semanticscholar.org/20b4/37dc4fc44c17f131713ffcbb4a8bd672ef00.pdf,,https://doi.org/10.1007/978-3-319-19941-2_20,http://home.elka.pw.edu.pl/~astrupcz/uploads/7/4/5/7/74570135/2015_head_pose_tracking_from_rgbd_sensor_based_on_direct_motion_estimation_premi.pdf
+20b405d658b7bb88d176653758384e2e3e367039,,,https://doi.org/10.1109/IJCNN.2012.6252677,
+208a2c50edb5271a050fa9f29d3870f891daa4dc,http://pdfs.semanticscholar.org/c17c/55f43af5db44b6a4c17932aa3d7031985749.pdf,,,http://jov.arvojournals.org/pdfaccess.ashx?url=/data/journals/jov/932792/jov-11-13-24.pdf
+207798603e3089a1c807c93e5f36f7767055ec06,http://www1.se.cuhk.edu.hk/~hccl/publications/pub/2012_APSIPA_FacialExpression.pdf,http://ieeexplore.ieee.org/document/6411903/,,
+20be15dac7d8a5ba4688bf206ad24cab57d532d6,http://pdfs.semanticscholar.org/20be/15dac7d8a5ba4688bf206ad24cab57d532d6.pdf,,https://doi.org/10.1007/978-3-540-74272-2_50,http://www.researchgate.net/profile/Mario_Castelan2/publication/216360512_Face_Shape_Recovery_and_Recognition_Using_a_Surface_Gradient_Based_Statistical_Model/links/0912f51113bbd7e773000000.pdf
+2042aed660796b14925db17c0a8b9fbdd7f3ebac,http://pdfs.semanticscholar.org/4a19/fd2eb09976128e33bd8f9411972146ac6c41.pdf,,https://doi.org/10.1007/978-3-319-10584-0_2,http://www-users.cs.umn.edu/~qzhao/publications/pdf/crowd_eccv14.pdf
+20ebbcb6157efaacf7a1ceb99f2f3e2fdf1384e6,http://pdfs.semanticscholar.org/ee89/f47ebfbebed7d6793a6774356ba63398f0d0.pdf,,,http://www.face-rec.org/algorithms/ica/liu99comparative.pdf
+20532b1f80b509f2332b6cfc0126c0f80f438f10,https://arxiv.org/pdf/1509.03248v1.pdf,,,http://arxiv.org/pdf/1509.03248v1.pdf
+205af28b4fcd6b569d0241bb6b255edb325965a4,http://pdfs.semanticscholar.org/205a/f28b4fcd6b569d0241bb6b255edb325965a4.pdf,,https://doi.org/10.1007/s11370-007-0014-z,http://dhoiem.web.engr.illinois.edu/courses/cs598_spring09/papers/fulltext.pdf
+20eabf10e9591443de95b726d90cda8efa7e53bb,,,https://doi.org/10.1007/s11390-017-1740-0,
+201802c83b4f161de764bb1480735e0b090b5c3b,,,,
+20cfb4136c1a984a330a2a9664fcdadc2228b0bc,http://www.eecs.harvard.edu/~htk/publication/2015-amfg-chen-comiter-kung-mcdanel.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2015.7301357
+2020e8c0be8fa00d773fd99b6da55029a6a83e3d,http://pdfs.semanticscholar.org/9ca3/806dd01f8aded02e88c7022716b7fef46423.pdf,,,http://eudl.eu/pdf/10.1007/978-3-642-32615-8_48
+20a0b23741824a17c577376fdd0cf40101af5880,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Weinzaepfel_Learning_to_Track_ICCV_2015_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.362
+205f035ec90a7fa50fd04fdca390ce83c0eea958,,,,http://doi.acm.org/10.1145/3131287
+189e5a2fa51ed471c0e7227d82dffb52736070d8,,,https://doi.org/10.1109/ICIP.2017.8296995,
+18c72175ddbb7d5956d180b65a96005c100f6014,http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf,,,http://doi.ieeecomputersociety.org/10.1109/34.927464
+18636347b8741d321980e8f91a44ee054b051574,http://biometrics.cse.msu.edu/Publications/SoftBiometrics/JainParkFacemarks_ICIP09.pdf,,https://doi.org/10.1109/ICIP.2009.5413921,http://www.cse.msu.edu/biometrics/Publications/SoftBiometrics/JainParkFacemarks_ICIP09.pdf
+18206e1b988389eaab86ef8c852662accf3c3663,http://pdfs.semanticscholar.org/d13e/5b4249cfe9672672eb573d15e7dc0a235e04.pdf,,,http://arxiv.org/abs/1712.00636
+189b1859f77ddc08027e1e0f92275341e5c0fdc6,http://pdfs.semanticscholar.org/189b/1859f77ddc08027e1e0f92275341e5c0fdc6.pdf,,https://doi.org/10.1007/978-3-642-35749-7_3,http://rogerioferis.com/PartsAndAttributes/pages/material/SparsePnA2010.pdf
+18a9f3d855bd7728ed4f988675fa9405b5478845,http://pdfs.semanticscholar.org/18a9/f3d855bd7728ed4f988675fa9405b5478845.pdf,,,http://ictactjournals.in/paper/IJIVP_V4_I2_Paper_6_709_716.pdf
+18409c220a0f330c24f0e095653a787813c3c85a,,,,
+181045164df86c72923906aed93d7f2f987bce6c,http://pdfs.semanticscholar.org/1810/45164df86c72923906aed93d7f2f987bce6c.pdf,,,http://thomas.deselaers.de/teaching/files/belle_master.pdf
+18166432309000d9a5873f989b39c72a682932f5,http://pdfs.semanticscholar.org/1816/6432309000d9a5873f989b39c72a682932f5.pdf,,,http://web.cse.ohio-state.edu/~hamm.95/papers/visapp08jh.pdf
+18f57228614b1ea0f42e1376a78b94222e81bf7a,,,,
+18d5b0d421332c9321920b07e0e8ac4a240e5f1f,http://pdfs.semanticscholar.org/18d5/b0d421332c9321920b07e0e8ac4a240e5f1f.pdf,,,http://arxiv.org/abs/1507.08064
+18c6c92c39c8a5a2bb8b5673f339d3c26b8dcaae,http://pdfs.semanticscholar.org/18c6/c92c39c8a5a2bb8b5673f339d3c26b8dcaae.pdf,,,http://cbcl.mit.edu/projects/cbcl/publications/ps/liao_leibo_poggio_NIPS-2013.pdf
+18bfda16116e76c2b21eb2b54494506cbb25e243,,,https://doi.org/10.1109/TIFS.2010.2051544,
+1885acea0d24e7b953485f78ec57b2f04e946eaf,http://openaccess.thecvf.com/content_ICCV_2017_workshops/papers/w36/Xiong_Combining_Local_and_ICCV_2017_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.297
+18a013e1c72cf579d1b215f22d298521047e98a4,,,,
+18d3532298fb7b8fb418453107f786178ca82e4a,,,https://doi.org/10.1109/TIFS.2017.2668221,
+184750382fe9b722e78d22a543e852a6290b3f70,http://pdfs.semanticscholar.org/1847/50382fe9b722e78d22a543e852a6290b3f70.pdf,,https://doi.org/10.1016/j.patcog.2003.09.006,https://cs.nju.edu.cn/zhouzh/zhouzh.files/publication/pr04.pdf
+18a849b1f336e3c3b7c0ee311c9ccde582d7214f,http://pdfs.semanticscholar.org/18a8/49b1f336e3c3b7c0ee311c9ccde582d7214f.pdf,,,http://web.mit.edu/vondrick/vatic/ijcv.pdf
+18cd79f3c93b74d856bff6da92bfc87be1109f80,http://pdfs.semanticscholar.org/18cd/79f3c93b74d856bff6da92bfc87be1109f80.pdf,,,http://www.e-ijaet.org/media/41I8-IJAET0805954-AN-APPLICATION-TO-HUMAN.pdf
+184dba921b932143d196c833310dee6884fa4a0a,,,https://doi.org/10.1109/SIU.2017.7960393,
+18dd3867d68187519097c84b7be1da71771d01a3,,,,http://doi.acm.org/10.1145/2448556.2448563
+184fc019bbec7f07bd9e34406f95f07faf7ed96f,,,,
+182470fd0c18d0c5979dff75d089f1da176ceeeb,https://repositori.upf.edu/bitstream/handle/10230/27207/dominguez_MARMI16_mult.pdf?isAllowed=y&sequence=1,,,http://doi.acm.org/10.1145/2927006.2927008
+1862cb5728990f189fa91c67028f6d77b5ac94f6,http://lvdmaaten.github.io/publications/papers/CVPR_2014.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2014.165
+18145b0b13aa477eeabef9ceec4299b60e87c563,,,https://doi.org/10.1007/s11042-011-0834-x,
+1862bfca2f105fddfc79941c90baea7db45b8b16,http://vision.cs.utexas.edu/projects/rationales/rationales.pdf,,,http://jeffdonahue.com/papers/RationalesICCV2011.pdf
+1886b6d9c303135c5fbdc33e5f401e7fc4da6da4,https://arxiv.org/pdf/1610.01119v1.pdf,,https://doi.org/10.1109/TIP.2017.2675339,http://arxiv.org/abs/1610.01119
+18b344b5394988544c386783e7bb8e73e0466e0e,,,,
+1888bf50fd140767352158c0ad5748b501563833,http://pdfs.semanticscholar.org/1888/bf50fd140767352158c0ad5748b501563833.pdf,,,
+187d4d9ba8e10245a34f72be96dd9d0fb393b1aa,http://pdfs.semanticscholar.org/187d/4d9ba8e10245a34f72be96dd9d0fb393b1aa.pdf,,https://doi.org/10.5244/C.23.125,http://www.bmva.org/bmvc/2009/Papers/Paper164/Paper164.pdf
+182f3aa4b02248ff9c0f9816432a56d3c8880706,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Quan_Sparse_Coding_for_CVPR_2016_paper.pdf,,,http://openaccess.thecvf.com/content_cvpr_2016/papers/Quan_Sparse_Coding_for_CVPR_2016_paper.pdf
+189a839c708f95772ccaad72bfb4d0321d1535d6,,,,
+1828b1b0f5395b163fef087a72df0605249300c2,http://pdfs.semanticscholar.org/8b18/66a150521bfa18c3e6ec633e1acc79683749.pdf,,https://doi.org/10.5244/C.24.55,http://www.bmva.org/bmvc/2010/conference/paper55/abstract55.pdf
+1821510693f5bed360c81706c97330d2fa7d1290,,,,
+187f3ee3bc50a1f2471edc80d707e4fa1cac5b0b,,,https://doi.org/10.1109/LSP.2015.2437883,
+1831800ef8b1f262c92209f1ee16567105da35d6,,,https://doi.org/10.1016/j.sigpro.2014.01.010,
+185360fe1d024a3313042805ee201a75eac50131,http://cvit.iiit.ac.in/papers/deidentTCSVT2k11.pdf,,https://doi.org/10.1007/978-3-642-12297-2_26,http://cvit.iiit.ac.in/papers/Prachi09Person.pdf
+1824b1ccace464ba275ccc86619feaa89018c0ad,http://www.csc.kth.se/~vahidk/papers/KazemiCVPR14.pdf,,,http://www.nada.kth.se/~sullivan/Papers/Kazemi_cvpr14.pdf
+1890470d07a090e7b762091c7b9670b5c2e1c348,,,,http://doi.ieeecomputersociety.org/10.1109/CRV.2016.20
+18dfc2434a95f149a6cbb583cca69a98c9de9887,http://pdfs.semanticscholar.org/18df/c2434a95f149a6cbb583cca69a98c9de9887.pdf,,,http://www.bmva.org/bmvc/2014/files/abstract039.pdf
+181708b09bde7f4904f8fd92b3668d76e7aff527,http://mplab.ucsd.edu/~ksikka/emotiw14.pdf,,,http://doi.acm.org/10.1145/2663204.2666275
+271e2856e332634eccc5e80ba6fa9bbccf61f1be,http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/WorkShops/data/papers/176.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2008.4563125
+27846b464369095f4909f093d11ed481277c8bba,http://pdfs.semanticscholar.org/2784/6b464369095f4909f093d11ed481277c8bba.pdf,,,http://file.scirp.org/pdf/JSIP_2017051915464852.pdf
+27eb7a6e1fb6b42516041def6fe64bd028b7614d,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zafeiriou_Joint_Unsupervised_Deformable_CVPR_2016_paper.pdf,,,https://ibug.doc.ic.ac.uk/media/uploads/documents/zafeiriou2016joint.pdf
+2717998d89d34f45a1cca8b663b26d8bf10608a9,http://wangzheallen.github.io/papers/ZhangWWQW_CVPR16.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhang_Real-Time_Action_Recognition_CVPR_2016_paper.pdf
+27c66b87e0fbb39f68ddb783d11b5b7e807c76e8,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w14/papers/Rodriguez_Fast_Simplex-HMM_for_CVPR_2017_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2017.166
+27a0a7837f9114143717fc63294a6500565294c2,http://pdfs.semanticscholar.org/27a0/a7837f9114143717fc63294a6500565294c2.pdf,,,https://hal.inria.fr/inria-00326730/document
+27aadf6e7441bf40675874df1cf4bb7e2dffdd9e,http://www1.icsi.berkeley.edu/~farrell/birdlets/iccv11-camera-ready.pdf,,,http://www.umiacs.umd.edu/~morariu/publications/FarrellBirdletsICCV11.pdf
+27d709f7b67204e1e5e05fe2cfac629afa21699d,http://pdfs.semanticscholar.org/2b88/db4294f11b0516a537b8720fcf416be80dbf.pdf,,,http://www.cs.utexas.edu/~grauman/papers/latent-look-iccv2017.pdf
+27c9ddb72360f4cd0f715cd7ea82fa399af91f11,http://pdfs.semanticscholar.org/27c9/ddb72360f4cd0f715cd7ea82fa399af91f11.pdf,,https://doi.org/10.1016/j.imavis.2004.09.002,http://www.busim.ee.boun.edu.tr/~sankur/SankurFolder/Multiresolution_Face.pdf
+271df16f789bd2122f0268c3e2fa46bc0cb5f195,http://users.eecs.northwestern.edu/~mya671/mypapers/CVPR11_Yuan_Yang_Wu.pdf,,,http://eeeweba.ntu.edu.sg/computervision/Research%20Papers/2011/Mining%20Discriminative%20Co-occurrence%20Patterns%20for%20Visual%20Recognition.pdf
+27e0684fa5b57715162ac6c58a6ea283c7db1719,,,https://doi.org/10.1109/ICARCV.2004.1468857,
+27218ff58c3f0e7d7779fba3bb465d746749ed7c,http://pdfs.semanticscholar.org/2721/8ff58c3f0e7d7779fba3bb465d746749ed7c.pdf,,,http://www.cs.utexas.edu/~grauman/research/theses/BS-LucyLiang.pdf
+276dbb667a66c23545534caa80be483222db7769,http://pdfs.semanticscholar.org/276d/bb667a66c23545534caa80be483222db7769.pdf,,,https://www.researchgate.net/profile/Steffen_Herbort/publication/225729523_An_introduction_to_image-based_3D_surface_reconstruction_and_a_survey_of_photometric_stereo_methods/links/00b7d53a1737cbc0cf000000.pdf
+27812db1d2f68611cc284d65d11818082e572008,,,https://doi.org/10.1109/MIPRO.2016.7522323,
+27c6cd568d0623d549439edc98f6b92528d39bfe,http://openaccess.thecvf.com/content_iccv_2015/papers/Hsu_Regressive_Tree_Structured_ICCV_2015_paper.pdf,,,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Hsu_Regressive_Tree_Structured_ICCV_2015_paper.pdf
+273b0511588ab0a81809a9e75ab3bd93d6a0f1e3,http://arxiv.org/pdf/1604.04334v1.pdf,,https://doi.org/10.1007/s11042-016-3428-9,https://arxiv.org/pdf/1604.04334v1.pdf
+27169761aeab311a428a9dd964c7e34950a62a6b,http://academicjournals.org/article/article1380818227_Mostayed%20et%20al.pdf,,,
+27961bc8173ac84fdbecacd01e5ed6f7ed92d4bd,http://www.cse.msu.edu/rgroups/biometrics/Publications/Face/NiinumaHanJain_MultiviewFaceRecognition_PoseRegularization_BTAS13.pdf,,https://doi.org/10.1109/BTAS.2013.6712735,http://www.cse.msu.edu/biometrics/Publications/Face/NiinumaHanJain_MultiviewFaceRecognition_PoseRegularization_BTAS13.pdf
+27173d0b9bb5ce3a75d05e4dbd8f063375f24bb5,http://pdfs.semanticscholar.org/2717/3d0b9bb5ce3a75d05e4dbd8f063375f24bb5.pdf,,,http://www.ijera.com/papers/Vol4_issue10/Part%20-%203/G410034044.pdf
+27e5b7ae3506a0f7472ee9089cd2472442e71c14,,,https://doi.org/10.1007/s00521-015-1834-y,
+2784d9212dee2f8a660814f4b85ba564ec333720,http://people.cs.umass.edu/~elm/papers/cvpr2010_imagetrans.pdf,,https://doi.org/10.1109/CVPRW.2010.5543185,http://vis-www.cs.umass.edu/papers/cvpr2010_imagetrans.pdf
+275b3cb7c780c663eabbf4d6c6cbc8fe24287c70,https://www.researchgate.net/profile/Bisan_Alsalibi/publication/280839254_The_Impact_of_Bio-Inspired_Approaches_Toward_the_Advancement_of_Face_Recognition/links/55c8ce4608aeca747d67062e.pdf?origin=publication_list,,,http://doi.acm.org/10.1145/2791121
+278e1441a77fbeebb22c45932d76c557e5663197,http://sist.sysu.edu.cn/~zhwshi/research/preprintversion/two-stage%20nonnegative%20sparse%20representation%20for%20large-scale%20face%20recognition.pdf,,https://doi.org/10.1109/TNNLS.2012.2226471,http://www.cripac.ia.ac.cn/People/rhe/bare_jrnl_v5.pdf
+27cccf992f54966feb2ab4831fab628334c742d8,http://pdfs.semanticscholar.org/27cc/cf992f54966feb2ab4831fab628334c742d8.pdf,,,http://www.ijcaonline.org/archives/volume64/number18/10733-5573?format=pdf
+27883967d3dac734c207074eed966e83afccb8c3,http://www.ee.cuhk.edu.hk/~xgwang/papers/gaoGZHW.pdf,,https://doi.org/10.1109/TIP.2013.2262286,
+270e5266a1f6e76954dedbc2caf6ff61a5fbf8d0,http://pdfs.semanticscholar.org/270e/5266a1f6e76954dedbc2caf6ff61a5fbf8d0.pdf,,,http://arxiv.org/abs/1703.01210
+27f8b01e628f20ebfcb58d14ea40573d351bbaad,http://pdfs.semanticscholar.org/27f8/b01e628f20ebfcb58d14ea40573d351bbaad.pdf,,,http://eprints-phd.biblio.unitn.it/2748/3/My_PhD_Thesis.pdf
+2742a61d32053761bcc14bd6c32365bfcdbefe35,http://pdfs.semanticscholar.org/ee39/96dc3f451f480134e1a468c32762d688c51b.pdf,,,http://jmlr.csail.mit.edu/papers/volume16/qiu15a/qiu15a.pdf
+27aa23d7a05368a6b5e3d95627f9bab34284e5c4,,,https://doi.org/10.1109/IJCNN.2012.6252705,
+2729e12ecb777a553e5ed0a1ac52dd37924e813d,,,,
+27dafedccd7b049e87efed72cabaa32ec00fdd45,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2A_074.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/2A_074_ext.pdf
+27a586a435efdcecb151c275947fe5b5b21cf59b,,,https://doi.org/10.1007/s12559-017-9530-0,
+27a299b834a18e45d73e0bf784bbb5b304c197b3,http://ai.stanford.edu/~vigneshr/cvpr_13/cvpr13_social_roles.pdf,,,http://cs.stanford.edu/groups/vision/pdf/334.pdf
+2710e1c58476e1996466530af825de6376a92833,,,,
+279459cbbc5c6db4802e9c737cc72a612d76f7fc,,,https://doi.org/10.1109/SSCI.2017.8285296,
+272e487dfa32f241b622ac625f42eae783b7d9aa,,,https://doi.org/10.1109/ICSIPA.2015.7412207,
+27b1670e1b91ab983b7b1ecfe9eb5e6ba951e0ba,http://pdfs.semanticscholar.org/27b1/670e1b91ab983b7b1ecfe9eb5e6ba951e0ba.pdf,,,https://pdfs.semanticscholar.org/27b1/670e1b91ab983b7b1ecfe9eb5e6ba951e0ba.pdf
+274f87ad659cd90382ef38f7c6fafc4fc7f0d74d,http://www.deepkernel.com/Papers/mm2014.pdf,,,http://doi.acm.org/10.1145/2647868.2654928
+27ee8482c376ef282d5eb2e673ab042f5ded99d7,http://sylvain.legallou.fr/Fichiers/p_ICARCV06_NewNormalization_LeGallou.pdf,,https://doi.org/10.1109/ICARCV.2006.345451,https://hal.archives-ouvertes.fr/hal-00143460/document
+4b28de1ebf6b6cb2479b9176fab50add6ed75b78,http://vision.ucsd.edu/sites/default/files/cvpr05a.pdf,,,https://vision.cornell.edu/se3/wp-content/uploads/2014/09/cvpr05a_0.pdf
+4b4106614c1d553365bad75d7866bff0de6056ed,http://pdfs.semanticscholar.org/4b41/06614c1d553365bad75d7866bff0de6056ed.pdf,,https://doi.org/10.1007/978-3-319-27101-9_26,http://home.zcu.cz/~pkral/papers/kral_micai15.pdf
+4bb03b27bc625e53d8d444c0ba3ee235d2f17e86,http://www.cs.utexas.edu/~grauman/papers/hwang_cvpr2010.pdf,,,http://vision.cs.utexas.edu/projects/tag/0735.pdf
+4b9b30066a05bdeb0e05025402668499ebf99a6b,,,https://doi.org/10.1109/ISPACS.2012.6473448,
+4b89cf7197922ee9418ae93896586c990e0d2867,http://www.cs.cmu.edu/~ftorre/paper1.pdf,,,http://www.cs.sfu.ca/~mori/research/papers/wang_cluster_people_cvpr06.pdf
+4b8c736524d548472d0725c971ee29240ae683f6,,,,http://doi.ieeecomputersociety.org/10.1109/AUTOID.2005.31
+4bc9a767d7e63c5b94614ebdc24a8775603b15c9,http://pdfs.semanticscholar.org/4bc9/a767d7e63c5b94614ebdc24a8775603b15c9.pdf,,,http://eprints-phd.biblio.unitn.it/1443/1/GZen_final_thesis.pdf
+4b519e2e88ccd45718b0fc65bfd82ebe103902f7,http://biometrics.cse.msu.edu/Publications/Face/LiParkJain_DiscriminativeModelAgeInvariantFR_TIFS11.pdf,,https://doi.org/10.1109/TIFS.2011.2156787,http://www.cse.msu.edu/biometrics/Publications/Face/LiParkJain_DiscriminativeModelAgeInvariantFR_TIFS11.pdf
+4b3f425274b0c2297d136f8833a31866db2f2aec,https://arxiv.org/pdf/1705.01567v2.pdf,,,http://www.vislab.ucr.edu/Biometrics2017/program_slides/TowardOpen-SetFaceRecognition.pdf
+4b7c110987c1d89109355b04f8597ce427a7cd72,http://pdfs.semanticscholar.org/4b7c/110987c1d89109355b04f8597ce427a7cd72.pdf,,,http://journal-cdn.frontiersin.org/article/108202/files/pubmed-zip/versions/1/pdf
+4bd088ba3f42aa1e43ae33b1988264465a643a1f,http://pdfs.semanticscholar.org/4bd0/88ba3f42aa1e43ae33b1988264465a643a1f.pdf,,,http://www.diva-portal.org/smash/get/diva2:239370/FULLTEXT01.pdf
+4bc4a7c4142e8b37389fddd1e2338298b8b56e96,,,,
+4bfce41cc72be315770861a15e467aa027d91641,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Branson_Active_Annotation_Translation_2014_CVPR_paper.pdf,,,http://www.vision.caltech.edu/~sbranson/files/cvpr14_annotation_translator.pdf
+4bd3de97b256b96556d19a5db71dda519934fd53,http://openaccess.thecvf.com/content_cvpr_2016/papers/Wen_Latent_Factor_Guided_CVPR_2016_paper.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Wen_Latent_Factor_Guided_CVPR_2016_paper.pdf
+4b04247c7f22410681b6aab053d9655cf7f3f888,http://pdfs.semanticscholar.org/60e5/0494dc26bd30e3c49b93ca85d0f79bf5c53f.pdf,,,https://arxiv.org/pdf/1501.04717v1.pdf
+4b7f21b48c7e0dc7334e36108f558d54642c17c0,,,https://doi.org/10.1109/WACV.2017.106,
+4b6387e608afa83ac8d855de2c9b0ae3b86f31cc,http://www.researchgate.net/profile/Heng_Yang3/publication/263813517_Face_Sketch_Landmarks_Localization_in_the_Wild/links/53d3dd3b0cf220632f3ce8b3.pdf,,https://doi.org/10.1109/LSP.2014.2333544,
+4b60e45b6803e2e155f25a2270a28be9f8bec130,http://www.cs.washington.edu/ai/Mobile_Robotics/postscripts/attribute-objects-icra-2013.pdf,,https://doi.org/10.1109/ICRA.2013.6630858,http://homes.cs.washington.edu/~lfb/paper/icra13.pdf
+4ba2f445fcbbad464f107b036c57aa807ac5c0c2,,,https://doi.org/10.1109/TCSVT.2014.2367357,
+4b5eeea5dd8bd69331bd4bd4c66098b125888dea,http://pdfs.semanticscholar.org/4b5e/eea5dd8bd69331bd4bd4c66098b125888dea.pdf,,,http://www.cs.uoi.gr/tech_reports//publications/PD-2016-1.pdf
+4bbbee93519a4254736167b31be69ee1e537f942,https://arxiv.org/pdf/1611.05125v2.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2017.16
+4b74f2d56cd0dda6f459319fec29559291c61bff,http://pdfs.semanticscholar.org/96d1/e2686725f69b38b510a75b716caf3a48b3e2.pdf,,https://doi.org/10.5244/C.26.101,http://www.bmva.org/bmvc/2012/BMVC/paper101/paper101.pdf
+4ba38262fe20fab3e4c80215147b498f83843b93,http://pdfs.semanticscholar.org/f2af/967e28c12de9d957c08ffbc7a982e4ccea1e.pdf,,https://doi.org/10.5244/C.23.39,http://www.bmva.org/bmvc/2009/Papers/Paper146/Abstract146.pdf
+4b94f531c203743a9f7f1e9dd009cdbee22ea197,,,https://doi.org/10.1109/ICSMC.2005.1571393,
+4b3eaedac75ac419c2609e131ea9377ba8c3d4b8,https://teresaproject.eu/wp-content/uploads/2015/07/kossaifi_tzimiro_pantic_icip_2014.pdf,,https://doi.org/10.1109/ICIP.2014.7025284,https://ibug.doc.ic.ac.uk/media/uploads/documents/kossaifi_tzimiro_pantic_icip_2014.pdf
+4b507a161af8a7dd41e909798b9230f4ac779315,http://pdfs.semanticscholar.org/5202/4d271f516c7d0dfa73009bf7537549ef74f7.pdf,,,http://www1.cs.columbia.edu/~belhumeur/conference/multiplex-iccv03.pdf
+4b02387c2db968a70b69d98da3c443f139099e91,http://pdfs.semanticscholar.org/4b02/387c2db968a70b69d98da3c443f139099e91.pdf,,,http://arxiv.org/abs/1609.06441
+4b6be933057d939ddfa665501568ec4704fabb39,http://pdfs.semanticscholar.org/59c4/c6ba21354675401a173eb6c70500b99571cd.pdf,,https://doi.org/10.1162/NECO_a_00233,http://www.dsi.unive.it/~pelillo/papers/Neural%20Computation%202012.pdf
+4b71d1ff7e589b94e0f97271c052699157e6dc4a,http://pdfs.semanticscholar.org/4b71/d1ff7e589b94e0f97271c052699157e6dc4a.pdf,,https://doi.org/10.1155/2008/748483,http://asp.eurasipjournals.com/content/pdf/1687-6180-2008-748483.pdf
+4b0a2937f64df66cadee459a32ad7ae6e9fd7ed2,https://arxiv.org/pdf/1705.07750v3.pdf,,,https://arxiv.org/pdf/1705.07750v2.pdf
+4b4ecc1cb7f048235605975ab37bb694d69f63e5,http://pdfs.semanticscholar.org/4b4e/cc1cb7f048235605975ab37bb694d69f63e5.pdf,,https://doi.org/10.1007/978-3-319-49409-8_36,http://arxiv.org/abs/1706.07524
+4be03fd3a76b07125cd39777a6875ee59d9889bd,http://homes.esat.kuleuven.be/~tuytelaa/Tuytelaars-BeyondConceptSearch-WIAMIS12.pdf,,https://doi.org/10.1109/WIAMIS.2012.6226770,
+4be774af78f5bf55f7b7f654f9042b6e288b64bd,http://pdfs.semanticscholar.org/4be7/74af78f5bf55f7b7f654f9042b6e288b64bd.pdf,,,https://arxiv.org/pdf/1603.01801v1.pdf
+4b321065f6a45e55cb7f9d7b1055e8ac04713b41,http://pdfs.semanticscholar.org/4b32/1065f6a45e55cb7f9d7b1055e8ac04713b41.pdf,,,http://www.researchgate.net/profile/Ricardo_Duarte6/publication/232708766_Affective_Computing_Models_for_Character_Animation/links/0fcfd508d0ec5a4a62000000.pdf
+4b605e6a9362485bfe69950432fa1f896e7d19bf,http://biometrics.cse.msu.edu/Publications/Face/BlantonAllenMillerKalkaJain_CVPRWB2016_HID.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2016_workshops/w4/papers/Blanton_A_Comparison_of_CVPR_2016_paper.pdf
+4b3dd18882ff2738aa867b60febd2b35ab34dffc,http://pdfs.semanticscholar.org/4b3d/d18882ff2738aa867b60febd2b35ab34dffc.pdf,,,http://www.cl.cam.ac.uk/~re227/publications/facialfeatureanalysis-AIAC2002.pdf
+4b9c47856f8314ecbe4d0efc65278c2ededb2738,,,https://doi.org/10.1109/LSP.2012.2188890,
+11a2ef92b6238055cf3f6dcac0ff49b7b803aee3,http://cs.adelaide.edu.au/~carneiro/publications/mainSPL.pdf,,https://doi.org/10.1109/ICIP.2015.7351701,
+11dc744736a30a189f88fa81be589be0b865c9fa,http://openaccess.thecvf.com/content_iccv_2015/papers/Liang_A_Unified_Multiplicative_ICCV_2015_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.288
+1176a74fb9351ac2de81c198c4861d78e58f172d,,,https://doi.org/10.1016/j.patrec.2011.03.023,
+1171e8a96ffb15fdb265aaba02be014a38137ad5,http://hal.cse.msu.edu/pdfs/papers/pdm-tifs-2015.pdf,,https://doi.org/10.1109/TIFS.2015.2434271,
+11a210835b87ccb4989e9ba31e7559bb7a9fd292,http://profdoc.um.ac.ir/articles/a/1020638.pdf,,https://doi.org/10.1109/ISDA.2010.5687029,
+11ba01ce7d606bab5c2d7e998c6d94325521b8a0,,,https://doi.org/10.1109/ICIP.2015.7350911,
+118ca3b2e7c08094e2a50137b1548ada7935e505,http://pdfs.semanticscholar.org/dc5c/273198b16dc615888256da74758f4a4b128b.pdf,,,http://arxiv.org/abs/1802.08936
+11aa527c01e61ec3a7a67eef8d7ffe9d9ce63f1d,http://pdfs.semanticscholar.org/11aa/527c01e61ec3a7a67eef8d7ffe9d9ce63f1d.pdf,,,http://www.its.caltech.edu/~whong/PDF/2015_Hong_PNAS.pdf
+11ddf5e47854e4e6109762835d2ce086bbdfbc5b,http://eprints.pascal-network.org/archive/00008322/01/schroff11.pdf,,,http://research.microsoft.com/users/antcrim/papers/Criminisi_iccv2007.pdf
+110919f803740912e02bb7e1424373d325f558a9,,,,http://doi.acm.org/10.1145/3123266.3123421
+11ad162b3165b4353df8d7b4153fb26d6a310d11,,,,
+113c22eed8383c74fe6b218743395532e2897e71,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Sapp_MODEC_Multimodal_Decomposable_2013_CVPR_paper.pdf,,,http://homes.cs.washington.edu/~taskar/pubs/modec_cvpr13.pdf
+110c55b440b7c6a1692da9d8ee52389e43f6e76e,http://cs.brown.edu/people/ls/Publications/wacv2015dai_supplement.pdf,,,http://web.engr.illinois.edu/~dhoiem/publications/dai_disney_wacv2015.pdf
+11408af8861fb0a977412e58c1a23d61b8df458c,http://www.me.cs.scitec.kobe-u.ac.jp/~takigu/pdf/2014/0265.pdf,,,
+11cc0774365b0cc0d3fa1313bef3d32c345507b1,http://pdfs.semanticscholar.org/11cc/0774365b0cc0d3fa1313bef3d32c345507b1.pdf,,https://doi.org/10.5244/C.19.24,http://www.bmva.org/bmvc/2005/papers/143/paper.pdf
+11f7f939b6fcce51bdd8f3e5ecbcf5b59a0108f5,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Caseiro_Rolling_Riemannian_Manifolds_2013_CVPR_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2013.13
+11e6cf1cbb33d67a3e3c87dcaf7031d6654bc0de,,,,http://doi.acm.org/10.1145/2522968.2522978
+113cd9e5a4081ce5a0585107951a0d36456ce7a8,,,https://doi.org/10.1109/ICSMC.2006.384939,
+11269e98f072095ff94676d3dad34658f4876e0e,http://www.me.cs.scitec.kobe-u.ac.jp/~takigu/pdf/2015/ACII2015_submission_70.pdf,,,http://www.infomus.org/Events/proceedings/ACII2015/papers/Main_Conference/M2_Poster/Poster_Teaser_5/ACII2015_submission_70.pdf
+1176c886afbd8685ecf0094450a02eb96b950f71,http://pdfs.semanticscholar.org/1176/c886afbd8685ecf0094450a02eb96b950f71.pdf,,https://doi.org/10.1016/j.neucom.2016.05.097,http://www.yugangjiang.info/publication/Neurocomputing_BayesianHashing.pdf
+113e5678ed8c0af2b100245057976baf82fcb907,http://www.humansensing.cs.cmu.edu/sites/default/files/4Jeni_Metrics.pdf,,,http://humansensing.cs.cmu.edu/sites/default/files/4Jeni_Metrics.pdf
+11c2d40fc63ecd88febadd8a9cac9521a6b7de66,,,https://doi.org/10.1109/ICSIPA.2011.6144081,
+11bda1f054effb3116115b0699d74abec3e93a4b,,,,
+11c04c4f0c234a72f94222efede9b38ba6b2306c,http://www.ece.northwestern.edu/~zli/new_home/MyPublications/conf/ACMMM08-action-recog.pdf,,,http://users.eecs.northwestern.edu/~zli/new_home/MyPublications/conf/ACMMM08-action-recog.pdf
+11ff2f54ecfda6c7f90ed84baf1cc5b4f07e726b,,,,
+1128a4f57148cec96c0ef4ae3b5a0fbf07efbad9,https://www.microsoft.com/en-us/research/wp-content/uploads/2016/08/icmr038-liA.pdf,,,http://doi.acm.org/10.1145/2911996.2912001
+1195f0bf8f745ba69da915203bcd79589b94aec5,,,https://doi.org/10.1016/j.procs.2010.11.004,
+1149c6ac37ae2310fe6be1feb6e7e18336552d95,http://pdfs.semanticscholar.org/1149/c6ac37ae2310fe6be1feb6e7e18336552d95.pdf,,https://doi.org/10.1007/11550822_89,http://www.researchgate.net/profile/Horst-Michael_Gross/publication/225173459_Classification_of_Face_Images_for_Gender_Age_Facial_Expression_and_Identity/links/0912f50a5227991f45000000.pdf
+11f17191bf74c80ad0b16b9f404df6d03f7c8814,http://pdfs.semanticscholar.org/11f5/c82e3a39b9c8b91370ef7286a748c19b658a.pdf,,,https://arxiv.org/pdf/1602.01921v1.pdf
+11367581c308f4ba6a32aac1b4a7cdb32cd63137,https://pdfs.semanticscholar.org/82c3/367ca6fc95e705aa8f2270265d82e9d8eedd.pdf,,,http://mplab.ucsd.edu/wordpress/wp-content/uploads/CVPR2008/WorkShops/data/papers/100.pdf
+11a47a91471f40af5cf00449954474fd6e9f7694,http://pdfs.semanticscholar.org/11a4/7a91471f40af5cf00449954474fd6e9f7694.pdf,,https://doi.org/10.3390/info7040061,http://www.mdpi.com/2078-2489/7/4/61/pdf
+1181f1146db7170b09f28f7cc51c42c63547d84b,,,,
+11fdff97f4511ae3d3691cfdeec5a19fa04db6ef,http://mclab.eic.hust.edu.cn/UpLoadFiles/Papers/SCA_TIP2016.pdf,,https://doi.org/10.1109/TIP.2016.2514498,
+1198572784788a6d2c44c149886d4e42858d49e4,http://pdfs.semanticscholar.org/1198/572784788a6d2c44c149886d4e42858d49e4.pdf,,,https://arxiv.org/pdf/1607.01354v1.pdf
+11f8d0a54e55c5e6537eef431cd548fa292ef90b,,,https://doi.org/10.1016/j.neucom.2017.05.042,
+110359824a0e3b6480102b108372793265a24a86,,,https://doi.org/10.1016/j.image.2016.03.011,
+1125760c14ea6182b85a09bf3f5bad1bdad43ef5,,,https://doi.org/10.1109/CVPR.2004.286,
+11fe6d45aa2b33c2ec10d9786a71c15ec4d3dca8,http://elderlab.apps01.yorku.ca/wp-content/uploads/2016/12/PrincePAMI08.pdf,,,http://www0.cs.ucl.ac.uk/staff/s.prince/Papers/PAMI_FaceRecPose.pdf
+1134a6be0f469ff2c8caab266bbdacf482f32179,http://pdfs.semanticscholar.org/1134/a6be0f469ff2c8caab266bbdacf482f32179.pdf,,,http://esatjournals.net/ijret/2015v04/i12/IJRET20150412017.pdf
+11b3877df0213271676fa8aa347046fd4b1a99ad,http://pdfs.semanticscholar.org/11b3/877df0213271676fa8aa347046fd4b1a99ad.pdf,,https://doi.org/10.1007/978-3-540-76390-1_48,https://filebox.ece.vt.edu/~parikh/Publications/ParikhChen_ACCV_2007_dISCOVER.pdf
+116d57b4e5dda41d72e497517f65159e6f12c517,,,,
+11b904c9180686574e6047bbd9868c354ca46cb4,,,,
+1130c38e88108cf68b92ecc61a9fc5aeee8557c9,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2A_058.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Feichtenhofer_Dynamically_Encoded_Actions_2015_CVPR_paper.pdf
+11b89011298e193d9e6a1d99302221c1d8645bda,http://openaccess.thecvf.com/content_iccv_2015/papers/Gao_Structured_Feature_Selection_ICCV_2015_paper.pdf,,https://doi.org/10.1109/ICCV.2015.484,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Gao_Structured_Feature_Selection_ICCV_2015_paper.pdf
+11a6593e6e35f95ebeb5233897d1d8bcad6f9c87,,,https://doi.org/10.1007/s11063-017-9615-5,
+11d73f4f19077e6806d05dc7ecd17fbeb15bdf39,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.28
+1135a818b756b057104e45d976546970ba84e612,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.118
+111a9645ad0108ad472b2f3b243ed3d942e7ff16,http://pdfs.semanticscholar.org/111a/9645ad0108ad472b2f3b243ed3d942e7ff16.pdf,,,http://www.lvc.ele.puc-rio.br/users/raul_feitosa/publications/2001/Facial%20Expression%20Classification%20Using.pdf
+1177977134f6663fff0137f11b81be9c64c1f424,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_003.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_003_ext.pdf
+11fdd940c9a23a34f7ab59809c26a02bce35c5f3,,,,
+11ac88aebe0230e743c7ea2c2a76b5d4acbfecd0,http://pdfs.semanticscholar.org/11ac/88aebe0230e743c7ea2c2a76b5d4acbfecd0.pdf,,https://doi.org/10.1007/978-3-319-64698-5_32,https://bib.irb.hr/datoteka/891183.MARCETIC_HCM.pdf
+117f164f416ea68e8b88a3005e55a39dbdf32ce4,http://www.cs.toronto.edu/~fidler/papers/fashionCVPR15.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1A_095.pdf
+7dda2eb0054eb1aeda576ed2b27a84ddf09b07d4,http://pdfs.semanticscholar.org/7dda/2eb0054eb1aeda576ed2b27a84ddf09b07d4.pdf,,,https://www.researchgate.net/profile/Keun-Chang_Kwak/publication/229011764_Face_Recognition_and_Representation_by_Tensor-based_MPCA_Approach/links/09e41509c4e166d6a3000000.pdf
+7d94fd5b0ca25dd23b2e36a2efee93244648a27b,http://pdfs.semanticscholar.org/7d94/fd5b0ca25dd23b2e36a2efee93244648a27b.pdf,,,http://arxiv.org/pdf/1608.06434v1.pdf
+7d81b804e23ee2bd04c1def6201b91be6de0d88a,,,,
+7d8c2d29deb80ceed3c8568100376195ce0914cb,https://arxiv.org/pdf/1708.01988v1.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2017.209
+7d8798e7430dcc68fcdbd93053c884fc44978906,,,,http://doi.acm.org/10.1145/2506364.2506369
+7d50df03d0c8a26eaaeaef47de68691f9ac73701,http://media-lab.engr.ccny.cuny.edu/Paper/2011/HCBA11.pdf,,https://doi.org/10.1109/CVPRW.2011.5981880,https://pdfs.semanticscholar.org/90fe/65a5075ed16b4e76f7b3b87cd3a59b5c145b.pdf
+7d306512b545df98243f87cb8173df83b4672b18,http://pdfs.semanticscholar.org/7d30/6512b545df98243f87cb8173df83b4672b18.pdf,,https://doi.org/10.1007/978-3-319-10705-9_45,http://www.cs.colostate.edu/~draper/papers/marrinan_enumath14.pdf
+7d98dcd15e28bcc57c9c59b7401fa4a5fdaa632b,http://pdfs.semanticscholar.org/7d98/dcd15e28bcc57c9c59b7401fa4a5fdaa632b.pdf,,,http://www.hds.utc.fr/~fdavoine/mypublications/wiamis04_3.pdf
+7df277c37ac75851684f926fd3fb4daced3e79f8,,,,
+7da9464dbae52c8bda13461a4f44420c333b0342,,,,
+7d41b67a641426cb8c0f659f0ba74cdb60e7159a,http://eprints.soton.ac.uk/389641/1/isba-16-camera.pdf,,https://doi.org/10.1109/ISBA.2016.7477240,https://eprints.soton.ac.uk/389641/1/isba-16-camera.pdf
+7d1688ce0b48096e05a66ead80e9270260cb8082,http://openaccess.thecvf.com/content_ICCV_2017_workshops/papers/w44/Saxen_Real_vs._Fake_ICCV_2017_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.363
+7d61b70d922d20c52a4e629b09465076af71ddfd,,,https://doi.org/10.1007/s10044-011-0258-2,
+7d53678ef6009a68009d62cd07c020706a2deac3,http://pdfs.semanticscholar.org/7d53/678ef6009a68009d62cd07c020706a2deac3.pdf,,https://doi.org/10.1007/978-3-540-71457-6_38,https://www.researchgate.net/profile/Whoi-Yul_Kim/publication/221055022_Facial_Feature_Point_Extraction_Using_the_Adaptive_Mean_Shape_in_Active_Shape_Model/links/00b7d517955c225f42000000.pdf
+7d7be6172fc2884e1da22d1e96d5899a29831ad2,http://pdfs.semanticscholar.org/7d7b/e6172fc2884e1da22d1e96d5899a29831ad2.pdf,,,https://arxiv.org/pdf/1703.01605v1.pdf
+7dcd3f58aa75f7ae96fdac9b1c2332a4f0b2dbd3,https://www.researchgate.net/profile/Symeon_Nikitidis/publication/221122322_Facial_expression_recognition_using_clustering_discriminant_Non-negative_Matrix_Factorization/links/54fee98e0cf2eaf210b4506c.pdf,,https://doi.org/10.1109/ICIP.2011.6116294,https://ibug.doc.ic.ac.uk/media/uploads/documents/sdnmf_icip.pdf
+7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22,http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf,,,https://people.cs.umass.edu/~elm/papers/LFW_survey.pdf
+7d73adcee255469aadc5e926066f71c93f51a1a5,http://mirlab.org/conference_papers/International_Conference/ICASSP%202016/pdfs/0001283.pdf,,https://doi.org/10.1109/ICASSP.2016.7471883,
+7d9fe410f24142d2057695ee1d6015fb1d347d4a,http://pdfs.semanticscholar.org/7d9f/e410f24142d2057695ee1d6015fb1d347d4a.pdf,,,http://ojs.academypublisher.com/index.php/jsw/article/download/jsw081127902795/8004
+7dd578878e84337d6d0f5eb593f22cabeacbb94c,http://pdfs.semanticscholar.org/7dd5/78878e84337d6d0f5eb593f22cabeacbb94c.pdf,,,http://www.cs.cmu.edu/~harini/journalTRC.pdf
+7d7870b7633678db2d39d4a5d69d10337ca827d9,,,,
+7d7b036ed01765c9473d695f029142128d442aaa,,,https://doi.org/10.1109/TIP.2018.2791180,
+7dffe7498c67e9451db2d04bb8408f376ae86992,http://pdfs.semanticscholar.org/7dff/e7498c67e9451db2d04bb8408f376ae86992.pdf,,,http://crcv.ucf.edu/ICCV13-Action-Workshop/index.files/NotebookPapers13/Paper%2039%20(Supplementary).pdf
+7dc498d45f9fcb97acee552c6f587b65d5122c35,,,https://doi.org/10.1109/ICIP.2015.7351618,
+7d3f6dd220bec883a44596ddec9b1f0ed4f6aca2,http://maths.dur.ac.uk/users/kasper.peeters/pdf/face_recognition/PCA/Togneri2010LinearRegressionFaceRecognition.pdf,,,http://staffhome.ecm.uwa.edu.au/~00014742/research/papers/IEEETPAMI_32_2010.pdf
+7de386bf2a1b2436c836c0cc1f1f23fccb24aad6,http://pdfs.semanticscholar.org/7de3/86bf2a1b2436c836c0cc1f1f23fccb24aad6.pdf,,,http://www.cts.umn.edu/Publications/ResearchReports/pdfdownload.pl?id=802
+7de8a8b437ec7a18e395be9bf7c8f2d502025cc6,,,https://doi.org/10.1109/SIU.2017.7960528,
+29ce6b54a87432dc8371f3761a9568eb3c5593b0,https://kar.kent.ac.uk/43222/1/Yatie_EST2013_vfinal.pdf,,,http://doi.ieeecomputersociety.org/10.1109/EST.2013.8
+2914e8c62f0432f598251fae060447f98141e935,http://pdfs.semanticscholar.org/2914/e8c62f0432f598251fae060447f98141e935.pdf,,,http://digitalcommons.unl.edu/cgi/viewcontent.cgi?article=1122&context=computerscidiss
+292eba47ef77495d2613373642b8372d03f7062b,http://pdfs.semanticscholar.org/292e/ba47ef77495d2613373642b8372d03f7062b.pdf,,,http://arxiv.org/pdf/1506.04340v1.pdf
+29e96ec163cb12cd5bd33bdf3d32181c136abaf9,http://pdfs.semanticscholar.org/29e9/6ec163cb12cd5bd33bdf3d32181c136abaf9.pdf,,,http://www.cad.zju.edu.cn/home/dengcai/Publication/TR/UIUCDCS-R-2006-2748.pdf
+29e793271370c1f9f5ac03d7b1e70d1efa10577c,http://pdfs.semanticscholar.org/29e7/93271370c1f9f5ac03d7b1e70d1efa10577c.pdf,,,http://www.sersc.org/journals/IJSIP/vol6_no5/37.pdf
+298c2be98370de8af538c06c957ce35d00e93af8,,,https://doi.org/10.1109/IPTA.2016.7820988,
+29d3ed0537e9ef62fd9ccffeeb72c1beb049e1ea,http://www.umiacs.umd.edu/~nshroff/DomainAdapt.pdf,,,http://doi.acm.org/10.1145/2683483.2683499
+29c7dfbbba7a74e9aafb6a6919629b0a7f576530,http://pdfs.semanticscholar.org/29c7/dfbbba7a74e9aafb6a6919629b0a7f576530.pdf,,,http://cbcl.mit.edu/publications/theses/thesis-masters-fischer-robert.pdf
+292c6b743ff50757b8230395c4a001f210283a34,https://labicvl.github.io/docs/pubs/Oscar_VISAPP_2014.pdf,,https://doi.org/10.5220/0004695104780485,http://www.iis.ee.ic.ac.uk/icvl/doc/VISAPP2014.pdf
+29fc4de6b680733e9447240b42db13d5832e408f,http://pdfs.semanticscholar.org/29fc/4de6b680733e9447240b42db13d5832e408f.pdf,,,http://www.sersc.org/journals/IJMUE/vol10_no3_2015/4.pdf
+29c1f733a80c1e07acfdd228b7bcfb136c1dff98,http://pdfs.semanticscholar.org/29c1/f733a80c1e07acfdd228b7bcfb136c1dff98.pdf,,,https://arxiv.org/pdf/1608.02318v2.pdf
+29f0a868644462aa7ebc21f4510d4209932a1b8c,http://yamdrok.stanford.edu/crowd/icmr.pdf,,,http://doi.acm.org/10.1145/2578726.2578775
+29322b9a3744afaa5fc986b805d9edb6ff5ea9fe,,,https://doi.org/10.1109/TNNLS.2011.2178037,
+29f27448e8dd843e1c4d2a78e01caeaea3f46a2d,http://pdfs.semanticscholar.org/29f2/7448e8dd843e1c4d2a78e01caeaea3f46a2d.pdf,,https://doi.org/10.1016/j.patcog.2014.10.012,https://www.researchgate.net/profile/Ngo_Thanh_Trung/publication/270053105_Similar_gait_action_recognition_using_an_inertial_sensor/links/550f6a8b0cf2752610a03834.pdf?origin=publication_list
+294d1fa4e1315e1cf7cc50be2370d24cc6363a41,http://pdfs.semanticscholar.org/294d/1fa4e1315e1cf7cc50be2370d24cc6363a41.pdf,,,https://www.researchgate.net/profile/Ivan_Bajla/publication/238588191_A_modular_non-negative_matrix_factorization_for_parts-based_object_recognition_using_subspace_representation/links/5444f96a0cf2e6f0c0fbfdd4.pdf
+29d414bfde0dfb1478b2bdf67617597dd2d57fc6,http://pdfs.semanticscholar.org/29d4/14bfde0dfb1478b2bdf67617597dd2d57fc6.pdf,,,http://www.ece.uvic.ca/~wslu/Publications/Lu-Journal/10-2J.pdf
+2912c3ea67678a1052d7d5cbe734a6ad90fc360e,http://pdfs.semanticscholar.org/2912/c3ea67678a1052d7d5cbe734a6ad90fc360e.pdf,,,http://staff.science.uva.nl/~rvalenti/publications/ASCI07.pdf
+2945cc9e821ab87fa17afc8802f3858435d1264c,,,https://doi.org/10.1109/ICPR.2016.7899839,
+29f4ac49fbd6ddc82b1bb697820100f50fa98ab6,http://dhoiem.cs.illinois.edu/publications/acvhl2010_annotation_ian.pdf,,https://doi.org/10.1109/CVPRW.2010.5543183,http://www.cs.cmu.edu/~dhoiem/publications/acvhl2010_annotation_ian.pdf
+2910fcd11fafee3f9339387929221f4fc1160973,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2013/W16/papers/Becker_Evaluating_Open-Universe_Face_2013_CVPR_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2013.133
+29659b6fc4dceb117cec687d8accda5f514080ed,,,,
+29479bb4fe8c04695e6f5ae59901d15f8da6124b,http://www.mirlab.org/conference_papers/International_Conference/ACM%202005/docs/mm31.pdf,,,http://doi.acm.org/10.1145/1101149.1101155
+2960500033eb31777ed1af1fcb133dcab1b4a857,,,,http://doi.acm.org/10.1145/3005467.3005471
+290136947fd44879d914085ee51d8a4f433765fa,http://www.cse.msu.edu/biometrics/Publications/Face/KlareJain_TaxonomyFacialFeatures_BTAS10.pdf,,https://doi.org/10.1109/BTAS.2010.5634533,http://biometrics.cse.msu.edu/Publications/Face/KlareJain_TaxonomyFacialFeatures_BTAS10.pdf
+291f527598c589fb0519f890f1beb2749082ddfd,http://pdfs.semanticscholar.org/3215/ceb94227451a958bcf6b1205c710d17e53f5.pdf,,https://doi.org/10.1007/978-3-642-15555-0_13,http://courses.cs.washington.edu/courses/cse590v/11au/cse590v_07_faces_social_context.pdf
+291265db88023e92bb8c8e6390438e5da148e8f5,http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf,,https://doi.org/10.1007/978-3-319-46487-9_6,http://arxiv.org/pdf/1607.08221v1.pdf
+297d3df0cf84d24f7efea44f87c090c7d9be4bed,http://pdfs.semanticscholar.org/297d/3df0cf84d24f7efea44f87c090c7d9be4bed.pdf,,https://doi.org/10.1007/3-540-45783-6_68,http://www.ri.cmu.edu/pub_files/pub3/krueger_volker_2002_1/krueger_volker_2002_1.pdf
+29b86534d4b334b670914038c801987e18eb5532,http://www.cs.toronto.edu/~makarand/papers/ICVGIP2014.pdf,,,https://cvhci.anthropomatik.kit.edu/~mtapaswi/presentations/2014_12_ICVGIP.pdf
+29631ca6cff21c9199c70bcdbbcd5f812d331a96,http://pdfs.semanticscholar.org/2963/1ca6cff21c9199c70bcdbbcd5f812d331a96.pdf,,,http://ftp.ncbi.nlm.nih.gov/pub/pmc/59/73/pone.0139827.PMC4605725.pdf
+2965d092ed72822432c547830fa557794ae7e27b,http://pdfs.semanticscholar.org/f038/9424ab8c27e01843931fcbef7e3ca997e891.pdf,,,http://conradsanderson.id.au/pdfs/sanin_phd_thesis.pdf
+291ce7be8daa99848bf13c32b237ad823d5738e9,,,,
+2983efadb1f2980ab5ef20175f488f77b6f059d7,http://pdfs.semanticscholar.org/2983/efadb1f2980ab5ef20175f488f77b6f059d7.pdf,,,http://www.researchgate.net/profile/Luis_Encarnacao2/publication/216183331_Faces_of_emotion_in_human-computer_interaction/links/0a85e53c6a0d27707b000000.pdf
+2911e7f0fb6803851b0eddf8067a6fc06e8eadd6,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Jung_Joint_Fine-Tuning_in_ICCV_2015_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.341
+29f298dd5f806c99951cb434834bc8dcc765df18,,,https://doi.org/10.1109/ICPR.2016.7899837,
+293d69d042fe9bc4fea256c61915978ddaf7cc92,,,https://doi.org/10.1007/978-981-10-7302-1_6,
+29fd98f096fc9d507cd5ee7d692600b1feaf7ed1,,,,http://doi.acm.org/10.1145/2988257.2988270
+2921719b57544cfe5d0a1614d5ae81710ba804fa,http://pdfs.semanticscholar.org/2921/719b57544cfe5d0a1614d5ae81710ba804fa.pdf,,,http://www.iaeng.org/publication/IMECS2014/IMECS2014_pp441-445.pdf
+29a9e9b5926e65512c25c845cceba42fc1be2958,,,,
+29a013b2faace976f2c532533bd6ab4178ccd348,http://or.nsfc.gov.cn/bitstream/00001903-5/94894/1/1000006589627.pdf,,https://doi.org/10.1109/TGRS.2013.2253559,
+29921072d8628544114f68bdf84deaf20a8c8f91,https://arxiv.org/pdf/1610.03670v4.pdf,,https://doi.org/10.1109/WACV.2017.64,http://arxiv.org/abs/1610.03670
+2969f822b118637af29d8a3a0811ede2751897b5,http://iip.ict.ac.cn/sites/default/files/publication/2013_ICCV_xwzhao_Cascaded%20Shape%20Space%20Pruning%20for%20Robust%20Facial%20Landmark%20Detection.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2013.132
+29756b6b16d7b06ea211f21cdaeacad94533e8b4,http://pdfs.semanticscholar.org/2975/6b6b16d7b06ea211f21cdaeacad94533e8b4.pdf,,,http://rcs.cic.ipn.mx/2016_112/Thresholding%20Approach%20based%20on%20GPU%20for%20Facial%20Expression%20Recognition.pdf
+293193d24d5c4d2975e836034bbb2329b71c4fe7,http://pdfs.semanticscholar.org/2931/93d24d5c4d2975e836034bbb2329b71c4fe7.pdf,,,http://rcs.cic.ipn.mx/2016_129/Building%20a%20Corpus%20of%20Facial%20Expressions%20for%20Learning-Centered%20Emotions.pdf
+294bd7eb5dc24052237669cdd7b4675144e22306,http://pdfs.semanticscholar.org/294b/d7eb5dc24052237669cdd7b4675144e22306.pdf,,,http://www.ijsr.net/archive/v4i2/SUB151446.pdf
+2988f24908e912259d7a34c84b0edaf7ea50e2b3,http://pdfs.semanticscholar.org/a779/e9432c3b6bfdcdbb1827757c3b8bf7c3aa4a.pdf,,https://doi.org/10.5244/C.22.47,http://www.bmva.org/bmvc/2008/papers/252.pdf
+29156e4fe317b61cdcc87b0226e6f09e416909e0,http://pdfs.semanticscholar.org/b880/78d284c9f77172dd23970522856a7042c961.pdf,,,https://arxiv.org/pdf/1706.00906v1.pdf
+29f0414c5d566716a229ab4c5794eaf9304d78b6,http://pdfs.semanticscholar.org/29f0/414c5d566716a229ab4c5794eaf9304d78b6.pdf,,https://doi.org/10.1155/2008/579416,http://biometrics.cse.msu.edu/Publications/SecureBiometrics/JainNandakumarNagar_TemplateSecuritySurvey_EURASIP08.pdf
+29908288392a9326d7a2996c6cd6b3e6cb137265,http://people.cs.ubc.ca/~pcarbo/ijcvss.pdf,,https://doi.org/10.1007/s11263-007-0067-7,http://www.cs.ubc.ca/spider/kueck/papers/LearningToRecognizeObjectsWithLittleSupervision.pdf
+293ade202109c7f23637589a637bdaed06dc37c9,http://pdfs.semanticscholar.org/293a/de202109c7f23637589a637bdaed06dc37c9.pdf,,,http://ibug.doc.ic.ac.uk/media/uploads/sup/antonakos2016adaptive_supp.pdf
+7c8909da44e89a78fe88e815c83a4ced34f99149,,,,http://doi.ieeecomputersociety.org/10.1109/ICPR.2010.326
+7c61d21446679776f7bdc7afd13aedc96f9acac1,http://pdfs.semanticscholar.org/e199/9cee8e6d717ad1181ae9e17c366e152e805e.pdf,,,http://arxiv.org/abs/1706.05028
+7cee802e083c5e1731ee50e731f23c9b12da7d36,http://pdfs.semanticscholar.org/7cee/802e083c5e1731ee50e731f23c9b12da7d36.pdf,,,https://arxiv.org/pdf/1803.02181v1.pdf
+7c7ab59a82b766929defd7146fd039b89d67e984,https://www.microsoft.com/en-us/research/wp-content/uploads/2016/11/wacv2014_ChaZhang.pdf,,,http://research.microsoft.com/en-us/um/people/chazhang/publications/wacv2014_chazhang.pdf
+7ca337735ec4c99284e7c98f8d61fb901dbc9015,http://vision.psych.umn.edu/users/schrater/Papers/Veeretal05.pdf,,,http://www.cs.cmu.edu/~harini/itsc05.pdf
+7c45b5824645ba6d96beec17ca8ecfb22dfcdd7f,http://pdfs.semanticscholar.org/7c45/b5824645ba6d96beec17ca8ecfb22dfcdd7f.pdf,,,http://www.lrec-conf.org/proceedings/lrec2010/slides/772.pdf
+7c0a6824b556696ad7bdc6623d742687655852db,http://2010.telfor.rs/files/radovi//TELFOR2010_05_35.pdf,,,http://2010.telfor.rs/files/radovi/TELFOR2010_05_35.pdf
+7c95449a5712aac7e8c9a66d131f83a038bb7caa,http://pdfs.semanticscholar.org/7c95/449a5712aac7e8c9a66d131f83a038bb7caa.pdf,,,http://eprints.whiterose.ac.uk/102935/1/Sutherland_et_al_accepted_15_June_2016_BJP.pdf
+7c4c442e9c04c6b98cd2aa221e9d7be15efd8663,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Wang_Classifier_Learning_With_2015_CVPR_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2015.7299131
+7c54240c23d42703ddc85089d167f4985614cc3a,,,,
+7cee0311e71dca540aaf3d87bef3a6c97ca39bc3,,,,
+7c3e09e0bd992d3f4670ffacb4ec3a911141c51f,http://pdfs.semanticscholar.org/7c3e/09e0bd992d3f4670ffacb4ec3a911141c51f.pdf,,,http://arxiv.org/pdf/1609.00162v1.pdf
+7c2ec6f4ab3eae86e0c1b4f586e9c158fb1d719d,http://pdfs.semanticscholar.org/7c2e/c6f4ab3eae86e0c1b4f586e9c158fb1d719d.pdf,,https://doi.org/10.1007/978-3-642-25085-9_50,http://homepage.tudelft.nl/a9p19/papers/ciarp_11_disspace.pdf
+7cf8a841aad5b7bdbea46a7bb820790e9ce12d0b,http://pdfs.semanticscholar.org/7cf8/a841aad5b7bdbea46a7bb820790e9ce12d0b.pdf,,,http://www.cs.usu.edu/~xqi/Teaching/REU06/Website/Crystal/CrystalFinalPaper.pdf
+7c9622ad1d8971cd74cc9e838753911fe27ccac4,http://pdfs.semanticscholar.org/7c96/22ad1d8971cd74cc9e838753911fe27ccac4.pdf,,https://doi.org/10.1007/978-3-319-16808-1_6,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ACCV_2014/pages/PDF/574.pdf
+7c457c9a658327af6f6490729b4cab1239c22005,,,https://doi.org/10.1109/ACCESS.2017.2672829,
+7c2c9b083817f7a779d819afee383599d2e97ed8,http://pdfs.semanticscholar.org/bcad/d9c086ccd2f217da25f9550b06a429d53011.pdf,,,https://arxiv.org/pdf/1707.04092v1.pdf
+7c45339253841b6f0efb28c75f2c898c79dfd038,http://vis-www.cs.umass.edu/papers/iccv07alignment.pdf,,,http://people.cs.umass.edu/~elm/papers/iccv07alignment.pdf
+7c7b0550ec41e97fcfc635feffe2e53624471c59,http://cvrr.ucsd.edu/publications/2014/headhandeye.pdf,,https://doi.org/10.1109/ICPR.2014.124,https://eshed1.github.io/papers/headhandeye.pdf
+7ce03597b703a3b6754d1adac5fbc98536994e8f,http://pdfs.semanticscholar.org/7ce0/3597b703a3b6754d1adac5fbc98536994e8f.pdf,,,https://arxiv.org/pdf/1803.09672v1.pdf
+7c36afc9828379de97f226e131390af719dbc18d,http://www.cs.cornell.edu/~chenxiawu/papers/ufna.pdf,,,http://doi.acm.org/10.1145/2393347.2393383
+7c119e6bdada2882baca232da76c35ae9b5277f8,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_SMC_2009/PDFs/1070.pdf,,https://doi.org/10.1109/ICSMC.2009.5346339,
+7c42371bae54050dbbf7ded1e7a9b4109a23a482,http://pdfs.semanticscholar.org/7c42/371bae54050dbbf7ded1e7a9b4109a23a482.pdf,,,http://ccis2k.org/iajit/?Itemid=373&id=97&option=com_content&task=blogcategory
+7c953868cd51f596300c8231192d57c9c514ae17,http://courses.cs.washington.edu/courses/cse590v/13au/CVPR13_FaceDetection.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2013.444
+7c6dbaebfe14878f3aee400d1378d90d61373921,http://pdfs.semanticscholar.org/7c6d/baebfe14878f3aee400d1378d90d61373921.pdf,,,http://www.wseas.us/e-library/conferences/2005corfu/c1/papers/498-618.pdf
+7c25213a7fa5fe13199d3112613ea0b9045320d1,,,,
+7c13fa0c742123a6a927771ce67da270492b588c,,,,http://doi.acm.org/10.1145/3152114
+7c1e1c767f7911a390d49bed4f73952df8445936,http://cmp.felk.cvut.cz/~zimmerk/zimmermann-TPAMI-2014.pdf,,,http://cmp.felk.cvut.cz/~hurycd1/data/publications/zimmermann-TPAMI2014.pdf
+7c349932a3d083466da58ab1674129600b12b81c,http://pdfs.semanticscholar.org/7c34/9932a3d083466da58ab1674129600b12b81c.pdf,,,https://drum.lib.umd.edu/bitstream/handle/1903/18230/Yang_umd_0117E_16964.pdf?isAllowed=y&sequence=1
+1648cf24c042122af2f429641ba9599a2187d605,http://www.eurecom.fr/en/publication/5333/download/sec-publi-5333.pdf,,https://doi.org/10.1109/BTAS.2017.8272698,
+160259f98a6ec4ec3e3557de5e6ac5fa7f2e7f2b,https://infoscience.epfl.ch/record/207802/files/Discriminant-multilabel-Yuce.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2015.7284871
+16671b2dc89367ce4ed2a9c241246a0cec9ec10e,http://www.bsp.brain.riken.jp/publications/2010/PAMI-clustering-He-cichocki.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.15
+163ba5a998973f9ead6be0ca873aed5934d5022e,,,https://doi.org/10.1109/ACPR.2013.53,
+16bce9f940bb01aa5ec961892cc021d4664eb9e4,http://www.cise.ufl.edu/~dihong/assets/TIST-2014-10-0214.R2.pdf,,,http://doi.acm.org/10.1145/2807705
+16892074764386b74b6040fe8d6946b67a246a0b,http://pdfs.semanticscholar.org/5f92/7118a5634790fe660fea91aea163b7065ae2.pdf,,,http://journal-cdn.frontiersin.org/article/96929/files/pubmed-zip/versions/1/pdf
+16395b40e19cbc6d5b82543039ffff2a06363845,https://arxiv.org/pdf/1605.03222v1.pdf,,,http://arxiv.org/pdf/1605.03222v1.pdf
+16b0c171fb094f677fcdf78bbb9aaef0d5404942,,,https://doi.org/10.1109/TIP.2017.2733739,
+1617f56c86bf8ea61de62062a97961d23fcf03d3,,,https://doi.org/10.1007/s11390-015-1540-3,
+1677d29a108a1c0f27a6a630e74856e7bddcb70d,http://pdfs.semanticscholar.org/1677/d29a108a1c0f27a6a630e74856e7bddcb70d.pdf,,https://doi.org/10.1007/978-3-642-33718-5_61,http://www4.comp.polyu.edu.hk/~cslzhang/paper/conf/MRR_eccv12.pdf
+16c884be18016cc07aec0ef7e914622a1a9fb59d,http://pdfs.semanticscholar.org/16c8/84be18016cc07aec0ef7e914622a1a9fb59d.pdf,,,http://lear.inrialpes.fr/pubs/2010/Gui10/ThesisGuillaumin.pdf
+162dfd0d2c9f3621d600e8a3790745395ab25ebc,http://cse.seu.edu.cn/people/xgeng/LDL/resource/cvpr14a.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Geng_Head_Pose_Estimation_2014_CVPR_paper.pdf
+1606b1475e125bba1b2d87bcf1e33b06f42c5f0d,http://users.eecs.northwestern.edu/~xsh835/CVPR2015_CasCNN.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Li_A_Convolutional_Neural_2015_CVPR_paper.pdf
+16f940b4b5da79072d64a77692a876627092d39c,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR_2009_WS/data/papers/10/10.pdf,,https://doi.org/10.1109/CVPRW.2009.5204259,http://www.psy.miami.edu/faculty/dmessinger/c_c/rsrcs/rdgs/emot/MMahoor_CVPR_uploaded.pdf
+1672becb287ae3eaece3e216ba37677ed045db55,,,https://doi.org/10.1016/j.eswa.2015.10.047,
+16572c545384174f8136d761d2b0866e968120a8,http://pdfs.semanticscholar.org/1657/2c545384174f8136d761d2b0866e968120a8.pdf,,https://doi.org/10.1007/978-3-319-10578-9_27,http://ca.cs.cmu.edu/sites/default/files/sequential.pdf
+16820ccfb626dcdc893cc7735784aed9f63cbb70,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W12/papers/Azarmehr_Real-Time_Embedded_Age_2015_CVPR_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2015.7301367
+1630e839bc23811e340bdadad3c55b6723db361d,http://pdfs.semanticscholar.org/9fc9/f22e9e28eab53d426e9d848c0d7dcd2c2459.pdf,,https://doi.org/10.1016/j.cviu.2014.02.010,http://www.bmva.org/bmvc/2012/BMVC/paper027/paper027.pdf
+167f07b9d2babb8920acfa320ab04ee2758b5db6,http://eprints.pascal-network.org/archive/00008391/01/paper_express.pdf,,,http://www.robots.ox.ac.uk/~vgg/rg/papers/metric_face.pdf
+16eaa26a84468b27e559215db01c53286808ec2a,,,https://doi.org/10.1007/s11263-015-0859-0,
+16286fb0f14f6a7a1acc10fcd28b3ac43f12f3eb,http://pdfs.semanticscholar.org/1628/6fb0f14f6a7a1acc10fcd28b3ac43f12f3eb.pdf,,,http://www.pitt.edu/~jeffcohn/biblio/Embarrassment.pdf
+1667a77db764e03a87a3fd167d88b060ef47bb56,http://pdfs.semanticscholar.org/1667/a77db764e03a87a3fd167d88b060ef47bb56.pdf,,https://doi.org/10.1007/978-3-319-71249-9_6,http://arxiv.org/abs/1706.09317
+169618b8dc9b348694a31c6e9e17b989735b4d39,http://vllab.ucmerced.edu/hylee/publication/ICCV17_OPN.pdf,,,http://openaccess.thecvf.com/content_ICCV_2017/papers/Lee_Unsupervised_Representation_Learning_ICCV_2017_paper.pdf
+16e95a907b016951da7c9327927bb039534151da,http://pdfs.semanticscholar.org/16e9/5a907b016951da7c9327927bb039534151da.pdf,,,http://journal.iis.sinica.edu.tw/paper/1/170008-2.pdf?cd=C0B3E61185F91E296
+16c1b592d85d13f1ba4eff0afb4441bb78650785,,,https://doi.org/10.1109/TIP.2017.2685343,
+16d9b983796ffcd151bdb8e75fc7eb2e31230809,http://pdfs.semanticscholar.org/16d9/b983796ffcd151bdb8e75fc7eb2e31230809.pdf,,,http://www.cl.cam.ac.uk/~pr10/publications/eg18.pdf
+1679943d22d60639b4670eba86665371295f52c3,http://pdfs.semanticscholar.org/1679/943d22d60639b4670eba86665371295f52c3.pdf,,https://doi.org/10.1016/j.cviu.2007.12.001,https://www.researchgate.net/profile/Hasan_Demirel/publication/222435585_Facial_feature_extraction_using_complex_dual-tree_wavelet_transform/links/09e41507fe3d2439a4000000.pdf
+163d0e6ea8c8b88b4383a4eaa740870e2458b9b0,,,https://doi.org/10.1007/978-3-319-71928-3_18,
+162c33a2ec8ece0dc96e42d5a86dc3fedcf8cd5e,https://research-information.bristol.ac.uk/files/75922781/Ioannis_Pitas_Large_scale_classification_by_an_approximate_least_squares_one_class_support_vector_machine_ensemble_2015.pdf,,https://doi.org/10.1109/Trustcom.2015.555,http://www.cs.tut.fi/~iosifidi/files/conference/2015_BigDataSE_ALSOCSVM.pdf?dl=0
+1610d2d4947c03a89c0fda506a74ba1ae2bc54c2,http://research.cs.rutgers.edu/~hxp1/rc_images/hai_facetrackextreme_3dv2016.pdf,,,http://doi.ieeecomputersociety.org/10.1109/3DV.2016.54
+1659a8b91c3f428f1ba6aeba69660f2c9d0a85c6,http://www.dcs.gla.ac.uk/~vincia/papers/shortsurvey.pdf,,https://doi.org/10.1109/ICSMC.2011.6083695,http://www.cmpe.boun.edu.tr/~salah/salah11recent.pdf
+169076ffe5e7a2310e98087ef7da25aceb12b62d,http://pdfs.semanticscholar.org/1690/76ffe5e7a2310e98087ef7da25aceb12b62d.pdf,,,http://www.ursulakhess.com/resources/HDH15b.pdf
+167736556bea7fd57cfabc692ec4ae40c445f144,http://pdfs.semanticscholar.org/1677/36556bea7fd57cfabc692ec4ae40c445f144.pdf,,https://doi.org/10.3389/fict.2015.00028,https://ivi.fnwi.uva.nl/isis/publications/2016/JainFICT2016/JainFICT2016.pdf
+167ea1631476e8f9332cef98cf470cb3d4847bc6,http://www.kevinjing.com/visual_search_at_pinterest.pdf,,,http://arxiv.org/pdf/1505.07647v1.pdf
+16fc82d44188eb49a151bd5836a29911b3bfabcb,,,https://doi.org/10.1007/978-981-10-7302-1_50,
+161eb88031f382e6a1d630cd9a1b9c4bc6b47652,http://arxiv.org/pdf/1505.04026v1.pdf,,,http://arxiv.org/pdf/1505.04026.pdf
+420782499f38c1d114aabde7b8a8104c9e40a974,http://openaccess.thecvf.com/content_cvpr_2016/papers/Simo-Serra_Fashion_Style_in_CVPR_2016_paper.pdf,,,http://hi.cs.waseda.ac.jp/~esimo/publications/SimoSerraCVPR2016.pdf
+42e3dac0df30d754c7c7dab9e1bb94990034a90d,https://arxiv.org/pdf/1311.5591v2.pdf,,,http://arxiv.org/abs/1311.5591
+42441f1fee81c8fd42a74504df21b3226a648739,,,https://doi.org/10.1007/s11554-008-0072-2,
+4217473596b978f13a211cdf47b7d3f6588c785f,http://biometrics.cse.msu.edu/Publications/Face/OttoKlareJain_EfficientApproachClusteringFaceImages_ICB15.pdf,,https://doi.org/10.1109/ICB.2015.7139091,http://www.cse.msu.edu/rgroups/biometrics/Publications/Face/OttoKlareJain_EfficientApproachClusteringFaceImages_ICB15.pdf
+4223666d1b0b1a60c74b14c2980069905088edc6,http://pdfs.semanticscholar.org/4223/666d1b0b1a60c74b14c2980069905088edc6.pdf,,https://doi.org/10.1007/978-3-319-10599-4_20,http://www.math.nus.edu.sg/~matjh/depository/ECCV_2014_incoherent.pdf
+42afe6d016e52c99e2c0d876052ade9c192d91e7,https://ibug.doc.ic.ac.uk/media/uploads/documents/ValstarEtAl-ICMI2006-FINAL.pdf,,,http://dev.pubs.doc.ic.ac.uk/brow-deception/brow-deception.pdf
+42765c170c14bd58e7200b09b2e1e17911eed42b,http://pdfs.semanticscholar.org/4276/5c170c14bd58e7200b09b2e1e17911eed42b.pdf,,,http://cdn.intechopen.com/pdfs/36481/InTech-Feature_extraction_based_on_wavelet_moments_and_moment_invariants_in_machine_vision_systems.pdf
+429c3588ce54468090cc2cf56c9b328b549a86dc,http://pdfs.semanticscholar.org/429c/3588ce54468090cc2cf56c9b328b549a86dc.pdf,,https://doi.org/10.1016/j.patcog.2009.11.023,http://mi.eng.cam.ac.uk/~cipolla/publications/article/2010-PR-thermal-faces.pdf
+4268ae436db79c4eee8bc06e9475caff3ff70d57,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.146
+42cc9ea3da1277b1f19dff3d8007c6cbc0bb9830,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w11/papers/Saxena_Coordinated_Local_Metric_ICCV_2015_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2015.56
+42350e28d11e33641775bef4c7b41a2c3437e4fd,http://mmlab.ie.cuhk.edu.hk/archive/2007/IP07_face02.pdf,,https://doi.org/10.1109/TIP.2006.884929,
+42fff5b37006009c2dbfab63c0375c7c7d7d8ee3,,,https://doi.org/10.1007/s11042-014-2228-3,
+42e155ea109eae773dadf74d713485be83fca105,http://www.eurasip.org/Proceedings/Eusipco/Eusipco2014/HTML/papers/1569924805.pdf,http://ieeexplore.ieee.org/document/6952588/,,
+426913f890f07a5d79e6c23b83cd928ffc00e494,http://www2012.wwwconference.org/proceedings/proceedings/p939.pdf,,,http://doi.acm.org/10.1145/2187836.2187962
+4223917177405eaa6bdedca061eb28f7b440ed8e,http://pdfs.semanticscholar.org/4223/917177405eaa6bdedca061eb28f7b440ed8e.pdf,,,https://arxiv.org/pdf/1601.05644v1.pdf
+42c9394ca1caaa36f535721fa9a64b2c8d4e0dee,http://pdfs.semanticscholar.org/5d2d/208fc245bb49148bffb3076b0660b98b4466.pdf,,,https://people.eecs.berkeley.edu/~jhoffman/papers/Luo_nips2017.pdf
+4270460b8bc5299bd6eaf821d5685c6442ea179a,http://www.cs.technion.ac.il/~ron/PAPERS/BronBronBrucKimIJCV09.pdf,,https://doi.org/10.1007/s11263-008-0147-3,http://visl.technion.ac.il/bron/publications/BroBroBruKimIJCV08.pdf
+4205cb47ba4d3c0f21840633bcd49349d1dc02c1,http://www.utdallas.edu/~cxc123730/ICIP_2017.pdf,,https://doi.org/10.1109/ICIP.2017.8296441,
+42ded74d4858bea1070dadb08b037115d9d15db5,http://pdfs.semanticscholar.org/42de/d74d4858bea1070dadb08b037115d9d15db5.pdf,,,http://groups.csail.mit.edu/icelab/sites/default/files/pdf/kao2015exigent.pdf
+42a5dc91852c8c14ed5f4c3b451c9dc98348bc02,,,,http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2016.021
+42f6f5454dda99d8989f9814989efd50fe807ee8,http://pdfs.semanticscholar.org/42f6/f5454dda99d8989f9814989efd50fe807ee8.pdf,,,http://www.foldl.me/uploads/2015/conditional-gans-face-generation/paper.pdf
+429d4848d03d2243cc6a1b03695406a6de1a7abd,http://pdfs.semanticscholar.org/429d/4848d03d2243cc6a1b03695406a6de1a7abd.pdf,,,http://www.ijsce.org/attachments/File/v2i3/C0832062312.pdf
+425ea5656c7cf57f14781bafed51182b2e6da65f,,,https://doi.org/10.1109/TIP.2017.2718187,
+42dc36550912bc40f7faa195c60ff6ffc04e7cd6,http://pdfs.semanticscholar.org/42dc/36550912bc40f7faa195c60ff6ffc04e7cd6.pdf,,,http://hal.inria.fr/docs/00/82/94/51/PDF/ISRN_Machine_Vision-2013.pdf
+424259e9e917c037208125ccc1a02f8276afb667,http://arxiv.org/pdf/1604.06433v1.pdf,,,http://openaccess.thecvf.com/content_cvpr_2016/papers/Wang_Walk_and_Learn_CVPR_2016_paper.pdf
+42ecfc3221c2e1377e6ff849afb705ecd056b6ff,http://pdfs.semanticscholar.org/42ec/fc3221c2e1377e6ff849afb705ecd056b6ff.pdf,,https://doi.org/10.1007/978-3-540-25976-3_2,http://www3.cs.stonybrook.edu/~ial/content/papers/2004/Zhang2004bioaw.pdf
+427bec487c330e7e34cc2c8fc2d6558690421ea0,,,,http://doi.ieeecomputersociety.org/10.1109/ISCSCT.2008.352
+4215b34597d8ce1e8985afa8043400caf0ec7230,,,,http://doi.ieeecomputersociety.org/10.1109/WI-IAT.2012.71
+421955c6d2f7a5ffafaf154a329a525e21bbd6d3,http://pdfs.semanticscholar.org/ea6c/4d71fafe4352e7c3aa2237f77af0c4050cef.pdf,,,https://frvp.njit.edu/images/new-slider/TPAMI00-EP.pdf
+42e0127a3fd6a96048e0bc7aab6d0ae88ba00fb0,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553734.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2013.6553734
+42df75080e14d32332b39ee5d91e83da8a914e34,http://www.imlab.tw/wp-content/uploads/2015/11/Illumination-Compensation-Using-Oriented-Local-Histogram-Equalization-and-its-Application-to-Face-Recognition.pdf,,https://doi.org/10.1109/TIP.2012.2202670,
+424745b006491ae2caef924287e50fc6706c06ee,,,,
+4276eb27e2e4fc3e0ceb769eca75e3c73b7f2e99,http://pdfs.semanticscholar.org/4276/eb27e2e4fc3e0ceb769eca75e3c73b7f2e99.pdf,,,http://www.cfar.umd.edu/~shaohua/papers/egvp_chapter.pdf
+89e31777f221ddb3bc9940d7f520c8114c4148a2,,,https://doi.org/10.1007/s11063-012-9224-2,
+89945b7cd614310ebae05b8deed0533a9998d212,http://pdfs.semanticscholar.org/8994/5b7cd614310ebae05b8deed0533a9998d212.pdf,,,https://arxiv.org/pdf/1202.5844v3.pdf
+8990f8ea6441f97597429686542b9cdc46ed47de,,,,
+8964524580ea2cff41a6b5858b623788bbefb8a4,,,,
+89de30a75d3258816c2d4d5a733d2bef894b66b9,https://www.computer.org/csdl/trans/tp/2015/06/06915721.pdf,,,http://zhqiang.org/wp-content/uploads/2014/10/relative_hmm_pami.pdf
+897aa4aaa474fed41233faec9b70b802aea5fdea,,,https://doi.org/10.1142/S0218001414560126,
+89002a64e96a82486220b1d5c3f060654b24ef2a,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Peng_PIEFA_Personalized_Incremental_ICCV_2015_paper.pdf,,,http://webpages.uncc.edu/~szhang16/paper/ICCV15_face.pdf
+89c84628b6f63554eec13830851a5d03d740261a,http://pdfs.semanticscholar.org/89c8/4628b6f63554eec13830851a5d03d740261a.pdf,,,http://www.dtic.mil/dtic/tr/fulltext/u2/a521885.pdf
+89272b78b651038ff4d294b9ccca0018d2c9033b,,,https://doi.org/10.1109/ICPR.2014.777,
+89c51f73ec5ebd1c2a9000123deaf628acf3cdd8,http://pdfs.semanticscholar.org/89c5/1f73ec5ebd1c2a9000123deaf628acf3cdd8.pdf,,,http://thescipub.com/PDF/ajassp.2008.574.580.pdf
+89e7d23e0c6a1d636f2da68aaef58efee36b718b,http://pdfs.semanticscholar.org/89e7/d23e0c6a1d636f2da68aaef58efee36b718b.pdf,,,http://worldcomp-proceedings.com/proc/p2014/IPC2560.pdf
+89f4bcbfeb29966ab969682eae235066a89fc151,http://www.ee.surrey.ac.uk/CVSSP/Publications/papers/short-fgr-2004.pdf,,,http://xm2vtsdb.ee.surrey.ac.uk/CVSSP/Publications/papers/short-fgr-2004.pdf
+8913a5b7ed91c5f6dec95349fbc6919deee4fc75,https://people.eecs.berkeley.edu/~pabbeel/papers/2014-ICRA-BigBIRD.pdf,,https://doi.org/10.1109/ICRA.2014.6906903,http://www.cs.berkeley.edu/~pabbeel/papers/2014-ICRA-BigBIRD.pdf
+89cabb60aa369486a1ebe586dbe09e3557615ef8,http://pdfs.semanticscholar.org/89ca/bb60aa369486a1ebe586dbe09e3557615ef8.pdf,,,http://publications.idiap.ch/downloads/papers/2009/Heusch_THESIS_2009.pdf
+8959e0e9a24c0fe79f3fd3acca9d139edc0abcfd,,,,
+8983485996d5d9d162e70d66399047c5d01ac451,https://arxiv.org/pdf/1602.04868v1.pdf,,https://doi.org/10.1109/ISBA.2016.7477230,http://www.rci.rutgers.edu/~vmp93/Conference_pub/ISBA_FD_v3_embed.pdf
+89bc311df99ad0127383a9149d1684dfd8a5aa34,http://pdfs.semanticscholar.org/89bc/311df99ad0127383a9149d1684dfd8a5aa34.pdf,,,https://arxiv.org/pdf/1605.09757v1.pdf
+89896474f007c99f5967bcc05a952654a3bbb736,,,,
+89497854eada7e32f06aa8f3c0ceedc0e91ecfef,,,https://doi.org/10.1109/TIP.2017.2784571,
+891b31be76e2baa83745f24c2e2013851dc83cbb,,,https://doi.org/10.1109/TSMCB.2009.2018137,
+892400017e5c93611dc8361e7749135520d66f25,,,https://doi.org/10.1109/ICARCV.2010.5707394,
+898a66979c7e8b53a10fd58ac51fbfdb6e6e6e7c,http://pdfs.semanticscholar.org/898a/66979c7e8b53a10fd58ac51fbfdb6e6e6e7c.pdf,,https://doi.org/10.1007/978-3-540-89617-3_2,http://www.cvc.uab.es/~bogdan/Publications/raducanu_AMI08.pdf
+89d7cc9bbcd2fdc4f4434d153ecb83764242227b,http://pdfs.semanticscholar.org/89d7/cc9bbcd2fdc4f4434d153ecb83764242227b.pdf,,,http://www.ijera.com/papers/Vol2_issue1/Vol3_issue2/BB32351355.pdf
+898ff1bafee2a6fb3c848ad07f6f292416b5f07d,,,https://doi.org/10.1109/TIP.2016.2518867,
+896f4d87257abd0f628c1ffbbfdac38c86a56f50,http://pdfs.semanticscholar.org/cf5c/c511c7fd556aaf113de02fc88d7ba10928b0.pdf,,https://doi.org/10.1007/978-3-319-16178-5_36,http://wanglimin.github.io/contests/PengWCQ_LAP14_slide.pdf
+454bf5b99607b4418e931092476ad1798ce5efa4,,,https://doi.org/10.1155/2011/790598,
+45c340c8e79077a5340387cfff8ed7615efa20fd,http://pdfs.semanticscholar.org/45c3/40c8e79077a5340387cfff8ed7615efa20fd.pdf,,,http://mmi.tudelft.nl/pub/alin/Elearningberlin09.pdf
+45dbf1b6fbc7fdae09e2a1928b18fbfff331a979,http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0854.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2009.5206509
+455204fa201e9936b42756d362f62700597874c4,http://pdfs.semanticscholar.org/4552/04fa201e9936b42756d362f62700597874c4.pdf,,,http://www.qbase.gr/sites/default/files/C1_019_Koutlas.pdf
+45877ff4694576f59c2a9ca45aa65f935378492a,,,,http://doi.ieeecomputersociety.org/10.1109/IIH-MSP.2009.38
+4541c9b4b7e6f7a232bdd62ae653ba5ec0f8bbf6,http://pdfs.semanticscholar.org/4541/c9b4b7e6f7a232bdd62ae653ba5ec0f8bbf6.pdf,,,http://www.pitt.edu/~kschmidt/schmidtetal06AE.pdf
+459960be65dd04317dd325af5b7cbb883d822ee4,http://pdfs.semanticscholar.org/876c/c40c6c470f39fbda48dd394d0a9d5f6b147d.pdf,,,http://www.fdg2015.org/papers/fdg2015_paper_73.pdf
+45f858f9e8d7713f60f52618e54089ba68dfcd6d,http://openaccess.thecvf.com/content_ICCV_2017/papers/Sigurdsson_What_Actions_Are_ICCV_2017_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2017.235
+4551194408383b12db19a22cca5db0f185cced5c,,,https://doi.org/10.1109/TNNLS.2014.2341634,
+45e043dffc57a9070f483ac4aec2c5cd2cec22cb,,,,http://doi.acm.org/10.1145/3130977
+452ea180cf4d08d7500fc4bc046fd7141fd3d112,,,https://doi.org/10.1109/BTAS.2012.6374569,
+45edb29fb7eed5a52040300e1fd3cd53f1bdb429,,,https://doi.org/10.1109/ICIP.2015.7351570,
+45215e330a4251801877070c85c81f42c2da60fb,http://pdfs.semanticscholar.org/4521/5e330a4251801877070c85c81f42c2da60fb.pdf,,https://doi.org/10.1007/978-3-642-33765-9_45,https://www.cs.umd.edu/~qiu/pub/dadl-eccv12.pdf
+457cf73263d80a1a1338dc750ce9a50313745d1d,http://pdfs.semanticscholar.org/457c/f73263d80a1a1338dc750ce9a50313745d1d.pdf,,,http://arxiv.org/abs/1706.08033
+4526992d4de4da2c5fae7a5ceaad6b65441adf9d,http://pdfs.semanticscholar.org/4526/992d4de4da2c5fae7a5ceaad6b65441adf9d.pdf,,https://doi.org/10.1007/978-3-319-19390-8_16,http://persoal.citius.usc.es/manuel.mucientes/pubs/Nieto-Rodriguez15_ibpria.pdf
+45e616093a92e5f1e61a7c6037d5f637aa8964af,http://www.cs.toronto.edu/~byang/papers/malf_fg15.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2015.7163158
+45efd6c2dd4ca19eed38ceeb7c2c5568231451e1,http://pdfs.semanticscholar.org/45ef/d6c2dd4ca19eed38ceeb7c2c5568231451e1.pdf,,,http://www.ijarse.com/images/fullpdf/1506940940_529.pdf
+45f3bf505f1ce9cc600c867b1fb2aa5edd5feed8,http://www.doc.ic.ac.uk/~maja/VukadinovicPantic-SMC05-FINAL.pdf,,https://doi.org/10.1109/ICSMC.2005.1571392,http://pubs.doc.ic.ac.uk/Pantic-SMC05-2/Pantic-SMC05-2.pdf
+4571626d4d71c0d11928eb99a3c8b10955a74afe,http://pdfs.semanticscholar.org/4571/626d4d71c0d11928eb99a3c8b10955a74afe.pdf,,,https://arxiv.org/pdf/1712.03474v1.pdf
+4512b87d68458d9ba0956c0f74b60371b6c69df4,,,https://doi.org/10.1109/TIP.2017.2708504,
+4500888fd4db5d7c453617ee2b0047cedccf2a27,,,,http://doi.acm.org/10.1145/2647750
+4534d78f8beb8aad409f7bfcd857ec7f19247715,http://pdfs.semanticscholar.org/4534/d78f8beb8aad409f7bfcd857ec7f19247715.pdf,,,https://arxiv.org/pdf/1701.08435v1.pdf
+4563b46d42079242f06567b3f2e2f7a80cb3befe,http://vims.cis.udel.edu/publications/VADANA_BeFIT2011.pdf,,https://doi.org/10.1109/ICCVW.2011.6130517,
+459e840ec58ef5ffcee60f49a94424eb503e8982,http://pdfs.semanticscholar.org/459e/840ec58ef5ffcee60f49a94424eb503e8982.pdf,,,http://arxiv.org/abs/1707.05574
+45fbeed124a8956477dbfc862c758a2ee2681278,http://pdfs.semanticscholar.org/fb2a/66f842ca2577d9ea8a8300b555b71bd9cee8.pdf,,https://doi.org/10.1007/978-3-642-33783-3_2,https://www.researchgate.net/profile/E_Mostafa/publication/262327040_Pose_invariant_approach_for_face_recognition_at_distance/links/54efff330cf25f74d72351c5.pdf
+451c42da244edcb1088e3c09d0f14c064ed9077e,https://ibug.doc.ic.ac.uk/media/uploads/documents/sdnmf_conf.pdf,http://ieeexplore.ieee.org/document/7074112/,,http://ibug.doc.ic.ac.uk/media/uploads/documents/sdnmf_conf.pdf
+4562ea84ebfc8d9864e943ed9e44d35997bbdf43,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.19
+459eb3cfd9b52a0d416571e4bc4e75f979f4b901,,,https://doi.org/10.1109/ROBIO.2015.7418998,
+4568063b7efb66801e67856b3f572069e774ad33,http://www.dbs.ifi.lmu.de/~yu_k/cvpr11_0712.pdf,,,http://users.eecs.northwestern.edu/~mya671/mypapers/CVPR11_Yang_Zhu_Lv_Yu.pdf
+454283ee7ea757dd25780807e4017cf43b4fc593,,,,
+45c31cde87258414f33412b3b12fc5bec7cb3ba9,http://pdfs.semanticscholar.org/45c3/1cde87258414f33412b3b12fc5bec7cb3ba9.pdf,,,http://www.mis.atr.co.jp/~mlyons/pub_pdf/fg98-1.pdf
+4542273a157bfd4740645a6129d1784d1df775d2,http://pdfs.semanticscholar.org/4542/273a157bfd4740645a6129d1784d1df775d2.pdf,,,http://mllab.csa.iisc.ernet.in/Thesis/ME_2007_mehul.pdf
+4511e09ee26044cb46073a8c2f6e1e0fbabe33e8,http://pdfs.semanticscholar.org/4511/e09ee26044cb46073a8c2f6e1e0fbabe33e8.pdf,,,http://www.cs.bilkent.edu.tr/~duygulu/Thesis/DeryaOzkanThesis.pdf
+45513d0f2f5c0dac5b61f9ff76c7e46cce62f402,http://pdfs.semanticscholar.org/4551/3d0f2f5c0dac5b61f9ff76c7e46cce62f402.pdf,,https://doi.org/10.5244/C.25.36,http://www.bmva.org/bmvc/2011/proceedings/paper36/paper36.pdf
+458677de7910a5455283a2be99f776a834449f61,http://pdfs.semanticscholar.org/4586/77de7910a5455283a2be99f776a834449f61.pdf,,,http://www.ijcsit.com/docs/Volume%205/vol5issue02/ijcsit20140502150.pdf
+45e9b5a7dba2f757567324fe35c2f2db87b015cc,,,,
+4572fd17feb5d098e8044fe085e963036fea2a6d,,,,
+453bf941f77234cb5abfda4e015b2b337cea4f17,,,https://doi.org/10.1007/s11042-014-2340-4,
+1f9b2f70c24a567207752989c5bd4907442a9d0f,http://pdfs.semanticscholar.org/1f9b/2f70c24a567207752989c5bd4907442a9d0f.pdf,,https://doi.org/10.1007/978-3-319-16865-4_1,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ACCV_2014/pages/workshop3/pdffiles/w3-p10.pdf
+1fd7a17a6c630a122c1a3d1c0668d14c0c375de0,,,https://doi.org/10.1109/CIST.2016.7805097,
+1f05473c587e2a3b587f51eb808695a1c10bc153,http://pdfs.semanticscholar.org/7246/bbdf4c125d9d216e560c87c58a8613bd2602.pdf,,,https://arxiv.org/pdf/1507.02159v1.pdf
+1fa3948af1c338f9ae200038c45adadd2b39a3e4,http://pdfs.semanticscholar.org/7655/4182b4b0f3301afe8cfbc96a9d289b75254f.pdf,,,http://tdlc.ucsd.edu/publications/2007-2008/hsiao-cogsci07.pdf
+1ff79eba66d838d8c1cc90c22fab251bb7babc42,,,,
+1f8304f4b51033d2671147b33bb4e51b9a1e16fe,http://pdfs.semanticscholar.org/1f83/04f4b51033d2671147b33bb4e51b9a1e16fe.pdf,,,http://users.ece.cmu.edu/~dbatra/publications/assets/opd_ijcv_spi.pdf
+1f89439524e87a6514f4fbe7ed34bda4fd1ce286,http://pdfs.semanticscholar.org/1f89/439524e87a6514f4fbe7ed34bda4fd1ce286.pdf,,,http://www.stat.cmu.edu/tr/tr825/tr825.pdf
+1f41bf5e8b8562ac7ef0013f4d0cf1c9e1a431f9,,,https://doi.org/10.1109/IJCNN.2017.7965955,
+1f9ae272bb4151817866511bd970bffb22981a49,http://pdfs.semanticscholar.org/1f9a/e272bb4151817866511bd970bffb22981a49.pdf,,,https://arxiv.org/pdf/1709.03170v1.pdf
+1fd6004345245daf101c98935387e6ef651cbb55,http://pdfs.semanticscholar.org/1fd6/004345245daf101c98935387e6ef651cbb55.pdf,,https://doi.org/10.1007/978-3-319-02961-0_20,http://www.nlpr.ia.ac.cn/2013papers/gnhy/nh2.pdf
+1f8656e2254e353a91cceb08b33c25643a1b1fb7,,,https://doi.org/10.1109/LSP.2017.2736542,
+1fc249ec69b3e23856b42a4e591c59ac60d77118,http://cbl.uh.edu/pub_files/IJCB-2017-XX.pdf,,https://doi.org/10.1109/BTAS.2017.8272729,
+1fbde67e87890e5d45864e66edb86136fbdbe20e,http://www.openu.ac.il/home/hassner/data/ASLAN/Papers/ASLAN_TPAMI12.pdf,,,http://www.cs.tau.ac.il/~wolf/papers/aslan.pdf
+1f41a96589c5b5cee4a55fc7c2ce33e1854b09d6,http://www.cse.msu.edu/~liuxm/publication/Han_Otto_Liu_Jain_TPAMI14.pdf,,,http://web.cse.msu.edu/~liuxm/publication/Han_Otto_Liu_Jain_TPAMI14.pdf
+1fef53b07c6c625545fc071c7386d41f87925675,,,,
+1fcdc113a5df2f45a1f4b3249c041d942a3a730b,http://vipl.ict.ac.cn/homepage/CVPR15Metric/ref/Reconstruction-Based%20Metric%20Learning%20for%20Unconstrained%20Face%20Verification_TIFS2015.pdf,,https://doi.org/10.1109/TIFS.2014.2363792,
+1fd2ed45fb3ba77f10c83f0eef3b66955645dfe0,http://pdfs.semanticscholar.org/d91a/de2712c65f45ed8b917414829ecb24c3c183.pdf,,,http://papers.nips.cc/paper/5620-generalized-unsupervised-manifold-alignment
+1fe59275142844ce3ade9e2aed900378dd025880,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w25/papers/Xiao_Facial_Landmark_Detection_ICCV_2015_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2015.130
+1f2d12531a1421bafafe71b3ad53cb080917b1a7,http://pdfs.semanticscholar.org/1f2d/12531a1421bafafe71b3ad53cb080917b1a7.pdf,,,http://scholarworks.rit.edu/cgi/viewcontent.cgi?article=5352&context=theses
+1f35a65eab258f042edb8e1d4d5fff34f00a85bd,http://www.seattle.intel-research.net/~xren/publication/xren_cvpr08_casablanca.pdf,,,http://www2.seattle.intel-research.net/~xren/publication/xren_cvpr08_casablanca.pdf
+1f02bf412a82ad99fe99dc3cfb3adec9dd41eabb,,,https://doi.org/10.1007/s11760-016-1052-9,
+1f5725a4a2eb6cdaefccbc20dccadf893936df12,,,https://doi.org/10.1109/CCST.2012.6393544,
+1fe121925668743762ce9f6e157081e087171f4c,https://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W02/papers/Ylioinas_Unsupervised_Learning_of_2015_CVPR_paper.pdf,,,http://www.ee.oulu.fi/~jkannala/publications/cvprw2015.pdf
+1fefb2f8dd1efcdb57d5c2966d81f9ab22c1c58d,http://pdfs.semanticscholar.org/1fef/b2f8dd1efcdb57d5c2966d81f9ab22c1c58d.pdf,,,http://ceur-ws.org/Vol-1996/paper5.pdf
+1f8e44593eb335c2253d0f22f7f9dc1025af8c0d,https://qmro.qmul.ac.uk/xmlui/bitstream/handle/123456789/22607/Patras%20Fine-tuning%20regression%202014%20Accepted.pdf?sequence=1,,https://doi.org/10.1109/TIP.2014.2383325,
+1f94734847c15fa1da68d4222973950d6b683c9e,https://arxiv.org/pdf/1512.02895v1.pdf,,,http://webpages.uncc.edu/~szhang16/paper/CVPR16_structured_labels.pdf
+1f745215cda3a9f00a65166bd744e4ec35644b02,http://www.eurecom.fr/en/publication/4044/download/mm-publi-4044.pdf,,https://doi.org/10.1109/MMSP.2013.6659328,
+1f5b9ac2a37431b59fd1cecf8fe57b92b6b6398e,,,,
+1fd3dbb6e910708fa85c8a86e17ba0b6fef5617c,http://pdfs.semanticscholar.org/1fd3/dbb6e910708fa85c8a86e17ba0b6fef5617c.pdf,,,http://ikee.lib.auth.gr/record/284544/files/GRI-2016-17200.pdf
+1f24cef78d1de5aa1eefaf344244dcd1972797e8,http://openaccess.thecvf.com/content_cvpr_2017/papers/Zhou_Outlier-Robust_Tensor_PCA_CVPR_2017_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.419
+1fe990ca6df273de10583860933d106298655ec8,http://pdfs.semanticscholar.org/1fe9/90ca6df273de10583860933d106298655ec8.pdf,,,http://www.iis.sinica.edu.tw/page/jise/2015/201509_12.html
+1f3ae376b22136a2fe2e96632d4383653a42e4d4,,,,
+1fcb905e4505a781fb0b375eb470f5661e38ae39,,,,http://doi.acm.org/10.1145/3123266.3123450
+73f467b4358ac1cafb57f58e902c1cab5b15c590,http://pdfs.semanticscholar.org/73f4/67b4358ac1cafb57f58e902c1cab5b15c590.pdf,,,http://research.ijcaonline.org/ICACT2011/number2/ICACT1108.pdf
+734d6049fe08d0a24f6aa70bf0d81c217dfca570,,,,
+732e8d8f5717f8802426e1b9debc18a8361c1782,http://pdfs.semanticscholar.org/732e/8d8f5717f8802426e1b9debc18a8361c1782.pdf,,,http://proceedings.mlr.press/v70/beckham17a/beckham17a.pdf
+73b05a7faf1b9363ffff125db101dbe2b0b3964f,,,,
+7384c39a2d084c93566b98bc4d81532b5ad55892,http://pdfs.semanticscholar.org/d0a5/0940a1bf951adaf22bd1fc72ea861b606cdb.pdf,,https://doi.org/10.1186/1687-5281-2013-13,http://jivp.eurasipjournals.com/content/pdf/1687-5281-2013-13.pdf
+739d400cb6fb730b894182b29171faaae79e3f01,http://pdfs.semanticscholar.org/739d/400cb6fb730b894182b29171faaae79e3f01.pdf,,,http://ijssst.info/Vol-17/No-47/paper25.pdf
+732e4016225280b485c557a119ec50cffb8fee98,http://pdfs.semanticscholar.org/732e/4016225280b485c557a119ec50cffb8fee98.pdf,,,https://arxiv.org/pdf/1311.6510v1.pdf
+7373c4a23684e2613f441f2236ed02e3f9942dd4,https://dr.ntu.edu.sg/bitstream/handle/10220/18012/Feature%20Extraction%20through%20Binary%20Pattern%20of%20Phase%20Congruency%20for%20Facial%20Expression%20Recognition.pdf?isAllowed=y&sequence=1,,https://doi.org/10.1109/ICARCV.2012.6485152,
+732686d799d760ccca8ad47b49a8308b1ab381fb,http://pdfs.semanticscholar.org/7326/86d799d760ccca8ad47b49a8308b1ab381fb.pdf,,,http://www.uva.nl/binaries/content/documents/personalpages/a/b/c.s.abacioglu/en/downloads/downloads/assets/asset?1475751440086=
+7384610776ec405dc84e47f2d353aa6d3cc03b1d,,,,
+73fbdd57270b9f91f2e24989178e264f2d2eb7ae,http://mirlab.org/conference_papers/International_Conference/ICASSP%202012/pdfs/0001945.pdf,,https://doi.org/10.1109/ICASSP.2012.6288286,
+738c187d55745aac18d5fb5f6cc9e3568cd2d217,http://www-ee.ccny.cuny.edu/wwwn/yltian/Publications/ICMR130-2015.pdf,,,http://doi.acm.org/10.1145/2671188.2749339
+738a985fba44f9f5acd516e07d0d9578f2ffaa4e,http://pdfs.semanticscholar.org/738a/985fba44f9f5acd516e07d0d9578f2ffaa4e.pdf,,,http://mmi.tudelft.nl/pub/dragos/euromedia.pdf
+73fd7e74457e0606704c5c3d3462549f1b2de1ad,http://pdfs.semanticscholar.org/73fd/7e74457e0606704c5c3d3462549f1b2de1ad.pdf,,,http://www.aaai.org/ocs/index.php/AAAI/AAAI15/paper/view/9521
+73c5bab5c664afa96b1c147ff21439135c7d968b,http://uclab.khu.ac.kr/resources/publication/C_109.pdf,,,http://doi.acm.org/10.1145/1282280.1282318
+874713dfa7ba8b3ffcc47ed5f8b60849d77f6ea8,,,,
+874da338c01fb7a87d605fcde6c52835eee03d5e,,,,http://doi.ieeecomputersociety.org/10.1109/ICAPR.2009.20
+87806c51dc8c1077953178367dcf5c75c553ce34,,,https://doi.org/10.1109/ICMLA.2015.146,
+877100f430b72c5d60de199603ab5c65f611ce17,http://pdfs.semanticscholar.org/8771/00f430b72c5d60de199603ab5c65f611ce17.pdf,,,https://peerj.com/articles/1801.pdf
+87e5b4d95f95a0975e855cf5ad402db7a3c64ff5,http://www.researchgate.net/profile/Paul_Bodesheim/publication/269314560_Local_Novelty_Detection_in_Multi-class_Recognition_Problems/links/5486c2420cf289302e2c35eb.pdf,,,http://doi.ieeecomputersociety.org/10.1109/WACV.2015.113
+870433ba89d8cab1656e57ac78f1c26f4998edfb,https://arxiv.org/pdf/1612.04904v1.pdf,,,http://arxiv.org/abs/1612.04904
+87ee56feefdb39938cda7f872e784d9d986713af,,,,http://dl.acm.org/citation.cfm?id=3022247
+8796f2d54afb0e5c924101f54d469a1d54d5775d,http://pdfs.semanticscholar.org/8796/f2d54afb0e5c924101f54d469a1d54d5775d.pdf,,,http://file.scirp.org/pdf/JSIP20120100006_60760595.pdf
+87f285782d755eb85d8922840e67ed9602cfd6b9,http://pdfs.semanticscholar.org/87f2/85782d755eb85d8922840e67ed9602cfd6b9.pdf,,,http://scholarworks.umass.edu/cgi/viewcontent.cgi?article=1153&context=dissertations_2
+871f5f1114949e3ddb1bca0982086cc806ce84a8,http://www.vision.ee.ethz.ch/en/publications/papers/proceedings/eth_biwi_01169.pdf,,https://doi.org/10.1109/MVA.2015.7153120,
+87bee0e68dfc86b714f0107860d600fffdaf7996,http://mi.informatik.uni-siegen.de/publications/piotraschke_autoreconst_cvpr16.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.372
+87f738d3883fc56ef0841484478b89c0f241df02,,,,
+87309bdb2b9d1fb8916303e3866eca6e3452c27d,http://pdfs.semanticscholar.org/8730/9bdb2b9d1fb8916303e3866eca6e3452c27d.pdf,,,http://arxiv.org/abs/1409.0084
+8754b7dba08911fca67db5bf13a6e6abd546d2e2,,,,
+87552622efd0e85c2a71d4d2590e53d45f021dbf,,,https://doi.org/10.1109/ICIP.2016.7532435,
+872ff48a3acfbf96376fd048348372f5137615e4,,,https://doi.org/10.1007/s41095-016-0051-7,
+87147418f863e3d8ff8c97db0b42695a1c28195b,http://pdfs.semanticscholar.org/8714/7418f863e3d8ff8c97db0b42695a1c28195b.pdf,,,http://arxiv.org/pdf/1604.07360v1.pdf
+876bae52a5edd6c9deb8bb8ad90dc5b74b640615,,,,
+87a39f5002ef2de3143d1ea96ae19e002c44345b,,,,
+87dd3fd36bccbe1d5f1484ac05f1848b51c6eab5,http://pdfs.semanticscholar.org/87dd/3fd36bccbe1d5f1484ac05f1848b51c6eab5.pdf,,,http://www.cs.ucf.edu/~vision/papers/theses/Rodriguez_Mikel.pdf
+87bb183d8be0c2b4cfceb9ee158fee4bbf3e19fd,http://pdfs.semanticscholar.org/87bb/183d8be0c2b4cfceb9ee158fee4bbf3e19fd.pdf,,https://doi.org/10.1007/978-3-319-17963-6_2,http://homes.cs.washington.edu/~shapiro/Multimedia/ezgi-Craniofacial.pdf
+8706c3d49d1136035f298041f03bb70dc074f24d,,,,http://doi.ieeecomputersociety.org/10.1109/DICTA.2008.12
+876583a059154def7a4bc503b21542f80859affd,,,https://doi.org/10.1109/IWBF.2016.7449697,
+80677676b127b67938c8db06a15d87f5dd4bd7f1,,,https://doi.org/10.1007/s11760-014-0623-x,
+80f72b26c6571aee2ff04704bc7fd1a69bfa0b3f,,,https://doi.org/10.1016/j.patcog.2016.12.029,
+8027a9093f9007200e8e69e05616778a910f4a5f,,,https://doi.org/10.1109/ICB.2013.6612997,
+805a0f4b99f162ac4db0ef6e0456138c8d498c3a,,,,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2015.2465373
+80193dd633513c2d756c3f568ffa0ebc1bb5213e,http://pdfs.semanticscholar.org/a3d8/8154a1253338b45f950bcf9cbe91ba5271ee.pdf,,https://doi.org/10.1007/3-540-45404-7_25,http://www.cfar.umd.edu/~vok/krueger01dagm.ps.gz
+808b685d09912cbef4a009e74e10476304b4cccf,http://pdfs.semanticscholar.org/808b/685d09912cbef4a009e74e10476304b4cccf.pdf,,,http://datasets.d2.mpi-inf.mpg.de/joon17cvprw/poster.pdf
+804b4c1b553d9d7bae70d55bf8767c603c1a09e3,http://mirlab.org/conference_papers/International_Conference/ICASSP%202016/pdfs/0001831.pdf,,https://doi.org/10.1109/ICASSP.2016.7471993,
+800cbbe16be0f7cb921842d54967c9a94eaa2a65,http://pdfs.semanticscholar.org/800c/bbe16be0f7cb921842d54967c9a94eaa2a65.pdf,,,http://mmi.tudelft.nl/pub/dragos/Dragos%20Datcu_PhD_Thesis.pdf
+808656563eea17470159e6540b05fe6f7ae58c2b,http://www.researchgate.net/profile/Songul_Varli_Albayrak/publication/235248598_Classification_with_Emotional_Faces_via_a_Robust_Sparse_Classifier/links/0912f510a44fb84bef000000.pdf,,https://doi.org/10.1109/IPTA.2012.6469531,https://www.researchgate.net/profile/Songul_Varli_Albayrak/publication/235248598_Classification_with_Emotional_Faces_via_a_Robust_Sparse_Classifier/links/0912f510a44fb84bef000000.pdf
+80135ed7e34ac1dcc7f858f880edc699a920bf53,http://pdfs.semanticscholar.org/8013/5ed7e34ac1dcc7f858f880edc699a920bf53.pdf,,,https://tez.yok.gov.tr/UlusalTezMerkezi/TezGoster?key=X-M9ZoIuIoNTj2P7iY13hWe608GK8lqvFmtsZOLaUGG9GlzJk9ixrtaDRSSRfDtX
+80277fb3a8a981933533cf478245f262652a33b5,http://pdfs.semanticscholar.org/8027/7fb3a8a981933533cf478245f262652a33b5.pdf,,https://doi.org/10.1007/978-3-642-32717-9_20,https://www.tugraz.at/fileadmin/user_upload/Institute/ICG/Documents/lrs/pubs/koestinger_dagm_2012.pdf
+803803b5c2c61046d63674f85ecf0123f9d2c4b8,,,https://doi.org/10.1049/iet-bmt.2013.0089,
+80840df0802399838fe5725cce829e1b417d7a2e,http://pdfs.semanticscholar.org/8084/0df0802399838fe5725cce829e1b417d7a2e.pdf,,,http://arxiv.org/abs/1304.1250
+80d42f74ee9bf03f3790c8d0f5a307deffe0b3b7,,,https://doi.org/10.1109/TNNLS.2016.2522431,
+80c8d143e7f61761f39baec5b6dfb8faeb814be9,http://pdfs.semanticscholar.org/80c8/d143e7f61761f39baec5b6dfb8faeb814be9.pdf,,,http://www.ijltet.org/journal/148604928843.pdf
+809ea255d144cff780300440d0f22c96e98abd53,http://pdfs.semanticscholar.org/809e/a255d144cff780300440d0f22c96e98abd53.pdf,,,https://arxiv.org/pdf/1801.07698v1.pdf
+80a6bb337b8fdc17bffb8038f3b1467d01204375,http://pdfs.semanticscholar.org/80a6/bb337b8fdc17bffb8038f3b1467d01204375.pdf,,,http://www.cs.nthu.edu.tw/~cchen/Research/2015CIST.pdf
+80aa455068018c63237c902001b58844fcc6f160,,,https://doi.org/10.1109/FG.2011.5771327,
+80a5afeb6968c7e736adc48bd4d5ec5b45b13f71,,,https://doi.org/10.1007/978-3-319-15762-7,
+80be8624771104ff4838dcba9629bacfe6b3ea09,http://www.ifp.illinois.edu/~moulin/Papers/ECCV14-jiwen.pdf,,https://doi.org/10.1007/978-3-319-10590-1_18,http://www.ntu.edu.sg/home/wanggang/LuECCV2014.pdf
+8000c4f278e9af4d087c0d0895fff7012c5e3d78,https://www.cse.ust.hk/~yuzhangcse/papers/Zhang_Yeung_CVPR10.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2010.5539975
+80e9c28c369a6c49f9dd10473c663a25dc9716d5,,,,
+80bd795930837330e3ced199f5b9b75398336b87,http://pdfs.semanticscholar.org/80bd/795930837330e3ced199f5b9b75398336b87.pdf,,https://doi.org/10.1007/978-3-642-37331-2_24,http://www.jdl.ac.cn/doc/2011/201319112684605_2012_accv_sxli_relative%20forest%20for%20attribute%20prediction.pdf
+741950ae2e503a614f257cdac653d1bb30cb8e79,,,,
+74de03923a069ffc0fb79e492ee447299401001f,http://pdfs.semanticscholar.org/74de/03923a069ffc0fb79e492ee447299401001f.pdf,,,http://mi.eng.cam.ac.uk/~oa214/academic/publications/2006_IVAT_chapter1.pdf
+74f643579949ccd566f2638b85374e7a6857a9fc,http://www4.comp.polyu.edu.hk/~cslzhang/paper/conf/ICPR/MBP%20ICPR10(Revise%20final).pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICPR.2010.657
+74cec83ee694b5d0e07d5d0bacd0aa48a80776aa,,,https://doi.org/10.1109/ISCAS.2013.6572506,
+74408cfd748ad5553cba8ab64e5f83da14875ae8,http://pdfs.semanticscholar.org/7440/8cfd748ad5553cba8ab64e5f83da14875ae8.pdf,,,http://arxiv.org/pdf/1506.00925v1.pdf
+747fddd7345b60da121fc13c5440a18039b912e6,http://pdfs.semanticscholar.org/747f/ddd7345b60da121fc13c5440a18039b912e6.pdf,,,http://arxiv.org/abs/1711.06106
+747d5fe667519acea1bee3df5cf94d9d6f874f20,http://pdfs.semanticscholar.org/747d/5fe667519acea1bee3df5cf94d9d6f874f20.pdf,,,https://arxiv.org/pdf/1804.01077v1.pdf
+745d49a2ff70450113f07124c2c5263105125f58,,,https://doi.org/10.1109/ICPR.2016.7899972,
+740e095a65524d569244947f6eea3aefa3cca526,http://pdfs.semanticscholar.org/740e/095a65524d569244947f6eea3aefa3cca526.pdf,,,http://referaat.cs.utwente.nl/conference/24/paper/7523/towards-human-like-performance-face-detection-a-convolutional-neural-network-approach.pdf
+74e869bc7c99093a5ff9f8cfc3f533ccf1b135d8,http://pdfs.semanticscholar.org/74e8/69bc7c99093a5ff9f8cfc3f533ccf1b135d8.pdf,,,http://www.ri.cmu.edu/pub_files/2012/8/divvalaThesis.pdf
+741485741734a99e933dd0302f457158c6842adf,http://pdfs.semanticscholar.org/7414/85741734a99e933dd0302f457158c6842adf.pdf,,https://doi.org/10.4304/jcp.9.3.608-617,http://www.jcomputers.us/vol9/jcp0903-14.pdf
+743e582c3e70c6ec07094887ce8dae7248b970ad,http://pdfs.semanticscholar.org/743e/582c3e70c6ec07094887ce8dae7248b970ad.pdf,,,http://www.sersc.org/journals/IJSIP/vol8_no10/4.pdf
+74b0095944c6e29837c208307a67116ebe1231c8,http://web.eecs.umich.edu/~hero/Preprints/EuclideanK-Nearest.pdf,,https://doi.org/10.1109/ICASSP.2004.1326713,http://www.eecs.umich.edu/~hero/Preprints/EuclideanK-Nearest.pdf
+74c8116d647612e8cd20a2528eeed38f76d09126,,,,
+74156a11c2997517061df5629be78428e1f09cbd,http://cvrr.ucsd.edu/publications/2016/MartinRangeshTrivediICPR2016.pdf,,https://doi.org/10.1109/ICPR.2016.7900057,
+748e72af01ba4ee742df65e9c030cacec88ce506,http://pdfs.semanticscholar.org/748e/72af01ba4ee742df65e9c030cacec88ce506.pdf,,,http://www.ijcsi.org/papers/IJCSI-11-5-1-50-57.pdf
+745e74ae84e1b2b8690d07db523531642023d6c4,,,https://doi.org/10.1109/FSKD.2016.7603417,
+745b42050a68a294e9300228e09b5748d2d20b81,http://pdfs.semanticscholar.org/745b/42050a68a294e9300228e09b5748d2d20b81.pdf,,,https://arxiv.org/pdf/1803.05790v2.pdf
+749382d19bfe9fb8d0c5e94d0c9b0a63ab531cb7,http://pdfs.semanticscholar.org/7493/82d19bfe9fb8d0c5e94d0c9b0a63ab531cb7.pdf,,,http://subs.emis.de/LNI/Proceedings/Proceedings154/article2906.html
+74618fb4ce8ce0209db85cc6069fe64b1f268ff4,https://ir.canterbury.ac.nz/bitstream/handle/10092/6229/12636740_Y10_ICCSIT.pdf?isAllowed=y&sequence=1,,,http://ir.canterbury.ac.nz/bitstream/handle/10092/6229/12636740_Y10_ICCSIT.pdf?isAllowed=y&sequence=1
+74875368649f52f74bfc4355689b85a724c3db47,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Yan_Object_Detection_by_2015_CVPR_paper.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/3B_072_ext.pdf
+7492c611b1df6bce895bee6ba33737e7fc7f60a6,https://ibug.doc.ic.ac.uk/media/uploads/documents/zafeiriou_the_3d_menpo_iccv_2017_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.16
+74eae724ef197f2822fb7f3029c63014625ce1ca,http://pdfs.semanticscholar.org/74ea/e724ef197f2822fb7f3029c63014625ce1ca.pdf,,,http://www.sersc.org/journals/IJBSBT/vol5_no2/11.pdf
+747dc0add50b86f5ba9e3e7315943d520e08f9eb,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.78
+7480d8739eb7ab97c12c14e75658e5444b852e9f,http://pdfs.semanticscholar.org/cfe4/b03951be323394e6749f6a30b2ac9b924479.pdf,,,http://www.bmva.org/bmvc/2016/papers/paper103/abstract103.pdf
+74d3ff8324e02503c18fb2566ed29e2e22ce0d1b,,,,http://doi.ieeecomputersociety.org/10.1109/IAS.2009.266
+74ba4ab407b90592ffdf884a20e10006d2223015,http://pdfs.semanticscholar.org/74ba/4ab407b90592ffdf884a20e10006d2223015.pdf,,,http://arxiv.org/abs/1704.02117
+7405ed035d1a4b9787b78e5566340a98fe4b63a0,http://pdfs.semanticscholar.org/7405/ed035d1a4b9787b78e5566340a98fe4b63a0.pdf,,,http://arxiv.org/pdf/1505.00824v1.pdf
+744db9bd550bf5e109d44c2edabffec28c867b91,http://pdfs.semanticscholar.org/744d/b9bd550bf5e109d44c2edabffec28c867b91.pdf,,https://doi.org/10.1007/978-3-319-07635-5_61,http://groupware.les.inf.puc-rio.br/public/papers/85190643.pdf
+74325f3d9aea3a810fe4eab8863d1a48c099de11,http://pdfs.semanticscholar.org/7432/5f3d9aea3a810fe4eab8863d1a48c099de11.pdf,,,https://arxiv.org/pdf/1407.1957v1.pdf
+744d23991a2c48d146781405e299e9b3cc14b731,http://www.cise.ufl.edu/~dihong/assets/LPS2016.pdf,,https://doi.org/10.1109/TIP.2016.2535284,
+1a45ddaf43bcd49d261abb4a27977a952b5fff12,http://pdfs.semanticscholar.org/1a45/ddaf43bcd49d261abb4a27977a952b5fff12.pdf,,,https://arxiv.org/pdf/1803.07441v1.pdf
+1ab19e516b318ed6ab64822efe9b2328836107a4,,,https://doi.org/10.1109/TIP.2010.2083674,
+1a41e5d93f1ef5b23b95b7163f5f9aedbe661394,http://pdfs.semanticscholar.org/1a41/e5d93f1ef5b23b95b7163f5f9aedbe661394.pdf,,,http://ftp.ncbi.nlm.nih.gov/pub/pmc/4c/e7/TSWJ2014-903160.PMC3944647.pdf
+1a65cc5b2abde1754b8c9b1d932a68519bcb1ada,http://pdfs.semanticscholar.org/e4ae/821e234c281aed6ba629c130be7c8eac4a31.pdf,,,http://www.bmva.org/bmvc/2014/files/paper116.pdf
+1aa766bbd49bac8484e2545c20788d0f86e73ec2,http://inside.mines.edu/~jpaone/papers/IV15_BaselineFaceDetection_SHRP2NDS.pdf,,https://doi.org/10.1109/IVS.2015.7225682,
+1a46d3a9bc1e4aff0ccac6403b49a13c8a89fc1d,http://www.dabi.temple.edu/~hbling/publication/oria-12-final.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247878
+1a71f9af98228f4d2b15cfaf415321813e29b087,,,,
+1a878e4667fe55170252e3f41d38ddf85c87fcaf,http://pdfs.semanticscholar.org/1a87/8e4667fe55170252e3f41d38ddf85c87fcaf.pdf,,,http://www.cs.berkeley.edu/~slacoste/research/pubs/lacoste-thesis09-discMLstruct.pdf
+1a41831a3d7b0e0df688fb6d4f861176cef97136,http://pdfs.semanticscholar.org/1fae/8f87f83bb707c4b38c23e93ae2bcb900b962.pdf,,,http://www.dtic.mil/get-tr-doc/pdf?AD=ADA455936
+1ab4fdcd431286a2fe9538cb9a9e3c67016fa98a,,,https://doi.org/10.1007/s11042-013-1754-8,
+1a0e1ba4408d12f8a28049da0ff8cad4f91690d5,,,https://doi.org/10.1007/s12559-016-9445-1,
+1ac2882559a4ff552a1a9956ebeadb035cb6df5b,http://www.pitt.edu/~jeffcohn/biblio/TrainData.pdf,,,http://ca.cs.cmu.edu/sites/default/files/Girard_2015_How.pdf
+1ad5cb4c1eec5a9666b5dbbb6fab43576d0935db,,,https://doi.org/10.1109/ICIP.2016.7533026,
+1a7a17c4f97c68d68fbeefee1751d349b83eb14a,http://pdfs.semanticscholar.org/1a7a/17c4f97c68d68fbeefee1751d349b83eb14a.pdf,,,http://www.jmlr.org/papers/volume17/14-460/14-460.pdf
+1aef6f7d2e3565f29125a4871cd60c4d86c48361,http://pdfs.semanticscholar.org/1aef/6f7d2e3565f29125a4871cd60c4d86c48361.pdf,,,http://www.cs.utexas.edu/~ml/papers/venugopalan.proposal15.pdf
+1a6c3c37c2e62b21ebc0f3533686dde4d0103b3f,http://pdfs.semanticscholar.org/1a6c/3c37c2e62b21ebc0f3533686dde4d0103b3f.pdf,,,http://irdp.info/journals/j3/volume4/Implementation%20of%20Partial%20Face%20Recognition%20using%20Directional%20Binary%20Code.pdf
+1a167e10fe57f6d6eff0bb9e45c94924d9347a3e,http://disi.unitn.it/~duta/pubs/ICPR2016_Duta.pdf,,https://doi.org/10.1109/ICPR.2016.7899964,http://imag.pub.ro/~bionescu/index_files/ICPR2016_DA-VLAD.pdf
+1a3eee980a2252bb092666cf15dd1301fa84860e,https://www.uv.es/vista/vistavalencia/papers/ICIP09_GPCA.pdf,,https://doi.org/10.1109/ICIP.2009.5413808,
+1a140d9265df8cf50a3cd69074db7e20dc060d14,http://pdfs.semanticscholar.org/1a14/0d9265df8cf50a3cd69074db7e20dc060d14.pdf,,https://doi.org/10.1007/978-3-642-37444-9_52,http://www.eecs.qmul.ac.uk/~hy300/papers/accv2012finalpaper.pdf
+1a47f12a2490f6775c0ad863ac856de27f5b3e03,,,https://doi.org/10.1016/j.sigpro.2014.11.010,
+1a862270ad9168e3bf5471bda2793c32d4043aa4,,,,
+1a85956154c170daf7f15f32f29281269028ff69,http://ibug.doc.ic.ac.uk/media/uploads/documents/active_pictorial_structures.pdf,,,https://ibug.doc.ic.ac.uk/media/uploads/documents/active_pictorial_structures.pdf
+1a8d40bcfb087591cc221086440d9891749d47b8,,,https://doi.org/10.1109/ICCE.2012.6161859,
+1a031378cf1d2b9088a200d9715d87db8a1bf041,http://pdfs.semanticscholar.org/1a03/1378cf1d2b9088a200d9715d87db8a1bf041.pdf,,,https://openreview.net/pdf?id=rJVoeUkvG
+1a96d54c326d19e32bed00642a177ea439341fa2,http://vc.cs.nthu.edu.tw/home/paper/codfiles/tychiu/200808151557/Principal_Component_Analysis_Based_on_L1-Norm_Maximization.pdf,,,http://mipal.snu.ac.kr/images/b/be/L1PCA_TPAMI.pdf
+1afd481036d57320bf52d784a22dcb07b1ca95e2,http://pdfs.semanticscholar.org/e206/144fc1dee7f10079facf3b6a3d5d2bf5f8db.pdf,,https://doi.org/10.1093/comjnl/bxs146,http://www.research.att.com/export/sites/att_labs/people/Gibbon_David_C/library/publications/dg20121202050000.pdf?services
+1afef6b389bd727c566cd6fbcd99adefe4c0cf32,,,https://doi.org/10.1109/ICB.2016.7550087,
+1a9337d70a87d0e30966ecd1d7a9b0bbc7be161f,http://pdfs.semanticscholar.org/1a93/37d70a87d0e30966ecd1d7a9b0bbc7be161f.pdf,,https://doi.org/10.1016/j.engappai.2014.04.006,http://www.researchgate.net/profile/Tapabrata_Chakraborti/publication/262340257_A_novel_binary_adaptive_weight_GSA_based_feature_selection_for_face_recognition_using_local_gradient_patterns_modified_census_transform_and_local_binary_patterns/links/54bf8c600cf2f6bf4e04fab5.pdf
+1ae642a8d756c6aa7bc049c5c89d5072d8749637,http://www.cs.umd.edu/~behjat/papers/ICMR14_poster.pdf,,,http://doi.acm.org/10.1145/2578726.2578767
+1aeef2ab062c27e0dbba481047e818d4c471ca57,,,https://doi.org/10.1109/ICACCI.2015.7275860,
+1a4b6ee6cd846ef5e3030a6ae59f026e5f50eda6,http://pdfs.semanticscholar.org/1a4b/6ee6cd846ef5e3030a6ae59f026e5f50eda6.pdf,,,https://arxiv.org/pdf/1609.06782v2.pdf
+1addc5c1fa80086d1ed58f71a9315ad13bd87ca2,,,https://doi.org/10.1007/s10044-012-0279-5,
+1a9a192b700c080c7887e5862c1ec578012f9ed1,http://pdfs.semanticscholar.org/1a9a/192b700c080c7887e5862c1ec578012f9ed1.pdf,,,http://www.ntu.edu.sg/home5/PG03454644/codes_and_paper/TSMC_Discriminant_Subspace_Analysis_for_Face_Recognition_with_Small_Number_of_Training_Samples.pdf
+1af52c853ff1d0ddb8265727c1d70d81b4f9b3a9,http://pdfs.semanticscholar.org/1af5/2c853ff1d0ddb8265727c1d70d81b4f9b3a9.pdf,,,http://cdn.intechopen.com/pdfs/40176/InTech-Face_recognition_under_illumination_variation_using_shadow_compensation_and_pixel_selection.pdf
+1a8ccc23ed73db64748e31c61c69fe23c48a2bb1,http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W11/papers/Zhou_Extensive_Facial_Landmark_2013_ICCV_paper.pdf,,,http://www.faceplusplus.com/wp-content/uploads/FacialLandmarkpaper.pdf
+1a40092b493c6b8840257ab7f96051d1a4dbfeb2,http://web.cse.msu.edu/~liuxm/publication/Safdarnejad_Liu_Udpa_Andrus_Wood_Craven_FG2015.pdf,,,http://www.cse.msu.edu/~liuxm/publication/Safdarnejad_Liu_Udpa_Andrus_Wood_Craven_FG2015.pdf
+1ad97cce5fa8e9c2e001f53f6f3202bddcefba22,http://files.is.tue.mpg.de/black/papers/RGA2014.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Hauberg_Grassmann_Averages_for_2014_CVPR_paper.pdf
+1a1118cd4339553ad0544a0a131512aee50cf7de,http://pdfs.semanticscholar.org/1a11/18cd4339553ad0544a0a131512aee50cf7de.pdf,,,https://arxiv.org/pdf/1711.00088v1.pdf
+1a40c2a2d17c52c8b9d20648647d0886e30a60fa,,,https://doi.org/10.1109/ICPR.2016.7900283,
+1a6c9ef99bf0ab9835a91fe5f1760d98a0606243,http://pdfs.semanticscholar.org/57ce/705f08ae7256b16eac2b8b40ae0c88d6cf23.pdf,,https://doi.org/10.1007/978-3-319-10584-0_29,http://www.researchgate.net/profile/Eren_Golge/publication/265528981_ConceptMap_Mining_noisy_web_data_for_concept_learning/links/54118fa20cf264cee28b3fdd.pdf
+1a03dcc811131b0b702bd5a75c54ed26cd27151a,,,https://doi.org/10.1007/s11760-015-0810-4,
+1ad780e02edf155c09ea84251289a054b671b98a,,,https://doi.org/10.1109/ICNIDC.2012.6418787,
+1afdedba774f6689eb07e048056f7844c9083be9,http://ibug.doc.ic.ac.uk/media/uploads/documents/sandbach2013markov.pdf,,,http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W22/papers/Sandbach_Markov_Random_Field_2013_ICCV_paper.pdf
+1a2b3fa1b933042687eb3d27ea0a3fcb67b66b43,http://pdfs.semanticscholar.org/676c/0fc58b6a0108326024f708e30d76cadbae58.pdf,,https://doi.org/10.5244/C.25.112,http://www.bmva.org/bmvc/2011/proceedings/paper112/abstract.pdf
+1a7a2221fed183b6431e29a014539e45d95f0804,http://www.cs.colostate.edu/~vision/publications/Bolme2007b.pdf,,,
+1a5b39a4b29afc5d2a3cd49087ae23c6838eca2b,http://www.l3s.de/~siersdorfer/sources/2014/mtgame-2014.pdf,,,http://doi.acm.org/10.1145/2661829.2661946
+2878b06f3c416c98496aad6fc2ddf68d2de5b8f6,http://www.cs.fsu.edu/~liux/research/pub/papers/Wu-Two-Stage-CVIU-2008.pdf,,https://doi.org/10.1016/j.cviu.2007.04.005,http://ww2.cs.fsu.edu/~ywu/PDF-files/twostage.pdf
+287795991fad3c61d6058352879c7d7ae1fdd2b6,http://pdfs.semanticscholar.org/2877/95991fad3c61d6058352879c7d7ae1fdd2b6.pdf,,,http://research.ijcaonline.org/volume66/number8/pxc3885774.pdf
+287de191c49a3caa38ad7594093045dfba1eb420,,,https://doi.org/10.23919/MVA.2017.7986829,
+28a900a07c7cbce6b6297e4030be3229e094a950,http://pdfs.semanticscholar.org/28a9/00a07c7cbce6b6297e4030be3229e094a950.pdf,,,http://www.ccis2k.org/iajit/index.php?Itemid=327&id=81&option=com_content&task=blogcategory
+282503fa0285240ef42b5b4c74ae0590fe169211,http://pdfs.semanticscholar.org/2825/03fa0285240ef42b5b4c74ae0590fe169211.pdf,,,https://arxiv.org/pdf/1801.07848v1.pdf
+28e0ed749ebe7eb778cb13853c1456cb6817a166,http://pdfs.semanticscholar.org/28e0/ed749ebe7eb778cb13853c1456cb6817a166.pdf,,https://doi.org/10.1016/j.neunet.2011.10.003,https://pdfs.semanticscholar.org/28e0/ed749ebe7eb778cb13853c1456cb6817a166.pdf
+28b9d92baea72ec665c54d9d32743cf7bc0912a7,http://pdfs.semanticscholar.org/a7f8/b6bf6aa7a12773ad9bcf1d040d4d74d12493.pdf,,,http://eprints.eemcs.utwente.nl/25829/01/Pantic_Parametric_temporal_alignment.pdf
+283d226e346ac3e7685dd9a4ba8ae55ee4f2fe43,http://pdfs.semanticscholar.org/283d/226e346ac3e7685dd9a4ba8ae55ee4f2fe43.pdf,,,https://www.base-search.net/Record/3dbe9ce562db3466d1a42ef9ba0c3cadada3b193cd202d58984b6c8648d03c67
+28f7d3d894705a92cac9b08d22701fadb6472676,,,,
+28d7029cfb73bcb4ad1997f3779c183972a406b4,https://arxiv.org/pdf/1705.00322v1.pdf,,https://doi.org/10.1109/TIP.2017.2700761,http://arxiv.org/abs/1705.00322
+280d59fa99ead5929ebcde85407bba34b1fcfb59,http://mirlab.org/conference_papers/International_Conference/ICASSP%202016/pdfs/0002662.pdf,,https://doi.org/10.1109/ICASSP.2016.7472160,http://arxiv.org/abs/1604.02634
+28f5138d63e4acafca49a94ae1dc44f7e9d84827,http://pdfs.semanticscholar.org/28f5/138d63e4acafca49a94ae1dc44f7e9d84827.pdf,,,https://arxiv.org/pdf/1207.3438v1.pdf
+281b91c35a1af97b1405bc724a04e2be6e24971b,,,https://doi.org/10.1109/ICMLC.2010.5580557,
+28e1668d7b61ce21bf306009a62b06593f1819e3,http://pdfs.semanticscholar.org/28e1/668d7b61ce21bf306009a62b06593f1819e3.pdf,,,
+28cd46a078e8fad370b1aba34762a874374513a5,http://pdfs.semanticscholar.org/28cd/46a078e8fad370b1aba34762a874374513a5.pdf,,,https://arxiv.org/pdf/1707.06436v1.pdf
+286adff6eff2f53e84fe5b4d4eb25837b46cae23,http://pdfs.semanticscholar.org/b17e/61972e674f8f734bd428cb882a9bb797abe2.pdf,,,https://arxiv.org/pdf/1604.03901v1.pdf
+286812ade95e6f1543193918e14ba84e5f8e852e,http://pdfs.semanticscholar.org/9b1d/a39168a7196c2f9c85e9b3d17debff04c988.pdf,,,http://www.bmva.org/bmvc/2014/files/abstract130.pdf
+282a3ee79a08486f0619caf0ada210f5c3572367,http://pdfs.semanticscholar.org/282a/3ee79a08486f0619caf0ada210f5c3572367.pdf,,,https://arxiv.org/pdf/1801.01687v1.pdf
+288dbc40c027af002298b38954d648fddd4e2fd3,http://pdfs.semanticscholar.org/288d/bc40c027af002298b38954d648fddd4e2fd3.pdf,,https://doi.org/10.1007/978-3-642-33786-4_1,http://grvsharma.com/hpresources/sharma_lhs_eccv12.pdf
+28f311b16e4fe4cc0ff6560aae3bbd0cb6782966,http://pdfs.semanticscholar.org/4d59/7318188a9c7f7a78dadbe5b8f8385c1e1356.pdf,,,http://aclweb.org/anthology/E12-1061
+28312c3a47c1be3a67365700744d3d6665b86f22,http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf,,,http://www.cfar.umd.edu/ftp/TRs/FaceSurvey.ps.gz
+28d06fd508d6f14cd15f251518b36da17909b79e,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Chen_Whats_in_a_2013_CVPR_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2013.432
+28b5b5f20ad584e560cd9fb4d81b0a22279b2e7b,http://pdfs.semanticscholar.org/28b5/b5f20ad584e560cd9fb4d81b0a22279b2e7b.pdf,,,https://arxiv.org/pdf/1204.0171v5.pdf
+28bc378a6b76142df8762cd3f80f737ca2b79208,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Vedaldi_Understanding_Objects_in_2014_CVPR_paper.pdf,,,http://people.cs.umass.edu/~smaji/papers/oid-cvpr14.pdf
+287900f41dd880802aa57f602e4094a8a9e5ae56,https://www.comp.nus.edu.sg/~tsim/documents/cross-expression.pdf,http://ieeexplore.ieee.org/document/6460447/,,http://www.comp.nus.edu.sg/~tsim/documents/cross-expression.pdf
+28c0cb56e7f97046d6f3463378d084e9ea90a89a,http://www.robots.ox.ac.uk/~vgg/publications/2005/Arandjelovic05a/arandjelovic05a.pdf,,,http://dro.deakin.edu.au/eserv/DU:30058433/arandjelovic-automaticface-2005.pdf
+28be652db01273289499bc6e56379ca0237506c0,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/3B_018_ext.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Xiao_FaLRR_A_Fast_2015_CVPR_paper.pdf
+28bcf31f794dc27f73eb248e5a1b2c3294b3ec9d,http://pdfs.semanticscholar.org/28bc/f31f794dc27f73eb248e5a1b2c3294b3ec9d.pdf,,,http://research.ijcaonline.org/volume96/number13/pxc3896731.pdf
+2836d68c86f29bb87537ea6066d508fde838ad71,http://arxiv.org/pdf/1510.06503v1.pdf,,,http://arxiv.org/abs/1510.06503
+28de411a5b3eb8411e7bcb0003c426aa91f33e97,http://pdfs.semanticscholar.org/28de/411a5b3eb8411e7bcb0003c426aa91f33e97.pdf,,,http://www.ijarcsse.com/docs/papers/Volume_4/4_April2014/V4I4-0235.pdf
+28b26597a7237f9ea6a9255cde4e17ee18122904,http://pdfs.semanticscholar.org/28b2/6597a7237f9ea6a9255cde4e17ee18122904.pdf,,,http://cercor.oxfordjournals.org/content/25/9/2876.full.pdf
+28d55935cc36df297fe21b98b4e2b07b5720612e,,,https://doi.org/10.1109/CISS.2016.7460569,
+28a45770faf256f294ce3bbd5de25c6d5700976e,,,https://doi.org/10.1109/ICDSP.2016.7868531,
+28fe6e785b32afdcd2c366c9240a661091b850cf,http://pdfs.semanticscholar.org/28fe/6e785b32afdcd2c366c9240a661091b850cf.pdf,,,http://www.ijais.org/research/volume10/number7/chandran-2016-ijais-451526.pdf
+28c9198d30447ffe9c96176805c1cd81615d98c8,http://pdfs.semanticscholar.org/28c9/198d30447ffe9c96176805c1cd81615d98c8.pdf,,,
+28d99dc2d673d62118658f8375b414e5192eac6f,http://www.cs.wayne.edu/~mdong/cvpr17.pdf,,,http://openaccess.thecvf.com/content_cvpr_2017/papers/Chen_Using_Ranking-CNN_for_CVPR_2017_paper.pdf
+280bc9751593897091015aaf2cab39805768b463,http://pdfs.semanticscholar.org/280b/c9751593897091015aaf2cab39805768b463.pdf,,,http://ece.ubm.ro/cjece/vol/6-2013/102-6105.pdf
+283d381c5c2ba243013b1c4f5e3b29eb906fa823,,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2005.222
+28aa89b2c827e5dd65969a5930a0520fdd4a3dc7,http://pdfs.semanticscholar.org/28aa/89b2c827e5dd65969a5930a0520fdd4a3dc7.pdf,,,http://drum.lib.umd.edu/bitstream/handle/1903/9240/Ramanathan_umd_0117E_10110.pdf?isAllowed=y&sequence=1
+2884ff0d58a66d42371b548526d685760e514043,,,https://doi.org/10.1109/ICIP.2015.7351242,
+28b061b5c7f88f48ca5839bc8f1c1bdb1e6adc68,https://www.cc.gatech.edu/~parikh/Publications/annoyance_prediction_CVPR2014.pdf,,,https://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Christie_Predicting_User_Annoyance_2014_CVPR_paper.pdf
+285472527c5dc1c620d9644849e7519766c2d655,http://lear.inrialpes.fr/people/mpederso/papers/ICCV15_Parts.pdf,,,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Pedersoli_Learning_Where_to_ICCV_2015_paper.pdf
+288d2704205d9ca68660b9f3a8fda17e18329c13,http://arxiv.org/pdf/1601.04153v2.pdf,,,https://arxiv.org/pdf/1601.04153v1.pdf
+17b46e2dad927836c689d6787ddb3387c6159ece,http://cs.uky.edu/~jacobs/papers/greenwell2014faceattributes.pdf,,,http://doi.acm.org/10.1145/2676440.2676443
+176a3e9e118712251124c1347516a92d5e315297,http://eprints.pascal-network.org/archive/00008997/01/ICMR11.pdf,,,http://eprints.pascal-network.org/archive/00008300/01/FerSidZhaPetetal11.pdf
+17a85799c59c13f07d4b4d7cf9d7c7986475d01c,http://pdfs.semanticscholar.org/17a8/5799c59c13f07d4b4d7cf9d7c7986475d01c.pdf,,,http://upcommons.upc.edu/bitstream/handle/2117/95700/TXPS1de1.pdf;jsessionid=DB2E1A09C46D3D29B463696BD798DC89?sequence=1
+1768909f779869c0e83d53f6c91764f41c338ab5,http://arxiv.org/pdf/1506.08959v1.pdf,,,http://arxiv.org/pdf/1506.08959v2.pdf
+171ca25bc2cdfc79cad63933bcdd420d35a541ab,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Alnajar_Calibration-Free_Gaze_Estimation_2013_ICCV_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2013.24
+176bd61cc843d0ed6aa5af83c22e3feb13b89fe1,http://pdfs.semanticscholar.org/648b/f64ff77aeccf761b83dd85143a6eb832b258.pdf,,,http://www.ri.cmu.edu/pub_files/pub4/lucey_simon_2007_2/lucey_simon_2007_2.pdf
+17768efd76a681902a33994da4d3163262bf657f,,,https://doi.org/10.1007/s12559-017-9472-6,
+17d01f34dfe2136b404e8d7f59cebfb467b72b26,http://pdfs.semanticscholar.org/4cfb/51d3b8478d7e63ba2661385337abf94d2c48.pdf,,,http://machinelearning.wustl.edu/mlpapers/paper_files/icml2013_cheng13.pdf
+176f26a6a8e04567ea71677b99e9818f8a8819d0,http://pdfs.semanticscholar.org/176f/26a6a8e04567ea71677b99e9818f8a8819d0.pdf,,https://doi.org/10.1007/978-3-319-23231-7_2,https://acceda.ulpgc.es/bitstream/10553/20097/5/C095-ICIAP15_preprint.pdf
+17cf838720f7892dbe567129dcf3f7a982e0b56e,http://pdfs.semanticscholar.org/6e0a/a9926e484e08b31fdeb85b73d1ae65ba47d6.pdf,,,http://arxiv.org/pdf/1603.07235v1.pdf
+176d9121e4e645344de4706dfb345ad456bfb84a,,,https://doi.org/10.1117/1.JEI.24.2.023009,
+17035089959a14fe644ab1d3b160586c67327db2,http://pdfs.semanticscholar.org/1703/5089959a14fe644ab1d3b160586c67327db2.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Li_VLAD3_Encoding_Dynamics_CVPR_2016_paper.pdf
+17370f848801871deeed22af152489e39b6e1454,http://mml.citi.sinica.edu.tw/papers/ICME_2015_Wei.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICME.2015.7177451
+17fa1c2a24ba8f731c8b21f1244463bc4b465681,http://pdfs.semanticscholar.org/d5ba/a722b1bca1f95e4e1fad968b2b74ec1ecc7f.pdf,,,https://arxiv.org/pdf/1511.05440v4.pdf
+179e566a2c1a2a48aa3d0028209c11ebe7d6740e,http://homepages.rpi.edu/~wuy9/EyeDetectionDBM/DeepFeaturesEyeDetection.pdf,,,https://www.ecse.rpi.edu/~cvrl/wuy/EyeDetectionDBM/DeepFeaturesEyeDetection.pdf
+17579791ead67262fcfb62ed8765e115fb5eca6f,http://pdfs.semanticscholar.org/1757/9791ead67262fcfb62ed8765e115fb5eca6f.pdf,,,http://o9vkm05l0.bkt.clouddn.com/clothing_parsing_aaai17.pdf
+17189cfedbdbd219849b8e7f8cf0293d49465f9c,,,,http://doi.acm.org/10.1145/2393347.2396505
+170aa0f16cd655fdd4d087f5e9c99518949a1b5c,,,https://doi.org/10.1007/s11263-007-0074-8,
+1772a7614c9b7daf01ffcda499c901ab7c768c4a,,,,
+177d1e7bbea4318d379f46d8d17720ecef3086ac,http://pdfs.semanticscholar.org/177d/1e7bbea4318d379f46d8d17720ecef3086ac.pdf,,,http://jmlr.csail.mit.edu/proceedings/papers/v44/chen15learning.pdf
+179545c1fc645cb2ad9b31a30f48352d541876ff,,,https://doi.org/10.1109/IJCNN.2007.4371116,
+17aa78bd4331ef490f24bdd4d4cd21d22a18c09c,http://pdfs.semanticscholar.org/17aa/78bd4331ef490f24bdd4d4cd21d22a18c09c.pdf,,,http://www.cs.toronto.edu/~ranzato/publications/le_app_icml2012.pdf
+170a5f5da9ac9187f1c88f21a88d35db38b4111a,https://arxiv.org/pdf/1611.08563v3.pdf,,,http://arxiv.org/abs/1611.08563
+17de5a9ce09f4834629cd76b8526071a956c9c6d,,,https://doi.org/10.1007/978-3-319-68063-7_8,
+176fc31a686fb70d73f1fa354bf043ad236f7aa3,http://www.cs.brown.edu/~black/Papers/ofevaltr.pdf,,https://doi.org/10.1007/s11263-010-0390-2,http://research.microsoft.com/pubs/117766/ofevaltr2.pdf
+1742e6c347037d5d4ccbdf5c7a27dfbf0afedb91,http://www1.i2r.a-star.edu.sg/~htang/Unified_Framework_for_Subspace_Clustering-TNNLS.pdf,,https://doi.org/10.1109/TNNLS.2015.2490080,https://pdfs.semanticscholar.org/1742/e6c347037d5d4ccbdf5c7a27dfbf0afedb91.pdf
+1742ffea0e1051b37f22773613f10f69d2e4ed2c,http://pdfs.semanticscholar.org/1742/ffea0e1051b37f22773613f10f69d2e4ed2c.pdf,,,https://www.thinkmind.org/download.php?articleid=intsys_v9_n12_2016_13
+1791f790b99471fc48b7e9ec361dc505955ea8b1,http://pdfs.semanticscholar.org/6fea/599d7b9fc72350d6e0947d3baaf44edc561b.pdf,,,http://www.dcs.gla.ac.uk/~tao/docs/jn_brmic.pdf
+171d8a39b9e3d21231004f7008397d5056ff23af,http://openaccess.thecvf.com/content_cvpr_2017/papers/Wu_Simultaneous_Facial_Landmark_CVPR_2017_paper.pdf,,,https://arxiv.org/pdf/1709.08130v1.pdf
+1723227710869a111079be7d61ae3df48604e653,,,https://doi.org/10.1109/INISTA.2014.6873606,
+17045163860fc7c38a0f7d575f3e44aaa5fa40d7,http://pdfs.semanticscholar.org/38b9/57e2b5ec0ea852d22d1481ef924fbf7f72e2.pdf,,https://doi.org/10.1007/978-3-319-10578-9_43,http://pengxj.github.io/papers/PWQP_ECCV14_SHVLAD.pdf
+176e5abddb87d029f85f60d1bbff67c66500e8c3,http://www.researchgate.net/profile/Tony_Han3/publication/220930104_Efficient_Facial_Attribute_Recognition_with_a_Spatial_Codebook/links/0046351affdf1f0d96000000.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICPR.2010.361
+174930cac7174257515a189cd3ecfdd80ee7dd54,https://arxiv.org/pdf/1502.02766v3.pdf,,,http://soc.fudan.edu.cn/vip/attachments/download/3892/ICMR-2015-Multi-view-Face-Detection-Using-Deep-Convolutional-Neural-Networks.pdf
+17fad2cc826d2223e882c9fda0715fcd5475acf3,http://pdfs.semanticscholar.org/8f64/def1fe17e2711405d66898a578e3b20da29e.pdf,,,http://ivizlab.sfu.ca/arya/Papers/Others/Facial%20Expressions%20as%20Adaptations.pdf
+17e563af203d469c456bb975f3f88a741e43fb71,https://cvhci.anthropomatik.kit.edu/~mhaurile/papers/WACV2016.pdf,,,https://cvhci.anthropomatik.kit.edu/~zalhalah/papers/wacv_2016_personid.pdf
+171389529df11cc5a8b1fbbe659813f8c3be024d,http://pdfs.semanticscholar.org/1713/89529df11cc5a8b1fbbe659813f8c3be024d.pdf,,https://doi.org/10.1007/978-3-642-12307-8_4,http://vis.uky.edu/~gravity/publications/2009/accv_face.pdf
+17d5e5c9a9ee4cf85dfbb9d9322968a6329c3735,http://pdfs.semanticscholar.org/17d5/e5c9a9ee4cf85dfbb9d9322968a6329c3735.pdf,,,http://www.aaai.org/ocs/index.php/FLAIRS/FLAIRS13/paper/download/5880/6121
+1750db78b7394b8fb6f6f949d68f7c24d28d934f,https://www3.nd.edu/~kwb/Bharati_Singh_Vatsa_Bowyer_TIFS_2016.pdf,,https://doi.org/10.1109/TIFS.2016.2561898,
+17cf6195fd2dfa42670dc7ada476e67b381b8f69,http://pdfs.semanticscholar.org/17cf/6195fd2dfa42670dc7ada476e67b381b8f69.pdf,,,http://iristown.engr.utk.edu/publications/papers/2003/kim_avsbs03.pdf
+174f46eccb5852c1f979d8c386e3805f7942bace,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Kae_The_Shape-Time_Random_2014_CVPR_paper.pdf,,,http://vis-www.cs.umass.edu/papers/strf_cvpr14.pdf
+177d03c5851f7082cb023a20fa8a2cd1dfb59467,,,,
+17501551acce05bfde4f0af77c21005f96e80553,,,,
+17670b60dcfb5cbf8fdae0b266e18cf995f6014c,https://arxiv.org/pdf/1606.02254v1.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Duong_Longitudinal_Face_Modeling_CVPR_2016_paper.pdf
+178b37392b2c6f1a167ebc1a5baa5f2f5916e4c4,,,https://doi.org/10.1007/s11042-013-1578-6,
+17027a05c1414c9a06a1c5046899abf382a1142d,http://www.cs.cmu.edu/~rahuls/pub/cvpr2015-alionment-rahuls.pdf,,https://doi.org/10.1109/CVPR.2015.7298827,https://arxiv.org/pdf/1411.7883v3.pdf
+17ded725602b4329b1c494bfa41527482bf83a6f,http://pdfs.semanticscholar.org/cb10/434a5d68ffbe9ed0498771192564ecae8894.pdf,,,http://arxiv.org/abs/1508.01292
+17738b0972571e7b4ae471d1b2dccea5ce057511,http://dayongwang.info/pdf/2011-MM.pdf,,,http://ink.library.smu.edu.sg/cgi/viewcontent.cgi?article=3285&context=sis_research
+17d03da4db3bb89537d644b682b2a091d563af4a,,,https://doi.org/10.1109/TNN.2010.2050600,
+177bc509dd0c7b8d388bb47403f28d6228c14b5c,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Sun_Deep_Learning_Face_2014_CVPR_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2014.244
+7b1ca9a74ab7fbfc32a69e8313ca2f2d78ac6c35,,,,http://doi.ieeecomputersociety.org/10.1109/ICSC.2017.61
+7ba0bf9323c2d79300f1a433ff8b4fe0a00ad889,http://pdfs.semanticscholar.org/c67c/5780cb9870b70b78e4c82da4f92c7bb2592d.pdf,,,https://arxiv.org/pdf/1606.07373v5.pdf
+7bc1e7d000ab517161a83b1fedf353e619516ddf,,,,http://doi.ieeecomputersociety.org/10.1109/WACV.2014.6836068
+7b63ed54345d8c06523f6b03c41a09b5c8f227e2,http://research.iaun.ac.ir/pd/pourghassem/pdfs/PaperC_1187.pdf,,,
+7bf0a1aa1d0228a51d24c0c3a83eceb937a6ae25,http://pdfs.semanticscholar.org/7bf0/a1aa1d0228a51d24c0c3a83eceb937a6ae25.pdf,,,http://vision.ucsd.edu/belongie-grp/research/carRec/dlagnekov_thesis_2005.pdf
+7b618a699b79c1272f6c83101917ad021a58d96b,,,https://doi.org/10.1007/s11042-014-1986-2,
+7b9961094d3e664fc76b12211f06e12c47a7e77d,http://pdfs.semanticscholar.org/7b99/61094d3e664fc76b12211f06e12c47a7e77d.pdf,,https://doi.org/10.1117/12.766810,http://www.ecs.syr.edu/research/dreamsnet/publications/C_SPIECA2008_EI118.pdf
+7bfe085c10761f5b0cc7f907bdafe1ff577223e0,http://pdfs.semanticscholar.org/c32b/aaa307da7376bcb5dfef7bb985c06d032a0f.pdf,,https://doi.org/10.24963/ijcai.2017/337,http://static.ijcai.org/proceedings-2017/0337.pdf
+7b43326477795a772c08aee750d3e433f00f20be,http://pdfs.semanticscholar.org/7b43/326477795a772c08aee750d3e433f00f20be.pdf,,,https://thesis.library.caltech.edu/10281/7/eyjolfsdottir_eyrun_2017.pdf
+7b9b3794f79f87ca8a048d86954e0a72a5f97758,http://pdfs.semanticscholar.org/7b9b/3794f79f87ca8a048d86954e0a72a5f97758.pdf,,https://doi.org/10.1515/jisys-2014-0085,http://oak.conncoll.edu/james-lee/publication/jisys-2013-0016.pdf
+7bce4f4e85a3bfcd6bfb3b173b2769b064fce0ed,http://pdfs.semanticscholar.org/7bce/4f4e85a3bfcd6bfb3b173b2769b064fce0ed.pdf,,https://doi.org/10.1007/978-3-642-24571-8_45,http://www.cris.ucr.edu/IGERT/papers/Cruz_Bhanu_Yang_2011.pdf
+7be60f8c34a16f30735518d240a01972f3530e00,http://www.cs.utexas.edu/~suyog/expression_recog.pdf,,https://doi.org/10.1109/ICCVW.2011.6130446,https://www.cs.utexas.edu/~suyog/expression_recog.pdf
+7bdcd85efd1e3ce14b7934ff642b76f017419751,http://www.cbsr.ia.ac.cn/users/zlei/papers/Lei-DFD-PAMI-14.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2013.112
+7bd37e6721d198c555bf41a2d633c4f0a5aeecc1,,,https://doi.org/10.1109/ACPR.2013.58,
+7b3b7769c3ccbdf7c7e2c73db13a4d32bf93d21f,http://cvrr.ucsd.edu/publications/2012/Martin_AutoUI2012.pdf,,,http://doi.acm.org/10.1145/2390256.2390281
+7b455cbb320684f78cd8f2443f14ecf5f50426db,,,,http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2017.33
+8f3e120b030e6c1d035cb7bd9c22f6cc75782025,http://pdfs.semanticscholar.org/8f3e/120b030e6c1d035cb7bd9c22f6cc75782025.pdf,,https://doi.org/10.1007/978-3-642-22152-1_14,http://www.eeecs.qub.ac.uk/~c.decampos/publist/papers/decampos2011f.pdf
+8f3675e979629ca9cee9436d37763f546edb8d40,,,https://doi.org/10.1109/SIU.2017.7960446,
+8fee7b38358815e443f8316fa18768d76dba12e3,,,,http://doi.acm.org/10.1145/2063576.2063676
+8fb611aca3bd8a3a0527ac0f38561a5a9a5b8483,http://pdfs.semanticscholar.org/8fb6/11aca3bd8a3a0527ac0f38561a5a9a5b8483.pdf,,,http://eprints.soton.ac.uk/388765/1/eprint.pdf
+8fa3478aaf8e1f94e849d7ffbd12146946badaba,http://pdfs.semanticscholar.org/8fa3/478aaf8e1f94e849d7ffbd12146946badaba.pdf,,https://doi.org/10.1007/978-3-642-33712-3_26,http://courses.cs.washington.edu/courses/cse590v/13au/ParkashParikh_ECCV_2012_attributes_feedback.pdf
+8fe5feeaa72eddc62e7e65665c98e5cb0acffa87,,,https://doi.org/10.1007/s12193-015-0209-0,
+8f8c0243816f16a21dea1c20b5c81bc223088594,http://pdfs.semanticscholar.org/8f8c/0243816f16a21dea1c20b5c81bc223088594.pdf,,,https://ijmter.com/papers/volume-2/issue-5/local-directional-number-based-classification-and-recognition-of-expre.pdf
+8f08b2101d43b1c0829678d6a824f0f045d57da5,http://pdfs.semanticscholar.org/b93b/f0a7e449cfd0db91a83284d9eba25a6094d8.pdf,,,http://openaccess.thecvf.com/content_cvpr_2015/supplemental/Antonakos_Active_Pictorial_Structures_2015_CVPR_supplemental.pdf
+8f992ed6686710164005c20ab16cef6c6ad8d0ea,http://sist.sysu.edu.cn/~zhwshi/Research/PreprintVersion/Half-quadratic%20based%20Iterative%20Minimization%20for%20Robust%20Sparse%20Representation.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2013.102
+8ff1f263d91f192269f6f3b324bdb1d30761ae41,,,,
+8fbec9105d346cd23d48536eb20c80b7c2bbbe30,http://conradsanderson.id.au/reading_group/Barr_Effectiveness_Face_WACV_2014.pdf,,,http://doi.ieeecomputersociety.org/10.1109/WACV.2014.6835992
+8f73af52d87c94d0bd43242462fd68d974eda331,,,https://doi.org/10.1109/ICB.2013.6613009,
+8f3e3f0f97844d3bfd9e9ec566ac7a54f6931b09,http://pdfs.semanticscholar.org/8f3e/3f0f97844d3bfd9e9ec566ac7a54f6931b09.pdf,,,http://ddd.uab.cat/pub/elcvia/elcvia_a2015v14n2/elcvia_a2015v14n2p24.pdf
+8f99f7ccb85af6d4b9e015a9b215c529126e7844,,,https://doi.org/10.1109/ROMAN.2017.8172359,
+8f8a5be9dc16d73664285a29993af7dc6a598c83,http://pdfs.semanticscholar.org/8f8a/5be9dc16d73664285a29993af7dc6a598c83.pdf,,,http://paper.ijcsns.org/07_book/201101/20110110.pdf
+8f5ce25e6e1047e1bf5b782d045e1dac29ca747e,http://poseidon.csd.auth.gr/papers/PUBLISHED/JOURNAL/pdf/Kotsia07b.pdf,,https://doi.org/10.1109/TIFS.2007.902017,https://ibug.doc.ic.ac.uk/media/uploads/documents/ieee_tifs_2007_kotsia.pdf
+8f89aed13cb3555b56fccd715753f9ea72f27f05,http://pdfs.semanticscholar.org/8f89/aed13cb3555b56fccd715753f9ea72f27f05.pdf,,,http://arxiv.org/abs/1711.08690
+8f92cccacf2c84f5d69db3597a7c2670d93be781,http://www.eurasip.org/Proceedings/Eusipco/Eusipco2006/papers/1568982203.pdf,http://ieeexplore.ieee.org/document/7071605/,,
+8f6263e4d3775757e804796e104631c7a2bb8679,http://pdfs.semanticscholar.org/8f62/63e4d3775757e804796e104631c7a2bb8679.pdf,,,https://icmlviz.github.io/icmlviz2016/assets/papers/7.pdf
+8f9f599c05a844206b1bd4947d0524234940803d,http://pdfs.semanticscholar.org/8f9f/599c05a844206b1bd4947d0524234940803d.pdf,,,http://www.jdl.ac.cn/doc/2005/Efficient%203D%20Reconstruction%20for%20Face%20Recognition.pdf
+8fcf7dfa30fa0c4194aef41c508a95d59be38f23,,,,
+8f60c343f76913c509ce623467bf086935bcadac,http://pdfs.semanticscholar.org/8f60/c343f76913c509ce623467bf086935bcadac.pdf,,,https://arxiv.org/pdf/1803.07835v1.pdf
+8f051647bd8d23482c6c3866c0ce1959b8bd40f6,,,https://doi.org/10.1016/j.asoc.2017.04.041,
+8f713e3c5b6b166c213e00a3873f750fb5939c9a,,,https://doi.org/10.1109/EUSIPCO.2015.7362563,
+8fd9c22b00bd8c0bcdbd182e17694046f245335f,http://pdfs.semanticscholar.org/8fd9/c22b00bd8c0bcdbd182e17694046f245335f.pdf,,,http://www.cim.mcgill.ca/~siddiqi/COMP-558-2012/subalazsi.pdf
+8fc36452a49cb0fd43d986da56f84b375a05b4c1,,,,http://doi.acm.org/10.1145/2542355.2542388
+8f5facdc0a2a79283864aad03edc702e2a400346,http://pdfs.semanticscholar.org/8f5f/acdc0a2a79283864aad03edc702e2a400346.pdf,,,http://www.ijeit.com/Vol%204/Issue%207/IJEIT1412201501_14.pdf
+8a09668efc95eafd6c3056ff1f0fbc43bb5774db,http://sist.sysu.edu.cn/~zhwshi/Research/PreprintVersion/Robust%20Principal%20Component%20Analysis%20Based%20on%20Maximum%20Correntropy%20Criterion.pdf,,https://doi.org/10.1109/TIP.2010.2103949,https://www.researchgate.net/profile/Bao-Gang_Hu/publication/49738766_Robust_Principal_Component_Analysis_Based_on_Maximum_Correntropy_Criterion/links/02bfe510b293206140000000.pdf?origin=publication_list
+8a3c5507237957d013a0fe0f082cab7f757af6ee,http://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf,,https://doi.org/10.1007/978-3-319-10599-4_7,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ECCV_2014/papers/8694/86940094.pdf
+8af411697e73f6cfe691fe502d4bfb42510b4835,http://pdfs.semanticscholar.org/8af4/11697e73f6cfe691fe502d4bfb42510b4835.pdf,,,http://www.wseas.us/e-library/conferences/2014/Tenerife/INFORM/INFORM-20.pdf
+8aff9c8a0e17be91f55328e5be5e94aea5227a35,,,https://doi.org/10.1109/TNNLS.2012.2191620,
+8a1e95b82d8cf27e0034e127091396efd4c8bd9e,,,https://doi.org/10.1109/IGARSS.2016.7729015,
+8a1ed5e23231e86216c9bdd62419c3b05f1e0b4d,http://pdfs.semanticscholar.org/8a1e/d5e23231e86216c9bdd62419c3b05f1e0b4d.pdf,,,http://cs231n.stanford.edu/reports/2016/pdfs/010_Report.pdf
+8a12934c4cb793c6f1e40129f37847414c1cc5c0,,,,
+8a54f8fcaeeede72641d4b3701bab1fe3c2f730a,http://pdfs.semanticscholar.org/acf8/b9607ca39f20b9b1956b8761b37f14eb4284.pdf,,https://doi.org/10.1117/12.2082817,https://hal.archives-ouvertes.fr/hal-01149535/file/Mazza_HVEIXX.pdf
+8a2210bedeb1468f223c08eea4ad15a48d3bc894,,,,http://doi.acm.org/10.1145/2513383.2513438
+8aae23847e1beb4a6d51881750ce36822ca7ed0b,http://pdfs.semanticscholar.org/8aae/23847e1beb4a6d51881750ce36822ca7ed0b.pdf,,,http://www.mic.atr.co.jp/~mlyons/pub_pdf/fg98-2.pdf
+8a40b6c75dd6392ee0d3af73cdfc46f59337efa9,http://pdfs.semanticscholar.org/f656/f6682655180162b67042d9d37c4d57c49238.pdf,,https://doi.org/10.1142/S0218001499000495,http://research.microsoft.com/en-us/um/people/zhang/Papers/IJPRAI.pdf
+8a3bb63925ac2cdf7f9ecf43f71d65e210416e17,https://www.math.uh.edu/~dlabate/ShearFace_ICPR2014.pdf,,https://doi.org/10.1109/ICPR.2014.317,http://www.math.uh.edu/~dlabate/ShearFace_ICPR2014.pdf
+8ad0d8cf4bcb5c7eccf09f23c8b7d25439c4ae2b,http://pdfs.semanticscholar.org/8ad0/d8cf4bcb5c7eccf09f23c8b7d25439c4ae2b.pdf,,,https://arxiv.org/pdf/1803.09760v1.pdf
+8adb2fcab20dab5232099becbd640e9c4b6a905a,http://pdfs.semanticscholar.org/d0d1/50a51c46cfb3bdd9d5fb570018c6534b57ff.pdf,,,http://www.merl.com/reports/TR98-08/TR98-08.ps.Z
+8a2bedaa38abf173823944f0de2c84f5b2549609,,,https://doi.org/10.1109/TNNLS.2016.2573644,
+8ab465c1a131ee4bee6ac0a0b19dfe68f5dcdcc4,,,,http://doi.ieeecomputersociety.org/10.1109/CSSE.2008.575
+8a0d10a7909b252d0e11bf32a7f9edd0c9a8030b,http://www.cs.unc.edu/~lazebnik/research/fall07/animals_on_the_web.pdf,,,http://pages.cs.wisc.edu/~lizhang/courses/cs766-2008f/syllabus/11-08-texts/animals_on_the_web.pdf
+8a91ad8c46ca8f4310a442d99b98c80fb8f7625f,http://vislab.isr.ist.utl.pt/wp-content/uploads/2016/02/2015_TIP.pdf,,https://doi.org/10.1109/TIP.2015.2424311,
+8aed6ec62cfccb4dba0c19ee000e6334ec585d70,http://pdfs.semanticscholar.org/8aed/6ec62cfccb4dba0c19ee000e6334ec585d70.pdf,,,http://web.cs.ucdavis.edu/~yjlee/projects/attribute_springer_book_preprint.pdf
+8a336e9a4c42384d4c505c53fb8628a040f2468e,http://pdfs.semanticscholar.org/8a33/6e9a4c42384d4c505c53fb8628a040f2468e.pdf,,https://doi.org/10.1186/s13637-016-0048-7,https://bsb-eurasipjournals.springeropen.com/track/pdf/10.1186/s13637-016-0048-7?site=bsb-eurasipjournals.springeropen.com
+7e1c419065fdb9cf2a31aa4b5d0c0e03f7afd54e,http://jpinfotech.org/wp-content/plugins/infotech/file/upload/pdf/8962Face-Sketch-Synthesis-via-Sparse-Representation-Based-Greedy-Search-pdf.pdf,,https://doi.org/10.1109/TIP.2015.2422578,
+7e8016bef2c180238f00eecc6a50eac473f3f138,http://pdfs.semanticscholar.org/7e80/16bef2c180238f00eecc6a50eac473f3f138.pdf,,,http://mediatum.ub.tum.de/doc/1305094/381270.pdf
+7ed2c84fdfc7d658968221d78e745dfd1def6332,http://pdfs.semanticscholar.org/7ed2/c84fdfc7d658968221d78e745dfd1def6332.pdf,,,http://www.researchgate.net/profile/Vasileios_Zografos/publication/228943637_Evaluation_of_linear_combination_of_views_for_object_recognition_on_real_and_synthetic_datasets/links/02e7e51aef954ebf9c000000.pdf
+7ebfa8f1c92ac213ff35fa27287dee94ae5735a1,,,https://doi.org/10.1109/TMM.2016.2614429,
+7e3367b9b97f291835cfd0385f45c75ff84f4dc5,https://infoscience.epfl.ch/record/182226/files/fg2013.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2013.6553773
+7e456e94f3080c761f858264428ee4c91cd187b2,,http://ieeexplore.ieee.org/document/6460899/,,
+7e00fb79576fe213853aeea39a6bc51df9fdca16,http://www.ics.ele.tue.nl/~tbasten/papers/AVSS2015_final.pdf,,,http://www.es.ele.tue.nl/~sander/publications/avss15.pdf
+7ee53d931668fbed1021839db4210a06e4f33190,http://crcv.ucf.edu/projects/videolocalization_images/CVPR16_Waqas_AL.pdf,,,http://crcv.ucf.edu/projects/videolocalization_images//CVPR16_Waqas_AL.pdf
+7e18b5f5b678aebc8df6246716bf63ea5d8d714e,http://pdfs.semanticscholar.org/7e18/b5f5b678aebc8df6246716bf63ea5d8d714e.pdf,,,
+7e9df45ece7843fe050033c81014cc30b3a8903a,http://pdfs.semanticscholar.org/7e9d/f45ece7843fe050033c81014cc30b3a8903a.pdf,,,http://www.clsp.jhu.edu/ws2000/groups/av_speech/papers/icassp_pose.pdf
+7ebd323ddfe3b6de8368c4682db6d0db7b70df62,http://pdfs.semanticscholar.org/7ebd/323ddfe3b6de8368c4682db6d0db7b70df62.pdf,,,http://avestia.com/CIST2015_Proceedings/papers/111.pdf
+7eb85bcb372261bad707c05e496a09609e27fdb3,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2014/W17/papers/Sathyanarayana_A_Compute-Efficient_Algorithm_2014_CVPR_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2014.101
+7ed5036a7c1eb2ea08fa2a12a446a9ccb6171c92,,,,
+7e48711c627edf90e9b232f2cbc0e3576c8f2f2a,,,https://doi.org/10.1007/s11760-015-0777-1,
+7ed6ff077422f156932fde320e6b3bd66f8ffbcb,http://pdfs.semanticscholar.org/7ed6/ff077422f156932fde320e6b3bd66f8ffbcb.pdf,,,http://i3dea.asu.edu/data/docs_pubs/ChapterV2_1.pdf
+7e0c75ce731131e613544e1a85ae0f2c28ee4c1f,http://pdfs.semanticscholar.org/7e0c/75ce731131e613544e1a85ae0f2c28ee4c1f.pdf,,,http://ibug.doc.ic.ac.uk/media/uploads/documents/kaltwang2015regression.pdf
+7e5aa453a21f56737db5e02d540f1b70ee6634ad,,,,
+7ed5af241061a6d88e0632a51a91d59627b00c34,,,,
+7e1ea2679a110241ed0dd38ff45cd4dfeb7a8e83,http://pdfs.semanticscholar.org/7e1e/a2679a110241ed0dd38ff45cd4dfeb7a8e83.pdf,,,https://www.ini.rub.de/upload/file/1488546854_195f616be695515b1579/Dissertation_Alberto_Escalante_2017.pdf
+7e507370124a2ac66fb7a228d75be032ddd083cc,http://pdfs.semanticscholar.org/8992/4d7418df1380044af9ab706a019418952141.pdf,,,http://arxiv.org/abs/1607.06250
+1056347fc5e8cd86c875a2747b5f84fd570ba232,http://arxiv.org/pdf/1607.06408v1.pdf,,https://doi.org/10.1109/WACV.2017.28,https://arxiv.org/pdf/1607.06408v3.pdf
+10550ee13855bd7403946032354b0cd92a10d0aa,http://www.public.asu.edu/~chaitali/confpapers/neuromorphic_dac12.pdf,,,http://doi.acm.org/10.1145/2228360.2228465
+10e12d11cb98ffa5ae82343f8904cfe321ae8004,http://pdfs.semanticscholar.org/10e1/2d11cb98ffa5ae82343f8904cfe321ae8004.pdf,,,http://ijcai.org/Abstract/15/502
+10e7dd3bbbfbc25661213155e0de1a9f043461a2,http://pdfs.semanticscholar.org/eb9c/24686d2d8a65894e6d708c6107724f2b6c04.pdf,,,http://arxiv.org/pdf/1608.04200v1.pdf
+10e2f2ad1dedec6066e063cb2098b089b35905a8,,,,http://doi.acm.org/10.1145/3052930
+10a285260e822b49023c4324d0fbbca7df8e128b,https://staff.fnwi.uva.nl/m.jain/pub/jain-objects2action-iccv2015.pdf,,,https://staff.fnwi.uva.nl/t.e.j.mensink/zsl2016/zslpubs/jain15iccv.pdf
+100105d6c97b23059f7aa70589ead2f61969fbc3,http://www.rci.rutgers.edu/~vmp93/Conference_pub/WACV2016_CFP.pdf,,,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477558
+10df1d4b278da991848fb71b572f687bd189c10e,,,https://doi.org/10.1109/ICPR.2016.7899739,
+100da509d4fa74afc6e86a49352751d365fceee5,http://vision.ucsd.edu/sites/default/files/iccv2011_20q_parts_final.pdf,,,http://www.vision.caltech.edu/visipedia/papers/WahEtal11.pdf
+10ab1b48b2a55ec9e2920a5397febd84906a7769,http://pdfs.semanticscholar.org/10ab/1b48b2a55ec9e2920a5397febd84906a7769.pdf,,,https://people.mpi-sws.org/~druschel/publications/ipic.pdf
+104ee18b513b52386f871e959c1f9e5072604e93,,,https://doi.org/10.1109/GlobalSIP.2017.8309189,
+10ce3a4724557d47df8f768670bfdd5cd5738f95,http://pdfs.semanticscholar.org/10ce/3a4724557d47df8f768670bfdd5cd5738f95.pdf,,https://doi.org/10.1007/3-540-45783-6_58,http://www.ri.cmu.edu/pub_files/pub3/gross_ralph_2002_2/gross_ralph_2002_2.ps.gz
+100428708e4884300e4c1ac1f84cbb16e7644ccf,http://www.math.uh.edu/~dlabate/ICASSP_2014.pdf,,https://doi.org/10.1109/ICASSP.2014.6853649,https://www.math.uh.edu/~dlabate/ICASSP_2014.pdf
+10f4bbf87a44bab3d79e330e486c897e95f5f33f,,,https://doi.org/10.1109/TIFS.2012.2186292,
+102e374347698fe5404e1d83f441630b1abf62d9,https://infoscience.epfl.ch/record/209965/files/TBME-preprint-infoscience.pdf,,https://doi.org/10.1109/TBME.2015.2457032,http://infoscience.epfl.ch/record/209965/files/TBME-preprint-infoscience.pdf
+1033ca56c7e88d8b3e80546848826f572c4cd63e,http://alumni.cs.ucsb.edu/~daniel/publications/conferences/fg11/DattaFerisVaqueroFG2011.pdf,,https://doi.org/10.1109/FG.2011.5771429,http://www.cs.ucsb.edu/~daniel/publications/conferences/fg11/DattaFerisVaqueroFG2011.pdf
+10f17534dba06af1ddab96c4188a9c98a020a459,http://www.cs.umass.edu/~mccallum/papers/peoplelda-iccv07.pdf,,,http://vis-www.cs.umass.edu/papers/iccv07PeopleLDA.pdf
+1071dde48a77f81c35ad5f0ca90a9daedb54e893,,http://ieeexplore.ieee.org/document/7881657/,,
+10e0e6f1ec00b20bc78a5453a00c792f1334b016,http://pdfs.semanticscholar.org/672f/ae3da801b2a0d2bad65afdbbbf1b2320623e.pdf,,,http://arxiv.org/pdf/1609.07042v1.pdf
+102b968d836177f9c436141e382915a4f8549276,https://ibug.doc.ic.ac.uk/media/uploads/documents/PanticEtAl-ACM-MM05-Proc.pdf,,,http://doi.acm.org/10.1145/1101149.1101299
+100641ed8a5472536dde53c1f50fa2dd2d4e9be9,https://filebox.ece.vt.edu/~parikh/Publications/Parikh_hum_mac_com_Allerton_2013.pdf,,https://doi.org/10.1109/Allerton.2013.6736651,https://www.cc.gatech.edu/~parikh/Publications/Parikh_hum_mac_com_Allerton_2013.pdf
+10195a163ab6348eef37213a46f60a3d87f289c5,https://www.research-collection.ethz.ch/bitstream/handle/20.500.11850/156130/eth-50296-01.pdf,,https://doi.org/10.1007/s11263-016-0940-3,http://www.vision.ee.ethz.ch/en/publications/papers/articles/eth_biwi_01299.pdf
+10b06d05b8b3a2c925b951a6d1d5919f536ffed4,http://gamesstudio.org/chek/wp-content/uploads/2014/01/interactivity_befaced.pdf,,,http://doi.acm.org/10.1145/2559206.2574773
+10e704c82616fb5d9c48e0e68ee86d4f83789d96,http://pdfs.semanticscholar.org/10e7/04c82616fb5d9c48e0e68ee86d4f83789d96.pdf,,,http://www.ks.informatik.uni-kiel.de/~vok/research/report_ifi_fill.ps.gz
+10f2b8188c745d43c1580f5ee6de71ad8d538b4d,http://staff.eng.bahcesehir.edu.tr/~cigdemeroglu/papers/international_conference_papers/2015_EmotiW.pdf,,,http://doi.acm.org/10.1145/2818346.2830594
+106732a010b1baf13c61d0994552aee8336f8c85,http://arxiv.org/pdf/1509.04186v2.pdf,,,http://arxiv.org/abs/1509.04186
+10e70a34d56258d10f468f8252a7762950830d2b,http://intechweb.org/downloadpdf.php?id=5889,,,http://doi.ieeecomputersociety.org/10.1109/CIS.2007.221
+102b27922e9bd56667303f986404f0e1243b68ab,https://applied-informatics-j.springeropen.com/track/pdf/10.1186/s40535-017-0042-5?site=applied-informatics-j.springeropen.com,,,
+10fcbf30723033a5046db791fec2d3d286e34daa,http://pdfs.semanticscholar.org/10fc/bf30723033a5046db791fec2d3d286e34daa.pdf,,,https://core.ac.uk/download/pdf/11784553.pdf
+108b2581e07c6b7ca235717c749d45a1fa15bb24,http://www.cs.umd.edu/~djacobs/pubs_files/TPAMI_Proofs.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2009.123
+10d334a98c1e2a9e96c6c3713aadd42a557abb8b,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Shi_Scene_Text_Recognition_2013_CVPR_paper.pdf,,,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989c961.pdf
+1050cd9bf281d0b7367c03d931e6e0b4fc08ccd3,,,,http://doi.ieeecomputersociety.org/10.1109/SMARTCOMP.2014.7043872
+10f66f6550d74b817a3fdcef7fdeba13ccdba51c,http://pdfs.semanticscholar.org/10f6/6f6550d74b817a3fdcef7fdeba13ccdba51c.pdf,,,http://fipa.cs.kit.edu/download/bfa.pdf
+107fc60a6c7d58a6e2d8572ad8c19cc321a9ef53,http://pdfs.semanticscholar.org/65ef/8706ae8c4e22d491550f5fff052ca3f5db21.pdf,,https://doi.org/10.1007/978-3-319-46448-0_31,https://arxiv.org/pdf/1604.01753v3.pdf
+1048c753e9488daa2441c50577fe5fdba5aa5d7c,http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/473.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2008.4587813
+10ca2e03ff995023a701e6d8d128455c6e8db030,http://pdfs.semanticscholar.org/a941/e5f8778cbac75e21172985a0575b51ea819b.pdf,,https://doi.org/10.1007/978-3-319-54184-6_9,http://grail.cs.washington.edu/wp-content/uploads/2016/09/aneja2016msc.pdf
+1921e0a97904bdf61e17a165ab159443414308ed,http://pdfs.semanticscholar.org/1921/e0a97904bdf61e17a165ab159443414308ed.pdf,,,http://aiweb.techfak.uni-bielefeld.de/files/Linke_WebImageRetrieval.pdf
+19dd371e1649ab55a46f4b98890d6937a411ec5d,http://www.ll.mit.edu/mission/cybersec/publications/publication-files/full_papers/2011_11_17_DagliC_HST_FP.pdf,,,http://www.ll.mit.edu/mission/communications/publications/publication-files/full_papers/2011_11_17_DagliC_HST_FP.pdf
+19841b721bfe31899e238982a22257287b9be66a,http://pdfs.semanticscholar.org/1984/1b721bfe31899e238982a22257287b9be66a.pdf,,,https://openreview.net/pdf?id=HkwVAXyCW
+19746957aa0d800d550da246a025ad44409cdb03,http://pdfs.semanticscholar.org/1974/6957aa0d800d550da246a025ad44409cdb03.pdf,,,https://www.jstage.jst.go.jp/article/mta/3/3/3_156/_pdf
+1922ad4978ab92ce0d23acc4c7441a8812f157e5,http://personal.ie.cuhk.edu.hk/~ccloy/files/cvpr_2015_alignment.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/3B_060_ext.pdf
+19e62a56b6772bbd37dfc6b8f948e260dbb474f5,http://pdfs.semanticscholar.org/19e6/2a56b6772bbd37dfc6b8f948e260dbb474f5.pdf,,,https://pdfs.semanticscholar.org/19e6/2a56b6772bbd37dfc6b8f948e260dbb474f5.pdf
+192723085945c1d44bdd47e516c716169c06b7c0,http://vislab.ucr.edu/PUBLICATIONS/pubs/Journal%20and%20Conference%20Papers/after10-1-1997/Journals/2014/VisionandAttentionTheoryBasedSampling14.pdf,,,http://www.cs.csub.edu/~acruz/papers/10.1109-TAFFC.2014.2316151.pdf
+1943c6bf8df8a64bd539a5cd6d4e68785eb590c2,http://ccs.njit.edu/inst/source/02MDDM08.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICDEW.2007.4400982
+19fb5e5207b4a964e5ab50d421e2549ce472baa8,http://mmi.tudelft.nl/sites/default/files/e-FEDCompSys14final.pdf,,,http://doi.acm.org/10.1145/2659532.2659627
+1989a1f9ce18d8c2a0cee3196fe6fa363aab80c2,http://www.es.ele.tue.nl/~sander/publications/icme16.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICME.2016.7552914
+1962e4c9f60864b96c49d85eb897141486e9f6d1,http://www.patternrecognition.cn/~zhongjin/2011/2011Lai_NCP.pdf,,https://doi.org/10.1007/s00521-011-0577-7,
+195df1106f4d7aff0e9cb609358abbf80f54a716,https://arxiv.org/pdf/1511.02917v1.pdf,,,http://arxiv.org/pdf/1511.02917v2.pdf
+193debca0be1c38dabc42dc772513e6653fd91d8,http://ibug.doc.ic.ac.uk/media/uploads/documents/trigeorgis2016mnemonic.pdf,,,http://research.gold.ac.uk/18543/1/trigeorgis2016mnemonic%5B1%5D.pdf
+191674c64f89c1b5cba19732869aa48c38698c84,http://pdfs.semanticscholar.org/1916/74c64f89c1b5cba19732869aa48c38698c84.pdf,,,http://www.ijates.com/images/short_pdf/1427398120_P166-174.pdf
+190d8bd39c50b37b27b17ac1213e6dde105b21b8,https://dr.ntu.edu.sg/bitstream/handle/10220/18955/fp518-wang.pdf?isAllowed=y&sequence=1,,,http://gmp.sce.ntu.edu.sg/papers/sigir.pdf
+19af008599fb17bbd9b12288c44f310881df951c,http://pdfs.semanticscholar.org/19af/008599fb17bbd9b12288c44f310881df951c.pdf,,,http://arxiv.org/pdf/1111.1947.pdf
+19296e129c70b332a8c0a67af8990f2f4d4f44d1,http://lear.inrialpes.fr/pubs/2009/GVS09/supplmat.pdf,,https://doi.org/10.1109/ICCV.2009.5459197,http://lear.inrialpes.fr/pubs/2009/GVS09/GVS09.pdf
+19666b9eefcbf764df7c1f5b6938031bcf777191,https://arxiv.org/pdf/1212.3913v4.pdf,,https://doi.org/10.1109/TNNLS.2015.2487364,https://arxiv.org/pdf/1212.3913v2.pdf
+198b6beb53e0e61357825d57938719f614685f75,http://pdfs.semanticscholar.org/198b/6beb53e0e61357825d57938719f614685f75.pdf,,,http://www.cs.uccs.edu/~jkalita/work/reu/REU2011/FinalPapers/Wilber.pdf
+1939168a275013d9bc1afaefc418684caf99ba66,http://research.microsoft.com/en-us/um/people/jiansun/papers/CVPR11_FaceAPModel.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2011.5995494
+190b3caa2e1a229aa68fd6b1a360afba6f50fde4,http://pdfs.semanticscholar.org/190b/3caa2e1a229aa68fd6b1a360afba6f50fde4.pdf,,https://doi.org/10.1016/j.cviu.2017.10.011,http://arxiv.org/pdf/1607.01794v1.pdf
+19e0cc41b9f89492b6b8c2a8a58d01b8242ce00b,http://pdfs.semanticscholar.org/4088/3844c1ceab95cb92498a92bfdf45beaa288e.pdf,,,http://arxiv.org/abs/1709.02848
+19e7bdf8310f9038e1a9cf412b8dd2c77ff64c54,http://cvrr.ucsd.edu/publications/2006/McCallTrivedi_v4hci_cvpr2006.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2006.77
+1938d85feafdaa8a65cb9c379c9a81a0b0dcd3c4,http://www4.comp.polyu.edu.hk/~cslzhang/paper/MBC_TIFS_final.pdf,,https://doi.org/10.1109/TIFS.2012.2217332,
+1966bddc083886a9b547e1817fe6abc352a00ec3,,,,http://doi.acm.org/10.1145/2733373.2806312
+195d331c958f2da3431f37a344559f9bce09c0f7,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/2B_066_ext.pdf,,,https://arxiv.org/pdf/1412.1526v1.pdf
+199c2df5f2847f685796c2523221c6436f022464,https://static.aminer.org/pdf/PDF/000/322/051/self_quotient_image_for_face_recognition.pdf,,https://doi.org/10.1109/ICIP.2004.1419763,
+19c0069f075b5b2d8ac48ad28a7409179bd08b86,http://people.csail.mit.edu/torralba/publications/iccv2013_khosla.pdf,,,https://people.csail.mit.edu/khosla/papers/iccv2013_khosla.pdf
+19705579b8e7d955092ef54a22f95f557a455338,,,https://doi.org/10.1109/ICIP.2014.7025277,
+19c0c7835dba1a319b59359adaa738f0410263e8,http://www.svcl.ucsd.edu/publications/journal/2009/pami09-fs.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2008.77
+1979e270093b343d62e97816eeed956062e155a0,,,https://doi.org/10.1016/j.micpro.2005.07.003,
+19808134b780b342e21f54b60095b181dfc7a600,http://www.openu.ac.il/home/hassner/projects/siftscales/HassneretalTPAMI16.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2016.2592916
+19d583bf8c5533d1261ccdc068fdc3ef53b9ffb9,https://arxiv.org/pdf/1503.03832v2.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/1A_089_ext.pdf
+194f5d3c240d06575403c9a422a0ebc86d43b91e,,,https://doi.org/10.1007/s11042-015-2580-y,
+197efbef17f92e5cb5076961b6cd9f59e88ffd9a,,,https://doi.org/10.1109/ICMLA.2017.00-59,
+19bbecead81e34b94111a2f584cf55db9a80e60c,,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6248025
+19a9f658ea14701502d169dc086651b1d9b2a8ea,http://www.cbsr.ia.ac.cn/users/zlei/papers/JJYan-FG2013.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2013.6553703
+195b61470720c7faa523e10e68d0c8d8f27d7c7a,,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2011.5995618
+19d4855f064f0d53cb851e9342025bd8503922e2,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989d468.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Li_Learning_SURF_Cascade_2013_CVPR_paper.pdf
+19d3b02185ad36fb0b792f2a15a027c58ac91e8e,http://pdfs.semanticscholar.org/19d3/b02185ad36fb0b792f2a15a027c58ac91e8e.pdf,,,http://www.cs.sunysb.edu/~vordonezroma/generation_nips2011.pdf
+193ec7bb21321fcf43bbe42233aed06dbdecbc5c,http://pdfs.semanticscholar.org/d40e/f2ca85d8b7540948677c2ed07f1f3677cfdd.pdf,,https://doi.org/10.1007/11564386_23,http://ilab.cs.ucsb.edu/projects/ya/AMFG_turk.pdf
+19da9f3532c2e525bf92668198b8afec14f9efea,http://pdfs.semanticscholar.org/19da/9f3532c2e525bf92668198b8afec14f9efea.pdf,,,http://face.cs.kit.edu/download/BeFIT_Face_verification_across_age_progression_using_real-world_datachallenge_overview.pdf
+1902288256839539aeb5feb3e1699b963a15aa1a,,,https://doi.org/10.1109/IJCNN.2016.7727435,
+19868a469dc25ee0db00947e06c804b88ea94fd0,http://pdfs.semanticscholar.org/1986/8a469dc25ee0db00947e06c804b88ea94fd0.pdf,,,http://web.ics.purdue.edu/~wang868/aaai2015_235_Wang.pdf
+192235f5a9e4c9d6a28ec0d333e36f294b32f764,http://www.andrew.cmu.edu/user/sjayasur/iccv.pdf,,,https://arxiv.org/pdf/1705.04352v2.pdf
+197eaa59a003a4c7cc77c1abe0f99d942f716942,http://www.lv-nus.org/papers%5C2009%5C2009_mm_age.pdf,,,https://pdfs.semanticscholar.org/197e/aa59a003a4c7cc77c1abe0f99d942f716942.pdf
+19c82eacd77b35f57ac8815b979716e08e3339ca,,,,http://doi.ieeecomputersociety.org/10.1109/ICITCS.2015.7292981
+19878141fbb3117d411599b1a74a44fc3daf296d,http://pdfs.semanticscholar.org/1987/8141fbb3117d411599b1a74a44fc3daf296d.pdf,,https://doi.org/10.1007/3-540-40063-X_19,http://www.ri.cmu.edu/pub_files/pub4/tian_ying_li_2000_1/tian_ying_li_2000_1.pdf
+19f076998ba757602c8fec04ce6a4ca674de0e25,http://pdfs.semanticscholar.org/19f0/76998ba757602c8fec04ce6a4ca674de0e25.pdf,,,http://journals.tubitak.gov.tr/elektrik/issues/elk-16-24-1/elk-24-1-17-1304-139.pdf
+191d30e7e7360d565b0c1e2814b5bcbd86a11d41,http://homepages.rpi.edu/~wuy9/DiscriminativeDeepFaceShape/DiscriminativeDeepFaceShape_IJCV.pdf,,https://doi.org/10.1007/s11263-014-0775-8,https://www.ecse.rpi.edu/~cvrl/Publication/pdf/Wu2014.pdf
+19994e667d908bc0aacfb663ab0a2bb5ad16b221,http://pdfs.semanticscholar.org/65b1/70e5ec86f5fc500fd5cbd7bfe7b2ec4ef045.pdf,,https://doi.org/10.1007/978-3-319-10578-9_44,http://www.ee.columbia.edu/ln/dvmm/publications/14/key_dynamic_static_evidence.pdf
+19eb486dcfa1963c6404a9f146c378fc7ae3a1df,https://pdfs.semanticscholar.org/3b4d/bd7be0b5b0df2e0c61a977974b1fc78ad3e5.pdf,,,https://scl.ece.ucsb.edu/sites/scl.ece.ucsb.edu/files/publications/b05_1_1.pdf
+191b70fdd6678ef9a00fd63710c70b022d075362,,,https://doi.org/10.1109/ICIP.2003.1247347,
+4c6daffd092d02574efbf746d086e6dc0d3b1e91,http://pdfs.semanticscholar.org/4c6d/affd092d02574efbf746d086e6dc0d3b1e91.pdf,,,http://www.cs.cmu.edu/~lujiang/camera_ready_papers/informedia_MED14.pdf
+4cf68a0b1a3f49393a8c11f3a18cccc7912b8424,,,,
+4c141534210df53e58352f30bab558a077fec3c6,,,https://doi.org/10.1109/TMM.2016.2557722,
+4cb8a691a15e050756640c0a35880cdd418e2b87,http://www.vision.caltech.edu/~bart/Publications/2004/BartUllmanClassBasedMatching.pdf,,https://doi.org/10.1109/CVPR.2004.312,
+4c19690889fb3a12ec03e65bae6f5f20420b4ba4,,,https://doi.org/10.1049/iet-ipr.2015.0699,
+4c8581246ed4d90c942a23ed7c0e007221fa684d,http://welcome.isr.ist.utl.pt/img/pdfs/3439_14-ICIPb.pdf,,https://doi.org/10.1109/ICIP.2014.7026226,http://welcome.isr.tecnico.ulisboa.pt/wp-content/uploads/2015/05/3439_14-ICIPb.pdf
+4ca1fcfd7650eeb0ac8d51cff31b70717cdddfdd,http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/1563.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2009.5206612
+4cc681239c8fda3fb04ba7ac6a1b9d85b68af31d,http://pdfs.semanticscholar.org/56a6/77c889e0e2c9f68ab8ca42a7e63acf986229.pdf,,,https://escholarship.org/content/qt9gp7w2h3/qt9gp7w2h3.pdf
+4c6e1840451e1f86af3ef1cb551259cb259493ba,http://pdfs.semanticscholar.org/4c6e/1840451e1f86af3ef1cb551259cb259493ba.pdf,,,http://mozart.dis.ulpgc.es/Gias/Publications/luis-datasetCreation-2006.pdf
+4c6886c489e93ccab5a1124555a6f3e5b0104464,,,https://doi.org/10.1109/ICIP.2017.8296921,
+4cf3419dbf83a76ccac11828ca57b46bbbe54e0a,https://www.researchgate.net/profile/Muhammad_Sharif9/publication/224173583_Illumination_normalization_preprocessing_for_face_recognition/links/02e7e51a47972ae996000000.pdf,,,
+4c87aafa779747828054cffee3125fcea332364d,http://pdfs.semanticscholar.org/4c87/aafa779747828054cffee3125fcea332364d.pdf,,https://doi.org/10.1007/978-3-319-14364-4_28,https://teresaproject.eu/wp-content/uploads/2015/07/isvc_submission_final.pdf
+4c648fe9b7bfd25236164333beb51ed364a73253,,,,http://doi.acm.org/10.1145/3038924
+4cdae53cebaeeebc3d07cf6cd36fecb2946f3e56,https://arxiv.org/pdf/1612.00523v1.pdf,,,http://arxiv.org/abs/1612.00523
+4c8e5fc0877d066516bb63e6c31eb1b8b5f967eb,http://pdfs.semanticscholar.org/4c8e/5fc0877d066516bb63e6c31eb1b8b5f967eb.pdf,,,http://people.cs.pitt.edu/~kovashka/modi_kovashka_bmvc2017.pdf
+4cb0e0c0e9b92e457f2c546dc25b9a4ff87ff819,http://dayongwang.info/pdf/2012-CIKM.pdf,,,http://www.researchgate.net/profile/Dayong_Wang2/publication/262291079_A_unified_learning_framework_for_auto_face_annotation_by_mining_web_facial_images/links/564cf1fd08ae4988a7a410e8.pdf?origin=publication_list
+4c8ef4f98c6c8d340b011cfa0bb65a9377107970,http://pdfs.semanticscholar.org/4c8e/f4f98c6c8d340b011cfa0bb65a9377107970.pdf,,https://doi.org/10.1007/978-3-319-58838-4_52,http://arxiv.org/abs/1703.09933
+4c822785c29ceaf67a0de9c699716c94fefbd37d,http://openaccess.thecvf.com/content_cvpr_2016/papers/Zhu_A_Key_Volume_CVPR_2016_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.219
+4c0846bcfa64d9e810802c5b7ef0f8b43523fe54,,,,http://doi.ieeecomputersociety.org/10.1109/TKDE.2014.2324594
+4c815f367213cc0fb8c61773cd04a5ca8be2c959,http://mirlab.org/conference_papers/International_Conference/ICASSP%202010/pdfs/0002470.pdf,,https://doi.org/10.1109/ICASSP.2010.5494892,https://www.researchgate.net/profile/Q_M_Jonathan_Wu/publication/224149596_Facial_expression_recognition_using_curvelet_based_local_binary_patterns/links/0046351f53b05566bc000000.pdf
+4c71b0cdb6b80889b976e8eb4457942bd4dd7b66,,,https://doi.org/10.1109/TIP.2014.2387379,
+4c1528bab3142ec957700ab502531e1a67e7f2f6,http://www.researchgate.net/profile/Xiaohua_Xie/publication/220932399_Restoration_of_a_Frontal_Illuminated_Face_Image_Based_on_KPCA/links/00b49522adfc6b1435000000.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICPR.2010.527
+4cec3e5776090852bef015a8bbe74fed862aa2dd,,,https://doi.org/10.1109/TSP.2013.2271479,
+4c6233765b5f83333f6c675d3389bbbf503805e3,https://perceptual.mpi-inf.mpg.de/files/2015/03/Yan_Vis13.pdf,,https://doi.org/10.1109/ICB.2013.6612972,http://perceptual.mpi-inf.mpg.de/files/2015/03/Yan_Vis13.pdf
+4c078c2919c7bdc26ca2238fa1a79e0331898b56,http://pdfs.semanticscholar.org/4c07/8c2919c7bdc26ca2238fa1a79e0331898b56.pdf,,,http://arxiv.org/pdf/1507.03409v1.pdf
+4cfa8755fe23a8a0b19909fa4dec54ce6c1bd2f7,https://arxiv.org/pdf/1611.09956v1.pdf,,,http://arxiv.org/abs/1611.09956
+4cac9eda716a0addb73bd7ffea2a5fb0e6ec2367,http://pdfs.semanticscholar.org/4cac/9eda716a0addb73bd7ffea2a5fb0e6ec2367.pdf,,,https://arxiv.org/pdf/1804.01429v1.pdf
+4c4236b62302957052f1bbfbd34dbf71ac1650ec,http://www.eurecom.fr/en/publication/3397/download/mm-publi-3397.pdf,,https://doi.org/10.1109/ICIP.2011.6116305,http://www.researchgate.net/profile/Nicholas_Evans3/publication/221125181_Semi-supervised_face_recognition_with_LDA_self-training/links/0deec51d2c4d78e47e000000.pdf
+4cd0da974af9356027a31b8485a34a24b57b8b90,https://arxiv.org/pdf/1703.00862v2.pdf,,,https://www.adrianbulat.com/downloads/BinaryHumanPose/binarized_cnn_keypoints.pdf
+4cb31f16e94067ce5eaeb8eae00eb0b0d49d46b2,,,,
+4c170a0dcc8de75587dae21ca508dab2f9343974,http://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf,,https://doi.org/10.1007/978-3-540-88693-8_25,http://www.cs.columbia.edu/~belhumeur/conference/eccv08a.pdf
+4c81c76f799c48c33bb63b9369d013f51eaf5ada,https://www.cmpe.boun.edu.tr/~salah/kaya17chalearn.pdf,,,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w26/papers/Kaya_Multi-Modal_Score_Fusion_CVPR_2017_paper.pdf
+4c1ce6bced30f5114f135cacf1a37b69bb709ea1,http://imag.pub.ro/common/staff/cflorea/papers/nlp_eye_MVA_site.pdf,,https://doi.org/10.1007/s00138-014-0656-8,
+4c5b38ac5d60ab0272145a5a4d50872c7b89fe1b,https://opus.lib.uts.edu.au/bitstream/10453/43339/1/APSIPA_ASC_2015_submission_313.pdf,,https://doi.org/10.1109/APSIPA.2015.7415453,
+4c523db33c56759255b2c58c024eb6112542014e,http://www0.cs.ucl.ac.uk/staff/P.Li/publication/ICCV09JaniaAghajanian.pdf,,https://doi.org/10.1109/ICCV.2009.5459352,http://www0.cs.ucl.ac.uk/staff/s.prince/Papers/ICCVCameraReadyVersion12.pdf
+4c842fbd4c032dd4d931eb6ff1eaa2a13450b7af,,,https://doi.org/10.1016/j.imavis.2014.06.004,
+261c3e30bae8b8bdc83541ffa9331b52fcf015e6,http://pdfs.semanticscholar.org/a751/04bc7dbaaf549d89f163560525031b49df38.pdf,,https://doi.org/10.5244/C.23.22,http://www.bmva.org/bmvc/2009/Papers/Paper097/Paper097.pdf
+26f03693c50eb50a42c9117f107af488865f3dc1,http://pdfs.semanticscholar.org/26f0/3693c50eb50a42c9117f107af488865f3dc1.pdf,,https://doi.org/10.1016/S0031-3203(00)00031-5,http://doi.ieeecomputersociety.org/10.1109/ICPR.2000.906203
+2661f38aaa0ceb424c70a6258f7695c28b97238a,http://mplab.ucsd.edu/wordpress/wp-content/uploads/multilayer2012.pdf,,https://doi.org/10.1109/TSMCB.2012.2195170,http://mplab.ucsd.edu/~marni/pubs/Wu_fera_smc_2011.pdf
+2609079d682998da2bc4315b55a29bafe4df414e,http://www.iab-rubric.org/papers/ICIP-13-camready.pdf,,https://doi.org/10.1109/ICIP.2013.6738616,
+264a84f4d27cd4bca94270620907cffcb889075c,https://arxiv.org/pdf/1612.06615v1.pdf,,https://doi.org/10.1109/ICPR.2016.7899807,http://arxiv.org/abs/1612.06615
+268c4bb54902433bf00d11391178a162e5d674c9,,,https://doi.org/10.1109/CVPRW.2010.5543261,
+26d407b911d1234e8e3601e586b49316f0818c95,https://arxiv.org/pdf/1709.00965v1.pdf,,https://doi.org/10.1109/ISMAR-Adjunct.2017.29,http://arxiv.org/abs/1709.00965
+261a80216dda39b127d2b7497c068ec7e0fdf183,,,https://doi.org/10.1109/TCSVT.2013.2265571,
+266ee26a6115f1521ce374e4ab106d997c7b1407,,,,
+26ebe98753acec806b7281d085110c06d9cd1e16,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.22
+26973cf1552250f402c82e9a4445f03fe6757b58,,,,http://doi.acm.org/10.1145/3126686.3130239
+26a72e9dd444d2861298d9df9df9f7d147186bcd,https://engineering.purdue.edu/~qobi/papers/mvap2016.pdf,,https://doi.org/10.1007/s00138-016-0768-4,http://arxiv.org/pdf/1511.05914v1.pdf
+26433d86b9c215b5a6871c70197ff4081d63054a,https://www.researchgate.net/profile/WL_Woo/publication/221093080_Multimodal_biometric_fusion_at_feature_level_Face_and_palmprint/links/0fcfd5134b4f62c892000000.pdf,http://ieeexplore.ieee.org/document/5580324/,,http://www.researchgate.net/profile/WL_Woo/publication/221093080_Multimodal_biometric_fusion_at_feature_level_Face_and_palmprint/links/0fcfd5134b4f62c892000000.pdf
+2601b679fdd637f3cd978753ae2f15e8759dd267,,,https://doi.org/10.1109/ICIP.2015.7351306,
+26fcefb80af66391e07e6239933de943c1cddc6e,,,,
+265af79627a3d7ccf64e9fe51c10e5268fee2aae,http://media.cs.tsinghua.edu.cn/~cvg/publications/ENGLISH%20JOURNAL%20PAPERS/A%20Mixture%20of%20Transformed%20Hidden%20Markov%20Models%20for%20Elastic%20Motion%20Estimation.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2009.111
+262cdbc57ecf5c18756046c0d8b9aa7eb10e3b19,,,,http://dl.acm.org/citation.cfm?id=3007694
+26b606ac6beb2977a7853b032416c23c7b36cb8a,,,,
+267c6e8af71bab68547d17966adfaab3b4711e6b,http://pdfs.semanticscholar.org/3097/60122ce6215876c013b2b0211f1df8239df5.pdf,,,https://export.arxiv.org/pdf/1711.03273
+26af867977f90342c9648ccf7e30f94470d40a73,http://pdfs.semanticscholar.org/26af/867977f90342c9648ccf7e30f94470d40a73.pdf,,,http://www.ijirst.org/articles/IJIRSTV3I4002.pdf
+26a89701f4d41806ce8dbc8ca00d901b68442d45,http://pdfs.semanticscholar.org/b7d8/fea52643236bd9b0dd7eec5f1cde248d10f6.pdf,,https://doi.org/10.1016/S0031-3203(03)00057-8,http://chenlab.ece.cornell.edu/Publication/Xiaoming/pr2002_xiaoming.pdf
+26c884829897b3035702800937d4d15fef7010e4,http://pdfs.semanticscholar.org/9200/10cc55d2658e04b01783118b59b7d90420c6.pdf,,https://doi.org/10.1093/ietisy/e91-d.2.341,https://www.researchgate.net/profile/Fan_Chen4/publication/235748000_Kotani_K._Facial_expression_recognition_by_supervised_independent_component_analysis_using_MAP_estimation._IEICE_Trans._Inf._Syst._E91-D(2)_341-350/links/0fcfd5109f01e01679000000.pdf
+266ed43dcea2e7db9f968b164ca08897539ca8dd,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Duong_Beyond_Principal_Components_2015_CVPR_paper.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/3B_037.pdf
+26c7eda262dfda1c3a3597a3bf1f2f1cc4013425,,,,
+26ad6ceb07a1dc265d405e47a36570cb69b2ace6,http://pdfs.semanticscholar.org/26ad/6ceb07a1dc265d405e47a36570cb69b2ace6.pdf,,,http://www.dtic.mil/dtic/tr/fulltext/u2/1017882.pdf
+26ec75b8ad066b36f814379a79ad57089c82c079,http://www.seas.upenn.edu/~bensapp/papers/ambig-tech-report-2009.pdf,,,https://pdfs.semanticscholar.org/8933/c8d7d0edbef90597b4555aa5e9569e66ae2c.pdf
+26b9d546a4e64c1d759c67cd134120f98a43c2a6,,,https://doi.org/10.1109/ICMLA.2012.120,
+2642810e6c74d900f653f9a800c0e6a14ca2e1c7,http://openaccess.thecvf.com/content_iccv_2015/papers/Liu_Projection_Bank_From_ICCV_2015_paper.pdf,,,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Liu_Projection_Bank_From_ICCV_2015_paper.pdf
+26947c3ead54e571286fdea25f1fc4d121817850,,,,
+26bbe76d1ae9e05da75b0507510b92e7e6308c73,,,https://doi.org/10.1007/s00371-014-1049-8,
+26949c1ba7f55f0c389000aa234238bf01a32d3b,,,https://doi.org/10.1109/ICIP.2017.8296814,
+26437fb289cd7caeb3834361f0cc933a02267766,http://pdfs.semanticscholar.org/2643/7fb289cd7caeb3834361f0cc933a02267766.pdf,,,http://ipedr.com/vol37/030-ICMEI2012-E00070.pdf
+26a5136ee4502500fb50cd5ade814aad45422771,,,https://doi.org/10.1142/S0218001413560028,
+2654ef92491cebeef0997fd4b599ac903e48d07a,http://www.ee.oulu.fi/~gyzhao/Papers/2008/Facial%20Expression%20Recognition%20from%20Near-Infrared%20Video%20Sequences.pdf,,https://doi.org/10.1109/ICPR.2008.4761697,http://figment.cse.usf.edu/~sfefilat/data/papers/TuCT6.3.pdf
+2679e4f84c5e773cae31cef158eb358af475e22f,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w6/papers/Liu_Adaptive_Deep_Metric_CVPR_2017_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2017.79
+265e76285e18587065a1e28246971f003c5267f3,http://cortex.informatik.tu-ilmenau.de/~wilhelm/wilhelm-soave-2004a.pdf,,https://doi.org/10.1109/ICSMC.2004.1400655,http://www.tu-ilmenau.de/fakia/fileadmin/template/startIA/neuroinformatik/misc/soave2004/Wilhelm-SOAVE-04a.pdf
+26727dc7347e3338d22e8cf6092e3a3c7568d763,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2015.7163088
+26ac607a101492bc86fd81a141311066cfe9e2b5,http://www.eecs.qmul.ac.uk/~hy300/papers/YangPatrasiccv2013.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2013.243
+21ef129c063bad970b309a24a6a18cbcdfb3aff5,http://pdfs.semanticscholar.org/21ef/129c063bad970b309a24a6a18cbcdfb3aff5.pdf,,,https://infoscience.epfl.ch/record/216839/files/EPFL_TH6837.pdf
+218b2c5c9d011eb4432be4728b54e39f366354c1,http://infolab.stanford.edu/~wangz/project/imsearch/ALIP/TIP13/sawant.pdf,,https://doi.org/10.1109/TIP.2013.2262289,
+217a21d60bb777d15cd9328970cab563d70b5d23,http://www.cise.ufl.edu/~dihong/assets/iccv2013.pdf,,,http://dahua.me/papers/dhlin_iccv13_hfa.pdf
+21e828071249d25e2edaca0596e27dcd63237346,http://research.microsoft.com/pubs/122158/cvpr2010.pdf,,,http://research.microsoft.com/en-us/um/people/jiansun/papers/cvpr10_facesearch.pdf
+21a2f67b21905ff6e0afa762937427e92dc5aa0b,http://pdfs.semanticscholar.org/21a2/f67b21905ff6e0afa762937427e92dc5aa0b.pdf,,https://doi.org/10.1155/2017/8710492,
+2179afa1cb4bd6d6ff0ca8df580ae511f59d99a3,http://pdfs.semanticscholar.org/f9f4/9f8347db35e721672955c3e24f60574553c0.pdf,,,http://www.aprs.org.au/dicta2003/pdf/0899.pdf
+2162654cb02bcd10794ae7e7d610c011ce0fb51b,http://www.jdl.ac.cn/doc/2011/201511610103648366_%E5%88%98%E8%B4%A4%E6%98%8E.pdf,,https://doi.org/10.1109/ICIP.2014.7025952,
+21258aa3c48437a2831191b71cd069c05fb84cf7,http://pdfs.semanticscholar.org/2125/8aa3c48437a2831191b71cd069c05fb84cf7.pdf,,https://doi.org/10.1007/978-3-642-33765-9_46,http://www.cise.ufl.edu/~mliu/ECCV12_RegularizedMetricLearning_Liu.pdf
+211c42a567e02987a6f89b89527de3bf4d2e9f90,http://www.cs.dartmouth.edu/~dutran/papers/ijcv16_preprint.pdf,,https://doi.org/10.1007/s11263-016-0905-6,
+21f3c5b173503185c1e02a3eb4e76e13d7e9c5bc,http://pdfs.semanticscholar.org/21f3/c5b173503185c1e02a3eb4e76e13d7e9c5bc.pdf,,,http://dspace.mit.edu/bitstream/handle/1721.1/7171/AIM-2001-010.pdf?sequence=2
+21bd9374c211749104232db33f0f71eab4df35d5,http://www.eurecom.fr/en/publication/5184/download/sec-publi-5184.pdf,,https://doi.org/10.1109/IWBF.2017.7935101,
+2138ccf78dcf428c22951cc066a11ba397f6fcef,,,https://doi.org/10.1109/BHI.2012.6211519,
+214db8a5872f7be48cdb8876e0233efecdcb6061,http://users.eecs.northwestern.edu/~mya671/mypapers/ICCV13_Zhang_Yang_Wang_Lin_Tian.pdf,,,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Zhang_Semantic-Aware_Co-indexing_for_2013_ICCV_paper.pdf
+21104bcf07ef0269ab133471a3200b9bf94b2948,http://www.cs.utexas.edu/~grauman/papers/liang-cvpr2014.pdf,,,http://vision.cs.utexas.edu/projects/beyondpairs/liang-cvpr2014-poster.pdf
+21bd60919e2e182a29af455353141ba4907b1b41,,,https://doi.org/10.1109/ACCESS.2018.2798573,
+21d9d0deed16f0ad62a4865e9acf0686f4f15492,http://amp.ece.cmu.edu/people/Andy/Andy_files/cvpr09.pdf,,,http://chenlab.ece.cornell.edu/people/Andy/Andy_files/cvpr09.pdf
+21cbf46c6adfb3a44ed2b30ff0b21a8391c18b13,,,https://doi.org/10.1109/VCIP.2017.8305137,
+214ac8196d8061981bef271b37a279526aab5024,http://pdfs.semanticscholar.org/214a/c8196d8061981bef271b37a279526aab5024.pdf,,https://doi.org/10.1007/978-3-319-19665-7_44,https://users.aalto.fi/~kannalj1/publications/scia2015.pdf
+213a579af9e4f57f071b884aa872651372b661fd,http://www.robots.ox.ac.uk/~vgg/publications/2013/Charles13a/charles13a.pdf,,https://doi.org/10.1007/s11263-013-0672-6,http://tomas.pfister.fi/files/charles13ijcv.pdf
+21f5f65e832c5472d6d08f6ee280d65ff0202e29,,,https://doi.org/10.1007/978-3-319-70353-4_44,
+21626caa46cbf2ae9e43dbc0c8e789b3dbb420f1,http://www.eecs.qmul.ac.uk/~jason/Research/PreprintVersion/Transductive%20VIS-NIR%20Face%20Matching.pdf,,https://doi.org/10.1109/ICIP.2012.6467140,
+21ec41a6ee3c655cf54c6db659d56480fc76e742,http://www.liacs.nl/home/mlew/ivc2007.emotion.pdf,,https://doi.org/10.1016/j.imavis.2005.12.021,http://carol.wins.uva.nl/~nicu/publications/FG04.pdf
+217de4ff802d4904d3f90d2e24a29371307942fe,http://www.cs.columbia.edu/~tberg/papers/poof-cvpr13.pdf,,,http://thomasberg.org/papers/poof-cvpr13.pdf
+218139e5262cb4f012cd2e119074aa59b89ebc32,,,,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.265
+217aa3aa0b3d9f6f394b5d26f03418187d775596,,,,http://doi.acm.org/10.1145/3123266.3123298
+2182ca35e1a5b3cff9c5ce5308f5d0d12e4f911a,,,,
+210b98394c3be96e7fd75d3eb11a391da1b3a6ca,http://pdfs.semanticscholar.org/210b/98394c3be96e7fd75d3eb11a391da1b3a6ca.pdf,,https://doi.org/10.1007/978-3-319-16814-2_41,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ACCV_2014/pages/PDF/80.pdf
+21765df4c0224afcc25eb780bef654cbe6f0bc3a,http://ci2cv.net/media/papers/2013_ICCV_Kiani.pdf,,,http://ci2cv.net/static/papers/2013_ICCV_Kiani.pdf
+21b16df93f0fab4864816f35ccb3207778a51952,http://sibgrapi.sid.inpe.br/col/sid.inpe.br/sibgrapi/2015/06.18.19.06/doc/PID3766353.pdf,,,http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2015.26
+2116b13eb3af418ef02502715e8f3c98664e699a,,,,
+218ce079b9e64288faf20a87043dc32884105102,,,,
+2149d49c84a83848d6051867290d9c8bfcef0edb,,,https://doi.org/10.1109/TIFS.2017.2746062,
+4d83a25931ff8f73130a4d07e0209fcb3191db4b,,,,
+4d49c6cff198cccb21f4fa35fd75cbe99cfcbf27,http://pdfs.semanticscholar.org/4d49/c6cff198cccb21f4fa35fd75cbe99cfcbf27.pdf,,,http://www.cvc.uab.es/~jordi/pub/TPCA_DEFINITIU.pdf
+4d625677469be99e0a765a750f88cfb85c522cce,http://pdfs.semanticscholar.org/cccc/378e98218bbedfd93da956e4a07b9971b928.pdf,,,http://www.roboticsproceedings.org/rss12/p34.html
+4d6c3a3f9410ca35eb3389ec7088f5e2c16ec3ea,http://www.researchgate.net/profile/Roland_Goecke/publication/221429947_Static_facial_expression_analysis_in_tough_conditions_Data_evaluation_protocol_and_benchmark/links/0fcfd50e81697312d6000000.pdf,,https://doi.org/10.1109/ICCVW.2011.6130508,http://staff.estem-uc.edu.au/roland/wp-content/uploads/file/roland/publications/Conference/ICCV/BeFIT2011/dhall_goecke_lucey_gedeon_BeFIT2011_StaticFacialExpressionAnalysisInToughConditions.pdf
+4da735d2ed0deeb0cae4a9d4394449275e316df2,http://cvrr.ucsd.edu/publications/2016/0406.pdf,,https://doi.org/10.1109/IVS.2016.7535575,http://cvrr.ucsd.edu/publications/2016/HeadEyeHand.pdf
+4dbfbe5fd96c9efc8c3c2fd54406b62979482678,,,https://doi.org/10.1016/j.jvcir.2014.07.007,
+4d8de4dad40faa835e8a01e3aa465e1bb3a996f4,,,,
+4d1f77d9418a212c61a3c75c04a5b3884f6441ba,,,https://doi.org/10.1109/TIP.2017.2788196,
+4d15254f6f31356963cc70319ce416d28d8924a3,http://pdfs.semanticscholar.org/4d15/254f6f31356963cc70319ce416d28d8924a3.pdf,,,http://www.ri.cmu.edu/pub_files/pub3/gross_ralph_2001_4/gross_ralph_2001_4.pdf
+4d530a4629671939d9ded1f294b0183b56a513ef,http://pdfs.semanticscholar.org/4d53/0a4629671939d9ded1f294b0183b56a513ef.pdf,,,http://ijmlc.org/papers/153-C00896-002.pdf
+4d2975445007405f8cdcd74b7fd1dd547066f9b8,http://pdfs.semanticscholar.org/4d29/75445007405f8cdcd74b7fd1dd547066f9b8.pdf,,,http://www.image.ece.ntua.gr/papers/591.pdf
+4df889b10a13021928007ef32dc3f38548e5ee56,http://ww2.cs.fsu.edu/~ywu/PDF-files/IJCNN.pdf,,https://doi.org/10.1109/IJCNN.2007.4371359,http://www.cs.fsu.edu/~liux/research/pub/papers/conferences/Wu-MOCA-IJCNN-2007.pdf
+4dce568994fb43095067ac893bbc079058494587,,,,
+4d6462fb78db88afff44561d06dd52227190689c,http://pdfs.semanticscholar.org/4d64/62fb78db88afff44561d06dd52227190689c.pdf,,https://doi.org/10.1007/978-3-642-02172-5_9,http://www.cvc.uab.es/~petia/2009/Face-to-Face%20Social%20Activity%20Detection%20Using%20Data%20Collected%20with%20a%20Wearable%20Device.pdf
+4d423acc78273b75134e2afd1777ba6d3a398973,http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf,,,http://www.ri.cmu.edu/pub_files/pub3/sim_terence_2002_1/sim_terence_2002_1.ps.gz
+4dd6d511a8bbc4d9965d22d79ae6714ba48c8e41,http://pdfs.semanticscholar.org/4dd6/d511a8bbc4d9965d22d79ae6714ba48c8e41.pdf,,,http://www.ieice.org/proceedings/ITC-CSCC2008/pdf/p33_A2-4.pdf
+4de757faa69c1632066391158648f8611889d862,http://pdfs.semanticscholar.org/4de7/57faa69c1632066391158648f8611889d862.pdf,,,http://ijaers.com/Paper-1-2016/5%20IJAERS-MAR-2016-7-Review%20of%20Face%20Recognition%20Technology%20Using%20Feature%20Fusion%20Vector.pdf
+4dd71a097e6b3cd379d8c802460667ee0cbc8463,http://www.dgcv.nii.ac.jp/Publications/Papers/2015/BWILD2015.pdf,,https://doi.org/10.1016/j.imavis.2016.02.004,http://doi.ieeecomputersociety.org/10.1109/FG.2015.7284810
+4d9a02d080636e9666c4d1cc438b9893391ec6c7,http://www.iainm.com/iainm/Publications_files/2010_The%20Extended.pdf,,https://doi.org/10.1109/CVPRW.2010.5543262,http://www.pitt.edu/~jeffcohn/CVPR2010_CK+2.pdf
+4d9c02567e7b9e065108eb83ea3f03fcff880462,http://www.cv-foundation.org/openaccess/content_cvpr_2016_workshops/w28/papers/Peng_Towards_Facial_Expression_CVPR_2016_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2016.192
+4d7e1eb5d1afecb4e238ba05d4f7f487dff96c11,http://mirlab.org/conference_papers/International_Conference/ICASSP%202017/pdfs/0002352.pdf,,https://doi.org/10.1109/ICASSP.2017.7952577,
+4d90bab42806d082e3d8729067122a35bbc15e8d,http://pdfs.semanticscholar.org/4d90/bab42806d082e3d8729067122a35bbc15e8d.pdf,,https://doi.org/10.1016/j.patrec.2012.07.015,https://www.researchgate.net/profile/Xiaohua_Huang2/publication/258807129_Towards_a_dynamic_expression_recognition_system_under_facial_occlusion/links/55640e2c08ae6f4dcc98bee3.pdf?origin=publication_list
+4d3c4c3fe8742821242368e87cd72da0bd7d3783,http://www.ee.cuhk.edu.hk/~xgwang/papers/sunWTiccv13.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2013.188
+4d01d78544ae0de3075304ff0efa51a077c903b7,http://pdfs.semanticscholar.org/8f82/71d557ae862866c692e556f610ab45dcc399.pdf,,,http://www.ijcaonline.org/archives/volume77/number13/13541-0861?format=pdf
+4dd2be07b4f0393995b57196f8fc79d666b3aec5,http://mirlab.org/conference_papers/International_Conference/ICASSP%202014/papers/p3572-lee.pdf,,https://doi.org/10.1109/ICASSP.2014.6854261,
+4d356f347ab6647fb3e8ed8c2154dbd359e479ed,http://www.researchgate.net/profile/Anna_Esposito/publication/225441684_Extracting_and_Associating_Meta-features_for_Understanding_Peoples_Emotional_Behaviour_Face_and_Speech/links/02e7e52bed3a1b106e000000.pdf,,https://doi.org/10.1007/s12559-010-9072-1,
+4d8ce7669d0346f63b20393ffaa438493e7adfec,http://pdfs.semanticscholar.org/4d8c/e7669d0346f63b20393ffaa438493e7adfec.pdf,,https://doi.org/10.1007/978-3-540-88682-2_52,http://www.researchgate.net/profile/Dimitris_Metaxas/publication/221304074_Similarity_Features_for_Facial_Event_Analysis/links/0f317530e5f83f343f000000.pdf
+4d4736173a5e72c266e52f3a43bdcb2b58f237a2,,,https://doi.org/10.1109/ISSPA.2012.6310583,
+4d6d6369664a49f6992f65af4148cefef95055bc,,,https://doi.org/10.1109/ICIP.2014.7025407,
+4d21a2866cfd1f0fb2a223aab9eecfdec963059a,http://pdfs.semanticscholar.org/ddb3/5264ae7a74811bf8eb63d0eca7b7db07a4b1.pdf,,,http://www.ri.cmu.edu/pub_files/pub2/tian_ying_li_2000_2/tian_ying_li_2000_2.ps.gz
+4d16337cc0431cd43043dfef839ce5f0717c3483,http://pdfs.semanticscholar.org/4d16/337cc0431cd43043dfef839ce5f0717c3483.pdf,,,http://elijah.cs.cmu.edu/DOCS/wang-mmsys2017.pdf
+4d0b3921345ae373a4e04f068867181647d57d7d,http://people.cs.pitt.edu/~kovashka/murrugarra_llerena_kovashka_wacv2017_slides.pdf,,https://doi.org/10.1109/WACV.2017.63,http://people.cs.pitt.edu/~kovashka/murrugarra_llerena_kovashka_wacv2017_poster.pdf
+4d0ef449de476631a8d107c8ec225628a67c87f9,http://www.wjscheirer.com/papers/wjs_btas2010b_photohead.pdf,,https://doi.org/10.1109/BTAS.2010.5634517,https://www.wjscheirer.com/papers/wjs_btas2010b_photohead.pdf
+4df3143922bcdf7db78eb91e6b5359d6ada004d2,http://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf,,,"http://www.csun.edu/~dma/Ma,%20Correll,%20&%20Wittenbrink,%202015.pdf"
+7588388b3f68c1a1a6b3b336d8387fee5c57c985,,,,
+75fcbb01bc7e53e9de89cb1857a527f97ea532ce,http://pdfs.semanticscholar.org/75fc/bb01bc7e53e9de89cb1857a527f97ea532ce.pdf,,,http://www.researchgate.net/profile/Veikko_Surakka/publication/221546584_Detection_of_Facial_Landmarks_from_Neutral_Happy_and_Disgust_Facial_Images/links/0fcfd50cadbc4a796f000000.pdf
+75858dbee2c248a60741fbc64dcad4f8b63d51cb,,,https://doi.org/10.1109/TIP.2015.2460464,
+7577a1ddf9195513a5c976887ad806d1386bb1e9,http://pdfs.semanticscholar.org/7577/a1ddf9195513a5c976887ad806d1386bb1e9.pdf,,,https://arxiv.org/pdf/1706.00699v1.pdf
+757e4cb981e807d83539d9982ad325331cb59b16,http://pdfs.semanticscholar.org/757e/4cb981e807d83539d9982ad325331cb59b16.pdf,,https://doi.org/10.1007/978-3-642-41181-6_48,http://www.researchgate.net/profile/Daniel_Riccio/publication/265013202_Demographics_versus_biometric_automatic_interoperability/links/53fb93330cf2dca8fffe7f73.pdf
+7535e3995deb84a879dc13857e2bc0796a2f7ce2,,,https://doi.org/10.1007/s10618-010-0207-5,
+75d7ba926ef1cc2adab6c5019afbb2f69a5ca27d,,,https://doi.org/10.1007/s00521-012-1042-y,
+75e9a141b85d902224f849ea61ab135ae98e7bfb,http://pdfs.semanticscholar.org/d1a5/0fffd1c9cf033943636b9e18172ed68582b1.pdf,,,https://www.sciencedirect.com/science/article/pii/S0042698914000960
+75b833dde2e76c5de5912db3444d62c4131d15dc,http://www.researchgate.net/profile/Vassilios_Solachidis/publication/4303365_A_Face_Tracker_Trajectories_Clustering_Using_Mutual_Information/links/09e4150ca146dba69c000000.pdf,,https://doi.org/10.1109/MMSP.2007.4412854,
+75503aff70a61ff4810e85838a214be484a674ba,https://www.ri.cmu.edu/pub_files/2012/0/Improved-Facial-Expression.pdf,,,http://www.ri.cmu.edu/pub_files/2012/0/Improved-Facial-Expression.pdf
+75fd9acf5e5b7ed17c658cc84090c4659e5de01d,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/2B_035_ext.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Tzimiropoulos_Project-Out_Cascaded_Regression_2015_CVPR_paper.pdf
+75908b6460eb0781130ed0aa94585be25a584996,http://pdfs.semanticscholar.org/7590/8b6460eb0781130ed0aa94585be25a584996.pdf,,,http://fas.sfu.ca/pub/cs/TH/2004/DarrylAndersonMSc.pdf
+75cd81d2513b7e41ac971be08bbb25c63c37029a,http://pdfs.semanticscholar.org/75cd/81d2513b7e41ac971be08bbb25c63c37029a.pdf,,https://doi.org/10.1016/j.image.2011.05.002,http://people.bu.edu/bsk/PDFs/Saghafi_SPIC12_Preprint.pdf
+75bf3b6109d7a685236c8589f8ead7d769ea863f,http://pdfs.semanticscholar.org/75bf/3b6109d7a685236c8589f8ead7d769ea863f.pdf,,,https://arxiv.org/pdf/1706.07527v1.pdf
+75b51140d08acdc7f0af11b0ffa1edb40ebbd059,,,https://doi.org/10.1007/s00521-010-0381-9,
+751970d4fb6f61d1b94ca82682984fd03c74f127,http://pdfs.semanticscholar.org/7519/70d4fb6f61d1b94ca82682984fd03c74f127.pdf,,,http://csl.anthropomatik.kit.edu/downloads/BS13_WandSchulteJankeSchultz_ArrayBasedEMGSSI.pdf
+7587a09d924cab41822a07cd1a988068b74baabb,,,,
+759a3b3821d9f0e08e0b0a62c8b693230afc3f8d,http://homes.cs.washington.edu/~neeraj/projects/faceverification/base/papers/nk_iccv2009_attrs.pdf,,https://doi.org/10.1109/ICCV.2009.5459250,http://homes.cs.washington.edu/~neeraj/base/publications/base/papers/nk_iccv2009_attrs.pdf
+754626bd5fb06fee5e10962fdfeddd495513e84b,,,https://doi.org/10.1109/SIU.2017.7960646,
+75ebe1e0ae9d42732e31948e2e9c03d680235c39,http://pdfs.semanticscholar.org/75eb/e1e0ae9d42732e31948e2e9c03d680235c39.pdf,,https://doi.org/10.5244/C.20.92,http://www.robots.ox.ac.uk:5000/~vgg/publications/papers/everingham06a.pdf
+75e5ba7621935b57b2be7bf4a10cad66a9c445b9,http://pdfs.semanticscholar.org/75e5/ba7621935b57b2be7bf4a10cad66a9c445b9.pdf,,https://doi.org/10.1016/j.patcog.2014.06.020,http://www.pris.net.cn/wp-content/uploads/2010/11/whdeng_pr4.pdf
+75859ac30f5444f0d9acfeff618444ae280d661d,http://www.cse.msu.edu/rgroups/biometrics/Publications/SecureBiometrics/NagarNandakumarJain_MultibiometricCryptosystems_TIFS11.pdf,,https://doi.org/10.1109/TIFS.2011.2166545,http://www.cse.msu.edu/biometrics/Publications/SecureBiometrics/NagarNandakumarJain_MultibiometricCryptosystems_TIFS11.pdf
+750c19d5bb23ac6956b6cfff15129f226a61dfe9,,,,
+7553fba5c7f73098524fbb58ca534a65f08e91e7,http://pdfs.semanticscholar.org/7553/fba5c7f73098524fbb58ca534a65f08e91e7.pdf,,,http://ijcsmc.com/docs/papers/June2014/V3I6201499a71.pdf
+751b26e7791b29e4e53ab915bfd263f96f531f56,http://affect.media.mit.edu/pdfs/12.Hernandez-Hoque-Drevo-Picard-MoodMeter-Ubicomp.pdf,,,http://web.media.mit.edu/~mehoque/Publications/Hoque-ubicomp2012_mit_mood_meter.pdf
+75da1df4ed319926c544eefe17ec8d720feef8c0,http://pdfs.semanticscholar.org/75da/1df4ed319926c544eefe17ec8d720feef8c0.pdf,,,http://people.cs.umass.edu/~elm/papers/fddb.pdf
+751fb994b2c553dc843774a5620bfcab8bc657fd,,,https://doi.org/10.1007/978-3-319-67180-2_47,
+75259a613285bdb339556ae30897cb7e628209fa,http://openaccess.thecvf.com/content_iccv_2015/papers/Kodirov_Unsupervised_Domain_Adaptation_ICCV_2015_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.282
+753a277c1632dd61233c488cc55d648de3caaaa3,,,https://doi.org/10.1016/j.patcog.2011.02.013,
+75d2ecbbcc934563dff6b39821605dc6f2d5ffcc,http://pdfs.semanticscholar.org/75d2/ecbbcc934563dff6b39821605dc6f2d5ffcc.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2003.1238646
+81e2a458b894705cc21a9719f743bfa61f1e6436,,,,
+81bfe562e42f2eab3ae117c46c2e07b3d142dade,http://pdfs.semanticscholar.org/81bf/e562e42f2eab3ae117c46c2e07b3d142dade.pdf,,,https://arxiv.org/pdf/1209.3433v1.pdf
+81695fbbbea2972d7ab1bfb1f3a6a0dbd3475c0f,http://pdfs.semanticscholar.org/8169/5fbbbea2972d7ab1bfb1f3a6a0dbd3475c0f.pdf,,,https://comserv.cs.ut.ee/home/files/Uibo_informaatika_2016.pdf?reference=81695FBBBEA2972D7AB1BFB1F3A6A0DBD3475C0F&study=ATILoputoo
+81a4397d5108f6582813febc9ddbeff905474120,,,https://doi.org/10.1109/ICPR.2016.7899883,
+8147ee02ec5ff3a585dddcd000974896cb2edc53,http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2012aePAMI.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2011.107
+814b05113ba0397d236736f94c01e85bb034c833,http://pdfs.semanticscholar.org/814b/05113ba0397d236736f94c01e85bb034c833.pdf,,https://doi.org/10.1016/j.ins.2016.02.034,http://www.cs.nott.ac.uk/~psxdmt/turcsany16_lrfdnn_infsci.pdf
+81146c567fa5a3c83778c1c940780d00706fa2bf,,,,
+812d3f6975f4cb87e9905ef18696c5c779227634,,,https://doi.org/10.1186/s13640-016-0151-4,
+816bd8a7f91824097f098e4f3e0f4b69f481689d,http://www.vision.ee.ethz.ch/en/publications/papers/proceedings/eth_biwi_00334.pdf,,,http://doi.acm.org/10.1145/1026711.1026742
+8184a92e1ccc7fdeb4a198b226feb325c63d6870,,,https://doi.org/10.1109/ICCE.2017.7889290,
+81831ed8e5b304e9d28d2d8524d952b12b4cbf55,http://pdfs.semanticscholar.org/8183/1ed8e5b304e9d28d2d8524d952b12b4cbf55.pdf,,https://doi.org/10.1016/j.patcog.2013.03.005,https://pdfs.semanticscholar.org/8183/1ed8e5b304e9d28d2d8524d952b12b4cbf55.pdf
+8185be0689442db83813b49e215bf30870017459,,,https://doi.org/10.1109/TNNLS.2013.2293418,
+81b2a541d6c42679e946a5281b4b9dc603bc171c,http://pdfs.semanticscholar.org/81b2/a541d6c42679e946a5281b4b9dc603bc171c.pdf,,,http://vts.uni-ulm.de/docs/2011/7560/vts_7560_10802.pdf
+81e11e33fc5785090e2d459da3ac3d3db5e43f65,http://pdfs.semanticscholar.org/81e1/1e33fc5785090e2d459da3ac3d3db5e43f65.pdf,,,http://www.e-ijaet.org/media/52I7-IJAET0703792-A-NOVEL-FACE-RECOGNITION-APPROACH.pdf
+81b8a6cabcd6451b21d5b44e69b0a355d9229cc4,,,https://doi.org/10.1109/ICDSP.2017.8096137,
+81e366ed1834a8d01c4457eccae4d57d169cb932,http://www-public.int-edu.eu/~horain/Publications/Wesierski%20ICCV_2013.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2013.363
+81d81a2060366f29fd100f793c11acf000bd2a7f,,,https://doi.org/10.1007/11795131_112,
+81fc86e86980a32c47410f0ba7b17665048141ec,http://pdfs.semanticscholar.org/81fc/86e86980a32c47410f0ba7b17665048141ec.pdf,,,https://arxiv.org/pdf/1801.03546v1.pdf
+8160b3b5f07deaa104769a2abb7017e9c031f1c1,http://www.aiia.csd.auth.gr/EN/cor_baayen/Exploiting_Discriminant_Information_in_NMF_for_FFV.pdf,,https://doi.org/10.1109/TNN.2006.873291,http://poseidon.csd.auth.gr/papers/PUBLISHED/JOURNAL/pdf/Zafeiriou06a.pdf
+816eff5e92a6326a8ab50c4c50450a6d02047b5e,http://pdfs.semanticscholar.org/816e/ff5e92a6326a8ab50c4c50450a6d02047b5e.pdf,,,http://www.pengxi.me/wp-content/uploads/file/2014-ELL-fastLRR.pdf
+81af86e3d343a40ce06a3927b6aa8c8853f6811a,,,,http://doi.acm.org/10.1145/3009977.3009996
+81c21f4aafab39b7f5965829ec9e0f828d6a6182,,,https://doi.org/10.1109/BTAS.2015.7358744,
+8149c30a86e1a7db4b11965fe209fe0b75446a8c,http://www.cfar.umd.edu/~kale/ICVGIP2012.pdf,,,http://doi.acm.org/10.1145/2425333.2425346
+81dd68de9d88c49db1ae509dbc66c7a82809c026,http://atvs.ii.uam.es/files/2004_SPM_Biometrics_Ortega.pdf,,,http://www.ll.mit.edu/mission/cybersec/publications/publication-files/full_papers/040301_Ortega.pdf
+813c93c54c19fd3ef850728e6d4a31d279d26021,,,,
+81d232e1f432db7de67baf4f30f240c62d1a9055,,,https://doi.org/10.1109/ICIP.2017.8296405,
+819c93dfe531ad6aba71cd48942c9e07b7a89b1b,,,,
+817321d4008bf95e9be00cf6cb1554a1aed40027,,,,
+81da427270c100241c07143885ba3051ec4a2ecb,http://pdfs.semanticscholar.org/81da/427270c100241c07143885ba3051ec4a2ecb.pdf,,,https://arxiv.org/pdf/1802.00941v1.pdf
+810f5606a4769fc3dd99611acf805596fb79223d,http://pdfs.semanticscholar.org/810f/5606a4769fc3dd99611acf805596fb79223d.pdf,,https://doi.org/10.1016/j.patcog.2010.06.019,http://eecs.qmul.ac.uk/~jason/Research/PreprintVersion/Extraction%20of%20Illumination%20Invariant%20Facial%20Features%20from%20A%20Single%20Image%20Using%20Nonsubsampled%20Contourlet%20Transform.pdf
+861c650f403834163a2c27467a50713ceca37a3e,http://personal.stevens.edu/~hli18/data/papers/PEPICCV2013_CameraReady.pdf,,,http://www.ifp.illinois.edu/~jyang29/papers/ICCV13c.pdf
+86614c2d2f6ebcb9c600d4aef85fd6bf6eab6663,http://pdfs.semanticscholar.org/8661/4c2d2f6ebcb9c600d4aef85fd6bf6eab6663.pdf,,,https://www2.eecs.berkeley.edu/Pubs/TechRpts/2016/EECS-2016-142.pdf
+86b69b3718b9350c9d2008880ce88cd035828432,http://pdfs.semanticscholar.org/86b6/9b3718b9350c9d2008880ce88cd035828432.pdf,,,https://lhncbc.nlm.nih.gov/system/files/pub9332.pdf
+86904aee566716d9bef508aa9f0255dc18be3960,http://pdfs.semanticscholar.org/8690/4aee566716d9bef508aa9f0255dc18be3960.pdf,,,https://arxiv.org/pdf/1802.09386v1.pdf
+86fa086d02f424705bbea53943390f009191740a,,,https://doi.org/10.1109/ICIP.2015.7351651,
+867e709a298024a3c9777145e037e239385c0129,http://pdfs.semanticscholar.org/867e/709a298024a3c9777145e037e239385c0129.pdf,,,http://ijpres.com/pdf30/41.pdf
+865d4ce1751ff3c0a8eb41077a9aa7bd94603c47,,,https://doi.org/10.1007/s12193-015-0210-7,
+86597fe787e0bdd05935d25158790727257a40bd,,,,http://doi.ieeecomputersociety.org/10.1109/3DV.2016.72
+865e9346b05f14f9bf85c1522c5aebe85420a517,,,,
+86afb1e38a96f2ac00e792ef353a971fd13c8474,,,https://doi.org/10.1109/BigData.2016.7840742,
+86c5478f21c4a9f9de71b5ffa90f2a483ba5c497,http://pdfs.semanticscholar.org/86c5/478f21c4a9f9de71b5ffa90f2a483ba5c497.pdf,,,http://arxiv.org/abs/1610.00660
+8686b15802529ff8aea50995ef14079681788110,,,https://doi.org/10.1109/TNNLS.2014.2376936,
+86c053c162c08bc3fe093cc10398b9e64367a100,http://pdfs.semanticscholar.org/86c0/53c162c08bc3fe093cc10398b9e64367a100.pdf,,https://doi.org/10.1049/iet-cvi.2014.0085,http://www.eecs.qmul.ac.uk/~hy300/papers/cascadeofforest.pdf
+86b985b285c0982046650e8d9cf09565a939e4f9,http://pdfs.semanticscholar.org/86b9/85b285c0982046650e8d9cf09565a939e4f9.pdf,,,http://search.ieice.org/bin/summary.php?id=e96-d_1_81
+861802ac19653a7831b314cd751fd8e89494ab12,http://btpwpdf.ijoy365.com/time-of-flight-and-depth-imaging-marcin-63540537.pdf,,https://doi.org/10.1007/978-3-642-44964-2,
+864d50327a88d1ff588601bf14139299ced2356f,,,https://doi.org/10.1109/FSKD.2016.7603151,
+86ed5b9121c02bcf26900913f2b5ea58ba23508f,http://openaccess.thecvf.com/content_cvpr_2016/papers/Wang_Actions__Transformations_CVPR_2016_paper.pdf,,,https://arxiv.org/pdf/1512.00795v1.pdf
+861b12f405c464b3ffa2af7408bff0698c6c9bf0,http://pdfs.semanticscholar.org/861b/12f405c464b3ffa2af7408bff0698c6c9bf0.pdf,,,http://www.ijritcc.org/download/1433921207.pdf
+861a51e66553979535df2b41971150453ab26372,,,,
+862d17895fe822f7111e737cbcdd042ba04377e8,http://pdfs.semanticscholar.org/862d/17895fe822f7111e737cbcdd042ba04377e8.pdf,,,http://arxiv.org/abs/1704.02166
+861a832b87b071a5d479186bbb2822f9ddbb67e4,,,,
+8697ccb156982d40e88fda7fbf4297fa5171f24d,,,,http://doi.ieeecomputersociety.org/10.1109/WI-IAT.2011.101
+86ab027a1930276bb2c4695d65668e6704538b01,,,,
+86e1bdbfd13b9ed137e4c4b8b459a3980eb257f6,http://pdfs.semanticscholar.org/86e1/bdbfd13b9ed137e4c4b8b459a3980eb257f6.pdf,,,https://arxiv.org/pdf/1705.06950v1.pdf
+86b6de59f17187f6c238853810e01596d37f63cd,http://pdfs.semanticscholar.org/86b6/de59f17187f6c238853810e01596d37f63cd.pdf,,,http://thesai.org/Downloads/Volume7No3/Paper_14-Competitive_Representation_Based_Classification_Using_Facial_Noise_Detection.pdf
+86b105c3619a433b6f9632adcf9b253ff98aee87,http://www.cecs.uci.edu/~papers/icme06/pdfs/0001013.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICME.2006.262705
+862f2d84b4230d64ddb3e48967ad417089f2c291,http://www.umiacs.umd.edu/users/pvishalm/Conference_pub/ICIP14_landmarks.pdf,,https://doi.org/10.1109/ICIP.2014.7025147,http://www.umiacs.umd.edu/~pvishalm/Conference_pub/ICIP14_landmarks.pdf
+86d1fbaecd02b44309383830e6d985dc09e786aa,http://feng-xu.com/papers/ExpressionSynthesis_CVPR.pdf,,,http://media.au.tsinghua.edu.cn/kaili/CVPR2012.pdf
+86881ce8f80adea201304ca6bb3aa413d94e9dd0,,,https://doi.org/10.1109/ICIP.2017.8297133,
+86a8b3d0f753cb49ac3250fa14d277983e30a4b7,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2013/W09/papers/Zhang_Exploiting_Unlabeled_Ages_2013_CVPR_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2013.75
+86b51bd0c80eecd6acce9fc538f284b2ded5bcdd,http://pdfs.semanticscholar.org/86b5/1bd0c80eecd6acce9fc538f284b2ded5bcdd.pdf,,,https://arxiv.org/pdf/1703.09911v1.pdf
+8699268ee81a7472a0807c1d3b1db0d0ab05f40d,http://pdfs.semanticscholar.org/8699/268ee81a7472a0807c1d3b1db0d0ab05f40d.pdf,,,https://arxiv.org/pdf/1706.03729v2.pdf
+86f8e6310d114bb24deb971e8bc7089df6ac3b57,http://ftp.ncbi.nlm.nih.gov/pub/pmc/84/69/40101_2015_Article_46.PMC4350291.pdf,,,https://jphysiolanthropol.biomedcentral.com/track/pdf/10.1186/s40101-015-0046-6?site=jphysiolanthropol.biomedcentral.com
+86ec0e331dd494533e16dd638661463b7e03edb7,,,,
+8605e8f5d84b8325b1a81d968c296a5a5d741f31,,,https://doi.org/10.1016/j.patcog.2017.04.010,
+86bbead2fb5b77ceff7994be9474648672f244d9,,,,
+72282287f25c5419dc6fd9e89ec9d86d660dc0b5,https://arxiv.org/pdf/1609.07495v1.pdf,,https://doi.org/10.1109/ICDM.2016.0156,http://www.vision.caltech.edu/~mronchi/papers/ICDM16_RotationInvariantMovemeDiscovery_PAPER_LONG.pdf
+7212e033b37efa9c96ee51cb810c303249ab21e4,,,,
+72a87f509817b3369f2accd7024b2e4b30a1f588,http://hal.inria.fr/docs/00/75/05/89/PDF/paa2010last.pdf,,https://doi.org/10.1007/s10044-011-0212-3,https://hal.archives-ouvertes.fr/hal-00750589/document
+72a00953f3f60a792de019a948174bf680cd6c9f,http://pdfs.semanticscholar.org/72a0/0953f3f60a792de019a948174bf680cd6c9f.pdf,,https://doi.org/10.1007/s11222-006-9004-9,http://www.researchgate.net/profile/Yanxi_Liu/publication/220286456_Understanding_the_role_of_facial_asymmetry_in_human_face_identification/links/0deec51794a979fdd6000000.pdf
+726b8aba2095eef076922351e9d3a724bb71cb51,http://pdfs.semanticscholar.org/d06b/cb2d46342ee011e652990edf290a0876b502.pdf,,,https://arxiv.org/pdf/1708.00980v1.pdf
+72345fed8d068229e50f9ea694c4babfd23244a0,,,,http://doi.acm.org/10.1145/2632856.2632937
+727ecf8c839c9b5f7b6c7afffe219e8b270e7e15,http://pdfs.semanticscholar.org/727e/cf8c839c9b5f7b6c7afffe219e8b270e7e15.pdf,,,http://infolab.stanford.edu/~mor/research/naamanthesis.pdf
+72a5e181ee8f71b0b153369963ff9bfec1c6b5b0,http://pdfs.semanticscholar.org/72a5/e181ee8f71b0b153369963ff9bfec1c6b5b0.pdf,,https://doi.org/10.1007/978-3-642-21227-7_53,https://www.researchgate.net/profile/Guoying_Zhao/publication/220809280_Expression_Recognition_in_Videos_Using_a_Weighted_Component-Based_Feature_Descriptor/links/02e7e528efcc42d041000000.pdf
+72e603083c8b1cfa09200eb333927e8ea848fbc8,,,,
+72ecaff8b57023f9fbf8b5b2588f3c7019010ca7,http://pdfs.semanticscholar.org/72ec/aff8b57023f9fbf8b5b2588f3c7019010ca7.pdf,,,https://arxiv.org/pdf/1710.05279v1.pdf
+72ffcc5b654b2468b9eff761279b29164f1df5d9,,,,
+72a03f06fcbf6af92fb3002e2fd9d43e75fd113e,,,,
+729dbe38538fbf2664bc79847601f00593474b05,http://pdfs.semanticscholar.org/729d/be38538fbf2664bc79847601f00593474b05.pdf,,,http://ilab.usc.edu/publications/doc/Borji_etal14jov.pdf
+729a9d35bc291cc7117b924219bef89a864ce62c,http://pdfs.semanticscholar.org/729a/9d35bc291cc7117b924219bef89a864ce62c.pdf,,,https://arxiv.org/pdf/1801.03127v1.pdf
+72e10a2a7a65db7ecdc7d9bd3b95a4160fab4114,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/2B_094_ext.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2B_094_ext.pdf
+72160aae43cd9b2c3aae5574acc0d00ea0993b9e,http://pdfs.semanticscholar.org/7216/0aae43cd9b2c3aae5574acc0d00ea0993b9e.pdf,,,http://www.ijcsi.org/papers/IJCSI-11-4-1-45-51.pdf
+72c0c8deb9ea6f59fde4f5043bff67366b86bd66,http://pdfs.semanticscholar.org/72c0/c8deb9ea6f59fde4f5043bff67366b86bd66.pdf,,,http://www.umiacs.umd.edu/~soma/pdf/FacialAging_survey.pdf
+721119b5f15ccccfd711571fb5a676d622d231bf,,,,
+728b1b2a86a7ffda402e7ec1a97cd1988dcde868,,,https://doi.org/10.1016/j.procs.2016.04.083,
+721e5ba3383b05a78ef1dfe85bf38efa7e2d611d,http://pdfs.semanticscholar.org/74f1/9d0986c9d39aabb359abaa2a87a248a48deb.pdf,,,http://www.bmva.org/bmvc/2016/papers/paper086/paper086.pdf
+72a3bb0fb490355a926c5a689e12268bff9ff842,,,https://doi.org/10.1109/ICIP.2006.312862,
+7234468db46b37e2027ab2978c67b48b8581f796,,,https://doi.org/10.1109/ACPR.2015.7486464,
+72f4aaf7e2e3f215cd8762ce283988220f182a5b,http://pdfs.semanticscholar.org/72f4/aaf7e2e3f215cd8762ce283988220f182a5b.pdf,,,http://journals.tubitak.gov.tr/elektrik/issues/elk-10-18-4/elk-18-4-13-0906-48.pdf
+72a55554b816b66a865a1ec1b4a5b17b5d3ba784,http://vislab.ucr.edu/Biometrics16/CVPRW_Vizilter.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2016.25
+72450d7e5cbe79b05839c30a4f0284af5aa80053,http://pdfs.semanticscholar.org/7245/0d7e5cbe79b05839c30a4f0284af5aa80053.pdf,,https://doi.org/10.1007/978-3-642-10331-5_68,http://www.cvc.uab.es/~bogdan/Publications/raducanu_ISVC2009.pdf
+72bf9c5787d7ff56a1697a3389f11d14654b4fcf,http://pdfs.semanticscholar.org/7910/a98a1fe9f4bec4c0dc4dc3476e9405b1930d.pdf,,,http://www.umiacs.umd.edu/users/wyzhao/IJCV_sfsface.ps.gz
+72119cb98f9502ec639de317dccea57fd4b9ee55,,,https://doi.org/10.1109/GlobalSIP.2015.7418230,
+72d110df78a7931f5f2beaa29f1eb528cf0995d3,,,https://doi.org/10.1007/s11517-015-1346-z,
+445461a34adc4bcdccac2e3c374f5921c93750f8,https://arxiv.org/pdf/1306.1913v1.pdf,,,http://arxiv.org/abs/1306.1913
+4414a328466db1e8ab9651bf4e0f9f1fe1a163e4,http://www.eurasip.org/Proceedings/Eusipco/Eusipco2010/Contents/papers/1569290719.pdf,http://ieeexplore.ieee.org/document/7096350/,,
+442f09ddb5bb7ba4e824c0795e37cad754967208,http://pdfs.semanticscholar.org/8c29/513c2621c26ac8491bb763674db475fe58c6.pdf,,,http://www.seas.upenn.edu/~taskar/pubs/partial_labels_jmlr11.pdf
+440b94b1624ca516b07e72ea8b3488072adc5e26,,,https://doi.org/10.1109/ITSC.2015.153,
+44b827df6c433ca49bcf44f9f3ebfdc0774ee952,,,https://doi.org/10.1109/LSP.2017.2726105,
+443acd268126c777bc7194e185bec0984c3d1ae7,https://eprints.soton.ac.uk/402985/1/icpr-16.pdf,,https://doi.org/10.1109/ICPR.2016.7900105,
+442d3aeca486de787de10bc41bfeb0b42c81803f,http://pdfs.semanticscholar.org/442d/3aeca486de787de10bc41bfeb0b42c81803f.pdf,,,http://www.murase.m.is.nagoya-u.ac.jp/~ide/res/paper/E08-conference-ttakahashi-1pub.pdf
+44f23600671473c3ddb65a308ca97657bc92e527,http://arxiv.org/pdf/1604.06573v2.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Feichtenhofer_Convolutional_Two-Stream_Network_CVPR_2016_paper.pdf
+4439746eeb7c7328beba3f3ef47dc67fbb52bcb3,http://pdfs.semanticscholar.org/4439/746eeb7c7328beba3f3ef47dc67fbb52bcb3.pdf,,,http://ijssst.info/Vol-12/No-4/paper1.pdf
+44c278cbecd6c1123bfa5df92e0bda156895fa48,,,https://doi.org/10.1109/ICPR.2014.316,
+446a99fdedd5bb32d4970842b3ce0fc4f5e5fa03,http://www.isir.upmc.fr/files/2014ACTI3172.pdf,,https://doi.org/10.1109/ICPR.2014.436,
+4467a1ae8ddf0bc0e970c18a0cdd67eb83c8fd6f,http://pdfs.semanticscholar.org/4467/a1ae8ddf0bc0e970c18a0cdd67eb83c8fd6f.pdf,,,http://www.sfu.ca/~smuralid/papers/activity_sportlogiq.pdf
+44d2ab6b7166274cc13b52d8f73a36839ca0d4a8,,,,
+44f48a4b1ef94a9104d063e53bf88a69ff0f55f3,http://pdfs.semanticscholar.org/44f4/8a4b1ef94a9104d063e53bf88a69ff0f55f3.pdf,,,http://arxiv.org/abs/1611.08107
+44a3ec27f92c344a15deb8e5dc3a5b3797505c06,http://pdfs.semanticscholar.org/44a3/ec27f92c344a15deb8e5dc3a5b3797505c06.pdf,,,http://people.cs.umass.edu/~smaji/papers/chapter-attributes-preprint.pdf
+4490b8d8ab2ac693c670751d4c2bff0a56d7393d,,,https://doi.org/10.1007/s11063-017-9648-9,
+44aeda8493ad0d44ca1304756cc0126a2720f07b,http://pdfs.semanticscholar.org/afbb/c0ea429ba0f5cf7790d23fc40d7d5342a53c.pdf,,,http://people.cs.pitt.edu/~flying/File/Comp2005.pdf
+4492914df003d690e5ff3cb3e0e0509a51f7753e,,,,http://doi.ieeecomputersociety.org/10.1109/ICCI-CC.2014.6921443
+449b1b91029e84dab14b80852e35387a9275870e,https://pdfs.semanticscholar.org/608c/da0c14c3d134d9d18dd38f9682b23c31d367.pdf,http://ieeexplore.ieee.org/document/6411899/,,http://www.apsipa.org/proceedings_2012/papers/278.pdf
+44078d0daed8b13114cffb15b368acc467f96351,http://arxiv.org/pdf/1604.05417v1.pdf,,https://doi.org/10.1109/BTAS.2016.7791205,https://arxiv.org/pdf/1604.05417v1.pdf
+44834929e56f2a8f16844fde519039d647006216,,,,http://doi.acm.org/10.1145/1460096.1460150
+44c9b5c55ca27a4313daf3760a3f24a440ce17ad,http://pdfs.semanticscholar.org/44c9/b5c55ca27a4313daf3760a3f24a440ce17ad.pdf,,,https://arxiv.org/pdf/1711.10143v1.pdf
+44389d8e20cf9f1a8453f4ba033e03cff9bdfcbb,,,https://doi.org/10.1016/j.neucom.2017.07.052,
+44dd150b9020b2253107b4a4af3644f0a51718a3,http://www.andrew.cmu.edu/user/kseshadr/TIFS_2012_Paper_Final_Submission.pdf,,https://doi.org/10.1109/TIFS.2012.2195175,http://www.andrew.cmu.edu/user/kseshadr/TIFS_2012_Paper_IEEE.pdf
+447d8893a4bdc29fa1214e53499ffe67b28a6db5,http://pdfs.semanticscholar.org/447d/8893a4bdc29fa1214e53499ffe67b28a6db5.pdf,,,http://hal.archives-ouvertes.fr/docs/00/34/33/78/PDF/MaximeBerthe_Memoire_de_these.pdf
+44f65e3304bdde4be04823fd7ca770c1c05c2cef,http://pdfs.semanticscholar.org/44f6/5e3304bdde4be04823fd7ca770c1c05c2cef.pdf,,https://doi.org/10.1007/s11760-009-0125-4,http://speech.iiit.ac.in/svlpubs/article/SaoAnilYegna2009.pdf
+44fbbaea6271e47ace47c27701ed05e15da8f7cf,http://pdfs.semanticscholar.org/44fb/baea6271e47ace47c27701ed05e15da8f7cf.pdf,,,http://www.mariskakret.com/wp-content/uploads/2015/10/Kret_pupilmimicry_PsychScie_20151.pdf
+44fb4dcf88eb482e2ab79fd4540caf941613b970,http://www.researchgate.net/profile/Masashi_Sugiyama/publication/220930547_Perceived_Age_Estimation_under_Lighting_Condition_Change_by_Covariate_Shift_Adaptation/links/0fcfd5122b4d406edd000000.pdf,,,http://www.ms.k.u-tokyo.ac.jp/2010/ICPR2010a.pdf
+44eb4d128b60485377e74ffb5facc0bf4ddeb022,https://pdfs.semanticscholar.org/44eb/4d128b60485377e74ffb5facc0bf4ddeb022.pdf,,https://doi.org/10.1109/ISSNIP.2014.6827690,http://www.serc.iisc.ernet.in/~venky/Papers/Emotion_Recognition_ISSNIP14.pdf
+448ed201f6fceaa6533d88b0b29da3f36235e131,http://pdfs.semanticscholar.org/aa6a/0b92c60187c7fa9923b1c8433ec99a495df7.pdf,,https://doi.org/10.1016/j.cviu.2014.12.005,http://arxiv.org/abs/1710.07831
+445e3ba7eabcc55b5d24f951b029196b47830684,,,https://doi.org/10.1109/TMM.2016.2591508,
+441bf5f7fe7d1a3939d8b200eca9b4bb619449a9,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W08/papers/Sundararajan_Head_Pose_Estimation_2015_CVPR_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2015.7301354
+447a5e1caf847952d2bb526ab2fb75898466d1bc,http://pdfs.semanticscholar.org/447a/5e1caf847952d2bb526ab2fb75898466d1bc.pdf,,,https://openreview.net/pdf?id=SJzmJEq6W
+449808b7aa9ee6b13ad1a21d9f058efaa400639a,http://www.jdl.ac.cn/doc/2008/Recovering%203D%20Facial%20Shape%20via%20Coupled%202D-3D%20Space%20Learning.pdf,,https://doi.org/10.1109/AFGR.2008.4813403,
+2a7bca56e2539c8cf1ae4e9da521879b7951872d,http://pdfs.semanticscholar.org/2a7b/ca56e2539c8cf1ae4e9da521879b7951872d.pdf,,,http://ttic.uchicago.edu/~argyriou/papers/ortho-aistats.pdf
+2a65d7d5336b377b7f5a98855767dd48fa516c0f,https://mug.ee.auth.gr/wp-content/uploads/fsLDA.pdf,,,http://avg.is.tuebingen.mpg.de/uploads_file/attachment/attachment/377/poster.pdf
+2af2b74c3462ccff3a6881ff7cf4f321b3242fa9,http://yugangjiang.info/publication/JCST-nameface.pdf,,https://doi.org/10.1007/s11390-014-1468-z,http://vireo.cs.cityu.edu.hk/papers/Chen-JCST2014.pdf
+2aaa6969c03f435b3ea8431574a91a0843bd320b,http://pdfs.semanticscholar.org/2aaa/6969c03f435b3ea8431574a91a0843bd320b.pdf,,,http://www.waset.org/journals/waset/v7/v7-49.pdf
+2a92bda6dbd5cce5894f7d370d798c07fa8783f4,,,https://doi.org/10.1109/TIFS.2014.2359587,
+2af620e17d0ed67d9ccbca624250989ce372e255,http://www.alessandrobergamo.com/data/bt_cvpr12.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6248040
+2a35d20b2c0a045ea84723f328321c18be6f555c,http://pdfs.semanticscholar.org/d1be/cba3c460892453939f9f3639d8beddf2a133.pdf,,,https://arxiv.org/pdf/1602.05659v3.pdf
+2ad7cef781f98fd66101fa4a78e012369d064830,http://arxiv.org/pdf/1603.05474v1.pdf,,,https://arxiv.org/pdf/1603.05474v2.pdf
+2ad29b2921aba7738c51d9025b342a0ec770c6ea,http://arxiv.org/pdf/1510.02781v1.pdf,,https://doi.org/10.1007/s11042-016-3824-1,https://arxiv.org/pdf/1510.02781v1.pdf
+2a9b398d358cf04dc608a298d36d305659e8f607,http://www.pitt.edu/~jeffcohn/biblio/MahoorFG2011.pdf,,https://doi.org/10.1109/FG.2011.5771420,http://www.engr.du.edu/mmahoor/Papers/AU_sparse_rep_f&g11.pdf
+2afde207bd6f2e5fa20f3cf81940b18cc14e7dbb,,,https://doi.org/10.1109/TIP.2013.2255300,
+2a98b850139b911df5a336d6ebf33be7819ae122,,,https://doi.org/10.1109/ICIP.2015.7350806,
+2a0efb1c17fbe78470acf01e4601a75735a805cc,http://pdfs.semanticscholar.org/2a0e/fb1c17fbe78470acf01e4601a75735a805cc.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2000.855831
+2ae2e29c3e9cc2d94a26da5730df7845de0d631b,,,https://doi.org/10.1109/TCSVT.2011.2129670,
+2a6bba2e81d5fb3c0fd0e6b757cf50ba7bf8e924,http://pdfs.semanticscholar.org/2a6b/ba2e81d5fb3c0fd0e6b757cf50ba7bf8e924.pdf,,,https://apps.cs.utexas.edu/apps/sites/default/files/tech_reports/StevenChenThesis.pdf
+2a6783ae51d7ee781d584ef9a3eb8ab1997d0489,,,https://doi.org/10.1109/CVPRW.2010.5543608,
+2ac21d663c25d11cda48381fb204a37a47d2a574,http://pdfs.semanticscholar.org/2ac2/1d663c25d11cda48381fb204a37a47d2a574.pdf,,https://doi.org/10.1007/978-3-642-24571-8_27,https://www.cl.cam.ac.uk/research/rainbow/emotions/pdf/ACII2011-Doctoral-2011.pdf
+2a98351aef0eec1003bd5524933aed8d3f303927,,,https://doi.org/10.1109/CIRA.2007.382901,
+2a4153655ad1169d482e22c468d67f3bc2c49f12,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhu_Face_Alignment_Across_CVPR_2016_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.23
+2a41388040141ef6b016c100ef833a2a73ab8b42,,,https://doi.org/10.1016/j.neucom.2017.03.033,
+2aa2b312da1554a7f3e48f71f2fce7ade6d5bf40,http://www.cl.cam.ac.uk/~pr10/publications/fg17.pdf,,,https://www.cl.cam.ac.uk/~mmam3/pub/fg17.pdf
+2ae139b247057c02cda352f6661f46f7feb38e45,http://www.iro.umontreal.ca/~memisevr/pubs/icmi_emotiw.pdf,,,http://doi.acm.org/10.1145/2522848.2531745
+2a3e19d7c54cba3805115497c69069dd5a91da65,http://pdfs.semanticscholar.org/2a3e/19d7c54cba3805115497c69069dd5a91da65.pdf,,,https://arxiv.org/pdf/1804.01176v1.pdf
+2af19b5ff2ca428fa42ef4b85ddbb576b5d9a5cc,http://pdfs.semanticscholar.org/2af1/9b5ff2ca428fa42ef4b85ddbb576b5d9a5cc.pdf,,https://doi.org/10.1007/978-3-642-01793-3_21,http://ccc.inaoep.mx/~mdprl/documentos/1201.2207v1.pdf
+2a79bd36c56fd1634ca0f8089fe8aa9343eb92ce,,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2006.104
+2a14b6d9f688714dc60876816c4b7cf763c029a9,http://tamaraberg.com/papers/wacv2016_combining.pdf,,,http://www.tamaraberg.com/papers/wacv2016_combining.pdf
+2a84f7934365f05b6707ea0ac225210f78e547af,,,https://doi.org/10.1109/ICPR.2016.7899690,
+2a88541448be2eb1b953ac2c0c54da240b47dd8a,http://pdfs.semanticscholar.org/2c44/0d01738a2fed3e3bd6520471acacb6c96e3b.pdf,,,http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2014_5332.pdf
+2adffdffa16475ae71bb2adcf65840f01f1e53f7,,,https://doi.org/10.1049/iet-cvi.2014.0094,
+2a02355c1155f2d2e0cf7a8e197e0d0075437b19,http://pdfs.semanticscholar.org/cf2c/58a5efea263a878815e25148b1c6954a0cbe.pdf,,,http://waset.org/publications/10902/on-face-recognition-using-gabor-filters-
+2a8c9e43459c1051f5b8048a3863c7bb8121abb2,,,,
+2a171f8d14b6b8735001a11c217af9587d095848,http://openaccess.thecvf.com/content_iccv_2015/papers/Zhang_Learning_Social_Relation_ICCV_2015_paper.pdf,,,http://mmlab.ie.cuhk.edu.hk/projects/socialrelation/support/ICCV15.pdf
+2aea27352406a2066ddae5fad6f3f13afdc90be9,http://arxiv.org/pdf/1507.05699v4.pdf,,,https://pdfs.semanticscholar.org/b558/be7e182809f5404ea0fcf8a1d1d9498dc01a.pdf
+2a4984fb48c175d1e42c6460c5f00963da9f26b6,,,https://doi.org/10.1109/MIPRO.2015.7160445,
+2a0623ae989f2236f5e1fe3db25ab708f5d02955,http://pdfs.semanticscholar.org/2a06/23ae989f2236f5e1fe3db25ab708f5d02955.pdf,,,http://xm2vtsdb.ee.surrey.ac.uk/CVSSP/Publications/papers/tena-2007.pdf
+2ad0ee93d029e790ebb50574f403a09854b65b7e,http://vision.cornell.edu/se3/wp-content/uploads/2014/09/pami05.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2005.92
+2afdda6fb85732d830cea242c1ff84497cd5f3cb,http://www.iis.sinica.edu.tw/papers/song/11489-F.pdf,,https://doi.org/10.1109/ICPR.2008.4761284,http://figment.cse.usf.edu/~sfefilat/data/papers/ThBCT8.41.pdf
+2a7e6a1b2638550370a47f2f6f6e38e76fe9ac13,,,,http://doi.acm.org/10.1145/3090311
+2ff9618ea521df3c916abc88e7c85220d9f0ff06,http://pdfs.semanticscholar.org/bb08/f64565ee68e868dcab904cada9646dd5f676.pdf,,,http://orzo.union.edu/Archives/SeniorProjects/2014/CS.2014/files/leveillc/leveille_thesis_presentation_slides.pdf
+2fda461869f84a9298a0e93ef280f79b9fb76f94,https://www.cl.cam.ac.uk/research/rainbow/projects/openface/wacv2016.pdf,,,https://www.cl.cam.ac.uk/~tb346/pub/papers/wacv2016.pdf
+2ff9ffedfc59422a8c7dac418a02d1415eec92f1,http://pdfs.semanticscholar.org/6e3b/778ad384101f792284b42844518f620143aa.pdf,,,http://www.crcv.ucf.edu/papers/CRCV-TR-13-01.pdf
+2fb8d7601fc3ad637781127620104aaab5122acd,,,,
+2fdce3228d384456ea9faff108b9c6d0cf39e7c7,http://pdfs.semanticscholar.org/2fdc/e3228d384456ea9faff108b9c6d0cf39e7c7.pdf,,https://doi.org/10.1109/FG.2011.5771370,http://mplab.ucsd.edu/~marni/pubs/Littlewort_FERA_FG2011.pdf
+2f7e9b45255c9029d2ae97bbb004d6072e70fa79,http://pdfs.semanticscholar.org/2f7e/9b45255c9029d2ae97bbb004d6072e70fa79.pdf,,,https://arxiv.org/pdf/1605.08247v1.pdf
+2f53b97f0de2194d588bc7fb920b89cd7bcf7663,http://pdfs.semanticscholar.org/2f53/b97f0de2194d588bc7fb920b89cd7bcf7663.pdf,,,http://arxiv.org/abs/1511.02023
+2f16baddac6af536451b3216b02d3480fc361ef4,http://cs.nyu.edu/~fergus/teaching/vision/10_facerec.pdf,,https://doi.org/10.1109/CVPR.2015.7298891,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/2A_057_ext.pdf
+2f489bd9bfb61a7d7165a2f05c03377a00072477,http://pdfs.semanticscholar.org/2f48/9bd9bfb61a7d7165a2f05c03377a00072477.pdf,,,http://www.bmva.org/bmvc/2014/papers/paper068/index.html
+2f2aa67c5d6dbfaf218c104184a8c807e8b29286,http://sesame.comp.nus.edu.sg/components/com_flexicontent/uploads/lekhaicon13.pdf,,https://doi.org/10.1109/ICON.2013.6782002,
+2f7aa942313b1eb12ebfab791af71d0a3830b24c,,,,
+2fe86e9c115562df2114eeedc7db1aece07a3638,,,,
+2f16459e2e24dc91b3b4cac7c6294387d4a0eacf,http://pdfs.semanticscholar.org/2f16/459e2e24dc91b3b4cac7c6294387d4a0eacf.pdf,,https://doi.org/10.1016/j.bdr.2017.06.002,http://poseidon.csd.auth.gr/papers/PUBLISHED/JOURNAL/pdf/2017/triantafyllidou_nousi_tefas_big_data_and_neural_networks.pdf
+2f8ef56c1007a02cdc016219553479d6b7e097fb,,,https://doi.org/10.1007/978-3-642-14834-7_2,
+2f0b8579829b3d4efdbc03c96821e33d7cc65e1d,http://thoth.inrialpes.fr/people/mpederso/papers/cvpr14-facial.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Pedersoli_Using_a_Deformation_2014_CVPR_paper.pdf
+2f3ec6d666d7b94b63a104f92859199428b77f78,,,,
+2f59f28a1ca3130d413e8e8b59fb30d50ac020e2,http://pralab.diee.unica.it/sites/default/files/Satta_ICPR2014.pdf,,https://doi.org/10.1109/ICPR.2014.70,
+2f78e471d2ec66057b7b718fab8bfd8e5183d8f4,http://pdfs.semanticscholar.org/2f78/e471d2ec66057b7b718fab8bfd8e5183d8f4.pdf,,,http://advances.utc.sk/index.php/AEEE/article/viewFile/1116/1199
+2fd007088a75916d0bf50c493d94f950bf55c5e6,,,https://doi.org/10.1007/978-981-10-7302-1_1,
+2fda164863a06a92d3a910b96eef927269aeb730,http://www.cs.utexas.edu/~grauman/courses/spring2007/395T/papers/berg_names_and_faces.pdf,,,http://www.eecs.berkeley.edu/Research/Projects/CS/vision/human/berg-cvpr04.pdf
+2fa057a20a2b4a4f344988fee0a49fce85b0dc33,http://next.comp.nus.edu.sg/sites/default/files/publication-attachments/eHeritage.pdf,,,http://doi.acm.org/10.1145/2502081.2502104
+2f8ef26bfecaaa102a55b752860dbb92f1a11dc6,http://pdfs.semanticscholar.org/2f8e/f26bfecaaa102a55b752860dbb92f1a11dc6.pdf,,https://doi.org/10.1007/978-3-642-10467-1_89,http://www.researchgate.net/profile/Gerard_Chollet/publication/220762911_A_Graph_Based_Approach_to_Speaker_Retrieval_in_Talk_Show_Videos_with_Transcript-Based_Supervision/links/09e4150405d7a48000000000.pdf
+2ff6f7e489ae8ff054422444a5e0604e30f3e97b,,,,
+2f43b614607163abf41dfe5d17ef6749a1b61304,,,https://doi.org/10.1109/TIFS.2014.2361479,
+2f28db98e8250cff29bc64b569801c739036e4ef,,,,
+2f5ae4d6cd240ec7bc3f8ada47030e8439125df2,http://users.eecs.northwestern.edu/~xsh835/CVPR14_ExemplarFaceDetection.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2014.238
+2f1b521c29ab075a0cd9bbf56ba26ee13d5e4d76,,,https://doi.org/10.1109/ACPR.2015.7486607,
+2f5b51af8053cf82ab52bbfd46b56999222ec21c,,,https://doi.org/10.1109/ICPR.2014.788,
+2f184c6e2c31d23ef083c881de36b9b9b6997ce9,http://pdfs.semanticscholar.org/2f18/4c6e2c31d23ef083c881de36b9b9b6997ce9.pdf,,https://doi.org/10.1007/978-3-642-34166-3_33,http://www.researchgate.net/profile/Roberto_DAmbrosio/publication/234062424_Polichotomies_on_Imbalanced_Domains_by_One-per-Class_Compensated_Reconstruction_Rule/links/0912f50ec10226f8fa000000.pdf
+2f348a2ad3ba390ee178d400be0f09a0479ae17b,http://www.csee.wvu.edu/~richas/ML-Papers/Gabor-Based%20Kernel%20PCA.pdf,,,http://www.cs.njit.edu/~liu/papers/tPAMI_3.pdf
+2f9c173ccd8c1e6b88d7fb95d6679838bc9ca51d,http://arxiv.org/pdf/1604.02917v1.pdf,,,http://ibug.doc.ic.ac.uk/media/uploads/documents/gpde.pdf
+2f598922f81e65c1f3ffbd8c2456d2e9dcd7124a,http://pdfs.semanticscholar.org/464c/21d54339c3f6e624ce026fef53b19c1edd86.pdf,,,http://www.jmlr.org/papers/volume17/15-176/15-176.pdf
+2f8183b549ec51b67f7dad717f0db6bf342c9d02,http://www.wisdom.weizmann.ac.il/~ronen/papers/Kemelmacher%20Basri%20-%203D%20Face%20Reconstruction%20from%20a%20Single%20Image%20Using%20a%20Single%20Reference%20Face%20Shape.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.63
+2f841ff062053f38725030aa1b77db903dad1efb,,,https://doi.org/10.1109/ICRA.2014.6907748,
+2facf3e85240042a02f289a0d40fee376c478d0f,,,https://doi.org/10.1109/BTAS.2010.5634544,
+2f61d91033a06dd904ff9d1765d57e5b4d7f57a6,,,https://doi.org/10.1109/ICIP.2016.7532953,
+2f160a6526ebf10773680dadaba44b006bcec2cb,,,https://doi.org/10.1016/j.neucom.2012.03.007,
+2f13dd8c82f8efb25057de1517746373e05b04c4,http://www.cfar.umd.edu/~rama/Publications/Ni_ICIP.pdf,,https://doi.org/10.1109/ICIP.2010.5652608,http://www.umiacs.umd.edu/users/rama/Publications/Ni_ICIP.pdf
+2fa1fc116731b2b5bb97f06d2ac494cb2b2fe475,http://pdfs.semanticscholar.org/2fa1/fc116731b2b5bb97f06d2ac494cb2b2fe475.pdf,,,https://www.researchgate.net/profile/Filippo_Vella/publication/236025118_A_novel_approach_to_personal_photo_album_representation_and_management/links/54f487400cf2ba6150634dc4.pdf
+2f2406551c693d616a840719ae1e6ea448e2f5d3,http://biometrics.cse.msu.edu/Presentations/CharlesOtto_ICB13_AgeEstimationFaceImages_HumanVsMachinePerformance.pdf,,https://doi.org/10.1109/ICB.2013.6613022,http://biometrics.cse.msu.edu/Publications/Face/HanOttoJain_AgeEstimationFaceImages_HumanvsMachinePerformance_ICB13.pdf
+2f17c0514bb71e0ca20780d71ea0d50ff0da4938,,,,http://doi.acm.org/10.1145/1943403.1943490
+2f882ceaaf110046e63123b495212d7d4e99f33d,http://pdfs.semanticscholar.org/2f88/2ceaaf110046e63123b495212d7d4e99f33d.pdf,,,http://www.itr-rescue.org/pubs/upload/377_Wu2005.pdf
+2f95340b01cfa48b867f336185e89acfedfa4d92,https://www2.informatik.uni-hamburg.de/wtm/ps/Hamester_IJCNN2015.pdf,,https://doi.org/10.1109/IJCNN.2015.7280539,
+2f7fc778e3dec2300b4081ba2a1e52f669094fcd,http://pdfs.semanticscholar.org/2f7f/c778e3dec2300b4081ba2a1e52f669094fcd.pdf,,,https://arxiv.org/pdf/1704.01716v1.pdf
+2f0e5a4b0ef89dd2cf55a4ef65b5c78101c8bfa1,http://pdfs.semanticscholar.org/f39c/e446b7c76d24cc63df7837cb3be0ee235df2.pdf,,https://doi.org/10.1007/978-3-319-69456-6_12,https://arxiv.org/pdf/1608.02833v3.pdf
+2faa09413162b0a7629db93fbb27eda5aeac54ca,http://www.nist.gov/customcf/get_pdf.cfm?pub_id=905048,,https://doi.org/10.1109/CVPRW.2010.5543228,http://www.cs.colostate.edu/~draper/papers/beveridge_cvprw10.pdf
+2f5e057e35a97278a9d824545d7196c301072ebf,http://vision.ics.uci.edu/papers/ZhuAR_CVPR_2014/ZhuAR_CVPR_2014.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Zhu_Capturing_Long-tail_Distributions_2014_CVPR_paper.pdf
+2f04ba0f74df046b0080ca78e56898bd4847898b,https://arxiv.org/pdf/1407.4023v2.pdf,,https://doi.org/10.1109/BTAS.2014.6996284,http://www.cbsr.ia.ac.cn/users/zlei/papers/Yang-IJCB-14.pdf
+433bb1eaa3751519c2e5f17f47f8532322abbe6d,http://pdfs.semanticscholar.org/433b/b1eaa3751519c2e5f17f47f8532322abbe6d.pdf,,,http://www.cps.msu.edu/~weng/research/BioChapter.ps
+4300fa1221beb9dc81a496cd2f645c990a7ede53,http://pdfs.semanticscholar.org/da71/87e56b6da1b9c993d9a096d2f2b9d80fb14c.pdf,,https://doi.org/10.1016/j.patcog.2007.07.022,http://www.cc.gatech.edu/~hpark/papers/Article_cor_nostyle.pdf
+43010792bf5cdb536a95fba16b8841c534ded316,https://www.comp.nus.edu.sg/~tsim/documents/general-face-motion.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2010.5539971
+43bb20ccfda7b111850743a80a5929792cb031f0,http://pdfs.semanticscholar.org/43bb/20ccfda7b111850743a80a5929792cb031f0.pdf,,,http://eprints-phd.biblio.unitn.it/1168/1/dnductien_PhDThesis.pdf
+439ac8edfa1e7cbc65474cab544a5b8c4c65d5db,http://pdfs.semanticscholar.org/439a/c8edfa1e7cbc65474cab544a5b8c4c65d5db.pdf,,https://doi.org/10.1007/s11760-011-0244-6,https://www.researchgate.net/profile/Daniel_Riccio/publication/220437296_Face_authentication_with_undercontrolled_pose_and_illumination/links/53fb66060cf2dca8fffe696f.pdf
+43f6953804964037ff91a4f45d5b5d2f8edfe4d5,http://ias.cs.tum.edu/_media/spezial/bib/riaz09fit.pdf,,,http://doi.acm.org/10.1145/1838002.1838039
+439ec47725ae4a3660e509d32828599a495559bf,http://pdfs.semanticscholar.org/439e/c47725ae4a3660e509d32828599a495559bf.pdf,,,https://www.researchgate.net/profile/Miguel_Dias2/publication/277722899_Facial_Expressions_Tracking_and_Recognition_Database_Protocols_for_Systems_Validation_and_Evaluation/links/55d10c1908aee19936fda410.pdf?origin=publication_list
+43261920d2615f135d6e72b333fe55d3f2659145,,,,http://doi.acm.org/10.1145/3136273.3136301
+43e99b76ca8e31765d4571d609679a689afdc99e,http://openaccess.thecvf.com/content_ICCV_2017/papers/Yu_Learning_Dense_Facial_ICCV_2017_paper.pdf,,,https://arxiv.org/pdf/1709.00536v1.pdf
+4377b03bbee1f2cf99950019a8d4111f8de9c34a,http://www.umiacs.umd.edu/~morariu/publications/LiSelectiveEncoderICCV15.pdf,,,http://www.cs.umd.edu/~angli/paper/selectivecode-iccv2015.pdf
+43a03cbe8b704f31046a5aba05153eb3d6de4142,http://pdfs.semanticscholar.org/9594/3329cd6922a869dd6d58ef01e9492879034c.pdf,,,http://www.ornl.gov/~webworks/cppr/y2001/pres/112191.pdf
+434bf475addfb580707208618f99c8be0c55cf95,http://pdfs.semanticscholar.org/8cea/404e8a5c4c11064923e5a6c023a0ae594a5a.pdf,,,https://arxiv.org/pdf/1509.05371v1.pdf
+43836d69f00275ba2f3d135f0ca9cf88d1209a87,https://ipsjcva.springeropen.com/track/pdf/10.1186/s41074-017-0030-7?site=ipsjcva.springeropen.com,,https://doi.org/10.1186/s41074-017-0030-7,
+4307e8f33f9e6c07c8fc2aeafc30b22836649d8c,http://pdfs.semanticscholar.org/ebff/0956c07185f7bb4e4ee5c7cc0aaa74aca05e.pdf,,https://doi.org/10.1007/978-3-642-33718-5_32,http://geometry.stanford.edu//papers/wg-semdlcva-12/wg-semdlcva-12.pdf
+435642641312364e45f4989fac0901b205c49d53,http://pdfs.semanticscholar.org/4356/42641312364e45f4989fac0901b205c49d53.pdf,,,http://www.ee.cuhk.edu.hk/~xgwang/papers/luoZLWXaaai16.pdf
+43aa40eaa59244c233f83d81f86e12eba8d74b59,http://pdfs.semanticscholar.org/43aa/40eaa59244c233f83d81f86e12eba8d74b59.pdf,,https://doi.org/10.1016/j.patrec.2014.05.017,https://udrc.eng.ed.ac.uk/sites/udrc.eng.ed.ac.uk/files/publications/Pose_invariantPRL2014_kittler.pdf
+4349f17ec319ac8b25c14c2ec8c35f374b958066,,,https://doi.org/10.1109/THMS.2017.2681425,
+43cbe3522f356fbf07b1ff0def73756391dc3454,,,https://doi.org/10.1109/WIFS.2011.6123140,
+4362368dae29cc66a47114d5ffeaf0534bf0159c,http://pdfs.semanticscholar.org/4362/368dae29cc66a47114d5ffeaf0534bf0159c.pdf,,,http://www.seekdl.org/upload/files/20130309_062502.pdf
+4350bb360797a4ade4faf616ed2ac8e27315968e,http://www.merl.com/publications/docs/TR2006-058.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2006.106
+4398afa0aeb5749a12772f2d81ca688066636019,,,,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2015.2496320
+4344ba6e33faaa616d01248368e66799548ca48b,,,https://doi.org/10.1007/s10044-015-0474-2,
+43476cbf2a109f8381b398e7a1ddd794b29a9a16,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Cao_A_Practical_Transfer_2013_ICCV_paper.pdf,,,https://www.microsoft.com/en-us/research/wp-content/uploads/2013/01/TransferLearning.pdf
+43fe03ec1acb6ea9d05d2b22eeddb2631bd30437,,,https://doi.org/10.1109/ICIP.2017.8296394,
+4353d0dcaf450743e9eddd2aeedee4d01a1be78b,http://pdfs.semanticscholar.org/4353/d0dcaf450743e9eddd2aeedee4d01a1be78b.pdf,,https://doi.org/10.5244/C.22.27,http://www.comp.leeds.ac.uk/bmvc2008/proceedings/papers/100.pdf
+43bb2b58f906262035ef61e41768375bc8d99ae3,,,https://doi.org/10.1016/j.procs.2016.04.072,
+4328933890f5a89ad0af69990926d8484f403e4b,,,,http://doi.acm.org/10.1145/2072298.2071993
+433d2d5528d1401a402f2c1db40b933c494f11ba,https://www.researchgate.net/profile/Xudong_Jiang3/publication/4248964_Face_Recognition_Based_on_Discriminant_Evaluation_in_the_Whole_Space/links/0046351ef2d1c48d55000000.pdf,,https://doi.org/10.1109/ICASSP.2007.366218,
+437a720c6f6fc1959ba95e48e487eb3767b4e508,http://pdfs.semanticscholar.org/d4f0/960c6587379ad7df7928c256776e25952c60.pdf,,,https://perso.telecom-paristech.fr/bloch/AIC/articles/BenYosef2017.pdf
+436d80cc1b52365ed7b2477c0b385b6fbbb51d3b,http://pdfs.semanticscholar.org/436d/80cc1b52365ed7b2477c0b385b6fbbb51d3b.pdf,,,https://arxiv.org/pdf/1803.10837v1.pdf
+434f1442533754b3098afd4e24abf1e3792b24db,,,https://doi.org/10.1109/CBMI.2015.7153627,
+434d6726229c0f556841fad20391c18316806f73,https://arxiv.org/pdf/1704.03114v2.pdf,,,http://arxiv.org/abs/1704.03114
+43fca653880f4e4d238c73d864e964475e4b90c8,,,,
+43b8b5eeb4869372ef896ca2d1e6010552cdc4d4,http://pdfs.semanticscholar.org/43b8/b5eeb4869372ef896ca2d1e6010552cdc4d4.pdf,,,http://arxiv.org/abs/1407.1490
+43ae4867d058453e9abce760ff0f9427789bab3a,https://infoscience.epfl.ch/record/207780/files/tnnls_graph_embedding.pdf,,https://doi.org/10.1109/TNNLS.2014.2329240,http://poseidon.csd.auth.gr/papers/PUBLISHED/JOURNAL/pdf/2015/GRAPH_TNN.pdf
+43eb03f95adc0df61af2c3b12a913c725b08d4f5,,,,http://doi.ieeecomputersociety.org/10.1109/ICDMW.2011.101
+435dc062d565ce87c6c20a5f49430eb9a4b573c4,http://pdfs.semanticscholar.org/435d/c062d565ce87c6c20a5f49430eb9a4b573c4.pdf,,,https://www.researchgate.net/profile/Masashi_Sugiyama/publication/220239806_Lighting_Condition_Adaptation_for_Perceived_Age_Estimation/links/0fcfd51196d6cec38c000000.pdf
+430c4d7ad76e51d83bbd7ec9d3f856043f054915,http://pdfs.semanticscholar.org/5176/899c80b3d4b3b8be34d35549f95bf2d55e7d.pdf,,,https://arxiv.org/pdf/1612.06795v2.pdf
+438b88fe40a6f9b5dcf08e64e27b2719940995e0,http://www.csd.uwo.ca/~olga/Courses/Fall2006/StudentPapers/ferenczMillerMalikICCV05.pdf,,,https://people.cs.umass.edu/~elm/papers/ICCV_hyperfeatures.pdf
+43af016138d541c95e9d1880413e05356fa9a323,,,,
+433a6d6d2a3ed8a6502982dccc992f91d665b9b3,http://pdfs.semanticscholar.org/433a/6d6d2a3ed8a6502982dccc992f91d665b9b3.pdf,,,http://arxiv.org/abs/1409.0602
+438e7999c937b94f0f6384dbeaa3febff6d283b6,https://arxiv.org/pdf/1705.02402v2.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2017.262
+43776d1bfa531e66d5e9826ff5529345b792def7,http://cvrr.ucsd.edu/scmartin/presentation/DriveAnalysisByLookingIn-ITSC2015-NDS.pdf,,https://doi.org/10.1109/ITSC.2015.367,http://cvrr.ucsd.edu/publications/2015/MartinOhnbarTrivedi_ITSC2015.pdf
+43fb9efa79178cb6f481387b7c6e9b0ca3761da8,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W08/papers/Katti_Mixture_of_Parts_2015_CVPR_paper.pdf,,,https://arxiv.org/pdf/1412.6791v1.pdf
+43ed518e466ff13118385f4e5d039ae4d1c000fb,https://arxiv.org/pdf/1505.01350v1.pdf,,https://doi.org/10.1109/ICMLA.2015.149,http://arxiv.org/pdf/1505.01350v1.pdf
+43a4dd79bb26e3b722ac8bea20f5916c30599851,,,,
+4309faac3248663ed56a6a841cac1855e302f090,,,,
+439647914236431c858535a2354988dde042ef4d,http://eecs.qmul.ac.uk/~jason/Research/PreprintVersion/Face%20Illumination%20Normalization%20on%20Large%20and%20Small%20Scale%20Features.pdf,,,http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/471.pdf
+439ca6ded75dffa5ddea203dde5e621dc4a88c3e,http://research.cs.rutgers.edu/~hxp1/rc_images/hai_facetrack_icpr2016.pdf,,https://doi.org/10.1109/ICPR.2016.7899906,
+8877e0b2dc3d2e8538c0cfee86b4e8657499a7c4,http://www.apsipa.org/proceedings_2013/papers/280_automatic-facial-hsu-2931731.pdf,,https://doi.org/10.1109/APSIPA.2013.6694238,
+88780bd55615c58d9bacc4d66fc2198e603a1714,,,https://doi.org/10.1109/EMBC.2016.7590730,
+88c6d4b73bd36e7b5a72f3c61536c8c93f8d2320,http://pdfs.semanticscholar.org/88c6/d4b73bd36e7b5a72f3c61536c8c93f8d2320.pdf,,,http://www.eecs.berkeley.edu/Pubs/TechRpts/2014/EECS-2014-81.pdf
+88ad82e6f2264f75f7783232ba9185a2f931a5d1,http://pdfs.semanticscholar.org/88ad/82e6f2264f75f7783232ba9185a2f931a5d1.pdf,,,https://arxiv.org/pdf/1802.08784v1.pdf
+8886b21f97c114a23b24dc7025bbf42885adc3a7,http://researchprofiles.herts.ac.uk/portal/files/10195320/UH_eval_deid_face_final.pdf,,https://doi.org/10.1109/MIPRO.2016.7522350,
+8816f93e46a2c47e02d82294f94aa83f95ac379b,,,,
+8879083463a471898ff9ed9403b84db277be5bf6,,,https://doi.org/10.1016/j.patcog.2016.08.031,
+889bc64c7da8e2a85ae6af320ae10e05c4cd6ce7,http://mmlab.ie.cuhk.edu.hk/archive/2007/IFS07_face.pdf,,https://doi.org/10.1109/TIFS.2007.897247,http://vc.cs.nthu.edu.tw/home/paper/codfiles/clhsu/200706211216/Using_Support_Vector_Machines_to_Enhance_the_Performance.pdf
+88f7a3d6f0521803ca59fde45601e94c3a34a403,http://pdfs.semanticscholar.org/88f7/a3d6f0521803ca59fde45601e94c3a34a403.pdf,,https://doi.org/10.1007/978-3-319-10590-1_50,http://www-scf.usc.edu/~chensun/data/SunNevatia_ECCV14.pdf
+8812aef6bdac056b00525f0642702ecf8d57790b,http://pdfs.semanticscholar.org/8812/aef6bdac056b00525f0642702ecf8d57790b.pdf,,,http://ias.in.tum.de/_media/spezial/bib/riaz09acii.pdf
+881066ec43bcf7476479a4146568414e419da804,http://pdfs.semanticscholar.org/8810/66ec43bcf7476479a4146568414e419da804.pdf,,https://doi.org/10.1007/978-3-319-45886-1_20,https://arxiv.org/pdf/1610.05613v1.pdf
+884a9ce87d4d2338cb97bf4c8df3cdb079a87d5e,,,https://doi.org/10.1109/SMC.2016.7844717,
+8879fed9f8f51a4c0734af22c5632cf6e9b07689,,,,
+8813368c6c14552539137aba2b6f8c55f561b75f,https://arxiv.org/pdf/1607.05427v1.pdf,,,http://arxiv.org/pdf/1607.05427v1.pdf
+88e2574af83db7281c2064e5194c7d5dfa649846,http://pdfs.semanticscholar.org/88e2/574af83db7281c2064e5194c7d5dfa649846.pdf,,https://doi.org/10.1155/2017/4579398,
+88ed558bff3600f5354963d1abe762309f66111e,,,https://doi.org/10.1109/TIFS.2015.2393553,
+88bef50410cea3c749c61ed68808fcff84840c37,https://ibug.doc.ic.ac.uk/media/uploads/documents/tzimiropoulos2011sparse.pdf,,https://doi.org/10.1109/CVPRW.2011.5981809,http://ibug.doc.ic.ac.uk/media/uploads/documents/tzimiropoulos2011sparse.pdf
+88399c7fa890f1252178cd5e4979971509bd904f,,,https://doi.org/10.1142/S0219878906000915,
+883006c0f76cf348a5f8339bfcb649a3e46e2690,http://mplab.ucsd.edu/~marni/pubs/Sikka_FG2013.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2013.6553762
+88850b73449973a34fefe491f8836293fc208580,http://pdfs.semanticscholar.org/8885/0b73449973a34fefe491f8836293fc208580.pdf,,,http://www.ijaret.org/2.1/XBeats-An%20Emotion%20Based%20Music%20Player.pdf
+8875dcf2836315839741fd6944f249263408c27f,,,,
+8820d1d3fa73cde623662d92ecf2e3faf1e3f328,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w2/papers/Victor_Continuous_Video_to_CVPR_2017_paper.pdf,,,https://arxiv.org/pdf/1705.09894v1.pdf
+880be65e233d4302744e2154b2ef172291ee9779,,,,
+88f2952535df5859c8f60026f08b71976f8e19ec,http://pdfs.semanticscholar.org/88f2/952535df5859c8f60026f08b71976f8e19ec.pdf,,,http://www.inase.org/library/2015/barcelona/bypaper/ELECTR/ELECTR-12.pdf
+8845c03bee88fdd2f400ed2bddba038366c82abe,,,,http://doi.ieeecomputersociety.org/10.1109/TCBB.2011.135
+8862a573a42bbaedd392e9e634c1ccbfd177a01d,https://arxiv.org/pdf/1605.06764v1.pdf,,https://doi.org/10.1109/LSP.2016.2643284,http://arxiv.org/pdf/1605.06764v1.pdf
+8882d39edae556a351b6445e7324ec2c473cadb1,,,https://doi.org/10.1109/TIP.2017.2755766,
+88c21e06ed44da518a7e346fce416efedc771704,,,https://doi.org/10.1109/ICIP.2015.7351455,
+887b7676a4efde616d13f38fcbfe322a791d1413,http://pdfs.semanticscholar.org/b4a0/cff84c35f75bcdb7aec3a0b1395edd15189b.pdf,,,http://arxiv.org/abs/1503.01532
+8878871ec2763f912102eeaff4b5a2febfc22fbe,http://www.ee.columbia.edu/~wliu/TIP15_action.pdf,,https://doi.org/10.1109/TIP.2015.2456412,http://vireo.cs.cityu.edu.hk/papers/TIP-Action-Motion.pdf
+8855d6161d7e5b35f6c59e15b94db9fa5bbf2912,http://pdfs.semanticscholar.org/8855/d6161d7e5b35f6c59e15b94db9fa5bbf2912.pdf,,,https://macsphere.mcmaster.ca/bitstream/11375/11955/1/fulltext.pdf
+88d63a0cc0b8a5303bdef286d6df118bb1d44d26,,,,
+88bee9733e96958444dc9e6bef191baba4fa6efa,http://homepages.dcc.ufmg.br/~william/papers/paper_2014_SIBGRAPI.pdf,,,http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2014.23
+888581e88c1cbfb8e905c317c6944b6ac2d4557c,,,,
+88fd4d1d0f4014f2b2e343c83d8c7e46d198cc79,http://www.mirlab.org/conference_papers/International_Conference/ICASSP%202016/pdfs/0002697.pdf,,https://doi.org/10.1109/ICASSP.2016.7472167,
+887745c282edf9af40d38425d5fdc9b3fe139c08,https://arxiv.org/pdf/1407.2987v1.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W08/papers/Golge_FAME_Face_Association_2015_CVPR_paper.pdf
+9f5e22fbc22e1b0a61bcd75202d299232e68de5d,,,https://doi.org/10.1109/IJCNN.2016.7727391,
+9fab78015e6e91ba7241a923222acd6c576c6e27,,,,http://doi.ieeecomputersociety.org/10.1109/ICSS.2016.10
+9f8ebf149aed8a0eda5c3375c9947c6b26eb7873,http://www.cais.ntu.edu.sg/~chhoi/paper_pdf/fp21-wang.pdf,,,http://www2013.org/companion/p317.pdf
+9f6d04ce617d24c8001a9a31f11a594bd6fe3510,http://pdfs.semanticscholar.org/9f6d/04ce617d24c8001a9a31f11a594bd6fe3510.pdf,,,https://sites.ualberta.ca/~efujiwar/PAID_ArndtFujiwara12.pdf
+9f499948121abb47b31ca904030243e924585d5f,http://pdfs.semanticscholar.org/9f49/9948121abb47b31ca904030243e924585d5f.pdf,,,http://arxiv.org/abs/1607.06416
+9f3c9e41f46df9c94d714b1f080dafad6b4de1de,,,https://doi.org/10.1109/ICT.2017.7998260,
+9f428db0d3cf26b9b929dd333a0445bcc7514cdf,,,https://doi.org/10.1016/j.cviu.2010.11.015,
+9fc04a13eef99851136eadff52e98eb9caac919d,http://pdfs.semanticscholar.org/9fc0/4a13eef99851136eadff52e98eb9caac919d.pdf,,,http://www.andrew.cmu.edu/user/sjayasur/camera-wax.pdf
+9f4078773c8ea3f37951bf617dbce1d4b3795839,http://pdfs.semanticscholar.org/9f40/78773c8ea3f37951bf617dbce1d4b3795839.pdf,,,https://www.ri.cmu.edu/wp-content/uploads/2017/05/Masters_Thesis.pdf
+9fb701dd40e35a6abc973b6d89a455de45dd8616,,,,
+9f65319b8a33c8ec11da2f034731d928bf92e29d,http://pdfs.semanticscholar.org/9f65/319b8a33c8ec11da2f034731d928bf92e29d.pdf,,,https://dds.cct.lsu.edu/ddslab/pdf/gallo2018.pdf
+9fd1b8abbad25cb38f0c009288fb5db0fc862db6,,,https://doi.org/10.1109/ICASSP.2003.1199147,
+9fbcf40b0649c03ba0f38f940c34e7e6c9e04c03,,,https://doi.org/10.1007/s10044-006-0033-y,
+9fd8d24a9db7cbcdf607994051d89667e95d7186,,,,
+9fa1be81d31fba07a1bde0275b9d35c528f4d0b8,http://pdfs.semanticscholar.org/9fa1/be81d31fba07a1bde0275b9d35c528f4d0b8.pdf,,,http://nichol.as/papers/thesis.pdf
+9f094341bea610a10346f072bf865cb550a1f1c1,http://zhiweizhu.com/papers/FIVR_MobileDevice_2009.pdf,,https://doi.org/10.1109/WACV.2009.5403087,
+9f4f890f74ac91bdc4323e061502331945474b90,,,,
+9f49013657cbce384df9b16a2a17293bc4c9d967,,,,
+6b44543571fe69f088be577d0c383ffc65eceb2a,,,,http://doi.ieeecomputersociety.org/10.1109/EST.2012.24
+6b9aa288ce7740ec5ce9826c66d059ddcfd8dba9,http://pdfs.semanticscholar.org/6b9a/a288ce7740ec5ce9826c66d059ddcfd8dba9.pdf,,https://doi.org/10.1016/j.image.2017.08.012,http://www.bnusei.net/wp-content/uploads/2017/11/BNU-LSVED-2.pdf
+6bfb0f8dd1a2c0b44347f09006dc991b8a08559c,https://www.computer.org/web/csdl/index/-/csdl/proceedings/fg/2013/5545/00/06553724.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2013.6553724
+6b0a2f9ab9b134d66a325525ea5d90ad546fe2b7,,,https://doi.org/10.1109/IJCNN.2016.7727803,
+6bcfcc4a0af2bf2729b5bc38f500cfaab2e653f0,http://www.cv-foundation.org/openaccess/content_cvpr_2016_workshops/w28/papers/Afshar_Facial_Expression_Recognition_CVPR_2016_paper.pdf,,,https://www.cmpe.boun.edu.tr/~salah/afshar06cvprw.pdf
+6b06b79ad1f1907e21380083b976b24a89a0f743,,,,
+6bf88e29ac04d72297e6f8f2971c5b8579786e7f,,,,
+6bca0d1f46b0f7546ad4846e89b6b842d538ee4e,http://pdfs.semanticscholar.org/6bca/0d1f46b0f7546ad4846e89b6b842d538ee4e.pdf,,,http://www3.nd.edu/~flynn/papers/DeborahThomasDissertation.pdf
+6b089627a4ea24bff193611e68390d1a4c3b3644,http://publications.idiap.ch/downloads/reports/2012/Wallace_Idiap-RR-03-2012.pdf,,https://doi.org/10.1109/TIFS.2012.2184095,
+6be0ab66c31023762e26d309a4a9d0096f72a7f0,http://pdfs.semanticscholar.org/6be0/ab66c31023762e26d309a4a9d0096f72a7f0.pdf,,,https://arxiv.org/pdf/1712.07732v1.pdf
+6bacd4347f67ec60a69e24ed7cc0ac8073004e6f,,,https://doi.org/10.1109/VCIP.2014.7051528,
+6bcee7dba5ed67b3f9926d2ae49f9a54dee64643,http://pdfs.semanticscholar.org/6bce/e7dba5ed67b3f9926d2ae49f9a54dee64643.pdf,,https://doi.org/10.1007/3-540-44887-X_6,https://www3.nd.edu/~kwb/Flynn_Bowyer_Phillips_AVBPA_2003.pdf
+6b18628cc8829c3bf851ea3ee3bcff8543391819,http://engineering.cae.cn/fitee/fileup/2095-9184/SUPPL/20151221082702_2.pdf,,https://doi.org/10.1631/FITEE.1500085,
+6b7f7817b2e5a7e7d409af2254a903fc0d6e02b6,http://pdfs.semanticscholar.org/6b7f/7817b2e5a7e7d409af2254a903fc0d6e02b6.pdf,,https://doi.org/10.1142/S021800140900717X,http://www.researchgate.net/profile/Ioan_Nafornita/publication/220360447_Feature_Extraction_through_Cross-Phase_Congruency_for_Facial_Expression_Analysis/links/004635260b2a4e24f8000000.pdf
+6bb95a0f3668cd36407c85899b71c9fe44bf9573,http://pdfs.semanticscholar.org/6bb9/5a0f3668cd36407c85899b71c9fe44bf9573.pdf,,,http://arxiv.org/pdf/1602.03935v1.pdf
+6ba6045e4b404c44f9b4dfce2d946019f0e85a72,,,https://doi.org/10.1109/ICPR.2016.7899962,
+6b8329730b2e13178a577b878631735a1cd58a71,,,,http://doi.ieeecomputersociety.org/10.1109/FiCloud.2015.78
+6b1b43d58faed7b457b1d4e8c16f5f7e7d819239,http://pdfs.semanticscholar.org/6b1b/43d58faed7b457b1d4e8c16f5f7e7d819239.pdf,,https://doi.org/10.1016/j.neucom.2015.06.079,https://pdfs.semanticscholar.org/6b1b/43d58faed7b457b1d4e8c16f5f7e7d819239.pdf
+6b14d2554d653b0c2fd0537535e3411864979a37,,,,
+6bb0425baac448297fbd29a00e9c9b9926ce8870,http://pdfs.semanticscholar.org/6bb0/425baac448297fbd29a00e9c9b9926ce8870.pdf,,,https://www.researchgate.net/profile/Zahir_Hussain3/publication/228525389_Facial_expression_recognition_using_log-Gabor_filters_and_local_binary_pattern_operators/links/0c96053a4a3f088f9b000000.pdf
+6b35b15ceba2f26cf949f23347ec95bbbf7bed64,http://pdfs.semanticscholar.org/6b35/b15ceba2f26cf949f23347ec95bbbf7bed64.pdf,,https://doi.org/10.1016/j.imavis.2015.06.010,http://lhncbc.nlm.nih.gov/system/files/pub9220.pdf
+6b6493551017819a3d1f12bbf922a8a8c8cc2a03,http://pdfs.semanticscholar.org/6b64/93551017819a3d1f12bbf922a8a8c8cc2a03.pdf,,https://doi.org/10.1007/978-3-642-01793-3_4,http://cvhci.ira.uka.de/download/publications/ICB_2009_208.pdf
+6b17b219bd1a718b5cd63427032d93c603fcf24f,http://pdfs.semanticscholar.org/6b17/b219bd1a718b5cd63427032d93c603fcf24f.pdf,,,http://repository.cmu.edu/cgi/viewcontent.cgi?article=1243&context=lti
+6bb630dfa797168e6627d972560c3d438f71ea99,http://arxiv.org/pdf/1609.03056v1.pdf,,https://doi.org/10.1109/TMM.2017.2666540,https://arxiv.org/pdf/1609.03056v2.pdf
+0729628db4bb99f1f70dd6cb2353d7b76a9fce47,http://pdfs.semanticscholar.org/f02a/dc21a307d32c1145f4ade65504b016b0faac.pdf,,,http://www.nip-lr.info/V11N04-06/V11N04P4-91-100.pdf
+0728f788107122d76dfafa4fb0c45c20dcf523ca,http://arxiv.org/pdf/1505.04427v1.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2016.152
+07c90e85ac0f74b977babe245dea0f0abcf177e3,http://pdfs.semanticscholar.org/07c9/0e85ac0f74b977babe245dea0f0abcf177e3.pdf,,https://doi.org/10.1007/3-540-44887-X_2,http://dagwood.vsam.ri.cmu.edu/ralph/Publications/avbpa03.pdf
+07ea3dd22d1ecc013b6649c9846d67f2bf697008,http://pdfs.semanticscholar.org/07ea/3dd22d1ecc013b6649c9846d67f2bf697008.pdf,,,http://ai.stanford.edu/~vigneshr/thesis/vignesh_ramanathan_dissertation_v3.pdf
+071099a4c3eed464388c8d1bff7b0538c7322422,http://arxiv.org/pdf/1601.02487v1.pdf,,https://doi.org/10.1109/ICIP.2015.7351443,http://arxiv.org/abs/1601.02487
+07dc9f3b34284cc915dea7575f40ef0c04338126,,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2016.2537337
+070c8ee3876c06f9a65693e536d61097ace40417,,,https://doi.org/10.1109/ACPR.2013.161,
+07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1,http://www.vast.uccs.edu/~tboult/PAPERS/BTAS13-Sapkota-Boult-UCCSFaceDB.pdf,,https://doi.org/10.1109/BTAS.2013.6712756,http://vast.uccs.edu/~tboult/PAPERS/BTAS13-Sapkota-Boult-UCCSFaceDB.pdf
+0733ec1953f6c774eb3a723618e1268586b46359,,,https://doi.org/10.1109/TMM.2006.870737,
+076d3fc800d882445c11b9af466c3af7d2afc64f,http://slsp.kaist.ac.kr/paperdata/Face_attribute_classification.pdf,,https://doi.org/10.1109/ICIP.2015.7351743,
+07ac2e342db42589322b28ef291c2702f4a793a8,http://www.cs.illinois.edu/homes/dhoiem/publications/cvpr2009_santosh_context.pdf,,,http://www.ri.cmu.edu/pub_files/2009/6/0987.pdf
+0750c796467b6ef60b0caff5fb199337d54d431e,,,https://doi.org/10.1109/ICMLC.2016.7873015,
+071af21377cc76d5c05100a745fb13cb2e40500f,http://pdfs.semanticscholar.org/071a/f21377cc76d5c05100a745fb13cb2e40500f.pdf,,,http://repository.cmu.edu/cgi/viewcontent.cgi?article=2174&context=robotics
+0701b01bc99bf3b64050690ceadb58a8800e81ed,,,https://doi.org/10.1007/s11042-015-3107-2,
+070ab604c3ced2c23cce2259043446c5ee342fd6,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR_2007/data/papers/workshops/Biometrics/papers/24-p75.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2007.383399
+0786a6d5ce6db8a68cef05bb5f5b84ec1b0c2cde,http://vipl.ict.ac.cn/sites/default/files/papers/files/2008_ACMMM_cxliu_Naming%20Faces%20in%20Broadcast%20News%20Video%20by%20Image%20Google.pdf,,,http://doi.acm.org/10.1145/1459359.1459468
+071135dfb342bff884ddb9a4d8af0e70055c22a1,http://pdfs.semanticscholar.org/0711/35dfb342bff884ddb9a4d8af0e70055c22a1.pdf,,,https://arxiv.org/pdf/1711.08200v1.pdf
+0754e769eb613fd3968b6e267a301728f52358be,http://www.umiacs.umd.edu/~cteo/public-shared/ICRA2012_ActionObjects_preprint.pdf,,https://doi.org/10.1109/ICRA.2012.6224589,http://www.umiacs.umd.edu/~yzyang/talk/ICRA12_ActionObjects.pdf
+0773c320713dae62848fceac5a0ac346ba224eca,http://eudl.eu/pdf/10.4108/icst.intetain.2015.259444,http://ieeexplore.ieee.org/document/7325479/,https://doi.org/10.4108/icst.intetain.2015.259444,
+070de852bc6eb275d7ca3a9cdde8f6be8795d1a3,http://www.cs.bath.ac.uk/~dpc/D3DFACS/ICCV_final_2011.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2011.6126510
+079edd5cf7968ac4759dfe72af2042cf6e990efc,http://pdfs.semanticscholar.org/079e/dd5cf7968ac4759dfe72af2042cf6e990efc.pdf,,,https://arxiv.org/pdf/1511.06432v2.pdf
+072db5ba5b375d439ba6dbb6427c63cd7da6e940,http://users.ece.cmu.edu/~juefeix/tip_2014_felix.pdf,,https://doi.org/10.1109/TIP.2014.2329460,
+0744af11a025e9c072ef6ad102af208e79cc6f44,https://www.researchgate.net/profile/Pascal_Frossard/publication/233799235_Learning_Smooth_Pattern_Transformation_Manifolds/links/00463533951057e9bb000000.pdf,,https://doi.org/10.1109/TIP.2012.2227768,http://www.researchgate.net/profile/Pascal_Frossard/publication/233799235_Learning_Smooth_Pattern_Transformation_Manifolds/links/00463533951057e9bb000000.pdf
+07a472ea4b5a28b93678a2dcf89028b086e481a2,http://pdfs.semanticscholar.org/07a4/72ea4b5a28b93678a2dcf89028b086e481a2.pdf,,https://doi.org/10.1007/978-3-642-41190-8_58,http://cvrr.ucsd.edu/publications/2013/TawariTrivedi_ICIAP2013.pdf
+0717b47ab84b848de37dbefd81cf8bf512b544ac,http://pdfs.semanticscholar.org/0717/b47ab84b848de37dbefd81cf8bf512b544ac.pdf,,,http://www.ijera.com/special_issue/Humming%20Bird_March_2014/Version%20%204/DH4146.pdf
+0708059e3bedbea1cbfae1c8cd6b7259d4b56b5b,http://www.cs.tut.fi/~iosifidi/files/conference/2016_EUSIPCO_GRMCSVM.pdf?dl=0,,https://doi.org/10.1109/EUSIPCO.2016.7760217,
+074af31bd9caa61fea3c4216731420bd7c08b96a,http://www.umiacs.umd.edu/~jhchoi/paper/cvprw2012_sfv.pdf,,https://doi.org/10.1109/CVPRW.2012.6239213,http://vipl.ict.ac.cn/homepage/rpwang/publications/Face%20Verification%20Using%20Sparse%20Representations_CVPRW2012.pdf
+078d507703fc0ac4bf8ca758be101e75ea286c80,http://pdfs.semanticscholar.org/078d/507703fc0ac4bf8ca758be101e75ea286c80.pdf,,,http://www.ijritcc.org/download/1441258440.pdf
+0716e1ad868f5f446b1c367721418ffadfcf0519,http://pdfs.semanticscholar.org/6e05/5db22fbddb524ccb0006145db7944d1ed31c.pdf,,https://doi.org/10.1007/978-3-319-10599-4_22,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ECCV_2014/html/8694/86940333/esm1.pdf
+073eaa49ccde15b62425cda1d9feab0fea03a842,http://pdfs.semanticscholar.org/073e/aa49ccde15b62425cda1d9feab0fea03a842.pdf,,,http://pure.tudelft.nl/ws/files/11546461/1_s2.0_S1077314215002040_main.pdf
+0756efe121e37479157010e18723e0c8da02a34b,,,,
+076f2dca12b3e85c282fc678f0d22ad6a3e6dc14,,,,
+0748b29b046d0659765649f7831a319ec23967e2,,,,
+07d95be4922670ef2f8b11997e0c00eb643f3fca,http://eprints.eemcs.utwente.nl/26833/01/Pantic_The_First_Facial_Landmark_Tracking_in-the-Wild_Challenge.pdf,,,https://ibug.doc.ic.ac.uk/media/uploads/documents/shen_the_first_facial_iccv_2015_paper.pdf
+07f31bef7a7035792e3791473b3c58d03928abbf,http://videolectures.net/site/normal_dl/tag=977248/fgconference2015_phillips_biometric_samples_01.pdf,,https://doi.org/10.1016/j.imavis.2016.08.004,https://www3.nd.edu/~kwb/Flynn_Phillips_Bowyer_FG_2015.pdf
+0726a45eb129eed88915aa5a86df2af16a09bcc1,http://www.ri.cmu.edu/pub_files/2016/7/root-compressed.pdf,,https://doi.org/10.1109/IROS.2016.7759279,http://ri.cmu.edu/pub_files/2016/7/root-compressed.pdf
+07de8371ad4901356145722aa29abaeafd0986b9,http://pdfs.semanticscholar.org/07de/8371ad4901356145722aa29abaeafd0986b9.pdf,,,"https://www.lti.cs.cmu.edu/sites/default/files/lan,%20zhenzhong%20-%20CMU-LTI-17-002.pdf"
+07e639abf1621ceff27c9e3f548fadfa2052c912,http://pdfs.semanticscholar.org/07e6/39abf1621ceff27c9e3f548fadfa2052c912.pdf,,,http://journals.plos.org/plosone/article/file?id=10.1371/journal.pone.0141474&type=printable
+07da958db2e561cc7c24e334b543d49084dd1809,https://infoscience.epfl.ch/record/117525/files/Classification.pdf?version=1,,,
+0742d051caebf8a5d452c03c5d55dfb02f84baab,http://research.cs.tamu.edu/keyser/Papers/CGI05Blur-JonesBW.pdf?origin=publication_detail,,,http://research.cs.tamu.edu/keyser/Papers/BlurPoster-SCA2004.pdf
+07d986b1005593eda1aeb3b1d24078db864f8f6a,http://pdfs.semanticscholar.org/07d9/86b1005593eda1aeb3b1d24078db864f8f6a.pdf,,,http://pep.ijieee.org.in/journal_pdf/11-203-144896126418-21.pdf
+076c97826df63f70d55ea11f0b7ae47a7ad81ad3,,,,http://doi.ieeecomputersociety.org/10.1109/SITIS.2011.40
+38d56ddcea01ce99902dd75ad162213cbe4eaab7,http://pdfs.semanticscholar.org/38d5/6ddcea01ce99902dd75ad162213cbe4eaab7.pdf,,https://doi.org/10.24963/ijcai.2017/369,http://www.ijcai.org/proceedings/2017/0369.pdf
+38e7f3fe450b126367ec358be9b4cc04e82fa8c7,,,https://doi.org/10.1109/TIP.2014.2351265,
+3813a77005fcc87e1a65c272c9c7a9a87c80c000,,,,
+3888d7a40f3cea5e4a851c8ca97a2d7810a62867,,,https://doi.org/10.1109/CCECE.2016.7726684,
+389334e9a0d84bc54bcd5b94b4ce4c5d9d6a2f26,http://pdfs.semanticscholar.org/3893/34e9a0d84bc54bcd5b94b4ce4c5d9d6a2f26.pdf,,https://doi.org/10.1109/ICIP.2001.959231,http://gps-tsc.upc.es/imatge/pub/ps/ICIP01_pardas_losada.pdf
+38f7f3c72e582e116f6f079ec9ae738894785b96,http://pdfs.semanticscholar.org/38f7/f3c72e582e116f6f079ec9ae738894785b96.pdf,,,http://www.ijarcce.com/upload/2015/november-15/IJARCCE%2063.pdf
+380dd0ddd5d69adc52defc095570d1c22952f5cc,http://pdfs.semanticscholar.org/380d/d0ddd5d69adc52defc095570d1c22952f5cc.pdf,,,https://arxiv.org/pdf/1712.00193v1.pdf
+38f1d8d25c0332798e0929594af2c43092d2c5c8,,,,
+38679355d4cfea3a791005f211aa16e76b2eaa8d,http://hub.hku.hk/bitstream/10722/127357/1/Content.pdf,,https://doi.org/10.1109/TIP.2009.2035867,http://www.cs.utexas.edu/~ssi/CDDHE.pdf
+3802c97f925cb03bac91d9db13d8b777dfd29dcc,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Martins_Non-Parametric_Bayesian_Constrained_2014_CVPR_paper.pdf,,,https://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Martins_Non-Parametric_Bayesian_Constrained_2014_CVPR_paper.pdf
+38a2661b6b995a3c4d69e7d5160b7596f89ce0e6,http://www.cs.colostate.edu/~draper/papers/zhang_ijcb14.pdf,,https://doi.org/10.1109/BTAS.2014.6996258,http://www.cs.colostate.edu/~haozhang/linked_files/RIDMBC_oral_presentation.pdf
+38682c7b19831e5d4f58e9bce9716f9c2c29c4e7,http://pdfs.semanticscholar.org/3868/2c7b19831e5d4f58e9bce9716f9c2c29c4e7.pdf,,,http://www.ijcttjournal.org/Volume18/number-5/IJCTT-V18P149.pdf
+38787338ba659f0bfbeba11ec5b7748ffdbb1c3d,http://www.eurasip.org/Proceedings/Eusipco/Eusipco2005/defevent/papers/cr1885.pdf,http://ieeexplore.ieee.org/document/7078402/,,http://signal.ee.bilkent.edu.tr/defevent/abstract/a1885.pdf
+383ff2d66fecdc2fd02a31ac1fa392f48e578296,,,https://doi.org/10.1016/j.cviu.2015.07.005,
+387b54cf6c186c12d83f95df6bd458c5eb1254ee,,,https://doi.org/10.1109/VCIP.2017.8305123,
+3826e47f0572ab4d0fe34f0ed6a49aa8303e0428,,,https://doi.org/10.1109/ACPR.2013.66,
+385750bcf95036c808d63db0e0b14768463ff4c6,http://pdfs.semanticscholar.org/3857/50bcf95036c808d63db0e0b14768463ff4c6.pdf,,,http://jmlr.org/proceedings/papers/v48/larsen16.pdf
+3852968082a16db8be19b4cb04fb44820ae823d4,https://infoscience.epfl.ch/record/230240/files/1701.01821.pdf,,,https://arxiv.org/pdf/1701.01821v2.pdf
+38f61e422ef75df4b96fb6081ce866556b6b854f,,,,
+38cc2f1c13420170c7adac30f9dfac69b297fb76,http://pdfs.semanticscholar.org/38cc/2f1c13420170c7adac30f9dfac69b297fb76.pdf,,,http://scholarworks.rit.edu/cgi/viewcontent.cgi?article=4226&context=theses
+38cbb500823057613494bacd0078aa0e57b30af8,https://ibug.doc.ic.ac.uk/media/uploads/documents/08014986.pdf,,,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Chrysos_Deep_Face_Deblurring_CVPR_2017_paper.pdf
+383e64d9ef1fca9de677ac82486b4df42e96e861,,,,http://doi.ieeecomputersociety.org/10.1109/DSC.2017.78
+384f972c81c52fe36849600728865ea50a0c4670,http://pdfs.semanticscholar.org/dad7/3d70b4fa77d67c5c02e3ecba21c52ab9a386.pdf,,,https://arxiv.org/pdf/1604.07057v3.pdf
+38f06a75eb0519ae1d4582a86ef4730cc8fb8d7f,http://pdfs.semanticscholar.org/e9a4/1f856a474aa346491fe76151869e3f548172.pdf,,https://doi.org/10.1007/978-3-319-10584-0_30,http://www4.comp.polyu.edu.hk/~cslzhang/paper/conf/SEAML_eccv14_sup.pdf
+384945abd53f6a6af51faf254ba8ef0f0fb3f338,http://pdfs.semanticscholar.org/b42c/4b804d69a031aac797346acc337f486e4a09.pdf,,https://doi.org/10.1007/978-3-642-15561-1_32,http://people.cs.pitt.edu/~kovashka/cs3710_sp15/active_learning_yan.pdf
+38215c283ce4bf2c8edd597ab21410f99dc9b094,https://pure.qub.ac.uk/portal/files/9746839/IEEE_Transactions_on_Affective_Computing_2012_McKeown.pdf,,,http://doi.ieeecomputersociety.org/10.1109/T-AFFC.2011.20
+38861d0d3a0292c1f54153b303b0d791cbba1d50,http://pdfs.semanticscholar.org/3886/1d0d3a0292c1f54153b303b0d791cbba1d50.pdf,,https://doi.org/10.1016/j.neucom.2014.09.081,https://arxiv.org/pdf/1403.3610v2.pdf
+3830047081ef4bc787f16edf5b244cb2793f75e5,https://www.cs.drexel.edu/~kon/publication/GSchwartz_CPCV13_slides.pdf,,,https://www.cs.drexel.edu/~kon/pubs/GSchwartz_CPCV13.pdf
+38d8ff137ff753f04689e6b76119a44588e143f3,http://pdfs.semanticscholar.org/38d8/ff137ff753f04689e6b76119a44588e143f3.pdf,,,http://arxiv.org/abs/1709.06532
+38198502b6579354931bfa35e88dba6df806721c,,,,
+3896c62af5b65d7ba9e52f87505841341bb3e8df,http://pdfs.semanticscholar.org/3896/c62af5b65d7ba9e52f87505841341bb3e8df.pdf,,https://doi.org/10.1007/978-1-4419-5906-5_739,http://www.ee.iisc.ernet.in/new/people/faculty/soma.biswas/pdf/Face_encyclopedia.pdf
+38192a0f9261d9727b119e294a65f2e25f72d7e6,http://pdfs.semanticscholar.org/3819/2a0f9261d9727b119e294a65f2e25f72d7e6.pdf,,https://doi.org/10.1016/j.neucom.2017.05.013,https://www.researchgate.net/profile/Nannan_Wang/publication/266560944_Facial_Feature_Point_Detection_A_Comprehensive_Survey/links/5441bb330cf2e6f0c0f65d55.pdf?origin=publication_list
+38345264a9ca188c4facffe6e18a7e6865fb2966,,,,http://doi.ieeecomputersociety.org/10.1109/BIBM.2017.8217969
+38bbca5f94d4494494860c5fe8ca8862dcf9676e,http://pdfs.semanticscholar.org/c322/b770d2c7d9e70d196577bf0ae6b05205ebd7.pdf,,,http://www.vision.caltech.edu/pmoreels/Publications/Thesis_PMoreels.pdf
+38183fe28add21693729ddeaf3c8a90a2d5caea3,https://arxiv.org/pdf/1706.09876v1.pdf,,,http://openaccess.thecvf.com/content_cvpr_2017/papers/Hao_Scale-Aware_Face_Detection_CVPR_2017_paper.pdf
+38a9ca2c49a77b540be52377784b9f734e0417e4,http://homepages.dcc.ufmg.br/~william/papers/paper_2011_IJCB_Faces.pdf,,https://doi.org/10.1109/IJCB.2011.6117498,
+38bb66c97b35851051e95834639c205254771adc,,,,
+3802da31c6d33d71b839e260f4022ec4fbd88e2d,http://pdfs.semanticscholar.org/3802/da31c6d33d71b839e260f4022ec4fbd88e2d.pdf,,https://doi.org/10.1007/978-3-319-49409-8_44,http://adas.cvc.uab.es/task-cv2016/papers/0022.pdf
+00f7f7b72a92939c36e2ef9be97397d8796ee07c,http://pdfs.semanticscholar.org/00f7/f7b72a92939c36e2ef9be97397d8796ee07c.pdf,,,http://cs231n.stanford.edu/reports/kjchavez_final.pdf
+008528d5e27919ee95c311266041e4fb1711c254,,,https://doi.org/10.1007/s13735-015-0092-1,
+0021f46bda27ea105d722d19690f5564f2b8869e,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhao_Deep_Region_and_CVPR_2016_paper.pdf,,,http://www.ri.cmu.edu/pub_files/2016/6/drml_low.pdf
+00d4c2db10f3a32d505d7b8adc7179e421443dec,,,https://doi.org/10.1109/GlobalSIP.2014.7032080,
+0081e2188c8f34fcea3e23c49fb3e17883b33551,http://pdfs.semanticscholar.org/0081/e2188c8f34fcea3e23c49fb3e17883b33551.pdf,,,https://arxiv.org/pdf/1802.05891v1.pdf
+00dc942f23f2d52ab8c8b76b6016d9deed8c468d,http://pdfs.semanticscholar.org/00dc/942f23f2d52ab8c8b76b6016d9deed8c468d.pdf,,,https://www.cis.rit.edu/~cnspci/references/theses/phd/walvrood2008.pdf
+0077cd8f97cafd2b389783858a6e4ab7887b0b6b,http://pdfs.semanticscholar.org/b971/266b29fcecf1d5efe1c4dcdc2355cb188ab0.pdf,,,http://arxiv.org/abs/1703.00832
+00049f989067d082f7f8d0581608ad5441d09f8b,,,https://doi.org/10.1109/LSP.2016.2555480,
+003ba2001bd2614d309d6ec15e9e2cbe86db03a1,,,https://doi.org/10.1109/ISCAS.2005.1465264,
+0055c7f32fa6d4b1ad586d5211a7afb030ca08cc,http://pdfs.semanticscholar.org/0055/c7f32fa6d4b1ad586d5211a7afb030ca08cc.pdf,,,https://arxiv.org/pdf/1608.01529v1.pdf
+00af9945a3401bdad3cffa89f7e5a15660399282,,,,
+009cd18ff06ff91c8c9a08a91d2516b264eee48e,http://pdfs.semanticscholar.org/009c/d18ff06ff91c8c9a08a91d2516b264eee48e.pdf,,,https://cdn.intechopen.com/pdfs-wm/17175.pdf
+00214fe1319113e6649435cae386019235474789,http://pdfs.semanticscholar.org/0021/4fe1319113e6649435cae386019235474789.pdf,,,http://www-i6.informatik.rwth-aachen.de/publications/download/662/Hanselmann--Face-Recognition-Using-Distortion-Models--bachelor2009.pdf
+00eccc565b64f34ad53bf67dfaf44ffa3645adff,,,,http://doi.ieeecomputersociety.org/10.1109/ICMEW.2013.6618328
+004e3292885463f97a70e1f511dc476289451ed5,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Law_Quadruplet-Wise_Image_Similarity_2013_ICCV_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2013.38
+00301c250d667700276b1e573640ff2fd7be574d,,,https://doi.org/10.1109/BTAS.2014.6996242,
+00b08d22abc85361e1c781d969a1b09b97bc7010,http://www.umariqbal.info/uploads/1/4/8/3/14837880/visapp_2014.pdf,,https://doi.org/10.5220/0004738801620173,
+004d5491f673cd76150f43b0a0429214f5bfd823,http://www.cais.ntu.edu.sg/~chhoi/paper_pdf/fp130-wang.pdf,,,http://dayongwang.info/pdf/2013-SIGIR.pdf
+007250c2dce81dd839a55f9108677b4f13f2640a,http://pdfs.semanticscholar.org/0db7/735e7adbe6e34dd058af31e278033040ab18.pdf,,https://doi.org/10.1007/3-540-45665-1_11,http://cbcl.mit.edu/publications/ps/bileschi-01240837.pdf
+00e3957212517a252258baef833833921dd308d4,http://www.yugangjiang.info/publication/17MM-PersonAttribute.pdf,,,http://doi.acm.org/10.1145/3123266.3123424
+00a38ebce124879738b04ffc1536018e75399193,,,https://doi.org/10.1109/BTAS.2017.8272766,
+00616b487d4094805107bb766da1c234c3c75e73,http://vision.ucmerced.edu/papers/Newsam_ACMGIS_2008.pdf,,,http://faculty.ucmerced.edu/snewsam/papers/Newsam_ACMGIS_2008.pdf
+00f0ed04defec19b4843b5b16557d8d0ccc5bb42,http://pdfs.semanticscholar.org/00f0/ed04defec19b4843b5b16557d8d0ccc5bb42.pdf,,,http://arxiv.org/abs/1608.00911
+0037bff7be6d463785d4e5b2671da664cd7ef746,http://pdfs.semanticscholar.org/0037/bff7be6d463785d4e5b2671da664cd7ef746.pdf,,https://doi.org/10.1007/978-3-642-15549-9_46,http://lear.inrialpes.fr/pubs/2010/GVS10a/GVS10a.pdf
+009a18d04a5e3ec23f8ffcfc940402fd8ec9488f,http://pdfs.semanticscholar.org/009a/18d04a5e3ec23f8ffcfc940402fd8ec9488f.pdf,,,http://www.cs.ucf.edu/~smasood/publications/BMVC2014_ActionRecognition.pdf
+0066caed1238de95a431d836d8e6e551b3cde391,http://humansensing.cs.cmu.edu/sites/default/files/7de_la_torre_frade_fernando_2007_3.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2007.383056
+00075519a794ea546b2ca3ca105e2f65e2f5f471,http://pdfs.semanticscholar.org/0007/5519a794ea546b2ca3ca105e2f65e2f5f471.pdf,,,http://www.cs.uccs.edu/~kalita/work/reu/REUFinalPapers2010/Mears.pdf
+00220a6783488054eb0fe7b915e882b1294f3318,,,,
+0019925779bff96448f0c75492717e4473f88377,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w3/papers/Reale_Deep_Heterogeneous_Face_CVPR_2017_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2017.34
+009bf86913f1c366d9391bf236867d84d12fa20c,,,https://doi.org/10.1109/CVPRW.2010.5544620,
+00bfef58353564f4e4bd7e2cb68cb66953cf9103,,,,
+00e9011f58a561500a2910a4013e6334627dee60,http://library.utia.cas.cz/separaty/2008/RO/somol-facial%20expression%20recognition%20using%20angle-related%20information%20from%20facial%20meshes.pdf,http://ieeexplore.ieee.org/document/7080565/,,
+00d9d88bb1bdca35663946a76d807fff3dc1c15f,http://arxiv.org/pdf/1604.04842v1.pdf,,https://doi.org/10.1007/s11263-016-0958-6,https://arxiv.org/pdf/1604.04842v1.pdf
+00a967cb2d18e1394226ad37930524a31351f6cf,https://arxiv.org/pdf/1611.05377v1.pdf,,,http://openaccess.thecvf.com/content_cvpr_2017/papers/Lu_Fully-Adaptive_Feature_Sharing_CVPR_2017_paper.pdf
+00f1e5e954f9eb7ffde3ca74009a8c3c27358b58,http://www.vision.caltech.edu/holub/public_html/Papers/PDF/holub_et_al_face_clustering.pdf,,https://doi.org/10.1109/AFGR.2008.4813463,http://www.vision.caltech.edu/publications/holub_et_al_face_clustering.pdf
+00a3cfe3ce35a7ffb8214f6db15366f4e79761e3,http://engineering.cae.cn/fitee/fileup/2095-9184/SUPPL/20150414135701.pdf,,https://doi.org/10.1631/FITEE.1400209,
+0058cbe110933f73c21fa6cc9ae0cd23e974a9c7,http://pdfs.semanticscholar.org/0058/cbe110933f73c21fa6cc9ae0cd23e974a9c7.pdf,,,http://www.bmva.org/bmvc/2015/papers/paper010/paper010.pdf
+004a1bb1a2c93b4f379468cca6b6cfc6d8746cc4,http://pdfs.semanticscholar.org/004a/1bb1a2c93b4f379468cca6b6cfc6d8746cc4.pdf,,,http://www.researchgate.net/profile/Feiping_Nie/publication/268748091_Balanced_k-Means_and_Min-Cut_Clustering/links/54ebbf900cf2ff89649e537f.pdf
+00d94b35ffd6cabfb70b9a1d220b6823ae9154ee,https://arxiv.org/pdf/1503.07989v1.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2016.2527652
+00ebc3fa871933265711558fa9486057937c416e,http://pdfs.semanticscholar.org/00eb/c3fa871933265711558fa9486057937c416e.pdf,,,https://pdfs.semanticscholar.org/8ed8/261e3e85a156efc8c94523795ab6bb7cc287.pdf
+006f283a50d325840433f4cf6d15876d475bba77,http://lvdmaaten.github.io/publications/papers/TPAMI_2014.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2013.221
+00b29e319ff8b3a521b1320cb8ab5e39d7f42281,http://pdfs.semanticscholar.org/8007/b8afa13869d2a7c681db8bd7c2e7df1ef02d.pdf,,https://doi.org/10.1007/978-3-319-10599-4_24,http://www.ri.cmu.edu/pub_files/2014/0/characterizing_mistakes_eccv2014.pdf
+00d0f2ec2036fb26ffcf882eb0aa47da0693192e,,,,
+0034e37a0faf0f71395245b266aacbf5412f190a,,,https://doi.org/10.1109/TMM.2014.2355134,
+00d931eccab929be33caea207547989ae7c1ef39,http://pdfs.semanticscholar.org/00d9/31eccab929be33caea207547989ae7c1ef39.pdf,,,http://www.cogsci.northwestern.edu/cogsci2004/papers/paper435.pdf
+0059b3dfc7056f26de1eabaafd1ad542e34c2c2e,http://pdfs.semanticscholar.org/0059/b3dfc7056f26de1eabaafd1ad542e34c2c2e.pdf,,,http://ascl.cis.fiu.edu/uploads/1/3/4/2/13423859/lisetti-acm-tmis-2013-final.pdf
+0052de4885916cf6949a6904d02336e59d98544c,https://rd.springer.com/content/pdf/10.1007/s10994-005-3561-6.pdf,,https://doi.org/10.1007/s10994-005-3561-6,http://machinelearning.wustl.edu/mlpapers/paper_files/icml2004_Ye04.pdf
+00d0b01d6a5f12216e078001b7c49225d2495b21,http://graphics.cs.uh.edu/publication/pub/2009_TVCJ_faceilluminationtransfer.pdf,,https://doi.org/10.1007/s00371-009-0375-8,http://graphics.cs.uh.edu/website/Publications/2009_TVCJ_faceilluminationtransfer.pdf
+6e60536c847ac25dba4c1c071e0355e5537fe061,http://www.cfar.umd.edu/~fer/postscript/CV_and_NLP.pdf,,,http://www.cs.umd.edu/sites/default/files/scholarly_papers/PerathamW.pdf
+6e198f6cc4199e1c4173944e3df6f39a302cf787,http://pdfs.semanticscholar.org/6e19/8f6cc4199e1c4173944e3df6f39a302cf787.pdf,,,http://libres.uncg.edu/ir/uncw/f/wangy2017-1.pdf
+6eaf446dec00536858548fe7cc66025b70ce20eb,http://pdfs.semanticscholar.org/6eaf/446dec00536858548fe7cc66025b70ce20eb.pdf,,,https://arxiv.org/pdf/1710.00962v1.pdf
+6e173ad91b288418c290aa8891193873933423b3,http://pdfs.semanticscholar.org/eb3b/021406fe5a5002535b392cac60832aa8f162.pdf,,,https://arxiv.org/pdf/1703.07595v2.pdf
+6eba25166fe461dc388805cc2452d49f5d1cdadd,http://pdfs.semanticscholar.org/6eba/25166fe461dc388805cc2452d49f5d1cdadd.pdf,,,http://www.bmva.org/bmvc/2016/papers/paper122/paper122.pdf
+6ed738ff03fd9042965abdfaa3ed8322de15c116,https://dr.ntu.edu.sg/bitstream/handle/10220/39690/kmeap_icdm2014.pdf?isAllowed=y&sequence=1,,,http://doi.ieeecomputersociety.org/10.1109/ICDM.2014.54
+6ecd4025b7b5f4894c990614a9a65e3a1ac347b2,http://pdfs.semanticscholar.org/6ecd/4025b7b5f4894c990614a9a65e3a1ac347b2.pdf,,,http://www.ijritcc.org/download/Automatic%20Naming%20of%20Character%20using%20Video%20Streaming%20for%20Face%20Recognition%20with%20Graph%20Matching.pdf
+6eddea1d991e81c1c3024a6cea422bc59b10a1dc,http://pdfs.semanticscholar.org/6edd/ea1d991e81c1c3024a6cea422bc59b10a1dc.pdf,,,http://www.cl.cam.ac.uk/~pr10/publications/eai16.pdf
+6eaeac9ae2a1697fa0aa8e394edc64f32762f578,http://pdfs.semanticscholar.org/6eae/ac9ae2a1697fa0aa8e394edc64f32762f578.pdf,,https://doi.org/10.1016/j.patcog.2007.10.009,http://cs.nju.edu.cn/zhouzh/zhouzh.files/publication/pr08.pdf
+6ee2ea416382d659a0dddc7a88fc093accc2f8ee,https://pdfs.semanticscholar.org/6ee2/ea416382d659a0dddc7a88fc093accc2f8ee.pdf,,https://doi.org/10.1109/TSMCB.2010.2044788,http://people.kth.se/~mflierl/Publications/zhi10-GSNMF.pdf
+6e97a99b2879634ecae962ddb8af7c1a0a653a82,http://pdfs.semanticscholar.org/7d37/7ba82df9cba0959cb910288415e568007792.pdf,,,https://arxiv.org/pdf/1703.06246v2.pdf
+6e9a8a34ab5b7cdc12ea52d94e3462225af2c32c,http://www.cv-foundation.org/openaccess/content_cvpr_2016_workshops/w28/papers/Kim_Fusing_Aligned_and_CVPR_2016_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2016.187
+6ec004e4c1171c4c4858eec7c927f567684b80bc,http://www.researchgate.net/profile/Bongnam_Kang/publication/221292310_The_POSTECH_face_database_(PF07)_and_performance_evaluation/links/00463531e60efa5310000000.pdf,,https://doi.org/10.1109/AFGR.2008.4813378,
+6e3a181bf388dd503c83dc324561701b19d37df1,http://pdfs.semanticscholar.org/9d91/213394fb411743b11bae74cf22f0ffca9191.pdf,,https://doi.org/10.1007/s10107-016-1042-2,http://arxiv.org/abs/1503.08601
+6ef1996563835b4dfb7fda1d14abe01c8bd24a05,http://hera.inf-cv.uni-jena.de:6680/pdf/Goering14:NPT,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2014.319
+6e9de9c3af3258dd18142e9bef2977b7ce153bd5,,,https://doi.org/10.1007/978-3-319-48881-3,
+6ee8a94ccba10062172e5b31ee097c846821a822,http://pdfs.semanticscholar.org/6ee8/a94ccba10062172e5b31ee097c846821a822.pdf,,,http://cogprints.org/8966/1/EscalanteWiskott-Cogprints-2013.pdf
+6ee64c19efa89f955011531cde03822c2d1787b8,http://pdfs.semanticscholar.org/6ee6/4c19efa89f955011531cde03822c2d1787b8.pdf,,,ftp://ftp.ncbi.nlm.nih.gov/pub/pmc/9e/af/PLoS_One_2012_Mar_15_7(3)_e32321.tar.gz
+6e2041a9b5d840b0c3e4195241cd110640b1f5f3,,,https://doi.org/10.1007/s10044-013-0349-3,
+6ed27a41214716259676b6949999cdf4b12d0bdd,,,,
+6e7ffd67329ca6027357a133437505bc56044e65,,,https://doi.org/10.1109/IJCNN.2014.6889754,
+6e94c579097922f4bc659dd5d6c6238a428c4d22,http://pdfs.semanticscholar.org/6e94/c579097922f4bc659dd5d6c6238a428c4d22.pdf,,https://doi.org/10.1007/11815921_49,
+6e379f2d34e14efd85ae51875a4fa7d7ae63a662,http://pdfs.semanticscholar.org/6e37/9f2d34e14efd85ae51875a4fa7d7ae63a662.pdf,,,http://www.asafvarol.com/tezler/Naveed_Ahmed_Thesis.pdf
+6eb1e006b7758b636a569ca9e15aafd038d2c1b1,http://pdfs.semanticscholar.org/6eb1/e006b7758b636a569ca9e15aafd038d2c1b1.pdf,,,http://ias.in.tum.de/_media/spezial/bib/wimmer07human.pdf
+6eece104e430829741677cadc1dfacd0e058d60f,http://pdfs.semanticscholar.org/7a42/6d0b98c8f52d61f9d89cd7be5ab6119f0a4a.pdf,,,http://www.psychology.pitt.edu/research/publications/cohn_AFA_20April2004.pdf
+6e0a05d87b3cc7e16b4b2870ca24cf5e806c0a94,http://pdfs.semanticscholar.org/6e0a/05d87b3cc7e16b4b2870ca24cf5e806c0a94.pdf,,,http://www.eecs.umich.edu/~hero/Preprints/costa_thesis.pdf
+6e1802874ead801a7e1072aa870681aa2f555f35,http://www.cs.yale.edu/homes/hw5/WebContent/ICASSP07_Yan.pdf,,https://doi.org/10.1109/ICASSP.2007.365986,
+6ed22b934e382c6f72402747d51aa50994cfd97b,http://www.ifp.illinois.edu/~jyang29/papers/WACV16-Expression.pdf,,,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477449
+6e93fd7400585f5df57b5343699cb7cda20cfcc2,http://pdfs.semanticscholar.org/a52f/4d315adf0aa60ba284fd4caf22485625cedf.pdf,,,http://mapageweb.umontreal.ca/gosselif/hammaletal09.pdf
+6eb1b5935b0613a41b72fd9e7e53a3c0b32651e9,http://pdfs.semanticscholar.org/6eb1/b5935b0613a41b72fd9e7e53a3c0b32651e9.pdf,,,http://www.bartneck.de/publications/2015/LEGOPictorialScales/LegoPictorialScalesforAssessingAffectiveResponse.pdf
+6ec275755f8776b620d0a4550be0e65caf2bc87a,,,https://doi.org/10.1109/IS.2016.7737496,
+6e12ba518816cbc2d987200c461dc907fd19f533,http://pdfs.semanticscholar.org/6e12/ba518816cbc2d987200c461dc907fd19f533.pdf,,https://doi.org/10.1016/j.imavis.2013.03.001,http://pages.cs.wisc.edu/~gdguo/myPapers/BMIface2013.pdf
+6e782073a013ce3dbc5b9b56087fd0300c510f67,http://pdfs.semanticscholar.org/6e78/2073a013ce3dbc5b9b56087fd0300c510f67.pdf,,,http://iosrjournals.org/iosr-jce/papers/Vol17-issue3/Version-2/K017326168.pdf
+9ab463d117219ed51f602ff0ddbd3414217e3166,http://pdfs.semanticscholar.org/d965/43e8ab524108cae8c12d3a65a54a295deae6.pdf,,,http://hal.inria.fr/docs/00/64/56/08/PDF/RT-0415.pdf
+9ac82909d76b4c902e5dde5838130de6ce838c16,http://pdfs.semanticscholar.org/9ac8/2909d76b4c902e5dde5838130de6ce838c16.pdf,,https://doi.org/10.1007/978-0-387-93808-0_18,http://www.researchgate.net/profile/Ralph_Braspenning/publication/226201524_Recognizing_Facial_Expressions_Automatically_from_Video/links/00b4951ecd5cf6a038000000.pdf
+9ab963e473829739475b9e47514f454ab467a5af,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.33
+9abf6d56a7d336bc58f4e3328d2ee807032589f1,,,https://doi.org/10.1109/CEC.2017.7969500,
+9a0c7a4652c49a177460b5d2fbbe1b2e6535e50a,http://arxiv.org/pdf/1602.01940v1.pdf,,,http://arxiv.org/abs/1602.01940
+9ac15845defcd0d6b611ecd609c740d41f0c341d,http://pdfs.semanticscholar.org/9ac1/5845defcd0d6b611ecd609c740d41f0c341d.pdf,,,http://apps.cs.utexas.edu/tech_reports/reports/tr/TR-2062.pdf
+9af1cf562377b307580ca214ecd2c556e20df000,http://pdfs.semanticscholar.org/9af1/cf562377b307580ca214ecd2c556e20df000.pdf,,,http://arxiv.org/pdf/1503.01646v1.pdf
+9abab00de61dd722b3ad1b8fa9bffd0001763f8b,,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2420563
+9ab126760f68071a78cabe006cf92995d6427025,,,https://doi.org/10.1007/s11042-013-1703-6,
+9a4c45e5c6e4f616771a7325629d167a38508691,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W02/papers/Mostafa_A_Facial_Features_2015_CVPR_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2015.7301324
+9a84588fe7e758cfbe7062686a648fab787fc32f,,,https://doi.org/10.1007/s11042-014-2333-3,
+9a7858eda9b40b16002c6003b6db19828f94a6c6,https://www1.icsi.berkeley.edu/~twke/pdfs/pubs/mooney_icip2017.pdf,,https://doi.org/10.1109/ICIP.2017.8296637,http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2017mooneyICIP.pdf
+9a3535cabf5d0f662bff1d897fb5b777a412d82e,http://pdfs.semanticscholar.org/9a35/35cabf5d0f662bff1d897fb5b777a412d82e.pdf,,https://doi.org/10.1186/s13640-015-0070-9,https://uknowledge.uky.edu/cgi/viewcontent.cgi?article=1006&context=cs_facpub&httpsredir=1&referer=
+9abd35b37a49ee1295e8197aac59bde802a934f3,http://pdfs.semanticscholar.org/9abd/35b37a49ee1295e8197aac59bde802a934f3.pdf,,https://doi.org/10.1007/978-3-319-46604-0_47,https://arxiv.org/pdf/1608.04339v1.pdf
+9aade3d26996ce7ef6d657130464504b8d812534,,,https://doi.org/10.1109/TNNLS.2016.2618340,
+9a276c72acdb83660557489114a494b86a39f6ff,http://pdfs.semanticscholar.org/9a27/6c72acdb83660557489114a494b86a39f6ff.pdf,,,http://www.humanpub.org/JMMT/ppl/JMMT8PPL.pdf
+9a1a9dd3c471bba17e5ce80a53e52fcaaad4373e,http://pdfs.semanticscholar.org/9a1a/9dd3c471bba17e5ce80a53e52fcaaad4373e.pdf,,,http://mplab.ucsd.edu/pdfs/Bartlett-multimedia-inpress.pdf
+9a6da02db99fcc0690d7ffdc15340b125726ab95,http://vision.ucla.edu/~vedaldi/assets/pubs/vedaldi07boosting.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2007.4408840
+9a42c519f0aaa68debbe9df00b090ca446d25bc4,http://pdfs.semanticscholar.org/9a42/c519f0aaa68debbe9df00b090ca446d25bc4.pdf,,,https://arxiv.org/pdf/1801.05678v1.pdf
+9aba281955117eb4a7aed36775f55f27e4dde42f,,,,http://doi.ieeecomputersociety.org/10.1109/AFGR.2000.840635
+36b40c75a3e53c633c4afb5a9309d10e12c292c7,https://pdfs.semanticscholar.org/36b4/0c75a3e53c633c4afb5a9309d10e12c292c7.pdf,,,https://www.researchgate.net/profile/Weifeng_Liu/publication/200834313_Facial_Expression_Recognition_Based_on_Fusion_of_Multiple_Gabor_Features/links/00b7d51ee36a3dccd5000000.pdf
+363ca0a3f908859b1b55c2ff77cc900957653748,http://pdfs.semanticscholar.org/363c/a0a3f908859b1b55c2ff77cc900957653748.pdf,,,http://www.ijcttjournal.org/Volume1/Issue-3/number-4/IJCTT-V1I3N4P5.pdf
+36bb5cca0f6a75be8e66f58cba214b90982ee52f,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.73
+36219a3196aac2bd149bc786f083957a6e6da125,,,https://doi.org/10.1016/j.jvcir.2015.12.003,
+3645d85ccd5bb7ce5df8d24e6ddb358eb1656df5,,,,
+3690af0af51a067750f664c08e48b486d1cd476d,,,,http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2012.41
+365f67fe670bf55dc9ccdcd6888115264b2a2c56,http://pdfs.semanticscholar.org/f431/d3d7a0323bf1150420c826dade2093a7dfa1.pdf,,https://doi.org/10.1016/j.imavis.2016.04.017,http://arxiv.org/abs/1512.08212
+36fe39ed69a5c7ff9650fd5f4fe950b5880760b0,http://pdfs.semanticscholar.org/36fe/39ed69a5c7ff9650fd5f4fe950b5880760b0.pdf,,,http://ceur-ws.org/Vol-574/bvm2010_92.pdf
+36a3a96ef54000a0cd63de867a5eb7e84396de09,http://www.cs.toronto.edu/~guerzhoy/oriviz/crv17.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CRV.2017.59
+36486944b4feeb88c0499fecd253c5a53034a23f,,,https://doi.org/10.1109/CISP-BMEI.2017.8301986,
+36fc4120fc0638b97c23f97b53e2184107c52233,http://pdfs.semanticscholar.org/36fc/4120fc0638b97c23f97b53e2184107c52233.pdf,,,http://research.ijcaonline.org/ncipet2013/number3/ncipet1337.pdf
+36ce0b68a01b4c96af6ad8c26e55e5a30446f360,http://liris.cnrs.fr/Documents/Liris-6963.pdf,,https://doi.org/10.1007/s11042-014-2322-6,
+36b23007420b98f368d092bab196a8f3cbcf6f93,,,,http://doi.ieeecomputersociety.org/10.1109/ICNC.2009.106
+360d66e210f7011423364327b7eccdf758b5fdd2,http://www.eurasip.org/Proceedings/Eusipco/Eusipco2009/contents/papers/1569190652.pdf,http://ieeexplore.ieee.org/document/7077513/,,
+365866dc937529c3079a962408bffaa9b87c1f06,http://pdfs.semanticscholar.org/3658/66dc937529c3079a962408bffaa9b87c1f06.pdf,,,http://ijiset.com/v1s3/IJISET_V1_I3_36.pdf
+36b13627ee8a5a8cd04645213aabfa917bbd32f5,,,https://doi.org/10.1109/TCSVT.2016.2602812,
+363f540dc82ba8620262a04a67cfd6d3c85b0582,,,,http://doi.ieeecomputersociety.org/10.1109/WIAMIS.2009.5031445
+361c9ba853c7d69058ddc0f32cdbe94fbc2166d5,http://pdfs.semanticscholar.org/361c/9ba853c7d69058ddc0f32cdbe94fbc2166d5.pdf,,,http://www.ai.rug.nl/~mwiering/Thesis_Jos_vd_Wolfshaar.pdf
+362a70b6e7d55a777feb7b9fc8bc4d40a57cde8c,http://mirlab.org/conference_papers/International_Conference/ICASSP%202016/pdfs/0002792.pdf,,https://doi.org/10.1109/ICASSP.2016.7472186,
+36ea75e14b69bed454fde6076ea6b85ed87fbb14,http://pdfs.semanticscholar.org/36ea/75e14b69bed454fde6076ea6b85ed87fbb14.pdf,,,http://www.waset.org/journals/waset/v62/v62-90.pdf
+36b19e6bf2f0abc0387052436956a25b37488134,,,,
+366e650a578a3732ebe10267f04bcf9d3129f076,,,,
+36df81e82ea5c1e5edac40b60b374979a43668a5,http://www.robots.ox.ac.uk/~vgg/publications/2012/Parkhi12b/parkhi12b.pdf,,https://doi.org/10.1109/WIAMIS.2012.6226775,
+366d20f8fd25b4fe4f7dc95068abc6c6cabe1194,http://arxiv.org/pdf/1605.05411v1.pdf,,https://doi.org/10.1109/ICPR.2016.7900114,https://arxiv.org/pdf/1605.05411v1.pdf
+3630324c2af04fd90f8668f9ee9709604fe980fd,http://www.yugangjiang.info/publication/TCSVT-Shu.pdf,,https://doi.org/10.1109/TCSVT.2016.2607345,
+362ba8317aba71c78dafca023be60fb71320381d,http://pdfs.semanticscholar.org/362b/a8317aba71c78dafca023be60fb71320381d.pdf,,https://doi.org/10.1016/j.patcog.2014.06.004,http://www.cse.msu.edu/rgroups/biometrics/Publications/Face/KangHanJainLee_NighttimeFRatLargeStandoff_P.R.2014.pdf
+36cf96fe11a2c1ea4d999a7f86ffef6eea7b5958,http://www.iab-rubric.org/papers/RGBD-Face.pdf,,https://doi.org/10.1109/TIFS.2014.2343913,
+36e8ef2e5d52a78dddf0002e03918b101dcdb326,http://www.milbo.org/stasm-files/multiview-active-shape-models-with-sift-for-300w.pdf,,,
+36018404263b9bb44d1fddaddd9ee9af9d46e560,http://pdfs.semanticscholar.org/3601/8404263b9bb44d1fddaddd9ee9af9d46e560.pdf,,,http://www.researchgate.net/profile/Gozde_Akar/publication/267784070_OCCLUDED_FACE_RECOGNITION_BY_USING_GABOR_FEATURES/links/54be33140cf218d4a16a5385.pdf
+367f2668b215e32aff9d5122ce1f1207c20336c8,http://pdfs.semanticscholar.org/367f/2668b215e32aff9d5122ce1f1207c20336c8.pdf,,,"http://paspk.org/wp-content/uploads/proceedings/52,%20No.1/a72449e2Speaker%20dependent%20human.pdf"
+36c2db5ff76864d289781f93cbb3e6351f11984c,http://www.eurasip.org/Proceedings/Eusipco/Eusipco2009/contents/papers/1569187194.pdf,http://ieeexplore.ieee.org/document/7077417/,,
+3624ca25f09f3acbcf4d3a4c40b9e45a29c22b94,http://pdfs.semanticscholar.org/3624/ca25f09f3acbcf4d3a4c40b9e45a29c22b94.pdf,,https://doi.org/10.1016/j.neucom.2011.01.024,http://sujingwang.name/publication/neucom11.pdf
+3661a34f302883c759b9fa2ce03de0c7173d2bb2,http://pdfs.semanticscholar.org/fd6d/14fb0bbca58e924c504d7dc57cb7f8d3707e.pdf,,https://doi.org/10.1007/978-3-319-46475-6_27,http://arxiv.org/abs/1607.06997
+36c473fc0bf3cee5fdd49a13cf122de8be736977,http://pdfs.semanticscholar.org/bc6c/051b66ecadac7bb3e6ace66665e42875d790.pdf,,https://doi.org/10.1007/978-3-319-46484-8_2,http://arxiv.org/abs/1608.00859
+368d59cf1733af511ed8abbcbeb4fb47afd4da1c,http://pdfs.semanticscholar.org/368d/59cf1733af511ed8abbcbeb4fb47afd4da1c.pdf,,,https://arxiv.org/pdf/1610.04823v1.pdf
+366595171c9f4696ec5eef7c3686114fd3f116ad,http://pdfs.semanticscholar.org/3665/95171c9f4696ec5eef7c3686114fd3f116ad.pdf,,,http://www.eecs.berkeley.edu/Pubs/TechRpts/2012/EECS-2012-53.pdf
+36b9f46c12240898bafa10b0026a3fb5239f72f3,https://arxiv.org/pdf/1702.05573v1.pdf,,,http://arxiv.org/abs/1702.05573
+3634b4dd263c0f330245c086ce646c9bb748cd6b,https://arxiv.org/pdf/1504.00983v2.pdf,,,http://arxiv.org/pdf/1504.00983.pdf
+367a786cfe930455cd3f6bd2492c304d38f6f488,http://pdfs.semanticscholar.org/367a/786cfe930455cd3f6bd2492c304d38f6f488.pdf,,,http://tigerprints.clemson.edu/cgi/viewcontent.cgi?article=3290&context=all_theses
+36bb93c4f381adca267191811abb8cc7812363f9,,,https://doi.org/10.1109/CISP-BMEI.2017.8301987,
+5c4ce36063dd3496a5926afd301e562899ff53ea,http://pdfs.semanticscholar.org/5c4c/e36063dd3496a5926afd301e562899ff53ea.pdf,,,https://arxiv.org/pdf/1703.01170v1.pdf
+5c6de2d9f93b90034f07860ae485a2accf529285,http://pdfs.semanticscholar.org/5c6d/e2d9f93b90034f07860ae485a2accf529285.pdf,,https://doi.org/10.1504/IJBM.2013.055971,http://socia-lab.di.ubi.pt/~ubipr/Chandra_Hugo_Pose_Compensation_2013.pdf
+5c8ab6a48bf7c5302b800c1077884f4898ad0beb,,,,
+5c624382057b55e46af4dc4c055a33c90e8bf08a,http://www.researchgate.net/profile/Ngoc_Son_Vu/publication/224114972_Illumination-robust_face_recognition_using_retina_modeling/links/0fcfd507f06292b0a5000000.pdf,,https://doi.org/10.1109/ICIP.2009.5413963,
+5c91fc106cfe9d57a9b149c1af29ca84d403fc7e,,,https://doi.org/10.1109/TCSVT.2015.2452782,
+5ca23ceb0636dfc34c114d4af7276a588e0e8dac,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_SMC_2009/PDFs/116.pdf,,https://doi.org/10.1109/ICSMC.2009.5346225,
+5c3eb40b06543f00b2345f3291619a870672c450,,,,http://doi.ieeecomputersociety.org/10.1109/ICPR.2006.539
+5c2a7518fb26a37139cebff76753d83e4da25159,http://pdfs.semanticscholar.org/5c2a/7518fb26a37139cebff76753d83e4da25159.pdf,,https://doi.org/10.1016/j.image.2016.05.020,http://researchprofiles.herts.ac.uk/portal/files/13112055/Accepted_Manuscript.pdf
+5cb83eba8d265afd4eac49eb6b91cdae47def26d,http://www.kresttechnology.com/krest-academic-projects/krest-major-projects/ECE/B-Tech%20Papers/21.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICIG.2009.123
+5c8672c0d2f28fd5d2d2c4b9818fcff43fb01a48,http://pdfs.semanticscholar.org/5c86/72c0d2f28fd5d2d2c4b9818fcff43fb01a48.pdf,,,https://www.tugraz.at/fileadmin/user_upload/Institute/ICG/Documents/lrs/pubs/koestinger_dagm_cvaw_2012.pdf
+5c3dce55c61ee86073575ac75cc882a215cb49e6,http://pdfs.semanticscholar.org/8d93/b33c38a26b97442b2f160e75212739c60bc5.pdf,,https://doi.org/10.1007/978-3-319-10590-1_38,http://sites.skoltech.ru/app/data/uploads/sites/25/2014/11/eccv14neuralcodes.pdf
+5c2e264d6ac253693469bd190f323622c457ca05,http://vislab.ucr.edu/PUBLICATIONS/pubs/Journal%20and%20Conference%20Papers/after10-1-1997/Conference/2013/Improving%20large%20scale%20image%20retrieval%20using%20multi-level%20features13.pdf,,https://doi.org/10.1109/ICIP.2013.6738900,http://www.ee.ucr.edu/~lan/papers/ChenICIP13.pdf
+5c473cfda1d7c384724fbb139dfe8cb39f79f626,http://www.cs.zju.edu.cn/~gpan/publication/2012-PAA-face-expression-onlinefirst.pdf,,https://doi.org/10.1007/s10044-012-0315-5,
+5c19c4c6a663fe185a739a5f50cef6a12a4635a1,,,https://doi.org/10.1016/j.imavis.2012.08.016,
+5c820e47981d21c9dddde8d2f8020146e600368f,http://pdfs.semanticscholar.org/5c82/0e47981d21c9dddde8d2f8020146e600368f.pdf,,https://doi.org/10.1007/978-3-319-16634-6_6,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ACCV_2014/pages/workshop12/pdffiles/w12-p3.pdf
+5c5e1f367e8768a9fb0f1b2f9dbfa060a22e75c0,http://www.cs.ucr.edu/~mkafai/papers/Paper_tifs2014.pdf,,https://doi.org/10.1109/TIFS.2014.2359548,http://alumni.cs.ucr.edu/~mkafai/papers/Paper_tifs2014.pdf
+5c124b57699be19cd4eb4e1da285b4a8c84fc80d,http://www.iis.ee.ic.ac.uk/icvl/doc/cvpr14_xiaowei.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2014.228
+5c435c4bc9c9667f968f891e207d241c3e45757a,http://pdfs.semanticscholar.org/eb6a/13c8a607dfc535e5f31b7c8843335674644c.pdf,,https://doi.org/10.5244/C.24.6,http://www.researchgate.net/profile/John_Alexander_Ruiz_Hernandez/publication/221259731_How_old_are_you__Age_Estimation_with_Tensors_of_Binary_Gaussian_Receptive_Maps/links/00b49522fa65f1298e000000.pdf
+5c7adde982efb24c3786fa2d1f65f40a64e2afbf,http://pdfs.semanticscholar.org/bd40/dee4f2bbb0e512575cc96a0e3a7918a0ce42.pdf,,https://doi.org/10.1007/978-3-319-10590-1_51,http://grail.cs.washington.edu/wp-content/uploads/2015/08/sun2014rdh.pdf
+5c526ee00ec0e80ba9678fee5134dae3f497ff08,,,https://doi.org/10.1109/TCE.2010.5606299,
+5c36d8bb0815fd4ff5daa8351df4a7e2d1b32934,http://www.istc-cc.cmu.edu/publications/papers/2016/GeePS-cui-eurosys16.pdf,,,http://www.pdl.cmu.edu/PDL-FTP/CloudComputing/GeePS-cui-eurosys16.pdf
+5cfbeae360398de9e20e4165485837bd42b93217,http://pdfs.semanticscholar.org/5cfb/eae360398de9e20e4165485837bd42b93217.pdf,,,https://www.ijariit.com/manuscripts/v3i5/V3I5-1189.pdf?b23ae0&b23ae0
+5ca14fa73da37855bfa880b549483ee2aba26669,http://pdfs.semanticscholar.org/5ca1/4fa73da37855bfa880b549483ee2aba26669.pdf,,,http://www.ijceronline.com/papers/Vol7_issue7/J07076977.pdf
+5c92355b2808621d237a89dc7b3faa5cdb990ab5,http://www.researchgate.net/profile/Brian_Lovell2/publication/236124723_Dynamic_Amelioration_of_Resolution_Mismatches_for_Local_Feature_Based_Identity_Inference/links/0fcfd50741a027e848000000.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICPR.2010.299
+5c4f9260762a450892856b189df240f25b5ed333,,,https://doi.org/10.1109/TIP.2017.2651396,
+5c02bd53c0a6eb361972e8a4df60cdb30c6e3930,http://arxiv.org/pdf/1303.4893v2.pdf,,,https://arxiv.org/pdf/1303.4893v2.pdf
+5c8ae37d532c7bb8d7f00dfde84df4ba63f46297,http://pdfs.semanticscholar.org/5c8a/e37d532c7bb8d7f00dfde84df4ba63f46297.pdf,,,http://arxiv.org/abs/1801.07230
+5c717afc5a9a8ccb1767d87b79851de8d3016294,http://mirlab.org/conference_papers/International_Conference/ICASSP%202012/pdfs/0001845.pdf,,https://doi.org/10.1109/ICASSP.2012.6288261,http://www.mirlab.org/conference_papers/International_Conference/ICASSP%202012/pdfs/0001845.pdf
+5cb1dd76c672b99d9103db3842721289bacf6e1b,,,,
+5ce2cb4c76b0cdffe135cf24b9cda7ae841c8d49,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhao_Facial_Expression_Intensity_CVPR_2016_paper.pdf,,,http://openaccess.thecvf.com/content_cvpr_2016/papers/Zhao_Facial_Expression_Intensity_CVPR_2016_paper.pdf
+09b80d8eea809529b08a8b0ff3417950c048d474,http://openaccess.thecvf.com/content_cvpr_2013/papers/Choi_Adding_Unlabeled_Samples_2013_CVPR_paper.pdf,,,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989a875.pdf
+09f58353e48780c707cf24a0074e4d353da18934,http://www.cse.msu.edu/rgroups/biometrics/Publications/Face/BestrowdenBishtKlontzJain_CrowdsourcingHumanPeformance_IJCB2014.pdf,,https://doi.org/10.1109/BTAS.2014.6996296,http://biometrics.cse.msu.edu/Presentations/Crowdsourcing_IJCB_2014_ppt.pdf
+096eb8b4b977aaf274c271058feff14c99d46af3,http://www.dtic.mil/dtic/tr/fulltext/u2/a585819.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2011.6126293
+0952ac6ce94c98049d518d29c18d136b1f04b0c0,http://pdfs.semanticscholar.org/0952/ac6ce94c98049d518d29c18d136b1f04b0c0.pdf,,https://doi.org/10.5244/C.20.96,http://www.macs.hw.ac.uk/bmvc2006/papers/006.pdf
+0969e0dc05fca21ff572ada75cb4b703c8212e80,http://pdfs.semanticscholar.org/0969/e0dc05fca21ff572ada75cb4b703c8212e80.pdf,,https://doi.org/10.3390/a9030048,http://www.mdpi.com/1999-4893/9/3/48/pdf
+09f9409430bba2afb84aa8214dbbb43bfd4cf056,,,https://doi.org/10.1109/TNN.2006.883012,
+09dd01e19b247a33162d71f07491781bdf4bfd00,http://pdfs.semanticscholar.org/5991/0d557b54566ec97280480daca02685f21907.pdf,,https://doi.org/10.1007/978-3-642-15561-1_44,http://luci.ics.uci.edu/websiteContent/weAreLuci/biographies/faculty/djp3/LocalCopy/turk.pdf
+09cf3f1764ab1029f3a7d57b70ae5d5954486d69,http://pdfs.semanticscholar.org/09cf/3f1764ab1029f3a7d57b70ae5d5954486d69.pdf,,https://doi.org/10.1007/s11760-008-0074-3,http://poseidon.csd.auth.gr/papers/PUBLISHED/JOURNAL/pdf/Buciu08b.pdf
+09fa54f1ab7aaa83124d2415bfc6eb51e4b1f081,http://acberg.com/papers/street2shop.pdf,,,http://openaccess.thecvf.com/content_iccv_2015/papers/Kiapour_Where_to_Buy_ICCV_2015_paper.pdf
+0974677f59e78649a40f0a1d85735410d21b906a,,,https://doi.org/10.1109/ISCAS.2017.8050798,
+0931bef0a9c8c153184a1f9c286cf4883cbe99b6,,,https://doi.org/10.1007/s12193-015-0203-6,
+09e7397fbcf4cc54ee085599a3b9bb72539ab251,,,,
+09138ad5ad1aeef381f825481d1b4f6b345c438c,,,https://doi.org/10.1109/IIH-MSP.2012.41,
+09628e9116e7890bc65ebeabaaa5f607c9847bae,https://arxiv.org/pdf/1704.03039.pdf,,,http://openaccess.thecvf.com/content_cvpr_2017/papers/Morgado_Semantically_Consistent_Regularization_CVPR_2017_paper.pdf
+09733129161ca7d65cf56a7ad63c17f493386027,http://pdfs.semanticscholar.org/0973/3129161ca7d65cf56a7ad63c17f493386027.pdf,,,http://www.cg.tuwien.ac.at/research/publications/2007/vucini_erald-2007-FRI/vucini_erald-2007-FRI-Paper.pdf
+09c586624ec65d7ef2d4d8d321e98f61698dcfe2,http://www.seas.upenn.edu/~timothee/papers/cvpr_2010_supplement.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2010.5540106
+09718bf335b926907ded5cb4c94784fd20e5ccd8,http://parnec.nuaa.edu.cn/papers/journal/2005/xtan-TNN05.pdf,,https://doi.org/10.1109/TNN.2005.849817,http://www.researchgate.net/profile/Songcan_Chen/publication/7638460_Recognizing_partially_occluded_expression_variant_faces_from_single_training_image_per_person_with_SOM_and_soft_kappa-NN_ensemble/links/0046351406aaacc8d8000000.pdf
+098a1ccc13b8d6409aa333c8a1079b2c9824705b,http://people.cs.pitt.edu/~kovashka/ut/pivots-kovashka-iccv2013.pdf,,,http://www.cs.utexas.edu/~grauman/papers/pivots-kovashka-iccv2013.pdf
+0903bb001c263e3c9a40f430116d1e629eaa616f,http://pdfs.semanticscholar.org/0903/bb001c263e3c9a40f430116d1e629eaa616f.pdf,,,http://www.cs.cmu.edu/~santosh/projects/papers/cvpr_inReview.pdf
+090ff8f992dc71a1125636c1adffc0634155b450,http://pdfs.semanticscholar.org/090f/f8f992dc71a1125636c1adffc0634155b450.pdf,,https://doi.org/10.1007/978-3-319-16811-1_46,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ACCV_2014/pages/workshop3/pdffiles/w3-p13.pdf
+09b43b59879d59493df2a93c216746f2cf50f4ac,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1A_036_ext.pdf,,https://doi.org/10.1109/TIP.2016.2612827,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Hu_Deep_Transfer_Metric_2015_CVPR_paper.pdf
+098fa9b4c3f7fb41c7a178d36f5dbb50a3ffa377,http://oui.csail.mit.edu/camera_readys/13.pdf,,,http://arxiv.org/abs/1505.00295
+09b0ef3248ff8f1a05b8704a1b4cf64951575be9,https://arxiv.org/pdf/1511.06783v1.pdf,,,https://arxiv.org/pdf/1511.06783v2.pdf
+097104fc731a15fad07479f4f2c4be2e071054a2,http://pdfs.semanticscholar.org/dbad/94c3506a342f55f54388e162e8481ae8b184.pdf,,https://doi.org/10.1016/j.patcog.2007.06.026,https://ibug.doc.ic.ac.uk/media/uploads/documents/pat_rec_2008.pdf
+096ffc1ea5493242ba0c113178dab0c096412f81,,,,http://doi.acm.org/10.1145/3123266.3123441
+094357c1a2ba3fda22aa6dd9e496530d784e1721,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Wang_A_Unified_Probabilistic_2013_ICCV_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2013.264
+09686fd5eb5ec6f47d5ec24276c78d23607ec01e,,,,
+092dd7cb6c9b415eb83afb104fa63d7d4290ac33,,,https://doi.org/10.1109/SPLIM.2016.7528409,
+09f853ce12f7361c4b50c494df7ce3b9fad1d221,http://files.is.tue.mpg.de/jgall/download/jgall_RFdepthFace_ijcv12.pdf,,https://doi.org/10.1007/s11263-012-0549-0,http://www.vision.ee.ethz.ch/en/publications/papers/articles/eth_biwi_00957.pdf
+09111da0aedb231c8484601444296c50ca0b5388,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553737.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2013.6553737
+09750c9bbb074bbc4eb66586b20822d1812cdb20,http://mirlab.org/conference_papers/International_Conference/ICASSP%202012/pdfs/0001385.pdf,,https://doi.org/10.1109/ICASSP.2012.6288149,http://staff.eng.bahcesehir.edu.tr/~cigdemeroglu/papers/international_conference_papers/C_2012_erdem_ICASSP.pdf
+09ce14b84af2dc2f76ae1cf227356fa0ba337d07,http://grail.cs.washington.edu/3dfaces/paper.pdf,,,http://grail.cs.washington.edu/pub/papers/kemelmacher2011fri.pdf
+090e4713bcccff52dcd0c01169591affd2af7e76,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Shao_What_Do_You_2013_ICCV_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2013.451
+097f674aa9e91135151c480734dda54af5bc4240,http://pdfs.semanticscholar.org/097f/674aa9e91135151c480734dda54af5bc4240.pdf,,,http://www.cmis.csiro.au/Hugues.Talbot/dicta2003/cdrom/pdf/0069.pdf
+5d0f72174e9ca1d620227b53ab1bbd8263fb4a9e,,,,
+5d485501f9c2030ab33f97972aa7585d3a0d59a7,http://pdfs.semanticscholar.org/5d48/5501f9c2030ab33f97972aa7585d3a0d59a7.pdf,,https://doi.org/10.1016/j.patcog.2009.04.006,https://www.ecse.rpi.edu/~qji/Papers/PR_BNlearning_revision_v2.pdf
+5da740682f080a70a30dc46b0fc66616884463ec,http://pdfs.semanticscholar.org/5da7/40682f080a70a30dc46b0fc66616884463ec.pdf,,https://doi.org/10.1007/978-3-319-23117-4_22,http://av.dfki.de/~pagani/papers/Selim2015_CAIP.pdf
+5dbb2d556f2e63a783a695a517f5deb11aafd7ea,,,https://doi.org/10.1109/ICB.2015.7139079,
+5de5848dc3fc35e40420ffec70a407e4770e3a8d,http://pdfs.semanticscholar.org/5de5/848dc3fc35e40420ffec70a407e4770e3a8d.pdf,,,https://www.vision.ee.ethz.ch/webvision/files/1708.02862.pdf
+5d9bed6974fb81efeaeeff605b075e73b119a2b5,,,,
+5da139fc43216c86d779938d1c219b950dd82a4c,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_ICIP_2007/pdfs/0200205.pdf,,https://doi.org/10.1109/ICIP.2007.4379128,
+5d33a10752af9ea30993139ac6e3a323992a5831,http://web.engr.illinois.edu/~iendres2/publications/cvpr2010_att.pdf,,,http://cs.uiuc.edu/homes/iendres2/publications/cvpr2010_att.pdf
+5dc056fe911a3e34a932513abe637076250d96da,http://www.vision.ee.ethz.ch/~gfanelli/pubs/cvpr12.pdf,,,http://www.vision.ee.ethz.ch/~gallju/download/jgall_facialfeatures_cvpr12.pdf
+5d185d82832acd430981ffed3de055db34e3c653,http://pdfs.semanticscholar.org/fc70/92e72a2bae6f60266147e0fb587b1771699a.pdf,,,http://www.redalyc.org/pdf/615/61520938004.pdf
+5da3bb198b087c15509f933215b141de9e8f43ed,,,,
+5dd57b7e0e82a33420c054da7ea3f435d49e910e,,,https://doi.org/10.1007/s10851-014-0493-4,
+5d233e6f23b1c306cf62af49ce66faac2078f967,http://pdfs.semanticscholar.org/5d23/3e6f23b1c306cf62af49ce66faac2078f967.pdf,,,http://ftp.ncbi.nlm.nih.gov/pub/pmc/91/93/pone.0149003.PMC4747560.pdf
+5dce578c8bc819592c9ec7bfab6302bbcd9a3f3d,,,,
+5dd496e58cfedfc11b4b43c4ffe44ac72493bf55,http://pdfs.semanticscholar.org/5dd4/96e58cfedfc11b4b43c4ffe44ac72493bf55.pdf,,,https://arxiv.org/pdf/1707.06119v1.pdf
+5db075a308350c083c3fa6722af4c9765c4b8fef,http://pdfs.semanticscholar.org/5db0/75a308350c083c3fa6722af4c9765c4b8fef.pdf,,,http://www.sensorsportal.com/HTML/DIGEST/july_2013/P_1260.pdf
+5df17c81c266cf2ebb0778e48e825905e161a8d9,,,https://doi.org/10.1109/TMM.2016.2520091,
+5d7f8eb73b6a84eb1d27d1138965eb7aef7ba5cf,https://www.cl.cam.ac.uk/~hg410/SariyanidiEtAl-RobustRegistration-TIP2016.pdf,,https://doi.org/10.1109/TIP.2016.2639448,https://www.cl.cam.ac.uk/~hg410/SariyanidiEtAl-MUMIE-TIP2017.pdf
+5dcf78de4d3d867d0fd4a3105f0defae2234b9cb,http://pdfs.semanticscholar.org/5dcf/78de4d3d867d0fd4a3105f0defae2234b9cb.pdf,,https://doi.org/10.5244/C.26.59,http://www.bmva.org/bmvc/2012/BMVC/paper059/abstract059.pdf
+5da98f7590c08e83889f3cec7b0304b3610abf42,,,https://doi.org/10.1016/j.eswa.2017.07.018,
+5dfebcb7bfefb1af1cfef61a151abfe98a7e7cfa,http://vision.ucsd.edu/sites/default/files/cwah_cvpr2013_unfamiliar.pdf,,,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989a779.pdf
+5d88702cdc879396b8b2cc674e233895de99666b,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w11/papers/Liu_Exploiting_Feature_Hierarchies_ICCV_2015_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2015.44
+5d44c675addcb6e74cbc5a9c48df0d754bdbcd98,http://pdfs.semanticscholar.org/9bc0/1fa9400c231e41e6a72ec509d76ca797207c.pdf,,,http://www.waset.org/journals/waset/v63/v63-92.pdf
+5d9f468a2841ea2f27bbe3ef2c6fe531d444be68,,,https://doi.org/10.1109/GlobalSIP.2017.8309167,
+5d5cd6fa5c41eb9d3d2bab3359b3e5eb60ae194e,http://pdfs.semanticscholar.org/5d5c/d6fa5c41eb9d3d2bab3359b3e5eb60ae194e.pdf,,,http://www.ehu.eus/ccwintco/uploads/e/eb/PFC-IonMarques.pdf
+5d09d5257139b563bd3149cfd5e6f9eae3c34776,http://pdfs.semanticscholar.org/5d09/d5257139b563bd3149cfd5e6f9eae3c34776.pdf,,,https://www.researchgate.net/profile/Victor_Diaz-Ramirez/publication/267511923_Pattern_recognition_with_composite_correlation_filters_designed_with_multi-objective_combinatorial_optimization/links/5451ab3e0cf2bf864cba99e0.pdf
+5d479f77ecccfac9f47d91544fd67df642dfab3c,http://pdfs.semanticscholar.org/7880/c21bb0de02cd4db095e011ac7aff47b35ee8.pdf,,https://doi.org/10.1007/978-3-319-10590-1_7,http://cs.stanford.edu/~pliang/papers/linking-eccv2014.pdf
+5d01283474b73a46d80745ad0cc0c4da14aae194,http://pdfs.semanticscholar.org/5d01/283474b73a46d80745ad0cc0c4da14aae194.pdf,,https://doi.org/10.1016/j.jvcir.2015.08.005,http://homepages.dcc.ufmg.br/~william/papers/paper_2015_JVCI.pdf
+5ddfd3d372f7679518db8fd763d5f8bc5899ed67,,,https://doi.org/10.1109/ICPR.2014.797,
+5d197c8cd34473eb6cde6b65ced1be82a3a1ed14,http://cdn.intechopen.com/pdfs/20590/InTech-A_face_image_database_for_evaluating_out_of_focus_blur.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ISDA.2008.300
+5df376748fe5ccd87a724ef31d4fdb579dab693f,http://pdfs.semanticscholar.org/5df3/76748fe5ccd87a724ef31d4fdb579dab693f.pdf,,,https://hotsoft.carleton.ca/hotsoft/wp-content/uploads/2015/06/submission_49447_UPDATE.pdf
+31aa20911cc7a2b556e7d273f0bdd5a2f0671e0a,http://pdfs.semanticscholar.org/31aa/20911cc7a2b556e7d273f0bdd5a2f0671e0a.pdf,,,https://arxiv.org/pdf/1804.01417v1.pdf
+31ba7f5e09a2f0fe9cf7ea95314723206dcb6059,,,,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.300
+3150e329e01be31ba08b6d76fc46b0da88a5ddeb,,,,http://doi.acm.org/10.1145/2927006.2927012
+31b05f65405534a696a847dd19c621b7b8588263,https://arxiv.org/pdf/1611.01484v1.pdf,,https://doi.org/10.1109/BTAS.2017.8272731,https://arxiv.org/pdf/1611.01484v2.pdf
+31625522950e82ad4dffef7ed0df00fdd2401436,http://pdfs.semanticscholar.org/3162/5522950e82ad4dffef7ed0df00fdd2401436.pdf,,https://doi.org/10.1007/978-3-319-49409-8_3,https://arxiv.org/pdf/1608.08395v1.pdf
+3167f415a861f19747ab5e749e78000179d685bc,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_ICCV_2009/contents/pdf/iccv2009_131.pdf,,https://doi.org/10.1109/ICCV.2009.5459371,
+3107316f243233d45e3c7e5972517d1ed4991f91,https://arxiv.org/pdf/1703.10155v1.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2017.299
+3168e52567d564f0871c3f9ed7757dae9d66c55a,,,,
+31c0968fb5f587918f1c49bf7fa51453b3e89cf7,http://pdfs.semanticscholar.org/31c0/968fb5f587918f1c49bf7fa51453b3e89cf7.pdf,,,https://arxiv.org/pdf/1611.05244v2.pdf
+310fe4e6cb6d090f7817de4c1034e35567b56e34,,,https://doi.org/10.1109/ICPR.2014.313,
+31e57fa83ac60c03d884774d2b515813493977b9,http://pdfs.semanticscholar.org/31e5/7fa83ac60c03d884774d2b515813493977b9.pdf,,https://doi.org/10.1016/j.patrec.2017.12.010,https://arxiv.org/pdf/1703.01597v1.pdf
+31a2fb63a3fc67da9932474cda078c9ac43f85c5,http://www.researchgate.net/profile/Sadeep_Jayasumana2/publication/269040853_Kernel_Methods_on_Riemannian_Manifolds_with_Gaussian_RBF_Kernels/links/54858a6a0cf283750c37264b.pdf,,,https://arxiv.org/pdf/1412.0265v2.pdf
+3137a3fedf23717c411483c7b4bd2ed646258401,https://www.tugraz.at/fileadmin/user_upload/Institute/ICG/Documents/lrs/pubs/koestinger_iccv_13.pdf,,,http://lrs.icg.tugraz.at/pubs/koestinger_iccv_13.pdf
+31c34a5b42a640b824fa4e3d6187e3675226143e,http://pdfs.semanticscholar.org/31c3/4a5b42a640b824fa4e3d6187e3675226143e.pdf,,,http://dl.acm.org/citation.cfm?id=2616112
+310dcf9edb491b63d09a9eb55a99ad6bb46da1d4,,,,
+316e67550fbf0ba54f103b5924e6537712f06bee,http://lear.inrialpes.fr/pubs/2010/GVS10/slides.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2010.5540120
+31697737707d7f661cbc6785b76cf9a79fee3ccd,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.100
+31ef5419e026ef57ff20de537d82fe3cfa9ee741,http://pdfs.semanticscholar.org/9a10/78b6e3810c95fc4b87154ad62c0f133caebb.pdf,,https://doi.org/10.1007/978-3-319-16181-5_10,http://www.professeurs.polymtl.ca/christopher.pal/eccv2014/eccv2014_LBP.pdf
+310da8bd81c963bd510bf9aaa4d028a643555c84,http://www.cs.sunysb.edu/~ial/content/papers/2005/Zhang2005cvpr2.pdf,,,http://www3.cs.stonybrook.edu/~cvl/content/papers/2005/Zhang2005cvpr2.pdf
+31146bd416626d2bf912e0a0d12ca619fb49011b,,,,
+31b58ced31f22eab10bd3ee2d9174e7c14c27c01,http://pdfs.semanticscholar.org/31b5/8ced31f22eab10bd3ee2d9174e7c14c27c01.pdf,,,http://people.csail.mit.edu/torralba/publications/5papers/Torralba2008.pdf
+31a36014354ee7c89aa6d94e656db77922b180a5,,,,http://doi.acm.org/10.1145/2304496.2304509
+31835472821c7e3090abb42e57c38f7043dc3636,http://pdfs.semanticscholar.org/3183/5472821c7e3090abb42e57c38f7043dc3636.pdf,,https://doi.org/10.1007/978-3-642-33885-4_20,http://www.maths.lth.se/vision/publdb/reports/pdf/ardo-nilsson-etal-3wartemis-12.pdf
+31a38fd2d9d4f34d2b54318021209fe5565b8f7f,http://www.umiacs.umd.edu/~huytho/papers/HoChellappa_TIP2013.pdf,,https://doi.org/10.1109/TIP.2012.2233489,
+31aa7c992692b74f17ddec665cd862faaeafd673,http://www.researchgate.net/profile/Shinichi_Satoh/publication/221657297_Unsupervised_face_annotation_by_mining_the_web/links/0912f510a04034844d000000.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICDM.2008.47
+3152e89963b8a4028c4abf6e1dc19e91c4c5a8f4,http://pdfs.semanticscholar.org/3152/e89963b8a4028c4abf6e1dc19e91c4c5a8f4.pdf,,,https://arxiv.org/pdf/1801.03261v1.pdf
+318a81acdd15a0ab2f706b5f53ee9d4d5d86237f,http://pdfs.semanticscholar.org/318a/81acdd15a0ab2f706b5f53ee9d4d5d86237f.pdf,,https://doi.org/10.1002/widm.1139,https://www.researchgate.net/profile/Sebastian_Ventura/publication/267154292_Multilabel_Learning_A_Review_of_the_State_of_The_Art_and_Ongoing_Research/links/54a6bb5f0cf257a6360a918e.pdf
+31ffc95167a2010ce7aab23db7d5fc7ec439f5fb,,,https://doi.org/10.1109/TNNLS.2017.2651169,
+31ace8c9d0e4550a233b904a0e2aabefcc90b0e3,http://pdfs.semanticscholar.org/31ac/e8c9d0e4550a233b904a0e2aabefcc90b0e3.pdf,,,https://arxiv.org/pdf/1403.2802v1.pdf
+31bf8d7f5d373a2dece747448306e2228be51016,,,,
+316d51aaa37891d730ffded7b9d42946abea837f,http://pdfs.semanticscholar.org/9f00/3a5e727b99f792e600b93b6458b9cda3f0a5.pdf,,,http://cbcl.mit.edu/publications/ps/LiaoLeiboPoggio_SfN2014.pdf
+31afdb6fa95ded37e5871587df38976fdb8c0d67,http://www3.ntu.edu.sg/home/EXDJiang/ICASSP15.pdf,,https://doi.org/10.1109/ICASSP.2015.7178221,http://eeeweba.ntu.edu.sg/computervision/Research%20Papers/2015/Quantized%20Fuzzy%20LBP%20for%20Face%20Recognition.pdf
+31d60b2af2c0e172c1a6a124718e99075818c408,http://pdfs.semanticscholar.org/31d6/0b2af2c0e172c1a6a124718e99075818c408.pdf,,https://doi.org/10.20965/jaciii.2012.p0341,http://www.laszlojeni.com/pub/articles/Jeni12JACIII.pdf
+31f1e711fcf82c855f27396f181bf5e565a2f58d,http://www.rci.rutgers.edu/~vmp93/Conference_pub/Age_iccv2015.pdf,,,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w11/papers/Ranjan_Unconstrained_Age_Estimation_ICCV_2015_paper.pdf
+312afff739d1e0fcd3410adf78be1c66b3480396,http://pdfs.semanticscholar.org/312a/fff739d1e0fcd3410adf78be1c66b3480396.pdf,,,https://arxiv.org/pdf/1801.02480v1.pdf
+315a90543d60a5b6c5d1716fe9076736f0e90d24,https://www.computer.org/web/csdl/index/-/csdl/proceedings/fg/2013/5545/00/06553721.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2013.6553721
+3107085973617bbfc434c6cb82c87f2a952021b7,http://pdfs.semanticscholar.org/cee6/6bd89d1e25355e78573220adcd017a2d97d8.pdf,,,https://arxiv.org/pdf/1707.07213v2.pdf
+31ba9d0bfaa2a44bae039e5625eb580afd962892,,,https://doi.org/10.1016/j.cviu.2016.03.014,
+31182c5ffc8c5d8772b6db01ec98144cd6e4e897,http://pdfs.semanticscholar.org/3118/2c5ffc8c5d8772b6db01ec98144cd6e4e897.pdf,,,https://arxiv.org/pdf/1801.01089v1.pdf
+314c4c95694ff12b3419733db387476346969932,,,,http://dl.acm.org/citation.cfm?id=3007672
+31003ba1cf9f77ec5b7038996d2ce999fa04d0ea,,,,
+31bb49ba7df94b88add9e3c2db72a4a98927bb05,http://pdfs.semanticscholar.org/31bb/49ba7df94b88add9e3c2db72a4a98927bb05.pdf,,https://doi.org/10.1016/j.imavis.2012.06.005,https://ibug.doc.ic.ac.uk/media/uploads/documents/sandbach2012survey.pdf
+3146fabd5631a7d1387327918b184103d06c2211,http://www.cv-foundation.org/openaccess/content_cvpr_2016_workshops/w18/papers/Jeni_Person-Independent_3D_Gaze_CVPR_2016_paper.pdf,,,http://www.pitt.edu/~jeffcohn/biblio/3D-Gaze.pdf
+31f905d40a4ac3c16c91d5be8427762fa91277f1,,,https://doi.org/10.1109/TIP.2017.2704661,
+91811203c2511e919b047ebc86edad87d985a4fa,http://pdfs.semanticscholar.org/9181/1203c2511e919b047ebc86edad87d985a4fa.pdf,,,http://sharif.edu/~hoda/paper_2D.pdf
+91167aceafbc9c1560381b33c8adbc32a417231b,,,https://doi.org/10.1109/TCSVT.2009.2020337,
+915ff2bedfa0b73eded2e2e08b17f861c0e82a58,,,https://doi.org/10.1109/UEMCON.2017.8249000,
+910524c0d0fe062bf806bb545627bf2c9a236a03,http://pdfs.semanticscholar.org/9105/24c0d0fe062bf806bb545627bf2c9a236a03.pdf,,,http://isgwww.cs.uni-magdeburg.de/bv/theses/thesis_chaudhry.pdf
+9117fd5695582961a456bd72b157d4386ca6a174,http://pdfs.semanticscholar.org/9117/fd5695582961a456bd72b157d4386ca6a174.pdf,,,http://www.eee.hku.hk/optima/pub/conference/1509_ISTb.pdf
+91df860368cbcebebd83d59ae1670c0f47de171d,http://pdfs.semanticscholar.org/91df/860368cbcebebd83d59ae1670c0f47de171d.pdf,,https://doi.org/10.1007/978-3-319-46466-4_6,http://cs.brown.edu/~gen/website_imgs/cocottributes_eccv2016.pdf
+91067f298e1ece33c47df65236853704f6700a0b,http://pdfs.semanticscholar.org/9106/7f298e1ece33c47df65236853704f6700a0b.pdf,,,http://www.ijste.org/articles/IJSTEV2I11323.pdf
+916ad644614cccae728c8a12c089f01af62fb12e,,,,
+91a1945b9c40af4944a6cdcfe59a0999de4f650a,http://ccbr2017.org/ccbr%20PPT/95%E5%8F%B7%E8%AE%BA%E6%96%87-%E7%94%B3%E6%99%9A%E9%9C%9E%20wanxiahen-ccbr.pdf,,https://doi.org/10.1007/978-3-319-69923-3_10,
+919bdc161485615d5ee571b1585c1eb0539822c8,,http://ieeexplore.ieee.org/document/6460332/,,
+919d3067bce76009ce07b070a13728f549ebba49,http://pdfs.semanticscholar.org/919d/3067bce76009ce07b070a13728f549ebba49.pdf,,,http://www.ijsrp.org/research-paper-0614/ijsrp-p30113.pdf
+9110c589c6e78daf4affd8e318d843dc750fb71a,http://pdfs.semanticscholar.org/9110/c589c6e78daf4affd8e318d843dc750fb71a.pdf,,,http://www1.se.cuhk.edu.hk/~hccl/publications/pub/Facial%20Express%20Synthesis%20Based%20on%20Emotion%20Dimensions%20for%20Affective%20Talking%20Avatar.pdf
+916fbe5e8bec5e7757eeb9d452385db320204ee0,,,,
+9101363521de0ec1cf50349da701996e4d1148c8,,,,http://doi.ieeecomputersociety.org/10.1109/ICIAP.2007.28
+91e57667b6fad7a996b24367119f4b22b6892eca,http://pdfs.semanticscholar.org/91e5/7667b6fad7a996b24367119f4b22b6892eca.pdf,,,http://www.researchgate.net/profile/Marco_Morana2/publication/221355885_Probabilistic_Corner_Detection_for_Facial_Feature_Extraction/links/546bae460cf2397f7831c681.pdf
+91883dabc11245e393786d85941fb99a6248c1fb,http://pdfs.semanticscholar.org/9188/3dabc11245e393786d85941fb99a6248c1fb.pdf,,https://doi.org/10.1016/j.cviu.2017.08.008,http://arxiv.org/abs/1608.04188
+919cb6160db66a8fe0b84cb7f171aded48a13632,,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2014.2327978
+91b1a59b9e0e7f4db0828bf36654b84ba53b0557,http://www.kresttechnology.com/krest-academic-projects/krest-mtech-projects/ECE/MTech%20DSP%202015-16/MTech%20DSP%20BasePaper%202015-16/50.pdf,,https://doi.org/10.1109/TCSVT.2015.2400772,
+9166f46aa3e58befaefd3537e5a11b31ebeea4d0,,,https://doi.org/10.1109/ICIP.2015.7351505,
+919d0e681c4ef687bf0b89fe7c0615221e9a1d30,http://pdfs.semanticscholar.org/919d/0e681c4ef687bf0b89fe7c0615221e9a1d30.pdf,,,http://eprints.qut.edu.au/16289/1/Hossein_Ebrahimpour-Komleh_Thesis.pdf
+91d0e8610348ef4d5d4975e6de99bb2d429af778,,,,http://doi.ieeecomputersociety.org/10.1109/ICIG.2011.61
+913961d716a4102d3428224f999295f12438399f,,,https://doi.org/10.1016/j.patcog.2014.01.016,
+912a6a97af390d009773452814a401e258b77640,http://pdfs.semanticscholar.org/912a/6a97af390d009773452814a401e258b77640.pdf,,https://doi.org/10.1016/j.cviu.2016.07.006,http://arxiv.org/abs/1509.01520
+913062218c7498b2617bb9d7821fe1201659c5cc,,,https://doi.org/10.1109/ICMLA.2012.178,
+91d513af1f667f64c9afc55ea1f45b0be7ba08d4,http://pdfs.semanticscholar.org/91d5/13af1f667f64c9afc55ea1f45b0be7ba08d4.pdf,,,https://arxiv.org/pdf/1706.09887v1.pdf
+91e507d2d8375bf474f6ffa87788aa3e742333ce,http://pdfs.semanticscholar.org/91e5/07d2d8375bf474f6ffa87788aa3e742333ce.pdf,,https://doi.org/10.1007/978-3-642-15549-9_20,https://www.researchgate.net/profile/Gee-Sern_Hsu/publication/221304002_Robust_Face_Recognition_Using_Probabilistic_Facial_Trait_Code/links/09e4150f7d5c0d3ae0000000.pdf
+918b72a47b7f378bde0ba29c908babf6dab6f833,http://pdfs.semanticscholar.org/918b/72a47b7f378bde0ba29c908babf6dab6f833.pdf,,https://doi.org/10.1016/j.patrec.2010.11.008,http://www.researchgate.net/profile/Leihong_Zhang/publication/220646480_Uncorrelated_trace_ratio_linear_discriminant_analysis_for_undersampled_problems/links/02e7e52abf94f7b268000000.pdf
+918fc4c77a436b8a588f63b2b37420b7868fbbf8,,,https://doi.org/10.1016/j.inffus.2015.03.005,
+91e58c39608c6eb97b314b0c581ddaf7daac075e,http://pdfs.semanticscholar.org/91e5/8c39608c6eb97b314b0c581ddaf7daac075e.pdf,,,http://arxiv.org/abs/1702.00307
+91d2fe6fdf180e8427c65ffb3d895bf9f0ec4fa0,http://pdfs.semanticscholar.org/94c3/624c54f8f070a9dc82a41cbf7a888fe8f477.pdf,,,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR_2008/data/papers/396.pdf
+91835984eaeb538606972de47c372c5fcfe8b6aa,http://www.cse.ust.hk/~qnature/pdf/IEEESMC2015.pdf,,https://doi.org/10.1109/SMC.2015.385,
+9103148dd87e6ff9fba28509f3b265e1873166c9,http://pdfs.semanticscholar.org/9103/148dd87e6ff9fba28509f3b265e1873166c9.pdf,,,http://epubs.surrey.ac.uk/808011/1/thesis.pdf
+915d4a0fb523249ecbc88eb62cb150a60cf60fa0,http://pdfs.semanticscholar.org/915d/4a0fb523249ecbc88eb62cb150a60cf60fa0.pdf,,,http://audias.ii.uam.es/files/2000_carnahan_face_scruz.pdf
+65126e0b1161fc8212643b8ff39c1d71d262fbc1,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Ghiasi_Occlusion_Coherence_Localizing_2014_CVPR_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2014.306
+65b737e5cc4a565011a895c460ed8fd07b333600,http://pdfs.semanticscholar.org/7574/f999d2325803f88c4915ba8f304cccc232d1.pdf,,,https://pdfs.semanticscholar.org/7574/f999d2325803f88c4915ba8f304cccc232d1.pdf
+6582f4ec2815d2106957215ca2fa298396dde274,http://mi.eng.cam.ac.uk/~cipolla/publications/article/2007-PAMI-face-sets.pdf,,,http://svr-www.eng.cam.ac.uk/~tkk22/doc/tpami07_final.pdf
+65b1760d9b1541241c6c0222cc4ee9df078b593a,http://pdfs.semanticscholar.org/65b1/760d9b1541241c6c0222cc4ee9df078b593a.pdf,,,http://parnec.nuaa.edu.cn/xtan/paper/xtan-cvpr09.pdf
+655e94eccddbe1b1662432c1237e61cf13a7d57b,,,,http://doi.ieeecomputersociety.org/10.1109/ISIP.2008.147
+65d7f95fcbabcc3cdafc0ad38e81d1f473bb6220,http://pdfs.semanticscholar.org/65d7/f95fcbabcc3cdafc0ad38e81d1f473bb6220.pdf,,,http://worldcomp-proceedings.com/proc/p2013/IKE3410.pdf
+65bba9fba03e420c96ec432a2a82521ddd848c09,http://pdfs.semanticscholar.org/65bb/a9fba03e420c96ec432a2a82521ddd848c09.pdf,,https://doi.org/10.1007/978-3-319-46493-0_9,http://arxiv.org/abs/1607.08584
+6554ca3187b3cbe5d1221592eb546dfc11aac14b,,,,http://doi.acm.org/10.1145/2501643.2501647
+655d9ba828eeff47c600240e0327c3102b9aba7c,http://cs.gmu.edu/~carlotta/publications/kpools.pdf,,https://doi.org/10.1109/TSMCB.2005.846641,http://cs.gmu.edu/~carlotta/publications/zhang-final.pdf
+656a59954de3c9fcf82ffcef926af6ade2f3fdb5,http://pdfs.semanticscholar.org/656a/59954de3c9fcf82ffcef926af6ade2f3fdb5.pdf,,,https://kth.diva-portal.org/smash/get/diva2:1054887/FULLTEXT02.pdf
+65475ce4430fb524675ebab6bcb570dfa07e0041,,,https://doi.org/10.1109/ISR.2013.6695696,
+652aac54a3caf6570b1c10c993a5af7fa2ef31ff,http://pdfs.semanticscholar.org/652a/ac54a3caf6570b1c10c993a5af7fa2ef31ff.pdf,,,http://amp.ece.cmu.edu/Publication/Deepak/thesis.pdf
+656ef752b363a24f84cc1aeba91e4fa3d5dd66ba,http://pdfs.semanticscholar.org/656e/f752b363a24f84cc1aeba91e4fa3d5dd66ba.pdf,,https://doi.org/10.1007/978-3-642-15986-2_40,http://fipa.cs.kit.edu/397.php
+656aeb92e4f0e280576cbac57d4abbfe6f9439ea,http://pdfs.semanticscholar.org/656a/eb92e4f0e280576cbac57d4abbfe6f9439ea.pdf,,,http://jestec.taylors.edu.my/Vol%2012%20issue%201%20January%202017/12_1_12.pdf
+6502cf30c088c6c7c4b2a05b7777b032c9dde7cd,http://vipl.ict.ac.cn/homepage/CVPR15Metric/ref/Learning%20compact%20binary%20face%20descriptor%20for%20face%20recognition_PAMI2015.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2408359
+6577c76395896dd4d352f7b1ee8b705b1a45fa90,http://ai.stanford.edu/~kdtang/papers/icip10_kinship.pdf,,https://doi.org/10.1109/ICIP.2010.5652590,http://users.cis.fiu.edu/~rfang/publications/ICIP10_Kinship.pdf
+65869cc5ef00d581c637ae8ea6ca02ae4bb2b996,,,,http://doi.ieeecomputersociety.org/10.1109/ICDM.2007.65
+659dc6aa517645a118b79f0f0273e46ab7b53cd9,,,https://doi.org/10.1109/ACPR.2015.7486608,
+659db2ceb304984a23f883ee5414168131c3567d,,,,
+650bfe7acc3f03eb4ba91d9f93da8ef0ae8ba772,http://pdfs.semanticscholar.org/650b/fe7acc3f03eb4ba91d9f93da8ef0ae8ba772.pdf,,,http://www.wseas.us/e-library/conferences/2013/Budapest/IPASRE/IPASRE-11.pdf
+6592dcd17fc4df707020904cf5ff0927684f9f23,,,,
+65293ecf6a4c5ab037a2afb4a9a1def95e194e5f,http://pdfs.semanticscholar.org/6529/3ecf6a4c5ab037a2afb4a9a1def95e194e5f.pdf,,,http://www.site.uottawa.ca/~laganier/publications/thesis/MEMP_Thesis.pdf
+65fc8393610fceec665726fe4e48f00dc90f55fb,,,https://doi.org/10.1109/CYBConf.2013.6617455,
+65817963194702f059bae07eadbf6486f18f4a0a,http://arxiv.org/pdf/1505.04141v2.pdf,,https://doi.org/10.1007/s11263-015-0814-0,https://arxiv.org/pdf/1505.04141v2.pdf
+65b9c71a4e5886e3ec8ff1f26038c3c08bd96dcb,,,,
+6581c5b17db7006f4cc3575d04bfc6546854a785,http://pdfs.semanticscholar.org/6581/c5b17db7006f4cc3575d04bfc6546854a785.pdf,,,https://publikationen.bibliothek.kit.edu/1000047232/3527119
+6515fe829d0b31a5e1f4dc2970a78684237f6edb,http://pdfs.semanticscholar.org/6515/fe829d0b31a5e1f4dc2970a78684237f6edb.pdf,,https://doi.org/10.1007/978-3-540-88690-7_13,http://www.ecse.rpi.edu/homepages/qji/Papers/eccv_BN_learning.pdf
+65f25a28629ecfe8bae42a33883a8b9ab3c7d047,,,,
+62d1a31b8acd2141d3a994f2d2ec7a3baf0e6dc4,http://pdfs.semanticscholar.org/62d1/a31b8acd2141d3a994f2d2ec7a3baf0e6dc4.pdf,,https://doi.org/10.1186/s13640-017-0188-z,https://jivp-eurasipjournals.springeropen.com/track/pdf/10.1186/s13640-017-0188-z?site=jivp-eurasipjournals.springeropen.com
+6256b47342f080c62acd106095cf164df2be6020,,,https://doi.org/10.1007/978-3-319-24702-1_6,
+62694828c716af44c300f9ec0c3236e98770d7cf,http://pdfs.semanticscholar.org/6269/4828c716af44c300f9ec0c3236e98770d7cf.pdf,,,http://www.ifets.info/journals/19_2/7.pdf
+62648f91e38b0e8f69dded13b9858bd3a86bb6ed,,,,http://doi.acm.org/10.1145/2647868.2655016
+6261eb75066f779e75b02209fbd3d0f02d3e1e45,http://pdfs.semanticscholar.org/6261/eb75066f779e75b02209fbd3d0f02d3e1e45.pdf,,,http://yugangjiang.info/publication/MediaEval2015-Fudan-Huawei.pdf
+622daa25b5e6af69f0dac3a3eaf4050aa0860396,http://pdfs.semanticscholar.org/af52/4ffcedaa50cff30607e6ad8e270ad0d7bf71.pdf,,,https://arxiv.org/pdf/1303.4778v2.pdf
+62c2d21f78fb89a11b436ab6ca9acd9abca145be,,,,
+62f0d8446adee6a5e8102053a63a61af07ac4098,http://www.vision.cs.chubu.ac.jp/MPRG/C_group/C072_yamashita2015.pdf,,https://doi.org/10.1109/ICIP.2015.7351298,
+622c84d79a9420ed6f3a78f29233d56b1e99cc21,,,,
+62f60039a95692baaeaae79a013c7f545e2a6c3d,http://www.researchgate.net/profile/G_Boato/publication/242336498_Identify_computer_generated_characters_by_analysing_facial_expressions_variation/links/0f3175360a34547478000000.pdf,,https://doi.org/10.1109/WIFS.2012.6412658,
+62374b9e0e814e672db75c2c00f0023f58ef442c,http://pdfs.semanticscholar.org/6237/4b9e0e814e672db75c2c00f0023f58ef442c.pdf,,,http://poseidon.csd.auth.gr/papers/PUBLISHED/JOURNAL/Kotropoulos00c/Kotropoulos00c.ps.Z
+6257a622ed6bd1b8759ae837b50580657e676192,http://pdfs.semanticscholar.org/b8d8/501595f38974e001a66752dc7098db13dfec.pdf,,,http://arxiv.org/abs/1711.09265
+62c2f898fe70c2c7ee2435cbe837be18184431d4,,,,
+6226f2ea345f5f4716ac4ddca6715a47162d5b92,http://pdfs.semanticscholar.org/6226/f2ea345f5f4716ac4ddca6715a47162d5b92.pdf,,https://doi.org/10.3389/frobt.2015.00029,
+62e913431bcef5983955e9ca160b91bb19d9de42,http://pdfs.semanticscholar.org/62e9/13431bcef5983955e9ca160b91bb19d9de42.pdf,,,http://arxiv.org/pdf/1511.04031v1.pdf
+628f9c1454b85ff528a60cd8e43ec7874cf17931,,,,http://doi.acm.org/10.1145/2993148.2993193
+62c435bc714f13a373926e3b1914786592ed1fef,http://assistech.iitd.ernet.in/mavi-embedded-device.pdf,,,http://doi.ieeecomputersociety.org/10.1109/VLSID.2017.38
+62e834114b58a58a2ea2d7b6dd7b0ce657a64317,,,https://doi.org/10.1109/SMC.2014.6973987,
+62415bbd69270e6577136ba7120f4a682251cdbb,,,,
+624e9d9d3d941bab6aaccdd93432fc45cac28d4b,https://arxiv.org/pdf/1505.00296v1.pdf,,https://doi.org/10.1109/CVPRW.2015.7301333,http://wanglimin.github.io/papers/WangWDQ_ChaLearnLAP15_slide.pdf
+620e1dbf88069408b008347cd563e16aeeebeb83,http://pdfs.semanticscholar.org/620e/1dbf88069408b008347cd563e16aeeebeb83.pdf,,https://doi.org/10.1016/j.future.2012.08.013,http://captcharesearch.com/media/1003/facedcaptcha.pdf
+62e61f9f7445e8dec336415ac0c7e677f9f5f7c1,,,https://doi.org/10.1142/S0219467814500065,
+624496296af19243d5f05e7505fd927db02fd0ce,http://ibug.doc.ic.ac.uk/media/uploads/documents/tzimiro_pantic_cvpr_2014.pdf,,,http://eprints.eemcs.utwente.nl/25815/01/Pantic_Gauss-Newton_Deformable_Part_Models.pdf
+621741b87258c745f8905d15ba81aaf2a8be60d2,,,,
+621ed006945e9438910b5aa4f6214888dea3d791,http://figment.cse.usf.edu/~sfefilat/data/papers/ThAT9.20.pdf,,https://doi.org/10.1109/ICPR.2008.4761211,
+621ff353960d5d9320242f39f85921f72be69dc8,http://www.research.rutgers.edu/~xiangyu/paper/FG_2013.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2013.6553723
+62a30f1b149843860938de6dd6d1874954de24b7,http://mmlab.ie.cuhk.edu.hk/archive/2009/09_fast_algorithm.pdf,,https://doi.org/10.1109/TIFS.2009.2025844,
+621e8882c41cdaf03a2c4a986a6404f0272ba511,http://conradsanderson.id.au/pdfs/wong_ijcnn_2012.pdf,,https://doi.org/10.1109/IJCNN.2012.6252611,https://www.researchgate.net/profile/Brian_Lovell2/publication/261086998_On_robust_biometric_identity_verification_via_sparse_encoding_of_faces_Holistic_vs_local_approaches/links/54980c4f0cf2eeefc30f6410.pdf?origin=publication_list
+62e0380a86e92709fe2c64e6a71ed94d152c6643,http://vislab.ucr.edu/PUBLICATIONS/pubs/Journal%20and%20Conference%20Papers/after10-1-1997/Conference/2012/Facial%20emotion%20recognition%20with%20expression%20energy12.pdf,,,http://doi.acm.org/10.1145/2388676.2388777
+621f656fedda378ceaa9c0096ebb1556a42e5e0f,http://sibgrapi.sid.inpe.br/col/sid.inpe.br/sibgrapi/2016/07.19.17.24/doc/PID4367205.pdf?ibiurl.language=en,,,http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2016.022
+6267dbeb54889be5bdb50c338a7c6ef82287084c,,,https://doi.org/10.1109/ICMLC.2010.5580567,
+963a004e208ce4bd26fa79a570af61d31651b3c3,,,https://doi.org/10.1016/j.jvlc.2009.01.011,
+965f8bb9a467ce9538dec6bef57438964976d6d9,http://www4.comp.polyu.edu.hk/~csajaykr/myhome/papers/ISBA2016.pdf,,https://doi.org/10.1109/ISBA.2016.7477243,
+96b6f8ac898c8ef6b947c50bb66fe6b1e6f2fb11,,,,
+961a5d5750f18e91e28a767b3cb234a77aac8305,http://pdfs.semanticscholar.org/961a/5d5750f18e91e28a767b3cb234a77aac8305.pdf,,https://doi.org/10.1007/978-3-319-10593-2_47,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ECCV_2014/papers/8692/86920720.pdf
+9635493998ad60764d7bbf883351af57a668d159,,,https://doi.org/10.1109/IJCNN.2017.7966005,
+96a8f115df9e2c938453282feb7d7b9fde6f4f95,,,,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2016.2593719
+965c4a8087ae208c08e58aaf630ad412ac8ce6e2,,,,http://doi.ieeecomputersociety.org/10.1109/ACII.2013.100
+96b1f2bde46fe4f6cc637398a6a71e8454291a6e,,,https://doi.org/10.1109/TIP.2010.2073476,
+9626bcb3fc7c7df2c5a423ae8d0a046b2f69180c,http://pdfs.semanticscholar.org/9626/bcb3fc7c7df2c5a423ae8d0a046b2f69180c.pdf,,,http://uu.diva-portal.org/smash/get/diva2:1157319/FULLTEXT01.pdf
+968b983fa9967ff82e0798a5967920188a3590a8,http://pdfs.semanticscholar.org/968b/983fa9967ff82e0798a5967920188a3590a8.pdf,,,https://www2.bc.edu/~russeljm/publications/Children's%20recognition%20of%20disgust%20in%20others.pdf
+969fd48e1a668ab5d3c6a80a3d2aeab77067c6ce,http://pdfs.semanticscholar.org/969f/d48e1a668ab5d3c6a80a3d2aeab77067c6ce.pdf,,,http://arxiv.org/abs/1703.10818
+96faccdddef887673d6007fed8ff2574580cae1f,http://pdfs.semanticscholar.org/96fa/ccdddef887673d6007fed8ff2574580cae1f.pdf,,,https://arxiv.org/pdf/1703.09145v1.pdf
+96ec76d2579a3b877019e715da58d8c47d343399,,,,
+96fbadc5fa1393d59ce0b8fd3d71aebc1fe35b40,,,https://doi.org/10.1109/ICIP.2016.7532959,
+961939e96eed6620b1752721ab520745ac5329c6,http://www.cs.umd.edu/~gaurav/research/frgcWorkshop.pdf,,,http://www.umiacs.umd.edu/~soma/pdf/frgc_umd_data_cvpr05ws.pdf
+966cf4ca224e239a7192f9e79b60cc88aa604e27,,,,
+96ccd94151a348c9829ab1d943cb13e9e933952f,,,,
+960ad662c2bb454d69006492cc3f52d1550de55d,http://www.research.att.com/~yifanhu/PUB/gmap_cga.pdf,,,http://doi.ieeecomputersociety.org/10.1109/MCG.2010.101
+9696b172d66e402a2e9d0a8d2b3f204ad8b98cc4,http://pdfs.semanticscholar.org/9696/b172d66e402a2e9d0a8d2b3f204ad8b98cc4.pdf,,https://doi.org/10.3745/JIPS.2013.9.1.173,https://pdfs.semanticscholar.org/9696/b172d66e402a2e9d0a8d2b3f204ad8b98cc4.pdf
+964a3196d44f0fefa7de3403849d22bbafa73886,http://pdfs.semanticscholar.org/964a/3196d44f0fefa7de3403849d22bbafa73886.pdf,,https://doi.org/10.1016/j.neucom.2015.05.079,http://www.escience.cn/system/download/82452
+96f4a1dd1146064d1586ebe86293d02e8480d181,http://pdfs.semanticscholar.org/96f4/a1dd1146064d1586ebe86293d02e8480d181.pdf,,,http://www.ijates.com/images/short_pdf/1457757107_1029B.pdf
+9652f154f4ae7807bdaff32d3222cc0c485a6762,,,https://doi.org/10.1007/s00138-016-0760-z,
+9606b1c88b891d433927b1f841dce44b8d3af066,http://pdfs.semanticscholar.org/9606/b1c88b891d433927b1f841dce44b8d3af066.pdf,,,https://arxiv.org/pdf/1803.05026v1.pdf
+966e36f15b05ef8436afecf57a97b73d6dcada94,http://pdfs.semanticscholar.org/966e/36f15b05ef8436afecf57a97b73d6dcada94.pdf,,,http://mediatum.ub.tum.de/doc/1243842/284129.pdf
+96d34c1a749e74af0050004162d9dc5132098a79,,,https://doi.org/10.1109/TNN.2005.844909,
+962812d28a169b3fc1d4323f8d0fca69a22dac4c,,,,
+969dd8bc1179c047523d257516ade5d831d701ad,http://pdfs.semanticscholar.org/969d/d8bc1179c047523d257516ade5d831d701ad.pdf,,https://doi.org/10.1016/j.patcog.2017.01.011,http://liusi-group.com/pdf/faceverification-pr-2017.pdf
+96578785836d7416bf2e9c154f687eed8f93b1e4,http://pdfs.semanticscholar.org/9657/8785836d7416bf2e9c154f687eed8f93b1e4.pdf,,,https://www.cbica.upenn.edu/sbia/papers/503.pdf
+965f3a60a762712c3fc040724e507d00357f8709,,,,
+96ab0367d0112b6092cc130c330c8c11c2eb8238,,,,
+96e0cfcd81cdeb8282e29ef9ec9962b125f379b0,http://megaface.cs.washington.edu/KemelmacherMegaFaceCVPR16.pdf,,,http://openaccess.thecvf.com/content_cvpr_2016/papers/Kemelmacher-Shlizerman_The_MegaFace_Benchmark_CVPR_2016_paper.pdf
+968f472477a8afbadb5d92ff1b9c7fdc89f0c009,http://pdfs.semanticscholar.org/968f/472477a8afbadb5d92ff1b9c7fdc89f0c009.pdf,,,http://www.ifaamas.org/Proceedings/aamas2017/pdfs/p1643.pdf
+96e731e82b817c95d4ce48b9e6b08d2394937cf8,http://arxiv.org/pdf/1508.01722v2.pdf,,,http://www.rci.rutgers.edu/~vmp93/Conference_pub/WACV_2016_janus_DCNN.pdf
+9686dcf40e6fdc4152f38bd12b929bcd4f3bbbcc,http://pdfs.semanticscholar.org/9686/dcf40e6fdc4152f38bd12b929bcd4f3bbbcc.pdf,,,http://pnrsolution.org/Datacenter/Vol3/Issue1/95.pdf
+96e0b67f34208b85bd90aecffdb92bc5134befc8,,,https://doi.org/10.1016/j.patcog.2007.10.002,
+9636c7d3643fc598dacb83d71f199f1d2cc34415,http://pdfs.semanticscholar.org/9636/c7d3643fc598dacb83d71f199f1d2cc34415.pdf,,https://doi.org/10.1016/j.patrec.2015.05.005,http://web.ing.puc.cl/~dmery/Prints/ISI-Journals/2015-PLR.pdf
+3abe50d0a806a9f5a5626f60f590632a6d87f0c4,http://vis.uky.edu/~gravity/publications/2008/Estimating_Xinyu.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2008.4563129
+3af8d38469fb21368ee947d53746ea68cd64eeae,http://pdfs.semanticscholar.org/3af8/d38469fb21368ee947d53746ea68cd64eeae.pdf,,,http://www.ifaamas.org/Proceedings/aamas2013/docs/p1461.pdf
+3a2fc58222870d8bed62442c00341e8c0a39ec87,http://pdfs.semanticscholar.org/3a2f/c58222870d8bed62442c00341e8c0a39ec87.pdf,,,http://www.cs.technion.ac.il/users/wwwb/cgi-bin/tr-get.cgi/2014/MSC/MSC-2014-02.pdf
+3a9fbd05aaab081189a8eea6f23ed730fa6db03c,,,https://doi.org/10.1109/ICASSP.2013.6638305,
+3a76e9fc2e89bdd10a9818f7249fbf61d216efc4,http://openaccess.thecvf.com/content_ICCV_2017/papers/Nagpal_Face_Sketch_Matching_ICCV_2017_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2017.579
+3a92de0a4a0ef4f88e1647633f1fbb13cd6a3c95,http://impca.cs.curtin.edu.au/pubs/2007/conferences/an_liu_venkatesh_cvpr07.pdf,,,http://www.impca.cs.curtin.edu.au/pubs/2007/conferences/an_liu_venkatesh_cvpr07.pdf
+3a0ea368d7606030a94eb5527a12e6789f727994,http://pdfs.semanticscholar.org/c7ca/eb8ecb6a38bdd65ddd25aca4fdd79203ddef.pdf,,,http://www.bheisele.com/nips2001.pdf
+3a804cbf004f6d4e0b041873290ac8e07082b61f,http://pdfs.semanticscholar.org/5ce8/e665a6512c09f15d8528ce6bece1f6a4d138.pdf,,,http://www.umiacs.umd.edu/~hal/docs/daume11robotic.pdf
+3a04eb72aa64760dccd73e68a3b2301822e4cdc3,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Peng_Scalable_Sparse_Subspace_2013_CVPR_paper.pdf,,,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989a430.pdf
+3ab036b680e8408ec74f78a918f3ffbf6c906d70,,,,
+3af130e2fd41143d5fc49503830bbd7bafd01f8b,http://pdfs.semanticscholar.org/db76/002794c12e5febc30510de58b54bb9344ea9.pdf,,,http://www.aaai.org/ocs/index.php/WS/AAAIW14/paper/download/8802/8329
+3a2cf589f5e11ca886417b72c2592975ff1d8472,http://pdfs.semanticscholar.org/3a2c/f589f5e11ca886417b72c2592975ff1d8472.pdf,,,http://www.ml.cmu.edu/research/dap-papers/F17/dap-wang-yijie.pdf
+3ada7640b1c525056e6fcd37eea26cd638815cd6,http://pdfs.semanticscholar.org/3ada/7640b1c525056e6fcd37eea26cd638815cd6.pdf,,,http://arxiv.org/abs/1411.2214
+3abc833f4d689f37cc8a28f47fb42e32deaa4b17,http://www.cs.virginia.edu/~vicente/files/ijcv_bigdata.pdf,,https://doi.org/10.1007/s11263-015-0840-y,http://www.cs.unc.edu/~vicente/files/ijcv_bigdata.pdf
+3aebaaf888cba25be25097173d0b3af73d9ce7f9,,,,http://doi.ieeecomputersociety.org/10.1109/MMUL.2014.49
+3acb6b3e3f09f528c88d5dd765fee6131de931ea,http://vislab.ucr.edu/PUBLICATIONS/pubs/Journal%20and%20Conference%20Papers/after10-1-1997/Conference/2017/novelRepresentation.pdf,,https://doi.org/10.1109/ICIP.2017.8296393,
+3a05415356bd574cad1a9f1be21214e428bbc81b,,,,
+3a0796161d838f9dc51c0ee5f700e668fa206db3,,,,
+3a60678ad2b862fa7c27b11f04c93c010cc6c430,http://ibug.doc.ic.ac.uk/media/uploads/documents/taffcsi-2010-11-0112-2.pdf,,,http://doi.ieeecomputersociety.org/10.1109/T-AFFC.2011.25
+3a591a9b5c6d4c62963d7374d58c1ae79e3a4039,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2014/W04/papers/Artan_Driver_Cell_Phone_2014_CVPR_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2014.42
+3aa9c8c65ce63eb41580ba27d47babb1100df8a3,http://www.csb.uncw.edu/mscsis/complete/pdf/VandeventerJason_Final.pdf,,https://doi.org/10.1109/BTAS.2012.6374595,
+3a0a839012575ba455f2b84c2d043a35133285f9,http://pdfs.semanticscholar.org/76a1/dca3a9c2b0229c1b12c95752dcf40dc95a11.pdf,,,http://www.umiacs.umd.edu/~yzyang/paper/sengen_emnlp2011_final.pdf
+3af1a375c7c1decbcf5c3a29774e165cafce390c,https://www.cbica.upenn.edu/sbia/papers/540.pdf,,,http://www.researchgate.net/profile/Ruben_Gur/publication/232629150_Quantifying_Facial_Expression_Abnormality_in_Schizophrenia_by_Combining_2D_and_3D_Features/links/02e7e515c945fbbf9c000000.pdf
+3a846704ef4792dd329a5c7a2cb8b330ab6b8b4e,http://www.wjscheirer.com/papers/wjs_cswb2010_grab.pdf,,https://doi.org/10.1109/CVPRW.2010.5544597,http://www.cse.lehigh.edu/~tboult/PAPERS/Sapkota-et-al-FACEGRAB-IEEE-Bioworkshop-2010.pdf
+3a2a37ca2bdc82bba4c8e80b45d9f038fe697c7d,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Vahdat_Handling_Uncertain_Tags_2013_ICCV_paper.pdf,,,https://arash-vahdat.github.io/vahdat-iccv13a.pdf
+3a95eea0543cf05670e9ae28092a114e3dc3ab5c,https://arxiv.org/pdf/1209.0841v7.pdf,,https://doi.org/10.1109/TCYB.2016.2536752,http://www.pengxi.me/wp-content/uploads/Papers/2016-TCYB-L2graph.pdf
+3a1c40eced07d59a3ea7acda94fa833c493909c1,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.111
+3ad56aed164190e1124abea4a3c4e1e868b07dee,,,https://doi.org/10.1016/j.patcog.2015.12.016,
+3a0425c25beea6c4c546771adaf5d2ced4954e0d,,,,https://link.springer.com/book/10.1007/978-3-319-58347-1
+3a4f522fa9d2c37aeaed232b39fcbe1b64495134,http://ijireeice.com/upload/2016/may-16/IJIREEICE%20101.pdf,,https://doi.org/10.1109/TMM.2015.2420374,
+54bb25a213944b08298e4e2de54f2ddea890954a,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Moschoglou_AgeDB_The_First_CVPR_2017_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2017.250
+54bae57ed37ce50e859cbc4d94d70cc3a84189d5,http://pdfs.semanticscholar.org/af65/4a7ec15168b16382bd604889ea07a967dac6.pdf,,https://doi.org/10.1109/ICASSP.2003.1202497,http://www.cse.cuhk.edu.hk/~lyu/paper_pdf/icassp03.pdf
+54f442c7fa4603f1814ebd8eba912a00dceb5cb2,http://pdfs.semanticscholar.org/54f4/42c7fa4603f1814ebd8eba912a00dceb5cb2.pdf,,,http://people.csail.mit.edu/finale/papers/finale_msc.pdf
+54058859a2ddf4ecfc0fe7ccbea7bb5f29d9201d,,,https://doi.org/10.1007/978-3-319-50832-0_36,
+54483d8b537e51317a8e6c6caf4949d4440c9368,,,,
+543f21d81bbea89f901dfcc01f4e332a9af6682d,http://pdfs.semanticscholar.org/543f/21d81bbea89f901dfcc01f4e332a9af6682d.pdf,,,https://arxiv.org/pdf/1511.06390v2.pdf
+5456166e3bfe78a353df988897ec0bd66cee937f,http://pdfs.semanticscholar.org/5456/166e3bfe78a353df988897ec0bd66cee937f.pdf,,,http://www.csc.kth.se/cvap/cvg/papers/prune_v1_2_camera_ready.pdf
+546b4a865af7e9493270ee2c8f644070b534019d,,,,
+541f1436c8ffef1118a0121088584ddbfd3a0a8a,http://crcv.ucf.edu/ICCV13-Action-Workshop/index.files/NotebookPapers13/A%20Spatio-Temporal%20Feature%20based%20on%20Triangulation%20of%20Dense%20SURF.pdf,,,http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W14/papers/Nga_A_Spatio-temporal_Feature_2013_ICCV_paper.pdf
+548233d67f859491e50c5c343d7d77a7531d4221,,,https://doi.org/10.1007/s11042-007-0176-x,
+54aacc196ffe49b3450059fccdf7cd3bb6f6f3c3,http://www.cs.toronto.edu/~vnair/iccv11.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2011.6126373
+5491478ae2c58af21389ed3af21babd362511a8e,,,,http://doi.acm.org/10.1145/2949035.2949048
+54e988bc0764073a5db2955705d4bfa8365b7fa9,,,,http://doi.acm.org/10.1145/2522848.2531749
+541bccf19086755f8b5f57fd15177dc49e77d675,http://pdfs.semanticscholar.org/541b/ccf19086755f8b5f57fd15177dc49e77d675.pdf,,,http://dspace.mit.edu/bitstream/handle/1721.1/37144/MIT-CSAIL-TR-2007-022.pdf?sequence=1
+5480aee1d01700bb98f5a0e06dd15bf36a4e45ea,,,,
+54966a5ac5a2aa19760fb5197889fa9dcccac1d1,,,,
+5495e224ac7b45b9edc5cfeabbb754d8a40a879b,http://pdfs.semanticscholar.org/5495/e224ac7b45b9edc5cfeabbb754d8a40a879b.pdf,,,http://openaccess.thecvf.com/content_ICCV_2017/supplemental/Peng_Reconstruction-Based_Disentanglement_for_ICCV_2017_supplemental.pdf
+54756f824befa3f0c2af404db0122f5b5bbf16e0,http://pdfs.semanticscholar.org/5475/6f824befa3f0c2af404db0122f5b5bbf16e0.pdf,,,http://www.cs.columbia.edu/~aberg/alex/aberg_research.pdf
+549c719c4429812dff4d02753d2db11dd490b2ae,http://openaccess.thecvf.com/content_cvpr_2017/papers/Real_YouTube-BoundingBoxes_A_Large_CVPR_2017_paper.pdf,,,https://arxiv.org/pdf/1702.00824v1.pdf
+98b2f21db344b8b9f7747feaf86f92558595990c,http://pdfs.semanticscholar.org/b9f0/29075a36f15202f0d213fe222dcf237fe65f.pdf,,,https://arxiv.org/pdf/1705.07904v1.pdf
+98142103c311b67eeca12127aad9229d56b4a9ff,http://pdfs.semanticscholar.org/9814/2103c311b67eeca12127aad9229d56b4a9ff.pdf,,,http://pubman.mpdl.mpg.de/pubman/item/escidoc:2460750:1/component/escidoc:2460749/arXiv:1704.08763.pdf
+9820920d4544173e97228cb4ab8b71ecf4548475,http://pdfs.semanticscholar.org/9820/920d4544173e97228cb4ab8b71ecf4548475.pdf,,,
+989332c5f1b22604d6bb1f78e606cb6b1f694e1a,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Wang_Recurrent_Face_Aging_CVPR_2016_paper.pdf,,,http://openaccess.thecvf.com/content_cvpr_2016/papers/Wang_Recurrent_Face_Aging_CVPR_2016_paper.pdf
+982f5c625d6ad0dac25d7acbce4dabfb35dd7f23,http://pdfs.semanticscholar.org/982f/5c625d6ad0dac25d7acbce4dabfb35dd7f23.pdf,,,https://pdfs.semanticscholar.org/982f/5c625d6ad0dac25d7acbce4dabfb35dd7f23.pdf
+98af221afd64a23e82c40fd28d25210c352e41b7,http://pdfs.semanticscholar.org/d2fb/a31b394ea016b57f45bead77534fd8f7fbfa.pdf,,,http://www.isca-speech.org/archive/avsp10/papers/av10_P7.pdf
+9888ce5cb5cae8ba4f288806d126b1114e0a7f9b,,,,
+9893865afdb1de55fdd21e5d86bbdb5daa5fa3d5,http://pdfs.semanticscholar.org/9893/865afdb1de55fdd21e5d86bbdb5daa5fa3d5.pdf,,https://doi.org/10.1007/3-540-44887-X_65,http://perso.telecom-paristech.fr/~chollet/Biblio/Articles/Domaines/BIOMET/Face/Kumar/IlluminLogAVBPA03.pdf
+988d1295ec32ce41d06e7cf928f14a3ee079a11e,http://pdfs.semanticscholar.org/988d/1295ec32ce41d06e7cf928f14a3ee079a11e.pdf,,,https://www.cs.uoregon.edu/Reports/ORAL-201509-Wang.pdf
+98a120802aef324599e8b9014decfeb2236a78a3,http://nyunetworks.com/Pubs/butler-chi16.pdf,,,http://doi.acm.org/10.1145/2851581.2892535
+98856ab9dc0eab6dccde514ab50c823684f0855c,,,https://doi.org/10.1109/TIFS.2012.2191962,
+98c548a4be0d3b62971e75259d7514feab14f884,http://pdfs.semanticscholar.org/98c5/48a4be0d3b62971e75259d7514feab14f884.pdf,,,https://arxiv.org/pdf/1703.07140v1.pdf
+9887ab220254859ffc7354d5189083a87c9bca6e,http://pdfs.semanticscholar.org/9887/ab220254859ffc7354d5189083a87c9bca6e.pdf,,,https://arxiv.org/pdf/1309.5594v2.pdf
+985cd420c00d2f53965faf63358e8c13d1951fa8,http://pdfs.semanticscholar.org/985c/d420c00d2f53965faf63358e8c13d1951fa8.pdf,,https://doi.org/10.1007/978-3-319-16817-3_5,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ACCV_2014/pages/PDF/704.pdf
+981449cdd5b820268c0876477419cba50d5d1316,http://pdfs.semanticscholar.org/9814/49cdd5b820268c0876477419cba50d5d1316.pdf,,,https://arxiv.org/pdf/1801.05365v1.pdf
+982ede05154c1afdcf6fc623ba45186a34f4b9f2,,,https://doi.org/10.1109/TMM.2017.2659221,
+9863dd1e2a3d3b4910a91176ac0f2fee5eb3b5e1,http://xm2vtsdb.ee.surrey.ac.uk/CVSSP/Publications/papers/kim-ieee-2006.pdf,,https://doi.org/10.1109/TCSVT.2006.881197,http://mi.eng.cam.ac.uk/~tkk22/doc/tcsvt06_pub.pdf
+982d4f1dee188f662a4b5616a045d69fc5c21b54,,,https://doi.org/10.1109/IJCNN.2016.7727859,
+9821669a989a3df9d598c1b4332d17ae8e35e294,http://pdfs.semanticscholar.org/9821/669a989a3df9d598c1b4332d17ae8e35e294.pdf,,https://doi.org/10.1007/978-3-642-33783-3_3,http://www.cs.tau.ac.il/~wolf/papers/minCorr.pdf
+9854145f2f64d52aac23c0301f4bb6657e32e562,http://www.ucsp.edu.pe/sibgrapi2013/eproceedings/technical/114953_2.pdf,,,http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2013.57
+985bbe1d47b843fa0b974b4db91be23f218d1ce7,,,https://doi.org/10.1007/978-3-319-68121-4,
+98c2053e0c31fab5bcb9ce5386335b647160cc09,https://smartech.gatech.edu/bitstream/handle/1853/45502/GT-CS-12-10.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICDCSW.2013.44
+98127346920bdce9773aba6a2ffc8590b9558a4a,http://disi.unitn.it/~duta/pubs/MTAP2017_Duta.pdf,,https://doi.org/10.1007/s11042-017-4795-6,
+98a660c15c821ea6d49a61c5061cd88e26c18c65,http://pdfs.semanticscholar.org/98a6/60c15c821ea6d49a61c5061cd88e26c18c65.pdf,,,http://www.iosrjen.org/Papers/vol3_issue4%20(part-1)/F03414348.pdf
+982fed5c11e76dfef766ad9ff081bfa25e62415a,https://pdfs.semanticscholar.org/c7fa/d91ba4e33f64d584c928b1200327815f09e6.pdf,,https://doi.org/10.1109/TIP.2015.2409738,http://www.citi.sinica.edu.tw/papers/ycwang/4607-F.pdf
+988849863c3a45bcedacf8bd5beae3cc9210ce28,,,,http://doi.ieeecomputersociety.org/10.1109/TPDS.2016.2539164
+98c5dc00bd21a39df1d4411641329bdd6928de8a,,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2011.5995447
+98fb3890c565f1d32049a524ec425ceda1da5c24,http://pdfs.semanticscholar.org/98fb/3890c565f1d32049a524ec425ceda1da5c24.pdf,,https://doi.org/10.1007/978-3-319-16631-5_46,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ACCV_2014/pages/workshop15/pdffiles/w15-p3.pdf
+98519f3f615e7900578bc064a8fb4e5f429f3689,http://pdfs.semanticscholar.org/9851/9f3f615e7900578bc064a8fb4e5f429f3689.pdf,,https://doi.org/10.1007/978-1-4471-6296-4_13,https://www.cs.umd.edu/~qiu/pub/da_chapter.pdf
+9825aa96f204c335ec23c2b872855ce0c98f9046,http://pdfs.semanticscholar.org/9825/aa96f204c335ec23c2b872855ce0c98f9046.pdf,,,http://ijeee.in/wp-content/uploads/2014/05/jyoti-patil.pdf
+98218fa05a171a641435c154afa17bc99cf3375e,,,,
+980266ad6807531fea94252e8f2b771c20e173b3,http://pdfs.semanticscholar.org/9802/66ad6807531fea94252e8f2b771c20e173b3.pdf,,https://doi.org/10.1007/978-3-642-33786-4_19,http://humansensing.cs.cmu.edu/projects/contreg/contreg.pdf
+53d78c8dbac7c9be8eb148c6a9e1d672f1dd72f9,http://pdfs.semanticscholar.org/53d7/8c8dbac7c9be8eb148c6a9e1d672f1dd72f9.pdf,,,http://www.vision.caltech.edu/publications/phdthesis_holub.pdf
+5364e58ba1f4cdfcffb247c2421e8f56a75fad8d,,,https://doi.org/10.1109/VCIP.2017.8305113,
+53fdcc3a5a7e42590c21bbb4fe90d7f353ca21e5,,,,
+53cfe4817ac2eecbe4e286709a9140a5fe729b35,http://www.cv.iit.nrc.ca/VI/fpiv04/pdf/17fa.pdf,,https://doi.org/10.1109/CVPR.2004.349,http://www.ece.osu.edu/~aleix/fpiv04.pdf
+539cb169fb65a5542c84f42efcd5d2d925e87ebb,,,https://doi.org/10.1109/ICB.2015.7139098,
+5334ac0a6438483890d5eef64f6db93f44aacdf4,http://pdfs.semanticscholar.org/5334/ac0a6438483890d5eef64f6db93f44aacdf4.pdf,,,http://www.robots.ox.ac.uk/~minhhoai/papers/RMP_BMVC14.pdf
+53e081f5af505374c3b8491e9c4470fe77fe7934,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Hsieh_Unconstrained_Realtime_Facial_2015_CVPR_paper.pdf,,,http://www.cs.ubc.ca/~chyma/publications/ur/2015_ur_paper.pdf
+53698b91709112e5bb71eeeae94607db2aefc57c,http://pdfs.semanticscholar.org/5369/8b91709112e5bb71eeeae94607db2aefc57c.pdf,,,http://papers.nips.cc/paper/5353-two-stream-convolutional-networks-for-action-recognition-in-videos.pdf
+5375a3344017d9502ebb4170325435de3da1fa16,,,https://doi.org/10.1007/978-3-642-37444-9,
+5304cd17f9d6391bf31276e4419100f17d4423b2,,,https://doi.org/10.1109/ICIP.2012.6466930,
+531fd9be964d18ba7970bd1ca6c3b9dc91b8d2ab,http://pdfs.semanticscholar.org/531f/d9be964d18ba7970bd1ca6c3b9dc91b8d2ab.pdf,,,http://www.psych.nyu.edu/vanbavel/lab/documents/Park.etal.2012.BP.pdf
+53873fe7bbd5a2d171e2b1babc9cacaad6cabe45,,,https://doi.org/10.1109/TCYB.2015.2417211,
+5394d42fd27b7e14bd875ec71f31fdd2fcc8f923,http://pdfs.semanticscholar.org/5394/d42fd27b7e14bd875ec71f31fdd2fcc8f923.pdf,,,http://arxiv.org/abs/1504.04792
+534159e498e9cc61ea10917347637a59af38142d,,,https://doi.org/10.1016/j.neucom.2016.01.126,
+539bbf8e4916481bd089d5641175085edf4cf049,,,,
+5397c34a5e396658fa57e3ca0065a2878c3cced7,http://www.iis.sinica.edu.tw/papers/song/5959-F.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2005.144
+539ca9db570b5e43be0576bb250e1ba7a727d640,http://pdfs.semanticscholar.org/539c/a9db570b5e43be0576bb250e1ba7a727d640.pdf,,https://doi.org/10.5244/C.25.29,http://www.vision.ee.ethz.ch/en/publications/papers/proceedings/eth_biwi_00865.pdf
+539287d8967cdeb3ef60d60157ee93e8724efcac,http://pdfs.semanticscholar.org/e5ae/05a05eefbf416eb2e13ec080f1a166dde735.pdf,,,http://arxiv.org/pdf/1509.00153v2.pdf
+532f7ec8e0c8f7331417dd4a45dc2e8930874066,http://mirlab.org/conference_papers/International_Conference/ICASSP%202014/papers/p6060-zoidi.pdf,,https://doi.org/10.1109/ICASSP.2014.6854759,https://research-information.bris.ac.uk/ws/files/77093301/Ioannis_Pitas_Semi_Supervised_Dimensionality_Reduction_on_Data_with_Multiple_Representations_for_Label_Propagation_on_Facial_Images.pdf
+53c8cbc4a3a3752a74f79b74370ed8aeed97db85,http://pdfs.semanticscholar.org/53c8/cbc4a3a3752a74f79b74370ed8aeed97db85.pdf,,https://doi.org/10.1016/j.patrec.2013.02.002,http://www.cse.msu.edu/~liuxm/publication/chen_Liu_Tu_Aragones_PRL_2013.pdf
+5366573e96a1dadfcd4fd592f83017e378a0e185,http://pdfs.semanticscholar.org/5366/573e96a1dadfcd4fd592f83017e378a0e185.pdf,,,https://arxiv.org/pdf/1711.08801v1.pdf
+53509017a25ac074b5010bb1cdba293cdf399e9b,,,,http://doi.ieeecomputersociety.org/10.1109/AVSS.2012.41
+539f55c0e2501c1d86791c8b54b225d9b3187b9c,,,https://doi.org/10.1109/TIP.2017.2738560,
+539ffd51f18404e1ef83371488cf5a27cd16d064,,,https://doi.org/10.1049/iet-ipr.2014.0733,
+533bfb82c54f261e6a2b7ed7d31a2fd679c56d18,http://biometrics.cse.msu.edu/Publications/Face/BestRowdenetal_UnconstrainedFaceRecognition_TechReport_MSU-CSE-14-1.pdf,,https://doi.org/10.1109/TIFS.2014.2359577,http://www.cse.msu.edu/rgroups/biometrics/Publications/Face/BestRowdenetal_UnconstrainedFaceRecognition_TIFS2014.pdf
+539ae0920815eb248939165dd5d1b0188ff7dca2,http://www.ele.puc-rio.br/~visao/Topicos/Prince%20and%20Helder%202007%20Probabilistic%20linear%20discriminant%20analysis.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2007.4409052
+537d8c4c53604fd419918ec90d6ef28d045311d0,https://arxiv.org/pdf/1704.08821v1.pdf,,,http://doi.ieeecomputersociety.org/10.1109/AVSS.2017.8078534
+530ce1097d0681a0f9d3ce877c5ba31617b1d709,https://pdfs.semanticscholar.org/530c/e1097d0681a0f9d3ce877c5ba31617b1d709.pdf,,https://doi.org/10.1109/CICAC.2013.6595214,http://ogma.newcastle.edu.au:8080/vital/access/services/Download/uon:14150/ATTACHMENT02
+5305bfdff39ae74d2958ba28d42c16495ce2ff86,,,https://doi.org/10.1109/DICTA.2014.7008128,
+53e34ff4639806b7599c846f219c02b025da9d13,,,,
+3fbd68d1268922ee50c92b28bd23ca6669ff87e5,http://pdfs.semanticscholar.org/f563/6a8021c09870c350e7505c87625fe1681bd4.pdf,,https://doi.org/10.1109/83.913594,http://www.cs.njit.edu/~liu/papers/ShapeTexture.pdf
+3fe4109ded039ac9d58eb9f5baa5327af30ad8b6,http://www.cvc.uab.cat/~ahernandez/files/CVPR2010STGRABCUT.pdf,,https://doi.org/10.1109/CVPRW.2010.5543824,http://www.cvc.uab.es/~petia/2010/Toni%20CVPR2010STGRABCUT.pdf
+3f22a4383c55ceaafe7d3cfed1b9ef910559d639,http://pdfs.semanticscholar.org/3f22/a4383c55ceaafe7d3cfed1b9ef910559d639.pdf,,,https://arxiv.org/pdf/1801.06432v1.pdf
+3fefc856a47726d19a9f1441168480cee6e9f5bb,http://pdfs.semanticscholar.org/e0e6/bf37d374f9c5cb2461ea87190e234c466d63.pdf,,,http://ri.cmu.edu/pub_files/2014/8/l_trutoiu_robotics_2014-1.pdf
+3fdcc1e2ebcf236e8bb4a6ce7baf2db817f30001,http://pdfs.semanticscholar.org/4032/8c9de5a0a90a8c24e80db7924f0281b46484.pdf,,https://doi.org/10.1007/978-3-319-22979-9_28,http://staffwww.dcs.shef.ac.uk/people/A.Damianou/papers/Damianou_LivMachines15.pdf
+3f7cf52fb5bf7b622dce17bb9dfe747ce4a65b96,http://poseidon.csd.auth.gr/papers/PUBLISHED/JOURNAL/pdf/2014/MM02014.pdf,,https://doi.org/10.1109/TMM.2014.2315595,
+3f2a44dcf0ba3fc72b24c7f09bb08e25797398c1,,,https://doi.org/10.1109/IJCNN.2017.7966210,
+3f0c51989c516a7c5dee7dec4d7fb474ae6c28d9,https://arxiv.org/pdf/1611.06638.pdf,,,http://arxiv.org/abs/1611.06638
+3f848d6424f3d666a1b6dd405a48a35a797dd147,http://pdfs.semanticscholar.org/4f69/233cd6f0b56833c9395528aa007b63158a1d.pdf,,,http://www.bmva.org/bmvc/2014/files/abstract048.pdf
+3fe1cfd2dc69a23c0b0cdf9456c057e6ea1ee1b9,,,,
+3fa738ab3c79eacdbfafa4c9950ef74f115a3d84,http://pdfs.semanticscholar.org/3fa7/38ab3c79eacdbfafa4c9950ef74f115a3d84.pdf,,https://doi.org/10.1007/978-3-319-10578-9_47,http://www.cs.ucf.edu/~aroshan/index_files/DaMN_ECCV14.pdf
+3fb26f3abcf0d287243646426cd5ddeee33624d4,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Qin_Joint_Training_of_CVPR_2016_paper.pdf,,,http://openaccess.thecvf.com/content_cvpr_2016/papers/Qin_Joint_Training_of_CVPR_2016_paper.pdf
+3f57c3fc2d9d4a230ccb57eed1d4f0b56062d4d5,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2013/W16/papers/Hsu_Face_Recognition_across_2013_CVPR_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2013.128
+3fa628e7cff0b1dad3f15de98f99b0fdb09df834,,,,http://doi.ieeecomputersociety.org/10.1109/ICME.2013.6607603
+3feb69531653e83d0986a0643e4a6210a088e3e5,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR_2007/data/papers/workshops/SLAM2007/papers/10-1569042275.pdf,,,http://amp.ece.cmu.edu/people/Andy/Andy_files/slam2007IEEE.pdf
+3ffbc912de7bad720c995385e1fdc439b1046148,,,,http://doi.ieeecomputersociety.org/10.1109/IIH-MSP.2008.347
+3f12701449a82a5e01845001afab3580b92da858,http://pdfs.semanticscholar.org/e4f5/2f5e116f0cc486d033e4b8fc737944343db7.pdf,,https://doi.org/10.1007/978-3-319-10584-0_39,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ECCV_2014/papers/8695/86950599.pdf
+3f204a413d9c8c16f146c306c8d96b91839fed0c,http://www.menpo.org/pages/paper/Menpo_ACM_MM_2014.pdf,,,http://doi.acm.org/10.1145/2647868.2654890
+3fde656343d3fd4223e08e0bc835552bff4bda40,http://pdfs.semanticscholar.org/3fde/656343d3fd4223e08e0bc835552bff4bda40.pdf,,,http://www.ijcsmc.com/docs/papers/April2013/abstracts/V2I4201364.pdf
+3fe3d6ff7e5320f4395571131708ecaef6ef4550,,,https://doi.org/10.1109/SITIS.2016.60,
+3fd092b96c3339507732263c9e6379b307c26073,,,,
+3f957142ef66f2921e7c8c7eadc8e548dccc1327,https://ibug.doc.ic.ac.uk/media/uploads/documents/combined_model_lda_&_svms.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2014.140
+3fdfd6fa7a1cc9142de1f53e4ac7c2a7ac64c2e3,http://pdfs.semanticscholar.org/3fdf/d6fa7a1cc9142de1f53e4ac7c2a7ac64c2e3.pdf,,https://doi.org/10.1007/978-3-319-26561-2_27,https://pdfs.semanticscholar.org/3fdf/d6fa7a1cc9142de1f53e4ac7c2a7ac64c2e3.pdf
+3f540faf85e1f8de6ce04fb37e556700b67e4ad3,http://pdfs.semanticscholar.org/3f54/0faf85e1f8de6ce04fb37e556700b67e4ad3.pdf,,https://doi.org/10.3390/e19050228,http://www.mdpi.com/1099-4300/19/5/228/pdf
+3f88ea8cf2eade325b0f32832561483185db5c10,,,https://doi.org/10.1109/TIP.2017.2721838,
+3fb3c7dd12561e9443ac301f5527d539b1f4574e,http://www.research.rutgers.edu/~shaoting/paper/ICCV13.pdf,,,http://www.research.rutgers.edu/~xiangyu/paper/iccv13_face_final.pdf
+3f4bfa4e3655ef392eb5ad609d31c05f29826b45,http://pdfs.semanticscholar.org/3f4b/fa4e3655ef392eb5ad609d31c05f29826b45.pdf,,,http://arxiv.org/pdf/1003.5861v1.pdf
+3f5cf3771446da44d48f1d5ca2121c52975bb3d3,http://pdfs.semanticscholar.org/3f5c/f3771446da44d48f1d5ca2121c52975bb3d3.pdf,,https://doi.org/10.1007/3-540-47977-5_10,http://www.ri.cmu.edu/pub_files/pub4/narasimhan_srinivasa_g_2002_2/narasimhan_srinivasa_g_2002_2.pdf
+3f4711c315d156a972af37fe23642dc970a60acf,,,https://doi.org/10.1109/IJCNN.2008.4634393,
+3fbe4a46b94cdacbf076a66da7ea7e6546e96025,,,,
+3f14b504c2b37a0e8119fbda0eff52efb2eb2461,https://ibug.doc.ic.ac.uk/media/uploads/documents/eleftheriadis_tip_2016.pdf,,https://doi.org/10.1109/TIP.2016.2615288,https://ibug.doc.ic.ac.uk/media/uploads/documents/eleftheriadis_tip2.pdf
+3fac7c60136a67b320fc1c132fde45205cd2ac66,http://pdfs.semanticscholar.org/3fac/7c60136a67b320fc1c132fde45205cd2ac66.pdf,,https://doi.org/10.1007/978-3-319-11071-4_2,https://www.researchgate.net/profile/Yunduan_Cui2/publication/283296874_Remarks_on_Computational_Facial_Expression_Recognition_from_HOG_Features_Using_Quaternion_Multi-layer_Neural_Network/links/56314b3108ae0530378d2c6a.pdf?inViewer=0&origin=publication_detail&pdfJsDownload=0
+3fd90098551bf88c7509521adf1c0ba9b5dfeb57,http://pub.ist.ac.at/~chl/papers/lampert-pami2013.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2013.140
+3f623bb0c9c766a5ac612df248f4a59288e4d29f,http://pdfs.semanticscholar.org/3f62/3bb0c9c766a5ac612df248f4a59288e4d29f.pdf,,https://doi.org/10.1007/978-3-319-30668-1_4,http://homepages.ecs.vuw.ac.nz/~xuebing/Papers/lensen2016genetic.pdf
+3f4798c7701da044bdb7feb61ebdbd1d53df5cfe,http://sip.unige.ch/articles/2015/2015.EUSIPCO.Vector.quantization.pdf,,https://doi.org/10.1109/EUSIPCO.2015.7362361,
+3f4c262d836b2867a53eefb959057350bf7219c9,http://pdfs.semanticscholar.org/3f4c/262d836b2867a53eefb959057350bf7219c9.pdf,,,http://www.wseas.us/e-library/conferences/2008/istanbul/sip-wave/6-587-235.pdf?origin=publication_detail
+3f7723ab51417b85aa909e739fc4c43c64bf3e84,http://pdfs.semanticscholar.org/3f77/23ab51417b85aa909e739fc4c43c64bf3e84.pdf,,https://doi.org/10.1007/978-3-319-23234-8_48,https://www.researchgate.net/profile/Cosimo_Distante/publication/281589986_Improved_Performance_in_Facial_Expression_Recognition_Using_32_Geometric_Features/links/55eef99208aef559dc44a659.pdf?inViewer=0&origin=publication_detail&pdfJsDownload=0
+3ff418ac82df0b5c2f09f3571557e8a4b500a62c,,,https://doi.org/10.1007/s11554-007-0039-8,http://www.mip.informatik.uni-kiel.de/tiki-download_file.php?fileId=837
+3fc173805ed43602eebb7f64eea4d60c0386c612,,,,http://doi.ieeecomputersociety.org/10.1109/CyberC.2015.94
+3f63f9aaec8ba1fa801d131e3680900680f14139,http://dspace.nitrkl.ac.in/dspace/bitstream/2080/2288/1/4a.pdf,,,
+3f0e0739677eb53a9d16feafc2d9a881b9677b63,http://pdfs.semanticscholar.org/d309/e414f0d6e56e7ba45736d28ee58ae2bad478.pdf,,,http://arxiv.org/pdf/1608.08851v1.pdf
+3039627fa612c184228b0bed0a8c03c7f754748c,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Wu_Robust_Regression_on_2015_CVPR_paper.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1A_034.pdf
+30cc1ddd7a9b4878cca7783a59086bdc49dc4044,,,https://doi.org/10.1007/s11042-015-2599-0,
+303065c44cf847849d04da16b8b1d9a120cef73a,http://pdfs.semanticscholar.org/3030/65c44cf847849d04da16b8b1d9a120cef73a.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.580
+303a7099c01530fa0beb197eb1305b574168b653,http://openaccess.thecvf.com/content_cvpr_2016/papers/Zhang_Occlusion-Free_Face_Alignment_CVPR_2016_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.373
+30aa681ab80a830c3890090b0da3f1e786bd66ff,https://arxiv.org/pdf/1708.02337v1.pdf,,https://doi.org/10.1109/BTAS.2017.8272759,https://arxiv.org/pdf/1708.02337v2.pdf
+30cd39388b5c1aae7d8153c0ab9d54b61b474ffe,http://pdfs.semanticscholar.org/3be8/f1f7501978287af8d7ebfac5963216698249.pdf,,,http://arxiv.org/pdf/1510.09083v1.pdf
+303517dfc327c3004ae866a6a340f16bab2ee3e3,http://pdfs.semanticscholar.org/3035/17dfc327c3004ae866a6a340f16bab2ee3e3.pdf,,,http://ijetmas.com/admin/resources/project/paper/f201408191408462543.pdf
+309e17e6223e13b1f76b5b0eaa123b96ef22f51b,https://static.aminer.org/pdf/PDF/000/337/771/image_synthesis_and_face_recognition_based_on_d_face_model.pdf,,,http://mi.informatik.uni-siegen.de/publications/fg06_blanz.pdf
+3046baea53360a8c5653f09f0a31581da384202e,http://pdfs.semanticscholar.org/3046/baea53360a8c5653f09f0a31581da384202e.pdf,,,http://link.springer.com/content/pdf/10.1007/978-94-007-5446-1_8.pdf
+3026722b4cbe9223eda6ff2822140172e44ed4b1,http://chenlab.ece.cornell.edu/people/Andy/Andy_files/GallagherICCV09Demographics.pdf,,https://doi.org/10.1109/ICCV.2009.5459340,
+3028690d00bd95f20842d4aec84dc96de1db6e59,http://pdfs.semanticscholar.org/775f/9b8bc0ff151ee62b5e777f0aa9b09484ef8a.pdf,,,https://arxiv.org/pdf/1608.02146v1.pdf
+30a4b4ef252cb509b58834e7c40862124c737b61,,,https://doi.org/10.1142/S0218001416560061,
+30c96cc041bafa4f480b7b1eb5c45999701fe066,http://vislab.ucr.edu/PUBLICATIONS/pubs/Journal%20and%20Conference%20Papers/after10-1-1997/Journals/2014/DiscreteCosineTransformLocality-SensitiveHashes14.pdf,,https://doi.org/10.1109/TMM.2014.2305633,
+3060ac37dec4633ef69e7bc63488548ab3511f61,,,https://doi.org/10.1007/s00521-018-3358-8,
+306957285fea4ce11a14641c3497d01b46095989,http://pdfs.semanticscholar.org/3069/57285fea4ce11a14641c3497d01b46095989.pdf,,https://doi.org/10.1007/978-3-540-30548-4_23,http://www.jdl.ac.cn/doc/2004/Face%20Recognition%20under%20Varying%20Lighting%20Based%20on%20Derivates%20of%20Log%20Image.pdf
+304b1f14ca6a37552dbfac443f3d5b36dbe1a451,http://pdfs.semanticscholar.org/304b/1f14ca6a37552dbfac443f3d5b36dbe1a451.pdf,,,https://arxiv.org/pdf/1704.03966v1.pdf
+30f62b05b9a69d671be4112d47eba90028a26c71,,,,
+306127c3197eb5544ab1e1bf8279a01e0df26120,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Huang_Sparse_Coding_and_CVPR_2016_paper.pdf,,,http://openaccess.thecvf.com/content_cvpr_2016/papers/Huang_Sparse_Coding_and_CVPR_2016_paper.pdf
+307a810d1bf6f747b1bd697a8a642afbd649613d,http://pdfs.semanticscholar.org/307a/810d1bf6f747b1bd697a8a642afbd649613d.pdf,,,https://hal-univ-bourgogne.archives-ouvertes.fr/hal-01342810/file/Abstract%20WASC_pierre_final.pdf
+30180f66d5b4b7c0367e4b43e2b55367b72d6d2a,http://www.robots.ox.ac.uk/~vgg/publications/2017/Crosswhite17/crosswhite17.pdf,,,http://arxiv.org/pdf/1603.03958v2.pdf
+30044dd951133187cb8b57e53a22cf9306fa7612,,,https://doi.org/10.1109/WACV.2017.52,
+308025c378aef6acf9fe3acbddbfddcaa4271e8c,,,,
+30f6c4bd29b9a8c94f37f3818cf6145c1507826f,,,,
+30457461333c8797457c18636732327e6dde1d04,,,,
+30c5d2ec584e7b8273af6915aab420fc23ff2761,http://imi.ntu.edu.sg/IMIGraduatePrograms/IMIResearchSeminars/Documents/29_April_2014/REN_Jianfeng_29_April_2014.pdf,,https://doi.org/10.1109/TIP.2013.2268976,http://www3.ntu.edu.sg/home/EXDJiang/JiangX.D.-TIP-13-1.pdf
+3083d2c6d4f456e01cbb72930dc2207af98a6244,http://pdfs.semanticscholar.org/3083/d2c6d4f456e01cbb72930dc2207af98a6244.pdf,,,http://repositories.vnu.edu.vn/jspui/bitstream/123456789/16830/1/InTech-Perceived_age_estimation_from_face_images.pdf
+302c9c105d49c1348b8f1d8cc47bead70e2acf08,http://pdfs.semanticscholar.org/302c/9c105d49c1348b8f1d8cc47bead70e2acf08.pdf,,,http://eprints.lancs.ac.uk/87886/4/07936556.pdf
+30b74e60ec11c0ebc4e640637d56d85872dd17ce,http://pdfs.semanticscholar.org/c810/9382eea8f3fc49b3e6ed13d36eb95a06d0ed.pdf,,,https://arxiv.org/pdf/1706.07911v1.pdf
+30188b836f2fa82209d7afbf0e4d0ee29c6b9a87,,,https://doi.org/10.1109/TIP.2013.2249077,
+3080026f2f0846d520bd5bacb0cb2acea0ffe16b,,,https://doi.org/10.1109/BTAS.2017.8272690,
+303828619630ca295f772be0a7b9fe8007dfaea3,,,,
+304a306d2a55ea41c2355bd9310e332fa76b3cb0,http://pdfs.semanticscholar.org/95da/2d1137637e89da8b7a16e0dc6168cfceb693.pdf,,https://doi.org/10.1016/j.imavis.2016.04.009,https://www.doc.ic.ac.uk/~rw2614/pdfs/publications/vsl_crf_imavis_2016.pdf
+3042d3727b2f80453ff5378b4b3043abb2d685a1,http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0219.pdf,,,http://dhoiem.web.engr.illinois.edu/publications/cvpr2009_wang_flickr.pdf
+301b0da87027d6472b98361729faecf6e1d5e5f6,http://pdfs.semanticscholar.org/301b/0da87027d6472b98361729faecf6e1d5e5f6.pdf,,,http://www.cv.tu-berlin.de/fileadmin/fg140/Head_Pose_Estimation.pdf
+30b103d59f8460d80bb9eac0aa09aaa56c98494f,http://pdfs.semanticscholar.org/30b1/03d59f8460d80bb9eac0aa09aaa56c98494f.pdf,,,http://www.araa.asn.au/acra/acra2015/papers/pap132.pdf
+30cace74a7d51e9a928287e25bcefb968c49f331,,,,http://doi.ieeecomputersociety.org/10.1109/ACII.2015.7344634
+5e97a1095f2811e0bc188f52380ea7c9c460c896,http://web.eecs.utk.edu/~rguo1/FacialParsing.pdf,,https://doi.org/10.1109/ICIP.2015.7351510,
+5e59193a0fc22a0c37301fb05b198dd96df94266,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Dessein_Example-Based_Modeling_of_ICCV_2015_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.444
+5e0eb34aeb2b58000726540336771053ecd335fc,http://ies.anthropomatik.kit.edu/ies/download/publ/ies_2016_herrmann_low_quality.pdf,,https://doi.org/10.1109/DICTA.2016.7797061,
+5ee0103048e1ce46e34a04c45ff2c2c31529b466,,,https://doi.org/10.1109/ICIP.2015.7350886,
+5e8de234b20f98f467581f6666f1ed90fd2a81be,,,,http://doi.acm.org/10.1145/2647868.2655042
+5ebb247963d2d898d420f1f4a2486102a9d05aa9,http://bcmi.sjtu.edu.cn/~zhzhang/papers/nncw.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2010.5539934
+5e53f530871b5167be0f224993be8a38e85796e8,,,,
+5e99b49b4c5fb2a72392ea199edacd650bd122c5,,,,
+5e28673a930131b1ee50d11f69573c17db8fff3e,http://pdfs.semanticscholar.org/f28d/fadba11bd3489d008827d9b1a539b34b50df.pdf,,,http://www.cs.weizmann.ac.il/~hassner/projects/Patchlbp/WolfHassnerTaigman_ECCVW08.pdf
+5ea9063b44b56d9c1942b8484572790dff82731e,https://ibug.doc.ic.ac.uk/media/uploads/documents/mlsp_2007_kotsia.pdf,,,
+5e87f5076952cd442718d6b4addce905bae1a1a4,,,https://doi.org/10.1109/ICMLC.2016.7872938,
+5e16f10f2d667d17c029622b9278b6b0a206d394,http://pdfs.semanticscholar.org/5e16/f10f2d667d17c029622b9278b6b0a206d394.pdf,,,https://arxiv.org/pdf/1410.5524v1.pdf
+5e19d7307ea67799eb830d5ce971f893e2b8a9ca,,,https://doi.org/10.1007/s11063-012-9214-4,
+5e0b691e9e5812dd3cb120a8d77619a45aa8e4c4,,,https://doi.org/10.1109/ICIP.2016.7532567,
+5ef3e7a2c8d2876f3c77c5df2bbaea8a777051a7,http://cbl.uh.edu/pub_files/ISBA-2016.pdf,,https://doi.org/10.1109/ISBA.2016.7477244,
+5ea165d2bbd305dc125415487ef061bce75dac7d,http://www.ece.northwestern.edu/~zli/new_home/MyPublications/conf/ICME2009-human-act-apd-final.pdf,,https://doi.org/10.1109/ICME.2009.5202626,http://www1.ece.neu.edu/~yunfu/papers/ICME2009-human-act-apd-final.pdf
+5e6ba16cddd1797853d8898de52c1f1f44a73279,http://pdfs.semanticscholar.org/5e6b/a16cddd1797853d8898de52c1f1f44a73279.pdf,,,https://arxiv.org/pdf/1406.6818v2.pdf
+5ea9cba00f74d2e113a10c484ebe4b5780493964,http://pdfs.semanticscholar.org/5ea9/cba00f74d2e113a10c484ebe4b5780493964.pdf,,,http://research.sabanciuniv.edu/10444/1/icat.pdf
+5ed5e534c8defd683909200c1dc31692942b7b5f,,,,http://doi.acm.org/10.1145/2983926
+5e62b2ab6fd3886e673fd5cbee160a5bee414507,,,,http://doi.ieeecomputersociety.org/10.1109/SITIS.2015.31
+5e09155cfb7a8bab2217e5d34cd0d6a4a0586868,,,,
+5e80e2ffb264b89d1e2c468fbc1b9174f0e27f43,http://www.cs.cmu.edu/~juny/Prof/papers/acmmm04a-jyang.pdf,,,http://lastchance.inf.cs.cmu.edu/alex/p580-yang.pdf
+5ec94adc9e0f282597f943ea9f4502a2a34ecfc2,http://pdfs.semanticscholar.org/5ec9/4adc9e0f282597f943ea9f4502a2a34ecfc2.pdf,,,http://arxiv.org/abs/1506.04655
+5e0e516226413ea1e973f1a24e2fdedde98e7ec0,http://pdfs.semanticscholar.org/74ce/97da57ec848db660ee69dec709f226c74f43.pdf,,,http://cbcl.mit.edu/publications/theses/thesis-leibo.pdf
+5e806d8fa48216041fe719309534e3fa903f7b5b,,,https://doi.org/10.1109/BTAS.2010.5634501,
+5efdf48ca56b78e34dc2f2f0ce107a25793d3fc2,,,,http://doi.ieeecomputersociety.org/10.1109/TVCG.2016.2641442
+5e821cb036010bef259046a96fe26e681f20266e,https://pdfs.semanticscholar.org/d7e6/d52748c5ed386a90118fa385647c55954ab9.pdf,,,http://www.ee.oulu.fi/~hadid/IPTA2008.pdf
+5e7cb894307f36651bdd055a85fdf1e182b7db30,http://pdfs.semanticscholar.org/5e7c/b894307f36651bdd055a85fdf1e182b7db30.pdf,,,http://note.sonots.com/?openfile=report.pdf&plugin=attach&refer=SciSoftware/MSVM
+5b693cb3bedaa2f1e84161a4261df9b3f8e77353,http://pdfs.semanticscholar.org/5b69/3cb3bedaa2f1e84161a4261df9b3f8e77353.pdf,,,http://www.cmis.csiro.au/Hugues.Talbot/dicta2003/cdrom/pdf/0899.pdf
+5b73b7b335f33cda2d0662a8e9520f357b65f3ac,http://www.iis.sinica.edu.tw/papers/song/16795-F.pdf,,https://doi.org/10.1109/SMC.2013.538,
+5b6d05ce368e69485cb08dd97903075e7f517aed,http://pdfs.semanticscholar.org/5b6d/05ce368e69485cb08dd97903075e7f517aed.pdf,,,http://www.andrew.cmu.edu/user/kseshadr/CMU_2009_Tech_Rep.pdf
+5b0bf1063b694e4b1575bb428edb4f3451d9bf04,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w25/papers/Yang_Facial_Shape_Tracking_ICCV_2015_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2015.131
+5b59e6b980d2447b2f3042bd811906694e4b0843,https://bib.irb.hr/datoteka/832723.PID4276755.pdf,,https://doi.org/10.1109/SPLIM.2016.7528404,
+5bb6703bc01e4f7ab7e043964ec6579ac06a7c03,,,,
+5bb53fb36a47b355e9a6962257dd465cd7ad6827,http://pdfs.semanticscholar.org/5bb5/3fb36a47b355e9a6962257dd465cd7ad6827.pdf,,,http://arxiv.org/abs/1610.08481
+5b809871a895ea8422afc31c918056614ea94688,,,,
+5b89744d2ac9021f468b3ffd32edf9c00ed7fed7,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Bi_Beyond_Mahalanobis_Metric_2015_CVPR_paper.pdf,,https://doi.org/10.1109/CVPR.2015.7298847,http://openaccess.thecvf.com/content_cvpr_2015/supplemental/Bi_Beyond_Mahalanobis_Metric_2015_CVPR_supplemental.pdf
+5bfc32d9457f43d2488583167af4f3175fdcdc03,http://pdfs.semanticscholar.org/5bfc/32d9457f43d2488583167af4f3175fdcdc03.pdf,,,http://www.ijsr.net/archive/v2i8/MDIwMTMyODc=.pdf
+5bed2453a5b0c54a4a4a294f29c9658658a9881e,,,https://doi.org/10.1109/TIP.2015.2451173,
+5b7cb9b97c425b52b2e6f41ba8028836029c4432,http://www.cis.pku.edu.cn/faculty/vision/zlin/Publications/2014-CVPR-SMR.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2014.484
+5bff2ffe533eb53c2e0e13ce020cc76199c12c74,,,,
+5b6f0a508c1f4097dd8dced751df46230450b01a,http://www.eecs.berkeley.edu/Pubs/TechRpts/2010/EECS-2010-174.pdf,,,https://www2.eecs.berkeley.edu/Pubs/TechRpts/2010/EECS-2010-174.pdf
+5b9d41e2985fa815c0f38a2563cca4311ce82954,http://www.iti.gr/files/3dpvt04tsalakanidou.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TDPVT.2004.1335141
+5b6593a6497868a0d19312952d2b753232414c23,http://pdfs.semanticscholar.org/5b65/93a6497868a0d19312952d2b753232414c23.pdf,,https://doi.org/10.1007/978-3-319-16199-0_53,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ECCV_2014/workshops/w22/W22-11.pdf
+5b01c4eef1e83f98751bb3ef1e4fca34abb8f530,,,,
+5bb684dfe64171b77df06ba68997fd1e8daffbe1,http://pdfs.semanticscholar.org/f096/9403b5dfa54445d911aedd88ab25b0b6cd99.pdf,,,https://arxiv.org/pdf/1706.00826v2.pdf
+5b64584d6b01e66dfd0b6025b2552db1447ccdeb,,,https://doi.org/10.1109/BTAS.2017.8272697,
+5b719410e7829c98c074bc2947697fac3b505b64,http://pdfs.semanticscholar.org/ecec/d5c8b2472364fd7816033e8355215e34bb1b.pdf,,,http://www.csb.uncw.edu/mscsis/complete/pdf/RatliffMatthew_Final.pdf
+5bae9822d703c585a61575dced83fa2f4dea1c6d,http://pdfs.semanticscholar.org/5bae/9822d703c585a61575dced83fa2f4dea1c6d.pdf,,,http://arxiv.org/abs/1504.01942
+5bb87c7462c6c1ec5d60bde169c3a785ba5ea48f,http://pdfs.semanticscholar.org/7589/58f2340ba46c6708b73d5427985d5623a512.pdf,,,https://arxiv.org/pdf/1506.07310v2.pdf
+5b9d9f5a59c48bc8dd409a1bd5abf1d642463d65,http://pdfs.semanticscholar.org/5b9d/9f5a59c48bc8dd409a1bd5abf1d642463d65.pdf,,,http://ncs.ethz.ch/projects/evospike/publications/file.2017-11-28.3773520729
+5bf70c1afdf4c16fd88687b4cf15580fd2f26102,http://pdfs.semanticscholar.org/5bf7/0c1afdf4c16fd88687b4cf15580fd2f26102.pdf,,,https://arxiv.org/pdf/1803.07386v1.pdf
+5b5962bdb75c72848c1fb4b34c113ff6101b5a87,http://research.microsoft.com/en-us/um/people/leizhang/paper/TMM2011_Xiao.pdf,,https://doi.org/10.1109/TMM.2012.2186121,http://research.microsoft.com/en-us/people/xjwang/tmm12_facedb.pdf
+5bcc8ef74efbb959407adfda15a01dad8fcf1648,http://pdfs.semanticscholar.org/5bcc/8ef74efbb959407adfda15a01dad8fcf1648.pdf,,,https://arxiv.org/pdf/1801.09103v1.pdf
+5b01d4338734aefb16ee82c4c59763d3abc008e6,http://pdfs.semanticscholar.org/5b01/d4338734aefb16ee82c4c59763d3abc008e6.pdf,,,http://ijssst.info/Vol-17/No-35/paper32.pdf
+5bdd9f807eec399bb42972a33b83afc8b607c05c,http://www.umiacs.umd.edu/~pvishalm/Journal_pub/SPM_DA_v9.pdf,,https://doi.org/10.1109/MSP.2014.2347059,http://www.umiacs.umd.edu/users/pvishalm/Journal_pub/SPM_DA_v9.pdf
+5b8237ae83bc457e3b29e7209126f61120fba082,,,,
+5bfad0355cdb62b22970777d140ea388a7057d4c,,,https://doi.org/10.1016/j.patcog.2011.05.006,
+5b6ecbf5f1eecfe1a9074d31fe2fb030d75d9a79,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2014/W01/papers/Yang_Improving_3D_Face_2014_CVPR_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2014.7
+5b86c36e3eb59c347b81125d5dd57dd2a2c377a9,http://pdfs.semanticscholar.org/5b86/c36e3eb59c347b81125d5dd57dd2a2c377a9.pdf,,,http://www.murase.nuie.nagoya-u.ac.jp/seikaweb/paper/2007/E07-conference-ide-1.pdf
+5be3cc1650c918da1c38690812f74573e66b1d32,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Sandeep_Relative_Parts_Distinctive_2014_CVPR_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2014.462
+5bc0a89f4f73523967050374ed34d7bc89e4d9e1,http://pdfs.semanticscholar.org/5bc0/a89f4f73523967050374ed34d7bc89e4d9e1.pdf,,,http://ursulakhess.com/resources/HDH15.pdf
+5b6bed112e722c0629bcce778770d1b28e42fc96,http://pdfs.semanticscholar.org/5b6b/ed112e722c0629bcce778770d1b28e42fc96.pdf,,https://doi.org/10.5244/C.27.60,http://www.bmva.org/bmvc/2013/Papers/paper0060/abstract0060.pdf
+5bde1718253ec28a753a892b0ba82d8e553b6bf3,http://pdfs.semanticscholar.org/5bde/1718253ec28a753a892b0ba82d8e553b6bf3.pdf,,,http://www.jmlr.org/proceedings/papers/v13/kropotov10a.html
+5b0ebb8430a04d9259b321fc3c1cc1090b8e600e,http://www.openu.ac.il/home/hassner/projects/Ossk/WolfHassnerTaigman_ICCV09.pdf,,https://doi.org/10.1109/ICCV.2009.5459323,http://www.wisdom.weizmann.ac.il/~hassner/projects/Ossk/WolfHassnerTaigman_ICCV09.pdf
+5b4bbba68053d67d12bd3789286e8a9be88f7b9d,,,https://doi.org/10.1109/ICSMC.2008.4811353,
+37c8514df89337f34421dc27b86d0eb45b660a5e,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w25/papers/Uricar_Facial_Landmark_Tracking_ICCV_2015_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2015.127
+37c5e3b6175db9eaadee425dc51bc7ce05b69a4e,,,https://doi.org/10.1007/s00521-013-1387-x,
+3726d17fd7e57c75b8b9f7f57bdec9054534be5e,,,,
+371f40f6d32ece05cc879b6954db408b3d4edaf3,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2B_100_ext.pdf,,https://doi.org/10.1109/CVPR.2015.7299054,http://web.eecs.umich.edu/~jiadeng/paper/chao_cvpr2015.pdf
+374c7a2898180723f3f3980cbcb31c8e8eb5d7af,http://poseidon.csd.auth.gr/papers/PUBLISHED/CONFERENCE/pdf/Kotsia07a.pdf,,https://doi.org/10.1109/ICASSP.2007.366303,
+37007af698b990a3ea8592b11d264b14d39c843f,http://acberg.com/papers/dcmsvm.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6248099
+374a0df2aa63b26737ee89b6c7df01e59b4d8531,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Yuan_Temporal_Action_Localization_CVPR_2016_paper.pdf,,,http://openaccess.thecvf.com/content_cvpr_2016/papers/Yuan_Temporal_Action_Localization_CVPR_2016_paper.pdf
+378ae5ca649f023003021f5a63e393da3a4e47f0,http://vision.ucsd.edu/~carolina/files/galleguillos_cvpr10.pdf,,,http://vision.cornell.edu/se3/wp-content/uploads/2014/09/1064.pdf
+37619564574856c6184005830deda4310d3ca580,http://arxiv.org/pdf/1508.04389v1.pdf,,https://doi.org/10.1109/BTAS.2015.7358755,http://www.rci.rutgers.edu/~vmp93/Conference_pub/DeepPyramid_btas2015.pdf
+3774ffc9523b8f4a148d5e93eaae317dc18af3e6,,,,
+37ce1d3a6415d6fc1760964e2a04174c24208173,http://www.cse.msu.edu/~liuxm/publication/Jourabloo_Liu_ICCV2015.pdf,,,http://cvlab.cse.msu.edu/pdfs/Jourabloo_Liu_ICCV2015.pdf
+3765c26362ad1095dfe6744c6d52494ea106a42c,http://www.vision.ee.ethz.ch/~tquack/gammeter_quack_iccv2009.pdf,,https://doi.org/10.1109/ICCV.2009.5459180,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_ICCV_2009/contents/pdf/iccv2009_079.pdf
+37179032085e710d1d62a1ba2e9c1f63bb4dde91,http://eprints.soton.ac.uk/363288/1/tome%20tifs.pdf,,https://doi.org/10.1109/TIFS.2014.2299975,http://atvs.ii.uam.es/files/2014_TIFS_SoftBio_Tome.pdf
+3769e65690e424808361e3eebfdec8ab91908aa9,,,,http://doi.acm.org/10.1145/2647868.2655035
+3727ac3d50e31a394b200029b2c350073c1b69e3,http://arxiv.org/pdf/1605.03639v2.pdf,,,http://arxiv.org/pdf/1605.03639v1.pdf
+37f2e03c7cbec9ffc35eac51578e7e8fdfee3d4e,http://www.cse.iitm.ac.in/~amittal/wacv2015_review.pdf,,,http://doi.ieeecomputersociety.org/10.1109/WACV.2015.21
+37278ffce3a0fe2c2bbf6232e805dd3f5267eba3,http://arxiv.org/pdf/1602.04504v1.pdf,,,http://vision.cornell.edu/se3/wp-content/uploads/2016/02/376.pdf
+377a1be5113f38297716c4bb951ebef7a93f949a,http://www.cris.ucr.edu/IGERT/Presentation2013/CruzAbstract.pdf,,https://doi.org/10.1109/ICIP.2013.6738868,http://vislab.ucr.edu/PUBLICATIONS/pubs/Journal%20and%20Conference%20Papers/after10-1-1997/Conference/2013/FACIAL%20EMOTION%20RECOGNITION%20WITH13.pdf
+377c6563f97e76a4dc836a0bd23d7673492b1aae,http://pdfs.semanticscholar.org/377c/6563f97e76a4dc836a0bd23d7673492b1aae.pdf,,,https://arxiv.org/pdf/1803.03330v1.pdf
+370e0d9b89518a6b317a9f54f18d5398895a7046,http://pdfs.semanticscholar.org/370e/0d9b89518a6b317a9f54f18d5398895a7046.pdf,,,http://www.researchgate.net/profile/Sebastien_Marcel/publication/230775868_Cross-Pollination_of_Normalization_Techniques_From_Speaker_to_Face_Authentication_Using_Gaussian_Mixture_Models/links/0fcfd50503fcb9dcc5000000.pdf
+37105ca0bc1f11fcc7c6b7946603f3d572571d76,http://vipl.ict.ac.cn/sites/default/files/papers/files/2012_TIST_dmzhai_Multi-view%20metric%20learning%20with%20global%20consistency%20and%20local%20smoothness.pdf,,,http://doi.acm.org/10.1145/2168752.2168767
+37ba12271d09d219dd1a8283bc0b4659faf3a6c6,http://www.eecs.qmul.ac.uk/~sgg/papers/LayneEtAl_ARTERMIS2013.pdf,,,http://doi.acm.org/10.1145/2510650.2510658
+3773e5d195f796b0b7df1fca6e0d1466ad84b5e7,http://pdfs.semanticscholar.org/3773/e5d195f796b0b7df1fca6e0d1466ad84b5e7.pdf,,,http://alumni.cs.ucr.edu/~dyankov/DYankov_TSLearningWithNoise2008.pdf
+37eb666b7eb225ffdafc6f318639bea7f0ba9a24,http://pdfs.semanticscholar.org/37eb/666b7eb225ffdafc6f318639bea7f0ba9a24.pdf,,,http://www.cse.msu.edu/rgroups/biometrics/Publications/Face/HanJain_UnconstrainedAgeGenderRaceEstimation_MSUTechReport2014.pdf
+375435fb0da220a65ac9e82275a880e1b9f0a557,https://ibug.doc.ic.ac.uk/media/uploads/documents/tpami_alignment.pdf,,,http://eprints.lincoln.ac.uk/17528/7/__ddat02_staffhome_jpartridge_tzimiroTPAMI15.pdf
+37f25732397864b739714aac001ea1574d813b0d,,,https://doi.org/10.1016/j.ijar.2017.09.002,
+37b6d6577541ed991435eaf899a2f82fdd72c790,http://pdfs.semanticscholar.org/37b6/d6577541ed991435eaf899a2f82fdd72c790.pdf,,,https://arxiv.org/pdf/1204.1611.pdf
+37d6f0eb074d207b53885bd2eb78ccc8a04be597,http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf,,https://doi.org/10.1109/BTAS.2012.6374605,http://www.cse.msu.edu/~climer/pdf/DantchevaChenRossFaceCosmetics_BTAS2012.pdf
+373c4d6af0ee233f0d669c3955c3a3ef2a009638,,,https://doi.org/10.1109/APSIPA.2015.7415420,
+3753b9fcf95b97e2baf952993905cd6dfa8561cb,,,,
+37ef18d71c1ca71c0a33fc625ef439391926bfbb,http://pdfs.semanticscholar.org/37ef/18d71c1ca71c0a33fc625ef439391926bfbb.pdf,,https://doi.org/10.4304/jmm.3.2.60-67,http://www.academypublisher.com/jmm/vol03/no02/jmm03026067.pdf
+370b5757a5379b15e30d619e4d3fb9e8e13f3256,http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf,,,http://www.tamaraberg.com/papers/lfw.pdf
+081189493ca339ca49b1913a12122af8bb431984,http://pdfs.semanticscholar.org/0811/89493ca339ca49b1913a12122af8bb431984.pdf,,,http://openaccess.thecvf.com/content_cvpr_2017/supplemental/Saito_Photorealistic_Facial_Texture_2017_CVPR_supplemental.pdf
+08ee541925e4f7f376538bc289503dd80399536f,http://pdfs.semanticscholar.org/08ee/541925e4f7f376538bc289503dd80399536f.pdf,,,https://papers.nips.cc/paper/6813-runtime-neural-pruning.pdf
+08c76a4cc6f402c37a050cae5390427a5b66a467,,,,
+08d2f655361335bdd6c1c901642981e650dff5ec,http://dro.deakin.edu.au/eserv/DU:30058435/arandjelovic-automaticcastlisting-2006.pdf,,,http://nichol.as/papers/Arandjelovic/Automatic%20Cast%20Listing%20in%20Feature-Length%20Films.pdf
+08fbe3187f31b828a38811cc8dc7ca17933b91e9,http://www.merl.com/publications/docs/TR2011-084.pdf,,,http://www.cfar.umd.edu/~rama/Publications/Turaga_PAMI_2011.pdf
+08ae100805d7406bf56226e9c3c218d3f9774d19,http://pdfs.semanticscholar.org/08ae/100805d7406bf56226e9c3c218d3f9774d19.pdf,,https://doi.org/10.1186/s13640-017-0211-4,https://jivp-eurasipjournals.springeropen.com/track/pdf/10.1186/s13640-017-0211-4?site=jivp-eurasipjournals.springeropen.com
+085b5f9fd49432edab29e2c64f2a427fbce97f67,https://staff.fnwi.uva.nl/m.jain/pub/jain-objects-actions-cvpr2015.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/1A_006_ext.pdf
+08c18b2f57c8e6a3bfe462e599a6e1ce03005876,http://ca.cs.cmu.edu/sites/default/files/8uca_final_revision.pdf,,,http://www.cs.cmu.edu/~ftorre/uca_new.pdf
+08f6ad0a3e75b715852f825d12b6f28883f5ca05,http://www.cse.msu.edu/biometrics/Publications/Face/JainKlarePark_FaceRecognition_ChallengesinForensics_FG11.pdf,,https://doi.org/10.1109/FG.2011.5771338,http://biometrics.cse.msu.edu/Publications/Face/JainKlarePark_FaceRecognition_ChallengesinForensics_FG11.pdf
+08ff81f3f00f8f68b8abd910248b25a126a4dfa4,https://research-information.bristol.ac.uk/files/74279764/Ioannis_Pitas_Symmetric_Subspace_Learning_for_Image_Analysis_2014.pdf,,https://doi.org/10.1109/TIP.2014.2367321,
+0874734e2af06883599ed449532a015738a1e779,,,https://doi.org/10.1007/s10115-013-0702-2,
+081a431107eb38812b74a8cd036ca5e97235b499,http://webhost.uoradea.ro/ibuciu/IEEE_TNN_2008.pdf,,https://doi.org/10.1109/TNN.2008.2000162,http://poseidon.csd.auth.gr/papers/PUBLISHED/JOURNAL/pdf/PNMF_final.pdf
+084bd02d171e36458f108f07265386f22b34a1ae,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Ren_Face_Alignment_at_2014_CVPR_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2014.218
+081cb09791e7ff33c5d86fd39db00b2f29653fa8,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR_2009_WS/data/papers/09/22.pdf,,https://doi.org/10.1109/CVPRW.2009.5204307,
+086655743dc5f16563c012ad43b2f9d06771d9c0,,,,
+086131159999d79adf6b31c1e604b18809e70ba8,http://vinereactor.org/icpr2016.pdf,,https://doi.org/10.1109/ICPR.2016.7900282,
+0831a511435fd7d21e0cceddb4a532c35700a622,http://pdfs.semanticscholar.org/0831/a511435fd7d21e0cceddb4a532c35700a622.pdf,,https://doi.org/10.1016/j.neucom.2015.05.132,http://arxiv.org/pdf/1502.00478v1.pdf
+0861f86fb65aa915fbfbe918b28aabf31ffba364,http://pdfs.semanticscholar.org/0861/f86fb65aa915fbfbe918b28aabf31ffba364.pdf,,,http://www.ijcttjournal.org/2015/Volume22/number-3/IJCTT-V22P122.pdf
+089513ca240c6d672c79a46fa94a92cde28bd567,http://pdfs.semanticscholar.org/0895/13ca240c6d672c79a46fa94a92cde28bd567.pdf,,https://doi.org/10.1007/978-3-319-46466-4_50,http://www.cs.tau.ac.il/~wolf/papers/rnnfv.pdf
+089b5e8eb549723020b908e8eb19479ba39812f5,http://www.face-recognition-challenge.com/RobustnessOfDCNN-preprint.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.89
+080c204edff49bf85b335d3d416c5e734a861151,http://pdfs.semanticscholar.org/d3d1/09d81dd0911dfde259b6878d737e50c834eb.pdf,,,https://arxiv.org/pdf/1709.03456v1.pdf
+08a1fc55d03e4a73cad447e5c9ec79a6630f3e2d,http://pdfs.semanticscholar.org/b680/2fb123c594a9fd621ae576651201fcc4329a.pdf,,https://doi.org/10.5244/C.26.129,http://www.bmva.org/bmvc/2012/BMVC/paper129/abstract129.pdf
+0821028073981f9bd2dba2ad2557b25403fe7d7d,,,,http://doi.acm.org/10.1145/2733373.2806318
+08d40ee6e1c0060d3b706b6b627e03d4b123377a,http://pdfs.semanticscholar.org/3daa/fe6389d877fe15d8823cdf5ac15fd919676f.pdf,,,https://arxiv.org/pdf/1605.05197v2.pdf
+08c1f8f0e69c0e2692a2d51040ef6364fb263a40,http://pdfs.semanticscholar.org/0b20/0cf032430d74fd612601cc59d5af5608ceb4.pdf,,,ftp://whitechapel.media.mit.edu/pub/tech-reports/TR-443.ps.Z
+088aabe3da627432fdccf5077969e3f6402f0a80,http://pdfs.semanticscholar.org/088a/abe3da627432fdccf5077969e3f6402f0a80.pdf,,,https://openreview.net/pdf?id=SJOl4DlCZ
+087002ab569e35432cdeb8e63b2c94f1abc53ea9,http://sergioescalera.com/wp-content/uploads/2015/07/CVPR2015MoeslundSlides.pdf,,,http://openaccess.thecvf.com/content_cvpr_workshops_2015/W09/papers/Irani_Spatiotemporal_Analysis_of_2015_CVPR_paper.pdf
+08cb294a08365e36dd7ed4167b1fd04f847651a9,http://pdfs.semanticscholar.org/f75f/56bb1dcf721449f2fcc3634265f1e08e012c.pdf,,,https://www.cs.sfu.ca/~hamarneh/ecopy/icphs2015.pdf
+081286ede247c5789081502a700b378b6223f94b,http://pdfs.semanticscholar.org/0812/86ede247c5789081502a700b378b6223f94b.pdf,,,
+08e995c080a566fe59884a527b72e13844b6f176,http://pdfs.semanticscholar.org/08e9/95c080a566fe59884a527b72e13844b6f176.pdf,,https://doi.org/10.4304/jmm.6.1.39-47,http://ojs.academypublisher.com/index.php/jmm/article/download/06013947/2826
+08e24f9df3d55364290d626b23f3d42b4772efb6,http://poseidon.csd.auth.gr/papers/PUBLISHED/CONFERENCE/pdf/Buciu06c.pdf,http://ieeexplore.ieee.org/document/7071425/,,
+08872d801f134e41753601e85971769b28314ca2,,,,http://doi.acm.org/10.1145/2683483.2683560
+08f69a82fae49a4a1f13d06cae32d77bb8e5be1a,,,,
+085ceda1c65caf11762b3452f87660703f914782,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Jourabloo_Large-Pose_Face_Alignment_CVPR_2016_paper.pdf,,,http://cvlab.cse.msu.edu/pdfs/Jourabloo_Liu_CVPR2016.pdf
+0830c9b9f207007d5e07f5269ffba003235e4eff,http://pdfs.semanticscholar.org/cf2e/1ebb9609f46af6de0c15b4f48d03e37e54ba.pdf,,,https://pdfs.semanticscholar.org/0830/c9b9f207007d5e07f5269ffba003235e4eff.pdf
+08d55271589f989d90a7edce3345f78f2468a7e0,https://arxiv.org/pdf/1704.03373v1.pdf,,,http://openaccess.thecvf.com/content_cvpr_2017/papers/Liu_Quality_Aware_Network_CVPR_2017_paper.pdf
+081fb4e97d6bb357506d1b125153111b673cc128,http://pdfs.semanticscholar.org/081f/b4e97d6bb357506d1b125153111b673cc128.pdf,,,https://arxiv.org/pdf/1710.03144v2.pdf
+08a98822739bb8e6b1388c266938e10eaa01d903,http://homes.cs.washington.edu/~yoshi/papers/SensorSift_ACSAC_2012.pdf,,,http://research.cs.washington.edu/istc/lfb/paper/acsac12.pdf
+080ab68a898a3703feead145e2c38361ae84a0a8,,,https://doi.org/10.1109/TIFS.2014.2343833,
+08f1e9e14775757298afd9039f46ec56e80677f9,http://pdfs.semanticscholar.org/08f1/e9e14775757298afd9039f46ec56e80677f9.pdf,,,https://arxiv.org/pdf/1609.00072v1.pdf
+08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7,http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf,,https://doi.org/10.1109/TMM.2012.2187436,
+082ad50ac59fc694ba4369d0f9b87430553b11db,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553696.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2013.6553696
+6dd052df6b0e89d394192f7f2af4a3e3b8f89875,http://pdfs.semanticscholar.org/6dd0/52df6b0e89d394192f7f2af4a3e3b8f89875.pdf,,,http://cgit.nutn.edu.tw:8080/cgit/PaperDL/sars1013_130928095014.PDF
+6d7a32f594d46f4087b71e2a2bb66a4b25da5e30,http://pdfs.semanticscholar.org/6d7a/32f594d46f4087b71e2a2bb66a4b25da5e30.pdf,,,http://mi.eng.cam.ac.uk/~cipolla/publications/contributionToEditedBook/2007-MM-chapter1.pdf
+6dd5dbb6735846b214be72983e323726ef77c7a9,http://pdfs.semanticscholar.org/6dd5/dbb6735846b214be72983e323726ef77c7a9.pdf,,,http://libir.josai.ac.jp/il/user_contents/02/G0000284repository/pdf/JOS-13447777-07_25.pdf
+6d10beb027fd7213dd4bccf2427e223662e20b7d,http://pdfs.semanticscholar.org/6d10/beb027fd7213dd4bccf2427e223662e20b7d.pdf,,https://doi.org/10.1155/2016/4789803,http://www.image.ece.ntua.gr/papers/870.pdf
+6d4236a7a693555f701c0d149d1db89325035e23,,,,
+6d2ca1ddacccc8c865112bd1fbf8b931c2ee8e75,http://hoques.com/Publications/2015/2015-ubicomp_rocspeak_Fung-etal.pdf,,,http://web.media.mit.edu/~mehoque/Publications/2015/2015-ubicomp_rocspeak_Fung-etal.pdf
+6d207360148ec3991b70952315cb3f1e8899e977,http://www.researchgate.net/profile/Edwin_Hancock/publication/224649584_Estimating_Cast_Shadows_using_SFS_and_Class-based_Surface_Completion/links/004635239fd1ed7ac5000000.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICPR.2006.498
+6de18708218988b0558f6c2f27050bb4659155e4,https://arxiv.org/pdf/1611.05216v1.pdf,,,http://openaccess.thecvf.com/content_ICCV_2017/papers/Shi_Learning_Long-Term_Dependencies_ICCV_2017_paper.pdf
+6d5f876a73799cc628e4ad2d9cfcd88091272342,,,https://doi.org/10.1109/TSMCC.2005.848193,
+6d97e69bbba5d1f5c353f9a514d62aff63bc0fb1,http://disi.unitn.it/~sebe/publications/MIR03.pdf,,,http://doi.acm.org/10.1145/973264.973268
+6dfe0dafb4ed4bcfce670f321e724682ab261060,,,,
+6da3ff4250103369f4a6a39c8fb982438a97525c,,,https://doi.org/10.1109/THMS.2015.2404913,
+6d91da37627c05150cb40cac323ca12a91965759,http://pdfs.semanticscholar.org/6d91/da37627c05150cb40cac323ca12a91965759.pdf,,https://doi.org/10.1007/978-3-319-60240-0_4,http://arxiv.org/abs/1611.02806
+6dd2a0f9ca8a5fee12edec1485c0699770b4cfdf,http://pdfs.semanticscholar.org/6dd2/a0f9ca8a5fee12edec1485c0699770b4cfdf.pdf,,https://doi.org/10.1007/978-3-319-46487-9_52,http://crcv.ucf.edu/people/faculty/Gong/Paper/webly-supervised.pdf
+6dd8d8be00376ac760dc92f9c5f20520872c5355,,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2417578
+6d4b5444c45880517213a2fdcdb6f17064b3fa91,http://pdfs.semanticscholar.org/6d4b/5444c45880517213a2fdcdb6f17064b3fa91.pdf,,,http://www.iiste.org/Journals/index.php/JIEA/article/download/1598/1547
+6d67a7fd9a4fa99624721f37b077c71dad675805,,,https://doi.org/10.1007/s12193-015-0202-7,
+6d670eb172355d46034a831d8dc569e17ab14d94,,,,
+6d4e3616d0b27957c4107ae877dc0dd4504b69ab,http://pdfs.semanticscholar.org/6d4e/3616d0b27957c4107ae877dc0dd4504b69ab.pdf,,,http://arxiv.org/pdf/1603.08561v1.pdf
+6daccf3d15c617873954bb75de26f6b6b0a42772,http://arts.buaa.edu.cn/papers/Learning%20Templates%20for%20Artistic%20Portrait%20Lighting%20Analysis.pdf,,https://doi.org/10.1109/TIP.2014.2369962,
+6d8e3f3a83514381f890ab7cd2a1f1c5be597b69,http://pdfs.semanticscholar.org/aeb1/83983f4ae1ea9e01005f5d546480190e0345.pdf,,,https://web.cs.umass.edu/publication/docs/2014/UM-CS-PhD-2014-003.pdf
+6d8eef8f8d6cd8436c55018e6ca5c5907b31ac19,http://pdfs.semanticscholar.org/6d8e/ef8f8d6cd8436c55018e6ca5c5907b31ac19.pdf,,,http://theses.lib.vt.edu/theses/available/etd-03032016-170427/unrestricted/Cogswell_M_T_2016.pdf
+6d4103762e159130b32335cbf8893ee4dca26859,http://homepage.tudelft.nl/19j49/Publications_files/cogn_proc.pdf,,https://doi.org/10.1007/s10339-011-0419-7,
+6d618657fa5a584d805b562302fe1090957194ba,http://pdfs.semanticscholar.org/6d61/8657fa5a584d805b562302fe1090957194ba.pdf,,,http://ijnngt.org/upload/journal5/paper7.pdf
+6d4c64ca6936f868d793e1b164ddaf19243c19a7,,,https://doi.org/10.1109/TNNLS.2015.2499273,
+6d66c98009018ac1512047e6bdfb525c35683b16,http://pdfs.semanticscholar.org/6d66/c98009018ac1512047e6bdfb525c35683b16.pdf,,,http://www.cse.unr.edu/~bebis/CS790Q/PaperPresentations/FaceRecog_3D_Morphing.pdf
+016cbf0878db5c40566c1fbc237686fbad666a33,http://pdfs.semanticscholar.org/5a07/986f0a202eafbd1f1574fe2c3ae6abe2281f.pdf,,https://doi.org/10.1016/j.imavis.2008.04.015,http://www.dia.fi.upm.es/~pcr/publications/ivc08FinalVersion.pdf
+016800413ebd1a87730a5cf828e197f43a08f4b3,http://arxiv.org/pdf/1605.00743v1.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Gan_Learning_Attributes_Equals_CVPR_2016_paper.pdf
+0172867f4c712b33168d9da79c6d3859b198ed4c,http://www.cin.ufpe.br/~rps/Artigos/Expression%20and%20Illumination%20Invariant%20Preprocessing%20Technique%20for%20Face%20Recognition.pdf,,,
+0145dc4505041bf39efa70ea6d95cf392cfe7f19,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/2B_046_ext.pdf,,,http://web.eecs.umich.edu/~jjcorso/pubs/lu_corso_CVPR2015_actionvoxel.pdf
+01bef320b83ac4405b3fc5b1cff788c124109fb9,http://pdfs.semanticscholar.org/49e4/37cc5b673c49b942e304607a0050dcc82dae.pdf,,,http://www.educationaldatamining.org/EDM2015/proceedings/full320-326.pdf
+01729cb766b1016bac217a6a6cf24bbde19f56c8,,,https://doi.org/10.1109/CBMI.2010.5529888,
+01c9dc5c677aaa980f92c4680229db482d5860db,https://pages.iai.uni-bonn.de/gall_juergen/download/jgall_actiondetect_cvpr16.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Richard_Temporal_Action_Detection_CVPR_2016_paper.pdf
+013909077ad843eb6df7a3e8e290cfd5575999d2,http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf,,,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/Workshops/4990a896.pdf
+01d2cf5398c2b3e0f4fc8e8318a4492c95a0b242,http://webee.technion.ac.il/~lihi/Publications/10-ANS-PAMI.pdf,,,http://citeseerx.ist.psu.edu/viewdoc/download;jsessionid=E2065C91471A28DBCF992D5B41E94814?doi=10.1.1.180.4081&rep=rep1&type=pdf
+01c7a778cde86ad1b89909ea809d55230e569390,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Siyahjani_A_Supervised_Low-Rank_ICCV_2015_paper.pdf,,https://doi.org/10.1109/ICCV.2015.480,http://openaccess.thecvf.com/content_iccv_2015/papers/Siyahjani_A_Supervised_Low-Rank_ICCV_2015_paper.pdf
+01c8d7a3460422412fba04e7ee14c4f6cdff9ad7,http://pdfs.semanticscholar.org/01c8/d7a3460422412fba04e7ee14c4f6cdff9ad7.pdf,,,http://www.thesai.org/Downloads/Volume4No7/Paper_5-Rule_Based_System_for_Recognizing_Emotions_Using_Multimodal_Approach.pdf
+013d0acff1e5410fd9f6e15520d16f4ea02f03f6,,,https://doi.org/10.1109/TMM.2015.2477681,
+0115f260069e2e501850a14845feb400142e2443,http://pdfs.semanticscholar.org/0115/f260069e2e501850a14845feb400142e2443.pdf,,,http://cs.nyu.edu/csweb/Research/Theses/oh_jong.pdf
+01cc8a712e67384f9ef9f30580b7415bfd71e980,http://pdfs.semanticscholar.org/01cc/8a712e67384f9ef9f30580b7415bfd71e980.pdf,,,http://www.jneurosci.org/content/jneuro/30/44/14750.full.pdf
+01e12be4097fa8c94cabeef0ad61498c8e7762f2,http://pdfs.semanticscholar.org/10bf/f1957b8a4adce86efd10596186d905976c16.pdf,,,https://filebox.ece.vt.edu/~parikh/Publications/BiswasParikh_CVPR_2013_active_attributes_feedback.pdf
+0163d847307fae508d8f40ad193ee542c1e051b4,http://www.alessandrobergamo.com/data/compact_descriptors_supplementary.pdf,,,http://www.cs.dartmouth.edu/~aleb/data/compact_descriptors_supplementary.pdf
+01dc1e03f39901e212bdf291209b7686266aeb13,http://arxiv.org/pdf/1604.07279v1.pdf,,,http://www.vision.ee.ethz.ch/en/publications/papers/proceedings/eth_biwi_01300.pdf
+016f49a54b79ec787e701cc8c7d0280273f9b1ef,http://poseidon.csd.auth.gr/papers/PUBLISHED/CONFERENCE/pdf/Kotropoulos06a.pdf,,https://doi.org/10.1109/ICASSP.2006.1661378,
+017ce398e1eb9f2eed82d0b22fb1c21d3bcf9637,http://pdfs.semanticscholar.org/017c/e398e1eb9f2eed82d0b22fb1c21d3bcf9637.pdf,,,http://www.jdl.ac.cn/user/sgshan/pub/ACCV2004-Laiyun-Harmonics.pdf
+01e14d8ffd6767336d50c2b817a7b7744903e567,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.128
+01125e3c68edb420b8d884ff53fb38d9fbe4f2b8,http://openaccess.thecvf.com/content_ICCV_2017/papers/Jackson_Large_Pose_3D_ICCV_2017_paper.pdf,,,https://arxiv.org/pdf/1703.07834v1.pdf
+0133d1fe8a3138871075cd742c761a3de93a42ec,,,https://doi.org/10.1109/ICDSP.2015.7251932,
+01c09acf0c046296643de4c8b55a9330e9c8a419,http://pdfs.semanticscholar.org/01c0/9acf0c046296643de4c8b55a9330e9c8a419.pdf,,,http://www.iipl.fudan.edu.cn/~zhangjp/literatures/MLF/manifold%20learning/costa_icassp04.pdf
+01d23cbac762b0e46251f5dbde08f49f2d13b9f8,http://pdfs.semanticscholar.org/01d2/3cbac762b0e46251f5dbde08f49f2d13b9f8.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICPR.2002.1048228
+014143aa16604ec3f334c1407ceaa496d2ed726e,http://www.cs.cmu.edu/~har/cvpr2008-manifold.pdf,,,http://www.cs.nyu.edu/~ameet/largeManifold.pdf
+011e6146995d5d63c852bd776f782cc6f6e11b7b,http://openaccess.thecvf.com/content_cvpr_2016/papers/Zhuang_Fast_Training_of_CVPR_2016_paper.pdf,,,https://arxiv.org/pdf/1603.02844v2.pdf
+0182d090478be67241392df90212d6cd0fb659e6,http://www.cs.utexas.edu/~grauman/papers/localized_attributes_cvpr2012.pdf,,,https://www.cc.gatech.edu/~parikh/Publications/DuanParikhCrandallGrauman_CVPR_2012_local_attributes.pdf
+016a8ed8f6ba49bc669dbd44de4ff31a79963078,http://www.jdl.ac.cn/user/sgshan/pub/icassp04_qing.pdf,,https://doi.org/10.1109/ICASSP.2004.1327215,
+01e63d0a21fad7a29301749e9eafed826101b636,,,,
+016194dbcd538ab5a129ef1bcff3c6e073db63f9,,,https://doi.org/10.1007/s10462-012-9334-2,
+01beab8f8293a30cf48f52caea6ca0fb721c8489,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553729.pdf,,,http://parnec.nuaa.edu.cn/xtan/paper/fg2013.pdf
+01f0a4e1442a7804e1fe95798eff777d08e42014,,,https://doi.org/10.1016/j.knosys.2017.09.005,
+0178929595f505ef7655272cc2c339d7ed0b9507,http://pdfs.semanticscholar.org/7d84/151beccef17f71b3eeaca59ebc690561ab73.pdf,,,http://arxiv.org/pdf/1609.00496v1.pdf
+0181fec8e42d82bfb03dc8b82381bb329de00631,http://users.isy.liu.se/en/cvl/zografos/publications/CVPR2013.pdf,,,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989c107.pdf
+01b4b32c5ef945426b0396d32d2a12c69c282e29,http://pdfs.semanticscholar.org/1510/bfa3a31ccf47e0241d3528aeda4871597a0f.pdf,,https://doi.org/10.5244/C.27.85,http://www.bmva.org/bmvc/2013/Papers/paper0085/paper0085.pdf
+016435db03820374d6af65b68f001f0918914e4f,,,,
+01e27c91c7cef926389f913d12410725e7dd35ab,,,https://doi.org/10.1007/s11760-017-1140-5,
+0113b302a49de15a1d41ca4750191979ad756d2f,http://www.cecs.uci.edu/~papers/icme06/pdfs/0000537.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICME.2006.262444
+01379c50c392c104694ccb871a4b6a36d514f102,http://sse.tongji.edu.cn/hyli/Publications/icmla2010.pdf,,,
+01c948d2b73abe8be1ac128a6439c1081ebca95a,http://mla.sdu.edu.cn/PeopleInfo/lixuzhou/A%20hybrid%20biometric%20identification%20framework%20for%20high%20security%20applications.pdf,,https://doi.org/10.1007/s11704-014-4070-1,
+01733018a79aa447a27f269a1b9a58cd5f39603e,http://vc.sce.ntu.edu.sg/index_files/Semi-supervised%20Bilinear%20Subspace%20Learning.pdf,,https://doi.org/10.1109/TIP.2009.2018015,http://www.lv-nus.org/papers/2009/2009_J_3.pdf
+019e471667c72b5b3728b4a9ba9fe301a7426fb2,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2A_012.pdf,,https://doi.org/10.1109/CVPR.2015.7298846,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/2A_012_ext.pdf
+0601416ade6707c689b44a5bb67dab58d5c27814,http://pdfs.semanticscholar.org/0601/416ade6707c689b44a5bb67dab58d5c27814.pdf,,,http://digitalassets.lib.berkeley.edu/techreports/ucb/text/EECS-2007-99.pdf
+064b797aa1da2000640e437cacb97256444dee82,http://pdfs.semanticscholar.org/064b/797aa1da2000640e437cacb97256444dee82.pdf,,,https://arxiv.org/pdf/1511.04901v1.pdf
+06f146dfcde10915d6284981b6b84b85da75acd4,http://cmlab.csie.ntu.edu.tw/~sirius42/papers/tmm12.pdf,,https://doi.org/10.1109/TMM.2013.2242460,http://www.chennaisunday.com/IEEE%202013%20Java%20Basepaper/Scalable%20Face%20Image%20Retrieval%20using%20Attribute-Enhanced%20Sparse%20Codewords.pdf
+067126ce1f1a205f98e33db7a3b77b7aec7fb45a,http://pdfs.semanticscholar.org/0671/26ce1f1a205f98e33db7a3b77b7aec7fb45a.pdf,,https://doi.org/10.1007/978-3-642-16687-7_56,http://www.rduin.nl/papers/ciarp_10_dismat.pdf
+06466276c4955257b15eff78ebc576662100f740,http://cmlab.csie.ntu.edu.tw/~sirius42/papers/sigir12.pdf,,,http://www.csie.ntu.edu.tw/~winston/papers/lei12where.pdf
+06d028bd761ad6f29e9f1835d6686d9880706438,,,,
+0697bd81844d54064d992d3229162fe8afcd82cb,http://pdfs.semanticscholar.org/0697/bd81844d54064d992d3229162fe8afcd82cb.pdf,,,https://arxiv.org/pdf/1706.05850v1.pdf
+06f8aa1f436a33014e9883153b93581eea8c5c70,http://pdfs.semanticscholar.org/8926/471921ff608f70c6c81777782974a91086ae.pdf,,https://doi.org/10.1007/978-3-319-46478-7_48,http://www.cs.utexas.edu/~ycsu/projects/leaving-some-stones-unturned/eccv2016-1542su.pdf
+067fe74aec42cb82b92cf6742c7cfb4a65f16951,,,,http://doi.acm.org/10.1145/2601434
+061c84a4143e859a7caf6e6d283dfb30c23ee56e,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2B_008_ext.pdf,,https://doi.org/10.1109/CVPR.2015.7298962,http://arxiv.org/abs/1504.04871
+06d93a40365da90f30a624f15bf22a90d9cfe6bb,http://pdfs.semanticscholar.org/6940/40e59bffd860640e45c54ca7b093630caa39.pdf,,,http://publications.idiap.ch/downloads/papers/2010/Luo_NIPS10_2010.pdf
+061e29eae705f318eee703b9e17dc0989547ba0c,http://pdfs.semanticscholar.org/061e/29eae705f318eee703b9e17dc0989547ba0c.pdf,,https://doi.org/10.1007/978-3-642-37444-9_45,http://vipl.ict.ac.cn/sites/default/files/papers/files/2012_ACCV_myliu_Enhancing%20Expression%20Recognition%20in%20the%20Wild%20with%20Unlabeled%20Reference%20Data.pdf
+06402979cb55ec7c4488204aab5bc23d5f432f50,,,,
+06850b60e33baa4ea9473811d58c0d5015da079e,http://pdfs.semanticscholar.org/4cff/901521af06d6a0c98c9dce253296dd88b496.pdf,,,https://arxiv.org/pdf/1511.02407v1.pdf
+06e7e99c1fdb1da60bc3ec0e2a5563d05b63fe32,http://www.cs.utexas.edu/~grauman/papers/whittle-search-supp-cvpr2012.pdf,,,http://www.cs.utexas.edu/~grauman/papers/whittlesearch-poster-cvpr2012.pdf
+06a6347ac14fd0c6bb3ad8190cbe9cdfa5d59efc,https://www.cs.umd.edu/sites/default/files/scholarly_papers/Biswas_1.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247922
+0699475af70765d0810881d3536b44a3c1d745a2,,,,
+0614cafad1b546faa7e99c67c9bda6bae2cacb5e,,,,
+066d71fcd997033dce4ca58df924397dfe0b5fd1,http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf,,,http://www.dehshibi.com/files/papers/Iranian%20Face%20Database%20and%20Evaluation%20with%20a%20new%20detection.pdf
+06a799ad89a2a45aee685b9e892805e3e0251770,,,https://doi.org/10.1007/978-3-319-42147-6,
+06526c52a999fdb0a9fd76e84f9795a69480cecf,http://pdfs.semanticscholar.org/0652/6c52a999fdb0a9fd76e84f9795a69480cecf.pdf,,https://doi.org/10.1007/978-3-319-14442-9_24,http://www.ceng.metu.edu.tr/~ys/pubs/others/imotion.pdf
+06bad0cdda63e3fd054e7b334a5d8a46d8542817,http://vision.cs.utexas.edu/projects/featuresharing/0975.pdf,,,http://www.cs.utexas.edu/~sjhwang/0975.pdf
+06fe63b34fcc8ff68b72b5835c4245d3f9b8a016,http://chechiklab.biu.ac.il/~gal/Papers/Mesnil_MachineLearning2013_objects_and_their_parts.pdf,,https://doi.org/10.1007/s10994-013-5336-9,http://www.iro.umontreal.ca/~lisa/pointeurs/2013_semantic_image_mlj.pdf
+06aab105d55c88bd2baa058dc51fa54580746424,http://www4.comp.polyu.edu.hk/~cslzhang/paper/ISCRC_TIFS.pdf,,https://doi.org/10.1109/TIFS.2014.2324277,https://arxiv.org/pdf/1308.6687v1.pdf
+0641dbee7202d07b6c78a39eecd312c17607412e,http://users.cecs.anu.edu.au/~hongdong/JiZhongLiSalzmannICIP14.pdf,,https://doi.org/10.1109/ICIP.2014.7025056,
+06262d14323f9e499b7c6e2a3dec76ad9877ba04,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Juranek_Real-Time_Pose_Estimation_ICCV_2015_paper.pdf,,,http://www.fit.vutbr.cz/research/groups/graph/PoseEstimation/iccv2015/Juranek_ICCV2015_PoseEstimation.pdf
+06400a24526dd9d131dfc1459fce5e5189b7baec,http://www.vision.ee.ethz.ch/en/publications/papers/proceedings/eth_biwi_01054.pdf,,,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Bossard_Event_Recognition_in_2013_ICCV_paper.pdf
+062d67af7677db086ef35186dc936b4511f155d7,http://openaccess.thecvf.com/content_cvpr_2016/papers/Chang_They_Are_Not_CVPR_2016_paper.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Chang_They_Are_Not_CVPR_2016_paper.pdf
+0694b05cbc3ef5d1c5069a4bfb932a5a7b4d5ff0,http://pdfs.semanticscholar.org/0694/b05cbc3ef5d1c5069a4bfb932a5a7b4d5ff0.pdf,,https://doi.org/10.5220/0005038500490055,https://research-information.bris.ac.uk/ws/files/76352533/Ionnas_Pitas_Exploiting_Local_Class_Information_in_Extreme_Learning_Machine_2014.pdf
+060034b59275c13746413ca9c67d6304cba50da6,http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W14/papers/Murthy_Ordered_Trajectories_for_2013_ICCV_paper.pdf,,,http://staff.estem-uc.edu.au/roland/files/2009/05/Ramana-Murthy_Goecke_ICCV2013_THUMOS_OrderedTrajectoriesForLargeScaleHumanActionRecognition.pdf
+060f67c8a0de8fee9c1732b63ab40627993f93d0,,,https://doi.org/10.1007/978-3-642-33564-8,
+060820f110a72cbf02c14a6d1085bd6e1d994f6a,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/3B_113_ext.pdf,,,http://www.vision.caltech.edu/~dhall/projects/CRP/Data/CVPR2015_HALL.pdf
+0653dcdff992ad980cd5ea5bc557efb6e2a53ba1,http://pdfs.semanticscholar.org/0653/dcdff992ad980cd5ea5bc557efb6e2a53ba1.pdf,,,http://repository.lib.polyu.edu.hk/jspui/bitstream/10397/5698/2/b25512948_ir.pdf
+063a3be18cc27ba825bdfb821772f9f59038c207,http://eprints.whiterose.ac.uk/125231/1/kaiser_et_al_17.pdf,,,
+064cd41d323441209ce1484a9bba02a22b625088,http://www.ri.cmu.edu/pub_files/2013/6/stm_final.pdf,,,http://www.humansensing.cs.cmu.edu/sites/default/files/7cvpr13-stm.pdf
+06719154ab53d3a57041b2099167e3619f1677bc,,,,
+06c2dfe1568266ad99368fc75edf79585e29095f,http://ibug.doc.ic.ac.uk/media/uploads/documents/joan_cvpr2014.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Alabort-i-Medina_Bayesian_Active_Appearance_2014_CVPR_paper.pdf
+06f39834e870278243dda826658319be2d5d8ded,http://www.public.asu.edu/~bli24/Papers/ICIP2016_video.pdf,,https://doi.org/10.1109/ICIP.2016.7533150,
+06c956d4aac65752672ce4bd5a379f10a7fd6148,,,https://doi.org/10.1109/LSP.2017.2749763,
+06d7ef72fae1be206070b9119fb6b61ce4699587,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Zafeiriou_On_One-Shot_Similarity_2013_ICCV_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2013.297
+0629bc2b12245195af989e21573369329b7ef2b7,,,,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2016.2553038
+062d0813815c2b9864cd9bb4f5a1dc2c580e0d90,https://infoscience.epfl.ch/record/230310/files/AliakbarianEtAlICCV17.pdf?version=1,,,http://openaccess.thecvf.com/content_ICCV_2017/papers/Aliakbarian_Encouraging_LSTMs_to_ICCV_2017_paper.pdf
+06a9ed612c8da85cb0ebb17fbe87f5a137541603,http://pdfs.semanticscholar.org/06a9/ed612c8da85cb0ebb17fbe87f5a137541603.pdf,,,http://www.sloansportsconference.com/wp-content/uploads/2018/02/2003.pdf
+06959f9cf3226179fa1b05efade843b7844fb2bc,http://www.researchgate.net/profile/Fei_Wu2/publication/4090506_Relevant_linear_feature_extraction_using_side-information_and_unlabeled_data/links/549062220cf214269f2668c9.pdf,,,http://bigeye.au.tsinghua.edu.cn/english/paper/SERCA_ICPR_2th_byafei.pdf
+06ad99f19cf9cb4a40741a789e4acbf4433c19ae,http://pdfs.semanticscholar.org/06ad/99f19cf9cb4a40741a789e4acbf4433c19ae.pdf,,,https://arxiv.org/pdf/1608.04489v1.pdf
+06fb92e110d077c27d401d2f9483964cd0615284,http://www.cs.sunysb.edu/~ial/content/papers/2009/wang_pami09.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2008.244
+6cb4c7f52fbe386a4ab06d5ca61a11d69abba0e4,,,,
+6c27eccf8c4b22510395baf9f0d0acc3ee547862,http://pdfs.semanticscholar.org/6c27/eccf8c4b22510395baf9f0d0acc3ee547862.pdf,,,http://www.dice.ucl.ac.be/Proceedings/esann/esannpdf/es2005-146.pdf
+6c6f0e806e4e286f3b18b934f42c72b67030ce17,,,https://doi.org/10.1109/FG.2011.5771345,
+6cefb70f4668ee6c0bf0c18ea36fd49dd60e8365,http://pdfs.semanticscholar.org/6cef/b70f4668ee6c0bf0c18ea36fd49dd60e8365.pdf,,,https://arxiv.org/pdf/1710.01727v3.pdf
+6c28b3550f57262889fe101e5d027912eb39564e,,,https://doi.org/10.1109/LSP.2014.2338911,
+6c690af9701f35cd3c2f6c8d160b8891ad85822a,http://www.umiacs.umd.edu/~fyang/papers/iccv15.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.426
+6c0048265758442d1620c2a239590d0d9060c09d,,,,
+6c40fc9df6588f7cb721537883167eede1b8d369,,,,
+6c0ad77af4c0850bd01bb118e175ecc313476f27,,,,http://doi.acm.org/10.1145/3009977.3010026
+6ce23cf4f440021b7b05aa3c1c2700cc7560b557,http://pdfs.semanticscholar.org/6ce2/3cf4f440021b7b05aa3c1c2700cc7560b557.pdf,,https://doi.org/10.1007/978-3-319-49409-8_62,https://www-i6.informatik.rwth-aachen.de/publications/download/1012/Hanselmann-ECCV-DESCRW-2016.pdf
+6c9266aa77ea01b9d26a98a483b56e9e8b80eeba,https://www.researchgate.net/profile/Stefano_Tubaro/publication/224641232_Mixed_2D-3D_Information_for_Pose_Estimation_and_Face_Recognition/links/00b7d5178477f30fb3000000.pdf,,https://doi.org/10.1109/ICASSP.2006.1660354,http://gtav.upc.edu/en/publications/papers/2006/mixed-2d-3d-information-for-pose-estimation-and-face-recognition
+6c36ed5391cb3fda6c55a4f71e991f9138e226d0,,,,
+6c2b392b32b2fd0fe364b20c496fcf869eac0a98,http://www3.ntu.edu.sg/home/EXDJiang/JiangX.D.-MVA-13.pdf,,https://doi.org/10.1007/s00138-012-0423-7,
+6c26744149ae08af8bc84137633495fa948b41ad,,,,
+6c30b29b24dc11e37fe36c6e2c283e1c8fe5e339,,,,
+6c6bb85a08b0bdc50cf8f98408d790ccdb418798,http://pdfs.semanticscholar.org/6c6b/b85a08b0bdc50cf8f98408d790ccdb418798.pdf,,,http://poseidon.csd.auth.gr/papers/PUBLISHED/CONFERENCE/pdf/Buciu_PCI_2003.pdf
+6c705285c554985ecfe1117e854e1fe1323f8c21,http://pdfs.semanticscholar.org/6c70/5285c554985ecfe1117e854e1fe1323f8c21.pdf,,,https://arxiv.org/pdf/1803.11264v1.pdf
+6cddc7e24c0581c50adef92d01bb3c73d8b80b41,http://users.soe.ucsc.edu/~milanfar/publications/journal/TIFS_Final.pdf,,https://doi.org/10.1109/TIFS.2011.2159205,https://users.soe.ucsc.edu/~milanfar/publications/journal/TIFS_Final.pdf
+6ca7a82ec1c51417c4f0b8eebddb53a73a3874b1,,,,http://doi.acm.org/10.1145/2708463.2709059
+6cd96f2b63c6b6f33f15c0ea366e6003f512a951,http://pdfs.semanticscholar.org/6cd9/6f2b63c6b6f33f15c0ea366e6003f512a951.pdf,,,http://www.icoci.cms.net.my/proceedings/2009/papers/PID29.pdf
+6c8c7065d1041146a3604cbe15c6207f486021ba,http://pdfs.semanticscholar.org/6c8c/7065d1041146a3604cbe15c6207f486021ba.pdf,,,https://mindmodeling.org/cogsci2012/papers/0453/paper0453.pdf
+6cd5b56f4262c7e13f61a4a6f28eaa805f4e3291,,,,
+39c8ed5213882d4dbc74332245ffe201882c5de1,,,https://doi.org/10.1109/ICASSP.2013.6638045,
+390f3d7cdf1ce127ecca65afa2e24c563e9db93b,https://arxiv.org/pdf/1408.3967v2.pdf,,,http://arxiv.org/pdf/1408.3967v4.pdf
+391b86cf16c2702dcc4beee55a6dd6d3bd7cf27b,http://dayongwang.info/pdf/2014-MM.pdf,,,http://research.larc.smu.edu.sg/mlg/papers/MM14-fp336-hoi.pdf
+395a91d49e9283e1bf2d61a75c3dc846b347ea74,http://cake.fiu.edu/Publications/Reza+al-13-OV.On-demand_Virtual_Health.IEEE.downloaded.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICHI.2013.13
+3918b425bb9259ddff9eca33e5d47bde46bd40aa,http://pdfs.semanticscholar.org/3918/b425bb9259ddff9eca33e5d47bde46bd40aa.pdf,,,http://www.cs.utexas.edu/users/ai-lab/pubs/chen-dissertation.pdf
+39ce143238ea1066edf0389d284208431b53b802,http://pdfs.semanticscholar.org/39ce/143238ea1066edf0389d284208431b53b802.pdf,,https://doi.org/10.1016/j.patcog.2015.08.004,http://www.dcs.bbk.ac.uk/~sjmaybank/Facial%20Expression%20Transfer%20Method%20Based%20on%20Frequency%20Analysis_minor%20version.pdf
+39ce2232452c0cd459e32a19c1abe2a2648d0c3f,http://pdfs.semanticscholar.org/4fac/61d638cf7a1ab995e2ee9a02d3672b12d2ca.pdf,,,http://psych.wisc.edu/niedenthal/fichiers_pdf/2009_mermillod-vermeulen-lundqvist-niedenthal_cognition.pdf
+39f7878f447df7703f2c4ddeeffd7eb0e21f6cd4,http://dev.pubs.doc.ic.ac.uk/Pantic-CVPR05/Pantic-CVPR05.pdf,,,http://www.doc.ic.ac.uk/~maja/ValstarPatrasPantic-CVPR2005-Final.pdf
+3998c5aa6be58cce8cb65a64cb168864093a9a3e,http://cvrr.ucsd.edu/publications/2014/HeadHand.pdf,,https://doi.org/10.1109/IVS.2014.6856610,http://cvrr.ucsd.edu/publications/2014/MartinOhnbarTawariTrivedi_IV2014.pdf
+39dc2ce4cce737e78010642048b6ed1b71e8ac2f,http://www.mirlab.org/conference_papers/International_Conference/ICME%202004/html/papers/P59890.pdf,,,
+397aeaea61ecdaa005b09198942381a7a11cd129,http://pdfs.semanticscholar.org/e30b/df82a358587f7d27ee4ea0b34762328c2a8d.pdf,,,https://arxiv.org/pdf/1712.02874v1.pdf
+391642ec5ade3579654a14c3644af6f086af0158,,,,
+3991223b1dc3b87883cec7af97cf56534178f74a,http://www.ics.uci.edu/~dvk/pub/ICMR13_dvk.pdf,,,http://doi.acm.org/10.1145/2461466.2461469
+396b2963f0403109d92a4d4f26205f279ea79d2c,,,https://doi.org/10.1109/TSMCB.2005.845399,
+39b22bcbd452d5fea02a9ee63a56c16400af2b83,http://www.uoguelph.ca/~gwtaylor/publications/gwtaylor_crv2014.pdf,,,http://www.uoguelph.ca/~gwtaylor/publications/devries2014multi-task.pdf
+399a2c23bd2592ebe20aa35a8ea37d07c14199da,http://pdfs.semanticscholar.org/399a/2c23bd2592ebe20aa35a8ea37d07c14199da.pdf,,https://doi.org/10.1016/j.image.2007.06.006,http://www.cvc.uab.es/~bogdan/Publications/raducanu_SPIC2007.pdf
+397022a4460750c762dbb0aaebcacc829dee8002,,,https://doi.org/10.1109/TIFS.2013.2258152,
+39acf4bb06b889686ca17fd8c89887a3cec26554,,,,http://www.springerlink.com/index/10.1007/s10044-004-0223-4
+396a19e29853f31736ca171a3f40c506ef418a9f,http://pdfs.semanticscholar.org/396a/19e29853f31736ca171a3f40c506ef418a9f.pdf,,,http://www.andrewsenior.com/papers/TianPETS03.pdf
+392d35bb359a3b61cca1360272a65690a97a2b3f,http://pdfs.semanticscholar.org/9cc1/0842f7701bfb92725b4dda4df391b0b341e3.pdf,,https://doi.org/10.5244/C.29.37,http://www.bmva.org/bmvc/2015/papers/paper037/abstract037.pdf
+39c10888a470b92b917788c57a6fd154c97b421c,,,https://doi.org/10.1109/VCIP.2017.8305036,
+39d0de660e2116f32088ce07c3376759d0fdaff5,,,https://doi.org/10.1109/ICPR.2016.7900043,
+397085122a5cade71ef6c19f657c609f0a4f7473,http://pdfs.semanticscholar.org/db11/4901d09a07ab66bffa6986bc81303e133ae1.pdf,,https://doi.org/10.5244/C.29.22,http://vision.ics.uci.edu/papers/GhiasiF_BMVC_2015/GhiasiF_BMVC_2015.pdf
+39c48309b930396a5a8903fdfe781d3e40d415d0,http://www.ri.cmu.edu/pub_files/2017/5/ant_low.pdf,,,https://www.ri.cmu.edu/pub_files/2017/5/ant_low.pdf
+396de136485d85242583951bee4e7b19234bc964,,,,
+3986161c20c08fb4b9b791b57198b012519ea58b,http://pdfs.semanticscholar.org/3986/161c20c08fb4b9b791b57198b012519ea58b.pdf,,,http://ijsce.org/attachments/File/v4i4/D2354094414.pdf
+392425be1c9d9c2ee6da45de9df7bef0d278e85f,http://pdfs.semanticscholar.org/3924/25be1c9d9c2ee6da45de9df7bef0d278e85f.pdf,,,http://cvrr.ucsd.edu/publications/2016/0411.pdf
+392c3cabe516c0108b478152902a9eee94f4c81e,http://pdfs.semanticscholar.org/392c/3cabe516c0108b478152902a9eee94f4c81e.pdf,,,http://www.csd.uwo.ca/~olga/Courses/Fall2009/9840/Papers/TR_tiny_images.pdf
+39f525f3a0475e6bbfbe781ae3a74aca5b401125,http://pdfs.semanticscholar.org/39f5/25f3a0475e6bbfbe781ae3a74aca5b401125.pdf,,,https://arxiv.org/pdf/1611.08091v1.pdf
+3946b8f862ecae64582ef0912ca2aa6d3f6f84dc,http://pdfs.semanticscholar.org/3946/b8f862ecae64582ef0912ca2aa6d3f6f84dc.pdf,,,https://arxiv.org/pdf/1307.8405v1.pdf
+3933416f88c36023a0cba63940eb92f5cef8001a,http://pdfs.semanticscholar.org/3933/416f88c36023a0cba63940eb92f5cef8001a.pdf,,,https://www.cs.umd.edu/~qiu/pub/1308.0273v1.pdf
+39150acac6ce7fba56d54248f9c0badbfaeef0ea,http://pdfs.semanticscholar.org/3915/0acac6ce7fba56d54248f9c0badbfaeef0ea.pdf,,,http://mplab.ucsd.edu/~marni/pubs/Vural_DSP_2007.pdf
+39d6339a39151b5f88ec2d7acc38fe0618d71b5f,,,https://doi.org/10.1109/MMSP.2013.6659285,
+391b273af237b69ebbdfacb8e33b8e873421c780,,,,
+39f03d1dfd94e6f06c1565d7d1bb14ab0eee03bc,http://openaccess.thecvf.com/content_iccv_2015/papers/Lu_Simultaneous_Local_Binary_ICCV_2015_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.424
+3983637022992a329f1d721bed246ae76bc934f7,http://www.cs.umd.edu/~djacobs/pubs_files/SlantCVPRFinal.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2011.5995559
+3980dadd27933d99b2f576c3b36fe0d22ffc4746,,,https://doi.org/10.1109/ROBIO.2017.8324597,
+3960882a7a1cd19dfb711e35a5fc1843ed9002e7,,,,http://doi.acm.org/10.1145/2487575.2487701
+3958db5769c927cfc2a9e4d1ee33ecfba86fe054,http://homes.cs.washington.edu/~neeraj/base/base/papers/nk_pami2011_faceattrs.pdf,,,http://homes.cs.washington.edu/~neeraj/base/projects/faceverification/base/papers/nk_pami2011_faceattrs.pdf
+39ecdbad173e45964ffe589b9ced9f1ebfe2d44e,http://measuringbehavior.org/files/ProceedingsPDF(website)/Gonzalez_FullPaper3.4.pdf,,,http://doi.acm.org/10.1145/1931344.1931352
+398558817e05e8de184cc4c247d4ea51ab9d4d58,,,https://doi.org/10.1109/ICPR.2014.14,
+39b5f6d6f8d8127b2b97ea1a4987732c0db6f9df,https://pdfs.semanticscholar.org/39b5/f6d6f8d8127b2b97ea1a4987732c0db6f9df.pdf,,https://doi.org/10.1109/ICPR.2008.4761856,http://figment.cse.usf.edu/~sfefilat/data/papers/WeBCT9.21.pdf
+99ced8f36d66dce20d121f3a29f52d8b27a1da6c,http://pdfs.semanticscholar.org/99ce/d8f36d66dce20d121f3a29f52d8b27a1da6c.pdf,,https://doi.org/10.1007/978-3-319-73013-4_20,https://arxiv.org/pdf/1709.05675v1.pdf
+9949ac42f39aeb7534b3478a21a31bc37fe2ffe3,http://pdfs.semanticscholar.org/9949/ac42f39aeb7534b3478a21a31bc37fe2ffe3.pdf,,https://doi.org/10.1007/11564386_10,https://pdfs.semanticscholar.org/9949/ac42f39aeb7534b3478a21a31bc37fe2ffe3.pdf
+999289b0ef76c4c6daa16a4f42df056bf3d68377,http://pdfs.semanticscholar.org/9992/89b0ef76c4c6daa16a4f42df056bf3d68377.pdf,,https://doi.org/10.1007/978-3-319-11839-0_5,https://www.cmpe.boun.edu.tr/~salah/dibeklioglu_hbu14.pdf
+993934822a42e70dd35fb366693d847164ca15ff,,,https://doi.org/10.1109/ICME.2009.5202753,
+998244a44f90b3b569f9c93226df70239818ead9,,,,
+9958942a0b7832e0774708a832d8b7d1a5d287ae,https://engineering.purdue.edu/~bouman/publications/pdf/tip29.pdf,,https://doi.org/10.1109/TIP.2010.2071390,https://engineering.purdue.edu/~bouman/publications/orig-pdf/tip29.pdf
+995d55fdf5b6fe7fb630c93a424700d4bc566104,http://openaccess.thecvf.com/content_iccv_2015/papers/Nilsson_The_One_Triangle_ICCV_2015_paper.pdf,,,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Nilsson_The_One_Triangle_ICCV_2015_paper.pdf
+99726ad232cef837f37914b63de70d8c5101f4e2,http://pdfs.semanticscholar.org/9972/6ad232cef837f37914b63de70d8c5101f4e2.pdf,,,https://www.ijser.org/researchpaper/Facial-Expression-Recognition-Using-PCA-Distance-Classifier.pdf
+99a1180c3d39532efecfc5fa251d6893375c91a1,,,https://doi.org/10.1109/ICARCV.2012.6485394,
+9931c6b050e723f5b2a189dd38c81322ac0511de,http://pdfs.semanticscholar.org/9931/c6b050e723f5b2a189dd38c81322ac0511de.pdf,,https://doi.org/10.1016/j.cviu.2015.10.010,http://arxiv.org/pdf/1511.05788v2.pdf
+994b52bf884c71a28b4f5be4eda6baaacad1beee,http://www.yugangjiang.info/publication/BIGMM15-summit-invited.pdf,,,http://doi.ieeecomputersociety.org/10.1109/BigMM.2015.17
+9963c73b03e4649959f021ef6f4fb1eac0b617d2,http://eeeweba.ntu.edu.sg/computervision/Research%20Papers/2017/Person%20Re-identification%20Using%20Multiple%20Egocentric%20Views.pdf,,https://doi.org/10.1109/TCSVT.2016.2615445,
+99e0c03686f7bc9d7add6cff39a941a047c3600a,,,https://doi.org/10.1109/ACCESS.2017.2712788,
+99001ac9fdaf7649c0d0bd8d2078719bafd216d9,http://people.ee.duke.edu/~lcarin/TPAMI_2007_General_tensor_analysis.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2007.1096
+99b8a24aacaa53fa3f8a7e48734037c7b16f1c40,,,https://doi.org/10.1109/ACCESS.2017.2752176,
+9993f1a7cfb5b0078f339b9a6bfa341da76a3168,http://pdfs.semanticscholar.org/9993/f1a7cfb5b0078f339b9a6bfa341da76a3168.pdf,,,http://arxiv.org/abs/1609.09058
+998542e5e3882bb0ce563d390b1e1bff5460e80c,,,https://doi.org/10.1109/AFGR.2008.4813471,
+992ebd81eb448d1eef846bfc416fc929beb7d28b,http://pdfs.semanticscholar.org/992e/bd81eb448d1eef846bfc416fc929beb7d28b.pdf,,,http://pages.cs.wisc.edu/~lizhang/projects/face-parsing/SmithCVPR2013_supplementary.pdf
+992e4119d885f866cb715f4fbf0250449ce0db05,,,https://doi.org/10.1007/s00138-015-0674-1,
+9990e0b05f34b586ffccdc89de2f8b0e5d427067,http://pdfs.semanticscholar.org/9990/e0b05f34b586ffccdc89de2f8b0e5d427067.pdf,,,http://www.ijmo.org/papers/247-T091.pdf
+9989eda2f5392cfe1f789bb0f6213a46d92d1302,,,,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477584
+997c7ebf467c579b55859315c5a7f15c1df43432,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.141
+993374c1c9d58a3dec28160188ff6ac1227d02f5,,,https://doi.org/10.1109/ICARCV.2016.7838650,
+99cd84a62edb2bda2fc2fdc362a72413941f6aa4,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.109
+52012b4ecb78f6b4b9ea496be98bcfe0944353cd,http://pdfs.semanticscholar.org/5201/2b4ecb78f6b4b9ea496be98bcfe0944353cd.pdf,,,https://scienceq.org/Uploaded/Editorial/631997492.pdf
+5278b7a6f1178bf5f90cd3388908925edff5ad46,,,https://doi.org/10.1007/s11704-015-4291-y,
+521cfbc1949289a7ffc3ff90af7c55adeb43db2a,http://pdfs.semanticscholar.org/521c/fbc1949289a7ffc3ff90af7c55adeb43db2a.pdf,,,https://arxiv.org/pdf/1711.07430v1.pdf
+529e2ce6fb362bfce02d6d9a9e5de635bde81191,http://image.sciencenet.cn/olddata/kexue.com.cn/upload/blog/file/2011/1/20111721232398113.pdf,,https://doi.org/10.1109/TIP.2010.2097270,http://www.researchgate.net/profile/Xiaohua_Xie/publication/224203021_Normalization_of_Face_Illumination_Based_on_Large-and_Small-Scale_Features/links/0c9605204502666709000000.pdf
+52887969107956d59e1218abb84a1f834a314578,http://www.cmlab.csie.ntu.edu.tw/~yanying/paper/chen13travel.pdf,,https://doi.org/10.1109/TMM.2013.2265077,
+520782f07474616879f94aae0d9d1fff48910254,,,https://doi.org/10.1016/j.neucom.2014.11.038,
+52d4952426f40394af1db43f429e0b2a2e326197,,,,
+521482c2089c62a59996425603d8264832998403,http://pdfs.semanticscholar.org/5214/82c2089c62a59996425603d8264832998403.pdf,,https://doi.org/10.1016/j.cviu.2015.06.006,http://www.cs.binghamton.edu/~scanavan/papers/CVIU_2015.pdf
+5217ab9b723158b3ba2235e807d165e72fd33007,,,,http://doi.acm.org/10.1145/2043674.2043710
+524c25217a6f1ed17f47871e947a5581d775fa56,,,https://doi.org/10.1117/12.2030875,
+52e2dab86eb1444750b5dc45885288216741220b,,,,
+521b625eebea73b5deb171a350e3709a4910eebf,https://arxiv.org/pdf/1604.06397v1.pdf,,,http://www3.cs.stonybrook.edu/~minhhoai/papers/GAC_CVPR16_final.pdf
+52258ec5ec73ce30ca8bc215539c017d279517cf,http://pdfs.semanticscholar.org/5225/8ec5ec73ce30ca8bc215539c017d279517cf.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICPR.2002.1044632
+5253c94f955146ba7d3566196e49fe2edea1c8f4,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Kemelmacher-Shlizerman_Internet_Based_Morphable_2013_ICCV_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2013.404
+527dda77a3864d88b35e017d542cb612f275a4ec,https://arxiv.org/pdf/1709.00531v1.pdf,,https://doi.org/10.1109/BTAS.2017.8272734,http://arxiv.org/abs/1709.00531
+529b1f33aed49dbe025a99ac1d211c777ad881ec,https://teresaproject.eu/wp-content/uploads/2015/07/kossaifi_bidirectional_icip.pdf,,https://doi.org/10.1109/ICIP.2015.7350977,https://ibug.doc.ic.ac.uk/media/uploads/documents/kossaifi_bidirectional_icip.pdf
+523b2cbc48decfabffb66ecaeced4fe6a6f2ac78,https://arxiv.org/pdf/1708.09126v1.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ACII.2017.8273626
+5287d8fef49b80b8d500583c07e935c7f9798933,http://pdfs.semanticscholar.org/8e65/13b642dcd5dc0fb60173dd0da1d8440eba8d.pdf,,,http://www-personal.umich.edu/~reedscot/files/icml2016.pdf
+52c59f9f4993c8248dd3d2d28a4946f1068bcbbe,http://blogs.bu.edu/joewang/files/2013/06/allerton_2011_v2.pdf,,https://doi.org/10.1109/Allerton.2011.6120242,
+52c71d20dced998a607c466241dfc2eb88183de8,,,,
+52e270ca8f5b53eabfe00a21850a17b5cc10f6d5,,,https://doi.org/10.1109/ROBIO.2013.6739643,
+5226296884b3e151ce317a37f94827dbda0b9d16,,,https://doi.org/10.1109/IWBF.2016.7449690,
+52bf00df3b970e017e4e2f8079202460f1c0e1bd,http://pdfs.semanticscholar.org/52bf/00df3b970e017e4e2f8079202460f1c0e1bd.pdf,,,http://arxiv.org/pdf/1511.06988v1.pdf
+5213549200bccec57232fc3ff788ddf1043af7b3,,,,http://doi.acm.org/10.1145/2601097.2601204
+52c91fcf996af72d191520d659af44e310f86ef9,http://pdfs.semanticscholar.org/52c9/1fcf996af72d191520d659af44e310f86ef9.pdf,,,http://ttic.uchicago.edu/~smaji/cvhc2014/kovashkainteractive2014.pdf
+52a9f957f776c8b3d913cfcd20452b9e31c27845,http://pdfs.semanticscholar.org/52a9/f957f776c8b3d913cfcd20452b9e31c27845.pdf,,https://doi.org/10.1016/j.patcog.2017.03.016,https://arxiv.org/pdf/1609.09178v1.pdf
+526c79c6ce39882310b814b7918449d48662e2a9,,,https://doi.org/10.1109/ICASSP.2005.1416338,
+52885fa403efbab5ef21274282edd98b9ca70cbf,http://www.aiia.csd.auth.gr/EN/cor_baayen/Discriminant_Graph_Structures_FER.pdf,,https://doi.org/10.1109/TMM.2008.2007292,http://ikee.lib.auth.gr/record/115043/files/Zafeiriou.pdf
+524f6dc7441a3899ea8eb5d3e0d5d70e50ba566a,,,,http://doi.acm.org/10.1145/2797143.2797165
+52f23e1a386c87b0dab8bfdf9694c781cd0a3984,http://pdfs.semanticscholar.org/52f2/3e1a386c87b0dab8bfdf9694c781cd0a3984.pdf,,https://doi.org/10.1016/j.neucom.2015.04.006,http://poseidon.csd.auth.gr/papers/PUBLISHED/JOURNAL/pdf/2015/2_A_Iosifidis_NEUCOM_DropELM.pdf
+528069963f0bd0861f380f53270c96c269a3ea1c,http://pdfs.semanticscholar.org/5280/69963f0bd0861f380f53270c96c269a3ea1c.pdf,,,http://orca.cf.ac.uk/82405/1/2015vandeventerjphd.pdf
+5239001571bc64de3e61be0be8985860f08d7e7e,http://pdfs.semanticscholar.org/5239/001571bc64de3e61be0be8985860f08d7e7e.pdf,,,https://arxiv.org/pdf/1607.06871v2.pdf
+52b102620fff029b80b3193bec147fe6afd6f42e,,,,http://dl.acm.org/citation.cfm?id=3028863
+556b9aaf1bc15c928718bc46322d70c691111158,https://www.ecse.rpi.edu/~cvrl/lwh/myPublications/ICPR08_BNlearning_camera.pdf,,https://doi.org/10.1109/ICPR.2008.4761074,http://figment.cse.usf.edu/~sfefilat/data/papers/MoBT9.33.pdf
+550858b7f5efaca2ebed8f3969cb89017bdb739f,http://pdfs.semanticscholar.org/5508/58b7f5efaca2ebed8f3969cb89017bdb739f.pdf,,https://doi.org/10.1109/ICRA.2011.5980310,http://www.researchgate.net/profile/Gn_Desouza/publication/224252716_Wii_Using_Only_We_Using_background_subtraction_and_human_pose_recognition_to_eliminate_game_controllers/links/02e7e53c6d2dd70b9a000000.pdf
+5551a03353f571b552125dd4ee57301b69a10c46,,,https://doi.org/10.1016/j.neucom.2015.09.083,
+55c46ae1154ed310610bdf5f6d9e7023d14c7eb4,,,,http://doi.acm.org/10.1145/1027933.1028013
+554b9478fd285f2317214396e0ccd81309963efd,http://pdfs.semanticscholar.org/554b/9478fd285f2317214396e0ccd81309963efd.pdf,,,http://www-l2ti.univ-paris13.fr/~beghdadi/wp-content/uploads/2012/09/SPIE_2015_MMBS.pdf
+55cc90968e5e6ed413dd607af2a850ac2f54e378,http://pdfs.semanticscholar.org/55cc/90968e5e6ed413dd607af2a850ac2f54e378.pdf,,https://doi.org/10.1016/j.cviu.2014.03.008,http://www.umiacs.umd.edu/~arijit/subclustering.pdf
+55ee484f9cbd62111512485e3c1c3eadbf2e15c0,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.25
+559795d3f3b096ceddc03720ba62d79d50eae300,http://www3.nd.edu/~kwb/BarrBowyerFlynnTIFS_2014.pdf,,https://doi.org/10.1109/TIFS.2014.2359369,https://www3.nd.edu/~kwb/Barr_Bowyer_Flynn_TIFS_2014.pdf
+559645d2447004355c83737a19c9a811b45780f1,,,https://doi.org/10.1109/ICB.2015.7139114,
+558fc9a2bce3d3993a9c1f41b6c7f290cefcf92f,http://pdfs.semanticscholar.org/558f/c9a2bce3d3993a9c1f41b6c7f290cefcf92f.pdf,,,http://eprints-phd.biblio.unitn.it/2669/1/Duta_PhD-Thesis.pdf
+55138c2b127ebdcc508503112bf1d1eeb5395604,http://pdfs.semanticscholar.org/7815/368a8f6474910d3faf798198ff9dae836360.pdf,,,http://www.eecs.berkeley.edu/~ameet/ens.pdf
+5502dfe47ac26e60e0fb25fc0f810cae6f5173c0,http://pdfs.semanticscholar.org/5502/dfe47ac26e60e0fb25fc0f810cae6f5173c0.pdf,,,http://www.cc.gatech.edu/~thermans/papers/hermans-icra-spme2011.pdf
+55e18e0dde592258882134d2dceeb86122b366ab,http://pdfs.semanticscholar.org/f863/ba982068d676084032146e8053d4791114e9.pdf,,https://doi.org/10.1613/jair.2962,http://arxiv.org/pdf/1405.7711v1.pdf
+550351edcfd59d3666984771f5248d95548f465a,,,https://doi.org/10.1109/TIP.2014.2327805,
+556545eec370b9d300fc044a1aa63fc44fd79b0f,http://www.cs.cmu.edu/~dhoiem/publications/cvpr2010_gangwang.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2010.5539955
+55a158f4e7c38fe281d06ae45eb456e05516af50,http://pdfs.semanticscholar.org/55a1/58f4e7c38fe281d06ae45eb456e05516af50.pdf,,,http://www.graphicon.ru/proceedings/2012/conference/RU1%20-%20Biometry/gc2012konushin.pdf
+5594beb2b314f5433bd7581f64bdbc58f2933dc4,,,https://doi.org/10.1016/j.neucom.2016.12.013,
+5506a1a1e1255353fde05d9188cb2adc20553af5,http://pdfs.semanticscholar.org/ff69/cb49c8cb86d0afadbcfa0baa607d7065965a.pdf,,,https://arxiv.org/pdf/1611.00284v1.pdf
+55966926e7c28b1eee1c7eb7a0b11b10605a1af0,http://pdfs.semanticscholar.org/baa8/bdeb5aa545af5b5f43efaf9dda08490da0bc.pdf,,,http://www.aaai.org/ocs/index.php/AAAI/AAAI15/paper/download/9845/9816
+552c55c71bccfc6de7ce1343a1cd12208e9a63b3,https://ivi.fnwi.uva.nl/isis/publications/2008/ValentiCVPR2008/ValentiCVPR2008.pdf,,,http://staff.science.uva.nl/~rvalenti/publications/CVPR08.pdf
+55fdff2881d43050a8c51c7fdc094dbfbbe6fa46,,,https://doi.org/10.1109/ICB.2016.7550064,
+5517b28795d7a68777c9f3b2b46845dcdb425b2c,http://pdfs.semanticscholar.org/5517/b28795d7a68777c9f3b2b46845dcdb425b2c.pdf,,,http://arxiv.org/abs/1603.06531
+55c81f15c89dc8f6eedab124ba4ccab18cf38327,http://pdfs.semanticscholar.org/d31e/258f6af40f457c27ce118986ea157673c9c4.pdf,,https://doi.org/10.5244/C.20.37,http://www.bmva.ac.uk/bmvc/2006/papers/157.pdf
+55bc7abcef8266d76667896bbc652d081d00f797,http://www.cse.msu.edu/~rossarun/pubs/ChenCosmeticsGenderAge_VISAPP2014.pdf,,https://doi.org/10.5220/0004746001820190,
+55b4b1168c734eeb42882082bd131206dbfedd5b,http://pdfs.semanticscholar.org/76fd/f16bcc2cb260b9e6b2880c8fe128533bc2c6.pdf,,,http://papers.nips.cc/paper/4769-learning-to-align-from-scratch
+55079a93b7d1eb789193d7fcdcf614e6829fad0f,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w10/papers/Conde_Efficient_and_Robust_ICCV_2015_paper.pdf,,,http://mi.informatik.uni-siegen.de/publications/Conde_Efficient_and_Robust_ICCV_2015_paper.pdf
+553a605243b77a76c1ed4c1ad4f9a43ff45e391b,,,https://doi.org/10.1109/CISP-BMEI.2017.8302001,
+55804f85613b8584d5002a5b0ddfe86b0d0e3325,http://pdfs.semanticscholar.org/ba13/b161aa8e6f6cb511592016058882d976a898.pdf,,,http://authors.library.caltech.edu/27081/1/dcomplex.pdf
+557115454c1b8e6eaf8dbb65122c5b00dc713d51,,,https://doi.org/10.1109/LSP.2011.2140370,
+551fa37e8d6d03b89d195a5c00c74cc52ff1c67a,http://pdfs.semanticscholar.org/551f/a37e8d6d03b89d195a5c00c74cc52ff1c67a.pdf,,https://doi.org/10.1007/978-3-319-48881-3_58,http://arxiv.org/pdf/1609.05281v1.pdf
+55266ddbe9d5366e8cd1b0b645971cad6d12157a,,,https://doi.org/10.1109/SIU.2017.7960368,
+556875fb04ed6043620d7ca04dfe3d8b3a9284f5,,,https://doi.org/10.1109/ICPR.2014.437,
+55eb7ec9b9740f6c69d6e62062a24bfa091bbb0c,http://pdfs.semanticscholar.org/55eb/7ec9b9740f6c69d6e62062a24bfa091bbb0c.pdf,,https://doi.org/10.1007/978-3-319-39513-5_5,http://sujingwang.name/publication/Casme2.pdf
+5599ac2cd569ed83ecab8449d2f245e13034da06,,,,
+55b9b1c1c5487f5f62b44340104a9c4cc2ed7c96,http://pdfs.semanticscholar.org/55b9/b1c1c5487f5f62b44340104a9c4cc2ed7c96.pdf,,,http://arxiv.org/abs/1609.06657
+55fd4639c2126de5ad69d23b8a6e670a05911b9d,,,,
+9745a7f38c9bba9d2fd076813fc9ab7a128a3e19,,,,http://doi.acm.org/10.1145/2393347.2396335
+97f3d35d3567cd3d973c4c435cdd6832461b7c3c,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.75
+979fd81d135078886808839391adf1249c354cca,,,,
+97c554fbcf783d554c4f6c2f3fcc0a0f9dba0759,,,,http://doi.ieeecomputersociety.org/10.1109/ISM.2016.0085
+973e3d9bc0879210c9fad145a902afca07370b86,http://pdfs.semanticscholar.org/973e/3d9bc0879210c9fad145a902afca07370b86.pdf,,,http://thesai.org/Downloads/Volume7No7/Paper_72-From_Emotion_Recognition_to_Website.pdf
+9776a9f3c59907f45baaeda4b8907dcdac98aef1,,,https://doi.org/10.1109/CISP-BMEI.2017.8301924,
+970c0d6c0fd2ebe7c5921a45bc70f6345c844ff3,http://pdfs.semanticscholar.org/970c/0d6c0fd2ebe7c5921a45bc70f6345c844ff3.pdf,,,https://www.ijcai.org/Proceedings/16/Papers/475.pdf
+97b8249914e6b4f8757d22da51e8347995a40637,http://rogerioferis.com/VisualRecognitionAndSearch2014/material/papers/FerisTransMultimedia2012.pdf,,https://doi.org/10.1109/TMM.2011.2170666,http://rogerioferis.com/publications/FerisTransMultimedia2012.pdf
+972ef9ddd9059079bdec17abc8b33039ed25c99c,http://pdfs.semanticscholar.org/972e/f9ddd9059079bdec17abc8b33039ed25c99c.pdf,,,http://ijiet.com/wp-content/uploads/2014/12/36.pdf
+97c59db934ff85c60c460a4591106682b5ab9caa,,,https://doi.org/10.1109/BTAS.2012.6374568,
+97032b13f1371c8a813802ade7558e816d25c73f,http://pdfs.semanticscholar.org/9703/2b13f1371c8a813802ade7558e816d25c73f.pdf,,,http://www.doc.ic.ac.uk/~khilan/index_files/fr-report.pdf
+978a219e07daa046244821b341631c41f91daccd,http://pdfs.semanticscholar.org/e2b9/f8b66d3f9080ccb14f058cf4798cb4d89241.pdf,,https://doi.org/10.1007/978-3-540-78293-3_5,http://www.cs.bham.ac.uk/~cpc/publications/creed_handbook_emotion_08.pdf
+976e0264bb57786952a987d4456850e274714fb8,https://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Dehghan_Improving_Semantic_Concept_2014_CVPR_paper.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Dehghan_Improving_Semantic_Concept_2014_CVPR_paper.pdf
+978b32ff990d636f7e2050bb05b8df7dfcbb42a1,,,https://doi.org/10.1109/BTAS.2014.6996270,
+9758f3fd94239a8d974217fe12599f88fb413f3d,http://pdfs.semanticscholar.org/9758/f3fd94239a8d974217fe12599f88fb413f3d.pdf,,,http://crcv.ucf.edu/THUMOS14/papers/Univ%20of%20Canberra-HCC.pdf
+97f9c3bdb4668f3e140ded2da33fe704fc81f3ea,http://pdfs.semanticscholar.org/97f9/c3bdb4668f3e140ded2da33fe704fc81f3ea.pdf,,https://doi.org/10.1007/3-540-61750-7_32,ftp://ftp-robotvis.inria.fr/pub/html/Papers/mundy-liu-etal:96.ps.gz
+9729930ab0f9cbcd07f1105bc69c540330cda50a,,,https://doi.org/10.1109/ACCESS.2017.2749331,
+97946f13c1cf8924b0c1ce88682290ae87d630a1,,,,
+9790ec6042fb2665c7d9369bf28566b0ce75a936,,,,http://doi.acm.org/10.1145/3056540.3056546
+97e569159d5658760eb00ca9cb662e6882d2ab0e,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989c291.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2013.297
+97d1d561362a8b6beb0fdbee28f3862fb48f1380,http://pages.cs.wisc.edu/~gdguo/myPapersOnWeb/PAMI10Guo.pdf,,,http://www.cs.wisc.edu/~gdguo/myPapersOnWeb/PAMI10Guo.pdf
+97540905e4a9fdf425989a794f024776f28a3fa9,http://pdfs.semanticscholar.org/cc5a/1bf68ba00c20415e43684c6f75ce3fbc176c.pdf,,,http://arxiv.org/abs/1801.08297
+97865d31b5e771cf4162bc9eae7de6991ceb8bbf,http://pdfs.semanticscholar.org/9786/5d31b5e771cf4162bc9eae7de6991ceb8bbf.pdf,,,https://repository.iiitd.edu.in/jspui/bitstream/handle/123456789/357/MT13100.pdf;sequence=1
+973022a1f9e30a624f5e8f7158b5bbb114f4af32,,,,http://doi.acm.org/10.1145/3011077.3011138
+9774430006f1ed017156b17f3cf669071e398c58,,,https://doi.org/10.1109/SMC.2013.513,
+9797de286a3101fc31fb51995c18ec7d3eab804d,,,,
+976c9f88c23e892c75c452b450407841e5161a32,,,,
+9753ee59db115e1e84a7c045f2234a3f63f255b1,,,,http://doi.ieeecomputersociety.org/10.1109/ACII.2015.7344683
+9771e04f48d8a1d7ae262539de8924117a04c20d,,,,http://doi.ieeecomputersociety.org/10.1109/CGIV.2007.70
+9755554b13103df634f9b1ef50a147dd02eab02f,https://arxiv.org/pdf/1610.00134v1.pdf,,https://doi.org/10.1109/BIOSIG.2016.7736925,http://arxiv.org/abs/1610.00134
+635158d2da146e9de559d2742a2fa234e06b52db,http://www.openu.ac.il/home/hassner/projects/cnn_emotions/LeviHassnerICMI15.pdf,,,"http://www.docum-enter.com/get/d1y3RvllH36yjINsiopl5jjhARrn10ldjnlVrCiMGlM,/Emotion-Recognition-in-the-Wild-via-Convolutional-Neural.pdf"
+63a4105adbe182e67d8fd324de5c84a6df444294,,,,
+63d8110ac76f57b3ba8a5947bc6bdbb86f25a342,http://pdfs.semanticscholar.org/63d8/110ac76f57b3ba8a5947bc6bdbb86f25a342.pdf,,,http://web.cse.msu.edu/~liuxm/publication/FG2002_xiaoming1.pdf
+63f2d1a64737afa1608588b9651b1e4207e82d1c,http://staff.estem-uc.edu.au/roland/files/2009/05/Rajagopalan_Goecke_ICIP2014_DetectingSelf-StimulatoryBehavioursForAutismDiagnosis.pdf,,https://doi.org/10.1109/ICIP.2014.7025294,
+63c74794aedb40dd6b1650352a2da7a968180302,,,https://doi.org/10.1016/j.neucom.2016.09.015,
+63cf5fc2ee05eb9c6613043f585dba48c5561192,http://pdfs.semanticscholar.org/63cf/5fc2ee05eb9c6613043f585dba48c5561192.pdf,,,http://repository.tudelft.nl/assets/uuid:4a8f0412-fc16-4dc7-8f42-cb223c64de1b/thesis_final.pdf
+6339e9385ae3609cb22f6b87175c7e6850f2c05b,http://vision.ucmerced.edu/papers/Yang_WACV12_EstimatingTheSpatialExtent.pdf,,,http://vision.ucmerced.edu/projects/integrating/papers/Yang_WACV2012_EstimatingTheSpatialExtents.pdf
+637b31157386efbde61505365c0720545248fbae,,,https://doi.org/10.1109/BTAS.2017.8272721,
+6324fada2fb00bd55e7ff594cf1c41c918813030,http://pdfs.semanticscholar.org/6324/fada2fb00bd55e7ff594cf1c41c918813030.pdf,,,http://www.aaai.org/ocs/index.php/WS/AAAIW13/paper/download/7013/6753
+6308e9c991125ee6734baa3ec93c697211237df8,http://www.ifp.illinois.edu/~jyang29/papers/ICME-SSR.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICME.2011.6012083
+6342a4c54835c1e14159495373ab18b4233d2d9b,http://pdfs.semanticscholar.org/6342/a4c54835c1e14159495373ab18b4233d2d9b.pdf,,,http://eprints.qut.edu.au/77836/1/Moh%20Edi_Wibowo_Thesis.pdf
+63d8d69e90e79806a062cb8654ad78327c8957bb,http://pdfs.semanticscholar.org/63d8/d69e90e79806a062cb8654ad78327c8957bb.pdf,,https://doi.org/10.1016/j.procs.2010.11.003,http://eprints.uwe.ac.uk/17653/1/icebt10_repo.pdf
+63c109946ffd401ee1195ed28f2fb87c2159e63d,http://pdfs.semanticscholar.org/63c1/09946ffd401ee1195ed28f2fb87c2159e63d.pdf,,,https://pdfs.semanticscholar.org/63c1/09946ffd401ee1195ed28f2fb87c2159e63d.pdf
+63b29886577a37032c7e32d8899a6f69b11a90de,http://pdfs.semanticscholar.org/63b2/9886577a37032c7e32d8899a6f69b11a90de.pdf,,https://doi.org/10.1007/978-3-642-12307-8_30,http://www.cvlab.cs.tsukuba.ac.jp/~kfukui/english/epapers/MO8-3-594.pdf
+638e0d6f9f5d714d8a0edcf65297e8735b30db71,,,,
+6345c0062885b82ccb760c738a9ab7fdce8cd577,,,https://doi.org/10.1109/EMBC.2016.7590729,
+63a6c256ec2cf2e0e0c9a43a085f5bc94af84265,http://www.cs.tau.ac.il/~wolf/papers/complexity-multiverse-networks.pdf,,https://doi.org/10.1109/ICPR.2016.7899662,
+63213d080a43660ac59ea12e3c35e6953f6d7ce8,https://arxiv.org/pdf/1704.02895v1.pdf,,,http://openaccess.thecvf.com/content_cvpr_2017/papers/Girdhar_ActionVLAD_Learning_Spatio-Temporal_CVPR_2017_paper.pdf
+630d1728435a529d0b0bfecb0e7e335f8ea2596d,http://pdfs.semanticscholar.org/aa67/719e839d035e4d67e4434794b6cccaf091d6.pdf,,,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Ding_Facial_Action_Unit_2013_ICCV_paper.pdf
+63eefc775bcd8ccad343433fc7a1dd8e1e5ee796,http://www.lv-nus.org/papers%5C2008%5C2008_J_6.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2008.154
+635d2696aa597a278dd6563f079be06aa76a33c0,,,https://doi.org/10.1109/ICIP.2016.7532429,
+636c786d4e4ac530ac85e3883a2f2cf469e45fe2,,,https://doi.org/10.1016/j.neucom.2016.12.043,
+63340c00896d76f4b728dbef85674d7ea8d5ab26,https://www.comp.nus.edu.sg/~tsim/documents/fkt-dsa-pami-published.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2007.1089
+6343bc0013343b6a5f96154f02d18dcd36a3f74c,,,https://doi.org/10.1007/s11042-014-2083-2,
+63ce37da6c0c789099307337bb913e1104473854,http://pdfs.semanticscholar.org/63ce/37da6c0c789099307337bb913e1104473854.pdf,,,http://web.cse.msu.edu/~liuxm/publication/Chen_Liu_PRL_OneClassTransfer.pdf
+63a2e2155193dc2da9764ae7380cdbd044ff2b94,http://pdfs.semanticscholar.org/a8fb/2c65a23d1e75c4923c36fdd6e3d2a4b3d8f7.pdf,,https://doi.org/10.1007/978-3-319-04114-8_32,http://img.cs.uec.ac.jp/pub/conf13/140108dohang_0_ppt.pdf
+63367972e1ada96dd47211d86ddee83f65ca1880,,,,
+63d865c66faaba68018defee0daf201db8ca79ed,http://pdfs.semanticscholar.org/63d8/65c66faaba68018defee0daf201db8ca79ed.pdf,,,http://arxiv.org/abs/1409.5230
+63cff99eff0c38b633c8a3a2fec8269869f81850,http://pdfs.semanticscholar.org/63cf/f99eff0c38b633c8a3a2fec8269869f81850.pdf,,https://doi.org/10.1007/978-3-540-74549-5_9,http://www.ics.uci.edu/~xzhu/paper/FCF-ICB07.pdf
+634541661d976c4b82d590ef6d1f3457d2857b19,http://pdfs.semanticscholar.org/6345/41661d976c4b82d590ef6d1f3457d2857b19.pdf,,,http://amsdottorato.unibo.it/6355/1/sun_yunlian_tesi.pdf
+6332a99e1680db72ae1145d65fa0cccb37256828,http://pdfs.semanticscholar.org/6332/a99e1680db72ae1145d65fa0cccb37256828.pdf,,,http://www.maia.ub.es/~sergio/linked/tonitesis.pdf
+63488398f397b55552f484409b86d812dacde99a,http://pdfs.semanticscholar.org/6348/8398f397b55552f484409b86d812dacde99a.pdf,,,http://www.comp.nus.edu.sg/~tsim/documents/age-iccv.pdf
+6341274aca0c2977c3e1575378f4f2126aa9b050,http://arxiv.org/pdf/1609.03536v1.pdf,,https://doi.org/10.1109/ICPR.2016.7899705,https://arxiv.org/pdf/1609.03536v1.pdf
+63c022198cf9f084fe4a94aa6b240687f21d8b41,http://pdfs.semanticscholar.org/63c0/22198cf9f084fe4a94aa6b240687f21d8b41.pdf,,,http://research.microsoft.com/en-us/um/people/pkohli/papers/jetkw_aistats2015.pdf
+632441c9324cd29489cee3da773a9064a46ae26b,http://pdfs.semanticscholar.org/6324/41c9324cd29489cee3da773a9064a46ae26b.pdf,,,https://open.library.ubc.ca/media/download/pdf/24/1.0166350/1/1695
+0f65c91d0ed218eaa7137a0f6ad2f2d731cf8dab,http://arxiv.org/pdf/1401.5311v1.pdf,,,http://arxiv.org/pdf/1401.5311v2.pdf
+0fc5c6f06e40014a56f492172f44c073d269e95c,,,https://doi.org/10.1108/17563781311301490,
+0f9dd79de75a3dce394846369f09c05ddf250e31,,,,
+0f112e49240f67a2bd5aaf46f74a924129f03912,http://www.cse.msu.edu/biometrics/Publications/Face/ParkTongJain_AgeInvariantFaceRecognition_PAMI10.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.14
+0fb45e704ef3ca1f9c70e7be3fb93b53714ed8b5,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.142
+0fc254272db096a9305c760164520ad9914f4c9e,https://arxiv.org/pdf/1601.06087v1.pdf,,https://doi.org/10.1109/ICIP.2016.7532634,https://qmro.qmul.ac.uk/xmlui/bitstream/handle/123456789/18791/Ahmadi%20Unsupervised%20convolutional%20neural%202016%20Accepted.pdf?sequence=1
+0fae5d9d2764a8d6ea691b9835d497dd680bbccd,http://pdfs.semanticscholar.org/0fae/5d9d2764a8d6ea691b9835d497dd680bbccd.pdf,,,http://www.ncc.org.in/download.php?f=NCC2007/1.2.4.pdf
+0f4cfcaca8d61b1f895aa8c508d34ad89456948e,http://signal.ee.bilkent.edu.tr/defevent/abstract/a2051.pdf,http://ieeexplore.ieee.org/document/7078507/,,http://isl.ira.uka.de/~stiefel/papers/EUSIPCO05_ekenel.pdf
+0fdcfb4197136ced766d538b9f505729a15f0daf,https://arxiv.org/pdf/0907.5321v2.pdf,,,http://arxiv.org/pdf/0907.5321v1.pdf
+0fad544edfc2cd2a127436a2126bab7ad31ec333,http://citeseerx.ist.psu.edu/viewdoc/download;jsessionid=7D609FEFFC36336C4A45ECA3B56C336A?doi=10.1.1.476.9590&rep=rep1&type=pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2014.211
+0f32df6ae76402b98b0823339bd115d33d3ec0a0,http://perceptual.mpi-inf.mpg.de/files/2015/07/Mueller15_ACII.pdf,,,http://datasets.d2.mpi-inf.mpg.de/MPIIEmo/supp_mat.pdf
+0fee3b9191dc1cef21f54232a23530cd8169d3b2,,,https://doi.org/10.1109/ICDM.2016.0050,
+0fd1715da386d454b3d6571cf6d06477479f54fc,http://pdfs.semanticscholar.org/0fd1/715da386d454b3d6571cf6d06477479f54fc.pdf,,https://doi.org/10.1007/s10846-015-0259-2,http://www.cs.columbia.edu/~allen/S17/Student_Papers/emotion_survey.pdf
+0f9bf5d8f9087fcba419379600b86ae9e9940013,http://pdfs.semanticscholar.org/0f9b/f5d8f9087fcba419379600b86ae9e9940013.pdf,,https://doi.org/10.1016/j.neucom.2016.02.011,http://www.ee.cuhk.edu.hk/~knngan/2016/NC_v194_p10-23.pdf
+0f829fee12e86f980a581480a9e0cefccb59e2c5,http://www.cs.columbia.edu/~liujx09/posters/birdpart_poster.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2013.313
+0faee699eccb2da6cf4307ded67ba8434368257b,http://pdfs.semanticscholar.org/2396/5bd9b557b04b2c81a35ee5c16951c0e420f3.pdf,,,http://www.openu.ac.il/home/hassner/projects/multishot/TWH_BMVC09_Multishot.pdf
+0fabb4a40f2e3a2502cd935e54e090a304006c1c,http://arxiv.org/pdf/1202.4207v2.pdf,,https://doi.org/10.1109/TIP.2012.2235849,http://www.matlabi.ir/wp-content/uploads/bank_papers/ipaper/i23_www.Matlabi.ir_Regularized%20Robust%20Coding%20for%20Face%20Recognition.pdf
+0f92e9121e9c0addc35eedbbd25d0a1faf3ab529,http://pdfs.semanticscholar.org/0f92/e9121e9c0addc35eedbbd25d0a1faf3ab529.pdf,,,http://libres.uncg.edu/ir/uncw/f/wangy2017-2.pdf
+0f2461a265be997c962fa562ae48378fb964b7b4,,,https://doi.org/10.1109/BigData.2016.7841028,
+0f0366070b46972fcb2976775b45681e62a94a26,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2014/W01/papers/Bendale_Reliable_Posterior_Probability_2014_CVPR_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2014.14
+0ff23392e1cb62a600d10bb462d7a1f171f579d0,http://www.umiacs.umd.edu/~jhchoi/paper/icpr2014_slide.pdf,,https://doi.org/10.1109/ICPR.2014.757,http://www.umiacs.umd.edu/~jhchoi/paper/icpr2014_cossparse.pdf
+0fd3a7ee228bbc3dd4a111dae04952a1ee58a8cd,http://media.cs.tsinghua.edu.cn/~ahz/papers/%5B2011%5D%5Bacpr%5Dwang%20nan.pdf,,https://doi.org/10.1109/ACPR.2011.6166682,http://media.cs.tsinghua.edu.cn/~imagevision/papers/%5B2011%5D%5Bacpr%5Dwang%20nan.pdf
+0faf441a1ef1e788fb9ccd20484b104a1fa95ee8,,,,
+0f533bc9fdfb75a3680d71c84f906bbd59ee48f1,http://www.iis.sinica.edu.tw/papers/song/11837-F.pdf,,,https://www.iis.sinica.edu.tw/papers/song/11837-F.pdf
+0f53ab8b6c428127753281dd77cf94bdb889b624,https://www.researchgate.net/profile/Dian_Tjondronegoro/publication/224257559_Toward_a_more_robust_facial_expression_recognition_in_occluded_images_using_randomly_sampled_Gabor_based_templates/links/00b7d51f84babec8ad000000.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICME.2011.6012015
+0f4eb63402a4f3bae8f396e12133684fb760def1,http://pdfs.semanticscholar.org/8c4e/b15de264af9f92a93d6e89d36295c5c4bf37.pdf,,,http://www.bmva.org/bmvc/2016/papers/paper040/paper040.pdf
+0fba39bf12486c7684fd3d51322e3f0577d3e4e8,http://vision.ucsd.edu/~pdollar/files/papers/BabenkoICCV07boom.pdf,,,http://vision.cornell.edu/se3/wp-content/uploads/2014/09/boom_iccv07.pdf
+0f395a49ff6cbc7e796656040dbf446a40e300aa,http://pdfs.semanticscholar.org/0f39/5a49ff6cbc7e796656040dbf446a40e300aa.pdf,,,http://ftp.ncbi.nlm.nih.gov/pub/pmc/e1/17/fpsyg-06-01937.PMC4686644.pdf
+0fb8317a8bf5feaf297af8e9b94c50c5ed0e8277,http://pdfs.semanticscholar.org/0fb8/317a8bf5feaf297af8e9b94c50c5ed0e8277.pdf,,https://doi.org/10.1007/978-3-319-74727-9_39,https://arxiv.org/pdf/1709.02780v1.pdf
+0f22b89341d162a7a0ebaa3c622d9731e5551064,,,,http://doi.ieeecomputersociety.org/10.1109/AIPR.2011.6176352
+0f8116b631c17f7adf55df3faafc6f2c316599f6,,,,
+0fdc3cbf92027cb1200f3f94927bef017d7325ae,,,https://doi.org/10.1109/BTAS.2015.7358771,
+0f29bc5d8458358d74dc8c4fd6968b4182dd71d2,,,https://doi.org/10.1109/ICIP.2016.7532637,
+0f811d717c459c897a4fbffb3ccd9ac794be0b8f,,,,
+0fe96806c009e8d095205e8f954d41b2b9fd5dcf,http://pdfs.semanticscholar.org/51be/ffe5f96ccb6b64057a540a7874185ccad8d7.pdf,,,https://arxiv.org/pdf/1506.03140v2.pdf
+0f1cb558b32c516e2b6919fea0f97a307aaa9091,,,https://doi.org/10.1007/s41095-017-0091-7,
+0f1d42e1296474c9211fb57604574ba0cae4380d,,,,
+0f1cbe4e26d584c82008ccef9fb1e4669b82de1f,http://figment.cse.usf.edu/~sfefilat/data/papers/MoBT9.24.pdf,,https://doi.org/10.1109/ICPR.2008.4761064,
+0f940d2cdfefc78c92ec6e533a6098985f47a377,https://www.ecse.rpi.edu/~cvrl/chenj/Expression_v6_submit.pdf,,https://doi.org/10.1109/FG.2011.5771330,http://www.ecse.rpi.edu/~cvrl/chenj/Expression_v6_submit.pdf
+0fcf04fda0bea5265b73c85d2cc2f7f70416537b,,,https://doi.org/10.1109/TCSVT.2015.2409012,
+0f64e26d6dd6f1c99fe2050887fac26cafe9ed60,,,https://doi.org/10.1109/MCI.2016.2627668,
+0faeec0d1c51623a511adb779dabb1e721a6309b,http://pdfs.semanticscholar.org/a075/782ea38167658fe28986755adddba7369b4f.pdf,,https://doi.org/10.1007/978-3-319-10602-1_40,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ECCV_2014/papers/8693/86930612.pdf
+0f81b0fa8df5bf3fcfa10f20120540342a0c92e5,https://arxiv.org/pdf/1501.05152v1.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/3B_026_ext.pdf
+0f0241124d6092a0bb56259ac091467c2c6938ca,http://mm.cs.uec.ac.jp/kitaha-a/research/maw2008.pdf?origin=publication_detail,,,http://doi.ieeecomputersociety.org/10.1109/WAINA.2008.97
+0a6d344112b5af7d1abbd712f83c0d70105211d0,http://www.cl.cam.ac.uk/~tb346/pub/papers/iccv2013.pdf,,,http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W11/papers/Baltrusaitis_Constrained_Local_Neural_2013_ICCV_paper.pdf
+0a15b8c7d529c7facc2d3b4c2111801dd4adfc28,,,,
+0a64f4fec592662316764283575d05913eb2135b,http://pdfs.semanticscholar.org/0a64/f4fec592662316764283575d05913eb2135b.pdf,,,https://arxiv.org/pdf/1803.00068v1.pdf
+0a2ddf88bd1a6c093aad87a8c7f4150bfcf27112,http://pdfs.semanticscholar.org/0a2d/df88bd1a6c093aad87a8c7f4150bfcf27112.pdf,,,http://discovery.ucl.ac.uk/1306170/1/1306170.pdf
+0a5ffc55b584da7918c2650f9d8602675d256023,http://pdfs.semanticscholar.org/0a5f/fc55b584da7918c2650f9d8602675d256023.pdf,,,https://arxiv.org/pdf/1507.07073v2.pdf
+0a297523188b03fdf9d2155bfdcca7e1bcab3762,,,,
+0aeb5020003e0c89219031b51bd30ff1bceea363,http://www.ee.cuhk.edu.hk/~xgwang/papers/sunWTarxiv15.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Sun_Sparsifying_Neural_Network_CVPR_2016_paper.pdf
+0a511058edae582e8327e8b9d469588c25152dc6,http://pdfs.semanticscholar.org/0a51/1058edae582e8327e8b9d469588c25152dc6.pdf,,,http://research.microsoft.com/en-us/um/people/horvitz/memory_constrained_face_recognition.pdf
+0a4f3a423a37588fde9a2db71f114b293fc09c50,http://pdfs.semanticscholar.org/0a4f/3a423a37588fde9a2db71f114b293fc09c50.pdf,,https://doi.org/10.1016/j.cviu.2014.04.006,http://porto.polito.it/2541099/1/Analyzing_human_beauty_R2_2.0.pdf
+0aa74ad36064906e165ac4b79dec298911a7a4db,http://pdfs.semanticscholar.org/7645/11b63b0eeba9f3dfe1e5ec9ff261cdc59d25.pdf,,,http://finale.seas.harvard.edu/files/finale/files/2009_variational_inference_for_the_indian_buffet_process.pdf?m=1455122778
+0abf67e7bd470d9eb656ea2508beae13ca173198,http://www.cs.cmu.edu/~kkitani/pdf/MFK-CVPR16.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Ma_Going_Deeper_into_CVPR_2016_paper.pdf
+0af33f6b5fcbc5e718f24591b030250c6eec027a,http://pdfs.semanticscholar.org/fa2c/96273027ff92f98109dbcef5b65f34b36627.pdf,,,https://www2.cs.kuleuven.be/cwis/research/liir/publication_files/BNAIC-abstract-2007.pdf
+0a3863a0915256082aee613ba6dab6ede962cdcd,http://pdfs.semanticscholar.org/0a38/63a0915256082aee613ba6dab6ede962cdcd.pdf,,,http://jmlr.csail.mit.edu/proceedings/papers/v48/sangnier16.pdf
+0a4a8768c1ed419baebe1c420bd9051760875cbe,,,https://doi.org/10.1109/EUSIPCO.2016.7760451,
+0a85bdff552615643dd74646ac881862a7c7072d,https://fbcdn-dragon-a.akamaihd.net/hphotos-ak-xpa1/t39.2365-6/10000000_1672336992989417_1391274031_n/Beyond_Frontal_Faces_Improving_Person_Recognition_Using_Multiple_Cues.pdf,,https://doi.org/10.1109/CVPR.2015.7299113,https://people.eecs.berkeley.edu/~nzhang/papers/piper_camera_ready.pdf
+0a5b2e642683ff20b6f0cee16a32a68ba0099908,,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2012.6239342
+0a325d70cc381b136a8f4e471b406cda6d27668c,http://pdfs.semanticscholar.org/0a32/5d70cc381b136a8f4e471b406cda6d27668c.pdf,,https://doi.org/10.1016/j.patcog.2015.12.003,https://www.etsmtl.ca/Unites-de-recherche/LIVIA/Recherche-et-innovation/Publications/Publications-2016/F1b-PR2016.pdf
+0a88f5936528dcfdd27df886b07e62f2fd2072d0,,,,
+0ad8149318912b5449085187eb3521786a37bc78,http://arxiv.org/pdf/1604.02975v1.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Bhattarai_CP-mtML_Coupled_Projection_CVPR_2016_paper.pdf
+0aa9872daf2876db8d8e5d6197c1ce0f8efee4b7,http://pdfs.semanticscholar.org/0aa9/872daf2876db8d8e5d6197c1ce0f8efee4b7.pdf,,,http://ethos.bl.uk/OrderDetails.do?uin=uk.bl.ethos.484803
+0aae88cf63090ea5b2c80cd014ef4837bcbaadd8,http://pdfs.semanticscholar.org/0aae/88cf63090ea5b2c80cd014ef4837bcbaadd8.pdf,,,http://idea.library.drexel.edu/bitstream/1860/1294/1/Zhang_Cuiping.pdf
+0aebe97a92f590bdf21cdadfddec8061c682cdb2,,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2017.2695183
+0a87d781fe2ae2e700237ddd00314dbc10b1429c,http://pdfs.semanticscholar.org/0a87/d781fe2ae2e700237ddd00314dbc10b1429c.pdf,,,http://bengal.missouri.edu/~kes25c/SPIE_2015-Multiscale_HOG_FLIR_FLGPR.pdf
+0ad90118b4c91637ee165f53d557da7141c3fde0,http://pdfs.semanticscholar.org/0ad9/0118b4c91637ee165f53d557da7141c3fde0.pdf,,https://doi.org/10.1109/TNN.2002.1000134,http://www.dsp.toronto.edu/juwei/Publication/Juwei_RBF.pdf
+0a82860d11fcbf12628724333f1e7ada8f3cd255,http://pdfs.semanticscholar.org/0a82/860d11fcbf12628724333f1e7ada8f3cd255.pdf,,,http://arxiv.org/pdf/1601.02129v1.pdf
+0a23d374c6cf71a65e845569230420362fe4903a,http://mplab.ucsd.edu/~ksikka/in_the_wild.pdf,,,http://users.cecs.anu.edu.au/~adhall/Head_Pose_Normalisation_in_The_Wild.pdf
+0a0b9a9ff827065e4ff11022b0e417ddf1d3734e,,,,http://dl.acm.org/citation.cfm?id=2935856
+0a6a818b634cca4eb75a37bfd23b5c5c21331b12,http://hal.cse.msu.edu/pdfs/papers/wacv-2015.pdf,,,http://doi.ieeecomputersociety.org/10.1109/WACV.2015.29
+0ac442bb570b086d04c4d51a8410fcbfd0b1779d,http://www.nec-labs.com/uploads/images/Department-Images/MediaAnalytics/papers/cvpr16_warpnet.pdf,,,http://arxiv.org/pdf/1604.05592v1.pdf
+0af48a45e723f99b712a8ce97d7826002fe4d5a5,http://vision.seas.harvard.edu/papers/WideAngle_PAMI2013.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2013.22
+0aa8a0203e5f406feb1815f9b3dd49907f5fd05b,http://www.iti.gr/~bmezaris/publications/spl11_preprint.pdf,,https://doi.org/10.1109/LSP.2011.2127474,http://mkg.iti.gr/files/spl11_preprint.pdf
+0a68747d001aba014acd3b6ec83ba9534946a0da,http://staff.estem-uc.edu.au/roland/files/2009/05/Dhall_Goecke_Gedeon_TAC2015_AutomaticGroupHappinessIntensityAnalysis.pdf,,,http://videolectures.net/site/normal_dl/tag=977253/fgconference2015_goecke_intensity_analysis_01.pdf
+0ac664519b2b8abfb8966dafe60d093037275573,http://face.cs.kit.edu/download/publications/supplemental_material.pdf,,https://doi.org/10.1109/ICCVW.2011.6130506,
+0a9345ea6e488fb936e26a9ba70b0640d3730ba7,http://www1.ece.neu.edu/~yuewu/files/2016/p52-jiang.pdf,,,http://doi.acm.org/10.1145/2964284.2967182
+0a79d0ba1a4876086e64fc0041ece5f0de90fbea,http://pdfs.semanticscholar.org/0a79/d0ba1a4876086e64fc0041ece5f0de90fbea.pdf,,,http://amp.ece.cmu.edu/Publication/Avinash/abaliga-thesis-20040510.pdf
+0a451fc7d2c6b3509d213c210ae880645edf90ed,,,https://doi.org/10.1109/IJCNN.2014.6889591,
+0abfb5b89e9546f8a5c569ab35b39b888e7cea46,,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2011.68
+0a11b82aa207d43d1b4c0452007e9388a786be12,http://pdfs.semanticscholar.org/0a11/b82aa207d43d1b4c0452007e9388a786be12.pdf,,https://doi.org/10.1007/978-3-642-38067-9_7,http://epubs.surrey.ac.uk/802295/1/mcs2013_submission_32.pdf
+0a29cee986471b495728b08756f135a2377d5a2a,,,,
+0a1138276c52c734b67b30de0bf3f76b0351f097,https://ibug.doc.ic.ac.uk/media/uploads/documents/georgakis_dica.pdf,,https://doi.org/10.1109/TIP.2016.2539502,
+0a4b808ff800fb0041132854361f591ad01067a5,,,,
+0aa405447a8797e509521f0570e4679a42fdac9b,http://mplab.ucsd.edu/~jake/AISeminar26Sep2011.pdf,,,https://arxiv.org/pdf/1110.0585v1.pdf
+0abc13166e4a098fc34d4c708f3349fdd8f6f4c6,,,,
+0ae9cc6a06cfd03d95eee4eca9ed77b818b59cb7,http://pdfs.semanticscholar.org/0ae9/cc6a06cfd03d95eee4eca9ed77b818b59cb7.pdf,,,https://arxiv.org/pdf/1802.06664v1.pdf
+0ac2e8bd5a77d83bae9b49daab2c6f321e9b7a4e,,,https://doi.org/10.1109/SCIS-ISIS.2016.0166,
+0acf23485ded5cb9cd249d1e4972119239227ddb,http://pdfs.semanticscholar.org/507e/2bad4851f04a686ae6e964e15bbef28583e9.pdf,,,http://arxiv.org/abs/1312.1743
+0ad4a814b30e096ad0e027e458981f812c835aa0,http://arxiv.org/pdf/1602.01827v1.pdf,,https://doi.org/10.1109/ICIP.2016.7532958,https://arxiv.org/pdf/1602.01827v3.pdf
+6448d23f317babb8d5a327f92e199aaa45f0efdc,http://pdfs.semanticscholar.org/6448/d23f317babb8d5a327f92e199aaa45f0efdc.pdf,,,http://www.mic.atr.co.jp/~mlyons/pub_pdf/fg00.pdf
+642417f2bb1ff98989e0a0aa855253fed1fffe04,,,https://doi.org/10.1117/12.2004255,
+6412d8bbcc01f595a2982d6141e4b93e7e982d0f,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w6/papers/Kang_Deep_Convolutional_Neural_CVPR_2017_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2017.89
+6440d6c7081efe4538a1c75e93144f3d142feb41,,,,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.280
+646c38494aa960c1c120c26619473f5968e5dc34,,,,
+6486a58f675461d1c9f42a39e942bf39f4427f7d,,,,
+6486b36c6f7fd7675257d26e896223a02a1881d9,,,https://doi.org/10.1109/THMS.2014.2376874,
+6409b8879c7e61acf3ca17bcc62f49edca627d4c,http://pdfs.semanticscholar.org/6409/b8879c7e61acf3ca17bcc62f49edca627d4c.pdf,,,http://ijcai.org/Proceedings/13/Papers/199.pdf
+64153df77fe137b7c6f820a58f0bdb4b3b1a879b,http://pdfs.semanticscholar.org/6415/3df77fe137b7c6f820a58f0bdb4b3b1a879b.pdf,,,http://ias.in.tum.de/_media/spezial/bib/riaz08inmic.pdf
+649eb674fc963ce25e4e8ce53ac7ee20500fb0e3,http://chenlab.ece.cornell.edu/Publication/Kuan-Chuan/WACV16.pdf,,,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477616
+647b2e162e9c476728172f62463a8547d245cde3,,,https://doi.org/10.1109/ICPR.2016.7899898,
+642b5173644caa5c5189982a3d1e41163fa9d595,,,,
+64e216c128164f56bc91a33c18ab461647384869,,,,http://doi.ieeecomputersociety.org/10.1109/AVSS.2016.7738017
+6411c72a2da7180538baf316bac54748fdf2243c,,,,
+642c66df8d0085d97dc5179f735eed82abf110d0,http://research.microsoft.com/users/leizhang/Paper/CVPR05-Shuicheng-Coupled.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2005.114
+6489ad111fee8224b34f99d1bcfb5122786508cd,,,https://doi.org/10.1109/ICIP.2014.7025280,
+6459f1e67e1ea701b8f96177214583b0349ed964,http://vision.ece.ucsb.edu/publications/karthik_icip2011.pdf,,https://doi.org/10.1109/ICIP.2011.6115826,https://labs.psych.ucsb.edu/grafton/scott/Papers/Shanmuga%20Vadivel%202011.pdf
+64a08beb073f62d2ce44e25c4f887de9208625a4,,,https://doi.org/10.1080/09540090701725557,
+64c4019f1ea9b54b1848418ac53c4e2584dc62d4,,,,
+64e82b42e1c41250bdf9eb952686631287cfd410,,,https://doi.org/10.1111/cgf.12760,
+64b9ad39d115f3e375bde4f70fb8fdef5d681df8,,,https://doi.org/10.1109/ICB.2016.7550088,
+64cf86ba3b23d3074961b485c16ecb99584401de,http://pdfs.semanticscholar.org/b54a/54a2f33c24123c6943597462ef02928ec99f.pdf,,https://doi.org/10.1007/978-3-319-46466-4_22,http://3dinterpreter.csail.mit.edu/talks/3dinn_poster_eccv.pdf
+6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4,http://arxiv.org/pdf/1411.7766v2.pdf,,,http://www.ee.cuhk.edu.hk/~xgwang/papers/liuLWTiccv05.pdf
+64cf1cda80a23ed6fc1c8e66065614ef7bdeadf3,http://www0.cs.ucl.ac.uk/staff/s.prince/Papers/PAMI_LIV.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2011.104
+6479b61ea89e9d474ffdefa71f068fbcde22cc44,http://pdfs.semanticscholar.org/6479/b61ea89e9d474ffdefa71f068fbcde22cc44.pdf,,,http://www.robots.ox.ac.uk/~qiong/publications/thesis-2015.pdf
+64102c217cba63a89cd2227dc4b3a9ed2104b73e,,,,
+64e75f53ff3991099c3fb72ceca55b76544374e5,http://pdfs.semanticscholar.org/eb48/804eefe4c61f62178d2a83a9ae0097091897.pdf,,,http://pages.cs.wisc.edu/~gdguo/myPapersOnWeb/cvpr03Guo.pdf
+64fd48fae4d859583c4a031b51ce76ecb5de614c,,,https://doi.org/10.1109/ICARCV.2008.4795556,
+64ca0dbe60bf8f8243fad73a2494c3fa7a2770e2,,,,
+646ef290bc69ab38547632cb12ef1dd74a7c97ee,,,,
+64f9519f20acdf703984f02e05fd23f5e2451977,http://arxiv.org/pdf/1509.01343v1.pdf,,https://doi.org/10.1109/DICTA.2015.7371278,http://ci2cv.net/media/papers/2015_DICTA_Iman.pdf
+641f34deb3bdd123c6b6e7b917519c3e56010cb7,https://pdfs.semanticscholar.org/878d/68c5d016a0a63f328d72adda6b135432b66d.pdf,,,http://whdeng.cn/whdeng_pami2.pdf
+64782a2bc5da11b1b18ca20cecf7bdc26a538d68,http://pdfs.semanticscholar.org/6478/2a2bc5da11b1b18ca20cecf7bdc26a538d68.pdf,,,http://www.iis.sinica.edu.tw/page/jise/2013/201309_07.pdf
+64ba203c8cfc631d5f3f20419880523155fbeeb2,,,,http://doi.acm.org/10.1145/3009977.3010008
+6462ef39ca88f538405616239471a8ea17d76259,http://pdfs.semanticscholar.org/6462/ef39ca88f538405616239471a8ea17d76259.pdf,,https://doi.org/10.1016/j.patcog.2017.05.021,http://www.cse.msu.edu/~rossarun/pubs/NguyenLongRangeIris_PR2017.pdf
+64d5772f44efe32eb24c9968a3085bc0786bfca7,http://pdfs.semanticscholar.org/64d5/772f44efe32eb24c9968a3085bc0786bfca7.pdf,,https://doi.org/10.1007/978-3-642-33718-5_8,http://www.jdl.ac.cn/doc/2011/20131910341623527_2012_eccv_sxli_mdf.pdf
+90d735cffd84e8f2ae4d0c9493590f3a7d99daf1,http://pdfs.semanticscholar.org/90d7/35cffd84e8f2ae4d0c9493590f3a7d99daf1.pdf,,,http://thescipub.com/PDF/ajeassp.2017.726.732.pdf
+90298f9f80ebe03cb8b158fd724551ad711d4e71,http://pdfs.semanticscholar.org/9029/8f9f80ebe03cb8b158fd724551ad711d4e71.pdf,,,http://arxiv.org/abs/1703.02716
+900207b3bc3a4e5244cae9838643a9685a84fee0,http://pdfs.semanticscholar.org/9002/07b3bc3a4e5244cae9838643a9685a84fee0.pdf,,,https://idea.library.drexel.edu/islandora/object/idea:4562/datastream/OBJ/download/Reconstructing_Geometry_from_Its_Latent_Structures.pdf
+90ddf1aabf1c73b5fc45254a2de46e53a0bde857,,,https://doi.org/10.1109/ROBIO.2015.7418917,
+907bb6c2b292e6db74fad5c0b7a7f1cc2a4d4224,,,https://doi.org/10.1016/j.patcog.2014.07.010,
+9048732c8591a92a1f4f589b520a733f07578f80,,,https://doi.org/10.1109/CISP-BMEI.2017.8301921,
+901b0a76fde57c262fabd3a35d3d5ec8366a8480,,,,
+90f4b20f4b7115cb84dda22e5e4eb9c50d7fddce,,,,
+90fb58eeb32f15f795030c112f5a9b1655ba3624,http://pdfs.semanticscholar.org/90fb/58eeb32f15f795030c112f5a9b1655ba3624.pdf,,,http://www.ijrcar.com/Volume_4_Issue_6/v4i605.pdf
+9055b155cbabdce3b98e16e5ac9c0edf00f9552f,,,,http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78
+90b7619eabe94731722ae884d0802256462457dc,https://arxiv.org/pdf/1511.09319v1.pdf,,https://doi.org/10.1007/s11263-016-0939-9,http://arxiv.org/abs/1511.09319
+90c4f15f1203a3a8a5bf307f8641ba54172ead30,http://pdfs.semanticscholar.org/90c4/f15f1203a3a8a5bf307f8641ba54172ead30.pdf,,https://doi.org/10.1007/978-3-319-60964-5_64,http://www-users.cs.york.ac.uk/~nep/research/papers/miua17dai.pdf
+902114feaf33deac209225c210bbdecbd9ef33b1,http://pdfs.semanticscholar.org/b5b0/8aaf56df40260abea890813503003485bda3.pdf,,https://doi.org/10.5244/C.25.125,http://www.bmva.org/bmvc/2011/proceedings/paper125/paper125.pdf
+909c23143162d98ffb2447f0018f92ac6cf8591b,,,,
+902cc7dd4ecfb2b6750905ef08bceeed24e1eeeb,,,https://doi.org/10.1016/j.patcog.2016.03.002,
+90ad0daa279c3e30b360f9fe9371293d68f4cebf,http://pdfs.semanticscholar.org/90ad/0daa279c3e30b360f9fe9371293d68f4cebf.pdf,,,http://pesona.mmu.edu.my/~johnsee/research/papers/files/phdthesis_johnsee.pdf
+90eb66e75381cce7146b3953a2ae479a7beec539,,,,http://doi.ieeecomputersociety.org/10.1109/AIPR.2015.7444542
+90ae02da16b750a9fd43f8a38440f848309c2fe0,,,https://doi.org/10.1007/s10044-015-0499-6,
+90a754f597958a2717862fbaa313f67b25083bf9,http://pdfs.semanticscholar.org/90a7/54f597958a2717862fbaa313f67b25083bf9.pdf,,https://doi.org/10.3389/frobt.2015.00028,
+90dd2a53236b058c79763459b9d8a7ba5e58c4f1,http://pdfs.semanticscholar.org/90dd/2a53236b058c79763459b9d8a7ba5e58c4f1.pdf,,https://doi.org/10.5244/C.21.51,http://www.dcs.warwick.ac.uk/bmvc2007/proceedings/CD-ROM/papers/169/bmvc_v2.pdf
+9026ee8a89ecfa6bd2688a4943eee027e3fc4b0f,,,,http://doi.ieeecomputersociety.org/10.1109/CGIV.2011.28
+90cb074a19c5e7d92a1c0d328a1ade1295f4f311,http://pdfs.semanticscholar.org/90cb/074a19c5e7d92a1c0d328a1ade1295f4f311.pdf,,,http://vismod.media.mit.edu//tech-reports/TR-571.pdf
+90b11e095c807a23f517d94523a4da6ae6b12c76,https://arxiv.org/pdf/1609.08475v1.pdf,,https://doi.org/10.1109/TIP.2017.2686003,http://arxiv.org/abs/1609.08475
+90c2d4d9569866a0b930e91713ad1da01c2a6846,http://pdfs.semanticscholar.org/90c2/d4d9569866a0b930e91713ad1da01c2a6846.pdf,,,http://www.bentham-open.com/contents/pdf/TOAUTOCJ/TOAUTOCJ-6-528.pdf
+90c4a6c6f790dbcef9a29c9a755458be09e319b6,,,,http://doi.acm.org/10.1145/2964284.2967242
+9026eb610916ec4ce77f0d7d543b7c2482ba4173,,,https://doi.org/10.1016/j.patrec.2012.03.006,
+907475a4febf3f1d4089a3e775ea018fbec895fe,http://pdfs.semanticscholar.org/9074/75a4febf3f1d4089a3e775ea018fbec895fe.pdf,,https://doi.org/10.1109/ICIP.2003.1247046,http://www.hds.utc.fr/~fdavoine/mypublications/icip03.pdf
+9028fbbd1727215010a5e09bc5758492211dec19,http://pdfs.semanticscholar.org/9028/fbbd1727215010a5e09bc5758492211dec19.pdf,,https://doi.org/10.1007/978-3-642-38267-3_23,http://ubee.enseeiht.fr/photometricstereo/pdf/ssvm2013.pdf
+90c4deaa538da42b9b044d7b68c3692cced66036,,,,http://doi.ieeecomputersociety.org/10.1109/SITIS.2007.89
+bf30477f4bd70a585588528355b7418d2f37953e,,,https://doi.org/10.1109/ICPR.2016.7900280,
+bff77a3b80f40cefe79550bf9e220fb82a74c084,http://pdfs.semanticscholar.org/bff7/7a3b80f40cefe79550bf9e220fb82a74c084.pdf,,,http://www.wseas.org/multimedia/journals/signal/2012/53-718.pdf
+bf03f0fe8f3ba5b118bdcbb935bacb62989ecb11,http://pdfs.semanticscholar.org/bf03/f0fe8f3ba5b118bdcbb935bacb62989ecb11.pdf,,,http://www.sis.uta.fi/~gofase/docs/papers/2008_Gizatdinova_Surakka_(Effect%20of%20facial%20expressions%20on%20feature-based%20landmark%20localization%20in%20static%20grey%20scale%20images).pdf
+bf1e0545785b05b47caa3ffe7d16982769986f38,,,https://doi.org/10.1016/j.asoc.2010.12.002,
+bf1e0279a13903e1d43f8562aaf41444afca4fdc,http://pdfs.semanticscholar.org/bf1e/0279a13903e1d43f8562aaf41444afca4fdc.pdf,,,https://www.irjet.net/archives/V4/i10/IRJET-V4I10219.pdf
+bf0f0eb0fb31ee498da4ae2ca9b467f730ea9103,http://pdfs.semanticscholar.org/bf0f/0eb0fb31ee498da4ae2ca9b467f730ea9103.pdf,,,http://orca.cf.ac.uk/76770/1/brainsci-94385-update.pdf
+bf0836e5c10add0b13005990ba019a9c4b744b06,,,https://doi.org/10.1109/TCE.2009.5373791,
+bf4f79fd31493648d80d0a4a8da5edeeaba74055,,,,http://doi.acm.org/10.1145/2783258.2783280
+bf3f8726f2121f58b99b9e7287f7fbbb7ab6b5f5,https://ubicomp-mental-health.github.io/papers/2017/perception-syeda.pdf,,,http://doi.acm.org/10.1145/3123024.3125618
+bf4825474673246ae855979034c8ffdb12c80a98,http://pdfs.semanticscholar.org/bf48/25474673246ae855979034c8ffdb12c80a98.pdf,,,http://www.ee.ucr.edu/~amitrc/THESIS/thesis-abir.pdf
+bf00071a7c4c559022272ca5d39e07f727ebb479,,,https://doi.org/10.1109/MMSP.2016.7813388,
+bf2f2696fdb4077b5ab18aa583f6376acadf2438,,,,
+bf2eb77e9b795a4a0a38ed4b1c8dd4b2c9a74317,,,https://doi.org/10.1007/978-3-319-69900-4_70,
+bf776e3483419d7e0cb1dfd770be02d552e1fedf,,,,
+bf8a520533f401347e2f55da17383a3e567ef6d8,http://pdfs.semanticscholar.org/bf8a/520533f401347e2f55da17383a3e567ef6d8.pdf,,,http://www.cs.toronto.edu/~rjliao/papers/arXiv_2015_Bounded.pdf
+bfb98423941e51e3cd067cb085ebfa3087f3bfbe,http://pdfs.semanticscholar.org/bfb9/8423941e51e3cd067cb085ebfa3087f3bfbe.pdf,,,https://arxiv.org/pdf/1511.08956v1.pdf
+bffbd04ee5c837cd919b946fecf01897b2d2d432,http://pdfs.semanticscholar.org/bffb/d04ee5c837cd919b946fecf01897b2d2d432.pdf,,,http://www.cs.bu.edu/techreports/pdf/2005-024-tracking-occlusion-ASL.pdf
+bf1ebcaad91c2c0ed35544159415b3ad388cc7a9,,,https://doi.org/10.1007/s11042-015-2665-7,
+bfd0dd2d13166a9c59e04c62f5463eacfc8d0d2b,,,,
+d3424761e06a8f5f3c1f042f1f1163a469872129,http://pdfs.semanticscholar.org/d342/4761e06a8f5f3c1f042f1f1163a469872129.pdf,,,http://www.bmva.org/thesis-archive/2009/2009-zografos.pdf
+d33b26794ea6d744bba7110d2d4365b752d7246f,http://pdfs.semanticscholar.org/d33b/26794ea6d744bba7110d2d4365b752d7246f.pdf,,,http://www.aaai.org/ocs/index.php/AAAI/AAAI15/paper/viewFile/9574/9983
+d37ca68742b2999667faf464f78d2fbf81e0cb07,,,https://doi.org/10.1007/978-3-319-25417-3_76,
+d3b73e06d19da6b457924269bb208878160059da,http://pdfs.semanticscholar.org/d3b7/3e06d19da6b457924269bb208878160059da.pdf,,,http://icoci.cms.net.my/PROCEEDINGS/2015/PDF/PID065.pdf
+d3f40b393e0e6a88ae4b4072e01ddb0b420300af,,,,
+d3367c9a4825295301225a05a190c0b7ed62736e,,,,
+d3e04963ff42284c721f2bc6a90b7a9e20f0242f,http://pdfs.semanticscholar.org/d3e0/4963ff42284c721f2bc6a90b7a9e20f0242f.pdf,,,http://www.researchgate.net/profile/Xingjie_Wei/publication/269037172_On_Forensic_Use_of_Biometrics/links/54916f230cf2d1800d886901.pdf
+d3d71a110f26872c69cf25df70043f7615edcf92,https://www.cise.ufl.edu/~dihong/assets/07094272.pdf,,https://doi.org/10.1109/TIP.2015.2426413,
+d38b32d91d56b01c77ef4dd7d625ce5217c6950b,,,,
+d35c82588645b94ce3f629a0b98f6a531e4022a3,http://pdfs.semanticscholar.org/d35c/82588645b94ce3f629a0b98f6a531e4022a3.pdf,,,http://epubs.surrey.ac.uk/812816/1/draft_final_charles_gray_mphil_corrections.pdf
+d394bd9fbaad1f421df8a49347d4b3fca307db83,http://www.eecs.qmul.ac.uk/~sgg/papers/ShanEtAl_AVSS05.pdf,,,http://doi.ieeecomputersociety.org/10.1109/AVSS.2005.1577290
+d3b550e587379c481392fb07f2cbbe11728cf7a6,http://pdfs.semanticscholar.org/d3b5/50e587379c481392fb07f2cbbe11728cf7a6.pdf,,,http://vision.kuee.kyoto-u.ac.jp/japanese/happyou/pdf/Zhang_ICDP_201312.pdf
+d3a3d15a32644beffaac4322b9f165ed51cfd99b,,,https://doi.org/10.1109/SIU.2016.7496197,
+d3409f66d35f5828affda26fc3416771eb8154b1,,,,
+d30050cfd16b29e43ed2024ae74787ac0bbcf2f7,http://pdfs.semanticscholar.org/d300/50cfd16b29e43ed2024ae74787ac0bbcf2f7.pdf,,,http://coviss.org/wp-content/uploads/2016/09/Pilla2016Facial.pdf
+d3c004125c71942846a9b32ae565c5216c068d1e,http://pdfs.semanticscholar.org/d3c0/04125c71942846a9b32ae565c5216c068d1e.pdf,,,http://journals.plos.org/plosone/article/file?id=10.1371/journal.pone.0112234&type=printable
+d350a9390f0818703f886138da27bf8967fe8f51,http://mi.informatik.uni-siegen.de/publications/shahlaei_icip2016.pdf,,https://doi.org/10.1109/ICIP.2016.7532624,
+d42dbc995318e2936714c65c028700bfd3633049,,,,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477592
+d42a8c6528cdf1a63050f9a282f6b5daec6b4e73,,,,
+d41bcb0c79f46aca47b9f9b8a779ce80a2a351f9,,,,
+d4331a8dd47b03433f8390da2eaa618751861c64,,,https://doi.org/10.1109/TIP.2012.2192125,
+d41c11ebcb06c82b7055e2964914b9af417abfb2,http://pdfs.semanticscholar.org/d41c/11ebcb06c82b7055e2964914b9af417abfb2.pdf,,,http://www.cs.cmu.edu/~ftorre/nsf_grant_v3.pdf
+d4d1ac1cfb2ca703c4db8cc9a1c7c7531fa940f9,,,,
+d46fda4b49bbc219e37ef6191053d4327e66c74b,http://pdfs.semanticscholar.org/d46f/da4b49bbc219e37ef6191053d4327e66c74b.pdf,,,https://arxiv.org/pdf/1803.00185v1.pdf
+d448d67c6371f9abf533ea0f894ef2f022b12503,http://pdfs.semanticscholar.org/d448/d67c6371f9abf533ea0f894ef2f022b12503.pdf,,,https://arxiv.org/pdf/1802.04668v1.pdf
+d4353952a408e1eae8c27a45cc358976d38dde00,,,https://doi.org/10.1007/s00138-014-0594-5,
+d4c7d1a7a03adb2338704d2be7467495f2eb6c7b,http://pdfs.semanticscholar.org/d4c7/d1a7a03adb2338704d2be7467495f2eb6c7b.pdf,,,http://www.research.ed.ac.uk/portal/files/31544439/823094d18e41e7fa9c58918d818ecbde20a7d3d6.pdf
+d4001826cc6171c821281e2771af3a36dd01ffc0,http://pdfs.semanticscholar.org/d400/1826cc6171c821281e2771af3a36dd01ffc0.pdf,,,https://pastel.archives-ouvertes.fr/file/index/docid/958135/filename/2013ENMP0051.pdf
+d4ccc4f18a824af08649657660e60b67c6868d9c,,,https://doi.org/10.1142/S021800141655020X,
+d46b4e6871fc9974542215f001e92e3035aa08d9,http://pdfs.semanticscholar.org/d46b/4e6871fc9974542215f001e92e3035aa08d9.pdf,,https://doi.org/10.1007/978-3-540-89646-3_50,https://www.researchgate.net/profile/Amnart_Petpon/publication/220845032_A_Gabor_Quotient_Image_for_Face_Recognition_under_Varying_Illumination/links/0fcfd508de5db4a7ab000000.pdf
+d40c16285d762f7a1c862b8ac05a0fdb24af1202,,,https://doi.org/10.1109/BESC.2017.8256378,
+d40cd10f0f3e64fd9b0c2728089e10e72bea9616,http://pdfs.semanticscholar.org/d40c/d10f0f3e64fd9b0c2728089e10e72bea9616.pdf,,https://doi.org/10.3390/jimaging3030037,http://mdpi.com/2313-433X/3/3/37/pdf
+d4ebf0a4f48275ecd8dbc2840b2a31cc07bd676d,http://pdfs.semanticscholar.org/d4eb/f0a4f48275ecd8dbc2840b2a31cc07bd676d.pdf,,,https://arxiv.org/pdf/1802.00421v1.pdf
+d43b6ca9257e9b24f89eb3867f2c04068a78c778,,,,
+d4ec62efcc631fa720dfaa1cbc5692b39e649008,,,https://doi.org/10.1109/ICDM.2016.0026,
+d46e793b945c4f391031656357625e902c4405e8,http://140.118.9.222/publications/journal/faceoff.pdf,,https://doi.org/10.1007/s11042-010-0624-x,
+d4c2d26523f577e2d72fc80109e2540c887255c8,http://pdfs.semanticscholar.org/d4c2/d26523f577e2d72fc80109e2540c887255c8.pdf,,,http://arxiv.org/pdf/1601.04293v1.pdf
+d4fb26f5528b9a1f04ea773cc2b920e01fc0edd4,,,https://doi.org/10.1109/TSMCB.2009.2032155,
+d4b88be6ce77164f5eea1ed2b16b985c0670463a,http://pdfs.semanticscholar.org/d4b8/8be6ce77164f5eea1ed2b16b985c0670463a.pdf,,,https://msu.edu/~jourablo/images/TechnicalReport160115.pdf
+d4fba386caca1b5b2ee35ee5310b5fce50b2b1c3,,,https://doi.org/10.23919/MVA.2017.7986886,
+d44d911c045a6df610cb4103f1ab09827fab8296,,,,
+d4026438ce2b92302fa635c05507cf0e888414c0,,,,
+d44ca9e7690b88e813021e67b855d871cdb5022f,http://pdfs.semanticscholar.org/d44c/a9e7690b88e813021e67b855d871cdb5022f.pdf,,https://doi.org/10.1007/978-3-642-10677-4_83,http://eprints.qut.edu.au/28618/1/c28618.pdf
+baaaf73ec28226d60d923bc639f3c7d507345635,http://pdfs.semanticscholar.org/baaa/f73ec28226d60d923bc639f3c7d507345635.pdf,,,http://cs229.stanford.edu/proj2015/158_report.pdf
+ba2bbef34f05551291410103e3de9e82fdf9dddd,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Guo_A_Study_on_2014_CVPR_paper.pdf,,https://doi.org/10.1109/CVPR.2014.542,
+bafb8812817db7445fe0e1362410a372578ec1fc,http://www.cin.ufpe.br/~rps/Artigos/Image-Quality-Based%20Adaptive%20Face%20Recognition.pdf,,https://doi.org/10.1109/TIM.2009.2037989,
+baa0fe4d0ac0c7b664d4c4dd00b318b6d4e09143,http://pdfs.semanticscholar.org/baa0/fe4d0ac0c7b664d4c4dd00b318b6d4e09143.pdf,,,http://www.sersc.org/journals/IJSIP/vol8_no1/2.pdf
+bab2f4949a38a712a78aafbc0a3c392227c65f56,,,https://doi.org/10.1109/CISP-BMEI.2017.8302191,
+ba99c37a9220e08e1186f21cab11956d3f4fccc2,https://arxiv.org/pdf/1609.08677v1.pdf,,https://doi.org/10.1109/ICDM.2016.0149,http://arxiv.org/abs/1609.08677
+ba816806adad2030e1939450226c8647105e101c,http://pdfs.semanticscholar.org/ba81/6806adad2030e1939450226c8647105e101c.pdf,,,http://crcv.ucf.edu/ICCV13-Action-Workshop/index.files/NotebookPapers13/Paper%2047%20(Supplementary).pdf
+ba30cc9d8bac724dafc0aea247159cc7e7105784,,,,http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019360
+ba017a8d16e47e57a1f3eb5a94c1ba24e6952274,,,,
+ba6769c165967c8dcb11fe5e0be2153ddbe99c7e,,,,
+bad15b4dea2399d57ee17f33a5ba8f04b012ef63,,,,
+ba83b28ac5ce92ef8437fdd499132823f487ff83,,,,
+ba931c3f90dd40a5db4301a8f0c71779a23043d6,,,https://doi.org/10.1109/ICPR.2014.136,
+badcfb7d4e2ef0d3e332a19a3f93d59b4f85668e,http://pdfs.semanticscholar.org/badc/fb7d4e2ef0d3e332a19a3f93d59b4f85668e.pdf,,https://doi.org/10.1007/11608288_26,http://www.jdl.ac.cn/doc/2006/The%20Application%20of%20Extended%20Geodesic%20Distance%20in%20Head%20Poses%20Estimation.pdf
+ba8a99d35aee2c4e5e8a40abfdd37813bfdd0906,http://pdfs.semanticscholar.org/ba8a/99d35aee2c4e5e8a40abfdd37813bfdd0906.pdf,,,http://ev.fe.uni-lj.si/1-2-2011/STkalcic.pdf
+bac11ce0fb3e12c466f7ebfb6d036a9fe62628ea,http://pdfs.semanticscholar.org/bac1/1ce0fb3e12c466f7ebfb6d036a9fe62628ea.pdf,,https://doi.org/10.1007/978-3-319-46466-4_17,http://www.public.asu.edu/~kkulkar1/wslhc.pdf
+baad4e7ab0942a6b93ee2df39685f928efdae006,,,,
+ba29ba8ec180690fca702ad5d516c3e43a7f0bb8,http://pdfs.semanticscholar.org/ba29/ba8ec180690fca702ad5d516c3e43a7f0bb8.pdf,,https://doi.org/10.1016/j.patcog.2017.01.027,http://cs-people.bu.edu/sbargal/Do-Less-and-Achieve-More.pdf
+bab88235a30e179a6804f506004468aa8c28ce4f,http://pdfs.semanticscholar.org/bab8/8235a30e179a6804f506004468aa8c28ce4f.pdf,,https://doi.org/10.1016/j.patcog.2013.01.016,http://www4.comp.polyu.edu.hk/~cslzhang/paper/PR-JDDLDR.pdf
+ba69d464bc360f94303ffc9f710009d16a5673a0,,,,
+a065080353d18809b2597246bb0b48316234c29a,http://pdfs.semanticscholar.org/a065/080353d18809b2597246bb0b48316234c29a.pdf,,,http://arxiv.org/abs/1712.03687
+a0f94e9400938cbd05c4b60b06d9ed58c3458303,http://people.ee.duke.edu/~lcarin/Hoey_Little07.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2007.1145
+a0f193c86e3dd7e0020c0de3ec1e24eaff343ce4,http://pdfs.semanticscholar.org/a0f1/93c86e3dd7e0020c0de3ec1e24eaff343ce4.pdf,,,http://www.iis.sinica.edu.tw/page/jise/2005/200507_10.html
+a0dc68c546e0fc72eb0d9ca822cf0c9ccb4b4c4f,http://www.cs.columbia.edu/~neeraj/base/papers/nk_ijcb2011_fusion.pdf,,https://doi.org/10.1109/IJCB.2011.6117490,https://www.wjscheirer.com/projects/unconstrained-face/ijcb_2011_slides.pdf
+a0021e3bbf942a88e13b67d83db7cf52e013abfd,http://pdfs.semanticscholar.org/a002/1e3bbf942a88e13b67d83db7cf52e013abfd.pdf,,,https://www.jstage.jst.go.jp/article/jamdsm/9/5/9_2015jamdsm0072/_pdf
+a0beb0cc6f167373f8b4b7458ff0ec42fc290a75,,,,
+a0d6390dd28d802152f207940c7716fe5fae8760,http://pdfs.semanticscholar.org/a0d6/390dd28d802152f207940c7716fe5fae8760.pdf,,https://doi.org/10.1007/978-3-642-33712-3_41,https://www.microsoft.com/en-us/research/wp-content/uploads/2012/01/JointBayesian.pdf
+a07f78124f83eef1ed3a6f54ba982664ae7ca82a,,http://ieeexplore.ieee.org/document/6460481/,,
+a0fb5b079dd1ee5ac6ac575fe29f4418fdb0e670,http://webhost.uoradea.ro/ibuciu/ISCAS2006_Buciu.pdf,,https://doi.org/10.1109/ISCAS.2006.1693672,http://poseidon.csd.auth.gr/papers/PUBLISHED/CONFERENCE/pdf/Buciu06b.pdf
+a0d5990eb150cdcb1c8b2967e6a4fe7a5d85063b,,,https://doi.org/10.1109/ICIP.2017.8296805,
+a02f0aad91c2d88b49c443e1e39c3acfc067a705,http://www.cs.columbia.edu/~wfan/PAPERS/SMC10cher.pdf,,https://doi.org/10.1109/ICSMC.2010.5641971,https://www.researchgate.net/profile/Stefan_Robila/publication/220755300_Analysis_of_Chernoff_criterion_for_linear_dimensionality_reduction/links/09e41510bdf965a63e000000.pdf
+a05b1254630257fe27ee195ef05cc50ce6e41f22,,,,
+a0dfb8aae58bd757b801e2dcb717a094013bc178,http://pdfs.semanticscholar.org/a0df/b8aae58bd757b801e2dcb717a094013bc178.pdf,,,http://rcs.cic.ipn.mx/2017_140/Reconocimiento%20de%20expresiones%20faciales%20con%20base%20en%20la%20dinamica%20de%20puntos%20de%20referencia%20faciales.pdf
+a03cfd5c0059825c87d51f5dbf12f8a76fe9ff60,http://pdfs.semanticscholar.org/ac3b/033fd24913c31778cd4cb2d013239315d7a9.pdf,,,http://vision.ucsd.edu/~pdollar/research/papers/BabenkoEtAlECCV08simul.pdf
+a06b6d30e2b31dc600f622ab15afe5e2929581a7,https://ibug.doc.ic.ac.uk/media/uploads/documents/2209.pdf,,,http://openaccess.thecvf.com/content_cvpr_2017/papers/Sagonas_Robust_Joint_and_CVPR_2017_paper.pdf
+a090d61bfb2c3f380c01c0774ea17929998e0c96,http://iitlab.bit.edu.cn/mcislab/~jiayunde/pdf/CVPR2012_BrickIllumDimension.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247679
+a094e52771baabe4ab37ef7853f9a4f534227457,,,https://doi.org/10.1109/TITS.2016.2551298,
+a0f6196d27a39cde2dbf62c08d89cbe489600bb0,,,https://doi.org/10.1016/j.cose.2016.03.007,
+a0e7f8771c7d83e502d52c276748a33bae3d5f81,http://pdfs.semanticscholar.org/a0e7/f8771c7d83e502d52c276748a33bae3d5f81.pdf,,,http://www.cs.nyu.edu/~mohri/pub/ens_springer.pdf
+a0061dae94d916f60a5a5373088f665a1b54f673,http://pdfs.semanticscholar.org/a006/1dae94d916f60a5a5373088f665a1b54f673.pdf,,,http://arxiv.org/abs/1702.08516
+a0848d7b1bb43f4b4f1b4016e58c830f40944817,http://lhncbc.nlm.nih.gov/system/files/pub8893.pdf,,,http://www.lhncbc.nlm.nih.gov/system/files/pub8893.pdf
+a006cd95c14de399706c5709b86ac17fce93fcba,,,https://doi.org/10.1109/ICPR.2014.343,
+a000149e83b09d17e18ed9184155be140ae1266e,http://pdfs.semanticscholar.org/a000/149e83b09d17e18ed9184155be140ae1266e.pdf,,,http://crcv.ucf.edu/papers/Springer2015_UCFSports_Action.pdf
+a01f9461bc8cf8fe40c26d223ab1abea5d8e2812,http://pdfs.semanticscholar.org/a01f/9461bc8cf8fe40c26d223ab1abea5d8e2812.pdf,,https://doi.org/10.1007/978-3-319-16181-5_51,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ECCV_2014/workshops/w16/W16-02.pdf
+a000e15656e84dd538f1f0b8f8639dd29f122c95,,,,
+a7c066e636b8953481b4a8d8ff25a43a96dd348f,,,https://doi.org/10.1109/ATSIP.2017.8075517,
+a702fc36f0644a958c08de169b763b9927c175eb,http://www.apsipa.org/proceedings_2013/papers/170_PID2935307.pdf,,https://doi.org/10.1109/APSIPA.2013.6694152,
+a7267bc781a4e3e79213bb9c4925dd551ea1f5c4,http://pdfs.semanticscholar.org/a726/7bc781a4e3e79213bb9c4925dd551ea1f5c4.pdf,,,https://arxiv.org/pdf/1801.06349v1.pdf
+a784a0d1cea26f18626682ab108ce2c9221d1e53,http://openaccess.thecvf.com/content_ICCV_2017/papers/Agustsson_Anchored_Regression_Networks_ICCV_2017_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2017.182
+a7a5d9a2dece15ddbab77b7ecc81294cfa1fafdb,,,,
+a76e57c1b2e385b68ffdf7609802d71244804c1d,,,https://doi.org/10.1016/j.patrec.2016.05.027,
+a777101b56fe46c4d377941afcf34edc2b8b5f6f,,,,
+a729d0243b1e3b055f44248a32b3caf20b7e93be,,,,
+a72f0be803c9290923643660caf3bffec4ea3611,,,,
+a7da7e5a6a4b53bf8736c470ff8381a654e8c965,,,https://doi.org/10.1007/s13042-011-0045-9,
+a74251efa970b92925b89eeef50a5e37d9281ad0,http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf,,https://doi.org/10.1109/ICCVW.2011.6130513,https://www.tugraz.at/fileadmin/user_upload/Institute/ICG/Documents/lrs/pubs/koestinger_befit_11.pdf
+a7d23c699a5ae4ad9b8a5cbb8c38e5c3b5f5fb51,http://pdfs.semanticscholar.org/a7d2/3c699a5ae4ad9b8a5cbb8c38e5c3b5f5fb51.pdf,,,
+a78ef252d7e7cd86e4a72c2a7be628e73824fb92,,,,
+a70e36daf934092f40a338d61e0fe27be633f577,http://pdfs.semanticscholar.org/a70e/36daf934092f40a338d61e0fe27be633f577.pdf,,,http://www.cl.cam.ac.uk/~re227/publications/EnhancedFacialFeatureTracking-HCII2001.pdf
+a7191958e806fce2505a057196ccb01ea763b6ea,http://pdfs.semanticscholar.org/a719/1958e806fce2505a057196ccb01ea763b6ea.pdf,,,https://openresearch-repository.anu.edu.au/bitstream/1885/102510/1/Qiu%20Thesis%202016.pdf
+a7e1327bd76945a315f2869bfae1ce55bb94d165,http://pdfs.semanticscholar.org/a7e1/327bd76945a315f2869bfae1ce55bb94d165.pdf,,https://doi.org/10.1080/18756891.2013.816051,http://download.atlantis-press.com/php/download_paper.php?id=25868440
+a7a3ec1128f920066c25cb86fbc33445ce613919,,,https://doi.org/10.1109/VCIP.2017.8305115,
+a71bd4b94f67a71bc5c3563884bb9d12134ee46a,,,https://doi.org/10.1016/j.asoc.2015.05.006,
+a7a6eb53bee5e2224f2ecd56a14e3a5a717e55b9,http://pdfs.semanticscholar.org/a7a6/eb53bee5e2224f2ecd56a14e3a5a717e55b9.pdf,,https://doi.org/10.1007/11008941_21,http://www.cvlab.cs.tsukuba.ac.jp/~kfukui/papers/isrrModifiedwithHeaders.pdf
+a79704c1ce7bf10c8753a8f51437ccbc61947d03,http://www.eecs.qmul.ac.uk/~cfshan/papers/shan-etal-icip05.pdf,,https://doi.org/10.1109/ICIP.2005.1530069,
+a7c39a4e9977a85673892b714fc9441c959bf078,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR_2007/data/papers/workshops/Biometrics/papers/06-p71.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2007.383381
+a735c6330430c0ff0752d117c54281b1396b16bf,,,https://doi.org/10.1109/SMC.2014.6974118,
+a75edf8124f5b52690c08ff35b0c7eb8355fe950,http://pdfs.semanticscholar.org/a75e/df8124f5b52690c08ff35b0c7eb8355fe950.pdf,,https://doi.org/10.1007/978-3-540-24837-8_10,http://carol.wins.uva.nl/~nicu/publications/Yafei_hci04.pdf
+a73405038fdc0d8bf986539ef755a80ebd341e97,,,https://doi.org/10.1109/TIP.2017.2698918,
+a713a01971e73d0c3118d0409dc7699a24f521d6,,,https://doi.org/10.1109/SSCI.2017.8285381,
+a75ee7f4c4130ef36d21582d5758f953dba03a01,http://pdfs.semanticscholar.org/a75e/e7f4c4130ef36d21582d5758f953dba03a01.pdf,,,https://mohaseeb.github.io/public/posts_imgs/mohamed_abdulaziz_project_report.pdf
+a7f188a7161b6605d58e48b2537c18a69bd2446f,,,https://doi.org/10.1109/PIMRC.2011.6139898,
+a752ed42171c49c4616c9a367d2ff4b1eac09cbe,,,,
+a703d51c200724517f099ee10885286ddbd8b587,http://pdfs.semanticscholar.org/a703/d51c200724517f099ee10885286ddbd8b587.pdf,,https://doi.org/10.1109/FUZZ.2003.1206552,http://www.robotian.net/akaii/about/paper/FUZZ-IEEE2003-1.pdf
+a76969df111f9ee9f0b898b51ad23a721d289bdc,,,https://doi.org/10.1109/ICMLA.2015.185,
+a75dfb5a839f0eb4b613d150f54a418b7812aa90,https://arxiv.org/pdf/1708.02314v1.pdf,,https://doi.org/10.1109/GlobalSIP.2017.8308652,http://arxiv.org/abs/1708.02314
+a75de488eaacb1dafffbe667465390f101498aaf,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.47
+b839bc95794dc65340b6e5fea098fa6e6ea5e430,,,https://doi.org/10.1109/WACVW.2017.8,
+b8e5800dfc590f82a0f7eedefce9abebf8088d12,,,https://doi.org/10.1109/DCC.2017.87,
+b86c49c6e3117ea116ec2d8174fa957f83502e89,,,https://doi.org/10.1109/CIT/IUCC/DASC/PICOM.2015.149,
+b85d0aef3ee2883daca2835a469f5756917e76b7,,,https://doi.org/10.1007/s41095-015-0015-3,
+b88ceded6467e9b286f048bb1b17be5998a077bd,http://pdfs.semanticscholar.org/b88c/eded6467e9b286f048bb1b17be5998a077bd.pdf,,,http://arxiv.org/pdf/1608.01793v1.pdf
+b89d4c474b42f9a241e347915391b4aba391c307,,,,
+b871d1b8495025ff8a6255514ed39f7765415935,http://pdfs.semanticscholar.org/b871/d1b8495025ff8a6255514ed39f7765415935.pdf,,,http://www.aicit.org/JDCTA/ppl/JDCTA3439PPL.pdf
+b856d8d6bff745bb1b4beb67e4b821fc20073840,,,https://doi.org/10.1109/ICMLC.2016.7872935,
+b84dde74dddf6a3281a0b22c68999942d2722919,,,,http://dl.acm.org/citation.cfm?id=2910703
+b85b754ace15f4e9bee4ee76296580ddfbc3a11e,,,,
+b88d5e12089f6f598b8c72ebeffefc102cad1fc0,http://www.cv-foundation.org/openaccess/content_cvpr_2016_workshops/w24/papers/Wang_Robust_2DPCA_and_CVPR_2016_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2016.147
+b8a16fcb65a8cee8dd32310a03fe36b5dff9266a,,,https://doi.org/10.1109/SIU.2014.6830473,
+b803cdb3377fa3b6194932607f51f2d1fafbf964,,,,
+b8b9cef0938975c5b640b7ada4e3dea6c06d64e9,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.119
+b85d953de16eecaecccaa8fad4081bd6abda9b1b,,,https://doi.org/10.1016/j.neuroimage.2015.12.020,
+b84b7b035c574727e4c30889e973423fe15560d7,http://pdfs.semanticscholar.org/b84b/7b035c574727e4c30889e973423fe15560d7.pdf,,https://doi.org/10.1007/978-3-642-35136-5_39,http://www.nlpr.ia.ac.cn/2012papers/gnhy/nh12.pdf
+b8dba0504d6b4b557d51a6cf4de5507141db60cf,http://pdfs.semanticscholar.org/b8db/a0504d6b4b557d51a6cf4de5507141db60cf.pdf,,,http://ceur-ws.org/Vol-2037/paper_21.pdf
+b89862f38fff416d2fcda389f5c59daba56241db,http://pdfs.semanticscholar.org/b898/62f38fff416d2fcda389f5c59daba56241db.pdf,,,https://infoscience.epfl.ch/record/125065/files/SorciAntoniniSurvey_fg08_tr.pdf
+b84f164dbccb16da75a61323adaca730f528edde,,,https://doi.org/10.1109/TIP.2013.2237914,
+b8caf1b1bc3d7a26a91574b493c502d2128791f6,http://pdfs.semanticscholar.org/b8ca/f1b1bc3d7a26a91574b493c502d2128791f6.pdf,,,"https://orca.cf.ac.uk/97929/1/Burley,%20Snowden%20PLOS%20ONE.pdf"
+b8084d5e193633462e56f897f3d81b2832b72dff,http://pdfs.semanticscholar.org/b808/4d5e193633462e56f897f3d81b2832b72dff.pdf,,,http://arxiv.org/abs/1502.00873
+b8bcf9c773da1c5ee76db4bf750c9ff5d159f1a0,,,,http://doi.acm.org/10.1145/2911996.2911999
+b8378ab83bc165bc0e3692f2ce593dcc713df34a,http://cmp.felk.cvut.cz/ftp/articles/cech/Cech-ICPR-2014.pdf,,https://doi.org/10.1109/ICPR.2014.378,
+b85580ff2d8d8be0a2c40863f04269df4cd766d9,http://pdfs.semanticscholar.org/b855/80ff2d8d8be0a2c40863f04269df4cd766d9.pdf,,,http://ceur-ws.org/Vol-1739/MediaEval_2016_paper_49.pdf
+b8978a5251b6e341a1171e4fd9177aec1432dd3a,,,https://doi.org/10.1016/j.image.2016.04.004,
+b87b0fa1ac0aad0ca563844daecaeecb2df8debf,http://users.cs.cf.ac.uk/Paul.Rosin/resources/papers/portraits-CAe.pdf,,,http://orca.cf.ac.uk/76344/1/portraits-CAe.pdf
+b87db5ac17312db60e26394f9e3e1a51647cca66,http://pdfs.semanticscholar.org/b87d/b5ac17312db60e26394f9e3e1a51647cca66.pdf,,https://doi.org/10.1007/978-3-540-74958-5_79,https://pdfs.semanticscholar.org/b87d/b5ac17312db60e26394f9e3e1a51647cca66.pdf
+b81cae2927598253da37954fb36a2549c5405cdb,http://pdfs.semanticscholar.org/d892/753827950a227179b691e6df85820ab7c417.pdf,,,http://www.aaai.org/ocs/index.php/AAAI/AAAI14/paper/download/8570/8398
+b8f64a94f536b46ef34a0223272e02f9be785ef9,,,https://doi.org/10.1109/EMBC.2012.6346590,
+b8a829b30381106b806066d40dd372045d49178d,http://gavrila.net/tits15.pdf,,https://doi.org/10.1109/TITS.2014.2379441,http://www.gavrila.net/tits15.pdf
+b8d4754813b88ef1a583da2fcd164398824d04db,,,,
+b191aa2c5b8ece06c221c3a4a0914e8157a16129,http://pdfs.semanticscholar.org/b191/aa2c5b8ece06c221c3a4a0914e8157a16129.pdf,,,https://arxiv.org/pdf/1705.03148v1.pdf
+b13bf657ca6d34d0df90e7ae739c94a7efc30dc3,http://pdfs.semanticscholar.org/b13b/f657ca6d34d0df90e7ae739c94a7efc30dc3.pdf,,,http://acberg.com/papers/iccv09_faces.pdf
+b13a882e6168afc4058fe14cc075c7e41434f43e,http://pdfs.semanticscholar.org/b13a/882e6168afc4058fe14cc075c7e41434f43e.pdf,,https://doi.org/10.2200/S00002ED1V01Y200508IVM001,http://www.ee.ucr.edu/~amitrc/mono.pdf
+b1665e1ddf9253dcaebecb48ac09a7ab4095a83e,http://pdfs.semanticscholar.org/b166/5e1ddf9253dcaebecb48ac09a7ab4095a83e.pdf,,,http://people.uncw.edu/pattersone/research/publications/RatliffPatterson_HCI2008.pdf
+b16580d27bbf4e17053f2f91bc1d0be12045e00b,http://pdfs.semanticscholar.org/b165/80d27bbf4e17053f2f91bc1d0be12045e00b.pdf,,https://doi.org/10.1007/978-3-642-38628-2_2,https://www-i6.informatik.rwth-aachen.de/publications/download/944/HanselmannHaraldNeyHermannDreuwPhilippe--Pose-invariantFaceRecognitionwithaTwo-LevelDynamicProgrammingAlgorithm--2013.pdf
+b1ed708d090dd155ffa9ac9699a876292f31aaff,,,,
+b11bb6bd63ee6f246d278dd4edccfbe470263803,http://pdfs.semanticscholar.org/b11b/b6bd63ee6f246d278dd4edccfbe470263803.pdf,,,https://arxiv.org/pdf/1801.09242v1.pdf
+b171f9e4245b52ff96790cf4f8d23e822c260780,http://pdfs.semanticscholar.org/b171/f9e4245b52ff96790cf4f8d23e822c260780.pdf,,,http://www.ri.cmu.edu/downloads/other_pdfs/2014RISSJournal.pdf
+b1a3b19700b8738b4510eecf78a35ff38406df22,http://pdfs.semanticscholar.org/b1a3/b19700b8738b4510eecf78a35ff38406df22.pdf,,,https://ibug.doc.ic.ac.uk/media/uploads/documents/tac_survey_2017.pdf
+b1301c722886b6028d11e4c2084ee96466218be4,http://pdfs.semanticscholar.org/b130/1c722886b6028d11e4c2084ee96466218be4.pdf,,,https://arxiv.org/pdf/1804.02740v1.pdf
+b1a8315b4843da3d0b61c933a11d9b152cfaae70,,,,
+b1c5581f631dba78927aae4f86a839f43646220c,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553753.pdf,,,http://cvrc.ece.utexas.edu/Publications/ShaohuaFG2013.pdf
+b1891010a0722117c57e98809e1f2b26cd8e9ee3,,,,http://doi.acm.org/10.1145/2330784.2331026
+b1efefcc9a5d30be90776571a6cc0071f3679753,,,https://doi.org/10.1109/ROBIO.2016.7866471,
+b1bb517bd87a1212174033fc786b2237844b04e6,,,https://doi.org/10.1016/j.neucom.2015.03.078,
+b18858ad6ec88d8b443dffd3e944e653178bc28b,http://pdfs.semanticscholar.org/b188/58ad6ec88d8b443dffd3e944e653178bc28b.pdf,,,https://docs.lib.purdue.edu/cgi/viewcontent.cgi?article=2782&context=cstech
+b1444b3bf15eec84f6d9a2ade7989bb980ea7bd1,http://pdfs.semanticscholar.org/b144/4b3bf15eec84f6d9a2ade7989bb980ea7bd1.pdf,,,https://arxiv.org/pdf/1709.09518v1.pdf
+b1534888673e6119f324082246016d28eba249aa,,,https://doi.org/10.1109/MMSP.2017.8122229,
+b133b2d7df9b848253b9d75e2ca5c68e21eba008,http://pdfs.semanticscholar.org/c2c1/ab9eac2907e15618d80f5ce0c9b60f2c36cc.pdf,,,https://www-nlpir.nist.gov/projects/tvpubs/tv17.papers/kobe_nict_siegen.pdf
+b13b101b6197048710e82f044ad2eda6b93affd8,,,,http://doi.ieeecomputersociety.org/10.1109/ARTCom.2009.91
+b1df214e0f1c5065f53054195cd15012e660490a,http://pdfs.semanticscholar.org/b1df/214e0f1c5065f53054195cd15012e660490a.pdf,,,http://openaccess.thecvf.com/content_cvpr_2016/supplemental/Huang_Sparse_Coding_and_2016_CVPR_supplemental.pdf
+b185f0a39384ceb3c4923196aeed6d68830a069f,http://pdfs.semanticscholar.org/b185/f0a39384ceb3c4923196aeed6d68830a069f.pdf,,https://doi.org/10.1007/978-3-642-33712-3_44,http://web.stanford.edu/~hchen2/papers/ECCV2012_ClothingAttributes.pdf
+b11df79c812ff7ea63f7c93ec8eafefc3fd04f7e,,,,
+b19e83eda4a602abc5a8ef57467c5f47f493848d,http://www.cs.jhu.edu/~hwang/papers/SPL10.pdf,,https://doi.org/10.1109/LSP.2009.2036653,http://www.cs.adelaide.edu.au/~hanzi/papers/SPL10.pdf
+b1429e4d3dd3412e92a37d2f9e0721ea719a9b9e,http://www.hamedkiani.com/uploads/5/1/8/8/51882963/176.pdf,,,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477676
+ddf55fc9cf57dabf4eccbf9daab52108df5b69aa,http://pdfs.semanticscholar.org/ddf5/5fc9cf57dabf4eccbf9daab52108df5b69aa.pdf,,,http://www.sersc.org/journals/IJGDC/vol4_no3/7.pdf
+ddd0f1c53f76d7fc20e11b7e33bbdc0437516d2b,,,https://doi.org/10.1109/ICDSP.2016.7868598,
+dd05cbfa0045759088d610173a78c792a4f17e4c,,,,
+dda35768681f74dafd02a667dac2e6101926a279,http://www.cim.mcgill.ca/~clark/vmrl/web-content/papers/jjclark_icip_2014.pdf,,https://doi.org/10.1109/ICIP.2014.7025686,
+dd0760bda44d4e222c0a54d41681f97b3270122b,http://pdfs.semanticscholar.org/dd07/60bda44d4e222c0a54d41681f97b3270122b.pdf,,https://doi.org/10.1016/j.engappai.2007.11.010,http://rtpis.org/documents/mypaper/RTPIS_publication_1291227544.pdf
+ddea3c352f5041fb34433b635399711a90fde0e8,http://pdfs.semanticscholar.org/fc6b/2eb9253f33197b1ba8a045525487a16e8756.pdf,,,http://www.cs.berkeley.edu/~akar/IITK_website/se367/project/report.pdf
+dd031dbf634103ff3c58ce87aa74ec6921b2e21d,,,,http://doi.ieeecomputersociety.org/10.1109/ACII.2015.7344664
+dddd70fb2746a944e7428e2eb61ca06faff3fce9,,,,
+ddb1a392582c624c9116cb00eac01aba220fad84,,,,
+dd8a851f2a0c63bb97e33aaff1841695f601c863,,,https://doi.org/10.1109/BTAS.2014.6996260,
+dd033d4886f2e687b82d893a2c14dae02962ea70,http://pdfs.semanticscholar.org/dd03/3d4886f2e687b82d893a2c14dae02962ea70.pdf,,,https://ddd.uab.cat/pub/elcvia/elcvia_a2012v11n1/elcvia_a2012v11n1p41.pdf
+dd3181c229819679186056cdfe94a772929ca758,,,,
+ddf099f0e0631da4a6396a17829160301796151c,http://pdfs.semanticscholar.org/ddf0/99f0e0631da4a6396a17829160301796151c.pdf,,,http://biometrics.cse.msu.edu/Publications/Face/BestRowdenJain_FaceQualityHumanAssessments_TIFS2018.pdf
+dd0a334b767e0065c730873a95312a89ef7d1c03,http://pdfs.semanticscholar.org/dd0a/334b767e0065c730873a95312a89ef7d1c03.pdf,,https://doi.org/10.1007/978-3-642-38628-2_90,http://luismarco.nom.es/wp/wp-content/uploads/2014/03/egpaper_IbPRIA-2013_v2.pdf
+dd8ad6ce8701d4b09be460a6cf058fcd5318c700,https://www.researchgate.net/profile/Daniel_Riccio/publication/260652311_Robust_Face_Recognition_for_Uncontrolled_Pose_and_Illumination_Changes/links/5402f4450cf23d9765a55fbc.pdf,,https://doi.org/10.1109/TSMCA.2012.2192427,
+dd0258367fadb632b612ccd84fbc1ef892e70aeb,,,,
+ddd9d7cb809589b701fba9f326d7cf998a63b14f,,,,http://doi.acm.org/10.1145/2647868.2654992
+dd2f6a1ba3650075245a422319d86002e1e87808,http://pdfs.semanticscholar.org/dd2f/6a1ba3650075245a422319d86002e1e87808.pdf,,,https://ibug.doc.ic.ac.uk/media/uploads/documents/2017_pami_offline_deformable_tracking.pdf
+ddf577e8b7c86b1122c1bc90cba79f641d2b33fa,,,,http://doi.acm.org/10.1145/3013971.3014026
+dd8d09eab82d7ec4457317d9f9427122d2ffb649,,,,
+dd715a98dab34437ad05758b20cc640c2cdc5715,,,https://doi.org/10.1007/s41095-017-0082-8,
+ddaa8add8528857712424fd57179e5db6885df7c,http://pdfs.semanticscholar.org/ff63/a8e8e462d15c9d59ac66025a043d3c299aea.pdf,,,http://arxiv.org/abs/1707.09143
+dd8d53e67668067fd290eb500d7dfab5b6f730dd,http://mmlab.ie.cuhk.edu.hk/archive/2007/IFS07_subspace.pdf,,https://doi.org/10.1109/TIFS.2006.890313,
+dd600e7d6e4443ebe87ab864d62e2f4316431293,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553774.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2013.6553774
+dcb50e1f439d1f9b14ae85866f4542e51b830a07,,,https://doi.org/10.1109/FSKD.2012.6234354,
+dcc44853911c3df7db9c3ea5068e6c16aeec71c1,,,,
+dcea30602c4e0b7525a1bf4088620128d4cbb800,,,https://doi.org/10.1109/VCIP.2013.6706430,
+dcdece0d0ee382e2f388dcd7f5bd9721bb7354d6,,,https://doi.org/10.1109/TCYB.2014.2311033,
+dc2f16f967eac710cb9b7553093e9c977e5b761d,,,https://doi.org/10.1109/ICPR.2016.7900141,
+dcb44fc19c1949b1eda9abe998935d567498467d,http://pdfs.semanticscholar.org/dcb4/4fc19c1949b1eda9abe998935d567498467d.pdf,,https://doi.org/10.24963/ijcai.2017/266,https://www.ijcai.org/proceedings/2017/0266.pdf
+dcc38db6c885444694f515d683bbb50521ff3990,http://pdfs.semanticscholar.org/dcc3/8db6c885444694f515d683bbb50521ff3990.pdf,,https://doi.org/10.24963/ijcai.2017/633,http://www.ijcai.org/proceedings/2017/0633.pdf
+dc5cde7e4554db012d39fc41ac8580f4f6774045,http://pdfs.semanticscholar.org/dc5c/de7e4554db012d39fc41ac8580f4f6774045.pdf,,,http://www.bmva.org/bmvc/2014/files/abstract008.pdf
+dc7df544d7c186723d754e2e7b7217d38a12fcf7,http://pdfs.semanticscholar.org/dc7d/f544d7c186723d754e2e7b7217d38a12fcf7.pdf,,,http://wscg.zcu.cz/wscg2016/short/E89-full.pdf
+dc77287bb1fcf64358767dc5b5a8a79ed9abaa53,http://pdfs.semanticscholar.org/dc77/287bb1fcf64358767dc5b5a8a79ed9abaa53.pdf,,,http://arxiv.org/abs/1704.04137
+dc2e805d0038f9d1b3d1bc79192f1d90f6091ecb,http://pdfs.semanticscholar.org/dc2e/805d0038f9d1b3d1bc79192f1d90f6091ecb.pdf,,,http://drum.lib.umd.edu/bitstream/handle/1903/15884/Ho_umd_0117E_15444.pdf?isAllowed=y&sequence=1
+dc84d3f29c52e6d296b5d457962c02074aa75d0f,,,https://doi.org/10.1109/TIP.2016.2580939,
+dced05d28f353be971ea2c14517e85bc457405f3,http://pdfs.semanticscholar.org/dced/05d28f353be971ea2c14517e85bc457405f3.pdf,,https://doi.org/10.1007/11760023_22,http://iiclab.kw.ac.kr/pdf/Manuscript-051228-final.pdf
+dc0341e5392c853f11283e99a7dc5c51be730aca,,,,
+dc295e85e698af56cd115e5531b66e19f3b9e0ce,,,,
+dcce3d7e8d59041e84fcdf4418702fb0f8e35043,http://www.cfar.umd.edu/~rama/Conf.pdf-files/zhou04cvpr-10.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2004.190
+dce3dff9216d63c4a77a2fcb0ec1adf6d2489394,http://pdfs.semanticscholar.org/dce3/dff9216d63c4a77a2fcb0ec1adf6d2489394.pdf,,https://doi.org/10.1007/978-3-642-01793-3_9,http://www.ee.oulu.fi/~hadid/ICB2009.pdf
+dca2bb023b076de1ccd0c6b8d71faeb3fccb3978,,,,http://doi.acm.org/10.1145/3152118
+b69e7e2a7705a58a0e3f1b80ae542907b89ce02e,,,https://doi.org/10.1007/s11042-015-2614-5,
+b6f758be954d34817d4ebaa22b30c63a4b8ddb35,https://arxiv.org/pdf/1703.04835v1.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.134
+b6259115b819424de53bb92f64cc459dcb649f31,,,,http://doi.ieeecomputersociety.org/10.1109/AVSS.2017.8078466
+b62571691a23836b35719fc457e093b0db187956,http://pdfs.semanticscholar.org/b625/71691a23836b35719fc457e093b0db187956.pdf,,,http://www.ijarcsse.com/docs/papers/Volume_3/5_May2013/V3I5-0197.pdf
+b69b239217d4e9a20fe4fe1417bf26c94ded9af9,http://pdfs.semanticscholar.org/b69b/239217d4e9a20fe4fe1417bf26c94ded9af9.pdf,,,https://arxiv.org/pdf/1803.07218v1.pdf
+b6c047ab10dd86b1443b088029ffe05d79bbe257,http://pdfs.semanticscholar.org/b6c0/47ab10dd86b1443b088029ffe05d79bbe257.pdf,,https://doi.org/10.1016/j.patcog.2013.05.016,http://poseidon.csd.auth.gr/papers/PUBLISHED/JOURNAL/pdf/2013/PR_SVM_2013.pdf
+b6052dc718c72f2506cfd9d29422642ecf3992ef,http://pdfs.semanticscholar.org/b605/2dc718c72f2506cfd9d29422642ecf3992ef.pdf,,https://doi.org/10.1007/978-3-642-44964-2_8,http://files.is.tue.mpg.de/jgall/tutorials/slides/motionanalysis_DRAFT.pdf
+b6145d3268032da70edc9cfececa1f9ffa4e3f11,http://cnl.salk.edu/~zhafed/papers/fr_IJCV_2001.pdf,,https://doi.org/10.1023/A:1011183429707,http://www.researchgate.net/profile/Martin_Levine3/publication/220660033_Face_Recognition_Using_the_Discrete_Cosine_Transform/links/5446c5dc0cf22b3c14e0b3a1.pdf
+b68452e28951bf8db5f1193eca3a8fd9e2d0d7ef,,,https://doi.org/10.1109/ICACCI.2015.7275752,
+b6c53891dff24caa1f2e690552a1a5921554f994,http://pdfs.semanticscholar.org/b6c5/3891dff24caa1f2e690552a1a5921554f994.pdf,,https://doi.org/10.1007/978-3-319-16817-3_10,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ACCV_2014/pages/workshop3/pdffiles/w3-p18.pdf
+b6ac33d2c470077fa8dcbfe9b113beccfbd739f8,,,,http://doi.acm.org/10.1145/2509896.2509905
+b68f55bab12ca50b033d8b5c773ce5fe88c5923d,,,,
+b6685941588febbf66f9bf6a074cd548bc8a567f,,,,
+b6ae677b26da039e0112e434d40baf7dd929a3ba,,,,
+b65b51c796ed667c4c7914bf12b1926fd6bbaa0c,,,https://doi.org/10.1016/j.neuroimage.2013.05.108,
+b6c83e6706a9931a2670bc686485d76b67cb92ea,,,,
+b613b30a7cbe76700855479a8d25164fa7b6b9f1,http://www.cs.ucf.edu/~kienhua/classes/COP6731/Reading/AffectiveComputing.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2015.2495222
+b6a23f72007cb40223d7e1e1cc47e466716de945,,,https://doi.org/10.1109/CVPRW.2010.5544598,
+b64cfb39840969b1c769e336a05a30e7f9efcd61,http://pdfs.semanticscholar.org/fde2/b8943eb429d35e649c56ce95658b44c49243.pdf,,https://doi.org/10.3389/fict.2016.00009,http://publications.idiap.ch/downloads/papers/2017/Gay_FRONTIERS-CIA_2016.pdf
+b6c00e51590c48a48fae51385b3534c4d282f76c,,,https://doi.org/10.1109/TIFS.2015.2427778,
+b689d344502419f656d482bd186a5ee6b0140891,http://pdfs.semanticscholar.org/b689/d344502419f656d482bd186a5ee6b0140891.pdf,,,http://www.cns.nyu.edu/~csaid/publications/SaidSebeTodorov2009.pdf
+b631f3c212aab45d73ddc119f1f7d00c3c502a72,,,https://doi.org/10.1109/TIFS.2009.2035976,
+b6530ea4c42f0133468d1ff0a44738b505152a8e,,,,
+b656abc4d1e9c8dc699906b70d6fcd609fae8182,http://pdfs.semanticscholar.org/b656/abc4d1e9c8dc699906b70d6fcd609fae8182.pdf,,https://doi.org/10.1016/j.patrec.2006.12.006,http://amp.ece.cmu.edu/Publication/simon/cvpr05-a.pdf
+b6a01cd4572b5f2f3a82732ef07d7296ab0161d3,http://pdfs.semanticscholar.org/b6a0/1cd4572b5f2f3a82732ef07d7296ab0161d3.pdf,,https://doi.org/10.1007/978-3-319-46478-7_26,https://www.cise.ufl.edu/~zizhao/paper_list/eccv2016.pdf
+b63b6ed78b39166d87d4c56f8890873aa65976a2,,,https://doi.org/10.1109/ICRA.2011.5979953,
+a9f0e940cfba3663dc8304dd5dc77509f024a3cc,,,,
+a9881ae58987da71b4c1ce01ba213eb4be2eef02,,,,
+a92e24c8c53e31fc444a13bd75b434b7207c58f1,,,,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2014.2317711
+a9791544baa14520379d47afd02e2e7353df87e5,http://pdfs.semanticscholar.org/a979/1544baa14520379d47afd02e2e7353df87e5.pdf,,,
+a9756ca629f73dc8f84ee97cfa8b34b8207392dc,,,https://doi.org/10.1109/ICIP.2017.8296542,
+a9cecfbc47a39fa0158a5f6fd883e0e5ac2aa134,,,https://doi.org/10.1142/S0218001405004071,
+a9eb6e436cfcbded5a9f4b82f6b914c7f390adbd,http://pdfs.semanticscholar.org/a9eb/6e436cfcbded5a9f4b82f6b914c7f390adbd.pdf,,,http://thesai.org/Downloads/IJARAI/Volume5No6/Paper_8-A_Model_for_Facial_Emotion_Inference.pdf
+a955033ca6716bf9957b362b77092592461664b4,http://pdfs.semanticscholar.org/a955/033ca6716bf9957b362b77092592461664b4.pdf,,,http://ijircce.com/upload/2015/june/163_santhy.pdf
+a956ff50ca958a3619b476d16525c6c3d17ca264,http://ce.sharif.edu/~amiryanj/downloads/novel_bidirectional_nn_for_face_recognition.pdf,,,
+a93781e6db8c03668f277676d901905ef44ae49f,http://pdfs.semanticscholar.org/a937/81e6db8c03668f277676d901905ef44ae49f.pdf,,,http://rpal.cse.usf.edu/products/bigdata_2016.pdf
+a947c21a15fb0a02378c36271e1addf6b6e110eb,http://www.researchgate.net/profile/Bryan_Conroy/publication/220734216_The_grouped_two-sided_orthogonal_Procrustes_problem/links/02e7e52541c3f27987000000.pdf,,https://doi.org/10.1109/ICASSP.2011.5947151,http://mirlab.org/conference_papers/International_Conference/ICASSP%202011/pdfs/0003688.pdf
+a94d2bc6854ee329ee02910e6cdb9d9228f85944,,,,
+a9fc23d612e848250d5b675e064dba98f05ad0d9,http://pdfs.semanticscholar.org/a9fc/23d612e848250d5b675e064dba98f05ad0d9.pdf,,,http://thesai.org/Downloads/Volume9No2/Paper_22-Face_Age_Estimation_Approach_based_on_Deep_Learning.pdf
+a9af0dc1e7a724464d4b9d174c9cf2441e34d487,,,https://doi.org/10.1142/S0219691316500351,
+a9adb6dcccab2d45828e11a6f152530ba8066de6,http://pdfs.semanticscholar.org/a9ad/b6dcccab2d45828e11a6f152530ba8066de6.pdf,,,http://face.cs.kit.edu/download/publications/Kern_Illumination_Subspaces.pdf
+a967426ec9b761a989997d6a213d890fc34c5fe3,http://vision.ucsd.edu/sites/default/files/043-wacv.pdf,,,http://vision.cornell.edu/se3/wp-content/uploads/2014/09/043-wacv.pdf
+a9286519e12675302b1d7d2fe0ca3cc4dc7d17f6,http://pdfs.semanticscholar.org/a928/6519e12675302b1d7d2fe0ca3cc4dc7d17f6.pdf,,,https://arxiv.org/pdf/1705.08197v1.pdf
+a949b8700ca6ba96ee40f75dfee1410c5bbdb3db,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Haase_Instance-weighted_Transfer_Learning_2014_CVPR_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2014.185
+a98a69739527f46c0a73c983789210d098c1eb09,,,,
+a9d861e270b8b1e6deea1936b258f49f1823005b,,,,
+a9506c60ec48056087ee3e10d28ff7774fbbd553,,,https://doi.org/10.1109/TCSVT.2014.2376136,
+a941434fce5d3fddcd78e2b82d46ccab0411fca9,,,,
+a9be20954e9177d8b2bc39747acdea4f5496f394,http://acsweb.ucsd.edu/~yuw176/report/cvpr_2016.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.520
+a9d1d00d6897ae23c9a7e9fb75a3c7417a6730a4,,,https://doi.org/10.1049/iet-ipr.2016.1074,
+a9426cb98c8aedf79ea19839643a7cf1e435aeaa,,,https://doi.org/10.1109/GlobalSIP.2016.7905998,
+d5afd7b76f1391321a1340a19ba63eec9e0f9833,http://pdfs.semanticscholar.org/d5af/d7b76f1391321a1340a19ba63eec9e0f9833.pdf,,,http://bit.kuas.edu.tw/~jihmsp/2010/vol1/JIH-MSP-2010-03-007.pdf
+d5375f51eeb0c6eff71d6c6ad73e11e9353c1f12,http://pdfs.semanticscholar.org/d537/5f51eeb0c6eff71d6c6ad73e11e9353c1f12.pdf,,https://doi.org/10.1007/978-3-642-23887-1_84,http://ebooks.narotama.ac.id/files/Artificial%20Intelligence%20and%20Computational%20Intelligence;%202nd%20AICIS%202011%20PART%20II/Chapter%2084%20Manifold%20Ranking-Based%20Locality%20Preserving%20Projections.pdf
+d5f8827fc7d66643bf018d5636e81ed41026b61a,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.36
+d50c6d22449cc9170ab868b42f8c72f8d31f9b6c,http://pdfs.semanticscholar.org/d50c/6d22449cc9170ab868b42f8c72f8d31f9b6c.pdf,,https://doi.org/10.24963/ijcai.2017/231,http://www.ijcai.org/proceedings/2017/0231.pdf
+d569c3e62f471aa75ed53e631ec05c1a3d594595,,,https://doi.org/10.1109/NNSP.2002.1030072,
+d5b445c5716952be02172ca4d40c44f4f04067fa,,,https://doi.org/10.1109/ICICS.2011.6173537,
+d522c162bd03e935b1417f2e564d1357e98826d2,http://pdfs.semanticscholar.org/d522/c162bd03e935b1417f2e564d1357e98826d2.pdf,,https://doi.org/10.1186/1687-6180-2013-19,http://asp.eurasipjournals.com/content/pdf/1687-6180-2013-19.pdf
+d59f18fcb07648381aa5232842eabba1db52383e,http://pdfs.semanticscholar.org/d59f/18fcb07648381aa5232842eabba1db52383e.pdf,,,http://www.cs.stanford.edu/people/asaxena/papers/icsci2004_facial.pdf
+d5fa9d98c8da54a57abf353767a927d662b7f026,http://pdfs.semanticscholar.org/f15e/9712b8731e1f5fd9566aca513edda910b5b8.pdf,,,http://www.researchgate.net/profile/Nabil_Hewahi/publication/47277288_Age_Estimation_based_on_Neural_Networks_using_Face_Features/links/0912f50a9b50c57f08000000.pdf
+d588dd4f305cdea37add2e9bb3d769df98efe880,http://pdfs.semanticscholar.org/d588/dd4f305cdea37add2e9bb3d769df98efe880.pdf,,,http://www.iaeng.org/publication/IMECS2009/IMECS2009_pp938-943.pdf
+d5f751d31a9d2d754d0d136d5b02c24b28fb94a0,http://www.researchgate.net/profile/Marie-Francine_Moens/publication/220634584_Naming_People_in_News_Videos_with_Label_Propagation/links/0a85e52ecd01912489000000.pdf,,,http://doi.ieeecomputersociety.org/10.1109/MMUL.2011.22
+d5ab6aa15dad26a6ace5ab83ce62b7467a18a88e,http://pdfs.semanticscholar.org/d5ab/6aa15dad26a6ace5ab83ce62b7467a18a88e.pdf,,,http://www.hrpub.org/download/20141201/WJCAT1-13702887.pdf
+d57ce0ff4acb2910c2d1afee2ebb7aa1e72a4584,,,https://doi.org/10.1109/CVPRW.2010.5543816,
+d57c25c50e5e25fb07fc80b3c3d77b45e16e98cf,,,,
+d5b0e73b584be507198b6665bcddeba92b62e1e5,http://pdfs.semanticscholar.org/d5b0/e73b584be507198b6665bcddeba92b62e1e5.pdf,,,http://www.cbsr.ia.ac.cn/users/jwan/papers/BMVC2017_age.pdf
+d5c66a48bc0a324750db3d295803f47f6060043d,,,,http://doi.ieeecomputersociety.org/10.1109/AVSS.2006.109
+d56fe69cbfd08525f20679ffc50707b738b88031,http://pdfs.semanticscholar.org/d56f/e69cbfd08525f20679ffc50707b738b88031.pdf,,,https://www.elen.ucl.ac.be/Proceedings/esann/esannpdf/es2011-80.pdf
+d58fce50e9028dfc12cb2e7964f83d3b28bcc2fc,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.101
+d50751da2997e7ebc89244c88a4d0d18405e8507,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553713.pdf,,,http://www.vision.ee.ethz.ch/~gfanelli/pubs/fanelli_fg2013.pdf
+d511e903a882658c9f6f930d6dd183007f508eda,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553766.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2013.6553766
+d567f2bbc6ce6d6acf0114e6514f31eff4da68f6,,,,
+d50a40f2d24363809a9ac57cf7fbb630644af0e5,http://pdfs.semanticscholar.org/d50a/40f2d24363809a9ac57cf7fbb630644af0e5.pdf,,,https://arxiv.org/pdf/1711.07201v1.pdf
+d5b5c63c5611d7b911bc1f7e161a0863a34d44ea,http://pdfs.semanticscholar.org/d5b5/c63c5611d7b911bc1f7e161a0863a34d44ea.pdf,,https://doi.org/10.1007/978-3-642-22819-3_43,http://www.researchgate.net/profile/Rui_Ishiyama/publication/220744975_Extracting_Scene-Dependent_Discriminant_Features_for_Enhancing_Face_Recognition_under_Severe_Conditions/links/5449185c0cf2ea65413021fe.pdf
+d59404354f84ad98fa809fd1295608bf3d658bdc,http://pdfs.semanticscholar.org/d594/04354f84ad98fa809fd1295608bf3d658bdc.pdf,,,https://arxiv.org/pdf/1801.00077v1.pdf
+d57dca4413ad4f33c97ae06a5a7fc86dc5a75f8b,http://iplab.dmi.unict.it/sites/default/files/_11.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICMEW.2015.7169756
+d5e1173dcb2a51b483f86694889b015d55094634,http://pdfs.semanticscholar.org/d5e1/173dcb2a51b483f86694889b015d55094634.pdf,,https://doi.org/10.1016/j.patrec.2005.05.004,https://www.researchgate.net/profile/Shiqian_Wu/publication/222879673_PCA_and_LDA_in_DCT_domain/links/09e415044106815b65000000.pdf
+d5dc78eae7a3cb5c953c89376e06531d39b34836,,,https://doi.org/10.1007/s00521-009-0242-6,
+d2d9612d3d67582d0cd7c1833599b88d84288fab,,,https://doi.org/10.1049/iet-cvi.2015.0222,
+d2a415365f997c8fe2dbdd4e06ceab2e654172f6,,,,http://doi.acm.org/10.1145/2425333.2425361
+d28d32af7ef9889ef9cb877345a90ea85e70f7f1,http://multicomp.cs.cmu.edu/wp-content/uploads/2017/10/2017_FG_Kim_Local.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.84
+d2bad850d30973a61b1a7d7dc582241a41e5c326,,,,http://doi.ieeecomputersociety.org/10.1109/ICICIC.2006.12
+d2a6f77ce311e51bb36a5301c1a4a2d220a2947b,,,,
+d28d697b578867500632b35b1b19d3d76698f4a9,http://pdfs.semanticscholar.org/d28d/697b578867500632b35b1b19d3d76698f4a9.pdf,,,http://www.cs.njit.edu/~liu/papers/mypdfs/cvpr99.pdf
+d231a81b38fde73bdbf13cfec57d6652f8546c3c,http://pdfs.semanticscholar.org/d231/a81b38fde73bdbf13cfec57d6652f8546c3c.pdf,,,http://www.ece.gatech.edu/research/labs/MCCL/pubs/dwnlds/Osman_Gokhan_Sezer_Tez.pdf
+d22785eae6b7503cb16402514fd5bd9571511654,http://pdfs.semanticscholar.org/d227/85eae6b7503cb16402514fd5bd9571511654.pdf,,,http://www.ijcsit.com/docs/Volume%205/vol5issue06/ijcsit20140506149.pdf
+d24dafe10ec43ac8fb98715b0e0bd8e479985260,http://pdfs.semanticscholar.org/d24d/afe10ec43ac8fb98715b0e0bd8e479985260.pdf,,,
+d29eec5e047560627c16803029d2eb8a4e61da75,http://pdfs.semanticscholar.org/d29e/ec5e047560627c16803029d2eb8a4e61da75.pdf,,,https://arxiv.org/pdf/1803.09014v1.pdf
+d280bcbb387b1d548173917ae82cb6944e3ceca6,https://cse.sc.edu/~mengz/papers/ICIP2014.pdf,,https://doi.org/10.1109/ICIP.2014.7025283,
+d2baa43471d959075fc4c93485643cbd009797fd,,,,http://doi.ieeecomputersociety.org/10.1109/MM.2017.4241350
+d2598c088b0664c084413796f39697c6f821d56e,,,https://doi.org/10.1109/VCIP.2016.7805451,
+d2fac640086ba89271ad7c1ebf36239ecd64605e,,http://ieeexplore.ieee.org/document/6460449/,,
+d24d3370b2e7d254e999140024d8a7bddf701502,https://www.researchgate.net/profile/Thang_Hoang2/publication/252047382_SVM_classifier_based_face_detection_system_using_BDIP_and_BVLC_moments/links/53f0b8be0cf2711e0c431012.pdf,,,
+d2cd9a7f19600370bce3ea29aba97d949fe0ceb9,http://pdfs.semanticscholar.org/d2cd/9a7f19600370bce3ea29aba97d949fe0ceb9.pdf,,https://doi.org/10.1007/978-3-642-33786-4_23,http://www.jdl.ac.cn/doc/2011/20131910374671726_2012_eccv_hhan_sop.pdf
+d2b3166b8a6a3e6e7bc116257e718e4fe94a0638,,,https://doi.org/10.1007/s00521-010-0411-7,
+d22b378fb4ef241d8d210202893518d08e0bb213,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Zhang_Random_Faces_Guided_2013_ICCV_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2013.300
+d2cb8814068c5a64a54ac8e5d0d3df6986370295,,,,
+aa5eeb1ab953411e915ea5e6298474dbebfa6fb6,,,,
+aa0be8029ea4c657ac8440958364add54ce8c29c,,,,
+aac39ca161dfc52aade063901f02f56d01a1693c,http://pdfs.semanticscholar.org/aac3/9ca161dfc52aade063901f02f56d01a1693c.pdf,,https://doi.org/10.1007/978-3-642-21524-7_40,http://sujingwang.name/publication/icsi11.pdf
+aadf4b077880ae5eee5dd298ab9e79a1b0114555,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W08/papers/Presti_Using_Hankel_Matrices_2015_CVPR_paper.pdf,,,http://arxiv.org/abs/1506.05001
+aa7c72f874951ff7ca3769439f2f39b7cfd4b202,,,https://doi.org/10.1109/JPROC.2009.2032355,
+aa127e6b2dc0aaccfb85e93e8b557f83ebee816b,http://pdfs.semanticscholar.org/aa12/7e6b2dc0aaccfb85e93e8b557f83ebee816b.pdf,,,http://ora.ox.ac.uk/objects/uuid:64e5b1be-231e-49ed-b385-e87db6dbeed8
+aafb271684a52a0b23debb3a5793eb618940c5dd,http://pdfs.semanticscholar.org/aafb/271684a52a0b23debb3a5793eb618940c5dd.pdf,,,http://cs.stanford.edu/groups/vision/documents/DengBergFei-Fei_CVPR2011_supp.pdf
+aaf2436bc63a58d18192b71cc8100768e2f8a6cb,,,,http://doi.ieeecomputersociety.org/10.1109/ICDIP.2009.77
+aad6fc5bd7631d2e68b7a5a01ac5d578899c43e5,,,,http://doi.ieeecomputersociety.org/10.1109/FGR.2006.80
+aa892fe17c06e2b18db2b12314499a741e755df7,,,https://doi.org/10.1109/IJCNN.2017.7966089,
+aab9a617be6e5507beb457b1e6c2e5b046f9cff0,,,https://doi.org/10.1109/ICIP.2008.4712153,
+aa8ef6ba6587c8a771ec4f91a0dd9099e96f6d52,http://www.gtti.it/gtti13/Presentazioni_GTTI13/25_Giugno/Sessioni_Scientifiche_Short_Presentation/Piacenza.pdf,http://ieeexplore.ieee.org/document/6811660/,,
+aade6c3dbea3b0a918f87c85a36cb6b06eff4f5b,,,,
+aa4af9b3811db6a30e1c7cc1ebf079078c1ee152,,,,http://doi.acm.org/10.1145/3129416.3129451
+aa912375eaf50439bec23de615aa8a31a3395ad3,http://pdfs.semanticscholar.org/aa91/2375eaf50439bec23de615aa8a31a3395ad3.pdf,,,http://wireilla.com/papers/ijcis/V2N2/2212ijcis02.pdf
+aa52910c8f95e91e9fc96a1aefd406ffa66d797d,http://pdfs.semanticscholar.org/aa52/910c8f95e91e9fc96a1aefd406ffa66d797d.pdf,,,http://ijcset.com/docs/IJCSET13-04-05-090.pdf
+aad7b12936e0ced60bc0be95e8670b60b5d5ce20,,,https://doi.org/10.1109/URAI.2013.6677383,
+aaeb8b634bb96a372b972f63ec1dc4db62e7b62a,http://pdfs.semanticscholar.org/aaeb/8b634bb96a372b972f63ec1dc4db62e7b62a.pdf,,,http://www.ijceronline.com/papers/Vol4_issue12/Version-2/A0412201012.pdf
+aa0c30bd923774add6e2f27ac74acd197b9110f2,http://research.gold.ac.uk/20200/1/dplda.pdf,,https://doi.org/10.1109/ICASSP.2017.7952663,http://www.mirlab.org/conference_papers/International_Conference/ICASSP%202017/pdfs/0002781.pdf
+aaa4c625f5f9b65c7f3df5c7bfe8a6595d0195a5,http://pdfs.semanticscholar.org/aaa4/c625f5f9b65c7f3df5c7bfe8a6595d0195a5.pdf,,https://doi.org/10.1007/s12652-010-0033-z,https://www.researchgate.net/profile/Massimo_Tistarelli/publication/225122788_Biometrics_in_ambient_intelligence/links/0fcfd50192d9ed333f000000.pdf
+aac934f2eed758d4a27562dae4e9c5415ff4cdb7,http://pdfs.semanticscholar.org/aac9/34f2eed758d4a27562dae4e9c5415ff4cdb7.pdf,,,http://arxiv.org/abs/1703.10667
+aa331fe378056b6d6031bb8fe6676e035ed60d6d,http://pdfs.semanticscholar.org/aa33/1fe378056b6d6031bb8fe6676e035ed60d6d.pdf,,https://doi.org/10.1016/j.patcog.2016.07.010,http://www.cs.sfu.ca/~li/papers-on-line/Haoyu-PR-2016.pdf
+aa90a466a2ff7781c36e7da7df0013aa5b117510,,,,http://doi.ieeecomputersociety.org/10.1109/AICCSA.2017.159
+aa8341cb5d8f0b95f619d9949131ed5c896d6470,,,,http://doi.ieeecomputersociety.org/10.1109/IIH-MSP.2007.403
+aae0e417bbfba701a1183d3d92cc7ad550ee59c3,https://staff.fnwi.uva.nl/th.gevers/pub/GeversTIP12-3.pdf,,https://doi.org/10.1109/TIP.2011.2163162,http://staff.science.uva.nl/~gevers/pub/GeversTIP12-3.pdf
+aaec8141d57d29aa3cedf1baec9633180ddb7a3d,,,,http://doi.ieeecomputersociety.org/10.1109/ICME.2016.7552916
+aa577652ce4dad3ca3dde44f881972ae6e1acce7,http://pdfs.semanticscholar.org/aa57/7652ce4dad3ca3dde44f881972ae6e1acce7.pdf,,,https://arxiv.org/pdf/1211.2881v3.pdf
+aa94f214bb3e14842e4056fdef834a51aecef39c,http://pdfs.semanticscholar.org/aa94/f214bb3e14842e4056fdef834a51aecef39c.pdf,,,http://www.lbd.dcc.ufmg.br/colecoes/eniac/2015/034.pdf
+aac101dd321e6d2199d8c0b48c543b541c181b66,http://pdfs.semanticscholar.org/aac1/01dd321e6d2199d8c0b48c543b541c181b66.pdf,,,https://web.cs.umass.edu/publication/docs/2010/UM-CS-PhD-2010-008.pdf
+aae31f092fadd09a843e1ca62af52dc15fc33c56,,,,http://doi.ieeecomputersociety.org/10.1109/ACII.2017.8273609
+af8fe1b602452cf7fc9ecea0fd4508ed4149834e,http://pdfs.semanticscholar.org/af8f/e1b602452cf7fc9ecea0fd4508ed4149834e.pdf,,https://doi.org/10.1016/j.cviu.2008.07.010,http://mi.eng.cam.ac.uk/~cipolla/publications/article/2009-CVIU-face-manifold.pdf
+af6e351d58dba0962d6eb1baf4c9a776eb73533f,http://pdfs.semanticscholar.org/af6e/351d58dba0962d6eb1baf4c9a776eb73533f.pdf,,,https://arxiv.org/pdf/1612.07454v1.pdf
+aff92784567095ee526a705e21be4f42226bbaab,http://pdfs.semanticscholar.org/aff9/2784567095ee526a705e21be4f42226bbaab.pdf,,,http://discovery.ucl.ac.uk/1468901/1/Yun%20Fu's%20UCL%20PhD%20thesis.pdf
+affa61d044daa1a7d43a6803a743eab47c89c45d,,,https://doi.org/10.1109/TNNLS.2015.2405574,
+afba76d0fe40e1be381182aec822431e20de8153,,,https://doi.org/10.1007/s00521-014-1768-9,
+af13c355a2a14bb74847aedeafe990db3fc9cbd4,http://publications.idiap.ch/downloads/papers/2015/Chavez-Martinez_MUM2015_2015.pdf,,,http://www.idiap.ch/~gatica/publications/ChavezRuizGatica-mum15.pdf
+af0a8199328d4c806574866f419d1962def9305a,http://ttic.uchicago.edu/~smaji/papers/mr07mms.pdf,,,http://people.cs.umass.edu/~smaji/papers/alignment-acmmm07.pdf
+af12a79892bd030c19dfea392f7a7ccb0e7ebb72,,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247972
+afdbbc5c84eb4e535c7c478b5227c0138b57af64,,,,http://doi.ieeecomputersociety.org/10.1109/TMC.2016.2593919
+af62621816fbbe7582a7d237ebae1a4d68fcf97d,http://pdfs.semanticscholar.org/af62/621816fbbe7582a7d237ebae1a4d68fcf97d.pdf,,,http://www.ijera.com/special_issue/Humming%20Bird_March_2014/Version%20%201/AF3033.pdf
+af54dd5da722e104740f9b6f261df9d4688a9712,http://pdfs.semanticscholar.org/af54/dd5da722e104740f9b6f261df9d4688a9712.pdf,,,http://www.dehshibi.com/files/papers/Portability%20A%20New%20Challenge%20on%20Designing%20Family%20Image%20Database.pdf
+af2d30fdb8c611dc5b883b90311d873e336fc534,,,https://doi.org/10.1109/ISCAS.2017.8050275,
+af3e6e20de06b03c33f8e85eced74c2d096730ea,,,https://doi.org/10.1109/CISP-BMEI.2017.8301972,
+afe9cfba90d4b1dbd7db1cf60faf91f24d12b286,http://pdfs.semanticscholar.org/afe9/cfba90d4b1dbd7db1cf60faf91f24d12b286.pdf,,https://doi.org/10.1007/978-3-642-19530-3_17,http://luks.fe.uni-lj.si/sl/osebje/vitomir/pub/BioID11.pdf
+af53ce0f3a039c685b754e1f704817e03e182412,,,,
+af7553d833886663550ce83b087a592a04b36419,,,https://doi.org/10.1109/TIFS.2015.2390138,
+af8e22ef8c405f9cc9ad26314cb7a9e7d3d4eec2,,,https://doi.org/10.1007/s00521-014-1569-1,
+afca252f314b46d5c1f2cb4e75ce15d551069b05,,,,
+af97e792827438ddea1d5900960571939fc0533e,,,https://doi.org/10.1109/ICSMC.2005.1571460,
+af278274e4bda66f38fd296cfa5c07804fbc26ee,http://pdfs.semanticscholar.org/af27/8274e4bda66f38fd296cfa5c07804fbc26ee.pdf,,,
+af97a51f56cd6b793cf96692931a8d1ddbe4e3cc,,,https://doi.org/10.1109/ICPR.2014.57,
+afef2b1d35fb807f422cfec0a370f7d08d4651d1,http://www.researchgate.net/profile/Dong_Yi3/publication/228853254_A_robust_eye_localization_method_for_low_quality_face_images/links/0912f509c4d7ec1630000000.pdf,,https://doi.org/10.1109/IJCB.2011.6117499,
+afc7092987f0d05f5685e9332d83c4b27612f964,http://ci2cv.net/media/papers/2011_AFGR_Chew.pdf,,https://doi.org/10.1109/FG.2011.5771373,http://ci2cv.net/static/papers/2011_AFGR_Chew.pdf
+b749ca71c60904d7dad6fc8fa142bf81f6e56a62,,,https://doi.org/10.1109/TIP.2013.2292560,
+b730908bc1f80b711c031f3ea459e4de09a3d324,http://ibug.doc.ic.ac.uk/media/uploads/documents/tifs_aoms.pdf,,https://doi.org/10.1109/TIFS.2014.2361018,https://ibug.doc.ic.ac.uk/media/uploads/documents/tifs_aoms.pdf
+b7426836ca364603ccab0e533891d8ac54cf2429,http://pdfs.semanticscholar.org/b742/6836ca364603ccab0e533891d8ac54cf2429.pdf,,,
+b7128e0fe18dcb42e8a2ac5cf6794f64a8e37bd0,,,https://doi.org/10.1109/SERA.2017.7965717,
+b7cf7bb574b2369f4d7ebc3866b461634147041a,http://www.patternrecognition.cn/~zhongjin/2012/2012_yinjun_NCA.pdf,,https://doi.org/10.1007/s00521-011-0728-x,
+b750b3d8c34d4e57ecdafcd5ae8a15d7fa50bc24,http://grid.hust.edu.cn/xbliu/papers/ICDM09.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICDM.2009.18
+b7845e0b0ce17cde7db37d5524ef2a61dee3e540,,,https://doi.org/10.1109/ICPR.2016.7899608,
+b75eecc879da38138bf3ace9195ae1613fb6e3cc,,,https://doi.org/10.1007/s10278-015-9808-2,
+b7eead8586ffe069edd190956bd338d82c69f880,http://pdfs.semanticscholar.org/b7ee/ad8586ffe069edd190956bd338d82c69f880.pdf,,,https://acceda.ulpgc.es:8443/bitstream/10553/15078/5/C081_JRBP12.pdf
+b7b8e7813fbc12849f2daba5cab604abd8cbaab6,,,https://doi.org/10.1109/ICCE.2014.6775938,
+b704eaa339d55ef7eac56d0117a8e127fc597686,,,,
+b75cee96293c11fe77ab733fc1147950abbe16f9,http://pdfs.semanticscholar.org/e1a6/16674f63dd54b495d06cf1b7bd59f4cb772e.pdf,,https://doi.org/10.5244/C.20.27,http://www.bmva.org/bmvc/2006/papers/081.pdf
+b7a0c70a320c1ac3e92f4bf0b50a7d8ceb757c41,,,https://doi.org/10.1109/IJCNN.2016.7727203,
+b7f05d0771da64192f73bdb2535925b0e238d233,http://pdfs.semanticscholar.org/b7f0/5d0771da64192f73bdb2535925b0e238d233.pdf,,,http://www.mva-org.jp/Proceedings/CommemorativeDVD/2005/papers/2005172.pdf
+b784bb1d2b2720dac8d4b92851a8d6360c35b0b2,,,https://doi.org/10.1109/ICDM.2016.0041,
+b755505bdd5af078e06427d34b6ac2530ba69b12,http://www.cse.msu.edu/rgroups/biometrics/Publications/Face/Maengetal_NIFaceRecognitionDistance_IJCB11.pdf,,https://doi.org/10.1109/IJCB.2011.6117486,http://www.cse.msu.edu/biometrics/Publications/Face/Maengetal_NIFaceRecognitionDistance_IJCB11.pdf
+b7b461f82c911f2596b310e2b18dd0da1d5d4491,http://mirlab.org/conference_papers/International_Conference/ICASSP%202014/papers/p2961-wang.pdf,,https://doi.org/10.1109/ICASSP.2014.6854138,
+b7740dba37a3cbd5c832a8deb9a710a28966486a,http://pdfs.semanticscholar.org/b774/0dba37a3cbd5c832a8deb9a710a28966486a.pdf,,,http://cepa.stanford.edu/sites/default/files/widen%20etal%202015-story%20superiority%20adol.pdf
+b73fdae232270404f96754329a1a18768974d3f6,http://pdfs.semanticscholar.org/b73f/dae232270404f96754329a1a18768974d3f6.pdf,,,http://cdn.intechopen.com/pdfs-wm/40174.pdf
+b7c5f885114186284c51e863b58292583047a8b4,http://pdfs.semanticscholar.org/b7c5/f885114186284c51e863b58292583047a8b4.pdf,,https://doi.org/10.5220/0006041101560163,http://arxiv.org/abs/1609.06260
+b728e7db6e5559a77dc59381bfb8df96d482a721,,,,http://doi.ieeecomputersociety.org/10.1109/ICIG.2011.28
+b73d9e1af36aabb81353f29c40ecdcbdf731dbed,http://pdfs.semanticscholar.org/b73d/9e1af36aabb81353f29c40ecdcbdf731dbed.pdf,,https://doi.org/10.3390/s150920945,http://ftp.ncbi.nlm.nih.gov/pub/pmc/d7/3b/sensors-15-20945.PMC4610497.pdf
+b747fcad32484dfbe29530a15776d0df5688a7db,http://pdfs.semanticscholar.org/b747/fcad32484dfbe29530a15776d0df5688a7db.pdf,,https://doi.org/10.1016/j.patrec.2014.10.001,http://www.cs.csub.edu/~acruz/papers/10.1016-j.patrec.2014.10.001.pdf
+b7fa06b76f4b9263567875b2988fb7bbc753e69f,,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2469282
+b7043048b4ba748c9c6317b6d8206192c34f57ff,,,https://doi.org/10.1109/ICIP.2016.7533061,
+b7f7a4df251ff26aca83d66d6b479f1dc6cd1085,http://pdfs.semanticscholar.org/b7f7/a4df251ff26aca83d66d6b479f1dc6cd1085.pdf,,https://doi.org/10.1186/1687-5281-2013-55,http://jivp.eurasipjournals.com/content/pdf/1687-5281-2013-55.pdf
+b736bf09e1f94a8722c121c19f7a22d340c13e0b,,,,
+b74a3ede83e10544640e5f58707f567e00281f54,,,,
+b71d1aa90dcbe3638888725314c0d56640c1fef1,,,,
+db1f48a7e11174d4a724a4edb3a0f1571d649670,http://pdfs.semanticscholar.org/db1f/48a7e11174d4a724a4edb3a0f1571d649670.pdf,,,http://summit.sfu.ca/system/files/iritems1/17516/etd10355_XLiu.pdf
+db9ef28cc3531a27c273d769e1b1d6b8aeff2db4,,,,
+db227f72bb13a5acca549fab0dc76bce1fb3b948,http://pdfs.semanticscholar.org/e83d/6fd4502d6d31134ffddb80b6d5c752cf3123.pdf,,,http://www.ijmer.com/papers/%5BNC-%20DATES2K16%5D%20-%202016/CSE/Version-1/CSE-32-38.pdf
+dbaf89ca98dda2c99157c46abd136ace5bdc33b3,http://pdfs.semanticscholar.org/dbaf/89ca98dda2c99157c46abd136ace5bdc33b3.pdf,,https://doi.org/10.1007/978-3-319-16199-0_4,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ECCV_2014/workshops/w18/1.pdf
+dbab6ac1a9516c360cdbfd5f3239a351a64adde7,http://pdfs.semanticscholar.org/dbab/6ac1a9516c360cdbfd5f3239a351a64adde7.pdf,,https://doi.org/10.1016/j.patrec.2015.11.014,http://www.cs.nott.ac.uk/~pszmv/Documents/prl_blockwise_SDM.pdf
+dbb0a527612c828d43bcb9a9c41f1bf7110b1dc8,http://pdfs.semanticscholar.org/dbb0/a527612c828d43bcb9a9c41f1bf7110b1dc8.pdf,,https://doi.org/10.1007/978-3-540-75171-7_7,http://sightcorp.com/downloads/Machine%20Learning%20Techniques%20for%20Face%20Analysis.pdf
+db3984b143c59584a32d762d712d21c0e8cf38b8,,,https://doi.org/10.1109/SMC.2015.324,
+db93049981abca0a281918b8d0655572922553de,http://www.cs.odu.edu/~sji/papers/pdf/Ji_TKDE08.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TKDE.2008.57
+dba493caf6647214c8c58967a8251641c2bda4c2,http://pdfs.semanticscholar.org/dba4/93caf6647214c8c58967a8251641c2bda4c2.pdf,,,http://www.visgraf.impa.br/Data/RefBib/PS_PDF/amfg05/amfg05.pdf
+dbcfefa92edab8d1ffe8bc1cc66ad80fb13d2b6a,,,https://doi.org/10.1007/s00521-010-0519-9,
+dbb7f37fb9b41d1aa862aaf2d2e721a470fd2c57,http://pdfs.semanticscholar.org/dbb7/f37fb9b41d1aa862aaf2d2e721a470fd2c57.pdf,,,http://www.freidok.uni-freiburg.de/volltexte/4835/pdf/thesis_final.pdf
+dbf2d2ca28582031be6d16519ab887248f5e8ad8,,,https://doi.org/10.1109/TMM.2015.2410135,
+db36e682501582d1c7b903422993cf8d70bb0b42,http://pdfs.semanticscholar.org/db36/e682501582d1c7b903422993cf8d70bb0b42.pdf,,,https://arxiv.org/pdf/1509.08038v1.pdf
+dbe0e533d715f8543bcf197f3b8e5cffa969dfc0,http://pdfs.semanticscholar.org/dbe0/e533d715f8543bcf197f3b8e5cffa969dfc0.pdf,,,http://www.ijareeie.com/upload/2014/may/48_AComprehensive.pdf
+dbd5e9691cab2c515b50dda3d0832bea6eef79f2,http://pdfs.semanticscholar.org/dbd5/e9691cab2c515b50dda3d0832bea6eef79f2.pdf,,,http://www.umiacs.umd.edu/~wyzhao/Chapter_figure.ps
+dbfe62c02b544b48354fac741d90eb4edf815db5,,,https://doi.org/10.1109/SITIS.2016.43,
+db150d158ca696c7fb4f39b707f71d609481a250,,,,
+db82f9101f64d396a86fc2bd05b352e433d88d02,http://pdfs.semanticscholar.org/db82/f9101f64d396a86fc2bd05b352e433d88d02.pdf,,https://doi.org/10.1007/978-3-642-24571-8_74,http://cvpia.memphis.edu/wp-content/uploads/2012/05/69750598.pdf
+dbc3ab8c9f564f038e7779b87900c4a0426f3dd1,,,,http://doi.acm.org/10.1145/1386352.1386401
+db428d03e3dfd98624c23e0462817ad17ef14493,http://pdfs.semanticscholar.org/db42/8d03e3dfd98624c23e0462817ad17ef14493.pdf,,,http://www.itl.nist.gov/iaui/894.02/projects/tvpubs/tv6.papers/oxford.pdf
+a83fc450c124b7e640adc762e95e3bb6b423b310,http://pdfs.semanticscholar.org/b908/edadad58c604a1e4b431f69ac8ded350589a.pdf,,,http://arxiv.org/abs/1708.02721
+a8117a4733cce9148c35fb6888962f665ae65b1e,http://pdfs.semanticscholar.org/a811/7a4733cce9148c35fb6888962f665ae65b1e.pdf,,,http://arxiv.org/abs/1704.00438
+a820941eaf03077d68536732a4d5f28d94b5864a,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Zhang_Leveraging_Datasets_With_ICCV_2015_paper.pdf,,,http://openaccess.thecvf.com/content_iccv_2015/papers/Zhang_Leveraging_Datasets_With_ICCV_2015_paper.pdf
+a8faeef97e2a00eddfb17a44d4892c179a7cc277,,,https://doi.org/10.1109/FG.2011.5771459,
+a8affc2819f7a722a41bb913dea9149ee0e23a1f,http://robotics.szpku.edu.cn/c/publication/paper/ICIP2014-gaoyuan1.pdf,,https://doi.org/10.1109/ICIP.2014.7025291,
+a8035ca71af8cc68b3e0ac9190a89fed50c92332,http://pdfs.semanticscholar.org/a803/5ca71af8cc68b3e0ac9190a89fed50c92332.pdf,,https://doi.org/10.1007/978-3-319-46604-0_3,https://cvit.iiit.ac.in/images/ConferencePapers/2016/Mishra-ECCVW2016.pdf
+a88640045d13fc0207ac816b0bb532e42bcccf36,http://pdfs.semanticscholar.org/a886/40045d13fc0207ac816b0bb532e42bcccf36.pdf,,,https://arxiv.org/pdf/1709.02896v1.pdf
+a8638a07465fe388ae5da0e8a68e62a4ee322d68,http://pdfs.semanticscholar.org/a863/8a07465fe388ae5da0e8a68e62a4ee322d68.pdf,,,https://hal.archives-ouvertes.fr/hal-01198718/document
+a8e75978a5335fd3deb04572bb6ca43dbfad4738,http://pdfs.semanticscholar.org/a8e7/5978a5335fd3deb04572bb6ca43dbfad4738.pdf,,,https://arxiv.org/pdf/1607.00137v1.pdf
+a8583e80a455507a0f146143abeb35e769d25e4e,http://pdfs.semanticscholar.org/a858/3e80a455507a0f146143abeb35e769d25e4e.pdf,,,http://staff.csie.ncu.edu.tw/yunghui/resources/2013_ILT.pdf
+a8c62833f5e57d4cd060d6b5f0f9cfe486ee6825,,,,http://doi.ieeecomputersociety.org/10.1109/CSIE.2009.808
+a87e37d43d4c47bef8992ace408de0f872739efc,http://pdfs.semanticscholar.org/a87e/37d43d4c47bef8992ace408de0f872739efc.pdf,,,http://www.mdpi.com/2076-3417/7/1/110/pdf
+a8c8a96b78e7b8e0d4a4a422fcb083e53ad06531,http://pdfs.semanticscholar.org/a8c8/a96b78e7b8e0d4a4a422fcb083e53ad06531.pdf,,,http://thesai.org/Downloads/Volume8No4/Paper_3-3D_Human_Action_Recognition_using_Hu_Moment_Invariants.pdf
+a8fd23934e5039bb818b8d1c47ccb540ce2c253c,,,https://doi.org/10.1007/s11760-015-0808-y,
+a8f1fc34089c4f2bc618a122be71c25813cae354,,,https://doi.org/10.1142/S0219467816500194,
+a8748a79e8d37e395354ba7a8b3038468cb37e1f,http://www.cv-foundation.org/openaccess/content_cvpr_2016_workshops/w9/papers/Reale_Seeing_the_Forest_CVPR_2016_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2016.47
+de048065ea2c5b3e306e2c963533df055e7dfcaa,,,https://doi.org/10.1109/LSP.2016.2598878,
+de8381903c579a4fed609dff3e52a1dc51154951,http://pdfs.semanticscholar.org/de83/81903c579a4fed609dff3e52a1dc51154951.pdf,,,http://www.icg.tugraz.at/publications/pdf/shape-and-appearance-based-analysis-of-facial-images-for-assessing-icao-compliance/
+ded8252fc6df715753e75ba7b7fee518361266ef,,,https://doi.org/10.1109/SIU.2012.6204837,
+de79437f74e8e3b266afc664decf4e6e4bdf34d7,,,https://doi.org/10.1109/IVCNZ.2016.7804415,
+dee39ab960882e70a87501118dfb61cf7a0cd017,,,,
+de8657e9eab0296ac062c60a6e10339ccf173ec1,,,,http://doi.ieeecomputersociety.org/10.1109/BRACIS.2014.51
+dea409847d52bb0ad54bf586cb0482a29a584a7e,,,,http://doi.ieeecomputersociety.org/10.1109/ISM.2009.115
+de15af84b1257211a11889b6c2adf0a2bcf59b42,http://pdfs.semanticscholar.org/de15/af84b1257211a11889b6c2adf0a2bcf59b42.pdf,,,http://epubs.surrey.ac.uk/809414/1/Colin_OReilly_PhD_thesis.pdf
+de0ee491d2747a6f3d171f813fe6f5cdb3a27fd6,,,https://doi.org/10.1002/cpe.3850,
+dedabf9afe2ae4a1ace1279150e5f1d495e565da,http://www.citi.sinica.edu.tw/papers/ycwang/4156-F.pdf,,https://doi.org/10.1109/TIP.2014.2329451,
+dee36d438d7dcb5923ab63dfe1e8676726dd4d69,,,,
+dec5b11b01f35f72adb41d2be26b9b95870c5c00,,http://ieeexplore.ieee.org/document/7071948/,,
+deb89950939ae9847f0a1a4bb198e6dbfed62778,,,https://doi.org/10.1109/LSP.2016.2543019,
+de398bd8b7b57a3362c0c677ba8bf9f1d8ade583,http://www.cs.wayne.edu/~mdong/TMM16.pdf,,https://doi.org/10.1109/TMM.2016.2629282,
+de878384f00b6ce1caa66ac01735fb4b63ad0279,,,https://doi.org/10.1049/iet-ipr.2014.0670,
+defd44b02a1532f47bdd8c8f2375e3df64ac5d79,,,,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.139
+ded41c9b027c8a7f4800e61b7cfb793edaeb2817,http://pdfs.semanticscholar.org/ded4/1c9b027c8a7f4800e61b7cfb793edaeb2817.pdf,,,https://arxiv.org/pdf/1803.07201v1.pdf
+defa8774d3c6ad46d4db4959d8510b44751361d8,http://pdfs.semanticscholar.org/defa/8774d3c6ad46d4db4959d8510b44751361d8.pdf,,,http://vision.soic.indiana.edu/b657/sp2016/projects/prmurali/paper.pdf
+b0d7013577219f34dc8208d31b2af3ee4c358157,,,,
+b03b4d8b4190361ed2de66fcbb6fda0c9a0a7d89,http://pdfs.semanticscholar.org/b03b/4d8b4190361ed2de66fcbb6fda0c9a0a7d89.pdf,,,http://papers.nips.cc/paper/6335-deep-alternative-neural-network-exploring-contexts-as-early-as-possible-for-action-recognition
+b05943b05ef45e8ea8278e8f0870f23db5c83b23,,,https://doi.org/10.1109/ROBIO.2010.5723349,
+b084ad222c1fc9409d355d8e54ac3d1e86f2ca18,,,https://doi.org/10.1016/j.neucom.2017.04.001,
+b013cce42dd769db754a57351d49b7410b8e82ad,http://tlab.princeton.edu/publication_files/Rojas%20et%20al%20IEEE%202010.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2010.5539993
+b0358af78b7c5ee7adc883ef513bbcc84a18a02b,,,https://doi.org/10.1109/WACV.2017.10,
+b07582d1a59a9c6f029d0d8328414c7bef64dca0,http://pdfs.semanticscholar.org/b075/82d1a59a9c6f029d0d8328414c7bef64dca0.pdf,,,https://arxiv.org/pdf/1710.07662v1.pdf
+b017963d83b3edf71e1673d7ffdec13a6d350a87,http://pdfs.semanticscholar.org/b017/963d83b3edf71e1673d7ffdec13a6d350a87.pdf,,,https://static.aminer.org/pdf/PDF/000/273/088/view_independent_video_based_face_recognition_using_posterior_probability_in.pdf
+b03d6e268cde7380e090ddaea889c75f64560891,http://pdfs.semanticscholar.org/b03d/6e268cde7380e090ddaea889c75f64560891.pdf,,,http://students.cse.tamu.edu/fuhaoshi/FacefromVideo/paper/facefromvideo_supplementary_material.pdf
+b03446a2de01126e6a06eb5d526df277fa36099f,http://pdfs.semanticscholar.org/b034/46a2de01126e6a06eb5d526df277fa36099f.pdf,,,http://cs231n.stanford.edu/reports/2016/pdfs/221_Report.pdf
+b0de0892d2092c8c70aa22500fed31aa7eb4dd3f,http://arxiv.org/pdf/1504.05524.pdf,,https://doi.org/10.1007/s11263-015-0846-5,https://arxiv.org/pdf/1504.05524v1.pdf
+b018fa5cb9793e260b8844ae155bd06380988584,http://pdfs.semanticscholar.org/b018/fa5cb9793e260b8844ae155bd06380988584.pdf,,,http://www.esat.kuleuven.ac.be/~knummiar/star/KUL_deliverable_6_3.pdf
+b07f9dfc904d317fa71c1efa9b466460abc0bee5,,,,
+b073313325b6482e22032e259d7311fb9615356c,http://alumni.cs.ucr.edu/~hli/paper/hli05tumor.pdf,,,http://conferences.computer.org/bioinformatics/CSB2005/PDF3/35_lih_robust.pdf
+b0502dcc6df378ee3ddeefeeb1cc51a20e04f39b,,,,
+b0f59b71f86f18495b9f4de7c5dbbebed4ae1607,,,https://doi.org/10.1016/j.neucom.2015.04.085,
+a6f81619158d9caeaa0863738ab400b9ba2d77c2,http://pdfs.semanticscholar.org/a6f8/1619158d9caeaa0863738ab400b9ba2d77c2.pdf,,,http://dap.vsb.cz/wsc17conf/Media/Default/Page/online_wsc17_submission_59.pdf
+a6d621a5aae983a6996849db5e6bc63fe0a234af,http://mplab.ucsd.edu/~ksikka/pain_icmi14.pdf,,,http://doi.acm.org/10.1145/2663204.2666282
+a63ec22e84106685c15c869aeb157aa48259e855,,,https://doi.org/10.1142/S0219691312500294,
+a65301ec723dfac73c1e884d26dedeb4de309429,,,,
+a695c2240382e362262db72017ceae0365d63f8f,http://www3.nd.edu/~kwb/AggarwalBiswasFlynnBowyerWACV_2012.pdf,,,https://www3.nd.edu/~kwb/AggarwalBiswasFlynnBowyerWACV_2012.pdf
+a66d89357ada66d98d242c124e1e8d96ac9b37a0,http://pdfs.semanticscholar.org/a66d/89357ada66d98d242c124e1e8d96ac9b37a0.pdf,,https://doi.org/10.1007/978-3-319-54427-4_27,https://arxiv.org/pdf/1608.06451v1.pdf
+a6d7cf29f333ea3d2aeac67cde39a73898e270b7,http://pdfs.semanticscholar.org/a6d7/cf29f333ea3d2aeac67cde39a73898e270b7.pdf,,,http://www.researchgate.net/profile/Ihsan_Ullah5/publication/266318027_Gender_Classification_from_Facial_Images_Using_Texture_Descriptors/links/542c5fbd0cf27e39fa93e742.pdf
+a611c978e05d7feab01fb8a37737996ad6e88bd9,http://cbl.uh.edu/pub_files/3_Benchmarking3DPoseEstimationForFaceRecognition_ICPR2014_v8.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICPR.2014.42
+a608c5f8fd42af6e9bd332ab516c8c2af7063c61,http://mcl.usc.edu/wp-content/uploads/2016/01/Liu-TIFS-2015-10.pdf,,https://doi.org/10.1109/TIFS.2015.2462732,
+a6ffe238eaf8632b4a8a6f718c8917e7f3261546,http://pdfs.semanticscholar.org/a6ff/e238eaf8632b4a8a6f718c8917e7f3261546.pdf,,,http://www.amj.net.au/index.php?journal=AMJ&op=viewFile&page=article&path%5B%5D=921&path%5B%5D=840
+a6e75b4ccc793a58ef0f6dbe990633f7658c7241,,,https://doi.org/10.1016/j.cviu.2016.10.007,
+a6583c8daa7927eedb3e892a60fc88bdfe89a486,http://pdfs.semanticscholar.org/a658/3c8daa7927eedb3e892a60fc88bdfe89a486.pdf,,https://doi.org/10.1016/j.patrec.2016.06.020,http://mediatum.ub.tum.de/doc/1315864/359237.pdf
+a660390654498dff2470667b64ea656668c98ecc,https://pdfs.semanticscholar.org/b42a/97fb47bcd6bfa72e130c08960a77ee96f9ab.pdf,,https://doi.org/10.1109/ICIP.2009.5413940,http://people.kth.se/~mflierl/Publications/zhi09-ICIP.pdf
+a60907b7ee346b567972074e3e03c82f64d7ea30,http://pdfs.semanticscholar.org/a609/07b7ee346b567972074e3e03c82f64d7ea30.pdf,,https://doi.org/10.1007/978-3-319-16811-1_21,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ACCV_2014/pages/PDF/374.pdf
+a6e43b73f9f87588783988333997a81b4487e2d5,http://pdfs.semanticscholar.org/a6e4/3b73f9f87588783988333997a81b4487e2d5.pdf,,https://doi.org/10.1007/978-3-319-42911-3_50,https://cs.nju.edu.cn/zhouzh/zhouzh.files/publication/pricai16.pdf
+a6496553fb9ab9ca5d69eb45af1bdf0b60ed86dc,http://pdfs.semanticscholar.org/a649/6553fb9ab9ca5d69eb45af1bdf0b60ed86dc.pdf,,https://doi.org/10.1007/978-3-642-19318-7_16,http://www.researchgate.net/profile/Ramzan_Khan/publication/220744786_Semi-supervised_Neighborhood_Preserving_Discriminant_Embedding_A_Semi-supervised_Subspace_Learning_Algorithm/links/0c96052ec3a26284f5000000.pdf
+a6b5ffb5b406abfda2509cae66cdcf56b4bb3837,http://pdfs.semanticscholar.org/bce2/02717ce134b317b39f0a18151659d643875b.pdf,,https://doi.org/10.1007/978-3-642-24471-1_3,http://www.cs.tau.ac.il/~wolf/papers/ossml.pdf
+a6771936ffeba6e7fffad1d2c60e42519c615e24,,,,
+a694180a683f7f4361042c61648aa97d222602db,http://www.iab-rubric.org/papers/ICB16-Autoscat.pdf,,https://doi.org/10.1109/ICB.2016.7550091,
+a6db73f10084ce6a4186363ea9d7475a9a658a11,http://pdfs.semanticscholar.org/afce/ebbea6e9130cf22142206c19a19cda226b13.pdf,,,https://arxiv.org/pdf/1612.03052v3.pdf
+a6634ff2f9c480e94ed8c01d64c9eb70e0d98487,http://pdfs.semanticscholar.org/a663/4ff2f9c480e94ed8c01d64c9eb70e0d98487.pdf,,https://doi.org/10.1016/j.ipl.2004.09.014,http://www.researchgate.net/profile/Tee_Connie/publication/220114011_PalmHashing_a_novel_approach_for_cancelable_biometrics/links/00b4951830c68b2d5e000000.pdf
+a6e2ee89cbe6fabad88713ef1f8e9da5dd7cf167,,,,
+a62997208fec1b2fbca6557198eb7bc9340b2409,,,https://doi.org/10.1109/HPCC.and.EUC.2013.241,
+a6b1d79bc334c74cde199e26a7ef4c189e9acd46,http://pdfs.semanticscholar.org/a6b1/d79bc334c74cde199e26a7ef4c189e9acd46.pdf,,,http://www.biorxiv.org/content/biorxiv/early/2017/08/17/177196.full.pdf
+a6ebe013b639f0f79def4c219f585b8a012be04f,http://pdfs.semanticscholar.org/a6eb/e013b639f0f79def4c219f585b8a012be04f.pdf,,https://doi.org/10.1007/978-3-319-22053-6_33,https://pdfs.semanticscholar.org/a6eb/e013b639f0f79def4c219f585b8a012be04f.pdf
+a6ab23f67d85da26592055c0eac4c34f05c26519,,,,http://doi.ieeecomputersociety.org/10.1109/ICTAI.2006.15
+a6e21438695dbc3a184d33b6cf5064ddf655a9ba,http://pdfs.semanticscholar.org/b673/ffe63c5d0723009042f0f922f19f093b7e34.pdf,,,https://arxiv.org/pdf/1703.07475v1.pdf
+a6793de9a01afe47ffbb516cc32f66625f313231,,,,http://doi.acm.org/10.1145/2939672.2939853
+b944cc4241d195b1609a7a9d87fce0e9ba1498bc,,,https://doi.org/10.1109/TSP.2011.2179539,
+b95d13d321d016077bd2906f7fbd9be7c3643475,,,,
+b934f730a81c071dbfc08eb4c360d6fca2daa08f,,,,http://doi.ieeecomputersociety.org/10.1109/ICME.2015.7177496
+b9cedd09bdae827dacb138d6b054449d5346caf1,http://www.cs.colostate.edu/~lui/Papers/BTAS09LUIa.pdf,,,
+b98e7a8f605c21e25ac5e32bfb1851a01f30081b,,,,http://doi.acm.org/10.1145/2393347.2396303
+b9d68dbeb8e5fdc5984b49a317ea6798b378e5ae,,,,http://doi.acm.org/10.1145/2733373.2807962
+b9cad920a00fc0e997fc24396872e03f13c0bb9c,http://www.ic.unicamp.br/~rocha/pub/papers/2011-icip-spoofing-detection.pdf,,https://doi.org/10.1109/ICIP.2011.6116484,
+b972683d702a65d3ee7a25bc931a5890d1072b6b,,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2017.2669035
+b9c9c7ef82f31614c4b9226e92ab45de4394c5f6,http://pdfs.semanticscholar.org/b9c9/c7ef82f31614c4b9226e92ab45de4394c5f6.pdf,,,http://cdn.intechopen.com/pdfs/12307/InTech-Face_recognition_under_varying_illumination.pdf
+b910590a0eb191d03e1aedb3d55c905129e92e6b,,,,http://doi.acm.org/10.1145/2808492.2808570
+b9b5624045c6f9d77fd1a029f4ff27aab26fa9fe,,,,
+b9f2a755940353549e55690437eb7e13ea226bbf,http://pdfs.semanticscholar.org/b9f2/a755940353549e55690437eb7e13ea226bbf.pdf,,,http://vision.cs.utexas.edu/aavl_workshop_eccv16/papers/AAVL_PID3.pdf
+b9cedd1960d5c025be55ade0a0aa81b75a6efa61,http://pdfs.semanticscholar.org/b9ce/dd1960d5c025be55ade0a0aa81b75a6efa61.pdf,,,http://arxiv.org/pdf/1512.06500v1.pdf
+b959055bae89f279015f0f6b1eca3e37ecbdd339,,,,
+a180dc9766490416246e7fbafadca14a3c500a46,,,https://doi.org/10.1016/S0167-8655(03)00112-0,
+a1dd806b8f4f418d01960e22fb950fe7a56c18f1,https://www.cc.gatech.edu/~parikh/Publications/ParikhGrauman_CVPR2011_nameable.pdf,,,https://filebox.ece.vt.edu/~parikh/Publications/ParikhGrauman_CVPR2011_nameable.pdf
+a158c1e2993ac90a90326881dd5cb0996c20d4f3,http://pdfs.semanticscholar.org/a158/c1e2993ac90a90326881dd5cb0996c20d4f3.pdf,,https://doi.org/10.3390/sym2020554,http://www.mdpi.com/2073-8994/2/2/554/pdf-vor
+a15d9d2ed035f21e13b688a78412cb7b5a04c469,http://pdfs.semanticscholar.org/a15d/9d2ed035f21e13b688a78412cb7b5a04c469.pdf,,https://doi.org/10.1007/978-3-642-33718-5_60,http://www.csc.kth.se/~azizpour/papers/ha_eccv12.pdf
+a1b1442198f29072e907ed8cb02a064493737158,http://affect.media.mit.edu/pdfs/12.McDuff-etal-Crowdsourcing-TAC.pdf,,,http://doi.ieeecomputersociety.org/10.1109/T-AFFC.2012.19
+a100595c66f84c3ddd3da8d362a53f7a82f6e3eb,,,,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.46
+a1cda8e30ce35445e4f51b47ab65b775f75c9f18,,,https://doi.org/10.1109/ISBA.2018.8311462,
+a14db48785d41cd57d4eac75949a6b79fc684e70,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Barkan_Fast_High_Dimensional_2013_ICCV_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2013.246
+a15c728d008801f5ffc7898568097bbeac8270a4,http://pdfs.semanticscholar.org/a15c/728d008801f5ffc7898568097bbeac8270a4.pdf,,,https://www.forgetit-project.eu/fileadmin/fm-dam/deliverables/ForgetIT_WP4_D4.4.pdf
+a13a27e65c88b6cb4a414fd4f6bca780751a59db,,,https://doi.org/10.1109/SMC.2016.7844934,
+a1cecbb759c266133084d98747d022c1e638340d,,,,http://doi.acm.org/10.1145/2670473.2670501
+a11ce3c9b78bf3f868b1467b620219ff651fe125,,,,http://doi.acm.org/10.1145/2911996.2912073
+a192845a7695bdb372cccf008e6590a14ed82761,,,https://doi.org/10.1109/TIP.2014.2321495,
+a119844792fd9157dec87e3937685c8319cac62f,,,https://doi.org/10.1109/APSIPA.2015.7415395,
+a125bc55bdf4bec7484111eea9ae537be314ec62,http://pdfs.semanticscholar.org/a125/bc55bdf4bec7484111eea9ae537be314ec62.pdf,,,http://apsipa.org/proceedings_2009/pdf/MA-L1-2.pdf
+a14ae81609d09fed217aa12a4df9466553db4859,http://homepages.dcc.ufmg.br/~william/papers/paper_2011_TIP.pdf,,https://doi.org/10.1109/TIP.2011.2176951,http://www.umiacs.umd.edu/~jhchoi/paper/tip12.pdf
+a1a5143a962ab3dc6f2a0d5300cde71d9f087404,,,,
+a1ee0176a9c71863d812fe012b5c6b9c15f9aa8a,http://pdfs.semanticscholar.org/a1ee/0176a9c71863d812fe012b5c6b9c15f9aa8a.pdf,,,http://ceur-ws.org/Vol-811/paper2.pdf
+a1f40bcfadbeee66f67ab0755dd3037c030a7450,http://www.researchgate.net/profile/Jiansheng_Chen/publication/265016758_Face_Image_Quality_Assessment_Based_on_Learning_to_Rank/links/546d662d0cf2193b94c5852b.pdf,,https://doi.org/10.1109/LSP.2014.2347419,
+a1e97c4043d5cc9896dc60ae7ca135782d89e5fc,http://pdfs.semanticscholar.org/a1e9/7c4043d5cc9896dc60ae7ca135782d89e5fc.pdf,,,http://arxiv.org/abs/1612.02155
+efd308393b573e5410455960fe551160e1525f49,http://pdfs.semanticscholar.org/efd3/08393b573e5410455960fe551160e1525f49.pdf,,,https://arxiv.org/pdf/1710.02139v1.pdf
+efeeb000107745e3fba04ee4676c0435eaf4257b,,,,
+ef36ca8abf0a23e661f3b1603057963a70e16704,,,,
+efd28eabebb9815e34031316624e7f095c7dfcfe,http://pdfs.semanticscholar.org/efd2/8eabebb9815e34031316624e7f095c7dfcfe.pdf,,https://doi.org/10.1007/978-3-642-31298-4_10,http://wavelab.at/papers/Uhl12b.pdf
+eff87ecafed67cc6fc4f661cb077fed5440994bb,http://pdfs.semanticscholar.org/eff8/7ecafed67cc6fc4f661cb077fed5440994bb.pdf,,https://doi.org/10.1007/3-540-45113-7_19,http://carol.science.uva.nl/~nicu/publications/CIVR03_salient.pdf
+ef2a5a26448636570986d5cda8376da83d96ef87,http://pdfs.semanticscholar.org/ef2a/5a26448636570986d5cda8376da83d96ef87.pdf,,,http://cs231n.stanford.edu/reports/giel_diaz.pdf
+ef9b8724f857daec94690d03764dd1299d0cbbcd,,,,
+ef7b8f73e95faa7a747e0b04363fced0a38d33b0,,,https://doi.org/10.1109/ICIP.2017.8297028,
+ef26b36eb5966364c71d4fed135fe68f891127e5,,,,
+ef35c30529df914a6975af62aca1b9428f678e9f,,,https://doi.org/10.1007/s00138-016-0817-z,
+ef3a0b454370991a9c18ac7bfd228cf15ad53da0,,,https://doi.org/10.1109/ICNC.2010.5582886,
+ef761435c1af2b3e5caba5e8bbbf5aeab69d934e,,,,
+c3c463a9ee464bb610423b7203300a83a166b500,,,https://doi.org/10.1109/ICIP.2014.7025069,
+c3ae4a4c9a9528791e36b64fea8d02b2fced7955,,,,
+c32fb755856c21a238857b77d7548f18e05f482d,http://pdfs.semanticscholar.org/c32f/b755856c21a238857b77d7548f18e05f482d.pdf,,,https://www.ijser.org/researchpaper/Multimodal-Emotion-Recognition-for-Human-Computer-Interaction-A-Survey.pdf
+c34e48d637705ffb52360c2afb6b03efdeb680bf,http://pdfs.semanticscholar.org/c34e/48d637705ffb52360c2afb6b03efdeb680bf.pdf,,https://doi.org/10.1016/j.patcog.2012.04.030,http://ibug.doc.ic.ac.uk/media/uploads/documents/sdnmf_journal.pdf
+c3b3636080b9931ac802e2dd28b7b684d6cf4f8b,http://pdfs.semanticscholar.org/c3b3/636080b9931ac802e2dd28b7b684d6cf4f8b.pdf,,,http://www.sersc.org/journals/IJSIA/vol7_no2_2013/15.pdf
+c398684270543e97e3194674d9cce20acaef3db3,http://pdfs.semanticscholar.org/c398/684270543e97e3194674d9cce20acaef3db3.pdf,,,http://www.springer.com/cda/content/document/cda_downloaddocument/9783319685328-c2.pdf?SGWID=0-0-45-1625643-p181166578
+c352b5ccd6fa1812b108d74d268ce3f19efccf0b,,,,
+c3418f866a86dfd947c2b548cbdeac8ca5783c15,http://pdfs.semanticscholar.org/c341/8f866a86dfd947c2b548cbdeac8ca5783c15.pdf,,,https://ibug.doc.ic.ac.uk/media/uploads/documents/disentangling-modes-variation.pdf
+c3390711f5ce6f5f0728ef88c54148bf9d8783a2,,,https://doi.org/10.1016/j.engappai.2015.03.016,
+c3f76a9ebe53825e14f851120cca4e1fc29807de,,,,
+c3bcc4ee9e81ce9c5c0845f34e9992872a8defc0,http://pdfs.semanticscholar.org/c3bc/c4ee9e81ce9c5c0845f34e9992872a8defc0.pdf,,,http://b2.cvl.iis.u-tokyo.ac.jp/mva/proceedings/CommemorativeDVD/2005/papers/2005265.pdf
+c32383330df27625592134edd72d69bb6b5cff5c,http://www.iis.sinica.edu.tw/papers/song/13690-F.pdf,,https://doi.org/10.1109/TSMCB.2011.2167322,
+c3a3f7758bccbead7c9713cb8517889ea6d04687,http://pdfs.semanticscholar.org/c3a3/f7758bccbead7c9713cb8517889ea6d04687.pdf,,https://doi.org/10.1016/j.neucom.2016.09.072,http://arxiv.org/abs/1609.07304
+c3cfbd03efca980431e17fcbc507962377821681,,,,
+c3e53788370341afe426f2216bed452cbbdaf117,,,,http://doi.ieeecomputersociety.org/10.1109/ATNAC.2017.8215436
+c32f04ccde4f11f8717189f056209eb091075254,http://pdfs.semanticscholar.org/c32f/04ccde4f11f8717189f056209eb091075254.pdf,,,http://www.cs.bris.ac.uk/Publications/Papers/2000687.pdf
+c30982d6d9bbe470a760c168002ed9d66e1718a2,http://facstaff.elon.edu/sspurlock/papers/spurlock15_head_pose.pdf,,,http://doi.acm.org/10.1145/2789116.2789123
+c32cd207855e301e6d1d9ddd3633c949630c793a,http://pdfs.semanticscholar.org/c32c/d207855e301e6d1d9ddd3633c949630c793a.pdf,,,http://www.cise.ufl.edu/~jho/papers/BookChapter.pdf
+c37a971f7a57f7345fdc479fa329d9b425ee02be,http://pdfs.semanticscholar.org/c37a/971f7a57f7345fdc479fa329d9b425ee02be.pdf,,,http://arxiv.org/abs/1509.01074
+c3638b026c7f80a2199b5ae89c8fcbedfc0bd8af,http://pdfs.semanticscholar.org/c363/8b026c7f80a2199b5ae89c8fcbedfc0bd8af.pdf,,,http://acberg.com/papers/berg_thesis.pdf
+c363c5d44214bf518a085fb13896909f821f39e8,,,,
+c3a53b308c7a75c66759cbfdf52359d9be4f552b,,,,http://doi.ieeecomputersociety.org/10.1109/ISPAN-FCST-ISCC.2017.16
+c36f3cabeddce0263c944e9fe4afd510b5bae816,,,https://doi.org/10.1109/DICTA.2017.8227399,
+c32c8bfadda8f44d40c6cd9058a4016ab1c27499,http://pdfs.semanticscholar.org/c32c/8bfadda8f44d40c6cd9058a4016ab1c27499.pdf,,,http://www.cfar.umd.edu/~shaohua/papers/egip_chapter.pdf
+c3fb2399eb4bcec22723715556e31c44d086e054,http://mirlab.org/conference_papers/International_Conference/ICASSP%202014/papers/p499-srinivasan.pdf,,https://doi.org/10.1109/ICASSP.2014.6853646,
+c418a3441f992fea523926f837f4bfb742548c16,http://pdfs.semanticscholar.org/c418/a3441f992fea523926f837f4bfb742548c16.pdf,,https://doi.org/10.1007/978-3-642-13059-5_57,https://pdfs.semanticscholar.org/c418/a3441f992fea523926f837f4bfb742548c16.pdf
+c4b00e86841db3fced2a5d8ac65f80d0d3bbe352,,,,http://doi.ieeecomputersociety.org/10.1109/AIPR.2004.4
+c4b58ceafdf4cf55586b036b9eb4d6d3d9ecd9c4,http://www.serc.iisc.ernet.in/~venky/Papers/Action_Recognition_CD_ISSNIP14.pdf,,https://doi.org/10.1109/ISSNIP.2014.6827622,https://pdfs.semanticscholar.org/c4b5/8ceafdf4cf55586b036b9eb4d6d3d9ecd9c4.pdf
+c44c84540db1c38ace232ef34b03bda1c81ba039,http://pdfs.semanticscholar.org/c44c/84540db1c38ace232ef34b03bda1c81ba039.pdf,,https://doi.org/10.1007/978-3-319-10599-4_49,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ECCV_2014/papers/8694/86940768.pdf
+c41a3c31972cf0c1be6b6895f3bf97181773fcfb,,,https://doi.org/10.1109/ICPR.2014.103,
+c4f1fcd0a5cdaad8b920ee8188a8557b6086c1a4,https://vision.cornell.edu/se3/wp-content/uploads/2015/02/ijcv2014.pdf,,https://doi.org/10.1007/s11263-014-0698-4,http://vision.cornell.edu/se3/wp-content/uploads/2015/02/ijcv2014.pdf
+c4ca092972abb74ee1c20b7cae6e69c654479e2c,,,https://doi.org/10.1109/ICIP.2016.7532960,
+c4c1fb882ae8b48c461e1f7c359ea3ea15da29fa,,,,
+c444c4dab97dd6d6696f56c1cacda051dde60448,,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2013.37
+c459014131cbcd85f5bd5c0a89115b5cc1512be9,,,,http://doi.ieeecomputersociety.org/10.1109/AUTOID.2005.23
+c46a4db7247d26aceafed3e4f38ce52d54361817,http://pdfs.semanticscholar.org/c46a/4db7247d26aceafed3e4f38ce52d54361817.pdf,,https://doi.org/10.1007/978-3-319-49409-8_14,http://arxiv.org/abs/1609.09642
+c4dcf41506c23aa45c33a0a5e51b5b9f8990e8ad,http://pdfs.semanticscholar.org/c4dc/f41506c23aa45c33a0a5e51b5b9f8990e8ad.pdf,,,http://www.aaai.org/Papers/Symposia/Fall/2005/FS-05-05/FS05-05-017.pdf
+c42a8969cd76e9f54d43f7f4dd8f9b08da566c5f,http://pdfs.semanticscholar.org/c42a/8969cd76e9f54d43f7f4dd8f9b08da566c5f.pdf,,,http://cdn.intechopen.com/pdfs/21764/InTech-Towards_unconstrained_face_recognition_using_3d_face_model.pdf
+c49075ead6eb07ede5ada4fe372899bd0cfb83ac,,,https://doi.org/10.1109/ICSPCS.2015.7391782,
+c41de506423e301ef2a10ea6f984e9e19ba091b4,http://www.ee.columbia.edu/ln/dvmm/publications/14/felixyu_llp_mm2014.pdf,,,http://doi.acm.org/10.1145/2647868.2654993
+c4541802086461420afb1ecb5bb8ccd5962a9f02,,,https://doi.org/10.1109/TSMCB.2009.2029076,
+c4d439fe07a65b735d0c8604bd5fdaea13f6b072,,,,http://doi.acm.org/10.1145/2671188.2749294
+c4d0d09115a0df856cdb389fbccb20f62b07b14e,,,https://doi.org/10.1109/ICIP.2012.6466925,
+c4934d9f9c41dbc46f4173aad2775432fe02e0e6,http://pdfs.semanticscholar.org/c493/4d9f9c41dbc46f4173aad2775432fe02e0e6.pdf,,,https://openreview.net/pdf?id=rJvPIReKx
+c40c23e4afc81c8b119ea361e5582aa3adecb157,http://pdfs.semanticscholar.org/c40c/23e4afc81c8b119ea361e5582aa3adecb157.pdf,,https://doi.org/10.1007/978-3-642-33868-7_24,http://vishnu.boddeti.net/papers/eccv-2012.pdf
+c49aed65fcf9ded15c44f9cbb4b161f851c6fa88,http://pdfs.semanticscholar.org/c49a/ed65fcf9ded15c44f9cbb4b161f851c6fa88.pdf,,,http://www.ee.iitb.ac.in/~icvgip/PAPERS/298.pdf
+c472436764a30278337aca9681eee456bee95c34,http://pdfs.semanticscholar.org/c472/436764a30278337aca9681eee456bee95c34.pdf,,,http://labconscious.huji.ac.il/wp-content/uploads/2017/09/1-s2.0-S2352250X1730043X-main.pdf
+c466ad258d6262c8ce7796681f564fec9c2b143d,http://pdfs.semanticscholar.org/c466/ad258d6262c8ce7796681f564fec9c2b143d.pdf,,,http://www.mva-org.jp/Proceedings/2013USB/papers/14-21.pdf
+ea3fa5e6004c0504feaa31e01b2ea19f138e9a78,,,,
+eacba5e8fbafb1302866c0860fc260a2bdfff232,http://pdfs.semanticscholar.org/eacb/a5e8fbafb1302866c0860fc260a2bdfff232.pdf,,,https://arxiv.org/pdf/1803.09092v1.pdf
+ea227e47b8a1e8f55983c34a17a81e5d3fa11cfd,,,https://doi.org/10.1109/ICIP.2017.8296549,
+ea8fa68b74ffefbe79a3576d7e4ae4365a1346ff,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.113
+ea8d217231d4380071132ce37bf997164b60ec44,,,https://doi.org/10.1109/SIU.2016.7496031,
+ea2b3efd4d317ebaffaf7dc8c62db5ff1eab0e1b,,,https://doi.org/10.1109/FRUCT-ISPIT.2016.7561522,
+ea026456729f0ec54c697198e1fd089310de4ae2,,,https://doi.org/10.1109/CIBIM.2013.6607917,
+ea79a2ad4ac307cb8c586b52bf06d7bf783003a8,,,,
+ea482bf1e2b5b44c520fc77eab288caf8b3f367a,http://pdfs.semanticscholar.org/ea48/2bf1e2b5b44c520fc77eab288caf8b3f367a.pdf,,https://doi.org/10.24963/ijcai.2017/361,http://www.ijcai.org/proceedings/2017/0361.pdf
+ea6f5c8e12513dbaca6bbdff495ef2975b8001bd,http://pdfs.semanticscholar.org/ea6f/5c8e12513dbaca6bbdff495ef2975b8001bd.pdf,,,https://www.amhsr.org/articles/applying-a-set-of-gabor-filter-to-2dretinal-fundus-image-to-detect-the-optic-nerve-head-onh.pdf
+eab53c9e3e8442050aa6ad97003f2356a365adaa,,,,
+ea1eeefb676d39b5f456937f8894311587cc7c2f,,,,
+ea85378a6549bb9eb9bcc13e31aa6a61b655a9af,http://pdfs.semanticscholar.org/ea85/378a6549bb9eb9bcc13e31aa6a61b655a9af.pdf,,,http://www.gris.informatik.tu-darmstadt.de/lehre/dipl_bachl_mastr/DanielHartungDiplomarbeit.pdf
+ea2ee5c53747878f30f6d9c576fd09d388ab0e2b,http://pdfs.semanticscholar.org/ea2e/e5c53747878f30f6d9c576fd09d388ab0e2b.pdf,,https://doi.org/10.1007/978-3-642-21257-4_37,https://acceda.ulpgc.es:8443/bitstream/10553/15079/5/C075_LNCS_IBPRIA11_postprint.pdf
+ea86b75427f845f04e96bdaadfc0d67b3f460005,,,https://doi.org/10.1109/ICIP.2016.7532686,
+ea5c9d5438cde6d907431c28c2f1f35e02b64b33,,,https://doi.org/10.1109/SPAC.2017.8304257,
+ea218cebea2228b360680cb85ca133e8c2972e56,http://pdfs.semanticscholar.org/ea21/8cebea2228b360680cb85ca133e8c2972e56.pdf,,,http://arxiv.org/pdf/1404.3543v2.pdf
+ea96bc017fb56593a59149e10d5f14011a3744a0,http://pdfs.semanticscholar.org/ea96/bc017fb56593a59149e10d5f14011a3744a0.pdf,,https://doi.org/10.1016/j.sigpro.2015.09.038,http://www.cse.ust.hk/~qnature/pdf/SignalProcessing2016.pdf
+e12b2c468850acb456b0097d5535fc6a0d34efe3,,,https://doi.org/10.1016/j.neucom.2011.03.009,
+e1c50cf0c08d70ff90cf515894b2b360b2bc788b,,,https://doi.org/10.1109/ICSMC.2007.4414085,
+e10cbd049ac2f5cc8af9eb8e587b3408ad4bb111,,,https://doi.org/10.1117/1.JEI.24.5.053028,
+e1b656c846a360d816a9f240499ec4f306897b98,,,,
+e10a257f1daf279e55f17f273a1b557141953ce2,http://pdfs.semanticscholar.org/e10a/257f1daf279e55f17f273a1b557141953ce2.pdf,,https://doi.org/10.1016/j.imavis.2014.02.001,http://qil.uh.edu/qil/websitecontent/pdf/2015-9.pdf
+e171fba00d88710e78e181c3e807c2fdffc6798a,http://pdfs.semanticscholar.org/e171/fba00d88710e78e181c3e807c2fdffc6798a.pdf,,https://doi.org/10.1016/S0031-3203(03)00008-6,https://pdfs.semanticscholar.org/e171/fba00d88710e78e181c3e807c2fdffc6798a.pdf
+e1c59e00458b4dee3f0e683ed265735f33187f77,http://pdfs.semanticscholar.org/e1c5/9e00458b4dee3f0e683ed265735f33187f77.pdf,,,http://www.aaai.org/ocs/index.php/AAAI/AAAI13/paper/download/6462/7178
+e111624fb4c5dc60b9e8223abfbf7c4196d34b21,,,,http://doi.ieeecomputersociety.org/10.1109/BIBM.2016.7822814
+e101bab97bce2733222db9cfbb92a82779966508,,,https://doi.org/10.1109/TCYB.2016.2549639,
+e1f790bbedcba3134277f545e56946bc6ffce48d,http://pdfs.semanticscholar.org/e1f7/90bbedcba3134277f545e56946bc6ffce48d.pdf,,,http://ijirset.com/upload/2014/may/71_Image_Retrieval.pdf
+e14b046a564604508ea8e3369e7e9f612e148511,,,https://doi.org/10.1007/978-3-642-17829-0_4,
+e1ab3b9dee2da20078464f4ad8deb523b5b1792e,http://pdfs.semanticscholar.org/e1ab/3b9dee2da20078464f4ad8deb523b5b1792e.pdf,,,http://www.ni.tu-berlin.de/fileadmin/fg215/teaching/nnproject/cnn_pre_trainin_paper.pdf
+e16efd2ae73a325b7571a456618bfa682b51aef8,http://pdfs.semanticscholar.org/e16e/fd2ae73a325b7571a456618bfa682b51aef8.pdf,,,http://cse.seu.edu.cn/people/xgeng/LDL/resource/aaai16b.pdf
+e16eeed2ada9166a035d238b1609462928db69db,,,,
+e13360cda1ebd6fa5c3f3386c0862f292e4dbee4,http://pdfs.semanticscholar.org/e133/60cda1ebd6fa5c3f3386c0862f292e4dbee4.pdf,,,http://arxiv.org/abs/1611.08976
+e1f6e2651b7294951b5eab5d2322336af1f676dc,http://pdfs.semanticscholar.org/e1f6/e2651b7294951b5eab5d2322336af1f676dc.pdf,,,http://naturalspublishing.com/files/published/947g26k9xcj6vx.pdf
+e198a7b9e61dd19c620e454aaa81ae8f7377ade0,,,https://doi.org/10.1109/CVPRW.2010.5543611,
+e1dd586842419f3c40c0d7b70c120cdea72f5b5c,,,,
+e1449be4951ba7519945cd1ad50656c3516113da,,,https://doi.org/10.1109/TCSVT.2016.2603535,
+e1e6e6792e92f7110e26e27e80e0c30ec36ac9c2,http://pdfs.semanticscholar.org/e1e6/e6792e92f7110e26e27e80e0c30ec36ac9c2.pdf,,,https://arxiv.org/pdf/1803.05105v1.pdf
+cd9666858f6c211e13aa80589d75373fd06f6246,http://pdfs.semanticscholar.org/cd96/66858f6c211e13aa80589d75373fd06f6246.pdf,,https://doi.org/10.1007/978-3-319-54187-7_29,http://www.diid.unipa.it/cvip/pdf/ACCV2016.pdf
+cd4c047f4d4df7937aff8fc76f4bae7718004f40,http://pdfs.semanticscholar.org/cd4c/047f4d4df7937aff8fc76f4bae7718004f40.pdf,,https://doi.org/10.1016/j.cviu.2015.01.008,http://gravis.cs.unibas.ch/publications/2015/2015_Background_Modeling_Generative_Models.pdf
+cd2bf0e1d19babe51eaa94cbc24b223e9c048ad6,,,,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2016.2581168
+cd6c2ae00157e3fb6ab56379843280eb4cbb01b4,http://www.umiacs.umd.edu/~yzyang/paper/ICRA_2013_Multi.pdf,,https://doi.org/10.1109/ICRA.2013.6631179,http://www.umiacs.umd.edu/~cteo/public-shared/ICRA_2013_Multi_preprint.pdf
+cd596a2682d74bdfa7b7160dd070b598975e89d9,http://pdfs.semanticscholar.org/cd59/6a2682d74bdfa7b7160dd070b598975e89d9.pdf,,,http://cs229.stanford.edu/proj2009/AgrawalCosgriffMudur.pdf
+cde7901c0945683d0c677b1bb415786e4f6081e6,,,,http://doi.ieeecomputersociety.org/10.1109/IRI.2015.44
+cdb1d32bc5c1a9bb0d9a5b9c9222401eab3e9ca0,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhang_Functional_Faces_Groupwise_CVPR_2016_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.544
+cdf2c8752f1070b0385a94c7bf22e8b54cac521b,,,https://doi.org/10.1007/s11265-010-0541-2,
+cda8fd9dd8b485e6854b1733d2294f69666c66f7,http://eeeweba.ntu.edu.sg/computervision/Research%20Papers/2014/Activity%20Recognition%20in%20Unconstrained%20RGB-D%20Video%20using%203D%20Trajectories.pdf,,,http://doi.acm.org/10.1145/2668956.2668961
+cd33b3ca8d7f00c1738c41b2071a3164ba42ea61,,,https://doi.org/10.1142/S0218213008003832,http://doi.ieeecomputersociety.org/10.1109/ICTAI.2006.25
+cda4fb9df653b5721ad4fe8b4a88468a410e55ec,http://pdfs.semanticscholar.org/cda4/fb9df653b5721ad4fe8b4a88468a410e55ec.pdf,,,http://disp.ee.ntu.edu.tw/~pujols/Gabor%20wavelet%20transform%20and%20its%20application.pdf
+cdf0dc4e06d56259f6c621741b1ada5c88963c6d,,,https://doi.org/10.1109/ICIP.2014.7025061,
+cd85f71907f1c27349947690b48bfb84e44a3db0,,,https://doi.org/10.1007/978-981-10-4840-1,
+cdfa7dccbc9e9d466f8a5847004973a33c7fcc89,,,https://doi.org/10.1109/TIFS.2013.2263498,
+cd3005753012409361aba17f3f766e33e3a7320d,http://pdfs.semanticscholar.org/cd30/05753012409361aba17f3f766e33e3a7320d.pdf,,,http://arxiv.org/abs/1004.0517
+cd64530a910ba28cbd127c78913dd787184f8e6d,,,,
+cd687ddbd89a832f51d5510c478942800a3e6854,http://pdfs.semanticscholar.org/cd68/7ddbd89a832f51d5510c478942800a3e6854.pdf,,,http://www.fdg2014.org/papers/fdg2014_demo_12.pdf
+cde373b159361705580498d8712b9b7063c0d58c,,,,
+cd3b713722ccb1e2ae3b050837ca296b2a2dd82a,,,https://doi.org/10.1016/j.jvcir.2016.07.015,
+cd436f05fb4aeeda5d1085f2fe0384526571a46e,http://pdfs.semanticscholar.org/cd43/6f05fb4aeeda5d1085f2fe0384526571a46e.pdf,,https://doi.org/10.1007/978-3-319-46478-7_39,http://vision.csee.wvu.edu/~doretto/publications/motiianD16eccv.pdf
+cd74d606e76ecddee75279679d9770cdc0b49861,,,https://doi.org/10.1109/TIP.2014.2365725,
+cdd30bd77c7a4fa21176a21498f65f6b8b873965,,,,
+cc589c499dcf323fe4a143bbef0074c3e31f9b60,http://www.cs.binghamton.edu/~lijun/Research/3DFE/Yin_FGR06_a.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FGR.2006.6
+ccfcbf0eda6df876f0170bdb4d7b4ab4e7676f18,http://ibug.doc.ic.ac.uk/media/uploads/documents/taud.pdf,,https://doi.org/10.1109/TCYB.2013.2249063,https://ibug.doc.ic.ac.uk/media/uploads/documents/taud.pdf
+cc2eaa182f33defbb33d69e9547630aab7ed9c9c,http://pdfs.semanticscholar.org/ce2e/e807a63bbdffa530c80915b04d11a7f29a21.pdf,,,http://www.aaai.org/ocs/index.php/HCOMP/HCOMP15/paper/view/11593
+cc1b093cfb97475faabab414878fa7e4a2d97cd7,,,,http://doi.ieeecomputersociety.org/10.1109/ICALT.2017.141
+ccca2263786429b1b3572886ce6a2bea8f0dfb26,,,https://doi.org/10.1007/s10044-014-0388-4,
+ccbfc004e29b3aceea091056b0ec536e8ea7c47e,http://research.microsoft.com/~yqxu/papers/IEEE%20ICIP2005.pdf,,https://doi.org/10.1109/ICIP.2005.1530073,
+ccdea57234d38c7831f1e9231efcb6352c801c55,http://pdfs.semanticscholar.org/ccde/a57234d38c7831f1e9231efcb6352c801c55.pdf,,https://doi.org/10.1142/S0218001414560114,http://cdn.intechopen.com/pdfs/10200/InTech-Illumination_processing_in_face_recognition.pdf
+cc47368fe303c6cbda38caf5ac0e1d1c9d7e2a52,,,,
+cc38942825d3a2c9ee8583c153d2c56c607e61a7,http://pdfs.semanticscholar.org/cc38/942825d3a2c9ee8583c153d2c56c607e61a7.pdf,,,http://doc.utwente.nl/84357/1/dutta2012database.pdf
+cc3c273bb213240515147e8be68c50f7ea22777c,http://pdfs.semanticscholar.org/cc3c/273bb213240515147e8be68c50f7ea22777c.pdf,,,http://piim.newschool.edu/journal/issues/2015/01/pdfs/ParsonsJournalForInformationMapping_Misha_Rabinovich.pdf
+cc7c63473c5bef5ae09f26b2258691d9ffdd5f93,,,https://doi.org/10.1109/ICMLA.2012.17,
+cc44f1d99b17a049a8186ec04c6a1ecf1906c3c8,,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2012.87
+ccb95192001b07bb25fc924587f9682b0df3de8e,,,https://doi.org/10.1109/ICACCI.2016.7732123,
+cc70fb1ab585378c79a2ab94776723e597afe379,,,https://doi.org/10.1109/ICIP.2017.8297067,
+cc6d3ccc9e3dd0a43313a714316c8783cd879572,,,https://doi.org/10.1109/ICIP.2017.8296802,
+cc9a61a30afdb8a5bc7088e1cef814b53dc4fc66,,,https://doi.org/10.1142/s0218213015400199,
+ccf43c62e4bf76b6a48ff588ef7ed51e87ddf50b,http://pdfs.semanticscholar.org/ccf4/3c62e4bf76b6a48ff588ef7ed51e87ddf50b.pdf,,,http://files.aiscience.org/journal/article/pdf/70160033.pdf
+cc5edaa1b0e91bc3577547fc30ea094aa2722bf0,,,https://doi.org/10.1109/CICARE.2014.7007832,
+cce2f036d0c5f47c25e459b2f2c49fa992595654,,,,http://doi.ieeecomputersociety.org/10.1109/DICTA.2010.93
+cca476114c48871d05537abb303061de5ab010d6,,,https://doi.org/10.15439/2016F472,
+cc1ed45b02d7fffb42a0fd8cffe5f11792b6ea74,,,https://doi.org/10.1109/SIU.2016.7495874,
+cc8bf03b3f5800ac23e1a833447c421440d92197,https://pdfs.semanticscholar.org/cc8b/f03b3f5800ac23e1a833447c421440d92197.pdf,,https://doi.org/10.1007/s00138-009-0232-9,https://www.researchgate.net/profile/Reza_Ebrahimpour/publication/220464885_Improving_mixture_of_experts_for_view-independent_face_recognition_using_teacher-directed_learning/links/0fcfd50774e2478b5e000000.pdf
+ccebd3bf069f5c73ea2ccc5791976f894bc6023d,,,https://doi.org/10.1109/ICPR.2016.7900186,
+cc713a92d8a3aff6f1586923ca9ba267d5e89251,,,,
+cc91001f9d299ad70deb6453d55b2c0b967f8c0d,http://pdfs.semanticscholar.org/cc91/001f9d299ad70deb6453d55b2c0b967f8c0d.pdf,,https://doi.org/10.3390/sym7031475,http://www.mdpi.com/2073-8994/7/3/1475/pdf
+cc96eab1e55e771e417b758119ce5d7ef1722b43,http://pdfs.semanticscholar.org/cc96/eab1e55e771e417b758119ce5d7ef1722b43.pdf,,,http://lib-arxiv-008.serverfarm.cornell.edu/pdf/1511.05049.pdf
+cceec87bad847b9b87178bde8ce5cce6bf1a8e99,,,https://doi.org/10.1109/RIISS.2014.7009163,
+ccfebdf7917cb50b5fcd56fb837f841a2246a149,,,https://doi.org/10.1109/ICIP.2015.7351065,
+cc7e66f2ba9ac0c639c80c65534ce6031997acd7,http://pdfs.semanticscholar.org/cc7e/66f2ba9ac0c639c80c65534ce6031997acd7.pdf,,,https://infoscience.epfl.ch/record/187534/files/top_1.pdf?version=1
+cc9057d2762e077c53e381f90884595677eceafa,http://pdfs.semanticscholar.org/cc90/57d2762e077c53e381f90884595677eceafa.pdf,,https://doi.org/10.1007/978-3-319-16865-4_44,http://web.cse.msu.edu/~liuxm/publication/Roth_Liu_ACCV2014.pdf
+e692870efb009da4b9316678b354ae935fdf48eb,,,,
+e64b683e32525643a9ddb6b6af8b0472ef5b6a37,http://pdfs.semanticscholar.org/e64b/683e32525643a9ddb6b6af8b0472ef5b6a37.pdf,,https://doi.org/10.1007/978-3-642-12900-1_9,http://bi.snu.ac.kr/Courses/DMIR/files/face%20recog.pdf
+e6f3707a75d760c8590292b54bc8a48582da2cd4,,,https://doi.org/10.1007/s11760-012-0410-5,
+e69ac130e3c7267cce5e1e3d9508ff76eb0e0eef,http://pdfs.semanticscholar.org/e69a/c130e3c7267cce5e1e3d9508ff76eb0e0eef.pdf,,https://doi.org/10.1049/iet-cvi.2014.0086,http://cbl.uh.edu/pub_files/CVI-2014-0086.pdf
+e6c491fb6a57c9a7c2d71522a1a066be2e681c84,,,https://doi.org/10.1016/j.imavis.2016.06.002,
+e6d46d923f201da644ae8d8bd04721dd9ac0e73d,,,https://doi.org/10.1109/ISBA.2016.7477226,
+e6865b000cf4d4e84c3fe895b7ddfc65a9c4aaec,http://pdfs.semanticscholar.org/e686/5b000cf4d4e84c3fe895b7ddfc65a9c4aaec.pdf,,,http://user.phil-fak.uni-duesseldorf.de/~petersen/WiSe1213_InfowiColl/cap15.pdf
+e6c4715476216be00ea61fc276ff39fb4620d785,,,,
+e6d689054e87ad3b8fbbb70714d48712ad84dc1c,http://pdfs.semanticscholar.org/e6d6/89054e87ad3b8fbbb70714d48712ad84dc1c.pdf,,https://doi.org/10.5244/C.14.24,http://www.bmva.org/bmvc/2000/papers/p24.pdf
+e6dc1200a31defda100b2e5ddb27fb7ecbbd4acd,http://citeseerx.ist.psu.edu/viewdoc/download;jsessionid=8BA80DE8A35C6665EB6C19D582E5689F?doi=10.1.1.227.7824&rep=rep1&type=pdf,,https://doi.org/10.1109/TIP.2010.2044958,http://c2inet.sce.ntu.edu.sg/ivor/publication/TIP_FME.pdf
+e6f20e7431172c68f7fce0d4595100445a06c117,http://pdfs.semanticscholar.org/e6f2/0e7431172c68f7fce0d4595100445a06c117.pdf,,https://doi.org/10.1007/978-3-319-54184-6_24,http://arxiv.org/abs/1608.06495
+e6c834c816b5366875cf3060ccc20e16f19a9fc6,,,https://doi.org/10.1109/BTAS.2016.7791185,
+e6540d70e5ffeed9f447602ea3455c7f0b38113e,http://pdfs.semanticscholar.org/e654/0d70e5ffeed9f447602ea3455c7f0b38113e.pdf,,https://doi.org/10.1016/j.neucom.2016.12.017,http://www.ic.unicamp.br/~sandra/pdf/perez_Neurocomputing17.pdf
+e66a6ae542907d6a0ebc45da60a62d3eecf17839,,,https://doi.org/10.1109/EUVIP.2014.7018366,
+e66b4aa85524f493dafde8c75176ac0afad5b79c,,,https://doi.org/10.1109/SSCI.2017.8285219,
+e6ee36444038de5885473693fb206f49c1369138,http://pdfs.semanticscholar.org/e6ee/36444038de5885473693fb206f49c1369138.pdf,,,https://arxiv.org/pdf/1801.06345v1.pdf
+e6178de1ef15a6a973aad2791ce5fbabc2cb8ae5,http://pdfs.semanticscholar.org/e617/8de1ef15a6a973aad2791ce5fbabc2cb8ae5.pdf,,https://doi.org/10.1007/978-3-319-66709-6_20,http://mediatum.ub.tum.de/doc/1368388/62577.pdf
+e6d6d1b0a8b414160f67142fc18e1321fe3f1c49,,,https://doi.org/10.1109/FSKD.2015.7382037,
+e6c8f5067ec2ad6af33745312b45fab03e7e038b,http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/1297.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2009.5206657
+e69a765d033ef6ea55c57ca41c146b27964c5cf2,,,https://doi.org/10.1109/ISCAS.2017.8050764,
+f913bb65b62b0a6391ffa8f59b1d5527b7eba948,http://pdfs.semanticscholar.org/f913/bb65b62b0a6391ffa8f59b1d5527b7eba948.pdf,,https://doi.org/10.1016/j.patrec.2013.03.001,https://riunet.upv.es/bitstream/handle/10251/40332/Villegas13_PRL_TSRDA_draft.pdf;jsessionid=E2F74A7179A7A6CAE63858FAE0AE7190?sequence=2
+f9784db8ff805439f0a6b6e15aeaf892dba47ca0,http://pdfs.semanticscholar.org/f978/4db8ff805439f0a6b6e15aeaf892dba47ca0.pdf,,,http://www.wseas.org/multimedia/journals/information/2017/a405909-078.pdf
+f9fb7979af4233c2dd14813da94ec7c38ce9232a,,,,http://doi.acm.org/10.1145/3131902
+f935225e7811858fe9ef6b5fd3fdd59aec9abd1a,http://pdfs.semanticscholar.org/f935/225e7811858fe9ef6b5fd3fdd59aec9abd1a.pdf,,,http://aaiscs.com/LHBD/papers/Liu_2006_NeuroImage.pdf
+f9752fd07b14505d0438bc3e14b23d7f0fe7f48b,,,,http://doi.ieeecomputersociety.org/10.1109/ICMLA.2009.114
+f95321f4348cfacc52084aae2a19127d74426047,,,https://doi.org/10.1109/ICMLC.2013.6890897,
+f963967e52a5fd97fa3ebd679fd098c3cb70340e,http://pdfs.semanticscholar.org/f963/967e52a5fd97fa3ebd679fd098c3cb70340e.pdf,,https://doi.org/10.1007/978-3-642-12159-3_15,https://arxiv.org/pdf/1004.0512v1.pdf
+f9e0209dc9e72d64b290d0622c1c1662aa2cc771,http://pdfs.semanticscholar.org/f9e0/209dc9e72d64b290d0622c1c1662aa2cc771.pdf,,,http://www.cse.msu.edu/biometrics/Publications/Thesis/AlessandraPaulino_ContributionsBiometricRecognitionIdenticalTwinsLatentFp_PhD13.pdf
+f96bdd1e2a940030fb0a89abbe6c69b8d7f6f0c1,http://pdfs.semanticscholar.org/f96b/dd1e2a940030fb0a89abbe6c69b8d7f6f0c1.pdf,,https://doi.org/10.1016/j.imavis.2013.12.002,http://www.nist.gov/customcf/get_pdf.cfm?pub_id=913011
+f93606d362fcbe62550d0bf1b3edeb7be684b000,http://pdfs.semanticscholar.org/f936/06d362fcbe62550d0bf1b3edeb7be684b000.pdf,,https://doi.org/10.1093/comjnl/bxs001,http://www.researchgate.net/profile/Alex_James/publication/232710120_Nearest_Neighbor_Classifier_Based_on_Nearest_Feature_Decisions/links/09e4150a1e9ce1a6db000000.pdf
+f909d04c809013b930bafca12c0f9a8192df9d92,http://pdfs.semanticscholar.org/f909/d04c809013b930bafca12c0f9a8192df9d92.pdf,,https://doi.org/10.1007/978-3-540-75690-3_16,http://parnec.nuaa.edu.cn/papers/conference/2007/jliu-AMFG07.pdf
+f9c86f8b0d312ceec871c8a3b6bc79bbe76c1069,,,,
+f925879459848a3eeb0035fe206c4645e3f20d42,,,,http://doi.acm.org/10.1145/3025453.3025472
+f9d1f12070e5267afc60828002137af949ff1544,http://pdfs.semanticscholar.org/f9d1/f12070e5267afc60828002137af949ff1544.pdf,,,http://arxiv.org/pdf/1512.01691v1.pdf
+f9ccfe000092121a2016639732cdb368378256d5,http://pdfs.semanticscholar.org/f9cc/fe000092121a2016639732cdb368378256d5.pdf,,,http://www-rech.telecom-lille.fr/uha3ds2016/Papers/Cognitive%20behaviour%20analysis.pdf
+f0ba5c89094b15469f95fd2a05a46b68b8faf1ca,,,,
+f02f0f6fcd56a9b1407045de6634df15c60a85cd,http://pdfs.semanticscholar.org/f02f/0f6fcd56a9b1407045de6634df15c60a85cd.pdf,,,http://arxiv.org/abs/1712.05015
+f08cb47cd91a83ea849f2dfe2682529f3bb95aa9,,,,
+f0f80055ab85254ca58c1b08017969a0c355881f,,,,
+f0ae807627f81acb63eb5837c75a1e895a92c376,http://pdfs.semanticscholar.org/f0ae/807627f81acb63eb5837c75a1e895a92c376.pdf,,,http://www.ijeert.org/pdf/v3-i12/15.pdf
+f074e86e003d5b7a3b6e1780d9c323598d93f3bc,http://pdfs.semanticscholar.org/f074/e86e003d5b7a3b6e1780d9c323598d93f3bc.pdf,,https://doi.org/10.3390/axioms3020202,http://www.mdpi.com/2075-1680/3/2/202/pdf
+f0dac9a55443aa39fd9832bdff202a579b835e88,,,https://doi.org/10.1109/JSTSP.2016.2543681,
+f0681fc08f4d7198dcde803d69ca62f09f3db6c5,http://pdfs.semanticscholar.org/f068/1fc08f4d7198dcde803d69ca62f09f3db6c5.pdf,,https://doi.org/10.1007/978-3-642-35749-7_16,http://www.busim.ee.boun.edu.tr/~sankur/SankurFolder/Conf_ECCV_Girit_2010.pdf
+f0a9d69028edd1a39147848ad1116ca308d7491e,,,https://doi.org/10.1007/11573548_11,
+f09d5b6433f63d7403df5650893b78cdcf7319b3,,,https://doi.org/10.1109/AFGR.2008.4813384,
+f0de1e61ba806f3db918f9e498fcc6dfa223b13d,,,,
+f0f501e1e8726148d18e70c8e9f6feea9360d119,http://pdfs.semanticscholar.org/f0f5/01e1e8726148d18e70c8e9f6feea9360d119.pdf,,,http://jultika.oulu.fi/files/isbn9789526208732.pdf
+f06b015bb19bd3c39ac5b1e4320566f8d83a0c84,http://pdfs.semanticscholar.org/f06b/015bb19bd3c39ac5b1e4320566f8d83a0c84.pdf,,https://doi.org/10.1016/j.imavis.2014.02.008,http://www.emotient.com/wp-content/uploads/Sikka_IMAVIS_2014.pdf
+f0b4f5104571020206b2d5e606c4d70f496983f9,,,https://doi.org/10.1109/FUZZ-IEEE.2014.6891674,
+f0a3f12469fa55ad0d40c21212d18c02be0d1264,http://pdfs.semanticscholar.org/f0a3/f12469fa55ad0d40c21212d18c02be0d1264.pdf,,https://doi.org/10.1007/978-3-642-37444-9_49,http://slsp.kaist.ac.kr/paperdata/sparsitysharing.pdf
+f05ad40246656a977cf321c8299158435e3f3b61,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Lu_Face_Recognition_Using_2013_ICCV_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2013.408
+f702f1294c0cd74b31db39c698281744d3137eb4,,,,
+f781e50caa43be13c5ceb13f4ccc2abc7d1507c5,http://pdfs.semanticscholar.org/f781/e50caa43be13c5ceb13f4ccc2abc7d1507c5.pdf,,,http://www.mva-org.jp/Proceedings/CommemorativeDVD/2005/papers/2005430.pdf
+f74917fc0e55f4f5682909dcf6929abd19d33e2e,http://pdfs.semanticscholar.org/f749/17fc0e55f4f5682909dcf6929abd19d33e2e.pdf,,,https://openreview.net/pdf?id=S1CIev1vM
+f7911b9ff58d07d19c68f4a30f40621f63c0f385,,,,http://dl.acm.org/citation.cfm?id=3007693
+f740bac1484f2f2c70777db6d2a11cf4280081d6,http://pdfs.semanticscholar.org/f740/bac1484f2f2c70777db6d2a11cf4280081d6.pdf,,,https://arxiv.org/pdf/1801.03754v1.pdf
+f7452a12f9bd927398e036ea6ede02da79097e6e,http://pdfs.semanticscholar.org/f745/2a12f9bd927398e036ea6ede02da79097e6e.pdf,,,https://arxiv.org/pdf/1803.09851v1.pdf
+f7093b138fd31956e30d411a7043741dcb8ca4aa,http://pdfs.semanticscholar.org/f709/3b138fd31956e30d411a7043741dcb8ca4aa.pdf,,,https://arxiv.org/pdf/1605.06052v1.pdf
+f7ffc2dc6801b0feee7d863f02ae2ca34c3e6a66,,,,
+f762afd65f3b680330e390f88d4cc39485345a01,,,,http://doi.ieeecomputersociety.org/10.1109/ACIIW.2017.8272606
+f7de943aa75406fe5568fdbb08133ce0f9a765d4,http://pdfs.semanticscholar.org/f7de/943aa75406fe5568fdbb08133ce0f9a765d4.pdf,,,http://borders.arizona.edu/cms/sites/default/files/BORDERS_YR5-T1%204-TechReport.pdf
+f75852386e563ca580a48b18420e446be45fcf8d,http://pdfs.semanticscholar.org/f758/52386e563ca580a48b18420e446be45fcf8d.pdf,,,http://www.umiacs.umd.edu/~raghuram/Publications/CourseProjects/ENEE631.pdf
+f702a6cf6bc5e4cf53ea72baa4fc9d80cdbbae93,,,https://doi.org/10.1109/TCSVT.2007.903317,
+f73174cfcc5c329b63f19fffdd706e1df4cc9e20,,,,http://doi.ieeecomputersociety.org/10.1109/FIT.2015.13
+f7c50d2be9fba0e4527fd9fbe3095e9d9a94fdd3,http://pdfs.semanticscholar.org/f7c5/0d2be9fba0e4527fd9fbe3095e9d9a94fdd3.pdf,,https://doi.org/10.1007/978-3-319-16811-1_17,http://www.kinfacew.com/papers/LM3L_accv14.pdf
+f78863f4e7c4c57744715abe524ae4256be884a9,http://pdfs.semanticscholar.org/f788/63f4e7c4c57744715abe524ae4256be884a9.pdf,,https://doi.org/10.1016/j.neucom.2010.07.017,http://cgit.nutn.edu.tw:8080/cgit/PaperDL/LZJ_120826151743.PDF
+f77c9bf5beec7c975584e8087aae8d679664a1eb,http://pdfs.semanticscholar.org/f77c/9bf5beec7c975584e8087aae8d679664a1eb.pdf,,,http://arxiv.org/abs/1703.08497
+f7bebb2d5ef7c9bd38808b8e615756efafc2a1e7,,,https://doi.org/10.1109/ICIP.2012.6467434,
+f79e4ba09402adab54d2efadd1c4bfe4e20c5da5,,,https://doi.org/10.1109/ICIP.2017.8296364,
+f772af1dbed4ae31d75ff257e6ba42a70039b417,,,,
+e8686663aec64f4414eba6a0f821ab9eb9f93e38,http://pdfs.semanticscholar.org/e868/6663aec64f4414eba6a0f821ab9eb9f93e38.pdf,,https://doi.org/10.1109/ICIP.2003.1247391,http://www.gts.tsc.uvigo.es/~jalba/papers/icip03.pdf
+e83e5960c2aabab654e1545eb419ef64c25800d5,,,https://doi.org/10.1016/j.neunet.2016.08.011,
+e860db656f39d738050b5f3e0bf72724e6a4ad5c,,,,
+e8f4a4e0fe0b2f0054b44b947828d71e10ec61a7,,,,
+e82360682c4da11f136f3fccb73a31d7fd195694,http://pdfs.semanticscholar.org/e823/60682c4da11f136f3fccb73a31d7fd195694.pdf,,,https://aaltodoc.aalto.fi/bitstream/handle/123456789/3242/urn100224.pdf?isAllowed=y&sequence=1
+e8951cc76af80da43e3528fe6d984071f17f57e7,,,https://doi.org/10.1109/WACVW.2017.9,
+e8410c4cd1689829c15bd1f34995eb3bd4321069,https://www.computer.org/csdl/proceedings/fg/2013/5545/00/06553731.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2013.6553731
+e8c051d9e7eb8891b23cde6cbfad203011318a4f,,,,http://doi.acm.org/10.1145/3013971.3014015
+e88988f4696e7e2925ed96467fde4314bfa95eff,,,https://doi.org/10.1016/j.neucom.2015.01.076,
+e8f0f9b74db6794830baa2cab48d99d8724e8cb6,http://pdfs.semanticscholar.org/e8f0/f9b74db6794830baa2cab48d99d8724e8cb6.pdf,,https://doi.org/10.1007/978-3-540-88688-4_52,http://www.ecse.rpi.edu/homepages/qji/Papers/activelabeling.pdf
+e82a0976db908e6f074b926f58223ac685533c65,,,https://doi.org/10.1007/s11042-015-2848-2,
+e8b2a98f87b7b2593b4a046464c1ec63bfd13b51,http://pdfs.semanticscholar.org/e8b2/a98f87b7b2593b4a046464c1ec63bfd13b51.pdf,,,http://arxiv.org/abs/1606.05413
+e86008f6aebd0ab26bdb69d2549b2e8454b8959c,,,,
+e8c9dcbf56714db53063b9c367e3e44300141ff6,http://faculty.virginia.edu/humandynamicslab/pubs/BrickHunterCohn-ACII2009.pdf,,https://doi.org/10.1109/ACII.2009.5349600,
+e865908ed5e5d7469b412b081ca8abd738c72121,,,https://doi.org/10.1109/TIP.2016.2621667,
+e8b56ed34ece9b1739fff0df6af3b65390c468d3,,,,
+e8c6853135856515fc88fff7c55737a292b0a15b,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.46
+fa54ab106c7f6dbd3c004cea4ef74ea580cf50bf,,,,http://doi.ieeecomputersociety.org/10.1109/SITIS.2009.18
+fadafdd7dedd2bdd775b4591a998c8b5254081e1,,,,
+fac5a9a18157962cff38df6d4ae69f8a7da1cfa8,http://www.cs.sunysb.edu/~vislab/papers/01580481.pdf,,,http://www3.cs.stonybrook.edu/~ial/content/papers/2006/lzhang-pami2006.pdf
+faf19885431cb39360158982c3a1127f6090a1f6,,,https://doi.org/10.1109/BTAS.2015.7358768,
+fa72e39971855dff6beb8174b5fa654e0ab7d324,,,https://doi.org/10.1007/s11042-013-1793-1,
+fab83bf8d7cab8fe069796b33d2a6bd70c8cefc6,http://pdfs.semanticscholar.org/fab8/3bf8d7cab8fe069796b33d2a6bd70c8cefc6.pdf,,,http://fipa.cs.kit.edu/download/befit-evaluation_guidelines.pdf
+faeefc5da67421ecd71d400f1505cfacb990119c,http://pdfs.semanticscholar.org/faee/fc5da67421ecd71d400f1505cfacb990119c.pdf,,https://doi.org/10.3389/frobt.2017.00061,
+fa08a4da5f2fa39632d90ce3a2e1688d147ece61,http://pdfs.semanticscholar.org/fa08/a4da5f2fa39632d90ce3a2e1688d147ece61.pdf,,,http://openaccess.thecvf.com/content_ICCV_2017/supplemental/Wolf_Unsupervised_Creation_of_ICCV_2017_supplemental.pdf
+fab2fc6882872746498b362825184c0fb7d810e4,http://pdfs.semanticscholar.org/fab2/fc6882872746498b362825184c0fb7d810e4.pdf,,,
+faead8f2eb54c7bc33bc7d0569adc7a4c2ec4c3b,http://pdfs.semanticscholar.org/faea/d8f2eb54c7bc33bc7d0569adc7a4c2ec4c3b.pdf,,,https://arxiv.org/pdf/1611.10152v1.pdf
+faa46ef96493b04694555738100d9f983915cf9b,,,https://doi.org/10.1007/s10489-015-0735-1,
+fa24bf887d3b3f6f58f8305dcd076f0ccc30272a,http://pdfs.semanticscholar.org/fa24/bf887d3b3f6f58f8305dcd076f0ccc30272a.pdf,,,http://www.jmlr.org/proceedings/papers/v39/antoniuk14.pdf
+faa29975169ba3bbb954e518bc9814a5819876f6,http://pdfs.semanticscholar.org/faa2/9975169ba3bbb954e518bc9814a5819876f6.pdf,,,https://arxiv.org/pdf/1702.04037v1.pdf
+fafe69a00565895c7d57ad09ef44ce9ddd5a6caa,http://pdfs.semanticscholar.org/fafe/69a00565895c7d57ad09ef44ce9ddd5a6caa.pdf,,,http://file.scirp.org/pdf/AM_2012123109350553.pdf
+fa08b52dda21ccf71ebc91bc0c4d206ac0aa3719,,,https://doi.org/10.1109/TIM.2015.2415012,
+fadbb3a447d697d52771e237173b80782caaa936,,,https://doi.org/10.1007/s00530-012-0290-0,
+faca1c97ac2df9d972c0766a296efcf101aaf969,http://pdfs.semanticscholar.org/faca/1c97ac2df9d972c0766a296efcf101aaf969.pdf,,https://doi.org/10.1007/978-3-319-46478-7_43,https://arxiv.org/pdf/1608.07138v1.pdf
+fa9610c2dc7e2a79e0096ac033b11508d8ae7ed7,,,https://doi.org/10.1109/FSKD.2016.7603418,
+fa398c6d6bd03df839dce7b59e04f473bc0ed660,https://www.researchgate.net/profile/Sujata_Pandey/publication/4308761_A_Novel_Approach_for_Face_Recognition_Using_DCT_Coefficients_Re-scaling_for_Illumination_Normalization/links/004635211c385bb7e3000000.pdf,,,
+fa5ab4b1b45bf22ce7b194c20c724946de2f2dd4,,,https://doi.org/10.1109/TIP.2015.2421437,
+fae83b145e5eeda8327de9f19df286edfaf5e60c,http://pdfs.semanticscholar.org/fae8/3b145e5eeda8327de9f19df286edfaf5e60c.pdf,,,http://academia.edu.documents.s3.amazonaws.com/1938721/ICICTE_2010.pdf
+ff82825a04a654ca70e6d460c8d88080ee4a7fcc,,,,http://doi.acm.org/10.1145/2683483.2683533
+ff8315c1a0587563510195356c9153729b533c5b,http://vislab.ucr.edu/PUBLICATIONS/pubs/Journal%20and%20Conference%20Papers/after10-1-1997/Journals/2014/Zapping%20IndexUsing%20Smile%20to%20MeasureAdvertisement14.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2014.2364581
+ff9e042cccbed7e350a25b7d806cd17fb79dfdf9,,,https://doi.org/10.1007/s11760-016-0882-9,
+fff31548617f208cd5ae5c32917afd48abc4ff6a,,,,http://doi.acm.org/10.1145/3139295.3139309
+ff44d8938c52cfdca48c80f8e1618bbcbf91cb2a,http://pdfs.semanticscholar.org/ff44/d8938c52cfdca48c80f8e1618bbcbf91cb2a.pdf,,https://doi.org/10.1007/978-3-319-68548-9_36,http://imagelab.ing.unimore.it/imagelab/pubblicazioni/2017_ICIAP_Naming.pdf
+ff3859917d4121f47de0d46922a103c78514fcab,,,https://doi.org/10.1109/ICB.2016.7550050,
+ff402bd06c9c4e94aa47ad80ccc4455efa869af3,,,,http://doi.ieeecomputersociety.org/10.1109/ICPR.2004.1334120
+ff42ec628b0980909bbb84225d0c4f8d9ac51e03,,,https://doi.org/10.1109/TCSVT.2008.2005799,
+ff398e7b6584d9a692e70c2170b4eecaddd78357,http://pdfs.semanticscholar.org/ff39/8e7b6584d9a692e70c2170b4eecaddd78357.pdf,,,https://drum.lib.umd.edu/bitstream/handle/1903/13813/Guo_umd_0117E_13642.pdf?isAllowed=y&sequence=1
+ffc5a9610df0341369aa75c0331ef021de0a02a9,http://pdfs.semanticscholar.org/ffc5/a9610df0341369aa75c0331ef021de0a02a9.pdf,,https://doi.org/10.1007/978-3-540-87481-2_36,http://www.researchgate.net/profile/Yangqiu_Song/publication/220698949_Transferred_Dimensionality_Reduction/links/551e3c950cf29dcabb03a72d.pdf
+ffd81d784549ee51a9b0b7b8aaf20d5581031b74,http://pdfs.semanticscholar.org/ffd8/1d784549ee51a9b0b7b8aaf20d5581031b74.pdf,,,http://www.rcs.cic.ipn.mx/2014_80/Performance%20Analysis%20of%20Retina%20and%20DoG%20Filtering%20Applied%20to%20Face%20Images%20for%20Training.pdf
+ffd4a5bf55fe089ac05ca96285e4e563325f3d1b,,,,
+ff01bc3f49130d436fca24b987b7e3beedfa404d,http://pdfs.semanticscholar.org/ff01/bc3f49130d436fca24b987b7e3beedfa404d.pdf,,https://doi.org/10.3390/sym8080075,http://www.mdpi.com/2073-8994/8/8/75/pdf
+ff061f7e46a6213d15ac2eb2c49d9d3003612e49,http://pdfs.semanticscholar.org/ff06/1f7e46a6213d15ac2eb2c49d9d3003612e49.pdf,,,http://www.csse.monash.edu.au/~app/recent/ThesisFinal.pdf
+ff1f45bdad41d8b35435098041e009627e60d208,http://pdfs.semanticscholar.org/ff1f/45bdad41d8b35435098041e009627e60d208.pdf,,,http://www.robots.ox.ac.uk/~vgg/publications/2017/Nagrani17b/nagrani17b.pdf
+ffea4184a0b24807b5f4ed87f9a985c2a27027d9,,,https://doi.org/10.1007/s00530-012-0297-6,
+ff60d4601adabe04214c67e12253ea3359f4e082,http://pdfs.semanticscholar.org/ff60/d4601adabe04214c67e12253ea3359f4e082.pdf,,https://doi.org/10.1016/j.imavis.2017.01.012,https://www.cmpe.boun.edu.tr/~salah/imavis_emotiw15_v10.pdf
+ff8db3810f927506f3aa594d66d5e8658f3cf4d5,,,,http://doi.acm.org/10.1145/3078971.3079026
+ffea2b26e422c1009afa7e200a43b31a1fae86a9,,,https://doi.org/10.1007/s00500-009-0441-1,
+ffb1cb0f9fd65247f02c92cfcb152590a5d68741,,,https://doi.org/10.1109/CISS.2012.6310782,
+ffec78f270dba4bdaf6bca7aedc16798bb9347ef,,,,
+ffc9d6a5f353e5aec3116a10cf685294979c63d9,http://pdfs.semanticscholar.org/ffc9/d6a5f353e5aec3116a10cf685294979c63d9.pdf,,,https://bib.irb.hr/datoteka/486456.Eigenphase_ERK_Final.pdf
+ffaad0204f4af763e3390a2f6053c0e9875376be,http://pdfs.semanticscholar.org/ffaa/d0204f4af763e3390a2f6053c0e9875376be.pdf,,https://doi.org/10.3390/s17071633,http://www.mdpi.com/1424-8220/17/7/1633/pdf
+ffcbedb92e76fbab083bb2c57d846a2a96b5ae30,http://pdfs.semanticscholar.org/ffcb/edb92e76fbab083bb2c57d846a2a96b5ae30.pdf,,,https://www.base-search.net/Record/0a42c9d890b1fc4865681a773a3723eacc13f90f30363de110f261027caa5c49
+ff7bc7a6d493e01ec8fa2b889bcaf6349101676e,http://pdfs.semanticscholar.org/ff7b/c7a6d493e01ec8fa2b889bcaf6349101676e.pdf,,,http://www.ee.oulu.fi/~gyzhao/Papers/2008/Facial%20expression%20recognition%20with%20spatiotemporal%20local%20descriptors.pdf
+fffa2943808509fdbd2fc817cc5366752e57664a,http://pdfs.semanticscholar.org/fffa/2943808509fdbd2fc817cc5366752e57664a.pdf,,,http://crcv.ucf.edu/ICCV13-Action-Workshop/index.files/NotebookPapers13/Paper%2035%20(Supplementary).pdf
+ff46c41e9ea139d499dd349e78d7cc8be19f936c,http://pdfs.semanticscholar.org/ff46/c41e9ea139d499dd349e78d7cc8be19f936c.pdf,,,http://www.ijmer.com/papers/Vol2_Issue4/Vol3_Issue3/AO3313391342.pdf
+ff3f128f5addc6ce6b41f19f3d679282bbdaa2ee,,,,http://doi.acm.org/10.1145/2903220.2903255
+ff0617d750fa49416514c1363824b8f61baf8fb5,,,https://doi.org/10.1587/elex.7.1125,
+ff5dd6f96e108d8233220cc262bc282229c1a582,http://pdfs.semanticscholar.org/ff5d/d6f96e108d8233220cc262bc282229c1a582.pdf,,,http://ijera.com/papers/Vol2_issue6/DC26708715.pdf
+ff946df1cea6c107b2c336419c34ea69cc3ddbc4,,,,
+c570d1247e337f91e555c3be0e8c8a5aba539d9f,,,https://doi.org/10.1007/s11042-012-1352-1,
+c586463b8dbedce2bfce3ee90517085a9d9e2e13,,,,http://doi.ieeecomputersociety.org/10.1109/IIH-MSP.2006.9
+c5468665d98ce7349d38afb620adbf51757ab86f,http://pdfs.semanticscholar.org/c546/8665d98ce7349d38afb620adbf51757ab86f.pdf,,https://doi.org/10.1007/11564386_18,http://www.umiacs.umd.edu/~rama/Conf.pdf-files/amfg2005.pdf
+c5fff7adc5084d69390918daf09e832ec191144b,,,,
+c5eba789aeb41904aa1b03fad1dc7cea5d0cd3b6,,,https://doi.org/10.1109/BTAS.2017.8272773,
+c5022fbeb65b70f6fe11694575b8ad1b53412a0d,,,https://doi.org/10.1109/ICIP.2005.1530209,
+c5adb33bd3557c94d0e54cfe2036a1859118a65e,,,,
+c5d13e42071813a0a9dd809d54268712eba7883f,http://www.csis.pace.edu/~ctappert/dps/2013BTAS/Papers/Paper%2016/PID2891229.pdf,,https://doi.org/10.1109/BTAS.2013.6712718,
+c50d73557be96907f88b59cfbd1ab1b2fd696d41,http://pdfs.semanticscholar.org/c50d/73557be96907f88b59cfbd1ab1b2fd696d41.pdf,,https://doi.org/10.1117/1.1763586,http://web.ornl.gov/sci/ees/eesrd/pdfs/publications/semiconductorsidewallshapeestimation.pdf
+c54f9f33382f9f656ec0e97d3004df614ec56434,http://pdfs.semanticscholar.org/c54f/9f33382f9f656ec0e97d3004df614ec56434.pdf,,https://doi.org/10.1016/j.patrec.2010.07.020,http://www.sis.uta.fi/~gofase/docs/papers/2010_Gizatdinova_Surakka_(Automatic_edge-based_localization_of_facial_features).pdf
+c574c72b5ef1759b7fd41cf19a9dcd67e5473739,http://pdfs.semanticscholar.org/c574/c72b5ef1759b7fd41cf19a9dcd67e5473739.pdf,,https://doi.org/10.1186/s13640-017-0194-1,https://jivp-eurasipjournals.springeropen.com/track/pdf/10.1186/s13640-017-0194-1?site=jivp-eurasipjournals.springeropen.com
+c5c56e9c884ac4070880ac481909bb6b621d2a3f,,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2011.6126466
+c5a561c662fc2b195ff80d2655cc5a13a44ffd2d,http://www.cs.toronto.edu/~suzanne/papers/JamiesonEtAlPAMI.pdf,,,http://www.cs.toronto.edu/~afsaneh/JamiesonEtAl-PAMI10.pdf
+c553f0334fcadf43607925733685adef81fbe406,,,https://doi.org/10.1109/ICSIPA.2017.8120636,
+c5366f412f2e8e78280afcccc544156f63b516e3,http://lep.unige.ch/system/files/biblio/2012_Valstar_MetaAnalysisGEMEP-FERA.pdf,,https://doi.org/10.1109/TSMCB.2012.2200675,http://www.cs.nott.ac.uk/~mfv/Documents/fera_smcb.pdf
+c5fe40875358a286594b77fa23285fcfb7bda68e,http://pdfs.semanticscholar.org/edd1/cfb1caff16f80d807ff0821883ae855950c5.pdf,,https://doi.org/10.1016/j.neucom.2012.06.032,https://cis.temple.edu/~latecki/Papers/FaceIdentificationNeuro2013.pdf
+c58ece1a3fa23608f022e424ec5a93cddda31308,,,https://doi.org/10.1109/JSYST.2014.2325957,
+c5c379a807e02cab2e57de45699ababe8d13fb6d,http://pdfs.semanticscholar.org/c5c3/79a807e02cab2e57de45699ababe8d13fb6d.pdf,,,http://www.wseas.org/multimedia/journals/systems/2012/56-519.pdf
+c59a9151cef054984607b7253ef189c12122a625,,,https://doi.org/10.1007/s00138-016-0791-5,
+c5935b92bd23fd25cae20222c7c2abc9f4caa770,http://openaccess.thecvf.com/content_cvpr_2017/papers/Feichtenhofer_Spatiotemporal_Multiplier_Networks_CVPR_2017_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.787
+c5421a18583f629b49ca20577022f201692c4f5d,http://pdfs.semanticscholar.org/c542/1a18583f629b49ca20577022f201692c4f5d.pdf,,,http://world-comp.org/p2011/IPC4140.pdf
+c5be0feacec2860982fbbb4404cf98c654142489,http://pdfs.semanticscholar.org/c5be/0feacec2860982fbbb4404cf98c654142489.pdf,,,http://ipg.idsia.ch/preprints/decampos2009c.pdf
+c59b62864a6d86eead075c88137a87070a984550,,,https://doi.org/10.1109/IVCNZ.2015.7761546,
+c5844de3fdf5e0069d08e235514863c8ef900eb7,http://pdfs.semanticscholar.org/c584/4de3fdf5e0069d08e235514863c8ef900eb7.pdf,,,http://www.enggjournals.com/ijcse/doc/IJCSE10-02-08-106.pdf
+c58b7466f2855ffdcff1bebfad6b6a027b8c5ee1,http://pdfs.semanticscholar.org/d6f1/42f5ddcb027e7b346eb20703abbf5cc4e883.pdf,,https://doi.org/10.1007/978-3-319-46454-1_20,http://porikli.com/mysite/pdfs/porikli%202016%20-%20Ultra%20resolving%20face%20images%20by%20discriminative%20generative%20networks.pdf
+c590c6c171392e9f66aab1bce337470c43b48f39,http://pdfs.semanticscholar.org/c590/c6c171392e9f66aab1bce337470c43b48f39.pdf,,,http://www.aicit.org/IJEI/ppl/IJEI%20vol3.no1_06.pdf
+c5f1ae9f46dc44624591db3d5e9f90a6a8391111,http://poseidon.csd.auth.gr/papers/PUBLISHED/CONFERENCE/pdf/Buciu_ICPR_2004.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICPR.2004.1334109
+c53352a4239568cc915ad968aff51c49924a3072,http://pdfs.semanticscholar.org/c533/52a4239568cc915ad968aff51c49924a3072.pdf,,,http://imageanalysis.cs.ucl.ac.uk/documents/JAndrews-ADWorkshop-ICML.pdf
+c5765590c294146a8e3c9987d394c0990ab6a35b,http://media.cs.tsinghua.edu.cn/~imagevision/papers/%5B2012%5D084_P1B-31-cvpr2012-wan.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247734
+c5437496932dcb9d33519a120821da755951e1a9,,,,http://doi.acm.org/10.1145/2487575.2487604
+c2b10909a0dd068b8e377a55b0a1827c8319118a,,,https://doi.org/10.1109/TCYB.2016.2565898,
+c2c5206f6a539b02f5d5a19bdb3a90584f7e6ba4,http://pdfs.semanticscholar.org/c2c5/206f6a539b02f5d5a19bdb3a90584f7e6ba4.pdf,,https://doi.org/10.1007/11573548_125,http://speakit.cn/Group/file/ACReview_ACII05.pdf
+c270aff2b066ee354b4fe7e958a40a37f7bfca45,,,https://doi.org/10.1109/WCSP.2017.8170910,
+c2fa83e8a428c03c74148d91f60468089b80c328,http://pdfs.semanticscholar.org/c2fa/83e8a428c03c74148d91f60468089b80c328.pdf,,,http://machinelearning.wustl.edu/mlpapers/paper_files/icml2014c2_nieb14.pdf
+c2c3ff1778ed9c33c6e613417832505d33513c55,http://pdfs.semanticscholar.org/c2c3/ff1778ed9c33c6e613417832505d33513c55.pdf,,https://doi.org/10.1007/978-3-642-32695-0_54,https://tapchikhdt.lhu.edu.vn/Data/News/136/files/74580613_spost.pdf
+c252bc84356ed69ccf53507752135b6e98de8db4,,,https://doi.org/10.1016/j.neucom.2015.02.067,
+c291f0e29871c8b9509d1a2876c3e305839ad4ac,,,https://doi.org/10.1109/ICARCV.2014.7064432,
+c244c3c797574048d6931b6714ebac64d820dbb3,,,,http://doi.acm.org/10.1145/2808492.2808500
+c27f64eaf48e88758f650e38fa4e043c16580d26,http://pdfs.semanticscholar.org/c27f/64eaf48e88758f650e38fa4e043c16580d26.pdf,,,http://intranet.daiict.ac.in/~daiict_nt01/Lecture/SUMAN%20MITRA/Sample_TCS_Research%20Proposal.pdf
+c222f8079c246ead285894c47bdbb2dfc7741044,,,https://doi.org/10.1109/ICIP.2015.7351631,
+c2be82ed0db509087b08423c8cf39ab3c36549c3,,,,http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019363
+c23bd1917badd27093c8284bd324332b8c45bfcf,,,https://doi.org/10.1109/IJCNN.2010.5596316,
+c2474202d56bb80663e7bece5924245978425fc1,,,https://doi.org/10.1109/ICIP.2016.7532771,
+c23153aade9be0c941390909c5d1aad8924821db,http://pdfs.semanticscholar.org/c231/53aade9be0c941390909c5d1aad8924821db.pdf,,,https://infoscience.epfl.ch/record/220905/files/Le_ICPR_2016.pdf
+c207fd762728f3da4cddcfcf8bf19669809ab284,http://pdfs.semanticscholar.org/c207/fd762728f3da4cddcfcf8bf19669809ab284.pdf,,https://doi.org/10.1007/978-3-642-12304-7_11,http://www.researchgate.net/profile/Mannes_Poel/publication/220744810_Face_Alignment_Using_Boosting_and_Evolutionary_Search/links/0a85e53b125fdb3c39000000.pdf
+c20b2d365186f4471950fbe1ef8755de90efc000,,,,
+c259db2675f3bfb157f37e6c93b03d1d14dab4c7,,,,
+c220f457ad0b28886f8b3ef41f012dd0236cd91a,http://pdfs.semanticscholar.org/c220/f457ad0b28886f8b3ef41f012dd0236cd91a.pdf,,,https://arxiv.org/pdf/1804.01159v1.pdf
+c2422c975d9f9b62fbb19738e5ce5e818a6e1752,,,https://doi.org/10.1109/TNNLS.2015.2481006,
+c2e03efd8c5217188ab685e73cc2e52c54835d1a,http://web.eecs.utk.edu/~ataalimi/wp-content/uploads/2016/09/Deep-Tree-structured-Face-A-Unified-Representation-for-Multi-task-Facial.pdf,,,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477585
+c2dc29e0db76122dfed075c3b9ee48503b027809,,,https://doi.org/10.1109/ICIP.2016.7532632,
+c28461e266fe0f03c0f9a9525a266aa3050229f0,http://pdfs.semanticscholar.org/c284/61e266fe0f03c0f9a9525a266aa3050229f0.pdf,,https://doi.org/10.1007/978-3-642-21257-4_46,http://www.cvc.uab.es/~davidm/pdfs/IBPRIA2011.pdf
+c29e33fbd078d9a8ab7adbc74b03d4f830714cd0,http://research.microsoft.com/en-us/um/people/leizhang/Paper/FG04-Longbin.pdf,,,http://research.microsoft.com/users/leizhang/Paper/FG04-Longbin.pdf
+c2e6daebb95c9dfc741af67464c98f1039127627,http://pdfs.semanticscholar.org/c2e6/daebb95c9dfc741af67464c98f1039127627.pdf,,,http://www.mva-org.jp/Proceedings/2013USB/papers/05-01.pdf
+f6b4811c5e7111485e2c9cc5bf63f8ac80f3e2d7,,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2016.2569436
+f604c312ff4706f1849078b2ca28409f0fcd859d,,,,
+f6742010372210d06e531e7df7df9c01a185e241,http://pdfs.semanticscholar.org/f674/2010372210d06e531e7df7df9c01a185e241.pdf,,,https://arxiv.org/pdf/1707.09599v1.pdf
+f69de2b6770f0a8de6d3ec1a65cb7996b3c99317,http://pdfs.semanticscholar.org/f69d/e2b6770f0a8de6d3ec1a65cb7996b3c99317.pdf,,,http://www.maxwellsci.com/print/rjaset/v8-2265-2271.pdf
+f6311d6b3f4d3bd192d866d2e898c30eea37d7d5,,http://ieeexplore.ieee.org/document/6460511/,,
+f6ca29516cce3fa346673a2aec550d8e671929a6,http://pdfs.semanticscholar.org/f6ca/29516cce3fa346673a2aec550d8e671929a6.pdf,,,http://www.ijeat.org/attachments/File/v2i4/D1388042413.pdf
+f67a73c9dd1e05bfc51219e70536dbb49158f7bc,http://pdfs.semanticscholar.org/f67a/73c9dd1e05bfc51219e70536dbb49158f7bc.pdf,,https://doi.org/10.3844/jcssp.2014.2292.2298,http://thescipub.com/PDF/jcssp.2014.2292.2298.pdf
+f6c70635241968a6d5fd5e03cde6907022091d64,http://pdfs.semanticscholar.org/f6c7/0635241968a6d5fd5e03cde6907022091d64.pdf,,,http://drum.lib.umd.edu/bitstream/handle/1903/13217/Jorstad_umd_0117E_13521.pdf?isAllowed=y&sequence=1
+f63b3b8388bc4dcd4a0330402af37a59ce37e4f3,,,https://doi.org/10.1109/SIU.2013.6531214,
+f66f3d1e6e33cb9e9b3315d3374cd5f121144213,http://pdfs.semanticscholar.org/f66f/3d1e6e33cb9e9b3315d3374cd5f121144213.pdf,,,http://www.jneurosci.org/content/jneuro/33/44/17435.full.pdf
+f6abecc1f48f6ec6eede4143af33cc936f14d0d0,http://pdfs.semanticscholar.org/f6ab/ecc1f48f6ec6eede4143af33cc936f14d0d0.pdf,,,https://arxiv.org/pdf/1705.08764v1.pdf
+f6ebfa0cb3865c316f9072ded26725fd9881e73e,,,,http://doi.ieeecomputersociety.org/10.1109/FGR.2006.109
+f6fa97fbfa07691bc9ff28caf93d0998a767a5c1,http://pdfs.semanticscholar.org/f6fa/97fbfa07691bc9ff28caf93d0998a767a5c1.pdf,,,https://arxiv.org/pdf/1605.09299v1.pdf
+f6e6b4d0b7c16112dcb71ff502033a2187b1ec9b,,,https://doi.org/10.1109/TMM.2015.2476657,
+f66add890c2458466e1cb942ad3981f8651ace2d,,,,
+f6511d8156058737ec5354c66ef6fdcf035d714d,,,,http://doi.ieeecomputersociety.org/10.1109/BWCCA.2014.115
+f68f20868a6c46c2150ca70f412dc4b53e6a03c2,http://pdfs.semanticscholar.org/f68f/20868a6c46c2150ca70f412dc4b53e6a03c2.pdf,,,http://hrcak.srce.hr/file/206247
+f652cb159a2cf2745aabcbf6a7beed4415e79e34,,,,http://doi.acm.org/10.1145/1460096.1460119
+f6dabb4d91bf7389f3af219d486d4e67cec18c17,,,https://doi.org/10.1016/j.compeleceng.2014.08.010,
+e95895262f66f7c5e47dd46a70110d89c3b4c203,,,https://doi.org/10.1016/j.neucom.2016.09.023,
+e96540252f2f83e394012d653452411efb9f744f,,,,
+e957d0673af7454dbf0a14813201b0e2570577e9,,,https://doi.org/10.1109/ICPR.2016.7899699,
+e9ed17fd8bf1f3d343198e206a4a7e0561ad7e66,http://pdfs.semanticscholar.org/e9ed/17fd8bf1f3d343198e206a4a7e0561ad7e66.pdf,,,http://www.erpublications.com/uploaded_files/download/download_07_02_2014_12_23_06.pdf
+e95c5aaa72e72761b05f00fad6aec11c3e2f8d0f,,,,http://doi.acm.org/10.1145/2791405.2791505
+e9cebf627c204c6949dcc077d04c57eb66b2c038,,,https://doi.org/10.1109/SIU.2013.6531371,
+e9d147e657619c393ca702117602fd7d15675f69,,,,
+e9b731f00d16a10a31ceea446b2baa38719a31f1,,,https://doi.org/10.1109/ICSMC.2012.6378271,
+e9e40e588f8e6510fa5537e0c9e083ceed5d07ad,http://pdfs.semanticscholar.org/e9e4/0e588f8e6510fa5537e0c9e083ceed5d07ad.pdf,,,http://www.ijcsit.com/docs/Volume%202/vol2issue3/ijcsit2011020328.pdf
+e9d1b3767c06c896f89690deea7a95401ae4582b,,,https://doi.org/10.1109/VCIP.2016.7805565,
+e9d77a85bc2fa672cc1bd10258c896c8d89b41e8,,,https://doi.org/10.1109/ICTAI.2012.25,
+e9bb045e702ee38e566ce46cc1312ed25cb59ea7,http://pdfs.semanticscholar.org/e9bb/045e702ee38e566ce46cc1312ed25cb59ea7.pdf,,https://doi.org/10.1007/978-981-10-2104-6_55,https://samyak-268.github.io/pdfs/cvip_paper.pdf
+e908ce44fa94bb7ecf2a8b70cb5ec0b1a00b311a,,,,http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019548
+e94168c35be1d4b4d2aaf42ef892e64a3874ed8c,,,https://doi.org/10.1109/TSMCB.2008.2010715,
+e96ce25d11296fce4e2ecc2da03bd207dc118724,,,https://doi.org/10.1007/s00138-007-0095-x,
+e99718d08aca2c49cd2848eebdbb7c7855b4e484,,,,
+e9fcd15bcb0f65565138dda292e0c71ef25ea8bb,http://pdfs.semanticscholar.org/e9fc/d15bcb0f65565138dda292e0c71ef25ea8bb.pdf,,https://doi.org/10.1007/978-3-642-38061-7_22,https://repositorio.uam.es/xmlui/bitstream/handle/10486/663763/analysing_tome_CCIS_2013_ps.pdf?sequence=3
+e9b6804cd56cadb9342ec2ce412aacba7afd0723,,,,
+e935270db6bd778283de9767075763a538181d8e,,,,
+e98551055bdcf8e25e07f4ffdbf39d0a4a57bffc,,,https://doi.org/10.1109/ICPR.2014.440,
+e9331ae2a887c02e0a908ebae2810a681aedee29,,,https://doi.org/10.1016/j.image.2011.05.003,http://doi.ieeecomputersociety.org/10.1109/ICIG.2009.106
+e9f1cdd9ea95810efed306a338de9e0de25990a0,http://pdfs.semanticscholar.org/e9f1/cdd9ea95810efed306a338de9e0de25990a0.pdf,,,http://cvpia.memphis.edu/wp-content/uploads/2011/04/CVPIA-TR2011-001.pdf
+f1e44e64957397d167d13f8f551cae99e5c16c75,,,https://doi.org/10.1007/s11042-013-1548-z,
+f1e13c1e8426243320014c45cf2c9382d9cbfac2,,,,
+f17d8f14651c123d39e13a39dc79b7eb3659fe68,,,https://doi.org/10.1007/s11042-013-1803-3,
+f16a605abb5857c39a10709bd9f9d14cdaa7918f,http://pdfs.semanticscholar.org/f16a/605abb5857c39a10709bd9f9d14cdaa7918f.pdf,,,http://www.cvc.uab.es/~petia/sergi%20road_sign_recognition.pdf
+f1da4d705571312b244ebfd2b450692fd875cd1f,,,https://doi.org/10.1109/TIP.2014.2322446,
+f1748303cc02424704b3a35595610890229567f9,http://pdfs.semanticscholar.org/f174/8303cc02424704b3a35595610890229567f9.pdf,,https://doi.org/10.1016/j.imavis.2012.07.009,http://staff.science.uva.nl/~gevers/pub/GeversIVC2012.pdf
+f1d6da83dcf71eda45a56a86c5ae13e7f45a8536,,,https://doi.org/10.1109/ACCESS.2017.2737544,
+f18ff597bbfca10f84d017ac5e1ef0de6d7ad66c,,,,http://doi.ieeecomputersociety.org/10.1109/SNPD.2016.7515888
+f1d090fcea63d9f9e835c49352a3cd576ec899c1,http://pdfs.semanticscholar.org/f1d0/90fcea63d9f9e835c49352a3cd576ec899c1.pdf,,,http://research-information.bristol.ac.uk/files/75922825/Ioannis_Pitas_Single_hidden_Layer_Feedforward_Neual_network_training_using_class_geometric_information_2015.pdf
+f1061b2b5b7ca32edd5aa486aecc63a0972c84f3,,,https://doi.org/10.1109/TIP.2017.2760512,
+f19777e37321f79e34462fc4c416bd56772031bf,http://pdfs.semanticscholar.org/f197/77e37321f79e34462fc4c416bd56772031bf.pdf,,,https://www.ijser.org/researchpaper/Literature-Review-of-Image-Compression-Algorithm.pdf
+f19ab817dd1ef64ee94e94689b0daae0f686e849,http://pdfs.semanticscholar.org/f19a/b817dd1ef64ee94e94689b0daae0f686e849.pdf,,,http://d-nb.info/999629263
+f180cb7111e9a6ba7cfe0b251c0c35daaef4f517,,,https://doi.org/10.1109/TIP.2015.2417502,
+f1ea8bdb3bd39d8269628bc7b99b2d918ea23ef7,,,,
+f19bf8b5c1860cd81b5339804d5db9e791085aa7,,,https://doi.org/10.1109/SMC.2017.8122640,
+f14403d9d5fbc4c6e8aeb7505b5d887c50bad8a4,,,https://doi.org/10.1109/ICIP.2012.6467433,
+f1af714b92372c8e606485a3982eab2f16772ad8,,http://ieeexplore.ieee.org/document/5617662/,,
+e76798bddd0f12ae03de26b7c7743c008d505215,http://pdfs.semanticscholar.org/e767/98bddd0f12ae03de26b7c7743c008d505215.pdf,,,https://arxiv.org/pdf/1706.04122v1.pdf
+e7436b8e68bb7139b823a7572af3decd96241e78,,,https://doi.org/10.1109/ROBIO.2011.6181560,
+e7144f5c19848e037bb96e225d1cfd961f82bd9f,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.126
+e793f8644c94b81b7a0f89395937a7f8ad428a89,http://pdfs.semanticscholar.org/e793/f8644c94b81b7a0f89395937a7f8ad428a89.pdf,,,http://crcv.ucf.edu/THUMOS14/papers/Univesiry%20of%20Ottawa.pdf
+e726174d516605f80ff359e71f68b6e8e6ec6d5d,http://pdfs.semanticscholar.org/e726/174d516605f80ff359e71f68b6e8e6ec6d5d.pdf,,,http://www.iis.sinica.edu.tw/page/jise/2010/201011_22.html
+e78394213ae07b682ce40dc600352f674aa4cb05,http://pdfs.semanticscholar.org/e783/94213ae07b682ce40dc600352f674aa4cb05.pdf,,,https://www.researchgate.net/profile/Ron_Kimmel/publication/228856957_Expression-invariant_three-dimensional_face_recognition/links/0fcfd50870bbb5510d000000.pdf
+e73b1137099368dd7909d203b80c3d5164885e44,,,,http://doi.ieeecomputersociety.org/10.1109/FSKD.2008.116
+e73f2839fc232c03e9f027c78bc419ee15810fe8,,,https://doi.org/10.1109/ICIP.2017.8296413,
+e71c15f5650a59755619b2a62fa93ac922151fd6,,,,http://doi.ieeecomputersociety.org/10.1109/AUTOID.2005.22
+e726acda15d41b992b5a41feabd43617fab6dc23,http://pdfs.semanticscholar.org/e726/acda15d41b992b5a41feabd43617fab6dc23.pdf,,https://doi.org/10.1016/j.patrec.2005.07.026,http://vislab.ucr.edu/PUBLICATIONS/pubs/Journal%20and%20Conference%20Papers/after10-1-1997/Journals/2006/evolutionaryfeaturesynthesis06.pdf
+e74a2159f0f7afb35c7318a6e035bc31b8e69634,,,,http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019503
+e790a2538579c8e2ef9b314962ab26197d6664c6,,,https://doi.org/10.1109/ICIP.2016.7532915,
+e7e8c0bbee09b5af6f7df1de8f0f26da992737c4,,,https://doi.org/10.1109/IJCNN.2011.6033417,
+e74816bc0803460e20edbd30a44ab857b06e288e,http://pdfs.semanticscholar.org/e748/16bc0803460e20edbd30a44ab857b06e288e.pdf,,,http://arxiv.org/abs/1612.01035
+e724c9a69613bef36f67ae7ed6850b1942918804,,,,
+e7b6887cd06d0c1aa4902335f7893d7640aef823,http://pdfs.semanticscholar.org/e7b6/887cd06d0c1aa4902335f7893d7640aef823.pdf,,,https://arxiv.org/pdf/1802.04636v1.pdf
+e73b9b16adcf4339ff4d6723e61502489c50c2d9,http://pdfs.semanticscholar.org/e73b/9b16adcf4339ff4d6723e61502489c50c2d9.pdf,,,http://airccse.org/journal/ieij/papers/2114ieij01.pdf
+e7b7df786cf5960d55cbac4e696ca37b7cee8dcd,,,https://doi.org/10.1109/IJCNN.2012.6252728,
+cb669c1d1e17c2a54d78711fa6a9f556b83f1987,http://satoh-lab.ex.nii.ac.jp/users/ledduy/pub/Ngo-RobustFaceTrackFindingUnsingTrackedPoints.pdf,,,http://doi.ieeecomputersociety.org/10.1109/SITIS.2008.74
+cba090a5bfae7dd8a60a973259f0870ed68c4dd3,,,,http://doi.ieeecomputersociety.org/10.1109/ISM.2017.22
+cb4d3d1b8fbb6df71a184dd8f00f89f84fa8373b,,,,http://doi.ieeecomputersociety.org/10.1109/IJCNN.2009.5179002
+cb992fe67f0d4025e876161bfd2dda467eaec741,,,https://doi.org/10.1109/IPTA.2015.7367144,
+cbcf5da9f09b12f53d656446fd43bc6df4b2fa48,http://pdfs.semanticscholar.org/cbcf/5da9f09b12f53d656446fd43bc6df4b2fa48.pdf,,,http://www.ijeit.com/vol%202/Issue%206/IJEIT1412201212_84.pdf
+cba45a87fc6cf12b3b0b6f57ba1a5282ef7fee7a,http://pdfs.semanticscholar.org/cba4/5a87fc6cf12b3b0b6f57ba1a5282ef7fee7a.pdf,,,http://web.stanford.edu/class/cs231a/prev_projects_2016/emotion-ai-real.pdf
+cbc2de9b919bc63590b6ee2dfd9dda134af45286,,,,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477561
+cbf3e848c5d2130dd640d9bd546403b8d78ce0f9,,,https://doi.org/10.1109/IJCNN.2012.6252385,
+cbe1df2213a88eafc5dcaf55264f2523fe3ec981,,,,http://doi.ieeecomputersociety.org/10.1109/WACV.2015.34
+cb4d8cef8cec9406b1121180d47c14dfef373882,,,https://doi.org/10.1109/ICPR.2014.301,
+cb8c067aeabacd0eb723c5bb23eb41d8d219c57d,,,,
+cb9092fe74ea6a5b2bb56e9226f1c88f96094388,http://pdfs.semanticscholar.org/cb90/92fe74ea6a5b2bb56e9226f1c88f96094388.pdf,,https://doi.org/10.1016/j.robot.2014.03.022,http://www.dei.unipd.it/~ghidoni/papers/ghidoni_distributed_perception.pdf
+cbd004d4c5e3b64321dc1a8f05fa5d64500389c2,http://www.researchgate.net/profile/Wen_Li38/publication/261711227_POSE-ROBUST_REPRESENTATION_FOR_FACE_VERIFICATION_IN_UNCONSTRAINED_VIDEOS/links/00b7d53535ed96428c000000.pdf,,https://doi.org/10.1109/ICIP.2013.6738766,
+cb08f679f2cb29c7aa972d66fe9e9996c8dfae00,http://pdfs.semanticscholar.org/cb08/f679f2cb29c7aa972d66fe9e9996c8dfae00.pdf,,,https://arxiv.org/pdf/1704.08723v1.pdf
+cb84229e005645e8623a866d3d7956c197f85e11,http://pdfs.semanticscholar.org/cb84/229e005645e8623a866d3d7956c197f85e11.pdf,,,http://homepages.inf.ed.ac.uk/keller/publications/pami18.pdf
+cb5cda13a4ccbc32ce912d51e402363c1b501b32,,,,
+cb1b5e8b35609e470ce519303915236b907b13b6,http://dforte.ece.ufl.edu/Domenic_files/IJCB.pdf,,https://doi.org/10.1109/BTAS.2017.8272692,
+cb7a743b9811d20682c13c4ee7b791ff01c62155,,,https://doi.org/10.1109/MMSP.2015.7340789,
+cb9921d5fc4ffa50be537332e111f03d74622442,,,https://doi.org/10.1007/978-3-319-46654-5_79,
+cbdcc28d36f1135d235b5067383b25dcac5d2ff3,,,,
+cbe859d151466315a050a6925d54a8d3dbad591f,http://homes.di.unimi.it/~boccignone/GiuseppeBoccignone_webpage/Stochastic_files/Euvip2010.pdf,,https://doi.org/10.1109/EUVIP.2010.5699099,http://homes.dsi.unimi.it/~boccignone/GiuseppeBoccignone_webpage/Stochastic_Gaze_Shift_files/Euvip2010.pdf
+cbaa17be8c22e219a9c656559e028867dfb2c2ed,,,https://doi.org/10.1109/ICIP.2016.7532636,
+cb160c5c2a0b34aba7b0f39f5dda6aca8135f880,,,https://doi.org/10.1109/SIU.2016.7496023,
+f86ddd6561f522d115614c93520faad122eb3b56,http://pdfs.semanticscholar.org/f86d/dd6561f522d115614c93520faad122eb3b56.pdf,,,https://bi.snu.ac.kr/Publications/Conferences/International/PACS2016_HKwak.pdf
+f8015e31d1421f6aee5e17fc3907070b8e0a5e59,http://pdfs.semanticscholar.org/f801/5e31d1421f6aee5e17fc3907070b8e0a5e59.pdf,,,http://www.cs.cmu.edu/~lanzhzh/thesis/proposal_draft.pdf
+f839ae810338e3b12c8e2f8db6ce4d725738d2d9,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.115
+f83dd9ff002a40228bbe3427419b272ab9d5c9e4,http://pdfs.semanticscholar.org/f83d/d9ff002a40228bbe3427419b272ab9d5c9e4.pdf,,,http://www.science.uva.nl/research/publications/2008/ValentiSEI2008/SPIE08.pdf
+f8c94afd478821681a1565d463fc305337b02779,http://pdfs.semanticscholar.org/f8c9/4afd478821681a1565d463fc305337b02779.pdf,,,http://www.ijsetr.com/uploads/631425IJSETR2137-899.pdf
+f888c165f45febf3d17b8604a99a2f684d689cbc,,,,http://doi.ieeecomputersociety.org/10.1109/CIT.2004.1357196
+f8f2d2910ce8b81cb4bbf84239f9229888158b34,http://pdfs.semanticscholar.org/f8f2/d2910ce8b81cb4bbf84239f9229888158b34.pdf,,,http://www.ijcai.org/Proceedings/16/Papers/514.pdf
+f8ec92f6d009b588ddfbb47a518dd5e73855547d,http://pdfs.semanticscholar.org/f8ec/92f6d009b588ddfbb47a518dd5e73855547d.pdf,,https://doi.org/10.3745/JIPS.02.0004,http://www.jips-k.org/dlibrary/JIPS_v10_no3_paper8.pdf
+f8ba921670c94ed94d94a98d64f38b857b0dc104,,,,
+f869601ae682e6116daebefb77d92e7c5dd2cb15,http://pdfs.semanticscholar.org/f869/601ae682e6116daebefb77d92e7c5dd2cb15.pdf,,,https://cis.temple.edu/~latecki/Papers/RDP_AAAI2017.pdf
+f8ddb2cac276812c25021b5b79bf720e97063b1e,http://www.eecs.qmul.ac.uk/~sgg/papers/ShanEtAl_HCI2006.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2006.13
+f8ed5f2c71e1a647a82677df24e70cc46d2f12a8,http://pdfs.semanticscholar.org/f8ed/5f2c71e1a647a82677df24e70cc46d2f12a8.pdf,,,http://www.ijser.org/researchpaper/Artificial-Neural-Network-Design-and-Parameter-Optimization-for-Facial-Expressions-Recognition.pdf
+f812347d46035d786de40c165a158160bb2988f0,,,https://doi.org/10.1007/s10339-016-0765-6,
+f856532a729bd337fae1eb7dbe55129ae7788f45,,,,http://doi.ieeecomputersociety.org/10.1109/ARTCom.2009.26
+f88ce52c5042f9f200405f58dbe94b4e82cf0d34,,,https://doi.org/10.1109/TNNLS.2015.2508025,
+f8fe1b57347cdcbea755722bf1ae85c4b26f3e5c,,,https://doi.org/10.1007/s00138-016-0790-6,
+f86c6942a7e187c41dd0714531efd2be828e18ad,,,https://doi.org/10.1109/VCIP.2016.7805514,
+f8a5bc2bd26790d474a1f6cc246b2ba0bcde9464,http://pdfs.semanticscholar.org/f8a5/bc2bd26790d474a1f6cc246b2ba0bcde9464.pdf,,,
+f834c50e249c9796eb7f03da7459b71205dc0737,,,https://doi.org/10.1109/TIP.2011.2166974,
+cef841f27535c0865278ee9a4bc8ee113b4fb9f3,http://pdfs.semanticscholar.org/cef8/41f27535c0865278ee9a4bc8ee113b4fb9f3.pdf,,https://doi.org/10.1016/j.eswa.2012.07.074,http://www.ppgia.pucpr.br/~alekoe/Papers/ESWA2012-Koerich.pdf
+cead57f2f7f7b733f4524c4b5a7ba7f271749b5f,,,,http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2017.46
+ce6d60b69eb95477596535227958109e07c61e1e,http://www.rci.rutgers.edu/~vmp93/Conference_pub/BTAS_2015_FVFF_JunCheng_Chen.pdf,,https://doi.org/10.1109/BTAS.2015.7358802,
+cefaad8241bceb24827a71bf7c2556e458e57faa,,,https://doi.org/10.1109/TIP.2013.2264676,
+ceb763d6657a07b47e48e8a2956bcfdf2cf10818,http://pdfs.semanticscholar.org/ceb7/63d6657a07b47e48e8a2956bcfdf2cf10818.pdf,,,http://airccse.org/journal/ijcsity/papers/2114ijcsity01.pdf
+ce3304119ba6391cb6bb25c4b3dff79164df9ac6,,,https://doi.org/10.1016/j.imavis.2016.03.004,
+cefd9936e91885ba7af9364d50470f6cb54315a4,http://pdfs.semanticscholar.org/cefd/9936e91885ba7af9364d50470f6cb54315a4.pdf,,,http://www.jneurosci.org/content/jneuro/30/49/16601.full.pdf
+ce85d953086294d989c09ae5c41af795d098d5b2,http://mmlab.ie.cuhk.edu.hk/archive/2007/NN07_feature.pdf,,https://doi.org/10.1109/TNN.2007.894042,
+ce5eac297174c17311ee28bda534faaa1d559bae,http://pdfs.semanticscholar.org/ce5e/ac297174c17311ee28bda534faaa1d559bae.pdf,,,http://www.bmva.org/thesis-archive/2016/2016-abdallahi.pdf
+ce5e50467e43e3178cbd86cfc3348e3f577c4489,https://www.computer.org/csdl/proceedings/avss/2013/9999/00/06636683.pdf,,,http://doi.ieeecomputersociety.org/10.1109/AVSS.2013.6636683
+ce691a37060944c136d2795e10ed7ba751cd8394,http://pdfs.semanticscholar.org/ce69/1a37060944c136d2795e10ed7ba751cd8394.pdf,,,https://arxiv.org/pdf/1803.09202v1.pdf
+ce8db0fe11e7c96d08de561506f9f8f399dabbb2,,,https://doi.org/10.1109/ICIP.2015.7351677,
+ce11b2d7905d2955c4282db5b68482edb846f29f,,,,http://doi.acm.org/10.1145/3126686.3126705
+ce3f3088d0c0bf236638014a299a28e492069753,http://pdfs.semanticscholar.org/ce3f/3088d0c0bf236638014a299a28e492069753.pdf,,,http://www.researchgate.net/profile/Ilan_Shimshoni/publication/265253067_Online_Action_Recognition_Using_Covariance_of_Shape_and_Motion/links/54ae415a0cf24aca1c6f812d.pdf
+ceeb67bf53ffab1395c36f1141b516f893bada27,http://pdfs.semanticscholar.org/ceeb/67bf53ffab1395c36f1141b516f893bada27.pdf,,,http://arxiv.org/abs/1601.07950
+ce9a61bcba6decba72f91497085807bface02daf,http://www.jdl.ac.cn/user/sgshan/pub/FG04_Qing_LY.pdf,,,http://www.jdl.ac.cn/project/faceId/articles/FRJDL-LaiyunQing-FGR04.pdf
+cef6cffd7ad15e7fa5632269ef154d32eaf057af,http://pdfs.semanticscholar.org/cef6/cffd7ad15e7fa5632269ef154d32eaf057af.pdf,,,https://web.stanford.edu/class/ee368/Project_Autumn_1617/Reports/report_pao.pdf
+cebfafea92ed51b74a8d27c730efdacd65572c40,http://biometrics.cse.msu.edu/Publications/Face/LuJainColbry_Matching2.5DFaceScans_PAMI06.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2006.15
+ce56be1acffda599dec6cc2af2b35600488846c9,http://pdfs.semanticscholar.org/ce56/be1acffda599dec6cc2af2b35600488846c9.pdf,,,http://www.aaai.org/ocs/index.php/ICWSM/ICWSM15/paper/download/10532/10530
+ce30ddb5ceaddc0e7d308880a45c135287573d0e,,,https://doi.org/10.1109/ICSMC.2012.6378304,
+ce54e891e956d5b502a834ad131616786897dc91,http://pdfs.semanticscholar.org/ce54/e891e956d5b502a834ad131616786897dc91.pdf,,,https://www.ijsr.net/archive/v4i12/NOV152507.pdf
+ce6f459462ea9419ca5adcc549d1d10e616c0213,http://pdfs.semanticscholar.org/ce6f/459462ea9419ca5adcc549d1d10e616c0213.pdf,,,http://www.ijcsit.com/docs/Volume%205/vol5issue05/ijcsit20140505147.pdf
+ce933821661a0139a329e6c8243e335bfa1022b1,http://pdfs.semanticscholar.org/ce93/3821661a0139a329e6c8243e335bfa1022b1.pdf,,,https://static.googleusercontent.com/media/research.google.com/en//youtube8m/workshop2017/c14.pdf
+ce6a6d35f65e584214aaf24378ab85038decddbb,,,,
+e0b71d3c7d551684bd334af5b3671df7053a529d,http://mplab.ucsd.edu/~jake/locality.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FGR.2006.74
+e0e4910d575c4a8309f2069b38b99c972dbedc57,http://eprints.pascal-network.org/archive/00009548/01/PoseDetectRandomizedCascades.pdf,,https://doi.org/10.1007/s11263-012-0516-9,http://vision.ics.uci.edu/papers/RogezROT_IJCV_2012/RogezROT_IJCV_2012.pdf
+e0fe68c92fefa80992f4861b0c45a3fbec7cf1c9,,,,http://doi.ieeecomputersociety.org/10.1109/ACII.2015.7344671
+e0dedb6fc4d370f4399bf7d67e234dc44deb4333,http://pdfs.semanticscholar.org/e0de/db6fc4d370f4399bf7d67e234dc44deb4333.pdf,,,http://anthology.aclweb.org/attachments/P/P17/P17-1117.Notes.pdf
+e0638e0628021712ac76e3472663ccc17bd8838c,http://pdfs.semanticscholar.org/e063/8e0628021712ac76e3472663ccc17bd8838c.pdf,,,http://arpnjournals.com/jeas/research_papers/rp_2014/jeas_0214_1007.pdf
+e084b0e477ee07d78c32c3696ea22c94f5fdfbec,,,https://doi.org/10.1109/ICIP.2013.6738565,
+e0c081a007435e0c64e208e9918ca727e2c1c44e,http://pdfs.semanticscholar.org/e0c0/81a007435e0c64e208e9918ca727e2c1c44e.pdf,,,http://mozart.dis.ulpgc.es/Gias/Publications/coolbot-thesis.pdf
+e0cc2a9fe6b5086c55fdbf0021aca3dc1a77a1ca,,,,http://doi.ieeecomputersociety.org/10.1109/BLISS.2008.25
+e0d878cc095eaae220ad1f681b33d7d61eb5e425,http://pdfs.semanticscholar.org/e0d8/78cc095eaae220ad1f681b33d7d61eb5e425.pdf,,https://doi.org/10.3390/s18020627,
+e00d4e4ba25fff3583b180db078ef962bf7d6824,http://pdfs.semanticscholar.org/e00d/4e4ba25fff3583b180db078ef962bf7d6824.pdf,,,https://www.preprints.org/manuscript/201703.0152/v1/download
+e0ab926cd48a47a8c7b16e27583421141f71f6df,,,https://doi.org/10.1109/HPCSim.2016.7568383,
+e0bfcf965b402f3f209f26ae20ee88bc4d0002ab,http://pdfs.semanticscholar.org/e0bf/cf965b402f3f209f26ae20ee88bc4d0002ab.pdf,,,https://scholarspace.manoa.hawaii.edu/bitstream/10125/49890/1/paper0003.pdf
+e0423788eb91772de9d708a17799179cf3230d63,,,,http://doi.acm.org/10.1145/3093241.3093277
+e0ed0e2d189ff73701ec72e167d44df4eb6e864d,http://pdfs.semanticscholar.org/e0ed/0e2d189ff73701ec72e167d44df4eb6e864d.pdf,,,http://www.scielo.br/pdf/epsic/v18n1/20.pdf
+e03f69bad7e6537794a50a99da807c9df4ff5186,,,,http://doi.acm.org/10.1145/2708463.2709060
+e0765de5cabe7e287582532456d7f4815acd74c1,http://pdfs.semanticscholar.org/e076/5de5cabe7e287582532456d7f4815acd74c1.pdf,,https://doi.org/10.1016/j.cviu.2009.06.007,http://ir.lib.hiroshima-u.ac.jp/files/public/27686/20141016161839215539/CVIU_113_1210.pdf
+e013c650c7c6b480a1b692bedb663947cd9d260f,http://www.nlpr.ia.ac.cn/2013papers/gjkw/gk25.pdf,,https://doi.org/10.1109/TIP.2012.2219543,
+e0dc6f1b740479098c1d397a7bc0962991b5e294,http://pdfs.semanticscholar.org/e0dc/6f1b740479098c1d397a7bc0962991b5e294.pdf,,,http://www.jdl.ac.cn/doc/2004/Face%20Detection%20a%20Survey.pdf
+e0446d14d25a178702c10752b803966a54b539e4,,,,
+e0793fd343aa63b5f366c8ace61b9c5489c51a4d,,,,http://doi.ieeecomputersociety.org/10.1109/CRV.2016.46
+465faf9974a60da00950be977f3bc2fc3e56f5d2,,,,http://doi.ieeecomputersociety.org/10.1109/ACII.2017.8273631
+468c8f09d2ad8b558b65d11ec5ad49208c4da2f2,http://www.public.asu.edu/~bli24/Papers/ICPR2016_MSR-CNN.pdf,,https://doi.org/10.1109/ICPR.2016.7900180,
+46a4551a6d53a3cd10474ef3945f546f45ef76ee,http://cvrr.ucsd.edu/publications/2014/TawariTrivedi_IV2014.pdf,,https://doi.org/10.1109/IVS.2014.6856607,
+4686bdcee01520ed6a769943f112b2471e436208,http://ipsjcva.springeropen.com/track/pdf/10.1186/s41074-017-0024-5?site=ipsjcva.springeropen.com,,https://doi.org/10.23919/MVA.2017.7986831,
+4688787d064e59023a304f7c9af950d192ddd33e,http://www.cse.msu.edu/~liuxm/publication/Roth_Liu_Ross_Metaxas_TIFS.pdf,,https://doi.org/10.1109/TIFS.2014.2374424,http://www.cse.msu.edu/~rothjos1/papers/2015TIFS_Roth_Liu_Ross_Metaxas.pdf
+466184b10fb7ce9857e6b5bd6b4e5003e09a0b16,http://pdfs.semanticscholar.org/a42f/433e500661589e567340fe7f7d761d1f14df.pdf,,,http://papers.nips.cc/paper/3433-extended-grassmann-kernels-for-subspace-based-learning
+46e86cdb674440f61b6658ef3e84fea95ea51fb4,http://pdfs.semanticscholar.org/c075/e79a832d36e5b4c76b0f07c3b9d5f3be43e0.pdf,,,http://www.waset.org/journals/waset/v28/v28-94.pdf
+46b2ecef197b465abc43e0e017543b1af61921ac,,,https://doi.org/10.1109/ICPR.2016.7899652,
+467747f86df4537d6deff03dee8e552f760d7c16,,,,
+464ef1b3dcbe84099c904b6f9e9281c5f6fd75eb,,,https://doi.org/10.1109/TIP.2014.2359765,
+46b7ee97d7dfbd61cc3745e8dfdd81a15ab5c1d4,http://ibug.doc.ic.ac.uk/media/uploads/documents/3d_local_features.pdf,,https://doi.org/10.1109/ICIP.2014.7025285,http://doc.utwente.nl/95232/1/Pantic_3D_facial_geometric_features.pdf
+46ae4d593d89b72e1a479a91806c39095cd96615,http://www.idiap.ch/~odobez/publications/GayKhouryMeignierOdobezDeleglise-FaceNaming-ICIP-2014.pdf,,https://doi.org/10.1109/ICIP.2014.7025063,http://publications.idiap.ch/downloads/papers/2015/Gay_ICIP_2014.pdf
+467b602a67cfd7c347fe7ce74c02b38c4bb1f332,http://pdfs.semanticscholar.org/467b/602a67cfd7c347fe7ce74c02b38c4bb1f332.pdf,,https://doi.org/10.1007/978-3-319-10605-2_44,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ECCV_2014/papers/8690/86900679.pdf
+466f80b066215e85da63e6f30e276f1a9d7c843b,http://cbl.uh.edu/pub_files/07961802.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.81
+464de30d3310123644ab81a1f0adc51598586fd2,http://pdfs.semanticscholar.org/464d/e30d3310123644ab81a1f0adc51598586fd2.pdf,,https://doi.org/10.1016/j.imavis.2014.04.002,https://hal.archives-ouvertes.fr/file/index/docid/1009958/filename/14_IMAVIS_REID.pdf
+466a5add15bb5f91e0cfd29a55f5fb159a7980e5,http://pdfs.semanticscholar.org/466a/5add15bb5f91e0cfd29a55f5fb159a7980e5.pdf,,https://doi.org/10.1007/978-3-642-12900-1_12,https://bi.snu.ac.kr/Courses/DMIR/files/Video%20Repeat%20Recognition%20and%20Mining%20by%20Visual%20Features.pdf
+46f3b113838e4680caa5fc8bda6e9ae0d35a038c,http://pdfs.semanticscholar.org/46f3/b113838e4680caa5fc8bda6e9ae0d35a038c.pdf,,,http://www.mdpi.com/2072-6694/2/2/262/pdf/
+465d5bb11912005f0a4f0569c6524981df18a7de,http://pdfs.semanticscholar.org/465d/5bb11912005f0a4f0569c6524981df18a7de.pdf,,https://doi.org/10.1007/978-3-319-27674-8_36,http://user.ceng.metu.edu.tr/~ys/pubs/others/imotionmultishot.pdf
+46c87fded035c97f35bb991fdec45634d15f9df2,https://arxiv.org/pdf/1707.09145v1.pdf,,,https://ivi.fnwi.uva.nl/isis/publications/2017/MettesICCV2017/MettesICCV2017.pdf
+46f32991ebb6235509a6d297928947a8c483f29e,http://pdfs.semanticscholar.org/46f3/2991ebb6235509a6d297928947a8c483f29e.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2003.1211375
+46551095a2cc4976d6be0165c31c37b0c5638719,http://staff.estem-uc.edu.au/roland/wp-content/uploads/file/roland/publications/Journal/JMUI/joshi_goecke_alghowinem_dhall_wagner_epps_parker_breakspear_JMUI2013_MultimodalAssistiveTechnologiesForDepressionDiagnosisAndMonitoring.pdf,,https://doi.org/10.1007/s12193-013-0123-2,http://users.cecs.anu.edu.au/~adhall/Joshi_MultiModal_Depression_Analysis.pdf
+46538b0d841654a0934e4c75ccd659f6c5309b72,http://pdfs.semanticscholar.org/4653/8b0d841654a0934e4c75ccd659f6c5309b72.pdf,,,http://aircconline.com/sipij/V5N1/5114sipij04.pdf
+4672513d0dbc398719d66bba36183f6e2b78947b,,,https://doi.org/10.1016/j.ipm.2015.05.007,
+46a29a5026142c91e5655454aa2c2f122561db7f,http://vipl.ict.ac.cn/sites/default/files/papers/files/2011_FG_sxli_Margin%20Emphasized%20Metric%20Learning%20and%20Its%20Application%20to%20Gabor%20Feature%20Based%20Face%20Recognition.pdf,,https://doi.org/10.1109/FG.2011.5771461,
+469ee1b00f7bbfe17c698ccded6f48be398f2a44,http://pdfs.semanticscholar.org/469e/e1b00f7bbfe17c698ccded6f48be398f2a44.pdf,,,https://pdfs.semanticscholar.org/469e/e1b00f7bbfe17c698ccded6f48be398f2a44.pdf
+4613b3a9344622b2997039afe3d47df1fd4de72f,,,,
+46196735a201185db3a6d8f6e473baf05ba7b68f,http://pdfs.semanticscholar.org/4619/6735a201185db3a6d8f6e473baf05ba7b68f.pdf,,https://doi.org/10.1109/TCYB.2013.2262936,http://mipal.snu.ac.kr/images/5/59/PCA_Lp.pdf
+4682fee7dc045aea7177d7f3bfe344aabf153bd5,http://www.cs.utexas.edu/~cv-fall2012/slides/elad-paper.pdf,,,http://people.csail.mit.edu/yusuf/publications/2011/Aytar11/aytar11.pdf
+46c1af268d4b3c61a0a12be091ca008a3a60e4cd,,,https://doi.org/10.1007/s11042-016-3592-y,
+4657d87aebd652a5920ed255dca993353575f441,http://pdfs.semanticscholar.org/4657/d87aebd652a5920ed255dca993353575f441.pdf,,,http://www.cim.mcgill.ca/~levine/IlluminationReport.pdf
+4622b82a8aff4ac1e87b01d2708a333380b5913b,http://www.cbsr.ia.ac.cn/users/zlei/papers/ICB2015/Zhu-ICB-15.pdf,,https://doi.org/10.1109/ICB.2015.7139070,
+46ded0e6e0042e43b94cf179b902d7932fbbdae1,,,,
+46e866f58419ff4259c65e8256c1d4f14927b2c6,http://pdfs.semanticscholar.org/f03d/cfd956cf4404ec9f0c7fb451479d72a63e03.pdf,,,http://www2.warwick.ac.uk/fac/sci/dcs/people/chang-tsun_li/publications/ijdcf_2014.pdf
+46072f872eee3413f9d05482be6446f6b96b6c09,http://pdfs.semanticscholar.org/4607/2f872eee3413f9d05482be6446f6b96b6c09.pdf,,https://doi.org/10.1007/11744047_18,http://www.lv-nus.org/papers%5C2006%5C2006_C_9.pdf
+4698a599425c3a6bae1c698456029519f8f2befe,http://pdfs.semanticscholar.org/4698/a599425c3a6bae1c698456029519f8f2befe.pdf,,,https://arxiv.org/pdf/1803.07253v1.pdf
+2cf92ee60f719098acc3aae3981cedc47fa726b3,http://eksl.isi.edu/files/papers/sinjini_2007_1172280675.pdf,,,https://www.researchgate.net/profile/Marios_Savvides/publication/6506611_Statistical_performance_evaluation_of_biometric_authentication_systems_using_random_effects_models/links/0a85e53c8443fd25ae000000.pdf
+2c258eec8e4da9e65018f116b237f7e2e0b2ad17,http://openaccess.thecvf.com/content_cvpr_2017/papers/Qiu_Deep_Quantization_Encoding_CVPR_2017_paper.pdf,,,http://arxiv.org/abs/1611.09502
+2cbb4a2f8fd2ddac86f8804fd7ffacd830a66b58,http://www.openu.ac.il/home/hassner/projects/cnn_agegender/CNN_AgeGenderEstimation.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2015.7301352
+2ccedc961d4d9cd9a88297c0061d67f81773f8b8,,,,
+2c8743089d9c7df04883405a31b5fbe494f175b4,http://srl.informatik.uni-freiburg.de/publicationsdir/linderICRA15.pdf,,https://doi.org/10.1109/ICRA.2015.7139616,
+2c61a9e26557dd0fe824909adeadf22a6a0d86b0,http://pdfs.semanticscholar.org/f117/3a4c5e3501323b37c1ae9a6d7dd8a236eab8.pdf,,,http://arxiv.org/pdf/1504.07339v3.pdf
+2c34bf897bad780e124d5539099405c28f3279ac,http://pdfs.semanticscholar.org/2c34/bf897bad780e124d5539099405c28f3279ac.pdf,,,https://arxiv.org/pdf/1301.6847v2.pdf
+2c203050a6cca0a0bff80e574bda16a8c46fe9c2,http://pdfs.semanticscholar.org/608f/43ee003c7c2e7f170336fda7a00cccd06311.pdf,,https://doi.org/10.24963/ijcai.2017/315,http://www.ijcai.org/proceedings/2017/0315.pdf
+2cc4ae2e864321cdab13c90144d4810464b24275,http://pdfs.semanticscholar.org/f3d2/c66630176cbb1409ebacd2dac4b30d8e3145.pdf,,https://doi.org/10.1007/11559573_127,http://s.i-techonline.com/Book/Face-Recognition/ISBN978-3-902613-03-5-fr23.pdf
+2cb5db4df50921d276ad9e7186119a276324e465,http://cbcl.mit.edu/projects/cbcl/publications/ps/Leibo_Liao_Poggio_VISAPP_2014.pdf,,https://doi.org/10.5220/0004694201130121,http://cbcl.mit.edu/publications/ps/Subtasks_Presentation_VISAPP2014.pdf
+2c3430e0cbe6c8d7be3316a88a5c13a50e90021d,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Wang_Multi-feature_Spectral_Clustering_2014_CVPR_paper.pdf,,https://doi.org/10.1109/CVPR.2014.523,http://eeeweba.ntu.edu.sg/computervision/Research%20Papers/2014/Multi-feature%20Spectral%20Clustering%20with%20Minimax%20Optimization.pdf
+2c2786ea6386f2d611fc9dbf209362699b104f83,http://pdfs.semanticscholar.org/2c27/86ea6386f2d611fc9dbf209362699b104f83.pdf,,,http://libdcms.nida.ac.th/thesis6/2013/b179796.pdf
+2c92839418a64728438c351a42f6dc5ad0c6e686,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Masi_Pose-Aware_Face_Recognition_CVPR_2016_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.523
+2c848cc514293414d916c0e5931baf1e8583eabc,http://pdfs.semanticscholar.org/2c84/8cc514293414d916c0e5931baf1e8583eabc.pdf,,,http://www.researchgate.net/profile/Andrews_Sobral/publication/264295959_An_automatic_facial_expression_recognition_system_evaluated_with_different_classifiers/links/543691580cf2dc341db35eea.pdf
+2c883977e4292806739041cf8409b2f6df171aee,http://pdfs.semanticscholar.org/c5fb/ef530eb28d4f787990e0b962a6a68e420e49.pdf,,https://doi.org/10.1007/978-3-642-41827-3_42,http://vbn.aau.dk/files/80006265/CIARP.pdf
+2cdd9e445e7259117b995516025fcfc02fa7eebb,http://hub.hku.hk/bitstream/10722/61208/1/Content.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICMLA.2008.9
+2cf9088e9faa81872b355a4ea0a9fae46d3c8a08,http://www.cvg.unibe.ch/tpapadhimitri/tech.pdf,,,http://www.cvg.unibe.ch/tpapadhimitri/0206.pdf
+2cdc40f20b70ca44d9fd8e7716080ee05ca7924a,http://pdfs.semanticscholar.org/2cdc/40f20b70ca44d9fd8e7716080ee05ca7924a.pdf,,,https://arxiv.org/pdf/1710.07557v1.pdf
+2cac70f9c8140a12b6a55cef834a3d7504200b62,http://www.eng.auburn.edu/~reevesj/Classes/ELEC6970-latex/posters/baposterex1.pdf,,,http://www.brian-amberg.de/uni/poster/brian_iccv07.pdf
+2cf3564d7421b661e84251d280d159d4b3ebb336,,,https://doi.org/10.1109/BTAS.2014.6996287,
+2c8f24f859bbbc4193d4d83645ef467bcf25adc2,http://romisatriawahono.net/lecture/rm/survey/machine%20learning/Frenay%20-%20Classification%20in%20the%20Presence%20of%20Label%20Noise%20-%202014.pdf,,https://doi.org/10.1109/TNNLS.2013.2292894,
+2ca43325a5dbde91af90bf850b83b0984587b3cc,http://pdfs.semanticscholar.org/2ca4/3325a5dbde91af90bf850b83b0984587b3cc.pdf,,,http://worldcomp-proceedings.com/proc/p2013/EEE2665.pdf
+2c6ab32a03c4862ee3e2bc02e7e74745cd523ad2,,,https://doi.org/10.1109/IC3.2013.6612218,
+2ca10da4b59b406533ad1dc7740156e01782658f,,,https://doi.org/10.1109/SIU.2016.7496207,
+2cfc28a96b57e0817cc9624a5d553b3aafba56f3,https://web.njit.edu/~borcea/papers/ieee-sarnoff16.pdf,,https://doi.org/10.1109/SARNOF.2016.7846758,https://web.njit.edu/~crix/publications/sarnoff16.pdf
+2cdd5b50a67e4615cb0892beaac12664ec53b81f,http://people.eecs.berkeley.edu/~junyanz/projects/mirrormirror/mirrormirror_small.pdf,,,http://www.eecs.berkeley.edu/~junyanz/projects/mirrormirror/mirrormirror_small.pdf
+2cae619d0209c338dc94593892a787ee712d9db0,http://vis-www.cs.umass.edu/papers/cvpr08shrf.pdf,,,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR_2008/data/papers/091.pdf
+2c0acaec54ab2585ff807e18b6b9550c44651eab,http://pdfs.semanticscholar.org/2c0a/caec54ab2585ff807e18b6b9550c44651eab.pdf,,,http://www.graphicon.ru/html/2014/papers/111-114.pdf
+2c811b647a6aac924920c06e607e9e8d4b8d872d,http://pdfs.semanticscholar.org/2c81/1b647a6aac924920c06e607e9e8d4b8d872d.pdf,,https://doi.org/10.1016/j.patcog.2006.03.017,http://www.inf.unideb.hu/~sajolevente/papers/lipContour/2009%20Recognizing%20facial%20action%20units%20using%20independent%20component%20analysis%20and%20support%20vector%20machine.pdf
+2cdde47c27a8ecd391cbb6b2dea64b73282c7491,http://pdfs.semanticscholar.org/2cdd/e47c27a8ecd391cbb6b2dea64b73282c7491.pdf,,,http://arxiv.org/pdf/1602.00224v1.pdf
+2c7c3a74da960cc76c00965bd3e343958464da45,http://pdfs.semanticscholar.org/2c7c/3a74da960cc76c00965bd3e343958464da45.pdf,,,http://search.ieice.org/bin/summary.php?id=e94-d_5_1099
+2cf5f2091f9c2d9ab97086756c47cd11522a6ef3,http://pdfs.semanticscholar.org/2cf5/f2091f9c2d9ab97086756c47cd11522a6ef3.pdf,,,http://arxiv.org/abs/1711.09017
+2cd426f10178bd95fef3dede69ae7b67e73bb70c,,,https://doi.org/10.1109/ROBIO.2016.7866457,
+2c285dadfa6c07d392ee411d0213648a8a1cf68f,http://www.contrib.andrew.cmu.edu/~yzhiding/ICMI15.pdf,,,http://doi.acm.org/10.1145/2818346.2830595
+2c6e65d8ef8c17387b839ab6a82fb469117ae396,,,,
+2c2f03edc9b76e5ac132b54b2e3313237e22b5e7,,,,
+2c17d36bab56083293456fe14ceff5497cc97d75,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhu_Unconstrained_Face_Alignment_CVPR_2016_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.371
+2c06781ba75d51f5246d65d1acf66ab182e9bde6,,,https://doi.org/10.1016/j.imavis.2016.11.002,
+2c4b96f6c1a520e75eb37c6ee8b844332bc0435c,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w12/papers/Leo_Automatic_Emotion_Recognition_ICCV_2015_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2015.76
+2cd7821fcf5fae53a185624f7eeda007434ae037,http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf,,,http://doi.ieeecomputersociety.org/10.1109/WACV.2014.6835989
+2ce84465b9759166effc7302c2f5339766cc523d,,,https://doi.org/10.1109/VCIP.2015.7457830,
+79581c364cefe53bff6bdd224acd4f4bbc43d6d4,http://pdfs.semanticscholar.org/7958/1c364cefe53bff6bdd224acd4f4bbc43d6d4.pdf,,https://doi.org/10.1016/j.imavis.2016.10.004,https://arxiv.org/pdf/1507.06838v1.pdf
+794ddb1f3b7598985d4d289b5b0664be736a50c4,http://pdfs.semanticscholar.org/794d/db1f3b7598985d4d289b5b0664be736a50c4.pdf,,,http://www.dabi.temple.edu/~hbling/publication/aaai_compboost-2.pdf
+790aa543151312aef3f7102d64ea699a1d15cb29,http://arxiv.org/pdf/1607.06290v1.pdf,,https://doi.org/10.1007/s11263-017-1010-1,https://arxiv.org/pdf/1607.06290v1.pdf
+795aa8064b34c4bf4acdd8be3f1e5d06da5a7756,http://pdfs.semanticscholar.org/795a/a8064b34c4bf4acdd8be3f1e5d06da5a7756.pdf,,,https://arxiv.org/pdf/1803.05258v1.pdf
+79617903c5cb56697f2e738e1463b9654e2d68ed,http://hal.cse.msu.edu/pdfs/papers/2013-mmcf-tip.pdf,,https://doi.org/10.1109/TIP.2012.2220151,http://vishnu.boddeti.net/papers/mmcf-tip-2013.pdf
+7957abae15f631c5f5c50de68aa2ad08fe1f366f,,,,
+795ea140df2c3d29753f40ccc4952ef24f46576c,http://pdfs.semanticscholar.org/795e/a140df2c3d29753f40ccc4952ef24f46576c.pdf,,,https://arxiv.org/pdf/1711.00111v1.pdf
+795b555abb26e62ad89a93645122da530327c447,,,,
+79b669abf65c2ca323098cf3f19fa7bdd837ff31,http://dro.deakin.edu.au/eserv/DU:30044585/venkatesh-efficienttensor-2008.pdf,,https://doi.org/10.1109/ICPR.2008.4761706,http://figment.cse.usf.edu/~sfefilat/data/papers/WeAT2.2.pdf
+798e58c181f3ba3aecbe41acd1881860c5e2df3a,,,https://doi.org/10.1109/TNNLS.2012.2237038,
+794c0dc199f0bf778e2d40ce8e1969d4069ffa7b,http://hcil2.cs.umd.edu/trs/2011-17/2011-17.pdf,,https://doi.org/10.1109/PASSAT/SocialCom.2011.225,http://hcil.cs.umd.edu/trs/2011-17/2011-17.pdf
+7975f12187a7686d861054649845ccc634c3b00f,,,,
+79dd787b2877cf9ce08762d702589543bda373be,http://fipa.cs.kit.edu/befit/workshop2011/pdf/slides/jianguo_li-slides.pdf,,https://doi.org/10.1109/ICCVW.2011.6130518,http://face.cs.kit.edu/befit/workshop2011/pdf/slides/jianguo_li-slides.pdf
+7935f644c8044c0d3b81e2842e5ecc3672698bbb,,,https://doi.org/10.1109/ICIP.2011.6116258,
+7966146d72f9953330556baa04be746d18702047,http://pdfs.semanticscholar.org/7966/146d72f9953330556baa04be746d18702047.pdf,,,http://www.ri.cmu.edu/pub_files/2013/2/Mason2013.pdf
+79fa57dedafddd3f3720ca26eb41c82086bfb332,http://www.cis.pku.edu.cn/vision/Visual&Robot/publication/doc/IROS05_wu.pdf,,https://doi.org/10.1109/IROS.2005.1545532,
+79cdc8c786c535366cafeced1f3bdeb18ff04e66,http://www.researchgate.net/profile/Ziga_Spiclin/publication/221795259_Groupwise_registration_of_multimodal_images_by_an_efficient_joint_entropy_minimization_scheme/links/0deec520dd49e7bc24000000.pdf,,https://doi.org/10.1109/TIP.2012.2186145,https://www.researchgate.net/profile/Ziga_Spiclin/publication/221795259_Groupwise_registration_of_multimodal_images_by_an_efficient_joint_entropy_minimization_scheme/links/0deec520dd49e7bc24000000.pdf
+79fd4baca5f840d6534a053b22e0029948b9075e,,,https://doi.org/10.1109/ISDA.2012.6416647,
+793e7f1ba18848908da30cbad14323b0389fd2a8,http://pdfs.semanticscholar.org/793e/7f1ba18848908da30cbad14323b0389fd2a8.pdf,,,http://openaccess.thecvf.com/content_ICCV_2017/supplemental/Jin_End-To-End_Face_Detection_ICCV_2017_supplemental.pdf
+2d5d3905adfea7a6a8371dc2c5edc669cadacf70,,,,
+2d990b04c2bd61d3b7b922b8eed33aeeeb7b9359,http://pdfs.semanticscholar.org/2d99/0b04c2bd61d3b7b922b8eed33aeeeb7b9359.pdf,,https://doi.org/10.1007/978-3-642-37331-2_25,http://www.umiacs.umd.edu/~zhuolin/Publications/DDLPC_ACCV2012_Slide.pdf
+2d25045ec63f9132371841c0beccd801d3733908,http://pdfs.semanticscholar.org/2d25/045ec63f9132371841c0beccd801d3733908.pdf,,https://doi.org/10.3390/s150306719,http://www.mdpi.com/1424-8220/15/3/6719/pdf
+2dd6c988b279d89ab5fb5155baba65ce4ce53c1e,http://pdfs.semanticscholar.org/2dd6/c988b279d89ab5fb5155baba65ce4ce53c1e.pdf,,https://doi.org/10.1016/j.patcog.2011.09.023,http://www2.ece.ohio-state.edu/~aleix/PR12.pdf
+2db05ef11041447dbc735362db68b04e562c1e35,http://www.cs.berkeley.edu/~daf/eccv-sft.pdf,,https://doi.org/10.1007/3-540-47977-5_15,http://vision.cse.psu.edu/research/3Dreconstruction/relatedWork/papers/forsythIJCV.pdf
+2d94dfa9c8f6708e071ef38d58f9f9bcb374cd84,,,https://doi.org/10.1109/CVPRW.2011.5981817,
+2d080662a1653f523321974a57518e7cb67ecb41,http://pdfs.semanticscholar.org/2d08/0662a1653f523321974a57518e7cb67ecb41.pdf,,https://doi.org/10.1007/978-3-319-47665-0_35,http://iva2016.ict.usc.edu/wp-content/uploads/Papers/100110358.pdf
+2d4b9fe3854ccce24040074c461d0c516c46baf4,https://arxiv.org/pdf/1704.04671v1.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.342
+2d294c58b2afb529b26c49d3c92293431f5f98d0,https://ibug.doc.ic.ac.uk/media/uploads/documents/mmpp_journal.pdf,,https://doi.org/10.1109/TIP.2014.2348868,http://ibug.doc.ic.ac.uk/media/uploads/documents/mmpp_journal.pdf
+2d1f86e2c7ba81392c8914edbc079ac64d29b666,https://arxiv.org/pdf/1702.04471v1.pdf,,https://doi.org/10.1109/WACV.2017.71,http://arxiv.org/abs/1702.04471
+2d87f4bf0606ce9939033b8f1fbc64b539eb18a6,,,,
+2d164f88a579ba53e06b601d39959aaaae9016b7,http://pdfs.semanticscholar.org/a666/2bf767df8f8a5bcb655142ac0fb7c4f524f1.pdf,,https://doi.org/10.5244/C.20.31,http://www.comp.leeds.ac.uk/bmvc2008/proceedings/2006/papers/099.pdf
+2d23fa205acca9c21e3e1a04674f1e5a9528550e,http://pdfs.semanticscholar.org/2d23/fa205acca9c21e3e1a04674f1e5a9528550e.pdf,,https://doi.org/10.1007/978-3-642-21257-4_7,https://www-i6.informatik.rwth-aachen.de/publications/download/703/Pishchulin-IbPRIA-2011.pdf
+2d244d70ed1a2ba03d152189f1f90ff2b4f16a79,http://pdfs.semanticscholar.org/2d24/4d70ed1a2ba03d152189f1f90ff2b4f16a79.pdf,,https://doi.org/10.5244/C.17.1,http://www.bmva.org/bmvc/2003/papers/46/BMVC_0418.pdf
+2d88e7922d9f046ace0234f9f96f570ee848a5b5,http://pdfs.semanticscholar.org/2d88/e7922d9f046ace0234f9f96f570ee848a5b5.pdf,,,http://arxiv.org/pdf/1603.09638v2.pdf
+2d31ab536b3c8a05de0d24e0257ca4433d5a7c75,http://tamaraberg.com/papers/xray.pdf,,,http://www.tamaraberg.com/papers/xray.pdf
+2dbde64ca75e7986a0fa6181b6940263bcd70684,http://www.micc.unifi.it/wp-content/uploads/2016/01/2014_pose_independent.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICPR.2014.766
+2d146cc0908c931d87f6e6e5d08b117c30a69b8d,http://www.cs.cityu.edu.hk/~yihong/download/TSMC.pdf,,https://doi.org/10.1109/TSMCB.2008.2006641,
+2d0363a3ebda56d91d704d5ff5458a527775b609,http://pdfs.semanticscholar.org/2e07/a4c0f87ac078fcccf057d109f9387f4703a9.pdf,,https://doi.org/10.1007/978-3-319-46493-0_47,http://arxiv.org/abs/1512.00570
+2debdb6a772312788251cc3bd1cb7cc8a6072214,,,https://doi.org/10.1142/S0218001415560157,
+2d93a9aa8bed51d0d1b940c73ac32c046ebf1eb8,http://pdfs.semanticscholar.org/2d93/a9aa8bed51d0d1b940c73ac32c046ebf1eb8.pdf,,,http://arxiv.org/abs/1608.03824
+2dd2c7602d7f4a0b78494ac23ee1e28ff489be88,https://www.tugraz.at/fileadmin/user_upload/Institute/ICG/Documents/lrs/pubs/koestinger_cvpr_2012.pdf,,,http://lrs.icg.tugraz.at/research/kissme/paper/lrs_icg_koestinger_cvpr_2012.pdf
+2d411826cd7865638b65e1b5f92043c245f009f9,,,,http://doi.acm.org/10.1145/2733373.2806239
+2d79dece7890121469f515a6e773ba0251fc2d98,,,https://doi.org/10.1109/ICIP.2017.8296756,
+2d84e30c61281d3d7cdd11676683d6e66a68aea6,http://pdfs.semanticscholar.org/2d84/e30c61281d3d7cdd11676683d6e66a68aea6.pdf,,https://doi.org/10.1007/978-3-319-29451-3_14,http://img.cs.uec.ac.jp/e/pub/conf15/151125dohang_0.pdf
+2d98a1cb0d1a37c79a7ebcb727066f9ccc781703,https://arxiv.org/pdf/1706.07525v1.pdf,,,http://doi.acm.org/10.1145/2733373.2806334
+2dced31a14401d465cd115902bf8f508d79de076,http://pdfs.semanticscholar.org/2dce/d31a14401d465cd115902bf8f508d79de076.pdf,,,http://journal-cdn.frontiersin.org/article/127212/files/pubmed-zip/versions/3/pdf
+2d05e768c64628c034db858b7154c6cbd580b2d5,http://pdfs.semanticscholar.org/2d05/e768c64628c034db858b7154c6cbd580b2d5.pdf,,,http://ijcsmc.com/docs/papers/August2015/V4I8201567.pdf
+2d072cd43de8d17ce3198fae4469c498f97c6277,http://www.patrikhuber.ch/files/RCRC_SPL_2015.pdf,,https://doi.org/10.1109/LSP.2014.2347011,http://www.ee.surrey.ac.uk/CVSSP/Publications/papers/Feng-IEEE-SPL-2015.pdf
+2d35a07c4fa03d78d5b622ab703ea44850de8d39,http://www.cs.sunysb.edu/~vislab/papers/Zhang2005cgi.pdf,,,http://www3.cs.stonybrook.edu/~cvl/content/papers/2005/Zhang2005cgi.pdf
+2dd5f1d69e0e8a95a10f3f07f2c0c7fa172994b3,http://pdfs.semanticscholar.org/ca31/53a726d8c212a7fd92f696c7e00a3ae3b31f.pdf,,,http://s.i-techonline.com/Book/Face-Recognition/ISBN978-3-902613-03-5-fr20.pdf
+2d71e0464a55ef2f424017ce91a6bcc6fd83f6c3,http://pdfs.semanticscholar.org/77c1/56969e3b7fbc86432c5238a95679d25ac579.pdf,,,http://research.ijcaonline.org/ncacit2016/number7/ncacit3093.pdf
+2d38fd1df95f5025e2cee5bc439ba92b369a93df,http://pdfs.semanticscholar.org/2d38/fd1df95f5025e2cee5bc439ba92b369a93df.pdf,,,http://www.cs.dartmouth.edu/reports/TR2011-700.pdf
+2d83ba2d43306e3c0587ef16f327d59bf4888dc3,http://www.cs.colby.edu/courses/S16/cs365/papers/karpath-deepVideo-CVPR14.pdf,,,http://cs.stanford.edu/people/karpathy/deepvideo/deepvideo_cvpr2014.pdf
+2df4d0c06f4f68060cecbbb8e2088d9c6b20d04f,,,https://doi.org/10.1109/ICIP.2014.7026056,
+2d84c0d96332bb4fbd8acced98e726aabbf15591,http://pdfs.semanticscholar.org/2d84/c0d96332bb4fbd8acced98e726aabbf15591.pdf,,,http://www.ee.ucr.edu/~amitrc/THESIS/thesis-ramya.pdf
+2d79d338c114ece1d97cde1aa06ab4cf17d38254,http://crcv.ucf.edu/papers/cvpr2016/Borji_CVPR2016.pdf,,,http://crcv-web.eecs.ucf.edu/papers/cvpr2016/Borji_CVPR2016.pdf
+2df4d05119fe3fbf1f8112b3ad901c33728b498a,http://pdfs.semanticscholar.org/891b/10c4b3b92ca30c9b93170ec9abd71f6099c4.pdf,,,https://pdfs.semanticscholar.org/9ca7/899338129f4ba6744f801e722d53a44e4622.pdf
+2d3482dcff69c7417c7b933f22de606a0e8e42d4,http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf,,,http://people.cs.umass.edu/~elm/papers/lfw_update.pdf
+2d925cddb4a42d235b637e4888e24ba876b09e4a,,,,
+2d2fb01f761d21a459cfb34935bc47ab45a9913b,,,,http://doi.ieeecomputersociety.org/10.1109/TAFFC.2014.2346515
+2d748f8ee023a5b1fbd50294d176981ded4ad4ee,http://pdfs.semanticscholar.org/2d74/8f8ee023a5b1fbd50294d176981ded4ad4ee.pdf,,,http://arxiv.org/pdf/1602.03418v1.pdf
+2d3c17ced03e4b6c4b014490fe3d40c62d02e914,http://pdfs.semanticscholar.org/2d3c/17ced03e4b6c4b014490fe3d40c62d02e914.pdf,,https://doi.org/10.1002/cav.1455,http://www.cs.siue.edu/~wwhite/CS582/ResearchPapers/Eccher_FacialAnimation/VideoDrivenStateAware_CAVW0512.pdf
+4188bd3ef976ea0dec24a2512b44d7673fd4ad26,http://ibug.doc.ic.ac.uk/media/uploads/documents/ieee_tip2010.pdf,,https://doi.org/10.1109/TIP.2009.2038816,https://ibug.doc.ic.ac.uk/media/uploads/documents/ieee_tip2010.pdf
+416b559402d0f3e2b785074fcee989d44d82b8e5,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Cai_Multi-View_Super_Vector_2014_CVPR_paper.pdf,,,http://xjpeng.weebly.com/uploads/5/5/4/4/55444193/caiwpq_cvpr14.pdf
+416364cfdbc131d6544582e552daf25f585c557d,http://www.dcs.qmw.ac.uk/~sgg/papers/Zalewski_Gong_FG04.pdf,,,http://doi.ieeecomputersociety.org/10.1109/AFGR.2004.1301581
+41b38da2f4137c957537908f9cb70cbd2fac8bc1,https://arxiv.org/pdf/1701.01879v1.pdf,,https://doi.org/10.1109/ICASSP.2017.7952406,http://mirlab.org/conference_papers/International_Conference/ICASSP%202017/pdfs/0001497.pdf
+41cfc9edbf36754746991c2a1e9a47c0d129d105,https://www.cs.princeton.edu/~ohad/papers/FriedShechtmanGoldmanFinkelstein_SIGGRAPH2016.pdf,,,http://gfx.cs.princeton.edu/pubs/Fried_2016_PMO/fried2016-portraits.pdf
+41000c3a3344676513ef4bfcd392d14c7a9a7599,http://pdfs.semanticscholar.org/d3ba/9ed56e9ddb73f0e0f2bea3fd3920db30f42e.pdf,,,https://arxiv.org/pdf/1401.0092v1.pdf
+411ee9236095f8f5ca3b9ef18fd3381c1c68c4b8,http://pdfs.semanticscholar.org/411e/e9236095f8f5ca3b9ef18fd3381c1c68c4b8.pdf,,,http://www.scielo.br/pdf/babt/v59nspe2/1516-8913-babt-59-16161057.pdf
+41e5d92b13d36da61287c7ffd77ee71de9eb2942,,,https://doi.org/10.1016/j.asoc.2016.12.033,
+4159663f0b292fd8cc7411929be9d669bb98b386,http://www.researchgate.net/profile/Pradeep_Khosla/publication/224752362_Cancelable_biometric_filters_for_face_recognition/links/00b4952ade904b0db4000000.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICPR.2004.1334679
+410bc0b3bd82c85c98df71ec0cfe995f14621077,,,,
+41781474d834c079e8fafea154d7916b77991b15,,,,http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2017.60
+4140498e96a5ff3ba816d13daf148fffb9a2be3f,http://multicomp.cs.cmu.edu/wp-content/uploads/2017/10/2017_FG_Li_Constrained.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.88
+41f8477a6be9cd992a674d84062108c68b7a9520,http://pdfs.semanticscholar.org/41f8/477a6be9cd992a674d84062108c68b7a9520.pdf,,,http://ivpl.eecs.northwestern.edu/sites/default/files/allerton_2007.pdf
+41c8e222ebb26e72050f5d26c82f25d7618b700f,,,,
+417c2fa930bb7078fdf10cb85c503bd5270b9dc2,,,https://doi.org/10.1109/ICSIPA.2015.7412169,
+411503a304a661b0c04c2b446a6e43e4a70942dc,http://www0.cs.ucl.ac.uk/staff/s.prince/Papers/CRV2010FaceClustFinal.pdf,,,http://www.researchgate.net/profile/Simon_Prince/publication/221469462_Bayesian_Identity_Clustering/links/09e41510ae5d48f567000000.pdf
+41aa8c1c90d74f2653ef4b3a2e02ac473af61e47,http://pdfs.semanticscholar.org/41aa/8c1c90d74f2653ef4b3a2e02ac473af61e47.pdf,,,https://arxiv.org/pdf/1410.5861v1.pdf
+41ab4939db641fa4d327071ae9bb0df4a612dc89,http://pdfs.semanticscholar.org/41ab/4939db641fa4d327071ae9bb0df4a612dc89.pdf,,https://doi.org/10.1007/978-3-642-01811-4_33,http://www.researchgate.net/profile/Leopoldo_Altamirano_Robles/publication/221055031_Interpreting_Face_Images_by_Fitting_a_Fast_Illumination-Based_3D_Active_Appearance_Model/links/54b002da0cf28ebe92de3cdd.pdf
+41971dfbf404abeb8cf73fea29dc37b9aae12439,http://pdfs.semanticscholar.org/4197/1dfbf404abeb8cf73fea29dc37b9aae12439.pdf,,,http://sitis.u-bourgogne.fr/06/Proceedings/SIT/f23.pdf
+4157e45f616233a0874f54a59c3df001b9646cd7,http://pdfs.semanticscholar.org/4157/e45f616233a0874f54a59c3df001b9646cd7.pdf,,,https://elifesciences.org/content/3/e02020-download.pdf
+41a6196f88beced105d8bc48dd54d5494cc156fb,http://toc.proceedings.com/25848webtoc.pdf,,,
+41de109bca9343691f1d5720df864cdbeeecd9d0,http://pdfs.semanticscholar.org/41de/109bca9343691f1d5720df864cdbeeecd9d0.pdf,,https://doi.org/10.3390/s18020416,
+41d9a240b711ff76c5448d4bf4df840cc5dad5fc,https://arxiv.org/pdf/1206.2627v2.pdf,,https://doi.org/10.1109/TMM.2014.2306175,
+419a6fca4c8d73a1e43003edc3f6b610174c41d2,http://www.robots.newcastle.edu.au/~chalup/chalup_publications/p058_preprint.pdf,,https://doi.org/10.1109/IJCNN.2010.5596836,http://www.robots.newcastle.edu.au/~chalup/chalup_publications/p058.pdf
+41c97af4801ac302f09902aeec2af17b481563ab,http://eeeweba.ntu.edu.sg/computervision/Research%20Papers/2016/Collaborative%20Multi-View%20Metric%20Learning%20for%20Visual%20Classification.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICME.2016.7552996
+41f195f421b548357088c2985077d6b14003ce7e,,,,
+4136a4c4b24c9c386d00e5ef5dffdd31ca7aea2c,http://www.ifp.illinois.edu/~dagli/papers/ICME07.pdf,,https://doi.org/10.1109/ICME.2007.4284961,
+4180978dbcd09162d166f7449136cb0b320adf1f,http://pdfs.semanticscholar.org/4180/978dbcd09162d166f7449136cb0b320adf1f.pdf,,,http://www.cvc.uab.es/~petia/2011/Miguel%20Real-time%20head%20pose%20classificatio%20cvcrd2010reyes.pdf
+414fdfe5f2e4f32a59bf15062b6e524cbf970637,,,https://doi.org/10.1109/TIFS.2014.2361028,
+41b997f6cec7a6a773cd09f174cb6d2f036b36cd,http://pdfs.semanticscholar.org/41b9/97f6cec7a6a773cd09f174cb6d2f036b36cd.pdf,,https://doi.org/10.1016/j.cviu.2010.12.001,https://pdfs.semanticscholar.org/41b9/97f6cec7a6a773cd09f174cb6d2f036b36cd.pdf
+41aa209e9d294d370357434f310d49b2b0baebeb,https://arxiv.org/pdf/1605.05440v1.pdf,,https://doi.org/10.1109/ICIP.2016.7532983,http://arxiv.org/pdf/1605.05440v1.pdf
+4118b4fc7d61068b9b448fd499876d139baeec81,http://www.cs.utexas.edu/~ssi/TKDE2010.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TKDE.2009.126
+413a184b584dc2b669fbe731ace1e48b22945443,http://www.vision.ee.ethz.ch/en/publications/papers/articles/eth_biwi_00911.pdf,,,http://groups.inf.ed.ac.uk/calvin/Publications/eichner-techreport11.pdf
+83b7578e2d9fa60d33d9336be334f6f2cc4f218f,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_101_ext.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2015.7298815
+839a2155995acc0a053a326e283be12068b35cb8,http://pdfs.semanticscholar.org/839a/2155995acc0a053a326e283be12068b35cb8.pdf,,,http://arxiv.org/pdf/1511.05045v2.pdf
+83b54b8c97dc14e302dad191327407ec0d5fb4a6,,,https://doi.org/10.1109/ICIP.2017.8296913,
+83fd2d2d5ad6e4e153672c9b6d1a3785f754b60e,http://pdfs.semanticscholar.org/83fd/2d2d5ad6e4e153672c9b6d1a3785f754b60e.pdf,,,http://langcog.stanford.edu/papers_new/hall-2015-ajmg.pdf
+83c1fee5ef4b7ba9d9730f3b550dd7bfbdaf591d,,,,
+83ca4cca9b28ae58f461b5a192e08dffdc1c76f3,http://infoscience.epfl.ch/record/200407/files/icip1024-cam-ready.pdf,,https://doi.org/10.1109/ICIP.2014.7026203,https://infoscience.epfl.ch/record/200407/files/icip1024-cam-ready.pdf
+831fbef657cc5e1bbf298ce6aad6b62f00a5b5d9,http://pdfs.semanticscholar.org/831f/bef657cc5e1bbf298ce6aad6b62f00a5b5d9.pdf,,,https://arxiv.org/pdf/1712.05526v1.pdf
+83011670e083dd52484578f8b6b3b4ccde3237ec,,,,
+832e1d128059dd5ed5fa5a0b0f021a025903f9d5,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Dapogny_Pairwise_Conditional_Random_ICCV_2015_paper.pdf,,,http://www.isir.upmc.fr/files/2015ACTI3549.pdf
+83e093a07efcf795db5e3aa3576531d61557dd0d,http://pdfs.semanticscholar.org/83e0/93a07efcf795db5e3aa3576531d61557dd0d.pdf,,https://doi.org/10.1007/978-3-319-27863-6_34,https://www.tnt.uni-hannover.de/papers/data/1124/paper.pdf
+831d661d657d97a07894da8639a048c430c5536d,http://www.cv-foundation.org/openaccess/content_cvpr_2016_workshops/w4/papers/Zhu_Weakly_Supervised_Facial_CVPR_2016_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2016.19
+8383faea09b4b4bef8117a1da897495ebd68691b,,,https://doi.org/10.1109/TCYB.2015.2493538,
+83b4899d2899dd6a8d956eda3c4b89f27f1cd308,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_ICIP_2007/pdfs/0100377.pdf,,https://doi.org/10.1109/ICIP.2007.4378970,
+83f3491249f1ec8b546267f53449686754f2f7fd,,,,
+830e5b1043227fe189b3f93619ef4c58868758a7,http://pdfs.semanticscholar.org/830e/5b1043227fe189b3f93619ef4c58868758a7.pdf,,https://doi.org/10.1016/j.cviu.2015.03.015,http://ibug.doc.ic.ac.uk/media/uploads/documents/face_detection_survey.pdf
+8323af714efe9a3cadb31b309fcc2c36c8acba8f,http://pdfs.semanticscholar.org/8323/af714efe9a3cadb31b309fcc2c36c8acba8f.pdf,,,http://mplab.ucsd.edu/~jake/thesis.pdf
+831226405bb255527e9127b84e8eaedd7eb8e9f9,http://pdfs.semanticscholar.org/8312/26405bb255527e9127b84e8eaedd7eb8e9f9.pdf,,,
+83fd5c23204147844a0528c21e645b757edd7af9,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W05/papers/Bulan_USDOT_Number_Localization_2015_CVPR_paper.pdf,,https://doi.org/10.1109/CVPRW.2015.7301301,
+838dad9d1d68d29be280d92e69410eaac40084bc,,,https://doi.org/10.1109/HPCSim.2014.6903749,
+8384e104796488fa2667c355dd15b65d6d5ff957,http://pdfs.semanticscholar.org/feea/803c1eaedc825509e24a8c1279ffe0251d9d.pdf,,,http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2010_0097.pdf
+8323529cf37f955fb3fc6674af6e708374006a28,http://researcher.ibm.com/researcher/files/us-smiyaza/FPIV04.pdf,,https://doi.org/10.1109/CVPR.2004.334,http://www-ee.ccny.cuny.edu/www/web/yltian/Publications/FPIV04.pdf
+8395cf3535a6628c3bdc9b8d0171568d551f5ff0,http://pdfs.semanticscholar.org/8395/cf3535a6628c3bdc9b8d0171568d551f5ff0.pdf,,,http://arxiv.org/abs/1702.04389
+83ac942d71ba908c8d76fc68de6173151f012b38,http://pdfs.semanticscholar.org/83ac/942d71ba908c8d76fc68de6173151f012b38.pdf,,https://doi.org/10.1016/j.patcog.2012.05.017,https://www.cbica.upenn.edu/sbia/Birkan.Tunc/icerik/belgeler/cdfa.pdf
+834f5ab0cb374b13a6e19198d550e7a32901a4b2,http://pdfs.semanticscholar.org/834f/5ab0cb374b13a6e19198d550e7a32901a4b2.pdf,,,https://arxiv.org/pdf/1712.00971v1.pdf
+831a64f59944fa05f023288f284325429026e4e8,,,,
+8320dbdd3e4712cca813451cd94a909527652d63,http://pdfs.semanticscholar.org/d921/1df11080fa5eb0dc1d62fb683b10c055673a.pdf,,,http://www.cs.armstrong.edu/burge/pdf/burge-burger-us.pdf
+83d50257eb4c0aa8d16d27bf2ee8d0614fd63bf6,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2015.7284834
+834b15762f97b4da11a2d851840123dbeee51d33,http://pdfs.semanticscholar.org/834b/15762f97b4da11a2d851840123dbeee51d33.pdf,,,http://sibgrapi.sid.inpe.br/col/sid.inpe.br/sibgrapi/2016/09.13.14.06/doc/Landmark_free_smile_intensity_estimation.pdf
+83bce0907937f09f5ccde26c361d52fe55fc8979,,,,http://doi.acm.org/10.1145/2993148.2993185
+833fa04463d90aab4a9fe2870d480f0b40df446e,http://static.cs.brown.edu/~gen/pub_papers/SUN_Attribute_Database_CVPR2012.pdf,,,http://static.cs.brown.edu/people/gen/pub_papers/SUN_Attribute_Database_CVPR2012.pdf
+833f6ab858f26b848f0d747de502127406f06417,http://mediatum.ub.tum.de/doc/980054/157447.pdf,,https://doi.org/10.1109/ICIP.2009.5413952,http://www.mmk.ei.tum.de/publ/pdf/09/09sto1.pdf
+8309e8f27f3fb6f2ac1b4343a4ad7db09fb8f0ff,http://pdfs.semanticscholar.org/8309/e8f27f3fb6f2ac1b4343a4ad7db09fb8f0ff.pdf,,https://doi.org/10.1007/978-3-642-01793-3_38,http://cvhci.ira.uka.de/download/publications/ICB_2009_241.pdf
+1b8541ec28564db66a08185510c8b300fa4dc793,,,https://doi.org/10.1109/LSP.2015.2499778,
+1b211f8221162ce7ef212956b637b50e30ad48f4,,,https://doi.org/10.1109/ICIP.2016.7532925,
+1b635f494eff2e5501607ebe55eda7bdfa8263b8,http://pdfs.semanticscholar.org/1b63/5f494eff2e5501607ebe55eda7bdfa8263b8.pdf,,,http://crcv.ucf.edu/THUMOS14/papers/USC.pdf
+1b6394178dbc31d0867f0b44686d224a19d61cf4,http://pdfs.semanticscholar.org/ca8e/5419fd570f19643425b24da801283b706fc1.pdf,,https://doi.org/10.1007/978-3-319-16817-3_4,https://hal.archives-ouvertes.fr/hal-01070657/document
+1bd50926079e68a6e32dc4412e9d5abe331daefb,https://pdfs.semanticscholar.org/544d/6cd24db5adad8453033e0cc1aa7d3d6224ab.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2011.6126286
+1b150248d856f95da8316da868532a4286b9d58e,http://pdfs.semanticscholar.org/6724/41000751d58396790f4c993419d70f6af3f4.pdf,,,http://courses.cs.washington.edu/courses/cse590v/13au/car.pdf
+1be498d4bbc30c3bfd0029114c784bc2114d67c0,http://www.openu.ac.il/home/hassner/Adience/EidingerEnbarHassner_tifs.pdf,,https://doi.org/10.1109/TIFS.2014.2359646,
+1be785355ae29e32d85d86285bb8f90ea83171df,http://staff.estem-uc.edu.au/roland/files/2009/05/Sharma_Dhall_Gedeon_Goecke_ACII2013_ModelingStressUsingThermalFacialPatterns_ASpatio-TemporalApproach.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ACII.2013.70
+1b6c65442f2b572fb6c8fc9a7d5ae49a8e6d32ab,,,,http://doi.ieeecomputersociety.org/10.1109/ICPR.2006.537
+1b5acd1736f18e4fa202d88a80f774c6deea5733,,,,
+1b5875dbebc76fec87e72cee7a5263d325a77376,http://arxiv.org/pdf/1603.00560v2.pdf,,,https://arxiv.org/pdf/1603.00560v2.pdf
+1bdfb3deae6e6c0df6537efcd1d7edcb4d7a96e9,http://pdfs.semanticscholar.org/1bdf/b3deae6e6c0df6537efcd1d7edcb4d7a96e9.pdf,,,http://machinelearning.wustl.edu/mlpapers/paper_files/ICML2012Li_214.pdf
+1bba358c9323883ddd54224ad24d2ac4d8218fec,,,,
+1b29f23f3517ac5bbe9bf5e80cda741b61bb9b12,,,https://doi.org/10.1016/j.patcog.2017.01.007,
+1b300a7858ab7870d36622a51b0549b1936572d4,http://www.ee.oulu.fi/research/mvmp/mvg/files/pdf/Yimo-TIP2016.pdf,,https://doi.org/10.1109/TIP.2016.2537215,
+1b90507f02967ff143fce993a5abbfba173b1ed0,http://mrl.cs.vsb.cz/publications/fusek_ipta_2014.pdf,,https://doi.org/10.1109/IPTA.2014.7001946,
+1b794b944fd462a2742b6c2f8021fecc663004c9,https://www.ecse.rpi.edu/~cvrl/wuy/HierarchicalShape/CVPR14_facialfeaturedetection_cameraready.pdf,,,https://arxiv.org/pdf/1709.05732v1.pdf
+1b7ae509c8637f3c123cf6151a3089e6b8a0d5b2,http://pdfs.semanticscholar.org/3a2f/aa145c5fe63ab906568a29fa4100220e03d9.pdf,,,http://www.cs.columbia.edu/~belhumeu/conference/few2many-fg00.pdf
+1b41d4ffb601d48d7a07dbbae01343f4eb8cc38c,https://arxiv.org/pdf/1608.00486v3.pdf,,https://doi.org/10.1109/DICTA.2016.7797039,http://arxiv.org/pdf/1608.00486v1.pdf
+1b1173a3fb33f9dfaf8d8cc36eb0bf35e364913d,http://www.pitt.edu/~jeffcohn/biblio/dicta2010.pdf,,,http://doi.ieeecomputersociety.org/10.1109/DICTA.2010.53
+1b0a071450c419138432c033f722027ec88846ea,http://cvrr.ucsd.edu/publications/2016/YuenMartinTrivediITSC2016.pdf,,https://doi.org/10.1109/ITSC.2016.7795622,
+1b60b8e70859d5c85ac90510b370b501c5728620,http://pdfs.semanticscholar.org/1b60/b8e70859d5c85ac90510b370b501c5728620.pdf,,https://doi.org/10.1007/978-3-642-33191-6_39,https://personalpages.manchester.ac.uk/staff/timothy.f.cootes/Projects/Toyota/Downloads/Caunce_ISVC12.pdf
+1b3b01513f99d13973e631c87ffa43904cd8a821,http://pdfs.semanticscholar.org/1b3b/01513f99d13973e631c87ffa43904cd8a821.pdf,,https://doi.org/10.1109/ICASSP.2003.1199141,http://landabaso.org/publications/icassp-03-landabaso.pdf
+1bc214c39536c940b12c3a2a6b78cafcbfddb59a,http://pdfs.semanticscholar.org/1bc2/14c39536c940b12c3a2a6b78cafcbfddb59a.pdf,,https://doi.org/10.5220/0005723700490058,http://www.diva-portal.org/smash/get/diva2:944056/FULLTEXT01.pdf
+1b4b3d0ce900996a6da8928e16370e21d15ed83e,,,https://doi.org/10.1109/BigDataService.2017.38,
+1bc9aaa41c08bbd0c01dd5d7d7ebf3e48ae78113,http://pdfs.semanticscholar.org/1bc9/aaa41c08bbd0c01dd5d7d7ebf3e48ae78113.pdf,,https://doi.org/10.3390/e20010060,http://www.mdpi.com/1099-4300/20/1/60/pdf
+1b9976fea3c1cf13f0a102a884f027d9d80a14b3,,,https://doi.org/10.1109/ROMAN.2014.6926354,
+1ba9d12f24ac04f0309e8ff9b0162c6e18d97dc3,,,,http://doi.acm.org/10.1145/2964284.2984061
+1be18a701d5af2d8088db3e6aaa5b9b1d54b6fd3,http://pdfs.semanticscholar.org/1be1/8a701d5af2d8088db3e6aaa5b9b1d54b6fd3.pdf,,,https://www.int-arch-photogramm-remote-sens-spatial-inf-sci.net/XLII-2-W4/237/2017/isprs-archives-XLII-2-W4-237-2017.pdf
+1bcb1c6d6cebc9737f9933fcefbf3da8a612f994,,,https://doi.org/10.1016/j.jvcir.2017.10.008,
+1b79628af96eb3ad64dbb859dae64f31a09027d5,http://pdfs.semanticscholar.org/1b79/628af96eb3ad64dbb859dae64f31a09027d5.pdf,,,http://csjarchive.cogsci.rpi.edu/2006v30/1/s15516709HCOG0000_48/s15516709HCOG0000_48.pdf
+1bcbf2a4500d27d036e0f9d36d7af71c72f8ab61,http://mplab.ucsd.edu/~marni/pubs/Bartlett_CVPR05.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2005.297
+1bddad4dc0dfa8efa402aa5d18c29304a5760f12,https://www.researchgate.net/profile/Iickho_Song/publication/254062033_Complexity-Reduced_Scheme_for_Feature_Extraction_With_Linear_Discriminant_Analysis/links/53d694ce0cf228d363ea69d5.pdf,,https://doi.org/10.1109/TNNLS.2012.2194793,https://www.researchgate.net/profile/Iickho_Song/publication/254062033_Complexity-Reduced_Scheme_for_Feature_Extraction_With_Linear_Discriminant_Analysis/links/53d694ce0cf228d363ea69d5.pdf?inViewer=0&origin=publication_detail&pdfJsDownload=0
+1b70bbf7cdfc692873ce98dd3c0e191580a1b041,http://pdfs.semanticscholar.org/1b70/bbf7cdfc692873ce98dd3c0e191580a1b041.pdf,,,https://www.irjet.net/archives/V3/i10/IRJET-V3I1096.pdf
+1bd8ab47177997acb3b0cca4b6a801e6e6ec3eac,,,https://doi.org/10.1109/ICIP.2014.7025273,
+1bc23c771688109bed9fd295ce82d7e702726327,http://pdfs.semanticscholar.org/1bc2/3c771688109bed9fd295ce82d7e702726327.pdf,,,https://www.ideals.illinois.edu/bitstream/handle/2142/29816/Yang_Jianchao.pdf?sequence=1
+1bad8a9640cdbc4fe7de12685651f44c4cff35ce,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2013/W08/papers/Gourgari_THETIS_Three_Dimensional_2013_CVPR_paper.pdf,,,http://www.image.ece.ntua.gr/papers/783.pdf
+1b589016fbabe607a1fb7ce0c265442be9caf3a9,http://pdfs.semanticscholar.org/5efe/b55fe3f03cd16aa0c268d74a5ad2e03170cf.pdf,,,http://library.allanschore.com/docs/PerceptualExpertPollak09.pdf
+1be0ce87bb5ba35fa2b45506ad997deef6d6a0a8,http://pdfs.semanticscholar.org/b1c8/4ab7cc0c85e8aa8be4c0ec32bad225c9c630.pdf,,,https://arxiv.org/pdf/1312.5785v3.pdf
+1b4bc7447f500af2601c5233879afc057a5876d8,https://www.ecse.rpi.edu/~cvrl/Publication/pdf/Wang2015g.pdf,,,http://doi.acm.org/10.1145/2671188.2749311
+1b27ca161d2e1d4dd7d22b1247acee5c53db5104,http://pdfs.semanticscholar.org/1b27/ca161d2e1d4dd7d22b1247acee5c53db5104.pdf,,,http://atvs.ii.uam.es/audias/files/2015_FSI_ForensicFacialFeat_PTome_final.pdf
+1b2d9a1c067f692dd48991beff03cd62b9faebf2,,,https://doi.org/10.1109/ICIP.2011.6116302,
+1b69b860e22278a6f482507b8ce879082dd00c44,http://www.cs.utexas.edu/~chaoyeh/cvpr_2014_Inferring_Analogous_Attributes.pdf,,,http://vision.cs.utexas.edu/projects/inferring_analogous_attribute/inferring-analogous-attributes-poster.pdf
+7711a7404f1f1ac3a0107203936e6332f50ac30c,http://pdfs.semanticscholar.org/7711/a7404f1f1ac3a0107203936e6332f50ac30c.pdf,,,https://arxiv.org/pdf/1708.09522v1.pdf
+7782627fa2e545276996ff9e9a1686ac496df081,,,,http://doi.acm.org/10.1145/2663204.2666276
+771a6a80dd08212d83a4e976522e1ce108881401,,,https://doi.org/10.1109/IPTA.2016.7820979,
+7755bac678027f23fe59e13119182a9c7c18f9f7,,,,
+77652e55f73539df94f03489544504874f96d25e,,,,
+7701952e405c3d8a0947e2a309de281aa76bd3f4,http://isl.ira.uka.de/~stiefel/papers/IEE_SIU_2LDA.pdf,,,http://face.cs.kit.edu/download/publications/IEE_SIU_2LDA.pdf
+778c9f88839eb26129427e1b8633caa4bd4d275e,http://www.cs.berkeley.edu/~nzhang/papers/cvpr12_ppk.pdf,,,http://www.icsi.berkeley.edu/pubs/vision/ICSI_posepooling12.pdf
+7735f63e5790006cb3d989c8c19910e40200abfc,http://pdfs.semanticscholar.org/7735/f63e5790006cb3d989c8c19910e40200abfc.pdf,,,https://www.imaging.utk.edu/publications/papers/dissertation/2008-dec-thesis-hchang.pdf
+77223849321d57a03e0571a08e71eba06e38834a,,,,http://doi.ieeecomputersociety.org/10.1109/EMS.2011.20
+77b1db2281292372c38926cc4aca32ef056011dc,http://pdfs.semanticscholar.org/77b1/db2281292372c38926cc4aca32ef056011dc.pdf,,,https://www2.bc.edu/~russeljm/publications/adjectives.pdf
+77c5437107f8138d48cb7e10b2b286fa51473678,,,https://doi.org/10.1109/URAI.2016.7734005,
+776835eb176ed4655d6e6c308ab203126194c41e,http://pdfs.semanticscholar.org/7768/35eb176ed4655d6e6c308ab203126194c41e.pdf,,https://doi.org/10.1109/TMM.2008.921737,http://www.ifp.illinois.edu/~zhzeng/Trans_Multimedia_2008.pdf
+77c53ec6ea448db4dad586e002a395c4a47ecf66,http://pdfs.semanticscholar.org/77c5/3ec6ea448db4dad586e002a395c4a47ecf66.pdf,,,https://www.researchgate.net/profile/Muhammad_Sharif9/publication/236953573_Face_Recognition_Based_on_Facial_Features/links/0deec5235811606ab6000000.pdf
+77c3574a020757769b2ca807ff4b95a88eaa2a37,,,https://doi.org/10.1109/MSP.2015.2410783,
+77cea27494499dd162221d1476bf70a87391790a,,,https://doi.org/10.1109/VCIP.2015.7457930,
+778bff335ae1b77fd7ec67404f71a1446624331b,http://pdfs.semanticscholar.org/778b/ff335ae1b77fd7ec67404f71a1446624331b.pdf,,https://doi.org/10.1007/978-3-642-35749-7_15,http://www.vision.ee.ethz.ch/publications/papers/proceedings/eth_biwi_00743.pdf
+77816b9567d5fed1f6085f33e1ddbcc73af2010e,,,https://doi.org/10.1109/MRA.2012.2201574,
+7726a6ab26a1654d34ec04c0b7b3dd80c5f84e0d,https://graphics.ethz.ch/Downloads/Publications/Papers/2013/Zun13a/Zun13a.pdf,,https://doi.org/10.1109/ICIP.2013.6738380,http://www.disneyresearch.com/wp-content/uploads/Content-Aware-Compression-using-Saliency-Driven-Image-Retargeting-for-Wireless-Video.pdf
+774cbb45968607a027ae4729077734db000a1ec5,http://pdfs.semanticscholar.org/774c/bb45968607a027ae4729077734db000a1ec5.pdf,,https://doi.org/10.5244/C.27.14,http://vision.cornell.edu/se3/wp-content/uploads/2014/09/utribes_bmvc13_final.pdf
+77362789d04db4c51be61eaffa4f43e03759e677,,,,
+772a30f1a7a3071e5ce6ad4b0dbddc67889f5873,,,,
+7754b708d6258fb8279aa5667ce805e9f925dfd0,https://www.ecse.rpi.edu/~qji/Papers/PAMI_AU.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2007.1094
+77037a22c9b8169930d74d2ce6f50f1a999c1221,https://ueaeprints.uea.ac.uk/64308/1/Accepted_manuscript.pdf,,https://doi.org/10.1109/TIP.2017.2716180,
+778c1e95b6ea4ccf89067b83364036ab08797256,,,https://doi.org/10.1109/TIFS.2012.2224866,
+779ad364cae60ca57af593c83851360c0f52c7bf,http://pdfs.semanticscholar.org/779a/d364cae60ca57af593c83851360c0f52c7bf.pdf,,,http://www.dcc.ufla.br/infocomp/artigos/v8.3/art09.pdf
+77a9b1856ebbc9a6170ee4c572a515d6db062cef,http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/1291.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2009.5206654
+7753e3b9e158289cbaa22203166424ca9c229f68,,,,http://doi.ieeecomputersociety.org/10.1109/ICDM.2014.29
+7792fbc59f3eafc709323cdb63852c5d3a4b23e9,http://pdfs.semanticscholar.org/7792/fbc59f3eafc709323cdb63852c5d3a4b23e9.pdf,,,http://arxiv.org/abs/1609.05420
+77869f274d4be4d4b4c438dbe7dff4baed521bd8,,,https://doi.org/10.1109/TIP.2016.2551362,
+77be118034a700e5b7d9633f50f6fbb7fabec8ef,,,,
+77d31d2ec25df44781d999d6ff980183093fb3de,http://openaccess.thecvf.com/content_cvpr_2016/supplemental/Littwin_The_Multiverse_Loss_2016_CVPR_supplemental.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Littwin_The_Multiverse_Loss_CVPR_2016_paper.pdf
+77e747b12d22827fa84f506eefdac4ec37948359,,,,
+7767059c935fb773d5e6f559b9eca6e72caa456d,,,,
+773ce00841a23d32727aa1f54c29865fefd4ce02,,,,http://doi.ieeecomputersociety.org/10.1109/AIPR.2006.24
+772474b5b0c90629f4d9c223fd9c1ef45e1b1e66,,,https://doi.org/10.1109/BTAS.2017.8272716,
+77fb9e36196d7bb2b505340b6b94ba552a58b01b,http://pdfs.semanticscholar.org/77fb/9e36196d7bb2b505340b6b94ba552a58b01b.pdf,,,https://arxiv.org/pdf/1710.02310v1.pdf
+486840f4f524e97f692a7f6b42cd19019ee71533,https://arxiv.org/pdf/1703.08388v2.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.197
+48af47406ec14b561a9cdfafc5b8bdfdc746eb8a,,,,
+48463a119f67ff2c43b7c38f0a722a32f590dfeb,http://pdfs.semanticscholar.org/4846/3a119f67ff2c43b7c38f0a722a32f590dfeb.pdf,,,http://research.ijcaonline.org/volume52/number4/pxc3881569.pdf
+488d3e32d046232680cc0ba80ce3879f92f35cac,http://pdfs.semanticscholar.org/488d/3e32d046232680cc0ba80ce3879f92f35cac.pdf,,,https://www.researchgate.net/profile/Hamid_Sadeghi6/publication/269694109_Facial_Expression_Recognition_Using_Texture_Description_of_Displacement_Image/links/549264c80cf2ac83c53dc1b8.pdf
+480858e55abdbc07ca47b7dc10204613fdd9783c,,,https://doi.org/10.1109/ICPR.2014.786,
+486a82f50835ea888fbc5c6babf3cf8e8b9807bc,http://pdfs.semanticscholar.org/486a/82f50835ea888fbc5c6babf3cf8e8b9807bc.pdf,,,http://arxiv.org/pdf/1507.07242v1.pdf
+48901e44cd3e17efcfc9866982f8bd7b2c26b99d,,,,
+48a6a1c6a0ac5f2b7912b3ccb40b0c07f62ddfdf,,,https://doi.org/10.1016/j.imavis.2015.12.003,
+48d18b5f17672af694f0f5b5ec577516dbf697f4,,,,
+487f9ab19ca6779a014278d93f3e56ff82dac2e3,,,,
+48fea82b247641c79e1994f4ac24cad6b6275972,http://wan.poly.edu/KDD2012/docs/p1469.pdf,,,http://doi.acm.org/10.1145/2339530.2339760
+480ab25eba799b59e0a1a51021c5126c88a58a0c,,,,
+48734cb558b271d5809286447ff105fd2e9a6850,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w41/papers/Mahoor_Facial_Expression_Recognition_CVPR_2017_paper.pdf,,,https://arxiv.org/pdf/1705.07871v1.pdf
+48a417cfeba06feb4c7ab30f06c57ffbc288d0b5,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Chen_Robust_Dictionary_Learning_2013_ICCV_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2013.276
+48853c25dc75481b0c77f408a8a76383287ebe2a,http://qil.uh.edu/qil/websitecontent/pdf/2015-45.pdf,,https://doi.org/10.1109/IJCB.2011.6117477,http://www.csis.pace.edu/~ctappert/dps/2011IJCB/papers/322.pdf
+489b7e12a420eff0d585f3f866e76b838c2cd275,,,,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477675
+48dcf45a1e38adbb9826594f7ffaa5e95ef78395,,,https://doi.org/10.1109/VCIP.2017.8305111,
+48db8bf18e2f6f19e07e88384be855c8b7ea0ead,,,,http://doi.acm.org/10.1145/2964284.2967225
+4848a48a2b8bacd2092e87961cd86818da8e7151,,,https://doi.org/10.1109/VCIP.2017.8305080,
+48c41ffab7ff19d24e8df3092f0b5812c1d3fb6e,http://www.iri.upc.edu/files/scidoc/1938-Multi-Modal-Embedding-for-Main-Product-Detection-in-Fashion.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.261
+488a61e0a1c3768affdcd3c694706e5bb17ae548,http://pdfs.semanticscholar.org/916b/f08e66c3dd11bec809dd8cbe384e8860bb66.pdf,,https://doi.org/10.1007/978-3-319-54427-4_28,https://arxiv.org/pdf/1602.01125v1.pdf
+48910f9b6ccc40226cd4f105ed5291571271b39e,http://pdfs.semanticscholar.org/4891/0f9b6ccc40226cd4f105ed5291571271b39e.pdf,,,http://machinelearning.wustl.edu/mlpapers/paper_files/ICML2011Van_178.pdf
+48a9241edda07252c1aadca09875fabcfee32871,https://arxiv.org/pdf/1611.08657v5.pdf,,,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Zadeh_Convolutional_Experts_Constrained_CVPR_2017_paper.pdf
+48f0055295be7b175a06df5bc6fa5c6b69725785,http://pdfs.semanticscholar.org/48f0/055295be7b175a06df5bc6fa5c6b69725785.pdf,,,http://www.ijcaonline.org/archives/volume96/number19/16904-6971?format=pdf
+48729e4de8aa478ee5eeeb08a72a446b0f5367d5,http://faculty.ucmerced.edu/mhyang/papers/icip14_cfh.pdf,,https://doi.org/10.1109/ICIP.2014.7025819,
+48174c414cfce7f1d71c4401d2b3d49ba91c5338,http://pdfs.semanticscholar.org/4817/4c414cfce7f1d71c4401d2b3d49ba91c5338.pdf,,,https://arxiv.org/pdf/1507.02779v1.pdf
+48255c9e1d6e1d030728d33a71699757e337be08,,,https://doi.org/10.1109/ISSNIP.2013.6529832,
+4863333b9e5f25423e273a0581de3edee8bb3b97,,,,
+488375ae857a424febed7c0347cc9590989f01f7,http://pdfs.semanticscholar.org/4883/75ae857a424febed7c0347cc9590989f01f7.pdf,,,http://users.ics.forth.gr/~tsakalid/PAPERS/CNFRS/2018-EI3-Greg.pdf
+48121f5937accc8050b0c9bf2be6d1c58b07a8a0,,,,
+4836b084a583d2e794eb6a94982ea30d7990f663,http://pdfs.semanticscholar.org/4836/b084a583d2e794eb6a94982ea30d7990f663.pdf,,,http://arxiv.org/abs/1611.06642
+4866a5d6d7a40a26f038fc743e16345c064e9842,http://pdfs.semanticscholar.org/4866/a5d6d7a40a26f038fc743e16345c064e9842.pdf,,https://doi.org/10.1016/j.patcog.2012.09.005,https://www.researchgate.net/profile/Xutao_Li2/publication/256822697_Stratified_sampling_for_feature_subspace_selection_in_random_forests_for_high_dimensional_data/links/00b7d538f49502957b000000.pdf
+48906f609446afcdaacbe1d65770d7a6165a8eee,,,https://doi.org/10.1007/s12559-017-9482-4,
+48cf1105eca8049e8625c5b30a69620b2381589c,,,,
+4805f41c4f8cfb932b011dfdd7f8907152590d1a,http://www.affectiva.com/wp-content/uploads/2014/09/From_Dials_to_Facial_Coding_Automated_Detection_of_Spontaneous_Facial_Expressions_fo.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2013.6553780
+488e475eeb3bb39a145f23ede197cd3620f1d98a,http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf,,,
+487df616e981557c8e1201829a1d0ec1ecb7d275,http://www.citi.sinica.edu.tw/papers/yu.tsao/4293-F.pdf,,https://doi.org/10.1109/LSP.2014.2360099,
+48f211a9764f2bf6d6dda4a467008eda5680837a,http://www.lv-nus.org/papers/2011/iccv2011-occupation.pdf,,,http://www.lv-nus.org/%5C/papers/2011/iccv2011-occupation.pdf
+4858d014bb5119a199448fcd36746c413e60f295,http://pdfs.semanticscholar.org/4858/d014bb5119a199448fcd36746c413e60f295.pdf,,https://doi.org/10.5244/C.27.28,http://www.bmva.org/bmvc/2013/Papers/paper0028/abstract0028.pdf
+48319e611f0daaa758ed5dcf5a6496b4c6ef45f2,http://pdfs.semanticscholar.org/4831/9e611f0daaa758ed5dcf5a6496b4c6ef45f2.pdf,,,http://arxiv.org/abs/1411.0442
+48cfc5789c246c6ad88ff841701204fc9d6577ed,http://pdfs.semanticscholar.org/48cf/c5789c246c6ad88ff841701204fc9d6577ed.pdf,,https://doi.org/10.3745/JIPS.02.0043,http://jips.jatsxml.org/upload/pdf/jips-12-3-392.pdf
+481fb0a74528fa7706669a5cce6a212ac46eaea3,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Chen_Recognizing_RGB_Images_2014_CVPR_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2014.184
+486f5e85944404a1b57333443070b0b8c588c262,,,,http://doi.ieeecomputersociety.org/10.1109/IRI.2014.7051957
+70f189798c8b9f2b31c8b5566a5cf3107050b349,http://www.cs.colostate.edu/~vision/pasc/docs/pasc2013_NISTIR_061013.pdf,,https://doi.org/10.1109/BTAS.2013.6712704,https://www3.nd.edu/~kwb/Beveridge_EtAl_BTAS_2013.pdf
+7049187c5155d9652747413ce1ebc8dbb209fd69,,,https://doi.org/10.1109/ICPR.2016.7899808,
+70580ed8bc482cad66e059e838e4a779081d1648,http://pdfs.semanticscholar.org/7058/0ed8bc482cad66e059e838e4a779081d1648.pdf,,,http://www.uni-obuda.hu/journal/Khan_Nazir_Riaz_42.pdf
+70769def1284fe88fd57a477cde8a9c9a3dff13f,,,https://doi.org/10.1016/j.neucom.2006.10.036,
+70341f61dfe2b92d8607814b52dfd0863a94310e,,,,http://doi.ieeecomputersociety.org/10.1109/AVSS.2015.7301750
+703890b7a50d6535900a5883e8d2a6813ead3a03,http://pdfs.semanticscholar.org/7038/90b7a50d6535900a5883e8d2a6813ead3a03.pdf,,https://doi.org/10.1016/j.patcog.2015.04.025,http://wrap.warwick.ac.uk/71508/1/WRAP_8471118-es-200815-fantjahjadi-pr2015.pdf
+70db3a0d2ca8a797153cc68506b8650908cb0ada,http://pdfs.semanticscholar.org/70db/3a0d2ca8a797153cc68506b8650908cb0ada.pdf,,https://doi.org/10.1007/978-3-319-16181-5_56,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ECCV_2014/workshops/w16/W16-07.pdf
+706236308e1c8d8b8ba7749869c6b9c25fa9f957,http://affect.media.mit.edu/pdfs/11.McDuff-etal-Crowdsourced-2011.pdf,,,http://www.affectiva.com/wp-content/uploads/2014/09/Crowdsourced_Data_Collection_of_Facial_Expressions.pdf
+7002d6fc3e0453320da5c863a70dbb598415e7aa,http://www.cris.ucr.edu/IGERT/papers/SongfanAbstract.pdf,,https://doi.org/10.1109/TSMCB.2012.2192269,http://www.ee.ucr.edu/~syang/attach/Yang_SMC12.pdf
+7071cd1ee46db4bc1824c4fd62d36f6d13cad08a,http://pdfs.semanticscholar.org/7071/cd1ee46db4bc1824c4fd62d36f6d13cad08a.pdf,,,http://shuoyang1213.me/projects/ScaleFace/support/ScaleFace.pdf
+70444627cb765a67a2efba17b0f4b81ce1fc20ff,,,https://doi.org/10.1109/TNNLS.2016.2609434,
+70c2c2d2b7e34ff533a8477eff9763be196cd03a,http://iplab.dmi.unict.it/sites/default/files/_9.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICMEW.2015.7169755
+70c25293e33f5c37143ae20e3b0198a68083a5ed,,,,
+70569810e46f476515fce80a602a210f8d9a2b95,http://www.cv-foundation.org/openaccess/content_cvpr_2016_workshops/w18/papers/Antipov_Apparent_Age_Estimation_CVPR_2016_paper.pdf,,,http://www.eurecom.fr/en/publication/4908/download/sec-publi-4908.pdf
+705a24f4e1766a44bbba7cf335f74229ed443c7b,http://web.ing.puc.cl/~asoto/papers/Maturana-09.pdf,,,http://doi.ieeecomputersociety.org/10.1109/SCCC.2009.21
+70e79d7b64f5540d309465620b0dab19d9520df1,http://pdfs.semanticscholar.org/70e7/9d7b64f5540d309465620b0dab19d9520df1.pdf,,,https://www.ijser.org/researchpaper/Facial-Expression-Recognition-System-Using-Extreme-Learning-Machine.pdf
+70516aede32cf0dbc539abd9416c44faafc868bd,,,https://doi.org/10.1109/MICAI.2013.16,
+7003d903d5e88351d649b90d378f3fc5f211282b,http://pdfs.semanticscholar.org/7003/d903d5e88351d649b90d378f3fc5f211282b.pdf,,,http://research.ijcaonline.org/volume68/number23/pxc3887290.pdf
+703c9c8f20860a1b1be63e6df1622b2021b003ca,http://openaccess.thecvf.com/content_ICCV_2017/papers/Kobayashi_Flip-Invariant_Motion_Representation_ICCV_2017_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2017.600
+70a69569ba61f3585cd90c70ca5832e838fa1584,http://pdfs.semanticscholar.org/70a6/9569ba61f3585cd90c70ca5832e838fa1584.pdf,,https://doi.org/10.1007/978-3-319-13737-7_11,http://personal.ee.surrey.ac.uk/Personal/R.Bowden/publications/2014/Marter_ICPR_2014.pdf
+7085d21f483743007cc6a8e3fa01d8bdf592ad33,http://www.meeting.edu.cn/meeting/UploadPapers/1282699022328.pdf,,,
+70bf1769d2d5737fc82de72c24adbb7882d2effd,http://pdfs.semanticscholar.org/70bf/1769d2d5737fc82de72c24adbb7882d2effd.pdf,,https://doi.org/10.1007/978-3-319-00566-9_25,http://mmi.tudelft.nl/sites/default/files/Face%20detection%20in%20intelligent%20ambiences%20with%20colored%20illumination.pdf
+7081958a390d3033f5f33e22bbfec7055ea8d601,,,https://doi.org/10.1109/MCI.2015.2437318,
+70d8bda4aafb0272ac4b93cd43e2448446b8e94d,,,https://doi.org/10.1109/ICMLC.2010.5580938,
+705e086bb666d129a6969882cfa49282116a638e,,,https://doi.org/10.1109/TNNLS.2014.2376963,
+70d0bffa288e317bc62376f4f577c5bd7712e521,,,https://doi.org/10.1049/iet-cvi.2012.0094,
+70d2f5e897086b8d3914f8fa1d9e479d71597e96,,,,
+1e5ca4183929929a4e6f09b1e1d54823b8217b8e,http://pdfs.semanticscholar.org/1e5c/a4183929929a4e6f09b1e1d54823b8217b8e.pdf,,,http://summit.sfu.ca/system/files/iritems1/17453/etd10244_ZZhao.pdf
+1e058b3af90d475bf53b3f977bab6f4d9269e6e8,http://pdfs.semanticscholar.org/30b9/7c36bcb99e857cd78fc55e2600d7851dc117.pdf,,,http://icml.cc/2012//papers/94.pdf
+1e799047e294267087ec1e2c385fac67074ee5c8,http://pdfs.semanticscholar.org/1e79/9047e294267087ec1e2c385fac67074ee5c8.pdf,,,http://doi.ieeecomputersociety.org/10.1109/34.817413
+1ef4815f41fa3a9217a8a8af12cc385f6ed137e1,https://www.d2.mpi-inf.mpg.de/sites/default/files/wood2015_iccv.pdf,,,http://arxiv.org/abs/1505.05916
+1eb4ea011a3122dc7ef3447e10c1dad5b69b0642,http://pdfs.semanticscholar.org/1eb4/ea011a3122dc7ef3447e10c1dad5b69b0642.pdf,,,https://www2.eecs.berkeley.edu/Pubs/TechRpts/2016/EECS-2016-132.pdf
+1e7ae86a78a9b4860aa720fb0fd0bdc199b092c3,http://pdfs.semanticscholar.org/1e7a/e86a78a9b4860aa720fb0fd0bdc199b092c3.pdf,,https://doi.org/10.3390/s18020401,
+1e8eee51fd3bf7a9570d6ee6aa9a09454254689d,http://www.cse.msu.edu/rgroups/biometrics/Publications/Face/WangOttoJain_FaceSearchAtScale_TPAMI.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2016.2582166
+1ea8085fe1c79d12adffb02bd157b54d799568e4,http://pdfs.semanticscholar.org/1ea8/085fe1c79d12adffb02bd157b54d799568e4.pdf,,,https://vision.cornell.edu/se3/wp-content/uploads/2014/09/eccv96.pdf
+1ebdfceebad642299e573a8995bc5ed1fad173e3,http://pdfs.semanticscholar.org/1ebd/fceebad642299e573a8995bc5ed1fad173e3.pdf,,https://doi.org/10.1016/j.cviu.2015.10.005,http://imag.pub.ro/~bionescu/index_files/Mironica_CVIU_2015.pdf
+1eec03527703114d15e98ef9e55bee5d6eeba736,http://pdfs.semanticscholar.org/1eec/03527703114d15e98ef9e55bee5d6eeba736.pdf,,,https://cvhci.anthropomatik.kit.edu/~stiefel/diplomarbeiten/DA_MikaFischer.pdf
+1e07500b00fcd0f65cf30a11f9023f74fe8ce65c,http://vijaychan.github.io/Publications/2015%20ICIP%20-%20Whole%20Subspace%20Discriminant%20Analysis%20for%20Face%20Recognition.pdf,,https://doi.org/10.1109/ICIP.2015.7350814,
+1e19ea6e7f1c04a18c952ce29386252485e4031e,http://pdfs.semanticscholar.org/1e19/ea6e7f1c04a18c952ce29386252485e4031e.pdf,,,http://www.iasir.net/IJETCASpapers/IJETCAS12-205.pdf
+1ec98785ac91808455b753d4bc00441d8572c416,https://www.cl.cam.ac.uk/~tb346/pub/papers/fg2017_curriculum.pdf,,,http://multicomp.cs.cmu.edu/wp-content/uploads/2017/10/2017_FG_Gui_Curriculum.pdf
+1ed6c7e02b4b3ef76f74dd04b2b6050faa6e2177,http://pdfs.semanticscholar.org/6433/c412149382418ccd8aa966aa92973af41671.pdf,,,http://arxiv.org/pdf/1404.3596v5.pdf
+1eba6fc35a027134aa8997413647b49685f6fbd1,https://ubicomp-mental-health.github.io/papers/voss-glass.pdf,,,http://doi.acm.org/10.1145/2968219.2968310
+1e1d7cbbef67e9e042a3a0a9a1bcefcc4a9adacf,http://personal.stevens.edu/~hli18//data/papers/CVPR2016_CameraReady.pdf,,,http://users.eecs.northwestern.edu/~xsh835/assets/cvpr2016_peoplerecognition.pdf
+1ef5ce743a44d8a454dbfc2657e1e2e2d025e366,http://pdfs.semanticscholar.org/1ef5/ce743a44d8a454dbfc2657e1e2e2d025e366.pdf,,,http://globaljournals.org/GJCST_Volume11/3-Accurate-Corner-Detection-Methods-using-Two-Step-Approach.pdf
+1e2770ce52d581d9a39642b40bfa827e3abf7ea2,,,,http://doi.acm.org/10.1145/2425333.2425362
+1eb48895d86404251aa21323e5a811c19f9a55f9,,,,http://doi.ieeecomputersociety.org/10.1109/CIS.2015.22
+1e58d7e5277288176456c66f6b1433c41ca77415,http://pdfs.semanticscholar.org/1e58/d7e5277288176456c66f6b1433c41ca77415.pdf,,,http://static.cs.brown.edu/people/gen/pub_papers/nips_workshop_2013.pdf
+1e5a1619fe5586e5ded2c7a845e73f22960bbf5a,https://arxiv.org/pdf/1509.04783v1.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.446
+1e213b03e1b8a6067bf37503904491e98b9e42df,http://figment.cse.usf.edu/~sfefilat/data/papers/TuAT10.9.pdf,,https://doi.org/10.1109/ICPR.2008.4761433,
+1e8fd77d4717e9cb6079e10771dd2ed772098cb3,,,,http://doi.ieeecomputersociety.org/10.1109/ICMEW.2016.7574681
+1e7c73602e6a17986b2e66ef411748056acf2545,,,,
+1e9f1bbb751fe538dde9f612f60eb946747defaa,http://pdfs.semanticscholar.org/1e9f/1bbb751fe538dde9f612f60eb946747defaa.pdf,,,http://vision.cs.tut.fi/data/publications/jsee2017.pdf
+1e917fe7462445996837934a7e46eeec14ebc65f,http://pdfs.semanticscholar.org/1e91/7fe7462445996837934a7e46eeec14ebc65f.pdf,,,https://www.ri.cmu.edu/pub_files/pub4/teng_kenny_2006_1/teng_kenny_2006_1.pdf
+1e8394cc9fe7c2392aa36fb4878faf7e78bbf2de,https://arxiv.org/pdf/1410.3748v1.pdf,,https://doi.org/10.1109/THMS.2014.2358649,http://arxiv.org/abs/1410.3748
+1ef4aac0ebc34e76123f848c256840d89ff728d0,http://www.openu.ac.il/home/hassner/projects/augmented_faces/Masietal2017rapid.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.76
+1ecb56e7c06a380b3ce582af3a629f6ef0104457,http://pdfs.semanticscholar.org/1ecb/56e7c06a380b3ce582af3a629f6ef0104457.pdf,,https://doi.org/10.20965/jaciii.2004.p0002,
+1e62ca5845a6f0492574a5da049e9b43dbeadb1b,,,https://doi.org/10.1109/LSP.2016.2637400,
+1e64b2d2f0a8a608d0d9d913c4baee6973995952,http://sergioescalera.com/wp-content/uploads/2017/06/FG_presentation.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.106
+1ee27c66fabde8ffe90bd2f4ccee5835f8dedbb9,http://pdfs.semanticscholar.org/1ee2/7c66fabde8ffe90bd2f4ccee5835f8dedbb9.pdf,,,http://www.iro.umontreal.ca/~lisa/pointeurs/entropy_regularization_2006.pdf
+1e344b99583b782e3eaf152cdfa15f217b781181,,,,http://doi.acm.org/10.1145/2499788.2499789
+1eb9c859ff7537182a25556635954bcd11830822,,,https://doi.org/10.1109/ICDSP.2015.7252004,
+1ef6ad9e1742d0b2588deaf506ef83b894fb9956,,,https://doi.org/10.1007/s12193-016-0213-z,
+1ed617d14dbc53b20287d3405b14c68d8dad3965,,,https://doi.org/10.1109/TCYB.2016.2582918,
+1ec73ee49e422b4509c016ce244822144c849089,,,,
+1e41a3fdaac9f306c0ef0a978ae050d884d77d2a,http://www.cs.huji.ac.il/~daphna/course/CoursePapers/SerreEtAl%20PAMI2007.pdf,,,http://cbcl.mit.edu/publications/ps/serre-wolf-poggio-PAMI-07.pdf
+1ed49161e58559be399ce7092569c19ddd39ca0b,,,https://doi.org/10.1109/ICPR.2016.7899973,
+1eeb39d618f5fab243dd07b955a8e0e722f6dfdb,,,,
+1e94cc91c5293c8fc89204d4b881552e5b2ce672,http://pdfs.semanticscholar.org/5893/7d427ff36e1470b18120245148355047e4ea.pdf,,,https://www.ijcai.org/Proceedings/16/Papers/289.pdf
+1e1e66783f51a206509b0a427e68b3f6e40a27c8,http://pdfs.semanticscholar.org/1e1e/66783f51a206509b0a427e68b3f6e40a27c8.pdf,,,http://sugiyama-www.cs.titech.ac.jp/~sugi/2010/VISAPP2010.pdf
+1eb1fdc5c933d2483ba1acbfa8c457fae87e71e5,,,https://doi.org/10.1109/ICPR.2016.7899945,
+1ea4347def5868c622d7ce57cbe171fa68207e2b,,,https://doi.org/10.1007/978-3-642-41181-6_23,
+1e0add381031245b1d5129b482853ee738b498e1,http://eprints.pascal-network.org/archive/00001829/01/CVPR05_Romdhani.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2005.145
+1e8eec6fc0e4538e21909ab6037c228547a678ba,http://pdfs.semanticscholar.org/1e8e/ec6fc0e4538e21909ab6037c228547a678ba.pdf,,,http://www.imperial.ac.uk/pls/portallive/docs/1/18619709.PDF
+1e6ed6ca8209340573a5e907a6e2e546a3bf2d28,http://arxiv.org/pdf/1607.01450v1.pdf,,,https://arxiv.org/pdf/1607.01450v1.pdf
+84fe5b4ac805af63206012d29523a1e033bc827e,http://pdfs.semanticscholar.org/84fe/5b4ac805af63206012d29523a1e033bc827e.pdf,,https://doi.org/10.1016/j.neucom.2016.08.139,http://arxiv.org/abs/1611.06203
+84e4b7469f9c4b6c9e73733fa28788730fd30379,http://pdfs.semanticscholar.org/84e4/b7469f9c4b6c9e73733fa28788730fd30379.pdf,,,https://asp-eurasipjournals.springeropen.com/track/pdf/10.1186/s13634-017-0521-9?site=asp-eurasipjournals.springeropen.com
+84dcf04802743d9907b5b3ae28b19cbbacd97981,http://pdfs.semanticscholar.org/84dc/f04802743d9907b5b3ae28b19cbbacd97981.pdf,,,http://arxiv.org/abs/1701.08289
+841bf196ee0086c805bd5d1d0bddfadc87e424ec,http://pdfs.semanticscholar.org/841b/f196ee0086c805bd5d1d0bddfadc87e424ec.pdf,,,http://www.sersc.org/journals/IJSIP/vol5_no4/10.pdf
+849a1d1accafe9e41b7015bf8cf85efe7e742df3,,,,
+842d82081f4b27ca2d4bc05c6c7e389378f0c7b8,http://pdfs.semanticscholar.org/842d/82081f4b27ca2d4bc05c6c7e389378f0c7b8.pdf,,,http://ev.fe.uni-lj.si/1-2-2011/Tkalcic.pdf
+84f3c4937cd006888b82f2eb78e884f2247f0c4e,,,https://doi.org/10.1109/CCNC.2012.6181097,http://cgit.nutn.edu.tw:8080/cgit/PaperDL/LZJ_120807051353.PDF
+841a5de1d71a0b51957d9be9d9bebed33fb5d9fa,http://mx.nthu.edu.tw/~tsunghan/papers/journal%20papers/TIP_PCANet.pdf,,https://doi.org/10.1109/TIP.2015.2475625,http://vision.sysu.edu.cn/vision_sysu/wp-content/uploads/2014/04/PCANet-slides.pdf
+84be18c7683417786c13d59026f30daeed8bd8c9,,,https://doi.org/10.1007/s00138-016-0755-9,
+84d7af78c8dba3cad0380a33511725db4db1a54d,,,,
+84e6669b47670f9f4f49c0085311dce0e178b685,http://pdfs.semanticscholar.org/84e6/669b47670f9f4f49c0085311dce0e178b685.pdf,,,http://arxiv.org/abs/1502.00852
+84f86f8c559a38752ddfb417e58f98e1f8402f17,,,,http://doi.ieeecomputersociety.org/10.1109/EST.2013.10
+844e3e6992c98e53b45e4eb88368d0d6e27fc1d6,,,https://doi.org/10.1109/ICIP.2014.7026057,
+84bc3ca61fc63b47ec3a1a6566ab8dcefb3d0015,http://www.cvip.louisville.edu/wwwcvip/research/publications/Pub_Pdf/2012/BTAS%20144.pdf,,https://doi.org/10.1109/BTAS.2012.6374602,
+84ae55603bffda40c225fe93029d39f04793e01f,,,https://doi.org/10.1109/ICB.2016.7550066,
+84ec0983adb8821f0655f83b8ce47f36896ca9ee,,,https://doi.org/10.1109/SMC.2017.8122985,
+847e07387142c1bcc65035109ccce681ef88362c,http://pdfs.semanticscholar.org/847e/07387142c1bcc65035109ccce681ef88362c.pdf,,https://doi.org/10.1007/978-3-540-24855-2_103,http://vislab.ucr.edu/PUBLICATIONS/pubs/Journal%20and%20Conference%20Papers/after10-1-1997/Conference/2004/Feature%20Synthesis%20Using%20Genetic%20Programming03.pdf
+8411fe1142935a86b819f065cd1f879f16e77401,http://pdfs.semanticscholar.org/8411/fe1142935a86b819f065cd1f879f16e77401.pdf,,,http://airccse.org/journal/ijaia/papers/4613ijaia03.pdf
+843e6f1e226480e8a6872d8fd7b7b2cd74b637a4,http://pdfs.semanticscholar.org/843e/6f1e226480e8a6872d8fd7b7b2cd74b637a4.pdf,,,http://maxwellsci.com/print/rjaset/v4-4724-4728.pdf
+841c99e887eb262e397fdf5b0490a2ae6c82d6b5,,,,
+84f904a71bee129a1cf00dc97f6cdbe1011657e6,http://pdfs.semanticscholar.org/84f9/04a71bee129a1cf00dc97f6cdbe1011657e6.pdf,,,https://kddfashion2017.mybluemix.net/final_submissions/ML4Fashion_paper_19.pdf
+84b4eb66ad75a74f77299f1ecb6aa6305362e8cd,https://www.researchgate.net/profile/Joao_Carvalho8/publication/4285113_A_Learning-based_Eye_Detector_Coupled_with_Eye_Candidate_Filtering_and_PCA_Features/links/0f31752d6b19aa31ec000000.pdf,,,http://doi.ieeecomputersociety.org/10.1109/SIBGRAPI.2007.44
+846c028643e60fefc86bae13bebd27341b87c4d1,http://pdfs.semanticscholar.org/a06f/510ee0f206abc4c44a2b68455d88a1748427.pdf,,https://doi.org/10.1007/11612032_58,http://www.hci.iis.u-tokyo.ac.jp/~ysato/papers/Shimano-ACCV06.pdf
+4a14a321a9b5101b14ed5ad6aa7636e757909a7c,http://openaccess.thecvf.com/content_iccv_2015/papers/Li_Learning_Semi-Supervised_Representation_ICCV_2015_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.317
+4adca62f888226d3a16654ca499bf2a7d3d11b71,http://pdfs.semanticscholar.org/5525/119941f6710fcde85cf71cc2ca25484e78c6.pdf,,,http://www.aclweb.org/anthology/P13-1056
+4aa286914f17cd8cefa0320e41800a99c142a1cd,http://www.vbettadapura.com/egocentric/food/Food-Bettadapura15.pdf,,,http://arxiv.org/abs/1510.02078
+4a9d906935c9de019c61aedc10b77ee10e3aec63,http://openaccess.thecvf.com/content_cvpr_2016/papers/Gupta_Cross_Modal_Distillation_CVPR_2016_paper.pdf,,,https://arxiv.org/pdf/1507.00448.pdf
+4a2d54ea1da851151d43b38652b7ea30cdb6dfb2,http://pdfs.semanticscholar.org/4a2d/54ea1da851151d43b38652b7ea30cdb6dfb2.pdf,,https://doi.org/10.1017/CBO9781107360181.013,https://www.ece.rice.edu/~km23/files/paper/faceRegMotBlurCUP.pdf
+4ae59d2a28abd76e6d9fb53c9e7ece833dce7733,http://pdfs.semanticscholar.org/4ae5/9d2a28abd76e6d9fb53c9e7ece833dce7733.pdf,,https://doi.org/10.1016/j.cosrev.2017.07.002,https://arxiv.org/pdf/1410.1648v5.pdf
+4ab10174a4f98f7e2da7cf6ccfeb9bc64c8e7da8,http://pdfs.semanticscholar.org/4ab1/0174a4f98f7e2da7cf6ccfeb9bc64c8e7da8.pdf,,,http://lrs.icg.tugraz.at/pubs/koestinger_phd_13.pdf
+4a484d97e402ed0365d6cf162f5a60a4d8000ea0,http://pdfs.semanticscholar.org/4a48/4d97e402ed0365d6cf162f5a60a4d8000ea0.pdf,,,https://www.ideals.illinois.edu/bitstream/handle/2142/47409/061_ready.pdf?isAllowed=y&sequence=2
+4a64758786e3f49fc13781304197591ffbd69a6e,http://vicos.fri.uni-lj.si/alesl/files/2008/05/fidlerpami06.pdf,,,http://www.cs.toronto.edu/~fidler/papers/Fidler2006Combining.pdf
+4aa27c1f8118dbb39809a0f79a28c0cbc3ede276,,,,http://doi.acm.org/10.1145/2683483.2683530
+4a4da3d1bbf10f15b448577e75112bac4861620a,http://pdfs.semanticscholar.org/4a4d/a3d1bbf10f15b448577e75112bac4861620a.pdf,,,http://ftp.cs.wisc.edu/computer-vision/repository/PDF/guo.2006.thesis.pdf
+4abd49538d04ea5c7e6d31701b57ea17bc349412,http://resources.mpi-inf.mpg.de/publications/D2/2015/rohrbach15ijcv.pdf,,https://doi.org/10.1007/s11263-015-0851-8,https://arxiv.org/pdf/1502.06648v2.pdf
+4a0f98d7dbc31497106d4f652968c708f7da6692,http://arxiv.org/pdf/1605.05258v1.pdf,,,https://arxiv.org/pdf/1605.05258v1.pdf
+4aabd6db4594212019c9af89b3e66f39f3108aac,http://pdfs.semanticscholar.org/4aab/d6db4594212019c9af89b3e66f39f3108aac.pdf,,,https://pdfs.semanticscholar.org/4aab/d6db4594212019c9af89b3e66f39f3108aac.pdf
+4adb97b096b700af9a58d00e45a2f980136fcbb5,http://pdfs.semanticscholar.org/9ea2/23c070ec9a00f4cb5ca0de35d098eb9a8e32.pdf,,,https://arxiv.org/pdf/1708.03280v1.pdf
+4a5592ae1f5e9fa83d9fa17451c8ab49608421e4,http://sergioescalera.com/wp-content/uploads/2015/08/cha11g-lopezATS.pdf,,,http://doi.acm.org/10.1145/2522848.2532594
+4a1a5316e85528f4ff7a5f76699dfa8c70f6cc5c,http://pdfs.semanticscholar.org/4a1a/5316e85528f4ff7a5f76699dfa8c70f6cc5c.pdf,,,http://b2.cvl.iis.u-tokyo.ac.jp/mva/proceedings/CommemorativeDVD/2005/papers/2005104.pdf
+4ae291b070ad7940b3c9d3cb10e8c05955c9e269,http://www.cl.cam.ac.uk/~pr10/publications/icmi14.pdf,,,http://doi.acm.org/10.1145/2663204.2663258
+4aa8db1a3379f00db2403bba7dade5d6e258b9e9,http://pdfs.semanticscholar.org/4aa8/db1a3379f00db2403bba7dade5d6e258b9e9.pdf,,https://doi.org/10.1007/978-3-642-12127-2_31,http://www.cs.unc.edu/~hadi/mcs_2010.pdf
+4a2062ba576ca9e9a73b6aa6e8aac07f4d9344b9,https://arxiv.org/pdf/1608.01866v1.pdf,,,http://arxiv.org/pdf/1608.01866v1.pdf
+4ac4e8d17132f2d9812a0088594d262a9a0d339b,http://pdfs.semanticscholar.org/4ac4/e8d17132f2d9812a0088594d262a9a0d339b.pdf,,,http://doi.ieeecomputersociety.org/10.1109/AMFG.2003.1240818
+4a03f07397c5d32463750facf010c532f45233a5,,,,http://doi.ieeecomputersociety.org/10.1109/T-AFFC.2012.32
+4abaebe5137d40c9fcb72711cdefdf13d9fc3e62,http://pdfs.semanticscholar.org/4aba/ebe5137d40c9fcb72711cdefdf13d9fc3e62.pdf,,https://doi.org/10.1007/978-3-642-15381-5_5,http://www.lce.hut.fi/~eiparvia/publ/IDEAL2010_cready_Parviainen.pdf
+4aea1213bdb5aa6c74b99fca1afc72d8a99503c6,,,https://doi.org/10.1109/ICDIM.2010.5664688,
+4acd683b5f91589002e6f50885df51f48bc985f4,http://www.albany.edu/faculty/mchang2/files/2015_09_ICIP_Darpa.pdf,,https://doi.org/10.1109/ICIP.2015.7350914,
+4a1d640f5e25bb60bb2347d36009718249ce9230,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Xing_Towards_Multi-view_and_2014_CVPR_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2014.236
+4a4b5ae5793696b861aa009932e7a121d36ad67a,,,,
+4aefd3ffa712a9b7d9db0615d4ee1932de6060d6,,,,
+4aeb87c11fb3a8ad603311c4650040fd3c088832,http://pdfs.semanticscholar.org/4aeb/87c11fb3a8ad603311c4650040fd3c088832.pdf,,https://doi.org/10.24963/ijcai.2017/252,http://www.ijcai.org/proceedings/2017/0252.pdf
+4a7e5a0f6a0df8f5ed25ef356cd67745cd854bea,,,https://doi.org/10.1007/978-3-642-14922-1_68,
+4a3d96b2a53114da4be3880f652a6eef3f3cc035,https://www.micc.unifi.it/wp-content/uploads/2018/01/07932891.pdf,,https://doi.org/10.1109/TMM.2017.2707341,
+4a6fcf714f663618657effc341ae5961784504c7,http://www.cs.tut.fi/~iosifidi/files/journal/2016_TIFS_ACSKDA.pdf?dl=0,,https://doi.org/10.1109/TIFS.2016.2582562,
+24b37016fee57057cf403fe2fc3dda78476a8262,http://pdfs.semanticscholar.org/24b3/7016fee57057cf403fe2fc3dda78476a8262.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICPR.2002.1047404
+243cd27dce38fd756a840b397c28ad21cfb78897,,,https://doi.org/10.1049/iet-ipr.2013.0003,
+24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd,http://www.researchgate.net/profile/Wei_Quan3/publication/221430048_High-resolution_comprehensive_3-D_dynamic_database_for_facial_articulation_analysis/links/0deec534309495805d000000.pdf,,https://doi.org/10.1109/ICCVW.2011.6130511,https://www.researchgate.net/profile/Wei_Quan3/publication/221430048_High-resolution_comprehensive_3-D_dynamic_database_for_facial_articulation_analysis/links/0deec534309495805d000000.pdf
+24b5ea4e262e22768813e7b6581f60e4ab9a8de7,,,https://doi.org/10.1109/TIFS.2018.2807791,
+24c442ac3f6802296d71b1a1914b5d44e48b4f29,http://vision.caltech.edu/~xpburgos/papers/ICCVW15%20Burgos-Artizzu.pdf,,,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w24/papers/Burgos-Artizzu_Pose_and_Expression-Coherent_ICCV_2015_paper.pdf
+247cab87b133bd0f4f9e8ce5e7fc682be6340eac,http://pdfs.semanticscholar.org/247c/ab87b133bd0f4f9e8ce5e7fc682be6340eac.pdf,,,
+24936849676b25a36eb6216e458286dcaee314e5,,,,
+244293024aebbb0ff42a7cf2ba49b1164697a127,,,https://doi.org/10.1109/BTAS.2016.7791187,
+245f8ec4373e0a6c1cae36cd6fed5a2babed1386,http://pdfs.semanticscholar.org/245f/8ec4373e0a6c1cae36cd6fed5a2babed1386.pdf,,,"https://www.textroad.com/pdf/JAEBS/J.%20Appl.%20Environ.%20Biol.%20Sci.,%207(3S)1-10,%202017.pdf"
+24cb375a998f4af278998f8dee1d33603057e525,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1A_016_ext.pdf,,,http://vipl.ict.ac.cn/resources/codes/code/Huang_Projection_Metric_Learning_2015_CVPR_paper_2.pdf
+24aac045f1e1a4c13a58eab4c7618dccd4c0e671,https://arxiv.org/pdf/1706.04124v1.pdf,,,https://arxiv.org/pdf/1706.04124v2.pdf
+24eeb748a5e431510381ec7c8253bcb70eff8526,,,https://doi.org/10.1109/TIP.2017.2746270,
+240d5390af19bb43761f112b0209771f19bfb696,http://pdfs.semanticscholar.org/4e10/0973f1540312df3465a087597018a7892310.pdf,,https://doi.org/10.1016/j.neunet.2014.10.005,http://www.sentic.net/multimodal-affective-data-analysis.pdf
+24de12df6953151ef5cd0379e205eb0f57ff9d1f,http://www.researchgate.net/profile/Sebastian_Ventura/publication/270337594_A_Tutorial_on_Multi-Label_Learning/links/54bcd8460cf253b50e2d697b.pdf?origin=publication_list,,,http://doi.acm.org/10.1145/2716262
+24f9248f01df3020351347c2a3f632e01de72090,http://www.cs.utexas.edu/users/bwaters/publications/papers/luong-wacv2013.pdf,,,http://www.cs.utexas.edu/~grauman/papers/luong-wacv2013.pdf
+24e099e77ae7bae3df2bebdc0ee4e00acca71250,https://qmro.qmul.ac.uk/xmlui/bitstream/handle/123456789/22467/Yang%20Robust%20Face%20Alignment%20Under%20Occlusion%20via%20Regional%20Predictive%20Power%20Estimation%202015%20Accepted.pdf?sequence=1,,https://doi.org/10.1109/TIP.2015.2421438,http://www.eecs.qmul.ac.uk/~ioannisp/pubs/ecopies/2015-TIP-Yang-Patras-Robust-Face-Alignment-under-Occlusion-final-submitted.pdf
+24959d1a9c9faf29238163b6bcaf523e2b05a053,http://pdfs.semanticscholar.org/2495/9d1a9c9faf29238163b6bcaf523e2b05a053.pdf,,https://doi.org/10.1007/978-3-319-09912-5_34,http://home.elka.pw.edu.pl/~astrupcz/uploads/7/4/5/7/74570135/2014_high_accuracy_head_pose_tracking_survey_amt.pdf
+24f1febcdf56cd74cb19d08010b6eb5e7c81c362,http://www.umiacs.umd.edu/~cteo/public-shared/language_robotsMethods_PerMIS2012.pdf,,,http://doi.acm.org/10.1145/2393091.2393109
+2450c618cca4cbd9b8cdbdb05bb57d67e63069b1,http://liris.cnrs.fr/Documents/Liris-6127.pdf,,,http://www.cs.uwyo.edu/~dspears/courses/ML/NN_facial.pdf
+24cce97c3fe3c3fc21f1225e4a9f6c1e736e6bb9,,,,
+24496e4acfb8840616b2960b0e2c80cc4c9e5a87,http://ai2-s2-pdfs.s3.amazonaws.com/2449/6e4acfb8840616b2960b0e2c80cc4c9e5a87.pdf,,,http://www.cs.washington.edu/homes/neeraj/papers/nk_cvpr2012_multiattrs.pdf
+2400c4994655c4dd59f919c4d6e9640f57f2009f,,,https://doi.org/10.1109/IPTA.2015.7367096,
+244b57cc4a00076efd5f913cc2833138087e1258,http://pdfs.semanticscholar.org/dfa8/d0afc548a8086902412fb0eae0fcf881ed8a.pdf,,,https://arxiv.org/pdf/1609.04382.pdf
+24cf9fe9045f50c732fc9c602358af89ae40a9f7,http://pdfs.semanticscholar.org/b3e7/4cbe27454e32b4b35014af831783d3480ad5.pdf,,,https://arxiv.org/pdf/1607.01437v1.pdf
+241d2c517dbc0e22d7b8698e06ace67de5f26fdf,http://pdfs.semanticscholar.org/bfc3/546fa119443fdcbac3a5723647c2ba0007ac.pdf,,https://doi.org/10.1007/978-3-319-10590-1_24,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ECCV_2014/papers/8689/86890361.pdf
+24e6a28c133b7539a57896393a79d43dba46e0f6,http://arxiv.org/pdf/1605.02057v2.pdf,,https://doi.org/10.1109/ICIP.2016.7533085,https://arxiv.org/pdf/1605.02057v2.pdf
+24e82eaf3257e761d6ca0ffcc2cbca30dfca82e9,,,https://doi.org/10.1109/GlobalSIP.2016.7906030,
+248db911e3a6a63ecd5ff6b7397a5d48ac15e77a,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Matthews_Enriching_Texture_Analysis_2013_CVPR_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2013.165
+24b637c98b22cd932f74acfeecdb50533abea9ae,,,https://doi.org/10.1109/TIP.2015.2492819,
+24d376e4d580fb28fd66bc5e7681f1a8db3b6b78,http://pdfs.semanticscholar.org/24d3/76e4d580fb28fd66bc5e7681f1a8db3b6b78.pdf,,,https://arxiv.org/pdf/1707.06330v1.pdf
+24f1e2b7a48c2c88c9e44de27dc3eefd563f6d39,http://openaccess.thecvf.com/content_ICCV_2017/papers/Benitez-Quiroz_Recognition_of_Action_ICCV_2017_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2017.428
+2489a839d0a761ef8520393a7e412c36f5f26324,https://cs.adelaide.edu.au/~tjchin/lib/exe/fetch.php?media=eccv2014_hypergraph.pdf,,https://doi.org/10.1007/978-3-319-10593-2_44,http://www.tnt.uni-hannover.de/papers/data/1032/Pulak14_Hypergraph.pdf
+243e9d490fe98d139003bb8dc95683b366866c57,http://pdfs.semanticscholar.org/243e/9d490fe98d139003bb8dc95683b366866c57.pdf,,,http://web2py.iiit.ac.in/research_centres/publications/download/mastersthesis.pdf.a9b6d5276b7588be.46696e616c5468657369732e706466.pdf
+2465fc22e03faf030e5a319479a95ef1dfc46e14,https://www.fruct.org/publications/fruct20/files/Bel.pdf,,https://doi.org/10.23919/FRUCT.2017.8071290,
+24205a60cbf1cc12d7e0a9d44ed3c2ea64ed7852,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.30
+2495ebdcb6da8d8c2e82cf57fcaab0ec003d571d,http://eprints.pascal-network.org/archive/00002118/01/russell06.pdf,,,http://repository.cmu.edu/cgi/viewcontent.cgi?article=1283&context=robotics
+24e42e6889314099549583c7e19b1cb4cc995226,,,https://doi.org/10.1109/ACPR.2011.6166646,
+247a6b0e97b9447850780fe8dbc4f94252251133,http://www.busim.ee.boun.edu.tr/~sankur/SankurFolder/Conf_Arman_CVPR2010.pdf,,https://doi.org/10.1109/CVPRW.2010.5543263,
+24bf94f8090daf9bda56d54e42009067839b20df,https://www.computer.org/csdl/trans/tp/2015/06/06940284.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2014.2366127
+24f3dfeb95bdecdc604d630acdfcafa1dc7c9124,,,,http://doi.acm.org/10.1145/2994258.2994270
+240eb0b34872c431ecf9df504671281f59e7da37,http://www.ece.cmu.edu/~dbatra/publications/assets/cutout_tags_iv2009_small.pdf,,https://doi.org/10.1109/CVPRW.2009.5204195,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR_2009_WS/data/papers/04/20.pdf
+245d98726674297208e76308c3a11ce3fc43bee2,,,https://doi.org/10.1007/s11042-015-2699-x,
+230527d37421c28b7387c54e203deda64564e1b7,http://pdfs.semanticscholar.org/2305/27d37421c28b7387c54e203deda64564e1b7.pdf,,https://doi.org/10.1007/978-1-4471-6296-4_17,http://www.ee.cuhk.edu.hk/~xgwang/papers/wangZreid.pdf
+23fdbef123bcda0f07d940c72f3b15704fd49a98,http://pdfs.semanticscholar.org/23fd/bef123bcda0f07d940c72f3b15704fd49a98.pdf,,,http://humansensing.cs.cmu.edu/sites/default/files/1nips2011.pdf
+2348f1fa2940b01ec90e023fac8cc96812189774,,,,http://doi.ieeecomputersociety.org/10.1109/EWDTS.2017.8110157
+23ebbbba11c6ca785b0589543bf5675883283a57,https://pdfs.semanticscholar.org/23eb/bbba11c6ca785b0589543bf5675883283a57.pdf,,https://doi.org/10.1007/s11042-010-0602-3,http://webia.lip6.fr/~cord/Publications_files/KernelVideo.pdf
+23aef683f60cb8af239b0906c45d11dac352fb4e,http://pdfs.semanticscholar.org/b6cd/e64dcf864e457a83b72b7742fd19984a7552.pdf,,,http://www.cs.cmu.edu/~ymiao/thesis/thesis.pdf
+23860d947cf221b6ddb6d6cf3a7ac4b08c7cb8d3,,,,
+235d5620d05bb7710f5c4fa6fceead0eb670dec5,http://pdfs.semanticscholar.org/7497/50d81dbd4d9fdcc9c1728b797dbb538a8747.pdf,,,http://infoscience.epfl.ch/record/146070/files/Jie_NIPS2009.pdf
+2360ecf058393141ead1ca6b587efa2461e120e4,,,https://doi.org/10.1007/s00138-017-0895-6,
+235a347cb96ef22bf35b4cf37e2b4ee5cde9df77,,,,http://doi.ieeecomputersociety.org/10.1109/DICTA.2008.13
+23ecc496eaa238ac884e6bae5763f6138a9c90a3,,,https://doi.org/10.1109/ICB.2016.7550085,
+23fd653b094c7e4591a95506416a72aeb50a32b5,http://pdfs.semanticscholar.org/8a92/17f540845a7d11d24f2d76c0b752ca439457.pdf,,,http://research.ijcaonline.org/volume93/number11/pxc3895920.pdf
+23172f9a397f13ae1ecb5793efd81b6aba9b4537,http://pdfs.semanticscholar.org/2317/2f9a397f13ae1ecb5793efd81b6aba9b4537.pdf,,https://doi.org/10.18653/v1/W15-2805,http://aclweb.org/anthology/W/W15/W15-2805.pdf
+2336de3a81dada63eb00ea82f7570c4069342fb5,,,,http://doi.acm.org/10.1145/2361407.2361428
+231a6d2ee1cc76f7e0c5912a530912f766e0b459,http://pdfs.semanticscholar.org/231a/6d2ee1cc76f7e0c5912a530912f766e0b459.pdf,,,http://arxiv.org/abs/1312.7446
+236a4f38f79a4dcc2183e99b568f472cf45d27f4,https://jurie.users.greyc.fr/papers/moosman-nowak-jurie-pami08.pdf,,,http://www.mrt.kit.edu/z/publ/download/Moosmann_al2008pami.pdf
+230c4a30f439700355b268e5f57d15851bcbf41f,http://arxiv.org/pdf/1509.01509v2.pdf,,,http://arxiv.org/abs/1509.01509
+237fa91c8e8098a0d44f32ce259ff0487aec02cf,http://ira.lib.polyu.edu.hk/bitstream/10397/241/1/SMCB_C_36_4_06_B.pdf,,,http://www.baskent.edu.tr/~mudogan/eem513/PCA.pdf
+235bebe7d0db37e6727dfa1246663be34027d96b,,,https://doi.org/10.1109/NAFIPS.2016.7851625,
+233be88c7ce1fbf1c1680643dca7869dc637b379,,,,
+23d5b2dccd48a17e743d3a5a4d596111a2f16c41,http://pdfs.semanticscholar.org/8cda/dc4d5e7e4fe6a0dbe15611f6fc8b7c0f103e.pdf,,https://doi.org/10.1016/j.imavis.2012.02.003,http://www.gatsby.ucl.ac.uk/~szabo/publications/jeni12shape.pdf
+23fc83c8cfff14a16df7ca497661264fc54ed746,http://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf,,,http://www.pitt.edu/~jeffcohn/biblio/Cohn-Kanade_Database.pdf
+2331df8ca9f29320dd3a33ce68a539953fa87ff5,http://faculty.ucmerced.edu/mhyang/papers/aaai02.pdf,,,http://www.aaai.org/Library/AAAI/2002/aaai02-035.php
+2340d810c515dc0c9fd319f598fa8012dc0368a0,,,https://doi.org/10.1109/AFGR.2008.4813420,
+232b6e2391c064d483546b9ee3aafe0ba48ca519,https://ibug.doc.ic.ac.uk/media/uploads/documents/tzimiro_pantic_iccv2013.pdf,,,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Tzimiropoulos_Optimization_Problems_for_2013_ICCV_paper.pdf
+23ba9e462151a4bf9dfc3be5d8b12dbcfb7fe4c3,http://pdfs.semanticscholar.org/23ba/9e462151a4bf9dfc3be5d8b12dbcfb7fe4c3.pdf,,,"http://cs229.stanford.edu/proj2014/Matthew%20Wang,%20Spencer%20Yee,%20Determining%20Mood%20From%20Facial%20Expressions.pdf"
+237eba4822744a9eabb121fe7b50fd2057bf744c,http://pdfs.semanticscholar.org/ba2a/65bef17d9db7366fe8c1344ca918ba50b99a.pdf,,https://doi.org/10.1007/978-3-540-74889-2_3,http://www.se.cuhk.edu.hk/hccl/publications/pub/ACII2007.pdf
+23086a13b83d1b408b98346cf44f3e11920b404d,http://pdfs.semanticscholar.org/2308/6a13b83d1b408b98346cf44f3e11920b404d.pdf,,https://doi.org/10.1016/j.imavis.2016.03.008,http://ca.cs.cmu.edu/sites/default/files/main.pdf
+238fc68b2e0ef9f5ec043d081451902573992a03,http://www.cbsr.ia.ac.cn/users/zlei/papers/ChuanxianRen-ELGOF-TCYB.pdf,,https://doi.org/10.1109/TCYB.2015.2484356,
+23e75f5ce7e73714b63f036d6247fa0172d97cb6,http://pdfs.semanticscholar.org/23e7/5f5ce7e73714b63f036d6247fa0172d97cb6.pdf,,,http://www.biomedical-engineering-online.com/content/pdf/1475-925X-8-16.pdf
+23c66ab737367a96f1422ce5c4ff8421709ef70d,,,,
+23675cb2180aac466944df0edda4677a77c455cd,,,,http://doi.ieeecomputersociety.org/10.1109/IIH-MSP.2009.142
+23aba7b878544004b5dfa64f649697d9f082b0cf,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W02/papers/Wang_Locality-Constrained_Discriminative_Learning_2015_CVPR_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2015.7301315
+23120f9b39e59bbac4438bf4a8a7889431ae8adb,http://pdfs.semanticscholar.org/2312/0f9b39e59bbac4438bf4a8a7889431ae8adb.pdf,,https://doi.org/10.1049/iet-bmt.2015.0057,http://vbn.aau.dk/files/230667133/IET_BMT_preprint.pdf
+23d55061f7baf2ffa1c847d356d8f76d78ebc8c1,https://ipsjcva.springeropen.com/track/pdf/10.1186/s41074-017-0033-4?site=ipsjcva.springeropen.com,,https://doi.org/10.1186/s41074-017-0033-4,
+23c3eb6ad8e5f18f672f187a6e9e9b0d94042970,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/3B_095_ext.pdf,,https://doi.org/10.1109/CVPR.2015.7299169,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/3B_095_ext.pdf
+23a8d02389805854cf41c9e5fa56c66ee4160ce3,http://www.advancedsourcecode.com/influencelow10.pdf,,https://doi.org/10.1007/s11042-013-1568-8,
+4fee2f524ef12741d2b0fa96f45a5ef9d20ada83,,,,
+4ffd744a5f079c2d65f36e3ee0979b978f522a13,,,,http://doi.ieeecomputersociety.org/10.1109/SITIS.2009.15
+4fd29e5f4b7186e349ba34ea30738af7860cf21f,https://arxiv.org/pdf/1506.02588v1.pdf,,https://doi.org/10.1007/s11263-015-0875-0,http://arxiv.org/pdf/1506.02588v2.pdf
+4fbc0189252ed4fe8f9cffd3ea0ebbb0c621e3ef,,,https://doi.org/10.1049/iet-cvi.2012.0127,
+4f742c09ce12859b20deaa372c8f1575acfc99c9,,,https://doi.org/10.1016/j.neucom.2017.01.020,
+4f0d9200647042e41dea71c35eb59e598e6018a7,http://pdfs.semanticscholar.org/4f0d/9200647042e41dea71c35eb59e598e6018a7.pdf,,,https://mice.cs.columbia.edu/getTechreport.php?format=pdf&techreportID=1505
+4f00a48a60cbf750b4ccbd698d5547d83b3eaf3f,,,,
+4faded442b506ad0f200a608a69c039e92eaff11,http://pdfs.semanticscholar.org/4fad/ed442b506ad0f200a608a69c039e92eaff11.pdf,,,https://www.cg.tuwien.ac.at/research/publications/2006/vucini_2006/vucini_2006-thesis.pdf
+4f7967158b257e86d66bdabfdc556c697d917d24,http://pdfs.semanticscholar.org/4f79/67158b257e86d66bdabfdc556c697d917d24.pdf,,,http://www.ri.cmu.edu/pub_files/2016/8/3-CMU-RI-MS-Thesis.pdf
+4fc7a540efb24bea338f82c8bdc64c214744a3de,http://www.researchgate.net/profile/Touradj_Ebrahimi/publication/41083907_Object-based_Tag_Propagation_for_Semi-automatic_Annotation_of_Images/links/02e7e515b3de45cd50000000.pdf,,,https://infoscience.epfl.ch/record/143543/files/Ivanov_201003_ACMMIR2010.pdf
+4fc936102e2b5247473ea2dd94c514e320375abb,http://pdfs.semanticscholar.org/4fc9/36102e2b5247473ea2dd94c514e320375abb.pdf,,,https://arxiv.org/pdf/1804.01824v1.pdf
+4f298d6d0c8870acdbf94fe473ebf6814681bd1f,http://pdfs.semanticscholar.org/9979/b794d0bd06a1959a6b169f2cf32ba8ba376b.pdf,,https://doi.org/10.1016/j.imavis.2017.01.010,https://arxiv.org/pdf/1605.04988v1.pdf
+4f6adc53798d9da26369bea5a0d91ed5e1314df2,http://pdfs.semanticscholar.org/4f6a/dc53798d9da26369bea5a0d91ed5e1314df2.pdf,,,https://arxiv.org/pdf/1608.00075v2.pdf
+4fbef7ce1809d102215453c34bf22b5f9f9aab26,http://pdfs.semanticscholar.org/4fbe/f7ce1809d102215453c34bf22b5f9f9aab26.pdf,,,http://espace.library.uq.edu.au/eserv/UQ:18/Robust_Face_Recognition_for_Data_Mining.pdf
+4fa0d73b8ba114578744c2ebaf610d2ca9694f45,http://pdfs.semanticscholar.org/4fa0/d73b8ba114578744c2ebaf610d2ca9694f45.pdf,,,https://arxiv.org/pdf/1712.04851v1.pdf
+4fcd19b0cc386215b8bd0c466e42934e5baaa4b7,https://arxiv.org/pdf/1510.00562v1.pdf,,,http://arxiv.org/abs/1510.00562
+4f591e243a8f38ee3152300bbf42899ac5aae0a5,http://pdfs.semanticscholar.org/4f59/1e243a8f38ee3152300bbf42899ac5aae0a5.pdf,,,https://arxiv.org/pdf/1612.06836v2.pdf
+4f9958946ad9fc71c2299847e9ff16741401c591,http://pdfs.semanticscholar.org/4f99/58946ad9fc71c2299847e9ff16741401c591.pdf,,,http://www6.in.tum.de/Main/Publications/graves2008d.pdf
+4f03ba35440436cfa06a2ed2a571fea01cb36598,,,https://doi.org/10.1109/SPAC.2017.8304260,
+4fb0954ef02a178fd64f1c8cd0408866982bac2c,,,,
+4f773c8e7ca98ece9894ba3a22823127a70c6e6c,http://pdfs.semanticscholar.org/4f77/3c8e7ca98ece9894ba3a22823127a70c6e6c.pdf,,https://doi.org/10.1007/978-3-642-35749-7_26,http://humansensing.cs.cmu.edu/sites/default/files/real_time.pdf
+4fac09969ee80d485876e3198c7177181c600a4a,,,,http://doi.ieeecomputersociety.org/10.1109/CRV.2015.32
+4f3b652c75b1d7cf4997e0baaef2067b61e3a79b,,,,http://doi.ieeecomputersociety.org/10.1109/ICME.2016.7552910
+4ff11512e4fde3d1a109546d9c61a963d4391add,http://pdfs.semanticscholar.org/4ff1/1512e4fde3d1a109546d9c61a963d4391add.pdf,,,http://www.aaai.org/ocs/index.php/FLAIRS/FLAIRS16/paper/view/12838
+4f028efe6708fc252851eee4a14292b7ce79d378,http://pdfs.semanticscholar.org/ae17/aca92b4710efb00e3180a46e56e463ae2a6f.pdf,,https://doi.org/10.1109/IJCNN.1999.836189,http://www.cs.njit.edu/~liu/papers/ijcnn99.pdf
+4f0bf2508ae801aee082b37f684085adf0d06d23,http://pdfs.semanticscholar.org/4f0b/f2508ae801aee082b37f684085adf0d06d23.pdf,,https://doi.org/10.1016/j.imavis.2012.02.010,http://www.eecs.qmul.ac.uk/~ioannisp/pubs/ecopies/2012-IVC-KumarKotsiaPatras_mnmf.pdf
+4ff4c27e47b0aa80d6383427642bb8ee9d01c0ac,http://www.ai.rug.nl/~mwiering/GROUP/ARTICLES/CNN_Gender_Recognition.pdf,,https://doi.org/10.1109/SSCI.2015.37,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_SSCI_2015/data/7560a188.pdf
+4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7,http://pdfs.semanticscholar.org/d8ca/e259c1c5bba0c096f480dc7322bbaebfac1a.pdf,,https://doi.org/10.1007/978-3-319-46475-6_15,http://www.ee.cuhk.edu.hk/~xgwang/papers/liuYLWTeccv16.pdf
+4f0d5cbcd30fef3978b9691c2e736daed2f841c1,http://www.ics.uci.edu/~dramanan/papers/localdist_journal.pdf,,https://doi.org/10.1109/ICCV.2009.5459265,http://research.microsoft.com/pubs/120617/main.pdf
+4f77a37753c03886ca9c9349723ec3bbfe4ee967,http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W11/papers/Hasan_Localizing_Facial_Keypoints_2013_ICCV_paper.pdf,,,
+4f36c14d1453fc9d6481b09c5a09e91d8d9ee47a,http://pdfs.semanticscholar.org/4f36/c14d1453fc9d6481b09c5a09e91d8d9ee47a.pdf,,,http://www.bmva.org/bmvc/2014/files/abstract128.pdf
+8d71872d5877c575a52f71ad445c7e5124a4b174,http://pdfs.semanticscholar.org/8d71/872d5877c575a52f71ad445c7e5124a4b174.pdf,,https://doi.org/10.1016/j.patcog.2006.11.020,https://www.researchgate.net/profile/Chunghoon_Kim/publication/222416291_Shadow_compensation_in_2D_images_for_face_recognition/links/53fc340a0cf2dca8fffeefb0.pdf?origin=publication_list
+8de06a584955f04f399c10f09f2eed77722f6b1c,http://pdfs.semanticscholar.org/8de0/6a584955f04f399c10f09f2eed77722f6b1c.pdf,,,http://hal.archives-ouvertes.fr/docs/00/81/88/08/PDF/article-Visapp.pdf
+8d4f0517eae232913bf27f516101a75da3249d15,http://pdfs.semanticscholar.org/8d4f/0517eae232913bf27f516101a75da3249d15.pdf,,,https://arxiv.org/pdf/1803.10106v1.pdf
+8dd3f05071fd70fb1c349460b526b0e69dcc65bf,,,https://doi.org/10.1109/TIP.2017.2726010,
+8de2dbe2b03be8a99628ffa000ac78f8b66a1028,http://pdfs.semanticscholar.org/8de2/dbe2b03be8a99628ffa000ac78f8b66a1028.pdf,,,http://lear.inrialpes.fr/people/gaidon/pubmedia/data/master_rapport.pdf
+8d3e95c31c93548b8c71dbeee2e9f7180067a888,,,https://doi.org/10.1109/ICPR.2016.7899841,
+8d3fbdb9783716c1832a0b7ab1da6390c2869c14,http://pdfs.semanticscholar.org/ae81/6e7e0077fe94f1e62629647dc04263a970b5.pdf,,,http://cdn.intechopen.com/pdfs/5899/InTech-Discriminant_subspace_analysis_for_uncertain_situation_in_facial_recognition.pdf
+8d42a24d570ad8f1e869a665da855628fcb1378f,http://pdfs.semanticscholar.org/8d42/a24d570ad8f1e869a665da855628fcb1378f.pdf,,,http://www.cs.cmu.edu/~santosh/projects/papers/contextInDetectn_cvpr09_preCRC.pdf
+8d8461ed57b81e05cc46be8e83260cd68a2ebb4d,http://pdfs.semanticscholar.org/8d84/61ed57b81e05cc46be8e83260cd68a2ebb4d.pdf,,,http://www.ijcsit.com/docs/Volume%203/vol3Issue3/ijcsit2012030384.pdf
+8d4f12ed7b5a0eb3aa55c10154d9f1197a0d84f3,http://vision.ucsd.edu/~pdollar/files/papers/DollarCVPR10pose.pdf,,,http://pdollar.github.io/files/papers/DollarCVPR10pose.pdf
+8d2c0c9155a1ed49ba576ac0446ec67725468d87,http://media.cs.tsinghua.edu.cn/~cvg/publications/ENGLISH%20CONFERENCE%20PAPERS/A%20Study%20of%20Two%20Image%20Representations%20for%20Head%20Pose%20Estimation.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICIG.2009.141
+8d0243b8b663ca0ab7cbe613e3b886a5d1c8c152,http://pdfs.semanticscholar.org/8d02/43b8b663ca0ab7cbe613e3b886a5d1c8c152.pdf,,,http://www.dsls.usra.edu/meetings/IAA/pdf/2306.pdf
+8db9188e5137e167bffb3ee974732c1fe5f7a7dc,,,https://doi.org/10.1109/TIP.2016.2612885,
+8d6c4af9d4c01ff47fe0be48155174158a9a5e08,http://pdfs.semanticscholar.org/8d6c/4af9d4c01ff47fe0be48155174158a9a5e08.pdf,,,http://homes.cs.washington.edu/~bcr/papers/RussellThesis.pdf
+8d2c43759e221f39ab1b4bf70d6891ffd19fb8da,https://www.researchgate.net/profile/Zhang_Pinzheng/publication/224711010_An_Automatic_Facial_Expression_Recognition_Approach_Based_on_Confusion-Crossed_Support_Vector_Machine_Tree/links/54658c630cf2052b509f3391.pdf,,https://doi.org/10.1109/ICASSP.2007.365985,
+8dbe79830713925affc48d0afa04ed567c54724b,http://pdfs.semanticscholar.org/8dbe/79830713925affc48d0afa04ed567c54724b.pdf,,,https://core.ac.uk/download/pdf/42415427.pdf
+8d1adf0ac74e901a94f05eca2f684528129a630a,http://www.denniscodd.com/dotnet-ieee/Facial%20Expression%20Recognition%20Using%20Facial.pdf,,,http://doi.ieeecomputersociety.org/10.1109/T-AFFC.2011.13
+8db609d84190b905913eb2f17f4e558c6e982208,,,,http://doi.ieeecomputersociety.org/10.1109/ICIG.2011.182
+8d91f06af4ef65193f3943005922f25dbb483ee4,http://pdfs.semanticscholar.org/8d91/f06af4ef65193f3943005922f25dbb483ee4.pdf,,,http://arxiv.org/abs/1607.01040
+8dc9de0c7324d098b537639c8214543f55392a6b,http://www.diva-portal.org/smash/get/diva2:280081/FULLTEXT01.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCTA.2007.105
+8d712cef3a5a8a7b1619fb841a191bebc2a17f15,http://pdfs.semanticscholar.org/8d71/2cef3a5a8a7b1619fb841a191bebc2a17f15.pdf,,https://doi.org/10.1016/j.patrec.2015.07.040,https://pdfs.semanticscholar.org/8d71/2cef3a5a8a7b1619fb841a191bebc2a17f15.pdf
+8d646ac6e5473398d668c1e35e3daa964d9eb0f6,http://pdfs.semanticscholar.org/8d64/6ac6e5473398d668c1e35e3daa964d9eb0f6.pdf,,,https://arxiv.org/pdf/1702.08481v1.pdf
+8dffbb6d75877d7d9b4dcde7665888b5675deee1,http://pdfs.semanticscholar.org/8dff/bb6d75877d7d9b4dcde7665888b5675deee1.pdf,,,http://cs229.stanford.edu/proj2010/McLaughlinLeBayanbat-RecognizingEmotionsWithDeepBeliefNets.pdf
+8dce38840e6cf5ab3e0d1b26e401f8143d2a6bff,http://publications.idiap.ch/downloads/papers/2017/Le_CBMI_2017.pdf,,,http://doi.acm.org/10.1145/3095713.3095732
+153f5ad54dd101f7f9c2ae17e96c69fe84aa9de4,http://pdfs.semanticscholar.org/153f/5ad54dd101f7f9c2ae17e96c69fe84aa9de4.pdf,,,"http://www.fer.unizg.hr/_download/repository/KDI,_Nenad_Markus.pdf"
+15ef449ac443c494ceeea8a9c425043f4079522e,,,,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477583
+155199d7f10218e29ddaee36ebe611c95cae68c4,http://pdfs.semanticscholar.org/1551/99d7f10218e29ddaee36ebe611c95cae68c4.pdf,,,http://ri.cmu.edu/pub_files/2016/4/main2-daftry.pdf
+157647b0968d95f9288b27d6d9179a8e1ef5c970,,,https://doi.org/10.1049/iet-bmt.2014.0086,
+15ef65fd68d61f3d47326e358c446b0f054f093a,,,https://doi.org/10.1109/MLSP.2017.8168180,
+15bf0e70b069cea62d87d3bf706172c4a6a7779e,,,,
+1584edf8106e8f697f19b726e011b9717de0e4db,,,https://doi.org/10.1049/iet-cvi.2015.0350,
+15a9f812e781cf85c283f7cf2aa2928b370329c5,,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2469281
+15cd05baa849ab058b99a966c54d2f0bf82e7885,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1A_031_ext.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/1A_031_ext.pdf
+15affdcef4bb9d78b2d3de23c9459ee5b7a43fcb,http://feiwang03.googlepages.com/CVPRposter.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2006.272
+159e792096756b1ec02ec7a980d5ef26b434ff78,http://pdfs.semanticscholar.org/159e/792096756b1ec02ec7a980d5ef26b434ff78.pdf,,,http://www.aaai.org/ocs/index.php/AAAI/AAAI14/paper/view/8165
+1541d5cb8af55930968c02f9185c1a3b5da6b7ea,,,,
+153e5cddb79ac31154737b3e025b4fb639b3c9e7,http://pdfs.semanticscholar.org/d9f5/9178ef2d91c98e0f3108fe273cdc6c6590f4.pdf,,,https://arxiv.org/pdf/1409.5763v2.pdf
+1586871a1ddfe031b885b94efdbff647cf03eff1,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w18/papers/Ginosar_A_Century_of_ICCV_2015_paper.pdf,,https://doi.org/10.1109/TCI.2017.2699865,http://www.cs.berkeley.edu/~shiry/publications/Ginosar15_Yearbooks.pdf
+158aa18c724107587bcc4137252d0ba10debf417,,,https://doi.org/10.1109/ACSSC.2016.7869522,
+15b07dae17f184c8e6efbc9d2b58526d8e8dc9d4,https://arxiv.org/pdf/1707.07196v1.pdf,,https://doi.org/10.1109/TSP.2017.2781649,https://arxiv.org/pdf/1707.07196v2.pdf
+159b1e3c3ed0982061dae3cc8ab7d9b149a0cdb1,,,https://doi.org/10.1109/TIP.2017.2694226,
+152683f3ac99f829b476ea1b1b976dec6e17b911,,,https://doi.org/10.1109/MIXDES.2016.7529773,
+15cf7bdc36ec901596c56d04c934596cf7b43115,http://pdfs.semanticscholar.org/15cf/7bdc36ec901596c56d04c934596cf7b43115.pdf,,,http://thesai.org/Downloads/Volume8No9/Paper_14-Face_Extraction_from_Image_based_on_K_Means.pdf
+1576ed0f3926c6ce65e0ca770475bca6adcfdbb4,http://openaccess.thecvf.com/content_cvpr_workshops_2015/W09/papers/Bagheri_Keep_it_Accurate_2015_CVPR_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2015.7301332
+156cd2a0e2c378e4c3649a1d046cd080d3338bca,http://pdfs.semanticscholar.org/156c/d2a0e2c378e4c3649a1d046cd080d3338bca.pdf,,,http://web2py.iiit.ac.in/research_centres/publications/download/mastersthesis.pdf.bdab7fb0f6a384af.4578656d706c617220626173656420617070726f6163686573206f6e204661636520466964756369616c20446574656374696f6e20616e642046726f6e74616c697a6174696f6e20284d616c6c696b61726a756e204220522d323031333037363831292e706466.pdf
+157eb982da8fe1da4c9e07b4d89f2e806ae4ceb6,http://www.merl.com/publications/docs/TR2012-043.pdf,,,http://www2.ece.ohio-state.edu/~chi/papers/CROC_CVPR2012.pdf
+15e0b9ba3389a7394c6a1d267b6e06f8758ab82b,https://ipsjcva.springeropen.com/track/pdf/10.1186/s41074-017-0035-2?site=ipsjcva.springeropen.com,,https://doi.org/10.1186/s41074-017-0035-2,
+151481703aa8352dc78e2577f0601782b8c41b34,http://pdfs.semanticscholar.org/943c/f990952712673320b011e1e8092fad65eedd.pdf,,https://doi.org/10.1007/11573425_22,http://www.dcs.qmul.ac.uk/~sgg/papers/Shan_etal_HCI05.pdf
+159caaa56c2291bedbd41d12af5546a7725c58d4,,,https://doi.org/10.1109/ICIP.2016.7532910,
+1565721ebdbd2518224f54388ed4f6b21ebd26f3,http://cmp.felk.cvut.cz/ftp/articles/franc/Cevilkalp-FaceDetector-FG2013.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2013.6553705
+15f3d47b48a7bcbe877f596cb2cfa76e798c6452,http://pdfs.semanticscholar.org/15f3/d47b48a7bcbe877f596cb2cfa76e798c6452.pdf,,,http://www.cl.cam.ac.uk/~pr10/publications/idgei15b.pdf
+15fbb5fc3bdd692a6b2dd737cce7f39f7c89a25c,,,https://doi.org/10.1109/TMM.2011.2167317,
+15728d6fd5c9fc20b40364b733228caf63558c31,http://pdfs.semanticscholar.org/1572/8d6fd5c9fc20b40364b733228caf63558c31.pdf,,,http://web.engr.illinois.edu/~iendres2/publications/dissertation2013.pdf
+15252b7af081761bb00535aac6bd1987391f9b79,http://cvsp.cs.ntua.gr/publications/confr/KoutrasMaragos_EyeGaze_ICIP15.pdf,,https://doi.org/10.1109/ICIP.2015.7351237,
+1513949773e3a47e11ab87d9a429864716aba42d,http://pdfs.semanticscholar.org/1513/949773e3a47e11ab87d9a429864716aba42d.pdf,,https://doi.org/10.1016/j.neucom.2011.10.040,http://www.ee.oulu.fi/~hadid/Neurocomputing.pdf
+15ee80e86e75bf1413dc38f521b9142b28fe02d1,https://arxiv.org/pdf/1612.05322v1.pdf,,https://doi.org/10.1109/BTAS.2016.7791203,https://arxiv.org/pdf/1612.05322v2.pdf
+15e12d5c4d80a2b6f4d957a3ffd130564e9bab3a,,,https://doi.org/10.5220/0004736505740580,
+1599718bf756a0fb7157277b93f21cfcad04e383,,,,
+153c8715f491272b06dc93add038fae62846f498,http://pdfs.semanticscholar.org/153c/8715f491272b06dc93add038fae62846f498.pdf,,,http://vision.ucsd.edu/~jwlim/files/phdthesis.pdf
+15e27f968458bf99dd34e402b900ac7b34b1d575,http://mirlab.org/conference_papers/International_Conference/ICASSP%202014/papers/p8362-mahanta.pdf,,https://doi.org/10.1109/ICASSP.2014.6855221,
+15f70a0ad8903017250927595ae2096d8b263090,http://pdfs.semanticscholar.org/15f7/0a0ad8903017250927595ae2096d8b263090.pdf,,,http://arxiv.org/abs/1507.04844
+1564bf0a268662df752b68bee5addc4b08868739,https://arxiv.org/pdf/1605.04129v2.pdf,,https://doi.org/10.1109/ICPR.2016.7900087,https://arxiv.org/pdf/1605.04129v1.pdf
+158e32579e38c29b26dfd33bf93e772e6211e188,http://pdfs.semanticscholar.org/158e/32579e38c29b26dfd33bf93e772e6211e188.pdf,,,https://curve.carleton.ca/system/files/etd/9d64f172-02e4-4172-890e-d751eee18de4/etd_pdf/e5ac8a1b25ad65e92ce18a84241280f8/fratesi-automatedrealtimeemotionrecognitionusingfacial.pdf
+1277b1b8b609a18b94e4907d76a117c9783a5373,,,,http://doi.ieeecomputersociety.org/10.1109/ASONAM.2016.7752438
+122f51cee489ba4da5ab65064457fbe104713526,http://www.speakit.cn/Group/file/2015_LongShortTerm_ACMAVEC@MM15_EI.pdf,,,http://doi.acm.org/10.1145/2808196.2811634
+12c4ba96eaa37586f07be0d82b2e99964048dcb5,,,https://doi.org/10.1109/LSP.2017.2694460,
+125d82fee1b9fbcc616622b0977f3d06771fc152,http://www.ee.cuhk.edu.hk/~xgwang/papers/luoWTcvpr12.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247963
+1255afbf86423c171349e874b3ac297de19f00cd,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_SSCI_2015/data/7560a203.pdf,,https://doi.org/10.1109/SSCI.2015.39,http://www.ai.rug.nl/~mwiering/GROUP/ARTICLES/Multi_HOG_Face_Recognition.pdf
+122f52fadd4854cf6c9287013520eced3c91e71a,,,https://doi.org/10.1109/TIP.2016.2515987,
+1283398de84ec0178dc74d41a87febfbfbcbbb02,,,,
+1280b35e4a20036fcfd82ee09f45a3fca190276f,,,,http://doi.ieeecomputersociety.org/10.1109/iThings-GreenCom-CPSCom-SmartData.2017.166
+1252727e8096f48096ef89483d30c3a74500dd15,,,https://doi.org/10.1007/s00138-016-0746-x,
+126535430845361cd7a3a6f317797fe6e53f5a3b,http://pdfs.semanticscholar.org/1265/35430845361cd7a3a6f317797fe6e53f5a3b.pdf,,https://doi.org/10.1007/978-3-642-19318-7_55,http://perception.csl.uiuc.edu/matrix-rank/Files/robust_stereo.pdf
+122ee00cc25c0137cab2c510494cee98bd504e9f,http://pdfs.semanticscholar.org/122e/e00cc25c0137cab2c510494cee98bd504e9f.pdf,,,http://www.mmer-systems.eu/uploads/media/MMER_AAM_Evaluation.pdf
+1286641b8896ae737e140cfd3da2d081d4cd548e,,,,
+126204b377029feb500e9b081136e7a9010e3b6b,,,,http://doi.ieeecomputersociety.org/10.1109/ICDMW.2010.50
+121fe33daf55758219e53249cf8bcb0eb2b4db4b,http://pdfs.semanticscholar.org/121f/e33daf55758219e53249cf8bcb0eb2b4db4b.pdf,,https://doi.org/10.5244/C.23.51,http://www.bmva.org/bmvc/2009/Papers/Paper364/Paper364.pdf
+120bcc9879d953de7b2ecfbcd301f72f3a96fb87,http://www.cs.colostate.edu/~vision/pasc/docs/fg2015videoEvalPreprint.pdf,,,http://ws680.nist.gov/publication/get_pdf.cfm?pub_id=917840
+12cb3bf6abf63d190f849880b1703ccc183692fe,http://pdfs.semanticscholar.org/12cb/3bf6abf63d190f849880b1703ccc183692fe.pdf,,,http://cgit.nutn.edu.tw:8080/cgit/PaperDL/LZJ_130102123815.PDF
+1246534c3104da030fdb9e041819257e0d57dcbf,http://home.isr.uc.pt/~joaoluis/papers/cvpr2015_2.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/2A_078_ext.pdf
+12cd96a419b1bd14cc40942b94d9c4dffe5094d2,http://pdfs.semanticscholar.org/12cd/96a419b1bd14cc40942b94d9c4dffe5094d2.pdf,,,http://aclweb.org/anthology//W/W16/W16-3204.pdf
+12bb0cb32e48269da2902c4c6d41ea2966ba8462,,,,
+12055b8f82d5411f9ad196b60698d76fbd07ac1e,https://zhzhanp.github.io/papers/TCSVT2014.pdf,,https://doi.org/10.1109/TCSVT.2014.2308639,
+126214ef0dcef2b456cb413905fa13160c73ec8e,http://infoscience.epfl.ch/record/125056/files/MHFE_fg08.pdf,,https://doi.org/10.1109/AFGR.2008.4813428,https://www.researchgate.net/profile/Gianluca_Antonini/publication/37462834_Modelling_human_perception_of_static_facial_expressions/links/0912f50c1ab55c4a91000000.pdf
+12692fbe915e6bb1c80733519371bbb90ae07539,http://pdfs.semanticscholar.org/50ef/4817a6e50a2ec525d6e417d05d2400983c11.pdf,,,http://repository.cmu.edu/cgi/viewcontent.cgi?article=1226&context=machine_learning
+1226a230b0be43d03b6e0ff5a22f5752f30834bb,,,,
+12ded6a869b4e21149452234140257019af9494d,,,,
+12ccfc188de0b40c84d6a427999239c6a379cd66,http://pdfs.semanticscholar.org/12cc/fc188de0b40c84d6a427999239c6a379cd66.pdf,,,https://arxiv.org/pdf/1803.02536v1.pdf
+12c713166c46ac87f452e0ae383d04fb44fe4eb2,http://pdfs.semanticscholar.org/98dc/a90e43c7592ef81cf84445d73c8baa719686.pdf,,,http://www.waset.org/journals/waset/v32/v32-129.pdf
+1270044a3fa1a469ec2f4f3bd364754f58a1cb56,http://pdfs.semanticscholar.org/1270/044a3fa1a469ec2f4f3bd364754f58a1cb56.pdf,,,http://vision.ucsd.edu/kriegman-grp/papers/papers/cvpr03b.pdf
+12150d8b51a2158e574e006d4fbdd3f3d01edc93,https://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ss16/DeepEnd2EndV2V.pdf,,,http://www.cs.dartmouth.edu/~dutran/papers/cvpr16w_voxel.pdf
+12003a7d65c4f98fb57587fd0e764b44d0d10125,http://luks.fe.uni-lj.si/en/staff/simond/publications/Dobrisek2015.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2015.7284835
+126076774da192d4d3f4efcd1accc719ee5f9683,,,https://doi.org/10.1109/SIU.2012.6204774,
+124538b3db791e30e1b62f81d4101be435ee12ef,http://pdfs.semanticscholar.org/1245/38b3db791e30e1b62f81d4101be435ee12ef.pdf,,,http://cs.brown.edu/people/gen/pub_papers/BasicLevelSceneUnderstanding_FrontiersPsychology.pdf
+12d8730da5aab242795bdff17b30b6e0bac82998,http://pdfs.semanticscholar.org/12d8/730da5aab242795bdff17b30b6e0bac82998.pdf,,https://doi.org/10.1007/978-3-319-19665-7_21,http://arxiv.org/abs/1411.6509
+120b9c271c3a4ea0ad12bbc71054664d4d460bc3,,,https://doi.org/10.1109/DICTA.2015.7371259,
+12b533f7c6847616393591dcfe4793cfe9c4bb17,,,https://doi.org/10.1109/TIFS.2017.2765519,
+8c643e1a61f3f563ec382c1e450f4b2b28122614,http://www.cvip.louisville.edu/wwwcvip/research/publications/Pub_Pdf/2012/BTAS147.pdf,,https://doi.org/10.1109/BTAS.2012.6374603,
+8c13f2900264b5cf65591e65f11e3f4a35408b48,http://cvhci.ira.uka.de/~stiefel/papers/Ekenel_Local_Appearance.pdf,,,https://cvhci.anthropomatik.kit.edu/~stiefel/papers/Ekenel_Local_Appearance.pdf
+8cb3f421b55c78e56c8a1c1d96f23335ebd4a5bf,http://pdfs.semanticscholar.org/8cb3/f421b55c78e56c8a1c1d96f23335ebd4a5bf.pdf,,https://doi.org/10.1016/j.image.2004.05.009,https://www.hds.utc.fr/~fdavoine/mypublications/spic04.pdf
+8c955f3827a27e92b6858497284a9559d2d0623a,http://pdfs.semanticscholar.org/8c95/5f3827a27e92b6858497284a9559d2d0623a.pdf,,,
+8c8525e626c8857a4c6c385de34ffea31e7e41d1,http://arxiv.org/pdf/1505.07922.pdf,,,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Huang_Cross-Domain_Image_Retrieval_ICCV_2015_paper.pdf
+8c66378df977606d332fc3b0047989e890a6ac76,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/2B_078_ext.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2B_078_ext.pdf
+8c9c8111e18f8798a612e7386e88536dfe26455e,http://pdfs.semanticscholar.org/8c9c/8111e18f8798a612e7386e88536dfe26455e.pdf,,,http://mail.isr.uc.pt/~mrl/admin/upload/Ra10_CS_JP_JD_correction_03.pdf
+8c5cf18c456957c63248245791f44a685e832345,,,,
+8c7f4c11b0c9e8edf62a0f5e6cf0dd9d2da431fa,http://pdfs.semanticscholar.org/8c7f/4c11b0c9e8edf62a0f5e6cf0dd9d2da431fa.pdf,,,https://arxiv.org/pdf/1704.04326v1.pdf
+8cd9475a3a1b2bcccf2034ce8f4fe691c57a4889,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.138
+8c81705e5e4a1e2068a5bd518adc6955d49ae434,http://pdfs.semanticscholar.org/8c81/705e5e4a1e2068a5bd518adc6955d49ae434.pdf,,https://doi.org/10.1007/978-3-319-54526-4_26,http://www.cvlab.cs.tsukuba.ac.jp/~lincons/papers/egda.pdf
+8cb403c733a5f23aefa6f583a17cf9b972e35c90,http://pdfs.semanticscholar.org/e4ca/1fa70823c4350888607df470248be0ed4c56.pdf,,,http://www.robots.ox.ac.uk/~vgg/publications/2016/Novotny16B/novotny16b.pdf
+8cffe360a05085d4bcba111a3a3cd113d96c0369,,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2011.6126248
+8c85ef961826575bc2c2f4da7784bc3bfcf8b188,,,https://doi.org/10.1109/ICIP.2015.7350871,
+8c6b9c9c26ead75ce549a57c4fd0a12b46142848,http://pdfs.semanticscholar.org/97fc/47ba1427b0e50cd815b8b1657fea6fb9e25a.pdf,,https://doi.org/10.1007/978-0-387-34747-9_38,http://dl.ifip.org/db/conf/ifip12/ai2006/KotsiaP06.pdf
+8c50869b745fc094a4fb1b27861934c3c14d7199,,,https://doi.org/10.1109/EMBC.2016.7591826,
+8c4ea76e67a2a99339a8c4decd877fe0aa2d8e82,http://pdfs.semanticscholar.org/e09e/aa666f354d4262d5ff4cf4ef54a960561bbe.pdf,,https://doi.org/10.3390/rs9050446,http://www.mdpi.com/2072-4292/9/5/446/pdf
+8cedb92694845854f3ad0daf6c9adb6b81c293de,,,,http://doi.acm.org/10.1145/1839379.1839431
+8c3f7bd8ae50337dd812b370ce4c4ea9375a9f58,,,https://doi.org/10.1109/ICIP.2014.7025276,
+8c7bceba769762126fd3dae78d622908bb83c3d3,http://qil.uh.edu/qil/websitecontent/pdf/2015-33.pdf,,https://doi.org/10.1109/WIFS.2012.6412618,http://cbl.uh.edu/pub_files/facial-landmark-configuration-for-improved-detection.pdf
+8c37bd06e1a637c6f249dcd1d2c4bc9589ae24b3,,,https://doi.org/10.1007/11608288_28,
+8c6c0783d90e4591a407a239bf6684960b72f34e,http://pdfs.semanticscholar.org/8c6c/0783d90e4591a407a239bf6684960b72f34e.pdf,,,http://worldcomp-proceedings.com/proc/proc2013/ike/IKE_Papers.pdf
+8c2b663f8be1702ed3e377b5e6e85921fe7c6389,,,https://doi.org/10.1109/IPTA.2016.7821006,
+8cd0855ca967ce47b0225b58bbadd38d8b1b41a1,,,https://doi.org/10.1109/TIP.2017.2721106,
+8cb55413f1c5b6bda943697bba1dc0f8fc880d28,http://cvhci.anthropomatik.kit.edu/~stiefel/papers/ICCV07_031.pdf,,,http://isl.ira.uka.de/~stiefel/papers/ICCV07_031.pdf
+8c048be9dd2b601808b893b5d3d51f00907bdee0,,,https://doi.org/10.1631/FITEE.1600041,
+8cc07ae9510854ec6e79190cc150f9f1fe98a238,http://pdfs.semanticscholar.org/8cc0/7ae9510854ec6e79190cc150f9f1fe98a238.pdf,,https://doi.org/10.3390/jimaging2010006,http://www.mdpi.com/2313-433X/2/1/6/pdf
+8509abbde2f4b42dc26a45cafddcccb2d370712f,http://pdfs.semanticscholar.org/ad9a/169042d887c33cfcec2716a453a0d3abcb0c.pdf,,,http://arxiv.org/abs/1709.03872
+855bfc17e90ec1b240efba9100fb760c068a8efa,http://pdfs.semanticscholar.org/855b/fc17e90ec1b240efba9100fb760c068a8efa.pdf,,https://doi.org/10.1016/j.engappai.2012.09.002,http://www.researchgate.net/profile/Abdelmalik_Moujahid/publication/257392756_Facial_expression_recognition_using_tracked_facial_actions_Classifier_performance_analysis/links/00b7d532d6de5cbd82000000.pdf
+858ddff549ae0a3094c747fb1f26aa72821374ec,https://arxiv.org/pdf/1606.03237v1.pdf,,,http://arxiv.org/pdf/1606.03237v1.pdf
+85041e48b51a2c498f22850ce7228df4e2263372,http://pdfs.semanticscholar.org/8504/1e48b51a2c498f22850ce7228df4e2263372.pdf,,,http://www.ca.cs.cmu.edu/sites/default/files/2accv2010finalpaper.pdf
+85785ae222c6a9e01830d73a120cdac75d0b838a,,,https://doi.org/10.1007/978-3-319-11782-9,
+857ad04fca2740b016f0066b152bd1fa1171483f,http://pdfs.semanticscholar.org/857a/d04fca2740b016f0066b152bd1fa1171483f.pdf,,,http://www.sce.carleton.ca/faculty/adler//publications/2003/adler-2003-ccece-restore-face-recognition-templates.pdf
+858901405086056361f8f1839c2f3d65fc86a748,http://pdfs.semanticscholar.org/8589/01405086056361f8f1839c2f3d65fc86a748.pdf,,,http://www.menet.umn.edu/~zhangs/Reports/2013_CLZ.pdf
+85567174a61b5b526e95cd148da018fa2a041d43,,,https://doi.org/10.1109/TMM.2016.2515367,
+85c007758e409eb3a9ae83375c7427dd517f4ab9,,,,
+8576d0031f2b0fe1a0f93dd454e73d48d98a4c63,,,,http://doi.acm.org/10.1145/2522848.2531743
+85188c77f3b2de3a45f7d4f709b6ea79e36bd0d9,http://pdfs.semanticscholar.org/8518/8c77f3b2de3a45f7d4f709b6ea79e36bd0d9.pdf,,,http://hal.archives-ouvertes.fr/docs/00/32/67/33/PDF/Karlinsky_ECCV2008.pdf
+857544746a1d1071739d98718df51936a3488737,,,,
+8598d31c7ca9c8f5bb433409af5e472a75037b4d,,,https://doi.org/10.1109/JPROC.2008.916364,
+85f27ec70474fe93f32864dd03c1d0f321979100,,,https://doi.org/10.1109/IJCNN.2014.6889381,
+85ccf2c9627a988ebab7032d0ec2d76ec7832c98,,,,
+85f7f03b79d03da5fae3a7f79d9aac228a635166,,,https://doi.org/10.1109/WACV.2009.5403085,
+855882a5943fc12fa9c0e8439c482e055b4b46f3,http://humansensing.cs.cmu.edu/papers/Automated.pdf,,,http://www.ca.cs.cmu.edu/sites/default/files/5Automated.pdf
+8536fd81b568b2c9e567adad83be3a048664ade6,,,,
+8518b501425f2975ea6dcbf1e693d41e73d0b0af,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Zhang_Relative_Hidden_Markov_2013_CVPR_paper.pdf,,,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989a548.pdf
+8557914593e8540fcdd9b11aef076f68d41d3b4b,http://elwilber.com/papers/ecodes-2014.pdf,,,http://doi.ieeecomputersociety.org/10.1109/WACV.2014.6836099
+855184c789bca7a56bb223089516d1358823db0b,http://pdfs.semanticscholar.org/8551/84c789bca7a56bb223089516d1358823db0b.pdf,,,https://inst.eecs.berkeley.edu/~cs194-26/fa16/upload/files/projFinalGrad/cs194-26-aai/doc/Hung_Vu_report_final.pdf
+85f6eaa1ed3ae15ec7e777b7f90a277eda38cf7f,,,,
+853bd61bc48a431b9b1c7cab10c603830c488e39,http://pdfs.semanticscholar.org/b8a2/0ed7e74325da76d7183d1ab77b082a92b447.pdf,,,https://arxiv.org/pdf/1411.7923v1.pdf
+85639cefb8f8deab7017ce92717674d6178d43cc,http://pdfs.semanticscholar.org/8563/9cefb8f8deab7017ce92717674d6178d43cc.pdf,,,http://mplab.ucsd.edu/grants/project1/publications/pdfs/cia_techreport-10.pdf
+85205914a99374fa87e004735fe67fc6aec29d36,,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2015.2392774
+854dbb4a0048007a49df84e3f56124d387588d99,http://pdfs.semanticscholar.org/854d/bb4a0048007a49df84e3f56124d387588d99.pdf,,,https://arxiv.org/pdf/1705.04515v1.pdf
+85674b1b6007634f362cbe9b921912b697c0a32c,http://pdfs.semanticscholar.org/8567/4b1b6007634f362cbe9b921912b697c0a32c.pdf,,,https://www.cc.gatech.edu/~parikh/PnA2014/posters/Posters/ZhanpengZhangPnA2014.pdf
+852ff0d410a25ebb7936043a05efe2469c699e4b,http://pdfs.semanticscholar.org/852f/f0d410a25ebb7936043a05efe2469c699e4b.pdf,,https://doi.org/10.1016/j.patrec.2011.05.016,http://repository.tudelft.nl/assets/uuid:678436fb-d859-4c4a-8842-f1b4bb5a0fe3/MS-32.590.pdf
+1db45038ff49e4220a56b17a3b255df1c97b32c1,,,,
+1d21e5beef23eecff6fff7d4edc16247f0fd984a,http://pdfs.semanticscholar.org/1d21/e5beef23eecff6fff7d4edc16247f0fd984a.pdf,,https://doi.org/10.1007/11744085_3,http://mi.eng.cam.ac.uk/~cipolla/publications/inproceedings/2006-ECCV-Arandjelovic-face.pdf
+1ddea58d04e29069b583ac95bc0ae9bebb0bed07,,,https://doi.org/10.1109/KSE.2015.50,
+1dbbec4ad8429788e16e9f3a79a80549a0d7ac7b,http://pdfs.semanticscholar.org/9d44/ef9e28d7722c388091ec4c1fa7c05f085e53.pdf,,,http://papers.nips.cc/paper/5472-global-sensitivity-analysis-for-map-inference-in-graphical-models
+1d7ecdcb63b20efb68bcc6fd99b1c24aa6508de9,https://web.stanford.edu/~bgirod/pdfs/ChenHuizhongTransPAMISep2014.pdf,,,http://web.stanford.edu/~bgirod/pdfs/ChenHuizhongTransPAMISep2014.pdf
+1d846934503e2bd7b8ea63b2eafe00e29507f06a,http://www.iipl.fudan.edu.cn/~zhangjp/literatures/MLF/manifold%20learning/20fa.pdf,,https://doi.org/10.1109/CVPR.2004.390,http://www.psych.ucsb.edu/research/recveb/pdfs/1_PID34511.pdf
+1dabb080e3e968633f4b3774f19192f8378f5b67,,,https://doi.org/10.1109/ICPR.2016.7899664,
+1d19c6857e798943cd0ecd110a7a0d514c671fec,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w2/papers/Khorrami_Do_Deep_Neural_ICCV_2015_paper.pdf,,,http://arxiv.org/pdf/1510.02969v1.pdf
+1d1a7ef193b958f9074f4f236060a5f5e7642fc1,http://pdfs.semanticscholar.org/db40/804914afbb7f8279ca9a4f52e0ade695f19e.pdf,,,http://www.brahnam.info/EN4005.pdf
+1d3bd75e2fb95cc0996a1a2eeaf21dfa42ab7ca0,,,,
+1d696a1beb42515ab16f3a9f6f72584a41492a03,http://www.ee.cuhk.edu.hk/~xgwang/papers/sunWTcvpr15.pdf,,https://doi.org/10.1109/CVPR.2015.7298907,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2A_073.pdf
+1d1caaa2312390260f7d20ad5f1736099818d358,https://eprints.soton.ac.uk/271401/1/paperOnIEEEexplore.pdf,,,
+1dc241ee162db246882f366644171c11f7aed96d,http://pdfs.semanticscholar.org/1dc2/41ee162db246882f366644171c11f7aed96d.pdf,,,https://arxiv.org/pdf/1611.05520v2.pdf
+1d0128b9f96f4c11c034d41581f23eb4b4dd7780,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Snape_Automatic_Construction_Of_2015_CVPR_paper.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1A_011_ext.pdf
+1d79ec93a9feba817c75c31604c3f8df346eabe8,https://www.researchgate.net/profile/Manjunath_Aradhya/publication/254461422_The_study_of_different_similarity_measure_techniques_in_recognition_of_handwritten_characters/links/0046352049dae0d044000000.pdf,,,http://doi.acm.org/10.1145/2345396.2345524
+1da5fc63d66fbf750b0e15c5ef6d4274ca73cca1,,,,
+1d3dd9aba79a53390317ec1e0b7cd742cba43132,http://www.cise.ufl.edu/~dihong/assets/Gong_A_Maximum_Entropy_2015_CVPR_paper.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/3B_092_ext.pdf
+1d0dd20b9220d5c2e697888e23a8d9163c7c814b,http://pdfs.semanticscholar.org/1d0d/d20b9220d5c2e697888e23a8d9163c7c814b.pdf,,https://doi.org/10.5244/C.29.139,http://www.bmva.org/bmvc/2015/papers/paper139/abstract139.pdf
+1d5aad4f7fae6d414ffb212cec1f7ac876de48bf,http://biometrics.cse.msu.edu/Publications/Face/WangJain_FaceRetriever_ICB15.pdf,,https://doi.org/10.1109/ICB.2015.7139112,http://www.cse.msu.edu/rgroups/biometrics/Publications/Face/WangJain_FaceRetriever_ICB15.pdf
+1db23a0547700ca233aef9cfae2081cd8c5a04d7,http://pdfs.semanticscholar.org/1db2/3a0547700ca233aef9cfae2081cd8c5a04d7.pdf,,,http://ijecs.in/issue/v4-i5/44%20ijecs.pdf
+1d7dde30b8d0f75576f4a23b75b8350071fd4839,,,,
+1d10010ea7af43d59e1909d27e4e0e987264c667,,,https://doi.org/10.1016/j.neunet.2004.06.006,
+1d97735bb0f0434dde552a96e1844b064af08f62,http://www.apsipa.org/proceedings_2015/pdf/290.pdf,,https://doi.org/10.1109/APSIPA.2015.7415432,
+1d3e01d5e2721dcfafe5a3b39c54ee1c980350bb,http://research.microsoft.com/en-us/um/people/jiansun/papers/CVPR12_FaceAlignRegression.pdf,,https://doi.org/10.1007/s11263-013-0667-3,http://www.tpbin.com/Uploads/Subjects/c94b2e5f-171c-4387-8fe1-e005afe2f0cd.pdf
+1dff919e51c262c22630955972968f38ba385d8a,http://pdfs.semanticscholar.org/1dff/919e51c262c22630955972968f38ba385d8a.pdf,,,http://www.cs.pitt.edu/~litman/courses/ads/readings/Pantic.M-ProcIEEE2003.pdf
+1de8f38c35f14a27831130060810cf9471a62b45,http://www.psy.miami.edu/faculty/dmessinger/c_c/rsrcs/rdgs/emot/Unsupervised_Discovery.IJCompVis.2017.pdf,,https://doi.org/10.1007/s11263-017-0989-7,http://www.pitt.edu/~jeffcohn/biblio/NIHMS851317_2017.pdf
+1da83903c8d476c64c14d6851c85060411830129,http://pdfs.semanticscholar.org/90c3/b003b85bd60ae06630bcef6abc03c3b1ef96.pdf,,,http://www.researchgate.net/profile/Wangmeng_Zuo/publication/271771444_Iterated_Support_Vector_Machines_for_Distance_Metric_Learning/links/54d473800cf2970e4e6338d2.pdf
+1dae2f492d3ca2351349a73df6ee8a99b05ffc30,,,https://doi.org/10.1137/110842570,
+1d6068631a379adbcff5860ca2311b790df3a70f,http://pdfs.semanticscholar.org/c322/b1b998ec8f1892b29a1ebcbdc2f62e644cf1.pdf,,https://doi.org/10.1016/j.neucom.2014.04.072,http://www.ee.ucr.edu/~lan/papers/AnNeurocomputing14.pdf
+1dacc2f4890431d867a038fd81c111d639cf4d7e,http://pdfs.semanticscholar.org/1dac/c2f4890431d867a038fd81c111d639cf4d7e.pdf,,,"http://socrates.berkeley.edu/~akring/Campellone,%20Fisher,%20&%20Kring%202016.pdf"
+1dc6c0ad19b41e5190fc9fe50e3ae27f49f18fa2,http://www.researchgate.net/profile/Stefano_Alletto/publication/265611795_Head_Pose_Estimation_in_First-Person_Camera_Views/links/5416b5ef0cf2788c4b35e14b.pdf,,https://doi.org/10.1109/ICPR.2014.718,
+1d6d6399fd98472012edb211981d5eb8370a07b0,,,,
+1de690714f143a8eb0d6be35d98390257a3f4a47,http://www.cs.fsu.edu/~liux/research/publications/papers/waring-liu-face-detection-smcb-2005.pdf,,https://doi.org/10.1109/TSMCB.2005.846655,http://www.cs.fsu.edu/~liux/research/pub/papers/smcb-25-2005-waring-detection.pdf
+1da1299088a6bf28167c58bbd46ca247de41eb3c,,,https://doi.org/10.1109/ICASSP.2002.5745055,
+1d6c09019149be2dc84b0c067595f782a5d17316,http://pdfs.semanticscholar.org/3e27/b747e272c2ab778df92ea802d30af15e43d6.pdf,,,https://static.googleusercontent.com/media/research.google.com/en//youtube8m/workshop2017/c08.pdf
+1d58d83ee4f57351b6f3624ac7e727c944c0eb8d,http://parnec.nuaa.edu.cn/xtan/paper/amfg07_talk.pdf,,https://doi.org/10.1007/978-3-540-75690-3_13,http://eprints.pascal-network.org/archive/00003658/01/08-x-tan-amfg2007.pdf
+71d786fdb563bdec6ca0bbf69eba8e3f37c48c6f,,,https://doi.org/10.1109/SMC.2016.7844680,
+710c3aaffef29730ffd909a63798e9185f488327,,,https://doi.org/10.1109/ICPR.2016.7900095,
+71a9d7cf8cf1e206cb5fa18795f5ab7588c61aba,,,https://doi.org/10.1109/TIM.2011.2141270,
+71b376dbfa43a62d19ae614c87dd0b5f1312c966,http://www.cs.cmu.edu/~ltrutoiu/pdfs/FG2013_trutoiu.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2013.6553736
+71b07c537a9e188b850192131bfe31ef206a39a0,http://pdfs.semanticscholar.org/71b0/7c537a9e188b850192131bfe31ef206a39a0.pdf,,https://doi.org/10.1016/j.imavis.2016.01.002,https://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_2016_imavis.pdf
+71fd29c2ae9cc9e4f959268674b6b563c06d9480,http://pdfs.semanticscholar.org/71fd/29c2ae9cc9e4f959268674b6b563c06d9480.pdf,,,https://arxiv.org/pdf/1711.05858v1.pdf
+71e95c3a31dceabe9cde9f117615be8bf8f6d40e,,,https://doi.org/10.1109/ICIP.2010.5653024,
+71f07c95a2b039cc21854c602f29e5be053f2aba,,,https://doi.org/10.1007/s00138-010-0250-7,
+7123e510dea783035b02f6c35e35a1a09677c5ab,,,https://doi.org/10.1109/ICPR.2016.7900297,
+71f36c8e17a5c080fab31fce1ffea9551fc49e47,http://openaccess.thecvf.com/content_cvpr_2014/papers/Zhang_Predicting_Failures_of_2014_CVPR_paper.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Zhang_Predicting_Failures_of_2014_CVPR_paper.pdf
+715d3eb3665f46cd2fab74d35578a72aafbad799,,,,http://doi.ieeecomputersociety.org/10.1109/WI-IAT.2013.118
+7177649ece5506b315cb73c36098baac1681b8d2,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.130
+71e6a46b32a8163c9eda69e1badcee6348f1f56a,http://pdfs.semanticscholar.org/71e6/a46b32a8163c9eda69e1badcee6348f1f56a.pdf,,,http://www.fxpal.com/publications/visually-interpreting-names-as-demographic-attributes-by-exploiting-click-through-data.pdf
+713594c18978b965be87651bb553c28f8501df0a,http://pdfs.semanticscholar.org/fbfc/a34d52422cf8eac9d92d68dd16f95db5ef36.pdf,,,http://www.aaai.org/ocs/index.php/AAAI/AAAI16/paper/view/11883
+718824256b4461d62d192ab9399cfc477d3660b4,http://pdfs.semanticscholar.org/7188/24256b4461d62d192ab9399cfc477d3660b4.pdf,,,http://www.mmk.ei.tum.de/publ/pdf/11/11sch4.pdf
+71d68af11df855f886b511e4fc1635c1e9e789b0,,,https://doi.org/10.1109/TCSVT.2011.2133210,
+719a5286611c2a89890f713af54f4a00d10967e6,,,,
+71bbda43b97e8dc8b67b2bde3c873fa6aacd439f,,,https://doi.org/10.1016/j.patcog.2015.09.012,
+718d3137adba9e3078fa1f698020b666449f3336,http://pdfs.semanticscholar.org/718d/3137adba9e3078fa1f698020b666449f3336.pdf,,,http://thesai.org/Downloads/Volume8No10/Paper_48-Accuracy_based_Feature_Ranking_Metric.pdf
+7196b3832065aec49859c61318037b0c8c12363a,,,https://doi.org/10.1007/s11432-014-5151-3,
+716d6c2eb8a0d8089baf2087ce9fcd668cd0d4c0,http://pdfs.semanticscholar.org/ec7f/c7bf79204166f78c27e870b620205751fff6.pdf,,,http://pages.cs.wisc.edu/~bmsmith/projects/2016/face-landmarks-3d/SmithBMVC2016_poster.pdf
+71bece8ec4934e3034f76d8ba19199c5b8ec52ea,,,,
+7171b46d233810df57eaba44ccd8eabd0ad1f53a,http://pdfs.semanticscholar.org/7171/b46d233810df57eaba44ccd8eabd0ad1f53a.pdf,,,http://mmlab.ie.cuhk.edu.hk/projects/DeepFaceClustering/support/sm.pdf
+71f9861df104b90399dc15e12bbb14cd03f16e0b,,,,http://doi.ieeecomputersociety.org/10.1109/CGIV.2009.7
+71e56f2aebeb3c4bb3687b104815e09bb4364102,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Guo_Video_Co-segmentation_for_2013_ICCV_paper.pdf,,,https://www.ece.nus.edu.sg/stfpage/eleclf/Video%20Co-segmentation%20for%20Meaningful%20Action%20Extraction.pdf
+7118162a994c564004d167018c0048386f408dd6,,,,
+711bb5f63139ee7a9b9aef21533f959671a7d80e,http://pdfs.semanticscholar.org/711b/b5f63139ee7a9b9aef21533f959671a7d80e.pdf,,,https://aaltodoc.aalto.fi/bitstream/handle/123456789/2973/isbn9789512291342.pdf?isAllowed=y&sequence=1
+76fd801981fd69ff1b18319c450cb80c4bc78959,http://pdfs.semanticscholar.org/76fd/801981fd69ff1b18319c450cb80c4bc78959.pdf,,,http://aclweb.org/anthology/W15-0111
+76dc11b2f141314343d1601635f721fdeef86fdb,http://pdfs.semanticscholar.org/8d19/1804f5b260807dac107b89a5837ac15857aa.pdf,,https://doi.org/10.1007/978-3-642-03999-7_4,http://epubs.surrey.ac.uk/7129/2/windeatt_suema_chap08.pdf
+76673de6d81bedd6b6be68953858c5f1aa467e61,http://pdfs.semanticscholar.org/8883/2abb9082af6a1395e1b9bd3d4c1b46d00616.pdf,,https://doi.org/10.1007/978-3-642-33885-4_3,http://ttic.uchicago.edu/~smaji/papers/lexiconDiscoveryECCVWS2012.pdf
+7644b3a0871b8e0e7e1cdf06099e295f1e5fbdf7,,,https://doi.org/10.1007/s11063-015-9464-z,
+7643861bb492bf303b25d0306462f8fb7dc29878,https://www-i6.informatik.rwth-aachen.de/publications/download/991/Hanselmann-FG-2015.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2015.7163136
+760a712f570f7a618d9385c0cee7e4d0d6a78ed2,http://pdfs.semanticscholar.org/760a/712f570f7a618d9385c0cee7e4d0d6a78ed2.pdf,,,http://c2inet.sce.ntu.edu.sg/ivor/publication/KSR.pdf
+76669f166ddd3fb830dbaacb3daa875cfedc24d9,,,https://doi.org/10.1109/ICPR.2016.7899840,
+7698ba9fd1f49157ca2666a93311afbf1ff4e66c,http://www.ics.uci.edu/~dramanan/papers/dpm_acm.pdf,,,http://doi.acm.org/10.1145/2494532
+76ce3d35d9370f0e2e27cfd29ea0941f1462895f,http://pdfs.semanticscholar.org/76ce/3d35d9370f0e2e27cfd29ea0941f1462895f.pdf,,,
+76dff7008d9b8bf44ec5348f294d5518877c6182,,,https://doi.org/10.1016/j.imavis.2014.09.004,
+76b9fe32d763e9abd75b427df413706c4170b95c,http://pdfs.semanticscholar.org/76b9/fe32d763e9abd75b427df413706c4170b95c.pdf,,https://doi.org/10.1016/j.patcog.2012.06.022,http://www4.comp.polyu.edu.hk/~cslzhang/paper/GOD_SI_PR.pdf
+768c332650a44dee02f3d1d2be1debfa90a3946c,http://mmlab.ie.cuhk.edu.hk/archive/2004/CVPR04_Face3.pdf,,,http://mmlab.ie.cuhk.edu.hk/2004/CVPR04_Face3.pdf
+769461ff717d987482b28b32b1e2a6e46570e3ff,http://pdfs.semanticscholar.org/7694/61ff717d987482b28b32b1e2a6e46570e3ff.pdf,,,http://ceur-ws.org/Vol-1984/Mediaeval_2017_paper_13.pdf
+76d9f5623d3a478677d3f519c6e061813e58e833,http://pdfs.semanticscholar.org/76d9/f5623d3a478677d3f519c6e061813e58e833.pdf,,https://doi.org/10.1137/080720863,http://www.optimization-online.org/DB_FILE///2009/01/2189.pdf
+7668ce758af72df8e0a10d4b3cb0fd58092fe3e1,,,,
+76e2d7621019bd45a5851740bd2742afdcf62837,http://pdfs.semanticscholar.org/76e2/d7621019bd45a5851740bd2742afdcf62837.pdf,,https://doi.org/10.3390/s16071105,http://www.mdpi.com/1424-8220/16/7/1105/pdf
+765b2cb322646c52e20417c3b44b81f89860ff71,http://cg.cs.tsinghua.edu.cn/papers/TVCG_2013_poseshop.pdf,,,http://mmcheng.net/mftp/Papers/PoseShop.pdf
+76640cb1a683a479ce2e0d6681d821ff39126d63,,,https://doi.org/10.1109/IJCNN.2011.6033408,
+7644d90efef157e61fe4d773d8a3b0bad5feccec,http://pdfs.semanticscholar.org/7644/d90efef157e61fe4d773d8a3b0bad5feccec.pdf,,https://doi.org/10.1016/j.neucom.2006.11.007,http://www.pami.sjtu.edu.cn/people/zhangth/Linear%20local%20tangent%20space%20alignment%20and%20application%20to%20face%20recognition.pdf
+763158cef9d1e4041f24fce4cf9d6a3b7a7f08ff,http://pdfs.semanticscholar.org/7631/58cef9d1e4041f24fce4cf9d6a3b7a7f08ff.pdf,,,http://www.cs.huji.ac.il/~daphna/theses/Alon_Zweig_2013.pdf
+76d939f73a327bf1087d91daa6a7824681d76ea1,http://pdfs.semanticscholar.org/76d9/39f73a327bf1087d91daa6a7824681d76ea1.pdf,,https://doi.org/10.1007/978-3-642-53842-1_34,http://www.jaist.ac.jp/~chen-fan/publication/PSIVT2013.pdf
+760ba44792a383acd9ca8bef45765d11c55b48d4,http://class-specific.com/csf/papers/aes_tut.pdf,,,
+76a52ebfc5afd547f8b73430ec81456cf25ddd69,,,,http://doi.ieeecomputersociety.org/10.1109/AIPR.2014.7041914
+76d1c6c6b67e67ced1f19a89a5034dafc9599f25,,,,http://doi.acm.org/10.1145/2590296.2590315
+761304bbd259a9e419a2518193e1ff1face9fd2d,,,https://doi.org/10.1007/978-3-642-33885-4_57,
+7636f94ddce79f3dea375c56fbdaaa0f4d9854aa,http://pdfs.semanticscholar.org/7636/f94ddce79f3dea375c56fbdaaa0f4d9854aa.pdf,,,http://www.naturalspublishing.com/files/published/27twqs294z2r34.pdf
+1c80bc91c74d4984e6422e7b0856cf3cf28df1fb,http://refbase.cvc.uab.es/files/xrv2014d.pdf,,https://doi.org/10.1007/s11263-016-0885-6,https://arxiv.org/pdf/1408.5400v1.pdf
+1ce3a91214c94ed05f15343490981ec7cc810016,http://grail.cs.washington.edu/photobios/paper.pdf,,,http://www.cs.toronto.edu/~kyros/courses/2530/papers/Lecture-14/Kemelmacher2011.pdf
+1c2724243b27a18a2302f12dea79d9a1d4460e35,http://read.pudn.com/downloads157/doc/697237/kfd/Fisher+Kernel%20criterion%20for%20discriminant%20analysis.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2005.162
+1ca8c09abb73a02519d8db77e4fe107acfc589b6,http://sci.pitt.edu/wp-content/uploads/2018/03/111_Zhang.pdf,,,http://people.cs.pitt.edu/~kovashka/hussain_zhang_kovashka_ads_cvpr2017.pdf
+1ce4587e27e2cf8ba5947d3be7a37b4d1317fbee,https://arxiv.org/pdf/1611.00142v2.pdf,,,http://arxiv.org/abs/1611.00142
+1c30bb689a40a895bd089e55e0cad746e343d1e2,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Tran_Learning_Spatiotemporal_Features_ICCV_2015_paper.pdf,,,https://web.cs.hacettepe.edu.tr/~aykut/classes/spring2016/bil722/slides/w07-conv3d.pdf
+1c4ceae745fe812d8251fda7aad03210448ae25e,http://pdfs.semanticscholar.org/98d3/6d12cf6f2da181a9c1fb9d652ceaa57eb7bb.pdf,,https://doi.org/10.1155/S1110865704401073,http://www.asp.eurasipjournals.com/content/pdf/1687-6180-2004-948790.pdf
+1c3073b57000f9b6dbf1c5681c52d17c55d60fd7,http://pdfs.semanticscholar.org/1c30/73b57000f9b6dbf1c5681c52d17c55d60fd7.pdf,,,http://hal-enpc.archives-ouvertes.fr/docs/00/55/51/40/PDF/ThA_se_ENPC_charlotte_GHYS_19_mai_2010.pdf
+1cee993dc42626caf5dbc26c0a7790ca6571d01a,http://www.iri.upc.edu/people/fmoreno/Publications/2005/pdf/Moreno_siggraphsketch2005.pdf,,,http://www1.cs.columbia.edu/CAVE/publications/pdfs/Moreno_CVMP05.pdf
+1ca1b4f787712ede215030d22a0eea41534a601e,,,https://doi.org/10.1109/CVPRW.2010.5543609,
+1c147261f5ab1b8ee0a54021a3168fa191096df8,http://pdfs.semanticscholar.org/1c14/7261f5ab1b8ee0a54021a3168fa191096df8.pdf,,,http://file.scirp.org/pdf/JIS_2016041115320819.pdf
+1c5d7d02a26aa052ecc47d301de4929083e5d320,https://www.ll.mit.edu/news/avec2014_mitll.pdf,,,http://web.mit.edu/dmehta/www/docs/WilliamsonAVEC2014%20Vocal%20and%20facial%20biomarkers%20of%20depression%20based%20on%20motor%20incoordination%20and%20timing.pdf
+1c17450c4d616e1e1eece248c42eba4f87de9e0d,http://pdfs.semanticscholar.org/d269/39a00a8d3964de612cd3faa86764343d5622.pdf,,https://doi.org/10.5244/C.29.55,http://www.bmva.org/bmvc/2015/papers/paper055/abstract055.pdf
+1c93b48abdd3ef1021599095a1a5ab5e0e020dd5,http://www.stat.ucla.edu/~sczhu/papers/PAMI_FaceAging.pdf,,,http://www.stat.ucla.edu/~sczhu/papers/FaceAging_PAMI.pdf
+1c1f957d85b59d23163583c421755869f248ceef,http://homepages.rpi.edu/~wuy9/ICCV15/FLD_iccv15.pdf,,,http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Wu_Robust_Facial_Landmark_ICCV_2015_paper.pdf
+1cbd3f96524ca2258fd2d5c504c7ea8da7fb1d16,http://pdfs.semanticscholar.org/1cbd/3f96524ca2258fd2d5c504c7ea8da7fb1d16.pdf,,https://doi.org/10.5220/0004828606710678,http://www.researchgate.net/profile/Markus_Kaechele/publication/266815457_Fusion_of_audio-visual_features_using_hierarchical_classifier_systems_for_the_recognition_of_affective_states_and_the_state_of_depression/links/546c6a090cf257ec78ffea7a.pdf
+1cad5d682393ffbb00fd26231532d36132582bb4,http://pdfs.semanticscholar.org/1cad/5d682393ffbb00fd26231532d36132582bb4.pdf,,,http://export.arxiv.org/pdf/1708.00042
+1c1a98df3d0d5e2034ea723994bdc85af45934db,http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W11/papers/Jaiswal_Guided_Unsupervised_Learning_2013_ICCV_paper.pdf,,,http://www.researchgate.net/profile/Michel_Valstar/publication/262361649_Guided_Unsupervised_Learning_of_Mode_Specific_Models_for_Facial_Point_Detection_in_the_Wild/links/54006a5b0cf24c81027deadb.pdf
+1ca815327e62c70f4ee619a836e05183ef629567,http://www.humansensing.cs.cmu.edu/sites/default/files/Xiong_Global_Supervised_Descent_2015_CVPR_paper.pdf,,https://doi.org/10.1109/CVPR.2015.7298882,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2A_048.pdf
+1cb0c11620bde2734c1a428c789158ffff0d6c7b,,,,http://doi.ieeecomputersociety.org/10.1109/BigMM.2016.62
+1c5a5d58a92c161e9ba27e2dfe490e7caaee1ff5,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2015.7163119
+1c4404885443b65b7cbda3c131e54f769fbd827d,,,,
+1c6be6874e150898d9db984dd546e9e85c85724e,http://research.microsoft.com/~szli/papers/WHT-CVPR2004.pdf,,,http://www.researchgate.net/profile/Yangsheng_Wang/publication/4082380_Generalized_quotient_image/links/0c9605320fb0488f51000000.pdf
+1ce29d6b820ed4a24da27b76ffd9605d5b3b10b5,,,https://doi.org/10.1016/j.imavis.2015.01.007,
+1cfe8c1d341dbf8cc43040b37ca3552385adb10b,,,,http://doi.acm.org/10.1145/2461466.2461473
+1c65f3b3c70e1ea89114f955624d7adab620a013,http://pdfs.semanticscholar.org/ef34/cc2a26e88abd6a03d1a831c750440c6147d2.pdf,,,http://www.imaging.org/site/PDFS/Reporter/Articles/2011_26/REP26_2_EI2011_MEHTA_7881_23.pdf
+1c530de1a94ac70bf9086e39af1712ea8d2d2781,http://pdfs.semanticscholar.org/1c53/0de1a94ac70bf9086e39af1712ea8d2d2781.pdf,,,http://www.ijcai.org/Proceedings/16/Papers/322.pdf
+82f8652c2059187b944ce65e87bacb6b765521f6,http://pdfs.semanticscholar.org/82f8/652c2059187b944ce65e87bacb6b765521f6.pdf,,,http://www.cs.utexas.edu/~sjhwang/proposal.pdf
+82e1692467969940a6d6ac40eae606b8b4981f7e,,,https://doi.org/10.1109/ICMEW.2012.56,
+82bef8481207de9970c4dc8b1d0e17dced706352,http://pdfs.semanticscholar.org/82be/f8481207de9970c4dc8b1d0e17dced706352.pdf,,,http://dev.pubs.doc.ic.ac.uk/Pantic-SMC04/Pantic-SMC04.pdf
+8274069feeff6392b6c5d45d8bfaaacd36daedad,,,,http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019312
+826015d9ade1637b3fcbeca071e3137d3ac1ef56,,,https://doi.org/10.1109/WACV.2017.84,
+82d2af2ffa106160a183371946e466021876870d,http://pdfs.semanticscholar.org/82d2/af2ffa106160a183371946e466021876870d.pdf,,,https://arxiv.org/pdf/1707.06440v1.pdf
+82be2ede6b7613286b80c3e2afe3b5353f322bed,http://www.eecs.berkeley.edu/~jiayq/papers/iccv11_mm.pdf,,,http://www.eecs.berkeley.edu/~trevor/iccv11-mm.pdf
+82ccd62f70e669ec770daf11d9611cab0a13047e,http://www.csse.uwa.edu.au/~ajmal/papers/Farshid_DICTA2013.pdf,,https://doi.org/10.1109/DICTA.2013.6691530,
+828d7553a45eb0c3132e406105732a254369eb4d,,,https://doi.org/10.1016/j.neunet.2017.09.001,
+82c303cf4852ad18116a2eea31e2291325bc19c3,http://pdfs.semanticscholar.org/82c3/03cf4852ad18116a2eea31e2291325bc19c3.pdf,,,http://www.joig.org/uploadfile/2014/0516/20140516015550766.pdf
+8210fd10ef1de44265632589f8fc28bc439a57e6,http://www.ytzhang.net/files/publications/2015-tifs-sup-ae.pdf,,https://doi.org/10.1109/TIFS.2015.2446438,
+82a4a35b2bae3e5c51f4d24ea5908c52973bd5be,http://pdfs.semanticscholar.org/82a4/a35b2bae3e5c51f4d24ea5908c52973bd5be.pdf,,,http://arxiv.org/abs/1408.3750
+82cd5a5fec8a27887a35f1ecec684ec55eefad73,http://www.researchgate.net/profile/Giuseppe_Boccignone/publication/265793480_Using_Sparse_Coding_for_Landmark_Localization_in_Facial_Expressions/links/541bf80b0cf241a65a0ba53a.pdf,,https://doi.org/10.1109/EUVIP.2014.7018369,https://www.researchgate.net/profile/Giuseppe_Boccignone/publication/265793480_Using_Sparse_Coding_for_Landmark_Localization_in_Facial_Expressions/links/541bf80b0cf241a65a0ba53a.pdf
+82953e7b3d28ccd1534eedbb6de7984c59d38cd4,,,https://doi.org/10.1109/TNNLS.2014.2356856,
+8229f2735a0db0ad41f4d7252129311f06959907,,,https://doi.org/10.1109/TIP.2011.2106794,
+82f4e8f053d20be64d9318529af9fadd2e3547ef,http://pdfs.semanticscholar.org/82f4/e8f053d20be64d9318529af9fadd2e3547ef.pdf,,,http://www.cse.msu.edu/rgroups/biometrics/Publications/SecureBiometrics/NagarNandakumarJain_MultibiometricCryptosystems_TIFS11_TechRep.pdf
+82b43bc9213230af9db17322301cbdf81e2ce8cc,http://pdfs.semanticscholar.org/82b4/3bc9213230af9db17322301cbdf81e2ce8cc.pdf,,,https://arxiv.org/pdf/1704.03805v3.pdf
+82d781b7b6b7c8c992e0cb13f7ec3989c8eafb3d,http://pdfs.semanticscholar.org/82d7/81b7b6b7c8c992e0cb13f7ec3989c8eafb3d.pdf,,,http://shodhganga.inflibnet.ac.in/bitstream/10603/9927/13/13_references.pdf
+82e66c4832386cafcec16b92ac88088ffd1a1bc9,http://pdfs.semanticscholar.org/82e6/6c4832386cafcec16b92ac88088ffd1a1bc9.pdf,,,http://elijah.cs.cmu.edu/DOCS/CMU-CS-16-118.pdf
+82d79658805f6c1aedf7b0b88b47b9555584d7ae,http://cheonji.kaist.ac.kr/pdfsrc/ic/2008_KHAn_IROS.pdf,,https://doi.org/10.1109/IROS.2008.4650742,
+82dad0941a7cada11d2e2f2359293fe5fabf913f,,,https://doi.org/10.1109/ICIP.2017.8296810,
+826c66bd182b54fea3617192a242de1e4f16d020,http://www.mirlab.org/conference_papers/International_Conference/ICASSP%202017/pdfs/0001602.pdf,,https://doi.org/10.1109/ICASSP.2017.7952427,
+499f1d647d938235e9186d968b7bb2ab20f2726d,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Xiong_Face_Recognition_via_2013_ICCV_paper.pdf,,,http://www.ee.columbia.edu/~wliu/ICCV13_face_poster.pdf
+4919663c62174a9bc0cc7f60da8f96974b397ad2,https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/EBIF_5-2-2010_v_5.pdf,,https://doi.org/10.1109/ICIP.2010.5651440,http://research.microsoft.com/pubs/131914/EBIF_5-2-2010_v_5.pdf
+49f70f707c2e030fe16059635df85c7625b5dc7e,http://pdfs.semanticscholar.org/55b7/59b3e94088488334e3af2d17710c5e1fce4b.pdf,,https://doi.org/10.1049/iet-bmt.2014.0033,http://digital.cs.usu.edu/~xqi/Promotion/IETBio.FRProof.14.pdf
+493bc7071e35e7428336a515d1d26020a5fb9015,,,https://doi.org/10.1109/ACSSC.2013.6810420,
+4967b0acc50995aa4b28e576c404dc85fefb0601,http://pdfs.semanticscholar.org/4967/b0acc50995aa4b28e576c404dc85fefb0601.pdf,,,http://www.cisjournal.org/journalofcomputing/archive/vol4no1/vol4no1_2.pdf
+49820ae612b3c0590a8a78a725f4f378cb605cd1,http://pdfs.semanticscholar.org/4982/0ae612b3c0590a8a78a725f4f378cb605cd1.pdf,,https://doi.org/10.1007/978-3-319-16634-6_13,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ACCV_2014/pages/workshop12/pdffiles/w12-p4.pdf
+49dd4b359f8014e85ed7c106e7848049f852a304,http://pdfs.semanticscholar.org/49dd/4b359f8014e85ed7c106e7848049f852a304.pdf,,https://doi.org/10.1016/j.patcog.2010.05.009,https://www.researchgate.net/profile/Zhouchen_Lin/publication/236023274_Feature_extraction_by_learning_Lorentzian_metric_tensor_and_its_extensions/links/0deec515d63a9bb7d8000000.pdf
+4958c06da5581fd0b4904d3bf0ee09958ecdba5b,,,https://doi.org/10.1016/j.knosys.2016.12.005,
+492afe8f07de6225f70b72c922df83effd909334,,,,
+49e85869fa2cbb31e2fd761951d0cdfa741d95f3,http://studentnet.cs.manchester.ac.uk/pgt/COMP61021/reference/adaptive-manifold-learning.pdf,,,http://papers.nips.cc/paper/2560-adaptive-manifold-learning
+49ed46d45d7a9cbb1077d6f7cf151a63c2f02cab,,,,
+49659fb64b1d47fdd569e41a8a6da6aa76612903,http://pdfs.semanticscholar.org/4965/9fb64b1d47fdd569e41a8a6da6aa76612903.pdf,,,http://behav.zoology.unibe.ch/sysuif/uploads/files/Mu__ller_etal_2015_Dogs_Can_Discriminate_Emotional_Expressions.pdf
+490a217a4e9a30563f3a4442a7d04f0ea34442c8,http://pdfs.semanticscholar.org/490a/217a4e9a30563f3a4442a7d04f0ea34442c8.pdf,,,http://airccse.org/journal/ijscai/papers/2413ijscai05.pdf
+4909ed22b1310f1c6f2005be5ce3349e3259ff6a,,,https://doi.org/10.1109/ROBIO.2009.4913106,
+49e4f05fa98f63510de76e7abd8856ff8db0f38d,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.110
+49a7949fabcdf01bbae1c2eb38946ee99f491857,http://pdfs.semanticscholar.org/49a7/949fabcdf01bbae1c2eb38946ee99f491857.pdf,,,http://arxiv.org/abs/1710.00974
+4934d44aa89b6d871eb6709dd1d1eebf16f3aaf1,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Luo_A_Deep_Sum-Product_2013_ICCV_paper.pdf,,,http://www.ee.cuhk.edu.hk/~xgwang/papers/luoWTiccv13a.pdf
+4932b929a2e09ddebedcb1abe8c62f269e7d4e33,,,https://doi.org/10.1109/SIU.2016.7496076,
+492116d16a39eb54454c7ffb1754cea27ad3a171,,,,http://doi.acm.org/10.1145/3132525.3134823
+499343a2fd9421dca608d206e25e53be84489f44,http://pdfs.semanticscholar.org/4993/43a2fd9421dca608d206e25e53be84489f44.pdf,,,http://www.ijtes.com/upload/8.%20FINAL%20PAPER.pdf
+498fd231d7983433dac37f3c97fb1eafcf065268,http://pdfs.semanticscholar.org/498f/d231d7983433dac37f3c97fb1eafcf065268.pdf,,,http://arxiv.org/abs/1701.03102
+49e1aa3ecda55465641b2c2acc6583b32f3f1fc6,http://pdfs.semanticscholar.org/49e1/aa3ecda55465641b2c2acc6583b32f3f1fc6.pdf,,,http://www.ijetae.com/files/Volume2Issue5/IJETAE_0512_21.pdf
+49394a5e0ca1d4bb77d8c9bfa963b8b8cb761ecf,,,,
+499f2b005e960a145619305814a4e9aa6a1bba6a,http://pdfs.semanticscholar.org/499f/2b005e960a145619305814a4e9aa6a1bba6a.pdf,,https://doi.org/10.1117/12.2042506,http://sip.unige.ch/articles/2014/SPIE-2014-EXTENTION-FINAL_SVv1.pdf
+497bf2df484906e5430aa3045cf04a40c9225f94,http://pdfs.semanticscholar.org/497b/f2df484906e5430aa3045cf04a40c9225f94.pdf,,https://doi.org/10.3390/s131216682,http://www.mdpi.com/1424-8220/13/12/16682/pdf
+492f41e800c52614c5519f830e72561db205e86c,http://openaccess.thecvf.com/content_cvpr_2017/papers/Lv_A_Deep_Regression_CVPR_2017_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.393
+496f3d14cf466f054d395a3c71fa2cd6a3dda61d,,,,http://doi.acm.org/10.1145/3009977.3010055
+493ec9e567c5587c4cbeb5f08ca47408ca2d6571,http://pdfs.semanticscholar.org/493e/c9e567c5587c4cbeb5f08ca47408ca2d6571.pdf,,https://doi.org/10.1186/s40294-016-0034-7,http://casmodeling.springeropen.com/track/pdf/10.1186/s40294-016-0034-7?site=casmodeling.springeropen.com
+49570b41bd9574bd9c600e24b269d945c645b7bd,http://pdfs.semanticscholar.org/4957/0b41bd9574bd9c600e24b269d945c645b7bd.pdf,,,https://pdfs.semanticscholar.org/4957/0b41bd9574bd9c600e24b269d945c645b7bd.pdf
+49fdafef327069516d887d8e69b5e96c983c3dd0,,,https://doi.org/10.1109/DICTA.2017.8227433,
+496074fcbeefd88664b7bd945012ca22615d812e,http://pdfs.semanticscholar.org/4960/74fcbeefd88664b7bd945012ca22615d812e.pdf,,https://doi.org/10.3390/s16111805,http://www.mdpi.com/1424-8220/16/11/1805/pdf
+496d62741e8baf3859c24bb22eaccd3043322126,,,,http://doi.ieeecomputersociety.org/10.1109/TKDE.2017.2728531
+49fe4f387ac7e5852a78b327ec42cc7300c5f8e0,,,https://doi.org/10.1007/s11042-014-2055-6,
+40205181ed1406a6f101c5e38c5b4b9b583d06bc,http://pdfs.semanticscholar.org/4020/5181ed1406a6f101c5e38c5b4b9b583d06bc.pdf,,https://doi.org/10.2197/ipsjtcva.1.115,http://chenlab.ece.cornell.edu/people/Andy/publications/Andy_files/cva1042.pdf
+40dab43abef32deaf875c2652133ea1e2c089223,http://pdfs.semanticscholar.org/40da/b43abef32deaf875c2652133ea1e2c089223.pdf,,https://doi.org/10.1007/s12369-012-0145-z,http://d-nb.info/1027478891
+40b0fced8bc45f548ca7f79922e62478d2043220,http://pdfs.semanticscholar.org/40b0/fced8bc45f548ca7f79922e62478d2043220.pdf,,,http://papers.nips.cc/paper/5420-do-convnets-learn-correspondence
+405b43f4a52f70336ac1db36d5fa654600e9e643,http://pdfs.semanticscholar.org/405b/43f4a52f70336ac1db36d5fa654600e9e643.pdf,,,https://arxiv.org/pdf/1512.01320v2.pdf
+40b86ce698be51e36884edcc8937998979cd02ec,http://www.cs.bilkent.edu.tr/~duygulu/papers/SIU2006-face.pdf,,,
+40a74eea514b389b480d6fe8b359cb6ad31b644a,http://pdfs.semanticscholar.org/7ac4/2be6c1f01ccc42b28c0bfa77856cc75b65a2.pdf,,,http://jmlr.org/proceedings/papers/v48/wiatowski16.html
+40ee38d7ff2871761663d8634c3a4970ed1dc058,http://pdfs.semanticscholar.org/40ee/38d7ff2871761663d8634c3a4970ed1dc058.pdf,,https://doi.org/10.1007/978-3-540-30126-4_83,http://www-users.cs.york.ac.uk/~nep/research/papers/iciar04heseltine.pdf
+402f6db00251a15d1d92507887b17e1c50feebca,http://pdfs.semanticscholar.org/402f/6db00251a15d1d92507887b17e1c50feebca.pdf,,,https://arxiv.org/pdf/1712.00195v1.pdf
+404042a1dcfde338cf24bc2742c57c0fb1f48359,http://pdfs.semanticscholar.org/4040/42a1dcfde338cf24bc2742c57c0fb1f48359.pdf,,,http://www.cs.zju.edu.cn/people/gpan/publication/JIG03-localizationsurvey.pdf
+4015e8195db6edb0ef8520709ca9cb2c46f29be7,http://pdfs.semanticscholar.org/4015/e8195db6edb0ef8520709ca9cb2c46f29be7.pdf,,,https://comserv.cs.ut.ee/home/files/thesis.pdf?reference=4015E8195DB6EDB0EF8520709CA9CB2C46F29BE7&study=ATILoputoo
+404776aa18031828f3d5dbceed39907f038a47fe,http://pdfs.semanticscholar.org/4047/76aa18031828f3d5dbceed39907f038a47fe.pdf,,https://doi.org/10.1016/j.neucom.2014.06.044,http://vipl.ict.ac.cn/homepage/rpwang/publications/Sparsely%20Encoded%20Local%20Descriptor%20for%20Face%20Verification_NECO2015.pdf
+407bb798ab153bf6156ba2956f8cf93256b6910a,http://pdfs.semanticscholar.org/407b/b798ab153bf6156ba2956f8cf93256b6910a.pdf,,,https://arxiv.org/pdf/1803.08134v1.pdf
+400e6c777d5894db2f6538c8ebd1124352b1c064,http://www.ee.ucr.edu/~lan/papers/FG13.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2013.6553790
+40fb4e8932fb6a8fef0dddfdda57a3e142c3e823,http://gavrila.net/Publications/cvpr08.pdf,,,http://www.gavrila.net/Publications/cvpr08.pdf
+4033ac52dba394e390a86cd149b9838f1d7834b5,,,https://doi.org/10.1109/ICMLC.2012.6359009,
+406c5aeca71011fd8f8bd233744a81b53ccf635a,,,,
+405526dfc79de98f5bf3c97bf4aa9a287700f15d,http://pdfs.semanticscholar.org/8a6c/57fcd99a77982ec754e0b97fd67519ccb60c.pdf,,,http://arxiv.org/pdf/1505.02108v2.pdf
+40cd062438c280c76110e7a3a0b2cf5ef675052c,http://pdfs.semanticscholar.org/40cd/062438c280c76110e7a3a0b2cf5ef675052c.pdf,,,http://liris.cnrs.fr/Documents/Liris-6108.pdf
+40a5b32e261dc5ccc1b5df5d5338b7d3fe10370d,http://pdfs.semanticscholar.org/40a5/b32e261dc5ccc1b5df5d5338b7d3fe10370d.pdf,,,https://arxiv.org/pdf/1608.06010v2.pdf
+40a1935753cf91f29ffe25f6c9dde2dc49bf2a3a,http://pdfs.semanticscholar.org/cea3/8a329e98900923e9c962b0d58bf8e15405d6.pdf,,,http://www.cs.utah.edu/~jeffp/papers/alternative.pdf
+40a9f3d73c622cceee5e3d6ca8faa56ed6ebef60,http://www.site.uottawa.ca/~wslee/publication/CCECE2006.pdf,,https://doi.org/10.1109/CCECE.2006.277379,
+4014d74e8f5ea4d76c2c1add81d0c88d6e342478,,,,http://doi.acm.org/10.1145/3136755.3143010
+4014e8c1a1b49ad2b9b2c45c328ec9f1fd56f676,,,https://doi.org/10.1109/IJCNN.2017.7966191,
+40389b941a6901c190fb74e95dc170166fd7639d,http://pdfs.semanticscholar.org/56f7/dad4d6d98292061a2c1e399d9a0ecfbbbde3.pdf,,,http://mplab.ucsd.edu/wp-content/uploads/chapter.pdf
+4068574b8678a117d9a434360e9c12fe6232dae0,http://www.visionmeetscognition.org/fpic2014/Camera_Ready/Paper%2031.pdf,,,https://ibug.doc.ic.ac.uk/media/uploads/documents/antonakos_automatic_2014_poster.pdf
+40c8cffd5aac68f59324733416b6b2959cb668fd,https://arxiv.org/pdf/1701.08341v1.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.80
+4097fef623185557bb1842501cfdc97f812fc66d,,,,http://doi.acm.org/10.1145/3126686.3126755
+40b10e330a5511a6a45f42c8b86da222504c717f,http://pdfs.semanticscholar.org/40b1/0e330a5511a6a45f42c8b86da222504c717f.pdf,,,http://etd.dtu.dk/thesis/223656/ep08_93.pdf
+40bb090a4e303f11168dce33ed992f51afe02ff7,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Deng_Marginal_Loss_for_CVPR_2017_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2017.251
+40ca925befa1f7e039f0cd40d57dbef6007b4416,https://arxiv.org/pdf/1706.07567v1.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2017.309
+40dd736c803720890d6bfc1e083f6050e35d8f7a,,,,http://doi.acm.org/10.1145/3139958.3140055
+4042bbb4e74e0934f4afbedbe92dd3e37336b2f4,http://pdfs.semanticscholar.org/b35a/6b2f335c28696eb78a02e0b30ee59a3e3fd2.pdf,,https://doi.org/10.1016/j.patrec.2008.04.013,https://ome.irp.nia.nih.gov/wnd-charm/PRL_2008.pdf
+4026dc62475d2ff2876557fc2b0445be898cd380,http://pdfs.semanticscholar.org/4026/dc62475d2ff2876557fc2b0445be898cd380.pdf,,https://doi.org/10.1007/11573548_116,http://www.researchgate.net/profile/Yong-Guk_Kim/publication/221622144_An_Affective_User_Interface_Based_on_Facial_Expression_Recognition_and_Eye-Gaze_Tracking/links/0c960516f7b83381c8000000.pdf
+40f06e5c052d34190832b8c963b462ade739cbf0,,,https://doi.org/10.1109/ICNC.2010.5583821,
+40f127fa4459a69a9a21884ee93d286e99b54c5f,http://graphics.tu-bs.de/media/publications/stengel2013resolution.pdf,,https://doi.org/10.1109/TIP.2013.2265885,http://www.cg.cs.tu-bs.de/media/publications/stengel2013resolution.pdf
+401e6b9ada571603b67377b336786801f5b54eee,http://pdfs.semanticscholar.org/401e/6b9ada571603b67377b336786801f5b54eee.pdf,,,http://www.umiacs.umd.edu/~arijit/Active_Image_Clustering_sup_material.pdf
+406431d2286a50205a71f04e0b311ba858fc7b6c,http://pdfs.semanticscholar.org/4064/31d2286a50205a71f04e0b311ba858fc7b6c.pdf,,,http://etheses.bham.ac.uk/4371/9/UjirH13PhD.pdf
+40854850a1ca24d9f1e62f2a0432edcbb5633f76,,,,
+40217a8c60e0a7d1735d4f631171aa6ed146e719,http://pdfs.semanticscholar.org/4021/7a8c60e0a7d1735d4f631171aa6ed146e719.pdf,,https://doi.org/10.1007/978-3-319-10605-2_30,http://www.cs.columbia.edu/~yli/papers/Parts_Localization_ECCV2014.pdf
+405cf40f3ce74210f7e9862b2b828ce002b409ed,,,https://doi.org/10.1109/IJCNN.2017.7966244,
+407a26fff7fac195b74de9fcb556005e8785a4e9,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.29
+2e20ed644e7d6e04dd7ab70084f1bf28f93f75e9,http://pdfs.semanticscholar.org/71f1/c8d39e1fbf1083a4616a3496f5c397a2daf5.pdf,,,http://www.cs.berkeley.edu/~feisha/pubs/nips08.pdf
+2eb37a3f362cffdcf5882a94a20a1212dfed25d9,http://pdfs.semanticscholar.org/2eb3/7a3f362cffdcf5882a94a20a1212dfed25d9.pdf,,,http://cdn.intechopen.com/pdfs/20587/InTech-Local_feature_based_face_recognition.pdf
+2e0addeffba4be98a6ad0460453fbab52616b139,http://pdfs.semanticscholar.org/3cd7/8b1f43ead1226554f450bafcb8fbe208b5f0.pdf,,,http://www.cs.cmu.edu/~jiangni/jiang_thesis.pdf
+2e36b63fdf1353425a57a0665b0d0274efe92963,,,,http://doi.acm.org/10.1145/3152771.3156179
+2e5d173ee0d1d7f88c335ade6a7b879b2d987ab4,,,https://doi.org/10.1109/ICASSP.2015.7178367,
+2e535b8cd02c2f767670ba47a43ad449fa1faad7,,,https://doi.org/10.1109/MSP.2017.2740460,
+2e091b311ac48c18aaedbb5117e94213f1dbb529,http://pdfs.semanticscholar.org/b1a1/a049f1d78f6e3d072236237c467292ccd537.pdf,,https://doi.org/10.1007/978-3-319-10599-4_6,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ECCV_2014/papers/8694/86940078.pdf
+2e1415a814ae9abace5550e4893e13bd988c7ba1,http://pdfs.semanticscholar.org/2e14/15a814ae9abace5550e4893e13bd988c7ba1.pdf,,,http://ijettjournal.org/2015/volume-21/number-3/IJETT-V21P226.pdf
+2e0e056ed5927a4dc6e5c633715beb762628aeb0,http://pdfs.semanticscholar.org/2e0e/056ed5927a4dc6e5c633715beb762628aeb0.pdf,,,http://www.researchgate.net/profile/Xian_Hua_Han/publication/221926748_Multilinear_Supervised_Neighborhood_Preserving_Embedding_Analysis_of_Local_Descriptor_Tensor/links/00463531e6d0c26e27000000.pdf
+2e9e07b871e7703c60d6849282174d99977ccea7,,,,
+2e8a0cc071017845ee6f67bd0633b8167a47abed,https://arxiv.org/pdf/1303.6021v1.pdf,,,http://arxiv.org/abs/1303.6021
+2e68190ebda2db8fb690e378fa213319ca915cf8,http://pdfs.semanticscholar.org/a705/804fa2e97ce23619b4f43da1b75fb138296d.pdf,,,http://arxiv.org/abs/1609.02612
+2e157e8b57f679c2f1b8e16d6e934f52312f08f6,http://pdfs.semanticscholar.org/2e15/7e8b57f679c2f1b8e16d6e934f52312f08f6.pdf,,,http://waset.org/publications/6318/2d-spherical-spaces-for-face-relighting-under-harsh-illumination
+2ee8900bbde5d3c81b7ed4725710ed46cc7e91cd,http://research.microsoft.com/users/byzhang/publications/20-81_01.pdf,,,https://pdfs.semanticscholar.org/2ee8/900bbde5d3c81b7ed4725710ed46cc7e91cd.pdf
+2e475f1d496456831599ce86d8bbbdada8ee57ed,http://www.l3s.de/~siersdorfer/sources/2015/www2015groupsourcing.pdf,,,http://doi.acm.org/10.1145/2736277.2741097
+2ef51b57c4a3743ac33e47e0dc6a40b0afcdd522,http://pdfs.semanticscholar.org/2ef5/1b57c4a3743ac33e47e0dc6a40b0afcdd522.pdf,,,http://arxiv.org/abs/1108.1122
+2ed7d95588200c8c738c7dd61b8338538e04ea30,,,https://doi.org/10.1109/ICIP.2010.5654063,
+2ee1ba1c3d4797fdae46d3d5f01db7ef5903dadd,,,https://doi.org/10.1016/j.neucom.2015.07.031,
+2e6cfeba49d327de21ae3186532e56cadeb57c02,http://openaccess.thecvf.com/content_ICCV_2017/papers/Wang_Real_Time_Eye_ICCV_2017_paper.pdf,,,http://homepages.rpi.edu/~wangk10/papers/wang2017_webcam.pdf
+2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d,,,https://doi.org/10.1109/CVPRW.2011.5981801,
+2ea247029ac1b8ded60023a369e8d259a8637bd2,,,,
+2ee817981e02c4709d65870c140665ed25b005cc,http://www.umiacs.umd.edu/users/rama/Publications/Patel_ICARCV_2010.pdf,,https://doi.org/10.1109/ICARCV.2010.5707955,http://www.umiacs.umd.edu/users/pvishalm/Conference_pub/ICARCV2010.pdf
+2e98329fdec27d4b3b9b894687e7d1352d828b1d,http://pdfs.semanticscholar.org/2e98/329fdec27d4b3b9b894687e7d1352d828b1d.pdf,,,http://www.aaai.org/ocs/index.php/FLAIRS/FLAIRS14/paper/download/7839/7835
+2e19371a2d797ab9929b99c80d80f01a1fbf9479,http://pdfs.semanticscholar.org/2e19/371a2d797ab9929b99c80d80f01a1fbf9479.pdf,,https://doi.org/10.1016/j.imavis.2015.09.003,http://www.cs.nott.ac.uk/~pszmv/Documents/2015IVC_L21.pdf
+2ebc35d196cd975e1ccbc8e98694f20d7f52faf3,http://pdfs.semanticscholar.org/2ebc/35d196cd975e1ccbc8e98694f20d7f52faf3.pdf,,,http://vision.seas.harvard.edu/papers/WideAngle_PAMI2013.pdf
+2e3d081c8f0e10f138314c4d2c11064a981c1327,http://arxiv.org/pdf/1603.06015v1.pdf,,https://doi.org/10.1007/s11263-017-0999-5,http://ibug.doc.ic.ac.uk/media/uploads/documents/1603.06015v1.pdf
+2ef1b1b5ed732634e005df779fd9b21da0ffe60c,,,https://doi.org/10.1016/j.image.2017.03.012,
+2ef328e035b2b5501ceddc0052615d4cebac6f1f,http://mi.eng.cam.ac.uk/~ss965/semantic_transform.pdf,,,http://mi.eng.cam.ac.uk/~cipolla/publications/inproceedings/2013-ICCV-Shankar-attrirbutes.pdf
+2e86402b354516d0a8392f75430156d629ca6281,https://arxiv.org/pdf/1604.03628v2.pdf,,,http://arxiv.org/pdf/1604.03628v2.pdf
+2e5b160892b70a1e846aa9dcdf132b8011937ec6,,,https://doi.org/10.1109/LSP.2017.2689921,
+2e27667421a7eeab278e0b761db4d2c725683c3f,,,https://doi.org/10.1007/s11042-013-1815-z,
+2ea78e128bec30fb1a623c55ad5d55bb99190bd2,http://pdfs.semanticscholar.org/2ea7/8e128bec30fb1a623c55ad5d55bb99190bd2.pdf,,https://doi.org/10.1007/978-3-319-59129-2_32,http://ies.anthropomatik.kit.edu/ies/download/publ/ies_2017_herrmann_low_resolution.pdf
+2e8eb9dc07deb5142a99bc861e0b6295574d1fbd,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Hejrati_Analysis_by_Synthesis_2014_CVPR_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2014.314
+2e0f5e72ad893b049f971bc99b67ebf254e194f7,http://pdfs.semanticscholar.org/2e0f/5e72ad893b049f971bc99b67ebf254e194f7.pdf,,https://doi.org/10.1007/978-3-642-37447-0_25,http://www.vision.ee.ethz.ch/en/publications/papers/proceedings/eth_biwi_00974.pdf
+2e3c893ac11e1a566971f64ae30ac4a1f36f5bb5,http://pdfs.semanticscholar.org/cb94/9e849b20ddc157aaf648dca1e8c71463c288.pdf,,,http://www.vision.ucla.edu/~vedaldi/assets/pubs/blaschko10simultaneous.pdf
+2ed3ce5cf9e262bcc48a6bd998e7fb70cf8a971c,http://pdfs.semanticscholar.org/6abe/c94e0af01d9706d73dfd91fd76139c7d99e0.pdf,,https://doi.org/10.3390/s17020275,https://www.preprints.org/manuscript/201701.0120/v1/download
+2e6776cd582c015b46faf616f29c98ce9cff51a2,,,https://doi.org/10.1109/TNN.2005.860849,
+2edc6df161f6aadbef9c12408bdb367e72c3c967,http://www.infomus.org/Events/proceedings/ICMI2014/icmi/p514.pdf,,,http://doi.acm.org/10.1145/2663204.2666278
+2eca099b90274fb28569f19ef945f43758f5b367,,,,
+2ec7d6a04c8c72cc194d7eab7456f73dfa501c8c,http://pdfs.semanticscholar.org/2ec7/d6a04c8c72cc194d7eab7456f73dfa501c8c.pdf,,,http://www.ijsrms.com/media/0002/4I28-IJSRMS0303119-v3-i4-pp164-169.pdf
+2eb9f1dbea71bdc57821dedbb587ff04f3a25f07,http://pdfs.semanticscholar.org/2eb9/f1dbea71bdc57821dedbb587ff04f3a25f07.pdf,,https://doi.org/10.1007/11825890_2,https://ibug.doc.ic.ac.uk/media/uploads/documents/Pantic-FaceAmI-PUBLISHED.pdf
+2e12c5ea432004de566684b29a8e148126ef5b70,,,https://doi.org/10.1007/s12193-015-0204-5,
+2e1fd8d57425b727fd850d7710d38194fa6e2654,http://www.cs.toronto.edu/~afsaneh/JamiesonEtAl2007.pdf,,,http://www.cs.utoronto.ca/~jamieson/Jamieson_ICCV07.pdf
+2e1b1969ded4d63b69a5ec854350c0f74dc4de36,http://pdfs.semanticscholar.org/2e1b/1969ded4d63b69a5ec854350c0f74dc4de36.pdf,,https://doi.org/10.1016/j.patcog.2011.07.022,http://www.busim.ee.boun.edu.tr/~sankur/SankurFolder/Jour_PR_3DAU_Detection_final.pdf
+2b286ed9f36240e1d11b585d65133db84b52122c,,,,http://doi.acm.org/10.1145/3130800.3130837
+2b8c5017633a82b15dbe0047cfc76ffdce462176,,,,
+2babf665198a91932a4ce557f627c28e7e8f31f2,,,,http://doi.acm.org/10.1145/3009977.3010004
+2be0ab87dc8f4005c37c523f712dd033c0685827,http://www3.ntu.edu.sg/home/EXDJiang/ICIP2013_4.pdf,,https://doi.org/10.1109/ICIP.2013.6738759,
+2b60fe300735ea7c63f91c1121e89ba66040b833,,,,
+2bb2ba7c96d40e269fc6a2d5384c739ff9fa16eb,http://jmcauley.ucsd.edu/data/amazon/sigir_draft.pdf,,,http://cseweb.ucsd.edu/~jmcauley/pdfs/sigir15.pdf
+2bbe89f61a8d6d4d6e39fdcaf8c185f110a01c78,http://www3.ntu.edu.sg/home/wanggang/TIFS15.pdf,,https://doi.org/10.1109/TIFS.2015.2408431,https://pdfs.semanticscholar.org/2bbe/89f61a8d6d4d6e39fdcaf8c185f110a01c78.pdf
+2b339ece73e3787f445c5b92078e8f82c9b1c522,http://pdfs.semanticscholar.org/7a2e/e06aaa3f342937225272951c0b6dd4309a7a.pdf,,https://doi.org/10.1007/978-3-319-46475-6_8,http://crcv.ucf.edu/papers/eccv2016/AssariIdreesShah_ECCV16_ReIdCrowds.pdf
+2b4d092d70efc13790d0c737c916b89952d4d8c7,http://pdfs.semanticscholar.org/2b4d/092d70efc13790d0c737c916b89952d4d8c7.pdf,,,http://journal.iis.sinica.edu.tw/paper/1/170093-2.pdf?cd=4E628A37ADB11E9D1
+2b7b55a4143ad23aa31f00b11efebdd8246231a8,,,,
+2b0ff4b82bac85c4f980c40b3dc4fde05d3cc23f,http://pdfs.semanticscholar.org/2b0f/f4b82bac85c4f980c40b3dc4fde05d3cc23f.pdf,,,http://www.globalcis.org/rnis/ppl/RNIS237PPL.pdf
+2b3ceb40dced78a824cf67054959e250aeaa573b,http://pdfs.semanticscholar.org/7493/4a2b65538f42701e15f7f532437db2beead2.pdf,,,http://yining-wang.com/private-sc.pdf
+2b300985a507533db3ec9bd38ade16a32345968e,,,https://doi.org/10.1007/s11042-015-3070-y,
+2be8e06bc3a4662d0e4f5bcfea45631b8beca4d0,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2B_028_ext.pdf,,,http://arxiv.org/abs/1505.05769
+2bf03e8fb775718ac9730524a176ddd189c0e457,,,,
+2b5005c2abf2d9a8c16afa50306b6959dfc72275,,,https://doi.org/10.1109/ICARCV.2010.5707216,
+2bcec23ac1486f4106a3aa588b6589e9299aba70,http://pdfs.semanticscholar.org/2bce/c23ac1486f4106a3aa588b6589e9299aba70.pdf,,https://doi.org/10.1007/978-3-319-46478-7_51,http://vision.cs.utexas.edu/aavl_workshop_eccv16/papers/AAVL_PID14.pdf
+2b0d14dbd079b3d78631117b1304d6c1579e1940,,,https://doi.org/10.1007/s11063-016-9524-z,
+2b773fe8f0246536c9c40671dfa307e98bf365ad,http://pdfs.semanticscholar.org/2b77/3fe8f0246536c9c40671dfa307e98bf365ad.pdf,,https://doi.org/10.1155/2013/106867,
+2b43100a13811b33cc9f905fa1334bfd8b1873ba,,,https://doi.org/10.1109/IVCNZ.2015.7761564,
+2bf08d4cb8d1201a9866ee7c4852bfcbf8f8e7f1,http://mplab.ucsd.edu/~jake/haar.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FGR.2006.61
+2be9144a1e66de127192b01907c862381f4011d1,http://www1.cs.columbia.edu/~belhumeur/conference/eye-iccv05.pdf,,,http://www.cs.columbia.edu/~belhumeu/conference/eye-iccv05.pdf
+2bab44d3a4c5ca79fb8f87abfef4456d326a0445,http://www.mirlab.org/conference_papers/International_Conference/ACM%202005/docs/mir25.pdf,,,http://viplab.dsi.unifi.it/~nunziati/files/mir2005_nunziati.pdf
+2b0102d77d3d3f9bc55420d862075934f5c85bec,http://openaccess.thecvf.com/content_cvpr_2016/papers/Shao_Slicing_Convolutional_Neural_CVPR_2016_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.606
+2b435ee691718d0b55d057d9be4c3dbb8a81526e,http://pdfs.semanticscholar.org/43ef/472c2c09d1ae2f2e5fc35d6d3ab7578658b4.pdf,,https://doi.org/10.5244/C.23.7,http://www-i6.informatik.rwth-aachen.de/publications/download/625/DreuwPhilippeSteingrubePascalHanselmannHaraldNeyHermann--SURF-FaceFaceRecognitionUnderViewpointConsistencyConstraints--2009.pdf
+2b2924af7ec219bd1fadcbd2c57014ed54efec86,,,,http://doi.ieeecomputersociety.org/10.1109/SSIAI.2014.6806053
+2b1327a51412646fcf96aa16329f6f74b42aba89,http://pdfs.semanticscholar.org/8296/cb7fea317fcd0a7ff6b7e4486ab869a7231e.pdf,,,https://arxiv.org/pdf/1511.03771v2.pdf
+2be1e2f2b7208fdf7a379da37a2097cfe52bc196,http://www2.cvl.isy.liu.se/Education/Graduate/artikelklubb/aryananda_icra09.pdf,,https://doi.org/10.1109/ROBOT.2009.5152362,https://pdfs.semanticscholar.org/a8d8/2439d91f7c6c5d0f907e3f30730d0f5bdf12.pdf
+2b5cb5466eecb131f06a8100dcaf0c7a0e30d391,https://www.cse.iitb.ac.in/~sharat/icvgip.org/icvgip2010/papers/53.sethuram.134.pdf,,,http://doi.acm.org/10.1145/1924559.1924608
+2b2e6e073fe0876fdf96a336cbc14de0217ce070,,,,
+2b64a8c1f584389b611198d47a750f5d74234426,http://pdfs.semanticscholar.org/fb11/6f00320a37d80ec32561d1ab9b795c943202.pdf,,https://doi.org/10.1007/978-3-319-10584-0_4,http://faculty.ucmerced.edu/mhyang/papers/eccv14_deblur.pdf
+2b632f090c09435d089ff76220fd31fd314838ae,http://openaccess.thecvf.com/content_ICCV_2017_workshops/papers/w23/Hajibabaei_Early_Adaptation_of_ICCV_2017_paper.pdf,,,http://www.vision.ee.ethz.ch/~timofter/publications/Hajibabaei-ICCVW-2017.pdf
+2b10a07c35c453144f22e8c539bf9a23695e85fc,http://pdfs.semanticscholar.org/2b10/a07c35c453144f22e8c539bf9a23695e85fc.pdf,,https://doi.org/10.1007/978-3-540-74549-5_26,http://www.researchgate.net/profile/Stan_Li3/publication/221383502_Standardization_of_Face_Image_Sample_Quality/links/0c960533189badd44c000000.pdf
+2b84630680e2c906f8d7ac528e2eb32c99ef203a,http://disi.unitn.it/~zen/data/acmmm14_zen3_orlando.pdf,,,http://disi.unitn.it/~zen/data/acmmm14_weareallnotequal.pdf
+2b507f659b341ed0f23106446de8e4322f4a3f7e,http://pdfs.semanticscholar.org/2b50/7f659b341ed0f23106446de8e4322f4a3f7e.pdf,,,https://arxiv.org/pdf/1610.05586v1.pdf
+2b7ef95822a4d577021df16607bf7b4a4514eb4b,http://pdfs.semanticscholar.org/b596/9178f843bfaecd0026d04c41e79bcb9edab5.pdf,,,http://books.nips.cc/papers/files/nips25/NIPS2012_1248.pdf
+2b8dfbd7cae8f412c6c943ab48c795514d53c4a7,http://mirlab.org/conference_papers/International_Conference/ICASSP%202014/papers/p529-bordei.pdf,,https://doi.org/10.1109/ICASSP.2014.6853652,
+2b1129efcbafa61da1d660de3b5c84b646540311,http://www.researchgate.net/profile/Haizhou_Ai/publication/221368891_Distributing_expressional_faces_in_2-D_emotional_space/links/546b431f0cf20dedafd52906.pdf,,,http://doi.acm.org/10.1145/1282280.1282339
+2bae810500388dd595f4ebe992c36e1443b048d2,http://pdfs.semanticscholar.org/2bae/810500388dd595f4ebe992c36e1443b048d2.pdf,,,http://www.ijbem.org/volume18/number1/ijbem_vol18_no1_pp13-18.pdf
+2b42f83a720bd4156113ba5350add2df2673daf0,http://pdfs.semanticscholar.org/2b42/f83a720bd4156113ba5350add2df2673daf0.pdf,,,http://crcv.ucf.edu/THUMOS14/papers/CUHK&SIAT.pdf
+2bbbbe1873ad2800954058c749a00f30fe61ab17,http://pdfs.semanticscholar.org/2bbb/be1873ad2800954058c749a00f30fe61ab17.pdf,,,https://www.rroij.com/open-access/face-verification-across-ages-using-selforganizing-map.pdf
+2baec98c19804bf19b480a9a0aa814078e28bb3d,http://eprints.eemcs.utwente.nl/26841/01/Pantic_Multi-conditional_Latent_Variable_Model.pdf,,,http://ibug.doc.ic.ac.uk/media/uploads/documents/eleftheriadis_iccv2015.pdf
+2be9284d531b8c573a4c39503ca50606446041a3,,,https://doi.org/10.1109/ICIP.2005.1530004,
+2be24e8a3f2b89bdaccd02521eff3b7bb917003e,,,,http://doi.ieeecomputersociety.org/10.1109/WACV.2015.96
+47fdbd64edd7d348713253cf362a9c21f98e4296,http://www.vision.cs.chubu.ac.jp/MPRG/C_group/C071_yamashita2015.pdf,,https://doi.org/10.1109/ICIP.2015.7351325,
+47382cb7f501188a81bb2e10cfd7aed20285f376,http://pdfs.semanticscholar.org/4738/2cb7f501188a81bb2e10cfd7aed20285f376.pdf,,,http://www.aaai.org/ocs/index.php/AAAI/AAAI16/paper/view/12318
+473366f025c4a6e0783e6174ca914f9cb328fe70,http://pdfs.semanticscholar.org/f021/cbfa5f3483889c3980b62c6cec329c8c5aec.pdf,,,https://arxiv.org/pdf/1610.06906v1.pdf
+476755252e53799b490c5a88fde81eef9a64fb7e,,,,
+4793f11fbca4a7dba898b9fff68f70d868e2497c,http://pdfs.semanticscholar.org/4793/f11fbca4a7dba898b9fff68f70d868e2497c.pdf,,https://doi.org/10.5591/978-1-57735-516-8/IJCAI11-422,http://ijcai.org/Proceedings/11/Papers/422.pdf
+470dbd3238b857f349ebf0efab0d2d6e9779073a,http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/3B_062_ext.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2015.7299136
+473031328c58b7461753e81251379331467f7a69,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W09/papers/Wang_Exploring_Fisher_Vector_2015_CVPR_paper.pdf,,,http://wangzheallen.github.io/papers/05.pdf
+47638197d83a8f8174cdddc44a2c7101fa8301b7,http://grail.cs.washington.edu/wp-content/uploads/2015/08/saleh2013oad.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Saleh_Object-Centric_Anomaly_Detection_2013_CVPR_paper.pdf
+47541d04ec24662c0be438531527323d983e958e,http://pdfs.semanticscholar.org/4754/1d04ec24662c0be438531527323d983e958e.pdf,,,http://www.doc.ic.ac.uk/~maja/BookChapter_ZengPanticHuang-CAMERA.pdf
+476f177b026830f7b31e94bdb23b7a415578f9a4,http://vision.ece.ucsb.edu/sites/vision.ece.ucsb.edu/files/publications/karthikeyan_icip2012_subspace_final.pdf,,https://doi.org/10.1109/ICIP.2012.6467074,https://labs.psych.ucsb.edu/grafton/scott/Papers/Karthikeyan%202012.pdf
+47cd161546c59ab1e05f8841b82e985f72e5ddcb,,,https://doi.org/10.1109/ICIP.2017.8296552,
+47109343e502a4097cb7efee54bc5fbb14598c05,,,,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.182
+4786638ffb3b2fb385cec80720cc6e7c3588b773,,,https://doi.org/10.1007/s11042-015-2598-1,
+474b461cd12c6d1a2fbd67184362631681defa9e,http://toc.proceedings.com/24478webtoc.pdf,,https://doi.org/10.1109/SMC.2014.6973888,
+472ba8dd4ec72b34e85e733bccebb115811fd726,http://pdfs.semanticscholar.org/472b/a8dd4ec72b34e85e733bccebb115811fd726.pdf,,https://doi.org/10.1007/978-3-642-19309-5_55,http://www.researchgate.net/profile/Li_Bai/publication/220745463_Cosine_Similarity_Metric_Learning_for_Face_Verification/links/54dcd4880cf25b09b912d2ed.pdf
+471bef061653366ba66a7ac4f29268e8444f146e,,,https://doi.org/10.1109/SMC.2015.524,
+47ca2df3d657d7938d7253bed673505a6a819661,http://pdfs.semanticscholar.org/47ca/2df3d657d7938d7253bed673505a6a819661.pdf,,,http://ilab.cs.ucsb.edu/publications/ChangPhD.pdf
+47d4838087a7ac2b995f3c5eba02ecdd2c28ba14,http://pdfs.semanticscholar.org/b2b5/35118c5c4dfcc96f547274cdc05dde629976.pdf,,,http://arxiv.org/abs/1707.04061
+47eba2f95679e106e463e8296c1f61f6ddfe815b,https://www.csie.ntu.edu.tw/~cyy/publications/papers/Shih2017DCF.pdf,,,http://openaccess.thecvf.com/content_cvpr_2017/papers/Shih_Deep_Co-Occurrence_Feature_CVPR_2017_paper.pdf
+47fb74785fbd8870c2e819fc91d04b9d9722386f,,,,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2017.161
+47a2727bd60e43f3253247b6d6f63faf2b67c54b,http://openaccess.thecvf.com/content_cvpr_2016/papers/Fu_Semi-Supervised_Vocabulary-Informed_Learning_CVPR_2016_paper.pdf,,,http://arxiv.org/abs/1604.07093
+47d3b923730746bfaabaab29a35634c5f72c3f04,http://pdfs.semanticscholar.org/47d3/b923730746bfaabaab29a35634c5f72c3f04.pdf,,,http://www.ijera.com/papers/Vol7_issue7/Part-3/F0707033038.pdf
+47e3029a3d4cf0a9b0e96252c3dc1f646e750b14,http://mmi.tudelft.nl/pub/dragos/_CompSysTech07.pdf,,,http://www.mmi.tudelft.nl/pub/dragos/_CompSysTech07.pdf
+475e16577be1bfc0dd1f74f67bb651abd6d63524,http://pdfs.semanticscholar.org/475e/16577be1bfc0dd1f74f67bb651abd6d63524.pdf,,,https://arxiv.org/pdf/1609.01885v2.pdf
+471befc1b5167fcfbf5280aa7f908eff0489c72b,http://poseidon.csd.auth.gr/papers/PUBLISHED/JOURNAL/pdf/Goudelis07a.pdf,,https://doi.org/10.1109/TIFS.2007.902915,https://ibug.doc.ic.ac.uk/media/uploads/documents/ieee_tifs_2007_goudelis.pdf
+47f8b3b3f249830b6e17888df4810f3d189daac1,http://pdfs.semanticscholar.org/fd44/c0c238fe90d6ca61864010abd94768fcde0c.pdf,,https://doi.org/10.1016/j.cviu.2012.01.005,http://www.ece.ualberta.ca/~djoseph/publications/journal/CVIU_2012.pdf
+47e8db3d9adb79a87c8c02b88f432f911eb45dc5,http://pdfs.semanticscholar.org/5f99/63990ab7dd888ab33393f712f8d5c1463348.pdf,,https://doi.org/10.1137/15M104013X,http://ibug.doc.ic.ac.uk/media/uploads/documents/magma.pdf
+47aeb3b82f54b5ae8142b4bdda7b614433e69b9a,http://pdfs.semanticscholar.org/5d06/437656dd94616d7d87260d5eb77513ded30f.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2013.130
+47dabb566f2bdd6b3e4fa7efc941824d8b923a13,http://pdfs.semanticscholar.org/47da/bb566f2bdd6b3e4fa7efc941824d8b923a13.pdf,,https://doi.org/10.1007/978-3-319-10590-1_22,http://www.cim.mcgill.ca/~clark/vmrl/web-content/papers/jjclark_eccv_2014.pdf
+47f5f740e225281c02c8a2ae809be201458a854f,http://pdfs.semanticscholar.org/5241/ad03e9276d4acd1c51eaa7f44e2d04d07b68.pdf,,https://doi.org/10.1002/sam.10007,http://www.cs.utexas.edu/users/inderjit/public_papers/disparate_sama.pdf
+47d07217c501644d63adfec740346f244abaaae8,,,https://doi.org/10.1016/j.patcog.2016.05.017,
+47bf7a8779c68009ea56a7c20e455ccdf0e3a8fa,http://pdfs.semanticscholar.org/d948/50abdd272a402cd2f00e5b85311d87c75b16.pdf,,,http://www.ijcaonline.org/archives/volume83/number5/14443-2602?format=pdf
+47a003e6bbfc5bf04a099ca53c67ddfdbea71315,http://www.researchgate.net/profile/Andrzej_Drygajlo/publication/228669241_Q-stack_aging_model_for_face_verification/links/09e4150f7ffb6d3946000000.pdf,http://ieeexplore.ieee.org/document/7077723/,,https://www.researchgate.net/profile/Andrzej_Drygajlo/publication/228669241_Q-stack_aging_model_for_face_verification/links/09e4150f7ffb6d3946000000.pdf
+47b508abdaa5661fe14c13e8eb21935b8940126b,http://pdfs.semanticscholar.org/47b5/08abdaa5661fe14c13e8eb21935b8940126b.pdf,,,http://www.ijarcsse.com/docs/papers/Volume_4/12_December2014/V4I11-0350.pdf
+477811ff147f99b21e3c28309abff1304106dbbe,http://pdfs.semanticscholar.org/f0f8/23511188d8c10b67512d23eb9cb7f3dd2f9a.pdf,,https://doi.org/10.1016/j.neucom.2011.05.043,http://www.csie.ntu.edu.tw/~winston/papers/wang11learning.pdf
+47506951d2dc7c4bb4d2d33dd25b67a767e56680,http://www.ll.mit.edu/mission/cybersec/publications/publication-files/full_papers/2015_04_15_BradyJ_IEEEHST_FP.pdf,,,
+473cbc5ec2609175041e1410bc6602b187d03b23,http://pdfs.semanticscholar.org/473c/bc5ec2609175041e1410bc6602b187d03b23.pdf,,,http://mmi.tudelft.nl/pub/dragos/datcu_euromedia08.pdf
+78b457f8b1ba4fbd1c50c32ec1f02f4f58764ad7,,,,http://doi.ieeecomputersociety.org/10.1109/WACV.2015.99
+78d4d861c766af2a8da8855bece5da4e6eed2e1c,,,,http://doi.acm.org/10.1145/3129416.3129455
+78216cd51e6e1cc014b83e27e7e78631ad44b899,http://www.ami-lab.org/uploads/Publications/Conference/WP4/Tracking%20facial%20features%20under%20occlusions%20and%20recognizing%20facial%20expressions%20in%20sign%20language.pdf,,https://doi.org/10.1109/AFGR.2008.4813464,
+78a4cabf0afc94da123e299df5b32550cd638939,http://pdfs.semanticscholar.org/78a4/cabf0afc94da123e299df5b32550cd638939.pdf,,https://doi.org/10.1016/j.cviu.2017.04.008,https://engineering.purdue.edu/kak/FaceRecognitionUnconstrainedPurdueRVL.pdf
+78f08cc9f845dc112f892a67e279a8366663e26d,http://pdfs.semanticscholar.org/78f0/8cc9f845dc112f892a67e279a8366663e26d.pdf,,,http://mediatum.ub.tum.de/doc/1289837/548618.pdf
+78d645d5b426247e9c8f359694080186681f57db,http://pdfs.semanticscholar.org/78d6/45d5b426247e9c8f359694080186681f57db.pdf,,https://doi.org/10.1007/978-3-319-19665-7_45,http://publications.idiap.ch/downloads/papers/2015/Metha_SCIA_2015.pdf
+78e1798c3077f4f8a4df04ca35cd73f82e9a38f3,,http://ieeexplore.ieee.org/document/6460640/,,
+7862d40da0d4e33cd6f5c71bbdb47377e4c6b95a,https://arxiv.org/pdf/1709.07598v1.pdf,,https://doi.org/10.1109/BTAS.2017.8272732,http://arxiv.org/abs/1709.07598
+783f3fccde99931bb900dce91357a6268afecc52,http://pdfs.semanticscholar.org/d1ea/f2cc9dfc6cdbc5468ef2152c46e9111a3f3b.pdf,,https://doi.org/10.1155/2009/945717,http://liris.cnrs.fr/Documents/Liris-6083.pdf
+78f244dc2a171944836a89874b8f60e9fe80865d,,,,http://doi.ieeecomputersociety.org/10.1109/ICIG.2011.181
+7897c8a9361b427f7b07249d21eb9315db189496,https://arxiv.org/pdf/1102.2743v2.pdf,,https://doi.org/10.1109/ICIP.2011.6116674,http://arxiv.org/abs/1102.2743
+7825708552c86079d0d11f48033ced391c0754ce,,,,
+7859667ed6c05a467dfc8a322ecd0f5e2337db56,http://pdfs.semanticscholar.org/7859/667ed6c05a467dfc8a322ecd0f5e2337db56.pdf,,,http://www.cs.tau.ac.il/~wolf/papers/webscale.pdf
+78436256ff8f2e448b28e854ebec5e8d8306cf21,http://pdfs.semanticscholar.org/7843/6256ff8f2e448b28e854ebec5e8d8306cf21.pdf,,,https://arxiv.org/pdf/1502.04972v1.pdf
+78f57e5e23ca40af858e6e97ebecb694036bd8a8,,,,
+78f438ed17f08bfe71dfb205ac447ce0561250c6,http://pdfs.semanticscholar.org/78f4/38ed17f08bfe71dfb205ac447ce0561250c6.pdf,,,https://www.base-search.net/Record/7483cf20f0148b75b45a67b3dd9f384588cce0b62ea55846263e07c9a6375fad
+78f79c83b50ff94d3e922bed392737b47f93aa06,http://mplab.ucsd.edu/wp-content/uploads/2011-LittlewortEtAl-FG-CERT.pdf,,https://doi.org/10.1109/FG.2011.5771414,http://mplab.ucsd.edu/wordpress/wp-content/uploads/2011-LittlewortEtAl-FG-CERT.pdf
+7887824e9cc42914165dd3d96b956bff7560e4e4,,,,
+78fede85d6595e7a0939095821121f8bfae05da6,http://pdfs.semanticscholar.org/78fe/de85d6595e7a0939095821121f8bfae05da6.pdf,,https://doi.org/10.3837/tiis.2015.02.015,http://www.csie.kuas.edu.tw/~jcchen/pdf/Discriminant%20metric%20learning%20approach%20for%20face%20verification.pdf
+7862f646d640cbf9f88e5ba94a7d642e2a552ec9,http://pdfs.semanticscholar.org/7862/f646d640cbf9f88e5ba94a7d642e2a552ec9.pdf,,https://doi.org/10.1007/978-3-642-15549-9_25,http://grail.cs.washington.edu/projects/malkovich/eccv10paper.pdf
+780c8a795baca1ba4cb4956cded877dd3d1ca313,,,,http://doi.ieeecomputersociety.org/10.1109/ISSPIT.2013.6781879
+789b8fff223b0db0fe3babf46ea98b1d5197f0c0,,,https://doi.org/10.1002/ima.20245,
+785eeac2e236a85a45b4e0356c0745279c31e089,,,https://doi.org/10.1109/TIFS.2014.2359543,
+78a11b7d2d7e1b19d92d2afd51bd3624eca86c3c,http://pdfs.semanticscholar.org/78a1/1b7d2d7e1b19d92d2afd51bd3624eca86c3c.pdf,,,http://papers.nips.cc/paper/6200-improved-deep-metric-learning-with-multi-class-n-pair-loss-objective
+78a4eb59ec98994bebcf3a5edf9e1d34970c45f6,https://graphics.stanford.edu/papers/ib-relighting/ib-relighting.pdf,,,http://www.graphics.stanford.edu/papers/ib-relighting/ib-relighting.pdf
+781c2553c4ed2a3147bbf78ad57ef9d0aeb6c7ed,https://ivi.fnwi.uva.nl/isis/publications/2017/JainIJCV2017/JainIJCV2017.pdf,,https://doi.org/10.1007/s11263-017-1023-9,http://arxiv.org/pdf/1607.02003v1.pdf
+7813d405450013bbdb0b3a917319d5964a89484a,,,https://doi.org/10.1109/WACV.2017.62,
+78df7d3fdd5c32f037fb5cc2a7c104ac1743d74e,http://arxiv.org/pdf/1503.01224.pdf,,https://doi.org/10.1109/TCSVT.2016.2576761,
+780557daaa39a445b24c41f637d5fc9b216a0621,http://www.ee.columbia.edu/ln/dvmm/publications/15/EventNetDemo.pdf,,,http://doi.acm.org/10.1145/2733373.2807973
+78fdf2b98cf6380623b0e20b0005a452e736181e,http://pdfs.semanticscholar.org/78fd/f2b98cf6380623b0e20b0005a452e736181e.pdf,,,https://drum.lib.umd.edu/bitstream/handle/1903/12631/Castillo_umd_0117E_13012.pdf?isAllowed=y&sequence=1
+788a7b59ea72e23ef4f86dc9abb4450efefeca41,http://eprints.eemcs.utwente.nl/26840/01/Pantic_Robust_Statistical_Face_Frontalization.pdf,,,https://ibug.doc.ic.ac.uk/media/uploads/documents/robust_frontalization.pdf
+787c1bb6d1f2341c5909a0d6d7314bced96f4681,http://pdfs.semanticscholar.org/787c/1bb6d1f2341c5909a0d6d7314bced96f4681.pdf,,,https://repository.iiitd.edu.in/jspui/bitstream/handle/123456789/360/MT13106.pdf;sequence=1
+7808937b46acad36e43c30ae4e9f3fd57462853d,http://ttic.uchicago.edu/~smaji/papers/attributes-iccv11.pdf,,,http://www.eecs.berkeley.edu/Research/Projects/CS/vision/shape/attributes-poselets-iccv11.pdf
+789a43f51e0a3814327dab4299e4eda8165a5748,,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.189
+782eee555067b2d6d24db87775e1ded5fb047491,,,https://doi.org/10.1109/MMSP.2008.4665158,
+8ba67f45fbb1ce47a90df38f21834db37c840079,http://www.cmlab.csie.ntu.edu.tw/~yanying/paper/dsp006-chen.pdf,,,http://cmlab.csie.ntu.edu.tw/~yanying/paper/dsp006-chen.pdf
+8b547b87fd95c8ff6a74f89a2b072b60ec0a3351,http://pdfs.semanticscholar.org/8b54/7b87fd95c8ff6a74f89a2b072b60ec0a3351.pdf,,,http://www.fdg2014.org/papers/fdg2014_wip_19.pdf
+8b7191a2b8ab3ba97423b979da6ffc39cb53f46b,http://www.eurecom.fr/fr/publication/3472/download/mm-publi-3472.pdf,,https://doi.org/10.1109/ICCVW.2011.6130409,http://www.eurecom.fr/en/publication/3472/download/mm-publi-3472.pdf
+8bf57dc0dd45ed969ad9690033d44af24fd18e05,http://pdfs.semanticscholar.org/8bf5/7dc0dd45ed969ad9690033d44af24fd18e05.pdf,,,http://www.wseas.us/e-library/conferences/2011/Florence/GAVTASC/GAVTASC-46.pdf
+8bf243817112ac0aa1348b40a065bb0b735cdb9c,http://pdfs.semanticscholar.org/8bf2/43817112ac0aa1348b40a065bb0b735cdb9c.pdf,,,https://arxiv.org/pdf/1708.02386v1.pdf
+8bfada57140aa1aa22a575e960c2a71140083293,http://pdfs.semanticscholar.org/8bfa/da57140aa1aa22a575e960c2a71140083293.pdf,,,https://multispectral-imagery-lab.sandbox.wvu.edu/files/d/5c237606-30f4-43e3-a12e-c1ebad1bb99b/spieuv_neeru.pdf
+8b8728edc536020bc4871dc66b26a191f6658f7c,http://pdfs.semanticscholar.org/8b87/28edc536020bc4871dc66b26a191f6658f7c.pdf,,https://doi.org/10.1016/j.patrec.2013.04.028,http://www.dia.fi.upm.es/~pcr/publications/prl2013.pdf
+8be60114634caa0eff8566f3252cb9a1b7d5ef10,,,,http://doi.ieeecomputersociety.org/10.1109/ICME.2014.6890133
+8bbbdff11e88327816cad3c565f4ab1bb3ee20db,https://eprints.soton.ac.uk/410731/1/FG_soton_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.31
+8b4124bb68e5b3e6b8b77888beae7350dc594a40,,,https://doi.org/10.1109/ICSMC.2005.1571395,
+8bf945166305eb8e304a9471c591139b3b01a1e1,,,https://doi.org/10.1109/ACCESS.2017.2756451,
+8b10383ef569ea0029a2c4a60cc2d8c87391b4db,http://pdfs.semanticscholar.org/fe2d/20dca6dcedc7944cc2d9fea76de6cbb9d90c.pdf,,https://doi.org/10.5244/C.25.28,http://staff.computing.dundee.ac.uk/jgzhang/publications/zhou_bmvc11.pdf
+8bfec7afcf5015017406fc04c43c1f43eb723631,http://www.umiacs.umd.edu/users/pvishalm/Journal_pub/DCS_TAC_2013.pdf,,,http://www.rci.rutgers.edu/~vmp93/Journal_pub/DCS_TAC_2013.pdf
+8b1fa60b9164b60d1ca2705611fab063505a3ef5,,,,http://doi.ieeecomputersociety.org/10.1109/ICMEW.2013.6618337
+8b30259a8ab07394d4dac971f3d3bd633beac811,http://pdfs.semanticscholar.org/8b30/259a8ab07394d4dac971f3d3bd633beac811.pdf,,,http://www.aaai.org/ocs/index.php/AAAI/AAAI16/paper/view/11743
+8b3c867e67b263d7a0577a112173a64009a3b4ba,,,https://doi.org/10.1109/ICIP.2010.5652374,
+8b19efa16a9e73125ab973429eb769d0ad5a8208,http://pdfs.semanticscholar.org/8b19/efa16a9e73125ab973429eb769d0ad5a8208.pdf,,https://doi.org/10.1007/978-3-642-33191-6_18,http://www.cs.stevens-tech.edu/~kamberov/Papers/isvc2012_186_SCAR_KamberovEtAL.pdf
+8b1f697d81de1245c283b4f8f055b9b76badfa66,,,https://doi.org/10.1142/S0218126616500171,
+8b6fded4d08bf0b7c56966b60562ee096af1f0c4,http://pdfs.semanticscholar.org/8b6f/ded4d08bf0b7c56966b60562ee096af1f0c4.pdf,,,http://research.ijcaonline.org/volume59/number3/pxc3883956.pdf
+8bf647fed40bdc9e35560021636dfb892a46720e,https://arxiv.org/pdf/1612.04061v1.pdf,,,http://doi.acm.org/10.1145/3009977.3010035
+8b2704a5218a6ef70e553eaf0a463bd55129b69d,http://pdfs.semanticscholar.org/8b27/04a5218a6ef70e553eaf0a463bd55129b69d.pdf,,https://doi.org/10.3390/s130607714,http://arxiv.org/pdf/1604.03225v1.pdf
+8bb21b1f8d6952d77cae95b4e0b8964c9e0201b0,http://pdfs.semanticscholar.org/8bb2/1b1f8d6952d77cae95b4e0b8964c9e0201b0.pdf,,,https://mediatum.ub.tum.de/doc/1323920/1323920.pdf
+8b1db0894a23c4d6535b5adf28692f795559be90,http://pdfs.semanticscholar.org/8b1d/b0894a23c4d6535b5adf28692f795559be90.pdf,,,http://homes.cs.washington.edu/~neeraj/publications/base/papers/nk_spie2013_reliability.pdf
+8b2e3805b37c18618b74b243e7a6098018556559,http://pdfs.semanticscholar.org/8b2e/3805b37c18618b74b243e7a6098018556559.pdf,,,https://openreview.net/pdf?id=ryn-581vM
+8b74252625c91375f55cbdd2e6415e752a281d10,http://epubs.surrey.ac.uk/813060/1/camgoz2016icprw.pdf,,https://doi.org/10.1109/ICPR.2016.7899606,
+133f42368e63928dc860cce7618f30ee186d328c,http://pdfs.semanticscholar.org/50bd/1c76a5051db0b13fd76e7a633884ad49d5a8.pdf,,https://doi.org/10.5244/C.27.108,http://www.cl.cam.ac.uk/~hg410/SariyanidiEtAl-BMVC2013.pdf
+134aad8153ab78345b2581efac2fe175a3084154,http://www.cs.utexas.edu/~ai-lab/pubs/vijayanarasimhan_grauman_cvpr2008.pdf,,,http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/292.pdf
+13907865a97afde053d7bb7134d58a7bbc12043c,,,https://doi.org/10.1016/j.patcog.2014.05.001,
+13fd25a18ab3faebcd6a4ab95f4cc814fcda337a,,,,
+134cea33099cafc6615e57437e29d7c3906a2b48,,,,http://doi.ieeecomputersociety.org/10.1109/ICETET.2010.80
+13719bbb4bb8bbe0cbcdad009243a926d93be433,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w6/papers/Tian_Deep_LDA-Pruned_Nets_CVPR_2017_paper.pdf,,,http://www.vislab.ucr.edu/Biometrics2017/program_slides/deep-lda-pruned-final.pdf
+134db6ca13f808a848321d3998e4fe4cdc52fbc2,http://ibug.doc.ic.ac.uk/media/uploads/documents/PanticPatras-SMCB-2005-FINAL.pdf,,https://doi.org/10.1109/TSMCB.2005.859075,http://www.doc.ic.ac.uk/~maja/PanticPatras-SMCB-2005-FINAL.pdf
+133dd0f23e52c4e7bf254e8849ac6f8b17fcd22d,http://www.stat.ucla.edu/~caiming/pubs/1402.1783v2.pdf,,,http://web.eecs.umich.edu/~jjcorso/pubs/cxiong_TPAMI_ace.pdf
+136aae348c7ebc6fd9df970b0657241983075795,,,https://doi.org/10.1109/ICIP.2015.7351542,
+13f065d4e6dfe2a130bd64d73eee97d10d9f7d33,,,https://doi.org/10.1109/DICTA.2015.7371222,
+1329206dbdb0a2b9e23102e1340c17bd2b2adcf5,http://pdfs.semanticscholar.org/a2f4/06c8babac96b2108c530974c4d3132106d42.pdf,,https://doi.org/10.1007/978-3-319-10590-1_54,http://people.eecs.berkeley.edu/~rbg/papers/part-rcnn.pdf
+1369e9f174760ea592a94177dbcab9ed29be1649,http://geza.kzoo.edu/~erdi/IJCNN2013/HTMLFiles/PDFs/P393-1401.pdf,,https://doi.org/10.1109/IJCNN.2013.6707085,
+133900a0e7450979c9491951a5f1c2a403a180f0,http://rlair.cs.ucr.edu/papers/docs/socgroup.pdf,,,http://www.cs.ucr.edu/~cshelton/papers/docs/socgroup.pdf
+13bda03fc8984d5943ed8d02e49a779d27c84114,http://www-ljk.imag.fr/Publications/Basilic/com.lmc.publi.PUBLI_Inproceedings@13730f58c78_1669a2e/cevikalp-cvpr12.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6248047
+13db9466d2ddf3c30b0fd66db8bfe6289e880802,http://pdfs.semanticscholar.org/13db/9466d2ddf3c30b0fd66db8bfe6289e880802.pdf,,,http://www.mecs-press.org/ijigsp/ijigsp-v9-n1/IJIGSP-V9-N1-4.pdf
+13a994d489c15d440c1238fc1ac37dad06dd928c,http://pdfs.semanticscholar.org/13a9/94d489c15d440c1238fc1ac37dad06dd928c.pdf,,https://doi.org/10.1007/978-3-642-37444-9_58,http://www.cbsr.ia.ac.cn/users/zlei/papers/ACCV2012/LEI-ACCV12.pdf
+131178dad3c056458e0400bed7ee1a36de1b2918,http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Deng_Visual_Reranking_through_2013_ICCV_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2013.323
+13141284f1a7e1fe255f5c2b22c09e32f0a4d465,http://www.micc.unifi.it/pernici/index_files/ALIEN_final.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2013.250
+132527383890565d18f1b7ad50d76dfad2f14972,http://pdfs.semanticscholar.org/1325/27383890565d18f1b7ad50d76dfad2f14972.pdf,,,http://www.iis.sinica.edu.tw/JISE/2006/200609_03.html
+1394ca71fc52db972366602a6643dc3e65ee8726,https://www.cl.cam.ac.uk/~tb346/pub/papers/icmi2016EmoReact.pdf,,,http://www.cl.cam.ac.uk/~tb346/pub/papers/icmi2016EmoReact.pdf
+137aa2f891d474fce1e7a1d1e9b3aefe21e22b34,http://www.csis.pace.edu/~ctappert/dps/2013BTAS/Papers/Paper%20139/PID2859389.pdf,,https://doi.org/10.1109/BTAS.2013.6712710,
+13b1b18b9cfa6c8c44addb9a81fe10b0e89db32a,http://www.sfu.ca/~smuralid/papers/thesis.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Ibrahim_A_Hierarchical_Deep_CVPR_2016_paper.pdf
+131130f105661a47e0ffb85c2fe21595785f948a,http://pdfs.semanticscholar.org/1311/30f105661a47e0ffb85c2fe21595785f948a.pdf,,,http://www.umiacs.umd.edu/~morariu/publications/WangLatLRRWACV15_supplementary.pdf
+1329bcac5ebd0b08ce33ae1af384bd3e7a0deaca,http://pdfs.semanticscholar.org/e5c5/e5531aaa661c223088454572de11d2f266c3.pdf,,https://doi.org/10.1007/11957959_2,http://www.cs.huji.ac.il/~daphna/course/student%20lectures/elad%20mezuman.pdf
+133da0d8c7719a219537f4a11c915bf74c320da7,http://pdfs.semanticscholar.org/4f4f/920eb43399d8d05b42808e45b56bdd36a929.pdf,,,https://www.ijcaonline.org/archives/volume123/number4/21946-21946-2015905254?format=pdf
+13901473a12061f080b9d54219f16db7d406e769,,,https://doi.org/10.1109/TIP.2012.2222895,
+13c250fb740cb5616aeb474869db6ab11560e2a6,http://pdfs.semanticscholar.org/13c2/50fb740cb5616aeb474869db6ab11560e2a6.pdf,,,http://www.umiacs.umd.edu/~lsd/papers/JamiesonThesis.pdf
+13940d0cc90dbf854a58f92d533ce7053aac024a,http://pdfs.semanticscholar.org/949c/a8a6997aba88a162a36d48047f35ba8d0aab.pdf,,,https://open.bu.edu/bitstream/handle/2144/15204/Wang_bu_0017E_11004.pdf?isAllowed=y&sequence=1
+133f01aec1534604d184d56de866a4bd531dac87,http://www.cs.tau.ac.il/~wolf/papers/jpatchlbp.pdf,,,http://www.openu.ac.il/home/hassner/projects/Patchlbp/WolfHassnerTaigman_TPAMI11.pdf
+131bfa2ae6a04fd3b921ccb82b1c3f18a400a9c1,http://pdfs.semanticscholar.org/131b/fa2ae6a04fd3b921ccb82b1c3f18a400a9c1.pdf,,,https://ibug.doc.ic.ac.uk/media/uploads/documents/nsip_2005.pdf
+13841d54c55bd74964d877b4b517fa94650d9b65,http://www98.griffith.edu.au/dspace/bitstream/handle/10072/30001/60226_1.pdf?sequence=1,,https://doi.org/10.1109/ICIP.2009.5413812,https://research-repository.griffith.edu.au/bitstream/handle/10072/30001/60226_1.pdf;jsessionid=124E47B54EACEDD66110AD26E5A124AF?sequence=1
+1389ba6c3ff34cdf452ede130c738f37dca7e8cb,http://pdfs.semanticscholar.org/1389/ba6c3ff34cdf452ede130c738f37dca7e8cb.pdf,,,http://arxiv.org/abs/1704.01880
+131e395c94999c55c53afead65d81be61cd349a4,http://pdfs.semanticscholar.org/2c3f/aeaf0fe103e1e6cb8c2116728e2a5c7b7f29.pdf,,,https://arxiv.org/pdf/1612.02203v2.pdf
+1384a83e557b96883a6bffdb8433517ec52d0bea,http://pdfs.semanticscholar.org/6be6/392550222ca07ba4c47931bffaedace72d24.pdf,,,https://arxiv.org/pdf/1612.05203v5.pdf
+13fd0a4d06f30a665fc0f6938cea6572f3b496f7,http://pdfs.semanticscholar.org/13fd/0a4d06f30a665fc0f6938cea6572f3b496f7.pdf,,https://doi.org/10.1016/j.procs.2015.07.319,http://www.cs.tut.fi/~iosifidi/files/conference/2015_INNS_RELM.pdf?dl=0
+132f88626f6760d769c95984212ed0915790b625,http://pdfs.semanticscholar.org/132f/88626f6760d769c95984212ed0915790b625.pdf,,,https://escholarship.org/content/qt9t59f756/qt9t59f756.pdf
+13f6ab2f245b4a871720b95045c41a4204626814,http://pdfs.semanticscholar.org/9d74/382b6c4209c49de7c2b0fab7b34483ba0ddb.pdf,,,http://elife-publishing-cdn.s3.amazonaws.com/10774/elife-10774-v2.pdf
+136f92989e982ecf795cb27d65b48464eaec9323,,,,
+13be4f13dac6c9a93f969f823c4b8c88f607a8c4,http://www1.ece.neu.edu/~yuewu/files/2016/p242-robinson.pdf,,,https://export.arxiv.org/pdf/1604.02182
+13afc4f8d08f766479577db2083f9632544c7ea6,https://cs.anu.edu.au/few/KSikka_EmotiW.pdf,,,http://mplab.ucsd.edu/~ksikka/EmotiW%20presentation.pdf
+13188a88bbf83a18dd4964e3f89d0bc0a4d3a0bd,http://pdfs.semanticscholar.org/1318/8a88bbf83a18dd4964e3f89d0bc0a4d3a0bd.pdf,,,https://www.ijsr.net/archive/v3i11/T0NUMTQxMzY3.pdf
+13d9da779138af990d761ef84556e3e5c1e0eb94,http://www.cs.berkeley.edu/~malik/papers/ferencz-learnedmiller-malik08.pdf,,https://doi.org/10.1007/s11263-007-0093-5,http://www.eecs.berkeley.edu/Research/Projects/CS/vision/papers/ferenczMillerMalikIJCV06.pdf
+7f57e9939560562727344c1c987416285ef76cda,http://people.cs.vt.edu/~gangwang/class/cs6604/papers/face.pdf,,,http://doi.acm.org/10.1145/2976749.2978392
+7f9be0e08784835de0f8bc3a82fcca02b3721dc1,,,https://doi.org/10.1109/IJCNN.2014.6889744,
+7f415aee0137acab659c664eb1dff15f7b726bdd,,,https://doi.org/10.1109/TCSVT.2014.2302522,
+7f5346a169c9784ca79aca5d95ae8bf2ebab58e3,,,https://doi.org/10.1109/ICIP.2015.7351304,
+7fc5b6130e9d474dfb49d9612b6aa0297d481c8e,http://pdfs.semanticscholar.org/7fc5/b6130e9d474dfb49d9612b6aa0297d481c8e.pdf,,,https://arxiv.org/pdf/1711.06382v1.pdf
+7fce5769a7d9c69248178989a99d1231daa4fce9,http://pdfs.semanticscholar.org/7fce/5769a7d9c69248178989a99d1231daa4fce9.pdf,,,http://thesai.org/Downloads/Volume7No5/Paper_5-Towards_Face_Recognition_Using_Eigenface.pdf
+7fa2605676c589a7d1a90d759f8d7832940118b5,http://www.ces.clemson.edu/~stb/publications/willimon_clothing_classification_icra2013.pdf,,https://doi.org/10.1109/ICRA.2013.6631181,http://cecas.clemson.edu/~stb/publications/willimon_clothing_classification_icra2013.pdf
+7ff42ee09c9b1a508080837a3dc2ea780a1a839b,http://pdfs.semanticscholar.org/7ff4/2ee09c9b1a508080837a3dc2ea780a1a839b.pdf,,https://doi.org/10.1080/10447318.2016.1159799,"http://dspace.ou.nl/bitstream/1820/6749/2/Bahreini,%20Nadolski,%20Westera,%202016a.pdf"
+7f533bd8f32525e2934a66a5b57d9143d7a89ee1,http://pdfs.semanticscholar.org/7f53/3bd8f32525e2934a66a5b57d9143d7a89ee1.pdf,,,http://www.ll.mit.edu/mission/cybersec/publications/publication-files/full_papers/2014_06_28_BradyJ_IEEECVPRBigDataWorkshop_FP.pdf
+7f44f8a5fd48b2d70cc2f344b4d1e7095f4f1fe5,http://www.cs.cmu.edu/~epxing/papers/2015/Zhao_Xing_IJCV15.pdf,,https://doi.org/10.1007/s11263-015-0839-4,
+7f6061c83dc36633911e4d726a497cdc1f31e58a,http://pdfs.semanticscholar.org/7f60/61c83dc36633911e4d726a497cdc1f31e58a.pdf,,,http://arxiv.org/abs/1609.08675
+7fa3d4be12e692a47b991c0b3d3eba3a31de4d05,http://pdfs.semanticscholar.org/d5a4/c2757619a1f2c8d9a879e6f26f539a4a18f2.pdf,,https://doi.org/10.1007/978-3-319-16178-5_54,http://eeeweba.ntu.edu.sg/computervision/Research%20Papers/2014/Efficient%20Online%20Spatio-Temporal%20Filtering%20for%20Video%20Event%20Detection.pdf
+7f82f8a416170e259b217186c9e38a9b05cb3eb4,http://pdfs.semanticscholar.org/7f82/f8a416170e259b217186c9e38a9b05cb3eb4.pdf,,,https://arxiv.org/pdf/1712.05799v1.pdf
+7f36dd9ead29649ed389306790faf3b390dc0aa2,http://pdfs.semanticscholar.org/7f36/dd9ead29649ed389306790faf3b390dc0aa2.pdf,,,http://www.pitt.edu/~kschmidt/schmidt06DS.pdf
+7f4040b482d16354d5938c1d1b926b544652bf5b,,,,http://doi.acm.org/10.1145/2502081.2502115
+7f6cd03e3b7b63fca7170e317b3bb072ec9889e0,http://pdfs.semanticscholar.org/7f6c/d03e3b7b63fca7170e317b3bb072ec9889e0.pdf,,,https://arxiv.org/pdf/1803.09359v1.pdf
+7f703613149b190ea3bb0e3c803844895419846b,,,,
+7f6599e674a33ed64549cd512ad75bdbd28c7f6c,http://pdfs.semanticscholar.org/7f65/99e674a33ed64549cd512ad75bdbd28c7f6c.pdf,,https://doi.org/10.1007/978-3-662-44845-8_26,https://arxiv.org/pdf/1610.04576v1.pdf
+7f8d2d7eaa03132caefe0f3b126b5b369a712c9d,,,,http://doi.ieeecomputersociety.org/10.1109/ACHI.2009.33
+7f9260c00a86a0d53df14469f1fa10e318ee2a3c,http://www.cse.msu.edu/~stockman/Book/projects.html/F06Docs/Papers/daugemanIrisICIP02.pdf,,https://doi.org/10.1109/ICIP.2002.1037952,http://www.cl.cam.ac.uk/~jgd1000/irisrecog.pdf
+7f97a36a5a634c30de5a8e8b2d1c812ca9f971ae,http://pdfs.semanticscholar.org/7f97/a36a5a634c30de5a8e8b2d1c812ca9f971ae.pdf,,,https://arxiv.org/pdf/1802.00853v1.pdf
+7f2a4cd506fe84dee26c0fb41848cb219305173f,http://pdfs.semanticscholar.org/7f2a/4cd506fe84dee26c0fb41848cb219305173f.pdf,,,http://www.sersc.org/journals/IJHIT/vol8_no2_2015/10.pdf
+7fd700f4a010d765c506841de9884df394c1de1c,http://www.kyb.tuebingen.mpg.de/publications/attachments/CVPR2008-Blaschko_5069%5B0%5D.pdf,,,http://pub.ist.ac.at/~chl/papers/blaschko-cvpr2008.pdf
+7f59657c883f77dc26393c2f9ed3d19bdf51137b,http://pdfs.semanticscholar.org/7f59/657c883f77dc26393c2f9ed3d19bdf51137b.pdf,,,http://ro.uow.edu.au/cgi/viewcontent.cgi?article=2355&context=infopapers
+7ffc5c58e5b61ac7c45d8e6ed076248051ebea34,http://repository.lib.polyu.edu.hk/jspui/bitstream/10397/238/1/SMCB_C_34_5_04.pdf,,https://doi.org/10.1109/TSMCB.2004.831770,http://ira.lib.polyu.edu.hk/bitstream/10397/238/1/SMCB_C_34_5_04.pdf
+7f23a4bb0c777dd72cca7665a5f370ac7980217e,http://pdfs.semanticscholar.org/ce70/fecc7150816e081b422cbc157bd9019cdf25.pdf,,,https://arxiv.org/pdf/1703.07220v2.pdf
+7fb6bc6c920ca574677f0d3a40c5c377a095885b,http://www.cs.bris.ac.uk/Publications/Papers/2000124.pdf,,,http://www.cs.bris.ac.uk/Research/Vision/MotionRipper/pdf/graphite04.pdf
+7fa00c81f7c2d8da1551334b0e7bc3d7fd43130c,,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2014.2353635
+7f268f29d2c8f58cea4946536f5e2325777fa8fa,http://pdfs.semanticscholar.org/7f26/8f29d2c8f58cea4946536f5e2325777fa8fa.pdf,,,http://www.researchgate.net/profile/Gyanendra_Verma/publication/216673522_Facial_Emotion_Recognition_in_Curvelet_Domain/links/09e4150f631bb57c16000000.pdf
+7fc3442c8b4c96300ad3e860ee0310edb086de94,http://pdfs.semanticscholar.org/82f3/b7cacc15e026fd3a7639091d54162f6ae064.pdf,,https://doi.org/10.1007/978-3-642-12304-7_9,http://www.openu.ac.il/home/hassner/projects/bgoss/ACCV09WolfHassnerTaigman.pdf
+7fcd03407c084023606c901e8933746b80d2ad57,,,https://doi.org/10.1109/BTAS.2017.8272694,
+7f3a73babe733520112c0199ff8d26ddfc7038a0,http://pdfs.semanticscholar.org/7f3a/73babe733520112c0199ff8d26ddfc7038a0.pdf,,https://doi.org/10.5220/0005722305820589,http://www.ai.rug.nl/~mrolarik/Publications/VISAPP_2016_135.pdf
+7f8d44e7fd2605d580683e47bb185de7f9ea9e28,http://pdfs.semanticscholar.org/c84f/88b2a764ddcc22c4971827d58024b6017496.pdf,,,http://www.aaai.org/ocs/index.php/AAAI/AAAI16/paper/view/12384
+7f1f3d7b1a4e7fc895b77cb23b1119a6f13e4d3a,http://pdfs.semanticscholar.org/7f1f/3d7b1a4e7fc895b77cb23b1119a6f13e4d3a.pdf,,https://doi.org/10.1109/CIRA.2003.1222308,http://www.ri.cmu.edu/pub_files/pub4/kanade_takeo_2003_1/kanade_takeo_2003_1.pdf
+7fcfd72ba6bc14bbb90b31fe14c2c77a8b220ab2,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/He_Robust_FEC-CNN_A_CVPR_2017_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2017.255
+7f8cef6ba2f059e465b1b23057a6dbb23fba1c63,,,https://doi.org/10.1109/TCSVT.2016.2539541,
+7f205b9fca7e66ac80758c4d6caabe148deb8581,http://pdfs.semanticscholar.org/7f20/5b9fca7e66ac80758c4d6caabe148deb8581.pdf,,,http://epubs.surrey.ac.uk/809838/1/mssp-survey-firstlook.pdf
+7fd6bb30ad5d7eb3078efbb85f94d2d60e701115,http://pdfs.semanticscholar.org/7fd6/bb30ad5d7eb3078efbb85f94d2d60e701115.pdf,,,https://arxiv.org/pdf/1802.09745v1.pdf
+7fc76446d2b11fc0479df6e285723ceb4244d4ef,http://pdfs.semanticscholar.org/7fc7/6446d2b11fc0479df6e285723ceb4244d4ef.pdf,,,https://pdfs.semanticscholar.org/7fc7/6446d2b11fc0479df6e285723ceb4244d4ef.pdf
+7f1078a2ebfa23a58adb050084d9034bd48a8a99,,,https://doi.org/10.1007/s00371-015-1169-9,
+7aa32e0639e0750e9eee3ce16e51e9f94241ae88,,,,
+7a9ef21a7f59a47ce53b1dff2dd49a8289bb5098,http://pdfs.semanticscholar.org/7a9e/f21a7f59a47ce53b1dff2dd49a8289bb5098.pdf,,,http://www.eecs.harvard.edu/~zickler/papers/Appearance_CGV2009.pdf
+7a595800b490ff437ab06fe7612a678d5fe2b57d,,,https://doi.org/10.1109/MMSP.2009.5293285,
+7af38f6dcfbe1cd89f2307776bcaa09c54c30a8b,http://pdfs.semanticscholar.org/7af3/8f6dcfbe1cd89f2307776bcaa09c54c30a8b.pdf,,,http://www.cse.msu.edu/rgroups/amdl/papers/VCIPchapter.pdf
+7ae0212d6bf8a067b468f2a78054c64ea6a577ce,http://pdfs.semanticscholar.org/7ae0/212d6bf8a067b468f2a78054c64ea6a577ce.pdf,,,http://www.nii.ac.jp/graduate/thesis/pdf/duy_Dr_thesis.pdf
+7a9c317734acaf4b9bd8e07dd99221c457b94171,http://pdfs.semanticscholar.org/7a9c/317734acaf4b9bd8e07dd99221c457b94171.pdf,,https://doi.org/10.1007/978-3-642-12297-2_30,http://research.microsoft.com/en-us/people/zhoulin/Publications/2009-ACCV-LDP.pdf
+7a0fb972e524cb9115cae655e24f2ae0cfe448e0,http://pdfs.semanticscholar.org/7a0f/b972e524cb9115cae655e24f2ae0cfe448e0.pdf,,,http://www.researchgate.net/profile/Marley_Vellasco/publication/239691628_Facial_Expression_Classification_Using_RBF_AND_Back-Propagation_Neural_Networks/links/0a85e537b87c780f17000000.pdf
+7ad77b6e727795a12fdacd1f328f4f904471233f,https://ueaeprints.uea.ac.uk/65008/1/Accepted_manuscript.pdf,,https://doi.org/10.1109/TMM.2017.2700204,
+7adaad633d3002f88cdee105d9c148e013202a06,,,,
+7a09e8f65bd85d4c79f0ae90d4e2685869a9894f,,,https://doi.org/10.1109/TMM.2016.2551698,
+7ab8cafe454a9fd0fe5d51e718a010ef552b9271,,,,
+7a7f2403e3cc7207e76475e8f27a501c21320a44,http://www.apsipa2013.org/wp-content/uploads/2013/05/395_Emotion-recognition-Wu-2928773.pdf,,https://doi.org/10.1109/APSIPA.2013.6694347,http://apsipa2013.org/wp-content/uploads/2013/05/395_Emotion-recognition-Wu-2928773.pdf
+7aafeb9aab48fb2c34bed4b86755ac71e3f00338,http://pdfs.semanticscholar.org/7aaf/eb9aab48fb2c34bed4b86755ac71e3f00338.pdf,,https://doi.org/10.3390/s16081157,http://www.mdpi.com/1424-8220/16/8/1157/pdf
+7a6e3ed956f71b20c41fbec008b1fa8dacad31a6,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2015.7163117
+7a84368ebb1a20cc0882237a4947efc81c56c0c0,https://ibug.doc.ic.ac.uk/media/uploads/documents/iccv_final.pdf,,,http://ibug.doc.ic.ac.uk/media/uploads/documents/iccv_final.pdf
+7a91617ec959acedc5ec8b65e55b9490b76ab871,,,https://doi.org/10.1109/RAIT.2012.6194481,
+7a666a91a47da0d371a9ba288912673bcd5881e4,,,https://doi.org/10.1016/j.patrec.2009.05.011,
+7ab238c23c6640fe0b23d635d6b5fc38fa4a3b46,,,,
+7aa4c16a8e1481629f16167dea313fe9256abb42,http://mirlab.org/conference_papers/International_Conference/ICASSP%202017/pdfs/0002981.pdf,,https://doi.org/10.1109/ICASSP.2017.7952703,
+7ad1638f7d76c7e885bc84cd694c60f109f02159,https://www.researchgate.net/profile/Wen-Jing_Yan/publication/236120483_Face_Recognition_and_Micro-expression_Recognition_Based_on_Discriminant_Tensor_Subspace_Analysis_Plus_Extreme_Learning_Machine/links/0deec51adcddd72a4f000000.pdf,,https://doi.org/10.1007/s11063-013-9288-7,https://pdfs.semanticscholar.org/8b3f/6cbd296ee28c9154ec020b2df7ca6201b045.pdf
+7a6d9f89e0925a220fe3dfba4f0d2745f8be6c9a,http://www.faceplusplus.com/wp-content/uploads/2014/11/Learning-Compact-Face-Representation-Packing-a-Face-into-an-int32.pdf,,,http://doi.acm.org/10.1145/2647868.2654960
+7a85b3ab0efb6b6fcb034ce13145156ee9d10598,http://pdfs.semanticscholar.org/7a85/b3ab0efb6b6fcb034ce13145156ee9d10598.pdf,,https://doi.org/10.1016/j.patcog.2010.07.005,http://www98.griffith.edu.au/dspace/bitstream/10072/34436/1/64724_1.pdf
+7ab930146f4b5946ec59459f8473c700bcc89233,http://pdfs.semanticscholar.org/7ab9/30146f4b5946ec59459f8473c700bcc89233.pdf,,https://doi.org/10.1016/j.neucom.2016.04.023,http://arxiv.org/pdf/1602.07464v1.pdf
+7a65fc9e78eff3ab6062707deaadde024d2fad40,http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w11/papers/Zhu_A_Study_on_ICCV_2015_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCVW.2015.43
+7ad7897740e701eae455457ea74ac10f8b307bed,http://pdfs.semanticscholar.org/7ad7/897740e701eae455457ea74ac10f8b307bed.pdf,,,https://arxiv.org/pdf/1711.00575v1.pdf
+7adfc2f854e2ea45c29d22d6e2dcccdd527f46a8,,,https://doi.org/10.1007/s00138-015-0677-y,
+7a94936ce558627afde4d5b439ec15c59dbcdaa4,,,https://doi.org/10.1007/s11263-013-0665-5,
+7a1ce696e260899688cb705f243adf73c679f0d9,http://www.cse.msu.edu/~rossarun/pubs/SwearingenRossLabelPropagation_BIOSIG2016.pdf,,https://doi.org/10.1109/BIOSIG.2016.7736932,
+7a061e7eab865fc8d2ef00e029b7070719ad2e9a,http://cvrr.ucsd.edu/ece285/papers/from_WI13/Ramanan_IJCV2013.pdf,,https://doi.org/10.1007/s11263-012-0564-1,
+7ae8acf20f9415f99bfb95aa000d698b8499f1ee,,,,
+7ab7befcd319d55d26c1e4b7b9560da5763906f3,http://www.researchgate.net/profile/Lee_Ping-Han/publication/236160185_Facial_Trait_Code/links/0c96051e26825bd65a000000.pdf,,https://doi.org/10.1109/TCSVT.2012.2211951,
+7a8c2743db1749c2d9f16f62ee633574c1176e34,http://pdfs.semanticscholar.org/7a8c/2743db1749c2d9f16f62ee633574c1176e34.pdf,,,http://ijetae.com/files/Volume2Issue2/IJETAE_0212_52.pdf
+1451e7b11e66c86104f9391b80d9fb422fb11c01,http://pdfs.semanticscholar.org/1451/e7b11e66c86104f9391b80d9fb422fb11c01.pdf,,,https://infoscience.epfl.ch/record/229463/files/IET-SPR.2016.0756.pdf
+14d7bce17265738f10f48987bb7bffb3eafc676e,,http://ieeexplore.ieee.org/document/7514504/,,
+14761b89152aa1fc280a33ea4d77b723df4e3864,http://pdfs.semanticscholar.org/1476/1b89152aa1fc280a33ea4d77b723df4e3864.pdf,,https://doi.org/10.1007/978-3-319-10593-2_27,https://computing.ece.vt.edu/~santol/projects/zsl_via_visual_abstraction/eccv2014_zsl_via_visual_abstraction.pdf
+14b87359f6874ff9b8ee234b18b418e57e75b762,http://pdfs.semanticscholar.org/1b62/6c14544f249cd52ef86a4efc17f3d3834003.pdf,,https://doi.org/10.5244/C.26.118,http://www.researchgate.net/profile/Hua_Gao3/publication/266458682_Face_Alignment_Using_a_Ranking_Model_based_on_Regression_Trees/links/54d393e70cf2501791825481.pdf
+14fdec563788af3202ce71c021dd8b300ae33051,http://pdfs.semanticscholar.org/14fd/ec563788af3202ce71c021dd8b300ae33051.pdf,,,http://ceur-ws.org/Vol-1622/SocInf2016_Paper2.pdf
+143571c2fc9b1b69d3172f8a35b8fad50bc8202a,,,https://doi.org/10.1016/j.neucom.2014.07.066,
+142e5b4492bc83b36191be4445ef0b8b770bf4b0,http://pdfs.semanticscholar.org/142e/5b4492bc83b36191be4445ef0b8b770bf4b0.pdf,,https://doi.org/10.1007/11566489_58,http://www.nlpr.ia.ac.cn/2005papers/gjhy/gh65.pdf
+14b016c7a87d142f4b9a0e6dc470dcfc073af517,http://ws680.nist.gov/publication/get_pdf.cfm?pub_id=918912,,https://doi.org/10.1109/BTAS.2015.7358778,http://www.nist.gov/customcf/get_pdf.cfm?pub_id=918912
+142e233adceed9171f718a214a7eba8497af4324,,,https://doi.org/10.1109/IJCNN.2014.6889504,
+14b66748d7c8f3752dca23991254fca81b6ee86c,http://pdfs.semanticscholar.org/4e92/a8dcfd802c3248d56ba16d2613dceacaef59.pdf,,https://doi.org/10.5244/C.29.57,http://www.iai.uni-bonn.de/~gall/download/jgall_bowrnn_bmvc15.pdf
+14e8dbc0db89ef722c3c198ae19bde58138e88bf,http://ascl.cis.fiu.edu/uploads/1/3/4/2/13423859/amini-lisetti-acii-2013-final.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ACII.2013.51
+14fa27234fa2112014eda23da16af606db7f3637,http://pdfs.semanticscholar.org/14fa/27234fa2112014eda23da16af606db7f3637.pdf,,https://doi.org/10.1016/j.patcog.2010.08.026,http://www.deakin.edu.au/research/src/prada/publications/2010/journals/an_liu_venkatesh_yan_pr10.pdf
+1459d4d16088379c3748322ab0835f50300d9a38,https://arxiv.org/pdf/1605.04039v1.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2016.2567386
+14e949f5754f9e5160e8bfa3f1364dd92c2bb8d6,http://pdfs.semanticscholar.org/4b76/694ff2efb302074adf1ba6052d643177abd1.pdf,,https://doi.org/10.1016/j.patcog.2014.05.004,http://arxiv.org/abs/1603.07604
+146bbf00298ee1caecde3d74e59a2b8773d2c0fc,http://pdfs.semanticscholar.org/146b/bf00298ee1caecde3d74e59a2b8773d2c0fc.pdf,,,http://www.rug.nl/research/portal/files/19536021/4d_face_recognition.pdf
+14e9158daf17985ccbb15c9cd31cf457e5551990,http://pdfs.semanticscholar.org/14e9/158daf17985ccbb15c9cd31cf457e5551990.pdf,,,http://proceedings.mlr.press/v54/hou17a/hou17a.pdf
+14ce7635ff18318e7094417d0f92acbec6669f1c,http://www.cs.tau.ac.il/~wolf/papers/deepface_11_01_2013.pdf,,,http://www.cs.toronto.edu/~ranzato/publications/taigman_cvpr14.pdf
+143f7a51058b743a0d43026a523d9bbbc1ae43a8,http://www.researchgate.net/profile/Shinichi_Satoh/publication/221368838_An_efficient_method_for_face_retrieval_from_large_video_datasets/links/0912f510a0404c605f000000.pdf,,,https://www.researchgate.net/profile/Shinichi_Satoh/publication/221368838_An_efficient_method_for_face_retrieval_from_large_video_datasets/links/0912f510a0404c605f000000.pdf
+14d4c019c3eac3c3fa888cb8c184f31457eced02,http://pdfs.semanticscholar.org/14d4/c019c3eac3c3fa888cb8c184f31457eced02.pdf,,https://doi.org/10.1162/NECO_a_00555,http://yima.csl.illinois.edu/psfile/subspace_discovery_v7.pdf
+1450296fb936d666f2f11454cc8f0108e2306741,http://pdfs.semanticscholar.org/1450/296fb936d666f2f11454cc8f0108e2306741.pdf,,,http://axon.cs.byu.edu/Dan/673/papers/kim.pdf
+140438a77a771a8fb656b39a78ff488066eb6b50,http://homes.cs.washington.edu/~neeraj/base/publications/base/papers/nk_cvpr2011_faceparts.pdf,,,http://neerajkumar.org/projects/face-parts/base/papers/nk_pami2013_faceparts.pdf
+143bee9120bcd7df29a0f2ad6f0f0abfb23977b8,http://pdfs.semanticscholar.org/143b/ee9120bcd7df29a0f2ad6f0f0abfb23977b8.pdf,,https://doi.org/10.1007/978-3-642-41914-0_52,https://ibug.doc.ic.ac.uk/media/uploads/documents/isvc_submission_new.pdf
+14d72dc9f78d65534c68c3ed57305f14bd4b5753,http://openaccess.thecvf.com/content_ICCV_2017/papers/Yan_Exploiting_Multi-Grain_Ranking_ICCV_2017_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2017.68
+14b162c2581aea1c0ffe84e7e9273ab075820f52,http://pdfs.semanticscholar.org/4b87/c72e53f19e29f2ccf4d24f9432ebbafcf1a8.pdf,,https://doi.org/10.1007/978-3-319-10602-1_24,http://homepages.inf.ed.ac.uk/keller/publications/eccv14.pdf
+14ff9c89f00dacc8e0c13c94f9fadcd90e4e604d,https://www.comp.nus.edu.sg/~tsim/documents/cascade-cf-landmarks.pdf,,,http://www.hamedkiani.com/uploads/5/1/8/8/51882963/wacv_presentation.pdf
+14fdce01c958043140e3af0a7f274517b235adf3,http://pdfs.semanticscholar.org/14fd/ce01c958043140e3af0a7f274517b235adf3.pdf,,https://doi.org/10.1016/j.neucom.2009.09.021,http://www.cil.pku.edu.cn/publications/papers/NC2010Gusuicheng.pdf
+14b69626b64106bff20e17cf8681790254d1e81c,http://pdfs.semanticscholar.org/14b6/9626b64106bff20e17cf8681790254d1e81c.pdf,,,http://crcv.ucf.edu/THUMOS14/index.files/NotebookPapers13/Paper%2038%20(Supplementary).pdf
+14efb131bed66f1874dd96170f714def8db45d90,,,,http://doi.acm.org/10.1145/2818346.2830585
+14070478b8f0d84e5597c3e67c30af91b5c3a917,http://pdfs.semanticscholar.org/f0a5/f885aa14ac2bbb3cc8e4c7530f2449b2f160.pdf,,https://doi.org/10.1007/978-3-319-10605-2_50,http://www.vision.caltech.edu/~eeyjolfs/papers/EyjolfsdottirECCV2014.pdf
+14fb3283d4e37760b7dc044a1e2906e3cbf4d23a,http://crcv.ucf.edu/courses/CAP6412/Spring2013/papers/felix_yu_attribute_cvpr2012.pdf,,,http://felixyu.org/pdf/weak_poster.pdf
+14ae16e9911f6504d994503989db34d2d1cb2cd4,,,https://doi.org/10.1007/s11042-013-1616-4,
+14811696e75ce09fd84b75fdd0569c241ae02f12,https://jurie.users.greyc.fr/papers/cvpr08-cevikalp.pdf,,,http://mplab.ucsd.edu/wp-content/uploads/cvpr2008/conference/data/papers/251.pdf
+14bdd23ea8f4f6d7f4c193e5cbb0622362e12ae1,,,https://doi.org/10.1109/TIP.2006.884932,
+141eab5f7e164e4ef40dd7bc19df9c31bd200c5e,http://www.jdl.ac.cn/doc/2006/Local%20Linear%20Regression%20(LLR)%20for%20Pose%20Invariant%20Face%20Recognition.pdf,,,http://doi.ieeecomputersociety.org/10.1109/FGR.2006.73
+14e759cb019aaf812d6ac049fde54f40c4ed1468,http://pdfs.semanticscholar.org/14e7/59cb019aaf812d6ac049fde54f40c4ed1468.pdf,,https://doi.org/10.1007/978-0-387-31439-6_708,http://www.cvlab.cs.tsukuba.ac.jp/~kfukui/english/epapers/subspace_method.pdf
+146a7ecc7e34b85276dd0275c337eff6ba6ef8c0,https://arxiv.org/pdf/1611.06158v1.pdf,,https://doi.org/10.1109/BTAS.2017.8272686,http://arxiv.org/abs/1611.06158
+148eb413bede35487198ce7851997bf8721ea2d6,http://pdfs.semanticscholar.org/148e/b413bede35487198ce7851997bf8721ea2d6.pdf,,,http://www.cs.ucsb.edu/~daniel/publications/abstracts/VaqueroGSWC09PeopleSearch.pdf
+1462bc73834e070201acd6e3eaddd23ce3c1a114,http://pdfs.semanticscholar.org/1462/bc73834e070201acd6e3eaddd23ce3c1a114.pdf,,,http://www.advancedsourcecode.com/V2I460.pdf
+14014a1bdeb5d63563b68b52593e3ac1e3ce7312,http://pdfs.semanticscholar.org/1401/4a1bdeb5d63563b68b52593e3ac1e3ce7312.pdf,,,https://staff.fnwi.uva.nl/z.lou/pub/AgeExpressionBMVC2014.pdf
+1473a233465ea664031d985e10e21de927314c94,http://pdfs.semanticscholar.org/e985/0501e707f8783172ecacfe0cd29159abda34.pdf,,,https://arxiv.org/pdf/1611.00050v2.pdf
+140c95e53c619eac594d70f6369f518adfea12ef,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_089_ext.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Klare_Pushing_the_Frontiers_2015_CVPR_paper.pdf
+14418ae9a6a8de2b428acb2c00064da129632f3e,http://fanyix.cs.ucdavis.edu/project/discovery/files/ext_abstract.pdf,,,http://openaccess.thecvf.com/content_iccv_2015/papers/Xiao_Discovering_the_Spatial_ICCV_2015_paper.pdf
+14ba910c46d659871843b31d5be6cba59843a8b8,http://www.crcv.ucf.edu/papers/cvpr2013/ortiz_vfr_trailers.pdf,,,http://www.enriquegortiz.com/publications/VFR_MSSRC.pdf
+1467c4ab821c3b340abe05a1b13a19318ebbce98,http://pdfs.semanticscholar.org/1467/c4ab821c3b340abe05a1b13a19318ebbce98.pdf,,,http://discovery.ucl.ac.uk/1457869/1/thesis_final.pdf
+14318d2b5f2cf731134a6964d8193ad761d86942,http://pdfs.semanticscholar.org/1431/8d2b5f2cf731134a6964d8193ad761d86942.pdf,,,http://worldcomp-proceedings.com/proc/p2016/IPC3819.pdf
+142dcfc3c62b1f30a13f1f49c608be3e62033042,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Tsai_Adaptive_Region_Pooling_2015_CVPR_paper.pdf,,,http://faculty.ucmerced.edu/mhyang/papers/cvpr15_object_detection.pdf
+14c0f9dc9373bea1e27b11fa0594c86c9e632c8d,http://openaccess.thecvf.com/content_iccv_2015/papers/Dang_Adaptive_Exponential_Smoothing_ICCV_2015_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.367
+1439bf9ba7ff97df9a2da6dae4784e68794da184,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2013/W16/papers/Ptucha_LGE-KSVD_Flexible_Dictionary_2013_CVPR_paper.pdf,,,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/Workshops/4990a854.pdf
+1473e6f2d250307f0421f1e2ea68b6485d3bd481,,,https://doi.org/10.1109/IJCNN.2016.7727333,
+141768ab49a5a9f5adcf0cf7e43a23471a7e5d82,http://arxiv.org/pdf/1405.0085v1.pdf,,,http://ict.usc.edu/pubs/Relative%20Facial%20Action%20Unit%20Detection.pdf
+1455591d81c4ddabfe31de9f57f53e9b91e71fa2,,,,
+14e428f2ff3dc5cf96e5742eedb156c1ea12ece1,http://www.univ-soukahras.dz/eprints/2014-150-03190.pdf,,,
+14bca107bb25c4dce89210049bf39ecd55f18568,http://pdfs.semanticscholar.org/6f56/b0fada68f36d78cf20148fd13de8bce8a93d.pdf,,https://doi.org/10.5244/C.27.76,http://www.bmva.org/bmvc/2013/Papers/paper0076/paper0076.pdf
+14a5feadd4209d21fa308e7a942967ea7c13b7b6,http://mirlab.org/conference_papers/International_Conference/ICASSP%202012/pdfs/0001025.pdf,,https://doi.org/10.1109/ICASSP.2012.6288060,http://www.mirlab.org/conference_papers/International_Conference/ICASSP%202012/pdfs/0001025.pdf
+8ec82da82416bb8da8cdf2140c740e1574eaf84f,http://pdfs.semanticscholar.org/8ec8/2da82416bb8da8cdf2140c740e1574eaf84f.pdf,,,http://www.robots.ox.ac.uk/~vgg/publications/2017/Chung17a/chung17a.pdf
+8ee62f7d59aa949b4a943453824e03f4ce19e500,http://arxiv.org/pdf/1603.09732v1.pdf,,https://doi.org/10.1109/TIP.2017.2654165,http://arxiv.org/pdf/1603.09732v2.pdf
+8e0ede53dc94a4bfcf1238869bf1113f2a37b667,http://www.ri.cmu.edu/pub_files/2015/6/jpml_final.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Zhao_Joint_Patch_and_2015_CVPR_paper.pdf
+8e33183a0ed7141aa4fa9d87ef3be334727c76c0,http://pdfs.semanticscholar.org/8e33/183a0ed7141aa4fa9d87ef3be334727c76c0.pdf,,,http://www.cs.princeton.edu/courses/archive/fall17/cos429/COS429-proj/COS429_facerobustness_CathyLindyZachary.pdf
+8e9b92a805d1ce0bf4e0c04133d26e28db036e6a,,,https://doi.org/10.1109/DICTA.2017.8227428,
+8e94ed0d7606408a0833e69c3185d6dcbe22bbbe,http://www.wjscheirer.com/papers/wjs_wacv2012_eyes.pdf,,,https://www.wjscheirer.com/papers/wjs_wacv2012_eyes.pdf
+8eb9aa6349db3dd1b724266fcd5fc39a83da022a,http://www.hcii-lab.net/2009/%5BICIP%202009%5D%20A%20Novel%20feature%20extraction%20using%20PHOG%20for%20Smile%20Recognition.pdf,,https://doi.org/10.1109/ICIP.2009.5413938,
+8e461978359b056d1b4770508e7a567dbed49776,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Sikka_LOMo_Latent_Ordinal_CVPR_2016_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.602
+8e4808e71c9b9f852dc9558d7ef41566639137f3,http://pdfs.semanticscholar.org/8e48/08e71c9b9f852dc9558d7ef41566639137f3.pdf,,,https://arxiv.org/pdf/1801.00349v1.pdf
+8ef465ff12ee1d2be2a99d1c628117a4ce890a6b,,,https://doi.org/10.1016/j.camwa.2010.08.082,
+8ea30ade85880b94b74b56a9bac013585cb4c34b,http://www.eurecom.fr/fr/publication/1392/download/mm-perrfl-040517.pdf,,https://doi.org/10.1109/ICASSP.2004.1326473,http://www.eurecom.fr/en/publication/1392/download/mm-perrfl-040517.pdf
+8e0ad1ccddc7ec73916eddd2b7bbc0019d8a7958,http://pdfs.semanticscholar.org/bff6/c3acd48f34c671c48fae9b3fdf60f5d7b363.pdf,,,http://repository.cmu.edu/cgi/viewcontent.cgi?article=1209&context=dissertations
+8e8e3f2e66494b9b6782fb9e3f52aeb8e1b0d125,https://www.wjscheirer.com/papers/wjs_btas2012_smt.pdf,,https://doi.org/10.1109/BTAS.2012.6374555,http://www.wjscheirer.com/papers/wjs_btas2012_smt.pdf
+8e29884d4a0a1a53412e115e43f1b1cefe3bbc34,,,,
+8e55486aa456cae7f04fe922689b3e99a0e409fe,,,,http://doi.acm.org/10.1145/3123266.3123342
+8e378ef01171b33c59c17ff5798f30293fe30686,http://pdfs.semanticscholar.org/8e37/8ef01171b33c59c17ff5798f30293fe30686.pdf,,,http://mediatum2.ub.tum.de/doc/635955/document.pdf
+8ebe2df4d82af79f0f082ced70f3a73d7fb93b66,,,https://doi.org/10.1109/URAI.2015.7358851,
+8e272978dd1500ce6e4c2ef5e91d4332078ff757,,,https://doi.org/10.1007/11848035_5,
+8ed051be31309a71b75e584bc812b71a0344a019,http://www.vision.caltech.edu/~bart/Publications/2007/BartUllmanMBE.pdf,,,http://www.vision.caltech.edu/~bart/Publications/2007/BartUllmanMBEAppendix.pdf
+8e8a6623b4abd2452779c43f3c2085488dfcb323,,,,http://doi.acm.org/10.1145/2993148.2997630
+8e21399bb102e993edd82b003c306a068a2474da,,,https://doi.org/10.1109/ICIP.2013.6738758,
+8ee5b1c9fb0bded3578113c738060290403ed472,https://infoscience.epfl.ch/record/200452/files/wacv2014-RGE.pdf,,,http://doi.ieeecomputersociety.org/10.1109/WACV.2014.6835993
+8edc48e7a110f176ca08c26c0085c4dbb4146c5b,,,,
+8efda5708bbcf658d4f567e3866e3549fe045bbb,http://pdfs.semanticscholar.org/8efd/a5708bbcf658d4f567e3866e3549fe045bbb.pdf,,,http://www.ai.rug.nl/~mwiering/Master_Thesis_Siebert_Looije.pdf
+225fb9181545f8750061c7693661b62d715dc542,http://pdfs.semanticscholar.org/c592/e408d95c838bced90b79640bead7c226fe64.pdf,,,https://arxiv.org/pdf/1711.08238v2.pdf
+22043cbd2b70cb8195d8d0500460ddc00ddb1a62,http://uir.ulster.ac.uk/37137/2/Separability-Oriented%20Subclass%20Discriminant%20Analysis.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TPAMI.2017.2672557
+22137ce9c01a8fdebf92ef35407a5a5d18730dde,http://pdfs.semanticscholar.org/2213/7ce9c01a8fdebf92ef35407a5a5d18730dde.pdf,,,https://www.base-search.net/Record/f1103b37cf20d990aad077f2155946031c72f354c51d2e5bd4a1ffbb0ccdb842
+22e2066acfb795ac4db3f97d2ac176d6ca41836c,http://pdfs.semanticscholar.org/26f5/3a1abb47b1f0ea1f213dc7811257775dc6e6.pdf,,https://doi.org/10.1007/978-3-319-10605-2_1,http://vipl.ict.ac.cn/sites/default/files/papers/files/2014_ECCV_Coarse-to-Fine%20Auto-encoder%20Networks%20(CFAN)%20for%20Real-time%20Face%20Alignment.pdf
+22717ad3ad1dfcbb0fd2f866da63abbde9af0b09,http://pdfs.semanticscholar.org/2271/7ad3ad1dfcbb0fd2f866da63abbde9af0b09.pdf,,,https://tspace.library.utoronto.ca/bitstream/1807/30536/6/Chan_Jeanie_201111_MASc_thesis.pdf
+224d0eee53c2aa5d426d2c9b7fa5d843a47cf1db,http://www.ifp.illinois.edu/~jyang29/papers/CVPR13-PEM.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Li_Probabilistic_Elastic_Matching_2013_CVPR_paper.pdf
+2288696b6558b7397bdebe3aed77bedec7b9c0a9,http://pdfs.semanticscholar.org/2288/696b6558b7397bdebe3aed77bedec7b9c0a9.pdf,,,http://arxiv.org/abs/1607.02556
+22c06284a908d8ad0994ad52119773a034eed7ee,,,,http://doi.acm.org/10.1145/2964284.2967236
+22bebedc1a5f3556cb4f577bdbe032299a2865e8,http://pdfs.semanticscholar.org/22be/bedc1a5f3556cb4f577bdbe032299a2865e8.pdf,,https://doi.org/10.1016/j.patcog.2017.06.031,http://www.eurecom.fr/fr/publication/5252/download/sec-publi-5252.pdf
+22264e60f1dfbc7d0b52549d1de560993dd96e46,http://arxiv.org/pdf/1608.01471v1.pdf,,,https://arxiv.org/pdf/1608.01471v1.pdf
+22dada4a7ba85625824489375184ba1c3f7f0c8f,http://arxiv.org/pdf/1506.02328v1.pdf,,,https://arxiv.org/pdf/1506.02328v1.pdf
+2238dddb76499b19035641d97711cf30d899dadb,,,https://doi.org/10.1109/SIU.2016.7496098,
+221252be5d5be3b3e53b3bbbe7a9930d9d8cad69,http://pdfs.semanticscholar.org/2212/52be5d5be3b3e53b3bbbe7a9930d9d8cad69.pdf,,https://doi.org/10.5244/C.26.80,http://www.bmva.org/bmvc/2012/BMVC/paper080/abstract080.pdf
+22894c7a84984bd4822dcfe7c76a74673a242c36,,,,http://doi.acm.org/10.1145/2993148.2997634
+223ec77652c268b98c298327d42aacea8f3ce23f,http://pdfs.semanticscholar.org/223e/c77652c268b98c298327d42aacea8f3ce23f.pdf,,,http://cs.anu.edu.au/techreports/2011/TR-CS-11-02.pdf
+22df6b6c87d26f51c0ccf3d4dddad07ce839deb0,http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Yu_Fast_Action_Proposals_2015_CVPR_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/CVPR.2015.7298735
+228558a2a38a6937e3c7b1775144fea290d65d6c,http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Smith_Nonparametric_Context_Modeling_2014_CVPR_paper.pdf,,,http://pages.cs.wisc.edu/~lizhang/projects/face-landmark-localization/SmithCVPR2014.pdf
+22a10d8d2a2cb9055557a3b335d6706100890afb,,,https://doi.org/10.1109/SIU.2016.7496121,
+22ccd537857aca1ee4b961f081f07c58d42a7f32,,,https://doi.org/10.1109/DICTA.2015.7371260,
+22fdd8d65463f520f054bf4f6d2d216b54fc5677,http://pdfs.semanticscholar.org/22fd/d8d65463f520f054bf4f6d2d216b54fc5677.pdf,,,http://www.ijetae.com/files/Volume3Issue8/IJETAE_0813_63.pdf
+2251a88fbccb0228d6d846b60ac3eeabe468e0f1,http://pdfs.semanticscholar.org/2251/a88fbccb0228d6d846b60ac3eeabe468e0f1.pdf,,,http://www.cfar.umd.edu/~shaohua/papers/zhou05tr_mtx.pdf
+22e678d3e915218a7c09af0d1602e73080658bb7,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR_2009_WS/data/papers/04/13.pdf,,https://doi.org/10.1109/CVPRW.2009.5204185,http://www.cse.wustl.edu/~jacobsn/papers/iv_2009/jacobs09webcamdata.pdf
+22ad2c8c0f4d6aa4328b38d894b814ec22579761,http://nichol.as/papers/Gallagher/Clothing%20Cosegmentation%20for%20Recognizing%20People.pdf,,,http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR_2008/data/papers/141.pdf
+226a5ff790b969593596a52b55b3718dcdd7bb7f,https://www.cise.ufl.edu/~jho/papers/IEEE06.pdf,,https://doi.org/10.1109/JPROC.2006.886019,http://www.cise.ufl.edu/~jho/papers/IEEE06.pdf
+22d5aeb25bb034f6ae2fc50b5cdd9934a85d6505,,,,http://doi.acm.org/10.1145/2808469.2810102
+227b18fab568472bf14f9665cedfb95ed33e5fce,https://arxiv.org/pdf/1308.0271v2.pdf,,https://doi.org/10.1109/TIP.2015.2479456,https://arxiv.org/pdf/1308.0271v1.pdf
+2241eda10b76efd84f3c05bdd836619b4a3df97e,http://arxiv.org/pdf/1506.01342v5.pdf,,,http://arxiv.org/pdf/1506.01342v4.pdf
+22dbdace88c8f4bda2843ed421e3708ec0744237,,,https://doi.org/10.1016/j.cviu.2013.12.010,
+22646cf884cc7093b0db2c1731bd52f43682eaa8,http://pdfs.semanticscholar.org/2264/6cf884cc7093b0db2c1731bd52f43682eaa8.pdf,,,https://arxiv.org/pdf/1802.01144v2.pdf
+22f94c43dd8b203f073f782d91e701108909690b,http://pdfs.semanticscholar.org/22f9/4c43dd8b203f073f782d91e701108909690b.pdf,,,http://www.cs.virginia.edu/~gs9ed/reports/moviescope.pdf
+22dabd4f092e7f3bdaf352edd925ecc59821e168,http://dro.deakin.edu.au/eserv/DU:30044576/venkatesh-exploitingside-2008.pdf,,,http://mplab.ucsd.edu/wordpress/wp-content/uploads/cvpr2008/conference/data/papers/256.pdf
+22f656d0f8426c84a33a267977f511f127bfd7f3,https://arxiv.org/pdf/1609.06426v2.pdf,,https://doi.org/10.1007/s11263-017-1055-1,http://arxiv.org/pdf/1609.06426v2.pdf
+22143664860c6356d3de3556ddebe3652f9c912a,http://pdfs.semanticscholar.org/2214/3664860c6356d3de3556ddebe3652f9c912a.pdf,,https://doi.org/10.1007/978-3-540-78157-8_11,http://ias.cs.tum.edu/_media/spezial/bib/wimmer08facial.pdf
+2271d554787fdad561fafc6e9f742eea94d35518,http://pdfs.semanticscholar.org/2271/d554787fdad561fafc6e9f742eea94d35518.pdf,,,http://mediatum.ub.tum.de/doc/1189694/528291.pdf
+2293413ebd24e377c1785113b695cc8a918a5fdb,,,,
+22ec256400e53cee35f999244fb9ba6ba11c1d06,http://pdfs.semanticscholar.org/2dbd/f0093228eee11ce9ef17365055dada756413.pdf,,,https://arxiv.org/pdf/1712.01619v2.pdf
+22ec8af0f0e5469e40592d29e28cfbdf1154c666,http://pdfs.semanticscholar.org/aa07/2c823da778a2b8bf1fc79141b3b228a14e99.pdf,,,http://hal.upmc.fr/hal-01518089/document
+22a7f1aebdb57eecd64be2a1f03aef25f9b0e9a7,http://pdfs.semanticscholar.org/22a7/f1aebdb57eecd64be2a1f03aef25f9b0e9a7.pdf,,https://doi.org/10.1016/j.patcog.2012.05.019,https://www.ece.nus.edu.sg/stfpage/eleqiz/publications/pdf/person_reidentification_pr12.pdf
+22e189a813529a8f43ad76b318207d9a4b6de71a,http://openaccess.thecvf.com/content_ICCV_2017/papers/Felsen_What_Will_Happen_ICCV_2017_paper.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2017.362
+259ddd3c618feec51576baac7eaaf80ea924b791,,,https://doi.org/10.1007/s11257-007-9039-4,
+254964096e523d5e48e03390ce440c9af337d200,,,,http://dl.acm.org/citation.cfm?id=3005378
+25d514d26ecbc147becf4117512523412e1f060b,http://www.iab-rubric.org/papers/2015_ICB_CrowdVideoFaceDataset.pdf,,https://doi.org/10.1109/ICB.2015.7139083,
+2533c88b278e84a248200d3c5a281177d392e78f,,,,
+25866eb48b94e85fa675b1d393163d27ffd62ba6,,,,
+25de28e6470b742539f124b93181166a3812e3af,,,,
+250b73ec5a4f78b7b4ea3aba65c27fc1352154d5,,,https://doi.org/10.1109/TIP.2015.2463223,
+25c19d8c85462b3b0926820ee5a92fc55b81c35a,http://www.brl.ntt.co.jp/people/kumano/papers/Kumano.IJCV2009.pdf,,https://doi.org/10.1007/978-3-540-76386-4_30,http://www.hci.iis.u-tokyo.ac.jp/~ysato/papers/Kumano-IJCV09.pdf
+258a8c6710a9b0c2dc3818333ec035730062b1a5,http://pdfs.semanticscholar.org/258a/8c6710a9b0c2dc3818333ec035730062b1a5.pdf,,,http://eprints.eemcs.utwente.nl/10036/01/benelearn05.pdf
+25695abfe51209798f3b68fb42cfad7a96356f1f,http://pdfs.semanticscholar.org/2569/5abfe51209798f3b68fb42cfad7a96356f1f.pdf,,,"http://eprints.lincoln.ac.uk/26652/1/McDonagh,%20John%20-%20Computer%20Science%20-%20December%202016.pdf"
+250ebcd1a8da31f0071d07954eea4426bb80644c,http://pdfs.semanticscholar.org/2e26/8598d9c2fd9757ba43f7967e57b8a2a871f4.pdf,,,http://arxiv.org/pdf/1509.04874v2.pdf
+2525f336af31178b836e27f8c60056e18f1455d2,http://eeeweba.ntu.edu.sg/computervision/Research%20Papers/2017/TEMPORALLY%20ENHANCED%20IMAGE%20OBJECT%20PROPOSALS%20FOR%20VIDEOS.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICME.2017.8019495
+25337690fed69033ef1ce6944e5b78c4f06ffb81,http://pdfs.semanticscholar.org/2533/7690fed69033ef1ce6944e5b78c4f06ffb81.pdf,,,http://udspace.udel.edu/bitstream/handle/19716/13396/2014_Leitner_Jordan_PhD.pdf?isAllowed=y&sequence=1
+25c3cdbde7054fbc647d8be0d746373e7b64d150,http://openaccess.thecvf.com/content_cvpr_2016/papers/Ouyang_ForgetMeNot_Memory-Aware_Forensic_CVPR_2016_paper.pdf,,,http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Ouyang_ForgetMeNot_Memory-Aware_Forensic_CVPR_2016_paper.pdf
+256b46b12ab47283e6ada05fad6a2b501de35323,,,https://doi.org/10.1109/ICPR.2016.7900275,
+252f202bfb14d363a969fce19df2972b83fa7ec0,,,,http://doi.ieeecomputersociety.org/10.1109/FG.2017.120
+25bf288b2d896f3c9dab7e7c3e9f9302e7d6806b,http://pdfs.semanticscholar.org/25bf/288b2d896f3c9dab7e7c3e9f9302e7d6806b.pdf,,,http://arxiv.org/abs/1608.06557
+251281d9cbd207038efbde0515f4077541967239,http://staff.estem-uc.edu.au/roland/files/2009/05/Ramana-Murthy_Radwan_Goecke_ICIP2014_DenseBodyPartTrajectoriesForHumanActionRecognition.pdf,,https://doi.org/10.1109/ICIP.2014.7025293,
+25bcd5aa3bbe56c992547fba683418655b46fc4a,,,https://doi.org/10.1016/j.eswa.2017.03.030,
+25d3e122fec578a14226dc7c007fb1f05ddf97f7,https://ibug.doc.ic.ac.uk/media/uploads/documents/pdf17.pdf,,https://doi.org/10.1109/FG.2011.5771374,http://ibug.doc.ic.ac.uk/media/uploads/documents/pdf17.pdf
+2597b0dccdf3d89eaffd32e202570b1fbbedd1d6,http://pdfs.semanticscholar.org/26f3/03ae1912c16f08523a7d8db926e35114e8f0.pdf,,,http://lib-arxiv-008.serverfarm.cornell.edu/pdf/1511.05296.pdf
+2546dc7e2c2390233de16502413fe1097ecf3fb5,,,https://doi.org/10.1016/j.patrec.2011.01.009,
+25c108a56e4cb757b62911639a40e9caf07f1b4f,https://arxiv.org/pdf/1707.09531v2.pdf,,,https://arxiv.org/pdf/1707.09531v1.pdf
+2594a77a3f0dd5073f79ba620e2f287804cec630,https://arxiv.org/pdf/1702.06925v1.pdf,,https://doi.org/10.1109/ICIP.2017.8296449,https://arxiv.org/pdf/1702.06925v2.pdf
+258b3b1df82186dd76064ef86b28555e91389b73,,,https://doi.org/10.1109/ACCESS.2017.2739822,
+25f7f03acf62b2cf3672bb506c8827d00b048608,,,,
+25e2d3122d4926edaab56a576925ae7a88d68a77,http://pdfs.semanticscholar.org/25e2/d3122d4926edaab56a576925ae7a88d68a77.pdf,,,http://ftp.ncbi.nlm.nih.gov/pub/pmc/8d/1d/fpsyg-07-00166.PMC4762993.pdf
+25e05a1ea19d5baf5e642c2a43cca19c5cbb60f8,http://arxiv.org/pdf/1408.6027v2.pdf,,,http://doi.ieeecomputersociety.org/10.1109/TKDE.2016.2545658
+2559b15f8d4a57694a0a33bdc4ac95c479a3c79a,http://vision.ucsd.edu/~carolina/files/mklmnn.pdf,,https://doi.org/10.1109/TIP.2010.2068556,http://bmcfee.github.io/papers/contextmklmnn.pdf
+256ef946b4cecd8889df8d799d0c9175ae986af9,https://pdfs.semanticscholar.org/cd73/8347673151b378f447119fe2665f5c8c2215.pdf,,https://doi.org/10.1109/TIP.2015.2405346,https://www.researchgate.net/profile/Muhammad_Siddiqi2/publication/273003972_Human_Facial_Expression_Recognition_Using_Stepwise_Linear_Discriminant_Analysis_and_Hidden_Conditional_Random_Fields/links/54f54d120cf2ba6150657d84.pdf
+251e386a90f21db6d02806395b012b297cbf06ff,,,,
+2549ac0d3f40c1f6d72f641c2f05a17aef4bf42a,,,,
+2574860616d7ffa653eb002bbaca53686bc71cdd,http://pdfs.semanticscholar.org/e01d/f3e6faffad3f304f6c40b133ae1dcf326662.pdf,,,http://www.cell.com/cms/attachment/2062283687/2063980138/mmc1.pdf
+2564848f094f7c1cd5e599aa907947b10b5c7df2,http://prr.hec.gov.pk/Thesis/252S.pdf,,https://doi.org/10.1109/TCE.2007.381732,
+25f1f195c0efd84c221b62d1256a8625cb4b450c,http://www.ee.oulu.fi/~gyzhao/Papers/2007/04284844-ICME.pdf,,https://doi.org/10.1109/ICME.2007.4284844,http://www.researchgate.net/profile/Guoying_Zhao/publication/221264253_Experiments_with_Facial_Expression_Recognition_using_Spatiotemporal_Local_Binary_Patterns/links/02e7e528efcc290e81000000.pdf
+25885e9292957feb89dcb4a30e77218ffe7b9868,http://pdfs.semanticscholar.org/2588/5e9292957feb89dcb4a30e77218ffe7b9868.pdf,,,https://arxiv.org/pdf/1610.03640v1.pdf
+259706f1fd85e2e900e757d2656ca289363e74aa,http://pdfs.semanticscholar.org/6f98/3e8f26066f2ea486f6653b87154360d948ca.pdf,,,https://hal.inria.fr/inria-00321045v2/file/MV08.slides.pdf
+25b2811118ed73c64682544fe78023bb8242c709,http://www.researchgate.net/profile/Xueyin_Lin/publication/4193803_Kernel-based_multifactor_analysis_for_image_synthesis_and_recognition/links/00b7d51a9fd4fb9962000000.pdf,,,http://doi.ieeecomputersociety.org/10.1109/ICCV.2005.131
+25728e08b0ee482ee6ced79c74d4735bb5478e29,http://pdfs.semanticscholar.org/2572/8e08b0ee482ee6ced79c74d4735bb5478e29.pdf,,https://doi.org/10.1186/1687-5281-2014-28,http://www.researchgate.net/profile/Abhinav_Dhall/publication/263518292_Thermal_spatio-temporal_data_for_stress_recognition/links/0a85e53b27e43f3d61000000.pdf
+258a2dad71cb47c71f408fa0611a4864532f5eba,http://pdfs.semanticscholar.org/258a/2dad71cb47c71f408fa0611a4864532f5eba.pdf,,,http://www.nada.kth.se/utbildning/grukth/exjobb/rapportlistor/2011/rapporter11/azizpour_hossein_11111.pdf
+25127c2d9f14d36f03d200a65de8446f6a0e3bd6,http://pdfs.semanticscholar.org/2512/7c2d9f14d36f03d200a65de8446f6a0e3bd6.pdf,,,http://www.jatit.org/volumes/Vol87No2/10Vol87No2.pdf
diff --git a/scraper/reports/misc/missing-1.csv b/scraper/reports/misc/missing-1.csv
new file mode 100644
index 00000000..ade27ed5
--- /dev/null
+++ b/scraper/reports/misc/missing-1.csv
@@ -0,0 +1,817 @@
+57246142814d7010d3592e3a39a1ed819dd01f3b
+7788fa76f1488b1597ee2bebc462f628e659f61e
+cca9ae621e8228cfa787ec7954bb375536160e0d
+75249ebb85b74e8932496272f38af274fbcfd696
+47190d213caef85e8b9dd0d271dbadc29ed0a953
+8bdf6f03bde08c424c214188b35be8b2dec7cdea
+3dfb822e16328e0f98a47209d7ecd242e4211f82
+d0509afe9c2c26fe021889f8efae1d85b519452a
+4b48e912a17c79ac95d6a60afed8238c9ab9e553
+084bebc5c98872e9307cd8e7f571d39ef9c1b81e
+a32c5138c6a0b3d3aff69bcab1015d8b043c91fb
+1275d6a800f8cf93c092603175fdad362b69c191
+b4ee64022cc3ccd14c7f9d4935c59b16456067d3
+d46b790d22cb59df87f9486da28386b0f99339d3
+d7cbedbee06293e78661335c7dd9059c70143a28
+eb027969f9310e0ae941e2adee2d42cdf07d938c
+7fb7ccc1aa093ca526f2d8b6f2c404d2c886f69a
+be4faea0971ef74096ec9800750648b7601dda65
+831b4d8b0c0173b0bac0e328e844a0fbafae6639
+746c0205fdf191a737df7af000eaec9409ede73f
+b0c1615ebcad516b5a26d45be58068673e2ff217
+c866a2afc871910e3282fd9498dce4ab20f6a332
+9131c990fad219726eb38384976868b968ee9d9c
+511a8cdf2127ef8aa07cbdf9660fe9e0e2dfbde7
+e1256ff535bf4c024dd62faeb2418d48674ddfa2
+8ccde9d80706a59e606f6e6d48d4260b60ccc736
+6789bddbabf234f31df992a3356b36a47451efc7
+2c052a1c77a3ec2604b3deb702d77c41418c7d3e
+f442a2f2749f921849e22f37e0480ac04a3c3fec
+ef230e3df720abf2983ba6b347c9d46283e4b690
+bd8f77b7d3b9d272f7a68defc1412f73e5ac3135
+54a9ed950458f4b7e348fa78a718657c8d3d0e05
+d02e27e724f9b9592901ac1f45830341d37140fe
+6993bca2b3471f26f2c8a47adfe444bfc7852484
+00fb2836068042c19b5197d0999e8e93b920eb9c
+007fbc7a1d7eae33b2bb59b175dd1033e5e178f3
+c5e37630d0672e4d44f7dee83ac2c1528be41c2e
+ff9195f99a1a28ced431362f5363c9a5da47a37b
+9865fe20df8fe11717d92b5ea63469f59cf1635a
+1badfeece64d1bf43aa55c141afe61c74d0bd25e
+1e21b925b65303ef0299af65e018ec1e1b9b8d60
+1b55c4e804d1298cbbb9c507497177014a923d22
+23ce6f404c504592767b8bec7d844d87b462de71
+ada063ce9a1ff230791c48b6afa29c401a9007f1
+59fc69b3bc4759eef1347161e1248e886702f8f7
+0750a816858b601c0dbf4cfb68066ae7e788f05d
+552122432b92129d7e7059ef40dc5f6045f422b5
+368e99f669ea5fd395b3193cd75b301a76150f9d
+e3d76f1920c5bf4a60129516abb4a2d8683e48ae
+3c0fffd4cdbfef4ccd92d528d8b8a60ab0929827
+6c66ae815e7e508e852ecb122fb796abbcda16a8
+28d4e027c7e90b51b7d8908fce68128d1964668a
+5da827fe558fb2e1124dcc84ef08311241761726
+30870ef75aa57e41f54310283c0057451c8c822b
+b9d0774b0321a5cfc75471b62c8c5ef6c15527f5
+e87d6c284cdd6828dfe7c092087fbd9ff5091ee4
+305346d01298edeb5c6dc8b55679e8f60ba97efb
+ee2ec0836ded2f3f37bf49fa0e985280a8addaca
+c2bd9322fa2d0a00fc62075cc0f1996fc75d42a8
+61f04606528ecf4a42b49e8ac2add2e9f92c0def
+4c4e49033737467e28aa2bb32f6c21000deda2ef
+7c6686fa4d8c990e931f1d16deabf647bf3b1986
+12095f9b35ee88272dd5abc2d942a4f55804b31e
+6de935a02f87aa31e33245c3b85ea3b7f8b1111c
+cf736f596bf881ca97ec4b29776baaa493b9d50e
+eb48a58b873295d719827e746d51b110f5716d6c
+dce5e0a1f2cdc3d4e0e7ca0507592860599b0454
+b76af8fcf9a3ebc421b075b689defb6dc4282670
+54f169ad7d1f6c9ce94381e9b5ccc1a07fd49cc6
+9a23a0402ae68cc6ea2fe0092b6ec2d40f667adb
+3c6cac7ecf546556d7c6050f7b693a99cc8a57b3
+5b5b9c6c67855ede21a60c834aea5379df7d51b7
+c858c74d30c02be2d992f82a821b925669bfca13
+713db3874b77212492d75fb100a345949f3d3235
+ccf16bcf458e4d7a37643b8364594656287f5bfc
+ed1886e233c8ecef7f414811a61a83e44c8bbf50
+336488746cc76e7f13b0ec68ccfe4df6d76cdc8f
+9ca7899338129f4ba6744f801e722d53a44e4622
+034b3f3bac663fb814336a69a9fd3514ca0082b9
+f1b4583c576d6d8c661b4b2c82bdebf3ba3d7e53
+bbf28f39e5038813afd74cf1bc78d55fcbe630f1
+4ac3cd8b6c50f7a26f27eefc64855134932b39be
+4a8480d58c30dc484bda08969e754cd13a64faa1
+766728bac030b169fcbc2fbafe24c6e22a58ef3c
+701f56f0eac9f88387de1f556acef78016b05d52
+ed96f2eb1771f384df2349879970065a87975ca7
+45e7ddd5248977ba8ec61be111db912a4387d62f
+afdf9a3464c3b015f040982750f6b41c048706f5
+ba1c0600d3bdb8ed9d439e8aa736a96214156284
+a0b1990dd2b4cd87e4fd60912cc1552c34792770
+e4fa062bff299a0bcef9f6b2e593c85be116c9f1
+963d0d40de8780161b70d28d2b125b5222e75596
+ed09db68bf317cad27df6ed96a0c16eab6b2f827
+a0fd85b3400c7b3e11122f44dc5870ae2de9009a
+ce9e1dfa7705623bb67df3a91052062a0a0ca456
+daa4cfde41d37b2ab497458e331556d13dd14d0b
+4b936847f39094d6cb0bde68cea654d948c4735d
+c5ea084531212284ce3f1ca86a6209f0001de9d1
+f095b5770f0ff13ba9670e3d480743c5e9ad1036
+bbc5f4052674278c96abe7ff9dc2d75071b6e3f3
+3be8f1f7501978287af8d7ebfac5963216698249
+7360a2adcd6e3fe744b7d7aec5c08ee31094dfd4
+b1fdd4ae17d82612cefd4e78b690847b071379d3
+708f4787bec9d7563f4bb8b33834de445147133b
+88e2efab01e883e037a416c63a03075d66625c26
+696236fb6f986f6d5565abb01f402d09db68e5fa
+f61829274cfe64b94361e54351f01a0376cd1253
+96c6f50ce8e1b9e8215b8791dabd78b2bbd5f28d
+0a34fe39e9938ae8c813a81ae6d2d3a325600e5c
+837e99301e00c2244023a8a48ff98d7b521c93ac
+fdff2da5bdca66e0ab5874ef58ac2205fb088ed7
+3d18ce183b5a5b4dcaa1216e30b774ef49eaa46f
+d9deafd9d9e60657a7f34df5f494edff546c4fb8
+9207671d9e2b668c065e06d9f58f597601039e5e
+a78025f39cf78f2fc66c4b2942fbe5bad3ea65fc
+6d8c9a1759e7204eacb4eeb06567ad0ef4229f93
+87e6cb090aecfc6f03a3b00650a5c5f475dfebe1
+1fb980e137b2c9f8781a0d98c026e164b497ddb1
+6966d9d30fa9b7c01523425726ab417fd8428790
+01e27b6d1af4c9c2f50e2908b5f3b2331ff24846
+5bd3d08335bb4e444a86200c5e9f57fd9d719e14
+a40edf6eb979d1ddfe5894fac7f2cf199519669f
+40e1743332523b2ab5614bae5e10f7a7799161f4
+f201baf618574108bcee50e9a8b65f5174d832ee
+80ed678ef28ccc1b942e197e0393229cd99d55c8
+5fa6e4a23da0b39e4b35ac73a15d55cee8608736
+17c0d99171efc957b88c31a465c59485ab033234
+6d0fe30444c6f4e4db3ad8b02fb2c87e2b33c58d
+530243b61fa5aea19b454b7dbcac9f463ed0460e
+95e3b78eb4d5b469f66648ed4f37e45e0e01e63e
+d4a5eaf2e9f2fd3e264940039e2cbbf08880a090
+0a6a25ee84fc0bf7284f41eaa6fefaa58b5b329a
+3352426a67eabe3516812cb66a77aeb8b4df4d1b
+2724ba85ec4a66de18da33925e537f3902f21249
+234c106036964131c0f2daf76c47ced802652046
+f0a4a3fb6997334511d7b8fc090f9ce894679faf
+83295bce2340cb87901499cff492ae6ff3365475
+fd809ee36fa6832dda57a0a2403b4b52c207549d
+74ce7e5e677a4925489897665c152a352c49d0a2
+e4754afaa15b1b53e70743880484b8d0736990ff
+185263189a30986e31566394680d6d16b0089772
+2c62b9e64aeddf12f9d399b43baaefbca8e11148
+7cdf3bc1de6c7948763c0c2dfa4384dcbd3677a0
+c8ca6a2dc41516c16ea0747e9b3b7b1db788dbdd
+6b8d0569fffce5cc221560d459d6aa10c4db2f03
+56fd4c05869e11e4935d48aa1d7abb96072ac242
+1fe1a78c941e03abe942498249c041b2703fd3d2
+f070d739fb812d38571ec77490ccd8777e95ce7a
+ec1e03ec72186224b93b2611ff873656ed4d2f74
+dcf6ecd51ba135d432fcb7697fc6c52e4e7b0a43
+e97ba85a4550667b8a28f83a98808d489e0ff3bc
+6e38011e38a1c893b90a48e8f8eae0e22d2008e8
+86b6afc667bb14ff4d69e7a5e8bb2454a6bbd2cd
+dee406a7aaa0f4c9d64b7550e633d81bc66ff451
+51b42da0706a1260430f27badcf9ee6694768b9b
+891b10c4b3b92ca30c9b93170ec9abd71f6099c4
+f2d5bb329c09a5867045721112a7dad82ca757a3
+8f772d9ce324b2ef5857d6e0b2a420bc93961196
+6f5ce5570dc2960b8b0e4a0a50eab84b7f6af5cb
+927ba64123bd4a8a31163956b3d1765eb61e4426
+e01bb53b611c679141494f3ffe6f0b91953af658
+f7ae38a073be7c9cd1b92359131b9c8374579b13
+f7dea4454c2de0b96ab5cf95008ce7144292e52a
+c3d3d2229500c555c7a7150a8b126ef874cbee1c
+eee06d68497be8bf3a8aba4fde42a13aa090b301
+bbd1eb87c0686fddb838421050007e934b2d74ab
+370b6b83c7512419188f5373a962dd3175a56a9b
+2201f187a7483982c2e8e2585ad9907c5e66671d
+438c4b320b9a94a939af21061b4502f4a86960e3
+cb13e29fb8af6cfca568c6dc523da04d1db1fff5
+b26e8f6ad7c2d4c838660d5a17337ce241442ed9
+ec8ec2dfd73cf3667f33595fef84c95c42125945
+8a63a2b10068b6a917e249fdc73173f5fd918db0
+7cfbf90368553333b47731729e0e358479c25340
+9b2c359c36c38c289c5bacaeb5b1dd06b464f301
+7b0f1fc93fb24630eb598330e13f7b839fb46cce
+0f21a39fa4c0a19c4a5b4733579e393cb1d04f71
+9901f473aeea177a55e58bac8fd4f1b086e575a4
+754f7f3e9a44506b814bf9dc06e44fecde599878
+127c7f87f289b1d32e729738475b337a6b042cf7
+30fd1363fa14965e3ab48a7d6235e4b3516c1da1
+9627f28ea5f4c389350572b15968386d7ce3fe49
+b93bf0a7e449cfd0db91a83284d9eba25a6094d8
+6a52e6fce541126ff429f3c6d573bc774f5b8d89
+c38b1fa00f1f370c029984c55d4d2d40b529d00c
+a60db9ca8bc144a37fe233b08232d9c91641cbb5
+6932baa348943507d992aba75402cfe8545a1a9b
+badb95dbdfb3f044a46d7ba0ee69dba929c511b1
+f4ba07d2ae6c9673502daf50ee751a5e9262848f
+d06bcb2d46342ee011e652990edf290a0876b502
+91f0a95b8eb76e8fa24c8267e4a7a17815fc7a11
+4342a2b63c9c344d78cf153600cd918a5fecad59
+5b2cfee6e81ef36507ebf3c305e84e9e0473575a
+8e24db957be2b643db464cc566bfabc650f1ffac
+ded968b97bd59465d5ccda4f1e441f24bac7ede5
+6ad107c08ac018bfc6ab31ec92c8a4b234f67d49
+6dcf418c778f528b5792104760f1fbfe90c6dd6a
+5a3da29970d0c3c75ef4cb372b336fc8b10381d7
+e0162dea3746d58083dd1d061fb276015d875b2e
+b6bb883dd14f2737d0d6225cf4acbf050d307634
+92e464a5a67582d5209fa75e3b29de05d82c7c86
+9939498315777b40bed9150d8940fc1ac340e8ba
+3176ee88d1bb137d0b561ee63edf10876f805cf0
+fb87045600da73b07f0757f345a937b1c8097463
+88a898592b4c1dfd707f04f09ca58ec769a257de
+b908edadad58c604a1e4b431f69ac8ded350589a
+7df4f96138a4e23492ea96cf921794fc5287ba72
+a6ce2f0795839d9c2543d64a08e043695887e0eb
+3bb670b2afdcc45da2b09a02aac07e22ea7dbdc2
+013305c13cfabaea82c218b841dbe71e108d2b97
+f472cb8380a41c540cfea32ebb4575da241c0288
+4bbe460ab1b279a55e3c9d9f488ff79884d01608
+6ca2c5ff41e91c34696f84291a458d1312d15bf2
+3e40991ab1daa2a4906eb85a5d6a01a958b6e674
+85ae6fa48e07857e17ac4bd48fb804785483e268
+657e702326a1cbc561e059476e9be4d417c37795
+93dcea2419ca95b96a47e541748c46220d289d77
+1a327c588b8f1057b40ecba451145dd885598e5d
+34fd227f4fdbc7fe028cc1f7d92cb59204333718
+42a6beed493c69d5bad99ae47ea76497c8e5fdae
+849f891973ad2b6c6f70d7d43d9ac5805f1a1a5b
+96a9ca7a8366ae0efe6b58a515d15b44776faf6e
+8986585975c0090e9ad97bec2ba6c4b437419dae
+d3b0839324d0091e70ce34f44c979b9366547327
+badcd992266c6813063c153c41b87babc0ba36a3
+51d1a6e15936727e8dd487ac7b7fd39bd2baf5ee
+38f1fac3ed0fd054e009515e7bbc72cdd4cf801a
+a1e07c31184d3728e009d4d1bebe21bf9fe95c8e
+b6f682648418422e992e3ef78a6965773550d36b
+2d8001ffee6584b3f4d951d230dc00a06e8219f8
+e4d7b8eb0a8e6d2bb5b90b027c1bf32bad320ba5
+e1630014a5ae3d2fb7ff6618f1470a567f4d90f5
+6c58e3a8209fef0e28ca2219726c15ea5f284f4f
+9cc8cf0c7d7fa7607659921b6ff657e17e135ecc
+58bf72750a8f5100e0c01e55fd1b959b31e7dbce
+c39ffc56a41d436748b9b57bdabd8248b2d28a32
+85ec86f8320ba2ed8b3da04d1c291ce88b8969c0
+93420d9212dd15b3ef37f566e4d57e76bb2fab2f
+acee2201f8a15990551804dd382b86973eb7c0a8
+6f7d06ced04ead3b9a5da86b37e7c27bfcedbbdd
+fe48f0e43dbdeeaf4a03b3837e27f6705783e576
+d4f0960c6587379ad7df7928c256776e25952c60
+c1ebbdb47cb6a0ed49c4d1cf39d7565060e6a7ee
+9e105c4a176465d14434fb3f5bae67f57ff5fba2
+94eeae23786e128c0635f305ba7eebbb89af0023
+b3b467961ba66264bb73ffe00b1830d7874ae8ce
+40b7e590dfd1cdfa1e0276e9ca592e02c1bd2b5b
+6dbdb07ce2991db0f64c785ad31196dfd4dae721
+9bd35145c48ce172b80da80130ba310811a44051
+67484723e0c2cbeb936b2e863710385bdc7d5368
+f3b7938de5f178e25a3cf477107c76286c0ad691
+c86e6ed734d3aa967deae00df003557b6e937d3d
+7eb895e7de883d113b75eda54389460c61d63f67
+5c35ac04260e281141b3aaa7bbb147032c887f0c
+cd023d2d067365c83d8e27431e83e7e66082f718
+d69271c7b77bc3a06882884c21aa1b609b3f76cc
+b084683e5bab9b2bc327788e7b9a8e049d5fff8f
+e5d53a335515107452a30b330352cad216f88fc3
+52d7eb0fbc3522434c13cc247549f74bb9609c5d
+6dc1f94b852538d572e4919238ddb10e2ee449a4
+878301453e3d5cb1a1f7828002ea00f59cbeab06
+405d9a71350c9a13adea41f9d7f7f9274793824f
+d8fbd3a16d2e2e59ce0cff98b3fd586863878dc1
+96ba65bffdddef7c7737c0f42ff4299e95cd85c2
+c9c9ade2ef4dffb7582a629a47ea70c31be7a35e
+e065a2cb4534492ccf46d0afc81b9ad8b420c5ec
+dcf71245addaf66a868221041aabe23c0a074312
+9efdb73c6833df57732b727c6aeac510cadb53fe
+0b82bf595e76898993ed4f4b2883c42720c0f277
+a896ddeb0d253739c9aaef7fc1f170a2ba8407d3
+72cbbdee4f6eeee8b7dd22cea6092c532271009f
+24286ef164f0e12c3e9590ec7f636871ba253026
+377f2b65e6a9300448bdccf678cde59449ecd337
+1ee3b4ba04e54bfbacba94d54bf8d05fd202931d
+55e87050b998eb0a8f0b16163ef5a28f984b01fa
+4d90d7834ae25ee6176c096d5d6608555766c0b1
+878169be6e2c87df2d8a1266e9e37de63b524ae7
+bc607bee2002c6c6bf694a15efd0a5d049767237
+68caf5d8ef325d7ea669f3fb76eac58e0170fff0
+53bfe2ab770e74d064303f3bd2867e5bf7b86379
+c9bbd7828437e70cc3e6863b278aa56a7d545150
+8818b12aa0ff3bf0b20f9caa250395cbea0e8769
+6f7a8b3e8f212d80f0fb18860b2495be4c363eac
+4db0968270f4e7b3fa73e41c50d13d48e20687be
+bd9c9729475ba7e3b255e24e7478a5acb393c8e9
+64d7e62f46813b5ad08289aed5dc4825d7ec5cff
+30fb5c24cc15eb8cde5e389bf368d65fb96513e4
+bf5940d57f97ed20c50278a81e901ae4656f0f2c
+69a55c30c085ad1b72dd2789b3f699b2f4d3169f
+ef5531711a69ed687637c48930261769465457f0
+8a8861ad6caedc3993e31d46e7de6c251a8cda22
+ef458499c3856a6e9cd4738b3e97bef010786adb
+3b84d074b8622fac125f85ab55b63e876fed4628
+18010284894ed0edcca74e5bf768ee2e15ef7841
+bb2f61a057bbf176e402d171d79df2635ccda9f6
+35e0256b33212ddad2db548484c595334f15b4da
+782188821963304fb78791e01665590f0cd869e8
+83f80fd4eb614777285202fa99e8314e3e5b169c
+4e0636a1b92503469b44e2807f0bb35cc0d97652
+0ee5c4112208995bf2bb0fb8a87efba933a94579
+e85a255a970ee4c1eecc3e3d110e157f3e0a4629
+923ec0da8327847910e8dd71e9d801abcbc93b08
+9b0ead0a20a2b7c4ae40568d8d1c0c2b23a6b807
+572dbaee6648eefa4c9de9b42551204b985ff863
+2480f8dccd9054372d696e1e521e057d9ac9de17
+556b05ab6eff48d32ffbd04f9008b9a5c78a4ad7
+86f3552b822f6af56cb5079cc31616b4035ccc4e
+8ae642c87f0d6eeff1c6362571e7cd36dcda60ae
+8fa9cb5dac394e30e4089bf5f4ffecc873d1da96
+e572c42d8ef2e0fadedbaae77c8dfe05c4933fbf
+4bf85ef995c684b841d0a5a002d175fadd922ff0
+58d47c187b38b8a2bad319c789a09781073d052d
+59d225486161b43b7bf6919b4a4b4113eb50f039
+c038beaa228aeec174e5bd52460f0de75e9cccbe
+e43045a061421bd79713020bc36d2cf4653c044d
+d1edb8ba9d50817dbfec7e30f25b1846941e84d8
+d307a766cc9c728a24422313d4c3dcfdb0d16dd5
+ed2f4e5ecbc4b08ee0784e97760a7f9e5ea9efae
+1c9efb6c895917174ac6ccc3bae191152f90c625
+02fc9e7283b79183eb3757a9b6ddeb8c91c209bb
+7ec431e36919e29524eceb1431d3e1202637cf19
+44d23df380af207f5ac5b41459c722c87283e1eb
+dc5d04d34b278b944097b8925a9147773bbb80cc
+b999364980e4c21d9c22cc5a9f14501432999ca4
+e8f4ded98f5955aad114f55e7aca6b540599236b
+d3d39e419ac98db2de1a9d5a05cb0b4ca5cae8fd
+206e24f7d4b3943b35b069ae2d028143fcbd0704
+4552f4d46a2cc67ccc4dd8568e5c95aa2eedb4ec
+7ef0cc4f3f7566f96f168123bac1e07053a939b2
+25960f0a2ed38a89fa8076a448ca538de2f1e183
+a5f70e0cd7da2b2df05fadb356a24743f3cf459a
+872dfdeccf99bbbed7c8f1ea08afb2d713ebe085
+e9c008d31da38d9eef67a28d2c77cb7daec941fb
+c75e6ce54caf17b2780b4b53f8d29086b391e839
+a89cbc90bbb4477a48aec185f2a112ea7ebe9b4d
+5435d5f8b9f4def52ac84bee109320e64e58ab8f
+9af9a88c60d9e4b53e759823c439fc590a4b5bc5
+b72eebffe697008048781ab7b768e0c96e52236a
+57178b36c21fd7f4529ac6748614bb3374714e91
+361eaef45fccfffd5b7df12fba902490a7d24a8d
+380d5138cadccc9b5b91c707ba0a9220b0f39271
+4d47261b2f52c361c09f7ab96fcb3f5c22cafb9f
+ba01dbfa29dc86d1279b2e9b9eeca1c52509bbda
+1860b8f63ce501bd0dfa9e6f2debc080e88d9baa
+b59f441234d2d8f1765a20715e227376c7251cd7
+43dce79cf815b5c7068b1678f6200dabf8f5de31
+571b83f7fc01163383e6ca6a9791aea79cafa7dd
+ab80582807506c0f840bd1ba03a8b84f8ac72f79
+5180df9d5eb26283fb737f491623395304d57497
+885c37f94e9edbbb2177cfba8cb1ad840b2a5f20
+9fc993aeb0a007ccfaca369a9a8c0ccf7697261d
+5e7e055ef9ba6e8566a400a8b1c6d8f827099553
+c87f7ee391d6000aef2eadb49f03fc237f4d1170
+3bb6570d81685b769dc9e74b6e4958894087f3f1
+27da432cf2b9129dce256e5bf7f2f18953eef5a5
+173657da03e3249f4e47457d360ab83b3cefbe63
+ccb54fc5f263a8bc2a8373839cb6855f528f10d3
+a6d47f7aa361ab9b37c7f3f868280318f355fadc
+7c8e0f3053e09da6d8f9a1812591a35bccd5c669
+c394a5dfe5bea5fbab4c2b6b90d2d03e01fb29c0
+e79bacc03152ea55343e6af97bcd17d8904cf5ef
+bb0ecedde7d6e837dc9a5e115302a2aaad1035e1
+cdae8e9cc9d605856cf5709b2fdf61f722d450c1
+4d6ad0c7b3cf74adb0507dc886993e603c863e8c
+2770b095613d4395045942dc60e6c560e882f887
+17479e015a2dcf15d40190e06419a135b66da4e0
+03ac1c694bc84a27621da6bfe73ea9f7210c6d45
+dbb16032dd8f19bdfd045a1fc0fc51f29c70f70a
+59a6c9333c941faf2540979dcfcb5d503a49b91e
+84574aa43a98ad8a29470977e7b091f5a5ec2366
+46e72046a9bb2d4982d60bcf5c63dbc622717f0f
+ec00ecb64fa206cea8b2e716955a738a96424084
+cd55fb30737625e86454a2861302b96833ed549d
+eed93d2e16b55142b3260d268c9e72099c53d5bc
+b5fdd7778503f27c9d9bf77fab193b475fab6076
+08903bf161a1e8dec29250a752ce9e2a508a711c
+672fae3da801b2a0d2bad65afdbbbf1b2320623e
+3ac3a714042d3ebc159546c26321a1f8f4f5f80c
+f997a71f1e54d044184240b38d9dc680b3bbbbc0
+bb4be8e24d7b8ed56d81edec435b7b59bad96214
+cef73d305e5368ee269baff53ec20ea3ae7cdd82
+f28b7d62208fdaaa658716403106a2b0b527e763
+76cd5e43df44e389483f23cb578a9015d1483d70
+341002fac5ae6c193b78018a164d3c7295a495e4
+c68ec931585847b37cde9f910f40b2091a662e83
+89d3a57f663976a9ac5e9cdad01267c1fc1a7e06
+bec0c33d330385d73a5b6a05ad642d6954a6d632
+54204e28af73c7aca073835a14afcc5d8f52a515
+2c1ffb0feea5f707c890347d2c2882be0494a67a
+7ebb153704706e457ab57b432793d2b6e5d12592
+a1d86c898da3aea54deafd60864aa05dff8a4c9c
+49df381ea2a1e7f4059346311f1f9f45dd997164
+a59c0cf3d2c5bf144ee0dbc1152b1b5dd7634990
+f3ca2c43e8773b7062a8606286529c5bc9b3ce25
+b7ec41005ce4384e76e3be854ecccd564d2f89fb
+fe0c51fd41cb2d5afa1bc1900bbbadb38a0de139
+c5c53d42e551f3c8f6ca2c13335af80a882009fa
+4896909796f9bd2f70a2cb24bf18daacd6a12128
+a98316980b126f90514f33214dde51813693fe0d
+3bd10f7603c4f5a4737c5613722124787d0dd818
+809e5884cf26b71dc7abc56ac0bad40fb29c671c
+de0df8b2b4755da9f70cf1613d7b12040d0ce8ef
+cfb8bc66502fb5f941ecdb22aec1fdbfdb73adce
+d44a93027208816b9e871101693b05adab576d89
+841855205818d3a6d6f85ec17a22515f4f062882
+1773d65c1dc566fd6128db65e907ac91b4583bed
+021e008282714eaefc0796303f521c9e4f199d7e
+f03a82fd4a039c1b94a0e8719284a777f776fb22
+e9a5a38e7da3f0aa5d21499149536199f2e0e1f7
+4e061a302816f5890a621eb278c6efa6e37d7e2f
+ac8441e30833a8e2a96a57c5e6fede5df81794af
+052f994898c79529955917f3dfc5181586282cf8
+4cdb6144d56098b819076a8572a664a2c2d27f72
+dc6ad30c7a4bc79bb06b4725b16e202d3d7d8935
+b971266b29fcecf1d5efe1c4dcdc2355cb188ab0
+58542eeef9317ffab9b155579256d11efb4610f2
+2983cf95743be82671a71528004036bd19172712
+d9e66b877b277d73f8876f537206395e71f58269
+2a612a7037646276ff98141d3e7abbc9c91fccb8
+6d70344ae6f6108144a15e9debc7b0be4e3335f1
+78174c2be084e67f48f3e8ea5cb6c9968615a42c
+ab734bac3994b00bf97ce22b9abc881ee8c12918
+df577a89830be69c1bfb196e925df3055cafc0ed
+a3d8b5622c4b9af1f753aade57e4774730787a00
+bb4f83458976755e9310b241a689c8d21b481238
+e00d391d7943561f5c7b772ab68e2bb6a85e64c4
+432d8cba544bf7b09b0455561fea098177a85db1
+6f22628d34a486d73c6b46eb071200a00e3abae3
+73f341ff68caa9f8802e9e81bfa90d88bbdbd9d2
+5b5b568a0ba63d00e16a263051c73e09ab83e245
+fdaf65b314faee97220162980e76dbc8f32db9d6
+d31328b12eef33e7722b8e5505d0f9d9abe2ffd9
+f2700e3d69d3cce2e0b1aea0d7f87e74aff437cd
+9ba4a5e0b7bf6e26563d294f1f3de44d95b7f682
+39d6f8b791995dc5989f817373391189d7ac478a
+be393cd567b338da6ed60181c8ad429627578a31
+cbca355c5467f501d37b919d8b2a17dcb39d3ef9
+56dca23481de9119aa21f9044efd7db09f618704
+a9ae55c83a8047c6cdf7c958fd3d4a6bfb0a13df
+7831ab4f8c622d91974579c1ff749dadc170c73c
+d3d5d86afec84c0713ec868cf5ed41661fc96edc
+a1081cb856faae25df14e25045cd682db8028141
+ec90738b6de83748957ff7c8aeb3150b4c9b68bb
+993d189548e8702b1cb0b02603ef02656802c92b
+098363b29eef1471c494382338687f2fe98f6e15
+e6b45d5a86092bbfdcd6c3c54cda3d6c3ac6b227
+9be653e1bc15ef487d7f93aad02f3c9552f3ee4a
+8bebb26880274bdb840ebcca530caf26c393bf45
+feea73095b1be0cbae1ad7af8ba2c4fb6f316d35
+c05ae45c262b270df1e99a32efa35036aae8d950
+b8fc620a1563511744f1a9386bdfa09a2ea0f71b
+d3edbfe18610ce63f83db83f7fbc7634dde1eb40
+ed184fda0306079f2ee55a1ae60fbf675c8e11c6
+4f7b92bd678772552b3c3edfc9a7c5c4a8c60a8e
+e96cef8732f3021080c362126518455562606f2d
+9f2984081ef88c20d43b29788fdf732ceabd5d6a
+e56c4c41bfa5ec2d86c7c9dd631a9a69cdc05e69
+80d4cf7747abfae96328183dd1f84133023c2668
+2135a3d9f4b8f5771fa5fc7c7794abf8c2840c44
+c1298120e9ab0d3764512cbd38b47cd3ff69327b
+fab60b3db164327be8588bce6ce5e45d5b882db6
+c6382de52636705be5898017f2f8ed7c70d7ae96
+45b9b7fe3850ef83d39d52f6edcc0c24fcc0bc73
+fcf91995dc4d9b0cee84bda5b5b0ce5b757740ac
+48499deeaa1e31ac22c901d115b8b9867f89f952
+33ef419dffef85443ec9fe89a93f928bafdc922e
+cdcfc75f54405c77478ab776eb407c598075d9f8
+a92147bed9c17c311c6081beb0ef4c3165b6268e
+e8523c4ac9d7aa21f3eb4062e09f2a3bc1eedcf7
+e7b2b0538731adaacb2255235e0a07d5ccf09189
+fd15e397629e0241642329fc8ee0b8cd6c6ac807
+5779e3e439c90d43648db107e848aeb954d3e347
+16b9d258547f1eccdb32111c9f45e2e4bbee79af
+628a3f027b7646f398c68a680add48c7969ab1d9
+8da32ff9e3759dc236878ac240728b344555e4e9
+014e3d0fa5248e6f4634dc237e2398160294edce
+7f26c615dd187ca5e4b15759d5cb23ab3ea9d9a9
+275b5091c50509cc8861e792e084ce07aa906549
+cd4941cbef1e27d7afdc41b48c1aff5338aacf06
+b3ba7ab6de023a0d58c741d6abfa3eae67227caf
+1b3587363d37dd197b6adbcfa79d49b5486f27d8
+7d2556d674ad119cf39df1f65aedbe7493970256
+2d8d089d368f2982748fde93a959cf5944873673
+22648dcd3100432fe0cc71e09de5ee855c61f12b
+51faacfa4fb1e6aa252c6970e85ff35c5719f4ff
+55ea0c775b25d9d04b5886e322db852e86a556cd
+3240c9359061edf7a06bfeb7cc20c103a65904c2
+23429ef60e7a9c0e2f4d81ed1b4e47cc2616522f
+60542b1a857024c79db8b5b03db6e79f74ec8f9f
+aa3c9de34ef140ec812be85bb8844922c35eba47
+8164ebc07f51c9e0db4902980b5ac3f5a8d8d48c
+ee463f1f72a7e007bae274d2d42cd2e5d817e751
+d5de42d37ee84c86b8f9a054f90ddb4566990ec0
+b2c60061ad32e28eb1e20aff42e062c9160786be
+4641986af5fc8836b2c883ea1a65278d58fe4577
+fa90b825346a51562d42f6b59a343b98ea2e501a
+daefac0610fdeff415c2a3f49b47968d84692e87
+f08e425c2fce277aedb51d93757839900d591008
+b1b993a1fbcc827bcb99c4cc1ba64ae2c5dcc000
+8f3da45ff0c3e1777c3a7830f79c10f5896bcc21
+7697295ee6fc817296bed816ac5cae97644c2d5b
+efa08283656714911acff2d5022f26904e451113
+5a5f9e0ed220ce51b80cd7b7ede22e473a62062c
+fdfd57d4721174eba288e501c0c120ad076cdca8
+a8d52265649c16f95af71d6f548c15afc85ac905
+f6e00d6430cbbaa64789d826d093f7f3e323b082
+4f8345f31e38f65f1155569238d14bd8517606f4
+16fadde3e68bba301f9829b3f99157191106bd0f
+1287bfe73e381cc8042ac0cc27868ae086e1ce3b
+663efaa0671eace1100fdbdecacd94216a17b1db
+7bbaa09c9e318da4370a83b126bcdb214e7f8428
+103c8eaca2a2176babab2cc6e9b25d48870d6928
+e39a66a6d1c5e753f8e6c33cd5d335f9bc9c07fa
+4f8b4784d0fca31840307650f7052b0dde736a76
+6c01b349edb2d33530e8bb07ba338f009663a9dd
+d9a5c82b710b1f4f1ffb67be2ae1d3c0ae7f6c55
+fb6cc23fd6bd43bd4cacf6a57cd2c7c8dfe5269d
+68604e7e1b01cdbd3c23832976d66f1a86edaa8f
+cfffae38fe34e29d47e6deccfd259788176dc213
+8ed33184fccde677ec8413ae06f28ea9f2ca70f3
+cc05f758ccdf57d77b06b96b9d601bf2795a6cc4
+b7461aac36fc0b8a24ecadf6c5b5caf54f2aa2f7
+61971f8e6fff5b35faed610d02ad14ccfc186c70
+2902f62457fdf7e8e8ee77a9155474107a2f423e
+6cfc337069868568148f65732c52cbcef963f79d
+c84233f854bbed17c22ba0df6048cbb1dd4d3248
+f33bd953d2df0a5305fc8a93a37ff754459a906c
+e9d43231a403b4409633594fa6ccc518f035a135
+b558be7e182809f5404ea0fcf8a1d1d9498dc01a
+64ec02e1056de4b400f9547ce56e69ba8393e2ca
+e3b324101157daede3b4d16bdc9c2388e849c7d4
+a4898f55f12e6393b1c078803909ea715bf71730
+4e27fec1703408d524d6b7ed805cdb6cba6ca132
+193bc8b663d041bc34134a8407adc3e546daa9cc
+3f9a7d690db82cf5c3940fbb06b827ced59ec01e
+6da711d07b63c9f24d143ca3991070736baeb412
+113b06e70b7eead8ae7450bafe9c91656705024c
+a85f691c9f82a248aa2c86d4a63b9036d6cf47ab
+1d729693a888a460ee855040f62bdde39ae273af
+b0c512fcfb7bd6c500429cbda963e28850f2e948
+de162d4b8450bf2b80f672478f987f304b7e6ae4
+e295c1aa47422eb35123053038e62e9aa50a2e3a
+bad2df94fa771869fa35bd11a1a7ab2e3f6d1da3
+c98b13871a3bc767df0bdd51ff00c5254ede8b22
+b4b0bf0cbe1a2c114adde9fac64900b2f8f6fee4
+7a81967598c2c0b3b3771c1af943efb1defd4482
+31d51e48dbd9e7253eafe0719f3788adb564a971
+506c2fbfa9d16037d50d650547ad3366bb1e1cde
+d2f2b10a8f29165d815e652f8d44955a12d057e6
+5cbe1445d683d605b31377881ac8540e1d17adf0
+f1173a4c5e3501323b37c1ae9a6d7dd8a236eab8
+529baf1a79cca813f8c9966ceaa9b3e42748c058
+42ea8a96eea023361721f0ea34264d3d0fc49ebd
+632b24ddd42fda4aebc5a8af3ec44f7fd3ecdc6c
+e4abc40f79f86dbc06f5af1df314c67681dedc51
+59b83666c1031c3f509f063b9963c7ad9781ca23
+c12260540ec14910f5ec6e38d95bdb606826b32e
+8633732d9f787f8497c2696309c7d70176995c15
+ca44a838da4187617dca9f6249d8c4b604661ec7
+239958d6778643101ab631ec354ea1bc4d33e7e0
+cd444ee7f165032b97ee76b21b9ff58c10750570
+e9809c0c6bf33cfe232a63b0a13f9b1263c58cb8
+4f051022de100241e5a4ba8a7514db9167eabf6e
+f94f366ce14555cf0d5d34248f9467c18241c3ee
+982fcead58be419e4f34df6e806204674a4bc579
+55c4efc082a8410b528af7325de8148b80cf41e3
+b53485dbdd2dc5e4f3c7cff26bd8707964bb0503
+3803b91e784922a2dacd6a18f61b3100629df932
+fcceea054cb59f1409dda181198ed4070ed762c9
+562f7555e5cb79ce0fe834c4613264d8378dd007
+614079f1a0d0938f9c30a1585f617fa278816d53
+1025c4922491745534d5d4e8c6e74ba2dc57b138
+cbe021d840f9fc1cb191cba79d3f7e3bbcda78d3
+30c93fec078b98453a71f9f21fbc9512ab3e916f
+a1f1120653bb1bd8bd4bc9616f85fdc97f8ce892
+2866cbeb25551257683cf28f33d829932be651fe
+e16f73f3a63c44cf285b8c1bc630eb8377b85b6d
+a00fdf49e5e0a73eb24345cb25a0bd1383a10021
+6a931e7b7475635f089dd33e8d9a2899ae963804
+2059d2fecfa61ddc648be61c0cbc9bc1ad8a9f5b
+6754c98ba73651f69525c770fb0705a1fae78eb5
+9d57c4036a0e5f1349cd11bc342ac515307b6720
+9296f4ac0180e29226d6c016b5a4d5d2964eaaf6
+cc31db984282bb70946f6881bab741aa841d3a7c
+fb9ad920809669c1b1455cc26dbd900d8e719e61
+3619a9b46ad4779d0a63b20f7a6a8d3d49530339
+aff8705fb2f2ae460cb3980b47f2e85c2e6dd41a
+e287ff7997297ce1197359ed0fb2a0bd381638c9
+beae35eb5b2c7f63dfa9115f07b5ba0319709951
+79744fc71bea58d2e1918c9e254b10047472bd76
+950171acb24bb24a871ba0d02d580c09829de372
+1a849b694f2d68c3536ed849ed78c82e979d64d5
+77fbbf0c5729f97fcdbfdc507deee3d388cd4889
+39c8b34c1b678235b60b648d0b11d241a34c8e32
+26e570049aaedcfa420fc8c7b761bc70a195657c
+a775da3e6e6ea64bffab7f9baf665528644c7ed3
+e896389891ba84af58a8c279cf8ab5de3e9320ee
+55aafdef9d9798611ade1a387d1e4689f2975e51
+860588fafcc80c823e66429fadd7e816721da42a
+1fdeba9c4064b449231eac95e610f3288801fd3e
+df2c685aa9c234783ab51c1aa1bf1cb5d71a3dbb
+652ec3947d3d04dda719b1f5ba7c975e567166ef
+0bf0029c9bdb0ac61fda35c075deb1086c116956
+281486d172cf0c78d348ce7d977a82ff763efccd
+b104c8ef6735eba1d29f50c99bbbf99d33fc8dc2
+68f61154a0080c4aae9322110c8827978f01ac2e
+747c25bff37b96def96dc039cc13f8a7f42dbbc7
+fc0f5859a111fb17e6dcf6ba63dd7b751721ca61
+b6d0e461535116a675a0354e7da65b2c1d2958d4
+f5c57979ec3d8baa6f934242965350865c0121bd
+ae2cf545565c157813798910401e1da5dc8a6199
+ef4ecb76413a05c96eac4c743d2c2a3886f2ae07
+f76a6b1d6029769e2dc1be4dadbee6a7ba777429
+2cac8ab4088e2bdd32dcb276b86459427355085c
+eaf020bc8a3ed5401fc3852f7037a03b2525586a
+90e7a86a57079f17f1089c3a46ea9bfd1d49226c
+1c25a3c8ef3e2c4dbff337aa727d13f5eba40fb2
+e569f4bd41895028c4c009e5b46b935056188e91
+264f7ab36ff2e23a1514577a6404229d7fe1242b
+214072c84378802a0a0fde0b93ffb17bc04f3759
+18941b52527e6f15abfdf5b86a0086935706e83b
+51d048b92f6680aca4a8adf07deb380c0916c808
+8bbd40558a99e33fac18f6736b8fe99f4a97d9b1
+0d33b6c8b4d1a3cb6d669b4b8c11c2a54c203d1a
+b51d11fa400d66b9f9d903a60c4ebe03fd77c8f2
+b5f3b0f45cf7f462a9c463a941e34e102a029506
+9f1a854d574d0bd14786c41247db272be6062581
+8cb6daba2cb1e208e809633133adfee0183b8dd2
+50a0930cb8cc353e15a5cb4d2f41b365675b5ebf
+31dd6bafd6e7c6095eb8d0591abac3b0106a75e3
+73c9cbbf3f9cea1bc7dce98fce429bf0616a1a8c
+e57014b4106dd1355e69a0f60bb533615a705606
+f4f6fc473effb063b7a29aa221c65f64a791d7f4
+6e00a406edb508312108f683effe6d3c1db020fb
+e393a038d520a073b9835df7a3ff104ad610c552
+d904f945c1506e7b51b19c99c632ef13f340ef4c
+587b8c147c6253878128ddacf6e5faf8272842a4
+af29ad70ab148c83e1faa8b3098396bc1cd87790
+91ead35d1d2ff2ea7cf35d15b14996471404f68d
+93dd4e512cd7647aecbfc0cd4767adf5d9289c3d
+19b492d426f092d80825edba3b02e354c312295f
+f3ea181507db292b762aa798da30bc307be95344
+7343f0b7bcdaf909c5e37937e295bf0ac7b69499
+2aec012bb6dcaacd9d7a1e45bc5204fac7b63b3c
+a8a30a8c50d9c4bb8e6d2dd84bc5b8b7f2c84dd8
+3157be811685c93d0cef7fa4c489efea581f9b8e
+437642cfc8c34e445ea653929e2d183aaaeeb704
+ccb2ecb30a50460c9189bb55ba594f2300882747
+76b11c281ac47fe6d95e124673a408ee9eb568e3
+4850af6b54391fc33c8028a0b7fafe05855a96ff
+f7824758800a7b1a386db5bd35f84c81454d017a
+e5fbffd3449a2bfe0acb4ec339a19f5b88fff783
+ceaa5eb51f761b5f84bd88b58c8f484fcd2a22d6
+166ef5d3fd96d99caeabe928eba291c082ec75a0
+5632ba72b2652df3b648b2ee698233e76a4eee65
+397257783ccc8cace5b67cc71e0c73034d559a4f
+6b333b2c6311e36c2bde920ab5813f8cfcf2b67b
+9285f4a6a06e975bde3ae3267fccd971d4fff98a
+55cfc3c08000f9d21879582c6296f2a864b657e8
+a812368fe1d4a186322bf72a6d07e1cf60067234
+b8f3f6d8f188f65ca8ea2725b248397c7d1e662d
+60777fbca8bff210398ec8b1179bc4ecb72dfec0
+8ad0a88a7583af819af66cf2d9e8adb860cf9c34
+8a8127a06f432982bfb0150df3212f379b36840b
+d6e08345ba293565086cb282ba08b225326022fc
+a136ccaa67f660c45d3abb8551c5ed357faf7081
+d69df51cff3d6b9b0625acdcbea27cd2bbf4b9c0
+f5770dd225501ff3764f9023f19a76fad28127d4
+b5f2846a506fc417e7da43f6a7679146d99c5e96
+1fa426496ed6bcd0c0b17b8b935a14c84a7ee1c2
+2cde051e04569496fb525d7f1b1e5ce6364c8b21
+a5f11c132eaab258a7cea2d681875af09cddba65
+f42dca4a4426e5873a981712102aa961be34539a
+35e6f6e5f4f780508e5f58e87f9efe2b07d8a864
+a6e25cab2251a8ded43c44b28a87f4c62e3a548a
+e8b3a257a0a44d2859862cdec91c8841dc69144d
+8e3c97e420e0112c043929087d6456d8ab61e95c
+48186494fc7c0cc664edec16ce582b3fcb5249c0
+fd33df02f970055d74fbe69b05d1a7a1b9b2219b
+b8d8501595f38974e001a66752dc7098db13dfec
+2fea258320c50f36408032c05c54ba455d575809
+656f05741c402ba43bb1b9a58bcc5f7ce2403d9a
+1b71d3f30238cb6621021a95543cce3aab96a21b
+d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9
+e096b11b3988441c0995c13742ad188a80f2b461
+b9081856963ceb78dcb44ac410c6fca0533676a3
+d0f54b72e3a3fe7c0e65d7d5a3b30affb275f4c5
+f22d6d59e413ee255e5e0f2104f1e03be1a6722e
+0831794eddcbac1f601dcb9be9d45531a56dbf7e
+70c9d11cad12dc1692a4507a97f50311f1689dbf
+8aa1591bf8fcb44f2e9f2f10d1029720ccbb8832
+1f59e0818e7b16c0d39dd08eb90533ea0ae0be5e
+464b3f0824fc1c3a9eaf721ce2db1b7dfe7cb05a
+e38371b69be4f341baa95bc854584e99b67c6d3a
+947ee3452e4f3d657b16325c6b959f8b8768efad
+7c17280c9193da3e347416226b8713b99e7825b8
+2f69e9964f3b6bdc0d18749b48bb6b44a4171c64
+61e2044184d86d0f13e50ecaa3da6a4913088c76
+16fdd6d842475e6fbe58fc809beabbed95f0642e
+98e098ba9ff98fc58f22fed6d3d8540116284b91
+362bfeb28adac5f45b6ef46c07c59744b4ed6a52
+b3ad7bc128b77d9254aa38c5e1ead7fa10b07d29
+6c80c834d426f0bc4acd6355b1946b71b50cbc0b
+1221e25763c3be95c1b6626ca9e7feaa3b636d9a
+bd243d77076b3b8fe046bd3dc6e8a02aa9b38d62
+944faf7f14f1bead911aeec30cc80c861442b610
+5ac946fc6543a445dd1ee6d5d35afd3783a31353
+11691f1e7c9dbcbd6dfd256ba7ac710581552baa
+20eeb83a8b6fea64c746bf993f9c991bb34a4b30
+193474d008cab9fa1c1fa81ce094d415f00b075c
+620339aef06aed07a78f9ed1a057a25433faa58b
+a6b5ca99432c23392cec682aebb8295c0283728b
+704d88168bdfabe31b6ff484507f4a2244b8c52b
+db848c3c32464d12da33b2f4c3a29fe293fc35d1
+bd21109e40c26af83c353a3271d0cd0b5c4b4ade
+fc8fb68a7e3b79c37108588671c0e1abf374f501
+2d9e58ea582e054e9d690afca8b6a554c3687ce6
+e19ebad4739d59f999d192bac7d596b20b887f78
+2303d07d839e8b20f33d6e2ec78d1353cac256cf
+a36c8a4213251d3fd634e8893ad1b932205ad1ca
+2c19d3d35ef7062061b9e16d040cebd7e45f281d
+5c493c42bfd93e4d08517438983e3af65e023a87
+101d4cfbd6f8a7a10bd33505e2b183183f1d8770
+aafb8dc8fda3b13a64ec3f1ca7911df01707c453
+dd85b6fdc45bf61f2b3d3d92ce5056c47bd8d335
+ddfae3a96bd341109d75cedeaebb5ed2362b903f
+f257300b2b4141aab73f93c146bf94846aef5fa1
+15d653972d176963ef0ad2cc582d3b35ca542673
+6c304f3b9c3a711a0cca5c62ce221fb098dccff0
+ce450e4849490924488664b44769b4ca57f1bc1a
+6fef65bd7287b57f0c3b36bf8e6bc987fd161b7d
+c76f64e87f88475069f7707616ad9df1719a6099
+86374bb8d309ad4dbde65c21c6fda6586ae4147a
+1c41965c5e1f97b1504c1bdde8037b5e0417da5e
+799c02a3cde2c0805ea728eb778161499017396b
+28f1542c63f5949ee6f2d51a6422244192b5a900
+85c90ad5eebb637f048841ebfded05942bb786b7
+05891725f5b27332836cf058f04f18d74053803f
+03ce2ff688f9b588b6f264ca79c6857f0d80ceae
+d141c31e3f261d7d5214f07886c1a29ac734d6fc
+c0c8d720658374cc1ffd6116554a615e846c74b5
+ad2339c48ad4ffdd6100310dcbb1fb78e72fac98
+bc36badb6606b8162d821a227dda09a94aac537f
+3947b64dcac5bcc1d3c8e9dcb50558efbb8770f1
+b759936982d6fb25c55c98955f6955582bdaeb27
+5e6f546a50ed97658be9310d5e0a67891fe8a102
+6b99cd366f2ea8e1c9abadf73b05388c0e24fec3
+6a2ac4f831bd0f67db45e7d3cdaeaaa075e7180a
+631483c15641c3652377f66c8380ff684f3e365c
+2bb53e66aa9417b6560e588b6235e7b8ebbc294c
+dee6609615b73b10540f32537a242baa3c9fca4d
+959bcb16afdf303c34a8bfc11e9fcc9d40d76b1c
+1442319de86d171ce9595b20866ec865003e66fc
+ac559873b288f3ac28ee8a38c0f3710ea3f986d9
+8d384e8c45a429f5c5f6628e8ba0d73c60a51a89
+fd71ae9599e8a51d8a61e31e6faaaf4a23a17d81
+825f56ff489cdd3bcc41e76426d0070754eab1a8
+f1250900074689061196d876f551ba590fc0a064
+1bbec7190ac3ba34ca91d28f145e356a11418b67
+41f26101fed63a8d149744264dd5aa79f1928265
+06f585a3a05dd3371cd600a40dc35500e2f82f9b
+49be50efc87c5df7a42905e58b092729ea04c2f5
+bcf19b964e7d1134d00332cf1acf1ee6184aff00
+fa4f59397f964a23e3c10335c67d9a24ef532d5c
+31ec1e5c3b5e020af4a5a3c1be2724c7429a7c78
+c535d4d61aa0f1d8aadb4082bdcc19f4cbdf0eaf
+fdda5852f2cffc871fd40b0cb1aa14cea54cd7e3
+e03e86ac61cfac9148b371d75ce81a55e8b332ca
+8724fc4d6b91eebb79057a7ce3e9dfffd3b1426f
+202dc3c6fda654aeb39aee3e26a89340fb06802a
+247a8040447b6577aa33648395d95d80441a0cf3
+626859fe8cafd25da13b19d44d8d9eb6f0918647
+a0c37f07710184597befaa7e6cf2f0893ff440e9
+d44e6baf3464bf56d3a29daf280b1b525ac30f7d
+10cb39e93fac194220237f15dae084136fdc6740
+7e2f7c0eeaeb47b163a7258665324643669919e8
+351c02d4775ae95e04ab1e5dd0c758d2d80c3ddd
+d86fabd4498c8feaed80ec342d254fb877fb92f5
+4c0cc732314ba3ccccd9036e019b1cfc27850c17
+446dc1413e1cfaee0030dc74a3cee49a47386355
+cb2917413c9b36c3bb9739bce6c03a1a6eb619b3
+521aa8dcd66428b07728b91722cc8f2b5a73944b
+11bb2abe0ca614c15701961428eb2f260e3e2eef
+863ad2838b9b90d4461995f498a39bcd2fb87c73
+cd22e6532211f679ba6057d15a801ba448b9915c
+df9269657505fcdc1e10cf45bbb8e325678a40f5
+673d4885370b27c863e11a4ece9189a6a45931cc
+48e6c6d981efe2c2fb0ae9287376fcae59da9878
+6cb7648465ba7757ecc9c222ac1ab6402933d983
+407de9da58871cae7a6ded2f3a6162b9dc371f38
+97b5800e144a8df48f1f7e91383b0f37bc37cf60
+9ea223c070ec9a00f4cb5ca0de35d098eb9a8e32
+14558a70418ec4012c5f058145eef2d22d89284a
+097340d3ac939ce181c829afb6b6faff946cdce0
+a8a61badec9b8bc01f002a06e1426a623456d121
+ac0d3f6ed5c42b7fc6d7c9e1a9bb80392742ad5e
+b6ef158d95042f39765df04373c01546524c9ccd
+ae78469de00ea1e7602ca468dcf188cdfe2c80d4
+09926ed62511c340f4540b5bc53cf2480e8063f8
+46f2611dc4a9302e0ac00a79456fa162461a8c80
+f02a6bccdaee14ab55ad94263539f4f33f1b15bb
+6964af90cf8ac336a2a55800d9c510eccc7ba8e1
+17a8d1b1b4c23a630b051f35e47663fc04dcf043
+be5276e9744c4445fe5b12b785650e8f173f56ff
+580f86f1ace1feed16b592d05c2b07f26c429b4b
+09507f1f1253101d04a975fc5600952eac868602
+2d4a3e9361505616fa4851674eb5c8dd18e0c3cf
+f2a7f9bd040aa8ea87672d38606a84c31163e171
+d700aedcb22a4be374c40d8bee50aef9f85d98ef
+d289ce63055c10937e5715e940a4bb9d0af7a8c5
+9aad8e52aff12bd822f0011e6ef85dfc22fe8466
+645f09f4bc2e6a13663564ee9032ca16e35fc52d
+39b452453bea9ce398613d8dd627984fd3a0d53c
+20c02e98602f6adf1cebaba075d45cef50de089f
+73ed64803d6f2c49f01cffef8e6be8fc9b5273b8
+8befcd91c24038e5c26df0238d26e2311b21719a
+10af69f11301679b6fbb23855bf10f6af1f3d2e6
diff --git a/scraper/reports/misc/missing-2.csv b/scraper/reports/misc/missing-2.csv
new file mode 100644
index 00000000..c3182fe7
--- /dev/null
+++ b/scraper/reports/misc/missing-2.csv
@@ -0,0 +1,817 @@
+a14ed872503a2f03d2b59e049fd6b4d61ab4d6ca
+504028218290d68859f45ec686f435f473aa326c
+164b0e2a03a5a402f66c497e6c327edf20f8827b
+4d19401e44848fe65b721971bc71a9250870ed5f
+ab0981d1da654f37620ca39c6b42de21d7eb58eb
+b09b693708f412823053508578df289b8403100a
+c9b958c2494b7ba08b5b460f19a06814dba8aee0
+badd371a49d2c4126df95120902a34f4bee01b00
+bc9af4c2c22a82d2c84ef7c7fcc69073c19b30ab
+f571fe3f753765cf695b75b1bd8bed37524a52d2
+d3008b4122e50a28f6cc1fa98ac6af28b42271ea
+70c58700eb89368e66a8f0d3fc54f32f69d423e1
+5945464d47549e8dcaec37ad41471aa70001907f
+d833268a8ea9278e68aaf3bd9bc2c11a5bb0bab7
+c6241e6fc94192df2380d178c4c96cf071e7a3ac
+f0f854f8cfe826fd08385c0c3c8097488f468076
+35b1c1f2851e9ac4381ef41b4d980f398f1aad68
+7ed3b79248d92b255450c7becd32b9e5c834a31e
+30cbd41e997445745b6edd31f2ebcc7533453b61
+24115d209e0733e319e39badc5411bbfd82c5133
+0a9d204db13d395f024067cf70ac19c2eeb5f942
+74c19438c78a136677a7cb9004c53684a4ae56ff
+d5d5cc27ca519d1300e77e3c1a535a089f52f646
+d309e414f0d6e56e7ba45736d28ee58ae2bad478
+a87ab836771164adb95d6744027e62e05f47fd96
+e75a589ca27dc4f05c2715b9d54206dee37af266
+e2faaebd17d10e2919bd69492787e7565546a63f
+bd0e100a91ff179ee5c1d3383c75c85eddc81723
+9c065dfb26ce280610a492c887b7f6beccf27319
+81a142c751bf0b23315fb6717bc467aa4fdfbc92
+8b744786137cf6be766778344d9f13abf4ec0683
+9077365c9486e54e251dd0b6f6edaeda30ae52b9
+928b8eb47288a05611c140d02441660277a7ed54
+1ea74780d529a458123a08250d8fa6ef1da47a25
+5a12e1d4d74fe1a57929eaaa14f593b80f907ea3
+691964c43bfd282f6f4d00b8b0310c554b613e3b
+bff567c58db554858c7f39870cff7c306523dfee
+82e3f4099503633c042a425e9217bfe47cfe9d4b
+062c41dad67bb68fefd9ff0c5c4d296e796004dc
+95d858b39227edeaf75b7fad71f3dc081e415d16
+2c5d1e0719f3ad7f66e1763685ae536806f0c23b
+c4cfdcf19705f9095fb60fb2e569a9253a475f11
+d3b18ba0d9b247bfa2fb95543d172ef888dfff95
+b7820f3d0f43c2ce613ebb6c3d16eb893c84cf89
+95288fa7ff4683e32fe021a78cbf7d3376e6e400
+51cb09ee04831b95ae02e1bee9b451f8ac4526e3
+151b87de997e55db892b122c211f9c749f4293de
+e060e32f8ad98f10277b582393df50ac17f2836c
+7fe2ab9f54242ef8609ef9bf988f008c7d42407c
+8356832f883207187437872742d6b7dc95b51fde
+8981be3a69cd522b4e57e9914bf19f034d4b530c
+ca37eda56b9ee53610c66951ee7ca66a35d0a846
+b8375ff50b8a6f1a10dd809129a18df96888ac8b
+c588c89a72f89eed29d42f34bfa5d4cffa530732
+a92b5234b8b73e06709dd48ec5f0ec357c1aabed
+1efacaa0eaa7e16146c34cd20814d1411b35538e
+4f0b8f730273e9f11b2bfad2415485414b96299f
+d02b32b012ffba2baeb80dca78e7857aaeececb0
+a9fc8efd1aa3d58f89c0f53f0cb112725b5bda10
+cb8a1b8d87a3fef15635eb4a32173f9c6f966055
+ae8d5be3caea59a21221f02ef04d49a86cb80191
+26575ad9e75efb440a7dc4ef8e548eed4e19dbd1
+94806f0967931d376d1729c29702f3d3bb70167c
+ef8de1bd92e9ee9d0d2dee73095d4d348dc54a98
+533d14e539ae5cdca0ece392487a2b19106d468a
+24ff832171cb774087a614152c21f54589bf7523
+62dccab9ab715f33761a5315746ed02e48eed2a0
+506ea19145838a035e7dba535519fb40a3a0018c
+677585ccf8619ec2330b7f2d2b589a37146ffad7
+508702ed2bf7d1b0655ea7857dd8e52d6537e765
+f38813f1c9dac44dcb992ebe51c5ede66fd0f491
+9d58e8ab656772d2c8a99a9fb876d5611fe2fe20
+3c56acaa819f4e2263638b67cea1ec37a226691d
+90d9209d5dd679b159051a8315423a7f796d704d
+c83e26622b275fdf878135e71c23325a31d0e5fc
+3fb98e76ffd8ba79e1c22eda4d640da0c037e98a
+62fddae74c553ac9e34f511a2957b1614eb4f937
+fb2cc3501fc89f92f5ee130d66e69854f8a9ddd1
+54969bcd728b0f2d3285866c86ef0b4797c2a74d
+2ce1bac5ddc4cf668bbbb8879cd21dfb94b5cfe4
+55c68c1237166679d2cb65f266f496d1ecd4bec6
+f0f4f16d5b5f9efe304369120651fa688a03d495
+baafe3253702955c6904f0b233e661b47aa067e1
+cfc4aa456d9da1a6fabd7c6ca199332f03e35b29
+8895d6ae9f095a8413f663cc83f5b7634b3dc805
+3daafe6389d877fe15d8823cdf5ac15fd919676f
+c0f67e850176bb778b6c048d81c3d7e4d8c41003
+3328413ee9944de1cc7c9c1d1bf2fece79718ba1
+14fee990a372bcc4cb6dc024ab7fc4ecf09dba2b
+be4a20113bc204019ea79c6557a0bece23da1121
+6318d3842b36362bb45527b717e1a45ae46151d5
+6cbde27d9a287ae926979dbb18dfef61cf49860e
+a6270914cf5f60627a1332bcc3f5951c9eea3be0
+e52f73c77c7eaece6f2d8fdd0f15327f9f007261
+93f37c69dd92c4e038710cdeef302c261d3a4f92
+cec8936d97dea2fcf04f175d3facaaeb65e574bf
+bc8e11b8cdf0cfbedde798a53a0318e8d6f67e17
+cdc7bd87a2c9983dab728dbc8aac74d8c9ed7e66
+86f191616423efab8c0d352d986126a964983219
+378418fdd28f9022b02857ef7dbab6b0b9a02dbe
+263ed62f94ea615c747c00ebbb4008385285b33b
+414715421e01e8c8b5743c5330e6d2553a08c16d
+6fea198a41d2f6f73e47f056692f365c8e6b04ce
+2e8e6b835e5a8f55f3b0bdd7a1ff765a0b7e1b87
+6e8c3b7d25e6530a631ea01fbbb93ac1e8b69d2f
+d1dfdc107fa5f2c4820570e369cda10ab1661b87
+106092fafb53e36077eba88f06feecd07b9e78e7
+782a05fbe30269ff8ab427109f5c4d0a577e5284
+a5a44a32a91474f00a3cda671a802e87c899fbb4
+2fc43c2c3f7ad1ca7a1ce32c5a9a98432725fb9a
+6c1227659878e867a01888eef472dd96b679adb6
+cad24ba99c7b6834faf6f5be820dd65f1a755b29
+17a995680482183f3463d2e01dd4c113ebb31608
+057b80e235b10799d03876ad25465208a4c64caf
+edf60d081ffdfa80243217a50a411ab5407c961d
+90cc2f08a6c2f0c41a9dd1786bae097f9292105e
+afa84ff62c9f5b5c280de2996b69ad9fa48b7bc3
+162403e189d1b8463952fa4f18a291241275c354
+ea46951b070f37ad95ea4ed08c7c2a71be2daedc
+6d5125c9407c7762620eeea7570af1a8ee7d76f3
+2227f978f084ebb18cb594c0cfaf124b0df6bf95
+398e0771e64cab6ca5d21754e32dce63f9e3c223
+04b851f25d6d49e61a528606953e11cfac7df2b2
+5dd473a4a9c6337b083edf38b6ddf5a6aece8908
+695426275dee2ec56bc0c0afe1c5b4227a350840
+7142ac9e4d5498037aeb0f459f278fd28dae8048
+5f758a29dae102511576c0a5c6beda264060a401
+aef58a54d458ab76f62c9b6de61af4f475e0f616
+69a41c98f6b71764913145dbc2bb4643c9bc4b0a
+eb100638ed73b82e1cce8475bb8e180cb22a09a2
+3c09fb7fe1886072670e0c4dd632d052102a3733
+9730b9cd998c0a549601c554221a596deda8af5b
+a301ddc419cbd900b301a95b1d9e4bb770afc6a3
+2e3b981b9f3751fc5873f77ad2aa7789c3e1d1d2
+1ffb39ed4d684a80652dfa30d604b82b4c542615
+83d41f6548bb76241737dcd3fed9e182ee901ff9
+ae5f32e489c4d52e7311b66060c7381d932f4193
+a3f78cc944ac189632f25925ba807a0e0678c4d5
+a2359c0f81a7eb032cff1fe45e3b80007facaa2a
+5141cf2e59fb2ec9bb489b9c1832447d3cd93110
+7e467e686f9468b826133275484e0a1ec0f5bde6
+8b61fdc47b5eeae6bc0a52523f519eaeaadbc8c8
+911bef7465665d8b194b6b0370b2b2389dfda1a1
+645de797f936cb19c1b8dba3b862543645510544
+40dd2b9aace337467c6e1e269d0cb813442313d7
+fecccc79548001ecbd6cafd3067bcf14de80b11a
+34ec83c8ff214128e7a4a4763059eebac59268a6
+a1af7ec84472afba0451b431dfdb59be323e35b7
+56a677c889e0e2c9f68ab8ca42a7e63acf986229
+ebde9b9c714ed326157f41add8c781f826c1d864
+60462b981fda63c5f9d780528a37c46884fe0b54
+b6d3caccdcb3fbce45ce1a68bb5643f7e68dadb3
+c843f591658ca9dbb77944a89372a92006defe68
+6c5fbf156ef9fc782be0089309074cc52617b868
+2bb36c875754a2a8919f2f9b00a336c00006e453
+3b60b047831146044d154156441c60f6edd80346
+ad0d4d5c61b55a3ab29764237cd97be0ebb0ddff
+eac1b644492c10546a50f3e125a1f790ec46365f
+451b6409565a5ad18ea49b063561a2645fa4281b
+a78b5495a4223b9784cc53670cc10b6f0beefd32
+c15b68986ecfa1e13e3791686ae9024f66983f14
+96b1000031c53cd4c1c154013bb722ffd87fa7da
+2588acc7a730d864f84d4e1a050070ff873b03d5
+98fd92d68a143a5ced4a016fa3b7addd6b4a0122
+b3afa234996f44852317af382b98f5f557cab25a
+9b684e2e2bb43862f69b12c6be94db0e7a756187
+12408baf69419409d228d96c6f88b6bcde303505
+e1312b0b0fd660de87fa42de39316b28f9336e70
+574ad7ef015995efb7338829a021776bf9daaa08
+cfc30ce53bfc204b8764ebb764a029a8d0ad01f4
+68d08ed9470d973a54ef7806318d8894d87ba610
+e00241f00fb31c660df6c6f129ca38370e6eadb3
+07c83f544d0604e6bab5d741b0bf9a3621d133da
+eee2d2ac461f46734c8e674ae14ed87bbc8d45c6
+9888edfb6276887eb56a6da7fe561e508e72a517
+2f1485994ef2c09a7bb2874eb8252be8fe710db1
+04b4c779b43b830220bf938223f685d1057368e9
+beabb0d9d30871d517c5d915cf852f7f5293f52f
+45e459462a80af03e1bb51a178648c10c4250925
+18b9dc55e5221e704f90eea85a81b41dab51f7da
+675b2caee111cb6aa7404b4d6aa371314bf0e647
+372a8bf0ef757c08551d41e40cb7a485527b6cd7
+09903df21a38e069273b80e94c8c29324963a832
+07fa153b8e6196ee6ef6efd8b743de8485a07453
+7e27d946d23229220bcb6672aacab88e09516d39
+f0398ee5291b153b716411c146a17d4af9cb0edc
+beb2f1a6f3f781443580ffec9161d9ce6852bf48
+aa581b481d400982a7e2a88830a33ec42ad0414f
+997b9ffe2f752ba84a66730cfd320d040e7ba2e2
+5c4d4fd37e8c80ae95c00973531f34a6d810ea3a
+06262d6beeccf2784e4e36a995d5ee2ff73c8d11
+9c1664f69d0d832e05759e8f2f001774fad354d6
+7f511a6a2b38a26f077a5aec4baf5dffc981d881
+43c3b6a564b284382fdf8ae33f974f4e7a89600e
+70109c670471db2e0ede3842cbb58ba6be804561
+c61eaf172820fcafaabf39005bd4536f0c45f995
+cec70cf159b51a18b39c80fac1ad34f65f3691ef
+0a7309147d777c2f20f780a696efe743520aa2db
+c317181fa1de2260e956f05cd655642607520a4f
+6e46d8aa63db3285417c8ebb65340b5045ca106f
+f6fc112ff7e4746b040c13f28700a9c47992045e
+e8d1b134d48eb0928bc999923a4e092537e106f6
+afa57e50570a6599508ee2d50a7b8ca6be04834a
+b7c6df1ae0e8348feecd65e9ad574d1e04d212a5
+b3200539538eca54a85223bf0ec4f3ed132d0493
+7f445191fa0475ff0113577d95502a96dc702ef9
+d93baa5ecf3e1196b34494a79df0a1933fd2b4ec
+b08203fca1af7b95fda8aa3d29dcacd182375385
+4e6c9be0b646d60390fe3f72ce5aeb0136222a10
+34c062e2b8a3f6421b9f4ff22f115a36d4aba823
+b9dc8cc479cacda1f23b91df00eb03f88cc0c260
+a77e9f0bd205a7733431a6d1028f09f57f9f73b0
+7c30ea47f5ae1c5abd6981d409740544ed16ed16
+ed388878151a3b841f95a62c42382e634d4ab82e
+749d605dd12a4af58de1fae6f5ef5e65eb06540e
+f27e5a13c1c424504b63a9084c50f491c1b17978
+703dc33736939f88625227e38367cfb2a65319fe
+de3285da34df0262a4548574c2383c51387a24bf
+d444e010049944c1b3438c9a25ae09b292b17371
+ec576efd18203bcb8273539fa277839ec92232a1
+0bce54bfbd8119c73eb431559fc6ffbba741e6aa
+7306d42ca158d40436cc5167e651d7ebfa6b89c1
+1fe1bd6b760e3059fff73d53a57ce3a6079adea1
+53a41c711b40e7fe3dc2b12e0790933d9c99a6e0
+8fe38962c24300129391f6d7ac24d7783e0fddd0
+dc974c31201b6da32f48ef81ae5a9042512705fe
+b4d209845e1c67870ef50a7c37abaf3770563f3e
+480ccd25cb2a851745f5e6e95d33edb703efb49e
+38c901a58244be9a2644d486f9a1284dc0edbf8a
+f1ae9f5338fcff577b1ae9becdb66007fe57bd45
+a92adfdd8996ab2bd7cdc910ea1d3db03c66d34f
+3266fcd1886e8ad883714e38203e66c0c6487f7b
+e3ce4c3e1279e3dc0c14ff3bb2920aced9e62638
+c829be73584966e3162f7ccae72d9284a2ebf358
+ddbd24a73ba3d74028596f393bb07a6b87a469c0
+eb566490cd1aa9338831de8161c6659984e923fd
+5a07945293c6b032e465d64f2ec076b82e113fa6
+bd26dabab576adb6af30484183c9c9c8379bf2e0
+4cc326fc977cf967eef5f3135bf0c48d07b79e2d
+c3d874336eb8fae92ab335393fd801fa8df98412
+fcc6fe6007c322641796cb8792718641856a22a7
+23e824d1dfc33f3780dd18076284f07bd99f1c43
+1a53ca294bbe5923c46a339955e8207907e9c8c6
+a8bb698d1bb21b81497ef68f0f52fa6eaf14a6bf
+e1179a5746b4bf12e1c8a033192326bf7f670a4d
+fb6f5cb26395608a3cf0e9c6c618293a4278a8ad
+55c40cbcf49a0225e72d911d762c27bb1c2d14aa
+d4885ca24189b4414031ca048a8b7eb2c9ac646c
+8a6033cbba8598945bfadd2dd04023c2a9f31681
+c26b43c2e1e2da96e7caabd46e1d7314acac0992
+289cfcd081c4393c7d6f63510747b5372202f855
+0b5a82f8c0ee3640503ba24ef73e672d93aeebbf
+120785f9b4952734818245cc305148676563a99b
+8b38124ff02a9cf8ad00de5521a7f8a9fa4d7259
+9b1022a01ca4ecf8c1fa99b1b39a93924de2fcfb
+dbed26cc6d818b3679e46677abc9fa8e04e8c6a6
+7789a5d87884f8bafec8a82085292e87d4e2866f
+78f2c8671d1a79c08c80ac857e89315197418472
+5b97e997b9b654373bd129b3baf5b82c2def13d1
+758d7e1be64cc668c59ef33ba8882c8597406e53
+9db4b25df549555f9ffd05962b5adf2fd9c86543
+926e97d5ce2a6e070f8ec07c5aa7f91d3df90ba0
+bebea83479a8e1988a7da32584e37bfc463d32d4
+aa1607090fbc80ab1e9c0f25ffe8b75b777e5fd8
+67386772c289cd40db343bdc4cb8cb4f58271df2
+5da2ae30e5ee22d00f87ebba8cd44a6d55c6855e
+7ec7163ec1bc237c4c2f2841c386f2dbfd0cc922
+dd6826e9520a6e72bcd24d1bdb930e78c1083b31
+2322ec2f3571e0ddc593c4e2237a6a794c61251d
+2ed4973984b254be5cba3129371506275fe8a8eb
+75308067ddd3c53721430d7984295838c81d4106
+f412d9d7bc7534e7daafa43f8f5eab811e7e4148
+0450dacc43171c6e623d0d5078600dd570de777e
+892c911ca68f5b4bad59cde7eeb6c738ec6c4586
+85fd2bda5eb3afe68a5a78c30297064aec1361f6
+faf5583063682e70dedc4466ac0f74eeb63169e7
+18d51a366ce2b2068e061721f43cb798177b4bb7
+372fb32569ced35eaf3740a29890bec2be1869fa
+34bc8ecec0c0b328cd8c485cb34d4d2f4b84e0c9
+57a14a65e8ae15176c9afae874854e8b0f23dca7
+1159ff04fd17c59515199e0fc2d5e01e72818b59
+0da4c3d898ca2fff9e549d18f513f4898e960aca
+e0244a8356b57a5721c101ead351924bcfb2eef4
+a9fdbe102f266cc20e600fa6b060a7bc8d1134e9
+b32cf547a764a4efa475e9c99a72a5db36eeced6
+d7fe2a52d0ad915b78330340a8111e0b5a66513a
+e180572400b64860e190a8bc04ef839fa491e056
+e7cac91da51b78eb4a28e194d3f599f95742e2a2
+b1e4f8c15ff30cc7d35ab25ff3eddaf854e0a87c
+814d091c973ff6033a83d4e44ab3b6a88cc1cb66
+2dfe0e7e81f65716b09c590652a4dd8452c10294
+ee7e8aec3ebb37e41092e1285e4f81916ce92c18
+869583b700ecf33a9987447aee9444abfe23f343
+239e305c24155add73f2a0ba5ccbd66b37f77e14
+794a51097385648e3909a1acae7188f5ab881710
+f0ca31fd5cad07e84b47d50dc07db9fc53482a46
+50ee027c63dcc5ab5cd0a6cdffb1994f83916a46
+f842b13bd494be1bbc1161dc6df244340b28a47f
+ea80a050d20c0e24e0625a92e5c03e5c8db3e786
+1e0d92b9b4011822825d1f7dc0eba6d83504d45d
+d34f546e61eccbac2450ca7490f558e751e13ec3
+9bac481dc4171aa2d847feac546c9f7299cc5aa0
+02f4b900deabbe7efa474f2815dc122a4ddb5b76
+61329bc767152f01aa502989abc854b53047e52c
+da7bbfa905d88834f8929cb69f41a1b683639f4b
+fbc591cde7fb7beb985437a22466f9cf4b16f8b1
+22e121a8dea49e3042de305574356477ecacadda
+bef926d63512dbffcf1af59f72295ef497f5acf9
+605f6817018a572797095b83bec7fae7195b2abc
+ca902aeec4fa54d32a4fed9ba89a7fb2f7131734
+9dcfa771a7e87d7681348dd9f6cf9803699b16ce
+d4df31006798ee091b86e091a7bf5dce6e51ba3e
+9f62ac43a1086c22b9a3d9f192c975d1a5a4b31f
+bf961e4a57a8f7e9d792e6c2513ee1fb293658e9
+5167e16b53283be5587659ea8eaa3b8ef3fddd33
+352d61eb66b053ae5689bd194840fd5d33f0e9c0
+e6e5a6090016810fb902b51d5baa2469ae28b8a1
+a8e7561ada380f2f50211c67fc45c3b3dea96bdb
+e5ea7295b89ef679e74919bf957f58d55ad49489
+97c1f68fb7162af326cd0f1bc546908218ec5da6
+78c1ad33772237bf138084220d1ffab800e1200d
+5f0d4657eab4152a1785ee0a25b5b499cd1163ec
+ede16b198b83d04b52dc3f0dafc11fd82c5abac4
+92292fffc36336d63f4f77d6b8fc23b0c54090e9
+7477cf04c6b086108f459f693a60272523c134db
+18855be5e7a60269c0652e9567484ce5b9617caa
+4ca9753ab023accbfa75a547a65344ee17b549ba
+41c56c69b20b3f0b6c8a625009fc0a4d317e047a
+43e268c118ac25f1f0e984b57bc54f0119ded520
+1fff309330f85146134e49e0022ac61ac60506a9
+e2106bb3febb4fc8fe91f0fcbc241bcda0e56b1e
+2bf646a6efd15ab830344ae9d43e10cc89e29f34
+6e8a81d452a91f5231443ac83e4c0a0db4579974
+7ca7255c2e0c86e4adddbbff2ce74f36b1dc522d
+f1ba2fe3491c715ded9677862fea966b32ca81f0
+0a4fc9016aacae9cdf40663a75045b71e64a70c9
+856317f27248cdb20226eaae599e46de628fb696
+65b1209d38c259fe9ca17b537f3fb4d1857580ae
+d02c54192dbd0798b43231efe1159d6b4375ad36
+5b0008ba87667085912ea474025d2323a14bfc90
+6689aee6c9599c1af4c607ea5385ac0c2cf0c4b3
+e7cfaff65541cde4298a04882e00608d992f6703
+dba7d8c4d2fca41269a2c96b1ea594e2d0b9bdda
+78cec49ca0acd3b961021bc27d5cf78cbbbafc7e
+894f27b6ea68a1ec9b7632533eabf2353b1e9d79
+dfd8602820c0e94b624d02f2e10ce6c798193a25
+bd70f832e133fb87bae82dfaa0ae9d1599e52e4b
+7aa062c6c90dba866273f5edd413075b90077b51
+a20036b7fbf6c0db454c8711e72d78f145560dc8
+8f6d05b8f9860c33c7b1a5d704694ed628db66c7
+d7b6bbb94ac20f5e75893f140ef7e207db7cd483
+fc7b34a2e43bb3d3585e1963bb64a488e2f278a0
+1b5d445741473ced3d4d33732c9c9225148ed4a1
+3b21aaf7def52964cf1fcc5f11520a7618c8fae3
+39af06d29a74ad371a1846259e01c14b5343e3d1
+eaaed082762337e7c3f8a1b1dfea9c0d3ca281bf
+a7ec294373ccc0598cbb0bbb6340c4e56fe5d979
+9853136dbd7d5f6a9c57dc66060cab44a86cd662
+771505abd38641454757de75fe751d41e87f89a4
+5e6fc99d8f5ebaab0e9c29bc0969530d201e0708
+d1079444ceddb1de316983f371ecd1db7a0c2f38
+72591a75469321074b072daff80477d8911c3af3
+cea2911ccabab40e9c1e5bcc0aa1127cab0c789f
+e8fdacbd708feb60fd6e7843b048bf3c4387c6db
+b97c7f82c1439fa1e4525e5860cb05a39cc412ea
+13d430257d595231bda216ef859950caa736ad1d
+11df25b4e074b7610ec304a8733fa47625d9faca
+758d481bbf24d12615b751fd9ec121500a648bce
+8694cd9748fb1c128f91a572119978075fede848
+8ce9b7b52d05701d5ef4a573095db66ce60a7e1c
+c73dd452c20460f40becb1fd8146239c88347d87
+3f9ca2526013e358cd8caeb66a3d7161f5507cbc
+8c4042191431e9eb43f00b0f14c23765ab9c6688
+90ac0f32c0c29aa4545ed3d5070af17f195d015f
+9c6dfd3a38374399d998d5a130ffc2864c37f554
+6d2fd0a9cbea13e840f962ba7c8a9771ec437d3a
+e68869499471bcd6fa8b4dc02aa00633673c0917
+856cc83a3121de89d4a6d9283afbcd5d7ef7aa2b
+197c64c36e8a9d624a05ee98b740d87f94b4040c
+3294e27356c3b1063595885a6d731d625b15505a
+2a2df7e790737a026434187f9605c4763ff71292
+535cdce8264ac0813d5bb8b19ceafa77a1674adf
+fa052fd40e717773c6dc9cc4a2f5c10b8760339f
+ecac3da2ff8bc2ba55981467f7fdea9de80e2092
+9d46485ca2c562d5e295251530a99dd5df99b589
+83e96ed8a4663edaa3a5ca90b7ce75a1bb595b05
+7ac9aaafe4d74542832c273acf9d631cb8ea6193
+9d5bfaf6191484022a6731ce13ac1b866d21ad18
+35208eda874591eac70286441d19785726578946
+fd10b0c771a2620c0db294cfb82b80d65f73900d
+a1132e2638a8abd08bdf7fc4884804dd6654fa63
+c4e2d5ebfebbb9dcee6a9866c3d6290481496df5
+8ed32c8fad924736ebc6d99c5c319312ba1fa80b
+f6ce34d6e4e445cc2c8a9b8ba624e971dd4144ca
+6b742055a664bcbd1c6a85ae6796bd15bc945367
+eedfb384a5e42511013b33104f4cd3149432bd9e
+7f203f2ff6721e73738720589ea83adddb7fdd27
+188abc5bad3a3663d042ce98c7a7327e5a1ae298
+519f1486f0755ef3c1f05700ea8a05f52f83387b
+829f390b3f8ad5856e7ba5ae8568f10cee0c7e6a
+aca232de87c4c61537c730ee59a8f7ebf5ecb14f
+d20ea5a4fa771bc4121b5654a7483ced98b39148
+ebb7cc67df6d90f1c88817b20e7a3baad5dc29b9
+09df62fd17d3d833ea6b5a52a232fc052d4da3f5
+edf98a925bb24e39a6e6094b0db839e780a77b08
+b313751548018e4ecd5ae2ce6b3b94fbd9cae33e
+cfeb26245b57dd10de8f187506d4ed5ce1e2b7dd
+ab58a7db32683aea9281c188c756ddf969b4cdbd
+85a136b48c2036b16f444f93b086e2bd8539a498
+522a4ca705c06a0436bbe62f46efe24d67a82422
+8e452379fda31744d4a4383fcb8a9eab6dbc4ae4
+5a547df635a9a56ac224d556333d36ff68cbf088
+af654a7ec15168b16382bd604889ea07a967dac6
+080e0efc3cf71260bfe9bdc62cd86614d1ebca46
+d9c4b1ca997583047a8721b7dfd9f0ea2efdc42c
+ae836e2be4bb784760e43de88a68c97f4f9e44a1
+efc78a7d95b14abacdfde5c78007eabf9a21689c
+f2eab39cf68de880ee7264b454044a55098e8163
+7d18e9165312cf669b799aa1b883c6bbe95bf40e
+721b109970bf5f1862767a1bec3f9a79e815f79a
+c43862db5eb7e43e3ef45b5eac4ab30e318f2002
+8e63868e552e433dc536ba732f4c2af095602869
+edd6ed94207ab614c71ac0591d304a708d708e7b
+bd0265ba7f391dc3df9059da3f487f7ef17144df
+45a6333fc701d14aab19f9e2efd59fe7b0e89fec
+59dac8b460a89e03fa616749a08e6149708dcc3a
+d26b443f87df76034ff0fa9c5de9779152753f0c
+2564920d6976be68bb22e299b0b8098090bbf259
+20ade100a320cc761c23971d2734388bfe79f7c5
+10bfa4cecd64b9584c901075d6b50f4fad898d0b
+0b572a2b7052b15c8599dbb17d59ff4f02838ff7
+7f2a234ad5c256733a837dbf98f25ed5aad214e8
+8fed5ea3b69ea441a8b02f61473eafee25fb2374
+1efaa128378f988965841eb3f49d1319a102dc36
+a0aa32bb7f406693217fba6dcd4aeb6c4d5a479b
+78598e7005f7c96d64cc47ff47e6f13ae52245b8
+c7c8d150ece08b12e3abdb6224000c07a6ce7d47
+3f0c6dbfd3c9cd5625ba748327d69324baa593a6
+15aa6c457678e25f6bc0e818e5fc39e42dd8e533
+768f6a14a7903099729872e0db231ea814eb05e9
+0141cb33c822e87e93b0c1bad0a09db49b3ad470
+29c340c83b3bbef9c43b0c50b4d571d5ed037cbd
+b806a31c093b31e98cc5fca7e3ec53f2cc169db9
+9b4b2a575641f3a7f8a5ce28b6a06c36694a9ddf
+8dfe43c76b76a97f8938f5f5f81059a1f1fa74ed
+206fbe6ab6a83175a0ef6b44837743f8d5f9b7e8
+599adc0dcd4ebcc2a868feedd243b5c3c1bd1d0a
+598744c8620e4ecbf449d14d7081fbf1cd05851f
+a85e9e11db5665c89b057a124547377d3e1c27ef
+d4453ec649dbde752e74da8ab0984c6f15cc6e06
+a26fd9df58bb76d6c7a3254820143b3da5bd584b
+66490b5869822b31d32af7108eaff193fbdb37b0
+2f73203fd71b755a9601d00fc202bbbd0a595110
+fbe4f8a6af19f63e47801c6f31402f9baae5fecf
+b6f15bf8723b2d5390122442ab04630d2d3878d8
+f11c76efdc9651db329c8c862652820d61933308
+39ed31ced75e6151dde41944a47b4bdf324f922b
+411318684bd2d42e4b663a37dcf0532a48f0146d
+352a620f0b96a7e76b9195a7038d5eec257fd994
+0a0321785c8beac1cbaaec4d8ad0cfd4a0d6d457
+7a131fafa7058fb75fdca32d0529bc7cb50429bd
+27a00f2490284bc0705349352d36e9749dde19ab
+212608e00fc1e8912ff845ee7a4a67f88ba938fc
+96e318f8ff91ba0b10348d4de4cb7c2142eb8ba9
+69adf2f122ff18848ff85e8de3ee3b2bc495838e
+9e182e0cd9d70f876f1be7652c69373bcdf37fb4
+3a27d164e931c422d16481916a2fa6401b74bcef
+d8526863f35b29cbf8ac2ae756eaae0d2930ffb1
+e5e5f31b81ed6526c26d277056b6ab4909a56c6c
+2b869d5551b10f13bf6fcdb8d13f0aa4d1f59fc4
+c71217b2b111a51a31cf1107c71d250348d1ff68
+c11eb653746afa8148dc9153780a4584ea529d28
+b73795963dc623a634d218d29e4a5b74dfbc79f1
+bd379f8e08f88729a9214260e05967f4ca66cd65
+d80a3d1f3a438e02a6685e66ee908446766fefa9
+8e0ab1b08964393e4f9f42ca037220fe98aad7ac
+040dc119d5ca9ea3d5fc39953a91ec507ed8cc5d
+7c80d91db5977649487388588c0c823080c9f4b4
+4c29e1f31660ba33e46d7e4ffdebb9b8c6bd5adc
+de7f5e4ccc2f38e0c8f3f72a930ae1c43e0fdcf0
+c808c784237f167c78a87cc5a9d48152579c27a4
+3e3227c8e9f44593d2499f4d1302575c77977b2e
+4209783b0cab1f22341f0600eed4512155b1dee6
+f61d5f2a082c65d5330f21b6f36312cc4fab8a3b
+d78734c54f29e4474b4d47334278cfde6efe963a
+cb2470aade8e5630dcad5e479ab220db94ecbf91
+dd8084b2878ca95d8f14bae73e1072922f0cc5da
+3795974e24296185d9b64454cde6f796ca235387
+e5823a9d3e5e33e119576a34cb8aed497af20eea
+d1a43737ca8be02d65684cf64ab2331f66947207
+ba788365d70fa6c907b71a01d846532ba3110e31
+266766818dbc5a4ca1161ae2bc14c9e269ddc490
+1316296fae6485c1510f00b1b57fb171b9320ac2
+fbc2f5cf943f440b1ba2374ecf82d0176a44f1eb
+e3c011d08d04c934197b2a4804c90be55e21d572
+c9efcd8e32dced6efa2bba64789df8d0a8e4996a
+7323b594d3a8508f809e276aa2d224c4e7ec5a80
+a322479a6851f57a3d74d017a9cb6d71395ed806
+d949fadc9b6c5c8b067fa42265ad30945f9caa99
+3baa3d5325f00c7edc1f1427fcd5bdc6a420a63f
+ffea8775fc9c32f573d1251e177cd283b4fe09c9
+8199803f476c12c7f6c0124d55d156b5d91314b6
+dec0c26855da90876c405e9fd42830c3051c2f5f
+d35534f3f59631951011539da2fe83f2844ca245
+8ff8c64288a2f7e4e8bf8fda865820b04ab3dbe8
+2e0d56794379c436b2d1be63e71a215dd67eb2ca
+6ca6ade6c9acb833790b1b4e7ee8842a04c607f7
+23dd8d17ce09c22d367e4d62c1ccf507bcbc64da
+313d5eba97fe064bdc1f00b7587a4b3543ef712a
+3cb2841302af1fb9656f144abc79d4f3d0b27380
+a1b7bb2a4970b7c479aff3324cc7773c1daf3fc1
+3933e323653ff27e68c3458d245b47e3e37f52fd
+37c5c65ae204ad3692cd30a3dc62f28a263ad468
+3827f1cab643a57e3cd22fbffbf19dd5e8a298a8
+707a542c580bcbf3a5a75cce2df80d75990853cc
+4735fa28fa2a2af98f7b266efd300a00e60dddf7
+6584c3c877400e1689a11ef70133daa86a238602
+84a74ef8680b66e6dccbc69ae80321a52780a68e
+911505a4242da555c6828509d1b47ba7854abb7a
+c175ebe550761b18bac24d394d85bdfaf3b7718c
+d4b4020e289c095ce2c2941685c6cd37667f5cc9
+b331ca23aed90394c05f06701f90afd550131fe3
+3a49507c46a2b8c6411809c81ac47b2b1d2282c3
+c48b68dc780c71ab0f0f530cd160aa564ed08ade
+75a74a74d6abbbb302a99de3225c8870fa149aee
+7ac4fc169fffa8e962b9df94f61e2adf6bac8f97
+180bd019eab85bbf01d9cddc837242e111825750
+fe50efe9e282c63941ec23eb9b8c7510b6283228
+35265cbd9c6ea95753f7c6b71659f7f7ef9081b6
+81706277ed180a92d2eeb94ac0560f7dc591ee13
+62f017907e19766c76887209d01d4307be0cc573
+d9218c2bbc7449dbccac351f55675efd810535db
+ea890846912f16a0f3a860fce289596a7dac575f
+403a108dec92363fd1f465340bd54dbfe65af870
+52af7625f7e7a0bd9f9d8eeafd631c4d431e67e7
+763b60feaabceebbe9eddfbaa0378b8b454327aa
+46976097c54e86032932d559c8eb82ffea4bb6bb
+5f453a35d312debfc993d687fd0b7c36c1704b16
+88535dba55b0a80975df179d31a6cc80cae1cc92
+1d51b256af68c5546d230f3e6f41da029e0f5852
+cb8382f43ce073322eba82809f02d3084dad7969
+8ccbbd9da0749d96f09164e28480d54935ee171c
+1b02b9413b730b96b91d16dcd61b2420aef97414
+0322e69172f54b95ae6a90eb3af91d3daa5e36ea
+7918e3e15099b4b2943746e1f6c9e3992a79c5f3
+099053f2cbfa06c0141371b9f34e26970e316426
+77db171a523fc3d08c91cea94c9562f3edce56e1
+bf54b5586cdb0b32f6eed35798ff91592b03fbc4
+40c9dce0a4c18829c4100bff5845eb7799b54ca1
+dc550f361ae82ec6e1a0cf67edf6a0138163382e
+7ef44b7c2b5533d00001ae81f9293bdb592f1146
+f78fe101b21be36e98cd3da010051bb9b9829a1e
+ee1465cbbc1d03cb9eddaad8618a4feea78a01ce
+eed7920682789a9afd0de4efd726cd9a706940c8
+6316a4b689706b0f01b40f9a3cef47b92bc52411
+aca728cab26b95fbe04ec230b389878656d8af5b
+3db6fd6a0e9bb30f2421e84ee5e433683d17d9c1
+c3dc4f414f5233df96a9661609557e341b71670d
+fad895771260048f58d12158a4d4d6d0623f4158
+38eea307445a39ee7902c1ecf8cea7e3dcb7c0e7
+8ac2d704f27a2ddf19b40c8e4695da629aa52a54
+dc34ab49d378ddcf6c8e2dbf5472784c5bfa8006
+2f837ff8b134b785ee185a9c24e1f82b4e54df04
+984edce0b961418d81203ec477b9bfa5a8197ba3
+fd5376fcb09001a3acccc03159e8ff5801129683
+d7b8f285b0701ba7b1a11d1c7dd3d1e7e304083f
+ac2e166c76c103f17fdea2b4ecb137200b8d4703
+179564f157a96787b1b3380a9f79701e3394013d
+accbd6cd5dd649137a7c57ad6ef99232759f7544
+acff2dc5d601887741002a78f8c0c35a799e6403
+f3fed71cc4fc49b02067b71c2df80e83084b2a82
+b1f4423c227fa37b9680787be38857069247a307
+443f4421e44d4f374c265e6f2551bf9830de5597
+9c2f20ed168743071db6268480a966d5d238a7ee
+d9bad7c3c874169e3e0b66a031c8199ec0bc2c1f
+6dcf6b028a6042a9904628a3395520995b1d0ef9
+43d7d0d0d0e2d6cf5355e60c4fe5b715f0a1101a
+0ca36ecaf4015ca4095e07f0302d28a5d9424254
+312b2566e315dd6e65bd42cfcbe4d919159de8a1
+d57c8d46a869c63fb20e33bc21bc2a3c4628f5b4
+9bc01fa9400c231e41e6a72ec509d76ca797207c
+cbbd9880fb28bef4e33da418a3795477d3a1616e
+675b1fd2aaebe9c62be6b22b9ac6d278193cc581
+468bb5344f74842a9a43a7e1a3333ebd394929b4
+ceba8ca45bad226c401a509e6b8ccbf31361b0c9
+111d0b588f3abbbea85d50a28c0506f74161e091
+fde611bf25a89fe11e077692070f89dcdede043a
+2ac7bb3fb014d27d3928a9b4bc1bf019627e0c1a
+516a27d5dd06622f872f5ef334313350745eadc3
+b8048a7661bdb73d3613fde9d710bd45a20d13e7
+3a6334953cd2775fab7a8e7b72ed63468c71dee7
+ec983394f800da971d243f4143ab7f8421aa967c
+a45e6172713a56736a2565ddea9cb8b1d94721cd
+7c9a65f18f7feb473e993077d087d4806578214e
+f9d9b2a1197cdb73e977490756c0ff8a30cafc3e
+d33fcdaf2c0bd0100ec94b2c437dccdacec66476
+12226bca7a891e25b7d1e1a34a089521bba75731
+971cb1bfe3d10fcb2037e684c48bd99842f42fa4
+cf5a0115d3f4dcf95bea4d549ec2b6bdd7c69150
+cc8e378fd05152a81c2810f682a78c5057c8a735
+640e12837241d52d04379d3649d050ee3760048c
+48de3ca194c3830daa7495603712496fe908375c
+bddc822cf20b31d8f714925bec192c39294184f7
+f6f2a212505a118933ef84110e487551b6591553
+58538cc418bf41197fad4fc4ee2449b2daeb08b1
+dbd958ffedc3eae8032be67599ec281310c05630
+62750d78e819d745b9200b0c5c35fcae6fb9f404
+32e4fc2f0d9c535b1aca95aeb5bcc0623bcd2cf2
+3083bd7a442af6a1d72cdc04ae1ad7c30697a4e8
+ab6886252aea103b3d974462f589b4886ef2735a
+65babb10e727382b31ca5479b452ee725917c739
+87610276ccbc12d0912b23fd493019f06256f94e
+1c6e22516ceb5c97c3caf07a9bd5df357988ceda
+c914d2ba06ec3fd1baa0010dcc4d16c7c34fc225
+3d6ee995bc2f3e0f217c053368df659a5d14d5b5
+f3a59d85b7458394e3c043d8277aa1ffe3cdac91
+b59cee1f647737ec3296ccb3daa25c890359c307
+62007c30f148334fb4d8975f80afe76e5aef8c7f
+626913b8fcbbaee8932997d6c4a78fe1ce646127
+cfd4004054399f3a5f536df71f9b9987f060f434
+dc13229afbbc8b7a31ed5adfe265d971850c0976
+1ffe20eb32dbc4fa85ac7844178937bba97f4bf0
+30b15cdb72760f20f80e04157b57be9029d8a1ab
+fd53be2e0a9f33080a9db4b5a5e416e24ae8e198
+d444368421f456baf8c3cb089244e017f8d32c41
+fffefc1fb840da63e17428fd5de6e79feb726894
+1d776bfe627f1a051099997114ba04678c45f0f5
+cb27b45329d61f5f95ed213798d4b2a615e76be2
+d0144d76b8b926d22411d388e7a26506519372eb
+08d41d2f68a2bf0091dc373573ca379de9b16385
+4f1249369127cc2e2894f6b2f1052d399794919a
+2e231f1e7e641dd3619bec59e14d02e91360ac01
+632fa986bed53862d83918c2b71ab953fd70d6cc
+112780a7fe259dc7aff2170d5beda50b2bfa7bda
+93af335bf8c610f34ce0cadc15d1dd592debc706
+5f0d4a0b5f72d8700cdf8cb179263a8fa866b59b
+975978ee6a32383d6f4f026b944099e7739e5890
+c43dc4ae68a317b34a79636fadb3bcc4d1ccb61c
+5f94969b9491db552ffebc5911a45def99026afe
+28715fc79bd5ff8dd8b6fc68a4f2641e5d1b8a08
+d00e9a6339e34c613053d3b2c132fccbde547b56
+288964068cd87d97a98b8bc927d6e0d2349458a2
+aeb6b9aba5bb08cde2aebfeda7ced6c38c84df4a
+56e079f4eb40744728fd1d7665938b06426338e5
+92b61b09d2eed4937058d0f9494d9efeddc39002
+36939e6a365e9db904d81325212177c9e9e76c54
+2957715e96a18dbb5ed5c36b92050ec375214aa6
+1190cba0cae3c8bb81bf80d6a0a83ae8c41240bc
+7a7b1352d97913ba7b5d9318d4c3d0d53d6fb697
+7f21a7441c6ded38008c1fd0b91bdd54425d3f80
+f5fae7810a33ed67852ad6a3e0144cb278b24b41
+24f022d807352abf071880877c38e53a98254dcd
+361d6345919c2edc5c3ce49bb4915ed2b4ee49be
+d454ad60b061c1a1450810a0f335fafbfeceeccc
+2c1f8ddbfbb224271253a27fed0c2425599dfe47
+ed9d11e995baeec17c5d2847ec1a8d5449254525
+493c8591d6a1bef5d7b84164a73761cefb9f5a25
+d5444f9475253bbcfef85c351ea9dab56793b9ea
+86d0127e1fd04c3d8ea78401c838af621647dc95
+4560491820e0ee49736aea9b81d57c3939a69e12
+c62c07de196e95eaaf614fb150a4fa4ce49588b4
+64ec0c53dd1aa51eb15e8c2a577701e165b8517b
+8f9c37f351a91ed416baa8b6cdb4022b231b9085
+a022eff5470c3446aca683eae9c18319fd2406d5
+af6cae71f24ea8f457e581bfe1240d5fa63faaf7
+a81c86cda6f1da2aa09b6737297addd3d4a64ffa
+633c851ebf625ad7abdda2324e9de093cf623141
+13179bb3f2867ea44647b6fe0c8fb4109207e9f5
+7fab17ef7e25626643f1d55257a3e13348e435bd
+523854a7d8755e944bd50217c14481fe1329a969
+6e911227e893d0eecb363015754824bf4366bdb7
+7c1cfab6b60466c13f07fe028e5085a949ec8b30
+ff8ef43168b9c8dd467208a0b1b02e223b731254
+7eaa97be59019f0d36aa7dac27407b004cad5e93
+2bcd9b2b78eb353ea57cf50387083900eae5384a
+61e9e180d3d1d8b09f1cc59bdd9f98c497707eff
+f16599e4ec666c6390c90ff9a253162178a70ef5
+37866fea39deeff453802cde529dd9d32e0205a5
+7c57ac7c9f84fbd093f6393e2b63c18078bf0fdf
+4c72a51a7c7288e6e17dfefe4f87df47929608e7
+06560d5721ecc487a4d70905a485e22c9542a522
+8006219efb6ab76754616b0e8b7778dcfb46603d
+7a3d46f32f680144fd2ba261681b43b86b702b85
+8202da548a128b28dd1f3aa9f86a0523ec2ecb26
+95289007f2f336e6636cf8f920225b8d47c6e94f
+db1a9b8d8ce9a5696a96f8db4206b6f72707730e
+2ffcd35d9b8867a42be23978079f5f24be8d3e35
+10e4172dd4f4a633f10762fc5d4755e61d52dc36
+2563fc1797f187e2f6f9d9f4387d4bcadd3fbd02
+eba4cfd76f99159ccc0a65cab0a02db42b548d85
+21d5c838d19fcb4d624b69fe9d98e84d88f18e79
+484bac2a9ff3a43a6f85d109bbc579a4346397f5
+d383ba7bbf8b7b49dcef9f8abab47521966546bb
+a1dd9038b1e1e59c9d564e252d3e14705872fdec
+24869258fef8f47623b5ef43bd978a525f0af60e
+6c92d87c84fa5e5d2bb5bed3ef38168786bacc49
+6baaa8b763cc5553715766e7fbe7abb235fae33c
+88e090ffc1f75eed720b5afb167523eb2e316f7f
+2961e14c327341d22d5f266a6872aa174add8ac4
+7f4bc8883c3b9872408cc391bcd294017848d0cf
+f7dcadc5288653ec6764600c7c1e2b49c305dfaa
+cff911786b5ac884bb71788c5bc6acf6bf569eff
+0b45aeb0aede5e0c19b508ede802bdfec668aefd
+c79cf7f61441195404472102114bcf079a72138a
+4686df20f0ee40cd411e4b43860ef56de5531d9e
+93cd5c47e4a3425d23e3db32c6eaef53745bb32e
+f6149fc5b39fa6b33220ccee32a8ee3f6bbcaf4a
+fd9ab411dc6258763c95b7741e3d51adf5504040
+29a5d38390857e234c111f8bb787724c08f39110
+06ab24721d7117974a6039eb2e57d1545eee5e46
+55432723c728a2ce90d817e9e9877ae9fbad6fe5
+e9b0a27018c7151016a9fe01c98b4c21d6ebf4be
+40a34d4eea5e32dfbcef420ffe2ce7c1ee0f23cd
+5748652924084b7b0220cddcd28f6b2222004359
+3cd7b15f5647e650db66fbe2ce1852e00c05b2e4
+0004f72a00096fa410b179ad12aa3a0d10fc853c
+27b451abfe321a696c852215bb7efb4c2e50c89f
+18e54b74ed1f3c02b7569f53a7d930d72fc329f5
+8562b4f63e49847692b8cb31ef0bdec416b9a87a
+b562def2624f59f7d3824e43ecffc990ad780898
+6e91be2ad74cf7c5969314b2327b513532b1be09
+4ed6c7740ba93d75345397ef043f35c0562fb0fd
+d7dd35a86117e46d24914ef49ccd99ea0a7bf705
+10bf35bf98cfe555dfc03b5f03f2769d330e3af9
+a03448488950ee5bf50e9e1d744129fbba066c50
+1d30f813798c55ae4fe454829be6e2948ee841da
+54ce3ff2ab6e4465c2f94eb4d636183fa7878ab7
+fb8eb4a7b9b9602992e5982c9e0d6d7f7b8210ef
+9057044c0347fb9798a9b552910a9aff150385db
+5b4b84ce3518c8a14f57f5f95a1d07fb60e58223
+99facca6fc50cc30f13b7b6dd49ace24bc94f702
+49068538b7eef66b4254cc11914128097302fab8
+51dcb36a6c247189be4420562f19feb00c9487f8
+e03bda45248b4169e2a20cb9124ae60440cad2de
+f60a85bd35fa85739d712f4c93ea80d31aa7de07
+21b5af67618fcc047b495d2d5d7c2bf145753633
+023decb4c56f2e97d345593e4f7b89b667a6763d
+73d53a7c27716ae9a6d3484e78883545e53117ae
+a803453edd2b4a85b29da74dcc551b3c53ff17f9
+e90e12e77cab78ba8f8f657db2bf4ae3dabd5166
+44b91268fbbf62e1d2ba1d5331ec7aedac30dbe8
+4e37cd250130c6fd60e066f0c8efb3cbb778c421
+38c7f80a1e7fa1bdec632042318dc7cdd3c9aad4
+5dafab3c936763294257af73baf9fb3bb1696654
+a96c45ed3a44ad79a72499be238264ae38857988
+2696d3708d6c6cccbd701f0dac14cc94d72dd76d
+6b3e360b80268fda4e37ff39b7f303e3684e8719
+60e2b9b2e0db3089237d0208f57b22a3aac932c1
+df767f62a6bf3b09e6417d801726f2d5d642a202
+41c42cb001f34c43d4d8dd8fb72a982854e173fb
+aadfcaf601630bdc2af11c00eb34220da59b7559
+f79c97e7c3f9a98cf6f4a5d2431f149ffacae48f
+e52f57a7de675d14aed28e5d0f2f3c5a01715337
+ab989225a55a2ddcd3b60a99672e78e4373c0df1
+cbfcd1ec8aa30e31faf205c73d350d447704afee
+dc1510110c23f7b509035a1eda22879ef2506e61
+d1ee9e63c8826a39d75fa32711fddbcc58d5161a
+db67edbaeb78e1dd734784cfaaa720ba86ceb6d2
+3a0558ebfde592bd8bd07cb72b8ca8f700715bfb
+6c7a42b4f43b3a2f9b250f5803b697857b1444ac
+44d93039eec244083ac7c46577b9446b3a071f3e
+f68ed499e9d41f9c3d16d843db75dc12833d988d
+f58d584c4ac93b4e7620ef6e5a8f20c6f6da295e
+764882e6779fbee29c3d87e00302befc52d2ea8d
+1951dc9dd4601168ab5acf4c14043b124a8e2f67
+dc964b9c7242a985eb255b2410a9c45981c2f4d0
+0532cbcf616f27e5f6a4054f818d4992b99d201d
+fac8cff9052fc5fab7d5ef114d1342daba5e4b82
+8acdc4be8274e5d189fb67b841c25debf5223840
+ad77056780328bdcc6b7a21bce4ddd49c49e2013
+814369f171337ee1d8809446b7dbfc5e1ef9f4b5
+4972aadcce369a8c0029e6dc2f288dfd0241e144
+8fb2ec3bbd862f680be05ef348b595e142463524
+684f5166d8147b59d9e0938d627beff8c9d208dd
+06b4e41185734f70ce432fdb2b121a7eb01140af
+dd0086da7c4efe61abb70dd012538f5deb9a8d16
+3e59d97d42f36fc96d33a5658951856a555e997b
+bc9003ad368cb79d8a8ac2ad025718da5ea36bc4
+40c1de7b1b0a087c590537df55ecd089c86e8bfc
+32f62da99ec9f58dd93e3be667612abcf00df16a
+81f101cea3c451754506bf1c7edf80a661fa4dd1
+dfecaedeaf618041a5498cd3f0942c15302e75c3
+5ba7882700718e996d576b58528f1838e5559225
+7f68a5429f150f9eb7550308bb47a363f2989cb3
+cc2a9f4be1e465cb4ba702539f0f088ac3383834
+c8585c95215bc53e28edb740678b3a0460ca8aa4
+da23d90bacf246b75ef752a2cbb138c4fcd789b7
+d31af74425719a3840b496b7932e0887b35e9e0d
+a5acda0e8c0937bfed013e6382da127103e41395
+df87193e15a19d5620f5a6458b05fee0cf03729f
+eece52bd0ed4d7925c49b34e67dbb6657d2d649b
+f3df296de36b7c114451865778e211350d153727
+7cf579088e0456d04b531da385002825ca6314e2
+a939e287feb3166983e36b8573cd161d12097ad8
+97137d5154a9f22a5d9ecc32e8e2b95d07a5a571
+938ae9597f71a21f2e47287cca318d4a2113feb2
+e4d8ba577cabcb67b4e9e1260573aea708574886
+ebc3d7f50231cdb18a8107433ae9adc7bd94b97a
+196c12571ab51273f44ea3469d16301d5b8d2828
+abba1bf1348a6f1b70a26aac237338ee66764458
+af3b803188344971aa89fee861a6a598f30c6f10
+6af75a8572965207c2b227ad35d5c61a5bd69f45
+9d24812d942e69f86279a26932df53c0a68c4111
+1bdef21f093c41df2682a07f05f3548717c7a3d1
+8a866bc0d925dfd8bb10769b8b87d7d0ff01774d
+3cd380bd0f3b164b44c49e3b01f6ac9798b6b6f9
+857c64060963dd8d28e4740f190d321298ddd503
+540b39ba1b8ef06293ed793f130e0483e777e278
+b8ebda42e272d3617375118542d4675a0c0e501d
+c1c2775e19d6fd2ad6616f69bda92ac8927106a2
+2d3af3ee03793f76fb8ff15e7d7515ff1e03f34c
+4b0cb10c6c3f2d581ac9eb654412f70bc72ed661
+97cf04eaf1fc0ac4de0f5ad4a510d57ce12544f5
+cfa931e6728a825caada65624ea22b840077f023
+f87b22e7f0c66225824a99cada71f9b3e66b5742
+d69719b42ee53b666e56ed476629a883c59ddf66
+d916602f694ebb9cf95d85e08dd53f653b6196c3
diff --git a/scraper/reports/misc/missing-3.csv b/scraper/reports/misc/missing-3.csv
new file mode 100644
index 00000000..a11c16e4
--- /dev/null
+++ b/scraper/reports/misc/missing-3.csv
@@ -0,0 +1,815 @@
+8dcc95debd07ebab1721c53fa50d846fef265022
+aa5a7a9900548a1f1381389fc8695ced0c34261a
+48a402593ca4896ac34fbebf1e725ab1226ecdb7
+ef23e82180508606a3ab8d9a30205b5e3c0daf67
+5760d29574d78e79e8343b74e6e30b3555e48676
+8eb40d0a0a1339469a05711f532839e8ffd8126c
+f7a271acccf9ec66c9b114d36eec284fbb89c7ef
+a082c77e9a6c2e2313d8255e8e4c0677d325ce3e
+c9367ed83156d4d682cefc59301b67f5460013e0
+bf37a81d572bb154581845b65a766fab1e5c7dda
+c997744db532767ee757197491d8ac28d10f1c0f
+d36a1e4637618304c2093f72702dcdcc4dcd41d1
+ce70dd0d613b840754dce528c14c0ebadd20ffaa
+525da67fb524d46f2afa89478cd482a68be8a42b
+b5f9180666924a3215ab0b1faf712e70b353444d
+f3cdd2c3180aa2bf08320ddd3b9a56f9fe00e72b
+60c24e44fce158c217d25c1bae9f880a8bd19fc3
+2dbc57abf3ceda80827b85593ce1f457b76a870b
+592f14f4b12225fc691477a180a2a3226a5ef4f0
+81513764b73dae486a9d2df28269c7db75e9beb3
+be48b5dcd10ab834cd68d5b2a24187180e2b408f
+ec1bec7344d07417fb04e509a9d3198da850349f
+a313851ed00074a4a6c0fccf372acb6a68d9bc0b
+2c93c8da5dfe5c50119949881f90ac5a0a4f39fe
+176e6ba56e04c98e1997ffdef964ece90fd827b4
+9e2ab407ff36f3b793d78d9118ea25622f4b7434
+9ce0d64125fbaf625c466d86221505ad2aced7b1
+df6e68db278bedf5486a80697dec6623958edba8
+7d45f1878d8048f6b3de5b3ec912c49742d5e968
+610779e90b644cc18696d7ac7820d3e0598e24d0
+3b350afd8b82487aa97097170c269a25daa0c82d
+ee815f60dc4a090fa9fcfba0135f4707af21420d
+7ee7b0602ef517b445316ca8aa525e28ea79307e
+74dbe6e0486e417a108923295c80551b6d759dbe
+81b0550c58e7409b4f1a1cd7838669cfaa512eb3
+f3553148e322f4f64545d6667dfbc7607c82703a
+f9bce7bd7909f1c75dbeb44900d374bc89072df0
+265a88a8805f6ba3efae3fcc93d810be1ea68866
+84508e846af3ac509f7e1d74b37709107ba48bde
+ab2b09b65fdc91a711e424524e666fc75aae7a51
+318e7e6daa0a799c83a9fdf7dd6bc0b3e89ab24a
+4983076c1a8b80ff5cd68b924b11df58a68b6c84
+a98ff1c2e3c22e3d0a41a2718e4587537b92da0a
+a6ce1a1de164f41cb8999c728bceedf65d66bb23
+a88ced67f4ed7940c76b666e1c9c0f08b59f9cf8
+f6cf2108ec9d0f59124454d88045173aa328bd2e
+73b90573d272887a6d835ace89bfaf717747c59b
+1c0acf9c2f2c43be47b34acbd4e7338de360e555
+ef940b76e40e18f329c43a3f545dc41080f68748
+ef559d5f02e43534168fbec86707915a70cd73a0
+4ef09fe9f7fa027427414cf1f2e9050ac7f5e34d
+e45a556df61e2357a8f422bdf864b7a5ed3b8627
+bc08dfa22949fbe54e15b1a6379afade71835968
+bef4df99e1dc6f696f9b3732ab6bac8e85d3fb3c
+85e78aa374d85f9a61da693e5010e40decd3f986
+de92951ea021ec56492d76381a8ae560a972dd68
+116f9e9cda25ff3187bc777ceb3ecd28077a7eca
+03c56c176ec6377dddb6a96c7b2e95408db65a7a
+c631a31be2c793d398175ceef7daff1848bb6408
+1ef1f33c48bc159881c5c8536cbbd533d31b0e9a
+f85ccab7173e543f2bfd4c7a81fb14e147695740
+d62d82c312c40437bc4c1c91caedac2ba5beb292
+858b51a8a8aa082732e9c7fbbd1ea9df9c76b013
+75ce75c1a5c35ecdba99dd8b7ba900d073e35f78
+de45bf9e5593a5549a60ca01f2988266d04d77da
+4da4e58072c15904d4ce31076061ebd3ab1cdcd5
+744fa8062d0ae1a11b79592f0cd3fef133807a03
+d264dedfdca8dc4c71c50311bcdd6ba3980eb331
+aeaf5dbb3608922246c7cd8a619541ea9e4a7028
+1feeab271621128fe864e4c64bab9b2e2d0ed1f1
+77d929b3c4bf546557815b41ed5c076a5792dc6b
+ab8ecf98f457e29b000c44d49f5bf49ec92e571c
+daca9d03c1c951ed518248de7f75ff51e5c272cb
+24603ed946cb9385ec541c86d2e42db47361c102
+b82f89d6ef94d26bf4fec4d49437346b727c3bd4
+c37de914c6e9b743d90e2566723d0062bedc9e6a
+b2b535118c5c4dfcc96f547274cdc05dde629976
+dec76940896a41a8a7b6e9684df326b23737cd5d
+99d06fe2f4d6d76acf40b6da67c5052e82055f5a
+0ba1d855cd38b6a2c52860ae4d1a85198b304be4
+03fe3d031afdcddf38e5cc0d908b734884542eeb
+c0c0b8558b17aa20debc4611275a4c69edd1e2a7
+4cfe921ac4650470b0473fd52a2b801f4494ee64
+6fdc0bc13f2517061eaa1364dcf853f36e1ea5ae
+715b69575dadd7804b4f8ccb419a3ad8b7b7ca89
+20a0f71d2c667f3c69df18f097f2b5678ac7d214
+1297ee7a41aa4e8499c7ddb3b1fed783eba19056
+721d9c387ed382988fce6fa864446fed5fb23173
+d116bac3b6ad77084c12bea557d42ed4c9d78433
+7a97de9460d679efa5a5b4c6f0b0a5ef68b56b3b
+5ed66fb992bfefb070b5c39dc45b6e3ff5248c10
+e14cc2715b806288fe457d88c1ad07ef55c65318
+835e510fcf22b4b9097ef51b8d0bb4e7b806bdfd
+25982e2bef817ebde7be5bb80b22a9864b979fb0
+c47bd9f6eb255da525dbcdfc111609c90bc4d2ae
+1f5f67d315c9dad341d39129d8f8fe7fa58e564c
+d57982dc55dbed3d0f89589e319dc2d2bd598532
+714d487571ca0d676bad75c8fa622d6f50df953b
+b11b71b704629357fe13ed97b216b9554b0e7463
+c83a05de1b4b20f7cd7cd872863ba2e66ada4d3f
+bdbba95e5abc543981fb557f21e3e6551a563b45
+779d3f0cf74b7d33344eea210170c7c981a7e27b
+972b1a7ef8cc9c83c2c6d8d126f94f27b567d7d0
+a9215666b4bcdf8d510de8952cf0d55b635727dc
+91495c689e6e614247495c3f322d400d8098de43
+8bed7ff2f75d956652320270eaf331e1f73efb35
+2a826273e856939b58be8779d2136bffa0dddb08
+53c36186bf0ffbe2f39165a1824c965c6394fe0d
+c900e0ad4c95948baaf0acd8449fde26f9b4952a
+a168ca2e199121258fbb2b6c821207456e5bf994
+4e43408a59852c1bbaa11596a5da3e42034d9380
+fdbacf2ff0fc21e021c830cdcff7d347f2fddd8e
+ce75deb5c645eeb08254e9a7962c74cab1e4c480
+9f43caad22803332400f498ca4dd0429fe7da0aa
+2a7058a720fa9da4b9b607ea00bfdb63652dff95
+ea1303f6746f815b7518c82c9c4d4a00cd6328b9
+914d7527678b514e3ee9551655f55ffbd3f0eb0a
+aae742779e8b754da7973949992d258d6ca26216
+b41d585246360646c677a8238ec35e8605b083b0
+7acbf0b060e948589b38d5501ca217463cfd5c2f
+1251deae1b4a722a2155d932bdfb6fe4ae28dd22
+23ee7b7a9ca5948e81555aaf3a044cfec778f148
+776362314f1479f5319aaf989624ac604ba42c65
+bfdafe932f93b01632a5ba590627f0d41034705d
+ee1f9637f372d2eccc447461ef834a9859011ec1
+9b9f6e5eb6d7fa50300d67502e8fda1006594b84
+b0b944b3a783c2d9f12637b471fe1efb44deb52b
+82417d8ec8ac6406f2d55774a35af2a1b3f4b66e
+bc9bad25f8149318314971d8b8c170064e220ea8
+053931267af79a89791479b18d1b9cde3edcb415
+b6620027b441131a18f383d544779521b119c1aa
+90221884fe2643b80203991686af78a9da0f9791
+48a5b6ee60475b18411a910c6084b3a32147b8cd
+53ce84598052308b86ba79d873082853022aa7e9
+be4f7679797777f2bc1fd6aad8af67cce5e5ce87
+b7774c096dc18bb0be2acef07ff5887a22c2a848
+0e93a5a7f6dbdb3802173dca05717d27d72bfec0
+5fea26746f3140b12317fcf3bc1680f2746e172e
+dc5d9399b3796db7fd850990402dce221b98c8be
+c88c21eb9a8e08b66c981db35f6556f4974d27a8
+e3a6e5a573619a97bd6662b652ea7d088ec0b352
+16de1324459fe8fdcdca80bba04c3c30bb789bdf
+46c82cfadd9f885f5480b2d7155f0985daf949fc
+be437b53a376085b01ebd0f4c7c6c9e40a4b1a75
+32e9c9520cf6acb55dde672b73760442b2f166f5
+55a7286f014cc6b51a3f50b1e6bc8acc8166f231
+7574f999d2325803f88c4915ba8f304cccc232d1
+450c6a57f19f5aa45626bb08d7d5d6acdb863b4b
+641f0989b87bf7db67a64900dcc9568767b7b50f
+9aab33ce8d6786b3b77900a9b25f5f4577cea461
+fa32b29e627086d4302db4d30c07a9d11dcd6b84
+af4745a3c3c7b51dab0fd90d68b53e60225aa4a9
+a325d5ea42a0b6aeb0390318e9f65f584bd67edd
+dac8fc521dfafb2d082faa4697f491eae00472c7
+c3beae515f38daf4bd8053a7d72f6d2ed3b05d88
+d066575b48b552a38e63095bb1f7b56cbb1fbea4
+706b9767a444de4fe153b2f3bff29df7674c3161
+fffe5ab3351deab81f7562d06764551422dbd9c4
+5fe3a9d54d5070308803dd8ef611594f59805400
+def934edb7c7355757802a95218c6e4ed6122a72
+071ec4f3fb4bfe6ae9980477d208a7b12691710e
+d79365336115661b0e8dbbcd4b2aa1f504b91af6
+d666ce9d783a2d31550a8aa47da45128a67304a7
+b13e2e43672e66ba45d1b852a34737e4ce04226b
+ab1719f573a6c121d7d7da5053fe5f12de0182e7
+79dc84a3bf76f1cb983902e2591d913cee5bdb0e
+fbc9ba70e36768efff130c7d970ce52810b044ff
+46f48211716062744ddec5824e9de9322704dea1
+784a83437b3dba49c0d7ccc10ac40497b84661a5
+824d1db06e1c25f7681e46199fd02cb5fc343784
+20d6a4aaf5abf2925fdce2780e38ab1771209f76
+588bed36b3cc9e2f26c39b5d99d6687f36ae1177
+73ba33e933e834b815f62a50aa1a0e15c6547e83
+ef999ab2f7b37f46445a3457bf6c0f5fd7b5689d
+94a11b601af77f0ad46338afd0fa4ccbab909e82
+2f88d3189723669f957d83ad542ac5c2341c37a5
+f39783847499dd56ba39c1f3b567f64dfdfa8527
+b5747ecfa0f3be0adaad919d78763b1133c4d662
+bb7f2c5d84797742f1d819ea34d1f4b4f8d7c197
+8d0bc14589dea1f4f88914ffcb57a5c54830f2cc
+633101e794d7b80f55f466fd2941ea24595e10e6
+d01303062b21cd9ff46d5e3ff78897b8499480de
+07a31bd7a0bd7118f8ac0bc735feef90e304fb08
+1910f5f7ac81d4fcc30284e88dee3537887acdf3
+7923742e2af655dee4f9a99e39916d164bc30178
+44b1399e8569a29eed0d22d88767b1891dbcf987
+dc107e7322f7059430b4ef4991507cb18bcc5d95
+f0f0e94d333b4923ae42ee195df17c0df62ea0b1
+7e8c8b1d72c67e2e241184448715a8d4bd88a727
+f7b4bc4ef14349a6e66829a0101d5b21129dcf55
+d1d6f1d64a04af9c2e1bdd74e72bd3ffac329576
+bc27434e376db89fe0e6ef2d2fabc100d2575ec6
+1b4f6f73c70353869026e5eec1dd903f9e26d43f
+7f904093e6933cab876e87532111db94c71a304f
+3f5693584d7dab13ffc12122d6ddbf862783028b
+710011644006c18291ad512456b7580095d628a2
+34ce703b7e79e3072eed7f92239a4c08517b0c55
+dbb9601a1d2febcce4c07dd2b819243d81abb2c2
+eac97959f2fcd882e8236c5dd6035870878eb36b
+a6e8a8bb99e30a9e80dbf80c46495cf798066105
+e506cdb250eba5e70c5147eb477fbd069714765b
+9b6d9f0923e1d42c86a1154897b1a9bd7ba6716c
+1a81c722727299e45af289d905d7dcf157174248
+e1d726d812554f2b2b92cac3a4d2bec678969368
+79f6a8f777a11fd626185ab549079236629431ac
+aeff403079022683b233decda556a6aee3225065
+cd23dc3227ee2a3ab0f4de1817d03ca771267aeb
+a8154d043f187c6640cb6aedeaa8385a323e46cf
+e3b9863e583171ac9ae7b485f88e503852c747b6
+7914c3f510e84a3d83d66717aad0d852d6a4d148
+42eda7c20db9dc0f42f72bb997dd191ed8499b10
+46e0703044811c941f0b5418139f89d46b360aa3
+7fcecaef60a681c47f0476e54e08712ee05d6154
+5bb4fd87fa4a27ddacd570aa81c2d66eb4721019
+53f5cb365806c57811319a42659c9f68b879454a
+477236563c6a6c6db922045453b74d3f9535bfa1
+91e17338a12b5e570907e816bff296b13177971e
+346752e3ab96c93483413be4feaa024ccfe9499f
+da4170c862d8ae39861aa193667bfdbdf0ecb363
+8de5dc782178114d9424d33d9adabb2f29a1ab17
+9a59abdf3460970de53e09cb397f47d86744f472
+99d7678039ad96ee29ab520ff114bb8021222a91
+6d07e176c754ac42773690d4b4919a39df85d7ec
+4a733a0862bd5f7be73fb4040c1375a6d17c9276
+1d4c25f9f8f08f5a756d6f472778ab54a7e6129d
+bed06e7ff0b510b4a1762283640b4233de4c18e0
+09137e3c267a3414314d1e7e4b0e3a4cae801f45
+682760f2f767fb47e1e2ca35db3becbb6153756f
+9c23859ec7313f2e756a3e85575735e0c52249f4
+869a2fbe42d3fdf40ed8b768edbf54137be7ac71
+70e14e216b12bed2211c4df66ef5f0bdeaffe774
+f4b5a8f6462a68e79d643648c780efe588e4b6ca
+345cc31c85e19cea9f8b8521be6a37937efd41c2
+6359fcb0b4546979c54818df8271debc0d653257
+cf2e1ebb9609f46af6de0c15b4f48d03e37e54ba
+9d3377313759dfdc1a702b341d8d8e4b1469460c
+b14b672e09b5b2d984295dfafb05604492bfaec5
+720763bcb5e0507f13a8a319018676eb24270ff0
+0fd1bffb171699a968c700f206665b2f8837d953
+67af3ec65f1dc535018f3671624e72c96a611c39
+82a0a5d0785fb2c2282ed901a15c3ff02f8567df
+f113aed343bcac1021dc3e57ba6cc0647a8f5ce1
+a6902db7972a7631d186bbf59c5ef116c205b1e8
+44855e53801d09763c1fb5f90ab73e5c3758a728
+121503705689f46546cade78ff62963574b4750b
+4113269f916117f975d5d2a0e60864735b73c64c
+b85c198ce09ffc4037582a544c7ffb6ebaeff198
+c6724c2bb7f491c92c8dd4a1f01a80b82644b793
+4f064c2a0ef0849eed61ab816ff0c2ff6d9d7308
+b49affdff167f5d170da18de3efa6fd6a50262a2
+e19fb22b35c352f57f520f593d748096b41a4a7b
+99c20eb5433ed27e70881d026d1dbe378a12b342
+642a386c451e94d9c44134e03052219a7512b9de
+3779e0599481f11fc1acee60d5108d63e55819b3
+a6eb6ad9142130406fb4ffd4d60e8348c2442c29
+eb70c38a350d13ea6b54dc9ebae0b64171d813c9
+ed07856461da6c7afa4f1782b5b607b45eebe9f6
+a2e0966f303f38b58b898d388d1c83e40b605262
+6a4419ce2338ea30a570cf45624741b754fa52cb
+c146aa6d56233ce700032f1cb179700778557601
+4db9e5f19366fe5d6a98ca43c1d113dac823a14d
+cbbd13c29d042743f0139f1e044b6bca731886d0
+4dca3d6341e1d991c902492952e726dc2a443d1c
+8e0becfc5fe3ecdd2ac93fabe34634827b21ef2b
+7cffcb4f24343a924a8317d560202ba9ed26cd0b
+228ea13041910c41b50d0052bdce924037c3bc6a
+c98983592777952d1751103b4d397d3ace00852d
+eb8519cec0d7a781923f68fdca0891713cb81163
+dde5125baefa1141f1ed50479a3fd67c528a965f
+5fff61302adc65d554d5db3722b8a604e62a8377
+6193c833ad25ac27abbde1a31c1cabe56ce1515b
+047bb1b1bd1f19b6c8d7ee7d0324d5ecd1a3efff
+d2eb1079552fb736e3ba5e494543e67620832c52
+62fd622b3ca97eb5577fd423fb9efde9a849cbef
+93af36da08bf99e68c9b0d36e141ed8154455ac2
+ea079334121a0ba89452036e5d7f8e18f6851519
+b55e70df03d9b80c91446a97957bc95772dcc45b
+fe7c0bafbd9a28087e0169259816fca46db1a837
+b15a06d701f0a7f508e3355a09d0016de3d92a6d
+0647c9d56cf11215894d57d677997826b22f6a13
+28e1982d20b6eff33989abbef3e9e74400dbf508
+bc811a66855aae130ca78cd0016fd820db1603ec
+af9419f2155785961a5c16315c70b8228435d5f8
+8a0159919ee4e1a9f4cbfb652a1be212bf0554fd
+407806f5fe3c5ecc2dc15b75d3d2b0359b4ee7e0
+3ff79cf6df1937949cc9bc522041a9a39d314d83
+e8aa1f207b4b0bb710f79ab47a671d5639696a56
+cb522b2e16b11dde48203bef97131ddca3cdaebd
+fea83550a21f4b41057b031ac338170bacda8805
+e293a31260cf20996d12d14b8f29a9d4d99c4642
+dac34b590adddef2fc31f26e2aeb0059115d07a1
+845f45f8412905137bf4e46a0d434f5856cd3aec
+cc9d068cf6c4a30da82fd6350a348467cb5086d4
+b3cb91a08be4117d6efe57251061b62417867de9
+d77f18917a58e7d4598d31af4e7be2762d858370
+e9363f4368b04aeaa6d6617db0a574844fc59338
+d458c49a5e34263c95b3393386b5d76ba770e497
+b97f694c2a111b5b1724eefd63c8d64c8e19f6c9
+134f1cee8408cca648d8b4ca44b38b0a7023af71
+9c59bb28054eee783a40b467c82f38021c19ff3e
+518a3ce2a290352afea22027b64bf3950bffc65a
+ffe4bb47ec15f768e1744bdf530d5796ba56cfc1
+e3c8e49ffa7beceffca3f7f276c27ae6d29b35db
+e20e2db743e8db1ff61279f4fda32bf8cf381f8e
+dc3dc18b6831c867a8d65da130a9ff147a736745
+7783095a565094ae5b3dccf082d504ddd7255a5c
+54948ee407b5d32da4b2eee377cc44f20c3a7e0c
+a532cfc69259254192aee3fc5be614d9197e7824
+abdd17e411a7bfe043f280abd4e560a04ab6e992
+f6f06be05981689b94809130e251f9e4bf932660
+bf3bf5400b617fef2825eb987eb496fea99804b9
+15136c2f94fd29fc1cb6bedc8c1831b7002930a6
+3e9ab40e6e23f09d16c852b74d40264067ac6abc
+81a80b26979b40d5ebe3f5ba70b03cb9f19dd7a5
+9989ad33b64accea8042e386ff3f1216386ba7f1
+20111924fbf616a13d37823cd8712a9c6b458cd6
+5134353bd01c4ea36bd007c460e8972b1541d0ad
+e6da1fcd2a8cda0c69b3d94812caa7d844903007
+1921795408345751791b44b379f51b7dd54ebfa2
+96e1ccfe96566e3c96d7b86e134fa698c01f2289
+b166ce267ddb705e6ed855c6b679ec699d62e9cb
+972e044f69443dfc5c987e29250b2b88a6d2f986
+e6d6203fa911429d76f026e2ec2de260ec520432
+f1aa120fb720f6cfaab13aea4b8379275e6d40a2
+8451bf3dd6bcd946be14b1a75af8bbb65a42d4b2
+d340a135a55ecf7506010e153d5f23155dcfa7e8
+ef032afa4bdb18b328ffcc60e2dc5229cc1939bc
+f92ade569cbe54344ffd3bb25efd366dcd8ad659
+dcb6f06631021811091ce691592b12a237c12907
+01c4cf9c7c08f0ad3f386d88725da564f3c54679
+f5eb0cf9c57716618fab8e24e841f9536057a28a
+9825c4dddeb2ed7eaab668b55403aa2c38bc3320
+9806d3dc7805dd8c9c20d7222c915fc4beee7099
+93c0405b1f5432eab11cb5180229720604ffd030
+4aa093d1986b4ad9b073ac9edfb903f62c00e0b0
+a961f1234e963a7945fed70197015678149b37d8
+9f131b4e036208f2402182a1af2a59e3c5d7dd44
+e049d3db7c59f8173aa91dd4bd1bd0beebdaa260
+d89cfed36ce8ffdb2097c2ba2dac3e2b2501100d
+660c99ac408b535bb0468ab3708d0d1d5db30180
+3965d61c4f3b72044f43609c808f8760af8781a2
+96f0e7416994035c91f4e0dfa40fd45090debfc5
+f4d30896c5f808a622824a2d740b3130be50258e
+d6ae7941dcec920d5726d50d1b1cdfe4dde34d35
+5de9670f72d10682bf2cb3156988346257e0489f
+69adbfa7b0b886caac15ebe53b89adce390598a3
+a92c207031b0778572bf41803dba1a21076e128b
+c18a03568d4b512a0d8380cbb1fbf6bd56d11f05
+7fb5006b6522436ece5bedf509e79bdb7b79c9a7
+c4f3185f010027a0a97fcb9753d74eb27a9cfd3e
+eac6aee477446a67d491ef7c95abb21867cf71fc
+71ca8b6e84c17b3e68f980bfb8cddc837100f8bf
+70d2ab1af0edd5c0a30d576a5d4aa397c4f92d3e
+3e4acf3f2d112fc6516abcdddbe9e17d839f5d9b
+79db191ca1268dc88271abef3179c4fe4ee92aed
+c07ab025d9e3c885ad5386e6f000543efe091c4b
+9ff931ca721d50e470e1a38e583c7b18b6cdc2cc
+47e14fdc6685f0b3800f709c32e005068dfc8d47
+06518858bd99cddf9bc9200fac5311fc29ac33b4
+178a82e3a0541fa75c6a11350be5bded133a59fd
+5dd3c9ac3c6d826e17c5b378d1575b68d02432d7
+1de23d7fe718d9fab0159f58f422099e44ad3f0a
+0a60d9d62620e4f9bb3596ab7bb37afef0a90a4f
+e40cb4369c6402ae53c81ce52b73df3ef89f578b
+d99b5ee3e2d7e3a016fbc5fd417304e15efbd1f8
+dbe255d3d2a5d960daaaba71cb0da292e0af36a7
+21959bc56a160ebd450606867dce1462a913afab
+2717b044ae9933f9ab87f16d6c611352f66b2033
+04317e63c08e7888cef480fe79f12d3c255c5b00
+2f17f6c460e02bd105dcbf14c9b73f34c5fb59bd
+166186e551b75c9b5adcc9218f0727b73f5de899
+0857281a3b6a5faba1405e2c11f4e17191d3824d
+653d19e64bd75648cdb149f755d59e583b8367e3
+4007bf090887d8a0e907ab5e17ecfcdbbdafc2e4
+edfce091688bc88389dd4877950bd58e00ff1253
+917bea27af1846b649e2bced624e8df1d9b79d6f
+e22adcd2a6a7544f017ec875ce8f89d5c59e09c8
+3c1aef7c2d32a219bdbc89a44d158bc2695e360a
+1e3068886b138304ec5a7296702879cc8788143d
+54ba18952fe36c9be9f2ab11faecd43d123b389b
+a95dc0c4a9d882a903ce8c70e80399f38d2dcc89
+2e5cfa97f3ecc10ae8f54c1862433285281e6a7c
+f65b47093e4d45013f54c3ba09bbcce7140af6bb
+c3285a1d6ec6972156fea9e6dc9a8d88cd001617
+2004afb2276a169cdb1f33b2610c5218a1e47332
+3a9681e2e07be7b40b59c32a49a6ff4c40c962a2
+e988be047b28ba3b2f1e4cdba3e8c94026139fcf
+d4e669d5d35fa0ca9f8d9a193c82d4153f5ffc4e
+8e3d0b401dec8818cd0245c540c6bc032f169a1d
+2ab034e1f54c37bfc8ae93f7320160748310dc73
+53507e2de66eaba996f14fd2f54a5535056f1e59
+9788b491ddc188941dadf441fc143a4075bff764
+661ca4bbb49bb496f56311e9d4263dfac8eb96e9
+b68150bfdec373ed8e025f448b7a3485c16e3201
+3a3f75e0ffdc0eef07c42b470593827fcd4020b4
+ac86ccc16d555484a91741e4cb578b75599147b2
+ee56823f2f00c8c773e4ebc725ca57d2f9242947
+3f5e8f884e71310d7d5571bd98e5a049b8175075
+270acff7916589a6cc9ca915b0012ffcb75d4899
+7caa3a74313f9a7a2dd5b4c2cd7f825d895d3794
+40273657e6919455373455bd9a5355bb46a7d614
+be28ed1be084385f5d389db25fd7f56cd2d7f7bf
+2c424f21607ff6c92e640bfe3da9ff105c08fac4
+aa1129780cc496918085cd0603a774345c353c54
+f1280f76933ba8b7f4a6b8662580504f02bb4ab6
+9649a19b49607459cef32f43db4f6e6727080bdb
+765be0c44a67e41e0f8f0b5d8a3af0ff40a00c7d
+20b994a78cd1db6ba86ea5aab7211574df5940b3
+68484ae8a042904a95a8d284a7f85a4e28e37513
+6ba3cb67bcdb7aea8a07e144c03b8c5a79c19bc0
+7143518f847b0ec57a0ff80e0304c89d7e924d9a
+8e36100cb144685c26e46ad034c524b830b8b2f2
+8dd9c97b85e883c16e5b1ec260f9cd610df52dec
+9487cea80f23afe9bccc94deebaa3eefa6affa99
+43fce0c6b11eb50f597aa573611ac6dc47e088d3
+7c66e7f357553fd4b362d00ff377bffb9197410e
+5e9ec3b8daa95d45138e30c07321e386590f8ec7
+aafeb3d76155ec28e8ab6b4d063105d5e04e471d
+032825000c03b8ab4c207e1af4daeb1f225eb025
+fb54d3c37dc82891ff9dc7dd8caf31de00c40d6a
+3fb4bf38d34f7f7e5b3df36de2413d34da3e174a
+95ea564bd983129ddb5535a6741e72bb1162c779
+4e30107ee6a2e087f14a7725e7fc5535ec2f5a5f
+7c825562b3ff4683ed049a372cb6807abb09af2a
+d5d7e89e6210fcbaa52dc277c1e307632cd91dab
+cf805d478aeb53520c0ab4fcdc9307d093c21e52
+b1d89015f9b16515735d4140c84b0bacbbef19ac
+c6f3399edb73cfba1248aec964630c8d54a9c534
+5fa04523ff13a82b8b6612250a39e1edb5066521
+ede5982980aa76deae8f9dc5143a724299d67742
+f5eb411217f729ad7ae84bfd4aeb3dedb850206a
+3a3e55cf5bfd689d6c922e082efa0cd71cd2ae5c
+59e9934720baf3c5df3a0e1e988202856e1f83ce
+25ff865460c2b5481fa4161749d5da8501010aa0
+7f5b379b12505d60f9303aab1fea48515d36d098
+8f71c97206a03c366ddefaa6812f865ac6df87e9
+aab3561acbd19f7397cbae39dd34b3be33220309
+9ac43a98fe6fde668afb4fcc115e4ee353a6732d
+636b8ffc09b1b23ff714ac8350bb35635e49fa3c
+84fa126cb19d569d2f0147bf6f9e26b54c9ad4f1
+c7de0c85432ad17a284b5b97c4f36c23f506d9d1
+13604bbdb6f04a71dea4bd093794e46730b0a488
+d790093cb85fc556c0089610026e0ec3466ab845
+aa6e8a2a9d3ed59d2ae72add84176e7b7f4b2912
+dc9d62087ff93a821e6bb8a15a8ae2da3e39dcdd
+5f7c4c20ae2731bfb650a96b69fd065bf0bb950e
+cd2c54705c455a4379f45eefdf32d8d10087e521
+0da75b0d341c8f945fae1da6c77b6ec345f47f2a
+b5968e7bb23f5f03213178c22fd2e47af3afa04c
+3e0a1884448bfd7f416c6a45dfcdfc9f2e617268
+0aaf785d7f21d2b5ad582b456896495d30b0a4e2
+c97a5f2241cc6cd99ef0c4527ea507a50841f60b
+eb8a3948c4be0d23eb7326d27f2271be893b3409
+725c3605c2d26d113637097358cd4c08c19ff9e1
+2a5903bdb3fdfb4d51f70b77f16852df3b8e5f83
+ba7b12c8e2ff3c5e4e0f70b58215b41b18ff8feb
+ca096e158912080493a898b0b8a4bd2902674fed
+5f771fed91c8e4b666489ba2384d0705bcf75030
+dad6b36fd515bda801f3d22a462cc62348f6aad8
+2d7c2c015053fff5300515a7addcd74b523f3f66
+29db16efc3b378c50511f743e5197a4c0b9e902f
+cd63759842a56bd2ede3999f6e11a74ccbec318b
+893239f17dc2d17183410d8a98b0440d98fa2679
+e5dfd17dbfc9647ccc7323a5d62f65721b318ba9
+cfaf61bacf61901b7e1ac25b779a1f87c1e8cf7f
+87b607b8d4858a16731144d17f457a54e488f15d
+be7444c891caf295d162233bdae0e1c79791d566
+ffc81ced9ee8223ab0adb18817321cbee99606e6
+4b9ec224949c79a980a5a66664d0ac6233c3d575
+c7c53d75f6e963b403057d8ba5952e4974a779ad
+4a3758f283b7c484d3f164528d73bc8667eb1591
+3dce635ce4b55fb63fc6d41b38640403b152a048
+0cf2eecf20cfbcb7f153713479e3206670ea0e9c
+f2902f5956d7e2dca536d9131d4334f85f52f783
+ff012c56b9b1de969328dacd13e26b7138ff298b
+ebbceab4e15bf641f74e335b70c6c4490a043961
+0c6a566ebdac4bd14e80cd6bf4631bc7458e1595
+fde41dc4ec6ac6474194b99e05b43dd6a6c4f06f
+604a281100784b4d5bc1a6db993d423abc5dc8f0
+23edcd0d2011d9c0d421193af061f2eb3e155da3
+8355d095d3534ef511a9af68a3b2893339e3f96b
+b034cc919af30e96ee7bed769b93ea5828ae361b
+834736698f2cc5c221c22369abe95515243a9fc3
+c29fe5ed41d2240352fcb8d8196eb2f31d009522
+3337cfc3de2c16dee6f7cbeda5f263409a9ad81e
+d492dbfaa42b4f8b8a74786d7343b3be6a3e9a1d
+7c11fa4fd91cb57e6e216117febcdd748e595760
+c05a7c72e679745deab9c9d7d481f7b5b9b36bdd
+f374ac9307be5f25145b44931f5a53b388a77e49
+141cb9ee401f223220d3468592effa90f0c255fa
+f4251e02f87ac3fcae70bdb313f13ed16ff6ff0a
+414d78e32ac41e6ff8b192bc095fe55f865a02f4
+cfdc4d0f8e1b4b9ced35317d12b4229f2e3311ab
+ed32df6b122b15a52238777c9993ed31107b4bed
+b5f9306c3207ac12ac761e7d028c78b3009a219c
+51bb86dc8748088a198b216f7e97616634147388
+6a5d7d20a8c4993d56bcf702c772aa3f95f99450
+cb004e9706f12d1de83b88c209ac948b137caae0
+f231046d5f5d87e2ca5fae88f41e8d74964e8f4f
+a6e4f924cf9a12625e85c974f0ed136b43c2f3b5
+d6c8f5674030cf3f5a2f7cc929bad37a422b26a0
+b7894c1f805ffd90ab4ab06002c70de68d6982ab
+f2896dd2701fbb3564492a12c64f11a5ad456a67
+fe866887d3c26ee72590c440ed86ffc80e980293
+daa120032d8f141bc6aae20e23b1b754a0dd7d5f
+ac26166857e55fd5c64ae7194a169ff4e473eb8b
+13aef395f426ca8bd93640c9c3f848398b189874
+7195cb08ba2248f3214f5dc5d7881533dd1f46d9
+4b5ff8c67f3496a414f94e35cb35a601ec98e5cf
+4ab84f203b0e752be83f7f213d7495b04b1c4c79
+1f3f7df159c338884ddfd38ee2d3ba2e1e3ada69
+bc6de183cd8b2baeebafeefcf40be88468b04b74
+d4288daef6519f6852f59ac6b85e21b8910f2207
+efb24d35d8f6a46e1ff3800a2481bc7e681e255e
+aee3427d0814d8a398fd31f4f46941e9e5488d83
+0e192ca16ce1c967e21d62f9810591eed3d6904b
+4f37f71517420c93c6841beb33ca0926354fa11d
+cce332405ce9cd9dccc45efac26d1d614eaa982d
+a5f35880477ae82902c620245e258cf854c09be9
+9944c451b4a487940d3fd8819080fe16d627892d
+7117ed0be436c0291bc6fb6ea6db18de74e2464a
+62b3598b401c807288a113796f424612cc5833ca
+3cb057a24a8adba6fe964b5d461ba4e4af68af14
+68c1090f912b69b76437644dd16922909dd40d60
+c4fb2de4a5dc28710d9880aece321acf68338fde
+c00df53bd46f78ae925c5768d46080159d4ef87d
+68d2afd8c5c1c3a9bbda3dd209184e368e4376b9
+fdf8e293a7618f560e76bd83e3c40a0788104547
+759cf57215fcfdd8f59c97d14e7f3f62fafa2b30
+043efe5f465704ced8d71a067d2b9d5aa5b59c29
+14ee4948be56caeb30aa3b94968ce663e7496ce4
+3b73f8a2b39751efb7d7b396bf825af2aaadee24
+be632b206f1cd38eab0c01c5f2004d1e8fc72880
+8fda2f6b85c7e34d3e23927e501a4b4f7fc15b2a
+dbc8ffd6457147ff06cd3f56834e3ec6dccb2057
+c34532fe6bfbd1e6df477c9ffdbb043b77e7804d
+d7593148e4319df7a288180d920f2822eeecea0b
+ed0cf5f577f5030ac68ab62fee1cf065349484cc
+e853484dc585bed4b0ed0c5eb4bc6d9d93a16211
+87e592ee1a7e2d34e6b115da08700a1ae02e9355
+0a85afebaa19c80fddb660110a4352fd22eb2801
+c7f0c0636d27a1d45b8fcef37e545b902195d937
+4ccf64fc1c9ca71d6aefdf912caf8fea048fb211
+fd892e912149e3f5ddd82499e16f9ea0f0063fa3
+06c2086f7f72536bf970ca629151b16927104df3
+6dddf1440617bf7acda40d4d75c7fb4bf9517dbb
+3a2c90e0963bfb07fc7cd1b5061383e9a99c39d2
+8d5998cd984e7cce307da7d46f155f9db99c6590
+6f3054f182c34ace890a32fdf1656b583fbc7445
+803c92a3f0815dbf97e30c4ee9450fd005586e1a
+a7664247a37a89c74d0e1a1606a99119cffc41d4
+f7b422df567ce9813926461251517761e3e6cda0
+71c4b8e1bb25ee80f4317411ea8180dae6499524
+4b61d8490bf034a2ee8aa26601d13c83ad7f843a
+26a44feb7a64db7986473ca801c251aa88748477
+854b1f0581f5d3340f15eb79452363cbf38c04c8
+b40c001b3e304dccb28c745bd54aa281c8ff1f29
+a16fb74ea66025d1f346045fda00bd287c20af0e
+0951f42abbf649bb564a21d4ff5dddf9a5ea54d9
+c19222d138eb45903a3aa7e46030979d50769771
+6feafc5c1d8b0e9d65ebe4c1512b7860c538fbdc
+ec5c63609cf56496715b0eba0e906de3231ad6d1
+16d6737b50f969247339a6860da2109a8664198a
+31ea88f29e7f01a9801648d808f90862e066f9ea
+cd7a7be3804fd217e9f10682e0c0bfd9583a08db
+f0cee87e9ecedeb927664b8da44b8649050e1c86
+5a4ec5c79f3699ba037a5f06d8ad309fb4ee682c
+d0471d5907d6557cf081edf4c7c2296c3c221a38
+7361b900018f22e37499443643be1ff9d20edfd6
+2e9c780ee8145f29bd1a000585dd99b14d1f5894
+d278e020be85a1ccd90aa366b70c43884dd3f798
+017e94ad51c9be864b98c9b75582753ce6ee134f
+0cfca73806f443188632266513bac6aaf6923fa8
+b161d261fabb507803a9e5834571d56a3b87d147
+cfdc632adcb799dba14af6a8339ca761725abf0a
+c254b4c0f6d5a5a45680eb3742907ec93c3a222b
+eb4d2ec77fae67141f6cf74b3ed773997c2c0cf6
+ced7811f2b694e54e3d96ec5398e4b6afca67fc0
+6601a0906e503a6221d2e0f2ca8c3f544a4adab7
+1bd9dbe78918ed17b0a3ac40623f044cb3d3552c
+9961f1e5cf8fda29912344773bc75c47f18333a0
+8de6deefb90fb9b3f7d451b9d8a1a3264b768482
+7f44e5929b11ce2192c3ae81fbe602081a7ab5c4
+9e10ea753b9767aa2f91dafe8545cd6f44befd7f
+286a5c19a43382a21c8d96d847b52bba6b715a71
+33aa980544a9d627f305540059828597354b076c
+6b6ff9d55e1df06f8b3e6f257e23557a73b2df96
+26c8ed504f852eda4a2e63dbbbc3480e57f43c70
+1aa61dd85d3a5a2fe819cba21192ec4471c08628
+969626c52d30ea803064ddef8fb4613fa73ba11d
+66837add89caffd9c91430820f49adb5d3f40930
+07a328999666ef2dc28ce57bc1881d10e6f0b370
+b484141b99d3478a12b8a6854864c4b875d289b8
+80345fbb6bb6bcc5ab1a7adcc7979a0262b8a923
+adf62dfa00748381ac21634ae97710bb80fc2922
+c7745f941532b7d6fa70db09e81eb1167f70f8a7
+895081d6a5545ad6385bfc6fcf460fc0b13bac86
+7b47dd9302b3085cd6705614b88d7bdbc8ae5c13
+6a6269e591e11f41d59c2ca1e707aaa1f0d57de6
+3bcb93aa2a5e5eda039679516292af2f7c0ff9ac
+3266fbaaa317a796d0934b9a3f3bb7c64992ac7d
+58217ae5423828ed5e1569bee93d491569d79970
+eb6f2b5529f2a7bc8b5b03b1171f75a4c753a0b2
+c362116a358320e71fb6bc8baa559142677622d2
+23b37c2f803a2d4b701e2f39c5f623b2f3e14d8e
+ddbb6e0913ac127004be73e2d4097513a8f02d37
+12ebeb2176a5043ad57bc5f3218e48a96254e3e9
+f8162276f3b21a3873dde7a507fd68b4ab858bcc
+e1d1540a718bb7a933e21339f1a2d90660af7353
+f64574ee0e6247b84d573ddb5c6e2c4ba798ffff
+ecc4be938f0e61a9c6b5111e0a99013f2edc54b9
+7e600faee0ba11467d3f7aed57258b0db0448a72
+73dcb4c452badb3ee39a2f222298b234d08c21eb
+fa80344137c4d158bf59be4ac5591d074483157a
+0f7e9199dad3237159e985e430dd2bf619ef2db5
+ec90d333588421764dff55658a73bbd3ea3016d2
+7e56d9ebd47490bb06a8ff0bd5bcd8672ec52364
+bed8feb11e8077df158e16bce064853cf217ba62
+b3add9bc9e70b6b28ba31e843e9155e7c37f3958
+79c3a7131c6c176b02b97d368cd0cd0bc713ff7e
+53dd25350d3b3aaf19beb2104f1e389e3442df61
+b598f7761b153ecb26e9d08d3c5817aac5b34b52
+679b72d23a9cfca8a7fe14f1d488363f2139265f
+c91da328fe50821182e1ae4e7bcbe2b62496f8b9
+a758b744a6d6962f1ddce6f0d04292a0b5cf8e07
+0ba5369c5e1e87ea172089d84a5610435c73de00
+49358915ae259271238c7690694e6a887b16f7ed
+f3cf10c84c4665a0b28734f5233d423a65ef1f23
+72167c9e4e03e78152f6df44c782571c3058050e
+8a4893d825db22f398b81d6a82ad2560832cd890
+afdc303b3325fbc1baa9f18a66bcad59d5aa675b
+b69bcb5f73999ea12ff4ac1ac853b72cd5096b2d
+4f9e00aaf2736b79e415f5e7c8dfebda3043a97d
+db0379c9b02e514f10f778cccff0d6a6acf40519
+db3545a983ffd24c97c18bf7f068783102548ad7
+ac03849956ac470c41585d2ee34d8bb58bb3c764
+b91f54e1581fbbf60392364323d00a0cd43e493c
+9fdfe1695adac2380f99d3d5cb6879f0ac7f2bfd
+c84de67ec2a5d687869d0c3ca8ac974aaa5ee765
+95b9df34bcf4ae04beea55c11cf0cc4095aa38dc
+994f7c469219ccce59c89badf93c0661aae34264
+a38045ed82d6800cbc7a4feb498e694740568258
+51410d6bd9a41eacb105f15dbdaee520e050d646
+e0939b4518a5ad649ba04194f74f3413c793f28e
+60821d447e5b8a96dd9294a0514911e1141ff620
+eafda8a94e410f1ad53b3e193ec124e80d57d095
+629a973ca5f3c7d2f4a9befab97d0044dfd3167a
+395bf182983e0917f33b9701e385290b64e22f9a
+d89a754d7c59e025d2bfcdb872d2d061e2e371ba
+82a610a59c210ff77cfdde7fd10c98067bd142da
+2f67d5448b5372f639633d8d29aac9c0295b4d72
+fc7f140fcedfe54dd63769268a36ff3f175662b5
+0be43cf4299ce2067a0435798ef4ca2fbd255901
+ba17782ca5fc0d932317389c2adf94b5dbd3ebfe
+2e7e1ee7e3ee1445939480efd615e8828b9838f8
+2c7185bcf31a4950b014b67ca7c63735ee00d56f
+d10cfcf206b0991e3bc20ac28df1f61c63516f30
+bd8d579715d58405dfd5a77f32920aafe018fce4
+3674f3597bbca3ce05e4423611d871d09882043b
+3e2b9ffeb708b4362ebfad95fa7bb0101db1579d
+1cfe3533759bf95be1fce8ce1d1aa2aeb5bfb4cc
+7eb8476024413269bfb2abd54e88d3e131d0aa0e
+5aed0f26549c6e64c5199048c4fd5fdb3c5e69d6
+9ac2960f646a46b701963230e6949abd9ac0a9b3
+3bf579baf0903ee4d4180a29739bf05cbe8f4a74
+f8f872044be2918de442ba26a30336d80d200c42
+98d1b5515b079492c8e7f0f9688df7d42d96da8e
+d3dea0cd65ab3da14cb7b3bd0ec59531d98508aa
+8fba84af61ac9b5e2bcb69b6730a597d7521ad73
+c4a2cd5ec81cdfd894c9a20d4ffb8cda637aab1f
+734cdda4a4de2a635404e4c6b61f1b2edb3f501d
+c61a8940d66eed9850b35dd3768f18b59471ca34
+5f2c210644c1e567435d78522258e0ae036deedb
+15cf1f17aeba62cd834116b770f173b0aa614bf4
+e8c6c3fc9b52dffb15fe115702c6f159d955d308
+95b5296f7ec70455b0cf1748cddeaa099284bfed
+51d6a8a61ea9588a795b20353c97efccec73f5db
+ac8e09128e1e48a2eae5fa90f252ada689f6eae7
+4f4f920eb43399d8d05b42808e45b56bdd36a929
+fcf393a90190e376b617cc02e4a473106684d066
+5550a6df1b118a80c00a2459bae216a7e8e3966c
+33548531f9ed2ce6f87b3a1caad122c97f1fd2e9
+f6532bf13a4649b7599eb40f826aa5281e392c61
+292e1c88d43a77dbe5c610f4f611cfdb6d3212b6
+270733d986a1eb72efda847b4b55bc6ba9686df4
+5d2e5833ca713f95adcf4267148ac2ccf2318539
+8b2c090d9007e147b8c660f9282f357336358061
+df90850f1c153bfab691b985bfe536a5544e438b
+ae2c71080b0e17dee4e5a019d87585f2987f0508
+8127b7654d6e5c46caaf2404270b74c6b0967e19
+b42a97fb47bcd6bfa72e130c08960a77ee96f9ab
+2e832d5657bf9e5678fd45b118fc74db07dac9da
+75879ab7a77318bbe506cb9df309d99205862f6c
+9436170c648c40b6f4cc3751fca3674aa82ffe9a
+f7be8956639e66e534ed6195d929aed4e0b90cad
+fbb2f81fc00ee0f257d4aa79bbef8cad5000ac59
+f5603ceaebe3caf6a812edef9c4b38def78cbf34
+c30e4e4994b76605dcb2071954eaaea471307d80
+cfa92e17809e8d20ebc73b4e531a1b106d02b38c
+335435a94f8fa9c128b9f278d929c9d0e45e2510
+bcc346f4a287d96d124e1163e4447bfc47073cd8
+5d9971c6a9d5c56463ea186850b16f8969a58e67
+b961e512242ddad7712855ab00b4d37723376e5d
+49e975a4c60d99bcc42c921d73f8d89ec7130916
+6a6406906470be10f6d6d94a32741ba370a1db68
+5f27ed82c52339124aa368507d66b71d96862cb7
+5db4fe0ce9e9227042144758cf6c4c2de2042435
+6856a11b98ffffeff6e2f991d3d1a1232c029ea1
+b14e3fe0d320c0d7c09154840250d70bc88bb6c0
+31f1c92dbfa5aa338a21a0cb15d071cb9dc6e362
+89c73b1e7c9b5e126a26ed5b7caccd7cd30ab199
+eacf974e235add458efb815ada1e5b82a05878fa
+fe464b2b54154d231671750053861f5fd14454f5
+08f4832507259ded9700de81f5fd462caf0d5be8
+c87d5036d3a374c66ec4f5870df47df7176ce8b9
+59efb1ac77c59abc8613830787d767100387c680
+a285b6edd47f9b8966935878ad4539d270b406d1
+c72e6992f44ce75a40f44be4365dc4f264735cfb
+6cb8c52bb421ce04898fa42cb997c04097ddd328
+08fbbfe87563595508a77629e47613d6bd1119eb
+ce2945e369603fcec1fcdc6e19aac5996325cba9
+4db99a2268a120c7af636387241188064ea42338
+2744e6d526b8f2c1b297ac2d2458aaa08b0cda11
+db5a00984fa54b9d2a1caad0067a9ff0d0489517
+ae5e92abd5929ee7f0a5aa1622aa094bac4fae29
+9e297343da13cf9ba0ad8b5b75c07723136f4885
+edff76149ec44f6849d73f019ef9bded534a38c2
+1d7df3df839a6aa8f5392310d46b2a89080a3c25
+07377c375ac76a34331c660fe87ebd7f9b3d74c4
+52472ec859131844f38fc7d57944778f01d109ac
+a2b4a6c6b32900a066d0257ae6d4526db872afe2
+9ca542d744149f0efc8b8aac8289f5e38e6d200c
+4317856a1458baa427dc00e8ea505d2fc5f118ab
+eb3066de677f9f6131aab542d9d426aaf50ed2ce
+574b62c845809fd54cc168492424c5fac145bc83
+84c5b45328dee855c4855a104ac9c0558cc8a328
+7e2cfbfd43045fbd6aabd9a45090a5716fc4e179
+fb85867c989b9ee6b7899134136f81d6372526a9
+946017d5f11aa582854ac4c0e0f1b18b06127ef1
+050a149051a5d268fcc5539e8b654c2240070c82
+8334da483f1986aea87b62028672836cb3dc6205
+b2470969e4fba92f7909eac26b77d08cc5575533
+c678920facffd35853c9d185904f4aebcd2d8b49
+cdef0eaff4a3c168290d238999fc066ebc3a93e8
+fa641327dc5873276f0af453a2caa1634c16f143
+b5ca8d4f259f35c1f3edfd9f108ce29881e478b0
+61efeb64e8431cfbafa4b02eb76bf0c58e61a0fa
+7df268a3f4da7d747b792882dfb0cbdb7cc431bc
+ea03a569272d329090fe60d6bff8d119e18057d7
+cbb27980eb04f68d9f10067d3d3c114efa9d0054
+90498b95fe8b299ce65d5cafaef942aa58bd68b7
+92be73dffd3320fe7734258961fe5a5f2a43390e
+ac855f0de9086e9e170072cb37400637f0c9b735
+ef2bb8bd93fa8b44414565b32735334fa6823b56
+f20e0eefd007bc310d2a753ba526d33a8aba812c
+80097a879fceff2a9a955bf7613b0d3bfa68dc23
+1275852f2e78ed9afd189e8b845fdb5393413614
+82eff71af91df2ca18aebb7f1153a7aed16ae7cc
+0ee737085af468f264f57f052ea9b9b1f58d7222
+566563a02dbaebec07429046122426acd7039166
+a3201e955d6607d383332f3a12a7befa08c5a18c
+5babbad3daac5c26503088782fd5b62067b94fa5
+0e2ea7af369dbcaeb5e334b02dd9ba5271b10265
+de0eb358b890d92e8f67592c6e23f0e3b2ba3f66
+3c563542db664321aa77a9567c1601f425500f94
+7224d58a7e1f02b84994b60dc3b84d9fe6941ff5
+cd2f8d661ea2c6d6818a278eb4f0548751c3b1ae
+7d40e7e5c01bd551edf65902386401e1b8b8014b
+dbced84d839165d9b494982449aa2eb9109b8467
+035c8632c1ffbeb75efe16a4ec50c91e20e6e189
+ff76ff05aa1ab17e5ca9864df2252e6bb44c8a17
+be4f18e25b06f430e2de0cc8fddcac8585b00beb
+ecd08edab496801fd4fde45362dde462d00ee91c
+6cce5ccc5d366996f5a32de17a403341db5fddc6
+1063be2ad265751fb958b396ee26167fa0e844d2
+101569eeef2cecc576578bd6500f1c2dcc0274e2
+5b721f86f4a394f05350641e639a9d6cb2046c45
+53de11d144cd2eda7cf1bb644ae27f8ef2489289
+84c0f814951b80c3b2e39caf3925b56a9b2e1733
+3bf8e4d89b9e6d004de6ea52e3e9d68f6015f94b
+651cafb2620ab60a0e4f550c080231f20ae6d26e
+b712f08f819b925ff7587b6c09a8855bc295d795
+c1dd69df9dfbd7b526cc89a5749f7f7fabc1e290
+5161e38e4ea716dcfb554ccb88901b3d97778f64
+9a98dd6d6aaba05c9e46411ea263f74df908203d
+9b1bcef8bfef0fb5eb5ea9af0b699aa0534fceca
+f2d605985821597773bc6b956036bdbc5d307386
+ce032dae834f383125cdd852e7c1bc793d4c3ba3
+0a0007cfd40ae9694c84f109aea11ec4f2b6cf39
+656531036cee6b2c2c71954bb6540ef6b2e016d0
+63fd7a159e58add133b9c71c4b1b37b899dd646f
+3646b42511a6a0df5470408bc9a7a69bb3c5d742
+82eb267b8e86be0b444e841b4b4ed4814b6f1942
+d3faed04712b4634b47e1de0340070653546deb2
+7c47da191f935811f269f9ba3c59556c48282e80
+227b1a09b942eaf130d1d84cdcabf98921780a22
+aad4c94fd55d33a3f3a5377bbe441c9474cdbd1e
+31cdaaa7a47efe2ce0e78ebec29df4d2d81df265
+b1451721864e836069fa299a64595d1655793757
+cccd0edb5dafb3a160179a60f75fd8c835c0be82
+e7697c7b626ba3a426106d83f4c3a052fcde02a4
+66d087f3dd2e19ffe340c26ef17efe0062a59290
+def569db592ed1715ae509644444c3feda06a536
+a6590c49e44aa4975b2b0152ee21ac8af3097d80
+c847de9faa1f1a06d5647949a23f523f84aba7f3
+edde81b2bdd61bd757b71a7b3839b6fef81f4be4
+a29566375836f37173ccaffa47dea25eb1240187
+5fea59ccdab484873081eaa37af88e26e3db2aed
+94325522c9be8224970f810554611d6a73877c13
+4e32fbb58154e878dd2fd4b06398f85636fd0cf4
+61262450d4d814865a4f9a84299c24daa493f66e
+c1cc2a2a1ab66f6c9c6fabe28be45d1440a57c3d
+14aed1b7c08c941b1d2ba6c1c2ffb6255c306c74
+f7ba77d23a0eea5a3034a1833b2d2552cb42fb7a
+a3f69a073dcfb6da8038607a9f14eb28b5dab2db
diff --git a/scraper/reports/misc/missing.csv b/scraper/reports/misc/missing.csv
new file mode 100644
index 00000000..62bee46d
--- /dev/null
+++ b/scraper/reports/misc/missing.csv
@@ -0,0 +1,2449 @@
+57246142814d7010d3592e3a39a1ed819dd01f3b
+7788fa76f1488b1597ee2bebc462f628e659f61e
+cca9ae621e8228cfa787ec7954bb375536160e0d
+75249ebb85b74e8932496272f38af274fbcfd696
+47190d213caef85e8b9dd0d271dbadc29ed0a953
+8bdf6f03bde08c424c214188b35be8b2dec7cdea
+3dfb822e16328e0f98a47209d7ecd242e4211f82
+d0509afe9c2c26fe021889f8efae1d85b519452a
+4b48e912a17c79ac95d6a60afed8238c9ab9e553
+084bebc5c98872e9307cd8e7f571d39ef9c1b81e
+a32c5138c6a0b3d3aff69bcab1015d8b043c91fb
+1275d6a800f8cf93c092603175fdad362b69c191
+b4ee64022cc3ccd14c7f9d4935c59b16456067d3
+d46b790d22cb59df87f9486da28386b0f99339d3
+d7cbedbee06293e78661335c7dd9059c70143a28
+eb027969f9310e0ae941e2adee2d42cdf07d938c
+7fb7ccc1aa093ca526f2d8b6f2c404d2c886f69a
+be4faea0971ef74096ec9800750648b7601dda65
+831b4d8b0c0173b0bac0e328e844a0fbafae6639
+746c0205fdf191a737df7af000eaec9409ede73f
+b0c1615ebcad516b5a26d45be58068673e2ff217
+c866a2afc871910e3282fd9498dce4ab20f6a332
+9131c990fad219726eb38384976868b968ee9d9c
+511a8cdf2127ef8aa07cbdf9660fe9e0e2dfbde7
+e1256ff535bf4c024dd62faeb2418d48674ddfa2
+8ccde9d80706a59e606f6e6d48d4260b60ccc736
+6789bddbabf234f31df992a3356b36a47451efc7
+2c052a1c77a3ec2604b3deb702d77c41418c7d3e
+f442a2f2749f921849e22f37e0480ac04a3c3fec
+ef230e3df720abf2983ba6b347c9d46283e4b690
+bd8f77b7d3b9d272f7a68defc1412f73e5ac3135
+54a9ed950458f4b7e348fa78a718657c8d3d0e05
+d02e27e724f9b9592901ac1f45830341d37140fe
+6993bca2b3471f26f2c8a47adfe444bfc7852484
+00fb2836068042c19b5197d0999e8e93b920eb9c
+007fbc7a1d7eae33b2bb59b175dd1033e5e178f3
+c5e37630d0672e4d44f7dee83ac2c1528be41c2e
+ff9195f99a1a28ced431362f5363c9a5da47a37b
+9865fe20df8fe11717d92b5ea63469f59cf1635a
+1badfeece64d1bf43aa55c141afe61c74d0bd25e
+1e21b925b65303ef0299af65e018ec1e1b9b8d60
+1b55c4e804d1298cbbb9c507497177014a923d22
+23ce6f404c504592767b8bec7d844d87b462de71
+ada063ce9a1ff230791c48b6afa29c401a9007f1
+59fc69b3bc4759eef1347161e1248e886702f8f7
+0750a816858b601c0dbf4cfb68066ae7e788f05d
+552122432b92129d7e7059ef40dc5f6045f422b5
+368e99f669ea5fd395b3193cd75b301a76150f9d
+e3d76f1920c5bf4a60129516abb4a2d8683e48ae
+3c0fffd4cdbfef4ccd92d528d8b8a60ab0929827
+6c66ae815e7e508e852ecb122fb796abbcda16a8
+28d4e027c7e90b51b7d8908fce68128d1964668a
+5da827fe558fb2e1124dcc84ef08311241761726
+30870ef75aa57e41f54310283c0057451c8c822b
+b9d0774b0321a5cfc75471b62c8c5ef6c15527f5
+e87d6c284cdd6828dfe7c092087fbd9ff5091ee4
+305346d01298edeb5c6dc8b55679e8f60ba97efb
+ee2ec0836ded2f3f37bf49fa0e985280a8addaca
+c2bd9322fa2d0a00fc62075cc0f1996fc75d42a8
+61f04606528ecf4a42b49e8ac2add2e9f92c0def
+4c4e49033737467e28aa2bb32f6c21000deda2ef
+7c6686fa4d8c990e931f1d16deabf647bf3b1986
+12095f9b35ee88272dd5abc2d942a4f55804b31e
+6de935a02f87aa31e33245c3b85ea3b7f8b1111c
+cf736f596bf881ca97ec4b29776baaa493b9d50e
+eb48a58b873295d719827e746d51b110f5716d6c
+dce5e0a1f2cdc3d4e0e7ca0507592860599b0454
+b76af8fcf9a3ebc421b075b689defb6dc4282670
+54f169ad7d1f6c9ce94381e9b5ccc1a07fd49cc6
+9a23a0402ae68cc6ea2fe0092b6ec2d40f667adb
+3c6cac7ecf546556d7c6050f7b693a99cc8a57b3
+5b5b9c6c67855ede21a60c834aea5379df7d51b7
+c858c74d30c02be2d992f82a821b925669bfca13
+713db3874b77212492d75fb100a345949f3d3235
+ccf16bcf458e4d7a37643b8364594656287f5bfc
+ed1886e233c8ecef7f414811a61a83e44c8bbf50
+336488746cc76e7f13b0ec68ccfe4df6d76cdc8f
+9ca7899338129f4ba6744f801e722d53a44e4622
+034b3f3bac663fb814336a69a9fd3514ca0082b9
+f1b4583c576d6d8c661b4b2c82bdebf3ba3d7e53
+bbf28f39e5038813afd74cf1bc78d55fcbe630f1
+4ac3cd8b6c50f7a26f27eefc64855134932b39be
+4a8480d58c30dc484bda08969e754cd13a64faa1
+766728bac030b169fcbc2fbafe24c6e22a58ef3c
+701f56f0eac9f88387de1f556acef78016b05d52
+ed96f2eb1771f384df2349879970065a87975ca7
+45e7ddd5248977ba8ec61be111db912a4387d62f
+afdf9a3464c3b015f040982750f6b41c048706f5
+ba1c0600d3bdb8ed9d439e8aa736a96214156284
+a0b1990dd2b4cd87e4fd60912cc1552c34792770
+e4fa062bff299a0bcef9f6b2e593c85be116c9f1
+963d0d40de8780161b70d28d2b125b5222e75596
+ed09db68bf317cad27df6ed96a0c16eab6b2f827
+a0fd85b3400c7b3e11122f44dc5870ae2de9009a
+ce9e1dfa7705623bb67df3a91052062a0a0ca456
+daa4cfde41d37b2ab497458e331556d13dd14d0b
+4b936847f39094d6cb0bde68cea654d948c4735d
+c5ea084531212284ce3f1ca86a6209f0001de9d1
+f095b5770f0ff13ba9670e3d480743c5e9ad1036
+bbc5f4052674278c96abe7ff9dc2d75071b6e3f3
+3be8f1f7501978287af8d7ebfac5963216698249
+7360a2adcd6e3fe744b7d7aec5c08ee31094dfd4
+b1fdd4ae17d82612cefd4e78b690847b071379d3
+708f4787bec9d7563f4bb8b33834de445147133b
+88e2efab01e883e037a416c63a03075d66625c26
+696236fb6f986f6d5565abb01f402d09db68e5fa
+f61829274cfe64b94361e54351f01a0376cd1253
+96c6f50ce8e1b9e8215b8791dabd78b2bbd5f28d
+0a34fe39e9938ae8c813a81ae6d2d3a325600e5c
+837e99301e00c2244023a8a48ff98d7b521c93ac
+fdff2da5bdca66e0ab5874ef58ac2205fb088ed7
+3d18ce183b5a5b4dcaa1216e30b774ef49eaa46f
+d9deafd9d9e60657a7f34df5f494edff546c4fb8
+9207671d9e2b668c065e06d9f58f597601039e5e
+a78025f39cf78f2fc66c4b2942fbe5bad3ea65fc
+6d8c9a1759e7204eacb4eeb06567ad0ef4229f93
+87e6cb090aecfc6f03a3b00650a5c5f475dfebe1
+1fb980e137b2c9f8781a0d98c026e164b497ddb1
+6966d9d30fa9b7c01523425726ab417fd8428790
+01e27b6d1af4c9c2f50e2908b5f3b2331ff24846
+5bd3d08335bb4e444a86200c5e9f57fd9d719e14
+a40edf6eb979d1ddfe5894fac7f2cf199519669f
+40e1743332523b2ab5614bae5e10f7a7799161f4
+f201baf618574108bcee50e9a8b65f5174d832ee
+80ed678ef28ccc1b942e197e0393229cd99d55c8
+5fa6e4a23da0b39e4b35ac73a15d55cee8608736
+17c0d99171efc957b88c31a465c59485ab033234
+6d0fe30444c6f4e4db3ad8b02fb2c87e2b33c58d
+530243b61fa5aea19b454b7dbcac9f463ed0460e
+95e3b78eb4d5b469f66648ed4f37e45e0e01e63e
+d4a5eaf2e9f2fd3e264940039e2cbbf08880a090
+0a6a25ee84fc0bf7284f41eaa6fefaa58b5b329a
+3352426a67eabe3516812cb66a77aeb8b4df4d1b
+2724ba85ec4a66de18da33925e537f3902f21249
+234c106036964131c0f2daf76c47ced802652046
+f0a4a3fb6997334511d7b8fc090f9ce894679faf
+83295bce2340cb87901499cff492ae6ff3365475
+fd809ee36fa6832dda57a0a2403b4b52c207549d
+74ce7e5e677a4925489897665c152a352c49d0a2
+e4754afaa15b1b53e70743880484b8d0736990ff
+185263189a30986e31566394680d6d16b0089772
+2c62b9e64aeddf12f9d399b43baaefbca8e11148
+7cdf3bc1de6c7948763c0c2dfa4384dcbd3677a0
+c8ca6a2dc41516c16ea0747e9b3b7b1db788dbdd
+6b8d0569fffce5cc221560d459d6aa10c4db2f03
+56fd4c05869e11e4935d48aa1d7abb96072ac242
+1fe1a78c941e03abe942498249c041b2703fd3d2
+f070d739fb812d38571ec77490ccd8777e95ce7a
+ec1e03ec72186224b93b2611ff873656ed4d2f74
+dcf6ecd51ba135d432fcb7697fc6c52e4e7b0a43
+e97ba85a4550667b8a28f83a98808d489e0ff3bc
+6e38011e38a1c893b90a48e8f8eae0e22d2008e8
+86b6afc667bb14ff4d69e7a5e8bb2454a6bbd2cd
+dee406a7aaa0f4c9d64b7550e633d81bc66ff451
+51b42da0706a1260430f27badcf9ee6694768b9b
+891b10c4b3b92ca30c9b93170ec9abd71f6099c4
+f2d5bb329c09a5867045721112a7dad82ca757a3
+8f772d9ce324b2ef5857d6e0b2a420bc93961196
+6f5ce5570dc2960b8b0e4a0a50eab84b7f6af5cb
+927ba64123bd4a8a31163956b3d1765eb61e4426
+e01bb53b611c679141494f3ffe6f0b91953af658
+f7ae38a073be7c9cd1b92359131b9c8374579b13
+f7dea4454c2de0b96ab5cf95008ce7144292e52a
+c3d3d2229500c555c7a7150a8b126ef874cbee1c
+eee06d68497be8bf3a8aba4fde42a13aa090b301
+bbd1eb87c0686fddb838421050007e934b2d74ab
+370b6b83c7512419188f5373a962dd3175a56a9b
+2201f187a7483982c2e8e2585ad9907c5e66671d
+438c4b320b9a94a939af21061b4502f4a86960e3
+cb13e29fb8af6cfca568c6dc523da04d1db1fff5
+b26e8f6ad7c2d4c838660d5a17337ce241442ed9
+ec8ec2dfd73cf3667f33595fef84c95c42125945
+8a63a2b10068b6a917e249fdc73173f5fd918db0
+7cfbf90368553333b47731729e0e358479c25340
+9b2c359c36c38c289c5bacaeb5b1dd06b464f301
+7b0f1fc93fb24630eb598330e13f7b839fb46cce
+0f21a39fa4c0a19c4a5b4733579e393cb1d04f71
+9901f473aeea177a55e58bac8fd4f1b086e575a4
+754f7f3e9a44506b814bf9dc06e44fecde599878
+127c7f87f289b1d32e729738475b337a6b042cf7
+30fd1363fa14965e3ab48a7d6235e4b3516c1da1
+9627f28ea5f4c389350572b15968386d7ce3fe49
+b93bf0a7e449cfd0db91a83284d9eba25a6094d8
+6a52e6fce541126ff429f3c6d573bc774f5b8d89
+c38b1fa00f1f370c029984c55d4d2d40b529d00c
+a60db9ca8bc144a37fe233b08232d9c91641cbb5
+6932baa348943507d992aba75402cfe8545a1a9b
+badb95dbdfb3f044a46d7ba0ee69dba929c511b1
+f4ba07d2ae6c9673502daf50ee751a5e9262848f
+d06bcb2d46342ee011e652990edf290a0876b502
+91f0a95b8eb76e8fa24c8267e4a7a17815fc7a11
+4342a2b63c9c344d78cf153600cd918a5fecad59
+5b2cfee6e81ef36507ebf3c305e84e9e0473575a
+8e24db957be2b643db464cc566bfabc650f1ffac
+ded968b97bd59465d5ccda4f1e441f24bac7ede5
+6ad107c08ac018bfc6ab31ec92c8a4b234f67d49
+6dcf418c778f528b5792104760f1fbfe90c6dd6a
+5a3da29970d0c3c75ef4cb372b336fc8b10381d7
+e0162dea3746d58083dd1d061fb276015d875b2e
+b6bb883dd14f2737d0d6225cf4acbf050d307634
+92e464a5a67582d5209fa75e3b29de05d82c7c86
+9939498315777b40bed9150d8940fc1ac340e8ba
+3176ee88d1bb137d0b561ee63edf10876f805cf0
+fb87045600da73b07f0757f345a937b1c8097463
+88a898592b4c1dfd707f04f09ca58ec769a257de
+b908edadad58c604a1e4b431f69ac8ded350589a
+7df4f96138a4e23492ea96cf921794fc5287ba72
+a6ce2f0795839d9c2543d64a08e043695887e0eb
+3bb670b2afdcc45da2b09a02aac07e22ea7dbdc2
+013305c13cfabaea82c218b841dbe71e108d2b97
+f472cb8380a41c540cfea32ebb4575da241c0288
+4bbe460ab1b279a55e3c9d9f488ff79884d01608
+6ca2c5ff41e91c34696f84291a458d1312d15bf2
+3e40991ab1daa2a4906eb85a5d6a01a958b6e674
+85ae6fa48e07857e17ac4bd48fb804785483e268
+657e702326a1cbc561e059476e9be4d417c37795
+93dcea2419ca95b96a47e541748c46220d289d77
+1a327c588b8f1057b40ecba451145dd885598e5d
+34fd227f4fdbc7fe028cc1f7d92cb59204333718
+42a6beed493c69d5bad99ae47ea76497c8e5fdae
+849f891973ad2b6c6f70d7d43d9ac5805f1a1a5b
+96a9ca7a8366ae0efe6b58a515d15b44776faf6e
+8986585975c0090e9ad97bec2ba6c4b437419dae
+d3b0839324d0091e70ce34f44c979b9366547327
+badcd992266c6813063c153c41b87babc0ba36a3
+51d1a6e15936727e8dd487ac7b7fd39bd2baf5ee
+38f1fac3ed0fd054e009515e7bbc72cdd4cf801a
+a1e07c31184d3728e009d4d1bebe21bf9fe95c8e
+b6f682648418422e992e3ef78a6965773550d36b
+2d8001ffee6584b3f4d951d230dc00a06e8219f8
+e4d7b8eb0a8e6d2bb5b90b027c1bf32bad320ba5
+e1630014a5ae3d2fb7ff6618f1470a567f4d90f5
+6c58e3a8209fef0e28ca2219726c15ea5f284f4f
+9cc8cf0c7d7fa7607659921b6ff657e17e135ecc
+58bf72750a8f5100e0c01e55fd1b959b31e7dbce
+c39ffc56a41d436748b9b57bdabd8248b2d28a32
+85ec86f8320ba2ed8b3da04d1c291ce88b8969c0
+93420d9212dd15b3ef37f566e4d57e76bb2fab2f
+acee2201f8a15990551804dd382b86973eb7c0a8
+6f7d06ced04ead3b9a5da86b37e7c27bfcedbbdd
+fe48f0e43dbdeeaf4a03b3837e27f6705783e576
+d4f0960c6587379ad7df7928c256776e25952c60
+c1ebbdb47cb6a0ed49c4d1cf39d7565060e6a7ee
+9e105c4a176465d14434fb3f5bae67f57ff5fba2
+94eeae23786e128c0635f305ba7eebbb89af0023
+b3b467961ba66264bb73ffe00b1830d7874ae8ce
+40b7e590dfd1cdfa1e0276e9ca592e02c1bd2b5b
+6dbdb07ce2991db0f64c785ad31196dfd4dae721
+9bd35145c48ce172b80da80130ba310811a44051
+67484723e0c2cbeb936b2e863710385bdc7d5368
+f3b7938de5f178e25a3cf477107c76286c0ad691
+c86e6ed734d3aa967deae00df003557b6e937d3d
+7eb895e7de883d113b75eda54389460c61d63f67
+5c35ac04260e281141b3aaa7bbb147032c887f0c
+cd023d2d067365c83d8e27431e83e7e66082f718
+d69271c7b77bc3a06882884c21aa1b609b3f76cc
+b084683e5bab9b2bc327788e7b9a8e049d5fff8f
+e5d53a335515107452a30b330352cad216f88fc3
+52d7eb0fbc3522434c13cc247549f74bb9609c5d
+6dc1f94b852538d572e4919238ddb10e2ee449a4
+878301453e3d5cb1a1f7828002ea00f59cbeab06
+405d9a71350c9a13adea41f9d7f7f9274793824f
+d8fbd3a16d2e2e59ce0cff98b3fd586863878dc1
+96ba65bffdddef7c7737c0f42ff4299e95cd85c2
+c9c9ade2ef4dffb7582a629a47ea70c31be7a35e
+e065a2cb4534492ccf46d0afc81b9ad8b420c5ec
+dcf71245addaf66a868221041aabe23c0a074312
+9efdb73c6833df57732b727c6aeac510cadb53fe
+0b82bf595e76898993ed4f4b2883c42720c0f277
+a896ddeb0d253739c9aaef7fc1f170a2ba8407d3
+72cbbdee4f6eeee8b7dd22cea6092c532271009f
+24286ef164f0e12c3e9590ec7f636871ba253026
+377f2b65e6a9300448bdccf678cde59449ecd337
+1ee3b4ba04e54bfbacba94d54bf8d05fd202931d
+55e87050b998eb0a8f0b16163ef5a28f984b01fa
+4d90d7834ae25ee6176c096d5d6608555766c0b1
+878169be6e2c87df2d8a1266e9e37de63b524ae7
+bc607bee2002c6c6bf694a15efd0a5d049767237
+68caf5d8ef325d7ea669f3fb76eac58e0170fff0
+53bfe2ab770e74d064303f3bd2867e5bf7b86379
+c9bbd7828437e70cc3e6863b278aa56a7d545150
+8818b12aa0ff3bf0b20f9caa250395cbea0e8769
+6f7a8b3e8f212d80f0fb18860b2495be4c363eac
+4db0968270f4e7b3fa73e41c50d13d48e20687be
+bd9c9729475ba7e3b255e24e7478a5acb393c8e9
+64d7e62f46813b5ad08289aed5dc4825d7ec5cff
+30fb5c24cc15eb8cde5e389bf368d65fb96513e4
+bf5940d57f97ed20c50278a81e901ae4656f0f2c
+69a55c30c085ad1b72dd2789b3f699b2f4d3169f
+ef5531711a69ed687637c48930261769465457f0
+8a8861ad6caedc3993e31d46e7de6c251a8cda22
+ef458499c3856a6e9cd4738b3e97bef010786adb
+3b84d074b8622fac125f85ab55b63e876fed4628
+18010284894ed0edcca74e5bf768ee2e15ef7841
+bb2f61a057bbf176e402d171d79df2635ccda9f6
+35e0256b33212ddad2db548484c595334f15b4da
+782188821963304fb78791e01665590f0cd869e8
+83f80fd4eb614777285202fa99e8314e3e5b169c
+4e0636a1b92503469b44e2807f0bb35cc0d97652
+0ee5c4112208995bf2bb0fb8a87efba933a94579
+e85a255a970ee4c1eecc3e3d110e157f3e0a4629
+923ec0da8327847910e8dd71e9d801abcbc93b08
+9b0ead0a20a2b7c4ae40568d8d1c0c2b23a6b807
+572dbaee6648eefa4c9de9b42551204b985ff863
+2480f8dccd9054372d696e1e521e057d9ac9de17
+556b05ab6eff48d32ffbd04f9008b9a5c78a4ad7
+86f3552b822f6af56cb5079cc31616b4035ccc4e
+8ae642c87f0d6eeff1c6362571e7cd36dcda60ae
+8fa9cb5dac394e30e4089bf5f4ffecc873d1da96
+e572c42d8ef2e0fadedbaae77c8dfe05c4933fbf
+4bf85ef995c684b841d0a5a002d175fadd922ff0
+58d47c187b38b8a2bad319c789a09781073d052d
+59d225486161b43b7bf6919b4a4b4113eb50f039
+c038beaa228aeec174e5bd52460f0de75e9cccbe
+e43045a061421bd79713020bc36d2cf4653c044d
+d1edb8ba9d50817dbfec7e30f25b1846941e84d8
+d307a766cc9c728a24422313d4c3dcfdb0d16dd5
+ed2f4e5ecbc4b08ee0784e97760a7f9e5ea9efae
+1c9efb6c895917174ac6ccc3bae191152f90c625
+02fc9e7283b79183eb3757a9b6ddeb8c91c209bb
+7ec431e36919e29524eceb1431d3e1202637cf19
+44d23df380af207f5ac5b41459c722c87283e1eb
+dc5d04d34b278b944097b8925a9147773bbb80cc
+b999364980e4c21d9c22cc5a9f14501432999ca4
+e8f4ded98f5955aad114f55e7aca6b540599236b
+d3d39e419ac98db2de1a9d5a05cb0b4ca5cae8fd
+206e24f7d4b3943b35b069ae2d028143fcbd0704
+4552f4d46a2cc67ccc4dd8568e5c95aa2eedb4ec
+7ef0cc4f3f7566f96f168123bac1e07053a939b2
+25960f0a2ed38a89fa8076a448ca538de2f1e183
+a5f70e0cd7da2b2df05fadb356a24743f3cf459a
+872dfdeccf99bbbed7c8f1ea08afb2d713ebe085
+e9c008d31da38d9eef67a28d2c77cb7daec941fb
+c75e6ce54caf17b2780b4b53f8d29086b391e839
+a89cbc90bbb4477a48aec185f2a112ea7ebe9b4d
+5435d5f8b9f4def52ac84bee109320e64e58ab8f
+9af9a88c60d9e4b53e759823c439fc590a4b5bc5
+b72eebffe697008048781ab7b768e0c96e52236a
+57178b36c21fd7f4529ac6748614bb3374714e91
+361eaef45fccfffd5b7df12fba902490a7d24a8d
+380d5138cadccc9b5b91c707ba0a9220b0f39271
+4d47261b2f52c361c09f7ab96fcb3f5c22cafb9f
+ba01dbfa29dc86d1279b2e9b9eeca1c52509bbda
+1860b8f63ce501bd0dfa9e6f2debc080e88d9baa
+b59f441234d2d8f1765a20715e227376c7251cd7
+43dce79cf815b5c7068b1678f6200dabf8f5de31
+571b83f7fc01163383e6ca6a9791aea79cafa7dd
+ab80582807506c0f840bd1ba03a8b84f8ac72f79
+5180df9d5eb26283fb737f491623395304d57497
+885c37f94e9edbbb2177cfba8cb1ad840b2a5f20
+9fc993aeb0a007ccfaca369a9a8c0ccf7697261d
+5e7e055ef9ba6e8566a400a8b1c6d8f827099553
+c87f7ee391d6000aef2eadb49f03fc237f4d1170
+3bb6570d81685b769dc9e74b6e4958894087f3f1
+27da432cf2b9129dce256e5bf7f2f18953eef5a5
+173657da03e3249f4e47457d360ab83b3cefbe63
+ccb54fc5f263a8bc2a8373839cb6855f528f10d3
+a6d47f7aa361ab9b37c7f3f868280318f355fadc
+7c8e0f3053e09da6d8f9a1812591a35bccd5c669
+c394a5dfe5bea5fbab4c2b6b90d2d03e01fb29c0
+e79bacc03152ea55343e6af97bcd17d8904cf5ef
+bb0ecedde7d6e837dc9a5e115302a2aaad1035e1
+cdae8e9cc9d605856cf5709b2fdf61f722d450c1
+4d6ad0c7b3cf74adb0507dc886993e603c863e8c
+2770b095613d4395045942dc60e6c560e882f887
+17479e015a2dcf15d40190e06419a135b66da4e0
+03ac1c694bc84a27621da6bfe73ea9f7210c6d45
+dbb16032dd8f19bdfd045a1fc0fc51f29c70f70a
+59a6c9333c941faf2540979dcfcb5d503a49b91e
+84574aa43a98ad8a29470977e7b091f5a5ec2366
+46e72046a9bb2d4982d60bcf5c63dbc622717f0f
+ec00ecb64fa206cea8b2e716955a738a96424084
+cd55fb30737625e86454a2861302b96833ed549d
+eed93d2e16b55142b3260d268c9e72099c53d5bc
+b5fdd7778503f27c9d9bf77fab193b475fab6076
+08903bf161a1e8dec29250a752ce9e2a508a711c
+672fae3da801b2a0d2bad65afdbbbf1b2320623e
+3ac3a714042d3ebc159546c26321a1f8f4f5f80c
+f997a71f1e54d044184240b38d9dc680b3bbbbc0
+bb4be8e24d7b8ed56d81edec435b7b59bad96214
+cef73d305e5368ee269baff53ec20ea3ae7cdd82
+f28b7d62208fdaaa658716403106a2b0b527e763
+76cd5e43df44e389483f23cb578a9015d1483d70
+341002fac5ae6c193b78018a164d3c7295a495e4
+c68ec931585847b37cde9f910f40b2091a662e83
+89d3a57f663976a9ac5e9cdad01267c1fc1a7e06
+bec0c33d330385d73a5b6a05ad642d6954a6d632
+54204e28af73c7aca073835a14afcc5d8f52a515
+2c1ffb0feea5f707c890347d2c2882be0494a67a
+7ebb153704706e457ab57b432793d2b6e5d12592
+a1d86c898da3aea54deafd60864aa05dff8a4c9c
+49df381ea2a1e7f4059346311f1f9f45dd997164
+a59c0cf3d2c5bf144ee0dbc1152b1b5dd7634990
+f3ca2c43e8773b7062a8606286529c5bc9b3ce25
+b7ec41005ce4384e76e3be854ecccd564d2f89fb
+fe0c51fd41cb2d5afa1bc1900bbbadb38a0de139
+c5c53d42e551f3c8f6ca2c13335af80a882009fa
+4896909796f9bd2f70a2cb24bf18daacd6a12128
+a98316980b126f90514f33214dde51813693fe0d
+3bd10f7603c4f5a4737c5613722124787d0dd818
+809e5884cf26b71dc7abc56ac0bad40fb29c671c
+de0df8b2b4755da9f70cf1613d7b12040d0ce8ef
+cfb8bc66502fb5f941ecdb22aec1fdbfdb73adce
+d44a93027208816b9e871101693b05adab576d89
+841855205818d3a6d6f85ec17a22515f4f062882
+1773d65c1dc566fd6128db65e907ac91b4583bed
+021e008282714eaefc0796303f521c9e4f199d7e
+f03a82fd4a039c1b94a0e8719284a777f776fb22
+e9a5a38e7da3f0aa5d21499149536199f2e0e1f7
+4e061a302816f5890a621eb278c6efa6e37d7e2f
+ac8441e30833a8e2a96a57c5e6fede5df81794af
+052f994898c79529955917f3dfc5181586282cf8
+4cdb6144d56098b819076a8572a664a2c2d27f72
+dc6ad30c7a4bc79bb06b4725b16e202d3d7d8935
+b971266b29fcecf1d5efe1c4dcdc2355cb188ab0
+58542eeef9317ffab9b155579256d11efb4610f2
+2983cf95743be82671a71528004036bd19172712
+d9e66b877b277d73f8876f537206395e71f58269
+2a612a7037646276ff98141d3e7abbc9c91fccb8
+6d70344ae6f6108144a15e9debc7b0be4e3335f1
+78174c2be084e67f48f3e8ea5cb6c9968615a42c
+ab734bac3994b00bf97ce22b9abc881ee8c12918
+df577a89830be69c1bfb196e925df3055cafc0ed
+a3d8b5622c4b9af1f753aade57e4774730787a00
+bb4f83458976755e9310b241a689c8d21b481238
+e00d391d7943561f5c7b772ab68e2bb6a85e64c4
+432d8cba544bf7b09b0455561fea098177a85db1
+6f22628d34a486d73c6b46eb071200a00e3abae3
+73f341ff68caa9f8802e9e81bfa90d88bbdbd9d2
+5b5b568a0ba63d00e16a263051c73e09ab83e245
+fdaf65b314faee97220162980e76dbc8f32db9d6
+d31328b12eef33e7722b8e5505d0f9d9abe2ffd9
+f2700e3d69d3cce2e0b1aea0d7f87e74aff437cd
+9ba4a5e0b7bf6e26563d294f1f3de44d95b7f682
+39d6f8b791995dc5989f817373391189d7ac478a
+be393cd567b338da6ed60181c8ad429627578a31
+cbca355c5467f501d37b919d8b2a17dcb39d3ef9
+56dca23481de9119aa21f9044efd7db09f618704
+a9ae55c83a8047c6cdf7c958fd3d4a6bfb0a13df
+7831ab4f8c622d91974579c1ff749dadc170c73c
+d3d5d86afec84c0713ec868cf5ed41661fc96edc
+a1081cb856faae25df14e25045cd682db8028141
+ec90738b6de83748957ff7c8aeb3150b4c9b68bb
+993d189548e8702b1cb0b02603ef02656802c92b
+098363b29eef1471c494382338687f2fe98f6e15
+e6b45d5a86092bbfdcd6c3c54cda3d6c3ac6b227
+9be653e1bc15ef487d7f93aad02f3c9552f3ee4a
+8bebb26880274bdb840ebcca530caf26c393bf45
+feea73095b1be0cbae1ad7af8ba2c4fb6f316d35
+c05ae45c262b270df1e99a32efa35036aae8d950
+b8fc620a1563511744f1a9386bdfa09a2ea0f71b
+d3edbfe18610ce63f83db83f7fbc7634dde1eb40
+ed184fda0306079f2ee55a1ae60fbf675c8e11c6
+4f7b92bd678772552b3c3edfc9a7c5c4a8c60a8e
+e96cef8732f3021080c362126518455562606f2d
+9f2984081ef88c20d43b29788fdf732ceabd5d6a
+e56c4c41bfa5ec2d86c7c9dd631a9a69cdc05e69
+80d4cf7747abfae96328183dd1f84133023c2668
+2135a3d9f4b8f5771fa5fc7c7794abf8c2840c44
+c1298120e9ab0d3764512cbd38b47cd3ff69327b
+fab60b3db164327be8588bce6ce5e45d5b882db6
+c6382de52636705be5898017f2f8ed7c70d7ae96
+45b9b7fe3850ef83d39d52f6edcc0c24fcc0bc73
+fcf91995dc4d9b0cee84bda5b5b0ce5b757740ac
+48499deeaa1e31ac22c901d115b8b9867f89f952
+33ef419dffef85443ec9fe89a93f928bafdc922e
+cdcfc75f54405c77478ab776eb407c598075d9f8
+a92147bed9c17c311c6081beb0ef4c3165b6268e
+e8523c4ac9d7aa21f3eb4062e09f2a3bc1eedcf7
+e7b2b0538731adaacb2255235e0a07d5ccf09189
+fd15e397629e0241642329fc8ee0b8cd6c6ac807
+5779e3e439c90d43648db107e848aeb954d3e347
+16b9d258547f1eccdb32111c9f45e2e4bbee79af
+628a3f027b7646f398c68a680add48c7969ab1d9
+8da32ff9e3759dc236878ac240728b344555e4e9
+014e3d0fa5248e6f4634dc237e2398160294edce
+7f26c615dd187ca5e4b15759d5cb23ab3ea9d9a9
+275b5091c50509cc8861e792e084ce07aa906549
+cd4941cbef1e27d7afdc41b48c1aff5338aacf06
+b3ba7ab6de023a0d58c741d6abfa3eae67227caf
+1b3587363d37dd197b6adbcfa79d49b5486f27d8
+7d2556d674ad119cf39df1f65aedbe7493970256
+2d8d089d368f2982748fde93a959cf5944873673
+22648dcd3100432fe0cc71e09de5ee855c61f12b
+51faacfa4fb1e6aa252c6970e85ff35c5719f4ff
+55ea0c775b25d9d04b5886e322db852e86a556cd
+3240c9359061edf7a06bfeb7cc20c103a65904c2
+23429ef60e7a9c0e2f4d81ed1b4e47cc2616522f
+60542b1a857024c79db8b5b03db6e79f74ec8f9f
+aa3c9de34ef140ec812be85bb8844922c35eba47
+8164ebc07f51c9e0db4902980b5ac3f5a8d8d48c
+ee463f1f72a7e007bae274d2d42cd2e5d817e751
+d5de42d37ee84c86b8f9a054f90ddb4566990ec0
+b2c60061ad32e28eb1e20aff42e062c9160786be
+4641986af5fc8836b2c883ea1a65278d58fe4577
+fa90b825346a51562d42f6b59a343b98ea2e501a
+daefac0610fdeff415c2a3f49b47968d84692e87
+f08e425c2fce277aedb51d93757839900d591008
+b1b993a1fbcc827bcb99c4cc1ba64ae2c5dcc000
+8f3da45ff0c3e1777c3a7830f79c10f5896bcc21
+7697295ee6fc817296bed816ac5cae97644c2d5b
+efa08283656714911acff2d5022f26904e451113
+5a5f9e0ed220ce51b80cd7b7ede22e473a62062c
+fdfd57d4721174eba288e501c0c120ad076cdca8
+a8d52265649c16f95af71d6f548c15afc85ac905
+f6e00d6430cbbaa64789d826d093f7f3e323b082
+4f8345f31e38f65f1155569238d14bd8517606f4
+16fadde3e68bba301f9829b3f99157191106bd0f
+1287bfe73e381cc8042ac0cc27868ae086e1ce3b
+663efaa0671eace1100fdbdecacd94216a17b1db
+7bbaa09c9e318da4370a83b126bcdb214e7f8428
+103c8eaca2a2176babab2cc6e9b25d48870d6928
+e39a66a6d1c5e753f8e6c33cd5d335f9bc9c07fa
+4f8b4784d0fca31840307650f7052b0dde736a76
+6c01b349edb2d33530e8bb07ba338f009663a9dd
+d9a5c82b710b1f4f1ffb67be2ae1d3c0ae7f6c55
+fb6cc23fd6bd43bd4cacf6a57cd2c7c8dfe5269d
+68604e7e1b01cdbd3c23832976d66f1a86edaa8f
+cfffae38fe34e29d47e6deccfd259788176dc213
+8ed33184fccde677ec8413ae06f28ea9f2ca70f3
+cc05f758ccdf57d77b06b96b9d601bf2795a6cc4
+b7461aac36fc0b8a24ecadf6c5b5caf54f2aa2f7
+61971f8e6fff5b35faed610d02ad14ccfc186c70
+2902f62457fdf7e8e8ee77a9155474107a2f423e
+6cfc337069868568148f65732c52cbcef963f79d
+c84233f854bbed17c22ba0df6048cbb1dd4d3248
+f33bd953d2df0a5305fc8a93a37ff754459a906c
+e9d43231a403b4409633594fa6ccc518f035a135
+b558be7e182809f5404ea0fcf8a1d1d9498dc01a
+64ec02e1056de4b400f9547ce56e69ba8393e2ca
+e3b324101157daede3b4d16bdc9c2388e849c7d4
+a4898f55f12e6393b1c078803909ea715bf71730
+4e27fec1703408d524d6b7ed805cdb6cba6ca132
+193bc8b663d041bc34134a8407adc3e546daa9cc
+3f9a7d690db82cf5c3940fbb06b827ced59ec01e
+6da711d07b63c9f24d143ca3991070736baeb412
+113b06e70b7eead8ae7450bafe9c91656705024c
+a85f691c9f82a248aa2c86d4a63b9036d6cf47ab
+1d729693a888a460ee855040f62bdde39ae273af
+b0c512fcfb7bd6c500429cbda963e28850f2e948
+de162d4b8450bf2b80f672478f987f304b7e6ae4
+e295c1aa47422eb35123053038e62e9aa50a2e3a
+bad2df94fa771869fa35bd11a1a7ab2e3f6d1da3
+c98b13871a3bc767df0bdd51ff00c5254ede8b22
+b4b0bf0cbe1a2c114adde9fac64900b2f8f6fee4
+7a81967598c2c0b3b3771c1af943efb1defd4482
+31d51e48dbd9e7253eafe0719f3788adb564a971
+506c2fbfa9d16037d50d650547ad3366bb1e1cde
+d2f2b10a8f29165d815e652f8d44955a12d057e6
+5cbe1445d683d605b31377881ac8540e1d17adf0
+f1173a4c5e3501323b37c1ae9a6d7dd8a236eab8
+529baf1a79cca813f8c9966ceaa9b3e42748c058
+42ea8a96eea023361721f0ea34264d3d0fc49ebd
+632b24ddd42fda4aebc5a8af3ec44f7fd3ecdc6c
+e4abc40f79f86dbc06f5af1df314c67681dedc51
+59b83666c1031c3f509f063b9963c7ad9781ca23
+c12260540ec14910f5ec6e38d95bdb606826b32e
+8633732d9f787f8497c2696309c7d70176995c15
+ca44a838da4187617dca9f6249d8c4b604661ec7
+239958d6778643101ab631ec354ea1bc4d33e7e0
+cd444ee7f165032b97ee76b21b9ff58c10750570
+e9809c0c6bf33cfe232a63b0a13f9b1263c58cb8
+4f051022de100241e5a4ba8a7514db9167eabf6e
+f94f366ce14555cf0d5d34248f9467c18241c3ee
+982fcead58be419e4f34df6e806204674a4bc579
+55c4efc082a8410b528af7325de8148b80cf41e3
+b53485dbdd2dc5e4f3c7cff26bd8707964bb0503
+3803b91e784922a2dacd6a18f61b3100629df932
+fcceea054cb59f1409dda181198ed4070ed762c9
+562f7555e5cb79ce0fe834c4613264d8378dd007
+614079f1a0d0938f9c30a1585f617fa278816d53
+1025c4922491745534d5d4e8c6e74ba2dc57b138
+cbe021d840f9fc1cb191cba79d3f7e3bbcda78d3
+30c93fec078b98453a71f9f21fbc9512ab3e916f
+a1f1120653bb1bd8bd4bc9616f85fdc97f8ce892
+2866cbeb25551257683cf28f33d829932be651fe
+e16f73f3a63c44cf285b8c1bc630eb8377b85b6d
+a00fdf49e5e0a73eb24345cb25a0bd1383a10021
+6a931e7b7475635f089dd33e8d9a2899ae963804
+2059d2fecfa61ddc648be61c0cbc9bc1ad8a9f5b
+6754c98ba73651f69525c770fb0705a1fae78eb5
+9d57c4036a0e5f1349cd11bc342ac515307b6720
+9296f4ac0180e29226d6c016b5a4d5d2964eaaf6
+cc31db984282bb70946f6881bab741aa841d3a7c
+fb9ad920809669c1b1455cc26dbd900d8e719e61
+3619a9b46ad4779d0a63b20f7a6a8d3d49530339
+aff8705fb2f2ae460cb3980b47f2e85c2e6dd41a
+e287ff7997297ce1197359ed0fb2a0bd381638c9
+beae35eb5b2c7f63dfa9115f07b5ba0319709951
+79744fc71bea58d2e1918c9e254b10047472bd76
+950171acb24bb24a871ba0d02d580c09829de372
+1a849b694f2d68c3536ed849ed78c82e979d64d5
+77fbbf0c5729f97fcdbfdc507deee3d388cd4889
+39c8b34c1b678235b60b648d0b11d241a34c8e32
+26e570049aaedcfa420fc8c7b761bc70a195657c
+a775da3e6e6ea64bffab7f9baf665528644c7ed3
+e896389891ba84af58a8c279cf8ab5de3e9320ee
+55aafdef9d9798611ade1a387d1e4689f2975e51
+860588fafcc80c823e66429fadd7e816721da42a
+1fdeba9c4064b449231eac95e610f3288801fd3e
+df2c685aa9c234783ab51c1aa1bf1cb5d71a3dbb
+652ec3947d3d04dda719b1f5ba7c975e567166ef
+0bf0029c9bdb0ac61fda35c075deb1086c116956
+281486d172cf0c78d348ce7d977a82ff763efccd
+b104c8ef6735eba1d29f50c99bbbf99d33fc8dc2
+68f61154a0080c4aae9322110c8827978f01ac2e
+747c25bff37b96def96dc039cc13f8a7f42dbbc7
+fc0f5859a111fb17e6dcf6ba63dd7b751721ca61
+b6d0e461535116a675a0354e7da65b2c1d2958d4
+f5c57979ec3d8baa6f934242965350865c0121bd
+ae2cf545565c157813798910401e1da5dc8a6199
+ef4ecb76413a05c96eac4c743d2c2a3886f2ae07
+f76a6b1d6029769e2dc1be4dadbee6a7ba777429
+2cac8ab4088e2bdd32dcb276b86459427355085c
+eaf020bc8a3ed5401fc3852f7037a03b2525586a
+90e7a86a57079f17f1089c3a46ea9bfd1d49226c
+1c25a3c8ef3e2c4dbff337aa727d13f5eba40fb2
+e569f4bd41895028c4c009e5b46b935056188e91
+264f7ab36ff2e23a1514577a6404229d7fe1242b
+214072c84378802a0a0fde0b93ffb17bc04f3759
+18941b52527e6f15abfdf5b86a0086935706e83b
+51d048b92f6680aca4a8adf07deb380c0916c808
+8bbd40558a99e33fac18f6736b8fe99f4a97d9b1
+0d33b6c8b4d1a3cb6d669b4b8c11c2a54c203d1a
+b51d11fa400d66b9f9d903a60c4ebe03fd77c8f2
+b5f3b0f45cf7f462a9c463a941e34e102a029506
+9f1a854d574d0bd14786c41247db272be6062581
+8cb6daba2cb1e208e809633133adfee0183b8dd2
+50a0930cb8cc353e15a5cb4d2f41b365675b5ebf
+31dd6bafd6e7c6095eb8d0591abac3b0106a75e3
+73c9cbbf3f9cea1bc7dce98fce429bf0616a1a8c
+e57014b4106dd1355e69a0f60bb533615a705606
+f4f6fc473effb063b7a29aa221c65f64a791d7f4
+6e00a406edb508312108f683effe6d3c1db020fb
+e393a038d520a073b9835df7a3ff104ad610c552
+d904f945c1506e7b51b19c99c632ef13f340ef4c
+587b8c147c6253878128ddacf6e5faf8272842a4
+af29ad70ab148c83e1faa8b3098396bc1cd87790
+91ead35d1d2ff2ea7cf35d15b14996471404f68d
+93dd4e512cd7647aecbfc0cd4767adf5d9289c3d
+19b492d426f092d80825edba3b02e354c312295f
+f3ea181507db292b762aa798da30bc307be95344
+7343f0b7bcdaf909c5e37937e295bf0ac7b69499
+2aec012bb6dcaacd9d7a1e45bc5204fac7b63b3c
+a8a30a8c50d9c4bb8e6d2dd84bc5b8b7f2c84dd8
+3157be811685c93d0cef7fa4c489efea581f9b8e
+437642cfc8c34e445ea653929e2d183aaaeeb704
+ccb2ecb30a50460c9189bb55ba594f2300882747
+76b11c281ac47fe6d95e124673a408ee9eb568e3
+4850af6b54391fc33c8028a0b7fafe05855a96ff
+f7824758800a7b1a386db5bd35f84c81454d017a
+e5fbffd3449a2bfe0acb4ec339a19f5b88fff783
+ceaa5eb51f761b5f84bd88b58c8f484fcd2a22d6
+166ef5d3fd96d99caeabe928eba291c082ec75a0
+5632ba72b2652df3b648b2ee698233e76a4eee65
+397257783ccc8cace5b67cc71e0c73034d559a4f
+6b333b2c6311e36c2bde920ab5813f8cfcf2b67b
+9285f4a6a06e975bde3ae3267fccd971d4fff98a
+55cfc3c08000f9d21879582c6296f2a864b657e8
+a812368fe1d4a186322bf72a6d07e1cf60067234
+b8f3f6d8f188f65ca8ea2725b248397c7d1e662d
+60777fbca8bff210398ec8b1179bc4ecb72dfec0
+8ad0a88a7583af819af66cf2d9e8adb860cf9c34
+8a8127a06f432982bfb0150df3212f379b36840b
+d6e08345ba293565086cb282ba08b225326022fc
+a136ccaa67f660c45d3abb8551c5ed357faf7081
+d69df51cff3d6b9b0625acdcbea27cd2bbf4b9c0
+f5770dd225501ff3764f9023f19a76fad28127d4
+b5f2846a506fc417e7da43f6a7679146d99c5e96
+1fa426496ed6bcd0c0b17b8b935a14c84a7ee1c2
+2cde051e04569496fb525d7f1b1e5ce6364c8b21
+a5f11c132eaab258a7cea2d681875af09cddba65
+f42dca4a4426e5873a981712102aa961be34539a
+35e6f6e5f4f780508e5f58e87f9efe2b07d8a864
+a6e25cab2251a8ded43c44b28a87f4c62e3a548a
+e8b3a257a0a44d2859862cdec91c8841dc69144d
+8e3c97e420e0112c043929087d6456d8ab61e95c
+48186494fc7c0cc664edec16ce582b3fcb5249c0
+fd33df02f970055d74fbe69b05d1a7a1b9b2219b
+b8d8501595f38974e001a66752dc7098db13dfec
+2fea258320c50f36408032c05c54ba455d575809
+656f05741c402ba43bb1b9a58bcc5f7ce2403d9a
+1b71d3f30238cb6621021a95543cce3aab96a21b
+d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9
+e096b11b3988441c0995c13742ad188a80f2b461
+b9081856963ceb78dcb44ac410c6fca0533676a3
+d0f54b72e3a3fe7c0e65d7d5a3b30affb275f4c5
+f22d6d59e413ee255e5e0f2104f1e03be1a6722e
+0831794eddcbac1f601dcb9be9d45531a56dbf7e
+70c9d11cad12dc1692a4507a97f50311f1689dbf
+8aa1591bf8fcb44f2e9f2f10d1029720ccbb8832
+1f59e0818e7b16c0d39dd08eb90533ea0ae0be5e
+464b3f0824fc1c3a9eaf721ce2db1b7dfe7cb05a
+e38371b69be4f341baa95bc854584e99b67c6d3a
+947ee3452e4f3d657b16325c6b959f8b8768efad
+7c17280c9193da3e347416226b8713b99e7825b8
+2f69e9964f3b6bdc0d18749b48bb6b44a4171c64
+61e2044184d86d0f13e50ecaa3da6a4913088c76
+16fdd6d842475e6fbe58fc809beabbed95f0642e
+98e098ba9ff98fc58f22fed6d3d8540116284b91
+362bfeb28adac5f45b6ef46c07c59744b4ed6a52
+b3ad7bc128b77d9254aa38c5e1ead7fa10b07d29
+6c80c834d426f0bc4acd6355b1946b71b50cbc0b
+1221e25763c3be95c1b6626ca9e7feaa3b636d9a
+bd243d77076b3b8fe046bd3dc6e8a02aa9b38d62
+944faf7f14f1bead911aeec30cc80c861442b610
+5ac946fc6543a445dd1ee6d5d35afd3783a31353
+11691f1e7c9dbcbd6dfd256ba7ac710581552baa
+20eeb83a8b6fea64c746bf993f9c991bb34a4b30
+193474d008cab9fa1c1fa81ce094d415f00b075c
+620339aef06aed07a78f9ed1a057a25433faa58b
+a6b5ca99432c23392cec682aebb8295c0283728b
+704d88168bdfabe31b6ff484507f4a2244b8c52b
+db848c3c32464d12da33b2f4c3a29fe293fc35d1
+bd21109e40c26af83c353a3271d0cd0b5c4b4ade
+fc8fb68a7e3b79c37108588671c0e1abf374f501
+2d9e58ea582e054e9d690afca8b6a554c3687ce6
+e19ebad4739d59f999d192bac7d596b20b887f78
+2303d07d839e8b20f33d6e2ec78d1353cac256cf
+a36c8a4213251d3fd634e8893ad1b932205ad1ca
+2c19d3d35ef7062061b9e16d040cebd7e45f281d
+5c493c42bfd93e4d08517438983e3af65e023a87
+101d4cfbd6f8a7a10bd33505e2b183183f1d8770
+aafb8dc8fda3b13a64ec3f1ca7911df01707c453
+dd85b6fdc45bf61f2b3d3d92ce5056c47bd8d335
+ddfae3a96bd341109d75cedeaebb5ed2362b903f
+f257300b2b4141aab73f93c146bf94846aef5fa1
+15d653972d176963ef0ad2cc582d3b35ca542673
+6c304f3b9c3a711a0cca5c62ce221fb098dccff0
+ce450e4849490924488664b44769b4ca57f1bc1a
+6fef65bd7287b57f0c3b36bf8e6bc987fd161b7d
+c76f64e87f88475069f7707616ad9df1719a6099
+86374bb8d309ad4dbde65c21c6fda6586ae4147a
+1c41965c5e1f97b1504c1bdde8037b5e0417da5e
+799c02a3cde2c0805ea728eb778161499017396b
+28f1542c63f5949ee6f2d51a6422244192b5a900
+85c90ad5eebb637f048841ebfded05942bb786b7
+05891725f5b27332836cf058f04f18d74053803f
+03ce2ff688f9b588b6f264ca79c6857f0d80ceae
+d141c31e3f261d7d5214f07886c1a29ac734d6fc
+c0c8d720658374cc1ffd6116554a615e846c74b5
+ad2339c48ad4ffdd6100310dcbb1fb78e72fac98
+bc36badb6606b8162d821a227dda09a94aac537f
+3947b64dcac5bcc1d3c8e9dcb50558efbb8770f1
+b759936982d6fb25c55c98955f6955582bdaeb27
+5e6f546a50ed97658be9310d5e0a67891fe8a102
+6b99cd366f2ea8e1c9abadf73b05388c0e24fec3
+6a2ac4f831bd0f67db45e7d3cdaeaaa075e7180a
+631483c15641c3652377f66c8380ff684f3e365c
+2bb53e66aa9417b6560e588b6235e7b8ebbc294c
+dee6609615b73b10540f32537a242baa3c9fca4d
+959bcb16afdf303c34a8bfc11e9fcc9d40d76b1c
+1442319de86d171ce9595b20866ec865003e66fc
+ac559873b288f3ac28ee8a38c0f3710ea3f986d9
+8d384e8c45a429f5c5f6628e8ba0d73c60a51a89
+fd71ae9599e8a51d8a61e31e6faaaf4a23a17d81
+825f56ff489cdd3bcc41e76426d0070754eab1a8
+f1250900074689061196d876f551ba590fc0a064
+1bbec7190ac3ba34ca91d28f145e356a11418b67
+41f26101fed63a8d149744264dd5aa79f1928265
+06f585a3a05dd3371cd600a40dc35500e2f82f9b
+49be50efc87c5df7a42905e58b092729ea04c2f5
+bcf19b964e7d1134d00332cf1acf1ee6184aff00
+fa4f59397f964a23e3c10335c67d9a24ef532d5c
+31ec1e5c3b5e020af4a5a3c1be2724c7429a7c78
+c535d4d61aa0f1d8aadb4082bdcc19f4cbdf0eaf
+fdda5852f2cffc871fd40b0cb1aa14cea54cd7e3
+e03e86ac61cfac9148b371d75ce81a55e8b332ca
+8724fc4d6b91eebb79057a7ce3e9dfffd3b1426f
+202dc3c6fda654aeb39aee3e26a89340fb06802a
+247a8040447b6577aa33648395d95d80441a0cf3
+626859fe8cafd25da13b19d44d8d9eb6f0918647
+a0c37f07710184597befaa7e6cf2f0893ff440e9
+d44e6baf3464bf56d3a29daf280b1b525ac30f7d
+10cb39e93fac194220237f15dae084136fdc6740
+7e2f7c0eeaeb47b163a7258665324643669919e8
+351c02d4775ae95e04ab1e5dd0c758d2d80c3ddd
+d86fabd4498c8feaed80ec342d254fb877fb92f5
+4c0cc732314ba3ccccd9036e019b1cfc27850c17
+446dc1413e1cfaee0030dc74a3cee49a47386355
+cb2917413c9b36c3bb9739bce6c03a1a6eb619b3
+521aa8dcd66428b07728b91722cc8f2b5a73944b
+11bb2abe0ca614c15701961428eb2f260e3e2eef
+863ad2838b9b90d4461995f498a39bcd2fb87c73
+cd22e6532211f679ba6057d15a801ba448b9915c
+df9269657505fcdc1e10cf45bbb8e325678a40f5
+673d4885370b27c863e11a4ece9189a6a45931cc
+48e6c6d981efe2c2fb0ae9287376fcae59da9878
+6cb7648465ba7757ecc9c222ac1ab6402933d983
+407de9da58871cae7a6ded2f3a6162b9dc371f38
+97b5800e144a8df48f1f7e91383b0f37bc37cf60
+9ea223c070ec9a00f4cb5ca0de35d098eb9a8e32
+14558a70418ec4012c5f058145eef2d22d89284a
+097340d3ac939ce181c829afb6b6faff946cdce0
+a8a61badec9b8bc01f002a06e1426a623456d121
+ac0d3f6ed5c42b7fc6d7c9e1a9bb80392742ad5e
+b6ef158d95042f39765df04373c01546524c9ccd
+ae78469de00ea1e7602ca468dcf188cdfe2c80d4
+09926ed62511c340f4540b5bc53cf2480e8063f8
+46f2611dc4a9302e0ac00a79456fa162461a8c80
+f02a6bccdaee14ab55ad94263539f4f33f1b15bb
+6964af90cf8ac336a2a55800d9c510eccc7ba8e1
+17a8d1b1b4c23a630b051f35e47663fc04dcf043
+be5276e9744c4445fe5b12b785650e8f173f56ff
+580f86f1ace1feed16b592d05c2b07f26c429b4b
+09507f1f1253101d04a975fc5600952eac868602
+2d4a3e9361505616fa4851674eb5c8dd18e0c3cf
+f2a7f9bd040aa8ea87672d38606a84c31163e171
+d700aedcb22a4be374c40d8bee50aef9f85d98ef
+d289ce63055c10937e5715e940a4bb9d0af7a8c5
+9aad8e52aff12bd822f0011e6ef85dfc22fe8466
+645f09f4bc2e6a13663564ee9032ca16e35fc52d
+39b452453bea9ce398613d8dd627984fd3a0d53c
+20c02e98602f6adf1cebaba075d45cef50de089f
+73ed64803d6f2c49f01cffef8e6be8fc9b5273b8
+8befcd91c24038e5c26df0238d26e2311b21719a
+10af69f11301679b6fbb23855bf10f6af1f3d2e6
+a14ed872503a2f03d2b59e049fd6b4d61ab4d6ca
+504028218290d68859f45ec686f435f473aa326c
+164b0e2a03a5a402f66c497e6c327edf20f8827b
+4d19401e44848fe65b721971bc71a9250870ed5f
+ab0981d1da654f37620ca39c6b42de21d7eb58eb
+b09b693708f412823053508578df289b8403100a
+c9b958c2494b7ba08b5b460f19a06814dba8aee0
+badd371a49d2c4126df95120902a34f4bee01b00
+bc9af4c2c22a82d2c84ef7c7fcc69073c19b30ab
+f571fe3f753765cf695b75b1bd8bed37524a52d2
+d3008b4122e50a28f6cc1fa98ac6af28b42271ea
+70c58700eb89368e66a8f0d3fc54f32f69d423e1
+5945464d47549e8dcaec37ad41471aa70001907f
+d833268a8ea9278e68aaf3bd9bc2c11a5bb0bab7
+c6241e6fc94192df2380d178c4c96cf071e7a3ac
+f0f854f8cfe826fd08385c0c3c8097488f468076
+35b1c1f2851e9ac4381ef41b4d980f398f1aad68
+7ed3b79248d92b255450c7becd32b9e5c834a31e
+30cbd41e997445745b6edd31f2ebcc7533453b61
+24115d209e0733e319e39badc5411bbfd82c5133
+0a9d204db13d395f024067cf70ac19c2eeb5f942
+74c19438c78a136677a7cb9004c53684a4ae56ff
+d5d5cc27ca519d1300e77e3c1a535a089f52f646
+d309e414f0d6e56e7ba45736d28ee58ae2bad478
+a87ab836771164adb95d6744027e62e05f47fd96
+e75a589ca27dc4f05c2715b9d54206dee37af266
+e2faaebd17d10e2919bd69492787e7565546a63f
+bd0e100a91ff179ee5c1d3383c75c85eddc81723
+9c065dfb26ce280610a492c887b7f6beccf27319
+81a142c751bf0b23315fb6717bc467aa4fdfbc92
+8b744786137cf6be766778344d9f13abf4ec0683
+9077365c9486e54e251dd0b6f6edaeda30ae52b9
+928b8eb47288a05611c140d02441660277a7ed54
+1ea74780d529a458123a08250d8fa6ef1da47a25
+5a12e1d4d74fe1a57929eaaa14f593b80f907ea3
+691964c43bfd282f6f4d00b8b0310c554b613e3b
+bff567c58db554858c7f39870cff7c306523dfee
+82e3f4099503633c042a425e9217bfe47cfe9d4b
+062c41dad67bb68fefd9ff0c5c4d296e796004dc
+95d858b39227edeaf75b7fad71f3dc081e415d16
+2c5d1e0719f3ad7f66e1763685ae536806f0c23b
+c4cfdcf19705f9095fb60fb2e569a9253a475f11
+d3b18ba0d9b247bfa2fb95543d172ef888dfff95
+b7820f3d0f43c2ce613ebb6c3d16eb893c84cf89
+95288fa7ff4683e32fe021a78cbf7d3376e6e400
+51cb09ee04831b95ae02e1bee9b451f8ac4526e3
+151b87de997e55db892b122c211f9c749f4293de
+e060e32f8ad98f10277b582393df50ac17f2836c
+7fe2ab9f54242ef8609ef9bf988f008c7d42407c
+8356832f883207187437872742d6b7dc95b51fde
+8981be3a69cd522b4e57e9914bf19f034d4b530c
+ca37eda56b9ee53610c66951ee7ca66a35d0a846
+b8375ff50b8a6f1a10dd809129a18df96888ac8b
+c588c89a72f89eed29d42f34bfa5d4cffa530732
+a92b5234b8b73e06709dd48ec5f0ec357c1aabed
+1efacaa0eaa7e16146c34cd20814d1411b35538e
+4f0b8f730273e9f11b2bfad2415485414b96299f
+d02b32b012ffba2baeb80dca78e7857aaeececb0
+a9fc8efd1aa3d58f89c0f53f0cb112725b5bda10
+cb8a1b8d87a3fef15635eb4a32173f9c6f966055
+ae8d5be3caea59a21221f02ef04d49a86cb80191
+26575ad9e75efb440a7dc4ef8e548eed4e19dbd1
+94806f0967931d376d1729c29702f3d3bb70167c
+ef8de1bd92e9ee9d0d2dee73095d4d348dc54a98
+533d14e539ae5cdca0ece392487a2b19106d468a
+24ff832171cb774087a614152c21f54589bf7523
+62dccab9ab715f33761a5315746ed02e48eed2a0
+506ea19145838a035e7dba535519fb40a3a0018c
+677585ccf8619ec2330b7f2d2b589a37146ffad7
+508702ed2bf7d1b0655ea7857dd8e52d6537e765
+f38813f1c9dac44dcb992ebe51c5ede66fd0f491
+9d58e8ab656772d2c8a99a9fb876d5611fe2fe20
+3c56acaa819f4e2263638b67cea1ec37a226691d
+90d9209d5dd679b159051a8315423a7f796d704d
+c83e26622b275fdf878135e71c23325a31d0e5fc
+3fb98e76ffd8ba79e1c22eda4d640da0c037e98a
+62fddae74c553ac9e34f511a2957b1614eb4f937
+fb2cc3501fc89f92f5ee130d66e69854f8a9ddd1
+54969bcd728b0f2d3285866c86ef0b4797c2a74d
+2ce1bac5ddc4cf668bbbb8879cd21dfb94b5cfe4
+55c68c1237166679d2cb65f266f496d1ecd4bec6
+f0f4f16d5b5f9efe304369120651fa688a03d495
+baafe3253702955c6904f0b233e661b47aa067e1
+cfc4aa456d9da1a6fabd7c6ca199332f03e35b29
+8895d6ae9f095a8413f663cc83f5b7634b3dc805
+3daafe6389d877fe15d8823cdf5ac15fd919676f
+c0f67e850176bb778b6c048d81c3d7e4d8c41003
+3328413ee9944de1cc7c9c1d1bf2fece79718ba1
+14fee990a372bcc4cb6dc024ab7fc4ecf09dba2b
+be4a20113bc204019ea79c6557a0bece23da1121
+6318d3842b36362bb45527b717e1a45ae46151d5
+6cbde27d9a287ae926979dbb18dfef61cf49860e
+a6270914cf5f60627a1332bcc3f5951c9eea3be0
+e52f73c77c7eaece6f2d8fdd0f15327f9f007261
+93f37c69dd92c4e038710cdeef302c261d3a4f92
+cec8936d97dea2fcf04f175d3facaaeb65e574bf
+bc8e11b8cdf0cfbedde798a53a0318e8d6f67e17
+cdc7bd87a2c9983dab728dbc8aac74d8c9ed7e66
+86f191616423efab8c0d352d986126a964983219
+378418fdd28f9022b02857ef7dbab6b0b9a02dbe
+263ed62f94ea615c747c00ebbb4008385285b33b
+414715421e01e8c8b5743c5330e6d2553a08c16d
+6fea198a41d2f6f73e47f056692f365c8e6b04ce
+2e8e6b835e5a8f55f3b0bdd7a1ff765a0b7e1b87
+6e8c3b7d25e6530a631ea01fbbb93ac1e8b69d2f
+d1dfdc107fa5f2c4820570e369cda10ab1661b87
+106092fafb53e36077eba88f06feecd07b9e78e7
+782a05fbe30269ff8ab427109f5c4d0a577e5284
+a5a44a32a91474f00a3cda671a802e87c899fbb4
+2fc43c2c3f7ad1ca7a1ce32c5a9a98432725fb9a
+6c1227659878e867a01888eef472dd96b679adb6
+cad24ba99c7b6834faf6f5be820dd65f1a755b29
+17a995680482183f3463d2e01dd4c113ebb31608
+057b80e235b10799d03876ad25465208a4c64caf
+edf60d081ffdfa80243217a50a411ab5407c961d
+90cc2f08a6c2f0c41a9dd1786bae097f9292105e
+afa84ff62c9f5b5c280de2996b69ad9fa48b7bc3
+162403e189d1b8463952fa4f18a291241275c354
+ea46951b070f37ad95ea4ed08c7c2a71be2daedc
+6d5125c9407c7762620eeea7570af1a8ee7d76f3
+2227f978f084ebb18cb594c0cfaf124b0df6bf95
+398e0771e64cab6ca5d21754e32dce63f9e3c223
+04b851f25d6d49e61a528606953e11cfac7df2b2
+5dd473a4a9c6337b083edf38b6ddf5a6aece8908
+695426275dee2ec56bc0c0afe1c5b4227a350840
+7142ac9e4d5498037aeb0f459f278fd28dae8048
+5f758a29dae102511576c0a5c6beda264060a401
+aef58a54d458ab76f62c9b6de61af4f475e0f616
+69a41c98f6b71764913145dbc2bb4643c9bc4b0a
+eb100638ed73b82e1cce8475bb8e180cb22a09a2
+3c09fb7fe1886072670e0c4dd632d052102a3733
+9730b9cd998c0a549601c554221a596deda8af5b
+a301ddc419cbd900b301a95b1d9e4bb770afc6a3
+2e3b981b9f3751fc5873f77ad2aa7789c3e1d1d2
+1ffb39ed4d684a80652dfa30d604b82b4c542615
+83d41f6548bb76241737dcd3fed9e182ee901ff9
+ae5f32e489c4d52e7311b66060c7381d932f4193
+a3f78cc944ac189632f25925ba807a0e0678c4d5
+a2359c0f81a7eb032cff1fe45e3b80007facaa2a
+5141cf2e59fb2ec9bb489b9c1832447d3cd93110
+7e467e686f9468b826133275484e0a1ec0f5bde6
+8b61fdc47b5eeae6bc0a52523f519eaeaadbc8c8
+911bef7465665d8b194b6b0370b2b2389dfda1a1
+645de797f936cb19c1b8dba3b862543645510544
+40dd2b9aace337467c6e1e269d0cb813442313d7
+fecccc79548001ecbd6cafd3067bcf14de80b11a
+34ec83c8ff214128e7a4a4763059eebac59268a6
+a1af7ec84472afba0451b431dfdb59be323e35b7
+56a677c889e0e2c9f68ab8ca42a7e63acf986229
+ebde9b9c714ed326157f41add8c781f826c1d864
+60462b981fda63c5f9d780528a37c46884fe0b54
+b6d3caccdcb3fbce45ce1a68bb5643f7e68dadb3
+c843f591658ca9dbb77944a89372a92006defe68
+6c5fbf156ef9fc782be0089309074cc52617b868
+2bb36c875754a2a8919f2f9b00a336c00006e453
+3b60b047831146044d154156441c60f6edd80346
+ad0d4d5c61b55a3ab29764237cd97be0ebb0ddff
+eac1b644492c10546a50f3e125a1f790ec46365f
+451b6409565a5ad18ea49b063561a2645fa4281b
+a78b5495a4223b9784cc53670cc10b6f0beefd32
+c15b68986ecfa1e13e3791686ae9024f66983f14
+96b1000031c53cd4c1c154013bb722ffd87fa7da
+2588acc7a730d864f84d4e1a050070ff873b03d5
+98fd92d68a143a5ced4a016fa3b7addd6b4a0122
+b3afa234996f44852317af382b98f5f557cab25a
+9b684e2e2bb43862f69b12c6be94db0e7a756187
+12408baf69419409d228d96c6f88b6bcde303505
+e1312b0b0fd660de87fa42de39316b28f9336e70
+574ad7ef015995efb7338829a021776bf9daaa08
+cfc30ce53bfc204b8764ebb764a029a8d0ad01f4
+68d08ed9470d973a54ef7806318d8894d87ba610
+e00241f00fb31c660df6c6f129ca38370e6eadb3
+07c83f544d0604e6bab5d741b0bf9a3621d133da
+eee2d2ac461f46734c8e674ae14ed87bbc8d45c6
+9888edfb6276887eb56a6da7fe561e508e72a517
+2f1485994ef2c09a7bb2874eb8252be8fe710db1
+04b4c779b43b830220bf938223f685d1057368e9
+beabb0d9d30871d517c5d915cf852f7f5293f52f
+45e459462a80af03e1bb51a178648c10c4250925
+18b9dc55e5221e704f90eea85a81b41dab51f7da
+675b2caee111cb6aa7404b4d6aa371314bf0e647
+372a8bf0ef757c08551d41e40cb7a485527b6cd7
+09903df21a38e069273b80e94c8c29324963a832
+07fa153b8e6196ee6ef6efd8b743de8485a07453
+7e27d946d23229220bcb6672aacab88e09516d39
+f0398ee5291b153b716411c146a17d4af9cb0edc
+beb2f1a6f3f781443580ffec9161d9ce6852bf48
+aa581b481d400982a7e2a88830a33ec42ad0414f
+997b9ffe2f752ba84a66730cfd320d040e7ba2e2
+5c4d4fd37e8c80ae95c00973531f34a6d810ea3a
+06262d6beeccf2784e4e36a995d5ee2ff73c8d11
+9c1664f69d0d832e05759e8f2f001774fad354d6
+7f511a6a2b38a26f077a5aec4baf5dffc981d881
+43c3b6a564b284382fdf8ae33f974f4e7a89600e
+70109c670471db2e0ede3842cbb58ba6be804561
+c61eaf172820fcafaabf39005bd4536f0c45f995
+cec70cf159b51a18b39c80fac1ad34f65f3691ef
+0a7309147d777c2f20f780a696efe743520aa2db
+c317181fa1de2260e956f05cd655642607520a4f
+6e46d8aa63db3285417c8ebb65340b5045ca106f
+f6fc112ff7e4746b040c13f28700a9c47992045e
+e8d1b134d48eb0928bc999923a4e092537e106f6
+afa57e50570a6599508ee2d50a7b8ca6be04834a
+b7c6df1ae0e8348feecd65e9ad574d1e04d212a5
+b3200539538eca54a85223bf0ec4f3ed132d0493
+7f445191fa0475ff0113577d95502a96dc702ef9
+d93baa5ecf3e1196b34494a79df0a1933fd2b4ec
+b08203fca1af7b95fda8aa3d29dcacd182375385
+4e6c9be0b646d60390fe3f72ce5aeb0136222a10
+34c062e2b8a3f6421b9f4ff22f115a36d4aba823
+b9dc8cc479cacda1f23b91df00eb03f88cc0c260
+a77e9f0bd205a7733431a6d1028f09f57f9f73b0
+7c30ea47f5ae1c5abd6981d409740544ed16ed16
+ed388878151a3b841f95a62c42382e634d4ab82e
+749d605dd12a4af58de1fae6f5ef5e65eb06540e
+f27e5a13c1c424504b63a9084c50f491c1b17978
+703dc33736939f88625227e38367cfb2a65319fe
+de3285da34df0262a4548574c2383c51387a24bf
+d444e010049944c1b3438c9a25ae09b292b17371
+ec576efd18203bcb8273539fa277839ec92232a1
+0bce54bfbd8119c73eb431559fc6ffbba741e6aa
+7306d42ca158d40436cc5167e651d7ebfa6b89c1
+1fe1bd6b760e3059fff73d53a57ce3a6079adea1
+53a41c711b40e7fe3dc2b12e0790933d9c99a6e0
+8fe38962c24300129391f6d7ac24d7783e0fddd0
+dc974c31201b6da32f48ef81ae5a9042512705fe
+b4d209845e1c67870ef50a7c37abaf3770563f3e
+480ccd25cb2a851745f5e6e95d33edb703efb49e
+38c901a58244be9a2644d486f9a1284dc0edbf8a
+f1ae9f5338fcff577b1ae9becdb66007fe57bd45
+a92adfdd8996ab2bd7cdc910ea1d3db03c66d34f
+3266fcd1886e8ad883714e38203e66c0c6487f7b
+e3ce4c3e1279e3dc0c14ff3bb2920aced9e62638
+c829be73584966e3162f7ccae72d9284a2ebf358
+ddbd24a73ba3d74028596f393bb07a6b87a469c0
+eb566490cd1aa9338831de8161c6659984e923fd
+5a07945293c6b032e465d64f2ec076b82e113fa6
+bd26dabab576adb6af30484183c9c9c8379bf2e0
+4cc326fc977cf967eef5f3135bf0c48d07b79e2d
+c3d874336eb8fae92ab335393fd801fa8df98412
+fcc6fe6007c322641796cb8792718641856a22a7
+23e824d1dfc33f3780dd18076284f07bd99f1c43
+1a53ca294bbe5923c46a339955e8207907e9c8c6
+a8bb698d1bb21b81497ef68f0f52fa6eaf14a6bf
+e1179a5746b4bf12e1c8a033192326bf7f670a4d
+fb6f5cb26395608a3cf0e9c6c618293a4278a8ad
+55c40cbcf49a0225e72d911d762c27bb1c2d14aa
+d4885ca24189b4414031ca048a8b7eb2c9ac646c
+8a6033cbba8598945bfadd2dd04023c2a9f31681
+c26b43c2e1e2da96e7caabd46e1d7314acac0992
+289cfcd081c4393c7d6f63510747b5372202f855
+0b5a82f8c0ee3640503ba24ef73e672d93aeebbf
+120785f9b4952734818245cc305148676563a99b
+8b38124ff02a9cf8ad00de5521a7f8a9fa4d7259
+9b1022a01ca4ecf8c1fa99b1b39a93924de2fcfb
+dbed26cc6d818b3679e46677abc9fa8e04e8c6a6
+7789a5d87884f8bafec8a82085292e87d4e2866f
+78f2c8671d1a79c08c80ac857e89315197418472
+5b97e997b9b654373bd129b3baf5b82c2def13d1
+758d7e1be64cc668c59ef33ba8882c8597406e53
+9db4b25df549555f9ffd05962b5adf2fd9c86543
+926e97d5ce2a6e070f8ec07c5aa7f91d3df90ba0
+bebea83479a8e1988a7da32584e37bfc463d32d4
+aa1607090fbc80ab1e9c0f25ffe8b75b777e5fd8
+67386772c289cd40db343bdc4cb8cb4f58271df2
+5da2ae30e5ee22d00f87ebba8cd44a6d55c6855e
+7ec7163ec1bc237c4c2f2841c386f2dbfd0cc922
+dd6826e9520a6e72bcd24d1bdb930e78c1083b31
+2322ec2f3571e0ddc593c4e2237a6a794c61251d
+2ed4973984b254be5cba3129371506275fe8a8eb
+75308067ddd3c53721430d7984295838c81d4106
+f412d9d7bc7534e7daafa43f8f5eab811e7e4148
+0450dacc43171c6e623d0d5078600dd570de777e
+892c911ca68f5b4bad59cde7eeb6c738ec6c4586
+85fd2bda5eb3afe68a5a78c30297064aec1361f6
+faf5583063682e70dedc4466ac0f74eeb63169e7
+18d51a366ce2b2068e061721f43cb798177b4bb7
+372fb32569ced35eaf3740a29890bec2be1869fa
+34bc8ecec0c0b328cd8c485cb34d4d2f4b84e0c9
+57a14a65e8ae15176c9afae874854e8b0f23dca7
+1159ff04fd17c59515199e0fc2d5e01e72818b59
+0da4c3d898ca2fff9e549d18f513f4898e960aca
+e0244a8356b57a5721c101ead351924bcfb2eef4
+a9fdbe102f266cc20e600fa6b060a7bc8d1134e9
+b32cf547a764a4efa475e9c99a72a5db36eeced6
+d7fe2a52d0ad915b78330340a8111e0b5a66513a
+e180572400b64860e190a8bc04ef839fa491e056
+e7cac91da51b78eb4a28e194d3f599f95742e2a2
+b1e4f8c15ff30cc7d35ab25ff3eddaf854e0a87c
+814d091c973ff6033a83d4e44ab3b6a88cc1cb66
+2dfe0e7e81f65716b09c590652a4dd8452c10294
+ee7e8aec3ebb37e41092e1285e4f81916ce92c18
+869583b700ecf33a9987447aee9444abfe23f343
+239e305c24155add73f2a0ba5ccbd66b37f77e14
+794a51097385648e3909a1acae7188f5ab881710
+f0ca31fd5cad07e84b47d50dc07db9fc53482a46
+50ee027c63dcc5ab5cd0a6cdffb1994f83916a46
+f842b13bd494be1bbc1161dc6df244340b28a47f
+ea80a050d20c0e24e0625a92e5c03e5c8db3e786
+1e0d92b9b4011822825d1f7dc0eba6d83504d45d
+d34f546e61eccbac2450ca7490f558e751e13ec3
+9bac481dc4171aa2d847feac546c9f7299cc5aa0
+02f4b900deabbe7efa474f2815dc122a4ddb5b76
+61329bc767152f01aa502989abc854b53047e52c
+da7bbfa905d88834f8929cb69f41a1b683639f4b
+fbc591cde7fb7beb985437a22466f9cf4b16f8b1
+22e121a8dea49e3042de305574356477ecacadda
+bef926d63512dbffcf1af59f72295ef497f5acf9
+605f6817018a572797095b83bec7fae7195b2abc
+ca902aeec4fa54d32a4fed9ba89a7fb2f7131734
+9dcfa771a7e87d7681348dd9f6cf9803699b16ce
+d4df31006798ee091b86e091a7bf5dce6e51ba3e
+9f62ac43a1086c22b9a3d9f192c975d1a5a4b31f
+bf961e4a57a8f7e9d792e6c2513ee1fb293658e9
+5167e16b53283be5587659ea8eaa3b8ef3fddd33
+352d61eb66b053ae5689bd194840fd5d33f0e9c0
+e6e5a6090016810fb902b51d5baa2469ae28b8a1
+a8e7561ada380f2f50211c67fc45c3b3dea96bdb
+e5ea7295b89ef679e74919bf957f58d55ad49489
+97c1f68fb7162af326cd0f1bc546908218ec5da6
+78c1ad33772237bf138084220d1ffab800e1200d
+5f0d4657eab4152a1785ee0a25b5b499cd1163ec
+ede16b198b83d04b52dc3f0dafc11fd82c5abac4
+92292fffc36336d63f4f77d6b8fc23b0c54090e9
+7477cf04c6b086108f459f693a60272523c134db
+18855be5e7a60269c0652e9567484ce5b9617caa
+4ca9753ab023accbfa75a547a65344ee17b549ba
+41c56c69b20b3f0b6c8a625009fc0a4d317e047a
+43e268c118ac25f1f0e984b57bc54f0119ded520
+1fff309330f85146134e49e0022ac61ac60506a9
+e2106bb3febb4fc8fe91f0fcbc241bcda0e56b1e
+2bf646a6efd15ab830344ae9d43e10cc89e29f34
+6e8a81d452a91f5231443ac83e4c0a0db4579974
+7ca7255c2e0c86e4adddbbff2ce74f36b1dc522d
+f1ba2fe3491c715ded9677862fea966b32ca81f0
+0a4fc9016aacae9cdf40663a75045b71e64a70c9
+856317f27248cdb20226eaae599e46de628fb696
+65b1209d38c259fe9ca17b537f3fb4d1857580ae
+d02c54192dbd0798b43231efe1159d6b4375ad36
+5b0008ba87667085912ea474025d2323a14bfc90
+6689aee6c9599c1af4c607ea5385ac0c2cf0c4b3
+e7cfaff65541cde4298a04882e00608d992f6703
+dba7d8c4d2fca41269a2c96b1ea594e2d0b9bdda
+78cec49ca0acd3b961021bc27d5cf78cbbbafc7e
+894f27b6ea68a1ec9b7632533eabf2353b1e9d79
+dfd8602820c0e94b624d02f2e10ce6c798193a25
+bd70f832e133fb87bae82dfaa0ae9d1599e52e4b
+7aa062c6c90dba866273f5edd413075b90077b51
+a20036b7fbf6c0db454c8711e72d78f145560dc8
+8f6d05b8f9860c33c7b1a5d704694ed628db66c7
+d7b6bbb94ac20f5e75893f140ef7e207db7cd483
+fc7b34a2e43bb3d3585e1963bb64a488e2f278a0
+1b5d445741473ced3d4d33732c9c9225148ed4a1
+3b21aaf7def52964cf1fcc5f11520a7618c8fae3
+39af06d29a74ad371a1846259e01c14b5343e3d1
+eaaed082762337e7c3f8a1b1dfea9c0d3ca281bf
+a7ec294373ccc0598cbb0bbb6340c4e56fe5d979
+9853136dbd7d5f6a9c57dc66060cab44a86cd662
+771505abd38641454757de75fe751d41e87f89a4
+5e6fc99d8f5ebaab0e9c29bc0969530d201e0708
+d1079444ceddb1de316983f371ecd1db7a0c2f38
+72591a75469321074b072daff80477d8911c3af3
+cea2911ccabab40e9c1e5bcc0aa1127cab0c789f
+e8fdacbd708feb60fd6e7843b048bf3c4387c6db
+b97c7f82c1439fa1e4525e5860cb05a39cc412ea
+13d430257d595231bda216ef859950caa736ad1d
+11df25b4e074b7610ec304a8733fa47625d9faca
+758d481bbf24d12615b751fd9ec121500a648bce
+8694cd9748fb1c128f91a572119978075fede848
+8ce9b7b52d05701d5ef4a573095db66ce60a7e1c
+c73dd452c20460f40becb1fd8146239c88347d87
+3f9ca2526013e358cd8caeb66a3d7161f5507cbc
+8c4042191431e9eb43f00b0f14c23765ab9c6688
+90ac0f32c0c29aa4545ed3d5070af17f195d015f
+9c6dfd3a38374399d998d5a130ffc2864c37f554
+6d2fd0a9cbea13e840f962ba7c8a9771ec437d3a
+e68869499471bcd6fa8b4dc02aa00633673c0917
+856cc83a3121de89d4a6d9283afbcd5d7ef7aa2b
+197c64c36e8a9d624a05ee98b740d87f94b4040c
+3294e27356c3b1063595885a6d731d625b15505a
+2a2df7e790737a026434187f9605c4763ff71292
+535cdce8264ac0813d5bb8b19ceafa77a1674adf
+fa052fd40e717773c6dc9cc4a2f5c10b8760339f
+ecac3da2ff8bc2ba55981467f7fdea9de80e2092
+9d46485ca2c562d5e295251530a99dd5df99b589
+83e96ed8a4663edaa3a5ca90b7ce75a1bb595b05
+7ac9aaafe4d74542832c273acf9d631cb8ea6193
+9d5bfaf6191484022a6731ce13ac1b866d21ad18
+35208eda874591eac70286441d19785726578946
+fd10b0c771a2620c0db294cfb82b80d65f73900d
+a1132e2638a8abd08bdf7fc4884804dd6654fa63
+c4e2d5ebfebbb9dcee6a9866c3d6290481496df5
+8ed32c8fad924736ebc6d99c5c319312ba1fa80b
+f6ce34d6e4e445cc2c8a9b8ba624e971dd4144ca
+6b742055a664bcbd1c6a85ae6796bd15bc945367
+eedfb384a5e42511013b33104f4cd3149432bd9e
+7f203f2ff6721e73738720589ea83adddb7fdd27
+188abc5bad3a3663d042ce98c7a7327e5a1ae298
+519f1486f0755ef3c1f05700ea8a05f52f83387b
+829f390b3f8ad5856e7ba5ae8568f10cee0c7e6a
+aca232de87c4c61537c730ee59a8f7ebf5ecb14f
+d20ea5a4fa771bc4121b5654a7483ced98b39148
+ebb7cc67df6d90f1c88817b20e7a3baad5dc29b9
+09df62fd17d3d833ea6b5a52a232fc052d4da3f5
+edf98a925bb24e39a6e6094b0db839e780a77b08
+b313751548018e4ecd5ae2ce6b3b94fbd9cae33e
+cfeb26245b57dd10de8f187506d4ed5ce1e2b7dd
+ab58a7db32683aea9281c188c756ddf969b4cdbd
+85a136b48c2036b16f444f93b086e2bd8539a498
+522a4ca705c06a0436bbe62f46efe24d67a82422
+8e452379fda31744d4a4383fcb8a9eab6dbc4ae4
+5a547df635a9a56ac224d556333d36ff68cbf088
+af654a7ec15168b16382bd604889ea07a967dac6
+080e0efc3cf71260bfe9bdc62cd86614d1ebca46
+d9c4b1ca997583047a8721b7dfd9f0ea2efdc42c
+ae836e2be4bb784760e43de88a68c97f4f9e44a1
+efc78a7d95b14abacdfde5c78007eabf9a21689c
+f2eab39cf68de880ee7264b454044a55098e8163
+7d18e9165312cf669b799aa1b883c6bbe95bf40e
+721b109970bf5f1862767a1bec3f9a79e815f79a
+c43862db5eb7e43e3ef45b5eac4ab30e318f2002
+8e63868e552e433dc536ba732f4c2af095602869
+edd6ed94207ab614c71ac0591d304a708d708e7b
+bd0265ba7f391dc3df9059da3f487f7ef17144df
+45a6333fc701d14aab19f9e2efd59fe7b0e89fec
+59dac8b460a89e03fa616749a08e6149708dcc3a
+d26b443f87df76034ff0fa9c5de9779152753f0c
+2564920d6976be68bb22e299b0b8098090bbf259
+20ade100a320cc761c23971d2734388bfe79f7c5
+10bfa4cecd64b9584c901075d6b50f4fad898d0b
+0b572a2b7052b15c8599dbb17d59ff4f02838ff7
+7f2a234ad5c256733a837dbf98f25ed5aad214e8
+8fed5ea3b69ea441a8b02f61473eafee25fb2374
+1efaa128378f988965841eb3f49d1319a102dc36
+a0aa32bb7f406693217fba6dcd4aeb6c4d5a479b
+78598e7005f7c96d64cc47ff47e6f13ae52245b8
+c7c8d150ece08b12e3abdb6224000c07a6ce7d47
+3f0c6dbfd3c9cd5625ba748327d69324baa593a6
+15aa6c457678e25f6bc0e818e5fc39e42dd8e533
+768f6a14a7903099729872e0db231ea814eb05e9
+0141cb33c822e87e93b0c1bad0a09db49b3ad470
+29c340c83b3bbef9c43b0c50b4d571d5ed037cbd
+b806a31c093b31e98cc5fca7e3ec53f2cc169db9
+9b4b2a575641f3a7f8a5ce28b6a06c36694a9ddf
+8dfe43c76b76a97f8938f5f5f81059a1f1fa74ed
+206fbe6ab6a83175a0ef6b44837743f8d5f9b7e8
+599adc0dcd4ebcc2a868feedd243b5c3c1bd1d0a
+598744c8620e4ecbf449d14d7081fbf1cd05851f
+a85e9e11db5665c89b057a124547377d3e1c27ef
+d4453ec649dbde752e74da8ab0984c6f15cc6e06
+a26fd9df58bb76d6c7a3254820143b3da5bd584b
+66490b5869822b31d32af7108eaff193fbdb37b0
+2f73203fd71b755a9601d00fc202bbbd0a595110
+fbe4f8a6af19f63e47801c6f31402f9baae5fecf
+b6f15bf8723b2d5390122442ab04630d2d3878d8
+f11c76efdc9651db329c8c862652820d61933308
+39ed31ced75e6151dde41944a47b4bdf324f922b
+411318684bd2d42e4b663a37dcf0532a48f0146d
+352a620f0b96a7e76b9195a7038d5eec257fd994
+0a0321785c8beac1cbaaec4d8ad0cfd4a0d6d457
+7a131fafa7058fb75fdca32d0529bc7cb50429bd
+27a00f2490284bc0705349352d36e9749dde19ab
+212608e00fc1e8912ff845ee7a4a67f88ba938fc
+96e318f8ff91ba0b10348d4de4cb7c2142eb8ba9
+69adf2f122ff18848ff85e8de3ee3b2bc495838e
+9e182e0cd9d70f876f1be7652c69373bcdf37fb4
+3a27d164e931c422d16481916a2fa6401b74bcef
+d8526863f35b29cbf8ac2ae756eaae0d2930ffb1
+e5e5f31b81ed6526c26d277056b6ab4909a56c6c
+2b869d5551b10f13bf6fcdb8d13f0aa4d1f59fc4
+c71217b2b111a51a31cf1107c71d250348d1ff68
+c11eb653746afa8148dc9153780a4584ea529d28
+b73795963dc623a634d218d29e4a5b74dfbc79f1
+bd379f8e08f88729a9214260e05967f4ca66cd65
+d80a3d1f3a438e02a6685e66ee908446766fefa9
+8e0ab1b08964393e4f9f42ca037220fe98aad7ac
+040dc119d5ca9ea3d5fc39953a91ec507ed8cc5d
+7c80d91db5977649487388588c0c823080c9f4b4
+4c29e1f31660ba33e46d7e4ffdebb9b8c6bd5adc
+de7f5e4ccc2f38e0c8f3f72a930ae1c43e0fdcf0
+c808c784237f167c78a87cc5a9d48152579c27a4
+3e3227c8e9f44593d2499f4d1302575c77977b2e
+4209783b0cab1f22341f0600eed4512155b1dee6
+f61d5f2a082c65d5330f21b6f36312cc4fab8a3b
+d78734c54f29e4474b4d47334278cfde6efe963a
+cb2470aade8e5630dcad5e479ab220db94ecbf91
+dd8084b2878ca95d8f14bae73e1072922f0cc5da
+3795974e24296185d9b64454cde6f796ca235387
+e5823a9d3e5e33e119576a34cb8aed497af20eea
+d1a43737ca8be02d65684cf64ab2331f66947207
+ba788365d70fa6c907b71a01d846532ba3110e31
+266766818dbc5a4ca1161ae2bc14c9e269ddc490
+1316296fae6485c1510f00b1b57fb171b9320ac2
+fbc2f5cf943f440b1ba2374ecf82d0176a44f1eb
+e3c011d08d04c934197b2a4804c90be55e21d572
+c9efcd8e32dced6efa2bba64789df8d0a8e4996a
+7323b594d3a8508f809e276aa2d224c4e7ec5a80
+a322479a6851f57a3d74d017a9cb6d71395ed806
+d949fadc9b6c5c8b067fa42265ad30945f9caa99
+3baa3d5325f00c7edc1f1427fcd5bdc6a420a63f
+ffea8775fc9c32f573d1251e177cd283b4fe09c9
+8199803f476c12c7f6c0124d55d156b5d91314b6
+dec0c26855da90876c405e9fd42830c3051c2f5f
+d35534f3f59631951011539da2fe83f2844ca245
+8ff8c64288a2f7e4e8bf8fda865820b04ab3dbe8
+2e0d56794379c436b2d1be63e71a215dd67eb2ca
+6ca6ade6c9acb833790b1b4e7ee8842a04c607f7
+23dd8d17ce09c22d367e4d62c1ccf507bcbc64da
+313d5eba97fe064bdc1f00b7587a4b3543ef712a
+3cb2841302af1fb9656f144abc79d4f3d0b27380
+a1b7bb2a4970b7c479aff3324cc7773c1daf3fc1
+3933e323653ff27e68c3458d245b47e3e37f52fd
+37c5c65ae204ad3692cd30a3dc62f28a263ad468
+3827f1cab643a57e3cd22fbffbf19dd5e8a298a8
+707a542c580bcbf3a5a75cce2df80d75990853cc
+4735fa28fa2a2af98f7b266efd300a00e60dddf7
+6584c3c877400e1689a11ef70133daa86a238602
+84a74ef8680b66e6dccbc69ae80321a52780a68e
+911505a4242da555c6828509d1b47ba7854abb7a
+c175ebe550761b18bac24d394d85bdfaf3b7718c
+d4b4020e289c095ce2c2941685c6cd37667f5cc9
+b331ca23aed90394c05f06701f90afd550131fe3
+3a49507c46a2b8c6411809c81ac47b2b1d2282c3
+c48b68dc780c71ab0f0f530cd160aa564ed08ade
+75a74a74d6abbbb302a99de3225c8870fa149aee
+7ac4fc169fffa8e962b9df94f61e2adf6bac8f97
+180bd019eab85bbf01d9cddc837242e111825750
+fe50efe9e282c63941ec23eb9b8c7510b6283228
+35265cbd9c6ea95753f7c6b71659f7f7ef9081b6
+81706277ed180a92d2eeb94ac0560f7dc591ee13
+62f017907e19766c76887209d01d4307be0cc573
+d9218c2bbc7449dbccac351f55675efd810535db
+ea890846912f16a0f3a860fce289596a7dac575f
+403a108dec92363fd1f465340bd54dbfe65af870
+52af7625f7e7a0bd9f9d8eeafd631c4d431e67e7
+763b60feaabceebbe9eddfbaa0378b8b454327aa
+46976097c54e86032932d559c8eb82ffea4bb6bb
+5f453a35d312debfc993d687fd0b7c36c1704b16
+88535dba55b0a80975df179d31a6cc80cae1cc92
+1d51b256af68c5546d230f3e6f41da029e0f5852
+cb8382f43ce073322eba82809f02d3084dad7969
+8ccbbd9da0749d96f09164e28480d54935ee171c
+1b02b9413b730b96b91d16dcd61b2420aef97414
+0322e69172f54b95ae6a90eb3af91d3daa5e36ea
+7918e3e15099b4b2943746e1f6c9e3992a79c5f3
+099053f2cbfa06c0141371b9f34e26970e316426
+77db171a523fc3d08c91cea94c9562f3edce56e1
+bf54b5586cdb0b32f6eed35798ff91592b03fbc4
+40c9dce0a4c18829c4100bff5845eb7799b54ca1
+dc550f361ae82ec6e1a0cf67edf6a0138163382e
+7ef44b7c2b5533d00001ae81f9293bdb592f1146
+f78fe101b21be36e98cd3da010051bb9b9829a1e
+ee1465cbbc1d03cb9eddaad8618a4feea78a01ce
+eed7920682789a9afd0de4efd726cd9a706940c8
+6316a4b689706b0f01b40f9a3cef47b92bc52411
+aca728cab26b95fbe04ec230b389878656d8af5b
+3db6fd6a0e9bb30f2421e84ee5e433683d17d9c1
+c3dc4f414f5233df96a9661609557e341b71670d
+fad895771260048f58d12158a4d4d6d0623f4158
+38eea307445a39ee7902c1ecf8cea7e3dcb7c0e7
+8ac2d704f27a2ddf19b40c8e4695da629aa52a54
+dc34ab49d378ddcf6c8e2dbf5472784c5bfa8006
+2f837ff8b134b785ee185a9c24e1f82b4e54df04
+984edce0b961418d81203ec477b9bfa5a8197ba3
+fd5376fcb09001a3acccc03159e8ff5801129683
+d7b8f285b0701ba7b1a11d1c7dd3d1e7e304083f
+ac2e166c76c103f17fdea2b4ecb137200b8d4703
+179564f157a96787b1b3380a9f79701e3394013d
+accbd6cd5dd649137a7c57ad6ef99232759f7544
+acff2dc5d601887741002a78f8c0c35a799e6403
+f3fed71cc4fc49b02067b71c2df80e83084b2a82
+b1f4423c227fa37b9680787be38857069247a307
+443f4421e44d4f374c265e6f2551bf9830de5597
+9c2f20ed168743071db6268480a966d5d238a7ee
+d9bad7c3c874169e3e0b66a031c8199ec0bc2c1f
+6dcf6b028a6042a9904628a3395520995b1d0ef9
+43d7d0d0d0e2d6cf5355e60c4fe5b715f0a1101a
+0ca36ecaf4015ca4095e07f0302d28a5d9424254
+312b2566e315dd6e65bd42cfcbe4d919159de8a1
+d57c8d46a869c63fb20e33bc21bc2a3c4628f5b4
+9bc01fa9400c231e41e6a72ec509d76ca797207c
+cbbd9880fb28bef4e33da418a3795477d3a1616e
+675b1fd2aaebe9c62be6b22b9ac6d278193cc581
+468bb5344f74842a9a43a7e1a3333ebd394929b4
+ceba8ca45bad226c401a509e6b8ccbf31361b0c9
+111d0b588f3abbbea85d50a28c0506f74161e091
+fde611bf25a89fe11e077692070f89dcdede043a
+2ac7bb3fb014d27d3928a9b4bc1bf019627e0c1a
+516a27d5dd06622f872f5ef334313350745eadc3
+b8048a7661bdb73d3613fde9d710bd45a20d13e7
+3a6334953cd2775fab7a8e7b72ed63468c71dee7
+ec983394f800da971d243f4143ab7f8421aa967c
+a45e6172713a56736a2565ddea9cb8b1d94721cd
+7c9a65f18f7feb473e993077d087d4806578214e
+f9d9b2a1197cdb73e977490756c0ff8a30cafc3e
+d33fcdaf2c0bd0100ec94b2c437dccdacec66476
+12226bca7a891e25b7d1e1a34a089521bba75731
+971cb1bfe3d10fcb2037e684c48bd99842f42fa4
+cf5a0115d3f4dcf95bea4d549ec2b6bdd7c69150
+cc8e378fd05152a81c2810f682a78c5057c8a735
+640e12837241d52d04379d3649d050ee3760048c
+48de3ca194c3830daa7495603712496fe908375c
+bddc822cf20b31d8f714925bec192c39294184f7
+f6f2a212505a118933ef84110e487551b6591553
+58538cc418bf41197fad4fc4ee2449b2daeb08b1
+dbd958ffedc3eae8032be67599ec281310c05630
+62750d78e819d745b9200b0c5c35fcae6fb9f404
+32e4fc2f0d9c535b1aca95aeb5bcc0623bcd2cf2
+3083bd7a442af6a1d72cdc04ae1ad7c30697a4e8
+ab6886252aea103b3d974462f589b4886ef2735a
+65babb10e727382b31ca5479b452ee725917c739
+87610276ccbc12d0912b23fd493019f06256f94e
+1c6e22516ceb5c97c3caf07a9bd5df357988ceda
+c914d2ba06ec3fd1baa0010dcc4d16c7c34fc225
+3d6ee995bc2f3e0f217c053368df659a5d14d5b5
+f3a59d85b7458394e3c043d8277aa1ffe3cdac91
+b59cee1f647737ec3296ccb3daa25c890359c307
+62007c30f148334fb4d8975f80afe76e5aef8c7f
+626913b8fcbbaee8932997d6c4a78fe1ce646127
+cfd4004054399f3a5f536df71f9b9987f060f434
+dc13229afbbc8b7a31ed5adfe265d971850c0976
+1ffe20eb32dbc4fa85ac7844178937bba97f4bf0
+30b15cdb72760f20f80e04157b57be9029d8a1ab
+fd53be2e0a9f33080a9db4b5a5e416e24ae8e198
+d444368421f456baf8c3cb089244e017f8d32c41
+fffefc1fb840da63e17428fd5de6e79feb726894
+1d776bfe627f1a051099997114ba04678c45f0f5
+cb27b45329d61f5f95ed213798d4b2a615e76be2
+d0144d76b8b926d22411d388e7a26506519372eb
+08d41d2f68a2bf0091dc373573ca379de9b16385
+4f1249369127cc2e2894f6b2f1052d399794919a
+2e231f1e7e641dd3619bec59e14d02e91360ac01
+632fa986bed53862d83918c2b71ab953fd70d6cc
+112780a7fe259dc7aff2170d5beda50b2bfa7bda
+93af335bf8c610f34ce0cadc15d1dd592debc706
+5f0d4a0b5f72d8700cdf8cb179263a8fa866b59b
+975978ee6a32383d6f4f026b944099e7739e5890
+c43dc4ae68a317b34a79636fadb3bcc4d1ccb61c
+5f94969b9491db552ffebc5911a45def99026afe
+28715fc79bd5ff8dd8b6fc68a4f2641e5d1b8a08
+d00e9a6339e34c613053d3b2c132fccbde547b56
+288964068cd87d97a98b8bc927d6e0d2349458a2
+aeb6b9aba5bb08cde2aebfeda7ced6c38c84df4a
+56e079f4eb40744728fd1d7665938b06426338e5
+92b61b09d2eed4937058d0f9494d9efeddc39002
+36939e6a365e9db904d81325212177c9e9e76c54
+2957715e96a18dbb5ed5c36b92050ec375214aa6
+1190cba0cae3c8bb81bf80d6a0a83ae8c41240bc
+7a7b1352d97913ba7b5d9318d4c3d0d53d6fb697
+7f21a7441c6ded38008c1fd0b91bdd54425d3f80
+f5fae7810a33ed67852ad6a3e0144cb278b24b41
+24f022d807352abf071880877c38e53a98254dcd
+361d6345919c2edc5c3ce49bb4915ed2b4ee49be
+d454ad60b061c1a1450810a0f335fafbfeceeccc
+2c1f8ddbfbb224271253a27fed0c2425599dfe47
+ed9d11e995baeec17c5d2847ec1a8d5449254525
+493c8591d6a1bef5d7b84164a73761cefb9f5a25
+d5444f9475253bbcfef85c351ea9dab56793b9ea
+86d0127e1fd04c3d8ea78401c838af621647dc95
+4560491820e0ee49736aea9b81d57c3939a69e12
+c62c07de196e95eaaf614fb150a4fa4ce49588b4
+64ec0c53dd1aa51eb15e8c2a577701e165b8517b
+8f9c37f351a91ed416baa8b6cdb4022b231b9085
+a022eff5470c3446aca683eae9c18319fd2406d5
+af6cae71f24ea8f457e581bfe1240d5fa63faaf7
+a81c86cda6f1da2aa09b6737297addd3d4a64ffa
+633c851ebf625ad7abdda2324e9de093cf623141
+13179bb3f2867ea44647b6fe0c8fb4109207e9f5
+7fab17ef7e25626643f1d55257a3e13348e435bd
+523854a7d8755e944bd50217c14481fe1329a969
+6e911227e893d0eecb363015754824bf4366bdb7
+7c1cfab6b60466c13f07fe028e5085a949ec8b30
+ff8ef43168b9c8dd467208a0b1b02e223b731254
+7eaa97be59019f0d36aa7dac27407b004cad5e93
+2bcd9b2b78eb353ea57cf50387083900eae5384a
+61e9e180d3d1d8b09f1cc59bdd9f98c497707eff
+f16599e4ec666c6390c90ff9a253162178a70ef5
+37866fea39deeff453802cde529dd9d32e0205a5
+7c57ac7c9f84fbd093f6393e2b63c18078bf0fdf
+4c72a51a7c7288e6e17dfefe4f87df47929608e7
+06560d5721ecc487a4d70905a485e22c9542a522
+8006219efb6ab76754616b0e8b7778dcfb46603d
+7a3d46f32f680144fd2ba261681b43b86b702b85
+8202da548a128b28dd1f3aa9f86a0523ec2ecb26
+95289007f2f336e6636cf8f920225b8d47c6e94f
+db1a9b8d8ce9a5696a96f8db4206b6f72707730e
+2ffcd35d9b8867a42be23978079f5f24be8d3e35
+10e4172dd4f4a633f10762fc5d4755e61d52dc36
+2563fc1797f187e2f6f9d9f4387d4bcadd3fbd02
+eba4cfd76f99159ccc0a65cab0a02db42b548d85
+21d5c838d19fcb4d624b69fe9d98e84d88f18e79
+484bac2a9ff3a43a6f85d109bbc579a4346397f5
+d383ba7bbf8b7b49dcef9f8abab47521966546bb
+a1dd9038b1e1e59c9d564e252d3e14705872fdec
+24869258fef8f47623b5ef43bd978a525f0af60e
+6c92d87c84fa5e5d2bb5bed3ef38168786bacc49
+6baaa8b763cc5553715766e7fbe7abb235fae33c
+88e090ffc1f75eed720b5afb167523eb2e316f7f
+2961e14c327341d22d5f266a6872aa174add8ac4
+7f4bc8883c3b9872408cc391bcd294017848d0cf
+f7dcadc5288653ec6764600c7c1e2b49c305dfaa
+cff911786b5ac884bb71788c5bc6acf6bf569eff
+0b45aeb0aede5e0c19b508ede802bdfec668aefd
+c79cf7f61441195404472102114bcf079a72138a
+4686df20f0ee40cd411e4b43860ef56de5531d9e
+93cd5c47e4a3425d23e3db32c6eaef53745bb32e
+f6149fc5b39fa6b33220ccee32a8ee3f6bbcaf4a
+fd9ab411dc6258763c95b7741e3d51adf5504040
+29a5d38390857e234c111f8bb787724c08f39110
+06ab24721d7117974a6039eb2e57d1545eee5e46
+55432723c728a2ce90d817e9e9877ae9fbad6fe5
+e9b0a27018c7151016a9fe01c98b4c21d6ebf4be
+40a34d4eea5e32dfbcef420ffe2ce7c1ee0f23cd
+5748652924084b7b0220cddcd28f6b2222004359
+3cd7b15f5647e650db66fbe2ce1852e00c05b2e4
+0004f72a00096fa410b179ad12aa3a0d10fc853c
+27b451abfe321a696c852215bb7efb4c2e50c89f
+18e54b74ed1f3c02b7569f53a7d930d72fc329f5
+8562b4f63e49847692b8cb31ef0bdec416b9a87a
+b562def2624f59f7d3824e43ecffc990ad780898
+6e91be2ad74cf7c5969314b2327b513532b1be09
+4ed6c7740ba93d75345397ef043f35c0562fb0fd
+d7dd35a86117e46d24914ef49ccd99ea0a7bf705
+10bf35bf98cfe555dfc03b5f03f2769d330e3af9
+a03448488950ee5bf50e9e1d744129fbba066c50
+1d30f813798c55ae4fe454829be6e2948ee841da
+54ce3ff2ab6e4465c2f94eb4d636183fa7878ab7
+fb8eb4a7b9b9602992e5982c9e0d6d7f7b8210ef
+9057044c0347fb9798a9b552910a9aff150385db
+5b4b84ce3518c8a14f57f5f95a1d07fb60e58223
+99facca6fc50cc30f13b7b6dd49ace24bc94f702
+49068538b7eef66b4254cc11914128097302fab8
+51dcb36a6c247189be4420562f19feb00c9487f8
+e03bda45248b4169e2a20cb9124ae60440cad2de
+f60a85bd35fa85739d712f4c93ea80d31aa7de07
+21b5af67618fcc047b495d2d5d7c2bf145753633
+023decb4c56f2e97d345593e4f7b89b667a6763d
+73d53a7c27716ae9a6d3484e78883545e53117ae
+a803453edd2b4a85b29da74dcc551b3c53ff17f9
+e90e12e77cab78ba8f8f657db2bf4ae3dabd5166
+44b91268fbbf62e1d2ba1d5331ec7aedac30dbe8
+4e37cd250130c6fd60e066f0c8efb3cbb778c421
+38c7f80a1e7fa1bdec632042318dc7cdd3c9aad4
+5dafab3c936763294257af73baf9fb3bb1696654
+a96c45ed3a44ad79a72499be238264ae38857988
+2696d3708d6c6cccbd701f0dac14cc94d72dd76d
+6b3e360b80268fda4e37ff39b7f303e3684e8719
+60e2b9b2e0db3089237d0208f57b22a3aac932c1
+df767f62a6bf3b09e6417d801726f2d5d642a202
+41c42cb001f34c43d4d8dd8fb72a982854e173fb
+aadfcaf601630bdc2af11c00eb34220da59b7559
+f79c97e7c3f9a98cf6f4a5d2431f149ffacae48f
+e52f57a7de675d14aed28e5d0f2f3c5a01715337
+ab989225a55a2ddcd3b60a99672e78e4373c0df1
+cbfcd1ec8aa30e31faf205c73d350d447704afee
+dc1510110c23f7b509035a1eda22879ef2506e61
+d1ee9e63c8826a39d75fa32711fddbcc58d5161a
+db67edbaeb78e1dd734784cfaaa720ba86ceb6d2
+3a0558ebfde592bd8bd07cb72b8ca8f700715bfb
+6c7a42b4f43b3a2f9b250f5803b697857b1444ac
+44d93039eec244083ac7c46577b9446b3a071f3e
+f68ed499e9d41f9c3d16d843db75dc12833d988d
+f58d584c4ac93b4e7620ef6e5a8f20c6f6da295e
+764882e6779fbee29c3d87e00302befc52d2ea8d
+1951dc9dd4601168ab5acf4c14043b124a8e2f67
+dc964b9c7242a985eb255b2410a9c45981c2f4d0
+0532cbcf616f27e5f6a4054f818d4992b99d201d
+fac8cff9052fc5fab7d5ef114d1342daba5e4b82
+8acdc4be8274e5d189fb67b841c25debf5223840
+ad77056780328bdcc6b7a21bce4ddd49c49e2013
+814369f171337ee1d8809446b7dbfc5e1ef9f4b5
+4972aadcce369a8c0029e6dc2f288dfd0241e144
+8fb2ec3bbd862f680be05ef348b595e142463524
+684f5166d8147b59d9e0938d627beff8c9d208dd
+06b4e41185734f70ce432fdb2b121a7eb01140af
+dd0086da7c4efe61abb70dd012538f5deb9a8d16
+3e59d97d42f36fc96d33a5658951856a555e997b
+bc9003ad368cb79d8a8ac2ad025718da5ea36bc4
+40c1de7b1b0a087c590537df55ecd089c86e8bfc
+32f62da99ec9f58dd93e3be667612abcf00df16a
+81f101cea3c451754506bf1c7edf80a661fa4dd1
+dfecaedeaf618041a5498cd3f0942c15302e75c3
+5ba7882700718e996d576b58528f1838e5559225
+7f68a5429f150f9eb7550308bb47a363f2989cb3
+cc2a9f4be1e465cb4ba702539f0f088ac3383834
+c8585c95215bc53e28edb740678b3a0460ca8aa4
+da23d90bacf246b75ef752a2cbb138c4fcd789b7
+d31af74425719a3840b496b7932e0887b35e9e0d
+a5acda0e8c0937bfed013e6382da127103e41395
+df87193e15a19d5620f5a6458b05fee0cf03729f
+eece52bd0ed4d7925c49b34e67dbb6657d2d649b
+f3df296de36b7c114451865778e211350d153727
+7cf579088e0456d04b531da385002825ca6314e2
+a939e287feb3166983e36b8573cd161d12097ad8
+97137d5154a9f22a5d9ecc32e8e2b95d07a5a571
+938ae9597f71a21f2e47287cca318d4a2113feb2
+e4d8ba577cabcb67b4e9e1260573aea708574886
+ebc3d7f50231cdb18a8107433ae9adc7bd94b97a
+196c12571ab51273f44ea3469d16301d5b8d2828
+abba1bf1348a6f1b70a26aac237338ee66764458
+af3b803188344971aa89fee861a6a598f30c6f10
+6af75a8572965207c2b227ad35d5c61a5bd69f45
+9d24812d942e69f86279a26932df53c0a68c4111
+1bdef21f093c41df2682a07f05f3548717c7a3d1
+8a866bc0d925dfd8bb10769b8b87d7d0ff01774d
+3cd380bd0f3b164b44c49e3b01f6ac9798b6b6f9
+857c64060963dd8d28e4740f190d321298ddd503
+540b39ba1b8ef06293ed793f130e0483e777e278
+b8ebda42e272d3617375118542d4675a0c0e501d
+c1c2775e19d6fd2ad6616f69bda92ac8927106a2
+2d3af3ee03793f76fb8ff15e7d7515ff1e03f34c
+4b0cb10c6c3f2d581ac9eb654412f70bc72ed661
+97cf04eaf1fc0ac4de0f5ad4a510d57ce12544f5
+cfa931e6728a825caada65624ea22b840077f023
+f87b22e7f0c66225824a99cada71f9b3e66b5742
+d69719b42ee53b666e56ed476629a883c59ddf66
+d916602f694ebb9cf95d85e08dd53f653b6196c3
+8dcc95debd07ebab1721c53fa50d846fef265022
+aa5a7a9900548a1f1381389fc8695ced0c34261a
+48a402593ca4896ac34fbebf1e725ab1226ecdb7
+ef23e82180508606a3ab8d9a30205b5e3c0daf67
+5760d29574d78e79e8343b74e6e30b3555e48676
+8eb40d0a0a1339469a05711f532839e8ffd8126c
+f7a271acccf9ec66c9b114d36eec284fbb89c7ef
+a082c77e9a6c2e2313d8255e8e4c0677d325ce3e
+c9367ed83156d4d682cefc59301b67f5460013e0
+bf37a81d572bb154581845b65a766fab1e5c7dda
+c997744db532767ee757197491d8ac28d10f1c0f
+d36a1e4637618304c2093f72702dcdcc4dcd41d1
+ce70dd0d613b840754dce528c14c0ebadd20ffaa
+525da67fb524d46f2afa89478cd482a68be8a42b
+b5f9180666924a3215ab0b1faf712e70b353444d
+f3cdd2c3180aa2bf08320ddd3b9a56f9fe00e72b
+60c24e44fce158c217d25c1bae9f880a8bd19fc3
+2dbc57abf3ceda80827b85593ce1f457b76a870b
+592f14f4b12225fc691477a180a2a3226a5ef4f0
+81513764b73dae486a9d2df28269c7db75e9beb3
+be48b5dcd10ab834cd68d5b2a24187180e2b408f
+ec1bec7344d07417fb04e509a9d3198da850349f
+a313851ed00074a4a6c0fccf372acb6a68d9bc0b
+2c93c8da5dfe5c50119949881f90ac5a0a4f39fe
+176e6ba56e04c98e1997ffdef964ece90fd827b4
+9e2ab407ff36f3b793d78d9118ea25622f4b7434
+9ce0d64125fbaf625c466d86221505ad2aced7b1
+df6e68db278bedf5486a80697dec6623958edba8
+7d45f1878d8048f6b3de5b3ec912c49742d5e968
+610779e90b644cc18696d7ac7820d3e0598e24d0
+3b350afd8b82487aa97097170c269a25daa0c82d
+ee815f60dc4a090fa9fcfba0135f4707af21420d
+7ee7b0602ef517b445316ca8aa525e28ea79307e
+74dbe6e0486e417a108923295c80551b6d759dbe
+81b0550c58e7409b4f1a1cd7838669cfaa512eb3
+f3553148e322f4f64545d6667dfbc7607c82703a
+f9bce7bd7909f1c75dbeb44900d374bc89072df0
+265a88a8805f6ba3efae3fcc93d810be1ea68866
+84508e846af3ac509f7e1d74b37709107ba48bde
+ab2b09b65fdc91a711e424524e666fc75aae7a51
+318e7e6daa0a799c83a9fdf7dd6bc0b3e89ab24a
+4983076c1a8b80ff5cd68b924b11df58a68b6c84
+a98ff1c2e3c22e3d0a41a2718e4587537b92da0a
+a6ce1a1de164f41cb8999c728bceedf65d66bb23
+a88ced67f4ed7940c76b666e1c9c0f08b59f9cf8
+f6cf2108ec9d0f59124454d88045173aa328bd2e
+73b90573d272887a6d835ace89bfaf717747c59b
+1c0acf9c2f2c43be47b34acbd4e7338de360e555
+ef940b76e40e18f329c43a3f545dc41080f68748
+ef559d5f02e43534168fbec86707915a70cd73a0
+4ef09fe9f7fa027427414cf1f2e9050ac7f5e34d
+e45a556df61e2357a8f422bdf864b7a5ed3b8627
+bc08dfa22949fbe54e15b1a6379afade71835968
+bef4df99e1dc6f696f9b3732ab6bac8e85d3fb3c
+85e78aa374d85f9a61da693e5010e40decd3f986
+de92951ea021ec56492d76381a8ae560a972dd68
+116f9e9cda25ff3187bc777ceb3ecd28077a7eca
+03c56c176ec6377dddb6a96c7b2e95408db65a7a
+c631a31be2c793d398175ceef7daff1848bb6408
+1ef1f33c48bc159881c5c8536cbbd533d31b0e9a
+f85ccab7173e543f2bfd4c7a81fb14e147695740
+d62d82c312c40437bc4c1c91caedac2ba5beb292
+858b51a8a8aa082732e9c7fbbd1ea9df9c76b013
+75ce75c1a5c35ecdba99dd8b7ba900d073e35f78
+de45bf9e5593a5549a60ca01f2988266d04d77da
+4da4e58072c15904d4ce31076061ebd3ab1cdcd5
+744fa8062d0ae1a11b79592f0cd3fef133807a03
+d264dedfdca8dc4c71c50311bcdd6ba3980eb331
+aeaf5dbb3608922246c7cd8a619541ea9e4a7028
+1feeab271621128fe864e4c64bab9b2e2d0ed1f1
+77d929b3c4bf546557815b41ed5c076a5792dc6b
+ab8ecf98f457e29b000c44d49f5bf49ec92e571c
+daca9d03c1c951ed518248de7f75ff51e5c272cb
+24603ed946cb9385ec541c86d2e42db47361c102
+b82f89d6ef94d26bf4fec4d49437346b727c3bd4
+c37de914c6e9b743d90e2566723d0062bedc9e6a
+b2b535118c5c4dfcc96f547274cdc05dde629976
+dec76940896a41a8a7b6e9684df326b23737cd5d
+99d06fe2f4d6d76acf40b6da67c5052e82055f5a
+0ba1d855cd38b6a2c52860ae4d1a85198b304be4
+03fe3d031afdcddf38e5cc0d908b734884542eeb
+c0c0b8558b17aa20debc4611275a4c69edd1e2a7
+4cfe921ac4650470b0473fd52a2b801f4494ee64
+6fdc0bc13f2517061eaa1364dcf853f36e1ea5ae
+715b69575dadd7804b4f8ccb419a3ad8b7b7ca89
+20a0f71d2c667f3c69df18f097f2b5678ac7d214
+1297ee7a41aa4e8499c7ddb3b1fed783eba19056
+721d9c387ed382988fce6fa864446fed5fb23173
+d116bac3b6ad77084c12bea557d42ed4c9d78433
+7a97de9460d679efa5a5b4c6f0b0a5ef68b56b3b
+5ed66fb992bfefb070b5c39dc45b6e3ff5248c10
+e14cc2715b806288fe457d88c1ad07ef55c65318
+835e510fcf22b4b9097ef51b8d0bb4e7b806bdfd
+25982e2bef817ebde7be5bb80b22a9864b979fb0
+c47bd9f6eb255da525dbcdfc111609c90bc4d2ae
+1f5f67d315c9dad341d39129d8f8fe7fa58e564c
+d57982dc55dbed3d0f89589e319dc2d2bd598532
+714d487571ca0d676bad75c8fa622d6f50df953b
+b11b71b704629357fe13ed97b216b9554b0e7463
+c83a05de1b4b20f7cd7cd872863ba2e66ada4d3f
+bdbba95e5abc543981fb557f21e3e6551a563b45
+779d3f0cf74b7d33344eea210170c7c981a7e27b
+972b1a7ef8cc9c83c2c6d8d126f94f27b567d7d0
+a9215666b4bcdf8d510de8952cf0d55b635727dc
+91495c689e6e614247495c3f322d400d8098de43
+8bed7ff2f75d956652320270eaf331e1f73efb35
+2a826273e856939b58be8779d2136bffa0dddb08
+53c36186bf0ffbe2f39165a1824c965c6394fe0d
+c900e0ad4c95948baaf0acd8449fde26f9b4952a
+a168ca2e199121258fbb2b6c821207456e5bf994
+4e43408a59852c1bbaa11596a5da3e42034d9380
+fdbacf2ff0fc21e021c830cdcff7d347f2fddd8e
+ce75deb5c645eeb08254e9a7962c74cab1e4c480
+9f43caad22803332400f498ca4dd0429fe7da0aa
+2a7058a720fa9da4b9b607ea00bfdb63652dff95
+ea1303f6746f815b7518c82c9c4d4a00cd6328b9
+914d7527678b514e3ee9551655f55ffbd3f0eb0a
+aae742779e8b754da7973949992d258d6ca26216
+b41d585246360646c677a8238ec35e8605b083b0
+7acbf0b060e948589b38d5501ca217463cfd5c2f
+1251deae1b4a722a2155d932bdfb6fe4ae28dd22
+23ee7b7a9ca5948e81555aaf3a044cfec778f148
+776362314f1479f5319aaf989624ac604ba42c65
+bfdafe932f93b01632a5ba590627f0d41034705d
+ee1f9637f372d2eccc447461ef834a9859011ec1
+9b9f6e5eb6d7fa50300d67502e8fda1006594b84
+b0b944b3a783c2d9f12637b471fe1efb44deb52b
+82417d8ec8ac6406f2d55774a35af2a1b3f4b66e
+bc9bad25f8149318314971d8b8c170064e220ea8
+053931267af79a89791479b18d1b9cde3edcb415
+b6620027b441131a18f383d544779521b119c1aa
+90221884fe2643b80203991686af78a9da0f9791
+48a5b6ee60475b18411a910c6084b3a32147b8cd
+53ce84598052308b86ba79d873082853022aa7e9
+be4f7679797777f2bc1fd6aad8af67cce5e5ce87
+b7774c096dc18bb0be2acef07ff5887a22c2a848
+0e93a5a7f6dbdb3802173dca05717d27d72bfec0
+5fea26746f3140b12317fcf3bc1680f2746e172e
+dc5d9399b3796db7fd850990402dce221b98c8be
+c88c21eb9a8e08b66c981db35f6556f4974d27a8
+e3a6e5a573619a97bd6662b652ea7d088ec0b352
+16de1324459fe8fdcdca80bba04c3c30bb789bdf
+46c82cfadd9f885f5480b2d7155f0985daf949fc
+be437b53a376085b01ebd0f4c7c6c9e40a4b1a75
+32e9c9520cf6acb55dde672b73760442b2f166f5
+55a7286f014cc6b51a3f50b1e6bc8acc8166f231
+7574f999d2325803f88c4915ba8f304cccc232d1
+450c6a57f19f5aa45626bb08d7d5d6acdb863b4b
+641f0989b87bf7db67a64900dcc9568767b7b50f
+9aab33ce8d6786b3b77900a9b25f5f4577cea461
+fa32b29e627086d4302db4d30c07a9d11dcd6b84
+af4745a3c3c7b51dab0fd90d68b53e60225aa4a9
+a325d5ea42a0b6aeb0390318e9f65f584bd67edd
+dac8fc521dfafb2d082faa4697f491eae00472c7
+c3beae515f38daf4bd8053a7d72f6d2ed3b05d88
+d066575b48b552a38e63095bb1f7b56cbb1fbea4
+706b9767a444de4fe153b2f3bff29df7674c3161
+fffe5ab3351deab81f7562d06764551422dbd9c4
+5fe3a9d54d5070308803dd8ef611594f59805400
+def934edb7c7355757802a95218c6e4ed6122a72
+071ec4f3fb4bfe6ae9980477d208a7b12691710e
+d79365336115661b0e8dbbcd4b2aa1f504b91af6
+d666ce9d783a2d31550a8aa47da45128a67304a7
+b13e2e43672e66ba45d1b852a34737e4ce04226b
+ab1719f573a6c121d7d7da5053fe5f12de0182e7
+79dc84a3bf76f1cb983902e2591d913cee5bdb0e
+fbc9ba70e36768efff130c7d970ce52810b044ff
+46f48211716062744ddec5824e9de9322704dea1
+784a83437b3dba49c0d7ccc10ac40497b84661a5
+824d1db06e1c25f7681e46199fd02cb5fc343784
+20d6a4aaf5abf2925fdce2780e38ab1771209f76
+588bed36b3cc9e2f26c39b5d99d6687f36ae1177
+73ba33e933e834b815f62a50aa1a0e15c6547e83
+ef999ab2f7b37f46445a3457bf6c0f5fd7b5689d
+94a11b601af77f0ad46338afd0fa4ccbab909e82
+2f88d3189723669f957d83ad542ac5c2341c37a5
+f39783847499dd56ba39c1f3b567f64dfdfa8527
+b5747ecfa0f3be0adaad919d78763b1133c4d662
+bb7f2c5d84797742f1d819ea34d1f4b4f8d7c197
+8d0bc14589dea1f4f88914ffcb57a5c54830f2cc
+633101e794d7b80f55f466fd2941ea24595e10e6
+d01303062b21cd9ff46d5e3ff78897b8499480de
+07a31bd7a0bd7118f8ac0bc735feef90e304fb08
+1910f5f7ac81d4fcc30284e88dee3537887acdf3
+7923742e2af655dee4f9a99e39916d164bc30178
+44b1399e8569a29eed0d22d88767b1891dbcf987
+dc107e7322f7059430b4ef4991507cb18bcc5d95
+f0f0e94d333b4923ae42ee195df17c0df62ea0b1
+7e8c8b1d72c67e2e241184448715a8d4bd88a727
+f7b4bc4ef14349a6e66829a0101d5b21129dcf55
+d1d6f1d64a04af9c2e1bdd74e72bd3ffac329576
+bc27434e376db89fe0e6ef2d2fabc100d2575ec6
+1b4f6f73c70353869026e5eec1dd903f9e26d43f
+7f904093e6933cab876e87532111db94c71a304f
+3f5693584d7dab13ffc12122d6ddbf862783028b
+710011644006c18291ad512456b7580095d628a2
+34ce703b7e79e3072eed7f92239a4c08517b0c55
+dbb9601a1d2febcce4c07dd2b819243d81abb2c2
+eac97959f2fcd882e8236c5dd6035870878eb36b
+a6e8a8bb99e30a9e80dbf80c46495cf798066105
+e506cdb250eba5e70c5147eb477fbd069714765b
+9b6d9f0923e1d42c86a1154897b1a9bd7ba6716c
+1a81c722727299e45af289d905d7dcf157174248
+e1d726d812554f2b2b92cac3a4d2bec678969368
+79f6a8f777a11fd626185ab549079236629431ac
+aeff403079022683b233decda556a6aee3225065
+cd23dc3227ee2a3ab0f4de1817d03ca771267aeb
+a8154d043f187c6640cb6aedeaa8385a323e46cf
+e3b9863e583171ac9ae7b485f88e503852c747b6
+7914c3f510e84a3d83d66717aad0d852d6a4d148
+42eda7c20db9dc0f42f72bb997dd191ed8499b10
+46e0703044811c941f0b5418139f89d46b360aa3
+7fcecaef60a681c47f0476e54e08712ee05d6154
+5bb4fd87fa4a27ddacd570aa81c2d66eb4721019
+53f5cb365806c57811319a42659c9f68b879454a
+477236563c6a6c6db922045453b74d3f9535bfa1
+91e17338a12b5e570907e816bff296b13177971e
+346752e3ab96c93483413be4feaa024ccfe9499f
+da4170c862d8ae39861aa193667bfdbdf0ecb363
+8de5dc782178114d9424d33d9adabb2f29a1ab17
+9a59abdf3460970de53e09cb397f47d86744f472
+99d7678039ad96ee29ab520ff114bb8021222a91
+6d07e176c754ac42773690d4b4919a39df85d7ec
+4a733a0862bd5f7be73fb4040c1375a6d17c9276
+1d4c25f9f8f08f5a756d6f472778ab54a7e6129d
+bed06e7ff0b510b4a1762283640b4233de4c18e0
+09137e3c267a3414314d1e7e4b0e3a4cae801f45
+682760f2f767fb47e1e2ca35db3becbb6153756f
+9c23859ec7313f2e756a3e85575735e0c52249f4
+869a2fbe42d3fdf40ed8b768edbf54137be7ac71
+70e14e216b12bed2211c4df66ef5f0bdeaffe774
+f4b5a8f6462a68e79d643648c780efe588e4b6ca
+345cc31c85e19cea9f8b8521be6a37937efd41c2
+6359fcb0b4546979c54818df8271debc0d653257
+cf2e1ebb9609f46af6de0c15b4f48d03e37e54ba
+9d3377313759dfdc1a702b341d8d8e4b1469460c
+b14b672e09b5b2d984295dfafb05604492bfaec5
+720763bcb5e0507f13a8a319018676eb24270ff0
+0fd1bffb171699a968c700f206665b2f8837d953
+67af3ec65f1dc535018f3671624e72c96a611c39
+82a0a5d0785fb2c2282ed901a15c3ff02f8567df
+f113aed343bcac1021dc3e57ba6cc0647a8f5ce1
+a6902db7972a7631d186bbf59c5ef116c205b1e8
+44855e53801d09763c1fb5f90ab73e5c3758a728
+121503705689f46546cade78ff62963574b4750b
+4113269f916117f975d5d2a0e60864735b73c64c
+b85c198ce09ffc4037582a544c7ffb6ebaeff198
+c6724c2bb7f491c92c8dd4a1f01a80b82644b793
+4f064c2a0ef0849eed61ab816ff0c2ff6d9d7308
+b49affdff167f5d170da18de3efa6fd6a50262a2
+e19fb22b35c352f57f520f593d748096b41a4a7b
+99c20eb5433ed27e70881d026d1dbe378a12b342
+642a386c451e94d9c44134e03052219a7512b9de
+3779e0599481f11fc1acee60d5108d63e55819b3
+a6eb6ad9142130406fb4ffd4d60e8348c2442c29
+eb70c38a350d13ea6b54dc9ebae0b64171d813c9
+ed07856461da6c7afa4f1782b5b607b45eebe9f6
+a2e0966f303f38b58b898d388d1c83e40b605262
+6a4419ce2338ea30a570cf45624741b754fa52cb
+c146aa6d56233ce700032f1cb179700778557601
+4db9e5f19366fe5d6a98ca43c1d113dac823a14d
+cbbd13c29d042743f0139f1e044b6bca731886d0
+4dca3d6341e1d991c902492952e726dc2a443d1c
+8e0becfc5fe3ecdd2ac93fabe34634827b21ef2b
+7cffcb4f24343a924a8317d560202ba9ed26cd0b
+228ea13041910c41b50d0052bdce924037c3bc6a
+c98983592777952d1751103b4d397d3ace00852d
+eb8519cec0d7a781923f68fdca0891713cb81163
+dde5125baefa1141f1ed50479a3fd67c528a965f
+5fff61302adc65d554d5db3722b8a604e62a8377
+6193c833ad25ac27abbde1a31c1cabe56ce1515b
+047bb1b1bd1f19b6c8d7ee7d0324d5ecd1a3efff
+d2eb1079552fb736e3ba5e494543e67620832c52
+62fd622b3ca97eb5577fd423fb9efde9a849cbef
+93af36da08bf99e68c9b0d36e141ed8154455ac2
+ea079334121a0ba89452036e5d7f8e18f6851519
+b55e70df03d9b80c91446a97957bc95772dcc45b
+fe7c0bafbd9a28087e0169259816fca46db1a837
+b15a06d701f0a7f508e3355a09d0016de3d92a6d
+0647c9d56cf11215894d57d677997826b22f6a13
+28e1982d20b6eff33989abbef3e9e74400dbf508
+bc811a66855aae130ca78cd0016fd820db1603ec
+af9419f2155785961a5c16315c70b8228435d5f8
+8a0159919ee4e1a9f4cbfb652a1be212bf0554fd
+407806f5fe3c5ecc2dc15b75d3d2b0359b4ee7e0
+3ff79cf6df1937949cc9bc522041a9a39d314d83
+e8aa1f207b4b0bb710f79ab47a671d5639696a56
+cb522b2e16b11dde48203bef97131ddca3cdaebd
+fea83550a21f4b41057b031ac338170bacda8805
+e293a31260cf20996d12d14b8f29a9d4d99c4642
+dac34b590adddef2fc31f26e2aeb0059115d07a1
+845f45f8412905137bf4e46a0d434f5856cd3aec
+cc9d068cf6c4a30da82fd6350a348467cb5086d4
+b3cb91a08be4117d6efe57251061b62417867de9
+d77f18917a58e7d4598d31af4e7be2762d858370
+e9363f4368b04aeaa6d6617db0a574844fc59338
+d458c49a5e34263c95b3393386b5d76ba770e497
+b97f694c2a111b5b1724eefd63c8d64c8e19f6c9
+134f1cee8408cca648d8b4ca44b38b0a7023af71
+9c59bb28054eee783a40b467c82f38021c19ff3e
+518a3ce2a290352afea22027b64bf3950bffc65a
+ffe4bb47ec15f768e1744bdf530d5796ba56cfc1
+e3c8e49ffa7beceffca3f7f276c27ae6d29b35db
+e20e2db743e8db1ff61279f4fda32bf8cf381f8e
+dc3dc18b6831c867a8d65da130a9ff147a736745
+7783095a565094ae5b3dccf082d504ddd7255a5c
+54948ee407b5d32da4b2eee377cc44f20c3a7e0c
+a532cfc69259254192aee3fc5be614d9197e7824
+abdd17e411a7bfe043f280abd4e560a04ab6e992
+f6f06be05981689b94809130e251f9e4bf932660
+bf3bf5400b617fef2825eb987eb496fea99804b9
+15136c2f94fd29fc1cb6bedc8c1831b7002930a6
+3e9ab40e6e23f09d16c852b74d40264067ac6abc
+81a80b26979b40d5ebe3f5ba70b03cb9f19dd7a5
+9989ad33b64accea8042e386ff3f1216386ba7f1
+20111924fbf616a13d37823cd8712a9c6b458cd6
+5134353bd01c4ea36bd007c460e8972b1541d0ad
+e6da1fcd2a8cda0c69b3d94812caa7d844903007
+1921795408345751791b44b379f51b7dd54ebfa2
+96e1ccfe96566e3c96d7b86e134fa698c01f2289
+b166ce267ddb705e6ed855c6b679ec699d62e9cb
+972e044f69443dfc5c987e29250b2b88a6d2f986
+e6d6203fa911429d76f026e2ec2de260ec520432
+f1aa120fb720f6cfaab13aea4b8379275e6d40a2
+8451bf3dd6bcd946be14b1a75af8bbb65a42d4b2
+d340a135a55ecf7506010e153d5f23155dcfa7e8
+ef032afa4bdb18b328ffcc60e2dc5229cc1939bc
+f92ade569cbe54344ffd3bb25efd366dcd8ad659
+dcb6f06631021811091ce691592b12a237c12907
+01c4cf9c7c08f0ad3f386d88725da564f3c54679
+f5eb0cf9c57716618fab8e24e841f9536057a28a
+9825c4dddeb2ed7eaab668b55403aa2c38bc3320
+9806d3dc7805dd8c9c20d7222c915fc4beee7099
+93c0405b1f5432eab11cb5180229720604ffd030
+4aa093d1986b4ad9b073ac9edfb903f62c00e0b0
+a961f1234e963a7945fed70197015678149b37d8
+9f131b4e036208f2402182a1af2a59e3c5d7dd44
+e049d3db7c59f8173aa91dd4bd1bd0beebdaa260
+d89cfed36ce8ffdb2097c2ba2dac3e2b2501100d
+660c99ac408b535bb0468ab3708d0d1d5db30180
+3965d61c4f3b72044f43609c808f8760af8781a2
+96f0e7416994035c91f4e0dfa40fd45090debfc5
+f4d30896c5f808a622824a2d740b3130be50258e
+d6ae7941dcec920d5726d50d1b1cdfe4dde34d35
+5de9670f72d10682bf2cb3156988346257e0489f
+69adbfa7b0b886caac15ebe53b89adce390598a3
+a92c207031b0778572bf41803dba1a21076e128b
+c18a03568d4b512a0d8380cbb1fbf6bd56d11f05
+7fb5006b6522436ece5bedf509e79bdb7b79c9a7
+c4f3185f010027a0a97fcb9753d74eb27a9cfd3e
+eac6aee477446a67d491ef7c95abb21867cf71fc
+71ca8b6e84c17b3e68f980bfb8cddc837100f8bf
+70d2ab1af0edd5c0a30d576a5d4aa397c4f92d3e
+3e4acf3f2d112fc6516abcdddbe9e17d839f5d9b
+79db191ca1268dc88271abef3179c4fe4ee92aed
+c07ab025d9e3c885ad5386e6f000543efe091c4b
+9ff931ca721d50e470e1a38e583c7b18b6cdc2cc
+47e14fdc6685f0b3800f709c32e005068dfc8d47
+06518858bd99cddf9bc9200fac5311fc29ac33b4
+178a82e3a0541fa75c6a11350be5bded133a59fd
+5dd3c9ac3c6d826e17c5b378d1575b68d02432d7
+1de23d7fe718d9fab0159f58f422099e44ad3f0a
+0a60d9d62620e4f9bb3596ab7bb37afef0a90a4f
+e40cb4369c6402ae53c81ce52b73df3ef89f578b
+d99b5ee3e2d7e3a016fbc5fd417304e15efbd1f8
+dbe255d3d2a5d960daaaba71cb0da292e0af36a7
+21959bc56a160ebd450606867dce1462a913afab
+2717b044ae9933f9ab87f16d6c611352f66b2033
+04317e63c08e7888cef480fe79f12d3c255c5b00
+2f17f6c460e02bd105dcbf14c9b73f34c5fb59bd
+166186e551b75c9b5adcc9218f0727b73f5de899
+0857281a3b6a5faba1405e2c11f4e17191d3824d
+653d19e64bd75648cdb149f755d59e583b8367e3
+4007bf090887d8a0e907ab5e17ecfcdbbdafc2e4
+edfce091688bc88389dd4877950bd58e00ff1253
+917bea27af1846b649e2bced624e8df1d9b79d6f
+e22adcd2a6a7544f017ec875ce8f89d5c59e09c8
+3c1aef7c2d32a219bdbc89a44d158bc2695e360a
+1e3068886b138304ec5a7296702879cc8788143d
+54ba18952fe36c9be9f2ab11faecd43d123b389b
+a95dc0c4a9d882a903ce8c70e80399f38d2dcc89
+2e5cfa97f3ecc10ae8f54c1862433285281e6a7c
+f65b47093e4d45013f54c3ba09bbcce7140af6bb
+c3285a1d6ec6972156fea9e6dc9a8d88cd001617
+2004afb2276a169cdb1f33b2610c5218a1e47332
+3a9681e2e07be7b40b59c32a49a6ff4c40c962a2
+e988be047b28ba3b2f1e4cdba3e8c94026139fcf
+d4e669d5d35fa0ca9f8d9a193c82d4153f5ffc4e
+8e3d0b401dec8818cd0245c540c6bc032f169a1d
+2ab034e1f54c37bfc8ae93f7320160748310dc73
+53507e2de66eaba996f14fd2f54a5535056f1e59
+9788b491ddc188941dadf441fc143a4075bff764
+661ca4bbb49bb496f56311e9d4263dfac8eb96e9
+b68150bfdec373ed8e025f448b7a3485c16e3201
+3a3f75e0ffdc0eef07c42b470593827fcd4020b4
+ac86ccc16d555484a91741e4cb578b75599147b2
+ee56823f2f00c8c773e4ebc725ca57d2f9242947
+3f5e8f884e71310d7d5571bd98e5a049b8175075
+270acff7916589a6cc9ca915b0012ffcb75d4899
+7caa3a74313f9a7a2dd5b4c2cd7f825d895d3794
+40273657e6919455373455bd9a5355bb46a7d614
+be28ed1be084385f5d389db25fd7f56cd2d7f7bf
+2c424f21607ff6c92e640bfe3da9ff105c08fac4
+aa1129780cc496918085cd0603a774345c353c54
+f1280f76933ba8b7f4a6b8662580504f02bb4ab6
+9649a19b49607459cef32f43db4f6e6727080bdb
+765be0c44a67e41e0f8f0b5d8a3af0ff40a00c7d
+20b994a78cd1db6ba86ea5aab7211574df5940b3
+68484ae8a042904a95a8d284a7f85a4e28e37513
+6ba3cb67bcdb7aea8a07e144c03b8c5a79c19bc0
+7143518f847b0ec57a0ff80e0304c89d7e924d9a
+8e36100cb144685c26e46ad034c524b830b8b2f2
+8dd9c97b85e883c16e5b1ec260f9cd610df52dec
+9487cea80f23afe9bccc94deebaa3eefa6affa99
+43fce0c6b11eb50f597aa573611ac6dc47e088d3
+7c66e7f357553fd4b362d00ff377bffb9197410e
+5e9ec3b8daa95d45138e30c07321e386590f8ec7
+aafeb3d76155ec28e8ab6b4d063105d5e04e471d
+032825000c03b8ab4c207e1af4daeb1f225eb025
+fb54d3c37dc82891ff9dc7dd8caf31de00c40d6a
+3fb4bf38d34f7f7e5b3df36de2413d34da3e174a
+95ea564bd983129ddb5535a6741e72bb1162c779
+4e30107ee6a2e087f14a7725e7fc5535ec2f5a5f
+7c825562b3ff4683ed049a372cb6807abb09af2a
+d5d7e89e6210fcbaa52dc277c1e307632cd91dab
+cf805d478aeb53520c0ab4fcdc9307d093c21e52
+b1d89015f9b16515735d4140c84b0bacbbef19ac
+c6f3399edb73cfba1248aec964630c8d54a9c534
+5fa04523ff13a82b8b6612250a39e1edb5066521
+ede5982980aa76deae8f9dc5143a724299d67742
+f5eb411217f729ad7ae84bfd4aeb3dedb850206a
+3a3e55cf5bfd689d6c922e082efa0cd71cd2ae5c
+59e9934720baf3c5df3a0e1e988202856e1f83ce
+25ff865460c2b5481fa4161749d5da8501010aa0
+7f5b379b12505d60f9303aab1fea48515d36d098
+8f71c97206a03c366ddefaa6812f865ac6df87e9
+aab3561acbd19f7397cbae39dd34b3be33220309
+9ac43a98fe6fde668afb4fcc115e4ee353a6732d
+636b8ffc09b1b23ff714ac8350bb35635e49fa3c
+84fa126cb19d569d2f0147bf6f9e26b54c9ad4f1
+c7de0c85432ad17a284b5b97c4f36c23f506d9d1
+13604bbdb6f04a71dea4bd093794e46730b0a488
+d790093cb85fc556c0089610026e0ec3466ab845
+aa6e8a2a9d3ed59d2ae72add84176e7b7f4b2912
+dc9d62087ff93a821e6bb8a15a8ae2da3e39dcdd
+5f7c4c20ae2731bfb650a96b69fd065bf0bb950e
+cd2c54705c455a4379f45eefdf32d8d10087e521
+0da75b0d341c8f945fae1da6c77b6ec345f47f2a
+b5968e7bb23f5f03213178c22fd2e47af3afa04c
+3e0a1884448bfd7f416c6a45dfcdfc9f2e617268
+0aaf785d7f21d2b5ad582b456896495d30b0a4e2
+c97a5f2241cc6cd99ef0c4527ea507a50841f60b
+eb8a3948c4be0d23eb7326d27f2271be893b3409
+725c3605c2d26d113637097358cd4c08c19ff9e1
+2a5903bdb3fdfb4d51f70b77f16852df3b8e5f83
+ba7b12c8e2ff3c5e4e0f70b58215b41b18ff8feb
+ca096e158912080493a898b0b8a4bd2902674fed
+5f771fed91c8e4b666489ba2384d0705bcf75030
+dad6b36fd515bda801f3d22a462cc62348f6aad8
+2d7c2c015053fff5300515a7addcd74b523f3f66
+29db16efc3b378c50511f743e5197a4c0b9e902f
+cd63759842a56bd2ede3999f6e11a74ccbec318b
+893239f17dc2d17183410d8a98b0440d98fa2679
+e5dfd17dbfc9647ccc7323a5d62f65721b318ba9
+cfaf61bacf61901b7e1ac25b779a1f87c1e8cf7f
+87b607b8d4858a16731144d17f457a54e488f15d
+be7444c891caf295d162233bdae0e1c79791d566
+ffc81ced9ee8223ab0adb18817321cbee99606e6
+4b9ec224949c79a980a5a66664d0ac6233c3d575
+c7c53d75f6e963b403057d8ba5952e4974a779ad
+4a3758f283b7c484d3f164528d73bc8667eb1591
+3dce635ce4b55fb63fc6d41b38640403b152a048
+0cf2eecf20cfbcb7f153713479e3206670ea0e9c
+f2902f5956d7e2dca536d9131d4334f85f52f783
+ff012c56b9b1de969328dacd13e26b7138ff298b
+ebbceab4e15bf641f74e335b70c6c4490a043961
+0c6a566ebdac4bd14e80cd6bf4631bc7458e1595
+fde41dc4ec6ac6474194b99e05b43dd6a6c4f06f
+604a281100784b4d5bc1a6db993d423abc5dc8f0
+23edcd0d2011d9c0d421193af061f2eb3e155da3
+8355d095d3534ef511a9af68a3b2893339e3f96b
+b034cc919af30e96ee7bed769b93ea5828ae361b
+834736698f2cc5c221c22369abe95515243a9fc3
+c29fe5ed41d2240352fcb8d8196eb2f31d009522
+3337cfc3de2c16dee6f7cbeda5f263409a9ad81e
+d492dbfaa42b4f8b8a74786d7343b3be6a3e9a1d
+7c11fa4fd91cb57e6e216117febcdd748e595760
+c05a7c72e679745deab9c9d7d481f7b5b9b36bdd
+f374ac9307be5f25145b44931f5a53b388a77e49
+141cb9ee401f223220d3468592effa90f0c255fa
+f4251e02f87ac3fcae70bdb313f13ed16ff6ff0a
+414d78e32ac41e6ff8b192bc095fe55f865a02f4
+cfdc4d0f8e1b4b9ced35317d12b4229f2e3311ab
+ed32df6b122b15a52238777c9993ed31107b4bed
+b5f9306c3207ac12ac761e7d028c78b3009a219c
+51bb86dc8748088a198b216f7e97616634147388
+6a5d7d20a8c4993d56bcf702c772aa3f95f99450
+cb004e9706f12d1de83b88c209ac948b137caae0
+f231046d5f5d87e2ca5fae88f41e8d74964e8f4f
+a6e4f924cf9a12625e85c974f0ed136b43c2f3b5
+d6c8f5674030cf3f5a2f7cc929bad37a422b26a0
+b7894c1f805ffd90ab4ab06002c70de68d6982ab
+f2896dd2701fbb3564492a12c64f11a5ad456a67
+fe866887d3c26ee72590c440ed86ffc80e980293
+daa120032d8f141bc6aae20e23b1b754a0dd7d5f
+ac26166857e55fd5c64ae7194a169ff4e473eb8b
+13aef395f426ca8bd93640c9c3f848398b189874
+7195cb08ba2248f3214f5dc5d7881533dd1f46d9
+4b5ff8c67f3496a414f94e35cb35a601ec98e5cf
+4ab84f203b0e752be83f7f213d7495b04b1c4c79
+1f3f7df159c338884ddfd38ee2d3ba2e1e3ada69
+bc6de183cd8b2baeebafeefcf40be88468b04b74
+d4288daef6519f6852f59ac6b85e21b8910f2207
+efb24d35d8f6a46e1ff3800a2481bc7e681e255e
+aee3427d0814d8a398fd31f4f46941e9e5488d83
+0e192ca16ce1c967e21d62f9810591eed3d6904b
+4f37f71517420c93c6841beb33ca0926354fa11d
+cce332405ce9cd9dccc45efac26d1d614eaa982d
+a5f35880477ae82902c620245e258cf854c09be9
+9944c451b4a487940d3fd8819080fe16d627892d
+7117ed0be436c0291bc6fb6ea6db18de74e2464a
+62b3598b401c807288a113796f424612cc5833ca
+3cb057a24a8adba6fe964b5d461ba4e4af68af14
+68c1090f912b69b76437644dd16922909dd40d60
+c4fb2de4a5dc28710d9880aece321acf68338fde
+c00df53bd46f78ae925c5768d46080159d4ef87d
+68d2afd8c5c1c3a9bbda3dd209184e368e4376b9
+fdf8e293a7618f560e76bd83e3c40a0788104547
+759cf57215fcfdd8f59c97d14e7f3f62fafa2b30
+043efe5f465704ced8d71a067d2b9d5aa5b59c29
+14ee4948be56caeb30aa3b94968ce663e7496ce4
+3b73f8a2b39751efb7d7b396bf825af2aaadee24
+be632b206f1cd38eab0c01c5f2004d1e8fc72880
+8fda2f6b85c7e34d3e23927e501a4b4f7fc15b2a
+dbc8ffd6457147ff06cd3f56834e3ec6dccb2057
+c34532fe6bfbd1e6df477c9ffdbb043b77e7804d
+d7593148e4319df7a288180d920f2822eeecea0b
+ed0cf5f577f5030ac68ab62fee1cf065349484cc
+e853484dc585bed4b0ed0c5eb4bc6d9d93a16211
+87e592ee1a7e2d34e6b115da08700a1ae02e9355
+0a85afebaa19c80fddb660110a4352fd22eb2801
+c7f0c0636d27a1d45b8fcef37e545b902195d937
+4ccf64fc1c9ca71d6aefdf912caf8fea048fb211
+fd892e912149e3f5ddd82499e16f9ea0f0063fa3
+06c2086f7f72536bf970ca629151b16927104df3
+6dddf1440617bf7acda40d4d75c7fb4bf9517dbb
+3a2c90e0963bfb07fc7cd1b5061383e9a99c39d2
+8d5998cd984e7cce307da7d46f155f9db99c6590
+6f3054f182c34ace890a32fdf1656b583fbc7445
+803c92a3f0815dbf97e30c4ee9450fd005586e1a
+a7664247a37a89c74d0e1a1606a99119cffc41d4
+f7b422df567ce9813926461251517761e3e6cda0
+71c4b8e1bb25ee80f4317411ea8180dae6499524
+4b61d8490bf034a2ee8aa26601d13c83ad7f843a
+26a44feb7a64db7986473ca801c251aa88748477
+854b1f0581f5d3340f15eb79452363cbf38c04c8
+b40c001b3e304dccb28c745bd54aa281c8ff1f29
+a16fb74ea66025d1f346045fda00bd287c20af0e
+0951f42abbf649bb564a21d4ff5dddf9a5ea54d9
+c19222d138eb45903a3aa7e46030979d50769771
+6feafc5c1d8b0e9d65ebe4c1512b7860c538fbdc
+ec5c63609cf56496715b0eba0e906de3231ad6d1
+16d6737b50f969247339a6860da2109a8664198a
+31ea88f29e7f01a9801648d808f90862e066f9ea
+cd7a7be3804fd217e9f10682e0c0bfd9583a08db
+f0cee87e9ecedeb927664b8da44b8649050e1c86
+5a4ec5c79f3699ba037a5f06d8ad309fb4ee682c
+d0471d5907d6557cf081edf4c7c2296c3c221a38
+7361b900018f22e37499443643be1ff9d20edfd6
+2e9c780ee8145f29bd1a000585dd99b14d1f5894
+d278e020be85a1ccd90aa366b70c43884dd3f798
+017e94ad51c9be864b98c9b75582753ce6ee134f
+0cfca73806f443188632266513bac6aaf6923fa8
+b161d261fabb507803a9e5834571d56a3b87d147
+cfdc632adcb799dba14af6a8339ca761725abf0a
+c254b4c0f6d5a5a45680eb3742907ec93c3a222b
+eb4d2ec77fae67141f6cf74b3ed773997c2c0cf6
+ced7811f2b694e54e3d96ec5398e4b6afca67fc0
+6601a0906e503a6221d2e0f2ca8c3f544a4adab7
+1bd9dbe78918ed17b0a3ac40623f044cb3d3552c
+9961f1e5cf8fda29912344773bc75c47f18333a0
+8de6deefb90fb9b3f7d451b9d8a1a3264b768482
+7f44e5929b11ce2192c3ae81fbe602081a7ab5c4
+9e10ea753b9767aa2f91dafe8545cd6f44befd7f
+286a5c19a43382a21c8d96d847b52bba6b715a71
+33aa980544a9d627f305540059828597354b076c
+6b6ff9d55e1df06f8b3e6f257e23557a73b2df96
+26c8ed504f852eda4a2e63dbbbc3480e57f43c70
+1aa61dd85d3a5a2fe819cba21192ec4471c08628
+969626c52d30ea803064ddef8fb4613fa73ba11d
+66837add89caffd9c91430820f49adb5d3f40930
+07a328999666ef2dc28ce57bc1881d10e6f0b370
+b484141b99d3478a12b8a6854864c4b875d289b8
+80345fbb6bb6bcc5ab1a7adcc7979a0262b8a923
+adf62dfa00748381ac21634ae97710bb80fc2922
+c7745f941532b7d6fa70db09e81eb1167f70f8a7
+895081d6a5545ad6385bfc6fcf460fc0b13bac86
+7b47dd9302b3085cd6705614b88d7bdbc8ae5c13
+6a6269e591e11f41d59c2ca1e707aaa1f0d57de6
+3bcb93aa2a5e5eda039679516292af2f7c0ff9ac
+3266fbaaa317a796d0934b9a3f3bb7c64992ac7d
+58217ae5423828ed5e1569bee93d491569d79970
+eb6f2b5529f2a7bc8b5b03b1171f75a4c753a0b2
+c362116a358320e71fb6bc8baa559142677622d2
+23b37c2f803a2d4b701e2f39c5f623b2f3e14d8e
+ddbb6e0913ac127004be73e2d4097513a8f02d37
+12ebeb2176a5043ad57bc5f3218e48a96254e3e9
+f8162276f3b21a3873dde7a507fd68b4ab858bcc
+e1d1540a718bb7a933e21339f1a2d90660af7353
+f64574ee0e6247b84d573ddb5c6e2c4ba798ffff
+ecc4be938f0e61a9c6b5111e0a99013f2edc54b9
+7e600faee0ba11467d3f7aed57258b0db0448a72
+73dcb4c452badb3ee39a2f222298b234d08c21eb
+fa80344137c4d158bf59be4ac5591d074483157a
+0f7e9199dad3237159e985e430dd2bf619ef2db5
+ec90d333588421764dff55658a73bbd3ea3016d2
+7e56d9ebd47490bb06a8ff0bd5bcd8672ec52364
+bed8feb11e8077df158e16bce064853cf217ba62
+b3add9bc9e70b6b28ba31e843e9155e7c37f3958
+79c3a7131c6c176b02b97d368cd0cd0bc713ff7e
+53dd25350d3b3aaf19beb2104f1e389e3442df61
+b598f7761b153ecb26e9d08d3c5817aac5b34b52
+679b72d23a9cfca8a7fe14f1d488363f2139265f
+c91da328fe50821182e1ae4e7bcbe2b62496f8b9
+a758b744a6d6962f1ddce6f0d04292a0b5cf8e07
+0ba5369c5e1e87ea172089d84a5610435c73de00
+49358915ae259271238c7690694e6a887b16f7ed
+f3cf10c84c4665a0b28734f5233d423a65ef1f23
+72167c9e4e03e78152f6df44c782571c3058050e
+8a4893d825db22f398b81d6a82ad2560832cd890
+afdc303b3325fbc1baa9f18a66bcad59d5aa675b
+b69bcb5f73999ea12ff4ac1ac853b72cd5096b2d
+4f9e00aaf2736b79e415f5e7c8dfebda3043a97d
+db0379c9b02e514f10f778cccff0d6a6acf40519
+db3545a983ffd24c97c18bf7f068783102548ad7
+ac03849956ac470c41585d2ee34d8bb58bb3c764
+b91f54e1581fbbf60392364323d00a0cd43e493c
+9fdfe1695adac2380f99d3d5cb6879f0ac7f2bfd
+c84de67ec2a5d687869d0c3ca8ac974aaa5ee765
+95b9df34bcf4ae04beea55c11cf0cc4095aa38dc
+994f7c469219ccce59c89badf93c0661aae34264
+a38045ed82d6800cbc7a4feb498e694740568258
+51410d6bd9a41eacb105f15dbdaee520e050d646
+e0939b4518a5ad649ba04194f74f3413c793f28e
+60821d447e5b8a96dd9294a0514911e1141ff620
+eafda8a94e410f1ad53b3e193ec124e80d57d095
+629a973ca5f3c7d2f4a9befab97d0044dfd3167a
+395bf182983e0917f33b9701e385290b64e22f9a
+d89a754d7c59e025d2bfcdb872d2d061e2e371ba
+82a610a59c210ff77cfdde7fd10c98067bd142da
+2f67d5448b5372f639633d8d29aac9c0295b4d72
+fc7f140fcedfe54dd63769268a36ff3f175662b5
+0be43cf4299ce2067a0435798ef4ca2fbd255901
+ba17782ca5fc0d932317389c2adf94b5dbd3ebfe
+2e7e1ee7e3ee1445939480efd615e8828b9838f8
+2c7185bcf31a4950b014b67ca7c63735ee00d56f
+d10cfcf206b0991e3bc20ac28df1f61c63516f30
+bd8d579715d58405dfd5a77f32920aafe018fce4
+3674f3597bbca3ce05e4423611d871d09882043b
+3e2b9ffeb708b4362ebfad95fa7bb0101db1579d
+1cfe3533759bf95be1fce8ce1d1aa2aeb5bfb4cc
+7eb8476024413269bfb2abd54e88d3e131d0aa0e
+5aed0f26549c6e64c5199048c4fd5fdb3c5e69d6
+9ac2960f646a46b701963230e6949abd9ac0a9b3
+3bf579baf0903ee4d4180a29739bf05cbe8f4a74
+f8f872044be2918de442ba26a30336d80d200c42
+98d1b5515b079492c8e7f0f9688df7d42d96da8e
+d3dea0cd65ab3da14cb7b3bd0ec59531d98508aa
+8fba84af61ac9b5e2bcb69b6730a597d7521ad73
+c4a2cd5ec81cdfd894c9a20d4ffb8cda637aab1f
+734cdda4a4de2a635404e4c6b61f1b2edb3f501d
+c61a8940d66eed9850b35dd3768f18b59471ca34
+5f2c210644c1e567435d78522258e0ae036deedb
+15cf1f17aeba62cd834116b770f173b0aa614bf4
+e8c6c3fc9b52dffb15fe115702c6f159d955d308
+95b5296f7ec70455b0cf1748cddeaa099284bfed
+51d6a8a61ea9588a795b20353c97efccec73f5db
+ac8e09128e1e48a2eae5fa90f252ada689f6eae7
+4f4f920eb43399d8d05b42808e45b56bdd36a929
+fcf393a90190e376b617cc02e4a473106684d066
+5550a6df1b118a80c00a2459bae216a7e8e3966c
+33548531f9ed2ce6f87b3a1caad122c97f1fd2e9
+f6532bf13a4649b7599eb40f826aa5281e392c61
+292e1c88d43a77dbe5c610f4f611cfdb6d3212b6
+270733d986a1eb72efda847b4b55bc6ba9686df4
+5d2e5833ca713f95adcf4267148ac2ccf2318539
+8b2c090d9007e147b8c660f9282f357336358061
+df90850f1c153bfab691b985bfe536a5544e438b
+ae2c71080b0e17dee4e5a019d87585f2987f0508
+8127b7654d6e5c46caaf2404270b74c6b0967e19
+b42a97fb47bcd6bfa72e130c08960a77ee96f9ab
+2e832d5657bf9e5678fd45b118fc74db07dac9da
+75879ab7a77318bbe506cb9df309d99205862f6c
+9436170c648c40b6f4cc3751fca3674aa82ffe9a
+f7be8956639e66e534ed6195d929aed4e0b90cad
+fbb2f81fc00ee0f257d4aa79bbef8cad5000ac59
+f5603ceaebe3caf6a812edef9c4b38def78cbf34
+c30e4e4994b76605dcb2071954eaaea471307d80
+cfa92e17809e8d20ebc73b4e531a1b106d02b38c
+335435a94f8fa9c128b9f278d929c9d0e45e2510
+bcc346f4a287d96d124e1163e4447bfc47073cd8
+5d9971c6a9d5c56463ea186850b16f8969a58e67
+b961e512242ddad7712855ab00b4d37723376e5d
+49e975a4c60d99bcc42c921d73f8d89ec7130916
+6a6406906470be10f6d6d94a32741ba370a1db68
+5f27ed82c52339124aa368507d66b71d96862cb7
+5db4fe0ce9e9227042144758cf6c4c2de2042435
+6856a11b98ffffeff6e2f991d3d1a1232c029ea1
+b14e3fe0d320c0d7c09154840250d70bc88bb6c0
+31f1c92dbfa5aa338a21a0cb15d071cb9dc6e362
+89c73b1e7c9b5e126a26ed5b7caccd7cd30ab199
+eacf974e235add458efb815ada1e5b82a05878fa
+fe464b2b54154d231671750053861f5fd14454f5
+08f4832507259ded9700de81f5fd462caf0d5be8
+c87d5036d3a374c66ec4f5870df47df7176ce8b9
+59efb1ac77c59abc8613830787d767100387c680
+a285b6edd47f9b8966935878ad4539d270b406d1
+c72e6992f44ce75a40f44be4365dc4f264735cfb
+6cb8c52bb421ce04898fa42cb997c04097ddd328
+08fbbfe87563595508a77629e47613d6bd1119eb
+ce2945e369603fcec1fcdc6e19aac5996325cba9
+4db99a2268a120c7af636387241188064ea42338
+2744e6d526b8f2c1b297ac2d2458aaa08b0cda11
+db5a00984fa54b9d2a1caad0067a9ff0d0489517
+ae5e92abd5929ee7f0a5aa1622aa094bac4fae29
+9e297343da13cf9ba0ad8b5b75c07723136f4885
+edff76149ec44f6849d73f019ef9bded534a38c2
+1d7df3df839a6aa8f5392310d46b2a89080a3c25
+07377c375ac76a34331c660fe87ebd7f9b3d74c4
+52472ec859131844f38fc7d57944778f01d109ac
+a2b4a6c6b32900a066d0257ae6d4526db872afe2
+9ca542d744149f0efc8b8aac8289f5e38e6d200c
+4317856a1458baa427dc00e8ea505d2fc5f118ab
+eb3066de677f9f6131aab542d9d426aaf50ed2ce
+574b62c845809fd54cc168492424c5fac145bc83
+84c5b45328dee855c4855a104ac9c0558cc8a328
+7e2cfbfd43045fbd6aabd9a45090a5716fc4e179
+fb85867c989b9ee6b7899134136f81d6372526a9
+946017d5f11aa582854ac4c0e0f1b18b06127ef1
+050a149051a5d268fcc5539e8b654c2240070c82
+8334da483f1986aea87b62028672836cb3dc6205
+b2470969e4fba92f7909eac26b77d08cc5575533
+c678920facffd35853c9d185904f4aebcd2d8b49
+cdef0eaff4a3c168290d238999fc066ebc3a93e8
+fa641327dc5873276f0af453a2caa1634c16f143
+b5ca8d4f259f35c1f3edfd9f108ce29881e478b0
+61efeb64e8431cfbafa4b02eb76bf0c58e61a0fa
+7df268a3f4da7d747b792882dfb0cbdb7cc431bc
+ea03a569272d329090fe60d6bff8d119e18057d7
+cbb27980eb04f68d9f10067d3d3c114efa9d0054
+90498b95fe8b299ce65d5cafaef942aa58bd68b7
+92be73dffd3320fe7734258961fe5a5f2a43390e
+ac855f0de9086e9e170072cb37400637f0c9b735
+ef2bb8bd93fa8b44414565b32735334fa6823b56
+f20e0eefd007bc310d2a753ba526d33a8aba812c
+80097a879fceff2a9a955bf7613b0d3bfa68dc23
+1275852f2e78ed9afd189e8b845fdb5393413614
+82eff71af91df2ca18aebb7f1153a7aed16ae7cc
+0ee737085af468f264f57f052ea9b9b1f58d7222
+566563a02dbaebec07429046122426acd7039166
+a3201e955d6607d383332f3a12a7befa08c5a18c
+5babbad3daac5c26503088782fd5b62067b94fa5
+0e2ea7af369dbcaeb5e334b02dd9ba5271b10265
+de0eb358b890d92e8f67592c6e23f0e3b2ba3f66
+3c563542db664321aa77a9567c1601f425500f94
+7224d58a7e1f02b84994b60dc3b84d9fe6941ff5
+cd2f8d661ea2c6d6818a278eb4f0548751c3b1ae
+7d40e7e5c01bd551edf65902386401e1b8b8014b
+dbced84d839165d9b494982449aa2eb9109b8467
+035c8632c1ffbeb75efe16a4ec50c91e20e6e189
+ff76ff05aa1ab17e5ca9864df2252e6bb44c8a17
+be4f18e25b06f430e2de0cc8fddcac8585b00beb
+ecd08edab496801fd4fde45362dde462d00ee91c
+6cce5ccc5d366996f5a32de17a403341db5fddc6
+1063be2ad265751fb958b396ee26167fa0e844d2
+101569eeef2cecc576578bd6500f1c2dcc0274e2
+5b721f86f4a394f05350641e639a9d6cb2046c45
+53de11d144cd2eda7cf1bb644ae27f8ef2489289
+84c0f814951b80c3b2e39caf3925b56a9b2e1733
+3bf8e4d89b9e6d004de6ea52e3e9d68f6015f94b
+651cafb2620ab60a0e4f550c080231f20ae6d26e
+b712f08f819b925ff7587b6c09a8855bc295d795
+c1dd69df9dfbd7b526cc89a5749f7f7fabc1e290
+5161e38e4ea716dcfb554ccb88901b3d97778f64
+9a98dd6d6aaba05c9e46411ea263f74df908203d
+9b1bcef8bfef0fb5eb5ea9af0b699aa0534fceca
+f2d605985821597773bc6b956036bdbc5d307386
+ce032dae834f383125cdd852e7c1bc793d4c3ba3
+0a0007cfd40ae9694c84f109aea11ec4f2b6cf39
+656531036cee6b2c2c71954bb6540ef6b2e016d0
+63fd7a159e58add133b9c71c4b1b37b899dd646f
+3646b42511a6a0df5470408bc9a7a69bb3c5d742
+82eb267b8e86be0b444e841b4b4ed4814b6f1942
+d3faed04712b4634b47e1de0340070653546deb2
+7c47da191f935811f269f9ba3c59556c48282e80
+227b1a09b942eaf130d1d84cdcabf98921780a22
+aad4c94fd55d33a3f3a5377bbe441c9474cdbd1e
+31cdaaa7a47efe2ce0e78ebec29df4d2d81df265
+b1451721864e836069fa299a64595d1655793757
+cccd0edb5dafb3a160179a60f75fd8c835c0be82
+e7697c7b626ba3a426106d83f4c3a052fcde02a4
+66d087f3dd2e19ffe340c26ef17efe0062a59290
+def569db592ed1715ae509644444c3feda06a536
+a6590c49e44aa4975b2b0152ee21ac8af3097d80
+c847de9faa1f1a06d5647949a23f523f84aba7f3
+edde81b2bdd61bd757b71a7b3839b6fef81f4be4
+a29566375836f37173ccaffa47dea25eb1240187
+5fea59ccdab484873081eaa37af88e26e3db2aed
+94325522c9be8224970f810554611d6a73877c13
+4e32fbb58154e878dd2fd4b06398f85636fd0cf4
+61262450d4d814865a4f9a84299c24daa493f66e
+c1cc2a2a1ab66f6c9c6fabe28be45d1440a57c3d
+14aed1b7c08c941b1d2ba6c1c2ffb6255c306c74
+f7ba77d23a0eea5a3034a1833b2d2552cb42fb7a
+a3f69a073dcfb6da8038607a9f14eb28b5dab2db
diff --git a/scraper/reports/misc/raw_paper_doi.csv b/scraper/reports/misc/raw_paper_doi.csv
new file mode 100644
index 00000000..bd56e667
--- /dev/null
+++ b/scraper/reports/misc/raw_paper_doi.csv
@@ -0,0 +1,1067 @@
+610779e90b644cc18696d7ac7820d3e0598e24d0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7067419
+61262450d4d814865a4f9a84299c24daa493f66e,http://doi.org/10.1007/s10462-016-9474-x
+61971f8e6fff5b35faed610d02ad14ccfc186c70,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373843
+61e2044184d86d0f13e50ecaa3da6a4913088c76,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7572183
+61329bc767152f01aa502989abc854b53047e52c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8450832
+95b9df34bcf4ae04beea55c11cf0cc4095aa38dc,http://doi.org/10.1007/11527923_7
+95289007f2f336e6636cf8f920225b8d47c6e94f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6472796
+95b5296f7ec70455b0cf1748cddeaa099284bfed,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8443886
+95d858b39227edeaf75b7fad71f3dc081e415d16,http://doi.org/10.1007/s11042-017-5073-3
+95e3b78eb4d5b469f66648ed4f37e45e0e01e63e,http://doi.org/10.1007/s11042-016-4261-x
+95288fa7ff4683e32fe021a78cbf7d3376e6e400,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014759
+598744c8620e4ecbf449d14d7081fbf1cd05851f,https://www.ncbi.nlm.nih.gov/pubmed/29731533
+59b83666c1031c3f509f063b9963c7ad9781ca23,http://dl.acm.org/citation.cfm?id=2830590
+592f14f4b12225fc691477a180a2a3226a5ef4f0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789592
+9285f4a6a06e975bde3ae3267fccd971d4fff98a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099853
+9296f4ac0180e29226d6c016b5a4d5d2964eaaf6,http://doi.org/10.1038/s41598-017-07122-x
+92292fffc36336d63f4f77d6b8fc23b0c54090e9,http://doi.org/10.1016/j.jvcir.2015.03.001
+0c6a566ebdac4bd14e80cd6bf4631bc7458e1595,http://doi.org/10.1016/j.patcog.2013.03.010
+6689aee6c9599c1af4c607ea5385ac0c2cf0c4b3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8335166
+660c99ac408b535bb0468ab3708d0d1d5db30180,http://doi.org/10.1007/s11042-015-3083-6
+66490b5869822b31d32af7108eaff193fbdb37b0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373857
+663efaa0671eace1100fdbdecacd94216a17b1db,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6619243
+3e3227c8e9f44593d2499f4d1302575c77977b2e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8347112
+3e59d97d42f36fc96d33a5658951856a555e997b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163128
+3e9ab40e6e23f09d16c852b74d40264067ac6abc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6619307
+3e2b9ffeb708b4362ebfad95fa7bb0101db1579d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553717
+50ee027c63dcc5ab5cd0a6cdffb1994f83916a46,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995354
+506ea19145838a035e7dba535519fb40a3a0018c,http://arxiv.org/abs/1806.08251
+68604e7e1b01cdbd3c23832976d66f1a86edaa8f,http://doi.org/10.1134/S1054661818030136
+6856a11b98ffffeff6e2f991d3d1a1232c029ea1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771409
+68c1090f912b69b76437644dd16922909dd40d60,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6987312
+5760d29574d78e79e8343b74e6e30b3555e48676,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8447743
+572dbaee6648eefa4c9de9b42551204b985ff863,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163151
+5779e3e439c90d43648db107e848aeb954d3e347,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7927417
+5748652924084b7b0220cddcd28f6b2222004359,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7492255
+57178b36c21fd7f4529ac6748614bb3374714e91,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217
+3b350afd8b82487aa97097170c269a25daa0c82d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8248664
+3b21aaf7def52964cf1fcc5f11520a7618c8fae3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099900
+3bf8e4d89b9e6d004de6ea52e3e9d68f6015f94b,http://dl.acm.org/citation.cfm?id=3240893
+3bcb93aa2a5e5eda039679516292af2f7c0ff9ac,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8393012
+3bf579baf0903ee4d4180a29739bf05cbe8f4a74,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4270392
+3bd10f7603c4f5a4737c5613722124787d0dd818,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7415949
+6f22628d34a486d73c6b46eb071200a00e3abae3,https://www.ncbi.nlm.nih.gov/pubmed/29994497
+6feafc5c1d8b0e9d65ebe4c1512b7860c538fbdc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8448885
+035c8632c1ffbeb75efe16a4ec50c91e20e6e189,http://doi.org/10.1007/s00138-018-0943-x
+034b3f3bac663fb814336a69a9fd3514ca0082b9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7298991
+9b4b2a575641f3a7f8a5ce28b6a06c36694a9ddf,http://doi.org/10.1007/s00371-015-1158-z
+9b9f6e5eb6d7fa50300d67502e8fda1006594b84,http://dl.acm.org/citation.cfm?id=3123323
+9b1022a01ca4ecf8c1fa99b1b39a93924de2fcfb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8316962
+9ba4a5e0b7bf6e26563d294f1f3de44d95b7f682,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354113
+9b0ead0a20a2b7c4ae40568d8d1c0c2b23a6b807,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354290
+9b6d9f0923e1d42c86a1154897b1a9bd7ba6716c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7114333
+9efdb73c6833df57732b727c6aeac510cadb53fe,http://dl.acm.org/citation.cfm?id=3184071
+9e105c4a176465d14434fb3f5bae67f57ff5fba2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354230
+9e2ab407ff36f3b793d78d9118ea25622f4b7434,http://doi.org/10.1007/s11042-018-5679-0
+9e10ea753b9767aa2f91dafe8545cd6f44befd7f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771444
+0450dacc43171c6e623d0d5078600dd570de777e,http://doi.org/10.1007/s10339-016-0774-5
+6af75a8572965207c2b227ad35d5c61a5bd69f45,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8433687
+6a6269e591e11f41d59c2ca1e707aaa1f0d57de6,http://doi.org/10.1007/s10044-016-0531-5
+6a931e7b7475635f089dd33e8d9a2899ae963804,http://doi.org/10.1007/s00371-018-1561-3
+6a6406906470be10f6d6d94a32741ba370a1db68,http://doi.org/10.1007/s11042-016-4213-5
+6a5d7d20a8c4993d56bcf702c772aa3f95f99450,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813408
+3266fcd1886e8ad883714e38203e66c0c6487f7b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7533149
+3266fbaaa317a796d0934b9a3f3bb7c64992ac7d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4527244
+32f62da99ec9f58dd93e3be667612abcf00df16a,http://doi.org/10.1007/s11042-017-5583-z
+32e4fc2f0d9c535b1aca95aeb5bcc0623bcd2cf2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1334680
+32e9c9520cf6acb55dde672b73760442b2f166f5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7970176
+35208eda874591eac70286441d19785726578946,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789507
+35265cbd9c6ea95753f7c6b71659f7f7ef9081b6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7052327
+352a620f0b96a7e76b9195a7038d5eec257fd994,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373823
+69adf2f122ff18848ff85e8de3ee3b2bc495838e,http://arxiv.org/abs/1711.10678
+69a41c98f6b71764913145dbc2bb4643c9bc4b0a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8444452
+695426275dee2ec56bc0c0afe1c5b4227a350840,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7878535
+696236fb6f986f6d5565abb01f402d09db68e5fa,http://doi.org/10.1007/s41095-018-0112-1
+6932baa348943507d992aba75402cfe8545a1a9b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014987
+6966d9d30fa9b7c01523425726ab417fd8428790,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6619291
+3cb057a24a8adba6fe964b5d461ba4e4af68af14,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6701391
+3c09fb7fe1886072670e0c4dd632d052102a3733,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8101020
+3c0fffd4cdbfef4ccd92d528d8b8a60ab0929827,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373845
+3cd380bd0f3b164b44c49e3b01f6ac9798b6b6f9,http://doi.org/10.1007/s00371-016-1323-z
+562f7555e5cb79ce0fe834c4613264d8378dd007,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7153112
+56fd4c05869e11e4935d48aa1d7abb96072ac242,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373812
+566563a02dbaebec07429046122426acd7039166,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8461618
+5632ba72b2652df3b648b2ee698233e76a4eee65,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8346387
+51b42da0706a1260430f27badcf9ee6694768b9b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7471882
+51410d6bd9a41eacb105f15dbdaee520e050d646,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8412888
+51d6a8a61ea9588a795b20353c97efccec73f5db,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6460308
+518a3ce2a290352afea22027b64bf3950bffc65a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5204174
+51dcb36a6c247189be4420562f19feb00c9487f8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1394433
+519f1486f0755ef3c1f05700ea8a05f52f83387b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5595846
+5167e16b53283be5587659ea8eaa3b8ef3fddd33,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813364
+51bb86dc8748088a198b216f7e97616634147388,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6890496
+3dce635ce4b55fb63fc6d41b38640403b152a048,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411225
+3db6fd6a0e9bb30f2421e84ee5e433683d17d9c1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8402469
+588bed36b3cc9e2f26c39b5d99d6687f36ae1177,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771389
+58217ae5423828ed5e1569bee93d491569d79970,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1578742
+587b8c147c6253878128ddacf6e5faf8272842a4,http://dl.acm.org/citation.cfm?id=2638549
+58538cc418bf41197fad4fc4ee2449b2daeb08b1,http://doi.org/10.1007/s11042-017-4343-4
+67386772c289cd40db343bdc4cb8cb4f58271df2,http://doi.org/10.1038/s41598-017-10745-9
+675b1fd2aaebe9c62be6b22b9ac6d278193cc581,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1699428
+67af3ec65f1dc535018f3671624e72c96a611c39,http://doi.org/10.1007/s11042-016-4058-y
+0b45aeb0aede5e0c19b508ede802bdfec668aefd,http://dl.acm.org/citation.cfm?id=1963206
+0ba5369c5e1e87ea172089d84a5610435c73de00,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8347111
+0b82bf595e76898993ed4f4b2883c42720c0f277,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411229
+93af335bf8c610f34ce0cadc15d1dd592debc706,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8267475
+93cd5c47e4a3425d23e3db32c6eaef53745bb32e,http://doi.org/10.1007/s11042-017-5062-6
+93dcea2419ca95b96a47e541748c46220d289d77,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014993
+93c0405b1f5432eab11cb5180229720604ffd030,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8462228
+93dd4e512cd7647aecbfc0cd4767adf5d9289c3d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952499
+94806f0967931d376d1729c29702f3d3bb70167c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780581
+9436170c648c40b6f4cc3751fca3674aa82ffe9a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6811741
+947ee3452e4f3d657b16325c6b959f8b8768efad,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952677
+604a281100784b4d5bc1a6db993d423abc5dc8f0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5353681
+60777fbca8bff210398ec8b1179bc4ecb72dfec0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751535
+60821d447e5b8a96dd9294a0514911e1141ff620,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813328
+605f6817018a572797095b83bec7fae7195b2abc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5339020
+60462b981fda63c5f9d780528a37c46884fe0b54,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8397015
+34c062e2b8a3f6421b9f4ff22f115a36d4aba823,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7872382
+34bc8ecec0c0b328cd8c485cb34d4d2f4b84e0c9,https://www.ncbi.nlm.nih.gov/pubmed/29069621
+346752e3ab96c93483413be4feaa024ccfe9499f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6960834
+34fd227f4fdbc7fe028cc1f7d92cb59204333718,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8446331
+5a12e1d4d74fe1a57929eaaa14f593b80f907ea3,http://doi.org/10.1007/s13735-016-0117-4
+5a547df635a9a56ac224d556333d36ff68cbf088,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8359041
+5fea59ccdab484873081eaa37af88e26e3db2aed,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8263394
+5f2c210644c1e567435d78522258e0ae036deedb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4036602
+5fe3a9d54d5070308803dd8ef611594f59805400,http://doi.org/10.1016/j.patcog.2016.02.006
+5f0d4657eab4152a1785ee0a25b5b499cd1163ec,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6853687
+336488746cc76e7f13b0ec68ccfe4df6d76cdc8f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7762938
+335435a94f8fa9c128b9f278d929c9d0e45e2510,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6849440
+3337cfc3de2c16dee6f7cbeda5f263409a9ad81e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8398675
+057b80e235b10799d03876ad25465208a4c64caf,http://dl.acm.org/citation.cfm?id=3123427
+0532cbcf616f27e5f6a4054f818d4992b99d201d,http://doi.org/10.1007/s11042-015-3042-2
+9d5bfaf6191484022a6731ce13ac1b866d21ad18,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139086
+9d24812d942e69f86279a26932df53c0a68c4111,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8417316
+9d46485ca2c562d5e295251530a99dd5df99b589,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813386
+9d3377313759dfdc1a702b341d8d8e4b1469460c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7342926
+9dcfa771a7e87d7681348dd9f6cf9803699b16ce,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1385984
+9c2f20ed168743071db6268480a966d5d238a7ee,http://dl.acm.org/citation.cfm?id=1456304
+9cc8cf0c7d7fa7607659921b6ff657e17e135ecc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099536
+9c6dfd3a38374399d998d5a130ffc2864c37f554,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553738
+9c23859ec7313f2e756a3e85575735e0c52249f4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5981788
+9ca542d744149f0efc8b8aac8289f5e38e6d200c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789587
+9c59bb28054eee783a40b467c82f38021c19ff3e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7178311
+023decb4c56f2e97d345593e4f7b89b667a6763d,http://doi.org/10.1007/s10994-005-3561-6
+02fc9e7283b79183eb3757a9b6ddeb8c91c209bb,http://doi.org/10.1007/s11042-018-6146-7
+021e008282714eaefc0796303f521c9e4f199d7e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354319
+a4898f55f12e6393b1c078803909ea715bf71730,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6957817
+a45e6172713a56736a2565ddea9cb8b1d94721cd,http://doi.org/10.1038/s41746-018-0035-3
+a325d5ea42a0b6aeb0390318e9f65f584bd67edd,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909426
+a3201e955d6607d383332f3a12a7befa08c5a18c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7900276
+a313851ed00074a4a6c0fccf372acb6a68d9bc0b,http://doi.org/10.1007/s11042-016-4324-z
+b5f9180666924a3215ab0b1faf712e70b353444d,http://doi.org/10.1007/s11042-017-4661-6
+b53485dbdd2dc5e4f3c7cff26bd8707964bb0503,http://doi.org/10.1007/s11263-017-1012-z
+b5747ecfa0f3be0adaad919d78763b1133c4d662,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8397022
+b5f3b0f45cf7f462a9c463a941e34e102a029506,http://dl.acm.org/citation.cfm?id=3143004
+b51d11fa400d66b9f9d903a60c4ebe03fd77c8f2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8358588
+b5fdd7778503f27c9d9bf77fab193b475fab6076,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373891
+b598f7761b153ecb26e9d08d3c5817aac5b34b52,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4618852
+b55e70df03d9b80c91446a97957bc95772dcc45b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8269329
+b5ca8d4f259f35c1f3edfd9f108ce29881e478b0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099624
+b5f9306c3207ac12ac761e7d028c78b3009a219c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6093779
+b26e8f6ad7c2d4c838660d5a17337ce241442ed9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8462692
+b2470969e4fba92f7909eac26b77d08cc5575533,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8326475
+d916602f694ebb9cf95d85e08dd53f653b6196c3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237607
+d9e66b877b277d73f8876f537206395e71f58269,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7225130
+d9deafd9d9e60657a7f34df5f494edff546c4fb8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100124
+d9218c2bbc7449dbccac351f55675efd810535db,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5699141
+d9a5c82b710b1f4f1ffb67be2ae1d3c0ae7f6c55,http://doi.org/10.1016/j.jvcir.2015.11.002
+d99b5ee3e2d7e3a016fbc5fd417304e15efbd1f8,http://doi.org/10.1007/s11063-017-9578-6
+aca728cab26b95fbe04ec230b389878656d8af5b,http://doi.org/10.1007/978-981-10-8258-0
+acff2dc5d601887741002a78f8c0c35a799e6403,http://doi.org/10.1007/978-3-662-44654-6
+ac2e166c76c103f17fdea2b4ecb137200b8d4703,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5373798
+ac03849956ac470c41585d2ee34d8bb58bb3c764,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6853690
+ad77056780328bdcc6b7a21bce4ddd49c49e2013,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8398021
+ada063ce9a1ff230791c48b6afa29c401a9007f1,http://doi.org/10.1007/978-3-319-97909-0
+bb4f83458976755e9310b241a689c8d21b481238,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265393
+bb4be8e24d7b8ed56d81edec435b7b59bad96214,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7060677
+bb2f61a057bbf176e402d171d79df2635ccda9f6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8296311
+bb0ecedde7d6e837dc9a5e115302a2aaad1035e1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373838
+d7b8f285b0701ba7b1a11d1c7dd3d1e7e304083f,http://dl.acm.org/citation.cfm?id=3164593
+d7dd35a86117e46d24914ef49ccd99ea0a7bf705,http://doi.org/10.1007/s10994-014-5463-y
+d790093cb85fc556c0089610026e0ec3466ab845,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4721612
+d77f18917a58e7d4598d31af4e7be2762d858370,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6289062
+d00e9a6339e34c613053d3b2c132fccbde547b56,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791154
+d06bcb2d46342ee011e652990edf290a0876b502,http://arxiv.org/abs/1708.00980
+d066575b48b552a38e63095bb1f7b56cbb1fbea4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8359888
+bed8feb11e8077df158e16bce064853cf217ba62,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6191360
+bef4df99e1dc6f696f9b3732ab6bac8e85d3fb3c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7344632
+be7444c891caf295d162233bdae0e1c79791d566,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014816
+bec0c33d330385d73a5b6a05ad642d6954a6d632,http://doi.org/10.1007/s11042-017-4491-6
+bef926d63512dbffcf1af59f72295ef497f5acf9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6990726
+be632b206f1cd38eab0c01c5f2004d1e8fc72880,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6607601
+beb2f1a6f3f781443580ffec9161d9ce6852bf48,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8424735
+beae35eb5b2c7f63dfa9115f07b5ba0319709951,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163096
+be4faea0971ef74096ec9800750648b7601dda65,http://doi.org/10.1007/s11063-017-9724-1
+b313751548018e4ecd5ae2ce6b3b94fbd9cae33e,http://doi.org/10.1007/s11263-008-0143-7
+b3ad7bc128b77d9254aa38c5e1ead7fa10b07d29,http://dl.acm.org/citation.cfm?id=3206041
+b3add9bc9e70b6b28ba31e843e9155e7c37f3958,http://doi.org/10.1007/s10766-017-0552-8
+df767f62a6bf3b09e6417d801726f2d5d642a202,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1699727
+df87193e15a19d5620f5a6458b05fee0cf03729f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7363421
+df6e68db278bedf5486a80697dec6623958edba8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952696
+da7bbfa905d88834f8929cb69f41a1b683639f4b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6199752
+daa120032d8f141bc6aae20e23b1b754a0dd7d5f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789593
+dad6b36fd515bda801f3d22a462cc62348f6aad8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6117531
+daca9d03c1c951ed518248de7f75ff51e5c272cb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6976977
+dac8fc521dfafb2d082faa4697f491eae00472c7,http://dl.acm.org/citation.cfm?id=3123423
+daa4cfde41d37b2ab497458e331556d13dd14d0b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406477
+da23d90bacf246b75ef752a2cbb138c4fcd789b7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406360
+dac34b590adddef2fc31f26e2aeb0059115d07a1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8436078
+b484141b99d3478a12b8a6854864c4b875d289b8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6117595
+b41d585246360646c677a8238ec35e8605b083b0,http://doi.org/10.1007/s11042-018-6017-2
+b40c001b3e304dccb28c745bd54aa281c8ff1f29,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8361072
+a2e0966f303f38b58b898d388d1c83e40b605262,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354125
+a2b4a6c6b32900a066d0257ae6d4526db872afe2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8272466
+a20036b7fbf6c0db454c8711e72d78f145560dc8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4761890
+a26fd9df58bb76d6c7a3254820143b3da5bd584b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8446759
+a5acda0e8c0937bfed013e6382da127103e41395,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789672
+a532cfc69259254192aee3fc5be614d9197e7824,http://doi.org/10.1016/j.patcog.2016.12.028
+a59c0cf3d2c5bf144ee0dbc1152b1b5dd7634990,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7350093
+a5f35880477ae82902c620245e258cf854c09be9,http://doi.org/10.1016/j.imavis.2013.12.004
+a5f70e0cd7da2b2df05fadb356a24743f3cf459a,http://doi.org/10.1007/s11063-017-9649-8
+bddc822cf20b31d8f714925bec192c39294184f7,http://doi.org/10.1134/S1054661807040190
+bd243d77076b3b8fe046bd3dc6e8a02aa9b38d62,http://arxiv.org/abs/1412.0767
+bd8d579715d58405dfd5a77f32920aafe018fce4,http://doi.org/10.1016/j.imavis.2008.08.005
+d141c31e3f261d7d5214f07886c1a29ac734d6fc,http://doi.org/10.1007/s11063-018-9812-x
+d1ee9e63c8826a39d75fa32711fddbcc58d5161a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6613000
+d10cfcf206b0991e3bc20ac28df1f61c63516f30,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553776
+d1edb8ba9d50817dbfec7e30f25b1846941e84d8,http://doi.org/10.1007/s13735-016-0112-9
+d116bac3b6ad77084c12bea557d42ed4c9d78433,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7471886
+d1079444ceddb1de316983f371ecd1db7a0c2f38,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6460478
+d6c8f5674030cf3f5a2f7cc929bad37a422b26a0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337371
+d6ae7941dcec920d5726d50d1b1cdfe4dde34d35,http://dl.acm.org/citation.cfm?id=31310887
+d6e08345ba293565086cb282ba08b225326022fc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7490397
+d62d82c312c40437bc4c1c91caedac2ba5beb292,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8461322
+bc607bee2002c6c6bf694a15efd0a5d049767237,http://doi.org/10.1007/s11042-017-4364-z
+bc9bad25f8149318314971d8b8c170064e220ea8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8078542
+bc08dfa22949fbe54e15b1a6379afade71835968,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7899970
+bc36badb6606b8162d821a227dda09a94aac537f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337442
+ae78469de00ea1e7602ca468dcf188cdfe2c80d4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8466467
+ae5e92abd5929ee7f0a5aa1622aa094bac4fae29,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373805
+aeb6b9aba5bb08cde2aebfeda7ced6c38c84df4a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8424644
+aef58a54d458ab76f62c9b6de61af4f475e0f616,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6706790
+aee3427d0814d8a398fd31f4f46941e9e5488d83,http://dl.acm.org/citation.cfm?id=1924573
+d8526863f35b29cbf8ac2ae756eaae0d2930ffb1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265439
+d833268a8ea9278e68aaf3bd9bc2c11a5bb0bab7,http://doi.org/10.1007/s11042-018-6047-9
+d89a754d7c59e025d2bfcdb872d2d061e2e371ba,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5598629
+d8fbd3a16d2e2e59ce0cff98b3fd586863878dc1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952553
+ab8ecf98f457e29b000c44d49f5bf49ec92e571c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8439631
+ab0981d1da654f37620ca39c6b42de21d7eb58eb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8016651
+ab80582807506c0f840bd1ba03a8b84f8ac72f79,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8462326
+ab6886252aea103b3d974462f589b4886ef2735a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4371439
+e5ea7295b89ef679e74919bf957f58d55ad49489,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4401948
+e52f73c77c7eaece6f2d8fdd0f15327f9f007261,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099713
+e52f57a7de675d14aed28e5d0f2f3c5a01715337,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8319987
+e57014b4106dd1355e69a0f60bb533615a705606,http://doi.org/10.1007/s13748-018-0143-y
+e295c1aa47422eb35123053038e62e9aa50a2e3a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406389
+e287ff7997297ce1197359ed0fb2a0bd381638c9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7795253
+e2faaebd17d10e2919bd69492787e7565546a63f,http://doi.org/10.1007/s11042-017-4514-3
+e2106bb3febb4fc8fe91f0fcbc241bcda0e56b1e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952626
+f472cb8380a41c540cfea32ebb4575da241c0288,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7284869
+f4ba07d2ae6c9673502daf50ee751a5e9262848f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7284810
+f4251e02f87ac3fcae70bdb313f13ed16ff6ff0a,https://www.ncbi.nlm.nih.gov/pubmed/24314504
+f4b5a8f6462a68e79d643648c780efe588e4b6ca,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995700
+f39783847499dd56ba39c1f3b567f64dfdfa8527,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791189
+f3cdd2c3180aa2bf08320ddd3b9a56f9fe00e72b,http://doi.org/10.1016/j.patrec.2013.03.022
+f374ac9307be5f25145b44931f5a53b388a77e49,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5339060
+f38813f1c9dac44dcb992ebe51c5ede66fd0f491,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354277
+f3553148e322f4f64545d6667dfbc7607c82703a,http://doi.org/10.1007/s00138-016-0763-9
+f33bd953d2df0a5305fc8a93a37ff754459a906c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961800
+ebbceab4e15bf641f74e335b70c6c4490a043961,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813349
+ebc3d7f50231cdb18a8107433ae9adc7bd94b97a,http://doi.org/10.1111/cgf.13218
+eba4cfd76f99159ccc0a65cab0a02db42b548d85,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751379
+ebde9b9c714ed326157f41add8c781f826c1d864,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014758
+eb3066de677f9f6131aab542d9d426aaf50ed2ce,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373860
+eb8a3948c4be0d23eb7326d27f2271be893b3409,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7914701
+eb6f2b5529f2a7bc8b5b03b1171f75a4c753a0b2,http://doi.org/10.1117/12.650555
+c7745f941532b7d6fa70db09e81eb1167f70f8a7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1640757
+c05ae45c262b270df1e99a32efa35036aae8d950,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354120
+c07ab025d9e3c885ad5386e6f000543efe091c4b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8302601
+c0c0b8558b17aa20debc4611275a4c69edd1e2a7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909629
+c0f67e850176bb778b6c048d81c3d7e4d8c41003,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8296441
+eece52bd0ed4d7925c49b34e67dbb6657d2d649b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014982
+ee1465cbbc1d03cb9eddaad8618a4feea78a01ce,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6998872
+ee7e8aec3ebb37e41092e1285e4f81916ce92c18,https://www.sciencedirect.com/science/article/pii/S0197458017301859
+ee1f9637f372d2eccc447461ef834a9859011ec1,http://doi.org/10.1007/s11042-016-3950-9
+ee56823f2f00c8c773e4ebc725ca57d2f9242947,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7110235
+ee2ec0836ded2f3f37bf49fa0e985280a8addaca,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8368755
+c91da328fe50821182e1ae4e7bcbe2b62496f8b9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4453844
+c9b958c2494b7ba08b5b460f19a06814dba8aee0,https://www.ncbi.nlm.nih.gov/pubmed/30080142
+c9c9ade2ef4dffb7582a629a47ea70c31be7a35e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237606
+c997744db532767ee757197491d8ac28d10f1c0f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8364339
+c9efcd8e32dced6efa2bba64789df8d0a8e4996a,http://dl.acm.org/citation.cfm?id=2984060
+c900e0ad4c95948baaf0acd8449fde26f9b4952a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780969
+c914d2ba06ec3fd1baa0010dcc4d16c7c34fc225,http://doi.org/10.1007/978-3-319-11071-4
+c98b13871a3bc767df0bdd51ff00c5254ede8b22,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909913
+fc7b34a2e43bb3d3585e1963bb64a488e2f278a0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7045492
+fcc6fe6007c322641796cb8792718641856a22a7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994
+fc8fb68a7e3b79c37108588671c0e1abf374f501,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7565615
+fcf393a90190e376b617cc02e4a473106684d066,http://doi.org/10.1007/s10044-015-0507-x
+fcceea054cb59f1409dda181198ed4070ed762c9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8388318
+fc7f140fcedfe54dd63769268a36ff3f175662b5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8013122
+fd9ab411dc6258763c95b7741e3d51adf5504040,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5595808
+fd809ee36fa6832dda57a0a2403b4b52c207549d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8409768
+fde611bf25a89fe11e077692070f89dcdede043a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7322904
+fd5376fcb09001a3acccc03159e8ff5801129683,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373899
+f2902f5956d7e2dca536d9131d4334f85f52f783,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6460191
+f2d605985821597773bc6b956036bdbc5d307386,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8027090
+f2896dd2701fbb3564492a12c64f11a5ad456a67,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5495414
+f2700e3d69d3cce2e0b1aea0d7f87e74aff437cd,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237686
+f27e5a13c1c424504b63a9084c50f491c1b17978,http://dl.acm.org/citation.cfm?id=3097991
+f2eab39cf68de880ee7264b454044a55098e8163,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5539989
+f2d5bb329c09a5867045721112a7dad82ca757a3,http://doi.org/10.1007/s11042-015-3009-3
+f201baf618574108bcee50e9a8b65f5174d832ee,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8031057
+f5c57979ec3d8baa6f934242965350865c0121bd,http://doi.org/10.1007/s12539-018-0281-8
+f5603ceaebe3caf6a812edef9c4b38def78cbf34,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4455998
+e3ce4c3e1279e3dc0c14ff3bb2920aced9e62638,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099824
+e3d76f1920c5bf4a60129516abb4a2d8683e48ae,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014907
+e3b9863e583171ac9ae7b485f88e503852c747b6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7494596
+cfaf61bacf61901b7e1ac25b779a1f87c1e8cf7f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6737950
+cf736f596bf881ca97ec4b29776baaa493b9d50e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952629
+cf2e1ebb9609f46af6de0c15b4f48d03e37e54ba,http://arxiv.org/abs/1503.01521
+ca096e158912080493a898b0b8a4bd2902674fed,http://dl.acm.org/citation.cfm?id=3264899
+ca902aeec4fa54d32a4fed9ba89a7fb2f7131734,http://doi.org/10.1007/s11042-018-5945-1
+ca44a838da4187617dca9f6249d8c4b604661ec7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7351564
+e4754afaa15b1b53e70743880484b8d0736990ff,http://doi.org/10.1016/j.imavis.2016.01.002
+e40cb4369c6402ae53c81ce52b73df3ef89f578b,http://doi.org/10.1016/j.image.2015.01.009
+e45a556df61e2357a8f422bdf864b7a5ed3b8627,http://doi.org/10.1016/j.image.2017.08.001
+e4d7b8eb0a8e6d2bb5b90b027c1bf32bad320ba5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8023876
+e4fa062bff299a0bcef9f6b2e593c85be116c9f1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7407641
+fe866887d3c26ee72590c440ed86ffc80e980293,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8397011
+fe50efe9e282c63941ec23eb9b8c7510b6283228,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7314574
+feea73095b1be0cbae1ad7af8ba2c4fb6f316d35,http://dl.acm.org/citation.cfm?id=3126693
+fecccc79548001ecbd6cafd3067bcf14de80b11a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354157
+c847de9faa1f1a06d5647949a23f523f84aba7f3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6199761
+c8585c95215bc53e28edb740678b3a0460ca8aa4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373829
+c84de67ec2a5d687869d0c3ca8ac974aaa5ee765,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7090979
+c83e26622b275fdf878135e71c23325a31d0e5fc,http://dl.acm.org/citation.cfm?id=3164611
+c808c784237f167c78a87cc5a9d48152579c27a4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265437
+c858c74d30c02be2d992f82a821b925669bfca13,http://doi.org/10.1007/978-3-319-10605-2
+c843f591658ca9dbb77944a89372a92006defe68,http://doi.org/10.1007/s11042-015-2550-4
+fb6f5cb26395608a3cf0e9c6c618293a4278a8ad,http://doi.org/10.1007/s11390-018-1835-2
+fbc591cde7fb7beb985437a22466f9cf4b16f8b1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8463262
+fb6cc23fd6bd43bd4cacf6a57cd2c7c8dfe5269d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5339084
+fbe4f8a6af19f63e47801c6f31402f9baae5fecf,http://dl.acm.org/citation.cfm?id=2820910
+fbc2f5cf943f440b1ba2374ecf82d0176a44f1eb,https://www.ncbi.nlm.nih.gov/pubmed/30040629
+fbc9ba70e36768efff130c7d970ce52810b044ff,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6738500
+fb8eb4a7b9b9602992e5982c9e0d6d7f7b8210ef,https://www.ncbi.nlm.nih.gov/pubmed/29994550
+edfce091688bc88389dd4877950bd58e00ff1253,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553700
+ed32df6b122b15a52238777c9993ed31107b4bed,http://doi.org/10.1016/j.eswa.2017.03.008
+ed2f4e5ecbc4b08ee0784e97760a7f9e5ea9efae,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8241843
+ede5982980aa76deae8f9dc5143a724299d67742,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8081396
+ed184fda0306079f2ee55a1ae60fbf675c8e11c6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6802347
+edd6ed94207ab614c71ac0591d304a708d708e7b,http://doi.org/10.1016/j.neucom.2012.02.001
+edf60d081ffdfa80243217a50a411ab5407c961d,http://doi.org/10.1007/s11263-016-0893-6
+ede16b198b83d04b52dc3f0dafc11fd82c5abac4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952343
+c15b68986ecfa1e13e3791686ae9024f66983f14,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014747
+c12260540ec14910f5ec6e38d95bdb606826b32e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7005459
+c18a03568d4b512a0d8380cbb1fbf6bd56d11f05,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8430403
+c1c2775e19d6fd2ad6616f69bda92ac8927106a2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6196236
+c175ebe550761b18bac24d394d85bdfaf3b7718c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1301582
+c6724c2bb7f491c92c8dd4a1f01a80b82644b793,https://www.ncbi.nlm.nih.gov/pubmed/19167865
+c61eaf172820fcafaabf39005bd4536f0c45f995,http://doi.org/10.1007/978-3-319-58771-4_1
+c6382de52636705be5898017f2f8ed7c70d7ae96,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139089
+c631a31be2c793d398175ceef7daff1848bb6408,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8466318
+c61a8940d66eed9850b35dd3768f18b59471ca34,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1374768
+ecac3da2ff8bc2ba55981467f7fdea9de80e2092,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1301635
+ec576efd18203bcb8273539fa277839ec92232a1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7994601
+ecc4be938f0e61a9c6b5111e0a99013f2edc54b9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771439
+ec1bec7344d07417fb04e509a9d3198da850349f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8342699
+ec983394f800da971d243f4143ab7f8421aa967c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8340635
+ecd08edab496801fd4fde45362dde462d00ee91c,https://www.ncbi.nlm.nih.gov/pubmed/29994561
+ec5c63609cf56496715b0eba0e906de3231ad6d1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8364651
+ec00ecb64fa206cea8b2e716955a738a96424084,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265512
+ec90738b6de83748957ff7c8aeb3150b4c9b68bb,http://doi.org/10.1016/j.patcog.2015.03.011
+4e061a302816f5890a621eb278c6efa6e37d7e2f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909638
+4e43408a59852c1bbaa11596a5da3e42034d9380,http://doi.org/10.1007/s11042-018-6040-3
+4ed6c7740ba93d75345397ef043f35c0562fb0fd,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6117516
+4ef09fe9f7fa027427414cf1f2e9050ac7f5e34d,http://doi.org/10.1007/s11227-018-2408-4
+4e37cd250130c6fd60e066f0c8efb3cbb778c421,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8419742
+20a0f71d2c667f3c69df18f097f2b5678ac7d214,http://doi.org/10.1007/s10055-018-0357-0
+20d6a4aaf5abf2925fdce2780e38ab1771209f76,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8446795
+20eeb83a8b6fea64c746bf993f9c991bb34a4b30,http://doi.org/10.1007/s00138-018-0956-5
+18855be5e7a60269c0652e9567484ce5b9617caa,http://doi.org/10.1007/s11042-017-4579-z
+1860b8f63ce501bd0dfa9e6f2debc080e88d9baa,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7894195
+18010284894ed0edcca74e5bf768ee2e15ef7841,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780493
+18e54b74ed1f3c02b7569f53a7d930d72fc329f5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7902214
+188abc5bad3a3663d042ce98c7a7327e5a1ae298,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6152129
+180bd019eab85bbf01d9cddc837242e111825750,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8239690
+270acff7916589a6cc9ca915b0012ffcb75d4899,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8425659
+27b451abfe321a696c852215bb7efb4c2e50c89f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7898447
+2744e6d526b8f2c1b297ac2d2458aaa08b0cda11,http://doi.org/10.1007/s11042-017-5571-3
+2724ba85ec4a66de18da33925e537f3902f21249,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751298
+4b0cb10c6c3f2d581ac9eb654412f70bc72ed661,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8172386
+4b5ff8c67f3496a414f94e35cb35a601ec98e5cf,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6547306
+4b9ec224949c79a980a5a66664d0ac6233c3d575,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7565501
+4bf85ef995c684b841d0a5a002d175fadd922ff0,http://dl.acm.org/citation.cfm?id=3199668
+4b936847f39094d6cb0bde68cea654d948c4735d,http://doi.org/10.1007/s11042-016-3470-7
+11bb2abe0ca614c15701961428eb2f260e3e2eef,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8343867
+113b06e70b7eead8ae7450bafe9c91656705024c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373832
+116f9e9cda25ff3187bc777ceb3ecd28077a7eca,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373864
+11df25b4e074b7610ec304a8733fa47625d9faca,http://doi.org/10.1016/j.patrec.2012.09.024
+7d18e9165312cf669b799aa1b883c6bbe95bf40e,http://doi.org/10.1007/s11042-016-3492-1
+7d45f1878d8048f6b3de5b3ec912c49742d5e968,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7747479
+7d40e7e5c01bd551edf65902386401e1b8b8014b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7303876
+29db16efc3b378c50511f743e5197a4c0b9e902f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406401
+2961e14c327341d22d5f266a6872aa174add8ac4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6654170
+2983cf95743be82671a71528004036bd19172712,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7915734
+29a5d38390857e234c111f8bb787724c08f39110,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813387
+292e1c88d43a77dbe5c610f4f611cfdb6d3212b6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1301520
+7c57ac7c9f84fbd093f6393e2b63c18078bf0fdf,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6218178
+7caa3a74313f9a7a2dd5b4c2cd7f825d895d3794,http://doi.org/10.1007/s11263-016-0967-5
+7c11fa4fd91cb57e6e216117febcdd748e595760,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5597453
+7cdf3bc1de6c7948763c0c2dfa4384dcbd3677a0,http://doi.org/10.1007/s11263-016-0920-7
+7c8e0f3053e09da6d8f9a1812591a35bccd5c669,http://doi.org/10.1007/978-3-030-00470-5
+7cfbf90368553333b47731729e0e358479c25340,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7346480
+7c66e7f357553fd4b362d00ff377bffb9197410e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961231
+7c6686fa4d8c990e931f1d16deabf647bf3b1986,http://arxiv.org/abs/1504.07550
+166ef5d3fd96d99caeabe928eba291c082ec75a0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237597
+16fadde3e68bba301f9829b3f99157191106bd0f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4562953
+42a6beed493c69d5bad99ae47ea76497c8e5fdae,http://doi.org/10.1007/s11704-017-6613-8
+895081d6a5545ad6385bfc6fcf460fc0b13bac86,http://doi.org/10.1016/S0167-8655%2899%2900134-8
+45b9b7fe3850ef83d39d52f6edcc0c24fcc0bc73,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7888593
+1f3f7df159c338884ddfd38ee2d3ba2e1e3ada69,http://doi.org/10.1162/jocn_a_00645
+1f5f67d315c9dad341d39129d8f8fe7fa58e564c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8397536
+1fe1a78c941e03abe942498249c041b2703fd3d2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8393355
+1f59e0818e7b16c0d39dd08eb90533ea0ae0be5e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8385089
+1fa426496ed6bcd0c0b17b8b935a14c84a7ee1c2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100195
+1fb980e137b2c9f8781a0d98c026e164b497ddb1,http://dl.acm.org/citation.cfm?id=3213539
+7360a2adcd6e3fe744b7d7aec5c08ee31094dfd4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373833
+73ba33e933e834b815f62a50aa1a0e15c6547e83,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8368754
+7361b900018f22e37499443643be1ff9d20edfd6,http://doi.org/10.1049/iet-bmt.2016.0169
+73d53a7c27716ae9a6d3484e78883545e53117ae,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8371978
+7343f0b7bcdaf909c5e37937e295bf0ac7b69499,http://doi.org/10.1016/j.csi.2015.06.004
+73f341ff68caa9f8802e9e81bfa90d88bbdbd9d2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791198
+73dcb4c452badb3ee39a2f222298b234d08c21eb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6779478
+87610276ccbc12d0912b23fd493019f06256f94e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6706757
+87b607b8d4858a16731144d17f457a54e488f15d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5597532
+80d4cf7747abfae96328183dd1f84133023c2668,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369786
+80ed678ef28ccc1b942e197e0393229cd99d55c8,http://doi.org/10.1007/s10044-015-0456-4
+809e5884cf26b71dc7abc56ac0bad40fb29c671c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6247842
+7477cf04c6b086108f459f693a60272523c134db,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6618937
+746c0205fdf191a737df7af000eaec9409ede73f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8423119
+1aa61dd85d3a5a2fe819cba21192ec4471c08628,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8359518
+1a53ca294bbe5923c46a339955e8207907e9c8c6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7273870
+1a81c722727299e45af289d905d7dcf157174248,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995466
+286a5c19a43382a21c8d96d847b52bba6b715a71,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6876188
+289cfcd081c4393c7d6f63510747b5372202f855,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373873
+28e1982d20b6eff33989abbef3e9e74400dbf508,http://doi.org/10.1007/s11042-015-3007-5
+28715fc79bd5ff8dd8b6fc68a4f2641e5d1b8a08,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406402
+28f1542c63f5949ee6f2d51a6422244192b5a900,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780475
+176e6ba56e04c98e1997ffdef964ece90fd827b4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8322125
+179564f157a96787b1b3380a9f79701e3394013d,http://dl.acm.org/citation.cfm?id=2493502
+1773d65c1dc566fd6128db65e907ac91b4583bed,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8328914
+7b47dd9302b3085cd6705614b88d7bdbc8ae5c13,http://doi.org/10.1007/s11063-017-9693-4
+8f71c97206a03c366ddefaa6812f865ac6df87e9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8342943
+8fa9cb5dac394e30e4089bf5f4ffecc873d1da96,http://doi.org/10.1007/s11042-017-5245-1
+8fba84af61ac9b5e2bcb69b6730a597d7521ad73,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771329
+8fb2ec3bbd862f680be05ef348b595e142463524,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1699880
+8a8127a06f432982bfb0150df3212f379b36840b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373884
+8ad0a88a7583af819af66cf2d9e8adb860cf9c34,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7539153
+8ac2d704f27a2ddf19b40c8e4695da629aa52a54,http://doi.org/10.1007/s11042-015-2945-2
+8ae642c87f0d6eeff1c6362571e7cd36dcda60ae,http://dl.acm.org/citation.cfm?id=3123271
+8a6033cbba8598945bfadd2dd04023c2a9f31681,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014991
+8a63a2b10068b6a917e249fdc73173f5fd918db0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8120021
+8a4893d825db22f398b81d6a82ad2560832cd890,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5349489
+8aa1591bf8fcb44f2e9f2f10d1029720ccbb8832,http://dl.acm.org/citation.cfm?id=3078988
+7eb8476024413269bfb2abd54e88d3e131d0aa0e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4284739
+7e56d9ebd47490bb06a8ff0bd5bcd8672ec52364,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1275543
+7ee7b0602ef517b445316ca8aa525e28ea79307e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8418530
+7e8c8b1d72c67e2e241184448715a8d4bd88a727,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8097314
+7e2f7c0eeaeb47b163a7258665324643669919e8,http://doi.org/10.1007/s11042-018-5801-3
+7e27d946d23229220bcb6672aacab88e09516d39,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7900131
+7ec431e36919e29524eceb1431d3e1202637cf19,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8365242
+10cb39e93fac194220237f15dae084136fdc6740,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8457972
+10bfa4cecd64b9584c901075d6b50f4fad898d0b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7728013
+10e4172dd4f4a633f10762fc5d4755e61d52dc36,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100146
+1025c4922491745534d5d4e8c6e74ba2dc57b138,http://doi.org/10.1007/s11263-017-1014-x
+1063be2ad265751fb958b396ee26167fa0e844d2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369056
+10bf35bf98cfe555dfc03b5f03f2769d330e3af9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8000333
+193474d008cab9fa1c1fa81ce094d415f00b075c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8466415
+196c12571ab51273f44ea3469d16301d5b8d2828,http://doi.org/10.1007/s00371-018-1494-x
+19b492d426f092d80825edba3b02e354c312295f,http://doi.org/10.1007/s00371-016-1332-y
+1951dc9dd4601168ab5acf4c14043b124a8e2f67,http://doi.org/10.1162/neco_a_01116
+193bc8b663d041bc34134a8407adc3e546daa9cc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373908
+4c72a51a7c7288e6e17dfefe4f87df47929608e7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7736912
+4cc326fc977cf967eef5f3135bf0c48d07b79e2d,http://doi.org/10.1007/s11042-016-3830-3
+4ca9753ab023accbfa75a547a65344ee17b549ba,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5457710
+4cfe921ac4650470b0473fd52a2b801f4494ee64,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6467429
+4c0cc732314ba3ccccd9036e019b1cfc27850c17,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6854473
+263ed62f94ea615c747c00ebbb4008385285b33b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8319974
+2696d3708d6c6cccbd701f0dac14cc94d72dd76d,http://doi.org/10.1007/s10044-017-0633-8
+265a88a8805f6ba3efae3fcc93d810be1ea68866,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8342346
+26575ad9e75efb440a7dc4ef8e548eed4e19dbd1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411910
+26c8ed504f852eda4a2e63dbbbc3480e57f43c70,http://doi.org/10.1142/S0218001415560078
+21d5c838d19fcb4d624b69fe9d98e84d88f18e79,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7358748
+21b5af67618fcc047b495d2d5d7c2bf145753633,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771442
+21959bc56a160ebd450606867dce1462a913afab,http://doi.org/10.1007/s11042-018-6071-9
+214072c84378802a0a0fde0b93ffb17bc04f3759,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7301397
+4d90d7834ae25ee6176c096d5d6608555766c0b1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354115
+4da4e58072c15904d4ce31076061ebd3ab1cdcd5,http://doi.org/10.1007/s00371-018-1477-y
+4d19401e44848fe65b721971bc71a9250870ed5f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8462612
+4db99a2268a120c7af636387241188064ea42338,https://www.ncbi.nlm.nih.gov/pubmed/21820862
+75ce75c1a5c35ecdba99dd8b7ba900d073e35f78,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163152
+75a74a74d6abbbb302a99de3225c8870fa149aee,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7914657
+758d481bbf24d12615b751fd9ec121500a648bce,http://doi.org/10.1007/s11042-015-2914-9
+814369f171337ee1d8809446b7dbfc5e1ef9f4b5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5597559
+81513764b73dae486a9d2df28269c7db75e9beb3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7839217
+8127b7654d6e5c46caaf2404270b74c6b0967e19,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813406
+81b0550c58e7409b4f1a1cd7838669cfaa512eb3,http://doi.org/10.1016/j.patcog.2015.08.026
+81f101cea3c451754506bf1c7edf80a661fa4dd1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163081
+81a80b26979b40d5ebe3f5ba70b03cb9f19dd7a5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369725
+863ad2838b9b90d4461995f498a39bcd2fb87c73,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265580
+8633732d9f787f8497c2696309c7d70176995c15,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7298967
+8694cd9748fb1c128f91a572119978075fede848,http://doi.org/10.1016/j.neucom.2017.08.028
+720763bcb5e0507f13a8a319018676eb24270ff0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5202783
+72167c9e4e03e78152f6df44c782571c3058050e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771464
+443f4421e44d4f374c265e6f2551bf9830de5597,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771467
+44855e53801d09763c1fb5f90ab73e5c3758a728,http://doi.org/10.1007/s11263-017-1018-6
+44b91268fbbf62e1d2ba1d5331ec7aedac30dbe8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8342368
+44d93039eec244083ac7c46577b9446b3a071f3e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1415571
+2a826273e856939b58be8779d2136bffa0dddb08,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373892
+2ac7bb3fb014d27d3928a9b4bc1bf019627e0c1a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8432363
+2a7058a720fa9da4b9b607ea00bfdb63652dff95,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7590031
+2a612a7037646276ff98141d3e7abbc9c91fccb8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909615
+2a2df7e790737a026434187f9605c4763ff71292,http://doi.org/10.1007/s11042-017-4665-2
+2f1485994ef2c09a7bb2874eb8252be8fe710db1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780700
+2f67d5448b5372f639633d8d29aac9c0295b4d72,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6460923
+2f69e9964f3b6bdc0d18749b48bb6b44a4171c64,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7801496
+2f837ff8b134b785ee185a9c24e1f82b4e54df04,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5739539
+2f73203fd71b755a9601d00fc202bbbd0a595110,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8394868
+43fce0c6b11eb50f597aa573611ac6dc47e088d3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8465617
+43dce79cf815b5c7068b1678f6200dabf8f5de31,http://arxiv.org/abs/1709.03196
+43c3b6a564b284382fdf8ae33f974f4e7a89600e,http://dl.acm.org/citation.cfm?id=3190784
+437642cfc8c34e445ea653929e2d183aaaeeb704,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014815
+4317856a1458baa427dc00e8ea505d2fc5f118ab,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8296449
+4342a2b63c9c344d78cf153600cd918a5fecad59,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237671
+88535dba55b0a80975df179d31a6cc80cae1cc92,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8355366
+885c37f94e9edbbb2177cfba8cb1ad840b2a5f20,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8006255
+88e2efab01e883e037a416c63a03075d66625c26,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265507
+9ff931ca721d50e470e1a38e583c7b18b6cdc2cc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7407637
+9f1a854d574d0bd14786c41247db272be6062581,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8360155
+9f62ac43a1086c22b9a3d9f192c975d1a5a4b31f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4426825
+9f131b4e036208f2402182a1af2a59e3c5d7dd44,http://dl.acm.org/citation.cfm?id=3206038
+9f2984081ef88c20d43b29788fdf732ceabd5d6a,http://arxiv.org/abs/1806.01547
+9fc993aeb0a007ccfaca369a9a8c0ccf7697261d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7936534
+9f43caad22803332400f498ca4dd0429fe7da0aa,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6239186
+6baaa8b763cc5553715766e7fbe7abb235fae33c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789589
+6ba3cb67bcdb7aea8a07e144c03b8c5a79c19bc0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8246530
+6b99cd366f2ea8e1c9abadf73b05388c0e24fec3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100204
+6b742055a664bcbd1c6a85ae6796bd15bc945367,http://doi.org/10.1007/s00138-006-0052-0
+07a31bd7a0bd7118f8ac0bc735feef90e304fb08,http://doi.org/10.1007/s11042-015-3120-5
+071ec4f3fb4bfe6ae9980477d208a7b12691710e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6552193
+38c7f80a1e7fa1bdec632042318dc7cdd3c9aad4,http://doi.org/10.1016/j.asoc.2018.03.030
+3827f1cab643a57e3cd22fbffbf19dd5e8a298a8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373804
+007fbc7a1d7eae33b2bb59b175dd1033e5e178f3,http://dl.acm.org/citation.cfm?id=3209659
+6e46d8aa63db3285417c8ebb65340b5045ca106f,http://dl.acm.org/citation.cfm?id=3183751
+6e38011e38a1c893b90a48e8f8eae0e22d2008e8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265376
+9a98dd6d6aaba05c9e46411ea263f74df908203d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7859405
+9a59abdf3460970de53e09cb397f47d86744f472,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995399
+9aab33ce8d6786b3b77900a9b25f5f4577cea461,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961739
+9ac2960f646a46b701963230e6949abd9ac0a9b3,http://doi.org/10.1162/jocn_a_01174
+361eaef45fccfffd5b7df12fba902490a7d24a8d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8404319
+09903df21a38e069273b80e94c8c29324963a832,http://doi.org/10.1007/s11042-017-4980-7
+098363b29eef1471c494382338687f2fe98f6e15,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411212
+099053f2cbfa06c0141371b9f34e26970e316426,http://doi.org/10.1007/s11042-016-4079-6
+5dafab3c936763294257af73baf9fb3bb1696654,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5514556
+5d9971c6a9d5c56463ea186850b16f8969a58e67,http://doi.org/10.1007/s11042-017-5354-x
+5da827fe558fb2e1124dcc84ef08311241761726,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139096
+5dd473a4a9c6337b083edf38b6ddf5a6aece8908,http://arxiv.org/abs/1711.08238
+5de9670f72d10682bf2cb3156988346257e0489f,http://doi.org/10.1016/j.inffus.2015.12.004
+5d2e5833ca713f95adcf4267148ac2ccf2318539,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6121744
+5dd3c9ac3c6d826e17c5b378d1575b68d02432d7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7292416
+31cdaaa7a47efe2ce0e78ebec29df4d2d81df265,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7776921
+31f1c92dbfa5aa338a21a0cb15d071cb9dc6e362,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337733
+31dd6bafd6e7c6095eb8d0591abac3b0106a75e3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8457336
+31d51e48dbd9e7253eafe0719f3788adb564a971,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7410588
+3157be811685c93d0cef7fa4c489efea581f9b8e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411222
+31ec1e5c3b5e020af4a5a3c1be2724c7429a7c78,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354285
+914d7527678b514e3ee9551655f55ffbd3f0eb0a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8404350
+91e17338a12b5e570907e816bff296b13177971e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8272751
+91f0a95b8eb76e8fa24c8267e4a7a17815fc7a11,http://doi.org/10.1007/s41095-016-0068-y
+657e702326a1cbc561e059476e9be4d417c37795,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8343704
+651cafb2620ab60a0e4f550c080231f20ae6d26e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6360717
+6584c3c877400e1689a11ef70133daa86a238602,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8039231
+629a973ca5f3c7d2f4a9befab97d0044dfd3167a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4427488
+62fddae74c553ac9e34f511a2957b1614eb4f937,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406684
+62750d78e819d745b9200b0c5c35fcae6fb9f404,http://doi.org/10.1007/s11042-016-4085-8
+62f017907e19766c76887209d01d4307be0cc573,http://doi.org/10.1016/j.imavis.2012.02.001
+969626c52d30ea803064ddef8fb4613fa73ba11d,http://doi.org/10.1007/BF02683992
+96e318f8ff91ba0b10348d4de4cb7c2142eb8ba9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8364450
+96ba65bffdddef7c7737c0f42ff4299e95cd85c2,http://doi.org/10.1007/s11042-018-5658-5
+9649a19b49607459cef32f43db4f6e6727080bdb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8395207
+3a0558ebfde592bd8bd07cb72b8ca8f700715bfb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6636646
+3a3e55cf5bfd689d6c922e082efa0cd71cd2ae5c,http://dl.acm.org/citation.cfm?id=3184081
+3ac3a714042d3ebc159546c26321a1f8f4f5f80c,http://dl.acm.org/citation.cfm?id=3025149
+3a49507c46a2b8c6411809c81ac47b2b1d2282c3,http://doi.org/10.1007/s11042-017-5319-0
+3a6334953cd2775fab7a8e7b72ed63468c71dee7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7591180
+5435d5f8b9f4def52ac84bee109320e64e58ab8f,http://doi.org/10.1007/s11042-016-4321-2
+54ba18952fe36c9be9f2ab11faecd43d123b389b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163085
+54f169ad7d1f6c9ce94381e9b5ccc1a07fd49cc6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7911334
+982fcead58be419e4f34df6e806204674a4bc579,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6613012
+9888edfb6276887eb56a6da7fe561e508e72a517,http://dl.acm.org/citation.cfm?id=3243904
+984edce0b961418d81203ec477b9bfa5a8197ba3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369732
+98d1b5515b079492c8e7f0f9688df7d42d96da8e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5204260
+9806d3dc7805dd8c9c20d7222c915fc4beee7099,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6755972
+98e098ba9ff98fc58f22fed6d3d8540116284b91,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8332532
+98fd92d68a143a5ced4a016fa3b7addd6b4a0122,http://doi.org/10.1007/s11704-016-6066-5
+53507e2de66eaba996f14fd2f54a5535056f1e59,http://doi.org/10.1016/j.sigpro.2017.10.024
+53de11d144cd2eda7cf1bb644ae27f8ef2489289,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8424637
+535cdce8264ac0813d5bb8b19ceafa77a1674adf,http://doi.org/10.1007/s12559-016-9402-z
+53f5cb365806c57811319a42659c9f68b879454a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8356995
+3ff79cf6df1937949cc9bc522041a9a39d314d83,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8406730
+3f0c6dbfd3c9cd5625ba748327d69324baa593a6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373880
+30c93fec078b98453a71f9f21fbc9512ab3e916f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8395274
+3083bd7a442af6a1d72cdc04ae1ad7c30697a4e8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8392250
+30fb5c24cc15eb8cde5e389bf368d65fb96513e4,http://dl.acm.org/citation.cfm?id=3206048
+5e6fc99d8f5ebaab0e9c29bc0969530d201e0708,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8017477
+5ed66fb992bfefb070b5c39dc45b6e3ff5248c10,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163116
+5e9ec3b8daa95d45138e30c07321e386590f8ec7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6967830
+5b5b9c6c67855ede21a60c834aea5379df7d51b7,http://hdl.handle.net/10044/1/45280
+5bb4fd87fa4a27ddacd570aa81c2d66eb4721019,http://doi.org/10.1016/j.neucom.2017.07.014
+5b5b568a0ba63d00e16a263051c73e09ab83e245,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8416840
+378418fdd28f9022b02857ef7dbab6b0b9a02dbe,http://doi.org/10.1007/978-3-319-75420-8
+37866fea39deeff453802cde529dd9d32e0205a5,http://dl.acm.org/citation.cfm?id=2393385
+3779e0599481f11fc1acee60d5108d63e55819b3,http://doi.org/10.1007/s11280-018-0581-2
+0831794eddcbac1f601dcb9be9d45531a56dbf7e,http://doi.org/10.1007/s11042-017-4416-4
+080e0efc3cf71260bfe9bdc62cd86614d1ebca46,http://doi.org/10.1007/s10851-017-0771-z
+6d2fd0a9cbea13e840f962ba7c8a9771ec437d3a,http://doi.org/10.1007/s11063-017-9715-2
+6dcf6b028a6042a9904628a3395520995b1d0ef9,http://dl.acm.org/citation.cfm?id=3158392
+6dcf418c778f528b5792104760f1fbfe90c6dd6a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984
+6de935a02f87aa31e33245c3b85ea3b7f8b1111c,http://doi.org/10.1007/s11263-017-1029-3
+6da711d07b63c9f24d143ca3991070736baeb412,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7000295
+6d70344ae6f6108144a15e9debc7b0be4e3335f1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8390318
+013305c13cfabaea82c218b841dbe71e108d2b97,http://doi.org/10.1007/s11063-016-9554-6
+017e94ad51c9be864b98c9b75582753ce6ee134f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7892240
+01e27b6d1af4c9c2f50e2908b5f3b2331ff24846,http://doi.org/10.1007/s11263-017-0996-8
+0141cb33c822e87e93b0c1bad0a09db49b3ad470,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7298876
+0647c9d56cf11215894d57d677997826b22f6a13,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8401557
+06518858bd99cddf9bc9200fac5311fc29ac33b4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8392777
+06ab24721d7117974a6039eb2e57d1545eee5e46,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373809
+06b4e41185734f70ce432fdb2b121a7eb01140af,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8362753
+6c1227659878e867a01888eef472dd96b679adb6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354280
+6ca6ade6c9acb833790b1b4e7ee8842a04c607f7,http://dl.acm.org/citation.cfm?id=3234805
+6cb8c52bb421ce04898fa42cb997c04097ddd328,http://doi.org/10.1007/978-3-319-11289-3
+6c01b349edb2d33530e8bb07ba338f009663a9dd,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5332299
+6cce5ccc5d366996f5a32de17a403341db5fddc6,http://doi.org/10.1016/j.cviu.2016.04.012
+6c92d87c84fa5e5d2bb5bed3ef38168786bacc49,http://dl.acm.org/citation.cfm?id=2501650
+6c7a42b4f43b3a2f9b250f5803b697857b1444ac,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553718
+6cbde27d9a287ae926979dbb18dfef61cf49860e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8253589
+6c58e3a8209fef0e28ca2219726c15ea5f284f4f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7899896
+397257783ccc8cace5b67cc71e0c73034d559a4f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6918513
+398e0771e64cab6ca5d21754e32dce63f9e3c223,http://dl.acm.org/citation.cfm?id=3206028
+39af06d29a74ad371a1846259e01c14b5343e3d1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8046026
+39d6f8b791995dc5989f817373391189d7ac478a,http://doi.org/10.1016/j.patrec.2015.09.015
+9944c451b4a487940d3fd8819080fe16d627892d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612967
+9939498315777b40bed9150d8940fc1ac340e8ba,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789583
+997b9ffe2f752ba84a66730cfd320d040e7ba2e2,http://dl.acm.org/citation.cfm?id=2967199
+99d06fe2f4d6d76acf40b6da67c5052e82055f5a,http://dl.acm.org/citation.cfm?id=3268909
+9989ad33b64accea8042e386ff3f1216386ba7f1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8393320
+9961f1e5cf8fda29912344773bc75c47f18333a0,http://doi.org/10.1007/s10044-017-0618-7
+521aa8dcd66428b07728b91722cc8f2b5a73944b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8367126
+52af7625f7e7a0bd9f9d8eeafd631c4d431e67e7,http://doi.org/10.1007/s00371-018-1585-8
+525da67fb524d46f2afa89478cd482a68be8a42b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354128
+522a4ca705c06a0436bbe62f46efe24d67a82422,http://doi.org/10.1007/s11042-017-5475-2
+55432723c728a2ce90d817e9e9877ae9fbad6fe5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8412925
+55cfc3c08000f9d21879582c6296f2a864b657e8,http://doi.org/10.1049/iet-cvi.2015.0287
+556b05ab6eff48d32ffbd04f9008b9a5c78a4ad7,http://dl.acm.org/citation.cfm?id=2926713
+552122432b92129d7e7059ef40dc5f6045f422b5,http://doi.org/10.1007/s11263-017-1000-3
+55aafdef9d9798611ade1a387d1e4689f2975e51,http://doi.org/10.1007/s11263-017-1044-4
+55c4efc082a8410b528af7325de8148b80cf41e3,http://dl.acm.org/citation.cfm?id=3231899
+55a7286f014cc6b51a3f50b1e6bc8acc8166f231,http://arxiv.org/abs/1603.02814
+97b5800e144a8df48f1f7e91383b0f37bc37cf60,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237657
+972e044f69443dfc5c987e29250b2b88a6d2f986,http://doi.org/10.1134/S1054661811020738
+971cb1bfe3d10fcb2037e684c48bd99842f42fa4,http://doi.org/10.1007/s11042-017-5141-8
+972b1a7ef8cc9c83c2c6d8d126f94f27b567d7d0,http://doi.org/10.1007/978-3-319-99978-4
+97c1f68fb7162af326cd0f1bc546908218ec5da6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7471977
+63fd7a159e58add133b9c71c4b1b37b899dd646f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6603332
+6318d3842b36362bb45527b717e1a45ae46151d5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780708
+636b8ffc09b1b23ff714ac8350bb35635e49fa3c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1467308
+6359fcb0b4546979c54818df8271debc0d653257,http://doi.org/10.1007/s11704-017-6275-6
+633c851ebf625ad7abdda2324e9de093cf623141,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961727
+6316a4b689706b0f01b40f9a3cef47b92bc52411,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1699534
+0f7e9199dad3237159e985e430dd2bf619ef2db5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7883882
+0a0007cfd40ae9694c84f109aea11ec4f2b6cf39,http://doi.org/10.1007/s11042-016-4105-8
+0aaf785d7f21d2b5ad582b456896495d30b0a4e2,http://dl.acm.org/citation.cfm?id=3173789
+642a386c451e94d9c44134e03052219a7512b9de,http://doi.org/10.1016/j.imavis.2008.04.018
+640e12837241d52d04379d3649d050ee3760048c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5692624
+64ec02e1056de4b400f9547ce56e69ba8393e2ca,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8446491
+645f09f4bc2e6a13663564ee9032ca16e35fc52d,http://dl.acm.org/citation.cfm?id=3193542
+9057044c0347fb9798a9b552910a9aff150385db,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6778411
+9077365c9486e54e251dd0b6f6edaeda30ae52b9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373910
+90e7a86a57079f17f1089c3a46ea9bfd1d49226c,https://www.sciencedirect.com/science/article/pii/S0042698914002739
+90221884fe2643b80203991686af78a9da0f9791,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995467
+bfdafe932f93b01632a5ba590627f0d41034705d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6134770
+bf3bf5400b617fef2825eb987eb496fea99804b9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8461385
+bf37a81d572bb154581845b65a766fab1e5c7dda,http://doi.org/10.1007/s11760-017-1111-x
+d34f546e61eccbac2450ca7490f558e751e13ec3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8461800
+d3008b4122e50a28f6cc1fa98ac6af28b42271ea,http://dl.acm.org/citation.cfm?id=2806218
+d3dea0cd65ab3da14cb7b3bd0ec59531d98508aa,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7728015
+d31328b12eef33e7722b8e5505d0f9d9abe2ffd9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373866
+d36a1e4637618304c2093f72702dcdcc4dcd41d1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961791
+d383ba7bbf8b7b49dcef9f8abab47521966546bb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995471
+d3d39e419ac98db2de1a9d5a05cb0b4ca5cae8fd,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6619296
+d340a135a55ecf7506010e153d5f23155dcfa7e8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7884781
+d4f0960c6587379ad7df7928c256776e25952c60,https://www.ncbi.nlm.nih.gov/pubmed/29107889
+d4453ec649dbde752e74da8ab0984c6f15cc6e06,http://doi.org/10.1007/s11042-016-3361-y
+d4288daef6519f6852f59ac6b85e21b8910f2207,https://www.ncbi.nlm.nih.gov/pubmed/29994505
+d4b4020e289c095ce2c2941685c6cd37667f5cc9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7489442
+d4df31006798ee091b86e091a7bf5dce6e51ba3e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1612996
+d44e6baf3464bf56d3a29daf280b1b525ac30f7d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265336
+ba01dbfa29dc86d1279b2e9b9eeca1c52509bbda,http://doi.org/10.1007/s00530-017-0566-5
+bad2df94fa771869fa35bd11a1a7ab2e3f6d1da3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7344635
+ba1c0600d3bdb8ed9d439e8aa736a96214156284,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8081394
+badb95dbdfb3f044a46d7ba0ee69dba929c511b1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7363515
+baafe3253702955c6904f0b233e661b47aa067e1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7776926
+ba17782ca5fc0d932317389c2adf94b5dbd3ebfe,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5509290
+a082c77e9a6c2e2313d8255e8e4c0677d325ce3e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163111
+a00fdf49e5e0a73eb24345cb25a0bd1383a10021,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7892186
+a03448488950ee5bf50e9e1d744129fbba066c50,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8367180
+a7ec294373ccc0598cbb0bbb6340c4e56fe5d979,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1699580
+a78025f39cf78f2fc66c4b2942fbe5bad3ea65fc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8404357
+a78b5495a4223b9784cc53670cc10b6f0beefd32,http://doi.org/10.1007/s11042-018-6260-6
+b8fc620a1563511744f1a9386bdfa09a2ea0f71b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411214
+b8048a7661bdb73d3613fde9d710bd45a20d13e7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8468792
+b85c198ce09ffc4037582a544c7ffb6ebaeff198,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100113
+b82f89d6ef94d26bf4fec4d49437346b727c3bd4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6894202
+b8d8501595f38974e001a66752dc7098db13dfec,http://arxiv.org/abs/1711.09265
+b806a31c093b31e98cc5fca7e3ec53f2cc169db9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7995928
+b14e3fe0d320c0d7c09154840250d70bc88bb6c0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1699097
+b161d261fabb507803a9e5834571d56a3b87d147,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8122913
+b1f4423c227fa37b9680787be38857069247a307,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6200254
+b104c8ef6735eba1d29f50c99bbbf99d33fc8dc2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7415357
+b11b71b704629357fe13ed97b216b9554b0e7463,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7736040
+dd0086da7c4efe61abb70dd012538f5deb9a8d16,http://doi.org/10.1007/s11704-016-5024-6
+dd6826e9520a6e72bcd24d1bdb930e78c1083b31,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7106467
+ddfae3a96bd341109d75cedeaebb5ed2362b903f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6837429
+dc1510110c23f7b509035a1eda22879ef2506e61,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909642
+dc107e7322f7059430b4ef4991507cb18bcc5d95,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995338
+dcf6ecd51ba135d432fcb7697fc6c52e4e7b0a43,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100120
+dc964b9c7242a985eb255b2410a9c45981c2f4d0,http://doi.org/10.1007/s10851-018-0837-6
+dc5d04d34b278b944097b8925a9147773bbb80cc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354149
+dc5d9399b3796db7fd850990402dce221b98c8be,http://dl.acm.org/citation.cfm?id=3220016
+dc3dc18b6831c867a8d65da130a9ff147a736745,http://dl.acm.org/citation.cfm?id=2750679
+dc34ab49d378ddcf6c8e2dbf5472784c5bfa8006,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8462222
+dcb6f06631021811091ce691592b12a237c12907,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8438999
+dc6ad30c7a4bc79bb06b4725b16e202d3d7d8935,http://doi.org/10.1007/s11042-017-4646-5
+b6bb883dd14f2737d0d6225cf4acbf050d307634,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8382306
+b6f15bf8723b2d5390122442ab04630d2d3878d8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163142
+b6620027b441131a18f383d544779521b119c1aa,http://doi.org/10.1016/j.patcog.2013.04.013
+b69bcb5f73999ea12ff4ac1ac853b72cd5096b2d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1613024
+a9fc8efd1aa3d58f89c0f53f0cb112725b5bda10,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8316891
+a9ae55c83a8047c6cdf7c958fd3d4a6bfb0a13df,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014745
+a9fdbe102f266cc20e600fa6b060a7bc8d1134e9,https://www.ncbi.nlm.nih.gov/pubmed/29334821
+a92147bed9c17c311c6081beb0ef4c3165b6268e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6805594
+a98ff1c2e3c22e3d0a41a2718e4587537b92da0a,http://doi.org/10.1007/978-3-319-68548-9_19
+a939e287feb3166983e36b8573cd161d12097ad8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7550048
+a961f1234e963a7945fed70197015678149b37d8,http://dl.acm.org/citation.cfm?id=3206068
+a96c45ed3a44ad79a72499be238264ae38857988,http://doi.org/10.1007/s00138-016-0786-2
+a92c207031b0778572bf41803dba1a21076e128b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8433557
+a9215666b4bcdf8d510de8952cf0d55b635727dc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7498613
+d57c8d46a869c63fb20e33bc21bc2a3c4628f5b4,http://doi.org/10.1007/s11042-018-5806-y
+d57982dc55dbed3d0f89589e319dc2d2bd598532,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099760
+d5d5cc27ca519d1300e77e3c1a535a089f52f646,http://doi.org/10.1007/s11042-016-3768-5
+d289ce63055c10937e5715e940a4bb9d0af7a8c5,http://dl.acm.org/citation.cfm?id=3081360
+d264dedfdca8dc4c71c50311bcdd6ba3980eb331,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8392234
+d2f2b10a8f29165d815e652f8d44955a12d057e6,http://doi.org/10.1007/s10044-015-0475-1
+d20ea5a4fa771bc4121b5654a7483ced98b39148,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4430554
+aad4c94fd55d33a3f3a5377bbe441c9474cdbd1e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7777820
+aa581b481d400982a7e2a88830a33ec42ad0414f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7313922
+aa5a7a9900548a1f1381389fc8695ced0c34261a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7900274
+aafeb3d76155ec28e8ab6b4d063105d5e04e471d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014781
+aa6e8a2a9d3ed59d2ae72add84176e7b7f4b2912,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8203756
+aa1129780cc496918085cd0603a774345c353c54,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7779010
+aa1607090fbc80ab1e9c0f25ffe8b75b777e5fd8,https://www.sciencedirect.com/science/article/pii/S0006322316331110
+af29ad70ab148c83e1faa8b3098396bc1cd87790,http://doi.org/10.1007/s40012-016-0149-1
+afdc303b3325fbc1baa9f18a66bcad59d5aa675b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5595920
+af4745a3c3c7b51dab0fd90d68b53e60225aa4a9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7873272
+af3b803188344971aa89fee861a6a598f30c6f10,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8404811
+af9419f2155785961a5c16315c70b8228435d5f8,http://doi.org/10.1016/j.patrec.2015.12.013
+b712f08f819b925ff7587b6c09a8855bc295d795,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8450858
+b759936982d6fb25c55c98955f6955582bdaeb27,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7472169
+b7ec41005ce4384e76e3be854ecccd564d2f89fb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8441009
+b72eebffe697008048781ab7b768e0c96e52236a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100092
+b7461aac36fc0b8a24ecadf6c5b5caf54f2aa2f7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7528404
+b7c6df1ae0e8348feecd65e9ad574d1e04d212a5,http://doi.org/10.1007/s11704-018-8015-y
+db0379c9b02e514f10f778cccff0d6a6acf40519,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6130343
+dba7d8c4d2fca41269a2c96b1ea594e2d0b9bdda,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7422069
+db1a9b8d8ce9a5696a96f8db4206b6f72707730e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961838
+dbb9601a1d2febcce4c07dd2b819243d81abb2c2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8361884
+dbc8ffd6457147ff06cd3f56834e3ec6dccb2057,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265396
+dbced84d839165d9b494982449aa2eb9109b8467,http://arxiv.org/abs/1712.05083
+a8bb698d1bb21b81497ef68f0f52fa6eaf14a6bf,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6587752
+a85f691c9f82a248aa2c86d4a63b9036d6cf47ab,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8423530
+a88ced67f4ed7940c76b666e1c9c0f08b59f9cf8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771415
+a8e7561ada380f2f50211c67fc45c3b3dea96bdb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4401921
+a89cbc90bbb4477a48aec185f2a112ea7ebe9b4d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265434
+de162d4b8450bf2b80f672478f987f304b7e6ae4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237454
+def934edb7c7355757802a95218c6e4ed6122a72,http://doi.org/10.1007/978-0-387-31439-6
+dec76940896a41a8a7b6e9684df326b23737cd5d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6607638
+de92951ea021ec56492d76381a8ae560a972dd68,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6738246
+dee6609615b73b10540f32537a242baa3c9fca4d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8015006
+de0df8b2b4755da9f70cf1613d7b12040d0ce8ef,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791166
+de45bf9e5593a5549a60ca01f2988266d04d77da,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8404529
+b0b944b3a783c2d9f12637b471fe1efb44deb52b,http://dl.acm.org/citation.cfm?id=2591684
+b034cc919af30e96ee7bed769b93ea5828ae361b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099915
+a6b5ca99432c23392cec682aebb8295c0283728b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8302395
+a6e4f924cf9a12625e85c974f0ed136b43c2f3b5,http://doi.org/10.1007/s11042-017-4572-6
+a60db9ca8bc144a37fe233b08232d9c91641cbb5,http://doi.org/10.1007/s11280-018-0615-9
+a6902db7972a7631d186bbf59c5ef116c205b1e8,http://dl.acm.org/citation.cfm?id=1276381
+a6ce1a1de164f41cb8999c728bceedf65d66bb23,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7170694
+a6d47f7aa361ab9b37c7f3f868280318f355fadc,https://ora.ox.ac.uk/objects/uuid:7704244a-b327-4e5c-a58e-7bfe769ed988
+b97c7f82c1439fa1e4525e5860cb05a39cc412ea,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4430537
+b999364980e4c21d9c22cc5a9f14501432999ca4,http://doi.org/10.1007/s10044-018-0727-y
+b9dc8cc479cacda1f23b91df00eb03f88cc0c260,http://dl.acm.org/citation.cfm?id=2964287
+b91f54e1581fbbf60392364323d00a0cd43e493c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553788
+b961e512242ddad7712855ab00b4d37723376e5d,http://doi.org/10.1007/s11554-010-0178-1
+a1e07c31184d3728e009d4d1bebe21bf9fe95c8e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7900056
+a168ca2e199121258fbb2b6c821207456e5bf994,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553808
+a1081cb856faae25df14e25045cd682db8028141,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8462122
+a136ccaa67f660c45d3abb8551c5ed357faf7081,https://www.ncbi.nlm.nih.gov/pubmed/27078863
+ef2bb8bd93fa8b44414565b32735334fa6823b56,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8393076
+efc78a7d95b14abacdfde5c78007eabf9a21689c,http://dl.acm.org/citation.cfm?id=2939840
+efb24d35d8f6a46e1ff3800a2481bc7e681e255e,http://doi.org/10.1016/j.patrec.2015.08.006
+c3d3d2229500c555c7a7150a8b126ef874cbee1c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406478
+c3d874336eb8fae92ab335393fd801fa8df98412,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952438
+c362116a358320e71fb6bc8baa559142677622d2,http://doi.org/10.1016/j.patcog.2011.07.009
+c38b1fa00f1f370c029984c55d4d2d40b529d00c,http://doi.org/10.1007/978-3-319-26561-2
+c4a2cd5ec81cdfd894c9a20d4ffb8cda637aab1f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5326314
+c4cfdcf19705f9095fb60fb2e569a9253a475f11,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237333
+c4e2d5ebfebbb9dcee6a9866c3d6290481496df5,http://doi.org/10.1007/s00138-012-0439-z
+c43dc4ae68a317b34a79636fadb3bcc4d1ccb61c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369763
+c47bd9f6eb255da525dbcdfc111609c90bc4d2ae,http://dl.acm.org/citation.cfm?id=3230921
+c4f3185f010027a0a97fcb9753d74eb27a9cfd3e,http://doi.org/10.1016/j.patrec.2015.02.006
+c48b68dc780c71ab0f0f530cd160aa564ed08ade,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1357193
+eaf020bc8a3ed5401fc3852f7037a03b2525586a,http://arxiv.org/abs/1710.07735
+eac97959f2fcd882e8236c5dd6035870878eb36b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6890147
+ea1303f6746f815b7518c82c9c4d4a00cd6328b9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411434
+eacf974e235add458efb815ada1e5b82a05878fa,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4577667
+ea03a569272d329090fe60d6bff8d119e18057d7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7532906
+e1312b0b0fd660de87fa42de39316b28f9336e70,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369055
+e1d1540a718bb7a933e21339f1a2d90660af7353,http://doi.org/10.1007/s11063-018-9852-2
+e1179a5746b4bf12e1c8a033192326bf7f670a4d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163104
+e16f73f3a63c44cf285b8c1bc630eb8377b85b6d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373816
+e14cc2715b806288fe457d88c1ad07ef55c65318,http://dl.acm.org/citation.cfm?id=2830583
+e180572400b64860e190a8bc04ef839fa491e056,http://doi.org/10.1038/s41598-017-12097-w
+cdcfc75f54405c77478ab776eb407c598075d9f8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7410829
+cd22e6532211f679ba6057d15a801ba448b9915c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8434092
+cd55fb30737625e86454a2861302b96833ed549d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139094
+cd63759842a56bd2ede3999f6e11a74ccbec318b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995404
+cd2f8d661ea2c6d6818a278eb4f0548751c3b1ae,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7945277
+cc9d068cf6c4a30da82fd6350a348467cb5086d4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411204
+ccb2ecb30a50460c9189bb55ba594f2300882747,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8334751
+cccd0edb5dafb3a160179a60f75fd8c835c0be82,http://doi.org/10.1007/s12193-017-0241-3
+cc05f758ccdf57d77b06b96b9d601bf2795a6cc4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6854428
+cce332405ce9cd9dccc45efac26d1d614eaa982d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5597533
+ccb54fc5f263a8bc2a8373839cb6855f528f10d3,http://doi.org/10.1016/j.patcog.2015.11.008
+cc2a9f4be1e465cb4ba702539f0f088ac3383834,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7344595
+e6d6203fa911429d76f026e2ec2de260ec520432,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7899663
+e6da1fcd2a8cda0c69b3d94812caa7d844903007,http://dl.acm.org/citation.cfm?id=3137154
+e68869499471bcd6fa8b4dc02aa00633673c0917,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5595885
+f9d9b2a1197cdb73e977490756c0ff8a30cafc3e,http://doi.org/10.1007/s11042-018-6110-6
+f03a82fd4a039c1b94a0e8719284a777f776fb22,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8355453
+f095b5770f0ff13ba9670e3d480743c5e9ad1036,http://doi.org/10.1007/s11263-016-0950-1
+f0f854f8cfe826fd08385c0c3c8097488f468076,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406454
+f070d739fb812d38571ec77490ccd8777e95ce7a,http://doi.org/10.1016/j.patcog.2014.09.007
+f7ae38a073be7c9cd1b92359131b9c8374579b13,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7487053
+f76a6b1d6029769e2dc1be4dadbee6a7ba777429,http://doi.org/10.1007/s12559-017-9506-0
+f7be8956639e66e534ed6195d929aed4e0b90cad,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4117059
+e8aa1f207b4b0bb710f79ab47a671d5639696a56,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7362364
+e853484dc585bed4b0ed0c5eb4bc6d9d93a16211,http://dl.acm.org/citation.cfm?id=3130971
+e8f4ded98f5955aad114f55e7aca6b540599236b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7047804
+e896389891ba84af58a8c279cf8ab5de3e9320ee,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6958874
+fa052fd40e717773c6dc9cc4a2f5c10b8760339f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5595883
+fa641327dc5873276f0af453a2caa1634c16f143,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789590
+fa80344137c4d158bf59be4ac5591d074483157a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1470219
+fa32b29e627086d4302db4d30c07a9d11dcd6b84,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354123
+ff76ff05aa1ab17e5ca9864df2252e6bb44c8a17,http://dl.acm.org/citation.cfm?id=3173582
+ffc81ced9ee8223ab0adb18817321cbee99606e6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791157
+fffe5ab3351deab81f7562d06764551422dbd9c4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163114
+ff012c56b9b1de969328dacd13e26b7138ff298b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7762921
+c5c53d42e551f3c8f6ca2c13335af80a882009fa,http://doi.org/10.1007/s11263-018-1088-0
+c5e37630d0672e4d44f7dee83ac2c1528be41c2e,http://dl.acm.org/citation.cfm?id=3078973
+c535d4d61aa0f1d8aadb4082bdcc19f4cbdf0eaf,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237344
+c26b43c2e1e2da96e7caabd46e1d7314acac0992,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8466510
+c29fe5ed41d2240352fcb8d8196eb2f31d009522,http://doi.org/10.1007/s11042-015-3230-0
+c2bd9322fa2d0a00fc62075cc0f1996fc75d42a8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014811
+f64574ee0e6247b84d573ddb5c6e2c4ba798ffff,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1699435
+f6fc112ff7e4746b040c13f28700a9c47992045e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7442559
+f6532bf13a4649b7599eb40f826aa5281e392c61,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6202713
+f61829274cfe64b94361e54351f01a0376cd1253,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7410784
+f6f2a212505a118933ef84110e487551b6591553,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952474
+f65b47093e4d45013f54c3ba09bbcce7140af6bb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354117
+e9809c0c6bf33cfe232a63b0a13f9b1263c58cb8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7172556
+e97ba85a4550667b8a28f83a98808d489e0ff3bc,http://doi.org/10.1155/2018%2F9729014
+e9b0a27018c7151016a9fe01c98b4c21d6ebf4be,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7471957
+e96cef8732f3021080c362126518455562606f2d,http://dl.acm.org/citation.cfm?id=3206058
+f1ae9f5338fcff577b1ae9becdb66007fe57bd45,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099873
+f16599e4ec666c6390c90ff9a253162178a70ef5,http://dl.acm.org/citation.cfm?id=3206050
+f1280f76933ba8b7f4a6b8662580504f02bb4ab6,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7836703
+f1173a4c5e3501323b37c1ae9a6d7dd8a236eab8,http://arxiv.org/abs/1504.07339
+f11c76efdc9651db329c8c862652820d61933308,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163100
+e75a589ca27dc4f05c2715b9d54206dee37af266,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8409973
+e7cfaff65541cde4298a04882e00608d992f6703,http://doi.org/10.1007/s00521-018-3554-6
+e7697c7b626ba3a426106d83f4c3a052fcde02a4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553713
+e79bacc03152ea55343e6af97bcd17d8904cf5ef,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237669
+cb8382f43ce073322eba82809f02d3084dad7969,http://dl.acm.org/citation.cfm?id=3232664
+cbbd9880fb28bef4e33da418a3795477d3a1616e,http://doi.org/10.1016/j.patcog.2016.02.002
+cbe021d840f9fc1cb191cba79d3f7e3bbcda78d3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406479
+cb522b2e16b11dde48203bef97131ddca3cdaebd,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8331979
+cbfcd1ec8aa30e31faf205c73d350d447704afee,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7955089
+cb8a1b8d87a3fef15635eb4a32173f9c6f966055,http://dl.acm.org/citation.cfm?id=3234150
+cb27b45329d61f5f95ed213798d4b2a615e76be2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8329236
+cb2470aade8e5630dcad5e479ab220db94ecbf91,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8397018
+f85ccab7173e543f2bfd4c7a81fb14e147695740,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5946910
+f8162276f3b21a3873dde7a507fd68b4ab858bcc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4761923
+cef73d305e5368ee269baff53ec20ea3ae7cdd82,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8461485
+cec70cf159b51a18b39c80fac1ad34f65f3691ef,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7949100
+cea2911ccabab40e9c1e5bcc0aa1127cab0c789f,http://doi.org/10.1007/s11042-015-2847-3
+cec8936d97dea2fcf04f175d3facaaeb65e574bf,http://dl.acm.org/citation.cfm?id=3134264
+ce70dd0d613b840754dce528c14c0ebadd20ffaa,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7973159
+ceba8ca45bad226c401a509e6b8ccbf31361b0c9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7129813
+ce75deb5c645eeb08254e9a7962c74cab1e4c480,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373839
+ced7811f2b694e54e3d96ec5398e4b6afca67fc0,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1605391
+ce2945e369603fcec1fcdc6e19aac5996325cba9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771366
+e060e32f8ad98f10277b582393df50ac17f2836c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099600
+e0162dea3746d58083dd1d061fb276015d875b2e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014992
+46f48211716062744ddec5824e9de9322704dea1,http://doi.org/10.1007/s11263-016-0923-4
+468bb5344f74842a9a43a7e1a3333ebd394929b4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373896
+46e0703044811c941f0b5418139f89d46b360aa3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7883945
+4686df20f0ee40cd411e4b43860ef56de5531d9e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1301536
+46c82cfadd9f885f5480b2d7155f0985daf949fc,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780537
+46976097c54e86032932d559c8eb82ffea4bb6bb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6738868
+2c052a1c77a3ec2604b3deb702d77c41418c7d3e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373863
+2ce1bac5ddc4cf668bbbb8879cd21dfb94b5cfe4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099709
+7923742e2af655dee4f9a99e39916d164bc30178,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8272743
+7914c3f510e84a3d83d66717aad0d852d6a4d148,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7532448
+7918e3e15099b4b2943746e1f6c9e3992a79c5f3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995492
+794a51097385648e3909a1acae7188f5ab881710,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813382
+2d3af3ee03793f76fb8ff15e7d7515ff1e03f34c,http://doi.org/10.1007/s11042-017-4818-3
+2d7c2c015053fff5300515a7addcd74b523f3f66,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8323422
+2dbc57abf3ceda80827b85593ce1f457b76a870b,http://doi.org/10.1007/s11042-018-6133-z
+4113269f916117f975d5d2a0e60864735b73c64c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1613059
+41c56c69b20b3f0b6c8a625009fc0a4d317e047a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5720366
+41c42cb001f34c43d4d8dd8fb72a982854e173fb,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5308445
+414d78e32ac41e6ff8b192bc095fe55f865a02f4,http://arxiv.org/abs/1706.00631
+834736698f2cc5c221c22369abe95515243a9fc3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6996249
+83d41f6548bb76241737dcd3fed9e182ee901ff9,http://dl.acm.org/citation.cfm?id=2964328
+8355d095d3534ef511a9af68a3b2893339e3f96b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406390
+83f80fd4eb614777285202fa99e8314e3e5b169c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265544
+1bd9dbe78918ed17b0a3ac40623f044cb3d3552c,http://doi.org/10.1038/nn870
+1b5d445741473ced3d4d33732c9c9225148ed4a1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8452894
+7783095a565094ae5b3dccf082d504ddd7255a5c,http://dl.acm.org/citation.cfm?id=2502258
+77d929b3c4bf546557815b41ed5c076a5792dc6b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265399
+779d3f0cf74b7d33344eea210170c7c981a7e27b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8115237
+7788fa76f1488b1597ee2bebc462f628e659f61e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8063888
+771505abd38641454757de75fe751d41e87f89a4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8401561
+48a402593ca4896ac34fbebf1e725ab1226ecdb7,http://doi.org/10.1016/j.patcog.2015.01.022
+48de3ca194c3830daa7495603712496fe908375c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6619283
+480ccd25cb2a851745f5e6e95d33edb703efb49e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8461792
+484bac2a9ff3a43a6f85d109bbc579a4346397f5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6011991
+70e14e216b12bed2211c4df66ef5f0bdeaffe774,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237666
+708f4787bec9d7563f4bb8b33834de445147133b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237449
+70d2ab1af0edd5c0a30d576a5d4aa397c4f92d3e,http://doi.org/10.1007/s11042-018-5608-2
+1e0d92b9b4011822825d1f7dc0eba6d83504d45d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4497872
+1e3068886b138304ec5a7296702879cc8788143d,http://doi.org/10.1007/s11263-013-0630-3
+84c5b45328dee855c4855a104ac9c0558cc8a328,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411213
+84574aa43a98ad8a29470977e7b091f5a5ec2366,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7301321
+84a74ef8680b66e6dccbc69ae80321a52780a68e,http://doi.org/10.1007/978-0-85729-932-1_19
+845f45f8412905137bf4e46a0d434f5856cd3aec,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8418618
+4a733a0862bd5f7be73fb4040c1375a6d17c9276,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6618949
+4a8480d58c30dc484bda08969e754cd13a64faa1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406475
+24603ed946cb9385ec541c86d2e42db47361c102,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373865
+24286ef164f0e12c3e9590ec7f636871ba253026,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369721
+2480f8dccd9054372d696e1e521e057d9ac9de17,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8396968
+247a8040447b6577aa33648395d95d80441a0cf3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8362745
+23edcd0d2011d9c0d421193af061f2eb3e155da3,http://doi.org/10.1007/s00371-015-1137-4
+23ee7b7a9ca5948e81555aaf3a044cfec778f148,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771385
+239e305c24155add73f2a0ba5ccbd66b37f77e14,http://dl.acm.org/citation.cfm?id=1219097
+23e824d1dfc33f3780dd18076284f07bd99f1c43,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7947686
+239958d6778643101ab631ec354ea1bc4d33e7e0,http://doi.org/10.1016/j.patcog.2017.06.009
+234c106036964131c0f2daf76c47ced802652046,http://doi.org/10.1016/j.cviu.2015.07.007
+4f37f71517420c93c6841beb33ca0926354fa11d,http://doi.org/10.1016/j.neucom.2017.08.062
+4f064c2a0ef0849eed61ab816ff0c2ff6d9d7308,http://dl.acm.org/citation.cfm?id=2396318
+4f1249369127cc2e2894f6b2f1052d399794919a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8239663
+4f8345f31e38f65f1155569238d14bd8517606f4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6618941
+4f8b4784d0fca31840307650f7052b0dde736a76,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7017496
+8d0bc14589dea1f4f88914ffcb57a5c54830f2cc,http://doi.org/10.1007/978-3-319-16865-4
+8dd9c97b85e883c16e5b1ec260f9cd610df52dec,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8404159
+8da32ff9e3759dc236878ac240728b344555e4e9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014820
+8dfe43c76b76a97f8938f5f5f81059a1f1fa74ed,http://doi.org/10.1038/s41598-017-18993-5
+8de5dc782178114d9424d33d9adabb2f29a1ab17,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7053946
+151b87de997e55db892b122c211f9c749f4293de,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237481
+127c7f87f289b1d32e729738475b337a6b042cf7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8436988
+1221e25763c3be95c1b6626ca9e7feaa3b636d9a,http://doi.org/10.1007/s11042-017-4353-2
+12226bca7a891e25b7d1e1a34a089521bba75731,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373861
+8c4042191431e9eb43f00b0f14c23765ab9c6688,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7532956
+8ccbbd9da0749d96f09164e28480d54935ee171c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5597578
+856cc83a3121de89d4a6d9283afbcd5d7ef7aa2b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6417014
+85a136b48c2036b16f444f93b086e2bd8539a498,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7885525
+85e78aa374d85f9a61da693e5010e40decd3f986,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6619100
+854b1f0581f5d3340f15eb79452363cbf38c04c8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7903648
+85ec86f8320ba2ed8b3da04d1c291ce88b8969c0,http://dl.acm.org/citation.cfm?id=3264947
+85ae6fa48e07857e17ac4bd48fb804785483e268,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7755833
+85c90ad5eebb637f048841ebfded05942bb786b7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6977163
+8562b4f63e49847692b8cb31ef0bdec416b9a87a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8128909
+857c64060963dd8d28e4740f190d321298ddd503,http://doi.org/10.1007/s11042-015-3103-6
+1d30f813798c55ae4fe454829be6e2948ee841da,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4270396
+1d51b256af68c5546d230f3e6f41da029e0f5852,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7590015
+1de23d7fe718d9fab0159f58f422099e44ad3f0a,http://doi.org/10.1007/s11063-016-9558-2
+71ca8b6e84c17b3e68f980bfb8cddc837100f8bf,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7899774
+7195cb08ba2248f3214f5dc5d7881533dd1f46d9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5673820
+71c4b8e1bb25ee80f4317411ea8180dae6499524,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8463396
+765be0c44a67e41e0f8f0b5d8a3af0ff40a00c7d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373821
+768f6a14a7903099729872e0db231ea814eb05e9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411205
+1c25a3c8ef3e2c4dbff337aa727d13f5eba40fb2,http://doi.org/10.1007/s00371-016-1290-4
+1c0acf9c2f2c43be47b34acbd4e7338de360e555,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8461986
+8202da548a128b28dd1f3aa9f86a0523ec2ecb26,http://doi.org/10.1016/j.ijar.2012.01.003
+82a0a5d0785fb2c2282ed901a15c3ff02f8567df,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6849828
+82e3f4099503633c042a425e9217bfe47cfe9d4b,http://doi.org/10.1007/s11042-015-2819-7
+49358915ae259271238c7690694e6a887b16f7ed,http://doi.org/10.1007/BF02884429
+4983076c1a8b80ff5cd68b924b11df58a68b6c84,http://doi.org/10.1007/s11704-017-6114-9
+49068538b7eef66b4254cc11914128097302fab8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5339040
+49be50efc87c5df7a42905e58b092729ea04c2f5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7177489
+493c8591d6a1bef5d7b84164a73761cefb9f5a25,http://dl.acm.org/citation.cfm?id=3159691
+40c9dce0a4c18829c4100bff5845eb7799b54ca1,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5346008
+405d9a71350c9a13adea41f9d7f7f9274793824f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373834
+40c1de7b1b0a087c590537df55ecd089c86e8bfc,http://doi.org/10.1162/NECO_a_00401
+4007bf090887d8a0e907ab5e17ecfcdbbdafc2e4,http://doi.org/10.1007/s13735-017-0144-9
+407806f5fe3c5ecc2dc15b75d3d2b0359b4ee7e0,http://doi.org/10.1007/s11042-017-5028-8
+2e7e1ee7e3ee1445939480efd615e8828b9838f8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5643167
+2e3b981b9f3751fc5873f77ad2aa7789c3e1d1d2,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8397046
+2bb36c875754a2a8919f2f9b00a336c00006e453,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373869
+2bf646a6efd15ab830344ae9d43e10cc89e29f34,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8387808
+2bcd9b2b78eb353ea57cf50387083900eae5384a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995329
+4735fa28fa2a2af98f7b266efd300a00e60dddf7,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6460647
+7831ab4f8c622d91974579c1ff749dadc170c73c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6712699
+78f2c8671d1a79c08c80ac857e89315197418472,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237443
+784a83437b3dba49c0d7ccc10ac40497b84661a5,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100224
+78cec49ca0acd3b961021bc27d5cf78cbbbafc7e,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995556
+782a05fbe30269ff8ab427109f5c4d0a577e5284,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8038860
+8bebb26880274bdb840ebcca530caf26c393bf45,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369529
+8bbd40558a99e33fac18f6736b8fe99f4a97d9b1,http://doi.org/10.1007/s11263-016-0986-2
+13d430257d595231bda216ef859950caa736ad1d,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8394947
+13179bb3f2867ea44647b6fe0c8fb4109207e9f5,http://doi.org/10.1007/s00779-018-1171-0
+7fcecaef60a681c47f0476e54e08712ee05d6154,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7299097
+7f203f2ff6721e73738720589ea83adddb7fdd27,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1301513
+7fb7ccc1aa093ca526f2d8b6f2c404d2c886f69a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8404767
+7f44e5929b11ce2192c3ae81fbe602081a7ab5c4,http://doi.org/10.1007/s11554-016-0645-4
+7fe2ab9f54242ef8609ef9bf988f008c7d42407c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8382330
+7f904093e6933cab876e87532111db94c71a304f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6117544
+7f26c615dd187ca5e4b15759d5cb23ab3ea9d9a9,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7781761
+7f2a234ad5c256733a837dbf98f25ed5aad214e8,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7207289
+7f5b379b12505d60f9303aab1fea48515d36d098,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411873
+7f68a5429f150f9eb7550308bb47a363f2989cb3,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6977004
+7acbf0b060e948589b38d5501ca217463cfd5c2f,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6940304
+7ac4fc169fffa8e962b9df94f61e2adf6bac8f97,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8453893
+141cb9ee401f223220d3468592effa90f0c255fa,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7815403
+14aed1b7c08c941b1d2ba6c1c2ffb6255c306c74,http://doi.org/10.1007/s00138-016-0820-4
+8e63868e552e433dc536ba732f4c2af095602869,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1699730
+8eb40d0a0a1339469a05711f532839e8ffd8126c,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7890464
+8e452379fda31744d4a4383fcb8a9eab6dbc4ae4,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4586390
+22648dcd3100432fe0cc71e09de5ee855c61f12b,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8393188
+228ea13041910c41b50d0052bdce924037c3bc6a,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8434495
+22e121a8dea49e3042de305574356477ecacadda,http://doi.org/10.1007/s00138-018-0935-x
+25960f0a2ed38a89fa8076a448ca538de2f1e183,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411220
+2563fc1797f187e2f6f9d9f4387d4bcadd3fbd02,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7410635
+2564920d6976be68bb22e299b0b8098090bbf259,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8407761
diff --git a/scraper/reports/misc/raw_paper_pdf.csv b/scraper/reports/misc/raw_paper_pdf.csv
new file mode 100644
index 00000000..c9827c27
--- /dev/null
+++ b/scraper/reports/misc/raw_paper_pdf.csv
@@ -0,0 +1,1354 @@
+61f04606528ecf4a42b49e8ac2add2e9f92c0def,https://arxiv.org/pdf/1605.01014.pdf
+61efeb64e8431cfbafa4b02eb76bf0c58e61a0fa,https://arxiv.org/pdf/1809.01604.pdf
+61e9e180d3d1d8b09f1cc59bdd9f98c497707eff,https://pdfs.semanticscholar.org/61e9/e180d3d1d8b09f1cc59bdd9f98c497707eff.pdf
+6193c833ad25ac27abbde1a31c1cabe56ce1515b,https://pdfs.semanticscholar.org/5f25/7ca18a92c3595db3bda3224927ec494003a5.pdf
+614079f1a0d0938f9c30a1585f617fa278816d53,https://arxiv.org/pdf/1612.02374.pdf
+0da75b0d341c8f945fae1da6c77b6ec345f47f2a,https://pdfs.semanticscholar.org/0da7/5b0d341c8f945fae1da6c77b6ec345f47f2a.pdf
+0d33b6c8b4d1a3cb6d669b4b8c11c2a54c203d1a,https://pdfs.semanticscholar.org/0d33/b6c8b4d1a3cb6d669b4b8c11c2a54c203d1a.pdf
+0da4c3d898ca2fff9e549d18f513f4898e960aca,https://pdfs.semanticscholar.org/0da4/c3d898ca2fff9e549d18f513f4898e960aca.pdf
+959bcb16afdf303c34a8bfc11e9fcc9d40d76b1c,https://pdfs.semanticscholar.org/959b/cb16afdf303c34a8bfc11e9fcc9d40d76b1c.pdf
+95ea564bd983129ddb5535a6741e72bb1162c779,https://arxiv.org/pdf/1711.00111.pdf
+950171acb24bb24a871ba0d02d580c09829de372,https://pdfs.semanticscholar.org/9501/71acb24bb24a871ba0d02d580c09829de372.pdf
+59fc69b3bc4759eef1347161e1248e886702f8f7,https://pdfs.semanticscholar.org/59fc/69b3bc4759eef1347161e1248e886702f8f7.pdf
+59efb1ac77c59abc8613830787d767100387c680,https://arxiv.org/pdf/1805.10030.pdf
+59dac8b460a89e03fa616749a08e6149708dcc3a,https://pdfs.semanticscholar.org/59da/c8b460a89e03fa616749a08e6149708dcc3a.pdf
+59e9934720baf3c5df3a0e1e988202856e1f83ce,https://arxiv.org/pdf/1511.04136.pdf
+59d225486161b43b7bf6919b4a4b4113eb50f039,https://arxiv.org/pdf/1701.04769.pdf
+5945464d47549e8dcaec37ad41471aa70001907f,https://arxiv.org/pdf/1507.05738.pdf
+599adc0dcd4ebcc2a868feedd243b5c3c1bd1d0a,https://arxiv.org/pdf/1808.09316.pdf
+59a6c9333c941faf2540979dcfcb5d503a49b91e,https://arxiv.org/pdf/1806.08245.pdf
+92b61b09d2eed4937058d0f9494d9efeddc39002,https://pdfs.semanticscholar.org/92b6/1b09d2eed4937058d0f9494d9efeddc39002.pdf
+92be73dffd3320fe7734258961fe5a5f2a43390e,https://pdfs.semanticscholar.org/92be/73dffd3320fe7734258961fe5a5f2a43390e.pdf
+9207671d9e2b668c065e06d9f58f597601039e5e,https://pdfs.semanticscholar.org/9207/671d9e2b668c065e06d9f58f597601039e5e.pdf
+928b8eb47288a05611c140d02441660277a7ed54,https://arxiv.org/pdf/1805.04384.pdf
+926e97d5ce2a6e070f8ec07c5aa7f91d3df90ba0,https://arxiv.org/pdf/1705.07871.pdf
+92e464a5a67582d5209fa75e3b29de05d82c7c86,https://pdfs.semanticscholar.org/92e4/64a5a67582d5209fa75e3b29de05d82c7c86.pdf
+927ba64123bd4a8a31163956b3d1765eb61e4426,https://pdfs.semanticscholar.org/927b/a64123bd4a8a31163956b3d1765eb61e4426.pdf
+923ec0da8327847910e8dd71e9d801abcbc93b08,https://arxiv.org/pdf/1704.04232.pdf
+0cf2eecf20cfbcb7f153713479e3206670ea0e9c,https://arxiv.org/pdf/1806.08906.pdf
+0ca36ecaf4015ca4095e07f0302d28a5d9424254,https://arxiv.org/pdf/1810.00360.pdf
+0cfca73806f443188632266513bac6aaf6923fa8,https://arxiv.org/pdf/1805.04756.pdf
+6601a0906e503a6221d2e0f2ca8c3f544a4adab7,https://pdfs.semanticscholar.org/6601/a0906e503a6221d2e0f2ca8c3f544a4adab7.pdf
+661ca4bbb49bb496f56311e9d4263dfac8eb96e9,https://arxiv.org/pdf/1803.09010.pdf
+66d087f3dd2e19ffe340c26ef17efe0062a59290,https://pdfs.semanticscholar.org/66d0/87f3dd2e19ffe340c26ef17efe0062a59290.pdf
+66837add89caffd9c91430820f49adb5d3f40930,https://pdfs.semanticscholar.org/4a6d/20f60ff06cca446578ea1218737190e288e6.pdf
+3e0a1884448bfd7f416c6a45dfcdfc9f2e617268,https://arxiv.org/pdf/1805.05838.pdf
+3e4acf3f2d112fc6516abcdddbe9e17d839f5d9b,https://arxiv.org/pdf/1703.04363.pdf
+3e40991ab1daa2a4906eb85a5d6a01a958b6e674,https://arxiv.org/pdf/1611.01599.pdf
+506c2fbfa9d16037d50d650547ad3366bb1e1cde,https://pdfs.semanticscholar.org/506c/2fbfa9d16037d50d650547ad3366bb1e1cde.pdf
+504028218290d68859f45ec686f435f473aa326c,https://arxiv.org/pdf/1807.11195.pdf
+50a0930cb8cc353e15a5cb4d2f41b365675b5ebf,https://pdfs.semanticscholar.org/50a0/930cb8cc353e15a5cb4d2f41b365675b5ebf.pdf
+508702ed2bf7d1b0655ea7857dd8e52d6537e765,https://pdfs.semanticscholar.org/5087/02ed2bf7d1b0655ea7857dd8e52d6537e765.pdf
+68d2afd8c5c1c3a9bbda3dd209184e368e4376b9,https://arxiv.org/pdf/1705.11136.pdf
+68d08ed9470d973a54ef7806318d8894d87ba610,https://arxiv.org/pdf/1804.02555.pdf
+68caf5d8ef325d7ea669f3fb76eac58e0170fff0,https://arxiv.org/pdf/1805.07646.pdf
+684f5166d8147b59d9e0938d627beff8c9d208dd,https://arxiv.org/pdf/1707.03548.pdf
+68484ae8a042904a95a8d284a7f85a4e28e37513,https://pdfs.semanticscholar.org/6848/4ae8a042904a95a8d284a7f85a4e28e37513.pdf
+682760f2f767fb47e1e2ca35db3becbb6153756f,https://arxiv.org/pdf/1804.03507.pdf
+68f61154a0080c4aae9322110c8827978f01ac2e,https://pdfs.semanticscholar.org/68f6/1154a0080c4aae9322110c8827978f01ac2e.pdf
+574b62c845809fd54cc168492424c5fac145bc83,https://arxiv.org/pdf/1804.04829.pdf
+57246142814d7010d3592e3a39a1ed819dd01f3b,https://pdfs.semanticscholar.org/5724/6142814d7010d3592e3a39a1ed819dd01f3b.pdf
+571b83f7fc01163383e6ca6a9791aea79cafa7dd,https://arxiv.org/pdf/1803.06524.pdf
+574ad7ef015995efb7338829a021776bf9daaa08,https://arxiv.org/pdf/1611.08240.pdf
+57a14a65e8ae15176c9afae874854e8b0f23dca7,https://pdfs.semanticscholar.org/57a1/4a65e8ae15176c9afae874854e8b0f23dca7.pdf
+3b73f8a2b39751efb7d7b396bf825af2aaadee24,https://arxiv.org/pdf/1712.01066.pdf
+3b84d074b8622fac125f85ab55b63e876fed4628,https://arxiv.org/pdf/1608.02676.pdf
+3be8f1f7501978287af8d7ebfac5963216698249,https://pdfs.semanticscholar.org/3be8/f1f7501978287af8d7ebfac5963216698249.pdf
+3baa3d5325f00c7edc1f1427fcd5bdc6a420a63f,https://arxiv.org/pdf/1707.07923.pdf
+3bb6570d81685b769dc9e74b6e4958894087f3f1,https://arxiv.org/pdf/1805.05098.pdf
+6f5ce5570dc2960b8b0e4a0a50eab84b7f6af5cb,https://arxiv.org/pdf/1706.06247.pdf
+6f7d06ced04ead3b9a5da86b37e7c27bfcedbbdd,https://pdfs.semanticscholar.org/6f7d/06ced04ead3b9a5da86b37e7c27bfcedbbdd.pdf
+6f7a8b3e8f212d80f0fb18860b2495be4c363eac,https://arxiv.org/pdf/1712.02662.pdf
+6fea198a41d2f6f73e47f056692f365c8e6b04ce,https://arxiv.org/pdf/1807.03658.pdf
+6f3054f182c34ace890a32fdf1656b583fbc7445,https://pdfs.semanticscholar.org/6f30/54f182c34ace890a32fdf1656b583fbc7445.pdf
+6fef65bd7287b57f0c3b36bf8e6bc987fd161b7d,https://arxiv.org/pdf/1807.08259.pdf
+6fdc0bc13f2517061eaa1364dcf853f36e1ea5ae,https://pdfs.semanticscholar.org/6fdc/0bc13f2517061eaa1364dcf853f36e1ea5ae.pdf
+03c56c176ec6377dddb6a96c7b2e95408db65a7a,https://arxiv.org/pdf/1807.00676.pdf
+0322e69172f54b95ae6a90eb3af91d3daa5e36ea,https://pdfs.semanticscholar.org/0322/e69172f54b95ae6a90eb3af91d3daa5e36ea.pdf
+03ce2ff688f9b588b6f264ca79c6857f0d80ceae,https://arxiv.org/pdf/1711.09550.pdf
+032825000c03b8ab4c207e1af4daeb1f225eb025,https://pdfs.semanticscholar.org/0328/25000c03b8ab4c207e1af4daeb1f225eb025.pdf
+03ac1c694bc84a27621da6bfe73ea9f7210c6d45,https://pdfs.semanticscholar.org/03ac/1c694bc84a27621da6bfe73ea9f7210c6d45.pdf
+03fe3d031afdcddf38e5cc0d908b734884542eeb,https://pdfs.semanticscholar.org/03fe/3d031afdcddf38e5cc0d908b734884542eeb.pdf
+9bd35145c48ce172b80da80130ba310811a44051,https://arxiv.org/pdf/1606.00850.pdf
+9bc01fa9400c231e41e6a72ec509d76ca797207c,https://pdfs.semanticscholar.org/9bc0/1fa9400c231e41e6a72ec509d76ca797207c.pdf
+9b2c359c36c38c289c5bacaeb5b1dd06b464f301,https://arxiv.org/pdf/1709.01442.pdf
+9b1bcef8bfef0fb5eb5ea9af0b699aa0534fceca,https://pdfs.semanticscholar.org/9b1b/cef8bfef0fb5eb5ea9af0b699aa0534fceca.pdf
+9be653e1bc15ef487d7f93aad02f3c9552f3ee4a,https://pdfs.semanticscholar.org/9be6/53e1bc15ef487d7f93aad02f3c9552f3ee4a.pdf
+9bac481dc4171aa2d847feac546c9f7299cc5aa0,https://arxiv.org/pdf/1609.04541.pdf
+9b684e2e2bb43862f69b12c6be94db0e7a756187,https://arxiv.org/pdf/1709.04666.pdf
+9ea223c070ec9a00f4cb5ca0de35d098eb9a8e32,https://arxiv.org/pdf/1708.03280.pdf
+9e182e0cd9d70f876f1be7652c69373bcdf37fb4,https://arxiv.org/pdf/1807.07860.pdf
+040dc119d5ca9ea3d5fc39953a91ec507ed8cc5d,https://arxiv.org/pdf/1806.03018.pdf
+04b851f25d6d49e61a528606953e11cfac7df2b2,https://arxiv.org/pdf/1711.11152.pdf
+043efe5f465704ced8d71a067d2b9d5aa5b59c29,https://pdfs.semanticscholar.org/000a/c6b0865c79bcf0d6f7f069b3abfe229e1462.pdf
+04b4c779b43b830220bf938223f685d1057368e9,https://arxiv.org/pdf/1712.00133.pdf
+04317e63c08e7888cef480fe79f12d3c255c5b00,https://pdfs.semanticscholar.org/0431/7e63c08e7888cef480fe79f12d3c255c5b00.pdf
+047bb1b1bd1f19b6c8d7ee7d0324d5ecd1a3efff,https://arxiv.org/pdf/1806.06098.pdf
+6ad107c08ac018bfc6ab31ec92c8a4b234f67d49,https://arxiv.org/pdf/1807.00966.pdf
+6a52e6fce541126ff429f3c6d573bc774f5b8d89,https://pdfs.semanticscholar.org/6a52/e6fce541126ff429f3c6d573bc774f5b8d89.pdf
+6a4419ce2338ea30a570cf45624741b754fa52cb,https://arxiv.org/pdf/1804.02541.pdf
+6a2ac4f831bd0f67db45e7d3cdaeaaa075e7180a,https://arxiv.org/pdf/1805.09092.pdf
+3294e27356c3b1063595885a6d731d625b15505a,https://pdfs.semanticscholar.org/89b6/fe99faefb8ff4c54f9e7a88fde2470a51ed1.pdf
+3240c9359061edf7a06bfeb7cc20c103a65904c2,https://arxiv.org/pdf/1708.01956.pdf
+352d61eb66b053ae5689bd194840fd5d33f0e9c0,https://arxiv.org/pdf/1807.04899.pdf
+35b1c1f2851e9ac4381ef41b4d980f398f1aad68,https://pdfs.semanticscholar.org/35b1/c1f2851e9ac4381ef41b4d980f398f1aad68.pdf
+351c02d4775ae95e04ab1e5dd0c758d2d80c3ddd,https://pdfs.semanticscholar.org/351c/02d4775ae95e04ab1e5dd0c758d2d80c3ddd.pdf
+35e0256b33212ddad2db548484c595334f15b4da,https://pdfs.semanticscholar.org/35e0/256b33212ddad2db548484c595334f15b4da.pdf
+35e6f6e5f4f780508e5f58e87f9efe2b07d8a864,https://arxiv.org/pdf/1709.08421.pdf
+6964af90cf8ac336a2a55800d9c510eccc7ba8e1,https://arxiv.org/pdf/1711.08496.pdf
+69adbfa7b0b886caac15ebe53b89adce390598a3,https://arxiv.org/pdf/1805.10938.pdf
+69a55c30c085ad1b72dd2789b3f699b2f4d3169f,https://pdfs.semanticscholar.org/69a5/5c30c085ad1b72dd2789b3f699b2f4d3169f.pdf
+6993bca2b3471f26f2c8a47adfe444bfc7852484,https://arxiv.org/pdf/1705.07426.pdf
+691964c43bfd282f6f4d00b8b0310c554b613e3b,https://pdfs.semanticscholar.org/6919/64c43bfd282f6f4d00b8b0310c554b613e3b.pdf
+3cb2841302af1fb9656f144abc79d4f3d0b27380,https://pdfs.semanticscholar.org/3cb2/841302af1fb9656f144abc79d4f3d0b27380.pdf
+3c563542db664321aa77a9567c1601f425500f94,https://arxiv.org/pdf/1712.02514.pdf
+3cd7b15f5647e650db66fbe2ce1852e00c05b2e4,https://pdfs.semanticscholar.org/3cd7/b15f5647e650db66fbe2ce1852e00c05b2e4.pdf
+3c6cac7ecf546556d7c6050f7b693a99cc8a57b3,https://pdfs.semanticscholar.org/3c6c/ac7ecf546556d7c6050f7b693a99cc8a57b3.pdf
+3c1aef7c2d32a219bdbc89a44d158bc2695e360a,https://arxiv.org/pdf/1809.00594.pdf
+3c56acaa819f4e2263638b67cea1ec37a226691d,https://arxiv.org/pdf/1704.07160.pdf
+56e079f4eb40744728fd1d7665938b06426338e5,https://arxiv.org/pdf/1705.04293.pdf
+56a677c889e0e2c9f68ab8ca42a7e63acf986229,https://pdfs.semanticscholar.org/56a6/77c889e0e2c9f68ab8ca42a7e63acf986229.pdf
+56dca23481de9119aa21f9044efd7db09f618704,https://arxiv.org/pdf/1507.02772.pdf
+516a27d5dd06622f872f5ef334313350745eadc3,https://arxiv.org/pdf/1805.01024.pdf
+5180df9d5eb26283fb737f491623395304d57497,https://arxiv.org/pdf/1804.10899.pdf
+51faacfa4fb1e6aa252c6970e85ff35c5719f4ff,https://arxiv.org/pdf/1807.04979.pdf
+51cb09ee04831b95ae02e1bee9b451f8ac4526e3,https://arxiv.org/pdf/1503.08909.pdf
+5161e38e4ea716dcfb554ccb88901b3d97778f64,https://arxiv.org/pdf/1702.04069.pdf
+51d1a6e15936727e8dd487ac7b7fd39bd2baf5ee,https://arxiv.org/pdf/1809.07586.pdf
+5141cf2e59fb2ec9bb489b9c1832447d3cd93110,https://arxiv.org/pdf/1706.00893.pdf
+511a8cdf2127ef8aa07cbdf9660fe9e0e2dfbde7,https://pdfs.semanticscholar.org/511a/8cdf2127ef8aa07cbdf9660fe9e0e2dfbde7.pdf
+51d048b92f6680aca4a8adf07deb380c0916c808,https://pdfs.semanticscholar.org/51d0/48b92f6680aca4a8adf07deb380c0916c808.pdf
+5134353bd01c4ea36bd007c460e8972b1541d0ad,https://pdfs.semanticscholar.org/5134/353bd01c4ea36bd007c460e8972b1541d0ad.pdf
+3d18ce183b5a5b4dcaa1216e30b774ef49eaa46f,https://arxiv.org/pdf/1511.07212.pdf
+3daafe6389d877fe15d8823cdf5ac15fd919676f,https://arxiv.org/pdf/1605.05197.pdf
+3d6ee995bc2f3e0f217c053368df659a5d14d5b5,https://pdfs.semanticscholar.org/3d6e/e995bc2f3e0f217c053368df659a5d14d5b5.pdf
+3dfb822e16328e0f98a47209d7ecd242e4211f82,https://arxiv.org/pdf/1708.08197.pdf
+580f86f1ace1feed16b592d05c2b07f26c429b4b,https://arxiv.org/pdf/1705.00754.pdf
+58d47c187b38b8a2bad319c789a09781073d052d,https://arxiv.org/pdf/1806.11538.pdf
+58bf72750a8f5100e0c01e55fd1b959b31e7dbce,https://arxiv.org/pdf/1803.07737.pdf
+58542eeef9317ffab9b155579256d11efb4610f2,https://pdfs.semanticscholar.org/5854/2eeef9317ffab9b155579256d11efb4610f2.pdf
+677585ccf8619ec2330b7f2d2b589a37146ffad7,https://arxiv.org/pdf/1806.11328.pdf
+6789bddbabf234f31df992a3356b36a47451efc7,https://pdfs.semanticscholar.org/6789/bddbabf234f31df992a3356b36a47451efc7.pdf
+675b2caee111cb6aa7404b4d6aa371314bf0e647,https://arxiv.org/pdf/1705.08421.pdf
+679b72d23a9cfca8a7fe14f1d488363f2139265f,https://pdfs.semanticscholar.org/e7c4/bfe5ea260450f124f4253f2ebe0fff1d308f.pdf
+67484723e0c2cbeb936b2e863710385bdc7d5368,https://arxiv.org/pdf/1805.03363.pdf
+673d4885370b27c863e11a4ece9189a6a45931cc,https://arxiv.org/pdf/1802.09723.pdf
+6754c98ba73651f69525c770fb0705a1fae78eb5,https://pdfs.semanticscholar.org/f68b/3031e7092072bd7b38c05448031f17b087d1.pdf
+672fae3da801b2a0d2bad65afdbbbf1b2320623e,https://arxiv.org/pdf/1609.07042.pdf
+0be43cf4299ce2067a0435798ef4ca2fbd255901,https://pdfs.semanticscholar.org/0be4/3cf4299ce2067a0435798ef4ca2fbd255901.pdf
+0b5a82f8c0ee3640503ba24ef73e672d93aeebbf,https://arxiv.org/pdf/1808.09560.pdf
+0b572a2b7052b15c8599dbb17d59ff4f02838ff7,https://pdfs.semanticscholar.org/0b57/2a2b7052b15c8599dbb17d59ff4f02838ff7.pdf
+0ba1d855cd38b6a2c52860ae4d1a85198b304be4,https://arxiv.org/pdf/1510.03909.pdf
+0bce54bfbd8119c73eb431559fc6ffbba741e6aa,https://pdfs.semanticscholar.org/f9b2/3a7270939136872d5e170b4a80aad68a4e66.pdf
+0bf0029c9bdb0ac61fda35c075deb1086c116956,https://pdfs.semanticscholar.org/c37d/3c53687b2b1654e20a5f67dce6585afc109a.pdf
+93420d9212dd15b3ef37f566e4d57e76bb2fab2f,https://arxiv.org/pdf/1611.00851.pdf
+93af36da08bf99e68c9b0d36e141ed8154455ac2,https://pdfs.semanticscholar.org/93af/36da08bf99e68c9b0d36e141ed8154455ac2.pdf
+93f37c69dd92c4e038710cdeef302c261d3a4f92,https://arxiv.org/pdf/1712.00636.pdf
+938ae9597f71a21f2e47287cca318d4a2113feb2,https://pdfs.semanticscholar.org/938a/e9597f71a21f2e47287cca318d4a2113feb2.pdf
+946017d5f11aa582854ac4c0e0f1b18b06127ef1,https://pdfs.semanticscholar.org/9460/17d5f11aa582854ac4c0e0f1b18b06127ef1.pdf
+94eeae23786e128c0635f305ba7eebbb89af0023,https://arxiv.org/pdf/1706.01350.pdf
+944faf7f14f1bead911aeec30cc80c861442b610,https://arxiv.org/pdf/1705.01861.pdf
+94325522c9be8224970f810554611d6a73877c13,https://arxiv.org/pdf/1807.11440.pdf
+9487cea80f23afe9bccc94deebaa3eefa6affa99,https://arxiv.org/pdf/1612.05332.pdf
+94a11b601af77f0ad46338afd0fa4ccbab909e82,https://pdfs.semanticscholar.org/94a1/1b601af77f0ad46338afd0fa4ccbab909e82.pdf
+0ee737085af468f264f57f052ea9b9b1f58d7222,https://arxiv.org/pdf/1807.08370.pdf
+0e93a5a7f6dbdb3802173dca05717d27d72bfec0,https://arxiv.org/pdf/1709.08553.pdf
+0e2ea7af369dbcaeb5e334b02dd9ba5271b10265,https://arxiv.org/pdf/1807.01332.pdf
+0ee5c4112208995bf2bb0fb8a87efba933a94579,https://arxiv.org/pdf/1807.03235.pdf
+60c24e44fce158c217d25c1bae9f880a8bd19fc3,https://arxiv.org/pdf/1808.02992.pdf
+60e2b9b2e0db3089237d0208f57b22a3aac932c1,https://arxiv.org/pdf/1603.06470.pdf
+60542b1a857024c79db8b5b03db6e79f74ec8f9f,https://arxiv.org/pdf/1702.05448.pdf
+345cc31c85e19cea9f8b8521be6a37937efd41c2,https://arxiv.org/pdf/1511.06421.pdf
+341002fac5ae6c193b78018a164d3c7295a495e4,https://arxiv.org/pdf/1706.04264.pdf
+34ce703b7e79e3072eed7f92239a4c08517b0c55,https://pdfs.semanticscholar.org/34ce/703b7e79e3072eed7f92239a4c08517b0c55.pdf
+34ec83c8ff214128e7a4a4763059eebac59268a6,https://arxiv.org/pdf/1808.00141.pdf
+5a3da29970d0c3c75ef4cb372b336fc8b10381d7,https://arxiv.org/pdf/1708.00980.pdf
+5a5f9e0ed220ce51b80cd7b7ede22e473a62062c,https://arxiv.org/pdf/1806.01810.pdf
+5ac946fc6543a445dd1ee6d5d35afd3783a31353,https://arxiv.org/pdf/1803.06962.pdf
+5a4ec5c79f3699ba037a5f06d8ad309fb4ee682c,https://pdfs.semanticscholar.org/2e36/a706bbec0f1adb7484e5d7416c3e612f43a1.pdf
+5aed0f26549c6e64c5199048c4fd5fdb3c5e69d6,https://pdfs.semanticscholar.org/5aed/0f26549c6e64c5199048c4fd5fdb3c5e69d6.pdf
+5a07945293c6b032e465d64f2ec076b82e113fa6,https://pdfs.semanticscholar.org/5a07/945293c6b032e465d64f2ec076b82e113fa6.pdf
+5fff61302adc65d554d5db3722b8a604e62a8377,https://arxiv.org/pdf/1801.05599.pdf
+5f771fed91c8e4b666489ba2384d0705bcf75030,https://arxiv.org/pdf/1804.03287.pdf
+5fa04523ff13a82b8b6612250a39e1edb5066521,https://arxiv.org/pdf/1708.04370.pdf
+5fa6e4a23da0b39e4b35ac73a15d55cee8608736,https://arxiv.org/pdf/1801.06066.pdf
+5f7c4c20ae2731bfb650a96b69fd065bf0bb950e,https://pdfs.semanticscholar.org/5f7c/4c20ae2731bfb650a96b69fd065bf0bb950e.pdf
+5f94969b9491db552ffebc5911a45def99026afe,https://pdfs.semanticscholar.org/5f94/969b9491db552ffebc5911a45def99026afe.pdf
+5f758a29dae102511576c0a5c6beda264060a401,https://arxiv.org/pdf/1804.01373.pdf
+5f0d4a0b5f72d8700cdf8cb179263a8fa866b59b,https://pdfs.semanticscholar.org/5f0d/4a0b5f72d8700cdf8cb179263a8fa866b59b.pdf
+5f27ed82c52339124aa368507d66b71d96862cb7,https://pdfs.semanticscholar.org/5f27/ed82c52339124aa368507d66b71d96862cb7.pdf
+5fea26746f3140b12317fcf3bc1680f2746e172e,https://arxiv.org/pdf/1612.06341.pdf
+5f453a35d312debfc993d687fd0b7c36c1704b16,https://pdfs.semanticscholar.org/5f45/3a35d312debfc993d687fd0b7c36c1704b16.pdf
+33548531f9ed2ce6f87b3a1caad122c97f1fd2e9,https://pdfs.semanticscholar.org/c4e8/3800fae0d6065aca19aa2a2fbff29ca6be1e.pdf
+3328413ee9944de1cc7c9c1d1bf2fece79718ba1,https://arxiv.org/pdf/1807.00230.pdf
+33aa980544a9d627f305540059828597354b076c,https://pdfs.semanticscholar.org/18a6/9db63820183a7ed0d810c2fdf18865fdf10e.pdf
+3352426a67eabe3516812cb66a77aeb8b4df4d1b,https://arxiv.org/pdf/1708.06023.pdf
+33ef419dffef85443ec9fe89a93f928bafdc922e,https://arxiv.org/pdf/1809.08493.pdf
+05891725f5b27332836cf058f04f18d74053803f,https://pdfs.semanticscholar.org/0589/1725f5b27332836cf058f04f18d74053803f.pdf
+052f994898c79529955917f3dfc5181586282cf8,https://arxiv.org/pdf/1708.02191.pdf
+050a149051a5d268fcc5539e8b654c2240070c82,https://pdfs.semanticscholar.org/050a/149051a5d268fcc5539e8b654c2240070c82.pdf
+053931267af79a89791479b18d1b9cde3edcb415,https://pdfs.semanticscholar.org/0539/31267af79a89791479b18d1b9cde3edcb415.pdf
+9d58e8ab656772d2c8a99a9fb876d5611fe2fe20,https://arxiv.org/pdf/1506.01911.pdf
+9d57c4036a0e5f1349cd11bc342ac515307b6720,https://arxiv.org/pdf/1808.05399.pdf
+9db4b25df549555f9ffd05962b5adf2fd9c86543,https://arxiv.org/pdf/1804.03786.pdf
+9ca7899338129f4ba6744f801e722d53a44e4622,https://arxiv.org/pdf/1504.07550.pdf
+9c1664f69d0d832e05759e8f2f001774fad354d6,https://arxiv.org/pdf/1809.04317.pdf
+9c065dfb26ce280610a492c887b7f6beccf27319,https://arxiv.org/pdf/1707.09074.pdf
+9ce0d64125fbaf625c466d86221505ad2aced7b1,https://pdfs.semanticscholar.org/9ce0/d64125fbaf625c466d86221505ad2aced7b1.pdf
+02f4b900deabbe7efa474f2815dc122a4ddb5b76,https://pdfs.semanticscholar.org/02f4/b900deabbe7efa474f2815dc122a4ddb5b76.pdf
+a40edf6eb979d1ddfe5894fac7f2cf199519669f,https://arxiv.org/pdf/1704.08740.pdf
+a3d8b5622c4b9af1f753aade57e4774730787a00,https://arxiv.org/pdf/1705.10120.pdf
+a322479a6851f57a3d74d017a9cb6d71395ed806,https://pdfs.semanticscholar.org/a322/479a6851f57a3d74d017a9cb6d71395ed806.pdf
+a301ddc419cbd900b301a95b1d9e4bb770afc6a3,https://pdfs.semanticscholar.org/a301/ddc419cbd900b301a95b1d9e4bb770afc6a3.pdf
+a3f69a073dcfb6da8038607a9f14eb28b5dab2db,https://pdfs.semanticscholar.org/a3f6/9a073dcfb6da8038607a9f14eb28b5dab2db.pdf
+a38045ed82d6800cbc7a4feb498e694740568258,https://pdfs.semanticscholar.org/8f15/c3a426d307dd1e72f7feab1e671d20fb1adb.pdf
+a3f78cc944ac189632f25925ba807a0e0678c4d5,https://pdfs.semanticscholar.org/a3f7/8cc944ac189632f25925ba807a0e0678c4d5.pdf
+a32c5138c6a0b3d3aff69bcab1015d8b043c91fb,https://pdfs.semanticscholar.org/a32c/5138c6a0b3d3aff69bcab1015d8b043c91fb.pdf
+a36c8a4213251d3fd634e8893ad1b932205ad1ca,https://pdfs.semanticscholar.org/a36c/8a4213251d3fd634e8893ad1b932205ad1ca.pdf
+b5968e7bb23f5f03213178c22fd2e47af3afa04c,https://arxiv.org/pdf/1705.07206.pdf
+b558be7e182809f5404ea0fcf8a1d1d9498dc01a,https://pdfs.semanticscholar.org/dc8a/57827ffbe7064979638cf909abf7fcf7fb8d.pdf
+b562def2624f59f7d3824e43ecffc990ad780898,https://arxiv.org/pdf/1710.08310.pdf
+b5f2846a506fc417e7da43f6a7679146d99c5e96,https://arxiv.org/pdf/1212.0402.pdf
+b59f441234d2d8f1765a20715e227376c7251cd7,https://arxiv.org/pdf/1803.01449.pdf
+b59cee1f647737ec3296ccb3daa25c890359c307,https://pdfs.semanticscholar.org/b59c/ee1f647737ec3296ccb3daa25c890359c307.pdf
+b2c60061ad32e28eb1e20aff42e062c9160786be,https://arxiv.org/pdf/1805.12589.pdf
+b2b535118c5c4dfcc96f547274cdc05dde629976,https://arxiv.org/pdf/1707.04061.pdf
+d904f945c1506e7b51b19c99c632ef13f340ef4c,https://pdfs.semanticscholar.org/d904/f945c1506e7b51b19c99c632ef13f340ef4c.pdf
+d949fadc9b6c5c8b067fa42265ad30945f9caa99,https://arxiv.org/pdf/1710.00870.pdf
+d93baa5ecf3e1196b34494a79df0a1933fd2b4ec,https://arxiv.org/pdf/1804.04803.pdf
+d9c4b1ca997583047a8721b7dfd9f0ea2efdc42c,https://arxiv.org/pdf/1709.00069.pdf
+d9bad7c3c874169e3e0b66a031c8199ec0bc2c1f,https://arxiv.org/pdf/1807.00046.pdf
+aca232de87c4c61537c730ee59a8f7ebf5ecb14f,https://pdfs.semanticscholar.org/aca2/32de87c4c61537c730ee59a8f7ebf5ecb14f.pdf
+ac855f0de9086e9e170072cb37400637f0c9b735,https://arxiv.org/pdf/1809.08999.pdf
+accbd6cd5dd649137a7c57ad6ef99232759f7544,https://pdfs.semanticscholar.org/accb/d6cd5dd649137a7c57ad6ef99232759f7544.pdf
+acee2201f8a15990551804dd382b86973eb7c0a8,https://arxiv.org/pdf/1701.01692.pdf
+ac0d3f6ed5c42b7fc6d7c9e1a9bb80392742ad5e,https://pdfs.semanticscholar.org/c67c/5780cb9870b70b78e4c82da4f92c7bb2592d.pdf
+ac26166857e55fd5c64ae7194a169ff4e473eb8b,https://arxiv.org/pdf/1706.01039.pdf
+ac559873b288f3ac28ee8a38c0f3710ea3f986d9,https://pdfs.semanticscholar.org/ac55/9873b288f3ac28ee8a38c0f3710ea3f986d9.pdf
+ac8e09128e1e48a2eae5fa90f252ada689f6eae7,https://arxiv.org/pdf/1806.01526.pdf
+ac8441e30833a8e2a96a57c5e6fede5df81794af,https://arxiv.org/pdf/1805.10557.pdf
+ac86ccc16d555484a91741e4cb578b75599147b2,https://arxiv.org/pdf/1709.08398.pdf
+ad0d4d5c61b55a3ab29764237cd97be0ebb0ddff,https://arxiv.org/pdf/1712.05080.pdf
+ad2339c48ad4ffdd6100310dcbb1fb78e72fac98,https://arxiv.org/pdf/1704.04689.pdf
+adf62dfa00748381ac21634ae97710bb80fc2922,https://pdfs.semanticscholar.org/adf6/2dfa00748381ac21634ae97710bb80fc2922.pdf
+bbf28f39e5038813afd74cf1bc78d55fcbe630f1,https://arxiv.org/pdf/1803.04108.pdf
+bb7f2c5d84797742f1d819ea34d1f4b4f8d7c197,https://arxiv.org/pdf/1612.06836.pdf
+bbc5f4052674278c96abe7ff9dc2d75071b6e3f3,https://pdfs.semanticscholar.org/287b/7baff99d6995fd5852002488eb44659be6c1.pdf
+bbd1eb87c0686fddb838421050007e934b2d74ab,https://arxiv.org/pdf/1805.10483.pdf
+d7593148e4319df7a288180d920f2822eeecea0b,https://pdfs.semanticscholar.org/192e/b550675b0f9cc69389ef2ec27efa72851253.pdf
+d7fe2a52d0ad915b78330340a8111e0b5a66513a,https://arxiv.org/pdf/1711.10735.pdf
+d7cbedbee06293e78661335c7dd9059c70143a28,https://arxiv.org/pdf/1804.07573.pdf
+d78734c54f29e4474b4d47334278cfde6efe963a,https://arxiv.org/pdf/1804.03487.pdf
+d79365336115661b0e8dbbcd4b2aa1f504b91af6,https://arxiv.org/pdf/1603.01801.pdf
+d7b6bbb94ac20f5e75893f140ef7e207db7cd483,https://pdfs.semanticscholar.org/d7b6/bbb94ac20f5e75893f140ef7e207db7cd483.pdf
+d700aedcb22a4be374c40d8bee50aef9f85d98ef,https://arxiv.org/pdf/1712.04851.pdf
+d0471d5907d6557cf081edf4c7c2296c3c221a38,https://pdfs.semanticscholar.org/d047/1d5907d6557cf081edf4c7c2296c3c221a38.pdf
+d0509afe9c2c26fe021889f8efae1d85b519452a,https://arxiv.org/pdf/1803.07140.pdf
+d0144d76b8b926d22411d388e7a26506519372eb,https://arxiv.org/pdf/1806.04613.pdf
+d02e27e724f9b9592901ac1f45830341d37140fe,https://arxiv.org/pdf/1802.06454.pdf
+d02b32b012ffba2baeb80dca78e7857aaeececb0,https://pdfs.semanticscholar.org/d02b/32b012ffba2baeb80dca78e7857aaeececb0.pdf
+d01303062b21cd9ff46d5e3ff78897b8499480de,https://pdfs.semanticscholar.org/d013/03062b21cd9ff46d5e3ff78897b8499480de.pdf
+d02c54192dbd0798b43231efe1159d6b4375ad36,https://pdfs.semanticscholar.org/d02c/54192dbd0798b43231efe1159d6b4375ad36.pdf
+d0f54b72e3a3fe7c0e65d7d5a3b30affb275f4c5,https://arxiv.org/pdf/1803.08460.pdf
+be48b5dcd10ab834cd68d5b2a24187180e2b408f,https://arxiv.org/pdf/1611.04870.pdf
+be4a20113bc204019ea79c6557a0bece23da1121,https://arxiv.org/pdf/1712.01670.pdf
+be437b53a376085b01ebd0f4c7c6c9e40a4b1a75,https://pdfs.semanticscholar.org/be43/7b53a376085b01ebd0f4c7c6c9e40a4b1a75.pdf
+be4f7679797777f2bc1fd6aad8af67cce5e5ce87,https://pdfs.semanticscholar.org/be4f/7679797777f2bc1fd6aad8af67cce5e5ce87.pdf
+be28ed1be084385f5d389db25fd7f56cd2d7f7bf,https://arxiv.org/pdf/1706.03864.pdf
+bebea83479a8e1988a7da32584e37bfc463d32d4,https://arxiv.org/pdf/1807.03146.pdf
+bed06e7ff0b510b4a1762283640b4233de4c18e0,https://pdfs.semanticscholar.org/bed0/6e7ff0b510b4a1762283640b4233de4c18e0.pdf
+be5276e9744c4445fe5b12b785650e8f173f56ff,https://pdfs.semanticscholar.org/be52/76e9744c4445fe5b12b785650e8f173f56ff.pdf
+be4f18e25b06f430e2de0cc8fddcac8585b00beb,https://pdfs.semanticscholar.org/be4f/18e25b06f430e2de0cc8fddcac8585b00beb.pdf
+b331ca23aed90394c05f06701f90afd550131fe3,https://pdfs.semanticscholar.org/b331/ca23aed90394c05f06701f90afd550131fe3.pdf
+b3cb91a08be4117d6efe57251061b62417867de9,https://pdfs.semanticscholar.org/b3cb/91a08be4117d6efe57251061b62417867de9.pdf
+b3200539538eca54a85223bf0ec4f3ed132d0493,https://pdfs.semanticscholar.org/b320/0539538eca54a85223bf0ec4f3ed132d0493.pdf
+b3b467961ba66264bb73ffe00b1830d7874ae8ce,https://arxiv.org/pdf/1612.04402.pdf
+b3ba7ab6de023a0d58c741d6abfa3eae67227caf,https://arxiv.org/pdf/1707.09468.pdf
+b32cf547a764a4efa475e9c99a72a5db36eeced6,https://pdfs.semanticscholar.org/b32c/f547a764a4efa475e9c99a72a5db36eeced6.pdf
+b3afa234996f44852317af382b98f5f557cab25a,https://arxiv.org/pdf/1711.11248.pdf
+df90850f1c153bfab691b985bfe536a5544e438b,https://pdfs.semanticscholar.org/df90/850f1c153bfab691b985bfe536a5544e438b.pdf
+df577a89830be69c1bfb196e925df3055cafc0ed,https://arxiv.org/pdf/1711.08141.pdf
+df2c685aa9c234783ab51c1aa1bf1cb5d71a3dbb,https://arxiv.org/pdf/1704.06693.pdf
+dfd8602820c0e94b624d02f2e10ce6c798193a25,https://arxiv.org/pdf/1805.00597.pdf
+df9269657505fcdc1e10cf45bbb8e325678a40f5,https://pdfs.semanticscholar.org/1b38/1e864fa35cde69d85eada0eb515d274a6b74.pdf
+dfecaedeaf618041a5498cd3f0942c15302e75c3,https://arxiv.org/pdf/1608.01647.pdf
+da4170c862d8ae39861aa193667bfdbdf0ecb363,https://arxiv.org/pdf/1601.00400.pdf
+daefac0610fdeff415c2a3f49b47968d84692e87,https://pdfs.semanticscholar.org/daef/ac0610fdeff415c2a3f49b47968d84692e87.pdf
+b49affdff167f5d170da18de3efa6fd6a50262a2,https://pdfs.semanticscholar.org/b49a/ffdff167f5d170da18de3efa6fd6a50262a2.pdf
+b42a97fb47bcd6bfa72e130c08960a77ee96f9ab,https://pdfs.semanticscholar.org/b42a/97fb47bcd6bfa72e130c08960a77ee96f9ab.pdf
+b4d209845e1c67870ef50a7c37abaf3770563f3e,https://arxiv.org/pdf/1807.06980.pdf
+b4ee64022cc3ccd14c7f9d4935c59b16456067d3,https://pdfs.semanticscholar.org/b4ee/64022cc3ccd14c7f9d4935c59b16456067d3.pdf
+b4b0bf0cbe1a2c114adde9fac64900b2f8f6fee4,https://pdfs.semanticscholar.org/cbb4/4f0a4b5d76152b90a24a1470cb4cc860587d.pdf
+a285b6edd47f9b8966935878ad4539d270b406d1,https://pdfs.semanticscholar.org/a285/b6edd47f9b8966935878ad4539d270b406d1.pdf
+a2359c0f81a7eb032cff1fe45e3b80007facaa2a,https://arxiv.org/pdf/1712.08714.pdf
+a5f11c132eaab258a7cea2d681875af09cddba65,https://arxiv.org/pdf/1707.02069.pdf
+a5a44a32a91474f00a3cda671a802e87c899fbb4,https://arxiv.org/pdf/1801.03150.pdf
+bd0265ba7f391dc3df9059da3f487f7ef17144df,https://pdfs.semanticscholar.org/bd02/65ba7f391dc3df9059da3f487f7ef17144df.pdf
+bd0e100a91ff179ee5c1d3383c75c85eddc81723,https://arxiv.org/pdf/1706.03038.pdf
+bd379f8e08f88729a9214260e05967f4ca66cd65,https://arxiv.org/pdf/1711.06148.pdf
+bd21109e40c26af83c353a3271d0cd0b5c4b4ade,https://arxiv.org/pdf/1808.08803.pdf
+bd8f77b7d3b9d272f7a68defc1412f73e5ac3135,https://arxiv.org/pdf/1704.08063.pdf
+bd26dabab576adb6af30484183c9c9c8379bf2e0,https://arxiv.org/pdf/1511.02459.pdf
+bd9c9729475ba7e3b255e24e7478a5acb393c8e9,https://arxiv.org/pdf/1806.04845.pdf
+bdbba95e5abc543981fb557f21e3e6551a563b45,https://arxiv.org/pdf/1807.07362.pdf
+bd70f832e133fb87bae82dfaa0ae9d1599e52e4b,https://pdfs.semanticscholar.org/acc6/bd697d46121c95f40b62eff7641ffa8d2318.pdf
+d1dfdc107fa5f2c4820570e369cda10ab1661b87,https://arxiv.org/pdf/1712.00080.pdf
+d1a43737ca8be02d65684cf64ab2331f66947207,https://pdfs.semanticscholar.org/d1a4/3737ca8be02d65684cf64ab2331f66947207.pdf
+d1d6f1d64a04af9c2e1bdd74e72bd3ffac329576,https://arxiv.org/pdf/1704.04131.pdf
+d69df51cff3d6b9b0625acdcbea27cd2bbf4b9c0,https://pdfs.semanticscholar.org/d69d/f51cff3d6b9b0625acdcbea27cd2bbf4b9c0.pdf
+d69719b42ee53b666e56ed476629a883c59ddf66,https://pdfs.semanticscholar.org/d697/19b42ee53b666e56ed476629a883c59ddf66.pdf
+d69271c7b77bc3a06882884c21aa1b609b3f76cc,https://arxiv.org/pdf/1708.05234.pdf
+d666ce9d783a2d31550a8aa47da45128a67304a7,https://pdfs.semanticscholar.org/c508/532efb1c02dcae0224e9e6894d232a1f4f6b.pdf
+bc6de183cd8b2baeebafeefcf40be88468b04b74,https://pdfs.semanticscholar.org/e057/e713301e089887295543226b79b534fdd145.pdf
+bcf19b964e7d1134d00332cf1acf1ee6184aff00,https://pdfs.semanticscholar.org/bcf1/9b964e7d1134d00332cf1acf1ee6184aff00.pdf
+bc9003ad368cb79d8a8ac2ad025718da5ea36bc4,https://pdfs.semanticscholar.org/bc90/03ad368cb79d8a8ac2ad025718da5ea36bc4.pdf
+bcc346f4a287d96d124e1163e4447bfc47073cd8,https://arxiv.org/pdf/1707.05395.pdf
+bc27434e376db89fe0e6ef2d2fabc100d2575ec6,https://arxiv.org/pdf/1607.08438.pdf
+bc8e11b8cdf0cfbedde798a53a0318e8d6f67e17,https://pdfs.semanticscholar.org/bc8e/11b8cdf0cfbedde798a53a0318e8d6f67e17.pdf
+bc811a66855aae130ca78cd0016fd820db1603ec,https://pdfs.semanticscholar.org/62ca/3c9b00bf3d9ff319afdee04dfa27ae2e0bdb.pdf
+bc9af4c2c22a82d2c84ef7c7fcc69073c19b30ab,https://arxiv.org/pdf/1707.04993.pdf
+ae8d5be3caea59a21221f02ef04d49a86cb80191,https://arxiv.org/pdf/1708.06834.pdf
+ae2cf545565c157813798910401e1da5dc8a6199,https://pdfs.semanticscholar.org/eef4/c6bb430c4792522866fdad40a0ed8e76809f.pdf
+aeaf5dbb3608922246c7cd8a619541ea9e4a7028,https://pdfs.semanticscholar.org/aeaf/5dbb3608922246c7cd8a619541ea9e4a7028.pdf
+ae836e2be4bb784760e43de88a68c97f4f9e44a1,https://pdfs.semanticscholar.org/ae83/6e2be4bb784760e43de88a68c97f4f9e44a1.pdf
+aeff403079022683b233decda556a6aee3225065,https://arxiv.org/pdf/1701.01876.pdf
+ae2c71080b0e17dee4e5a019d87585f2987f0508,https://pdfs.semanticscholar.org/ae2c/71080b0e17dee4e5a019d87585f2987f0508.pdf
+ae5f32e489c4d52e7311b66060c7381d932f4193,https://arxiv.org/pdf/1711.09125.pdf
+d86fabd4498c8feaed80ec342d254fb877fb92f5,https://pdfs.semanticscholar.org/d86f/abd4498c8feaed80ec342d254fb877fb92f5.pdf
+d80a3d1f3a438e02a6685e66ee908446766fefa9,https://arxiv.org/pdf/1708.09687.pdf
+d89cfed36ce8ffdb2097c2ba2dac3e2b2501100d,https://arxiv.org/pdf/1509.00244.pdf
+ab58a7db32683aea9281c188c756ddf969b4cdbd,https://arxiv.org/pdf/1804.06291.pdf
+ab734bac3994b00bf97ce22b9abc881ee8c12918,https://pdfs.semanticscholar.org/ab73/4bac3994b00bf97ce22b9abc881ee8c12918.pdf
+ab989225a55a2ddcd3b60a99672e78e4373c0df1,https://arxiv.org/pdf/1706.05599.pdf
+ab1719f573a6c121d7d7da5053fe5f12de0182e7,https://pdfs.semanticscholar.org/ab17/19f573a6c121d7d7da5053fe5f12de0182e7.pdf
+ab2b09b65fdc91a711e424524e666fc75aae7a51,https://pdfs.semanticscholar.org/ab2b/09b65fdc91a711e424524e666fc75aae7a51.pdf
+abba1bf1348a6f1b70a26aac237338ee66764458,https://arxiv.org/pdf/1808.03457.pdf
+abdd17e411a7bfe043f280abd4e560a04ab6e992,https://arxiv.org/pdf/1803.00839.pdf
+e5e5f31b81ed6526c26d277056b6ab4909a56c6c,https://arxiv.org/pdf/1809.06131.pdf
+e506cdb250eba5e70c5147eb477fbd069714765b,https://pdfs.semanticscholar.org/e506/cdb250eba5e70c5147eb477fbd069714765b.pdf
+e572c42d8ef2e0fadedbaae77c8dfe05c4933fbf,https://pdfs.semanticscholar.org/e572/c42d8ef2e0fadedbaae77c8dfe05c4933fbf.pdf
+e5823a9d3e5e33e119576a34cb8aed497af20eea,https://arxiv.org/pdf/1809.05620.pdf
+e5dfd17dbfc9647ccc7323a5d62f65721b318ba9,https://pdfs.semanticscholar.org/e5df/d17dbfc9647ccc7323a5d62f65721b318ba9.pdf
+e56c4c41bfa5ec2d86c7c9dd631a9a69cdc05e69,https://arxiv.org/pdf/1806.05226.pdf
+e569f4bd41895028c4c009e5b46b935056188e91,https://pdfs.semanticscholar.org/e569/f4bd41895028c4c009e5b46b935056188e91.pdf
+e5fbffd3449a2bfe0acb4ec339a19f5b88fff783,https://arxiv.org/pdf/1808.06882.pdf
+e5d53a335515107452a30b330352cad216f88fc3,https://pdfs.semanticscholar.org/e5d5/3a335515107452a30b330352cad216f88fc3.pdf
+e22adcd2a6a7544f017ec875ce8f89d5c59e09c8,https://arxiv.org/pdf/1807.11936.pdf
+e293a31260cf20996d12d14b8f29a9d4d99c4642,https://arxiv.org/pdf/1703.01560.pdf
+e20e2db743e8db1ff61279f4fda32bf8cf381f8e,https://arxiv.org/pdf/1801.01486.pdf
+f412d9d7bc7534e7daafa43f8f5eab811e7e4148,https://pdfs.semanticscholar.org/f412/d9d7bc7534e7daafa43f8f5eab811e7e4148.pdf
+f442a2f2749f921849e22f37e0480ac04a3c3fec,https://pdfs.semanticscholar.org/f442/a2f2749f921849e22f37e0480ac04a3c3fec.pdf
+f4f6fc473effb063b7a29aa221c65f64a791d7f4,https://pdfs.semanticscholar.org/48ec/4b2c3b6c6549fa7a988f8db135a41691f605.pdf
+f4d30896c5f808a622824a2d740b3130be50258e,https://arxiv.org/pdf/1705.06148.pdf
+f42dca4a4426e5873a981712102aa961be34539a,https://pdfs.semanticscholar.org/f42d/ca4a4426e5873a981712102aa961be34539a.pdf
+f3ca2c43e8773b7062a8606286529c5bc9b3ce25,https://arxiv.org/pdf/1704.06327.pdf
+f3a59d85b7458394e3c043d8277aa1ffe3cdac91,https://arxiv.org/pdf/1802.09900.pdf
+f3df296de36b7c114451865778e211350d153727,https://arxiv.org/pdf/1703.06995.pdf
+f3ea181507db292b762aa798da30bc307be95344,https://arxiv.org/pdf/1805.04855.pdf
+f3fed71cc4fc49b02067b71c2df80e83084b2a82,https://arxiv.org/pdf/1804.06216.pdf
+f3cf10c84c4665a0b28734f5233d423a65ef1f23,https://pdfs.semanticscholar.org/203d/7c52e2bd0da104516abbe34cd5aa5cfc8368.pdf
+f3b7938de5f178e25a3cf477107c76286c0ad691,https://arxiv.org/pdf/1807.05511.pdf
+eb100638ed73b82e1cce8475bb8e180cb22a09a2,https://arxiv.org/pdf/1704.06228.pdf
+eb8519cec0d7a781923f68fdca0891713cb81163,https://arxiv.org/pdf/1703.08617.pdf
+eb566490cd1aa9338831de8161c6659984e923fd,https://arxiv.org/pdf/1712.02310.pdf
+eb4d2ec77fae67141f6cf74b3ed773997c2c0cf6,https://pdfs.semanticscholar.org/eb4d/2ec77fae67141f6cf74b3ed773997c2c0cf6.pdf
+ebb7cc67df6d90f1c88817b20e7a3baad5dc29b9,https://arxiv.org/pdf/1411.4324.pdf
+eb70c38a350d13ea6b54dc9ebae0b64171d813c9,https://pdfs.semanticscholar.org/a531/bf1b04b794b19e6a563afe077f78a82ecbd6.pdf
+eb027969f9310e0ae941e2adee2d42cdf07d938c,https://arxiv.org/pdf/1710.08092.pdf
+eb48a58b873295d719827e746d51b110f5716d6c,https://arxiv.org/pdf/1706.01820.pdf
+c7c53d75f6e963b403057d8ba5952e4974a779ad,https://pdfs.semanticscholar.org/c7c5/3d75f6e963b403057d8ba5952e4974a779ad.pdf
+c79cf7f61441195404472102114bcf079a72138a,https://pdfs.semanticscholar.org/9704/8d901389535b122f82a6a949bd8f596790f2.pdf
+c73dd452c20460f40becb1fd8146239c88347d87,https://arxiv.org/pdf/1708.01846.pdf
+c72e6992f44ce75a40f44be4365dc4f264735cfb,https://arxiv.org/pdf/1807.11122.pdf
+c7de0c85432ad17a284b5b97c4f36c23f506d9d1,https://pdfs.semanticscholar.org/a908/f786591a846f9c48e1ee5a134603efd32f9c.pdf
+c71217b2b111a51a31cf1107c71d250348d1ff68,https://arxiv.org/pdf/1703.09912.pdf
+c76f64e87f88475069f7707616ad9df1719a6099,https://arxiv.org/pdf/1803.08094.pdf
+c7f0c0636d27a1d45b8fcef37e545b902195d937,https://arxiv.org/pdf/1709.00966.pdf
+c7c8d150ece08b12e3abdb6224000c07a6ce7d47,https://arxiv.org/pdf/1611.05271.pdf
+c75e6ce54caf17b2780b4b53f8d29086b391e839,https://arxiv.org/pdf/1802.00542.pdf
+c038beaa228aeec174e5bd52460f0de75e9cccbe,https://arxiv.org/pdf/1705.02953.pdf
+c05a7c72e679745deab9c9d7d481f7b5b9b36bdd,https://pdfs.semanticscholar.org/c05a/7c72e679745deab9c9d7d481f7b5b9b36bdd.pdf
+c0c8d720658374cc1ffd6116554a615e846c74b5,https://arxiv.org/pdf/1706.04508.pdf
+c00df53bd46f78ae925c5768d46080159d4ef87d,https://arxiv.org/pdf/1707.08105.pdf
+ee815f60dc4a090fa9fcfba0135f4707af21420d,https://arxiv.org/pdf/1702.02925.pdf
+eed7920682789a9afd0de4efd726cd9a706940c8,https://pdfs.semanticscholar.org/3115/90680f1ae14864df886af20699d2eca7099f.pdf
+ee463f1f72a7e007bae274d2d42cd2e5d817e751,https://pdfs.semanticscholar.org/ee46/3f1f72a7e007bae274d2d42cd2e5d817e751.pdf
+eee06d68497be8bf3a8aba4fde42a13aa090b301,https://arxiv.org/pdf/1806.11191.pdf
+eee2d2ac461f46734c8e674ae14ed87bbc8d45c6,https://arxiv.org/pdf/1704.02112.pdf
+eed93d2e16b55142b3260d268c9e72099c53d5bc,https://arxiv.org/pdf/1801.01262.pdf
+eedfb384a5e42511013b33104f4cd3149432bd9e,https://pdfs.semanticscholar.org/eedf/b384a5e42511013b33104f4cd3149432bd9e.pdf
+c97a5f2241cc6cd99ef0c4527ea507a50841f60b,https://arxiv.org/pdf/1807.10510.pdf
+c9bbd7828437e70cc3e6863b278aa56a7d545150,https://arxiv.org/pdf/1708.02044.pdf
+c98983592777952d1751103b4d397d3ace00852d,https://pdfs.semanticscholar.org/c989/83592777952d1751103b4d397d3ace00852d.pdf
+c9367ed83156d4d682cefc59301b67f5460013e0,https://arxiv.org/pdf/1802.01822.pdf
+fc0f5859a111fb17e6dcf6ba63dd7b751721ca61,https://pdfs.semanticscholar.org/fc0f/5859a111fb17e6dcf6ba63dd7b751721ca61.pdf
+fcf91995dc4d9b0cee84bda5b5b0ce5b757740ac,https://pdfs.semanticscholar.org/fcf9/1995dc4d9b0cee84bda5b5b0ce5b757740ac.pdf
+fdff2da5bdca66e0ab5874ef58ac2205fb088ed7,https://pdfs.semanticscholar.org/cea6/9010a2f75f7a057d56770e776dec206ed705.pdf
+fdfd57d4721174eba288e501c0c120ad076cdca8,https://arxiv.org/pdf/1704.07129.pdf
+fd33df02f970055d74fbe69b05d1a7a1b9b2219b,https://arxiv.org/pdf/1710.06236.pdf
+fdda5852f2cffc871fd40b0cb1aa14cea54cd7e3,https://arxiv.org/pdf/1712.04109.pdf
+fd15e397629e0241642329fc8ee0b8cd6c6ac807,https://arxiv.org/pdf/1806.01547.pdf
+fde41dc4ec6ac6474194b99e05b43dd6a6c4f06f,https://arxiv.org/pdf/1809.01990.pdf
+fd53be2e0a9f33080a9db4b5a5e416e24ae8e198,https://arxiv.org/pdf/1606.02909.pdf
+fd71ae9599e8a51d8a61e31e6faaaf4a23a17d81,https://arxiv.org/pdf/1807.11332.pdf
+fd10b0c771a2620c0db294cfb82b80d65f73900d,https://arxiv.org/pdf/1809.02860.pdf
+fdbacf2ff0fc21e021c830cdcff7d347f2fddd8e,https://pdfs.semanticscholar.org/fdba/cf2ff0fc21e021c830cdcff7d347f2fddd8e.pdf
+fd892e912149e3f5ddd82499e16f9ea0f0063fa3,https://pdfs.semanticscholar.org/fd89/2e912149e3f5ddd82499e16f9ea0f0063fa3.pdf
+fdf8e293a7618f560e76bd83e3c40a0788104547,https://arxiv.org/pdf/1704.04023.pdf
+fdaf65b314faee97220162980e76dbc8f32db9d6,https://pdfs.semanticscholar.org/fdaf/65b314faee97220162980e76dbc8f32db9d6.pdf
+f22d6d59e413ee255e5e0f2104f1e03be1a6722e,https://arxiv.org/pdf/1708.03958.pdf
+f2a7f9bd040aa8ea87672d38606a84c31163e171,https://arxiv.org/pdf/1608.07876.pdf
+f257300b2b4141aab73f93c146bf94846aef5fa1,https://arxiv.org/pdf/1708.05465.pdf
+f20e0eefd007bc310d2a753ba526d33a8aba812c,https://pdfs.semanticscholar.org/116e/c3a1a8225362a3e3e445df45036fae7cadc6.pdf
+f231046d5f5d87e2ca5fae88f41e8d74964e8f4f,https://pdfs.semanticscholar.org/f231/046d5f5d87e2ca5fae88f41e8d74964e8f4f.pdf
+f28b7d62208fdaaa658716403106a2b0b527e763,https://arxiv.org/pdf/1803.08457.pdf
+f58d584c4ac93b4e7620ef6e5a8f20c6f6da295e,https://pdfs.semanticscholar.org/f58d/584c4ac93b4e7620ef6e5a8f20c6f6da295e.pdf
+f5eb0cf9c57716618fab8e24e841f9536057a28a,https://arxiv.org/pdf/1803.02988.pdf
+f571fe3f753765cf695b75b1bd8bed37524a52d2,https://pdfs.semanticscholar.org/8203/70a36ec56f8987fbec5ca2769f996d03d79b.pdf
+f5fae7810a33ed67852ad6a3e0144cb278b24b41,https://pdfs.semanticscholar.org/f5fa/e7810a33ed67852ad6a3e0144cb278b24b41.pdf
+f5770dd225501ff3764f9023f19a76fad28127d4,https://pdfs.semanticscholar.org/f577/0dd225501ff3764f9023f19a76fad28127d4.pdf
+f5eb411217f729ad7ae84bfd4aeb3dedb850206a,https://pdfs.semanticscholar.org/f5eb/411217f729ad7ae84bfd4aeb3dedb850206a.pdf
+e393a038d520a073b9835df7a3ff104ad610c552,https://pdfs.semanticscholar.org/b6aa/94b81b2165e492cc2900e05dd997619bfe7a.pdf
+e3b324101157daede3b4d16bdc9c2388e849c7d4,https://pdfs.semanticscholar.org/e3b3/24101157daede3b4d16bdc9c2388e849c7d4.pdf
+e3c011d08d04c934197b2a4804c90be55e21d572,https://arxiv.org/pdf/1709.02940.pdf
+e39a66a6d1c5e753f8e6c33cd5d335f9bc9c07fa,https://pdfs.semanticscholar.org/e39a/66a6d1c5e753f8e6c33cd5d335f9bc9c07fa.pdf
+e3c8e49ffa7beceffca3f7f276c27ae6d29b35db,https://arxiv.org/pdf/1604.02182.pdf
+e38371b69be4f341baa95bc854584e99b67c6d3a,https://arxiv.org/pdf/1803.07201.pdf
+e3a6e5a573619a97bd6662b652ea7d088ec0b352,https://arxiv.org/pdf/1804.00112.pdf
+cfeb26245b57dd10de8f187506d4ed5ce1e2b7dd,https://arxiv.org/pdf/1805.11195.pdf
+cfffae38fe34e29d47e6deccfd259788176dc213,https://pdfs.semanticscholar.org/cfff/ae38fe34e29d47e6deccfd259788176dc213.pdf
+cfd4004054399f3a5f536df71f9b9987f060f434,https://arxiv.org/pdf/1710.03224.pdf
+cfb8bc66502fb5f941ecdb22aec1fdbfdb73adce,https://arxiv.org/pdf/1807.08512.pdf
+cfa92e17809e8d20ebc73b4e531a1b106d02b38c,https://pdfs.semanticscholar.org/cfa9/2e17809e8d20ebc73b4e531a1b106d02b38c.pdf
+cf5a0115d3f4dcf95bea4d549ec2b6bdd7c69150,https://pdfs.semanticscholar.org/e36f/a8b870fd155f9df898bcc6613f6554eab519.pdf
+cfdc632adcb799dba14af6a8339ca761725abf0a,https://arxiv.org/pdf/1804.01575.pdf
+cfa931e6728a825caada65624ea22b840077f023,https://arxiv.org/pdf/1806.06298.pdf
+cfc30ce53bfc204b8764ebb764a029a8d0ad01f4,https://arxiv.org/pdf/1710.05179.pdf
+cff911786b5ac884bb71788c5bc6acf6bf569eff,https://arxiv.org/pdf/1805.01290.pdf
+cfc4aa456d9da1a6fabd7c6ca199332f03e35b29,https://pdfs.semanticscholar.org/cfc4/aa456d9da1a6fabd7c6ca199332f03e35b29.pdf
+cf805d478aeb53520c0ab4fcdc9307d093c21e52,https://pdfs.semanticscholar.org/cf80/5d478aeb53520c0ab4fcdc9307d093c21e52.pdf
+cfdc4d0f8e1b4b9ced35317d12b4229f2e3311ab,https://pdfs.semanticscholar.org/cfdc/4d0f8e1b4b9ced35317d12b4229f2e3311ab.pdf
+cad24ba99c7b6834faf6f5be820dd65f1a755b29,https://arxiv.org/pdf/1807.08254.pdf
+ca37eda56b9ee53610c66951ee7ca66a35d0a846,https://pdfs.semanticscholar.org/ca37/eda56b9ee53610c66951ee7ca66a35d0a846.pdf
+e43045a061421bd79713020bc36d2cf4653c044d,https://arxiv.org/pdf/1703.03492.pdf
+e4d8ba577cabcb67b4e9e1260573aea708574886,https://pdfs.semanticscholar.org/e4d8/ba577cabcb67b4e9e1260573aea708574886.pdf
+e4abc40f79f86dbc06f5af1df314c67681dedc51,https://arxiv.org/pdf/1707.06786.pdf
+fe464b2b54154d231671750053861f5fd14454f5,https://pdfs.semanticscholar.org/fe46/4b2b54154d231671750053861f5fd14454f5.pdf
+fe7c0bafbd9a28087e0169259816fca46db1a837,https://arxiv.org/pdf/1804.00326.pdf
+fe48f0e43dbdeeaf4a03b3837e27f6705783e576,https://arxiv.org/pdf/1607.05477.pdf
+fea83550a21f4b41057b031ac338170bacda8805,https://arxiv.org/pdf/1605.07270.pdf
+fe0c51fd41cb2d5afa1bc1900bbbadb38a0de139,https://pdfs.semanticscholar.org/fe0c/51fd41cb2d5afa1bc1900bbbadb38a0de139.pdf
+c86e6ed734d3aa967deae00df003557b6e937d3d,https://arxiv.org/pdf/1807.03923.pdf
+c87f7ee391d6000aef2eadb49f03fc237f4d1170,https://arxiv.org/pdf/1804.03547.pdf
+c866a2afc871910e3282fd9498dce4ab20f6a332,https://arxiv.org/pdf/1804.09691.pdf
+c8ca6a2dc41516c16ea0747e9b3b7b1db788dbdd,https://arxiv.org/pdf/1609.02825.pdf
+c84233f854bbed17c22ba0df6048cbb1dd4d3248,https://pdfs.semanticscholar.org/c842/33f854bbed17c22ba0df6048cbb1dd4d3248.pdf
+c829be73584966e3162f7ccae72d9284a2ebf358,https://pdfs.semanticscholar.org/c829/be73584966e3162f7ccae72d9284a2ebf358.pdf
+c87d5036d3a374c66ec4f5870df47df7176ce8b9,https://pdfs.semanticscholar.org/c87d/5036d3a374c66ec4f5870df47df7176ce8b9.pdf
+c83a05de1b4b20f7cd7cd872863ba2e66ada4d3f,https://arxiv.org/pdf/1705.01842.pdf
+c88c21eb9a8e08b66c981db35f6556f4974d27a8,https://pdfs.semanticscholar.org/c88c/21eb9a8e08b66c981db35f6556f4974d27a8.pdf
+fb2cc3501fc89f92f5ee130d66e69854f8a9ddd1,https://arxiv.org/pdf/1602.01168.pdf
+fb87045600da73b07f0757f345a937b1c8097463,https://pdfs.semanticscholar.org/5c54/2fef80a35a4f930e5c82040b52c58e96ce87.pdf
+fb85867c989b9ee6b7899134136f81d6372526a9,https://arxiv.org/pdf/1808.01424.pdf
+fb54d3c37dc82891ff9dc7dd8caf31de00c40d6a,https://pdfs.semanticscholar.org/c21b/ccf1ab4bb090fd5fc1109421a1a3979e7106.pdf
+fbb2f81fc00ee0f257d4aa79bbef8cad5000ac59,https://pdfs.semanticscholar.org/fbb2/f81fc00ee0f257d4aa79bbef8cad5000ac59.pdf
+fb9ad920809669c1b1455cc26dbd900d8e719e61,https://pdfs.semanticscholar.org/fb9a/d920809669c1b1455cc26dbd900d8e719e61.pdf
+ed0cf5f577f5030ac68ab62fee1cf065349484cc,https://pdfs.semanticscholar.org/ed0c/f5f577f5030ac68ab62fee1cf065349484cc.pdf
+edde81b2bdd61bd757b71a7b3839b6fef81f4be4,https://arxiv.org/pdf/1507.06332.pdf
+edf98a925bb24e39a6e6094b0db839e780a77b08,https://arxiv.org/pdf/1807.09930.pdf
+ed9d11e995baeec17c5d2847ec1a8d5449254525,https://pdfs.semanticscholar.org/ed9d/11e995baeec17c5d2847ec1a8d5449254525.pdf
+ed07856461da6c7afa4f1782b5b607b45eebe9f6,https://pdfs.semanticscholar.org/ed07/856461da6c7afa4f1782b5b607b45eebe9f6.pdf
+ed1886e233c8ecef7f414811a61a83e44c8bbf50,https://arxiv.org/pdf/1706.01789.pdf
+ed388878151a3b841f95a62c42382e634d4ab82e,https://arxiv.org/pdf/1805.07550.pdf
+edff76149ec44f6849d73f019ef9bded534a38c2,https://arxiv.org/pdf/1704.02203.pdf
+ed96f2eb1771f384df2349879970065a87975ca7,https://arxiv.org/pdf/1805.12302.pdf
+c146aa6d56233ce700032f1cb179700778557601,https://arxiv.org/pdf/1708.07199.pdf
+c1cc2a2a1ab66f6c9c6fabe28be45d1440a57c3d,https://pdfs.semanticscholar.org/aae7/a5182e59f44b7bb49f61999181ce011f800b.pdf
+c11eb653746afa8148dc9153780a4584ea529d28,https://arxiv.org/pdf/1809.07764.pdf
+c1ebbdb47cb6a0ed49c4d1cf39d7565060e6a7ee,https://arxiv.org/pdf/1805.05612.pdf
+c1298120e9ab0d3764512cbd38b47cd3ff69327b,https://pdfs.semanticscholar.org/c129/8120e9ab0d3764512cbd38b47cd3ff69327b.pdf
+c1dd69df9dfbd7b526cc89a5749f7f7fabc1e290,https://pdfs.semanticscholar.org/7f02/61a759807f2ff57102a4e93318916232473f.pdf
+c68ec931585847b37cde9f910f40b2091a662e83,https://pdfs.semanticscholar.org/c68e/c931585847b37cde9f910f40b2091a662e83.pdf
+c6f3399edb73cfba1248aec964630c8d54a9c534,https://arxiv.org/pdf/1809.03336.pdf
+c678920facffd35853c9d185904f4aebcd2d8b49,https://arxiv.org/pdf/1803.11556.pdf
+c6241e6fc94192df2380d178c4c96cf071e7a3ac,https://arxiv.org/pdf/1505.04868.pdf
+c62c07de196e95eaaf614fb150a4fa4ce49588b4,https://pdfs.semanticscholar.org/c62c/07de196e95eaaf614fb150a4fa4ce49588b4.pdf
+ec90d333588421764dff55658a73bbd3ea3016d2,https://pdfs.semanticscholar.org/ec90/d333588421764dff55658a73bbd3ea3016d2.pdf
+ec8ec2dfd73cf3667f33595fef84c95c42125945,https://arxiv.org/pdf/1707.06286.pdf
+ec1e03ec72186224b93b2611ff873656ed4d2f74,https://pdfs.semanticscholar.org/ec1e/03ec72186224b93b2611ff873656ed4d2f74.pdf
+4e30107ee6a2e087f14a7725e7fc5535ec2f5a5f,https://pdfs.semanticscholar.org/4e30/107ee6a2e087f14a7725e7fc5535ec2f5a5f.pdf
+4e32fbb58154e878dd2fd4b06398f85636fd0cf4,https://arxiv.org/pdf/1805.02339.pdf
+4e0636a1b92503469b44e2807f0bb35cc0d97652,https://pdfs.semanticscholar.org/4e06/36a1b92503469b44e2807f0bb35cc0d97652.pdf
+4e27fec1703408d524d6b7ed805cdb6cba6ca132,https://pdfs.semanticscholar.org/7714/a5aa27ab5ad4d06a81fbb3e973d3b1002ac1.pdf
+4e6c9be0b646d60390fe3f72ce5aeb0136222a10,https://arxiv.org/pdf/1604.04494.pdf
+20b994a78cd1db6ba86ea5aab7211574df5940b3,https://arxiv.org/pdf/1805.08417.pdf
+2004afb2276a169cdb1f33b2610c5218a1e47332,https://pdfs.semanticscholar.org/2004/afb2276a169cdb1f33b2610c5218a1e47332.pdf
+20ade100a320cc761c23971d2734388bfe79f7c5,https://pdfs.semanticscholar.org/20ad/e100a320cc761c23971d2734388bfe79f7c5.pdf
+206e24f7d4b3943b35b069ae2d028143fcbd0704,https://arxiv.org/pdf/1803.11405.pdf
+2059d2fecfa61ddc648be61c0cbc9bc1ad8a9f5b,https://arxiv.org/pdf/1408.2700.pdf
+206fbe6ab6a83175a0ef6b44837743f8d5f9b7e8,https://arxiv.org/pdf/1706.02932.pdf
+202dc3c6fda654aeb39aee3e26a89340fb06802a,https://arxiv.org/pdf/1807.02800.pdf
+20111924fbf616a13d37823cd8712a9c6b458cd6,https://pdfs.semanticscholar.org/2011/1924fbf616a13d37823cd8712a9c6b458cd6.pdf
+20c02e98602f6adf1cebaba075d45cef50de089f,https://arxiv.org/pdf/1808.07507.pdf
+18d51a366ce2b2068e061721f43cb798177b4bb7,https://pdfs.semanticscholar.org/18d5/1a366ce2b2068e061721f43cb798177b4bb7.pdf
+185263189a30986e31566394680d6d16b0089772,https://pdfs.semanticscholar.org/1852/63189a30986e31566394680d6d16b0089772.pdf
+18b9dc55e5221e704f90eea85a81b41dab51f7da,https://arxiv.org/pdf/1803.07179.pdf
+18941b52527e6f15abfdf5b86a0086935706e83b,https://arxiv.org/pdf/1808.09211.pdf
+27a00f2490284bc0705349352d36e9749dde19ab,https://arxiv.org/pdf/1806.05622.pdf
+275b5091c50509cc8861e792e084ce07aa906549,https://pdfs.semanticscholar.org/275b/5091c50509cc8861e792e084ce07aa906549.pdf
+270733d986a1eb72efda847b4b55bc6ba9686df4,https://pdfs.semanticscholar.org/2707/33d986a1eb72efda847b4b55bc6ba9686df4.pdf
+27da432cf2b9129dce256e5bf7f2f18953eef5a5,https://arxiv.org/pdf/1805.11519.pdf
+2717b044ae9933f9ab87f16d6c611352f66b2033,https://arxiv.org/pdf/1804.06964.pdf
+2770b095613d4395045942dc60e6c560e882f887,https://arxiv.org/pdf/1808.06210.pdf
+4b61d8490bf034a2ee8aa26601d13c83ad7f843a,https://arxiv.org/pdf/1807.06708.pdf
+4b48e912a17c79ac95d6a60afed8238c9ab9e553,https://arxiv.org/pdf/1805.06741.pdf
+4bbe460ab1b279a55e3c9d9f488ff79884d01608,https://arxiv.org/pdf/1712.00684.pdf
+11691f1e7c9dbcbd6dfd256ba7ac710581552baa,https://arxiv.org/pdf/1804.04527.pdf
+112780a7fe259dc7aff2170d5beda50b2bfa7bda,https://arxiv.org/pdf/1805.00833.pdf
+1190cba0cae3c8bb81bf80d6a0a83ae8c41240bc,https://pdfs.semanticscholar.org/1190/cba0cae3c8bb81bf80d6a0a83ae8c41240bc.pdf
+111d0b588f3abbbea85d50a28c0506f74161e091,https://pdfs.semanticscholar.org/111d/0b588f3abbbea85d50a28c0506f74161e091.pdf
+7d2556d674ad119cf39df1f65aedbe7493970256,https://pdfs.semanticscholar.org/7f01/762f2daf27282197cb84751eb30550417d41.pdf
+7df4f96138a4e23492ea96cf921794fc5287ba72,https://arxiv.org/pdf/1707.08705.pdf
+7df268a3f4da7d747b792882dfb0cbdb7cc431bc,https://arxiv.org/pdf/1804.03675.pdf
+2902f62457fdf7e8e8ee77a9155474107a2f423e,https://arxiv.org/pdf/1803.07973.pdf
+2957715e96a18dbb5ed5c36b92050ec375214aa6,https://arxiv.org/pdf/1712.00193.pdf
+29c340c83b3bbef9c43b0c50b4d571d5ed037cbd,https://pdfs.semanticscholar.org/29c3/40c83b3bbef9c43b0c50b4d571d5ed037cbd.pdf
+7c47da191f935811f269f9ba3c59556c48282e80,https://arxiv.org/pdf/1503.07697.pdf
+7c1cfab6b60466c13f07fe028e5085a949ec8b30,https://arxiv.org/pdf/1610.00291.pdf
+7c17280c9193da3e347416226b8713b99e7825b8,https://arxiv.org/pdf/1805.08162.pdf
+7cffcb4f24343a924a8317d560202ba9ed26cd0b,https://arxiv.org/pdf/1708.06997.pdf
+7c825562b3ff4683ed049a372cb6807abb09af2a,https://pdfs.semanticscholar.org/7c82/5562b3ff4683ed049a372cb6807abb09af2a.pdf
+7ca7255c2e0c86e4adddbbff2ce74f36b1dc522d,https://pdfs.semanticscholar.org/7ca7/255c2e0c86e4adddbbff2ce74f36b1dc522d.pdf
+7c9a65f18f7feb473e993077d087d4806578214e,https://pdfs.semanticscholar.org/7c9a/65f18f7feb473e993077d087d4806578214e.pdf
+7cf579088e0456d04b531da385002825ca6314e2,https://arxiv.org/pdf/1708.04299.pdf
+7c80d91db5977649487388588c0c823080c9f4b4,https://arxiv.org/pdf/1805.02283.pdf
+7c30ea47f5ae1c5abd6981d409740544ed16ed16,https://pdfs.semanticscholar.org/7c30/ea47f5ae1c5abd6981d409740544ed16ed16.pdf
+162403e189d1b8463952fa4f18a291241275c354,https://arxiv.org/pdf/1801.10304.pdf
+16fdd6d842475e6fbe58fc809beabbed95f0642e,https://arxiv.org/pdf/1505.00315.pdf
+16de1324459fe8fdcdca80bba04c3c30bb789bdf,https://arxiv.org/pdf/1712.02765.pdf
+16b9d258547f1eccdb32111c9f45e2e4bbee79af,https://arxiv.org/pdf/1704.06369.pdf
+164b0e2a03a5a402f66c497e6c327edf20f8827b,https://pdfs.semanticscholar.org/164b/0e2a03a5a402f66c497e6c327edf20f8827b.pdf
+166186e551b75c9b5adcc9218f0727b73f5de899,https://pdfs.semanticscholar.org/1661/86e551b75c9b5adcc9218f0727b73f5de899.pdf
+16d6737b50f969247339a6860da2109a8664198a,https://pdfs.semanticscholar.org/16d6/737b50f969247339a6860da2109a8664198a.pdf
+4209783b0cab1f22341f0600eed4512155b1dee6,https://arxiv.org/pdf/1806.00365.pdf
+42eda7c20db9dc0f42f72bb997dd191ed8499b10,https://arxiv.org/pdf/1611.09309.pdf
+42ea8a96eea023361721f0ea34264d3d0fc49ebd,https://arxiv.org/pdf/1608.04695.pdf
+89c73b1e7c9b5e126a26ed5b7caccd7cd30ab199,https://pdfs.semanticscholar.org/89c7/3b1e7c9b5e126a26ed5b7caccd7cd30ab199.pdf
+893239f17dc2d17183410d8a98b0440d98fa2679,https://pdfs.semanticscholar.org/d5b1/6481d34838cc92593f5f311badbf7f18ed5a.pdf
+892c911ca68f5b4bad59cde7eeb6c738ec6c4586,https://pdfs.semanticscholar.org/892c/911ca68f5b4bad59cde7eeb6c738ec6c4586.pdf
+8986585975c0090e9ad97bec2ba6c4b437419dae,https://arxiv.org/pdf/1808.04285.pdf
+89d3a57f663976a9ac5e9cdad01267c1fc1a7e06,https://arxiv.org/pdf/1708.09642.pdf
+8981be3a69cd522b4e57e9914bf19f034d4b530c,https://pdfs.semanticscholar.org/8981/be3a69cd522b4e57e9914bf19f034d4b530c.pdf
+891b10c4b3b92ca30c9b93170ec9abd71f6099c4,https://pdfs.semanticscholar.org/891b/10c4b3b92ca30c9b93170ec9abd71f6099c4.pdf
+451b6409565a5ad18ea49b063561a2645fa4281b,https://arxiv.org/pdf/1706.00699.pdf
+4552f4d46a2cc67ccc4dd8568e5c95aa2eedb4ec,https://arxiv.org/pdf/1803.11366.pdf
+45e7ddd5248977ba8ec61be111db912a4387d62f,https://arxiv.org/pdf/1711.00253.pdf
+4560491820e0ee49736aea9b81d57c3939a69e12,https://arxiv.org/pdf/1712.04008.pdf
+45e459462a80af03e1bb51a178648c10c4250925,https://arxiv.org/pdf/1606.08998.pdf
+45a6333fc701d14aab19f9e2efd59fe7b0e89fec,https://pdfs.semanticscholar.org/45a6/333fc701d14aab19f9e2efd59fe7b0e89fec.pdf
+450c6a57f19f5aa45626bb08d7d5d6acdb863b4b,https://arxiv.org/pdf/1805.00611.pdf
+1fe1bd6b760e3059fff73d53a57ce3a6079adea1,https://pdfs.semanticscholar.org/1fe1/bd6b760e3059fff73d53a57ce3a6079adea1.pdf
+1ffe20eb32dbc4fa85ac7844178937bba97f4bf0,https://arxiv.org/pdf/1706.05067.pdf
+1fdeba9c4064b449231eac95e610f3288801fd3e,https://arxiv.org/pdf/1710.00925.pdf
+1fff309330f85146134e49e0022ac61ac60506a9,https://arxiv.org/pdf/1701.07569.pdf
+1feeab271621128fe864e4c64bab9b2e2d0ed1f1,https://pdfs.semanticscholar.org/e230/e2e60b1d20a5334f59ca669bbd35f9391d2e.pdf
+73b90573d272887a6d835ace89bfaf717747c59b,https://pdfs.semanticscholar.org/73b9/0573d272887a6d835ace89bfaf717747c59b.pdf
+7323b594d3a8508f809e276aa2d224c4e7ec5a80,https://arxiv.org/pdf/1808.05508.pdf
+73ed64803d6f2c49f01cffef8e6be8fc9b5273b8,https://arxiv.org/pdf/1508.06073.pdf
+7306d42ca158d40436cc5167e651d7ebfa6b89c1,https://arxiv.org/pdf/1511.04458.pdf
+734cdda4a4de2a635404e4c6b61f1b2edb3f501d,https://pdfs.semanticscholar.org/734c/dda4a4de2a635404e4c6b61f1b2edb3f501d.pdf
+73c9cbbf3f9cea1bc7dce98fce429bf0616a1a8c,https://arxiv.org/pdf/1705.02193.pdf
+872dfdeccf99bbbed7c8f1ea08afb2d713ebe085,https://arxiv.org/pdf/1703.09507.pdf
+87e6cb090aecfc6f03a3b00650a5c5f475dfebe1,https://pdfs.semanticscholar.org/87e6/cb090aecfc6f03a3b00650a5c5f475dfebe1.pdf
+8724fc4d6b91eebb79057a7ce3e9dfffd3b1426f,https://arxiv.org/pdf/1701.03246.pdf
+878169be6e2c87df2d8a1266e9e37de63b524ae7,https://pdfs.semanticscholar.org/8781/69be6e2c87df2d8a1266e9e37de63b524ae7.pdf
+878301453e3d5cb1a1f7828002ea00f59cbeab06,https://arxiv.org/pdf/1701.08393.pdf
+87e592ee1a7e2d34e6b115da08700a1ae02e9355,https://arxiv.org/pdf/1807.10002.pdf
+8006219efb6ab76754616b0e8b7778dcfb46603d,https://pdfs.semanticscholar.org/7f79/e78e52883994a8a843af48922980ae730e65.pdf
+803c92a3f0815dbf97e30c4ee9450fd005586e1a,https://arxiv.org/pdf/1802.09308.pdf
+80345fbb6bb6bcc5ab1a7adcc7979a0262b8a923,https://pdfs.semanticscholar.org/4569/f8e017af1e052b075d8a267116a8b795bd84.pdf
+80097a879fceff2a9a955bf7613b0d3bfa68dc23,https://arxiv.org/pdf/1701.03555.pdf
+74ce7e5e677a4925489897665c152a352c49d0a2,https://arxiv.org/pdf/1805.03356.pdf
+74dbe6e0486e417a108923295c80551b6d759dbe,https://pdfs.semanticscholar.org/ab95/1e780a7e8e28866b44c6a1a591ec470904b4.pdf
+747c25bff37b96def96dc039cc13f8a7f42dbbc7,https://arxiv.org/pdf/1503.01800.pdf
+744fa8062d0ae1a11b79592f0cd3fef133807a03,https://pdfs.semanticscholar.org/b5fd/440edd27702c8dbfa38fac0bf23deacf33cb.pdf
+749d605dd12a4af58de1fae6f5ef5e65eb06540e,https://arxiv.org/pdf/1704.07489.pdf
+74c19438c78a136677a7cb9004c53684a4ae56ff,https://pdfs.semanticscholar.org/74c1/9438c78a136677a7cb9004c53684a4ae56ff.pdf
+1a849b694f2d68c3536ed849ed78c82e979d64d5,https://pdfs.semanticscholar.org/318c/a222a7a4dfc63807c6b6c4285cc63c8610ba.pdf
+281486d172cf0c78d348ce7d977a82ff763efccd,https://arxiv.org/pdf/1708.03911.pdf
+288964068cd87d97a98b8bc927d6e0d2349458a2,https://pdfs.semanticscholar.org/2889/64068cd87d97a98b8bc927d6e0d2349458a2.pdf
+28d4e027c7e90b51b7d8908fce68128d1964668a,https://arxiv.org/pdf/1705.00393.pdf
+2866cbeb25551257683cf28f33d829932be651fe,https://arxiv.org/pdf/1809.04621.pdf
+178a82e3a0541fa75c6a11350be5bded133a59fd,https://pdfs.semanticscholar.org/178a/82e3a0541fa75c6a11350be5bded133a59fd.pdf
+17479e015a2dcf15d40190e06419a135b66da4e0,https://arxiv.org/pdf/1610.08119.pdf
+17a995680482183f3463d2e01dd4c113ebb31608,https://arxiv.org/pdf/1802.06459.pdf
+17c0d99171efc957b88c31a465c59485ab033234,https://arxiv.org/pdf/1807.11458.pdf
+17a8d1b1b4c23a630b051f35e47663fc04dcf043,https://arxiv.org/pdf/1612.02372.pdf
+173657da03e3249f4e47457d360ab83b3cefbe63,https://pdfs.semanticscholar.org/1736/57da03e3249f4e47457d360ab83b3cefbe63.pdf
+7bbaa09c9e318da4370a83b126bcdb214e7f8428,https://pdfs.semanticscholar.org/7bba/a09c9e318da4370a83b126bcdb214e7f8428.pdf
+7b0f1fc93fb24630eb598330e13f7b839fb46cce,https://arxiv.org/pdf/1805.04771.pdf
+8fe38962c24300129391f6d7ac24d7783e0fddd0,https://arxiv.org/pdf/1801.01967.pdf
+8f6d05b8f9860c33c7b1a5d704694ed628db66c7,https://pdfs.semanticscholar.org/f1db/7f2e05e9c955cd59ac3d9040ab9b406c0b66.pdf
+8f772d9ce324b2ef5857d6e0b2a420bc93961196,https://arxiv.org/pdf/1805.01760.pdf
+8fda2f6b85c7e34d3e23927e501a4b4f7fc15b2a,https://pdfs.semanticscholar.org/8fda/2f6b85c7e34d3e23927e501a4b4f7fc15b2a.pdf
+8fed5ea3b69ea441a8b02f61473eafee25fb2374,https://pdfs.semanticscholar.org/8fed/5ea3b69ea441a8b02f61473eafee25fb2374.pdf
+8f3da45ff0c3e1777c3a7830f79c10f5896bcc21,https://pdfs.semanticscholar.org/8f3d/a45ff0c3e1777c3a7830f79c10f5896bcc21.pdf
+8ff8c64288a2f7e4e8bf8fda865820b04ab3dbe8,https://pdfs.semanticscholar.org/0056/92b9fa6728df3a7f14578c43410867bba425.pdf
+8f9c37f351a91ed416baa8b6cdb4022b231b9085,https://pdfs.semanticscholar.org/8f9c/37f351a91ed416baa8b6cdb4022b231b9085.pdf
+8acdc4be8274e5d189fb67b841c25debf5223840,https://pdfs.semanticscholar.org/8acd/c4be8274e5d189fb67b841c25debf5223840.pdf
+8a8861ad6caedc3993e31d46e7de6c251a8cda22,https://arxiv.org/pdf/1706.01869.pdf
+8a866bc0d925dfd8bb10769b8b87d7d0ff01774d,https://pdfs.semanticscholar.org/34b7/2d4fb60b36bbf34ff3b1ce3045ba303ab643.pdf
+8a0159919ee4e1a9f4cbfb652a1be212bf0554fd,https://pdfs.semanticscholar.org/8a01/59919ee4e1a9f4cbfb652a1be212bf0554fd.pdf
+7e600faee0ba11467d3f7aed57258b0db0448a72,https://pdfs.semanticscholar.org/0f09/4a0cef9f81da0e4915e6ed45f73aef6d6976.pdf
+7ed3b79248d92b255450c7becd32b9e5c834a31e,https://pdfs.semanticscholar.org/7ed3/b79248d92b255450c7becd32b9e5c834a31e.pdf
+7eaa97be59019f0d36aa7dac27407b004cad5e93,https://arxiv.org/pdf/1609.04468.pdf
+7eb895e7de883d113b75eda54389460c61d63f67,https://arxiv.org/pdf/1709.02993.pdf
+7e467e686f9468b826133275484e0a1ec0f5bde6,https://arxiv.org/pdf/1407.4764.pdf
+7ef0cc4f3f7566f96f168123bac1e07053a939b2,https://pdfs.semanticscholar.org/e735/b8212d8a81909753291d5d06789a917014f8.pdf
+7e2cfbfd43045fbd6aabd9a45090a5716fc4e179,https://arxiv.org/pdf/1808.00435.pdf
+7ebb153704706e457ab57b432793d2b6e5d12592,https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf
+7ec7163ec1bc237c4c2f2841c386f2dbfd0cc922,https://pdfs.semanticscholar.org/7ec7/163ec1bc237c4c2f2841c386f2dbfd0cc922.pdf
+7ef44b7c2b5533d00001ae81f9293bdb592f1146,https://pdfs.semanticscholar.org/7ef4/4b7c2b5533d00001ae81f9293bdb592f1146.pdf
+10af69f11301679b6fbb23855bf10f6af1f3d2e6,https://arxiv.org/pdf/1411.6660.pdf
+101569eeef2cecc576578bd6500f1c2dcc0274e2,https://arxiv.org/pdf/1805.12317.pdf
+101d4cfbd6f8a7a10bd33505e2b183183f1d8770,https://pdfs.semanticscholar.org/d2d7/3d4a60ff9a4bb9544d05796637cb6a419e6a.pdf
+106092fafb53e36077eba88f06feecd07b9e78e7,https://arxiv.org/pdf/1711.06330.pdf
+103c8eaca2a2176babab2cc6e9b25d48870d6928,https://pdfs.semanticscholar.org/14ad/c9c2b776c751d254f9c924fcb7578563f8b8.pdf
+1921795408345751791b44b379f51b7dd54ebfa2,https://arxiv.org/pdf/1807.07872.pdf
+1910f5f7ac81d4fcc30284e88dee3537887acdf3,https://pdfs.semanticscholar.org/1910/f5f7ac81d4fcc30284e88dee3537887acdf3.pdf
+197c64c36e8a9d624a05ee98b740d87f94b4040c,https://arxiv.org/pdf/1804.04421.pdf
+4c29e1f31660ba33e46d7e4ffdebb9b8c6bd5adc,https://arxiv.org/pdf/1807.09192.pdf
+4ccf64fc1c9ca71d6aefdf912caf8fea048fb211,https://arxiv.org/pdf/1804.08572.pdf
+4cdb6144d56098b819076a8572a664a2c2d27f72,https://arxiv.org/pdf/1806.01196.pdf
+4c4e49033737467e28aa2bb32f6c21000deda2ef,https://arxiv.org/pdf/1709.01591.pdf
+26a44feb7a64db7986473ca801c251aa88748477,https://arxiv.org/pdf/1804.02744.pdf
+264f7ab36ff2e23a1514577a6404229d7fe1242b,https://pdfs.semanticscholar.org/264f/7ab36ff2e23a1514577a6404229d7fe1242b.pdf
+266766818dbc5a4ca1161ae2bc14c9e269ddc490,https://pdfs.semanticscholar.org/2667/66818dbc5a4ca1161ae2bc14c9e269ddc490.pdf
+26e570049aaedcfa420fc8c7b761bc70a195657c,https://pdfs.semanticscholar.org/26e5/70049aaedcfa420fc8c7b761bc70a195657c.pdf
+2135a3d9f4b8f5771fa5fc7c7794abf8c2840c44,https://pdfs.semanticscholar.org/2135/a3d9f4b8f5771fa5fc7c7794abf8c2840c44.pdf
+212608e00fc1e8912ff845ee7a4a67f88ba938fc,https://arxiv.org/pdf/1704.02450.pdf
+4db9e5f19366fe5d6a98ca43c1d113dac823a14d,https://pdfs.semanticscholar.org/a55d/ea7981ea0f90d1110005b5f5ca68a3175910.pdf
+4db0968270f4e7b3fa73e41c50d13d48e20687be,https://arxiv.org/pdf/1705.06394.pdf
+4d6ad0c7b3cf74adb0507dc886993e603c863e8c,https://pdfs.semanticscholar.org/4d6a/d0c7b3cf74adb0507dc886993e603c863e8c.pdf
+4dca3d6341e1d991c902492952e726dc2a443d1c,https://arxiv.org/pdf/1805.09298.pdf
+4d47261b2f52c361c09f7ab96fcb3f5c22cafb9f,https://arxiv.org/pdf/1709.03196.pdf
+75879ab7a77318bbe506cb9df309d99205862f6c,https://pdfs.semanticscholar.org/7587/9ab7a77318bbe506cb9df309d99205862f6c.pdf
+7574f999d2325803f88c4915ba8f304cccc232d1,https://arxiv.org/pdf/1705.04396.pdf
+75308067ddd3c53721430d7984295838c81d4106,https://pdfs.semanticscholar.org/7530/8067ddd3c53721430d7984295838c81d4106.pdf
+759cf57215fcfdd8f59c97d14e7f3f62fafa2b30,https://arxiv.org/pdf/1706.09498.pdf
+758d7e1be64cc668c59ef33ba8882c8597406e53,https://arxiv.org/pdf/1708.03985.pdf
+754f7f3e9a44506b814bf9dc06e44fecde599878,https://arxiv.org/pdf/1808.02194.pdf
+75249ebb85b74e8932496272f38af274fbcfd696,https://pdfs.semanticscholar.org/7524/9ebb85b74e8932496272f38af274fbcfd696.pdf
+81a142c751bf0b23315fb6717bc467aa4fdfbc92,https://pdfs.semanticscholar.org/81a1/42c751bf0b23315fb6717bc467aa4fdfbc92.pdf
+8199803f476c12c7f6c0124d55d156b5d91314b6,https://arxiv.org/pdf/1707.06642.pdf
+81706277ed180a92d2eeb94ac0560f7dc591ee13,https://pdfs.semanticscholar.org/8170/6277ed180a92d2eeb94ac0560f7dc591ee13.pdf
+8164ebc07f51c9e0db4902980b5ac3f5a8d8d48c,https://arxiv.org/pdf/1808.00171.pdf
+814d091c973ff6033a83d4e44ab3b6a88cc1cb66,https://pdfs.semanticscholar.org/814d/091c973ff6033a83d4e44ab3b6a88cc1cb66.pdf
+86f191616423efab8c0d352d986126a964983219,https://arxiv.org/pdf/1712.01393.pdf
+869a2fbe42d3fdf40ed8b768edbf54137be7ac71,https://pdfs.semanticscholar.org/915d/4a7202060d77c46e99121c1c8ca875898a11.pdf
+86b6afc667bb14ff4d69e7a5e8bb2454a6bbd2cd,https://pdfs.semanticscholar.org/86b6/afc667bb14ff4d69e7a5e8bb2454a6bbd2cd.pdf
+86d0127e1fd04c3d8ea78401c838af621647dc95,https://arxiv.org/pdf/1804.02810.pdf
+86f3552b822f6af56cb5079cc31616b4035ccc4e,https://arxiv.org/pdf/1604.07547.pdf
+860588fafcc80c823e66429fadd7e816721da42a,https://arxiv.org/pdf/1804.04412.pdf
+86374bb8d309ad4dbde65c21c6fda6586ae4147a,https://arxiv.org/pdf/1712.09184.pdf
+869583b700ecf33a9987447aee9444abfe23f343,https://arxiv.org/pdf/1702.01005.pdf
+721b109970bf5f1862767a1bec3f9a79e815f79a,https://pdfs.semanticscholar.org/721b/109970bf5f1862767a1bec3f9a79e815f79a.pdf
+72591a75469321074b072daff80477d8911c3af3,https://arxiv.org/pdf/1212.3913.pdf
+7224d58a7e1f02b84994b60dc3b84d9fe6941ff5,https://arxiv.org/pdf/1504.02351.pdf
+72cbbdee4f6eeee8b7dd22cea6092c532271009f,https://arxiv.org/pdf/1709.05188.pdf
+721d9c387ed382988fce6fa864446fed5fb23173,https://pdfs.semanticscholar.org/721d/9c387ed382988fce6fa864446fed5fb23173.pdf
+725c3605c2d26d113637097358cd4c08c19ff9e1,https://arxiv.org/pdf/1807.00504.pdf
+44b1399e8569a29eed0d22d88767b1891dbcf987,https://pdfs.semanticscholar.org/44b1/399e8569a29eed0d22d88767b1891dbcf987.pdf
+446dc1413e1cfaee0030dc74a3cee49a47386355,https://arxiv.org/pdf/1710.04837.pdf
+44d23df380af207f5ac5b41459c722c87283e1eb,https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf
+2aec012bb6dcaacd9d7a1e45bc5204fac7b63b3c,https://arxiv.org/pdf/1708.05340.pdf
+2a5903bdb3fdfb4d51f70b77f16852df3b8e5f83,https://arxiv.org/pdf/1805.01515.pdf
+2ab034e1f54c37bfc8ae93f7320160748310dc73,https://arxiv.org/pdf/1805.07242.pdf
+2ffcd35d9b8867a42be23978079f5f24be8d3e35,https://pdfs.semanticscholar.org/2ffc/d35d9b8867a42be23978079f5f24be8d3e35.pdf
+2fc43c2c3f7ad1ca7a1ce32c5a9a98432725fb9a,https://arxiv.org/pdf/1711.09618.pdf
+2f88d3189723669f957d83ad542ac5c2341c37a5,https://pdfs.semanticscholar.org/2f88/d3189723669f957d83ad542ac5c2341c37a5.pdf
+2f17f6c460e02bd105dcbf14c9b73f34c5fb59bd,https://pdfs.semanticscholar.org/2f17/f6c460e02bd105dcbf14c9b73f34c5fb59bd.pdf
+2fea258320c50f36408032c05c54ba455d575809,https://arxiv.org/pdf/1603.08199.pdf
+438c4b320b9a94a939af21061b4502f4a86960e3,https://arxiv.org/pdf/1702.03041.pdf
+43e268c118ac25f1f0e984b57bc54f0119ded520,https://arxiv.org/pdf/1410.4828.pdf
+432d8cba544bf7b09b0455561fea098177a85db1,https://arxiv.org/pdf/1606.02185.pdf
+43d7d0d0d0e2d6cf5355e60c4fe5b715f0a1101a,https://pdfs.semanticscholar.org/4e42/182d40e0ea82bcab2289ae7c8b191dc834f1.pdf
+88e090ffc1f75eed720b5afb167523eb2e316f7f,https://pdfs.semanticscholar.org/88e0/90ffc1f75eed720b5afb167523eb2e316f7f.pdf
+88a898592b4c1dfd707f04f09ca58ec769a257de,https://arxiv.org/pdf/1809.08809.pdf
+8818b12aa0ff3bf0b20f9caa250395cbea0e8769,https://pdfs.semanticscholar.org/8818/b12aa0ff3bf0b20f9caa250395cbea0e8769.pdf
+8895d6ae9f095a8413f663cc83f5b7634b3dc805,https://pdfs.semanticscholar.org/8895/d6ae9f095a8413f663cc83f5b7634b3dc805.pdf
+9fdfe1695adac2380f99d3d5cb6879f0ac7f2bfd,https://pdfs.semanticscholar.org/9fdf/e1695adac2380f99d3d5cb6879f0ac7f2bfd.pdf
+6b333b2c6311e36c2bde920ab5813f8cfcf2b67b,https://pdfs.semanticscholar.org/d330/64c32527a2690bd1b430b1d9f90a2a609a13.pdf
+6b3e360b80268fda4e37ff39b7f303e3684e8719,https://pdfs.semanticscholar.org/75d2/de5858dd45aca9c5db8af6f44617f521cb77.pdf
+6b8d0569fffce5cc221560d459d6aa10c4db2f03,https://arxiv.org/pdf/1806.02479.pdf
+6b6ff9d55e1df06f8b3e6f257e23557a73b2df96,https://pdfs.semanticscholar.org/6b6f/f9d55e1df06f8b3e6f257e23557a73b2df96.pdf
+07377c375ac76a34331c660fe87ebd7f9b3d74c4,https://arxiv.org/pdf/1808.01338.pdf
+07c83f544d0604e6bab5d741b0bf9a3621d133da,https://arxiv.org/pdf/1708.07632.pdf
+07fa153b8e6196ee6ef6efd8b743de8485a07453,https://pdfs.semanticscholar.org/07fa/153b8e6196ee6ef6efd8b743de8485a07453.pdf
+0750a816858b601c0dbf4cfb68066ae7e788f05d,https://arxiv.org/pdf/1801.09414.pdf
+3803b91e784922a2dacd6a18f61b3100629df932,https://arxiv.org/pdf/1709.07200.pdf
+38eea307445a39ee7902c1ecf8cea7e3dcb7c0e7,https://arxiv.org/pdf/1807.00451.pdf
+38c901a58244be9a2644d486f9a1284dc0edbf8a,https://arxiv.org/pdf/1607.06408.pdf
+38f1fac3ed0fd054e009515e7bbc72cdd4cf801a,https://arxiv.org/pdf/1806.08246.pdf
+380d5138cadccc9b5b91c707ba0a9220b0f39271,https://arxiv.org/pdf/1806.00194.pdf
+00fb2836068042c19b5197d0999e8e93b920eb9c,https://pdfs.semanticscholar.org/00fb/2836068042c19b5197d0999e8e93b920eb9c.pdf
+0004f72a00096fa410b179ad12aa3a0d10fc853c,https://pdfs.semanticscholar.org/0004/f72a00096fa410b179ad12aa3a0d10fc853c.pdf
+6e91be2ad74cf7c5969314b2327b513532b1be09,https://arxiv.org/pdf/1412.2404.pdf
+6e8a81d452a91f5231443ac83e4c0a0db4579974,https://pdfs.semanticscholar.org/3f64/a5b26a8297d4b832bc5bb95264cdfabde105.pdf
+6e8c3b7d25e6530a631ea01fbbb93ac1e8b69d2f,https://arxiv.org/pdf/1801.04134.pdf
+6e911227e893d0eecb363015754824bf4366bdb7,https://arxiv.org/pdf/1712.01026.pdf
+6e00a406edb508312108f683effe6d3c1db020fb,https://arxiv.org/pdf/1803.06340.pdf
+9ac43a98fe6fde668afb4fcc115e4ee353a6732d,https://arxiv.org/pdf/1804.07362.pdf
+9a23a0402ae68cc6ea2fe0092b6ec2d40f667adb,https://arxiv.org/pdf/1711.11585.pdf
+9af9a88c60d9e4b53e759823c439fc590a4b5bc5,https://arxiv.org/pdf/1708.00277.pdf
+9aad8e52aff12bd822f0011e6ef85dfc22fe8466,https://arxiv.org/pdf/1809.03669.pdf
+36939e6a365e9db904d81325212177c9e9e76c54,https://pdfs.semanticscholar.org/941b/5492e6ac98355fd7bc7531f846d638e814ac.pdf
+3646b42511a6a0df5470408bc9a7a69bb3c5d742,https://pdfs.semanticscholar.org/2a6a/8d8ed0f980cc3b20d743f43c9e36dec3150e.pdf
+3674f3597bbca3ce05e4423611d871d09882043b,https://pdfs.semanticscholar.org/3674/f3597bbca3ce05e4423611d871d09882043b.pdf
+362bfeb28adac5f45b6ef46c07c59744b4ed6a52,https://arxiv.org/pdf/1808.01727.pdf
+368e99f669ea5fd395b3193cd75b301a76150f9d,https://arxiv.org/pdf/1506.01342.pdf
+3619a9b46ad4779d0a63b20f7a6a8d3d49530339,https://pdfs.semanticscholar.org/3619/a9b46ad4779d0a63b20f7a6a8d3d49530339.pdf
+361d6345919c2edc5c3ce49bb4915ed2b4ee49be,https://pdfs.semanticscholar.org/399e/d1c6b72c765c2c8ec6437c9ef7a1866d0f29.pdf
+5cbe1445d683d605b31377881ac8540e1d17adf0,https://arxiv.org/pdf/1509.06161.pdf
+5c493c42bfd93e4d08517438983e3af65e023a87,https://pdfs.semanticscholar.org/5c49/3c42bfd93e4d08517438983e3af65e023a87.pdf
+5c35ac04260e281141b3aaa7bbb147032c887f0c,https://pdfs.semanticscholar.org/5c35/ac04260e281141b3aaa7bbb147032c887f0c.pdf
+5c4d4fd37e8c80ae95c00973531f34a6d810ea3a,https://arxiv.org/pdf/1603.09439.pdf
+09137e3c267a3414314d1e7e4b0e3a4cae801f45,https://arxiv.org/pdf/1711.06078.pdf
+09926ed62511c340f4540b5bc53cf2480e8063f8,https://pdfs.semanticscholar.org/0992/6ed62511c340f4540b5bc53cf2480e8063f8.pdf
+0951f42abbf649bb564a21d4ff5dddf9a5ea54d9,https://arxiv.org/pdf/1806.02023.pdf
+097340d3ac939ce181c829afb6b6faff946cdce0,https://arxiv.org/pdf/1805.11119.pdf
+09507f1f1253101d04a975fc5600952eac868602,https://arxiv.org/pdf/1807.10037.pdf
+09df62fd17d3d833ea6b5a52a232fc052d4da3f5,https://pdfs.semanticscholar.org/5baf/412bc25d131c2da702a6d3b972de7212c50b.pdf
+5db4fe0ce9e9227042144758cf6c4c2de2042435,https://pdfs.semanticscholar.org/5db4/fe0ce9e9227042144758cf6c4c2de2042435.pdf
+5da2ae30e5ee22d00f87ebba8cd44a6d55c6855e,https://pdfs.semanticscholar.org/0946/ce4615f74c4666878757a5eb89494a1f208b.pdf
+318e7e6daa0a799c83a9fdf7dd6bc0b3e89ab24a,https://arxiv.org/pdf/1601.04805.pdf
+313d5eba97fe064bdc1f00b7587a4b3543ef712a,https://pdfs.semanticscholar.org/313d/5eba97fe064bdc1f00b7587a4b3543ef712a.pdf
+31ea88f29e7f01a9801648d808f90862e066f9ea,https://arxiv.org/pdf/1605.06391.pdf
+3176ee88d1bb137d0b561ee63edf10876f805cf0,https://arxiv.org/pdf/1511.07356.pdf
+312b2566e315dd6e65bd42cfcbe4d919159de8a1,https://pdfs.semanticscholar.org/312b/2566e315dd6e65bd42cfcbe4d919159de8a1.pdf
+91495c689e6e614247495c3f322d400d8098de43,https://pdfs.semanticscholar.org/9149/5c689e6e614247495c3f322d400d8098de43.pdf
+917bea27af1846b649e2bced624e8df1d9b79d6f,https://arxiv.org/pdf/1805.00361.pdf
+911bef7465665d8b194b6b0370b2b2389dfda1a1,https://arxiv.org/pdf/1806.05666.pdf
+91ead35d1d2ff2ea7cf35d15b14996471404f68d,https://arxiv.org/pdf/1702.01325.pdf
+9131c990fad219726eb38384976868b968ee9d9c,https://arxiv.org/pdf/1804.08348.pdf
+911505a4242da555c6828509d1b47ba7854abb7a,https://pdfs.semanticscholar.org/9115/05a4242da555c6828509d1b47ba7854abb7a.pdf
+656531036cee6b2c2c71954bb6540ef6b2e016d0,https://arxiv.org/pdf/1511.04601.pdf
+65b1209d38c259fe9ca17b537f3fb4d1857580ae,https://arxiv.org/pdf/1805.08672.pdf
+656f05741c402ba43bb1b9a58bcc5f7ce2403d9a,https://pdfs.semanticscholar.org/656f/05741c402ba43bb1b9a58bcc5f7ce2403d9a.pdf
+653d19e64bd75648cdb149f755d59e583b8367e3,https://arxiv.org/pdf/1706.02613.pdf
+65babb10e727382b31ca5479b452ee725917c739,https://arxiv.org/pdf/1408.6027.pdf
+62dccab9ab715f33761a5315746ed02e48eed2a0,https://arxiv.org/pdf/1808.01340.pdf
+620339aef06aed07a78f9ed1a057a25433faa58b,https://arxiv.org/pdf/1806.11230.pdf
+62b3598b401c807288a113796f424612cc5833ca,https://arxiv.org/pdf/1807.10550.pdf
+628a3f027b7646f398c68a680add48c7969ab1d9,https://pdfs.semanticscholar.org/628a/3f027b7646f398c68a680add48c7969ab1d9.pdf
+626913b8fcbbaee8932997d6c4a78fe1ce646127,https://arxiv.org/pdf/1711.05942.pdf
+626859fe8cafd25da13b19d44d8d9eb6f0918647,https://arxiv.org/pdf/1708.06637.pdf
+62fd622b3ca97eb5577fd423fb9efde9a849cbef,https://arxiv.org/pdf/1809.02169.pdf
+62007c30f148334fb4d8975f80afe76e5aef8c7f,https://arxiv.org/pdf/1712.03999.pdf
+96f0e7416994035c91f4e0dfa40fd45090debfc5,https://arxiv.org/pdf/1803.01260.pdf
+963d0d40de8780161b70d28d2b125b5222e75596,https://arxiv.org/pdf/1611.08657.pdf
+96a9ca7a8366ae0efe6b58a515d15b44776faf6e,https://arxiv.org/pdf/1609.00129.pdf
+96e1ccfe96566e3c96d7b86e134fa698c01f2289,https://arxiv.org/pdf/1712.00321.pdf
+9627f28ea5f4c389350572b15968386d7ce3fe49,https://arxiv.org/pdf/1802.07447.pdf
+96b1000031c53cd4c1c154013bb722ffd87fa7da,https://arxiv.org/pdf/1710.08518.pdf
+96c6f50ce8e1b9e8215b8791dabd78b2bbd5f28d,https://arxiv.org/pdf/1611.05396.pdf
+3a27d164e931c422d16481916a2fa6401b74bcef,https://arxiv.org/pdf/1709.03654.pdf
+3a3f75e0ffdc0eef07c42b470593827fcd4020b4,https://arxiv.org/pdf/1805.05269.pdf
+3a2c90e0963bfb07fc7cd1b5061383e9a99c39d2,https://arxiv.org/pdf/1710.03804.pdf
+3a9681e2e07be7b40b59c32a49a6ff4c40c962a2,https://pdfs.semanticscholar.org/1c95/1714996c573b00e63878acdc48cdc4ddc183.pdf
+54948ee407b5d32da4b2eee377cc44f20c3a7e0c,https://arxiv.org/pdf/1806.06296.pdf
+540b39ba1b8ef06293ed793f130e0483e777e278,https://pdfs.semanticscholar.org/540b/39ba1b8ef06293ed793f130e0483e777e278.pdf
+54969bcd728b0f2d3285866c86ef0b4797c2a74d,https://arxiv.org/pdf/1804.09869.pdf
+54a9ed950458f4b7e348fa78a718657c8d3d0e05,https://arxiv.org/pdf/1807.04001.pdf
+54ce3ff2ab6e4465c2f94eb4d636183fa7878ab7,https://pdfs.semanticscholar.org/54ce/3ff2ab6e4465c2f94eb4d636183fa7878ab7.pdf
+54204e28af73c7aca073835a14afcc5d8f52a515,https://arxiv.org/pdf/1805.12185.pdf
+9853136dbd7d5f6a9c57dc66060cab44a86cd662,https://pdfs.semanticscholar.org/f3fb/f05026afb46b0186f6abbcbbcc08887f1be5.pdf
+9865fe20df8fe11717d92b5ea63469f59cf1635a,https://arxiv.org/pdf/1805.07566.pdf
+9825c4dddeb2ed7eaab668b55403aa2c38bc3320,https://arxiv.org/pdf/1807.09532.pdf
+533d14e539ae5cdca0ece392487a2b19106d468a,https://arxiv.org/pdf/1611.09053.pdf
+53dd25350d3b3aaf19beb2104f1e389e3442df61,https://pdfs.semanticscholar.org/a2ee/e3191d860c854936d11365d4745224d89b53.pdf
+530243b61fa5aea19b454b7dbcac9f463ed0460e,https://arxiv.org/pdf/1807.11079.pdf
+53c36186bf0ffbe2f39165a1824c965c6394fe0d,https://arxiv.org/pdf/1805.00326.pdf
+53a41c711b40e7fe3dc2b12e0790933d9c99a6e0,https://arxiv.org/pdf/1611.06492.pdf
+53bfe2ab770e74d064303f3bd2867e5bf7b86379,https://pdfs.semanticscholar.org/d989/c3064d49bf8e63587ada4ed2bdb0d32b120a.pdf
+53ce84598052308b86ba79d873082853022aa7e9,https://pdfs.semanticscholar.org/4f07/b70883a98a69be3b3e29de06c73e59a9ba0e.pdf
+3f9ca2526013e358cd8caeb66a3d7161f5507cbc,https://arxiv.org/pdf/1607.01059.pdf
+3fb98e76ffd8ba79e1c22eda4d640da0c037e98a,https://pdfs.semanticscholar.org/b49a/a569ff63d045b7c0ce66d77e1345d4f9745c.pdf
+3fb4bf38d34f7f7e5b3df36de2413d34da3e174a,https://arxiv.org/pdf/1807.09882.pdf
+3f9a7d690db82cf5c3940fbb06b827ced59ec01e,https://arxiv.org/pdf/1502.05678.pdf
+3f5e8f884e71310d7d5571bd98e5a049b8175075,https://pdfs.semanticscholar.org/3f5e/8f884e71310d7d5571bd98e5a049b8175075.pdf
+3f5693584d7dab13ffc12122d6ddbf862783028b,https://arxiv.org/pdf/1804.04082.pdf
+30b15cdb72760f20f80e04157b57be9029d8a1ab,https://pdfs.semanticscholar.org/30b1/5cdb72760f20f80e04157b57be9029d8a1ab.pdf
+30870ef75aa57e41f54310283c0057451c8c822b,https://arxiv.org/pdf/1801.01423.pdf
+305346d01298edeb5c6dc8b55679e8f60ba97efb,https://pdfs.semanticscholar.org/3053/46d01298edeb5c6dc8b55679e8f60ba97efb.pdf
+30fd1363fa14965e3ab48a7d6235e4b3516c1da1,https://pdfs.semanticscholar.org/6bc2/07bab6a2b4ec335023474b391c9cb23e2e6d.pdf
+30cbd41e997445745b6edd31f2ebcc7533453b61,https://pdfs.semanticscholar.org/1a50/4cdd40877e3d74ed87666c8c540bb1643c79.pdf
+5e6f546a50ed97658be9310d5e0a67891fe8a102,https://arxiv.org/pdf/1711.09577.pdf
+5e7e055ef9ba6e8566a400a8b1c6d8f827099553,https://pdfs.semanticscholar.org/5e7e/055ef9ba6e8566a400a8b1c6d8f827099553.pdf
+5ba7882700718e996d576b58528f1838e5559225,https://pdfs.semanticscholar.org/5ba7/882700718e996d576b58528f1838e5559225.pdf
+5b0008ba87667085912ea474025d2323a14bfc90,https://pdfs.semanticscholar.org/5b00/08ba87667085912ea474025d2323a14bfc90.pdf
+5b97e997b9b654373bd129b3baf5b82c2def13d1,https://pdfs.semanticscholar.org/5b97/e997b9b654373bd129b3baf5b82c2def13d1.pdf
+5bd3d08335bb4e444a86200c5e9f57fd9d719e14,https://pdfs.semanticscholar.org/5bd3/d08335bb4e444a86200c5e9f57fd9d719e14.pdf
+5babbad3daac5c26503088782fd5b62067b94fa5,https://arxiv.org/pdf/1809.02652.pdf
+5b2cfee6e81ef36507ebf3c305e84e9e0473575a,https://arxiv.org/pdf/1704.02402.pdf
+5b721f86f4a394f05350641e639a9d6cb2046c45,https://arxiv.org/pdf/1603.09638.pdf
+5b4b84ce3518c8a14f57f5f95a1d07fb60e58223,https://pdfs.semanticscholar.org/9f92/05a60ddf1135929e0747db34363b3a8c6bc8.pdf
+372fb32569ced35eaf3740a29890bec2be1869fa,https://pdfs.semanticscholar.org/372f/b32569ced35eaf3740a29890bec2be1869fa.pdf
+3795974e24296185d9b64454cde6f796ca235387,https://arxiv.org/pdf/1806.05252.pdf
+377f2b65e6a9300448bdccf678cde59449ecd337,https://arxiv.org/pdf/1804.10275.pdf
+370b6b83c7512419188f5373a962dd3175a56a9b,https://pdfs.semanticscholar.org/370b/6b83c7512419188f5373a962dd3175a56a9b.pdf
+372a8bf0ef757c08551d41e40cb7a485527b6cd7,https://pdfs.semanticscholar.org/2dcf/a8d72fee8732350935718ab86f3d9f3458cb.pdf
+08f4832507259ded9700de81f5fd462caf0d5be8,https://pdfs.semanticscholar.org/ad40/d61bf27e177d078df12727267f3190eee2b0.pdf
+08903bf161a1e8dec29250a752ce9e2a508a711c,https://pdfs.semanticscholar.org/e7f6/bfb9bb591eb1404ae13f0fa13ad4a3179150.pdf
+084bebc5c98872e9307cd8e7f571d39ef9c1b81e,https://pdfs.semanticscholar.org/8774/e206564df3bf9050f8c2be6b434cc2469c5b.pdf
+0857281a3b6a5faba1405e2c11f4e17191d3824d,https://pdfs.semanticscholar.org/0857/281a3b6a5faba1405e2c11f4e17191d3824d.pdf
+08d41d2f68a2bf0091dc373573ca379de9b16385,https://arxiv.org/pdf/1802.05023.pdf
+6d0fe30444c6f4e4db3ad8b02fb2c87e2b33c58d,https://arxiv.org/pdf/1607.00659.pdf
+6dbdb07ce2991db0f64c785ad31196dfd4dae721,https://arxiv.org/pdf/1802.09058.pdf
+6dddf1440617bf7acda40d4d75c7fb4bf9517dbb,https://arxiv.org/pdf/1705.10118.pdf
+6d07e176c754ac42773690d4b4919a39df85d7ec,https://pdfs.semanticscholar.org/6d07/e176c754ac42773690d4b4919a39df85d7ec.pdf
+6d8c9a1759e7204eacb4eeb06567ad0ef4229f93,https://arxiv.org/pdf/1707.05938.pdf
+6dc1f94b852538d572e4919238ddb10e2ee449a4,https://arxiv.org/pdf/1703.09529.pdf
+6d5125c9407c7762620eeea7570af1a8ee7d76f3,https://arxiv.org/pdf/1807.01462.pdf
+01c4cf9c7c08f0ad3f386d88725da564f3c54679,https://pdfs.semanticscholar.org/01c4/cf9c7c08f0ad3f386d88725da564f3c54679.pdf
+014e3d0fa5248e6f4634dc237e2398160294edce,https://arxiv.org/pdf/1708.06703.pdf
+06262d6beeccf2784e4e36a995d5ee2ff73c8d11,https://pdfs.semanticscholar.org/0626/2d6beeccf2784e4e36a995d5ee2ff73c8d11.pdf
+06f585a3a05dd3371cd600a40dc35500e2f82f9b,https://arxiv.org/pdf/1804.10069.pdf
+06560d5721ecc487a4d70905a485e22c9542a522,https://pdfs.semanticscholar.org/0656/0d5721ecc487a4d70905a485e22c9542a522.pdf
+062c41dad67bb68fefd9ff0c5c4d296e796004dc,https://arxiv.org/pdf/1611.06624.pdf
+06c2086f7f72536bf970ca629151b16927104df3,https://arxiv.org/pdf/1805.03064.pdf
+6c66ae815e7e508e852ecb122fb796abbcda16a8,https://pdfs.semanticscholar.org/6c66/ae815e7e508e852ecb122fb796abbcda16a8.pdf
+6ca2c5ff41e91c34696f84291a458d1312d15bf2,https://pdfs.semanticscholar.org/c70b/2c373917ba61a871b97119413db1eadcf423.pdf
+6c5fbf156ef9fc782be0089309074cc52617b868,https://pdfs.semanticscholar.org/fe4c/3f97a80b73be4fad18cc1bfb72354efb528e.pdf
+6c304f3b9c3a711a0cca5c62ce221fb098dccff0,https://arxiv.org/pdf/1708.05980.pdf
+6c80c834d426f0bc4acd6355b1946b71b50cbc0b,https://arxiv.org/pdf/1805.08484.pdf
+6cb7648465ba7757ecc9c222ac1ab6402933d983,https://arxiv.org/pdf/1708.05827.pdf
+6cfc337069868568148f65732c52cbcef963f79d,https://pdfs.semanticscholar.org/80d7/8415aee24e65ea3031c31adc1dabc1956f8a.pdf
+39ed31ced75e6151dde41944a47b4bdf324f922b,https://pdfs.semanticscholar.org/39ed/31ced75e6151dde41944a47b4bdf324f922b.pdf
+39c8b34c1b678235b60b648d0b11d241a34c8e32,https://arxiv.org/pdf/1805.05503.pdf
+3947b64dcac5bcc1d3c8e9dcb50558efbb8770f1,https://arxiv.org/pdf/1612.00738.pdf
+3965d61c4f3b72044f43609c808f8760af8781a2,https://arxiv.org/pdf/1808.01121.pdf
+395bf182983e0917f33b9701e385290b64e22f9a,https://pdfs.semanticscholar.org/8ab5/18efa79af7d45faa425d1ccd82cfa3aba547.pdf
+3933e323653ff27e68c3458d245b47e3e37f52fd,https://pdfs.semanticscholar.org/3933/e323653ff27e68c3458d245b47e3e37f52fd.pdf
+39b452453bea9ce398613d8dd627984fd3a0d53c,https://arxiv.org/pdf/1611.02155.pdf
+994f7c469219ccce59c89badf93c0661aae34264,https://pdfs.semanticscholar.org/994f/7c469219ccce59c89badf93c0661aae34264.pdf
+993d189548e8702b1cb0b02603ef02656802c92b,https://arxiv.org/pdf/1809.05992.pdf
+9901f473aeea177a55e58bac8fd4f1b086e575a4,https://arxiv.org/pdf/1509.04954.pdf
+99c20eb5433ed27e70881d026d1dbe378a12b342,https://pdfs.semanticscholar.org/2eb3/74476c9431a614b1841df1a7c32a4cd095e0.pdf
+99facca6fc50cc30f13b7b6dd49ace24bc94f702,https://arxiv.org/pdf/1609.03892.pdf
+99d7678039ad96ee29ab520ff114bb8021222a91,https://pdfs.semanticscholar.org/99d7/678039ad96ee29ab520ff114bb8021222a91.pdf
+523854a7d8755e944bd50217c14481fe1329a969,https://arxiv.org/pdf/1808.00380.pdf
+52472ec859131844f38fc7d57944778f01d109ac,https://arxiv.org/pdf/1707.02749.pdf
+52d7eb0fbc3522434c13cc247549f74bb9609c5d,https://arxiv.org/pdf/1511.06523.pdf
+529baf1a79cca813f8c9966ceaa9b3e42748c058,https://pdfs.semanticscholar.org/6ae7/47cf58eeda0687a3f779aaecfa12403b9684.pdf
+55ea0c775b25d9d04b5886e322db852e86a556cd,https://arxiv.org/pdf/1804.01077.pdf
+55c68c1237166679d2cb65f266f496d1ecd4bec6,https://arxiv.org/pdf/1802.02774.pdf
+5550a6df1b118a80c00a2459bae216a7e8e3966c,https://pdfs.semanticscholar.org/5550/a6df1b118a80c00a2459bae216a7e8e3966c.pdf
+55e87050b998eb0a8f0b16163ef5a28f984b01fa,https://arxiv.org/pdf/1710.10736.pdf
+55c40cbcf49a0225e72d911d762c27bb1c2d14aa,https://pdfs.semanticscholar.org/55c4/0cbcf49a0225e72d911d762c27bb1c2d14aa.pdf
+9788b491ddc188941dadf441fc143a4075bff764,https://pdfs.semanticscholar.org/9788/b491ddc188941dadf441fc143a4075bff764.pdf
+97137d5154a9f22a5d9ecc32e8e2b95d07a5a571,https://arxiv.org/pdf/1604.04337.pdf
+9730b9cd998c0a549601c554221a596deda8af5b,https://arxiv.org/pdf/1704.07945.pdf
+97cf04eaf1fc0ac4de0f5ad4a510d57ce12544f5,https://arxiv.org/pdf/1804.10938.pdf
+975978ee6a32383d6f4f026b944099e7739e5890,https://pdfs.semanticscholar.org/9759/78ee6a32383d6f4f026b944099e7739e5890.pdf
+632b24ddd42fda4aebc5a8af3ec44f7fd3ecdc6c,https://arxiv.org/pdf/1604.02647.pdf
+631483c15641c3652377f66c8380ff684f3e365c,https://arxiv.org/pdf/1611.10314.pdf
+632fa986bed53862d83918c2b71ab953fd70d6cc,https://arxiv.org/pdf/1805.10355.pdf
+633101e794d7b80f55f466fd2941ea24595e10e6,https://pdfs.semanticscholar.org/6331/01e794d7b80f55f466fd2941ea24595e10e6.pdf
+0f21a39fa4c0a19c4a5b4733579e393cb1d04f71,https://pdfs.semanticscholar.org/0f21/a39fa4c0a19c4a5b4733579e393cb1d04f71.pdf
+0fd1bffb171699a968c700f206665b2f8837d953,https://arxiv.org/pdf/1503.00949.pdf
+0a0321785c8beac1cbaaec4d8ad0cfd4a0d6d457,https://pdfs.semanticscholar.org/0a03/21785c8beac1cbaaec4d8ad0cfd4a0d6d457.pdf
+0a60d9d62620e4f9bb3596ab7bb37afef0a90a4f,https://pdfs.semanticscholar.org/0de4/0e8adc31a15af7496c92f261f9f703afed1d.pdf
+0a34fe39e9938ae8c813a81ae6d2d3a325600e5c,https://arxiv.org/pdf/1708.07517.pdf
+0a9d204db13d395f024067cf70ac19c2eeb5f942,https://arxiv.org/pdf/1804.02843.pdf
+0a4fc9016aacae9cdf40663a75045b71e64a70c9,https://pdfs.semanticscholar.org/0235/563971fcf8b517271f8e4f424305fffa10f2.pdf
+0a85afebaa19c80fddb660110a4352fd22eb2801,https://arxiv.org/pdf/1809.03658.pdf
+0a7309147d777c2f20f780a696efe743520aa2db,https://arxiv.org/pdf/1805.05622.pdf
+0a6a25ee84fc0bf7284f41eaa6fefaa58b5b329a,https://arxiv.org/pdf/1807.05292.pdf
+641f0989b87bf7db67a64900dcc9568767b7b50f,https://pdfs.semanticscholar.org/e25a/6836e5f5dc6cf691cd9c42224c0f7f4bb42c.pdf
+64ec0c53dd1aa51eb15e8c2a577701e165b8517b,https://arxiv.org/pdf/1803.11521.pdf
+645de797f936cb19c1b8dba3b862543645510544,https://arxiv.org/pdf/1611.06678.pdf
+64d7e62f46813b5ad08289aed5dc4825d7ec5cff,https://pdfs.semanticscholar.org/f7e1/251d831b763d1ee10bfc6fae78990405f9f9.pdf
+90ac0f32c0c29aa4545ed3d5070af17f195d015f,https://pdfs.semanticscholar.org/2322/1b7ff507d23da4e4b47b7228170b4fd224b8.pdf
+90498b95fe8b299ce65d5cafaef942aa58bd68b7,https://arxiv.org/pdf/1804.08790.pdf
+90cc2f08a6c2f0c41a9dd1786bae097f9292105e,https://arxiv.org/pdf/1808.09892.pdf
+90d9209d5dd679b159051a8315423a7f796d704d,https://arxiv.org/pdf/1808.05085.pdf
+bf961e4a57a8f7e9d792e6c2513ee1fb293658e9,https://pdfs.semanticscholar.org/5db5/7be8bfed8f3a34aebc45dc69c4d4a7dee570.pdf
+bf54b5586cdb0b32f6eed35798ff91592b03fbc4,https://pdfs.semanticscholar.org/bf54/b5586cdb0b32f6eed35798ff91592b03fbc4.pdf
+bf5940d57f97ed20c50278a81e901ae4656f0f2c,https://arxiv.org/pdf/1711.00248.pdf
+bff567c58db554858c7f39870cff7c306523dfee,https://arxiv.org/pdf/1807.03480.pdf
+d35534f3f59631951011539da2fe83f2844ca245,https://arxiv.org/pdf/1705.07904.pdf
+d3edbfe18610ce63f83db83f7fbc7634dde1eb40,https://pdfs.semanticscholar.org/d3ed/bfe18610ce63f83db83f7fbc7634dde1eb40.pdf
+d3d5d86afec84c0713ec868cf5ed41661fc96edc,https://arxiv.org/pdf/1606.02894.pdf
+d3b18ba0d9b247bfa2fb95543d172ef888dfff95,https://pdfs.semanticscholar.org/0a92/0b6ed81de2e7665784eba433cb1cf15e73ad.pdf
+d309e414f0d6e56e7ba45736d28ee58ae2bad478,https://pdfs.semanticscholar.org/d309/e414f0d6e56e7ba45736d28ee58ae2bad478.pdf
+d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9,https://arxiv.org/pdf/1804.04326.pdf
+d307a766cc9c728a24422313d4c3dcfdb0d16dd5,https://arxiv.org/pdf/1804.10021.pdf
+d31af74425719a3840b496b7932e0887b35e9e0d,https://pdfs.semanticscholar.org/d31a/f74425719a3840b496b7932e0887b35e9e0d.pdf
+d3b0839324d0091e70ce34f44c979b9366547327,https://arxiv.org/pdf/1804.10743.pdf
+d3faed04712b4634b47e1de0340070653546deb2,https://arxiv.org/pdf/1805.04140.pdf
+d33fcdaf2c0bd0100ec94b2c437dccdacec66476,https://pdfs.semanticscholar.org/d33f/cdaf2c0bd0100ec94b2c437dccdacec66476.pdf
+d4a5eaf2e9f2fd3e264940039e2cbbf08880a090,https://arxiv.org/pdf/1802.02137.pdf
+d46b790d22cb59df87f9486da28386b0f99339d3,https://pdfs.semanticscholar.org/d46b/790d22cb59df87f9486da28386b0f99339d3.pdf
+d444e010049944c1b3438c9a25ae09b292b17371,https://pdfs.semanticscholar.org/d444/e010049944c1b3438c9a25ae09b292b17371.pdf
+d492dbfaa42b4f8b8a74786d7343b3be6a3e9a1d,https://pdfs.semanticscholar.org/d492/dbfaa42b4f8b8a74786d7343b3be6a3e9a1d.pdf
+d444368421f456baf8c3cb089244e017f8d32c41,https://arxiv.org/pdf/1712.06352.pdf
+d4885ca24189b4414031ca048a8b7eb2c9ac646c,https://arxiv.org/pdf/1807.07718.pdf
+d458c49a5e34263c95b3393386b5d76ba770e497,https://pdfs.semanticscholar.org/d458/c49a5e34263c95b3393386b5d76ba770e497.pdf
+d454ad60b061c1a1450810a0f335fafbfeceeccc,https://arxiv.org/pdf/1712.07195.pdf
+d4e669d5d35fa0ca9f8d9a193c82d4153f5ffc4e,https://pdfs.semanticscholar.org/d4e6/69d5d35fa0ca9f8d9a193c82d4153f5ffc4e.pdf
+d44a93027208816b9e871101693b05adab576d89,https://arxiv.org/pdf/1709.10433.pdf
+badcd992266c6813063c153c41b87babc0ba36a3,https://arxiv.org/pdf/1809.03193.pdf
+ba788365d70fa6c907b71a01d846532ba3110e31,https://arxiv.org/pdf/1805.08657.pdf
+ba7b12c8e2ff3c5e4e0f70b58215b41b18ff8feb,https://arxiv.org/pdf/1711.09001.pdf
+badd371a49d2c4126df95120902a34f4bee01b00,https://arxiv.org/pdf/1809.04096.pdf
+a022eff5470c3446aca683eae9c18319fd2406d5,https://pdfs.semanticscholar.org/a022/eff5470c3446aca683eae9c18319fd2406d5.pdf
+a0c37f07710184597befaa7e6cf2f0893ff440e9,https://arxiv.org/pdf/1805.06374.pdf
+a0fd85b3400c7b3e11122f44dc5870ae2de9009a,https://arxiv.org/pdf/1408.3967.pdf
+a0aa32bb7f406693217fba6dcd4aeb6c4d5a479b,https://pdfs.semanticscholar.org/a0aa/32bb7f406693217fba6dcd4aeb6c4d5a479b.pdf
+a0b1990dd2b4cd87e4fd60912cc1552c34792770,https://pdfs.semanticscholar.org/a0b1/990dd2b4cd87e4fd60912cc1552c34792770.pdf
+a77e9f0bd205a7733431a6d1028f09f57f9f73b0,https://arxiv.org/pdf/1806.07753.pdf
+a7664247a37a89c74d0e1a1606a99119cffc41d4,https://pdfs.semanticscholar.org/a766/4247a37a89c74d0e1a1606a99119cffc41d4.pdf
+a758b744a6d6962f1ddce6f0d04292a0b5cf8e07,https://pdfs.semanticscholar.org/a758/b744a6d6962f1ddce6f0d04292a0b5cf8e07.pdf
+a775da3e6e6ea64bffab7f9baf665528644c7ed3,https://pdfs.semanticscholar.org/0e01/3be45033d43cc658b464cdb55cbf46a994b8.pdf
+b8375ff50b8a6f1a10dd809129a18df96888ac8b,https://pdfs.semanticscholar.org/e94d/8395ab477091c433b020f8fb535eae5c1df5.pdf
+b8f3f6d8f188f65ca8ea2725b248397c7d1e662d,https://arxiv.org/pdf/1611.04357.pdf
+b8ebda42e272d3617375118542d4675a0c0e501d,https://arxiv.org/pdf/1706.07522.pdf
+b1d89015f9b16515735d4140c84b0bacbbef19ac,https://arxiv.org/pdf/1709.00235.pdf
+b14b672e09b5b2d984295dfafb05604492bfaec5,https://pdfs.semanticscholar.org/b14b/672e09b5b2d984295dfafb05604492bfaec5.pdf
+b1b993a1fbcc827bcb99c4cc1ba64ae2c5dcc000,https://arxiv.org/pdf/1703.03054.pdf
+b166ce267ddb705e6ed855c6b679ec699d62e9cb,https://pdfs.semanticscholar.org/b166/ce267ddb705e6ed855c6b679ec699d62e9cb.pdf
+b13e2e43672e66ba45d1b852a34737e4ce04226b,https://pdfs.semanticscholar.org/3552/4e63c11f13fe08b2996a7bc0a9105e7c407b.pdf
+b1e4f8c15ff30cc7d35ab25ff3eddaf854e0a87c,https://pdfs.semanticscholar.org/dcc4/b241debf72f3898a69f32185b21766200771.pdf
+b15a06d701f0a7f508e3355a09d0016de3d92a6d,https://pdfs.semanticscholar.org/b15a/06d701f0a7f508e3355a09d0016de3d92a6d.pdf
+b1451721864e836069fa299a64595d1655793757,https://arxiv.org/pdf/1706.03863.pdf
+b1fdd4ae17d82612cefd4e78b690847b071379d3,https://pdfs.semanticscholar.org/4fc5/416b6c7173d3462e5be796bda3ad8d5645a1.pdf
+dde5125baefa1141f1ed50479a3fd67c528a965f,https://arxiv.org/pdf/1701.04851.pdf
+dd8084b2878ca95d8f14bae73e1072922f0cc5da,https://arxiv.org/pdf/1709.02929.pdf
+dd85b6fdc45bf61f2b3d3d92ce5056c47bd8d335,https://arxiv.org/pdf/1803.09490.pdf
+ddbd24a73ba3d74028596f393bb07a6b87a469c0,https://pdfs.semanticscholar.org/ddbd/24a73ba3d74028596f393bb07a6b87a469c0.pdf
+ddbb6e0913ac127004be73e2d4097513a8f02d37,https://pdfs.semanticscholar.org/d3ea/05926b22a9c45687d435611db14f608e410d.pdf
+dc550f361ae82ec6e1a0cf67edf6a0138163382e,https://pdfs.semanticscholar.org/dc55/0f361ae82ec6e1a0cf67edf6a0138163382e.pdf
+dcf71245addaf66a868221041aabe23c0a074312,https://arxiv.org/pdf/1708.05237.pdf
+dce5e0a1f2cdc3d4e0e7ca0507592860599b0454,https://arxiv.org/pdf/1803.05576.pdf
+dc9d62087ff93a821e6bb8a15a8ae2da3e39dcdd,https://arxiv.org/pdf/1705.01936.pdf
+dc974c31201b6da32f48ef81ae5a9042512705fe,https://arxiv.org/pdf/1705.01781.pdf
+b6ef158d95042f39765df04373c01546524c9ccd,https://pdfs.semanticscholar.org/b6ef/158d95042f39765df04373c01546524c9ccd.pdf
+b68150bfdec373ed8e025f448b7a3485c16e3201,https://arxiv.org/pdf/1703.09471.pdf
+b6f682648418422e992e3ef78a6965773550d36b,https://pdfs.semanticscholar.org/b6f6/82648418422e992e3ef78a6965773550d36b.pdf
+b6d3caccdcb3fbce45ce1a68bb5643f7e68dadb3,https://arxiv.org/pdf/1711.10305.pdf
+b6d0e461535116a675a0354e7da65b2c1d2958d4,https://arxiv.org/pdf/1805.03430.pdf
+a92adfdd8996ab2bd7cdc910ea1d3db03c66d34f,https://arxiv.org/pdf/1708.05038.pdf
+a98316980b126f90514f33214dde51813693fe0d,https://arxiv.org/pdf/1805.01887.pdf
+a95dc0c4a9d882a903ce8c70e80399f38d2dcc89,https://pdfs.semanticscholar.org/a95d/c0c4a9d882a903ce8c70e80399f38d2dcc89.pdf
+a92b5234b8b73e06709dd48ec5f0ec357c1aabed,https://arxiv.org/pdf/1802.04962.pdf
+d5d7e89e6210fcbaa52dc277c1e307632cd91dab,https://arxiv.org/pdf/1711.10398.pdf
+d5444f9475253bbcfef85c351ea9dab56793b9ea,https://arxiv.org/pdf/1703.00686.pdf
+d5de42d37ee84c86b8f9a054f90ddb4566990ec0,https://arxiv.org/pdf/1612.06371.pdf
+d2eb1079552fb736e3ba5e494543e67620832c52,https://arxiv.org/pdf/1807.04050.pdf
+d278e020be85a1ccd90aa366b70c43884dd3f798,https://arxiv.org/pdf/1805.11191.pdf
+d26b443f87df76034ff0fa9c5de9779152753f0c,https://arxiv.org/pdf/1807.03425.pdf
+aae742779e8b754da7973949992d258d6ca26216,https://arxiv.org/pdf/1505.04030.pdf
+aab3561acbd19f7397cbae39dd34b3be33220309,https://arxiv.org/pdf/1805.02152.pdf
+aafb8dc8fda3b13a64ec3f1ca7911df01707c453,https://arxiv.org/pdf/1711.06778.pdf
+aadfcaf601630bdc2af11c00eb34220da59b7559,https://arxiv.org/pdf/1804.07237.pdf
+aa3c9de34ef140ec812be85bb8844922c35eba47,https://arxiv.org/pdf/1707.09457.pdf
+aff8705fb2f2ae460cb3980b47f2e85c2e6dd41a,https://arxiv.org/pdf/1805.09203.pdf
+af6cae71f24ea8f457e581bfe1240d5fa63faaf7,https://arxiv.org/pdf/1805.09791.pdf
+afdf9a3464c3b015f040982750f6b41c048706f5,https://arxiv.org/pdf/1608.05477.pdf
+afa57e50570a6599508ee2d50a7b8ca6be04834a,https://pdfs.semanticscholar.org/bc26/4e51ea341744eba137e9dd0e6adf8cbc01d0.pdf
+afa84ff62c9f5b5c280de2996b69ad9fa48b7bc3,https://arxiv.org/pdf/1708.09268.pdf
+af654a7ec15168b16382bd604889ea07a967dac6,https://pdfs.semanticscholar.org/af65/4a7ec15168b16382bd604889ea07a967dac6.pdf
+b73795963dc623a634d218d29e4a5b74dfbc79f1,https://arxiv.org/pdf/1807.08772.pdf
+b7894c1f805ffd90ab4ab06002c70de68d6982ab,https://pdfs.semanticscholar.org/5e87/06fab62a5716c30a245e5963f51793e1d0ed.pdf
+b7774c096dc18bb0be2acef07ff5887a22c2a848,https://pdfs.semanticscholar.org/d589/29d6cc1dfa513b145e47598c446b16487861.pdf
+b7820f3d0f43c2ce613ebb6c3d16eb893c84cf89,https://arxiv.org/pdf/1804.10073.pdf
+b76af8fcf9a3ebc421b075b689defb6dc4282670,https://arxiv.org/pdf/1807.09207.pdf
+db848c3c32464d12da33b2f4c3a29fe293fc35d1,https://arxiv.org/pdf/1807.11152.pdf
+dbb16032dd8f19bdfd045a1fc0fc51f29c70f70a,https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf
+dbe255d3d2a5d960daaaba71cb0da292e0af36a7,https://arxiv.org/pdf/1505.04373.pdf
+db5a00984fa54b9d2a1caad0067a9ff0d0489517,https://pdfs.semanticscholar.org/dd47/1f321ead8b405da6194057b2778ef3db7ea7.pdf
+dbd958ffedc3eae8032be67599ec281310c05630,https://pdfs.semanticscholar.org/d051/86de8343813a738c1fa5da9bf5165ee63bb7.pdf
+dbed26cc6d818b3679e46677abc9fa8e04e8c6a6,https://pdfs.semanticscholar.org/dbed/26cc6d818b3679e46677abc9fa8e04e8c6a6.pdf
+db3545a983ffd24c97c18bf7f068783102548ad7,https://pdfs.semanticscholar.org/080e/660b47647e81dadaec27365b3d5b88f3ae68.pdf
+db67edbaeb78e1dd734784cfaaa720ba86ceb6d2,https://arxiv.org/pdf/1509.04853.pdf
+a85e9e11db5665c89b057a124547377d3e1c27ef,https://arxiv.org/pdf/1802.00066.pdf
+a87ab836771164adb95d6744027e62e05f47fd96,https://arxiv.org/pdf/1808.00022.pdf
+a896ddeb0d253739c9aaef7fc1f170a2ba8407d3,https://arxiv.org/pdf/1708.03979.pdf
+a803453edd2b4a85b29da74dcc551b3c53ff17f9,https://pdfs.semanticscholar.org/a803/453edd2b4a85b29da74dcc551b3c53ff17f9.pdf
+a8a30a8c50d9c4bb8e6d2dd84bc5b8b7f2c84dd8,https://pdfs.semanticscholar.org/3bf2/79c782cee5b43a766d248810d602b24033c9.pdf
+a8d52265649c16f95af71d6f548c15afc85ac905,https://arxiv.org/pdf/1708.04320.pdf
+a8a61badec9b8bc01f002a06e1426a623456d121,https://pdfs.semanticscholar.org/a8a6/1badec9b8bc01f002a06e1426a623456d121.pdf
+a8154d043f187c6640cb6aedeaa8385a323e46cf,https://arxiv.org/pdf/1805.03134.pdf
+a812368fe1d4a186322bf72a6d07e1cf60067234,https://pdfs.semanticscholar.org/a812/368fe1d4a186322bf72a6d07e1cf60067234.pdf
+de7f5e4ccc2f38e0c8f3f72a930ae1c43e0fdcf0,https://arxiv.org/pdf/1707.03986.pdf
+ded968b97bd59465d5ccda4f1e441f24bac7ede5,https://pdfs.semanticscholar.org/ded9/68b97bd59465d5ccda4f1e441f24bac7ede5.pdf
+de0eb358b890d92e8f67592c6e23f0e3b2ba3f66,https://arxiv.org/pdf/1711.01587.pdf
+def569db592ed1715ae509644444c3feda06a536,https://arxiv.org/pdf/1804.04604.pdf
+dee406a7aaa0f4c9d64b7550e633d81bc66ff451,https://arxiv.org/pdf/1710.01453.pdf
+de3285da34df0262a4548574c2383c51387a24bf,https://arxiv.org/pdf/1706.06982.pdf
+dec0c26855da90876c405e9fd42830c3051c2f5f,https://pdfs.semanticscholar.org/dec0/c26855da90876c405e9fd42830c3051c2f5f.pdf
+b0c512fcfb7bd6c500429cbda963e28850f2e948,https://arxiv.org/pdf/1408.1656.pdf
+b08203fca1af7b95fda8aa3d29dcacd182375385,https://arxiv.org/pdf/1805.01818.pdf
+b09b693708f412823053508578df289b8403100a,https://pdfs.semanticscholar.org/b09b/693708f412823053508578df289b8403100a.pdf
+b084683e5bab9b2bc327788e7b9a8e049d5fff8f,https://arxiv.org/pdf/1712.08263.pdf
+b0c1615ebcad516b5a26d45be58068673e2ff217,https://arxiv.org/pdf/1608.05246.pdf
+a6e8a8bb99e30a9e80dbf80c46495cf798066105,https://pdfs.semanticscholar.org/a6e8/a8bb99e30a9e80dbf80c46495cf798066105.pdf
+a6eb6ad9142130406fb4ffd4d60e8348c2442c29,https://arxiv.org/pdf/1806.00186.pdf
+a6590c49e44aa4975b2b0152ee21ac8af3097d80,https://arxiv.org/pdf/1804.00782.pdf
+a6e25cab2251a8ded43c44b28a87f4c62e3a548a,https://arxiv.org/pdf/1801.07388.pdf
+a6270914cf5f60627a1332bcc3f5951c9eea3be0,https://arxiv.org/pdf/1802.02522.pdf
+a6ce2f0795839d9c2543d64a08e043695887e0eb,https://arxiv.org/pdf/1507.04760.pdf
+b9081856963ceb78dcb44ac410c6fca0533676a3,https://arxiv.org/pdf/1703.03329.pdf
+b97f694c2a111b5b1724eefd63c8d64c8e19f6c9,https://arxiv.org/pdf/1710.01216.pdf
+b9d0774b0321a5cfc75471b62c8c5ef6c15527f5,https://pdfs.semanticscholar.org/b9d0/774b0321a5cfc75471b62c8c5ef6c15527f5.pdf
+b908edadad58c604a1e4b431f69ac8ded350589a,https://arxiv.org/pdf/1708.02721.pdf
+b93bf0a7e449cfd0db91a83284d9eba25a6094d8,https://pdfs.semanticscholar.org/b93b/f0a7e449cfd0db91a83284d9eba25a6094d8.pdf
+b971266b29fcecf1d5efe1c4dcdc2355cb188ab0,https://arxiv.org/pdf/1703.00832.pdf
+a1af7ec84472afba0451b431dfdb59be323e35b7,https://pdfs.semanticscholar.org/a1af/7ec84472afba0451b431dfdb59be323e35b7.pdf
+a1b7bb2a4970b7c479aff3324cc7773c1daf3fc1,https://arxiv.org/pdf/1711.03990.pdf
+a14ed872503a2f03d2b59e049fd6b4d61ab4d6ca,https://arxiv.org/pdf/1711.01467.pdf
+a1132e2638a8abd08bdf7fc4884804dd6654fa63,https://pdfs.semanticscholar.org/a113/2e2638a8abd08bdf7fc4884804dd6654fa63.pdf
+a1f1120653bb1bd8bd4bc9616f85fdc97f8ce892,https://arxiv.org/pdf/1603.08895.pdf
+a1dd9038b1e1e59c9d564e252d3e14705872fdec,https://arxiv.org/pdf/1803.09851.pdf
+a16fb74ea66025d1f346045fda00bd287c20af0e,https://arxiv.org/pdf/1809.07447.pdf
+ef940b76e40e18f329c43a3f545dc41080f68748,https://pdfs.semanticscholar.org/ef94/0b76e40e18f329c43a3f545dc41080f68748.pdf
+ef230e3df720abf2983ba6b347c9d46283e4b690,https://pdfs.semanticscholar.org/ef23/0e3df720abf2983ba6b347c9d46283e4b690.pdf
+ef4ecb76413a05c96eac4c743d2c2a3886f2ae07,https://pdfs.semanticscholar.org/ef4e/cb76413a05c96eac4c743d2c2a3886f2ae07.pdf
+ef458499c3856a6e9cd4738b3e97bef010786adb,https://arxiv.org/pdf/1803.09196.pdf
+ef032afa4bdb18b328ffcc60e2dc5229cc1939bc,https://pdfs.semanticscholar.org/ef03/2afa4bdb18b328ffcc60e2dc5229cc1939bc.pdf
+ef5531711a69ed687637c48930261769465457f0,https://arxiv.org/pdf/1807.00556.pdf
+ef559d5f02e43534168fbec86707915a70cd73a0,https://pdfs.semanticscholar.org/ef55/9d5f02e43534168fbec86707915a70cd73a0.pdf
+efa08283656714911acff2d5022f26904e451113,https://arxiv.org/pdf/1607.00548.pdf
+ef8de1bd92e9ee9d0d2dee73095d4d348dc54a98,https://arxiv.org/pdf/1406.1881.pdf
+ef999ab2f7b37f46445a3457bf6c0f5fd7b5689d,https://pdfs.semanticscholar.org/001e/ad9b99ee57af44e1831be1670c40711d348d.pdf
+c3beae515f38daf4bd8053a7d72f6d2ed3b05d88,https://pdfs.semanticscholar.org/1093/3b6c487a269b87f9b561c5eedfdab6be306b.pdf
+c3dc4f414f5233df96a9661609557e341b71670d,https://pdfs.semanticscholar.org/c3dc/4f414f5233df96a9661609557e341b71670d.pdf
+c3285a1d6ec6972156fea9e6dc9a8d88cd001617,https://arxiv.org/pdf/1712.05083.pdf
+c34532fe6bfbd1e6df477c9ffdbb043b77e7804d,https://pdfs.semanticscholar.org/0d43/3b9435b874a1eea6d7999e86986c910fa285.pdf
+c394a5dfe5bea5fbab4c2b6b90d2d03e01fb29c0,https://pdfs.semanticscholar.org/2a1c/16f418d8d8e6fa179a8e6a368bb0b47266d0.pdf
+c39ffc56a41d436748b9b57bdabd8248b2d28a32,https://arxiv.org/pdf/1704.06904.pdf
+c317181fa1de2260e956f05cd655642607520a4f,https://arxiv.org/pdf/1708.07549.pdf
+c30e4e4994b76605dcb2071954eaaea471307d80,https://pdfs.semanticscholar.org/c30e/4e4994b76605dcb2071954eaaea471307d80.pdf
+c37de914c6e9b743d90e2566723d0062bedc9e6a,https://pdfs.semanticscholar.org/c37d/e914c6e9b743d90e2566723d0062bedc9e6a.pdf
+c4fb2de4a5dc28710d9880aece321acf68338fde,https://arxiv.org/pdf/1801.09092.pdf
+c43862db5eb7e43e3ef45b5eac4ab30e318f2002,https://arxiv.org/pdf/1704.03925.pdf
+ea46951b070f37ad95ea4ed08c7c2a71be2daedc,https://arxiv.org/pdf/1809.03258.pdf
+eac6aee477446a67d491ef7c95abb21867cf71fc,https://arxiv.org/pdf/1602.07017.pdf
+ea079334121a0ba89452036e5d7f8e18f6851519,https://arxiv.org/pdf/1708.03615.pdf
+eac1b644492c10546a50f3e125a1f790ec46365f,https://arxiv.org/pdf/1704.00616.pdf
+ea80a050d20c0e24e0625a92e5c03e5c8db3e786,https://pdfs.semanticscholar.org/ea80/a050d20c0e24e0625a92e5c03e5c8db3e786.pdf
+eafda8a94e410f1ad53b3e193ec124e80d57d095,https://pdfs.semanticscholar.org/eafd/a8a94e410f1ad53b3e193ec124e80d57d095.pdf
+ea890846912f16a0f3a860fce289596a7dac575f,https://pdfs.semanticscholar.org/ea89/0846912f16a0f3a860fce289596a7dac575f.pdf
+eaaed082762337e7c3f8a1b1dfea9c0d3ca281bf,https://pdfs.semanticscholar.org/eaae/d082762337e7c3f8a1b1dfea9c0d3ca281bf.pdf
+e1630014a5ae3d2fb7ff6618f1470a567f4d90f5,https://arxiv.org/pdf/1602.04364.pdf
+e19fb22b35c352f57f520f593d748096b41a4a7b,https://pdfs.semanticscholar.org/cbd8/716132ed289d21bdc2e031b7dea4849aae5d.pdf
+e19ebad4739d59f999d192bac7d596b20b887f78,https://arxiv.org/pdf/1709.03655.pdf
+e1d726d812554f2b2b92cac3a4d2bec678969368,https://pdfs.semanticscholar.org/c134/a2441bc1f3ec6b85f22868284c279881b918.pdf
+e1256ff535bf4c024dd62faeb2418d48674ddfa2,https://arxiv.org/pdf/1803.11182.pdf
+cdc7bd87a2c9983dab728dbc8aac74d8c9ed7e66,https://pdfs.semanticscholar.org/cdc7/bd87a2c9983dab728dbc8aac74d8c9ed7e66.pdf
+cd4941cbef1e27d7afdc41b48c1aff5338aacf06,https://arxiv.org/pdf/1712.06761.pdf
+cdef0eaff4a3c168290d238999fc066ebc3a93e8,https://arxiv.org/pdf/1707.07391.pdf
+cd444ee7f165032b97ee76b21b9ff58c10750570,https://pdfs.semanticscholar.org/cd44/4ee7f165032b97ee76b21b9ff58c10750570.pdf
+cd23dc3227ee2a3ab0f4de1817d03ca771267aeb,https://pdfs.semanticscholar.org/cd23/dc3227ee2a3ab0f4de1817d03ca771267aeb.pdf
+cd2c54705c455a4379f45eefdf32d8d10087e521,https://arxiv.org/pdf/1804.04779.pdf
+cd7a7be3804fd217e9f10682e0c0bfd9583a08db,https://arxiv.org/pdf/1807.00517.pdf
+cd023d2d067365c83d8e27431e83e7e66082f718,https://arxiv.org/pdf/1804.06039.pdf
+cca9ae621e8228cfa787ec7954bb375536160e0d,https://arxiv.org/pdf/1805.07410.pdf
+cc8e378fd05152a81c2810f682a78c5057c8a735,https://pdfs.semanticscholar.org/cc8e/378fd05152a81c2810f682a78c5057c8a735.pdf
+cc31db984282bb70946f6881bab741aa841d3a7c,https://arxiv.org/pdf/1610.02255.pdf
+ccf16bcf458e4d7a37643b8364594656287f5bfc,https://pdfs.semanticscholar.org/ccf1/6bcf458e4d7a37643b8364594656287f5bfc.pdf
+e6b45d5a86092bbfdcd6c3c54cda3d6c3ac6b227,https://arxiv.org/pdf/1808.04976.pdf
+e6e5a6090016810fb902b51d5baa2469ae28b8a1,https://pdfs.semanticscholar.org/e6e5/a6090016810fb902b51d5baa2469ae28b8a1.pdf
+f92ade569cbe54344ffd3bb25efd366dcd8ad659,https://arxiv.org/pdf/1704.01464.pdf
+f94f366ce14555cf0d5d34248f9467c18241c3ee,https://pdfs.semanticscholar.org/f94f/366ce14555cf0d5d34248f9467c18241c3ee.pdf
+f997a71f1e54d044184240b38d9dc680b3bbbbc0,https://arxiv.org/pdf/1807.11688.pdf
+f08e425c2fce277aedb51d93757839900d591008,https://arxiv.org/pdf/1711.06640.pdf
+f0cee87e9ecedeb927664b8da44b8649050e1c86,https://arxiv.org/pdf/1805.02901.pdf
+f0f4f16d5b5f9efe304369120651fa688a03d495,https://pdfs.semanticscholar.org/f0f4/f16d5b5f9efe304369120651fa688a03d495.pdf
+f0ca31fd5cad07e84b47d50dc07db9fc53482a46,https://pdfs.semanticscholar.org/f0ca/31fd5cad07e84b47d50dc07db9fc53482a46.pdf
+f0a4a3fb6997334511d7b8fc090f9ce894679faf,https://arxiv.org/pdf/1704.05838.pdf
+f0398ee5291b153b716411c146a17d4af9cb0edc,https://arxiv.org/pdf/1805.02733.pdf
+f0f0e94d333b4923ae42ee195df17c0df62ea0b1,https://pdfs.semanticscholar.org/f0f0/e94d333b4923ae42ee195df17c0df62ea0b1.pdf
+f02a6bccdaee14ab55ad94263539f4f33f1b15bb,https://pdfs.semanticscholar.org/f02a/6bccdaee14ab55ad94263539f4f33f1b15bb.pdf
+f7dea4454c2de0b96ab5cf95008ce7144292e52a,https://arxiv.org/pdf/1805.05563.pdf
+f7b4bc4ef14349a6e66829a0101d5b21129dcf55,https://pdfs.semanticscholar.org/f7b4/bc4ef14349a6e66829a0101d5b21129dcf55.pdf
+f7b422df567ce9813926461251517761e3e6cda0,https://arxiv.org/pdf/1702.01983.pdf
+f7824758800a7b1a386db5bd35f84c81454d017a,https://arxiv.org/pdf/1702.05085.pdf
+f78fe101b21be36e98cd3da010051bb9b9829a1e,https://pdfs.semanticscholar.org/f78f/e101b21be36e98cd3da010051bb9b9829a1e.pdf
+f79c97e7c3f9a98cf6f4a5d2431f149ffacae48f,https://pdfs.semanticscholar.org/f79c/97e7c3f9a98cf6f4a5d2431f149ffacae48f.pdf
+f7a271acccf9ec66c9b114d36eec284fbb89c7ef,https://pdfs.semanticscholar.org/f7a2/71acccf9ec66c9b114d36eec284fbb89c7ef.pdf
+f7dcadc5288653ec6764600c7c1e2b49c305dfaa,https://pdfs.semanticscholar.org/f7dc/adc5288653ec6764600c7c1e2b49c305dfaa.pdf
+f7ba77d23a0eea5a3034a1833b2d2552cb42fb7a,https://arxiv.org/pdf/1611.06179.pdf
+e8fdacbd708feb60fd6e7843b048bf3c4387c6db,https://pdfs.semanticscholar.org/e8fd/acbd708feb60fd6e7843b048bf3c4387c6db.pdf
+e87d6c284cdd6828dfe7c092087fbd9ff5091ee4,https://arxiv.org/pdf/1704.05693.pdf
+e8523c4ac9d7aa21f3eb4062e09f2a3bc1eedcf7,https://arxiv.org/pdf/1701.07174.pdf
+e85a255a970ee4c1eecc3e3d110e157f3e0a4629,https://arxiv.org/pdf/1803.03415.pdf
+e8d1b134d48eb0928bc999923a4e092537e106f6,https://pdfs.semanticscholar.org/e8d1/b134d48eb0928bc999923a4e092537e106f6.pdf
+e8c6c3fc9b52dffb15fe115702c6f159d955d308,https://pdfs.semanticscholar.org/d927/77953677da471c060cbabc2c5b15de8d60b2.pdf
+e8b3a257a0a44d2859862cdec91c8841dc69144d,https://arxiv.org/pdf/1808.01725.pdf
+fa90b825346a51562d42f6b59a343b98ea2e501a,https://arxiv.org/pdf/1805.06533.pdf
+fa4f59397f964a23e3c10335c67d9a24ef532d5c,https://arxiv.org/pdf/1602.03346.pdf
+fac8cff9052fc5fab7d5ef114d1342daba5e4b82,https://pdfs.semanticscholar.org/fac8/cff9052fc5fab7d5ef114d1342daba5e4b82.pdf
+faf5583063682e70dedc4466ac0f74eeb63169e7,https://pdfs.semanticscholar.org/6ca0/be5608fc00181596e562eb867eeb8cb43a4a.pdf
+fab60b3db164327be8588bce6ce5e45d5b882db6,https://pdfs.semanticscholar.org/fab6/0b3db164327be8588bce6ce5e45d5b882db6.pdf
+fad895771260048f58d12158a4d4d6d0623f4158,https://pdfs.semanticscholar.org/fad8/95771260048f58d12158a4d4d6d0623f4158.pdf
+ffea8775fc9c32f573d1251e177cd283b4fe09c9,https://arxiv.org/pdf/1804.04418.pdf
+fffefc1fb840da63e17428fd5de6e79feb726894,https://arxiv.org/pdf/1805.10445.pdf
+ffe4bb47ec15f768e1744bdf530d5796ba56cfc1,https://arxiv.org/pdf/1706.04277.pdf
+ff8ef43168b9c8dd467208a0b1b02e223b731254,https://arxiv.org/pdf/1603.07141.pdf
+ff9195f99a1a28ced431362f5363c9a5da47a37b,https://pdfs.semanticscholar.org/ff91/95f99a1a28ced431362f5363c9a5da47a37b.pdf
+c588c89a72f89eed29d42f34bfa5d4cffa530732,https://arxiv.org/pdf/1705.01734.pdf
+c5ea084531212284ce3f1ca86a6209f0001de9d1,https://pdfs.semanticscholar.org/c5ea/084531212284ce3f1ca86a6209f0001de9d1.pdf
+c254b4c0f6d5a5a45680eb3742907ec93c3a222b,https://arxiv.org/pdf/1711.06451.pdf
+f60a85bd35fa85739d712f4c93ea80d31aa7de07,https://arxiv.org/pdf/1710.06924.pdf
+f6f06be05981689b94809130e251f9e4bf932660,https://pdfs.semanticscholar.org/fa86/ec19c1aec46202e0df12d209eb8062d53f7b.pdf
+f68ed499e9d41f9c3d16d843db75dc12833d988d,https://arxiv.org/pdf/1805.05029.pdf
+f6149fc5b39fa6b33220ccee32a8ee3f6bbcaf4a,https://arxiv.org/pdf/1806.09755.pdf
+f6ce34d6e4e445cc2c8a9b8ba624e971dd4144ca,https://arxiv.org/pdf/1705.02928.pdf
+f61d5f2a082c65d5330f21b6f36312cc4fab8a3b,https://arxiv.org/pdf/1705.08841.pdf
+f6cf2108ec9d0f59124454d88045173aa328bd2e,https://pdfs.semanticscholar.org/f6cf/2108ec9d0f59124454d88045173aa328bd2e.pdf
+f6e00d6430cbbaa64789d826d093f7f3e323b082,https://pdfs.semanticscholar.org/5255/490925aa1e01ac0b9a55e93ec8c82efc07b7.pdf
+e9a5a38e7da3f0aa5d21499149536199f2e0e1f7,https://pdfs.semanticscholar.org/e9a5/a38e7da3f0aa5d21499149536199f2e0e1f7.pdf
+e988be047b28ba3b2f1e4cdba3e8c94026139fcf,https://arxiv.org/pdf/1702.04710.pdf
+e9d43231a403b4409633594fa6ccc518f035a135,https://pdfs.semanticscholar.org/e9d4/3231a403b4409633594fa6ccc518f035a135.pdf
+e90e12e77cab78ba8f8f657db2bf4ae3dabd5166,https://arxiv.org/pdf/1712.02979.pdf
+e9c008d31da38d9eef67a28d2c77cb7daec941fb,https://arxiv.org/pdf/1708.03769.pdf
+e9363f4368b04aeaa6d6617db0a574844fc59338,https://arxiv.org/pdf/1710.08315.pdf
+f1250900074689061196d876f551ba590fc0a064,https://arxiv.org/pdf/1710.07354.pdf
+f1b4583c576d6d8c661b4b2c82bdebf3ba3d7e53,https://arxiv.org/pdf/1707.05653.pdf
+f1aa120fb720f6cfaab13aea4b8379275e6d40a2,https://pdfs.semanticscholar.org/f1aa/120fb720f6cfaab13aea4b8379275e6d40a2.pdf
+f1ba2fe3491c715ded9677862fea966b32ca81f0,https://pdfs.semanticscholar.org/f1ba/2fe3491c715ded9677862fea966b32ca81f0.pdf
+f113aed343bcac1021dc3e57ba6cc0647a8f5ce1,https://pdfs.semanticscholar.org/f113/aed343bcac1021dc3e57ba6cc0647a8f5ce1.pdf
+e7cac91da51b78eb4a28e194d3f599f95742e2a2,https://pdfs.semanticscholar.org/e7ca/c91da51b78eb4a28e194d3f599f95742e2a2.pdf
+e7b2b0538731adaacb2255235e0a07d5ccf09189,https://arxiv.org/pdf/1803.10837.pdf
+cbca355c5467f501d37b919d8b2a17dcb39d3ef9,https://pdfs.semanticscholar.org/cbca/355c5467f501d37b919d8b2a17dcb39d3ef9.pdf
+cbbd13c29d042743f0139f1e044b6bca731886d0,https://pdfs.semanticscholar.org/cbbd/13c29d042743f0139f1e044b6bca731886d0.pdf
+cb004e9706f12d1de83b88c209ac948b137caae0,https://arxiv.org/pdf/1511.01186.pdf
+cb2917413c9b36c3bb9739bce6c03a1a6eb619b3,https://pdfs.semanticscholar.org/cb29/17413c9b36c3bb9739bce6c03a1a6eb619b3.pdf
+cb13e29fb8af6cfca568c6dc523da04d1db1fff5,https://arxiv.org/pdf/1806.05781.pdf
+cbb27980eb04f68d9f10067d3d3c114efa9d0054,https://arxiv.org/pdf/1807.03380.pdf
+f842b13bd494be1bbc1161dc6df244340b28a47f,https://pdfs.semanticscholar.org/f842/b13bd494be1bbc1161dc6df244340b28a47f.pdf
+f8f872044be2918de442ba26a30336d80d200c42,https://pdfs.semanticscholar.org/f8f8/72044be2918de442ba26a30336d80d200c42.pdf
+f87b22e7f0c66225824a99cada71f9b3e66b5742,https://arxiv.org/pdf/1709.03126.pdf
+ceaa5eb51f761b5f84bd88b58c8f484fcd2a22d6,https://pdfs.semanticscholar.org/535a/b2d3a443235ef98d818f133a26c7445214a7.pdf
+ce450e4849490924488664b44769b4ca57f1bc1a,https://arxiv.org/pdf/1612.00881.pdf
+ce032dae834f383125cdd852e7c1bc793d4c3ba3,https://pdfs.semanticscholar.org/e459/e31cfd985ec0031d5e9ff4896a84ebaff972.pdf
+ce9e1dfa7705623bb67df3a91052062a0a0ca456,https://arxiv.org/pdf/1611.05507.pdf
+e03bda45248b4169e2a20cb9124ae60440cad2de,https://pdfs.semanticscholar.org/0434/9d5d7c72d7fa3d1427b7afbfaa3ae07992ed.pdf
+e03e86ac61cfac9148b371d75ce81a55e8b332ca,https://pdfs.semanticscholar.org/e03e/86ac61cfac9148b371d75ce81a55e8b332ca.pdf
+e096b11b3988441c0995c13742ad188a80f2b461,https://arxiv.org/pdf/1606.04702.pdf
+e01bb53b611c679141494f3ffe6f0b91953af658,https://arxiv.org/pdf/1711.10703.pdf
+e0939b4518a5ad649ba04194f74f3413c793f28e,https://pdfs.semanticscholar.org/02ce/655ade8d052d099ae145afd032eb39d089b4.pdf
+e00d391d7943561f5c7b772ab68e2bb6a85e64c4,https://pdfs.semanticscholar.org/e00d/391d7943561f5c7b772ab68e2bb6a85e64c4.pdf
+e065a2cb4534492ccf46d0afc81b9ad8b420c5ec,https://arxiv.org/pdf/1804.06559.pdf
+e00241f00fb31c660df6c6f129ca38370e6eadb3,https://arxiv.org/pdf/1801.01415.pdf
+e0244a8356b57a5721c101ead351924bcfb2eef4,https://pdfs.semanticscholar.org/e024/4a8356b57a5721c101ead351924bcfb2eef4.pdf
+46f2611dc4a9302e0ac00a79456fa162461a8c80,https://arxiv.org/pdf/1806.07754.pdf
+46e72046a9bb2d4982d60bcf5c63dbc622717f0f,https://arxiv.org/pdf/1605.02424.pdf
+4641986af5fc8836b2c883ea1a65278d58fe4577,https://arxiv.org/pdf/1701.02426.pdf
+464b3f0824fc1c3a9eaf721ce2db1b7dfe7cb05a,https://arxiv.org/pdf/1808.07272.pdf
+2c424f21607ff6c92e640bfe3da9ff105c08fac4,https://pdfs.semanticscholar.org/3f25/e17eb717e5894e0404ea634451332f85d287.pdf
+2c93c8da5dfe5c50119949881f90ac5a0a4f39fe,https://arxiv.org/pdf/1805.01951.pdf
+2cac8ab4088e2bdd32dcb276b86459427355085c,https://pdfs.semanticscholar.org/2cac/8ab4088e2bdd32dcb276b86459427355085c.pdf
+2cde051e04569496fb525d7f1b1e5ce6364c8b21,https://arxiv.org/pdf/1505.02890.pdf
+2c1ffb0feea5f707c890347d2c2882be0494a67a,https://arxiv.org/pdf/1807.08919.pdf
+2c5d1e0719f3ad7f66e1763685ae536806f0c23b,https://arxiv.org/pdf/1701.00599.pdf
+2c1f8ddbfbb224271253a27fed0c2425599dfe47,https://arxiv.org/pdf/1708.07689.pdf
+2c62b9e64aeddf12f9d399b43baaefbca8e11148,https://arxiv.org/pdf/1803.05536.pdf
+2c19d3d35ef7062061b9e16d040cebd7e45f281d,https://arxiv.org/pdf/1711.04161.pdf
+79f6a8f777a11fd626185ab549079236629431ac,https://pdfs.semanticscholar.org/79f6/a8f777a11fd626185ab549079236629431ac.pdf
+79dc84a3bf76f1cb983902e2591d913cee5bdb0e,https://pdfs.semanticscholar.org/1e9e/87fc99430a82621810b3ce7db51e339be315.pdf
+79744fc71bea58d2e1918c9e254b10047472bd76,https://arxiv.org/pdf/1802.06713.pdf
+79c3a7131c6c176b02b97d368cd0cd0bc713ff7e,https://pdfs.semanticscholar.org/538a/30196253e458a2a30d530218ffa449c4d24e.pdf
+799c02a3cde2c0805ea728eb778161499017396b,https://arxiv.org/pdf/1711.01984.pdf
+79db191ca1268dc88271abef3179c4fe4ee92aed,https://pdfs.semanticscholar.org/79db/191ca1268dc88271abef3179c4fe4ee92aed.pdf
+2d9e58ea582e054e9d690afca8b6a554c3687ce6,https://arxiv.org/pdf/1706.08580.pdf
+2d8001ffee6584b3f4d951d230dc00a06e8219f8,https://arxiv.org/pdf/1712.00721.pdf
+2dfe0e7e81f65716b09c590652a4dd8452c10294,https://pdfs.semanticscholar.org/2dfe/0e7e81f65716b09c590652a4dd8452c10294.pdf
+2d8d089d368f2982748fde93a959cf5944873673,https://pdfs.semanticscholar.org/2d8d/089d368f2982748fde93a959cf5944873673.pdf
+2d4a3e9361505616fa4851674eb5c8dd18e0c3cf,https://arxiv.org/pdf/1807.08379.pdf
+41f26101fed63a8d149744264dd5aa79f1928265,https://arxiv.org/pdf/1604.07602.pdf
+411318684bd2d42e4b663a37dcf0532a48f0146d,https://pdfs.semanticscholar.org/4e20/8cfff33327863b5aeef0bf9b327798a5610c.pdf
+414715421e01e8c8b5743c5330e6d2553a08c16d,https://pdfs.semanticscholar.org/4147/15421e01e8c8b5743c5330e6d2553a08c16d.pdf
+8356832f883207187437872742d6b7dc95b51fde,https://arxiv.org/pdf/1807.00458.pdf
+835e510fcf22b4b9097ef51b8d0bb4e7b806bdfd,https://arxiv.org/pdf/1804.00946.pdf
+83295bce2340cb87901499cff492ae6ff3365475,https://arxiv.org/pdf/1808.01558.pdf
+83e96ed8a4663edaa3a5ca90b7ce75a1bb595b05,https://pdfs.semanticscholar.org/d726/6bf19e202f62f31c363a5a5656c67c03118b.pdf
+837e99301e00c2244023a8a48ff98d7b521c93ac,https://pdfs.semanticscholar.org/b7b7/4e0ec15c22e1c94406c592bbb83c8e865f52.pdf
+8334da483f1986aea87b62028672836cb3dc6205,https://arxiv.org/pdf/1805.06306.pdf
+831b4d8b0c0173b0bac0e328e844a0fbafae6639,https://arxiv.org/pdf/1809.01407.pdf
+1b02b9413b730b96b91d16dcd61b2420aef97414,https://pdfs.semanticscholar.org/1b02/b9413b730b96b91d16dcd61b2420aef97414.pdf
+1b55c4e804d1298cbbb9c507497177014a923d22,https://pdfs.semanticscholar.org/1b55/c4e804d1298cbbb9c507497177014a923d22.pdf
+1bdef21f093c41df2682a07f05f3548717c7a3d1,https://pdfs.semanticscholar.org/1bde/f21f093c41df2682a07f05f3548717c7a3d1.pdf
+1bbec7190ac3ba34ca91d28f145e356a11418b67,https://pdfs.semanticscholar.org/1bbe/c7190ac3ba34ca91d28f145e356a11418b67.pdf
+1b3587363d37dd197b6adbcfa79d49b5486f27d8,https://arxiv.org/pdf/1806.06371.pdf
+1b71d3f30238cb6621021a95543cce3aab96a21b,https://arxiv.org/pdf/1804.09235.pdf
+1b4f6f73c70353869026e5eec1dd903f9e26d43f,https://arxiv.org/pdf/1501.06202.pdf
+1badfeece64d1bf43aa55c141afe61c74d0bd25e,https://arxiv.org/pdf/1712.01727.pdf
+7789a5d87884f8bafec8a82085292e87d4e2866f,https://arxiv.org/pdf/1612.09548.pdf
+77db171a523fc3d08c91cea94c9562f3edce56e1,https://pdfs.semanticscholar.org/49af/c659fd0709511759fd220f49b5eb2265e815.pdf
+77fbbf0c5729f97fcdbfdc507deee3d388cd4889,https://pdfs.semanticscholar.org/ec7f/c7bf79204166f78c27e870b620205751fff6.pdf
+776362314f1479f5319aaf989624ac604ba42c65,https://pdfs.semanticscholar.org/78aa/2775625c85aedd6a2adc90eb94b8cafd6e91.pdf
+48186494fc7c0cc664edec16ce582b3fcb5249c0,https://arxiv.org/pdf/1506.03607.pdf
+48499deeaa1e31ac22c901d115b8b9867f89f952,https://pdfs.semanticscholar.org/4849/9deeaa1e31ac22c901d115b8b9867f89f952.pdf
+4850af6b54391fc33c8028a0b7fafe05855a96ff,https://arxiv.org/pdf/1605.00707.pdf
+48e6c6d981efe2c2fb0ae9287376fcae59da9878,https://arxiv.org/pdf/1807.11010.pdf
+48a5b6ee60475b18411a910c6084b3a32147b8cd,https://pdfs.semanticscholar.org/48a5/b6ee60475b18411a910c6084b3a32147b8cd.pdf
+4896909796f9bd2f70a2cb24bf18daacd6a12128,https://pdfs.semanticscholar.org/4896/909796f9bd2f70a2cb24bf18daacd6a12128.pdf
+70109c670471db2e0ede3842cbb58ba6be804561,https://arxiv.org/pdf/1607.02104.pdf
+703dc33736939f88625227e38367cfb2a65319fe,https://arxiv.org/pdf/1703.09026.pdf
+701f56f0eac9f88387de1f556acef78016b05d52,https://pdfs.semanticscholar.org/701f/56f0eac9f88387de1f556acef78016b05d52.pdf
+706b9767a444de4fe153b2f3bff29df7674c3161,https://arxiv.org/pdf/1511.06442.pdf
+70c58700eb89368e66a8f0d3fc54f32f69d423e1,https://pdfs.semanticscholar.org/70c5/8700eb89368e66a8f0d3fc54f32f69d423e1.pdf
+707a542c580bcbf3a5a75cce2df80d75990853cc,https://arxiv.org/pdf/1809.01936.pdf
+704d88168bdfabe31b6ff484507f4a2244b8c52b,https://arxiv.org/pdf/1803.07445.pdf
+70c9d11cad12dc1692a4507a97f50311f1689dbf,https://arxiv.org/pdf/1702.02463.pdf
+1ea74780d529a458123a08250d8fa6ef1da47a25,https://pdfs.semanticscholar.org/1ea7/4780d529a458123a08250d8fa6ef1da47a25.pdf
+1efacaa0eaa7e16146c34cd20814d1411b35538e,https://arxiv.org/pdf/1805.06749.pdf
+1ef1f33c48bc159881c5c8536cbbd533d31b0e9a,https://pdfs.semanticscholar.org/1ef1/f33c48bc159881c5c8536cbbd533d31b0e9a.pdf
+1e21b925b65303ef0299af65e018ec1e1b9b8d60,https://arxiv.org/pdf/1611.02200.pdf
+1ee3b4ba04e54bfbacba94d54bf8d05fd202931d,https://pdfs.semanticscholar.org/1ee3/b4ba04e54bfbacba94d54bf8d05fd202931d.pdf
+1efaa128378f988965841eb3f49d1319a102dc36,https://arxiv.org/pdf/1808.04803.pdf
+8451bf3dd6bcd946be14b1a75af8bbb65a42d4b2,https://pdfs.semanticscholar.org/8451/bf3dd6bcd946be14b1a75af8bbb65a42d4b2.pdf
+841855205818d3a6d6f85ec17a22515f4f062882,https://arxiv.org/pdf/1805.11529.pdf
+84c0f814951b80c3b2e39caf3925b56a9b2e1733,https://pdfs.semanticscholar.org/84c0/f814951b80c3b2e39caf3925b56a9b2e1733.pdf
+84fa126cb19d569d2f0147bf6f9e26b54c9ad4f1,https://pdfs.semanticscholar.org/db88/70aba4eca31ba56e993e4e94ae86eed6589a.pdf
+84508e846af3ac509f7e1d74b37709107ba48bde,https://pdfs.semanticscholar.org/8450/8e846af3ac509f7e1d74b37709107ba48bde.pdf
+849f891973ad2b6c6f70d7d43d9ac5805f1a1a5b,https://pdfs.semanticscholar.org/849f/891973ad2b6c6f70d7d43d9ac5805f1a1a5b.pdf
+4ab84f203b0e752be83f7f213d7495b04b1c4c79,https://arxiv.org/pdf/1711.00659.pdf
+4a3758f283b7c484d3f164528d73bc8667eb1591,https://arxiv.org/pdf/1809.06647.pdf
+4aa093d1986b4ad9b073ac9edfb903f62c00e0b0,https://arxiv.org/pdf/1809.06218.pdf
+4ac3cd8b6c50f7a26f27eefc64855134932b39be,https://pdfs.semanticscholar.org/4ac3/cd8b6c50f7a26f27eefc64855134932b39be.pdf
+24115d209e0733e319e39badc5411bbfd82c5133,https://arxiv.org/pdf/1411.4389.pdf
+24f022d807352abf071880877c38e53a98254dcd,https://arxiv.org/pdf/1809.05465.pdf
+24869258fef8f47623b5ef43bd978a525f0af60e,https://pdfs.semanticscholar.org/c2b3/d8ac1f02e63809c74d2eacb37329ec139ce2.pdf
+24ff832171cb774087a614152c21f54589bf7523,https://arxiv.org/pdf/1508.03755.pdf
+23ce6f404c504592767b8bec7d844d87b462de71,https://arxiv.org/pdf/1805.00324.pdf
+2322ec2f3571e0ddc593c4e2237a6a794c61251d,https://pdfs.semanticscholar.org/2322/ec2f3571e0ddc593c4e2237a6a794c61251d.pdf
+23429ef60e7a9c0e2f4d81ed1b4e47cc2616522f,https://arxiv.org/pdf/1704.06456.pdf
+2303d07d839e8b20f33d6e2ec78d1353cac256cf,https://arxiv.org/pdf/1806.00631.pdf
+23dd8d17ce09c22d367e4d62c1ccf507bcbc64da,https://pdfs.semanticscholar.org/23dd/8d17ce09c22d367e4d62c1ccf507bcbc64da.pdf
+23b37c2f803a2d4b701e2f39c5f623b2f3e14d8e,https://pdfs.semanticscholar.org/21b0/fe87731197c94f9e282e995c8f75a9b721a5.pdf
+4f9e00aaf2736b79e415f5e7c8dfebda3043a97d,https://pdfs.semanticscholar.org/d713/d11d5c8f466ad56286f407991b2d88b606ff.pdf
+4f051022de100241e5a4ba8a7514db9167eabf6e,https://arxiv.org/pdf/1708.03736.pdf
+4f4f920eb43399d8d05b42808e45b56bdd36a929,https://pdfs.semanticscholar.org/4f4f/920eb43399d8d05b42808e45b56bdd36a929.pdf
+4f0b8f730273e9f11b2bfad2415485414b96299f,https://arxiv.org/pdf/1805.04687.pdf
+4f7b92bd678772552b3c3edfc9a7c5c4a8c60a8e,https://pdfs.semanticscholar.org/4f7b/92bd678772552b3c3edfc9a7c5c4a8c60a8e.pdf
+8de6deefb90fb9b3f7d451b9d8a1a3264b768482,https://pdfs.semanticscholar.org/8de6/deefb90fb9b3f7d451b9d8a1a3264b768482.pdf
+8d384e8c45a429f5c5f6628e8ba0d73c60a51a89,https://arxiv.org/pdf/1708.00666.pdf
+8dcc95debd07ebab1721c53fa50d846fef265022,https://arxiv.org/pdf/1711.07011.pdf
+8d5998cd984e7cce307da7d46f155f9db99c6590,https://arxiv.org/pdf/1701.02664.pdf
+15136c2f94fd29fc1cb6bedc8c1831b7002930a6,https://arxiv.org/pdf/1802.09990.pdf
+15d653972d176963ef0ad2cc582d3b35ca542673,https://arxiv.org/pdf/1612.05203.pdf
+15aa6c457678e25f6bc0e818e5fc39e42dd8e533,https://arxiv.org/pdf/1806.07823.pdf
+15cf1f17aeba62cd834116b770f173b0aa614bf4,https://pdfs.semanticscholar.org/15cf/1f17aeba62cd834116b770f173b0aa614bf4.pdf
+121503705689f46546cade78ff62963574b4750b,https://arxiv.org/pdf/1602.08405.pdf
+1275d6a800f8cf93c092603175fdad362b69c191,https://arxiv.org/pdf/1804.06655.pdf
+1287bfe73e381cc8042ac0cc27868ae086e1ce3b,https://pdfs.semanticscholar.org/1287/bfe73e381cc8042ac0cc27868ae086e1ce3b.pdf
+12408baf69419409d228d96c6f88b6bcde303505,https://arxiv.org/pdf/1612.06950.pdf
+12095f9b35ee88272dd5abc2d942a4f55804b31e,https://pdfs.semanticscholar.org/1209/5f9b35ee88272dd5abc2d942a4f55804b31e.pdf
+1275852f2e78ed9afd189e8b845fdb5393413614,https://arxiv.org/pdf/1808.04068.pdf
+1297ee7a41aa4e8499c7ddb3b1fed783eba19056,https://pdfs.semanticscholar.org/1297/ee7a41aa4e8499c7ddb3b1fed783eba19056.pdf
+120785f9b4952734818245cc305148676563a99b,https://pdfs.semanticscholar.org/1207/85f9b4952734818245cc305148676563a99b.pdf
+1251deae1b4a722a2155d932bdfb6fe4ae28dd22,https://arxiv.org/pdf/1804.04314.pdf
+12ebeb2176a5043ad57bc5f3218e48a96254e3e9,https://pdfs.semanticscholar.org/c5ae/ec7db8132685f408ca17a7a5c45c196b0323.pdf
+8ccde9d80706a59e606f6e6d48d4260b60ccc736,https://arxiv.org/pdf/1805.06846.pdf
+8ce9b7b52d05701d5ef4a573095db66ce60a7e1c,https://arxiv.org/pdf/1610.05211.pdf
+8cb6daba2cb1e208e809633133adfee0183b8dd2,https://pdfs.semanticscholar.org/8cb6/daba2cb1e208e809633133adfee0183b8dd2.pdf
+85fd2bda5eb3afe68a5a78c30297064aec1361f6,https://pdfs.semanticscholar.org/85fd/2bda5eb3afe68a5a78c30297064aec1361f6.pdf
+858b51a8a8aa082732e9c7fbbd1ea9df9c76b013,https://pdfs.semanticscholar.org/858b/51a8a8aa082732e9c7fbbd1ea9df9c76b013.pdf
+856317f27248cdb20226eaae599e46de628fb696,https://arxiv.org/pdf/1805.12467.pdf
+1d776bfe627f1a051099997114ba04678c45f0f5,https://arxiv.org/pdf/1805.10604.pdf
+1d7df3df839a6aa8f5392310d46b2a89080a3c25,https://arxiv.org/pdf/1612.02295.pdf
+1d729693a888a460ee855040f62bdde39ae273af,https://pdfs.semanticscholar.org/9da1/91858f65fd99c9b204a6f68916711d4bd51b.pdf
+1d4c25f9f8f08f5a756d6f472778ab54a7e6129d,https://pdfs.semanticscholar.org/1d4c/25f9f8f08f5a756d6f472778ab54a7e6129d.pdf
+7142ac9e4d5498037aeb0f459f278fd28dae8048,https://pdfs.semanticscholar.org/a148/0722ce6c89468ef44548c39fb79012f91a64.pdf
+7117ed0be436c0291bc6fb6ea6db18de74e2464a,https://pdfs.semanticscholar.org/7117/ed0be436c0291bc6fb6ea6db18de74e2464a.pdf
+714d487571ca0d676bad75c8fa622d6f50df953b,https://arxiv.org/pdf/1511.06491.pdf
+7143518f847b0ec57a0ff80e0304c89d7e924d9a,https://arxiv.org/pdf/1805.08373.pdf
+710011644006c18291ad512456b7580095d628a2,https://arxiv.org/pdf/1612.05363.pdf
+713db3874b77212492d75fb100a345949f3d3235,https://arxiv.org/pdf/1803.03345.pdf
+715b69575dadd7804b4f8ccb419a3ad8b7b7ca89,https://arxiv.org/pdf/1610.03207.pdf
+76cd5e43df44e389483f23cb578a9015d1483d70,https://pdfs.semanticscholar.org/76cd/5e43df44e389483f23cb578a9015d1483d70.pdf
+76b11c281ac47fe6d95e124673a408ee9eb568e3,https://pdfs.semanticscholar.org/76b1/1c281ac47fe6d95e124673a408ee9eb568e3.pdf
+764882e6779fbee29c3d87e00302befc52d2ea8d,https://arxiv.org/pdf/1711.07437.pdf
+766728bac030b169fcbc2fbafe24c6e22a58ef3c,https://pdfs.semanticscholar.org/7667/28bac030b169fcbc2fbafe24c6e22a58ef3c.pdf
+7697295ee6fc817296bed816ac5cae97644c2d5b,https://arxiv.org/pdf/1704.07333.pdf
+1c9efb6c895917174ac6ccc3bae191152f90c625,https://arxiv.org/pdf/1806.03084.pdf
+1cfe3533759bf95be1fce8ce1d1aa2aeb5bfb4cc,https://pdfs.semanticscholar.org/6a82/2f65c3a49525ffa0dc896ac24e9ad3dca62e.pdf
+1c41965c5e1f97b1504c1bdde8037b5e0417da5e,https://arxiv.org/pdf/1808.01106.pdf
+1c6e22516ceb5c97c3caf07a9bd5df357988ceda,https://arxiv.org/pdf/1806.05476.pdf
+825f56ff489cdd3bcc41e76426d0070754eab1a8,https://pdfs.semanticscholar.org/bc51/1519cf8d4e3e247d7506c38d80f64c6a859e.pdf
+824d1db06e1c25f7681e46199fd02cb5fc343784,https://pdfs.semanticscholar.org/824d/1db06e1c25f7681e46199fd02cb5fc343784.pdf
+82eff71af91df2ca18aebb7f1153a7aed16ae7cc,https://pdfs.semanticscholar.org/82ef/f71af91df2ca18aebb7f1153a7aed16ae7cc.pdf
+82a610a59c210ff77cfdde7fd10c98067bd142da,https://pdfs.semanticscholar.org/82a6/10a59c210ff77cfdde7fd10c98067bd142da.pdf
+829f390b3f8ad5856e7ba5ae8568f10cee0c7e6a,https://pdfs.semanticscholar.org/bbf4/f0ce0838c8eec048e3a9b212053fd98dde5a.pdf
+82417d8ec8ac6406f2d55774a35af2a1b3f4b66e,https://pdfs.semanticscholar.org/8241/7d8ec8ac6406f2d55774a35af2a1b3f4b66e.pdf
+82eb267b8e86be0b444e841b4b4ed4814b6f1942,https://arxiv.org/pdf/1604.08685.pdf
+4972aadcce369a8c0029e6dc2f288dfd0241e144,https://arxiv.org/pdf/1809.00852.pdf
+49e975a4c60d99bcc42c921d73f8d89ec7130916,https://pdfs.semanticscholar.org/49e9/75a4c60d99bcc42c921d73f8d89ec7130916.pdf
+49df381ea2a1e7f4059346311f1f9f45dd997164,https://arxiv.org/pdf/1807.00848.pdf
+403a108dec92363fd1f465340bd54dbfe65af870,https://arxiv.org/pdf/1510.00542.pdf
+40dd2b9aace337467c6e1e269d0cb813442313d7,https://pdfs.semanticscholar.org/40dd/2b9aace337467c6e1e269d0cb813442313d7.pdf
+407de9da58871cae7a6ded2f3a6162b9dc371f38,https://arxiv.org/pdf/1808.00297.pdf
+40b7e590dfd1cdfa1e0276e9ca592e02c1bd2b5b,https://arxiv.org/pdf/1804.05197.pdf
+40a34d4eea5e32dfbcef420ffe2ce7c1ee0f23cd,https://pdfs.semanticscholar.org/8ef1/0da52c6b2c3856f56aa9d68acab4c1649ed8.pdf
+40e1743332523b2ab5614bae5e10f7a7799161f4,https://arxiv.org/pdf/1711.06753.pdf
+40273657e6919455373455bd9a5355bb46a7d614,https://arxiv.org/pdf/1805.09380.pdf
+2e8e6b835e5a8f55f3b0bdd7a1ff765a0b7e1b87,https://arxiv.org/pdf/1805.11333.pdf
+2e5cfa97f3ecc10ae8f54c1862433285281e6a7c,https://pdfs.semanticscholar.org/2e5c/fa97f3ecc10ae8f54c1862433285281e6a7c.pdf
+2e0d56794379c436b2d1be63e71a215dd67eb2ca,https://arxiv.org/pdf/1709.03872.pdf
+2e231f1e7e641dd3619bec59e14d02e91360ac01,https://arxiv.org/pdf/1807.10421.pdf
+2ed4973984b254be5cba3129371506275fe8a8eb,https://pdfs.semanticscholar.org/2ed4/973984b254be5cba3129371506275fe8a8eb.pdf
+2e9c780ee8145f29bd1a000585dd99b14d1f5894,https://arxiv.org/pdf/1807.08108.pdf
+2e832d5657bf9e5678fd45b118fc74db07dac9da,https://pdfs.semanticscholar.org/2e83/2d5657bf9e5678fd45b118fc74db07dac9da.pdf
+2bb53e66aa9417b6560e588b6235e7b8ebbc294c,https://arxiv.org/pdf/1502.01540.pdf
+2b869d5551b10f13bf6fcdb8d13f0aa4d1f59fc4,https://arxiv.org/pdf/1803.00130.pdf
+477236563c6a6c6db922045453b74d3f9535bfa1,https://pdfs.semanticscholar.org/3c3a/e3a2e7c3ee00f33a87a82d5783e84c3a1de2.pdf
+47190d213caef85e8b9dd0d271dbadc29ed0a953,https://arxiv.org/pdf/1807.11649.pdf
+47e14fdc6685f0b3800f709c32e005068dfc8d47,https://arxiv.org/pdf/1805.00577.pdf
+782188821963304fb78791e01665590f0cd869e8,https://arxiv.org/pdf/1708.01311.pdf
+78c1ad33772237bf138084220d1ffab800e1200d,https://arxiv.org/pdf/1804.08450.pdf
+78598e7005f7c96d64cc47ff47e6f13ae52245b8,https://arxiv.org/pdf/1708.00370.pdf
+78174c2be084e67f48f3e8ea5cb6c9968615a42c,https://arxiv.org/pdf/1809.06157.pdf
+8b2c090d9007e147b8c660f9282f357336358061,https://pdfs.semanticscholar.org/8b2c/090d9007e147b8c660f9282f357336358061.pdf
+8bed7ff2f75d956652320270eaf331e1f73efb35,https://arxiv.org/pdf/1709.03820.pdf
+8befcd91c24038e5c26df0238d26e2311b21719a,https://arxiv.org/pdf/1808.02559.pdf
+8bdf6f03bde08c424c214188b35be8b2dec7cdea,https://arxiv.org/pdf/1805.04049.pdf
+8b744786137cf6be766778344d9f13abf4ec0683,https://pdfs.semanticscholar.org/8b74/4786137cf6be766778344d9f13abf4ec0683.pdf
+8b61fdc47b5eeae6bc0a52523f519eaeaadbc8c8,https://pdfs.semanticscholar.org/8b61/fdc47b5eeae6bc0a52523f519eaeaadbc8c8.pdf
+8b38124ff02a9cf8ad00de5521a7f8a9fa4d7259,https://pdfs.semanticscholar.org/8b38/124ff02a9cf8ad00de5521a7f8a9fa4d7259.pdf
+134f1cee8408cca648d8b4ca44b38b0a7023af71,https://pdfs.semanticscholar.org/134f/1cee8408cca648d8b4ca44b38b0a7023af71.pdf
+13604bbdb6f04a71dea4bd093794e46730b0a488,https://arxiv.org/pdf/1712.09482.pdf
+13aef395f426ca8bd93640c9c3f848398b189874,https://pdfs.semanticscholar.org/13ae/f395f426ca8bd93640c9c3f848398b189874.pdf
+1316296fae6485c1510f00b1b57fb171b9320ac2,https://pdfs.semanticscholar.org/58d7/6380d194248b3bb291b8c7c5137a0a376897.pdf
+7f511a6a2b38a26f077a5aec4baf5dffc981d881,https://arxiv.org/pdf/1805.02877.pdf
+7f21a7441c6ded38008c1fd0b91bdd54425d3f80,https://arxiv.org/pdf/1809.05474.pdf
+7fb5006b6522436ece5bedf509e79bdb7b79c9a7,https://pdfs.semanticscholar.org/7fb5/006b6522436ece5bedf509e79bdb7b79c9a7.pdf
+7f4bc8883c3b9872408cc391bcd294017848d0cf,https://pdfs.semanticscholar.org/7f4b/c8883c3b9872408cc391bcd294017848d0cf.pdf
+7f445191fa0475ff0113577d95502a96dc702ef9,https://arxiv.org/pdf/1805.04026.pdf
+7fab17ef7e25626643f1d55257a3e13348e435bd,https://arxiv.org/pdf/1702.08423.pdf
+7a81967598c2c0b3b3771c1af943efb1defd4482,https://arxiv.org/pdf/1503.01508.pdf
+7a3d46f32f680144fd2ba261681b43b86b702b85,https://arxiv.org/pdf/1805.01282.pdf
+7a97de9460d679efa5a5b4c6f0b0a5ef68b56b3b,https://arxiv.org/pdf/1709.08129.pdf
+7ac9aaafe4d74542832c273acf9d631cb8ea6193,https://arxiv.org/pdf/1809.04185.pdf
+7a7b1352d97913ba7b5d9318d4c3d0d53d6fb697,https://arxiv.org/pdf/1807.07320.pdf
+7aa062c6c90dba866273f5edd413075b90077b51,https://pdfs.semanticscholar.org/7aa0/62c6c90dba866273f5edd413075b90077b51.pdf
+7a131fafa7058fb75fdca32d0529bc7cb50429bd,https://arxiv.org/pdf/1704.04086.pdf
+1442319de86d171ce9595b20866ec865003e66fc,https://pdfs.semanticscholar.org/1442/319de86d171ce9595b20866ec865003e66fc.pdf
+14fee990a372bcc4cb6dc024ab7fc4ecf09dba2b,https://arxiv.org/pdf/1806.11008.pdf
+14ee4948be56caeb30aa3b94968ce663e7496ce4,https://pdfs.semanticscholar.org/14ee/4948be56caeb30aa3b94968ce663e7496ce4.pdf
+8e3d0b401dec8818cd0245c540c6bc032f169a1d,https://arxiv.org/pdf/1702.08398.pdf
+8e3c97e420e0112c043929087d6456d8ab61e95c,https://pdfs.semanticscholar.org/0e44/90f7616634e06a0b89eedbe37433d7f5392d.pdf
+8e0ab1b08964393e4f9f42ca037220fe98aad7ac,https://arxiv.org/pdf/1712.04695.pdf
+8ed32c8fad924736ebc6d99c5c319312ba1fa80b,https://pdfs.semanticscholar.org/8ed3/2c8fad924736ebc6d99c5c319312ba1fa80b.pdf
+8e36100cb144685c26e46ad034c524b830b8b2f2,https://pdfs.semanticscholar.org/8e36/100cb144685c26e46ad034c524b830b8b2f2.pdf
+8ed33184fccde677ec8413ae06f28ea9f2ca70f3,https://arxiv.org/pdf/1712.00796.pdf
+8e0becfc5fe3ecdd2ac93fabe34634827b21ef2b,https://arxiv.org/pdf/1711.10520.pdf
+2227f978f084ebb18cb594c0cfaf124b0df6bf95,https://pdfs.semanticscholar.org/2227/f978f084ebb18cb594c0cfaf124b0df6bf95.pdf
+2201f187a7483982c2e8e2585ad9907c5e66671d,https://pdfs.semanticscholar.org/1cad/9aa5095733b56e998ad0cd396e89c2bc9928.pdf
+227b1a09b942eaf130d1d84cdcabf98921780a22,https://pdfs.semanticscholar.org/227b/1a09b942eaf130d1d84cdcabf98921780a22.pdf
+25ff865460c2b5481fa4161749d5da8501010aa0,https://arxiv.org/pdf/1702.07971.pdf
+2588acc7a730d864f84d4e1a050070ff873b03d5,https://pdfs.semanticscholar.org/2588/acc7a730d864f84d4e1a050070ff873b03d5.pdf
+25982e2bef817ebde7be5bb80b22a9864b979fb0,https://arxiv.org/pdf/1709.05731.pdf
diff --git a/scraper/reports/misc/raw_paper_pdf_list.csv b/scraper/reports/misc/raw_paper_pdf_list.csv
new file mode 100644
index 00000000..b78a3755
--- /dev/null
+++ b/scraper/reports/misc/raw_paper_pdf_list.csv
@@ -0,0 +1,2434 @@
+Paper ID,PDF URL,IEEE URL,DOI URL,Extra URL
+610779e90b644cc18696d7ac7820d3e0598e24d0,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7067419,,
+61262450d4d814865a4f9a84299c24daa493f66e,,,http://doi.org/10.1007/s10462-016-9474-x,
+61971f8e6fff5b35faed610d02ad14ccfc186c70,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373843,,
+61e2044184d86d0f13e50ecaa3da6a4913088c76,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7572183,,
+61f04606528ecf4a42b49e8ac2add2e9f92c0def,https://arxiv.org/pdf/1605.01014.pdf,,,https://arxiv.org/pdf/1605.01014.pdf
+61efeb64e8431cfbafa4b02eb76bf0c58e61a0fa,https://arxiv.org/pdf/1809.01604.pdf,,,https://arxiv.org/pdf/1809.01604.pdf
+61e9e180d3d1d8b09f1cc59bdd9f98c497707eff,https://pdfs.semanticscholar.org/61e9/e180d3d1d8b09f1cc59bdd9f98c497707eff.pdf,,,https://pdfs.semanticscholar.org/61e9/e180d3d1d8b09f1cc59bdd9f98c497707eff.pdf
+61329bc767152f01aa502989abc854b53047e52c,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8450832,,
+6193c833ad25ac27abbde1a31c1cabe56ce1515b,https://pdfs.semanticscholar.org/5f25/7ca18a92c3595db3bda3224927ec494003a5.pdf,,,https://pdfs.semanticscholar.org/5f25/7ca18a92c3595db3bda3224927ec494003a5.pdf
+614079f1a0d0938f9c30a1585f617fa278816d53,https://arxiv.org/pdf/1612.02374.pdf,,,https://arxiv.org/pdf/1612.02374.pdf
+0da75b0d341c8f945fae1da6c77b6ec345f47f2a,https://pdfs.semanticscholar.org/0da7/5b0d341c8f945fae1da6c77b6ec345f47f2a.pdf,,,https://pdfs.semanticscholar.org/0da7/5b0d341c8f945fae1da6c77b6ec345f47f2a.pdf
+0d33b6c8b4d1a3cb6d669b4b8c11c2a54c203d1a,https://pdfs.semanticscholar.org/0d33/b6c8b4d1a3cb6d669b4b8c11c2a54c203d1a.pdf,,,https://pdfs.semanticscholar.org/0d33/b6c8b4d1a3cb6d669b4b8c11c2a54c203d1a.pdf
+0da4c3d898ca2fff9e549d18f513f4898e960aca,https://pdfs.semanticscholar.org/0da4/c3d898ca2fff9e549d18f513f4898e960aca.pdf,,,https://pdfs.semanticscholar.org/0da4/c3d898ca2fff9e549d18f513f4898e960aca.pdf
+95b9df34bcf4ae04beea55c11cf0cc4095aa38dc,,,http://doi.org/10.1007/11527923_7,
+959bcb16afdf303c34a8bfc11e9fcc9d40d76b1c,https://pdfs.semanticscholar.org/959b/cb16afdf303c34a8bfc11e9fcc9d40d76b1c.pdf,,,https://pdfs.semanticscholar.org/959b/cb16afdf303c34a8bfc11e9fcc9d40d76b1c.pdf
+95289007f2f336e6636cf8f920225b8d47c6e94f,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6472796,,
+95b5296f7ec70455b0cf1748cddeaa099284bfed,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8443886,,
+95ea564bd983129ddb5535a6741e72bb1162c779,https://arxiv.org/pdf/1711.00111.pdf,,,https://arxiv.org/pdf/1711.00111.pdf
+95d858b39227edeaf75b7fad71f3dc081e415d16,,,http://doi.org/10.1007/s11042-017-5073-3,
+95e3b78eb4d5b469f66648ed4f37e45e0e01e63e,,,http://doi.org/10.1007/s11042-016-4261-x,
+950171acb24bb24a871ba0d02d580c09829de372,https://pdfs.semanticscholar.org/9501/71acb24bb24a871ba0d02d580c09829de372.pdf,,,https://pdfs.semanticscholar.org/9501/71acb24bb24a871ba0d02d580c09829de372.pdf
+95288fa7ff4683e32fe021a78cbf7d3376e6e400,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014759,,
+59fc69b3bc4759eef1347161e1248e886702f8f7,https://pdfs.semanticscholar.org/59fc/69b3bc4759eef1347161e1248e886702f8f7.pdf,,,https://pdfs.semanticscholar.org/59fc/69b3bc4759eef1347161e1248e886702f8f7.pdf
+59efb1ac77c59abc8613830787d767100387c680,https://arxiv.org/pdf/1805.10030.pdf,,,https://arxiv.org/pdf/1805.10030.pdf
+598744c8620e4ecbf449d14d7081fbf1cd05851f,,,,https://www.ncbi.nlm.nih.gov/pubmed/29731533
+59dac8b460a89e03fa616749a08e6149708dcc3a,https://pdfs.semanticscholar.org/59da/c8b460a89e03fa616749a08e6149708dcc3a.pdf,,,https://pdfs.semanticscholar.org/59da/c8b460a89e03fa616749a08e6149708dcc3a.pdf
+59e9934720baf3c5df3a0e1e988202856e1f83ce,https://arxiv.org/pdf/1511.04136.pdf,,,https://arxiv.org/pdf/1511.04136.pdf
+59d225486161b43b7bf6919b4a4b4113eb50f039,https://arxiv.org/pdf/1701.04769.pdf,,,https://arxiv.org/pdf/1701.04769.pdf
+5945464d47549e8dcaec37ad41471aa70001907f,https://arxiv.org/pdf/1507.05738.pdf,,,https://arxiv.org/pdf/1507.05738.pdf
+59b83666c1031c3f509f063b9963c7ad9781ca23,,,,http://dl.acm.org/citation.cfm?id=2830590
+592f14f4b12225fc691477a180a2a3226a5ef4f0,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789592,,
+599adc0dcd4ebcc2a868feedd243b5c3c1bd1d0a,https://arxiv.org/pdf/1808.09316.pdf,,,https://arxiv.org/pdf/1808.09316.pdf
+59a6c9333c941faf2540979dcfcb5d503a49b91e,https://arxiv.org/pdf/1806.08245.pdf,,,https://arxiv.org/pdf/1806.08245.pdf
+9285f4a6a06e975bde3ae3267fccd971d4fff98a,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099853,,
+9296f4ac0180e29226d6c016b5a4d5d2964eaaf6,,,http://doi.org/10.1038/s41598-017-07122-x,
+92b61b09d2eed4937058d0f9494d9efeddc39002,https://pdfs.semanticscholar.org/92b6/1b09d2eed4937058d0f9494d9efeddc39002.pdf,,,https://pdfs.semanticscholar.org/92b6/1b09d2eed4937058d0f9494d9efeddc39002.pdf
+92be73dffd3320fe7734258961fe5a5f2a43390e,https://pdfs.semanticscholar.org/92be/73dffd3320fe7734258961fe5a5f2a43390e.pdf,,,https://pdfs.semanticscholar.org/92be/73dffd3320fe7734258961fe5a5f2a43390e.pdf
+9207671d9e2b668c065e06d9f58f597601039e5e,https://pdfs.semanticscholar.org/9207/671d9e2b668c065e06d9f58f597601039e5e.pdf,,,https://pdfs.semanticscholar.org/9207/671d9e2b668c065e06d9f58f597601039e5e.pdf
+92292fffc36336d63f4f77d6b8fc23b0c54090e9,,,http://doi.org/10.1016/j.jvcir.2015.03.001,
+928b8eb47288a05611c140d02441660277a7ed54,https://arxiv.org/pdf/1805.04384.pdf,,,https://arxiv.org/pdf/1805.04384.pdf
+926e97d5ce2a6e070f8ec07c5aa7f91d3df90ba0,https://arxiv.org/pdf/1705.07871.pdf,,,https://arxiv.org/pdf/1705.07871.pdf
+92e464a5a67582d5209fa75e3b29de05d82c7c86,https://pdfs.semanticscholar.org/92e4/64a5a67582d5209fa75e3b29de05d82c7c86.pdf,,,https://pdfs.semanticscholar.org/92e4/64a5a67582d5209fa75e3b29de05d82c7c86.pdf
+927ba64123bd4a8a31163956b3d1765eb61e4426,https://pdfs.semanticscholar.org/927b/a64123bd4a8a31163956b3d1765eb61e4426.pdf,,,https://pdfs.semanticscholar.org/927b/a64123bd4a8a31163956b3d1765eb61e4426.pdf
+923ec0da8327847910e8dd71e9d801abcbc93b08,https://arxiv.org/pdf/1704.04232.pdf,,,https://arxiv.org/pdf/1704.04232.pdf
+0c6a566ebdac4bd14e80cd6bf4631bc7458e1595,,,http://doi.org/10.1016/j.patcog.2013.03.010,
+0cf2eecf20cfbcb7f153713479e3206670ea0e9c,https://arxiv.org/pdf/1806.08906.pdf,,,https://arxiv.org/pdf/1806.08906.pdf
+0ca36ecaf4015ca4095e07f0302d28a5d9424254,https://arxiv.org/pdf/1810.00360.pdf,,,https://arxiv.org/pdf/1810.00360.pdf
+0cfca73806f443188632266513bac6aaf6923fa8,https://arxiv.org/pdf/1805.04756.pdf,,,https://arxiv.org/pdf/1805.04756.pdf
+6689aee6c9599c1af4c607ea5385ac0c2cf0c4b3,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8335166,,
+6601a0906e503a6221d2e0f2ca8c3f544a4adab7,https://pdfs.semanticscholar.org/6601/a0906e503a6221d2e0f2ca8c3f544a4adab7.pdf,,,https://pdfs.semanticscholar.org/6601/a0906e503a6221d2e0f2ca8c3f544a4adab7.pdf
+661ca4bbb49bb496f56311e9d4263dfac8eb96e9,https://arxiv.org/pdf/1803.09010.pdf,,,https://arxiv.org/pdf/1803.09010.pdf
+66d087f3dd2e19ffe340c26ef17efe0062a59290,https://pdfs.semanticscholar.org/66d0/87f3dd2e19ffe340c26ef17efe0062a59290.pdf,,,https://pdfs.semanticscholar.org/66d0/87f3dd2e19ffe340c26ef17efe0062a59290.pdf
+66837add89caffd9c91430820f49adb5d3f40930,https://pdfs.semanticscholar.org/4a6d/20f60ff06cca446578ea1218737190e288e6.pdf,,,https://pdfs.semanticscholar.org/4a6d/20f60ff06cca446578ea1218737190e288e6.pdf
+660c99ac408b535bb0468ab3708d0d1d5db30180,,,http://doi.org/10.1007/s11042-015-3083-6,
+66490b5869822b31d32af7108eaff193fbdb37b0,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373857,,
+663efaa0671eace1100fdbdecacd94216a17b1db,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6619243,,
+3e0a1884448bfd7f416c6a45dfcdfc9f2e617268,https://arxiv.org/pdf/1805.05838.pdf,,,https://arxiv.org/pdf/1805.05838.pdf
+3e3227c8e9f44593d2499f4d1302575c77977b2e,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8347112,,
+3e4acf3f2d112fc6516abcdddbe9e17d839f5d9b,https://arxiv.org/pdf/1703.04363.pdf,,,https://arxiv.org/pdf/1703.04363.pdf
+3e59d97d42f36fc96d33a5658951856a555e997b,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163128,,
+3e9ab40e6e23f09d16c852b74d40264067ac6abc,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6619307,,
+3e40991ab1daa2a4906eb85a5d6a01a958b6e674,https://arxiv.org/pdf/1611.01599.pdf,,,https://arxiv.org/pdf/1611.01599.pdf
+3e2b9ffeb708b4362ebfad95fa7bb0101db1579d,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553717,,
+506c2fbfa9d16037d50d650547ad3366bb1e1cde,https://pdfs.semanticscholar.org/506c/2fbfa9d16037d50d650547ad3366bb1e1cde.pdf,,,https://pdfs.semanticscholar.org/506c/2fbfa9d16037d50d650547ad3366bb1e1cde.pdf
+504028218290d68859f45ec686f435f473aa326c,https://arxiv.org/pdf/1807.11195.pdf,,,https://arxiv.org/pdf/1807.11195.pdf
+50ee027c63dcc5ab5cd0a6cdffb1994f83916a46,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995354,,
+50a0930cb8cc353e15a5cb4d2f41b365675b5ebf,https://pdfs.semanticscholar.org/50a0/930cb8cc353e15a5cb4d2f41b365675b5ebf.pdf,,,https://pdfs.semanticscholar.org/50a0/930cb8cc353e15a5cb4d2f41b365675b5ebf.pdf
+508702ed2bf7d1b0655ea7857dd8e52d6537e765,https://pdfs.semanticscholar.org/5087/02ed2bf7d1b0655ea7857dd8e52d6537e765.pdf,,,https://pdfs.semanticscholar.org/5087/02ed2bf7d1b0655ea7857dd8e52d6537e765.pdf
+506ea19145838a035e7dba535519fb40a3a0018c,,,,http://arxiv.org/abs/1806.08251
+68d2afd8c5c1c3a9bbda3dd209184e368e4376b9,https://arxiv.org/pdf/1705.11136.pdf,,,https://arxiv.org/pdf/1705.11136.pdf
+68d08ed9470d973a54ef7806318d8894d87ba610,https://arxiv.org/pdf/1804.02555.pdf,,,https://arxiv.org/pdf/1804.02555.pdf
+68caf5d8ef325d7ea669f3fb76eac58e0170fff0,https://arxiv.org/pdf/1805.07646.pdf,,,https://arxiv.org/pdf/1805.07646.pdf
+684f5166d8147b59d9e0938d627beff8c9d208dd,https://arxiv.org/pdf/1707.03548.pdf,,,https://arxiv.org/pdf/1707.03548.pdf
+68604e7e1b01cdbd3c23832976d66f1a86edaa8f,,,http://doi.org/10.1134/S1054661818030136,
+68484ae8a042904a95a8d284a7f85a4e28e37513,https://pdfs.semanticscholar.org/6848/4ae8a042904a95a8d284a7f85a4e28e37513.pdf,,,https://pdfs.semanticscholar.org/6848/4ae8a042904a95a8d284a7f85a4e28e37513.pdf
+682760f2f767fb47e1e2ca35db3becbb6153756f,https://arxiv.org/pdf/1804.03507.pdf,,,https://arxiv.org/pdf/1804.03507.pdf
+6856a11b98ffffeff6e2f991d3d1a1232c029ea1,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771409,,
+68f61154a0080c4aae9322110c8827978f01ac2e,https://pdfs.semanticscholar.org/68f6/1154a0080c4aae9322110c8827978f01ac2e.pdf,,,https://pdfs.semanticscholar.org/68f6/1154a0080c4aae9322110c8827978f01ac2e.pdf
+68c1090f912b69b76437644dd16922909dd40d60,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6987312,,
+5760d29574d78e79e8343b74e6e30b3555e48676,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8447743,,
+572dbaee6648eefa4c9de9b42551204b985ff863,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163151,,
+574b62c845809fd54cc168492424c5fac145bc83,https://arxiv.org/pdf/1804.04829.pdf,,,https://arxiv.org/pdf/1804.04829.pdf
+57246142814d7010d3592e3a39a1ed819dd01f3b,https://pdfs.semanticscholar.org/5724/6142814d7010d3592e3a39a1ed819dd01f3b.pdf,,,https://pdfs.semanticscholar.org/5724/6142814d7010d3592e3a39a1ed819dd01f3b.pdf
+5779e3e439c90d43648db107e848aeb954d3e347,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7927417,,
+571b83f7fc01163383e6ca6a9791aea79cafa7dd,https://arxiv.org/pdf/1803.06524.pdf,,,https://arxiv.org/pdf/1803.06524.pdf
+574ad7ef015995efb7338829a021776bf9daaa08,https://arxiv.org/pdf/1611.08240.pdf,,,https://arxiv.org/pdf/1611.08240.pdf
+57a14a65e8ae15176c9afae874854e8b0f23dca7,https://pdfs.semanticscholar.org/57a1/4a65e8ae15176c9afae874854e8b0f23dca7.pdf,,,https://pdfs.semanticscholar.org/57a1/4a65e8ae15176c9afae874854e8b0f23dca7.pdf
+5748652924084b7b0220cddcd28f6b2222004359,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7492255,,
+57178b36c21fd7f4529ac6748614bb3374714e91,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217,,
+3b350afd8b82487aa97097170c269a25daa0c82d,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8248664,,
+3b21aaf7def52964cf1fcc5f11520a7618c8fae3,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099900,,
+3b73f8a2b39751efb7d7b396bf825af2aaadee24,https://arxiv.org/pdf/1712.01066.pdf,,,https://arxiv.org/pdf/1712.01066.pdf
+3bf8e4d89b9e6d004de6ea52e3e9d68f6015f94b,,,,http://dl.acm.org/citation.cfm?id=3240893
+3b84d074b8622fac125f85ab55b63e876fed4628,https://arxiv.org/pdf/1608.02676.pdf,,,https://arxiv.org/pdf/1608.02676.pdf
+3bcb93aa2a5e5eda039679516292af2f7c0ff9ac,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8393012,,
+3be8f1f7501978287af8d7ebfac5963216698249,https://pdfs.semanticscholar.org/3be8/f1f7501978287af8d7ebfac5963216698249.pdf,,,https://pdfs.semanticscholar.org/3be8/f1f7501978287af8d7ebfac5963216698249.pdf
+3bf579baf0903ee4d4180a29739bf05cbe8f4a74,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4270392,,
+3baa3d5325f00c7edc1f1427fcd5bdc6a420a63f,https://arxiv.org/pdf/1707.07923.pdf,,,https://arxiv.org/pdf/1707.07923.pdf
+3bb670b2afdcc45da2b09a02aac07e22ea7dbdc2,,,,
+3bd10f7603c4f5a4737c5613722124787d0dd818,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7415949,,
+3bb6570d81685b769dc9e74b6e4958894087f3f1,https://arxiv.org/pdf/1805.05098.pdf,,,https://arxiv.org/pdf/1805.05098.pdf
+6f22628d34a486d73c6b46eb071200a00e3abae3,,,,https://www.ncbi.nlm.nih.gov/pubmed/29994497
+6f5ce5570dc2960b8b0e4a0a50eab84b7f6af5cb,https://arxiv.org/pdf/1706.06247.pdf,,,https://arxiv.org/pdf/1706.06247.pdf
+6feafc5c1d8b0e9d65ebe4c1512b7860c538fbdc,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8448885,,
+6f7d06ced04ead3b9a5da86b37e7c27bfcedbbdd,https://pdfs.semanticscholar.org/6f7d/06ced04ead3b9a5da86b37e7c27bfcedbbdd.pdf,,,https://pdfs.semanticscholar.org/6f7d/06ced04ead3b9a5da86b37e7c27bfcedbbdd.pdf
+6f7a8b3e8f212d80f0fb18860b2495be4c363eac,https://arxiv.org/pdf/1712.02662.pdf,,,https://arxiv.org/pdf/1712.02662.pdf
+6fea198a41d2f6f73e47f056692f365c8e6b04ce,https://arxiv.org/pdf/1807.03658.pdf,,,https://arxiv.org/pdf/1807.03658.pdf
+6f3054f182c34ace890a32fdf1656b583fbc7445,https://pdfs.semanticscholar.org/6f30/54f182c34ace890a32fdf1656b583fbc7445.pdf,,,https://pdfs.semanticscholar.org/6f30/54f182c34ace890a32fdf1656b583fbc7445.pdf
+6fef65bd7287b57f0c3b36bf8e6bc987fd161b7d,https://arxiv.org/pdf/1807.08259.pdf,,,https://arxiv.org/pdf/1807.08259.pdf
+6fdc0bc13f2517061eaa1364dcf853f36e1ea5ae,https://pdfs.semanticscholar.org/6fdc/0bc13f2517061eaa1364dcf853f36e1ea5ae.pdf,,,https://pdfs.semanticscholar.org/6fdc/0bc13f2517061eaa1364dcf853f36e1ea5ae.pdf
+03c56c176ec6377dddb6a96c7b2e95408db65a7a,https://arxiv.org/pdf/1807.00676.pdf,,,https://arxiv.org/pdf/1807.00676.pdf
+0322e69172f54b95ae6a90eb3af91d3daa5e36ea,https://pdfs.semanticscholar.org/0322/e69172f54b95ae6a90eb3af91d3daa5e36ea.pdf,,,https://pdfs.semanticscholar.org/0322/e69172f54b95ae6a90eb3af91d3daa5e36ea.pdf
+03ce2ff688f9b588b6f264ca79c6857f0d80ceae,https://arxiv.org/pdf/1711.09550.pdf,,,https://arxiv.org/pdf/1711.09550.pdf
+032825000c03b8ab4c207e1af4daeb1f225eb025,https://pdfs.semanticscholar.org/0328/25000c03b8ab4c207e1af4daeb1f225eb025.pdf,,,https://pdfs.semanticscholar.org/0328/25000c03b8ab4c207e1af4daeb1f225eb025.pdf
+035c8632c1ffbeb75efe16a4ec50c91e20e6e189,,,http://doi.org/10.1007/s00138-018-0943-x,
+034b3f3bac663fb814336a69a9fd3514ca0082b9,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7298991,,
+03ac1c694bc84a27621da6bfe73ea9f7210c6d45,https://pdfs.semanticscholar.org/03ac/1c694bc84a27621da6bfe73ea9f7210c6d45.pdf,,,https://pdfs.semanticscholar.org/03ac/1c694bc84a27621da6bfe73ea9f7210c6d45.pdf
+03fe3d031afdcddf38e5cc0d908b734884542eeb,https://pdfs.semanticscholar.org/03fe/3d031afdcddf38e5cc0d908b734884542eeb.pdf,,,https://pdfs.semanticscholar.org/03fe/3d031afdcddf38e5cc0d908b734884542eeb.pdf
+9b4b2a575641f3a7f8a5ce28b6a06c36694a9ddf,,,http://doi.org/10.1007/s00371-015-1158-z,
+9bd35145c48ce172b80da80130ba310811a44051,https://arxiv.org/pdf/1606.00850.pdf,,,https://arxiv.org/pdf/1606.00850.pdf
+9b9f6e5eb6d7fa50300d67502e8fda1006594b84,,,,http://dl.acm.org/citation.cfm?id=3123323
+9b1022a01ca4ecf8c1fa99b1b39a93924de2fcfb,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8316962,,
+9bc01fa9400c231e41e6a72ec509d76ca797207c,https://pdfs.semanticscholar.org/9bc0/1fa9400c231e41e6a72ec509d76ca797207c.pdf,,,https://pdfs.semanticscholar.org/9bc0/1fa9400c231e41e6a72ec509d76ca797207c.pdf
+9b2c359c36c38c289c5bacaeb5b1dd06b464f301,https://arxiv.org/pdf/1709.01442.pdf,,,https://arxiv.org/pdf/1709.01442.pdf
+9b1bcef8bfef0fb5eb5ea9af0b699aa0534fceca,https://pdfs.semanticscholar.org/9b1b/cef8bfef0fb5eb5ea9af0b699aa0534fceca.pdf,,,https://pdfs.semanticscholar.org/9b1b/cef8bfef0fb5eb5ea9af0b699aa0534fceca.pdf
+9ba4a5e0b7bf6e26563d294f1f3de44d95b7f682,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354113,,
+9be653e1bc15ef487d7f93aad02f3c9552f3ee4a,https://pdfs.semanticscholar.org/9be6/53e1bc15ef487d7f93aad02f3c9552f3ee4a.pdf,,,https://pdfs.semanticscholar.org/9be6/53e1bc15ef487d7f93aad02f3c9552f3ee4a.pdf
+9b0ead0a20a2b7c4ae40568d8d1c0c2b23a6b807,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354290,,
+9b6d9f0923e1d42c86a1154897b1a9bd7ba6716c,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7114333,,
+9bac481dc4171aa2d847feac546c9f7299cc5aa0,https://arxiv.org/pdf/1609.04541.pdf,,,https://arxiv.org/pdf/1609.04541.pdf
+9b684e2e2bb43862f69b12c6be94db0e7a756187,https://arxiv.org/pdf/1709.04666.pdf,,,https://arxiv.org/pdf/1709.04666.pdf
+9ea223c070ec9a00f4cb5ca0de35d098eb9a8e32,https://arxiv.org/pdf/1708.03280.pdf,,,https://arxiv.org/pdf/1708.03280.pdf
+9efdb73c6833df57732b727c6aeac510cadb53fe,,,,http://dl.acm.org/citation.cfm?id=3184071
+9e105c4a176465d14434fb3f5bae67f57ff5fba2,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354230,,
+9e2ab407ff36f3b793d78d9118ea25622f4b7434,,,http://doi.org/10.1007/s11042-018-5679-0,
+9e297343da13cf9ba0ad8b5b75c07723136f4885,,,,
+9e182e0cd9d70f876f1be7652c69373bcdf37fb4,https://arxiv.org/pdf/1807.07860.pdf,,,https://arxiv.org/pdf/1807.07860.pdf
+9e10ea753b9767aa2f91dafe8545cd6f44befd7f,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771444,,
+040dc119d5ca9ea3d5fc39953a91ec507ed8cc5d,https://arxiv.org/pdf/1806.03018.pdf,,,https://arxiv.org/pdf/1806.03018.pdf
+04b851f25d6d49e61a528606953e11cfac7df2b2,https://arxiv.org/pdf/1711.11152.pdf,,,https://arxiv.org/pdf/1711.11152.pdf
+043efe5f465704ced8d71a067d2b9d5aa5b59c29,https://pdfs.semanticscholar.org/000a/c6b0865c79bcf0d6f7f069b3abfe229e1462.pdf,,,https://pdfs.semanticscholar.org/000a/c6b0865c79bcf0d6f7f069b3abfe229e1462.pdf
+0450dacc43171c6e623d0d5078600dd570de777e,,,http://doi.org/10.1007/s10339-016-0774-5,
+04b4c779b43b830220bf938223f685d1057368e9,https://arxiv.org/pdf/1712.00133.pdf,,,https://arxiv.org/pdf/1712.00133.pdf
+04317e63c08e7888cef480fe79f12d3c255c5b00,https://pdfs.semanticscholar.org/0431/7e63c08e7888cef480fe79f12d3c255c5b00.pdf,,,https://pdfs.semanticscholar.org/0431/7e63c08e7888cef480fe79f12d3c255c5b00.pdf
+047bb1b1bd1f19b6c8d7ee7d0324d5ecd1a3efff,https://arxiv.org/pdf/1806.06098.pdf,,,https://arxiv.org/pdf/1806.06098.pdf
+6ad107c08ac018bfc6ab31ec92c8a4b234f67d49,https://arxiv.org/pdf/1807.00966.pdf,,,https://arxiv.org/pdf/1807.00966.pdf
+6af75a8572965207c2b227ad35d5c61a5bd69f45,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8433687,,
+6a6269e591e11f41d59c2ca1e707aaa1f0d57de6,,,http://doi.org/10.1007/s10044-016-0531-5,
+6a52e6fce541126ff429f3c6d573bc774f5b8d89,https://pdfs.semanticscholar.org/6a52/e6fce541126ff429f3c6d573bc774f5b8d89.pdf,,,https://pdfs.semanticscholar.org/6a52/e6fce541126ff429f3c6d573bc774f5b8d89.pdf
+6a4419ce2338ea30a570cf45624741b754fa52cb,https://arxiv.org/pdf/1804.02541.pdf,,,https://arxiv.org/pdf/1804.02541.pdf
+6a931e7b7475635f089dd33e8d9a2899ae963804,,,http://doi.org/10.1007/s00371-018-1561-3,
+6a6406906470be10f6d6d94a32741ba370a1db68,,,http://doi.org/10.1007/s11042-016-4213-5,
+6a5d7d20a8c4993d56bcf702c772aa3f95f99450,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813408,,
+6a2ac4f831bd0f67db45e7d3cdaeaaa075e7180a,https://arxiv.org/pdf/1805.09092.pdf,,,https://arxiv.org/pdf/1805.09092.pdf
+3294e27356c3b1063595885a6d731d625b15505a,https://pdfs.semanticscholar.org/89b6/fe99faefb8ff4c54f9e7a88fde2470a51ed1.pdf,,,https://pdfs.semanticscholar.org/89b6/fe99faefb8ff4c54f9e7a88fde2470a51ed1.pdf
+3266fcd1886e8ad883714e38203e66c0c6487f7b,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7533149,,
+3266fbaaa317a796d0934b9a3f3bb7c64992ac7d,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4527244,,
+32f62da99ec9f58dd93e3be667612abcf00df16a,,,http://doi.org/10.1007/s11042-017-5583-z,
+3240c9359061edf7a06bfeb7cc20c103a65904c2,https://arxiv.org/pdf/1708.01956.pdf,,,https://arxiv.org/pdf/1708.01956.pdf
+32e4fc2f0d9c535b1aca95aeb5bcc0623bcd2cf2,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1334680,,
+32e9c9520cf6acb55dde672b73760442b2f166f5,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7970176,,
+352d61eb66b053ae5689bd194840fd5d33f0e9c0,https://arxiv.org/pdf/1807.04899.pdf,,,https://arxiv.org/pdf/1807.04899.pdf
+35b1c1f2851e9ac4381ef41b4d980f398f1aad68,https://pdfs.semanticscholar.org/35b1/c1f2851e9ac4381ef41b4d980f398f1aad68.pdf,,,https://pdfs.semanticscholar.org/35b1/c1f2851e9ac4381ef41b4d980f398f1aad68.pdf
+351c02d4775ae95e04ab1e5dd0c758d2d80c3ddd,https://pdfs.semanticscholar.org/351c/02d4775ae95e04ab1e5dd0c758d2d80c3ddd.pdf,,,https://pdfs.semanticscholar.org/351c/02d4775ae95e04ab1e5dd0c758d2d80c3ddd.pdf
+35208eda874591eac70286441d19785726578946,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789507,,
+35265cbd9c6ea95753f7c6b71659f7f7ef9081b6,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7052327,,
+352a620f0b96a7e76b9195a7038d5eec257fd994,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373823,,
+35e0256b33212ddad2db548484c595334f15b4da,https://pdfs.semanticscholar.org/35e0/256b33212ddad2db548484c595334f15b4da.pdf,,,https://pdfs.semanticscholar.org/35e0/256b33212ddad2db548484c595334f15b4da.pdf
+35e6f6e5f4f780508e5f58e87f9efe2b07d8a864,https://arxiv.org/pdf/1709.08421.pdf,,,https://arxiv.org/pdf/1709.08421.pdf
+69adf2f122ff18848ff85e8de3ee3b2bc495838e,,,,http://arxiv.org/abs/1711.10678
+6964af90cf8ac336a2a55800d9c510eccc7ba8e1,https://arxiv.org/pdf/1711.08496.pdf,,,https://arxiv.org/pdf/1711.08496.pdf
+69adbfa7b0b886caac15ebe53b89adce390598a3,https://arxiv.org/pdf/1805.10938.pdf,,,https://arxiv.org/pdf/1805.10938.pdf
+69a41c98f6b71764913145dbc2bb4643c9bc4b0a,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8444452,,
+69a55c30c085ad1b72dd2789b3f699b2f4d3169f,https://pdfs.semanticscholar.org/69a5/5c30c085ad1b72dd2789b3f699b2f4d3169f.pdf,,,https://pdfs.semanticscholar.org/69a5/5c30c085ad1b72dd2789b3f699b2f4d3169f.pdf
+695426275dee2ec56bc0c0afe1c5b4227a350840,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7878535,,
+6993bca2b3471f26f2c8a47adfe444bfc7852484,https://arxiv.org/pdf/1705.07426.pdf,,,https://arxiv.org/pdf/1705.07426.pdf
+696236fb6f986f6d5565abb01f402d09db68e5fa,,,http://doi.org/10.1007/s41095-018-0112-1,
+691964c43bfd282f6f4d00b8b0310c554b613e3b,https://pdfs.semanticscholar.org/6919/64c43bfd282f6f4d00b8b0310c554b613e3b.pdf,,,https://pdfs.semanticscholar.org/6919/64c43bfd282f6f4d00b8b0310c554b613e3b.pdf
+6932baa348943507d992aba75402cfe8545a1a9b,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014987,,
+6966d9d30fa9b7c01523425726ab417fd8428790,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6619291,,
+3cb2841302af1fb9656f144abc79d4f3d0b27380,https://pdfs.semanticscholar.org/3cb2/841302af1fb9656f144abc79d4f3d0b27380.pdf,,,https://pdfs.semanticscholar.org/3cb2/841302af1fb9656f144abc79d4f3d0b27380.pdf
+3c563542db664321aa77a9567c1601f425500f94,https://arxiv.org/pdf/1712.02514.pdf,,,https://arxiv.org/pdf/1712.02514.pdf
+3cd7b15f5647e650db66fbe2ce1852e00c05b2e4,https://pdfs.semanticscholar.org/3cd7/b15f5647e650db66fbe2ce1852e00c05b2e4.pdf,,,https://pdfs.semanticscholar.org/3cd7/b15f5647e650db66fbe2ce1852e00c05b2e4.pdf
+3c6cac7ecf546556d7c6050f7b693a99cc8a57b3,https://pdfs.semanticscholar.org/3c6c/ac7ecf546556d7c6050f7b693a99cc8a57b3.pdf,,,https://pdfs.semanticscholar.org/3c6c/ac7ecf546556d7c6050f7b693a99cc8a57b3.pdf
+3cb057a24a8adba6fe964b5d461ba4e4af68af14,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6701391,,
+3c09fb7fe1886072670e0c4dd632d052102a3733,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8101020,,
+3c0fffd4cdbfef4ccd92d528d8b8a60ab0929827,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373845,,
+3c1aef7c2d32a219bdbc89a44d158bc2695e360a,https://arxiv.org/pdf/1809.00594.pdf,,,https://arxiv.org/pdf/1809.00594.pdf
+3cd380bd0f3b164b44c49e3b01f6ac9798b6b6f9,,,http://doi.org/10.1007/s00371-016-1323-z,
+3c56acaa819f4e2263638b67cea1ec37a226691d,https://arxiv.org/pdf/1704.07160.pdf,,,https://arxiv.org/pdf/1704.07160.pdf
+562f7555e5cb79ce0fe834c4613264d8378dd007,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7153112,,
+56e079f4eb40744728fd1d7665938b06426338e5,https://arxiv.org/pdf/1705.04293.pdf,,,https://arxiv.org/pdf/1705.04293.pdf
+56fd4c05869e11e4935d48aa1d7abb96072ac242,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373812,,
+566563a02dbaebec07429046122426acd7039166,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8461618,,
+5632ba72b2652df3b648b2ee698233e76a4eee65,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8346387,,
+56a677c889e0e2c9f68ab8ca42a7e63acf986229,https://pdfs.semanticscholar.org/56a6/77c889e0e2c9f68ab8ca42a7e63acf986229.pdf,,,https://pdfs.semanticscholar.org/56a6/77c889e0e2c9f68ab8ca42a7e63acf986229.pdf
+56dca23481de9119aa21f9044efd7db09f618704,https://arxiv.org/pdf/1507.02772.pdf,,,https://arxiv.org/pdf/1507.02772.pdf
+516a27d5dd06622f872f5ef334313350745eadc3,https://arxiv.org/pdf/1805.01024.pdf,,,https://arxiv.org/pdf/1805.01024.pdf
+51b42da0706a1260430f27badcf9ee6694768b9b,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7471882,,
+5180df9d5eb26283fb737f491623395304d57497,https://arxiv.org/pdf/1804.10899.pdf,,,https://arxiv.org/pdf/1804.10899.pdf
+51410d6bd9a41eacb105f15dbdaee520e050d646,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8412888,,
+51d6a8a61ea9588a795b20353c97efccec73f5db,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6460308,,
+518a3ce2a290352afea22027b64bf3950bffc65a,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5204174,,
+51dcb36a6c247189be4420562f19feb00c9487f8,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1394433,,
+51faacfa4fb1e6aa252c6970e85ff35c5719f4ff,https://arxiv.org/pdf/1807.04979.pdf,,,https://arxiv.org/pdf/1807.04979.pdf
+519f1486f0755ef3c1f05700ea8a05f52f83387b,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5595846,,
+51cb09ee04831b95ae02e1bee9b451f8ac4526e3,https://arxiv.org/pdf/1503.08909.pdf,,,https://arxiv.org/pdf/1503.08909.pdf
+5161e38e4ea716dcfb554ccb88901b3d97778f64,https://arxiv.org/pdf/1702.04069.pdf,,,https://arxiv.org/pdf/1702.04069.pdf
+5167e16b53283be5587659ea8eaa3b8ef3fddd33,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813364,,
+51d1a6e15936727e8dd487ac7b7fd39bd2baf5ee,https://arxiv.org/pdf/1809.07586.pdf,,,https://arxiv.org/pdf/1809.07586.pdf
+5141cf2e59fb2ec9bb489b9c1832447d3cd93110,https://arxiv.org/pdf/1706.00893.pdf,,,https://arxiv.org/pdf/1706.00893.pdf
+511a8cdf2127ef8aa07cbdf9660fe9e0e2dfbde7,https://pdfs.semanticscholar.org/511a/8cdf2127ef8aa07cbdf9660fe9e0e2dfbde7.pdf,,,https://pdfs.semanticscholar.org/511a/8cdf2127ef8aa07cbdf9660fe9e0e2dfbde7.pdf
+51d048b92f6680aca4a8adf07deb380c0916c808,https://pdfs.semanticscholar.org/51d0/48b92f6680aca4a8adf07deb380c0916c808.pdf,,,https://pdfs.semanticscholar.org/51d0/48b92f6680aca4a8adf07deb380c0916c808.pdf
+5134353bd01c4ea36bd007c460e8972b1541d0ad,https://pdfs.semanticscholar.org/5134/353bd01c4ea36bd007c460e8972b1541d0ad.pdf,,,https://pdfs.semanticscholar.org/5134/353bd01c4ea36bd007c460e8972b1541d0ad.pdf
+51bb86dc8748088a198b216f7e97616634147388,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6890496,,
+3dce635ce4b55fb63fc6d41b38640403b152a048,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411225,,
+3d18ce183b5a5b4dcaa1216e30b774ef49eaa46f,https://arxiv.org/pdf/1511.07212.pdf,,,https://arxiv.org/pdf/1511.07212.pdf
+3daafe6389d877fe15d8823cdf5ac15fd919676f,https://arxiv.org/pdf/1605.05197.pdf,,,https://arxiv.org/pdf/1605.05197.pdf
+3db6fd6a0e9bb30f2421e84ee5e433683d17d9c1,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8402469,,
+3d6ee995bc2f3e0f217c053368df659a5d14d5b5,https://pdfs.semanticscholar.org/3d6e/e995bc2f3e0f217c053368df659a5d14d5b5.pdf,,,https://pdfs.semanticscholar.org/3d6e/e995bc2f3e0f217c053368df659a5d14d5b5.pdf
+3dfb822e16328e0f98a47209d7ecd242e4211f82,https://arxiv.org/pdf/1708.08197.pdf,,,https://arxiv.org/pdf/1708.08197.pdf
+580f86f1ace1feed16b592d05c2b07f26c429b4b,https://arxiv.org/pdf/1705.00754.pdf,,,https://arxiv.org/pdf/1705.00754.pdf
+58d47c187b38b8a2bad319c789a09781073d052d,https://arxiv.org/pdf/1806.11538.pdf,,,https://arxiv.org/pdf/1806.11538.pdf
+588bed36b3cc9e2f26c39b5d99d6687f36ae1177,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771389,,
+58217ae5423828ed5e1569bee93d491569d79970,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1578742,,
+58bf72750a8f5100e0c01e55fd1b959b31e7dbce,https://arxiv.org/pdf/1803.07737.pdf,,,https://arxiv.org/pdf/1803.07737.pdf
+58542eeef9317ffab9b155579256d11efb4610f2,https://pdfs.semanticscholar.org/5854/2eeef9317ffab9b155579256d11efb4610f2.pdf,,,https://pdfs.semanticscholar.org/5854/2eeef9317ffab9b155579256d11efb4610f2.pdf
+587b8c147c6253878128ddacf6e5faf8272842a4,,,,http://dl.acm.org/citation.cfm?id=2638549
+58538cc418bf41197fad4fc4ee2449b2daeb08b1,,,http://doi.org/10.1007/s11042-017-4343-4,
+67386772c289cd40db343bdc4cb8cb4f58271df2,,,http://doi.org/10.1038/s41598-017-10745-9,
+677585ccf8619ec2330b7f2d2b589a37146ffad7,https://arxiv.org/pdf/1806.11328.pdf,,,https://arxiv.org/pdf/1806.11328.pdf
+6789bddbabf234f31df992a3356b36a47451efc7,https://pdfs.semanticscholar.org/6789/bddbabf234f31df992a3356b36a47451efc7.pdf,,,https://pdfs.semanticscholar.org/6789/bddbabf234f31df992a3356b36a47451efc7.pdf
+675b2caee111cb6aa7404b4d6aa371314bf0e647,https://arxiv.org/pdf/1705.08421.pdf,,,https://arxiv.org/pdf/1705.08421.pdf
+679b72d23a9cfca8a7fe14f1d488363f2139265f,https://pdfs.semanticscholar.org/e7c4/bfe5ea260450f124f4253f2ebe0fff1d308f.pdf,,,https://pdfs.semanticscholar.org/e7c4/bfe5ea260450f124f4253f2ebe0fff1d308f.pdf
+67484723e0c2cbeb936b2e863710385bdc7d5368,https://arxiv.org/pdf/1805.03363.pdf,,,https://arxiv.org/pdf/1805.03363.pdf
+673d4885370b27c863e11a4ece9189a6a45931cc,https://arxiv.org/pdf/1802.09723.pdf,,,https://arxiv.org/pdf/1802.09723.pdf
+6754c98ba73651f69525c770fb0705a1fae78eb5,https://pdfs.semanticscholar.org/f68b/3031e7092072bd7b38c05448031f17b087d1.pdf,,,https://pdfs.semanticscholar.org/f68b/3031e7092072bd7b38c05448031f17b087d1.pdf
+672fae3da801b2a0d2bad65afdbbbf1b2320623e,https://arxiv.org/pdf/1609.07042.pdf,,,https://arxiv.org/pdf/1609.07042.pdf
+675b1fd2aaebe9c62be6b22b9ac6d278193cc581,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1699428,,
+67af3ec65f1dc535018f3671624e72c96a611c39,,,http://doi.org/10.1007/s11042-016-4058-y,
+0be43cf4299ce2067a0435798ef4ca2fbd255901,https://pdfs.semanticscholar.org/0be4/3cf4299ce2067a0435798ef4ca2fbd255901.pdf,,,https://pdfs.semanticscholar.org/0be4/3cf4299ce2067a0435798ef4ca2fbd255901.pdf
+0b45aeb0aede5e0c19b508ede802bdfec668aefd,,,,http://dl.acm.org/citation.cfm?id=1963206
+0ba5369c5e1e87ea172089d84a5610435c73de00,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8347111,,
+0b5a82f8c0ee3640503ba24ef73e672d93aeebbf,https://arxiv.org/pdf/1808.09560.pdf,,,https://arxiv.org/pdf/1808.09560.pdf
+0b82bf595e76898993ed4f4b2883c42720c0f277,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411229,,
+0b572a2b7052b15c8599dbb17d59ff4f02838ff7,https://pdfs.semanticscholar.org/0b57/2a2b7052b15c8599dbb17d59ff4f02838ff7.pdf,,,https://pdfs.semanticscholar.org/0b57/2a2b7052b15c8599dbb17d59ff4f02838ff7.pdf
+0ba1d855cd38b6a2c52860ae4d1a85198b304be4,https://arxiv.org/pdf/1510.03909.pdf,,,https://arxiv.org/pdf/1510.03909.pdf
+0bce54bfbd8119c73eb431559fc6ffbba741e6aa,https://pdfs.semanticscholar.org/f9b2/3a7270939136872d5e170b4a80aad68a4e66.pdf,,,https://pdfs.semanticscholar.org/f9b2/3a7270939136872d5e170b4a80aad68a4e66.pdf
+0bf0029c9bdb0ac61fda35c075deb1086c116956,https://pdfs.semanticscholar.org/c37d/3c53687b2b1654e20a5f67dce6585afc109a.pdf,,,https://pdfs.semanticscholar.org/c37d/3c53687b2b1654e20a5f67dce6585afc109a.pdf
+93af335bf8c610f34ce0cadc15d1dd592debc706,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8267475,,
+93cd5c47e4a3425d23e3db32c6eaef53745bb32e,,,http://doi.org/10.1007/s11042-017-5062-6,
+93420d9212dd15b3ef37f566e4d57e76bb2fab2f,https://arxiv.org/pdf/1611.00851.pdf,,,https://arxiv.org/pdf/1611.00851.pdf
+93af36da08bf99e68c9b0d36e141ed8154455ac2,https://pdfs.semanticscholar.org/93af/36da08bf99e68c9b0d36e141ed8154455ac2.pdf,,,https://pdfs.semanticscholar.org/93af/36da08bf99e68c9b0d36e141ed8154455ac2.pdf
+93dcea2419ca95b96a47e541748c46220d289d77,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014993,,
+93c0405b1f5432eab11cb5180229720604ffd030,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8462228,,
+93f37c69dd92c4e038710cdeef302c261d3a4f92,https://arxiv.org/pdf/1712.00636.pdf,,,https://arxiv.org/pdf/1712.00636.pdf
+93dd4e512cd7647aecbfc0cd4767adf5d9289c3d,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952499,,
+938ae9597f71a21f2e47287cca318d4a2113feb2,https://pdfs.semanticscholar.org/938a/e9597f71a21f2e47287cca318d4a2113feb2.pdf,,,https://pdfs.semanticscholar.org/938a/e9597f71a21f2e47287cca318d4a2113feb2.pdf
+946017d5f11aa582854ac4c0e0f1b18b06127ef1,https://pdfs.semanticscholar.org/9460/17d5f11aa582854ac4c0e0f1b18b06127ef1.pdf,,,https://pdfs.semanticscholar.org/9460/17d5f11aa582854ac4c0e0f1b18b06127ef1.pdf
+94eeae23786e128c0635f305ba7eebbb89af0023,https://arxiv.org/pdf/1706.01350.pdf,,,https://arxiv.org/pdf/1706.01350.pdf
+944faf7f14f1bead911aeec30cc80c861442b610,https://arxiv.org/pdf/1705.01861.pdf,,,https://arxiv.org/pdf/1705.01861.pdf
+94806f0967931d376d1729c29702f3d3bb70167c,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780581,,
+9436170c648c40b6f4cc3751fca3674aa82ffe9a,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6811741,,
+94325522c9be8224970f810554611d6a73877c13,https://arxiv.org/pdf/1807.11440.pdf,,,https://arxiv.org/pdf/1807.11440.pdf
+9487cea80f23afe9bccc94deebaa3eefa6affa99,https://arxiv.org/pdf/1612.05332.pdf,,,https://arxiv.org/pdf/1612.05332.pdf
+947ee3452e4f3d657b16325c6b959f8b8768efad,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952677,,
+94a11b601af77f0ad46338afd0fa4ccbab909e82,https://pdfs.semanticscholar.org/94a1/1b601af77f0ad46338afd0fa4ccbab909e82.pdf,,,https://pdfs.semanticscholar.org/94a1/1b601af77f0ad46338afd0fa4ccbab909e82.pdf
+0ee737085af468f264f57f052ea9b9b1f58d7222,https://arxiv.org/pdf/1807.08370.pdf,,,https://arxiv.org/pdf/1807.08370.pdf
+0e93a5a7f6dbdb3802173dca05717d27d72bfec0,https://arxiv.org/pdf/1709.08553.pdf,,,https://arxiv.org/pdf/1709.08553.pdf
+0e2ea7af369dbcaeb5e334b02dd9ba5271b10265,https://arxiv.org/pdf/1807.01332.pdf,,,https://arxiv.org/pdf/1807.01332.pdf
+0ee5c4112208995bf2bb0fb8a87efba933a94579,https://arxiv.org/pdf/1807.03235.pdf,,,https://arxiv.org/pdf/1807.03235.pdf
+604a281100784b4d5bc1a6db993d423abc5dc8f0,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5353681,,
+60c24e44fce158c217d25c1bae9f880a8bd19fc3,https://arxiv.org/pdf/1808.02992.pdf,,,https://arxiv.org/pdf/1808.02992.pdf
+60777fbca8bff210398ec8b1179bc4ecb72dfec0,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751535,,
+60e2b9b2e0db3089237d0208f57b22a3aac932c1,https://arxiv.org/pdf/1603.06470.pdf,,,https://arxiv.org/pdf/1603.06470.pdf
+60821d447e5b8a96dd9294a0514911e1141ff620,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813328,,
+60542b1a857024c79db8b5b03db6e79f74ec8f9f,https://arxiv.org/pdf/1702.05448.pdf,,,https://arxiv.org/pdf/1702.05448.pdf
+605f6817018a572797095b83bec7fae7195b2abc,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5339020,,
+60462b981fda63c5f9d780528a37c46884fe0b54,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8397015,,
+34c062e2b8a3f6421b9f4ff22f115a36d4aba823,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7872382,,
+345cc31c85e19cea9f8b8521be6a37937efd41c2,https://arxiv.org/pdf/1511.06421.pdf,,,https://arxiv.org/pdf/1511.06421.pdf
+341002fac5ae6c193b78018a164d3c7295a495e4,https://arxiv.org/pdf/1706.04264.pdf,,,https://arxiv.org/pdf/1706.04264.pdf
+34ce703b7e79e3072eed7f92239a4c08517b0c55,https://pdfs.semanticscholar.org/34ce/703b7e79e3072eed7f92239a4c08517b0c55.pdf,,,https://pdfs.semanticscholar.org/34ce/703b7e79e3072eed7f92239a4c08517b0c55.pdf
+34ec83c8ff214128e7a4a4763059eebac59268a6,https://arxiv.org/pdf/1808.00141.pdf,,,https://arxiv.org/pdf/1808.00141.pdf
+34bc8ecec0c0b328cd8c485cb34d4d2f4b84e0c9,,,,https://www.ncbi.nlm.nih.gov/pubmed/29069621
+346752e3ab96c93483413be4feaa024ccfe9499f,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6960834,,
+34fd227f4fdbc7fe028cc1f7d92cb59204333718,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8446331,,
+5a3da29970d0c3c75ef4cb372b336fc8b10381d7,https://arxiv.org/pdf/1708.00980.pdf,,,https://arxiv.org/pdf/1708.00980.pdf
+5a5f9e0ed220ce51b80cd7b7ede22e473a62062c,https://arxiv.org/pdf/1806.01810.pdf,,,https://arxiv.org/pdf/1806.01810.pdf
+5a12e1d4d74fe1a57929eaaa14f593b80f907ea3,,,http://doi.org/10.1007/s13735-016-0117-4,
+5ac946fc6543a445dd1ee6d5d35afd3783a31353,https://arxiv.org/pdf/1803.06962.pdf,,,https://arxiv.org/pdf/1803.06962.pdf
+5a4ec5c79f3699ba037a5f06d8ad309fb4ee682c,https://pdfs.semanticscholar.org/2e36/a706bbec0f1adb7484e5d7416c3e612f43a1.pdf,,,https://pdfs.semanticscholar.org/2e36/a706bbec0f1adb7484e5d7416c3e612f43a1.pdf
+5aed0f26549c6e64c5199048c4fd5fdb3c5e69d6,https://pdfs.semanticscholar.org/5aed/0f26549c6e64c5199048c4fd5fdb3c5e69d6.pdf,,,https://pdfs.semanticscholar.org/5aed/0f26549c6e64c5199048c4fd5fdb3c5e69d6.pdf
+5a547df635a9a56ac224d556333d36ff68cbf088,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8359041,,
+5a07945293c6b032e465d64f2ec076b82e113fa6,https://pdfs.semanticscholar.org/5a07/945293c6b032e465d64f2ec076b82e113fa6.pdf,,,https://pdfs.semanticscholar.org/5a07/945293c6b032e465d64f2ec076b82e113fa6.pdf
+5fff61302adc65d554d5db3722b8a604e62a8377,https://arxiv.org/pdf/1801.05599.pdf,,,https://arxiv.org/pdf/1801.05599.pdf
+5f771fed91c8e4b666489ba2384d0705bcf75030,https://arxiv.org/pdf/1804.03287.pdf,,,https://arxiv.org/pdf/1804.03287.pdf
+5fa04523ff13a82b8b6612250a39e1edb5066521,https://arxiv.org/pdf/1708.04370.pdf,,,https://arxiv.org/pdf/1708.04370.pdf
+5fa6e4a23da0b39e4b35ac73a15d55cee8608736,https://arxiv.org/pdf/1801.06066.pdf,,,https://arxiv.org/pdf/1801.06066.pdf
+5f7c4c20ae2731bfb650a96b69fd065bf0bb950e,https://pdfs.semanticscholar.org/5f7c/4c20ae2731bfb650a96b69fd065bf0bb950e.pdf,,,https://pdfs.semanticscholar.org/5f7c/4c20ae2731bfb650a96b69fd065bf0bb950e.pdf
+5f94969b9491db552ffebc5911a45def99026afe,https://pdfs.semanticscholar.org/5f94/969b9491db552ffebc5911a45def99026afe.pdf,,,https://pdfs.semanticscholar.org/5f94/969b9491db552ffebc5911a45def99026afe.pdf
+5fea59ccdab484873081eaa37af88e26e3db2aed,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8263394,,
+5f758a29dae102511576c0a5c6beda264060a401,https://arxiv.org/pdf/1804.01373.pdf,,,https://arxiv.org/pdf/1804.01373.pdf
+5f2c210644c1e567435d78522258e0ae036deedb,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4036602,,
+5fe3a9d54d5070308803dd8ef611594f59805400,,,http://doi.org/10.1016/j.patcog.2016.02.006,
+5f0d4a0b5f72d8700cdf8cb179263a8fa866b59b,https://pdfs.semanticscholar.org/5f0d/4a0b5f72d8700cdf8cb179263a8fa866b59b.pdf,,,https://pdfs.semanticscholar.org/5f0d/4a0b5f72d8700cdf8cb179263a8fa866b59b.pdf
+5f27ed82c52339124aa368507d66b71d96862cb7,https://pdfs.semanticscholar.org/5f27/ed82c52339124aa368507d66b71d96862cb7.pdf,,,https://pdfs.semanticscholar.org/5f27/ed82c52339124aa368507d66b71d96862cb7.pdf
+5fea26746f3140b12317fcf3bc1680f2746e172e,https://arxiv.org/pdf/1612.06341.pdf,,,https://arxiv.org/pdf/1612.06341.pdf
+5f0d4657eab4152a1785ee0a25b5b499cd1163ec,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6853687,,
+5f453a35d312debfc993d687fd0b7c36c1704b16,https://pdfs.semanticscholar.org/5f45/3a35d312debfc993d687fd0b7c36c1704b16.pdf,,,https://pdfs.semanticscholar.org/5f45/3a35d312debfc993d687fd0b7c36c1704b16.pdf
+33548531f9ed2ce6f87b3a1caad122c97f1fd2e9,https://pdfs.semanticscholar.org/c4e8/3800fae0d6065aca19aa2a2fbff29ca6be1e.pdf,,,https://pdfs.semanticscholar.org/c4e8/3800fae0d6065aca19aa2a2fbff29ca6be1e.pdf
+3328413ee9944de1cc7c9c1d1bf2fece79718ba1,https://arxiv.org/pdf/1807.00230.pdf,,,https://arxiv.org/pdf/1807.00230.pdf
+336488746cc76e7f13b0ec68ccfe4df6d76cdc8f,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7762938,,
+335435a94f8fa9c128b9f278d929c9d0e45e2510,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6849440,,
+3337cfc3de2c16dee6f7cbeda5f263409a9ad81e,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8398675,,
+33aa980544a9d627f305540059828597354b076c,https://pdfs.semanticscholar.org/18a6/9db63820183a7ed0d810c2fdf18865fdf10e.pdf,,,https://pdfs.semanticscholar.org/18a6/9db63820183a7ed0d810c2fdf18865fdf10e.pdf
+3352426a67eabe3516812cb66a77aeb8b4df4d1b,https://arxiv.org/pdf/1708.06023.pdf,,,https://arxiv.org/pdf/1708.06023.pdf
+33ef419dffef85443ec9fe89a93f928bafdc922e,https://arxiv.org/pdf/1809.08493.pdf,,,https://arxiv.org/pdf/1809.08493.pdf
+05891725f5b27332836cf058f04f18d74053803f,https://pdfs.semanticscholar.org/0589/1725f5b27332836cf058f04f18d74053803f.pdf,,,https://pdfs.semanticscholar.org/0589/1725f5b27332836cf058f04f18d74053803f.pdf
+057b80e235b10799d03876ad25465208a4c64caf,,,,http://dl.acm.org/citation.cfm?id=3123427
+052f994898c79529955917f3dfc5181586282cf8,https://arxiv.org/pdf/1708.02191.pdf,,,https://arxiv.org/pdf/1708.02191.pdf
+050a149051a5d268fcc5539e8b654c2240070c82,https://pdfs.semanticscholar.org/050a/149051a5d268fcc5539e8b654c2240070c82.pdf,,,https://pdfs.semanticscholar.org/050a/149051a5d268fcc5539e8b654c2240070c82.pdf
+0532cbcf616f27e5f6a4054f818d4992b99d201d,,,http://doi.org/10.1007/s11042-015-3042-2,
+053931267af79a89791479b18d1b9cde3edcb415,https://pdfs.semanticscholar.org/0539/31267af79a89791479b18d1b9cde3edcb415.pdf,,,https://pdfs.semanticscholar.org/0539/31267af79a89791479b18d1b9cde3edcb415.pdf
+9d58e8ab656772d2c8a99a9fb876d5611fe2fe20,https://arxiv.org/pdf/1506.01911.pdf,,,https://arxiv.org/pdf/1506.01911.pdf
+9d5bfaf6191484022a6731ce13ac1b866d21ad18,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139086,,
+9d24812d942e69f86279a26932df53c0a68c4111,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8417316,,
+9d46485ca2c562d5e295251530a99dd5df99b589,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813386,,
+9d57c4036a0e5f1349cd11bc342ac515307b6720,https://arxiv.org/pdf/1808.05399.pdf,,,https://arxiv.org/pdf/1808.05399.pdf
+9d3377313759dfdc1a702b341d8d8e4b1469460c,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7342926,,
+9dcfa771a7e87d7681348dd9f6cf9803699b16ce,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1385984,,
+9db4b25df549555f9ffd05962b5adf2fd9c86543,https://arxiv.org/pdf/1804.03786.pdf,,,https://arxiv.org/pdf/1804.03786.pdf
+9c2f20ed168743071db6268480a966d5d238a7ee,,,,http://dl.acm.org/citation.cfm?id=1456304
+9ca7899338129f4ba6744f801e722d53a44e4622,https://arxiv.org/pdf/1504.07550.pdf,,,https://arxiv.org/pdf/1504.07550.pdf
+9cc8cf0c7d7fa7607659921b6ff657e17e135ecc,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099536,,
+9c1664f69d0d832e05759e8f2f001774fad354d6,https://arxiv.org/pdf/1809.04317.pdf,,,https://arxiv.org/pdf/1809.04317.pdf
+9c6dfd3a38374399d998d5a130ffc2864c37f554,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553738,,
+9c065dfb26ce280610a492c887b7f6beccf27319,https://arxiv.org/pdf/1707.09074.pdf,,,https://arxiv.org/pdf/1707.09074.pdf
+9c23859ec7313f2e756a3e85575735e0c52249f4,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5981788,,
+9ca542d744149f0efc8b8aac8289f5e38e6d200c,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789587,,
+9c59bb28054eee783a40b467c82f38021c19ff3e,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7178311,,
+9ce0d64125fbaf625c466d86221505ad2aced7b1,https://pdfs.semanticscholar.org/9ce0/d64125fbaf625c466d86221505ad2aced7b1.pdf,,,https://pdfs.semanticscholar.org/9ce0/d64125fbaf625c466d86221505ad2aced7b1.pdf
+02f4b900deabbe7efa474f2815dc122a4ddb5b76,https://pdfs.semanticscholar.org/02f4/b900deabbe7efa474f2815dc122a4ddb5b76.pdf,,,https://pdfs.semanticscholar.org/02f4/b900deabbe7efa474f2815dc122a4ddb5b76.pdf
+023decb4c56f2e97d345593e4f7b89b667a6763d,,,http://doi.org/10.1007/s10994-005-3561-6,
+02fc9e7283b79183eb3757a9b6ddeb8c91c209bb,,,http://doi.org/10.1007/s11042-018-6146-7,
+021e008282714eaefc0796303f521c9e4f199d7e,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354319,,
+a40edf6eb979d1ddfe5894fac7f2cf199519669f,https://arxiv.org/pdf/1704.08740.pdf,,,https://arxiv.org/pdf/1704.08740.pdf
+a4898f55f12e6393b1c078803909ea715bf71730,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6957817,,
+a45e6172713a56736a2565ddea9cb8b1d94721cd,,,http://doi.org/10.1038/s41746-018-0035-3,
+a3d8b5622c4b9af1f753aade57e4774730787a00,https://arxiv.org/pdf/1705.10120.pdf,,,https://arxiv.org/pdf/1705.10120.pdf
+a322479a6851f57a3d74d017a9cb6d71395ed806,https://pdfs.semanticscholar.org/a322/479a6851f57a3d74d017a9cb6d71395ed806.pdf,,,https://pdfs.semanticscholar.org/a322/479a6851f57a3d74d017a9cb6d71395ed806.pdf
+a325d5ea42a0b6aeb0390318e9f65f584bd67edd,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909426,,
+a301ddc419cbd900b301a95b1d9e4bb770afc6a3,https://pdfs.semanticscholar.org/a301/ddc419cbd900b301a95b1d9e4bb770afc6a3.pdf,,,https://pdfs.semanticscholar.org/a301/ddc419cbd900b301a95b1d9e4bb770afc6a3.pdf
+a3f69a073dcfb6da8038607a9f14eb28b5dab2db,https://pdfs.semanticscholar.org/a3f6/9a073dcfb6da8038607a9f14eb28b5dab2db.pdf,,,https://pdfs.semanticscholar.org/a3f6/9a073dcfb6da8038607a9f14eb28b5dab2db.pdf
+a3201e955d6607d383332f3a12a7befa08c5a18c,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7900276,,
+a38045ed82d6800cbc7a4feb498e694740568258,https://pdfs.semanticscholar.org/8f15/c3a426d307dd1e72f7feab1e671d20fb1adb.pdf,,,https://pdfs.semanticscholar.org/8f15/c3a426d307dd1e72f7feab1e671d20fb1adb.pdf
+a313851ed00074a4a6c0fccf372acb6a68d9bc0b,,,http://doi.org/10.1007/s11042-016-4324-z,
+a3f78cc944ac189632f25925ba807a0e0678c4d5,https://pdfs.semanticscholar.org/a3f7/8cc944ac189632f25925ba807a0e0678c4d5.pdf,,,https://pdfs.semanticscholar.org/a3f7/8cc944ac189632f25925ba807a0e0678c4d5.pdf
+a32c5138c6a0b3d3aff69bcab1015d8b043c91fb,https://pdfs.semanticscholar.org/a32c/5138c6a0b3d3aff69bcab1015d8b043c91fb.pdf,,,https://pdfs.semanticscholar.org/a32c/5138c6a0b3d3aff69bcab1015d8b043c91fb.pdf
+a36c8a4213251d3fd634e8893ad1b932205ad1ca,https://pdfs.semanticscholar.org/a36c/8a4213251d3fd634e8893ad1b932205ad1ca.pdf,,,https://pdfs.semanticscholar.org/a36c/8a4213251d3fd634e8893ad1b932205ad1ca.pdf
+b5f9180666924a3215ab0b1faf712e70b353444d,,,http://doi.org/10.1007/s11042-017-4661-6,
+b5968e7bb23f5f03213178c22fd2e47af3afa04c,https://arxiv.org/pdf/1705.07206.pdf,,,https://arxiv.org/pdf/1705.07206.pdf
+b53485dbdd2dc5e4f3c7cff26bd8707964bb0503,,,http://doi.org/10.1007/s11263-017-1012-z,
+b558be7e182809f5404ea0fcf8a1d1d9498dc01a,https://pdfs.semanticscholar.org/dc8a/57827ffbe7064979638cf909abf7fcf7fb8d.pdf,,,https://pdfs.semanticscholar.org/dc8a/57827ffbe7064979638cf909abf7fcf7fb8d.pdf
+b562def2624f59f7d3824e43ecffc990ad780898,https://arxiv.org/pdf/1710.08310.pdf,,,https://arxiv.org/pdf/1710.08310.pdf
+b5747ecfa0f3be0adaad919d78763b1133c4d662,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8397022,,
+b5f3b0f45cf7f462a9c463a941e34e102a029506,,,,http://dl.acm.org/citation.cfm?id=3143004
+b5f2846a506fc417e7da43f6a7679146d99c5e96,https://arxiv.org/pdf/1212.0402.pdf,,,https://arxiv.org/pdf/1212.0402.pdf
+b51d11fa400d66b9f9d903a60c4ebe03fd77c8f2,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8358588,,
+b5fdd7778503f27c9d9bf77fab193b475fab6076,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373891,,
+b598f7761b153ecb26e9d08d3c5817aac5b34b52,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4618852,,
+b55e70df03d9b80c91446a97957bc95772dcc45b,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8269329,,
+b5ca8d4f259f35c1f3edfd9f108ce29881e478b0,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099624,,
+b5f9306c3207ac12ac761e7d028c78b3009a219c,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6093779,,
+b59f441234d2d8f1765a20715e227376c7251cd7,https://arxiv.org/pdf/1803.01449.pdf,,,https://arxiv.org/pdf/1803.01449.pdf
+b59cee1f647737ec3296ccb3daa25c890359c307,https://pdfs.semanticscholar.org/b59c/ee1f647737ec3296ccb3daa25c890359c307.pdf,,,https://pdfs.semanticscholar.org/b59c/ee1f647737ec3296ccb3daa25c890359c307.pdf
+b26e8f6ad7c2d4c838660d5a17337ce241442ed9,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8462692,,
+b2470969e4fba92f7909eac26b77d08cc5575533,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8326475,,
+b2c60061ad32e28eb1e20aff42e062c9160786be,https://arxiv.org/pdf/1805.12589.pdf,,,https://arxiv.org/pdf/1805.12589.pdf
+b2b535118c5c4dfcc96f547274cdc05dde629976,https://arxiv.org/pdf/1707.04061.pdf,,,https://arxiv.org/pdf/1707.04061.pdf
+d904f945c1506e7b51b19c99c632ef13f340ef4c,https://pdfs.semanticscholar.org/d904/f945c1506e7b51b19c99c632ef13f340ef4c.pdf,,,https://pdfs.semanticscholar.org/d904/f945c1506e7b51b19c99c632ef13f340ef4c.pdf
+d949fadc9b6c5c8b067fa42265ad30945f9caa99,https://arxiv.org/pdf/1710.00870.pdf,,,https://arxiv.org/pdf/1710.00870.pdf
+d916602f694ebb9cf95d85e08dd53f653b6196c3,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237607,,
+d93baa5ecf3e1196b34494a79df0a1933fd2b4ec,https://arxiv.org/pdf/1804.04803.pdf,,,https://arxiv.org/pdf/1804.04803.pdf
+d9e66b877b277d73f8876f537206395e71f58269,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7225130,,
+d9deafd9d9e60657a7f34df5f494edff546c4fb8,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100124,,
+d9218c2bbc7449dbccac351f55675efd810535db,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5699141,,
+d9a5c82b710b1f4f1ffb67be2ae1d3c0ae7f6c55,,,http://doi.org/10.1016/j.jvcir.2015.11.002,
+d9c4b1ca997583047a8721b7dfd9f0ea2efdc42c,https://arxiv.org/pdf/1709.00069.pdf,,,https://arxiv.org/pdf/1709.00069.pdf
+d9bad7c3c874169e3e0b66a031c8199ec0bc2c1f,https://arxiv.org/pdf/1807.00046.pdf,,,https://arxiv.org/pdf/1807.00046.pdf
+d99b5ee3e2d7e3a016fbc5fd417304e15efbd1f8,,,http://doi.org/10.1007/s11063-017-9578-6,
+aca232de87c4c61537c730ee59a8f7ebf5ecb14f,https://pdfs.semanticscholar.org/aca2/32de87c4c61537c730ee59a8f7ebf5ecb14f.pdf,,,https://pdfs.semanticscholar.org/aca2/32de87c4c61537c730ee59a8f7ebf5ecb14f.pdf
+ac855f0de9086e9e170072cb37400637f0c9b735,https://arxiv.org/pdf/1809.08999.pdf,,,https://arxiv.org/pdf/1809.08999.pdf
+aca728cab26b95fbe04ec230b389878656d8af5b,,,http://doi.org/10.1007/978-981-10-8258-0,
+acff2dc5d601887741002a78f8c0c35a799e6403,,,http://doi.org/10.1007/978-3-662-44654-6,
+accbd6cd5dd649137a7c57ad6ef99232759f7544,https://pdfs.semanticscholar.org/accb/d6cd5dd649137a7c57ad6ef99232759f7544.pdf,,,https://pdfs.semanticscholar.org/accb/d6cd5dd649137a7c57ad6ef99232759f7544.pdf
+acee2201f8a15990551804dd382b86973eb7c0a8,https://arxiv.org/pdf/1701.01692.pdf,,,https://arxiv.org/pdf/1701.01692.pdf
+ac0d3f6ed5c42b7fc6d7c9e1a9bb80392742ad5e,https://pdfs.semanticscholar.org/c67c/5780cb9870b70b78e4c82da4f92c7bb2592d.pdf,,,https://pdfs.semanticscholar.org/c67c/5780cb9870b70b78e4c82da4f92c7bb2592d.pdf
+ac26166857e55fd5c64ae7194a169ff4e473eb8b,https://arxiv.org/pdf/1706.01039.pdf,,,https://arxiv.org/pdf/1706.01039.pdf
+ac559873b288f3ac28ee8a38c0f3710ea3f986d9,https://pdfs.semanticscholar.org/ac55/9873b288f3ac28ee8a38c0f3710ea3f986d9.pdf,,,https://pdfs.semanticscholar.org/ac55/9873b288f3ac28ee8a38c0f3710ea3f986d9.pdf
+ac8e09128e1e48a2eae5fa90f252ada689f6eae7,https://arxiv.org/pdf/1806.01526.pdf,,,https://arxiv.org/pdf/1806.01526.pdf
+ac8441e30833a8e2a96a57c5e6fede5df81794af,https://arxiv.org/pdf/1805.10557.pdf,,,https://arxiv.org/pdf/1805.10557.pdf
+ac2e166c76c103f17fdea2b4ecb137200b8d4703,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5373798,,
+ac86ccc16d555484a91741e4cb578b75599147b2,https://arxiv.org/pdf/1709.08398.pdf,,,https://arxiv.org/pdf/1709.08398.pdf
+ac03849956ac470c41585d2ee34d8bb58bb3c764,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6853690,,
+ad0d4d5c61b55a3ab29764237cd97be0ebb0ddff,https://arxiv.org/pdf/1712.05080.pdf,,,https://arxiv.org/pdf/1712.05080.pdf
+ad77056780328bdcc6b7a21bce4ddd49c49e2013,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8398021,,
+ada063ce9a1ff230791c48b6afa29c401a9007f1,,,http://doi.org/10.1007/978-3-319-97909-0,
+ad2339c48ad4ffdd6100310dcbb1fb78e72fac98,https://arxiv.org/pdf/1704.04689.pdf,,,https://arxiv.org/pdf/1704.04689.pdf
+adf62dfa00748381ac21634ae97710bb80fc2922,https://pdfs.semanticscholar.org/adf6/2dfa00748381ac21634ae97710bb80fc2922.pdf,,,https://pdfs.semanticscholar.org/adf6/2dfa00748381ac21634ae97710bb80fc2922.pdf
+bbf28f39e5038813afd74cf1bc78d55fcbe630f1,https://arxiv.org/pdf/1803.04108.pdf,,,https://arxiv.org/pdf/1803.04108.pdf
+bb4f83458976755e9310b241a689c8d21b481238,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265393,,
+bb4be8e24d7b8ed56d81edec435b7b59bad96214,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7060677,,
+bb2f61a057bbf176e402d171d79df2635ccda9f6,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8296311,,
+bb7f2c5d84797742f1d819ea34d1f4b4f8d7c197,https://arxiv.org/pdf/1612.06836.pdf,,,https://arxiv.org/pdf/1612.06836.pdf
+bb0ecedde7d6e837dc9a5e115302a2aaad1035e1,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373838,,
+bbc5f4052674278c96abe7ff9dc2d75071b6e3f3,https://pdfs.semanticscholar.org/287b/7baff99d6995fd5852002488eb44659be6c1.pdf,,,https://pdfs.semanticscholar.org/287b/7baff99d6995fd5852002488eb44659be6c1.pdf
+bbd1eb87c0686fddb838421050007e934b2d74ab,https://arxiv.org/pdf/1805.10483.pdf,,,https://arxiv.org/pdf/1805.10483.pdf
+d7b8f285b0701ba7b1a11d1c7dd3d1e7e304083f,,,,http://dl.acm.org/citation.cfm?id=3164593
+d7dd35a86117e46d24914ef49ccd99ea0a7bf705,,,http://doi.org/10.1007/s10994-014-5463-y,
+d790093cb85fc556c0089610026e0ec3466ab845,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4721612,,
+d7593148e4319df7a288180d920f2822eeecea0b,https://pdfs.semanticscholar.org/192e/b550675b0f9cc69389ef2ec27efa72851253.pdf,,,https://pdfs.semanticscholar.org/192e/b550675b0f9cc69389ef2ec27efa72851253.pdf
+d7fe2a52d0ad915b78330340a8111e0b5a66513a,https://arxiv.org/pdf/1711.10735.pdf,,,https://arxiv.org/pdf/1711.10735.pdf
+d7cbedbee06293e78661335c7dd9059c70143a28,https://arxiv.org/pdf/1804.07573.pdf,,,https://arxiv.org/pdf/1804.07573.pdf
+d78734c54f29e4474b4d47334278cfde6efe963a,https://arxiv.org/pdf/1804.03487.pdf,,,https://arxiv.org/pdf/1804.03487.pdf
+d79365336115661b0e8dbbcd4b2aa1f504b91af6,https://arxiv.org/pdf/1603.01801.pdf,,,https://arxiv.org/pdf/1603.01801.pdf
+d7b6bbb94ac20f5e75893f140ef7e207db7cd483,https://pdfs.semanticscholar.org/d7b6/bbb94ac20f5e75893f140ef7e207db7cd483.pdf,,,https://pdfs.semanticscholar.org/d7b6/bbb94ac20f5e75893f140ef7e207db7cd483.pdf
+d700aedcb22a4be374c40d8bee50aef9f85d98ef,https://arxiv.org/pdf/1712.04851.pdf,,,https://arxiv.org/pdf/1712.04851.pdf
+d77f18917a58e7d4598d31af4e7be2762d858370,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6289062,,
+d0471d5907d6557cf081edf4c7c2296c3c221a38,https://pdfs.semanticscholar.org/d047/1d5907d6557cf081edf4c7c2296c3c221a38.pdf,,,https://pdfs.semanticscholar.org/d047/1d5907d6557cf081edf4c7c2296c3c221a38.pdf
+d00e9a6339e34c613053d3b2c132fccbde547b56,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791154,,
+d06bcb2d46342ee011e652990edf290a0876b502,,,,http://arxiv.org/abs/1708.00980
+d0509afe9c2c26fe021889f8efae1d85b519452a,https://arxiv.org/pdf/1803.07140.pdf,,,https://arxiv.org/pdf/1803.07140.pdf
+d0144d76b8b926d22411d388e7a26506519372eb,https://arxiv.org/pdf/1806.04613.pdf,,,https://arxiv.org/pdf/1806.04613.pdf
+d02e27e724f9b9592901ac1f45830341d37140fe,https://arxiv.org/pdf/1802.06454.pdf,,,https://arxiv.org/pdf/1802.06454.pdf
+d02b32b012ffba2baeb80dca78e7857aaeececb0,https://pdfs.semanticscholar.org/d02b/32b012ffba2baeb80dca78e7857aaeececb0.pdf,,,https://pdfs.semanticscholar.org/d02b/32b012ffba2baeb80dca78e7857aaeececb0.pdf
+d066575b48b552a38e63095bb1f7b56cbb1fbea4,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8359888,,
+d01303062b21cd9ff46d5e3ff78897b8499480de,https://pdfs.semanticscholar.org/d013/03062b21cd9ff46d5e3ff78897b8499480de.pdf,,,https://pdfs.semanticscholar.org/d013/03062b21cd9ff46d5e3ff78897b8499480de.pdf
+d02c54192dbd0798b43231efe1159d6b4375ad36,https://pdfs.semanticscholar.org/d02c/54192dbd0798b43231efe1159d6b4375ad36.pdf,,,https://pdfs.semanticscholar.org/d02c/54192dbd0798b43231efe1159d6b4375ad36.pdf
+d0f54b72e3a3fe7c0e65d7d5a3b30affb275f4c5,https://arxiv.org/pdf/1803.08460.pdf,,,https://arxiv.org/pdf/1803.08460.pdf
+beabb0d9d30871d517c5d915cf852f7f5293f52f,,,,
+bed8feb11e8077df158e16bce064853cf217ba62,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6191360,,
+be48b5dcd10ab834cd68d5b2a24187180e2b408f,https://arxiv.org/pdf/1611.04870.pdf,,,https://arxiv.org/pdf/1611.04870.pdf
+be4a20113bc204019ea79c6557a0bece23da1121,https://arxiv.org/pdf/1712.01670.pdf,,,https://arxiv.org/pdf/1712.01670.pdf
+bef4df99e1dc6f696f9b3732ab6bac8e85d3fb3c,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7344632,,
+be437b53a376085b01ebd0f4c7c6c9e40a4b1a75,https://pdfs.semanticscholar.org/be43/7b53a376085b01ebd0f4c7c6c9e40a4b1a75.pdf,,,https://pdfs.semanticscholar.org/be43/7b53a376085b01ebd0f4c7c6c9e40a4b1a75.pdf
+be4f7679797777f2bc1fd6aad8af67cce5e5ce87,https://pdfs.semanticscholar.org/be4f/7679797777f2bc1fd6aad8af67cce5e5ce87.pdf,,,https://pdfs.semanticscholar.org/be4f/7679797777f2bc1fd6aad8af67cce5e5ce87.pdf
+be7444c891caf295d162233bdae0e1c79791d566,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014816,,
+be28ed1be084385f5d389db25fd7f56cd2d7f7bf,https://arxiv.org/pdf/1706.03864.pdf,,,https://arxiv.org/pdf/1706.03864.pdf
+bec0c33d330385d73a5b6a05ad642d6954a6d632,,,http://doi.org/10.1007/s11042-017-4491-6,
+bebea83479a8e1988a7da32584e37bfc463d32d4,https://arxiv.org/pdf/1807.03146.pdf,,,https://arxiv.org/pdf/1807.03146.pdf
+bef926d63512dbffcf1af59f72295ef497f5acf9,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6990726,,
+be632b206f1cd38eab0c01c5f2004d1e8fc72880,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6607601,,
+bed06e7ff0b510b4a1762283640b4233de4c18e0,https://pdfs.semanticscholar.org/bed0/6e7ff0b510b4a1762283640b4233de4c18e0.pdf,,,https://pdfs.semanticscholar.org/bed0/6e7ff0b510b4a1762283640b4233de4c18e0.pdf
+beb2f1a6f3f781443580ffec9161d9ce6852bf48,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8424735,,
+beae35eb5b2c7f63dfa9115f07b5ba0319709951,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163096,,
+be5276e9744c4445fe5b12b785650e8f173f56ff,https://pdfs.semanticscholar.org/be52/76e9744c4445fe5b12b785650e8f173f56ff.pdf,,,https://pdfs.semanticscholar.org/be52/76e9744c4445fe5b12b785650e8f173f56ff.pdf
+be4f18e25b06f430e2de0cc8fddcac8585b00beb,https://pdfs.semanticscholar.org/be4f/18e25b06f430e2de0cc8fddcac8585b00beb.pdf,,,https://pdfs.semanticscholar.org/be4f/18e25b06f430e2de0cc8fddcac8585b00beb.pdf
+be4faea0971ef74096ec9800750648b7601dda65,,,http://doi.org/10.1007/s11063-017-9724-1,
+b313751548018e4ecd5ae2ce6b3b94fbd9cae33e,,,http://doi.org/10.1007/s11263-008-0143-7,
+b331ca23aed90394c05f06701f90afd550131fe3,https://pdfs.semanticscholar.org/b331/ca23aed90394c05f06701f90afd550131fe3.pdf,,,https://pdfs.semanticscholar.org/b331/ca23aed90394c05f06701f90afd550131fe3.pdf
+b3ad7bc128b77d9254aa38c5e1ead7fa10b07d29,,,,http://dl.acm.org/citation.cfm?id=3206041
+b3cb91a08be4117d6efe57251061b62417867de9,https://pdfs.semanticscholar.org/b3cb/91a08be4117d6efe57251061b62417867de9.pdf,,,https://pdfs.semanticscholar.org/b3cb/91a08be4117d6efe57251061b62417867de9.pdf
+b3200539538eca54a85223bf0ec4f3ed132d0493,https://pdfs.semanticscholar.org/b320/0539538eca54a85223bf0ec4f3ed132d0493.pdf,,,https://pdfs.semanticscholar.org/b320/0539538eca54a85223bf0ec4f3ed132d0493.pdf
+b3add9bc9e70b6b28ba31e843e9155e7c37f3958,,,http://doi.org/10.1007/s10766-017-0552-8,
+b3b467961ba66264bb73ffe00b1830d7874ae8ce,https://arxiv.org/pdf/1612.04402.pdf,,,https://arxiv.org/pdf/1612.04402.pdf
+b3ba7ab6de023a0d58c741d6abfa3eae67227caf,https://arxiv.org/pdf/1707.09468.pdf,,,https://arxiv.org/pdf/1707.09468.pdf
+b32cf547a764a4efa475e9c99a72a5db36eeced6,https://pdfs.semanticscholar.org/b32c/f547a764a4efa475e9c99a72a5db36eeced6.pdf,,,https://pdfs.semanticscholar.org/b32c/f547a764a4efa475e9c99a72a5db36eeced6.pdf
+b3afa234996f44852317af382b98f5f557cab25a,https://arxiv.org/pdf/1711.11248.pdf,,,https://arxiv.org/pdf/1711.11248.pdf
+df90850f1c153bfab691b985bfe536a5544e438b,https://pdfs.semanticscholar.org/df90/850f1c153bfab691b985bfe536a5544e438b.pdf,,,https://pdfs.semanticscholar.org/df90/850f1c153bfab691b985bfe536a5544e438b.pdf
+df767f62a6bf3b09e6417d801726f2d5d642a202,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1699727,,
+df577a89830be69c1bfb196e925df3055cafc0ed,https://arxiv.org/pdf/1711.08141.pdf,,,https://arxiv.org/pdf/1711.08141.pdf
+df87193e15a19d5620f5a6458b05fee0cf03729f,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7363421,,
+df2c685aa9c234783ab51c1aa1bf1cb5d71a3dbb,https://arxiv.org/pdf/1704.06693.pdf,,,https://arxiv.org/pdf/1704.06693.pdf
+dfd8602820c0e94b624d02f2e10ce6c798193a25,https://arxiv.org/pdf/1805.00597.pdf,,,https://arxiv.org/pdf/1805.00597.pdf
+df9269657505fcdc1e10cf45bbb8e325678a40f5,https://pdfs.semanticscholar.org/1b38/1e864fa35cde69d85eada0eb515d274a6b74.pdf,,,https://pdfs.semanticscholar.org/1b38/1e864fa35cde69d85eada0eb515d274a6b74.pdf
+dfecaedeaf618041a5498cd3f0942c15302e75c3,https://arxiv.org/pdf/1608.01647.pdf,,,https://arxiv.org/pdf/1608.01647.pdf
+df6e68db278bedf5486a80697dec6623958edba8,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952696,,
+da7bbfa905d88834f8929cb69f41a1b683639f4b,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6199752,,
+daa120032d8f141bc6aae20e23b1b754a0dd7d5f,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789593,,
+dad6b36fd515bda801f3d22a462cc62348f6aad8,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6117531,,
+da4170c862d8ae39861aa193667bfdbdf0ecb363,https://arxiv.org/pdf/1601.00400.pdf,,,https://arxiv.org/pdf/1601.00400.pdf
+daca9d03c1c951ed518248de7f75ff51e5c272cb,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6976977,,
+dac8fc521dfafb2d082faa4697f491eae00472c7,,,,http://dl.acm.org/citation.cfm?id=3123423
+daa4cfde41d37b2ab497458e331556d13dd14d0b,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406477,,
+da23d90bacf246b75ef752a2cbb138c4fcd789b7,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406360,,
+daefac0610fdeff415c2a3f49b47968d84692e87,https://pdfs.semanticscholar.org/daef/ac0610fdeff415c2a3f49b47968d84692e87.pdf,,,https://pdfs.semanticscholar.org/daef/ac0610fdeff415c2a3f49b47968d84692e87.pdf
+dac34b590adddef2fc31f26e2aeb0059115d07a1,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8436078,,
+b49affdff167f5d170da18de3efa6fd6a50262a2,https://pdfs.semanticscholar.org/b49a/ffdff167f5d170da18de3efa6fd6a50262a2.pdf,,,https://pdfs.semanticscholar.org/b49a/ffdff167f5d170da18de3efa6fd6a50262a2.pdf
+b42a97fb47bcd6bfa72e130c08960a77ee96f9ab,https://pdfs.semanticscholar.org/b42a/97fb47bcd6bfa72e130c08960a77ee96f9ab.pdf,,,https://pdfs.semanticscholar.org/b42a/97fb47bcd6bfa72e130c08960a77ee96f9ab.pdf
+b4d209845e1c67870ef50a7c37abaf3770563f3e,https://arxiv.org/pdf/1807.06980.pdf,,,https://arxiv.org/pdf/1807.06980.pdf
+b4ee64022cc3ccd14c7f9d4935c59b16456067d3,https://pdfs.semanticscholar.org/b4ee/64022cc3ccd14c7f9d4935c59b16456067d3.pdf,,,https://pdfs.semanticscholar.org/b4ee/64022cc3ccd14c7f9d4935c59b16456067d3.pdf
+b484141b99d3478a12b8a6854864c4b875d289b8,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6117595,,
+b41d585246360646c677a8238ec35e8605b083b0,,,http://doi.org/10.1007/s11042-018-6017-2,
+b40c001b3e304dccb28c745bd54aa281c8ff1f29,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8361072,,
+b4b0bf0cbe1a2c114adde9fac64900b2f8f6fee4,https://pdfs.semanticscholar.org/cbb4/4f0a4b5d76152b90a24a1470cb4cc860587d.pdf,,,https://pdfs.semanticscholar.org/cbb4/4f0a4b5d76152b90a24a1470cb4cc860587d.pdf
+a285b6edd47f9b8966935878ad4539d270b406d1,https://pdfs.semanticscholar.org/a285/b6edd47f9b8966935878ad4539d270b406d1.pdf,,,https://pdfs.semanticscholar.org/a285/b6edd47f9b8966935878ad4539d270b406d1.pdf
+a2e0966f303f38b58b898d388d1c83e40b605262,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354125,,
+a2359c0f81a7eb032cff1fe45e3b80007facaa2a,https://arxiv.org/pdf/1712.08714.pdf,,,https://arxiv.org/pdf/1712.08714.pdf
+a2b4a6c6b32900a066d0257ae6d4526db872afe2,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8272466,,
+a20036b7fbf6c0db454c8711e72d78f145560dc8,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4761890,,
+a26fd9df58bb76d6c7a3254820143b3da5bd584b,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8446759,,
+a5acda0e8c0937bfed013e6382da127103e41395,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789672,,
+a532cfc69259254192aee3fc5be614d9197e7824,,,http://doi.org/10.1016/j.patcog.2016.12.028,
+a59c0cf3d2c5bf144ee0dbc1152b1b5dd7634990,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7350093,,
+a5f11c132eaab258a7cea2d681875af09cddba65,https://arxiv.org/pdf/1707.02069.pdf,,,https://arxiv.org/pdf/1707.02069.pdf
+a5f35880477ae82902c620245e258cf854c09be9,,,http://doi.org/10.1016/j.imavis.2013.12.004,
+a5f70e0cd7da2b2df05fadb356a24743f3cf459a,,,http://doi.org/10.1007/s11063-017-9649-8,
+a5a44a32a91474f00a3cda671a802e87c899fbb4,https://arxiv.org/pdf/1801.03150.pdf,,,https://arxiv.org/pdf/1801.03150.pdf
+bd0265ba7f391dc3df9059da3f487f7ef17144df,https://pdfs.semanticscholar.org/bd02/65ba7f391dc3df9059da3f487f7ef17144df.pdf,,,https://pdfs.semanticscholar.org/bd02/65ba7f391dc3df9059da3f487f7ef17144df.pdf
+bddc822cf20b31d8f714925bec192c39294184f7,,,http://doi.org/10.1134/S1054661807040190,
+bd0e100a91ff179ee5c1d3383c75c85eddc81723,https://arxiv.org/pdf/1706.03038.pdf,,,https://arxiv.org/pdf/1706.03038.pdf
+bd243d77076b3b8fe046bd3dc6e8a02aa9b38d62,,,,http://arxiv.org/abs/1412.0767
+bd8d579715d58405dfd5a77f32920aafe018fce4,,,http://doi.org/10.1016/j.imavis.2008.08.005,
+bd379f8e08f88729a9214260e05967f4ca66cd65,https://arxiv.org/pdf/1711.06148.pdf,,,https://arxiv.org/pdf/1711.06148.pdf
+bd21109e40c26af83c353a3271d0cd0b5c4b4ade,https://arxiv.org/pdf/1808.08803.pdf,,,https://arxiv.org/pdf/1808.08803.pdf
+bd8f77b7d3b9d272f7a68defc1412f73e5ac3135,https://arxiv.org/pdf/1704.08063.pdf,,,https://arxiv.org/pdf/1704.08063.pdf
+bd26dabab576adb6af30484183c9c9c8379bf2e0,https://arxiv.org/pdf/1511.02459.pdf,,,https://arxiv.org/pdf/1511.02459.pdf
+bd9c9729475ba7e3b255e24e7478a5acb393c8e9,https://arxiv.org/pdf/1806.04845.pdf,,,https://arxiv.org/pdf/1806.04845.pdf
+bdbba95e5abc543981fb557f21e3e6551a563b45,https://arxiv.org/pdf/1807.07362.pdf,,,https://arxiv.org/pdf/1807.07362.pdf
+bd70f832e133fb87bae82dfaa0ae9d1599e52e4b,https://pdfs.semanticscholar.org/acc6/bd697d46121c95f40b62eff7641ffa8d2318.pdf,,,https://pdfs.semanticscholar.org/acc6/bd697d46121c95f40b62eff7641ffa8d2318.pdf
+d141c31e3f261d7d5214f07886c1a29ac734d6fc,,,http://doi.org/10.1007/s11063-018-9812-x,
+d1dfdc107fa5f2c4820570e369cda10ab1661b87,https://arxiv.org/pdf/1712.00080.pdf,,,https://arxiv.org/pdf/1712.00080.pdf
+d1ee9e63c8826a39d75fa32711fddbcc58d5161a,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6613000,,
+d10cfcf206b0991e3bc20ac28df1f61c63516f30,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553776,,
+d1edb8ba9d50817dbfec7e30f25b1846941e84d8,,,http://doi.org/10.1007/s13735-016-0112-9,
+d116bac3b6ad77084c12bea557d42ed4c9d78433,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7471886,,
+d1079444ceddb1de316983f371ecd1db7a0c2f38,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6460478,,
+d1a43737ca8be02d65684cf64ab2331f66947207,https://pdfs.semanticscholar.org/d1a4/3737ca8be02d65684cf64ab2331f66947207.pdf,,,https://pdfs.semanticscholar.org/d1a4/3737ca8be02d65684cf64ab2331f66947207.pdf
+d1d6f1d64a04af9c2e1bdd74e72bd3ffac329576,https://arxiv.org/pdf/1704.04131.pdf,,,https://arxiv.org/pdf/1704.04131.pdf
+d6c8f5674030cf3f5a2f7cc929bad37a422b26a0,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337371,,
+d69df51cff3d6b9b0625acdcbea27cd2bbf4b9c0,https://pdfs.semanticscholar.org/d69d/f51cff3d6b9b0625acdcbea27cd2bbf4b9c0.pdf,,,https://pdfs.semanticscholar.org/d69d/f51cff3d6b9b0625acdcbea27cd2bbf4b9c0.pdf
+d6ae7941dcec920d5726d50d1b1cdfe4dde34d35,,,,http://dl.acm.org/citation.cfm?id=31310887
+d69719b42ee53b666e56ed476629a883c59ddf66,https://pdfs.semanticscholar.org/d697/19b42ee53b666e56ed476629a883c59ddf66.pdf,,,https://pdfs.semanticscholar.org/d697/19b42ee53b666e56ed476629a883c59ddf66.pdf
+d69271c7b77bc3a06882884c21aa1b609b3f76cc,https://arxiv.org/pdf/1708.05234.pdf,,,https://arxiv.org/pdf/1708.05234.pdf
+d6e08345ba293565086cb282ba08b225326022fc,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7490397,,
+d666ce9d783a2d31550a8aa47da45128a67304a7,https://pdfs.semanticscholar.org/c508/532efb1c02dcae0224e9e6894d232a1f4f6b.pdf,,,https://pdfs.semanticscholar.org/c508/532efb1c02dcae0224e9e6894d232a1f4f6b.pdf
+d62d82c312c40437bc4c1c91caedac2ba5beb292,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8461322,,
+bc607bee2002c6c6bf694a15efd0a5d049767237,,,http://doi.org/10.1007/s11042-017-4364-z,
+bc6de183cd8b2baeebafeefcf40be88468b04b74,https://pdfs.semanticscholar.org/e057/e713301e089887295543226b79b534fdd145.pdf,,,https://pdfs.semanticscholar.org/e057/e713301e089887295543226b79b534fdd145.pdf
+bcf19b964e7d1134d00332cf1acf1ee6184aff00,https://pdfs.semanticscholar.org/bcf1/9b964e7d1134d00332cf1acf1ee6184aff00.pdf,,,https://pdfs.semanticscholar.org/bcf1/9b964e7d1134d00332cf1acf1ee6184aff00.pdf
+bc9003ad368cb79d8a8ac2ad025718da5ea36bc4,https://pdfs.semanticscholar.org/bc90/03ad368cb79d8a8ac2ad025718da5ea36bc4.pdf,,,https://pdfs.semanticscholar.org/bc90/03ad368cb79d8a8ac2ad025718da5ea36bc4.pdf
+bc9bad25f8149318314971d8b8c170064e220ea8,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8078542,,
+bcc346f4a287d96d124e1163e4447bfc47073cd8,https://arxiv.org/pdf/1707.05395.pdf,,,https://arxiv.org/pdf/1707.05395.pdf
+bc27434e376db89fe0e6ef2d2fabc100d2575ec6,https://arxiv.org/pdf/1607.08438.pdf,,,https://arxiv.org/pdf/1607.08438.pdf
+bc08dfa22949fbe54e15b1a6379afade71835968,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7899970,,
+bc8e11b8cdf0cfbedde798a53a0318e8d6f67e17,https://pdfs.semanticscholar.org/bc8e/11b8cdf0cfbedde798a53a0318e8d6f67e17.pdf,,,https://pdfs.semanticscholar.org/bc8e/11b8cdf0cfbedde798a53a0318e8d6f67e17.pdf
+bc811a66855aae130ca78cd0016fd820db1603ec,https://pdfs.semanticscholar.org/62ca/3c9b00bf3d9ff319afdee04dfa27ae2e0bdb.pdf,,,https://pdfs.semanticscholar.org/62ca/3c9b00bf3d9ff319afdee04dfa27ae2e0bdb.pdf
+bc9af4c2c22a82d2c84ef7c7fcc69073c19b30ab,https://arxiv.org/pdf/1707.04993.pdf,,,https://arxiv.org/pdf/1707.04993.pdf
+bc36badb6606b8162d821a227dda09a94aac537f,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337442,,
+ae8d5be3caea59a21221f02ef04d49a86cb80191,https://arxiv.org/pdf/1708.06834.pdf,,,https://arxiv.org/pdf/1708.06834.pdf
+ae78469de00ea1e7602ca468dcf188cdfe2c80d4,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8466467,,
+ae5e92abd5929ee7f0a5aa1622aa094bac4fae29,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373805,,
+ae2cf545565c157813798910401e1da5dc8a6199,https://pdfs.semanticscholar.org/eef4/c6bb430c4792522866fdad40a0ed8e76809f.pdf,,,https://pdfs.semanticscholar.org/eef4/c6bb430c4792522866fdad40a0ed8e76809f.pdf
+aeaf5dbb3608922246c7cd8a619541ea9e4a7028,https://pdfs.semanticscholar.org/aeaf/5dbb3608922246c7cd8a619541ea9e4a7028.pdf,,,https://pdfs.semanticscholar.org/aeaf/5dbb3608922246c7cd8a619541ea9e4a7028.pdf
+ae836e2be4bb784760e43de88a68c97f4f9e44a1,https://pdfs.semanticscholar.org/ae83/6e2be4bb784760e43de88a68c97f4f9e44a1.pdf,,,https://pdfs.semanticscholar.org/ae83/6e2be4bb784760e43de88a68c97f4f9e44a1.pdf
+aeff403079022683b233decda556a6aee3225065,https://arxiv.org/pdf/1701.01876.pdf,,,https://arxiv.org/pdf/1701.01876.pdf
+aeb6b9aba5bb08cde2aebfeda7ced6c38c84df4a,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8424644,,
+aef58a54d458ab76f62c9b6de61af4f475e0f616,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6706790,,
+ae2c71080b0e17dee4e5a019d87585f2987f0508,https://pdfs.semanticscholar.org/ae2c/71080b0e17dee4e5a019d87585f2987f0508.pdf,,,https://pdfs.semanticscholar.org/ae2c/71080b0e17dee4e5a019d87585f2987f0508.pdf
+aee3427d0814d8a398fd31f4f46941e9e5488d83,,,,http://dl.acm.org/citation.cfm?id=1924573
+ae5f32e489c4d52e7311b66060c7381d932f4193,https://arxiv.org/pdf/1711.09125.pdf,,,https://arxiv.org/pdf/1711.09125.pdf
+d8526863f35b29cbf8ac2ae756eaae0d2930ffb1,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265439,,
+d833268a8ea9278e68aaf3bd9bc2c11a5bb0bab7,,,http://doi.org/10.1007/s11042-018-6047-9,
+d89a754d7c59e025d2bfcdb872d2d061e2e371ba,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5598629,,
+d8fbd3a16d2e2e59ce0cff98b3fd586863878dc1,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952553,,
+d86fabd4498c8feaed80ec342d254fb877fb92f5,https://pdfs.semanticscholar.org/d86f/abd4498c8feaed80ec342d254fb877fb92f5.pdf,,,https://pdfs.semanticscholar.org/d86f/abd4498c8feaed80ec342d254fb877fb92f5.pdf
+d80a3d1f3a438e02a6685e66ee908446766fefa9,https://arxiv.org/pdf/1708.09687.pdf,,,https://arxiv.org/pdf/1708.09687.pdf
+d89cfed36ce8ffdb2097c2ba2dac3e2b2501100d,https://arxiv.org/pdf/1509.00244.pdf,,,https://arxiv.org/pdf/1509.00244.pdf
+ab58a7db32683aea9281c188c756ddf969b4cdbd,https://arxiv.org/pdf/1804.06291.pdf,,,https://arxiv.org/pdf/1804.06291.pdf
+ab734bac3994b00bf97ce22b9abc881ee8c12918,https://pdfs.semanticscholar.org/ab73/4bac3994b00bf97ce22b9abc881ee8c12918.pdf,,,https://pdfs.semanticscholar.org/ab73/4bac3994b00bf97ce22b9abc881ee8c12918.pdf
+ab8ecf98f457e29b000c44d49f5bf49ec92e571c,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8439631,,
+ab989225a55a2ddcd3b60a99672e78e4373c0df1,https://arxiv.org/pdf/1706.05599.pdf,,,https://arxiv.org/pdf/1706.05599.pdf
+ab0981d1da654f37620ca39c6b42de21d7eb58eb,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8016651,,
+ab1719f573a6c121d7d7da5053fe5f12de0182e7,https://pdfs.semanticscholar.org/ab17/19f573a6c121d7d7da5053fe5f12de0182e7.pdf,,,https://pdfs.semanticscholar.org/ab17/19f573a6c121d7d7da5053fe5f12de0182e7.pdf
+ab2b09b65fdc91a711e424524e666fc75aae7a51,https://pdfs.semanticscholar.org/ab2b/09b65fdc91a711e424524e666fc75aae7a51.pdf,,,https://pdfs.semanticscholar.org/ab2b/09b65fdc91a711e424524e666fc75aae7a51.pdf
+abba1bf1348a6f1b70a26aac237338ee66764458,https://arxiv.org/pdf/1808.03457.pdf,,,https://arxiv.org/pdf/1808.03457.pdf
+abdd17e411a7bfe043f280abd4e560a04ab6e992,https://arxiv.org/pdf/1803.00839.pdf,,,https://arxiv.org/pdf/1803.00839.pdf
+ab80582807506c0f840bd1ba03a8b84f8ac72f79,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8462326,,
+ab6886252aea103b3d974462f589b4886ef2735a,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4371439,,
+e5e5f31b81ed6526c26d277056b6ab4909a56c6c,https://arxiv.org/pdf/1809.06131.pdf,,,https://arxiv.org/pdf/1809.06131.pdf
+e506cdb250eba5e70c5147eb477fbd069714765b,https://pdfs.semanticscholar.org/e506/cdb250eba5e70c5147eb477fbd069714765b.pdf,,,https://pdfs.semanticscholar.org/e506/cdb250eba5e70c5147eb477fbd069714765b.pdf
+e572c42d8ef2e0fadedbaae77c8dfe05c4933fbf,https://pdfs.semanticscholar.org/e572/c42d8ef2e0fadedbaae77c8dfe05c4933fbf.pdf,,,https://pdfs.semanticscholar.org/e572/c42d8ef2e0fadedbaae77c8dfe05c4933fbf.pdf
+e5823a9d3e5e33e119576a34cb8aed497af20eea,https://arxiv.org/pdf/1809.05620.pdf,,,https://arxiv.org/pdf/1809.05620.pdf
+e5ea7295b89ef679e74919bf957f58d55ad49489,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4401948,,
+e5dfd17dbfc9647ccc7323a5d62f65721b318ba9,https://pdfs.semanticscholar.org/e5df/d17dbfc9647ccc7323a5d62f65721b318ba9.pdf,,,https://pdfs.semanticscholar.org/e5df/d17dbfc9647ccc7323a5d62f65721b318ba9.pdf
+e56c4c41bfa5ec2d86c7c9dd631a9a69cdc05e69,https://arxiv.org/pdf/1806.05226.pdf,,,https://arxiv.org/pdf/1806.05226.pdf
+e52f73c77c7eaece6f2d8fdd0f15327f9f007261,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099713,,
+e52f57a7de675d14aed28e5d0f2f3c5a01715337,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8319987,,
+e569f4bd41895028c4c009e5b46b935056188e91,https://pdfs.semanticscholar.org/e569/f4bd41895028c4c009e5b46b935056188e91.pdf,,,https://pdfs.semanticscholar.org/e569/f4bd41895028c4c009e5b46b935056188e91.pdf
+e5fbffd3449a2bfe0acb4ec339a19f5b88fff783,https://arxiv.org/pdf/1808.06882.pdf,,,https://arxiv.org/pdf/1808.06882.pdf
+e5d53a335515107452a30b330352cad216f88fc3,https://pdfs.semanticscholar.org/e5d5/3a335515107452a30b330352cad216f88fc3.pdf,,,https://pdfs.semanticscholar.org/e5d5/3a335515107452a30b330352cad216f88fc3.pdf
+e57014b4106dd1355e69a0f60bb533615a705606,,,http://doi.org/10.1007/s13748-018-0143-y,
+e22adcd2a6a7544f017ec875ce8f89d5c59e09c8,https://arxiv.org/pdf/1807.11936.pdf,,,https://arxiv.org/pdf/1807.11936.pdf
+e295c1aa47422eb35123053038e62e9aa50a2e3a,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406389,,
+e287ff7997297ce1197359ed0fb2a0bd381638c9,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7795253,,
+e2faaebd17d10e2919bd69492787e7565546a63f,,,http://doi.org/10.1007/s11042-017-4514-3,
+e2106bb3febb4fc8fe91f0fcbc241bcda0e56b1e,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952626,,
+e293a31260cf20996d12d14b8f29a9d4d99c4642,https://arxiv.org/pdf/1703.01560.pdf,,,https://arxiv.org/pdf/1703.01560.pdf
+e20e2db743e8db1ff61279f4fda32bf8cf381f8e,https://arxiv.org/pdf/1801.01486.pdf,,,https://arxiv.org/pdf/1801.01486.pdf
+f472cb8380a41c540cfea32ebb4575da241c0288,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7284869,,
+f412d9d7bc7534e7daafa43f8f5eab811e7e4148,https://pdfs.semanticscholar.org/f412/d9d7bc7534e7daafa43f8f5eab811e7e4148.pdf,,,https://pdfs.semanticscholar.org/f412/d9d7bc7534e7daafa43f8f5eab811e7e4148.pdf
+f442a2f2749f921849e22f37e0480ac04a3c3fec,https://pdfs.semanticscholar.org/f442/a2f2749f921849e22f37e0480ac04a3c3fec.pdf,,,https://pdfs.semanticscholar.org/f442/a2f2749f921849e22f37e0480ac04a3c3fec.pdf
+f4f6fc473effb063b7a29aa221c65f64a791d7f4,https://pdfs.semanticscholar.org/48ec/4b2c3b6c6549fa7a988f8db135a41691f605.pdf,,,https://pdfs.semanticscholar.org/48ec/4b2c3b6c6549fa7a988f8db135a41691f605.pdf
+f4ba07d2ae6c9673502daf50ee751a5e9262848f,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7284810,,
+f4d30896c5f808a622824a2d740b3130be50258e,https://arxiv.org/pdf/1705.06148.pdf,,,https://arxiv.org/pdf/1705.06148.pdf
+f4251e02f87ac3fcae70bdb313f13ed16ff6ff0a,,,,https://www.ncbi.nlm.nih.gov/pubmed/24314504
+f4b5a8f6462a68e79d643648c780efe588e4b6ca,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995700,,
+f42dca4a4426e5873a981712102aa961be34539a,https://pdfs.semanticscholar.org/f42d/ca4a4426e5873a981712102aa961be34539a.pdf,,,https://pdfs.semanticscholar.org/f42d/ca4a4426e5873a981712102aa961be34539a.pdf
+f3ca2c43e8773b7062a8606286529c5bc9b3ce25,https://arxiv.org/pdf/1704.06327.pdf,,,https://arxiv.org/pdf/1704.06327.pdf
+f39783847499dd56ba39c1f3b567f64dfdfa8527,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791189,,
+f3cdd2c3180aa2bf08320ddd3b9a56f9fe00e72b,,,http://doi.org/10.1016/j.patrec.2013.03.022,
+f374ac9307be5f25145b44931f5a53b388a77e49,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5339060,,
+f38813f1c9dac44dcb992ebe51c5ede66fd0f491,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354277,,
+f3a59d85b7458394e3c043d8277aa1ffe3cdac91,https://arxiv.org/pdf/1802.09900.pdf,,,https://arxiv.org/pdf/1802.09900.pdf
+f3df296de36b7c114451865778e211350d153727,https://arxiv.org/pdf/1703.06995.pdf,,,https://arxiv.org/pdf/1703.06995.pdf
+f3ea181507db292b762aa798da30bc307be95344,https://arxiv.org/pdf/1805.04855.pdf,,,https://arxiv.org/pdf/1805.04855.pdf
+f3fed71cc4fc49b02067b71c2df80e83084b2a82,https://arxiv.org/pdf/1804.06216.pdf,,,https://arxiv.org/pdf/1804.06216.pdf
+f3553148e322f4f64545d6667dfbc7607c82703a,,,http://doi.org/10.1007/s00138-016-0763-9,
+f3cf10c84c4665a0b28734f5233d423a65ef1f23,https://pdfs.semanticscholar.org/203d/7c52e2bd0da104516abbe34cd5aa5cfc8368.pdf,,,https://pdfs.semanticscholar.org/203d/7c52e2bd0da104516abbe34cd5aa5cfc8368.pdf
+f33bd953d2df0a5305fc8a93a37ff754459a906c,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961800,,
+f3b7938de5f178e25a3cf477107c76286c0ad691,https://arxiv.org/pdf/1807.05511.pdf,,,https://arxiv.org/pdf/1807.05511.pdf
+eb100638ed73b82e1cce8475bb8e180cb22a09a2,https://arxiv.org/pdf/1704.06228.pdf,,,https://arxiv.org/pdf/1704.06228.pdf
+ebbceab4e15bf641f74e335b70c6c4490a043961,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813349,,
+eb8519cec0d7a781923f68fdca0891713cb81163,https://arxiv.org/pdf/1703.08617.pdf,,,https://arxiv.org/pdf/1703.08617.pdf
+eb566490cd1aa9338831de8161c6659984e923fd,https://arxiv.org/pdf/1712.02310.pdf,,,https://arxiv.org/pdf/1712.02310.pdf
+eb4d2ec77fae67141f6cf74b3ed773997c2c0cf6,https://pdfs.semanticscholar.org/eb4d/2ec77fae67141f6cf74b3ed773997c2c0cf6.pdf,,,https://pdfs.semanticscholar.org/eb4d/2ec77fae67141f6cf74b3ed773997c2c0cf6.pdf
+ebb7cc67df6d90f1c88817b20e7a3baad5dc29b9,https://arxiv.org/pdf/1411.4324.pdf,,,https://arxiv.org/pdf/1411.4324.pdf
+eb70c38a350d13ea6b54dc9ebae0b64171d813c9,https://pdfs.semanticscholar.org/a531/bf1b04b794b19e6a563afe077f78a82ecbd6.pdf,,,https://pdfs.semanticscholar.org/a531/bf1b04b794b19e6a563afe077f78a82ecbd6.pdf
+ebc3d7f50231cdb18a8107433ae9adc7bd94b97a,,,http://doi.org/10.1111/cgf.13218,
+eba4cfd76f99159ccc0a65cab0a02db42b548d85,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751379,,
+ebde9b9c714ed326157f41add8c781f826c1d864,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014758,,
+eb027969f9310e0ae941e2adee2d42cdf07d938c,https://arxiv.org/pdf/1710.08092.pdf,,,https://arxiv.org/pdf/1710.08092.pdf
+eb48a58b873295d719827e746d51b110f5716d6c,https://arxiv.org/pdf/1706.01820.pdf,,,https://arxiv.org/pdf/1706.01820.pdf
+eb3066de677f9f6131aab542d9d426aaf50ed2ce,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373860,,
+eb8a3948c4be0d23eb7326d27f2271be893b3409,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7914701,,
+eb6f2b5529f2a7bc8b5b03b1171f75a4c753a0b2,,,http://doi.org/10.1117/12.650555,
+c7c53d75f6e963b403057d8ba5952e4974a779ad,https://pdfs.semanticscholar.org/c7c5/3d75f6e963b403057d8ba5952e4974a779ad.pdf,,,https://pdfs.semanticscholar.org/c7c5/3d75f6e963b403057d8ba5952e4974a779ad.pdf
+c79cf7f61441195404472102114bcf079a72138a,https://pdfs.semanticscholar.org/9704/8d901389535b122f82a6a949bd8f596790f2.pdf,,,https://pdfs.semanticscholar.org/9704/8d901389535b122f82a6a949bd8f596790f2.pdf
+c73dd452c20460f40becb1fd8146239c88347d87,https://arxiv.org/pdf/1708.01846.pdf,,,https://arxiv.org/pdf/1708.01846.pdf
+c7745f941532b7d6fa70db09e81eb1167f70f8a7,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1640757,,
+c72e6992f44ce75a40f44be4365dc4f264735cfb,https://arxiv.org/pdf/1807.11122.pdf,,,https://arxiv.org/pdf/1807.11122.pdf
+c7de0c85432ad17a284b5b97c4f36c23f506d9d1,https://pdfs.semanticscholar.org/a908/f786591a846f9c48e1ee5a134603efd32f9c.pdf,,,https://pdfs.semanticscholar.org/a908/f786591a846f9c48e1ee5a134603efd32f9c.pdf
+c71217b2b111a51a31cf1107c71d250348d1ff68,https://arxiv.org/pdf/1703.09912.pdf,,,https://arxiv.org/pdf/1703.09912.pdf
+c76f64e87f88475069f7707616ad9df1719a6099,https://arxiv.org/pdf/1803.08094.pdf,,,https://arxiv.org/pdf/1803.08094.pdf
+c7f0c0636d27a1d45b8fcef37e545b902195d937,https://arxiv.org/pdf/1709.00966.pdf,,,https://arxiv.org/pdf/1709.00966.pdf
+c7c8d150ece08b12e3abdb6224000c07a6ce7d47,https://arxiv.org/pdf/1611.05271.pdf,,,https://arxiv.org/pdf/1611.05271.pdf
+c75e6ce54caf17b2780b4b53f8d29086b391e839,https://arxiv.org/pdf/1802.00542.pdf,,,https://arxiv.org/pdf/1802.00542.pdf
+c05ae45c262b270df1e99a32efa35036aae8d950,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354120,,
+c038beaa228aeec174e5bd52460f0de75e9cccbe,https://arxiv.org/pdf/1705.02953.pdf,,,https://arxiv.org/pdf/1705.02953.pdf
+c05a7c72e679745deab9c9d7d481f7b5b9b36bdd,https://pdfs.semanticscholar.org/c05a/7c72e679745deab9c9d7d481f7b5b9b36bdd.pdf,,,https://pdfs.semanticscholar.org/c05a/7c72e679745deab9c9d7d481f7b5b9b36bdd.pdf
+c07ab025d9e3c885ad5386e6f000543efe091c4b,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8302601,,
+c0c0b8558b17aa20debc4611275a4c69edd1e2a7,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909629,,
+c0c8d720658374cc1ffd6116554a615e846c74b5,https://arxiv.org/pdf/1706.04508.pdf,,,https://arxiv.org/pdf/1706.04508.pdf
+c0f67e850176bb778b6c048d81c3d7e4d8c41003,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8296441,,
+c00df53bd46f78ae925c5768d46080159d4ef87d,https://arxiv.org/pdf/1707.08105.pdf,,,https://arxiv.org/pdf/1707.08105.pdf
+eece52bd0ed4d7925c49b34e67dbb6657d2d649b,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014982,,
+ee1465cbbc1d03cb9eddaad8618a4feea78a01ce,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6998872,,
+ee7e8aec3ebb37e41092e1285e4f81916ce92c18,,,,https://www.sciencedirect.com/science/article/pii/S0197458017301859
+ee815f60dc4a090fa9fcfba0135f4707af21420d,https://arxiv.org/pdf/1702.02925.pdf,,,https://arxiv.org/pdf/1702.02925.pdf
+eed7920682789a9afd0de4efd726cd9a706940c8,https://pdfs.semanticscholar.org/3115/90680f1ae14864df886af20699d2eca7099f.pdf,,,https://pdfs.semanticscholar.org/3115/90680f1ae14864df886af20699d2eca7099f.pdf
+ee1f9637f372d2eccc447461ef834a9859011ec1,,,http://doi.org/10.1007/s11042-016-3950-9,
+ee56823f2f00c8c773e4ebc725ca57d2f9242947,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7110235,,
+ee2ec0836ded2f3f37bf49fa0e985280a8addaca,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8368755,,
+ee463f1f72a7e007bae274d2d42cd2e5d817e751,https://pdfs.semanticscholar.org/ee46/3f1f72a7e007bae274d2d42cd2e5d817e751.pdf,,,https://pdfs.semanticscholar.org/ee46/3f1f72a7e007bae274d2d42cd2e5d817e751.pdf
+eee06d68497be8bf3a8aba4fde42a13aa090b301,https://arxiv.org/pdf/1806.11191.pdf,,,https://arxiv.org/pdf/1806.11191.pdf
+eee2d2ac461f46734c8e674ae14ed87bbc8d45c6,https://arxiv.org/pdf/1704.02112.pdf,,,https://arxiv.org/pdf/1704.02112.pdf
+eed93d2e16b55142b3260d268c9e72099c53d5bc,https://arxiv.org/pdf/1801.01262.pdf,,,https://arxiv.org/pdf/1801.01262.pdf
+eedfb384a5e42511013b33104f4cd3149432bd9e,https://pdfs.semanticscholar.org/eedf/b384a5e42511013b33104f4cd3149432bd9e.pdf,,,https://pdfs.semanticscholar.org/eedf/b384a5e42511013b33104f4cd3149432bd9e.pdf
+c91da328fe50821182e1ae4e7bcbe2b62496f8b9,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4453844,,
+c9b958c2494b7ba08b5b460f19a06814dba8aee0,,,,https://www.ncbi.nlm.nih.gov/pubmed/30080142
+c9c9ade2ef4dffb7582a629a47ea70c31be7a35e,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237606,,
+c97a5f2241cc6cd99ef0c4527ea507a50841f60b,https://arxiv.org/pdf/1807.10510.pdf,,,https://arxiv.org/pdf/1807.10510.pdf
+c997744db532767ee757197491d8ac28d10f1c0f,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8364339,,
+c9efcd8e32dced6efa2bba64789df8d0a8e4996a,,,,http://dl.acm.org/citation.cfm?id=2984060
+c9bbd7828437e70cc3e6863b278aa56a7d545150,https://arxiv.org/pdf/1708.02044.pdf,,,https://arxiv.org/pdf/1708.02044.pdf
+c98983592777952d1751103b4d397d3ace00852d,https://pdfs.semanticscholar.org/c989/83592777952d1751103b4d397d3ace00852d.pdf,,,https://pdfs.semanticscholar.org/c989/83592777952d1751103b4d397d3ace00852d.pdf
+c900e0ad4c95948baaf0acd8449fde26f9b4952a,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780969,,
+c9367ed83156d4d682cefc59301b67f5460013e0,https://arxiv.org/pdf/1802.01822.pdf,,,https://arxiv.org/pdf/1802.01822.pdf
+c914d2ba06ec3fd1baa0010dcc4d16c7c34fc225,,,http://doi.org/10.1007/978-3-319-11071-4,
+c98b13871a3bc767df0bdd51ff00c5254ede8b22,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909913,,
+fc7b34a2e43bb3d3585e1963bb64a488e2f278a0,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7045492,,
+fc0f5859a111fb17e6dcf6ba63dd7b751721ca61,https://pdfs.semanticscholar.org/fc0f/5859a111fb17e6dcf6ba63dd7b751721ca61.pdf,,,https://pdfs.semanticscholar.org/fc0f/5859a111fb17e6dcf6ba63dd7b751721ca61.pdf
+fcc6fe6007c322641796cb8792718641856a22a7,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994,,
+fcf91995dc4d9b0cee84bda5b5b0ce5b757740ac,https://pdfs.semanticscholar.org/fcf9/1995dc4d9b0cee84bda5b5b0ce5b757740ac.pdf,,,https://pdfs.semanticscholar.org/fcf9/1995dc4d9b0cee84bda5b5b0ce5b757740ac.pdf
+fc8fb68a7e3b79c37108588671c0e1abf374f501,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7565615,,
+fcf393a90190e376b617cc02e4a473106684d066,,,http://doi.org/10.1007/s10044-015-0507-x,
+fcceea054cb59f1409dda181198ed4070ed762c9,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8388318,,
+fc7f140fcedfe54dd63769268a36ff3f175662b5,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8013122,,
+fd9ab411dc6258763c95b7741e3d51adf5504040,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5595808,,
+fdff2da5bdca66e0ab5874ef58ac2205fb088ed7,https://pdfs.semanticscholar.org/cea6/9010a2f75f7a057d56770e776dec206ed705.pdf,,,https://pdfs.semanticscholar.org/cea6/9010a2f75f7a057d56770e776dec206ed705.pdf
+fdfd57d4721174eba288e501c0c120ad076cdca8,https://arxiv.org/pdf/1704.07129.pdf,,,https://arxiv.org/pdf/1704.07129.pdf
+fd809ee36fa6832dda57a0a2403b4b52c207549d,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8409768,,
+fd33df02f970055d74fbe69b05d1a7a1b9b2219b,https://arxiv.org/pdf/1710.06236.pdf,,,https://arxiv.org/pdf/1710.06236.pdf
+fdda5852f2cffc871fd40b0cb1aa14cea54cd7e3,https://arxiv.org/pdf/1712.04109.pdf,,,https://arxiv.org/pdf/1712.04109.pdf
+fd15e397629e0241642329fc8ee0b8cd6c6ac807,https://arxiv.org/pdf/1806.01547.pdf,,,https://arxiv.org/pdf/1806.01547.pdf
+fde41dc4ec6ac6474194b99e05b43dd6a6c4f06f,https://arxiv.org/pdf/1809.01990.pdf,,,https://arxiv.org/pdf/1809.01990.pdf
+fd53be2e0a9f33080a9db4b5a5e416e24ae8e198,https://arxiv.org/pdf/1606.02909.pdf,,,https://arxiv.org/pdf/1606.02909.pdf
+fd71ae9599e8a51d8a61e31e6faaaf4a23a17d81,https://arxiv.org/pdf/1807.11332.pdf,,,https://arxiv.org/pdf/1807.11332.pdf
+fd10b0c771a2620c0db294cfb82b80d65f73900d,https://arxiv.org/pdf/1809.02860.pdf,,,https://arxiv.org/pdf/1809.02860.pdf
+fdbacf2ff0fc21e021c830cdcff7d347f2fddd8e,https://pdfs.semanticscholar.org/fdba/cf2ff0fc21e021c830cdcff7d347f2fddd8e.pdf,,,https://pdfs.semanticscholar.org/fdba/cf2ff0fc21e021c830cdcff7d347f2fddd8e.pdf
+fd892e912149e3f5ddd82499e16f9ea0f0063fa3,https://pdfs.semanticscholar.org/fd89/2e912149e3f5ddd82499e16f9ea0f0063fa3.pdf,,,https://pdfs.semanticscholar.org/fd89/2e912149e3f5ddd82499e16f9ea0f0063fa3.pdf
+fde611bf25a89fe11e077692070f89dcdede043a,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7322904,,
+fdf8e293a7618f560e76bd83e3c40a0788104547,https://arxiv.org/pdf/1704.04023.pdf,,,https://arxiv.org/pdf/1704.04023.pdf
+fd5376fcb09001a3acccc03159e8ff5801129683,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373899,,
+fdaf65b314faee97220162980e76dbc8f32db9d6,https://pdfs.semanticscholar.org/fdaf/65b314faee97220162980e76dbc8f32db9d6.pdf,,,https://pdfs.semanticscholar.org/fdaf/65b314faee97220162980e76dbc8f32db9d6.pdf
+f2902f5956d7e2dca536d9131d4334f85f52f783,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6460191,,
+f2d605985821597773bc6b956036bdbc5d307386,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8027090,,
+f2896dd2701fbb3564492a12c64f11a5ad456a67,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5495414,,
+f22d6d59e413ee255e5e0f2104f1e03be1a6722e,https://arxiv.org/pdf/1708.03958.pdf,,,https://arxiv.org/pdf/1708.03958.pdf
+f2700e3d69d3cce2e0b1aea0d7f87e74aff437cd,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237686,,
+f27e5a13c1c424504b63a9084c50f491c1b17978,,,,http://dl.acm.org/citation.cfm?id=3097991
+f2eab39cf68de880ee7264b454044a55098e8163,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5539989,,
+f2a7f9bd040aa8ea87672d38606a84c31163e171,https://arxiv.org/pdf/1608.07876.pdf,,,https://arxiv.org/pdf/1608.07876.pdf
+f257300b2b4141aab73f93c146bf94846aef5fa1,https://arxiv.org/pdf/1708.05465.pdf,,,https://arxiv.org/pdf/1708.05465.pdf
+f20e0eefd007bc310d2a753ba526d33a8aba812c,https://pdfs.semanticscholar.org/116e/c3a1a8225362a3e3e445df45036fae7cadc6.pdf,,,https://pdfs.semanticscholar.org/116e/c3a1a8225362a3e3e445df45036fae7cadc6.pdf
+f2d5bb329c09a5867045721112a7dad82ca757a3,,,http://doi.org/10.1007/s11042-015-3009-3,
+f231046d5f5d87e2ca5fae88f41e8d74964e8f4f,https://pdfs.semanticscholar.org/f231/046d5f5d87e2ca5fae88f41e8d74964e8f4f.pdf,,,https://pdfs.semanticscholar.org/f231/046d5f5d87e2ca5fae88f41e8d74964e8f4f.pdf
+f201baf618574108bcee50e9a8b65f5174d832ee,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8031057,,
+f28b7d62208fdaaa658716403106a2b0b527e763,https://arxiv.org/pdf/1803.08457.pdf,,,https://arxiv.org/pdf/1803.08457.pdf
+f58d584c4ac93b4e7620ef6e5a8f20c6f6da295e,https://pdfs.semanticscholar.org/f58d/584c4ac93b4e7620ef6e5a8f20c6f6da295e.pdf,,,https://pdfs.semanticscholar.org/f58d/584c4ac93b4e7620ef6e5a8f20c6f6da295e.pdf
+f5c57979ec3d8baa6f934242965350865c0121bd,,,http://doi.org/10.1007/s12539-018-0281-8,
+f5eb0cf9c57716618fab8e24e841f9536057a28a,https://arxiv.org/pdf/1803.02988.pdf,,,https://arxiv.org/pdf/1803.02988.pdf
+f5603ceaebe3caf6a812edef9c4b38def78cbf34,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4455998,,
+f571fe3f753765cf695b75b1bd8bed37524a52d2,https://pdfs.semanticscholar.org/8203/70a36ec56f8987fbec5ca2769f996d03d79b.pdf,,,https://pdfs.semanticscholar.org/8203/70a36ec56f8987fbec5ca2769f996d03d79b.pdf
+f5fae7810a33ed67852ad6a3e0144cb278b24b41,https://pdfs.semanticscholar.org/f5fa/e7810a33ed67852ad6a3e0144cb278b24b41.pdf,,,https://pdfs.semanticscholar.org/f5fa/e7810a33ed67852ad6a3e0144cb278b24b41.pdf
+f5770dd225501ff3764f9023f19a76fad28127d4,https://pdfs.semanticscholar.org/f577/0dd225501ff3764f9023f19a76fad28127d4.pdf,,,https://pdfs.semanticscholar.org/f577/0dd225501ff3764f9023f19a76fad28127d4.pdf
+f5eb411217f729ad7ae84bfd4aeb3dedb850206a,https://pdfs.semanticscholar.org/f5eb/411217f729ad7ae84bfd4aeb3dedb850206a.pdf,,,https://pdfs.semanticscholar.org/f5eb/411217f729ad7ae84bfd4aeb3dedb850206a.pdf
+e393a038d520a073b9835df7a3ff104ad610c552,https://pdfs.semanticscholar.org/b6aa/94b81b2165e492cc2900e05dd997619bfe7a.pdf,,,https://pdfs.semanticscholar.org/b6aa/94b81b2165e492cc2900e05dd997619bfe7a.pdf
+e3ce4c3e1279e3dc0c14ff3bb2920aced9e62638,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099824,,
+e3b324101157daede3b4d16bdc9c2388e849c7d4,https://pdfs.semanticscholar.org/e3b3/24101157daede3b4d16bdc9c2388e849c7d4.pdf,,,https://pdfs.semanticscholar.org/e3b3/24101157daede3b4d16bdc9c2388e849c7d4.pdf
+e3d76f1920c5bf4a60129516abb4a2d8683e48ae,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014907,,
+e3c011d08d04c934197b2a4804c90be55e21d572,https://arxiv.org/pdf/1709.02940.pdf,,,https://arxiv.org/pdf/1709.02940.pdf
+e39a66a6d1c5e753f8e6c33cd5d335f9bc9c07fa,https://pdfs.semanticscholar.org/e39a/66a6d1c5e753f8e6c33cd5d335f9bc9c07fa.pdf,,,https://pdfs.semanticscholar.org/e39a/66a6d1c5e753f8e6c33cd5d335f9bc9c07fa.pdf
+e3c8e49ffa7beceffca3f7f276c27ae6d29b35db,https://arxiv.org/pdf/1604.02182.pdf,,,https://arxiv.org/pdf/1604.02182.pdf
+e38371b69be4f341baa95bc854584e99b67c6d3a,https://arxiv.org/pdf/1803.07201.pdf,,,https://arxiv.org/pdf/1803.07201.pdf
+e3b9863e583171ac9ae7b485f88e503852c747b6,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7494596,,
+e3a6e5a573619a97bd6662b652ea7d088ec0b352,https://arxiv.org/pdf/1804.00112.pdf,,,https://arxiv.org/pdf/1804.00112.pdf
+cfeb26245b57dd10de8f187506d4ed5ce1e2b7dd,https://arxiv.org/pdf/1805.11195.pdf,,,https://arxiv.org/pdf/1805.11195.pdf
+cfaf61bacf61901b7e1ac25b779a1f87c1e8cf7f,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6737950,,
+cfffae38fe34e29d47e6deccfd259788176dc213,https://pdfs.semanticscholar.org/cfff/ae38fe34e29d47e6deccfd259788176dc213.pdf,,,https://pdfs.semanticscholar.org/cfff/ae38fe34e29d47e6deccfd259788176dc213.pdf
+cfd4004054399f3a5f536df71f9b9987f060f434,https://arxiv.org/pdf/1710.03224.pdf,,,https://arxiv.org/pdf/1710.03224.pdf
+cf736f596bf881ca97ec4b29776baaa493b9d50e,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952629,,
+cfb8bc66502fb5f941ecdb22aec1fdbfdb73adce,https://arxiv.org/pdf/1807.08512.pdf,,,https://arxiv.org/pdf/1807.08512.pdf
+cf2e1ebb9609f46af6de0c15b4f48d03e37e54ba,,,,http://arxiv.org/abs/1503.01521
+cfa92e17809e8d20ebc73b4e531a1b106d02b38c,https://pdfs.semanticscholar.org/cfa9/2e17809e8d20ebc73b4e531a1b106d02b38c.pdf,,,https://pdfs.semanticscholar.org/cfa9/2e17809e8d20ebc73b4e531a1b106d02b38c.pdf
+cf5a0115d3f4dcf95bea4d549ec2b6bdd7c69150,https://pdfs.semanticscholar.org/e36f/a8b870fd155f9df898bcc6613f6554eab519.pdf,,,https://pdfs.semanticscholar.org/e36f/a8b870fd155f9df898bcc6613f6554eab519.pdf
+cfdc632adcb799dba14af6a8339ca761725abf0a,https://arxiv.org/pdf/1804.01575.pdf,,,https://arxiv.org/pdf/1804.01575.pdf
+cfa931e6728a825caada65624ea22b840077f023,https://arxiv.org/pdf/1806.06298.pdf,,,https://arxiv.org/pdf/1806.06298.pdf
+cfc30ce53bfc204b8764ebb764a029a8d0ad01f4,https://arxiv.org/pdf/1710.05179.pdf,,,https://arxiv.org/pdf/1710.05179.pdf
+cff911786b5ac884bb71788c5bc6acf6bf569eff,https://arxiv.org/pdf/1805.01290.pdf,,,https://arxiv.org/pdf/1805.01290.pdf
+cfc4aa456d9da1a6fabd7c6ca199332f03e35b29,https://pdfs.semanticscholar.org/cfc4/aa456d9da1a6fabd7c6ca199332f03e35b29.pdf,,,https://pdfs.semanticscholar.org/cfc4/aa456d9da1a6fabd7c6ca199332f03e35b29.pdf
+cf805d478aeb53520c0ab4fcdc9307d093c21e52,https://pdfs.semanticscholar.org/cf80/5d478aeb53520c0ab4fcdc9307d093c21e52.pdf,,,https://pdfs.semanticscholar.org/cf80/5d478aeb53520c0ab4fcdc9307d093c21e52.pdf
+cfdc4d0f8e1b4b9ced35317d12b4229f2e3311ab,https://pdfs.semanticscholar.org/cfdc/4d0f8e1b4b9ced35317d12b4229f2e3311ab.pdf,,,https://pdfs.semanticscholar.org/cfdc/4d0f8e1b4b9ced35317d12b4229f2e3311ab.pdf
+ca096e158912080493a898b0b8a4bd2902674fed,,,,http://dl.acm.org/citation.cfm?id=3264899
+ca902aeec4fa54d32a4fed9ba89a7fb2f7131734,,,http://doi.org/10.1007/s11042-018-5945-1,
+cad24ba99c7b6834faf6f5be820dd65f1a755b29,https://arxiv.org/pdf/1807.08254.pdf,,,https://arxiv.org/pdf/1807.08254.pdf
+ca44a838da4187617dca9f6249d8c4b604661ec7,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7351564,,
+ca37eda56b9ee53610c66951ee7ca66a35d0a846,https://pdfs.semanticscholar.org/ca37/eda56b9ee53610c66951ee7ca66a35d0a846.pdf,,,https://pdfs.semanticscholar.org/ca37/eda56b9ee53610c66951ee7ca66a35d0a846.pdf
+e4754afaa15b1b53e70743880484b8d0736990ff,,,http://doi.org/10.1016/j.imavis.2016.01.002,
+e40cb4369c6402ae53c81ce52b73df3ef89f578b,,,http://doi.org/10.1016/j.image.2015.01.009,
+e45a556df61e2357a8f422bdf864b7a5ed3b8627,,,http://doi.org/10.1016/j.image.2017.08.001,
+e4d7b8eb0a8e6d2bb5b90b027c1bf32bad320ba5,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8023876,,
+e4fa062bff299a0bcef9f6b2e593c85be116c9f1,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7407641,,
+e43045a061421bd79713020bc36d2cf4653c044d,https://arxiv.org/pdf/1703.03492.pdf,,,https://arxiv.org/pdf/1703.03492.pdf
+e4d8ba577cabcb67b4e9e1260573aea708574886,https://pdfs.semanticscholar.org/e4d8/ba577cabcb67b4e9e1260573aea708574886.pdf,,,https://pdfs.semanticscholar.org/e4d8/ba577cabcb67b4e9e1260573aea708574886.pdf
+e4abc40f79f86dbc06f5af1df314c67681dedc51,https://arxiv.org/pdf/1707.06786.pdf,,,https://arxiv.org/pdf/1707.06786.pdf
+fe866887d3c26ee72590c440ed86ffc80e980293,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8397011,,
+fe464b2b54154d231671750053861f5fd14454f5,https://pdfs.semanticscholar.org/fe46/4b2b54154d231671750053861f5fd14454f5.pdf,,,https://pdfs.semanticscholar.org/fe46/4b2b54154d231671750053861f5fd14454f5.pdf
+fe7c0bafbd9a28087e0169259816fca46db1a837,https://arxiv.org/pdf/1804.00326.pdf,,,https://arxiv.org/pdf/1804.00326.pdf
+fe50efe9e282c63941ec23eb9b8c7510b6283228,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7314574,,
+fe48f0e43dbdeeaf4a03b3837e27f6705783e576,https://arxiv.org/pdf/1607.05477.pdf,,,https://arxiv.org/pdf/1607.05477.pdf
+fea83550a21f4b41057b031ac338170bacda8805,https://arxiv.org/pdf/1605.07270.pdf,,,https://arxiv.org/pdf/1605.07270.pdf
+feea73095b1be0cbae1ad7af8ba2c4fb6f316d35,,,,http://dl.acm.org/citation.cfm?id=3126693
+fecccc79548001ecbd6cafd3067bcf14de80b11a,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354157,,
+fe0c51fd41cb2d5afa1bc1900bbbadb38a0de139,https://pdfs.semanticscholar.org/fe0c/51fd41cb2d5afa1bc1900bbbadb38a0de139.pdf,,,https://pdfs.semanticscholar.org/fe0c/51fd41cb2d5afa1bc1900bbbadb38a0de139.pdf
+c847de9faa1f1a06d5647949a23f523f84aba7f3,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6199761,,
+c86e6ed734d3aa967deae00df003557b6e937d3d,https://arxiv.org/pdf/1807.03923.pdf,,,https://arxiv.org/pdf/1807.03923.pdf
+c8585c95215bc53e28edb740678b3a0460ca8aa4,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373829,,
+c87f7ee391d6000aef2eadb49f03fc237f4d1170,https://arxiv.org/pdf/1804.03547.pdf,,,https://arxiv.org/pdf/1804.03547.pdf
+c84de67ec2a5d687869d0c3ca8ac974aaa5ee765,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7090979,,
+c866a2afc871910e3282fd9498dce4ab20f6a332,https://arxiv.org/pdf/1804.09691.pdf,,,https://arxiv.org/pdf/1804.09691.pdf
+c8ca6a2dc41516c16ea0747e9b3b7b1db788dbdd,https://arxiv.org/pdf/1609.02825.pdf,,,https://arxiv.org/pdf/1609.02825.pdf
+c83e26622b275fdf878135e71c23325a31d0e5fc,,,,http://dl.acm.org/citation.cfm?id=3164611
+c84233f854bbed17c22ba0df6048cbb1dd4d3248,https://pdfs.semanticscholar.org/c842/33f854bbed17c22ba0df6048cbb1dd4d3248.pdf,,,https://pdfs.semanticscholar.org/c842/33f854bbed17c22ba0df6048cbb1dd4d3248.pdf
+c829be73584966e3162f7ccae72d9284a2ebf358,https://pdfs.semanticscholar.org/c829/be73584966e3162f7ccae72d9284a2ebf358.pdf,,,https://pdfs.semanticscholar.org/c829/be73584966e3162f7ccae72d9284a2ebf358.pdf
+c87d5036d3a374c66ec4f5870df47df7176ce8b9,https://pdfs.semanticscholar.org/c87d/5036d3a374c66ec4f5870df47df7176ce8b9.pdf,,,https://pdfs.semanticscholar.org/c87d/5036d3a374c66ec4f5870df47df7176ce8b9.pdf
+c808c784237f167c78a87cc5a9d48152579c27a4,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265437,,
+c858c74d30c02be2d992f82a821b925669bfca13,,,http://doi.org/10.1007/978-3-319-10605-2,
+c83a05de1b4b20f7cd7cd872863ba2e66ada4d3f,https://arxiv.org/pdf/1705.01842.pdf,,,https://arxiv.org/pdf/1705.01842.pdf
+c843f591658ca9dbb77944a89372a92006defe68,,,http://doi.org/10.1007/s11042-015-2550-4,
+c88c21eb9a8e08b66c981db35f6556f4974d27a8,https://pdfs.semanticscholar.org/c88c/21eb9a8e08b66c981db35f6556f4974d27a8.pdf,,,https://pdfs.semanticscholar.org/c88c/21eb9a8e08b66c981db35f6556f4974d27a8.pdf
+fb2cc3501fc89f92f5ee130d66e69854f8a9ddd1,https://arxiv.org/pdf/1602.01168.pdf,,,https://arxiv.org/pdf/1602.01168.pdf
+fb6f5cb26395608a3cf0e9c6c618293a4278a8ad,,,http://doi.org/10.1007/s11390-018-1835-2,
+fb87045600da73b07f0757f345a937b1c8097463,https://pdfs.semanticscholar.org/5c54/2fef80a35a4f930e5c82040b52c58e96ce87.pdf,,,https://pdfs.semanticscholar.org/5c54/2fef80a35a4f930e5c82040b52c58e96ce87.pdf
+fb85867c989b9ee6b7899134136f81d6372526a9,https://arxiv.org/pdf/1808.01424.pdf,,,https://arxiv.org/pdf/1808.01424.pdf
+fbc591cde7fb7beb985437a22466f9cf4b16f8b1,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8463262,,
+fb54d3c37dc82891ff9dc7dd8caf31de00c40d6a,https://pdfs.semanticscholar.org/c21b/ccf1ab4bb090fd5fc1109421a1a3979e7106.pdf,,,https://pdfs.semanticscholar.org/c21b/ccf1ab4bb090fd5fc1109421a1a3979e7106.pdf
+fb6cc23fd6bd43bd4cacf6a57cd2c7c8dfe5269d,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5339084,,
+fbe4f8a6af19f63e47801c6f31402f9baae5fecf,,,,http://dl.acm.org/citation.cfm?id=2820910
+fbc2f5cf943f440b1ba2374ecf82d0176a44f1eb,,,,https://www.ncbi.nlm.nih.gov/pubmed/30040629
+fbc9ba70e36768efff130c7d970ce52810b044ff,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6738500,,
+fb8eb4a7b9b9602992e5982c9e0d6d7f7b8210ef,,,,https://www.ncbi.nlm.nih.gov/pubmed/29994550
+fbb2f81fc00ee0f257d4aa79bbef8cad5000ac59,https://pdfs.semanticscholar.org/fbb2/f81fc00ee0f257d4aa79bbef8cad5000ac59.pdf,,,https://pdfs.semanticscholar.org/fbb2/f81fc00ee0f257d4aa79bbef8cad5000ac59.pdf
+fb9ad920809669c1b1455cc26dbd900d8e719e61,https://pdfs.semanticscholar.org/fb9a/d920809669c1b1455cc26dbd900d8e719e61.pdf,,,https://pdfs.semanticscholar.org/fb9a/d920809669c1b1455cc26dbd900d8e719e61.pdf
+edfce091688bc88389dd4877950bd58e00ff1253,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553700,,
+ed32df6b122b15a52238777c9993ed31107b4bed,,,http://doi.org/10.1016/j.eswa.2017.03.008,
+ed2f4e5ecbc4b08ee0784e97760a7f9e5ea9efae,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8241843,,
+ed0cf5f577f5030ac68ab62fee1cf065349484cc,https://pdfs.semanticscholar.org/ed0c/f5f577f5030ac68ab62fee1cf065349484cc.pdf,,,https://pdfs.semanticscholar.org/ed0c/f5f577f5030ac68ab62fee1cf065349484cc.pdf
+edde81b2bdd61bd757b71a7b3839b6fef81f4be4,https://arxiv.org/pdf/1507.06332.pdf,,,https://arxiv.org/pdf/1507.06332.pdf
+edf98a925bb24e39a6e6094b0db839e780a77b08,https://arxiv.org/pdf/1807.09930.pdf,,,https://arxiv.org/pdf/1807.09930.pdf
+ed9d11e995baeec17c5d2847ec1a8d5449254525,https://pdfs.semanticscholar.org/ed9d/11e995baeec17c5d2847ec1a8d5449254525.pdf,,,https://pdfs.semanticscholar.org/ed9d/11e995baeec17c5d2847ec1a8d5449254525.pdf
+ede5982980aa76deae8f9dc5143a724299d67742,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8081396,,
+ed07856461da6c7afa4f1782b5b607b45eebe9f6,https://pdfs.semanticscholar.org/ed07/856461da6c7afa4f1782b5b607b45eebe9f6.pdf,,,https://pdfs.semanticscholar.org/ed07/856461da6c7afa4f1782b5b607b45eebe9f6.pdf
+ed184fda0306079f2ee55a1ae60fbf675c8e11c6,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6802347,,
+ed1886e233c8ecef7f414811a61a83e44c8bbf50,https://arxiv.org/pdf/1706.01789.pdf,,,https://arxiv.org/pdf/1706.01789.pdf
+ed388878151a3b841f95a62c42382e634d4ab82e,https://arxiv.org/pdf/1805.07550.pdf,,,https://arxiv.org/pdf/1805.07550.pdf
+edd6ed94207ab614c71ac0591d304a708d708e7b,,,http://doi.org/10.1016/j.neucom.2012.02.001,
+edf60d081ffdfa80243217a50a411ab5407c961d,,,http://doi.org/10.1007/s11263-016-0893-6,
+ede16b198b83d04b52dc3f0dafc11fd82c5abac4,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952343,,
+edff76149ec44f6849d73f019ef9bded534a38c2,https://arxiv.org/pdf/1704.02203.pdf,,,https://arxiv.org/pdf/1704.02203.pdf
+ed96f2eb1771f384df2349879970065a87975ca7,https://arxiv.org/pdf/1805.12302.pdf,,,https://arxiv.org/pdf/1805.12302.pdf
+c15b68986ecfa1e13e3791686ae9024f66983f14,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014747,,
+c146aa6d56233ce700032f1cb179700778557601,https://arxiv.org/pdf/1708.07199.pdf,,,https://arxiv.org/pdf/1708.07199.pdf
+c12260540ec14910f5ec6e38d95bdb606826b32e,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7005459,,
+c18a03568d4b512a0d8380cbb1fbf6bd56d11f05,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8430403,,
+c1cc2a2a1ab66f6c9c6fabe28be45d1440a57c3d,https://pdfs.semanticscholar.org/aae7/a5182e59f44b7bb49f61999181ce011f800b.pdf,,,https://pdfs.semanticscholar.org/aae7/a5182e59f44b7bb49f61999181ce011f800b.pdf
+c1c2775e19d6fd2ad6616f69bda92ac8927106a2,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6196236,,
+c11eb653746afa8148dc9153780a4584ea529d28,https://arxiv.org/pdf/1809.07764.pdf,,,https://arxiv.org/pdf/1809.07764.pdf
+c1ebbdb47cb6a0ed49c4d1cf39d7565060e6a7ee,https://arxiv.org/pdf/1805.05612.pdf,,,https://arxiv.org/pdf/1805.05612.pdf
+c175ebe550761b18bac24d394d85bdfaf3b7718c,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1301582,,
+c1298120e9ab0d3764512cbd38b47cd3ff69327b,https://pdfs.semanticscholar.org/c129/8120e9ab0d3764512cbd38b47cd3ff69327b.pdf,,,https://pdfs.semanticscholar.org/c129/8120e9ab0d3764512cbd38b47cd3ff69327b.pdf
+c1dd69df9dfbd7b526cc89a5749f7f7fabc1e290,https://pdfs.semanticscholar.org/7f02/61a759807f2ff57102a4e93318916232473f.pdf,,,https://pdfs.semanticscholar.org/7f02/61a759807f2ff57102a4e93318916232473f.pdf
+c68ec931585847b37cde9f910f40b2091a662e83,https://pdfs.semanticscholar.org/c68e/c931585847b37cde9f910f40b2091a662e83.pdf,,,https://pdfs.semanticscholar.org/c68e/c931585847b37cde9f910f40b2091a662e83.pdf
+c6f3399edb73cfba1248aec964630c8d54a9c534,https://arxiv.org/pdf/1809.03336.pdf,,,https://arxiv.org/pdf/1809.03336.pdf
+c6724c2bb7f491c92c8dd4a1f01a80b82644b793,,,,https://www.ncbi.nlm.nih.gov/pubmed/19167865
+c61eaf172820fcafaabf39005bd4536f0c45f995,,,http://doi.org/10.1007/978-3-319-58771-4_1,
+c6382de52636705be5898017f2f8ed7c70d7ae96,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139089,,
+c678920facffd35853c9d185904f4aebcd2d8b49,https://arxiv.org/pdf/1803.11556.pdf,,,https://arxiv.org/pdf/1803.11556.pdf
+c6241e6fc94192df2380d178c4c96cf071e7a3ac,https://arxiv.org/pdf/1505.04868.pdf,,,https://arxiv.org/pdf/1505.04868.pdf
+c631a31be2c793d398175ceef7daff1848bb6408,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8466318,,
+c62c07de196e95eaaf614fb150a4fa4ce49588b4,https://pdfs.semanticscholar.org/c62c/07de196e95eaaf614fb150a4fa4ce49588b4.pdf,,,https://pdfs.semanticscholar.org/c62c/07de196e95eaaf614fb150a4fa4ce49588b4.pdf
+c61a8940d66eed9850b35dd3768f18b59471ca34,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1374768,,
+ec90d333588421764dff55658a73bbd3ea3016d2,https://pdfs.semanticscholar.org/ec90/d333588421764dff55658a73bbd3ea3016d2.pdf,,,https://pdfs.semanticscholar.org/ec90/d333588421764dff55658a73bbd3ea3016d2.pdf
+ec8ec2dfd73cf3667f33595fef84c95c42125945,https://arxiv.org/pdf/1707.06286.pdf,,,https://arxiv.org/pdf/1707.06286.pdf
+ec1e03ec72186224b93b2611ff873656ed4d2f74,https://pdfs.semanticscholar.org/ec1e/03ec72186224b93b2611ff873656ed4d2f74.pdf,,,https://pdfs.semanticscholar.org/ec1e/03ec72186224b93b2611ff873656ed4d2f74.pdf
+ecac3da2ff8bc2ba55981467f7fdea9de80e2092,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1301635,,
+ec576efd18203bcb8273539fa277839ec92232a1,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7994601,,
+ecc4be938f0e61a9c6b5111e0a99013f2edc54b9,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771439,,
+ec1bec7344d07417fb04e509a9d3198da850349f,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8342699,,
+ec983394f800da971d243f4143ab7f8421aa967c,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8340635,,
+ecd08edab496801fd4fde45362dde462d00ee91c,,,,https://www.ncbi.nlm.nih.gov/pubmed/29994561
+ec5c63609cf56496715b0eba0e906de3231ad6d1,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8364651,,
+ec00ecb64fa206cea8b2e716955a738a96424084,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265512,,
+ec90738b6de83748957ff7c8aeb3150b4c9b68bb,,,http://doi.org/10.1016/j.patcog.2015.03.011,
+4e30107ee6a2e087f14a7725e7fc5535ec2f5a5f,https://pdfs.semanticscholar.org/4e30/107ee6a2e087f14a7725e7fc5535ec2f5a5f.pdf,,,https://pdfs.semanticscholar.org/4e30/107ee6a2e087f14a7725e7fc5535ec2f5a5f.pdf
+4e061a302816f5890a621eb278c6efa6e37d7e2f,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909638,,
+4e32fbb58154e878dd2fd4b06398f85636fd0cf4,https://arxiv.org/pdf/1805.02339.pdf,,,https://arxiv.org/pdf/1805.02339.pdf
+4e0636a1b92503469b44e2807f0bb35cc0d97652,https://pdfs.semanticscholar.org/4e06/36a1b92503469b44e2807f0bb35cc0d97652.pdf,,,https://pdfs.semanticscholar.org/4e06/36a1b92503469b44e2807f0bb35cc0d97652.pdf
+4e27fec1703408d524d6b7ed805cdb6cba6ca132,https://pdfs.semanticscholar.org/7714/a5aa27ab5ad4d06a81fbb3e973d3b1002ac1.pdf,,,https://pdfs.semanticscholar.org/7714/a5aa27ab5ad4d06a81fbb3e973d3b1002ac1.pdf
+4e43408a59852c1bbaa11596a5da3e42034d9380,,,http://doi.org/10.1007/s11042-018-6040-3,
+4e6c9be0b646d60390fe3f72ce5aeb0136222a10,https://arxiv.org/pdf/1604.04494.pdf,,,https://arxiv.org/pdf/1604.04494.pdf
+4ed6c7740ba93d75345397ef043f35c0562fb0fd,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6117516,,
+4ef09fe9f7fa027427414cf1f2e9050ac7f5e34d,,,http://doi.org/10.1007/s11227-018-2408-4,
+4e37cd250130c6fd60e066f0c8efb3cbb778c421,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8419742,,
+20b994a78cd1db6ba86ea5aab7211574df5940b3,https://arxiv.org/pdf/1805.08417.pdf,,,https://arxiv.org/pdf/1805.08417.pdf
+20a0f71d2c667f3c69df18f097f2b5678ac7d214,,,http://doi.org/10.1007/s10055-018-0357-0,
+2004afb2276a169cdb1f33b2610c5218a1e47332,https://pdfs.semanticscholar.org/2004/afb2276a169cdb1f33b2610c5218a1e47332.pdf,,,https://pdfs.semanticscholar.org/2004/afb2276a169cdb1f33b2610c5218a1e47332.pdf
+20ade100a320cc761c23971d2734388bfe79f7c5,https://pdfs.semanticscholar.org/20ad/e100a320cc761c23971d2734388bfe79f7c5.pdf,,,https://pdfs.semanticscholar.org/20ad/e100a320cc761c23971d2734388bfe79f7c5.pdf
+20d6a4aaf5abf2925fdce2780e38ab1771209f76,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8446795,,
+206e24f7d4b3943b35b069ae2d028143fcbd0704,https://arxiv.org/pdf/1803.11405.pdf,,,https://arxiv.org/pdf/1803.11405.pdf
+20eeb83a8b6fea64c746bf993f9c991bb34a4b30,,,http://doi.org/10.1007/s00138-018-0956-5,
+2059d2fecfa61ddc648be61c0cbc9bc1ad8a9f5b,https://arxiv.org/pdf/1408.2700.pdf,,,https://arxiv.org/pdf/1408.2700.pdf
+206fbe6ab6a83175a0ef6b44837743f8d5f9b7e8,https://arxiv.org/pdf/1706.02932.pdf,,,https://arxiv.org/pdf/1706.02932.pdf
+202dc3c6fda654aeb39aee3e26a89340fb06802a,https://arxiv.org/pdf/1807.02800.pdf,,,https://arxiv.org/pdf/1807.02800.pdf
+20111924fbf616a13d37823cd8712a9c6b458cd6,https://pdfs.semanticscholar.org/2011/1924fbf616a13d37823cd8712a9c6b458cd6.pdf,,,https://pdfs.semanticscholar.org/2011/1924fbf616a13d37823cd8712a9c6b458cd6.pdf
+20c02e98602f6adf1cebaba075d45cef50de089f,https://arxiv.org/pdf/1808.07507.pdf,,,https://arxiv.org/pdf/1808.07507.pdf
+18855be5e7a60269c0652e9567484ce5b9617caa,,,http://doi.org/10.1007/s11042-017-4579-z,
+18d51a366ce2b2068e061721f43cb798177b4bb7,https://pdfs.semanticscholar.org/18d5/1a366ce2b2068e061721f43cb798177b4bb7.pdf,,,https://pdfs.semanticscholar.org/18d5/1a366ce2b2068e061721f43cb798177b4bb7.pdf
+1860b8f63ce501bd0dfa9e6f2debc080e88d9baa,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7894195,,
+185263189a30986e31566394680d6d16b0089772,https://pdfs.semanticscholar.org/1852/63189a30986e31566394680d6d16b0089772.pdf,,,https://pdfs.semanticscholar.org/1852/63189a30986e31566394680d6d16b0089772.pdf
+18010284894ed0edcca74e5bf768ee2e15ef7841,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780493,,
+18b9dc55e5221e704f90eea85a81b41dab51f7da,https://arxiv.org/pdf/1803.07179.pdf,,,https://arxiv.org/pdf/1803.07179.pdf
+18941b52527e6f15abfdf5b86a0086935706e83b,https://arxiv.org/pdf/1808.09211.pdf,,,https://arxiv.org/pdf/1808.09211.pdf
+18e54b74ed1f3c02b7569f53a7d930d72fc329f5,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7902214,,
+188abc5bad3a3663d042ce98c7a7327e5a1ae298,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6152129,,
+180bd019eab85bbf01d9cddc837242e111825750,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8239690,,
+27a00f2490284bc0705349352d36e9749dde19ab,https://arxiv.org/pdf/1806.05622.pdf,,,https://arxiv.org/pdf/1806.05622.pdf
+270acff7916589a6cc9ca915b0012ffcb75d4899,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8425659,,
+275b5091c50509cc8861e792e084ce07aa906549,https://pdfs.semanticscholar.org/275b/5091c50509cc8861e792e084ce07aa906549.pdf,,,https://pdfs.semanticscholar.org/275b/5091c50509cc8861e792e084ce07aa906549.pdf
+270733d986a1eb72efda847b4b55bc6ba9686df4,https://pdfs.semanticscholar.org/2707/33d986a1eb72efda847b4b55bc6ba9686df4.pdf,,,https://pdfs.semanticscholar.org/2707/33d986a1eb72efda847b4b55bc6ba9686df4.pdf
+27da432cf2b9129dce256e5bf7f2f18953eef5a5,https://arxiv.org/pdf/1805.11519.pdf,,,https://arxiv.org/pdf/1805.11519.pdf
+2717b044ae9933f9ab87f16d6c611352f66b2033,https://arxiv.org/pdf/1804.06964.pdf,,,https://arxiv.org/pdf/1804.06964.pdf
+2770b095613d4395045942dc60e6c560e882f887,https://arxiv.org/pdf/1808.06210.pdf,,,https://arxiv.org/pdf/1808.06210.pdf
+27b451abfe321a696c852215bb7efb4c2e50c89f,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7898447,,
+2744e6d526b8f2c1b297ac2d2458aaa08b0cda11,,,http://doi.org/10.1007/s11042-017-5571-3,
+2724ba85ec4a66de18da33925e537f3902f21249,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751298,,
+4b0cb10c6c3f2d581ac9eb654412f70bc72ed661,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8172386,,
+4b5ff8c67f3496a414f94e35cb35a601ec98e5cf,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6547306,,
+4b61d8490bf034a2ee8aa26601d13c83ad7f843a,https://arxiv.org/pdf/1807.06708.pdf,,,https://arxiv.org/pdf/1807.06708.pdf
+4b9ec224949c79a980a5a66664d0ac6233c3d575,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7565501,,
+4b48e912a17c79ac95d6a60afed8238c9ab9e553,https://arxiv.org/pdf/1805.06741.pdf,,,https://arxiv.org/pdf/1805.06741.pdf
+4bbe460ab1b279a55e3c9d9f488ff79884d01608,https://arxiv.org/pdf/1712.00684.pdf,,,https://arxiv.org/pdf/1712.00684.pdf
+4bf85ef995c684b841d0a5a002d175fadd922ff0,,,,http://dl.acm.org/citation.cfm?id=3199668
+4b936847f39094d6cb0bde68cea654d948c4735d,,,http://doi.org/10.1007/s11042-016-3470-7,
+11bb2abe0ca614c15701961428eb2f260e3e2eef,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8343867,,
+11691f1e7c9dbcbd6dfd256ba7ac710581552baa,https://arxiv.org/pdf/1804.04527.pdf,,,https://arxiv.org/pdf/1804.04527.pdf
+113b06e70b7eead8ae7450bafe9c91656705024c,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373832,,
+116f9e9cda25ff3187bc777ceb3ecd28077a7eca,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373864,,
+112780a7fe259dc7aff2170d5beda50b2bfa7bda,https://arxiv.org/pdf/1805.00833.pdf,,,https://arxiv.org/pdf/1805.00833.pdf
+1159ff04fd17c59515199e0fc2d5e01e72818b59,,,,
+11df25b4e074b7610ec304a8733fa47625d9faca,,,http://doi.org/10.1016/j.patrec.2012.09.024,
+1190cba0cae3c8bb81bf80d6a0a83ae8c41240bc,https://pdfs.semanticscholar.org/1190/cba0cae3c8bb81bf80d6a0a83ae8c41240bc.pdf,,,https://pdfs.semanticscholar.org/1190/cba0cae3c8bb81bf80d6a0a83ae8c41240bc.pdf
+111d0b588f3abbbea85d50a28c0506f74161e091,https://pdfs.semanticscholar.org/111d/0b588f3abbbea85d50a28c0506f74161e091.pdf,,,https://pdfs.semanticscholar.org/111d/0b588f3abbbea85d50a28c0506f74161e091.pdf
+7d2556d674ad119cf39df1f65aedbe7493970256,https://pdfs.semanticscholar.org/7f01/762f2daf27282197cb84751eb30550417d41.pdf,,,https://pdfs.semanticscholar.org/7f01/762f2daf27282197cb84751eb30550417d41.pdf
+7d18e9165312cf669b799aa1b883c6bbe95bf40e,,,http://doi.org/10.1007/s11042-016-3492-1,
+7df4f96138a4e23492ea96cf921794fc5287ba72,https://arxiv.org/pdf/1707.08705.pdf,,,https://arxiv.org/pdf/1707.08705.pdf
+7d45f1878d8048f6b3de5b3ec912c49742d5e968,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7747479,,
+7d40e7e5c01bd551edf65902386401e1b8b8014b,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7303876,,
+7df268a3f4da7d747b792882dfb0cbdb7cc431bc,https://arxiv.org/pdf/1804.03675.pdf,,,https://arxiv.org/pdf/1804.03675.pdf
+2902f62457fdf7e8e8ee77a9155474107a2f423e,https://arxiv.org/pdf/1803.07973.pdf,,,https://arxiv.org/pdf/1803.07973.pdf
+29db16efc3b378c50511f743e5197a4c0b9e902f,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406401,,
+2957715e96a18dbb5ed5c36b92050ec375214aa6,https://arxiv.org/pdf/1712.00193.pdf,,,https://arxiv.org/pdf/1712.00193.pdf
+29c340c83b3bbef9c43b0c50b4d571d5ed037cbd,https://pdfs.semanticscholar.org/29c3/40c83b3bbef9c43b0c50b4d571d5ed037cbd.pdf,,,https://pdfs.semanticscholar.org/29c3/40c83b3bbef9c43b0c50b4d571d5ed037cbd.pdf
+2961e14c327341d22d5f266a6872aa174add8ac4,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6654170,,
+2983cf95743be82671a71528004036bd19172712,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7915734,,
+29a5d38390857e234c111f8bb787724c08f39110,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813387,,
+292e1c88d43a77dbe5c610f4f611cfdb6d3212b6,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1301520,,
+7c57ac7c9f84fbd093f6393e2b63c18078bf0fdf,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6218178,,
+7caa3a74313f9a7a2dd5b4c2cd7f825d895d3794,,,http://doi.org/10.1007/s11263-016-0967-5,
+7c47da191f935811f269f9ba3c59556c48282e80,https://arxiv.org/pdf/1503.07697.pdf,,,https://arxiv.org/pdf/1503.07697.pdf
+7c11fa4fd91cb57e6e216117febcdd748e595760,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5597453,,
+7cdf3bc1de6c7948763c0c2dfa4384dcbd3677a0,,,http://doi.org/10.1007/s11263-016-0920-7,
+7c1cfab6b60466c13f07fe028e5085a949ec8b30,https://arxiv.org/pdf/1610.00291.pdf,,,https://arxiv.org/pdf/1610.00291.pdf
+7c17280c9193da3e347416226b8713b99e7825b8,https://arxiv.org/pdf/1805.08162.pdf,,,https://arxiv.org/pdf/1805.08162.pdf
+7cffcb4f24343a924a8317d560202ba9ed26cd0b,https://arxiv.org/pdf/1708.06997.pdf,,,https://arxiv.org/pdf/1708.06997.pdf
+7c8e0f3053e09da6d8f9a1812591a35bccd5c669,,,http://doi.org/10.1007/978-3-030-00470-5,
+7c825562b3ff4683ed049a372cb6807abb09af2a,https://pdfs.semanticscholar.org/7c82/5562b3ff4683ed049a372cb6807abb09af2a.pdf,,,https://pdfs.semanticscholar.org/7c82/5562b3ff4683ed049a372cb6807abb09af2a.pdf
+7ca7255c2e0c86e4adddbbff2ce74f36b1dc522d,https://pdfs.semanticscholar.org/7ca7/255c2e0c86e4adddbbff2ce74f36b1dc522d.pdf,,,https://pdfs.semanticscholar.org/7ca7/255c2e0c86e4adddbbff2ce74f36b1dc522d.pdf
+7cfbf90368553333b47731729e0e358479c25340,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7346480,,
+7c9a65f18f7feb473e993077d087d4806578214e,https://pdfs.semanticscholar.org/7c9a/65f18f7feb473e993077d087d4806578214e.pdf,,,https://pdfs.semanticscholar.org/7c9a/65f18f7feb473e993077d087d4806578214e.pdf
+7c66e7f357553fd4b362d00ff377bffb9197410e,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961231,,
+7c6686fa4d8c990e931f1d16deabf647bf3b1986,,,,http://arxiv.org/abs/1504.07550
+7cf579088e0456d04b531da385002825ca6314e2,https://arxiv.org/pdf/1708.04299.pdf,,,https://arxiv.org/pdf/1708.04299.pdf
+7c80d91db5977649487388588c0c823080c9f4b4,https://arxiv.org/pdf/1805.02283.pdf,,,https://arxiv.org/pdf/1805.02283.pdf
+7c30ea47f5ae1c5abd6981d409740544ed16ed16,https://pdfs.semanticscholar.org/7c30/ea47f5ae1c5abd6981d409740544ed16ed16.pdf,,,https://pdfs.semanticscholar.org/7c30/ea47f5ae1c5abd6981d409740544ed16ed16.pdf
+162403e189d1b8463952fa4f18a291241275c354,https://arxiv.org/pdf/1801.10304.pdf,,,https://arxiv.org/pdf/1801.10304.pdf
+16fdd6d842475e6fbe58fc809beabbed95f0642e,https://arxiv.org/pdf/1505.00315.pdf,,,https://arxiv.org/pdf/1505.00315.pdf
+16de1324459fe8fdcdca80bba04c3c30bb789bdf,https://arxiv.org/pdf/1712.02765.pdf,,,https://arxiv.org/pdf/1712.02765.pdf
+16b9d258547f1eccdb32111c9f45e2e4bbee79af,https://arxiv.org/pdf/1704.06369.pdf,,,https://arxiv.org/pdf/1704.06369.pdf
+164b0e2a03a5a402f66c497e6c327edf20f8827b,https://pdfs.semanticscholar.org/164b/0e2a03a5a402f66c497e6c327edf20f8827b.pdf,,,https://pdfs.semanticscholar.org/164b/0e2a03a5a402f66c497e6c327edf20f8827b.pdf
+166ef5d3fd96d99caeabe928eba291c082ec75a0,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237597,,
+166186e551b75c9b5adcc9218f0727b73f5de899,https://pdfs.semanticscholar.org/1661/86e551b75c9b5adcc9218f0727b73f5de899.pdf,,,https://pdfs.semanticscholar.org/1661/86e551b75c9b5adcc9218f0727b73f5de899.pdf
+16d6737b50f969247339a6860da2109a8664198a,https://pdfs.semanticscholar.org/16d6/737b50f969247339a6860da2109a8664198a.pdf,,,https://pdfs.semanticscholar.org/16d6/737b50f969247339a6860da2109a8664198a.pdf
+16fadde3e68bba301f9829b3f99157191106bd0f,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4562953,,
+4209783b0cab1f22341f0600eed4512155b1dee6,https://arxiv.org/pdf/1806.00365.pdf,,,https://arxiv.org/pdf/1806.00365.pdf
+42a6beed493c69d5bad99ae47ea76497c8e5fdae,,,http://doi.org/10.1007/s11704-017-6613-8,
+42eda7c20db9dc0f42f72bb997dd191ed8499b10,https://arxiv.org/pdf/1611.09309.pdf,,,https://arxiv.org/pdf/1611.09309.pdf
+42ea8a96eea023361721f0ea34264d3d0fc49ebd,https://arxiv.org/pdf/1608.04695.pdf,,,https://arxiv.org/pdf/1608.04695.pdf
+89c73b1e7c9b5e126a26ed5b7caccd7cd30ab199,https://pdfs.semanticscholar.org/89c7/3b1e7c9b5e126a26ed5b7caccd7cd30ab199.pdf,,,https://pdfs.semanticscholar.org/89c7/3b1e7c9b5e126a26ed5b7caccd7cd30ab199.pdf
+893239f17dc2d17183410d8a98b0440d98fa2679,https://pdfs.semanticscholar.org/d5b1/6481d34838cc92593f5f311badbf7f18ed5a.pdf,,,https://pdfs.semanticscholar.org/d5b1/6481d34838cc92593f5f311badbf7f18ed5a.pdf
+892c911ca68f5b4bad59cde7eeb6c738ec6c4586,https://pdfs.semanticscholar.org/892c/911ca68f5b4bad59cde7eeb6c738ec6c4586.pdf,,,https://pdfs.semanticscholar.org/892c/911ca68f5b4bad59cde7eeb6c738ec6c4586.pdf
+8986585975c0090e9ad97bec2ba6c4b437419dae,https://arxiv.org/pdf/1808.04285.pdf,,,https://arxiv.org/pdf/1808.04285.pdf
+89d3a57f663976a9ac5e9cdad01267c1fc1a7e06,https://arxiv.org/pdf/1708.09642.pdf,,,https://arxiv.org/pdf/1708.09642.pdf
+8981be3a69cd522b4e57e9914bf19f034d4b530c,https://pdfs.semanticscholar.org/8981/be3a69cd522b4e57e9914bf19f034d4b530c.pdf,,,https://pdfs.semanticscholar.org/8981/be3a69cd522b4e57e9914bf19f034d4b530c.pdf
+895081d6a5545ad6385bfc6fcf460fc0b13bac86,,,http://doi.org/10.1016/S0167-8655%2899%2900134-8,
+891b10c4b3b92ca30c9b93170ec9abd71f6099c4,https://pdfs.semanticscholar.org/891b/10c4b3b92ca30c9b93170ec9abd71f6099c4.pdf,,,https://pdfs.semanticscholar.org/891b/10c4b3b92ca30c9b93170ec9abd71f6099c4.pdf
+451b6409565a5ad18ea49b063561a2645fa4281b,https://arxiv.org/pdf/1706.00699.pdf,,,https://arxiv.org/pdf/1706.00699.pdf
+4552f4d46a2cc67ccc4dd8568e5c95aa2eedb4ec,https://arxiv.org/pdf/1803.11366.pdf,,,https://arxiv.org/pdf/1803.11366.pdf
+45e7ddd5248977ba8ec61be111db912a4387d62f,https://arxiv.org/pdf/1711.00253.pdf,,,https://arxiv.org/pdf/1711.00253.pdf
+45b9b7fe3850ef83d39d52f6edcc0c24fcc0bc73,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7888593,,
+4560491820e0ee49736aea9b81d57c3939a69e12,https://arxiv.org/pdf/1712.04008.pdf,,,https://arxiv.org/pdf/1712.04008.pdf
+45e459462a80af03e1bb51a178648c10c4250925,https://arxiv.org/pdf/1606.08998.pdf,,,https://arxiv.org/pdf/1606.08998.pdf
+45a6333fc701d14aab19f9e2efd59fe7b0e89fec,https://pdfs.semanticscholar.org/45a6/333fc701d14aab19f9e2efd59fe7b0e89fec.pdf,,,https://pdfs.semanticscholar.org/45a6/333fc701d14aab19f9e2efd59fe7b0e89fec.pdf
+450c6a57f19f5aa45626bb08d7d5d6acdb863b4b,https://arxiv.org/pdf/1805.00611.pdf,,,https://arxiv.org/pdf/1805.00611.pdf
+1f3f7df159c338884ddfd38ee2d3ba2e1e3ada69,,,http://doi.org/10.1162/jocn_a_00645,
+1fe1bd6b760e3059fff73d53a57ce3a6079adea1,https://pdfs.semanticscholar.org/1fe1/bd6b760e3059fff73d53a57ce3a6079adea1.pdf,,,https://pdfs.semanticscholar.org/1fe1/bd6b760e3059fff73d53a57ce3a6079adea1.pdf
+1ffe20eb32dbc4fa85ac7844178937bba97f4bf0,https://arxiv.org/pdf/1706.05067.pdf,,,https://arxiv.org/pdf/1706.05067.pdf
+1f5f67d315c9dad341d39129d8f8fe7fa58e564c,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8397536,,
+1fe1a78c941e03abe942498249c041b2703fd3d2,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8393355,,
+1f59e0818e7b16c0d39dd08eb90533ea0ae0be5e,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8385089,,
+1fdeba9c4064b449231eac95e610f3288801fd3e,https://arxiv.org/pdf/1710.00925.pdf,,,https://arxiv.org/pdf/1710.00925.pdf
+1fff309330f85146134e49e0022ac61ac60506a9,https://arxiv.org/pdf/1701.07569.pdf,,,https://arxiv.org/pdf/1701.07569.pdf
+1fa426496ed6bcd0c0b17b8b935a14c84a7ee1c2,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100195,,
+1feeab271621128fe864e4c64bab9b2e2d0ed1f1,https://pdfs.semanticscholar.org/e230/e2e60b1d20a5334f59ca669bbd35f9391d2e.pdf,,,https://pdfs.semanticscholar.org/e230/e2e60b1d20a5334f59ca669bbd35f9391d2e.pdf
+1fb980e137b2c9f8781a0d98c026e164b497ddb1,,,,http://dl.acm.org/citation.cfm?id=3213539
+73b90573d272887a6d835ace89bfaf717747c59b,https://pdfs.semanticscholar.org/73b9/0573d272887a6d835ace89bfaf717747c59b.pdf,,,https://pdfs.semanticscholar.org/73b9/0573d272887a6d835ace89bfaf717747c59b.pdf
+7323b594d3a8508f809e276aa2d224c4e7ec5a80,https://arxiv.org/pdf/1808.05508.pdf,,,https://arxiv.org/pdf/1808.05508.pdf
+7360a2adcd6e3fe744b7d7aec5c08ee31094dfd4,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373833,,
+73ba33e933e834b815f62a50aa1a0e15c6547e83,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8368754,,
+7361b900018f22e37499443643be1ff9d20edfd6,,,http://doi.org/10.1049/iet-bmt.2016.0169,
+73ed64803d6f2c49f01cffef8e6be8fc9b5273b8,https://arxiv.org/pdf/1508.06073.pdf,,,https://arxiv.org/pdf/1508.06073.pdf
+73d53a7c27716ae9a6d3484e78883545e53117ae,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8371978,,
+7343f0b7bcdaf909c5e37937e295bf0ac7b69499,,,http://doi.org/10.1016/j.csi.2015.06.004,
+7306d42ca158d40436cc5167e651d7ebfa6b89c1,https://arxiv.org/pdf/1511.04458.pdf,,,https://arxiv.org/pdf/1511.04458.pdf
+734cdda4a4de2a635404e4c6b61f1b2edb3f501d,https://pdfs.semanticscholar.org/734c/dda4a4de2a635404e4c6b61f1b2edb3f501d.pdf,,,https://pdfs.semanticscholar.org/734c/dda4a4de2a635404e4c6b61f1b2edb3f501d.pdf
+73f341ff68caa9f8802e9e81bfa90d88bbdbd9d2,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791198,,
+73dcb4c452badb3ee39a2f222298b234d08c21eb,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6779478,,
+73c9cbbf3f9cea1bc7dce98fce429bf0616a1a8c,https://arxiv.org/pdf/1705.02193.pdf,,,https://arxiv.org/pdf/1705.02193.pdf
+872dfdeccf99bbbed7c8f1ea08afb2d713ebe085,https://arxiv.org/pdf/1703.09507.pdf,,,https://arxiv.org/pdf/1703.09507.pdf
+87e6cb090aecfc6f03a3b00650a5c5f475dfebe1,https://pdfs.semanticscholar.org/87e6/cb090aecfc6f03a3b00650a5c5f475dfebe1.pdf,,,https://pdfs.semanticscholar.org/87e6/cb090aecfc6f03a3b00650a5c5f475dfebe1.pdf
+87610276ccbc12d0912b23fd493019f06256f94e,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6706757,,
+87b607b8d4858a16731144d17f457a54e488f15d,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5597532,,
+8724fc4d6b91eebb79057a7ce3e9dfffd3b1426f,https://arxiv.org/pdf/1701.03246.pdf,,,https://arxiv.org/pdf/1701.03246.pdf
+878169be6e2c87df2d8a1266e9e37de63b524ae7,https://pdfs.semanticscholar.org/8781/69be6e2c87df2d8a1266e9e37de63b524ae7.pdf,,,https://pdfs.semanticscholar.org/8781/69be6e2c87df2d8a1266e9e37de63b524ae7.pdf
+878301453e3d5cb1a1f7828002ea00f59cbeab06,https://arxiv.org/pdf/1701.08393.pdf,,,https://arxiv.org/pdf/1701.08393.pdf
+87e592ee1a7e2d34e6b115da08700a1ae02e9355,https://arxiv.org/pdf/1807.10002.pdf,,,https://arxiv.org/pdf/1807.10002.pdf
+8006219efb6ab76754616b0e8b7778dcfb46603d,https://pdfs.semanticscholar.org/7f79/e78e52883994a8a843af48922980ae730e65.pdf,,,https://pdfs.semanticscholar.org/7f79/e78e52883994a8a843af48922980ae730e65.pdf
+803c92a3f0815dbf97e30c4ee9450fd005586e1a,https://arxiv.org/pdf/1802.09308.pdf,,,https://arxiv.org/pdf/1802.09308.pdf
+80d4cf7747abfae96328183dd1f84133023c2668,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369786,,
+80345fbb6bb6bcc5ab1a7adcc7979a0262b8a923,https://pdfs.semanticscholar.org/4569/f8e017af1e052b075d8a267116a8b795bd84.pdf,,,https://pdfs.semanticscholar.org/4569/f8e017af1e052b075d8a267116a8b795bd84.pdf
+80ed678ef28ccc1b942e197e0393229cd99d55c8,,,http://doi.org/10.1007/s10044-015-0456-4,
+809e5884cf26b71dc7abc56ac0bad40fb29c671c,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6247842,,
+80097a879fceff2a9a955bf7613b0d3bfa68dc23,https://arxiv.org/pdf/1701.03555.pdf,,,https://arxiv.org/pdf/1701.03555.pdf
+74ce7e5e677a4925489897665c152a352c49d0a2,https://arxiv.org/pdf/1805.03356.pdf,,,https://arxiv.org/pdf/1805.03356.pdf
+7477cf04c6b086108f459f693a60272523c134db,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6618937,,
+746c0205fdf191a737df7af000eaec9409ede73f,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8423119,,
+74dbe6e0486e417a108923295c80551b6d759dbe,https://pdfs.semanticscholar.org/ab95/1e780a7e8e28866b44c6a1a591ec470904b4.pdf,,,https://pdfs.semanticscholar.org/ab95/1e780a7e8e28866b44c6a1a591ec470904b4.pdf
+747c25bff37b96def96dc039cc13f8a7f42dbbc7,https://arxiv.org/pdf/1503.01800.pdf,,,https://arxiv.org/pdf/1503.01800.pdf
+744fa8062d0ae1a11b79592f0cd3fef133807a03,https://pdfs.semanticscholar.org/b5fd/440edd27702c8dbfa38fac0bf23deacf33cb.pdf,,,https://pdfs.semanticscholar.org/b5fd/440edd27702c8dbfa38fac0bf23deacf33cb.pdf
+749d605dd12a4af58de1fae6f5ef5e65eb06540e,https://arxiv.org/pdf/1704.07489.pdf,,,https://arxiv.org/pdf/1704.07489.pdf
+74c19438c78a136677a7cb9004c53684a4ae56ff,https://pdfs.semanticscholar.org/74c1/9438c78a136677a7cb9004c53684a4ae56ff.pdf,,,https://pdfs.semanticscholar.org/74c1/9438c78a136677a7cb9004c53684a4ae56ff.pdf
+1a849b694f2d68c3536ed849ed78c82e979d64d5,https://pdfs.semanticscholar.org/318c/a222a7a4dfc63807c6b6c4285cc63c8610ba.pdf,,,https://pdfs.semanticscholar.org/318c/a222a7a4dfc63807c6b6c4285cc63c8610ba.pdf
+1aa61dd85d3a5a2fe819cba21192ec4471c08628,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8359518,,
+1a53ca294bbe5923c46a339955e8207907e9c8c6,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7273870,,
+1a81c722727299e45af289d905d7dcf157174248,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995466,,
+286a5c19a43382a21c8d96d847b52bba6b715a71,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6876188,,
+289cfcd081c4393c7d6f63510747b5372202f855,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373873,,
+28e1982d20b6eff33989abbef3e9e74400dbf508,,,http://doi.org/10.1007/s11042-015-3007-5,
+281486d172cf0c78d348ce7d977a82ff763efccd,https://arxiv.org/pdf/1708.03911.pdf,,,https://arxiv.org/pdf/1708.03911.pdf
+288964068cd87d97a98b8bc927d6e0d2349458a2,https://pdfs.semanticscholar.org/2889/64068cd87d97a98b8bc927d6e0d2349458a2.pdf,,,https://pdfs.semanticscholar.org/2889/64068cd87d97a98b8bc927d6e0d2349458a2.pdf
+28715fc79bd5ff8dd8b6fc68a4f2641e5d1b8a08,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406402,,
+28f1542c63f5949ee6f2d51a6422244192b5a900,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780475,,
+28d4e027c7e90b51b7d8908fce68128d1964668a,https://arxiv.org/pdf/1705.00393.pdf,,,https://arxiv.org/pdf/1705.00393.pdf
+2866cbeb25551257683cf28f33d829932be651fe,https://arxiv.org/pdf/1809.04621.pdf,,,https://arxiv.org/pdf/1809.04621.pdf
+176e6ba56e04c98e1997ffdef964ece90fd827b4,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8322125,,
+178a82e3a0541fa75c6a11350be5bded133a59fd,https://pdfs.semanticscholar.org/178a/82e3a0541fa75c6a11350be5bded133a59fd.pdf,,,https://pdfs.semanticscholar.org/178a/82e3a0541fa75c6a11350be5bded133a59fd.pdf
+17479e015a2dcf15d40190e06419a135b66da4e0,https://arxiv.org/pdf/1610.08119.pdf,,,https://arxiv.org/pdf/1610.08119.pdf
+17a995680482183f3463d2e01dd4c113ebb31608,https://arxiv.org/pdf/1802.06459.pdf,,,https://arxiv.org/pdf/1802.06459.pdf
+17c0d99171efc957b88c31a465c59485ab033234,https://arxiv.org/pdf/1807.11458.pdf,,,https://arxiv.org/pdf/1807.11458.pdf
+17a8d1b1b4c23a630b051f35e47663fc04dcf043,https://arxiv.org/pdf/1612.02372.pdf,,,https://arxiv.org/pdf/1612.02372.pdf
+179564f157a96787b1b3380a9f79701e3394013d,,,,http://dl.acm.org/citation.cfm?id=2493502
+173657da03e3249f4e47457d360ab83b3cefbe63,https://pdfs.semanticscholar.org/1736/57da03e3249f4e47457d360ab83b3cefbe63.pdf,,,https://pdfs.semanticscholar.org/1736/57da03e3249f4e47457d360ab83b3cefbe63.pdf
+1773d65c1dc566fd6128db65e907ac91b4583bed,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8328914,,
+7bbaa09c9e318da4370a83b126bcdb214e7f8428,https://pdfs.semanticscholar.org/7bba/a09c9e318da4370a83b126bcdb214e7f8428.pdf,,,https://pdfs.semanticscholar.org/7bba/a09c9e318da4370a83b126bcdb214e7f8428.pdf
+7b47dd9302b3085cd6705614b88d7bdbc8ae5c13,,,http://doi.org/10.1007/s11063-017-9693-4,
+7b0f1fc93fb24630eb598330e13f7b839fb46cce,https://arxiv.org/pdf/1805.04771.pdf,,,https://arxiv.org/pdf/1805.04771.pdf
+8fe38962c24300129391f6d7ac24d7783e0fddd0,https://arxiv.org/pdf/1801.01967.pdf,,,https://arxiv.org/pdf/1801.01967.pdf
+8f6d05b8f9860c33c7b1a5d704694ed628db66c7,https://pdfs.semanticscholar.org/f1db/7f2e05e9c955cd59ac3d9040ab9b406c0b66.pdf,,,https://pdfs.semanticscholar.org/f1db/7f2e05e9c955cd59ac3d9040ab9b406c0b66.pdf
+8f71c97206a03c366ddefaa6812f865ac6df87e9,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8342943,,
+8f772d9ce324b2ef5857d6e0b2a420bc93961196,https://arxiv.org/pdf/1805.01760.pdf,,,https://arxiv.org/pdf/1805.01760.pdf
+8fda2f6b85c7e34d3e23927e501a4b4f7fc15b2a,https://pdfs.semanticscholar.org/8fda/2f6b85c7e34d3e23927e501a4b4f7fc15b2a.pdf,,,https://pdfs.semanticscholar.org/8fda/2f6b85c7e34d3e23927e501a4b4f7fc15b2a.pdf
+8fa9cb5dac394e30e4089bf5f4ffecc873d1da96,,,http://doi.org/10.1007/s11042-017-5245-1,
+8fed5ea3b69ea441a8b02f61473eafee25fb2374,https://pdfs.semanticscholar.org/8fed/5ea3b69ea441a8b02f61473eafee25fb2374.pdf,,,https://pdfs.semanticscholar.org/8fed/5ea3b69ea441a8b02f61473eafee25fb2374.pdf
+8fba84af61ac9b5e2bcb69b6730a597d7521ad73,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771329,,
+8f3da45ff0c3e1777c3a7830f79c10f5896bcc21,https://pdfs.semanticscholar.org/8f3d/a45ff0c3e1777c3a7830f79c10f5896bcc21.pdf,,,https://pdfs.semanticscholar.org/8f3d/a45ff0c3e1777c3a7830f79c10f5896bcc21.pdf
+8ff8c64288a2f7e4e8bf8fda865820b04ab3dbe8,https://pdfs.semanticscholar.org/0056/92b9fa6728df3a7f14578c43410867bba425.pdf,,,https://pdfs.semanticscholar.org/0056/92b9fa6728df3a7f14578c43410867bba425.pdf
+8f9c37f351a91ed416baa8b6cdb4022b231b9085,https://pdfs.semanticscholar.org/8f9c/37f351a91ed416baa8b6cdb4022b231b9085.pdf,,,https://pdfs.semanticscholar.org/8f9c/37f351a91ed416baa8b6cdb4022b231b9085.pdf
+8fb2ec3bbd862f680be05ef348b595e142463524,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1699880,,
+8a8127a06f432982bfb0150df3212f379b36840b,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373884,,
+8ad0a88a7583af819af66cf2d9e8adb860cf9c34,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7539153,,
+8acdc4be8274e5d189fb67b841c25debf5223840,https://pdfs.semanticscholar.org/8acd/c4be8274e5d189fb67b841c25debf5223840.pdf,,,https://pdfs.semanticscholar.org/8acd/c4be8274e5d189fb67b841c25debf5223840.pdf
+8ac2d704f27a2ddf19b40c8e4695da629aa52a54,,,http://doi.org/10.1007/s11042-015-2945-2,
+8ae642c87f0d6eeff1c6362571e7cd36dcda60ae,,,,http://dl.acm.org/citation.cfm?id=3123271
+8a8861ad6caedc3993e31d46e7de6c251a8cda22,https://arxiv.org/pdf/1706.01869.pdf,,,https://arxiv.org/pdf/1706.01869.pdf
+8a6033cbba8598945bfadd2dd04023c2a9f31681,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014991,,
+8a866bc0d925dfd8bb10769b8b87d7d0ff01774d,https://pdfs.semanticscholar.org/34b7/2d4fb60b36bbf34ff3b1ce3045ba303ab643.pdf,,,https://pdfs.semanticscholar.org/34b7/2d4fb60b36bbf34ff3b1ce3045ba303ab643.pdf
+8a63a2b10068b6a917e249fdc73173f5fd918db0,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8120021,,
+8a0159919ee4e1a9f4cbfb652a1be212bf0554fd,https://pdfs.semanticscholar.org/8a01/59919ee4e1a9f4cbfb652a1be212bf0554fd.pdf,,,https://pdfs.semanticscholar.org/8a01/59919ee4e1a9f4cbfb652a1be212bf0554fd.pdf
+8a4893d825db22f398b81d6a82ad2560832cd890,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5349489,,
+8aa1591bf8fcb44f2e9f2f10d1029720ccbb8832,,,,http://dl.acm.org/citation.cfm?id=3078988
+7e600faee0ba11467d3f7aed57258b0db0448a72,https://pdfs.semanticscholar.org/0f09/4a0cef9f81da0e4915e6ed45f73aef6d6976.pdf,,,https://pdfs.semanticscholar.org/0f09/4a0cef9f81da0e4915e6ed45f73aef6d6976.pdf
+7ed3b79248d92b255450c7becd32b9e5c834a31e,https://pdfs.semanticscholar.org/7ed3/b79248d92b255450c7becd32b9e5c834a31e.pdf,,,https://pdfs.semanticscholar.org/7ed3/b79248d92b255450c7becd32b9e5c834a31e.pdf
+7eaa97be59019f0d36aa7dac27407b004cad5e93,https://arxiv.org/pdf/1609.04468.pdf,,,https://arxiv.org/pdf/1609.04468.pdf
+7eb895e7de883d113b75eda54389460c61d63f67,https://arxiv.org/pdf/1709.02993.pdf,,,https://arxiv.org/pdf/1709.02993.pdf
+7e467e686f9468b826133275484e0a1ec0f5bde6,https://arxiv.org/pdf/1407.4764.pdf,,,https://arxiv.org/pdf/1407.4764.pdf
+7eb8476024413269bfb2abd54e88d3e131d0aa0e,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4284739,,
+7e56d9ebd47490bb06a8ff0bd5bcd8672ec52364,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1275543,,
+7ef0cc4f3f7566f96f168123bac1e07053a939b2,https://pdfs.semanticscholar.org/e735/b8212d8a81909753291d5d06789a917014f8.pdf,,,https://pdfs.semanticscholar.org/e735/b8212d8a81909753291d5d06789a917014f8.pdf
+7e2cfbfd43045fbd6aabd9a45090a5716fc4e179,https://arxiv.org/pdf/1808.00435.pdf,,,https://arxiv.org/pdf/1808.00435.pdf
+7ee7b0602ef517b445316ca8aa525e28ea79307e,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8418530,,
+7e8c8b1d72c67e2e241184448715a8d4bd88a727,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8097314,,
+7e2f7c0eeaeb47b163a7258665324643669919e8,,,http://doi.org/10.1007/s11042-018-5801-3,
+7ebb153704706e457ab57b432793d2b6e5d12592,https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf,,,https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf
+7ec7163ec1bc237c4c2f2841c386f2dbfd0cc922,https://pdfs.semanticscholar.org/7ec7/163ec1bc237c4c2f2841c386f2dbfd0cc922.pdf,,,https://pdfs.semanticscholar.org/7ec7/163ec1bc237c4c2f2841c386f2dbfd0cc922.pdf
+7ef44b7c2b5533d00001ae81f9293bdb592f1146,https://pdfs.semanticscholar.org/7ef4/4b7c2b5533d00001ae81f9293bdb592f1146.pdf,,,https://pdfs.semanticscholar.org/7ef4/4b7c2b5533d00001ae81f9293bdb592f1146.pdf
+7e27d946d23229220bcb6672aacab88e09516d39,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7900131,,
+7ec431e36919e29524eceb1431d3e1202637cf19,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8365242,,
+10cb39e93fac194220237f15dae084136fdc6740,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8457972,,
+10af69f11301679b6fbb23855bf10f6af1f3d2e6,https://arxiv.org/pdf/1411.6660.pdf,,,https://arxiv.org/pdf/1411.6660.pdf
+10bfa4cecd64b9584c901075d6b50f4fad898d0b,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7728013,,
+10e4172dd4f4a633f10762fc5d4755e61d52dc36,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100146,,
+101569eeef2cecc576578bd6500f1c2dcc0274e2,https://arxiv.org/pdf/1805.12317.pdf,,,https://arxiv.org/pdf/1805.12317.pdf
+1025c4922491745534d5d4e8c6e74ba2dc57b138,,,http://doi.org/10.1007/s11263-017-1014-x,
+101d4cfbd6f8a7a10bd33505e2b183183f1d8770,https://pdfs.semanticscholar.org/d2d7/3d4a60ff9a4bb9544d05796637cb6a419e6a.pdf,,,https://pdfs.semanticscholar.org/d2d7/3d4a60ff9a4bb9544d05796637cb6a419e6a.pdf
+1063be2ad265751fb958b396ee26167fa0e844d2,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369056,,
+10bf35bf98cfe555dfc03b5f03f2769d330e3af9,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8000333,,
+106092fafb53e36077eba88f06feecd07b9e78e7,https://arxiv.org/pdf/1711.06330.pdf,,,https://arxiv.org/pdf/1711.06330.pdf
+103c8eaca2a2176babab2cc6e9b25d48870d6928,https://pdfs.semanticscholar.org/14ad/c9c2b776c751d254f9c924fcb7578563f8b8.pdf,,,https://pdfs.semanticscholar.org/14ad/c9c2b776c751d254f9c924fcb7578563f8b8.pdf
+193474d008cab9fa1c1fa81ce094d415f00b075c,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8466415,,
+196c12571ab51273f44ea3469d16301d5b8d2828,,,http://doi.org/10.1007/s00371-018-1494-x,
+19b492d426f092d80825edba3b02e354c312295f,,,http://doi.org/10.1007/s00371-016-1332-y,
+1921795408345751791b44b379f51b7dd54ebfa2,https://arxiv.org/pdf/1807.07872.pdf,,,https://arxiv.org/pdf/1807.07872.pdf
+1910f5f7ac81d4fcc30284e88dee3537887acdf3,https://pdfs.semanticscholar.org/1910/f5f7ac81d4fcc30284e88dee3537887acdf3.pdf,,,https://pdfs.semanticscholar.org/1910/f5f7ac81d4fcc30284e88dee3537887acdf3.pdf
+197c64c36e8a9d624a05ee98b740d87f94b4040c,https://arxiv.org/pdf/1804.04421.pdf,,,https://arxiv.org/pdf/1804.04421.pdf
+1951dc9dd4601168ab5acf4c14043b124a8e2f67,,,http://doi.org/10.1162/neco_a_01116,
+193bc8b663d041bc34134a8407adc3e546daa9cc,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373908,,
+4c72a51a7c7288e6e17dfefe4f87df47929608e7,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7736912,,
+4cc326fc977cf967eef5f3135bf0c48d07b79e2d,,,http://doi.org/10.1007/s11042-016-3830-3,
+4ca9753ab023accbfa75a547a65344ee17b549ba,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5457710,,
+4c29e1f31660ba33e46d7e4ffdebb9b8c6bd5adc,https://arxiv.org/pdf/1807.09192.pdf,,,https://arxiv.org/pdf/1807.09192.pdf
+4cfe921ac4650470b0473fd52a2b801f4494ee64,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6467429,,
+4ccf64fc1c9ca71d6aefdf912caf8fea048fb211,https://arxiv.org/pdf/1804.08572.pdf,,,https://arxiv.org/pdf/1804.08572.pdf
+4cdb6144d56098b819076a8572a664a2c2d27f72,https://arxiv.org/pdf/1806.01196.pdf,,,https://arxiv.org/pdf/1806.01196.pdf
+4c4e49033737467e28aa2bb32f6c21000deda2ef,https://arxiv.org/pdf/1709.01591.pdf,,,https://arxiv.org/pdf/1709.01591.pdf
+4c0cc732314ba3ccccd9036e019b1cfc27850c17,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6854473,,
+26a44feb7a64db7986473ca801c251aa88748477,https://arxiv.org/pdf/1804.02744.pdf,,,https://arxiv.org/pdf/1804.02744.pdf
+264f7ab36ff2e23a1514577a6404229d7fe1242b,https://pdfs.semanticscholar.org/264f/7ab36ff2e23a1514577a6404229d7fe1242b.pdf,,,https://pdfs.semanticscholar.org/264f/7ab36ff2e23a1514577a6404229d7fe1242b.pdf
+266766818dbc5a4ca1161ae2bc14c9e269ddc490,https://pdfs.semanticscholar.org/2667/66818dbc5a4ca1161ae2bc14c9e269ddc490.pdf,,,https://pdfs.semanticscholar.org/2667/66818dbc5a4ca1161ae2bc14c9e269ddc490.pdf
+263ed62f94ea615c747c00ebbb4008385285b33b,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8319974,,
+2696d3708d6c6cccbd701f0dac14cc94d72dd76d,,,http://doi.org/10.1007/s10044-017-0633-8,
+265a88a8805f6ba3efae3fcc93d810be1ea68866,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8342346,,
+26e570049aaedcfa420fc8c7b761bc70a195657c,https://pdfs.semanticscholar.org/26e5/70049aaedcfa420fc8c7b761bc70a195657c.pdf,,,https://pdfs.semanticscholar.org/26e5/70049aaedcfa420fc8c7b761bc70a195657c.pdf
+26575ad9e75efb440a7dc4ef8e548eed4e19dbd1,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411910,,
+26c8ed504f852eda4a2e63dbbbc3480e57f43c70,,,http://doi.org/10.1142/S0218001415560078,
+21d5c838d19fcb4d624b69fe9d98e84d88f18e79,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7358748,,
+21b5af67618fcc047b495d2d5d7c2bf145753633,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771442,,
+21959bc56a160ebd450606867dce1462a913afab,,,http://doi.org/10.1007/s11042-018-6071-9,
+214072c84378802a0a0fde0b93ffb17bc04f3759,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7301397,,
+2135a3d9f4b8f5771fa5fc7c7794abf8c2840c44,https://pdfs.semanticscholar.org/2135/a3d9f4b8f5771fa5fc7c7794abf8c2840c44.pdf,,,https://pdfs.semanticscholar.org/2135/a3d9f4b8f5771fa5fc7c7794abf8c2840c44.pdf
+212608e00fc1e8912ff845ee7a4a67f88ba938fc,https://arxiv.org/pdf/1704.02450.pdf,,,https://arxiv.org/pdf/1704.02450.pdf
+4d90d7834ae25ee6176c096d5d6608555766c0b1,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354115,,
+4da4e58072c15904d4ce31076061ebd3ab1cdcd5,,,http://doi.org/10.1007/s00371-018-1477-y,
+4db9e5f19366fe5d6a98ca43c1d113dac823a14d,https://pdfs.semanticscholar.org/a55d/ea7981ea0f90d1110005b5f5ca68a3175910.pdf,,,https://pdfs.semanticscholar.org/a55d/ea7981ea0f90d1110005b5f5ca68a3175910.pdf
+4d19401e44848fe65b721971bc71a9250870ed5f,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8462612,,
+4db0968270f4e7b3fa73e41c50d13d48e20687be,https://arxiv.org/pdf/1705.06394.pdf,,,https://arxiv.org/pdf/1705.06394.pdf
+4d6ad0c7b3cf74adb0507dc886993e603c863e8c,https://pdfs.semanticscholar.org/4d6a/d0c7b3cf74adb0507dc886993e603c863e8c.pdf,,,https://pdfs.semanticscholar.org/4d6a/d0c7b3cf74adb0507dc886993e603c863e8c.pdf
+4dca3d6341e1d991c902492952e726dc2a443d1c,https://arxiv.org/pdf/1805.09298.pdf,,,https://arxiv.org/pdf/1805.09298.pdf
+4db99a2268a120c7af636387241188064ea42338,,,,https://www.ncbi.nlm.nih.gov/pubmed/21820862
+4d47261b2f52c361c09f7ab96fcb3f5c22cafb9f,https://arxiv.org/pdf/1709.03196.pdf,,,https://arxiv.org/pdf/1709.03196.pdf
+75879ab7a77318bbe506cb9df309d99205862f6c,https://pdfs.semanticscholar.org/7587/9ab7a77318bbe506cb9df309d99205862f6c.pdf,,,https://pdfs.semanticscholar.org/7587/9ab7a77318bbe506cb9df309d99205862f6c.pdf
+7574f999d2325803f88c4915ba8f304cccc232d1,https://arxiv.org/pdf/1705.04396.pdf,,,https://arxiv.org/pdf/1705.04396.pdf
+75ce75c1a5c35ecdba99dd8b7ba900d073e35f78,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163152,,
+75308067ddd3c53721430d7984295838c81d4106,https://pdfs.semanticscholar.org/7530/8067ddd3c53721430d7984295838c81d4106.pdf,,,https://pdfs.semanticscholar.org/7530/8067ddd3c53721430d7984295838c81d4106.pdf
+759cf57215fcfdd8f59c97d14e7f3f62fafa2b30,https://arxiv.org/pdf/1706.09498.pdf,,,https://arxiv.org/pdf/1706.09498.pdf
+75a74a74d6abbbb302a99de3225c8870fa149aee,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7914657,,
+758d481bbf24d12615b751fd9ec121500a648bce,,,http://doi.org/10.1007/s11042-015-2914-9,
+758d7e1be64cc668c59ef33ba8882c8597406e53,https://arxiv.org/pdf/1708.03985.pdf,,,https://arxiv.org/pdf/1708.03985.pdf
+754f7f3e9a44506b814bf9dc06e44fecde599878,https://arxiv.org/pdf/1808.02194.pdf,,,https://arxiv.org/pdf/1808.02194.pdf
+75249ebb85b74e8932496272f38af274fbcfd696,https://pdfs.semanticscholar.org/7524/9ebb85b74e8932496272f38af274fbcfd696.pdf,,,https://pdfs.semanticscholar.org/7524/9ebb85b74e8932496272f38af274fbcfd696.pdf
+81a142c751bf0b23315fb6717bc467aa4fdfbc92,https://pdfs.semanticscholar.org/81a1/42c751bf0b23315fb6717bc467aa4fdfbc92.pdf,,,https://pdfs.semanticscholar.org/81a1/42c751bf0b23315fb6717bc467aa4fdfbc92.pdf
+8199803f476c12c7f6c0124d55d156b5d91314b6,https://arxiv.org/pdf/1707.06642.pdf,,,https://arxiv.org/pdf/1707.06642.pdf
+81706277ed180a92d2eeb94ac0560f7dc591ee13,https://pdfs.semanticscholar.org/8170/6277ed180a92d2eeb94ac0560f7dc591ee13.pdf,,,https://pdfs.semanticscholar.org/8170/6277ed180a92d2eeb94ac0560f7dc591ee13.pdf
+8164ebc07f51c9e0db4902980b5ac3f5a8d8d48c,https://arxiv.org/pdf/1808.00171.pdf,,,https://arxiv.org/pdf/1808.00171.pdf
+814369f171337ee1d8809446b7dbfc5e1ef9f4b5,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5597559,,
+814d091c973ff6033a83d4e44ab3b6a88cc1cb66,https://pdfs.semanticscholar.org/814d/091c973ff6033a83d4e44ab3b6a88cc1cb66.pdf,,,https://pdfs.semanticscholar.org/814d/091c973ff6033a83d4e44ab3b6a88cc1cb66.pdf
+81513764b73dae486a9d2df28269c7db75e9beb3,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7839217,,
+8127b7654d6e5c46caaf2404270b74c6b0967e19,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813406,,
+81b0550c58e7409b4f1a1cd7838669cfaa512eb3,,,http://doi.org/10.1016/j.patcog.2015.08.026,
+81f101cea3c451754506bf1c7edf80a661fa4dd1,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163081,,
+81a80b26979b40d5ebe3f5ba70b03cb9f19dd7a5,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369725,,
+86f191616423efab8c0d352d986126a964983219,https://arxiv.org/pdf/1712.01393.pdf,,,https://arxiv.org/pdf/1712.01393.pdf
+869a2fbe42d3fdf40ed8b768edbf54137be7ac71,https://pdfs.semanticscholar.org/915d/4a7202060d77c46e99121c1c8ca875898a11.pdf,,,https://pdfs.semanticscholar.org/915d/4a7202060d77c46e99121c1c8ca875898a11.pdf
+86b6afc667bb14ff4d69e7a5e8bb2454a6bbd2cd,https://pdfs.semanticscholar.org/86b6/afc667bb14ff4d69e7a5e8bb2454a6bbd2cd.pdf,,,https://pdfs.semanticscholar.org/86b6/afc667bb14ff4d69e7a5e8bb2454a6bbd2cd.pdf
+86d0127e1fd04c3d8ea78401c838af621647dc95,https://arxiv.org/pdf/1804.02810.pdf,,,https://arxiv.org/pdf/1804.02810.pdf
+863ad2838b9b90d4461995f498a39bcd2fb87c73,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265580,,
+86f3552b822f6af56cb5079cc31616b4035ccc4e,https://arxiv.org/pdf/1604.07547.pdf,,,https://arxiv.org/pdf/1604.07547.pdf
+860588fafcc80c823e66429fadd7e816721da42a,https://arxiv.org/pdf/1804.04412.pdf,,,https://arxiv.org/pdf/1804.04412.pdf
+8633732d9f787f8497c2696309c7d70176995c15,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7298967,,
+86374bb8d309ad4dbde65c21c6fda6586ae4147a,https://arxiv.org/pdf/1712.09184.pdf,,,https://arxiv.org/pdf/1712.09184.pdf
+8694cd9748fb1c128f91a572119978075fede848,,,http://doi.org/10.1016/j.neucom.2017.08.028,
+869583b700ecf33a9987447aee9444abfe23f343,https://arxiv.org/pdf/1702.01005.pdf,,,https://arxiv.org/pdf/1702.01005.pdf
+721b109970bf5f1862767a1bec3f9a79e815f79a,https://pdfs.semanticscholar.org/721b/109970bf5f1862767a1bec3f9a79e815f79a.pdf,,,https://pdfs.semanticscholar.org/721b/109970bf5f1862767a1bec3f9a79e815f79a.pdf
+72591a75469321074b072daff80477d8911c3af3,https://arxiv.org/pdf/1212.3913.pdf,,,https://arxiv.org/pdf/1212.3913.pdf
+7224d58a7e1f02b84994b60dc3b84d9fe6941ff5,https://arxiv.org/pdf/1504.02351.pdf,,,https://arxiv.org/pdf/1504.02351.pdf
+72cbbdee4f6eeee8b7dd22cea6092c532271009f,https://arxiv.org/pdf/1709.05188.pdf,,,https://arxiv.org/pdf/1709.05188.pdf
+721d9c387ed382988fce6fa864446fed5fb23173,https://pdfs.semanticscholar.org/721d/9c387ed382988fce6fa864446fed5fb23173.pdf,,,https://pdfs.semanticscholar.org/721d/9c387ed382988fce6fa864446fed5fb23173.pdf
+720763bcb5e0507f13a8a319018676eb24270ff0,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5202783,,
+72167c9e4e03e78152f6df44c782571c3058050e,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771464,,
+725c3605c2d26d113637097358cd4c08c19ff9e1,https://arxiv.org/pdf/1807.00504.pdf,,,https://arxiv.org/pdf/1807.00504.pdf
+44b1399e8569a29eed0d22d88767b1891dbcf987,https://pdfs.semanticscholar.org/44b1/399e8569a29eed0d22d88767b1891dbcf987.pdf,,,https://pdfs.semanticscholar.org/44b1/399e8569a29eed0d22d88767b1891dbcf987.pdf
+443f4421e44d4f374c265e6f2551bf9830de5597,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771467,,
+446dc1413e1cfaee0030dc74a3cee49a47386355,https://arxiv.org/pdf/1710.04837.pdf,,,https://arxiv.org/pdf/1710.04837.pdf
+44855e53801d09763c1fb5f90ab73e5c3758a728,,,http://doi.org/10.1007/s11263-017-1018-6,
+44d23df380af207f5ac5b41459c722c87283e1eb,https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf,,,https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf
+44b91268fbbf62e1d2ba1d5331ec7aedac30dbe8,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8342368,,
+44d93039eec244083ac7c46577b9446b3a071f3e,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1415571,,
+2a826273e856939b58be8779d2136bffa0dddb08,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373892,,
+2ac7bb3fb014d27d3928a9b4bc1bf019627e0c1a,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8432363,,
+2a7058a720fa9da4b9b607ea00bfdb63652dff95,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7590031,,
+2aec012bb6dcaacd9d7a1e45bc5204fac7b63b3c,https://arxiv.org/pdf/1708.05340.pdf,,,https://arxiv.org/pdf/1708.05340.pdf
+2a612a7037646276ff98141d3e7abbc9c91fccb8,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909615,,
+2a5903bdb3fdfb4d51f70b77f16852df3b8e5f83,https://arxiv.org/pdf/1805.01515.pdf,,,https://arxiv.org/pdf/1805.01515.pdf
+2a2df7e790737a026434187f9605c4763ff71292,,,http://doi.org/10.1007/s11042-017-4665-2,
+2ab034e1f54c37bfc8ae93f7320160748310dc73,https://arxiv.org/pdf/1805.07242.pdf,,,https://arxiv.org/pdf/1805.07242.pdf
+2f1485994ef2c09a7bb2874eb8252be8fe710db1,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780700,,
+2ffcd35d9b8867a42be23978079f5f24be8d3e35,https://pdfs.semanticscholar.org/2ffc/d35d9b8867a42be23978079f5f24be8d3e35.pdf,,,https://pdfs.semanticscholar.org/2ffc/d35d9b8867a42be23978079f5f24be8d3e35.pdf
+2fc43c2c3f7ad1ca7a1ce32c5a9a98432725fb9a,https://arxiv.org/pdf/1711.09618.pdf,,,https://arxiv.org/pdf/1711.09618.pdf
+2f88d3189723669f957d83ad542ac5c2341c37a5,https://pdfs.semanticscholar.org/2f88/d3189723669f957d83ad542ac5c2341c37a5.pdf,,,https://pdfs.semanticscholar.org/2f88/d3189723669f957d83ad542ac5c2341c37a5.pdf
+2f67d5448b5372f639633d8d29aac9c0295b4d72,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6460923,,
+2f17f6c460e02bd105dcbf14c9b73f34c5fb59bd,https://pdfs.semanticscholar.org/2f17/f6c460e02bd105dcbf14c9b73f34c5fb59bd.pdf,,,https://pdfs.semanticscholar.org/2f17/f6c460e02bd105dcbf14c9b73f34c5fb59bd.pdf
+2f69e9964f3b6bdc0d18749b48bb6b44a4171c64,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7801496,,
+2f837ff8b134b785ee185a9c24e1f82b4e54df04,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5739539,,
+2f73203fd71b755a9601d00fc202bbbd0a595110,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8394868,,
+2fea258320c50f36408032c05c54ba455d575809,https://arxiv.org/pdf/1603.08199.pdf,,,https://arxiv.org/pdf/1603.08199.pdf
+438c4b320b9a94a939af21061b4502f4a86960e3,https://arxiv.org/pdf/1702.03041.pdf,,,https://arxiv.org/pdf/1702.03041.pdf
+43fce0c6b11eb50f597aa573611ac6dc47e088d3,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8465617,,
+43dce79cf815b5c7068b1678f6200dabf8f5de31,,,,http://arxiv.org/abs/1709.03196
+43e268c118ac25f1f0e984b57bc54f0119ded520,https://arxiv.org/pdf/1410.4828.pdf,,,https://arxiv.org/pdf/1410.4828.pdf
+43c3b6a564b284382fdf8ae33f974f4e7a89600e,,,,http://dl.acm.org/citation.cfm?id=3190784
+437642cfc8c34e445ea653929e2d183aaaeeb704,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014815,,
+432d8cba544bf7b09b0455561fea098177a85db1,https://arxiv.org/pdf/1606.02185.pdf,,,https://arxiv.org/pdf/1606.02185.pdf
+4317856a1458baa427dc00e8ea505d2fc5f118ab,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8296449,,
+4342a2b63c9c344d78cf153600cd918a5fecad59,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237671,,
+43d7d0d0d0e2d6cf5355e60c4fe5b715f0a1101a,https://pdfs.semanticscholar.org/4e42/182d40e0ea82bcab2289ae7c8b191dc834f1.pdf,,,https://pdfs.semanticscholar.org/4e42/182d40e0ea82bcab2289ae7c8b191dc834f1.pdf
+88e090ffc1f75eed720b5afb167523eb2e316f7f,https://pdfs.semanticscholar.org/88e0/90ffc1f75eed720b5afb167523eb2e316f7f.pdf,,,https://pdfs.semanticscholar.org/88e0/90ffc1f75eed720b5afb167523eb2e316f7f.pdf
+88a898592b4c1dfd707f04f09ca58ec769a257de,https://arxiv.org/pdf/1809.08809.pdf,,,https://arxiv.org/pdf/1809.08809.pdf
+88535dba55b0a80975df179d31a6cc80cae1cc92,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8355366,,
+885c37f94e9edbbb2177cfba8cb1ad840b2a5f20,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8006255,,
+8818b12aa0ff3bf0b20f9caa250395cbea0e8769,https://pdfs.semanticscholar.org/8818/b12aa0ff3bf0b20f9caa250395cbea0e8769.pdf,,,https://pdfs.semanticscholar.org/8818/b12aa0ff3bf0b20f9caa250395cbea0e8769.pdf
+88e2efab01e883e037a416c63a03075d66625c26,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265507,,
+8895d6ae9f095a8413f663cc83f5b7634b3dc805,https://pdfs.semanticscholar.org/8895/d6ae9f095a8413f663cc83f5b7634b3dc805.pdf,,,https://pdfs.semanticscholar.org/8895/d6ae9f095a8413f663cc83f5b7634b3dc805.pdf
+9ff931ca721d50e470e1a38e583c7b18b6cdc2cc,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7407637,,
+9f1a854d574d0bd14786c41247db272be6062581,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8360155,,
+9f62ac43a1086c22b9a3d9f192c975d1a5a4b31f,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4426825,,
+9f131b4e036208f2402182a1af2a59e3c5d7dd44,,,,http://dl.acm.org/citation.cfm?id=3206038
+9f2984081ef88c20d43b29788fdf732ceabd5d6a,,,,http://arxiv.org/abs/1806.01547
+9fc993aeb0a007ccfaca369a9a8c0ccf7697261d,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7936534,,
+9f43caad22803332400f498ca4dd0429fe7da0aa,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6239186,,
+9fdfe1695adac2380f99d3d5cb6879f0ac7f2bfd,https://pdfs.semanticscholar.org/9fdf/e1695adac2380f99d3d5cb6879f0ac7f2bfd.pdf,,,https://pdfs.semanticscholar.org/9fdf/e1695adac2380f99d3d5cb6879f0ac7f2bfd.pdf
+6b333b2c6311e36c2bde920ab5813f8cfcf2b67b,https://pdfs.semanticscholar.org/d330/64c32527a2690bd1b430b1d9f90a2a609a13.pdf,,,https://pdfs.semanticscholar.org/d330/64c32527a2690bd1b430b1d9f90a2a609a13.pdf
+6baaa8b763cc5553715766e7fbe7abb235fae33c,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789589,,
+6ba3cb67bcdb7aea8a07e144c03b8c5a79c19bc0,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8246530,,
+6b3e360b80268fda4e37ff39b7f303e3684e8719,https://pdfs.semanticscholar.org/75d2/de5858dd45aca9c5db8af6f44617f521cb77.pdf,,,https://pdfs.semanticscholar.org/75d2/de5858dd45aca9c5db8af6f44617f521cb77.pdf
+6b8d0569fffce5cc221560d459d6aa10c4db2f03,https://arxiv.org/pdf/1806.02479.pdf,,,https://arxiv.org/pdf/1806.02479.pdf
+6b99cd366f2ea8e1c9abadf73b05388c0e24fec3,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100204,,
+6b742055a664bcbd1c6a85ae6796bd15bc945367,,,http://doi.org/10.1007/s00138-006-0052-0,
+6b6ff9d55e1df06f8b3e6f257e23557a73b2df96,https://pdfs.semanticscholar.org/6b6f/f9d55e1df06f8b3e6f257e23557a73b2df96.pdf,,,https://pdfs.semanticscholar.org/6b6f/f9d55e1df06f8b3e6f257e23557a73b2df96.pdf
+07377c375ac76a34331c660fe87ebd7f9b3d74c4,https://arxiv.org/pdf/1808.01338.pdf,,,https://arxiv.org/pdf/1808.01338.pdf
+07a31bd7a0bd7118f8ac0bc735feef90e304fb08,,,http://doi.org/10.1007/s11042-015-3120-5,
+071ec4f3fb4bfe6ae9980477d208a7b12691710e,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6552193,,
+07c83f544d0604e6bab5d741b0bf9a3621d133da,https://arxiv.org/pdf/1708.07632.pdf,,,https://arxiv.org/pdf/1708.07632.pdf
+07fa153b8e6196ee6ef6efd8b743de8485a07453,https://pdfs.semanticscholar.org/07fa/153b8e6196ee6ef6efd8b743de8485a07453.pdf,,,https://pdfs.semanticscholar.org/07fa/153b8e6196ee6ef6efd8b743de8485a07453.pdf
+0750a816858b601c0dbf4cfb68066ae7e788f05d,https://arxiv.org/pdf/1801.09414.pdf,,,https://arxiv.org/pdf/1801.09414.pdf
+38c7f80a1e7fa1bdec632042318dc7cdd3c9aad4,,,http://doi.org/10.1016/j.asoc.2018.03.030,
+3803b91e784922a2dacd6a18f61b3100629df932,https://arxiv.org/pdf/1709.07200.pdf,,,https://arxiv.org/pdf/1709.07200.pdf
+38eea307445a39ee7902c1ecf8cea7e3dcb7c0e7,https://arxiv.org/pdf/1807.00451.pdf,,,https://arxiv.org/pdf/1807.00451.pdf
+38c901a58244be9a2644d486f9a1284dc0edbf8a,https://arxiv.org/pdf/1607.06408.pdf,,,https://arxiv.org/pdf/1607.06408.pdf
+38f1fac3ed0fd054e009515e7bbc72cdd4cf801a,https://arxiv.org/pdf/1806.08246.pdf,,,https://arxiv.org/pdf/1806.08246.pdf
+380d5138cadccc9b5b91c707ba0a9220b0f39271,https://arxiv.org/pdf/1806.00194.pdf,,,https://arxiv.org/pdf/1806.00194.pdf
+3827f1cab643a57e3cd22fbffbf19dd5e8a298a8,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373804,,
+00fb2836068042c19b5197d0999e8e93b920eb9c,https://pdfs.semanticscholar.org/00fb/2836068042c19b5197d0999e8e93b920eb9c.pdf,,,https://pdfs.semanticscholar.org/00fb/2836068042c19b5197d0999e8e93b920eb9c.pdf
+0004f72a00096fa410b179ad12aa3a0d10fc853c,https://pdfs.semanticscholar.org/0004/f72a00096fa410b179ad12aa3a0d10fc853c.pdf,,,https://pdfs.semanticscholar.org/0004/f72a00096fa410b179ad12aa3a0d10fc853c.pdf
+007fbc7a1d7eae33b2bb59b175dd1033e5e178f3,,,,http://dl.acm.org/citation.cfm?id=3209659
+6e91be2ad74cf7c5969314b2327b513532b1be09,https://arxiv.org/pdf/1412.2404.pdf,,,https://arxiv.org/pdf/1412.2404.pdf
+6e8a81d452a91f5231443ac83e4c0a0db4579974,https://pdfs.semanticscholar.org/3f64/a5b26a8297d4b832bc5bb95264cdfabde105.pdf,,,https://pdfs.semanticscholar.org/3f64/a5b26a8297d4b832bc5bb95264cdfabde105.pdf
+6e46d8aa63db3285417c8ebb65340b5045ca106f,,,,http://dl.acm.org/citation.cfm?id=3183751
+6e8c3b7d25e6530a631ea01fbbb93ac1e8b69d2f,https://arxiv.org/pdf/1801.04134.pdf,,,https://arxiv.org/pdf/1801.04134.pdf
+6e911227e893d0eecb363015754824bf4366bdb7,https://arxiv.org/pdf/1712.01026.pdf,,,https://arxiv.org/pdf/1712.01026.pdf
+6e00a406edb508312108f683effe6d3c1db020fb,https://arxiv.org/pdf/1803.06340.pdf,,,https://arxiv.org/pdf/1803.06340.pdf
+6e38011e38a1c893b90a48e8f8eae0e22d2008e8,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265376,,
+9ac43a98fe6fde668afb4fcc115e4ee353a6732d,https://arxiv.org/pdf/1804.07362.pdf,,,https://arxiv.org/pdf/1804.07362.pdf
+9a23a0402ae68cc6ea2fe0092b6ec2d40f667adb,https://arxiv.org/pdf/1711.11585.pdf,,,https://arxiv.org/pdf/1711.11585.pdf
+9af9a88c60d9e4b53e759823c439fc590a4b5bc5,https://arxiv.org/pdf/1708.00277.pdf,,,https://arxiv.org/pdf/1708.00277.pdf
+9a98dd6d6aaba05c9e46411ea263f74df908203d,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7859405,,
+9a59abdf3460970de53e09cb397f47d86744f472,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995399,,
+9aab33ce8d6786b3b77900a9b25f5f4577cea461,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961739,,
+9aad8e52aff12bd822f0011e6ef85dfc22fe8466,https://arxiv.org/pdf/1809.03669.pdf,,,https://arxiv.org/pdf/1809.03669.pdf
+9ac2960f646a46b701963230e6949abd9ac0a9b3,,,http://doi.org/10.1162/jocn_a_01174,
+36939e6a365e9db904d81325212177c9e9e76c54,https://pdfs.semanticscholar.org/941b/5492e6ac98355fd7bc7531f846d638e814ac.pdf,,,https://pdfs.semanticscholar.org/941b/5492e6ac98355fd7bc7531f846d638e814ac.pdf
+3646b42511a6a0df5470408bc9a7a69bb3c5d742,https://pdfs.semanticscholar.org/2a6a/8d8ed0f980cc3b20d743f43c9e36dec3150e.pdf,,,https://pdfs.semanticscholar.org/2a6a/8d8ed0f980cc3b20d743f43c9e36dec3150e.pdf
+361eaef45fccfffd5b7df12fba902490a7d24a8d,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8404319,,
+3674f3597bbca3ce05e4423611d871d09882043b,https://pdfs.semanticscholar.org/3674/f3597bbca3ce05e4423611d871d09882043b.pdf,,,https://pdfs.semanticscholar.org/3674/f3597bbca3ce05e4423611d871d09882043b.pdf
+362bfeb28adac5f45b6ef46c07c59744b4ed6a52,https://arxiv.org/pdf/1808.01727.pdf,,,https://arxiv.org/pdf/1808.01727.pdf
+368e99f669ea5fd395b3193cd75b301a76150f9d,https://arxiv.org/pdf/1506.01342.pdf,,,https://arxiv.org/pdf/1506.01342.pdf
+3619a9b46ad4779d0a63b20f7a6a8d3d49530339,https://pdfs.semanticscholar.org/3619/a9b46ad4779d0a63b20f7a6a8d3d49530339.pdf,,,https://pdfs.semanticscholar.org/3619/a9b46ad4779d0a63b20f7a6a8d3d49530339.pdf
+361d6345919c2edc5c3ce49bb4915ed2b4ee49be,https://pdfs.semanticscholar.org/399e/d1c6b72c765c2c8ec6437c9ef7a1866d0f29.pdf,,,https://pdfs.semanticscholar.org/399e/d1c6b72c765c2c8ec6437c9ef7a1866d0f29.pdf
+5cbe1445d683d605b31377881ac8540e1d17adf0,https://arxiv.org/pdf/1509.06161.pdf,,,https://arxiv.org/pdf/1509.06161.pdf
+5c493c42bfd93e4d08517438983e3af65e023a87,https://pdfs.semanticscholar.org/5c49/3c42bfd93e4d08517438983e3af65e023a87.pdf,,,https://pdfs.semanticscholar.org/5c49/3c42bfd93e4d08517438983e3af65e023a87.pdf
+5c35ac04260e281141b3aaa7bbb147032c887f0c,https://pdfs.semanticscholar.org/5c35/ac04260e281141b3aaa7bbb147032c887f0c.pdf,,,https://pdfs.semanticscholar.org/5c35/ac04260e281141b3aaa7bbb147032c887f0c.pdf
+5c4d4fd37e8c80ae95c00973531f34a6d810ea3a,https://arxiv.org/pdf/1603.09439.pdf,,,https://arxiv.org/pdf/1603.09439.pdf
+09137e3c267a3414314d1e7e4b0e3a4cae801f45,https://arxiv.org/pdf/1711.06078.pdf,,,https://arxiv.org/pdf/1711.06078.pdf
+09903df21a38e069273b80e94c8c29324963a832,,,http://doi.org/10.1007/s11042-017-4980-7,
+09926ed62511c340f4540b5bc53cf2480e8063f8,https://pdfs.semanticscholar.org/0992/6ed62511c340f4540b5bc53cf2480e8063f8.pdf,,,https://pdfs.semanticscholar.org/0992/6ed62511c340f4540b5bc53cf2480e8063f8.pdf
+0951f42abbf649bb564a21d4ff5dddf9a5ea54d9,https://arxiv.org/pdf/1806.02023.pdf,,,https://arxiv.org/pdf/1806.02023.pdf
+097340d3ac939ce181c829afb6b6faff946cdce0,https://arxiv.org/pdf/1805.11119.pdf,,,https://arxiv.org/pdf/1805.11119.pdf
+09507f1f1253101d04a975fc5600952eac868602,https://arxiv.org/pdf/1807.10037.pdf,,,https://arxiv.org/pdf/1807.10037.pdf
+098363b29eef1471c494382338687f2fe98f6e15,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411212,,
+099053f2cbfa06c0141371b9f34e26970e316426,,,http://doi.org/10.1007/s11042-016-4079-6,
+09df62fd17d3d833ea6b5a52a232fc052d4da3f5,https://pdfs.semanticscholar.org/5baf/412bc25d131c2da702a6d3b972de7212c50b.pdf,,,https://pdfs.semanticscholar.org/5baf/412bc25d131c2da702a6d3b972de7212c50b.pdf
+5dafab3c936763294257af73baf9fb3bb1696654,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5514556,,
+5d9971c6a9d5c56463ea186850b16f8969a58e67,,,http://doi.org/10.1007/s11042-017-5354-x,
+5da827fe558fb2e1124dcc84ef08311241761726,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139096,,
+5dd473a4a9c6337b083edf38b6ddf5a6aece8908,,,,http://arxiv.org/abs/1711.08238
+5db4fe0ce9e9227042144758cf6c4c2de2042435,https://pdfs.semanticscholar.org/5db4/fe0ce9e9227042144758cf6c4c2de2042435.pdf,,,https://pdfs.semanticscholar.org/5db4/fe0ce9e9227042144758cf6c4c2de2042435.pdf
+5de9670f72d10682bf2cb3156988346257e0489f,,,http://doi.org/10.1016/j.inffus.2015.12.004,
+5d2e5833ca713f95adcf4267148ac2ccf2318539,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6121744,,
+5dd3c9ac3c6d826e17c5b378d1575b68d02432d7,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7292416,,
+5da2ae30e5ee22d00f87ebba8cd44a6d55c6855e,https://pdfs.semanticscholar.org/0946/ce4615f74c4666878757a5eb89494a1f208b.pdf,,,https://pdfs.semanticscholar.org/0946/ce4615f74c4666878757a5eb89494a1f208b.pdf
+31cdaaa7a47efe2ce0e78ebec29df4d2d81df265,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7776921,,
+31f1c92dbfa5aa338a21a0cb15d071cb9dc6e362,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337733,,
+318e7e6daa0a799c83a9fdf7dd6bc0b3e89ab24a,https://arxiv.org/pdf/1601.04805.pdf,,,https://arxiv.org/pdf/1601.04805.pdf
+313d5eba97fe064bdc1f00b7587a4b3543ef712a,https://pdfs.semanticscholar.org/313d/5eba97fe064bdc1f00b7587a4b3543ef712a.pdf,,,https://pdfs.semanticscholar.org/313d/5eba97fe064bdc1f00b7587a4b3543ef712a.pdf
+31dd6bafd6e7c6095eb8d0591abac3b0106a75e3,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8457336,,
+31ea88f29e7f01a9801648d808f90862e066f9ea,https://arxiv.org/pdf/1605.06391.pdf,,,https://arxiv.org/pdf/1605.06391.pdf
+3176ee88d1bb137d0b561ee63edf10876f805cf0,https://arxiv.org/pdf/1511.07356.pdf,,,https://arxiv.org/pdf/1511.07356.pdf
+31d51e48dbd9e7253eafe0719f3788adb564a971,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7410588,,
+3157be811685c93d0cef7fa4c489efea581f9b8e,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411222,,
+312b2566e315dd6e65bd42cfcbe4d919159de8a1,https://pdfs.semanticscholar.org/312b/2566e315dd6e65bd42cfcbe4d919159de8a1.pdf,,,https://pdfs.semanticscholar.org/312b/2566e315dd6e65bd42cfcbe4d919159de8a1.pdf
+31ec1e5c3b5e020af4a5a3c1be2724c7429a7c78,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354285,,
+914d7527678b514e3ee9551655f55ffbd3f0eb0a,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8404350,,
+91495c689e6e614247495c3f322d400d8098de43,https://pdfs.semanticscholar.org/9149/5c689e6e614247495c3f322d400d8098de43.pdf,,,https://pdfs.semanticscholar.org/9149/5c689e6e614247495c3f322d400d8098de43.pdf
+91e17338a12b5e570907e816bff296b13177971e,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8272751,,
+917bea27af1846b649e2bced624e8df1d9b79d6f,https://arxiv.org/pdf/1805.00361.pdf,,,https://arxiv.org/pdf/1805.00361.pdf
+911bef7465665d8b194b6b0370b2b2389dfda1a1,https://arxiv.org/pdf/1806.05666.pdf,,,https://arxiv.org/pdf/1806.05666.pdf
+91ead35d1d2ff2ea7cf35d15b14996471404f68d,https://arxiv.org/pdf/1702.01325.pdf,,,https://arxiv.org/pdf/1702.01325.pdf
+91f0a95b8eb76e8fa24c8267e4a7a17815fc7a11,,,http://doi.org/10.1007/s41095-016-0068-y,
+9131c990fad219726eb38384976868b968ee9d9c,https://arxiv.org/pdf/1804.08348.pdf,,,https://arxiv.org/pdf/1804.08348.pdf
+911505a4242da555c6828509d1b47ba7854abb7a,https://pdfs.semanticscholar.org/9115/05a4242da555c6828509d1b47ba7854abb7a.pdf,,,https://pdfs.semanticscholar.org/9115/05a4242da555c6828509d1b47ba7854abb7a.pdf
+656531036cee6b2c2c71954bb6540ef6b2e016d0,https://arxiv.org/pdf/1511.04601.pdf,,,https://arxiv.org/pdf/1511.04601.pdf
+65b1209d38c259fe9ca17b537f3fb4d1857580ae,https://arxiv.org/pdf/1805.08672.pdf,,,https://arxiv.org/pdf/1805.08672.pdf
+657e702326a1cbc561e059476e9be4d417c37795,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8343704,,
+656f05741c402ba43bb1b9a58bcc5f7ce2403d9a,https://pdfs.semanticscholar.org/656f/05741c402ba43bb1b9a58bcc5f7ce2403d9a.pdf,,,https://pdfs.semanticscholar.org/656f/05741c402ba43bb1b9a58bcc5f7ce2403d9a.pdf
+651cafb2620ab60a0e4f550c080231f20ae6d26e,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6360717,,
+6584c3c877400e1689a11ef70133daa86a238602,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8039231,,
+652ec3947d3d04dda719b1f5ba7c975e567166ef,,,,
+653d19e64bd75648cdb149f755d59e583b8367e3,https://arxiv.org/pdf/1706.02613.pdf,,,https://arxiv.org/pdf/1706.02613.pdf
+65babb10e727382b31ca5479b452ee725917c739,https://arxiv.org/pdf/1408.6027.pdf,,,https://arxiv.org/pdf/1408.6027.pdf
+62dccab9ab715f33761a5315746ed02e48eed2a0,https://arxiv.org/pdf/1808.01340.pdf,,,https://arxiv.org/pdf/1808.01340.pdf
+629a973ca5f3c7d2f4a9befab97d0044dfd3167a,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4427488,,
+620339aef06aed07a78f9ed1a057a25433faa58b,https://arxiv.org/pdf/1806.11230.pdf,,,https://arxiv.org/pdf/1806.11230.pdf
+62fddae74c553ac9e34f511a2957b1614eb4f937,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406684,,
+62b3598b401c807288a113796f424612cc5833ca,https://arxiv.org/pdf/1807.10550.pdf,,,https://arxiv.org/pdf/1807.10550.pdf
+628a3f027b7646f398c68a680add48c7969ab1d9,https://pdfs.semanticscholar.org/628a/3f027b7646f398c68a680add48c7969ab1d9.pdf,,,https://pdfs.semanticscholar.org/628a/3f027b7646f398c68a680add48c7969ab1d9.pdf
+626913b8fcbbaee8932997d6c4a78fe1ce646127,https://arxiv.org/pdf/1711.05942.pdf,,,https://arxiv.org/pdf/1711.05942.pdf
+62750d78e819d745b9200b0c5c35fcae6fb9f404,,,http://doi.org/10.1007/s11042-016-4085-8,
+626859fe8cafd25da13b19d44d8d9eb6f0918647,https://arxiv.org/pdf/1708.06637.pdf,,,https://arxiv.org/pdf/1708.06637.pdf
+62fd622b3ca97eb5577fd423fb9efde9a849cbef,https://arxiv.org/pdf/1809.02169.pdf,,,https://arxiv.org/pdf/1809.02169.pdf
+62007c30f148334fb4d8975f80afe76e5aef8c7f,https://arxiv.org/pdf/1712.03999.pdf,,,https://arxiv.org/pdf/1712.03999.pdf
+62f017907e19766c76887209d01d4307be0cc573,,,http://doi.org/10.1016/j.imavis.2012.02.001,
+969626c52d30ea803064ddef8fb4613fa73ba11d,,,http://doi.org/10.1007/BF02683992,
+96f0e7416994035c91f4e0dfa40fd45090debfc5,https://arxiv.org/pdf/1803.01260.pdf,,,https://arxiv.org/pdf/1803.01260.pdf
+963d0d40de8780161b70d28d2b125b5222e75596,https://arxiv.org/pdf/1611.08657.pdf,,,https://arxiv.org/pdf/1611.08657.pdf
+96e318f8ff91ba0b10348d4de4cb7c2142eb8ba9,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8364450,,
+96ba65bffdddef7c7737c0f42ff4299e95cd85c2,,,http://doi.org/10.1007/s11042-018-5658-5,
+96a9ca7a8366ae0efe6b58a515d15b44776faf6e,https://arxiv.org/pdf/1609.00129.pdf,,,https://arxiv.org/pdf/1609.00129.pdf
+9649a19b49607459cef32f43db4f6e6727080bdb,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8395207,,
+96e1ccfe96566e3c96d7b86e134fa698c01f2289,https://arxiv.org/pdf/1712.00321.pdf,,,https://arxiv.org/pdf/1712.00321.pdf
+9627f28ea5f4c389350572b15968386d7ce3fe49,https://arxiv.org/pdf/1802.07447.pdf,,,https://arxiv.org/pdf/1802.07447.pdf
+96b1000031c53cd4c1c154013bb722ffd87fa7da,https://arxiv.org/pdf/1710.08518.pdf,,,https://arxiv.org/pdf/1710.08518.pdf
+96c6f50ce8e1b9e8215b8791dabd78b2bbd5f28d,https://arxiv.org/pdf/1611.05396.pdf,,,https://arxiv.org/pdf/1611.05396.pdf
+3a27d164e931c422d16481916a2fa6401b74bcef,https://arxiv.org/pdf/1709.03654.pdf,,,https://arxiv.org/pdf/1709.03654.pdf
+3a0558ebfde592bd8bd07cb72b8ca8f700715bfb,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6636646,,
+3a3e55cf5bfd689d6c922e082efa0cd71cd2ae5c,,,,http://dl.acm.org/citation.cfm?id=3184081
+3a3f75e0ffdc0eef07c42b470593827fcd4020b4,https://arxiv.org/pdf/1805.05269.pdf,,,https://arxiv.org/pdf/1805.05269.pdf
+3a2c90e0963bfb07fc7cd1b5061383e9a99c39d2,https://arxiv.org/pdf/1710.03804.pdf,,,https://arxiv.org/pdf/1710.03804.pdf
+3ac3a714042d3ebc159546c26321a1f8f4f5f80c,,,,http://dl.acm.org/citation.cfm?id=3025149
+3a49507c46a2b8c6411809c81ac47b2b1d2282c3,,,http://doi.org/10.1007/s11042-017-5319-0,
+3a9681e2e07be7b40b59c32a49a6ff4c40c962a2,https://pdfs.semanticscholar.org/1c95/1714996c573b00e63878acdc48cdc4ddc183.pdf,,,https://pdfs.semanticscholar.org/1c95/1714996c573b00e63878acdc48cdc4ddc183.pdf
+3a6334953cd2775fab7a8e7b72ed63468c71dee7,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7591180,,
+54948ee407b5d32da4b2eee377cc44f20c3a7e0c,https://arxiv.org/pdf/1806.06296.pdf,,,https://arxiv.org/pdf/1806.06296.pdf
+540b39ba1b8ef06293ed793f130e0483e777e278,https://pdfs.semanticscholar.org/540b/39ba1b8ef06293ed793f130e0483e777e278.pdf,,,https://pdfs.semanticscholar.org/540b/39ba1b8ef06293ed793f130e0483e777e278.pdf
+5435d5f8b9f4def52ac84bee109320e64e58ab8f,,,http://doi.org/10.1007/s11042-016-4321-2,
+54969bcd728b0f2d3285866c86ef0b4797c2a74d,https://arxiv.org/pdf/1804.09869.pdf,,,https://arxiv.org/pdf/1804.09869.pdf
+54a9ed950458f4b7e348fa78a718657c8d3d0e05,https://arxiv.org/pdf/1807.04001.pdf,,,https://arxiv.org/pdf/1807.04001.pdf
+54ce3ff2ab6e4465c2f94eb4d636183fa7878ab7,https://pdfs.semanticscholar.org/54ce/3ff2ab6e4465c2f94eb4d636183fa7878ab7.pdf,,,https://pdfs.semanticscholar.org/54ce/3ff2ab6e4465c2f94eb4d636183fa7878ab7.pdf
+54ba18952fe36c9be9f2ab11faecd43d123b389b,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163085,,
+54f169ad7d1f6c9ce94381e9b5ccc1a07fd49cc6,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7911334,,
+54204e28af73c7aca073835a14afcc5d8f52a515,https://arxiv.org/pdf/1805.12185.pdf,,,https://arxiv.org/pdf/1805.12185.pdf
+982fcead58be419e4f34df6e806204674a4bc579,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6613012,,
+9888edfb6276887eb56a6da7fe561e508e72a517,,,,http://dl.acm.org/citation.cfm?id=3243904
+9853136dbd7d5f6a9c57dc66060cab44a86cd662,https://pdfs.semanticscholar.org/f3fb/f05026afb46b0186f6abbcbbcc08887f1be5.pdf,,,https://pdfs.semanticscholar.org/f3fb/f05026afb46b0186f6abbcbbcc08887f1be5.pdf
+984edce0b961418d81203ec477b9bfa5a8197ba3,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369732,,
+98d1b5515b079492c8e7f0f9688df7d42d96da8e,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5204260,,
+9806d3dc7805dd8c9c20d7222c915fc4beee7099,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6755972,,
+9865fe20df8fe11717d92b5ea63469f59cf1635a,https://arxiv.org/pdf/1805.07566.pdf,,,https://arxiv.org/pdf/1805.07566.pdf
+98e098ba9ff98fc58f22fed6d3d8540116284b91,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8332532,,
+98fd92d68a143a5ced4a016fa3b7addd6b4a0122,,,http://doi.org/10.1007/s11704-016-6066-5,
+9825c4dddeb2ed7eaab668b55403aa2c38bc3320,https://arxiv.org/pdf/1807.09532.pdf,,,https://arxiv.org/pdf/1807.09532.pdf
+533d14e539ae5cdca0ece392487a2b19106d468a,https://arxiv.org/pdf/1611.09053.pdf,,,https://arxiv.org/pdf/1611.09053.pdf
+53507e2de66eaba996f14fd2f54a5535056f1e59,,,http://doi.org/10.1016/j.sigpro.2017.10.024,
+53dd25350d3b3aaf19beb2104f1e389e3442df61,https://pdfs.semanticscholar.org/a2ee/e3191d860c854936d11365d4745224d89b53.pdf,,,https://pdfs.semanticscholar.org/a2ee/e3191d860c854936d11365d4745224d89b53.pdf
+53de11d144cd2eda7cf1bb644ae27f8ef2489289,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8424637,,
+530243b61fa5aea19b454b7dbcac9f463ed0460e,https://arxiv.org/pdf/1807.11079.pdf,,,https://arxiv.org/pdf/1807.11079.pdf
+53c36186bf0ffbe2f39165a1824c965c6394fe0d,https://arxiv.org/pdf/1805.00326.pdf,,,https://arxiv.org/pdf/1805.00326.pdf
+535cdce8264ac0813d5bb8b19ceafa77a1674adf,,,http://doi.org/10.1007/s12559-016-9402-z,
+53a41c711b40e7fe3dc2b12e0790933d9c99a6e0,https://arxiv.org/pdf/1611.06492.pdf,,,https://arxiv.org/pdf/1611.06492.pdf
+53f5cb365806c57811319a42659c9f68b879454a,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8356995,,
+53bfe2ab770e74d064303f3bd2867e5bf7b86379,https://pdfs.semanticscholar.org/d989/c3064d49bf8e63587ada4ed2bdb0d32b120a.pdf,,,https://pdfs.semanticscholar.org/d989/c3064d49bf8e63587ada4ed2bdb0d32b120a.pdf
+53ce84598052308b86ba79d873082853022aa7e9,https://pdfs.semanticscholar.org/4f07/b70883a98a69be3b3e29de06c73e59a9ba0e.pdf,,,https://pdfs.semanticscholar.org/4f07/b70883a98a69be3b3e29de06c73e59a9ba0e.pdf
+3f9ca2526013e358cd8caeb66a3d7161f5507cbc,https://arxiv.org/pdf/1607.01059.pdf,,,https://arxiv.org/pdf/1607.01059.pdf
+3fb98e76ffd8ba79e1c22eda4d640da0c037e98a,https://pdfs.semanticscholar.org/b49a/a569ff63d045b7c0ce66d77e1345d4f9745c.pdf,,,https://pdfs.semanticscholar.org/b49a/a569ff63d045b7c0ce66d77e1345d4f9745c.pdf
+3fb4bf38d34f7f7e5b3df36de2413d34da3e174a,https://arxiv.org/pdf/1807.09882.pdf,,,https://arxiv.org/pdf/1807.09882.pdf
+3f9a7d690db82cf5c3940fbb06b827ced59ec01e,https://arxiv.org/pdf/1502.05678.pdf,,,https://arxiv.org/pdf/1502.05678.pdf
+3ff79cf6df1937949cc9bc522041a9a39d314d83,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8406730,,
+3f0c6dbfd3c9cd5625ba748327d69324baa593a6,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373880,,
+3f5e8f884e71310d7d5571bd98e5a049b8175075,https://pdfs.semanticscholar.org/3f5e/8f884e71310d7d5571bd98e5a049b8175075.pdf,,,https://pdfs.semanticscholar.org/3f5e/8f884e71310d7d5571bd98e5a049b8175075.pdf
+3f5693584d7dab13ffc12122d6ddbf862783028b,https://arxiv.org/pdf/1804.04082.pdf,,,https://arxiv.org/pdf/1804.04082.pdf
+30b15cdb72760f20f80e04157b57be9029d8a1ab,https://pdfs.semanticscholar.org/30b1/5cdb72760f20f80e04157b57be9029d8a1ab.pdf,,,https://pdfs.semanticscholar.org/30b1/5cdb72760f20f80e04157b57be9029d8a1ab.pdf
+30870ef75aa57e41f54310283c0057451c8c822b,https://arxiv.org/pdf/1801.01423.pdf,,,https://arxiv.org/pdf/1801.01423.pdf
+305346d01298edeb5c6dc8b55679e8f60ba97efb,https://pdfs.semanticscholar.org/3053/46d01298edeb5c6dc8b55679e8f60ba97efb.pdf,,,https://pdfs.semanticscholar.org/3053/46d01298edeb5c6dc8b55679e8f60ba97efb.pdf
+30fd1363fa14965e3ab48a7d6235e4b3516c1da1,https://pdfs.semanticscholar.org/6bc2/07bab6a2b4ec335023474b391c9cb23e2e6d.pdf,,,https://pdfs.semanticscholar.org/6bc2/07bab6a2b4ec335023474b391c9cb23e2e6d.pdf
+30c93fec078b98453a71f9f21fbc9512ab3e916f,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8395274,,
+3083bd7a442af6a1d72cdc04ae1ad7c30697a4e8,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8392250,,
+30fb5c24cc15eb8cde5e389bf368d65fb96513e4,,,,http://dl.acm.org/citation.cfm?id=3206048
+30cbd41e997445745b6edd31f2ebcc7533453b61,https://pdfs.semanticscholar.org/1a50/4cdd40877e3d74ed87666c8c540bb1643c79.pdf,,,https://pdfs.semanticscholar.org/1a50/4cdd40877e3d74ed87666c8c540bb1643c79.pdf
+5e6fc99d8f5ebaab0e9c29bc0969530d201e0708,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8017477,,
+5e6f546a50ed97658be9310d5e0a67891fe8a102,https://arxiv.org/pdf/1711.09577.pdf,,,https://arxiv.org/pdf/1711.09577.pdf
+5e7e055ef9ba6e8566a400a8b1c6d8f827099553,https://pdfs.semanticscholar.org/5e7e/055ef9ba6e8566a400a8b1c6d8f827099553.pdf,,,https://pdfs.semanticscholar.org/5e7e/055ef9ba6e8566a400a8b1c6d8f827099553.pdf
+5ed66fb992bfefb070b5c39dc45b6e3ff5248c10,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163116,,
+5e9ec3b8daa95d45138e30c07321e386590f8ec7,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6967830,,
+5b5b9c6c67855ede21a60c834aea5379df7d51b7,,,,http://hdl.handle.net/10044/1/45280
+5ba7882700718e996d576b58528f1838e5559225,https://pdfs.semanticscholar.org/5ba7/882700718e996d576b58528f1838e5559225.pdf,,,https://pdfs.semanticscholar.org/5ba7/882700718e996d576b58528f1838e5559225.pdf
+5bb4fd87fa4a27ddacd570aa81c2d66eb4721019,,,http://doi.org/10.1016/j.neucom.2017.07.014,
+5b0008ba87667085912ea474025d2323a14bfc90,https://pdfs.semanticscholar.org/5b00/08ba87667085912ea474025d2323a14bfc90.pdf,,,https://pdfs.semanticscholar.org/5b00/08ba87667085912ea474025d2323a14bfc90.pdf
+5b5b568a0ba63d00e16a263051c73e09ab83e245,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8416840,,
+5b97e997b9b654373bd129b3baf5b82c2def13d1,https://pdfs.semanticscholar.org/5b97/e997b9b654373bd129b3baf5b82c2def13d1.pdf,,,https://pdfs.semanticscholar.org/5b97/e997b9b654373bd129b3baf5b82c2def13d1.pdf
+5bd3d08335bb4e444a86200c5e9f57fd9d719e14,https://pdfs.semanticscholar.org/5bd3/d08335bb4e444a86200c5e9f57fd9d719e14.pdf,,,https://pdfs.semanticscholar.org/5bd3/d08335bb4e444a86200c5e9f57fd9d719e14.pdf
+5babbad3daac5c26503088782fd5b62067b94fa5,https://arxiv.org/pdf/1809.02652.pdf,,,https://arxiv.org/pdf/1809.02652.pdf
+5b2cfee6e81ef36507ebf3c305e84e9e0473575a,https://arxiv.org/pdf/1704.02402.pdf,,,https://arxiv.org/pdf/1704.02402.pdf
+5b721f86f4a394f05350641e639a9d6cb2046c45,https://arxiv.org/pdf/1603.09638.pdf,,,https://arxiv.org/pdf/1603.09638.pdf
+5b4b84ce3518c8a14f57f5f95a1d07fb60e58223,https://pdfs.semanticscholar.org/9f92/05a60ddf1135929e0747db34363b3a8c6bc8.pdf,,,https://pdfs.semanticscholar.org/9f92/05a60ddf1135929e0747db34363b3a8c6bc8.pdf
+378418fdd28f9022b02857ef7dbab6b0b9a02dbe,,,http://doi.org/10.1007/978-3-319-75420-8,
+372fb32569ced35eaf3740a29890bec2be1869fa,https://pdfs.semanticscholar.org/372f/b32569ced35eaf3740a29890bec2be1869fa.pdf,,,https://pdfs.semanticscholar.org/372f/b32569ced35eaf3740a29890bec2be1869fa.pdf
+3795974e24296185d9b64454cde6f796ca235387,https://arxiv.org/pdf/1806.05252.pdf,,,https://arxiv.org/pdf/1806.05252.pdf
+37866fea39deeff453802cde529dd9d32e0205a5,,,,http://dl.acm.org/citation.cfm?id=2393385
+377f2b65e6a9300448bdccf678cde59449ecd337,https://arxiv.org/pdf/1804.10275.pdf,,,https://arxiv.org/pdf/1804.10275.pdf
+370b6b83c7512419188f5373a962dd3175a56a9b,https://pdfs.semanticscholar.org/370b/6b83c7512419188f5373a962dd3175a56a9b.pdf,,,https://pdfs.semanticscholar.org/370b/6b83c7512419188f5373a962dd3175a56a9b.pdf
+372a8bf0ef757c08551d41e40cb7a485527b6cd7,https://pdfs.semanticscholar.org/2dcf/a8d72fee8732350935718ab86f3d9f3458cb.pdf,,,https://pdfs.semanticscholar.org/2dcf/a8d72fee8732350935718ab86f3d9f3458cb.pdf
+3779e0599481f11fc1acee60d5108d63e55819b3,,,http://doi.org/10.1007/s11280-018-0581-2,
+0831794eddcbac1f601dcb9be9d45531a56dbf7e,,,http://doi.org/10.1007/s11042-017-4416-4,
+080e0efc3cf71260bfe9bdc62cd86614d1ebca46,,,http://doi.org/10.1007/s10851-017-0771-z,
+08f4832507259ded9700de81f5fd462caf0d5be8,https://pdfs.semanticscholar.org/ad40/d61bf27e177d078df12727267f3190eee2b0.pdf,,,https://pdfs.semanticscholar.org/ad40/d61bf27e177d078df12727267f3190eee2b0.pdf
+08903bf161a1e8dec29250a752ce9e2a508a711c,https://pdfs.semanticscholar.org/e7f6/bfb9bb591eb1404ae13f0fa13ad4a3179150.pdf,,,https://pdfs.semanticscholar.org/e7f6/bfb9bb591eb1404ae13f0fa13ad4a3179150.pdf
+08fbbfe87563595508a77629e47613d6bd1119eb,,,,
+084bebc5c98872e9307cd8e7f571d39ef9c1b81e,https://pdfs.semanticscholar.org/8774/e206564df3bf9050f8c2be6b434cc2469c5b.pdf,,,https://pdfs.semanticscholar.org/8774/e206564df3bf9050f8c2be6b434cc2469c5b.pdf
+0857281a3b6a5faba1405e2c11f4e17191d3824d,https://pdfs.semanticscholar.org/0857/281a3b6a5faba1405e2c11f4e17191d3824d.pdf,,,https://pdfs.semanticscholar.org/0857/281a3b6a5faba1405e2c11f4e17191d3824d.pdf
+08d41d2f68a2bf0091dc373573ca379de9b16385,https://arxiv.org/pdf/1802.05023.pdf,,,https://arxiv.org/pdf/1802.05023.pdf
+6d0fe30444c6f4e4db3ad8b02fb2c87e2b33c58d,https://arxiv.org/pdf/1607.00659.pdf,,,https://arxiv.org/pdf/1607.00659.pdf
+6dbdb07ce2991db0f64c785ad31196dfd4dae721,https://arxiv.org/pdf/1802.09058.pdf,,,https://arxiv.org/pdf/1802.09058.pdf
+6d2fd0a9cbea13e840f962ba7c8a9771ec437d3a,,,http://doi.org/10.1007/s11063-017-9715-2,
+6dddf1440617bf7acda40d4d75c7fb4bf9517dbb,https://arxiv.org/pdf/1705.10118.pdf,,,https://arxiv.org/pdf/1705.10118.pdf
+6dcf6b028a6042a9904628a3395520995b1d0ef9,,,,http://dl.acm.org/citation.cfm?id=3158392
+6dcf418c778f528b5792104760f1fbfe90c6dd6a,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984,,
+6de935a02f87aa31e33245c3b85ea3b7f8b1111c,,,http://doi.org/10.1007/s11263-017-1029-3,
+6d07e176c754ac42773690d4b4919a39df85d7ec,https://pdfs.semanticscholar.org/6d07/e176c754ac42773690d4b4919a39df85d7ec.pdf,,,https://pdfs.semanticscholar.org/6d07/e176c754ac42773690d4b4919a39df85d7ec.pdf
+6d8c9a1759e7204eacb4eeb06567ad0ef4229f93,https://arxiv.org/pdf/1707.05938.pdf,,,https://arxiv.org/pdf/1707.05938.pdf
+6dc1f94b852538d572e4919238ddb10e2ee449a4,https://arxiv.org/pdf/1703.09529.pdf,,,https://arxiv.org/pdf/1703.09529.pdf
+6d5125c9407c7762620eeea7570af1a8ee7d76f3,https://arxiv.org/pdf/1807.01462.pdf,,,https://arxiv.org/pdf/1807.01462.pdf
+6da711d07b63c9f24d143ca3991070736baeb412,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7000295,,
+6d70344ae6f6108144a15e9debc7b0be4e3335f1,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8390318,,
+013305c13cfabaea82c218b841dbe71e108d2b97,,,http://doi.org/10.1007/s11063-016-9554-6,
+01c4cf9c7c08f0ad3f386d88725da564f3c54679,https://pdfs.semanticscholar.org/01c4/cf9c7c08f0ad3f386d88725da564f3c54679.pdf,,,https://pdfs.semanticscholar.org/01c4/cf9c7c08f0ad3f386d88725da564f3c54679.pdf
+014e3d0fa5248e6f4634dc237e2398160294edce,https://arxiv.org/pdf/1708.06703.pdf,,,https://arxiv.org/pdf/1708.06703.pdf
+017e94ad51c9be864b98c9b75582753ce6ee134f,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7892240,,
+01e27b6d1af4c9c2f50e2908b5f3b2331ff24846,,,http://doi.org/10.1007/s11263-017-0996-8,
+0141cb33c822e87e93b0c1bad0a09db49b3ad470,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7298876,,
+06262d6beeccf2784e4e36a995d5ee2ff73c8d11,https://pdfs.semanticscholar.org/0626/2d6beeccf2784e4e36a995d5ee2ff73c8d11.pdf,,,https://pdfs.semanticscholar.org/0626/2d6beeccf2784e4e36a995d5ee2ff73c8d11.pdf
+0647c9d56cf11215894d57d677997826b22f6a13,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8401557,,
+06f585a3a05dd3371cd600a40dc35500e2f82f9b,https://arxiv.org/pdf/1804.10069.pdf,,,https://arxiv.org/pdf/1804.10069.pdf
+06560d5721ecc487a4d70905a485e22c9542a522,https://pdfs.semanticscholar.org/0656/0d5721ecc487a4d70905a485e22c9542a522.pdf,,,https://pdfs.semanticscholar.org/0656/0d5721ecc487a4d70905a485e22c9542a522.pdf
+062c41dad67bb68fefd9ff0c5c4d296e796004dc,https://arxiv.org/pdf/1611.06624.pdf,,,https://arxiv.org/pdf/1611.06624.pdf
+06c2086f7f72536bf970ca629151b16927104df3,https://arxiv.org/pdf/1805.03064.pdf,,,https://arxiv.org/pdf/1805.03064.pdf
+06518858bd99cddf9bc9200fac5311fc29ac33b4,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8392777,,
+06ab24721d7117974a6039eb2e57d1545eee5e46,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373809,,
+06b4e41185734f70ce432fdb2b121a7eb01140af,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8362753,,
+6c66ae815e7e508e852ecb122fb796abbcda16a8,https://pdfs.semanticscholar.org/6c66/ae815e7e508e852ecb122fb796abbcda16a8.pdf,,,https://pdfs.semanticscholar.org/6c66/ae815e7e508e852ecb122fb796abbcda16a8.pdf
+6c1227659878e867a01888eef472dd96b679adb6,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354280,,
+6ca2c5ff41e91c34696f84291a458d1312d15bf2,https://pdfs.semanticscholar.org/c70b/2c373917ba61a871b97119413db1eadcf423.pdf,,,https://pdfs.semanticscholar.org/c70b/2c373917ba61a871b97119413db1eadcf423.pdf
+6ca6ade6c9acb833790b1b4e7ee8842a04c607f7,,,,http://dl.acm.org/citation.cfm?id=3234805
+6cb8c52bb421ce04898fa42cb997c04097ddd328,,,http://doi.org/10.1007/978-3-319-11289-3,
+6c5fbf156ef9fc782be0089309074cc52617b868,https://pdfs.semanticscholar.org/fe4c/3f97a80b73be4fad18cc1bfb72354efb528e.pdf,,,https://pdfs.semanticscholar.org/fe4c/3f97a80b73be4fad18cc1bfb72354efb528e.pdf
+6c304f3b9c3a711a0cca5c62ce221fb098dccff0,https://arxiv.org/pdf/1708.05980.pdf,,,https://arxiv.org/pdf/1708.05980.pdf
+6c01b349edb2d33530e8bb07ba338f009663a9dd,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5332299,,
+6cce5ccc5d366996f5a32de17a403341db5fddc6,,,http://doi.org/10.1016/j.cviu.2016.04.012,
+6c80c834d426f0bc4acd6355b1946b71b50cbc0b,https://arxiv.org/pdf/1805.08484.pdf,,,https://arxiv.org/pdf/1805.08484.pdf
+6cb7648465ba7757ecc9c222ac1ab6402933d983,https://arxiv.org/pdf/1708.05827.pdf,,,https://arxiv.org/pdf/1708.05827.pdf
+6c92d87c84fa5e5d2bb5bed3ef38168786bacc49,,,,http://dl.acm.org/citation.cfm?id=2501650
+6c7a42b4f43b3a2f9b250f5803b697857b1444ac,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553718,,
+6cbde27d9a287ae926979dbb18dfef61cf49860e,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8253589,,
+6cfc337069868568148f65732c52cbcef963f79d,https://pdfs.semanticscholar.org/80d7/8415aee24e65ea3031c31adc1dabc1956f8a.pdf,,,https://pdfs.semanticscholar.org/80d7/8415aee24e65ea3031c31adc1dabc1956f8a.pdf
+6c58e3a8209fef0e28ca2219726c15ea5f284f4f,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7899896,,
+39ed31ced75e6151dde41944a47b4bdf324f922b,https://pdfs.semanticscholar.org/39ed/31ced75e6151dde41944a47b4bdf324f922b.pdf,,,https://pdfs.semanticscholar.org/39ed/31ced75e6151dde41944a47b4bdf324f922b.pdf
+397257783ccc8cace5b67cc71e0c73034d559a4f,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6918513,,
+398e0771e64cab6ca5d21754e32dce63f9e3c223,,,,http://dl.acm.org/citation.cfm?id=3206028
+39c8b34c1b678235b60b648d0b11d241a34c8e32,https://arxiv.org/pdf/1805.05503.pdf,,,https://arxiv.org/pdf/1805.05503.pdf
+39af06d29a74ad371a1846259e01c14b5343e3d1,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8046026,,
+3947b64dcac5bcc1d3c8e9dcb50558efbb8770f1,https://arxiv.org/pdf/1612.00738.pdf,,,https://arxiv.org/pdf/1612.00738.pdf
+39d6f8b791995dc5989f817373391189d7ac478a,,,http://doi.org/10.1016/j.patrec.2015.09.015,
+3965d61c4f3b72044f43609c808f8760af8781a2,https://arxiv.org/pdf/1808.01121.pdf,,,https://arxiv.org/pdf/1808.01121.pdf
+395bf182983e0917f33b9701e385290b64e22f9a,https://pdfs.semanticscholar.org/8ab5/18efa79af7d45faa425d1ccd82cfa3aba547.pdf,,,https://pdfs.semanticscholar.org/8ab5/18efa79af7d45faa425d1ccd82cfa3aba547.pdf
+3933e323653ff27e68c3458d245b47e3e37f52fd,https://pdfs.semanticscholar.org/3933/e323653ff27e68c3458d245b47e3e37f52fd.pdf,,,https://pdfs.semanticscholar.org/3933/e323653ff27e68c3458d245b47e3e37f52fd.pdf
+39b452453bea9ce398613d8dd627984fd3a0d53c,https://arxiv.org/pdf/1611.02155.pdf,,,https://arxiv.org/pdf/1611.02155.pdf
+994f7c469219ccce59c89badf93c0661aae34264,https://pdfs.semanticscholar.org/994f/7c469219ccce59c89badf93c0661aae34264.pdf,,,https://pdfs.semanticscholar.org/994f/7c469219ccce59c89badf93c0661aae34264.pdf
+9944c451b4a487940d3fd8819080fe16d627892d,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612967,,
+9939498315777b40bed9150d8940fc1ac340e8ba,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789583,,
+993d189548e8702b1cb0b02603ef02656802c92b,https://arxiv.org/pdf/1809.05992.pdf,,,https://arxiv.org/pdf/1809.05992.pdf
+997b9ffe2f752ba84a66730cfd320d040e7ba2e2,,,,http://dl.acm.org/citation.cfm?id=2967199
+99d06fe2f4d6d76acf40b6da67c5052e82055f5a,,,,http://dl.acm.org/citation.cfm?id=3268909
+9989ad33b64accea8042e386ff3f1216386ba7f1,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8393320,,
+9961f1e5cf8fda29912344773bc75c47f18333a0,,,http://doi.org/10.1007/s10044-017-0618-7,
+9901f473aeea177a55e58bac8fd4f1b086e575a4,https://arxiv.org/pdf/1509.04954.pdf,,,https://arxiv.org/pdf/1509.04954.pdf
+99c20eb5433ed27e70881d026d1dbe378a12b342,https://pdfs.semanticscholar.org/2eb3/74476c9431a614b1841df1a7c32a4cd095e0.pdf,,,https://pdfs.semanticscholar.org/2eb3/74476c9431a614b1841df1a7c32a4cd095e0.pdf
+99facca6fc50cc30f13b7b6dd49ace24bc94f702,https://arxiv.org/pdf/1609.03892.pdf,,,https://arxiv.org/pdf/1609.03892.pdf
+99d7678039ad96ee29ab520ff114bb8021222a91,https://pdfs.semanticscholar.org/99d7/678039ad96ee29ab520ff114bb8021222a91.pdf,,,https://pdfs.semanticscholar.org/99d7/678039ad96ee29ab520ff114bb8021222a91.pdf
+523854a7d8755e944bd50217c14481fe1329a969,https://arxiv.org/pdf/1808.00380.pdf,,,https://arxiv.org/pdf/1808.00380.pdf
+521aa8dcd66428b07728b91722cc8f2b5a73944b,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8367126,,
+52af7625f7e7a0bd9f9d8eeafd631c4d431e67e7,,,http://doi.org/10.1007/s00371-018-1585-8,
+52472ec859131844f38fc7d57944778f01d109ac,https://arxiv.org/pdf/1707.02749.pdf,,,https://arxiv.org/pdf/1707.02749.pdf
+525da67fb524d46f2afa89478cd482a68be8a42b,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354128,,
+522a4ca705c06a0436bbe62f46efe24d67a82422,,,http://doi.org/10.1007/s11042-017-5475-2,
+52d7eb0fbc3522434c13cc247549f74bb9609c5d,https://arxiv.org/pdf/1511.06523.pdf,,,https://arxiv.org/pdf/1511.06523.pdf
+529baf1a79cca813f8c9966ceaa9b3e42748c058,https://pdfs.semanticscholar.org/6ae7/47cf58eeda0687a3f779aaecfa12403b9684.pdf,,,https://pdfs.semanticscholar.org/6ae7/47cf58eeda0687a3f779aaecfa12403b9684.pdf
+55ea0c775b25d9d04b5886e322db852e86a556cd,https://arxiv.org/pdf/1804.01077.pdf,,,https://arxiv.org/pdf/1804.01077.pdf
+55432723c728a2ce90d817e9e9877ae9fbad6fe5,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8412925,,
+55c68c1237166679d2cb65f266f496d1ecd4bec6,https://arxiv.org/pdf/1802.02774.pdf,,,https://arxiv.org/pdf/1802.02774.pdf
+55cfc3c08000f9d21879582c6296f2a864b657e8,,,http://doi.org/10.1049/iet-cvi.2015.0287,
+556b05ab6eff48d32ffbd04f9008b9a5c78a4ad7,,,,http://dl.acm.org/citation.cfm?id=2926713
+5550a6df1b118a80c00a2459bae216a7e8e3966c,https://pdfs.semanticscholar.org/5550/a6df1b118a80c00a2459bae216a7e8e3966c.pdf,,,https://pdfs.semanticscholar.org/5550/a6df1b118a80c00a2459bae216a7e8e3966c.pdf
+55e87050b998eb0a8f0b16163ef5a28f984b01fa,https://arxiv.org/pdf/1710.10736.pdf,,,https://arxiv.org/pdf/1710.10736.pdf
+552122432b92129d7e7059ef40dc5f6045f422b5,,,http://doi.org/10.1007/s11263-017-1000-3,
+55aafdef9d9798611ade1a387d1e4689f2975e51,,,http://doi.org/10.1007/s11263-017-1044-4,
+55c4efc082a8410b528af7325de8148b80cf41e3,,,,http://dl.acm.org/citation.cfm?id=3231899
+55c40cbcf49a0225e72d911d762c27bb1c2d14aa,https://pdfs.semanticscholar.org/55c4/0cbcf49a0225e72d911d762c27bb1c2d14aa.pdf,,,https://pdfs.semanticscholar.org/55c4/0cbcf49a0225e72d911d762c27bb1c2d14aa.pdf
+55a7286f014cc6b51a3f50b1e6bc8acc8166f231,,,,http://arxiv.org/abs/1603.02814
+97b5800e144a8df48f1f7e91383b0f37bc37cf60,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237657,,
+972e044f69443dfc5c987e29250b2b88a6d2f986,,,http://doi.org/10.1134/S1054661811020738,
+9788b491ddc188941dadf441fc143a4075bff764,https://pdfs.semanticscholar.org/9788/b491ddc188941dadf441fc143a4075bff764.pdf,,,https://pdfs.semanticscholar.org/9788/b491ddc188941dadf441fc143a4075bff764.pdf
+971cb1bfe3d10fcb2037e684c48bd99842f42fa4,,,http://doi.org/10.1007/s11042-017-5141-8,
+97137d5154a9f22a5d9ecc32e8e2b95d07a5a571,https://arxiv.org/pdf/1604.04337.pdf,,,https://arxiv.org/pdf/1604.04337.pdf
+9730b9cd998c0a549601c554221a596deda8af5b,https://arxiv.org/pdf/1704.07945.pdf,,,https://arxiv.org/pdf/1704.07945.pdf
+972b1a7ef8cc9c83c2c6d8d126f94f27b567d7d0,,,http://doi.org/10.1007/978-3-319-99978-4,
+97cf04eaf1fc0ac4de0f5ad4a510d57ce12544f5,https://arxiv.org/pdf/1804.10938.pdf,,,https://arxiv.org/pdf/1804.10938.pdf
+97c1f68fb7162af326cd0f1bc546908218ec5da6,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7471977,,
+975978ee6a32383d6f4f026b944099e7739e5890,https://pdfs.semanticscholar.org/9759/78ee6a32383d6f4f026b944099e7739e5890.pdf,,,https://pdfs.semanticscholar.org/9759/78ee6a32383d6f4f026b944099e7739e5890.pdf
+63fd7a159e58add133b9c71c4b1b37b899dd646f,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6603332,,
+6318d3842b36362bb45527b717e1a45ae46151d5,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780708,,
+632b24ddd42fda4aebc5a8af3ec44f7fd3ecdc6c,https://arxiv.org/pdf/1604.02647.pdf,,,https://arxiv.org/pdf/1604.02647.pdf
+636b8ffc09b1b23ff714ac8350bb35635e49fa3c,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1467308,,
+631483c15641c3652377f66c8380ff684f3e365c,https://arxiv.org/pdf/1611.10314.pdf,,,https://arxiv.org/pdf/1611.10314.pdf
+6359fcb0b4546979c54818df8271debc0d653257,,,http://doi.org/10.1007/s11704-017-6275-6,
+633c851ebf625ad7abdda2324e9de093cf623141,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961727,,
+632fa986bed53862d83918c2b71ab953fd70d6cc,https://arxiv.org/pdf/1805.10355.pdf,,,https://arxiv.org/pdf/1805.10355.pdf
+633101e794d7b80f55f466fd2941ea24595e10e6,https://pdfs.semanticscholar.org/6331/01e794d7b80f55f466fd2941ea24595e10e6.pdf,,,https://pdfs.semanticscholar.org/6331/01e794d7b80f55f466fd2941ea24595e10e6.pdf
+6316a4b689706b0f01b40f9a3cef47b92bc52411,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1699534,,
+0f7e9199dad3237159e985e430dd2bf619ef2db5,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7883882,,
+0f21a39fa4c0a19c4a5b4733579e393cb1d04f71,https://pdfs.semanticscholar.org/0f21/a39fa4c0a19c4a5b4733579e393cb1d04f71.pdf,,,https://pdfs.semanticscholar.org/0f21/a39fa4c0a19c4a5b4733579e393cb1d04f71.pdf
+0fd1bffb171699a968c700f206665b2f8837d953,https://arxiv.org/pdf/1503.00949.pdf,,,https://arxiv.org/pdf/1503.00949.pdf
+0a0321785c8beac1cbaaec4d8ad0cfd4a0d6d457,https://pdfs.semanticscholar.org/0a03/21785c8beac1cbaaec4d8ad0cfd4a0d6d457.pdf,,,https://pdfs.semanticscholar.org/0a03/21785c8beac1cbaaec4d8ad0cfd4a0d6d457.pdf
+0a60d9d62620e4f9bb3596ab7bb37afef0a90a4f,https://pdfs.semanticscholar.org/0de4/0e8adc31a15af7496c92f261f9f703afed1d.pdf,,,https://pdfs.semanticscholar.org/0de4/0e8adc31a15af7496c92f261f9f703afed1d.pdf
+0a34fe39e9938ae8c813a81ae6d2d3a325600e5c,https://arxiv.org/pdf/1708.07517.pdf,,,https://arxiv.org/pdf/1708.07517.pdf
+0a9d204db13d395f024067cf70ac19c2eeb5f942,https://arxiv.org/pdf/1804.02843.pdf,,,https://arxiv.org/pdf/1804.02843.pdf
+0a0007cfd40ae9694c84f109aea11ec4f2b6cf39,,,http://doi.org/10.1007/s11042-016-4105-8,
+0a4fc9016aacae9cdf40663a75045b71e64a70c9,https://pdfs.semanticscholar.org/0235/563971fcf8b517271f8e4f424305fffa10f2.pdf,,,https://pdfs.semanticscholar.org/0235/563971fcf8b517271f8e4f424305fffa10f2.pdf
+0a85afebaa19c80fddb660110a4352fd22eb2801,https://arxiv.org/pdf/1809.03658.pdf,,,https://arxiv.org/pdf/1809.03658.pdf
+0a7309147d777c2f20f780a696efe743520aa2db,https://arxiv.org/pdf/1805.05622.pdf,,,https://arxiv.org/pdf/1805.05622.pdf
+0aaf785d7f21d2b5ad582b456896495d30b0a4e2,,,,http://dl.acm.org/citation.cfm?id=3173789
+0a6a25ee84fc0bf7284f41eaa6fefaa58b5b329a,https://arxiv.org/pdf/1807.05292.pdf,,,https://arxiv.org/pdf/1807.05292.pdf
+641f0989b87bf7db67a64900dcc9568767b7b50f,https://pdfs.semanticscholar.org/e25a/6836e5f5dc6cf691cd9c42224c0f7f4bb42c.pdf,,,https://pdfs.semanticscholar.org/e25a/6836e5f5dc6cf691cd9c42224c0f7f4bb42c.pdf
+642a386c451e94d9c44134e03052219a7512b9de,,,http://doi.org/10.1016/j.imavis.2008.04.018,
+640e12837241d52d04379d3649d050ee3760048c,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5692624,,
+64ec0c53dd1aa51eb15e8c2a577701e165b8517b,https://arxiv.org/pdf/1803.11521.pdf,,,https://arxiv.org/pdf/1803.11521.pdf
+64ec02e1056de4b400f9547ce56e69ba8393e2ca,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8446491,,
+645f09f4bc2e6a13663564ee9032ca16e35fc52d,,,,http://dl.acm.org/citation.cfm?id=3193542
+645de797f936cb19c1b8dba3b862543645510544,https://arxiv.org/pdf/1611.06678.pdf,,,https://arxiv.org/pdf/1611.06678.pdf
+64d7e62f46813b5ad08289aed5dc4825d7ec5cff,https://pdfs.semanticscholar.org/f7e1/251d831b763d1ee10bfc6fae78990405f9f9.pdf,,,https://pdfs.semanticscholar.org/f7e1/251d831b763d1ee10bfc6fae78990405f9f9.pdf
+90ac0f32c0c29aa4545ed3d5070af17f195d015f,https://pdfs.semanticscholar.org/2322/1b7ff507d23da4e4b47b7228170b4fd224b8.pdf,,,https://pdfs.semanticscholar.org/2322/1b7ff507d23da4e4b47b7228170b4fd224b8.pdf
+9057044c0347fb9798a9b552910a9aff150385db,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6778411,,
+9077365c9486e54e251dd0b6f6edaeda30ae52b9,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373910,,
+90498b95fe8b299ce65d5cafaef942aa58bd68b7,https://arxiv.org/pdf/1804.08790.pdf,,,https://arxiv.org/pdf/1804.08790.pdf
+90cc2f08a6c2f0c41a9dd1786bae097f9292105e,https://arxiv.org/pdf/1808.09892.pdf,,,https://arxiv.org/pdf/1808.09892.pdf
+90e7a86a57079f17f1089c3a46ea9bfd1d49226c,,,,https://www.sciencedirect.com/science/article/pii/S0042698914002739
+90221884fe2643b80203991686af78a9da0f9791,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995467,,
+90d9209d5dd679b159051a8315423a7f796d704d,https://arxiv.org/pdf/1808.05085.pdf,,,https://arxiv.org/pdf/1808.05085.pdf
+bfdafe932f93b01632a5ba590627f0d41034705d,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6134770,,
+bf961e4a57a8f7e9d792e6c2513ee1fb293658e9,https://pdfs.semanticscholar.org/5db5/7be8bfed8f3a34aebc45dc69c4d4a7dee570.pdf,,,https://pdfs.semanticscholar.org/5db5/7be8bfed8f3a34aebc45dc69c4d4a7dee570.pdf
+bf3bf5400b617fef2825eb987eb496fea99804b9,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8461385,,
+bf54b5586cdb0b32f6eed35798ff91592b03fbc4,https://pdfs.semanticscholar.org/bf54/b5586cdb0b32f6eed35798ff91592b03fbc4.pdf,,,https://pdfs.semanticscholar.org/bf54/b5586cdb0b32f6eed35798ff91592b03fbc4.pdf
+bf37a81d572bb154581845b65a766fab1e5c7dda,,,http://doi.org/10.1007/s11760-017-1111-x,
+bf5940d57f97ed20c50278a81e901ae4656f0f2c,https://arxiv.org/pdf/1711.00248.pdf,,,https://arxiv.org/pdf/1711.00248.pdf
+bff567c58db554858c7f39870cff7c306523dfee,https://arxiv.org/pdf/1807.03480.pdf,,,https://arxiv.org/pdf/1807.03480.pdf
+d35534f3f59631951011539da2fe83f2844ca245,https://arxiv.org/pdf/1705.07904.pdf,,,https://arxiv.org/pdf/1705.07904.pdf
+d3edbfe18610ce63f83db83f7fbc7634dde1eb40,https://pdfs.semanticscholar.org/d3ed/bfe18610ce63f83db83f7fbc7634dde1eb40.pdf,,,https://pdfs.semanticscholar.org/d3ed/bfe18610ce63f83db83f7fbc7634dde1eb40.pdf
+d34f546e61eccbac2450ca7490f558e751e13ec3,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8461800,,
+d3008b4122e50a28f6cc1fa98ac6af28b42271ea,,,,http://dl.acm.org/citation.cfm?id=2806218
+d3dea0cd65ab3da14cb7b3bd0ec59531d98508aa,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7728015,,
+d3d5d86afec84c0713ec868cf5ed41661fc96edc,https://arxiv.org/pdf/1606.02894.pdf,,,https://arxiv.org/pdf/1606.02894.pdf
+d31328b12eef33e7722b8e5505d0f9d9abe2ffd9,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373866,,
+d36a1e4637618304c2093f72702dcdcc4dcd41d1,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961791,,
+d3b18ba0d9b247bfa2fb95543d172ef888dfff95,https://pdfs.semanticscholar.org/0a92/0b6ed81de2e7665784eba433cb1cf15e73ad.pdf,,,https://pdfs.semanticscholar.org/0a92/0b6ed81de2e7665784eba433cb1cf15e73ad.pdf
+d383ba7bbf8b7b49dcef9f8abab47521966546bb,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995471,,
+d3d39e419ac98db2de1a9d5a05cb0b4ca5cae8fd,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6619296,,
+d340a135a55ecf7506010e153d5f23155dcfa7e8,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7884781,,
+d309e414f0d6e56e7ba45736d28ee58ae2bad478,https://pdfs.semanticscholar.org/d309/e414f0d6e56e7ba45736d28ee58ae2bad478.pdf,,,https://pdfs.semanticscholar.org/d309/e414f0d6e56e7ba45736d28ee58ae2bad478.pdf
+d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9,https://arxiv.org/pdf/1804.04326.pdf,,,https://arxiv.org/pdf/1804.04326.pdf
+d307a766cc9c728a24422313d4c3dcfdb0d16dd5,https://arxiv.org/pdf/1804.10021.pdf,,,https://arxiv.org/pdf/1804.10021.pdf
+d31af74425719a3840b496b7932e0887b35e9e0d,https://pdfs.semanticscholar.org/d31a/f74425719a3840b496b7932e0887b35e9e0d.pdf,,,https://pdfs.semanticscholar.org/d31a/f74425719a3840b496b7932e0887b35e9e0d.pdf
+d3b0839324d0091e70ce34f44c979b9366547327,https://arxiv.org/pdf/1804.10743.pdf,,,https://arxiv.org/pdf/1804.10743.pdf
+d3faed04712b4634b47e1de0340070653546deb2,https://arxiv.org/pdf/1805.04140.pdf,,,https://arxiv.org/pdf/1805.04140.pdf
+d33fcdaf2c0bd0100ec94b2c437dccdacec66476,https://pdfs.semanticscholar.org/d33f/cdaf2c0bd0100ec94b2c437dccdacec66476.pdf,,,https://pdfs.semanticscholar.org/d33f/cdaf2c0bd0100ec94b2c437dccdacec66476.pdf
+d4a5eaf2e9f2fd3e264940039e2cbbf08880a090,https://arxiv.org/pdf/1802.02137.pdf,,,https://arxiv.org/pdf/1802.02137.pdf
+d46b790d22cb59df87f9486da28386b0f99339d3,https://pdfs.semanticscholar.org/d46b/790d22cb59df87f9486da28386b0f99339d3.pdf,,,https://pdfs.semanticscholar.org/d46b/790d22cb59df87f9486da28386b0f99339d3.pdf
+d4f0960c6587379ad7df7928c256776e25952c60,,,,https://www.ncbi.nlm.nih.gov/pubmed/29107889
+d444e010049944c1b3438c9a25ae09b292b17371,https://pdfs.semanticscholar.org/d444/e010049944c1b3438c9a25ae09b292b17371.pdf,,,https://pdfs.semanticscholar.org/d444/e010049944c1b3438c9a25ae09b292b17371.pdf
+d492dbfaa42b4f8b8a74786d7343b3be6a3e9a1d,https://pdfs.semanticscholar.org/d492/dbfaa42b4f8b8a74786d7343b3be6a3e9a1d.pdf,,,https://pdfs.semanticscholar.org/d492/dbfaa42b4f8b8a74786d7343b3be6a3e9a1d.pdf
+d444368421f456baf8c3cb089244e017f8d32c41,https://arxiv.org/pdf/1712.06352.pdf,,,https://arxiv.org/pdf/1712.06352.pdf
+d4453ec649dbde752e74da8ab0984c6f15cc6e06,,,http://doi.org/10.1007/s11042-016-3361-y,
+d4885ca24189b4414031ca048a8b7eb2c9ac646c,https://arxiv.org/pdf/1807.07718.pdf,,,https://arxiv.org/pdf/1807.07718.pdf
+d4288daef6519f6852f59ac6b85e21b8910f2207,,,,https://www.ncbi.nlm.nih.gov/pubmed/29994505
+d458c49a5e34263c95b3393386b5d76ba770e497,https://pdfs.semanticscholar.org/d458/c49a5e34263c95b3393386b5d76ba770e497.pdf,,,https://pdfs.semanticscholar.org/d458/c49a5e34263c95b3393386b5d76ba770e497.pdf
+d4b4020e289c095ce2c2941685c6cd37667f5cc9,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7489442,,
+d454ad60b061c1a1450810a0f335fafbfeceeccc,https://arxiv.org/pdf/1712.07195.pdf,,,https://arxiv.org/pdf/1712.07195.pdf
+d4df31006798ee091b86e091a7bf5dce6e51ba3e,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1612996,,
+d44e6baf3464bf56d3a29daf280b1b525ac30f7d,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265336,,
+d4e669d5d35fa0ca9f8d9a193c82d4153f5ffc4e,https://pdfs.semanticscholar.org/d4e6/69d5d35fa0ca9f8d9a193c82d4153f5ffc4e.pdf,,,https://pdfs.semanticscholar.org/d4e6/69d5d35fa0ca9f8d9a193c82d4153f5ffc4e.pdf
+d44a93027208816b9e871101693b05adab576d89,https://arxiv.org/pdf/1709.10433.pdf,,,https://arxiv.org/pdf/1709.10433.pdf
+ba01dbfa29dc86d1279b2e9b9eeca1c52509bbda,,,http://doi.org/10.1007/s00530-017-0566-5,
+bad2df94fa771869fa35bd11a1a7ab2e3f6d1da3,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7344635,,
+ba1c0600d3bdb8ed9d439e8aa736a96214156284,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8081394,,
+badcd992266c6813063c153c41b87babc0ba36a3,https://arxiv.org/pdf/1809.03193.pdf,,,https://arxiv.org/pdf/1809.03193.pdf
+ba788365d70fa6c907b71a01d846532ba3110e31,https://arxiv.org/pdf/1805.08657.pdf,,,https://arxiv.org/pdf/1805.08657.pdf
+badb95dbdfb3f044a46d7ba0ee69dba929c511b1,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7363515,,
+baafe3253702955c6904f0b233e661b47aa067e1,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7776926,,
+ba17782ca5fc0d932317389c2adf94b5dbd3ebfe,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5509290,,
+ba7b12c8e2ff3c5e4e0f70b58215b41b18ff8feb,https://arxiv.org/pdf/1711.09001.pdf,,,https://arxiv.org/pdf/1711.09001.pdf
+badd371a49d2c4126df95120902a34f4bee01b00,https://arxiv.org/pdf/1809.04096.pdf,,,https://arxiv.org/pdf/1809.04096.pdf
+a082c77e9a6c2e2313d8255e8e4c0677d325ce3e,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163111,,
+a022eff5470c3446aca683eae9c18319fd2406d5,https://pdfs.semanticscholar.org/a022/eff5470c3446aca683eae9c18319fd2406d5.pdf,,,https://pdfs.semanticscholar.org/a022/eff5470c3446aca683eae9c18319fd2406d5.pdf
+a0c37f07710184597befaa7e6cf2f0893ff440e9,https://arxiv.org/pdf/1805.06374.pdf,,,https://arxiv.org/pdf/1805.06374.pdf
+a0fd85b3400c7b3e11122f44dc5870ae2de9009a,https://arxiv.org/pdf/1408.3967.pdf,,,https://arxiv.org/pdf/1408.3967.pdf
+a0aa32bb7f406693217fba6dcd4aeb6c4d5a479b,https://pdfs.semanticscholar.org/a0aa/32bb7f406693217fba6dcd4aeb6c4d5a479b.pdf,,,https://pdfs.semanticscholar.org/a0aa/32bb7f406693217fba6dcd4aeb6c4d5a479b.pdf
+a0b1990dd2b4cd87e4fd60912cc1552c34792770,https://pdfs.semanticscholar.org/a0b1/990dd2b4cd87e4fd60912cc1552c34792770.pdf,,,https://pdfs.semanticscholar.org/a0b1/990dd2b4cd87e4fd60912cc1552c34792770.pdf
+a00fdf49e5e0a73eb24345cb25a0bd1383a10021,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7892186,,
+a03448488950ee5bf50e9e1d744129fbba066c50,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8367180,,
+a77e9f0bd205a7733431a6d1028f09f57f9f73b0,https://arxiv.org/pdf/1806.07753.pdf,,,https://arxiv.org/pdf/1806.07753.pdf
+a7ec294373ccc0598cbb0bbb6340c4e56fe5d979,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1699580,,
+a7664247a37a89c74d0e1a1606a99119cffc41d4,https://pdfs.semanticscholar.org/a766/4247a37a89c74d0e1a1606a99119cffc41d4.pdf,,,https://pdfs.semanticscholar.org/a766/4247a37a89c74d0e1a1606a99119cffc41d4.pdf
+a78025f39cf78f2fc66c4b2942fbe5bad3ea65fc,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8404357,,
+a758b744a6d6962f1ddce6f0d04292a0b5cf8e07,https://pdfs.semanticscholar.org/a758/b744a6d6962f1ddce6f0d04292a0b5cf8e07.pdf,,,https://pdfs.semanticscholar.org/a758/b744a6d6962f1ddce6f0d04292a0b5cf8e07.pdf
+a78b5495a4223b9784cc53670cc10b6f0beefd32,,,http://doi.org/10.1007/s11042-018-6260-6,
+a775da3e6e6ea64bffab7f9baf665528644c7ed3,https://pdfs.semanticscholar.org/0e01/3be45033d43cc658b464cdb55cbf46a994b8.pdf,,,https://pdfs.semanticscholar.org/0e01/3be45033d43cc658b464cdb55cbf46a994b8.pdf
+b8fc620a1563511744f1a9386bdfa09a2ea0f71b,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411214,,
+b8375ff50b8a6f1a10dd809129a18df96888ac8b,https://pdfs.semanticscholar.org/e94d/8395ab477091c433b020f8fb535eae5c1df5.pdf,,,https://pdfs.semanticscholar.org/e94d/8395ab477091c433b020f8fb535eae5c1df5.pdf
+b8048a7661bdb73d3613fde9d710bd45a20d13e7,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8468792,,
+b85c198ce09ffc4037582a544c7ffb6ebaeff198,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100113,,
+b82f89d6ef94d26bf4fec4d49437346b727c3bd4,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6894202,,
+b8f3f6d8f188f65ca8ea2725b248397c7d1e662d,https://arxiv.org/pdf/1611.04357.pdf,,,https://arxiv.org/pdf/1611.04357.pdf
+b8ebda42e272d3617375118542d4675a0c0e501d,https://arxiv.org/pdf/1706.07522.pdf,,,https://arxiv.org/pdf/1706.07522.pdf
+b8d8501595f38974e001a66752dc7098db13dfec,,,,http://arxiv.org/abs/1711.09265
+b806a31c093b31e98cc5fca7e3ec53f2cc169db9,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7995928,,
+b14e3fe0d320c0d7c09154840250d70bc88bb6c0,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1699097,,
+b1d89015f9b16515735d4140c84b0bacbbef19ac,https://arxiv.org/pdf/1709.00235.pdf,,,https://arxiv.org/pdf/1709.00235.pdf
+b14b672e09b5b2d984295dfafb05604492bfaec5,https://pdfs.semanticscholar.org/b14b/672e09b5b2d984295dfafb05604492bfaec5.pdf,,,https://pdfs.semanticscholar.org/b14b/672e09b5b2d984295dfafb05604492bfaec5.pdf
+b1b993a1fbcc827bcb99c4cc1ba64ae2c5dcc000,https://arxiv.org/pdf/1703.03054.pdf,,,https://arxiv.org/pdf/1703.03054.pdf
+b161d261fabb507803a9e5834571d56a3b87d147,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8122913,,
+b166ce267ddb705e6ed855c6b679ec699d62e9cb,https://pdfs.semanticscholar.org/b166/ce267ddb705e6ed855c6b679ec699d62e9cb.pdf,,,https://pdfs.semanticscholar.org/b166/ce267ddb705e6ed855c6b679ec699d62e9cb.pdf
+b13e2e43672e66ba45d1b852a34737e4ce04226b,https://pdfs.semanticscholar.org/3552/4e63c11f13fe08b2996a7bc0a9105e7c407b.pdf,,,https://pdfs.semanticscholar.org/3552/4e63c11f13fe08b2996a7bc0a9105e7c407b.pdf
+b1e4f8c15ff30cc7d35ab25ff3eddaf854e0a87c,https://pdfs.semanticscholar.org/dcc4/b241debf72f3898a69f32185b21766200771.pdf,,,https://pdfs.semanticscholar.org/dcc4/b241debf72f3898a69f32185b21766200771.pdf
+b15a06d701f0a7f508e3355a09d0016de3d92a6d,https://pdfs.semanticscholar.org/b15a/06d701f0a7f508e3355a09d0016de3d92a6d.pdf,,,https://pdfs.semanticscholar.org/b15a/06d701f0a7f508e3355a09d0016de3d92a6d.pdf
+b1f4423c227fa37b9680787be38857069247a307,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6200254,,
+b104c8ef6735eba1d29f50c99bbbf99d33fc8dc2,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7415357,,
+b11b71b704629357fe13ed97b216b9554b0e7463,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7736040,,
+b1451721864e836069fa299a64595d1655793757,https://arxiv.org/pdf/1706.03863.pdf,,,https://arxiv.org/pdf/1706.03863.pdf
+b1fdd4ae17d82612cefd4e78b690847b071379d3,https://pdfs.semanticscholar.org/4fc5/416b6c7173d3462e5be796bda3ad8d5645a1.pdf,,,https://pdfs.semanticscholar.org/4fc5/416b6c7173d3462e5be796bda3ad8d5645a1.pdf
+dde5125baefa1141f1ed50479a3fd67c528a965f,https://arxiv.org/pdf/1701.04851.pdf,,,https://arxiv.org/pdf/1701.04851.pdf
+dd8084b2878ca95d8f14bae73e1072922f0cc5da,https://arxiv.org/pdf/1709.02929.pdf,,,https://arxiv.org/pdf/1709.02929.pdf
+dd85b6fdc45bf61f2b3d3d92ce5056c47bd8d335,https://arxiv.org/pdf/1803.09490.pdf,,,https://arxiv.org/pdf/1803.09490.pdf
+dd0086da7c4efe61abb70dd012538f5deb9a8d16,,,http://doi.org/10.1007/s11704-016-5024-6,
+ddbd24a73ba3d74028596f393bb07a6b87a469c0,https://pdfs.semanticscholar.org/ddbd/24a73ba3d74028596f393bb07a6b87a469c0.pdf,,,https://pdfs.semanticscholar.org/ddbd/24a73ba3d74028596f393bb07a6b87a469c0.pdf
+dd6826e9520a6e72bcd24d1bdb930e78c1083b31,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7106467,,
+ddbb6e0913ac127004be73e2d4097513a8f02d37,https://pdfs.semanticscholar.org/d3ea/05926b22a9c45687d435611db14f608e410d.pdf,,,https://pdfs.semanticscholar.org/d3ea/05926b22a9c45687d435611db14f608e410d.pdf
+ddfae3a96bd341109d75cedeaebb5ed2362b903f,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6837429,,
+dc1510110c23f7b509035a1eda22879ef2506e61,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909642,,
+dc107e7322f7059430b4ef4991507cb18bcc5d95,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995338,,
+dcf6ecd51ba135d432fcb7697fc6c52e4e7b0a43,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100120,,
+dc964b9c7242a985eb255b2410a9c45981c2f4d0,,,http://doi.org/10.1007/s10851-018-0837-6,
+dc550f361ae82ec6e1a0cf67edf6a0138163382e,https://pdfs.semanticscholar.org/dc55/0f361ae82ec6e1a0cf67edf6a0138163382e.pdf,,,https://pdfs.semanticscholar.org/dc55/0f361ae82ec6e1a0cf67edf6a0138163382e.pdf
+dc5d04d34b278b944097b8925a9147773bbb80cc,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354149,,
+dcf71245addaf66a868221041aabe23c0a074312,https://arxiv.org/pdf/1708.05237.pdf,,,https://arxiv.org/pdf/1708.05237.pdf
+dc5d9399b3796db7fd850990402dce221b98c8be,,,,http://dl.acm.org/citation.cfm?id=3220016
+dc3dc18b6831c867a8d65da130a9ff147a736745,,,,http://dl.acm.org/citation.cfm?id=2750679
+dc34ab49d378ddcf6c8e2dbf5472784c5bfa8006,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8462222,,
+dce5e0a1f2cdc3d4e0e7ca0507592860599b0454,https://arxiv.org/pdf/1803.05576.pdf,,,https://arxiv.org/pdf/1803.05576.pdf
+dcb6f06631021811091ce691592b12a237c12907,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8438999,,
+dc9d62087ff93a821e6bb8a15a8ae2da3e39dcdd,https://arxiv.org/pdf/1705.01936.pdf,,,https://arxiv.org/pdf/1705.01936.pdf
+dc6ad30c7a4bc79bb06b4725b16e202d3d7d8935,,,http://doi.org/10.1007/s11042-017-4646-5,
+dc13229afbbc8b7a31ed5adfe265d971850c0976,,,,
+dc974c31201b6da32f48ef81ae5a9042512705fe,https://arxiv.org/pdf/1705.01781.pdf,,,https://arxiv.org/pdf/1705.01781.pdf
+b6bb883dd14f2737d0d6225cf4acbf050d307634,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8382306,,
+b6f15bf8723b2d5390122442ab04630d2d3878d8,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163142,,
+b6620027b441131a18f383d544779521b119c1aa,,,http://doi.org/10.1016/j.patcog.2013.04.013,
+b6ef158d95042f39765df04373c01546524c9ccd,https://pdfs.semanticscholar.org/b6ef/158d95042f39765df04373c01546524c9ccd.pdf,,,https://pdfs.semanticscholar.org/b6ef/158d95042f39765df04373c01546524c9ccd.pdf
+b68150bfdec373ed8e025f448b7a3485c16e3201,https://arxiv.org/pdf/1703.09471.pdf,,,https://arxiv.org/pdf/1703.09471.pdf
+b6f682648418422e992e3ef78a6965773550d36b,https://pdfs.semanticscholar.org/b6f6/82648418422e992e3ef78a6965773550d36b.pdf,,,https://pdfs.semanticscholar.org/b6f6/82648418422e992e3ef78a6965773550d36b.pdf
+b6d3caccdcb3fbce45ce1a68bb5643f7e68dadb3,https://arxiv.org/pdf/1711.10305.pdf,,,https://arxiv.org/pdf/1711.10305.pdf
+b6d0e461535116a675a0354e7da65b2c1d2958d4,https://arxiv.org/pdf/1805.03430.pdf,,,https://arxiv.org/pdf/1805.03430.pdf
+b69bcb5f73999ea12ff4ac1ac853b72cd5096b2d,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1613024,,
+a9fc8efd1aa3d58f89c0f53f0cb112725b5bda10,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8316891,,
+a9ae55c83a8047c6cdf7c958fd3d4a6bfb0a13df,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014745,,
+a9fdbe102f266cc20e600fa6b060a7bc8d1134e9,,,,https://www.ncbi.nlm.nih.gov/pubmed/29334821
+a92147bed9c17c311c6081beb0ef4c3165b6268e,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6805594,,
+a98ff1c2e3c22e3d0a41a2718e4587537b92da0a,,,http://doi.org/10.1007/978-3-319-68548-9_19,
+a92adfdd8996ab2bd7cdc910ea1d3db03c66d34f,https://arxiv.org/pdf/1708.05038.pdf,,,https://arxiv.org/pdf/1708.05038.pdf
+a939e287feb3166983e36b8573cd161d12097ad8,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7550048,,
+a98316980b126f90514f33214dde51813693fe0d,https://arxiv.org/pdf/1805.01887.pdf,,,https://arxiv.org/pdf/1805.01887.pdf
+a961f1234e963a7945fed70197015678149b37d8,,,,http://dl.acm.org/citation.cfm?id=3206068
+a95dc0c4a9d882a903ce8c70e80399f38d2dcc89,https://pdfs.semanticscholar.org/a95d/c0c4a9d882a903ce8c70e80399f38d2dcc89.pdf,,,https://pdfs.semanticscholar.org/a95d/c0c4a9d882a903ce8c70e80399f38d2dcc89.pdf
+a92b5234b8b73e06709dd48ec5f0ec357c1aabed,https://arxiv.org/pdf/1802.04962.pdf,,,https://arxiv.org/pdf/1802.04962.pdf
+a96c45ed3a44ad79a72499be238264ae38857988,,,http://doi.org/10.1007/s00138-016-0786-2,
+a92c207031b0778572bf41803dba1a21076e128b,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8433557,,
+a9215666b4bcdf8d510de8952cf0d55b635727dc,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7498613,,
+d5d7e89e6210fcbaa52dc277c1e307632cd91dab,https://arxiv.org/pdf/1711.10398.pdf,,,https://arxiv.org/pdf/1711.10398.pdf
+d5444f9475253bbcfef85c351ea9dab56793b9ea,https://arxiv.org/pdf/1703.00686.pdf,,,https://arxiv.org/pdf/1703.00686.pdf
+d57c8d46a869c63fb20e33bc21bc2a3c4628f5b4,,,http://doi.org/10.1007/s11042-018-5806-y,
+d57982dc55dbed3d0f89589e319dc2d2bd598532,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099760,,
+d5de42d37ee84c86b8f9a054f90ddb4566990ec0,https://arxiv.org/pdf/1612.06371.pdf,,,https://arxiv.org/pdf/1612.06371.pdf
+d5d5cc27ca519d1300e77e3c1a535a089f52f646,,,http://doi.org/10.1007/s11042-016-3768-5,
+d289ce63055c10937e5715e940a4bb9d0af7a8c5,,,,http://dl.acm.org/citation.cfm?id=3081360
+d264dedfdca8dc4c71c50311bcdd6ba3980eb331,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8392234,,
+d2eb1079552fb736e3ba5e494543e67620832c52,https://arxiv.org/pdf/1807.04050.pdf,,,https://arxiv.org/pdf/1807.04050.pdf
+d278e020be85a1ccd90aa366b70c43884dd3f798,https://arxiv.org/pdf/1805.11191.pdf,,,https://arxiv.org/pdf/1805.11191.pdf
+d2f2b10a8f29165d815e652f8d44955a12d057e6,,,http://doi.org/10.1007/s10044-015-0475-1,
+d20ea5a4fa771bc4121b5654a7483ced98b39148,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4430554,,
+d26b443f87df76034ff0fa9c5de9779152753f0c,https://arxiv.org/pdf/1807.03425.pdf,,,https://arxiv.org/pdf/1807.03425.pdf
+aad4c94fd55d33a3f3a5377bbe441c9474cdbd1e,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7777820,,
+aae742779e8b754da7973949992d258d6ca26216,https://arxiv.org/pdf/1505.04030.pdf,,,https://arxiv.org/pdf/1505.04030.pdf
+aa581b481d400982a7e2a88830a33ec42ad0414f,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7313922,,
+aa5a7a9900548a1f1381389fc8695ced0c34261a,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7900274,,
+aab3561acbd19f7397cbae39dd34b3be33220309,https://arxiv.org/pdf/1805.02152.pdf,,,https://arxiv.org/pdf/1805.02152.pdf
+aafeb3d76155ec28e8ab6b4d063105d5e04e471d,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014781,,
+aafb8dc8fda3b13a64ec3f1ca7911df01707c453,https://arxiv.org/pdf/1711.06778.pdf,,,https://arxiv.org/pdf/1711.06778.pdf
+aadfcaf601630bdc2af11c00eb34220da59b7559,https://arxiv.org/pdf/1804.07237.pdf,,,https://arxiv.org/pdf/1804.07237.pdf
+aa6e8a2a9d3ed59d2ae72add84176e7b7f4b2912,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8203756,,
+aa1129780cc496918085cd0603a774345c353c54,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7779010,,
+aa1607090fbc80ab1e9c0f25ffe8b75b777e5fd8,,,,https://www.sciencedirect.com/science/article/pii/S0006322316331110
+aa3c9de34ef140ec812be85bb8844922c35eba47,https://arxiv.org/pdf/1707.09457.pdf,,,https://arxiv.org/pdf/1707.09457.pdf
+af29ad70ab148c83e1faa8b3098396bc1cd87790,,,http://doi.org/10.1007/s40012-016-0149-1,
+aff8705fb2f2ae460cb3980b47f2e85c2e6dd41a,https://arxiv.org/pdf/1805.09203.pdf,,,https://arxiv.org/pdf/1805.09203.pdf
+af6cae71f24ea8f457e581bfe1240d5fa63faaf7,https://arxiv.org/pdf/1805.09791.pdf,,,https://arxiv.org/pdf/1805.09791.pdf
+afdf9a3464c3b015f040982750f6b41c048706f5,https://arxiv.org/pdf/1608.05477.pdf,,,https://arxiv.org/pdf/1608.05477.pdf
+afdc303b3325fbc1baa9f18a66bcad59d5aa675b,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5595920,,
+afa57e50570a6599508ee2d50a7b8ca6be04834a,https://pdfs.semanticscholar.org/bc26/4e51ea341744eba137e9dd0e6adf8cbc01d0.pdf,,,https://pdfs.semanticscholar.org/bc26/4e51ea341744eba137e9dd0e6adf8cbc01d0.pdf
+afa84ff62c9f5b5c280de2996b69ad9fa48b7bc3,https://arxiv.org/pdf/1708.09268.pdf,,,https://arxiv.org/pdf/1708.09268.pdf
+af4745a3c3c7b51dab0fd90d68b53e60225aa4a9,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7873272,,
+af3b803188344971aa89fee861a6a598f30c6f10,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8404811,,
+af9419f2155785961a5c16315c70b8228435d5f8,,,http://doi.org/10.1016/j.patrec.2015.12.013,
+af654a7ec15168b16382bd604889ea07a967dac6,https://pdfs.semanticscholar.org/af65/4a7ec15168b16382bd604889ea07a967dac6.pdf,,,https://pdfs.semanticscholar.org/af65/4a7ec15168b16382bd604889ea07a967dac6.pdf
+b73795963dc623a634d218d29e4a5b74dfbc79f1,https://arxiv.org/pdf/1807.08772.pdf,,,https://arxiv.org/pdf/1807.08772.pdf
+b712f08f819b925ff7587b6c09a8855bc295d795,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8450858,,
+b7894c1f805ffd90ab4ab06002c70de68d6982ab,https://pdfs.semanticscholar.org/5e87/06fab62a5716c30a245e5963f51793e1d0ed.pdf,,,https://pdfs.semanticscholar.org/5e87/06fab62a5716c30a245e5963f51793e1d0ed.pdf
+b759936982d6fb25c55c98955f6955582bdaeb27,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7472169,,
+b7ec41005ce4384e76e3be854ecccd564d2f89fb,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8441009,,
+b7774c096dc18bb0be2acef07ff5887a22c2a848,https://pdfs.semanticscholar.org/d589/29d6cc1dfa513b145e47598c446b16487861.pdf,,,https://pdfs.semanticscholar.org/d589/29d6cc1dfa513b145e47598c446b16487861.pdf
+b7820f3d0f43c2ce613ebb6c3d16eb893c84cf89,https://arxiv.org/pdf/1804.10073.pdf,,,https://arxiv.org/pdf/1804.10073.pdf
+b72eebffe697008048781ab7b768e0c96e52236a,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100092,,
+b7461aac36fc0b8a24ecadf6c5b5caf54f2aa2f7,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7528404,,
+b76af8fcf9a3ebc421b075b689defb6dc4282670,https://arxiv.org/pdf/1807.09207.pdf,,,https://arxiv.org/pdf/1807.09207.pdf
+b7c6df1ae0e8348feecd65e9ad574d1e04d212a5,,,http://doi.org/10.1007/s11704-018-8015-y,
+db848c3c32464d12da33b2f4c3a29fe293fc35d1,https://arxiv.org/pdf/1807.11152.pdf,,,https://arxiv.org/pdf/1807.11152.pdf
+dbb16032dd8f19bdfd045a1fc0fc51f29c70f70a,https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf,,,https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf
+db0379c9b02e514f10f778cccff0d6a6acf40519,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6130343,,
+dba7d8c4d2fca41269a2c96b1ea594e2d0b9bdda,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7422069,,
+dbe255d3d2a5d960daaaba71cb0da292e0af36a7,https://arxiv.org/pdf/1505.04373.pdf,,,https://arxiv.org/pdf/1505.04373.pdf
+db1a9b8d8ce9a5696a96f8db4206b6f72707730e,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7961838,,
+dbb9601a1d2febcce4c07dd2b819243d81abb2c2,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8361884,,
+db5a00984fa54b9d2a1caad0067a9ff0d0489517,https://pdfs.semanticscholar.org/dd47/1f321ead8b405da6194057b2778ef3db7ea7.pdf,,,https://pdfs.semanticscholar.org/dd47/1f321ead8b405da6194057b2778ef3db7ea7.pdf
+dbd958ffedc3eae8032be67599ec281310c05630,https://pdfs.semanticscholar.org/d051/86de8343813a738c1fa5da9bf5165ee63bb7.pdf,,,https://pdfs.semanticscholar.org/d051/86de8343813a738c1fa5da9bf5165ee63bb7.pdf
+dbed26cc6d818b3679e46677abc9fa8e04e8c6a6,https://pdfs.semanticscholar.org/dbed/26cc6d818b3679e46677abc9fa8e04e8c6a6.pdf,,,https://pdfs.semanticscholar.org/dbed/26cc6d818b3679e46677abc9fa8e04e8c6a6.pdf
+db3545a983ffd24c97c18bf7f068783102548ad7,https://pdfs.semanticscholar.org/080e/660b47647e81dadaec27365b3d5b88f3ae68.pdf,,,https://pdfs.semanticscholar.org/080e/660b47647e81dadaec27365b3d5b88f3ae68.pdf
+dbc8ffd6457147ff06cd3f56834e3ec6dccb2057,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265396,,
+db67edbaeb78e1dd734784cfaaa720ba86ceb6d2,https://arxiv.org/pdf/1509.04853.pdf,,,https://arxiv.org/pdf/1509.04853.pdf
+dbced84d839165d9b494982449aa2eb9109b8467,,,,http://arxiv.org/abs/1712.05083
+a8bb698d1bb21b81497ef68f0f52fa6eaf14a6bf,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6587752,,
+a85f691c9f82a248aa2c86d4a63b9036d6cf47ab,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8423530,,
+a85e9e11db5665c89b057a124547377d3e1c27ef,https://arxiv.org/pdf/1802.00066.pdf,,,https://arxiv.org/pdf/1802.00066.pdf
+a87ab836771164adb95d6744027e62e05f47fd96,https://arxiv.org/pdf/1808.00022.pdf,,,https://arxiv.org/pdf/1808.00022.pdf
+a896ddeb0d253739c9aaef7fc1f170a2ba8407d3,https://arxiv.org/pdf/1708.03979.pdf,,,https://arxiv.org/pdf/1708.03979.pdf
+a88ced67f4ed7940c76b666e1c9c0f08b59f9cf8,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771415,,
+a803453edd2b4a85b29da74dcc551b3c53ff17f9,https://pdfs.semanticscholar.org/a803/453edd2b4a85b29da74dcc551b3c53ff17f9.pdf,,,https://pdfs.semanticscholar.org/a803/453edd2b4a85b29da74dcc551b3c53ff17f9.pdf
+a8a30a8c50d9c4bb8e6d2dd84bc5b8b7f2c84dd8,https://pdfs.semanticscholar.org/3bf2/79c782cee5b43a766d248810d602b24033c9.pdf,,,https://pdfs.semanticscholar.org/3bf2/79c782cee5b43a766d248810d602b24033c9.pdf
+a8e7561ada380f2f50211c67fc45c3b3dea96bdb,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4401921,,
+a89cbc90bbb4477a48aec185f2a112ea7ebe9b4d,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265434,,
+a8d52265649c16f95af71d6f548c15afc85ac905,https://arxiv.org/pdf/1708.04320.pdf,,,https://arxiv.org/pdf/1708.04320.pdf
+a8a61badec9b8bc01f002a06e1426a623456d121,https://pdfs.semanticscholar.org/a8a6/1badec9b8bc01f002a06e1426a623456d121.pdf,,,https://pdfs.semanticscholar.org/a8a6/1badec9b8bc01f002a06e1426a623456d121.pdf
+a8154d043f187c6640cb6aedeaa8385a323e46cf,https://arxiv.org/pdf/1805.03134.pdf,,,https://arxiv.org/pdf/1805.03134.pdf
+a812368fe1d4a186322bf72a6d07e1cf60067234,https://pdfs.semanticscholar.org/a812/368fe1d4a186322bf72a6d07e1cf60067234.pdf,,,https://pdfs.semanticscholar.org/a812/368fe1d4a186322bf72a6d07e1cf60067234.pdf
+de7f5e4ccc2f38e0c8f3f72a930ae1c43e0fdcf0,https://arxiv.org/pdf/1707.03986.pdf,,,https://arxiv.org/pdf/1707.03986.pdf
+ded968b97bd59465d5ccda4f1e441f24bac7ede5,https://pdfs.semanticscholar.org/ded9/68b97bd59465d5ccda4f1e441f24bac7ede5.pdf,,,https://pdfs.semanticscholar.org/ded9/68b97bd59465d5ccda4f1e441f24bac7ede5.pdf
+de0eb358b890d92e8f67592c6e23f0e3b2ba3f66,https://arxiv.org/pdf/1711.01587.pdf,,,https://arxiv.org/pdf/1711.01587.pdf
+de162d4b8450bf2b80f672478f987f304b7e6ae4,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237454,,
+def934edb7c7355757802a95218c6e4ed6122a72,,,http://doi.org/10.1007/978-0-387-31439-6,
+def569db592ed1715ae509644444c3feda06a536,https://arxiv.org/pdf/1804.04604.pdf,,,https://arxiv.org/pdf/1804.04604.pdf
+dec76940896a41a8a7b6e9684df326b23737cd5d,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6607638,,
+dee406a7aaa0f4c9d64b7550e633d81bc66ff451,https://arxiv.org/pdf/1710.01453.pdf,,,https://arxiv.org/pdf/1710.01453.pdf
+de92951ea021ec56492d76381a8ae560a972dd68,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6738246,,
+de3285da34df0262a4548574c2383c51387a24bf,https://arxiv.org/pdf/1706.06982.pdf,,,https://arxiv.org/pdf/1706.06982.pdf
+dee6609615b73b10540f32537a242baa3c9fca4d,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8015006,,
+de0df8b2b4755da9f70cf1613d7b12040d0ce8ef,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791166,,
+dec0c26855da90876c405e9fd42830c3051c2f5f,https://pdfs.semanticscholar.org/dec0/c26855da90876c405e9fd42830c3051c2f5f.pdf,,,https://pdfs.semanticscholar.org/dec0/c26855da90876c405e9fd42830c3051c2f5f.pdf
+de45bf9e5593a5549a60ca01f2988266d04d77da,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8404529,,
+b0c512fcfb7bd6c500429cbda963e28850f2e948,https://arxiv.org/pdf/1408.1656.pdf,,,https://arxiv.org/pdf/1408.1656.pdf
+b08203fca1af7b95fda8aa3d29dcacd182375385,https://arxiv.org/pdf/1805.01818.pdf,,,https://arxiv.org/pdf/1805.01818.pdf
+b0b944b3a783c2d9f12637b471fe1efb44deb52b,,,,http://dl.acm.org/citation.cfm?id=2591684
+b09b693708f412823053508578df289b8403100a,https://pdfs.semanticscholar.org/b09b/693708f412823053508578df289b8403100a.pdf,,,https://pdfs.semanticscholar.org/b09b/693708f412823053508578df289b8403100a.pdf
+b084683e5bab9b2bc327788e7b9a8e049d5fff8f,https://arxiv.org/pdf/1712.08263.pdf,,,https://arxiv.org/pdf/1712.08263.pdf
+b0c1615ebcad516b5a26d45be58068673e2ff217,https://arxiv.org/pdf/1608.05246.pdf,,,https://arxiv.org/pdf/1608.05246.pdf
+b034cc919af30e96ee7bed769b93ea5828ae361b,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099915,,
+a6b5ca99432c23392cec682aebb8295c0283728b,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8302395,,
+a6e8a8bb99e30a9e80dbf80c46495cf798066105,https://pdfs.semanticscholar.org/a6e8/a8bb99e30a9e80dbf80c46495cf798066105.pdf,,,https://pdfs.semanticscholar.org/a6e8/a8bb99e30a9e80dbf80c46495cf798066105.pdf
+a6eb6ad9142130406fb4ffd4d60e8348c2442c29,https://arxiv.org/pdf/1806.00186.pdf,,,https://arxiv.org/pdf/1806.00186.pdf
+a6e4f924cf9a12625e85c974f0ed136b43c2f3b5,,,http://doi.org/10.1007/s11042-017-4572-6,
+a6590c49e44aa4975b2b0152ee21ac8af3097d80,https://arxiv.org/pdf/1804.00782.pdf,,,https://arxiv.org/pdf/1804.00782.pdf
+a6e25cab2251a8ded43c44b28a87f4c62e3a548a,https://arxiv.org/pdf/1801.07388.pdf,,,https://arxiv.org/pdf/1801.07388.pdf
+a6270914cf5f60627a1332bcc3f5951c9eea3be0,https://arxiv.org/pdf/1802.02522.pdf,,,https://arxiv.org/pdf/1802.02522.pdf
+a6ce2f0795839d9c2543d64a08e043695887e0eb,https://arxiv.org/pdf/1507.04760.pdf,,,https://arxiv.org/pdf/1507.04760.pdf
+a60db9ca8bc144a37fe233b08232d9c91641cbb5,,,http://doi.org/10.1007/s11280-018-0615-9,
+a6902db7972a7631d186bbf59c5ef116c205b1e8,,,,http://dl.acm.org/citation.cfm?id=1276381
+a6ce1a1de164f41cb8999c728bceedf65d66bb23,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7170694,,
+a6d47f7aa361ab9b37c7f3f868280318f355fadc,,,,https://ora.ox.ac.uk/objects/uuid:7704244a-b327-4e5c-a58e-7bfe769ed988
+b9081856963ceb78dcb44ac410c6fca0533676a3,https://arxiv.org/pdf/1703.03329.pdf,,,https://arxiv.org/pdf/1703.03329.pdf
+b97c7f82c1439fa1e4525e5860cb05a39cc412ea,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4430537,,
+b999364980e4c21d9c22cc5a9f14501432999ca4,,,http://doi.org/10.1007/s10044-018-0727-y,
+b97f694c2a111b5b1724eefd63c8d64c8e19f6c9,https://arxiv.org/pdf/1710.01216.pdf,,,https://arxiv.org/pdf/1710.01216.pdf
+b9d0774b0321a5cfc75471b62c8c5ef6c15527f5,https://pdfs.semanticscholar.org/b9d0/774b0321a5cfc75471b62c8c5ef6c15527f5.pdf,,,https://pdfs.semanticscholar.org/b9d0/774b0321a5cfc75471b62c8c5ef6c15527f5.pdf
+b908edadad58c604a1e4b431f69ac8ded350589a,https://arxiv.org/pdf/1708.02721.pdf,,,https://arxiv.org/pdf/1708.02721.pdf
+b93bf0a7e449cfd0db91a83284d9eba25a6094d8,https://pdfs.semanticscholar.org/b93b/f0a7e449cfd0db91a83284d9eba25a6094d8.pdf,,,https://pdfs.semanticscholar.org/b93b/f0a7e449cfd0db91a83284d9eba25a6094d8.pdf
+b9dc8cc479cacda1f23b91df00eb03f88cc0c260,,,,http://dl.acm.org/citation.cfm?id=2964287
+b91f54e1581fbbf60392364323d00a0cd43e493c,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553788,,
+b971266b29fcecf1d5efe1c4dcdc2355cb188ab0,https://arxiv.org/pdf/1703.00832.pdf,,,https://arxiv.org/pdf/1703.00832.pdf
+b961e512242ddad7712855ab00b4d37723376e5d,,,http://doi.org/10.1007/s11554-010-0178-1,
+a1af7ec84472afba0451b431dfdb59be323e35b7,https://pdfs.semanticscholar.org/a1af/7ec84472afba0451b431dfdb59be323e35b7.pdf,,,https://pdfs.semanticscholar.org/a1af/7ec84472afba0451b431dfdb59be323e35b7.pdf
+a1e07c31184d3728e009d4d1bebe21bf9fe95c8e,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7900056,,
+a1b7bb2a4970b7c479aff3324cc7773c1daf3fc1,https://arxiv.org/pdf/1711.03990.pdf,,,https://arxiv.org/pdf/1711.03990.pdf
+a14ed872503a2f03d2b59e049fd6b4d61ab4d6ca,https://arxiv.org/pdf/1711.01467.pdf,,,https://arxiv.org/pdf/1711.01467.pdf
+a1132e2638a8abd08bdf7fc4884804dd6654fa63,https://pdfs.semanticscholar.org/a113/2e2638a8abd08bdf7fc4884804dd6654fa63.pdf,,,https://pdfs.semanticscholar.org/a113/2e2638a8abd08bdf7fc4884804dd6654fa63.pdf
+a1f1120653bb1bd8bd4bc9616f85fdc97f8ce892,https://arxiv.org/pdf/1603.08895.pdf,,,https://arxiv.org/pdf/1603.08895.pdf
+a168ca2e199121258fbb2b6c821207456e5bf994,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553808,,
+a1081cb856faae25df14e25045cd682db8028141,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8462122,,
+a1dd9038b1e1e59c9d564e252d3e14705872fdec,https://arxiv.org/pdf/1803.09851.pdf,,,https://arxiv.org/pdf/1803.09851.pdf
+a16fb74ea66025d1f346045fda00bd287c20af0e,https://arxiv.org/pdf/1809.07447.pdf,,,https://arxiv.org/pdf/1809.07447.pdf
+a136ccaa67f660c45d3abb8551c5ed357faf7081,,,,https://www.ncbi.nlm.nih.gov/pubmed/27078863
+ef2bb8bd93fa8b44414565b32735334fa6823b56,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8393076,,
+ef940b76e40e18f329c43a3f545dc41080f68748,https://pdfs.semanticscholar.org/ef94/0b76e40e18f329c43a3f545dc41080f68748.pdf,,,https://pdfs.semanticscholar.org/ef94/0b76e40e18f329c43a3f545dc41080f68748.pdf
+ef230e3df720abf2983ba6b347c9d46283e4b690,https://pdfs.semanticscholar.org/ef23/0e3df720abf2983ba6b347c9d46283e4b690.pdf,,,https://pdfs.semanticscholar.org/ef23/0e3df720abf2983ba6b347c9d46283e4b690.pdf
+ef4ecb76413a05c96eac4c743d2c2a3886f2ae07,https://pdfs.semanticscholar.org/ef4e/cb76413a05c96eac4c743d2c2a3886f2ae07.pdf,,,https://pdfs.semanticscholar.org/ef4e/cb76413a05c96eac4c743d2c2a3886f2ae07.pdf
+efc78a7d95b14abacdfde5c78007eabf9a21689c,,,,http://dl.acm.org/citation.cfm?id=2939840
+ef458499c3856a6e9cd4738b3e97bef010786adb,https://arxiv.org/pdf/1803.09196.pdf,,,https://arxiv.org/pdf/1803.09196.pdf
+ef032afa4bdb18b328ffcc60e2dc5229cc1939bc,https://pdfs.semanticscholar.org/ef03/2afa4bdb18b328ffcc60e2dc5229cc1939bc.pdf,,,https://pdfs.semanticscholar.org/ef03/2afa4bdb18b328ffcc60e2dc5229cc1939bc.pdf
+ef5531711a69ed687637c48930261769465457f0,https://arxiv.org/pdf/1807.00556.pdf,,,https://arxiv.org/pdf/1807.00556.pdf
+ef559d5f02e43534168fbec86707915a70cd73a0,https://pdfs.semanticscholar.org/ef55/9d5f02e43534168fbec86707915a70cd73a0.pdf,,,https://pdfs.semanticscholar.org/ef55/9d5f02e43534168fbec86707915a70cd73a0.pdf
+efa08283656714911acff2d5022f26904e451113,https://arxiv.org/pdf/1607.00548.pdf,,,https://arxiv.org/pdf/1607.00548.pdf
+ef8de1bd92e9ee9d0d2dee73095d4d348dc54a98,https://arxiv.org/pdf/1406.1881.pdf,,,https://arxiv.org/pdf/1406.1881.pdf
+efb24d35d8f6a46e1ff3800a2481bc7e681e255e,,,http://doi.org/10.1016/j.patrec.2015.08.006,
+ef999ab2f7b37f46445a3457bf6c0f5fd7b5689d,https://pdfs.semanticscholar.org/001e/ad9b99ee57af44e1831be1670c40711d348d.pdf,,,https://pdfs.semanticscholar.org/001e/ad9b99ee57af44e1831be1670c40711d348d.pdf
+c3d3d2229500c555c7a7150a8b126ef874cbee1c,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406478,,
+c3beae515f38daf4bd8053a7d72f6d2ed3b05d88,https://pdfs.semanticscholar.org/1093/3b6c487a269b87f9b561c5eedfdab6be306b.pdf,,,https://pdfs.semanticscholar.org/1093/3b6c487a269b87f9b561c5eedfdab6be306b.pdf
+c3dc4f414f5233df96a9661609557e341b71670d,https://pdfs.semanticscholar.org/c3dc/4f414f5233df96a9661609557e341b71670d.pdf,,,https://pdfs.semanticscholar.org/c3dc/4f414f5233df96a9661609557e341b71670d.pdf
+c3d874336eb8fae92ab335393fd801fa8df98412,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952438,,
+c362116a358320e71fb6bc8baa559142677622d2,,,http://doi.org/10.1016/j.patcog.2011.07.009,
+c3285a1d6ec6972156fea9e6dc9a8d88cd001617,https://arxiv.org/pdf/1712.05083.pdf,,,https://arxiv.org/pdf/1712.05083.pdf
+c38b1fa00f1f370c029984c55d4d2d40b529d00c,,,http://doi.org/10.1007/978-3-319-26561-2,
+c34532fe6bfbd1e6df477c9ffdbb043b77e7804d,https://pdfs.semanticscholar.org/0d43/3b9435b874a1eea6d7999e86986c910fa285.pdf,,,https://pdfs.semanticscholar.org/0d43/3b9435b874a1eea6d7999e86986c910fa285.pdf
+c394a5dfe5bea5fbab4c2b6b90d2d03e01fb29c0,https://pdfs.semanticscholar.org/2a1c/16f418d8d8e6fa179a8e6a368bb0b47266d0.pdf,,,https://pdfs.semanticscholar.org/2a1c/16f418d8d8e6fa179a8e6a368bb0b47266d0.pdf
+c39ffc56a41d436748b9b57bdabd8248b2d28a32,https://arxiv.org/pdf/1704.06904.pdf,,,https://arxiv.org/pdf/1704.06904.pdf
+c317181fa1de2260e956f05cd655642607520a4f,https://arxiv.org/pdf/1708.07549.pdf,,,https://arxiv.org/pdf/1708.07549.pdf
+c30e4e4994b76605dcb2071954eaaea471307d80,https://pdfs.semanticscholar.org/c30e/4e4994b76605dcb2071954eaaea471307d80.pdf,,,https://pdfs.semanticscholar.org/c30e/4e4994b76605dcb2071954eaaea471307d80.pdf
+c37de914c6e9b743d90e2566723d0062bedc9e6a,https://pdfs.semanticscholar.org/c37d/e914c6e9b743d90e2566723d0062bedc9e6a.pdf,,,https://pdfs.semanticscholar.org/c37d/e914c6e9b743d90e2566723d0062bedc9e6a.pdf
+c4a2cd5ec81cdfd894c9a20d4ffb8cda637aab1f,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5326314,,
+c4cfdcf19705f9095fb60fb2e569a9253a475f11,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237333,,
+c4fb2de4a5dc28710d9880aece321acf68338fde,https://arxiv.org/pdf/1801.09092.pdf,,,https://arxiv.org/pdf/1801.09092.pdf
+c4e2d5ebfebbb9dcee6a9866c3d6290481496df5,,,http://doi.org/10.1007/s00138-012-0439-z,
+c43dc4ae68a317b34a79636fadb3bcc4d1ccb61c,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369763,,
+c47bd9f6eb255da525dbcdfc111609c90bc4d2ae,,,,http://dl.acm.org/citation.cfm?id=3230921
+c4f3185f010027a0a97fcb9753d74eb27a9cfd3e,,,http://doi.org/10.1016/j.patrec.2015.02.006,
+c43862db5eb7e43e3ef45b5eac4ab30e318f2002,https://arxiv.org/pdf/1704.03925.pdf,,,https://arxiv.org/pdf/1704.03925.pdf
+c48b68dc780c71ab0f0f530cd160aa564ed08ade,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1357193,,
+ea46951b070f37ad95ea4ed08c7c2a71be2daedc,https://arxiv.org/pdf/1809.03258.pdf,,,https://arxiv.org/pdf/1809.03258.pdf
+eac6aee477446a67d491ef7c95abb21867cf71fc,https://arxiv.org/pdf/1602.07017.pdf,,,https://arxiv.org/pdf/1602.07017.pdf
+eaf020bc8a3ed5401fc3852f7037a03b2525586a,,,,http://arxiv.org/abs/1710.07735
+ea079334121a0ba89452036e5d7f8e18f6851519,https://arxiv.org/pdf/1708.03615.pdf,,,https://arxiv.org/pdf/1708.03615.pdf
+eac1b644492c10546a50f3e125a1f790ec46365f,https://arxiv.org/pdf/1704.00616.pdf,,,https://arxiv.org/pdf/1704.00616.pdf
+ea80a050d20c0e24e0625a92e5c03e5c8db3e786,https://pdfs.semanticscholar.org/ea80/a050d20c0e24e0625a92e5c03e5c8db3e786.pdf,,,https://pdfs.semanticscholar.org/ea80/a050d20c0e24e0625a92e5c03e5c8db3e786.pdf
+eac97959f2fcd882e8236c5dd6035870878eb36b,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6890147,,
+ea1303f6746f815b7518c82c9c4d4a00cd6328b9,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411434,,
+eacf974e235add458efb815ada1e5b82a05878fa,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4577667,,
+eafda8a94e410f1ad53b3e193ec124e80d57d095,https://pdfs.semanticscholar.org/eafd/a8a94e410f1ad53b3e193ec124e80d57d095.pdf,,,https://pdfs.semanticscholar.org/eafd/a8a94e410f1ad53b3e193ec124e80d57d095.pdf
+ea890846912f16a0f3a860fce289596a7dac575f,https://pdfs.semanticscholar.org/ea89/0846912f16a0f3a860fce289596a7dac575f.pdf,,,https://pdfs.semanticscholar.org/ea89/0846912f16a0f3a860fce289596a7dac575f.pdf
+eaaed082762337e7c3f8a1b1dfea9c0d3ca281bf,https://pdfs.semanticscholar.org/eaae/d082762337e7c3f8a1b1dfea9c0d3ca281bf.pdf,,,https://pdfs.semanticscholar.org/eaae/d082762337e7c3f8a1b1dfea9c0d3ca281bf.pdf
+ea03a569272d329090fe60d6bff8d119e18057d7,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7532906,,
+e1630014a5ae3d2fb7ff6618f1470a567f4d90f5,https://arxiv.org/pdf/1602.04364.pdf,,,https://arxiv.org/pdf/1602.04364.pdf
+e19fb22b35c352f57f520f593d748096b41a4a7b,https://pdfs.semanticscholar.org/cbd8/716132ed289d21bdc2e031b7dea4849aae5d.pdf,,,https://pdfs.semanticscholar.org/cbd8/716132ed289d21bdc2e031b7dea4849aae5d.pdf
+e1312b0b0fd660de87fa42de39316b28f9336e70,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369055,,
+e1d1540a718bb7a933e21339f1a2d90660af7353,,,http://doi.org/10.1007/s11063-018-9852-2,
+e1179a5746b4bf12e1c8a033192326bf7f670a4d,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163104,,
+e19ebad4739d59f999d192bac7d596b20b887f78,https://arxiv.org/pdf/1709.03655.pdf,,,https://arxiv.org/pdf/1709.03655.pdf
+e16f73f3a63c44cf285b8c1bc630eb8377b85b6d,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373816,,
+e14cc2715b806288fe457d88c1ad07ef55c65318,,,,http://dl.acm.org/citation.cfm?id=2830583
+e1d726d812554f2b2b92cac3a4d2bec678969368,https://pdfs.semanticscholar.org/c134/a2441bc1f3ec6b85f22868284c279881b918.pdf,,,https://pdfs.semanticscholar.org/c134/a2441bc1f3ec6b85f22868284c279881b918.pdf
+e1256ff535bf4c024dd62faeb2418d48674ddfa2,https://arxiv.org/pdf/1803.11182.pdf,,,https://arxiv.org/pdf/1803.11182.pdf
+e180572400b64860e190a8bc04ef839fa491e056,,,http://doi.org/10.1038/s41598-017-12097-w,
+cdcfc75f54405c77478ab776eb407c598075d9f8,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7410829,,
+cdc7bd87a2c9983dab728dbc8aac74d8c9ed7e66,https://pdfs.semanticscholar.org/cdc7/bd87a2c9983dab728dbc8aac74d8c9ed7e66.pdf,,,https://pdfs.semanticscholar.org/cdc7/bd87a2c9983dab728dbc8aac74d8c9ed7e66.pdf
+cd4941cbef1e27d7afdc41b48c1aff5338aacf06,https://arxiv.org/pdf/1712.06761.pdf,,,https://arxiv.org/pdf/1712.06761.pdf
+cdef0eaff4a3c168290d238999fc066ebc3a93e8,https://arxiv.org/pdf/1707.07391.pdf,,,https://arxiv.org/pdf/1707.07391.pdf
+cd444ee7f165032b97ee76b21b9ff58c10750570,https://pdfs.semanticscholar.org/cd44/4ee7f165032b97ee76b21b9ff58c10750570.pdf,,,https://pdfs.semanticscholar.org/cd44/4ee7f165032b97ee76b21b9ff58c10750570.pdf
+cd23dc3227ee2a3ab0f4de1817d03ca771267aeb,https://pdfs.semanticscholar.org/cd23/dc3227ee2a3ab0f4de1817d03ca771267aeb.pdf,,,https://pdfs.semanticscholar.org/cd23/dc3227ee2a3ab0f4de1817d03ca771267aeb.pdf
+cd22e6532211f679ba6057d15a801ba448b9915c,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8434092,,
+cd55fb30737625e86454a2861302b96833ed549d,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139094,,
+cd63759842a56bd2ede3999f6e11a74ccbec318b,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995404,,
+cd2f8d661ea2c6d6818a278eb4f0548751c3b1ae,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7945277,,
+cd2c54705c455a4379f45eefdf32d8d10087e521,https://arxiv.org/pdf/1804.04779.pdf,,,https://arxiv.org/pdf/1804.04779.pdf
+cd7a7be3804fd217e9f10682e0c0bfd9583a08db,https://arxiv.org/pdf/1807.00517.pdf,,,https://arxiv.org/pdf/1807.00517.pdf
+cdae8e9cc9d605856cf5709b2fdf61f722d450c1,,,,
+cd023d2d067365c83d8e27431e83e7e66082f718,https://arxiv.org/pdf/1804.06039.pdf,,,https://arxiv.org/pdf/1804.06039.pdf
+cca9ae621e8228cfa787ec7954bb375536160e0d,https://arxiv.org/pdf/1805.07410.pdf,,,https://arxiv.org/pdf/1805.07410.pdf
+cc9d068cf6c4a30da82fd6350a348467cb5086d4,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411204,,
+ccb2ecb30a50460c9189bb55ba594f2300882747,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8334751,,
+cccd0edb5dafb3a160179a60f75fd8c835c0be82,,,http://doi.org/10.1007/s12193-017-0241-3,
+cc8e378fd05152a81c2810f682a78c5057c8a735,https://pdfs.semanticscholar.org/cc8e/378fd05152a81c2810f682a78c5057c8a735.pdf,,,https://pdfs.semanticscholar.org/cc8e/378fd05152a81c2810f682a78c5057c8a735.pdf
+cc31db984282bb70946f6881bab741aa841d3a7c,https://arxiv.org/pdf/1610.02255.pdf,,,https://arxiv.org/pdf/1610.02255.pdf
+cc05f758ccdf57d77b06b96b9d601bf2795a6cc4,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6854428,,
+cce332405ce9cd9dccc45efac26d1d614eaa982d,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5597533,,
+ccb54fc5f263a8bc2a8373839cb6855f528f10d3,,,http://doi.org/10.1016/j.patcog.2015.11.008,
+cc2a9f4be1e465cb4ba702539f0f088ac3383834,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7344595,,
+ccf16bcf458e4d7a37643b8364594656287f5bfc,https://pdfs.semanticscholar.org/ccf1/6bcf458e4d7a37643b8364594656287f5bfc.pdf,,,https://pdfs.semanticscholar.org/ccf1/6bcf458e4d7a37643b8364594656287f5bfc.pdf
+e6b45d5a86092bbfdcd6c3c54cda3d6c3ac6b227,https://arxiv.org/pdf/1808.04976.pdf,,,https://arxiv.org/pdf/1808.04976.pdf
+e6d6203fa911429d76f026e2ec2de260ec520432,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7899663,,
+e6e5a6090016810fb902b51d5baa2469ae28b8a1,https://pdfs.semanticscholar.org/e6e5/a6090016810fb902b51d5baa2469ae28b8a1.pdf,,,https://pdfs.semanticscholar.org/e6e5/a6090016810fb902b51d5baa2469ae28b8a1.pdf
+e6da1fcd2a8cda0c69b3d94812caa7d844903007,,,,http://dl.acm.org/citation.cfm?id=3137154
+e68869499471bcd6fa8b4dc02aa00633673c0917,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5595885,,
+f9bce7bd7909f1c75dbeb44900d374bc89072df0,,,,
+f9d9b2a1197cdb73e977490756c0ff8a30cafc3e,,,http://doi.org/10.1007/s11042-018-6110-6,
+f92ade569cbe54344ffd3bb25efd366dcd8ad659,https://arxiv.org/pdf/1704.01464.pdf,,,https://arxiv.org/pdf/1704.01464.pdf
+f94f366ce14555cf0d5d34248f9467c18241c3ee,https://pdfs.semanticscholar.org/f94f/366ce14555cf0d5d34248f9467c18241c3ee.pdf,,,https://pdfs.semanticscholar.org/f94f/366ce14555cf0d5d34248f9467c18241c3ee.pdf
+f997a71f1e54d044184240b38d9dc680b3bbbbc0,https://arxiv.org/pdf/1807.11688.pdf,,,https://arxiv.org/pdf/1807.11688.pdf
+f08e425c2fce277aedb51d93757839900d591008,https://arxiv.org/pdf/1711.06640.pdf,,,https://arxiv.org/pdf/1711.06640.pdf
+f03a82fd4a039c1b94a0e8719284a777f776fb22,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8355453,,
+f0cee87e9ecedeb927664b8da44b8649050e1c86,https://arxiv.org/pdf/1805.02901.pdf,,,https://arxiv.org/pdf/1805.02901.pdf
+f0f4f16d5b5f9efe304369120651fa688a03d495,https://pdfs.semanticscholar.org/f0f4/f16d5b5f9efe304369120651fa688a03d495.pdf,,,https://pdfs.semanticscholar.org/f0f4/f16d5b5f9efe304369120651fa688a03d495.pdf
+f0ca31fd5cad07e84b47d50dc07db9fc53482a46,https://pdfs.semanticscholar.org/f0ca/31fd5cad07e84b47d50dc07db9fc53482a46.pdf,,,https://pdfs.semanticscholar.org/f0ca/31fd5cad07e84b47d50dc07db9fc53482a46.pdf
+f0a4a3fb6997334511d7b8fc090f9ce894679faf,https://arxiv.org/pdf/1704.05838.pdf,,,https://arxiv.org/pdf/1704.05838.pdf
+f0398ee5291b153b716411c146a17d4af9cb0edc,https://arxiv.org/pdf/1805.02733.pdf,,,https://arxiv.org/pdf/1805.02733.pdf
+f0f0e94d333b4923ae42ee195df17c0df62ea0b1,https://pdfs.semanticscholar.org/f0f0/e94d333b4923ae42ee195df17c0df62ea0b1.pdf,,,https://pdfs.semanticscholar.org/f0f0/e94d333b4923ae42ee195df17c0df62ea0b1.pdf
+f095b5770f0ff13ba9670e3d480743c5e9ad1036,,,http://doi.org/10.1007/s11263-016-0950-1,
+f0f854f8cfe826fd08385c0c3c8097488f468076,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406454,,
+f02a6bccdaee14ab55ad94263539f4f33f1b15bb,https://pdfs.semanticscholar.org/f02a/6bccdaee14ab55ad94263539f4f33f1b15bb.pdf,,,https://pdfs.semanticscholar.org/f02a/6bccdaee14ab55ad94263539f4f33f1b15bb.pdf
+f070d739fb812d38571ec77490ccd8777e95ce7a,,,http://doi.org/10.1016/j.patcog.2014.09.007,
+f7ae38a073be7c9cd1b92359131b9c8374579b13,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7487053,,
+f7dea4454c2de0b96ab5cf95008ce7144292e52a,https://arxiv.org/pdf/1805.05563.pdf,,,https://arxiv.org/pdf/1805.05563.pdf
+f7b4bc4ef14349a6e66829a0101d5b21129dcf55,https://pdfs.semanticscholar.org/f7b4/bc4ef14349a6e66829a0101d5b21129dcf55.pdf,,,https://pdfs.semanticscholar.org/f7b4/bc4ef14349a6e66829a0101d5b21129dcf55.pdf
+f7b422df567ce9813926461251517761e3e6cda0,https://arxiv.org/pdf/1702.01983.pdf,,,https://arxiv.org/pdf/1702.01983.pdf
+f7824758800a7b1a386db5bd35f84c81454d017a,https://arxiv.org/pdf/1702.05085.pdf,,,https://arxiv.org/pdf/1702.05085.pdf
+f76a6b1d6029769e2dc1be4dadbee6a7ba777429,,,http://doi.org/10.1007/s12559-017-9506-0,
+f78fe101b21be36e98cd3da010051bb9b9829a1e,https://pdfs.semanticscholar.org/f78f/e101b21be36e98cd3da010051bb9b9829a1e.pdf,,,https://pdfs.semanticscholar.org/f78f/e101b21be36e98cd3da010051bb9b9829a1e.pdf
+f79c97e7c3f9a98cf6f4a5d2431f149ffacae48f,https://pdfs.semanticscholar.org/f79c/97e7c3f9a98cf6f4a5d2431f149ffacae48f.pdf,,,https://pdfs.semanticscholar.org/f79c/97e7c3f9a98cf6f4a5d2431f149ffacae48f.pdf
+f7a271acccf9ec66c9b114d36eec284fbb89c7ef,https://pdfs.semanticscholar.org/f7a2/71acccf9ec66c9b114d36eec284fbb89c7ef.pdf,,,https://pdfs.semanticscholar.org/f7a2/71acccf9ec66c9b114d36eec284fbb89c7ef.pdf
+f7dcadc5288653ec6764600c7c1e2b49c305dfaa,https://pdfs.semanticscholar.org/f7dc/adc5288653ec6764600c7c1e2b49c305dfaa.pdf,,,https://pdfs.semanticscholar.org/f7dc/adc5288653ec6764600c7c1e2b49c305dfaa.pdf
+f7be8956639e66e534ed6195d929aed4e0b90cad,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4117059,,
+f7ba77d23a0eea5a3034a1833b2d2552cb42fb7a,https://arxiv.org/pdf/1611.06179.pdf,,,https://arxiv.org/pdf/1611.06179.pdf
+e8fdacbd708feb60fd6e7843b048bf3c4387c6db,https://pdfs.semanticscholar.org/e8fd/acbd708feb60fd6e7843b048bf3c4387c6db.pdf,,,https://pdfs.semanticscholar.org/e8fd/acbd708feb60fd6e7843b048bf3c4387c6db.pdf
+e8aa1f207b4b0bb710f79ab47a671d5639696a56,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7362364,,
+e87d6c284cdd6828dfe7c092087fbd9ff5091ee4,https://arxiv.org/pdf/1704.05693.pdf,,,https://arxiv.org/pdf/1704.05693.pdf
+e853484dc585bed4b0ed0c5eb4bc6d9d93a16211,,,,http://dl.acm.org/citation.cfm?id=3130971
+e8523c4ac9d7aa21f3eb4062e09f2a3bc1eedcf7,https://arxiv.org/pdf/1701.07174.pdf,,,https://arxiv.org/pdf/1701.07174.pdf
+e8f4ded98f5955aad114f55e7aca6b540599236b,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7047804,,
+e896389891ba84af58a8c279cf8ab5de3e9320ee,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6958874,,
+e85a255a970ee4c1eecc3e3d110e157f3e0a4629,https://arxiv.org/pdf/1803.03415.pdf,,,https://arxiv.org/pdf/1803.03415.pdf
+e8d1b134d48eb0928bc999923a4e092537e106f6,https://pdfs.semanticscholar.org/e8d1/b134d48eb0928bc999923a4e092537e106f6.pdf,,,https://pdfs.semanticscholar.org/e8d1/b134d48eb0928bc999923a4e092537e106f6.pdf
+e8c6c3fc9b52dffb15fe115702c6f159d955d308,https://pdfs.semanticscholar.org/d927/77953677da471c060cbabc2c5b15de8d60b2.pdf,,,https://pdfs.semanticscholar.org/d927/77953677da471c060cbabc2c5b15de8d60b2.pdf
+e8b3a257a0a44d2859862cdec91c8841dc69144d,https://arxiv.org/pdf/1808.01725.pdf,,,https://arxiv.org/pdf/1808.01725.pdf
+fa90b825346a51562d42f6b59a343b98ea2e501a,https://arxiv.org/pdf/1805.06533.pdf,,,https://arxiv.org/pdf/1805.06533.pdf
+fa4f59397f964a23e3c10335c67d9a24ef532d5c,https://arxiv.org/pdf/1602.03346.pdf,,,https://arxiv.org/pdf/1602.03346.pdf
+fa052fd40e717773c6dc9cc4a2f5c10b8760339f,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5595883,,
+fac8cff9052fc5fab7d5ef114d1342daba5e4b82,https://pdfs.semanticscholar.org/fac8/cff9052fc5fab7d5ef114d1342daba5e4b82.pdf,,,https://pdfs.semanticscholar.org/fac8/cff9052fc5fab7d5ef114d1342daba5e4b82.pdf
+fa641327dc5873276f0af453a2caa1634c16f143,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789590,,
+fa80344137c4d158bf59be4ac5591d074483157a,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1470219,,
+faf5583063682e70dedc4466ac0f74eeb63169e7,https://pdfs.semanticscholar.org/6ca0/be5608fc00181596e562eb867eeb8cb43a4a.pdf,,,https://pdfs.semanticscholar.org/6ca0/be5608fc00181596e562eb867eeb8cb43a4a.pdf
+fa32b29e627086d4302db4d30c07a9d11dcd6b84,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354123,,
+fab60b3db164327be8588bce6ce5e45d5b882db6,https://pdfs.semanticscholar.org/fab6/0b3db164327be8588bce6ce5e45d5b882db6.pdf,,,https://pdfs.semanticscholar.org/fab6/0b3db164327be8588bce6ce5e45d5b882db6.pdf
+fad895771260048f58d12158a4d4d6d0623f4158,https://pdfs.semanticscholar.org/fad8/95771260048f58d12158a4d4d6d0623f4158.pdf,,,https://pdfs.semanticscholar.org/fad8/95771260048f58d12158a4d4d6d0623f4158.pdf
+ffea8775fc9c32f573d1251e177cd283b4fe09c9,https://arxiv.org/pdf/1804.04418.pdf,,,https://arxiv.org/pdf/1804.04418.pdf
+fffefc1fb840da63e17428fd5de6e79feb726894,https://arxiv.org/pdf/1805.10445.pdf,,,https://arxiv.org/pdf/1805.10445.pdf
+ff76ff05aa1ab17e5ca9864df2252e6bb44c8a17,,,,http://dl.acm.org/citation.cfm?id=3173582
+ffe4bb47ec15f768e1744bdf530d5796ba56cfc1,https://arxiv.org/pdf/1706.04277.pdf,,,https://arxiv.org/pdf/1706.04277.pdf
+ff8ef43168b9c8dd467208a0b1b02e223b731254,https://arxiv.org/pdf/1603.07141.pdf,,,https://arxiv.org/pdf/1603.07141.pdf
+ff9195f99a1a28ced431362f5363c9a5da47a37b,https://pdfs.semanticscholar.org/ff91/95f99a1a28ced431362f5363c9a5da47a37b.pdf,,,https://pdfs.semanticscholar.org/ff91/95f99a1a28ced431362f5363c9a5da47a37b.pdf
+ffc81ced9ee8223ab0adb18817321cbee99606e6,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791157,,
+fffe5ab3351deab81f7562d06764551422dbd9c4,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163114,,
+ff012c56b9b1de969328dacd13e26b7138ff298b,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7762921,,
+c588c89a72f89eed29d42f34bfa5d4cffa530732,https://arxiv.org/pdf/1705.01734.pdf,,,https://arxiv.org/pdf/1705.01734.pdf
+c5c53d42e551f3c8f6ca2c13335af80a882009fa,,,http://doi.org/10.1007/s11263-018-1088-0,
+c5e37630d0672e4d44f7dee83ac2c1528be41c2e,,,,http://dl.acm.org/citation.cfm?id=3078973
+c535d4d61aa0f1d8aadb4082bdcc19f4cbdf0eaf,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237344,,
+c5ea084531212284ce3f1ca86a6209f0001de9d1,https://pdfs.semanticscholar.org/c5ea/084531212284ce3f1ca86a6209f0001de9d1.pdf,,,https://pdfs.semanticscholar.org/c5ea/084531212284ce3f1ca86a6209f0001de9d1.pdf
+c26b43c2e1e2da96e7caabd46e1d7314acac0992,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8466510,,
+c254b4c0f6d5a5a45680eb3742907ec93c3a222b,https://arxiv.org/pdf/1711.06451.pdf,,,https://arxiv.org/pdf/1711.06451.pdf
+c29fe5ed41d2240352fcb8d8196eb2f31d009522,,,http://doi.org/10.1007/s11042-015-3230-0,
+c2bd9322fa2d0a00fc62075cc0f1996fc75d42a8,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014811,,
+f60a85bd35fa85739d712f4c93ea80d31aa7de07,https://arxiv.org/pdf/1710.06924.pdf,,,https://arxiv.org/pdf/1710.06924.pdf
+f64574ee0e6247b84d573ddb5c6e2c4ba798ffff,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1699435,,
+f6f06be05981689b94809130e251f9e4bf932660,https://pdfs.semanticscholar.org/fa86/ec19c1aec46202e0df12d209eb8062d53f7b.pdf,,,https://pdfs.semanticscholar.org/fa86/ec19c1aec46202e0df12d209eb8062d53f7b.pdf
+f68ed499e9d41f9c3d16d843db75dc12833d988d,https://arxiv.org/pdf/1805.05029.pdf,,,https://arxiv.org/pdf/1805.05029.pdf
+f6fc112ff7e4746b040c13f28700a9c47992045e,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7442559,,
+f6532bf13a4649b7599eb40f826aa5281e392c61,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6202713,,
+f61829274cfe64b94361e54351f01a0376cd1253,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7410784,,
+f6f2a212505a118933ef84110e487551b6591553,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952474,,
+f6149fc5b39fa6b33220ccee32a8ee3f6bbcaf4a,https://arxiv.org/pdf/1806.09755.pdf,,,https://arxiv.org/pdf/1806.09755.pdf
+f6ce34d6e4e445cc2c8a9b8ba624e971dd4144ca,https://arxiv.org/pdf/1705.02928.pdf,,,https://arxiv.org/pdf/1705.02928.pdf
+f61d5f2a082c65d5330f21b6f36312cc4fab8a3b,https://arxiv.org/pdf/1705.08841.pdf,,,https://arxiv.org/pdf/1705.08841.pdf
+f65b47093e4d45013f54c3ba09bbcce7140af6bb,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8354117,,
+f6cf2108ec9d0f59124454d88045173aa328bd2e,https://pdfs.semanticscholar.org/f6cf/2108ec9d0f59124454d88045173aa328bd2e.pdf,,,https://pdfs.semanticscholar.org/f6cf/2108ec9d0f59124454d88045173aa328bd2e.pdf
+f6e00d6430cbbaa64789d826d093f7f3e323b082,https://pdfs.semanticscholar.org/5255/490925aa1e01ac0b9a55e93ec8c82efc07b7.pdf,,,https://pdfs.semanticscholar.org/5255/490925aa1e01ac0b9a55e93ec8c82efc07b7.pdf
+e9a5a38e7da3f0aa5d21499149536199f2e0e1f7,https://pdfs.semanticscholar.org/e9a5/a38e7da3f0aa5d21499149536199f2e0e1f7.pdf,,,https://pdfs.semanticscholar.org/e9a5/a38e7da3f0aa5d21499149536199f2e0e1f7.pdf
+e988be047b28ba3b2f1e4cdba3e8c94026139fcf,https://arxiv.org/pdf/1702.04710.pdf,,,https://arxiv.org/pdf/1702.04710.pdf
+e9809c0c6bf33cfe232a63b0a13f9b1263c58cb8,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7172556,,
+e9d43231a403b4409633594fa6ccc518f035a135,https://pdfs.semanticscholar.org/e9d4/3231a403b4409633594fa6ccc518f035a135.pdf,,,https://pdfs.semanticscholar.org/e9d4/3231a403b4409633594fa6ccc518f035a135.pdf
+e97ba85a4550667b8a28f83a98808d489e0ff3bc,,,http://doi.org/10.1155/2018%2F9729014,
+e9b0a27018c7151016a9fe01c98b4c21d6ebf4be,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7471957,,
+e90e12e77cab78ba8f8f657db2bf4ae3dabd5166,https://arxiv.org/pdf/1712.02979.pdf,,,https://arxiv.org/pdf/1712.02979.pdf
+e96cef8732f3021080c362126518455562606f2d,,,,http://dl.acm.org/citation.cfm?id=3206058
+e9c008d31da38d9eef67a28d2c77cb7daec941fb,https://arxiv.org/pdf/1708.03769.pdf,,,https://arxiv.org/pdf/1708.03769.pdf
+e9363f4368b04aeaa6d6617db0a574844fc59338,https://arxiv.org/pdf/1710.08315.pdf,,,https://arxiv.org/pdf/1710.08315.pdf
+f1ae9f5338fcff577b1ae9becdb66007fe57bd45,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099873,,
+f1250900074689061196d876f551ba590fc0a064,https://arxiv.org/pdf/1710.07354.pdf,,,https://arxiv.org/pdf/1710.07354.pdf
+f1b4583c576d6d8c661b4b2c82bdebf3ba3d7e53,https://arxiv.org/pdf/1707.05653.pdf,,,https://arxiv.org/pdf/1707.05653.pdf
+f1aa120fb720f6cfaab13aea4b8379275e6d40a2,https://pdfs.semanticscholar.org/f1aa/120fb720f6cfaab13aea4b8379275e6d40a2.pdf,,,https://pdfs.semanticscholar.org/f1aa/120fb720f6cfaab13aea4b8379275e6d40a2.pdf
+f16599e4ec666c6390c90ff9a253162178a70ef5,,,,http://dl.acm.org/citation.cfm?id=3206050
+f1ba2fe3491c715ded9677862fea966b32ca81f0,https://pdfs.semanticscholar.org/f1ba/2fe3491c715ded9677862fea966b32ca81f0.pdf,,,https://pdfs.semanticscholar.org/f1ba/2fe3491c715ded9677862fea966b32ca81f0.pdf
+f1280f76933ba8b7f4a6b8662580504f02bb4ab6,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7836703,,
+f113aed343bcac1021dc3e57ba6cc0647a8f5ce1,https://pdfs.semanticscholar.org/f113/aed343bcac1021dc3e57ba6cc0647a8f5ce1.pdf,,,https://pdfs.semanticscholar.org/f113/aed343bcac1021dc3e57ba6cc0647a8f5ce1.pdf
+f1173a4c5e3501323b37c1ae9a6d7dd8a236eab8,,,,http://arxiv.org/abs/1504.07339
+f11c76efdc9651db329c8c862652820d61933308,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163100,,
+e7cac91da51b78eb4a28e194d3f599f95742e2a2,https://pdfs.semanticscholar.org/e7ca/c91da51b78eb4a28e194d3f599f95742e2a2.pdf,,,https://pdfs.semanticscholar.org/e7ca/c91da51b78eb4a28e194d3f599f95742e2a2.pdf
+e75a589ca27dc4f05c2715b9d54206dee37af266,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8409973,,
+e7cfaff65541cde4298a04882e00608d992f6703,,,http://doi.org/10.1007/s00521-018-3554-6,
+e7b2b0538731adaacb2255235e0a07d5ccf09189,https://arxiv.org/pdf/1803.10837.pdf,,,https://arxiv.org/pdf/1803.10837.pdf
+e7697c7b626ba3a426106d83f4c3a052fcde02a4,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553713,,
+e79bacc03152ea55343e6af97bcd17d8904cf5ef,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237669,,
+cb8382f43ce073322eba82809f02d3084dad7969,,,,http://dl.acm.org/citation.cfm?id=3232664
+cbca355c5467f501d37b919d8b2a17dcb39d3ef9,https://pdfs.semanticscholar.org/cbca/355c5467f501d37b919d8b2a17dcb39d3ef9.pdf,,,https://pdfs.semanticscholar.org/cbca/355c5467f501d37b919d8b2a17dcb39d3ef9.pdf
+cbbd9880fb28bef4e33da418a3795477d3a1616e,,,http://doi.org/10.1016/j.patcog.2016.02.002,
+cbe021d840f9fc1cb191cba79d3f7e3bbcda78d3,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406479,,
+cb522b2e16b11dde48203bef97131ddca3cdaebd,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8331979,,
+cbbd13c29d042743f0139f1e044b6bca731886d0,https://pdfs.semanticscholar.org/cbbd/13c29d042743f0139f1e044b6bca731886d0.pdf,,,https://pdfs.semanticscholar.org/cbbd/13c29d042743f0139f1e044b6bca731886d0.pdf
+cb004e9706f12d1de83b88c209ac948b137caae0,https://arxiv.org/pdf/1511.01186.pdf,,,https://arxiv.org/pdf/1511.01186.pdf
+cb2917413c9b36c3bb9739bce6c03a1a6eb619b3,https://pdfs.semanticscholar.org/cb29/17413c9b36c3bb9739bce6c03a1a6eb619b3.pdf,,,https://pdfs.semanticscholar.org/cb29/17413c9b36c3bb9739bce6c03a1a6eb619b3.pdf
+cbfcd1ec8aa30e31faf205c73d350d447704afee,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7955089,,
+cb13e29fb8af6cfca568c6dc523da04d1db1fff5,https://arxiv.org/pdf/1806.05781.pdf,,,https://arxiv.org/pdf/1806.05781.pdf
+cb8a1b8d87a3fef15635eb4a32173f9c6f966055,,,,http://dl.acm.org/citation.cfm?id=3234150
+cb27b45329d61f5f95ed213798d4b2a615e76be2,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8329236,,
+cbb27980eb04f68d9f10067d3d3c114efa9d0054,https://arxiv.org/pdf/1807.03380.pdf,,,https://arxiv.org/pdf/1807.03380.pdf
+cb2470aade8e5630dcad5e479ab220db94ecbf91,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8397018,,
+f842b13bd494be1bbc1161dc6df244340b28a47f,https://pdfs.semanticscholar.org/f842/b13bd494be1bbc1161dc6df244340b28a47f.pdf,,,https://pdfs.semanticscholar.org/f842/b13bd494be1bbc1161dc6df244340b28a47f.pdf
+f85ccab7173e543f2bfd4c7a81fb14e147695740,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5946910,,
+f8162276f3b21a3873dde7a507fd68b4ab858bcc,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4761923,,
+f8f872044be2918de442ba26a30336d80d200c42,https://pdfs.semanticscholar.org/f8f8/72044be2918de442ba26a30336d80d200c42.pdf,,,https://pdfs.semanticscholar.org/f8f8/72044be2918de442ba26a30336d80d200c42.pdf
+f87b22e7f0c66225824a99cada71f9b3e66b5742,https://arxiv.org/pdf/1709.03126.pdf,,,https://arxiv.org/pdf/1709.03126.pdf
+cef73d305e5368ee269baff53ec20ea3ae7cdd82,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8461485,,
+cec70cf159b51a18b39c80fac1ad34f65f3691ef,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7949100,,
+cea2911ccabab40e9c1e5bcc0aa1127cab0c789f,,,http://doi.org/10.1007/s11042-015-2847-3,
+cec8936d97dea2fcf04f175d3facaaeb65e574bf,,,,http://dl.acm.org/citation.cfm?id=3134264
+ceaa5eb51f761b5f84bd88b58c8f484fcd2a22d6,https://pdfs.semanticscholar.org/535a/b2d3a443235ef98d818f133a26c7445214a7.pdf,,,https://pdfs.semanticscholar.org/535a/b2d3a443235ef98d818f133a26c7445214a7.pdf
+ce70dd0d613b840754dce528c14c0ebadd20ffaa,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7973159,,
+ceba8ca45bad226c401a509e6b8ccbf31361b0c9,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7129813,,
+ce450e4849490924488664b44769b4ca57f1bc1a,https://arxiv.org/pdf/1612.00881.pdf,,,https://arxiv.org/pdf/1612.00881.pdf
+ce032dae834f383125cdd852e7c1bc793d4c3ba3,https://pdfs.semanticscholar.org/e459/e31cfd985ec0031d5e9ff4896a84ebaff972.pdf,,,https://pdfs.semanticscholar.org/e459/e31cfd985ec0031d5e9ff4896a84ebaff972.pdf
+ce9e1dfa7705623bb67df3a91052062a0a0ca456,https://arxiv.org/pdf/1611.05507.pdf,,,https://arxiv.org/pdf/1611.05507.pdf
+ce75deb5c645eeb08254e9a7962c74cab1e4c480,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373839,,
+ced7811f2b694e54e3d96ec5398e4b6afca67fc0,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1605391,,
+ce2945e369603fcec1fcdc6e19aac5996325cba9,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771366,,
+e03bda45248b4169e2a20cb9124ae60440cad2de,https://pdfs.semanticscholar.org/0434/9d5d7c72d7fa3d1427b7afbfaa3ae07992ed.pdf,,,https://pdfs.semanticscholar.org/0434/9d5d7c72d7fa3d1427b7afbfaa3ae07992ed.pdf
+e03e86ac61cfac9148b371d75ce81a55e8b332ca,https://pdfs.semanticscholar.org/e03e/86ac61cfac9148b371d75ce81a55e8b332ca.pdf,,,https://pdfs.semanticscholar.org/e03e/86ac61cfac9148b371d75ce81a55e8b332ca.pdf
+e096b11b3988441c0995c13742ad188a80f2b461,https://arxiv.org/pdf/1606.04702.pdf,,,https://arxiv.org/pdf/1606.04702.pdf
+e060e32f8ad98f10277b582393df50ac17f2836c,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099600,,
+e0162dea3746d58083dd1d061fb276015d875b2e,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014992,,
+e01bb53b611c679141494f3ffe6f0b91953af658,https://arxiv.org/pdf/1711.10703.pdf,,,https://arxiv.org/pdf/1711.10703.pdf
+e0939b4518a5ad649ba04194f74f3413c793f28e,https://pdfs.semanticscholar.org/02ce/655ade8d052d099ae145afd032eb39d089b4.pdf,,,https://pdfs.semanticscholar.org/02ce/655ade8d052d099ae145afd032eb39d089b4.pdf
+e00d391d7943561f5c7b772ab68e2bb6a85e64c4,https://pdfs.semanticscholar.org/e00d/391d7943561f5c7b772ab68e2bb6a85e64c4.pdf,,,https://pdfs.semanticscholar.org/e00d/391d7943561f5c7b772ab68e2bb6a85e64c4.pdf
+e065a2cb4534492ccf46d0afc81b9ad8b420c5ec,https://arxiv.org/pdf/1804.06559.pdf,,,https://arxiv.org/pdf/1804.06559.pdf
+e00241f00fb31c660df6c6f129ca38370e6eadb3,https://arxiv.org/pdf/1801.01415.pdf,,,https://arxiv.org/pdf/1801.01415.pdf
+e0244a8356b57a5721c101ead351924bcfb2eef4,https://pdfs.semanticscholar.org/e024/4a8356b57a5721c101ead351924bcfb2eef4.pdf,,,https://pdfs.semanticscholar.org/e024/4a8356b57a5721c101ead351924bcfb2eef4.pdf
+46f48211716062744ddec5824e9de9322704dea1,,,http://doi.org/10.1007/s11263-016-0923-4,
+46f2611dc4a9302e0ac00a79456fa162461a8c80,https://arxiv.org/pdf/1806.07754.pdf,,,https://arxiv.org/pdf/1806.07754.pdf
+468bb5344f74842a9a43a7e1a3333ebd394929b4,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373896,,
+46e72046a9bb2d4982d60bcf5c63dbc622717f0f,https://arxiv.org/pdf/1605.02424.pdf,,,https://arxiv.org/pdf/1605.02424.pdf
+46e0703044811c941f0b5418139f89d46b360aa3,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7883945,,
+4641986af5fc8836b2c883ea1a65278d58fe4577,https://arxiv.org/pdf/1701.02426.pdf,,,https://arxiv.org/pdf/1701.02426.pdf
+464b3f0824fc1c3a9eaf721ce2db1b7dfe7cb05a,https://arxiv.org/pdf/1808.07272.pdf,,,https://arxiv.org/pdf/1808.07272.pdf
+4686df20f0ee40cd411e4b43860ef56de5531d9e,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1301536,,
+46c82cfadd9f885f5480b2d7155f0985daf949fc,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780537,,
+46976097c54e86032932d559c8eb82ffea4bb6bb,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6738868,,
+2c424f21607ff6c92e640bfe3da9ff105c08fac4,https://pdfs.semanticscholar.org/3f25/e17eb717e5894e0404ea634451332f85d287.pdf,,,https://pdfs.semanticscholar.org/3f25/e17eb717e5894e0404ea634451332f85d287.pdf
+2c93c8da5dfe5c50119949881f90ac5a0a4f39fe,https://arxiv.org/pdf/1805.01951.pdf,,,https://arxiv.org/pdf/1805.01951.pdf
+2cac8ab4088e2bdd32dcb276b86459427355085c,https://pdfs.semanticscholar.org/2cac/8ab4088e2bdd32dcb276b86459427355085c.pdf,,,https://pdfs.semanticscholar.org/2cac/8ab4088e2bdd32dcb276b86459427355085c.pdf
+2cde051e04569496fb525d7f1b1e5ce6364c8b21,https://arxiv.org/pdf/1505.02890.pdf,,,https://arxiv.org/pdf/1505.02890.pdf
+2c7185bcf31a4950b014b67ca7c63735ee00d56f,,,,
+2c1ffb0feea5f707c890347d2c2882be0494a67a,https://arxiv.org/pdf/1807.08919.pdf,,,https://arxiv.org/pdf/1807.08919.pdf
+2c5d1e0719f3ad7f66e1763685ae536806f0c23b,https://arxiv.org/pdf/1701.00599.pdf,,,https://arxiv.org/pdf/1701.00599.pdf
+2c1f8ddbfbb224271253a27fed0c2425599dfe47,https://arxiv.org/pdf/1708.07689.pdf,,,https://arxiv.org/pdf/1708.07689.pdf
+2c052a1c77a3ec2604b3deb702d77c41418c7d3e,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373863,,
+2c62b9e64aeddf12f9d399b43baaefbca8e11148,https://arxiv.org/pdf/1803.05536.pdf,,,https://arxiv.org/pdf/1803.05536.pdf
+2c19d3d35ef7062061b9e16d040cebd7e45f281d,https://arxiv.org/pdf/1711.04161.pdf,,,https://arxiv.org/pdf/1711.04161.pdf
+2ce1bac5ddc4cf668bbbb8879cd21dfb94b5cfe4,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099709,,
+79f6a8f777a11fd626185ab549079236629431ac,https://pdfs.semanticscholar.org/79f6/a8f777a11fd626185ab549079236629431ac.pdf,,,https://pdfs.semanticscholar.org/79f6/a8f777a11fd626185ab549079236629431ac.pdf
+7923742e2af655dee4f9a99e39916d164bc30178,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8272743,,
+79dc84a3bf76f1cb983902e2591d913cee5bdb0e,https://pdfs.semanticscholar.org/1e9e/87fc99430a82621810b3ce7db51e339be315.pdf,,,https://pdfs.semanticscholar.org/1e9e/87fc99430a82621810b3ce7db51e339be315.pdf
+79744fc71bea58d2e1918c9e254b10047472bd76,https://arxiv.org/pdf/1802.06713.pdf,,,https://arxiv.org/pdf/1802.06713.pdf
+79c3a7131c6c176b02b97d368cd0cd0bc713ff7e,https://pdfs.semanticscholar.org/538a/30196253e458a2a30d530218ffa449c4d24e.pdf,,,https://pdfs.semanticscholar.org/538a/30196253e458a2a30d530218ffa449c4d24e.pdf
+7914c3f510e84a3d83d66717aad0d852d6a4d148,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7532448,,
+799c02a3cde2c0805ea728eb778161499017396b,https://arxiv.org/pdf/1711.01984.pdf,,,https://arxiv.org/pdf/1711.01984.pdf
+7918e3e15099b4b2943746e1f6c9e3992a79c5f3,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995492,,
+794a51097385648e3909a1acae7188f5ab881710,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813382,,
+79db191ca1268dc88271abef3179c4fe4ee92aed,https://pdfs.semanticscholar.org/79db/191ca1268dc88271abef3179c4fe4ee92aed.pdf,,,https://pdfs.semanticscholar.org/79db/191ca1268dc88271abef3179c4fe4ee92aed.pdf
+2d3af3ee03793f76fb8ff15e7d7515ff1e03f34c,,,http://doi.org/10.1007/s11042-017-4818-3,
+2d9e58ea582e054e9d690afca8b6a554c3687ce6,https://arxiv.org/pdf/1706.08580.pdf,,,https://arxiv.org/pdf/1706.08580.pdf
+2d7c2c015053fff5300515a7addcd74b523f3f66,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8323422,,
+2dbc57abf3ceda80827b85593ce1f457b76a870b,,,http://doi.org/10.1007/s11042-018-6133-z,
+2d8001ffee6584b3f4d951d230dc00a06e8219f8,https://arxiv.org/pdf/1712.00721.pdf,,,https://arxiv.org/pdf/1712.00721.pdf
+2dfe0e7e81f65716b09c590652a4dd8452c10294,https://pdfs.semanticscholar.org/2dfe/0e7e81f65716b09c590652a4dd8452c10294.pdf,,,https://pdfs.semanticscholar.org/2dfe/0e7e81f65716b09c590652a4dd8452c10294.pdf
+2d8d089d368f2982748fde93a959cf5944873673,https://pdfs.semanticscholar.org/2d8d/089d368f2982748fde93a959cf5944873673.pdf,,,https://pdfs.semanticscholar.org/2d8d/089d368f2982748fde93a959cf5944873673.pdf
+2d4a3e9361505616fa4851674eb5c8dd18e0c3cf,https://arxiv.org/pdf/1807.08379.pdf,,,https://arxiv.org/pdf/1807.08379.pdf
+41f26101fed63a8d149744264dd5aa79f1928265,https://arxiv.org/pdf/1604.07602.pdf,,,https://arxiv.org/pdf/1604.07602.pdf
+411318684bd2d42e4b663a37dcf0532a48f0146d,https://pdfs.semanticscholar.org/4e20/8cfff33327863b5aeef0bf9b327798a5610c.pdf,,,https://pdfs.semanticscholar.org/4e20/8cfff33327863b5aeef0bf9b327798a5610c.pdf
+4113269f916117f975d5d2a0e60864735b73c64c,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1613059,,
+414715421e01e8c8b5743c5330e6d2553a08c16d,https://pdfs.semanticscholar.org/4147/15421e01e8c8b5743c5330e6d2553a08c16d.pdf,,,https://pdfs.semanticscholar.org/4147/15421e01e8c8b5743c5330e6d2553a08c16d.pdf
+41c56c69b20b3f0b6c8a625009fc0a4d317e047a,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5720366,,
+41c42cb001f34c43d4d8dd8fb72a982854e173fb,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5308445,,
+414d78e32ac41e6ff8b192bc095fe55f865a02f4,,,,http://arxiv.org/abs/1706.00631
+8356832f883207187437872742d6b7dc95b51fde,https://arxiv.org/pdf/1807.00458.pdf,,,https://arxiv.org/pdf/1807.00458.pdf
+835e510fcf22b4b9097ef51b8d0bb4e7b806bdfd,https://arxiv.org/pdf/1804.00946.pdf,,,https://arxiv.org/pdf/1804.00946.pdf
+83295bce2340cb87901499cff492ae6ff3365475,https://arxiv.org/pdf/1808.01558.pdf,,,https://arxiv.org/pdf/1808.01558.pdf
+83e96ed8a4663edaa3a5ca90b7ce75a1bb595b05,https://pdfs.semanticscholar.org/d726/6bf19e202f62f31c363a5a5656c67c03118b.pdf,,,https://pdfs.semanticscholar.org/d726/6bf19e202f62f31c363a5a5656c67c03118b.pdf
+837e99301e00c2244023a8a48ff98d7b521c93ac,https://pdfs.semanticscholar.org/b7b7/4e0ec15c22e1c94406c592bbb83c8e865f52.pdf,,,https://pdfs.semanticscholar.org/b7b7/4e0ec15c22e1c94406c592bbb83c8e865f52.pdf
+834736698f2cc5c221c22369abe95515243a9fc3,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6996249,,
+83d41f6548bb76241737dcd3fed9e182ee901ff9,,,,http://dl.acm.org/citation.cfm?id=2964328
+8355d095d3534ef511a9af68a3b2893339e3f96b,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406390,,
+83f80fd4eb614777285202fa99e8314e3e5b169c,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265544,,
+8334da483f1986aea87b62028672836cb3dc6205,https://arxiv.org/pdf/1805.06306.pdf,,,https://arxiv.org/pdf/1805.06306.pdf
+831b4d8b0c0173b0bac0e328e844a0fbafae6639,https://arxiv.org/pdf/1809.01407.pdf,,,https://arxiv.org/pdf/1809.01407.pdf
+1b02b9413b730b96b91d16dcd61b2420aef97414,https://pdfs.semanticscholar.org/1b02/b9413b730b96b91d16dcd61b2420aef97414.pdf,,,https://pdfs.semanticscholar.org/1b02/b9413b730b96b91d16dcd61b2420aef97414.pdf
+1b55c4e804d1298cbbb9c507497177014a923d22,https://pdfs.semanticscholar.org/1b55/c4e804d1298cbbb9c507497177014a923d22.pdf,,,https://pdfs.semanticscholar.org/1b55/c4e804d1298cbbb9c507497177014a923d22.pdf
+1bdef21f093c41df2682a07f05f3548717c7a3d1,https://pdfs.semanticscholar.org/1bde/f21f093c41df2682a07f05f3548717c7a3d1.pdf,,,https://pdfs.semanticscholar.org/1bde/f21f093c41df2682a07f05f3548717c7a3d1.pdf
+1bbec7190ac3ba34ca91d28f145e356a11418b67,https://pdfs.semanticscholar.org/1bbe/c7190ac3ba34ca91d28f145e356a11418b67.pdf,,,https://pdfs.semanticscholar.org/1bbe/c7190ac3ba34ca91d28f145e356a11418b67.pdf
+1b3587363d37dd197b6adbcfa79d49b5486f27d8,https://arxiv.org/pdf/1806.06371.pdf,,,https://arxiv.org/pdf/1806.06371.pdf
+1bd9dbe78918ed17b0a3ac40623f044cb3d3552c,,,http://doi.org/10.1038/nn870,
+1b71d3f30238cb6621021a95543cce3aab96a21b,https://arxiv.org/pdf/1804.09235.pdf,,,https://arxiv.org/pdf/1804.09235.pdf
+1b4f6f73c70353869026e5eec1dd903f9e26d43f,https://arxiv.org/pdf/1501.06202.pdf,,,https://arxiv.org/pdf/1501.06202.pdf
+1b5d445741473ced3d4d33732c9c9225148ed4a1,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8452894,,
+1badfeece64d1bf43aa55c141afe61c74d0bd25e,https://arxiv.org/pdf/1712.01727.pdf,,,https://arxiv.org/pdf/1712.01727.pdf
+7783095a565094ae5b3dccf082d504ddd7255a5c,,,,http://dl.acm.org/citation.cfm?id=2502258
+7789a5d87884f8bafec8a82085292e87d4e2866f,https://arxiv.org/pdf/1612.09548.pdf,,,https://arxiv.org/pdf/1612.09548.pdf
+77d929b3c4bf546557815b41ed5c076a5792dc6b,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8265399,,
+779d3f0cf74b7d33344eea210170c7c981a7e27b,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8115237,,
+7788fa76f1488b1597ee2bebc462f628e659f61e,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8063888,,
+77db171a523fc3d08c91cea94c9562f3edce56e1,https://pdfs.semanticscholar.org/49af/c659fd0709511759fd220f49b5eb2265e815.pdf,,,https://pdfs.semanticscholar.org/49af/c659fd0709511759fd220f49b5eb2265e815.pdf
+77fbbf0c5729f97fcdbfdc507deee3d388cd4889,https://pdfs.semanticscholar.org/ec7f/c7bf79204166f78c27e870b620205751fff6.pdf,,,https://pdfs.semanticscholar.org/ec7f/c7bf79204166f78c27e870b620205751fff6.pdf
+776362314f1479f5319aaf989624ac604ba42c65,https://pdfs.semanticscholar.org/78aa/2775625c85aedd6a2adc90eb94b8cafd6e91.pdf,,,https://pdfs.semanticscholar.org/78aa/2775625c85aedd6a2adc90eb94b8cafd6e91.pdf
+771505abd38641454757de75fe751d41e87f89a4,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8401561,,
+48186494fc7c0cc664edec16ce582b3fcb5249c0,https://arxiv.org/pdf/1506.03607.pdf,,,https://arxiv.org/pdf/1506.03607.pdf
+48499deeaa1e31ac22c901d115b8b9867f89f952,https://pdfs.semanticscholar.org/4849/9deeaa1e31ac22c901d115b8b9867f89f952.pdf,,,https://pdfs.semanticscholar.org/4849/9deeaa1e31ac22c901d115b8b9867f89f952.pdf
+48a402593ca4896ac34fbebf1e725ab1226ecdb7,,,http://doi.org/10.1016/j.patcog.2015.01.022,
+4850af6b54391fc33c8028a0b7fafe05855a96ff,https://arxiv.org/pdf/1605.00707.pdf,,,https://arxiv.org/pdf/1605.00707.pdf
+48e6c6d981efe2c2fb0ae9287376fcae59da9878,https://arxiv.org/pdf/1807.11010.pdf,,,https://arxiv.org/pdf/1807.11010.pdf
+48a5b6ee60475b18411a910c6084b3a32147b8cd,https://pdfs.semanticscholar.org/48a5/b6ee60475b18411a910c6084b3a32147b8cd.pdf,,,https://pdfs.semanticscholar.org/48a5/b6ee60475b18411a910c6084b3a32147b8cd.pdf
+48de3ca194c3830daa7495603712496fe908375c,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6619283,,
+480ccd25cb2a851745f5e6e95d33edb703efb49e,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8461792,,
+484bac2a9ff3a43a6f85d109bbc579a4346397f5,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6011991,,
+4896909796f9bd2f70a2cb24bf18daacd6a12128,https://pdfs.semanticscholar.org/4896/909796f9bd2f70a2cb24bf18daacd6a12128.pdf,,,https://pdfs.semanticscholar.org/4896/909796f9bd2f70a2cb24bf18daacd6a12128.pdf
+70e14e216b12bed2211c4df66ef5f0bdeaffe774,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237666,,
+70109c670471db2e0ede3842cbb58ba6be804561,https://arxiv.org/pdf/1607.02104.pdf,,,https://arxiv.org/pdf/1607.02104.pdf
+708f4787bec9d7563f4bb8b33834de445147133b,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237449,,
+70d2ab1af0edd5c0a30d576a5d4aa397c4f92d3e,,,http://doi.org/10.1007/s11042-018-5608-2,
+703dc33736939f88625227e38367cfb2a65319fe,https://arxiv.org/pdf/1703.09026.pdf,,,https://arxiv.org/pdf/1703.09026.pdf
+701f56f0eac9f88387de1f556acef78016b05d52,https://pdfs.semanticscholar.org/701f/56f0eac9f88387de1f556acef78016b05d52.pdf,,,https://pdfs.semanticscholar.org/701f/56f0eac9f88387de1f556acef78016b05d52.pdf
+706b9767a444de4fe153b2f3bff29df7674c3161,https://arxiv.org/pdf/1511.06442.pdf,,,https://arxiv.org/pdf/1511.06442.pdf
+70c58700eb89368e66a8f0d3fc54f32f69d423e1,https://pdfs.semanticscholar.org/70c5/8700eb89368e66a8f0d3fc54f32f69d423e1.pdf,,,https://pdfs.semanticscholar.org/70c5/8700eb89368e66a8f0d3fc54f32f69d423e1.pdf
+707a542c580bcbf3a5a75cce2df80d75990853cc,https://arxiv.org/pdf/1809.01936.pdf,,,https://arxiv.org/pdf/1809.01936.pdf
+704d88168bdfabe31b6ff484507f4a2244b8c52b,https://arxiv.org/pdf/1803.07445.pdf,,,https://arxiv.org/pdf/1803.07445.pdf
+70c9d11cad12dc1692a4507a97f50311f1689dbf,https://arxiv.org/pdf/1702.02463.pdf,,,https://arxiv.org/pdf/1702.02463.pdf
+1ea74780d529a458123a08250d8fa6ef1da47a25,https://pdfs.semanticscholar.org/1ea7/4780d529a458123a08250d8fa6ef1da47a25.pdf,,,https://pdfs.semanticscholar.org/1ea7/4780d529a458123a08250d8fa6ef1da47a25.pdf
+1e0d92b9b4011822825d1f7dc0eba6d83504d45d,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4497872,,
+1efacaa0eaa7e16146c34cd20814d1411b35538e,https://arxiv.org/pdf/1805.06749.pdf,,,https://arxiv.org/pdf/1805.06749.pdf
+1e3068886b138304ec5a7296702879cc8788143d,,,http://doi.org/10.1007/s11263-013-0630-3,
+1ef1f33c48bc159881c5c8536cbbd533d31b0e9a,https://pdfs.semanticscholar.org/1ef1/f33c48bc159881c5c8536cbbd533d31b0e9a.pdf,,,https://pdfs.semanticscholar.org/1ef1/f33c48bc159881c5c8536cbbd533d31b0e9a.pdf
+1e21b925b65303ef0299af65e018ec1e1b9b8d60,https://arxiv.org/pdf/1611.02200.pdf,,,https://arxiv.org/pdf/1611.02200.pdf
+1ee3b4ba04e54bfbacba94d54bf8d05fd202931d,https://pdfs.semanticscholar.org/1ee3/b4ba04e54bfbacba94d54bf8d05fd202931d.pdf,,,https://pdfs.semanticscholar.org/1ee3/b4ba04e54bfbacba94d54bf8d05fd202931d.pdf
+1efaa128378f988965841eb3f49d1319a102dc36,https://arxiv.org/pdf/1808.04803.pdf,,,https://arxiv.org/pdf/1808.04803.pdf
+8451bf3dd6bcd946be14b1a75af8bbb65a42d4b2,https://pdfs.semanticscholar.org/8451/bf3dd6bcd946be14b1a75af8bbb65a42d4b2.pdf,,,https://pdfs.semanticscholar.org/8451/bf3dd6bcd946be14b1a75af8bbb65a42d4b2.pdf
+841855205818d3a6d6f85ec17a22515f4f062882,https://arxiv.org/pdf/1805.11529.pdf,,,https://arxiv.org/pdf/1805.11529.pdf
+84c0f814951b80c3b2e39caf3925b56a9b2e1733,https://pdfs.semanticscholar.org/84c0/f814951b80c3b2e39caf3925b56a9b2e1733.pdf,,,https://pdfs.semanticscholar.org/84c0/f814951b80c3b2e39caf3925b56a9b2e1733.pdf
+84fa126cb19d569d2f0147bf6f9e26b54c9ad4f1,https://pdfs.semanticscholar.org/db88/70aba4eca31ba56e993e4e94ae86eed6589a.pdf,,,https://pdfs.semanticscholar.org/db88/70aba4eca31ba56e993e4e94ae86eed6589a.pdf
+84508e846af3ac509f7e1d74b37709107ba48bde,https://pdfs.semanticscholar.org/8450/8e846af3ac509f7e1d74b37709107ba48bde.pdf,,,https://pdfs.semanticscholar.org/8450/8e846af3ac509f7e1d74b37709107ba48bde.pdf
+84c5b45328dee855c4855a104ac9c0558cc8a328,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411213,,
+84574aa43a98ad8a29470977e7b091f5a5ec2366,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7301321,,
+84a74ef8680b66e6dccbc69ae80321a52780a68e,,,http://doi.org/10.1007/978-0-85729-932-1_19,
+845f45f8412905137bf4e46a0d434f5856cd3aec,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8418618,,
+849f891973ad2b6c6f70d7d43d9ac5805f1a1a5b,https://pdfs.semanticscholar.org/849f/891973ad2b6c6f70d7d43d9ac5805f1a1a5b.pdf,,,https://pdfs.semanticscholar.org/849f/891973ad2b6c6f70d7d43d9ac5805f1a1a5b.pdf
+4ab84f203b0e752be83f7f213d7495b04b1c4c79,https://arxiv.org/pdf/1711.00659.pdf,,,https://arxiv.org/pdf/1711.00659.pdf
+4a3758f283b7c484d3f164528d73bc8667eb1591,https://arxiv.org/pdf/1809.06647.pdf,,,https://arxiv.org/pdf/1809.06647.pdf
+4aa093d1986b4ad9b073ac9edfb903f62c00e0b0,https://arxiv.org/pdf/1809.06218.pdf,,,https://arxiv.org/pdf/1809.06218.pdf
+4a733a0862bd5f7be73fb4040c1375a6d17c9276,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6618949,,
+4ac3cd8b6c50f7a26f27eefc64855134932b39be,https://pdfs.semanticscholar.org/4ac3/cd8b6c50f7a26f27eefc64855134932b39be.pdf,,,https://pdfs.semanticscholar.org/4ac3/cd8b6c50f7a26f27eefc64855134932b39be.pdf
+4a8480d58c30dc484bda08969e754cd13a64faa1,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406475,,
+24603ed946cb9385ec541c86d2e42db47361c102,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373865,,
+24115d209e0733e319e39badc5411bbfd82c5133,https://arxiv.org/pdf/1411.4389.pdf,,,https://arxiv.org/pdf/1411.4389.pdf
+24286ef164f0e12c3e9590ec7f636871ba253026,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369721,,
+24f022d807352abf071880877c38e53a98254dcd,https://arxiv.org/pdf/1809.05465.pdf,,,https://arxiv.org/pdf/1809.05465.pdf
+2480f8dccd9054372d696e1e521e057d9ac9de17,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8396968,,
+24869258fef8f47623b5ef43bd978a525f0af60e,https://pdfs.semanticscholar.org/c2b3/d8ac1f02e63809c74d2eacb37329ec139ce2.pdf,,,https://pdfs.semanticscholar.org/c2b3/d8ac1f02e63809c74d2eacb37329ec139ce2.pdf
+247a8040447b6577aa33648395d95d80441a0cf3,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8362745,,
+24ff832171cb774087a614152c21f54589bf7523,https://arxiv.org/pdf/1508.03755.pdf,,,https://arxiv.org/pdf/1508.03755.pdf
+23edcd0d2011d9c0d421193af061f2eb3e155da3,,,http://doi.org/10.1007/s00371-015-1137-4,
+23ce6f404c504592767b8bec7d844d87b462de71,https://arxiv.org/pdf/1805.00324.pdf,,,https://arxiv.org/pdf/1805.00324.pdf
+23ee7b7a9ca5948e81555aaf3a044cfec778f148,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771385,,
+239e305c24155add73f2a0ba5ccbd66b37f77e14,,,,http://dl.acm.org/citation.cfm?id=1219097
+23e824d1dfc33f3780dd18076284f07bd99f1c43,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7947686,,
+2322ec2f3571e0ddc593c4e2237a6a794c61251d,https://pdfs.semanticscholar.org/2322/ec2f3571e0ddc593c4e2237a6a794c61251d.pdf,,,https://pdfs.semanticscholar.org/2322/ec2f3571e0ddc593c4e2237a6a794c61251d.pdf
+239958d6778643101ab631ec354ea1bc4d33e7e0,,,http://doi.org/10.1016/j.patcog.2017.06.009,
+23429ef60e7a9c0e2f4d81ed1b4e47cc2616522f,https://arxiv.org/pdf/1704.06456.pdf,,,https://arxiv.org/pdf/1704.06456.pdf
+2303d07d839e8b20f33d6e2ec78d1353cac256cf,https://arxiv.org/pdf/1806.00631.pdf,,,https://arxiv.org/pdf/1806.00631.pdf
+23dd8d17ce09c22d367e4d62c1ccf507bcbc64da,https://pdfs.semanticscholar.org/23dd/8d17ce09c22d367e4d62c1ccf507bcbc64da.pdf,,,https://pdfs.semanticscholar.org/23dd/8d17ce09c22d367e4d62c1ccf507bcbc64da.pdf
+234c106036964131c0f2daf76c47ced802652046,,,http://doi.org/10.1016/j.cviu.2015.07.007,
+23b37c2f803a2d4b701e2f39c5f623b2f3e14d8e,https://pdfs.semanticscholar.org/21b0/fe87731197c94f9e282e995c8f75a9b721a5.pdf,,,https://pdfs.semanticscholar.org/21b0/fe87731197c94f9e282e995c8f75a9b721a5.pdf
+4f9e00aaf2736b79e415f5e7c8dfebda3043a97d,https://pdfs.semanticscholar.org/d713/d11d5c8f466ad56286f407991b2d88b606ff.pdf,,,https://pdfs.semanticscholar.org/d713/d11d5c8f466ad56286f407991b2d88b606ff.pdf
+4f37f71517420c93c6841beb33ca0926354fa11d,,,http://doi.org/10.1016/j.neucom.2017.08.062,
+4f051022de100241e5a4ba8a7514db9167eabf6e,https://arxiv.org/pdf/1708.03736.pdf,,,https://arxiv.org/pdf/1708.03736.pdf
+4f064c2a0ef0849eed61ab816ff0c2ff6d9d7308,,,,http://dl.acm.org/citation.cfm?id=2396318
+4f1249369127cc2e2894f6b2f1052d399794919a,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8239663,,
+4f8345f31e38f65f1155569238d14bd8517606f4,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6618941,,
+4f4f920eb43399d8d05b42808e45b56bdd36a929,https://pdfs.semanticscholar.org/4f4f/920eb43399d8d05b42808e45b56bdd36a929.pdf,,,https://pdfs.semanticscholar.org/4f4f/920eb43399d8d05b42808e45b56bdd36a929.pdf
+4f0b8f730273e9f11b2bfad2415485414b96299f,https://arxiv.org/pdf/1805.04687.pdf,,,https://arxiv.org/pdf/1805.04687.pdf
+4f7b92bd678772552b3c3edfc9a7c5c4a8c60a8e,https://pdfs.semanticscholar.org/4f7b/92bd678772552b3c3edfc9a7c5c4a8c60a8e.pdf,,,https://pdfs.semanticscholar.org/4f7b/92bd678772552b3c3edfc9a7c5c4a8c60a8e.pdf
+4f8b4784d0fca31840307650f7052b0dde736a76,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7017496,,
+8d0bc14589dea1f4f88914ffcb57a5c54830f2cc,,,http://doi.org/10.1007/978-3-319-16865-4,
+8dd9c97b85e883c16e5b1ec260f9cd610df52dec,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8404159,,
+8de6deefb90fb9b3f7d451b9d8a1a3264b768482,https://pdfs.semanticscholar.org/8de6/deefb90fb9b3f7d451b9d8a1a3264b768482.pdf,,,https://pdfs.semanticscholar.org/8de6/deefb90fb9b3f7d451b9d8a1a3264b768482.pdf
+8da32ff9e3759dc236878ac240728b344555e4e9,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014820,,
+8d384e8c45a429f5c5f6628e8ba0d73c60a51a89,https://arxiv.org/pdf/1708.00666.pdf,,,https://arxiv.org/pdf/1708.00666.pdf
+8dcc95debd07ebab1721c53fa50d846fef265022,https://arxiv.org/pdf/1711.07011.pdf,,,https://arxiv.org/pdf/1711.07011.pdf
+8dfe43c76b76a97f8938f5f5f81059a1f1fa74ed,,,http://doi.org/10.1038/s41598-017-18993-5,
+8d5998cd984e7cce307da7d46f155f9db99c6590,https://arxiv.org/pdf/1701.02664.pdf,,,https://arxiv.org/pdf/1701.02664.pdf
+8de5dc782178114d9424d33d9adabb2f29a1ab17,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7053946,,
+15136c2f94fd29fc1cb6bedc8c1831b7002930a6,https://arxiv.org/pdf/1802.09990.pdf,,,https://arxiv.org/pdf/1802.09990.pdf
+15d653972d176963ef0ad2cc582d3b35ca542673,https://arxiv.org/pdf/1612.05203.pdf,,,https://arxiv.org/pdf/1612.05203.pdf
+15aa6c457678e25f6bc0e818e5fc39e42dd8e533,https://arxiv.org/pdf/1806.07823.pdf,,,https://arxiv.org/pdf/1806.07823.pdf
+15cf1f17aeba62cd834116b770f173b0aa614bf4,https://pdfs.semanticscholar.org/15cf/1f17aeba62cd834116b770f173b0aa614bf4.pdf,,,https://pdfs.semanticscholar.org/15cf/1f17aeba62cd834116b770f173b0aa614bf4.pdf
+151b87de997e55db892b122c211f9c749f4293de,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237481,,
+121503705689f46546cade78ff62963574b4750b,https://arxiv.org/pdf/1602.08405.pdf,,,https://arxiv.org/pdf/1602.08405.pdf
+1275d6a800f8cf93c092603175fdad362b69c191,https://arxiv.org/pdf/1804.06655.pdf,,,https://arxiv.org/pdf/1804.06655.pdf
+127c7f87f289b1d32e729738475b337a6b042cf7,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8436988,,
+1287bfe73e381cc8042ac0cc27868ae086e1ce3b,https://pdfs.semanticscholar.org/1287/bfe73e381cc8042ac0cc27868ae086e1ce3b.pdf,,,https://pdfs.semanticscholar.org/1287/bfe73e381cc8042ac0cc27868ae086e1ce3b.pdf
+1221e25763c3be95c1b6626ca9e7feaa3b636d9a,,,http://doi.org/10.1007/s11042-017-4353-2,
+12226bca7a891e25b7d1e1a34a089521bba75731,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373861,,
+12408baf69419409d228d96c6f88b6bcde303505,https://arxiv.org/pdf/1612.06950.pdf,,,https://arxiv.org/pdf/1612.06950.pdf
+12095f9b35ee88272dd5abc2d942a4f55804b31e,https://pdfs.semanticscholar.org/1209/5f9b35ee88272dd5abc2d942a4f55804b31e.pdf,,,https://pdfs.semanticscholar.org/1209/5f9b35ee88272dd5abc2d942a4f55804b31e.pdf
+1275852f2e78ed9afd189e8b845fdb5393413614,https://arxiv.org/pdf/1808.04068.pdf,,,https://arxiv.org/pdf/1808.04068.pdf
+1297ee7a41aa4e8499c7ddb3b1fed783eba19056,https://pdfs.semanticscholar.org/1297/ee7a41aa4e8499c7ddb3b1fed783eba19056.pdf,,,https://pdfs.semanticscholar.org/1297/ee7a41aa4e8499c7ddb3b1fed783eba19056.pdf
+120785f9b4952734818245cc305148676563a99b,https://pdfs.semanticscholar.org/1207/85f9b4952734818245cc305148676563a99b.pdf,,,https://pdfs.semanticscholar.org/1207/85f9b4952734818245cc305148676563a99b.pdf
+1251deae1b4a722a2155d932bdfb6fe4ae28dd22,https://arxiv.org/pdf/1804.04314.pdf,,,https://arxiv.org/pdf/1804.04314.pdf
+12ebeb2176a5043ad57bc5f3218e48a96254e3e9,https://pdfs.semanticscholar.org/c5ae/ec7db8132685f408ca17a7a5c45c196b0323.pdf,,,https://pdfs.semanticscholar.org/c5ae/ec7db8132685f408ca17a7a5c45c196b0323.pdf
+8ccde9d80706a59e606f6e6d48d4260b60ccc736,https://arxiv.org/pdf/1805.06846.pdf,,,https://arxiv.org/pdf/1805.06846.pdf
+8ce9b7b52d05701d5ef4a573095db66ce60a7e1c,https://arxiv.org/pdf/1610.05211.pdf,,,https://arxiv.org/pdf/1610.05211.pdf
+8cb6daba2cb1e208e809633133adfee0183b8dd2,https://pdfs.semanticscholar.org/8cb6/daba2cb1e208e809633133adfee0183b8dd2.pdf,,,https://pdfs.semanticscholar.org/8cb6/daba2cb1e208e809633133adfee0183b8dd2.pdf
+8c4042191431e9eb43f00b0f14c23765ab9c6688,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7532956,,
+8ccbbd9da0749d96f09164e28480d54935ee171c,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5597578,,
+85fd2bda5eb3afe68a5a78c30297064aec1361f6,https://pdfs.semanticscholar.org/85fd/2bda5eb3afe68a5a78c30297064aec1361f6.pdf,,,https://pdfs.semanticscholar.org/85fd/2bda5eb3afe68a5a78c30297064aec1361f6.pdf
+856cc83a3121de89d4a6d9283afbcd5d7ef7aa2b,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6417014,,
+85a136b48c2036b16f444f93b086e2bd8539a498,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7885525,,
+858b51a8a8aa082732e9c7fbbd1ea9df9c76b013,https://pdfs.semanticscholar.org/858b/51a8a8aa082732e9c7fbbd1ea9df9c76b013.pdf,,,https://pdfs.semanticscholar.org/858b/51a8a8aa082732e9c7fbbd1ea9df9c76b013.pdf
+85e78aa374d85f9a61da693e5010e40decd3f986,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6619100,,
+854b1f0581f5d3340f15eb79452363cbf38c04c8,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7903648,,
+85ec86f8320ba2ed8b3da04d1c291ce88b8969c0,,,,http://dl.acm.org/citation.cfm?id=3264947
+856317f27248cdb20226eaae599e46de628fb696,https://arxiv.org/pdf/1805.12467.pdf,,,https://arxiv.org/pdf/1805.12467.pdf
+85ae6fa48e07857e17ac4bd48fb804785483e268,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7755833,,
+85c90ad5eebb637f048841ebfded05942bb786b7,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6977163,,
+8562b4f63e49847692b8cb31ef0bdec416b9a87a,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8128909,,
+857c64060963dd8d28e4740f190d321298ddd503,,,http://doi.org/10.1007/s11042-015-3103-6,
+1d30f813798c55ae4fe454829be6e2948ee841da,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4270396,,
+1d51b256af68c5546d230f3e6f41da029e0f5852,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7590015,,
+1d776bfe627f1a051099997114ba04678c45f0f5,https://arxiv.org/pdf/1805.10604.pdf,,,https://arxiv.org/pdf/1805.10604.pdf
+1de23d7fe718d9fab0159f58f422099e44ad3f0a,,,http://doi.org/10.1007/s11063-016-9558-2,
+1d7df3df839a6aa8f5392310d46b2a89080a3c25,https://arxiv.org/pdf/1612.02295.pdf,,,https://arxiv.org/pdf/1612.02295.pdf
+1d729693a888a460ee855040f62bdde39ae273af,https://pdfs.semanticscholar.org/9da1/91858f65fd99c9b204a6f68916711d4bd51b.pdf,,,https://pdfs.semanticscholar.org/9da1/91858f65fd99c9b204a6f68916711d4bd51b.pdf
+1d4c25f9f8f08f5a756d6f472778ab54a7e6129d,https://pdfs.semanticscholar.org/1d4c/25f9f8f08f5a756d6f472778ab54a7e6129d.pdf,,,https://pdfs.semanticscholar.org/1d4c/25f9f8f08f5a756d6f472778ab54a7e6129d.pdf
+7142ac9e4d5498037aeb0f459f278fd28dae8048,https://pdfs.semanticscholar.org/a148/0722ce6c89468ef44548c39fb79012f91a64.pdf,,,https://pdfs.semanticscholar.org/a148/0722ce6c89468ef44548c39fb79012f91a64.pdf
+7117ed0be436c0291bc6fb6ea6db18de74e2464a,https://pdfs.semanticscholar.org/7117/ed0be436c0291bc6fb6ea6db18de74e2464a.pdf,,,https://pdfs.semanticscholar.org/7117/ed0be436c0291bc6fb6ea6db18de74e2464a.pdf
+71ca8b6e84c17b3e68f980bfb8cddc837100f8bf,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7899774,,
+7195cb08ba2248f3214f5dc5d7881533dd1f46d9,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5673820,,
+714d487571ca0d676bad75c8fa622d6f50df953b,https://arxiv.org/pdf/1511.06491.pdf,,,https://arxiv.org/pdf/1511.06491.pdf
+7143518f847b0ec57a0ff80e0304c89d7e924d9a,https://arxiv.org/pdf/1805.08373.pdf,,,https://arxiv.org/pdf/1805.08373.pdf
+710011644006c18291ad512456b7580095d628a2,https://arxiv.org/pdf/1612.05363.pdf,,,https://arxiv.org/pdf/1612.05363.pdf
+713db3874b77212492d75fb100a345949f3d3235,https://arxiv.org/pdf/1803.03345.pdf,,,https://arxiv.org/pdf/1803.03345.pdf
+715b69575dadd7804b4f8ccb419a3ad8b7b7ca89,https://arxiv.org/pdf/1610.03207.pdf,,,https://arxiv.org/pdf/1610.03207.pdf
+71c4b8e1bb25ee80f4317411ea8180dae6499524,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8463396,,
+765be0c44a67e41e0f8f0b5d8a3af0ff40a00c7d,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373821,,
+76cd5e43df44e389483f23cb578a9015d1483d70,https://pdfs.semanticscholar.org/76cd/5e43df44e389483f23cb578a9015d1483d70.pdf,,,https://pdfs.semanticscholar.org/76cd/5e43df44e389483f23cb578a9015d1483d70.pdf
+76b11c281ac47fe6d95e124673a408ee9eb568e3,https://pdfs.semanticscholar.org/76b1/1c281ac47fe6d95e124673a408ee9eb568e3.pdf,,,https://pdfs.semanticscholar.org/76b1/1c281ac47fe6d95e124673a408ee9eb568e3.pdf
+768f6a14a7903099729872e0db231ea814eb05e9,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411205,,
+764882e6779fbee29c3d87e00302befc52d2ea8d,https://arxiv.org/pdf/1711.07437.pdf,,,https://arxiv.org/pdf/1711.07437.pdf
+763b60feaabceebbe9eddfbaa0378b8b454327aa,,,,
+766728bac030b169fcbc2fbafe24c6e22a58ef3c,https://pdfs.semanticscholar.org/7667/28bac030b169fcbc2fbafe24c6e22a58ef3c.pdf,,,https://pdfs.semanticscholar.org/7667/28bac030b169fcbc2fbafe24c6e22a58ef3c.pdf
+7697295ee6fc817296bed816ac5cae97644c2d5b,https://arxiv.org/pdf/1704.07333.pdf,,,https://arxiv.org/pdf/1704.07333.pdf
+1c25a3c8ef3e2c4dbff337aa727d13f5eba40fb2,,,http://doi.org/10.1007/s00371-016-1290-4,
+1c0acf9c2f2c43be47b34acbd4e7338de360e555,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8461986,,
+1c9efb6c895917174ac6ccc3bae191152f90c625,https://arxiv.org/pdf/1806.03084.pdf,,,https://arxiv.org/pdf/1806.03084.pdf
+1cfe3533759bf95be1fce8ce1d1aa2aeb5bfb4cc,https://pdfs.semanticscholar.org/6a82/2f65c3a49525ffa0dc896ac24e9ad3dca62e.pdf,,,https://pdfs.semanticscholar.org/6a82/2f65c3a49525ffa0dc896ac24e9ad3dca62e.pdf
+1c41965c5e1f97b1504c1bdde8037b5e0417da5e,https://arxiv.org/pdf/1808.01106.pdf,,,https://arxiv.org/pdf/1808.01106.pdf
+1c6e22516ceb5c97c3caf07a9bd5df357988ceda,https://arxiv.org/pdf/1806.05476.pdf,,,https://arxiv.org/pdf/1806.05476.pdf
+825f56ff489cdd3bcc41e76426d0070754eab1a8,https://pdfs.semanticscholar.org/bc51/1519cf8d4e3e247d7506c38d80f64c6a859e.pdf,,,https://pdfs.semanticscholar.org/bc51/1519cf8d4e3e247d7506c38d80f64c6a859e.pdf
+824d1db06e1c25f7681e46199fd02cb5fc343784,https://pdfs.semanticscholar.org/824d/1db06e1c25f7681e46199fd02cb5fc343784.pdf,,,https://pdfs.semanticscholar.org/824d/1db06e1c25f7681e46199fd02cb5fc343784.pdf
+82eff71af91df2ca18aebb7f1153a7aed16ae7cc,https://pdfs.semanticscholar.org/82ef/f71af91df2ca18aebb7f1153a7aed16ae7cc.pdf,,,https://pdfs.semanticscholar.org/82ef/f71af91df2ca18aebb7f1153a7aed16ae7cc.pdf
+82a610a59c210ff77cfdde7fd10c98067bd142da,https://pdfs.semanticscholar.org/82a6/10a59c210ff77cfdde7fd10c98067bd142da.pdf,,,https://pdfs.semanticscholar.org/82a6/10a59c210ff77cfdde7fd10c98067bd142da.pdf
+829f390b3f8ad5856e7ba5ae8568f10cee0c7e6a,https://pdfs.semanticscholar.org/bbf4/f0ce0838c8eec048e3a9b212053fd98dde5a.pdf,,,https://pdfs.semanticscholar.org/bbf4/f0ce0838c8eec048e3a9b212053fd98dde5a.pdf
+8202da548a128b28dd1f3aa9f86a0523ec2ecb26,,,http://doi.org/10.1016/j.ijar.2012.01.003,
+82a0a5d0785fb2c2282ed901a15c3ff02f8567df,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6849828,,
+82417d8ec8ac6406f2d55774a35af2a1b3f4b66e,https://pdfs.semanticscholar.org/8241/7d8ec8ac6406f2d55774a35af2a1b3f4b66e.pdf,,,https://pdfs.semanticscholar.org/8241/7d8ec8ac6406f2d55774a35af2a1b3f4b66e.pdf
+82e3f4099503633c042a425e9217bfe47cfe9d4b,,,http://doi.org/10.1007/s11042-015-2819-7,
+82eb267b8e86be0b444e841b4b4ed4814b6f1942,https://arxiv.org/pdf/1604.08685.pdf,,,https://arxiv.org/pdf/1604.08685.pdf
+49358915ae259271238c7690694e6a887b16f7ed,,,http://doi.org/10.1007/BF02884429,
+4972aadcce369a8c0029e6dc2f288dfd0241e144,https://arxiv.org/pdf/1809.00852.pdf,,,https://arxiv.org/pdf/1809.00852.pdf
+4983076c1a8b80ff5cd68b924b11df58a68b6c84,,,http://doi.org/10.1007/s11704-017-6114-9,
+49e975a4c60d99bcc42c921d73f8d89ec7130916,https://pdfs.semanticscholar.org/49e9/75a4c60d99bcc42c921d73f8d89ec7130916.pdf,,,https://pdfs.semanticscholar.org/49e9/75a4c60d99bcc42c921d73f8d89ec7130916.pdf
+49068538b7eef66b4254cc11914128097302fab8,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5339040,,
+49be50efc87c5df7a42905e58b092729ea04c2f5,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7177489,,
+49df381ea2a1e7f4059346311f1f9f45dd997164,https://arxiv.org/pdf/1807.00848.pdf,,,https://arxiv.org/pdf/1807.00848.pdf
+493c8591d6a1bef5d7b84164a73761cefb9f5a25,,,,http://dl.acm.org/citation.cfm?id=3159691
+403a108dec92363fd1f465340bd54dbfe65af870,https://arxiv.org/pdf/1510.00542.pdf,,,https://arxiv.org/pdf/1510.00542.pdf
+40c9dce0a4c18829c4100bff5845eb7799b54ca1,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5346008,,
+40dd2b9aace337467c6e1e269d0cb813442313d7,https://pdfs.semanticscholar.org/40dd/2b9aace337467c6e1e269d0cb813442313d7.pdf,,,https://pdfs.semanticscholar.org/40dd/2b9aace337467c6e1e269d0cb813442313d7.pdf
+407de9da58871cae7a6ded2f3a6162b9dc371f38,https://arxiv.org/pdf/1808.00297.pdf,,,https://arxiv.org/pdf/1808.00297.pdf
+40b7e590dfd1cdfa1e0276e9ca592e02c1bd2b5b,https://arxiv.org/pdf/1804.05197.pdf,,,https://arxiv.org/pdf/1804.05197.pdf
+405d9a71350c9a13adea41f9d7f7f9274793824f,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373834,,
+40c1de7b1b0a087c590537df55ecd089c86e8bfc,,,http://doi.org/10.1162/NECO_a_00401,
+40a34d4eea5e32dfbcef420ffe2ce7c1ee0f23cd,https://pdfs.semanticscholar.org/8ef1/0da52c6b2c3856f56aa9d68acab4c1649ed8.pdf,,,https://pdfs.semanticscholar.org/8ef1/0da52c6b2c3856f56aa9d68acab4c1649ed8.pdf
+40e1743332523b2ab5614bae5e10f7a7799161f4,https://arxiv.org/pdf/1711.06753.pdf,,,https://arxiv.org/pdf/1711.06753.pdf
+4007bf090887d8a0e907ab5e17ecfcdbbdafc2e4,,,http://doi.org/10.1007/s13735-017-0144-9,
+40273657e6919455373455bd9a5355bb46a7d614,https://arxiv.org/pdf/1805.09380.pdf,,,https://arxiv.org/pdf/1805.09380.pdf
+407806f5fe3c5ecc2dc15b75d3d2b0359b4ee7e0,,,http://doi.org/10.1007/s11042-017-5028-8,
+2e8e6b835e5a8f55f3b0bdd7a1ff765a0b7e1b87,https://arxiv.org/pdf/1805.11333.pdf,,,https://arxiv.org/pdf/1805.11333.pdf
+2e7e1ee7e3ee1445939480efd615e8828b9838f8,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5643167,,
+2e5cfa97f3ecc10ae8f54c1862433285281e6a7c,https://pdfs.semanticscholar.org/2e5c/fa97f3ecc10ae8f54c1862433285281e6a7c.pdf,,,https://pdfs.semanticscholar.org/2e5c/fa97f3ecc10ae8f54c1862433285281e6a7c.pdf
+2e3b981b9f3751fc5873f77ad2aa7789c3e1d1d2,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8397046,,
+2e0d56794379c436b2d1be63e71a215dd67eb2ca,https://arxiv.org/pdf/1709.03872.pdf,,,https://arxiv.org/pdf/1709.03872.pdf
+2e231f1e7e641dd3619bec59e14d02e91360ac01,https://arxiv.org/pdf/1807.10421.pdf,,,https://arxiv.org/pdf/1807.10421.pdf
+2ed4973984b254be5cba3129371506275fe8a8eb,https://pdfs.semanticscholar.org/2ed4/973984b254be5cba3129371506275fe8a8eb.pdf,,,https://pdfs.semanticscholar.org/2ed4/973984b254be5cba3129371506275fe8a8eb.pdf
+2e9c780ee8145f29bd1a000585dd99b14d1f5894,https://arxiv.org/pdf/1807.08108.pdf,,,https://arxiv.org/pdf/1807.08108.pdf
+2e832d5657bf9e5678fd45b118fc74db07dac9da,https://pdfs.semanticscholar.org/2e83/2d5657bf9e5678fd45b118fc74db07dac9da.pdf,,,https://pdfs.semanticscholar.org/2e83/2d5657bf9e5678fd45b118fc74db07dac9da.pdf
+2bb53e66aa9417b6560e588b6235e7b8ebbc294c,https://arxiv.org/pdf/1502.01540.pdf,,,https://arxiv.org/pdf/1502.01540.pdf
+2bb36c875754a2a8919f2f9b00a336c00006e453,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8373869,,
+2bf646a6efd15ab830344ae9d43e10cc89e29f34,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8387808,,
+2bcd9b2b78eb353ea57cf50387083900eae5384a,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995329,,
+2b869d5551b10f13bf6fcdb8d13f0aa4d1f59fc4,https://arxiv.org/pdf/1803.00130.pdf,,,https://arxiv.org/pdf/1803.00130.pdf
+477236563c6a6c6db922045453b74d3f9535bfa1,https://pdfs.semanticscholar.org/3c3a/e3a2e7c3ee00f33a87a82d5783e84c3a1de2.pdf,,,https://pdfs.semanticscholar.org/3c3a/e3a2e7c3ee00f33a87a82d5783e84c3a1de2.pdf
+47190d213caef85e8b9dd0d271dbadc29ed0a953,https://arxiv.org/pdf/1807.11649.pdf,,,https://arxiv.org/pdf/1807.11649.pdf
+4735fa28fa2a2af98f7b266efd300a00e60dddf7,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6460647,,
+47e14fdc6685f0b3800f709c32e005068dfc8d47,https://arxiv.org/pdf/1805.00577.pdf,,,https://arxiv.org/pdf/1805.00577.pdf
+7831ab4f8c622d91974579c1ff749dadc170c73c,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6712699,,
+78f2c8671d1a79c08c80ac857e89315197418472,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237443,,
+782188821963304fb78791e01665590f0cd869e8,https://arxiv.org/pdf/1708.01311.pdf,,,https://arxiv.org/pdf/1708.01311.pdf
+784a83437b3dba49c0d7ccc10ac40497b84661a5,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8100224,,
+78cec49ca0acd3b961021bc27d5cf78cbbbafc7e,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995556,,
+78c1ad33772237bf138084220d1ffab800e1200d,https://arxiv.org/pdf/1804.08450.pdf,,,https://arxiv.org/pdf/1804.08450.pdf
+78598e7005f7c96d64cc47ff47e6f13ae52245b8,https://arxiv.org/pdf/1708.00370.pdf,,,https://arxiv.org/pdf/1708.00370.pdf
+782a05fbe30269ff8ab427109f5c4d0a577e5284,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8038860,,
+78174c2be084e67f48f3e8ea5cb6c9968615a42c,https://arxiv.org/pdf/1809.06157.pdf,,,https://arxiv.org/pdf/1809.06157.pdf
+8b2c090d9007e147b8c660f9282f357336358061,https://pdfs.semanticscholar.org/8b2c/090d9007e147b8c660f9282f357336358061.pdf,,,https://pdfs.semanticscholar.org/8b2c/090d9007e147b8c660f9282f357336358061.pdf
+8bed7ff2f75d956652320270eaf331e1f73efb35,https://arxiv.org/pdf/1709.03820.pdf,,,https://arxiv.org/pdf/1709.03820.pdf
+8befcd91c24038e5c26df0238d26e2311b21719a,https://arxiv.org/pdf/1808.02559.pdf,,,https://arxiv.org/pdf/1808.02559.pdf
+8bdf6f03bde08c424c214188b35be8b2dec7cdea,https://arxiv.org/pdf/1805.04049.pdf,,,https://arxiv.org/pdf/1805.04049.pdf
+8b744786137cf6be766778344d9f13abf4ec0683,https://pdfs.semanticscholar.org/8b74/4786137cf6be766778344d9f13abf4ec0683.pdf,,,https://pdfs.semanticscholar.org/8b74/4786137cf6be766778344d9f13abf4ec0683.pdf
+8b61fdc47b5eeae6bc0a52523f519eaeaadbc8c8,https://pdfs.semanticscholar.org/8b61/fdc47b5eeae6bc0a52523f519eaeaadbc8c8.pdf,,,https://pdfs.semanticscholar.org/8b61/fdc47b5eeae6bc0a52523f519eaeaadbc8c8.pdf
+8bebb26880274bdb840ebcca530caf26c393bf45,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8369529,,
+8bbd40558a99e33fac18f6736b8fe99f4a97d9b1,,,http://doi.org/10.1007/s11263-016-0986-2,
+8b38124ff02a9cf8ad00de5521a7f8a9fa4d7259,https://pdfs.semanticscholar.org/8b38/124ff02a9cf8ad00de5521a7f8a9fa4d7259.pdf,,,https://pdfs.semanticscholar.org/8b38/124ff02a9cf8ad00de5521a7f8a9fa4d7259.pdf
+134f1cee8408cca648d8b4ca44b38b0a7023af71,https://pdfs.semanticscholar.org/134f/1cee8408cca648d8b4ca44b38b0a7023af71.pdf,,,https://pdfs.semanticscholar.org/134f/1cee8408cca648d8b4ca44b38b0a7023af71.pdf
+13d430257d595231bda216ef859950caa736ad1d,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8394947,,
+13604bbdb6f04a71dea4bd093794e46730b0a488,https://arxiv.org/pdf/1712.09482.pdf,,,https://arxiv.org/pdf/1712.09482.pdf
+13179bb3f2867ea44647b6fe0c8fb4109207e9f5,,,http://doi.org/10.1007/s00779-018-1171-0,
+13aef395f426ca8bd93640c9c3f848398b189874,https://pdfs.semanticscholar.org/13ae/f395f426ca8bd93640c9c3f848398b189874.pdf,,,https://pdfs.semanticscholar.org/13ae/f395f426ca8bd93640c9c3f848398b189874.pdf
+1316296fae6485c1510f00b1b57fb171b9320ac2,https://pdfs.semanticscholar.org/58d7/6380d194248b3bb291b8c7c5137a0a376897.pdf,,,https://pdfs.semanticscholar.org/58d7/6380d194248b3bb291b8c7c5137a0a376897.pdf
+7f511a6a2b38a26f077a5aec4baf5dffc981d881,https://arxiv.org/pdf/1805.02877.pdf,,,https://arxiv.org/pdf/1805.02877.pdf
+7f21a7441c6ded38008c1fd0b91bdd54425d3f80,https://arxiv.org/pdf/1809.05474.pdf,,,https://arxiv.org/pdf/1809.05474.pdf
+7fcecaef60a681c47f0476e54e08712ee05d6154,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7299097,,
+7fb5006b6522436ece5bedf509e79bdb7b79c9a7,https://pdfs.semanticscholar.org/7fb5/006b6522436ece5bedf509e79bdb7b79c9a7.pdf,,,https://pdfs.semanticscholar.org/7fb5/006b6522436ece5bedf509e79bdb7b79c9a7.pdf
+7f4bc8883c3b9872408cc391bcd294017848d0cf,https://pdfs.semanticscholar.org/7f4b/c8883c3b9872408cc391bcd294017848d0cf.pdf,,,https://pdfs.semanticscholar.org/7f4b/c8883c3b9872408cc391bcd294017848d0cf.pdf
+7f445191fa0475ff0113577d95502a96dc702ef9,https://arxiv.org/pdf/1805.04026.pdf,,,https://arxiv.org/pdf/1805.04026.pdf
+7f203f2ff6721e73738720589ea83adddb7fdd27,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1301513,,
+7fab17ef7e25626643f1d55257a3e13348e435bd,https://arxiv.org/pdf/1702.08423.pdf,,,https://arxiv.org/pdf/1702.08423.pdf
+7fb7ccc1aa093ca526f2d8b6f2c404d2c886f69a,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8404767,,
+7f44e5929b11ce2192c3ae81fbe602081a7ab5c4,,,http://doi.org/10.1007/s11554-016-0645-4,
+7fe2ab9f54242ef8609ef9bf988f008c7d42407c,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8382330,,
+7f904093e6933cab876e87532111db94c71a304f,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6117544,,
+7f26c615dd187ca5e4b15759d5cb23ab3ea9d9a9,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7781761,,
+7f2a234ad5c256733a837dbf98f25ed5aad214e8,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7207289,,
+7f5b379b12505d60f9303aab1fea48515d36d098,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411873,,
+7f68a5429f150f9eb7550308bb47a363f2989cb3,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6977004,,
+7a81967598c2c0b3b3771c1af943efb1defd4482,https://arxiv.org/pdf/1503.01508.pdf,,,https://arxiv.org/pdf/1503.01508.pdf
+7a3d46f32f680144fd2ba261681b43b86b702b85,https://arxiv.org/pdf/1805.01282.pdf,,,https://arxiv.org/pdf/1805.01282.pdf
+7a97de9460d679efa5a5b4c6f0b0a5ef68b56b3b,https://arxiv.org/pdf/1709.08129.pdf,,,https://arxiv.org/pdf/1709.08129.pdf
+7acbf0b060e948589b38d5501ca217463cfd5c2f,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6940304,,
+7ac4fc169fffa8e962b9df94f61e2adf6bac8f97,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8453893,,
+7ac9aaafe4d74542832c273acf9d631cb8ea6193,https://arxiv.org/pdf/1809.04185.pdf,,,https://arxiv.org/pdf/1809.04185.pdf
+7a7b1352d97913ba7b5d9318d4c3d0d53d6fb697,https://arxiv.org/pdf/1807.07320.pdf,,,https://arxiv.org/pdf/1807.07320.pdf
+7aa062c6c90dba866273f5edd413075b90077b51,https://pdfs.semanticscholar.org/7aa0/62c6c90dba866273f5edd413075b90077b51.pdf,,,https://pdfs.semanticscholar.org/7aa0/62c6c90dba866273f5edd413075b90077b51.pdf
+7a131fafa7058fb75fdca32d0529bc7cb50429bd,https://arxiv.org/pdf/1704.04086.pdf,,,https://arxiv.org/pdf/1704.04086.pdf
+141cb9ee401f223220d3468592effa90f0c255fa,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7815403,,
+1442319de86d171ce9595b20866ec865003e66fc,https://pdfs.semanticscholar.org/1442/319de86d171ce9595b20866ec865003e66fc.pdf,,,https://pdfs.semanticscholar.org/1442/319de86d171ce9595b20866ec865003e66fc.pdf
+14aed1b7c08c941b1d2ba6c1c2ffb6255c306c74,,,http://doi.org/10.1007/s00138-016-0820-4,
+14fee990a372bcc4cb6dc024ab7fc4ecf09dba2b,https://arxiv.org/pdf/1806.11008.pdf,,,https://arxiv.org/pdf/1806.11008.pdf
+14ee4948be56caeb30aa3b94968ce663e7496ce4,https://pdfs.semanticscholar.org/14ee/4948be56caeb30aa3b94968ce663e7496ce4.pdf,,,https://pdfs.semanticscholar.org/14ee/4948be56caeb30aa3b94968ce663e7496ce4.pdf
+8e63868e552e433dc536ba732f4c2af095602869,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1699730,,
+8eb40d0a0a1339469a05711f532839e8ffd8126c,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7890464,,
+8e3d0b401dec8818cd0245c540c6bc032f169a1d,https://arxiv.org/pdf/1702.08398.pdf,,,https://arxiv.org/pdf/1702.08398.pdf
+8e3c97e420e0112c043929087d6456d8ab61e95c,https://pdfs.semanticscholar.org/0e44/90f7616634e06a0b89eedbe37433d7f5392d.pdf,,,https://pdfs.semanticscholar.org/0e44/90f7616634e06a0b89eedbe37433d7f5392d.pdf
+8e0ab1b08964393e4f9f42ca037220fe98aad7ac,https://arxiv.org/pdf/1712.04695.pdf,,,https://arxiv.org/pdf/1712.04695.pdf
+8e452379fda31744d4a4383fcb8a9eab6dbc4ae4,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4586390,,
+8ed32c8fad924736ebc6d99c5c319312ba1fa80b,https://pdfs.semanticscholar.org/8ed3/2c8fad924736ebc6d99c5c319312ba1fa80b.pdf,,,https://pdfs.semanticscholar.org/8ed3/2c8fad924736ebc6d99c5c319312ba1fa80b.pdf
+8e24db957be2b643db464cc566bfabc650f1ffac,,,,
+8e36100cb144685c26e46ad034c524b830b8b2f2,https://pdfs.semanticscholar.org/8e36/100cb144685c26e46ad034c524b830b8b2f2.pdf,,,https://pdfs.semanticscholar.org/8e36/100cb144685c26e46ad034c524b830b8b2f2.pdf
+8ed33184fccde677ec8413ae06f28ea9f2ca70f3,https://arxiv.org/pdf/1712.00796.pdf,,,https://arxiv.org/pdf/1712.00796.pdf
+8e0becfc5fe3ecdd2ac93fabe34634827b21ef2b,https://arxiv.org/pdf/1711.10520.pdf,,,https://arxiv.org/pdf/1711.10520.pdf
+2227f978f084ebb18cb594c0cfaf124b0df6bf95,https://pdfs.semanticscholar.org/2227/f978f084ebb18cb594c0cfaf124b0df6bf95.pdf,,,https://pdfs.semanticscholar.org/2227/f978f084ebb18cb594c0cfaf124b0df6bf95.pdf
+22648dcd3100432fe0cc71e09de5ee855c61f12b,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8393188,,
+228ea13041910c41b50d0052bdce924037c3bc6a,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8434495,,
+2201f187a7483982c2e8e2585ad9907c5e66671d,https://pdfs.semanticscholar.org/1cad/9aa5095733b56e998ad0cd396e89c2bc9928.pdf,,,https://pdfs.semanticscholar.org/1cad/9aa5095733b56e998ad0cd396e89c2bc9928.pdf
+227b1a09b942eaf130d1d84cdcabf98921780a22,https://pdfs.semanticscholar.org/227b/1a09b942eaf130d1d84cdcabf98921780a22.pdf,,,https://pdfs.semanticscholar.org/227b/1a09b942eaf130d1d84cdcabf98921780a22.pdf
+22e121a8dea49e3042de305574356477ecacadda,,,http://doi.org/10.1007/s00138-018-0935-x,
+25ff865460c2b5481fa4161749d5da8501010aa0,https://arxiv.org/pdf/1702.07971.pdf,,,https://arxiv.org/pdf/1702.07971.pdf
+25960f0a2ed38a89fa8076a448ca538de2f1e183,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411220,,
+2563fc1797f187e2f6f9d9f4387d4bcadd3fbd02,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7410635,,
+2588acc7a730d864f84d4e1a050070ff873b03d5,https://pdfs.semanticscholar.org/2588/acc7a730d864f84d4e1a050070ff873b03d5.pdf,,,https://pdfs.semanticscholar.org/2588/acc7a730d864f84d4e1a050070ff873b03d5.pdf
+25982e2bef817ebde7be5bb80b22a9864b979fb0,https://arxiv.org/pdf/1709.05731.pdf,,,https://arxiv.org/pdf/1709.05731.pdf
+2564920d6976be68bb22e299b0b8098090bbf259,,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8407761,,
diff --git a/scraper/reports/pdf_institutions_deduped.csv b/scraper/reports/pdf_institutions_deduped.csv
new file mode 100644
index 00000000..6a5e23e0
--- /dev/null
+++ b/scraper/reports/pdf_institutions_deduped.csv
@@ -0,0 +1,1676 @@
+Canonical Name,Name,Address,Lat,Lng
+Aalborg University,Aalborg University,"AAU, Pontoppidanstræde, Sønder Tranders, Aalborg, Aalborg Kommune, Region Nordjylland, 9220, Danmark",57.0,10.0
+Aalborg University,"Aalborg University, Denmark","AAU, Pontoppidanstræde, Sønder Tranders, Aalborg, Aalborg Kommune, Region Nordjylland, 9220, Danmark",57.0,10.0
+Aalto University,AALTO UNIVERSITY,"Aalto, 24, Otakaari, Otaniemi, Suur-Tapiola, Espoo, Helsingin seutukunta, Uusimaa, Etelä-Suomi, Manner-Suomi, 02150, Suomi",60.2,24.8
+Aalto University,Aalto University,"Aalto, 24, Otakaari, Otaniemi, Suur-Tapiola, Espoo, Helsingin seutukunta, Uusimaa, Etelä-Suomi, Manner-Suomi, 02150, Suomi",60.2,24.8
+Aalto University,"Aalto University, Finland","Aalto, 24, Otakaari, Otaniemi, Suur-Tapiola, Espoo, Helsingin seutukunta, Uusimaa, Etelä-Suomi, Manner-Suomi, 02150, Suomi",60.2,24.8
+Aberystwyth University,Aberystwyth University,"Aberystwyth University, Llanbadarn Campus, Cefn Esgair, Waun Fawr, Comins Coch, Ceredigion, Wales, SY23 3JG, UK",52.4,-4.1
+Aberystwyth University,"Aberystwyth University, UK","Aberystwyth University, Llanbadarn Campus, Cefn Esgair, Waun Fawr, Comins Coch, Ceredigion, Wales, SY23 3JG, UK",52.4,-4.1
+AGH University of Science and Technology,AGH University of Science and Technology,"AGH, Władysława Reymonta, Czarna Wieś, Krowodrza, Kraków, małopolskie, 30-059, RP",50.1,19.9
+AGH University of Science and Technology,"AGH University of Science and Technology, Kraków, Poland","AGH, Władysława Reymonta, Czarna Wieś, Krowodrza, Kraków, małopolskie, 30-059, RP",50.1,19.9
+Ahmedabad University,Ahmedabad University,"School of Science and Technology, University Road, Gurukul, Gulbai tekra, Ahmedabad, Ahmedabad District, Gujarat, 380001, India",23.0,72.6
+Ahmedabad University,"Ahmedabad University, Gujarat, India 380009","School of Science and Technology, University Road, Gurukul, Gulbai tekra, Ahmedabad, Ahmedabad District, Gujarat, 380001, India",23.0,72.6
+Ajou University,Ajou University,"아주대학교, 성호대교, 이의동, 영통구, 수원시, 경기, 16499, 대한민국",37.3,127.0
+Akita Prefectural University,Akita Prefectural University,"秋田県立大学, 秋田天王線, 潟上市, 秋田県, 東北地方, 011-0946, 日本",39.8,140.0
+Akita Prefectural University,"Akita Prefectural University, Yurihonjo, Japan","秋田県立大学, 日本海東北自動車道(無料区間), 八幡前, 由利本荘市, 秋田県, 東北地方, 〒015-0836, 日本",39.4,140.1
+Akita University,Akita University,"秋田大学手形キャンパス, 秋田八郎潟線, 手形字扇田, 広面, 秋田市, 秋田県, 東北地方, 010-0864, 日本",39.7,140.1
+Akita University,"Akita University, Akita, Japan","秋田大学鉱業博物館, 2, 秋田八郎潟線, 手形字扇田, 広面, 秋田市, 秋田県, 東北地方, 010-8502, 日本",39.7,140.1
+Alexandria University,Alexandria University,"جامعة الإسكندرية, الكورنيش, إبراهيمية, الإسكندرية, 21522, مصر",31.2,29.9
+Alexandria University,"Alexandria University, Alexandria, Egypt","جامعة الإسكندرية, الكورنيش, إبراهيمية, الإسكندرية, 21522, مصر",31.2,29.9
+"Alibaba Group, Hangzhou, China","Alibaba Group, Hangzhou, China","Alibaba Group, 五常街道, 余杭区 (Yuhang), 杭州市 Hangzhou, 浙江省, 中国",30.3,120.0
+"Amazon, Inc.","Amazon, Berkshire, U.K.","Amazon Logistics, Exeter Road, Theale, West Berkshire, South East, England, RG7 4PL, UK",51.4,-1.1
+American University,American University,"American University, 4400, Massachusetts Avenue Northwest, Spring Valley, American University Park, D.C., 20016, USA",38.9,-77.1
+American University,"American University, Washington, DC, USA","American University, 4400, Massachusetts Avenue Northwest, Spring Valley, American University Park, D.C., 20016, USA",38.9,-77.1
+American University in Cairo,The American University in Cairo,"الجامعة الأمريكية بالقاهرة, شارع القصر العينى, القاهرة القديمة, جاردن سيتي, القاهرة, محافظة القاهرة, 11582, مصر",30.0,31.2
+American University in Cairo,"The American University in Cairo, Egypt","الجامعة الأمريكية بالقاهرة, شارع القصر العينى, القاهرة القديمة, جاردن سيتي, القاهرة, محافظة القاهرة, 11582, مصر",30.0,31.2
+Amherst College,Amherst College,"Amherst College, Boltwood Avenue, Amherst, Hampshire, Massachusetts, 01004, USA",42.4,-72.5
+Amirkabir University of Technology,Amirkabir University of Technology,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎",35.7,51.4
+Amirkabir University of Technology,"Amirkabir University of Technology, Tehran","دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎",35.7,51.4
+Amirkabir University of Technology,"Amirkabir University of Technology, Tehran, Iran","دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎",35.7,51.4
+Amirkabir University of Technology,"Amirkabir University of Technology, Tehran. Iran","دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎",35.7,51.4
+Amity University,Amity University,"Amity University, Faizabad Road, Uttardhauna, Gomti Nagar, Tiwariganj, Lucknow, Uttar Pradesh, 226010, India",26.9,81.0
+Amity University,"Amity University, Lucknow, India","Amity University, Faizabad Road, Uttardhauna, Gomti Nagar, Tiwariganj, Lucknow, Uttar Pradesh, 226010, India",26.9,81.0
+Amity University Uttar Pradesh,Amity University Uttar Pradesh,"Amity University, Noida, Greater Noida Expressway, Noida Special Economic Zone, Bakhtawarpur, Ghaziabad, Uttar Pradesh, 201304, India",28.5,77.3
+Amity University Uttar Pradesh,"Amity University Uttar Pradesh, Noida","Amity University, Noida, Greater Noida Expressway, Noida Special Economic Zone, Bakhtawarpur, Ghaziabad, Uttar Pradesh, 201304, India",28.5,77.3
+Anhui Polytechnic University,Anhui Polytechnic University,"安徽工程大学, 鸠江北路, 芜湖市, 芜湖市区, 芜湖市 / Wuhu, 安徽省, 241000, 中国",31.3,118.4
+Anhui Polytechnic University,"Anhui Polytechnic University, Wuhu, China","安徽工程大学, 鸠江北路, 芜湖市, 芜湖市区, 芜湖市 / Wuhu, 安徽省, 241000, 中国",31.3,118.4
+Anhui University,Anhui University,"安徽大学(磬苑校区), 111, 九龙路, 弘泰苑, 合肥国家级经济技术开发区, 芙蓉社区, 合肥经济技术开发区, 合肥市区, 合肥市, 安徽省, 230601, 中国",31.8,117.2
+Anhui University,"Anhui University, Hefei, China","安徽大学(磬苑校区), 111, 九龙路, 弘泰苑, 合肥国家级经济技术开发区, 芙蓉社区, 合肥经济技术开发区, 合肥市区, 合肥市, 安徽省, 230601, 中国",31.8,117.2
+Anna University,Anna University,"Anna University, Nuclear Physics Road, Srinagar Colony, Ward 171, Zone 13 Adyar, Chennai, Chennai district, Tamil Nadu, 600025, India",13.0,80.2
+Anna University,Anna University Chennai,"Anna University, Nuclear Physics Road, Srinagar Colony, Ward 171, Zone 13 Adyar, Chennai, Chennai district, Tamil Nadu, 600025, India",13.0,80.2
+Anna University,"Anna University Chennai, India","Anna University, Nuclear Physics Road, Srinagar Colony, Ward 171, Zone 13 Adyar, Chennai, Chennai district, Tamil Nadu, 600025, India",13.0,80.2
+Anna University,"Anna University, Chennai","Anna University, Nuclear Physics Road, Srinagar Colony, Ward 171, Zone 13 Adyar, Chennai, Chennai district, Tamil Nadu, 600025, India",13.0,80.2
+Aristotle University of Thessaloniki,ARISTOTLE UNIVERSITY OF THESSALONIKI,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.6,23.0
+Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.6,23.0
+Aristotle University of Thessaloniki,Aristotle University of Thessaloniki GR,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.6,23.0
+Aristotle University of Thessaloniki,"Aristotle University of Thessaloniki, GR-54124 Thessaloniki, Greece","Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.6,23.0
+Aristotle University of Thessaloniki,"Aristotle University of Thessaloniki, Greece","Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.6,23.0
+Aristotle University of Thessaloniki,"Aristotle University of Thessaloniki, Thessaloniki, 54124, Greece","Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.6,23.0
+Aristotle University of Thessaloniki,"Aristotle University of Thessaloniki, Thessaloniki, Greece","Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.6,23.0
+Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.3,-111.7
+Arizona State University,"Arizona State University, AZ, USA","Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.3,-111.7
+Arizona State University,"Arizona State University, Tempe AZ","Arizona State University, Palm Walk, Tempe, Maricopa County, Arizona, 85287, USA",33.4,-111.9
+Arizona State University,"Arizona State University, Tempe, AZ, USA","Arizona State University, Palm Walk, Tempe, Maricopa County, Arizona, 85287, USA",33.4,-111.9
+Asia Pacific University of Technology and Innovation,Asia Pacific University of Technology and Innovation,"Asia Pacific University of Technology and Innovation (APU), Astro North Entrance, Astro, Sungai Besi, KL, 57000, Malaysia",3.1,101.7
+Asia Pacific University of Technology and Innovation,"Asia Pacific University of Technology and Innovation, Kuala Lumpur 57000, Malaysia","Asia Pacific University of Technology and Innovation (APU), Astro North Entrance, Astro, Sungai Besi, KL, 57000, Malaysia",3.1,101.7
+Assiut University,Assiut University,"Assiut University, El Shaheed Ellwaa Hasn Kamel street, الوليدية, أسيوط, مصر",27.2,31.2
+Assiut University,"Assiut University, Asyut, Egypt","Assiut University, El Shaheed Ellwaa Hasn Kamel street, الوليدية, أسيوط, مصر",27.2,31.2
+Aston University,Aston University,"Aston University, Aston Street, Digbeth, Birmingham, West Midlands Combined Authority, West Midlands, England, B4, UK",52.5,-1.9
+Aston University,"Aston University, Birmingham, U.K.","Aston University, Aston Street, Digbeth, Birmingham, West Midlands Combined Authority, West Midlands, England, B4, UK",52.5,-1.9
+Australian Institute of Sport,Australian Institute of Sport,"Australian Institute of Sport, Glenn McGrath Street, Bruce, Belconnen, Australian Capital Territory, 2617, Australia",-35.2,149.1
+Australian National University,The Australian National University,"Australian National University, 52, Collins Street, Melbourne City, City of Melbourne, Victoria, 3000, Australia",-37.8,145.0
+Australian National University,The Australian National University Canberra ACT 2601,"Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia",-35.3,149.1
+Australian National University,"The Australian National University Canberra ACT 2601, Australia","Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia",-35.3,149.1
+Australian National University,"The Australian National University, Canberra, ACT, Australia","Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia",-35.3,149.1
+Australian National University,"The Australian National University, Canberra, Australia","Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia",-35.3,149.1
+"Australian National University, Canberra","Australian National University, Canberra","Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia",-35.3,149.1
+"Australian National University, Canberra","Australian National University, Canberra, ACT 0200, Australia","Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia",-35.3,149.1
+"Australian National University, Melbourne","Australian National University, Melbourne","Australian National University, 52, Collins Street, Melbourne City, City of Melbourne, Victoria, 3000, Australia",-37.8,145.0
+Azad University,Azad University,"پل دانشگاه آزاد, باراجین, پونک ۳, قزوین, بخش مرکزی, شهرستان قزوین, استان قزوین, ‏ایران‎",36.3,50.0
+Azad University,"Azad University, Qazvin, Iran","پل دانشگاه آزاد, باراجین, پونک ۳, قزوین, بخش مرکزی, شهرستان قزوین, استان قزوین, ‏ایران‎",36.3,50.0
+Azad University,"Central Tehran Branch, Azad University","دانشگاه آزاد شعبه مرکزی تربیت بدنی, بلوار ایران زمین, شهرک غرب, منطقه ۲ شهر تهران, تهران, بخش رودبارقصران, شهرستان شمیرانات, استان تهران, 14658, ‏ایران‎",35.8,51.4
+Bahcesehir University,Bahcesehir University,"BAU Galata, 24, Kemeraltı Caddesi, Müeyyedzade, Beyoğlu, İstanbul, Marmara Bölgesi, 34425, Türkiye",41.0,29.0
+Bahcesehir University,"Bahcesehir University, Istanbul, Turkey","BAU Galata, 24, Kemeraltı Caddesi, Müeyyedzade, Beyoğlu, İstanbul, Marmara Bölgesi, 34425, Türkiye",41.0,29.0
+Banaras Hindu University,Banaras Hindu University,"काशी हिन्दू विश्वविद्यालय, Semi Circle Road 2, ワーラーナシー, Jodhpur Colony, Vārānasi, Varanasi, Uttar Pradesh, 221005, India",25.3,83.0
+Bangalore Institute of Technology,Bangalore Institute of Technology,"Bangalore Institute of Technology, Krishna Rajendra Road, Mavalli, Vishveshwara Puram, South Zone, Bengaluru, Bangalore Urban, Karnataka, 560004, India",13.0,77.6
+"Bapuji Institute of Engineering and Technology Davanagere, Karnataka, India","Bapuji Institute of Engineering and Technology Davanagere, Karnataka, India","Bapuji Institute of Engineering and Technology, 2nd Cross Road, K.T. Jambanna Nagara, Davanagere, Davanagere taluku, Davanagere district, Karnataka, 577000, India",14.4,75.9
+Bar-Ilan University,Bar-Ilan University,"אוניברסיטת בר אילן, כביש גהה, גבעת שמואל, קריית מטלון, גבעת שמואל, מחוז תל אביב, NO, ישראל",32.1,34.8
+Bar-Ilan University,Bar Ilan University,"אוניברסיטת בר אילן, כביש גהה, גבעת שמואל, קריית מטלון, גבעת שמואל, מחוז תל אביב, NO, ישראל",32.1,34.8
+Bar-Ilan University,"Bar Ilan University, Israel","אוניברסיטת בר אילן, כביש גהה, גבעת שמואל, קריית מטלון, גבעת שמואל, מחוז תל אביב, NO, ישראל",32.1,34.8
+Beckman Institute,Beckman Institute,"Beckman Institute, The Presidents' Walk, Urbana, Champaign County, Illinois, 61801-2341, USA",40.1,-88.2
+Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",40.0,116.3
+Beihang University,"Beihang University, Beijing 100191, China","北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",40.0,116.3
+Beihang University,"Beihang University, Beijing, China","北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",40.0,116.3
+Beijing Institute of Technology,Beijing Institute of Technology University,"北京理工大学, 5, 中关村南大街, 中关村, 稻香园南社区, 海淀区, 北京市, 100872, 中国",40.0,116.3
+Beijing Institute of Technology,"Beijing Institute of Technology University, P. R. China","北京理工大学, 5, 中关村南大街, 中关村, 稻香园南社区, 海淀区, 北京市, 100872, 中国",40.0,116.3
+Beijing Institute of Technology,"Beijing Institute of Technology, Beijing 100081 CHINA","北京理工大学, 5, 中关村南大街, 中关村, 稻香园南社区, 海淀区, 北京市, 100872, 中国",40.0,116.3
+Beijing Institute of Technology,"Beijing Institute of Technology, Beijing, China","北京理工大学, 5, 中关村南大街, 中关村, 稻香园南社区, 海淀区, 北京市, 100872, 中国",40.0,116.3
+Beijing Institute of Technology,"Beijing Institute of Technology, China","北京理工大学, 5, 中关村南大街, 中关村, 稻香园南社区, 海淀区, 北京市, 100872, 中国",40.0,116.3
+Beijing Jiaotong University,Beijing Jiaotong University,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国",39.9,116.3
+Beijing Jiaotong University,"Beijing Jiaotong University, Beijing, 100044, China","北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国",39.9,116.3
+Beijing Normal University,Beijing Normal University,"北京师范大学, 19, 新街口外大街, 西城区, 100875, 中国",40.0,116.4
+Beijing Normal University,"Beijing Normal University, China","北京师范大学, 19, 新街口外大街, 西城区, 100875, 中国",40.0,116.4
+Beijing Union University,Beijing Union University,"北京联合大学, 北四环东路, 飘亮阳光广场, 太阳宫乡, 朝阳区 / Chaoyang, 北京市, 100012, 中国",40.0,116.4
+Beijing Union University,"Beijing Union University, 100101, China","北京联合大学, 北四环东路, 飘亮阳光广场, 太阳宫乡, 朝阳区 / Chaoyang, 北京市, 100012, 中国",40.0,116.4
+Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",40.0,116.4
+Beijing University of Posts and Telecommunications,"Beijing University of Posts and Telecommunications, Beijing","北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",40.0,116.4
+Beijing University of Posts and Telecommunications,"Beijing University of Posts and Telecommunications, Beijing, China","北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",40.0,116.4
+Beijing University of Posts and Telecommunications,"Beijing University of Posts and Telecommunications, Beijing, P.R. China","北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",40.0,116.4
+Beijing University of Posts and Telecommunications,"Beijing University of Posts and Telecommunications, China","北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",40.0,116.4
+Beijing University of Technology,Beijing University of Technology,"北京工业大学, 银杏大道, 大郊亭村, 朝阳区 / Chaoyang, 北京市, 3208, 中国",39.9,116.5
+Beijing University of Technology,"Beijing University of Technology, Beijing 100022, China","北京工业大学, 银杏大道, 大郊亭村, 朝阳区 / Chaoyang, 北京市, 3208, 中国",39.9,116.5
+"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.9,116.4
+"Beijing, Haidian, China","Beijing, Haidian, China","北京师范大学, 19, 新街口外大街, 西城区, 100875, 中国",40.0,116.4
+Benha University,Benha University,"كلية الهندسة بشبرا جامعة بنها, شارع اليازجي, روض الفرج, القاهرة, محافظة القاهرة, 2466, مصر",30.1,31.2
+Benha University,"Benha University, Egypt","كلية الهندسة بشبرا جامعة بنها, شارع اليازجي, روض الفرج, القاهرة, محافظة القاهرة, 2466, مصر",30.1,31.2
+Bharathidasan University,Bharathidasan University,"Bharathidasan University Road, Kajamalai, Ponmalai, Ponmalai Zone, Tiruchchirāppalli, Tiruchchirappalli district, Tamil Nadu, 620020, India",10.8,78.7
+Bharathidasan University,"Bharathidasan University, Trichy, India","Bharathidasan University Road, Kajamalai, Ponmalai, Ponmalai Zone, Tiruchchirāppalli, Tiruchchirappalli district, Tamil Nadu, 620020, India",10.8,78.7
+Bielefeld University,Bielefeld University,"Fachhochschule Bielefeld FB Gestaltung, 3, Lampingstraße, Mitte, Bielefeld, Regierungsbezirk Detmold, Nordrhein-Westfalen, 33615, Deutschland",52.0,8.5
+Bilkent University,Bilkent University,"Bilkent Üniversitesi, 3. Cadde, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.9,32.8
+Bilkent University,"Bilkent University, 06800 Cankaya, Turkey","Bilkent Üniversitesi, 3. Cadde, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.9,32.8
+Bilkent University,of bilkent university,"Bilkent Üniversitesi, 3. Cadde, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.9,32.8
+Binghamton University,Binghamton University,"Binghamton University Downtown Center, Washington Street, Downtown, Binghamton, Broome County, New York, 13901, USA",42.1,-75.9
+Binghamton University,"Binghamton University, Binghamton, NY","Binghamton University Downtown Center, Washington Street, Downtown, Binghamton, Broome County, New York, 13901, USA",42.1,-75.9
+Bogazici University,Bogazici University,"Boğaziçi Üniversitesi Kuzey Yerleşkesi, Okulaltı 1. Sokak, Rumelihisarı, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34467, Türkiye",41.1,29.0
+Bogazici University,"Bogazici University, Bebek","Boğaziçi Üniversitesi Kuzey Yerleşkesi, Okulaltı 1. Sokak, Rumelihisarı, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34467, Türkiye",41.1,29.0
+Bogazici University,"Bogazici University, Turkey","Boğaziçi Üniversitesi Güney Yerleşkesi, Sehitlikdergahı Sokağı, Beşiktaş, İstanbul, Marmara Bölgesi, 33345, Türkiye",41.1,29.1
+Boston College,Boston College,"Boston College, 140, Commonwealth Avenue, Chestnut Hill, Newton, Middlesex County, Massachusetts, 02467, USA",42.3,-71.2
+Boston College,"Boston College, USA","Boston College, 140, Commonwealth Avenue, Chestnut Hill, Newton, Middlesex County, Massachusetts, 02467, USA",42.3,-71.2
+Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.4,-71.1
+Boston University,"Boston University, Boston, MA","BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.4,-71.1
+Boston University,"Boston University, USA","BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.4,-71.1
+Bournemouth University,Bournemouth University,"Bournemouth University, BU footpaths, Poole, South West England, England, BH10 4HX, UK",50.7,-1.9
+Bournemouth University,"Bournemouth University, UK","Bournemouth University, BU footpaths, Poole, South West England, England, BH10 4HX, UK",50.7,-1.9
+Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.8,-71.4
+Brown University,"Brown University, Providence Rhode Island, 02912, USA","Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.8,-71.4
+Brown University,"Brown University, Providence, RI","Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.8,-71.4
+Brown University,"Brown University, United States","Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.8,-71.4
+Brunel University,Brunel University,"Brunel University London, The Strip, Hillingdon, London, Greater London, England, UB8 3PH, UK",51.5,-0.5
+California Institute of Technology,CALIFORNIA INSTITUTE OF TECHNOLOGY,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.1,-118.1
+California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.1,-118.1
+California Institute of Technology,"California Institute of Technology, Pasadena, CA","California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.1,-118.1
+California Institute of Technology,"California Institute of Technology, Pasadena, CA, USA","California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.1,-118.1
+California Institute of Technology,"California Institute of Technology, Pasadena, California, USA","California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.1,-118.1
+California Institute of Technology,"California Institute of Technology, USA","California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.1,-118.1
+Cambridge Research Laboratory,Cambridge Research Laboratory,"Strangeways Research Laboratory, Babraham Road, Romsey, Cambridge, Cambridgeshire, East of England, England, CB1 8RN, UK",52.2,0.1
+Cambridge University,Cambridge University,"University of Cambridge, The Old Schools, Trinity Lane, Cambridge, Cambridgeshire, East of England, England, CB2 1TN, UK",52.2,0.1
+Capital Normal University,Capital Normal University,"首都师范大学, 岭南路, 西冉村, 海淀区, 100048, 中国",39.9,116.3
+Capital Normal University,"Capital Normal University, 100048, China","首都师范大学, 岭南路, 西冉村, 海淀区, 100048, 中国",39.9,116.3
+Cardiff University,Cardi University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.5,-3.2
+Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.5,-3.2
+Cardiff University,"Cardiff University, UK","Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.5,-3.2
+Carleton University,Carleton University,"Carleton University, 1125, Colonel By Drive, Billings Bridge, Capital, Ottawa, Ontario, K1S 5B7, Canada",45.4,-75.7
+Carnegie Mellon University,CARNEGIE MELLON UNIVERSITY,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.4,-122.1
+Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.4,-122.1
+Carnegie Mellon University,"Carnegie Mellon University Pittsburgh, PA - 15213, USA","Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA",40.4,-79.9
+Carnegie Mellon University,"Carnegie Mellon University Pittsburgh, PA, USA","Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA",40.4,-79.9
+Carnegie Mellon University,"Carnegie Mellon University, Pittsburgh PA","Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA",40.4,-79.9
+Carnegie Mellon University,"Carnegie Mellon University, Pittsburgh, PA","Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA",40.4,-79.9
+Carnegie Mellon University,"Carnegie Mellon University, Pittsburgh, PA 15213, USA","Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA",40.4,-79.9
+Carnegie Mellon University,"Carnegie Mellon University, Pittsburgh, PA, 15213, USA","Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA",40.4,-79.9
+Carnegie Mellon University,"Carnegie Mellon University, Pittsburgh, PA, USA","Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA",40.4,-79.9
+Carnegie Mellon University,"Carnegie Mellon University, Pittsburgh, Pennsylvania 15213, USA","Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA",40.4,-79.9
+Carnegie Mellon University,"Carnegie Mellon University, Pittsburgh, USA","Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA",40.4,-79.9
+Carnegie Mellon University,"Carnegie Mellon University, USA","Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.4,-122.1
+Carnegie Mellon University,The Robotics Institute,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA",40.4,-79.9
+Carnegie Mellon University ,Carnegie Mellon University Pittsburgh,"Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA",40.4,-79.9
+Central Washington University,Central Washington University,"Central Washington University, Dean Nicholson Boulevard, Ellensburg, Kittitas County, Washington, 98926, USA",47.0,-120.5
+Chang Gung University,Chang Gung University,"長庚科技大學林口校區, 261, 文化一路, A7合宜住宅, 樂善里, 木尾, 龜山區, 桃園市, 33301, 臺灣",25.0,121.4
+Chang Gung University,"Chang Gung University, Taoyuan, Taiwan","長庚科技大學林口校區, 261, 文化一路, A7合宜住宅, 樂善里, 木尾, 龜山區, 桃園市, 33301, 臺灣",25.0,121.4
+Charles Sturt University,Charles Sturt University,"Charles Sturt University, Wagga Wagga, NSW, 2678, Australia",-35.1,147.4
+China University of Mining and Technology,China University of Mining and Technology,"China University of Mining and Technology, 1号, 大学路, 泉山区 (Quanshan), 徐州市 / Xuzhou, 江苏省, 221116, 中国",34.2,117.1
+China University of Mining and Technology,"China University of Mining and Technology, Xuzhou, China","China University of Mining and Technology, 1号, 大学路, 泉山区 (Quanshan), 徐州市 / Xuzhou, 江苏省, 221116, 中国",34.2,117.1
+Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.0,116.4
+Chinese Academy of Sciences,"Chinese Academy of Sciences, Beijing","中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.0,116.4
+Chinese Academy of Sciences,"Chinese Academy of Sciences, China","中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.0,116.4
+Chinese Academy of Sciences,Institute of Computing Technology,"中国科学院计算技术研究所, 6, 科学院南路, 海淀区, 北京市, 100190, 中国",40.0,116.3
+Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.4,114.2
+Chinese University of Hong Kong,the Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.4,114.2
+Chinese University of Hong Kong,"The Chinese University of Hong Kong, China","中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.4,114.2
+Chinese University of Hong Kong,"The Chinese University of Hong Kong, Hong Kong","中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.4,114.2
+Chinese University of Hong Kong,"The Chinese University of Hong Kong, Hong Kong, China","香港中文大學 Chinese University of Hong Kong, 車站路 Station Road, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.4,114.2
+Chinese University of Hong Kong,"The Chinese University of Hong Kong, New Territories, Hong Kong","香港中文大學 Chinese University of Hong Kong, 車站路 Station Road, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.4,114.2
+Chittagong University of Engineering and Technology,Chittagong University of Engineering and Technology,"Shaheed Tareq Huda Hall, Goal Chattar, চট্টগ্রাম, চট্টগ্রাম জেলা, চট্টগ্রাম বিভাগ, 4349, বাংলাদেশ",22.5,92.0
+Chittagong University of Engineering and Technology,"Chittagong University of Engineering and Technology, Chittagong, 4349, Bangladesh","Shaheed Tareq Huda Hall, Goal Chattar, চট্টগ্রাম, চট্টগ্রাম জেলা, চট্টগ্রাম বিভাগ, 4349, বাংলাদেশ",22.5,92.0
+Chonbuk National University,Chonbuk National University,"전북대학교, 567, 백제대로, 금암동, 덕진구, 전주시, 전북, 54896, 대한민국",35.8,127.1
+Chonbuk National University,"Chonbuk National University, Jeonju-si","전북대학교, 567, 백제대로, 금암동, 덕진구, 전주시, 전북, 54896, 대한민국",35.8,127.1
+Chongqing University,Chongqing University,"重庆大学, 174, 沙正街, 沙坪坝区, 重庆市, 400044, 中国",29.6,106.5
+Chongqing University,"Chongqing University, China","重庆大学, 174, 沙正街, 沙坪坝区, 重庆市, 400044, 中国",29.6,106.5
+Chongqing University,"Chongqing University, Chongqing, China","重庆大学, 174, 沙正街, 沙坪坝区, 重庆市, 400044, 中国",29.6,106.5
+Chongqing University of Posts and Telecommunications,Chongqing University of Posts and Telecommunications,"重庆邮电大学, 崇文路, 渝中区, 黄桷垭, 重庆市中心, 南岸区 (Nan'an), 重庆市, 400065, 中国",29.5,106.6
+Chongqing University of Posts and Telecommunications,"Chongqing University of Posts and Telecommunications, Chongqing, China","重庆邮电大学, 崇文路, 渝中区, 黄桷垭, 重庆市中心, 南岸区 (Nan'an), 重庆市, 400065, 中国",29.5,106.6
+Chosun University,Chosun University,"조선대역, 서남로, 남동, 동구, 광주, 61473, 대한민국",35.1,126.9
+"Chu Hai College of Higher Education, Hong Kong","Chu Hai College of Higher Education, Hong Kong","珠海學院 Chu Hai College of Higher Education, 80, 青盈路 Tsing Ying Road, 嘉和里 Ka Wo Lei, 小秀村 Siu Sau Tsuen, 屯門區 Tuen Mun District, 新界 New Territories, HK, DD132 586, 中国",22.4,114.0
+"Chu Hai College of Higher Education, Hong Kong","Chu Hai College of Higher Education, Tsuen Wan, Hong Kong","珠海學院, 80, 青山公路-青山灣段 Castle Peak Road – Castle Peak Bay, 良田村 Leung Tin Tsuen, 青山灣 Castle Peak Bay, 小秀村 Siu Sau Tsuen, 屯門區 Tuen Mun District, 新界 New Territories, HK, DD132 586, 中国",22.4,114.0
+Chubu University,Chubu University,"中部大学, 国道19号, 春日井市, 愛知県, 中部地方, 487-8501, 日本",35.3,137.0
+Chulalongkorn University,Chulalongkorn University,"จุฬาลงกรณ์มหาวิทยาลัย, 254, ถนนพญาไท, สยาม, แขวงปทุมวัน, เขตปทุมวัน, กรุงเทพมหานคร, 10330, ประเทศไทย",13.7,100.5
+Chulalongkorn University,"Chulalongkorn University, Bangkok","จุฬาลงกรณ์มหาวิทยาลัย, 254, ถนนพญาไท, สยาม, แขวงปทุมวัน, เขตปทุมวัน, กรุงเทพมหานคร, 10330, ประเทศไทย",13.7,100.5
+Chulalongkorn University ,Chulalongkorn University Bangkok,"จุฬาลงกรณ์มหาวิทยาลัย, 254, ถนนพญาไท, สยาม, แขวงปทุมวัน, เขตปทุมวัน, กรุงเทพมหานคร, 10330, ประเทศไทย",13.7,100.5
+Chulalongkorn University ,"Chulalongkorn University Bangkok, Thailand","จุฬาลงกรณ์มหาวิทยาลัย, 254, ถนนพญาไท, สยาม, แขวงปทุมวัน, เขตปทุมวัน, กรุงเทพมหานคร, 10330, ประเทศไทย",13.7,100.5
+Chung-Ang University,Chung-Ang University,"중앙대학교, 서달로15길, 흑석동, 동작구, 서울특별시, 06981, 대한민국",37.5,127.0
+Chung-Ang University,"Chung-Ang University, Seoul, Korea","중앙대학교, 서달로15길, 흑석동, 동작구, 서울특별시, 06981, 대한민국",37.5,127.0
+Chung-Ang University,"Chung-Ang University, Seoul, South Korea","중앙대학교, 서달로15길, 흑석동, 동작구, 서울특별시, 06981, 대한민국",37.5,127.0
+Chungnam National University,Chungnam National University,"충남대학교, 대덕사이언스길 2코스, 온천2동, 온천동, 유성구, 대전, 34140, 대한민국",36.4,127.3
+City College of New York,"The City College of New York, New York, NY 10031, USA","CCNY, 160, Convent Avenue, Manhattanville, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10031, USA",40.8,-74.0
+City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.3,114.2
+City University of Hong Kong,"City University of Hong Kong, Hong Kong","香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.3,114.2
+City University of Hong Kong,"City University of Hong Kong, Hong Kong, China","香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.3,114.2
+City University of Hong Kong,"City University of Hong Kong, Kowloon, Hong Kong","香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.3,114.2
+City University of New York,The City University of New York,"Lehman College of the City University of New York, 250, Bedford Park Boulevard West, Bedford Park, The Bronx, Bronx County, NYC, New York, 10468, USA",40.9,-73.9
+Clemson University,Clemson University,"Clemson University, Old Stadium Road, Clemson Heights, Pickens County, South Carolina, 29631, USA",34.7,-82.8
+Clemson University,"Clemson University, Clemson, SC","E-06 Parking, Parkway Drive, Pickens County, South Carolina, SC, USA",34.7,-82.8
+Coburg University,Coburg University,"Hochschule für angewandte Wissenschaften Coburg, 2, Friedrich-Streib-Straße, Callenberg, Coburg, Oberfranken, Bayern, 96450, Deutschland",50.3,11.0
+"College of Engineering, Pune, India","College of Engineering Pune, India","College of Engineering, Pune, NH753F, Mangalwar Peth, Pune, Pune District, Maharashtra, 411011, India",18.5,73.9
+"College of Engineering, Pune, India","College of Engineering, Pune, India","College of Engineering, Pune, NH753F, Mangalwar Peth, Pune, Pune District, Maharashtra, 411011, India",18.5,73.9
+Colorado State University,Colorado State University,"Colorado State University, West Pitkin Street, Woodwest, Fort Collins, Larimer County, Colorado, 80526-2002, USA",40.6,-105.1
+Colorado State University,"Colorado State University, Fort Collins","Colorado State University, West Pitkin Street, Woodwest, Fort Collins, Larimer County, Colorado, 80526-2002, USA",40.6,-105.1
+Colorado State University,"Colorado State University, Fort Collins, Colorado, USA","Colorado State University, West Pitkin Street, Woodwest, Fort Collins, Larimer County, Colorado, 80526-2002, USA",40.6,-105.1
+Columbia University,COLUMBIA UNIVERSITY,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.8,-73.9
+Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.8,-73.9
+Columbia University,"Columbia University, New York","Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.8,-73.9
+Columbia University,"Columbia University, New York NY 10027, USA","Columbia University, West 131st Street, Manhattanville Houses, Manhattanville, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10027, USA",40.8,-74.0
+Columbia University,"Columbia University, New York, NY","Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.8,-73.9
+Columbia University,"Columbia University, New York, NY 10027, USA","Columbia University, West 131st Street, Manhattanville Houses, Manhattanville, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10027, USA",40.8,-74.0
+Columbia University,"Columbia University, New York, NY, USA","Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.8,-73.9
+Columbia University,"Columbia University, New York, USA","Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.8,-73.9
+Columbia University,"Columbia University, United States","Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.8,-73.9
+Columbia University,"Columbia University, USA","Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.8,-73.9
+Columbia University ,Columbia University in the City of New York,"Columbia University In The City Of New York, College Walk, Morningside Heights, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10027, USA",40.8,-74.0
+Communication University of China,Communication University of China,"中国传媒大学, 朝阳路, 定福庄, 朝阳区 / Chaoyang, 北京市, 100024, 中国",39.9,116.6
+Communication University of China,"Communication University of China, Beijing, China","中国传媒大学, 朝阳路, 定福庄, 朝阳区 / Chaoyang, 北京市, 100024, 中国",39.9,116.6
+"COMSATS Institute of Information Technology, Islamabad","COMSATS Institute of Information Technology, Islamabad","COMSATS Institute of Information Technology, Fence, Chak Shehzad, وفاقی دارالحکومت اسلام آباد, 45550, ‏پاکستان‎",33.7,73.2
+"COMSATS Institute of Information Technology, Lahore",COMSATS Institute of Information Technology,"COMSATS Institute of Information Technology, Ali Akbar Road, Dawood Residency, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54700, ‏پاکستان‎",31.4,74.2
+"COMSATS Institute of Information Technology, Lahore ","COMSATS Institute of Information Technology, Lahore 54000, Pakistan","COMSATS Institute of Information Technology, Ali Akbar Road, Dawood Residency, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54700, ‏پاکستان‎",31.4,74.2
+"COMSATS Institute of Information Technology, Lahore ","COMSATS Institute of Information Technology, Pakistan","COMSATS Institute of Information Technology, Ali Akbar Road, Dawood Residency, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54700, ‏پاکستان‎",31.4,74.2
+Concordia University,Concordia University,"FOFA Gallery, 1515, Rue Sainte-Catherine Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3H 2T2, Canada",45.5,-73.6
+Concordia University,"Concordia University, Canada","FOFA Gallery, 1515, Rue Sainte-Catherine Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3H 2T2, Canada",45.5,-73.6
+Concordia University,"Concordia University, Montreal, QC, Canada","FOFA Gallery, 1515, Rue Sainte-Catherine Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3H 2T2, Canada",45.5,-73.6
+Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.5,-76.5
+Cornell University,"Cornell University, Ithaca, New York","Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.5,-76.5
+Cornell University,"Cornell University, Ithaca, NY, USA","Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.5,-76.5
+Cornell University,"Cornell University, USA","Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.5,-76.5
+Cornell University,of Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.5,-76.5
+Courant Institute of Mathematical Sciences,Courant Institute,"NYU Courant Institute of Mathematical Sciences, 251, Mercer Street, Washington Square Village, Greenwich Village, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.7,-74.0
+Courant Institute of Mathematical Sciences,Courant Institute of Mathematical Sciences,"Courant Institute of Mathematical Sciences, 251, Mercer Street, Washington Square Village, Greenwich Village, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.7,-74.0
+Courant Institute of Mathematical Sciences,"Courant Institute of Mathematical Sciences, New York, NY","Courant Institute of Mathematical Sciences, 251, Mercer Street, Washington Square Village, Greenwich Village, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.7,-74.0
+CUNY City College,CUNY City College,"CUNY City College, 205 East 42nd Street, New York, NY 10017",40.7,-74.0
+Curtin University,Curtin University,"Curtin University, Brand Drive, Waterford, Perth, Western Australia, 6102, Australia",-32.0,115.9
+Curtin University,"Curtin University, Perth WA 6102, Australia","Curtin University, Brand Drive, Waterford, Perth, Western Australia, 6102, Australia",-32.0,115.9
+Curtin University,"Curtin University, Perth WA, Australia","A1, Beazley Avenue, Karawara, Perth, Western Australia, 6102, Australia",-32.0,115.9
+Curtin University,"Curtin University, Perth, Australia","Curtin University, B201 L2 Entry South, Waterford, Perth, Western Australia, 6102, Australia",-32.0,115.9
+Curtin University,"Curtin University, Perth, Western Australia 6012","A1, Beazley Avenue, Karawara, Perth, Western Australia, 6102, Australia",-32.0,115.9
+Cyprus University of Technology,Cyprus University of Technology,"Mitropoli Building - Cyprus University of Technology, Anexartisias, Limasol - Λεμεσός, Limassol - Λεμεσός, Κύπρος - Kıbrıs, 3036, Κύπρος - Kıbrıs",34.7,33.0
+Cyprus University of Technology,"Cyprus University of Technology, Cyprus","Mitropoli Building - Cyprus University of Technology, Anexartisias, Limasol - Λεμεσός, Limassol - Λεμεσός, Κύπρος - Kıbrıs, 3036, Κύπρος - Kıbrıs",34.7,33.0
+Czech Technical University,Czech Technical University,"České vysoké učení technické v Praze, Resslova, Nové Město, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 11121, Česko",50.1,14.4
+Dalian University of Technology,Dalian University of Technology,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国",38.9,121.5
+Dalian University of Technology,"Dalian University of Technology, China","大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国",38.9,121.5
+Dalian University of Technology,"Dalian University of Technology, Dalian 116024, China","大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国",38.9,121.5
+Dalian University of Technology,"Dalian University of Technology, Dalian, China","大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国",38.9,121.5
+Dalian University of Technology,"Dalian University of Technology, Dalian, Liaoning, 116024, China","大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国",38.9,121.5
+Dankook University,Dankook University,"단국대학교 치과병원, 죽전로, 죽전동, 수지구, 용인시, 경기, 16900, 대한민국",37.3,127.1
+Dankook University,"Dankook University, 126 Jukjeon-dong, Suji-gu, Yongin-si, Gyeonggi-do, Korea","단국대학교 치과병원, 죽전로, 죽전동, 수지구, 용인시, 경기, 16900, 대한민국",37.3,127.1
+Dankook University,"Dankook University, Yongin, South Korea","단국대학교 치과병원, 죽전로, 죽전동, 수지구, 용인시, 경기, 16900, 대한민국",37.3,127.1
+Dartmouth College,Dartmouth College,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA",43.7,-72.3
+Dartmouth College,"Dartmouth College, NH 03755 USA","Dartmouth College, Maynard Street, Hanover, Grafton County, New Hampshire, 03755, USA",43.7,-72.3
+Deakin University,Deakin University,"Deakin University, Pigdons Lane, Waurn Ponds, Geelong, City of Greater Geelong, Barwon South West, Victoria, 3216, Australia",-38.2,144.3
+Deakin University,"Deakin University, Geelong, VIC 3216, Australia","Deakin University, Pigdons Lane, Waurn Ponds, Geelong, City of Greater Geelong, Barwon South West, Victoria, 3216, Australia",-38.2,144.3
+Delft University of Technology,Delft University of Technology,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland",52.0,4.4
+Delft University of Technology,"Delft University of Technology, Mekelweg 4, Netherlands","TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland",52.0,4.4
+Delft University of Technology,"Delft University of Technology, The Netherlands","TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland",52.0,4.4
+Democritus University of Thrace,Democritus University of Thrace,"Δημοκρίτειο Πανεπιστήμιο Θράκης, Μάκρη - Αλεξανδρούπολη, Αλεξανδρούπολη, Δήμος Αλεξανδρούπολης, Περιφερειακή Ενότητα Έβρου, Περιφέρεια Ανατολικής Μακεδονίας και Θράκης, Μακεδονία - Θράκη, 68100, Ελλάδα",40.8,25.8
+"Deutsche Welle, Bonn, Germany","Deutsche Welle, Bonn, Germany","DW, Gronau, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.7,7.1
+Dhaka University,Dhaka University,"Faculty of Social Welfare, Dhaka University, Azimpur Koborsthan Road, বস্তি, হাজারীবাগ, ঢাকা, ঢাকা বিভাগ, 1950, বাংলাদেশ",23.7,90.4
+"Disney Research, Zurich","Disney Research, CH","Disney Research Zürich, 48, Stampfenbachstrasse, Unterstrass, Kreis 6, Zürich, Bezirk Zürich, Zürich, 8006, Schweiz/Suisse/Svizzera/Svizra",47.4,8.5
+DIT University,DIT UNIVERSITY,"DIT University, Dehradun-Mussoorie Road, Rājpur, Kincraig, Dehra Dūn, Uttarakhand, 248009, India",30.4,78.1
+DIT University,"DIT UNIVERSITY, DEHRADUN","DIT University, Dehradun-Mussoorie Road, Rājpur, Kincraig, Dehra Dūn, Uttarakhand, 248009, India",30.4,78.1
+Donghua University,Donghua University,"东华大学, 新华路, 长宁区, 上海市, 210011, 中国",31.2,121.4
+Donghua University,"Donghua University, China","东华大学, 新华路, 长宁区, 上海市, 210011, 中国",31.2,121.4
+Dr. B. C. Roy Engineering College,Dr. B. C. Roy Engineering College,"Dr. B. C. Roy Engineering College, Lenin Sarani, Durgapur, Bānkurā, West Bengal, 713200, India",23.5,87.3
+Drexel University,Drexel University,"Drexel University, Arch Street, Powelton Village, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",40.0,-75.2
+Dublin City University,DUBLIN CITY UNIVERSITY,"Dublin City University Glasnevin Campus, Lower Car Park, Wad, Whitehall A ED, Dublin 9, Dublin, County Dublin, Leinster, D09 FW22, Ireland",53.4,-6.3
+Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",36.0,-78.9
+East China Normal University,East China Normal University,"华东师范大学, 3663, 中山北路, 曹家渡, 普陀区, 普陀区 (Putuo), 上海市, 200062, 中国",31.2,121.4
+Eastern Mediterranean University,Eastern Mediterranean University,"Eastern Mediterranean University (EMU) - Stadium, Nehir Caddesi, Gazimağusa, Αμμόχωστος - Mağusa, Kuzey Kıbrıs, 99450, Κύπρος - Kıbrıs",35.1,33.9
+Eastern University,Eastern University,"Eastern University, Huston Road, Radnor Township, Delaware County, Pennsylvania, 19087, USA",40.1,-75.4
+Ecole Centrale de Lyon,"Ecole Centrale de Lyon, Lyon, 69134, France","EC de Lyon, 36, Avenue Guy de Collongue, Écully, Lyon, Métropole de Lyon, Circonscription départementale du Rhône, Auvergne-Rhône-Alpes, France métropolitaine, 69134, France",45.8,4.8
+École Polytechnique Fédérale de Lausanne,"École Polytechnique Fédérale de Lausanne (EPFL), Switzerland","Bibliothèque de l'EPFL, Route des Noyerettes, Ecublens, District de l'Ouest lausannois, Vaud, 1024, Schweiz/Suisse/Svizzera/Svizra",46.5,6.6
+Edge Hill University,Edge Hill University,"Edge Hill University, St Helens Road, West Lancashire, Lancs, North West England, England, L39 4QP, UK",53.6,-2.9
+Education University of Hong Kong,The Education University of Hong Kong,"香港教育大學 The Education University of Hong Kong, 露屏路 Lo Ping Road, 鳳園 Fung Yuen, 下坑 Ha Hang, 新界 New Territories, HK, DD5 1119, 中国",22.5,114.2
+Eindhoven University of Technology,Eindhoven University of Technology,"Technische Universiteit Eindhoven, 2, De Rondom, Villapark, Eindhoven, Noord-Brabant, Nederland, 5600 MB, Nederland",51.4,5.5
+Eindhoven University of Technology,"Eindhoven University of Technology, The Netherlands","Technische Universiteit Eindhoven, 2, De Rondom, Villapark, Eindhoven, Noord-Brabant, Nederland, 5600 MB, Nederland",51.4,5.5
+Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.6,-101.9
+Elon University,Elon University,"Amphitheater, North Antioch Avenue, Elon, Alamance County, North Carolina, 27244, USA",36.1,-79.5
+Eskisehir Osmangazi University,Eskisehir Osmangazi University,"Eskişehir Osmangazi Üniversitesi Meşelik Yerleşkesi, Kütahya-Eskişehir yolu, Sazova Mahallesi, Karagözler, Tepebaşı, Eskişehir, İç Anadolu Bölgesi, 26160, Türkiye",39.7,30.5
+"Facebook, Inc.","Facebook Inc., San Francisco, CA, USA","Facebook Inc., San Francisco Bay Trail, Menlo Park, San Mateo County, California, 94025-1246, USA",37.5,-122.2
+"Facebook, Singapore","Facebook, Singapore","Ewe Boon back lane, between Palm Spring, City Towers and Wing On Life Garden, Farrer Park Gardens, Novena, Singapore, Central, 259803, Singapore",1.3,103.8
+Feng Chia University,Feng Chia University,"逢甲大學, 100, 文華路, 西平里, 西屯區, 臺中市, 40724, 臺灣",24.2,120.6
+Feng Chia University,"Feng Chia University, Taichung, Taiwan","逢甲大學, 100, 文華路, 西平里, 西屯區, 臺中市, 40724, 臺灣",24.2,120.6
+Ferdowsi University of Mashhad,Ferdowsi University of Mashhad,"دانشگاه فردوسی مشهد, بولوار دانش, رضاشهر, منطقه ۹, مشهد, شهرستان مشهد, استان خراسان رضوی, 9177146164, ‏ایران‎",36.3,59.5
+Ferdowsi University of Mashhad,"Ferdowsi University of Mashhad, Mashhad, Iran","دانشگاه فردوسی مشهد, بولوار دانش, رضاشهر, منطقه ۹, مشهد, شهرستان مشهد, استان خراسان رضوی, 9177146164, ‏ایران‎",36.3,59.5
+Firat University,Firat University,"Erzincan Üniversitesi Hukuk Fakültesi Dekanlığı, Sivas-Erzincan yolu, Üçkonak, Erzincan, Erzincan merkez, Erzincan, Doğu Anadolu Bölgesi, 24000, Türkiye",39.7,39.5
+Florida Institute of Technology,"Florida Institute Of Technology, Melbourne Fl","Florida Institute of Technology, West University Boulevard, Melbourne, Brevard County, Florida, 32901, USA",28.1,-80.6
+Florida Institute of Technology,"Florida Institute of Technology, Melbourne, USA","Florida Institute of Technology, West University Boulevard, Melbourne, Brevard County, Florida, 32901, USA",28.1,-80.6
+Florida International University,Florida International University,"FIU, Southwest 14th Street, Sweetwater, University Park, Miami-Dade County, Florida, 33199, USA",25.8,-80.4
+Florida International University,"Florida International University, Miami, FL","FIU, Southwest 14th Street, Sweetwater, University Park, Miami-Dade County, Florida, 33199, USA",25.8,-80.4
+Florida State University,Florida State University,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA",30.4,-84.3
+Florida State University,"Florida State University, Tallahassee, FL 32306, USA","Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA",30.4,-84.3
+Florida State University,The Florida State University,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA",30.4,-84.3
+Fordham University,Fordham University,"Fordham University Lincoln Center Campus, West 61st Street, 1 West End Ave trade area, Lincoln Square, Manhattan, Manhattan Community Board 7, New York County, NYC, New York, 10023, USA",40.8,-74.0
+Fordham University,"Fordham University, New York, 10023, USA","Fordham University Lincoln Center Campus, West 61st Street, 1 West End Ave trade area, Lincoln Square, Manhattan, Manhattan Community Board 7, New York County, NYC, New York, 10023, USA",40.8,-74.0
+Foundation University Rawalpindi Campus,Foundation University Rawalpindi Campus,"Foundation University Rawalpindi Campus, Main Parking Road, Police Lines, راولپنڈی, Rawalpindi Cantt, پنجاب, 46600, ‏پاکستان‎",33.6,73.1
+Foundation University Rawalpindi Campus,"Foundation University Rawalpindi Campus, Pakistan","Foundation University Rawalpindi Campus, Main Parking Road, Police Lines, راولپنڈی, Rawalpindi Cantt, پنجاب, 46600, ‏پاکستان‎",33.6,73.1
+Fraser University,Fraser University,"Fraser, 3333, University Avenue Southeast, Prospect Park - East River Road, Minneapolis, Hennepin County, Minnesota, 55414, USA",45.0,-93.2
+Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.3,121.5
+Fudan University,"Fudan University, Shanghai, China","复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.3,121.5
+Gdansk University of Technology,Gdansk University of Technology,"PG, Romualda Traugutta, Królewska Dolina, Wrzeszcz Górny, Gdańsk, pomorskie, 80-233, RP",54.4,18.6
+GE Global Research Center,GE Global Research,"GE Global Research Center, Aqueduct, Niskayuna, Schenectady County, New York, USA",42.8,-73.9
+GE Global Research Center,GE Global Research Center,"GE Global Research Center, Aqueduct, Niskayuna, Schenectady County, New York, USA",42.8,-73.9
+George Mason University,George Mason University,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA",38.8,-77.3
+George Mason University,"George Mason University, Fairfax Virginia, USA","George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA",38.8,-77.3
+George Mason University,"George Mason University, Fairfax, VA 22030","George Mason University, University Drive, Ardmore, Fairfax, Fairfax County, Virginia, 22030, USA",38.8,-77.3
+George Mason University,"George Mason University, Fairfax, VA, USA","George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA",38.8,-77.3
+Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.8,-84.4
+Georgia Institute of Technology,"Georgia Institute of Technology, Atlanta, 30332-0250, USA","Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.8,-84.4
+Georgia Institute of Technology,"Georgia Institute of Technology, Atlanta, Georgia, USA","Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.8,-84.4
+Georgia Southern University,Georgia Southern University,"Georgia Southern University, Forrest Drive, Pine Cove, Statesboro, Bulloch County, Georgia, 30460, USA",32.4,-81.8
+Georgia Southern University,"Georgia Southern University, Statesboro, USA","Georgia Southern University, Forrest Drive, Pine Cove, Statesboro, Bulloch County, Georgia, 30460, USA",32.4,-81.8
+"GIPSA-Lab, Grenoble, France","GIPSA-Lab, Grenoble, France","GIPSA-lab, 11, Rue des Mathématiques, Médiat Rhône-Alpes, Saint-Martin-d'Hères, Grenoble, Isère, Auvergne-Rhône-Alpes, France métropolitaine, 38400, France",45.2,5.8
+Glyndwr University,Glyndwr University,"Glyndŵr University, Mold Road, Rhosrobin, Wrexham, Wales, LL11 2AW, UK",53.1,-3.0
+Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.1,15.5
+Graz University of Technology,"Graz University of Technology, Austria","TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.1,15.5
+Griffith University,Griffith University,"Griffith University Nathan Campus, Johnson Path, Nathan, Nathan Heights, QLD, 4111, Australia",-27.6,153.1
+Griffith University,"Griffith University, Australia","Griffith University Nathan Campus, Johnson Path, Nathan, Nathan Heights, QLD, 4111, Australia",-27.6,153.1
+Griffith University,"Griffith University, Brisbane","Griffith University Nathan Campus, Johnson Path, Nathan, Nathan Heights, QLD, 4111, Australia",-27.6,153.1
+Griffith University,"Griffith University, Nathan, QLD, Australia","Griffith University Nathan Campus, Johnson Path, Nathan, Nathan Heights, QLD, 4111, Australia",-27.6,153.1
+Guangdong Medical College,Guangdong Medical College,"医学院, 真如路, 凤凰新村, 天河区, 广州市, 广东省, 510635, 中国",23.1,113.3
+Guangdong University of Technology,Guangdong University of Technology,"广东工业大学, 东风东路, 黄花岗街道, 越秀区 (Yuexiu), 广州市, 广东省, 510080, 中国",23.1,113.3
+Guangdong University of Technology,"Guangdong University of Technology, China","广东工业大学, 东风东路, 黄花岗街道, 越秀区 (Yuexiu), 广州市, 广东省, 510080, 中国",23.1,113.3
+Guangzhou University,Guangzhou University,"广州大学, 大学城中环西路, 广州大学城, 南村镇, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.0,113.4
+Guangzhou University,"Guangzhou University, Guangzhou, China","广州大学, 大学城中环西路, 广州大学城, 南村镇, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.0,113.4
+Guilin University of Electronic Technology Guangxi Guilin,Guilin University of Electronic Technology Guangxi Guilin,"桂林电子科技大学金鸡岭校区, 1号, 金鸡路, 七星区, 黄莺岩村, 七星区, 桂林市, 广西壮族自治区, 541004, 中国",25.3,110.3
+Guilin University of Electronic Technology Guangxi Guilin,"Guilin University of Electronic Technology Guangxi Guilin, China","桂林电子科技大学金鸡岭校区, 1号, 金鸡路, 七星区, 黄莺岩村, 七星区, 桂林市, 广西壮族自治区, 541004, 中国",25.3,110.3
+Hacettepe University,Hacettepe University,"Hacettepe Üniversitesi Beytepe Kampüsü, Hacettepe-Beytepe Kampüs Yolu, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.9,32.7
+Halmstad University,Halmstad University,"Högskolan i Halmstad, 3, Kristian IV:s väg, Larsfrid, Nyhem, Halmstad, Hallands län, Götaland, 301 18, Sverige",56.7,12.9
+Halmstad University,"Halmstad University, Halmstad, Sweden","Högskolan i Halmstad, 3, Kristian IV:s väg, Larsfrid, Nyhem, Halmstad, Hallands län, Götaland, 301 18, Sverige",56.7,12.9
+Hangzhou Dianzi University,Hangzhou Dianzi University,"杭州电子科技大学, 2号大街, 白杨街道, 江干区 (Jianggan), 杭州市 Hangzhou, 浙江省, 310018, 中国",30.3,120.3
+Hangzhou Dianzi University,"Hangzhou Dianzi University, Hangzhou, China","杭州电子科技大学, 2号大街, 白杨街道, 江干区 (Jianggan), 杭州市 Hangzhou, 浙江省, 310018, 中国",30.3,120.3
+Hankuk University of Foreign Studies,Hankuk University of Foreign Studies,"외대앞, 휘경로, 이문동, 이문2동, 동대문구, 서울특별시, 02407, 대한민국",37.6,127.1
+Hankuk University of Foreign Studies,"Hankuk University of Foreign Studies, South Korea","외대앞, 휘경로, 이문동, 이문2동, 동대문구, 서울특별시, 02407, 대한민국",37.6,127.1
+Hanoi University of Science and Technology,Hanoi University of Science and Technology,"HUST, Trần Đại Nghĩa, Hai Bà Trưng, Hà Nội, 10999, Việt Nam",21.0,105.8
+Hanyang University,Hanyang University,"한양대, 206, 왕십리로, 사근동, 성동구, 서울특별시, 04763, 대한민국",37.6,127.0
+Harbin Engineering University,Harbin Engineering University,"哈尔滨工程大学, 文庙街 - Wenmiao Street, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.8,126.7
+Harbin Engineering University,"Harbin Engineering University, Harbin, Heilongjiang, 150001, China","哈尔滨工程大学, 文庙街 - Wenmiao Street, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.8,126.7
+Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.7,126.6
+Harbin Institute of Technology,"Harbin Institute of Technology, China","哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.7,126.6
+Harbin Institute of Technology,"Harbin Institute of Technology, China, 150001","哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.7,126.6
+Harbin Institute of Technology,"Harbin Institute of Technology, Harbin 150001, China","哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.7,126.6
+Harbin Institute of Technology,"Harbin Institute of Technology, Harbin, China","哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.7,126.6
+Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.4,-71.1
+Harvard University,"Harvard University, Cambridge","Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.4,-71.1
+Harvard University,"Harvard University, Cambridge, MA","Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.4,-71.1
+Harvard University,"Harvard University, Cambridge, MA 02138","Harvard University, Rotterdam Street, North Brighton, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.4,-71.1
+Harvard University,"Harvard University, Cambridge, MA, USA","Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.4,-71.1
+Harvard University,"Harvard University, USA","Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.4,-71.1
+Hebrew University of Jerusalem,The Hebrew University of Jerusalem,"האוניברסיטה העברית בירושלים, Reagan Plaza, קרית מנחם בגין, הר הצופים, ירושלים, מחוז ירושלים, NO, ישראל",31.8,35.2
+Hebrew University of Jerusalem,"The Hebrew University of Jerusalem, Israel","האוניברסיטה העברית בירושלים, Reagan Plaza, קרית מנחם בגין, הר הצופים, ירושלים, מחוז ירושלים, NO, ישראל",31.8,35.2
+Hefei University of Technology,Hefei University of Technology,"合肥工业大学(屯溪路校区), 193号, 南一环路, 航运南村, 包公街道, 合肥市区, 合肥市, 安徽省, 230009, 中国",31.8,117.3
+Hefei University of Technology,"Hefei University of Technology, Hefei, Anhui, 230601, China","合肥工业大学(屯溪路校区), 193号, 南一环路, 航运南村, 包公街道, 合肥市区, 合肥市, 安徽省, 230009, 中国",31.8,117.3
+Hefei University of Technology,"Hefei University of Technology, Hefei, China","合肥工业大学(屯溪路校区), 193号, 南一环路, 航运南村, 包公街道, 合肥市区, 合肥市, 安徽省, 230009, 中国",31.8,117.3
+Hengyang Normal University,Hengyang Normal University,"衡阳师范学院, 黄白路, 雁峰区, 衡阳市 / Hengyang, 湖南省, 中国",26.9,112.6
+Hengyang Normal University,"Hengyang Normal University, Hengyang, China","衡阳师范学院, 黄白路, 雁峰区, 衡阳市 / Hengyang, 湖南省, 中国",26.9,112.6
+Heriot-Watt University,Heriot-Watt University,"Heriot-Watt University - Edinburgh Campus, Third Gait, Currie, Gogarbank, City of Edinburgh, Scotland, EH14 4AS, UK",55.9,-3.3
+Hiroshima University,Hiroshima University,"Hiroshima University 広島大学 東広島キャンパス, 出会いの道 Deai-no-michi Str., 西条下見, 東広島市, 広島県, 中国地方, 739-0047, 日本",34.4,132.7
+Hiroshima University,"Hiroshima University, Japan","Hiroshima University 広島大学 東広島キャンパス, 出会いの道 Deai-no-michi Str., 西条下見, 東広島市, 広島県, 中国地方, 739-0047, 日本",34.4,132.7
+Hofstra University,Hofstra University,"Hofstra University, Hempstead Turnpike Bike Path, East Garden City, Nassau County, New York, 11549, USA",40.7,-73.6
+Hofstra University,"Hofstra University, Hempstead, NY 11549","Hofstra University, Hempstead Turnpike Bike Path, East Garden City, Nassau County, New York, 11549, USA",40.7,-73.6
+HoHai University,HoHai University,"河海大学, 河海路, 小市桥, 鼓楼区, 南京市, 江苏省, 210013, 中国",32.1,118.8
+Hong Kong Baptist University,Hong Kong Baptist University,"香港浸會大學 Hong Kong Baptist University, 安明街 On Ming Street, 石門 Shek Mun, 石古壟 Shek Kwu Lung, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1132, 中国",22.4,114.2
+Hong Kong Baptist University,"Hong Kong Baptist University, Hong Kong","香港浸會大學 Hong Kong Baptist University, 安明街 On Ming Street, 石門 Shek Mun, 石古壟 Shek Kwu Lung, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1132, 中国",22.4,114.2
+Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.3,114.2
+Hong Kong Polytechnic University,"Hong Kong Polytechnic University, Hong Kong","hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.3,114.2
+Hong Kong Polytechnic University,"Hong Kong Polytechnic University, Hong Kong, China","hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.3,114.2
+Hong Kong Polytechnic University,The Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.3,114.2
+Hong Kong Polytechnic University,the Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.3,114.2
+Hong Kong Polytechnic University,"The Hong Kong Polytechnic University, China","hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.3,114.2
+Hong Kong Polytechnic University,"The Hong Kong Polytechnic University, Hong Kong","hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.3,114.2
+Hong Kong Polytechnic University,"the Hong Kong Polytechnic University, Hong Kong","hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.3,114.2
+Hong Kong Polytechnic University,"The Hong Kong Polytechnic University, Hong Kong, China","hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.3,114.2
+Hong Kong Polytechnic University,"The Hong Kong Polytechnic University, Kowloon, Hong Kong","hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.3,114.2
+Hong Kong University of Science and Technology,Hong Kong University of Science and Technology,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.3,114.3
+Hong Kong University of Science and Technology,"Hong Kong University of Science and Technology, Hong Kong","香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.3,114.3
+Hong Kong University of Science and Technology,The Hong Kong University of Science and Technology,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.3,114.3
+Hong Kong University of Science and Technology,"The Hong Kong University of Science and Technology, Hong Kong","香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.3,114.3
+Howard University,Howard University,"Howard University, College Street Northwest, Howard University, Washington, D.C., 20001, USA",38.9,-77.0
+Howard University,"Howard University, Washington DC","Howard University, College Street Northwest, Howard University, Washington, D.C., 20001, USA",38.9,-77.0
+Huaqiao University,Huaqiao University,"华侨大学站 HuaQiao University (BRT), 集美大道, 集美区, 集美区 (Jimei), 厦门市 / Xiamen, 福建省, 361024, 中国",24.6,118.1
+Huaqiao University,"Huaqiao University, Xiamen, China","华侨大学站 HuaQiao University (BRT), 集美大道, 集美区, 集美区 (Jimei), 厦门市 / Xiamen, 福建省, 361024, 中国",24.6,118.1
+Huazhong University of Science and Technology,Huazhong University of,"深圳市第六人民医院, 89号, 桃园路, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518000, 中国",22.5,113.9
+Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.5,114.4
+Huazhong University of Science and Technology,"Huazhong University of Science and Technology, Wuhan, China","华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.5,114.4
+Huazhong University of Science and Technology,"Huazhong University of Science and Technology, Wuhan, China 430074","华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.5,114.4
+Humboldt University,Humboldt University,"Humboldt-Universität zu Berlin, Dorotheenstraße, Spandauer Vorstadt, Mitte, Berlin, 10117, Deutschland",52.5,13.4
+Humboldt University,Humboldt-University,"Humboldt-Universität zu Berlin, Dorotheenstraße, Spandauer Vorstadt, Mitte, Berlin, 10117, Deutschland",52.5,13.4
+Humboldt University,"Humboldt-University, Berlin, Germany","Humboldt-Universität zu Berlin, Dorotheenstraße, Spandauer Vorstadt, Mitte, Berlin, 10117, Deutschland",52.5,13.4
+Hunan University,Hunan University,"Yejin University for Employees, 冶金西路, 和平乡, 珠晖区, 衡阳市 / Hengyang, 湖南省, 中国",26.9,112.6
+IBM Almaden Research Center,IBM Almaden Research Center,"IBM Almaden Research Center, San José, Santa Clara County, California, USA",37.2,-121.8
+IBM Almaden Research Center,"IBM Almaden Research Center, San Jose CA","IBM Almaden Research Center, San José, Santa Clara County, California, USA",37.2,-121.8
+IBM Research,IBM Research,"IBM, East Cornwallis Road, Research Triangle Park, Nelson, Durham County, North Carolina, 27709, USA",35.9,-78.9
+IBM Research,"IBM Research, USA","IBM, East Cornwallis Road, Research Triangle Park, Nelson, Durham County, North Carolina, 27709, USA",35.9,-78.9
+IBM Thomas J. Watson Research Center,IBM Thomas J. Watson Research Center,"IBM Yorktown research lab, Adams Road, Millwood, Town of New Castle, Westchester County, New York, 10562, USA",41.2,-73.8
+IDIAP Research Institute,IDIAP RESEARCH INSTITUTE,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.1,7.1
+IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.1,7.1
+Idiap Research Institute,Idiap Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.1,7.1
+IDIAP Research Institute,"IDIAP Research Institute, Martigny, Switzerland","Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.1,7.1
+Idiap Research Institute,"Idiap Research Institute, Martigny, Switzerland","Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.1,7.1
+Idiap Research Institute,"Idiap Research Institute, Switzerland","Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.1,7.1
+IDIAP Research Institute,"IDIAP, Martigny, Switzerland","Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.1,7.1
+Illinois Institute of Technology,Illinois Institute of Technology,"Illinois Institute of Technology, South State Street, Bronzeville, Chicago, Cook County, Illinois, 60616, USA",41.8,-87.6
+Illinois Institute of Technology,"Illinois Institute of Technology, Chicago, Illinois, USA","Illinois Institute of Technology, South State Street, Bronzeville, Chicago, Cook County, Illinois, 60616, USA",41.8,-87.6
+Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.5,-0.2
+Imperial College London,"Imperial College London, London, U.K.","Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.5,-0.2
+Imperial College London,"Imperial College London, London, UK","Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.5,-0.2
+Imperial College London,"Imperial College London, U.K","Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.5,-0.2
+Imperial College London,"Imperial College London, U.K.","Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.5,-0.2
+Imperial College London,"Imperial College London, UK","Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.5,-0.2
+Imperial College London,"Imperial College London, United Kingdom","Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.5,-0.2
+Imperial College London,"Imperial College, London, UK","Sung Chuan Kung Fu, Imperial College, Prince Consort Road, City of Westminster, London, Greater London, England, SW7 2QU, UK",51.5,-0.2
+Indian Institute of Science Bangalore,Indian Institute of Science,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.0,77.6
+Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.0,77.6
+Indian Institute of Science Bangalore,"Indian Institute of Science, India","IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.0,77.6
+Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.5,77.3
+Indian Institute of Technology Delhi,Indian Institute of Technology,"Indian Institute Of Technology, IIT Delhi Main Road, Adchini, Lado Sarai, Mehrauli, South Delhi, Delhi, 110066, India",28.5,77.2
+Indian Institute of Technology Delhi,"Indian Institute of Technology Delhi, New Delhi, India","Indian Institute Of Technology, IIT Delhi Main Road, Adchini, Lado Sarai, Mehrauli, South Delhi, Delhi, 110066, India",28.5,77.2
+Indian Institute of Technology Guwahati,"IIT Guwahati, Guwahati, India","Indian Institute of Technology Guwahati - IIT Guwahati, NH27, Amingaon, Guwahati, Kamrup, Assam, 781015, India",26.2,91.7
+Indian Institute of Technology Kanpur,Indian Institute of Technology Kanpur,"Indian Institute of Technology Kanpur, 4th Avenue, Panki, Kanpur, Kanpur Nagar, Uttar Pradesh, 208016, India",26.5,80.2
+Indian Institute of Technology Kanpur,"Indian Institute of Technology Kanpur, Kanpur, India","Indian Institute of Technology Kanpur, 4th Avenue, Panki, Kanpur, Kanpur Nagar, Uttar Pradesh, 208016, India",26.5,80.2
+Indian Institute of Technology Roorkee,"Indian Institute of Technology, Roorkee","Indian Institute of Technology (IIT), Roorkee, LBS Jogging Track, Roorkee, Haridwar, Uttarakhand, 247667, India",29.9,77.9
+Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.9,-84.9
+Indiana University Bloomington,Indiana University Bloomington,"Indiana University Bloomington, East 17th Street, Bloomington, Monroe County, Indiana, 47408, USA",39.2,-86.5
+"Industrial Technology Research Institute, Hsinchu, Taiwan","Industrial Technology Research Institute, Hsinchu, Taiwan","工研院, 195, 中興路四段, 頭重里, 竹東鎮, 新竹縣, 31040, 臺灣",24.8,121.0
+Information Technologies Institute,Information Technologies Institute,"公益財団法人九州先端科学技術研究所, Fukuoka SRP Center Building 7F, 百道ランプ下り入り口, 早良区, 福岡市, 福岡県, 九州地方, 814-0001, 日本",33.6,130.4
+Information Technology University,Information Technology University (ITU),"Information Technology University (ITU), Ferozepur Road, Sher Shah Block, Garden Town, Al Noor Town, Lahore District, پنجاب, 54600, ‏پاکستان‎",31.5,74.3
+Information Technology University,"Information Technology University (ITU), Punjab, Lahore, Pakistan","Information Technology University (ITU), Ferozepur Road, Sher Shah Block, Garden Town, Al Noor Town, Lahore District, پنجاب, 54600, ‏پاکستان‎",31.5,74.3
+INRIA,"INRIA Grenoble Rhone-Alpes, FRANCE","INRIA, 655, Avenue de l'Europe, Innovallée Montbonnot, Montbonnot-Saint-Martin, Grenoble, Isère, Auvergne-Rhône-Alpes, France métropolitaine, 38330, France",45.2,5.8
+Institute for Communication Systems,Institute for Communication Systems,"Institute for Communication Systems, Spine Road, Woodbridge Hill, Guildford, Surrey, South East, England, GU2 7XS, UK",51.2,-0.6
+"Institute for System Programming, Moscow",Institute for System Programming,"ИСП РАН, 25, улица Александра Солженицына, Швивая горка, Таганский район, Центральный административный округ, Москва, ЦФО, 109004, РФ",55.7,37.7
+"Institute of Engineering and Management, Kolkata, India","Institute of Engineering and Management, Kolkata, India","Institute of Engineering and Management, Block -EP, Ring Road, GP Block, Kolkata, Twenty-four Parganas, West Bengal, 700091, India",22.6,88.4
+Institute of Industrial Science,Institute of Industrial Science,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本",36.1,140.1
+Institute of Information Science,Institute of Information Science,"資訊科學研究所, 數理大道, 中研里, 南港子, 南港區, 臺北市, 11574, 臺灣",25.0,121.6
+Institute of Information Technology,Institute of Information Technology,"Institute of Information Technology, Sir Sayed Road, ফকিরাপুল, সিদ্দিক বাজার, ঢাকা, ঢাকা বিভাগ, 1000, বাংলাদেশ",23.7,90.4
+Institute of Media Innovation,Institute of Media Innovation,"Institute for Media Innovation, 50, Nanyang Drive, Pioneer, Southwest, 637553, Singapore",1.3,103.7
+Institute of Systems and Robotics,Institute of Systems and Robotics,"Institut für Robotik und Kognitive Systeme, 160, Ratzeburger Allee, Strecknitz, Sankt Jürgen, Strecknitz, Lübeck, Schleswig-Holstein, 23562, Deutschland",53.8,10.7
+International Institute of Information Technology,International Institute of Information Technology,"International Institute of Information Technology, Hyderabad, Campus Road, Ward 105 Gachibowli, Greater Hyderabad Municipal Corporation West Zone, Hyderabad, Rangareddy District, Telangana, 500032, India",17.4,78.3
+International Institute of Information Technology,"International Institute of Information Technology (IIIT) Hyderabad, India","International Institute of Information Technology, Hyderabad, Campus Road, Ward 105 Gachibowli, Greater Hyderabad Municipal Corporation West Zone, Hyderabad, Rangareddy District, Telangana, 500032, India",17.4,78.3
+International Institute of Information Technology,"International Institute of Information Technology, Hyderabad, India","International Institute of Information Technology, Hyderabad, Campus Road, Ward 105 Gachibowli, Greater Hyderabad Municipal Corporation West Zone, Hyderabad, Rangareddy District, Telangana, 500032, India",17.4,78.3
+International Institute of Information Technology,"International Institute of Information Technology, Hyderabad, Telangana, India","International Institute of Information Technology, Hyderabad, Campus Road, Ward 105 Gachibowli, Greater Hyderabad Municipal Corporation West Zone, Hyderabad, Rangareddy District, Telangana, 500032, India",17.4,78.3
+Ionian University,Ionian University,"Πανεπιστήμιο Πατρών, Λεωφ. Ιπποκράτους, κ. Ρίου (Αγίου Γεωργίου Ρίου), Πάτρα, Δήμος Πατρέων, Περιφερειακή Ενότητα Αχαΐας, Περιφέρεια Δυτικής Ελλάδας, Πελοπόννησος, Δυτική Ελλάδα και Ιόνιο, 26443, Ελλάδα",38.3,21.8
+Iowa State University,Iowa State University,"Iowa State University, Farm House Road, Ames, Story County, Iowa, 50014, USA",42.0,-93.6
+Iowa State University,"Iowa State University, Ames, IA, USA","Iowa State University, Farm House Road, Ames, Story County, Iowa, 50014, USA",42.0,-93.6
+Islamic Azad University,Islamic Azad University,"دانشگاه آزاد اسلامی, همدان, بخش مرکزی شهرستان همدان, شهرستان همدان, استان همدان, ‏ایران‎",34.8,48.6
+"Islamic University of Gaza, Palestine",Islamic University of Gaza - Palestine,"The Islamic University of Gaza, Mostafa Hafez Street, South Remal, محافظة غزة, ‏قطاع غزة‎, PO BOX 108, الأراضي الفلسطينية",31.5,34.4
+Istanbul Technical University,Istanbul Technical University,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye",41.1,29.0
+Istanbul Technical University,Istanbul Technical University (ITU),"ITU Open Air Theater, Arı Yolu, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34485, Türkiye",41.1,29.0
+Istanbul Technical University,"Istanbul Technical University (ITU), Turkey","ITU Open Air Theater, Arı Yolu, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34485, Türkiye",41.1,29.0
+Istanbul Technical University,"Istanbul Technical University, Istanbul, 34469, TURKEY","Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye",41.1,29.0
+Istanbul Technical University,"Istanbul Technical University, Istanbul, Turkey","Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye",41.1,29.0
+Istanbul Technical University,"Istanbul Technical University, Turkey","Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye",41.1,29.0
+Istanbul University,Istanbul University,"İstanbul Üniversitesi, Besim Ömerpaşa Caddesi, Süleymaniye, Fatih, İstanbul, Marmara Bölgesi, 34116, Türkiye",41.0,29.0
+Istanbul University,"Istanbul University, Istanbul, Turkey","İstanbul Üniversitesi, Besim Ömerpaşa Caddesi, Süleymaniye, Fatih, İstanbul, Marmara Bölgesi, 34116, Türkiye",41.0,29.0
+Jacobs University,Jacobs University,"Liverpool Hope University, Shaw Street, Everton, Liverpool, North West England, England, L6 1HP, UK",53.4,-3.0
+Jadavpur University,Jadavpur University,"Jadavpur University, Chingrighata Flyover, Basani Devi Colony, Kolkata, Hāora, West Bengal, 700098, India",22.6,88.4
+Jadavpur University,"Jadavpur University, India","Jadavpur University, Chingrighata Flyover, Basani Devi Colony, Kolkata, Hāora, West Bengal, 700098, India",22.6,88.4
+Jahangirnagar University,Jahangirnagar University,"Jahangirnagar University, 1342, University Main Road, সাভার, সাভার উপজেলা, ঢাকা জেলা, ঢাকা বিভাগ, 1342, বাংলাদেশ",23.9,90.3
+Jahangirnagar University,"Jahangirnagar University, Savar, Dhaka 1342, Bangladesh","Jahangirnagar University, 1342, Dhaka - Aricha Highway, Nobinagar, সাভার উপজেলা, ঢাকা জেলা, ঢাকা বিভাগ, 1342, বাংলাদেশ",23.9,90.3
+Japan Advanced Institute of Science and Technology,Japan Advanced Institute of Science and Technology,"JAIST (北陸先端科学技術大学院大学), 石川県道55号小松辰口線, Ishikawa Science Park, 能美市, 石川県, 中部地方, 923-1206, 日本",36.4,136.6
+Japan Advanced Institute of Science and Technology,"Japan Advanced Institute of Science and Technology, Ishikawa-ken 923-1211, Japan","JAIST (北陸先端科学技術大学院大学), 石川県道55号小松辰口線, Ishikawa Science Park, 能美市, 石川県, 中部地方, 923-1206, 日本",36.4,136.6
+Jaypee Institute of Information Technology,Jaypee Institute of Information Technology,"Jaypee Institute of Information Technology, Noida, A-10, National Highway 24 Bypass, Asha Pushp Vihar, Kaushambi, Ghaziabad, Uttar Pradesh, 201001, India",28.6,77.4
+Jiangnan University,Jiangnan University,"江南大学站, 蠡湖大道, 滨湖区, 南场村, 滨湖区 (Binhu), 无锡市 / Wuxi, 江苏省, 214121, 中国",31.5,120.3
+Jiangnan University,Jiangnan University Jiangsu Wuxi,"江南大学站, 蠡湖大道, 滨湖区, 南场村, 滨湖区 (Binhu), 无锡市 / Wuxi, 江苏省, 214121, 中国",31.5,120.3
+Jiangnan University,"Jiangnan University Jiangsu Wuxi, PR China","江南大学站, 蠡湖大道, 滨湖区, 南场村, 滨湖区 (Binhu), 无锡市 / Wuxi, 江苏省, 214121, 中国",31.5,120.3
+Jiangnan University,"Jiangnan University, Jiangsu Wuxi, PR China","江南大学站, 蠡湖大道, 滨湖区, 南场村, 滨湖区 (Binhu), 无锡市 / Wuxi, 江苏省, 214121, 中国",31.5,120.3
+Jiangnan University,"Jiangnan University, Wuxi","江南大学站, 蠡湖大道, 滨湖区, 南场村, 滨湖区 (Binhu), 无锡市 / Wuxi, 江苏省, 214121, 中国",31.5,120.3
+Jiangsu University,Jiangsu University,"江苏大学, 301, 学府路, 京口区, 象山街道, 京口区 (Jingkou), 镇江市 / Zhenjiang, 江苏省, 212013, 中国",32.2,119.5
+Jiangsu University,"Jiangsu University, Zhenjiang, China","江苏大学, 301, 学府路, 京口区, 象山街道, 京口区 (Jingkou), 镇江市 / Zhenjiang, 江苏省, 212013, 中国",32.2,119.5
+Jiangsu University,"Jiangsu University, ZhenJiang, Jiangsu, 212013, P. R. China","江苏大学, 301, 学府路, 京口区, 象山街道, 京口区 (Jingkou), 镇江市 / Zhenjiang, 江苏省, 212013, 中国",32.2,119.5
+Jiangsu University of Science and Technology,Jiangsu University of Science and Technology,"江苏科技大学, 学府路, 京口区, 象山街道, 京口区 (Jingkou), 镇江市 / Zhenjiang, 江苏省, 212000, 中国",32.2,119.5
+Jiangsu University of Science and Technology,"Jiangsu University of Science and Technology, Zhenjiang, China","江苏科技大学, 学府路, 京口区, 象山街道, 京口区 (Jingkou), 镇江市 / Zhenjiang, 江苏省, 212000, 中国",32.2,119.5
+Jilin University,Jilin University,"吉林大学珠海校区, 丹桂路, 圣堂村, 金湾区, 珠海市, 广东省, 中国",22.1,113.4
+Jilin University,"Jilin University, China","吉林大学珠海校区, 丹桂路, 圣堂村, 金湾区, 珠海市, 广东省, 中国",22.1,113.4
+"Joint Research Institute, Foshan, China","Joint Research Institute, Foshan, China","广东顺德中山大学卡内基梅隆大学国际联合研究院, 南国东路, 顺德区, 五村, 顺德区 (Shunde), 佛山市 / Foshan, 广东省, 0757, 中国",22.8,113.3
+Jordan University of Science and Technology,Jordan University of Science and Technology,"Jordan University of Science and Technology, شارع الأردن, إربد‎, إربد, الأردن",32.5,36.0
+Jordan University of Science and Technology,"Jordan University of Science and Technology, Irbid, Jordan","Jordan University of Science and Technology, شارع الأردن, إربد‎, إربد, الأردن",32.5,36.0
+K.N. Toosi University of Technology,K.N. Toosi University of Technology,"دانشکده مهندسی عمران و نقشه برداری, ولی عصر, کاووسیه, منطقه ۳ شهر تهران, تجریش, بخش رودبارقصران, شهرستان شمیرانات, استان تهران, 1968653111, ‏ایران‎",35.8,51.4
+K.N. Toosi University of Technology,"K.N. Toosi University of Technology, Tehran, Iran","دانشکده مهندسی عمران و نقشه برداری, ولی عصر, کاووسیه, منطقه ۳ شهر تهران, تجریش, بخش رودبارقصران, شهرستان شمیرانات, استان تهران, 1968653111, ‏ایران‎",35.8,51.4
+"KAIST, Daejeon, Korea","KAIST, Daejeon, Korea","궁동 카이스트 아파트 (Gungdong KAIST Apartments), 온천2동, 온천동, 유성구, 대전, 대한민국",36.4,127.4
+"KAIST, Daejeon, Korea","KAIST, Korea","궁동 카이스트 아파트 (Gungdong KAIST Apartments), 온천2동, 온천동, 유성구, 대전, 대한민국",36.4,127.4
+Karlsruhe Institute of Technology,Karlsruhe Institute of,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.1,8.4
+Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.1,8.4
+Karlsruhe Institute of Technology,"Karlsruhe Institute of Technology (KIT), Germany","KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.1,8.4
+Karlsruhe Institute of Technology,"Karlsruhe Institute of Technology, 76131 Karlsruhe, Germany","KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.1,8.4
+Karlsruhe Institute of Technology,"Karlsruhe Institute of Technology, Germany","KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.1,8.4
+Karlsruhe Institute of Technology,"Karlsruhe Institute of Technology, Karlsruhe, Germany","KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.1,8.4
+Katholieke Universiteit Leuven,Katholieke Universiteit Leuven,"Laboratorium voor Bos, natuur en landschap, 102, Vital Decosterstraat, Sint-Maartensdal, Leuven, Vlaams-Brabant, Vlaanderen, 3000, België / Belgique / Belgien",50.9,4.7
+KAUST,King Abdullah University of Science and Technology 4700,"KAUST, Collaboration Avenue, ثول, منطقة مكة المكرمة, 23955, السعودية",22.3,39.1
+KAUST,King Abdullah University of Science and Technology 4700,"KAUST, Collaboration Avenue, ثول, منطقة مكة المكرمة, 23955, السعودية",22.3,39.1
+KAUST,"King Abdullah University of Science and Technology 4700, Thuwal, Saudi Arabia","KAUST, Collaboration Avenue, ثول, منطقة مكة المكرمة, 23955, السعودية",22.3,39.1
+Keio University,"Information, Keio University","綱島市民の森, けつわり坂, 港北区, 横浜市, 神奈川県, 関東地方, 223-0053, 日本",35.5,139.6
+Keio University,Keio University,"綱島市民の森, けつわり坂, 港北区, 横浜市, 神奈川県, 関東地方, 223-0053, 日本",35.5,139.6
+Keio University,"Keio University, Yokohama 223-8522, Japan","慶應義塾大学 (矢上キャンパス), 理工坂, 港北区, 横浜市, 神奈川県, 関東地方, 223-8522, 日本",35.6,139.7
+Kent State University,Kent State University,"Kent State University, Lester A. Lefton Esplanade, Whitehall Terrace, Kent, Portage County, Ohio, 44242-0001, USA",41.1,-81.3
+Kent State University,"Kent State University, Kent, Ohio, USA","Kent State University, Lester A. Lefton Esplanade, Whitehall Terrace, Kent, Portage County, Ohio, 44242-0001, USA",41.1,-81.3
+Khalifa University,Khalifa University,"Khalifa University, شارع طَوِي مُوَيلِح, قصر الشاطئ, حدبة الزَّعْفرانة, أبوظبي, أبو ظبي, 31757, الإمارات العربية المتحدة",24.4,54.4
+Khalifa University,"Khalifa University, Abu Dhabi, United Arab Emirates","Khalifa University, شارع طَوِي مُوَيلِح, قصر الشاطئ, حدبة الزَّعْفرانة, أبوظبي, أبو ظبي, 31757, الإمارات العربية المتحدة",24.4,54.4
+Khon Kaen University,Khon Kaen University,"มหาวิทยาลัยขอนแก่น, 4, บ้านหนองหัวช้าง, ขอนแก่น, จังหวัดขอนแก่น, 40002, ประเทศไทย",16.5,102.8
+Khon Kaen University,"Khon Kaen University, Khon Kaen, 40002, Thailand","มหาวิทยาลัยขอนแก่น, 4, บ้านหนองหัวช้าง, ขอนแก่น, จังหวัดขอนแก่น, 40002, ประเทศไทย",16.5,102.8
+King Faisal University,King Faisal University,"University of Dammam, King Faisal Rd, العقربية, الخبر, المنطقة الشرقية, ٣١٩٥٢, السعودية",26.4,50.2
+King Saud University,King Saud University,"King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية",24.7,46.6
+King Saud University,"King Saud University, Riyadh","King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية",24.7,46.6
+King Saud University,"King Saud University, Riyadh 11543, Saudi Arabia","King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية",24.7,46.6
+King Saud University,"King Saud University, Riyadh, Saudi Arabia","King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية",24.7,46.6
+Kingston University,Kingston University,"Kingston University, Kingston Hill, Kingston Vale, Kingston-upon-Thames, London, Greater London, England, KT2 7TF, UK",51.4,-0.3
+Kingston University,"Kingston University, UK","Kingston University, Kingston Hill, Kingston Vale, Kingston-upon-Thames, London, Greater London, England, KT2 7TF, UK",51.4,-0.3
+Kobe University,Kobe University,"神戸大学, 灘三田線, 灘区, 神戸市, 兵庫県, 近畿地方, 657-00027, 日本",34.7,135.2
+Kobe University,"Kobe University, Japan","神戸大学, 灘三田線, 灘区, 神戸市, 兵庫県, 近畿地方, 657-00027, 日本",34.7,135.2
+Kogakuin University,Kogakuin University,"工学院大学, 東通り, 新宿区, 東京都, 関東地方, 163-8677, 日本",35.7,139.7
+Kogakuin University,"Kogakuin University, Tokyo, Japan","工学院大学, 東通り, 新宿区, 東京都, 関東地方, 163-8677, 日本",35.7,139.7
+Kookmin University,Kookmin University,"국민대학교앞, 정릉로, 정릉2동, 정릉동, 성북구, 서울특별시, 02708, 대한민국",37.6,127.0
+Kookmin University,"Kookmin University, Seoul, Korea","국민대학교앞, 정릉로, 정릉2동, 정릉동, 성북구, 서울특별시, 02708, 대한민국",37.6,127.0
+Korea Advanced Institute of Science and Technology,Korea Advanced Institute of Science and Technology,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.4,127.4
+Korea Advanced Institute of Science and Technology,Korea Advanced institute of Science and Technology,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.4,127.4
+Korea Advanced Institute of Science and Technology,"Korea Advanced Institute of Science and Technology, Daejeon, Korea","카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.4,127.4
+Korea Advanced Institute of Science and Technology,"Korea Advanced Institute of Science and Technology, Daejeon, Republic of Korea","카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.4,127.4
+Korea Advanced Institute of Science and Technology,"Korea Advanced Institute of Science and Technology, Daejeon, South Korea","카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.4,127.4
+Korea Advanced Institute of Science and Technology,"Korea Advanced Institute of Science and Technology, Korea","카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.4,127.4
+Korea University,Korea University,"고려대, 안암로, 제기동, 동대문구, 서울특별시, 02796, 대한민국",37.6,127.0
+Korea University,"Korea University, Seoul, South Korea","고려대, 안암로, 제기동, 동대문구, 서울특별시, 02796, 대한민국",37.6,127.0
+"KTH Royal Institute of Technology, Stockholm","KTH Royal Institute of Technology, Stockholm","KTH, Teknikringen, Lärkstaden, Norra Djurgården, Östermalms stadsdelsområde, Sthlm, Stockholm, Stockholms län, Svealand, 114 28, Sverige",59.3,18.1
+"KTH Royal Institute of Technology, Stockholm","KTH Royal Institute of Technology, 100 44 Stockholm, Sweden","KTH, Teknikringen, Lärkstaden, Norra Djurgården, Östermalms stadsdelsområde, Sthlm, Stockholm, Stockholms län, Svealand, 114 28, Sverige",59.3,18.1
+"KTH Royal Institute of Technology, Stockholm","KTH Royal Institute of Technology, Stockholm, Sweden","KTH, Teknikringen, Lärkstaden, Norra Djurgården, Östermalms stadsdelsområde, Sthlm, Stockholm, Stockholms län, Svealand, 114 28, Sverige",59.3,18.1
+Kumamoto University,Kumamoto University,"熊本大学黒髪キャンパス, 熊本菊陽線, 中央区, 熊本市, 熊本県, 九州地方, 860-0863, 日本",32.8,130.7
+Kumamoto University,"Kumamoto University, Kumamoto, Japan","熊本大学黒髪キャンパス, 熊本菊陽線, 中央区, 熊本市, 熊本県, 九州地方, 860-0863, 日本",32.8,130.7
+Kurukshetra University,Kurukshetra University,"Kurukshetra University, SH6, Kurukshetra, Haryana, 132118, India",30.0,76.8
+Kurukshetra University,"Kurukshetra University, Kurukshetra","Kurukshetra University, SH6, Kurukshetra, Haryana, 132118, India",30.0,76.8
+Kurukshetra University,"Kurukshetra University, Kurukshetra, India","Kurukshetra University, SH6, Kurukshetra, Haryana, 132118, India",30.0,76.8
+Kyoto University,Kyoto University,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.0,135.8
+Kyoto University,"Kyoto University, Kyoto, Japan","京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.0,135.8
+Kyung Hee University,Kyung Hee University,"Kyung Hee Tae Kwon Do, Vons 2370 Truck Service Ramp, University City, San Diego, San Diego County, California, 92122, USA",32.9,-117.2
+Kyung Hee University,"Kyung Hee University, Korea","경희사이버대학교, 26, 경희대로, 회기동, 동대문구, 서울특별시, 02447, 대한민국",37.6,127.1
+Kyung Hee University,"Kyung Hee University, Seoul, South Korea","경희사이버대학교, 26, 경희대로, 회기동, 동대문구, 서울특별시, 02447, 대한민국",37.6,127.1
+Kyung Hee University,"Kyung Hee University, South Korea","경희사이버대학교, 26, 경희대로, 회기동, 동대문구, 서울특별시, 02447, 대한민국",37.6,127.1
+Kyung Hee University,"Kyung Hee University, Yongin, South Korea","경희대학교 국제캠퍼스, 서천동로21번길, 서천동, 기흥구, 용인시, 경기, 17108, 대한민국",37.2,127.1
+Kyushu University,Kyushu University,"伊都ゲストハウス, 桜井太郎丸線, 西区, 福岡市, 福岡県, 九州地方, 819−0395, 日本",33.6,130.2
+La Trobe University,La Trobe University,"La Trobe University, Keck Street, Flora Hill, Bendigo, City of Greater Bendigo, Loddon Mallee, Victoria, 3550, Australia",-36.8,144.3
+La Trobe University,"La Trobe University, Australia","La Trobe University, Keck Street, Flora Hill, Bendigo, City of Greater Bendigo, Loddon Mallee, Victoria, 3550, Australia",-36.8,144.3
+Lancaster University,Lancaster University,"Lancaster University, Library Avenue, Bowland College, Hala, Lancaster, Lancs, North West England, England, LA1 4AP, UK",54.0,-2.8
+Lancaster University,"Lancaster University, Lancaster, UK","Lancaster University, Library Avenue, Bowland College, Hala, Lancaster, Lancs, North West England, England, LA1 4AP, UK",54.0,-2.8
+Lehigh University,Lehigh University,"Lehigh University, Library Drive, Sayre Park, Bethlehem, Northampton County, Pennsylvania, 18015, USA",40.6,-75.4
+Lehigh University,"Lehigh University, Bethlehem, PA 18015, USA","Lehigh University, Library Drive, Sayre Park, Bethlehem, Northampton County, Pennsylvania, 18015, USA",40.6,-75.4
+Liverpool John Moores University,Liverpool John Moores University,"John Lennon Art and Design Building, Duckinfield Street, Knowledge Quarter, Liverpool, North West England, England, L3 5YD, UK",53.4,-3.0
+Lomonosov Moscow State University,Lomonosov Moscow State University,"МГУ, улица Академика Хохлова, Московский государственный университет им. М. В. Ломоносова, район Раменки, Западный административный округ, Москва, ЦФО, 119234, РФ",55.7,37.5
+"London, United Kingdom","London, United Kingdom","London, Greater London, England, SW1A 2DU, UK",51.5,-0.1
+Loughborough University,"Computer Science, Loughborough University, Loughborough, UK","Computer Science, University Road, Charnwood, Leicestershire, East Midlands, England, LE11 3TP, UK",52.8,-1.2
+Loughborough University,Loughborough University,"Computer Science, University Road, Charnwood, Leicestershire, East Midlands, England, LE11 3TP, UK",52.8,-1.2
+Louisiana State University,Louisiana State University,"LSU, Gourrier Avenue, Baton Rouge, East Baton Rouge Parish, Louisiana, 70803, USA",30.4,-91.2
+Lund University,Lund University,"TEM at Lund University, 9, Klostergatan, Stadskärnan, Centrum, Lund, Skåne, Götaland, 22222, Sverige",55.7,13.2
+Lund University,"Lund University, Lund, Sweden","TEM at Lund University, 9, Klostergatan, Stadskärnan, Centrum, Lund, Skåne, Götaland, 22222, Sverige",55.7,13.2
+"M S Ramaiah Institute of Technology, Bangalore, Karnataka, India","M S Ramaiah Institute of Technology, Bangalore, Karnataka, India","M S Ramaiah Institute of Technology, MSRIT Quadrangle Path, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560054, India",13.0,77.6
+Maastricht University,Maastricht University,"UNS60, Professor Ten Hoorlaan, Randwyck, Maastricht, Limburg, Nederland, 6229EV, Nederland",50.8,5.7
+Maastricht University,"Maastricht University, Maastricht, Netherlands","University College Maastricht, 4, Zwingelput, Jekerkwartier, Maastricht, Limburg, Nederland, 6211KH, Nederland",50.8,5.7
+Macau University of Science and Technology,Macau University of Science and,"HKUST, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.3,114.3
+Macau University of Science and Technology,Macau University of Science and Technology,"Universidade de Ciência e Tecnologia de Macau 澳門科技大學 Macau University of Science and Technology, 偉龍馬路 Avenida Wai Long, 氹仔Taipa, 氹仔舊城區 Vila de Taipa, 嘉模堂區 Nossa Senhora do Carmo, 氹仔 Taipa, 澳門 Macau, 853, 中国",22.2,113.6
+Macau University of Science and Technology,"Macau University of Science and Technology, Macau","Universidade de Ciência e Tecnologia de Macau 澳門科技大學 Macau University of Science and Technology, 偉龍馬路 Avenida Wai Long, 氹仔Taipa, 氹仔舊城區 Vila de Taipa, 嘉模堂區 Nossa Senhora do Carmo, 氹仔 Taipa, 澳門 Macau, 853, 中国",22.2,113.6
+Mahanakorn University of Technology,Mahanakorn University of Technology,"มหาวิทยาลัยเทคโนโลยีมหานคร, 140, ถนนเชื่อมสัมพันธ์, กรุงเทพมหานคร, เขตหนองจอก, กรุงเทพมหานคร, 10530, ประเทศไทย",13.8,100.9
+Manchester University,Manchester University,"Manchester Metropolitan University – All Saints Campus, Lower Ormond Street, Hulme, Manchester, Greater Manchester, North West England, England, M15 6BX, UK",53.5,-2.2
+Manchester University,"Manchester University, UK","Manchester Metropolitan University – All Saints Campus, Lower Ormond Street, Hulme, Manchester, Greater Manchester, North West England, England, M15 6BX, UK",53.5,-2.2
+Mangalore University,Mangalore University,"Mangalore University, LR, ದಕ್ಷಿಣ ಕನ್ನಡ, Bantwal taluk, Dakshina Kannada, Karnataka, 574153, India",12.8,74.9
+Mangalore University,"Mangalore University, India","Mangalore University, LR, ದಕ್ಷಿಣ ಕನ್ನಡ, Bantwal taluk, Dakshina Kannada, Karnataka, 574153, India",12.8,74.9
+Manonmaniam Sundaranar University,Manonmaniam Sundaranar University,"Manonmaniam Sundaranar University, Tenkasi-Tirunelveli, Gandhi Nagar, Tirunelveli, Tirunelveli Kattabo, Tamil Nadu, 627808, India",8.8,77.7
+Manonmaniam Sundaranar University,"Manonmaniam Sundaranar University, India","Manonmaniam Sundaranar University, Tenkasi-Tirunelveli, Gandhi Nagar, Tirunelveli, Tirunelveli Kattabo, Tamil Nadu, 627808, India",8.8,77.7
+Manonmaniam Sundaranar University,"Manonmaniam Sundaranar University, Tirunelveli","Manonmaniam Sundaranar University, Tenkasi-Tirunelveli, Gandhi Nagar, Tirunelveli, Tirunelveli Kattabo, Tamil Nadu, 627808, India",8.8,77.7
+Manonmaniam Sundaranar University,"Manonmaniam Sundaranar University, Tirunelveli, India","Manonmaniam Sundaranar University, Tenkasi-Tirunelveli, Gandhi Nagar, Tirunelveli, Tirunelveli Kattabo, Tamil Nadu, 627808, India",8.8,77.7
+Marquette University,Marquette University,"Marquette University, West Wisconsin Avenue, University Hill, Milwaukee, Milwaukee County, Wisconsin, 53226, USA",43.0,-87.9
+"Massachusetts General Hospital, Boston, MA, USA","Massachusetts General Hospital, Boston, MA, USA","Mass General, 55, Fruit Street, Downtown Crossing, Beacon Hill, Boston, Suffolk County, Massachusetts, 02114, USA",42.4,-71.1
+Massachusetts Institute of Technology,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.4,-71.1
+Massachusetts Institute of Technology,MASSACHUSETTS INSTITUTE OF TECHNOLOGY,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.4,-71.1
+Massachusetts Institute of Technology,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.4,-71.1
+Massachusetts Institute of Technology,MASSACHUSETTS INSTITUTE OF TECHNOLOGY (MIT,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.4,-71.1
+Massachusetts Institute of Technology,Massachusetts Institute of Technology (MIT,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.4,-71.1
+Massachusetts Institute of Technology,"Massachusetts Institute of Technology, Cambridge, MA 02139, USA","MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.4,-71.1
+Max Planck Institute for Biological Cybernetics,Max Planck Institute for Biological Cybernetics,"Max-Planck-Institut für Biologische Kybernetik, 8, Max-Planck-Ring, Max-Planck-Institut, Wanne, Tübingen, Landkreis Tübingen, Regierungsbezirk Tübingen, Baden-Württemberg, 72076, Deutschland",48.5,9.1
+Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.3,7.0
+Max Planck Institute for Informatics,"Max Planck Institute for Informatics, Germany","MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.3,7.0
+Max Planck Institute for Informatics,"Max Planck Institute for Informatics, Saarbrucken, Germany","MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.3,7.0
+Max Planck Institute for Informatics,Max-Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.3,7.0
+Max Planck Institute for Informatics,"MPI Informatics, Germany","MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.3,7.0
+McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.5,-73.6
+McGill University,"McGill University, Montreal, Canada","McGill University, Avenue Docteur Penfield, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 2T8, Canada",45.5,-73.6
+McGovern Institute for Brain Research,McGovern Institute,"McGovern Institute for Brain Research (MIT), Main Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.4,-71.1
+McGovern Institute for Brain Research,McGovern Institute for Brain Research,"McGovern Institute for Brain Research (MIT), Main Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.4,-71.1
+McMaster University,McMaster University,"McMaster University, Westdale, Hamilton, Ontario, Canada",43.3,-79.9
+Meiji University,Meiji University,"明治大学, 錦華坂, 猿楽町1, 猿楽町, 東京, 千代田区, 東京都, 関東地方, 101-0051, 日本",35.7,139.8
+Memorial University of Newfoundland,Memorial University of Newfoundland,"Memorial University of Newfoundland, Overpass, St. John's, Newfoundland and Labrador, A1B 5S7, Canada",47.6,-52.7
+Memorial University of Newfoundland,"Memorial University of Newfoundland, Canada","Memorial University of Newfoundland, Overpass, St. John's, Newfoundland and Labrador, A1B 5S7, Canada",47.6,-52.7
+Memorial University of Newfoundland,"Memorial University of Newfoundland, Saint John's, NL, Canada","Memorial University of Newfoundland, Overpass, St. John's, Newfoundland and Labrador, A1B 5S7, Canada",47.6,-52.7
+Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.7,-84.5
+Michigan State University,"Michigan State University, E. Lansing, MI 48823, USA","Dero Fixit Bike Station, Grand River Avenue, East Lansing, Ingham County, Michigan, 48824, USA",42.7,-84.5
+Michigan State University,"Michigan State University, East Lansing 48824, USA","Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.7,-84.5
+Michigan State University,"Michigan State University, East Lansing MI","Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.7,-84.5
+Michigan State University,"Michigan State University, East Lansing, 48824, USA","Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.7,-84.5
+Michigan State University,"Michigan State University, East Lansing, MI","Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.7,-84.5
+Michigan State University,"Michigan State University, East Lansing, MI 48824, USA","Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.7,-84.5
+Michigan State University,"Michigan State University, East Lansing, MI, USA","Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.7,-84.5
+Michigan State University,"Michigan State University, United States of America","Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.7,-84.5
+Michigan State University,"Michigan State University, USA","Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.7,-84.5
+Michigan State University,to Michigan State University,"Red Cedar River, Small Acres Lane, Okemos, Ingham County, Michigan, 48864, USA",42.7,-84.4
+Microsoft Research Asia,"Microsoft Res. Asia, Beijing, China","微软亚洲研究院, 善缘街, 中关村, 稻香园南社区, 海淀区, 北京市, 100080, 中国",40.0,116.3
+Microsoft Research Asia,Microsoft Research,"Microsoft Research, 21, Station Road, Petersfield, Cambridge, Cambridgeshire, East of England, England, CB1 2FB, UK",52.2,0.1
+Microsoft Research Asia,"Microsoft Research Asia, Beijing, China","微软亚洲研究院, 善缘街, 中关村, 稻香园南社区, 海淀区, 北京市, 100080, 中国",40.0,116.3
+Microsoft Research Asia,"Microsoft Research Asia, China","微软亚洲研究院, 善缘街, 中关村, 稻香园南社区, 海淀区, 北京市, 100080, 中国",40.0,116.3
+Microsoft Research Asia,"Microsoft Research, Beijing, China","微软亚洲研究院, 善缘街, 中关村, 稻香园南社区, 海淀区, 北京市, 100080, 中国",40.0,116.3
+"Microsoft, Inc.","Microsoft, Bellevue, WA, USA","Microsoft, 10455, Northeast 8th Street, Bellevue, King County, Washington, 98004-5002, USA",47.6,-122.2
+"Microsoft, Inc.","Microsoft, Inc.","Microsoft, 10455, Northeast 8th Street, Bellevue, King County, Washington, 98004-5002, USA",47.6,-122.2
+"Microsoft, Inc.","Microsoft, Redmond, WA","Microsoft Cafe RedW-F, Bridle Crest Trail, Microsoft Redwest Campus, Redmond, King County, Washington, W LAKE SAMMAMISH PKWY NE, USA",47.7,-122.1
+Middle East Technical University,Middle East Technical University,"ODTÜ, 1, 1591.sk(315.sk), Çiğdem Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.9,32.8
+Middlebury College,Middlebury College,"Middlebury College, Old Chapel Road, Middlebury, Addison County, Vermont, 05753, USA",44.0,-73.2
+Middlesex University,Middlesex University,"Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK",51.6,-0.2
+Middlesex University,Middlesex University London,"Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK",51.6,-0.2
+Middlesex University,"Middlesex University London, London, UK","Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK",51.6,-0.2
+Middlesex University,"Middlesex University London, UK","Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK",51.6,-0.2
+Middlesex University,"Middlesex University, London","Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK",51.6,-0.2
+Monash University,Monash University,"Monash University, Mile Lane, Parkville, City of Melbourne, Victoria, 3000, Australia",-37.8,145.0
+Monash University,"Monash University, Caulfield East, Australia","Monash University (Caulfield campus), Queens Avenue, Caulfield East, City of Glen Eira, Victoria, 3163, Australia",-37.9,145.0
+Monash University,"Monash University, Victoria, Australia","Monash University, Business Park Drive, Monash Business Park, Notting Hill, City of Monash, Victoria, 3800, Australia",-37.9,145.1
+Monash University Malaysia,Monash University Malaysia,"Monash University Malaysia, Jalan Lagoon Selatan, Kampung Lembah Kinrara, SS13, Subang Jaya, Selangor, 47500, Malaysia",3.1,101.6
+Monash University Malaysia,"Monash University Malaysia, Bandar Sunway, Malaysia","Monash University Malaysia, Jalan Lagoon Selatan, Kampung Lembah Kinrara, SS13, Subang Jaya, Selangor, 47500, Malaysia",3.1,101.6
+"Moscow Institute of Physics and Technology, Russia","Moscow Institute of Physics and Technology, Russia","МФТИ, 9, Институтский переулок, Виноградовские Горки, Лихачёво, Долгопрудный, городской округ Долгопрудный, Московская область, ЦФО, 141700, РФ",55.9,37.5
+Muhlenberg College,Muhlenberg College,"Muhlenberg College, 2400, West Chew Street, Rose Garden, Allentown, Lehigh County, Pennsylvania, 18104, USA",40.6,-75.5
+Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.9,101.6
+Multimedia University,"Multimedia University, Cyberjaya, Malaysia","Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.9,101.6
+Myongji University,Myongji University,"명지대, 금학로, 역북동, 처인구, 용인시, 경기, 17144, 대한민국",37.2,127.2
+Nagaoka University of Technology,Nagaoka University of Technology,"長岡技術科学大学 (Nagaoka University of Technology), 長岡西山線, 長岡市, 新潟県, 中部地方, 日本",37.4,138.8
+Nagaoka University of Technology,"Nagaoka University of Technology, Japan","長岡技術科学大学 (Nagaoka University of Technology), 長岡西山線, 長岡市, 新潟県, 中部地方, 日本",37.4,138.8
+Nagoya University,Nagoya University,"SuperDARN (Hokkaido West), 太辛第1支線林道, 陸別町, 足寄郡, 十勝総合振興局, 北海道, 北海道地方, 日本",43.5,143.6
+Nagoya University,"Nagoya University, Japan","SuperDARN (Hokkaido West), 太辛第1支線林道, 陸別町, 足寄郡, 十勝総合振興局, 北海道, 北海道地方, 日本",43.5,143.6
+Nanjing Normal University,Nanjing Normal University,"南京师范大学仙林校区, 敏行路, 仙林大学城, 栖霞区, 南京市, 江苏省, 210046, 中国",32.1,118.9
+Nanjing Normal University,"Nanjing Normal University, China","南京师范大学仙林校区, 敏行路, 仙林大学城, 栖霞区, 南京市, 江苏省, 210046, 中国",32.1,118.9
+Nanjing Normal University,"Nanjing Normal University, Nanjing, China","南京师范大学仙林校区, 敏行路, 仙林大学城, 栖霞区, 南京市, 江苏省, 210046, 中国",32.1,118.9
+Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.1,118.8
+Nanjing University,Nanjing University of Aeronautics and Astronautics,"南京航空航天大学, 御道街, 白下区, 新世纪广场, 秦淮区, 南京市, 江苏省, 210016, 中国",32.0,118.8
+Nanjing University,"Nanjing University of Aeronautics and Astronautics, China","南京航空航天大学, 御道街, 白下区, 新世纪广场, 秦淮区, 南京市, 江苏省, 210016, 中国",32.0,118.8
+Nanjing University,"Nanjing University of Aeronautics and Astronautics, Nanjing 210016, China","南京航空航天大学, 御道街, 白下区, 新世纪广场, 秦淮区, 南京市, 江苏省, 210016, 中国",32.0,118.8
+Nanjing University,"Nanjing University of Aeronautics and Astronautics, Nanjing, China","南京航空航天大学, 御道街, 白下区, 新世纪广场, 秦淮区, 南京市, 江苏省, 210016, 中国",32.0,118.8
+Nanjing University,Nanjing University of Information Science and Technology,"南京信息工程大学, 龙山北路, 第十六街区, 浦口区, 南京市, 江苏省, 210032, 中国",32.2,118.7
+Nanjing University,"Nanjing University of Information Science and Technology, Nanjing, China","南京信息工程大学, 龙山北路, 第十六街区, 浦口区, 南京市, 江苏省, 210032, 中国",32.2,118.7
+Nanjing University,Nanjing University of Science and Technology,"南京理工大学, 友谊路, 余粮庄, 玄武区, 南京市, 江苏省, 210016, 中国",32.0,118.9
+Nanjing University,"Nanjing University of Science and Technology, China","南京理工大学, 友谊路, 余粮庄, 玄武区, 南京市, 江苏省, 210016, 中国",32.0,118.9
+Nanjing University,"Nanjing University of Science and Technology, Nanjing, China","南京理工大学, 友谊路, 余粮庄, 玄武区, 南京市, 江苏省, 210016, 中国",32.0,118.9
+Nanjing University,"Nanjing University, China","NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.1,118.8
+Nanjing University,"Nanjing University, Nanjing 210023, China","NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.1,118.8
+Nanjing University,"Nanjing University, Nanjing 210093, China","NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.1,118.8
+Nanjing University,"Nanjing University, Nanjing 210093, P.R.China","NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.1,118.8
+Nantong University,Nantong University,"南通大学, 狼山镇街道, 崇川区 (Chongchuan), 南通市 / Nantong, 江苏省, 226000, 中国",32.0,120.9
+Nantong University,"Nantong University, Nantong, China","南通大学, 狼山镇街道, 崇川区 (Chongchuan), 南通市 / Nantong, 江苏省, 226000, 中国",32.0,120.9
+Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.3,103.7
+Nanyang Technological University,"Nanyang Technological University, Singapore","NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.3,103.7
+Nanyang Technological University,"Nanyang Technological University, Singapore 639798","NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.3,103.7
+Nanyang Technological University,"Nanyang Technological University, Singapore 639798, Singapore","NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.3,103.7
+Nanyang Technological University,"Nanyang Technological University, Singapore, 639798","NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.3,103.7
+Nanyang Technological University,"Nanyang Technological University, Singapore, Singapore","NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.3,103.7
+Nanyang Technological University,The Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.3,103.7
+Nanyang Technological University,"The Nanyang Technological University, Singapore","NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.3,103.7
+National Central University,National Central University,"NCU, 300, 中大路, 上三座屋, 五權里, 樹林子, 中壢區, 桃園市, 320, 臺灣",25.0,121.2
+National Central University,"National Central University, Taoyuan County, Taiwan","NCU, 300, 中大路, 上三座屋, 五權里, 樹林子, 中壢區, 桃園市, 320, 臺灣",25.0,121.2
+National Cheng Kung University,National Cheng Kung University,"成大, 1, 大學路, 大學里, 前甲, 東區, 臺南市, 70101, 臺灣",23.0,120.2
+National Cheng Kung University,"National Cheng Kung University, Tainan, Taiwan","成大, 1, 大學路, 大學里, 前甲, 東區, 臺南市, 70101, 臺灣",23.0,120.2
+National Chiao Tung University,National Chiao Tung University,"NCTU;交大;交通大學;交大光復校區;交通大學光復校區, 1001, 大學路, 光明里, 赤土崎, 東區, 新竹市, 30010, 臺灣",24.8,121.0
+National Chiao Tung University,"National Chiao Tung University, Hsinchu, Taiwan","NCTU;交大;交通大學;交大光復校區;交通大學光復校區, 1001, 大學路, 光明里, 赤土崎, 東區, 新竹市, 30010, 臺灣",24.8,121.0
+National Chiao Tung University,"National Chiao Tung University, Taiwan","NCTU;交大;交通大學;交大光復校區;交通大學光復校區, 1001, 大學路, 光明里, 赤土崎, 東區, 新竹市, 30010, 臺灣",24.8,121.0
+National Chiao Tung University,National Chiao-Tung University,"NCTU;交大;交通大學;交大光復校區;交通大學光復校區, 1001, 大學路, 光明里, 赤土崎, 東區, 新竹市, 30010, 臺灣",24.8,121.0
+National Chung Cheng University,National Chung Cheng University,"國立中正大學, 168, 鳳凰大道, 民雄鄉, 嘉義縣, 62102, 臺灣",23.6,120.5
+National Chung Cheng University,"National Chung Cheng University, Chiayi, Taiwan","國立中正大學, 168, 鳳凰大道, 民雄鄉, 嘉義縣, 62102, 臺灣",23.6,120.5
+National Chung Hsing University,National Chung Hsing University,"國立中興大學, 145, 興大路, 積善里, 頂橋子頭, 南區, 臺中市, 402, 臺灣",24.1,120.7
+National Chung Hsing University,"National Chung Hsing University, Taichung","國立中興大學, 145, 興大路, 積善里, 頂橋子頭, 南區, 臺中市, 402, 臺灣",24.1,120.7
+National Chung Hsing University,"National Chung Hsing University, Taiwan","國立中興大學, 145, 興大路, 積善里, 頂橋子頭, 南區, 臺中市, 402, 臺灣",24.1,120.7
+National Institute of Advanced Industrial Science and Technology,National Institute of Advanced Industrial Science and Technology,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本",36.1,140.1
+National Institute of Advanced Industrial Science and Technology,y National Institute of Advanced Industrial Science and Technology,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本",36.1,140.1
+National Institute of Standards and Technology,National Institute of Standards and Technology,"National Institute of Standards and Technology, Summer Walk Drive, Diamond Farms, Gaithersburg, Montgomery County, Maryland, 20878, USA",39.1,-77.2
+National Institute of Standards and Technology,"National Institute of Standards and Technology, Gaithersburg, MD 20899, USA","National Institute of Standards and Technology, Summer Walk Drive, Diamond Farms, Gaithersburg, Montgomery County, Maryland, 20878, USA",39.1,-77.2
+"National Institute of Technology, Durgapur","National Institute of Technology, Durgapur, India","National Institute Of Technology, Durgapur, Priyadarshini Indira Sarani, Durgapur, Bānkurā, West Bengal, 713209, India",23.5,87.3
+"National Institute of Technology, Durgapur","National Institute of Technology, Durgapur, West Bengal, India","National Institute Of Technology, Durgapur, Priyadarshini Indira Sarani, Durgapur, Bānkurā, West Bengal, 713209, India",23.5,87.3
+"National Institute of Technology, Karnataka",National Institute of Technology Karnataka,"National Institute of Technology, Karnataka, NH66, ದಕ್ಷಿಣ ಕನ್ನಡ, Mangaluru taluk, Dakshina Kannada, Karnataka, 575025, India",13.0,74.8
+"National Institute of Technology, Rourkela",National Institute of Technology Rourkela,"National Institute of Technology, inside the department, Koel Nagar, Rourkela, Sundargarh, Odisha, 769002, India",22.3,84.9
+"National Institute of Technology, Rourkela","National Institute of Technology, Rourkela (Odisha), India","National Institute of Technology, inside the department, Koel Nagar, Rourkela, Sundargarh, Odisha, 769002, India",22.3,84.9
+National Institutes of Health,National Institutes of Health,"NIH, Pooks Hill, Bethesda, Montgomery County, Maryland, USA",39.0,-77.1
+National Institutes of Health,"National Institutes of Health, Bethesda, Maryland 20892","NIH, Pooks Hill, Bethesda, Montgomery County, Maryland, USA",39.0,-77.1
+National Sun Yat Sen University,National Sun Yat Sen University,"國立中山大學, 70, 蓮海路, 桃源里, 柴山, 鼓山區, 高雄市, 804, 臺灣",22.6,120.3
+National Sun Yat Sen University,"National Sun Yat Sen University, 804 Kaohsiung, Taiwan","國立中山大學, 70, 蓮海路, 桃源里, 柴山, 鼓山區, 高雄市, 804, 臺灣",22.6,120.3
+National Taichung University of Science and Technology,National Taichung University of science and Technology,"臺中科大, 129, 三民路三段, 錦平里, 賴厝廍, 北區, 臺中市, 40401, 臺灣",24.2,120.7
+National Taichung University of Science and Technology,"National Taichung University of science and Technology, Taichung","臺中科大, 129, 三民路三段, 錦平里, 賴厝廍, 北區, 臺中市, 40401, 臺灣",24.2,120.7
+National Taipei University,National Taipei University,"國立臺北大學, 151, 大學路, 龍恩里, 隆恩埔, 三峽區, 新北市, 23741, 臺灣",24.9,121.4
+National Taipei University of Technology,National Taipei University of Technology,"NTUT, 1, 忠孝東路三段, 民輝里, 東區商圈, 大安區, 臺北市, 10608, 臺灣",25.0,121.5
+National Taipei University of Technology,"National Taipei University of Technology, Taipei, Taiwan","NTUT, 1, 忠孝東路三段, 民輝里, 東區商圈, 大安區, 臺北市, 10608, 臺灣",25.0,121.5
+National Taiwan Normal University,National Taiwan Normal University,"師大分部, 88, 汀州路四段, 萬年里, 文山區, 臺北市, 11677, 臺灣",25.0,121.5
+National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.0,121.5
+National Taiwan University,"National Taiwan University, 10647, Taipei, Taiwan","臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.0,121.5
+National Taiwan University,"National Taiwan University, Taipei, Taiwan","臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.0,121.5
+National Taiwan University,"National Taiwan University, Taiwan","臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.0,121.5
+National Taiwan University of Science and Technology,National Taiwan University of Science and Technology,"臺科大, 43, 基隆路四段, 學府里, 下內埔, 大安區, 臺北市, 10607, 臺灣",25.0,121.5
+National Taiwan University of Science and Technology,"National Taiwan University of Science and Technology, Taipei 10607, Taiwan","臺科大, 43, 基隆路四段, 學府里, 下內埔, 大安區, 臺北市, 10607, 臺灣",25.0,121.5
+National Taiwan University of Science and Technology,"National Taiwan University of Science and Technology, Taipei, Taiwan","臺科大, 43, 基隆路四段, 學府里, 下內埔, 大安區, 臺北市, 10607, 臺灣",25.0,121.5
+National Technical University of Athens,National Technical University of Athens,"Εθνικό Μετσόβιο Πολυτεχνείο, Στουρνάρη, Μουσείο, Αθήνα, Δήμος Αθηναίων, Π.Ε. Κεντρικού Τομέα Αθηνών, Περιφέρεια Αττικής, Αττική, 11250, Ελλάδα",38.0,23.7
+National Tsing Hua University,National Tsing Hua University,"國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.8,121.0
+National Tsing Hua University,"National Tsing Hua University, Hsinchu, Taiwan","國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.8,121.0
+National Tsing Hua University,"National Tsing Hua University, Taiwan","國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.8,121.0
+National University of Defense Technology,National University of Defense and Technology,"国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.2,113.0
+National University of Defense Technology,National University of Defense Technology,"国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.2,113.0
+National University of Defense Technology,"National University of Defense Technology, Changsha 410073, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.2,113.0
+National University of Defense Technology,"National University of Defense Technology, Changsha, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.2,113.0
+National University of Ireland Galway,National University of Ireland Galway,"National University of Ireland, Galway, Earl's Island, Townparks, Nun's Island, Galway Municipal District, Cathair na Gaillimhe, County Galway, Connacht, H91 F5TE, Ireland",53.3,-9.1
+National University of Ireland Galway,"National University of Ireland Galway, Galway, Ireland","National University of Ireland, Galway, Earl's Island, Townparks, Nun's Island, Galway Municipal District, Cathair na Gaillimhe, County Galway, Connacht, H91 F5TE, Ireland",53.3,-9.1
+National University of Ireland Maynooth,National University of Ireland Maynooth,"National University of Ireland Maynooth, River Apartments, Maynooth, Maynooth ED, Maynooth Municipal District, County Kildare, Leinster, KILDARE, Ireland",53.4,-6.6
+National University of Ireland Maynooth,"National University of Ireland Maynooth, Co. Kildare, Ireland","National University of Ireland Maynooth, River Apartments, Maynooth, Maynooth ED, Maynooth Municipal District, County Kildare, Leinster, KILDARE, Ireland",53.4,-6.6
+National University of Kaohsiung,National University of Kaohsiung,"國立高雄大學, 中央廣場, 藍田, 藍田里, 楠梓區, 高雄市, 811, 臺灣",22.7,120.3
+National University of Kaohsiung,"National University of Kaohsiung, 811 Kaohsiung, Taiwan","國立高雄大學, 中央廣場, 藍田, 藍田里, 楠梓區, 高雄市, 811, 臺灣",22.7,120.3
+National University of Sciences and Technology,National University of Science and Technology,"National University of Science and Technology, Indus Loop, H-11, ICT, وفاقی دارالحکومت اسلام آباد, 44000, ‏پاکستان‎",33.6,73.0
+National University of Sciences and Technology,National University of Sciences and Technology,"National University of Sciences and Technology (NUST), Kashmir Highway, جی - 10, ICT, وفاقی دارالحکومت اسلام آباد, 44000, ‏پاکستان‎",33.6,73.0
+National University of Sciences and Technology,National University of Sciences and Technology (NUST),"National University of Sciences and Technology (NUST), Kashmir Highway, جی - 10, ICT, وفاقی دارالحکومت اسلام آباد, 44000, ‏پاکستان‎",33.6,73.0
+National University of Sciences and Technology,"National University of Sciences and Technology (NUST), Islamabad, Pakistan","National University of Sciences and Technology (NUST), Kashmir Highway, جی - 10, ICT, وفاقی دارالحکومت اسلام آباد, 44000, ‏پاکستان‎",33.6,73.0
+National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.3,103.8
+National University of Singapore,National University of singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.3,103.8
+National University of Singapore,"National University of Singapore, Singapore","NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.3,103.8
+National University of Singapore,"National University of Singapore, Singapore 117576","NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.3,103.8
+National University of Singapore,"National University of Singapore, Singapore, Singapore","NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.3,103.8
+Naval Research Laboratory,Naval Research Laboratory,"Naval Research Laboratory Post Office, 4555, Overlook Avenue Southwest, Washington, D.C., 20375, USA",38.8,-77.0
+Naval Research Laboratory,"Naval Research Laboratory, Washington DC","Naval Research Laboratory Post Office, 4555, Overlook Avenue Southwest, Washington, D.C., 20375, USA",38.8,-77.0
+Nazarbayev University,Nazarbayev University,"Назарбаев Университет, проспект Туран, BI village, Астана, район Есиль, Астана, 010000, Казахстан",51.1,71.4
+Nazarbayev University,"Nazarbayev University, Astana, Kazakhstan","Назарбаев Университет, проспект Туран, BI village, Астана, район Есиль, Астана, 010000, Казахстан",51.1,71.4
+New Jersey Institute of Technology,New Jersey Institute of Technology,"New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA",40.7,-74.2
+New Jersey Institute of Technology,"New Jersey Institute of Technology, Newark , NJ, USA","New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA",40.7,-74.2
+New Jersey Institute of Technology,"New Jersey Institute of Technology, Newark, USA","New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA",40.7,-74.2
+New Jersey Institute of Technology,"New Jersey Institute of Technology, University Heights Newark, NJ 07102 USA","New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA",40.7,-74.2
+New Jersey Institute of Technology,"New Jersey Institute of Technology, University Heights, Newark, New Jersey 07102, USA","New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA",40.7,-74.2
+New Jersey Institute of Technology,"New Jersey Institute of Technology, USA","New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA",40.7,-74.2
+New Jersey Institute of Technology,University Heights,"New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA",40.7,-74.2
+New Jersey Institute of Technology,University Heights Newark,"New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA",40.7,-74.2
+New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.7,-74.0
+Newcastle University,Newcastle University,"Newcastle University, Claremont Walk, Haymarket, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE1 7RU, UK",55.0,-1.6
+Newcastle University,"Newcastle University, Newcastle upon Tyne","Newcastle University, Claremont Walk, Haymarket, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE1 7RU, UK",55.0,-1.6
+Normal University,Normal University,"云南师范大学, 一二一大街, 志城家园, 五华区, 五华区 (Wuhua), 昆明市 (Kunming), 云南省, 650030, 中国",25.1,102.7
+Normal University,"Normal University, Kunming, China","云南师范大学, 一二一大街, 志城家园, 五华区, 五华区 (Wuhua), 昆明市 (Kunming), 云南省, 650030, 中国",25.1,102.7
+"North Acton, London","North Acton, London","North Acton, Victoria Road, Acton, London Borough of Ealing, London, Greater London, England, W3 6UP, UK",51.5,-0.3
+North Carolina Central University,North Carolina Central University,"North Carolina Central University, George Street, Hayti, Durham, Durham County, North Carolina, 27707, USA",36.0,-78.9
+North Carolina State University,North Carolina State University,"North Carolina State University, Oval Drive, West Raleigh, Raleigh, Wake County, North Carolina, 27695, USA",35.8,-78.7
+North Carolina State University,"North Carolina State University, Raleigh, United States of America","North Carolina State University, Oval Drive, West Raleigh, Raleigh, Wake County, North Carolina, 27695, USA",35.8,-78.7
+North China Electric Power University,North China Electric Power University,"华北电力大学, 永华北大街, 莲池区, 保定市, 莲池区 (Lianchi), 保定市, 河北省, 071000, 中国",38.9,115.5
+North China Electric Power University,"North China Electric Power University, Baoding, China","华北电力大学, 永华北大街, 莲池区, 保定市, 莲池区 (Lianchi), 保定市, 河北省, 071000, 中国",38.9,115.5
+North Dakota State University,North Dakota State University,"North Dakota State University, 15th Avenue North, Fargo, Cass County, North Dakota, 58102, USA",46.9,-96.8
+North Dakota State University,"North Dakota State University, Fargo, ND 58108-6050, USA","North Dakota State University, 15th Avenue North, Fargo, Cass County, North Dakota, 58102, USA",46.9,-96.8
+Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.3,-71.1
+Northeastern University,"Northeastern University, Boston, MA","Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.3,-71.1
+Northeastern University,"Northeastern University, Boston, MA, USA","Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.3,-71.1
+Northeastern University,"Northeastern University, Boston, USA","Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.3,-71.1
+Northeastern University,"Northeastern University, Boston, USA, 02115","Northeastern University, Public Alley 807, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.3,-71.1
+Northeastern University,"Northeastern University, MA, USA","Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.3,-71.1
+Northumbria University,Northumbria University,"Northumbria University, Birkdale Close, High Heaton, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE7 7TP, UK",55.0,-1.6
+Northumbria University,"Northumbria University, Newcastle upon Tyne, NE1 8ST, UK","Northumbria University, Northumberland Road, Cradlewell, Haymarket, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE1 8SG, UK",55.0,-1.6
+Northumbria University,"Northumbria University, Newcastle Upon Tyne, Tyne and Wear","Northumbria University, Birkdale Close, High Heaton, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE7 7TP, UK",55.0,-1.6
+Northumbria University,"Northumbria University, Newcastle upon Tyne, U.K.","Northumbria University, Birkdale Close, High Heaton, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE7 7TP, UK",55.0,-1.6
+Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.2,108.9
+Northwestern Polytechnical University,"Northwestern Polytechnical University, Xi’an, China","西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.2,108.9
+Northwestern Polytechnical University,"Northwestern Polytechnical University, Xian 710072, Shaanxi, China","西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.2,108.9
+Northwestern University,Northwestern University,"Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA",42.1,-87.7
+Northwestern University,"Northwestern University, Evanston, IL, USA","Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA",42.1,-87.7
+Nottingham Trent University,Nottingham Trent University,"Nottingham Trent University, Waverley Terrace, Lace Market, The Park, City of Nottingham, East Midlands, England, NG1 5JD, UK",53.0,-1.2
+Nottingham Trent University,"Nottingham Trent University, Nottingham, UK","Nottingham Trent University, Waverley Terrace, Lace Market, The Park, City of Nottingham, East Midlands, England, NG1 5JD, UK",53.0,-1.2
+Nottingham University Hospital,Nottingham University Hospital,"Nottingham University Hospital, Central Route, Dunkirk, Wollaton, City of Nottingham, East Midlands, England, NG7 2UH, UK",52.9,-1.2
+Nottingham University Hospital,"Nottingham University Hospital, Nottingham, UK","Nottingham University Hospital, Central Route, Dunkirk, Wollaton, City of Nottingham, East Midlands, England, NG7 2UH, UK",52.9,-1.2
+Oak Ridge National Laboratory,Oak Ridge National Laboratory,"Oak Ridge National Laboratory, Oak Ridge, Roane County, Tennessee, USA",35.9,-84.3
+Oak Ridge National Laboratory,"Oak Ridge National Laboratory, USA","Oak Ridge National Laboratory, Oak Ridge, Roane County, Tennessee, USA",35.9,-84.3
+Oakland University,Oakland University,"Oakland University, 201, Meadow Brook Road, Rochester Hills, Oakland County, Michigan, 48309-4401, USA",42.7,-83.2
+Ocean University of China,Ocean University of China,"中国海洋大学, 238, 松岭路 Sōnglǐng Road, 朱家洼, 崂山区 (Laoshan), 青岛市, 山东省, 266100, 中国",36.2,120.5
+Ocean University of China,"Ocean University of China, Qingdao, China","中国海洋大学, 238, 松岭路 Sōnglǐng Road, 朱家洼, 崂山区 (Laoshan), 青岛市, 山东省, 266100, 中国",36.2,120.5
+Ohio State University,The Ohio State University,"The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA",40.0,-83.0
+Ohio State University,"The Ohio State University, Columbus, OH, USA","The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA",40.0,-83.0
+Ohio State University,"The Ohio State University, OH","The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA",40.0,-83.0
+Okayama University,Okayama University,"岡山大学, 津高法界院停車場線, 津島東2, 津島東, 北区, 岡山市, 岡山県, 中国地方, 700-0081, 日本",34.7,133.9
+Okayama University,"Okayama University, Okayama, Japan","岡山大学, 津高法界院停車場線, 津島東2, 津島東, 北区, 岡山市, 岡山県, 中国地方, 700-0081, 日本",34.7,133.9
+Oklahoma State University,Oklahoma State University,"Walmart East Bus Stop, East Virginia Avenue, Stillwater, Payne County, Oklahoma, 74075, USA",36.1,-97.1
+Oklahoma State University,"Oklahoma State University, Stillwater, OK, USA","Walmart East Bus Stop, East Virginia Avenue, Stillwater, Payne County, Oklahoma, 74075, USA",36.1,-97.1
+Old Dominion University,Old Dominion University,"Old Dominion University, Elkhorn Avenue, Lamberts Point, Norfolk, Virginia, 23508, USA",36.9,-76.3
+Old Dominion University,"Old Dominion University, Norfolk, VA 23529, USA","Old Dominion University, Elkhorn Avenue, Lamberts Point, Norfolk, Virginia, 23508, USA",36.9,-76.3
+Old Dominion University,"Old Dominion University, Norfolk, VA, 23529","Old Dominion University, Elkhorn Avenue, Lamberts Point, Norfolk, Virginia, 23508, USA",36.9,-76.3
+Open University,The Open University,"The Open University, East Lane, Walton, Monkston, Milton Keynes, South East, England, MK7 6AE, UK",52.0,-0.7
+Open University of Israel,Open University of Israel,"האוניברסיטה הפתוחה, 15, אבא חושי, חיפה, גבעת דאונס, חיפה, מחוז חיפה, NO, ישראל",32.8,35.0
+Open University of Israel,The Open University of Israel,"האוניברסיטה הפתוחה, 15, אבא חושי, חיפה, גבעת דאונס, חיפה, מחוז חיפה, NO, ישראל",32.8,35.0
+"Orange Labs, R&D, Meylan, France","Orange Labs, R&D, Meylan, France","Orange Labs, 28, Chemin du Vieux Chêne, Inovallée Meylan, Le Mas du Bruchet, Meylan, Grenoble, Isère, Auvergne-Rhône-Alpes, France métropolitaine, 38240, France",45.2,5.8
+Oregon State University,Oregon State University,"OSU Beaver Store, 538, Southwest 6th Avenue, Portland Downtown, Portland, Multnomah County, Oregon, 97204, USA",45.5,-122.7
+Osaka University,Osaka University,"大阪大学清明寮, 服部西町四丁目, 豊中市, 大阪府, 近畿地方, 日本",34.8,135.5
+Osaka University,Osaka university,"大阪大学清明寮, 服部西町四丁目, 豊中市, 大阪府, 近畿地方, 日本",34.8,135.5
+Osaka University,"Osaka university, Japan","大阪大学清明寮, 服部西町四丁目, 豊中市, 大阪府, 近畿地方, 日本",34.8,135.5
+Otto von Guericke University,Otto von Guericke University,"Otto-von-Guericke-Universität Magdeburg, 2, Universitätsplatz, Krökentorviertel/Breiter Weg NA, Alte Neustadt, Magdeburg, Sachsen-Anhalt, 39106, Deutschland",52.1,11.6
+Otto von Guericke University,Otto-von-Guericke University Magdeburg,"Otto-von-Guericke-Universität Magdeburg, 2, Universitätsplatz, Krökentorviertel/Breiter Weg NA, Alte Neustadt, Magdeburg, Sachsen-Anhalt, 39106, Deutschland",52.1,11.6
+Oxford Brookes University,Oxford Brookes University,"Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK",51.8,-1.2
+Oxford Brookes University,"Oxford Brookes University, Oxford, United Kingdom","Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK",51.8,-1.2
+Oxford University,Oxford University,"University College, Logic Lane, Grandpont, Oxford, Oxon, South East, England, OX1 4EX, UK",51.8,-1.3
+Oxford University,"Oxford University, UK","James Mellon Hall, Rectory Road, New Marston, Oxford, Oxon, South East, England, OX4 1BU, UK",51.7,-1.2
+Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",40.0,116.3
+Peking University,"Peking University, Beijing","北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",40.0,116.3
+Peking University,"Peking University, Beijing 100871, China","北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",40.0,116.3
+Peking University,"Peking University, Beijing, China","北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",40.0,116.3
+Philipps-Universität Marburg,Philipps-Universität Marburg,"FB 09 | Germanistik und Kunstwissenschaften (Dekanat), 3, Deutschhausstraße, Biegenhausen, Biegenviertel, Marburg, Landkreis Marburg-Biedenkopf, Regierungsbezirk Gießen, Hessen, 35037, Deutschland",50.8,8.8
+Philipps-Universität Marburg,"Philipps-Universität Marburg, D-35032, Germany","FB 09 | Germanistik und Kunstwissenschaften (Dekanat), 3, Deutschhausstraße, Biegenhausen, Biegenviertel, Marburg, Landkreis Marburg-Biedenkopf, Regierungsbezirk Gießen, Hessen, 35037, Deutschland",50.8,8.8
+Plymouth University,Plymouth University,"Plymouth University, Portland Square, Barbican, Plymouth, South West England, England, PL4 6AP, UK",50.4,-4.1
+Pohang University of Science and Technology,Pohang University of Science and Technology,"포스텍, 77, 청암로, 효곡동, 남구, 포항시, 경북, 37673, 대한민국",36.0,129.3
+Pohang University of Science and Technology,Pohang University of Science and Technology,"포스텍, 77, 청암로, 효곡동, 남구, 포항시, 경북, 37673, 대한민국",36.0,129.3
+Pohang University of Science and Technology,Pohang University of Science and Technology (POSTECH),"포스텍, 77, 청암로, 효곡동, 남구, 포항시, 경북, 37673, 대한민국",36.0,129.3
+Pohang University of Science and Technology,"Pohang University of Science and Technology (POSTECH), Pohang, Republic of Korea","포스텍, 77, 청암로, 효곡동, 남구, 포항시, 경북, 37673, 대한민국",36.0,129.3
+Pohang University of Science and Technology,"Pohang University of Science and Technology (POSTECH), South Korea","포스텍, 77, 청암로, 효곡동, 남구, 포항시, 경북, 37673, 대한민국",36.0,129.3
+Pohang University of Science and Technology,"Pohang University of Science and Technology, Pohang, Korea","포스텍, 77, 청암로, 효곡동, 남구, 포항시, 경북, 37673, 대한민국",36.0,129.3
+Pohang University of Science and Technology,"POSTECH, Pohang, South Korea, 37673","포스텍, 77, 청암로, 효곡동, 남구, 포항시, 경북, 37673, 대한민국",36.0,129.3
+"Politecnico di Torino, Italy","Politecnico di Torino, Italy","Politecnico di Torino, Corso Castelfidardo, Crocetta, Circoscrizione 3, Torino, TO, PIE, 10129, Italia",45.1,7.7
+"Politecnico di Torino, Italy","Politecnico di Torino, Torino, Italy","Politecnico di Torino, Corso Castelfidardo, Crocetta, Circoscrizione 3, Torino, TO, PIE, 10129, Italia",45.1,7.7
+Politehnica University of Timisoara,Politehnica University of Timisoara,"UPT, Bulevardul Vasile Pârvan, Elisabetin, Timișoara, Timiș, 300223, România",45.7,21.2
+Politehnica University of Timisoara,Politehnica University of Timisoara,"UPT, Bulevardul Vasile Pârvan, Elisabetin, Timișoara, Timiș, 300223, România",45.7,21.2
+Pondicherry Engineering College,Pondicherry Engineering College,"Pondicherry Engineering College, PEC MAIN ROAD, Sri Ma, Puducherry, Puducherry district, Puducherry, 605001, India",12.0,79.8
+Pontificia Universidad Catolica de Chile,Pontificia Universidad Catolica de Chile,"Pontificia Universidad Católica de Chile - Campus Lo Contador, 1916, El Comendador, Pedro de Valdivia Norte, Providencia, Provincia de Santiago, Región Metropolitana de Santiago, 7500000, Chile",-33.4,-70.6
+Portland State University,Portland State University,"Portland State University, Southwest Park Avenue, University District, Portland Downtown, Portland, Multnomah County, Oregon, 97201, USA",45.5,-122.7
+Portland State University,"Portland State University, USA","Portland State University, Southwest Park Avenue, University District, Portland Downtown, Portland, Multnomah County, Oregon, 97201, USA",45.5,-122.7
+Poznan University of Technology,Poznan University of Technology,"Dom Studencki nr 3, 3, Kórnicka, Święty Roch, Rataje, Poznań, wielkopolskie, 61-141, RP",52.4,17.0
+Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.3,-74.7
+Princeton University,"Princeton University, Princeton, New Jersey, USA","Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.3,-74.7
+Princeton University,"Princeton University, Princeton, NJ, USA","Lot 25, Ivy Lane, Princeton Township, Mercer County, New Jersey, 08544, USA",40.3,-74.7
+PSG College of Technology,"PSG College of Technology, Coimbatore, Tamil Nadu, India","PSG College of Technology, Avinashi Road, Ward 38, North Zone, Coimbatore, Coimbatore district, Tamil Nadu, 641001, India",11.0,77.0
+"PSG College of Technology, Coimbatore, Tamil Nadu, India","PSG College of Technology, Coimbatore, Tamil Nadu, India","PSG College of Technology, Avinashi Road, Ward 38, North Zone, Coimbatore, Coimbatore district, Tamil Nadu, 641001, India",11.0,77.0
+Pune Institute of Computer Technology,"Pune Institute of Computer Technology, Pune, ( India","Pune Institute of Computer Technology, Mediacal College Road, Vadgaon Budruk, Katraj, Pune, Pune District, Maharashtra, 411043, India",18.5,73.9
+Punjabi University Patiala,Punjabi University Patiala,"Punjabi University Patiala, Rajpura Road, Patiala, Punjab, 147001, India",30.4,76.5
+Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.4,-86.9
+Purdue University,"Purdue University, West Lafayette, IN 47907, USA","Mathematical Sciences Library, 105, University Street, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.4,-86.9
+Purdue University,"Purdue University, West Lafayette, IN, USA","Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.4,-86.9
+Purdue University,"Purdue University, West Lafayette, IN. 47907, USA","Mathematical Sciences Library, 105, University Street, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.4,-86.9
+Purdue University,"Purdue University, West Lafayette, Indiana, 47906, USA","Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.4,-86.9
+Qatar University,Qatar University,"Qatar University, Roindabout 3, Al Tarfa (68), أم صلال, 24685, ‏قطر‎",25.4,51.5
+Qatar University,"Qatar University, Doha, Qatar","Qatar University, Roindabout 3, Al Tarfa (68), أم صلال, 24685, ‏قطر‎",25.4,51.5
+Qatar University,"Qatar University, Qatar","Qatar University, Roindabout 3, Al Tarfa (68), أم صلال, 24685, ‏قطر‎",25.4,51.5
+Quanzhou Normal University,Quanzhou Normal University,"泉州师范学院, 东滨路, 丰泽区, 丰泽区 (Fengze), 泉州市 / Quanzhou, 福建省, 362000, 中国",24.9,118.7
+Quanzhou Normal University,"Quanzhou Normal University, Quanzhou, China","泉州师范学院, 东滨路, 丰泽区, 丰泽区 (Fengze), 泉州市 / Quanzhou, 福建省, 362000, 中国",24.9,118.7
+Queen Mary University,Queen Mary University,"Universitatea Creștină Partium - Clădirea Sulyok, 27, Strada Primăriei, Orașul Nou, Oradea, Bihor, 410209, România",47.1,21.9
+Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.5,0.0
+Queen Mary University of London,"Queen Mary University of London, London","Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.5,0.0
+Queen Mary University of London,"Queen Mary University of London, London E1 4NS, UK","Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.5,0.0
+Queen Mary University of London,"Queen Mary University of London, London, U.K.","Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.5,0.0
+Queen Mary University of London,"Queen Mary University of London, UK","Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.5,0.0
+Queensland University of Technology,Queensland University of Technology,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.5,153.0
+Queensland University of Technology,Queensland University of Technology (QUT,"QUT Gardens Point Main Library, V, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.5,153.0
+Queensland University of Technology,"Queensland University of Technology, Australia","Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.5,153.0
+Queensland University of Technology,"Queensland University of Technology, Brisbane, QLD, Australia","Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.5,153.0
+Queensland University of Technology,Queensland University of Technology(QUT,"QUT Gardens Point Main Library, V, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.5,153.0
+"R V College of Engineering, Bangalore, India","R V College of Engineering, Bangalore, India","R. V. College of Engineering, Bangalore-Mysore Road, Kengeri, Rajarajeshwari Nagar Zone, Bengaluru, Bangalore Urban, Karnataka, 560059, India",12.9,77.5
+Raipur Institute of Technology,Raipur Institute of Technology,"Raipur Institute of Technology, NH53, Raipur, Chhattisgarh, 492101, India",21.2,81.8
+Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.7,-73.7
+Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Troy, NY 12180, USA","Rensselaer Polytechnic Institute, Tibbits Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.7,-73.7
+Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, USA","Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.7,-73.7
+Reutlingen University,Reutlingen University,"Campus Hohbuch, Campus Hochschule Reutlingen, Reutlingen, Landkreis Reutlingen, Regierungsbezirk Tübingen, Baden-Württemberg, 72762, Deutschland",48.5,9.2
+RheinAhrCampus der Hochschule Koblenz,"RheinAhrCampus der Hochschule Koblenz, Remagen, Germany","RheinAhrCampus, 2, Joseph-Rovan-Allee, Remagen, Landkreis Ahrweiler, Rheinland-Pfalz, 53424, Deutschland",50.6,7.3
+Rheinische-Friedrich-Wilhelms University,Rheinische-Friedrich-Wilhelms University,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.7,7.1
+Rheinische-Friedrich-Wilhelms University,"Rheinische-Friedrich-Wilhelms University, Bonn, Germany","Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.7,7.1
+Rice University,Rice University,"Rice University, Stockton Drive, Houston, Harris County, Texas, 77005-1890, USA",29.7,-95.4
+Rice University,"Rice University, Houston, TX, 77005, USA","Rice University, Stockton Drive, Houston, Harris County, Texas, 77005-1890, USA",29.7,-95.4
+Rio de Janeiro State University,Rio de Janeiro State University,"UERJ, 524, Rua São Francisco Xavier, Maracanã, Zona Norte do Rio de Janeiro, Rio de Janeiro, Microrregião Rio de Janeiro, Região Metropolitana do Rio de Janeiro, RJ, Região Sudeste, 20550-900, Brasil",-22.9,-43.2
+Rio de Janeiro State University,"Rio de Janeiro State University, Brazil","UERJ, 524, Rua São Francisco Xavier, Maracanã, Zona Norte do Rio de Janeiro, Rio de Janeiro, Microrregião Rio de Janeiro, Região Metropolitana do Rio de Janeiro, RJ, Região Sudeste, 20550-900, Brasil",-22.9,-43.2
+Ritsumeikan University,Ritsumeikan University,"立命館大学 (Ritsumeikan University), 衣笠宇多野線, 北区, 京都市, 京都府, 近畿地方, 6038577, 日本",35.0,135.7
+Ritsumeikan University,"Ritsumeikan University, Japan","立命館大学 (Ritsumeikan University), 衣笠宇多野線, 北区, 京都市, 京都府, 近畿地方, 6038577, 日本",35.0,135.7
+Ritsumeikan University,"Ritsumeikan University, Kyoto, Japan","立命館大学 (Ritsumeikan University), 衣笠宇多野線, 北区, 京都市, 京都府, 近畿地方, 6038577, 日本",35.0,135.7
+RMIT University,RMIT University,"RMIT University, 124, La Trobe Street, Melbourne City, City of Melbourne, Victoria, 3000, Australia",-37.8,145.0
+RMIT University,"RMIT University, Australia","RMIT University, 124, La Trobe Street, Melbourne City, City of Melbourne, Victoria, 3000, Australia",-37.8,145.0
+RMIT University,"RMIT University, Melbourne, Australia","RMIT University, 124, La Trobe Street, Melbourne City, City of Melbourne, Victoria, 3000, Australia",-37.8,145.0
+RMIT University,"RMIT University, Melbourne, VIC, Australia","RMIT University, 124, La Trobe Street, Melbourne City, City of Melbourne, Victoria, 3000, Australia",-37.8,145.0
+"RMIT University, Vietnam","RMIT University, Vietnam","RMIT University Vietnam - Saigon South Campus, 702, Nguyễn Văn Linh, Khu 3 - Khu Đại học, Phường Tân Phong, Quận 7, Tp HCM, 756604, Việt Nam",10.7,106.7
+Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.7,100.5
+Rochester Institute of Technology,Rochester Institute of Technology,"Rochester Institute of Technology (RIT), 1, Lomb Memorial Drive, Bailey, Henrietta Town, Monroe County, New York, 14623, USA",43.1,-77.7
+Rowan University,Rowan University,"Rowan University, Esbjornson Walk, Glassboro, Gloucester County, New Jersey, 08028, USA",39.7,-75.1
+Rowan University,"Rowan University, Glassboro, NJ- 08028","Wellness Center (Winans Hall), Mullica Hill Road, Beau Rivage, Glassboro, Gloucester County, New Jersey, 08028:08062, USA",39.7,-75.1
+Rowland Institute,Rowland Institute,"Rowland Research Institute, Land Boulevard, East Cambridge, Cambridge, Middlesex County, Massachusetts, 02142, USA",42.4,-71.1
+Ruhr-University Bochum,Ruhr University Bochum,"RUB, 150, Universitätsstraße, Ruhr-Universität, Querenburg, Bochum-Süd, Bochum, Regierungsbezirk Arnsberg, Nordrhein-Westfalen, 44801, Deutschland",51.4,7.3
+Ruhr-University Bochum,Ruhr-University Bochum,"RUB, 150, Universitätsstraße, Ruhr-Universität, Querenburg, Bochum-Süd, Bochum, Regierungsbezirk Arnsberg, Nordrhein-Westfalen, 44801, Deutschland",51.4,7.3
+Ruhr-University Bochum,"Ruhr-University Bochum, Germany","RUB, 150, Universitätsstraße, Ruhr-Universität, Querenburg, Bochum-Süd, Bochum, Regierungsbezirk Arnsberg, Nordrhein-Westfalen, 44801, Deutschland",51.4,7.3
+Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.5,-74.4
+Rutgers University,"Rutgers University, New Brunswick, NJ","Zimmerli Art Museum, 71, Hamilton Street, New Brunswick, Middlesex County, New Jersey, 08901-1248, USA",40.5,-74.4
+Rutgers University,"Rutgers University, Newark, NJ, USA","Dana Library, Bleeker Street, Teachers Village, Newark, Essex County, New Jersey, 07102, USA",40.7,-74.2
+Rutgers University,"Rutgers University, Piscataway","James Dickson Carr Library, 75, Avenue E, Piscataway Township, Middlesex County, New Jersey, 08854-8040, USA",40.5,-74.4
+Rutgers University,"Rutgers University, Piscataway NJ 08854, USA","The Rock Cafe, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA",40.5,-74.4
+Rutgers University,"Rutgers University, Piscataway, New Jersey 08854, USA","The Rock Cafe, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA",40.5,-74.4
+Rutgers University,"Rutgers University, Piscataway, NJ","The Rock Cafe, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA",40.5,-74.4
+Rutgers University,"Rutgers University, Piscataway, NJ 08854, USA","The Rock Cafe, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA",40.5,-74.4
+Rutgers University,"Rutgers University, Piscataway, NJ, USA","The Rock Cafe, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA",40.5,-74.4
+Rutgers University,"Rutgers University, USA","Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.5,-74.4
+RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.8,6.1
+RWTH Aachen University,"RWTH Aachen University, Aachen, Germany","RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.8,6.1
+Ryerson University,Ryerson University,"Ryerson University, Gould Street, Downtown Yonge, Old Toronto, Toronto, Ontario, M5B 2G9, Canada",43.7,-79.4
+Ryerson University,"Ryerson University, Canada","Ryerson University, Gould Street, Downtown Yonge, Old Toronto, Toronto, Ontario, M5B 2G9, Canada",43.7,-79.4
+Ryerson University,"Ryerson University, Toronto, ON, Canada","Ryerson University, Gould Street, Downtown Yonge, Old Toronto, Toronto, Ontario, M5B 2G9, Canada",43.7,-79.4
+Sabanci University,Sabanci University,"Sabanci Universitesi, Preveze Cad., Orta Mahallesi, Tepeören, Tuzla, İstanbul, Marmara Bölgesi, 34953, Türkiye",40.9,29.4
+Sakarya University,Sakarya University,"Sakarya Üniversitesi Diş Hekimliği Fakültesi, Adnan Menderes Caddesi, Güneşler, Adapazarı, Sakarya, Marmara Bölgesi, 54050, Türkiye",40.8,30.4
+San Jose State University,San Jose State University,"SJSU, El Paseo de Cesar E. Chavez, Downtown Historic District, Japantown, San José, Santa Clara County, California, 95113, USA",37.3,-121.9
+San Jose State University,"San Jose State University, San Jose, CA","SJSU, El Paseo de Cesar E. Chavez, Downtown Historic District, Japantown, San José, Santa Clara County, California, 95113, USA",37.3,-121.9
+Santa Clara University,Santa Clara University,"Cowell Center, Accolti Way, Santa Clara, Santa Clara County, California, 95053, USA",37.3,-121.9
+Santa Clara University,"Santa Clara University, Santa Clara, CA. 95053, USA","Cowell Center, Accolti Way, Santa Clara, Santa Clara County, California, 95053, USA",37.3,-121.9
+Santa Fe Institute,Santa Fe Institute,"Santa Fe Institute, Hyde Park Road, Santa Fe, Santa Fe County, New Mexico, 87501, USA",35.7,-105.9
+SASTRA University,SASTRA University,"SASTRA University, SRC Campus, Big Bazaar Street, கும்பகோணம், Thanjavur district, Tamil Nadu, 612001, India",11.0,79.4
+SASTRA University,"SASTRA University, Thanjavur, Tamil Nadu, India","SASTRA University, SRC Campus, Big Bazaar Street, கும்பகோணம், Thanjavur district, Tamil Nadu, 612001, India",11.0,79.4
+Selçuk University,Selçuk University,"Selçuk Üniversitesi, Ali Fuat Cebesoy Cad., Ardıçlı Mahallesi, Konya, Selçuklu, Konya, İç Anadolu Bölgesi, Türkiye",38.0,32.5
+Selçuk University,"Selçuk University, Konya, Turkey","Selçuk Üniversitesi, Ali Fuat Cebesoy Cad., Ardıçlı Mahallesi, Konya, Selçuklu, Konya, İç Anadolu Bölgesi, Türkiye",38.0,32.5
+Semarang State University,Semarang State University,"Mandiri University, Jalan Tambora, RW 10, Tegalsari, Candisari, Semarang, Jawa Tengah, 50252, Indonesia",-7.0,110.4
+Semnan University,Semnan University,"دانشگاه سمنان, بزرگراه امام رضا, شهرک مسکن مهر مصلی, ناسار, سمنان, بخش مرکزی, شهرستان سمنان, استان سمنان, ‏ایران‎",35.6,53.4
+Semnan University,"Semnan University, Semnan, Iran","دانشگاه سمنان, بزرگراه امام رضا, شهرک مسکن مهر مصلی, ناسار, سمنان, بخش مرکزی, شهرستان سمنان, استان سمنان, ‏ایران‎",35.6,53.4
+Seoul National University,Seoul Nat'l Univ.,"서울대입구, 지하 1822, 남부순환로, 중앙동, 봉천동, 관악구, 서울특별시, 08787, 대한민국",37.5,127.0
+Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.3,127.0
+Seoul National University,"Seoul National University, Korea","서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.3,127.0
+Seoul National University,"Seoul National University, Seoul, Korea","서울대학교, 1, 관악로, 서림동, 신림동, 관악구, 서울특별시, 08825, 대한민국",37.5,126.9
+Shaheed Zulfikar Ali Bhutto Institute of Science and Technology,Shaheed Zulfikar Ali Bhutto Institute of,"Shaheed Zulfikar Ali Bhutto Institute of Science and Technology - Karachi Campus, Block 5, Clifton Block 5, CBC, ڪراچي Karachi, Karāchi District, سنڌ, 75600, ‏پاکستان‎",24.8,67.0
+Shandong University,Shandong University,"山东大学, 泰安街, 鳌山卫街道, 即墨区, 青岛市, 山东省, 266200, 中国",36.4,120.7
+Shandong University,"Shandong University, Shandong, China","山东大学, 泰安街, 鳌山卫街道, 即墨区, 青岛市, 山东省, 266200, 中国",36.4,120.7
+Shandong University of Science and Technology,Shandong University of Science and Technology,"山东科技大学, 579, 前湾港路, 牛王庙, 北下庄, 黄岛区 (Huangdao), 青岛市, 山东省, 266500, 中国",36.0,120.1
+"Shanghai Institute of Technology, Shanghai, China","Shanghai Institute of Technology, Shanghai, China","上海应用技术大学, 康健路, 长桥, 徐汇区, 上海市, 200233, 中国",31.2,121.4
+Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.2,121.4
+Shanghai Jiao Tong University,"Shanghai Jiao Tong University, China","上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.2,121.4
+Shanghai Jiao Tong University,"Shanghai Jiao Tong University, People's Republic of China","上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.2,121.4
+Shanghai Jiao Tong University,"Shanghai Jiao Tong University, Shanghai 200240, China","上海交通大学(闵行校区), 宣怀大道, 紫竹科技园区, 英武, 闵行区, 上海市, 200240, 中国",31.0,121.4
+Shanghai Jiao Tong University,"Shanghai Jiao Tong University, Shanghai, China","上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.2,121.4
+Shanghai University,Shanghai University,"上海大学, 锦秋路, 大场镇, 宝山区 (Baoshan), 上海市, 201906, 中国",31.3,121.4
+Shanghai University,Shanghai university,"上海大学, 锦秋路, 大场镇, 宝山区 (Baoshan), 上海市, 201906, 中国",31.3,121.4
+Shanghai University,"Shanghai University, Shanghai, China","上海大学, 锦秋路, 大场镇, 宝山区 (Baoshan), 上海市, 201906, 中国",31.3,121.4
+Sharda University,Sharda University,"Sharda University, Yamuna Expressway, Greater Noida, Gautam Buddha Nagar, Uttar Pradesh, 201308, India",28.5,77.5
+Sharda University,"Sharda University, Greater Noida, India","Sharda University, Yamuna Expressway, Greater Noida, Gautam Buddha Nagar, Uttar Pradesh, 201308, India",28.5,77.5
+Sharif University of Technology,Sharif University of Technology,"دانشگاه صنعتی شریف, خیابان آزادی, زنجان, منطقه ۹ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, 14588, ‏ایران‎",35.7,51.4
+Sharif University of Technology,"Sharif University of Technology, Tehran. Iran","دانشگاه صنعتی شریف, خیابان آزادی, زنجان, منطقه ۹ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, 14588, ‏ایران‎",35.7,51.4
+Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.6,114.0
+Shenzhen University,Shenzhen University,"深圳大学, 3688, 南海大道, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518060, 中国",22.5,113.9
+Shenzhen University,"Shenzhen University, Shenzhen China","深圳大学, 3688, 南海大道, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518060, 中国",22.5,113.9
+Shenzhen University,"Shenzhen University, Shenzhen, China","深圳大学, 3688, 南海大道, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518060, 中国",22.5,113.9
+Shibaura Institute of Technology,Shibaura Institute of Technology,"芝浦工業大学 豊洲キャンパス, 晴海通り, 豊洲2, 豊洲, 富岡一丁目, 江東区, 東京都, 関東地方, 135-6001, 日本",35.7,139.8
+Shibaura Institute of Technology,"Shibaura Institute of Technology, Tokyo, Japan","芝浦工業大学 豊洲キャンパス, 晴海通り, 豊洲2, 豊洲, 富岡一丁目, 江東区, 東京都, 関東地方, 135-6001, 日本",35.7,139.8
+Shiraz University,Shiraz University,"دانشگاه شیراز, میدان ارم, محدوده شهرداری منطقه یک - شهرداری شیراز, شیراز, بخش مرکزی شهرستان شیراز, شهرستان شیراز, استان فارس, 71348-34689, ‏ایران‎",29.6,52.5
+"Sichuan University, Chengdu","Sichuan Univ., Chengdu","四川大学(华西校区), 校东路, 武侯区, 武侯区 (Wuhou), 成都市 / Chengdu, 四川省, 610014, 中国",30.6,104.1
+Simon Fraser University,SIMON FRASER UNIVERSITY,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.3,-122.9
+Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.3,-122.9
+Singapore Management University,Singapore Management University,"Singapore Management University, Fort Canning Tunnel, Clarke Quay, City Hall, Singapore, Central, 178895, Singapore",1.3,103.8
+Singapore Management University,"Singapore Management University, Singapore","Singapore Management University, Fort Canning Tunnel, Clarke Quay, City Hall, Singapore, Central, 178895, Singapore",1.3,103.8
+Singapore University of Technology and Design,Singapore University of Technology and Design,"Singapore University of Technology and Design, Simpang Bedok, Changi Business Park, Southeast, 486041, Singapore",1.3,104.0
+Singapore University of Technology and Design,"Singapore University of Technology and Design, Singapore","Singapore University of Technology and Design, Simpang Bedok, Changi Business Park, Southeast, 486041, Singapore",1.3,104.0
+Soochow University,Soochow University,"苏州大学(天赐庄校区), 清荫路, 钟楼社区, 双塔街道, 姑苏区, 苏州市, 江苏省, 215001, 中国",31.3,120.6
+Soochow University,"Soochow University, Suzhou, China","苏州大学(天赐庄校区), 清荫路, 钟楼社区, 双塔街道, 姑苏区, 苏州市, 江苏省, 215001, 中国",31.3,120.6
+South China Normal University,South China Normal University,"华师, 五山路, 华南理工大学南新村, 天河区, 广州市, 广东省, 510630, 中国",23.1,113.3
+South China Normal University,"South China Normal University, Guangzhou, China","华师, 五山路, 华南理工大学南新村, 天河区, 广州市, 广东省, 510630, 中国",23.1,113.3
+South China University of China,South China University of China,"华工站, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.0,113.4
+South China University of Technology,South China University of Technology,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.1,113.4
+South China University of Technology,"South China University of Technology, China","华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.1,113.4
+South China University of Technology,"South China University of Technology, Guangzhou, China","华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.1,113.4
+South China University of Technology,"South China University of Technology, Guangzhou, Guangdong, China","华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.1,113.4
+South East European University,South East European University,"Универзитет на Југоисточна Европа, 335, Мајка Тереза, Тетово, Општина Тетово, Полошки Регион, 1200, Македонија",42.0,21.0
+South East European University,"South East European University, Tetovo, Macedonia","Универзитет на Југоисточна Европа, 335, Мајка Тереза, Тетово, Општина Тетово, Полошки Регион, 1200, Македонија",42.0,21.0
+Southeast University,Southeast University,"SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国",32.1,118.8
+Southeast University,"Southeast University, Nanjing, China","SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国",32.1,118.8
+Southwest Jiaotong University,Southwest Jiaotong University,"西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国",30.7,104.1
+Southwest Jiaotong University,"Southwest Jiaotong University, Chengdu, China","西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国",30.7,104.1
+Southwest Jiaotong University,"Southwest Jiaotong University, Chengdu, P.R. China","西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国",30.7,104.1
+Southwest University,Southwest University,"西南大学, 天生路, 北碚区 (Beibei), 北碚区, 北碚区 (Beibei), 重庆市, 400711, 中国",29.8,106.4
+Southwest University,"Southwest University, China","西南大学, 天生路, 北碚区 (Beibei), 北碚区, 北碚区 (Beibei), 重庆市, 400711, 中国",29.8,106.4
+Southwest University,"Southwest University, Chongqing 400715, China","西南大学, 天生路, 北碚区 (Beibei), 北碚区, 北碚区 (Beibei), 重庆市, 400711, 中国",29.8,106.4
+Southwest University,"Southwest University, Chongqing, China","西南大学, 天生路, 北碚区 (Beibei), 北碚区, 北碚区 (Beibei), 重庆市, 400711, 中国",29.8,106.4
+SRI International,SRI International,"SRI International Building, West 1st Street, Menlo Park, San Mateo County, California, 94025, USA",37.5,-122.2
+SRI International,"SRI International, Menlo Park, USA","SRI International Building, West 1st Street, Menlo Park, San Mateo County, California, 94025, USA",37.5,-122.2
+Sri Krishna College of Technology,"Sri Krishna College of Technology, Coimbatore, India","Sri Krishna College of Technology, Kovaipudur to Golf Course Road dirt track, Ward 89, South Zone, Coimbatore, Coimbatore district, Tamil Nadu, 641001, India",10.9,76.9
+Stamford University Bangladesh,Stamford University Bangladesh,"Stamford University Bangladesh, Siddeshwari Road, ফকিরাপুল, Paltan, ঢাকা, ঢাকা বিভাগ, 1217, বাংলাদেশ",23.7,90.4
+Stamford University Bangladesh,"Stamford University Bangladesh, Dhaka-1209, Bangladesh","Stamford University Bangladesh, Siddeshwari Road, ফকিরাপুল, Paltan, ঢাকা, ঢাকা বিভাগ, 1217, বাংলাদেশ",23.7,90.4
+Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.4,-122.2
+Stanford University,"Stanford University, CA","Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.4,-122.2
+Stanford University,"Stanford University, CA, United States","Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.4,-122.2
+Stanford University,"Stanford University, Stanford, CA, USA","Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.4,-122.2
+Stanford University,"Stanford University, Stanford, California","Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.4,-122.2
+Stanford University,"Stanford University, USA","Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.4,-122.2
+State University of New Jersey,The State University of New Jersey,"Rutgers New Brunswick: Livingston Campus, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA",40.5,-74.4
+State University of New York at Binghamton,State University of New York at Binghamton,"State University of New York at Binghamton, East Drive, Hinman, Willow Point, Vestal Town, Broome County, New York, 13790, USA",42.1,-76.0
+State University of New York at Binghamton,"State University of New York at Binghamton, USA","State University of New York at Binghamton, East Drive, Hinman, Willow Point, Vestal Town, Broome County, New York, 13790, USA",42.1,-76.0
+State University of New York at Buffalo,State University of New York at Buffalo,"University at Buffalo, The State University of New York, South Campus, Norton Circle, University Heights, Buffalo, Erie County, New York, 14226, USA",43.0,-78.8
+State University of New York at Buffalo,The State University of New York at Buffalo,"University at Buffalo, The State University of New York, South Campus, Norton Circle, University Heights, Buffalo, Erie County, New York, 14226, USA",43.0,-78.8
+State University of New York at Buffalo,"The State University of New York at Buffalo, New York, USA","University at Buffalo, The State University of New York, South Campus, Norton Circle, University Heights, Buffalo, Erie County, New York, 14226, USA",43.0,-78.8
+State University of New York Polytechnic Institute,State University of New York Polytechnic Institute,"State University of New York Polytechnic Institute, 100, Seymour Road, Maynard, Town of Marcy, Oneida County, New York, 13502, USA",43.1,-75.2
+State University of New York Polytechnic Institute,"State University of New York Polytechnic Institute, Utica, New York","State University of New York Polytechnic Institute, 100, Seymour Road, Maynard, Town of Marcy, Oneida County, New York, 13502, USA",43.1,-75.2
+Stevens Institute of Technology,Stevens Institute of Technology,"Stevens Institute of Technology, River Terrace, Hoboken, Hudson County, New Jersey, 07030, USA",40.7,-74.0
+Stevens Institute of Technology,"Stevens Institute of Technology, Hoboken, New Jersey, 07030","Stevens Institute of Technology, Hudson Street, Hoboken, Hudson County, New Jersey, 07030, USA",40.7,-74.0
+Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.9,-73.1
+Stony Brook University,"Stony Brook University, NY 11794, USA","Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.9,-73.1
+Stony Brook University,"Stony Brook University, NY, USA","Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.9,-73.1
+Stony Brook University,"Stony Brook University, Stony Brook NY 11794, USA","Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.9,-73.1
+Stony Brook University,"Stony Brook University, Stony Brook, NY 11794, USA","Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.9,-73.1
+Stony Brook University,"Stony Brook University, Stony Brook, USA","Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.9,-73.1
+Stony Brook University Hospital,Stony Brook University Hospital,"Stony Brook University Hospital, 101, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.9,-73.1
+Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.1,113.3
+Sun Yat-Sen University,Sun Yat-sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.1,113.3
+Sun Yat-Sen University,"Sun Yat-Sen University, China","中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.1,113.3
+Sun Yat-Sen University,"Sun Yat-sen University, China","中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.1,113.3
+Sun Yat-Sen University,"Sun Yat-Sen University, GuangZhou, China","中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.1,113.3
+Sun Yat-Sen University,"Sun Yat-Sen University, Guangzhou, China","中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.1,113.3
+Sun Yat-Sen University,"Sun Yat-sen University, Guangzhou, China","中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.1,113.3
+Sun Yat-Sen University,"Sun Yat-Sen University, Guangzhou, P.R. China","中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.1,113.3
+SungKyunKwan University,SungKyunKwan University,"성균관대, 덕영대로, 천천동, 장안구, 수원시, 경기, 16357, 대한민국",37.3,127.0
+SungKyunKwan University,Sungkyunkwan University,"성균관대, 덕영대로, 천천동, 장안구, 수원시, 경기, 16357, 대한민국",37.3,127.0
+SungKyunKwan University,"Sungkyunkwan University, Suwon, Republic of Korea","성균관대, 덕영대로, 천천동, 장안구, 수원시, 경기, 16357, 대한민국",37.3,127.0
+SUNY Buffalo,SUNY Buffalo,"SUNY College at Buffalo, Academic Drive, Elmwood Village, Buffalo, Erie County, New York, 14222, USA",42.9,-78.9
+Swansea University,Swansea University,"Swansea University, University Footbridge, Sketty, Swansea, Wales, SA2 8PZ, UK",51.6,-4.0
+Swansea University,"Swansea University, Swansea, UK","Swansea University, University Footbridge, Sketty, Swansea, Wales, SA2 8PZ, UK",51.6,-4.0
+Swiss Federal Institute of Technology,Swiss Federal Institute of Technology,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.4,8.5
+Tafresh University,Tafresh University,"دانشگاه تفرش, پاسداران, خرازان, بخش مرکزی, شهرستان تفرش, استان مرکزی, ‏ایران‎",34.7,50.1
+Tafresh University,"Tafresh University, Tafresh, Iran","دانشگاه تفرش, پاسداران, خرازان, بخش مرکزی, شهرستان تفرش, استان مرکزی, ‏ایران‎",34.7,50.1
+Tamkang University,Tamkang University,"淡江大學, 151, 英專路, 中興里, 鬼仔坑, 淡水區, 新北市, 25137, 臺灣",25.2,121.5
+Tamkang University,"Tamkang University, Taipei, Taiwan","淡江大學, 151, 英專路, 中興里, 鬼仔坑, 淡水區, 新北市, 25137, 臺灣",25.2,121.5
+Tampere University of Technology,Tampere University of Technology,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi",61.4,23.9
+Tampere University of Technology,"Tampere University of Technology, Finland","TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi",61.4,23.9
+Tampere University of Technology,"Tampere University of Technology, Tampere 33720, Finland","TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi",61.4,23.9
+Tampere University of Technology,"Tampere University of Technology, Tampere, Finland","TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi",61.4,23.9
+Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.1,11.6
+Technical University Munich,"Technical University Munich, Germany","TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.1,11.6
+"Technicolor, France","Technicolor, France","Technicolor, Rue d'Oradour-sur-Glane, Javel, 15e, Paris, Île-de-France, France métropolitaine, 75015, France",48.8,2.3
+"Technicolor, France","Technicolor, Paris, France","Technicolor, Rue d'Oradour-sur-Glane, Javel, 15e, Paris, Île-de-France, France métropolitaine, 75015, France",48.8,2.3
+Technion Israel Institute of Technology,Technion Israel Institute of Technology,"הטכניון - מכון טכנולוגי לישראל, דוד רוז, חיפה, קרית הטכניון, חיפה, מחוז חיפה, NO, ישראל",32.8,35.0
+Teesside University,Teesside University,"Teesside University, Southfield Road, Southfield, Linthorpe, Middlesbrough, North East England, England, TS1 3BZ, UK",54.6,-1.2
+Teesside University,"Teesside University, Middlesbrough, UK","Teesside University, Southfield Road, Southfield, Linthorpe, Middlesbrough, North East England, England, TS1 3BZ, UK",54.6,-1.2
+Teesside University,"Teesside University, UK","Teesside University, Southfield Road, Southfield, Linthorpe, Middlesbrough, North East England, England, TS1 3BZ, UK",54.6,-1.2
+Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.1,34.8
+Tel Aviv University,"Tel Aviv University, Israel","אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.1,34.8
+Tel Aviv University,Tel-Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.1,34.8
+Tel Aviv University,"Tel-Aviv University, Israel","אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.1,34.8
+Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",40.0,-75.2
+Temple University,"Temple University, Philadelphia, PA 19122, USA","Temple University, West Berks Street, Hartranft, Philadelphia, Philadelphia County, Pennsylvania, 19122, USA",40.0,-75.1
+Temple University,"Temple University, Philadelphia, PA, 19122, USA","Temple University, West Berks Street, Hartranft, Philadelphia, Philadelphia County, Pennsylvania, 19122, USA",40.0,-75.1
+Temple University,"Temple University, Philadelphia, PA, USA","Temple University, Beasley's Walk, Stanton, Philadelphia, Philadelphia County, Pennsylvania, 19132:19133, USA",40.0,-75.2
+Temple University,"Temple University, Philadelphia, USA","Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",40.0,-75.2
+Texas A&M University,Texas A&M University,"Texas A&M University, Horticulture Street, Park West, College Station, Brazos County, Texas, 77841, USA",30.6,-96.4
+Texas A&M University,"Texas A&M University, College Station, TX, USA","Texas A&M University, Horticulture Street, Park West, College Station, Brazos County, Texas, 77841, USA",30.6,-96.4
+Thapar University,Thapar University,"Thapar University, Hostel Road, Patiala, Punjab, 147001, India",30.4,76.4
+Tianjin University,Tianjin University,"Tianjin University, South Qinmin Road, Haihe Education Park, 辛庄镇, 津南区 (Jinnan), 天津市, 中国",39.0,117.3
+Tianjin University,"Tianjin University, 300072, China","Tianjin University, South Qinmin Road, Haihe Education Park, 辛庄镇, 津南区 (Jinnan), 天津市, 中国",39.0,117.3
+Tianjin University,"Tianjin University, China","Tianjin University, South Qinmin Road, Haihe Education Park, 辛庄镇, 津南区 (Jinnan), 天津市, 中国",39.0,117.3
+Tianjin University,"Tianjin University, Tianjin, China","Tianjin University, South Qinmin Road, Haihe Education Park, 辛庄镇, 津南区 (Jinnan), 天津市, 中国",39.0,117.3
+Tohoku University,Tohoku University,"Tohoku University, 五橋通, 青葉区, 仙台市, 宮城県, 東北地方, 980-0811, 日本",38.3,140.9
+Tohoku University,"Tohoku University, Japan","Tohoku University, 五橋通, 青葉区, 仙台市, 宮城県, 東北地方, 980-0811, 日本",38.3,140.9
+Tohoku University,"Tohoku University, Sendai, Japan","Tohoku University, 五橋通, 青葉区, 仙台市, 宮城県, 東北地方, 980-0811, 日本",38.3,140.9
+Tokyo Denki University,Tokyo Denki University,"東京電機大学, 千住旭町, 足立区, 東京都, 関東地方, 120-8551, 日本",35.7,139.8
+Tokyo Institute of Technology,Tokyo Institute of Technology,"東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本",35.5,139.5
+Tokyo Institute of Technology,"Tokyo Institute of Technology, Japan","東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本",35.5,139.5
+Tokyo Institute of Technology,"Tokyo Institute of Technology, Kanagawa, Japan","東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本",35.5,139.5
+Tokyo Metropolitan University,Tokyo Metropolitan University,"首都大学東京, 由木緑道, 八王子市, 東京都, 関東地方, 1920364, 日本",35.6,139.4
+Tomsk Polytechnic University,Tomsk Polytechnic University,"Томский политехнический университет, улица Пирогова, Южная, Кировский район, Томск, городской округ Томск, Томская область, СФО, 634034, РФ",56.5,85.0
+Tongji University,Tongji University,"同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国",31.3,121.5
+Tongji University,"Tongji University, Shanghai 201804, China","同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国",31.3,121.5
+Tongji University,"Tongji University, Shanghai, China","同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国",31.3,121.5
+Toyota Research Institute,Toyota Research Institute,"Toyota Research Institute, 4440, West El Camino Real, Los Altos, Santa Clara County, California, 94022, USA",37.4,-122.1
+"Toyota Technological Institute, Chicago","Toyota Technological Institute (Chicago, US","Toyota Technological Institute, 6045, South Kenwood Avenue, Woodlawn, Chicago, Cook County, Illinois, 60637, USA",41.8,-87.6
+Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.0,116.3
+Tsinghua University,"Tsinghua University, 100084 Beijing, China","清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.0,116.3
+Tsinghua University,"Tsinghua University, Beijing","清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.0,116.3
+Tsinghua University,"Tsinghua University, Beijing 100084, China","清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.0,116.3
+Tsinghua University,"Tsinghua University, Beijing 100084, P.R. China","清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.0,116.3
+Tsinghua University,"Tsinghua University, Beijing 100084, P.R.China","清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.0,116.3
+Tsinghua University,"Tsinghua University, Beijing, 100084, China","清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.0,116.3
+Tsinghua University,"Tsinghua University, Beijing, China","清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.0,116.3
+Tsinghua University,"Tsinghua University, Beijing, P. R. China","清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.0,116.3
+Tsinghua University,"Tsinghua University, Beijing,China","清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.0,116.3
+Tsinghua University,"Tsinghua University, China","清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.0,116.3
+TU Darmstadt,"TU Darmstadt, D-64283, Germany","Institut für Psychologie, 10, Alexanderstraße, Darmstadt-Mitte, Darmstadt, Regierungsbezirk Darmstadt, Hessen, 64283, Deutschland",49.9,8.7
+Ulm University,Ulm University,"Universität Ulm, Albert-Einstein-Allee, Eselsberg, Ulm, Tübingen, Baden-Württemberg, 89081, Deutschland",48.4,10.0
+Ulm University,"Ulm University, Germany","Universität Ulm, Albert-Einstein-Allee, Eselsberg, Ulm, Tübingen, Baden-Württemberg, 89081, Deutschland",48.4,10.0
+Universidad Autonoma de Madrid,Universidad Autonoma de Madrid,"Facultad de Medicina de la Universidad Autónoma de Madrid, Calle de Arturo Duperier, Fuencarral, Fuencarral-El Pardo, Madrid, Área metropolitana de Madrid y Corredor del Henares, Comunidad de Madrid, 28001, España",40.5,-3.7
+"Universidad Tecnica Federico Santa Maria, Valparaiso, Chile","Universidad Tecnica Federico Santa Maria, Valparaiso, Chile","Universidad Técnica Federico Santa María, Condominio Esmeralda, Valparaíso, Provincia de Valparaíso, V Región de Valparaíso, 2390382, Chile",-33.0,-71.6
+Università degli Studi di Milano,Università degli Studi di Milano,"Università degli Studi di Milano, Via Camillo Golgi, Città Studi, Milano, MI, LOM, 20133, Italia",45.5,9.2
+Università degli Studi di Milano,"Università degli Studi di Milano, Italy","Università degli Studi di Milano, Via Camillo Golgi, Città Studi, Milano, MI, LOM, 20133, Italia",45.5,9.2
+Università di Salerno Italy,Università di Salerno Italy,"Università, Autostrada del Mediterraneo, Fisciano, SA, CAM, 84084, Italia",40.8,14.8
+Universitat Autònoma de Barcelona,"Centre de Visió per Computador, Universitat Autònoma de Barcelona, Barcelona, Spain","Centre de Visió per Computador (CVC), Carrer de l'Albareda, Serraperera, UAB, Cerdanyola del Vallès, Vallès Occidental, BCN, CAT, 08214, España",41.5,2.1
+Universitat Autònoma de Barcelona,Universitat Autònoma de Barcelona,"Centre de Visió per Computador (CVC), Carrer de l'Albareda, Serraperera, UAB, Cerdanyola del Vallès, Vallès Occidental, BCN, CAT, 08214, España",41.5,2.1
+Universitat de València,Universitat de València,"Campus dels Tarongers, Plaza de Manuel Broseta i Pont, Ciutat Jardí, Algirós, València, Comarca de València, València / Valencia, Comunitat Valenciana, 46022, España",39.5,-0.3
+Universitat de València,"Universitat de València, Valencia, Spain","Campus dels Tarongers, Plaza de Manuel Broseta i Pont, Ciutat Jardí, Algirós, València, Comarca de València, València / Valencia, Comunitat Valenciana, 46022, España",39.5,-0.3
+Universität Hamburg,"Vogt-Koelln-Strasse 30, 22527 Hamburg - Germany","Informatikum, 30, Vogt-Kölln-Straße, Stellingen, Eimsbüttel, Hamburg, 22527, Deutschland",53.6,9.9
+Universitat Oberta de Catalunya,Universitat Oberta de Catalunya,"Universitat Oberta de Catalunya, 156, Rambla del Poblenou, Provençals del Poblenou, Sant Martí, Barcelona, BCN, CAT, 08018, España",41.4,2.2
+Universitat Oberta de Catalunya,"Universitat Oberta de Catalunya, Barcelona, Spain","Universitat Oberta de Catalunya, 156, Rambla del Poblenou, Provençals del Poblenou, Sant Martí, Barcelona, BCN, CAT, 08018, España",41.4,2.2
+Universitat Pompeu Fabra,Universitat Pompeu Fabra,"Dipòsit de les Aigües, Carrer de Wellington, la Vila Olímpica del Poblenou, Ciutat Vella, Barcelona, BCN, CAT, 08071, España",41.4,2.2
+Universitat Pompeu Fabra,"Universitat Pompeu Fabra, Barcelona, Spain","Dipòsit de les Aigües, Carrer de Wellington, la Vila Olímpica del Poblenou, Ciutat Vella, Barcelona, BCN, CAT, 08071, España",41.4,2.2
+Université du Québec à Chicoutimi,Université du Québec à Chicoutimi (UQAC),"Université du Québec à Chicoutimi (UQAC), Chicoutimi, Ville de Saguenay, Saguenay - Lac-Saint-Jean, Québec, G7H 2B1, Canada",48.4,-71.1
+Universiti Teknologi Petronas,Universiti Teknologi PETRONAS,"UTP, Universiti Teknologi Petronas, Persiaran Desa Kediaman, Puncak Iskandar, Seri Iskandar, PRK, 32610, Malaysia",4.4,101.0
+Universiti Teknologi Petronas,"Universiti Teknologi PETRONAS, Seri Iskandar, 32610, Perak Malaysia","UTP, Universiti Teknologi Petronas, Persiaran Desa Kediaman, Puncak Iskandar, Seri Iskandar, PRK, 32610, Malaysia",4.4,101.0
+University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.5,-0.1
+University College London,"University College London, London WC1N 3BG, United Kingdom","UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.5,-0.1
+University College London,"University College London, London, UK","UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.5,-0.1
+University College London,"University College London, UK","UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.5,-0.1
+University of Aberdeen,University of Aberdeen,"University of Aberdeen, High Street, Old Aberdeen, Aberdeen, Aberdeen City, Scotland, AB24 3EJ, UK",57.2,-2.1
+University of Abertay,University of Abertay,"Abertay University, Bell Street, City Centre, Dundee, Dundee City, Scotland, DD1 1HG, UK",56.5,-3.0
+University of Adelaide,The University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.9,138.6
+University of Adelaide,"The University of Adelaide, Adelaide, SA, Australia","University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.9,138.6
+University of Adelaide,"The University of Adelaide, Australia","University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.9,138.6
+University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.9,138.6
+University of Adelaide,"University of Adelaide, Australia","University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.9,138.6
+University of Adelaide,"University of Adelaide, SA, Australia","University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.9,138.6
+University of Agder,University of Agder,"UiA, Vegard Hauges plass, Gimlemoen, Kvadraturen, Kristiansand, Vest-Agder, 4630, Norge",58.2,8.0
+University of Agder,"University of Agder, Kristiansand, Norway","UiA, Vegard Hauges plass, Gimlemoen, Kvadraturen, Kristiansand, Vest-Agder, 4630, Norge",58.2,8.0
+University of Aizu,University of Aizu,"会津大学, 磐越自動車道, 会津若松市, 福島県, 東北地方, 965-8580, 日本",37.5,139.9
+University of Aizu,"University of Aizu, Japan","会津大学, 磐越自動車道, 会津若松市, 福島県, 東北地方, 965-8580, 日本",37.5,139.9
+University of Akron,University of Akron,"University of Akron, East State Street, Stadium District, Cascade Valley, Akron, Summit County, Ohio, 44308, USA",41.1,-81.5
+University of Akron,"University of Akron, Akron","University of Akron, East State Street, Stadium District, Cascade Valley, Akron, Summit County, Ohio, 44308, USA",41.1,-81.5
+University of Alberta,University of Alberta,"University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada",53.5,-113.5
+University of Alberta,"University of Alberta, Edmonton, Canada","University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada",53.5,-113.5
+University of Amsterdam,"Science, University of Amsterdam","Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.4,5.0
+University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.4,5.0
+University of Amsterdam,"University of Amsterdam, Amsterdam, The","Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.4,5.0
+University of Amsterdam,"University of Amsterdam, Amsterdam, The Netherlands","Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.4,5.0
+University of Amsterdam,"University of Amsterdam, The Netherlands","Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.4,5.0
+University of Amsterdam,"University of Amsterdam, the Netherlands","Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.4,5.0
+University of Arizona,THE UNIVERSITY OF ARIZONA,"University of Arizona, North Highland Avenue, Rincon Heights, Barrio Viejo, Tucson, Pima County, Arizona, 85721, USA",32.2,-111.0
+University of Arizona,University of Arizona,"University of Arizona, North Highland Avenue, Rincon Heights, Barrio Viejo, Tucson, Pima County, Arizona, 85721, USA",32.2,-111.0
+University of Arkansas at Little Rock,University of Arkansas at Little Rock,"University of Arkansas At Little Rock (UALR), 2801, U A L R Campus Drive, Little Rock, Pulaski County, Arkansas, 72204, USA",34.7,-92.3
+University of Barcelona,University of Barcelona,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España",41.4,2.2
+University of Barcelona,"University of Barcelona, Spain","Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España",41.4,2.2
+University of Basel,University of Basel,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra",47.6,7.6
+University of Basel,"University of Basel, Switzerland","Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra",47.6,7.6
+University of Bath,University of Bath,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK",51.4,-2.3
+University of Bath,"University of Bath, Bath, Somerset, United Kingdom","University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK",51.4,-2.3
+University of Bath,"University of Bath, Bath, United Kingdom","University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK",51.4,-2.3
+University of Birmingham,University of Birmingham,"University of Birmingham Edgbaston Campus, Ring Road North, Bournbrook, Birmingham, West Midlands Combined Authority, West Midlands, England, B15 2TP, UK",52.5,-1.9
+University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.7,7.1
+University of Bonn,"University of Bonn, Germany","Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.7,7.1
+University of Brescia,University of Brescia,"Università degli Studi di Brescia, Via Branze, Brescia, BS, LOM, 25123, Italia",45.6,10.2
+University of Bridgeport,University of Bridgeport,"University of Bridgeport, Park Avenue, Bridgeport Downtown South Historic District, Bridgeport, Fairfield County, Connecticut, 06825, USA",41.2,-73.2
+University of Bristol,University of Bristol,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK",51.5,-2.6
+University of Bristol,"University of Bristol, Bristol, BS8 1UB, UK","University of Bristol, Cantock's Close, Kingsdown, Canon's Marsh, Bristol, City of Bristol, South West England, England, BS8, UK",51.5,-2.6
+University of Bristol,"University of Bristol, Bristol, UK","Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK",51.5,-2.6
+University of British Columbia,The University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.3,-123.2
+University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.3,-123.2
+University of British Columbia,"University of British Columbia, Canada","University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.3,-123.2
+University of British Columbia,"University of British Columbia, Vancouver, Canada","University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.3,-123.2
+University of Buffalo,University of Buffalo,"University at Buffalo, North Campus, Amherst, Erie County, New York, 14260, USA",43.0,-78.8
+University of Caen,University of Caen,"Université de Caen Normandie, Esplanade de la Paix, Caen, Calvados, Normandie, 14000, France",49.2,-0.4
+University of Calgary,University of Calgary,"University of Calgary, Service Tunnel, University Heights, Calgary, Alberta, T2N 1N7, Canada",51.1,-114.1
+University of Calgary,"University of Calgary, Calgary, Alberta, Canada","University of Calgary, Service Tunnel, University Heights, Calgary, Alberta, T2N 1N7, Canada",51.1,-114.1
+"University of California, Berkeley",University of California Berkeley,"UC Berkeley, Centennial Drive, Oakland, Alameda County, California, 94720-1076, USA",37.9,-122.2
+"University of California, Berkeley",University of California Berkeley,"UC Berkeley, Centennial Drive, Oakland, Alameda County, California, 94720-1076, USA",37.9,-122.2
+"University of California, Berkeley","UNIVERSITY OF CALIFORNIA, BERKELEY","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.9,-122.3
+"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.9,-122.3
+"University of California, Berkeley","University of California, Berkeley, Berkeley CA 94720, USA","Goldman School of Public Policy, Hearst Avenue, Northside, Berkeley, Alameda County, California, 94720, USA",37.9,-122.3
+"University of California, Davis",University of California Davis,"University of California, Davis, Apiary Drive, Yolo County, California, 95616-5270, USA",38.5,-121.8
+"University of California, Irvine","University of California, Irvine","University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA",33.6,-117.8
+"University of California, Irvine","University of California, Irvine, USA","University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA",33.6,-117.8
+"University of California, Merced","UC Merced, USA","UC Merced Venture Lab, 1735, M Street, Merced, Merced County, California, 95340, USA",37.3,-120.5
+"University of California, Merced","University of California, Merced","University of California, Merced, Ansel Adams Road, Merced County, California, USA",37.4,-120.4
+"University of California, Merced","University of California, Merced, CA 95344, USA","University of California, Merced, Ansel Adams Road, Merced County, California, USA",37.4,-120.4
+"University of California, Merced","University of California, Merced, USA","University of California, Merced, Ansel Adams Road, Merced County, California, USA",37.4,-120.4
+"University of California, Riverside","University of California, Riverside","University of California, Riverside, Linden Street, Riverside, Riverside County, California, 92521, USA",34.0,-117.3
+"University of California, Riverside","University of California, Riverside CA 92521-0425, USA","UCR, North Campus Drive, Riverside, Riverside County, California, 92521, USA",34.0,-117.3
+"University of California, Riverside","University of California, Riverside, California 92521, USA","UCR, North Campus Drive, Riverside, Riverside County, California, 92521, USA",34.0,-117.3
+"University of California, Riverside","University of California, Riverside, Riverside CA, California 92521 United States","UCR, North Campus Drive, Riverside, Riverside County, California, 92521, USA",34.0,-117.3
+"University of California, San Diego",University Of California San Diego,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.9,-117.2
+"University of California, San Diego",University of California San Diego,"UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.9,-117.2
+"University of California, San Diego","University of California San Diego, United States of America","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.9,-117.2
+"University of California, San Diego","University of California San Diego, USA","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.9,-117.2
+"University of California, San Diego","UNIVERSITY OF CALIFORNIA, SAN DIEGO","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.9,-117.2
+"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.9,-117.2
+"University of California, San Diego","University of California, San Diego, CA, USA","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.9,-117.2
+"University of California, San Diego","University of California, San Diego, California, USA","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.9,-117.2
+"University of California, San Diego","University of California, San Diego, La Jolla","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.9,-117.2
+"University of California, San Diego","University of California, San Diego, USA","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.9,-117.2
+"University of California, Santa Barbara",University of California Santa Barbara,"UCSB, Santa Barbara County, California, 93106, USA",34.4,-119.8
+"University of California, Santa Barbara","University of California, Santa Barbara","UCSB, Santa Barbara County, California, 93106, USA",34.4,-119.8
+University of Cambridge,The University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.2,0.1
+University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.2,0.1
+University of Cambridge,"University of Cambridge, United Kingdom","Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.2,0.1
+University of Campinas,University of Campinas,"USJ, 97, Rua Sílvia Maria Fabro, Kobrasol, Campinas, São José, Microrregião de Florianópolis, Mesorregião da Grande Florianópolis, SC, Região Sul, 88102-130, Brasil",-27.6,-48.6
+University of Campinas,University of Campinas (Unicamp,"Universidade Estadual de Campinas - UNICAMP, Rua Josué de Castro, Barão Geraldo, Campinas, Microrregião de Campinas, RMC, Mesorregião de Campinas, SP, Região Sudeste, 13083-970, Brasil",-22.8,-47.1
+University of Canberra,University of Canberra,"University of Canberra, University Drive, Bruce, Belconnen, Australian Capital Territory, 2617, Australia",-35.2,149.1
+University of Canterbury,University of Canterbury,"University of Canterbury, Uni-Cycle, Ilam, Christchurch, Christchurch City, Canterbury, 8040, New Zealand/Aotearoa",-43.5,172.6
+University of Canterbury,"University of Canterbury, New Zealand","University of Canterbury, Uni-Cycle, Ilam, Christchurch, Christchurch City, Canterbury, 8040, New Zealand/Aotearoa",-43.5,172.6
+University of Cape Town,University of Cape Town,"University of Cape Town, Engineering Mall, Cape Town Ward 59, Cape Town, City of Cape Town, Western Cape, CAPE TOWN, South Africa",-34.0,18.5
+University of Cape Town,"University of Cape Town, South Africa","University of Cape Town, Engineering Mall, Cape Town Ward 59, Cape Town, City of Cape Town, Western Cape, CAPE TOWN, South Africa",-34.0,18.5
+University of Central Florida,B.S. University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.6,-81.2
+University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.6,-81.2
+University of Central Florida,"University of Central Florida, Orlando","Rosen College of Hospitality Management, 9907, Universal Boulevard, Orange County, Florida, 32819, USA",28.4,-81.4
+University of Central Florida,"University of Central Florida, Orlando, 32816, United States of America","Rosen College of Hospitality Management, 9907, Universal Boulevard, Orange County, Florida, 32819, USA",28.4,-81.4
+University of Central Florida,"University of Central Florida, Orlando, FL, USA","Rosen College of Hospitality Management, 9907, Universal Boulevard, Orange County, Florida, 32819, USA",28.4,-81.4
+University of Central Florida,"University of Central Florida, Orlando, USA","Rosen College of Hospitality Management, 9907, Universal Boulevard, Orange County, Florida, 32819, USA",28.4,-81.4
+University of Central Florida,"University of Central Florida, USA","University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.6,-81.2
+University of Central Punjab,University of Central Punjab,"University of Central Punjab, Khyaban-e-Jinnah, PECHS, Wapda Town, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54000, ‏پاکستان‎",31.4,74.3
+University of Central Punjab,"University of Central Punjab, Pakistan","University of Central Punjab, Khyaban-e-Jinnah, PECHS, Wapda Town, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54000, ‏پاکستان‎",31.4,74.3
+University of Chicago,THE UNIVERSITY OF CHICAGO,"University of Chicago, South Ellis Avenue, Woodlawn, Chicago, Cook County, Illinois, 60637, USA",41.8,-87.6
+University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.9,116.2
+University of Chinese Academy of Sciences,University of Chinese Academy of Sciences (UCAS,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.9,116.2
+University of Chinese Academy of Sciences,University of Chinese Academy of Sciences (UCAS),"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.9,116.2
+University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences (UCAS), Beijing, 100049, China","University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.9,116.2
+University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, Beijing 100190, China","University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.9,116.2
+University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, Beijing 101408, China","University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.9,116.2
+University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, Beijing, 100049, China","University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.9,116.2
+University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, Beijing, China","University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.9,116.2
+University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, China","University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.9,116.2
+University of Coimbra,University of Coimbra,"Reitoria da Universidade de Coimbra, Rua de Entre-Colégios, Almedina, Alta, Almedina, Sé Nova, Santa Cruz, Almedina e São Bartolomeu, CBR, Coimbra, Baixo Mondego, Centro, 3000-062, Portugal",40.2,-8.4
+University of Coimbra,"University of Coimbra, Portugal","Reitoria da Universidade de Coimbra, Rua de Entre-Colégios, Almedina, Alta, Almedina, Sé Nova, Santa Cruz, Almedina e São Bartolomeu, CBR, Coimbra, Baixo Mondego, Centro, 3000-062, Portugal",40.2,-8.4
+"University of Colorado, Boulder","University of Colorado, Boulder","Naropa University, Arapahoe Avenue, The Hill, Boulder, Boulder County, Colorado, 80309, USA",40.0,-105.3
+"University of Colorado, Colorado Springs",University of Colorado Colorado Springs,"Main Hall, The Spine, Colorado Springs, El Paso County, Colorado, 80907, USA",38.9,-104.8
+"University of Colorado, Denver",University of Colorado Denver,"University of Colorado (Denver Auraria campus), Lawrence Way, Auraria, Denver, Denver County, Colorado, 80217, USA",39.7,-105.0
+"University of Colorado, Denver","University of Colorado Denver, Denver, CO, USA","University of Colorado (Denver Auraria campus), Lawrence Way, Auraria, Denver, Denver County, Colorado, 80217, USA",39.7,-105.0
+University of Connecticut,University of Connecticut,"University of Connecticut, Glenbrook Road, Storrs, Tolland County, Connecticut, 06269, USA",41.8,-72.3
+University of Copenhagen,University of Copenhagen,"Københavns Universitet, Krystalgade, Kødbyen, Vesterbro, København, Københavns Kommune, Region Hovedstaden, 1165, Danmark",55.7,12.6
+University of Crete,University of Crete,"House of Europe, Μακεδονίας, Ρέθυμνο, Δήμος Ρεθύμνης, Περιφερειακή Ενότητα Ρεθύμνου, Περιφέρεια Κρήτης, Κρήτη, 930100, Ελλάδα",35.4,24.5
+University of Crete,"University of Crete, Crete, 73100, Greece","House of Europe, Μακεδονίας, Ρέθυμνο, Δήμος Ρεθύμνης, Περιφερειακή Ενότητα Ρεθύμνου, Περιφέρεια Κρήτης, Κρήτη, 930100, Ελλάδα",35.4,24.5
+University of Dammam,University of Dammam,"University of Dammam, King Faisal Rd, العقربية, الخبر, المنطقة الشرقية, ٣١٩٥٢, السعودية",26.4,50.2
+University of Dammam,"University of Dammam, Saudi Arabia","University of Dammam, King Faisal Rd, العقربية, الخبر, المنطقة الشرقية, ٣١٩٥٢, السعودية",26.4,50.2
+University of Dayton,University of Dayton,"University of Dayton, Caldwell Street, South Park Historic District, Dayton, Montgomery, Ohio, 45409, USA",39.7,-84.2
+University of Dayton,"University of Dayton, Dayton, OH, USA","University of Dayton, Caldwell Street, South Park Historic District, Dayton, Montgomery, Ohio, 45409, USA",39.7,-84.2
+University of Dayton,"University of Dayton, Ohio, USA","University of Dayton, Caldwell Street, South Park Historic District, Dayton, Montgomery, Ohio, 45409, USA",39.7,-84.2
+University of Delaware,University of Delaware,"University of Delaware, South College Avenue, Newark, New Castle County, Delaware, 19713, USA",39.7,-75.8
+University of Delaware,"University of Delaware, Newark, 19716, USA","University of Delaware, South College Avenue, Newark, New Castle County, Delaware, 19713, USA",39.7,-75.8
+University of Delaware,"University of Delaware, Newark, DE, USA","University of Delaware, South College Avenue, Newark, New Castle County, Delaware, 19713, USA",39.7,-75.8
+University of Delaware,"University of Delaware, Newark, DE. USA","University of Delaware, South College Avenue, Newark, New Castle County, Delaware, 19713, USA",39.7,-75.8
+University of Delaware,"University of Delaware, USA","University of Delaware, South College Avenue, Newark, New Castle County, Delaware, 19713, USA",39.7,-75.8
+University of Denver,University of Denver,"University of Denver, Driscoll Bridge, Denver, Denver County, Colorado, 80208, USA",39.7,-105.0
+University of Denver,"University of Denver, Denver, CO","University of Denver, Driscoll Bridge, Denver, Denver County, Colorado, 80208, USA",39.7,-105.0
+University of Dhaka,University of Dhaka,"World War Memorial, Shahid Minar Rd, Jagannath Hall, DU, জিগাতলা, ঢাকা, ঢাকা বিভাগ, 1000, বাংলাদেশ",23.7,90.4
+University of Dhaka,"University of Dhaka, Bangladesh","World War Memorial, Shahid Minar Rd, Jagannath Hall, DU, জিগাতলা, ঢাকা, ঢাকা বিভাগ, 1000, বাংলাদেশ",23.7,90.4
+University of Dschang,University of Dschang,"Université de Dschang, Départementale 65, Fokoué, Menoua, OU, Cameroun",5.4,10.1
+University of Dschang,"University of Dschang, Cameroon","Université de Dschang, Départementale 65, Fokoué, Menoua, OU, Cameroun",5.4,10.1
+University of Dundee,University of Dundee,"University of Dundee, Park Wynd, Law, Dundee, Dundee City, Scotland, DD1 4HN, UK",56.5,-3.0
+University of East Anglia,University of East Anglia,"Arts (Lower Walkway Level), The Square, Westfield View, Earlham, Norwich, Norfolk, East of England, England, NR4 7TJ, UK",52.6,1.2
+University of East Anglia,"University of East Anglia, Norwich, U.K.","Arts (Lower Walkway Level), The Square, Westfield View, Earlham, Norwich, Norfolk, East of England, England, NR4 7TJ, UK",52.6,1.2
+University of Edinburgh,The University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.9,-3.2
+University of Edinburgh,"The University of Edinburgh, Edinburgh, U.K.","New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.9,-3.2
+University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.9,-3.2
+University of Edinburgh,"University of Edinburgh, Edinburgh, UK","New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.9,-3.2
+University of Electro-Communications,The University of Electro-Communications,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本",35.7,139.5
+University of Electro-Communications,"The University of Electro-Communications, JAPAN","電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本",35.7,139.5
+University of Electro-Communications,"The University of Electro-Communications, Japan","電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本",35.7,139.5
+University of Electro-Communications,"The University of Electro-Communications, Tokyo","電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本",35.7,139.5
+University of Exeter,University of Exeter,"University of Exeter, Stocker Road, Exwick, Exeter, Devon, South West England, England, EX4 4QN, UK",50.7,-3.5
+University of Exeter,"University of Exeter, UK","University of Exeter, Stocker Road, Exwick, Exeter, Devon, South West England, England, EX4 4QN, UK",50.7,-3.5
+University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.6,-82.3
+University of Florida,"University of Florida, Gainesville, FL","University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.6,-82.3
+University of Florida,"University of Florida, Gainesville, FL, 32611, USA","University of Florida, Museum Road, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32601, USA",29.6,-82.4
+University of Frankfurt,University of Frankfurt,"Frankfurt University of Applied Sciences, Kleiststraße, Nordend West, Frankfurt, Regierungsbezirk Darmstadt, Hessen, 60318, Deutschland",50.1,8.7
+University of Geneva,University of Geneva,"University of Chicago-Yerkes Observatory, 373, West Geneva Street, Williams Bay, Walworth County, Wisconsin, 53191, USA",42.6,-88.6
+University of Glasgow,University of Glasgow,"University of Glasgow, University Avenue, Yorkhill, Hillhead, Glasgow, Glasgow City, Scotland, G, UK",55.9,-4.3
+University of Groningen,University of Groningen,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland",53.2,6.6
+University of Groningen,"University of Groningen, Netherlands","Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland",53.2,6.6
+University of Groningen,"University of Groningen, The Netherlands","Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland",53.2,6.6
+University of Gujrat,University of Gujrat,"University of Gujrat, University Road, Chandhar, Gujrāt District, پنجاب, 50700, ‏پاکستان‎",32.6,74.2
+University of Gujrat,"University of Gujrat, Pakistan","University of Gujrat, University Road, Chandhar, Gujrāt District, پنجاب, 50700, ‏پاکستان‎",32.6,74.2
+University of Haifa,University of Haifa,"אוניברסיטת חיפה, חיפה, מחוז חיפה, ישראל",32.8,35.0
+University of Haifa,"University of Haifa, Haifa, Israel","אוניברסיטת חיפה, חיפה, מחוז חיפה, ישראל",32.8,35.0
+University of Hawaii,University of Hawaii,"University of Hawaii at Manoa, Bachman Place, Lower Mānoa, Moiliili, Honolulu, Honolulu County, Hawaii, 96848, USA",21.3,-157.8
+University of Hawaii,"University of Hawaii, Manoa, Honolulu, HI, 96822","University of Hawaii at Manoa, Bachman Place, Lower Mānoa, Moiliili, Honolulu, Honolulu County, Hawaii, 96848, USA",21.3,-157.8
+University of Hong Kong,"The Univ of Hong Kong, China","海洋科學研究所 The Swire Institute of Marine Science, 鶴咀道 Cape D'Aguilar Road, 鶴咀低電台 Cape D'Aguilar Low-Level Radio Station, 石澳 Shek O, 芽菜坑村 Nga Choy Hang Tsuen, 南區 Southern District, 香港島 Hong Kong Island, HK, 中国",22.2,114.3
+University of Hong Kong,The University of Hong Kong,"海洋科學研究所 The Swire Institute of Marine Science, 鶴咀道 Cape D'Aguilar Road, 鶴咀低電台 Cape D'Aguilar Low-Level Radio Station, 石澳 Shek O, 芽菜坑村 Nga Choy Hang Tsuen, 南區 Southern District, 香港島 Hong Kong Island, HK, 中国",22.2,114.3
+University of Hong Kong,University of Hong Kong,"海洋科學研究所 The Swire Institute of Marine Science, 鶴咀道 Cape D'Aguilar Road, 鶴咀低電台 Cape D'Aguilar Low-Level Radio Station, 石澳 Shek O, 芽菜坑村 Nga Choy Hang Tsuen, 南區 Southern District, 香港島 Hong Kong Island, HK, 中国",22.2,114.3
+University of Hong Kong,"University of Hong Kong, China","海洋科學研究所 The Swire Institute of Marine Science, 鶴咀道 Cape D'Aguilar Road, 鶴咀低電台 Cape D'Aguilar Low-Level Radio Station, 石澳 Shek O, 芽菜坑村 Nga Choy Hang Tsuen, 南區 Southern District, 香港島 Hong Kong Island, HK, 中国",22.2,114.3
+University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.7,-95.3
+University of Houston,"University of Houston, Houston, TX 77204, USA","UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.7,-95.3
+University of Houston,"University of Houston, Houston, TX, USA","UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.7,-95.3
+University of Iceland,University of Iceland,"Háskóli Íslands, Sturlugata, Háskóli, Reykjavík, Reykjavíkurborg, Höfuðborgarsvæðið, 121, Ísland",64.1,-21.9
+University of Illinois at Chicago,University of Illinois at Chicago,"University of Illinois at Chicago, West Taylor Street, Greektown, Chicago, Cook County, Illinois, 60607, USA",41.9,-87.6
+University of Illinois at Chicago,"University of Illinois at Chicago, Chicago, IL","University of Illinois at Chicago, West Taylor Street, Greektown, Chicago, Cook County, Illinois, 60607, USA",41.9,-87.6
+"University of Illinois, Urbana-Champaign",University of Illinois,"B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.1,-88.2
+"University of Illinois, Urbana-Champaign",University of Illinois at,"University of Illinois at Urbana-Champaign, West Pennsylvania Avenue, West Urbana Residential Area, Urbana, Champaign County, Illinois, 61801, USA",40.1,-88.2
+"University of Illinois, Urbana-Champaign",University of Illinois at Urbana,"University of Illinois at Urbana-Champaign, West Pennsylvania Avenue, West Urbana Residential Area, Urbana, Champaign County, Illinois, 61801, USA",40.1,-88.2
+"University of Illinois, Urbana-Champaign",University of Illinois at Urbana Champaign,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA",40.1,-88.2
+"University of Illinois, Urbana-Champaign","University of Illinois at Urbana Champaign, Urbana","Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA",40.1,-88.2
+"University of Illinois, Urbana-Champaign","University of Illinois at Urbana Champaign, Urbana, IL 61801, USA","University of Illinois at Urbana-Champaign, South Goodwin Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.1,-88.2
+"University of Illinois, Urbana-Champaign",University of Illinois at Urbana-Champaign,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA",40.1,-88.2
+"University of Illinois, Urbana-Champaign","University of Illinois at Urbana-Champaign, IL USA","Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA",40.1,-88.2
+"University of Illinois, Urbana-Champaign","University of Illinois at Urbana-Champaign, Urbana, IL","Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA",40.1,-88.2
+"University of Illinois, Urbana-Champaign","University of Illinois at Urbana-Champaign, Urbana, IL, USA","Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA",40.1,-88.2
+"University of Illinois, Urbana-Champaign","University of Illinois at Urbana-Champaign, USA","Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA",40.1,-88.2
+"University of Illinois, Urbana-Champaign",University of Illinois at Urbana—Champaign,"Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA",40.1,-88.2
+"University of Illinois, Urbana-Champaign","University of Illinois at Urbana—Champaign, Champaign, IL, USA","Krannert Art Museum, 500, Peabody Drive, Urbana, Champaign County, Illinois, 61820, USA",40.1,-88.2
+"University of Illinois, Urbana-Champaign",University of Illinois Urbana Champaign,"B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.1,-88.2
+"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.1,-88.2
+University of Ioannina,University of Ioannina,"Πανεπιστήμιο Ιωαννίνων, Πανεπιστημίου, Κάτω Νεοχωρόπουλο, Νεοχωρόπουλο, Δήμος Ιωαννιτών, Π.Ε. Ιωαννίνων, Περιφέρεια Ηπείρου, Ήπειρος - Δυτική Μακεδονία, 45110, Ελλάδα",39.6,20.8
+University of Ioannina,"University of Ioannina, 45110, Greece","Πανεπιστήμιο Ιωαννίνων, Πανεπιστημίου, Κάτω Νεοχωρόπουλο, Νεοχωρόπουλο, Δήμος Ιωαννιτών, Π.Ε. Ιωαννίνων, Περιφέρεια Ηπείρου, Ήπειρος - Δυτική Μακεδονία, 45110, Ελλάδα",39.6,20.8
+University of Ioannina,"University of Ioannina, Ioannina, Greece","Πανεπιστήμιο Ιωαννίνων, Πανεπιστημίου, Κάτω Νεοχωρόπουλο, Νεοχωρόπουλο, Δήμος Ιωαννιτών, Π.Ε. Ιωαννίνων, Περιφέρεια Ηπείρου, Ήπειρος - Δυτική Μακεδονία, 45110, Ελλάδα",39.6,20.8
+University of Iowa,University of Iowa,"University of Iowa, Hawkeye Court, Iowa City, Johnson County, Iowa, 52246, USA",41.7,-91.6
+University of Karlsruhe,University of Karlsruhe,"Karlshochschule International University, 36-38, Karlstraße, Innenstadt-West Westlicher Teil, Innenstadt-West, Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76133, Deutschland",49.0,8.4
+University of Karlsruhe,"University of Karlsruhe, Germany","Karlshochschule International University, 36-38, Karlstraße, Innenstadt-West Westlicher Teil, Innenstadt-West, Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76133, Deutschland",49.0,8.4
+University of Kent,University of Kent,"University of Kent, St. Stephen's Hill, Hackington, Canterbury, Kent, South East, England, CT2 7AS, UK",51.3,1.1
+University of Kent,"University of Kent, Canterbury, U.K.","University of Kent, St. Stephen's Hill, Hackington, Canterbury, Kent, South East, England, CT2 7AS, UK",51.3,1.1
+University of Kentucky,University of Kentucky,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA",38.0,-84.5
+University of Kentucky,"University of Kentucky, USA","University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA",38.0,-84.5
+University of Leeds,University of Leeds,"University of Leeds, Inner Ring Road, Woodhouse, Leeds, Yorkshire and the Humber, England, LS2 9NS, UK",53.8,-1.6
+University of Lincoln,University of Lincoln,"University of Lincoln, Brayford Way, Whitton Park, New Boultham, Lincoln, Lincolnshire, East Midlands, England, LN6 7TS, UK",53.2,-0.5
+University of Lincoln,"University of Lincoln, U. K.","University of Lincoln, Brayford Way, Whitton Park, New Boultham, Lincoln, Lincolnshire, East Midlands, England, LN6 7TS, UK",53.2,-0.5
+University of Lincoln,"University of Lincoln, U.K","University of Lincoln, Brayford Way, Whitton Park, New Boultham, Lincoln, Lincolnshire, East Midlands, England, LN6 7TS, UK",53.2,-0.5
+University of Lincoln,"University of Lincoln, UK","University of Lincoln, Brayford Way, Whitton Park, New Boultham, Lincoln, Lincolnshire, East Midlands, England, LN6 7TS, UK",53.2,-0.5
+University of Liverpool,University of Liverpool,"Victoria Building, Brownlow Hill, Knowledge Quarter, Liverpool, North West England, England, L3, UK",53.4,-3.0
+University of Liverpool,"University of Liverpool, Liverpool, U.K.","Victoria Building, Brownlow Hill, Knowledge Quarter, Liverpool, North West England, England, L3, UK",53.4,-3.0
+University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.1,14.5
+University of Ljubljana,University of Ljubljana Faculty,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.1,14.5
+University of Ljubljana,"University of Ljubljana, Ljubljana, Slovenia","UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.1,14.5
+University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.5,-0.1
+University of Louisville,University of Louisville,"University of Louisville, South Brook Street, Louisville, Jefferson County, Kentucky, 40208, USA",38.2,-85.8
+University of Louisville,"University of Louisville, Louisville, KY 40292 USA","University of Louisville, South Brook Street, Louisville, Jefferson County, Kentucky, 40208, USA",38.2,-85.8
+University of Macau,University of Macau,"研究生宿舍 Residência de Estudantes de Pós-Graduação da Universidade de Macau, 澳門大學 Universidade de Macau, 嘉模堂區 Nossa Senhora do Carmo, 氹仔 Taipa, Universidade de Macau em Ilha de Montanha 澳門大學橫琴校區, 中国",22.1,113.5
+University of Macau,"University of Macau, Taipa, Macau","研究生宿舍 Residência de Estudantes de Pós-Graduação da Universidade de Macau, 澳門大學 Universidade de Macau, 嘉模堂區 Nossa Senhora do Carmo, 氹仔 Taipa, Universidade de Macau em Ilha de Montanha 澳門大學橫琴校區, 中国",22.1,113.5
+University of Malaya,University of Malaya,"UM, Lingkaran Wawasan, Bukit Pantai, Bangsar, KL, 50603, Malaysia",3.1,101.7
+University of Malaya,"University of Malaya, 50603 Kuala Lumpur, Malaysia","UM, Lingkaran Wawasan, Bukit Pantai, Bangsar, KL, 50603, Malaysia",3.1,101.7
+University of Malaya,"University of Malaya, Kuala Lumpur, Malaysia","UM, Lingkaran Wawasan, Bukit Pantai, Bangsar, KL, 50603, Malaysia",3.1,101.7
+University of Malta,University of Malta,"University of Malta, Ring Road, Japanese Garden, L-Imsida, Malta, MSD 9027, Malta",35.9,14.5
+University of Malta,"University of Malta, Msida, Malta","University of Malta, Ring Road, Japanese Garden, L-Imsida, Malta, MSD 9027, Malta",35.9,14.5
+University of Manchester,The University of Manchester,"University of Manchester - Main Campus, Brunswick Street, Curry Mile, Ardwick, Manchester, Greater Manchester, North West England, England, M13 9NR, UK",53.5,-2.2
+University of Manchester,University of Manchester,"University of Manchester - Main Campus, Brunswick Street, Curry Mile, Ardwick, Manchester, Greater Manchester, North West England, England, M13 9NR, UK",53.5,-2.2
+University of Manchester,"University of Manchester, Manchester, U.K.","University of Manchester - Main Campus, Brunswick Street, Curry Mile, Ardwick, Manchester, Greater Manchester, North West England, England, M13 9NR, UK",53.5,-2.2
+University of Manitoba,University of Manitoba,"University of Manitoba, Gillson Street, Normand Park, Saint Vital, Winnipeg, Manitoba, R3T 2N2, Canada",49.8,-97.1
+University of Maryland,The University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.3,-76.6
+University of Maryland,University Of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.3,-76.6
+University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.3,-76.6
+University of Maryland,"Y. Li, University of Maryland","Penn Street Garage, 120, Penn Street, Ridgleys Delight, Baltimore, Maryland, 21201, USA",39.3,-76.6
+University of Maryland College Park,"College Park, Maryland","University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",39.0,-76.9
+University of Maryland College Park,"College Park, MD","University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",39.0,-76.9
+University of Maryland College Park,"College Park, MD 20742 USA","University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",39.0,-76.9
+University of Maryland College Park,"College Park, MD, 20740, USA","University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",39.0,-76.9
+University of Maryland College Park,"College Park, United States","University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",39.0,-76.9
+University of Maryland College Park,"College Park, USA","University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",39.0,-76.9
+University of Maryland College Park,University of Maryland College Park,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",39.0,-76.9
+University of Maryland College Park,University of Maryland-College Park,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",39.0,-76.9
+University of Maryland College Park,"University of Maryland-College Park, USA","University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",39.0,-76.9
+University of Maryland College Park,"University of Maryland, College Park, MD, USA","The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.3,-76.6
+University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.4,-72.5
+University of Massachusetts,University of Massachusetts - Amherst,"UMass Amherst, Commonwealth Avenue, Amherst, Hampshire, Massachusetts, 01003, USA",42.4,-72.5
+University of Massachusetts,University of Massachusetts Amherst,"UMass Amherst, Commonwealth Avenue, Amherst, Hampshire, Massachusetts, 01003, USA",42.4,-72.5
+University of Massachusetts,"University of Massachusetts Amherst, Amherst MA, 01003","Murray D. Lincoln Campus Center, 1, Campus Center Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.4,-72.5
+University of Massachusetts,"University of Massachusetts, Amherst","University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.4,-72.5
+University of Massachusetts,"University of Massachusetts, Amherst MA, USA","University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.4,-72.5
+University of Massachusetts,"University of Massachusetts, Amherst, MA","University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.4,-72.5
+University of Massachusetts Dartmouth,University of Massachusetts Dartmouth,"University of Massachusetts Dartmouth, University Ring Road, Dartmouth, Bristol County, Massachusetts, 02747, USA",41.6,-71.0
+University of Massachusetts Dartmouth,"University of Massachusetts Dartmouth, Dartmouth, MA, USA","University of Massachusetts Dartmouth, University Ring Road, Dartmouth, Bristol County, Massachusetts, 02747, USA",41.6,-71.0
+University of Memphis,University of Memphis,"The University of Memphis, Desoto Avenue, Memphis, Shelby County, Tennessee, 38152, USA",35.1,-89.9
+University of Miami,University of Miami,"University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA",25.7,-80.3
+University of Miami,"University of Miami, Coral Gables, FL","University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA",25.7,-80.3
+University of Miami,"University of Miami, USA","University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA",25.7,-80.3
+University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.3,-83.7
+University of Michigan,"University of Michigan, Ann Arbor","University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.3,-83.7
+University of Michigan,"University of Michigan, Ann Arbor, MI","University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.3,-83.7
+University of Michigan,"University of Michigan, Ann Arbor, MI 48109 USA","Power Center for the Performing Arts, 121, Fletcher Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.3,-83.7
+University of Michigan,"University of Michigan, Ann Arbor, MI, USA","University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.3,-83.7
+University of Michigan,"University of Michigan, Ann Arbor, USA","University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.3,-83.7
+University of Michigan,"University of Michigan, Ann, Arbor, MI USA","University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.3,-83.7
+University of Milan,University of Milan,"Milan Avenue, Ray Mar Terrace, University City, St. Louis County, Missouri, 63130, USA",38.7,-90.3
+University of Minnesota,University of Minnesota,"WeismanArt, 333, East River Parkway, Marcy-Holmes, Phillips, Minneapolis, Hennepin County, Minnesota, 55455, USA",45.0,-93.2
+University of Missouri,University of Missouri,"L1, Maguire Boulevard, Lemone Industrial Park, Columbia, Boone County, Missouri, 65201, USA",38.9,-92.3
+University of Missouri,"University of Missouri, Columbia, MO","L1, Maguire Boulevard, Lemone Industrial Park, Columbia, Boone County, Missouri, 65201, USA",38.9,-92.3
+University of Nebraska - Lincoln,University of Nebraska - Lincoln,"Sheldon Museum of Art, North 12th Street, West Lincoln, Lincoln, Lancaster County, Nebraska, 68588-0300, USA",40.8,-96.7
+University of Nevada,University of Nevada,"Orange 1, Evans Avenue, Reno, Washoe County, Nevada, 89557, USA",39.5,-119.8
+University of Nevada,"University of Nevada, Reno, Reno, NV, USA","Orange 1, Evans Avenue, Reno, Washoe County, Nevada, 89557, USA",39.5,-119.8
+University of Nevada,"University of Nevada, Reno, USA","Orange 1, Evans Avenue, Reno, Washoe County, Nevada, 89557, USA",39.5,-119.8
+University of New South Wales,The University of New South Wales,"UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia",-33.9,151.2
+University of New South Wales,"The University of New South Wales, Australia","UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia",-33.9,151.2
+University of New South Wales,University of New South Wales,"UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia",-33.9,151.2
+University of New South Wales,"University of New South Wales, Sydney, NSW, Australia","UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia",-33.9,151.2
+University of Newcastle,The University of Newcastle,"University of Newcastle Central Coast Campus, Technology Bridge, Ourimbah, Central Coast, NSW, 2258, Australia",-33.4,151.4
+University of Newcastle,"The University of Newcastle, Callaghan 2308, Australia","University of Newcastle, Huxley Library, University Drive, Callaghan, Newcastle-Maitland, Newcastle, NSW, 2308, Australia",-32.9,151.7
+University of Newcastle,University of Newcastle,"University of Newcastle Central Coast Campus, Technology Bridge, Ourimbah, Central Coast, NSW, 2258, Australia",-33.4,151.4
+University of Newcastle,"University of Newcastle, Newcastle, Australia","University of Newcastle, Christie Street, Newcastle, Newcastle-Maitland, Newcastle, NSW, 2300, Australia",-32.9,151.8
+"University of Newcastle, Australia","Callaghan, NSW 2308, Australia","Callaghan, Newcastle-Maitland, Newcastle, NSW, 2308, Australia",-32.9,151.7
+University of North Carolina,The University of North Carolina,"University of North Carolina, Emergency Room Drive, Chapel Hill, Orange County, North Carolina, 27599, USA",35.9,-79.0
+University of North Carolina,"The University of North Carolina, Chapel Hill","University of North Carolina, Emergency Room Drive, Chapel Hill, Orange County, North Carolina, 27599, USA",35.9,-79.0
+University of North Carolina,University of North Carolina,"University of North Carolina, Emergency Room Drive, Chapel Hill, Orange County, North Carolina, 27599, USA",35.9,-79.0
+University of North Carolina at Chapel Hill,University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.9,-79.1
+University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, Chapel Hill, NC","University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.9,-79.1
+University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, Chapel Hill, NC, USA","University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.9,-79.1
+University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, NC, USA","University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.9,-79.1
+University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, USA","University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.9,-79.1
+University of North Carolina at Charlotte,The University of North Carolina at Charlotte,"Lot 20, Poplar Terrace Drive, Charlotte, Mecklenburg County, North Carolina, 28223, USA",35.3,-80.7
+University of North Carolina at Charlotte,"The University of North Carolina at Charlotte, USA","Lot 20, Poplar Terrace Drive, Charlotte, Mecklenburg County, North Carolina, 28223, USA",35.3,-80.7
+University of North Carolina at Charlotte,University of North Carolina at Charlotte,"Lot 20, Poplar Terrace Drive, Charlotte, Mecklenburg County, North Carolina, 28223, USA",35.3,-80.7
+University of North Carolina at Wilmington,University of North Carolina at Wilmington,"University of North Carolina at Wilmington, Price Drive, University Suites, Wilmington, New Hanover County, North Carolina, 28403, USA",34.2,-77.9
+University of North Carolina at Wilmington,"University of North Carolina at Wilmington, USA","University of North Carolina at Wilmington, Price Drive, University Suites, Wilmington, New Hanover County, North Carolina, 28403, USA",34.2,-77.9
+University of North Carolina Wilmington,University of North Carolina Wilmington,"Kenan House, 1705, Market Street, Wilmington, New Hanover County, North Carolina, 28403, USA",34.2,-77.9
+University of North Carolina Wilmington,"University of North Carolina Wilmington, USA","Kenan House, 1705, Market Street, Wilmington, New Hanover County, North Carolina, 28403, USA",34.2,-77.9
+University of North Carolina Wilmington,"University of North Carolina Wilmington, Wilmington, NC, USA","Kenan House parking lot, Princess Street, Wilmington, New Hanover County, North Carolina, 28405, USA",34.2,-77.9
+University of North Carolina Wilmington,"University of North Carolina Wilmington, Wilmington, United States","Kenan House, 1705, Market Street, Wilmington, New Hanover County, North Carolina, 28403, USA",34.2,-77.9
+University of North Texas,University of North Texas,"University of North Texas, West Highland Street, Denton, Denton County, Texas, 76201, USA",33.2,-97.2
+University of North Texas,"University of North Texas, Denton, Texas, USA","University of North Texas, West Highland Street, Denton, Denton County, Texas, 76201, USA",33.2,-97.2
+University of Northern British Columbia,University of Northern British Columbia,"UNBC, Campus Ring Road, College Heights, Prince George, Regional District of Fraser-Fort George, British Columbia, V2M 5K7, Canada",53.9,-122.8
+University of Northern British Columbia,"University of Northern British Columbia, Canada","UNBC, Campus Ring Road, College Heights, Prince George, Regional District of Fraser-Fort George, British Columbia, V2M 5K7, Canada",53.9,-122.8
+University of Northern British Columbia,"University of Northern British Columbia, Prince George, Canada","UNBC, Campus Ring Road, College Heights, Prince George, Regional District of Fraser-Fort George, British Columbia, V2M 5K7, Canada",53.9,-122.8
+University of Notre Dame,of the University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.7,-86.2
+University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.7,-86.2
+University of Notre Dame,"University of Notre Dame, Notre Dame, IN, USA","University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.7,-86.2
+University of Notre Dame,"University of Notre Dame, USA","University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.7,-86.2
+University of Notre Dame,University of Notre Dame. Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.7,-86.2
+University of Notre Dame,"University of Notre Dame. Notre Dame, IN 46556.USA","University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.7,-86.2
+University of Nottingham,The University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.9,-1.2
+University of Nottingham,"The University of Nottingham, UK","University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.9,-1.2
+University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.9,-1.2
+University of Nottingham,"University of Nottingham, Nottingham, UK","University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.9,-1.2
+University of Nottingham Malaysia Campus,a The University of Nottingham Malaysia Campus,"The University of Nottingham Malaysia Campus, Jalan Broga, Bandar Rinching, Semenyih, Selangor, 43500, Malaysia",2.9,101.9
+University of Oradea,University of Oradea,"Universitatea Creștină Partium - Clădirea Sulyok, 27, Strada Primăriei, Orașul Nou, Oradea, Bihor, 410209, România",47.1,21.9
+University of Oslo,University of Oslo,"UiO, Moltke Moes vei, Blindern, Nordre Aker, Oslo, 0851, Norge",59.9,10.7
+University of Oslo,"University of Oslo, Oslo, Norway","UiO, Moltke Moes vei, Blindern, Nordre Aker, Oslo, 0851, Norge",59.9,10.7
+University of Ottawa,University of Ottawa,"University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada",45.4,-75.7
+University of Ottawa,"University of Ottawa, Canada","University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada",45.4,-75.7
+University of Ottawa,"University of Ottawa, Ottawa, On, Canada","University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada",45.4,-75.7
+University of Oulu,UNIVERSITY OF OULU,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.1,25.5
+University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.1,25.5
+University of Oulu,"University of Oulu, Finland","Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.1,25.5
+University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.8,-1.3
+University of Oxford,"University of Oxford, Oxford, United Kingdom","Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.8,-1.3
+University of Oxford,"University of Oxford, UK","Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.8,-1.3
+University of Oxford,"University of Oxford, United Kingdom","Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.8,-1.3
+University of Patras,University of Patras,"Πανεπιστήμιο Πατρών, Λεωφ. Ιπποκράτους, κ. Ρίου (Αγίου Γεωργίου Ρίου), Πάτρα, Δήμος Πατρέων, Περιφερειακή Ενότητα Αχαΐας, Περιφέρεια Δυτικής Ελλάδας, Πελοπόννησος, Δυτική Ελλάδα και Ιόνιο, 26443, Ελλάδα",38.3,21.8
+University of Patras,"University of Patras, Greece","Πανεπιστήμιο Πατρών, Λεωφ. Ιπποκράτους, κ. Ρίου (Αγίου Γεωργίου Ρίου), Πάτρα, Δήμος Πατρέων, Περιφερειακή Ενότητα Αχαΐας, Περιφέρεια Δυτικής Ελλάδας, Πελοπόννησος, Δυτική Ελλάδα και Ιόνιο, 26443, Ελλάδα",38.3,21.8
+University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.9,-75.2
+University of Pennsylvania,"University of Pennsylvania, Philadelphia, PA","40th Street Parking Lot, Walnut Street, Southwest Schuylkill, Philadelphia, Philadelphia County, Pennsylvania, 19104-1469, USA",40.0,-75.2
+University of Perugia,University of Perugia,"Università degli Studi di Perugia, 1, Piazza dell'Università, Perugia, PG, UMB, 06123, Italia",43.1,12.4
+University of Peshawar,University of Peshawar,"University of Peshawar, Road 2, JAHANGIR ABAD / جهانگیرآباد, پشاور‎, Peshāwar District, خیبر پختونخوا, 2500, ‏پاکستان‎",34.0,71.5
+University of Peshawar,"University of Peshawar, Pakistan","University of Peshawar, Road 2, JAHANGIR ABAD / جهانگیرآباد, پشاور‎, Peshāwar District, خیبر پختونخوا, 2500, ‏پاکستان‎",34.0,71.5
+University of Peshawar,"University of Peshawar, Peshawar, Pakistan","University of Peshawar, Road 2, JAHANGIR ABAD / جهانگیرآباد, پشاور‎, Peshāwar District, خیبر پختونخوا, 2500, ‏پاکستان‎",34.0,71.5
+University of Piraeus,University of Piraeus,"Πανεπιστήμιο Πειραιώς, 80, Καραολή και Δημητρίου, Απόλλωνας, Νέο Φάληρο, Πειραιάς, Δήμος Πειραιώς, Περιφερειακή Ενότητα Πειραιώς, Περιφέρεια Αττικής, Αττική, 185 34, Ελλάδα",37.9,23.7
+University of Pisa,University of Pisa,"Dipartimento di Fisica 'E. Fermi', 3, Largo Bruno Pontecorvo, San Francesco, Pisa, PI, TOS, 56127, Italia",43.7,10.4
+University of Pisa,"University of Pisa, Pisa, Italy","Dipartimento di Fisica 'E. Fermi', 3, Largo Bruno Pontecorvo, San Francesco, Pisa, PI, TOS, 56127, Italia",43.7,10.4
+University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.4,-80.0
+University of Pittsburgh,"University of Pittsburgh, PA 15213, USA","Nationality Rooms, 4200, Omicron Delta Kappa Walk, North Oakland, PGH, Allegheny County, Pennsylvania, 15213, USA",40.4,-80.0
+University of Pittsburgh,"University of Pittsburgh, PA, 15260, USA","Stephen Foster Memorial Museum, Forbes Avenue, Panther Hollow, Central Oakland, PGH, Allegheny County, Pennsylvania, 15213, USA",40.4,-80.0
+University of Pittsburgh,"University of Pittsburgh, PA, USA","University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.4,-80.0
+University of Pittsburgh,"University of Pittsburgh, Pittsburgh","University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.4,-80.0
+University of Pittsburgh,"University of Pittsburgh, Pittsburgh PA","Visitor Parking, Thomas Boulevard, Homewood, Point Breeze North, Wilkinsburg, Allegheny County, Pennsylvania, 15208, USA",40.4,-79.9
+University of Pittsburgh,"University of Pittsburgh, Pittsburgh, PA","Visitor Parking, Thomas Boulevard, Homewood, Point Breeze North, Wilkinsburg, Allegheny County, Pennsylvania, 15208, USA",40.4,-79.9
+University of Pittsburgh,"University of Pittsburgh, Pittsburgh, PA , USA","Visitor Parking, Thomas Boulevard, Homewood, Point Breeze North, Wilkinsburg, Allegheny County, Pennsylvania, 15208, USA",40.4,-79.9
+University of Pittsburgh,"University of Pittsburgh, Pittsburgh, PA 15260, USA","Stephen Foster Memorial Museum, Forbes Avenue, Panther Hollow, Central Oakland, PGH, Allegheny County, Pennsylvania, 15213, USA",40.4,-80.0
+University of Pittsburgh,"University of Pittsburgh, Pittsburgh, PA, USA","Visitor Parking, Thomas Boulevard, Homewood, Point Breeze North, Wilkinsburg, Allegheny County, Pennsylvania, 15208, USA",40.4,-79.9
+University of Pittsburgh,"University of Pittsburgh, Pittsburgh, USA","University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.4,-80.0
+University of Pittsburgh,"University of Pittsburgh, USA","University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.4,-80.0
+University of Plymouth,University of Plymouth,"Charles Seale-Hayne Library, Portland Square, Barbican, Plymouth, South West England, England, PL4 6AP, UK",50.4,-4.1
+University of Plymouth,"University of Plymouth, UK","Charles Seale-Hayne Library, Portland Square, Barbican, Plymouth, South West England, England, PL4 6AP, UK",50.4,-4.1
+University of Portsmouth,University of Portsmouth,"University of Portsmouth - North Zone, Portland Street, Portsea, Portsmouth, South East, England, PO1 3DE, UK",50.8,-1.1
+University of Portsmouth,"University of Portsmouth, United Kingdom","University of Portsmouth - North Zone, Portland Street, Portsea, Portsmouth, South East, England, PO1 3DE, UK",50.8,-1.1
+University of Queensland,The University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.5,153.0
+University of Queensland,the University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.5,153.0
+University of Queensland,"The University of Queensland, Australia","University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.5,153.0
+University of Queensland,"The University of Queensland, Brisbane, Australia","University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.5,153.0
+University of Queensland,"the University of Queensland, Brisbane, Qld, Australia","University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.5,153.0
+University of Queensland,"The University of Queensland, QLD 4072, Australia","University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.5,153.0
+University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.5,153.0
+University of Queensland,"University of Queensland, Australia","University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.5,153.0
+University of Queensland,"University of Queensland, St Lucia, QLD, Australia","Anthropology Museum, Chancellors Place, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.5,153.0
+University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.2,-77.6
+University of Rochester,"University of Rochester, NY 14627, USA","Central Utilities Lot, Firemans, Rochester, Monroe County, New York, 14627, USA",43.1,-77.6
+University of Rochester,"University of Rochester, Rochester, NY, USA","Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.2,-77.6
+University of Salzburg,University of Salzburg,"Universität Salzburg - Unipark, 1, Erzabt-Klotz-Straße, Nonntal, Salzburg, 5020, Österreich",47.8,13.1
+University of Salzburg,"University of Salzburg, Austria","Universität Salzburg - Unipark, 1, Erzabt-Klotz-Straße, Nonntal, Salzburg, 5020, Österreich",47.8,13.1
+University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.8,117.3
+University of Science and Technology of China,"University of Science and Technology of China, Hefei 230026, P. R. China","中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.8,117.3
+University of Science and Technology of China,"University of Science and Technology of China, Hefei, 230027, China","中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.8,117.3
+University of Science and Technology of China,"University of Science and Technology of China, Hefei, Anhui, China","中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.8,117.3
+University of Science and Technology of China,"University of Science and Technology of China, Hefei, Anhui, P. R. China","中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.8,117.3
+University of Science and Technology of China,"University of Science and Technology of China, Hefei, China","中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.8,117.3
+University of Sheffield,The University of Sheffield,"University of Sheffield, Portobello, Port Mahon, Saint George's, Sheffield, Yorkshire and the Humber, England, S1 4DP, UK",53.4,-1.5
+University of Sheffield,"The University of Sheffield, Sheffield, U.K.","University of Sheffield, Portobello, Port Mahon, Saint George's, Sheffield, Yorkshire and the Humber, England, S1 4DP, UK",53.4,-1.5
+University of Siena,University of Siena,"Università degli Studi di Siena, 55, Banchi di Sotto, Siena, SI, TOS, 53100, Italia",43.3,11.3
+University of South Carolina,University of South Carolina,"University of South Carolina, Wheat Street, Columbia, Richland County, South Carolina, 29205, USA",34.0,-81.0
+University of South Carolina,"University of South Carolina, Columbia, USA","University of South Carolina, Wheat Street, Columbia, Richland County, South Carolina, 29205, USA",34.0,-81.0
+University of South Carolina,"University of South Carolina, USA","University of South Carolina, Wheat Street, Columbia, Richland County, South Carolina, 29205, USA",34.0,-81.0
+University of South Florida,University of South Florida,"University of South Florida, Leroy Collins Boulevard, Tampa, Hillsborough County, Florida, 33620, USA",28.1,-82.4
+University of South Florida,"University of South Florida, Tampa, Florida 33620","University of South Florida, Leroy Collins Boulevard, Tampa, Hillsborough County, Florida, 33620, USA",28.1,-82.4
+University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.9,-1.4
+University of Southampton,"University of Southampton, SO17 1BJ, UK","Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.9,-1.4
+University of Southampton,"University of Southampton, Southampton, U.K.","Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.9,-1.4
+University of Southampton,"University of Southampton, United Kingdom","Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.9,-1.4
+University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.0,-118.3
+University of Southern California,"University of Southern California, Los Angeles, CA","University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.0,-118.3
+University of Southern California,"University of Southern California, Los Angeles, CA 90089, USA","University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.0,-118.3
+University of Southern California,"University of Southern California, Los Angeles, USA","University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.0,-118.3
+University of St Andrews,University of St Andrews,"University of St Andrews, North Street, Albany Park Student accommodation, Carngour, St Andrews, Fife, Scotland, KY16 9AJ, UK",56.3,-2.8
+University of St Andrews,"University of St Andrews, United Kingdom","University of St Andrews, North Street, Albany Park Student accommodation, Carngour, St Andrews, Fife, Scotland, KY16 9AJ, UK",56.3,-2.8
+University of Stuttgart,University of Stuttgart,"Universität Stuttgart, 7, Keplerstraße, Stadtmitte, Stuttgart, Regierungsbezirk Stuttgart, Baden-Württemberg, 70174, Deutschland",48.8,9.2
+University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.2,-0.6
+University of Surrey,"University of Surrey, Guildford, Surrey GU2 7XH, UK","University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.2,-0.6
+University of Surrey,"University of Surrey, Guildford, Surrey, GU2 7XH, UK","University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.2,-0.6
+University of Surrey,"University of Surrey, United Kingdom","University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.2,-0.6
+University of Sydney,"School, The University of Sydney, Sydney, NSW, Australia","Royal Prince Alfred Hospital School, 57-59, Grose Street, Camperdown, Sydney, NSW, 2050, Australia",-33.9,151.2
+University of Sydney,The University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.9,151.2
+University of Sydney,"The University of Sydney, NSW 2006, Australia","USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.9,151.2
+University of Sydney,"The University of Sydney, Sydney, Australia","USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.9,151.2
+University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.9,151.2
+University of Sydney,"University of Sydney, Australia","USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.9,151.2
+University of Sydney,"University of Sydney, Sydney, NSW, Australia","Sand Roll House, Parramatta Road, Camperdown, Sydney, NSW, 2050, Australia",-33.9,151.2
+University of Tabriz,University of Tabriz,"دانشگاه تبریز, شهید ایرج خلوتی, کوی انقلاب, تبریز, بخش مرکزی, شهرستان تبریز, استان آذربایجان شرقی, 5166616471, ‏ایران‎",38.1,46.3
+University of Tabriz,"University of Tabriz, Tabriz, Iran","دانشگاه تبریز, شهید ایرج خلوتی, کوی انقلاب, تبریز, بخش مرکزی, شهرستان تبریز, استان آذربایجان شرقی, 5166616471, ‏ایران‎",38.1,46.3
+University of Tampere,UNIVERSITY OF TAMPERE,"Tampereen yliopisto, 4, Kalevantie, Ratinanranta, Tulli, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33100, Suomi",61.5,23.8
+University of Tampere,University of Tampere,"Tampereen yliopisto, 4, Kalevantie, Ratinanranta, Tulli, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33100, Suomi",61.5,23.8
+University of Tartu,UNIVERSITY OF TARTU,"Paabel, University of Tartu, 17, Ülikooli, Kesklinn, Tartu linn, Tartu, Tartu linn, Tartu maakond, 53007, Eesti",58.4,26.7
+University of Technology Baghdad,"University of Technology, Baghdad, Iraq","الجامعة التكنلوجية, A86;N11;D383, محلة 103, Al Saadoom Park, Rusafa, بغداد, Al Resafa, محافظة بغداد, 3241, العراق",33.3,44.4
+University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.9,151.2
+University of Technology Sydney,"University of Technology Sydney, New South Wales, Australia","University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.9,151.2
+University of Technology Sydney,"University of Technology Sydney, Sydney, NSW, Australia","University of Technology Sydney, Harris Street, Ultimo, Sydney, NSW, 2007, Australia",-33.9,151.2
+University of Technology Sydney,"University of Technology Sydney, Ultimo, NSW, Australia","University of Technology Sydney, Harris Street, Ultimo, Sydney, NSW, 2007, Australia",-33.9,151.2
+University of Technology Sydney,"University of Technology, Sydney","UTS, Thomas Street, Ultimo, Sydney, NSW, 2007, Australia",-33.9,151.2
+University of Technology Sydney,"University of Technology, Sydney, Australia","UTS, Thomas Street, Ultimo, Sydney, NSW, 2007, Australia",-33.9,151.2
+University of Technology Sydney,"University of Technology, Sydney, NSW, Australia","UTS, Thomas Street, Ultimo, Sydney, NSW, 2007, Australia",-33.9,151.2
+University of Technology Sydney,"University of Technology, Sydney, Sydney, Australia","UTS, Thomas Street, Ultimo, Sydney, NSW, 2007, Australia",-33.9,151.2
+University of Tennessee,The University of Tennessee,"University of Tennessee, Melrose Avenue, Fort Sanders, Knoxville, Knox County, Tennessee, 37916, USA",36.0,-83.9
+University of Tennessee,"The University of Tennessee, Knoxville","University of Tennessee, Melrose Avenue, Fort Sanders, Knoxville, Knox County, Tennessee, 37916, USA",36.0,-83.9
+University of Tennessee,University of Tennessee,"University of Tennessee, Melrose Avenue, Fort Sanders, Knoxville, Knox County, Tennessee, 37916, USA",36.0,-83.9
+University of Tennessee,"University of Tennessee, Knoxville","University of Tennessee, Melrose Avenue, Fort Sanders, Knoxville, Knox County, Tennessee, 37916, USA",36.0,-83.9
+University of Texas,The University of Texas,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.3,-97.7
+University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.7,-97.1
+University of Texas at Arlington,"University of Texas at Arlington, Arlington, Texas 76019, USA","University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.7,-97.1
+University of Texas at Arlington,"University of Texas at Arlington, Arlington, TX","University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.7,-97.1
+University of Texas at Arlington,"University of Texas at Arlington, Arlington, TX, USA","University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.7,-97.1
+University of Texas at Arlington,"University of Texas at Arlington, TX, USA","University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.7,-97.1
+University of Texas at Austin,The University of Texas at,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.3,-97.7
+University of Texas at Austin,The University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.3,-97.7
+University of Texas at Austin,The University of Texas at Austin Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.3,-97.7
+University of Texas at Austin,"The University of Texas at Austin Austin, Texas, USA","University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.3,-97.7
+University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.3,-97.7
+University of Texas at Austin,"University of Texas, Austin, TX 78712-1188, USA","University of Texas at Austin, 2152, San Jacinto Boulevard, Medical District, Austin, Travis County, Texas, 78712, USA",30.3,-97.7
+University of Texas at Dallas,The University of Texas at Dallas,"University of Texas at Dallas, Richardson, Dallas County, Texas, 75080, USA",33.0,-96.8
+University of Texas at Dallas,"The University of Texas at Dallas, Richardson, TX","University of Texas at Dallas, Richardson, Dallas County, Texas, 75080, USA",33.0,-96.8
+University of Texas at Dallas,University of Texas at Dallas,"University of Texas at Dallas, Richardson, Dallas County, Texas, 75080, USA",33.0,-96.8
+University of Texas at Dallas,"University of Texas at Dallas, Richardson, 75080, USA","University of Texas at Dallas, Richardson, Dallas County, Texas, 75080, USA",33.0,-96.8
+University of Texas at San Antonio,The University of Texas at San Antonio,"Lot D3, South PanAm Expressway, Cattleman's Square, San Antonio, Bexar County, Texas, 78205, USA",29.4,-98.5
+University of Texas at San Antonio,"The University of Texas at San Antonio, San Antonio, TX, USA","Lot D3, South PanAm Expressway, Cattleman's Square, San Antonio, Bexar County, Texas, 78205, USA",29.4,-98.5
+University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.6,-98.6
+University of Texas at San Antonio,"University of Texas at San Antonio, 78249, USA","UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.6,-98.6
+University of Texas at San Antonio,"University of Texas at San Antonio, San Antonio, Texas","Lot D3, South PanAm Expressway, Cattleman's Square, San Antonio, Bexar County, Texas, 78205, USA",29.4,-98.5
+University of Texas at San Antonio,"University of Texas at San Antonio, San Antonio, TX","Lot D3, South PanAm Expressway, Cattleman's Square, San Antonio, Bexar County, Texas, 78205, USA",29.4,-98.5
+University of Texas at San Antonio,"University of Texas at San Antonio, San Antonio, TX, USA","Lot D3, South PanAm Expressway, Cattleman's Square, San Antonio, Bexar County, Texas, 78205, USA",29.4,-98.5
+University of Texas at San Antonio,"University of Texas at San Antonio, San Antonio, United States","UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.6,-98.6
+University of Texas at San Antonio,"University of Texas, San Antonio, TX, USA","UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.6,-98.6
+University of the Basque Country,University of the Basque Country,"Euskal Herriko Unibertsitatea, Ibaeta Campusa, Paseo Arriola pasealekua, Ibaeta, Donostia/San Sebastián, Donostialdea, Gipuzkoa, Euskadi, 20008, España",43.3,-2.0
+University of the Basque Country,"University of the Basque Country, San Sebastian, Spain","Euskal Herriko Unibertsitatea, Ibaeta Campusa, Paseo Arriola pasealekua, Ibaeta, Donostia/San Sebastián, Donostialdea, Gipuzkoa, Euskadi, 20008, España",43.3,-2.0
+University of the Humanities,The University of the Humanities,"Хүмүүнлэгийн ухааны их сургууль, Ж.Самбуугийн гудамж, Гандан, Улаанбаатар, 975, Монгол улс",47.9,106.9
+University of the Western Cape,University of the Western Cape,"University of the Western Cape, Park Road, Cape Town Ward 9, Bellville, City of Cape Town, Western Cape, 7493, South Africa",-33.9,18.6
+University of the Witwatersrand,University of the Witwatersrand,"University of the Witwatersrand, Empire Road, Johannesburg Ward 60, Johannesburg, City of Johannesburg Metropolitan Municipality, Gauteng, 2001, South Africa",-26.2,28.0
+University of the Witwatersrand,"University of the Witwatersrand, Johannesburg, South Africa","University of the Witwatersrand, Empire Road, Johannesburg Ward 60, Johannesburg, City of Johannesburg Metropolitan Municipality, Gauteng, 2001, South Africa",-26.2,28.0
+University of Thessaloniki,University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.6,23.0
+University of Tokushima,The University of Tokushima,"大学前, 国道11号, 徳島市, 徳島県, 四国地方, 770-0815, 日本",34.1,134.6
+University of Tokushima,"The University of Tokushima, Japan","大学前, 国道11号, 徳島市, 徳島県, 四国地方, 770-0815, 日本",34.1,134.6
+University of Tokyo,The University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.9,139.9
+University of Tokyo,"The University of Tokyo, Japan","東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.9,139.9
+University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.9,139.9
+University of Tokyo,"University of Tokyo, Japan","東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.9,139.9
+University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.7,-79.4
+University of Toronto,"University of Toronto, Toronto, ON, Canada","University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.7,-79.4
+University of Toronto Toronto,University of Toronto Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.7,-79.4
+University of Toronto Toronto,"University of Toronto Toronto, Canada","University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.7,-79.4
+University of Toulouse,University of Toulouse,"Toulouse, Lake Charles, Calcasieu Parish, Louisiana, 70605, USA",30.2,-93.2
+University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.1,11.1
+University of Trento,"University of Trento, Italy","University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.1,11.1
+University of Trento,"University of Trento, Trento, Italy","University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.1,11.1
+University of Trento,"University of Trento, Trento, TN, Italy","University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.1,11.1
+University of Tsukuba,University of Tsukuba,"University of Tsukuba, つばき通り, Kananemoto-satsukabe village, つくば市, 茨城県, 関東地方, 305-8377, 日本",36.1,140.1
+University of Tsukuba,"University of Tsukuba, Japan","University of Tsukuba, つばき通り, Kananemoto-satsukabe village, つくば市, 茨城県, 関東地方, 305-8377, 日本",36.1,140.1
+University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.2,6.9
+University of Twente,"University of Twente, Netherlands","University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.2,6.9
+University of Twente,"University of Twente, The Netherlands","University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.2,6.9
+University of Venezia,University of Venezia,"University, Fondamenta Toffetti, Dorsoduro, Venezia-Murano-Burano, Venezia, VE, VEN, 30123, Italia",45.4,12.3
+University of Vermont,University of Vermont,"University of Vermont, Colchester Avenue, Burlington, Chittenden County, Vermont, 05401, USA",44.5,-73.2
+University of Vermont,"University of Vermont, 33 Colchester Avenue, Burlington","University of Vermont, Colchester Avenue, Burlington, Chittenden County, Vermont, 05401, USA",44.5,-73.2
+University of Vienna,University of Vienna,"Uni Wien, 1, Universitätsring, Schottenviertel, KG Innere Stadt, Innere Stadt, Wien, 1010, Österreich",48.2,16.4
+University of Vienna,"University of Vienna, Austria","Uni Wien, 1, Universitätsring, Schottenviertel, KG Innere Stadt, Innere Stadt, Wien, 1010, Österreich",48.2,16.4
+University of Virginia,University of Virginia,"University of Virginia, Rotunda Alley, Carr's Hill, Albemarle County, Virginia, 22904-4119, USA",38.0,-78.5
+University of Virginia,"University of Virginia, Charlottesville, VA","University of Virginia, Emmet Street North, Charlottesville, Virginia, 22901, USA",38.0,-78.5
+University of Warwick,University of Warwick,"University of Warwick, University Road, Kirby Corner, Cannon Park, Coventry, West Midlands Combined Authority, West Midlands, England, CV4 7AL, UK",52.4,-1.6
+University of Warwick,"University of Warwick, Coventry, U.K.","University of Warwick, University Road, Kirby Corner, Cannon Park, Coventry, West Midlands Combined Authority, West Midlands, England, CV4 7AL, UK",52.4,-1.6
+University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.7,-122.3
+University of Washington,"University of Washington, Seattle, USA","University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.7,-122.3
+University of Washington,"University of Washington, Seattle, WA 98195, United States","University of Washington, Yakima Lane, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.7,-122.3
+University of Washington,"University of Washington, Seattle, WA, USA","University of Washington, Northeast Walla Walla Road, Montlake, University District, Seattle, King County, Washington, 98195-2350, USA",47.7,-122.3
+University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.5,-80.5
+University of Western Australia,The University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-32.0,115.8
+University of Western Australia,"The University of Western Australia, Crawley, WA, Australia","University of Western Australia (Crawley Campus), 35, Stirling Highway, Crawley, Perth, Western Australia, 6009, Australia",-32.0,115.8
+University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-32.0,115.8
+University of Windsor,University of Windsor,"Bridge AA, Ambassador Bridge, Windsor, Essex, Ontario, N9C 2J9, Canada",42.3,-83.1
+University of Windsor,"University of Windsor, Canada","Bridge AA, Ambassador Bridge, Windsor, Essex, Ontario, N9C 2J9, Canada",42.3,-83.1
+University of Windsor,"University of Windsor, Canada N9B 3P4","Bridge AA, Ambassador Bridge, Windsor, Essex, Ontario, N9C 2J9, Canada",42.3,-83.1
+University of Windsor,"University of Windsor, Ontario, Canada","Bridge AA, Ambassador Bridge, Windsor, Essex, Ontario, N9C 2J9, Canada",42.3,-83.1
+University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.1,-89.4
+University of Wisconsin Madison,University of Wisconsin - Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.1,-89.4
+University of Wisconsin Madison,UNIVERSITY OF WISCONSIN MADISON,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.1,-89.4
+University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.1,-89.4
+University of Wisconsin Madison,University of Wisconsin-Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.1,-89.4
+University of Wisconsin Madison,"University of Wisconsin-Madison, Madison, WI, USA","UW Geology Museum, 1215, West Dayton Street, South Campus, Madison, Dane County, Wisconsin, 53715, USA",43.1,-89.4
+University of Witwatersrand,University of Witwatersrand,"University of the Witwatersrand, Empire Road, Johannesburg Ward 60, Johannesburg, City of Johannesburg Metropolitan Municipality, Gauteng, 2001, South Africa",-26.2,28.0
+University of Wollongong,University of Wollongong,"University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia",-34.4,150.9
+University of Wollongong,"University of Wollongong, Wollongong, Australia","University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia",-34.4,150.9
+University of York,The University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.9,-1.0
+University of York,"The University of York, Heslington, York YO10 5DD, United Kingdom","Campus Central Car Park, University Road, Heslington, York, Yorkshire and the Humber, England, YO10 5NH, UK",53.9,-1.1
+University of York,"The University of York, UK","University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.9,-1.0
+University of York,"The University of York, United Kingdom","University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.9,-1.0
+University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.9,-1.0
+University of York,"University of York, UK","University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.9,-1.0
+University of York,"University of York, York, UK","University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.9,-1.0
+University of York,"University of York, York, United Kingdom","University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.9,-1.0
+University of Zurich,University of Zurich,"ZHAW, Rosenstrasse, Heiligberg, Altstadt, Winterthur, Bezirk Winterthur, Zürich, 8400, Schweiz/Suisse/Svizzera/Svizra",47.5,8.7
+University of Zurich,"University of Zurich, Zurich, Switzerland","ZHAW, Rosenstrasse, Heiligberg, Altstadt, Winterthur, Bezirk Winterthur, Zürich, 8400, Schweiz/Suisse/Svizzera/Svizra",47.5,8.7
+University Politehnica of Bucharest,University Politehnica of Bucharest,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România",44.4,26.1
+University Politehnica of Bucharest,University POLITEHNICA of Bucharest,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România",44.4,26.1
+University Politehnica of Bucharest,University Politehnica of Bucharest,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România",44.4,26.1
+University Politehnica of Bucharest,"University POLITEHNICA of Bucharest, Bucharest, Romania","Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România",44.4,26.1
+University Politehnica of Bucharest,"University Politehnica of Bucharest, Romania","Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România",44.4,26.1
+University Politehnica Timisoara,University POLITEHNICA Timisoara,"UPT, Bulevardul Vasile Pârvan, Elisabetin, Timișoara, Timiș, 300223, România",45.7,21.2
+University Politehnica Timisoara,"University POLITEHNICA Timisoara, Timisoara, 300223, Romania","UPT, Bulevardul Vasile Pârvan, Elisabetin, Timișoara, Timiș, 300223, România",45.7,21.2
+Ural Federal University,Ural Federal University (UrFU,"УрФУ, улица Гагарина, Эврика, Втузгородок, Кировский район, Екатеринбург, городской округ Екатеринбург, Свердловская область, Уральский федеральный округ, 620062, РФ",56.8,60.6
+Urmia University,Urmia University,"دانشگاه ارومیه, خیابان اداره گاز (منصور افشار), دانشکده, ارومیه, بخش مرکزی, شهرستان ارومیه, استان آذربایجان غربی, 444655677, ‏ایران‎",37.5,45.0
+Urmia University,"Urmia University, Urmia, Iran","دانشگاه ارومیه, خیابان اداره گاز (منصور افشار), دانشکده, ارومیه, بخش مرکزی, شهرستان ارومیه, استان آذربایجان غربی, 444655677, ‏ایران‎",37.5,45.0
+Ursinus College,"Ursinus College, Collegeville, PA","Ursinus College, East Main Street, Collegeville, Montgomery County, Pennsylvania, 19426, USA",40.2,-75.5
+Utah State University,Utah State University,"Utah State University, Champ Drive, Logan, Cache County, Utah, 84322, USA",41.7,-111.8
+Utah State University,"Utah State University, Logan UT","Utah State University, Champ Drive, Logan, Cache County, Utah, 84322, USA",41.7,-111.8
+Utah State University,"Utah State University, Logan, UT 84322-4205, USA","Utah State University, Champ Drive, Logan, Cache County, Utah, 84322, USA",41.7,-111.8
+Varendra University,Varendra University,"department of english Vrendra University, Dhaka - Rajshahi Highway, Talaimari, রাজশাহী, রাজশাহী বিভাগ, 6204, বাংলাদেশ",24.4,88.6
+Varendra University,"Varendra University, Rajshahi, Bangladesh","department of english Vrendra University, Dhaka - Rajshahi Highway, Talaimari, রাজশাহী, রাজশাহী বিভাগ, 6204, বাংলাদেশ",24.4,88.6
+Victoria University of Wellington,Victoria University of Wellington,"Victoria University of Wellington, Waiteata Road, Aro Valley, Wellington, Wellington City, Wellington, 6040, New Zealand/Aotearoa",-41.3,174.8
+Vienna University of Technology,Vienna University of Technology,"TU Wien, Hauptgebäude, Hoftrakt, Freihausviertel, KG Wieden, Wieden, Wien, 1040, Österreich",48.2,16.4
+Vignan University,Vignan University,"Vignan university, Sangam Dairy Entry, Sangam Dairy, Gowdapalem, Guntur District, Andhra Pradesh, 522213, India",16.2,80.5
+Vignan University,"Vignan University, Andhra Pradesh, India","Vignan university, Sangam Dairy Entry, Sangam Dairy, Gowdapalem, Guntur District, Andhra Pradesh, 522213, India",16.2,80.5
+Villanova University,Villanova University,"Villanova University, East Lancaster Avenue, Radnor Township, Delaware County, Pennsylvania, 19010, USA",40.0,-75.3
+Virginia Commonwealth University,Virginia Commonwealth University,"Virginia Commonwealth University, The Compass, Oregon Hill, Richmond, Richmond City, Virginia, 23284, USA",37.5,-77.5
+Virginia Commonwealth University,"Virginia Commonwealth University, Richmond, VA, USA","Virginia Commonwealth University, The Compass, Oregon Hill, Richmond, Richmond City, Virginia, 23284, USA",37.5,-77.5
+Virginia Polytechnic Institute and State University,Virginia Polytechnic Institute and State University,"Virginia Polytechnic Institute and State University, Duck Pond Drive, Blacksburg, Montgomery County, Virginia, 24061-9517, USA",37.2,-80.4
+Virginia Polytechnic Institute and State University,"Virginia Polytechnic Institute and State University, Blacksburg","Virginia Polytechnic Institute and State University, Duck Pond Drive, Blacksburg, Montgomery County, Virginia, 24061-9517, USA",37.2,-80.4
+Virginia Polytechnic Institute and State University,"Virginia Polytechnic Institute and State University, Blacksburg, Virginia","Virginia Polytechnic Institute and State University, Duck Pond Drive, Blacksburg, Montgomery County, Virginia, 24061-9517, USA",37.2,-80.4
+Virginia Tech Carilion Research Institute,Virginia Tech Carilion Research Institute,"Virginia Tech Carilion Research Institute, South Jefferson Street, Crystal Spring, Roanoke, Virginia, 24016, USA",37.3,-79.9
+Vrije Universiteit Brussel,Vrije Universiteit Brussel,"Vrije Universiteit Brussel, 170, Quai de l'Industrie - Nijverheidskaai, Anderlecht, Brussel-Hoofdstad - Bruxelles-Capitale, Région de Bruxelles-Capitale - Brussels Hoofdstedelijk Gewest, 1070, België / Belgique / Belgien",50.8,4.3
+Vrije Universiteit Brussel,"Vrije Universiteit Brussel, 1050 Brussels, Belgium","Vrije Universiteit Brussel, 2, Boulevard de la Plaine - Pleinlaan, Ixelles - Elsene, Brussel-Hoofdstad - Bruxelles-Capitale, Région de Bruxelles-Capitale - Brussels Hoofdstedelijk Gewest, 1050, België / Belgique / Belgien",50.8,4.4
+"Vulcan, Inc.","Vulcan Inc, Seattle, WA 98104","Vulcan Inc., 505, Downtown Seattle Transit Tunnel, Seattle Downtown, International District/Chinatown, Seattle, King County, Washington, 98191, USA",47.6,-122.3
+Walt Disney Imagineering,"Walt Disney Imagineering, USA","Walt Disney Imagineering, 1401, Flower Street, Grand Central Creative Campus, Glendale, Los Angeles County, California, 91201, USA",34.2,-118.3
+Warsaw University of Technology,Warsaw University of Technology,"Politechnika Warszawska, 1, Plac Politechniki, VIII, Śródmieście, Warszawa, mazowieckie, 00-661, RP",52.2,21.0
+Warsaw University of Technology,"Warsaw University of Technology, Poland","Politechnika Warszawska, 1, Plac Politechniki, VIII, Śródmieście, Warszawa, mazowieckie, 00-661, RP",52.2,21.0
+Waseda University,Waseda University,"早稲田大学 北九州キャンパス, 2-2, 有毛引野線, 八幡西区, 北九州市, 福岡県, 九州地方, 808-0135, 日本",33.9,130.7
+Waseda University,"Waseda University, Kitakyushu, Japan 808-0135","早稲田大学 北九州キャンパス, 2-2, 有毛引野線, 八幡西区, 北九州市, 福岡県, 九州地方, 808-0135, 日本",33.9,130.7
+Washington University,Washington University,"Dero, Wallace Drive, St. Louis County, Missouri, MO 63130, USA",38.6,-90.3
+Washington University,"Washington University, St. Louis, MO, USA","Dero, Wallace Drive, St. Louis County, Missouri, MO 63130, USA",38.6,-90.3
+Wayne State University,Wayne State University,"Parking Structure 3, East Warren Avenue, New Center, Detroit, Wayne County, Michigan, 48236, USA",42.4,-83.1
+Wayne State University,"Wayne State University, Detroit, MI 48202, USA","Wayne State University, Burroughs Street, New Center, Detroit, Wayne County, Michigan, 48202, USA",42.4,-83.1
+Wayne State University,"Wayne State University, Detroit, MI, USA","Wayne State University, Burroughs Street, New Center, Detroit, Wayne County, Michigan, 48202, USA",42.4,-83.1
+Weizmann Institute of Science,The Weizmann Institute of,"מכון ויצמן, הרצל, מעונות וולפסון, נווה עמית, רחובות, מחוז המרכז, NO, ישראל",31.9,34.8
+Weizmann Institute of Science,The Weizmann Institute of Science,"מכון ויצמן למדע, שדרת מרכוס זיו, מעונות שיין, אחוזות הנשיא, רחובות, מחוז המרכז, NO, ישראל",31.9,34.8
+Weizmann Institute of Science,Weizmann Institute of Science,"מכון ויצמן למדע, שדרת מרכוס זיו, מעונות שיין, אחוזות הנשיא, רחובות, מחוז המרכז, NO, ישראל",31.9,34.8
+Weizmann Institute of Science,"Weizmann Institute of Science, Rehovot, 76100, Israel","מכון ויצמן למדע, שדרת מרכוס זיו, מעונות שיין, אחוזות הנשיא, רחובות, מחוז המרכז, NO, ישראל",31.9,34.8
+West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.7,-80.0
+West Virginia University,"West Virginia University, Morgantown WV 26506, USA","88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.7,-80.0
+West Virginia University,"West Virginia University, Morgantown, USA","88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.7,-80.0
+West Virginia University,"West Virginia University, Morgantown, WV","88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.7,-80.0
+West Virginia University,"West Virginia University, Morgantown, WV 26506, USA","88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.7,-80.0
+West Virginia University,"West Virginia University, Morgantown, WV, USA","88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.7,-80.0
+Western Kentucky University,Western Kentucky University,"Western Kentucky University, Avenue of Champions, Bowling Green, Warren County, Kentucky, 42101, USA",37.0,-86.5
+Western Sydney University,Western Sydney University,"Western Sydney University, Parramatta City Campus, Smith Street, Parramatta, Sydney, Parramatta, NSW, 2150, Australia",-33.8,151.0
+Western Sydney University,"Western Sydney University, Parramatta, NSW 2150, Australia","Western Sydney University, Parramatta City Campus, Smith Street, Parramatta, Sydney, Parramatta, NSW, 2150, Australia",-33.8,151.0
+Wolfson College,Wolfson College,"Wolfson College, Linton Road, Norham Manor, Oxford, Oxon, South East, England, OX2 6UD, UK",51.8,-1.3
+Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.6,114.4
+Wuhan University of Technology,"Wuhan University of Technology, Wuhan, China","武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.6,114.4
+Xerox Research Center,Xerox Research Center,"Xerox Research Centre of Canada, 2660, Speakman Drive, Sheridan Park, Erin Mills, Ont., Peel Region, Ontario, L5J 2M4, Canada",43.5,-79.7
+Xi'an Jiaotong University,Xi'an Jiaotong University,"西安交通大学兴庆校区, 文治路, 乐居场, 碑林区 (Beilin), 西安市, 陕西省, 710048, 中国",34.2,109.0
+Xi'an Jiaotong University,"Xi'an Jiaotong University, Xi'an, China","西安交通大学兴庆校区, 文治路, 乐居场, 碑林区 (Beilin), 西安市, 陕西省, 710048, 中国",34.2,109.0
+Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.4,118.1
+Xiamen University,"Xiamen University, Xiamen 361005, China","厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.4,118.1
+Xiamen University,"Xiamen University, Xiamen, China","厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.4,118.1
+Xiamen University,"Xiamen University, Xiamen, Fujian, China","厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.4,118.1
+Xiamen University,"Xiamen University, Xiamen, P. R. China","厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.4,118.1
+Xiangtan University,Xiangtan University,"湘潭大学图书馆, 文化广场, 羊牯塘街道, 雨湖区, 湘潭市 / Xiangtan, 湖南省, 中国",27.9,112.9
+Xiangtan University,"Xiangtan University, Xiangtan, China","湘潭大学图书馆, 文化广场, 羊牯塘街道, 雨湖区, 湘潭市 / Xiangtan, 湖南省, 中国",27.9,112.9
+Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.1,108.8
+Xidian University,"Xidian University, Xi an, China","Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.1,108.8
+Xidian University,"Xidian University, Xi'an, China","Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.1,108.8
+Xidian University,"Xidian University, Xi’an, China","Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.1,108.8
+Yale University,Yale University,"Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA",41.3,-73.0
+Yaroslavl State University,Yaroslavl State University,"ЯрГУ им. Демидова (Экономический факультет), 3, Комсомольская улица, Кировский район, Ярославль, городской округ Ярославль, Ярославская область, ЦФО, 150000, РФ",57.6,39.9
+Yeungnam University,Yeungnam University,"영남대, 대학로, 부적리, 경산시, 경북, 712-749, 대한민국",35.8,128.8
+Yonsei University,Yonsei University,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.6,126.9
+Yonsei University,"Yonsei University, 50 Yonsei-ro, Seodaemun-gu, SEOUL, Republic of Korea","연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.6,126.9
+Yonsei University,"Yonsei University, 50 Yonsei-ro, SEOUL, Republic of Korea","연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.6,126.9
+York University,York University,"York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada",43.8,-79.5
+York University,"York University, Toronto","York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada",43.8,-79.5
+York University,"York University, Toronto, Canada","York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada",43.8,-79.5
+Yunnan University,Yunnan University,"云南大学, 一二一大街, 志城家园, 五华区, 五华区 (Wuhua), 昆明市 (Kunming), 云南省, 650030, 中国",25.1,102.7
+Yunnan University,"Yunnan University, Kunming, P. R. China","云南大学, 一二一大街, 志城家园, 五华区, 五华区 (Wuhua), 昆明市 (Kunming), 云南省, 650030, 中国",25.1,102.7
+Zaragoza University,Zaragoza University,"Colegio Mayor Universitario Santa Isabel, Calle de Domingo Miral, Romareda, Zaragoza, Aragón, 50009, España",41.6,-0.9
+Zhejiang Normal University,Zhejiang Normal University,"浙江师范大学, 688, 迎宾大道, 柳湖花园, 金华市, 婺城区 (Wucheng), 金华市 / Jinhua, 浙江省, 321004, 中国",29.1,119.6
+Zhejiang Normal University,"Zhejiang Normal University, Jinhua, China","浙江师范大学, 688, 迎宾大道, 柳湖花园, 金华市, 婺城区 (Wucheng), 金华市 / Jinhua, 浙江省, 321004, 中国",29.1,119.6
+Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.2,120.1
+Zhejiang University,"Zhejiang University, Hangzhou, China","浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.2,120.1
+Zhejiang University of Technology,Zhejiang University of Technology,"浙江工业大学, 潮王路, 朝晖街道, 杭州市 Hangzhou, 浙江省, 310014, 中国",30.3,120.2
+Zhejiang University of Technology,"Zhejiang University of Technology, Hangzhou, China","浙江工业大学, 潮王路, 朝晖街道, 杭州市 Hangzhou, 浙江省, 310014, 中国",30.3,120.2
+Zhengzhou University,Zhengzhou University,"科学大道, 中原区 (Zhongyuan), 郑州市 / Zhengzhou, 河南省, 450001, 中国",34.8,113.5
+Zhengzhou University,"Zhengzhou University, China","科学大道, 中原区 (Zhongyuan), 郑州市 / Zhengzhou, 河南省, 450001, 中国",34.8,113.5
+Zhengzhou University,"Zhengzhou University, Zhengzhou, Henan 450052, China","科学大道, 中原区 (Zhongyuan), 郑州市 / Zhengzhou, 河南省, 450001, 中国",34.8,113.5 \ No newline at end of file
diff --git a/scraper/reports/pdf_unknown_bigrams.html b/scraper/reports/pdf_unknown_bigrams.html
new file mode 100644
index 00000000..a34fbe4d
--- /dev/null
+++ b/scraper/reports/pdf_unknown_bigrams.html
@@ -0,0 +1 @@
+<!doctype html><html><head><meta charset='utf-8'><title>PDF Report: Unknown Bigrams</title><link rel='stylesheet' href='reports.css'></head><body><h2>PDF Report: Unknown Bigrams</h2><table border='1' cellpadding='3' cellspacing='3'><tr><td>of the</td><td>4760</td></tr><tr><td>in the</td><td>2840</td></tr><tr><td>computer science</td><td>2687</td></tr><tr><td>of computer</td><td>2418</td></tr><tr><td>face recognition</td><td>1920</td></tr><tr><td>science and</td><td>1502</td></tr><tr><td>to the</td><td>1498</td></tr><tr><td>member ieee</td><td>1381</td></tr><tr><td>of technology</td><td>1248</td></tr><tr><td>for the</td><td>1201</td></tr><tr><td>on the</td><td>1161</td></tr><tr><td>and the</td><td>858</td></tr><tr><td>computer vision</td><td>817</td></tr><tr><td>of electrical</td><td>785</td></tr><tr><td>by the</td><td>782</td></tr><tr><td>facial expression</td><td>778</td></tr><tr><td>from the</td><td>773</td></tr><tr><td>in this</td><td>758</td></tr><tr><td>and technology</td><td>719</td></tr><tr><td>and computer</td><td>715</td></tr><tr><td>classi cation</td><td>668</td></tr><tr><td>has been</td><td>653</td></tr><tr><td>with the</td><td>630</td></tr><tr><td>identi cation</td><td>614</td></tr><tr><td>computer engineering</td><td>584</td></tr><tr><td>of science</td><td>582</td></tr><tr><td>of engineering</td><td>576</td></tr><tr><td>international journal</td><td>571</td></tr><tr><td>facial expressions</td><td>570</td></tr><tr><td>center for</td><td>568</td></tr><tr><td>electrical engineering</td><td>565</td></tr><tr><td>that the</td><td>561</td></tr><tr><td>for face</td><td>560</td></tr><tr><td>and engineering</td><td>544</td></tr><tr><td>of information</td><td>543</td></tr><tr><td>of this</td><td>530</td></tr><tr><td>open access</td><td>525</td></tr><tr><td>engineering and</td><td>524</td></tr><tr><td>of psychology</td><td>517</td></tr><tr><td>beijing china</td><td>513</td></tr><tr><td>this paper</td><td>505</td></tr><tr><td>of 
california</td><td>497</td></tr><tr><td>institute for</td><td>494</td></tr><tr><td>is the</td><td>479</td></tr><tr><td>the university</td><td>462</td></tr><tr><td>http www</td><td>451</td></tr><tr><td>this article</td><td>450</td></tr><tr><td>electrical and</td><td>438</td></tr><tr><td>have been</td><td>437</td></tr><tr><td>of facial</td><td>411</td></tr><tr><td>neural networks</td><td>402</td></tr><tr><td>the degree</td><td>395</td></tr><tr><td>doi org</td><td>394</td></tr><tr><td>of face</td><td>393</td></tr><tr><td>carnegie mellon</td><td>390</td></tr><tr><td>the face</td><td>385</td></tr><tr><td>mellon university</td><td>384</td></tr><tr><td>this work</td><td>384</td></tr><tr><td>re identi</td><td>380</td></tr><tr><td>expression recognition</td><td>380</td></tr><tr><td>the same</td><td>374</td></tr><tr><td>state university</td><td>372</td></tr><tr><td>at the</td><td>369</td></tr><tr><td>and information</td><td>365</td></tr><tr><td>senior member</td><td>356</td></tr><tr><td>as the</td><td>348</td></tr><tr><td>of sciences</td><td>347</td></tr><tr><td>ieee and</td><td>343</td></tr><tr><td>pattern recognition</td><td>341</td></tr><tr><td>real time</td><td>332</td></tr><tr><td>hong kong</td><td>326</td></tr><tr><td>recognition using</td><td>322</td></tr><tr><td>chinese academy</td><td>320</td></tr><tr><td>of computing</td><td>314</td></tr><tr><td>deep learning</td><td>314</td></tr><tr><td>information technology</td><td>313</td></tr><tr><td>pose estimation</td><td>299</td></tr><tr><td>with autism</td><td>297</td></tr><tr><td>in partial</td><td>296</td></tr><tr><td>the most</td><td>296</td></tr><tr><td>the requirements</td><td>292</td></tr><tr><td>object detection</td><td>292</td></tr><tr><td>new york</td><td>288</td></tr><tr><td>autism spectrum</td><td>285</td></tr><tr><td>fellow ieee</td><td>285</td></tr><tr><td>dx doi</td><td>284</td></tr><tr><td>neural network</td><td>283</td></tr><tr><td>of social</td><td>281</td></tr><tr><td>detection 
and</td><td>275</td></tr><tr><td>national university</td><td>272</td></tr><tr><td>machine learning</td><td>271</td></tr><tr><td>of philosophy</td><td>265</td></tr><tr><td>be addressed</td><td>261</td></tr><tr><td>student member</td><td>259</td></tr><tr><td>research article</td><td>256</td></tr><tr><td>large scale</td><td>255</td></tr><tr><td>of human</td><td>253</td></tr><tr><td>face detection</td><td>253</td></tr><tr><td>science university</td><td>252</td></tr><tr><td>in computer</td><td>248</td></tr><tr><td>convolutional neural</td><td>246</td></tr><tr><td>the proposed</td><td>245</td></tr><tr><td>key laboratory</td><td>244</td></tr><tr><td>vision and</td><td>243</td></tr><tr><td>arti cial</td><td>242</td></tr><tr><td>ca usa</td><td>242</td></tr><tr><td>learning for</td><td>240</td></tr><tr><td>as well</td><td>239</td></tr><tr><td>of these</td><td>237</td></tr><tr><td>individuals with</td><td>233</td></tr><tr><td>the author</td><td>231</td></tr><tr><td>of our</td><td>230</td></tr><tr><td>they are</td><td>227</td></tr><tr><td>of electronics</td><td>225</td></tr><tr><td>requirements for</td><td>224</td></tr><tr><td>engineering university</td><td>224</td></tr><tr><td>with asd</td><td>224</td></tr><tr><td>the netherlands</td><td>224</td></tr><tr><td>emotion recognition</td><td>224</td></tr><tr><td>associated with</td><td>223</td></tr><tr><td>feature extraction</td><td>223</td></tr><tr><td>information engineering</td><td>222</td></tr><tr><td>the art</td><td>220</td></tr><tr><td>for example</td><td>220</td></tr><tr><td>the original</td><td>220</td></tr><tr><td>we propose</td><td>218</td></tr><tr><td>signal processing</td><td>216</td></tr><tr><td>international conference</td><td>212</td></tr><tr><td>under the</td><td>211</td></tr><tr><td>recognition and</td><td>207</td></tr><tr><td>in face</td><td>206</td></tr><tr><td>volume article</td><td>206</td></tr><tr><td>that are</td><td>202</td></tr><tr><td>the image</td><td>201</td></tr><tr><td>research 
center</td><td>200</td></tr><tr><td>for facial</td><td>200</td></tr><tr><td>is not</td><td>200</td></tr><tr><td>psychology university</td><td>197</td></tr><tr><td>image processing</td><td>197</td></tr><tr><td>united kingdom</td><td>196</td></tr><tr><td>correspondence should</td><td>194</td></tr><tr><td>in social</td><td>194</td></tr><tr><td>face images</td><td>193</td></tr><tr><td>show that</td><td>192</td></tr><tr><td>for human</td><td>192</td></tr><tr><td>cial intelligence</td><td>191</td></tr><tr><td>action recognition</td><td>190</td></tr><tr><td>college london</td><td>190</td></tr><tr><td>information science</td><td>189</td></tr><tr><td>of electronic</td><td>188</td></tr><tr><td>there are</td><td>187</td></tr><tr><td>object recognition</td><td>187</td></tr><tr><td>in order</td><td>186</td></tr><tr><td>in autism</td><td>186</td></tr><tr><td>centre for</td><td>185</td></tr><tr><td>ef cient</td><td>184</td></tr><tr><td>using the</td><td>184</td></tr><tr><td>the human</td><td>184</td></tr><tr><td>electronics and</td><td>184</td></tr><tr><td>of automation</td><td>183</td></tr><tr><td>and communication</td><td>183</td></tr><tr><td>in which</td><td>181</td></tr><tr><td>published online</td><td>180</td></tr><tr><td>analysis and</td><td>178</td></tr><tr><td>image and</td><td>177</td></tr><tr><td>the rst</td><td>177</td></tr><tr><td>san diego</td><td>176</td></tr><tr><td>face processing</td><td>176</td></tr><tr><td>partial ful</td><td>175</td></tr><tr><td>the main</td><td>175</td></tr><tr><td>networks for</td><td>175</td></tr><tr><td>to this</td><td>175</td></tr><tr><td>for visual</td><td>174</td></tr><tr><td>in any</td><td>174</td></tr><tr><td>los angeles</td><td>174</td></tr><tr><td>ful llment</td><td>173</td></tr><tr><td>graduate school</td><td>171</td></tr><tr><td>of hong</td><td>170</td></tr><tr><td>computer and</td><td>169</td></tr><tr><td>rights reserved</td><td>169</td></tr><tr><td>research institute</td><td>168</td></tr><tr><td>human 
pose</td><td>168</td></tr><tr><td>the authors</td><td>168</td></tr><tr><td>recognition system</td><td>167</td></tr><tr><td>veri cation</td><td>167</td></tr><tr><td>for image</td><td>167</td></tr><tr><td>all rights</td><td>167</td></tr><tr><td>of faces</td><td>166</td></tr><tr><td>corresponding author</td><td>166</td></tr><tr><td>science department</td><td>165</td></tr><tr><td>max planck</td><td>164</td></tr><tr><td>volume issue</td><td>163</td></tr><tr><td>method for</td><td>163</td></tr><tr><td>and research</td><td>163</td></tr><tr><td>an open</td><td>162</td></tr><tr><td>microsoft research</td><td>162</td></tr><tr><td>children with</td><td>162</td></tr><tr><td>model for</td><td>161</td></tr><tr><td>united states</td><td>159</td></tr><tr><td>of advanced</td><td>158</td></tr><tr><td>eth zurich</td><td>158</td></tr><tr><td>the other</td><td>158</td></tr><tr><td>face and</td><td>157</td></tr><tr><td>electronic engineering</td><td>155</td></tr><tr><td>cite this</td><td>155</td></tr><tr><td>do not</td><td>155</td></tr><tr><td>are not</td><td>154</td></tr><tr><td>be used</td><td>154</td></tr><tr><td>the wild</td><td>153</td></tr><tr><td>of visual</td><td>152</td></tr><tr><td>we use</td><td>152</td></tr><tr><td>and social</td><td>151</td></tr><tr><td>network for</td><td>151</td></tr><tr><td>of images</td><td>151</td></tr><tr><td>creative commons</td><td>151</td></tr><tr><td>the data</td><td>150</td></tr><tr><td>technical university</td><td>150</td></tr><tr><td>used for</td><td>150</td></tr><tr><td>in asd</td><td>150</td></tr><tr><td>for person</td><td>149</td></tr><tr><td>in video</td><td>149</td></tr><tr><td>information and</td><td>149</td></tr><tr><td>of informatics</td><td>149</td></tr><tr><td>to cite</td><td>148</td></tr><tr><td>real world</td><td>147</td></tr><tr><td>for each</td><td>147</td></tr><tr><td>an image</td><td>147</td></tr><tr><td>planck institute</td><td>147</td></tr><tr><td>of emotion</td><td>147</td></tr><tr><td>of 
psychiatry</td><td>147</td></tr><tr><td>between the</td><td>146</td></tr><tr><td>features for</td><td>146</td></tr><tr><td>spectrum disorders</td><td>146</td></tr><tr><td>it has</td><td>145</td></tr><tr><td>the number</td><td>145</td></tr><tr><td>we present</td><td>144</td></tr><tr><td>and its</td><td>143</td></tr><tr><td>recognition with</td><td>143</td></tr><tr><td>approach for</td><td>142</td></tr><tr><td>in addition</td><td>142</td></tr><tr><td>the facial</td><td>142</td></tr><tr><td>massachusetts institute</td><td>142</td></tr><tr><td>spectrum disorder</td><td>141</td></tr><tr><td>ma usa</td><td>141</td></tr><tr><td>studies have</td><td>141</td></tr><tr><td>the amygdala</td><td>141</td></tr><tr><td>or not</td><td>140</td></tr><tr><td>pedestrian detection</td><td>140</td></tr><tr><td>of chinese</td><td>139</td></tr><tr><td>the chinese</td><td>139</td></tr><tr><td>ny usa</td><td>138</td></tr><tr><td>the performance</td><td>138</td></tr><tr><td>of medicine</td><td>138</td></tr><tr><td>intelligent systems</td><td>137</td></tr><tr><td>commons attribution</td><td>137</td></tr><tr><td>technology and</td><td>136</td></tr><tr><td>of pattern</td><td>136</td></tr><tr><td>to learn</td><td>135</td></tr><tr><td>stanford university</td><td>135</td></tr><tr><td>images and</td><td>134</td></tr><tr><td>of china</td><td>133</td></tr><tr><td>and applications</td><td>133</td></tr><tr><td>signi cant</td><td>133</td></tr><tr><td>for research</td><td>133</td></tr><tr><td>conference paper</td><td>133</td></tr><tr><td>the problem</td><td>132</td></tr><tr><td>to make</td><td>132</td></tr><tr><td>framework for</td><td>132</td></tr><tr><td>chinese university</td><td>132</td></tr><tr><td>national laboratory</td><td>132</td></tr><tr><td>based face</td><td>131</td></tr><tr><td>barcelona spain</td><td>131</td></tr><tr><td>information processing</td><td>131</td></tr><tr><td>research and</td><td>131</td></tr><tr><td>the use</td><td>130</td></tr><tr><td>we show</td><td>130</td></tr><tr><td>deep 
neural</td><td>130</td></tr><tr><td>learning and</td><td>130</td></tr><tr><td>to face</td><td>130</td></tr><tr><td>semantic segmentation</td><td>129</td></tr><tr><td>www frontiersin</td><td>129</td></tr><tr><td>frontiersin org</td><td>129</td></tr><tr><td>computer applications</td><td>128</td></tr><tr><td>of maryland</td><td>128</td></tr><tr><td>for object</td><td>128</td></tr><tr><td>in our</td><td>128</td></tr><tr><td>as conference</td><td>128</td></tr><tr><td>pa usa</td><td>127</td></tr><tr><td>the results</td><td>127</td></tr><tr><td>of mathematics</td><td>127</td></tr><tr><td>shanghai china</td><td>127</td></tr><tr><td>which are</td><td>126</td></tr><tr><td>over the</td><td>126</td></tr><tr><td>and recognition</td><td>126</td></tr><tr><td>at iclr</td><td>126</td></tr><tr><td>suggest that</td><td>124</td></tr><tr><td>is that</td><td>124</td></tr><tr><td>face image</td><td>124</td></tr><tr><td>for this</td><td>123</td></tr><tr><td>we are</td><td>123</td></tr><tr><td>object tracking</td><td>122</td></tr><tr><td>key words</td><td>122</td></tr><tr><td>eye tracking</td><td>122</td></tr><tr><td>for more</td><td>122</td></tr><tr><td>social cognition</td><td>122</td></tr><tr><td>the eyes</td><td>122</td></tr><tr><td>van gool</td><td>121</td></tr><tr><td>be inserted</td><td>121</td></tr><tr><td>does not</td><td>121</td></tr><tr><td>university china</td><td>121</td></tr><tr><td>face perception</td><td>121</td></tr><tr><td>for video</td><td>120</td></tr><tr><td>the following</td><td>120</td></tr><tr><td>the system</td><td>120</td></tr><tr><td>the creative</td><td>120</td></tr><tr><td>of image</td><td>120</td></tr><tr><td>of interest</td><td>120</td></tr><tr><td>the visual</td><td>120</td></tr><tr><td>models for</td><td>119</td></tr><tr><td>of texas</td><td>119</td></tr><tr><td>the first</td><td>119</td></tr><tr><td>accepted for</td><td>119</td></tr><tr><td>not the</td><td>119</td></tr><tr><td>system for</td><td>118</td></tr><tr><td>association 
for</td><td>118</td></tr><tr><td>human computer</td><td>118</td></tr><tr><td>that can</td><td>118</td></tr><tr><td>image retrieval</td><td>117</td></tr><tr><td>the ability</td><td>117</td></tr><tr><td>features and</td><td>116</td></tr><tr><td>the editor</td><td>116</td></tr><tr><td>national institute</td><td>116</td></tr><tr><td>faces and</td><td>116</td></tr><tr><td>is also</td><td>116</td></tr><tr><td>processing and</td><td>116</td></tr><tr><td>luc van</td><td>115</td></tr><tr><td>to recognize</td><td>115</td></tr><tr><td>university college</td><td>115</td></tr><tr><td>doi fpsyg</td><td>114</td></tr><tr><td>assistant professor</td><td>114</td></tr><tr><td>tsinghua university</td><td>114</td></tr><tr><td>access article</td><td>113</td></tr><tr><td>original research</td><td>113</td></tr><tr><td>for informatics</td><td>113</td></tr><tr><td>computing and</td><td>113</td></tr><tr><td>of research</td><td>113</td></tr><tr><td>the paper</td><td>112</td></tr><tr><td>in many</td><td>112</td></tr><tr><td>during the</td><td>112</td></tr><tr><td>this version</td><td>112</td></tr><tr><td>and their</td><td>112</td></tr><tr><td>provided the</td><td>111</td></tr><tr><td>for computational</td><td>111</td></tr><tr><td>in terms</td><td>111</td></tr><tr><td>and face</td><td>111</td></tr><tr><td>tokyo japan</td><td>110</td></tr><tr><td>eye gaze</td><td>110</td></tr><tr><td>sciences beijing</td><td>110</td></tr><tr><td>of autism</td><td>109</td></tr><tr><td>ieee transactions</td><td>109</td></tr><tr><td>the recognition</td><td>109</td></tr><tr><td>of data</td><td>109</td></tr><tr><td>de ned</td><td>109</td></tr><tr><td>the department</td><td>109</td></tr><tr><td>human face</td><td>109</td></tr><tr><td>support vector</td><td>109</td></tr><tr><td>college park</td><td>108</td></tr><tr><td>ku leuven</td><td>108</td></tr><tr><td>is used</td><td>108</td></tr><tr><td>engineering department</td><td>108</td></tr><tr><td>computational linguistics</td><td>108</td></tr><tr><td>of 
intelligent</td><td>108</td></tr><tr><td>teaching and</td><td>108</td></tr><tr><td>recognition based</td><td>108</td></tr><tr><td>an important</td><td>107</td></tr><tr><td>of southern</td><td>107</td></tr><tr><td>to improve</td><td>107</td></tr><tr><td>this study</td><td>107</td></tr><tr><td>of oxford</td><td>107</td></tr><tr><td>question answering</td><td>106</td></tr><tr><td>feature selection</td><td>106</td></tr><tr><td>classi ers</td><td>106</td></tr><tr><td>our method</td><td>106</td></tr><tr><td>article was</td><td>106</td></tr><tr><td>in psychology</td><td>106</td></tr><tr><td>science engineering</td><td>106</td></tr><tr><td>data and</td><td>106</td></tr><tr><td>in particular</td><td>106</td></tr><tr><td>available online</td><td>105</td></tr><tr><td>facial features</td><td>105</td></tr><tr><td>of each</td><td>105</td></tr><tr><td>of london</td><td>105</td></tr><tr><td>wang and</td><td>105</td></tr><tr><td>distribution and</td><td>105</td></tr><tr><td>https doi</td><td>105</td></tr><tr><td>where the</td><td>105</td></tr><tr><td>and video</td><td>104</td></tr><tr><td>whether they</td><td>104</td></tr><tr><td>the social</td><td>104</td></tr><tr><td>component analysis</td><td>104</td></tr><tr><td>accepted date</td><td>104</td></tr><tr><td>about the</td><td>104</td></tr><tr><td>we have</td><td>104</td></tr><tr><td>when the</td><td>104</td></tr><tr><td>the development</td><td>103</td></tr><tr><td>by using</td><td>103</td></tr><tr><td>southern california</td><td>103</td></tr><tr><td>to extract</td><td>103</td></tr><tr><td>indian institute</td><td>103</td></tr><tr><td>issn online</td><td>103</td></tr><tr><td>local binary</td><td>103</td></tr><tr><td>laboratory for</td><td>103</td></tr><tr><td>technical report</td><td>102</td></tr><tr><td>than the</td><td>102</td></tr><tr><td>found that</td><td>102</td></tr><tr><td>imperial college</td><td>102</td></tr><tr><td>correspondence tel</td><td>102</td></tr><tr><td>on image</td><td>102</td></tr><tr><td>robust 
face</td><td>102</td></tr><tr><td>zero shot</td><td>102</td></tr><tr><td>received date</td><td>102</td></tr><tr><td>al and</td><td>102</td></tr><tr><td>of new</td><td>101</td></tr><tr><td>of illinois</td><td>101</td></tr><tr><td>of all</td><td>101</td></tr><tr><td>the model</td><td>101</td></tr><tr><td>high dimensional</td><td>101</td></tr><tr><td>ai research</td><td>101</td></tr><tr><td>into the</td><td>101</td></tr><tr><td>high level</td><td>101</td></tr><tr><td>of sci</td><td>101</td></tr><tr><td>the work</td><td>100</td></tr><tr><td>the object</td><td>100</td></tr><tr><td>more information</td><td>100</td></tr><tr><td>come from</td><td>100</td></tr><tr><td>generative adversarial</td><td>100</td></tr><tr><td>in human</td><td>100</td></tr><tr><td>in real</td><td>100</td></tr><tr><td>and then</td><td>100</td></tr><tr><td>and image</td><td>100</td></tr><tr><td>discriminant analysis</td><td>100</td></tr><tr><td>people with</td><td>99</td></tr><tr><td>for multi</td><td>99</td></tr><tr><td>information about</td><td>99</td></tr><tr><td>we can</td><td>99</td></tr><tr><td>distributed under</td><td>99</td></tr><tr><td>original work</td><td>99</td></tr><tr><td>and tracking</td><td>99</td></tr><tr><td>or from</td><td>99</td></tr><tr><td>sparse representation</td><td>99</td></tr><tr><td>not only</td><td>99</td></tr><tr><td>the current</td><td>99</td></tr><tr><td>low resolution</td><td>99</td></tr><tr><td>published version</td><td>98</td></tr><tr><td>of singapore</td><td>98</td></tr><tr><td>for action</td><td>98</td></tr><tr><td>date accepted</td><td>98</td></tr><tr><td>we also</td><td>98</td></tr><tr><td>of amsterdam</td><td>98</td></tr><tr><td>spatio temporal</td><td>97</td></tr><tr><td>this material</td><td>97</td></tr><tr><td>mathematics and</td><td>97</td></tr><tr><td>university beijing</td><td>97</td></tr><tr><td>the scene</td><td>96</td></tr><tr><td>semi supervised</td><td>96</td></tr><tr><td>the training</td><td>96</td></tr><tr><td>any 
medium</td><td>96</td></tr><tr><td>however the</td><td>96</td></tr><tr><td>the last</td><td>96</td></tr><tr><td>the two</td><td>96</td></tr><tr><td>is multi</td><td>96</td></tr><tr><td>and facial</td><td>96</td></tr><tr><td>vision center</td><td>96</td></tr><tr><td>are the</td><td>96</td></tr><tr><td>on computer</td><td>96</td></tr><tr><td>north carolina</td><td>96</td></tr><tr><td>we will</td><td>95</td></tr><tr><td>urbana champaign</td><td>95</td></tr><tr><td>permits unrestricted</td><td>95</td></tr><tr><td>downloaded from</td><td>95</td></tr><tr><td>recognition from</td><td>95</td></tr><tr><td>the present</td><td>95</td></tr><tr><td>compared with</td><td>95</td></tr><tr><td>visual recognition</td><td>95</td></tr><tr><td>and cognitive</td><td>95</td></tr><tr><td>pose and</td><td>94</td></tr><tr><td>of emotional</td><td>94</td></tr><tr><td>and reproduction</td><td>94</td></tr><tr><td>may come</td><td>94</td></tr><tr><td>from public</td><td>94</td></tr><tr><td>machine vision</td><td>94</td></tr><tr><td>methods for</td><td>94</td></tr><tr><td>of applied</td><td>94</td></tr><tr><td>cognition and</td><td>94</td></tr><tr><td>california san</td><td>94</td></tr><tr><td>or private</td><td>93</td></tr><tr><td>and systems</td><td>93</td></tr><tr><td>for instance</td><td>93</td></tr><tr><td>low rank</td><td>93</td></tr><tr><td>which permits</td><td>93</td></tr><tr><td>the documents</td><td>93</td></tr><tr><td>in other</td><td>93</td></tr><tr><td>social interaction</td><td>93</td></tr><tr><td>academic editor</td><td>93</td></tr><tr><td>of washington</td><td>93</td></tr><tr><td>our approach</td><td>92</td></tr><tr><td>key lab</td><td>92</td></tr><tr><td>and electronic</td><td>92</td></tr><tr><td>medium provided</td><td>92</td></tr><tr><td>is properly</td><td>92</td></tr><tr><td>and dissemination</td><td>92</td></tr><tr><td>private research</td><td>92</td></tr><tr><td>research centers</td><td>92</td></tr><tr><td>representation for</td><td>92</td></tr><tr><td>california 
los</td><td>92</td></tr><tr><td>of michigan</td><td>92</td></tr><tr><td>psychology and</td><td>92</td></tr><tr><td>and human</td><td>92</td></tr><tr><td>metric learning</td><td>92</td></tr><tr><td>in both</td><td>91</td></tr><tr><td>multi disciplinary</td><td>91</td></tr><tr><td>disciplinary open</td><td>91</td></tr><tr><td>rchive for</td><td>91</td></tr><tr><td>the deposit</td><td>91</td></tr><tr><td>deposit and</td><td>91</td></tr><tr><td>research documents</td><td>91</td></tr><tr><td>documents whether</td><td>91</td></tr><tr><td>are pub</td><td>91</td></tr><tr><td>documents may</td><td>91</td></tr><tr><td>research institutions</td><td>91</td></tr><tr><td>in france</td><td>91</td></tr><tr><td>archive ouverte</td><td>91</td></tr><tr><td>ouverte pluridisciplinaire</td><td>91</td></tr><tr><td>pluridisciplinaire hal</td><td>91</td></tr><tr><td>hal est</td><td>91</td></tr><tr><td>la diffusion</td><td>91</td></tr><tr><td>de documents</td><td>91</td></tr><tr><td>de niveau</td><td>91</td></tr><tr><td>niveau recherche</td><td>91</td></tr><tr><td>recherche publi</td><td>91</td></tr><tr><td>ou non</td><td>91</td></tr><tr><td>recherche fran</td><td>91</td></tr><tr><td>des laboratoires</td><td>91</td></tr><tr><td>ou priv</td><td>91</td></tr><tr><td>and pattern</td><td>91</td></tr><tr><td>and machine</td><td>91</td></tr><tr><td>chapel hill</td><td>91</td></tr><tr><td>fine grained</td><td>90</td></tr><tr><td>uc berkeley</td><td>90</td></tr><tr><td>all the</td><td>90</td></tr><tr><td>training data</td><td>90</td></tr><tr><td>article distributed</td><td>90</td></tr><tr><td>facial feature</td><td>90</td></tr><tr><td>thesis submitted</td><td>90</td></tr><tr><td>within the</td><td>90</td></tr><tr><td>communication engineering</td><td>89</td></tr><tr><td>of people</td><td>89</td></tr><tr><td>that this</td><td>89</td></tr><tr><td>use distribution</td><td>89</td></tr><tr><td>the journal</td><td>89</td></tr><tr><td>please contact</td><td>89</td></tr><tr><td>to 
detect</td><td>89</td></tr><tr><td>rather than</td><td>89</td></tr><tr><td>image analysis</td><td>89</td></tr><tr><td>latex class</td><td>89</td></tr><tr><td>software engineering</td><td>88</td></tr><tr><td>of central</td><td>88</td></tr><tr><td>on face</td><td>88</td></tr><tr><td>robotics institute</td><td>88</td></tr><tr><td>in videos</td><td>88</td></tr><tr><td>expression analysis</td><td>88</td></tr><tr><td>image classi</td><td>88</td></tr><tr><td>facial emotion</td><td>88</td></tr><tr><td>emotional expressions</td><td>88</td></tr><tr><td>visual question</td><td>88</td></tr><tr><td>weakly supervised</td><td>88</td></tr><tr><td>head pose</td><td>88</td></tr><tr><td>class files</td><td>88</td></tr><tr><td>the eye</td><td>88</td></tr><tr><td>detection using</td><td>87</td></tr><tr><td>the second</td><td>87</td></tr><tr><td>unrestricted use</td><td>87</td></tr><tr><td>of different</td><td>87</td></tr><tr><td>the best</td><td>87</td></tr><tr><td>experimental results</td><td>87</td></tr><tr><td>been accepted</td><td>87</td></tr><tr><td>signi cantly</td><td>87</td></tr><tr><td>for all</td><td>87</td></tr><tr><td>of latex</td><td>87</td></tr><tr><td>artificial intelligence</td><td>86</td></tr><tr><td>to identify</td><td>86</td></tr><tr><td>michigan state</td><td>86</td></tr><tr><td>at http</td><td>86</td></tr><tr><td>of toronto</td><td>86</td></tr><tr><td>ef icient</td><td>86</td></tr><tr><td>of their</td><td>86</td></tr><tr><td>of cse</td><td>86</td></tr><tr><td>multi view</td><td>85</td></tr><tr><td>the role</td><td>85</td></tr><tr><td>zhang and</td><td>85</td></tr><tr><td>from video</td><td>85</td></tr><tr><td>queen mary</td><td>85</td></tr><tr><td>automation chinese</td><td>85</td></tr><tr><td>vol issue</td><td>85</td></tr><tr><td>eurasip journal</td><td>85</td></tr><tr><td>duke university</td><td>85</td></tr><tr><td>social and</td><td>85</td></tr><tr><td>suggests that</td><td>85</td></tr><tr><td>to faces</td><td>85</td></tr><tr><td>the 
past</td><td>85</td></tr><tr><td>and other</td><td>85</td></tr><tr><td>and that</td><td>84</td></tr><tr><td>tel fax</td><td>84</td></tr><tr><td>analysis for</td><td>84</td></tr><tr><td>use the</td><td>84</td></tr><tr><td>university usa</td><td>84</td></tr><tr><td>is one</td><td>84</td></tr><tr><td>was supported</td><td>84</td></tr><tr><td>of any</td><td>84</td></tr><tr><td>in children</td><td>84</td></tr><tr><td>face alignment</td><td>83</td></tr><tr><td>engineering the</td><td>83</td></tr><tr><td>pattern analysis</td><td>83</td></tr><tr><td>the task</td><td>83</td></tr><tr><td>files vol</td><td>83</td></tr><tr><td>multi task</td><td>82</td></tr><tr><td>state key</td><td>82</td></tr><tr><td>systems and</td><td>82</td></tr><tr><td>of machine</td><td>82</td></tr><tr><td>face veri</td><td>82</td></tr><tr><td>of objects</td><td>82</td></tr><tr><td>the neural</td><td>82</td></tr><tr><td>the input</td><td>82</td></tr><tr><td>natural language</td><td>81</td></tr><tr><td>to achieve</td><td>81</td></tr><tr><td>the feature</td><td>81</td></tr><tr><td>attribution license</td><td>81</td></tr><tr><td>georgia institute</td><td>81</td></tr><tr><td>to obtain</td><td>81</td></tr><tr><td>this research</td><td>81</td></tr><tr><td>but not</td><td>81</td></tr><tr><td>computer sciences</td><td>80</td></tr><tr><td>recognition systems</td><td>80</td></tr><tr><td>and are</td><td>80</td></tr><tr><td>the full</td><td>80</td></tr><tr><td>jiaotong university</td><td>80</td></tr><tr><td>is available</td><td>80</td></tr><tr><td>tracking and</td><td>80</td></tr><tr><td>al this</td><td>80</td></tr><tr><td>jiao tong</td><td>80</td></tr><tr><td>appearance based</td><td>80</td></tr><tr><td>learning with</td><td>80</td></tr><tr><td>the presence</td><td>80</td></tr><tr><td>in section</td><td>80</td></tr><tr><td>of brain</td><td>80</td></tr><tr><td>images are</td><td>80</td></tr><tr><td>of deep</td><td>79</td></tr><tr><td>domain adaptation</td><td>79</td></tr><tr><td>properly 
cited</td><td>79</td></tr><tr><td>shanghai jiao</td><td>79</td></tr><tr><td>and computing</td><td>79</td></tr><tr><td>to social</td><td>79</td></tr><tr><td>nd the</td><td>79</td></tr><tr><td>nanjing university</td><td>79</td></tr><tr><td>speech and</td><td>79</td></tr><tr><td>under review</td><td>79</td></tr><tr><td>de lausanne</td><td>79</td></tr><tr><td>computer interaction</td><td>79</td></tr><tr><td>supervised learning</td><td>78</td></tr><tr><td>chen and</td><td>78</td></tr><tr><td>was submitted</td><td>78</td></tr><tr><td>tong university</td><td>78</td></tr><tr><td>images with</td><td>78</td></tr><tr><td>shown that</td><td>78</td></tr><tr><td>sciences and</td><td>78</td></tr><tr><td>perception and</td><td>78</td></tr><tr><td>transfer learning</td><td>77</td></tr><tr><td>speci cally</td><td>77</td></tr><tr><td>proposed method</td><td>77</td></tr><tr><td>mary university</td><td>77</td></tr><tr><td>facial action</td><td>77</td></tr><tr><td>engineering science</td><td>77</td></tr><tr><td>in figure</td><td>77</td></tr><tr><td>brain and</td><td>77</td></tr><tr><td>de barcelona</td><td>76</td></tr><tr><td>engineering research</td><td>76</td></tr><tr><td>the effect</td><td>76</td></tr><tr><td>archives ouvertes</td><td>76</td></tr><tr><td>vision group</td><td>76</td></tr><tr><td>partial fulfillment</td><td>76</td></tr><tr><td>from single</td><td>76</td></tr><tr><td>information sciences</td><td>76</td></tr><tr><td>of surrey</td><td>76</td></tr><tr><td>and signal</td><td>76</td></tr><tr><td>of object</td><td>76</td></tr><tr><td>ieee international</td><td>76</td></tr><tr><td>for intelligent</td><td>76</td></tr><tr><td>technological university</td><td>76</td></tr><tr><td>the target</td><td>76</td></tr><tr><td>dimensionality reduction</td><td>76</td></tr><tr><td>received april</td><td>76</td></tr><tr><td>asd and</td><td>76</td></tr><tr><td>improve the</td><td>76</td></tr><tr><td>the brain</td><td>76</td></tr><tr><td>shuicheng yan</td><td>75</td></tr><tr><td>id 
pages</td><td>75</td></tr><tr><td>for robust</td><td>75</td></tr><tr><td>re identification</td><td>75</td></tr><tr><td>of others</td><td>75</td></tr><tr><td>noname manuscript</td><td>75</td></tr><tr><td>and control</td><td>75</td></tr><tr><td>these results</td><td>75</td></tr><tr><td>but also</td><td>75</td></tr><tr><td>human faces</td><td>75</td></tr><tr><td>the user</td><td>75</td></tr><tr><td>paris france</td><td>75</td></tr><tr><td>authors and</td><td>75</td></tr><tr><td>among the</td><td>74</td></tr><tr><td>the state</td><td>74</td></tr><tr><td>learning based</td><td>74</td></tr><tr><td>york university</td><td>74</td></tr><tr><td>dissertation submitted</td><td>74</td></tr><tr><td>model based</td><td>74</td></tr><tr><td>which the</td><td>74</td></tr><tr><td>issn print</td><td>74</td></tr><tr><td>technische universit</td><td>74</td></tr><tr><td>machine intelligence</td><td>74</td></tr><tr><td>to have</td><td>74</td></tr><tr><td>age estimation</td><td>74</td></tr><tr><td>to whom</td><td>74</td></tr><tr><td>to end</td><td>74</td></tr><tr><td>cation and</td><td>74</td></tr><tr><td>the right</td><td>74</td></tr><tr><td>deep convolutional</td><td>73</td></tr><tr><td>central florida</td><td>73</td></tr><tr><td>the images</td><td>73</td></tr><tr><td>to address</td><td>73</td></tr><tr><td>more than</td><td>73</td></tr><tr><td>may not</td><td>73</td></tr><tr><td>and pose</td><td>73</td></tr><tr><td>adversarial networks</td><td>73</td></tr><tr><td>dictionary learning</td><td>73</td></tr><tr><td>bernt schiele</td><td>73</td></tr><tr><td>this journal</td><td>73</td></tr><tr><td>to solve</td><td>73</td></tr><tr><td>which can</td><td>73</td></tr><tr><td>to image</td><td>73</td></tr><tr><td>if the</td><td>73</td></tr><tr><td>the effects</td><td>73</td></tr><tr><td>of ece</td><td>73</td></tr><tr><td>california berkeley</td><td>73</td></tr><tr><td>berlin germany</td><td>73</td></tr><tr><td>for semantic</td><td>72</td></tr><tr><td>multi modal</td><td>72</td></tr><tr><td>dataset 
for</td><td>72</td></tr><tr><td>cornell university</td><td>72</td></tr><tr><td>vision laboratory</td><td>72</td></tr><tr><td>the study</td><td>72</td></tr><tr><td>the mouth</td><td>72</td></tr><tr><td>features are</td><td>72</td></tr><tr><td>the accuracy</td><td>72</td></tr><tr><td>li and</td><td>72</td></tr><tr><td>springer science</td><td>71</td></tr><tr><td>of use</td><td>71</td></tr><tr><td>if you</td><td>71</td></tr><tr><td>the public</td><td>71</td></tr><tr><td>at urbana</td><td>71</td></tr><tr><td>is more</td><td>71</td></tr><tr><td>this problem</td><td>71</td></tr><tr><td>sagepub com</td><td>71</td></tr><tr><td>australian national</td><td>71</td></tr><tr><td>in facial</td><td>71</td></tr><tr><td>peking university</td><td>71</td></tr><tr><td>the context</td><td>71</td></tr><tr><td>principal component</td><td>71</td></tr><tr><td>demonstrate that</td><td>71</td></tr><tr><td>lausanne switzerland</td><td>71</td></tr><tr><td>it can</td><td>71</td></tr><tr><td>of wisconsin</td><td>71</td></tr><tr><td>magnetic resonance</td><td>71</td></tr><tr><td>seoul korea</td><td>71</td></tr><tr><td>science business</td><td>70</td></tr><tr><td>business media</td><td>70</td></tr><tr><td>multi scale</td><td>70</td></tr><tr><td>information systems</td><td>70</td></tr><tr><td>low level</td><td>70</td></tr><tr><td>xiaogang wang</td><td>70</td></tr><tr><td>in contrast</td><td>70</td></tr><tr><td>based methods</td><td>70</td></tr><tr><td>research group</td><td>70</td></tr><tr><td>no august</td><td>70</td></tr><tr><td>to facial</td><td>70</td></tr><tr><td>with high</td><td>70</td></tr><tr><td>in individuals</td><td>70</td></tr><tr><td>super resolution</td><td>70</td></tr><tr><td>received july</td><td>70</td></tr><tr><td>optical flow</td><td>70</td></tr><tr><td>for any</td><td>70</td></tr><tr><td>de cits</td><td>70</td></tr><tr><td>singapore singapore</td><td>70</td></tr><tr><td>for publication</td><td>70</td></tr><tr><td>or other</td><td>69</td></tr><tr><td>we 
found</td><td>69</td></tr><tr><td>vision lab</td><td>69</td></tr><tr><td>been proposed</td><td>69</td></tr><tr><td>of features</td><td>69</td></tr><tr><td>fran ais</td><td>69</td></tr><tr><td>autism research</td><td>69</td></tr><tr><td>of software</td><td>69</td></tr><tr><td>nanyang technological</td><td>69</td></tr><tr><td>liu and</td><td>69</td></tr><tr><td>gaze direction</td><td>69</td></tr><tr><td>whom correspondence</td><td>69</td></tr><tr><td>adults with</td><td>69</td></tr><tr><td>eye contact</td><td>69</td></tr><tr><td>resonance imaging</td><td>69</td></tr><tr><td>of north</td><td>69</td></tr><tr><td>learning from</td><td>69</td></tr><tr><td>to publication</td><td>68</td></tr><tr><td>single image</td><td>68</td></tr><tr><td>invariant face</td><td>68</td></tr><tr><td>activity recognition</td><td>68</td></tr><tr><td>stefanos zafeiriou</td><td>68</td></tr><tr><td>intelligent information</td><td>68</td></tr><tr><td>specialty section</td><td>68</td></tr><tr><td>or the</td><td>68</td></tr><tr><td>based image</td><td>68</td></tr><tr><td>to their</td><td>68</td></tr><tr><td>in image</td><td>68</td></tr><tr><td>taipei taiwan</td><td>68</td></tr><tr><td>target tracking</td><td>68</td></tr><tr><td>engineering college</td><td>68</td></tr><tr><td>not been</td><td>68</td></tr><tr><td>for computer</td><td>67</td></tr><tr><td>tx usa</td><td>67</td></tr><tr><td>data set</td><td>67</td></tr><tr><td>electronic and</td><td>67</td></tr><tr><td>disorder asd</td><td>67</td></tr><tr><td>facial landmark</td><td>67</td></tr><tr><td>adobe research</td><td>67</td></tr><tr><td>to its</td><td>67</td></tr><tr><td>typically developing</td><td>67</td></tr><tr><td>of pennsylvania</td><td>67</td></tr><tr><td>zurich switzerland</td><td>67</td></tr><tr><td>dr ing</td><td>67</td></tr><tr><td>high resolution</td><td>67</td></tr><tr><td>has not</td><td>67</td></tr><tr><td>maryland college</td><td>66</td></tr><tr><td>publishing corporation</td><td>66</td></tr><tr><td>of 
training</td><td>66</td></tr><tr><td>accepted june</td><td>66</td></tr><tr><td>of doctor</td><td>66</td></tr><tr><td>of eye</td><td>66</td></tr><tr><td>information from</td><td>66</td></tr><tr><td>automatic face</td><td>66</td></tr><tr><td>ecole polytechnique</td><td>66</td></tr><tr><td>the video</td><td>66</td></tr><tr><td>binary pattern</td><td>66</td></tr><tr><td>model and</td><td>66</td></tr><tr><td>in their</td><td>66</td></tr><tr><td>received may</td><td>66</td></tr><tr><td>been shown</td><td>66</td></tr><tr><td>social interactions</td><td>66</td></tr><tr><td>in revised</td><td>66</td></tr><tr><td>revised form</td><td>66</td></tr><tr><td>montr eal</td><td>65</td></tr><tr><td>algorithm for</td><td>65</td></tr><tr><td>is often</td><td>65</td></tr><tr><td>hindawi publishing</td><td>65</td></tr><tr><td>ground truth</td><td>65</td></tr><tr><td>of cognitive</td><td>65</td></tr><tr><td>shot learning</td><td>65</td></tr><tr><td>for large</td><td>65</td></tr><tr><td>recent years</td><td>65</td></tr><tr><td>double blind</td><td>65</td></tr><tr><td>with respect</td><td>65</td></tr><tr><td>expression and</td><td>65</td></tr><tr><td>have shown</td><td>65</td></tr><tr><td>karlsruhe germany</td><td>65</td></tr><tr><td>on their</td><td>65</td></tr><tr><td>the research</td><td>65</td></tr><tr><td>columbia university</td><td>65</td></tr><tr><td>associate professor</td><td>65</td></tr><tr><td>facial images</td><td>64</td></tr><tr><td>both the</td><td>64</td></tr><tr><td>dif cult</td><td>64</td></tr><tr><td>human action</td><td>64</td></tr><tr><td>technology cas</td><td>64</td></tr><tr><td>video surveillance</td><td>64</td></tr><tr><td>received december</td><td>64</td></tr><tr><td>the world</td><td>64</td></tr><tr><td>national taiwan</td><td>64</td></tr><tr><td>recognition under</td><td>64</td></tr><tr><td>intelligence and</td><td>64</td></tr><tr><td>video based</td><td>64</td></tr><tr><td>multi target</td><td>64</td></tr><tr><td>and applied</td><td>64</td></tr><tr><td>detection 
with</td><td>63</td></tr><tr><td>autism and</td><td>63</td></tr><tr><td>this document</td><td>63</td></tr><tr><td>believe that</td><td>63</td></tr><tr><td>human detection</td><td>63</td></tr><tr><td>and more</td><td>63</td></tr><tr><td>university shanghai</td><td>63</td></tr><tr><td>personal use</td><td>63</td></tr><tr><td>wa usa</td><td>63</td></tr><tr><td>cation using</td><td>63</td></tr><tr><td>and intelligent</td><td>63</td></tr><tr><td>ne grained</td><td>63</td></tr><tr><td>on pattern</td><td>63</td></tr><tr><td>applied sciences</td><td>63</td></tr><tr><td>while the</td><td>63</td></tr><tr><td>idiap research</td><td>63</td></tr><tr><td>extracted from</td><td>63</td></tr><tr><td>cation with</td><td>63</td></tr><tr><td>the dataset</td><td>63</td></tr><tr><td>received march</td><td>63</td></tr><tr><td>received june</td><td>62</td></tr><tr><td>multi object</td><td>62</td></tr><tr><td>de montr</td><td>62</td></tr><tr><td>of experimental</td><td>62</td></tr><tr><td>of multiple</td><td>62</td></tr><tr><td>sciences university</td><td>62</td></tr><tr><td>nearest neighbor</td><td>62</td></tr><tr><td>engineering national</td><td>62</td></tr><tr><td>taiwan university</td><td>62</td></tr><tr><td>the goal</td><td>62</td></tr><tr><td>we used</td><td>62</td></tr><tr><td>representations for</td><td>62</td></tr><tr><td>based approach</td><td>62</td></tr><tr><td>data driven</td><td>62</td></tr><tr><td>the computer</td><td>62</td></tr><tr><td>to reduce</td><td>62</td></tr><tr><td>vector machine</td><td>62</td></tr><tr><td>feature based</td><td>62</td></tr><tr><td>june accepted</td><td>61</td></tr><tr><td>at www</td><td>61</td></tr><tr><td>computer graphics</td><td>61</td></tr><tr><td>of tokyo</td><td>61</td></tr><tr><td>international joint</td><td>61</td></tr><tr><td>objects and</td><td>61</td></tr><tr><td>images for</td><td>61</td></tr><tr><td>large number</td><td>61</td></tr><tr><td>shiguang shan</td><td>61</td></tr><tr><td>shaogang gong</td><td>61</td></tr><tr><td>received 
october</td><td>61</td></tr><tr><td>an object</td><td>61</td></tr><tr><td>this thesis</td><td>61</td></tr><tr><td>are more</td><td>61</td></tr><tr><td>communication and</td><td>61</td></tr><tr><td>with deep</td><td>61</td></tr><tr><td>recognition has</td><td>61</td></tr><tr><td>the appearance</td><td>61</td></tr><tr><td>accepted march</td><td>61</td></tr><tr><td>of two</td><td>61</td></tr><tr><td>and emotion</td><td>61</td></tr><tr><td>human robot</td><td>61</td></tr><tr><td>as follows</td><td>61</td></tr><tr><td>california institute</td><td>61</td></tr><tr><td>of computational</td><td>61</td></tr><tr><td>that they</td><td>60</td></tr><tr><td>peer reviewed</td><td>60</td></tr><tr><td>words and</td><td>60</td></tr><tr><td>the shape</td><td>60</td></tr><tr><td>in each</td><td>60</td></tr><tr><td>th international</td><td>60</td></tr><tr><td>is still</td><td>60</td></tr><tr><td>using deep</td><td>60</td></tr><tr><td>and electrical</td><td>60</td></tr><tr><td>emotional facial</td><td>60</td></tr><tr><td>of its</td><td>60</td></tr><tr><td>showed that</td><td>60</td></tr><tr><td>ann arbor</td><td>60</td></tr><tr><td>these methods</td><td>60</td></tr><tr><td>are used</td><td>60</td></tr><tr><td>stony brook</td><td>60</td></tr><tr><td>supplementary material</td><td>60</td></tr><tr><td>illumination and</td><td>60</td></tr><tr><td>received january</td><td>60</td></tr><tr><td>such that</td><td>60</td></tr><tr><td>linear discriminant</td><td>60</td></tr><tr><td>subspace clustering</td><td>59</td></tr><tr><td>to determine</td><td>59</td></tr><tr><td>il usa</td><td>59</td></tr><tr><td>published october</td><td>59</td></tr><tr><td>entific research</td><td>59</td></tr><tr><td>manant des</td><td>59</td></tr><tr><td>des tablissements</td><td>59</td></tr><tr><td>tablissements enseignement</td><td>59</td></tr><tr><td>ou trangers</td><td>59</td></tr><tr><td>trangers des</td><td>59</td></tr><tr><td>material for</td><td>59</td></tr><tr><td>joint conference</td><td>59</td></tr><tr><td>wide 
range</td><td>59</td></tr><tr><td>nanjing china</td><td>59</td></tr><tr><td>normal university</td><td>59</td></tr><tr><td>for learning</td><td>59</td></tr><tr><td>for real</td><td>59</td></tr><tr><td>visual information</td><td>59</td></tr><tr><td>that our</td><td>59</td></tr></table></body></html> \ No newline at end of file
diff --git a/scraper/reports/pdf_unknown_terms.html b/scraper/reports/pdf_unknown_terms.html
new file mode 100644
index 00000000..d19c2bb0
--- /dev/null
+++ b/scraper/reports/pdf_unknown_terms.html
@@ -0,0 +1 @@
+<!doctype html><html><head><meta charset='utf-8'><title>PDF Report: Unknown Terms</title><link rel='stylesheet' href='reports.css'></head><body><h2>PDF Report: Unknown Terms</h2><table border='1' cellpadding='3' cellspacing='3'><tr><td>the</td><td>34336</td></tr><tr><td>and</td><td>31058</td></tr><tr><td>for</td><td>13577</td></tr><tr><td>university</td><td>11911</td></tr><tr><td>with</td><td>6287</td></tr><tr><td>face</td><td>6019</td></tr><tr><td>recognition</td><td>5389</td></tr><tr><td>that</td><td>5185</td></tr><tr><td>computer</td><td>5155</td></tr><tr><td>this</td><td>4892</td></tr><tr><td>department</td><td>4508</td></tr><tr><td>science</td><td>4412</td></tr><tr><td>research</td><td>4154</td></tr><tr><td>from</td><td>4031</td></tr><tr><td>are</td><td>3994</td></tr><tr><td>engineering</td><td>3678</td></tr><tr><td>image</td><td>3423</td></tr><tr><td>facial</td><td>3321</td></tr><tr><td>technology</td><td>3113</td></tr><tr><td>based</td><td>3073</td></tr><tr><td>learning</td><td>2938</td></tr><tr><td>institute</td><td>2876</td></tr><tr><td>using</td><td>2856</td></tr><tr><td>information</td><td>2807</td></tr><tr><td>ieee</td><td>2325</td></tr><tr><td>social</td><td>2209</td></tr><tr><td>human</td><td>2168</td></tr><tr><td>data</td><td>2067</td></tr><tr><td>usa</td><td>1987</td></tr><tr><td>images</td><td>1975</td></tr><tr><td>detection</td><td>1944</td></tr><tr><td>china</td><td>1918</td></tr><tr><td>school</td><td>1894</td></tr><tr><td>which</td><td>1822</td></tr><tr><td>visual</td><td>1819</td></tr><tr><td>have</td><td>1814</td></tr><tr><td>can</td><td>1670</td></tr><tr><td>object</td><td>1633</td></tr><tr><td>faces</td><td>1611</td></tr><tr><td>has</td><td>1600</td></tr><tr><td>processing</td><td>1596</td></tr><tr><td>these</td><td>1594</td></tr><tr><td>our</td><td>1573</td></tr><tr><td>model</td><td>1562</td></tr><tr><td>vision</td><td>1555</td></tr><tr><td>cation</td><td>1554</td></tr><tr><td>not</td><td>1543</td></tr><tr><td>video</td><td>1542</td></tr>
<tr><td>article</td><td>1507</td></tr><tr><td>features</td><td>1501</td></tr><tr><td>feature</td><td>1478</td></tr><tr><td>doi</td><td>1477</td></tr><tr><td>journal</td><td>1472</td></tr><tr><td>may</td><td>1450</td></tr><tr><td>such</td><td>1439</td></tr><tr><td>analysis</td><td>1433</td></tr><tr><td>member</td><td>1428</td></tr><tr><td>systems</td><td>1362</td></tr><tr><td>expression</td><td>1348</td></tr><tr><td>system</td><td>1338</td></tr><tr><td>more</td><td>1306</td></tr><tr><td>deep</td><td>1296</td></tr><tr><td>http</td><td>1290</td></tr><tr><td>been</td><td>1281</td></tr><tr><td>center</td><td>1268</td></tr><tr><td>neural</td><td>1235</td></tr><tr><td>published</td><td>1221</td></tr><tr><td>sciences</td><td>1191</td></tr><tr><td>multi</td><td>1172</td></tr><tr><td>wang</td><td>1157</td></tr><tr><td>www</td><td>1140</td></tr><tr><td>paper</td><td>1135</td></tr><tr><td>new</td><td>1130</td></tr><tr><td>electrical</td><td>1124</td></tr><tr><td>international</td><td>1121</td></tr><tr><td>autism</td><td>1106</td></tr><tr><td>was</td><td>1095</td></tr><tr><td>college</td><td>1091</td></tr><tr><td>person</td><td>1086</td></tr><tr><td>use</td><td>1084</td></tr><tr><td>one</td><td>1077</td></tr><tr><td>accepted</td><td>1062</td></tr><tr><td>tracking</td><td>1033</td></tr><tr><td>other</td><td>1028</td></tr><tr><td>received</td><td>1010</td></tr><tr><td>their</td><td>1010</td></tr><tr><td>national</td><td>1006</td></tr><tr><td>expressions</td><td>1001</td></tr><tr><td>work</td><td>998</td></tr><tr><td>psychology</td><td>996</td></tr><tr><td>pose</td><td>994</td></tr><tr><td>two</td><td>992</td></tr><tr><td>time</td><td>975</td></tr><tr><td>networks</td><td>970</td></tr><tr><td>models</td><td>970</td></tr><tr><td>emotion</td><td>968</td></tr><tr><td>results</td><td>964</td></tr><tr><td>different</td><td>957</td></tr><tr><td>approach</td><td>956</td></tr><tr><td>high</td><td>948</td></tr><tr><td>classi</td><td>944</td></tr><tr><td>method</td><td>942</td></tr><tr><td>l
aboratory</td><td>941</td></tr><tr><td>all</td><td>940</td></tr><tr><td>used</td><td>938</td></tr><tr><td>org</td><td>934</td></tr><tr><td>network</td><td>931</td></tr><tr><td>methods</td><td>917</td></tr><tr><td>applications</td><td>915</td></tr><tr><td>group</td><td>910</td></tr><tr><td>state</td><td>907</td></tr><tr><td>performance</td><td>902</td></tr><tr><td>zhang</td><td>891</td></tr><tr><td>each</td><td>880</td></tr><tr><td>between</td><td>865</td></tr><tr><td>also</td><td>860</td></tr><tr><td>india</td><td>854</td></tr><tr><td>germany</td><td>854</td></tr><tr><td>were</td><td>844</td></tr><tr><td>people</td><td>827</td></tr><tr><td>computing</td><td>827</td></tr><tr><td>chen</td><td>814</td></tr><tr><td>training</td><td>810</td></tr><tr><td>than</td><td>806</td></tr><tr><td>brain</td><td>804</td></tr><tr><td>study</td><td>804</td></tr><tr><td>under</td><td>800</td></tr><tr><td>pattern</td><td>797</td></tr><tr><td>california</td><td>796</td></tr><tr><td>when</td><td>792</td></tr><tr><td>emotional</td><td>772</td></tr><tr><td>large</td><td>771</td></tr><tr><td>access</td><td>763</td></tr><tr><td>author</td><td>762</td></tr><tr><td>representation</td><td>762</td></tr><tr><td>online</td><td>754</td></tr><tr><td>task</td><td>753</td></tr><tr><td>asd</td><td>753</td></tr><tr><td>yang</td><td>748</td></tr><tr><td>they</td><td>745</td></tr><tr><td>introduction</td><td>740</td></tr><tr><td>however</td><td>734</td></tr><tr><td>set</td><td>733</td></tr><tr><td>com</td><td>730</td></tr><tr><td>email</td><td>730</td></tr><tr><td>real</td><td>730</td></tr><tr><td>studies</td><td>727</td></tr><tr><td>local</td><td>726</td></tr><tr><td>mail</td><td>725</td></tr><tr><td>estimation</td><td>723</td></tr><tr><td>machine</td><td>721</td></tr><tr><td>but</td><td>716</td></tr><tr><td>number</td><td>715</td></tr><tr><td>beijing</td><td>712</td></tr><tr><td>liu</td><td>710</td></tr><tr><td>level</td><td>710</td></tr><tr><td>dataset</td><td>709</td></tr><tr><td>problem</td><td>708</t
d></tr><tr><td>eye</td><td>705</td></tr><tr><td>identi</td><td>704</td></tr><tr><td>its</td><td>698</td></tr><tr><td>open</td><td>696</td></tr><tr><td>key</td><td>690</td></tr><tr><td>action</td><td>687</td></tr><tr><td>gaze</td><td>685</td></tr><tr><td>most</td><td>678</td></tr><tr><td>proposed</td><td>677</td></tr><tr><td>correspondence</td><td>671</td></tr><tr><td>vol</td><td>669</td></tr><tr><td>both</td><td>665</td></tr><tr><td>universit</td><td>664</td></tr><tr><td>perception</td><td>660</td></tr><tr><td>low</td><td>655</td></tr><tr><td>france</td><td>651</td></tr><tr><td>conference</td><td>648</td></tr><tr><td>lab</td><td>648</td></tr><tr><td>multiple</td><td>640</td></tr><tr><td>issn</td><td>634</td></tr><tr><td>into</td><td>630</td></tr><tr><td>volume</td><td>626</td></tr><tr><td>submitted</td><td>626</td></tr><tr><td>will</td><td>624</td></tr><tr><td>individuals</td><td>617</td></tr><tr><td>non</td><td>616</td></tr><tr><td>semantic</td><td>615</td></tr><tr><td>many</td><td>612</td></tr><tr><td>robust</td><td>609</td></tr><tr><td>dept</td><td>604</td></tr><tr><td>attention</td><td>600</td></tr><tr><td>via</td><td>598</td></tr><tr><td>any</td><td>593</td></tr><tr><td>how</td><td>588</td></tr><tr><td>part</td><td>586</td></tr><tr><td>scene</td><td>585</td></tr><tr><td>only</td><td>581</td></tr><tr><td>objects</td><td>574</td></tr><tr><td>version</td><td>571</td></tr><tr><td>children</td><td>564</td></tr><tr><td>intelligence</td><td>559</td></tr><tr><td>segmentation</td><td>557</td></tr><tr><td>motion</td><td>550</td></tr><tr><td>keywords</td><td>545</td></tr><tr><td>available</td><td>544</td></tr><tr><td>there</td><td>542</td></tr><tr><td>well</td><td>542</td></tr><tr><td>london</td><td>533</td></tr><tr><td>faculty</td><td>532</td></tr><tr><td>scale</td><td>529</td></tr><tr><td>cognitive</td><td>525</td></tr><tr><td>some</td><td>516</td></tr><tr><td>where</td><td>514</td></tr><tr><td>copyright</td><td>514</td></tr><tr><td>review</td><td>514</td></tr><tr><td>s
ingle</td><td>508</td></tr><tr><td>shape</td><td>508</td></tr><tr><td>development</td><td>506</td></tr><tr><td>algorithm</td><td>505</td></tr><tr><td>class</td><td>504</td></tr><tr><td>illumination</td><td>504</td></tr><tr><td>should</td><td>503</td></tr><tr><td>show</td><td>501</td></tr><tr><td>automatic</td><td>498</td></tr><tr><td>chinese</td><td>497</td></tr><tr><td>view</td><td>497</td></tr><tr><td>space</td><td>497</td></tr><tr><td>authors</td><td>495</td></tr><tr><td>while</td><td>494</td></tr><tr><td>degree</td><td>493</td></tr><tr><td>over</td><td>491</td></tr><tr><td>communication</td><td>485</td></tr><tr><td>appearance</td><td>483</td></tr><tr><td>issue</td><td>478</td></tr><tr><td>algorithms</td><td>478</td></tr><tr><td>videos</td><td>477</td></tr><tr><td>hong</td><td>476</td></tr><tr><td>hal</td><td>476</td></tr><tr><td>june</td><td>474</td></tr><tr><td>about</td><td>474</td></tr><tr><td>figure</td><td>474</td></tr><tr><td>interaction</td><td>474</td></tr><tr><td>behavior</td><td>471</td></tr><tr><td>intelligent</td><td>470</td></tr><tr><td>convolutional</td><td>469</td></tr><tr><td>during</td><td>467</td></tr><tr><td>informatics</td><td>465</td></tr><tr><td>given</td><td>464</td></tr><tr><td>same</td><td>464</td></tr><tr><td>computational</td><td>463</td></tr><tr><td>japan</td><td>463</td></tr><tr><td>date</td><td>460</td></tr><tr><td>domain</td><td>460</td></tr><tr><td>publication</td><td>458</td></tr><tr><td>age</td><td>458</td></tr><tr><td>content</td><td>455</td></tr><tr><td>tel</td><td>453</td></tr><tr><td>temporal</td><td>452</td></tr><tr><td>database</td><td>440</td></tr><tr><td>original</td><td>440</td></tr><tr><td>understanding</td><td>438</td></tr><tr><td>then</td><td>437</td></tr><tr><td>novel</td><td>435</td></tr><tr><td>first</td><td>433</td></tr><tr><td>present</td><td>433</td></tr><tr><td>techniques</td><td>431</td></tr><tr><td>thesis</td><td>429</td></tr><tr><td>stimuli</td><td>429</td></tr><tr><td>august</td><td>427</td></tr><tr><td>th
rough</td><td>426</td></tr><tr><td>classification</td><td>423</td></tr><tr><td>identity</td><td>418</td></tr><tr><td>framework</td><td>418</td></tr><tr><td>target</td><td>416</td></tr><tr><td>recent</td><td>414</td></tr><tr><td>sparse</td><td>413</td></tr><tr><td>professor</td><td>413</td></tr><tr><td>search</td><td>412</td></tr><tr><td>requirements</td><td>411</td></tr><tr><td>emotions</td><td>411</td></tr><tr><td>academy</td><td>410</td></tr><tr><td>student</td><td>407</td></tr><tr><td>september</td><td>406</td></tr><tr><td>memory</td><td>406</td></tr><tr><td>joint</td><td>405</td></tr><tr><td>spectrum</td><td>405</td></tr><tr><td>applied</td><td>405</td></tr><tr><td>important</td><td>405</td></tr><tr><td>huang</td><td>404</td></tr><tr><td>supervised</td><td>404</td></tr><tr><td>tasks</td><td>404</td></tr><tr><td>des</td><td>403</td></tr><tr><td>including</td><td>402</td></tr><tr><td>linear</td><td>401</td></tr><tr><td>whether</td><td>401</td></tr><tr><td>speci</td><td>400</td></tr><tr><td>participants</td><td>400</td></tr><tr><td>related</td><td>400</td></tr><tr><td>neuroscience</td><td>400</td></tr><tr><td>language</td><td>398</td></tr><tr><td>carnegie</td><td>398</td></tr><tr><td>italy</td><td>398</td></tr><tr><td>process</td><td>397</td></tr><tr><td>april</td><td>395</td></tr><tr><td>signal</td><td>395</td></tr><tr><td>section</td><td>394</td></tr><tr><td>mellon</td><td>393</td></tr><tr><td>representations</td><td>392</td></tr><tr><td>centre</td><td>392</td></tr><tr><td>australia</td><td>391</td></tr><tr><td>general</td><td>388</td></tr><tr><td>matching</td><td>388</td></tr><tr><td>canada</td><td>388</td></tr><tr><td>david</td><td>387</td></tr><tr><td>example</td><td>387</td></tr><tr><td>senior</td><td>387</td></tr><tr><td>electronic</td><td>385</td></tr><tr><td>accuracy</td><td>385</td></tr><tr><td>amygdala</td><td>385</td></tr><tr><td>partial</td><td>384</td></tr><tr><td>shown</td><td>382</td></tr><tr><td>context</td><td>381</td></tr><tr><td>michael</td><td>
381</td></tr><tr><td>july</td><td>381</td></tr><tr><td>pedestrian</td><td>380</td></tr><tr><td>imaging</td><td>379</td></tr><tr><td>biometric</td><td>378</td></tr><tr><td>electronics</td><td>378</td></tr><tr><td>wei</td><td>377</td></tr><tr><td>united</td><td>377</td></tr><tr><td>input</td><td>377</td></tr><tr><td>extraction</td><td>377</td></tr><tr><td>regions</td><td>376</td></tr><tr><td>camera</td><td>375</td></tr><tr><td>spain</td><td>370</td></tr><tr><td>rst</td><td>370</td></tr><tr><td>advanced</td><td>369</td></tr><tr><td>found</td><td>369</td></tr><tr><td>van</td><td>369</td></tr><tr><td>york</td><td>367</td></tr><tr><td>control</td><td>367</td></tr><tr><td>retrieval</td><td>366</td></tr><tr><td>like</td><td>366</td></tr><tr><td>patterns</td><td>365</td></tr><tr><td>inc</td><td>365</td></tr><tr><td>activity</td><td>364</td></tr><tr><td>singapore</td><td>363</td></tr><tr><td>march</td><td>363</td></tr><tr><td>prof</td><td>362</td></tr><tr><td>natural</td><td>362</td></tr><tr><td>kong</td><td>362</td></tr><tr><td>three</td><td>362</td></tr><tr><td>humans</td><td>361</td></tr><tr><td>several</td><td>360</td></tr><tr><td>technical</td><td>359</td></tr><tr><td>resolution</td><td>358</td></tr><tr><td>dynamic</td><td>358</td></tr><tr><td>order</td><td>358</td></tr><tr><td>attributes</td><td>358</td></tr><tr><td>application</td><td>357</td></tr><tr><td>approaches</td><td>355</td></tr><tr><td>you</td><td>355</td></tr><tr><td>terms</td><td>354</td></tr><tr><td>ing</td><td>353</td></tr><tr><td>october</td><td>353</td></tr><tr><td>december</td><td>353</td></tr><tr><td>datasets</td><td>351</td></tr><tr><td>proceedings</td><td>351</td></tr><tr><td>without</td><td>350</td></tr><tr><td>world</td><td>350</td></tr><tr><td>attribute</td><td>348</td></tr><tr><td>rights</td><td>346</td></tr><tr><td>conditions</td><td>346</td></tr><tr><td>clustering</td><td>345</td></tr><tr><td>see</td><td>345</td></tr><tr><td>https</td><td>343</td></tr><tr><td>evaluation</td><td>342</td></tr><tr
><td>con</td><td>341</td></tr><tr><td>even</td><td>340</td></tr><tr><td>cross</td><td>340</td></tr><tr><td>revised</td><td>339</td></tr><tr><td>disorders</td><td>338</td></tr><tr><td>vector</td><td>338</td></tr><tr><td>color</td><td>337</td></tr><tr><td>role</td><td>337</td></tr><tr><td>medical</td><td>336</td></tr><tr><td>disorder</td><td>334</td></tr><tr><td>switzerland</td><td>334</td></tr><tr><td>modeling</td><td>334</td></tr><tr><td>surveillance</td><td>333</td></tr><tr><td>dimensional</td><td>333</td></tr><tr><td>tion</td><td>332</td></tr><tr><td>graduate</td><td>331</td></tr><tr><td>presented</td><td>330</td></tr><tr><td>due</td><td>330</td></tr><tr><td>region</td><td>329</td></tr><tr><td>matrix</td><td>326</td></tr><tr><td>january</td><td>326</td></tr><tr><td>individual</td><td>326</td></tr><tr><td>compared</td><td>326</td></tr><tr><td>within</td><td>325</td></tr><tr><td>kim</td><td>323</td></tr><tr><td>design</td><td>322</td></tr><tr><td>korea</td><td>321</td></tr><tr><td>netherlands</td><td>321</td></tr><tr><td>cognition</td><td>321</td></tr><tr><td>november</td><td>320</td></tr><tr><td>february</td><td>320</td></tr><tr><td>fellow</td><td>320</td></tr><tr><td>very</td><td>320</td></tr><tr><td>form</td><td>319</td></tr><tr><td>problems</td><td>319</td></tr><tr><td>robotics</td><td>317</td></tr><tr><td>provide</td><td>317</td></tr><tr><td>function</td><td>317</td></tr><tr><td>binary</td><td>316</td></tr><tr><td>cnn</td><td>316</td></tr><tr><td>distribution</td><td>314</td></tr><tr><td>security</td><td>314</td></tr><tr><td>eyes</td><td>312</td></tr><tr><td>made</td><td>311</td></tr><tr><td>differences</td><td>311</td></tr><tr><td>localization</td><td>310</td></tr><tr><td>often</td><td>310</td></tr><tr><td>across</td><td>310</td></tr><tr><td>experimental</td><td>309</td></tr><tr><td>because</td><td>309</td></tr><tr><td>user</td><td>309</td></tr><tr><td>what</td><td>309</td></tr><tr><td>media</td><td>308</td></tr><tr><td>documents</td><td>305</td></tr><tr><td>e
du</td><td>305</td></tr><tr><td>states</td><td>305</td></tr><tr><td>provided</td><td>304</td></tr><tr><td>citation</td><td>304</td></tr><tr><td>make</td><td>304</td></tr><tr><td>others</td><td>304</td></tr><tr><td>report</td><td>303</td></tr><tr><td>evidence</td><td>303</td></tr><tr><td>lin</td><td>303</td></tr><tr><td>depth</td><td>303</td></tr><tr><td>corresponding</td><td>302</td></tr><tr><td>similarity</td><td>301</td></tr><tr><td>question</td><td>301</td></tr><tr><td>shanghai</td><td>301</td></tr><tr><td>main</td><td>300</td></tr><tr><td>body</td><td>300</td></tr><tr><td>spatial</td><td>299</td></tr><tr><td>response</td><td>298</td></tr><tr><td>sun</td><td>296</td></tr><tr><td>those</td><td>296</td></tr><tr><td>zhu</td><td>294</td></tr><tr><td>park</td><td>292</td></tr><tr><td>structure</td><td>292</td></tr><tr><td>selection</td><td>291</td></tr><tr><td>lee</td><td>291</td></tr><tr><td>years</td><td>291</td></tr><tr><td>please</td><td>290</td></tr><tr><td>biometrics</td><td>289</td></tr><tr><td>effect</td><td>289</td></tr><tr><td>addressed</td><td>289</td></tr><tr><td>similar</td><td>289</td></tr><tr><td>pittsburgh</td><td>288</td></tr><tr><td>associated</td><td>288</td></tr><tr><td>san</td><td>288</td></tr><tr><td>effects</td><td>288</td></tr><tr><td>association</td><td>288</td></tr><tr><td>dissertation</td><td>288</td></tr><tr><td>digital</td><td>287</td></tr><tr><td>zhou</td><td>287</td></tr><tr><td>unsupervised</td><td>287</td></tr><tr><td>support</td><td>287</td></tr><tr><td>cambridge</td><td>287</td></tr><tr><td>multimedia</td><td>286</td></tr><tr><td>words</td><td>286</td></tr><tr><td>prediction</td><td>285</td></tr><tr><td>changes</td><td>284</td></tr><tr><td>complex</td><td>282</td></tr><tr><td>subspace</td><td>281</td></tr><tr><td>head</td><td>279</td></tr><tr><td>berkeley</td><td>277</td></tr><tr><td>der</td><td>277</td></tr><tr><td>still</td><td>277</td></tr><tr><td>test</td><td>277</td></tr><tr><td>philosophy</td><td>277</td></tr><tr><td>art</td><t
d>276</td></tr><tr><td>public</td><td>275</td></tr><tr><td>tech</td><td>275</td></tr><tr><td>self</td><td>275</td></tr><tr><td>technologies</td><td>274</td></tr><tr><td>known</td><td>274</td></tr><tr><td>component</td><td>274</td></tr><tr><td>various</td><td>274</td></tr><tr><td>project</td><td>273</td></tr><tr><td>knowledge</td><td>273</td></tr><tr><td>google</td><td>273</td></tr><tr><td>contact</td><td>273</td></tr><tr><td>propose</td><td>272</td></tr><tr><td>material</td><td>272</td></tr><tr><td>parts</td><td>272</td></tr><tr><td>further</td><td>272</td></tr><tr><td>works</td><td>272</td></tr><tr><td>manuscript</td><td>272</td></tr><tr><td>doctor</td><td>271</td></tr><tr><td>them</td><td>271</td></tr><tr><td>less</td><td>270</td></tr><tr><td>way</td><td>270</td></tr><tr><td>variations</td><td>270</td></tr><tr><td>fusion</td><td>269</td></tr><tr><td>editor</td><td>268</td></tr><tr><td>discriminative</td><td>268</td></tr><tr><td>learn</td><td>268</td></tr><tr><td>behavioral</td><td>268</td></tr><tr><td>barcelona</td><td>267</td></tr><tr><td>identification</td><td>266</td></tr><tr><td>experiments</td><td>265</td></tr><tr><td>automation</td><td>265</td></tr><tr><td>road</td><td>264</td></tr><tr><td>cial</td><td>264</td></tr><tr><td>adversarial</td><td>263</td></tr><tr><td>box</td><td>263</td></tr><tr><td>cues</td><td>262</td></tr><tr><td>distance</td><td>262</td></tr><tr><td>generative</td><td>261</td></tr><tr><td>global</td><td>261</td></tr><tr><td>current</td><td>261</td></tr><tr><td>thus</td><td>260</td></tr><tr><td>arti</td><td>260</td></tr><tr><td>affective</td><td>260</td></tr><tr><td>texture</td><td>260</td></tr><tr><td>interest</td><td>259</td></tr><tr><td>functional</td><td>259</td></tr><tr><td>psychiatry</td><td>259</td></tr><tr><td>points</td><td>258</td></tr><tr><td>small</td><td>258</td></tr><tr><td>yan</td><td>258</td></tr><tr><td>ability</td><td>258</td></tr><tr><td>active</td><td>258</td></tr><tr><td>software</td><td>257</td></tr><tr><td>per</td><td>2
57</td></tr><tr><td>who</td><td>257</td></tr><tr><td>address</td><td>257</td></tr><tr><td>label</td><td>256</td></tr><tr><td>medicine</td><td>256</td></tr><tr><td>full</td><td>256</td></tr><tr><td>scenes</td><td>256</td></tr><tr><td>thomas</td><td>256</td></tr><tr><td>early</td><td>256</td></tr><tr><td>sample</td><td>255</td></tr><tr><td>among</td><td>255</td></tr><tr><td>cient</td><td>254</td></tr><tr><td>fast</td><td>254</td></tr><tr><td>better</td><td>254</td></tr><tr><td>gender</td><td>254</td></tr><tr><td>mathematics</td><td>254</td></tr><tr><td>out</td><td>254</td></tr><tr><td>interactions</td><td>253</td></tr><tr><td>microsoft</td><td>252</td></tr><tr><td>point</td><td>251</td></tr><tr><td>zhao</td><td>249</td></tr><tr><td>signi</td><td>249</td></tr><tr><td>regression</td><td>248</td></tr><tr><td>link</td><td>248</td></tr><tr><td>higher</td><td>248</td></tr><tr><td>fig</td><td>245</td></tr><tr><td>invariant</td><td>244</td></tr><tr><td>robot</td><td>244</td></tr><tr><td>quality</td><td>243</td></tr><tr><td>speech</td><td>243</td></tr><tr><td>graph</td><td>242</td></tr><tr><td>trained</td><td>241</td></tr><tr><td>size</td><td>241</td></tr><tr><td>here</td><td>241</td></tr><tr><td>end</td><td>241</td></tr><tr><td>massachusetts</td><td>240</td></tr><tr><td>towards</td><td>240</td></tr><tr><td>second</td><td>239</td></tr><tr><td>subject</td><td>238</td></tr><tr><td>negative</td><td>238</td></tr><tr><td>subjects</td><td>238</td></tr><tr><td>hand</td><td>237</td></tr><tr><td>health</td><td>237</td></tr><tr><td>univ</td><td>236</td></tr><tr><td>theory</td><td>235</td></tr><tr><td>eth</td><td>235</td></tr><tr><td>responses</td><td>235</td></tr><tr><td>groups</td><td>235</td></tr><tr><td>max</td><td>234</td></tr><tr><td>pages</td><td>234</td></tr><tr><td>therefore</td><td>234</td></tr><tr><td>core</td><td>233</td></tr><tr><td>would</td><td>233</td></tr><tr><td>additional</td><td>232</td></tr><tr><td>personal</td><td>232</td></tr><tr><td>dif</td><td>232</td></tr><tr><t
d>long</td><td>232</td></tr><tr><td>oxford</td><td>232</td></tr><tr><td>potential</td><td>231</td></tr><tr><td>taiwan</td><td>229</td></tr><tr><td>samples</td><td>228</td></tr><tr><td>prior</td><td>228</td></tr><tr><td>categories</td><td>227</td></tr><tr><td>range</td><td>227</td></tr><tr><td>distributed</td><td>226</td></tr><tr><td>top</td><td>226</td></tr><tr><td>positive</td><td>226</td></tr><tr><td>recherche</td><td>226</td></tr><tr><td>transfer</td><td>225</td></tr><tr><td>kernel</td><td>225</td></tr><tr><td>area</td><td>225</td></tr><tr><td>central</td><td>224</td></tr><tr><td>effective</td><td>224</td></tr><tr><td>edited</td><td>224</td></tr><tr><td>since</td><td>223</td></tr><tr><td>multimodal</td><td>222</td></tr><tr><td>following</td><td>222</td></tr><tr><td>could</td><td>221</td></tr><tr><td>text</td><td>221</td></tr><tr><td>few</td><td>221</td></tr><tr><td>although</td><td>221</td></tr><tr><td>oxytocin</td><td>220</td></tr><tr><td>instance</td><td>220</td></tr><tr><td>crowd</td><td>220</td></tr><tr><td>areas</td><td>220</td></tr><tr><td>environment</td><td>220</td></tr><tr><td>limited</td><td>219</td></tr><tr><td>commons</td><td>219</td></tr><tr><td>loss</td><td>218</td></tr><tr><td>amsterdam</td><td>218</td></tr><tr><td>future</td><td>217</td></tr><tr><td>right</td><td>217</td></tr><tr><td>rate</td><td>217</td></tr><tr><td>inference</td><td>216</td></tr><tr><td>reviewed</td><td>215</td></tr><tr><td>south</td><td>215</td></tr><tr><td>frame</td><td>215</td></tr><tr><td>embedding</td><td>215</td></tr><tr><td>factors</td><td>215</td></tr><tr><td>peter</td><td>214</td></tr><tr><td>metric</td><td>214</td></tr><tr><td>adults</td><td>214</td></tr><tr><td>web</td><td>213</td></tr><tr><td>learned</td><td>213</td></tr><tr><td>cheng</td><td>212</td></tr><tr><td>activation</td><td>212</td></tr><tr><td>michigan</td><td>212</td></tr><tr><td>reconstruction</td><td>212</td></tr><tr><td>accurate</td><td>211</td></tr><tr><td>kumar</td><td>211</td></tr><tr><td>pca</td><td>
211</td></tr><tr><td>after</td><td>211</td></tr><tr><td>optimization</td><td>210</td></tr><tr><td>young</td><td>210</td></tr><tr><td>tokyo</td><td>210</td></tr><tr><td>alignment</td><td>210</td></tr><tr><td>stanford</td><td>210</td></tr><tr><td>occlusion</td><td>210</td></tr><tr><td>city</td><td>209</td></tr><tr><td>driven</td><td>209</td></tr><tr><td>los</td><td>209</td></tr><tr><td>layer</td><td>208</td></tr><tr><td>possible</td><td>207</td></tr><tr><td>sensors</td><td>207</td></tr><tr><td>previous</td><td>206</td></tr><tr><td>adaptive</td><td>206</td></tr><tr><td>basic</td><td>205</td></tr><tr><td>gabor</td><td>205</td></tr><tr><td>ful</td><td>204</td></tr><tr><td>jun</td><td>204</td></tr><tr><td>obtained</td><td>204</td></tr><tr><td>zheng</td><td>204</td></tr><tr><td>comparison</td><td>203</td></tr><tr><td>particular</td><td>203</td></tr><tr><td>statistics</td><td>202</td></tr><tr><td>recognize</td><td>202</td></tr><tr><td>simple</td><td>202</td></tr><tr><td>much</td><td>202</td></tr><tr><td>discriminant</td><td>202</td></tr><tr><td>typically</td><td>202</td></tr><tr><td>bias</td><td>202</td></tr><tr><td>neutral</td><td>202</td></tr><tr><td>kingdom</td><td>201</td></tr><tr><td>actions</td><td>201</td></tr><tr><td>program</td><td>201</td></tr><tr><td>survey</td><td>200</td></tr><tr><td>shen</td><td>200</td></tr><tr><td>mobile</td><td>200</td></tr><tr><td>ali</td><td>200</td></tr><tr><td>processes</td><td>200</td></tr><tr><td>being</td><td>200</td></tr><tr><td>details</td><td>199</td></tr><tr><td>challenging</td><td>199</td></tr><tr><td>challenge</td><td>199</td></tr><tr><td>relative</td><td>199</td></tr><tr><td>daniel</td><td>198</td></tr><tr><td>highly</td><td>198</td></tr><tr><td>existing</td><td>198</td></tr><tr><td>zurich</td><td>197</td></tr><tr><td>inria</td><td>197</td></tr><tr><td>transactions</td><td>196</td></tr><tr><td>common</td><td>196</td></tr><tr><td>perceptual</td><td>196</td></tr><tr><td>recently</td><td>196</td></tr><tr><td>reported</td><td>196<
/td></tr><tr><td>mental</td><td>196</td></tr><tr><td>architecture</td><td>196</td></tr><tr><td>andrew</td><td>195</td></tr><tr><td>observed</td><td>195</td></tr><tr><td>focus</td><td>195</td></tr><tr><td>stimulus</td><td>195</td></tr><tr><td>song</td><td>195</td></tr><tr><td>direct</td><td>194</td></tr><tr><td>normal</td><td>194</td></tr><tr><td>does</td><td>194</td></tr><tr><td>improve</td><td>194</td></tr><tr><td>components</td><td>193</td></tr><tr><td>must</td><td>193</td></tr><tr><td>category</td><td>193</td></tr><tr><td>cortex</td><td>193</td></tr><tr><td>developmental</td><td>192</td></tr><tr><td>descriptors</td><td>192</td></tr><tr><td>pro</td><td>192</td></tr><tr><td>examples</td><td>192</td></tr><tr><td>source</td><td>192</td></tr><tr><td>creative</td><td>192</td></tr><tr><td>life</td><td>192</td></tr><tr><td>phd</td><td>192</td></tr><tr><td>cite</td><td>191</td></tr><tr><td>veri</td><td>191</td></tr><tr><td>recognizing</td><td>191</td></tr><tr><td>hierarchical</td><td>191</td></tr><tr><td>traits</td><td>191</td></tr><tr><td>tao</td><td>190</td></tr><tr><td>rank</td><td>190</td></tr><tr><td>generation</td><td>189</td></tr><tr><td>sets</td><td>189</td></tr><tr><td>adaptation</td><td>189</td></tr><tr><td>diego</td><td>189</td></tr><tr><td>geometric</td><td>188</td></tr><tr><td>cost</td><td>188</td></tr><tr><td>pre</td><td>187</td></tr><tr><td>license</td><td>187</td></tr><tr><td>alexander</td><td>187</td></tr><tr><td>random</td><td>186</td></tr><tr><td>management</td><td>186</td></tr><tr><td>fully</td><td>186</td></tr><tr><td>jiang</td><td>186</td></tr><tr><td>paris</td><td>186</td></tr><tr><td>users</td><td>185</td></tr><tr><td>labels</td><td>185</td></tr><tr><td>change</td><td>185</td></tr><tr><td>event</td><td>185</td></tr><tr><td>publisher</td><td>184</td></tr><tr><td>suggest</td><td>184</td></tr><tr><td>classes</td><td>184</td></tr><tr><td>dictionary</td><td>184</td></tr><tr><td>final</td><td>183</td></tr><tr><td>making</td><td>183</td></tr><tr><td>resul
t</td><td>183</td></tr><tr><td>fear</td><td>183</td></tr><tr><td>sci</td><td>183</td></tr><tr><td>james</td><td>183</td></tr><tr><td>academic</td><td>183</td></tr><tr><td>net</td><td>182</td></tr><tr><td>street</td><td>182</td></tr><tr><td>specific</td><td>182</td></tr><tr><td>frontiers</td><td>182</td></tr><tr><td>best</td><td>182</td></tr><tr><td>florida</td><td>181</td></tr><tr><td>impact</td><td>181</td></tr><tr><td>shi</td><td>181</td></tr><tr><td>automated</td><td>181</td></tr><tr><td>annotation</td><td>180</td></tr><tr><td>document</td><td>180</td></tr><tr><td>map</td><td>180</td></tr><tr><td>reduced</td><td>179</td></tr><tr><td>texas</td><td>178</td></tr><tr><td>lausanne</td><td>178</td></tr><tr><td>parameters</td><td>178</td></tr><tr><td>grant</td><td>178</td></tr><tr><td>pain</td><td>178</td></tr><tr><td>john</td><td>177</td></tr><tr><td>demonstrate</td><td>177</td></tr><tr><td>showed</td><td>177</td></tr><tr><td>angeles</td><td>177</td></tr><tr><td>typical</td><td>177</td></tr><tr><td>reference</td><td>176</td></tr><tr><td>cas</td><td>176</td></tr><tr><td>geometry</td><td>176</td></tr><tr><td>society</td><td>176</td></tr><tr><td>direction</td><td>176</td></tr><tr><td>washington</td><td>176</td></tr><tr><td>aware</td><td>175</td></tr><tr><td>basis</td><td>175</td></tr><tr><td>springer</td><td>174</td></tr><tr><td>ming</td><td>174</td></tr><tr><td>liang</td><td>174</td></tr><tr><td>chang</td><td>174</td></tr><tr><td>master</td><td>174</td></tr><tr><td>llment</td><td>174</td></tr><tr><td>etc</td><td>174</td></tr><tr><td>need</td><td>173</td></tr><tr><td>energy</td><td>173</td></tr><tr><td>gait</td><td>173</td></tr><tr><td>technique</td><td>172</td></tr><tr><td>improved</td><td>172</td></tr><tr><td>mouth</td><td>172</td></tr><tr><td>behaviors</td><td>172</td></tr><tr><td>reserved</td><td>172</td></tr><tr><td>sequence</td><td>172</td></tr><tr><td>svm</td><td>172</td></tr><tr><td>jean</td><td>172</td></tr><tr><td>christian</td><td>171</td></tr><tr><td>campus</t
d><td>171</td></tr><tr><td>eld</td><td>171</td></tr><tr><td>step</td><td>171</td></tr><tr><td>condition</td><td>170</td></tr><tr><td>independent</td><td>170</td></tr><tr><td>gao</td><td>170</td></tr><tr><td>shot</td><td>170</td></tr><tr><td>supported</td><td>169</td></tr><tr><td>feng</td><td>168</td></tr><tr><td>another</td><td>168</td></tr><tr><td>static</td><td>168</td></tr><tr><td>attribution</td><td>168</td></tr><tr><td>patients</td><td>168</td></tr><tr><td>tang</td><td>167</td></tr><tr><td>left</td><td>167</td></tr><tr><td>goal</td><td>167</td></tr><tr><td>grained</td><td>166</td></tr><tr><td>personality</td><td>166</td></tr><tr><td>detecting</td><td>165</td></tr><tr><td>location</td><td>165</td></tr><tr><td>planck</td><td>164</td></tr><tr><td>private</td><td>164</td></tr><tr><td>coding</td><td>164</td></tr><tr><td>dong</td><td>164</td></tr><tr><td>black</td><td>164</td></tr><tr><td>either</td><td>164</td></tr><tr><td>variation</td><td>163</td></tr><tr><td>clinical</td><td>163</td></tr><tr><td>wild</td><td>163</td></tr><tr><td>martin</td><td>162</td></tr><tr><td>boston</td><td>162</td></tr><tr><td>psychological</td><td>162</td></tr><tr><td>nanjing</td><td>162</td></tr><tr><td>light</td><td>162</td></tr><tr><td>paul</td><td>161</td></tr><tr><td>eecs</td><td>161</td></tr><tr><td>developed</td><td>161</td></tr><tr><td>collection</td><td>161</td></tr><tr><td>properties</td><td>161</td></tr><tr><td>include</td><td>161</td></tr><tr><td>environments</td><td>161</td></tr><tr><td>latent</td><td>161</td></tr><tr><td>encoding</td><td>161</td></tr><tr><td>case</td><td>160</td></tr><tr><td>decision</td><td>160</td></tr><tr><td>cameras</td><td>160</td></tr><tr><td>error</td><td>160</td></tr><tr><td>east</td><td>160</td></tr><tr><td>reduction</td><td>160</td></tr><tr><td>interactive</td><td>160</td></tr><tr><td>jia</td><td>160</td></tr><tr><td>brazil</td><td>160</td></tr><tr><td>white</td><td>159</td></tr><tr><td>means</td><td>159</td></tr><tr><td>leuven</td><td>159</td></tr>
<tr><td>surface</td><td>159</td></tr><tr><td>improving</td><td>159</td></tr><tr><td>flow</td><td>159</td></tr><tr><td>peng</td><td>158</td></tr><tr><td>identify</td><td>158</td></tr><tr><td>unconstrained</td><td>158</td></tr><tr><td>literature</td><td>158</td></tr><tr><td>provides</td><td>158</td></tr><tr><td>dense</td><td>158</td></tr><tr><td>singh</td><td>158</td></tr><tr><td>year</td><td>157</td></tr><tr><td>maryland</td><td>157</td></tr><tr><td>publishing</td><td>157</td></tr><tr><td>generated</td><td>157</td></tr><tr><td>luc</td><td>157</td></tr><tr><td>measure</td><td>157</td></tr><tr><td>physical</td><td>157</td></tr><tr><td>performed</td><td>157</td></tr><tr><td>findings</td><td>156</td></tr><tr><td>free</td><td>156</td></tr><tr><td>mechanisms</td><td>155</td></tr><tr><td>mit</td><td>155</td></tr><tr><td>est</td><td>155</td></tr><tr><td>building</td><td>155</td></tr><tr><td>signals</td><td>155</td></tr><tr><td>capture</td><td>154</td></tr><tr><td>background</td><td>154</td></tr><tr><td>detect</td><td>154</td></tr><tr><td>functions</td><td>154</td></tr><tr><td>stage</td><td>153</td></tr><tr><td>down</td><td>153</td></tr><tr><td>fax</td><td>153</td></tr><tr><td>addition</td><td>153</td></tr><tr><td>pixel</td><td>153</td></tr><tr><td>shows</td><td>153</td></tr><tr><td>han</td><td>153</td></tr><tr><td>developing</td><td>153</td></tr><tr><td>universidad</td><td>153</td></tr><tr><td>cnrs</td><td>153</td></tr><tr><td>good</td><td>152</td></tr><tr><td>mapping</td><td>152</td></tr><tr><td>labeling</td><td>152</td></tr><tr><td>queen</td><td>152</td></tr><tr><td>education</td><td>152</td></tr><tr><td>extract</td><td>152</td></tr><tr><td>georgia</td><td>152</td></tr><tr><td>statistical</td><td>151</td></tr><tr><td>labeled</td><td>151</td></tr><tr><td>mean</td><td>151</td></tr><tr><td>north</td><td>151</td></tr><tr><td>able</td><td>151</td></tr><tr><td>tree</td><td>151</td></tr><tr><td>uses</td><td>151</td></tr><tr><td>represent</td><td>150</td></tr><tr><td>relevant</td>
<td>150</td></tr><tr><td>lighting</td><td>150</td></tr><tr><td>types</td><td>150</td></tr><tr><td>above</td><td>149</td></tr><tr><td>wide</td><td>149</td></tr><tr><td>levels</td><td>149</td></tr><tr><td>dimensionality</td><td>149</td></tr><tr><td>universitat</td><td>148</td></tr><tr><td>challenges</td><td>148</td></tr><tr><td>unit</td><td>148</td></tr><tr><td>transform</td><td>148</td></tr><tr><td>rgb</td><td>148</td></tr><tr><td>lei</td><td>147</td></tr><tr><td>value</td><td>147</td></tr><tr><td>corporation</td><td>147</td></tr><tr><td>permission</td><td>147</td></tr><tr><td>functioning</td><td>147</td></tr><tr><td>might</td><td>147</td></tr><tr><td>antonio</td><td>147</td></tr><tr><td>optimal</td><td>147</td></tr><tr><td>mark</td><td>146</td></tr><tr><td>ltd</td><td>146</td></tr><tr><td>rather</td><td>146</td></tr><tr><td>division</td><td>146</td></tr><tr><td>iclr</td><td>146</td></tr><tr><td>cse</td><td>146</td></tr><tr><td>correlation</td><td>145</td></tr><tr><td>semi</td><td>145</td></tr><tr><td>child</td><td>145</td></tr><tr><td>authentication</td><td>145</td></tr><tr><td>last</td><td>145</td></tr><tr><td>broad</td><td>145</td></tr><tr><td>affect</td><td>145</td></tr><tr><td>fei</td><td>145</td></tr><tr><td>toronto</td><td>145</td></tr><tr><td>detector</td><td>145</td></tr><tr><td>toward</td><td>145</td></tr><tr><td>institut</td><td>144</td></tr><tr><td>labs</td><td>144</td></tr><tr><td>facebook</td><td>144</td></tr><tr><td>happy</td><td>144</td></tr><tr><td>computation</td><td>143</td></tr><tr><td>xiang</td><td>143</td></tr><tr><td>page</td><td>143</td></tr><tr><td>heterogeneous</td><td>143</td></tr><tr><td>extracted</td><td>143</td></tr><tr><td>researchers</td><td>143</td></tr><tr><td>directly</td><td>143</td></tr><tr><td>optical</td><td>143</td></tr><tr><td>lower</td><td>143</td></tr><tr><td>zero</td><td>143</td></tr><tr><td>karlsruhe</td><td>143</td></tr><tr><td>seoul</td><td>142</td></tr><tr><td>projection</td><td>142</td></tr><tr><td>especially</td><td>1
42</td></tr><tr><td>tong</td><td>142</td></tr><tr><td>tsinghua</td><td>142</td></tr><tr><td>berlin</td><td>141</td></tr><tr><td>describe</td><td>141</td></tr><tr><td>publications</td><td>141</td></tr><tr><td>your</td><td>141</td></tr><tr><td>description</td><td>141</td></tr><tr><td>ned</td><td>141</td></tr><tr><td>reproduction</td><td>141</td></tr><tr><td>automatically</td><td>140</td></tr><tr><td>measures</td><td>140</td></tr><tr><td>categorization</td><td>140</td></tr><tr><td>jan</td><td>140</td></tr><tr><td>orientation</td><td>140</td></tr><tr><td>american</td><td>140</td></tr><tr><td>ased</td><td>140</td></tr><tr><td>pairs</td><td>140</td></tr><tr><td>landmark</td><td>139</td></tr><tr><td>technological</td><td>139</td></tr><tr><td>mining</td><td>139</td></tr><tr><td>average</td><td>139</td></tr><tr><td>train</td><td>139</td></tr><tr><td>autistic</td><td>139</td></tr><tr><td>scienti</td><td>139</td></tr><tr><td>pixels</td><td>139</td></tr></table></body></html> \ No newline at end of file
diff --git a/scraper/reports/pdf_unknown_trigram.html b/scraper/reports/pdf_unknown_trigram.html
new file mode 100644
index 00000000..1ea7b358
--- /dev/null
+++ b/scraper/reports/pdf_unknown_trigram.html
@@ -0,0 +1 @@
+<!doctype html><html><head><meta charset='utf-8'><title>PDF Report: Unknown Trigrams</title><link rel='stylesheet' href='reports.css'></head><body><h2>PDF Report: Unknown Trigrams</h2><table border='1' cellpadding='3' cellspacing='3'><tr><td>of computer science</td><td>1786</td></tr><tr><td>department of computer</td><td>1320</td></tr><tr><td>computer science and</td><td>820</td></tr><tr><td>institute of technology</td><td>755</td></tr><tr><td>science and technology</td><td>526</td></tr><tr><td>science and engineering</td><td>500</td></tr><tr><td>university of california</td><td>485</td></tr><tr><td>department of electrical</td><td>469</td></tr><tr><td>school of computer</td><td>424</td></tr><tr><td>university of technology</td><td>411</td></tr><tr><td>carnegie mellon university</td><td>381</td></tr><tr><td>re identi cation</td><td>380</td></tr><tr><td>department of psychology</td><td>364</td></tr><tr><td>of electrical engineering</td><td>360</td></tr><tr><td>for the degree</td><td>355</td></tr><tr><td>senior member ieee</td><td>350</td></tr><tr><td>of electrical and</td><td>338</td></tr><tr><td>of science and</td><td>329</td></tr><tr><td>academy of sciences</td><td>326</td></tr><tr><td>member ieee and</td><td>315</td></tr><tr><td>electrical and computer</td><td>287</td></tr><tr><td>and computer engineering</td><td>287</td></tr><tr><td>dx doi org</td><td>284</td></tr><tr><td>facial expression recognition</td><td>280</td></tr><tr><td>in this paper</td><td>279</td></tr><tr><td>http dx doi</td><td>278</td></tr><tr><td>for face recognition</td><td>272</td></tr><tr><td>and computer science</td><td>272</td></tr><tr><td>person re identi</td><td>265</td></tr><tr><td>student member ieee</td><td>256</td></tr><tr><td>of the requirements</td><td>254</td></tr><tr><td>doctor of philosophy</td><td>247</td></tr><tr><td>should be addressed</td><td>246</td></tr><tr><td>proceedings of the</td><td>231</td></tr><tr><td>university of science</td><td>227</td></tr><tr><td>state of 
the</td><td>223</td></tr><tr><td>journal of computer</td><td>221</td></tr><tr><td>of the art</td><td>211</td></tr><tr><td>requirements for the</td><td>211</td></tr><tr><td>engineering and computer</td><td>205</td></tr><tr><td>electrical engineering and</td><td>201</td></tr><tr><td>one of the</td><td>197</td></tr><tr><td>computer science university</td><td>194</td></tr><tr><td>arti cial intelligence</td><td>189</td></tr><tr><td>college of engineering</td><td>185</td></tr><tr><td>the requirements for</td><td>179</td></tr><tr><td>in partial ful</td><td>175</td></tr><tr><td>university of hong</td><td>169</td></tr><tr><td>of hong kong</td><td>169</td></tr><tr><td>dept of computer</td><td>168</td></tr><tr><td>of computer engineering</td><td>167</td></tr><tr><td>all rights reserved</td><td>166</td></tr><tr><td>partial ful llment</td><td>164</td></tr><tr><td>of engineering and</td><td>159</td></tr><tr><td>of psychology university</td><td>154</td></tr><tr><td>based on the</td><td>153</td></tr><tr><td>face recognition using</td><td>153</td></tr><tr><td>in the wild</td><td>150</td></tr><tr><td>computer science department</td><td>150</td></tr><tr><td>max planck institute</td><td>147</td></tr><tr><td>submitted in partial</td><td>144</td></tr><tr><td>planck institute for</td><td>143</td></tr><tr><td>of electronics and</td><td>139</td></tr><tr><td>to cite this</td><td>138</td></tr><tr><td>computer vision and</td><td>137</td></tr><tr><td>of chinese academy</td><td>134</td></tr><tr><td>department of information</td><td>133</td></tr><tr><td>of information technology</td><td>130</td></tr><tr><td>www frontiersin org</td><td>129</td></tr><tr><td>the chinese university</td><td>128</td></tr><tr><td>as conference paper</td><td>128</td></tr><tr><td>school of information</td><td>126</td></tr><tr><td>university of maryland</td><td>126</td></tr><tr><td>of computer applications</td><td>124</td></tr><tr><td>paper at iclr</td><td>124</td></tr><tr><td>submitted to 
the</td><td>123</td></tr><tr><td>of computer and</td><td>122</td></tr><tr><td>of this work</td><td>121</td></tr><tr><td>convolutional neural networks</td><td>119</td></tr><tr><td>is an open</td><td>119</td></tr><tr><td>of pattern recognition</td><td>119</td></tr><tr><td>and information engineering</td><td>119</td></tr><tr><td>autism spectrum disorders</td><td>119</td></tr><tr><td>department of psychiatry</td><td>119</td></tr><tr><td>an open access</td><td>118</td></tr><tr><td>creative commons attribution</td><td>118</td></tr><tr><td>llment of the</td><td>118</td></tr><tr><td>of information science</td><td>117</td></tr><tr><td>by the editor</td><td>114</td></tr><tr><td>university of texas</td><td>113</td></tr><tr><td>luc van gool</td><td>113</td></tr><tr><td>open access article</td><td>113</td></tr><tr><td>autism spectrum disorder</td><td>112</td></tr><tr><td>will be inserted</td><td>112</td></tr><tr><td>inserted by the</td><td>112</td></tr><tr><td>school of electrical</td><td>111</td></tr><tr><td>of the face</td><td>111</td></tr><tr><td>school of computing</td><td>109</td></tr><tr><td>department of electronics</td><td>108</td></tr><tr><td>in computer vision</td><td>107</td></tr><tr><td>institute of automation</td><td>106</td></tr><tr><td>this article was</td><td>106</td></tr><tr><td>of sciences beijing</td><td>106</td></tr><tr><td>https doi org</td><td>105</td></tr><tr><td>human pose estimation</td><td>105</td></tr><tr><td>university of oxford</td><td>105</td></tr><tr><td>the creative commons</td><td>104</td></tr><tr><td>it has been</td><td>104</td></tr><tr><td>provided the original</td><td>102</td></tr><tr><td>of southern california</td><td>102</td></tr><tr><td>engineering and technology</td><td>102</td></tr><tr><td>university of illinois</td><td>101</td></tr><tr><td>due to the</td><td>101</td></tr><tr><td>sciences beijing china</td><td>101</td></tr><tr><td>et al and</td><td>100</td></tr><tr><td>university of london</td><td>99</td></tr><tr><td>convolutional neural 
network</td><td>99</td></tr><tr><td>university of chinese</td><td>98</td></tr><tr><td>university of singapore</td><td>98</td></tr><tr><td>received date accepted</td><td>98</td></tr><tr><td>date accepted date</td><td>98</td></tr><tr><td>institute for informatics</td><td>98</td></tr><tr><td>the original work</td><td>97</td></tr><tr><td>in any medium</td><td>96</td></tr><tr><td>whether they are</td><td>96</td></tr><tr><td>computer vision center</td><td>96</td></tr><tr><td>department of computing</td><td>95</td></tr><tr><td>reproduction in any</td><td>95</td></tr><tr><td>laboratory of pattern</td><td>95</td></tr><tr><td>imperial college london</td><td>94</td></tr><tr><td>of california san</td><td>94</td></tr><tr><td>information science and</td><td>93</td></tr><tr><td>or not the</td><td>93</td></tr><tr><td>may come from</td><td>93</td></tr><tr><td>of face recognition</td><td>93</td></tr><tr><td>association for computational</td><td>93</td></tr><tr><td>for computational linguistics</td><td>93</td></tr><tr><td>california san diego</td><td>93</td></tr><tr><td>public or private</td><td>92</td></tr><tr><td>distribution and reproduction</td><td>92</td></tr><tr><td>medium provided the</td><td>92</td></tr><tr><td>work is properly</td><td>92</td></tr><tr><td>california los angeles</td><td>92</td></tr><tr><td>of the most</td><td>92</td></tr><tr><td>cite this version</td><td>91</td></tr><tr><td>hal is multi</td><td>91</td></tr><tr><td>is multi disciplinary</td><td>91</td></tr><tr><td>multi disciplinary open</td><td>91</td></tr><tr><td>disciplinary open access</td><td>91</td></tr><tr><td>rchive for the</td><td>91</td></tr><tr><td>for the deposit</td><td>91</td></tr><tr><td>the deposit and</td><td>91</td></tr><tr><td>deposit and dissemination</td><td>91</td></tr><tr><td>dissemination of sci</td><td>91</td></tr><tr><td>research documents whether</td><td>91</td></tr><tr><td>documents whether they</td><td>91</td></tr><tr><td>they are pub</td><td>91</td></tr><tr><td>lished or 
not</td><td>91</td></tr><tr><td>not the documents</td><td>91</td></tr><tr><td>the documents may</td><td>91</td></tr><tr><td>documents may come</td><td>91</td></tr><tr><td>teaching and research</td><td>91</td></tr><tr><td>and research institutions</td><td>91</td></tr><tr><td>institutions in france</td><td>91</td></tr><tr><td>broad or from</td><td>91</td></tr><tr><td>or from public</td><td>91</td></tr><tr><td>or private research</td><td>91</td></tr><tr><td>private research centers</td><td>91</td></tr><tr><td>archive ouverte pluridisciplinaire</td><td>91</td></tr><tr><td>ouverte pluridisciplinaire hal</td><td>91</td></tr><tr><td>pluridisciplinaire hal est</td><td>91</td></tr><tr><td>et la diffusion</td><td>91</td></tr><tr><td>diffusion de documents</td><td>91</td></tr><tr><td>de niveau recherche</td><td>91</td></tr><tr><td>niveau recherche publi</td><td>91</td></tr><tr><td>publics ou priv</td><td>91</td></tr><tr><td>university of michigan</td><td>91</td></tr><tr><td>access article distributed</td><td>90</td></tr><tr><td>article distributed under</td><td>90</td></tr><tr><td>distributed under the</td><td>90</td></tr><tr><td>any medium provided</td><td>90</td></tr><tr><td>university of amsterdam</td><td>90</td></tr><tr><td>face recognition system</td><td>89</td></tr><tr><td>frontiers in psychology</td><td>89</td></tr><tr><td>university of washington</td><td>89</td></tr><tr><td>with autism spectrum</td><td>88</td></tr><tr><td>use distribution and</td><td>88</td></tr><tr><td>of facial expressions</td><td>88</td></tr><tr><td>university beijing china</td><td>88</td></tr><tr><td>latex class files</td><td>88</td></tr><tr><td>school of engineering</td><td>87</td></tr><tr><td>permits unrestricted use</td><td>87</td></tr><tr><td>has been accepted</td><td>87</td></tr><tr><td>journal of latex</td><td>87</td></tr><tr><td>of latex class</td><td>87</td></tr><tr><td>part of the</td><td>86</td></tr><tr><td>which permits unrestricted</td><td>86</td></tr><tr><td>under the 
creative</td><td>86</td></tr><tr><td>pittsburgh pa usa</td><td>85</td></tr><tr><td>image classi cation</td><td>85</td></tr><tr><td>facebook ai research</td><td>85</td></tr><tr><td>such as the</td><td>85</td></tr><tr><td>been accepted for</td><td>85</td></tr><tr><td>school of medicine</td><td>85</td></tr><tr><td>for more information</td><td>84</td></tr><tr><td>university of toronto</td><td>84</td></tr><tr><td>michigan state university</td><td>83</td></tr><tr><td>automation chinese academy</td><td>83</td></tr><tr><td>deep neural networks</td><td>83</td></tr><tr><td>computer science engineering</td><td>83</td></tr><tr><td>class files vol</td><td>83</td></tr><tr><td>university college london</td><td>83</td></tr><tr><td>university of central</td><td>82</td></tr><tr><td>of automation chinese</td><td>82</td></tr><tr><td>section of the</td><td>82</td></tr><tr><td>face veri cation</td><td>82</td></tr><tr><td>faculty of electrical</td><td>82</td></tr><tr><td>technology of china</td><td>81</td></tr><tr><td>school of psychology</td><td>81</td></tr><tr><td>university of southern</td><td>81</td></tr><tr><td>faculty of engineering</td><td>81</td></tr><tr><td>et al this</td><td>80</td></tr><tr><td>unrestricted use distribution</td><td>80</td></tr><tr><td>institute of science</td><td>80</td></tr><tr><td>and pattern recognition</td><td>80</td></tr><tr><td>department of mathematics</td><td>79</td></tr><tr><td>shanghai jiao tong</td><td>79</td></tr><tr><td>master of science</td><td>79</td></tr><tr><td>for facial expression</td><td>78</td></tr><tr><td>jiao tong university</td><td>78</td></tr><tr><td>of california los</td><td>78</td></tr><tr><td>can be used</td><td>77</td></tr><tr><td>of information engineering</td><td>77</td></tr><tr><td>queen mary university</td><td>76</td></tr><tr><td>in partial fulfillment</td><td>76</td></tr><tr><td>computer and information</td><td>76</td></tr><tr><td>center for research</td><td>76</td></tr><tr><td>department of 
engineering</td><td>76</td></tr><tr><td>human computer interaction</td><td>76</td></tr><tr><td>article id pages</td><td>75</td></tr><tr><td>and information technology</td><td>75</td></tr><tr><td>review as conference</td><td>75</td></tr><tr><td>college of computer</td><td>74</td></tr><tr><td>in computer science</td><td>74</td></tr><tr><td>university of surrey</td><td>74</td></tr><tr><td>children with autism</td><td>74</td></tr><tr><td>is properly cited</td><td>73</td></tr><tr><td>individuals with autism</td><td>73</td></tr><tr><td>of central florida</td><td>72</td></tr><tr><td>dept of electrical</td><td>72</td></tr><tr><td>facial expression analysis</td><td>72</td></tr><tr><td>fulfillment of the</td><td>72</td></tr><tr><td>cambridge ma usa</td><td>72</td></tr><tr><td>journal of advanced</td><td>71</td></tr><tr><td>and electronic engineering</td><td>71</td></tr><tr><td>department of informatics</td><td>71</td></tr><tr><td>springer science business</td><td>70</td></tr><tr><td>science business media</td><td>70</td></tr><tr><td>illinois at urbana</td><td>70</td></tr><tr><td>commons attribution license</td><td>70</td></tr><tr><td>department of electronic</td><td>70</td></tr><tr><td>based face recognition</td><td>70</td></tr><tr><td>of engineering science</td><td>70</td></tr><tr><td>end to end</td><td>70</td></tr><tr><td>of california berkeley</td><td>70</td></tr><tr><td>at urbana champaign</td><td>69</td></tr><tr><td>australian national university</td><td>69</td></tr><tr><td>of electronic engineering</td><td>69</td></tr><tr><td>visual question answering</td><td>69</td></tr><tr><td>institute of information</td><td>69</td></tr><tr><td>of information and</td><td>69</td></tr><tr><td>article was submitted</td><td>68</td></tr><tr><td>to whom correspondence</td><td>68</td></tr><tr><td>university of wisconsin</td><td>68</td></tr><tr><td>individuals with asd</td><td>68</td></tr><tr><td>in face recognition</td><td>67</td></tr><tr><td>electrical and 
electronic</td><td>67</td></tr><tr><td>on computer vision</td><td>67</td></tr><tr><td>of maryland college</td><td>66</td></tr><tr><td>maryland college park</td><td>66</td></tr><tr><td>journal of engineering</td><td>66</td></tr><tr><td>robust face recognition</td><td>66</td></tr><tr><td>university of north</td><td>66</td></tr><tr><td>in revised form</td><td>66</td></tr><tr><td>for action recognition</td><td>65</td></tr><tr><td>science and information</td><td>65</td></tr><tr><td>whom correspondence should</td><td>65</td></tr><tr><td>link to publication</td><td>64</td></tr><tr><td>hindawi publishing corporation</td><td>64</td></tr><tr><td>image and video</td><td>64</td></tr><tr><td>detection and tracking</td><td>64</td></tr><tr><td>of the journal</td><td>64</td></tr><tr><td>pattern analysis and</td><td>64</td></tr><tr><td>and communication engineering</td><td>64</td></tr><tr><td>of the same</td><td>63</td></tr><tr><td>of intelligent information</td><td>63</td></tr><tr><td>idiap research institute</td><td>63</td></tr><tr><td>computer vision laboratory</td><td>62</td></tr><tr><td>school of electronic</td><td>62</td></tr><tr><td>vol no august</td><td>62</td></tr><tr><td>national taiwan university</td><td>62</td></tr><tr><td>accepted for publication</td><td>62</td></tr><tr><td>state key laboratory</td><td>61</td></tr><tr><td>on the other</td><td>61</td></tr><tr><td>in this work</td><td>61</td></tr><tr><td>nanyang technological university</td><td>61</td></tr><tr><td>university of new</td><td>60</td></tr><tr><td>computer vision lab</td><td>60</td></tr><tr><td>spectrum disorder asd</td><td>60</td></tr><tr><td>university of pennsylvania</td><td>60</td></tr><tr><td>ieee international conference</td><td>60</td></tr><tr><td>of north carolina</td><td>60</td></tr><tr><td>university of tokyo</td><td>59</td></tr><tr><td>we show that</td><td>59</td></tr><tr><td>entific research documents</td><td>59</td></tr><tr><td>scientifiques de niveau</td><td>59</td></tr><tr><td>publi ou 
non</td><td>59</td></tr><tr><td>manant des tablissements</td><td>59</td></tr><tr><td>des tablissements enseignement</td><td>59</td></tr><tr><td>recherche fran ais</td><td>59</td></tr><tr><td>ais ou trangers</td><td>59</td></tr><tr><td>ou trangers des</td><td>59</td></tr><tr><td>trangers des laboratoires</td><td>59</td></tr><tr><td>degree of doctor</td><td>59</td></tr><tr><td>paper we propose</td><td>59</td></tr><tr><td>magnetic resonance imaging</td><td>59</td></tr><tr><td>faces in the</td><td>58</td></tr><tr><td>face recognition with</td><td>58</td></tr><tr><td>institute of computing</td><td>58</td></tr><tr><td>new york university</td><td>57</td></tr><tr><td>university shanghai china</td><td>57</td></tr><tr><td>and electrical engineering</td><td>57</td></tr><tr><td>international joint conference</td><td>57</td></tr><tr><td>principal component analysis</td><td>57</td></tr><tr><td>and computer vision</td><td>57</td></tr><tr><td>of wisconsin madison</td><td>57</td></tr><tr><td>conference on computer</td><td>57</td></tr><tr><td>research in computer</td><td>57</td></tr><tr><td>online at www</td><td>56</td></tr><tr><td>version of the</td><td>56</td></tr><tr><td>expression recognition using</td><td>56</td></tr><tr><td>amsterdam the netherlands</td><td>56</td></tr><tr><td>image processing and</td><td>56</td></tr><tr><td>face detection and</td><td>55</td></tr><tr><td>to this work</td><td>55</td></tr><tr><td>generative adversarial networks</td><td>55</td></tr><tr><td>and signal processing</td><td>55</td></tr><tr><td>university of pittsburgh</td><td>55</td></tr><tr><td>face recognition based</td><td>55</td></tr><tr><td>this article has</td><td>55</td></tr><tr><td>ming hsuan yang</td><td>54</td></tr><tr><td>research center for</td><td>54</td></tr><tr><td>terms of use</td><td>54</td></tr><tr><td>have been proposed</td><td>54</td></tr><tr><td>sun yat sen</td><td>54</td></tr><tr><td>in individuals with</td><td>54</td></tr><tr><td>johns hopkins 
university</td><td>54</td></tr><tr><td>article has been</td><td>54</td></tr><tr><td>the proposed method</td><td>53</td></tr><tr><td>of electronic and</td><td>53</td></tr><tr><td>engineering the chinese</td><td>53</td></tr><tr><td>in the context</td><td>53</td></tr><tr><td>of machine learning</td><td>53</td></tr><tr><td>zero shot learning</td><td>53</td></tr><tr><td>show that the</td><td>53</td></tr><tr><td>vision and pattern</td><td>53</td></tr><tr><td>multi target tracking</td><td>53</td></tr><tr><td>support vector machine</td><td>53</td></tr><tr><td>this is the</td><td>52</td></tr><tr><td>university of edinburgh</td><td>52</td></tr><tr><td>of this material</td><td>52</td></tr><tr><td>creativecommons org licenses</td><td>52</td></tr><tr><td>use of this</td><td>52</td></tr><tr><td>of mathematics and</td><td>52</td></tr><tr><td>yat sen university</td><td>52</td></tr><tr><td>university of massachusetts</td><td>52</td></tr><tr><td>invariant face recognition</td><td>52</td></tr><tr><td>and machine intelligence</td><td>52</td></tr><tr><td>open access books</td><td>52</td></tr><tr><td>functional magnetic resonance</td><td>52</td></tr><tr><td>cas beijing china</td><td>52</td></tr><tr><td>of the university</td><td>52</td></tr><tr><td>neural networks for</td><td>51</td></tr><tr><td>the other hand</td><td>51</td></tr><tr><td>computer vision group</td><td>51</td></tr><tr><td>of the twenty</td><td>51</td></tr><tr><td>microsoft research asia</td><td>51</td></tr><tr><td>person re identification</td><td>51</td></tr><tr><td>published as conference</td><td>51</td></tr><tr><td>analysis and machine</td><td>51</td></tr><tr><td>issue of this</td><td>51</td></tr><tr><td>of this journal</td><td>51</td></tr><tr><td>has not been</td><td>51</td></tr><tr><td>texas at austin</td><td>50</td></tr><tr><td>lab of intelligent</td><td>50</td></tr><tr><td>intelligent information processing</td><td>50</td></tr><tr><td>follow this and</td><td>50</td></tr><tr><td>this and 
additional</td><td>50</td></tr><tr><td>and additional works</td><td>50</td></tr><tr><td>university of posts</td><td>50</td></tr><tr><td>supported by the</td><td>50</td></tr><tr><td>xi an china</td><td>50</td></tr><tr><td>in future issue</td><td>50</td></tr><tr><td>department of statistics</td><td>50</td></tr><tr><td>universit de montr</td><td>49</td></tr><tr><td>and engineering university</td><td>49</td></tr><tr><td>university of twente</td><td>49</td></tr><tr><td>of posts and</td><td>49</td></tr><tr><td>university of southampton</td><td>49</td></tr><tr><td>some of the</td><td>49</td></tr><tr><td>can be found</td><td>49</td></tr><tr><td>this work was</td><td>49</td></tr><tr><td>electronics and communication</td><td>49</td></tr><tr><td>university of cambridge</td><td>49</td></tr><tr><td>universit at unchen</td><td>49</td></tr><tr><td>multi object tracking</td><td>48</td></tr><tr><td>of the proposed</td><td>48</td></tr><tr><td>for object detection</td><td>48</td></tr><tr><td>journal on image</td><td>48</td></tr><tr><td>hal id hal</td><td>48</td></tr><tr><td>of technology sydney</td><td>48</td></tr><tr><td>paper under double</td><td>48</td></tr><tr><td>under double blind</td><td>48</td></tr><tr><td>double blind review</td><td>48</td></tr><tr><td>author to whom</td><td>48</td></tr><tr><td>be addressed mail</td><td>48</td></tr><tr><td>of computing technology</td><td>48</td></tr><tr><td>department of cse</td><td>48</td></tr><tr><td>in autism spectrum</td><td>48</td></tr><tr><td>additional key words</td><td>47</td></tr><tr><td>key words and</td><td>47</td></tr><tr><td>words and phrases</td><td>47</td></tr><tr><td>of computer vision</td><td>47</td></tr><tr><td>http creativecommons org</td><td>47</td></tr><tr><td>brought to you</td><td>47</td></tr><tr><td>institute carnegie mellon</td><td>47</td></tr><tr><td>speech and signal</td><td>47</td></tr><tr><td>the present study</td><td>47</td></tr><tr><td>cite this article</td><td>47</td></tr><tr><td>recognition in 
the</td><td>47</td></tr><tr><td>institute of computer</td><td>47</td></tr><tr><td>in recent years</td><td>47</td></tr><tr><td>the public portal</td><td>47</td></tr><tr><td>universit degli studi</td><td>47</td></tr><tr><td>according to the</td><td>47</td></tr><tr><td>most of the</td><td>46</td></tr><tr><td>to you for</td><td>46</td></tr><tr><td>you for free</td><td>46</td></tr><tr><td>for free and</td><td>46</td></tr><tr><td>free and open</td><td>46</td></tr><tr><td>and open access</td><td>46</td></tr><tr><td>more information please</td><td>46</td></tr><tr><td>information please contact</td><td>46</td></tr><tr><td>information and communication</td><td>46</td></tr><tr><td>of the human</td><td>46</td></tr><tr><td>of psychology and</td><td>46</td></tr><tr><td>xi an jiaotong</td><td>46</td></tr><tr><td>simon fraser university</td><td>46</td></tr><tr><td>of computing and</td><td>46</td></tr><tr><td>brain and cognitive</td><td>46</td></tr><tr><td>of california riverside</td><td>46</td></tr><tr><td>of facial expression</td><td>45</td></tr><tr><td>th international conference</td><td>45</td></tr><tr><td>face recognition and</td><td>45</td></tr><tr><td>on pattern analysis</td><td>45</td></tr><tr><td>for large scale</td><td>45</td></tr><tr><td>the fact that</td><td>45</td></tr><tr><td>local binary pattern</td><td>45</td></tr><tr><td>in real time</td><td>45</td></tr><tr><td>deep convolutional neural</td><td>44</td></tr><tr><td>if you believe</td><td>44</td></tr><tr><td>seoul national university</td><td>44</td></tr><tr><td>in psychology www</td><td>44</td></tr><tr><td>psychology www frontiersin</td><td>44</td></tr><tr><td>posts and telecommunications</td><td>44</td></tr><tr><td>www intechopen com</td><td>44</td></tr><tr><td>in which the</td><td>44</td></tr><tr><td>zur erlangung des</td><td>44</td></tr><tr><td>eth zurich switzerland</td><td>44</td></tr><tr><td>on arti cial</td><td>44</td></tr><tr><td>www mdpi com</td><td>44</td></tr><tr><td>mdpi com 
journal</td><td>44</td></tr><tr><td>authors contributed equally</td><td>44</td></tr><tr><td>citation for published</td><td>43</td></tr><tr><td>retained by the</td><td>43</td></tr><tr><td>and computer sciences</td><td>43</td></tr><tr><td>terms of the</td><td>43</td></tr><tr><td>university of oulu</td><td>43</td></tr><tr><td>works at http</td><td>43</td></tr><tr><td>by an authorized</td><td>43</td></tr><tr><td>mathematics and computer</td><td>43</td></tr><tr><td>www tandfonline com</td><td>43</td></tr><tr><td>for intelligent systems</td><td>43</td></tr><tr><td>head pose estimation</td><td>43</td></tr><tr><td>tsinghua university beijing</td><td>43</td></tr><tr><td>university of trento</td><td>42</td></tr><tr><td>and software engineering</td><td>42</td></tr><tr><td>for arti cial</td><td>42</td></tr><tr><td>accepted for inclusion</td><td>42</td></tr><tr><td>an authorized administrator</td><td>42</td></tr><tr><td>face recognition under</td><td>42</td></tr><tr><td>http www tandfonline</td><td>42</td></tr><tr><td>an jiaotong university</td><td>42</td></tr><tr><td>classi cation and</td><td>42</td></tr><tr><td>of the main</td><td>42</td></tr><tr><td>to improve the</td><td>42</td></tr><tr><td>equally to this</td><td>42</td></tr><tr><td>university of rochester</td><td>42</td></tr><tr><td>department of ece</td><td>42</td></tr><tr><td>we use the</td><td>42</td></tr><tr><td>wang member ieee</td><td>42</td></tr><tr><td>mellon university pittsburgh</td><td>41</td></tr><tr><td>for published version</td><td>41</td></tr><tr><td>well as the</td><td>41</td></tr><tr><td>university of thessaloniki</td><td>41</td></tr><tr><td>be used for</td><td>41</td></tr><tr><td>material is permitted</td><td>41</td></tr><tr><td>and information sciences</td><td>41</td></tr><tr><td>the face recognition</td><td>41</td></tr><tr><td>research showcase cmu</td><td>41</td></tr><tr><td>in children with</td><td>41</td></tr><tr><td>the eye region</td><td>41</td></tr><tr><td>facial emotion 
recognition</td><td>41</td></tr><tr><td>of psychiatry and</td><td>41</td></tr><tr><td>differences in the</td><td>41</td></tr><tr><td>int comput vis</td><td>40</td></tr><tr><td>saarland informatics campus</td><td>40</td></tr><tr><td>in accordance with</td><td>40</td></tr><tr><td>excellence in brain</td><td>40</td></tr><tr><td>brain science and</td><td>40</td></tr><tr><td>it is not</td><td>40</td></tr><tr><td>of the image</td><td>40</td></tr><tr><td>servers or lists</td><td>40</td></tr><tr><td>of massachusetts amherst</td><td>40</td></tr><tr><td>laboratory of intelligent</td><td>40</td></tr><tr><td>for real time</td><td>40</td></tr><tr><td>and face recognition</td><td>40</td></tr><tr><td>robotics institute carnegie</td><td>40</td></tr><tr><td>face recognition has</td><td>40</td></tr><tr><td>gender classi cation</td><td>40</td></tr><tr><td>university of adelaide</td><td>40</td></tr><tr><td>seattle wa usa</td><td>40</td></tr><tr><td>version of record</td><td>39</td></tr><tr><td>on artificial intelligence</td><td>39</td></tr><tr><td>center for excellence</td><td>39</td></tr><tr><td>in brain science</td><td>39</td></tr><tr><td>science and intelligence</td><td>39</td></tr><tr><td>and intelligence technology</td><td>39</td></tr><tr><td>this work for</td><td>39</td></tr><tr><td>journal of information</td><td>39</td></tr><tr><td>in the same</td><td>39</td></tr><tr><td>faculty of computer</td><td>39</td></tr><tr><td>face recognition systems</td><td>39</td></tr><tr><td>this article should</td><td>39</td></tr><tr><td>information engineering the</td><td>39</td></tr><tr><td>computing technology cas</td><td>39</td></tr><tr><td>the author published</td><td>39</td></tr><tr><td>of the data</td><td>39</td></tr><tr><td>in this study</td><td>39</td></tr><tr><td>but has not</td><td>39</td></tr><tr><td>content may change</td><td>39</td></tr><tr><td>may change prior</td><td>39</td></tr><tr><td>in the past</td><td>39</td></tr><tr><td>of electronic science</td><td>38</td></tr><tr><td>you 
believe that</td><td>38</td></tr><tr><td>university of western</td><td>38</td></tr><tr><td>on image and</td><td>38</td></tr><tr><td>in the literature</td><td>38</td></tr><tr><td>https hal archives</td><td>38</td></tr><tr><td>hal archives ouvertes</td><td>38</td></tr><tr><td>redistribution to servers</td><td>38</td></tr><tr><td>journal of science</td><td>38</td></tr><tr><td>to the department</td><td>38</td></tr><tr><td>modena and reggio</td><td>38</td></tr><tr><td>and intelligent systems</td><td>38</td></tr><tr><td>is an important</td><td>38</td></tr><tr><td>correspondence concerning this</td><td>38</td></tr><tr><td>concerning this article</td><td>38</td></tr><tr><td>linear discriminant analysis</td><td>38</td></tr><tr><td>school of electronics</td><td>38</td></tr><tr><td>of the amygdala</td><td>38</td></tr><tr><td>of brain and</td><td>38</td></tr><tr><td>publication in future</td><td>38</td></tr><tr><td>this journal but</td><td>38</td></tr><tr><td>journal but has</td><td>38</td></tr><tr><td>not been fully</td><td>38</td></tr><tr><td>been fully edited</td><td>38</td></tr><tr><td>fully edited content</td><td>38</td></tr><tr><td>edited content may</td><td>38</td></tr><tr><td>prior to final</td><td>38</td></tr><tr><td>to final publication</td><td>38</td></tr><tr><td>et al the</td><td>38</td></tr><tr><td>transactions on pattern</td><td>38</td></tr><tr><td>the graduate school</td><td>38</td></tr><tr><td>conference on arti</td><td>38</td></tr><tr><td>received june accepted</td><td>37</td></tr><tr><td>university of waterloo</td><td>37</td></tr><tr><td>conference on artificial</td><td>37</td></tr><tr><td>in the public</td><td>37</td></tr><tr><td>we found that</td><td>37</td></tr><tr><td>object detection and</td><td>37</td></tr><tr><td>republic of korea</td><td>37</td></tr><tr><td>feature extraction and</td><td>37</td></tr><tr><td>ouvertes fr hal</td><td>37</td></tr><tr><td>school of informatics</td><td>37</td></tr><tr><td>http hdl handle</td><td>37</td></tr><tr><td>hdl 
handle net</td><td>37</td></tr><tr><td>computer engineering department</td><td>37</td></tr><tr><td>hal id tel</td><td>37</td></tr><tr><td>ouvertes fr tel</td><td>37</td></tr><tr><td>work was supported</td><td>37</td></tr><tr><td>received april accepted</td><td>37</td></tr><tr><td>at chapel hill</td><td>37</td></tr><tr><td>oxford university press</td><td>37</td></tr><tr><td>technology cas beijing</td><td>37</td></tr><tr><td>automatic facial expression</td><td>37</td></tr><tr><td>angeles ca usa</td><td>37</td></tr><tr><td>recognition of facial</td><td>37</td></tr><tr><td>respect to the</td><td>37</td></tr><tr><td>science and software</td><td>36</td></tr><tr><td>institute for computer</td><td>36</td></tr><tr><td>of electrical computer</td><td>36</td></tr><tr><td>electrical computer engineering</td><td>36</td></tr><tr><td>edinburgh research explorer</td><td>36</td></tr><tr><td>journal of experimental</td><td>36</td></tr><tr><td>is the author</td><td>36</td></tr><tr><td>ieee personal use</td><td>36</td></tr><tr><td>university of modena</td><td>36</td></tr><tr><td>of modena and</td><td>36</td></tr><tr><td>of new york</td><td>36</td></tr><tr><td>conference on machine</td><td>36</td></tr><tr><td>that the proposed</td><td>36</td></tr><tr><td>of applied sciences</td><td>36</td></tr><tr><td>carolina at chapel</td><td>36</td></tr><tr><td>published by oxford</td><td>36</td></tr><tr><td>by oxford university</td><td>36</td></tr><tr><td>based on their</td><td>36</td></tr><tr><td>processing of chinese</td><td>36</td></tr><tr><td>university of nottingham</td><td>36</td></tr><tr><td>spectrum disorders asd</td><td>36</td></tr><tr><td>university of florida</td><td>36</td></tr><tr><td>cial intelligence ijcai</td><td>36</td></tr><tr><td>de montr eal</td><td>35</td></tr><tr><td>computer graphics and</td><td>35</td></tr><tr><td>university of electronic</td><td>35</td></tr><tr><td>electronic science and</td><td>35</td></tr><tr><td>take down policy</td><td>35</td></tr><tr><td>results suggest 
that</td><td>35</td></tr><tr><td>access by the</td><td>35</td></tr><tr><td>this material for</td><td>35</td></tr><tr><td>tel aviv university</td><td>35</td></tr><tr><td>of software engineering</td><td>35</td></tr><tr><td>for face detection</td><td>35</td></tr><tr><td>of singapore singapore</td><td>35</td></tr><tr><td>for human pose</td><td>35</td></tr><tr><td>prof dr ing</td><td>35</td></tr><tr><td>of arti cial</td><td>35</td></tr><tr><td>for zero shot</td><td>35</td></tr><tr><td>in signal processing</td><td>35</td></tr><tr><td>classi cation using</td><td>35</td></tr><tr><td>key laboratory for</td><td>35</td></tr><tr><td>of notre dame</td><td>35</td></tr><tr><td>the robotics institute</td><td>35</td></tr><tr><td>permission to make</td><td>34</td></tr><tr><td>to make digital</td><td>34</td></tr><tr><td>acm reference format</td><td>34</td></tr><tr><td>institute of engineering</td><td>34</td></tr><tr><td>university of bonn</td><td>34</td></tr><tr><td>for visual question</td><td>34</td></tr><tr><td>pose invariant face</td><td>34</td></tr><tr><td>university of barcelona</td><td>34</td></tr><tr><td>local binary patterns</td><td>34</td></tr><tr><td>polytechnique ed erale</td><td>34</td></tr><tr><td>advance access publication</td><td>34</td></tr><tr><td>des akademischen grades</td><td>34</td></tr><tr><td>mitsubishi electric research</td><td>34</td></tr><tr><td>onoma de barcelona</td><td>34</td></tr><tr><td>have shown that</td><td>34</td></tr><tr><td>results show that</td><td>34</td></tr><tr><td>the main paper</td><td>34</td></tr><tr><td>school of automation</td><td>34</td></tr><tr><td>of psychiatry university</td><td>34</td></tr><tr><td>dept of cse</td><td>34</td></tr><tr><td>computer engineering university</td><td>34</td></tr><tr><td>university of colorado</td><td>34</td></tr><tr><td>ministry of education</td><td>34</td></tr><tr><td>original research article</td><td>34</td></tr><tr><td>children with asd</td><td>34</td></tr><tr><td>due to 
its</td><td>34</td></tr><tr><td>for semantic segmentation</td><td>33</td></tr><tr><td>business media new</td><td>33</td></tr><tr><td>media new york</td><td>33</td></tr><tr><td>for the publications</td><td>33</td></tr><tr><td>the publications made</td><td>33</td></tr><tr><td>publications made accessible</td><td>33</td></tr><tr><td>or other copyright</td><td>33</td></tr><tr><td>it is condition</td><td>33</td></tr><tr><td>condition of accessing</td><td>33</td></tr><tr><td>publications that users</td><td>33</td></tr><tr><td>that users recognise</td><td>33</td></tr><tr><td>users recognise and</td><td>33</td></tr><tr><td>legal requirements associated</td><td>33</td></tr><tr><td>california at berkeley</td><td>33</td></tr><tr><td>copies are not</td><td>33</td></tr><tr><td>made or distributed</td><td>33</td></tr><tr><td>deep neural network</td><td>33</td></tr><tr><td>material for advertising</td><td>33</td></tr><tr><td>work in other</td><td>33</td></tr><tr><td>face recognition from</td><td>33</td></tr><tr><td>li fei fei</td><td>33</td></tr><tr><td>in the face</td><td>33</td></tr><tr><td>erale de lausanne</td><td>33</td></tr><tr><td>https tel archives</td><td>33</td></tr><tr><td>tel archives ouvertes</td><td>33</td></tr><tr><td>in the scene</td><td>33</td></tr><tr><td>states of america</td><td>33</td></tr><tr><td>electronics and information</td><td>33</td></tr><tr><td>license which permits</td><td>33</td></tr><tr><td>received december accepted</td><td>33</td></tr><tr><td>erlangung des akademischen</td><td>33</td></tr><tr><td>on face recognition</td><td>33</td></tr><tr><td>has been shown</td><td>33</td></tr><tr><td>electrical engineering university</td><td>33</td></tr><tr><td>journal on advances</td><td>33</td></tr><tr><td>electronic and information</td><td>33</td></tr><tr><td>neural network for</td><td>33</td></tr><tr><td>university of notre</td><td>33</td></tr><tr><td>image to image</td><td>33</td></tr><tr><td>the hong kong</td><td>33</td></tr><tr><td>peer reviewed 
version</td><td>32</td></tr><tr><td>by the legal</td><td>32</td></tr><tr><td>the legal requirements</td><td>32</td></tr><tr><td>with these rights</td><td>32</td></tr><tr><td>use is granted</td><td>32</td></tr><tr><td>uc san diego</td><td>32</td></tr><tr><td>of the creative</td><td>32</td></tr><tr><td>the th international</td><td>32</td></tr><tr><td>and video processing</td><td>32</td></tr><tr><td>works for resale</td><td>32</td></tr><tr><td>in other works</td><td>32</td></tr><tr><td>must be obtained</td><td>32</td></tr><tr><td>rwth aachen university</td><td>32</td></tr><tr><td>of advanced technology</td><td>32</td></tr><tr><td>this research was</td><td>32</td></tr><tr><td>support vector machines</td><td>32</td></tr><tr><td>of advanced computer</td><td>32</td></tr><tr><td>human action recognition</td><td>32</td></tr><tr><td>of computing science</td><td>32</td></tr><tr><td>enti research documents</td><td>32</td></tr><tr><td>ques de niveau</td><td>32</td></tr><tr><td>es ou non</td><td>32</td></tr><tr><td>emanant des etablissements</td><td>32</td></tr><tr><td>des etablissements enseignement</td><td>32</td></tr><tr><td>recherche fran cais</td><td>32</td></tr><tr><td>cais ou etrangers</td><td>32</td></tr><tr><td>ou etrangers des</td><td>32</td></tr><tr><td>etrangers des laboratoires</td><td>32</td></tr><tr><td>com journal sensors</td><td>32</td></tr><tr><td>for permissions please</td><td>32</td></tr><tr><td>accepted june published</td><td>32</td></tr><tr><td>new collective works</td><td>32</td></tr><tr><td>collective works for</td><td>32</td></tr><tr><td>these authors contributed</td><td>32</td></tr><tr><td>component of this</td><td>32</td></tr><tr><td>for ef cient</td><td>31</td></tr><tr><td>recognition system based</td><td>31</td></tr><tr><td>investigate your claim</td><td>31</td></tr><tr><td>or classroom use</td><td>31</td></tr><tr><td>are not made</td><td>31</td></tr><tr><td>that copies are</td><td>31</td></tr><tr><td>commercial advantage 
and</td><td>31</td></tr><tr><td>advantage and that</td><td>31</td></tr><tr><td>of the facial</td><td>31</td></tr><tr><td>multi task learning</td><td>31</td></tr><tr><td>of western australia</td><td>31</td></tr><tr><td>under the terms</td><td>31</td></tr><tr><td>and or other</td><td>31</td></tr><tr><td>or promotional purposes</td><td>31</td></tr><tr><td>and facial expression</td><td>31</td></tr><tr><td>and reggio emilia</td><td>31</td></tr><tr><td>in the image</td><td>31</td></tr><tr><td>for vision speech</td><td>31</td></tr><tr><td>electronics and computer</td><td>31</td></tr><tr><td>the author and</td><td>31</td></tr><tr><td>at the same</td><td>31</td></tr><tr><td>expressions of emotion</td><td>31</td></tr><tr><td>in the human</td><td>31</td></tr><tr><td>refers to the</td><td>31</td></tr><tr><td>song chun zhu</td><td>31</td></tr><tr><td>there has been</td><td>31</td></tr><tr><td>amit roy chowdhury</td><td>31</td></tr><tr><td>www elsevier com</td><td>31</td></tr><tr><td>york ny usa</td><td>31</td></tr><tr><td>electrical and electronics</td><td>31</td></tr><tr><td>hong kong polytechnic</td><td>31</td></tr><tr><td>for all other</td><td>31</td></tr><tr><td>of advanced research</td><td>30</td></tr><tr><td>for computer graphics</td><td>30</td></tr><tr><td>other copyright owners</td><td>30</td></tr><tr><td>we will remove</td><td>30</td></tr><tr><td>will remove access</td><td>30</td></tr><tr><td>the work immediately</td><td>30</td></tr><tr><td>http www eecs</td><td>30</td></tr><tr><td>to deal with</td><td>30</td></tr><tr><td>or distributed for</td><td>30</td></tr><tr><td>digital or hard</td><td>30</td></tr><tr><td>or hard copies</td><td>30</td></tr><tr><td>provided that copies</td><td>30</td></tr><tr><td>or commercial advantage</td><td>30</td></tr><tr><td>and luc van</td><td>30</td></tr><tr><td>pattern recognition and</td><td>30</td></tr><tr><td>video classi cation</td><td>30</td></tr><tr><td>italiano di tecnologia</td><td>30</td></tr><tr><td>resale or 
redistribution</td><td>30</td></tr><tr><td>science and research</td><td>30</td></tr><tr><td>of technology and</td><td>30</td></tr><tr><td>vision speech and</td><td>30</td></tr><tr><td>in the present</td><td>30</td></tr><tr><td>business media llc</td><td>30</td></tr><tr><td>natural language processing</td><td>30</td></tr><tr><td>this paper presents</td><td>30</td></tr><tr><td>stony brook university</td><td>30</td></tr><tr><td>boston ma usa</td><td>30</td></tr><tr><td>center for biometrics</td><td>30</td></tr><tr><td>of michigan ann</td><td>30</td></tr><tr><td>facial expressions are</td><td>30</td></tr><tr><td>college of information</td><td>30</td></tr><tr><td>of the association</td><td>30</td></tr><tr><td>the association for</td><td>30</td></tr><tr><td>in this chapter</td><td>30</td></tr><tr><td>received in revised</td><td>30</td></tr><tr><td>for fine grained</td><td>30</td></tr><tr><td>university of munich</td><td>30</td></tr><tr><td>classi cation with</td><td>30</td></tr><tr><td>hong kong china</td><td>30</td></tr><tr><td>science and applications</td><td>30</td></tr><tr><td>graphics and vision</td><td>29</td></tr><tr><td>and we will</td><td>29</td></tr><tr><td>eecs berkeley edu</td><td>29</td></tr><tr><td>of experimental psychology</td><td>29</td></tr><tr><td>work for personal</td><td>29</td></tr><tr><td>of engineering research</td><td>29</td></tr><tr><td>ibm watson research</td><td>29</td></tr><tr><td>engineering research and</td><td>29</td></tr><tr><td>for the purpose</td><td>29</td></tr><tr><td>of the paper</td><td>29</td></tr><tr><td>advertising or promotional</td><td>29</td></tr><tr><td>creating new collective</td><td>29</td></tr><tr><td>facial action unit</td><td>29</td></tr><tr><td>saarbr ucken germany</td><td>29</td></tr><tr><td>to face recognition</td><td>29</td></tr><tr><td>face recognition algorithms</td><td>29</td></tr><tr><td>institutes of advanced</td><td>29</td></tr><tr><td>technology chinese academy</td><td>29</td></tr><tr><td>fr ed 
eric</td><td>29</td></tr><tr><td>watson research center</td><td>29</td></tr><tr><td>american psychological association</td><td>29</td></tr><tr><td>advanced computer science</td><td>29</td></tr><tr><td>web of science</td><td>29</td></tr><tr><td>chen change loy</td><td>29</td></tr><tr><td>universitat aut onoma</td><td>29</td></tr><tr><td>department of mechanical</td><td>29</td></tr><tr><td>biometrics and security</td><td>29</td></tr><tr><td>contents lists available</td><td>29</td></tr><tr><td>michigan ann arbor</td><td>29</td></tr><tr><td>emotional facial expressions</td><td>29</td></tr><tr><td>an important role</td><td>29</td></tr><tr><td>school of software</td><td>29</td></tr><tr><td>based image retrieval</td><td>29</td></tr><tr><td>in the presence</td><td>29</td></tr><tr><td>der technischen universit</td><td>29</td></tr><tr><td>advances in signal</td><td>29</td></tr><tr><td>institute of psychology</td><td>29</td></tr><tr><td>and arti cial</td><td>29</td></tr><tr><td>for autism research</td><td>29</td></tr><tr><td>intelligent perception and</td><td>29</td></tr><tr><td>of california davis</td><td>29</td></tr><tr><td>received may accepted</td><td>29</td></tr><tr><td>features of the</td><td>29</td></tr><tr><td>theory of mind</td><td>29</td></tr><tr><td>the twenty sixth</td><td>29</td></tr><tr><td>twenty sixth international</td><td>29</td></tr><tr><td>sixth international joint</td><td>29</td></tr><tr><td>to the faculty</td><td>28</td></tr><tr><td>university of queensland</td><td>28</td></tr><tr><td>university of bristol</td><td>28</td></tr><tr><td>houston tx usa</td><td>28</td></tr><tr><td>report no ucb</td><td>28</td></tr><tr><td>no ucb eecs</td><td>28</td></tr><tr><td>www eecs berkeley</td><td>28</td></tr><tr><td>berkeley edu pubs</td><td>28</td></tr><tr><td>edu pubs techrpts</td><td>28</td></tr><tr><td>pubs techrpts eecs</td><td>28</td></tr><tr><td>techrpts eecs html</td><td>28</td></tr><tr><td>new york usa</td><td>28</td></tr><tr><td>new south 
wales</td><td>28</td></tr><tr><td>fellow ieee and</td><td>28</td></tr><tr><td>in the paper</td><td>28</td></tr><tr><td>and research ijsr</td><td>28</td></tr><tr><td>information technology and</td><td>28</td></tr><tr><td>university of freiburg</td><td>28</td></tr><tr><td>we propose novel</td><td>28</td></tr><tr><td>degree of master</td><td>28</td></tr><tr><td>to this article</td><td>28</td></tr><tr><td>detection and recognition</td><td>28</td></tr><tr><td>and mobile computing</td><td>28</td></tr><tr><td>of our books</td><td>28</td></tr><tr><td>our books indexed</td><td>28</td></tr><tr><td>of science core</td><td>28</td></tr><tr><td>science core collection</td><td>28</td></tr><tr><td>core collection bkci</td><td>28</td></tr><tr><td>in publishing with</td><td>28</td></tr><tr><td>jean marc odobez</td><td>28</td></tr><tr><td>published version apa</td><td>28</td></tr><tr><td>university of york</td><td>28</td></tr><tr><td>deep learning for</td><td>28</td></tr><tr><td>for biometrics and</td><td>28</td></tr><tr><td>and security research</td><td>28</td></tr><tr><td>university of british</td><td>28</td></tr><tr><td>www pnas org</td><td>28</td></tr><tr><td>human robot interaction</td><td>28</td></tr><tr><td>philadelphia pa usa</td><td>28</td></tr><tr><td>machine learning research</td><td>28</td></tr><tr><td>face to face</td><td>28</td></tr><tr><td>in an image</td><td>28</td></tr><tr><td>pose estimation and</td><td>28</td></tr><tr><td>school of economics</td><td>28</td></tr><tr><td>to the same</td><td>28</td></tr><tr><td>out of the</td><td>28</td></tr><tr><td>faculty of science</td><td>28</td></tr><tr><td>terms and conditions</td><td>28</td></tr><tr><td>year of publication</td><td>27</td></tr><tr><td>breaches copyright please</td><td>27</td></tr><tr><td>providing details and</td><td>27</td></tr><tr><td>personal or classroom</td><td>27</td></tr><tr><td>de la torre</td><td>27</td></tr><tr><td>in the last</td><td>27</td></tr><tr><td>engineering national 
university</td><td>27</td></tr><tr><td>school of eecs</td><td>27</td></tr><tr><td>of the author</td><td>27</td></tr><tr><td>engineering and information</td><td>27</td></tr><tr><td>because of the</td><td>27</td></tr><tr><td>et al eurasip</td><td>27</td></tr><tr><td>al eurasip journal</td><td>27</td></tr><tr><td>role in the</td><td>27</td></tr><tr><td>automatic face recognition</td><td>27</td></tr><tr><td>assistant professor department</td><td>27</td></tr><tr><td>pattern recognition institute</td><td>27</td></tr><tr><td>are among the</td><td>27</td></tr><tr><td>and brain sciences</td><td>27</td></tr><tr><td>institutes of health</td><td>27</td></tr><tr><td>of british columbia</td><td>27</td></tr><tr><td>permissions please email</td><td>27</td></tr><tr><td>conflict of interest</td><td>27</td></tr><tr><td>is that the</td><td>27</td></tr><tr><td>of this paper</td><td>27</td></tr><tr><td>facial expressions and</td><td>27</td></tr><tr><td>centre for vision</td><td>27</td></tr><tr><td>multiple object tracking</td><td>27</td></tr><tr><td>meeting of the</td><td>27</td></tr><tr><td>center for cognitive</td><td>27</td></tr><tr><td>cole polytechnique rale</td><td>27</td></tr><tr><td>of oriented gradients</td><td>27</td></tr><tr><td>department of automation</td><td>27</td></tr><tr><td>research on intelligent</td><td>27</td></tr><tr><td>on intelligent perception</td><td>27</td></tr><tr><td>perception and computing</td><td>27</td></tr><tr><td>university of sydney</td><td>27</td></tr><tr><td>for object recognition</td><td>27</td></tr><tr><td>reuse of any</td><td>27</td></tr><tr><td>dept of ece</td><td>27</td></tr><tr><td>to image translation</td><td>27</td></tr><tr><td>of this article</td><td>27</td></tr><tr><td>and information science</td><td>27</td></tr><tr><td>kong polytechnic university</td><td>27</td></tr><tr><td>recurrent neural networks</td><td>27</td></tr><tr><td>rale de lausanne</td><td>27</td></tr><tr><td>by the author</td><td>26</td></tr><tr><td>requirements associated 
with</td><td>26</td></tr><tr><td>access to the</td><td>26</td></tr><tr><td>semi supervised learning</td><td>26</td></tr><tr><td>part of this</td><td>26</td></tr><tr><td>center for automation</td><td>26</td></tr><tr><td>for automation research</td><td>26</td></tr><tr><td>the full citation</td><td>26</td></tr><tr><td>nature of the</td><td>26</td></tr><tr><td>allen institute for</td><td>26</td></tr><tr><td>et al for</td><td>26</td></tr><tr><td>committee on graduate</td><td>26</td></tr><tr><td>application to face</td><td>26</td></tr><tr><td>the relationship between</td><td>26</td></tr><tr><td>from single image</td><td>26</td></tr><tr><td>the human visual</td><td>26</td></tr><tr><td>university of the</td><td>26</td></tr><tr><td>shih fu chang</td><td>26</td></tr><tr><td>provided by the</td><td>26</td></tr><tr><td>link to this</td><td>26</td></tr><tr><td>saarbr cken germany</td><td>26</td></tr><tr><td>over the past</td><td>26</td></tr><tr><td>in the form</td><td>26</td></tr></table></body></html> \ No newline at end of file
diff --git a/scraper/reports/reddot.png b/scraper/reports/reddot.png
new file mode 100644
index 00000000..c414a464
--- /dev/null
+++ b/scraper/reports/reddot.png
Binary files differ
diff --git a/scraper/reports/report_coverage.html b/scraper/reports/report_coverage.html
new file mode 100644
index 00000000..51e53e72
--- /dev/null
+++ b/scraper/reports/report_coverage.html
@@ -0,0 +1 @@
+<!doctype html><html><head><meta charset='utf-8'><title>Coverage</title><link rel='stylesheet' href='reports.css'></head><body><h2>Coverage</h2><table border='1' cellpadding='3' cellspacing='3'><th>Paper ID</th><th>Megapixels Key</th><th>Megapixels Name</th><th>Report Link</th><th>PDF Link</th><th>Journal</th><th>Type</th><th>Address</th><th>Lat</th><th>Lng</th><th>Coverage</th><th>Total Citations</th><th>Geocoded Citations</th><th>Unknown Citations</th><th>Empty Citations</th><th>With PDF</th><th>With DOI</th><tr><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td><td>ucf_101</td><td>UCF101</td><td><a href="papers/b5f2846a506fc417e7da43f6a7679146d99c5e96.html">UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1212.0402.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td>edu</td><td>University of Central Florida</td><td>28.59899755</td><td>-81.19712501</td><td>54%</td><td>999</td><td>535</td><td>464</td><td>73</td><td>708</td><td>212</td></tr><tr><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td><td>afw</td><td>AFW</td><td><a href="papers/0e986f51fe45b00633de9fd0c94d082d2be51406.html">Face detection, pose estimation, and landmark localization in the wild</a></td><td><a href="http://vision.ics.uci.edu/papers/ZhuR_CVPR_2012/ZhuR_CVPR_2012.pdf">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>University of California, Irvine</td><td>33.64319010</td><td>-117.84016494</td><td>52%</td><td>999</td><td>521</td><td>478</td><td>59</td><td>607</td><td>273</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>3dddb_unconstrained</td><td>3D Dynamic</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a 
href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>ar_facedb</td><td>AR Face</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>put_face</td><td>Put Face</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td><td>pubfig</td><td>PubFig</td><td><a href="papers/759a3b3821d9f0e08e0b0a62c8b693230afc3f8d.html">Attribute and simile classifiers for face verification</a></td><td><a 
href="http://homes.cs.washington.edu/~neeraj/projects/faceverification/base/papers/nk_iccv2009_attrs.pdf">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>51%</td><td>894</td><td>454</td><td>440</td><td>55</td><td>587</td><td>222</td></tr><tr><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td><td>cohn_kanade_plus</td><td>CK+</td><td><a href="papers/4d9a02d080636e9666c4d1cc438b9893391ec6c7.html">The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</a></td><td><a href="http://www.iainm.com/iainm/Publications_files/2010_The%20Extended.pdf">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops</td><td></td><td></td><td></td><td></td><td>41%</td><td>975</td><td>403</td><td>572</td><td>65</td><td>460</td><td>345</td></tr><tr><td>2e384f057211426ac5922f1b33d2aa8df5d51f57</td><td>a_pascal_yahoo</td><td>aPascal</td><td><a href="papers/2e384f057211426ac5922f1b33d2aa8df5d51f57.html">Describing objects by their attributes</a></td><td><a href="http://www-2.cs.cmu.edu/~dhoiem/publications/cvpr2009_attributes.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>39%</td><td>999</td><td>393</td><td>606</td><td>71</td><td>727</td><td>73</td></tr><tr><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td><td>vgg_faces</td><td>VGG Face</td><td><a href="papers/162ea969d1929ed180cc6de9f0bf116993ff6e06.html">Deep Face Recognition</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf', 'linkType': 
's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>39%</td><td>999</td><td>393</td><td>606</td><td>71</td><td>621</td><td>156</td></tr><tr><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td><td>cohn_kanade</td><td>CK</td><td><a href="papers/23fc83c8cfff14a16df7ca497661264fc54ed746.html">Comprehensive Database for Facial Expression Analysis</a></td><td><a href="http://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf">[pdf]</a></td><td></td><td>edu</td><td>Carnegie Mellon University</td><td>37.41021930</td><td>-122.05965487</td><td>38%</td><td>999</td><td>380</td><td>619</td><td>75</td><td>555</td><td>252</td></tr><tr><td>01959ef569f74c286956024866c1d107099199f7</td><td>vqa</td><td>VQA</td><td><a href="papers/01959ef569f74c286956024866c1d107099199f7.html">VQA: Visual Question Answering</a></td><td><a href="http://arxiv.org/pdf/1505.00468v3.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>47%</td><td>731</td><td>344</td><td>387</td><td>47</td><td>628</td><td>4</td></tr><tr><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td><td>celeba</td><td>CelebA</td><td><a href="papers/6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4.html">Deep Learning Face Attributes in the Wild</a></td><td><a href="http://arxiv.org/pdf/1411.7766v2.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>42%</td><td>808</td><td>340</td><td>468</td><td>69</td><td>666</td><td>50</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>cmu_pie</td><td>CMU PIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html">International Conference on Automatic Face and Gesture Recognition The CMU Pose , Illumination , and Expression ( PIE ) Database</a></td><td><a 
href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>742</td><td>330</td><td>412</td><td>61</td><td>410</td><td>232</td></tr><tr><td>abe9f3b91fd26fa1b50cd685c0d20debfb372f73</td><td>voc</td><td>VOC</td><td><a href="papers/abe9f3b91fd26fa1b50cd685c0d20debfb372f73.html">The Pascal Visual Object Classes Challenge: A Retrospective</a></td><td><a href="http://homepages.inf.ed.ac.uk/ckiw/postscript/ijcv_voc14.pdf">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>32%</td><td>999</td><td>315</td><td>684</td><td>75</td><td>698</td><td>6</td></tr><tr><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td><td>jaffe</td><td>JAFFE</td><td><a href="papers/45c31cde87258414f33412b3b12fc5bec7cb3ba9.html">Coding Facial Expressions with Gabor Wavelets</a></td><td><a href="http://pdfs.semanticscholar.org/45c3/1cde87258414f33412b3b12fc5bec7cb3ba9.pdf">[pdf]</a></td><td></td><td>edu</td><td>Kyushu University</td><td>33.59914655</td><td>130.22359848</td><td>36%</td><td>848</td><td>308</td><td>540</td><td>56</td><td>413</td><td>255</td></tr><tr><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td><td>tiny_images</td><td>Tiny Images</td><td><a href="papers/31b58ced31f22eab10bd3ee2d9174e7c14c27c01.html">Nonparametric Object and Scene Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/31b5/8ced31f22eab10bd3ee2d9174e7c14c27c01.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>31%</td><td>999</td><td>305</td><td>694</td><td>93</td><td>670</td><td>9</td></tr><tr><td>140438a77a771a8fb656b39a78ff488066eb6b50</td><td>lfw_p</td><td>LFWP</td><td><a href="papers/140438a77a771a8fb656b39a78ff488066eb6b50.html">Localizing Parts of Faces Using a Consensus of Exemplars</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2011.5995602">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine 
Intelligence</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>53%</td><td>521</td><td>274</td><td>247</td><td>40</td><td>321</td><td>144</td></tr><tr><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td><td>frgc</td><td>FRGC</td><td><a href="papers/18ae7c9a4bbc832b8b14bc4122070d7939f5e00e.html">Overview of the face recognition grand challenge</a></td><td><a href="http://www3.nd.edu/~kwb/PhillipsEtAlCVPR_2005.pdf">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td>25%</td><td>999</td><td>253</td><td>746</td><td>110</td><td>572</td><td>64</td></tr><tr><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td><td>cityscapes</td><td>Cityscapes</td><td><a href="papers/32cde90437ab5a70cf003ea36f66f2de0e24b3ab.html">The Cityscapes Dataset for Semantic Urban Scene Understanding</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1604.01685.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>33%</td><td>771</td><td>252</td><td>519</td><td>54</td><td>622</td><td>0</td></tr><tr><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td><td>cityscapes</td><td>Cityscapes</td><td><a href="papers/32cde90437ab5a70cf003ea36f66f2de0e24b3ab.html">The Cityscapes Dataset for Semantic Urban Scene Understanding</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1604.01685.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>33%</td><td>771</td><td>252</td><td>519</td><td>54</td><td>622</td><td>0</td></tr><tr><td>2ad0ee93d029e790ebb50574f403a09854b65b7e</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/2ad0ee93d029e790ebb50574f403a09854b65b7e.html">Acquiring linear subspaces for face recognition under variable lighting</a></td><td><a 
href="http://vision.cornell.edu/se3/wp-content/uploads/2014/09/pami05.pdf">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>25%</td><td>999</td><td>251</td><td>748</td><td>110</td><td>509</td><td>113</td></tr><tr><td>2ad0ee93d029e790ebb50574f403a09854b65b7e</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/2ad0ee93d029e790ebb50574f403a09854b65b7e.html">Acquiring linear subspaces for face recognition under variable lighting</a></td><td><a href="http://vision.cornell.edu/se3/wp-content/uploads/2014/09/pami05.pdf">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>25%</td><td>999</td><td>251</td><td>748</td><td>110</td><td>509</td><td>113</td></tr><tr><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td><td>youtube_faces</td><td>YouTubeFaces</td><td><a href="papers/560e0e58d0059259ddf86fcec1fa7975dee6a868.html">Face recognition in unconstrained videos with matched background similarity</a></td><td><a href="http://www.cs.tau.ac.il/~wolf/papers/lvfw.pdf">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>Open University of Israel</td><td>32.77824165</td><td>34.99565673</td><td>50%</td><td>485</td><td>244</td><td>240</td><td>32</td><td>290</td><td>140</td></tr><tr><td>3607afdb204de9a5a9300ae98aa4635d9effcda2</td><td>sheffield</td><td>Sheffield Face</td><td><a href="papers/3607afdb204de9a5a9300ae98aa4635d9effcda2.html">Face Description with Local Binary Patterns: Application to Face Recognition</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2006.244">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>24%</td><td>999</td><td>238</td><td>761</td><td>65</td><td>483</td><td>87</td></tr><tr><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td><td>casia_webface</td><td>CASIA Webface</td><td><a 
href="papers/853bd61bc48a431b9b1c7cab10c603830c488e39.html">Learning Face Representation from Scratch</a></td><td><a href="http://pdfs.semanticscholar.org/b8a2/0ed7e74325da76d7183d1ab77b082a92b447.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>53%</td><td>436</td><td>233</td><td>203</td><td>32</td><td>284</td><td>115</td></tr><tr><td>2830fb5282de23d7784b4b4bc37065d27839a412</td><td>h3d</td><td>H3D</td><td><a href="papers/2830fb5282de23d7784b4b4bc37065d27839a412.html">Poselets: Body part detectors trained using 3D human pose annotations</a></td><td><a href="http://vision.stanford.edu/teaching/cs231b_spring1213/papers/ICCV09_BourdevMalik.pdf">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>32%</td><td>707</td><td>223</td><td>484</td><td>62</td><td>482</td><td>18</td></tr><tr><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td><td>inria_person</td><td>INRIA Pedestrian</td><td><a href="papers/10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5.html">Histograms of oriented gradients for human detection</a></td><td><a href="http://nichol.as/papers/Dalai/Histograms%20of%20oriented%20gradients%20for%20human%20detection.pdf">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td>22%</td><td>999</td><td>217</td><td>782</td><td>67</td><td>520</td><td>22</td></tr><tr><td>55206f0b5f57ce17358999145506cd01e570358c</td><td>orl</td><td>ORL</td><td><a href="papers/55206f0b5f57ce17358999145506cd01e570358c.html">O M 4 . 1 The Subject Database 4 . 2 Experiment Plan 5 . 
1 Varying the Overlap 4 Experimental Setup 5 Parameterisation Results</a></td><td><a href="http://pdfs.semanticscholar.org/5520/6f0b5f57ce17358999145506cd01e570358c.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>21%</td><td>999</td><td>214</td><td>785</td><td>96</td><td>550</td><td>57</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph</td><td>MORPH Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td>46%</td><td>424</td><td>195</td><td>229</td><td>27</td><td>231</td><td>155</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph_nc</td><td>MORPH Non-Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td>46%</td><td>424</td><td>195</td><td>229</td><td>27</td><td>231</td><td>155</td></tr><tr><td>93884e46c49f7ae1c7c34046fbc28882f2bd6341</td><td>kdef</td><td>KDEF</td><td><a href="papers/93884e46c49f7ae1c7c34046fbc28882f2bd6341.html">Gaze fixation and the neural circuitry of face processing in autism</a></td><td><a href="{'url': 'http://doi.org/10.1038/nn1421', 'linkType': 'nature'}">[pdf]</a></td><td>Nature Neuroscience</td><td></td><td></td><td></td><td></td><td>31%</td><td>608</td><td>190</td><td>418</td><td>92</td><td>463</td><td>0</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mafl</td><td>MAFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html">Facial 
Landmark Detection by Deep Multi-task Learning</a></td><td><a href="http://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf">[pdf]</a></td><td></td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>48%</td><td>383</td><td>182</td><td>201</td><td>25</td><td>259</td><td>60</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mafl</td><td>MAFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="http://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf">[pdf]</a></td><td></td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>48%</td><td>383</td><td>182</td><td>201</td><td>25</td><td>259</td><td>60</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mtfl</td><td>MTFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="http://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf">[pdf]</a></td><td></td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>48%</td><td>383</td><td>182</td><td>201</td><td>25</td><td>259</td><td>60</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mtfl</td><td>MTFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="http://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf">[pdf]</a></td><td></td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>48%</td><td>383</td><td>182</td><td>201</td><td>25</td><td>259</td><td>60</td></tr><tr><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td><td>cuhk03</td><td>CUHK03</td><td><a 
href="papers/6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3.html">DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Li_DeepReID_Deep_Filter_2014_CVPR_paper.pdf">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>35%</td><td>512</td><td>180</td><td>332</td><td>29</td><td>323</td><td>4</td></tr><tr><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td><td>mpii_human_pose</td><td>MPII Human Pose</td><td><a href="papers/3325860c0c82a93b2eac654f5324dd6a776f609e.html">2D Human Pose Estimation: New Benchmark and State of the Art Analysis</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909866', 'linkType': 'ieee'}">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>50%</td><td>356</td><td>179</td><td>177</td><td>21</td><td>299</td><td>3</td></tr><tr><td>95f12d27c3b4914e0668a268360948bce92f7db3</td><td>helen</td><td>Helen</td><td><a href="papers/95f12d27c3b4914e0668a268360948bce92f7db3.html">Interactive Facial Feature Localization</a></td><td><a href="http://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>40.11116745</td><td>-88.22587665</td><td>52%</td><td>339</td><td>177</td><td>162</td><td>27</td><td>208</td><td>100</td></tr><tr><td>2724ba85ec4a66de18da33925e537f3902f21249</td><td>cofw</td><td>COFW</td><td><a href="papers/2724ba85ec4a66de18da33925e537f3902f21249.html">Robust Face Landmark Estimation under Occlusion</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751298', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 IEEE International Conference on Computer 
Vision</td><td></td><td></td><td></td><td></td><td>55%</td><td>305</td><td>167</td><td>138</td><td>16</td><td>186</td><td>95</td></tr><tr><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td><td>viper</td><td>VIPeR</td><td><a href="papers/6273b3491e94ea4dd1ce42b791d77bdc96ee73a8.html">Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</a></td><td><a href="http://pdfs.semanticscholar.org/6273/b3491e94ea4dd1ce42b791d77bdc96ee73a8.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>27%</td><td>584</td><td>159</td><td>425</td><td>38</td><td>336</td><td>9</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>aflw</td><td>AFLW</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>53%</td><td>292</td><td>155</td><td>137</td><td>38</td><td>207</td><td>59</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>imm_face</td><td>IMM Face Dataset</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>53%</td><td>292</td><td>155</td><td>137</td><td>38</td><td>207</td><td>59</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>muct</td><td>MUCT</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><a 
href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>53%</td><td>292</td><td>155</td><td>137</td><td>38</td><td>207</td><td>59</td></tr><tr><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/4308bd8c28e37e2ed9a3fcfe74d5436cce34b410.html">Scalable Person Re-identification: A Benchmark</a></td><td><a href="https://www.microsoft.com/en-us/research/wp-content/uploads/2017/01/ICCV15-ReIDDataset.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>38%</td><td>394</td><td>149</td><td>245</td><td>18</td><td>271</td><td>3</td></tr><tr><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td><a href="papers/2a75f34663a60ab1b04a0049ed1d14335129e908.html">Web-based database for facial expression analysis</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/PanticEtAl-ICME2005-final.pdf">[pdf]</a></td><td>2005 IEEE International Conference on Multimedia and Expo</td><td></td><td></td><td></td><td></td><td>32%</td><td>440</td><td>142</td><td>298</td><td>44</td><td>258</td><td>82</td></tr><tr><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td><td>bfm</td><td>BFM</td><td><a href="papers/639937b3a1b8bded3f7e9a40e85bd3770016cf3c.html">A 3D Face Model for Pose and Illumination Invariant Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/6399/37b3a1b8bded3f7e9a40e85bd3770016cf3c.pdf">[pdf]</a></td><td>2009 Sixth IEEE International Conference on Advanced Video and Signal Based Surveillance</td><td></td><td></td><td></td><td></td><td>41%</td><td>323</td><td>131</td><td>192</td><td>29</td><td>221</td><td>25</td></tr><tr><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td><td>bu_3dfe</td><td>BU-3DFE</td><td><a 
href="papers/cc589c499dcf323fe4a143bbef0074c3e31f9b60.html">A 3D facial expression database for facial behavior research</a></td><td><a href="http://www.cs.binghamton.edu/~lijun/Research/3DFE/Yin_FGR06_a.pdf">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td>edu</td><td>SUNY Binghamton</td><td>42.08779975</td><td>-75.97066066</td><td>24%</td><td>555</td><td>131</td><td>424</td><td>47</td><td>283</td><td>48</td></tr><tr><td>696ca58d93f6404fea0fc75c62d1d7b378f47628</td><td>coco</td><td>COCO</td><td><a href="papers/696ca58d93f6404fea0fc75c62d1d7b378f47628.html">Microsoft COCO Captions: Data Collection and Evaluation Server</a></td><td><a href="http://pdfs.semanticscholar.org/ba95/81c33a7eebe87c50e61763e4c8d1723538f2.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>46%</td><td>283</td><td>129</td><td>154</td><td>16</td><td>231</td><td>4</td></tr><tr><td>4053e3423fb70ad9140ca89351df49675197196a</td><td>bio_id</td><td>BioID Face</td><td><a href="papers/4053e3423fb70ad9140ca89351df49675197196a.html">Robust Face Detection Using the Hausdorff Distance</a></td><td><a href="http://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>26%</td><td>498</td><td>127</td><td>371</td><td>55</td><td>319</td><td>32</td></tr><tr><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td><td>rafd</td><td>RaFD</td><td><a href="papers/3765df816dc5a061bc261e190acc8bdd9d47bec0.html">Presentation and validation of the Radboud Faces Database</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>28%</td><td>446</td><td>127</td><td>319</td><td>43</td><td>307</td><td>19</td></tr><tr><td>2fda164863a06a92d3a910b96eef927269aeb730</td><td>names_and_faces_news</td><td>News Dataset</td><td><a 
href="papers/2fda164863a06a92d3a910b96eef927269aeb730.html">Names and faces in the news</a></td><td><a href="http://www.cs.utexas.edu/~grauman/courses/spring2007/395T/papers/berg_names_and_faces.pdf">[pdf]</a></td><td>Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.</td><td></td><td></td><td></td><td></td><td>41%</td><td>294</td><td>120</td><td>174</td><td>24</td><td>207</td><td>45</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>278</td><td>119</td><td>159</td><td>12</td><td>198</td><td>2</td></tr><tr><td>2258e01865367018ed6f4262c880df85b94959f8</td><td>mot</td><td>MOT</td><td><a href="papers/2258e01865367018ed6f4262c880df85b94959f8.html">Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</a></td><td><a href="http://pdfs.semanticscholar.org/2e0b/00f4043e2d4b04c59c88bb54bcd907d0dcd4.pdf">[pdf]</a></td><td>EURASIP J. Image and Video Processing</td><td></td><td></td><td></td><td></td><td>20%</td><td>586</td><td>119</td><td>467</td><td>48</td><td>336</td><td>3</td></tr><tr><td>2258e01865367018ed6f4262c880df85b94959f8</td><td>mot</td><td>MOT</td><td><a href="papers/2258e01865367018ed6f4262c880df85b94959f8.html">Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</a></td><td><a href="http://pdfs.semanticscholar.org/2e0b/00f4043e2d4b04c59c88bb54bcd907d0dcd4.pdf">[pdf]</a></td><td>EURASIP J. 
Image and Video Processing</td><td></td><td></td><td></td><td></td><td>20%</td><td>586</td><td>119</td><td>467</td><td>48</td><td>336</td><td>3</td></tr><tr><td>2258e01865367018ed6f4262c880df85b94959f8</td><td>mot</td><td>MOT</td><td><a href="papers/2258e01865367018ed6f4262c880df85b94959f8.html">Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</a></td><td><a href="http://pdfs.semanticscholar.org/2e0b/00f4043e2d4b04c59c88bb54bcd907d0dcd4.pdf">[pdf]</a></td><td>EURASIP J. Image and Video Processing</td><td></td><td></td><td></td><td></td><td>20%</td><td>586</td><td>119</td><td>467</td><td>48</td><td>336</td><td>3</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>278</td><td>119</td><td>159</td><td>12</td><td>198</td><td>2</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>278</td><td>119</td><td>159</td><td>12</td><td>198</td><td>2</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html">Learning to parse images of articulated bodies</a></td><td><a 
href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>31%</td><td>373</td><td>117</td><td>256</td><td>30</td><td>238</td><td>2</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html">Learning to parse images of articulated bodies</a></td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>31%</td><td>373</td><td>117</td><td>256</td><td>30</td><td>238</td><td>2</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_campus</td><td>TUD-Campus</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html">People-tracking-by-detection and people-detection-by-tracking</a></td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>22%</td><td>529</td><td>116</td><td>413</td><td>41</td><td>316</td><td>1</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_crossing</td><td>TUD-Crossing</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html">People-tracking-by-detection and people-detection-by-tracking</a></td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>22%</td><td>529</td><td>116</td><td>413</td><td>41</td><td>316</td><td>1</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html">People-tracking-by-detection and people-detection-by-tracking</a></td><td><a 
href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>22%</td><td>529</td><td>116</td><td>413</td><td>41</td><td>316</td><td>1</td></tr><tr><td>140c95e53c619eac594d70f6369f518adfea12ef</td><td>ijb_a</td><td>IJB-A</td><td><a href="papers/140c95e53c619eac594d70f6369f518adfea12ef.html">Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_089_ext.pdf">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>48%</td><td>222</td><td>107</td><td>115</td><td>21</td><td>158</td><td>48</td></tr><tr><td>4c170a0dcc8de75587dae21ca508dab2f9343974</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/4c170a0dcc8de75587dae21ca508dab2f9343974.html">FaceTracer: A Search Engine for Large Collections of Images with Faces</a></td><td><a href="http://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf">[pdf]</a></td><td></td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>48%</td><td>218</td><td>105</td><td>113</td><td>17</td><td>146</td><td>52</td></tr><tr><td>4c170a0dcc8de75587dae21ca508dab2f9343974</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/4c170a0dcc8de75587dae21ca508dab2f9343974.html">FaceTracer: A Search Engine for Large Collections of Images with Faces</a></td><td><a href="http://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf">[pdf]</a></td><td></td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>48%</td><td>218</td><td>105</td><td>113</td><td>17</td><td>146</td><td>52</td></tr><tr><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td><td>berkeley_pose</td><td>BPAD</td><td><a 
href="papers/7808937b46acad36e43c30ae4e9f3fd57462853d.html">Describing people: A poselet-based approach to attribute classification</a></td><td><a href="http://ttic.uchicago.edu/~smaji/papers/attributes-iccv11.pdf">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>43%</td><td>221</td><td>96</td><td>125</td><td>14</td><td>160</td><td>23</td></tr><tr><td>e8de844fefd54541b71c9823416daa238be65546</td><td>visual_phrases</td><td>Phrasal Recognition</td><td><a href="papers/e8de844fefd54541b71c9823416daa238be65546.html">Recognition using visual phrases</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995711', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 2011</td><td></td><td></td><td></td><td></td><td>41%</td><td>233</td><td>95</td><td>138</td><td>18</td><td>174</td><td>5</td></tr><tr><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td><td>prid</td><td>PRID</td><td><a href="papers/16c7c31a7553d99f1837fc6e88e77b5ccbb346b8.html">Person Re-identification by Descriptive and Discriminative Classification</a></td><td><a href="http://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>27%</td><td>352</td><td>94</td><td>258</td><td>26</td><td>195</td><td>3</td></tr><tr><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/1dc35905a1deff8bc74688f2d7e2f48fd2273275.html">Pedestrian detection: A benchmark</a></td><td><a href="http://vision.ucsd.edu/~pdollar/files/papers/DollarCVPR09peds.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>17%</td><td>519</td><td>89</td><td>430</td><td>27</td><td>286</td><td>2</td></tr><tr><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a 
href="papers/1dc35905a1deff8bc74688f2d7e2f48fd2273275.html">Pedestrian detection: A benchmark</a></td><td><a href="http://vision.ucsd.edu/~pdollar/files/papers/DollarCVPR09peds.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>17%</td><td>519</td><td>89</td><td>430</td><td>27</td><td>286</td><td>2</td></tr><tr><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td><td>coco_qa</td><td>COCO QA</td><td><a href="papers/35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62.html">Exploring Models and Data for Image Question Answering</a></td><td><a href="http://pdfs.semanticscholar.org/aa79/9c29c0d44ece1864467af520fe70540c069b.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>191</td><td>83</td><td>108</td><td>12</td><td>163</td><td>1</td></tr><tr><td>291265db88023e92bb8c8e6390438e5da148e8f5</td><td>msceleb</td><td>MsCeleb</td><td><a href="papers/291265db88023e92bb8c8e6390438e5da148e8f5.html">MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>167</td><td>83</td><td>84</td><td>15</td><td>131</td><td>27</td></tr><tr><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td><a href="papers/13f06b08f371ba8b5d31c3e288b4deb61335b462.html">Depth and Appearance for Mobile Scene Analysis</a></td><td><a href="http://www.mmp.rwth-aachen.de/publications/pdf/ess-depthandappearance-iccv07.pdf/at_download/file">[pdf]</a></td><td>2007 IEEE 11th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>25%</td><td>319</td><td>79</td><td>240</td><td>27</td><td>192</td><td>0</td></tr><tr><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td><td>wider_face</td><td>WIDER FACE</td><td><a 
href="papers/52d7eb0fbc3522434c13cc247549f74bb9609c5d.html">WIDER FACE: A Face Detection Benchmark</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1511.06523.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>53%</td><td>148</td><td>78</td><td>70</td><td>16</td><td>107</td><td>34</td></tr><tr><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td><td>cas_peal</td><td>CAS-PEAL</td><td><a href="papers/2485c98aa44131d1a2f7d1355b1e372f2bb148ad.html">The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</a></td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics - Part A: Systems and Humans</td><td></td><td></td><td></td><td></td><td>18%</td><td>415</td><td>76</td><td>339</td><td>39</td><td>182</td><td>35</td></tr><tr><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td><td>jpl_pose</td><td>JPL-Interaction dataset</td><td><a href="papers/1aad2da473888cb7ebc1bfaa15bfa0f1502ce005.html">First-Person Activity Recognition: What Are They Doing to Me?</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Ryoo_First-Person_Activity_Recognition_2013_CVPR_paper.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>51%</td><td>148</td><td>76</td><td>72</td><td>8</td><td>109</td><td>3</td></tr><tr><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td><td>m2vts</td><td>m2vts</td><td><a href="papers/2485c98aa44131d1a2f7d1355b1e372f2bb148ad.html">The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</a></td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics - Part A: Systems and 
Humans</td><td></td><td></td><td></td><td></td><td>18%</td><td>415</td><td>76</td><td>339</td><td>39</td><td>182</td><td>35</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_multiview</td><td>TUD-Multiview</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html">Monocular 3D pose estimation and tracking by detection</a></td><td><a href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>25%</td><td>302</td><td>76</td><td>226</td><td>32</td><td>199</td><td>1</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html">Monocular 3D pose estimation and tracking by detection</a></td><td><a href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>25%</td><td>302</td><td>76</td><td>226</td><td>32</td><td>199</td><td>1</td></tr><tr><td>133f01aec1534604d184d56de866a4bd531dac87</td><td>lfw_a</td><td>LFW-a</td><td><a href="papers/133f01aec1534604d184d56de866a4bd531dac87.html">Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.230">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>42%</td><td>177</td><td>75</td><td>102</td><td>15</td><td>102</td><td>54</td></tr><tr><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td><td>adience</td><td>Adience</td><td><a href="papers/1be498d4bbc30c3bfd0029114c784bc2114d67c0.html">Age and Gender Estimation of Unfiltered 
Faces</a></td><td><a href="http://www.openu.ac.il/home/hassner/Adience/EidingerEnbarHassner_tifs.pdf">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td></td><td></td><td></td><td></td><td>43%</td><td>168</td><td>72</td><td>96</td><td>7</td><td>89</td><td>53</td></tr><tr><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td><td>images_of_groups</td><td>Images of Groups</td><td><a href="papers/21d9d0deed16f0ad62a4865e9acf0686f4f15492.html">Understanding images of groups of people</a></td><td><a href="http://amp.ece.cmu.edu/people/Andy/Andy_files/cvpr09.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>36%</td><td>202</td><td>72</td><td>130</td><td>12</td><td>126</td><td>24</td></tr><tr><td>18010284894ed0edcca74e5bf768ee2e15ef7841</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/18010284894ed0edcca74e5bf768ee2e15ef7841.html">DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780493', 'linkType': 'ieee'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>47%</td><td>150</td><td>71</td><td>79</td><td>4</td><td>111</td><td>8</td></tr><tr><td>18010284894ed0edcca74e5bf768ee2e15ef7841</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/18010284894ed0edcca74e5bf768ee2e15ef7841.html">DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780493', 'linkType': 'ieee'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition 
(CVPR)</td><td></td><td></td><td></td><td></td><td>47%</td><td>150</td><td>71</td><td>79</td><td>4</td><td>111</td><td>8</td></tr><tr><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td><a href="papers/4e4746094bf60ee83e40d8597a6191e463b57f76.html">Learning effective human pose estimation from inaccurate annotation</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995318', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 2011</td><td></td><td></td><td></td><td></td><td>40%</td><td>173</td><td>70</td><td>103</td><td>9</td><td>116</td><td>2</td></tr><tr><td>2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9.html">Generic object recognition with boosting</a></td><td><a href="http://www.emt.tu-graz.ac.at/~pinz/onlinepapers/Reprint_Vol_28_No_3_2006.pdf">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>24%</td><td>286</td><td>69</td><td>217</td><td>16</td><td>189</td><td>0</td></tr><tr><td>2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9.html">Generic object recognition with boosting</a></td><td><a href="http://www.emt.tu-graz.ac.at/~pinz/onlinepapers/Reprint_Vol_28_No_3_2006.pdf">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>24%</td><td>286</td><td>69</td><td>217</td><td>16</td><td>189</td><td>0</td></tr><tr><td>2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9.html">Generic object recognition with boosting</a></td><td><a href="http://www.emt.tu-graz.ac.at/~pinz/onlinepapers/Reprint_Vol_28_No_3_2006.pdf">[pdf]</a></td><td>IEEE Transactions on 
Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>24%</td><td>286</td><td>69</td><td>217</td><td>16</td><td>189</td><td>0</td></tr><tr><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td><td>cuhk01</td><td>CUHK01</td><td><a href="papers/44484d2866f222bbb9b6b0870890f9eea1ffb2d0.html">Human Reidentification with Transferred Metric Learning</a></td><td><a href="http://pdfs.semanticscholar.org/4448/4d2866f222bbb9b6b0870890f9eea1ffb2d0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>26%</td><td>258</td><td>67</td><td>191</td><td>12</td><td>141</td><td>1</td></tr><tr><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td><td>megaface</td><td>MegaFace</td><td><a href="papers/96e0cfcd81cdeb8282e29ef9ec9962b125f379b0.html">The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.527">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Washington</td><td>47.65432380</td><td>-122.30800894</td><td>55%</td><td>121</td><td>66</td><td>55</td><td>11</td><td>98</td><td>20</td></tr><tr><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td><td>megaface</td><td>MegaFace</td><td><a href="papers/96e0cfcd81cdeb8282e29ef9ec9962b125f379b0.html">The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.527">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Washington</td><td>47.65432380</td><td>-122.30800894</td><td>55%</td><td>121</td><td>66</td><td>55</td><td>11</td><td>98</td><td>20</td></tr><tr><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td><td>towncenter</td><td>TownCenter</td><td><a href="papers/9361b784e73e9238d5cefbea5ac40d35d1e3103f.html">Stable Multi-Target Tracking in Real-Time Surveillance Video (Preprint)</a></td><td><a 
href="http://pdfs.semanticscholar.org/9361/b784e73e9238d5cefbea5ac40d35d1e3103f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>21%</td><td>310</td><td>64</td><td>246</td><td>24</td><td>177</td><td>4</td></tr><tr><td>10195a163ab6348eef37213a46f60a3d87f289c5</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/10195a163ab6348eef37213a46f60a3d87f289c5.html">Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</a></td><td><a href="https://doi.org/10.1007/s11263-016-0940-3">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>44%</td><td>133</td><td>59</td><td>74</td><td>14</td><td>90</td><td>28</td></tr><tr><td>10195a163ab6348eef37213a46f60a3d87f289c5</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/10195a163ab6348eef37213a46f60a3d87f289c5.html">Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</a></td><td><a href="https://doi.org/10.1007/s11263-016-0940-3">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>44%</td><td>133</td><td>59</td><td>74</td><td>14</td><td>90</td><td>28</td></tr><tr><td>2acf7e58f0a526b957be2099c10aab693f795973</td><td>bosphorus</td><td>The Bosphorus</td><td><a href="papers/2acf7e58f0a526b957be2099c10aab693f795973.html">Bosphorus Database for 3D Face Analysis</a></td><td><a href="http://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>18%</td><td>328</td><td>58</td><td>270</td><td>18</td><td>143</td><td>37</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html">Performance Measures and a Data Set for Multi-target, Multi-camera Tracking</a></td><td><a 
href="http://pdfs.semanticscholar.org/b5f2/4f49f9a5e47d6601399dc068158ad88d7651.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>136</td><td>58</td><td>78</td><td>6</td><td>107</td><td>0</td></tr><tr><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td><td>unbc_shoulder_pain</td><td>UNBC-McMaster Pain</td><td><a href="papers/56ffa7d906b08d02d6d5a12c7377a57e24ef3391.html">Painful data: The UNBC-McMaster shoulder pain expression archive database</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771462', 'linkType': 'ieee'}">[pdf]</a></td><td>Face and Gesture 2011</td><td></td><td></td><td></td><td></td><td>32%</td><td>184</td><td>58</td><td>126</td><td>23</td><td>112</td><td>23</td></tr><tr><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td><td>cuhk02</td><td>CUHK02</td><td><a href="papers/38b55d95189c5e69cf4ab45098a48fba407609b4.html">Locally Aligned Feature Transforms across Views</a></td><td><a href="http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989d594.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>24%</td><td>242</td><td>57</td><td>185</td><td>17</td><td>139</td><td>1</td></tr><tr><td>2f5d44dc3e1b5955942133ff872ebd31716ec604</td><td>frav3d</td><td>FRAV3D</td><td><a href="papers/2f5d44dc3e1b5955942133ff872ebd31716ec604.html">2D and 3D face recognition: A survey</a></td><td><a href="http://pdfs.semanticscholar.org/2f5d/44dc3e1b5955942133ff872ebd31716ec604.pdf">[pdf]</a></td><td>Pattern Recognition Letters</td><td></td><td></td><td></td><td></td><td>15%</td><td>389</td><td>57</td><td>332</td><td>28</td><td>198</td><td>17</td></tr><tr><td>a6e695ddd07aad719001c0fc1129328452385949</td><td>yfcc_100m</td><td>YFCC100M</td><td><a href="papers/a6e695ddd07aad719001c0fc1129328452385949.html">The New Data and New Challenges in Multimedia Research</a></td><td><span 
class="gray">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>36%</td><td>160</td><td>57</td><td>103</td><td>11</td><td>105</td><td>4</td></tr><tr><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td><td>face_scrub</td><td>FaceScrub</td><td><a href="papers/0d3bb75852098b25d90f31d2f48fd0cb4944702b.html">A data-driven approach to cleaning large face datasets</a></td><td><a href="https://doi.org/10.1109/ICIP.2014.7025068">[pdf]</a></td><td>2014 IEEE International Conference on Image Processing (ICIP)</td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>40.11116745</td><td>-88.22587665</td><td>46%</td><td>123</td><td>56</td><td>67</td><td>6</td><td>95</td><td>21</td></tr><tr><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td><a href="papers/b91f54e1581fbbf60392364323d00a0cd43e493c.html">A high-resolution spontaneous 3D dynamic facial expression database</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553788', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>SUNY Binghamton</td><td>42.08779975</td><td>-75.97066066</td><td>36%</td><td>151</td><td>54</td><td>97</td><td>7</td><td>85</td><td>26</td></tr><tr><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td><td>soton</td><td>SOTON HiD</td><td><a href="papers/4f93cd09785c6e77bf4bc5a788e079df524c8d21.html">On a large sequence-based human gait database</a></td><td><a href="http://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>36%</td><td>148</td><td>54</td><td>94</td><td>16</td><td>98</td><td>0</td></tr><tr><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td><td>mpii_gaze</td><td>MPIIGaze</td><td><a href="papers/0df0d1adea39a5bef318b74faa37de7f3e00b452.html">Appearance-based gaze estimation in the wild</a></td><td><a 
href="https://scalable.mpi-inf.mpg.de/files/2015/09/zhang_CVPR15.pdf">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Max Planck Institute for Informatics</td><td>49.25795660</td><td>7.04577417</td><td>38%</td><td>138</td><td>52</td><td>86</td><td>3</td><td>94</td><td>7</td></tr><tr><td>e4754afaa15b1b53e70743880484b8d0736990ff</td><td>fiw_300</td><td>300-W</td><td><a href="papers/e4754afaa15b1b53e70743880484b8d0736990ff.html">300 Faces In-The-Wild Challenge: database and results</a></td><td><a href="{'url': 'http://doi.org/10.1016/j.imavis.2016.01.002', 'linkType': 'doi'}">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td>45%</td><td>114</td><td>51</td><td>63</td><td>10</td><td>70</td><td>31</td></tr><tr><td>e4754afaa15b1b53e70743880484b8d0736990ff</td><td>fiw_300</td><td>300-W</td><td><a href="papers/e4754afaa15b1b53e70743880484b8d0736990ff.html">300 Faces In-The-Wild Challenge: database and results</a></td><td><a href="{'url': 'http://doi.org/10.1016/j.imavis.2016.01.002', 'linkType': 'doi'}">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td>45%</td><td>114</td><td>51</td><td>63</td><td>10</td><td>70</td><td>31</td></tr><tr><td>e4754afaa15b1b53e70743880484b8d0736990ff</td><td>fiw_300</td><td>300-W</td><td><a href="papers/e4754afaa15b1b53e70743880484b8d0736990ff.html">300 Faces In-The-Wild Challenge: database and results</a></td><td><a href="{'url': 'http://doi.org/10.1016/j.imavis.2016.01.002', 'linkType': 'doi'}">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td>45%</td><td>114</td><td>51</td><td>63</td><td>10</td><td>70</td><td>31</td></tr><tr><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td><td>mars</td><td>MARS</td><td><a href="papers/c0387e788a52f10bf35d4d50659cfa515d89fbec.html">MARS: A Video Benchmark for Large-Scale Person Re-Identification</a></td><td><a 
href="http://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>146</td><td>49</td><td>97</td><td>6</td><td>96</td><td>0</td></tr><tr><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td><td>hipsterwars</td><td>Hipsterwars</td><td><a href="papers/04c2cda00e5536f4b1508cbd80041e9552880e67.html">Hipster Wars: Discovering Elements of Fashion Styles</a></td><td><a href="http://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf">[pdf]</a></td><td></td><td>edu</td><td>Tohoku University</td><td>38.25309450</td><td>140.87365930</td><td>53%</td><td>91</td><td>48</td><td>43</td><td>5</td><td>60</td><td>15</td></tr><tr><td>109df0e8e5969ddf01e073143e83599228a1163f</td><td>multi_pie</td><td>MULTIPIE</td><td><a href="papers/109df0e8e5969ddf01e073143e83599228a1163f.html">Scheduling heterogeneous multi-cores through performance impact estimation (PIE)</a></td><td><a href="http://dl.acm.org/citation.cfm?id=2337184">[pdf]</a></td><td>2012 39th Annual International Symposium on Computer Architecture (ISCA)</td><td></td><td></td><td></td><td></td><td>25%</td><td>192</td><td>48</td><td>144</td><td>8</td><td>99</td><td>0</td></tr><tr><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td><td>ucf_crowd</td><td>UCF-CC-50</td><td><a href="papers/32c801cb7fbeb742edfd94cccfca4934baec71da.html">Multi-source Multi-scale Counting in Extremely Dense Crowd Images</a></td><td><a href="http://www.cs.ucf.edu/~haroon/datafiles/Idrees_Counting_CVPR_2013.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>38%</td><td>125</td><td>48</td><td>77</td><td>6</td><td>72</td><td>1</td></tr><tr><td>66e6f08873325d37e0ec20a4769ce881e04e964e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/66e6f08873325d37e0ec20a4769ce881e04e964e.html">The SUN Attribute Database: Beyond Categories for Deeper Scene 
Understanding</a></td><td><a href="http://www.cc.gatech.edu/~hays/papers/attribute_ijcv.pdf">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>38%</td><td>112</td><td>43</td><td>69</td><td>14</td><td>83</td><td>2</td></tr><tr><td>66e6f08873325d37e0ec20a4769ce881e04e964e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/66e6f08873325d37e0ec20a4769ce881e04e964e.html">The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</a></td><td><a href="http://www.cc.gatech.edu/~hays/papers/attribute_ijcv.pdf">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>38%</td><td>112</td><td>43</td><td>69</td><td>14</td><td>83</td><td>2</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_brussels</td><td>TUD-Brussels</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html">Multi-cue onboard pedestrian detection</a></td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>19%</td><td>217</td><td>41</td><td>176</td><td>14</td><td>131</td><td>1</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_motionpairs</td><td>TUD-Motionparis</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html">Multi-cue onboard pedestrian detection</a></td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>19%</td><td>217</td><td>41</td><td>176</td><td>14</td><td>131</td><td>1</td></tr><tr><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td><td>chokepoint</td><td>ChokePoint</td><td><a href="papers/0486214fb58ee9a04edfe7d6a74c6d0f661a7668.html">Patch-based probabilistic image quality assessment for 
face selection and improved video-based face recognition</a></td><td><a href="http://conradsanderson.id.au/pdfs/wong_face_selection_cvpr_biometrics_2011.pdf">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>University of Queensland</td><td>-27.49741805</td><td>153.01316956</td><td>30%</td><td>128</td><td>39</td><td>89</td><td>6</td><td>68</td><td>14</td></tr><tr><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td><td>peta</td><td>PETA</td><td><a href="papers/2a4bbee0b4cf52d5aadbbc662164f7efba89566c.html">Pedestrian Attribute Recognition At Far Distance</a></td><td><a href="http://personal.ie.cuhk.edu.hk/~pluo/pdf/mm14.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>80</td><td>37</td><td>43</td><td>2</td><td>51</td><td>3</td></tr><tr><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td><a href="papers/636b8ffc09b1b23ff714ac8350bb35635e49fa3c.html">Pruning training sets for learning of object categories</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1467308', 'linkType': 'ieee'}">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td>58%</td><td>60</td><td>35</td><td>25</td><td>5</td><td>42</td><td>12</td></tr><tr><td>3b5b6d19d4733ab606c39c69a889f9e67967f151</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/3b5b6d19d4733ab606c39c69a889f9e67967f151.html">Multi-camera activity correlation analysis</a></td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0163.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>25%</td><td>138</td><td>35</td><td>103</td><td>8</td><td>76</td><td>1</td></tr><tr><td>3b5b6d19d4733ab606c39c69a889f9e67967f151</td><td>qmul_grid</td><td>GRID</td><td><a 
href="papers/3b5b6d19d4733ab606c39c69a889f9e67967f151.html">Multi-camera activity correlation analysis</a></td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0163.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>25%</td><td>138</td><td>35</td><td>103</td><td>8</td><td>76</td><td>1</td></tr><tr><td>214c966d1f9c2a4b66f4535d9a0d4078e63a5867</td><td>brainwash</td><td>Brainwash</td><td><a href="papers/214c966d1f9c2a4b66f4535d9a0d4078e63a5867.html">Brainwash: A Data System for Feature Engineering</a></td><td><a href="http://pdfs.semanticscholar.org/ae44/8015b2ff2bd3b8a5c9a3266f954f5af9ffa9.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>57</td><td>34</td><td>23</td><td>2</td><td>50</td><td>0</td></tr><tr><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td><td>stickmen_family</td><td>We Are Family Stickmen</td><td><a href="papers/0dc11a37cadda92886c56a6fb5191ded62099c28.html">We Are Family: Joint Pose Estimation of Multiple Persons</a></td><td><a href="http://pdfs.semanticscholar.org/0dc1/1a37cadda92886c56a6fb5191ded62099c28.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>77</td><td>34</td><td>43</td><td>4</td><td>57</td><td>1</td></tr><tr><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td><td>cfd</td><td>CFD</td><td><a href="papers/4df3143922bcdf7db78eb91e6b5359d6ada004d2.html">The Chicago face database: A free stimulus set of faces and norming data.</a></td><td><a href="http://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td>39%</td><td>83</td><td>32</td><td>51</td><td>2</td><td>62</td><td>3</td></tr><tr><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td><td>emotio_net</td><td>EmotioNet Database</td><td><a href="papers/c900e0ad4c95948baaf0acd8449fde26f9b4952a.html">EmotioNet: An Accurate, Real-Time 
Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780969', 'linkType': 'ieee'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>44%</td><td>72</td><td>32</td><td>40</td><td>7</td><td>54</td><td>8</td></tr><tr><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td><td>feret</td><td>FERET</td><td><a href="papers/0c4a139bb87c6743c7905b29a3cfec27a5130652.html">The FERET Verification Testing Protocol for Face Recognition Algorithms</a></td><td><a href="http://pdfs.semanticscholar.org/0c4a/139bb87c6743c7905b29a3cfec27a5130652.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>28%</td><td>112</td><td>31</td><td>81</td><td>12</td><td>76</td><td>4</td></tr><tr><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td><td>feret</td><td>FERET</td><td><a href="papers/0c4a139bb87c6743c7905b29a3cfec27a5130652.html">The FERET Verification Testing Protocol for Face Recognition Algorithms</a></td><td><a href="http://pdfs.semanticscholar.org/0c4a/139bb87c6743c7905b29a3cfec27a5130652.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>28%</td><td>112</td><td>31</td><td>81</td><td>12</td><td>76</td><td>4</td></tr><tr><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td><td>feret</td><td>FERET</td><td><a href="papers/0c4a139bb87c6743c7905b29a3cfec27a5130652.html">The FERET Verification Testing Protocol for Face Recognition Algorithms</a></td><td><a href="http://pdfs.semanticscholar.org/0c4a/139bb87c6743c7905b29a3cfec27a5130652.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>28%</td><td>112</td><td>31</td><td>81</td><td>12</td><td>76</td><td>4</td></tr><tr><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td><td>feret</td><td>FERET</td><td><a href="papers/0c4a139bb87c6743c7905b29a3cfec27a5130652.html">The FERET Verification Testing Protocol for Face 
Recognition Algorithms</a></td><td><a href="http://pdfs.semanticscholar.org/0c4a/139bb87c6743c7905b29a3cfec27a5130652.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>28%</td><td>112</td><td>31</td><td>81</td><td>12</td><td>76</td><td>4</td></tr><tr><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td><td>tvhi</td><td>TVHI</td><td><a href="papers/3cd40bfa1ff193a96bde0207e5140a399476466c.html">High Five: Recognising human interactions in TV shows</a></td><td><a href="http://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>91</td><td>31</td><td>60</td><td>11</td><td>64</td><td>1</td></tr><tr><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td><td>afad</td><td>AFAD</td><td><a href="papers/6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c.html">Ordinal Regression with Multiple Output CNN for Age Estimation</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.532">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>44%</td><td>68</td><td>30</td><td>38</td><td>8</td><td>49</td><td>7</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>miw</td><td>MIW</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html">Automatic facial makeup detection with application in face recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>39.65404635</td><td>-79.96475355</td><td>65%</td><td>46</td><td>30</td><td>16</td><td>1</td><td>18</td><td>21</td></tr><tr><td>0a85bdff552615643dd74646ac881862a7c7072d</td><td>pipa</td><td>PIPA</td><td><a href="papers/0a85bdff552615643dd74646ac881862a7c7072d.html">Beyond frontal faces: Improving Person Recognition using multiple 
cues</a></td><td><a href="https://doi.org/10.1109/CVPR.2015.7299113">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>60%</td><td>50</td><td>30</td><td>19</td><td>2</td><td>40</td><td>4</td></tr><tr><td>51eba481dac6b229a7490f650dff7b17ce05df73</td><td>imsitu</td><td>imSitu</td><td><a href="papers/51eba481dac6b229a7490f650dff7b17ce05df73.html">Situation Recognition: Visual Semantic Role Labeling for Image Understanding</a></td><td><a href="http://grail.cs.washington.edu/wp-content/uploads/2016/09/yatskar2016srv.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Washington</td><td>47.65432380</td><td>-122.30800894</td><td>60%</td><td>48</td><td>29</td><td>19</td><td>2</td><td>45</td><td>2</td></tr><tr><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td><td>lfw</td><td>LFW</td><td><a href="papers/7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22.html">Labeled Faces in the Wild: A Survey</a></td><td><a href="http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>29%</td><td>99</td><td>29</td><td>70</td><td>9</td><td>63</td><td>12</td></tr><tr><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td><td>lfw</td><td>LFW</td><td><a href="papers/7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22.html">Labeled Faces in the Wild: A Survey</a></td><td><a href="http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>29%</td><td>99</td><td>29</td><td>70</td><td>9</td><td>63</td><td>12</td></tr><tr><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td><td>lfw</td><td>LFW</td><td><a href="papers/7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22.html">Labeled Faces in the Wild: A Survey</a></td><td><a 
href="http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>29%</td><td>99</td><td>29</td><td>70</td><td>9</td><td>63</td><td>12</td></tr><tr><td>2bf8541199728262f78d4dced6fb91479b39b738</td><td>clothing_co_parsing</td><td>CCP</td><td><a href="papers/2bf8541199728262f78d4dced6fb91479b39b738.html">Clothing Co-parsing by Joint Image Segmentation and Labeling</a></td><td><a href="https://arxiv.org/pdf/1502.00739v1.pdf">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>47%</td><td>60</td><td>28</td><td>32</td><td>0</td><td>36</td><td>6</td></tr><tr><td>42505464808dfb446f521fc6ff2cfeffd4d68ff1</td><td>gavab_db</td><td>Gavab</td><td><a href="papers/42505464808dfb446f521fc6ff2cfeffd4d68ff1.html">Expression invariant 3D face recognition with a Morphable Model</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813376', 'linkType': 'ieee'}">[pdf]</a></td><td>2008 8th IEEE International Conference on Automatic Face & Gesture Recognition</td><td></td><td></td><td></td><td></td><td>29%</td><td>94</td><td>27</td><td>67</td><td>10</td><td>57</td><td>5</td></tr><tr><td>066000d44d6691d27202896691f08b27117918b9</td><td>psu</td><td>PSU</td><td><a href="papers/066000d44d6691d27202896691f08b27117918b9.html">Vision-Based Analysis of Small Groups in Pedestrian Crowds</a></td><td><a href="http://vision.cse.psu.edu/publications/pdfs/GeCollinsRubackPAMI2011.pdf">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>18%</td><td>151</td><td>27</td><td>124</td><td>9</td><td>78</td><td>2</td></tr><tr><td>3b4ec8af470948a72a6ed37a9fd226719a874ebc</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/3b4ec8af470948a72a6ed37a9fd226719a874ebc.html">A Spatio-Temporal Appearance Representation for Video-Based Pedestrian 
Re-Identification</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.434">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>32%</td><td>85</td><td>27</td><td>58</td><td>9</td><td>51</td><td>0</td></tr><tr><td>3b4ec8af470948a72a6ed37a9fd226719a874ebc</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/3b4ec8af470948a72a6ed37a9fd226719a874ebc.html">A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.434">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>32%</td><td>85</td><td>27</td><td>58</td><td>9</td><td>51</td><td>0</td></tr><tr><td>3b4ec8af470948a72a6ed37a9fd226719a874ebc</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/3b4ec8af470948a72a6ed37a9fd226719a874ebc.html">A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.434">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>32%</td><td>85</td><td>27</td><td>58</td><td>9</td><td>51</td><td>0</td></tr><tr><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td><td>am_fed</td><td>AM-FED</td><td><a href="papers/47aeb3b82f54b5ae8142b4bdda7b614433e69b9a.html">Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</a></td><td><a href="http://pdfs.semanticscholar.org/5d06/437656dd94616d7d87260d5eb77513ded30f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>36%</td><td>73</td><td>26</td><td>47</td><td>6</td><td>39</td><td>16</td></tr><tr><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td><td>wider</td><td>WIDER</td><td><a 
href="papers/356b431d4f7a2a0a38cf971c84568207dcdbf189.html">Recognize complex events from static images by fusing deep channels</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Xiong_Recognize_Complex_Events_2015_CVPR_paper.pdf">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Shenzhen Institutes of Advanced Technology</td><td>22.59805605</td><td>113.98533784</td><td>58%</td><td>45</td><td>26</td><td>19</td><td>1</td><td>30</td><td>12</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>facebook_100</td><td>Facebook100</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5981788', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>42.36782045</td><td>-71.12666653</td><td>50%</td><td>50</td><td>25</td><td>25</td><td>3</td><td>39</td><td>4</td></tr><tr><td>0b84f07af44f964817675ad961def8a51406dd2e</td><td>prw</td><td>PRW</td><td><a href="papers/0b84f07af44f964817675ad961def8a51406dd2e.html">Person Re-identification in the Wild</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.357">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Technology Sydney</td><td>-33.88096510</td><td>151.20107299</td><td>38%</td><td>65</td><td>25</td><td>40</td><td>1</td><td>46</td><td>0</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>pubfig_83</td><td>pubfig83</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><a href="{'url': 
'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5981788', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>42.36782045</td><td>-71.12666653</td><td>50%</td><td>50</td><td>25</td><td>25</td><td>3</td><td>39</td><td>4</td></tr><tr><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td><td>3d_rma</td><td>3D-RMA</td><td><a href="papers/2160788824c4c29ffe213b2cbeb3f52972d73f37.html">Automatic 3D face authentication</a></td><td><a href="http://pdfs.semanticscholar.org/2160/788824c4c29ffe213b2cbeb3f52972d73f37.pdf">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td>25%</td><td>95</td><td>24</td><td>71</td><td>8</td><td>60</td><td>2</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>vmu</td><td>VMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td>49%</td><td>49</td><td>24</td><td>25</td><td>0</td><td>18</td><td>22</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td>49%</td><td>49</td><td>24</td><td>25</td><td>0</td><td>18</td><td>22</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>youtube_makeup</td><td>YMU</td><td><a 
href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td>49%</td><td>49</td><td>24</td><td>25</td><td>0</td><td>18</td><td>22</td></tr><tr><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td><td>d3dfacs</td><td>D3DFACS</td><td><a href="papers/070de852bc6eb275d7ca3a9cdde8f6be8795d1a3.html">A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</a></td><td><a href="http://www.cs.bath.ac.uk/~dpc/D3DFACS/ICCV_final_2011.pdf">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td>edu</td><td>Jacobs University</td><td>53.41291480</td><td>-2.96897915</td><td>44%</td><td>52</td><td>23</td><td>29</td><td>5</td><td>37</td><td>4</td></tr><tr><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td><td>penn_fudan</td><td>Penn Fudan</td><td><a href="papers/3394168ff0719b03ff65bcea35336a76b21fe5e4.html">Object Detection Combining Recognition and Segmentation</a></td><td><a href="http://pdfs.semanticscholar.org/f531/a554cade14b9b340de6730683a28c292dd74.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>23%</td><td>101</td><td>23</td><td>78</td><td>11</td><td>58</td><td>0</td></tr><tr><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td><td>3dpes</td><td>3DPeS</td><td><a href="papers/2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e.html">3DPeS: 3D people dataset for surveillance and forensics</a></td><td><a href="http://doi.acm.org/10.1145/2072572.2072590">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>18%</td><td>122</td><td>22</td><td>100</td><td>11</td><td>71</td><td>1</td></tr><tr><td>eb027969f9310e0ae941e2adee2d42cdf07d938c</td><td>vgg_faces2</td><td>VGG 
Face2</td><td><a href="papers/eb027969f9310e0ae941e2adee2d42cdf07d938c.html">VGGFace2: A Dataset for Recognising Faces across Pose and Age</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1710.08092.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)</td><td>edu</td><td>University of Oxford</td><td>51.75345380</td><td>-1.25400997</td><td>38%</td><td>56</td><td>21</td><td>35</td><td>6</td><td>50</td><td>3</td></tr><tr><td>f1af714b92372c8e606485a3982eab2f16772ad8</td><td>mug_faces</td><td>MUG Faces</td><td><a href="papers/f1af714b92372c8e606485a3982eab2f16772ad8.html">The MUG facial expression database</a></td><td><a href="http://ieeexplore.ieee.org/document/5617662/">[pdf]</a></td><td>11th International Workshop on Image Analysis for Multimedia Interactive Services WIAMIS 10</td><td>edu</td><td>Aristotle University of Thessaloniki</td><td>40.62984145</td><td>22.95889350</td><td>28%</td><td>68</td><td>19</td><td>49</td><td>5</td><td>28</td><td>19</td></tr><tr><td>31b05f65405534a696a847dd19c621b7b8588263</td><td>umd_faces</td><td>UMD</td><td><a href="papers/31b05f65405534a696a847dd19c621b7b8588263.html">UMDFaces: An annotated face dataset for training deep networks</a></td><td><a href="http://arxiv.org/abs/1611.01484">[pdf]</a></td><td>2017 IEEE International Joint Conference on Biometrics (IJCB)</td><td></td><td></td><td></td><td></td><td>54%</td><td>35</td><td>19</td><td>16</td><td>5</td><td>28</td><td>6</td></tr><tr><td>31b05f65405534a696a847dd19c621b7b8588263</td><td>umd_faces</td><td>UMD</td><td><a href="papers/31b05f65405534a696a847dd19c621b7b8588263.html">UMDFaces: An annotated face dataset for training deep networks</a></td><td><a href="http://arxiv.org/abs/1611.01484">[pdf]</a></td><td>2017 IEEE International Joint Conference on Biometrics 
(IJCB)</td><td></td><td></td><td></td><td></td><td>54%</td><td>35</td><td>19</td><td>16</td><td>5</td><td>28</td><td>6</td></tr><tr><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td><a href="papers/8b2dd5c61b23ead5ae5508bb8ce808b5ea266730.html">The intrinsic memorability of face photographs.</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Journal of experimental psychology. General</td><td></td><td></td><td></td><td></td><td>36%</td><td>47</td><td>17</td><td>30</td><td>3</td><td>33</td><td>1</td></tr><tr><td>69a68f9cf874c69e2232f47808016c2736b90c35</td><td>celeba_plus</td><td>CelebFaces+</td><td><a href="papers/69a68f9cf874c69e2232f47808016c2736b90c35.html">Learning Deep Representation for Imbalanced Classification</a></td><td><a href="http://personal.ie.cuhk.edu.hk/~ccloy/files/cvpr_2016_imbalanced.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Shenzhen Institutes of Advanced Technology</td><td>22.59805605</td><td>113.98533784</td><td>33%</td><td>51</td><td>17</td><td>34</td><td>1</td><td>39</td><td>2</td></tr><tr><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td><td>mapillary</td><td>Mapillary</td><td><a href="papers/79828e6e9f137a583082b8b5a9dfce0c301989b8.html">The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237796', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>39%</td><td>44</td><td>17</td><td>27</td><td>0</td><td>36</td><td>0</td></tr><tr><td>5194cbd51f9769ab25260446b4fa17204752e799</td><td>violent_flows</td><td>Violent Flows</td><td><a href="papers/5194cbd51f9769ab25260446b4fa17204752e799.html">Violent flows: Real-time detection of 
violent crowd behavior</a></td><td><a href="http://www.wisdom.weizmann.ac.il/mathusers/kliper/Papers/violent_flows.pdf">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td>20%</td><td>83</td><td>17</td><td>66</td><td>6</td><td>42</td><td>2</td></tr><tr><td>20388099cc415c772926e47bcbbe554e133343d1</td><td>cafe</td><td>CAFE</td><td><a href="papers/20388099cc415c772926e47bcbbe554e133343d1.html">The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</a></td><td><a href="http://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>33</td><td>16</td><td>17</td><td>3</td><td>28</td><td>1</td></tr><tr><td>c34532fe6bfbd1e6df477c9ffdbb043b77e7804d</td><td>columbia_gaze</td><td>Columbia Gaze</td><td><a href="papers/c34532fe6bfbd1e6df477c9ffdbb043b77e7804d.html">A 3D Morphable Eye Region Model for Gaze Estimation</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/0d43/3b9435b874a1eea6d7999e86986c910fa285.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Carnegie Mellon University</td><td>37.41021930</td><td>-122.05965487</td><td>67%</td><td>24</td><td>16</td><td>8</td><td>0</td><td>18</td><td>4</td></tr><tr><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td><td>fia</td><td>CMU FiA</td><td><a href="papers/47662d1a368daf70ba70ef2d59eb6209f98b675d.html">The CMU Face In Action (FIA) Database</a></td><td><a href="http://pdfs.semanticscholar.org/bb47/a03401811f9d2ca2da12138697acbc7b97a3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>29%</td><td>55</td><td>16</td><td>39</td><td>5</td><td>38</td><td>7</td></tr><tr><td>213a579af9e4f57f071b884aa872651372b661fd</td><td>bbc_pose</td><td>BBC Pose</td><td><a href="papers/213a579af9e4f57f071b884aa872651372b661fd.html">Automatic and Efficient Human Pose 
Estimation for Sign Language Videos</a></td><td><a href="https://doi.org/10.1007/s11263-013-0672-6">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>60%</td><td>25</td><td>15</td><td>10</td><td>1</td><td>18</td><td>1</td></tr><tr><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td><td>immediacy</td><td>Immediacy</td><td><a href="papers/1e3df3ca8feab0b36fd293fe689f93bb2aaac591.html">Multi-task Recurrent Neural Network for Immediacy Prediction</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.383">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>60%</td><td>25</td><td>15</td><td>10</td><td>2</td><td>20</td><td>0</td></tr><tr><td>a5a44a32a91474f00a3cda671a802e87c899fbb4</td><td>moments_in_time</td><td>Moments in Time</td><td><a href="papers/a5a44a32a91474f00a3cda671a802e87c899fbb4.html">Moments in Time Dataset: one million videos for event understanding</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1801.03150.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>60%</td><td>25</td><td>15</td><td>10</td><td>2</td><td>25</td><td>0</td></tr><tr><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td><td>coco_action</td><td>COCO-a</td><td><a href="papers/4946ba10a4d5a7d0a38372f23e6622bd347ae273.html">RONCHI AND PERONA: DESCRIBING COMMON HUMAN VISUAL ACTIONS IN IMAGES 1 Describing Common Human Visual Actions in Images</a></td><td><a href="http://pdfs.semanticscholar.org/b38d/cf5fa5174c0d718d65cc4f3889b03c4a21df.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>26</td><td>14</td><td>12</td><td>0</td><td>25</td><td>0</td></tr><tr><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td><td>large_scale_person_search</td><td>Large Scale Person Search</td><td><a href="papers/2161f6b7ee3c0acc81603b01dc0df689683577b9.html">End-to-End Deep Learning for Person 
Search</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf', 'linkType': 's2'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>34%</td><td>41</td><td>14</td><td>27</td><td>2</td><td>27</td><td>0</td></tr><tr><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td><td>youtube_poses</td><td>YouTube Pose</td><td><a href="papers/1c2802c2199b6d15ecefe7ba0c39bfe44363de38.html">Personalizing Human Video Pose Estimation</a></td><td><a href="http://arxiv.org/pdf/1511.06676v1.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>44%</td><td>32</td><td>14</td><td>18</td><td>2</td><td>27</td><td>0</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_large</td><td>Large MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>28</td><td>13</td><td>15</td><td>4</td><td>24</td><td>3</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_small</td><td>Small MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>28</td><td>13</td><td>15</td><td>4</td><td>24</td><td>3</td></tr><tr><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td><td>umb</td><td>UMB</td><td><a href="papers/16e8b0a1e8451d5f697b94c0c2b32a00abee1d52.html">UMB-DB: A database of partially occluded 3D 
faces</a></td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130509">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>29%</td><td>45</td><td>13</td><td>32</td><td>2</td><td>20</td><td>3</td></tr><tr><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td><td>b3d_ac</td><td>B3D(AC)</td><td><a href="papers/d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae.html">A 3-D Audio-Visual Corpus of Affective Communication</a></td><td><a href="http://files.is.tue.mpg.de/jgall/download/jgall_avcorpus_mm10.pdf">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td>31%</td><td>39</td><td>12</td><td>27</td><td>2</td><td>27</td><td>7</td></tr><tr><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td><td>kinectface</td><td>KinectFaceDB</td><td><a href="papers/0b440695c822a8e35184fb2f60dcdaa8a6de84ae.html">KinectFaceDB: A Kinect Database for Face Recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6866883', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics: Systems</td><td></td><td></td><td></td><td></td><td>16%</td><td>75</td><td>12</td><td>63</td><td>6</td><td>25</td><td>8</td></tr><tr><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td><td>malf</td><td>MALF</td><td><a href="papers/45e616093a92e5f1e61a7c6037d5f637aa8964af.html">Fine-grained evaluation on face detection in the wild</a></td><td><a href="http://www.cs.toronto.edu/~byang/papers/malf_fg15.pdf">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>Chinese Academy of Sciences</td><td>40.00447950</td><td>116.37023800</td><td>71%</td><td>17</td><td>12</td><td>5</td><td>0</td><td>13</td><td>4</td></tr><tr><td>09d78009687bec46e70efcf39d4612822e61cb8c</td><td>raid</td><td>RAiD</td><td><a 
href="papers/09d78009687bec46e70efcf39d4612822e61cb8c.html">Consistent Re-identification in a Camera Network</a></td><td><a href="http://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>27%</td><td>45</td><td>12</td><td>33</td><td>7</td><td>34</td><td>1</td></tr><tr><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td><td>saivt</td><td>SAIVT SoftBio</td><td><a href="papers/22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b.html">A Database for Person Re-Identification in Multi-Camera Surveillance Networks</a></td><td><a href="http://eprints.qut.edu.au/53437/3/Bialkowski_Database4PersonReID_DICTA.pdf">[pdf]</a></td><td>2012 International Conference on Digital Image Computing Techniques and Applications (DICTA)</td><td></td><td></td><td></td><td></td><td>21%</td><td>58</td><td>12</td><td>46</td><td>7</td><td>40</td><td>1</td></tr><tr><td>44d23df380af207f5ac5b41459c722c87283e1eb</td><td>wider_attribute</td><td>WIDER Attribute</td><td><a href="papers/44d23df380af207f5ac5b41459c722c87283e1eb.html">Human Attribute Recognition by Deep Hierarchical Contexts</a></td><td><a href="https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>67%</td><td>18</td><td>12</td><td>6</td><td>0</td><td>16</td><td>0</td></tr><tr><td>221c18238b829c12b911706947ab38fd017acef7</td><td>rap_pedestrian</td><td>RAP</td><td><a href="papers/221c18238b829c12b911706947ab38fd017acef7.html">A Richly Annotated Dataset for Pedestrian Attribute Recognition</a></td><td><a 
href="http://pdfs.semanticscholar.org/221c/18238b829c12b911706947ab38fd017acef7.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>52%</td><td>21</td><td>11</td><td>10</td><td>0</td><td>18</td><td>0</td></tr><tr><td>4d58f886f5150b2d5e48fd1b5a49e09799bf895d</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/4d58f886f5150b2d5e48fd1b5a49e09799bf895d.html">Texas 3D Face Recognition Database</a></td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ssiai_may10.pdf">[pdf]</a></td><td>2010 IEEE Southwest Symposium on Image Analysis & Interpretation (SSIAI)</td><td></td><td></td><td></td><td></td><td>18%</td><td>61</td><td>11</td><td>50</td><td>3</td><td>36</td><td>2</td></tr><tr><td>4d58f886f5150b2d5e48fd1b5a49e09799bf895d</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/4d58f886f5150b2d5e48fd1b5a49e09799bf895d.html">Texas 3D Face Recognition Database</a></td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ssiai_may10.pdf">[pdf]</a></td><td>2010 IEEE Southwest Symposium on Image Analysis & Interpretation (SSIAI)</td><td></td><td></td><td></td><td></td><td>18%</td><td>61</td><td>11</td><td>50</td><td>3</td><td>36</td><td>2</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>apis</td><td>APiS1.0</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td>38%</td><td>26</td><td>10</td><td>16</td><td>1</td><td>13</td><td>2</td></tr><tr><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td><td>bp4d_plus</td><td>BP4D+</td><td><a href="papers/53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4.html">Multimodal Spontaneous Emotion Corpus for 
Human Behavior Analysis</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhang_Multimodal_Spontaneous_Emotion_CVPR_2016_paper.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>25%</td><td>40</td><td>10</td><td>30</td><td>0</td><td>20</td><td>6</td></tr><tr><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td><td>ferplus</td><td>FER+</td><td><a href="papers/298cbc3dfbbb3a20af4eed97906650a4ea1c29e0.html">Training deep networks for facial expression recognition with crowd-sourced label distribution</a></td><td><a href="http://arxiv.org/pdf/1608.01041v1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>29</td><td>10</td><td>19</td><td>0</td><td>15</td><td>3</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>svs</td><td>SVS</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td>38%</td><td>26</td><td>10</td><td>16</td><td>1</td><td>13</td><td>2</td></tr><tr><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td><td>50_people_one_question</td><td>50 People One Question</td><td><a href="papers/5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725.html">Merging Pose Estimates Across Space and Time</a></td><td><a href="http://pdfs.semanticscholar.org/63b2/f5348af0f969dfc2afb4977732393c6459ec.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>15</td><td>9</td><td>6</td><td>0</td><td>11</td><td>2</td></tr><tr><td>6dcf418c778f528b5792104760f1fbfe90c6dd6a</td><td>agedb</td><td>AgeDB</td><td><a 
href="papers/6dcf418c778f528b5792104760f1fbfe90c6dd6a.html">AgeDB: The First Manually Collected, In-the-Wild Age Database</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>82%</td><td>11</td><td>9</td><td>2</td><td>0</td><td>10</td><td>0</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>casablanca</td><td>Casablanca</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html">Context-Aware CNNs for Person Head Detection</a></td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>33%</td><td>27</td><td>9</td><td>18</td><td>1</td><td>22</td><td>0</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>hollywood_headset</td><td>HollywoodHeads</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html">Context-Aware CNNs for Person Head Detection</a></td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>33%</td><td>27</td><td>9</td><td>18</td><td>1</td><td>22</td><td>0</td></tr><tr><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td><td>iit_dehli_ear</td><td>IIT Dehli Ear</td><td><a href="papers/faf40ce28857aedf183e193486f5b4b0a8c478a2.html">Automated Human Identification Using Ear Imaging</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>13%</td><td>70</td><td>9</td><td>61</td><td>6</td><td>28</td><td>1</td></tr><tr><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td><td>imfdb</td><td>IMFDB</td><td><a 
href="papers/ca3e88d87e1344d076c964ea89d91a75c417f5ee.html">Indian Movie Face Database: A benchmark for face recognition under wide variations</a></td><td><span class="gray">[pdf]</span></td><td>2013 Fourth National Conference on Computer Vision, Pattern Recognition, Image Processing and Graphics (NCVPRIPG)</td><td></td><td></td><td></td><td></td><td>60%</td><td>15</td><td>9</td><td>6</td><td>0</td><td>10</td><td>4</td></tr><tr><td>7ace44190729927e5cb0dd5d363fcae966fe13f7</td><td>nudedetection</td><td>Nude Detection</td><td><a href="papers/7ace44190729927e5cb0dd5d363fcae966fe13f7.html">A bag-of-features approach based on Hue-SIFT descriptor for nude detection</a></td><td><a href="http://ieeexplore.ieee.org/document/7077625/">[pdf]</a></td><td>2009 17th European Signal Processing Conference</td><td></td><td></td><td></td><td></td><td>18%</td><td>51</td><td>9</td><td>42</td><td>1</td><td>18</td><td>0</td></tr><tr><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td><td>dartmouth_children</td><td>Dartmouth Children</td><td><a href="papers/4e6ee936eb50dd032f7138702fa39b7c18ee8907.html">The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</a></td><td><a href="http://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>20</td><td>8</td><td>12</td><td>2</td><td>16</td><td>0</td></tr><tr><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td><td>nd_2006</td><td>ND-2006</td><td><a href="papers/fd8168f1c50de85bac58a8d328df0a50248b16ae.html">Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</a></td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4401928">[pdf]</a></td><td>2007 First IEEE International Conference on Biometrics: Theory, Applications, and 
Systems</td><td></td><td></td><td></td><td></td><td>25%</td><td>32</td><td>8</td><td>24</td><td>3</td><td>16</td><td>1</td></tr><tr><td>774cbb45968607a027ae4729077734db000a1ec5</td><td>urban_tribes</td><td>Urban Tribes</td><td><a href="papers/774cbb45968607a027ae4729077734db000a1ec5.html">From Bikers to Surfers: Visual Recognition of Urban Tribes</a></td><td><a href="http://pdfs.semanticscholar.org/774c/bb45968607a027ae4729077734db000a1ec5.pdf">[pdf]</a></td><td></td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>47%</td><td>17</td><td>8</td><td>9</td><td>1</td><td>12</td><td>1</td></tr><tr><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/2624d84503bc2f8e190e061c5480b6aa4d89277a.html">AFEW-VA database for valence and arousal estimation in-the-wild</a></td><td><a href="http://pdfs.semanticscholar.org/2624/d84503bc2f8e190e061c5480b6aa4d89277a.pdf">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td>47%</td><td>15</td><td>7</td><td>8</td><td>1</td><td>10</td><td>3</td></tr><tr><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/2624d84503bc2f8e190e061c5480b6aa4d89277a.html">AFEW-VA database for valence and arousal estimation in-the-wild</a></td><td><a href="http://pdfs.semanticscholar.org/2624/d84503bc2f8e190e061c5480b6aa4d89277a.pdf">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td>47%</td><td>15</td><td>7</td><td>8</td><td>1</td><td>10</td><td>3</td></tr><tr><td>84fe5b4ac805af63206012d29523a1e033bc827e</td><td>awe_ears</td><td>AWE Ears</td><td><a href="papers/84fe5b4ac805af63206012d29523a1e033bc827e.html">Ear recognition: More than a survey</a></td><td><a 
href="http://pdfs.semanticscholar.org/84fe/5b4ac805af63206012d29523a1e033bc827e.pdf">[pdf]</a></td><td>Neurocomputing</td><td></td><td></td><td></td><td></td><td>29%</td><td>24</td><td>7</td><td>17</td><td>0</td><td>11</td><td>0</td></tr><tr><td>5801690199c1917fa58c35c3dead177c0b8f9f2d</td><td>camel</td><td>CAMEL</td><td><a href="papers/5801690199c1917fa58c35c3dead177c0b8f9f2d.html">Application of Object Based Classification and High Resolution Satellite Imagery for Savanna Ecosystem Analysis</a></td><td><a href="http://pdfs.semanticscholar.org/5801/690199c1917fa58c35c3dead177c0b8f9f2d.pdf">[pdf]</a></td><td>Remote Sensing</td><td></td><td></td><td></td><td></td><td>37%</td><td>19</td><td>7</td><td>12</td><td>1</td><td>16</td><td>0</td></tr><tr><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td><td>ilids_mcts</td><td></td><td><a href="papers/0297448f3ed948e136bb06ceff10eccb34e5bb77.html">Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</a></td><td><span class="gray">[pdf]</span></td><td>Proceedings 40th Annual 2006 International Carnahan Conference on Security Technology</td><td></td><td></td><td></td><td></td><td>22%</td><td>32</td><td>7</td><td>25</td><td>2</td><td>17</td><td>0</td></tr><tr><td>ec792ad2433b6579f2566c932ee414111e194537</td><td>msmt_17</td><td>MSMT17</td><td><a href="papers/ec792ad2433b6579f2566c932ee414111e194537.html">Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</a></td><td><a href="https://arxiv.org/pdf/1711.08565.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>50%</td><td>14</td><td>7</td><td>7</td><td>1</td><td>11</td><td>0</td></tr><tr><td>b92a1ed9622b8268ae3ac9090e25789fc41cc9b8</td><td>pornodb</td><td>Pornography DB</td><td><a href="papers/b92a1ed9622b8268ae3ac9090e25789fc41cc9b8.html">Pooling in image representation: The visual codeword point of view</a></td><td><a 
href="http://pdfs.semanticscholar.org/b92a/1ed9622b8268ae3ac9090e25789fc41cc9b8.pdf">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td>9%</td><td>77</td><td>7</td><td>70</td><td>7</td><td>43</td><td>2</td></tr><tr><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td><td>york_3d</td><td>UOY 3D Face Database</td><td><a href="papers/19d1b811df60f86cbd5e04a094b07f32fff7a32a.html">Three-dimensional face recognition: an eigensurface approach</a></td><td><a href="http://www-users.cs.york.ac.uk/~nep/research/3Dface/tomh/3DFaceRecognition-Eigensurface-ICIP(web)2.pdf">[pdf]</a></td><td>2004 International Conference on Image Processing, 2004. ICIP '04.</td><td></td><td></td><td></td><td></td><td>19%</td><td>36</td><td>7</td><td>29</td><td>4</td><td>25</td><td>1</td></tr><tr><td>2a171f8d14b6b8735001a11c217af9587d095848</td><td>expw</td><td>ExpW</td><td><a href="papers/2a171f8d14b6b8735001a11c217af9587d095848.html">Learning Social Relation Traits from Face Images</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.414">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>30%</td><td>20</td><td>6</td><td>14</td><td>5</td><td>15</td><td>0</td></tr><tr><td>2a171f8d14b6b8735001a11c217af9587d095848</td><td>expw</td><td>ExpW</td><td><a href="papers/2a171f8d14b6b8735001a11c217af9587d095848.html">Learning Social Relation Traits from Face Images</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.414">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>30%</td><td>20</td><td>6</td><td>14</td><td>5</td><td>15</td><td>0</td></tr><tr><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td><td>faceplace</td><td>Face Place</td><td><a 
href="papers/25474c21613607f6bb7687a281d5f9d4ffa1f9f3.html">Recognizing disguised faces</a></td><td><a href="http://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>25%</td><td>24</td><td>6</td><td>18</td><td>0</td><td>16</td><td>1</td></tr><tr><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td><td>ijb_b</td><td>IJB-B</td><td><a href="papers/0cb2dd5f178e3a297a0c33068961018659d0f443.html">IARPA Janus Benchmark-B Face Dataset</a></td><td><a href="http://www.vislab.ucr.edu/Biometrics2017/program_slides/Noblis_CVPRW_IJBB.pdf">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>24%</td><td>25</td><td>6</td><td>19</td><td>6</td><td>21</td><td>3</td></tr><tr><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td><td>scut_fbp</td><td>SCUT-FBP</td><td><a href="papers/bd26dabab576adb6af30484183c9c9c8379bf2e0.html">SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1511.02459.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2015 IEEE International Conference on Systems, Man, and Cybernetics</td><td>edu</td><td>South China University of Technology</td><td>23.05020420</td><td>113.39880323</td><td>43%</td><td>14</td><td>6</td><td>8</td><td>3</td><td>5</td><td>7</td></tr><tr><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td><a href="papers/4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06.html">Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</a></td><td><a 
href="http://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>24%</td><td>21</td><td>5</td><td>16</td><td>3</td><td>11</td><td>1</td></tr><tr><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td><td>ward</td><td>WARD</td><td><a href="papers/6f3c76b7c0bd8e1d122c6ea808a271fd4749c951.html">Re-identify people in wide area camera network</a></td><td><a href="https://doi.org/10.1109/CVPRW.2012.6239203">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td>9%</td><td>55</td><td>5</td><td>50</td><td>2</td><td>35</td><td>0</td></tr><tr><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td><td>alert_airport</td><td>ALERT Airport</td><td><a href="papers/6403117f9c005ae81f1e8e6d1302f4a045e3d99d.html">A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets.</a></td><td><a href="https://arxiv.org/pdf/1605.09653.pdf">[pdf]</a></td><td>IEEE transactions on pattern analysis and machine intelligence</td><td></td><td></td><td></td><td></td><td>27%</td><td>15</td><td>4</td><td>11</td><td>1</td><td>10</td><td>0</td></tr><tr><td>014b8df0180f33b9fea98f34ae611c6447d761d2</td><td>buhmap_db</td><td>BUHMAP-DB </td><td><a href="papers/014b8df0180f33b9fea98f34ae611c6447d761d2.html">Facial feature tracking and expression recognition for sign language</a></td><td><a href="http://www.cmpe.boun.edu.tr/pilab/pilabfiles/databases/buhmap/files/ari2008facialfeaturetracking.pdf">[pdf]</a></td><td>2008 23rd International Symposium on Computer and Information Sciences</td><td></td><td></td><td></td><td></td><td>16%</td><td>25</td><td>4</td><td>21</td><td>1</td><td>10</td><td>2</td></tr><tr><td>57fe081950f21ca03b5b375ae3e84b399c015861</td><td>cvc_01_barcelona</td><td>CVC-01</td><td><a href="papers/57fe081950f21ca03b5b375ae3e84b399c015861.html">Adaptive Image 
Sampling and Windows Classification for On–board Pedestrian Detection</a></td><td><a href="http://pdfs.semanticscholar.org/57fe/081950f21ca03b5b375ae3e84b399c015861.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>9%</td><td>44</td><td>4</td><td>40</td><td>1</td><td>21</td><td>0</td></tr><tr><td>b71d1aa90dcbe3638888725314c0d56640c1fef1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/b71d1aa90dcbe3638888725314c0d56640c1fef1.html">Iranian Face Database with age, pose and expression</a></td><td><span class="gray">[pdf]</span></td><td>2007 International Conference on Machine Vision</td><td></td><td></td><td></td><td></td><td>20%</td><td>20</td><td>4</td><td>16</td><td>2</td><td>11</td><td>3</td></tr><tr><td>b71d1aa90dcbe3638888725314c0d56640c1fef1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/b71d1aa90dcbe3638888725314c0d56640c1fef1.html">Iranian Face Database with age, pose and expression</a></td><td><span class="gray">[pdf]</span></td><td>2007 International Conference on Machine Vision</td><td></td><td></td><td></td><td></td><td>20%</td><td>20</td><td>4</td><td>16</td><td>2</td><td>11</td><td>3</td></tr><tr><td>2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d.html">Genealogical face recognition based on UB KinFace database</a></td><td><a href="https://doi.org/10.1109/CVPRW.2011.5981801">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>SUNY Buffalo</td><td>42.93362780</td><td>-78.88394479</td><td>13%</td><td>30</td><td>4</td><td>26</td><td>1</td><td>9</td><td>5</td></tr><tr><td>2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d.html">Genealogical face recognition based on UB KinFace database</a></td><td><a href="https://doi.org/10.1109/CVPRW.2011.5981801">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>SUNY 
Buffalo</td><td>42.93362780</td><td>-78.88394479</td><td>13%</td><td>30</td><td>4</td><td>26</td><td>1</td><td>9</td><td>5</td></tr><tr><td>2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d.html">Genealogical face recognition based on UB KinFace database</a></td><td><a href="https://doi.org/10.1109/CVPRW.2011.5981801">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>SUNY Buffalo</td><td>42.93362780</td><td>-78.88394479</td><td>13%</td><td>30</td><td>4</td><td>26</td><td>1</td><td>9</td><td>5</td></tr><tr><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td><td>sarc3d</td><td>Sarc3D</td><td><a href="papers/e27ef52c641c2b5100a1b34fd0b819e84a31b4df.html">SARC3D: A New 3D Body Model for People Tracking and Re-identification</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>14%</td><td>29</td><td>4</td><td>25</td><td>3</td><td>17</td><td>0</td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td>social_relation</td><td>Social Relation</td><td><a href="papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html">From Facial Expression Recognition to Interpersonal Relation Prediction</a></td><td><a href="http://arxiv.org/abs/1609.06426">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>5</td><td>1</td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td>social_relation</td><td>Social Relation</td><td><a href="papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html">From Facial Expression Recognition to Interpersonal Relation Prediction</a></td><td><a href="http://arxiv.org/abs/1609.06426">[pdf]</a></td><td>International Journal of Computer 
Vision</td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>5</td><td>1</td></tr><tr><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td><td>sports_videos_in_the_wild</td><td>SVW</td><td><a href="papers/1a40092b493c6b8840257ab7f96051d1a4dbfeb2.html">Sports Videos in the Wild (SVW): A video dataset for sports analysis</a></td><td><a href="http://web.cse.msu.edu/~liuxm/publication/Safdarnejad_Liu_Udpa_Andrus_Wood_Craven_FG2015.pdf">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>Michigan State University</td><td>42.71856800</td><td>-84.47791571</td><td>67%</td><td>6</td><td>4</td><td>2</td><td>1</td><td>5</td><td>0</td></tr><tr><td>7ebb153704706e457ab57b432793d2b6e5d12592</td><td>vgg_celebs_in_places</td><td>CIP</td><td><a href="papers/7ebb153704706e457ab57b432793d2b6e5d12592.html">Faces in Places: compound query retrieval</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Oxford</td><td>51.75345380</td><td>-1.25400997</td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>4</td><td>0</td></tr><tr><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td><td>chalearn</td><td>ChaLearn</td><td><a href="papers/8d5998cd984e7cce307da7d46f155f9db99c6590.html">ChaLearn looking at people: A review of events and resources</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1701.02664.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2017 International Joint Conference on Neural Networks (IJCNN)</td><td></td><td></td><td></td><td></td><td>30%</td><td>10</td><td>3</td><td>7</td><td>1</td><td>6</td><td>0</td></tr><tr><td>a5acda0e8c0937bfed013e6382da127103e41395</td><td>disfa</td><td>DISFA</td><td><a href="papers/a5acda0e8c0937bfed013e6382da127103e41395.html">Extended DISFA Dataset: Investigating Posed and Spontaneous 
Facial Expressions</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789672', 'linkType': 'ieee'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>38%</td><td>8</td><td>3</td><td>5</td><td>1</td><td>5</td><td>0</td></tr><tr><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/137aa2f891d474fce1e7a1d1e9b3aefe21e22b34.html">Is the eye region more reliable than the face? A preliminary study of face-based recognition on a transgender dataset</a></td><td><a href="https://doi.org/10.1109/BTAS.2013.6712710">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>University of North Carolina Wilmington</td><td>34.23755810</td><td>-77.92701290</td><td>43%</td><td>7</td><td>3</td><td>4</td><td>1</td><td>2</td><td>3</td></tr><tr><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/137aa2f891d474fce1e7a1d1e9b3aefe21e22b34.html">Is the eye region more reliable than the face? A preliminary study of face-based recognition on a transgender dataset</a></td><td><a href="https://doi.org/10.1109/BTAS.2013.6712710">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>University of North Carolina Wilmington</td><td>34.23755810</td><td>-77.92701290</td><td>43%</td><td>7</td><td>3</td><td>4</td><td>1</td><td>2</td><td>3</td></tr><tr><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/137aa2f891d474fce1e7a1d1e9b3aefe21e22b34.html">Is the eye region more reliable than the face? 
A preliminary study of face-based recognition on a transgender dataset</a></td><td><a href="https://doi.org/10.1109/BTAS.2013.6712710">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>University of North Carolina Wilmington</td><td>34.23755810</td><td>-77.92701290</td><td>43%</td><td>7</td><td>3</td><td>4</td><td>1</td><td>2</td><td>3</td></tr><tr><td>57178b36c21fd7f4529ac6748614bb3374714e91</td><td>ijb_c</td><td>IJB-C</td><td><a href="papers/57178b36c21fd7f4529ac6748614bb3374714e91.html">IARPA Janus Benchmark - C: Face Dataset and Protocol</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217', 'linkType': 'ieee'}">[pdf]</a></td><td>2018 International Conference on Biometrics (ICB)</td><td></td><td></td><td></td><td></td><td>33%</td><td>9</td><td>3</td><td>6</td><td>2</td><td>9</td><td>0</td></tr><tr><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td><td>uccs</td><td>UCCS</td><td><a href="papers/07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1.html">Large scale unconstrained open set face database</a></td><td><a href="http://www.vast.uccs.edu/~tboult/PAPERS/BTAS13-Sapkota-Boult-UCCSFaceDB.pdf">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>University of Colorado at Colorado Springs</td><td>38.89646790</td><td>-104.80505940</td><td>60%</td><td>5</td><td>3</td><td>2</td><td>0</td><td>3</td><td>0</td></tr><tr><td>8627f019882b024aef92e4eb9355c499c733e5b7</td><td>used</td><td>USED Social Event Dataset</td><td><a href="papers/8627f019882b024aef92e4eb9355c499c733e5b7.html">USED: a large-scale social event detection dataset</a></td><td><a 
href="http://doi.acm.org/10.1145/2910017.2910624">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>3</td><td>2</td></tr><tr><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td><td>vadana</td><td>VADANA</td><td><a href="papers/4563b46d42079242f06567b3f2e2f7a80cb3befe.html">VADANA: A dense dataset for facial image analysis</a></td><td><a href="http://vims.cis.udel.edu/publications/VADANA_BeFIT2011.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>19%</td><td>16</td><td>3</td><td>13</td><td>0</td><td>6</td><td>6</td></tr><tr><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td><td>cmdp</td><td>CMDP</td><td><a href="papers/56ae6d94fc6097ec4ca861f0daa87941d1c10b70.html">Distance Estimation of an Unknown Person from a Portrait</a></td><td><a href="http://pdfs.semanticscholar.org/56ae/6d94fc6097ec4ca861f0daa87941d1c10b70.pdf">[pdf]</a></td><td></td><td>edu</td><td>California Institute of Technology</td><td>34.13710185</td><td>-118.12527487</td><td>22%</td><td>9</td><td>2</td><td>7</td><td>0</td><td>6</td><td>1</td></tr><tr><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td><td>families_in_the_wild</td><td>FIW</td><td><a href="papers/dd65f71dac86e36eecbd3ed225d016c3336b4a13.html">Visual Kinship Recognition of Families in the Wild</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337841', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>67%</td><td>3</td><td>2</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>6dbe8e5121c534339d6e41f8683e85f87e6abf81</td><td>gallagher</td><td>Gallagher</td><td><a href="papers/6dbe8e5121c534339d6e41f8683e85f87e6abf81.html">Clothing Cosegmentation for Shopping Images With Cluttered Background</a></td><td><a href="{'url': 
'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7423747', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td>33%</td><td>6</td><td>2</td><td>4</td><td>0</td><td>3</td><td>2</td></tr><tr><td>8f02ec0be21461fbcedf51d864f944cfc42c875f</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/8f02ec0be21461fbcedf51d864f944cfc42c875f.html">The HDA+ Data Set for Research on Fully Automated Re-identification Systems</a></td><td><a href="http://pdfs.semanticscholar.org/8f02/ec0be21461fbcedf51d864f944cfc42c875f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>12%</td><td>17</td><td>2</td><td>15</td><td>2</td><td>11</td><td>0</td></tr><tr><td>8f02ec0be21461fbcedf51d864f944cfc42c875f</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/8f02ec0be21461fbcedf51d864f944cfc42c875f.html">The HDA+ Data Set for Research on Fully Automated Re-identification Systems</a></td><td><a href="http://pdfs.semanticscholar.org/8f02/ec0be21461fbcedf51d864f944cfc42c875f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>12%</td><td>17</td><td>2</td><td>15</td><td>2</td><td>11</td><td>0</td></tr><tr><td>99eb4cea0d9bc9fe777a5c5172f8638a37a7f262</td><td>ilids_vid_reid</td><td>iLIDS-VID</td><td><a href="papers/99eb4cea0d9bc9fe777a5c5172f8638a37a7f262.html">Person Re-identification by Exploiting Spatio-Temporal Cues and Multi-view Metric Learning</a></td><td><a href="https://doi.org/10.1109/LSP.2016.2574323">[pdf]</a></td><td>IEEE Signal Processing Letters</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>4</td><td>0</td></tr><tr><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td><td>lag</td><td>LAG</td><td><a href="papers/0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e.html">Large Age-Gap face verification by feature injection in deep networks</a></td><td><a 
href="http://pdfs.semanticscholar.org/0d2d/d4fc016cb6a517d8fb43a7cc3ff62964832e.pdf">[pdf]</a></td><td>Pattern Recognition Letters</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>3</td><td>2</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>market1203</td><td>Market 1203</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>3</td><td>0</td></tr><tr><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td><td>mrp_drone</td><td>MRP Drone</td><td><a href="papers/ad01687649d95cd5b56d7399a9603c4b8e2217d7.html">Investigating Open-World Person Re-identi cation Using a Drone</a></td><td><a href="http://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>5</td><td>2</td><td>3</td><td>0</td><td>3</td><td>0</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>3</td><td>0</td></tr><tr><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td><td>ucf_selfie</td><td>UCF Selfie</td><td><a href="papers/041d3eedf5e45ce5c5229f0181c5c576ed1fafd6.html">How to Take a Good Selfie?</a></td><td><a 
href="http://doi.acm.org/10.1145/2733373.2806365">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>22%</td><td>9</td><td>2</td><td>7</td><td>0</td><td>5</td><td>0</td></tr><tr><td>4b4106614c1d553365bad75d7866bff0de6056ed</td><td>czech_news_agency</td><td>UFI</td><td><a href="papers/4b4106614c1d553365bad75d7866bff0de6056ed.html">Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</a></td><td><a href="http://pdfs.semanticscholar.org/4b41/06614c1d553365bad75d7866bff0de6056ed.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>10%</td><td>10</td><td>1</td><td>9</td><td>0</td><td>4</td><td>2</td></tr><tr><td>563c940054e4b456661762c1ab858e6f730c3159</td><td>data_61</td><td>Data61 Pedestrian</td><td><a href="papers/563c940054e4b456661762c1ab858e6f730c3159.html">A Multi-modal Graphical Model for Scene Analysis</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2015.139">[pdf]</a></td><td>2015 IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td>12%</td><td>8</td><td>1</td><td>7</td><td>0</td><td>5</td><td>0</td></tr><tr><td>c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8</td><td>face_research_lab</td><td>Face Research Lab London</td><td><a href="papers/c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8.html">Anxiety promotes memory for mood-congruent faces but does not alter loss aversion.</a></td><td><a href="http://pdfs.semanticscholar.org/c652/6dd3060d63a6c90e8b7ff340383c4e0e0dd8.pdf">[pdf]</a></td><td>Scientific reports</td><td>edu</td><td>University College London</td><td>51.52316070</td><td>-0.12820370</td><td>25%</td><td>4</td><td>1</td><td>3</td><td>0</td><td>2</td><td>2</td></tr><tr><td>17b46e2dad927836c689d6787ddb3387c6159ece</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/17b46e2dad927836c689d6787ddb3387c6159ece.html">GeoFaceExplorer: exploring the geo-dependence of facial attributes</a></td><td><a 
href="http://doi.acm.org/10.1145/2676440.2676443">[pdf]</a></td><td></td><td>edu</td><td>University of Kentucky</td><td>38.03337420</td><td>-84.50177580</td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td><td>ifad</td><td>IFAD</td><td><a href="papers/55c40cbcf49a0225e72d911d762c27bb1c2d14aa.html">Indian Face Age Database : A Database for Face Recognition with Age Variation</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/55c4/0cbcf49a0225e72d911d762c27bb1c2d14aa.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>e3e44385a71a52fd483c58eb3cdf8d03960c0b70</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/e3e44385a71a52fd483c58eb3cdf8d03960c0b70.html">A Hierarchical Graphical Model for Recognizing Human Actions and Interactions in Video</a></td><td><a href="http://pdfs.semanticscholar.org/e3e4/4385a71a52fd483c58eb3cdf8d03960c0b70.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>e3e44385a71a52fd483c58eb3cdf8d03960c0b70</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/e3e44385a71a52fd483c58eb3cdf8d03960c0b70.html">A Hierarchical Graphical Model for Recognizing Human Actions and Interactions in Video</a></td><td><a href="http://pdfs.semanticscholar.org/e3e4/4385a71a52fd483c58eb3cdf8d03960c0b70.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>d80a3d1f3a438e02a6685e66ee908446766fefa9</td><td>megaage</td><td>MegaAge</td><td><a href="papers/d80a3d1f3a438e02a6685e66ee908446766fefa9.html">Quantifying Facial Age by Posterior of Age Comparisons</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1708.09687.pdf', 'linkType': 
'arxiv'}">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>25%</td><td>4</td><td>1</td><td>3</td><td>1</td><td>4</td><td>0</td></tr><tr><td>23e824d1dfc33f3780dd18076284f07bd99f1c43</td><td>mifs</td><td>MIFS</td><td><a href="papers/23e824d1dfc33f3780dd18076284f07bd99f1c43.html">Spoofing faces using makeup: An investigative study</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7947686', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE International Conference on Identity, Security and Behavior Analysis (ISBA)</td><td>edu</td><td>INRIA Méditerranée</td><td>43.61581310</td><td>7.06838000</td><td>20%</td><td>5</td><td>1</td><td>4</td><td>0</td><td>1</td><td>2</td></tr><tr><td>fb82681ac5d3487bd8e52dbb3d1fa220eeac855e</td><td>pilot_parliament</td><td>PPB</td><td><a href="papers/fb82681ac5d3487bd8e52dbb3d1fa220eeac855e.html">1 Network Notebook</a></td><td><a href="http://pdfs.semanticscholar.org/fb82/681ac5d3487bd8e52dbb3d1fa220eeac855e.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>9%</td><td>11</td><td>1</td><td>10</td><td>1</td><td>10</td><td>0</td></tr><tr><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td><td>precarious</td><td>Precarious</td><td><a href="papers/9e5378e7b336c89735d3bb15cf67eff96f86d39a.html">Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1703.06283.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>8%</td><td>12</td><td>1</td><td>11</td><td>1</td><td>10</td><td>0</td></tr><tr><td>54983972aafc8e149259d913524581357b0f91c3</td><td>reseed</td><td>ReSEED</td><td><a href="papers/54983972aafc8e149259d913524581357b0f91c3.html">ReSEED: social event dEtection dataset</a></td><td><a 
href="https://pub.uni-bielefeld.de/download/2663466/2686734">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>17%</td><td>6</td><td>1</td><td>5</td><td>1</td><td>1</td><td>1</td></tr><tr><td>c9bda86e23cab9e4f15ea0c4cbb6cc02b9dfb709</td><td>stanford_drone</td><td>Stanford Drone</td><td><a href="papers/c9bda86e23cab9e4f15ea0c4cbb6cc02b9dfb709.html">Learning to predict human behaviour in crowded scenes</a></td><td><a href="http://pdfs.semanticscholar.org/c9bd/a86e23cab9e4f15ea0c4cbb6cc02b9dfb709.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>20%</td><td>5</td><td>1</td><td>4</td><td>1</td><td>5</td><td>0</td></tr><tr><td>9696ad8b164f5e10fcfe23aacf74bd6168aebb15</td><td>4dfab</td><td>4DFAB</td><td><a href="papers/9696ad8b164f5e10fcfe23aacf74bd6168aebb15.html">4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1712.01443.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>0%</td><td>4</td><td>0</td><td>4</td><td>0</td><td>2</td><td>0</td></tr><tr><td>f152b6ee251cca940dd853c54e6a7b78fbc6b235</td><td>affectnet</td><td>AffectNet</td><td><a href="papers/f152b6ee251cca940dd853c54e6a7b78fbc6b235.html">Skybiometry and AffectNet on Facial Emotion Recognition Using Supervised Machine Learning Algorithms</a></td><td><a href="{'url': 'http://dl.acm.org/citation.cfm?id=3232665', 'linkType': 'acm'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>1ed1a49534ad8dd00f81939449f6389cfbc25321</td><td>bjut_3d</td><td>BJUT-3D</td><td><a href="papers/1ed1a49534ad8dd00f81939449f6389cfbc25321.html">A novel face recognition method based on 3D face model</a></td><td><a href="https://doi.org/10.1109/ROBIO.2007.4522202">[pdf]</a></td><td>2007 IEEE International Conference on Robotics and Biomimetics 
(ROBIO)</td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>1</td><td>1</td></tr><tr><td>65355cbb581a219bd7461d48b3afd115263ea760</td><td>complex_activities</td><td>Ongoing Complex Activities</td><td><a href="papers/65355cbb581a219bd7461d48b3afd115263ea760.html">Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477586">[pdf]</a></td><td>2016 IEEE Winter Conference on Applications of Computer Vision (WACV)</td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>2</td><td>0</td></tr><tr><td>f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4</td><td>europersons</td><td>EuroCity Persons</td><td><a href="papers/f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4.html">The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1805.07193.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td><td>fddb</td><td>FDDB</td><td><a href="papers/75da1df4ed319926c544eefe17ec8d720feef8c0.html">FDDB: A Benchmark for Face Detection in Unconstrained Settings</a></td><td><a href="http://pdfs.semanticscholar.org/75da/1df4ed319926c544eefe17ec8d720feef8c0.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of Massachusetts</td><td>42.38897850</td><td>-72.52869870</td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>b6b1b0632eb9d4ab1427278f5e5c46f97753c73d</td><td>fei</td><td>FEI</td><td><a href="papers/b6b1b0632eb9d4ab1427278f5e5c46f97753c73d.html">Generalização cartográfica automatizada para um banco de dados cadastral</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/b6b1/b0632eb9d4ab1427278f5e5c46f97753c73d.pdf', 'linkType': 
's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td><a href="papers/3dc3f0b64ef80f573e3a5f96e456e52ee980b877.html">Maximum Likelihood Training of the Embedded HMM for Face Detection and Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/3dc3/f0b64ef80f573e3a5f96e456e52ee980b877.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>3</td><td>0</td><td>3</td><td>0</td><td>2</td><td>0</td></tr><tr><td>24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd</td><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td><a href="papers/24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd.html">High-resolution comprehensive 3-D dynamic database for facial articulation analysis</a></td><td><a href="http://www.researchgate.net/profile/Wei_Quan3/publication/221430048_High-resolution_comprehensive_3-D_dynamic_database_for_facial_articulation_analysis/links/0deec534309495805d000000.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>0%</td><td>5</td><td>0</td><td>5</td><td>0</td><td>1</td><td>0</td></tr><tr><td>ad62c6e17bc39b4dec20d32f6ac667ae42d2c118</td><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td><a href="papers/ad62c6e17bc39b4dec20d32f6ac667ae42d2c118.html">A Synchronization Ground Truth for the Jiku Mobile Video Dataset</a></td><td><a href="http://pdfs.semanticscholar.org/ad62/c6e17bc39b4dec20d32f6ac667ae42d2c118.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td><td>mit_cbcl</td><td>MIT CBCL</td><td><a href="papers/079a0a3bf5200994e1f972b1b9197bf2f90e87d4.html">Component-Based Face Recognition with 3D Morphable 
Models</a></td><td><a href="http://www.bheisele.com/avbpa2003.pdf">[pdf]</a></td><td>2004 Conference on Computer Vision and Pattern Recognition Workshop</td><td></td><td></td><td></td><td></td><td>0%</td><td>12</td><td>0</td><td>12</td><td>0</td><td>8</td><td>0</td></tr><tr><td>c06b13d0ec3f5c43e2782cd22542588e233733c3</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/c06b13d0ec3f5c43e2782cd22542588e233733c3.html">Crowdsourcing facial expressions for affective-interaction</a></td><td><a href="https://doi.org/10.1016/j.cviu.2016.02.001">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>c06b13d0ec3f5c43e2782cd22542588e233733c3</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/c06b13d0ec3f5c43e2782cd22542588e233733c3.html">Crowdsourcing facial expressions for affective-interaction</a></td><td><a href="https://doi.org/10.1016/j.cviu.2016.02.001">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td><td>pets</td><td>PETS 2017</td><td><a href="papers/22909dd19a0ec3b6065334cb5be5392cb24d839d.html">PETS 2017: Dataset and Challenge</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014998', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>0%</td><td>8</td><td>0</td><td>8</td><td>0</td><td>2</td><td>0</td></tr><tr><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td><td>pku</td><td>PKU</td><td><a href="papers/f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f.html">Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</a></td><td><a 
href="http://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>3</td><td>0</td><td>3</td><td>0</td><td>1</td><td>0</td></tr><tr><td>c866a2afc871910e3282fd9498dce4ab20f6a332</td><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td><a href="papers/c866a2afc871910e3282fd9498dce4ab20f6a332.html">Surveillance Face Recognition Challenge</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1804.09691.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>f3b84a03985de3890b400b68e2a92c0a00afd9d0</td><td>scface</td><td>SCface</td><td><a href="papers/f3b84a03985de3890b400b68e2a92c0a00afd9d0.html">Large Variability Surveillance Camera Face Database</a></td><td><span class="gray">[pdf]</a></td><td>2015 Seventh International Conference on Computational Intelligence, Modelling and Simulation (CIMSim)</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td><td>stair_actions</td><td>STAIR Action</td><td><a href="papers/d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9.html">STAIR Actions: A Video Dataset of Everyday Home Actions</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1804.04326.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>377f2b65e6a9300448bdccf678cde59449ecd337</td><td>ufdd</td><td>UFDD</td><td><a href="papers/377f2b65e6a9300448bdccf678cde59449ecd337.html">Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1804.10275.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Johns Hopkins 
University</td><td>39.32905300</td><td>-76.61942500</td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>2</td><td>0</td></tr><tr><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td><td>v47</td><td>V47</td><td><a href="papers/922e0a51a3b8c67c4c6ac09a577ff674cbd28b34.html">Re-identification of pedestrians with variable occlusion and scale</a></td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130477">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>0%</td><td>10</td><td>0</td><td>10</td><td>2</td><td>6</td><td>0</td></tr><tr><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td><td>who_goes_there</td><td>WGT</td><td><a href="papers/9b9bf5e623cb8af7407d2d2d857bc3f1b531c182.html">Who goes there?: approaches to mapping facial appearance diversity</a></td><td><a href="http://doi.acm.org/10.1145/2996913.2996997">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>77c81c13a110a341c140995bedb98101b9e84f7f</td><td>wildtrack</td><td>WildTrack</td><td><a href="papers/77c81c13a110a341c140995bedb98101b9e84f7f.html">WILDTRACK : A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/fe1c/ec4e4995b8615855572374ae3efc94949105.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td><td>wlfdb</td><td></td><td><a href="papers/5ad4e9f947c1653c247d418f05dad758a3f9277b.html">WLFDB: Weakly Labeled Face Databases</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf', 'linkType': 
's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>a94cae786d515d3450d48267e12ca954aab791c4</td><td>yawdd</td><td>YawDD</td><td><a href="papers/a94cae786d515d3450d48267e12ca954aab791c4.html">YawDD: a yawning detection dataset</a></td><td><a href="http://www.site.uottawa.ca/~shervin/pubs/CogniVue-Dataset-ACM-MMSys2014.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>14</td><td>0</td><td>14</td><td>1</td><td>2</td><td>1</td></tr></table></body></html> \ No newline at end of file
diff --git a/scraper/reports/report_index.html b/scraper/reports/report_index.html
new file mode 100644
index 00000000..a84a6295
--- /dev/null
+++ b/scraper/reports/report_index.html
@@ -0,0 +1 @@
+<!doctype html><html><head><meta charset='utf-8'><title>All Papers</title><link rel='stylesheet' href='reports.css'></head><body><h2>All Papers</h2><table border='1' cellpadding='3' cellspacing='3'><th>Paper ID</th><th>Megapixels Key</th><th>Megapixels Name</th><th>Report Link</th><th>PDF Link</th><th>Journal</th><th>Type</th><th>Address</th><th>Lat</th><th>Lng</th><th>Coverage</th><th>Total Citations</th><th>Geocoded Citations</th><th>Unknown Citations</th><th>Empty Citations</th><th>With PDF</th><th>With DOI</th><tr><td>fb82681ac5d3487bd8e52dbb3d1fa220eeac855e</td><td>pilot_parliament</td><td>PPB</td><td><a href="papers/fb82681ac5d3487bd8e52dbb3d1fa220eeac855e.html">1 Network Notebook</a></td><td><a href="http://pdfs.semanticscholar.org/fb82/681ac5d3487bd8e52dbb3d1fa220eeac855e.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>9%</td><td>11</td><td>1</td><td>10</td><td>1</td><td>10</td><td>0</td></tr><tr><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td><td>mpii_human_pose</td><td>MPII Human Pose</td><td><a href="papers/3325860c0c82a93b2eac654f5324dd6a776f609e.html">2D Human Pose Estimation: New Benchmark and State of the Art Analysis</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909866', 'linkType': 'ieee'}">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>50%</td><td>356</td><td>179</td><td>177</td><td>21</td><td>299</td><td>3</td></tr><tr><td>2f5d44dc3e1b5955942133ff872ebd31716ec604</td><td>frav3d</td><td>FRAV3D</td><td><a href="papers/2f5d44dc3e1b5955942133ff872ebd31716ec604.html">2D and 3D face recognition: A survey</a></td><td><a href="http://pdfs.semanticscholar.org/2f5d/44dc3e1b5955942133ff872ebd31716ec604.pdf">[pdf]</a></td><td>Pattern Recognition 
Letters</td><td></td><td></td><td></td><td></td><td>15%</td><td>389</td><td>57</td><td>332</td><td>28</td><td>198</td><td>17</td></tr><tr><td>e4754afaa15b1b53e70743880484b8d0736990ff</td><td>fiw_300</td><td>300-W</td><td><a href="papers/e4754afaa15b1b53e70743880484b8d0736990ff.html">300 Faces In-The-Wild Challenge: database and results</a></td><td><a href="{'url': 'http://doi.org/10.1016/j.imavis.2016.01.002', 'linkType': 'doi'}">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td>45%</td><td>114</td><td>51</td><td>63</td><td>10</td><td>70</td><td>31</td></tr><tr><td>e4754afaa15b1b53e70743880484b8d0736990ff</td><td>fiw_300</td><td>300-W</td><td><a href="papers/e4754afaa15b1b53e70743880484b8d0736990ff.html">300 Faces In-The-Wild Challenge: database and results</a></td><td><a href="{'url': 'http://doi.org/10.1016/j.imavis.2016.01.002', 'linkType': 'doi'}">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td>45%</td><td>114</td><td>51</td><td>63</td><td>10</td><td>70</td><td>31</td></tr><tr><td>e4754afaa15b1b53e70743880484b8d0736990ff</td><td>fiw_300</td><td>300-W</td><td><a href="papers/e4754afaa15b1b53e70743880484b8d0736990ff.html">300 Faces In-The-Wild Challenge: database and results</a></td><td><a href="{'url': 'http://doi.org/10.1016/j.imavis.2016.01.002', 'linkType': 'doi'}">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td>45%</td><td>114</td><td>51</td><td>63</td><td>10</td><td>70</td><td>31</td></tr><tr><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td><td>3dpes</td><td>3DPeS</td><td><a href="papers/2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e.html">3DPeS: 3D people dataset for surveillance and forensics</a></td><td><a 
href="http://doi.acm.org/10.1145/2072572.2072590">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>18%</td><td>122</td><td>22</td><td>100</td><td>11</td><td>71</td><td>1</td></tr><tr><td>9696ad8b164f5e10fcfe23aacf74bd6168aebb15</td><td>4dfab</td><td>4DFAB</td><td><a href="papers/9696ad8b164f5e10fcfe23aacf74bd6168aebb15.html">4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1712.01443.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>0%</td><td>4</td><td>0</td><td>4</td><td>0</td><td>2</td><td>0</td></tr><tr><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td><td>b3d_ac</td><td>B3D(AC)</td><td><a href="papers/d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae.html">A 3-D Audio-Visual Corpus of Affective Communication</a></td><td><a href="http://files.is.tue.mpg.de/jgall/download/jgall_avcorpus_mm10.pdf">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td>31%</td><td>39</td><td>12</td><td>27</td><td>2</td><td>27</td><td>7</td></tr><tr><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td><td>bfm</td><td>BFM</td><td><a href="papers/639937b3a1b8bded3f7e9a40e85bd3770016cf3c.html">A 3D Face Model for Pose and Illumination Invariant Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/6399/37b3a1b8bded3f7e9a40e85bd3770016cf3c.pdf">[pdf]</a></td><td>2009 Sixth IEEE International Conference on Advanced Video and Signal Based Surveillance</td><td></td><td></td><td></td><td></td><td>41%</td><td>323</td><td>131</td><td>192</td><td>29</td><td>221</td><td>25</td></tr><tr><td>c34532fe6bfbd1e6df477c9ffdbb043b77e7804d</td><td>columbia_gaze</td><td>Columbia Gaze</td><td><a href="papers/c34532fe6bfbd1e6df477c9ffdbb043b77e7804d.html">A 3D Morphable Eye Region Model for Gaze Estimation</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/0d43/3b9435b874a1eea6d7999e86986c910fa285.pdf', 'linkType': 
's2'}">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Carnegie Mellon University</td><td>37.41021930</td><td>-122.05965487</td><td>67%</td><td>24</td><td>16</td><td>8</td><td>0</td><td>18</td><td>4</td></tr><tr><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td><td>bu_3dfe</td><td>BU-3DFE</td><td><a href="papers/cc589c499dcf323fe4a143bbef0074c3e31f9b60.html">A 3D facial expression database for facial behavior research</a></td><td><a href="http://www.cs.binghamton.edu/~lijun/Research/3DFE/Yin_FGR06_a.pdf">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td>edu</td><td>SUNY Binghamton</td><td>42.08779975</td><td>-75.97066066</td><td>24%</td><td>555</td><td>131</td><td>424</td><td>47</td><td>283</td><td>48</td></tr><tr><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td><td>saivt</td><td>SAIVT SoftBio</td><td><a href="papers/22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b.html">A Database for Person Re-Identification in Multi-Camera Surveillance Networks</a></td><td><a href="http://eprints.qut.edu.au/53437/3/Bialkowski_Database4PersonReID_DICTA.pdf">[pdf]</a></td><td>2012 International Conference on Digital Image Computing Techniques and Applications (DICTA)</td><td></td><td></td><td></td><td></td><td>21%</td><td>58</td><td>12</td><td>46</td><td>7</td><td>40</td><td>1</td></tr><tr><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td><td>d3dfacs</td><td>D3DFACS</td><td><a href="papers/070de852bc6eb275d7ca3a9cdde8f6be8795d1a3.html">A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</a></td><td><a href="http://www.cs.bath.ac.uk/~dpc/D3DFACS/ICCV_final_2011.pdf">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td>edu</td><td>Jacobs University</td><td>53.41291480</td><td>-2.96897915</td><td>44%</td><td>52</td><td>23</td><td>29</td><td>5</td><td>37</td><td>4</td></tr><tr><td>e3e44385a71a52fd483c58eb3cdf8d03960c0b70</td><td>mcgill</td><td>McGill Real 
World</td><td><a href="papers/e3e44385a71a52fd483c58eb3cdf8d03960c0b70.html">A Hierarchical Graphical Model for Recognizing Human Actions and Interactions in Video</a></td><td><a href="http://pdfs.semanticscholar.org/e3e4/4385a71a52fd483c58eb3cdf8d03960c0b70.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>e3e44385a71a52fd483c58eb3cdf8d03960c0b70</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/e3e44385a71a52fd483c58eb3cdf8d03960c0b70.html">A Hierarchical Graphical Model for Recognizing Human Actions and Interactions in Video</a></td><td><a href="http://pdfs.semanticscholar.org/e3e4/4385a71a52fd483c58eb3cdf8d03960c0b70.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>563c940054e4b456661762c1ab858e6f730c3159</td><td>data_61</td><td>Data61 Pedestrian</td><td><a href="papers/563c940054e4b456661762c1ab858e6f730c3159.html">A Multi-modal Graphical Model for Scene Analysis</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2015.139">[pdf]</a></td><td>2015 IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td>12%</td><td>8</td><td>1</td><td>7</td><td>0</td><td>5</td><td>0</td></tr><tr><td>221c18238b829c12b911706947ab38fd017acef7</td><td>rap_pedestrian</td><td>RAP</td><td><a href="papers/221c18238b829c12b911706947ab38fd017acef7.html">A Richly Annotated Dataset for Pedestrian Attribute Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/221c/18238b829c12b911706947ab38fd017acef7.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>52%</td><td>21</td><td>11</td><td>10</td><td>0</td><td>18</td><td>0</td></tr><tr><td>3b4ec8af470948a72a6ed37a9fd226719a874ebc</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/3b4ec8af470948a72a6ed37a9fd226719a874ebc.html">A 
Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.434">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>32%</td><td>85</td><td>27</td><td>58</td><td>9</td><td>51</td><td>0</td></tr><tr><td>3b4ec8af470948a72a6ed37a9fd226719a874ebc</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/3b4ec8af470948a72a6ed37a9fd226719a874ebc.html">A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.434">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>32%</td><td>85</td><td>27</td><td>58</td><td>9</td><td>51</td><td>0</td></tr><tr><td>3b4ec8af470948a72a6ed37a9fd226719a874ebc</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/3b4ec8af470948a72a6ed37a9fd226719a874ebc.html">A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.434">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>32%</td><td>85</td><td>27</td><td>58</td><td>9</td><td>51</td><td>0</td></tr><tr><td>ad62c6e17bc39b4dec20d32f6ac667ae42d2c118</td><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td><a href="papers/ad62c6e17bc39b4dec20d32f6ac667ae42d2c118.html">A Synchronization Ground Truth for the Jiku Mobile Video Dataset</a></td><td><a href="http://pdfs.semanticscholar.org/ad62/c6e17bc39b4dec20d32f6ac667ae42d2c118.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td><td>alert_airport</td><td>ALERT Airport</td><td><a 
href="papers/6403117f9c005ae81f1e8e6d1302f4a045e3d99d.html">A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets.</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1605.09653.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>IEEE transactions on pattern analysis and machine intelligence</td><td></td><td></td><td></td><td></td><td>27%</td><td>15</td><td>4</td><td>11</td><td>1</td><td>10</td><td>0</td></tr><tr><td>7ace44190729927e5cb0dd5d363fcae966fe13f7</td><td>nudedetection</td><td>Nude Detection</td><td><a href="papers/7ace44190729927e5cb0dd5d363fcae966fe13f7.html">A bag-of-features approach based on Hue-SIFT descriptor for nude detection</a></td><td><a href="http://ieeexplore.ieee.org/document/7077625/">[pdf]</a></td><td>2009 17th European Signal Processing Conference</td><td></td><td></td><td></td><td></td><td>18%</td><td>51</td><td>9</td><td>42</td><td>1</td><td>18</td><td>0</td></tr><tr><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td><td>face_scrub</td><td>FaceScrub</td><td><a href="papers/0d3bb75852098b25d90f31d2f48fd0cb4944702b.html">A data-driven approach to cleaning large face datasets</a></td><td><a href="https://doi.org/10.1109/ICIP.2014.7025068">[pdf]</a></td><td>2014 IEEE International Conference on Image Processing (ICIP)</td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>40.11116745</td><td>-88.22587665</td><td>46%</td><td>123</td><td>56</td><td>67</td><td>6</td><td>95</td><td>21</td></tr><tr><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td><a href="papers/b91f54e1581fbbf60392364323d00a0cd43e493c.html">A high-resolution spontaneous 3D dynamic facial expression database</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553788', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>SUNY 
Binghamton</td><td>42.08779975</td><td>-75.97066066</td><td>36%</td><td>151</td><td>54</td><td>97</td><td>7</td><td>85</td><td>26</td></tr><tr><td>1ed1a49534ad8dd00f81939449f6389cfbc25321</td><td>bjut_3d</td><td>BJUT-3D</td><td><a href="papers/1ed1a49534ad8dd00f81939449f6389cfbc25321.html">A novel face recognition method based on 3D face model</a></td><td><a href="https://doi.org/10.1109/ROBIO.2007.4522202">[pdf]</a></td><td>2007 IEEE International Conference on Robotics and Biomimetics (ROBIO)</td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>1</td><td>1</td></tr><tr><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/2624d84503bc2f8e190e061c5480b6aa4d89277a.html">AFEW-VA database for valence and arousal estimation in-the-wild</a></td><td><a href="http://pdfs.semanticscholar.org/2624/d84503bc2f8e190e061c5480b6aa4d89277a.pdf">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td>47%</td><td>15</td><td>7</td><td>8</td><td>1</td><td>10</td><td>3</td></tr><tr><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/2624d84503bc2f8e190e061c5480b6aa4d89277a.html">AFEW-VA database for valence and arousal estimation in-the-wild</a></td><td><a href="http://pdfs.semanticscholar.org/2624/d84503bc2f8e190e061c5480b6aa4d89277a.pdf">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td>47%</td><td>15</td><td>7</td><td>8</td><td>1</td><td>10</td><td>3</td></tr><tr><td>2ad0ee93d029e790ebb50574f403a09854b65b7e</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/2ad0ee93d029e790ebb50574f403a09854b65b7e.html">Acquiring linear subspaces for face recognition under variable lighting</a></td><td><a href="http://vision.cornell.edu/se3/wp-content/uploads/2014/09/pami05.pdf">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine 
Intelligence</td><td></td><td></td><td></td><td></td><td>25%</td><td>999</td><td>251</td><td>748</td><td>110</td><td>509</td><td>113</td></tr><tr><td>2ad0ee93d029e790ebb50574f403a09854b65b7e</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/2ad0ee93d029e790ebb50574f403a09854b65b7e.html">Acquiring linear subspaces for face recognition under variable lighting</a></td><td><a href="http://vision.cornell.edu/se3/wp-content/uploads/2014/09/pami05.pdf">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>25%</td><td>999</td><td>251</td><td>748</td><td>110</td><td>509</td><td>113</td></tr><tr><td>57fe081950f21ca03b5b375ae3e84b399c015861</td><td>cvc_01_barcelona</td><td>CVC-01</td><td><a href="papers/57fe081950f21ca03b5b375ae3e84b399c015861.html">Adaptive Image Sampling and Windows Classification for On–board Pedestrian Detection</a></td><td><a href="http://pdfs.semanticscholar.org/57fe/081950f21ca03b5b375ae3e84b399c015861.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>9%</td><td>44</td><td>4</td><td>40</td><td>1</td><td>21</td><td>0</td></tr><tr><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td><td>am_fed</td><td>AM-FED</td><td><a href="papers/47aeb3b82f54b5ae8142b4bdda7b614433e69b9a.html">Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</a></td><td><a href="http://pdfs.semanticscholar.org/5d06/437656dd94616d7d87260d5eb77513ded30f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>36%</td><td>73</td><td>26</td><td>47</td><td>6</td><td>39</td><td>16</td></tr><tr><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td><td>adience</td><td>Adience</td><td><a href="papers/1be498d4bbc30c3bfd0029114c784bc2114d67c0.html">Age and Gender Estimation of Unfiltered Faces</a></td><td><a href="http://www.openu.ac.il/home/hassner/Adience/EidingerEnbarHassner_tifs.pdf">[pdf]</a></td><td>IEEE Transactions on 
Information Forensics and Security</td><td></td><td></td><td></td><td></td><td>43%</td><td>168</td><td>72</td><td>96</td><td>7</td><td>89</td><td>53</td></tr><tr><td>6dcf418c778f528b5792104760f1fbfe90c6dd6a</td><td>agedb</td><td>AgeDB</td><td><a href="papers/6dcf418c778f528b5792104760f1fbfe90c6dd6a.html">AgeDB: The First Manually Collected, In-the-Wild Age Database</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>82%</td><td>11</td><td>9</td><td>2</td><td>0</td><td>10</td><td>0</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>aflw</td><td>AFLW</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>53%</td><td>292</td><td>155</td><td>137</td><td>38</td><td>207</td><td>59</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>imm_face</td><td>IMM Face Dataset</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>53%</td><td>292</td><td>155</td><td>137</td><td>38</td><td>207</td><td>59</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>muct</td><td>MUCT</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html">Annotated Facial 
Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>53%</td><td>292</td><td>155</td><td>137</td><td>38</td><td>207</td><td>59</td></tr><tr><td>c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8</td><td>face_research_lab</td><td>Face Research Lab London</td><td><a href="papers/c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8.html">Anxiety promotes memory for mood-congruent faces but does not alter loss aversion.</a></td><td><a href="http://pdfs.semanticscholar.org/c652/6dd3060d63a6c90e8b7ff340383c4e0e0dd8.pdf">[pdf]</a></td><td>Scientific reports</td><td>edu</td><td>University College London</td><td>51.52316070</td><td>-0.12820370</td><td>25%</td><td>4</td><td>1</td><td>3</td><td>0</td><td>2</td><td>2</td></tr><tr><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td><td>mpii_gaze</td><td>MPIIGaze</td><td><a href="papers/0df0d1adea39a5bef318b74faa37de7f3e00b452.html">Appearance-based gaze estimation in the wild</a></td><td><a href="https://scalable.mpi-inf.mpg.de/files/2015/09/zhang_CVPR15.pdf">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Max Planck Institute for Informatics</td><td>49.25795660</td><td>7.04577417</td><td>38%</td><td>138</td><td>52</td><td>86</td><td>3</td><td>94</td><td>7</td></tr><tr><td>5801690199c1917fa58c35c3dead177c0b8f9f2d</td><td>camel</td><td>CAMEL</td><td><a href="papers/5801690199c1917fa58c35c3dead177c0b8f9f2d.html">Application of Object Based Classification and High Resolution Satellite Imagery for Savanna Ecosystem Analysis</a></td><td><a href="http://pdfs.semanticscholar.org/5801/690199c1917fa58c35c3dead177c0b8f9f2d.pdf">[pdf]</a></td><td>Remote 
Sensing</td><td></td><td></td><td></td><td></td><td>37%</td><td>19</td><td>7</td><td>12</td><td>1</td><td>16</td><td>0</td></tr><tr><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td><td>pubfig</td><td>PubFig</td><td><a href="papers/759a3b3821d9f0e08e0b0a62c8b693230afc3f8d.html">Attribute and simile classifiers for face verification</a></td><td><a href="http://homes.cs.washington.edu/~neeraj/projects/faceverification/base/papers/nk_iccv2009_attrs.pdf">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>51%</td><td>894</td><td>454</td><td>440</td><td>55</td><td>587</td><td>222</td></tr><tr><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td><td>iit_dehli_ear</td><td>IIT Dehli Ear</td><td><a href="papers/faf40ce28857aedf183e193486f5b4b0a8c478a2.html">Automated Human Identification Using Ear Imaging</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>13%</td><td>70</td><td>9</td><td>61</td><td>6</td><td>28</td><td>1</td></tr><tr><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td><td>3d_rma</td><td>3D-RMA</td><td><a href="papers/2160788824c4c29ffe213b2cbeb3f52972d73f37.html">Automatic 3D face authentication</a></td><td><a href="http://pdfs.semanticscholar.org/2160/788824c4c29ffe213b2cbeb3f52972d73f37.pdf">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td>25%</td><td>95</td><td>24</td><td>71</td><td>8</td><td>60</td><td>2</td></tr><tr><td>213a579af9e4f57f071b884aa872651372b661fd</td><td>bbc_pose</td><td>BBC Pose</td><td><a href="papers/213a579af9e4f57f071b884aa872651372b661fd.html">Automatic and Efficient Human Pose Estimation for Sign Language Videos</a></td><td><a href="https://doi.org/10.1007/s11263-013-0672-6">[pdf]</a></td><td>International Journal of Computer 
Vision</td><td></td><td></td><td></td><td></td><td>60%</td><td>25</td><td>15</td><td>10</td><td>1</td><td>18</td><td>1</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>miw</td><td>MIW</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html">Automatic facial makeup detection with application in face recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>39.65404635</td><td>-79.96475355</td><td>65%</td><td>46</td><td>30</td><td>16</td><td>1</td><td>18</td><td>21</td></tr><tr><td>0a85bdff552615643dd74646ac881862a7c7072d</td><td>pipa</td><td>PIPA</td><td><a href="papers/0a85bdff552615643dd74646ac881862a7c7072d.html">Beyond frontal faces: Improving Person Recognition using multiple cues</a></td><td><a href="https://doi.org/10.1109/CVPR.2015.7299113">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>60%</td><td>50</td><td>30</td><td>19</td><td>2</td><td>40</td><td>4</td></tr><tr><td>2acf7e58f0a526b957be2099c10aab693f795973</td><td>bosphorus</td><td>The Bosphorus</td><td><a href="papers/2acf7e58f0a526b957be2099c10aab693f795973.html">Bosphorus Database for 3D Face Analysis</a></td><td><a href="http://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>18%</td><td>328</td><td>58</td><td>270</td><td>18</td><td>143</td><td>37</td></tr><tr><td>214c966d1f9c2a4b66f4535d9a0d4078e63a5867</td><td>brainwash</td><td>Brainwash</td><td><a href="papers/214c966d1f9c2a4b66f4535d9a0d4078e63a5867.html">Brainwash: A Data System for Feature Engineering</a></td><td><a 
href="http://pdfs.semanticscholar.org/ae44/8015b2ff2bd3b8a5c9a3266f954f5af9ffa9.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>57</td><td>34</td><td>23</td><td>2</td><td>50</td><td>0</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>vmu</td><td>VMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td>49%</td><td>49</td><td>24</td><td>25</td><td>0</td><td>18</td><td>22</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td>49%</td><td>49</td><td>24</td><td>25</td><td>0</td><td>18</td><td>22</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems 
(BTAS)</td><td></td><td></td><td></td><td></td><td>49%</td><td>49</td><td>24</td><td>25</td><td>0</td><td>18</td><td>22</td></tr><tr><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td><td>chalearn</td><td>ChaLearn</td><td><a href="papers/8d5998cd984e7cce307da7d46f155f9db99c6590.html">ChaLearn looking at people: A review of events and resources</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1701.02664.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2017 International Joint Conference on Neural Networks (IJCNN)</td><td></td><td></td><td></td><td></td><td>30%</td><td>10</td><td>3</td><td>7</td><td>1</td><td>6</td><td>0</td></tr><tr><td>2bf8541199728262f78d4dced6fb91479b39b738</td><td>clothing_co_parsing</td><td>CCP</td><td><a href="papers/2bf8541199728262f78d4dced6fb91479b39b738.html">Clothing Co-parsing by Joint Image Segmentation and Labeling</a></td><td><a href="https://arxiv.org/pdf/1502.00739v1.pdf">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>47%</td><td>60</td><td>28</td><td>32</td><td>0</td><td>36</td><td>6</td></tr><tr><td>6dbe8e5121c534339d6e41f8683e85f87e6abf81</td><td>gallagher</td><td>Gallagher</td><td><a href="papers/6dbe8e5121c534339d6e41f8683e85f87e6abf81.html">Clothing Cosegmentation for Shopping Images With Cluttered Background</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7423747', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td>33%</td><td>6</td><td>2</td><td>4</td><td>0</td><td>3</td><td>2</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a 
href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>278</td><td>119</td><td>159</td><td>12</td><td>198</td><td>2</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>278</td><td>119</td><td>159</td><td>12</td><td>198</td><td>2</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>278</td><td>119</td><td>159</td><td>12</td><td>198</td><td>2</td></tr><tr><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td><td>jaffe</td><td>JAFFE</td><td><a href="papers/45c31cde87258414f33412b3b12fc5bec7cb3ba9.html">Coding Facial Expressions with Gabor Wavelets</a></td><td><a href="http://pdfs.semanticscholar.org/45c3/1cde87258414f33412b3b12fc5bec7cb3ba9.pdf">[pdf]</a></td><td></td><td>edu</td><td>Kyushu University</td><td>33.59914655</td><td>130.22359848</td><td>36%</td><td>848</td><td>308</td><td>540</td><td>56</td><td>413</td><td>255</td></tr><tr><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td><td>mit_cbcl</td><td>MIT CBCL</td><td><a href="papers/079a0a3bf5200994e1f972b1b9197bf2f90e87d4.html">Component-Based Face Recognition with 3D Morphable Models</a></td><td><a href="http://www.bheisele.com/avbpa2003.pdf">[pdf]</a></td><td>2004 Conference on 
Computer Vision and Pattern Recognition Workshop</td><td></td><td></td><td></td><td></td><td>0%</td><td>12</td><td>0</td><td>12</td><td>0</td><td>8</td><td>0</td></tr><tr><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td><td>cohn_kanade</td><td>CK</td><td><a href="papers/23fc83c8cfff14a16df7ca497661264fc54ed746.html">Comprehensive Database for Facial Expression Analysis</a></td><td><a href="http://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf">[pdf]</a></td><td></td><td>edu</td><td>Carnegie Mellon University</td><td>37.41021930</td><td>-122.05965487</td><td>38%</td><td>999</td><td>380</td><td>619</td><td>75</td><td>555</td><td>252</td></tr><tr><td>09d78009687bec46e70efcf39d4612822e61cb8c</td><td>raid</td><td>RAiD</td><td><a href="papers/09d78009687bec46e70efcf39d4612822e61cb8c.html">Consistent Re-identification in a Camera Network</a></td><td><a href="http://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>27%</td><td>45</td><td>12</td><td>33</td><td>7</td><td>34</td><td>1</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>casablanca</td><td>Casablanca</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html">Context-Aware CNNs for Person Head Detection</a></td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>33%</td><td>27</td><td>9</td><td>18</td><td>1</td><td>22</td><td>0</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>hollywood_headset</td><td>HollywoodHeads</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html">Context-Aware CNNs for Person Head Detection</a></td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision 
(ICCV)</td><td></td><td></td><td></td><td></td><td>33%</td><td>27</td><td>9</td><td>18</td><td>1</td><td>22</td><td>0</td></tr><tr><td>c06b13d0ec3f5c43e2782cd22542588e233733c3</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/c06b13d0ec3f5c43e2782cd22542588e233733c3.html">Crowdsourcing facial expressions for affective-interaction</a></td><td><a href="https://doi.org/10.1016/j.cviu.2016.02.001">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>c06b13d0ec3f5c43e2782cd22542588e233733c3</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/c06b13d0ec3f5c43e2782cd22542588e233733c3.html">Crowdsourcing facial expressions for affective-interaction</a></td><td><a href="https://doi.org/10.1016/j.cviu.2016.02.001">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>10195a163ab6348eef37213a46f60a3d87f289c5</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/10195a163ab6348eef37213a46f60a3d87f289c5.html">Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</a></td><td><a href="https://doi.org/10.1007/s11263-016-0940-3">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>44%</td><td>133</td><td>59</td><td>74</td><td>14</td><td>90</td><td>28</td></tr><tr><td>10195a163ab6348eef37213a46f60a3d87f289c5</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/10195a163ab6348eef37213a46f60a3d87f289c5.html">Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</a></td><td><a href="https://doi.org/10.1007/s11263-016-0940-3">[pdf]</a></td><td>International Journal of Computer 
Vision</td><td></td><td></td><td></td><td></td><td>44%</td><td>133</td><td>59</td><td>74</td><td>14</td><td>90</td><td>28</td></tr><tr><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td><td>vgg_faces</td><td>VGG Face</td><td><a href="papers/162ea969d1929ed180cc6de9f0bf116993ff6e06.html">Deep Face Recognition</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>39%</td><td>999</td><td>393</td><td>606</td><td>71</td><td>621</td><td>156</td></tr><tr><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td><td>celeba</td><td>CelebA</td><td><a href="papers/6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4.html">Deep Learning Face Attributes in the Wild</a></td><td><a href="http://arxiv.org/pdf/1411.7766v2.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>42%</td><td>808</td><td>340</td><td>468</td><td>69</td><td>666</td><td>50</td></tr><tr><td>18010284894ed0edcca74e5bf768ee2e15ef7841</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/18010284894ed0edcca74e5bf768ee2e15ef7841.html">DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780493', 'linkType': 'ieee'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>47%</td><td>150</td><td>71</td><td>79</td><td>4</td><td>111</td><td>8</td></tr><tr><td>18010284894ed0edcca74e5bf768ee2e15ef7841</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/18010284894ed0edcca74e5bf768ee2e15ef7841.html">DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</a></td><td><a href="{'url': 
'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780493', 'linkType': 'ieee'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>47%</td><td>150</td><td>71</td><td>79</td><td>4</td><td>111</td><td>8</td></tr><tr><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td><td>cuhk03</td><td>CUHK03</td><td><a href="papers/6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3.html">DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Li_DeepReID_Deep_Filter_2014_CVPR_paper.pdf">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>35%</td><td>512</td><td>180</td><td>332</td><td>29</td><td>323</td><td>4</td></tr><tr><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td><a href="papers/13f06b08f371ba8b5d31c3e288b4deb61335b462.html">Depth and Appearance for Mobile Scene Analysis</a></td><td><a href="http://www.mmp.rwth-aachen.de/publications/pdf/ess-depthandappearance-iccv07.pdf/at_download/file">[pdf]</a></td><td>2007 IEEE 11th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>25%</td><td>319</td><td>79</td><td>240</td><td>27</td><td>192</td><td>0</td></tr><tr><td>2e384f057211426ac5922f1b33d2aa8df5d51f57</td><td>a_pascal_yahoo</td><td>aPascal</td><td><a href="papers/2e384f057211426ac5922f1b33d2aa8df5d51f57.html">Describing objects by their attributes</a></td><td><a href="http://www-2.cs.cmu.edu/~dhoiem/publications/cvpr2009_attributes.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>39%</td><td>999</td><td>393</td><td>606</td><td>71</td><td>727</td><td>73</td></tr><tr><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td><td>berkeley_pose</td><td>BPAD</td><td><a 
href="papers/7808937b46acad36e43c30ae4e9f3fd57462853d.html">Describing people: A poselet-based approach to attribute classification</a></td><td><a href="http://ttic.uchicago.edu/~smaji/papers/attributes-iccv11.pdf">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>43%</td><td>221</td><td>96</td><td>125</td><td>14</td><td>160</td><td>23</td></tr><tr><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td><td>cmdp</td><td>CMDP</td><td><a href="papers/56ae6d94fc6097ec4ca861f0daa87941d1c10b70.html">Distance Estimation of an Unknown Person from a Portrait</a></td><td><a href="http://pdfs.semanticscholar.org/56ae/6d94fc6097ec4ca861f0daa87941d1c10b70.pdf">[pdf]</a></td><td></td><td>edu</td><td>California Institute of Technology</td><td>34.13710185</td><td>-118.12527487</td><td>22%</td><td>9</td><td>2</td><td>7</td><td>0</td><td>6</td><td>1</td></tr><tr><td>84fe5b4ac805af63206012d29523a1e033bc827e</td><td>awe_ears</td><td>AWE Ears</td><td><a href="papers/84fe5b4ac805af63206012d29523a1e033bc827e.html">Ear recognition: More than a survey</a></td><td><a href="http://pdfs.semanticscholar.org/84fe/5b4ac805af63206012d29523a1e033bc827e.pdf">[pdf]</a></td><td>Neurocomputing</td><td></td><td></td><td></td><td></td><td>29%</td><td>24</td><td>7</td><td>17</td><td>0</td><td>11</td><td>0</td></tr><tr><td>133f01aec1534604d184d56de866a4bd531dac87</td><td>lfw_a</td><td>LFW-a</td><td><a href="papers/133f01aec1534604d184d56de866a4bd531dac87.html">Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.230">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>42%</td><td>177</td><td>75</td><td>102</td><td>15</td><td>102</td><td>54</td></tr><tr><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td><td>emotio_net</td><td>EmotioNet 
Database</td><td><a href="papers/c900e0ad4c95948baaf0acd8449fde26f9b4952a.html">EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780969', 'linkType': 'ieee'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>44%</td><td>72</td><td>32</td><td>40</td><td>7</td><td>54</td><td>8</td></tr><tr><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td><td>large_scale_person_search</td><td>Large Scale Person Search</td><td><a href="papers/2161f6b7ee3c0acc81603b01dc0df689683577b9.html">End-to-End Deep Learning for Person Search</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf', 'linkType': 's2'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>34%</td><td>41</td><td>14</td><td>27</td><td>2</td><td>27</td><td>0</td></tr><tr><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td><td>viper</td><td>VIPeR</td><td><a href="papers/6273b3491e94ea4dd1ce42b791d77bdc96ee73a8.html">Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</a></td><td><a href="http://pdfs.semanticscholar.org/6273/b3491e94ea4dd1ce42b791d77bdc96ee73a8.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>27%</td><td>584</td><td>159</td><td>425</td><td>38</td><td>336</td><td>9</td></tr><tr><td>2258e01865367018ed6f4262c880df85b94959f8</td><td>mot</td><td>MOT</td><td><a href="papers/2258e01865367018ed6f4262c880df85b94959f8.html">Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</a></td><td><a href="http://pdfs.semanticscholar.org/2e0b/00f4043e2d4b04c59c88bb54bcd907d0dcd4.pdf">[pdf]</a></td><td>EURASIP J. 
Image and Video Processing</td><td></td><td></td><td></td><td></td><td>20%</td><td>586</td><td>119</td><td>467</td><td>48</td><td>336</td><td>3</td></tr><tr><td>2258e01865367018ed6f4262c880df85b94959f8</td><td>mot</td><td>MOT</td><td><a href="papers/2258e01865367018ed6f4262c880df85b94959f8.html">Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</a></td><td><a href="http://pdfs.semanticscholar.org/2e0b/00f4043e2d4b04c59c88bb54bcd907d0dcd4.pdf">[pdf]</a></td><td>EURASIP J. Image and Video Processing</td><td></td><td></td><td></td><td></td><td>20%</td><td>586</td><td>119</td><td>467</td><td>48</td><td>336</td><td>3</td></tr><tr><td>2258e01865367018ed6f4262c880df85b94959f8</td><td>mot</td><td>MOT</td><td><a href="papers/2258e01865367018ed6f4262c880df85b94959f8.html">Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</a></td><td><a href="http://pdfs.semanticscholar.org/2e0b/00f4043e2d4b04c59c88bb54bcd907d0dcd4.pdf">[pdf]</a></td><td>EURASIP J. Image and Video Processing</td><td></td><td></td><td></td><td></td><td>20%</td><td>586</td><td>119</td><td>467</td><td>48</td><td>336</td><td>3</td></tr><tr><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td><td>precarious</td><td>Precarious</td><td><a href="papers/9e5378e7b336c89735d3bb15cf67eff96f86d39a.html">Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1703.06283.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>8%</td><td>12</td><td>1</td><td>11</td><td>1</td><td>10</td><td>0</td></tr><tr><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td><td>coco_qa</td><td>COCO QA</td><td><a href="papers/35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62.html">Exploring Models and Data for Image Question Answering</a></td><td><a 
href="http://pdfs.semanticscholar.org/aa79/9c29c0d44ece1864467af520fe70540c069b.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>191</td><td>83</td><td>108</td><td>12</td><td>163</td><td>1</td></tr><tr><td>42505464808dfb446f521fc6ff2cfeffd4d68ff1</td><td>gavab_db</td><td>Gavab</td><td><a href="papers/42505464808dfb446f521fc6ff2cfeffd4d68ff1.html">Expression invariant 3D face recognition with a Morphable Model</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813376', 'linkType': 'ieee'}">[pdf]</a></td><td>2008 8th IEEE International Conference on Automatic Face & Gesture Recognition</td><td></td><td></td><td></td><td></td><td>29%</td><td>94</td><td>27</td><td>67</td><td>10</td><td>57</td><td>5</td></tr><tr><td>a5acda0e8c0937bfed013e6382da127103e41395</td><td>disfa</td><td>DISFA</td><td><a href="papers/a5acda0e8c0937bfed013e6382da127103e41395.html">Extended DISFA Dataset: Investigating Posed and Spontaneous Facial Expressions</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789672', 'linkType': 'ieee'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>38%</td><td>8</td><td>3</td><td>5</td><td>1</td><td>5</td><td>0</td></tr><tr><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td><td>fddb</td><td>FDDB</td><td><a href="papers/75da1df4ed319926c544eefe17ec8d720feef8c0.html">FDDB: A Benchmark for Face Detection in Unconstrained Settings</a></td><td><a href="http://pdfs.semanticscholar.org/75da/1df4ed319926c544eefe17ec8d720feef8c0.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of Massachusetts</td><td>42.38897850</td><td>-72.52869870</td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>3607afdb204de9a5a9300ae98aa4635d9effcda2</td><td>sheffield</td><td>Sheffield Face</td><td><a 
href="papers/3607afdb204de9a5a9300ae98aa4635d9effcda2.html">Face Description with Local Binary Patterns: Application to Face Recognition</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2006.244">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>24%</td><td>999</td><td>238</td><td>761</td><td>65</td><td>483</td><td>87</td></tr><tr><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td><td>afw</td><td>AFW</td><td><a href="papers/0e986f51fe45b00633de9fd0c94d082d2be51406.html">Face detection, pose estimation, and landmark localization in the wild</a></td><td><a href="http://vision.ics.uci.edu/papers/ZhuR_CVPR_2012/ZhuR_CVPR_2012.pdf">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>University of California, Irvine</td><td>33.64319010</td><td>-117.84016494</td><td>52%</td><td>999</td><td>521</td><td>478</td><td>59</td><td>607</td><td>273</td></tr><tr><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td><td>youtube_faces</td><td>YouTubeFaces</td><td><a href="papers/560e0e58d0059259ddf86fcec1fa7975dee6a868.html">Face recognition in unconstrained videos with matched background similarity</a></td><td><a href="http://www.cs.tau.ac.il/~wolf/papers/lvfw.pdf">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>Open University of Israel</td><td>32.77824165</td><td>34.99565673</td><td>50%</td><td>485</td><td>244</td><td>240</td><td>32</td><td>290</td><td>140</td></tr><tr><td>4c170a0dcc8de75587dae21ca508dab2f9343974</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/4c170a0dcc8de75587dae21ca508dab2f9343974.html">FaceTracer: A Search Engine for Large Collections of Images with Faces</a></td><td><a href="http://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf">[pdf]</a></td><td></td><td>edu</td><td>Columbia 
University</td><td>40.84198360</td><td>-73.94368971</td><td>48%</td><td>218</td><td>105</td><td>113</td><td>17</td><td>146</td><td>52</td></tr><tr><td>4c170a0dcc8de75587dae21ca508dab2f9343974</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/4c170a0dcc8de75587dae21ca508dab2f9343974.html">FaceTracer: A Search Engine for Large Collections of Images with Faces</a></td><td><a href="http://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf">[pdf]</a></td><td></td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>48%</td><td>218</td><td>105</td><td>113</td><td>17</td><td>146</td><td>52</td></tr><tr><td>7ebb153704706e457ab57b432793d2b6e5d12592</td><td>vgg_celebs_in_places</td><td>CIP</td><td><a href="papers/7ebb153704706e457ab57b432793d2b6e5d12592.html">Faces in Places: compound query retrieval</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Oxford</td><td>51.75345380</td><td>-1.25400997</td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>4</td><td>0</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mafl</td><td>MAFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="http://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf">[pdf]</a></td><td></td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>48%</td><td>383</td><td>182</td><td>201</td><td>25</td><td>259</td><td>60</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mafl</td><td>MAFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a 
href="http://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf">[pdf]</a></td><td></td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>48%</td><td>383</td><td>182</td><td>201</td><td>25</td><td>259</td><td>60</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mtfl</td><td>MTFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="http://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf">[pdf]</a></td><td></td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>48%</td><td>383</td><td>182</td><td>201</td><td>25</td><td>259</td><td>60</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mtfl</td><td>MTFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="http://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf">[pdf]</a></td><td></td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>48%</td><td>383</td><td>182</td><td>201</td><td>25</td><td>259</td><td>60</td></tr><tr><td>014b8df0180f33b9fea98f34ae611c6447d761d2</td><td>buhmap_db</td><td>BUHMAP-DB </td><td><a href="papers/014b8df0180f33b9fea98f34ae611c6447d761d2.html">Facial feature tracking and expression recognition for sign language</a></td><td><a href="http://www.cmpe.boun.edu.tr/pilab/pilabfiles/databases/buhmap/files/ari2008facialfeaturetracking.pdf">[pdf]</a></td><td>2008 23rd International Symposium on Computer and Information Sciences</td><td></td><td></td><td></td><td></td><td>16%</td><td>25</td><td>4</td><td>21</td><td>1</td><td>10</td><td>2</td></tr><tr><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td><td>malf</td><td>MALF</td><td><a 
href="papers/45e616093a92e5f1e61a7c6037d5f637aa8964af.html">Fine-grained evaluation on face detection in the wild</a></td><td><a href="http://www.cs.toronto.edu/~byang/papers/malf_fg15.pdf">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>Chinese Academy of Sciences</td><td>40.00447950</td><td>116.37023800</td><td>71%</td><td>17</td><td>12</td><td>5</td><td>0</td><td>13</td><td>4</td></tr><tr><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td><td>jpl_pose</td><td>JPL-Interaction dataset</td><td><a href="papers/1aad2da473888cb7ebc1bfaa15bfa0f1502ce005.html">First-Person Activity Recognition: What Are They Doing to Me?</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Ryoo_First-Person_Activity_Recognition_2013_CVPR_paper.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>51%</td><td>148</td><td>76</td><td>72</td><td>8</td><td>109</td><td>3</td></tr><tr><td>774cbb45968607a027ae4729077734db000a1ec5</td><td>urban_tribes</td><td>Urban Tribes</td><td><a href="papers/774cbb45968607a027ae4729077734db000a1ec5.html">From Bikers to Surfers: Visual Recognition of Urban Tribes</a></td><td><a href="http://pdfs.semanticscholar.org/774c/bb45968607a027ae4729077734db000a1ec5.pdf">[pdf]</a></td><td></td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>47%</td><td>17</td><td>8</td><td>9</td><td>1</td><td>12</td><td>1</td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td>social_relation</td><td>Social Relation</td><td><a href="papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html">From Facial Expression Recognition to Interpersonal Relation Prediction</a></td><td><a href="http://arxiv.org/abs/1609.06426">[pdf]</a></td><td>International Journal of Computer 
Vision</td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>5</td><td>1</td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td>social_relation</td><td>Social Relation</td><td><a href="papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html">From Facial Expression Recognition to Interpersonal Relation Prediction</a></td><td><a href="http://arxiv.org/abs/1609.06426">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>5</td><td>1</td></tr><tr><td>93884e46c49f7ae1c7c34046fbc28882f2bd6341</td><td>kdef</td><td>KDEF</td><td><a href="papers/93884e46c49f7ae1c7c34046fbc28882f2bd6341.html">Gaze fixation and the neural circuitry of face processing in autism</a></td><td><a href="{'url': 'http://doi.org/10.1038/nn1421', 'linkType': 'nature'}">[pdf]</a></td><td>Nature Neuroscience</td><td></td><td></td><td></td><td></td><td>31%</td><td>608</td><td>190</td><td>418</td><td>92</td><td>463</td><td>0</td></tr><tr><td>2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d.html">Genealogical face recognition based on UB KinFace database</a></td><td><a href="https://doi.org/10.1109/CVPRW.2011.5981801">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>SUNY Buffalo</td><td>42.93362780</td><td>-78.88394479</td><td>13%</td><td>30</td><td>4</td><td>26</td><td>1</td><td>9</td><td>5</td></tr><tr><td>2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d.html">Genealogical face recognition based on UB KinFace database</a></td><td><a href="https://doi.org/10.1109/CVPRW.2011.5981801">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>SUNY 
Buffalo</td><td>42.93362780</td><td>-78.88394479</td><td>13%</td><td>30</td><td>4</td><td>26</td><td>1</td><td>9</td><td>5</td></tr><tr><td>2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d.html">Genealogical face recognition based on UB KinFace database</a></td><td><a href="https://doi.org/10.1109/CVPRW.2011.5981801">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>SUNY Buffalo</td><td>42.93362780</td><td>-78.88394479</td><td>13%</td><td>30</td><td>4</td><td>26</td><td>1</td><td>9</td><td>5</td></tr><tr><td>b6b1b0632eb9d4ab1427278f5e5c46f97753c73d</td><td>fei</td><td>FEI</td><td><a href="papers/b6b1b0632eb9d4ab1427278f5e5c46f97753c73d.html">Generalização cartográfica automatizada para um banco de dados cadastral</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/b6b1/b0632eb9d4ab1427278f5e5c46f97753c73d.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9.html">Generic object recognition with boosting</a></td><td><a href="http://www.emt.tu-graz.ac.at/~pinz/onlinepapers/Reprint_Vol_28_No_3_2006.pdf">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>24%</td><td>286</td><td>69</td><td>217</td><td>16</td><td>189</td><td>0</td></tr><tr><td>2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9.html">Generic object recognition with boosting</a></td><td><a href="http://www.emt.tu-graz.ac.at/~pinz/onlinepapers/Reprint_Vol_28_No_3_2006.pdf">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine 
Intelligence</td><td></td><td></td><td></td><td></td><td>24%</td><td>286</td><td>69</td><td>217</td><td>16</td><td>189</td><td>0</td></tr><tr><td>2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9.html">Generic object recognition with boosting</a></td><td><a href="http://www.emt.tu-graz.ac.at/~pinz/onlinepapers/Reprint_Vol_28_No_3_2006.pdf">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>24%</td><td>286</td><td>69</td><td>217</td><td>16</td><td>189</td><td>0</td></tr><tr><td>17b46e2dad927836c689d6787ddb3387c6159ece</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/17b46e2dad927836c689d6787ddb3387c6159ece.html">GeoFaceExplorer: exploring the geo-dependence of facial attributes</a></td><td><a href="http://doi.acm.org/10.1145/2676440.2676443">[pdf]</a></td><td></td><td>edu</td><td>University of Kentucky</td><td>38.03337420</td><td>-84.50177580</td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td><td>tvhi</td><td>TVHI</td><td><a href="papers/3cd40bfa1ff193a96bde0207e5140a399476466c.html">High Five: Recognising human interactions in TV shows</a></td><td><a href="http://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>91</td><td>31</td><td>60</td><td>11</td><td>64</td><td>1</td></tr><tr><td>24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd</td><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td><a href="papers/24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd.html">High-resolution comprehensive 3-D dynamic database for facial articulation analysis</a></td><td><a 
href="http://www.researchgate.net/profile/Wei_Quan3/publication/221430048_High-resolution_comprehensive_3-D_dynamic_database_for_facial_articulation_analysis/links/0deec534309495805d000000.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>0%</td><td>5</td><td>0</td><td>5</td><td>0</td><td>1</td><td>0</td></tr><tr><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td><td>hipsterwars</td><td>Hipsterwars</td><td><a href="papers/04c2cda00e5536f4b1508cbd80041e9552880e67.html">Hipster Wars: Discovering Elements of Fashion Styles</a></td><td><a href="http://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf">[pdf]</a></td><td></td><td>edu</td><td>Tohoku University</td><td>38.25309450</td><td>140.87365930</td><td>53%</td><td>91</td><td>48</td><td>43</td><td>5</td><td>60</td><td>15</td></tr><tr><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td><td>inria_person</td><td>INRIA Pedestrian</td><td><a href="papers/10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5.html">Histograms of oriented gradients for human detection</a></td><td><a href="http://nichol.as/papers/Dalai/Histograms%20of%20oriented%20gradients%20for%20human%20detection.pdf">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td>22%</td><td>999</td><td>217</td><td>782</td><td>67</td><td>520</td><td>22</td></tr><tr><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td><td>ucf_selfie</td><td>UCF Selfie</td><td><a href="papers/041d3eedf5e45ce5c5229f0181c5c576ed1fafd6.html">How to Take a Good Selfie?</a></td><td><a href="http://doi.acm.org/10.1145/2733373.2806365">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>22%</td><td>9</td><td>2</td><td>7</td><td>0</td><td>5</td><td>0</td></tr><tr><td>44d23df380af207f5ac5b41459c722c87283e1eb</td><td>wider_attribute</td><td>WIDER Attribute</td><td><a 
href="papers/44d23df380af207f5ac5b41459c722c87283e1eb.html">Human Attribute Recognition by Deep Hierarchical Contexts</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>67%</td><td>18</td><td>12</td><td>6</td><td>0</td><td>16</td><td>0</td></tr><tr><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td><td>cuhk01</td><td>CUHK01</td><td><a href="papers/44484d2866f222bbb9b6b0870890f9eea1ffb2d0.html">Human Reidentification with Transferred Metric Learning</a></td><td><a href="http://pdfs.semanticscholar.org/4448/4d2866f222bbb9b6b0870890f9eea1ffb2d0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>26%</td><td>258</td><td>67</td><td>191</td><td>12</td><td>141</td><td>1</td></tr><tr><td>57178b36c21fd7f4529ac6748614bb3374714e91</td><td>ijb_c</td><td>IJB-C</td><td><a href="papers/57178b36c21fd7f4529ac6748614bb3374714e91.html">IARPA Janus Benchmark - C: Face Dataset and Protocol</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217', 'linkType': 'ieee'}">[pdf]</a></td><td>2018 International Conference on Biometrics (ICB)</td><td></td><td></td><td></td><td></td><td>33%</td><td>9</td><td>3</td><td>6</td><td>2</td><td>9</td><td>0</td></tr><tr><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td><td>ijb_b</td><td>IJB-B</td><td><a href="papers/0cb2dd5f178e3a297a0c33068961018659d0f443.html">IARPA Janus Benchmark-B Face Dataset</a></td><td><a href="http://www.vislab.ucr.edu/Biometrics2017/program_slides/Noblis_CVPRW_IJBB.pdf">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>24%</td><td>25</td><td>6</td><td>19</td><td>6</td><td>21</td><td>3</td></tr><tr><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td><td>ilids_mcts</td><td></td><td><a 
href="papers/0297448f3ed948e136bb06ceff10eccb34e5bb77.html">Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings 40th Annual 2006 International Carnahan Conference on Security Technology</td><td></td><td></td><td></td><td></td><td>22%</td><td>32</td><td>7</td><td>25</td><td>2</td><td>17</td><td>0</td></tr><tr><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td><td>ifad</td><td>IFAD</td><td><a href="papers/55c40cbcf49a0225e72d911d762c27bb1c2d14aa.html">Indian Face Age Database : A Database for Face Recognition with Age Variation</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/55c4/0cbcf49a0225e72d911d762c27bb1c2d14aa.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td><td>imfdb</td><td>IMFDB</td><td><a href="papers/ca3e88d87e1344d076c964ea89d91a75c417f5ee.html">Indian Movie Face Database: A benchmark for face recognition under wide variations</a></td><td><span class="gray">[pdf]</a></td><td>2013 Fourth National Conference on Computer Vision, Pattern Recognition, Image Processing and Graphics (NCVPRIPG)</td><td></td><td></td><td></td><td></td><td>60%</td><td>15</td><td>9</td><td>6</td><td>0</td><td>10</td><td>4</td></tr><tr><td>95f12d27c3b4914e0668a268360948bce92f7db3</td><td>helen</td><td>Helen</td><td><a href="papers/95f12d27c3b4914e0668a268360948bce92f7db3.html">Interactive Facial Feature Localization</a></td><td><a href="http://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of Illinois, 
Urbana-Champaign</td><td>40.11116745</td><td>-88.22587665</td><td>52%</td><td>339</td><td>177</td><td>162</td><td>27</td><td>208</td><td>100</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>cmu_pie</td><td>CMU PIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html">International Conference on Automatic Face and Gesture Recognition The CMU Pose , Illumination , and Expression ( PIE ) Database</a></td><td><a href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>742</td><td>330</td><td>412</td><td>61</td><td>410</td><td>232</td></tr><tr><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td><td>mrp_drone</td><td>MRP Drone</td><td><a href="papers/ad01687649d95cd5b56d7399a9603c4b8e2217d7.html">Investigating Open-World Person Re-identi cation Using a Drone</a></td><td><a href="http://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>5</td><td>2</td><td>3</td><td>0</td><td>3</td><td>0</td></tr><tr><td>b71d1aa90dcbe3638888725314c0d56640c1fef1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/b71d1aa90dcbe3638888725314c0d56640c1fef1.html">Iranian Face Database with age, pose and expression</a></td><td><span class="gray">[pdf]</a></td><td>2007 International Conference on Machine Vision</td><td></td><td></td><td></td><td></td><td>20%</td><td>20</td><td>4</td><td>16</td><td>2</td><td>11</td><td>3</td></tr><tr><td>b71d1aa90dcbe3638888725314c0d56640c1fef1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/b71d1aa90dcbe3638888725314c0d56640c1fef1.html">Iranian Face Database with age, pose and expression</a></td><td><span class="gray">[pdf]</a></td><td>2007 International Conference on Machine 
Vision</td><td></td><td></td><td></td><td></td><td>20%</td><td>20</td><td>4</td><td>16</td><td>2</td><td>11</td><td>3</td></tr><tr><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/137aa2f891d474fce1e7a1d1e9b3aefe21e22b34.html">Is the eye region more reliable than the face? A preliminary study of face-based recognition on a transgender dataset</a></td><td><a href="https://doi.org/10.1109/BTAS.2013.6712710">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>University of North Carolina Wilmington</td><td>34.23755810</td><td>-77.92701290</td><td>43%</td><td>7</td><td>3</td><td>4</td><td>1</td><td>2</td><td>3</td></tr><tr><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/137aa2f891d474fce1e7a1d1e9b3aefe21e22b34.html">Is the eye region more reliable than the face? A preliminary study of face-based recognition on a transgender dataset</a></td><td><a href="https://doi.org/10.1109/BTAS.2013.6712710">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>University of North Carolina Wilmington</td><td>34.23755810</td><td>-77.92701290</td><td>43%</td><td>7</td><td>3</td><td>4</td><td>1</td><td>2</td><td>3</td></tr><tr><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/137aa2f891d474fce1e7a1d1e9b3aefe21e22b34.html">Is the eye region more reliable than the face? 
A preliminary study of face-based recognition on a transgender dataset</a></td><td><a href="https://doi.org/10.1109/BTAS.2013.6712710">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>University of North Carolina Wilmington</td><td>34.23755810</td><td>-77.92701290</td><td>43%</td><td>7</td><td>3</td><td>4</td><td>1</td><td>2</td><td>3</td></tr><tr><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td><td>kinectface</td><td>KinectFaceDB</td><td><a href="papers/0b440695c822a8e35184fb2f60dcdaa8a6de84ae.html">KinectFaceDB: A Kinect Database for Face Recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6866883', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics: Systems</td><td></td><td></td><td></td><td></td><td>16%</td><td>75</td><td>12</td><td>63</td><td>6</td><td>25</td><td>8</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>3dddb_unconstrained</td><td>3D Dynamic</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>ar_facedb</td><td>AR Face</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a 
href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>put_face</td><td>Put Face</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td><td>lfw</td><td>LFW</td><td><a href="papers/7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22.html">Labeled Faces in the Wild: A Survey</a></td><td><a href="http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>29%</td><td>99</td><td>29</td><td>70</td><td>9</td><td>63</td><td>12</td></tr><tr><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td><td>lfw</td><td>LFW</td><td><a href="papers/7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22.html">Labeled Faces in the Wild: A Survey</a></td><td><a 
href="http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>29%</td><td>99</td><td>29</td><td>70</td><td>9</td><td>63</td><td>12</td></tr><tr><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td><td>lfw</td><td>LFW</td><td><a href="papers/7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22.html">Labeled Faces in the Wild: A Survey</a></td><td><a href="http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>29%</td><td>99</td><td>29</td><td>70</td><td>9</td><td>63</td><td>12</td></tr><tr><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td><td>lag</td><td>LAG</td><td><a href="papers/0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e.html">Large Age-Gap face verification by feature injection in deep networks</a></td><td><a href="http://pdfs.semanticscholar.org/0d2d/d4fc016cb6a517d8fb43a7cc3ff62964832e.pdf">[pdf]</a></td><td>Pattern Recognition Letters</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>3</td><td>2</td></tr><tr><td>f3b84a03985de3890b400b68e2a92c0a00afd9d0</td><td>scface</td><td>SCface</td><td><a href="papers/f3b84a03985de3890b400b68e2a92c0a00afd9d0.html">Large Variability Surveillance Camera Face Database</a></td><td><span class="gray">[pdf]</a></td><td>2015 Seventh International Conference on Computational Intelligence, Modelling and Simulation (CIMSim)</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td><td>uccs</td><td>UCCS</td><td><a href="papers/07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1.html">Large scale unconstrained open set face database</a></td><td><a href="http://www.vast.uccs.edu/~tboult/PAPERS/BTAS13-Sapkota-Boult-UCCSFaceDB.pdf">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems 
(BTAS)</td><td>edu</td><td>University of Colorado at Colorado Springs</td><td>38.89646790</td><td>-104.80505940</td><td>60%</td><td>5</td><td>3</td><td>2</td><td>0</td><td>3</td><td>0</td></tr><tr><td>69a68f9cf874c69e2232f47808016c2736b90c35</td><td>celeba_plus</td><td>CelebFaces+</td><td><a href="papers/69a68f9cf874c69e2232f47808016c2736b90c35.html">Learning Deep Representation for Imbalanced Classification</a></td><td><a href="http://personal.ie.cuhk.edu.hk/~ccloy/files/cvpr_2016_imbalanced.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Shenzhen Institutes of Advanced Technology</td><td>22.59805605</td><td>113.98533784</td><td>33%</td><td>51</td><td>17</td><td>34</td><td>1</td><td>39</td><td>2</td></tr><tr><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td><td>casia_webface</td><td>CASIA Webface</td><td><a href="papers/853bd61bc48a431b9b1c7cab10c603830c488e39.html">Learning Face Representation from Scratch</a></td><td><a href="http://pdfs.semanticscholar.org/b8a2/0ed7e74325da76d7183d1ab77b082a92b447.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>53%</td><td>436</td><td>233</td><td>203</td><td>32</td><td>284</td><td>115</td></tr><tr><td>2a171f8d14b6b8735001a11c217af9587d095848</td><td>expw</td><td>ExpW</td><td><a href="papers/2a171f8d14b6b8735001a11c217af9587d095848.html">Learning Social Relation Traits from Face Images</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.414">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>30%</td><td>20</td><td>6</td><td>14</td><td>5</td><td>15</td><td>0</td></tr><tr><td>2a171f8d14b6b8735001a11c217af9587d095848</td><td>expw</td><td>ExpW</td><td><a href="papers/2a171f8d14b6b8735001a11c217af9587d095848.html">Learning Social Relation Traits from Face Images</a></td><td><a 
href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.414">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>30%</td><td>20</td><td>6</td><td>14</td><td>5</td><td>15</td><td>0</td></tr><tr><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td><a href="papers/4e4746094bf60ee83e40d8597a6191e463b57f76.html">Learning effective human pose estimation from inaccurate annotation</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995318', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 2011</td><td></td><td></td><td></td><td></td><td>40%</td><td>173</td><td>70</td><td>103</td><td>9</td><td>116</td><td>2</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html">Learning to parse images of articulated bodies</a></td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>31%</td><td>373</td><td>117</td><td>256</td><td>30</td><td>238</td><td>2</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html">Learning to parse images of articulated bodies</a></td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>31%</td><td>373</td><td>117</td><td>256</td><td>30</td><td>238</td><td>2</td></tr><tr><td>c9bda86e23cab9e4f15ea0c4cbb6cc02b9dfb709</td><td>stanford_drone</td><td>Stanford Drone</td><td><a href="papers/c9bda86e23cab9e4f15ea0c4cbb6cc02b9dfb709.html">Learning to predict human behaviour in crowded scenes</a></td><td><a 
href="http://pdfs.semanticscholar.org/c9bd/a86e23cab9e4f15ea0c4cbb6cc02b9dfb709.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>20%</td><td>5</td><td>1</td><td>4</td><td>1</td><td>5</td><td>0</td></tr><tr><td>140438a77a771a8fb656b39a78ff488066eb6b50</td><td>lfw_p</td><td>LFWP</td><td><a href="papers/140438a77a771a8fb656b39a78ff488066eb6b50.html">Localizing Parts of Faces Using a Consensus of Exemplars</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2011.5995602">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>53%</td><td>521</td><td>274</td><td>247</td><td>40</td><td>321</td><td>144</td></tr><tr><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td><td>cuhk02</td><td>CUHK02</td><td><a href="papers/38b55d95189c5e69cf4ab45098a48fba407609b4.html">Locally Aligned Feature Transforms across Views</a></td><td><a href="http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989d594.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>24%</td><td>242</td><td>57</td><td>185</td><td>17</td><td>139</td><td>1</td></tr><tr><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td><td>mars</td><td>MARS</td><td><a href="papers/c0387e788a52f10bf35d4d50659cfa515d89fbec.html">MARS: A Video Benchmark for Large-Scale Person Re-Identification</a></td><td><a href="http://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>146</td><td>49</td><td>97</td><td>6</td><td>96</td><td>0</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph</td><td>MORPH Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><a 
href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td>46%</td><td>424</td><td>195</td><td>229</td><td>27</td><td>231</td><td>155</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph_nc</td><td>MORPH Non-Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td>46%</td><td>424</td><td>195</td><td>229</td><td>27</td><td>231</td><td>155</td></tr><tr><td>291265db88023e92bb8c8e6390438e5da148e8f5</td><td>msceleb</td><td>MsCeleb</td><td><a href="papers/291265db88023e92bb8c8e6390438e5da148e8f5.html">MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>167</td><td>83</td><td>84</td><td>15</td><td>131</td><td>27</td></tr><tr><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td><a href="papers/3dc3f0b64ef80f573e3a5f96e456e52ee980b877.html">Maximum Likelihood Training of the Embedded HMM for Face Detection and Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/3dc3/f0b64ef80f573e3a5f96e456e52ee980b877.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>3</td><td>0</td><td>3</td><td>0</td><td>2</td><td>0</td></tr><tr><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td><td>50_people_one_question</td><td>50 People One Question</td><td><a href="papers/5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725.html">Merging Pose Estimates 
Across Space and Time</a></td><td><a href="http://pdfs.semanticscholar.org/63b2/f5348af0f969dfc2afb4977732393c6459ec.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>15</td><td>9</td><td>6</td><td>0</td><td>11</td><td>2</td></tr><tr><td>696ca58d93f6404fea0fc75c62d1d7b378f47628</td><td>coco</td><td>COCO</td><td><a href="papers/696ca58d93f6404fea0fc75c62d1d7b378f47628.html">Microsoft COCO Captions: Data Collection and Evaluation Server</a></td><td><a href="http://pdfs.semanticscholar.org/ba95/81c33a7eebe87c50e61763e4c8d1723538f2.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>46%</td><td>283</td><td>129</td><td>154</td><td>16</td><td>231</td><td>4</td></tr><tr><td>a5a44a32a91474f00a3cda671a802e87c899fbb4</td><td>moments_in_time</td><td>Moments in Time</td><td><a href="papers/a5a44a32a91474f00a3cda671a802e87c899fbb4.html">Moments in Time Dataset: one million videos for event understanding</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1801.03150.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>60%</td><td>25</td><td>15</td><td>10</td><td>2</td><td>25</td><td>0</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_multiview</td><td>TUD-Multiview</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html">Monocular 3D pose estimation and tracking by detection</a></td><td><a href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>25%</td><td>302</td><td>76</td><td>226</td><td>32</td><td>199</td><td>1</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html">Monocular 3D pose estimation and tracking by detection</a></td><td><a 
href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>25%</td><td>302</td><td>76</td><td>226</td><td>32</td><td>199</td><td>1</td></tr><tr><td>3b5b6d19d4733ab606c39c69a889f9e67967f151</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/3b5b6d19d4733ab606c39c69a889f9e67967f151.html">Multi-camera activity correlation analysis</a></td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0163.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>25%</td><td>138</td><td>35</td><td>103</td><td>8</td><td>76</td><td>1</td></tr><tr><td>3b5b6d19d4733ab606c39c69a889f9e67967f151</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/3b5b6d19d4733ab606c39c69a889f9e67967f151.html">Multi-camera activity correlation analysis</a></td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0163.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>25%</td><td>138</td><td>35</td><td>103</td><td>8</td><td>76</td><td>1</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_brussels</td><td>TUD-Brussels</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html">Multi-cue onboard pedestrian detection</a></td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>19%</td><td>217</td><td>41</td><td>176</td><td>14</td><td>131</td><td>1</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_motionpairs</td><td>TUD-Motionparis</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html">Multi-cue onboard 
pedestrian detection</a></td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>19%</td><td>217</td><td>41</td><td>176</td><td>14</td><td>131</td><td>1</td></tr><tr><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td><td>ucf_crowd</td><td>UCF-CC-50</td><td><a href="papers/32c801cb7fbeb742edfd94cccfca4934baec71da.html">Multi-source Multi-scale Counting in Extremely Dense Crowd Images</a></td><td><a href="http://www.cs.ucf.edu/~haroon/datafiles/Idrees_Counting_CVPR_2013.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>38%</td><td>125</td><td>48</td><td>77</td><td>6</td><td>72</td><td>1</td></tr><tr><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td><td>immediacy</td><td>Immediacy</td><td><a href="papers/1e3df3ca8feab0b36fd293fe689f93bb2aaac591.html">Multi-task Recurrent Neural Network for Immediacy Prediction</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.383">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>60%</td><td>25</td><td>15</td><td>10</td><td>2</td><td>20</td><td>0</td></tr><tr><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td><td>bp4d_plus</td><td>BP4D+</td><td><a href="papers/53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4.html">Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhang_Multimodal_Spontaneous_Emotion_CVPR_2016_paper.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition 
(CVPR)</td><td></td><td></td><td></td><td></td><td>25%</td><td>40</td><td>10</td><td>30</td><td>0</td><td>20</td><td>6</td></tr><tr><td>2fda164863a06a92d3a910b96eef927269aeb730</td><td>names_and_faces_news</td><td>News Dataset</td><td><a href="papers/2fda164863a06a92d3a910b96eef927269aeb730.html">Names and faces in the news</a></td><td><a href="http://www.cs.utexas.edu/~grauman/courses/spring2007/395T/papers/berg_names_and_faces.pdf">[pdf]</a></td><td>Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.</td><td></td><td></td><td></td><td></td><td>41%</td><td>294</td><td>120</td><td>174</td><td>24</td><td>207</td><td>45</td></tr><tr><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td><a href="papers/4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06.html">Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</a></td><td><a href="http://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>24%</td><td>21</td><td>5</td><td>16</td><td>3</td><td>11</td><td>1</td></tr><tr><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td><td>tiny_images</td><td>Tiny Images</td><td><a href="papers/31b58ced31f22eab10bd3ee2d9174e7c14c27c01.html">Nonparametric Object and Scene Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/31b5/8ced31f22eab10bd3ee2d9174e7c14c27c01.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>31%</td><td>999</td><td>305</td><td>694</td><td>93</td><td>670</td><td>9</td></tr><tr><td>55206f0b5f57ce17358999145506cd01e570358c</td><td>orl</td><td>ORL</td><td><a href="papers/55206f0b5f57ce17358999145506cd01e570358c.html">O M 4 . 1 The Subject Database 4 . 2 Experiment Plan 5 . 
1 Varying the Overlap 4 Experimental Setup 5 Parameterisation Results</a></td><td><a href="http://pdfs.semanticscholar.org/5520/6f0b5f57ce17358999145506cd01e570358c.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>21%</td><td>999</td><td>214</td><td>785</td><td>96</td><td>550</td><td>57</td></tr><tr><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td><td>penn_fudan</td><td>Penn Fudan</td><td><a href="papers/3394168ff0719b03ff65bcea35336a76b21fe5e4.html">Object Detection Combining Recognition and Segmentation</a></td><td><a href="http://pdfs.semanticscholar.org/f531/a554cade14b9b340de6730683a28c292dd74.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>23%</td><td>101</td><td>23</td><td>78</td><td>11</td><td>58</td><td>0</td></tr><tr><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td><td>soton</td><td>SOTON HiD</td><td><a href="papers/4f93cd09785c6e77bf4bc5a788e079df524c8d21.html">On a large sequence-based human gait database</a></td><td><a href="http://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>36%</td><td>148</td><td>54</td><td>94</td><td>16</td><td>98</td><td>0</td></tr><tr><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td><td>afad</td><td>AFAD</td><td><a href="papers/6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c.html">Ordinal Regression with Multiple Output CNN for Age Estimation</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.532">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>44%</td><td>68</td><td>30</td><td>38</td><td>8</td><td>49</td><td>7</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>market1203</td><td>Market 1203</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a 
href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>3</td><td>0</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>3</td><td>0</td></tr><tr><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td><td>frgc</td><td>FRGC</td><td><a href="papers/18ae7c9a4bbc832b8b14bc4122070d7939f5e00e.html">Overview of the face recognition grand challenge</a></td><td><a href="http://www3.nd.edu/~kwb/PhillipsEtAlCVPR_2005.pdf">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td>25%</td><td>999</td><td>253</td><td>746</td><td>110</td><td>572</td><td>64</td></tr><tr><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td><td>pets</td><td>PETS 2017</td><td><a href="papers/22909dd19a0ec3b6065334cb5be5392cb24d839d.html">PETS 2017: Dataset and Challenge</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014998', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>0%</td><td>8</td><td>0</td><td>8</td><td>0</td><td>2</td><td>0</td></tr><tr><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td><td>unbc_shoulder_pain</td><td>UNBC-McMaster Pain</td><td><a href="papers/56ffa7d906b08d02d6d5a12c7377a57e24ef3391.html">Painful data: The UNBC-McMaster shoulder pain expression archive database</a></td><td><a href="{'url': 
'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771462', 'linkType': 'ieee'}">[pdf]</a></td><td>Face and Gesture 2011</td><td></td><td></td><td></td><td></td><td>32%</td><td>184</td><td>58</td><td>126</td><td>23</td><td>112</td><td>23</td></tr><tr><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td><td>chokepoint</td><td>ChokePoint</td><td><a href="papers/0486214fb58ee9a04edfe7d6a74c6d0f661a7668.html">Patch-based probabilistic image quality assessment for face selection and improved video-based face recognition</a></td><td><a href="http://conradsanderson.id.au/pdfs/wong_face_selection_cvpr_biometrics_2011.pdf">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>University of Queensland</td><td>-27.49741805</td><td>153.01316956</td><td>30%</td><td>128</td><td>39</td><td>89</td><td>6</td><td>68</td><td>14</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>apis</td><td>APiS1.0</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td>38%</td><td>26</td><td>10</td><td>16</td><td>1</td><td>13</td><td>2</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>svs</td><td>SVS</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision 
Workshops</td><td></td><td></td><td></td><td></td><td>38%</td><td>26</td><td>10</td><td>16</td><td>1</td><td>13</td><td>2</td></tr><tr><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td><td>peta</td><td>PETA</td><td><a href="papers/2a4bbee0b4cf52d5aadbbc662164f7efba89566c.html">Pedestrian Attribute Recognition At Far Distance</a></td><td><a href="http://personal.ie.cuhk.edu.hk/~pluo/pdf/mm14.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>80</td><td>37</td><td>43</td><td>2</td><td>51</td><td>3</td></tr><tr><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/1dc35905a1deff8bc74688f2d7e2f48fd2273275.html">Pedestrian detection: A benchmark</a></td><td><a href="http://vision.ucsd.edu/~pdollar/files/papers/DollarCVPR09peds.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>17%</td><td>519</td><td>89</td><td>430</td><td>27</td><td>286</td><td>2</td></tr><tr><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/1dc35905a1deff8bc74688f2d7e2f48fd2273275.html">Pedestrian detection: A benchmark</a></td><td><a href="http://vision.ucsd.edu/~pdollar/files/papers/DollarCVPR09peds.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>17%</td><td>519</td><td>89</td><td>430</td><td>27</td><td>286</td><td>2</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_campus</td><td>TUD-Campus</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html">People-tracking-by-detection and people-detection-by-tracking</a></td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern 
Recognition</td><td></td><td></td><td></td><td></td><td>22%</td><td>529</td><td>116</td><td>413</td><td>41</td><td>316</td><td>1</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_crossing</td><td>TUD-Crossing</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html">People-tracking-by-detection and people-detection-by-tracking</a></td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>22%</td><td>529</td><td>116</td><td>413</td><td>41</td><td>316</td><td>1</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html">People-tracking-by-detection and people-detection-by-tracking</a></td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>22%</td><td>529</td><td>116</td><td>413</td><td>41</td><td>316</td><td>1</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html">Performance Measures and a Data Set for Multi-target, Multi-camera Tracking</a></td><td><a href="http://pdfs.semanticscholar.org/b5f2/4f49f9a5e47d6601399dc068158ad88d7651.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>136</td><td>58</td><td>78</td><td>6</td><td>107</td><td>0</td></tr><tr><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td><td>prid</td><td>PRID</td><td><a href="papers/16c7c31a7553d99f1837fc6e88e77b5ccbb346b8.html">Person Re-identification by Descriptive and Discriminative Classification</a></td><td><a 
href="http://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>27%</td><td>352</td><td>94</td><td>258</td><td>26</td><td>195</td><td>3</td></tr><tr><td>99eb4cea0d9bc9fe777a5c5172f8638a37a7f262</td><td>ilids_vid_reid</td><td>iLIDS-VID</td><td><a href="papers/99eb4cea0d9bc9fe777a5c5172f8638a37a7f262.html">Person Re-identification by Exploiting Spatio-Temporal Cues and Multi-view Metric Learning</a></td><td><a href="https://doi.org/10.1109/LSP.2016.2574323">[pdf]</a></td><td>IEEE Signal Processing Letters</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>4</td><td>0</td></tr><tr><td>0b84f07af44f964817675ad961def8a51406dd2e</td><td>prw</td><td>PRW</td><td><a href="papers/0b84f07af44f964817675ad961def8a51406dd2e.html">Person Re-identification in the Wild</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.357">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Technology Sydney</td><td>-33.88096510</td><td>151.20107299</td><td>38%</td><td>65</td><td>25</td><td>40</td><td>1</td><td>46</td><td>0</td></tr><tr><td>ec792ad2433b6579f2566c932ee414111e194537</td><td>msmt_17</td><td>MSMT17</td><td><a href="papers/ec792ad2433b6579f2566c932ee414111e194537.html">Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1711.08565.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>50%</td><td>14</td><td>7</td><td>7</td><td>1</td><td>11</td><td>0</td></tr><tr><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td><td>youtube_poses</td><td>YouTube Pose</td><td><a href="papers/1c2802c2199b6d15ecefe7ba0c39bfe44363de38.html">Personalizing Human Video Pose Estimation</a></td><td><a href="http://arxiv.org/pdf/1511.06676v1.pdf">[pdf]</a></td><td>2016 IEEE Conference on 
Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>44%</td><td>32</td><td>14</td><td>18</td><td>2</td><td>27</td><td>0</td></tr><tr><td>b92a1ed9622b8268ae3ac9090e25789fc41cc9b8</td><td>pornodb</td><td>Pornography DB</td><td><a href="papers/b92a1ed9622b8268ae3ac9090e25789fc41cc9b8.html">Pooling in image representation: The visual codeword point of view</a></td><td><a href="http://pdfs.semanticscholar.org/b92a/1ed9622b8268ae3ac9090e25789fc41cc9b8.pdf">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td>9%</td><td>77</td><td>7</td><td>70</td><td>7</td><td>43</td><td>2</td></tr><tr><td>2830fb5282de23d7784b4b4bc37065d27839a412</td><td>h3d</td><td>H3D</td><td><a href="papers/2830fb5282de23d7784b4b4bc37065d27839a412.html">Poselets: Body part detectors trained using 3D human pose annotations</a></td><td><a href="http://vision.stanford.edu/teaching/cs231b_spring1213/papers/ICCV09_BourdevMalik.pdf">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>32%</td><td>707</td><td>223</td><td>484</td><td>62</td><td>482</td><td>18</td></tr><tr><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td><td>rafd</td><td>RaFD</td><td><a href="papers/3765df816dc5a061bc261e190acc8bdd9d47bec0.html">Presentation and validation of the Radboud Faces Database</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>28%</td><td>446</td><td>127</td><td>319</td><td>43</td><td>307</td><td>19</td></tr><tr><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td><a href="papers/636b8ffc09b1b23ff714ac8350bb35635e49fa3c.html">Pruning training sets for learning of object categories</a></td><td><a href="{'url': 
'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1467308', 'linkType': 'ieee'}">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td>58%</td><td>60</td><td>35</td><td>25</td><td>5</td><td>42</td><td>12</td></tr><tr><td>377f2b65e6a9300448bdccf678cde59449ecd337</td><td>ufdd</td><td>UFDD</td><td><a href="papers/377f2b65e6a9300448bdccf678cde59449ecd337.html">Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1804.10275.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Johns Hopkins University</td><td>39.32905300</td><td>-76.61942500</td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>2</td><td>0</td></tr><tr><td>140c95e53c619eac594d70f6369f518adfea12ef</td><td>ijb_a</td><td>IJB-A</td><td><a href="papers/140c95e53c619eac594d70f6369f518adfea12ef.html">Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_089_ext.pdf">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>48%</td><td>222</td><td>107</td><td>115</td><td>21</td><td>158</td><td>48</td></tr><tr><td>d80a3d1f3a438e02a6685e66ee908446766fefa9</td><td>megaage</td><td>MegaAge</td><td><a href="papers/d80a3d1f3a438e02a6685e66ee908446766fefa9.html">Quantifying Facial Age by Posterior of Age Comparisons</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1708.09687.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>25%</td><td>4</td><td>1</td><td>3</td><td>1</td><td>4</td><td>0</td></tr><tr><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td><td>coco_action</td><td>COCO-a</td><td><a 
href="papers/4946ba10a4d5a7d0a38372f23e6622bd347ae273.html">RONCHI AND PERONA: DESCRIBING COMMON HUMAN VISUAL ACTIONS IN IMAGES 1 Describing Common Human Visual Actions in Images</a></td><td><a href="http://pdfs.semanticscholar.org/b38d/cf5fa5174c0d718d65cc4f3889b03c4a21df.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>26</td><td>14</td><td>12</td><td>0</td><td>25</td><td>0</td></tr><tr><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td><td>v47</td><td>V47</td><td><a href="papers/922e0a51a3b8c67c4c6ac09a577ff674cbd28b34.html">Re-identification of pedestrians with variable occlusion and scale</a></td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130477">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>0%</td><td>10</td><td>0</td><td>10</td><td>2</td><td>6</td><td>0</td></tr><tr><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td><td>ward</td><td>WARD</td><td><a href="papers/6f3c76b7c0bd8e1d122c6ea808a271fd4749c951.html">Re-identify people in wide area camera network</a></td><td><a href="https://doi.org/10.1109/CVPRW.2012.6239203">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td>9%</td><td>55</td><td>5</td><td>50</td><td>2</td><td>35</td><td>0</td></tr><tr><td>54983972aafc8e149259d913524581357b0f91c3</td><td>reseed</td><td>ReSEED</td><td><a href="papers/54983972aafc8e149259d913524581357b0f91c3.html">ReSEED: social event dEtection dataset</a></td><td><a href="https://pub.uni-bielefeld.de/download/2663466/2686734">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>17%</td><td>6</td><td>1</td><td>5</td><td>1</td><td>1</td><td>1</td></tr><tr><td>65355cbb581a219bd7461d48b3afd115263ea760</td><td>complex_activities</td><td>Ongoing Complex Activities</td><td><a href="papers/65355cbb581a219bd7461d48b3afd115263ea760.html">Recognition of 
ongoing complex activities by sequence prediction over a hierarchical label space</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477586">[pdf]</a></td><td>2016 IEEE Winter Conference on Applications of Computer Vision (WACV)</td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>2</td><td>0</td></tr><tr><td>e8de844fefd54541b71c9823416daa238be65546</td><td>visual_phrases</td><td>Phrasal Recognition</td><td><a href="papers/e8de844fefd54541b71c9823416daa238be65546.html">Recognition using visual phrases</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995711', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 2011</td><td></td><td></td><td></td><td></td><td>41%</td><td>233</td><td>95</td><td>138</td><td>18</td><td>174</td><td>5</td></tr><tr><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td><td>wider</td><td>WIDER</td><td><a href="papers/356b431d4f7a2a0a38cf971c84568207dcdbf189.html">Recognize complex events from static images by fusing deep channels</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Xiong_Recognize_Complex_Events_2015_CVPR_paper.pdf">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Shenzhen Institutes of Advanced Technology</td><td>22.59805605</td><td>113.98533784</td><td>58%</td><td>45</td><td>26</td><td>19</td><td>1</td><td>30</td><td>12</td></tr><tr><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td><td>faceplace</td><td>Face Place</td><td><a href="papers/25474c21613607f6bb7687a281d5f9d4ffa1f9f3.html">Recognizing disguised faces</a></td><td><a href="http://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>25%</td><td>24</td><td>6</td><td>18</td><td>0</td><td>16</td><td>1</td></tr><tr><td>4053e3423fb70ad9140ca89351df49675197196a</td><td>bio_id</td><td>BioID Face</td><td><a 
href="papers/4053e3423fb70ad9140ca89351df49675197196a.html">Robust Face Detection Using the Hausdorff Distance</a></td><td><a href="http://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>26%</td><td>498</td><td>127</td><td>371</td><td>55</td><td>319</td><td>32</td></tr><tr><td>2724ba85ec4a66de18da33925e537f3902f21249</td><td>cofw</td><td>COFW</td><td><a href="papers/2724ba85ec4a66de18da33925e537f3902f21249.html">Robust Face Landmark Estimation under Occlusion</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751298', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>55%</td><td>305</td><td>167</td><td>138</td><td>16</td><td>186</td><td>95</td></tr><tr><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td><td>sarc3d</td><td>Sarc3D</td><td><a href="papers/e27ef52c641c2b5100a1b34fd0b819e84a31b4df.html">SARC3D: A New 3D Body Model for People Tracking and Re-identification</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>14%</td><td>29</td><td>4</td><td>25</td><td>3</td><td>17</td><td>0</td></tr><tr><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td><td>scut_fbp</td><td>SCUT-FBP</td><td><a href="papers/bd26dabab576adb6af30484183c9c9c8379bf2e0.html">SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1511.02459.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2015 IEEE International Conference on Systems, Man, and Cybernetics</td><td>edu</td><td>South China University of 
Technology</td><td>23.05020420</td><td>113.39880323</td><td>43%</td><td>14</td><td>6</td><td>8</td><td>3</td><td>5</td><td>7</td></tr><tr><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td><td>stair_actions</td><td>STAIR Action</td><td><a href="papers/d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9.html">STAIR Actions: A Video Dataset of Everyday Home Actions</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1804.04326.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/4308bd8c28e37e2ed9a3fcfe74d5436cce34b410.html">Scalable Person Re-identification: A Benchmark</a></td><td><a href="https://www.microsoft.com/en-us/research/wp-content/uploads/2017/01/ICCV15-ReIDDataset.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>38%</td><td>394</td><td>149</td><td>245</td><td>18</td><td>271</td><td>3</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>facebook_100</td><td>Facebook100</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5981788', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>42.36782045</td><td>-71.12666653</td><td>50%</td><td>50</td><td>25</td><td>25</td><td>3</td><td>39</td><td>4</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>pubfig_83</td><td>pubfig83</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><a href="{'url': 
'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5981788', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>42.36782045</td><td>-71.12666653</td><td>50%</td><td>50</td><td>25</td><td>25</td><td>3</td><td>39</td><td>4</td></tr><tr><td>109df0e8e5969ddf01e073143e83599228a1163f</td><td>multi_pie</td><td>MULTIPIE</td><td><a href="papers/109df0e8e5969ddf01e073143e83599228a1163f.html">Scheduling heterogeneous multi-cores through performance impact estimation (PIE)</a></td><td><a href="http://dl.acm.org/citation.cfm?id=2337184">[pdf]</a></td><td>2012 39th Annual International Symposium on Computer Architecture (ISCA)</td><td></td><td></td><td></td><td></td><td>25%</td><td>192</td><td>48</td><td>144</td><td>8</td><td>99</td><td>0</td></tr><tr><td>51eba481dac6b229a7490f650dff7b17ce05df73</td><td>imsitu</td><td>imSitu</td><td><a href="papers/51eba481dac6b229a7490f650dff7b17ce05df73.html">Situation Recognition: Visual Semantic Role Labeling for Image Understanding</a></td><td><a href="http://grail.cs.washington.edu/wp-content/uploads/2016/09/yatskar2016srv.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Washington</td><td>47.65432380</td><td>-122.30800894</td><td>60%</td><td>48</td><td>29</td><td>19</td><td>2</td><td>45</td><td>2</td></tr><tr><td>f152b6ee251cca940dd853c54e6a7b78fbc6b235</td><td>affectnet</td><td>AffectNet</td><td><a href="papers/f152b6ee251cca940dd853c54e6a7b78fbc6b235.html">Skybiometry and AffectNet on Facial Emotion Recognition Using Supervised Machine Learning Algorithms</a></td><td><a href="{'url': 'http://dl.acm.org/citation.cfm?id=3232665', 'linkType': 'acm'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>23e824d1dfc33f3780dd18076284f07bd99f1c43</td><td>mifs</td><td>MIFS</td><td><a 
href="papers/23e824d1dfc33f3780dd18076284f07bd99f1c43.html">Spoofing faces using makeup: An investigative study</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7947686', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE International Conference on Identity, Security and Behavior Analysis (ISBA)</td><td>edu</td><td>INRIA Méditerranée</td><td>43.61581310</td><td>7.06838000</td><td>20%</td><td>5</td><td>1</td><td>4</td><td>0</td><td>1</td><td>2</td></tr><tr><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td><td>sports_videos_in_the_wild</td><td>SVW</td><td><a href="papers/1a40092b493c6b8840257ab7f96051d1a4dbfeb2.html">Sports Videos in the Wild (SVW): A video dataset for sports analysis</a></td><td><a href="http://web.cse.msu.edu/~liuxm/publication/Safdarnejad_Liu_Udpa_Andrus_Wood_Craven_FG2015.pdf">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>Michigan State University</td><td>42.71856800</td><td>-84.47791571</td><td>67%</td><td>6</td><td>4</td><td>2</td><td>1</td><td>5</td><td>0</td></tr><tr><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td><td>towncenter</td><td>TownCenter</td><td><a href="papers/9361b784e73e9238d5cefbea5ac40d35d1e3103f.html">Stable Multi-Target Tracking in Real-Time Surveillance Video (Preprint)</a></td><td><a href="http://pdfs.semanticscholar.org/9361/b784e73e9238d5cefbea5ac40d35d1e3103f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>21%</td><td>310</td><td>64</td><td>246</td><td>24</td><td>177</td><td>4</td></tr><tr><td>c866a2afc871910e3282fd9498dce4ab20f6a332</td><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td><a href="papers/c866a2afc871910e3282fd9498dce4ab20f6a332.html">Surveillance Face Recognition Challenge</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1804.09691.pdf', 'linkType': 
'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td><td>pku</td><td>PKU</td><td><a href="papers/f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f.html">Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</a></td><td><a href="http://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>3</td><td>0</td><td>3</td><td>0</td><td>1</td><td>0</td></tr><tr><td>4d58f886f5150b2d5e48fd1b5a49e09799bf895d</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/4d58f886f5150b2d5e48fd1b5a49e09799bf895d.html">Texas 3D Face Recognition Database</a></td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ssiai_may10.pdf">[pdf]</a></td><td>2010 IEEE Southwest Symposium on Image Analysis & Interpretation (SSIAI)</td><td></td><td></td><td></td><td></td><td>18%</td><td>61</td><td>11</td><td>50</td><td>3</td><td>36</td><td>2</td></tr><tr><td>4d58f886f5150b2d5e48fd1b5a49e09799bf895d</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/4d58f886f5150b2d5e48fd1b5a49e09799bf895d.html">Texas 3D Face Recognition Database</a></td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ssiai_may10.pdf">[pdf]</a></td><td>2010 IEEE Southwest Symposium on Image Analysis & Interpretation (SSIAI)</td><td></td><td></td><td></td><td></td><td>18%</td><td>61</td><td>11</td><td>50</td><td>3</td><td>36</td><td>2</td></tr><tr><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td><td>cas_peal</td><td>CAS-PEAL</td><td><a href="papers/2485c98aa44131d1a2f7d1355b1e372f2bb148ad.html">The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</a></td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics - Part A: Systems and 
Humans</td><td></td><td></td><td></td><td></td><td>18%</td><td>415</td><td>76</td><td>339</td><td>39</td><td>182</td><td>35</td></tr><tr><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td><td>m2vts</td><td>m2vts</td><td><a href="papers/2485c98aa44131d1a2f7d1355b1e372f2bb148ad.html">The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</a></td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics - Part A: Systems and Humans</td><td></td><td></td><td></td><td></td><td>18%</td><td>415</td><td>76</td><td>339</td><td>39</td><td>182</td><td>35</td></tr><tr><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td><td>fia</td><td>CMU FiA</td><td><a href="papers/47662d1a368daf70ba70ef2d59eb6209f98b675d.html">The CMU Face In Action (FIA) Database</a></td><td><a href="http://pdfs.semanticscholar.org/bb47/a03401811f9d2ca2da12138697acbc7b97a3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>29%</td><td>55</td><td>16</td><td>39</td><td>5</td><td>38</td><td>7</td></tr><tr><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td><td>cfd</td><td>CFD</td><td><a href="papers/4df3143922bcdf7db78eb91e6b5359d6ada004d2.html">The Chicago face database: A free stimulus set of faces and norming data.</a></td><td><a href="http://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td>39%</td><td>83</td><td>32</td><td>51</td><td>2</td><td>62</td><td>3</td></tr><tr><td>20388099cc415c772926e47bcbbe554e133343d1</td><td>cafe</td><td>CAFE</td><td><a href="papers/20388099cc415c772926e47bcbbe554e133343d1.html">The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</a></td><td><a 
href="http://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>33</td><td>16</td><td>17</td><td>3</td><td>28</td><td>1</td></tr><tr><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td><td>cityscapes</td><td>Cityscapes</td><td><a href="papers/32cde90437ab5a70cf003ea36f66f2de0e24b3ab.html">The Cityscapes Dataset for Semantic Urban Scene Understanding</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1604.01685.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>33%</td><td>771</td><td>252</td><td>519</td><td>54</td><td>622</td><td>0</td></tr><tr><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td><td>cityscapes</td><td>Cityscapes</td><td><a href="papers/32cde90437ab5a70cf003ea36f66f2de0e24b3ab.html">The Cityscapes Dataset for Semantic Urban Scene Understanding</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1604.01685.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>33%</td><td>771</td><td>252</td><td>519</td><td>54</td><td>622</td><td>0</td></tr><tr><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td><td>dartmouth_children</td><td>Dartmouth Children</td><td><a href="papers/4e6ee936eb50dd032f7138702fa39b7c18ee8907.html">The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</a></td><td><a href="http://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>20</td><td>8</td><td>12</td><td>2</td><td>16</td><td>0</td></tr><tr><td>f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4</td><td>europersons</td><td>EuroCity Persons</td><td><a href="papers/f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4.html">The EuroCity Persons Dataset: A Novel 
Benchmark for Object Detection</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1805.07193.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td><td>cohn_kanade_plus</td><td>CK+</td><td><a href="papers/4d9a02d080636e9666c4d1cc438b9893391ec6c7.html">The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</a></td><td><a href="http://www.iainm.com/iainm/Publications_files/2010_The%20Extended.pdf">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops</td><td></td><td></td><td></td><td></td><td>41%</td><td>975</td><td>403</td><td>572</td><td>65</td><td>460</td><td>345</td></tr><tr><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td><td>feret</td><td>FERET</td><td><a href="papers/0c4a139bb87c6743c7905b29a3cfec27a5130652.html">The FERET Verification Testing Protocol for Face Recognition Algorithms</a></td><td><a href="http://pdfs.semanticscholar.org/0c4a/139bb87c6743c7905b29a3cfec27a5130652.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>28%</td><td>112</td><td>31</td><td>81</td><td>12</td><td>76</td><td>4</td></tr><tr><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td><td>feret</td><td>FERET</td><td><a href="papers/0c4a139bb87c6743c7905b29a3cfec27a5130652.html">The FERET Verification Testing Protocol for Face Recognition Algorithms</a></td><td><a href="http://pdfs.semanticscholar.org/0c4a/139bb87c6743c7905b29a3cfec27a5130652.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>28%</td><td>112</td><td>31</td><td>81</td><td>12</td><td>76</td><td>4</td></tr><tr><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td><td>feret</td><td>FERET</td><td><a href="papers/0c4a139bb87c6743c7905b29a3cfec27a5130652.html">The FERET Verification Testing Protocol for Face Recognition 
Algorithms</a></td><td><a href="http://pdfs.semanticscholar.org/0c4a/139bb87c6743c7905b29a3cfec27a5130652.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>28%</td><td>112</td><td>31</td><td>81</td><td>12</td><td>76</td><td>4</td></tr><tr><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td><td>feret</td><td>FERET</td><td><a href="papers/0c4a139bb87c6743c7905b29a3cfec27a5130652.html">The FERET Verification Testing Protocol for Face Recognition Algorithms</a></td><td><a href="http://pdfs.semanticscholar.org/0c4a/139bb87c6743c7905b29a3cfec27a5130652.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>28%</td><td>112</td><td>31</td><td>81</td><td>12</td><td>76</td><td>4</td></tr><tr><td>8f02ec0be21461fbcedf51d864f944cfc42c875f</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/8f02ec0be21461fbcedf51d864f944cfc42c875f.html">The HDA+ Data Set for Research on Fully Automated Re-identification Systems</a></td><td><a href="http://pdfs.semanticscholar.org/8f02/ec0be21461fbcedf51d864f944cfc42c875f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>12%</td><td>17</td><td>2</td><td>15</td><td>2</td><td>11</td><td>0</td></tr><tr><td>8f02ec0be21461fbcedf51d864f944cfc42c875f</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/8f02ec0be21461fbcedf51d864f944cfc42c875f.html">The HDA+ Data Set for Research on Fully Automated Re-identification Systems</a></td><td><a href="http://pdfs.semanticscholar.org/8f02/ec0be21461fbcedf51d864f944cfc42c875f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>12%</td><td>17</td><td>2</td><td>15</td><td>2</td><td>11</td><td>0</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_large</td><td>Large MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a 
href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>28</td><td>13</td><td>15</td><td>4</td><td>24</td><td>3</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_small</td><td>Small MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>28</td><td>13</td><td>15</td><td>4</td><td>24</td><td>3</td></tr><tr><td>f1af714b92372c8e606485a3982eab2f16772ad8</td><td>mug_faces</td><td>MUG Faces</td><td><a href="papers/f1af714b92372c8e606485a3982eab2f16772ad8.html">The MUG facial expression database</a></td><td><a href="http://ieeexplore.ieee.org/document/5617662/">[pdf]</a></td><td>11th International Workshop on Image Analysis for Multimedia Interactive Services WIAMIS 10</td><td>edu</td><td>Aristotle University of Thessaloniki</td><td>40.62984145</td><td>22.95889350</td><td>28%</td><td>68</td><td>19</td><td>49</td><td>5</td><td>28</td><td>19</td></tr><tr><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td><td>mapillary</td><td>Mapillary</td><td><a href="papers/79828e6e9f137a583082b8b5a9dfce0c301989b8.html">The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237796', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>39%</td><td>44</td><td>17</td><td>27</td><td>0</td><td>36</td><td>0</td></tr><tr><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td><td>megaface</td><td>MegaFace</td><td><a 
href="papers/96e0cfcd81cdeb8282e29ef9ec9962b125f379b0.html">The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.527">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Washington</td><td>47.65432380</td><td>-122.30800894</td><td>55%</td><td>121</td><td>66</td><td>55</td><td>11</td><td>98</td><td>20</td></tr><tr><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td><td>megaface</td><td>MegaFace</td><td><a href="papers/96e0cfcd81cdeb8282e29ef9ec9962b125f379b0.html">The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.527">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Washington</td><td>47.65432380</td><td>-122.30800894</td><td>55%</td><td>121</td><td>66</td><td>55</td><td>11</td><td>98</td><td>20</td></tr><tr><td>a6e695ddd07aad719001c0fc1129328452385949</td><td>yfcc_100m</td><td>YFCC100M</td><td><a href="papers/a6e695ddd07aad719001c0fc1129328452385949.html">The New Data and New Challenges in Multimedia Research</a></td><td><span class="gray">[pdf]</span></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>36%</td><td>160</td><td>57</td><td>103</td><td>11</td><td>105</td><td>4</td></tr><tr><td>abe9f3b91fd26fa1b50cd685c0d20debfb372f73</td><td>voc</td><td>VOC</td><td><a href="papers/abe9f3b91fd26fa1b50cd685c0d20debfb372f73.html">The Pascal Visual Object Classes Challenge: A Retrospective</a></td><td><a href="http://homepages.inf.ed.ac.uk/ckiw/postscript/ijcv_voc14.pdf">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>32%</td><td>999</td><td>315</td><td>684</td><td>75</td><td>698</td><td>6</td></tr><tr><td>66e6f08873325d37e0ec20a4769ce881e04e964e</td><td>sun_attributes</td><td>SUN</td><td><a 
href="papers/66e6f08873325d37e0ec20a4769ce881e04e964e.html">The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</a></td><td><a href="http://www.cc.gatech.edu/~hays/papers/attribute_ijcv.pdf">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>38%</td><td>112</td><td>43</td><td>69</td><td>14</td><td>83</td><td>2</td></tr><tr><td>66e6f08873325d37e0ec20a4769ce881e04e964e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/66e6f08873325d37e0ec20a4769ce881e04e964e.html">The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</a></td><td><a href="http://www.cc.gatech.edu/~hays/papers/attribute_ijcv.pdf">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>38%</td><td>112</td><td>43</td><td>69</td><td>14</td><td>83</td><td>2</td></tr><tr><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td><a href="papers/8b2dd5c61b23ead5ae5508bb8ce808b5ea266730.html">The intrinsic memorability of face photographs.</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Journal of experimental psychology. General</td><td></td><td></td><td></td><td></td><td>36%</td><td>47</td><td>17</td><td>30</td><td>3</td><td>33</td><td>1</td></tr><tr><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td><td>york_3d</td><td>UOY 3D Face Database</td><td><a href="papers/19d1b811df60f86cbd5e04a094b07f32fff7a32a.html">Three-dimensional face recognition: an eigensurface approach</a></td><td><a href="http://www-users.cs.york.ac.uk/~nep/research/3Dface/tomh/3DFaceRecognition-Eigensurface-ICIP(web)2.pdf">[pdf]</a></td><td>2004 International Conference on Image Processing, 2004. 
ICIP '04.</td><td></td><td></td><td></td><td></td><td>19%</td><td>36</td><td>7</td><td>29</td><td>4</td><td>25</td><td>1</td></tr><tr><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td><td>ferplus</td><td>FER+</td><td><a href="papers/298cbc3dfbbb3a20af4eed97906650a4ea1c29e0.html">Training deep networks for facial expression recognition with crowd-sourced label distribution</a></td><td><a href="http://arxiv.org/pdf/1608.01041v1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>29</td><td>10</td><td>19</td><td>0</td><td>15</td><td>3</td></tr><tr><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td><td>ucf_101</td><td>UCF101</td><td><a href="papers/b5f2846a506fc417e7da43f6a7679146d99c5e96.html">UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1212.0402.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td>edu</td><td>University of Central Florida</td><td>28.59899755</td><td>-81.19712501</td><td>54%</td><td>999</td><td>535</td><td>464</td><td>73</td><td>708</td><td>212</td></tr><tr><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td><td>umb</td><td>UMB</td><td><a href="papers/16e8b0a1e8451d5f697b94c0c2b32a00abee1d52.html">UMB-DB: A database of partially occluded 3D faces</a></td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130509">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>29%</td><td>45</td><td>13</td><td>32</td><td>2</td><td>20</td><td>3</td></tr><tr><td>31b05f65405534a696a847dd19c621b7b8588263</td><td>umd_faces</td><td>UMD</td><td><a href="papers/31b05f65405534a696a847dd19c621b7b8588263.html">UMDFaces: An annotated face dataset for training deep networks</a></td><td><a href="http://arxiv.org/abs/1611.01484">[pdf]</a></td><td>2017 IEEE International Joint Conference on Biometrics 
(IJCB)</td><td></td><td></td><td></td><td></td><td>54%</td><td>35</td><td>19</td><td>16</td><td>5</td><td>28</td><td>6</td></tr><tr><td>31b05f65405534a696a847dd19c621b7b8588263</td><td>umd_faces</td><td>UMD</td><td><a href="papers/31b05f65405534a696a847dd19c621b7b8588263.html">UMDFaces: An annotated face dataset for training deep networks</a></td><td><a href="http://arxiv.org/abs/1611.01484">[pdf]</a></td><td>2017 IEEE International Joint Conference on Biometrics (IJCB)</td><td></td><td></td><td></td><td></td><td>54%</td><td>35</td><td>19</td><td>16</td><td>5</td><td>28</td><td>6</td></tr><tr><td>8627f019882b024aef92e4eb9355c499c733e5b7</td><td>used</td><td>USED Social Event Dataset</td><td><a href="papers/8627f019882b024aef92e4eb9355c499c733e5b7.html">USED: a large-scale social event detection dataset</a></td><td><a href="http://doi.acm.org/10.1145/2910017.2910624">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>3</td><td>2</td></tr><tr><td>4b4106614c1d553365bad75d7866bff0de6056ed</td><td>czech_news_agency</td><td>UFI</td><td><a href="papers/4b4106614c1d553365bad75d7866bff0de6056ed.html">Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</a></td><td><a href="http://pdfs.semanticscholar.org/4b41/06614c1d553365bad75d7866bff0de6056ed.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>10%</td><td>10</td><td>1</td><td>9</td><td>0</td><td>4</td><td>2</td></tr><tr><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td><td>images_of_groups</td><td>Images of Groups</td><td><a href="papers/21d9d0deed16f0ad62a4865e9acf0686f4f15492.html">Understanding images of groups of people</a></td><td><a href="http://amp.ece.cmu.edu/people/Andy/Andy_files/cvpr09.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern 
Recognition</td><td></td><td></td><td></td><td></td><td>36%</td><td>202</td><td>72</td><td>130</td><td>12</td><td>126</td><td>24</td></tr><tr><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td><td>nd_2006</td><td>ND-2006</td><td><a href="papers/fd8168f1c50de85bac58a8d328df0a50248b16ae.html">Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4401928', 'linkType': 'ieee'}">[pdf]</a></td><td>2007 First IEEE International Conference on Biometrics: Theory, Applications, and Systems</td><td></td><td></td><td></td><td></td><td>25%</td><td>32</td><td>8</td><td>24</td><td>3</td><td>16</td><td>1</td></tr><tr><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td><td>vadana</td><td>VADANA</td><td><a href="papers/4563b46d42079242f06567b3f2e2f7a80cb3befe.html">VADANA: A dense dataset for facial image analysis</a></td><td><a href="http://vims.cis.udel.edu/publications/VADANA_BeFIT2011.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>19%</td><td>16</td><td>3</td><td>13</td><td>0</td><td>6</td><td>6</td></tr><tr><td>eb027969f9310e0ae941e2adee2d42cdf07d938c</td><td>vgg_faces2</td><td>VGG Face2</td><td><a href="papers/eb027969f9310e0ae941e2adee2d42cdf07d938c.html">VGGFace2: A Dataset for Recognising Faces across Pose and Age</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1710.08092.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)</td><td>edu</td><td>University of Oxford</td><td>51.75345380</td><td>-1.25400997</td><td>38%</td><td>56</td><td>21</td><td>35</td><td>6</td><td>50</td><td>3</td></tr><tr><td>01959ef569f74c286956024866c1d107099199f7</td><td>vqa</td><td>VQA</td><td><a href="papers/01959ef569f74c286956024866c1d107099199f7.html">VQA: Visual Question Answering</a></td><td><a 
href="http://arxiv.org/pdf/1505.00468v3.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>47%</td><td>731</td><td>344</td><td>387</td><td>47</td><td>628</td><td>4</td></tr><tr><td>5194cbd51f9769ab25260446b4fa17204752e799</td><td>violent_flows</td><td>Violent Flows</td><td><a href="papers/5194cbd51f9769ab25260446b4fa17204752e799.html">Violent flows: Real-time detection of violent crowd behavior</a></td><td><a href="http://www.wisdom.weizmann.ac.il/mathusers/kliper/Papers/violent_flows.pdf">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td>20%</td><td>83</td><td>17</td><td>66</td><td>6</td><td>42</td><td>2</td></tr><tr><td>066000d44d6691d27202896691f08b27117918b9</td><td>psu</td><td>PSU</td><td><a href="papers/066000d44d6691d27202896691f08b27117918b9.html">Vision-Based Analysis of Small Groups in Pedestrian Crowds</a></td><td><a href="http://vision.cse.psu.edu/publications/pdfs/GeCollinsRubackPAMI2011.pdf">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>18%</td><td>151</td><td>27</td><td>124</td><td>9</td><td>78</td><td>2</td></tr><tr><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td><td>families_in_the_wild</td><td>FIW</td><td><a href="papers/dd65f71dac86e36eecbd3ed225d016c3336b4a13.html">Visual Kinship Recognition of Families in the Wild</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337841', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>67%</td><td>3</td><td>2</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td><td>wider_face</td><td>WIDER FACE</td><td><a href="papers/52d7eb0fbc3522434c13cc247549f74bb9609c5d.html">WIDER FACE: 
A Face Detection Benchmark</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1511.06523.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>53%</td><td>148</td><td>78</td><td>70</td><td>16</td><td>107</td><td>34</td></tr><tr><td>77c81c13a110a341c140995bedb98101b9e84f7f</td><td>wildtrack</td><td>WildTrack</td><td><a href="papers/77c81c13a110a341c140995bedb98101b9e84f7f.html">WILDTRACK : A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/fe1c/ec4e4995b8615855572374ae3efc94949105.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td><td>wlfdb</td><td></td><td><a href="papers/5ad4e9f947c1653c247d418f05dad758a3f9277b.html">WLFDB: Weakly Labeled Face Databases</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td><td>stickmen_family</td><td>We Are Family Stickmen</td><td><a href="papers/0dc11a37cadda92886c56a6fb5191ded62099c28.html">We Are Family: Joint Pose Estimation of Multiple Persons</a></td><td><a href="http://pdfs.semanticscholar.org/0dc1/1a37cadda92886c56a6fb5191ded62099c28.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>77</td><td>34</td><td>43</td><td>4</td><td>57</td><td>1</td></tr><tr><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td><a 
href="papers/2a75f34663a60ab1b04a0049ed1d14335129e908.html">Web-based database for facial expression analysis</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/PanticEtAl-ICME2005-final.pdf">[pdf]</a></td><td>2005 IEEE International Conference on Multimedia and Expo</td><td></td><td></td><td></td><td></td><td>32%</td><td>440</td><td>142</td><td>298</td><td>44</td><td>258</td><td>82</td></tr><tr><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td><td>who_goes_there</td><td>WGT</td><td><a href="papers/9b9bf5e623cb8af7407d2d2d857bc3f1b531c182.html">Who goes there?: approaches to mapping facial appearance diversity</a></td><td><a href="http://doi.acm.org/10.1145/2996913.2996997">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>a94cae786d515d3450d48267e12ca954aab791c4</td><td>yawdd</td><td>YawDD</td><td><a href="papers/a94cae786d515d3450d48267e12ca954aab791c4.html">YawDD: a yawning detection dataset</a></td><td><a href="http://www.site.uottawa.ca/~shervin/pubs/CogniVue-Dataset-ACM-MMSys2014.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>14</td><td>0</td><td>14</td><td>1</td><td>2</td><td>1</td></tr></table></body></html> \ No newline at end of file
diff --git a/scraper/reports/reports.css b/scraper/reports/reports.css
new file mode 100644
index 00000000..d5a9755d
--- /dev/null
+++ b/scraper/reports/reports.css
@@ -0,0 +1,18 @@
+body { font-size: smaller; }
+td,th { vertical-align: top; }
+td {
+ max-width: 500px;
+ overflow: hidden;
+}
+#mapid {
+ width: 100vw;
+ height: 30vw;
+}
+.gray { color: #888; }
+html.map, html.map body {
+ margin: 0; padding: 0;
+ width: 100%; height: 100%;
+}
+.map #mapid {
+ height: 100vh;
+} \ No newline at end of file
diff --git a/scraper/reports/snap.svg-min.js b/scraper/reports/snap.svg-min.js
new file mode 100755
index 00000000..a9551b5f
--- /dev/null
+++ b/scraper/reports/snap.svg-min.js
@@ -0,0 +1,21 @@
+// Snap.svg 0.5.1
+//
+// Copyright (c) 2013 – 2017 Adobe Systems Incorporated. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// build: 2017-02-07
+
+!function(a){var b,c,d="0.5.0",e="hasOwnProperty",f=/[\.\/]/,g=/\s*,\s*/,h="*",i=function(a,b){return a-b},j={n:{}},k=function(){for(var a=0,b=this.length;b>a;a++)if("undefined"!=typeof this[a])return this[a]},l=function(){for(var a=this.length;--a;)if("undefined"!=typeof this[a])return this[a]},m=Object.prototype.toString,n=String,o=Array.isArray||function(a){return a instanceof Array||"[object Array]"==m.call(a)};eve=function(a,d){var e,f=c,g=Array.prototype.slice.call(arguments,2),h=eve.listeners(a),j=0,m=[],n={},o=[],p=b;o.firstDefined=k,o.lastDefined=l,b=a,c=0;for(var q=0,r=h.length;r>q;q++)"zIndex"in h[q]&&(m.push(h[q].zIndex),h[q].zIndex<0&&(n[h[q].zIndex]=h[q]));for(m.sort(i);m[j]<0;)if(e=n[m[j++]],o.push(e.apply(d,g)),c)return c=f,o;for(q=0;r>q;q++)if(e=h[q],"zIndex"in e)if(e.zIndex==m[j]){if(o.push(e.apply(d,g)),c)break;do if(j++,e=n[m[j]],e&&o.push(e.apply(d,g)),c)break;while(e)}else n[e.zIndex]=e;else if(o.push(e.apply(d,g)),c)break;return c=f,b=p,o},eve._events=j,eve.listeners=function(a){var b,c,d,e,g,i,k,l,m=o(a)?a:a.split(f),n=j,p=[n],q=[];for(e=0,g=m.length;g>e;e++){for(l=[],i=0,k=p.length;k>i;i++)for(n=p[i].n,c=[n[m[e]],n[h]],d=2;d--;)b=c[d],b&&(l.push(b),q=q.concat(b.f||[]));p=l}return q},eve.separator=function(a){a?(a=n(a).replace(/(?=[\.\^\]\[\-])/g,"\\"),a="["+a+"]",f=new RegExp(a)):f=/[\.\/]/},eve.on=function(a,b){if("function"!=typeof b)return function(){};for(var c=o(a)?o(a[0])?a:[a]:n(a).split(g),d=0,e=c.length;e>d;d++)!function(a){for(var c,d=o(a)?a:n(a).split(f),e=j,g=0,h=d.length;h>g;g++)e=e.n,e=e.hasOwnProperty(d[g])&&e[d[g]]||(e[d[g]]={n:{}});for(e.f=e.f||[],g=0,h=e.f.length;h>g;g++)if(e.f[g]==b){c=!0;break}!c&&e.f.push(b)}(c[d]);return function(a){+a==+a&&(b.zIndex=+a)}},eve.f=function(a){var b=[].slice.call(arguments,1);return function(){eve.apply(null,[a,null].concat(b).concat([].slice.call(arguments,0)))}},eve.stop=function(){c=1},eve.nt=function(a){var c=o(b)?b.join("."):b;return a?new 
RegExp("(?:\\.|\\/|^)"+a+"(?:\\.|\\/|$)").test(c):c},eve.nts=function(){return o(b)?b:b.split(f)},eve.off=eve.unbind=function(a,b){if(!a)return void(eve._events=j={n:{}});var c=o(a)?o(a[0])?a:[a]:n(a).split(g);if(c.length>1)for(var d=0,i=c.length;i>d;d++)eve.off(c[d],b);else{c=o(a)?a:n(a).split(f);var k,l,m,d,i,p,q,r=[j],s=[];for(d=0,i=c.length;i>d;d++)for(p=0;p<r.length;p+=m.length-2){if(m=[p,1],k=r[p].n,c[d]!=h)k[c[d]]&&(m.push(k[c[d]]),s.unshift({n:k,name:c[d]}));else for(l in k)k[e](l)&&(m.push(k[l]),s.unshift({n:k,name:l}));r.splice.apply(r,m)}for(d=0,i=r.length;i>d;d++)for(k=r[d];k.n;){if(b){if(k.f){for(p=0,q=k.f.length;q>p;p++)if(k.f[p]==b){k.f.splice(p,1);break}!k.f.length&&delete k.f}for(l in k.n)if(k.n[e](l)&&k.n[l].f){var t=k.n[l].f;for(p=0,q=t.length;q>p;p++)if(t[p]==b){t.splice(p,1);break}!t.length&&delete k.n[l].f}}else{delete k.f;for(l in k.n)k.n[e](l)&&k.n[l].f&&delete k.n[l].f}k=k.n}a:for(d=0,i=s.length;i>d;d++){k=s[d];for(l in k.n[k.name].f)continue a;for(l in k.n[k.name].n)continue a;delete k.n[k.name]}}},eve.once=function(a,b){var c=function(){return eve.off(a,c),b.apply(this,arguments)};return eve.on(a,c)},eve.version=d,eve.toString=function(){return"You are running Eve "+d},"undefined"!=typeof module&&module.exports?module.exports=eve:"function"==typeof define&&define.amd?define("eve",[],function(){return eve}):a.eve=eve}(this),function(a,b){if("function"==typeof define&&define.amd)define(["eve"],function(c){return b(a,c)});else if("undefined"!=typeof exports){var c=require("eve");module.exports=b(a,c)}else b(a,a.eve)}(window||this,function(a,b){var c=function(b){var c,d={},e=a.requestAnimationFrame||a.webkitRequestAnimationFrame||a.mozRequestAnimationFrame||a.oRequestAnimationFrame||a.msRequestAnimationFrame||function(a){return setTimeout(a,16,(new Date).getTime()),!0},f=Array.isArray||function(a){return a instanceof Array||"[object Array]"==Object.prototype.toString.call(a)},g=0,h="M"+(+new Date).toString(36),i=function(){return 
h+(g++).toString(36)},j=Date.now||function(){return+new Date},k=function(a){var b=this;if(null==a)return b.s;var c=b.s-a;b.b+=b.dur*c,b.B+=b.dur*c,b.s=a},l=function(a){var b=this;return null==a?b.spd:void(b.spd=a)},m=function(a){var b=this;return null==a?b.dur:(b.s=b.s*a/b.dur,void(b.dur=a))},n=function(){var a=this;delete d[a.id],a.update(),b("mina.stop."+a.id,a)},o=function(){var a=this;a.pdif||(delete d[a.id],a.update(),a.pdif=a.get()-a.b)},p=function(){var a=this;a.pdif&&(a.b=a.get()-a.pdif,delete a.pdif,d[a.id]=a,r())},q=function(){var a,b=this;if(f(b.start)){a=[];for(var c=0,d=b.start.length;d>c;c++)a[c]=+b.start[c]+(b.end[c]-b.start[c])*b.easing(b.s)}else a=+b.start+(b.end-b.start)*b.easing(b.s);b.set(a)},r=function(a){if(!a)return void(c||(c=e(r)));var f=0;for(var g in d)if(d.hasOwnProperty(g)){var h=d[g],i=h.get();f++,h.s=(i-h.b)/(h.dur/h.spd),h.s>=1&&(delete d[g],h.s=1,f--,function(a){setTimeout(function(){b("mina.finish."+a.id,a)})}(h)),h.update()}c=f?e(r):!1},s=function(a,b,c,e,f,g,h){var j={id:i(),start:a,end:b,b:c,s:0,dur:e-c,spd:1,get:f,set:g,easing:h||s.linear,status:k,speed:l,duration:m,stop:n,pause:o,resume:p,update:q};d[j.id]=j;var t,u=0;for(t in d)if(d.hasOwnProperty(t)&&(u++,2==u))break;return 1==u&&r(),j};return s.time=j,s.getById=function(a){return d[a]||null},s.linear=function(a){return a},s.easeout=function(a){return Math.pow(a,1.7)},s.easein=function(a){return Math.pow(a,.48)},s.easeinout=function(a){if(1==a)return 1;if(0==a)return 0;var b=.48-a/1.04,c=Math.sqrt(.1734+b*b),d=c-b,e=Math.pow(Math.abs(d),1/3)*(0>d?-1:1),f=-c-b,g=Math.pow(Math.abs(f),1/3)*(0>f?-1:1),h=e+g+.5;return 3*(1-h)*h*h+h*h*h},s.backin=function(a){if(1==a)return 1;var b=1.70158;return a*a*((b+1)*a-b)},s.backout=function(a){if(0==a)return 0;a-=1;var b=1.70158;return a*a*((b+1)*a+b)+1},s.elastic=function(a){return a==!!a?a:Math.pow(2,-10*a)*Math.sin((a-.075)*(2*Math.PI)/.3)+1},s.bounce=function(a){var b,c=7.5625,d=2.75;return 
1/d>a?b=c*a*a:2/d>a?(a-=1.5/d,b=c*a*a+.75):2.5/d>a?(a-=2.25/d,b=c*a*a+.9375):(a-=2.625/d,b=c*a*a+.984375),b},a.mina=s,s}("undefined"==typeof b?function(){}:b),d=function(a){function c(a,b){if(a){if(a.nodeType)return w(a);if(e(a,"array")&&c.set)return c.set.apply(c,a);if(a instanceof s)return a;if(null==b)try{return a=y.doc.querySelector(String(a)),w(a)}catch(d){return null}}return a=null==a?"100%":a,b=null==b?"100%":b,new v(a,b)}function d(a,b){if(b){if("#text"==a&&(a=y.doc.createTextNode(b.text||b["#text"]||"")),"#comment"==a&&(a=y.doc.createComment(b.text||b["#text"]||"")),"string"==typeof a&&(a=d(a)),"string"==typeof b)return 1==a.nodeType?"xlink:"==b.substring(0,6)?a.getAttributeNS(T,b.substring(6)):"xml:"==b.substring(0,4)?a.getAttributeNS(U,b.substring(4)):a.getAttribute(b):"text"==b?a.nodeValue:null;if(1==a.nodeType){for(var c in b)if(b[z](c)){var e=A(b[c]);e?"xlink:"==c.substring(0,6)?a.setAttributeNS(T,c.substring(6),e):"xml:"==c.substring(0,4)?a.setAttributeNS(U,c.substring(4),e):a.setAttribute(c,e):a.removeAttribute(c)}}else"text"in b&&(a.nodeValue=b.text)}else a=y.doc.createElementNS(U,a);return a}function e(a,b){return b=A.prototype.toLowerCase.call(b),"finite"==b?isFinite(a):"array"==b&&(a instanceof Array||Array.isArray&&Array.isArray(a))?!0:"null"==b&&null===a||b==typeof a&&null!==a||"object"==b&&a===Object(a)||J.call(a).slice(8,-1).toLowerCase()==b}function f(a){if("function"==typeof a||Object(a)!==a)return a;var b=new a.constructor;for(var c in a)a[z](c)&&(b[c]=f(a[c]));return b}function h(a,b){for(var c=0,d=a.length;d>c;c++)if(a[c]===b)return a.push(a.splice(c,1)[0])}function i(a,b,c){function d(){var e=Array.prototype.slice.call(arguments,0),f=e.join("␀"),g=d.cache=d.cache||{},i=d.count=d.count||[];return g[z](f)?(h(i,f),c?c(g[f]):g[f]):(i.length>=1e3&&delete g[i.shift()],i.push(f),g[f]=a.apply(b,e),c?c(g[f]):g[f])}return d}function j(a,b,c,d,e,f){if(null==e){var g=a-c,h=b-d;return g||h?(180+180*D.atan2(-h,-g)/H+360)%360:0}return 
j(a,b,e,f)-j(c,d,e,f)}function k(a){return a%360*H/180}function l(a){return 180*a/H%360}function m(a){var b=[];return a=a.replace(/(?:^|\s)(\w+)\(([^)]+)\)/g,function(a,c,d){return d=d.split(/\s*,\s*|\s+/),"rotate"==c&&1==d.length&&d.push(0,0),"scale"==c&&(d.length>2?d=d.slice(0,2):2==d.length&&d.push(0,0),1==d.length&&d.push(d[0],0,0)),"skewX"==c?b.push(["m",1,0,D.tan(k(d[0])),1,0,0]):"skewY"==c?b.push(["m",1,D.tan(k(d[0])),0,1,0,0]):b.push([c.charAt(0)].concat(d)),a}),b}function n(a,b){var d=aa(a),e=new c.Matrix;if(d)for(var f=0,g=d.length;g>f;f++){var h,i,j,k,l,m=d[f],n=m.length,o=A(m[0]).toLowerCase(),p=m[0]!=o,q=p?e.invert():0;"t"==o&&2==n?e.translate(m[1],0):"t"==o&&3==n?p?(h=q.x(0,0),i=q.y(0,0),j=q.x(m[1],m[2]),k=q.y(m[1],m[2]),e.translate(j-h,k-i)):e.translate(m[1],m[2]):"r"==o?2==n?(l=l||b,e.rotate(m[1],l.x+l.width/2,l.y+l.height/2)):4==n&&(p?(j=q.x(m[2],m[3]),k=q.y(m[2],m[3]),e.rotate(m[1],j,k)):e.rotate(m[1],m[2],m[3])):"s"==o?2==n||3==n?(l=l||b,e.scale(m[1],m[n-1],l.x+l.width/2,l.y+l.height/2)):4==n?p?(j=q.x(m[2],m[3]),k=q.y(m[2],m[3]),e.scale(m[1],m[1],j,k)):e.scale(m[1],m[1],m[2],m[3]):5==n&&(p?(j=q.x(m[3],m[4]),k=q.y(m[3],m[4]),e.scale(m[1],m[2],j,k)):e.scale(m[1],m[2],m[3],m[4])):"m"==o&&7==n&&e.add(m[1],m[2],m[3],m[4],m[5],m[6])}return e}function o(a){var b=a.node.ownerSVGElement&&w(a.node.ownerSVGElement)||a.node.parentNode&&w(a.node.parentNode)||c.select("svg")||c(0,0),d=b.select("defs"),e=null==d?!1:d.node;return e||(e=u("defs",b.node).node),e}function p(a){return a.node.ownerSVGElement&&w(a.node.ownerSVGElement)||c.select("svg")}function q(a,b,c){function e(a){if(null==a)return I;if(a==+a)return a;d(j,{width:a});try{return j.getBBox().width}catch(b){return 0}}function f(a){if(null==a)return I;if(a==+a)return a;d(j,{height:a});try{return j.getBBox().height}catch(b){return 0}}function g(d,e){null==b?i[d]=e(a.attr(d)||0):d==b&&(i=e(null==c?a.attr(d)||0:c))}var 
h=p(a).node,i={},j=h.querySelector(".svg---mgr");switch(j||(j=d("rect"),d(j,{x:-9e9,y:-9e9,width:10,height:10,"class":"svg---mgr",fill:"none"}),h.appendChild(j)),a.type){case"rect":g("rx",e),g("ry",f);case"image":g("width",e),g("height",f);case"text":g("x",e),g("y",f);break;case"circle":g("cx",e),g("cy",f),g("r",e);break;case"ellipse":g("cx",e),g("cy",f),g("rx",e),g("ry",f);break;case"line":g("x1",e),g("x2",e),g("y1",f),g("y2",f);break;case"marker":g("refX",e),g("markerWidth",e),g("refY",f),g("markerHeight",f);break;case"radialGradient":g("fx",e),g("fy",f);break;case"tspan":g("dx",e),g("dy",f);break;default:g(b,e)}return h.removeChild(j),i}function r(a){e(a,"array")||(a=Array.prototype.slice.call(arguments,0));for(var b=0,c=0,d=this.node;this[b];)delete this[b++];for(b=0;b<a.length;b++)"set"==a[b].type?a[b].forEach(function(a){d.appendChild(a.node)}):d.appendChild(a[b].node);var f=d.childNodes;for(b=0;b<f.length;b++)this[c++]=w(f[b]);return this}function s(a){if(a.snap in V)return V[a.snap];var b;try{b=a.ownerSVGElement}catch(c){}this.node=a,b&&(this.paper=new v(b)),this.type=a.tagName||a.nodeName;var d=this.id=S(this);if(this.anims={},this._={transform:[]},a.snap=d,V[d]=this,"g"==this.type&&(this.add=r),this.type in{g:1,mask:1,pattern:1,symbol:1})for(var e in v.prototype)v.prototype[z](e)&&(this[e]=v.prototype[e])}function t(a){this.node=a}function u(a,b){var c=d(a);b.appendChild(c);var e=w(c);return e}function v(a,b){var c,e,f,g=v.prototype;if(a&&a.tagName&&"svg"==a.tagName.toLowerCase()){if(a.snap in V)return V[a.snap];var h=a.ownerDocument;c=new s(a),e=a.getElementsByTagName("desc")[0],f=a.getElementsByTagName("defs")[0],e||(e=d("desc"),e.appendChild(h.createTextNode("Created with Snap")),c.node.appendChild(e)),f||(f=d("defs"),c.node.appendChild(f)),c.defs=f;for(var i in g)g[z](i)&&(c[i]=g[i]);c.paper=c.root=c}else c=u("svg",y.doc.body),d(c.node,{height:b,version:1.1,width:a,xmlns:U});return c}function w(a){return a?a instanceof s||a instanceof 
t?a:a.tagName&&"svg"==a.tagName.toLowerCase()?new v(a):a.tagName&&"object"==a.tagName.toLowerCase()&&"image/svg+xml"==a.type?new v(a.contentDocument.getElementsByTagName("svg")[0]):new s(a):a}function x(a,b){for(var c=0,d=a.length;d>c;c++){var e={type:a[c].type,attr:a[c].attr()},f=a[c].children();b.push(e),f.length&&x(f,e.childNodes=[])}}c.version="0.5.1",c.toString=function(){return"Snap v"+this.version},c._={};var y={win:a.window,doc:a.window.document};c._.glob=y;var z="hasOwnProperty",A=String,B=parseFloat,C=parseInt,D=Math,E=D.max,F=D.min,G=D.abs,H=(D.pow,D.PI),I=(D.round,""),J=Object.prototype.toString,K=/^\s*((#[a-f\d]{6})|(#[a-f\d]{3})|rgba?\(\s*([\d\.]+%?\s*,\s*[\d\.]+%?\s*,\s*[\d\.]+%?(?:\s*,\s*[\d\.]+%?)?)\s*\)|hsba?\(\s*([\d\.]+(?:deg|\xb0|%)?\s*,\s*[\d\.]+%?\s*,\s*[\d\.]+(?:%?\s*,\s*[\d\.]+)?%?)\s*\)|hsla?\(\s*([\d\.]+(?:deg|\xb0|%)?\s*,\s*[\d\.]+%?\s*,\s*[\d\.]+(?:%?\s*,\s*[\d\.]+)?%?)\s*\))\s*$/i,L=(c._.separator=/[,\s]+/,/[\s]*,[\s]*/),M={hs:1,rg:1},N=/([a-z])[\s,]*((-?\d*\.?\d*(?:e[\-+]?\d+)?[\s]*,?[\s]*)+)/gi,O=/([rstm])[\s,]*((-?\d*\.?\d*(?:e[\-+]?\d+)?[\s]*,?[\s]*)+)/gi,P=/(-?\d*\.?\d*(?:e[\-+]?\d+)?)[\s]*,?[\s]*/gi,Q=0,R="S"+(+new Date).toString(36),S=function(a){return(a&&a.type?a.type:I)+R+(Q++).toString(36)},T="http://www.w3.org/1999/xlink",U="http://www.w3.org/2000/svg",V={};c.url=function(a){return"url('#"+a+"')"};c._.$=d,c._.id=S,c.format=function(){var a=/\{([^\}]+)\}/g,b=/(?:(?:^|\.)(.+?)(?=\[|\.|$|\()|\[('|")(.+?)\2\])(\(\))?/g,c=function(a,c,d){var e=d;return c.replace(b,function(a,b,c,d,f){b=b||d,e&&(b in e&&(e=e[b]),"function"==typeof e&&f&&(e=e()))}),e=(null==e||e==d?a:e)+""};return function(b,d){return A(b).replace(a,function(a,b){return c(a,b,d)})}}(),c._.clone=f,c._.cacher=i,c.rad=k,c.deg=l,c.sin=function(a){return D.sin(c.rad(a))},c.tan=function(a){return D.tan(c.rad(a))},c.cos=function(a){return D.cos(c.rad(a))},c.asin=function(a){return c.deg(D.asin(a))},c.acos=function(a){return c.deg(D.acos(a))},c.atan=function(a){return 
c.deg(D.atan(a))},c.atan2=function(a){return c.deg(D.atan2(a))},c.angle=j,c.len=function(a,b,d,e){return Math.sqrt(c.len2(a,b,d,e))},c.len2=function(a,b,c,d){return(a-c)*(a-c)+(b-d)*(b-d)},c.closestPoint=function(a,b,c){function d(a){var d=a.x-b,e=a.y-c;return d*d+e*e}for(var e,f,g,h,i=a.node,j=i.getTotalLength(),k=j/i.pathSegList.numberOfItems*.125,l=1/0,m=0;j>=m;m+=k)(h=d(g=i.getPointAtLength(m)))<l&&(e=g,f=m,l=h);for(k*=.5;k>.5;){var n,o,p,q,r,s;(p=f-k)>=0&&(r=d(n=i.getPointAtLength(p)))<l?(e=n,f=p,l=r):(q=f+k)<=j&&(s=d(o=i.getPointAtLength(q)))<l?(e=o,f=q,l=s):k*=.5}return e={x:e.x,y:e.y,length:f,distance:Math.sqrt(l)}},c.is=e,c.snapTo=function(a,b,c){if(c=e(c,"finite")?c:10,e(a,"array")){for(var d=a.length;d--;)if(G(a[d]-b)<=c)return a[d]}else{a=+a;var f=b%a;if(c>f)return b-f;if(f>a-c)return b-f+a}return b},c.getRGB=i(function(a){if(!a||(a=A(a)).indexOf("-")+1)return{r:-1,g:-1,b:-1,hex:"none",error:1,toString:Z};if("none"==a)return{r:-1,g:-1,b:-1,hex:"none",toString:Z};if(!(M[z](a.toLowerCase().substring(0,2))||"#"==a.charAt())&&(a=W(a)),!a)return{r:-1,g:-1,b:-1,hex:"none",error:1,toString:Z};var b,d,f,g,h,i,j=a.match(K);return 
j?(j[2]&&(f=C(j[2].substring(5),16),d=C(j[2].substring(3,5),16),b=C(j[2].substring(1,3),16)),j[3]&&(f=C((h=j[3].charAt(3))+h,16),d=C((h=j[3].charAt(2))+h,16),b=C((h=j[3].charAt(1))+h,16)),j[4]&&(i=j[4].split(L),b=B(i[0]),"%"==i[0].slice(-1)&&(b*=2.55),d=B(i[1]),"%"==i[1].slice(-1)&&(d*=2.55),f=B(i[2]),"%"==i[2].slice(-1)&&(f*=2.55),"rgba"==j[1].toLowerCase().slice(0,4)&&(g=B(i[3])),i[3]&&"%"==i[3].slice(-1)&&(g/=100)),j[5]?(i=j[5].split(L),b=B(i[0]),"%"==i[0].slice(-1)&&(b/=100),d=B(i[1]),"%"==i[1].slice(-1)&&(d/=100),f=B(i[2]),"%"==i[2].slice(-1)&&(f/=100),("deg"==i[0].slice(-3)||"°"==i[0].slice(-1))&&(b/=360),"hsba"==j[1].toLowerCase().slice(0,4)&&(g=B(i[3])),i[3]&&"%"==i[3].slice(-1)&&(g/=100),c.hsb2rgb(b,d,f,g)):j[6]?(i=j[6].split(L),b=B(i[0]),"%"==i[0].slice(-1)&&(b/=100),d=B(i[1]),"%"==i[1].slice(-1)&&(d/=100),f=B(i[2]),"%"==i[2].slice(-1)&&(f/=100),("deg"==i[0].slice(-3)||"°"==i[0].slice(-1))&&(b/=360),"hsla"==j[1].toLowerCase().slice(0,4)&&(g=B(i[3])),i[3]&&"%"==i[3].slice(-1)&&(g/=100),c.hsl2rgb(b,d,f,g)):(b=F(D.round(b),255),d=F(D.round(d),255),f=F(D.round(f),255),g=F(E(g,0),1),j={r:b,g:d,b:f,toString:Z},j.hex="#"+(16777216|f|d<<8|b<<16).toString(16).slice(1),j.opacity=e(g,"finite")?g:1,j)):{r:-1,g:-1,b:-1,hex:"none",error:1,toString:Z}},c),c.hsb=i(function(a,b,d){return c.hsb2rgb(a,b,d).hex}),c.hsl=i(function(a,b,d){return c.hsl2rgb(a,b,d).hex}),c.rgb=i(function(a,b,c,d){if(e(d,"finite")){var f=D.round;return"rgba("+[f(a),f(b),f(c),+d.toFixed(2)]+")"}return"#"+(16777216|c|b<<8|a<<16).toString(16).slice(1)});var W=function(a){var b=y.doc.getElementsByTagName("head")[0]||y.doc.getElementsByTagName("svg")[0],c="rgb(255, 0, 0)";return(W=i(function(a){if("red"==a.toLowerCase())return c;b.style.color=c,b.style.color=a;var d=y.doc.defaultView.getComputedStyle(b,I).getPropertyValue("color");return d==c?null:d}))(a)},X=function(){return"hsb("+[this.h,this.s,this.b]+")"},Y=function(){return"hsl("+[this.h,this.s,this.l]+")"},Z=function(){return 
1==this.opacity||null==this.opacity?this.hex:"rgba("+[this.r,this.g,this.b,this.opacity]+")"},$=function(a,b,d){if(null==b&&e(a,"object")&&"r"in a&&"g"in a&&"b"in a&&(d=a.b,b=a.g,a=a.r),null==b&&e(a,string)){var f=c.getRGB(a);a=f.r,b=f.g,d=f.b}return(a>1||b>1||d>1)&&(a/=255,b/=255,d/=255),[a,b,d]},_=function(a,b,d,f){a=D.round(255*a),b=D.round(255*b),d=D.round(255*d);var g={r:a,g:b,b:d,opacity:e(f,"finite")?f:1,hex:c.rgb(a,b,d),toString:Z};return e(f,"finite")&&(g.opacity=f),g};c.color=function(a){var b;return e(a,"object")&&"h"in a&&"s"in a&&"b"in a?(b=c.hsb2rgb(a),a.r=b.r,a.g=b.g,a.b=b.b,a.opacity=1,a.hex=b.hex):e(a,"object")&&"h"in a&&"s"in a&&"l"in a?(b=c.hsl2rgb(a),a.r=b.r,a.g=b.g,a.b=b.b,a.opacity=1,a.hex=b.hex):(e(a,"string")&&(a=c.getRGB(a)),e(a,"object")&&"r"in a&&"g"in a&&"b"in a&&!("error"in a)?(b=c.rgb2hsl(a),a.h=b.h,a.s=b.s,a.l=b.l,b=c.rgb2hsb(a),a.v=b.b):(a={hex:"none"},a.r=a.g=a.b=a.h=a.s=a.v=a.l=-1,a.error=1)),a.toString=Z,a},c.hsb2rgb=function(a,b,c,d){e(a,"object")&&"h"in a&&"s"in a&&"b"in a&&(c=a.b,b=a.s,d=a.o,a=a.h),a*=360;var f,g,h,i,j;return a=a%360/60,j=c*b,i=j*(1-G(a%2-1)),f=g=h=c-j,a=~~a,f+=[j,i,0,0,i,j][a],g+=[i,j,j,i,0,0][a],h+=[0,0,i,j,j,i][a],_(f,g,h,d)},c.hsl2rgb=function(a,b,c,d){e(a,"object")&&"h"in a&&"s"in a&&"l"in a&&(c=a.l,b=a.s,a=a.h),(a>1||b>1||c>1)&&(a/=360,b/=100,c/=100),a*=360;var f,g,h,i,j;return a=a%360/60,j=2*b*(.5>c?c:1-c),i=j*(1-G(a%2-1)),f=g=h=c-j/2,a=~~a,f+=[j,i,0,0,i,j][a],g+=[i,j,j,i,0,0][a],h+=[0,0,i,j,j,i][a],_(f,g,h,d)},c.rgb2hsb=function(a,b,c){c=$(a,b,c),a=c[0],b=c[1],c=c[2];var d,e,f,g;return f=E(a,b,c),g=f-F(a,b,c),d=0==g?null:f==a?(b-c)/g:f==b?(c-a)/g+2:(a-b)/g+4,d=(d+360)%6*60/360,e=0==g?0:g/f,{h:d,s:e,b:f,toString:X}},c.rgb2hsl=function(a,b,c){c=$(a,b,c),a=c[0],b=c[1],c=c[2];var d,e,f,g,h,i;return g=E(a,b,c),h=F(a,b,c),i=g-h,d=0==i?null:g==a?(b-c)/i:g==b?(c-a)/i+2:(a-b)/i+4,d=(d+360)%6*60/360,f=(g+h)/2,e=0==i?0:.5>f?i/(2*f):i/(2-2*f),{h:d,s:e,l:f,toString:Y}},c.parsePathString=function(a){if(!a)return 
null;var b=c.path(a);if(b.arr)return c.path.clone(b.arr);var d={a:7,c:6,o:2,h:1,l:2,m:2,r:4,q:4,s:4,t:2,v:1,u:3,z:0},f=[];return e(a,"array")&&e(a[0],"array")&&(f=c.path.clone(a)),f.length||A(a).replace(N,function(a,b,c){var e=[],g=b.toLowerCase();if(c.replace(P,function(a,b){b&&e.push(+b)}),"m"==g&&e.length>2&&(f.push([b].concat(e.splice(0,2))),g="l",b="m"==b?"l":"L"),"o"==g&&1==e.length&&f.push([b,e[0]]),"r"==g)f.push([b].concat(e));else for(;e.length>=d[g]&&(f.push([b].concat(e.splice(0,d[g]))),d[g]););}),f.toString=c.path.toString,b.arr=c.path.clone(f),f};var aa=c.parseTransformString=function(a){if(!a)return null;var b=[];return e(a,"array")&&e(a[0],"array")&&(b=c.path.clone(a)),b.length||A(a).replace(O,function(a,c,d){var e=[];c.toLowerCase();d.replace(P,function(a,b){b&&e.push(+b)}),b.push([c].concat(e))}),b.toString=c.path.toString,b};c._.svgTransform2string=m,c._.rgTransform=/^[a-z][\s]*-?\.?\d/i,c._.transform2matrix=n,c._unit2px=q;y.doc.contains||y.doc.compareDocumentPosition?function(a,b){var c=9==a.nodeType?a.documentElement:a,d=b&&b.parentNode;return a==d||!(!d||1!=d.nodeType||!(c.contains?c.contains(d):a.compareDocumentPosition&&16&a.compareDocumentPosition(d)))}:function(a,b){if(b)for(;b;)if(b=b.parentNode,b==a)return!0;return!1};c._.getSomeDefs=o,c._.getSomeSVG=p,c.select=function(a){return a=A(a).replace(/([^\\]):/g,"$1\\:"),w(y.doc.querySelector(a))},c.selectAll=function(a){for(var b=y.doc.querySelectorAll(a),d=(c.set||Array)(),e=0;e<b.length;e++)d.push(w(b[e]));return d},setInterval(function(){for(var a in V)if(V[z](a)){var b=V[a],c=b.node;("svg"!=b.type&&!c.ownerSVGElement||"svg"==b.type&&(!c.parentNode||"ownerSVGElement"in c.parentNode&&!c.ownerSVGElement))&&delete V[a]}},1e4),s.prototype.attr=function(a,c){var d=this,f=d.node;if(!a){if(1!=f.nodeType)return{text:f.nodeValue};for(var g=f.attributes,h={},i=0,j=g.length;j>i;i++)h[g[i].nodeName]=g[i].nodeValue;return h}if(e(a,"string")){if(!(arguments.length>1))return 
b("snap.util.getattr."+a,d).firstDefined();var k={};k[a]=c,a=k}for(var l in a)a[z](l)&&b("snap.util.attr."+l,d,a[l]);return d},c.parse=function(a){var b=y.doc.createDocumentFragment(),c=!0,d=y.doc.createElement("div");if(a=A(a),a.match(/^\s*<\s*svg(?:\s|>)/)||(a="<svg>"+a+"</svg>",c=!1),d.innerHTML=a,a=d.getElementsByTagName("svg")[0])if(c)b=a;else for(;a.firstChild;)b.appendChild(a.firstChild);return new t(b)},c.fragment=function(){for(var a=Array.prototype.slice.call(arguments,0),b=y.doc.createDocumentFragment(),d=0,e=a.length;e>d;d++){var f=a[d];f.node&&f.node.nodeType&&b.appendChild(f.node),f.nodeType&&b.appendChild(f),"string"==typeof f&&b.appendChild(c.parse(f).node)}return new t(b)},c._.make=u,c._.wrap=w,v.prototype.el=function(a,b){var c=u(a,this.node);return b&&c.attr(b),c},s.prototype.children=function(){for(var a=[],b=this.node.childNodes,d=0,e=b.length;e>d;d++)a[d]=c(b[d]);return a},s.prototype.toJSON=function(){var a=[];return x([this],a),a[0]},b.on("snap.util.getattr",function(){var a=b.nt();a=a.substring(a.lastIndexOf(".")+1);var c=a.replace(/[A-Z]/g,function(a){return"-"+a.toLowerCase()});return ba[z](c)?this.node.ownerDocument.defaultView.getComputedStyle(this.node,null).getPropertyValue(c):d(this.node,a)});var 
ba={"alignment-baseline":0,"baseline-shift":0,clip:0,"clip-path":0,"clip-rule":0,color:0,"color-interpolation":0,"color-interpolation-filters":0,"color-profile":0,"color-rendering":0,cursor:0,direction:0,display:0,"dominant-baseline":0,"enable-background":0,fill:0,"fill-opacity":0,"fill-rule":0,filter:0,"flood-color":0,"flood-opacity":0,font:0,"font-family":0,"font-size":0,"font-size-adjust":0,"font-stretch":0,"font-style":0,"font-variant":0,"font-weight":0,"glyph-orientation-horizontal":0,"glyph-orientation-vertical":0,"image-rendering":0,kerning:0,"letter-spacing":0,"lighting-color":0,marker:0,"marker-end":0,"marker-mid":0,"marker-start":0,mask:0,opacity:0,overflow:0,"pointer-events":0,"shape-rendering":0,"stop-color":0,"stop-opacity":0,stroke:0,"stroke-dasharray":0,"stroke-dashoffset":0,"stroke-linecap":0,"stroke-linejoin":0,"stroke-miterlimit":0,"stroke-opacity":0,"stroke-width":0,"text-anchor":0,"text-decoration":0,"text-rendering":0,"unicode-bidi":0,visibility:0,"word-spacing":0,"writing-mode":0};b.on("snap.util.attr",function(a){var c=b.nt(),e={};c=c.substring(c.lastIndexOf(".")+1),e[c]=a;var f=c.replace(/-(\w)/gi,function(a,b){return b.toUpperCase()}),g=c.replace(/[A-Z]/g,function(a){return"-"+a.toLowerCase()});ba[z](g)?this.node.style[f]=null==a?I:a:d(this.node,e)}),function(a){}(v.prototype),c.ajax=function(a,c,d,f){var g=new XMLHttpRequest,h=S();if(g){if(e(c,"function"))f=d,d=c,c=null;else if(e(c,"object")){var i=[];for(var j in c)c.hasOwnProperty(j)&&i.push(encodeURIComponent(j)+"="+encodeURIComponent(c[j]));c=i.join("&")}return g.open(c?"POST":"GET",a,!0),c&&(g.setRequestHeader("X-Requested-With","XMLHttpRequest"),g.setRequestHeader("Content-type","application/x-www-form-urlencoded")),d&&(b.once("snap.ajax."+h+".0",d),b.once("snap.ajax."+h+".200",d),b.once("snap.ajax."+h+".304",d)),g.onreadystatechange=function(){4==g.readyState&&b("snap.ajax."+h+"."+g.status,f,g)},4==g.readyState?g:(g.send(c),g)}},c.load=function(a,b,d){c.ajax(a,function(a){var 
e=c.parse(a.responseText);d?b.call(d,e):b(e)})};var ca=function(a){var b=a.getBoundingClientRect(),c=a.ownerDocument,d=c.body,e=c.documentElement,f=e.clientTop||d.clientTop||0,h=e.clientLeft||d.clientLeft||0,i=b.top+(g.win.pageYOffset||e.scrollTop||d.scrollTop)-f,j=b.left+(g.win.pageXOffset||e.scrollLeft||d.scrollLeft)-h;return{y:i,x:j}};return c.getElementByPoint=function(a,b){var c=this,d=(c.canvas,y.doc.elementFromPoint(a,b));if(y.win.opera&&"svg"==d.tagName){var e=ca(d),f=d.createSVGRect();f.x=a-e.x,f.y=b-e.y,f.width=f.height=1;var g=d.getIntersectionList(f,null);g.length&&(d=g[g.length-1])}return d?w(d):null},c.plugin=function(a){a(c,s,v,y,t)},y.win.Snap=c,c}(a||this);return d.plugin(function(c,d,e,f,g){function h(a,b){if(null==b){var d=!0;if(b="linearGradient"==a.type||"radialGradient"==a.type?a.node.getAttribute("gradientTransform"):"pattern"==a.type?a.node.getAttribute("patternTransform"):a.node.getAttribute("transform"),!b)return new c.Matrix;b=c._.svgTransform2string(b)}else b=c._.rgTransform.test(b)?m(b).replace(/\.{3}|\u2026/g,a._.transform||""):c._.svgTransform2string(b),l(b,"array")&&(b=c.path?c.path.toString.call(b):m(b)),a._.transform=b;var e=c._.transform2matrix(b,a.getBBox(1));return d?e:void(a.matrix=e)}function i(a){function b(a,b){var d=o(a.node,b);d=d&&d.match(g),d=d&&d[2],d&&"#"==d.charAt()&&(d=d.substring(1),d&&(i[d]=(i[d]||[]).concat(function(d){var e={};e[b]=c.url(d),o(a.node,e)})))}function d(a){var b=o(a.node,"xlink:href");b&&"#"==b.charAt()&&(b=b.substring(1),b&&(i[b]=(i[b]||[]).concat(function(b){a.attr("xlink:href","#"+b)})))}for(var e,f=a.selectAll("*"),g=/^\s*url\(("|'|)(.*)\1\)\s*$/,h=[],i={},j=0,k=f.length;k>j;j++){e=f[j],b(e,"fill"),b(e,"stroke"),b(e,"filter"),b(e,"mask"),b(e,"clip-path"),d(e);var l=o(e.node,"id");l&&(o(e.node,{id:e.id}),h.push({old:l,id:e.id}))}for(j=0,k=h.length;k>j;j++){var m=i[h[j].old];if(m)for(var n=0,p=m.length;p>n;n++)m[n](h[j].id)}}function j(a){return function(){var 
b=a?"<"+this.type:"",c=this.node.attributes,d=this.node.childNodes;if(a)for(var e=0,f=c.length;f>e;e++)b+=" "+c[e].name+'="'+c[e].value.replace(/"/g,'\\"')+'"';if(d.length){for(a&&(b+=">"),e=0,f=d.length;f>e;e++)3==d[e].nodeType?b+=d[e].nodeValue:1==d[e].nodeType&&(b+=s(d[e]).toString());a&&(b+="</"+this.type+">")}else a&&(b+="/>");return b}}var k=d.prototype,l=c.is,m=String,n=c._unit2px,o=c._.$,p=c._.make,q=c._.getSomeDefs,r="hasOwnProperty",s=c._.wrap;k.getBBox=function(a){if("tspan"==this.type)return c._.box(this.node.getClientRects().item(0));if(!c.Matrix||!c.path)return this.node.getBBox();var b=this,d=new c.Matrix;if(b.removed)return c._.box();for(;"use"==b.type;)if(a||(d=d.add(b.transform().localMatrix.translate(b.attr("x")||0,b.attr("y")||0))),b.original)b=b.original;else{var e=b.attr("xlink:href");b=b.original=b.node.ownerDocument.getElementById(e.substring(e.indexOf("#")+1))}var f=b._,g=c.path.get[b.type]||c.path.get.deflt;try{return a?(f.bboxwt=g?c.path.getBBox(b.realPath=g(b)):c._.box(b.node.getBBox()),c._.box(f.bboxwt)):(b.realPath=g(b),b.matrix=b.transform().localMatrix,f.bbox=c.path.getBBox(c.path.map(b.realPath,d.add(b.matrix))),c._.box(f.bbox))}catch(h){return c._.box()}};var t=function(){return this.string};k.transform=function(a){var b=this._;if(null==a){for(var d,e=this,f=new c.Matrix(this.node.getCTM()),g=h(this),i=[g],j=new c.Matrix,k=g.toTransformString(),l=m(g)==m(this.matrix)?m(b.transform):k;"svg"!=e.type&&(e=e.parent());)i.push(h(e));for(d=i.length;d--;)j.add(i[d]);return{string:l,globalMatrix:f,totalMatrix:j,localMatrix:g,diffMatrix:f.clone().add(g.invert()),global:f.toTransformString(),total:j.toTransformString(),local:k,toString:t}}return a instanceof 
c.Matrix?(this.matrix=a,this._.transform=a.toTransformString()):h(this,a),this.node&&("linearGradient"==this.type||"radialGradient"==this.type?o(this.node,{gradientTransform:this.matrix}):"pattern"==this.type?o(this.node,{patternTransform:this.matrix}):o(this.node,{transform:this.matrix})),this},k.parent=function(){return s(this.node.parentNode)},k.append=k.add=function(a){if(a){if("set"==a.type){var b=this;return a.forEach(function(a){b.add(a)}),this}a=s(a),this.node.appendChild(a.node),a.paper=this.paper}return this},k.appendTo=function(a){return a&&(a=s(a),a.append(this)),this},k.prepend=function(a){if(a){if("set"==a.type){var b,c=this;return a.forEach(function(a){b?b.after(a):c.prepend(a),b=a}),this}a=s(a);var d=a.parent();this.node.insertBefore(a.node,this.node.firstChild),this.add&&this.add(),a.paper=this.paper,this.parent()&&this.parent().add(),d&&d.add()}return this},k.prependTo=function(a){return a=s(a),a.prepend(this),this},k.before=function(a){if("set"==a.type){var b=this;return a.forEach(function(a){var c=a.parent();b.node.parentNode.insertBefore(a.node,b.node),c&&c.add()}),this.parent().add(),this}a=s(a);var c=a.parent();return this.node.parentNode.insertBefore(a.node,this.node),this.parent()&&this.parent().add(),c&&c.add(),a.paper=this.paper,this},k.after=function(a){a=s(a);var b=a.parent();return this.node.nextSibling?this.node.parentNode.insertBefore(a.node,this.node.nextSibling):this.node.parentNode.appendChild(a.node),this.parent()&&this.parent().add(),b&&b.add(),a.paper=this.paper,this},k.insertBefore=function(a){a=s(a);var b=this.parent();return a.node.parentNode.insertBefore(this.node,a.node),this.paper=a.paper,b&&b.add(),a.parent()&&a.parent().add(),this},k.insertAfter=function(a){a=s(a);var b=this.parent();return a.node.parentNode.insertBefore(this.node,a.node.nextSibling),this.paper=a.paper,b&&b.add(),a.parent()&&a.parent().add(),this},k.remove=function(){var a=this.parent();return 
this.node.parentNode&&this.node.parentNode.removeChild(this.node),delete this.paper,this.removed=!0,a&&a.add(),this},k.select=function(a){return s(this.node.querySelector(a))},k.selectAll=function(a){for(var b=this.node.querySelectorAll(a),d=(c.set||Array)(),e=0;e<b.length;e++)d.push(s(b[e]));return d},k.asPX=function(a,b){return null==b&&(b=this.attr(a)),+n(this,a,b)},k.use=function(){var a,b=this.node.id;return b||(b=this.id,o(this.node,{id:b})),a="linearGradient"==this.type||"radialGradient"==this.type||"pattern"==this.type?p(this.type,this.node.parentNode):p("use",this.node.parentNode),o(a.node,{"xlink:href":"#"+b}),a.original=this,a},k.clone=function(){var a=s(this.node.cloneNode(!0));return o(a.node,"id")&&o(a.node,{id:a.id}),i(a),a.insertAfter(this),a},k.toDefs=function(){var a=q(this);return a.appendChild(this.node),this},k.pattern=k.toPattern=function(a,b,c,d){var e=p("pattern",q(this));return null==a&&(a=this.getBBox()),l(a,"object")&&"x"in a&&(b=a.y,c=a.width,d=a.height,a=a.x),o(e.node,{x:a,y:b,width:c,height:d,patternUnits:"userSpaceOnUse",id:e.id,viewBox:[a,b,c,d].join(" ")}),e.node.appendChild(this.node),e},k.marker=function(a,b,c,d,e,f){var g=p("marker",q(this));return null==a&&(a=this.getBBox()),l(a,"object")&&"x"in a&&(b=a.y,c=a.width,d=a.height,e=a.refX||a.cx,f=a.refY||a.cy,a=a.x),o(g.node,{viewBox:[a,b,c,d].join(" "),markerWidth:c,markerHeight:d,orient:"auto",refX:e||0,refY:f||0,id:g.id}),g.node.appendChild(this.node),g};var u={};k.data=function(a,d){var e=u[this.id]=u[this.id]||{};if(0==arguments.length)return b("snap.data.get."+this.id,this,e,null),e;if(1==arguments.length){if(c.is(a,"object")){for(var f in a)a[r](f)&&this.data(f,a[f]);return this}return b("snap.data.get."+this.id,this,e[a],a),e[a]}return e[a]=d,b("snap.data.set."+this.id,this,d,a),this},k.removeData=function(a){return null==a?u[this.id]={}:u[this.id]&&delete u[this.id][a],this},k.outerSVG=k.toString=j(1),k.innerSVG=j(),k.toDataURL=function(){if(a&&a.btoa){var 
b=this.getBBox(),d=c.format('<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="{width}" height="{height}" viewBox="{x} {y} {width} {height}">{contents}</svg>',{x:+b.x.toFixed(3),y:+b.y.toFixed(3),width:+b.width.toFixed(3),height:+b.height.toFixed(3),
+contents:this.outerSVG()});return"data:image/svg+xml;base64,"+btoa(unescape(encodeURIComponent(d)))}},g.prototype.select=k.select,g.prototype.selectAll=k.selectAll}),d.plugin(function(a,d,e,f,g){function h(a,b,c){return function(d){var e=d.slice(a,b);return 1==e.length&&(e=e[0]),c?c(e):e}}var i=d.prototype,j=a.is,k=String,l="hasOwnProperty",m=function(a,b,d,e){"function"!=typeof d||d.length||(e=d,d=c.linear),this.attr=a,this.dur=b,d&&(this.easing=d),e&&(this.callback=e)};a._.Animation=m,a.animation=function(a,b,c,d){return new m(a,b,c,d)},i.inAnim=function(){var a=this,b=[];for(var c in a.anims)a.anims[l](c)&&!function(a){b.push({anim:new m(a._attrs,a.dur,a.easing,a._callback),mina:a,curStatus:a.status(),status:function(b){return a.status(b)},stop:function(){a.stop()}})}(a.anims[c]);return b},a.animate=function(a,d,e,f,g,h){"function"!=typeof g||g.length||(h=g,g=c.linear);var i=c.time(),j=c(a,d,i,i+f,c.time,e,g);return h&&b.once("mina.finish."+j.id,h),j},i.stop=function(){for(var a=this.inAnim(),b=0,c=a.length;c>b;b++)a[b].stop();return this},i.animate=function(a,d,e,f){"function"!=typeof e||e.length||(f=e,e=c.linear),a instanceof m&&(f=a.callback,e=a.easing,d=a.dur,a=a.attr);var g,i,n,o,p=[],q=[],r={},s=this;for(var t in a)if(a[l](t)){s.equal?(o=s.equal(t,k(a[t])),g=o.from,i=o.to,n=o.f):(g=+s.attr(t),i=+a[t]);var u=j(g,"array")?g.length:1;r[t]=h(p.length,p.length+u,n),p=p.concat(g),q=q.concat(i)}var v=c.time(),w=c(p,q,v,v+d,c.time,function(a){var b={};for(var c in r)r[l](c)&&(b[c]=r[c](a));s.attr(b)},e);return s.anims[w.id]=w,w._attrs=a,w._callback=f,b("snap.animcreated."+s.id,w),b.once("mina.finish."+w.id,function(){b.off("mina.*."+w.id),delete s.anims[w.id],f&&f.call(s)}),b.once("mina.stop."+w.id,function(){b.off("mina.*."+w.id),delete s.anims[w.id]}),s}}),d.plugin(function(a,b,c,d,e){function f(a,b,c,d,e,f){return null==b&&"[object 
SVGMatrix]"==g.call(a)?(this.a=a.a,this.b=a.b,this.c=a.c,this.d=a.d,this.e=a.e,void(this.f=a.f)):void(null!=a?(this.a=+a,this.b=+b,this.c=+c,this.d=+d,this.e=+e,this.f=+f):(this.a=1,this.b=0,this.c=0,this.d=1,this.e=0,this.f=0))}var g=Object.prototype.toString,h=String,i=Math,j="";!function(b){function c(a){return a[0]*a[0]+a[1]*a[1]}function d(a){var b=i.sqrt(c(a));a[0]&&(a[0]/=b),a[1]&&(a[1]/=b)}b.add=function(a,b,c,d,e,g){if(a&&a instanceof f)return this.add(a.a,a.b,a.c,a.d,a.e,a.f);var h=a*this.a+b*this.c,i=a*this.b+b*this.d;return this.e+=e*this.a+g*this.c,this.f+=e*this.b+g*this.d,this.c=c*this.a+d*this.c,this.d=c*this.b+d*this.d,this.a=h,this.b=i,this},f.prototype.multLeft=function(a,b,c,d,e,g){if(a&&a instanceof f)return this.multLeft(a.a,a.b,a.c,a.d,a.e,a.f);var h=a*this.a+c*this.b,i=a*this.c+c*this.d,j=a*this.e+c*this.f+e;return this.b=b*this.a+d*this.b,this.d=b*this.c+d*this.d,this.f=b*this.e+d*this.f+g,this.a=h,this.c=i,this.e=j,this},b.invert=function(){var a=this,b=a.a*a.d-a.b*a.c;return new f(a.d/b,-a.b/b,-a.c/b,a.a/b,(a.c*a.f-a.d*a.e)/b,(a.b*a.e-a.a*a.f)/b)},b.clone=function(){return new f(this.a,this.b,this.c,this.d,this.e,this.f)},b.translate=function(a,b){return this.e+=a*this.a+b*this.c,this.f+=a*this.b+b*this.d,this},b.scale=function(a,b,c,d){return null==b&&(b=a),(c||d)&&this.translate(c,d),this.a*=a,this.b*=a,this.c*=b,this.d*=b,(c||d)&&this.translate(-c,-d),this},b.rotate=function(b,c,d){b=a.rad(b),c=c||0,d=d||0;var e=+i.cos(b).toFixed(9),f=+i.sin(b).toFixed(9);return this.add(e,f,-f,e,c,d),this.add(1,0,0,1,-c,-d)},b.skewX=function(a){return this.skew(a,0)},b.skewY=function(a){return this.skew(0,a)},b.skew=function(b,c){b=b||0,c=c||0,b=a.rad(b),c=a.rad(c);var d=i.tan(b).toFixed(9),e=i.tan(c).toFixed(9);return this.add(1,e,d,1,0,0)},b.x=function(a,b){return a*this.a+b*this.c+this.e},b.y=function(a,b){return 
a*this.b+b*this.d+this.f},b.get=function(a){return+this[h.fromCharCode(97+a)].toFixed(4)},b.toString=function(){return"matrix("+[this.get(0),this.get(1),this.get(2),this.get(3),this.get(4),this.get(5)].join()+")"},b.offset=function(){return[this.e.toFixed(4),this.f.toFixed(4)]},b.determinant=function(){return this.a*this.d-this.b*this.c},b.split=function(){var b={};b.dx=this.e,b.dy=this.f;var e=[[this.a,this.b],[this.c,this.d]];b.scalex=i.sqrt(c(e[0])),d(e[0]),b.shear=e[0][0]*e[1][0]+e[0][1]*e[1][1],e[1]=[e[1][0]-e[0][0]*b.shear,e[1][1]-e[0][1]*b.shear],b.scaley=i.sqrt(c(e[1])),d(e[1]),b.shear/=b.scaley,this.determinant()<0&&(b.scalex=-b.scalex);var f=e[0][1],g=e[1][1];return 0>g?(b.rotate=a.deg(i.acos(g)),0>f&&(b.rotate=360-b.rotate)):b.rotate=a.deg(i.asin(f)),b.isSimple=!(+b.shear.toFixed(9)||b.scalex.toFixed(9)!=b.scaley.toFixed(9)&&b.rotate),b.isSuperSimple=!+b.shear.toFixed(9)&&b.scalex.toFixed(9)==b.scaley.toFixed(9)&&!b.rotate,b.noRotation=!+b.shear.toFixed(9)&&!b.rotate,b},b.toTransformString=function(a){var b=a||this.split();return+b.shear.toFixed(9)?"m"+[this.get(0),this.get(1),this.get(2),this.get(3),this.get(4),this.get(5)]:(b.scalex=+b.scalex.toFixed(4),b.scaley=+b.scaley.toFixed(4),b.rotate=+b.rotate.toFixed(4),(b.dx||b.dy?"t"+[+b.dx.toFixed(4),+b.dy.toFixed(4)]:j)+(b.rotate?"r"+[+b.rotate.toFixed(4),0,0]:j)+(1!=b.scalex||1!=b.scaley?"s"+[b.scalex,b.scaley,0,0]:j))}}(f.prototype),a.Matrix=f,a.matrix=function(a,b,c,d,e,g){return new f(a,b,c,d,e,g)}}),d.plugin(function(a,c,d,e,f){function g(d){return function(e){if(b.stop(),e instanceof f&&1==e.node.childNodes.length&&("radialGradient"==e.node.firstChild.tagName||"linearGradient"==e.node.firstChild.tagName||"pattern"==e.node.firstChild.tagName)&&(e=e.node.firstChild,n(this).appendChild(e),e=l(e)),e instanceof c)if("radialGradient"==e.type||"linearGradient"==e.type||"pattern"==e.type){e.node.id||p(e.node,{id:e.id});var g=q(e.node.id)}else g=e.attr(d);else if(g=a.color(e),g.error){var 
h=a(n(this).ownerSVGElement).gradient(e);h?(h.node.id||p(h.node,{id:h.id}),g=q(h.node.id)):g=e}else g=r(g);var i={};i[d]=g,p(this.node,i),this.node.style[d]=t}}function h(a){b.stop(),a==+a&&(a+="px"),this.node.style.fontSize=a}function i(a){for(var b=[],c=a.childNodes,d=0,e=c.length;e>d;d++){var f=c[d];3==f.nodeType&&b.push(f.nodeValue),"tspan"==f.tagName&&(1==f.childNodes.length&&3==f.firstChild.nodeType?b.push(f.firstChild.nodeValue):b.push(i(f)))}return b}function j(){return b.stop(),this.node.style.fontSize}var k=a._.make,l=a._.wrap,m=a.is,n=a._.getSomeDefs,o=/^url\((['"]?)([^)]+)\1\)$/,p=a._.$,q=a.url,r=String,s=a._.separator,t="";a.deurl=function(a){var b=String(a).match(o);return b?b[2]:a},b.on("snap.util.attr.mask",function(a){if(a instanceof c||a instanceof f){if(b.stop(),a instanceof f&&1==a.node.childNodes.length&&(a=a.node.firstChild,n(this).appendChild(a),a=l(a)),"mask"==a.type)var d=a;else d=k("mask",n(this)),d.node.appendChild(a.node);!d.node.id&&p(d.node,{id:d.id}),p(this.node,{mask:q(d.id)})}}),function(a){b.on("snap.util.attr.clip",a),b.on("snap.util.attr.clip-path",a),b.on("snap.util.attr.clipPath",a)}(function(a){if(a instanceof c||a instanceof f){b.stop();for(var d,e=a.node;e;){if("clipPath"===e.nodeName){d=new c(e);break}if("svg"===e.nodeName){d=void 0;break}e=e.parentNode}d||(d=k("clipPath",n(this)),d.node.appendChild(a.node),!d.node.id&&p(d.node,{id:d.id})),p(this.node,{"clip-path":q(d.node.id||d.id)})}}),b.on("snap.util.attr.fill",g("fill")),b.on("snap.util.attr.stroke",g("stroke"));var u=/^([lr])(?:\(([^)]*)\))?(.*)$/i;b.on("snap.util.grad.parse",function(a){function b(a,b){for(var c=(b-h)/(a-i),d=i;a>d;d++)f[d].offset=+(+h+c*(d-i)).toFixed(2);i=a,h=b}a=r(a);var c=a.match(u);if(!c)return null;var d=c[1],e=c[2],f=c[3];e=e.split(/\s*,\s*/).map(function(a){return+a==a?+a:a}),1==e.length&&0==e[0]&&(e=[]),f=f.split("-"),f=f.map(function(a){a=a.split(":");var b={color:a[0]};return a[1]&&(b.offset=parseFloat(a[1])),b});var 
g=f.length,h=0,i=0;g--;for(var j=0;g>j;j++)"offset"in f[j]&&b(j,f[j].offset);return f[g].offset=f[g].offset||100,b(g,f[g].offset),{type:d,params:e,stops:f}}),b.on("snap.util.attr.d",function(c){b.stop(),m(c,"array")&&m(c[0],"array")&&(c=a.path.toString.call(c)),c=r(c),c.match(/[ruo]/i)&&(c=a.path.toAbsolute(c)),p(this.node,{d:c})})(-1),b.on("snap.util.attr.#text",function(a){b.stop(),a=r(a);for(var c=e.doc.createTextNode(a);this.node.firstChild;)this.node.removeChild(this.node.firstChild);this.node.appendChild(c)})(-1),b.on("snap.util.attr.path",function(a){b.stop(),this.attr({d:a})})(-1),b.on("snap.util.attr.class",function(a){b.stop(),this.node.className.baseVal=a})(-1),b.on("snap.util.attr.viewBox",function(a){var c;c=m(a,"object")&&"x"in a?[a.x,a.y,a.width,a.height].join(" "):m(a,"array")?a.join(" "):a,p(this.node,{viewBox:c}),b.stop()})(-1),b.on("snap.util.attr.transform",function(a){this.transform(a),b.stop()})(-1),b.on("snap.util.attr.r",function(a){"rect"==this.type&&(b.stop(),p(this.node,{rx:a,ry:a}))})(-1),b.on("snap.util.attr.textpath",function(a){if(b.stop(),"text"==this.type){var d,e,f;if(!a&&this.textPath){for(e=this.textPath;e.node.firstChild;)this.node.appendChild(e.node.firstChild);return e.remove(),void delete this.textPath}if(m(a,"string")){var g=n(this),h=l(g.parentNode).path(a);g.appendChild(h.node),d=h.id,h.attr({id:d})}else a=l(a),a instanceof c&&(d=a.attr("id"),d||(d=a.id,a.attr({id:d})));if(d)if(e=this.textPath,f=this.node,e)e.attr({"xlink:href":"#"+d});else{for(e=p("textPath",{"xlink:href":"#"+d});f.firstChild;)e.appendChild(f.firstChild);f.appendChild(e),this.textPath=l(e)}}})(-1),b.on("snap.util.attr.text",function(a){if("text"==this.type){for(var c=this.node,d=function(a){var b=p("tspan");if(m(a,"array"))for(var c=0;c<a.length;c++)b.appendChild(d(a[c]));else b.appendChild(e.doc.createTextNode(a));return b.normalize&&b.normalize(),b};c.firstChild;)c.removeChild(c.firstChild);for(var 
f=d(a);f.firstChild;)c.appendChild(f.firstChild)}b.stop()})(-1),b.on("snap.util.attr.fontSize",h)(-1),b.on("snap.util.attr.font-size",h)(-1),b.on("snap.util.getattr.transform",function(){return b.stop(),this.transform()})(-1),b.on("snap.util.getattr.textpath",function(){return b.stop(),this.textPath})(-1),function(){function c(c){return function(){b.stop();var d=e.doc.defaultView.getComputedStyle(this.node,null).getPropertyValue("marker-"+c);return"none"==d?d:a(e.doc.getElementById(d.match(o)[1]))}}function d(a){return function(c){b.stop();var d="marker"+a.charAt(0).toUpperCase()+a.substring(1);if(""==c||!c)return void(this.node.style[d]="none");if("marker"==c.type){var e=c.node.id;return e||p(c.node,{id:c.id}),void(this.node.style[d]=q(e))}}}b.on("snap.util.getattr.marker-end",c("end"))(-1),b.on("snap.util.getattr.markerEnd",c("end"))(-1),b.on("snap.util.getattr.marker-start",c("start"))(-1),b.on("snap.util.getattr.markerStart",c("start"))(-1),b.on("snap.util.getattr.marker-mid",c("mid"))(-1),b.on("snap.util.getattr.markerMid",c("mid"))(-1),b.on("snap.util.attr.marker-end",d("end"))(-1),b.on("snap.util.attr.markerEnd",d("end"))(-1),b.on("snap.util.attr.marker-start",d("start"))(-1),b.on("snap.util.attr.markerStart",d("start"))(-1),b.on("snap.util.attr.marker-mid",d("mid"))(-1),b.on("snap.util.attr.markerMid",d("mid"))(-1)}(),b.on("snap.util.getattr.r",function(){return"rect"==this.type&&p(this.node,"rx")==p(this.node,"ry")?(b.stop(),p(this.node,"rx")):void 0})(-1),b.on("snap.util.getattr.text",function(){if("text"==this.type||"tspan"==this.type){b.stop();var a=i(this.node);return 1==a.length?a[0]:a}})(-1),b.on("snap.util.getattr.#text",function(){return this.node.textContent})(-1),b.on("snap.util.getattr.fill",function(c){if(!c){b.stop();var d=b("snap.util.getattr.fill",this,!0).firstDefined();return a(a.deurl(d))||d}})(-1),b.on("snap.util.getattr.stroke",function(c){if(!c){b.stop();var d=b("snap.util.getattr.stroke",this,!0).firstDefined();return 
a(a.deurl(d))||d}})(-1),b.on("snap.util.getattr.viewBox",function(){b.stop();var c=p(this.node,"viewBox");return c?(c=c.split(s),a._.box(+c[0],+c[1],+c[2],+c[3])):void 0})(-1),b.on("snap.util.getattr.points",function(){var a=p(this.node,"points");return b.stop(),a?a.split(s):void 0})(-1),b.on("snap.util.getattr.path",function(){var a=p(this.node,"d");return b.stop(),a})(-1),b.on("snap.util.getattr.class",function(){return this.node.className.baseVal})(-1),b.on("snap.util.getattr.fontSize",j)(-1),b.on("snap.util.getattr.font-size",j)(-1)}),d.plugin(function(a,b,c,d,e){var f=/\S+/g,g=String,h=b.prototype;h.addClass=function(a){var b,c,d,e,h=g(a||"").match(f)||[],i=this.node,j=i.className.baseVal,k=j.match(f)||[];if(h.length){for(b=0;d=h[b++];)c=k.indexOf(d),~c||k.push(d);e=k.join(" "),j!=e&&(i.className.baseVal=e)}return this},h.removeClass=function(a){var b,c,d,e,h=g(a||"").match(f)||[],i=this.node,j=i.className.baseVal,k=j.match(f)||[];if(k.length){for(b=0;d=h[b++];)c=k.indexOf(d),~c&&k.splice(c,1);e=k.join(" "),j!=e&&(i.className.baseVal=e)}return this},h.hasClass=function(a){var b=this.node,c=b.className.baseVal,d=c.match(f)||[];return!!~d.indexOf(a)},h.toggleClass=function(a,b){if(null!=b)return b?this.addClass(a):this.removeClass(a);var c,d,e,g,h=(a||"").match(f)||[],i=this.node,j=i.className.baseVal,k=j.match(f)||[];for(c=0;e=h[c++];)d=k.indexOf(e),~d?k.splice(d,1):k.push(e);return g=k.join(" "),j!=g&&(i.className.baseVal=g),this}}),d.plugin(function(a,c,d,e,f){function g(a){return a}function h(a){return function(b){return+b.toFixed(3)+a}}var i={"+":function(a,b){return a+b},"-":function(a,b){return a-b},"/":function(a,b){return a/b},"*":function(a,b){return a*b}},j=String,k=/[a-z]+$/i,l=/^\s*([+\-\/*])\s*=\s*([\d.eE+\-]+)\s*([^\d\s]+)?\s*$/;b.on("snap.util.attr",function(a){var c=j(a).match(l);if(c){var d=b.nt(),e=d.substring(d.lastIndexOf(".")+1),f=this.attr(e),g={};b.stop();var 
h=c[3]||"",m=f.match(k),n=i[c[1]];if(m&&m==h?a=n(parseFloat(f),+c[2]):(f=this.asPX(e),a=n(this.asPX(e),this.asPX(e,c[2]+h))),isNaN(f)||isNaN(a))return;g[e]=a,this.attr(g)}})(-10),b.on("snap.util.equal",function(a,c){var d=j(this.attr(a)||""),e=j(c).match(l);if(e){b.stop();var f=e[3]||"",m=d.match(k),n=i[e[1]];return m&&m==f?{from:parseFloat(d),to:n(parseFloat(d),+e[2]),f:h(m)}:(d=this.asPX(a),{from:d,to:n(d,this.asPX(a,e[2]+f)),f:g})}})(-10)}),d.plugin(function(c,d,e,f,g){var h=e.prototype,i=c.is;h.rect=function(a,b,c,d,e,f){var g;return null==f&&(f=e),i(a,"object")&&"[object Object]"==a?g=a:null!=a&&(g={x:a,y:b,width:c,height:d},null!=e&&(g.rx=e,g.ry=f)),this.el("rect",g)},h.circle=function(a,b,c){var d;return i(a,"object")&&"[object Object]"==a?d=a:null!=a&&(d={cx:a,cy:b,r:c}),this.el("circle",d)};var j=function(){function a(){this.parentNode.removeChild(this)}return function(b,c){var d=f.doc.createElement("img"),e=f.doc.body;d.style.cssText="position:absolute;left:-9999em;top:-9999em",d.onload=function(){c.call(d),d.onload=d.onerror=null,e.removeChild(d)},d.onerror=a,e.appendChild(d),d.src=b}}();h.image=function(a,b,d,e,f){var g=this.el("image");if(i(a,"object")&&"src"in a)g.attr(a);else if(null!=a){var h={"xlink:href":a,preserveAspectRatio:"none"};null!=b&&null!=d&&(h.x=b,h.y=d),null!=e&&null!=f?(h.width=e,h.height=f):j(a,function(){c._.$(g.node,{width:this.offsetWidth,height:this.offsetHeight})}),c._.$(g.node,h)}return g},h.ellipse=function(a,b,c,d){var e;return i(a,"object")&&"[object Object]"==a?e=a:null!=a&&(e={cx:a,cy:b,rx:c,ry:d}),this.el("ellipse",e)},h.path=function(a){var b;return i(a,"object")&&!i(a,"array")?b=a:a&&(b={d:a}),this.el("path",b)},h.group=h.g=function(a){var b=this.el("g");return 1==arguments.length&&a&&!a.type?b.attr(a):arguments.length&&b.add(Array.prototype.slice.call(arguments,0)),b},h.svg=function(a,b,c,d,e,f,g,h){var j={};return 
i(a,"object")&&null==b?j=a:(null!=a&&(j.x=a),null!=b&&(j.y=b),null!=c&&(j.width=c),null!=d&&(j.height=d),null!=e&&null!=f&&null!=g&&null!=h&&(j.viewBox=[e,f,g,h])),this.el("svg",j)},h.mask=function(a){var b=this.el("mask");return 1==arguments.length&&a&&!a.type?b.attr(a):arguments.length&&b.add(Array.prototype.slice.call(arguments,0)),b},h.ptrn=function(a,b,c,d,e,f,g,h){if(i(a,"object"))var j=a;else j={patternUnits:"userSpaceOnUse"},a&&(j.x=a),b&&(j.y=b),null!=c&&(j.width=c),null!=d&&(j.height=d),null!=e&&null!=f&&null!=g&&null!=h?j.viewBox=[e,f,g,h]:j.viewBox=[a||0,b||0,c||0,d||0];return this.el("pattern",j)},h.use=function(a){return null!=a?(a instanceof d&&(a.attr("id")||a.attr({id:c._.id(a)}),a=a.attr("id")),"#"==String(a).charAt()&&(a=a.substring(1)),this.el("use",{"xlink:href":"#"+a})):d.prototype.use.call(this)},h.symbol=function(a,b,c,d){var e={};return null!=a&&null!=b&&null!=c&&null!=d&&(e.viewBox=[a,b,c,d]),this.el("symbol",e)},h.text=function(a,b,c){var d={};return i(a,"object")?d=a:null!=a&&(d={x:a,y:b,text:c||""}),this.el("text",d)},h.line=function(a,b,c,d){var e={};return i(a,"object")?e=a:null!=a&&(e={x1:a,x2:c,y1:b,y2:d}),this.el("line",e)},h.polyline=function(a){arguments.length>1&&(a=Array.prototype.slice.call(arguments,0));var b={};return i(a,"object")&&!i(a,"array")?b=a:null!=a&&(b={points:a}),this.el("polyline",b)},h.polygon=function(a){arguments.length>1&&(a=Array.prototype.slice.call(arguments,0));var b={};return i(a,"object")&&!i(a,"array")?b=a:null!=a&&(b={points:a}),this.el("polygon",b)},function(){function d(){return this.selectAll("stop")}function e(a,b){var d=l("stop"),e={offset:+b+"%"};a=c.color(a),e["stop-color"]=a.hex,a.opacity<1&&(e["stop-opacity"]=a.opacity),l(d,e);for(var f,g=this.stops(),h=0;h<g.length;h++){var i=parseFloat(g[h].attr("offset"));if(i>b){this.node.insertBefore(d,g[h].node),f=!0;break}}return f||this.node.appendChild(d),this}function f(){if("linearGradient"==this.type){var 
a=l(this.node,"x1")||0,b=l(this.node,"x2")||1,d=l(this.node,"y1")||0,e=l(this.node,"y2")||0;return c._.box(a,d,math.abs(b-a),math.abs(e-d))}var f=this.node.cx||.5,g=this.node.cy||.5,h=this.node.r||0;return c._.box(f-h,g-h,2*h,2*h)}function g(a){var d=a,e=this.stops();if("string"==typeof a&&(d=b("snap.util.grad.parse",null,"l(0,0,0,1)"+a).firstDefined().stops),c.is(d,"array")){for(var f=0;f<e.length;f++)if(d[f]){var g=c.color(d[f].color),h={offset:d[f].offset+"%"};h["stop-color"]=g.hex,g.opacity<1&&(h["stop-opacity"]=g.opacity),e[f].attr(h)}else e[f].remove();for(f=e.length;f<d.length;f++)this.addStop(d[f].color,d[f].offset);return this}}function i(a,c){var d,e=b("snap.util.grad.parse",null,c).firstDefined();if(!e)return null;e.params.unshift(a),d="l"==e.type.toLowerCase()?j.apply(0,e.params):k.apply(0,e.params),e.type!=e.type.toLowerCase()&&l(d.node,{gradientUnits:"userSpaceOnUse"});for(var f=e.stops,g=f.length,h=0;g>h;h++){var i=f[h];d.addStop(i.color,i.offset)}return d}function j(a,b,h,i,j){var k=c._.make("linearGradient",a);return k.stops=d,k.addStop=e,k.getBBox=f,k.setStops=g,null!=b&&l(k.node,{x1:b,y1:h,x2:i,y2:j}),k}function k(a,b,g,h,i,j){var k=c._.make("radialGradient",a);return k.stops=d,k.addStop=e,k.getBBox=f,null!=b&&l(k.node,{cx:b,cy:g,r:h}),null!=i&&null!=j&&l(k.node,{fx:i,fy:j}),k}var l=c._.$;h.gradient=function(a){return i(this.defs,a)},h.gradientLinear=function(a,b,c,d){return j(this.defs,a,b,c,d)},h.gradientRadial=function(a,b,c,d,e){return k(this.defs,a,b,c,d,e)},h.toString=function(){var a,b=this.node.ownerDocument,d=b.createDocumentFragment(),e=b.createElement("div"),f=this.node.cloneNode(!0);return d.appendChild(e),e.appendChild(f),c._.$(f,{xmlns:"http://www.w3.org/2000/svg"}),a=e.innerHTML,d.removeChild(d.firstChild),a},h.toDataURL=function(){return a&&a.btoa?"data:image/svg+xml;base64,"+btoa(unescape(encodeURIComponent(this))):void 0},h.clear=function(){for(var 
a,b=this.node.firstChild;b;)a=b.nextSibling,"defs"!=b.tagName?b.parentNode.removeChild(b):h.clear.call({node:b}),b=a}}()}),d.plugin(function(a,b,c,d){function e(a){var b=e.ps=e.ps||{};return b[a]?b[a].sleep=100:b[a]={sleep:100},setTimeout(function(){for(var c in b)b[M](c)&&c!=a&&(b[c].sleep--,!b[c].sleep&&delete b[c])}),b[a]}function f(a,b,c,d){return null==a&&(a=b=c=d=0),null==b&&(b=a.y,c=a.width,d=a.height,a=a.x),{x:a,y:b,width:c,w:c,height:d,h:d,x2:a+c,y2:b+d,cx:a+c/2,cy:b+d/2,r1:P.min(c,d)/2,r2:P.max(c,d)/2,r0:P.sqrt(c*c+d*d)/2,path:y(a,b,c,d),vb:[a,b,c,d].join(" ")}}function g(){return this.join(",").replace(N,"$1")}function h(a){var b=L(a);return b.toString=g,b}function i(a,b,c,d,e,f,g,h,i){return null==i?p(a,b,c,d,e,f,g,h):k(a,b,c,d,e,f,g,h,q(a,b,c,d,e,f,g,h,i))}function j(c,d){function e(a){return+(+a).toFixed(3)}return a._.cacher(function(a,f,g){a instanceof b&&(a=a.attr("d")),a=G(a);for(var h,j,l,m,n,o="",p={},q=0,r=0,s=a.length;s>r;r++){if(l=a[r],"M"==l[0])h=+l[1],j=+l[2];else{if(m=i(h,j,l[1],l[2],l[3],l[4],l[5],l[6]),q+m>f){if(d&&!p.start){if(n=i(h,j,l[1],l[2],l[3],l[4],l[5],l[6],f-q),o+=["C"+e(n.start.x),e(n.start.y),e(n.m.x),e(n.m.y),e(n.x),e(n.y)],g)return o;p.start=o,o=["M"+e(n.x),e(n.y)+"C"+e(n.n.x),e(n.n.y),e(n.end.x),e(n.end.y),e(l[5]),e(l[6])].join(),q+=m,h=+l[5],j=+l[6];continue}if(!c&&!d)return n=i(h,j,l[1],l[2],l[3],l[4],l[5],l[6],f-q)}q+=m,h=+l[5],j=+l[6]}o+=l.shift()+l}return p.end=o,n=c?q:d?p:k(h,j,l[0],l[1],l[2],l[3],l[4],l[5],1)},null,a._.clone)}function k(a,b,c,d,e,f,g,h,i){var j=1-i,k=T(j,3),l=T(j,2),m=i*i,n=m*i,o=k*a+3*l*i*c+3*j*i*i*e+n*g,p=k*b+3*l*i*d+3*j*i*i*f+n*h,q=a+2*i*(c-a)+m*(e-2*c+a),r=b+2*i*(d-b)+m*(f-2*d+b),s=c+2*i*(e-c)+m*(g-2*e+c),t=d+2*i*(f-d)+m*(h-2*f+d),u=j*a+i*c,v=j*b+i*d,w=j*e+i*g,x=j*f+i*h,y=90-180*P.atan2(q-s,r-t)/Q;return{x:o,y:p,m:{x:q,y:r},n:{x:s,y:t},start:{x:u,y:v},end:{x:w,y:x},alpha:y}}function l(b,c,d,e,g,h,i,j){a.is(b,"array")||(b=[b,c,d,e,g,h,i,j]);var k=F.apply(null,b);return 
f(k.min.x,k.min.y,k.max.x-k.min.x,k.max.y-k.min.y)}function m(a,b,c){return b>=a.x&&b<=a.x+a.width&&c>=a.y&&c<=a.y+a.height}function n(a,b){return a=f(a),b=f(b),m(b,a.x,a.y)||m(b,a.x2,a.y)||m(b,a.x,a.y2)||m(b,a.x2,a.y2)||m(a,b.x,b.y)||m(a,b.x2,b.y)||m(a,b.x,b.y2)||m(a,b.x2,b.y2)||(a.x<b.x2&&a.x>b.x||b.x<a.x2&&b.x>a.x)&&(a.y<b.y2&&a.y>b.y||b.y<a.y2&&b.y>a.y)}function o(a,b,c,d,e){var f=-3*b+9*c-9*d+3*e,g=a*f+6*b-12*c+6*d;return a*g-3*b+3*c}function p(a,b,c,d,e,f,g,h,i){null==i&&(i=1),i=i>1?1:0>i?0:i;for(var j=i/2,k=12,l=[-.1252,.1252,-.3678,.3678,-.5873,.5873,-.7699,.7699,-.9041,.9041,-.9816,.9816],m=[.2491,.2491,.2335,.2335,.2032,.2032,.1601,.1601,.1069,.1069,.0472,.0472],n=0,p=0;k>p;p++){var q=j*l[p]+j,r=o(q,a,c,e,g),s=o(q,b,d,f,h),t=r*r+s*s;n+=m[p]*P.sqrt(t)}return j*n}function q(a,b,c,d,e,f,g,h,i){if(!(0>i||p(a,b,c,d,e,f,g,h)<i)){var j,k=1,l=k/2,m=k-l,n=.01;for(j=p(a,b,c,d,e,f,g,h,m);U(j-i)>n;)l/=2,m+=(i>j?1:-1)*l,j=p(a,b,c,d,e,f,g,h,m);return m}}function r(a,b,c,d,e,f,g,h){if(!(S(a,c)<R(e,g)||R(a,c)>S(e,g)||S(b,d)<R(f,h)||R(b,d)>S(f,h))){var i=(a*d-b*c)*(e-g)-(a-c)*(e*h-f*g),j=(a*d-b*c)*(f-h)-(b-d)*(e*h-f*g),k=(a-c)*(f-h)-(b-d)*(e-g);if(k){var l=i/k,m=j/k,n=+l.toFixed(2),o=+m.toFixed(2);if(!(n<+R(a,c).toFixed(2)||n>+S(a,c).toFixed(2)||n<+R(e,g).toFixed(2)||n>+S(e,g).toFixed(2)||o<+R(b,d).toFixed(2)||o>+S(b,d).toFixed(2)||o<+R(f,h).toFixed(2)||o>+S(f,h).toFixed(2)))return{x:l,y:m}}}}function s(a,b,c){var d=l(a),e=l(b);if(!n(d,e))return c?0:[];for(var f=p.apply(0,a),g=p.apply(0,b),h=~~(f/8),i=~~(g/8),j=[],m=[],o={},q=c?0:[],s=0;h+1>s;s++){var t=k.apply(0,a.concat(s/h));j.push({x:t.x,y:t.y,t:s/h})}for(s=0;i+1>s;s++)t=k.apply(0,b.concat(s/i)),m.push({x:t.x,y:t.y,t:s/i});for(s=0;h>s;s++)for(var u=0;i>u;u++){var v=j[s],w=j[s+1],x=m[u],y=m[u+1],z=U(w.x-v.x)<.001?"y":"x",A=U(y.x-x.x)<.001?"y":"x",B=r(v.x,v.y,w.x,w.y,x.x,x.y,y.x,y.y);if(B){if(o[B.x.toFixed(4)]==B.y.toFixed(4))continue;o[B.x.toFixed(4)]=B.y.toFixed(4);var 
C=v.t+U((B[z]-v[z])/(w[z]-v[z]))*(w.t-v.t),D=x.t+U((B[A]-x[A])/(y[A]-x[A]))*(y.t-x.t);C>=0&&1>=C&&D>=0&&1>=D&&(c?q++:q.push({x:B.x,y:B.y,t1:C,t2:D}))}}return q}function t(a,b){return v(a,b)}function u(a,b){return v(a,b,1)}function v(a,b,c){a=G(a),b=G(b);for(var d,e,f,g,h,i,j,k,l,m,n=c?0:[],o=0,p=a.length;p>o;o++){var q=a[o];if("M"==q[0])d=h=q[1],e=i=q[2];else{"C"==q[0]?(l=[d,e].concat(q.slice(1)),d=l[6],e=l[7]):(l=[d,e,d,e,h,i,h,i],d=h,e=i);for(var r=0,t=b.length;t>r;r++){var u=b[r];if("M"==u[0])f=j=u[1],g=k=u[2];else{"C"==u[0]?(m=[f,g].concat(u.slice(1)),f=m[6],g=m[7]):(m=[f,g,f,g,j,k,j,k],f=j,g=k);var v=s(l,m,c);if(c)n+=v;else{for(var w=0,x=v.length;x>w;w++)v[w].segment1=o,v[w].segment2=r,v[w].bez1=l,v[w].bez2=m;n=n.concat(v)}}}}}return n}function w(a,b,c){var d=x(a);return m(d,b,c)&&v(a,[["M",b,c],["H",d.x2+10]],1)%2==1}function x(a){var b=e(a);if(b.bbox)return L(b.bbox);if(!a)return f();a=G(a);for(var c,d=0,g=0,h=[],i=[],j=0,k=a.length;k>j;j++)if(c=a[j],"M"==c[0])d=c[1],g=c[2],h.push(d),i.push(g);else{var l=F(d,g,c[1],c[2],c[3],c[4],c[5],c[6]);h=h.concat(l.min.x,l.max.x),i=i.concat(l.min.y,l.max.y),d=c[5],g=c[6]}var m=R.apply(0,h),n=R.apply(0,i),o=S.apply(0,h),p=S.apply(0,i),q=f(m,n,o-m,p-n);return b.bbox=L(q),q}function y(a,b,c,d,e){if(e)return[["M",+a+ +e,b],["l",c-2*e,0],["a",e,e,0,0,1,e,e],["l",0,d-2*e],["a",e,e,0,0,1,-e,e],["l",2*e-c,0],["a",e,e,0,0,1,-e,-e],["l",0,2*e-d],["a",e,e,0,0,1,e,-e],["z"]];var f=[["M",a,b],["l",c,0],["l",0,d],["l",-c,0],["z"]];return f.toString=g,f}function z(a,b,c,d,e){if(null==e&&null==d&&(d=c),a=+a,b=+b,c=+c,d=+d,null!=e)var f=Math.PI/180,h=a+c*Math.cos(-d*f),i=a+c*Math.cos(-e*f),j=b+c*Math.sin(-d*f),k=b+c*Math.sin(-e*f),l=[["M",h,j],["A",c,c,0,+(e-d>180),0,i,k]];else l=[["M",a,b],["m",0,-d],["a",c,d,0,1,1,0,2*d],["a",c,d,0,1,1,0,-2*d],["z"]];return l.toString=g,l}function A(b){var c=e(b),d=String.prototype.toLowerCase;if(c.rel)return h(c.rel);a.is(b,"array")&&a.is(b&&b[0],"array")||(b=a.parsePathString(b));var 
f=[],i=0,j=0,k=0,l=0,m=0;"M"==b[0][0]&&(i=b[0][1],j=b[0][2],k=i,l=j,m++,f.push(["M",i,j]));for(var n=m,o=b.length;o>n;n++){var p=f[n]=[],q=b[n];if(q[0]!=d.call(q[0]))switch(p[0]=d.call(q[0]),p[0]){case"a":p[1]=q[1],p[2]=q[2],p[3]=q[3],p[4]=q[4],p[5]=q[5],p[6]=+(q[6]-i).toFixed(3),p[7]=+(q[7]-j).toFixed(3);break;case"v":p[1]=+(q[1]-j).toFixed(3);break;case"m":k=q[1],l=q[2];default:for(var r=1,s=q.length;s>r;r++)p[r]=+(q[r]-(r%2?i:j)).toFixed(3)}else{p=f[n]=[],"m"==q[0]&&(k=q[1]+i,l=q[2]+j);for(var t=0,u=q.length;u>t;t++)f[n][t]=q[t]}var v=f[n].length;switch(f[n][0]){case"z":i=k,j=l;break;case"h":i+=+f[n][v-1];break;case"v":j+=+f[n][v-1];break;default:i+=+f[n][v-2],j+=+f[n][v-1]}}return f.toString=g,c.rel=h(f),f}function B(b){var c=e(b);if(c.abs)return h(c.abs);if(K(b,"array")&&K(b&&b[0],"array")||(b=a.parsePathString(b)),!b||!b.length)return[["M",0,0]];var d,f=[],i=0,j=0,k=0,l=0,m=0;"M"==b[0][0]&&(i=+b[0][1],j=+b[0][2],k=i,l=j,m++,f[0]=["M",i,j]);for(var n,o,p=3==b.length&&"M"==b[0][0]&&"R"==b[1][0].toUpperCase()&&"Z"==b[2][0].toUpperCase(),q=m,r=b.length;r>q;q++){if(f.push(n=[]),o=b[q],d=o[0],d!=d.toUpperCase())switch(n[0]=d.toUpperCase(),n[0]){case"A":n[1]=o[1],n[2]=o[2],n[3]=o[3],n[4]=o[4],n[5]=o[5],n[6]=+o[6]+i,n[7]=+o[7]+j;break;case"V":n[1]=+o[1]+j;break;case"H":n[1]=+o[1]+i;break;case"R":for(var s=[i,j].concat(o.slice(1)),t=2,u=s.length;u>t;t++)s[t]=+s[t]+i,s[++t]=+s[t]+j;f.pop(),f=f.concat(I(s,p));break;case"O":f.pop(),s=z(i,j,o[1],o[2]),s.push(s[0]),f=f.concat(s);break;case"U":f.pop(),f=f.concat(z(i,j,o[1],o[2],o[3])),n=["U"].concat(f[f.length-1].slice(-2));break;case"M":k=+o[1]+i,l=+o[2]+j;default:for(t=1,u=o.length;u>t;t++)n[t]=+o[t]+(t%2?i:j)}else if("R"==d)s=[i,j].concat(o.slice(1)),f.pop(),f=f.concat(I(s,p)),n=["R"].concat(o.slice(-2));else if("O"==d)f.pop(),s=z(i,j,o[1],o[2]),s.push(s[0]),f=f.concat(s);else if("U"==d)f.pop(),f=f.concat(z(i,j,o[1],o[2],o[3])),n=["U"].concat(f[f.length-1].slice(-2));else for(var 
v=0,w=o.length;w>v;v++)n[v]=o[v];if(d=d.toUpperCase(),"O"!=d)switch(n[0]){case"Z":i=+k,j=+l;break;case"H":i=n[1];break;case"V":j=n[1];break;case"M":k=n[n.length-2],l=n[n.length-1];default:i=n[n.length-2],j=n[n.length-1]}}return f.toString=g,c.abs=h(f),f}function C(a,b,c,d){return[a,b,c,d,c,d]}function D(a,b,c,d,e,f){var g=1/3,h=2/3;return[g*a+h*c,g*b+h*d,g*e+h*c,g*f+h*d,e,f]}function E(b,c,d,e,f,g,h,i,j,k){var l,m=120*Q/180,n=Q/180*(+f||0),o=[],p=a._.cacher(function(a,b,c){var d=a*P.cos(c)-b*P.sin(c),e=a*P.sin(c)+b*P.cos(c);return{x:d,y:e}});if(!d||!e)return[b,c,i,j,i,j];if(k)y=k[0],z=k[1],w=k[2],x=k[3];else{l=p(b,c,-n),b=l.x,c=l.y,l=p(i,j,-n),i=l.x,j=l.y;var q=(P.cos(Q/180*f),P.sin(Q/180*f),(b-i)/2),r=(c-j)/2,s=q*q/(d*d)+r*r/(e*e);s>1&&(s=P.sqrt(s),d=s*d,e=s*e);var t=d*d,u=e*e,v=(g==h?-1:1)*P.sqrt(U((t*u-t*r*r-u*q*q)/(t*r*r+u*q*q))),w=v*d*r/e+(b+i)/2,x=v*-e*q/d+(c+j)/2,y=P.asin(((c-x)/e).toFixed(9)),z=P.asin(((j-x)/e).toFixed(9));y=w>b?Q-y:y,z=w>i?Q-z:z,0>y&&(y=2*Q+y),0>z&&(z=2*Q+z),h&&y>z&&(y-=2*Q),!h&&z>y&&(z-=2*Q)}var A=z-y;if(U(A)>m){var B=z,C=i,D=j;z=y+m*(h&&z>y?1:-1),i=w+d*P.cos(z),j=x+e*P.sin(z),o=E(i,j,d,e,f,0,h,C,D,[z,B,w,x])}A=z-y;var F=P.cos(y),G=P.sin(y),H=P.cos(z),I=P.sin(z),J=P.tan(A/4),K=4/3*d*J,L=4/3*e*J,M=[b,c],N=[b+K*G,c-L*F],O=[i+K*I,j-L*H],R=[i,j];if(N[0]=2*M[0]-N[0],N[1]=2*M[1]-N[1],k)return[N,O,R].concat(o);o=[N,O,R].concat(o).join().split(",");for(var S=[],T=0,V=o.length;V>T;T++)S[T]=T%2?p(o[T-1],o[T],n).y:p(o[T],o[T+1],n).x;return S}function F(a,b,c,d,e,f,g,h){for(var i,j,k,l,m,n,o,p,q=[],r=[[],[]],s=0;2>s;++s)if(0==s?(j=6*a-12*c+6*e,i=-3*a+9*c-9*e+3*g,k=3*c-3*a):(j=6*b-12*d+6*f,i=-3*b+9*d-9*f+3*h,k=3*d-3*b),U(i)<1e-12){if(U(j)<1e-12)continue;l=-k/j,l>0&&1>l&&q.push(l)}else o=j*j-4*k*i,p=P.sqrt(o),0>o||(m=(-j+p)/(2*i),m>0&&1>m&&q.push(m),n=(-j-p)/(2*i),n>0&&1>n&&q.push(n));for(var t,u=q.length,v=u;u--;)l=q[u],t=1-l,r[0][u]=t*t*t*a+3*t*t*l*c+3*t*l*l*e+l*l*l*g,r[1][u]=t*t*t*b+3*t*t*l*d+3*t*l*l*f+l*l*l*h;return 
r[0][v]=a,r[1][v]=b,r[0][v+1]=g,r[1][v+1]=h,r[0].length=r[1].length=v+2,{min:{x:R.apply(0,r[0]),y:R.apply(0,r[1])},max:{x:S.apply(0,r[0]),y:S.apply(0,r[1])}}}function G(a,b){var c=!b&&e(a);if(!b&&c.curve)return h(c.curve);for(var d=B(a),f=b&&B(b),g={x:0,y:0,bx:0,by:0,X:0,Y:0,qx:null,qy:null},i={x:0,y:0,bx:0,by:0,X:0,Y:0,qx:null,qy:null},j=(function(a,b,c){var d,e;if(!a)return["C",b.x,b.y,b.x,b.y,b.x,b.y];switch(!(a[0]in{T:1,Q:1})&&(b.qx=b.qy=null),a[0]){case"M":b.X=a[1],b.Y=a[2];break;case"A":a=["C"].concat(E.apply(0,[b.x,b.y].concat(a.slice(1))));break;case"S":"C"==c||"S"==c?(d=2*b.x-b.bx,e=2*b.y-b.by):(d=b.x,e=b.y),a=["C",d,e].concat(a.slice(1));break;case"T":"Q"==c||"T"==c?(b.qx=2*b.x-b.qx,b.qy=2*b.y-b.qy):(b.qx=b.x,b.qy=b.y),a=["C"].concat(D(b.x,b.y,b.qx,b.qy,a[1],a[2]));break;case"Q":b.qx=a[1],b.qy=a[2],a=["C"].concat(D(b.x,b.y,a[1],a[2],a[3],a[4]));break;case"L":a=["C"].concat(C(b.x,b.y,a[1],a[2]));break;case"H":a=["C"].concat(C(b.x,b.y,a[1],b.y));break;case"V":a=["C"].concat(C(b.x,b.y,b.x,a[1]));break;case"Z":a=["C"].concat(C(b.x,b.y,b.X,b.Y))}return a}),k=function(a,b){if(a[b].length>7){a[b].shift();for(var c=a[b];c.length;)m[b]="A",f&&(n[b]="A"),a.splice(b++,0,["C"].concat(c.splice(0,6)));a.splice(b,1),r=S(d.length,f&&f.length||0)}},l=function(a,b,c,e,g){a&&b&&"M"==a[g][0]&&"M"!=b[g][0]&&(b.splice(g,0,["M",e.x,e.y]),c.bx=0,c.by=0,c.x=a[g][1],c.y=a[g][2],r=S(d.length,f&&f.length||0))},m=[],n=[],o="",p="",q=0,r=S(d.length,f&&f.length||0);r>q;q++){d[q]&&(o=d[q][0]),"C"!=o&&(m[q]=o,q&&(p=m[q-1])),d[q]=j(d[q],g,p),"A"!=m[q]&&"C"==o&&(m[q]="C"),k(d,q),f&&(f[q]&&(o=f[q][0]),"C"!=o&&(n[q]=o,q&&(p=n[q-1])),f[q]=j(f[q],i,p),"A"!=n[q]&&"C"==o&&(n[q]="C"),k(f,q)),l(d,f,g,i,q),l(f,d,i,g,q);var s=d[q],t=f&&f[q],u=s.length,v=f&&t.length;g.x=s[u-2],g.y=s[u-1],g.bx=O(s[u-4])||g.x,g.by=O(s[u-3])||g.y,i.bx=f&&(O(t[v-4])||i.x),i.by=f&&(O(t[v-3])||i.y),i.x=f&&t[v-2],i.y=f&&t[v-1]}return f||(c.curve=h(d)),f?[d,f]:d}function H(a,b){if(!b)return a;var 
c,d,e,f,g,h,i;for(a=G(a),e=0,g=a.length;g>e;e++)for(i=a[e],f=1,h=i.length;h>f;f+=2)c=b.x(i[f],i[f+1]),d=b.y(i[f],i[f+1]),i[f]=c,i[f+1]=d;return a}function I(a,b){for(var c=[],d=0,e=a.length;e-2*!b>d;d+=2){var f=[{x:+a[d-2],y:+a[d-1]},{x:+a[d],y:+a[d+1]},{x:+a[d+2],y:+a[d+3]},{x:+a[d+4],y:+a[d+5]}];b?d?e-4==d?f[3]={x:+a[0],y:+a[1]}:e-2==d&&(f[2]={x:+a[0],y:+a[1]},f[3]={x:+a[2],y:+a[3]}):f[0]={x:+a[e-2],y:+a[e-1]}:e-4==d?f[3]=f[2]:d||(f[0]={x:+a[d],y:+a[d+1]}),c.push(["C",(-f[0].x+6*f[1].x+f[2].x)/6,(-f[0].y+6*f[1].y+f[2].y)/6,(f[1].x+6*f[2].x-f[3].x)/6,(f[1].y+6*f[2].y-f[3].y)/6,f[2].x,f[2].y])}return c}var J=b.prototype,K=a.is,L=a._.clone,M="hasOwnProperty",N=/,?([a-z]),?/gi,O=parseFloat,P=Math,Q=P.PI,R=P.min,S=P.max,T=P.pow,U=P.abs,V=j(1),W=j(),X=j(0,1),Y=a._unit2px,Z={path:function(a){return a.attr("path")},circle:function(a){var b=Y(a);return z(b.cx,b.cy,b.r)},ellipse:function(a){var b=Y(a);
+return z(b.cx||0,b.cy||0,b.rx,b.ry)},rect:function(a){var b=Y(a);return y(b.x||0,b.y||0,b.width,b.height,b.rx,b.ry)},image:function(a){var b=Y(a);return y(b.x||0,b.y||0,b.width,b.height)},line:function(a){return"M"+[a.attr("x1")||0,a.attr("y1")||0,a.attr("x2"),a.attr("y2")]},polyline:function(a){return"M"+a.attr("points")},polygon:function(a){return"M"+a.attr("points")+"z"},deflt:function(a){var b=a.node.getBBox();return y(b.x,b.y,b.width,b.height)}};a.path=e,a.path.getTotalLength=V,a.path.getPointAtLength=W,a.path.getSubpath=function(a,b,c){if(this.getTotalLength(a)-c<1e-6)return X(a,b).end;var d=X(a,c,1);return b?X(d,b).end:d},J.getTotalLength=function(){return this.node.getTotalLength?this.node.getTotalLength():void 0},J.getPointAtLength=function(a){return W(this.attr("d"),a)},J.getSubpath=function(b,c){return a.path.getSubpath(this.attr("d"),b,c)},a._.box=f,a.path.findDotsAtSegment=k,a.path.bezierBBox=l,a.path.isPointInsideBBox=m,a.closest=function(b,c,d,e){for(var g=100,h=f(b-g/2,c-g/2,g,g),i=[],j=d[0].hasOwnProperty("x")?function(a){return{x:d[a].x,y:d[a].y}}:function(a){return{x:d[a],y:e[a]}},k=0;1e6>=g&&!k;){for(var l=0,n=d.length;n>l;l++){var o=j(l);if(m(h,o.x,o.y)){k++,i.push(o);break}}k||(g*=2,h=f(b-g/2,c-g/2,g,g))}if(1e6!=g){var p,q=1/0;for(l=0,n=i.length;n>l;l++){var r=a.len(b,c,i[l].x,i[l].y);q>r&&(q=r,i[l].len=r,p=i[l])}return p}},a.path.isBBoxIntersect=n,a.path.intersection=t,a.path.intersectionNumber=u,a.path.isPointInside=w,a.path.getBBox=x,a.path.get=Z,a.path.toRelative=A,a.path.toAbsolute=B,a.path.toCubic=G,a.path.map=H,a.path.toString=g,a.path.clone=h}),d.plugin(function(a,d,e,f){var g=Math.max,h=Math.min,i=function(a){if(this.items=[],this.bindings={},this.length=0,this.type="set",a)for(var b=0,c=a.length;c>b;b++)a[b]&&(this[this.items.length]=this.items[this.items.length]=a[b],this.length++)},j=i.prototype;j.push=function(){for(var 
a,b,c=0,d=arguments.length;d>c;c++)a=arguments[c],a&&(b=this.items.length,this[b]=this.items[b]=a,this.length++);return this},j.pop=function(){return this.length&&delete this[this.length--],this.items.pop()},j.forEach=function(a,b){for(var c=0,d=this.items.length;d>c;c++)if(a.call(b,this.items[c],c)===!1)return this;return this},j.animate=function(d,e,f,g){"function"!=typeof f||f.length||(g=f,f=c.linear),d instanceof a._.Animation&&(g=d.callback,f=d.easing,e=f.dur,d=d.attr);var h=arguments;if(a.is(d,"array")&&a.is(h[h.length-1],"array"))var i=!0;var j,k=function(){j?this.b=j:j=this.b},l=0,m=this,n=g&&function(){++l==m.length&&g.call(this)};return this.forEach(function(a,c){b.once("snap.animcreated."+a.id,k),i?h[c]&&a.animate.apply(a,h[c]):a.animate(d,e,f,n)})},j.remove=function(){for(;this.length;)this.pop().remove();return this},j.bind=function(a,b,c){var d={};if("function"==typeof b)this.bindings[a]=b;else{var e=c||a;this.bindings[a]=function(a){d[e]=a,b.attr(d)}}return this},j.attr=function(a){var b={};for(var c in a)this.bindings[c]?this.bindings[c](a[c]):b[c]=a[c];for(var d=0,e=this.items.length;e>d;d++)this.items[d].attr(b);return this},j.clear=function(){for(;this.length;)this.pop()},j.splice=function(a,b,c){a=0>a?g(this.length+a,0):a,b=g(0,h(this.length-a,b));var d,e=[],f=[],j=[];for(d=2;d<arguments.length;d++)j.push(arguments[d]);for(d=0;b>d;d++)f.push(this[a+d]);for(;d<this.length-a;d++)e.push(this[a+d]);var k=j.length;for(d=0;d<k+e.length;d++)this.items[a+d]=this[a+d]=k>d?j[d]:e[d-k];for(d=this.items.length=this.length-=b-k;this[d];)delete this[d++];return new i(f)},j.exclude=function(a){for(var b=0,c=this.length;c>b;b++)if(this[b]==a)return this.splice(b,1),!0;return!1},j.insertAfter=function(a){for(var b=this.items.length;b--;)this.items[b].insertAfter(a);return this},j.getBBox=function(){for(var a=[],b=[],c=[],d=[],e=this.items.length;e--;)if(!this.items[e].removed){var 
f=this.items[e].getBBox();a.push(f.x),b.push(f.y),c.push(f.x+f.width),d.push(f.y+f.height)}return a=h.apply(0,a),b=h.apply(0,b),c=g.apply(0,c),d=g.apply(0,d),{x:a,y:b,x2:c,y2:d,width:c-a,height:d-b,cx:a+(c-a)/2,cy:b+(d-b)/2}},j.clone=function(a){a=new i;for(var b=0,c=this.items.length;c>b;b++)a.push(this.items[b].clone());return a},j.toString=function(){return"Snap‘s set"},j.type="set",a.Set=i,a.set=function(){var a=new i;return arguments.length&&a.push.apply(a,Array.prototype.slice.call(arguments,0)),a}}),d.plugin(function(a,c,d,e){function f(a){var b=a[0];switch(b.toLowerCase()){case"t":return[b,0,0];case"m":return[b,1,0,0,1,0,0];case"r":return 4==a.length?[b,0,a[2],a[3]]:[b,0];case"s":return 5==a.length?[b,1,1,a[3],a[4]]:3==a.length?[b,1,1]:[b,1]}}function g(b,c,d){b=b||new a.Matrix,c=c||new a.Matrix,b=a.parseTransformString(b.toTransformString())||[],c=a.parseTransformString(c.toTransformString())||[];for(var e,g,h,i,j=Math.max(b.length,c.length),k=[],n=[],o=0;j>o;o++){if(h=b[o]||f(c[o]),i=c[o]||f(h),h[0]!=i[0]||"r"==h[0].toLowerCase()&&(h[2]!=i[2]||h[3]!=i[3])||"s"==h[0].toLowerCase()&&(h[3]!=i[3]||h[4]!=i[4])){b=a._.transform2matrix(b,d()),c=a._.transform2matrix(c,d()),k=[["m",b.a,b.b,b.c,b.d,b.e,b.f]],n=[["m",c.a,c.b,c.c,c.d,c.e,c.f]];break}for(k[o]=[],n[o]=[],e=0,g=Math.max(h.length,i.length);g>e;e++)e in h&&(k[o][e]=h[e]),e in i&&(n[o][e]=i[e])}return{from:m(k),to:m(n),f:l(k)}}function h(a){return a}function i(a){return function(b){return+b.toFixed(3)+a}}function j(a){return a.join(" ")}function k(b){return a.rgb(b[0],b[1],b[2],b[3])}function l(a){var b,c,d,e,f,g,h=0,i=[];for(b=0,c=a.length;c>b;b++){for(f="[",g=['"'+a[b][0]+'"'],d=1,e=a[b].length;e>d;d++)g[d]="val["+h++ +"]";f+=g+"]",i[b]=f}return Function("val","return Snap.path.toString.call(["+i+"])")}function m(a){for(var b=[],c=0,d=a.length;d>c;c++)for(var e=1,f=a[c].length;f>e;e++)b.push(a[c][e]);return b}function n(a){return isFinite(a)}function o(b,c){return 
a.is(b,"array")&&a.is(c,"array")?b.toString()==c.toString():!1}var p={},q=/[%a-z]+$/i,r=String;p.stroke=p.fill="colour",c.prototype.equal=function(a,c){return b("snap.util.equal",this,a,c).firstDefined()},b.on("snap.util.equal",function(b,c){var d,e,f=r(this.attr(b)||""),s=this;if("colour"==p[b])return d=a.color(f),e=a.color(c),{from:[d.r,d.g,d.b,d.opacity],to:[e.r,e.g,e.b,e.opacity],f:k};if("viewBox"==b)return d=this.attr(b).vb.split(" ").map(Number),e=c.split(" ").map(Number),{from:d,to:e,f:j};if("transform"==b||"gradientTransform"==b||"patternTransform"==b)return"string"==typeof c&&(c=r(c).replace(/\.{3}|\u2026/g,f)),f=this.matrix,c=a._.rgTransform.test(c)?a._.transform2matrix(c,this.getBBox()):a._.transform2matrix(a._.svgTransform2string(c),this.getBBox()),g(f,c,function(){return s.getBBox(1)});if("d"==b||"path"==b)return d=a.path.toCubic(f,c),{from:m(d[0]),to:m(d[1]),f:l(d[0])};if("points"==b)return d=r(f).split(a._.separator),e=r(c).split(a._.separator),{from:d,to:e,f:function(a){return a}};if(n(f)&&n(c))return{from:parseFloat(f),to:parseFloat(c),f:h};var t=f.match(q),u=r(c).match(q);return t&&o(t,u)?{from:parseFloat(f),to:parseFloat(c),f:i(t)}:{from:this.asPX(b),to:this.asPX(b,c),f:h}})}),d.plugin(function(a,c,d,e){for(var f=c.prototype,g="hasOwnProperty",h=("createTouch"in e.doc),i=["click","dblclick","mousedown","mousemove","mouseout","mouseover","mouseup","touchstart","touchmove","touchend","touchcancel"],j={mousedown:"touchstart",mousemove:"touchmove",mouseup:"touchend"},k=(function(a,b){var c="y"==a?"scrollTop":"scrollLeft",d=b&&b.node?b.node.ownerDocument:e.doc;return d[c in d.documentElement?"documentElement":"body"][c]}),l=function(){return this.originalEvent.preventDefault()},m=function(){return this.originalEvent.stopPropagation()},n=function(a,b,c,d){var e=h&&j[b]?j[b]:b,f=function(e){var f=k("y",d),i=k("x",d);if(h&&j[g](b))for(var 
n=0,o=e.targetTouches&&e.targetTouches.length;o>n;n++)if(e.targetTouches[n].target==a||a.contains(e.targetTouches[n].target)){var p=e;e=e.targetTouches[n],e.originalEvent=p,e.preventDefault=l,e.stopPropagation=m;break}var q=e.clientX+i,r=e.clientY+f;return c.call(d,e,q,r)};return b!==e&&a.addEventListener(b,f,!1),a.addEventListener(e,f,!1),function(){return b!==e&&a.removeEventListener(b,f,!1),a.removeEventListener(e,f,!1),!0}},o=[],p=function(a){for(var c,d=a.clientX,e=a.clientY,f=k("y"),g=k("x"),i=o.length;i--;){if(c=o[i],h){for(var j,l=a.touches&&a.touches.length;l--;)if(j=a.touches[l],j.identifier==c.el._drag.id||c.el.node.contains(j.target)){d=j.clientX,e=j.clientY,(a.originalEvent?a.originalEvent:a).preventDefault();break}}else a.preventDefault();var m=c.el.node;m.nextSibling,m.parentNode,m.style.display;d+=g,e+=f,b("snap.drag.move."+c.el.id,c.move_scope||c.el,d-c.el._drag.x,e-c.el._drag.y,d,e,a)}},q=function(c){a.unmousemove(p).unmouseup(q);for(var d,e=o.length;e--;)d=o[e],d.el._drag={},b("snap.drag.end."+d.el.id,d.end_scope||d.start_scope||d.move_scope||d.el,c),b.off("snap.drag.*."+d.el.id);o=[]},r=i.length;r--;)!function(b){a[b]=f[b]=function(c,d){if(a.is(c,"function"))this.events=this.events||[],this.events.push({name:b,f:c,unbind:n(this.node||document,b,c,d||this)});else for(var e=0,f=this.events.length;f>e;e++)if(this.events[e].name==b)try{this.events[e].f.call(this)}catch(g){}return this},a["un"+b]=f["un"+b]=function(a){for(var c=this.events||[],d=c.length;d--;)if(c[d].name==b&&(c[d].f==a||!a))return c[d].unbind(),c.splice(d,1),!c.length&&delete this.events,this;return this}}(i[r]);f.hover=function(a,b,c,d){return this.mouseover(a,c).mouseout(b,d||c)},f.unhover=function(a,b){return this.unmouseover(a).unmouseout(b)};var s=[];f.drag=function(c,d,e,f,g,h){function 
i(i,j,l){(i.originalEvent||i).preventDefault(),k._drag.x=j,k._drag.y=l,k._drag.id=i.identifier,!o.length&&a.mousemove(p).mouseup(q),o.push({el:k,move_scope:f,start_scope:g,end_scope:h}),d&&b.on("snap.drag.start."+k.id,d),c&&b.on("snap.drag.move."+k.id,c),e&&b.on("snap.drag.end."+k.id,e),b("snap.drag.start."+k.id,g||f||k,j,l,i)}function j(a,c,d){b("snap.draginit."+k.id,k,a,c,d)}var k=this;if(!arguments.length){var l;return k.drag(function(a,b){this.attr({transform:l+(l?"T":"t")+[a,b]})},function(){l=this.transform().local})}return b.on("snap.draginit."+k.id,i),k._drag={},s.push({el:k,start:i,init:j}),k.mousedown(j),k},f.undrag=function(){for(var c=s.length;c--;)s[c].el==this&&(this.unmousedown(s[c].init),s.splice(c,1),b.unbind("snap.drag.*."+this.id),b.unbind("snap.draginit."+this.id));return!s.length&&a.unmousemove(p).unmouseup(q),this}}),d.plugin(function(a,c,d,e){var f=(c.prototype,d.prototype),g=/^\s*url\((.+)\)/,h=String,i=a._.$;a.filter={},f.filter=function(b){var d=this;"svg"!=d.type&&(d=d.paper);var e=a.parse(h(b)),f=a._.id(),g=(d.node.offsetWidth,d.node.offsetHeight,i("filter"));return i(g,{id:f,filterUnits:"userSpaceOnUse"}),g.appendChild(e.node),d.defs.appendChild(g),new c(g)},b.on("snap.util.getattr.filter",function(){b.stop();var c=i(this.node,"filter");if(c){var d=h(c).match(g);return d&&a.select(d[1])}}),b.on("snap.util.attr.filter",function(d){if(d instanceof c&&"filter"==d.type){b.stop();var e=d.node.id;e||(i(d.node,{id:d.id}),e=d.id),i(this.node,{filter:a.url(e)})}d&&"none"!=d||(b.stop(),this.node.removeAttribute("filter"))}),a.filter.blur=function(b,c){null==b&&(b=2);var d=null==c?b:[b,c];return a.format('<feGaussianBlur stdDeviation="{def}"/>',{def:d})},a.filter.blur.toString=function(){return this()},a.filter.shadow=function(b,c,d,e,f){return null==f&&(null==e?(f=d,d=4,e="#000"):(f=e,e=d,d=4)),null==d&&(d=4),null==f&&(f=1),null==b&&(b=0,c=2),null==c&&(c=b),e=a.color(e),a.format('<feGaussianBlur in="SourceAlpha" stdDeviation="{blur}"/><feOffset 
dx="{dx}" dy="{dy}" result="offsetblur"/><feFlood flood-color="{color}"/><feComposite in2="offsetblur" operator="in"/><feComponentTransfer><feFuncA type="linear" slope="{opacity}"/></feComponentTransfer><feMerge><feMergeNode/><feMergeNode in="SourceGraphic"/></feMerge>',{color:e,dx:b,dy:c,blur:d,opacity:f})},a.filter.shadow.toString=function(){return this()},a.filter.grayscale=function(b){return null==b&&(b=1),a.format('<feColorMatrix type="matrix" values="{a} {b} {c} 0 0 {d} {e} {f} 0 0 {g} {b} {h} 0 0 0 0 0 1 0"/>',{a:.2126+.7874*(1-b),b:.7152-.7152*(1-b),c:.0722-.0722*(1-b),d:.2126-.2126*(1-b),e:.7152+.2848*(1-b),f:.0722-.0722*(1-b),g:.2126-.2126*(1-b),h:.0722+.9278*(1-b)})},a.filter.grayscale.toString=function(){return this()},a.filter.sepia=function(b){return null==b&&(b=1),a.format('<feColorMatrix type="matrix" values="{a} {b} {c} 0 0 {d} {e} {f} 0 0 {g} {h} {i} 0 0 0 0 0 1 0"/>',{a:.393+.607*(1-b),b:.769-.769*(1-b),c:.189-.189*(1-b),d:.349-.349*(1-b),e:.686+.314*(1-b),f:.168-.168*(1-b),g:.272-.272*(1-b),h:.534-.534*(1-b),i:.131+.869*(1-b)})},a.filter.sepia.toString=function(){return this()},a.filter.saturate=function(b){return null==b&&(b=1),a.format('<feColorMatrix type="saturate" values="{amount}"/>',{amount:1-b})},a.filter.saturate.toString=function(){return this()},a.filter.hueRotate=function(b){return b=b||0,a.format('<feColorMatrix type="hueRotate" values="{angle}"/>',{angle:b})},a.filter.hueRotate.toString=function(){return this()},a.filter.invert=function(b){return null==b&&(b=1),a.format('<feComponentTransfer><feFuncR type="table" tableValues="{amount} {amount2}"/><feFuncG type="table" tableValues="{amount} {amount2}"/><feFuncB type="table" tableValues="{amount} {amount2}"/></feComponentTransfer>',{amount:b,amount2:1-b})},a.filter.invert.toString=function(){return this()},a.filter.brightness=function(b){return null==b&&(b=1),a.format('<feComponentTransfer><feFuncR type="linear" slope="{amount}"/><feFuncG type="linear" slope="{amount}"/><feFuncB 
type="linear" slope="{amount}"/></feComponentTransfer>',{amount:b})},a.filter.brightness.toString=function(){return this()},a.filter.contrast=function(b){return null==b&&(b=1),a.format('<feComponentTransfer><feFuncR type="linear" slope="{amount}" intercept="{amount2}"/><feFuncG type="linear" slope="{amount}" intercept="{amount2}"/><feFuncB type="linear" slope="{amount}" intercept="{amount2}"/></feComponentTransfer>',{amount:b,amount2:.5-b/2})},a.filter.contrast.toString=function(){return this()}}),d.plugin(function(a,b,c,d,e){var f=a._.box,g=a.is,h=/^[^a-z]*([tbmlrc])/i,i=function(){return"T"+this.dx+","+this.dy};b.prototype.getAlign=function(a,b){null==b&&g(a,"string")&&(b=a,a=null),a=a||this.paper;var c=a.getBBox?a.getBBox():f(a),d=this.getBBox(),e={};switch(b=b&&b.match(h),b=b?b[1].toLowerCase():"c"){case"t":e.dx=0,e.dy=c.y-d.y;break;case"b":e.dx=0,e.dy=c.y2-d.y2;break;case"m":e.dx=0,e.dy=c.cy-d.cy;break;case"l":e.dx=c.x-d.x,e.dy=0;break;case"r":e.dx=c.x2-d.x2,e.dy=0;break;default:e.dx=c.cx-d.cx,e.dy=0}return e.toString=i,e},b.prototype.align=function(a,b){return this.transform("..."+this.getAlign(a,b))}}),d.plugin(function(b,c,d,e){function f(a){a=a.split(/(?=#)/);var b=new String(a[5]);return b[50]=a[0],b[100]=a[1],b[200]=a[2],b[300]=a[3],b[400]=a[4],b[500]=a[5],b[600]=a[6],b[700]=a[7],b[800]=a[8],b[900]=a[9],a[10]&&(b.A100=a[10],b.A200=a[11],b.A400=a[12],b.A700=a[13]),b}var 
g="#ffebee#ffcdd2#ef9a9a#e57373#ef5350#f44336#e53935#d32f2f#c62828#b71c1c#ff8a80#ff5252#ff1744#d50000",h="#FCE4EC#F8BBD0#F48FB1#F06292#EC407A#E91E63#D81B60#C2185B#AD1457#880E4F#FF80AB#FF4081#F50057#C51162",i="#F3E5F5#E1BEE7#CE93D8#BA68C8#AB47BC#9C27B0#8E24AA#7B1FA2#6A1B9A#4A148C#EA80FC#E040FB#D500F9#AA00FF",j="#EDE7F6#D1C4E9#B39DDB#9575CD#7E57C2#673AB7#5E35B1#512DA8#4527A0#311B92#B388FF#7C4DFF#651FFF#6200EA",k="#E8EAF6#C5CAE9#9FA8DA#7986CB#5C6BC0#3F51B5#3949AB#303F9F#283593#1A237E#8C9EFF#536DFE#3D5AFE#304FFE",l="#E3F2FD#BBDEFB#90CAF9#64B5F6#64B5F6#2196F3#1E88E5#1976D2#1565C0#0D47A1#82B1FF#448AFF#2979FF#2962FF",m="#E1F5FE#B3E5FC#81D4FA#4FC3F7#29B6F6#03A9F4#039BE5#0288D1#0277BD#01579B#80D8FF#40C4FF#00B0FF#0091EA",n="#E0F7FA#B2EBF2#80DEEA#4DD0E1#26C6DA#00BCD4#00ACC1#0097A7#00838F#006064#84FFFF#18FFFF#00E5FF#00B8D4",o="#E0F2F1#B2DFDB#80CBC4#4DB6AC#26A69A#009688#00897B#00796B#00695C#004D40#A7FFEB#64FFDA#1DE9B6#00BFA5",p="#E8F5E9#C8E6C9#A5D6A7#81C784#66BB6A#4CAF50#43A047#388E3C#2E7D32#1B5E20#B9F6CA#69F0AE#00E676#00C853",q="#F1F8E9#DCEDC8#C5E1A5#AED581#9CCC65#8BC34A#7CB342#689F38#558B2F#33691E#CCFF90#B2FF59#76FF03#64DD17",r="#F9FBE7#F0F4C3#E6EE9C#DCE775#D4E157#CDDC39#C0CA33#AFB42B#9E9D24#827717#F4FF81#EEFF41#C6FF00#AEEA00",s="#FFFDE7#FFF9C4#FFF59D#FFF176#FFEE58#FFEB3B#FDD835#FBC02D#F9A825#F57F17#FFFF8D#FFFF00#FFEA00#FFD600",t="#FFF8E1#FFECB3#FFE082#FFD54F#FFCA28#FFC107#FFB300#FFA000#FF8F00#FF6F00#FFE57F#FFD740#FFC400#FFAB00",u="#FFF3E0#FFE0B2#FFCC80#FFB74D#FFA726#FF9800#FB8C00#F57C00#EF6C00#E65100#FFD180#FFAB40#FF9100#FF6D00",v="#FBE9E7#FFCCBC#FFAB91#FF8A65#FF7043#FF5722#F4511E#E64A19#D84315#BF360C#FF9E80#FF6E40#FF3D00#DD2C00",w="#EFEBE9#D7CCC8#BCAAA4#A1887F#8D6E63#795548#6D4C41#5D4037#4E342E#3E2723",x="#FAFAFA#F5F5F5#EEEEEE#E0E0E0#BDBDBD#9E9E9E#757575#616161#424242#212121",y="#ECEFF1#CFD8DC#B0BEC5#90A4AE#78909C#607D8B#546E7A#455A64#37474F#263238";b.mui={},b.flat={},b.mui.red=f(g),b.mui.pink=f(h),b.mui.purple=f(i),b.mui.deeppurple=f(j),b.mui.indigo=f(k),b.mui.blue=f(l),b.m
ui.lightblue=f(m),b.mui.cyan=f(n),b.mui.teal=f(o),b.mui.green=f(p),b.mui.lightgreen=f(q),b.mui.lime=f(r),b.mui.yellow=f(s),b.mui.amber=f(t),b.mui.orange=f(u),b.mui.deeporange=f(v),b.mui.brown=f(w),b.mui.grey=f(x),b.mui.bluegrey=f(y),b.flat.turquoise="#1abc9c",b.flat.greensea="#16a085",b.flat.sunflower="#f1c40f",b.flat.orange="#f39c12",b.flat.emerland="#2ecc71",b.flat.nephritis="#27ae60",b.flat.carrot="#e67e22",b.flat.pumpkin="#d35400",b.flat.peterriver="#3498db",b.flat.belizehole="#2980b9",b.flat.alizarin="#e74c3c",b.flat.pomegranate="#c0392b",b.flat.amethyst="#9b59b6",b.flat.wisteria="#8e44ad",b.flat.clouds="#ecf0f1",b.flat.silver="#bdc3c7",b.flat.wetasphalt="#34495e",b.flat.midnightblue="#2c3e50",b.flat.concrete="#95a5a6",b.flat.asbestos="#7f8c8d",b.importMUIColors=function(){for(var c in b.mui)b.mui.hasOwnProperty(c)&&(a[c]=b.mui[c])}}),d}); \ No newline at end of file
diff --git a/scraper/reports/stats/empty_papers.csv b/scraper/reports/stats/empty_papers.csv
new file mode 100644
index 00000000..d0af596c
--- /dev/null
+++ b/scraper/reports/stats/empty_papers.csv
@@ -0,0 +1,2012 @@
+61668aeeb60bd2ede1f9b0873f0e19f6f845a029,The role of image understanding in contour detection,2012 IEEE Conference on Computer Vision and Pattern Recognition,2012
+61084a25ebe736e8f6d7a6e53b2c20d9723c4608,Face recognition for web-scale datasets,Computer Vision and Image Understanding,2014
+6128190a8c18cde6b94e0fae934d6fcc406ea0bb,STAIR Captions: Constructing a Large-Scale Japanese Image Caption Dataset,,2017
+619d215c2e80eedcc5a65c00fdcf5852f9cdedf8,Feature selection and classification of imbalanced datasets: Application to PET images of children with autistic spectrum disorders,NeuroImage,2011
+6187f91f1e53cf6f62afe30e01c7b1ed43505c9e,Localizing and Orienting Street Views Using Overhead Imagery,Unknown,2016
+611b1301b3bd13c518d0ec93d695e08b794766f7,When coding meets ranking: A joint framework based on local learning,CoRR,2014
+0d3d290e93ac76d5ef2d6c8bbced79fb3101ad36,Conditional Adversarial Synthesis of 3D Facial Action Units,CoRR,2018
+0deca8c53adcc13d8da72050d9a4b638da52264b,"A Comprehensive Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets",CoRR,2016
+0dc11a37cadda92886c56a6fb5191ded62099c28,We Are Family: Joint Pose Estimation of Multiple Persons,,2010
+0d467adaf936b112f570970c5210bdb3c626a717,"""FlowNet 2.0: Evolution of Optical Flow Estimation with Deep Networks""",,2016
+0d4fce2853a867e055a0062c2ef2f8accfc623f3,Snap2Play: A Mixed-Reality Game Based on Scene Identification,,2008
+0db8e6eb861ed9a70305c1839eaef34f2c85bbaf,Towards Large-Pose Face Frontalization in the Wild,2017 IEEE International Conference on Computer Vision (ICCV),2017
+0d902541c26f03ff95221e0e71d67c39e094a61d,Multivariate Time-Series Classification Using the Hidden-Unit Logistic Model,IEEE Transactions on Neural Networks and Learning Systems,2018
+0dbf4232fcbd52eb4599dc0760b18fcc1e9546e9,Early facial expression recognition using early RankBoost,2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG),2013
+0d555309828e4c31b79bbdea55066ac175720f86,Homeomorphic Manifold Analysis (HMA): Generalized separation of style and content on manifolds,Image Vision Comput.,2013
+0d087aaa6e2753099789cd9943495fbbd08437c0,Folded Recurrent Neural Networks for Future Video Prediction,CoRR,2017
+0d8415a56660d3969449e77095be46ef0254a448,Nonlinear Discriminant Analysis on Embedded Manifold,IEEE Transactions on Circuits and Systems for Video Technology,2007
+0d232056ee26b5da9b6b0658be12053a76484d2b,Hierarchical Spatial Sum-Product Networks for Action Recognition in Still Images,IEEE Trans. Circuits Syst. Video Techn.,2018
+0d028a924d8fce70d9fc42daecf77eb7caea67d8,Auxiliary Training Information Assisted Visual Recognition,IPSJ Trans. Computer Vision and Applications,2015
+0d735e7552af0d1dcd856a8740401916e54b7eee,EMPATH: a neural network that categorizes facial expressions.,Journal of cognitive neuroscience,2002
+0d06b3a4132d8a2effed115a89617e0a702c957a,Achieving stable subspace clustering by post-processing generic clustering results,2016 International Joint Conference on Neural Networks (IJCNN),2016
+0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e,Large Age-Gap face verification by feature injection in deep networks,Pattern Recognition Letters,2017
+95ed9e883b6321658b95a8db28d0704e90973a9d,Improving Image Annotation via Ranking-Oriented Neighbor Search and Learning-Based Keyword Propagation,,2013
+9594cc518db7890c3f20073525e9a335b2049e02,Semantic Visual Localization,CoRR,2017
+95704293fcaa01473e5c0b389d8afdcc0178d8c7,Accurate Human Detection by Appearance and Motion,IEICE Transactions,2010
+95f7dc555d6ee4deaf3e30d4ed4c8a806bccb424,Feature space locality constraint for kernel based nonlinear discriminant analysis,Pattern Recognition,2012
+952283f21ab30a1026b26911c160433ca147bf8c,Affective Facial Expression Processing via Simulation: A Probabilistic Model,CoRR,2014
+95c5908d856010aa9836a4f1a6cebf3828bcb9f6,3D Shape Reconstruction from 2D Landmarks: A Convex Formulation,CoRR,2014
+59cdafed4eeb8ff7c9bb2d4ecd0edeb8a361ffc1,"International Journal of Computer Application Issue 2, Volume 3 (june 2012) Issn: 2250-1797",,2012
+59a76bab968ac4cd740eb376ce9a26f6c1b103e4,People Re-Identification with Local Distance Comparison Using Learned Metric,IEICE Transactions,2014
+590628a9584e500f3e7f349ba7e2046c8c273fcf,Generating Natural Questions About an Image,CoRR,2016
+5996e84f482b7335cdb08ca218d450d37501e182,A Game-Theoretic Approach to Hypergraph Clustering,IEEE Transactions on Pattern Analysis and Machine Intelligence,2009
+5991f26b871c8fd8f675c11e44c445e3cebfbe7d,Field Studies with Multimedia Big Data: Opportunities and Challenges (Extended Ver,CoRR,2017
+92cc2cecbf065c4b55a3bceb0d9e475fcd70f8c7,Detection of social events in streams of social multimedia,International Journal of Multimedia Information Retrieval,2015
+9214e71ca44d87a9f43ba719f411d5307d78fc4a,Face recognition under varying illuminations using logarithmic fractal dimension-based complete eight local directional patterns,Neurocomputing,2016
+9209095ac450f14c603582ac01692b0f11c9c33b,Usability Test of Exercise Games Designed for Rehabilitation of Elderly Patients After Hip Replacement Surgery: Pilot Study,,2017
+92fada7564d572b72fd3be09ea3c39373df3e27c,Feature selection in the independent component subspace for face recognition,Pattern Recognition Letters,2004
+929218b75858e244c1ca99a6bec07ed9465737c7,Multi-voxel pattern analysis of fMRI data predicts clinical symptom severity,NeuroImage,2011
+0c06d6e0336f6cb6c80beb445ec5fec51b5c735d,Novel coarse-to-fine dual scale technique for tuberculosis cavity detection in chest radiographs,EURASIP J. Image and Video Processing,2013
+0c41e4e699bd4f64d744ad0bc820ab20da367499,Neighborhood Discriminant Projection for Face Recognition,18th International Conference on Pattern Recognition (ICPR'06),2006
+0c1f066a2246fd8d817318e3081f6fe3589f42ea,Cortical 3D Face Recognition Framework,,2011
+0cc2dd2900339836e6d42f2cb0e542bbe5627454,Learning for Real-World Image Applications,,2012
+0cccf576050f493c8b8fec9ee0238277c0cfd69a,Incremental Tube Construction for Human Action Detection,CoRR,2017
+0c4a139bb87c6743c7905b29a3cfec27a5130652,The FERET Verification Testing Protocol for Face Recognition Algorithms,,1998
+0c642068ff8e4a437f8c16656b08d1ce3c47d59b,"Development of high-speed and real-time vision platform, H3 vision",,2009
+0ca2304166acc90c3ffb5934f9a6343aeb80bd03,Supervised methods for detection and segmentation of tissues in clinical lumbar MRI,Computerized medical imaging and graphics : the official journal of the Computerized Medical Imaging Society,2014
+0cbe9732bc80761d7770e952275d5757b1acaea2,Transfer metric learning for action similarity using high-level semantics,Pattern Recognition Letters,2016
+0cb7a27177a782a091916bca3d8edb02f88577b5,Contour based object detection using part bundles,Computer vision and image understanding : CVIU,2010
+0cc22d1dab50d9bab9501008e9b359cd9e51872a,SuperParsing: Scalable Nonparametric Image Parsing with Superpixels,,2010
+0c54e9ac43d2d3bab1543c43ee137fc47b77276e,Spontaneous subtle expression detection and recognition based on facial strain,Sig. Proc.: Image Comm.,2016
+0c7608a158207052e0d615cd86d886a50d1f33da,Designing various multivariate analysis at will via generalized pairwise expression ∗,,2012
+0c166b1e5ae46c157301da4965a453295ec85658,Adaptive appearance model tracking for still-to-video face recognition,Pattern Recognition,2016
+0cd79b2193ef5086fe17f621a449ef3d67f5b3c4,Pose sentences: A new representation for action recognition using sequence of pose words,2008 19th International Conference on Pattern Recognition,2008
+0c60eebe10b56dbffe66bb3812793dd514865935,Exploiting Feature and Class Relationships in Video Categorization with Regularized Deep Neural Networks,IEEE Transactions on Pattern Analysis and Machine Intelligence,2018
+661bf7aa2de455f966f114d900f92a43f973ae49,"The New Modality: Emoji Challenges in Prediction, Anticipation, and Retrieval",CoRR,2018
+66af0fb424e4bc07cc28e08c7bf3a8b70c094d60,Facial feature detection using distance vector fields,Pattern Recognition,2009
+66759e18b1a1d53178fc79d8275e301e4d2f4ee8,Visualizing Support Vectors and topological data mapping for improved generalization capabilities,The 2010 International Joint Conference on Neural Networks (IJCNN),2010
+66886997988358847615375ba7d6e9eb0f1bb27f,Prototype-Based Discriminative Feature Learning for Kinship Verification,IEEE Transactions on Cybernetics,2015
+66837add89caffd9c91430820f49adb5d3f40930,"A New Face Recognition Method using PCA , LDA and Neural Network",Unknown,2012
+6623d8efb11bdca7348249c357902a5527a71e84,A new descriptor of gradients Self-Similarity for smile detection in unconstrained scenarios,Neurocomputing,2016
+66313d48a6352e731e40450f80a66c64aabae817,Exploring new representations and applications for motion analysis,,2009
+3ee7a8107a805370b296a53e355d111118e96b7c,Bayesian Learning of Sparse Gaussian Graphical Models,,2011
+3e1aa21ab4a5c242f54f23fbbeb5da29f9a965a6,FiLM: Visual Reasoning with a General Conditioning Layer,CoRR,2017
+3e1d799e5b7d5bd7e0d3b3bffb292878d27c5b7e,Adversarial-Playground: A Visualization Suite for Adversarial Sample Generation,CoRR,2017
+3ec05713a1eed6fa9b57fef718f369f68bbbe09f,Wildlife recognition in nature documentaries with weak supervision from subtitles and external data,Pattern Recognition Letters,2016
+3ee096aff93ab9a2374cdde06973db1996331d86,Artistic Style Transfer for Videos,,2016
+3ec5afaee732157a1039d25b953aec38bc151638,Distributed Submodular Maximization,Journal of Machine Learning Research,2016
+3ea3bbdc9aedd24fe0b5122e04b1d59e7e14135c,Mastering the Dungeon: Grounded Language Learning by Mechanical Turker Descent,CoRR,2017
+3eaa860f2735fce8b839237397455c13dfad1ed1,Dynamic belief fusion for object detection,2016 IEEE Winter Conference on Applications of Computer Vision (WACV),2016
+3e01f0bac3d5df0744caf8f42ae189e113d0758d,Structured learning for detection of social groups in crowd,2013 10th IEEE International Conference on Advanced Video and Signal Based Surveillance,2013
+3e7f54801c886ea2061650fd24fc481e39be152f,Towards Viewpoint Invariant 3D Human Pose Estimation,,2016
+3edc43e336be075dca77c7e173b555b6c14274d8,Travelmedia: An intelligent management system for media captured in travel,J. Visual Communication and Image Representation,2011
+3e734cc79496091e8b08df8d781d005651885c38,Object Recognition Using Deep Neural Networks: A Survey,CoRR,2014
+3ea27ba44a3e8a13148236807e569b909517ed89,Visual Route Recognition with a Handful of Bits,,2012
+50f7d3faeeaca41748df4b8fd1187712add72bb4,"Global consistency, local sparsity and pixel correlation: A unified framework for face hallucination",Pattern Recognition,2014
+500b92578e4deff98ce20e6017124e6d2053b451,Incremental Face Alignment in the Wild,2014 IEEE Conference on Computer Vision and Pattern Recognition,2014
+500ddabbfa3bb1064b6250cdd3d5fe207f7aed67,Towards Improved Cartoon Face Detection and Recognition Systems,CoRR,2018
+50bcbdcd9a21b88c2c3e640894081d1e225a5b80,Human Computing and Machine Understanding of Human Behavior: A Survey,,2006
+50a4a7725ee35124cca4e72a52bdf71f5088faf2,Trinary-Projection Trees for Approximate Nearest Neighbor Search,IEEE Transactions on Pattern Analysis and Machine Intelligence,2013
+50bada01c37daf2ed11350b4b0d2be28d9bafd0a,A MAP approach to landmarking,,2007
+506e76681d02dc3a3748e326fb57c4e4ab66778e,Predicting Functional Regions of Objects,,2013
+68a3f12382003bc714c51c85fb6d0557dcb15467,Learning the Visual Interpretation of Sentences,2013 IEEE International Conference on Computer Vision,2013
+68bf7fc874c2db44d0446cdbb1e05f19c2239282,Fast Kernel Matrix Computation for Big Data Clustering,,2015
+68cf263a17862e4dd3547f7ecc863b2dc53320d8,A comparative study on illumination preprocessing in face recognition,Pattern Recognition,2013
+6880f27e9fef716b0a67d0da37104e5f767bf5dc,Improving object localization using macrofeature layout selection,2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops),2011
+68bf34e383092eb827dd6a61e9b362fcba36a83a,"Multi-view, High-resolution Face Image Analysis",,2014
+68150d92e2ca3141ff3f4ab3d770e07f6ca13961,Using a Discrete Hidden Markov Model Kernel for lip-based biometric identification,Image Vision Comput.,2014
+68dd150767f947a596d347afdba5ef76c350f9c7,Multi-view fast object detection by using extended haar filters in uncontrolled environments,Pattern Recognition Letters,2012
+6801c8ea1fcb2f76799234a9a81c6199dd61b24c,Articulated Pose Estimation by a Graphical Model with Image Dependent Pairwise Relations,,2014
+68aea17b80e7e98245a8717cbce01bc229b0f175,Hand Pose Estimation through Weakly-Supervised Learning of a Rich Intermediate Representation,CoRR,2015
+68c586e81f593904221598f7ababb97570dbfe63,An improved collaborative representation based classification with regularized least square (CRC-RLS) method for robust face recognition,Neurocomputing,2016
+574751dbb53777101502419127ba8209562c4758,Gender classification from unaligned facial images using support subspaces,Inf. Sci.,2013
+57fe081950f21ca03b5b375ae3e84b399c015861,Adaptive Image Sampling and Windows Classification for On–board Pedestrian Detection,,2007
+57b8b28f8748d998951b5a863ff1bfd7ca4ae6a5,Symmetry-Aware Mesh Segmentation into Uniform Overlapping Patches (Supplementary Material),,2016
+57c929b6f30eec954dc5f17a52fbce290d8e3ca9,Performance characterization in computer vision: A guide to best practices,Computer Vision and Image Understanding,2008
+57101b29680208cfedf041d13198299e2d396314,Oxytocin differentially modulates eye gaze to naturalistic social signals of happiness and anger.,Psychoneuroendocrinology,2013
+57893403f543db75d1f4e7355283bdca11f3ab1b,A Dynamic Texture-Based Approach to Recognition of Facial Actions and Their Temporal Models,IEEE Transactions on Pattern Analysis and Machine Intelligence,2010
+57cf990bb3d64668614787708efa7cb06d548d06,Learning representations for object classification using multi-stage optimal component analysis,Neural networks : the official journal of the International Neural Network Society,2008
+5721216f2163d026e90d7cd9942aeb4bebc92334,Objective Micro-Facial Movement Detection Using FACS-Based Regions and Baseline Evaluation,CoRR,2016
+574f05ab2f135fad33ccbde85debdd12bb41bc87,Proposal-free Network for Instance-level Object Segmentation,CoRR,2015
+5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725,Merging Pose Estimates Across Space and Time,,2013
+578e755e669caee147964f9412c23943cd0f0789,"l2, 1 Regularized correntropy for robust feature selection",,2012
+57ce2a7078dbd8e98266270e1c3c78e71c7c9bd3,Disinhibited reactive attachment disorder symptoms impair social judgements from faces.,Psychiatry research,2014
+3b1260d78885e872cf2223f2c6f3d6f6ea254204,Face Tracking and Recognition at a Distance: A Coaxial & Concentric PTZ Camera System,,2011
+3b9eaf8d913f99adeb9192f68808efb7d2c0fac5,A Statistical Multiresolution Approach for Face Recognition Using Structural Hidden Markov Models,EURASIP J. Adv. Sig. Proc.,2008
+3b80bf5a69a1b0089192d73fa3ace2fbb52a4ad5,"""Magic Mirror in my Hand, what is the Sentiment in the Lens?"": an Action Unit based Approach for Mining Sentiments from Multimedia Contents",,2014
+3b6d7df0cc0aebb0736f3664da4ea8a03e559db9,Manifold-based Similarity Adaptation for Label Propagation,,2013
+3bd8f6577bd4dab492f9a0836bee1d99e461f028,Reduced GABAA receptors and benzodiazepine binding sites in the posterior cingulate cortex and fusiform gyrus in autism.,Brain research,2011
+3be7b7eb11714e6191dd301a696c734e8d07435f,Capturing the Visual Language of Social Media Exploiting Web Image Search for User Interest Profiling,,2015
+3be8a8ddb40399f1b0c02156440167152f8b0cba,"Autism : A Review of Biological Bases , Assessment , and Intervention",Unknown,2008
+6f07560cb2ad1d15746df6da0f992601c7bb8815,Determining the best suited semantic events for cognitive surveillance,Expert Syst. Appl.,2011
+6f2dc51d607f491dbe6338711c073620c85351ac,Capturing correlations of local features for image representation,Neurocomputing,2016
+6f75697a86d23d12a14be5466a41e5a7ffb79fad,Recognition and intensity estimation of facial expression using ensemble classifiers,2016 IEEE/ACIS 15th International Conference on Computer and Information Science (ICIS),2016
+6f0dd01fd7cac346a82618982dd81516387079de,Illumination compensation and normalization in eigenspace-based face recognition: A comparative study of different pre-processing approaches,Pattern Recognition Letters,2008
+03a6cc09984669e3e85c779363a93ae7c7b5f124,Dense 3D Face Correspondence,IEEE transactions on pattern analysis and machine intelligence,2017
+03d9ccce3e1b4d42d234dba1856a9e1b28977640,"Facial Affect ""In-the-Wild"": A Survey and a New Database",,2016
+033998b0ac8dd5b86693bd0d27cd3daa00459c17,Startle modulation in autism: Positive affective stimuli enhance startle response,,2009
+0391dca8171f52015eba4fb0e4be3be071950fc9,Computational Paradigm to Elucidate the Effects of Arts-Based Approaches and Interventions: Individual and Collective Emerging Behaviors in Artwork Construction,,2015
+036839afdfe7ae59bfcddd22d2c688b03bef3bee,Learning Hough regression models via bridge partial least squares for object detection,Neurocomputing,2015
+03e6b8f173012cc2e1410404f9c3bb97e0881c00,The effects of Pose on Facial Expression Recognition,,2009
+031497b0061f4536eb431545af69161d3a2b2d42,Image restoration using online photo collections,2009 IEEE 12th International Conference on Computer Vision,2009
+03104f9e0586e43611f648af1132064cadc5cc07,Subspace clustering using a symmetric low-rank representation,Knowl.-Based Syst.,2017
+0334cc0374d9ead3dc69db4816d08c917316c6c4,Wasserstein CNN: Learning Invariant Features for NIR-VIS Face Recognition,CoRR,2017
+031c721ce468a136b9bac87da7274229e7b967b0,Autism and the development of face processing.,Clinical neuroscience research,2006
+03e88bf3c5ddd44ebf0e580d4bd63072566613ad,How intelligent are convolutional neural networks?,CoRR,2017
+0390e80ffde8a6e6cd544c6b91b19ec747c73637,Fast Estimation of Approximate Matrix Ranks Using Spectral Densities,Neural computation,2017
+03b149f6ae3e366fb45ec09e0350b55cf5ac0459,Associative hierarchical CRFs for object class image segmentation,2009 IEEE 12th International Conference on Computer Vision,2009
+034c2ed71c31cb0d984d66c7ca753ef2cb6196ca,Feature learning via partial differential equation with applications to face recognition,Pattern Recognition,2017
+9b2c813a94cee031325b3e76e20db7072063549f,A scalable and flexible framework for smart video surveillance,Computer Vision and Image Understanding,2016
+9b7884c7522fbd8ea52234a2c1bc1454a81f7426,Accurate Iris Recognition At-a-distance and Under Less Constrained Environments,,2014
+9b000ccc04a2605f6aab867097ebf7001a52b459,PCANet: An energy perspective,CoRR,2016
+9bc01fa9400c231e41e6a72ec509d76ca797207c,Emotion Classification using Adaptive SVMs,Unknown,2012
+9b7a41215af8950ac8cc791aba4a90e5eb908836,A new perspective to null linear discriminant analysis method and its fast implementation using random matrix multiplication with scatter matrices,Pattern Recognition,2012
+9bec10a2dfa925259470843058aa9ea5fe7004fd,Application of BW-ELM model on traffic sign recognition,Neurocomputing,2014
+9b976f7bfa636d89510fe5ad7fb7a8057b86a57f,Feature encoding in band-limited distributed surveillance systems,"2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)",2017
+9baf0509f63a3322d127ae4374aa5b0f9d5439b8,Two Birds with One Stone: Transforming and Generating Facial Images with Iterative GAN,,2017
+9b4532181847d2a28c059e3c07a45c4ee8452cc6,Statistical Learning for Multimedia Information Retrieval and Their Applications Approved by Supervising,,2008
+9e36963aba45f76b9ee5056a92f1cc10894f7a77,Multi-dimensional subspace based feature selection for face recognition,,2010
+9ea992f009492888c482d5f4006281eaa8b758e7,"X2Face: A Network for Controlling Face Generation Using Images, Audio, and Pose Codes",Unknown,2018
+9e5c2d85a1caed701b68ddf6f239f3ff941bb707,Facial Expression Recognition Based on Significant Face Components Using Steerable Pyramid Transform,,2013
+04bb3fa0824d255b01e9db4946ead9f856cc0b59,Maximum A Posteriori Estimation of Distances Between Deep Features in Still-to-Video Face Recognition,CoRR,2017
+04b194d6358957e5a48b3e33a0738de59cf7cccf,Detecting Violent Crowds using Temporal Analysis of GLCM Texture,CoRR,2016
+0416f5d1564d1f2a597acac04e81b02b2eff67d2,A High Performance CRF Model for Clothes Parsing,,2014
+04e7a4ff6635552183e6a07b4ba7b415986b758b,3D face recognition using topographic high-order derivatives,2013 IEEE International Conference on Image Processing,2013
+04a1f6d15815957562932f030ce7590521a27763,"Autism, oxytocin and interoception",,2014
+042e83c87d9cd16eb2309d08b71ad955fc8a65d1,Probing short-term face memory in developmental prosopagnosia.,Cortex; a journal devoted to the study of the nervous system and behavior,2015
+049584922a6bb15ceb25fa1f771f834b9befbcae,Beyond visual features: A weak semantic image representation using exemplar classifiers for classification,Neurocomputing,2013
+04470861408d14cc860f24e73d93b3bb476492d0,Face Recognition using Features Combination and a New Non-linear Kernel,,2011
+044ca9f2194aca3cef7fbc6b94eb9857819a17be,Synthetic on-line signature generation. Part I: Methodology and algorithms,Pattern Recognition,2012
+04072a097a2ac6a0ee9132bb61bc95bd68bb0621,Non-Linear Subspace Clustering with Learned Low-Rank Kernels,,2017
+042daa253452d0e7e4b5920f5d56b3c7d8d7507b,Tracking Randomly Moving Objects on Edge Box Proposals,CoRR,2015
+04c6810b810f0e06f68954efb937a28de506aa43,Inferring tracklets for multi-object tracking,CVPR 2011 WORKSHOPS,2011
+04250e037dce3a438d8f49a4400566457190f4e2,A direct LDA algorithm for high-dimensional data - with application to face recognition,Pattern Recognition,2001
+049ad2deb4ce7d1f98057694406879816c4ac049,Self-taught object localization with deep networks,2016 IEEE Winter Conference on Applications of Computer Vision (WACV),2016
+04fd269c96f11235fbbb985bb16dacedaa3098fd,Grouping-By-ID: Guarding Against Adversarial Domain Shifts,,2017
+04d3299b91413aef9b412deace3da92409cd6639,"The effects of age, sex, and hormones on emotional conflict-related brain response during adolescence.",Brain and cognition,2015
+6aeee62bd32ebc3c5349689f9e4283afe8d162b4,Vs-star: A visual interpretation system for visual surveillance,Pattern Recognition Letters,2010
+6ad107c08ac018bfc6ab31ec92c8a4b234f67d49,Supervision-by-Registration: An Unsupervised Approach to Improve the Precision of Facial Landmark Detectors,CoRR,2018
+6a184f111d26787703f05ce1507eef5705fdda83,Mu desynchronization during observation and execution of facial expressions in 30-month-old children,,2016
+6a4f694b028b3d8392cbb185a34e49a657245265,IAIR-CarPed: A psychophysically annotated dataset with fine-grained and layered semantic labels for object recognition,Pattern Recognition Letters,2012
+6a0279c043eadaa09b5b486593c0f2f4f68adeb0,Monocular human pose tracking using multi frame part dynamics,2009 Workshop on Motion and Video Computing (WMVC),2009
+6aa43f673cc42ed2fa351cbc188408b724cb8d50,Field Studies with Multimedia Big Data: Opportunities and Challenges (Extended Version),CoRR,2017
+6a13e4a294115c439063617ec31d26f156e1142a,The light-from-above prior is intact in autistic children,,2017
+6a203565275610eac73461438f4cff1a35d5075c,Abnormal behavior detection using hybrid agents in crowded scenes,Pattern Recognition Letters,2014
+6aef8eeff5f532dcdad95043ba464720be664ab8,Computer vision for assistive technologies,Computer Vision and Image Understanding,2017
+6ab8f2081b1420a6214a6c127e5828c14979d414,Analysis of Local Appearance - based Face Recognition,,2006
+6a1b76f1ef876061ec479ab9bc13fcd517eb4188,Large Kernel Matters - Improve Semantic Segmentation by Global Convolutional Network,,2017
+6ab94ed33779d21d233c274cdc65c308955668a9,Bayesian Reconstruction of Natural Images from Human Brain Activity,Neuron,2009
+6a1beb34a2dfcdf36ae3c16811f1aef6e64abff2,Cardiac vagal tone predicts inhibited attention to fearful faces.,Emotion,2012
+32ffc4f2665f0061b556f60c4db0f3f5999ef004,Guided saccades modulate face- and body-sensitive activation in the occipitotemporal cortex during social perception.,Brain and cognition,2008
+32d555faaaa0a6f6f9dfc9263e4dba75a38c3193,Sparse discriminative multi-manifold embedding for one-sample face identification,Pattern Recognition,2016
+32510e7f88bc0767fbbc811397ba068dbc4cf549,Boosted Multiple Deformable Trees for Parsing Human Poses,,2007
+32bdafb45d38f7743dd5cd3ca4173beda7bdacc1,A two-layer Conditional Random Field for the classification of partially occluded objects,CoRR,2013
+32b8c9fd4e3f44c371960eb0074b42515f318ee7,Learning Human Pose Models from Synthesized Data for Robust RGB-D Action Recognition,CoRR,2017
+32d072ce790b62750fbe343d9dd8620939d84975,Improvements to Frank-Wolfe optimization for multi-detector multi-object tracking,CoRR,2017
+32a336e2a99eb113eeba7cbf622b463cd46d3138,A mid-level video representation based on binary descriptors: A case study for pornography detection,Neurocomputing,2016
+32ea1ed0155cd7d68eac5719693328620fd308f2,Contributions to Object Detection and Human Action Recognition,,2010
+3261a6ca620845566a61ebd0205dfb75d1c0d0f8,Learning visual biases from human imagination,,2015
+3291aff20c171927eed7896eba659ce599ccb666,F-SVM: Combination of Feature Transformation and SVM Learning via Convex Relaxation,CoRR,2015
+35b6a0d001de8d58d2bcf5dfe8922d59576a87e2,Faces of Pain: Automated Measurement of Spontaneous Facial Expressions of Genuine and Posed Pain,,2007
+351550aa56b81de2eef4b8379dc85722366635dd,Mobile Biometry (mobio) Face and Speaker Verification Evaluation,,2010
+352fa54953cfe0da7f1547bc6fdc43e0e53595cd,General multivariate linear modeling of surface shapes using SurfStat,NeuroImage,2010
+357963a46dfc150670061dbc23da6ba7d6da786e,Online Regression with Model Selection,,2018
+3564ee7ead6263a6a83107ec9610f72498163f0a,Next-active-object prediction from egocentric videos,J. Visual Communication and Image Representation,2017
+35f1bcff4552632419742bbb6e1927ef5e998eb4,Unsupervised Visual-Linguistic Reference Resolution in Instructional Videos,2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR),2017
+3554ed270f27f8c3d6f8d847f6c6b2c17a9668dd,Validating and Extending Semantic Knowledge Bases using Video Games with a Purpose,,2014
+35c973dba6e1225196566200cfafa150dd231fa8,A graphical model based solution to the facial feature point tracking problem,Image Vision Comput.,2011
+35e2fb4a72656cbeb2e9afa140fb01af03815202,Automatic surveillance in transportation hubs: No longer just about catching the bad guy,Expert Syst. Appl.,2015
+356ec17af375b63a015d590562381a62f352f7d5,Occlusion Geodesics for Online Multi-object Tracking,2014 IEEE Conference on Computer Vision and Pattern Recognition,2014
+351158e4481e3197bd63acdafd73a5df8336143b,Measuring Gender Bias in News Images,,2015
+35e15c8aa2a3f017462a64b5ef940baf5993480f,Top-Push Video-Based Person Re-identification,2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR),2016
+3597dce344b088f913689abde927a59a0bedde48,Hand action detection from ego-centric depth sequences with error-correcting Hough transform,Pattern Recognition,2017
+69c5fbc040f3ad70f396ec468bf1d725bb13531d,Online parameter tuning for object tracking algorithms,Image Vision Comput.,2014
+697b0b9630213ca08a1ae1d459fabc13325bdcbb,Learning to Invert Local Binary Patterns,,2016
+69188668dd6fe2075212a085bb63b5651f06704d,Unsmoothed functional MRI of the human amygdala and bed nucleus of the stria terminalis during processing of emotional faces,NeuroImage,2018
+69da91e45d74db80e8eb436db31d384f5322c1b6,Multi-Scale Gabor Feature Based Eye Localization,,2007
+69de532d93ad8099f4d4902c4cad28db958adfea,Face Attention Network: An Effective Face Detector for the Occluded Faces,CoRR,2017
+695423ede04c7ccf05997c123fd8ab9b94c4a088,"Framework for Performance Evaluation of Face, Text, and Vehicle Detection and Tracking in Video: Data, Metrics, and Protocol",IEEE Transactions on Pattern Analysis and Machine Intelligence,2009
+697c0c583cb62bdc847106f9ec79384ce66d8679,Evolving Deep Neural Networks,CoRR,2017
+69e52ce4df3fc14d2321637ac4e9843dc2e68b0b,Analysis of Noteworthy Issues in Illumination Processing for Face Recognition,IEICE Transactions,2015
+696e4f16723db3d1cb7888acb9ab6924a40cebfb,Improving scene attribute recognition using web-scale object detectors,Computer Vision and Image Understanding,2015
+69a9da55bd20ce4b83e1680fbc6be2c976067631,"""Here's looking at you, kid"". Detecting people looking at each other in videos",,2011
+69026120a20aafa64bed9fd3beccf546758642f8,3D face reconstructions from photometric stereo using near infrared and visible light,Computer Vision and Image Understanding,2010
+6974449ce544dc208b8cc88b606b03d95c8fd368,Local Evidence Aggregation for Regression-Based Facial Point Detection,IEEE Transactions on Pattern Analysis and Machine Intelligence,2013
+696ca58d93f6404fea0fc75c62d1d7b378f47628,Microsoft COCO Captions: Data Collection and Evaluation Server,CoRR,2015
+3c1f5580a66c9624c77f27ab8e4cf0d1b3d9d171,SkyFinder: attribute-based sky image search,ACM Trans. Graph.,2009
+3c03d95084ccbe7bf44b6d54151625c68f6e74d0,Contextual constraints based linear discriminant analysis,Pattern Recognition Letters,2011
+3c6b46b7867a387ef46cfa7eeb3f0cfda47af2d8,Neural Expectation Maximization,,2017
+3c68834951564fdc2ace1dcd5bf7d1317a22a176,Multi-target tracking by on-line learned discriminative appearance models,2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition,2010
+3cdb1364c3e66443e1c2182474d44b2fb01cd584,SegNet: A Deep Convolutional Encoder-Decoder Architecture for Scene Segmentation,,2016
+3c47022955c3274250630b042b53d3de2df8eeda,Discriminant analysis with tensor representation,2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05),2005
+3cd5b1d71c1d6a50fcc986589f2d0026c68d9803,On SIFTs and their scales,2012 IEEE Conference on Computer Vision and Pattern Recognition,2012
+3ce2ecf3d6ace8d80303daf67345be6ec33b3a93,Facial expression classification: An approach based on the fusion of facial deformations using the transferable belief model,Int. J. Approx. Reasoning,2007
+3ce0cecc16b49385d8d45044bef44a66e08b08bc,Multi-algorithm fusion with template protection,"2009 IEEE 3rd International Conference on Biometrics: Theory, Applications, and Systems",2009
+3cb64217ca2127445270000141cfa2959c84d9e7,Can body expressions contribute to automatic depression analysis?,2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG),2013
+3cd5da596060819e2b156e8b3a28331ef633036b,Dynamic composite faces are processed holistically,Vision Research,2015
+3cb1f4c1650f7e55b78abba5a00b56a90b8e0567,sTrack: Secure Tracking in Community Surveillance,,2014
+3cc5f82147bd203e6d0a2cf9d2b2ac0ce31e58d6,Automated face recognition of rhesus macaques,Journal of Neuroscience Methods,2018
+56754b3d841b31dc5fe2cddff5a1242786411e63,Towards Instance Segmentation with Object Priority: Prominent Object Detection and Recognition,Unknown,2017
+56662bb8a29e7d0064a35fb38cbabaef4578f3e0,DeepCAMP: Deep Convolutional Action&Attribute Mid-Level Patterns,,2016
+564035f1b8f06e9bb061255f40e3139fa57ea879,Automatic Recognition of Facial Actions in Spontaneous Expressions,Journal of Multimedia,2006
+563c940054e4b456661762c1ab858e6f730c3159,A Multi-modal Graphical Model for Scene Analysis,2015 IEEE Winter Conference on Applications of Computer Vision,2015
+56af5fae5142a7777001d80a2df0aa644186c4e8,Combining Neural Networks and Fuzzy Systems for Human Behavior Understanding,2012 IEEE Ninth International Conference on Advanced Video and Signal-Based Surveillance,2012
+51b1d708bdb758de8ce217c51992f794c70abe95,People silhouette extraction from people detection bounding boxes in images,Pattern Recognition Letters,2017
+51f9c3017455ca7440f34c01bf23764d3cc07aee,3D shape-based face representation and feature extraction for face recognition,Image Vision Comput.,2006
+51528cdce7a92835657c0a616c0806594de7513b,Visual Comparison of Images Using Multiple Kernel Learning for Ranking,,2015
+516c59a82888f9b401db596ff067a0e4f9bf3db3,Comparing and combining depth and texture cues for face recognition,Image Vision Comput.,2005
+519b69f50689cf0c702c8432282d98054095cec4,Attentional bias towards and away from fearful faces is modulated by developmental amygdala damage,,2016
+51348e24d2199b06273e7b65ae5f3fc764a2efc7,Scalable $k$-NN graph construction,CoRR,2013
+51673c4e2f92c04245c94b2b77065239b6a4922b,Tracking Gaze and Visual Focus of Attention of People Involved in Social Interaction,CoRR,2017
+510ad7d606c928fba52425dc804fba33dd8ff265,Introduction to face recognition and evaluation of algorithm performance,Computational Statistics & Data Analysis,2013
+51d438a7d0841fa25367323f7b12d76c76d44caa,Mobile Devices as an Infrastructure: A Survey of Opportunistic Sensing Technology,JIP,2015
+51173e0f31f362f3ea59ae3e98c5cdf31b2a2ec5,Face feature extraction and recognition based on discriminant subclass-center manifold preserving projection,Pattern Recognition Letters,2012
+51dc127f29d1bb076d97f515dca4cc42dda3d25b,3D Corpus of Spontaneous Complex Mental States,,2011
+3db75962857a602cae65f60f202d311eb4627b41,Deep Embedding Network for Clustering,2014 22nd International Conference on Pattern Recognition,2014
+3dbae414346398645001197a1d1ce37f5953aeae,Minimal Support Vector Machine,CoRR,2018
+3daa086acd367dc971a2dc1382caba2031294233,"Holistic, Instance-level Human Parsing",CoRR,2017
+3dac3d47ed220f010549d78819b27035d1ec6844,Identifying Noncooperative Subjects at a Distance Using Face Images and Inferred Three-Dimensional Face Models,"IEEE Transactions on Systems, Man, and Cybernetics - Part A: Systems and Humans",2009
+3dc522a6576c3475e4a166377cbbf4ba389c041f,The iNaturalist Challenge 2017 Dataset,CoRR,2017
+3dda181be266950ba1280b61eb63ac11777029f9,When Celebrities Endorse Politicians: Analyzing the Behavior of Celebrity Followers in the 2016 U.S. Presidential Election,CoRR,2017
+3dc2bbbd0c17643a8cd08ddf2ba385af2fc4b405,Online Semantic Activity Forecasting with DARKO,CoRR,2016
+3d6ee995bc2f3e0f217c053368df659a5d14d5b5,Learning a Two-Dimensional Fuzzy Discriminant Locality Preserving Subspace for Visual Recognition,IEICE Transactions,2014
+3dd906bc0947e56d2b7bf9530b11351bbdff2358,"The THUMOS challenge on action recognition for videos ""in the wild""",Computer Vision and Image Understanding,2017
+3d6943f1573f992d6897489b73ec46df983d776c,Unifying Low-Rank Models for Visual Learning,,2015
+3d1c9151929aece2c0cec96aa77f7d6ad30afbc9,Cross-architecture prediction based scheduling for energy efficient execution on single-ISA heterogeneous chip-multiprocessors,Microprocessors and Microsystems - Embedded Hardware Design,2015
+58c3aaf6157ac326e81f31ab5712072a506207fa,Color space normalization: Enhancing the discriminating power of color spaces for face recognition,Pattern Recognition,2010
+58a11053cb0d1322900273a450e4adf371252cd5,Differential modulation of neural activity throughout the distributed neural system for face perception in patients with Social Phobia and healthy subjects.,Brain research bulletin,2008
+5859774103306113707db02fe2dd3ac9f91f1b9e,"Generalization to Novel Views: Universal, Class-based, and Model-based Processing",International Journal of Computer Vision,1998
+5850aab97e1709b45ac26bb7d205e2accc798a87,Multimodal learning for facial expression recognition,Pattern Recognition,2015
+582c87ef9e98c24694c83eb03853eb96a4d84809,An evaluation of descriptors for large-scale image retrieval from sketched feature lines,Computers & Graphics,2010
+588041c603e5ce1cc8d3cfeae702a3439768ae0c,Face recognition on partially occluded images using compressed sensing,Pattern Recognition Letters,2014
+58cb1414095f5eb6a8c6843326a6653403a0ee17,Face recognition using multiple facial features,Pattern Recognition Letters,2007
+677251fae7ccc62bb776374daee146cc2b7f0f4b,DeepCoder: Semi-parametric Variational Autoencoders for Facial Action Unit Intensity Estimation,CoRR,2017
+677477e6d2ba5b99633aee3d60e77026fb0b9306,Multi-View Dynamic Facial Action Unit Detection,CoRR,2017
+67fdcbc07358605a8fd8eadf1200329af3c25749,Pigeonring: A Principle for Faster Thresholded Similarity Search,,2018
+679b72d23a9cfca8a7fe14f1d488363f2139265f,A New Approach to Face Recognition Using Dual Dimension Reduction,Unknown,2006
+673a7fdf36bb2ab2beca5678bd29eebf6eba0582,A heuristic model for optimizing fuzzy knowledge base in a pattern recognition system,Unknown,2012
+67a50752358d5d287c2b55e7a45cc39be47bf7d0,Correction: Low-Rank and Eigenface Based Sparse Representation for Face Recognition,,2015
+67af3aed0deb70eb0fcc089c47f15adfb8f637ee,Skew-sensitive boolean combination for adaptive ensembles - An application to face recognition in video surveillance,Information Fusion,2014
+67dca5503eb4068c6ed5be34b7488a4aad6686a2,Development of an Efficient Face Recognition System Based on Linear and Nonlinear Algorithms,,2017
+6709e3b3b860ddda5f79b30a0bb6080c6b747816,Emergence of Language with Multi-agent Games: Learning to Communicate with Sequences of Symbols,,2017
+67a496908ff624d0e8d8ac2412231c53f1424d59,Motion perception induced by dynamic grouping: A probe for the compositional structure of objects,Vision Research,2012
+67aa8c2e7fd5b079d8940ab4c5a8ab4013e45205,Tracking multiple interacting targets in a camera network,Computer Vision and Image Understanding,2015
+0bc6c3ee31d35eecf505bc8eabb98d553b351ba2,Robust-to-illumination Face Localisation Using Active Shape Models and Local Binary Patterns,,2006
+0b783e750da34c61ea404be8bc40788fd66c867d,Sliced Wasserstein Generative Models,,2017
+0b64351566cc0145ef9c963edcafe8229fcb1fd5,Robust Distance Metric Learning with Auxiliary Knowledge,,2009
+0b07f20c2037a6ca5fcc1dd022092fd5c57dd647,Anticipating the future by watching unlabeled video,CoRR,2015
+0ba64f4157d80720883a96a73e8d6a5f5b9f1d9b,Convolutional Point-set Representation: A Convolutional Bridge Between a Densely Annotated Image and 3D Face Alignment,,2018
+0b6406bb39bd18814ba5445d815b5e49757cfa03,Generating Instance Segmentation Annotation by Geometry-guided GAN,CoRR,2018
+0b5bd3ce90bf732801642b9f55a781e7de7fdde0,Face recognition using Histograms of Oriented Gradients,Pattern Recognition Letters,2011
+0baed6e8b8d5456980f2c9f64b6f566872c778be,FusionSeg: Learning to combine motion and appearance for fully automatic segmention of generic objects in videos,CoRR,2017
+0ba449e312894bca0d16348f3aef41ca01872383,A Unified Framework for Stochastic Matrix Factorization via Variance Reduction,CoRR,2017
+0b6c912b0c6beef4aea8cd7d0a265483caedb7c9,Monocular Semantic Occupancy Grid Mapping with Convolutional Variational Auto-Encoders,,2018
+0b37f9fc4fee278375c44d03d23bbea5d026dd2f,Active Shape Model with random forest for facial features detection,Proceedings of the 21st International Conference on Pattern Recognition (ICPR2012),2012
+0bc94d0c1e75d90be373c09941899a9810080924,Supervised Label Transfer for Semantic Segmentation of Street Scenes,,2010
+0b39f2e02c0e8102092f980615449a5c6c3087e9,Performance Comparison and Evaluation of AdaBoost and SoftBoost Algorithms on Generic Object Recognition,,2009
+0bdff80ffb4015fa12951f14c9d7673dd915fc81,Generalized Hadamard-Product Fusion Operators for Visual Question Answering,,2018
+0ba99a709cd34654ac296418a4f41a9543928149,Image Clustering Using Local Discriminant Models and Global Integration,IEEE Transactions on Image Processing,2010
+0bdadea798eaf39995a2c3ee4e772f579f4dff43,Fast Dictionary Learning with a Smoothed Wasserstein Loss,,2016
+0b3f354e6796ef7416bf6dde9e0779b2fcfabed2,Color Face Recognition using Quaternionic Gabor Filters,,2005
+9361b784e73e9238d5cefbea5ac40d35d1e3103f,Stable Multi-Target Tracking in Real-Time Surveillance Video (Preprint),,2011
+93d80d544b5e5e5f84605b29f3fdb9b502f2e99b,Localization and Object Recognition for Mobile Robots,,2010
+938d363a87fa4020fe1e526c439f6f52e66c33c9,Formulating Face Verification With Semidefinite Programming,IEEE Transactions on Image Processing,2007
+932b157ea4e554af580124b5575097d47fb6a707,Video-based Face Recognition : A Survey,Unknown,2010
+9329523dc0bd4e2896d5f63cf2440f21b7a16f16,"Do They All Look the Same? Deciphering Chinese, Japanese and Koreans by Fine-Grained Deep Learning",CoRR,2016
+93a93ee535980ee30e3a5e473a37d89ecb20c4a7,Feature-based affine-invariant detection and localization of faces,,2004
+94a7c97d1e3eb5dbfb20b180780451486597a9be,Facial attributes for active authentication on mobile devices,Image Vision Comput.,2017
+94aa8a3787385b13ee7c4fdd2b2b2a574ffcbd81,Real-time generic face tracking in the wild with CUDA,,2014
+94325522c9be8224970f810554611d6a73877c13,Comparator Networks,CoRR,2018
+94f74c6314ffd02db581e8e887b5fd81ce288dbf,A Light CNN for Deep Face Representation with Noisy Labels,,2015
+94347c0f73c31a9fdf04e9d581cfc47ee94e9ae3,Improved Facial-Feature Detection for AVSP via Unsupervised Clustering and Discriminant Analysis,EURASIP J. Adv. Sig. Proc.,2003
+94ac3008bf6be6be6b0f5140a0bea738d4c75579,Accelerating Convolutional Neural Networks for Continuous Mobile Vision via Cache Reuse,CoRR,2017
+0e87a1dd0a0a639b1bf45ad47008c02e05170729,A two-dimensional Neighborhood Preserving Projection for appearance-based face recognition,Pattern Recognition,2012
+0e4ad0e373eecb81ec3e171c42860589589ab1c5,Static vs. dynamic modeling of human nonverbal behavior from multiple cues and modalities,,2009
+0e0900b88c33b671be5dd2ded9885b6526d6b429,From captions to visual concepts and back,2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR),2015
+0ea53c86d24b1b80389eefaf0a84fb9b2108b795,Extracting Human Body based on Background Estimation in Modified HLS Color Space,,2009
+0ebc58bb5d517db0111f3565c4eb378d93dad908,Seeing Through Noise: Speaker Separation and Enhancement using Visually-derived Speech,CoRR,2017
+0eeca9b515768d11cd5f9c37dfd997b808213738,Detecting activities from body-worn accelerometers via instance-based algorithms,Pervasive and Mobile Computing,2010
+0e8760fc198a7e7c9f4193478c0e0700950a86cd,"Brute-Force Facial Landmark Analysis With A 140, 000-Way Classifier",CoRR,2018
+0e3840ea3227851aaf4633133dd3cbf9bbe89e5b,ChaLearn Looking at People: Events and Resources,CoRR,2017
+0e64ae81817eb259c7802da39018757bc98116ac,Innovation Pursuit: A New Approach to Subspace Clustering,IEEE Transactions on Signal Processing,2017
+0e5dad0fe99aed6978c6c6c95dc49c6dca601e6a,LATCH: Learned arrangements of three patch codes,2016 IEEE Winter Conference on Applications of Computer Vision (WACV),2016
+0e790522e68e44a5c99515e009049831b15cf29f,Reconstructing Storyline Graphs for Image Recommendation from Web Community Photos,2014 IEEE Conference on Computer Vision and Pattern Recognition,2014
+0ef399b8bad6b3d4a908e2a9318f2ba51699b4f1,Parsing Clothes in Unrestricted Images,,2013
+0e181a1b4c63143466c9ac858d46491f1ad11092,A Track Creation and Deletion Framework for Long-Term Online Multi-Face Tracking,,2012
+6080f26675e44f692dd722b61905af71c5260af8,Descriptor transition tables for object retrieval using unconstrained cluttered video acquired using a consumer level handheld mobile device,2016 International Joint Conference on Neural Networks (IJCNN),2016
+60d765f2c0a1a674b68bee845f6c02741a49b44e,An efficient illumination normalization method for face recognition,Pattern Recognition Letters,2006
+606c5f3ed9befa7113bc28436a8a91f176934874,Power-performance modeling on asymmetric multi-cores,"2013 International Conference on Compilers, Architecture and Synthesis for Embedded Systems (CASES)",2013
+60093318820f49b5a105352a6b8512d1601af153,Automatic and Efficient Long Term Arm and Hand Tracking for Continuous Sign Language TV Broadcasts,,2012
+600c8fcef0480b7061574532861369c1c631de75,Unsupervised Deep Representations for Learning Audience Facial Behaviors,CoRR,2018
+6097ea6fd21a5f86a10a52e6e4dd5b78a436d5bf,Multi-Region bilinear convolutional neural networks for person re-identification,2017 14th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS),2017
+60643bdab1c6261576e6610ea64ea0c0b200a28d,Multi-manifold metric learning for face recognition based on image sets,J. Visual Communication and Image Representation,2014
+60ed2c1acfddd02a0c0361366fc1a913e68946f1,Towards measuring the visualness of a concept,,2012
+6009f5c357a8b972c5eaafd104f03fde185568eb,Efficient regression of general-activity human poses from depth images,2011 International Conference on Computer Vision,2011
+34f9b561885198d3eaf8de2b6441d0a8aaeb9efa,Fast randomized Singular Value Thresholding for Nuclear Norm Minimization,2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR),2015
+34a41ec648d082270697b9ee264f0baf4ffb5c8d,Integration of multi-feature fusion and dictionary learning for face recognition,Image Vision Comput.,2013
+34f2bf9e98fc234d2c29f751f59407deef4f4404,How precise is gaze following in humans?,Vision Research,2008
+346578304ff943b97b3efb1171ecd902cb4f6081,Generative Multi-Adversarial Networks,CoRR,2016
+34072c31c2c778df471c9f0c43ba6198dfd0db32,Arbitrary Category Classification of Websites Based on Image Content,,2015
+3403cb92192dc6b2943d8dbfa8212cc65880159e,Automatically Building Appearance Models from Image Sequences using Salient Features,,1999
+347573e0b27a01748f8a6781dd84bb312aea5c53,Multi-Target Adaptive On-line Tracking based on WIHM,,2014
+34d207eb19a0f61194511951f2071aae36431d76,IsoMatch: Creating Informative Grid Layouts,Comput. Graph. Forum,2015
+34b7e826db49a16773e8747bc8dfa48e344e425d,Learning sign language by watching TV (using weakly aligned subtitles),2009 IEEE Conference on Computer Vision and Pattern Recognition,2009
+34786071f672b55fcdb24213a95f2ee52623ff23,MultiNet: Real-time Joint Semantic Reasoning for Autonomous Driving,CoRR,2016
+340716ba8c6ab315a4253cb3750c74aca54dc3aa,The nature of face representations in subcortical regions.,Neuropsychologia,2014
+34103d6e466b47ae820612e527db8cb46077cb13,Discriminative sparsity preserving embedding for face recognition,2013 IEEE International Conference on Image Processing,2013
+5a362e8f6eee03095fb3001b417fcddd80ea3d73,FRVT 2006: Quo Vadis face quality,Image Vision Comput.,2010
+5aafca76dbbbbaefd82f5f0265776afb5320dafe,Empirical analysis of cascade deformable models for multi-view face detection,Image Vision Comput.,2015
+5ab8d83870a6fa71f787f3fbfdd03786801a3496,Learned local Gabor patterns for face representation and recognition,Signal Processing,2009
+5a8f96f6906af8fbf73810b88c68b84a31555f60,Iterative Grassmannian optimization for robust image alignment,Image Vision Comput.,2014
+5ad07ae06ba8ae012367fd06205e948ff13cc7ab,Scale-Space Volume Descriptors for Automatic 3D Facial Feature Extraction,,2009
+5ab3cbdaf3b14352f47c3d2a91c9f2c247fe94a7,Action recognition in cluttered dynamic scenes using Pose-Specific Part Models,2011 International Conference on Computer Vision,2011
+5a6c021f80d82f3fae283865b259e398f9ed0f32,The neurobiology of emotion–cognition interactions: fundamental questions and strategies for future research,,2015
+5a603ab4b6353fc244361930c28723b3bc091f4b,Deep feature learning with relative distance comparison for person re-identification,Pattern Recognition,2015
+5a8a4b0ec264e0959f0c1effcb9de4a74cf6b148,Greedy Subspace Clustering,,2014
+5a1255d65e8309131638b3eb94aad5c52ab3629a,Improving Open Source Face Detection by Combining an Adapted Cascade Classification Pipeline and Active Learning,Unknown,2017
+5a021bb28e8c62a8c21fffa1ff35929ef2edce8d,Trajectory aligned features for first person action recognition,Pattern Recognition,2017
+5a029a0b0ae8ae7fc9043f0711b7c0d442bfd372,Autoencoder Feature Selector,CoRR,2017
+5a4a53339068eebd1544b9f430098f2f132f641b,Hierarchical Disentangled Representations,,2018
+5a383940c769660e53558d8f4bfcca7f5c730e75,Atypical neural networks for social orienting in autism spectrum disorders,NeuroImage,2011
+5a08b451b0397782d81edb5b614bb2a523c6be98,Learning Correspondence Structures for Person Re-Identification,IEEE Transactions on Image Processing,2017
+5a62f0b5d5afaec50318a6d9063920a6aca6e3f2,Gender Classification in Human Gait Using Support Vector Machine,,2005
+5f333a12dbf3671605bc3c715dcf08e37849e6e1,Fast and robust face recognition via coding residual map learning based adaptive masking,Pattern Recognition,2014
+5f64a2a9b6b3d410dd60dc2af4a58a428c5d85f9,Scalable Object Detection for Stylized Objects,CoRR,2017
+5fbad7c39509a3edb4f8a946e2676562e88264bc,Gait recognition without subject cooperation,Pattern Recognition Letters,2010
+5fa0e6da81acece7026ac1bc6dcdbd8b204a5f0a,On applying linear discriminant analysis for multi-labeled problems,Pattern Recognition Letters,2008
+5f6fafa788bd1b25c3c462c4013fd8fc0049be74,Autoencoder Inspired Unsupervised Feature Selection,"2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)",2018
+5fa932be4d30cad13ea3f3e863572372b915bec8,Orthogonal vs. uncorrelated least squares discriminant analysis for feature extraction,Pattern Recognition Letters,2012
+5f1dcaff475ef18a2ecec0e114a9849a0a8002b9,Parallelization of a color-entropy preprocessed Chan-Vese model for face contour detection on multi-core CPU and GPU,Parallel Computing,2015
+33c050241a203601b1e64ad45415e24c455ba7d0,Beyond χ2 Difference: Learning Optimal Metric for Boundary Detection,IEEE Signal Process. Lett.,2015
+338d4ea0813c668d6e43eb025ea580fbd76bec8a,Fearful faces heighten the cortical representation of contextual threat,NeuroImage,2014
+33264f4cfc7fa52ff2a6e9f739070e8501ce07bc,Video Captioning via Hierarchical Reinforcement Learning,CoRR,2017
+33aa980544a9d627f305540059828597354b076c,Face Recognition Using Eigen face Coefficients and Principal Component Analysis,Unknown,2010
+33554ff9d1d3b32f67020598320d3d761d7ec81f,Label Distribution Learning Forests,,2017
+33430277086192476fa6c32eae88688b0cb21228,The developmental neurobiology of autism spectrum disorder.,The Journal of neuroscience : the official journal of the Society for Neuroscience,2006
+339b6e6b358b40db5807ae9701556fed9b7961c4,Built-in Foreground/Background Prior for Weakly-Supervised Semantic Segmentation: Supplementary Material,,2016
+33403e9b4bbd913ae9adafc6751b52debbd45b0e,Pose Invariant Affect Analysis using Thin - Plate Splines,,
+05db8e3a342f8f239203c24d496e809a65ca7f73,Learning Diverse Image Colorization,2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR),2017
+0562fc7eca23d47096472a1d42f5d4d086e21871,On the Integration of Optical Flow and Action Recognition,CoRR,2017
+0509c442550571907258f07aad9da9d00b1e468b,On multi-task learning for facial action unit detection,2013 28th International Conference on Image and Vision Computing New Zealand (IVCNZ 2013),2013
+056294ff40584cdce81702b948f88cebd731a93e,Unsupervised Semantic Parsing of Video Collections,2015 IEEE International Conference on Computer Vision (ICCV),2015
+05b435174d24b14b17df4ce5af79dc6086a2b16f,Deep sketch feature for cross-domain image retrieval,Neurocomputing,2016
+05e6ef04116fb096e590d73d6938e4fed6426263,A new benchmark for stereo-based pedestrian detection,2011 IEEE Intelligent Vehicles Symposium (IV),2011
+057c8f04bc5e9f528589eeb3806734e38b1ecc83,Learning to hash logistic regression for fast 3D scan point classification,2010 IEEE/RSJ International Conference on Intelligent Robots and Systems,2010
+05f4d907ee2102d4c63a3dc337db7244c570d067,Face recognition from a single image per person: A survey,Pattern Recognition,2006
+05d0c5f579314e12c35fd35ed0858255d8c48887,Multi-scale volumes for deep object detection and localization,Pattern Recognition,2017
+0523e14247d74c4505cd5e32e1f0495f291ec432,Factoring Variations in Natural Images with Deep Gaussian Mixture Models,,2014
+05f988ce9a92436f194a8e06ba21b6a62a3aeef8,On the Robustness of Semantic Segmentation Models to Adversarial Attacks,CoRR,2017
+05caf67982ce3416a28550f291211bd1459f9aeb,Face Recognition Using Holistic Features and Linear Discriminant Analysis Simplification,,2012
+056d1637fac0510146431a03d81de1cbf1147d65,UHDB11 Database for 3D-2D Face Recognition,,2013
+05e96d76ed4a044d8e54ef44dac004f796572f1a,Three-Dimensional Face Recognition,International Journal of Computer Vision,2005
+9d1e20c3d2d0d67d3c2f06dbfc336170c772f2fd,Attribute-Graph: A Graph Based Approach to Image Ranking,2015 IEEE International Conference on Computer Vision (ICCV),2015
+9cfb3a68fb10a59ec2a6de1b24799bf9154a8fd1,Semi-supervised learning in Spectral Dimensionality Reduction,,2016
+9c1fa04553e96ccc59b9c0026e6e25fb2c7dae77,Transductive Zero-Shot Learning with a Self-training dictionary approach,CoRR,2017
+9c03db9ad53be4862625256a24f56cc7b0a79c23,Hyperdrive: A Systolically Scalable Binary-Weight CNN Inference Engine for mW IoT End-Nodes,,2018
+9c571732af31360b79cee46b1809d98a42423dc1,"Autism spectrum disorder, but not amygdala lesions, impairs social attention in visual search.",Neuropsychologia,2014
+9cd8a2d07f07d888fcf50aa0735d0831edcf5e46,Bag of contour fragments for robust shape classification,Pattern Recognition,2014
+027bdb0f502cc61b73be32427a8dd56e213cc2b8,Deep Neural Networks Under Stress,2016 IEEE International Conference on Image Processing (ICIP),2016
+02b25bec70f500269e547014635b42f556d8e173,Effects of intranasal oxytocin on the neural basis of face processing in autism spectrum disorder.,Biological psychiatry,2013
+029b53f32079063047097fa59cfc788b2b550c4b,Continuous Conditional Neural Fields for Structured Regression,,2014
+026168fd2bcfbcd02012e379f35b7cfdc4c95ee1,Novel Fisher discriminant classifiers,Pattern Recognition,2012
+02bd665196bd50c4ecf05d6852a4b9ba027cd9d0,Feature Selection with Annealing for Computer Vision and Big Data Learning,IEEE Transactions on Pattern Analysis and Machine Intelligence,2016
+0217fb2a54a4f324ddf82babc6ec6692a3f6194f,InfoGAN: Interpretable Representation Learning by Information Maximizing Generative Adversarial Nets,,2016
+02c993d361dddba9737d79e7251feca026288c9c,Automatic player detection and recognition in images using AdaBoost,Proceedings of 2012 9th International Bhurban Conference on Applied Sciences & Technology (IBCAST),2012
+02c7740af5540f23a2da23d1769e64a8042ec62e,Big Data : The Management,,2012
+a4b09fe27dc38a7646877440d76947cdcc895d4c,Learning occlusion with likelihoods for visual tracking,2011 International Conference on Computer Vision,2011
+a46283e90bcdc0ee35c680411942c90df130f448,Moment-based local binary patterns: A novel descriptor for invariant pattern recognition applications,Neurocomputing,2013
+a48a8e337a155d01a9652f3ea36675710e600222,Spatio-temporal crowd density model in a human detection and tracking framework,Sig. Proc.: Image Comm.,2015
+a4e47b6cbadfe5085c0a83f39513bda0ed3e9a92,Precise Eye Localization through a General-to-specific Model Definition,,2006
+a4cd3fc63ddc8468d3f684f32cb0578e41fed226,Generative Adversarial Style Transfer Networks for Face Aging,Unknown,2018
+a4cc626da29ac48f9b4ed6ceb63081f6a4b304a2,KCRC-LCD: Discriminative kernel collaborative representation with locality constrained dictionary for visual categorization,Pattern Recognition,2015
+a4874a54a2afd74d2cbed50f2276c91c49f12ccb,Feature and label relation modeling for multiple-facial action unit classification and intensity estimation,Pattern Recognition,2017
+a3b183d041f8f3e90a2cf904eaab544070216367,Gabor Ordinal Measures for Face Recognition,IEEE Transactions on Information Forensics and Security,2014
+a3dd6a08c4132358877e3b3c3eb87c3f3f4adda1,Partial least squares-based human upper body orientation estimation with combined detection and tracking,Image Vision Comput.,2014
+a33f20773b46283ea72412f9b4473a8f8ad751ae,ISTANBUL TECHNICAL UNIVERSITY F INFORMATICS INSTITUTE ROBUST FACE RECOGNITION ON NONLINEAR MANIFOLDS Ph.D. THESIS,,2012
+a30efa3271161dc7409530fe0ea76bad62a6f191,Mirror mirror on the wall... An intelligent multisensory mirror for well-being self-assessment,,2015
+a3239de6f4c300b135d5c417890ab68be8e90801,Vulnerabilities and Attack Protection in Security Systems Based on Biometric Recognition –tesis Doctoral– Vulnerabilidades Y Protección Frente a Ataques En Sistemas De Seguridad Basados En,,2010
+a3f67dbb0d72b236ff7c11b9d3611478d04b902e,Crowd behavior analysis: A review where physics meets biology,Neurocomputing,2016
+b5e4b4cf5178b06e0bb5fd016b8ff5f609eddc8e,Multi-Scale Gabor Feature Based Eye Localization,Unknown,2007
+b5872d6952a0a073491e845c2071c5b06d92ba29,Autism-Associated Promoter Variant in MET Impacts Functional and Structural Brain Networks,Neuron,2012
+b5a778e8ce38d1131b9304652c09b2645b41e0c1,Image search - from thousands to billions in 20 years,TOMCCAP,2013
+b562def2624f59f7d3824e43ecffc990ad780898,Autoencoder Inspired Unsupervised Feature Selection,"2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)",2018
+b59f441234d2d8f1765a20715e227376c7251cd7,Deep Continuous Clustering,CoRR,2018
+b53289f3f3b17dad91fa4fd25d09fdbc14f8c8cc,Online multi-object tracking via robust collaborative model and sample selection,Computer Vision and Image Understanding,2017
+b56882e8be1529717df8a5229edbad1d14f6a61a,Corrupted and occluded face recognition via cooperative sparse representation,Pattern Recognition,2016
+b233634f8944080bce276b6d8962810699494c93,Mage: Online Interference-Aware Scheduling in Multi-Scale Heterogeneous Systems,CoRR,2018
+b2a6518b47903f5e4318f31c099bbbe8f2425ab9,Detecting bipedal motion from correlated probabilistic trajectories,Pattern Recognition Letters,2013
+b27b507fa7b68897adab421d942395e98519cb21,Transport-Based Pattern Theory: A Signal Transformation Approach,CoRR,2018
+d9208c964bed4cc0055e313353c73fd00a60c412,Multi-class Fukunaga Koontz discriminant analysis for enhanced face recognition,Pattern Recognition,2016
+d979dbc55f73304a5d839079c070062e0b3ddbc5,Deep Learning Markov Random Field for Semantic Segmentation,IEEE transactions on pattern analysis and machine intelligence,2017
+d9739d1b4478b0bf379fe755b3ce5abd8c668f89,Unsupervised approach for the accurate localization of the pupils in near-frontal facial images,J. Electronic Imaging,2013
+d9a5640b66ddbb4f88a8ee4248116ff9a8719129,Non-Parametric Hand Pose Estimation with Object Context,,2013
+d9a1dd762383213741de4c1c1fd9fccf44e6480d,Reconstruction and analysis of multi-pose face images based on nonlinear dimensionality reduction,Pattern Recognition,2004
+d90e292c4bc2fdbeec5e494c92194e4d3420d760,Pairwise clustering based on the mutual-information criterion,Neurocomputing,2016
+aca273a9350b10b6e2ef84f0e3a327255207d0f5,On soft biometrics,Pattern Recognition Letters,2015
+ac79059866be081a8492c642291be159220979a5,Predicting psychological attributions from face photographs with a deep neural network,CoRR,2015
+ac5ab8f71edde6d1a2129da12d051ed03a8446a1,Comparator Networks,Unknown,2018
+ac0d3f6ed5c42b7fc6d7c9e1a9bb80392742ad5e,ViCom: Benchmark and Methods for Video Comprehension,CoRR,2016
+ac820d67b313c38b9add05abef8891426edd5afb,Fuzzy human motion analysis: A review,Pattern Recognition,2015
+ac6b280f2a43516fbaf92073304002f9f4da0188,Eyes closeness detection from still images with multi-scale histograms of principal oriented gradients,Pattern Recognition,2014
+acd5b6e9bb6a87a028aeb33c805e352ad98ade02,Low-rank representation based discriminative projection for robust feature extraction,Neurocomputing,2013
+ad7a3b600c67a604d51cfdc721069af31469c397,Recent advances in face biometrics with Gabor wavelets: A review,Pattern Recognition Letters,2010
+add020816e4eea7ec547d0c3233b15abf3413fd2,Content Extraction from Marketing Flyers,,2015
+ad08c6b0b42db6d6ba30387d558f5e427e39b7dc,Attribute CNNs for word spotting in handwritten documents,International Journal on Document Analysis and Recognition (IJDAR),2018
+ad8e7c9bf20a0507acb90b17574da631b3d8b7cd,Temporal dynamic appearance modeling for online multi-person tracking,Computer Vision and Image Understanding,2016
+ad6745dd793073f81abd1f3246ba4102046da022,A Coupled Hidden Markov Random Field model for simultaneous face clustering and tracking in videos,Pattern Recognition,2017
+ad5965e00d9511528c91adea0b356ad1e7081f0e,A weighted probabilistic approach to face recognition from multiple images and video sequences,Image Vision Comput.,2006
+adee5054f386c6eb8ca83417c9b9ce4571aa2633,2.5D face recognition using Patch Geodesic Moments,Pattern Recognition,2012
+bbf49e0dc67663b2d116eebdae93abb0f276ac8a,Face hallucination based on morphological component analysis,Signal Processing,2013
+bb557f4af797cae9205d5c159f1e2fdfe2d8b096,A distributed framework for trimmed Kernel k-Means clustering,Pattern Recognition,2015
+bbe91ef3ec4303d77a3847aa18fe5d9ef2739566,Factors that influence algorithm performance in the Face Recognition Grand Challenge,Computer Vision and Image Understanding,2009
+bbe1332b4d83986542f5db359aee1fd9b9ba9967,Convolutional neural network on three orthogonal planes for dynamic texture classification,Pattern Recognition,2018
+bb9b45f4b97935a95272c409d212589bc2a9a0cc,Efficient Multi-cue Scene Segmentation,,2013
+bbaebcc0a2d65dea32fe1cf2a2aa12c65bb1b3da,Track based relevance feedback for tracing persons in surveillance videos,Computer Vision and Image Understanding,2013
+bb1a6080072bd54eaa5afa1d29cc02525946d7bb,Functional brain correlates of social and nonsocial processes in autism spectrum disorders: an activation likelihood estimation meta-analysis.,Biological psychiatry,2009
+bbf01aa347982592b3e4c9e4f433e05d30e71305,Markov network-based multiple classifier for face image retrieval,2013 IEEE International Conference on Image Processing,2013
+bbf1396eb826b3826c5a800975047beabde2f0de,Illumination insensitive recognition using eigenspaces,Computer Vision and Image Understanding,2004
+bb69f750ccec9624f6dabd334251def2bbddf166,Automatic 3D reconstruction for face recognition,"Sixth IEEE International Conference on Automatic Face and Gesture Recognition, 2004. Proceedings.",2004
+bb750b4c485bc90a47d4b2f723be4e4b74229f7a,Robust Computer Vision: Theory and Applications,,2003
+d78077a7aa8a302d4a6a09fb9737ab489ae169a6,Robust face recognition with structural binary gradient patterns,Pattern Recognition,2017
+d7312149a6b773d1d97c0c2b847609c07b5255ec,An Experimentation Engine for Data-Driven Fashion Systems,,2017
+d71cefc30269feaa1de3e330b472a7dc66ec95d3,Person Re-Identification as Image Retrieval Using Bag of Ensemble Colors,IEICE Transactions,2015
+d75640108db01f7e0706780e6356a0c82c7eaf29,Multiple-Shot People Re-Identification by Patch-Wise Learning,IEICE Transactions,2015
+d78373de773c2271a10b89466fe1858c3cab677f,Pain intensity estimation by a self-taught selection of histograms of topographical features,Image Vision Comput.,2016
+d71a2baecdf28fd946458a34bfbc034681a82694,Integrate the original face image and its mirror image for face recognition,Neurocomputing,2014
+d7f5f4ae54e8020e8c01f5ea5de22a370d3e4b21,M-VIVIE: A multi-thread video indexer via identity extraction,Pattern Recognition Letters,2012
+d0eb3fd1b1750242f3bb39ce9ac27fc8cc7c5af0,Minimalistic CNN-based ensemble model for gender prediction from face images,Pattern Recognition Letters,2016
+d03baf17dff5177d07d94f05f5791779adf3cd5f,Real time face and mouth recognition using radial basis function neural networks,Expert Syst. Appl.,2009
+bef503cdfe38e7940141f70524ee8df4afd4f954,Improving class separability using extended pixel planes: a comparative study,Machine Vision and Applications,2011
+b3b51d80bae381e7a143c6cb532873a273b38e51,Information fusion in content based image retrieval: A comprehensive overview,Information Fusion,2017
+b3e51092fa8b127bef0e46c2e54f24bdaedf30c3,Automatic Face Recognition System for Hidden Markov Model Techniques,,2012
+b349714e9eb089c3a756c03533525cb3d5a84ff8,Face recognition based on 2D images under illumination and pose variations,Pattern Recognition Letters,2011
+b3effb96c09eabada94f9105241fe66658fe77b1,Synchronizing visual and language processing: an effect of object name length on eye movements.,Psychological science,2000
+b3cb117f2424209d5997d5745772dfadd02dc80d,Semantic Summarization of Egocentric Photo Stream Events,CoRR,2017
+b3fa62a7028578be8d1f8eb0877c762a4d6639c1,Efficient Face Recognition System Using Random Forests,,
+b3e6e4bed1b5f73aa114d19dcab214661a1d0cd6,Cooperative passers-by tracking with a mobile robot and external cameras,Computer Vision and Image Understanding,2013
+b372432ccd4c9cf169b1eee2adadae074eb3a3fd,Hallucinating face by position-patch,Pattern Recognition,2010
+b317d03d82c22f52dbd79a3a19b1384aa53a3925,Impact of Anodal and Cathodal Transcranial Direct Current Stimulation over the Left Dorsolateral Prefrontal Cortex during Attention Bias Modification: An Eye-Tracking Study,,2015
+b36833aacc1ae72f9ccb9eee9d9623df19802fe0,An on-line learning method for face association in personal photo collection,Image Vision Comput.,2012
+b3afa234996f44852317af382b98f5f557cab25a,A Closer Look at Spatiotemporal Convolutions for Action Recognition,CoRR,2017
+b36b1485cc07df374cf2b01e4797a98da887d641,The not face: A grammaticalization of facial expressions of emotion.,Cognition,2016
+dfabe7ef245ca68185f4fcc96a08602ee1afb3f7,Group-aware deep feature learning for facial age estimation,Pattern Recognition,2017
+df3d2f514d41c0c37293d88d4a594e5cfc6c3bea,Happy mouth and sad eyes: scanning emotional facial expressions.,Emotion,2011
+dfdc683a113c6543de36c5bec9325bbf4a2ad25c,A novel feature descriptor based on biologically inspired feature for head pose estimation,Neurocomputing,2013
+df0090524461ac8e16987a6e30d4287f7c8e0c8c,Finger vein recognition with manifold learning,J. Network and Computer Applications,2010
+daee91e5f88efcdf154dbf6f123a97ed8c5bb643,Shape analysis of local facial patches for 3D facial expression recognition,Pattern Recognition,2011
+da15344a4c10b91d6ee2e9356a48cb3a0eac6a97,Exploiting IoT technologies for enhancing Health Smart Homes through patient identification and emotion recognition,Computer Communications,2016
+da59f4fa6dc73b2b8b041e7d4e0e7f121297658a,Statistical non-rigid ICP algorithm and its application to 3D face alignment,Image Vision Comput.,2017
+da5bfddcfe703ca60c930e79d6df302920ab9465,An analysis of facial expression recognition under partial facial image occlusion,Image Vision Comput.,2008
+daba8f0717f3f47c272f018d0a466a205eba6395,Neither Global Nor Local: Regularized Patch-Based Representation for Single Sample Per Person Face Recognition,International Journal of Computer Vision,2014
+b42741dfc3a7f7d1d110978323e18fc71e2d67fe,Improving verification accuracy by synthesis of locally enhanced biometric images and deformable model,Signal Processing,2007
+b4ab2555d5690e8e6fb1cf23c995a120181698a6,Grounding semantic categories in behavioral interactions: Experiments with 100 objects,Robotics and Autonomous Systems,2014
+b4a4e93343e778d0b86c56132a63aceaa70911f7,Discovering object aspects from video,Image Vision Comput.,2016
+b488897ff5a357ad31c3b15ee9440de17df2200e,Near infrared face recognition using Zernike moments and Hermite kernels,Inf. Sci.,2015
+b4d7ca26deb83cec1922a6964c1193e8dd7270e7,Learning to score and summarize figure skating sport videos,CoRR,2018
+b4f90b09bb99405885bc9413288468f5892a62f7,Multi-class boosting with asymmetric binary weak-learners,Pattern Recognition,2014
+a228ba020bd321d29ab24485cb2988a62707fd64,Using objective ground-truth labels created by multiple annotators for improved video classification: A comparative study,Computer Vision and Image Understanding,2013
+a22bc85367a6474a91fecea9dd20681451c6fd0d,Applications of machine learning in animal behaviour studies,Animal Behaviour,2017
+a219e7a1fa717d4575284ccc80e850088dbe9597,A novel approach to expression recognition from non-frontal face images,2009 IEEE 12th International Conference on Computer Vision,2009
+a2d9c9ed29bbc2619d5e03320e48b45c15155195,Facial expression recognition based on anatomy,Computer Vision and Image Understanding,2014
+a2b54f4d73bdb80854aa78f0c5aca3d8b56b571d,Computer Recognition of Facial Actions: A study of co-articulation effects,,2001
+a2fe4f7bdfbdc32393ab6102c8e1063542229758,Two retinotopic visual areas in human lateral occipital cortex.,The Journal of neuroscience : the official journal of the Society for Neuroscience,2006
+a2344004f0e1409c0c9473d071a5cfd74bff0a5d,Learnable PINs: Cross-modal Embeddings for Person Identity,Unknown,2018
+a52c72cd8538c62156aaa4d7e5c54946be53b9bb,Spontaneous facial expression recognition: A robust metric learning approach,Pattern Recognition,2014
+a57b87baca7f3512372e7d9cfb5a712c80b53289,3D Face recognition using distinctiveness enhanced facial representations and local feature hybrid matching,"2010 Fourth IEEE International Conference on Biometrics: Theory, Applications and Systems (BTAS)",2010
+a5173a67c7f32582739849cfa5e07278ee6746ab,Local contrast phase descriptor for fingerprint liveness detection,Pattern Recognition,2015
+a53fe4347da39dcf61ac37cee66c945e79a5052e,Category Independent Object Proposals,,2010
+a57995ecac8275b4c7d614f17538cf771b0b1657,A training-free nose tip detection method from face range images,Pattern Recognition,2011
+a509f0528c5fbb36993324922b13a9a303ce82ee,Evaluation of the Modelling of Local Areas and Errors of Localization in FRGC' 05,,2005
+a57b92ed2d8aa5b41fe513c3e98cbf83b7141741,Relevant Component Analysis for static facial expression classification,,2005
+a5c04f2ad6a1f7c50b6aa5b1b71c36af76af06be,Combined Support Vector Machines and Hidden Markov Models for Modeling Facial Action Temporal Dynamics,,2007
+a503eb91c0bce3a83bf6f524545888524b29b166,A Generative Approach to Zero-Shot and Few-Shot Action Recognition,CoRR,2018
+a562180056cc4906d6d5ef9d2b4ed098d8512317,Dropout-GAN: Learning from a Dynamic Ensemble of Discriminators,CoRR,2018
+bd36544bfecd5b9ea58d0eab186968b3c9d181aa,Weakly supervised 3D Reconstruction with Adversarial Constraint,,2017
+bd6333229199e7b4ac4b9ae7a7cae50ff9b9f5b2,A neural network with a single recurrent unit for associative memories based on linear optimization,Neurocomputing,2013
+bd9eb65d9f0df3379ef96e5491533326e9dde315,Graph Distillation for Action Detection with Privileged Information,CoRR,2017
+bd8e2d27987be9e13af2aef378754f89ab20ce10,Facial feature points detecting based on Gaussian Mixture Models,Pattern Recognition Letters,2015
+bd0d93e67c0b439caf372b704a377670f0c89be8,Assessing the precision of gaze following using a stereoscopic 3D virtual reality setting,Vision Research,2015
+bd866bbbaebc6bfc9707319312b44514e679f670,Enhanced graph-based dimensionality reduction with repulsion Laplaceans,Pattern Recognition,2009
+d154df56ac4382a0a81eb24b190bdba240546d87,Using Gaussian distribution to construct fitness functions in genetic programming for multiclass object classification,Pattern Recognition Letters,2006
+d13f176178f90efa6f91e9f45f710e72e5675c9a,Pedestrian Counting using Deep Models Trained on Synthetically Generated Images,Unknown,2017
+d18ee185ab659f218c97db53e22a2b98f3c642a3,Top-Down Feedback for Crowd Counting Convolutional Neural Network,Unknown,2018
+d6a4a34829b3b55497210ddbe88ad63ff801faae,Object Referring in Visual Scene with Spoken Language,CoRR,2017
+d6fb606e538763282e3942a5fb45c696ba38aee6,Affective Body Expression Perception and Recognition: A Survey,IEEE Transactions on Affective Computing,2013
+d6d7a248b1f59981277121b9c0626ee8d5495757,Superpixel clustering with deep features for unsupervised road segmentation,CoRR,2017
+bc704680b5032eadf78c4e49f548ba14040965bf,"Face Normals ""In-the-Wild"" Using Fully Convolutional Networks",,2017
+bcc346f4a287d96d124e1163e4447bfc47073cd8,Incremental Boosting Convolutional Neural Network for Facial Action Unit Recognition,Unknown,2016
+bcfeac1e5c31d83f1ed92a0783501244dde5a471,Achieving robust face recognition from video by combining a weak photometric model and a learnt generic face invariant,Pattern Recognition,2013
+bcc5cbbb540ee66dc8b9a3453b506e895d8395de,Joint Estimation of Pose and Face Landmark,,2014
+bc2852fa0a002e683aad3fb0db5523d1190d0ca5,Learning from Ambiguously Labeled Face Images,IEEE transactions on pattern analysis and machine intelligence,2017
+bcb99d5150d792001a7d33031a3bd1b77bea706b,Facial descriptors for human interaction recognition in still images,Pattern Recognition Letters,2016
+ae42dc9ef4a03caf69c23c117621108211977405,Visible-light and near-infrared face recognition at a distance,J. Visual Communication and Image Representation,2016
+aed321909bb87c81121c841b21d31509d6c78f69,"Unfamiliar Sides , Video , Image Enhancement in Face Recognition",,2016
+ae936628e78db4edb8e66853f59433b8cc83594f,Person Re-identification via Structured Prediction,CoRR,2014
+ae19008898ea1347cf0f7ecb81b71aa18137085a,Time-slice Prediction of Dyadic Human Activities,,2015
+aeb0f4ffb57e40c93606458707622c0b37ea3790,Video-Based Person Re-Identification via Self Paced Weighting,Unknown,2018
+ae6c9610297186e0e1d4347a2d203fe5f86dd42b,Adaptive ensembles for face recognition in changing video surveillance environments,Inf. Sci.,2014
+aeedc6b7f2ceaaf9d9cd8e327ca979128c1947e9,Locality-sensitive dictionary learning for sparse representation based classification,Pattern Recognition,2013
+ae2ee60219d63475c56fcb6c3f2b3664b3c4dbd9,The MAHNOB Mimicry Database: A database of naturalistic human interactions,Pattern Recognition Letters,2015
+ae872749c88331a93f8078aebf3a8d7f6d9c48fa,Modeling local behavior for predicting social interactions towards human tracking,Pattern Recognition,2014
+ae4e2c81c8a8354c93c4b21442c26773352935dd,On the kernel Extreme Learning Machine classifier,Pattern Recognition Letters,2015
+d81253d750f2c204899e71fd68ad60680f9c8d57,Impaired perception of facial emotion in developmental,,
+d8e5362a16914e779a135a5285775be49d60dccb,"Fast, Robust and Non-convex Subspace Recovery",,2014
+d84e075d571193bc616218a84951375e63ab20c8,"Driving Scene Perception Network: Real-time Joint Detection, Depth Estimation and Semantic Segmentation",,2018
+d8cbe136dd95d287786d0ed5f0d0e53f143bca7f,Glucose metabolic changes in the prefrontal cortex are associated with HPA axis response to a psychosocial stressor.,Psychoneuroendocrinology,2008
+abc5ee7fc8129c82fde8d151408042e4673762f6,Multi-part body segmentation based on depth maps for soft biometry analysis,Pattern Recognition Letters,2015
+abb68f5f393f60695ab16dcee08f2638ab3c7809,Probabilistic Zero-shot Classification with Semantic Rankings,CoRR,2015
+ab4c2e8071d99bdc8c1bff9bc0d6817300ee371a,Snapshot Spectral and Polarimetric Imaging; Target Identification with,,2013
+ab368172c8acc87ec1dc87d1ad607546b2ea8f6a,Gender Classification of Face Images: The Role of Global and Feature-Based Information,,2004
+ab9d368b2ebcc34e38046de49437b7bb224c5b56,Domain Adaptation of Deformable Part-Based Models,IEEE Transactions on Pattern Analysis and Machine Intelligence,2014
+abb74644e2bb1d1e8610e9782a6050192c3ceddf,3D human face description: landmarks measures and geometrical features,Image Vision Comput.,2012
+abd555f397abb6f46aad81c683b279cbd6d22637,Transfer between pose and expression training in face recognition,Vision Research,2009
+ab1dfcd96654af0bf6e805ffa2de0f55a73c025d,Higher order orthogonal moments for invariant facial expression recognition,Digital Signal Processing,2010
+ab1900b5d7cf3317d17193e9327d57b97e24d2fc,Expression transfer for facial sketch animation,Signal Processing,2011
+e5737ffc4e74374b0c799b65afdbf0304ff344cb,A literature survey on robust and efficient eye localization in real-life scenarios,Pattern Recognition,2013
+e5781730c9f1c81b08cf4b4a924f1058efe77908,Real-Time Human Detection Using Local Features Based on Depth Information,,2010
+e50ee29ca12028cb903cd498bb9cacd41bd5ce3a,Single-view-based 3D facial reconstruction method robust against pose variations,Pattern Recognition,2015
+e5dfd17dbfc9647ccc7323a5d62f65721b318ba9,Using Correlated Regression Models to Calculate Cumulative Attributes for Age Estimation,IEICE Transactions,2015
+e5d27e52fafde2b09ae6568fc6bde28468f5517e,Information extraction from shadowed regions in images: An eye movement study,Vision Research,2015
+e27c92255d7ccd1860b5fb71c5b1277c1648ed1e,Multilinear class-specific discriminant analysis,Pattern Recognition Letters,2017
+e22979cdf147a63be74f3816ef59ef11f3508919,Learning Image Representations by Completing Damaged Jigsaw Puzzles,CoRR,2018
+e2bf47d2e3339f366de8947cbb5a894608b91bf9,"Face Recognition for Newborns , Toddlers , and Pre-School Children : A Deep Learning Approach",Unknown,2018
+e2ff4d1bbd6333763292d9f605855b14c27b550b,Overreactive brain responses to sensory stimuli in youth with autism spectrum disorders.,Journal of the American Academy of Child and Adolescent Psychiatry,2013
+e200c3f2849d56e08056484f3b6183aa43c0f13a,The C-loss function for pattern classification,Pattern Recognition,2014
+e224d8fd66e3594cf27bcd06cc2ed25fc4419b7f,Geometrical descriptors for human face morphological analysis and recognition,Robotics and Autonomous Systems,2012
+e2d37596f1ad4823fe042f37137ff54048231de2,Sketch retrieval via local dense stroke features,Image Vision Comput.,2016
+e260ce226de2c945967a7c8d8363f22af02dd2bb,Performing Facial Expression Synthesis on Robot Faces: A Real-time Software System,,2015
+f4f5a68c8e7a90865c4e1a653db4ae788e387bb1,Transductive Zero-Shot Recognition via Shared Model Space Learning,,2016
+f4c01fc79c7ead67899f6fe7b79dd1ad249f71b0,Pose-invariant face recognition by matching on multi-resolution MRFs linked by supercoupling transform,Computer Vision and Image Understanding,2011
+f47e7253f0763579c6c045cd3fa5b34b0697f254,Perception-driven facial expression synthesis,Computers & Graphics,2012
+f48b89fa0aa7435cfb7fcd801a51b2504b9c4515,Disentangling the Impact of Social Groups on Response Times and Movement Dynamics in Evacuations,,2015
+f48665764089d42bb0123914e4ed0a3770f5d706,Shape matching using a binary search tree structure of weak classifiers,Pattern Recognition,2012
+f3fcaae2ea3e998395a1443c87544f203890ae15,Robust part-based face matching with multiple templates,2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG),2013
+f3196b1bcd1af733347edc923d43018724fc73c9,Person re-identification using visual attention,2017 IEEE International Conference on Image Processing (ICIP),2017
+f355e54ca94a2d8bbc598e06e414a876eb62ef99,"A survey on heterogeneous face recognition: Sketch, infra-red, 3D and low-resolution",Image Vision Comput.,2016
+eb724fed2a6457a2ee41a205892004116180bbfd,Pii: S0262-8856(99)00059-1,,2000
+ebedc841a2c1b3a9ab7357de833101648281ff0e,Facial landmarking for in-the-wild images with local inference based on global appearance,Image Vision Comput.,2015
+eb5b1dfe580722c0fc5ca202a1259b0bd62354c9,Transforming the mirror: power fundamentally changes facial responding to emotional expressions.,Journal of experimental psychology. General,2014
+eb65354fe51a177b2366f8d10b8140912e883d62,Estimating the Success of Unsupervised Image to Image Translation,Unknown,2018
+eb6b8359909749e52efff9b78b037a88468101fd,Tracking and Recognizing Multiple Faces Using Kalman Filter and ModularPCA,,2011
+ebb9d53668205c5797045ba130df18842e3eadef,Fully Context-Aware Video Prediction,CoRR,2017
+ebb139368e425d720d47a13dcd269014027b40ae,A secure biometric discretization scheme for face template protection,Future Generation Comp. Syst.,2012
+eb72dcf0ba423d0e12d63cd7881f2ac5dfda7984,Associative Compression Networks,,2018
+eb98feac659ff5c7e27bc8eb4f425bb158e3fc5e,Biometrics Selection and Their Influence over the Life Cycle of Electronic Identity Documents,,2017
+c79fe054f971a454406f46b62b5a397b95240046,Comparing and combining lighting insensitive approaches for face recognition,Computer Vision and Image Understanding,2010
+c784ba7120e807e244f508c71e96cd7677fe1109,An efficient 3D face recognition approach based on the fusion of novel local low-level features,Pattern Recognition,2013
+c0a7ffb06bf23cffc49e67d6359b1fb5db336edc,Geo-distinctive Visual Element Matching for Location Estimation of Images,CoRR,2016
+c0d1d9a585ef961f1c8e6a1e922822811181615c,Face and gaze perception in borderline personality disorder: An electrical neuroimaging study,Psychiatry Research: Neuroimaging,2017
+c0d5c3aab87d6e8dd3241db1d931470c15b9e39d,Bag of visual words and fusion methods for action recognition: Comprehensive study and good practice,Computer Vision and Image Understanding,2016
+c09032896722aa35a905d8905c1cfe67cead6e01,Three-dimensional face recognition using combinations of surface feature map subspace components,Image Vision Comput.,2008
+c0efa1a3cea5b1f450283b81eee9942defaad4d2,Similar exemplar pooling processes underlie the learning of facial identity and handwriting style: Evidence from typical observers and individuals with Autism.,Neuropsychologia,2016
+ee461d060da58d6053d2f4988b54eff8655ecede,Modelling facial colour and identity with Gaussian mixtures,Pattern Recognition,1998
+ee4c659ad75c302b223a3815a65aa2e304cccc30,Binary Biometrics: An Analytic Framework to Estimate the Bit Error Probability under Gaussian Assumption,"2008 IEEE Second International Conference on Biometrics: Theory, Applications and Systems",2008
+eea0640261e2d9bb6b851a519ef1a036093ec04f,The integration of visual context information in facial emotion recognition in 5- to 15-year-olds.,Journal of experimental child psychology,2016
+ee4bd9419405e051b709d90b63d9d264bd2fd796,Automatic head pose estimation with Synchronized sub manifold embedding and Random Regression Forests,,2014
+ee267e831aba3a2ead7ce6109b48afd41a30323f,Enhancing Anger Perception With Transcranial Alternating Current Stimulation Induced Gamma Oscillations.,Brain stimulation,2015
+c903af0d69edacf8d1bff3bfd85b9470f6c4c243,Nyström-based approximate kernel subspace learning,Pattern Recognition,2016
+c95cd791ad0cb0a08cb39e987f725eabe3a08648,Are all objects equal? Deep spatio-temporal importance prediction in driving videos,Pattern Recognition,2017
+c9bda86e23cab9e4f15ea0c4cbb6cc02b9dfb709,Learning to predict human behaviour in crowded scenes,,2017
+c9d3b06d71f69dad7a9d3f312cf9dd008e2634ef,A multi-class classification strategy for Fisher scores: Application to signer independent sign language recognition,Pattern Recognition,2010
+c9168495c99b37ce601bc778419c2667f34cb29b,Image Matching: An Application-oriented Benchmark,Unknown,2017
+fcd7407d0df030d03e3a8879f184d4b3ceac4fb2,NISTIR 7923 Ground Truth Systems for Object Recognition and Tracking,,2013
+fc369a73eea045497f82634e6ea0c13477728f2e,Circularity and self-similarity analysis for the precise location of the pupils,2013 10th IEEE International Conference on Advanced Video and Signal Based Surveillance,2013
+fc516a492cf09aaf1d319c8ff112c77cfb55a0e5,"XBadges. Identifying and training soft skills with commercial video games. Improving persistence, risk taking & spatial reasoning with commercial video games and facial and emotional recognition system",,2017
+fc6c5ef8d97d0c4d7f2a8576f8f5fe8e0ed83fbb,3D Face Reconstruction with Geometry Details from a Single Image,CoRR,2017
+fc2be7dbdd8f5cbcb6fdf5d18241ddd630d03864,Human pose estimation via multi-layer composite models,Signal Processing,2015
+fdaba5860f753b1e5714d582db9851cd0aa29139,Evolutionary Hough Games for coherent object detection,Computer Vision and Image Understanding,2012
+fd38fd64398502c0f0fc010939e7f61d44a7b5a6,A Survey of Recent Advances in Texture Representation,CoRR,2018
+f2490341a21e54a800e5ac7a1c0a25b60f0e0c36,Gaze aversion during social style interactions in autism spectrum disorder and Williams syndrome.,Research in developmental disabilities,2013
+f2c568fe945e5743635c13fe5535af157b1903d1,Automatic Detection of Acromegaly From Facial Photographs Using Machine Learning Methods,,2018
+f26097a1a479fb6f32b27a93f8f32609cfe30fdc,What is the best way for extracting meaningful attributes from pictures?,Pattern Recognition,2017
+f214bcc6ecc3309e2efefdc21062441328ff6081,Speaker verification in score-ageing-quality classification space,Computer Speech & Language,2013
+f5c5f5fb2bfd11b65265a7a088b50185bdc7bccd,CovGa: A novel descriptor based on symmetry of regions for head pose estimation,Neurocomputing,2014
+f5541330741315b98e590e405c96c72bac49d51c,Biometric Score Calibration for Forensic Face Recognition,,2014
+e3657ab4129a7570230ff25ae7fbaccb4ba9950c,Recovering Joint and Individual Components in Facial Data,,2018
+e3a3a6c1f4802ea1cd0c34d0b34e4c83689895ac,An effective unconstrained correlation filter and its kernelization for face recognition,Neurocomputing,2013
+e381edad6f9040712e6a50caf9c82465722aa04c,Image classification based on complex wavelet structural similarity,Sig. Proc.: Image Comm.,2013
+e3cc912b9ca074e3d419c1dd289fd1b067fb61d5,Identifying multiple objects from their appearance in inaccurate detections,Computer Vision and Image Understanding,2015
+cfa572cd6ba8dfc2ee8ac3cc7be19b3abff1a8a2,Toward Use of Facial Thermal Features in Dynamic Assessment of Affect and Arousal Level,IEEE Transactions on Affective Computing,2017
+cfbffa4d143a72476d962906e413c5ed6306b09c,Why neuroscience matters to cognitive neuropsychology,Synthese,2007
+cfa205874bd192ab949132631a7eda995ecc57af,"Modeling the Contribution of Central Versus Peripheral Vision in Scene, Object, and Face Recognition",CoRR,2016
+cfb8bc66502fb5f941ecdb22aec1fdbfdb73adce,Git Loss for Deep Face Recognition,Unknown,2018
+cf682939be6828d1a70161618024e02af660d1bb,Structured learning of metric ensembles with application to person re-identification,Computer Vision and Image Understanding,2017
+cf54a133c89f730adc5ea12c3ac646971120781c,A comparative study for feature integration strategies in dynamic saliency estimation,Sig. Proc.: Image Comm.,2017
+cac8bb0e393474b9fb3b810c61efdbc2e2c25c29,Visual Segmentation of Simple Objects for Robots,,2011
+cae25b66b485b5b76fb6f3d383b294f3456519a3,Two-dimensional nearest neighbor discriminant analysis,Neurocomputing,2007
+ca627984743536d9403cbc25c00d033bcc1cb839,"Analyzing Computer Vision Data - The Good, the Bad and the Ugly",,2017
+ca83053d9a790319b11a04eac5ab412e7fcab914,Efficient generic face model fitting to images and videos,Image Vision Comput.,2014
+ca17025fe9519b0eec7738995902be2992040a87,A survey of video datasets for human action and activity recognition,Computer Vision and Image Understanding,2013
+e465f596d73f3d2523dbf8334d29eb93a35f6da0,"On Face Segmentation, Face Swapping, and Face Perception",CoRR,2017
+e49c59d19c1d652040f1bbd749c1e69a69f4b66c,On the Integration of Optical Flow and Action Recognition,CoRR,2017
+e4aeaf1af68a40907fda752559e45dc7afc2de67,Exponential Discriminative Metric Embedding in Deep Learning,,2018
+e4c3d5d43cb62ac5b57d74d55925bdf76205e306,Average Biased ReLU Based CNN Descriptor for Improved Face Retrieval,,2018
+e44b644dba198a4f8de553c9795aee77c0d23f37,Social effects of oxytocin in humans: context and person matter.,Trends in cognitive sciences,2011
+e45374e8d9491fe396497fc9fd91bd2f2f036315,Improving Hough Based Pedestrian Detection Accuracy by Using Segmentation and Pose Subspaces,IEICE Transactions,2014
+e476cbcb7c1de73a7bcaeab5d0d59b8b3c4c1cbf,Robust Kernel Representation With Statistical Local Features for Face Recognition,IEEE Transactions on Neural Networks and Learning Systems,2013
+fe7c0bafbd9a28087e0169259816fca46db1a837,Seeing Voices and Hearing Faces: Cross-modal biometric matching,CoRR,2018
+feba048e15c1931086f909d4be04ade134942947,The role of emotion regulation in autism spectrum disorder.,Journal of the American Academy of Child and Adolescent Psychiatry,2013
+fe48f0e43dbdeeaf4a03b3837e27f6705783e576,Supervised Transformer Network for Efficient Face Detection,Unknown,2016
+fe4609fdf8fc8ea18204ffe673c2b06acbe8d0fd,The surprisingly high human efficiency at learning to recognize faces,Vision Research,2009
+fed7ee7152b9477c75251a133bb7e26679cb3dba,Evidence for the triadic model of adolescent brain development: Cognitive load and task-relevance of emotion differentially affect adolescents and adults,Developmental Cognitive Neuroscience,2017
+fe108803ee97badfa2a4abb80f27fa86afd9aad9,Kernel discriminant transformation for image set-based face recognition,Pattern Recognition,2011
+c82c147c4f13e79ad49ef7456473d86881428b89,Facial Expression Recognition and Analysis: A Comparison Study of Feature Descriptors,IPSJ Trans. Computer Vision and Applications,2015
+c86bdec7c4aa6aa1b5872badb5e48193ff5920e8,Face Identification Using One Spike per Neuron: Resistance to Image Degradations,,2001
+c86afba9c77a9b1085ccc6c44c36fa3a1fdb51c5,New Losses for Generative Adversarial Learning,CoRR,2018
+c8279a389738f3011edc6e9ddfefb0410df380ef,Robust pedestrian detection in thermal infrared imagery using a shape distribution histogram feature and modified sparse representation classification,Pattern Recognition,2015
+fb19c7cec103193ea4f4265a2d9534a20893b2a8,Zernike velocity moments for sequence-based description of moving features,Image Vision Comput.,2006
+fb5280b80edcf088f9dd1da769463d48e7b08390,The impact of weak ground truth and facial expressiveness on affect detection accuracy from time-continuous videos of facial expressions,Inf. Sci.,2013
+fb9673f0373ca4c72fe9059648ae618d45fd8c90,Why Is Facial Occlusion a Challenging Problem?,,2009
+ed717bd09e8344c2cfa81ceedfb8baf2105708f5,Binary-image comparison with local-dissimilarity quantification,Pattern Recognition,2008
+ede1f00c2ac27ac90dbeb0df1840ac757447af34,Neural Network Based Approach for Face Detection cum Face Recognition,Unknown,2012
+ed3bb078cab7f423dacf6e946538c6fd22e96e1a,"The BeiHang Keystroke Dynamics Systems, Databases and baselines",Neurocomputing,2014
+eda501bb1e610098648667eb25273adc4a4dc98d,"Fusing audio, visual and textual clues for sentiment analysis from multimodal content",Neurocomputing,2016
+ed702537d487de0737582f7ef7e937f4fe9b28fd,"Pattern Recognition in Latin America in the ""Big Data"" Era",Pattern Recognition,2015
+c178a86f4c120eca3850a4915134fff44cbccb48,Normalization Discriminant Independent Component Analysis,,2013
+c14781a995933e2b89d40a95ca1247845ddfe3ab,"Wize Mirror - a smart, multisensory cardio-metabolic risk monitoring system",Computer Vision and Image Understanding,2016
+c16cc7006ad3ba5f2c5ce022bfc97a6fbfff847b,Increasing the Stability of CNNs using a Denoising Layer Regularized by Local Lipschitz Constant in Road Understanding Problems,Unknown,2017
+c1fc70e0952f6a7587b84bf3366d2e57fc572fd7,Efficient clustering on Riemannian manifolds: A kernelised random projection approach,Pattern Recognition,2016
+c1482491f553726a8349337351692627a04d5dbe,When Follow is Just One Click Away: Understanding Twitter Follow Behavior in the 2016 U.S. Presidential Election,,2017
+c1e76c6b643b287f621135ee0c27a9c481a99054,Multi-point Regression Voting for Shape Model Matching,,2016
+c610db0ee2d111452f70ce4854e48ab9d5c2b1ab,Fast and Robust Multi-people Tracking from RGB-D Data for a Mobile Robot,,2012
+c6ea6fee4823b511eecf41f6c2574a0728055baf,HoloFace: Augmenting Human-to-Human Interactions on HoloLens,CoRR,2018
+c61a5961a344748272fe51ddf4584b22d9c10cde,Is anyone looking at me? Direct gaze detection in children with and without autism.,Brain and cognition,2008
+ece02507e17c7e6a5ce4d58f990f3e01c6555aa4,Improving pedestrian detection with selective gradient self-similarity feature,Pattern Recognition,2015
+ecdd4731e197f4afda804602f533565c19ffc271,Vision in autism spectrum disorders,Vision Research,2009
+ec2bf43338959e263d7fd5e3b2ef8665fa023ed9,A hybrid parallel projection approach to object-based image restoration,Pattern Recognition Letters,2006
+ec26d7b1cb028749d0d6972279cf4090930989d8,Making Bertha Drive - An Autonomous Journey on a Historic Route,IEEE Intell. Transport. Syst. Mag.,2014
+ec22eaa00f41a7f8e45ed833812d1ac44ee1174e,A novel phase congruency based descriptor for dynamic facial expression analysis,Pattern Recognition Letters,2014
+ec0239b3547639195c95d322632b6a83b648e8df,Robust visual tracking with structured sparse representation appearance model,Pattern Recognition,2012
+4e729427f5cd4be22dad7bef0eb241e93497dbf4,Model-Driven Domain Adaptation on Product Manifolds for Unconstrained Face Recognition,International Journal of Computer Vision,2014
+4e93a8a47473bf57e24aec048cb870ab366a43d6,Face authentication for multiple subjects using eigenflow,,2001
+4ea759e13b0991772c61a4ede058d59d5e33a71b,"Scale resilient, rotation invariant articulated object matching",2012 IEEE Conference on Computer Vision and Pattern Recognition,2012
+4e4a4359c7dd25af7e2ef0910928cd9faa5d0cfb,End-to-End 3D Face Reconstruction with Deep Neural Networks,2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR),2017
+4e9f46a80ffaffa2dabde4fb48d6ac72398ef829,Deep Hybrid Similarity Learning for Person Re-identification,CoRR,2017
+4e444db884b5272f3a41e4b68dc0d453d4ec1f4c,Learning without Prejudice: Avoiding Bias in Webly-Supervised Action Recognition,CoRR,2017
+4e7ebf3c4c0c4ecc48348a769dd6ae1ebac3bf1b,"Towards the automatic detection of spontaneous agreement and disagreement based on nonverbal behaviour: A survey of related cues, databases, and tools",Image Vision Comput.,2013
+4e4311a5fd99b17bed31b7006a572d29a58cdcf3,A support vector machine classifier with automatic confidence and its application to gender classification,Neurocomputing,2011
+4e4e8fc9bbee816e5c751d13f0d9218380d74b8f,Tone-aware sparse representation for face recognition,2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG),2013
+4efc523df04fe19b600e372b9cfc9acf2e0b21d8,LSTM stack-based Neural Multi-sequence Alignment TeCHnique (NeuMATCH),,2018
+20a88cc454a03d62c3368aa1f5bdffa73523827b,Face recognition using a kernel fractional-step discriminant analysis algorithm,Pattern Recognition,2007
+20ca3dc873d7c986d7b1b233fdcf85e78b92914e,Patch to the Future: Unsupervised Visual Prediction,2014 IEEE Conference on Computer Vision and Pattern Recognition,2014
+209e1d36f36b8e7db3147b0e424874e54df9012e,Variational Tempering,,2016
+20767ca3b932cbc7b8112db21980d7b9b3ea43a3,Dynamic Concept Composition for Zero-Example Event Detection,,2016
+203956dec006b8c313bfd166be58d1e70b3dffd9,Maternal immune activation in nonhuman primates alters social attention in juvenile offspring.,Biological psychiatry,2015
+20c2a5166206e7ffbb11a23387b9c5edf42b5230,Examining visible articulatory features in clear and plain speech,Speech Communication,2015
+2098983dd521e78746b3b3fa35a22eb2fa630299,Second-order Temporal Pooling for Action Recognition,CoRR,2017
+20cdaf21acd50fd2cfbdd0eb697a8906cfb012e2,"RAFI, KOSTRIKOV, GALL, LEIBE: EFFICIENT CNN FOR HUMAN POSE ESTIMATION 1 An Efficient Convolutional Network for Human Pose Estimation",,2016
+20af7f10485fca89c2c282e74016fe69765e4962,A Harmonic Mean Linear Discriminant Analysis for Robust Image Classification,2016 IEEE 28th International Conference on Tools with Artificial Intelligence (ICTAI),2016
+20eb57978ec863e031e0960c6799d756a041d60a,Facial emotion recognition and sleep in mentally disordered patients: A natural experiment in a high security hospital.,Psychiatry research,2015
+20aa8348cf4847b9f72fe8ddbca8a2594ea23856,Learning ordinal discriminative features for age estimation,2012 IEEE Conference on Computer Vision and Pattern Recognition,2012
+207e91b23253545c32dfedf71773f5af1dc88057,Line-based Face Recognition under Varying Pose Title Line-based Face Recognition under Varying Pose Primary Author(s) Date,,1998
+20e476887f9ad432ea35a5f712485e4e77363d64,Language Guided Visual Perception,,2016
+206fbe6ab6a83175a0ef6b44837743f8d5f9b7e8,Unsupervised learning of object frames by dense equivariant image labelling,Unknown,2017
+200ab001770a39d5465c661d0078f4d9410f343c,The Analysis of Invariant Repetitive Patterns in Images and Videos,,2013
+20b8b3bad07b31e8ee83b2d865266ec58667992e,"Fear, faces, and the human amygdala.",Current opinion in neurobiology,2008
+202aaa03da5c5c2707ac8fb42aeed7f582ce2848,Recursive Cross-Domain Face/Sketch Generation from Limited Facial Parts,CoRR,2017
+18206e1b988389eaab86ef8c852662accf3c3663,Compressed Video Action Recognition,CoRR,2017
+184e4a62fc9c3c8ea8948aceebb1debe0b5fc54a,Generative part-based Gabor object detector,Pattern Recognition Letters,2015
+184750382fe9b722e78d22a543e852a6290b3f70,Projection functions for eye detection,Pattern Recognition,2004
+18f7fe72fcefee11082534f4bd254d67e433a2bd,"Simultaneous inference of activity, pose and object",2012 IEEE Workshop on the Applications of Computer Vision (WACV),2012
+1883116d33a3e0321d2fe96e0a8a62546aca4ee9,"Visual question answering: Datasets, algorithms, and future challenges",Computer Vision and Image Understanding,2017
+18fc6bac478f069dbf35f1ebdf6f5d7d711872a0,Individual differences in symptom severity and behavior predict neural activation during face processing in adolescents with autism,,2015
+18fe5b96b620454baa5342ee6b8fb2908ed22988,Attribute rating for classification of visual objects,Proceedings of the 21st International Conference on Pattern Recognition (ICPR2012),2012
+18d19cf4d09ac340428d091b24af561a3d5de3ea,Generalization Bounds for Unsupervised Cross-Domain Mapping with WGANs,CoRR,2018
+1828b1b0f5395b163fef087a72df0605249300c2,On-line Adaption of Class-specific Codebooks for Instance Tracking,,2010
+1802aebb98424af6fa8f3d4dc024da2f1d3ea1e5,The Human Face as a Dynamic Tool for Social Communication,Current Biology,2015
+18ec3b37a33db39ac0633677e944cc81be58f7ba,Cooperative Training of Descriptor and Generator Networks,CoRR,2016
+18dfc2434a95f149a6cbb583cca69a98c9de9887,Hough Networks for Head Pose Estimation and Facial Feature Localization,,2014
+189c0e5df2611dea909e51256b30c3ce3d25b5a4,Detecting Vanishing Points in Natural Scenes with Application in Photo Composition Analysis,CoRR,2016
+2771e262e54948ad2c35a80caabc7af181521d39,Co-localization with Category-Consistent CNN Features and Geodesic Distance Co-Propagation,CoRR,2016
+271fbc4c09b3f2eb9f56dc2bbac89262b3bc083d,Domain Adaptation with Soft-margin multiple feature-kernel learning beats Deep Learning for surveillance face recognition,CoRR,2016
+27d709f7b67204e1e5e05fe2cfac629afa21699d,"Learning the Latent ""Look"": Unsupervised Discovery of a Style-Coherent Embedding from Fashion Images",,2017
+27c9ddb72360f4cd0f715cd7ea82fa399af91f11,Multiresolution face recognition,Image Vision Comput.,2005
+27276945ce8b103b2341729e99da4e76acee19c6,Neural correlates of “social gaze” processing in high-functioning autism under systematic variation of gaze duration,,2013
+2727927c7493cef9785b3a06a38f5c1ce126fc23,Semi-supervised FusedGAN for Conditional Image Generation,Unknown,2018
+273fadb4247020b830f48be556b4b44fc900b94f,Face recognition in 2D and 2.5D using ridgelets and photometric stereo,Pattern Recognition,2012
+27d9d09126c1f2138f6aa719c4937da0bf8a8b87,Multi-Modal Detection and Mapping of Static and Dynamic Obstacles in Agriculture for Process Evaluation,,2018
+27da432cf2b9129dce256e5bf7f2f18953eef5a5,Face Recognition in Low Quality Images: A Survey,CoRR,2018
+27e9f54586475d495e68b4218fdbd6e926c0accf,Development of a Person Following Robot and Its Experimental Evaluation,,2010
+274f87ad659cd90382ef38f7c6fafc4fc7f0d74d,Latent Tensor Transfer Learning for RGB-D Action Recognition,,2014
+27a2fad58dd8727e280f97036e0d2bc55ef5424c,"Performance Measures and a Data Set for Multi-target, Multi-camera Tracking",,2016
+4bc1c2cea06e5f42905f5ee99a6e2c1693c098f6,More than a million ways to be pushed. A high-fidelity experimental dataset of planar pushing,2016 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS),2016
+4bb9c5f1ef1240486374f4f80d8f65921f74c1ad,Double linear regressions for single labeled image per person face recognition,Pattern Recognition,2014
+4bbbee93519a4254736167b31be69ee1e537f942,Learning to Score Olympic Events,2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW),2017
+4b6be933057d939ddfa665501568ec4704fabb39,Graph Transduction as a Non-cooperative Game,,2011
+4b1d23d17476fcf78f4cbadf69fb130b1aa627c0,Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation,,2010
+4bc31b1cc8e4f0204a02fd900ed0ef36747e5b77,Robust classification of face and head gestures in video,Image Vision Comput.,2011
+11f5dd9f1cb14d14a48499d05907ac05a20828e9,Learning detectors quickly using structured covariance matrices,CoRR,2014
+1176c886afbd8685ecf0094450a02eb96b950f71,A Bayesian Hashing approach and its application to face recognition,Neurocomputing,2016
+11fb122efe711980ec4fb55e49bfbc03f538c462,Sparse representation for face recognition based on discriminative low-rank dictionary learning,2012 IEEE Conference on Computer Vision and Pattern Recognition,2012
+110d474178b0bb5e2050537d89d08a76106ab736,A landmark paper in face recognition,7th International Conference on Automatic Face and Gesture Recognition (FGR06),2006
+11f8eb971b3ef63ffc1805e1508ff5e52c943cc4,Exploiting projective geometry for view-invariant monocular human motion analysis in man-made environments,Computer Vision and Image Understanding,2014
+11cf7aa5d940d0680e287b6e7f13490a619fdf47,"Joint Graph Decomposition &amp; Node Labeling: Problem, Algorithms, Applications",2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR),2017
+119ac3d4a8c9bc5c36087140fbdddab788d10e5c,A review of different object recognition methods for the application in driver assistance systems,Eighth International Workshop on Image Analysis for Multimedia Interactive Services (WIAMIS '07),2007
+11367581c308f4ba6a32aac1b4a7cdb32cd63137,3D face shape approximation from intensities using Partial Least Squares,2008 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops,2008
+114b12559cbc2916b90728b09f158030d332e6a1,Image saliency: From intrinsic to extrinsic context,CVPR 2011,2011
+112780a7fe259dc7aff2170d5beda50b2bfa7bda,Learnable PINs: Cross-Modal Embeddings for Person Identity,CoRR,2018
+1180b1da7221c8c614ac7f6960772b78342f233e,Online tracking parameter adaptation based on evaluation,2013 10th IEEE International Conference on Advanced Video and Signal Based Surveillance,2013
+1113b4fcf644616e2587eacead2bca4b794ac47d,Efficient Human Pose Estimation from Single Depth Images,IEEE Transactions on Pattern Analysis and Machine Intelligence,2012
+1193317829bfcc9b9dffa5ae85a2e2114254b37e,Recent advances in convolutional neural networks,Pattern Recognition,2018
+7d0cb85f9f63afc23ce42b92337b12ef91fc091e,Discriminative transfer learning for single-sample face recognition,2015 International Conference on Biometrics (ICB),2015
+7d3698c0e828d05f147682b0f5bfcd3b681ff205,Tracklet Association by Online Target-Specific Metric Learning and Coherent Dynamics Estimation,IEEE Transactions on Pattern Analysis and Machine Intelligence,2017
+7d8141fd805da71af51205b9218e0768e9371188,Beyond the Lambertian Assumption: A generative model for Apparent BRDF field for Faces using Anti-Symmetric Tensor Splines,,2008
+7d513b7c22d8e771ce657489bb8e515dab897650,From gaze cueing to dual eye-tracking: novel approaches to investigate the neural correlates of gaze in social interaction.,Neuroscience and biobehavioral reviews,2013
+7d4a04c03b73d34c86f5d06cbb88cca4287d8b37,Changing facial affect recognition in schizophrenia: Effects of training on brain dynamics,,2014
+7dbd91389960498ee38ca7588025ec61a08ec942,Optimized Codebook Construction and Assignment for Product Quantization-based Approximate Nearest Neighbor Search,IPSJ Trans. Computer Vision and Applications,2012
+7da7678882d06a1f93636f58fe89635da5b1dd0c,EnhanceNet: Single Image Super-Resolution Through Automated Texture Synthesis,2017 IEEE International Conference on Computer Vision (ICCV),2017
+293d371d585d13159e53df703f724165704c9329,Social perception in synaesthesia for colour.,Cognitive neuropsychology,2016
+296cacad71b4181eca4a571cf080d2baee229dcc,Towards Multi-Object Detection and Tracking in Urban Scenario under Uncertainties,Unknown,2018
+294cc3d492a38f7a6886d55009286efdfb04395d,Locality sensitive semi-supervised feature selection,Neurocomputing,2008
+29c1f733a80c1e07acfdd228b7bcfb136c1dff98,Discriminatively Trained Latent Ordinal Model for Video Classification,IEEE transactions on pattern analysis and machine intelligence,2017
+298cbc3dfbbb3a20af4eed97906650a4ea1c29e0,Training deep networks for facial expression recognition with crowd-sourced label distribution,,2016
+29f27448e8dd843e1c4d2a78e01caeaea3f46a2d,Similar gait action recognition using an inertial sensor,Pattern Recognition,2015
+29d10748dfb3bc6883dae224bc4c6ddf774bf363,Learning Grounded Meaning Representations with Autoencoders,,2014
+290eda31bc13cbd5933acec8b6a25b3e3761c788,Multiple object tracking with context awareness,CoRR,2014
+29c9af31eb125b696ce34d0a8c64382f7e97bf23,Vision based tracking and recognition of dynamic hand gestures,,2007
+29094526e1179208b43e6223b03a7a5340f45689,A Novel Approach for Efficient SVM Classification with Histogram Intersection Kernel,,2013
+29778f86a936c5a5fbedcdffdc11d0ddfd3984f1,Video In Sentences Out,,2012
+29156e4fe317b61cdcc87b0226e6f09e416909e0,Heterogeneous Face Attribute Estimation: A Deep Multi-Task Learning Approach,IEEE transactions on pattern analysis and machine intelligence,2017
+293ade202109c7f23637589a637bdaed06dc37c9,Material for : Adaptive Cascaded Regression,,2016
+7c8e64f20b58ddd1fc0e9c972c3eb0fe35b40a6b,Multiple-shot person re-identification by chromatic and epitomic analyses,Pattern Recognition Letters,2012
+7c0cf2fa4ed7cfb1cf41c986fdc3b82c53177854,INDREX: In-database relation extraction,Inf. Syst.,2015
+7c38572093b0d0ef72d828f59f95b3a6a067fe27,WESPE: Weakly Supervised Photo Enhancer for Digital Cameras,CoRR,2017
+7c8231cc89f628cad270f0c2d2228ad749a97d01,Semantic Softmax Loss for Zero-Shot Learning,CoRR,2017
+7ce03597b703a3b6754d1adac5fbc98536994e8f,On the Intrinsic Dimensionality of Face Representation,,2018
+7cb4ab1bfff61bf0d1ebec6c4402b7e45e62c609,Hierarchical Multiresolution Models for fast Object Detection,,2012
+16de1324459fe8fdcdca80bba04c3c30bb789bdf,Super-FAN: Integrated facial landmark localization and super-resolution of real-world low resolution faces in arbitrary poses with GANs,CoRR,2017
+16892074764386b74b6040fe8d6946b67a246a0b,Virtual Faces Expressing Emotions: An Initial Concomitant and Construct Validity Study,,2014
+165c27a4bfb56562c807279bef9d15f1bced5ca0,Scene parsing using inference Embedded Deep Networks,Pattern Recognition,2016
+16f1a35d0149482d6b2b67df58b21b68622e6b9c,Video-Based Person Re-Identification by Simultaneously Learning Intra-Video and Inter-Video Distance Metrics,,2016
+16371cf22f9de60dd1edd7178669e5ba69143686,Exploring LOTS in Deep Neural Networks,CoRR,2016
+16a2f42edb98495bb9b766c56a05edcd2ca4ef03,Multi-shot SURF-based person re-identification via sparse representation,2013 10th IEEE International Conference on Advanced Video and Signal Based Surveillance,2013
+1679943d22d60639b4670eba86665371295f52c3,Facial feature extraction using complex dual-tree wavelet transform,Computer Vision and Image Understanding,2008
+165b7b9ed474805c35cb60204671c9bb2053c976,Yes we can: simplex volume maximization for descriptive web-scale matrix factorization,,2010
+169076ffe5e7a2310e98087ef7da25aceb12b62d,Emotional restraint is good for men only: The influence of emotional restraint on perceptions of competence.,Emotion,2016
+169731093e6b1a5ca51805a876011a9c250f11cb,Skin injury model classification based on shape vector analysis,,2012
+42d0193edad27f4a4505f1bf7a9122f0ac1a0e9e,Facial Shape Estimation in the Presence of Cast Shadows,,2006
+429c3588ce54468090cc2cf56c9b328b549a86dc,Thermal and reflectance based personal identification methodology under variable illumination,Pattern Recognition,2010
+42495ae78d48209891874e90a4436a3e1b74ef0c,"Towards Scene Understanding: Object Detection, Segmentation, and Contextual Reasoning",,2013
+42f512d36722b09d1c83d328051badd374769fed,From Pixels to Object Sequences: Recurrent Semantic Instance Segmentation,,2017
+424259e9e917c037208125ccc1a02f8276afb667,Walk and Learn: Facial Attribute Representation Learning from Egocentric Video and Contextual Data,2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR),2016
+42c3adafbf8fce4b9d0986be184f2b1955958162,Human-centric design personalization of 3D glasses frame in markerless augmented reality,Advanced Engineering Informatics,2012
+42e0127a3fd6a96048e0bc7aab6d0ae88ba00fb0,AU-aware Deep Networks for facial expression recognition,2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG),2013
+89de30a75d3258816c2d4d5a733d2bef894b66b9,Relative Hidden Markov Models for Video-Based Evaluation of Motion Skills in Surgical Training,IEEE Transactions on Pattern Analysis and Machine Intelligence,2015
+89ee33b78797c0d6219d31200424f88ba8fbecfa,Biometric cryptosystem based on discretized fingerprint texture descriptors,Expert Syst. Appl.,2013
+892d47a6e46fb95def22bf4c21a79548457e045e,Research on Different Representation Methods for Classification,,2014
+894f540ed8e603a51c22c7040a5485dff856ae25,Effect of calibration data on forensic likelihood ratio from a face recognition system,"2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)",2013
+45a6add58dcb5587f607b8eedd92078560c313c5,Multi-Modality Vertebra Recognition in Arbitrary,,2015
+45a2d0b9d5361742a567bd1978036c0c14c5bd1f,Similar image search with a tiny bag-of-delegates representation,,2012
+45fccb72a1bc078ecb260c3e9871dda4edf37087,Review Networks for Caption Generation,,2016
+45f3bf505f1ce9cc600c867b1fb2aa5edd5feed8,Fully automatic facial feature point detection using Gabor feature based boosted classifiers,"2005 IEEE International Conference on Systems, Man and Cybernetics",2005
+456ae882c62434974448c37086b01fe707e04f5c,Robust Real-Time Multi-View Eye Tracking,CoRR,2017
+45fbeed124a8956477dbfc862c758a2ee2681278,Pose Invariant Approach for Face Recognition at Distance,,2012
+4511e09ee26044cb46073a8c2f6e1e0fbabe33e8,A Graph Based Approach for Finding People in News,,2007
+456c8c8ba65fb933166cce1699a2d12a37f60233,Coloured Filters Enhance the Visual Perception of Social Cues in Children with Autism Spectrum Disorders,,2012
+1fe73457d92f6158847e5e8dd18f040ef7cb3987,Kernel Methods on Approximate Infinite-Dimensional Covariance Operators for Image Classification,CoRR,2016
+1fe8b8dc1271b0cb5ce37f21be5809546597cfdf,Performances of the likelihood-ratio classifier based on different data modelings,"2008 10th International Conference on Control, Automation, Robotics and Vision",2008
+1f6576ef2f8b986b44f06bb83b4238d1ffb6c990,Salient feature and reliable classifier selection for facial expression classification,Pattern Recognition,2010
+1fd2ed45fb3ba77f10c83f0eef3b66955645dfe0,Generalized Unsupervised Manifold Alignment,,2014
+1f2d12531a1421bafafe71b3ad53cb080917b1a7,Joint optimization of manifold learning and sparse representations for face and gesture analysis,,2015
+1f4fed0183048d9014e22a72fd50e1e5fbe0777c,A Game-Theoretic Approach to Multi-Pedestrian Activity Forecasting,CoRR,2016
+732d0d3f57e93c96ee85c33b39012111a90624c2,Object motion detection using information theoretic spatio-temporal saliency,Pattern Recognition,2009
+7384c39a2d084c93566b98bc4d81532b5ad55892,A Comparative Study of Face Landmarking Techniques,EURASIP J. Image and Video Processing,2013
+73da66ea59da581c31ff9dd5f7d8243356360eb9,A simple and efficient eye detection method in color images,,2009
+73134cc8ab3cda6eeb7ac870ddf8d32430c48710,Semi-random subspace method for face recognition,Image Vision Comput.,2009
+73dbe02e590fed82640c46129f64651fd1b33c24,Learning Partially Shared Dictionaries for Domain Adaptation,,2014
+87f64a3f33f464a2602d5fb0d717d553c91fc39c,Learning object relationships via graph-based context model,2012 IEEE Conference on Computer Vision and Pattern Recognition,2012
+870eba6ab6eba89682be11100b744fd4864e437c,ECO: Efficient Convolutional Network for Online Video Understanding,Unknown,2018
+87f1b49dee91ff0065ab4ed1f0ddb74fd0af6b5c,AR Model Based Human Identification using Ear Biometrics,,2014
+872d1392408358b88490047651052c87ca754040,"Analysis of face gaze in autism using ""Bubbles"".",Neuropsychologia,2007
+87ab2e74e2ab93de0316f09d76e7573052628989,Contour-based object detection as dominant set computation,Pattern Recognition,2012
+87ce943906579910572db0d0edda0813503b8015,Example-based image super-resolution with class-specific predictors,J. Visual Communication and Image Representation,2009
+804d856f09602f2b8e9184db155bf1b9ab7f31e7,Facial affect recognition training in autism: can we animate the fusiform gyrus?,Behavioral neuroscience,2006
+80c11a3ad362b294d5faa0d8e5c384db1d585795,Mixed Neural Network Approach for Temporal Sleep Stage Classification,IEEE Transactions on Neural Systems and Rehabilitation Engineering,2018
+80193dd633513c2d756c3f568ffa0ebc1bb5213e,Wavelet Subspace Method for Real-Time Face Tracking,,2001
+808d7e7c411ba8e4b31c63f34cce5a195db3dd38,RLBP: Robust Local Binary Pattern,,2013
+8031b81338c05d5fe4e2e5f8820d185b32734fb6,Fully automatic acute ischemic lesion segmentation in DWI using convolutional neural networks,,2017
+806c07757431ab3fd91f4276d350186cf6f9b7e4,Copula Ordinal Regression Framework for Joint Estimation of Facial Action Unit Intensity,,2017
+747fddd7345b60da121fc13c5440a18039b912e6,Improving Consistency and Correctness of Sequence Inpainting using Semantically Guided Generative Adversarial Network,CoRR,2017
+747d5fe667519acea1bee3df5cf94d9d6f874f20,Transferring Common-Sense Knowledge for Object Detection,,2018
+74ac172076ac9550b32cce7b8e8989f1eb113515,Zero-Shot Learning with Multi-Battery Factor Analysis,Signal Processing,2017
+7446cc18f173f2885dfea6dd27bcb725989f2788,Template Matching Techniques in Computer Vision,,2008
+745b42050a68a294e9300228e09b5748d2d20b81,Temporal Human Action Segmentation via Dynamic Clustering,,2018
+1ac6a33f04f6c5a8084c15c85295f987cc8e3d72,FVQA: Fact-based Visual Question Answering,IEEE transactions on pattern analysis and machine intelligence,2017
+1adb472cf79b9adc4f1223686528c524e5d790be,"""You're It!"": Role Identification Using Pairwise Interactions in Tag Games",,2013
+1aa0b335906e91cc026741e3523b088677755762,"Representation, Control, or Reasoning? Distinct Functions for Theory of Mind within the Medial Prefrontal Cortex",Journal of cognitive neuroscience,2014
+1a9337d70a87d0e30966ecd1d7a9b0bbc7be161f,"A novel binary adaptive weight GSA based feature selection for face recognition using local gradient patterns, modified census transform, and local binary patterns",Eng. Appl. of AI,2014
+1abadb7a70c9faa69b618926aa4c61a2520659a1,Supplementary Material: An Empirical Study and Analysis of Generalized Zero-Shot Learning for Object Recognition in the Wild,,2016
+1a00927d3719a0b6c2699f0ad1e1f4cb8402d4ea,VoD: A novel image representation for head yaw estimation,Neurocomputing,2015
+1a645bcd029cc5ce21b973146f21a9655047cc96,Phrase Localization and Visual Relationship Detection with Comprehensive Linguistic Cues,CoRR,2016
+1a20c1d04b93d91cf2fd0b4e3c7bf1153a93942c,Re-identification for Improved People Tracking,,2014
+1a1118cd4339553ad0544a0a131512aee50cf7de,Semantic Image Retrieval via Active Grounding of Visual Situations,CoRR,2017
+2845cc51262f3af6aafbad62690a23e9bc847b07,Looking at my own Face: Visual Processing Strategies in Physical Self-representation,,2017
+28e0ed749ebe7eb778cb13853c1456cb6817a166,C-Mantec: A novel constructive neural network algorithm incorporating competition between neurons,Neural networks : the official journal of the International Neural Network Society,2012
+28b9d92baea72ec665c54d9d32743cf7bc0912a7,Parametric temporal alignment for the detection of facial action temporal segments,,2014
+283a2bb8aece06b975a1109aaa8daecdf4d3df42,Summary Transfer: Exemplar-based Subset Selection for Video Summarization,,2016
+280b0a4078232f13a7d4234a9ae176f01b762b12,Coupled person orientation estimation and appearance modeling using spherical harmonics,Image Vision Comput.,2014
+28c14a6c64518c21888afb2d73fe8dff633ca4da,Mixture Subclass Discriminant Analysis Link to Restricted Gaussian Model and Other Generalizations,IEEE Transactions on Neural Networks and Learning Systems,2013
+282a3ee79a08486f0619caf0ada210f5c3572367,Accelerated Training for Massive Classification via Dynamic Class Selection,CoRR,2018
+288dbc40c027af002298b38954d648fddd4e2fd3,Local Higher-Order Statistics (LHS) for Texture Categorization and Facial Analysis,,2012
+28312c3a47c1be3a67365700744d3d6665b86f22,Face Recognition: A Literature Survey,,2000
+287900f41dd880802aa57f602e4094a8a9e5ae56,Expressive deformation profiles for cross expression face recognition,Proceedings of the 21st International Conference on Pattern Recognition (ICPR2012),2012
+2819ac49d1967a3e51e7e65730a666a76ba9a687,Internet Visual Media Processing for Graphics and Vision Applications: A Survey,,2012
+28d4e027c7e90b51b7d8908fce68128d1964668a,Level Playing Field for Million Scale Face Recognition,2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR),2017
+28b72ad9229f38ec61f950e1d794d6af070d1800,Zero-shot Object Prediction using Semantic Scene Knowledge,,2017
+28121cd9150250fe51de62521065c7e2246a73e9,Blind Image Deconvolution using Deep Generative Priors,,2018
+28a16718b633dbc7f612de637fdb0d49c0e09219,2017 Formatting Instructions for Authors Using LaTeX,,2017
+2822a883d149956934a20614d6934c6ddaac6857,A Survey of Appearance Models in Visual Object Tracking,,2013
+17035089959a14fe644ab1d3b160586c67327db2,VLAD: Encoding Dynamics of Deep Features for Action Recognition,,
+179253152fba4626e02a57067c3eb5302431e537,Affine iterative closest point algorithm for point set registration,Pattern Recognition Letters,2010
+179a37cb5416cea7d24b5820e75327ecf105e488,A novel dynamic model for multiple pedestrians tracking in extremely crowded scenarios,Information Fusion,2013
+17d519e0400fcd973387af8482aae949c1ccc521,Fourier Descriptors Based on the Structure of the Human Primary Visual Cortex with Applications to Object Recognition,Journal of Mathematical Imaging and Vision,2016
+17aa78bd4331ef490f24bdd4d4cd21d22a18c09c,Appendix: Building high-level features using large scale unsupervised learning,,2012
+17c0d99171efc957b88c31a465c59485ab033234,"To learn image super-resolution, use a GAN to learn how to do image degradation first",CoRR,2018
+1791f790b99471fc48b7e9ec361dc505955ea8b1,"A motion capture library for the study of identity, gender, and emotion perception from biological motion.",Behavior research methods,2006
+17fad2cc826d2223e882c9fda0715fcd5475acf3,Human facial expressions as adaptations: Evolutionary questions in facial expression research.,American journal of physical anthropology,2001
+176a507ebbfdc0fad141da14d30d89caa35bfaf9,Automatic coding of facial expressions displayed during posed and genuine pain,Image Vision Comput.,2009
+7ba0bf9323c2d79300f1a433ff8b4fe0a00ad889,ViCom: Benchmark and Methods for Video Comprehension,CoRR,2016
+7bf8ba8c7fff5e8aa23b5bc68aa1756f6a55bfc0,Learning Smooth Pooling Regions for Visual Recognition,,2013
+7b8ae23573fb33e6a762e914128c425a7f381fb7,Automatic face authentication with self compensation,Image Vision Comput.,2008
+7b1be02cbbef951875813ad55d3016ec2aee17f6,Multi-View Priors for Learning Detectors from Sparse Viewpoint Data,CoRR,2013
+7bfe085c10761f5b0cc7f907bdafe1ff577223e0,Adaptive Semi-Supervised Learning with Discriminative Least Squares Regression,,2017
+7bee43956fd72d86ce7d1f8f6667aefd2de75f98,Three-dimensional facial surface modeling applied to recognition,Eng. Appl. of AI,2009
+7b905905b616be6ddacb1808ca9849ab19863967,Display-camera calibration using eye reflections and geometry constraints,Computer Vision and Image Understanding,2011
+7b2e083302f7ef8e93a3f83a2ffc0c366a743cba,A Feasibility Study on the Use of Binary Keypoint Descriptors for 3D Face Recognition,,2014
+7be7699221c9afd582dab35bd7196c544972ad1d,Leaf Recognition using Texture Features for Herbal Plant Identification,,2017
+7bd2f332a96fd64e015157d9564ada73cff0cf3b,Automatic behavior analysis in tag games: from traditional spaces to interactive playgrounds,Journal on Multimodal User Interfaces,2016
+8fb611aca3bd8a3a0527ac0f38561a5a9a5b8483,Human Face Identification via,,
+8f88bcf3b2e0fb9cb09240541d4b65bcdcd89826,Latent Low-Rank Representation for subspace segmentation and feature extraction,2011 International Conference on Computer Vision,2011
+8f9f599c05a844206b1bd4947d0524234940803d,Efficient 3D reconstruction for face recognition,,2004
+8f7ae27df3df63f0f9a0a8d595bd95f4dd6d2589,Efficient semi-supervised feature selection with noise insensitive trace ratio criterion,Neurocomputing,2013
+8f02ec0be21461fbcedf51d864f944cfc42c875f,The HDA+ Data Set for Research on Fully Automated Re-identification Systems,,2014
+8abe89ab85250fd7a8117da32bc339a71c67dc21,Multi-camera Multi-Object Tracking,CoRR,2017
+8a790c808c293cf5d8ca089e8963b133d1300712,Multi-level Contextual RNNs with Attention Model for Scene Labeling,CoRR,2016
+8a40b6c75dd6392ee0d3af73cdfc46f59337efa9,Feature-Based Facial Expression Recognition: Sensitivity Analysis and Experiments with A Multilayer Perceptron,IJPRAI,1999
+8a65a86ca07dba867b6435819239f96a6d825bf7,A Search Engine for Retrieval and Inspection of Events with 48 Human Actions in Realistic Videos,,2013
+7e600faee0ba11467d3f7aed57258b0db0448a72,Robust Face Recognition using AAM and Gabor Features,Unknown,2007
+7e1bb3a908f6bcd3ba09b2d48f559536e3034d88,Maximally Distant Cross Domain Generators for Estimating Per-Sample Error,CoRR,2017
+7e736f25911c91cda343c000aabc773ed9a94fdf,Accurate and Efficient Video De-Fencing Using Convolutional Neural Networks and Temporal Information,2018 IEEE International Conference on Multimedia and Expo (ICME),2018
+7e5316031b08b8855e0d3e089b7b412ef3ba425f,Face recognition with disguise and single gallery images,Image Vision Comput.,2009
+7ee637bee61a7a6d4b2d2d7aea921566bdf5922a,Monocular Vs Binocular 3D Real-time Ball Tracking from 2D Ellipses,,2011
+1056347fc5e8cd86c875a2747b5f84fd570ba232,Multi-Camera Action Dataset for Cross-Camera Action Recognition Benchmarking,2017 IEEE Winter Conference on Applications of Computer Vision (WACV),2017
+10af9d8f5895e9ff26fcfce779f9a1199ceba529,A novel fuzzy facial expression recognition system based on facial feature extraction from color face images,Eng. Appl. of AI,2012
+107dbd2ffa3bb26786ebb7bb57a308c7d1f4dbc4,Laterobasal amygdalar enlargement in 6- to 7-year-old children with autism spectrum disorder.,Archives of general psychiatry,2010
+1067ef2c4d8c73bb710add5c7bfe35dd74bcb98a,Mechanisms of facial emotion recognition in autism spectrum disorders: Insights from eye tracking and electroencephalography,Neuroscience & Biobehavioral Reviews,2017
+1061140c5177193585900e3a8a271366c0e48a43,Machine analysis of facial behaviour: naturalistic and dynamic behaviour.,"Philosophical transactions of the Royal Society of London. Series B, Biological sciences",2009
+10ab1b48b2a55ec9e2920a5397febd84906a7769,I-Pic: A Platform for Privacy-Compliant Image Capture,,2016
+10cb7f4f86c6437b496a1c98955ba413c7540cd4,Gaussian Process Morphable Models,IEEE transactions on pattern analysis and machine intelligence,2017
+10195a163ab6348eef37213a46f60a3d87f289c5,Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks,International Journal of Computer Vision,2016
+10e704c82616fb5d9c48e0e68ee86d4f83789d96,INSTITUT FÜR INFORMATIK UND PRAKTISCHE MATHEMATIK Gabor Wavelet Networks for Object Representation,,2000
+10e70a34d56258d10f468f8252a7762950830d2b,New Parallel Models for Face Recognition,2007 International Conference on Computational Intelligence and Security (CIS 2007),2007
+10cb43143c3370e54a4e365aecc29505ea968bec,Hashing in the zero shot framework with domain adaptation,Neurocomputing,2018
+10cc976f8bdc0ce269a1239cf7cc6f3a5df7cc8a,Self - Serving Memories 1 Running head : Self - Serving Memories A self - serving bias in children ’ s memories ?,,2015
+10689c0a253c858c898275b819609e3dbb6fae25,Convex Sparse PCA for Unsupervised Feature Learning,TKDD,2016
+105bc5bde56723abdd3979c7b9adaa0a1616520d,Semantic Graph for Zero-Shot Learning,CoRR,2014
+1978297fa32ca39f57f450608a48a19048b09270,Scene in the Loop: Towards Adaptation-by-Tracking in RGB-D Data,,2012
+19e4c7d3f3b60235848fdf1e2d23f6fa6f5b6586,Training-induced plasticity of the social brain in autism spectrum disorder.,The British journal of psychiatry : the journal of mental science,2015
+19746957aa0d800d550da246a025ad44409cdb03,A Review of Web Image Mining,,2015
+195df1106f4d7aff0e9cb609358abbf80f54a716,Detecting Events and Key Actors in Multi-person Videos,2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR),2016
+1939168a275013d9bc1afaefc418684caf99ba66,An associate-predict model for face recognition,CVPR 2011,2011
+190b3caa2e1a229aa68fd6b1a360afba6f50fde4,"VideoLSTM convolves, attends and flows for action recognition",Computer Vision and Image Understanding,2018
+19089ecd35606445c62ff4abaa26252f44dcda89,Review of statistical shape spaces for 3D data with comparative analysis for human faces,Computer Vision and Image Understanding,2014
+19f8f011516fe6ffa4ed74776a0149c9dbdc5ccf,Contextual Rescoring for Human Pose Estimation,,2014
+19808134b780b342e21f54b60095b181dfc7a600,SIFTing Through Scales,IEEE Transactions on Pattern Analysis and Machine Intelligence,2016
+193089d56758ab88391d846edd08d359b1f9a863,A Discriminatively Learned CNN Embedding for Person Reidentification,TOMCCAP,2017
+19c0e8f6fbe49b0065039ed7b23da3ef0fb9852d,Improved Object Categorization and Detection Using Comparative Object Similarity,IEEE Transactions on Pattern Analysis and Machine Intelligence,2013
+19c5dded4a2d1b7e62e29c71a4a7bd0911e2f5ae,SMC faster R-CNN: Toward a scene-specialized multi-object detector,Computer Vision and Image Understanding,2017
+19d00c90674de88c093c367425bf6820f3a7ea35,Low-Rank Modeling and Its Applications in Image Analysis,ACM Comput. Surv.,2014
+196258fd1c722574680a72ae8fb4cb5132ff7a37,Neural bases of gaze and emotion processing in children with autism spectrum disorders,,2011
+197eaa59a003a4c7cc77c1abe0f99d942f716942,Web image mining towards universal age estimator,,2009
+19994e667d908bc0aacfb663ab0a2bb5ad16b221,Recognizing Complex Events in Videos by Learning Key Static-Dynamic Evidences,,2014
+19eb486dcfa1963c6404a9f146c378fc7ae3a1df,A probabilistic model of face mapping with local transformations and its application to person recognition,IEEE Transactions on Pattern Analysis and Machine Intelligence,2005
+4c6daffd092d02574efbf746d086e6dc0d3b1e91,Informedia@trecvid 201 4 Med and Mer Med System,,2015
+4c8a4f659e827a3189e14f0efd00987dc4c7785f,FeatureInsight: Visual support for error-driven feature ideation in text classification,2015 IEEE Conference on Visual Analytics Science and Technology (VAST),2015
+4cb48924acdcc0b20ef05ea5f5e856b081d9b40f,A Classification-Based Study of Covariate Shift in GAN Distributions,Unknown,2018
+4c29e1f31660ba33e46d7e4ffdebb9b8c6bd5adc,Multicolumn Networks for Face Recognition,Unknown,2018
+4cea60c30d404abfd4044a6367d436fa6f67bb89,ConTagNet: Exploiting User Context for Image Tag Recommendation,,2016
+4ceb9f530549f3edb3369fd0bf7406d55354f9c4,SceneNet: An annotated model generator for indoor scene understanding,2016 IEEE International Conference on Robotics and Automation (ICRA),2016
+263a5592cd872b9eeda2f2f01a3e782a02bad670,Contextualizing Object Detection and Classification,CVPR 2011,2011
+263ce02126d9e5f861eff30b3170eddc158018bf,Face recognition using spectral features,Pattern Recognition,2007
+266bf8847801ff302c6f91f899f36269807317ee,Online Learning for Matrix Factorization and Sparse Coding,Journal of Machine Learning Research,2010
+26d721a3ef7b694fd358b8ed42cdc0abea7f2e9e,"A multimodal biometric test bed for quality-dependent, cost-sensitive and client-specific score-level fusion algorithms",Pattern Recognition,2010
+26690f2548c6dbf630de202b40dec417b20c9b6c,Variational Inference of Disentangled Latent Concepts from Unlabeled Observations,CoRR,2017
+267c6e8af71bab68547d17966adfaab3b4711e6b,Two-stream Collaborative Learning with Spatial-Temporal Attention for Video Classification,CoRR,2017
+2682f197ab1437b3c79027320a983de8fa7a400c,Multimedia search reranking: A literature survey,ACM Comput. Surv.,2014
+26534206831483d9f5434fe2fe0839afe83cfca3,Ranking and retrieval of image sequences from multiple paragraph queries,2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR),2015
+26a89701f4d41806ce8dbc8ca00d901b68442d45,Eigenspace updating for non-stationary process and its application to face recognition,Pattern Recognition,2003
+2624d84503bc2f8e190e061c5480b6aa4d89277a,AFEW-VA database for valence and arousal estimation in-the-wild,Image Vision Comput.,2017
+2645a1c4ee285ebf4081ef1674bcf2e546908c18,"Martial Arts, Dancing and Sports dataset: A challenging stereo and multi-view dataset for 3D human pose estimation",Image Vision Comput.,2017
+215a3616f4a6b5b692282a0a7351f13071e4beda,An Efficient Technique for Calculating Exact Nearest-Neighbor Classification Accuracy,,1999
+21e828071249d25e2edaca0596e27dcd63237346,Scalable Face Image Retrieval with Identity-Based Quantization and Multireference Reranking,IEEE Transactions on Pattern Analysis and Machine Intelligence,2010
+2179afa1cb4bd6d6ff0ca8df580ae511f59d99a3,"Robust Face Localisation Using Motion, Colour & Fusion",,2003
+2102915d0c51cfda4d85133bd593ecb9508fa4bb,Looking beyond appearances: Synthetic training data for deep CNNs in re-identification,Computer Vision and Image Understanding,2018
+21e880907053301b621d318a4b81dbe1b51c3aad,A Novel Visual Word Co-occurrence Model for Person Re-identification,,2014
+21f7980a22300983e1cb0fa02a9c300045a08740,Methodological improvement on local Gabor face recognition based on feature selection and enhanced Borda count,Pattern Recognition,2011
+2160788824c4c29ffe213b2cbeb3f52972d73f37,Automatic 3D face authentication,Image Vision Comput.,2000
+21ab1e521820824b41606554e94dd0584734d100,Influence of compression on 3D face recognition,Pattern Recognition Letters,2009
+21a1e0cd24e4a1d383556fe566bb2326da18f26c,Discriminant simplex analysis,"2008 IEEE International Conference on Acoustics, Speech and Signal Processing",2008
+4d9a6c4b1f7797962bb2554cf4bb869c7ea57a0a,Resolution-aware Constrained Local Model with mixture of local experts,2013 10th IEEE International Conference on Advanced Video and Signal Based Surveillance,2013
+4d36d147297767cbe698436d77c0b93b1b47535c,Detecting People Using Mutually Consistent Poselet Activations,,2010
+4df889b10a13021928007ef32dc3f38548e5ee56,Multi-Stage Optimal Component Analysis,2007 International Joint Conference on Neural Networks,2007
+4d423acc78273b75134e2afd1777ba6d3a398973,"International Conference on Automatic Face and Gesture Recognition The CMU Pose , Illumination , and Expression ( PIE ) Database",,2002
+4dd6d511a8bbc4d9965d22d79ae6714ba48c8e41,Automatic Pixel Boosting for Face Enhancement in Dim Light,,2008
+4d90bab42806d082e3d8729067122a35bbc15e8d,Towards a dynamic expression recognition system under facial occlusion,Pattern Recognition Letters,2012
+4d4be112c180d5a4484fe6e17e506ad6e1853f08,"Improving long range and high magnification face recognition: Database acquisition, evaluation, and enhancement",Computer Vision and Image Understanding,2008
+4dcff552a198b58311b04935ea2250385f54c585,Movement Coordination in Human-Robot Teams: A Dynamical Systems Approach,IEEE Trans. Robotics,2016
+4d21a2866cfd1f0fb2a223aab9eecfdec963059a,Recognizing Lower Face Action Units for Facial Expression Analysis,,2000
+7577a1ddf9195513a5c976887ad806d1386bb1e9,Temporal Action Labeling using Action Sets,CoRR,2017
+7594466248480647c38fbddc59d30abc34f4e2fb,Алгоритм множественного трекинга пешеходов (Multi-Target Pedestrian Tracking Algorithm),,2014
+75e9a141b85d902224f849ea61ab135ae98e7bfb,Quantifying human sensitivity to spatio-temporal information in dynamic faces,Vision Research,2014
+75a66e636021bcfde447135ba9a9ed893d3bc436,Using Visual Saliency to Improve Human Detection with Convolutional Networks,,2018
+75908b6460eb0781130ed0aa94585be25a584996,Image Object Search Combining Colour with Gabor Wavelet Shape Descriptors,,2004
+75cd81d2513b7e41ac971be08bbb25c63c37029a,Human action recognition using Pose-based discriminant embedding,Sig. Proc.: Image Comm.,2012
+75595c73bdce2e07dee0a4bfd911b36b6945b949,Self-paced Learning for Weakly Supervised Evidence Discovery in Multimedia Event Search,CoRR,2016
+75a3f622f273450d020af5bc5562a69a9dc02b77,A Unified Probabilistic Framework for Automatic 3D Facial Expression Analysis based on a Bayesian Belief Inference and Statistical Feature Models,,2012
+75a59bc6938fb2071ed01a5fe8e88781e43a5c3b,Robust ear identification using sparse representation of local texture descriptors,Pattern Recognition,2013
+75d69d183a1a9e8312e21e88e40fddda0affb96a,VT-KFER: A Kinect-based RGBD+time dataset for spontaneous and non-spontaneous facial expression recognition,2015 International Conference on Biometrics (ICB),2015
+75873df8a65cf8fead79ac7ebca7f910d4fbf2a3,Activity recognition with volume motion templates and histograms of 3D gradients,2015 IEEE International Conference on Image Processing (ICIP),2015
+75e5ba7621935b57b2be7bf4a10cad66a9c445b9,Equidistant prototypes embedding for single sample based face recognition with generic learning and incremental learning,Pattern Recognition,2014
+7596c7ed735970813a1b47dcb5b998058d68f1d9,VITON: An Image-based Virtual Try-on Network,CoRR,2017
+81ed539ccd14f99ed4b2d126e4b6a0ccb4082031,Modeling the Energy Efficiency of Heterogeneous Clusters,2014 43rd International Conference on Parallel Processing,2014
+814b05113ba0397d236736f94c01e85bb034c833,Local receptive field constrained deep networks,Inf. Sci.,2016
+81831ed8e5b304e9d28d2d8524d952b12b4cbf55,Discriminative histograms of local dominant orientation (D-HLDO) for biometric image feature extraction,Pattern Recognition,2013
+81d67fa2f5eb76c9b0afb2d887e95ba78b6e46c9,Learning Implicit Generative Models with the Method of Learned Moments,Unknown,2018
+8111eb725133da1f0128967bf8cf488dbd94ce2b,Audio-visual human recognition using semi-supervised spectral learning and hidden Markov models,J. Vis. Lang. Comput.,2009
+810f5606a4769fc3dd99611acf805596fb79223d,Extraction of illumination invariant facial features from a single image using nonsubsampled contourlet transform,Pattern Recognition,2010
+86564bcb628d4ba6728babcd7c5a38d5fee39241,Visual perception of materials and their properties,Vision Research,2014
+86b87fa14321f2ca8a4e606cd4de17763dc48ace,Wavelet packets-based image retrieval,"2002 IEEE International Conference on Acoustics, Speech, and Signal Processing",2002
+86b985b285c0982046650e8d9cf09565a939e4f9,Facial Micro-Expression Detection in Hi-Speed Video Based on Facial Action Coding System (FACS),IEICE Transactions,2013
+86b51bd0c80eecd6acce9fc538f284b2ded5bcdd,Learning with Privileged Information for Multi-Label Classification,CoRR,2017
+8699268ee81a7472a0807c1d3b1db0d0ab05f40d,Channel-Recurrent Autoencoding for Image Modeling,,2017
+869583b700ecf33a9987447aee9444abfe23f343,Intrinsic Grassmann Averages for Online Linear and Robust Subspace Learning,2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR),2017
+726b8aba2095eef076922351e9d3a724bb71cb51,3DFaceNet: Real-time Dense Face Reconstruction via Synthesizing Photo-realistic Face Images,,2017
+721b109970bf5f1862767a1bec3f9a79e815f79a,A Fast Implementation of PCA-L1 Using Gram-Schmidt Orthogonalization,IEICE Transactions,2013
+729dbe38538fbf2664bc79847601f00593474b05,Complementary effects of gaze direction and early saliency in guiding fixations during free-viewing,,2014
+72fc4625f42e0b20962a26d203961bb116809de0,Sparsity Preserving Projections,,2009
+720ef31b8fb5076c861fa55f55456ccbc9174132,Face Recognition: A Literature Review,,2006
+722c33bfb4443f4f0a98ab709d40e379e7787c38,The application of eye-tracking technology in the study of autism.,The Journal of physiology,2007
+72fe1d86581e4672a534852d1e4f4680811db074,Animated Pose Templates for Modelling and Detecting Human Actions,,2013
+44b5430d98aa581ebae4295f9f6441f4acb891ff,Pose2Seg: Human Instance Segmentation Without Detection,,2018
+442f09ddb5bb7ba4e824c0795e37cad754967208,Learning from Partial Labels,Journal of Machine Learning Research,2011
+44bbb7cd8b3cba9c00ba55746867fb29df59102f,Serotonin transporter gene promoter region polymorphism and selective processing of emotional images.,Biological psychology,2010
+442d3aeca486de787de10bc41bfeb0b42c81803f,Eigenspace Interpolation for Appearance-Based Object Recognition,,2008
+442b6114ae8316c95f59acabe6de26f2b569cc02,Edit me: A Corpus and a Framework for Understanding Natural Language Image Editing,,2018
+449b1b91029e84dab14b80852e35387a9275870e,Dimensional emotion driven facial expression synthesis based on the multi-stream DBN model,Proceedings of The 2012 Asia Pacific Signal and Information Processing Association Annual Summit and Conference,2012
+44078d0daed8b13114cffb15b368acc467f96351,Triplet probabilistic embedding for face verification and clustering,"2016 IEEE 8th International Conference on Biometrics Theory, Applications and Systems (BTAS)",2016
+4469ff0b698d4752504b4b900b0cbef38ded59e4,Data association for multi-object Tracking-by-Detection in multi-camera networks,2012 Sixth International Conference on Distributed Smart Cameras (ICDSC),2012
+44eb4d128b60485377e74ffb5facc0bf4ddeb022,Database independent human emotion recognition with Meta-Cognitive Neuro-Fuzzy Inference System,"2014 IEEE Ninth International Conference on Intelligent Sensors, Sensor Networks and Information Processing (ISSNIP)",2014
+448ed201f6fceaa6533d88b0b29da3f36235e131,A generative restricted Boltzmann machine based method for high-dimensional motion data modeling,Computer Vision and Image Understanding,2015
+441e7df66fe6052a6b770c3aeca4acd8dea98643,PaMM: Pose-aware Multi-shot Matching for Improving Person Re-identification,CoRR,2017
+2aaa6969c03f435b3ea8431574a91a0843bd320b,Face Recognition using Radial Basis Function Network based on LDA,,
+2ad7cef781f98fd66101fa4a78e012369d064830,Neural Aggregation Network for Video Face Recognition,2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR),2017
+2ad29b2921aba7738c51d9025b342a0ec770c6ea,Where is my puppy? Retrieving lost dogs by facial features,Multimedia Tools and Applications,2016
+2a6bba2e81d5fb3c0fd0e6b757cf50ba7bf8e924,Compare and Contrast: Learning Prominent Differences in Relative Attributes,,2017
+2a9283b65c8f04cecc8fb6a2cca5610b18a6f677,Low-rank matrix reconstruction and clustering via approximate message passing,,2013
+2a725b002dfacc566a83c8096aa28e0af0eca8b1,Towards macro- and micro-expression spotting in video using strain patterns,2009 Workshop on Applications of Computer Vision (WACV),2009
+2ae6bcd37f5aecb84a9222331b80c84a3c65e05f,Blur and Illumination - Invariant Face Recognition via Set - Theoretic Characterization,,2013
+2a02355c1155f2d2e0cf7a8e197e0d0075437b19,On Face Recognition using Gabor Filters,,2009
+2aea27352406a2066ddae5fad6f3f13afdc90be9,Bottom-Up and Top-Down Reasoning with Hierarchical Rectified Gaussians,2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR),2016
+2aacfcdc5d06c86901852f7b666d17e97822ba23,BiCov: a novel image representation for person re-identification and face verification,,2012
+2a4b693127a28a2c56914bd2e5b99ea0f0883176,Screening Tests for Lasso Problems,IEEE Transactions on Pattern Analysis and Machine Intelligence,2017
+2f43233c1c165f225bb002874dac967736525d85,Transitive Re-identification,,2013
+2fdce3228d384456ea9faff108b9c6d0cf39e7c7,The motion in emotion - A CERT based approach to the FERA emotion challenge,,2011
+2f16459e2e24dc91b3b4cac7c6294387d4a0eacf,Fast Deep Convolutional Face Detection in the Wild Exploiting Hard Sample Mining,Big Data Research,2018
+2fa057a20a2b4a4f344988fee0a49fce85b0dc33,eHeritage of shadow puppetry: creation and manipulation,,2013
+2f7d3406a96a5f409872e13643463a4896d9a009,Fast Human Detection Combining Range Image Segmentation and Local Feature Based Detection,2014 22nd International Conference on Pattern Recognition,2014
+2fcd5cff2b4743ea640c4af68bf4143f4a2cccb1,Are You Talking to a Machine? Dataset and Methods for Multilingual Image Question,,2015
+2fdd09747f491249e706fb0df51dc6b59f0b7b23,Time-sensitive web image ranking and retrieval via dynamic multi-task regression,,2013
+2f9c173ccd8c1e6b88d7fb95d6679838bc9ca51d,Gaussian Process Domain Experts for Model Adaptation in Facial Behavior Analysis,2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW),2016
+2f598922f81e65c1f3ffbd8c2456d2e9dcd7124a,Interleaved Text/Image Deep Mining on a Large-Scale Radiology Database for Automated Image Interpretation,Journal of Machine Learning Research,2016
+2f8183b549ec51b67f7dad717f0db6bf342c9d02,3D Face Reconstruction from a Single Image Using a Single Reference Face Shape,IEEE Transactions on Pattern Analysis and Machine Intelligence,2011
+2fe2ea6e0bd939b3c2877d1fa6444b81d9940c35,Pose based activity recognition using Multiple Kernel learning,Proceedings of the 21st International Conference on Pattern Recognition (ICPR2012),2012
+2f5d44dc3e1b5955942133ff872ebd31716ec604,2D and 3D face recognition: A survey,Pattern Recognition Letters,2007
+2ffd1e152e4d322f03d09be3edfc162508b9938a,A fast proximal method for convolutional sparse coding,The 2013 International Joint Conference on Neural Networks (IJCNN),2013
+2fcd3007f197fa2c799978162d49598c4180ae69,Differences in discrimination of eye and mouth displacement in autism spectrum disorders,Vision Research,2007
+2f2e1d2eee8a5a0c389d9dfb11b81964a0754335,Supplementary Materials of Bridging the Ultimate Semantic Gap: A Semantic Search Engine for Internet Videos,,2015
+2fea258320c50f36408032c05c54ba455d575809,Recurrent Mixture Density Network for Spatiotemporal Visual Attention,CoRR,2016
+2f4be1b5655df160c31cb132172922e0f440857c,A Blur-Robust Descriptor with Applications to Face Recognition,IEEE Transactions on Pattern Analysis and Machine Intelligence,2012
+4300fa1221beb9dc81a496cd2f645c990a7ede53,A comparison of generalized linear discriminant analysis algorithms,Pattern Recognition,2008
+43016e51bc6e7939521ec3c2fcff78f35bfc5e92,Explorer Eye - movements reveal attention to social information in autism spectrum disorder,,
+43123e77108e059098194deacae1d1a6044703a2,Manifold Regularized Discriminative Nonnegative Matrix Factorization With Fast Gradient Descent,IEEE Transactions on Image Processing,2011
+4354ed06582b37e52bc23d0b1e86993d88c00e92,MLRank: Multi-correlation Learning to Rank for image annotation,Pattern Recognition,2013
+43aa40eaa59244c233f83d81f86e12eba8d74b59,Fast pose invariant face recognition using super coupled multiresolution Markov Random Fields on a GPU,Pattern Recognition Letters,2014
+43e268c118ac25f1f0e984b57bc54f0119ded520,Generalized Conditional Gradient for Sparse Estimation,Journal of Machine Learning Research,2017
+430ff8b02caf541377749673dbf71c4d95213f5e,Non-parametric image super-resolution using multiple images,IEEE International Conference on Image Processing 2005,2005
+437a720c6f6fc1959ba95e48e487eb3767b4e508,Full interpretation of minimal images.,Cognition,2018
+436d80cc1b52365ed7b2477c0b385b6fbbb51d3b,Probabilistic Knowledge Transfer for Deep Representation Learning,,2018
+43b7f3d356ae89b3772f3e64d4456ff0f442d4d3,Bi-level Relative Information Analysis for Multiple-Shot Person Re-Identification,IEICE Transactions,2013
+430c4d7ad76e51d83bbd7ec9d3f856043f054915,Two decades of local binary patterns: A survey,CoRR,2016
+885e6f1ef99d04a057d2543cbf2ffc9e7bcfb309,Upper Body Pose Estimation for Team Sports Videos Using a Poselet-Regressor of Spine Pose and Body Orientation Classifiers Conditioned by the Spine Angle Prior,IPSJ Trans. Computer Vision and Applications,2015
+88ad82e6f2264f75f7783232ba9185a2f931a5d1,Facial Expression Analysis under Partial Occlusion: A Survey,CoRR,2018
+88e1580e975ec0edab7327783f59665dc711ee7c,"An evaluation of crowd counting methods, features and regression models",Computer Vision and Image Understanding,2015
+88a9b3043a951c622667dcd5f70acd2c850b3950,A review of motion analysis methods for human Nonverbal Communication Computing,Image Vision Comput.,2013
+884e63b5371883a1502f5c39a08e5100c89a5427,New Advances in Automatic Gait Recognition,Inf. Sec. Techn. Report,2002
+8877c5afa16b025452e444e0798292fe7ee4dca6,Occlusion Robust Symbol Level Fusion for Multiple People Tracking,Unknown,2017
+88e453bd1f05fca156697a9dbab86d0c37fe3940,A Novel Neighborhood Defined Feature Selection on Phase Congruency Images for Recognition of Faces with Extreme Variations,Unknown,2006
+882c1e78bd8e89200fc639076eab19843d118432,Articulated human body parts detection based on cluster background subtraction and foreground matching,Neurocomputing,2013
+88c307c51594c6d802080a0780d0d654e2e2891f,Visual question answering: A survey of methods and datasets,Computer Vision and Image Understanding,2017
+9fb372fd2fb79571de1cc388154d4a3f0547d440,PBGen: Partial Binarization of Deconvolution-Based Generators for Edge Intelligence,,2018
+6b333b2c6311e36c2bde920ab5813f8cfcf2b67b,Pain Level Detection From Facial Image Captured by Smartphone,JIP,2016
+6b9aa288ce7740ec5ce9826c66d059ddcfd8dba9,BNU-LSVED 2.0: Spontaneous multimodal student affect database with multi-dimensional labels,Sig. Proc.: Image Comm.,2017
+6b5d7223239f02a091db8b9d3624b59994402419,Improving Twitter Sentiment Classification via Multi-Level Sentiment-Enriched Word Embeddings,CoRR,2016
+6b43dcc17e7219f6b8b76c65dc1a62271b11b2dc,Can We Boost the Power of the Viola-Jones Face Detector Using Pre-processing? An Empirical Study,CoRR,2017
+6b327af674145a34597986ec60f2a49cff7ed155,Defense-gan: Protecting Classifiers against Adversarial Attacks Using Generative Models,,2017
+6bed9d0aec57a121b7950149f294e35ddf8902a2,An Improved Face Recognition Algorithm Using Histogram-Based Features in Spatial and Frequency Domains,Unknown,2016
+6b3c5ad668d793893dd5169c771c23bc9ffeff31,Mixture of related regressions for head pose estimation,2013 IEEE International Conference on Image Processing,2013
+6bb95a0f3668cd36407c85899b71c9fe44bf9573,Face Attribute Prediction Using Off-The-Shelf Deep Learning Networks,CoRR,2016
+6b1b43d58faed7b457b1d4e8c16f5f7e7d819239,A multi-task model for simultaneous face identification and facial expression recognition,Neurocomputing,2016
+6b57526152a6093171a05499cb62840ba28da660,Weakly Supervised Object Detection with Pointwise Mutual Information,CoRR,2018
+6b35b15ceba2f26cf949f23347ec95bbbf7bed64,"RSILC: Rotation- and Scale-Invariant, Line-based Color-aware descriptor",Image Vision Comput.,2015
+6bb630dfa797168e6627d972560c3d438f71ea99,Sequential Deep Trajectory Descriptor for Action Recognition With Three-Stream CNN,IEEE Transactions on Multimedia,2017
+07eeb8f39f7d397a2ab236ce830c3b5c19adf9d7,Op-brai130094 1..16,,2013
+07c3c015cbe635ede679a87a9725a65902aa4a17,Optimal solutions for semantic image decomposition,Image Vision Comput.,2012
+071af21377cc76d5c05100a745fb13cb2e40500f,Structured Prediction for Event Detection,,2016
+079edd5cf7968ac4759dfe72af2042cf6e990efc,Delving Deeper into Convolutional Networks for Learning Video Representations,CoRR,2015
+0781498a38ac67722bb690cd04f69a80e07a55ae,Supplementary Materials: Augmenting Supervised Neural Networks with Unsupervised Objectives for Large-scale Image Classification,,2016
+07cb6efa6734b5cc22a38b0855189d12791a0551,Running head: ATTENTION TO FACE IN ASD Faces Do Not Capture Special Attention in Children with Autism Spectrum Disorder: A Change Blindness Study,,2016
+073eaa49ccde15b62425cda1d9feab0fea03a842,Delft University of Technology On detecting the playing/non-playing activity of musicians in symphonic music videos,,2017
+07c80339af2dc54c94c03c01db71a3d7d2bb9ea8,Learning Without Forgetting,,2016
+07d95be4922670ef2f8b11997e0c00eb643f3fca,The First Facial Landmark Tracking in-the-Wild Challenge: Benchmark and Results,2015 IEEE International Conference on Computer Vision Workshop (ICCVW),2015
+0756e1de70c4e3a58c78f2e9cdb2646555386724,Recognizing scene viewpoint using panoramic place representation Citation,,2012
+07fc8b4ba4a0f61cf1ea7c0bfefc556d44fb334d,Improving object detection with boosted histograms,Image Vision Comput.,2009
+38a75d92684122da464a7fb1f9adc8f6acec74da,Joint representation classification for collective face recognition,Pattern Recognition,2017
+380dd0ddd5d69adc52defc095570d1c22952f5cc,Improving Smiling Detection with Race and Gender Diversity,CoRR,2017
+38b8f80b05db035f1ba9eb2e76629ce937fc956c,Robust bilinear factorization with missing and grossly corrupted observations,Inf. Sci.,2015
+3825b2ccbf2b305fa051bd7b62306108d61a753e,Neuroimaging in child clinical populations: considerations for a successful research program.,Journal of the American Academy of Child and Adolescent Psychiatry,2012
+38fb6eada1e62e0c25c45023107ca8ab3426c162,A survey of approaches and challenges in 3D and multi-modal 3D + 2D face recognition,Computer Vision and Image Understanding,2006
+38fb67d26d27653f7aa538a1f0237e281d5a4e6d,Humans have idiosyncratic and task-specific scanpaths for judging faces,Vision Research,2015
+38558bd53b5bab485ca4abca35a0401c0c387883,Illumination Invariant Face Recognition Using Quaternion-Based Correlation Filters,Journal of Mathematical Imaging and Vision,2012
+384af919a685fbcb8dce37475a45cbf8dfe5c8f5,Using Richer Models for Articulated Pose Estimation of Footballers,,2012
+385750bcf95036c808d63db0e0b14768463ff4c6,Autoencoding beyond pixels using a learned similarity metric,,2016
+3805cd9f0db2a71bd33cb72ad6ca7bd23fe95e35,A support vector approach for cross-modal search of images and texts,Computer Vision and Image Understanding,2017
+38308a4fc038611797a5193c6d3abb593a6a3a37,Structured Sparse Linear Discriminant Analysis,2012 19th IEEE International Conference on Image Processing,2012
+38861d0d3a0292c1f54153b303b0d791cbba1d50,Making risk minimization tolerant to label noise,Neurocomputing,2015
+384112e458d887c036fb313953a217173eea5f93,Kernel Conditional Ordinal Random Fields for Temporal Segmentation of Facial Action Units,,2012
+38192a0f9261d9727b119e294a65f2e25f72d7e6,Facial feature point detection: A comprehensive survey,Neurocomputing,2018
+384156c658b312946eebab736235f03f726c787a,Static topographic modeling for facial expression recognition and analysis,Computer Vision and Image Understanding,2007
+000b27b8725432580ef9d5b9c5402fc7b76fd68b,Neural correlates of biased social fear learning and interaction in an intergroup context,,2015
+00b0ea36d426b35994b8a586a18651abf1dd1f93,Sparse Dictionary-based Attributes for Action Recognition and Summarization,CoRR,2013
+007394c2bae389cf43e46db4567dafe206355c25,MISE: Providing performance predictability and improving fairness in shared main memory systems,2013 IEEE 19th International Symposium on High Performance Computer Architecture (HPCA),2013
+00c3ccc8d7e799a39ca15415775e89e2b41a3972,Tracking many vehicles in wide area aerial surveillance,2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops,2012
+0004f72a00096fa410b179ad12aa3a0d10fc853c,Visual Interpretation of Human Body Language for Interactive Scenarios,Unknown,2012
+00ab6bb0df7fd605038d64eb5798b31481a39dd0,Delayed-Dynamic-Selective (DDS) Prediction for Reducing Extreme Tail Latency in Web Search,,2015
+005f4fb2256c6fa293e738bb53ebf437a5b98d73,CrowdCam: Instantaneous Navigation of Crowd Images Using Angled Graph Citation,,2013
+008baae7037a47f69804c2eb8438d366a6e67486,3D Human Pose Estimation via Deep Learning from 2D Annotations,2016 Fourth International Conference on 3D Vision (3DV),2016
+0020207f7e004a5f3faeee9b7c3ee86ceae88a2d,Weakly Supervised Top-down Salient Object Detection,CoRR,2016
+00cb5ee9c7f016a8ece5dd3b34e74ee65ee19e2d,Seeing it differently: visual processing in autism.,Trends in cognitive sciences,2006
+00fe3d95d0fd5f1433d81405bee772c4fe9af9c6,What value high level concepts in vision to language problems?,,2015
+0021e292c9d8fd19f5edd1cde5bc99c112f1992d,Fast multi-scale local phase quantization histogram for face recognition,Pattern Recognition Letters,2012
+006415b0ae3ac6ff9a2b482bc3d23ad15e8f09f2,Pedestrian Detection by Boosting-based Feature Co-occurrence Representation *,,2009
+0059b3dfc7056f26de1eabaafd1ad542e34c2c2e,Can Help You Change! An Empathic Virtual Agent Delivers Behavior Change Health Interventions,,2014
+00ae6ce99eb9ccefd8409e4ef5e3bbb5248821d6,Geometry Issues of Gaze Estimation,,2008
+6eaf446dec00536858548fe7cc66025b70ce20eb,GP-GAN: Gender Preserving GAN for Synthesizing Faces from Landmarks,CoRR,2017
+6e7c2f13bc2cf5547f4d8a845dc115108e52b27a,Emotional and effortful control abilities in 42-month-old very preterm and full-term children.,Early human development,2014
+6eeeb96350c676bbb9bf765851362e590e32eaed,Max-Margin Zero-Shot Learning for Multi-class Classification,,2015
+6eb5f375d67dd690ec3b134de7caecde461e8c72,Learning to Detect Concepts from Webly-Labeled Video Data,,2016
+6eaeac9ae2a1697fa0aa8e394edc64f32762f578,Constraint Score: A new filter method for feature selection with pairwise constraints,Pattern Recognition,2008
+6ee2ea416382d659a0dddc7a88fc093accc2f8ee,Graph-Preserving Sparse Nonnegative Matrix Factorization With Application to Facial Expression Recognition,"IEEE Transactions on Systems, Man, and Cybernetics, Part B (Cybernetics)",2011
+6ef28af882e408ff63f83ca670392a008d203fbc,Learning relational object categories using behavioral exploration and multimodal perception,2014 IEEE International Conference on Robotics and Automation (ICRA),2014
+6e12ba518816cbc2d987200c461dc907fd19f533,A computational approach to body mass index prediction from face images,Image Vision Comput.,2013
+6ecd8ee110381e073fe6b4e79029fbb59d2b0e02,Face recognition via adaptive sparse representations of random patches,2014 IEEE International Workshop on Information Forensics and Security (WIFS),2014
+9ac7bb9be33f41d02754bc33a39974496ead0b27,Integrated local binary pattern texture features for classification of breast tissue imaged by optical coherence microscopy,Medical image analysis,2017
+9a0331bac634f67c2a993c36da95481fe53709bf,Second-order extended Kalman filter for extended object and group tracking,2016 19th International Conference on Information Fusion (FUSION),2016
+9a99c23aaac3598180c115e3843d06faa4211fe4,Scalable Mobile Visual Classification by Kernel Preserving Projection Over High-Dimensional Features,IEEE Trans. Multimedia,2014
+9af4d310415afb925e157e7120b7aa596298888c,Neurobiological correlates of social functioning in autism.,Clinical psychology review,2010
+9a3fe4631e8507e3409631d506de3cbe793f0b42,Hybrid eye center localization using cascaded regression and hand-crafted model fitting,Image Vision Comput.,2018
+36b40c75a3e53c633c4afb5a9309d10e12c292c7,Facial Expression Recognition Based on Fusion of Multiple Gabor Features,18th International Conference on Pattern Recognition (ICPR'06),2006
+367951ba687e4e52ca4ee1327627b332afc45fae,Consistent Optical Flow Maps for Full and Micro Facial Expression Recognition,Unknown,2017
+365f67fe670bf55dc9ccdcd6888115264b2a2c56,Improving facial analysis and performance driven animation through disentangling identity and expression,Image Vision Comput.,2016
+36d76954bcb4f381f3590598d5f00bb842ffddf7,Human Pose Estimation Using Consistent Max Covering,IEEE Transactions on Pattern Analysis and Machine Intelligence,2009
+369634f497852e05d5e72b12874e2a3db2d3945f,Description of interest regions with local binary patterns,Pattern Recognition,2009
+36e4578e29adacc5b44edd3bf9f2a77561b0f2e0,Directional binary code with application to PolyU near-infrared face database,Pattern Recognition Letters,2010
+36c5421d477697a8692fe6a51ce62473e690c62f,Group Affect Prediction Using Emotion Heatmaps and Scene Information,CoRR,2017
+36ea75e14b69bed454fde6076ea6b85ed87fbb14,Face Recognition using a Kernelization of Graph Embedding Pang,,
+366d20f8fd25b4fe4f7dc95068abc6c6cabe1194,Are facial attributes adversarially robust?,2016 23rd International Conference on Pattern Recognition (ICPR),2016
+362ba8317aba71c78dafca023be60fb71320381d,Nighttime face recognition at large standoff: Cross-distance and cross-spectral matching,Pattern Recognition,2014
+3624ca25f09f3acbcf4d3a4c40b9e45a29c22b94,Face recognition using second-order discriminant tensor subspace analysis,Neurocomputing,2011
+5c4ce36063dd3496a5926afd301e562899ff53ea,A Survey on Content-Aware Video Analysis for Sports,CoRR,2017
+5c2a7518fb26a37139cebff76753d83e4da25159,De-identification for privacy protection in multimedia content: A survey,Sig. Proc.: Image Comm.,2016
+5cfe70ccacd302938620662190c573cb6f19bdfb,Searching the Past: An Improved Shape Descriptor to Retrieve,,2011
+5c473cfda1d7c384724fbb139dfe8cb39f79f626,Facial expression recognition based on meta probability codes,Pattern Analysis and Applications,2012
+5cd47df260e65b2650a1123a2136ee5bc918d4c6,Deep learning for source camera identification on mobile devices,CoRR,2017
+5cd2425bfbfbc1413c5c853d27c35f8ce5d8f144,Face Recognition Using Discrete Orthogonal Hahn Moments,Unknown,2015
+5ce80b41443518a14d800f6b93b4057bbb007432,BenchIP: Benchmarking Intelligence Processors,Journal of Computer Science and Technology,2018
+5ce035891b920e4728a50af7e4afb54e088f5183,Modelling the perceptual similarity of facial expressions from image statistics and neural responses,NeuroImage,2016
+097dc32f712550f655facf74212a70ce3828d98c,Image classification using object detectors,2013 IEEE International Conference on Image Processing,2013
+09ae4b2c851a06e0bde3f4e00b9b7c6e5ac3ddac,Recognition of natural scenes from global properties: Seeing the forest without representing the trees,,2009
+09e63de98c7551079486f66bddb62a253fc596b7,Efficient parametrization of multi-domain deep neural networks,CoRR,2018
+0952ac6ce94c98049d518d29c18d136b1f04b0c0,Incremental Kernel PCA for Efficient Non-linear Feature Extraction,,2006
+09af91e913324255bd8358e62cd3b8a25f7141ec,Comparing strategies for 3D face recognition from a 3D sensor,2013 IEEE RO-MAN,2013
+09fbcd901db726caec1f3bcbda5266ca72c7deb6,Fusion of Heterogeneous Data in Convolutional Networks for Urban Semantic Labeling (Invited Paper),CoRR,2017
+0922e7d583d02f6078e59974a3de4452382ca9dd,Local approach for face verification in polar frequency domain,Image Vision Comput.,2006
+0997f69e081bc460923a34e55b525a2aa3c4548a,Learning locality-constrained collaborative representation for robust face recognition,Pattern Recognition,2014
+0949548b95e225dcb0ab88ba21f385ac6b5d81ae,Learning Driving Models with a Surround-View Camera System and a Route Planner,,2018
+0921548f06db5d4959126c823cda0bbeae542937,Cognitive Science in the era of Artificial Intelligence: A roadmap for reverse-engineering the infant language-learner,Cognition,2018
+091d0c7b3576fd6f3bb2bec344deb8f81fc1f7c6,Yin and Yang: Balancing and Answering Binary Visual Questions,2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR),2016
+097f1f58f1cb8dc9e0622e001d7f6cbb624d542c,Partner naming and forgetting: Recall of network members,Social networks,2007
+09fded4954d2df2ccabf5812a0cf5040e627a312,Face Recognition With Contiguous Occlusion Using Markov Random Fields,,2009
+09a05ecae987d9ababf5fe52323f69fa3e889d83,Part Bricolage: Flow-Assisted Part-Based Graphs for Detecting Activities in Videos,,2014
+09798b13739edabd55830fc5589d8ed263d62c82,"A cross-syndrome study of the development of holistic face recognition in children with autism, Down syndrome, and Williams syndrome.",Journal of experimental child psychology,2009
+097104fc731a15fad07479f4f2c4be2e071054a2,Texture and shape information fusion for facial expression and facial action unit recognition,Pattern Recognition,2008
+09111da0aedb231c8484601444296c50ca0b5388,"Joint estimation of age, gender and ethnicity: CCA vs. PLS",2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG),2013
+092a4a0f16287c26dcc833958b87b32346546c8b,Multiple Object Tracking with Kernelized Correlation Filters in Urban Mixed Traffic,2017 14th Conference on Computer and Robot Vision (CRV),2017
+09fb440dd2daf2b93e36dd5df93950f0f3bda685,Symmetric low-rank representation for subspace clustering,Neurocomputing,2016
+5db25e8c1e45bcdb64b743f81dbdc69f32c70004,Shadow detection: A survey and comparative evaluation of recent methods,Pattern Recognition,2012
+5d485501f9c2030ab33f97972aa7585d3a0d59a7,Learning Bayesian network parameters under incomplete data with domain knowledge,Pattern Recognition,2009
+5d9d1b95d5afd58f6e53512b7ddd04b78d62864c,Fast approximate k-means via cluster closures,2012 IEEE Conference on Computer Vision and Pattern Recognition,2012
+5dc056fe911a3e34a932513abe637076250d96da,Real-time facial feature detection using conditional regression forests,2012 IEEE Conference on Computer Vision and Pattern Recognition,2012
+5d76a25936366c6619d2b5e6b74106cdb53a4978,Detecting violent and abnormal crowd activity using temporal analysis of grey level co-occurrence matrix (GLCM)-based texture measures,Machine Vision and Applications,2017
+5dbae8c58341d1f6e475ebd03aa6b8abbc9d149d,Hallucinating optimal high-dimensional subspaces,Pattern Recognition,2014
+5dcf78de4d3d867d0fd4a3105f0defae2234b9cb,A method for improving consistency in photometric databases,,2012
+5d7070067a75f57c841d0d30b23e21101da606b2,Generative Modeling using the Sliced Wasserstein Distance,CoRR,2018
+5d8ab5c473eb9e083ceb35ebeb00a062114ee6ac,A Reinforcement Learning Approach to Target Tracking in a Camera Network,CoRR,2018
+5d44c675addcb6e74cbc5a9c48df0d754bdbcd98,Emotion Classification using Adaptive SVMs,,2012
+5dc14823862ff1f07dec483d5b4860727055ea79,Multi-Instance Dynamic Ordinal Random Fields for Weakly-supervised Facial Behavior Analysis,,2018
+5d01283474b73a46d80745ad0cc0c4da14aae194,Classification schemes based on Partial Least Squares for face identification,J. Visual Communication and Image Representation,2015
+31aa20911cc7a2b556e7d273f0bdd5a2f0671e0a,Patch-based Face Recognition using a Hierarchical Multi-label Matcher,,2018
+31b05f65405534a696a847dd19c621b7b8588263,UMDFaces: An annotated face dataset for training deep networks,2017 IEEE International Joint Conference on Biometrics (IJCB),2017
+3180192694594f345f6fc5bed5a473762dfec522,An online spatio-temporal tensor learning model for visual tracking and its applications to facial expression recognition,Expert Syst. Appl.,2017
+31b0f482908d16d82826f2fc5fba67128cb07e4e,Context Generation with Image Based Sensors: An Interdisciplinary Enquiry on Technical and Social Issues and their Implications for System Design,,2012
+31e57fa83ac60c03d884774d2b515813493977b9,Face alignment with cascaded semi-parametric deep greedy neural forests,Pattern Recognition Letters,2018
+318b52b1f37669c24415f4aab6266c72a3b255fe,Oxytocin's impact on social face processing is stronger in homosexual than heterosexual men,Psychoneuroendocrinology,2014
+31cd61f05ea86a3eb08e06f1d0c2aa810805282f,On the effect of hyperedge weights on hypergraph learning,Image Vision Comput.,2017
+31b58ced31f22eab10bd3ee2d9174e7c14c27c01,Nonparametric Object and Scene Recognition,,2008
+3148c4ca284d6521769dfde54e3e7693228bda06,Neurodevelopmental changes across adolescence in viewing and labeling dynamic peer emotions,Developmental Cognitive Neuroscience,2017
+318a81acdd15a0ab2f706b5f53ee9d4d5d86237f,Multi-label learning: a review of the state of the art and ongoing research,Wiley Interdisc. Rew.: Data Mining and Knowledge Discovery,2014
+31af1f2614823504d1d643d1b019c6f9d2150b15,Super-FAN: Integrated facial landmark localization and super-resolution of real-world low resolution faces in arbitrary poses with GANs,CoRR,2017
+312afff739d1e0fcd3410adf78be1c66b3480396,Facial Attributes: Accuracy and Adversarial Robustness,CoRR,2018
+31a22514efe2b25088a91d8d4db9bb31ae1e9575,Proposing Plausible Answers for Open-ended Visual Question Answering,CoRR,2016
+311fcda76dc7b7cf50b17c705a2aaaaab5ed6a04,Learning Distributed Representations of Sentences from Unlabelled Data,,2016
+313387fc6c5b5561f23fdc63a546b18f54f6bebc,Convex Non-negative Matrix Factorization in the Wild,2009 Ninth IEEE International Conference on Data Mining,2009
+31bb49ba7df94b88add9e3c2db72a4a98927bb05,Static and dynamic 3D facial expression recognition: A comprehensive survey,Image Vision Comput.,2012
+91816b4b5fb74710144b3294dec61aab4de12fd3,"The more you learn, the less you store: Memory-controlled incremental SVM for visual place recognition",Image Vision Comput.,2010
+91883dabc11245e393786d85941fb99a6248c1fb,Face alignment in-the-wild: A Survey,Computer Vision and Image Understanding,2017
+919d0e681c4ef687bf0b89fe7c0615221e9a1d30,Fractal Techniques for Face Recognition,,2009
+912a6a97af390d009773452814a401e258b77640,An on-line variational Bayesian model for multi-person tracking from cluttered scenes,Computer Vision and Image Understanding,2016
+913352e569d3e5eeb90de2a7979533355e02acc9,3D Motion Data aided Human Action Recognition and Pose Estimation,,2013
+918b72a47b7f378bde0ba29c908babf6dab6f833,Uncorrelated trace ratio linear discriminant analysis for undersampled problems,Pattern Recognition Letters,2011
+91d2fe6fdf180e8427c65ffb3d895bf9f0ec4fa0,Tensor reduction error analysis - Applications to video compression and classification,,2008
+91027fd707aed714c9095551e3d63b3e18ee138b,Prediction as a Rule for Unsupervised Learning in Deep Neural Networks,,2017
+654ad3b6f7c6de7184a9e8eec724e56274f27e3f,Alternating Back-Propagation for Generator Network,,2017
+6512f42fd70b42300ea3f318e860d270cd6d3b0a,Joint multi-person detection and tracking from overlapping cameras,Computer Vision and Image Understanding,2014
+65ef33636f07d4d1aa1b22a5b67f1f402d6a5900,PartBook for image parsing,2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops,2012
+65ee4de888e5b934429dcb126ee0ae544156c9bd,Face recognition using linear representation ensembles,Pattern Recognition,2016
+625d68fdb0db5c3ad27c8defd608c3841086392d,Revisiting Additive Quantization,,2016
+620339aef06aed07a78f9ed1a057a25433faa58b,Human Action Recognition and Prediction: A Survey,CoRR,2018
+62b3598b401c807288a113796f424612cc5833ca,"X2Face: A network for controlling face generation by using images, audio, and pose codes",CoRR,2018
+6257a622ed6bd1b8759ae837b50580657e676192,Unsupervised Learning aids Prediction: Using Future Representation Learning Variantial Autoencoder for Human Action Prediction,CoRR,2017
+62e878445851c9d5e89a0ef8d49f11acd77e78ec,A log square average case algorithm to make insertions in fast similarity search,Pattern Recognition Letters,2012
+620e1dbf88069408b008347cd563e16aeeebeb83,FaceDCAPTCHA: Face detection based color image CAPTCHA,Future Generation Comp. Syst.,2014
+96c298354bee7c6c8dcc58f8fa749cfa75f5452e,Semantic segmentation of images exploiting DCT based features and random forest,Pattern Recognition,2016
+96f77524d0a26c27775162b1474915c1452f346f,Learning multiple visual domains with residual adapters,Unknown,2017
+96faccdddef887673d6007fed8ff2574580cae1f,"Multi-path Region-Based Convolutional Neural Network for Accurate Detection of Unconstrained ""Hard Faces""",,2017
+960ad662c2bb454d69006492cc3f52d1550de55d,Visualizing Graphs and Clusters as Maps,IEEE Computer Graphics and Applications,2010
+964a3196d44f0fefa7de3403849d22bbafa73886,Uncorrelated slow feature discriminant analysis using globality preserving projections for feature extraction,Neurocomputing,2015
+969dd8bc1179c047523d257516ade5d831d701ad,A weakly supervised method for makeup-invariant face verification,Pattern Recognition,2017
+9636c7d3643fc598dacb83d71f199f1d2cc34415,Automatic facial attribute analysis via adaptive sparse representation of random patches,Pattern Recognition Letters,2015
+3a8245748a5b682845784dab131f6d8240b09f7a,Content based Medical Image Retrieval: use of Generalized Gaussian Density to model BEMD's IMF,,2009
+3aa9d370378bce52238f2a8290926949ab38f0ae,A two-stage linear discriminant analysis for face-recognition,Pattern Recognition Letters,2012
+3af9e70e81ea67729953c9c0e5269881b35e3cc7,Coupling-and-decoupling: A hierarchical model for occlusion-free object detection,Pattern Recognition,2014
+3a415f3fc013bf3d045d9a45c7ed5d83996f4556,Expression modeling for expression-invariant face recognition,Computers & Graphics,2010
+3a3087c03f0403c3e180f47f9001509e852b82b3,Likelihood ratio based mixed resolution facial comparison,3rd International Workshop on Biometrics and Forensics (IWBF 2015),2015
+3a2f235fa82b41aee2a45194c1b159f777abffe0,Kernel sparse representation with pixel-level and region-level local feature kernels for face recognition,Neurocomputing,2014
+3a3c47b6da1ea1b8d57ce41d9ddb54a774e1914d,High-Level Prediction Signals in a Low-Level Area of the Macaque Face-Processing Hierarchy.,Neuron,2017
+3adbf4ed5e4e3f59afb7509119667c8701c7cf37,Activity-conditioned continuous human pose estimation for performance analysis of athletes using the example of swimming,CoRR,2018
+544fd5065c0f4f6b0a9ba1805785b5ef3cd68231,Bag-of-Genres for Video Genre Retrieval,CoRR,2015
+549afb73666202ec3c02a59de611387f723c1cf9,"Compact, Adaptive and Discriminative Spatial Pyramid for Improved Scene and Object Classification",,2012
+542a2ddc53d80d58a8791ab1a72dad660035e114,A Survey of Recent Advances in CNN-based Single Image Crowd Counting and Density Estimation,CoRR,2017
+5480bfe964e85770615a73837e5451888bfaf689,The Detection of Concept Frames Using Clustering Multi-instance Learning,2010 20th International Conference on Pattern Recognition,2010
+54760ceffc46a7a5425260834840fcfe910e0f3b,Viewing it differently: social scene perception in Williams syndrome and autism.,Neuropsychologia,2008
+543a005dd1c6118c73e099e65119ae10c790969e,The Effect of Image Resolution on the Performance of a Face Recognition System,"2006 9th International Conference on Control, Automation, Robotics and Vision",2006
+54bac87151febb2e9eecf237d6498f8ed8ac3b1e,A case study on appearance based feature extraction techniques and their susceptibility to image degradations for the task of face recognition,Unknown,2009
+98b2f21db344b8b9f7747feaf86f92558595990c,Semantically Decomposing the Latent Spaces of Generative Adversarial Networks,CoRR,2017
+987f73c1e17540716f47e2b4bd434a09ceab5074,Extensive articulated human detection by voting Cluster Boosted Tree,2009 Workshop on Applications of Computer Vision (WACV),2009
+980fd3fb067215017af8d13381e1d95fe3a34727,SPECIAL SECTION BRIEF REPORT Autism and the Extraction of Emotion From Briefly Presented Facial Expressions: Stumbling at the First Step of Empathy,,2008
+98aa4bc56aa6aa15735727a91bb3711bc90e73b2,Fast Low-Rank Subspace Segmentation,IEEE Transactions on Knowledge and Data Engineering,2014
+98a120802aef324599e8b9014decfeb2236a78a3,Crowdsourced Facial Expression Mapping Using a 3D Avatar,,2016
+982fed5c11e76dfef766ad9ff081bfa25e62415a,Undersampled Face Recognition via Robust Auxiliary Dictionary Learning,IEEE Transactions on Image Processing,2015
+9857eeded6b7608ff862174742b38946102f5008,Interpretable Facial Relational Network Using Relational Importance,CoRR,2017
+53288f4c3bcb993f8561b4af1776ec3145d7a051,Im2Text and Text2Im: Associating Images and Texts for Cross-Modal Retrieval (Extended Abstract),,2014
+531a40720f2809c560840e6d3afb11a31ad0b9a0,Development of social attention 1 Running head: DEVELOPMENT OF SOCIAL ATTENTION Measuring the development of social attention using free-viewing,,2010
+537fb9d35e56be9436b42a9e5e3405523c2f1e0e,Visual Analysis of Eye State and Head Pose for Driver Alertness Monitoring,IEEE Transactions on Intelligent Transportation Systems,2013
+5334ac0a6438483890d5eef64f6db93f44aacdf4,Minh Hoai: Regularizedmax Pooling for Image Categorization,,2014
+53dd25350d3b3aaf19beb2104f1e389e3442df61,Evolutionary Eigenspace Learning using CCIPCA and IPCA for Face Recognition,Unknown,2009
+53f0d493c64c908c479f28b4b0cc38aa2124697d,"mdBRIEF - a fast online-adaptable, distorted binary descriptor for real-time applications using calibrated wide-angle or fisheye cameras",Computer Vision and Image Understanding,2017
+536b37fe90a2f0bd8b40b7eb7ecf89b25a1c8ede,Computer Science and Artificial Intelligence Laboratory Receptive Field Structures for Recognition,,2005
+531fd9be964d18ba7970bd1ca6c3b9dc91b8d2ab,From the heart to the mind's eye: cardiac vagal tone is related to visual perception of fearful faces at high spatial frequency.,Biological psychology,2012
+530243b61fa5aea19b454b7dbcac9f463ed0460e,ReenactGAN: Learning to Reenact Faces via Boundary Transfer,CoRR,2018
+539ca9db570b5e43be0576bb250e1ba7a727d640,A Large-Scale Database of Images and Captions for Automatic Face Naming,,2011
+53c8cbc4a3a3752a74f79b74370ed8aeed97db85,Learning person-specific models for facial expression and action unit recognition,Pattern Recognition Letters,2013
+53e8781bb152e8e05ffe03737082448ac3378e37,A unified framework for event summarization and rare event detection,2012 IEEE Conference on Computer Vision and Pattern Recognition,2012
+530ce1097d0681a0f9d3ce877c5ba31617b1d709,A component based approach for classifying the seven universal facial expressions of emotion,2013 IEEE Symposium on Computational Intelligence for Creativity and Affective Computing (CICAC),2013
+3f4b8fe5edfac918c1c74317242b2d91346d5fb6,Adaptive discriminant analysis for face recognition from single sample per person,Face and Gesture 2011,2011
+3fd970da1fd9ebcf1b97f4d16f5274b25666471b,Clothing-invariant gait identification using part-based clothing categorization and adaptive weight control,Pattern Recognition,2010
+3fd203807fab28243f84d2360572796869ccde90,Deep Video Code for Efficient Face Video Retrieval,,2016
+3f775e3be9e1a00ebf4fd281e524932e88cec0ae,Deep Contextual Recurrent Residual Networks for Scene Labeling,CoRR,2017
+3f5cf3771446da44d48f1d5ca2121c52975bb3d3,All the Images of an Outdoor Scene,,2002
+3f7c4fb00be2124fe8e2e9d48caf86265b6471b7,Active Subspace: Toward Scalable Low-Rank Learning,Neural computation,2012
+3f85020032ae335baf57aaf65c4831b67e4030c9,MonoPerfCap: Human Performance Capture from Monocular Video,CoRR,2017
+3f8537c2141ba19a03876c7bb5c1e71a01b56838,Face image super-resolution using 2D CCA,Signal Processing,2014
+3f45d73a7b8d10a59a68688c11950e003f4852fc,Joint Dimension Reduction and Metric Learning for Person Re-identification,CoRR,2014
+3f04caa9d17e6b26e4446578c020bf3b35df9de3,Video Captioning with Multi-Faceted Attention,CoRR,2016
+30801beeb4436ce1f15e641b74a3daae836b0a0d,Deep Convolutional Inverse Graphics Network,,2015
+3039381ced50a910234ceca5133a69aceb324faf,3D face recognition using passive stereo vision,IEEE International Conference on Image Processing 2005,2005
+307dae1bfa57c0c5dcf2abd22f2e16f7e894fa29,Local Kernel Feature Analysis (LKFA) for object recognition,Neurocomputing,2011
+303065c44cf847849d04da16b8b1d9a120cef73a,"3D Face Morphable Models ""In-the-Wild""",,2017
+30aa681ab80a830c3890090b0da3f1e786bd66ff,Unconstrained Face Detection and Open-Set Face Recognition Challenge,2017 IEEE International Joint Conference on Biometrics (IJCB),2017
+300ce2d1ff744fea95fda05d2f3d48766c283042,Semantic Stixels: Depth is not enough,2016 IEEE Intelligent Vehicles Symposium (IV),2016
+30b74c53bd7a9b364920e5074b52b3f737a71c89,LSTM Pose Machines,CoRR,2017
+30af3e6e0165ebc9a641420d14ca285105550205,LIFT: A new framework of learning from testing data for face recognition,Neurocomputing,2011
+3093a57be04309e2380ac98b568dd8fcb8077ada,Leveraging local neighborhood topology for large scale person re-identification,Pattern Recognition,2014
+30e6cf0c3cb38997acb05a2f5ed86269643ae3ed,Weakly Supervised Semantic Labelling and Instance Segmentation,CoRR,2016
+3068dad264ece487e21fbb689d8f47d498c5aaa4,Minmin Chen 2013,,2013
+30b74e60ec11c0ebc4e640637d56d85872dd17ce,Large-Scale Human Activity Mapping using Geo-Tagged Videos,CoRR,2017
+304baa0481562d468fb7cfa1f89e726f82701a39,Towards Optimal Symbolization for Time Series Comparisons,2013 IEEE 13th International Conference on Data Mining Workshops,2013
+304a306d2a55ea41c2355bd9310e332fa76b3cb0,Variable-state Latent Conditional Random Field models for facial expression analysis,Image Vision Comput.,2017
+3064424c1abe01dd2f4d2c9022f5ee1312e3cec9,Configural processing in autism and its relationship to face processing.,Neuropsychologia,2006
+5e4fb9b216657cbed1125b3be359ee482168c3e3,Discriminative graph regularized extreme learning machine and its application to face recognition,Neurocomputing,2015
+5e0f8c355a37a5a89351c02f174e7a5ddcb98683,Microsoft COCO: Common Objects in Context,,2014
+5e56c8776b5aa6edce068255134ea31670755b0c,Robust indoor speaker recognition in a network of audio and video sensors,Signal Processing,2016
+5eb25ec961c6a86c93001a44d38b3eb894e7e5fb,FPGA Hardware with Target-Reconfigurable Object Detector,IEICE Transactions,2015
+5e821cb036010bef259046a96fe26e681f20266e,The Local Binary Pattern Approach and its Applications to Face Analysis,"2008 First Workshops on Image Processing Theory, Tools and Applications",2008
+5ec89f73a8d1e817ebf654f91318d28c9cfebead,Semantically Guided Depth Upsampling,,2016
+5b8164fc9c65cd96fb529c3c8db551027009d4d1,A Low-Dimensional Representation for Robust Partial Isometric Correspondences Computation,Graphical Models,2014
+5b90bf3ebad1583beebcae5f892db2add248bcad,", C . F . F . Costa Filho and M . G . F . Costa Evaluation of Haar Cascade Classifiers Designed for Face Detection",,
+5b74a57508069c719cff3c5410984be76f6b7785,Monotonicity and error type differentiability in performance measures for target detection and tracking in video,2012 IEEE Conference on Computer Vision and Pattern Recognition,2012
+5b35bde1c144cbc96f25c5359ff44e898191dbe1,Semi-Supervised Active Learning with Cross-Class Sample Transfer,,2016
+5b27999b3f066137de537e78113faf4bd942b7c7,EEG analysis for implicit tagging of video data,2009 3rd International Conference on Affective Computing and Intelligent Interaction and Workshops,2009
+5b5962bdb75c72848c1fb4b34c113ff6101b5a87,Finding Celebrities in Billions of Web Images,IEEE Transactions on Multimedia,2012
+5bcc8ef74efbb959407adfda15a01dad8fcf1648,Understanding Deep Architectures by Interpretable Visual Summaries,CoRR,2018
+5b2cfee6e81ef36507ebf3c305e84e9e0473575a,GoDP: Globally Optimized Dual Pathway deep network architecture for facial landmark localization in-the-wild,Image Vision Comput.,2018
+5b14d9264ea1020f05d4e2fc6144e5021986d917,Gabor feature constrained statistical model for efficient landmark localization and face recognition,Pattern Recognition Letters,2009
+5bdb6ad866f52a3fa439e81a88b11d7a78904b07,A video-based door monitoring system using local appearance-based face models,Computer Vision and Image Understanding,2010
+5b5d8b55e3365f74f4dbdbfdf7b72452a688692f,DelugeNets: Deep Networks with Massive and Flexible Cross-layer Information Inflows,CoRR,2016
+5b0ebb8430a04d9259b321fc3c1cc1090b8e600e,The One-Shot similarity kernel,2009 IEEE 12th International Conference on Computer Vision,2009
+3765c26362ad1095dfe6744c6d52494ea106a42c,I know what you did last summer: object-level auto-annotation of holiday snaps,2009 IEEE 12th International Conference on Computer Vision,2009
+3727ac3d50e31a394b200029b2c350073c1b69e3,Facial Expression Recognition from World Wild Web,2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW),2016
+37a892395061206b58127f04fee0e4d2db33803a,Holistic Human Pose Estimation with Regression Forests,,2014
+37faa075574e4abd323ff5ec85934464a5b93a83,Tracking of Facial Feature Points by Combining Singular Tracking Results with a 3D Active Shape Model,,2010
+377c6563f97e76a4dc836a0bd23d7673492b1aae,Motion deblurring of faces,,2018
+37fe5b28d4531c93668d4a56d2e3411c2c5978b0,Efficient and Effective Gabor Feature Representation for Face Detection,,2012
+3749eb18758e0f8e97b086e6b36a98fda6e6f945,Emotion Classification using Adaptive SVMs,Unknown,2012
+37fdb70003ab93267ee6c75a333cb62d9e4d0798,Impaired sadness recognition is linked to social interaction deficit in autism.,Neuropsychologia,2007
+3719960f974173f23b88a207a42d67d7a393a89a,Towards better exploiting convolutional neural networks for remote sensing scene classification,Pattern Recognition,2017
+37a3e7a01655b4806df2b95aad193a2965e48a5c,Spatial-Temporal Memory Networks for Video Object Detection,CoRR,2017
+08fe9658c086b842980e86c66bde3cef95bb6bec,Deformable part models are convolutional neural networks,2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR),2015
+08bf83aeb7ec36815afd1183d8268ab4e10a2961,Learning invariant face recognition from examples.,Neural networks : the official journal of the International Neural Network Society,2013
+084145b7b828d93a5bb5f5dd04f3ccd003dcd5c1,Joint Deep Learning for Car Detection,CoRR,2014
+0878d67f1bca06d3ea8a9354901fba9bf0135cd4,On Available Corpora for Empirical Methods in Vision & Language,CoRR,2015
+081a431107eb38812b74a8cd036ca5e97235b499,Nonnegative Matrix Factorization in Polynomial Feature Space,IEEE Transactions on Neural Networks,2008
+0831a511435fd7d21e0cceddb4a532c35700a622,Structured occlusion coding for robust face recognition,Neurocomputing,2016
+08c66211b17a0ac7cad53995b15b0098cad8135a,Tri-Subject Kinship Verification: Understanding the Core of A Family,IEEE Transactions on Multimedia,2015
+08c1f8f0e69c0e2692a2d51040ef6364fb263a40,Beyond Eigenfaces: Probabilistic Matching for Face Recognition,,1998
+0830c9b9f207007d5e07f5269ffba003235e4eff,Jointly Learning Multiple Measures of Similarities from Triplet Comparisons,,2015
+08b25aa0cca422d3a896aa1fdd865a7e970666db,Hybrid Linear Modeling via Local Best-Fit Flats,International Journal of Computer Vision,2012
+081fb4e97d6bb357506d1b125153111b673cc128,Island Loss for Learning Discriminative Features in Facial Expression Recognition,CoRR,2017
+08d25f86d9ba5d2443bd3852aab01334a3a96dce,A Multiple Component Matching Framework for Person Re-identification,,2011
+08a78e0c57d0b6474c09ef8c6d118b3e95da1e18,Biometric Based Cryptographic Key Generation from Faces,9th Biennial Conference of the Australian Pattern Recognition Society on Digital Image Computing Techniques and Applications (DICTA 2007),2007
+082ad50ac59fc694ba4369d0f9b87430553b11db,Discriminative dictionary learning with low-rank regularization for face recognition,2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG),2013
+6d8a42dce4d79435c42bf8eefddbea0e38951f4e,Pixelwise Instance Segmentation with a Dynamically Instantiated Network,2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR),2017
+6de18708218988b0558f6c2f27050bb4659155e4,Learning Long-Term Dependencies for Action Recognition with a Biologically-Inspired Deep Network,2017 IEEE International Conference on Computer Vision (ICCV),2017
+6d91da37627c05150cb40cac323ca12a91965759,Gender Politics in the 2016 U.S. Presidential Election: A Computer Vision Approach,,2017
+6d43831c4501ff44ed0ea70ef696e1c496b68a1d,Exploiting Privileged Information from Web Data for Image Categorization,,2014
+016cbf0878db5c40566c1fbc237686fbad666a33,Efficient illumination independent appearance-based face tracking,Image Vision Comput.,2009
+01e77cd46ab75bab8f4b176455f0daa592e5f979,Modelling search for people in 900 scenes: A combined source model of eye guidance,,2009
+01e12be4097fa8c94cabeef0ad61498c8e7762f2,Simultaneous Active Learning of Classifiers & Attributes via Relative Feedback,,2013
+01d83dcb526a8b751df80ec493caf1937ba99155,Decentralized Sensor Fusion for Ubiquitous Networking Robotics in Urban Areas,,2010
+01beab8f8293a30cf48f52caea6ca0fb721c8489,Face alignment using local hough voting,2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG),2013
+0178929595f505ef7655272cc2c339d7ed0b9507,Label distribution based facial attractiveness computation by deep residual learning,CoRR,2016
+014892cc24798d04a066c50e33630bb3f1b882fe,Content Based Image Retrieval Using Multiscale Top Points A Feasibility Study,,2003
+01b4b32c5ef945426b0396d32d2a12c69c282e29,Blockwise Linear Regression for Face Alignment,,2013
+01e5c95aa20a44eed21b5037697043e57f606f3a,A fine-grained analysis of facial expression processing in high-functioning adults with autism.,Neuropsychologia,2007
+0170158c227ee1ccf0a6a2d642699ff184c84bab,3D/4D facial expression analysis: An advanced annotated face model approach,Image Vision Comput.,2012
+018e730f8947173e1140210d4d1760d05c9d3854,Zero-shot recognition with unreliable attributes,,2014
+06be17bcc4136476855fc594759dddc6f8b6150f,MMGAN: Manifold Matching Generative Adversarial Network for Generating Images,CoRR,2017
+0632a9ace74f540e8793f89a84bb7555ba9deece,Weakly Supervised Localization and Learning with Generic Knowledge,International Journal of Computer Vision,2012
+06d30fda7559ae1a6ac49ff7a9fb9280aaad2be8,Supplementary material: Strengthening the Effectiveness of Pedestrian Detection with Spatially Pooled Features,,2014
+06ce9ba74589ca179296318a76e882fe610b729b,Adaptive affinity matrix for unsupervised metric learning,2016 IEEE International Conference on Multimedia and Expo (ICME),2016
+069bb452e015ef53f0ef30e9690e460ccc73cf03,Multicolumn Networks for Face Recognition,Unknown,2018
+0679d05c11c8cd54a597fea870a23b3556c07e1a,Indexing through laplacian spectra,Computer Vision and Image Understanding,2008
+0665853ee87112bc27a9aaec70672f521b91d38e,Çoklu Gauss Karışım Modeli Tabanlı Yüz Öznitelikleri Bulma Algoritması Multi-stream Gaussian Mixture Model based Facial Feature Localization,,2009
+06ab50dccff619c58bb699ee182824b5dca65000,Developing constructs for psychopathology research: research domain criteria.,Journal of abnormal psychology,2010
+06526c52a999fdb0a9fd76e84f9795a69480cecf,IMOTION - A Content-Based Video Retrieval Engine,,2015
+068f8b19a3847a2eaf0c65f6d85ec60060750d3c,3D Face Recognition using Log-Gabor Templates,,2006
+065b4890957866a831ccf35694056dcec6f48acc,Road Damage Detection Using Deep Neural Networks with Images Captured Through a Smartphone,CoRR,2018
+0677dd5377895b3c61cea0e6a143f38b84f1ebd7,Super-Resolution via Deep Learning,CoRR,2017
+0653dcdff992ad980cd5ea5bc557efb6e2a53ba1,Regularized Robust Coding and Dictionary Learning for Face Recognition,,2012
+063792ff9a139a5b8375afcd35e4ae6c8d83c352,Rapid stereo-vision enhanced face detection,2009 16th IEEE International Conference on Image Processing (ICIP),2009
+06e5d9ad3363b8834229bf7e055a94092994e097,A comparative study on texture and surface descriptors for ear biometrics,2014 International Carnahan Conference on Security Technology (ICCST),2014
+06bdbcfc590359a8f5d10c482d1f010c61f829a4,Long-Range Pedestrian Detection using stereo and a cascade of convolutional network classifiers,2012 IEEE/RSJ International Conference on Intelligent Robots and Systems,2012
+06cea45f1b965b9820d80ca1107661b54cdb7e8e,Semantic hierarchies for image annotation: A survey,Pattern Recognition,2012
+06e7648e945b39b8ccaa9120c796adc170dc81e4,DVQA: Understanding Data Visualizations via Question Answering,CoRR,2018
+6c06452671a501edd6fb66c2c05ded614045a9ec,Pseudo 2D Hidden Markov Model and Neural Network Coefficients in Face Recognition,,2010
+6cda4d23983298ef2c9bd719805e66f4fda7e6fc,Distinct Class-Specific Saliency Maps for Weakly Supervised Semantic Segmentation,,2016
+6c896ca9bafd7479c8291d0448e2910117ee059f,Pedestrian detection with a Large-Field-Of-View deep network,2015 IEEE International Conference on Robotics and Automation (ICRA),2015
+39ce143238ea1066edf0389d284208431b53b802,Facial expression transfer method based on frequency analysis,Pattern Recognition,2016
+39ce2232452c0cd459e32a19c1abe2a2648d0c3f,Neural computation as a tool to differentiate perceptual from emotional processes: the case of anger superiority effect.,Cognition,2009
+397aeaea61ecdaa005b09198942381a7a11cd129,Multi-Scale Video Frame-Synthesis Network with Transitive Consistency Loss,CoRR,2017
+3949967b873dca8c8adf0761777e2702415c67d4,Recognizability assessment of facial images for automated teller machine applications,Pattern Recognition,2012
+39b22bcbd452d5fea02a9ee63a56c16400af2b83,Multi-task Learning of Facial Landmarks and Expression,2014 Canadian Conference on Computer and Robot Vision,2014
+399a2c23bd2592ebe20aa35a8ea37d07c14199da,Inferring facial expressions from videos: Tool and application,Sig. Proc.: Image Comm.,2007
+392245913bf63c8a9f44881628f8f3f587e08189,Fast Kernel Sparse Representation,2011 International Conference on Digital Image Computing: Techniques and Applications,2011
+392425be1c9d9c2ee6da45de9df7bef0d278e85f,Vision for Intelligent Vehicles & Applications (VIVA): Face Detection and Head Pose Challenge,,2016
+3947b64dcac5bcc1d3c8e9dcb50558efbb8770f1,Action Recognition with Dynamic Image Networks,IEEE transactions on pattern analysis and machine intelligence,2017
+3965d61c4f3b72044f43609c808f8760af8781a2,Diverse Conditional Image Generation by Stochastic Regression with Latent Drop-Out Codes,Unknown,2018
+395bf182983e0917f33b9701e385290b64e22f9a,Facial Expressions Animation and Lip Tracking Using Facial Characteristic Points and Deformable Model,Unknown,2004
+39b452453bea9ce398613d8dd627984fd3a0d53c,Spatiotemporal Residual Networks for Video Action Recognition,Unknown,2016
+39905f28acee51506352c34736577d49ceb2f318,Combining Statistics of Geometrical and Correlative Features for 3D Face Recognition,,2006
+39b5f6d6f8d8127b2b97ea1a4987732c0db6f9df,An evaluation of bi-modal facial appearance+facial expression face biometrics,2008 19th International Conference on Pattern Recognition,2008
+995b2868326837cde96e01390f87b2dee6239bdb,Feature Detection and Tracking with Constrained Local Models,,2006
+9976b88d15f89b6c82b16564735d489a7524821d,Learning Visual N-Grams from Web Data,2017 IEEE International Conference on Computer Vision (ICCV),2017
+993acefc2e350f9661125bb74df136e2b614ea23,People detection on the Pepper Robot using Convolutional Neural Networks and 3D Blob detection,,2017
+997fea9df7057cd342299e90c1c6e6e9f1cc5a88,Human movement summarization and depiction from videos,2013 IEEE International Conference on Multimedia and Expo (ICME),2013
+9931c6b050e723f5b2a189dd38c81322ac0511de,From pose to activity: Surveying datasets and introducing CONVERSE,Computer Vision and Image Understanding,2016
+997ffa2cd7f3c7ba3730fb348c9804f3f575f32a,Face Recognition using Discriminatively Trained Orthogonal Rank One Tensor Projections,2007 IEEE Conference on Computer Vision and Pattern Recognition,2007
+992655a7eaa846cdf755bb1be93693d7b6fe9094,Accurate eye localization in the Short Waved Infrared Spectrum through summation range filters,Computer Vision and Image Understanding,2015
+99b8e5b8544ed6aa45726311afb0679363c875ed,Region-based Quality Estimation Network for Large-scale Person Re-identification,CoRR,2017
+520901f189d7943ff060239d4152b34edc0524ae,Large-scale image annotation using visual synset,2011 International Conference on Computer Vision,2011
+52162b19e058f11b5d010f6b9f1f4944ce8db3a6,Hashing with Locally Linear Projections,,2014
+521482c2089c62a59996425603d8264832998403,Landmark localization on 3D/4D range data using a shape index-based statistical shape model with global and local constraints,Computer Vision and Image Understanding,2015
+521b625eebea73b5deb171a350e3709a4910eebf,Improving Human Action Recognition by Non-action Classification,2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR),2016
+5291304833a3565f8a2b6c13c1f12e6841925a87,"Detecting Actions, Poses, and Objects with Relational Phraselets",,2012
+527dda77a3864d88b35e017d542cb612f275a4ec,Facial 3D model registration under occlusions with sensiblepoints-based reinforced hypothesis refinement,2017 IEEE International Joint Conference on Biometrics (IJCB),2017
+524d119aa75dc9865db584cd4e0f17c957b8f56a,Pairwise linear regression: An efficient and fast multi-view facial expression recognition,2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG),2015
+52a9f957f776c8b3d913cfcd20452b9e31c27845,OPML: A one-pass closed-form solution for online metric learning,Pattern Recognition,2018
+52f23e1a386c87b0dab8bfdf9694c781cd0a3984,DropELM: Fast neural network regularization with Dropout and DropConnect,Neurocomputing,2015
+550858b7f5efaca2ebed8f3969cb89017bdb739f,"""Wii Using Only 'We'"": Using background subtraction and human pose recognition to eliminate game controllers",,2011
+551a62f43a9da5ceb9564358ad25523736fd48dc,Low-resolution face recognition with single sample per person,Signal Processing,2017
+5578be51e09379061f526e8d0fee65e3613eee8a,Shape-aware Instance Segmentation,CoRR,2016
+55cc90968e5e6ed413dd607af2a850ac2f54e378,Active subclustering,Computer Vision and Image Understanding,2014
+55c6cd3b3a0c0335de050468f55a5cc4bdc30681,Gaze allocation in a dynamic situation: effects of social status and speaking.,Cognition,2010
+55c16592502db5c2cc30711f4d04e4d3aa04d278,Universum Prescription: Regularization Using Unlabeled Data,,2017
+55d8052477e599125442de86cf4b05bc6ea0fbf8,Extended CRC: Face Recognition with a Single Training Image per Person via Intraclass Variant Dictionary,IEICE Transactions,2013
+5506a1a1e1255353fde05d9188cb2adc20553af5,Dictionary Integration using 3D Morphable Face Models for Pose-invariant Collaborative-representation-based Classification,CoRR,2016
+55b55426fcba3e298a20a4b95753a906956fc2ac,Tensor linear Laplacian discrimination (TLLD) for feature extraction,Pattern Recognition,2009
+55c81f15c89dc8f6eedab124ba4ccab18cf38327,Discriminative Training of Hyper-feature Models for Object Identification,,2006
+55e28e4c174bb7ad2fd80be3c13a033bbd91ac7a,Detection and Tracking of General Movable Objects in Large 3D Maps,CoRR,2017
+9730a140831f51a6640236e42059b948c5466d0c,Thumbs up or thumbs down? Effects of neuroticism and depressive symptoms on psychophysiological responses to social evaluation in healthy students,,2016
+9703e31a7f873eb9fc41c81c303d83a7416fffc8,Effects of Image Segmentation for Approximating Object Appearance Under Near Lighting,,2006
+9773cb8fff5e3735b34018212e83023cba227345,Enhancing ELM-based Facial Image Classification by Exploiting Multiple Facial Views,,2015
+97540905e4a9fdf425989a794f024776f28a3fa9,NDDR-CNN: Layer-wise Feature Fusing in Multi-Task CNN by Neural Discriminative Dimensionality Reduction,CoRR,2018
+9746186205ed5e559d17e87d7ede9e3dd3922e54,Face recognition based on 3D ridge images obtained from range data,Pattern Recognition,2009
+63856e83b69ac15e1252c1c3d89114dcf806fbcc,DeepIU: An Architecture for Image Understanding,,2016
+635158d2da146e9de559d2742a2fa234e06b52db,Emotion Recognition in the Wild via Convolutional Neural Networks and Mapped Binary Patterns,,2015
+63a3e425c634d0280198ae1b70ef3aec27fc95cc,An efficient face verification method in a transformed domain,Pattern Recognition Letters,2007
+63ac85ec1bff6009bb36f0b24ef189438836bc91,Deep linear discriminant analysis on fisher networks: A hybrid architecture for person re-identification,Pattern Recognition,2017
+630d2c5b60e28ff8710f415a8adb7a73f8162d9c,3D Face Recognition,,2012
+639937b3a1b8bded3f7e9a40e85bd3770016cf3c,A 3D Face Model for Pose and Illumination Invariant Face Recognition,2009 Sixth IEEE International Conference on Advanced Video and Signal Based Surveillance,2009
+63d8d69e90e79806a062cb8654ad78327c8957bb,A efficient and practical 3D face scanner using near infrared and visible photometric stereo,,2010
+63216e4bbb8736c5587b41ebbd92043656b374c6,A dynamic geometry-based approach for 4D facial expressions recognition,European Workshop on Visual Information Processing (EUVIP),2013
+63199f9d0034e82a0a7c9519d1a5bd31cc9de39f,InterpNET: Neural Introspection for Interpretable Deep Learning,CoRR,2017
+630d88e479046ef18e1b801bc37e2e1b3df85cc8,The social brain in psychiatric and neurological disorders.,Trends in cognitive sciences,2012
+63eefc775bcd8ccad343433fc7a1dd8e1e5ee796,Correlation Metric for Generalized Feature Extraction,IEEE Transactions on Pattern Analysis and Machine Intelligence,2008
+63111778d25b1105fec5e09bedf9122eafe34fd1,Optasia: A Relational Platform for Eõcient Large-Scale Video Analytics,,2016
+632c114e12a6b88bd488ddfb1960d669f101ca3f,Multi-face tracking by extended bag-of-tracklets in egocentric photo-streams,Computer Vision and Image Understanding,2016
+63ce37da6c0c789099307337bb913e1104473854,Transfer Learning with One-Class Data,,2013
+63e1ce7de0fdbce6e03d25b5001c670c30139aa8,Deep Feature Learning via Structured Graph Laplacian Embedding for Person Re-Identification,CoRR,2017
+63859f5b6d28aadbf9a41cb161a47fafc56b63dc,"Training-free, Generic Object Detection using Locally Adaptive Regression Kernels",,2009
+0fd7e70003c366cb93be06b5a3f250f798b939f3,Can fully convolutional networks perform well for general image restoration problems?,2017 Fifteenth IAPR International Conference on Machine Vision Applications (MVA),2017
+0f18ce082b7dba524759cf3fbc21bfd1e586dea3,Learning invariance through imitation,CVPR 2011,2011
+0f63499e22a1d77ef898f6b3db550231b09af59e,Pain Level Detection From Facial Image Captured by Smartphone,JIP,2016
+0ff94e25a8ff3bd5c98899684d0885423fbe4f91,A Regularized Correntropy Framework for Robust Pattern Recognition,Neural Computation,2011
+0f89c1000f1efd79d8c6b2d0a59bcc76e9272b1e,Comparing Local Descriptors and Bags of Visual Words to Deep Convolutional Neural Networks for Plant Recognition,Unknown,2017
+0f12c93d685ec82d23f2c43d555e7687f80e5b7c,Detecting unexpected obstacles for self-driving cars: Fusing deep learning and geometric modeling,2017 IEEE Intelligent Vehicles Symposium (IV),2017
+0ff4b53d140c2af0771a8a3dfeb17c149659bf07,Class-specific grasping of 3D objects from a single 2D image,2010 IEEE/RSJ International Conference on Intelligent Robots and Systems,2010
+0fe8d8e90889917acca22b9078a1a5607e603d8c,"Holistic processing, contact, and the other-race effect in face recognition",Vision Research,2014
+0f9bf5d8f9087fcba419379600b86ae9e9940013,Hybrid human detection and recognition in surveillance,Neurocomputing,2016
+0f2d6a2c37203af0a3b10a02773b659a71468d32,Unsupervised model selection for view-invariant object detection in surveillance environments,Proceedings of the 21st International Conference on Pattern Recognition (ICPR2012),2012
+0f91e3e67ec5a71a6c29b9ea0fc1916b46a09b0a,Learning Slow Features for Behaviour Analysis,2013 IEEE International Conference on Computer Vision,2013
+0fd877cb088e38b00b44f52f5483be8f356788c2,Contour Context Selection for Object Detection: A Set-to-Set Contour Matching Approach,,2008
+0f0146855de3cc6e0fd1e3c6a7bd0d3df19653bf,An interpolation method for the reconstruction and recognition of face images,,2007
+0f0fcf041559703998abf310e56f8a2f90ee6f21,The FERET Evaluation Methodology for Face-Recognition Algorithms,IEEE Trans. Pattern Anal. Mach. Intell.,1997
+0ad318510969560e2fca3d7b257e6b6f7a541b3e,High-Resolution Deep Convolutional Generative Adversarial Networks,CoRR,2017
+0a511058edae582e8327e8b9d469588c25152dc6,Memory Constrained Face Recognition Ashish Kapoor,,
+0a4f3a423a37588fde9a2db71f114b293fc09c50,Computer analysis of face beauty: A survey,Computer Vision and Image Understanding,2014
+0ae192e146431a52d7bb51923e9bdd7292ab12ef,Multi-Generator Generative Adversarial Nets,CoRR,2017
+0a66b92198b874ab007fb25da8a5a48b7c1c08d8,ARGUS: An Automated Multi-Agent Visitor Identification System,,1999
+0a5d5f359614a5cb9f42f5b9e2ee6409975703e2,Multi-view face segmentation using fusion of statistical shape and appearance models,Computer Vision and Image Understanding,2010
+0a325d70cc381b136a8f4e471b406cda6d27668c,A flexible hierarchical approach for facial age estimation based on multiple features,Pattern Recognition,2016
+0ad90118b4c91637ee165f53d557da7141c3fde0,Face recognition with radial basis function (RBF) neural networks,IEEE transactions on neural networks,2002
+0ae69840d9dadcffdf13b0b712f89050d65559d3,Universal Correspondence Network,,2016
+0a8007f69954ac8bd05bede33341dd37dd7364fb,Relative Magnitude of Gaussian Curvature Using Neural Network and Object Rotation of Two Degrees of Freedom,,2007
+0ad4a814b30e096ad0e027e458981f812c835aa0,Leveraging mid-level deep representations for predicting face attributes in the wild,2016 IEEE International Conference on Image Processing (ICIP),2016
+0a3fa8e6f158e7faec024d83964751a5d59fe836,ICCV - 99 Cover Sheet,,1999
+6448d23f317babb8d5a327f92e199aaa45f0efdc,Classifying Facial Attributes using a 2-D Gabor Wavelet Representation and Discriminant Analysis,,1999
+64372501affd8571db20dc606b0146a76c266303,"Multiple instance classification: Review, taxonomy and comparative study",Artif. Intell.,2013
+64f6c8c333bc043d41b83b6e62fbe3a521882ec3,A 3D face matching framework for facial curves,Graphical Models,2009
+6414453e462f1a022302bce98cadd8a817629521,Neural correlates of social and nonsocial emotions: An fMRI study.,NeuroImage,2006
+646fda224def3651e3d31c419f49aaa6a90686ac,A multimodal execution monitor with anomaly classification for robot-assisted feeding,2017 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS),2017
+642c66df8d0085d97dc5179f735eed82abf110d0,Coupled kernel-based subspace learning,2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05),2005
+641f34deb3bdd123c6b6e7b917519c3e56010cb7,Extended SRC: Undersampled Face Recognition via Intraclass Variant Dictionary,IEEE Transactions on Pattern Analysis and Machine Intelligence,2012
+6462ef39ca88f538405616239471a8ea17d76259,Long range iris recognition: A survey,Pattern Recognition,2017
+90ac0f32c0c29aa4545ed3d5070af17f195d015f,An Improved Illumination Normalization based on Anisotropic Smoothing for Face Recognition,Unknown,2012
+9019d11217cedd413d65052c72f07e320bc3f120,Facial Strain Pattern as a Soft Forensic Evidence,2007 IEEE Workshop on Applications of Computer Vision (WACV '07),2007
+900d5fadd4daf867dcd90929d0c2c31d5976d13a,Complex event recognition using constrained low-rank representation,Image Vision Comput.,2015
+90b7619eabe94731722ae884d0802256462457dc,Behavior Discovery and Alignment of Articulated Object Classes from Unstructured Video,International Journal of Computer Vision,2016
+90dcaeeed3cc5c6001a06e9fa674845a8fd471bd,Self-Supervised Depth Learning for Urban Scene Understanding,CoRR,2017
+90b11e095c807a23f517d94523a4da6ae6b12c76,Blind Facial Image Quality Enhancement Using Non-Rigid Semantic Patches,IEEE Transactions on Image Processing,2017
+901670d2c74a0630d991e1789ec0406988e809cb,An Optimization Based Framework for Human Pose Estimation in Monocular Videos,,2012
+bfc9a449e6364817a5a3e19b73b1527a85c32d02,Long Text Generation via Adversarial Training with Leaked Information,Unknown,2018
+bf42000d04efceab3f0f799a9b3f2058f91cf3a4,Neural response to specific components of fearful faces in healthy and schizophrenic adults,NeuroImage,2010
+bf23de0c2b478114cc5c4733e4e701a1d4662cc0,"Deformations, patches, and discriminative models for automatic annotation of medical radiographs",Pattern Recognition Letters,2008
+d39f311f1ae08efb6cd50bc5c0efe06532caad65,Image region description using orthogonal combination of local binary patterns enhanced with color information,Pattern Recognition,2013
+d3af3935eac968372b42e5bd6cf32a95420b0ac1,A complete and fully automated face verification system on mobile devices,Pattern Recognition,2013
+d3f945e0f14cf069d8a3f97497e94044f5d3b21a,"Robust, accurate and efficient face recognition from a single training image: A uniform pursuit approach",Pattern Recognition,2010
+d4c7d1a7a03adb2338704d2be7467495f2eb6c7b,Towards a Neural,,2017
+d4ca67160781e5c74b0385c3d45f35dcc0f79b8a,Polygonal Representation of Digital Curves,,2012
+d4ebf0a4f48275ecd8dbc2840b2a31cc07bd676d,A Fusion of Appearance based CNNs and Temporal evolution of Skeleton with LSTM for Daily Living Action Recognition,CoRR,2018
+d44a93027208816b9e871101693b05adab576d89,On the Capacity of Face Representation,CoRR,2017
+d4e99d6f9e91fcd58c9fd00932d1197a9e03d08d,"Neurophysiological responses to faces and gaze direction differentiate children with ASD, ADHD and ASD + ADHD",Developmental Cognitive Neuroscience,2013
+bace9d834e3582333b9460e33f0d6712eddab94e,An Efficient Filtering Method for Scalable Face Image Retrieval,IEICE Transactions,2015
+ba788365d70fa6c907b71a01d846532ba3110e31,Robust Conditional Generative Adversarial Networks,CoRR,2018
+badef8089c6b1b4cd479ea406c6b7358b68d2c26,Anorexia nervosa and autism spectrum disorders: guided investigation of social cognitive endophenotypes.,Psychological bulletin,2007
+ba227bb94ea9414bad8846673c904a10d813e443,Deep 360 Pilot: Learning a Deep Agent for Piloting through 360° Sports Videos,,2017
+ba29ba8ec180690fca702ad5d516c3e43a7f0bb8,Do less and achieve more: Training CNNs for action recognition utilizing action images from the Web,Pattern Recognition,2017
+bab88235a30e179a6804f506004468aa8c28ce4f,Joint discriminative dimensionality reduction and dictionary learning for face recognition,Pattern Recognition,2013
+a0067d23456c74d4bef5a8bef5bbe3c92e29c314,Fusion of Shape and Texture for Unconstrained Periocular Authentication,Unknown,2017
+a0fb5b079dd1ee5ac6ac575fe29f4418fdb0e670,On the initialization of the DNMF algorithm,2006 IEEE International Symposium on Circuits and Systems,2006
+a0172fc5e0bc49c3e12a0ae6769eeae40d22d28b,Matching Faces with Emotional Expressions,,2011
+a00ee78381f0bd5926851a68d6ee68368b44a5e1,Attribute-based learning for gait recognition using spatio-temporal interest points,Image Vision Comput.,2014
+a7e5a46e47dd21cc9347b913dd3dde2f0ad832ed,On denoising autoencoders trained to minimise binary cross-entropy,CoRR,2017
+a74251efa970b92925b89eeef50a5e37d9281ad0,"Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization",2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops),2011
+a7a7d4be51d0918cbec78d84dd1f7363535fd60c,Emotional facial expressions reduce neural adaptation to face identity.,Social cognitive and affective neuroscience,2014
+a7438874b2c008e614c46151fe244e5cd8455a29,Shape Based Detection and Top-Down Delineation Using Image Segments,International Journal of Computer Vision,2009
+b800b625b7cef0b6971b5d46d8a6f37c3c4f4057,Label propagation based on local information with adaptive determination of number and degree of neighbor's similarity,Neurocomputing,2015
+b8378ab83bc165bc0e3692f2ce593dcc713df34a,"A 3D Approach to Facial Landmarks: Detection, Refinement, and Tracking",2014 22nd International Conference on Pattern Recognition,2014
+b15e703ce4f01f4f0d52e835e2c907d5e8361bba,Online blind speech separation using multiple acoustic speaker tracking and time-frequency masking,Computer Speech & Language,2013
+b1c80444ecf42c303dbf65e47bea999af7a172bf,Exploring Generative Perspective of Convolutional Neural Networks by Learning Random Field Models,,2016
+b1f42e2b1b560c2451a1d704430633aed71f2bb9,Lighting invariant urban street classification,2014 IEEE International Conference on Robotics and Automation (ICRA),2014
+b171f9e4245b52ff96790cf4f8d23e822c260780,ROBOTICS INSTITUTE Summer Scholars ( RISS ) Working Papers JOURNAL VOLUME 2 FALL 2014,,2014
+b1301c722886b6028d11e4c2084ee96466218be4,Facial Aging and Rejuvenation by Conditional Multi-Adversarial Autoencoder with Ordinal Regression,,2018
+b1c5581f631dba78927aae4f86a839f43646220c,A scalable metric learning-based voting method for expression recognition,2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG),2013
+b14d06fa5dae7428b946db9ea48baa81a4f5a4cd,A Greedy Part Assignment Algorithm for Real-time Multi-person 2D Pose Estimation,CoRR,2017
+b17197921cfd6e06da85881a03abb2da2608b0c7,Fusing cluster-centric feature similarities for face recognition in video sequences,Pattern Recognition Letters,2013
+dd0760bda44d4e222c0a54d41681f97b3270122b,Recognition of facial expressions using Gabor wavelets and learning vector quantization,Eng. Appl. of AI,2008
+dd2f6a1ba3650075245a422319d86002e1e87808,"PD2T: Person-specific Detection, Deformable Tracking",,2018
+dd600e7d6e4443ebe87ab864d62e2f4316431293,Improving facial expression analysis using histograms of Log-Transformed Nonnegative Sparse Representation with a Spatial Pyramid Structure,2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG),2013
+dc6b40252885249f1e71831f57d9ddbaae8df635,Fast task-specific target detection via graph based constraints representation and checking,2017 IEEE International Conference on Robotics and Automation (ICRA),2017
+dcad0ff8ca4c60d69feeb1f642019798b52ca981,Quadratic projection based feature extraction with its application to biometric recognition,Pattern Recognition,2016
+b65bbf24479d4df65dcde2ac343f5c7cf96be3ef,Fitting 3D morphable models using implicit representations,JVRB,2007
+b6bf9d357f280ba8bb8338b2448f0f90773f5c57,A Testing Methodology for Face Recognition Algorithms,,2005
+b6c047ab10dd86b1443b088029ffe05d79bbe257,Using robust dispersion estimation in support vector machines,Pattern Recognition,2013
+b63957152a0f37ddc99904a5bddb60b3f056b8cf,Eye tracking young children with autism.,Journal of visualized experiments : JoVE,2012
+b6bbd9a66d573e4b22fd0603acc707dbc5379648,Heterogeneous cores for MapReduce processing: Opportunity or challenge?,2014 IEEE Network Operations and Management Symposium (NOMS),2014
+b6c53891dff24caa1f2e690552a1a5921554f994,Deeply Learning Deformable Facial Action Parts Model for Dynamic Expression Analysis,,2014
+b64bf3dab761d27a19f2ff4049691dc47369595d,Cascade of descriptors to detect and track objects across any network of cameras,Computer Vision and Image Understanding,2010
+b656abc4d1e9c8dc699906b70d6fcd609fae8182,Integrating monolithic and free-parts representations for improved face verification in the presence of pose mismatch,Pattern Recognition Letters,2007
+a9fb2ec954dbb8e1ee6b3a33e0e5c06db2d89d3c,Obese parents – obese children? Psychological-psychiatric risk factors of parental behavior and experience for the development of obesity in children aged 0–3: study protocol,,2013
+a92adfdd8996ab2bd7cdc910ea1d3db03c66d34f,ConvNet Architecture Search for Spatiotemporal Feature Learning,CoRR,2017
+a9a4e19337f04d9ad14fa3d231a9ed13735139c9,Online multiperson tracking with occlusion reasoning and unsupervised track motion model,2013 10th IEEE International Conference on Advanced Video and Signal Based Surveillance,2013
+a9f7ab254a8c73a51f0eba5a8e13b48924b542c0,Face morphology: Can it tell us something about body weight and fat?,Computers in biology and medicine,2016
+a9a6cd6d40b563a02ed899114559a6e14f2f39a1,Numerical Coordinate Regression with Convolutional Neural Networks,CoRR,2018
+a93c3dc4efaa80382210f5f8395ac9b04a485f45,Noisy subspace clustering via matching pursuits,CoRR,2016
+d5e12c9286038afaf9ae764b044929cd9a458c95,Leveraging Features from Background and Salient Regions for Automatic Image Annotation,JIP,2012
+d59e60c87309556c73c2885d133b459f20c90d9f,DLPaper2Code: Auto-generation of Code from Deep Learning Research Papers,CoRR,2017
+d541986a647e7ab10cc8f882e1a1f5e6d725d8a2,Emotion Recognition from Geometric Facial Patterns,,2015
+d50751da2997e7ebc89244c88a4d0d18405e8507,Real time 3D face alignment with Random Forests-based Active Appearance Models,2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG),2013
+d511e903a882658c9f6f930d6dd183007f508eda,Privileged information-based conditional regression forest for facial feature detection,2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG),2013
+d5c4e3c101041556e00b25c0dcb09716827ed5b3,Unsupervised Image-to-Image Translation with Generative Adversarial Networks,CoRR,2017
+d5e1173dcb2a51b483f86694889b015d55094634,PCA and LDA in DCT domain,Pattern Recognition Letters,2005
+d2a8779b4f533e1dce709ced55196ef28e9a4c90,SI: EMOTION REGULATION AND PSYCHIATRIC COMORBIDITY IN ASD Fear of Negative Evaluation Influences Eye Gaze in Adolescents with Autism Spectrum Disorder: A Pilot Study,Unknown,2015
+d23d747e9936299b177b0358ff0502a884276aa2,Wide Area Tracking in Single and Multiple Views,,2011
+aafb271684a52a0b23debb3a5793eb618940c5dd,Supplementary Material: Hierarchical Semantic Indexing for Large Scale Image Retrieval,,2011
+aaee760cd3e5669dd597f0daf8c50b4da995e7e5,Visual perception of facial expressions of emotion.,Current opinion in psychology,2017
+aa331fe378056b6d6031bb8fe6676e035ed60d6d,Object detection using boosted local binaries,Pattern Recognition,2016
+af8fe1b602452cf7fc9ecea0fd4508ed4149834e,A pose-wise linear illumination manifold model for face recognition using video,Computer Vision and Image Understanding,2009
+af6af58ba12920762638e1d0b8310a0d9961b7be,Sketch-to-Image Generation Using Deep Contextual Completion,CoRR,2017
+afeac9270149b927b592e2299d11095fbdf8d308,Accurate 3D Multi-marker Tracking in X-ray Cardiac Sequences Using a Two-Stage Graph Modeling Approach,,2013
+af6cae71f24ea8f457e581bfe1240d5fa63faaf7,Multi-Task Zipping via Layer-wise Neuron Sharing,CoRR,2018
+afd29ac2de84c8a6d48232477be018ec57d6f564,Deep Metric Learning for Practical Person Re-Identification,CoRR,2014
+af54dd5da722e104740f9b6f261df9d4688a9712,Portability: A New Challenge on Designing Family Image Database,,2010
+afd1e4157245d56711d4f16a5b7c9fd1f39a5139,Nearest-neighbor method using multiple neighborhood similarities for social media data mining,Neurocomputing,2012
+b7b5fd3e2cfc39967e389b974c1cb418b2bf1b8f,Heuristic Search for Structural Constraints in Data Association,CoRR,2017
+b7ccfc78cb54525f9cba996b73c780068a05527e,Task-Aware Compressed Sensing With Generative Adversarial Networks,Unknown,2018
+b75cee96293c11fe77ab733fc1147950abbe16f9,A Single Classifier for View-Invariant Multiple Object Class Recognition,,2006
+b786a16ca5d84257bb98024751429c9f42005e62,A Fine-Grained Approach to Scene Text Script Identification,2016 12th IAPR Workshop on Document Analysis Systems (DAS),2016
+b7740dba37a3cbd5c832a8deb9a710a28966486a,The development of emotion concepts: a story superiority effect in older children and adolescents.,Journal of experimental child psychology,2015
+b73f43a34af3f5ebac0a88066d8bd2eb39873be3,"Who reports it best? A comparison between parent-report, self-report, and the real life social behaviors of adults with Williams syndrome.",Research in developmental disabilities,2014
+b7cff43f653279a65e23a7a85c48b12a484148ef,Face and Facial Feature Detection Evaluation - Performance Evaluation of Public Domain Haar Detectors for Face and Facial Feature Detection,,2008
+b7239d619c5ad3d80a170bb33ca427bb4278f4a1,Pedestrian detection in single frame by edgelet-LBP part detectors,2013 10th IEEE International Conference on Advanced Video and Signal Based Surveillance,2013
+b767abb865fba93f35312127b61ba351a2a91a44,Face processing regions are sensitive to distinct aspects of temporal sequence in facial dynamics,NeuroImage,2014
+b747fcad32484dfbe29530a15776d0df5688a7db,Background suppressing Gabor energy filtering,Pattern Recognition Letters,2015
+b79412cee14e583a5c6816c1124913f560303a95,Learning fine-grained features via a CNN Tree for Large-scale Classification,Neurocomputing,2018
+b717d84d551de252300b9f161a5551162a936119,Query-driven iterated neighborhood graph search for large scale indexing,,2012
+dbbaa5d4a5d04267e5be454624f8d3be8265fe7c,"Fusiform function in children with an autism spectrum disorder is a matter of ""who"".",Biological psychiatry,2008
+dbab6ac1a9516c360cdbfd5f3239a351a64adde7,Cascaded regression with sparsified feature covariance matrix for facial landmark detection,Pattern Recognition Letters,2016
+dbc04694ef17c83bb12b3ad34da6092eab68ae68,Modeling cognitive deficits following neurodegenerative diseases and traumatic brain injuries with deep convolutional neural networks.,Brain and cognition,2018
+db5d1b4c295adb24c8cb58ec995ce11b569cbb77,Graph optimization for dimensionality reduction with sparsity constraints,Pattern Recognition,2012
+dbec415ba09ab66ea5855aaa1267796b75ef7e7b,Person Re-identification in Appearance Impaired Scenarios,CoRR,2016
+db35faccd8d9fcf25c363b4781cb50dbd76649b4,Full body human attribute detection in indoor surveillance environment using color-depth information,2013 10th IEEE International Conference on Advanced Video and Signal Based Surveillance,2013
+a80fdcb8c837fe7a516de7397373f4c4d6de2884,A new proposal for graph-based image classification using frequent approximate subgraphs,Pattern Recognition,2014
+a8be27b214a75642d43c726b8f1e92aa8e4c3768,A NOVEL CONIC SECTION CLASSIFIER WITH TRACTABLE GEOMETRIC LEARNING ALGORITHMS By SANTHOSH KODIPAKA A DISSERTATION PRESENTED TO THE GRADUATE SCHOOL OF THE UNIVERSITY OF FLORIDA IN PARTIAL FULFILLMENT OF THE REQUIREMENTS FOR THE DEGREE OF DOCTOR OF PHILOSOPHY,,2009
+de801c4bb4b420417d9514631a6cc4099fc64df2,Crowd counting in public video surveillance by label distribution learning,Neurocomputing,2015
+de6ba16ee8ad07e2f02d685b1e98b8be5045cb1b,Adaptive discriminant learning for face recognition,Pattern Recognition,2013
+de10f93b0a3656822aa7c0b5d62074ff5eac60b2,Measuring the Accuracy of Object Detectors and Trackers,,2017
+de0157390682eebc838e271f4fe8f704251ddef1,A reaction time advantage for calculating beliefs over public representations signals domain specificity for 'theory of mind'.,Cognition,2010
+ded41c9b027c8a7f4800e61b7cfb793edaeb2817,DYAN: A Dynamical Atoms Network for Video Prediction,,2018
+b0c512fcfb7bd6c500429cbda963e28850f2e948,A Fast and Accurate Unconstrained Face Detector,IEEE Transactions on Pattern Analysis and Machine Intelligence,2016
+b018f4ea4b46701103046c472468631cc28ab311,Generative Adversarial Perturbations,CoRR,2017
+b06b0086e84038abbe5088f3429603778f2b8fdf,Distributed Data Association in Smart Camera Networks via Dual Decomposition,Information Fusion,2018
+b01de5e9554109a006a0cface1f11d45922abc0b,Geometric Neural Phrase Pooling: Modeling the Spatial Co-occurrence of Neurons,,2016
+b03d6e268cde7380e090ddaea889c75f64560891,Automatic Acquisition of High-fidelity Facial Performances Using Monocular Videos: Supplementary Material,,2014
+b0d52bb1c9cff9416fe766e9cba94ceeab12d51f,Object Detection Using Deep CNNs Trained on Synthetic Images,CoRR,2017
+a643302a89805bb8d3d204660a3a60420fee36e2,Facial point detection using boosted regression and graph models,2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition,2010
+a62d352db6efa30b66bff378b0c27792ed37d8fa,"Revisiting Salient Object Detection: Simultaneous Detection, Ranking, and Subitizing of Multiple Salient Objects",CoRR,2018
+a65b93c01518755291e19a0545c1a3d20e401c0a,"A Large Contextual Dataset for Classification, Detection and Counting of Cars with Deep Learning",,2016
+a6583c8daa7927eedb3e892a60fc88bdfe89a486,Toward semantic attributes in dictionary learning and non-negative matrix factorization,Pattern Recognition Letters,2016
+a6db73f10084ce6a4186363ea9d7475a9a658a11,ActionFlowNet: Learning Motion Representation for Action Recognition,CoRR,2016
+a6634ff2f9c480e94ed8c01d64c9eb70e0d98487,PalmHashing: a novel approach for cancelable biometrics,Inf. Process. Lett.,2005
+a6c96fceabd0e0efabc89679927ee1877f3cf4ac,"Deep Video Generation, Prediction and Completion of Human Action Sequences",Unknown,2018
+a6afb698b19faa376fe9e04e63b35668bb608f3f,Spatial properties of objects predict patterns of neural response in the ventral visual pathway,NeuroImage,2016
+b99957df60ee58e38ad71ed70b9973c22d5c26cc,Impact of money on emotional expression☆,,2015
+b97ed8715ef7b93c3540c24245e91a2685708529,The influence of sleep on emotional and cognitive processing is primarily trait- (but not state-) dependent.,Neurobiology of learning and memory,2016
+b92f276ecf9077f7c09ce410336f8b9a819df4fc,A 3D face and hand biometric system for robust user-friendly authentication,Pattern Recognition Letters,2007
+b92a1ed9622b8268ae3ac9090e25789fc41cc9b8,Pooling in image representation: The visual codeword point of view,Computer Vision and Image Understanding,2013
+efc6e9bc366ef4b0de3fde4c81dff91f3f03063f,"Biometrics in Forensic Science: Challenges, Lessons and New Technologies",,2014
+c3a101f8fb6dd2fddfee94774ea3dbc8df8f45de,Synchronization of oscillations for machine perception of gaits,Computer Vision and Image Understanding,2004
+c34e48d637705ffb52360c2afb6b03efdeb680bf,Subclass discriminant Nonnegative Matrix Factorization for facial image analysis,Pattern Recognition,2012
+c3418f866a86dfd947c2b548cbdeac8ca5783c15,Disentangling the Modes of Variation in Unlabelled Data,,2018
+c3980cbaf613cad1fbd0ab6da472c789cda583a9,Robust arbitrary view gait recognition based on parametric 3D human body reconstruction and virtual posture synthesis,Pattern Recognition,2016
+c3a3f7758bccbead7c9713cb8517889ea6d04687,Funnel-structured cascade for multi-view face detection with alignment-awareness,Neurocomputing,2017
+c36ae7c5e9f9f992a5939e07283183707ee0a787,StuffNet: Using 'Stuff' to Improve Object Detection,,2017
+c397408e784004240e866d0f31cea7b9e44fdd0c,Face image super-resolution through locality-induced support regression,Signal Processing,2014
+c3638b026c7f80a2199b5ae89c8fcbedfc0bd8af,Shape Matching and Object Recognition,,2005
+c478faecfa337bb2d37ac7c63aca47d4148ffc6a,Spontaneous facial micro-expression analysis using Spatiotemporal Completed Local Quantized Patterns,Neurocomputing,2016
+c472436764a30278337aca9681eee456bee95c34,The inherently contextualized nature of facial emotion perception.,Current opinion in psychology,2017
+ea481ceaf3ad8bef871a9efdddb27c345e0c3b4e,Vision-based human motion analysis: An overview,Computer Vision and Image Understanding,2007
+eae83dac25f323f24b0f2f9df1ad6dc47456231e,CAR-Net: Clairvoyant Attentive Recurrent Network,Unknown,2018
+eaf356e0ddf7701fa3d52d5159a78202a4866296,Large margin learning of hierarchical semantic similarity for image classification,Computer Vision and Image Understanding,2015
+ea96bc017fb56593a59149e10d5f14011a3744a0,Local coordinate based graph-regularized NMF for image representation,Signal Processing,2016
+ea3801a15f4568856581357cfc4e5bb2de185a2e,Structure-Aware and Temporally Coherent 3D Human Pose Estimation,CoRR,2017
+e10f4d5c0e0e294e00ce3a92b2057c4b2a5acf09,Face Recognition at a Distance: a study of super resolution M.Sc. Thesis,,2011
+e10a257f1daf279e55f17f273a1b557141953ce2,A survey of approaches and trends in person re-identification,Image Vision Comput.,2014
+e171fba00d88710e78e181c3e807c2fdffc6798a,Pose-invariant face recognition using a 3D deformable model,Pattern Recognition,2003
+e1cc833f301c42579392f21335b70d0216b03ab4,Robust local features for remote face recognition,Image Vision Comput.,2017
+e1eca56ced4fd2a6a3048ba7240f0fe1991ba45e,Deceptive Intentions: Can Cues to Deception Be Measured before a Lie Is Even Stated?,,2015
+e1d9f97416986524f65733742021d9f02c8f7d0d,"Semantic assessment of shopping behavior using trajectories, shopping related actions, and context information",Pattern Recognition Letters,2013
+e16efd2ae73a325b7571a456618bfa682b51aef8,Semi-Supervised Adaptive Label Distribution Learning for Facial Age Estimation,,2017
+e13360cda1ebd6fa5c3f3386c0862f292e4dbee4,Range Loss for Deep Face Recognition with Long-tail,CoRR,2016
+e1740c8a562901ac1b94c78b33c4416500cedebc,Joint-VAE: Learning Disentangled Joint Continuous and Discrete Representations,,2018
+cd7c007f5831b294160eaf1cc6270af4a0ca9bf4,Person re-identification with block sparse recovery,Image Vision Comput.,2017
+cd64bfee5c008c1d96b3d0e440ef94270f50fe5f,Motion segment decomposition of RGB-D sequences for human behavior understanding,Pattern Recognition,2017
+cd4c047f4d4df7937aff8fc76f4bae7718004f40,Background modeling for generative image models,Computer Vision and Image Understanding,2015
+cd4bab5d6845c2141c9b3b635d99dce1db446028,Dense Semantic Stereo Labelling Architecture for In-Campus Navigation,Unknown,2017
+ccbfc004e29b3aceea091056b0ec536e8ea7c47e,Tensor-based factor decomposition for relighting,IEEE International Conference on Image Processing 2005,2005
+ccde43d13203bf29ccf351e8c9a79ee4b0b36142,Combining patch matching and detection for robust pedestrian tracking in monocular calibrated cameras,Pattern Recognition Letters,2014
+ccc073d9894c0678e995086e1ca4d281de84f0ff,Object-based reasoning in VQA,CoRR,2018
+cc8bf03b3f5800ac23e1a833447c421440d92197,Improving mixture of experts for view-independent face recognition using teacher-directed learning,Machine Vision and Applications,2009
+e669c2fe2051648aeafa806bc10b380d5b99dbe3,No More Discrimination: Cross City Adaptation of Road Scene Segmenters Supplementary Material,,2017
+e6540d70e5ffeed9f447602ea3455c7f0b38113e,Video pornography detection through deep learning techniques and motion information,Neurocomputing,2017
+e6ee36444038de5885473693fb206f49c1369138,SCUT-FBP5500: A Diverse Benchmark Dataset for Multi-Paradigm Facial Beauty Prediction,CoRR,2018
+f9296decd223b13fca96836caf42aa037cd5055e,Efficient Resource Allocation for Sparse Multiple Object Tracking,Unknown,2017
+f913bb65b62b0a6391ffa8f59b1d5527b7eba948,On improving robustness of LDA and SRDA by using tangent vectors,Pattern Recognition Letters,2013
+f97e9818a8055668f9db7967b076dd036d25c417,Self-Supervised Video Hashing with Hierarchical Binary Auto-encoder,CoRR,2018
+f90efe7d3d6eef4fe653343442163bf20495b5aa,Transductive Zero-Shot Learning with Adaptive Structural Embedding,IEEE transactions on neural networks and learning systems,2017
+f96bdd1e2a940030fb0a89abbe6c69b8d7f6f0c1,Comparison of human and computer performance across face recognition experiments,Image Vision Comput.,2014
+f015cb3f5ecf61e5f6e597bdc4d39351f9c392e1,Lower Body Pose Estimation in Team Sports Videos Using Label-Grid Classifier Integrated with Tracking-by-Detection,IPSJ Trans. Computer Vision and Applications,2015
+f0fc82cabfbb7d7a8505ef1f78becaf179b9d72c,Abnormal cerebral effective connectivity during explicit emotional processing in adults with autism spectrum disorder.,Social cognitive and affective neuroscience,2008
+f06b015bb19bd3c39ac5b1e4320566f8d83a0c84,Classification and weakly supervised pain localization using multiple segment representation,Image and vision computing,2014
+f7dea4454c2de0b96ab5cf95008ce7144292e52a,Facial Landmark Detection: A Literature Survey,International Journal of Computer Vision,2018
+f793970c7b57c3470561e9830cebbdd590a38bf4,Photo-realistic Facial Texture Transfer,CoRR,2017
+f7452a12f9bd927398e036ea6ede02da79097e6e,Attributes as Operators,,2018
+f78863f4e7c4c57744715abe524ae4256be884a9,Differential optical flow applied to automatic facial expression recognition,Neurocomputing,2011
+e8410c4cd1689829c15bd1f34995eb3bd4321069,Decoding mixed emotions from expression map of face images,2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG),2013
+e896b084a247213f48b703c98f2ec6f55a02a2f5,Brain regions involved in processing facial identity and expression are differentially selective for surface and edge information,,2014
+e87b1ae1d9af4182e7c5b62c9bd01b15979dd59d,Addressing ambiguity in multi-target tracking by hierarchical strategy,2017 IEEE International Conference on Image Processing (ICIP),2017
+faed896f28281a77d6847534ece9c97a3036e75d,"Fusion of color, local spatial and global frequency information for face recognition",Pattern Recognition,2010
+fa0363db4e4cb96c00a0ad200b1c6922b3cd87d5,Subspace Clustering by Block Diagonal Representation,IEEE transactions on pattern analysis and machine intelligence,2018
+faead8f2eb54c7bc33bc7d0569adc7a4c2ec4c3b,Combining Data-driven and Model-driven Methods for Robust Facial Landmark Detection,CoRR,2016
+faf5583063682e70dedc4466ac0f74eeb63169e7,Holistic person processing: faces with bodies tell the whole story.,Journal of personality and social psychology,2012
+fa7689cedcf2f0ddb6fa4a3c0dbefc6fa63e1a14,Cross-modal influences of affect across social and non-social domains in individuals with Williams syndrome.,Neuropsychologia,2010
+fa4b5f663d5d600e5ae3cb85ba1d080ab1721add,Multi-resolutive sparse approximations of d-dimensional data,Computer Vision and Image Understanding,2013
+ff3d4f2406ca2d78b20ed94a33983bca3583d520,Aguará: An Improved Face Recognition Algorithm through Gabor Filter Adaptation,,2007
+ff60d4601adabe04214c67e12253ea3359f4e082,Video-based emotion recognition in the wild using deep transfer learning and score fusion,Image Vision Comput.,2017
+ffbecbc581d98648dc670f9b5757c25348b25561,Generalization in Metric Learning: Should the Embedding Layer be the Embedding Layer?,,2018
+ff98041e54682c6d1af7b86b5fe125b8252a3466,Anxiety attenuates awareness of emotional faces during rapid serial visual presentation.,Emotion,2012
+c5632e2117d268159225d5c307b7efbb6428ccba,Understanding image concepts using ISTOP model,Pattern Recognition,2016
+c562637140da95e37ea228d35f1046589d31b3b2,Evaluation of a template protection approach to integrate fingerprint biometrics in a PIN-based payment infrastructure,Electronic Commerce Research and Applications,2011
+c54f9f33382f9f656ec0e97d3004df614ec56434,Automatic edge-based localization of facial features from images with complex facial expressions,Pattern Recognition Letters,2010
+c5fe40875358a286594b77fa23285fcfb7bda68e,Face identification using reference-based features with message passing model,Neurocomputing,2013
+c51039a4cbfcdb0175f15824e186998500f5b85a,Processing of Face Images and Its Applications,,1999
+c5b2d166f77f072dfbbbd538729bf7ac11f4094d,Multibiometric human recognition using 3D ear and face features,Pattern Recognition,2013
+c2864a3551a3a5d41474d06639815939f8439add,Pedestrian Detection by Using a Spatio-Temporal Histogram of Oriented Gradients,IEICE Transactions,2013
+c27c2fe9642fb82a3dfc314ce6003fe7a88eb1ec,Interpretable R-CNN,CoRR,2017
+c20ac2441e6ec29ae926d3c5605b71ce10ef6dff,Heterogeneous image transformation,Pattern Recognition Letters,2013
+c29e33fbd078d9a8ab7adbc74b03d4f830714cd0,3D shape constraint for facial feature localization using probabilistic-like output,"Sixth IEEE International Conference on Automatic Face and Gesture Recognition, 2004. Proceedings.",2004
+f68ed499e9d41f9c3d16d843db75dc12833d988d,Multi-view Common Component Discriminant Analysis for Cross-view Classification,CoRR,2018
+f6abecc1f48f6ec6eede4143af33cc936f14d0d0,Adaptive Detrending to Accelerate Convolutional Gated Recurrent Unit Training for Contextual Video Recognition,CoRR,2017
+f60437dc3d8687930d82988713fe16184117ef27,The Stixel World: A medium-level representation of traffic scenes,Image Vision Comput.,2017
+e903fc4e9636d5e5635b6970b2520b920e919a68,Deep feature based contextual model for object detection,Neurocomputing,2018
+e9ce1ab4a1b6204114446cb255c1d7639adc9a80,On the Importance of the Grid Size for Gender Recognition using Full Body Static Images,,2011
+e92c934c047d0ec23e7ed3a749e14a0150dc1bc8,Privacy-Preserving Photo Sharing based on a Public Key Infrastructure,,2015
+e9c000b765ba5519050a61726e007c430cd5bfcb,Multi-class Multi-object Tracking Using Changing Point Detection,,2016
+e90816e1a0e14ea1e7039e0b2782260999aef786,Tracking by Animation: Unsupervised Learning of Multi-Object Attentive Trackers,CoRR,2018
+f1748303cc02424704b3a35595610890229567f9,Learning-based encoding with soft assignment for age estimation under unconstrained imaging conditions,Image Vision Comput.,2012
+f10f6c294130c76981f0e584af5811c44636eda5,The Re-identification Challenge,,2014
+e726acda15d41b992b5a41feabd43617fab6dc23,Evolutionary feature synthesis for facial expression recognition,Pattern Recognition Letters,2006
+e78572eeef8b967dec420013c65a6684487c13b2,3D Shape Induction from 2D Views of Multiple Objects,CoRR,2016
+e75255911aa88fda7c0ce8b42b0ca2d2a43bf33e,Reduced gaze aftereffects are related to difficulties categorising gaze direction in children with autism,,2013
+cb489395a7a89bc6299e78e75ac7c0207bcd39bb,Not so harmless anymore: How context impacts the perception and electrocortical processing of neutral faces,NeuroImage,2014
+cbadf6b89571d387eb5f1d56ae5671ad16ed1155,Face processing in autism spectrum disorders: From brain regions to brain networks.,Neuropsychologia,2015
+cb422f464e849272d92b8f2fc3c5605a71c98e54,Rethinking Atrous Convolution for Semantic Image Segmentation,CoRR,2017
+cba3fda21e073df8e97920ebefa63712b9796c89,DeepDriver : Automated System For measuring Valence and Arousal in Car Driver Videos,Unknown,2018
+cb13559e23fd88363d7eba62a98a269e6e41087e,3D skeletal reconstruction from low-resolution multi-view images,2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops,2012
+cb9092fe74ea6a5b2bb56e9226f1c88f96094388,A distributed perception infrastructure for robot assisted living,Robotics and Autonomous Systems,2014
+f8ebba8188c9f6b1688ba7ba76f297215e6cc7c7,Triplet-Based Deep Similarity Learning for Person Re-Identification,2017 IEEE International Conference on Computer Vision Workshops (ICCVW),2017
+f87ae55502267f82e031a8101b0efa626f3e6c7a,Pedestrian detection based on hierarchical co-occurrence model for occlusion handling,Neurocomputing,2015
+f869601ae682e6116daebefb77d92e7c5dd2cb15,Regularized Diffusion Process for Visual Retrieval,,2017
+f820bca64665ac90fbed5881599a049198d71118,Sensory over-responsivity and social cognition in ASD: Effects of aversive sensory stimuli and attentional modulation on neural responses to social cues,Developmental Cognitive Neuroscience,2018
+f8d434471c2850c5f1d0757d42142b655fb46ddb,A new distance measure for non-identical data with application to image classification,Pattern Recognition,2017
+cef841f27535c0865278ee9a4bc8ee113b4fb9f3,Fusion of feature sets and classifiers for facial expression recognition,Expert Syst. Appl.,2013
+ce5e50467e43e3178cbd86cfc3348e3f577c4489,Extending a local matching face recognition approach to low-resolution video,2013 10th IEEE International Conference on Advanced Video and Signal Based Surveillance,2013
+ce691a37060944c136d2795e10ed7ba751cd8394,"Unsupervised Depth Estimation, 3D Face Rotation and Replacement",,2018
+ce9a9f178018fc266fbf554bbde63155a48eaedb,Face recognition under pose variation with local Gabor features enhanced by Active Shape and Statistical Models,Pattern Recognition,2015
+ce3f3088d0c0bf236638014a299a28e492069753,Online Action Recognition Using Covariance of Shape and Motion,,2014
+ce7a385b791686f318313e94a0b573c456c1297f,Quantifying privacy and security of biometric fuzzy commitment,2011 International Joint Conference on Biometrics (IJCB),2011
+e0c081a007435e0c64e208e9918ca727e2c1c44e,Universidad De Las Palmas,,2005
+e0659abe7b377b146bcd8ac5040e620bd7f4ede4,Generative object detection and tracking in 3D range data,2012 IEEE International Conference on Robotics and Automation,2012
+e09ee005c07fdb5a370c73909a447e5303a74129,Leveraging social media for scalable object detection,Pattern Recognition,2012
+e0e19769ad446c2a74c0616fcfb551059c899ce6,Part level transfer regularization for enhancing exemplar SVMs,Computer Vision and Image Understanding,2015
+e0765de5cabe7e287582532456d7f4815acd74c1,Representing images of a rotating object with cyclic permutation for view-based pose estimation,Computer Vision and Image Understanding,2009
+46e86cdb674440f61b6658ef3e84fea95ea51fb4,Robust Face Recognition Using Eigen Faces and Karhunen-Loeve Algorithm,,2010
+46bd4df6176345097b0d239b3c8937f67130a69b,Bootstrapping Boosted Random Ferns for discriminative and efficient object classification,Pattern Recognition,2012
+464de30d3310123644ab81a1f0adc51598586fd2,Covariance descriptor based on bio-inspired features for person re-identification and face verification,Image Vision Comput.,2014
+4689e75bca5a6eb1e3e1d6bcbd78d67ee39bb378,Projectiveactiveshapemodels forpose-variant imageanalysisofquasi-planarobjects: Application to facial analysis,,2009
+46c00c4c4dfaee99976705209fc2ac1972081ab9,Relational HOG Feature and Masking of Binary by Using Wild - Card for Object,,2011
+46c65ee1a3e49bb77c7c73dcbfeb5d86db7fc3ee,Deep-neural-network based sinogram synthesis for sparse-view CT image reconstruction,,2018
+4699f98cfdb19e57c2c14c046d0a658ed2267aa7,Online Hashing,IEEE transactions on neural networks and learning systems,2013
+46196735a201185db3a6d8f6e473baf05ba7b68f,Principal Component Analysis by $L_{p}$ -Norm Maximization,IEEE transactions on cybernetics,2014
+4650ac406a79fa59ff147ffabc32e80c5edc1cbe,Predicting memorability of images using attention-driven spatial pooling and image semantics,Image Vision Comput.,2015
+469e0e79c936130b3727d598fac46913c75489f6,Compression Techniques for Deep Fisher Vectors,Unknown,2017
+2c69688a2fc686cad14bfa15f8a0335b26b54054,Multi-View Representation Learning: A Survey from Shallow Methods to Deep Methods,CoRR,2016
+2c28c95066b1df918f956f3cc072e29fd452dcad,Generalized Multi-view Embedding for Visual Recognition and Cross-modal Retrieval,IEEE transactions on cybernetics,2017
+2cbb4a2f8fd2ddac86f8804fd7ffacd830a66b58,Age and gender classification using convolutional neural networks,2015 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW),2015
+2c61a9e26557dd0fe824909adeadf22a6a0d86b0,Convolutional Channel Features: Tailoring CNN to Diverse Tasks,,2015
+2ca78bafe32dbca5f9f64ed4de5a893aa5ca03f7,Seeking the Strongest Rigid Detector,2013 IEEE Conference on Computer Vision and Pattern Recognition,2013
+2c5bfdf6b4f9c06a42280e99d101e628a1dc597f,"Multiple Target, Multiple Type Visual Tracking using a Tri-GM-PHD Filter",Unknown,2017
+2c13e817232e8693ecbd7a139cfb1825a720ff96,Learning Pose Grammar to Encode Human Body Configuration for 3D Pose Estimation,,2017
+2c375f93c0d0db944ea3ee5e5b4428c5b647f3fa,Automatic body segmentation with graph cut and self-adaptive initialization level set (SAILS),J. Visual Communication and Image Representation,2011
+2c71d189e131d8a0b1f832202392b83b31ee2818,DAAL: Deep activation-based attribute learning for action recognition in depth videos,Computer Vision and Image Understanding,2018
+2c811b647a6aac924920c06e607e9e8d4b8d872d,Recognizing facial action units using independent component analysis and support vector machine,Pattern Recognition,2006
+2c7c3a74da960cc76c00965bd3e343958464da45,Interactive Facial-Geometric-Feature Animation for Generating Expressions of Novel Faces,IEICE Transactions,2011
+2c49e626d297e6ee26671459a77776b97b5f2c88,A data-driven detection optimization framework,Neurocomputing,2013
+2c12b2bd93f9ac8efc5c94e46bfa7a3cd0461052,Eye pupil localization with an ensemble of randomized trees,Pattern Recognition,2014
+2c3138782317a97526a83a7ce264c0c772ddf7e3,Zero-Shot Learning by Generating Pseudo Feature Representations,CoRR,2017
+2c53cb4222cd9ccc868a07d494b8a4ce102658fa,Face recognition across pose: A review,Pattern Recognition,2009
+79581c364cefe53bff6bdd224acd4f4bbc43d6d4,Descriptors and regions of interest fusion for in- and cross-database gender classification in the wild,Image Vision Comput.,2017
+795aa8064b34c4bf4acdd8be3f1e5d06da5a7756,Face-MagNet: Magnifying Feature Maps to Detect Small Faces,,2018
+795ea140df2c3d29753f40ccc4952ef24f46576c,Multi-Task Learning by Deep Collaboration and Application in Facial Landmark Detection,CoRR,2017
+790ad3255083ac475185d9de8159ae3cf2e0068b,The role of the right hemisphere in semantic control: A case-series comparison of right and left hemisphere stroke,,2016
+79dc84a3bf76f1cb983902e2591d913cee5bdb0e,Grounded Compositional Semantics for Finding and Describing Images with Sentences,TACL,2014
+79519f181ca9378e72bdb41ca647ba6d2e65b106,Two can play this Game: Visual Dialog with Discriminative Question Generation and Answering,CoRR,2018
+79c3a7131c6c176b02b97d368cd0cd0bc713ff7e,A New Weighted LDA Method in Comparison to Some Versions of LDA,Unknown,2006
+7942d0c6e5d1a2440061f2ea4bc27e32badb9c3d,Ordinal convolutional neural networks for predicting RDoC positive valence psychiatric symptom severity scores.,Journal of biomedical informatics,2017
+793e7f1ba18848908da30cbad14323b0389fd2a8,End-to-end Face Detection and Cast Grouping in Movies Using Erdős-Rényi Clustering: Supplementary Material,,2017
+2dfba157e0b5db5becb99b3c412ac729cf3bb32d,Automatic Detection and Tracking of Pedestrians in Videos with Various Crowd Densities,,2014
+2dd6c988b279d89ab5fb5155baba65ce4ce53c1e,Learning deformable shape manifolds,Pattern recognition,2012
+2d83dbf4c8eabc6bdef3326c4a30d5f33ffc944e,Multimodal Residual Learning for Visual QA,,2016
+2d1f86e2c7ba81392c8914edbc079ac64d29b666,Deep Heterogeneous Feature Fusion for Template-Based Face Recognition,2017 IEEE Winter Conference on Applications of Computer Vision (WACV),2017
+2d2102d3fe127444e203a2ab11c2b3d5f56874cc,Wasserstein Auto-Encoders,CoRR,2017
+2d69b3965685066081e533b29fde3364a6cc21e7,Object Instance Sharing by Enhanced Bounding Box Correspondence,,2012
+2dd91115091f1691ea37c4b14788ca4199354012,Semi-supervised Learning by Sparse Representation,,2009
+2d36f8444581d806ce6e36ec1d9bdede193db005,Visual Memory QA: Your Personal Photo and Video Search Agent,,2017
+2da1a80955df1612766ffdf63916a6a374780161,Generating steganographic images via adversarial training,Unknown,2017
+41b38da2f4137c957537908f9cb70cbd2fac8bc1,Greedy search for descriptive spatial face features,"2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)",2017
+41915a85714bbfea53af9e65abc01a7b54e5cd13,Reliable non-invasive measurement of human neurochemistry using proton spectroscopy with an anatomically defined amygdala-specific voxel,NeuroImage,2012
+419fec1a76d9233dcaa8d2c98ea622d19f663261,Unsupervised learning of object frames by dense equivariant image labelling,Unknown,2017
+41b997f6cec7a6a773cd09f174cb6d2f036b36cd,Local binary patterns for multi-view facial expression recognition,Computer Vision and Image Understanding,2011
+419279b5d21234737b10715fd785eeb51b317767,Haar-like features with optimally weighted rectangles for rapid object detection,Pattern Recognition,2010
+830e5b1043227fe189b3f93619ef4c58868758a7,"A survey on face detection in the wild: Past, present and future",Computer Vision and Image Understanding,2015
+834ff8e06ed3f01c10958a276f1526fce7ffd387,Automatic Adaptation of Person Association for Multiview Tracking in Group Activities,CoRR,2018
+83ac942d71ba908c8d76fc68de6173151f012b38,Class dependent factor analysis and its application to face recognition,Pattern Recognition,2012
+8359f65fd0e0ada2a3de8aead37a6680b53de2a6,Estimating smile intensity: A better way,,2014
+1b32284d732e0aec506411b71e6150df53d167f7,"Configurable, Photorealistic Image Rendering and Ground Truth Synthesis by Sampling Stochastic Grammars Representing Indoor Scenes",CoRR,2017
+1bd50926079e68a6e32dc4412e9d5abe331daefb,Fisher Discrimination Dictionary Learning for sparse representation,2011 International Conference on Computer Vision,2011
+1ba4d5d3b0cb46d61f23279f70ae42735601a60c,Crowd Counting via Weighted VLAD on Dense Attribute Feature Maps,CoRR,2016
+1b612877c4fb6fb7faf395357cd8092e5ec5dae7,A survey on still image based human action recognition,Pattern Recognition,2014
+1b510618969a298225764eaee54ee700fefb2d23,Depth Structure Association for RGB-D Multi-target Tracking,2014 22nd International Conference on Pattern Recognition,2014
+1bc8cc908cd722cf560b36e14a3333bf7b6114f4,Classification and Feature Extraction by Simplexization,IEEE Transactions on Information Forensics and Security,2008
+1bf01e83fba634bab085ec5f0ab86a1a67da8577,An equalised global graphical model-based approach for multi-camera object tracking,CoRR,2015
+1baeaa776c4f1cf17e690a73f8b38b8064c0e794,A spatio-temporal Long-term Memory approach for visual place recognition in mobile robotic navigation,Robotics and Autonomous Systems,2013
+1bc214c39536c940b12c3a2a6b78cafcbfddb59a,Leveraging Gabor Phase for Face Identification in Controlled Scenarios,,2016
+1b5a19828a1dd486ccab1e9c107dfe7bae20cfb7,Pedestrian detection from still images based on multi-feature covariances,2013 IEEE International Conference on Information and Automation (ICIA),2013
+1be32039596ff52fa09772f4606b65845d1c5853,Analysis-by-synthesis: Pedestrian tracking with crowd simulation models in a multi-camera video network,Computer Vision and Image Understanding,2015
+1b31d4a584818ce0f140026d172601116c6bc714,A Multi-layer Composite Model for Human Pose Estimation,,2012
+1bdb09190fc0c66f7e1a6deb7a0ebbaba6b2a42c,Voronoi-Based Compact Image Descriptors: Efficient Region-of-Interest Retrieval With VLAD and Deep-Learning-Based Descriptors,IEEE Transactions on Multimedia,2017
+1b79628af96eb3ad64dbb859dae64f31a09027d5,Modeling Recognition Memory Using the Similarity Structure of Natural Input,,2006
+1b589016fbabe607a1fb7ce0c265442be9caf3a9,Development of perceptual expertise in emotion recognition.,Cognition,2009
+1b27ca161d2e1d4dd7d22b1247acee5c53db5104,Facial soft biometric features for forensic face recognition.,Forensic science international,2015
+1b781faee797beff41ef67703dd80bd6da3c8b23,"A Survey of Affect Recognition Methods: Audio, Visual, and Spontaneous Expressions",IEEE Transactions on Pattern Analysis and Machine Intelligence,2007
+77cc3e55ff5e18eecc29f2fad1ced236ce9b0689,ENCARA2: Real-time detection of multiple faces at different resolutions in video streams,J. Visual Communication and Image Representation,2007
+77f064553b780471a2812ed2cb667d7332433bdb,Automatic Gait Recognition via Fourier Descriptors of Deformable Objects,,2003
+77addbb49abb80ccd3ebfb5b6f2d3b0687ce90f7,Sparsely-distributed organization of face and limb activations in human ventral temporal cortex,NeuroImage,2010
+77bb7759e09b47b35d5447d1d6fe07957f939f68,Experimental Analysis of Insertion Costs in a Naïve Dynamic MDF-Tree,,2009
+776835eb176ed4655d6e6c308ab203126194c41e,Audio-Visual Affective Expression Recognition Through Multistream Fused HMM,IEEE Trans. Multimedia,2008
+774f67303ea4a3a94874f08cf9a9dacc69b40782,"Fast, Accurate Detection of 100,000 Object Classes on a Single Machine: Technical Supplement",,2013
+7781ce5bb1b53533d2060aefaf8ddb95a6c77316,A Novel Framework for Robustness Analysis of Visual QA Models,CoRR,2017
+484708cc3bd4aaff0ccf166f6ead108f0842a04e,Recovering Spatiotemporal Correspondence between Deformable Objects by Exploiting Consistent Foreground Motion in Video,CoRR,2014
+48c494a8f1fdda835417ccc395a42fe210efec2c,Efficient Spatio-Temporal Data Association Using Multidimensional Assignment for Multi-Camera Multi-Target Tracking,,2015
+48a7c9f9f810b5b5befe7675e8c7ffe40cf473ff,A new algorithm for age recognition from facial images,Signal Processing,2010
+4866a5d6d7a40a26f038fc743e16345c064e9842,Stratified sampling for feature subspace selection in random forests for high dimensional data,Pattern Recognition,2013
+483351de2bdf58e21bf8a68a5d75e79a025956d6,Estimating Depth from Monocular Images as Classification Using Deep Fully Convolutional Residual Networks,CoRR,2016
+48832468be331e0257afd88ea71b807503551ca0,Supplementary material for the paper Are Sparse Representations Really Relevant for Image Classification,,2011
+48f211a9764f2bf6d6dda4a467008eda5680837a,Predicting occupation via human clothing and contexts,2011 International Conference on Computer Vision,2011
+4858d014bb5119a199448fcd36746c413e60f295,Deformable Part Models with Individual Part Scaling,,2013
+487668cc36443a67378f253afe05a550eda2c4f1,Continuous adaptation of multi-camera person identification models through sparse non-redundant representative selection,Computer Vision and Image Understanding,2017
+4879d56e5edc07ba5a34bc08700f0eed72131131,Optimization of Robust Loss Functions for Weakly-Labeled Image Taxonomies: An ImageNet Case Study,,2011
+70d71c2f8c865438c0158bed9f7d64e57e245535,"Higher Order Priors for Joint Intrinsic Image, Objects, and Attributes Estimation",,2013
+701d2c119733809f65311bc96733330b3ab59dce,ar X iv : 1 31 2 . 61 84 v 5 [ cs . L G ] 2 1 Fe b 20 14 Do Deep Nets Really Need to be Deep ?,,
+703890b7a50d6535900a5883e8d2a6813ead3a03,A spatial-temporal framework based on histogram of gradients and optical flow for facial expression recognition in video sequences,Pattern Recognition,2015
+70111f6868ffab46cf32534d8b2175693c1bbc26,SHOE: Supervised Hashing with Output Embeddings,CoRR,2015
+7066ca7d19a714012dd899f3ac0a84e4c0dc92e7,Processing of novel and familiar faces in infants at average and high risk for autism,Developmental Cognitive Neuroscience,2012
+700af3eb255ecfc9cb93d33fee763047875252ef,Large-Scale Face Image Retrieval: A Wyner-Ziv Coding Approach,,2012
+702ac86ca51e18a3a50ab0ba7c379673c077d97a,Multiscale 3D feature extraction and matching with an application to 3D face recognition,Graphical Models,2013
+1ea8085fe1c79d12adffb02bd157b54d799568e4,Eigenfaces vs. Fisherfaces: Recognition Using Class Speciic Linear Projection,,1996
+1ebdfceebad642299e573a8995bc5ed1fad173e3,Fisher Kernel Temporal Variation-based Relevance Feedback for video retrieval,Computer Vision and Image Understanding,2016
+1e516f45f87a94ceca466c9a101a01720a535117,A survey on image-based continuum-body motion estimation,Image Vision Comput.,2011
+1e335a6d3cdfe8f53540766b1495c45f72d8fb2f,Multi-Target Tracking and Occlusion Handling with Learned Variational Bayesian Clusters and a Social Force Model,IEEE Trans. Signal Processing,2016
+1e2cfa23aa2a9981bdc7f8f007121de541c387a7,Action Recognition from a Single Web Image Based on an Ensemble of Pose Experts,,2014
+1ef4aac0ebc34e76123f848c256840d89ff728d0,Rapid Synthesis of Massive Face Sets for Improved Face Recognition,2017 12th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2017),2017
+1e94ae83bd49cc9c6366a7f486ed5956e5fa0e14,On the purity of training and testing data for learning: The case of pedestrian detection,Neurocomputing,2015
+1e5fab737794d18f4fb385a53d5ec0fc5c74f32b,Automatic Face Recognition System Based on Local Fourier-Bessel Features,,2005
+1eb27702acf0ec3e36d695f03385fab96b1e3c1e,Coupling camera-tracked humans with a simulated virtual crowd,2014 International Conference on Computer Graphics Theory and Applications (GRAPP),2014
+1e46af829a955dc5ca9c53f94eb416bcd9e2a2ce,Learning better image representations using 'flobject analysis',,2011
+1e5c8fded283dd4c305a1e4c9c1fc8e0988f9c01,Moving obstacle detection in highly dynamic scenes,2009 IEEE International Conference on Robotics and Automation,2009
+8453d03942c2a96a9158ea8e7b23e023fb8b4704,bi-layered artificial neural network and a Counterpropagation,,2008
+84fe5b4ac805af63206012d29523a1e033bc827e,Ear recognition: More than a survey,Neurocomputing,2017
+84dcf04802743d9907b5b3ae28b19cbbacd97981,Face Detection using Deep Learning: An Improved Faster RCNN Approach,CoRR,2017
+84908a9162b7243e70bff6861d084813ed011f0f,Zero-Shot Learning via Latent Space Encoding,CoRR,2017
+84e48da837978a0660184a0df7647e4b22b078e7,Low Rank Subspace Clustering (LRSC),,2013
+84e9de36dd7915f9334db5cc1fe567e17d717495,Fine-grained categorization via CNN-based automatic extraction and integration of object-level and part-level features,Image Vision Comput.,2017
+8459692ecc49cc87311ad97de85576e383e36490,A visual approach for driver inattention detection,Pattern Recognition,2007
+8458e49fb08d2cca3a8d7355465e182c30785220,Visual long-term memory has a massive storage capacity for object details.,Proceedings of the National Academy of Sciences of the United States of America,2008
+4a3b28e5ad2ae2c2f17d681f6177da212e51ca32,An Efficient Method for Service Level Agreement AssessmentI,,2011
+4a9145d52184b20f7241f52509034819c79ee162,Facial Deblur Inference Using Subspace Analysis for Recognition of Blurred Faces,IEEE Transactions on Pattern Analysis and Machine Intelligence,2011
+4add5fda38b0a651295ca2886a9a39ace48dcb3e,An Approach for Energy Efficient Execution of Hybrid Parallel Programs,2015 IEEE International Parallel and Distributed Processing Symposium,2015
+4a12d2c80ae7a4622bf500400ad6fcff83dfb5de,Fusion of Multiple Facial Features for Age Estimation,IEICE Transactions,2009
+4a6049e1926cc8e574301cfb229599cdc0a64e62,Characterizing the performance of an image-based recognizer for planar mechanical linkages in textbook graphics and hand-drawn sketches,Computers & Graphics,2015
+4a1eacd06dbeed8acef3e4ad68b28af3bcebda56,MonoCap: Monocular Human Motion Capture using a CNN Coupled with a Geometric Prior,CoRR,2017
+4ad702b784d0a2fef099a4f0336c92c92a412009,FigureQA: An Annotated Figure Dataset for Visual Reasoning,CoRR,2017
+4a53062c8e0a1ce54adff22d79f409876fdfeea7,CRF - based semantic labeling in miniaturized road scenes ( Extended Abstract ),,2014
+24e64e9fd79f138cf4d90f65da06eacf031ec635,Mapping Cropland in Smallholder-Dominated Savannas: Integrating Remote Sensing Techniques and Probabilistic Modeling,Remote Sensing,2015
+24aac045f1e1a4c13a58eab4c7618dccd4c0e671,Video Imagination from a Single Image with Transformation Generation,,2017
+240d5390af19bb43761f112b0209771f19bfb696,Towards an intelligent framework for multimodal affective data analysis,Neural networks : the official journal of the International Neural Network Society,2015
+24977d59a5de4eb597347bef00f0c097a641a8dd,Random projection-based partial feature extraction for robust face recognition,Neurocomputing,2015
+24486f70e0fa7a44844adefe352b18aaeb04fdb0,Increased BOLD signal in the fusiform gyrus during implicit emotion processing in anorexia nervosa☆,,2014
+2492bb313093cbfe885d1f3f9da2feba4923baf5,Observing and interpreting complex human activities in everyday environments,Unknown,2010
+249c9034959448e4ca96e9e753570c20ccbd90c9,On the Importance of Objects in Human Action Classification,CoRR,2015
+24d376e4d580fb28fd66bc5e7681f1a8db3b6b78,Multi-Branch Fully Convolutional Network for Face Detection,CoRR,2017
+24b31c4d044fc8a625a229fd8296b71836d4a422,"Image classification by non-negative sparse coding, correlation constrained low-rank and sparse decomposition",Computer Vision and Image Understanding,2014
+24bf94f8090daf9bda56d54e42009067839b20df,"Automatic Analysis of Facial Affect: A Survey of Registration, Representation, and Recognition",IEEE Transactions on Pattern Analysis and Machine Intelligence,2015
+23fdbef123bcda0f07d940c72f3b15704fd49a98,Matrix Completion for Multi-label Image Classification,,2011
+23b3b07cb484bd3aaeaa3728f8977c44f50443f6,A unified tensor framework for face recognition,Pattern Recognition,2009
+23ebbbba11c6ca785b0589543bf5675883283a57,Spatio-Temporal Tube data representation and Kernel design for SVM-based video object retrieval system,Multimedia Tools and Applications,2010
+231af7dc01a166cac3b5b01ca05778238f796e41,GANs Trained by a Two Time-Scale Update Rule Converge to a Nash Equilibrium,CoRR,2017
+23231becd8ca7bd3f1f10660e1709554a21c64bf,Semantic Edge Detection with Diverse Deep Supervision,,2018
+23d5b2dccd48a17e743d3a5a4d596111a2f16c41,3D shape estimation in video sequences provides high precision evaluation of facial expressions,Image Vision Comput.,2012
+23086a13b83d1b408b98346cf44f3e11920b404d,Cascade of Tasks for facial expression analysis,Image Vision Comput.,2016
+2329b177c71c7087013ab4bfdc3154a6ba87ff8c,Real-time and robust object tracking in video via low-rank coherency analysis in feature space,Pattern Recognition,2015
+4fd29e5f4b7186e349ba34ea30738af7860cf21f,Circulant Temporal Encoding for Video Retrieval and Temporal Alignment,International Journal of Computer Vision,2015
+4f298d6d0c8870acdbf94fe473ebf6814681bd1f,Going deeper into action recognition: A survey,Image Vision Comput.,2017
+4fbef7ce1809d102215453c34bf22b5f9f9aab26,Robust Face Recognition for Data Mining,,2009
+4fa0d73b8ba114578744c2ebaf610d2ca9694f45,Rethinking Spatiotemporal Feature Learning For Video Understanding,CoRR,2017
+4f9001753ceb18ee06f825687abe0e3d292e71e0,Non-parametric score normalization for biometric verification systems,Proceedings of the 21st International Conference on Pattern Recognition (ICPR2012),2012
+4fc67275bd9d68895933c3baddec266402cc2412,A Correlation Based Feature Representation for First-Person Activity Recognition,CoRR,2017
+4f0bf2508ae801aee082b37f684085adf0d06d23,Max-margin Non-negative Matrix Factorization,Image Vision Comput.,2012
+4f7d9c5fc3e0fd1b1a4860003bf2b482a215f721,Building and using fuzzy multimedia ontologies for semantic image annotation,Multimedia Tools and Applications,2013
+4f5ceebe7d166b2b96ef080e179e8f58f7787e5d,"Cognitive Tomography Reveals Complex, Task-Independent Mental Representations",,2013
+8d2d27753d316494574c4e8ac51190921e0765bb,Partially-supervised learning from facial trajectories for face recognition in video surveillance,Information Fusion,2015
+8d3b9a07483a9a80e7e8d67d9042ab6557c578d2,Head detection and orientation estimation for pedestrian safety,17th International IEEE Conference on Intelligent Transportation Systems (ITSC),2014
+8d71872d5877c575a52f71ad445c7e5124a4b174,Shadow compensation in 2D images for face recognition,Pattern Recognition,2007
+8d5ea0c79eecc9e6c857eac5d494d57960e0f587,Watch-List Screening Using Ensembles Based on Multiple Face Representations,2014 22nd International Conference on Pattern Recognition,2014
+8d96fbc52ffd784dee573d44e0c47a3577fd0266,Face identity recognition in autism spectrum disorders: a review of behavioral studies.,Neuroscience and biobehavioral reviews,2012
+8de026cf8a9a82d55743aaa4ec18c86029fda096,Bayesian Semantic Instance Segmentation in Open Set World,Unknown,2018
+8dc81389a61d4d80644f44e1fcfd35ccfb332082,Understanding and Predicting The Attractiveness of Human Action Shot,CoRR,2017
+8dbe79830713925affc48d0afa04ed567c54724b,Automatic facial age estimation,,2015
+8dc2b137b2a1a3713f6ce5e78f621a9f0f036bf8,Non-myopic information theoretic sensor management of a single pan-tilt-zoom camera for multiple object detection and tracking,Computer Vision and Image Understanding,2015
+8d712cef3a5a8a7b1619fb841a191bebc2a17f15,Non-verbal communication analysis in Victim-Offender Mediations,Pattern Recognition Letters,2015
+15860bc14c38c89256a4263b0d31eb67fd8ed923,Mel-cepstral methods for image feature extraction,2010 IEEE International Conference on Image Processing,2010
+1523ca87c74e967870e2aab738d9b25c15c03e8a,RoboCupRescue 2010 - Robot League Team,,2010
+15f795d436aaa9e77ccccb00b9df49bf0127f8b1,"Dissociation between face perception and face memory in adults, but not children, with developmental prosopagnosia",Developmental Cognitive Neuroscience,2014
+150f4d8a46dd90048acada63c42c12392c5706f5,Automatic Facial Expression Recognition using Bags of Motion Words,,2010
+15c99ba792bfb0496694884af5075c81a266ee46,Reconstructing Evolving Tree Structures in Time Lapse Sequences by Enforcing Time-Consistency - Appendix,,2017
+15b07dae17f184c8e6efbc9d2b58526d8e8dc9d4,Sketched Subspace Clustering,IEEE Transactions on Signal Processing,2018
+15e28e884fb6c7eba2610e3dfcd5b40dadb14155,DeepID-Net: multi-stage and deformable deep convolutional neural networks for object detection,CoRR,2014
+155219f3e6dcf5d5f44815a7493b6b7cc8e02263,Fully convolutional neural networks for dynamic object detection in grid maps,2017 IEEE Intelligent Vehicles Symposium (IV),2017
+15e27e189fe9549d674ebd0f55a7bf1fa026cb85,A Multi-cut Formulation for Joint Segmentation and Tracking of Multiple Objects,CoRR,2016
+1513949773e3a47e11ab87d9a429864716aba42d,Demographic classification from face videos using manifold learning,Neurocomputing,2013
+1550518a37d58e708023b9a1d457940a9c465717,Isometric deformation invariant 3D shape recognition,Pattern Recognition,2012
+1212af29cc596e8d058c1dc450b2040c51be6d6c,Multi-modal human aggression detection,Computer Vision and Image Understanding,2016
+1209fa3c61fb5cdb18d1afa55d64f155398827f5,An Interactive Approach to Pose-Assisted and Appearance-based Segmentation of Humans,2007 IEEE 11th International Conference on Computer Vision,2007
+12feba45b219f129b5f12c16b5ffb5c1687b66e0,Learning a Fully Convolutional Network for Object Recognition using very few Data,CoRR,2017
+1225619985309d5b7ea7cd55985707a2e07dec90,Static and space-time visual saliency detection by self-resemblance.,Journal of vision,2009
+1203b4fe233bb7514d7ba257089392c16a83a17b,Performance Analysis of Joint Opportunistic Scheduling and Receiver Design for MIMO-SDMA Downlink,,2011
+123286df95d93600f4281c60a60c69121c6440c7,Deep self-paced learning for person re-identification,Pattern Recognition,2018
+1287bfe73e381cc8042ac0cc27868ae086e1ce3b,Computational Mid-Level Vision: From Border Ownership to Categorical Object Recognition,Unknown,2015
+12138be732a2aa10e4eef460979bec64eb8e4f4c,Intelligent multi-camera video surveillance: A review,Pattern Recognition Letters,2013
+120dc243f034d517a2181d1788d921510ef30cbf,Foreground Focus: Finding Meaningful Features in Unlabeled Images,,2008
+12c713166c46ac87f452e0ae383d04fb44fe4eb2,Fusion Classifier for Open-Set Face Recognition with Pose Variations,,2009
+12bbd57ce427a9f847fdf4456eab3bd5caeb5891,Multiple Viewpoint Recognition and Localization,,2010
+12b2ae1ebbaed2e664a028b3d845456061722a6a,Greedy algorithm for real-time multi-object tracking,2014 IEEE International Conference on Image Processing (ICIP),2014
+128335bef19faa51f127e6a07a434b8949f59b0b,Human Attention in Visual Question Answering: Do Humans and Deep Networks look at the same regions?,Computer Vision and Image Understanding,2016
+8cb3f421b55c78e56c8a1c1d96f23335ebd4a5bf,Facial expression recognition and synthesis based on an appearance model,Sig. Proc.: Image Comm.,2004
+8cb3e0c4ad37dd7e0abd2eedd704d4d27edb0a17,Vehicle Detection in Aerial Images,CoRR,2018
+855bfc17e90ec1b240efba9100fb760c068a8efa,Facial expression recognition using tracked facial actions: Classifier performance analysis,Eng. Appl. of AI,2013
+852ff0d410a25ebb7936043a05efe2469c699e4b,Learning local binary patterns for gender classification on real-world face images,Pattern Recognition Letters,2012
+1d1184c92e7651d09d3231b4e650f3611a8e2c8b,Use of non-photorealistic rendering and photometric stereo in making bas-reliefs from photographs,Graphical Models,2014
+1dbbec4ad8429788e16e9f3a79a80549a0d7ac7b,Global Sensitivity Analysis for MAP Inference in Graphical Models,,2014
+1d08754a95715d1058772b48ecfb082bddfb16d8,Discriminative and Efficient Label Propagation on Complementary Graphs for Multi-Object Tracking,IEEE Transactions on Pattern Analysis and Machine Intelligence,2016
+1d846934503e2bd7b8ea63b2eafe00e29507f06a,Manifold Based Analysis of Facial Expression,2004 Conference on Computer Vision and Pattern Recognition Workshop,2004
+1d086defc586f914eb88acc380714478e0ad595c,Face recognition: a convolutional neural-network approach,IEEE transactions on neural networks,1997
+1d3e01d5e2721dcfafe5a3b39c54ee1c980350bb,Face Alignment by Explicit Shape Regression,2012 IEEE Conference on Computer Vision and Pattern Recognition,2012
+1dfe35869c4cdb41cc1bd2c622d38d57ef8e310f,Recent progress in road and lane detection: a survey,Machine Vision and Applications,2011
+1df1391795000c8085f81316043f0a0adca87379,Functional and structural brain correlates of risk for major depression in children with familial depression,,2015
+1d1fe1bb2cecd94b1f905cf1d0675d214f6ebc50,Slowing down presentation of facial movements and vocal sounds enhances facial expression recognition and induces facial-vocal imitation in children with autism.,Journal of autism and developmental disorders,2007
+1d6068631a379adbcff5860ca2311b790df3a70f,Efficient smile detection by Extreme Learning Machine,Neurocomputing,2015
+1db11bd3e2d0794cbb0fab25508b494e0f0a46ea,Multi-target tracking by online learning of non-linear motion patterns and robust appearance models,2012 IEEE Conference on Computer Vision and Pattern Recognition,2012
+1df49237f269b6809bcc90232776407359558d55,Efficient Control of PTZ Cameras in Automated Video Surveillance Systems,2012 IEEE International Symposium on Multimedia,2012
+1da19761c5b36e3169e7c7e9d2b2c519b5276691,Person Re-Identification by Common-Near-Neighbor Analysis,IEICE Transactions,2014
+7142e659d6466717cdb8a242d8e34fce176b3f4a,Improved scene identification and object detection on egocentric vision of daily activities,Computer Vision and Image Understanding,2017
+71cf2badae09d206b94e1a07cb73018e4334d638,AlignedReID: Surpassing Human-Level Performance in Person Re-Identification,CoRR,2017
+7113b51f20c01ea5cbe0be04c19588d20f432f9f,Face Recognition by Discriminative Orthogonal Rank-one Tensor Decomposition,,2008
+7125b81253ce09c46cdccff465b6066d8550c80b,Extracting discriminative features for CBIR,Multimedia Tools and Applications,2011
+7158179c1cc56edb32a2da3a139a168592bbd260,Boosting LiDAR-based Semantic Labeling by Cross-Modal Training Data Generation,CoRR,2018
+7105585fd49ba914e980c45cd72dd2cfcabea7c9,Saliency does not account for fixations to eyes within social scenes,Vision Research,2009
+7171b46d233810df57eaba44ccd8eabd0ad1f53a,Joint Face Representation Adaptation and Clustering in Videos: Supplementary Material,,2016
+71ceeb34631718e3492fe7c103ceb9cc2de3c260,Florida International University - University of Miami TRECVID 2016,,2016
+71dc03d6c837ca9ec1334a63bea24d836de076a0,Complex Events Recognition under Uncertainty in a Sensor Network,CoRR,2014
+763b9ab0218760aaee314fc92c62efc9a2095b46,Efficient indexing for large scale visual search,2009 IEEE 12th International Conference on Computer Vision,2009
+768ea76f9690b74bff51b6c7bada3994681f79bc,The right place at the right time: Priming facial expressions with emotional face components in developmental visual agnosia,,2012
+760a712f570f7a618d9385c0cee7e4d0d6a78ed2,Sparse Representation with Kernels,,2012
+76e834df333586fa9906afbdabb9a33bef98a56b,Survey on LBP based texture descriptors for image classification,Expert Syst. Appl.,2012
+76e8fd009eb7e126af8de59953b1fb9d3d841800,Cross-modal domain adaptation for text-based regularization of image semantics in image retrieval systems,Computer Vision and Image Understanding,2014
+76295bf84f26477457bd78250d0d9f6f9bb3de12,Contextual RNN-GANs for Abstract Reasoning Diagram Generation,,2017
+76b9fe32d763e9abd75b427df413706c4170b95c,Gabor feature based robust representation and classification for face recognition with Gabor occlusion dictionary,Pattern Recognition,2013
+76cb86ab21796d81790b1c98c10e4090ea187c7a,Neural network ensemble with probabilistic fusion and its application to gait recognition,Neurocomputing,2009
+763d9eef06b454d722c88ffab8dfb9538a57c06b,Audio-video biometric recognition for non-collaborative access granting,J. Vis. Lang. Comput.,2009
+7644d90efef157e61fe4d773d8a3b0bad5feccec,Linear local tangent space alignment and application to face recognition,Neurocomputing,2007
+7667484b76a893287f3728e5b7604034ff868edf,Recognition of Low-Resolution Faces Using Multiple Still Images and Multiple Cameras,"2008 IEEE Second International Conference on Biometrics: Theory, Applications and Systems",2008
+1c727208d1d9bb1f712a27ec626dae862efc3a6c,Representative Selection with Structured Sparsity,Pattern Recognition,2017
+1ccf5670461638542b32fc7bd86cd47bf2f9d050,Combining Language and Vision with a Multimodal Skip-gram Model,,2015
+1c6be6874e150898d9db984dd546e9e85c85724e,Generalized quotient image,"Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.",2004
+1ceb1c0fc4a9673da6c394ef729e02c9fb96a83a,"Visual re-identification across large, distributed camera networks",Image Vision Comput.,2015
+1cc084aaf9ffb015f76eb2406e11745ab847ef3e,Learning to Compose Neural Networks for Question Answering,,2016
+1c65f3b3c70e1ea89114f955624d7adab620a013,Local Polynomial Approximation-Local Binary Pattern (LPA-LBP) based Face Classification,,2011
+82475afbd13452349777c73f68c771b23e15d830,Benchmark Driven Framework for Development of Emotion Sensing Support Systems,2012 European Intelligence and Security Informatics Conference,2012
+82bef8481207de9970c4dc8b1d0e17dced706352,Motion History for Facial Action Detection,,2004
+82821e227683d66543a303f4faddc1376a91a463,Learning Multi-grid Generative ConvNets by Minimal Contrastive Divergence,CoRR,2017
+827f6ddae388c9ee727cb7d91fb276f774ee4cc9,An efficient 3D face recognition approach using local geometrical signatures,Pattern Recognition,2014
+822bc017e4dccbbc453fc142145bd853dfb062dd,Knowledge Discovery of Artistic Influences: A Metric Learning Approach,,2014
+8236dfad541d0caa066ccfb2bb04731e3c74db37,Effectiveness Comparison of Visual and Semantic Features for Noise Image Removal,,2016
+82a7ee86e3a8a0cf5a0447cabe94150e30b01f25,Unsupervised learning of object detectors for everyday scenes,,2011
+4953dc81247efe5a1c28c79fd1d4ab69bbb9f21c,Structured deep hashing with convolutional neural networks for fast person re-identification,Computer Vision and Image Understanding,2018
+49c88aa6a22a41eef4058578ce1470964439b35f,3D laser scan classification using web data and domain adaptation,,2009
+49a5d855f91c6ec6d1724a200d33e92c41f73480,Cascade Adversarial Machine Learning Regularized with a Unified Embedding,CoRR,2017
+49dd4b359f8014e85ed7c106e7848049f852a304,Feature extraction by learning Lorentzian metric tensor and its extensions,Pattern Recognition,2010
+49f200f4651a8832d9005ed9b5cec4200f0a411b,Learning to Detect Multiple Photographic Defects,,2016
+49659fb64b1d47fdd569e41a8a6da6aa76612903,Dogs Can Discriminate Emotional Expressions of Human Faces,Current Biology,2015
+49609ea8946d5c4d8fad96553b10e2b07f4e2485,Learning Human Pose Estimation Features with Convolutional Networks,CoRR,2013
+49812218d3b84ab65ddc52fd2e7e17c688d2dfe9,Feature selection from high-order tensorial data via sparse decomposition,Pattern Recognition Letters,2012
+4949924ea5a5e68e180f71dec743b7b3fe3fb9cf,Video interpolation using optical flow and Laplacian smoothness,Neurocomputing,2017
+4938651efabea4c55acb9485bdb0858a82e9013f,Labeled multi-Bernoulli tracking for industrial mobile platform safety,2017 IEEE International Conference on Mechatronics (ICM),2017
+40d3b108399253862a151f242e4906f280c88418,Human pose search using deep networks,Image Vision Comput.,2017
+40c3f90f0abf842ee6f6009c414fde4f86b82005,Synchronization Detection and Recovery of Steganographic Messages with Adversarial Learning,CoRR,2018
+404776aa18031828f3d5dbceed39907f038a47fe,Sparsely encoded local descriptor for face verification,Neurocomputing,2015
+407e4b401395682b15c431347ec9b0f88ceec04b,Multi-target tracking by learning local-to-global trajectory models,Pattern Recognition,2015
+40638a7a9e0a0499af46053c6efc05ce0b088a28,Which Training Methods for GANs do actually Converge?,,2018
+40cd062438c280c76110e7a3a0b2cf5ef675052c,Distance Maps: a Robust Illumination Preprocessing for Active Appearance Models,,2006
+40a1935753cf91f29ffe25f6c9dde2dc49bf2a3a,Generating a Diverse Set of High-Quality Clusterings,,2011
+40905b69c5d9fe95a25de37877f5045061c61a20,Iterative Closest Normal Point for 3D Face Recognition,IEEE Transactions on Pattern Analysis and Machine Intelligence,2013
+4042bbb4e74e0934f4afbedbe92dd3e37336b2f4,WND-CHARM: Multi-purpose image classification using compound image transforms,Pattern recognition letters,2008
+4032597bf9727adc3f4e3191ec17b87d9ce0980b,Memories of good deeds past: The reinforcing power of prosocial behavior in children.,Journal of experimental child psychology,2016
+40492c5e4e7b790554c9a990549e01808127f625,A fast method for the implementation of common vector approach,Inf. Sci.,2010
+2e20ed644e7d6e04dd7ab70084f1bf28f93f75e9,DiscLDA: Discriminative Learning for Dimensionality Reduction and Classification,,2008
+2ec56b437ad9391ce5ed85b68561a4e58f21d976,Multi-target tracking on confidence maps: An application to people tracking,Computer Vision and Image Understanding,2013
+2e0eb98d045565978f048d1eebc0f0f2fdf020b5,Head Mounted Pupil Tracking Using Convolutional Neural Network,CoRR,2018
+2eb14814511d93fcd01e81f4f838647eb10af3be,Dense appearance modeling and efficient learning of camera transitions for person re-identification,2012 19th IEEE International Conference on Image Processing,2012
+2e0e056ed5927a4dc6e5c633715beb762628aeb0,Multilinear Supervised Neighborhood Preserving Embedding Analysis of Local Descriptor Tensor,,2012
+2e157e8b57f679c2f1b8e16d6e934f52312f08f6,2D Spherical Spaces for Face Relighting under Harsh Illumination,,2012
+2ee8900bbde5d3c81b7ed4725710ed46cc7e91cd,Graph embedding: a general framework for dimensionality reduction,2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05),2005
+2ea846a2def214b0bac54b671d7690e0d24f1496,Semi-supervised object recognition based on Connected Image Transformations,Expert Syst. Appl.,2013
+2e19371a2d797ab9929b99c80d80f01a1fbf9479,"L2, 1-based regression and prediction accumulation across views for robust facial landmark detection",Image Vision Comput.,2016
+2ed18139791ad8287b085c1539895d587800a373,Top-down influences on visual attention during listening are modulated by observer sex,Vision Research,2012
+2eb610d67ac07136fce4d9633edc28548aab76c8,"The Good, the Bad, and the Ugly Face Challenge Problem",Image Vision Comput.,2012
+2e3d081c8f0e10f138314c4d2c11064a981c1327,A Comprehensive Performance Evaluation of Deformable Face Tracking “In-the-Wild”,International Journal of Computer Vision,2017
+2e713b922c760b7cc8d3e7d12088e9806f2e9a8d,Exploring structure for long-term tracking of multiple objects in sports videos,Computer Vision and Image Understanding,2017
+2e86402b354516d0a8392f75430156d629ca6281,Joint Unsupervised Learning of Deep Representations and Image Clusters,2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR),2016
+2e1b1969ded4d63b69a5ec854350c0f74dc4de36,Comparative evaluation of 3D vs. 2D modality for automatic detection of facial action units,Pattern Recognition,2012
+2bfe6128731674488249316cd2db83fe9045278d,Real-time Human Pose Estimation with Convolutional Neural Networks,Unknown,2018
+2b8fa6187db53c53a01174838e7ff8b77205bedf,Multimode Image Clustering Using Optimal Image Descriptor,,2014
+2b3ceb40dced78a824cf67054959e250aeaa573b,Differentially private subspace clustering,,2015
+2baec98c19804bf19b480a9a0aa814078e28bb3d,Multi-conditional Latent Variable Model for Joint Facial Action Unit Detection,2015 IEEE International Conference on Computer Vision (ICCV),2015
+47d967496693a4842749df307280197fdb8b9c7a,Multiple Target Tracking Using Frame Triplets,,2012
+471908e99d6965f0f6d249c9cd013485dc2b21df,Many Paths to Equilibrium: GANs Do Not Need to Decrease a Divergence At Every Step,CoRR,2017
+472b22afde0446d85f4ea096510a9d2f342ab7c7,Robust classification of human actions from 3D data,2012 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT),2012
+476c216c1a9c74c665568f98203e8eff061d98c8,Efficient sequential feature selection based on adaptive eigenspace model,Neurocomputing,2015
+474c8e5bcbc744ff7045bfbedfdb336ad0ad12e3,Space-time representation of people based on 3D skeletal data: A review,Computer Vision and Image Understanding,2017
+47119c99f5aa1e47bbeb86de0f955e7c500e6a93,On Pairwise Cost for Multi-Object Network Flow Tracking,CoRR,2014
+47488b5e84c60a32f59a253750d06bcb8f6f7f63,Mining Mid-level Visual Patterns with Deep CNN Activations,International Journal of Computer Vision,2016
+47f8b3b3f249830b6e17888df4810f3d189daac1,Translational photometric alignment of single-view image sequences,Computer Vision and Image Understanding,2012
+47aeb3b82f54b5ae8142b4bdda7b614433e69b9a,"Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected ""In-the-Wild""",,2013
+47b38c14df17f60151b0f92a6be3e110d758c522,Au th or ' s pe rs on al co py Multi - view face and eye detection using discriminant features q,,2005
+476c00f8da4ef04477ca7398111841e2eccb6110,Face recognition for mobile phone applications,,2008
+477811ff147f99b21e3c28309abff1304106dbbe,Learning by expansion: Exploiting social media for image classification with few training examples,Neurocomputing,2012
+47e14fdc6685f0b3800f709c32e005068dfc8d47,Secure Face Matching Using Fully Homomorphic Encryption,CoRR,2018
+7880138c9ec1f0f78b7c896a93179e9b38f44a47,Copula Ordinal Regression for Joint Estimation of Facial Action Unit Intensity,2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR),2016
+78a4cabf0afc94da123e299df5b32550cd638939,Multi-view face recognition from single RGBD models of the faces,Computer Vision and Image Understanding,2017
+78a802b2c520cd32cc96f22238e1c05d88dd0068,Pedestrian Detection with Unsupervised Multi-stage Feature Learning,2013 IEEE Conference on Computer Vision and Pattern Recognition,2013
+784705fdf2c412fcf764841b980cfb85ef3944c1,A Complete Variational Tracker,,2014
+7897c8a9361b427f7b07249d21eb9315db189496,Feature selection via simultaneous sparse approximation for person specific face verification,2011 18th IEEE International Conference on Image Processing,2011
+78e076efc67a1d02339c6c42d5da570af374734b,Piecewise affine kernel tracking for non-planar targets,Pattern Recognition,2008
+783e48629dfbb44697b15a3bc0cb2aa3eea490eb,The Forgettable-Watcher Model for Video Question Answering,CoRR,2017
+78a9bebc5a9a3f10017cac4475fbc970f3a3ed35,Opposite effects of noradrenergic arousal on amygdala processing of fearful faces in men and women,NeuroImage,2013
+78a11b7d2d7e1b19d92d2afd51bd3624eca86c3c,Improved Deep Metric Learning with Multi-class N-pair Loss Objective,,2016
+78c823c2b3e6b198eb01dcc553f2e2642d23af15,High performance object detection by collaborative learning of Joint Ranking of Granules features,2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition,2010
+788a7b59ea72e23ef4f86dc9abb4450efefeca41,Robust Statistical Face Frontalization,2015 IEEE International Conference on Computer Vision (ICCV),2015
+789cff184f607384b4a45f361143e5c348ef97e5,Bayesian 3D shape from silhouettes,Digital Signal Processing,2013
+78c8f69b02badf1e295c78069a2272c539d373a9,Non-negative and sparse spectral clustering,Pattern Recognition,2014
+8b793aba8ef62e41dabf0d2bbe3e84b8ead59fe7,Feature distribution modelling techniques for 3D face verification,Pattern Recognition Letters,2010
+8b8728edc536020bc4871dc66b26a191f6658f7c,Robust gender recognition by exploiting facial attributes dependencies,Pattern Recognition Letters,2014
+8b4d8c26e4f76ae55474df2a3753bbbd0d75b8be,Eyetracking of social preference choices reveals normal but faster processing in autism.,Neuropsychologia,2015
+8b8dd053aae04f000e8b9d38f7397d7f1a3b5e99,Joint Sparse Principal Component Analysis,,2016
+8b98aa8be775eb4618779a682ba994b36adba24e,The role of priors in Bayesian models of perception,,2013
+133f42368e63928dc860cce7618f30ee186d328c,Local Zernike Moment Representation for Facial Affect Recognition,,2013
+137f8195eaa8e68f133395a9b9a232bffa7b2fc3,F Face Recognition Using Singular Value Decomposition along with Seven State,,2013
+136b9952f29632ab3fa2bbf43fed277204e13cb5,SUN database: Large-scale scene recognition from abbey to zoo Citation,,2010
+131130f105661a47e0ffb85c2fe21595785f948a,Supplemental Material: Unsupervised Feature Extraction Inspired by Latent Low-Rank Representation,,2014
+1330847470ccad3d47a09c70c76de2913f414695,Self-Updating with Facial Trajectories for Video-to-Video Face Recognition,2014 22nd International Conference on Pattern Recognition,2014
+131e395c94999c55c53afead65d81be61cd349a4,A Functional Regression approach to Facial Landmark Tracking,IEEE transactions on pattern analysis and machine intelligence,2017
+1384a83e557b96883a6bffdb8433517ec52d0bea,CSVideoNet: A Recurrent Convolutional Neural Network for Compressive Sensing Video Reconstruction,CoRR,2016
+13fd0a4d06f30a665fc0f6938cea6572f3b496f7,Regularized Extreme Learning Machine for Large-scale Media Content Analysis,,2015
+1339188247e3b8fd102b37501eb93cbeab71b870,Measuring the performance of face localization systems,Image Vision Comput.,2006
+13ef55829b636d248dca450bf4dbd743ef269131,Object Recognition on Horse Riding Simulator System,,2013
+7f2061fd27be3afac4d020a87ba40fded935a97f,Object categorization in sub-semantic space,Neurocomputing,2014
+7fb6bc6c920ca574677f0d3a40c5c377a095885b,Statistical synthesis of facial expressions for the portrayal of emotion,,2004
+7f15f56d7c0a17d9c81ca21029e7fd133b2b9347,Keybook: Unbias object recognition using keywords,Expert Syst. Appl.,2015
+7f4c9a659aa32482a646b7a7e1e6e68cead381e9,Efficient Tag Mining via Mixture Modeling for Real-Time Search-Based Image Annotation,2012 IEEE International Conference on Multimedia and Expo,2012
+7fc2979d8efa6cf5af0c66ca2556a83d434690d0,"Attribute And-Or Grammar for Joint Parsing of Human Pose, Parts and Attributes.",IEEE transactions on pattern analysis and machine intelligence,2017
+7fd6bb30ad5d7eb3078efbb85f94d2d60e701115,ReHAR: Robust and Efficient Human Activity Recognition,CoRR,2018
+7f3b7acee7851f933402f2a2cf4deb157e996851,A survey on Flickr multimedia research challenges,Eng. Appl. of AI,2016
+7f45650e4c9dd8cbc2bf2dd411fc24ba5631de60,Write a Classifier: Predicting Visual Classifiers from Unstructured Text,IEEE Transactions on Pattern Analysis and Machine Intelligence,2017
+7a9ef21a7f59a47ce53b1dff2dd49a8289bb5098,"Principles of Appearance Acquisition and Representation By Tim Weyrich , Jason Lawrence , Hendrik",,2009
+7ac6e6a4a7be438bc6aa4626d4beac780b875999,An interactive method for the image alignment problem based on partially supervised correspondence,Expert Syst. Appl.,2015
+7aa83aee1e8b2da7ec90c67e63161c24e85f4ba1,Face image classification by pooling raw features,Pattern Recognition,2016
+7a85b3ab0efb6b6fcb034ce13145156ee9d10598,Inter-image outliers and their application to image classification,Pattern Recognition,2010
+7ab930146f4b5946ec59459f8473c700bcc89233,Feature ranking for multi-label classification using Markov networks,Neurocomputing,2016
+7ada60106605bebb66812f85eed16d64d1acb972,Expression-assisted facial action unit recognition under incomplete AU annotation,Pattern Recognition,2017
+7a8c2743db1749c2d9f16f62ee633574c1176e34,Face Photo - Sketch Synthesis and Recognition,,
+14d96bbd718f20ef2115025148283584382286ea,Co-training framework of generative and discriminative trackers with partial occlusion handling,2011 IEEE Workshop on Applications of Computer Vision (WACV),2011
+141487cd6d32f6916bdcb029ac8159eba44e23de,Learning to Hash for Indexing Big Data - A Survey,Proceedings of the IEEE,2016
+14fa27234fa2112014eda23da16af606db7f3637,Unified formulation of linear discriminant analysis methods and optimal parameter selection,Pattern Recognition,2011
+14e949f5754f9e5160e8bfa3f1364dd92c2bb8d6,Multi-subregion based correlation filter bank for robust face recognition,Pattern Recognition,2014
+14d4c019c3eac3c3fa888cb8c184f31457eced02,Robust Subspace Discovery via Relaxed Rank Minimization,Neural computation,2014
+14fdce01c958043140e3af0a7f274517b235adf3,Discriminant analysis via support vectors,Neurocomputing,2010
+141fb4af72c7c33f57687f0233f53effc732c3db,Fast person re-identification based on dissimilarity representations,Pattern Recognition Letters,2012
+141eab5f7e164e4ef40dd7bc19df9c31bd200c5e,Local Linear Regression (LLR) for Pose Invariant Face Recognition,7th International Conference on Automatic Face and Gesture Recognition (FGR06),2006
+143c8b8a45d7176240b1bd7a6e7aab705866ccb2,Fine-grained Visual Categorization using PAIRS: Pose and Appearance Integration for Recognizing Subcategories,CoRR,2018
+1473a233465ea664031d985e10e21de927314c94,Exploiting Spatio-Temporal Structure with Recurrent Winner-Take-All Networks,IEEE transactions on neural networks and learning systems,2016
+14725e03c93088c071f51c68137b5b8fcfe2129e,Laplacian Reconstruction and Refinement for Semantic Segmentation,CoRR,2016
+14934f05299ee02675317cf65de7661970f80421,Deep learning evaluation using deep linguistic processing,CoRR,2017
+8eda0af45fe1fe32a22661aa1d03e7267a8181c8,VideoTicket: Detecting Identity Fraud Attempts via Audiovisual Certi cates and Signatures,,2008
+8e34de64c9cbd5d62b0ce53ab3d99092605d6c94,Social orienting: Reflexive versus voluntary control,Vision Research,2010
+8e0091f7360b7c1cf07dbd88ca13bc83a5b6a6d7,Sparse coding based visual tracking: Review and experimental comparison,Pattern Recognition,2013
+8e416d760feb5f23bc1a6dab98eb1f6e75ab8907,Multimodal Context for Natural Question and Response Generation,,2017
+8ed32c8fad924736ebc6d99c5c319312ba1fa80b,Centralized Gradient Pattern for Face Recognition,IEICE Transactions,2013
+8ec3325c12340d1b8b746b7e9b40616ace1f4d0b,InLiDa: A 3D Lidar Dataset for People Detection and Tracking in Indoor Environments,Unknown,2017
+225fb9181545f8750061c7693661b62d715dc542,Multi-Level ResNets with Stacked SRUs for Action Recognition,CoRR,2017
+22bebedc1a5f3556cb4f577bdbe032299a2865e8,Effective training of convolutional neural networks for face-based gender and age prediction,Pattern Recognition,2017
+22dada4a7ba85625824489375184ba1c3f7f0c8f,EventNet: A Large Scale Structured Concept Library for Complex Event Detection in Video,,2015
+22ffcf96be0e252397962f51401e6cc70ed27fbc,A Generative Framework for Real Time Object Detection,,2004
+22c530788e4f1a665e77621152b2c4267482d9bc,Sparse Multiscale Local Binary Patterns,,2006
+228594425c26d4fa97e8bc2e22329ebaec5d4b63,Which faces to tag: Adding prior constraints into active learning,2009 IEEE 12th International Conference on Computer Vision,2009
+22f656d0f8426c84a33a267977f511f127bfd7f3,From Facial Expression Recognition to Interpersonal Relation Prediction,International Journal of Computer Vision,2017
+22ec256400e53cee35f999244fb9ba6ba11c1d06,Empirically Analyzing the Effect of Dataset Biases on Deep Face Recognition Systems,CoRR,2017
+22f7b7e6d4997a489bad794d44c6e02af7a1c506,Scalable brain network construction on white matter fibers,Proceedings of SPIE--the International Society for Optical Engineering,2011
+22ec8af0f0e5469e40592d29e28cfbdf1154c666,Gaze Behavior Consistency among Older and Younger Adults When Looking at Emotional Faces,,2017
+22a7f1aebdb57eecd64be2a1f03aef25f9b0e9a7,Attribute-restricted latent topic model for person re-identification,Pattern Recognition,2012
+228d187a24b1b602105e91dd06ee35a35dbbfc38,Fast computation of low-rank matrix approximations,J. ACM,2007
+25560ab44f2ac093c2ef22daae33b0dc9b828901,"How do children learn to follow gaze, share joint attention, imitate their teachers, and use tools during social interactions?",Neural networks : the official journal of the International Neural Network Society,2010
+25d58e8c9a0c46d44dede888c4548479e8fee994,The 'amygdala theory of autism' revisited: linking structure to behavior.,Neuropsychologia,2006
+25d75339720787e7003f2f103cf38cee8175972a,Optimistic and Pessimistic Neural Networks for Scene and Object Recognition,CoRR,2016
+259e35fa5a57cf16010621639957777ebad72367,Tied Factor Analysis for Face Recognition Across Large Pose Changes,,2006
+25fce91ce1b974865506c14d2e4714d8db2672d1,Towards a Practical Face Recognition System: Robust Alignment and Illumination by Sparse Representation,,2011
+256c91400aa7e92160c889654614f70213947f06,Abrupt motion tracking via nearest neighbor field driven stochastic sampling,Neurocomputing,2015
+2557e2ed0a19cbe2d78e3d4daa5d39e62be5d009,"Detection and Segmentation of Multiple, Partially Occluded Objects by Grouping, Merging, Assigning Part Detection Responses",2008 IEEE Conference on Computer Vision and Pattern Recognition,2008
+2582ba3b7ca215f1ab98c6dbcc0190f754c54059,Robust Recovery of Subspace Structures by Low-Rank Representation,IEEE Transactions on Pattern Analysis and Machine Intelligence,2013
+256ef946b4cecd8889df8d799d0c9175ae986af9,Human Facial Expression Recognition Using Stepwise Linear Discriminant Analysis and Hidden Conditional Random Fields,IEEE Transactions on Image Processing,2015
+25b367dd1cc584a89e8fd7b34a7d98d212a9f168,A novel statistical generative model dedicated to face recognition,Image Vision Comput.,2010
+25b215169540e9109107a048c9e68159af82b771,Contrast invariant features for human detection in far infrared images,2012 IEEE Intelligent Vehicles Symposium,2012
+2574860616d7ffa653eb002bbaca53686bc71cdd,Culture shapes 7-month-olds’ perceptual strategies in discriminating facial expressions of emotion,Current Biology,2016
+25366ca0d124ca6222c7edf72681943969055024,Human activity prediction by mapping grouplets to recurrent Self-Organizing Map,Neurocomputing,2016
+25728e08b0ee482ee6ced79c74d4735bb5478e29,Thermal spatio-temporal data for stress recognition,EURASIP J. Image and Video Processing,2014
diff --git a/scraper/reports/stats/geocoded_papers.csv b/scraper/reports/stats/geocoded_papers.csv
new file mode 100644
index 00000000..2001c0cf
--- /dev/null
+++ b/scraper/reports/stats/geocoded_papers.csv
@@ -0,0 +1,12699 @@
+611961abc4dfc02b67edd8124abb08c449f5280a,Exploiting Image-trained CNN Architectures for Unconstrained Video Classification,Northwestern University,Northwestern University,"Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA",42.05511640,-87.67581113,edu,
+611961abc4dfc02b67edd8124abb08c449f5280a,Exploiting Image-trained CNN Architectures for Unconstrained Video Classification,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+61efa60e16c06e2820d863bc55f3c60e86f3f6e7,Pose Estimation of Players in Hockey Videos using Convolutional Neural Networks,University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.54724732,edu,
+616b7093cfe6ec679f25d63f62c16e937227258f,Bayesian Multi-object Tracking Using Motion Context from Multiple Objects,Hanyang University,Hanyang University,"한양대, 206, 왕십리로, 사근동, 성동구, 서울특별시, 04763, 대한민국",37.55572710,127.04366420,edu,
+616e69647b02e69cffa7eeb83cf3e72b8c532653,Spatiotemporal Networks for Video Emotion Recognition,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+616e69647b02e69cffa7eeb83cf3e72b8c532653,Spatiotemporal Networks for Video Emotion Recognition,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+616e69647b02e69cffa7eeb83cf3e72b8c532653,Spatiotemporal Networks for Video Emotion Recognition,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+616e69647b02e69cffa7eeb83cf3e72b8c532653,Spatiotemporal Networks for Video Emotion Recognition,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+6115445ca062b8f865f0b447c059813088b9dd49,A Dataset and Exploration of Models for Understanding Video Data through Fill-in-the-Blank Question-Answering,Polytechnique Montreal,Polytechnique Montr´eal,"2900 Boulevard Edouard-Montpetit, Montréal, QC H3T 1J4, Canada",45.50438400,-73.61288290,edu,"Polytechnique Montreal, Montreal, Quebec, Canada"
+61b288d120a44a0d92bae6e940eade40b1f26484,Accurate Object Detection with Joint Classification-Regression Random Forests,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+61dfbdfe718aca026cafa06adc63055bd0fc562e,A Multi-scale Triplet Deep Convolutional Neural Network for Person Re-identification,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+61dfbdfe718aca026cafa06adc63055bd0fc562e,A Multi-scale Triplet Deep Convolutional Neural Network for Person Re-identification,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+61dfbdfe718aca026cafa06adc63055bd0fc562e,A Multi-scale Triplet Deep Convolutional Neural Network for Person Re-identification,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+61e8584f5f37e6f47bdd2be2f93251ed5934cf48,Learning Binary Codes and Binary Weights for Efficient Classification,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"Columbus, OH 43210, USA",40.01419050,-83.03091430,edu,
+61933c42ed53f4fff5653489fb376ee967934701,Spatio-Temporal Person Retrieval via Natural Language Queries,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+6156eaad00aad74c90cbcfd822fa0c9bd4eb14c2,Complex Bingham Distribution for Facial Feature Detection,University of Louisville,University of Louisville,"University of Louisville, South Brook Street, Louisville, Jefferson County, Kentucky, 40208, USA",38.21675650,-85.75725023,edu,
+6156eaad00aad74c90cbcfd822fa0c9bd4eb14c2,Complex Bingham Distribution for Facial Feature Detection,Alexandria University,Alexandria University,"جامعة الإسكندرية, الكورنيش, إبراهيمية, الإسكندرية, 21522, مصر",31.21051105,29.91314562,edu,
+61ffedd8a70a78332c2bbdc9feba6c3d1fd4f1b8,Greedy Feature Selection for Subspace Clustering Greedy Feature Selection for Subspace Clustering,Rice University,Rice University,"Rice University, Stockton Drive, Houston, Harris County, Texas, 77005-1890, USA",29.71679145,-95.40478113,edu,
+61ffedd8a70a78332c2bbdc9feba6c3d1fd4f1b8,Greedy Feature Selection for Subspace Clustering Greedy Feature Selection for Subspace Clustering,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+61ffedd8a70a78332c2bbdc9feba6c3d1fd4f1b8,Greedy Feature Selection for Subspace Clustering Greedy Feature Selection for Subspace Clustering,Rice University,Rice University,"Rice University, Stockton Drive, Houston, Harris County, Texas, 77005-1890, USA",29.71679145,-95.40478113,edu,
+614a547cb976fae955e276feb2ccc9a33f1c7806,Classifier-as-a-Service: Online Query of Cascades and Operating Points,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+61542874efb0b4c125389793d8131f9f99995671,Fair comparison of skin detection approaches on publicly available datasets,University of Bologna,Università di Bologna,"Via Zamboni, 33, 40126 Bologna BO, Italy",44.49623180,11.35415700,edu,
+61f93ed515b3bfac822deed348d9e21d5dffe373,Deep Image Set Hashing,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+61f93ed515b3bfac822deed348d9e21d5dffe373,Deep Image Set Hashing,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+611849b55ef6b164f21e52cefd05300041e72152,A MultiTask Deep Network for Person Re-Identification,University of Dundee,University of Dundee,"University of Dundee, Park Wynd, Law, Dundee, Dundee City, Scotland, DD1 4HN, UK",56.45796755,-2.98214831,edu,
+61be9e8b1f2d642eb0b91a6097fe1c50c37a285c,Face Alignment Based on 3D Face Shape Model and Markov Random Field,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+612075999e82596f3b42a80e6996712cc52880a3,CNNs with cross-correlation matching for face recognition in video surveillance using a single training sample per person,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+61efeb64e8431cfbafa4b02eb76bf0c58e61a0fa,Merging datasets through deep learning,"IBM Research, North Carolina",IBM Research,"IBM, East Cornwallis Road, Research Triangle Park, Nelson, Durham County, North Carolina, 27709, USA",35.90422720,-78.85565763,company,
+61efeb64e8431cfbafa4b02eb76bf0c58e61a0fa,Merging datasets through deep learning,"IBM Research, North Carolina",IBM Research,"IBM, East Cornwallis Road, Research Triangle Park, Nelson, Durham County, North Carolina, 27709, USA",35.90422720,-78.85565763,company,
+6111832ed676ad0789d030577c87d4a539242bd3,CU-Net: Coupled U-Nets,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+6111832ed676ad0789d030577c87d4a539242bd3,CU-Net: Coupled U-Nets,Binghamton University,Binghamton University,"Binghamton University Downtown Center, Washington Street, Downtown, Binghamton, Broome County, New York, 13901, USA",42.09580770,-75.91455689,edu,
+61e9e180d3d1d8b09f1cc59bdd9f98c497707eff,Semi-supervised Learning of Facial Attributes in Video,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+611c8dcb8cc4c328f0b3be7961adb47689b371c1,The utility of multiple synthesized views in the recognition of unfamiliar faces,University of Birmingham,University of Birmingham,"University of Birmingham Edgbaston Campus, Ring Road North, Bournbrook, Birmingham, West Midlands Combined Authority, West Midlands, England, B15 2TP, UK",52.45044325,-1.93196134,edu,
+611c8dcb8cc4c328f0b3be7961adb47689b371c1,The utility of multiple synthesized views in the recognition of unfamiliar faces,University of New South Wales,University of New South Wales,"UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia",-33.91758275,151.23124025,edu,
+6193c833ad25ac27abbde1a31c1cabe56ce1515b,Trojaning Attack on Neural Networks,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+6123e52c1a560c88817d8720e05fbff8565271fb,Gated Siamese Convolutional Neural Network Architecture for Human Re-identification,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+614079f1a0d0938f9c30a1585f617fa278816d53,Automatic Detection of ADHD and ASD from Expressive Behaviour in RGBD Data,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+614079f1a0d0938f9c30a1585f617fa278816d53,Automatic Detection of ADHD and ASD from Expressive Behaviour in RGBD Data,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+61acd4e07657094c2720bb60299dba0014ec89a6,Image annotation by kNN-sparse graph-based label propagation over noisily tagged web images,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+61acd4e07657094c2720bb60299dba0014ec89a6,Image annotation by kNN-sparse graph-based label propagation over noisily tagged web images,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+61acd4e07657094c2720bb60299dba0014ec89a6,Image annotation by kNN-sparse graph-based label propagation over noisily tagged web images,"University of California, Irvine","University of California, Irvine","University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA",33.64319010,-117.84016494,edu,
+61acd4e07657094c2720bb60299dba0014ec89a6,Image annotation by kNN-sparse graph-based label propagation over noisily tagged web images,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+6196fa926ae752f927cd550b74259069e18abc71,Unsupervised Holistic Image Generation from Key Local Patches,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+61a7aae4f90ce5214fe899647e58e803b70ba5eb,Emotionally aware automated portrait painting,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+61a7aae4f90ce5214fe899647e58e803b70ba5eb,Emotionally aware automated portrait painting,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+61a7aae4f90ce5214fe899647e58e803b70ba5eb,Emotionally aware automated portrait painting,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+61847a342471d9482129bc2d6e0c79089f331040,Pose search: Retrieving people using their pose,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+6184ddbe780cb934f036b04dd1d28226b6bcbcce,Supervised hashing with kernels,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+6184ddbe780cb934f036b04dd1d28226b6bcbcce,Supervised hashing with kernels,IBM Thomas J. Watson Research Center,IBM Thomas J. Watson Research Center,"IBM Yorktown research lab, Adams Road, Millwood, Town of New Castle, Westchester County, New York, 10562, USA",41.21002475,-73.80407056,company,
+6184ddbe780cb934f036b04dd1d28226b6bcbcce,Supervised hashing with kernels,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+0d746111135c2e7f91443869003d05cde3044beb,Partial face detection for continuous authentication,State University of New Jersey,The State University of New Jersey,"Rutgers New Brunswick: Livingston Campus, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA",40.51865195,-74.44099801,edu,
+0d746111135c2e7f91443869003d05cde3044beb,Partial face detection for continuous authentication,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+0d1d7471e8b08a4577b60a63b35fbd88dbf38ec0,A dataset for workflow recognition in industrial scenes,National Technical University of Athens,National Technical University of Athens,"Εθνικό Μετσόβιο Πολυτεχνείο, Στουρνάρη, Μουσείο, Αθήνα, Δήμος Αθηναίων, Π.Ε. Κεντρικού Τομέα Αθηνών, Περιφέρεια Αττικής, Αττική, 11250, Ελλάδα",37.98782705,23.73179733,edu,
+0da75b0d341c8f945fae1da6c77b6ec345f47f2a,The Effect of Computer-Generated Descriptions on Photo-Sharing Experiences of People With Visual Impairments,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+0da75b0d341c8f945fae1da6c77b6ec345f47f2a,The Effect of Computer-Generated Descriptions on Photo-Sharing Experiences of People With Visual Impairments,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+0db43ed25d63d801ce745fe04ca3e8b363bf3147,Kernel Principal Component Analysis and its Applications in Face Recognition and Active Shape Models,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+0daf696253a1b42d2c9d23f1008b32c65a9e4c1e,Unsupervised discovery of facial events,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+0daf696253a1b42d2c9d23f1008b32c65a9e4c1e,Unsupervised discovery of facial events,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+0d7810ba414b746b0d4f73aa94042bb0ea8f324d,Attention-based Few-Shot Person Re-identification Using Meta Learning,University of Tennessee,University of Tennessee,"University of Tennessee, Melrose Avenue, Fort Sanders, Knoxville, Knox County, Tennessee, 37916, USA",35.95424930,-83.93073950,edu,
+0d5f898d59ce592ce5cc62643753aee72c4153ce,Backprojection Revisited: Scalable Multi-view Object Detection and Similarity Metrics for Detections,ETH Zurich,"Computer Vision Laboratory, ETH Zurich, Zurich, Switzerland","Sternwartstrasse 7, 8092 Zürich, Switzerland",47.37723980,8.55216180,edu,
+0da2a7ee04092645867614db3574cb261f33b6e2,Watching Unlabeled Video Helps Learn New Human Actions from Very Few Labeled Snapshots,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+0de91641f37b0a81a892e4c914b46d05d33fd36e,RAPS: Robust and Efficient Automatic Construction of Person-Specific Deformable Models,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+0de91641f37b0a81a892e4c914b46d05d33fd36e,RAPS: Robust and Efficient Automatic Construction of Person-Specific Deformable Models,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+0d6f874b5a0772d1ea88e85a010a01e381d02982,Sparse Kernel Clustering of Massive High-Dimensional Data sets with Large Number of Clusters,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+0db41739f514c4c911c54a4c90ab5f07db3862dc,NCA-Net for Tracking Multiple Objects across Multiple Cameras,Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+0df0d1adea39a5bef318b74faa37de7f3e00b452,Appearance-based gaze estimation in the wild,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+0d3018c0630fe3f44f96c7cb4c6cabc1517b100a,Fully Convolutional Crowd Counting on Highly Congested Scenes,Dublin City University,DUBLIN CITY UNIVERSITY,"Dublin City University Glasnevin Campus, Lower Car Park, Wad, Whitehall A ED, Dublin 9, Dublin, County Dublin, Leinster, D09 FW22, Ireland",53.38522185,-6.25740874,edu,
+0df90f86da6e92c7a351be6d5f7cf9c1452124d0,Person Search by Multi-Scale Matching,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+0d08ab78ce86e053ff3003aef951a5174d56beb8,A Thermal Infrared Video Benchmark for Visual Analysis,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+0d08ab78ce86e053ff3003aef951a5174d56beb8,A Thermal Infrared Video Benchmark for Visual Analysis,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+0db6a58927a671c01089c53248b0e1c36bdc3231,Efficient Point Process Inference for Large-Scale Object Detection,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+0d3bb75852098b25d90f31d2f48fd0cb4944702b,A data-driven approach to cleaning large face datasets,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+0d5bb6e4d2394e78c25ef9312ccbb3085d294d66,Spatial-Temporal Synergic Residual Learning for Video Person Re-Identification,Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+0d0a348510cb2fbefbb3225ee18fafc1479eaeef,Multi-Language Image Description with Neural Sequence Models,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+0d0a348510cb2fbefbb3225ee18fafc1479eaeef,Multi-Language Image Description with Neural Sequence Models,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+0d0b880e2b531c45ee8227166a489bf35a528cb9,Structure Preserving Object Tracking,Delft University of Technology,Delft University of Technology,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland",51.99882735,4.37396037,edu,
+0d57d3d2d04fc96d731cac99a7a8ef79050dac75,Not Everybody's Special: Using Neighbors in Referring Expressions with Uncertain Attributes,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+0daaa56d724c11e64338996e99a257fa69900236,Recurrent Convolutional Neural Network Regression for Continuous Pain Intensity Estimation in Video,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+0d75052f1d7350fa035a35566555ce7b65d1cd2f,Oracle Performance for Visual Captioning,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+0dc49271dc30794c8d4e7f9da025880fcdc8498b,Cascaded Mutual Modulation for Visual Reasoning,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+0d3068b352c3733c9e1cc75e449bf7df1f7b10a4,Context Based Facial Expression Analysis in the Wild,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+0dfa460a35f7cab4705726b6367557b9f7842c65,Modeling Spatial-Temporal Clues in a Hybrid Deep Learning Framework for Video Classification,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+0da5384dbd1646ed722bd9dc7f7387cbcadcb41f,Learning Deep Representation with Large-Scale Attributes,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+0d2e29f07275fe05a44b04f16cd3edd0c3f448f0,Development of the Korean Facial Emotion Stimuli: Korea University Facial Expression Collection 2nd Edition,Korea University,Korea University,"고려대, 안암로, 제기동, 동대문구, 서울특별시, 02796, 대한민국",37.59014110,127.03623180,edu,
+0d2e29f07275fe05a44b04f16cd3edd0c3f448f0,Development of the Korean Facial Emotion Stimuli: Korea University Facial Expression Collection 2nd Edition,University of Manchester,University of Manchester,"University of Manchester - Main Campus, Brunswick Street, Curry Mile, Ardwick, Manchester, Greater Manchester, North West England, England, M13 9NR, UK",53.46600455,-2.23300881,edu,
+0de0c329e07ffb91d100424259a4a18973d731a9,Functionally Modular and Interpretable Temporal Filtering for Robust Segmentation,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+0d35ab4d59c3731986965dcc935d11074832bc1d,Detecting adversarial example attacks to deep neural networks,University of Pisa,University of Pisa,"Dipartimento di Fisica 'E. Fermi', 3, Largo Bruno Pontecorvo, San Francesco, Pisa, PI, TOS, 56127, Italia",43.72012990,10.40789760,edu,
+0d14261e69a4ad4140ce17c1d1cea76af6546056,Adding Facial Actions into 3D Model Search to Analyse Behaviour in an Unconstrained Environment,University of Manchester,University of Manchester,"University of Manchester - Main Campus, Brunswick Street, Curry Mile, Ardwick, Manchester, Greater Manchester, North West England, England, M13 9NR, UK",53.46600455,-2.23300881,edu,
+0dbacb4fd069462841ebb26e1454b4d147cd8e98,Recent advances in discriminant non-negative Matrix Factorization,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+0db36bf08140d53807595b6313201a7339470cfe,Moving vistas: Exploiting motion for describing scenes,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+0dceca6bb3ac648c611f7097cf52a9b7f59be6f9,An Egocentric Look at Video Photographer Identity,Hebrew University of Jerusalem,The Hebrew University of Jerusalem,"האוניברסיטה העברית בירושלים, Reagan Plaza, קרית מנחם בגין, הר הצופים, ירושלים, מחוז ירושלים, NO, ישראל",31.79185550,35.24472300,edu,
+0d781b943bff6a3b62a79e2c8daf7f4d4d6431ad,EmotiW 2016: video and group-level emotion recognition challenges,University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.54724732,edu,
+0d781b943bff6a3b62a79e2c8daf7f4d4d6431ad,EmotiW 2016: video and group-level emotion recognition challenges,University of Canberra,University of Canberra,"University of Canberra, University Drive, Bruce, Belconnen, Australian Capital Territory, 2617, Australia",-35.23656905,149.08446994,edu,
+0d781b943bff6a3b62a79e2c8daf7f4d4d6431ad,EmotiW 2016: video and group-level emotion recognition challenges,University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.54724732,edu,
+0d781b943bff6a3b62a79e2c8daf7f4d4d6431ad,EmotiW 2016: video and group-level emotion recognition challenges,University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.54724732,edu,
+0d781b943bff6a3b62a79e2c8daf7f4d4d6431ad,EmotiW 2016: video and group-level emotion recognition challenges,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+0d4c620aa869585e31ca7018c813569f3ec1a028,Survey on Vision-Based Path Prediction,Chubu University,Chubu University,"中部大学, 国道19号, 春日井市, 愛知県, 中部地方, 487-8501, 日本",35.27426550,137.01327841,edu,
+0d4c620aa869585e31ca7018c813569f3ec1a028,Survey on Vision-Based Path Prediction,Hiroshima University,Hiroshima University,"Hiroshima University 広島大学 東広島キャンパス, 出会いの道 Deai-no-michi Str., 西条下見, 東広島市, 広島県, 中国地方, 739-0047, 日本",34.40197660,132.71231950,edu,
+0d7f770c3b6857d5ef5dfe5f1b23e69f4a575fd3,Nonconvex Sparse Spectral Clustering by Alternating Direction Method of Multipliers and Its Convergence Analysis,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+0d7f770c3b6857d5ef5dfe5f1b23e69f4a575fd3,Nonconvex Sparse Spectral Clustering by Alternating Direction Method of Multipliers and Its Convergence Analysis,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+0d7f770c3b6857d5ef5dfe5f1b23e69f4a575fd3,Nonconvex Sparse Spectral Clustering by Alternating Direction Method of Multipliers and Its Convergence Analysis,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+0dcff2dbf287a6e2937f495e1cd887297863296d,A Novel Method for Estimating Free Space 3D Point-of-Regard Using Pupillary Reflex and Line-of-Sight Convergence Points,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+0dcff2dbf287a6e2937f495e1cd887297863296d,A Novel Method for Estimating Free Space 3D Point-of-Regard Using Pupillary Reflex and Line-of-Sight Convergence Points,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+0d1d9a603b08649264f6e3b6d5a66bf1e1ac39d2,Effects of emotional expressions on persuasion,University of Nebraska - Lincoln,University of Nebraska - Lincoln,"Sheldon Museum of Art, North 12th Street, West Lincoln, Lincoln, Lancaster County, Nebraska, 68588-0300, USA",40.81747230,-96.70444680,edu,
+0d1d9a603b08649264f6e3b6d5a66bf1e1ac39d2,Effects of emotional expressions on persuasion,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+0d1d9a603b08649264f6e3b6d5a66bf1e1ac39d2,Effects of emotional expressions on persuasion,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+0d1d9a603b08649264f6e3b6d5a66bf1e1ac39d2,Effects of emotional expressions on persuasion,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+0d1d9a603b08649264f6e3b6d5a66bf1e1ac39d2,Effects of emotional expressions on persuasion,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+0d1d9a603b08649264f6e3b6d5a66bf1e1ac39d2,Effects of emotional expressions on persuasion,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+0d5fa5be4bfe085de8f88dbee1c3b2a6e5ab9ee2,ICNet for Real-Time Semantic Segmentation on High-Resolution Images,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+95f858658c2955924c00e8abc2018c68c3837e83,Harmonious Attention Network for Person Re-Identification,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+95df7770a5036c87104df23f333aa05e67723cdc,DeepDiary: Automatically Captioning Lifelogging Image Streams,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+9507e5c59c45e68b964fdaf40e39569dcc754be3,A Hybrid Supervised-unsupervised Method on Image Topic Visualization with Convolutional Neural Network and LDA,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+951368a1a8b3c5cd286726050b8bdf75a80f7c37,A family of online boosting algorithms,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+951368a1a8b3c5cd286726050b8bdf75a80f7c37,A family of online boosting algorithms,"University of California, Merced","University of California, Merced","University of California, Merced, Ansel Adams Road, Merced County, California, USA",37.36566745,-120.42158888,edu,
+951368a1a8b3c5cd286726050b8bdf75a80f7c37,A family of online boosting algorithms,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+95616c511e1eada5c4fba090fe739a4554711e22,Lip Contour Localization using Statistical Shape Models,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+956e9b69b3366ed3e1670609b53ba4a7088b8b7e,Semi-supervised dimensionality reduction for image retrieval,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+95a5cb872321addb28d5dc22ffad9586f113738a,MinMax Radon Barcodes for Medical Image Retrieval,University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.54724732,edu,
+95a5cb872321addb28d5dc22ffad9586f113738a,MinMax Radon Barcodes for Medical Image Retrieval,University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.54724732,edu,
+95a5cb872321addb28d5dc22ffad9586f113738a,MinMax Radon Barcodes for Medical Image Retrieval,University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.54724732,edu,
+9534a04de5e99a44df76ea30140f66a62e83fdaa,Iteratively Trained Interactive Segmentation,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+95a0c9f41d0cc6f45853d616d5476b8aee54ff0a,Deep Variational Inference Without Pixel-Wise Reconstruction,Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.02223470,77.56718325,edu,
+9528e2e8c20517ab916f803c0371abb4f0ed488b,Shallow and Deep Convolutional Networks for Saliency Prediction,Dublin City University,DUBLIN CITY UNIVERSITY,"Dublin City University Glasnevin Campus, Lower Car Park, Wad, Whitehall A ED, Dublin 9, Dublin, County Dublin, Leinster, D09 FW22, Ireland",53.38522185,-6.25740874,edu,
+9563456bfdd8b18df7f764400c04976771eb8728,Pay Attention to Those Sets! Learning Quantification from Images,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+9563456bfdd8b18df7f764400c04976771eb8728,Pay Attention to Those Sets! Learning Quantification from Images,University of Barcelona,University of Barcelona,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España",41.38689130,2.16352385,edu,
+956d6e48598cac9aa6129a87a7f8cdb634917aa1,R 2 SDH : Robust Rotated Supervised Discrete Hashing,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+9507b1a7af5442f8c247451a63400893de34d9f9,Distributed learning of CNNs on heterogeneous CPU/GPU architectures,University of Coimbra,University of Coimbra,"Reitoria da Universidade de Coimbra, Rua de Entre-Colégios, Almedina, Alta, Almedina, Sé Nova, Santa Cruz, Almedina e São Bartolomeu, CBR, Coimbra, Baixo Mondego, Centro, 3000-062, Portugal",40.20759510,-8.42566148,edu,
+9507b1a7af5442f8c247451a63400893de34d9f9,Distributed learning of CNNs on heterogeneous CPU/GPU architectures,University of Coimbra,University of Coimbra,"Reitoria da Universidade de Coimbra, Rua de Entre-Colégios, Almedina, Alta, Almedina, Sé Nova, Santa Cruz, Almedina e São Bartolomeu, CBR, Coimbra, Baixo Mondego, Centro, 3000-062, Portugal",40.20759510,-8.42566148,edu,
+95052cd12cfca8b0f8162dc53fe5615fc9c06b22,Don't Just Assume; Look and Answer: Overcoming Priors for Visual Question Answering,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+95f26d1c80217706c00b6b4b605a448032b93b75,New Robust Face Recognition Methods Based on Linear Regression,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+95f12d27c3b4914e0668a268360948bce92f7db3,Interactive Facial Feature Localization,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+954af3d46d023d73c7ee97f2264451080f542084,The Interplay between Emotion and Cognition in Autism Spectrum Disorder: Implications for Developmental Theory,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+95a3af61b398976c13d96baa32481e1bf4a31984,Geometric Enclosing Networks,Deakin University,Deakin University,"Deakin University, Pigdons Lane, Waurn Ponds, Geelong, City of Greater Geelong, Barwon South West, Victoria, 3216, Australia",-38.19928505,144.30365229,edu,
+95a3af61b398976c13d96baa32481e1bf4a31984,Geometric Enclosing Networks,Deakin University,Deakin University,"Deakin University, Pigdons Lane, Waurn Ponds, Geelong, City of Greater Geelong, Barwon South West, Victoria, 3216, Australia",-38.19928505,144.30365229,edu,
+95a3af61b398976c13d96baa32481e1bf4a31984,Geometric Enclosing Networks,Deakin University,Deakin University,"Deakin University, Pigdons Lane, Waurn Ponds, Geelong, City of Greater Geelong, Barwon South West, Victoria, 3216, Australia",-38.19928505,144.30365229,edu,
+956466b5c3036ada2e18f8f7c1b7bf0650779d08,Learning patch-dependent kernel forest for person re-identification,University of Tennessee,University of Tennessee,"University of Tennessee, Melrose Avenue, Fort Sanders, Knoxville, Knox County, Tennessee, 37916, USA",35.95424930,-83.93073950,edu,
+955e2a39f51c0b6f967199942d77625009e580f9,Naming Faces on the Web,Bilkent University,Bilkent University,"Bilkent Üniversitesi, 3. Cadde, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87204890,32.75395155,edu,
+95bc6f4ff5033a091b6ddc6a4290a58c7e6ddc66,FCNs in the Wild: Pixel-level Adversarial and Constraint-based Adaptation,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+95bc6f4ff5033a091b6ddc6a4290a58c7e6ddc66,FCNs in the Wild: Pixel-level Adversarial and Constraint-based Adaptation,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+9541e80b15ee3e1793f2caafc3502a6fd6947b24,Discriminative Semi-Supervised Dictionary Learning with Entropy Regularization for Pattern Classification,Shenzhen University,Shenzhen University,"深圳大学, 3688, 南海大道, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518060, 中国",22.53521465,113.93159110,edu,
+9541e80b15ee3e1793f2caafc3502a6fd6947b24,Discriminative Semi-Supervised Dictionary Learning with Entropy Regularization for Pattern Classification,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+95cfe8da0d8225c8f6304713719846a7716894cf,Integrating Perception and Cognition for AGI,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+95cfe8da0d8225c8f6304713719846a7716894cf,Integrating Perception and Cognition for AGI,Robotics Institute,Robotics Institute,"Robotics Institute, Carnegie Mellon University, 5000 Forbes Avenue, Pittsburgh, Allegheny County, Pennsylvania, 15213, USA",40.44355950,-79.94520860,edu,
+95cfe8da0d8225c8f6304713719846a7716894cf,Integrating Perception and Cognition for AGI,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+95debf4c4f88d48a71bae9bfea4032355805aa2f,Bounding the Probability of Error for High Precision Optical Character Recognition,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+95debf4c4f88d48a71bae9bfea4032355805aa2f,Bounding the Probability of Error for High Precision Optical Character Recognition,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+95debf4c4f88d48a71bae9bfea4032355805aa2f,Bounding the Probability of Error for High Precision Optical Character Recognition,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+9563d6fafb6ba09c082a57e8d9b31494029a45ac,Building a Large-scale Multimodal Knowledge Base for Visual Question Answering,University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+9563d6fafb6ba09c082a57e8d9b31494029a45ac,Building a Large-scale Multimodal Knowledge Base for Visual Question Answering,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+950171acb24bb24a871ba0d02d580c09829de372,Speeding up 2 D-Warping for Pose-Invariant Face Recognition,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+954909051c1d7d5a8ba885f1c09afe04c8aab0fb,IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+955aa3e7317e236e41f05ec2853b64236c252af0,SAM-RCNN: Scale-Aware Multi-Resolution Multi-Channel Pedestrian Detection,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+95896eef75a5fc6c8a7ac2531e76c423d678d2e7,Image Generation from Sketch Constraint Using Contextual GAN,Hong Kong University of Science and Technology,Hong Kong University of Science and Technology,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.33863040,114.26203370,edu,
+95896eef75a5fc6c8a7ac2531e76c423d678d2e7,Image Generation from Sketch Constraint Using Contextual GAN,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+95f4b88d4b0a725d786b34558b60af47f5442230,Reconfigurable Processor for Deep Learning in Autonomous Vehicles,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+95f4b88d4b0a725d786b34558b60af47f5442230,Reconfigurable Processor for Deep Learning in Autonomous Vehicles,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+95f4b88d4b0a725d786b34558b60af47f5442230,Reconfigurable Processor for Deep Learning in Autonomous Vehicles,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+59f83e94a7f52cbb728d434426f6fe85f756259c,An Improved Illumination Normalization Approach based on Wavelet Tranform for Face Recognition from Single Training Image Per Person,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+59be98f54bb4ed7a2984dc6a3c84b52d1caf44eb,A deep-learning approach to facial expression recognition with candid images,CUNY City College,CUNY City College,"CUNY City College, 205 East 42nd Street, New York, NY 10017",45.55466080,5.40652550,edu,
+591a737c158be7b131121d87d9d81b471c400dba,Affect valence inference from facial action unit spectrograms,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+59b095bdbd4b3f4a8240ad011b1d0b318b526d78,Recognizing and Filtering Web Images Based on People's Existence,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+591a4dddbebd3d3ce3d86f9910be40aafcb73a90,Multi-person Tracking by Multicut and Deep Matching,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+5955e31c413a4a08d149de8af843355ac45525bc,Supervised Kernel Locally Principle Component Analysis for Face Recognition,Southwest Jiaotong University,Southwest Jiaotong University,"西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国",30.69784700,104.05208110,edu,
+59778de271938df6de938deac17fd614f4640ac5,Cleaning up after a face tracker: False positive removal,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+59778de271938df6de938deac17fd614f4640ac5,Cleaning up after a face tracker: False positive removal,Istanbul Technical University,Istanbul Technical University,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye",41.10427915,29.02231159,edu,
+59a0b3537bf4f764c192812c4b48049f5c8fccc3,Unsupervised Object Discovery from Images by Mining Local Features Using Hashing,University of Electro-Communications,The University of Electro-Communications,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本",35.65729570,139.54255868,edu,
+593234ba1d2e16a887207bf65d6b55bbc7ea2247,Combining Language Sources and Robust Semantic Relatedness for Attribute-Based Knowledge Transfer,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+59f77456b4e2ffe84f99ac33796ee409143dbdac,ST-GAN: Spatial Transformer Generative Adversarial Networks for Image Compositing,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+59e91bd46492391beadce041806297e856af6ee6,Escaping from Collapsing Modes in a Constrained Space,National Tsing Hua University,National Tsing Hua University,"國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.79254840,120.99511830,edu,
+59e2037f5079794cb9128c7f0900a568ced14c2a,Clothing and People - A Social Signal Processing Perspective,University of Barcelona,University of Barcelona,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España",41.38689130,2.16352385,edu,
+59dac8b460a89e03fa616749a08e6149708dcc3a,A Convergent Solution to Matrix Bidirectional Projection Based Feature Extraction with Application to Face Recognition,"National University of Defense Technology, China","National University of Defence Technology, Changsha 410000, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.22902090,112.99483204,edu,
+59444832eb559c0060020b57cddbb899efc4567b,Vision-Based Fallen Person Detection for the Elderly,York University,York University,"York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada",43.77439110,-79.50481085,edu,
+59e9934720baf3c5df3a0e1e988202856e1f83ce,UA-DETRAC: A New Benchmark and Protocol for Multi-Object Detection and Tracking,Hanyang University,Hanyang University,"한양대, 206, 왕십리로, 사근동, 성동구, 서울특별시, 04763, 대한민국",37.55572710,127.04366420,edu,
+59138911e0526dd1d8c5466b2793b6bb02c35ca9,Describing Objects via Attribute Detection,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+59bece468ed98397d54865715f40af30221aa08c,Deformable part-based robust face detection under occlusion by using face decomposition into face components,University of Zagreb,"University of Zagreb, Faculty of Electrical Engineering and Computing, Croatia","Unska ul. 3, 10000, Zagreb, Croatia",45.80112100,15.97084090,edu,
+594cd8ed19aad3ce29d11c74d2c5fbf1a864be0c,Tips and Tricks for Visual Question Answering: Learnings from the 2017 Challenge,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+594cd8ed19aad3ce29d11c74d2c5fbf1a864be0c,Tips and Tricks for Visual Question Answering: Learnings from the 2017 Challenge,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+594cd8ed19aad3ce29d11c74d2c5fbf1a864be0c,Tips and Tricks for Visual Question Answering: Learnings from the 2017 Challenge,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+5996001b797ab2a0f55d5355cb168f25bfe56bbd,Content-Based Video Search over 1 Million Videos with 1 Core in 1 Second,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+59420fd595ae745ad62c26ae55a754b97170b01f,Objects as Attributes for Scene Classification,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+599adc0dcd4ebcc2a868feedd243b5c3c1bd1d0a,How Robust is 3D Human Pose Estimation to Occlusion?,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+59b2edf39e0490892d8865b8252bd7f11e2b2228,Face Recognition Using a Unified 3D Morphable Model,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+59b2edf39e0490892d8865b8252bd7f11e2b2228,Face Recognition Using a Unified 3D Morphable Model,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+591f04d62f44c22d1d82c9e074b066c21b420394,Learning What and Where to Draw,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+591f04d62f44c22d1d82c9e074b066c21b420394,Learning What and Where to Draw,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+59cee3d54dca04207f57b19c3d1a31402a75c3c3,L1-(2D)2PCANet: A Deep Learning Network for Face Recognition,Jiangnan University,Jiangnan University,"江南大学站, 蠡湖大道, 滨湖区, 南场村, 滨湖区 (Binhu), 无锡市 / Wuxi, 江苏省, 214121, 中国",31.48542550,120.27395810,edu,
+5922e26c9eaaee92d1d70eae36275bb226ecdb2e,Boosting Classification Based Similarity Learning by using Standard Distances,Universitat de València,Universitat de València,"Campus dels Tarongers, Plaza de Manuel Broseta i Pont, Ciutat Jardí, Algirós, València, Comarca de València, València / Valencia, Comunitat Valenciana, 46022, España",39.47787665,-0.34257711,edu,
+596e414872debe1441b5e40216febe8788df9b35,Spatiotemporal dynamics of similarity-based neural representations of facial identity.,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+59948ee0f334ccdc4e94e5bb6a7a019c764e1815,Deployment of Practical Methods for Counting Bicycle and Pedestrian Use of a Transportation Facility,University of Minnesota,University of Minnesota,"WeismanArt, 333, East River Parkway, Marcy-Holmes, Phillips, Minneapolis, Hennepin County, Minnesota, 55455, USA",44.97308605,-93.23708813,edu,
+59d10820e0a04d2d1acc43bb18a76c52e9946721,Attention to eyes and mouth in high-functioning children with autism.,Hebrew University of Jerusalem,The Hebrew University of Jerusalem,"האוניברסיטה העברית בירושלים, Reagan Plaza, קרית מנחם בגין, הר הצופים, ירושלים, מחוז ירושלים, NO, ישראל",31.79185550,35.24472300,edu,
+59d10820e0a04d2d1acc43bb18a76c52e9946721,Attention to eyes and mouth in high-functioning children with autism.,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+59d8fa6fd91cdb72cd0fa74c04016d79ef5a752b,The Menpo Facial Landmark Localisation Challenge: A Step Towards the Solution,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+592b108b241d2d062c3035b6a5ba827180885bb7,Research on Gradient Local Binary Patterns Method for Human Detection,Waseda University,Waseda University,"早稲田大学 北九州キャンパス, 2-2, 有毛引野線, 八幡西区, 北九州市, 福岡県, 九州地方, 808-0135, 日本",33.88987280,130.70856205,edu,
+592bbab1e073908c75584879bc00911e7246aebf,Exploiting feature Representations Through Similarity Learning and Ranking Aggregation for Person Re-identification,University of Barcelona,University of Barcelona,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España",41.38689130,2.16352385,edu,
+592bbab1e073908c75584879bc00911e7246aebf,Exploiting feature Representations Through Similarity Learning and Ranking Aggregation for Person Re-identification,Universitat Autònoma de Barcelona,Universitat Autònoma de Barcelona,"Centre de Visió per Computador (CVC), Carrer de l'Albareda, Serraperera, UAB, Cerdanyola del Vallès, Vallès Occidental, BCN, CAT, 08214, España",41.50078110,2.11143663,edu,
+590739cab80ad1219143401be0d929bc2885901b,Sherlock: Modeling Structured Knowledge in Images,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+59e75aad529b8001afc7e194e21668425119b864,Membrane Nonrigid Image Registration,Drexel University,Drexel University,"Drexel University, Arch Street, Powelton Village, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.95740000,-75.19026706,edu,
+592e555ebe4bd2d821230e7074d7e9626af716b0,Open Set Adversarial Examples,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+59d45281707b85a33d6f50c6ac6b148eedd71a25,Rank Minimization across Appearance and Shape for AAM Ensemble Fitting,Queensland University of Technology,Queensland University of Technology,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.47715625,153.02841004,edu,
+59d88030c99de99d18d16dd5ffab7c0bcf6ac58e,Collaborative Annotation of Semantic Objects in Images with Multi-granularity Supervisions,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+59319c128c8ac3c88b4ab81088efe8ae9c458e07,Effective Computer Model For Recognizing Nationality From Frontal Image,University of the Humanities,The University of the Humanities,"Хүмүүнлэгийн ухааны их сургууль, Ж.Самбуугийн гудамж, Гандан, Улаанбаатар, 975, Монгол улс",47.92189370,106.91955240,edu,
+59a6c9333c941faf2540979dcfcb5d503a49b91e,Sampling Clustering,Shandong University,Shandong University,"山东大学, 泰安街, 鳌山卫街道, 即墨区, 青岛市, 山东省, 266200, 中国",36.36934730,120.67381800,edu,
+928ccc8c4ae415202d187a229009dd48e57871ba,Winner-Take-All Multiple Category Boosting for Multi-view Face Detection,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+928ccc8c4ae415202d187a229009dd48e57871ba,Winner-Take-All Multiple Category Boosting for Multi-view Face Detection,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+927ec8dde9eb0e3bc5bf0b1a0ae57f9cf745fd9c,Learning Discriminative Features with Multiple Granularities for Person Re-Identification,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+92b9c19da2c144257617e39bc8ace7293e710914,"Automatic Tracking, Super-Resolution and Recognition of Human Faces from Surveillance Video",Queensland University of Technology,Queensland University of Technology,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.47715625,153.02841004,edu,
+92d5fd4ef31cf86a650c7b01c26f0ac93304f98a,Attention-Based Natural Language Person Retrieval,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+9264b390aa00521f9bd01095ba0ba4b42bf84d7e,Displacement Template with Divide-&-Conquer Algorithm for Significantly Improving Descriptor Based Face Recognition Approaches,University of Northern British Columbia,University of Northern British Columbia,"UNBC, Campus Ring Road, College Heights, Prince George, Regional District of Fraser-Fort George, British Columbia, V2M 5K7, Canada",53.89256620,-122.81471592,edu,
+9264b390aa00521f9bd01095ba0ba4b42bf84d7e,Displacement Template with Divide-&-Conquer Algorithm for Significantly Improving Descriptor Based Face Recognition Approaches,Aberystwyth University,Aberystwyth University,"Aberystwyth University, Llanbadarn Campus, Cefn Esgair, Waun Fawr, Comins Coch, Ceredigion, Wales, SY23 3JG, UK",52.41073580,-4.05295501,edu,
+92166eb883b0505040c2d61c758985e5ec051f83,Learning Deep Feature Representations with Domain Guided Dropout for Person Re-identification,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+92be73dffd3320fe7734258961fe5a5f2a43390e,Transferring Face Verification Nets To Pain and Expression Regression,Johns Hopkins University,"Johns Hopkins University, 3400 N. Charles St, Baltimore, MD 21218, USA","3400 N Charles St, Baltimore, MD 21218, USA",39.32905300,-76.61942500,edu,
+92be73dffd3320fe7734258961fe5a5f2a43390e,Transferring Face Verification Nets To Pain and Expression Regression,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+92a3d5ab3eb540a11eddf1b836c1db28640b2746,Face Recognition using 3D Facial Shape and Color Map Information: Comparison and Combination,National Institute of Standards and Technology,National Institute of Standards and Technology,"National Institute of Standards and Technology, Summer Walk Drive, Diamond Farms, Gaithersburg, Montgomery County, Maryland, 20878, USA",39.12549380,-77.22293475,edu,
+928857c96ef837f43ec87135de69780f6667cc70,Reconstruction Network for Video Captioning,Shandong University,Shandong University,"山东大学, 泰安街, 鳌山卫街道, 即墨区, 青岛市, 山东省, 266200, 中国",36.36934730,120.67381800,edu,
+92891d260e46adeff84ec5ea0817c0b6a70c253d,Model-Based Gait Enrolment in Real-World Imagery,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+924b14a9e36d0523a267293c6d149bca83e73f3b,Development and Evaluation of a Method Employed to Identify Internal State Utilizing Eye Movement Data,Keio University,Keio University,"綱島市民の森, けつわり坂, 港北区, 横浜市, 神奈川県, 関東地方, 223-0053, 日本",35.54169690,139.63471840,edu,
+924b14a9e36d0523a267293c6d149bca83e73f3b,Development and Evaluation of a Method Employed to Identify Internal State Utilizing Eye Movement Data,Keio University,Keio University,"綱島市民の森, けつわり坂, 港北区, 横浜市, 神奈川県, 関東地方, 223-0053, 日本",35.54169690,139.63471840,edu,
+928b8eb47288a05611c140d02441660277a7ed54,Exploiting Images for Video Recognition with Hierarchical Generative Adversarial Networks,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"电子科技大学, 建设北路二段, 成华区, 成都市 / Chengdu, 四川省, 610054, 中国",30.71770000,104.08470000,edu,
+926e97d5ce2a6e070f8ec07c5aa7f91d3df90ba0,Facial Expression Recognition Using Enhanced Deep 3D Convolutional Neural Networks,University of Denver,University of Denver,"University of Denver, Driscoll Bridge, Denver, Denver County, Colorado, 80208, USA",39.67665410,-104.96220300,edu,
+92e464a5a67582d5209fa75e3b29de05d82c7c86,Reconstruction for Feature Disentanglement in Pose-invariant Face Recognition,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+922838dd98d599d1d229cc73896d55e7a769aa7c,Learning hierarchical representations for face verification with convolutional deep belief networks,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+922838dd98d599d1d229cc73896d55e7a769aa7c,Learning hierarchical representations for face verification with convolutional deep belief networks,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+922838dd98d599d1d229cc73896d55e7a769aa7c,Learning hierarchical representations for face verification with convolutional deep belief networks,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+92a25b281f1637d125cefefcbfc382f48f456f4c,Feature Extraction for Incomplete Data via Low-rank Tucker Decomposition,Hong Kong Baptist University,Hong Kong Baptist University,"香港浸會大學 Hong Kong Baptist University, 安明街 On Ming Street, 石門 Shek Mun, 石古壟 Shek Kwu Lung, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1132, 中国",22.38742010,114.20822220,edu,
+92a25b281f1637d125cefefcbfc382f48f456f4c,Feature Extraction for Incomplete Data via Low-rank Tucker Decomposition,Guangdong University of Technology,Guangdong University of Technology,"广东工业大学, 东风东路, 黄花岗街道, 越秀区 (Yuexiu), 广州市, 广东省, 510080, 中国",23.13538360,113.29470496,edu,
+9294739e24e1929794330067b84f7eafd286e1c8,Expression Recognition Using Elastic Graph Matching,Southeast University,Southeast University,"SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国",32.05752790,118.78682252,edu,
+9294739e24e1929794330067b84f7eafd286e1c8,Expression Recognition Using Elastic Graph Matching,Southeast University,Southeast University,"SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国",32.05752790,118.78682252,edu,
+925811b9fdd6c0d901bdd63245ead6a781f38bcb,Informed Haar-Like Features Improve Pedestrian Detection,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+92de2ed3805968d6d95da4fa9c44423ef50a6a37,SegStereo: Exploiting Semantic Information for Disparity Estimation,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+9257c88484247ac19e25c34de2261d34e7a06b41,CoMaL Tracking: Tracking Points at the Object Boundaries,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+92104ae97d3b57489751528a315966c0242a6efb,Input Stage-1 : Regression Stage-2 : Contextual Copy-Pasting qi pi [ ] ... Pixel Representation,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+925b634cc26a8e74c2ee8889472a77e7af37874d,Shape Models of the Human Body for Distributed Inference,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+920246280e7e70900762ddfa7c41a79ec4517350,(MP)2T: Multiple People Multiple Parts Tracker,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+92175241bd9b55b53403b9f6ffd3a6c956733490,Migration Cost Aware Task Scheduling,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+92175241bd9b55b53403b9f6ffd3a6c956733490,Migration Cost Aware Task Scheduling,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+925e907458e7621ed4390db20d170e98d155d693,Question Answering under Instructor Guidance with Memory Networks,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+923ec0da8327847910e8dd71e9d801abcbc93b08,Hide-and-Seek: Forcing a Network to be Meticulous for Weakly-Supervised Object and Action Localization,"University of California, Davis","University of California, Davis","University of California, Davis, Apiary Drive, Yolo County, California, 95616-5270, USA",38.53363490,-121.79077264,edu,
+92574a72c660a86a7ded738a1350851f416bec03,Scene Parsing via Dense Recurrent Neural Networks with Attentional Selection,Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+92f2c4f5583f0b58799f4834bc2808ee785e27f1,Kernel-Based Clustering of Big Data,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+0c741fa0966ba3ee4fc326e919bf2f9456d0cd74,Facial Age Estimation by Learning from Label Distributions,Monash University,Monash University,"Monash University, Mile Lane, Parkville, City of Melbourne, Victoria, 3000, Australia",-37.78397455,144.95867433,edu,
+0c741fa0966ba3ee4fc326e919bf2f9456d0cd74,Facial Age Estimation by Learning from Label Distributions,Southeast University,Southeast University,"SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国",32.05752790,118.78682252,edu,
+0c741fa0966ba3ee4fc326e919bf2f9456d0cd74,Facial Age Estimation by Learning from Label Distributions,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+0c435e7f49f3e1534af0829b7461deb891cf540a,Capturing Global Semantic Relationships for Facial Action Unit Recognition,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+0c435e7f49f3e1534af0829b7461deb891cf540a,Capturing Global Semantic Relationships for Facial Action Unit Recognition,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+0c435e7f49f3e1534af0829b7461deb891cf540a,Capturing Global Semantic Relationships for Facial Action Unit Recognition,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+0cd8d70a2476d91c4fd6699de0e106c94aa2d9ef,Visual Reasoning with Multi-hop Feature Modulation,Rice University,Rice University,"Rice University, Stockton Drive, Houston, Harris County, Texas, 77005-1890, USA",29.71679145,-95.40478113,edu,
+0c56f414251d6c9f43623ee683dc6cae3be1045a,Towards Understanding Action Recognition,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+0c56f414251d6c9f43623ee683dc6cae3be1045a,Towards Understanding Action Recognition,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+0c30f6303dc1ff6d05c7cee4f8952b74b9533928,Pareto discriminant analysis,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+0c30f6303dc1ff6d05c7cee4f8952b74b9533928,Pareto discriminant analysis,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+0c30f6303dc1ff6d05c7cee4f8952b74b9533928,Pareto discriminant analysis,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+0c30f6303dc1ff6d05c7cee4f8952b74b9533928,Pareto discriminant analysis,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+0cb5079c39933bd8897fde7edecf156ff57830d7,Runway to Realway: Visual Analysis of Fashion,University of North Carolina at Chapel Hill,University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.91139710,-79.05045290,edu,
+0ccc535d12ad2142a8310d957cc468bbe4c63647,Better Exploiting OS-CNNs for Better Event Recognition in Images,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+0cd87a66028f9d3c519a9459a213905b42b4c3b0,Cross-Domain Forensic Shoeprint Matching,"University of California, Irvine","University of California, Irvine","University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA",33.64319010,-117.84016494,edu,
+0cd87a66028f9d3c519a9459a213905b42b4c3b0,Cross-Domain Forensic Shoeprint Matching,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+0cd87a66028f9d3c519a9459a213905b42b4c3b0,Cross-Domain Forensic Shoeprint Matching,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+0c79a39a870d9b56dc00d5252d2a1bfeb4c295f1,Face Recognition in Videos by Label Propagation,International Institute of Information Technology,International Institute of Information Technology,"International Institute of Information Technology, Hyderabad, Campus Road, Ward 105 Gachibowli, Greater Hyderabad Municipal Corporation West Zone, Hyderabad, Rangareddy District, Telangana, 500032, India",17.44549570,78.34854698,edu,
+0cdb49142f742f5edb293eb9261f8243aee36e12,Combined Learning of Salient Local Descriptors and Distance Metrics for Image Set Face Verification,"CSIRO, Australia","NICTA, PO Box 6020, St Lucia, QLD 4067, Australia","Research Way, Clayton VIC 3168, Australia",-37.90627370,145.13194490,edu,f.k.a. NICTA
+0cdb49142f742f5edb293eb9261f8243aee36e12,Combined Learning of Salient Local Descriptors and Distance Metrics for Image Set Face Verification,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+0c6eff59e210c3af9865207302199412f3f91914,Parsing human motion with stretchable models,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+0cfab1c2839ddacc19bc9af2e821d5c5fd4f28c1,3D Pictorial Structures for Multiple Human Pose Estimation,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+0c2de1b4fe7c5da8adf6351533a9c39503ad7a4c,Deeply-Fused Nets,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+0c2de1b4fe7c5da8adf6351533a9c39503ad7a4c,Deeply-Fused Nets,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+0c8ac71e174a941ea7e14e7b503a12ae7eeca9db,Visual Semantic Complex Network for Web Images,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+0c8ac71e174a941ea7e14e7b503a12ae7eeca9db,Visual Semantic Complex Network for Web Images,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+0cf2eecf20cfbcb7f153713479e3206670ea0e9c,Privacy-Protective-GAN for Face De-identification,Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+0cba3c4ec4c1dd85b637a078b9c05244196009e9,Automatic Expansion of a Food Image Dataset Leveraging Existing Categories with Domain Adaptation,University of Electro-Communications,The University of Electro-Communications,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本",35.65729570,139.54255868,edu,
+0cd736baf31dceea1cc39ac72e00b65587f5fb9e,Learning Hash Functions Using Column Generation,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+0c2d0734a2c9d3e4d8a585b3f2ad4f642bf06dea,Recurrent Generative Adversarial Networks for Proximal Learning and Automated Compressive Image Recovery,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+0cf4105ec11fb5846e5ea1b9dea11f8ba16e391f,Strokelets: A Learned Multi-scale Representation for Scene Text Recognition,Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+0c1d85a197a1f5b7376652a485523e616a406273,Joint Registration and Representation Learning for Unconstrained Face Identification,University of Canberra,University of Canberra,"University of Canberra, University Drive, Bruce, Belconnen, Australian Capital Territory, 2617, Australia",-35.23656905,149.08446994,edu,
+0c1d85a197a1f5b7376652a485523e616a406273,Joint Registration and Representation Learning for Unconstrained Face Identification,Khalifa University,Khalifa University,"Khalifa University, شارع طَوِي مُوَيلِح, قصر الشاطئ, حدبة الزَّعْفرانة, أبوظبي, أبو ظبي, 31757, الإمارات العربية المتحدة",24.44690250,54.39425630,edu,
+0ca2f48fad7f69fb415ecbb99945250cbf8f011c,Outliers Cleaning in Dynamic Systems,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+0ca2f48fad7f69fb415ecbb99945250cbf8f011c,Outliers Cleaning in Dynamic Systems,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+0ca66283f4fb7dbc682f789fcf6d6732006befd5,Active Dictionary Learning for Image Representation,State University of New Jersey,The State University of New Jersey,"Rutgers New Brunswick: Livingston Campus, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA",40.51865195,-74.44099801,edu,
+0c07b1faeb4c63c603bcd124640c6ffe07df801c,Unsupervised Selection of Negative Examples for Grounded Language Learning,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+0c9f715835bee028a358701cf5a73ecbc3a7e242,Semi-supervised Facial Expression Recognition Algorithm on The Condition of Multi-pose,Beijing University of Technology,Beijing University of Technology,"北京工业大学, 银杏大道, 大郊亭村, 朝阳区 / Chaoyang, 北京市, 3208, 中国",39.87391435,116.47722285,edu,
+0c1531a2fa4d6a270b9a09cde86bb0669f5975ff,Processing of Crawled Urban Imagery for Building Use Classification,University of Stuttgart,University of Stuttgart,"Pädagogische Hochschule Ludwigsburg, 46, Reuteallee, Ludwigsburg-Nord, Ludwigsburg, Landkreis Ludwigsburg, Regierungsbezirk Stuttgart, Baden-Württemberg, 71634, Deutschland",48.90953380,9.18318920,edu,
+0c7f27d23a162d4f3896325d147f412c40160b52,Models and Algorithms for Vision through the Atmosphere,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+0ca35af582b95fbab3829f98308d104359c3b632,Recognizing objects by piecing together the Segmentation Puzzle,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+0c75db5e3c27bcb0d07311a950d0d25cb57c731e,Neural conditional ordinal random fields for agreement level estimation,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+0c75db5e3c27bcb0d07311a950d0d25cb57c731e,Neural conditional ordinal random fields for agreement level estimation,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+0cebf440622050f8149d14b803a969917348844b,Learning to Search Efficiently in High Dimensions,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+0c20fd90d867fe1be2459223a3cb1a69fa3d44bf,A Monte Carlo Strategy to Integrate Detection and Model-Based Face Analysis,University of Basel,University of Basel,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra",47.56126510,7.57529610,edu,
+0c553e57cb6fe7bdf3212fbf86bcc869958db27f,Straight until proven gay: A systematic bias toward straight categorizations in sexual orientation judgments.,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+0c553e57cb6fe7bdf3212fbf86bcc869958db27f,Straight until proven gay: A systematic bias toward straight categorizations in sexual orientation judgments.,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+0c553e57cb6fe7bdf3212fbf86bcc869958db27f,Straight until proven gay: A systematic bias toward straight categorizations in sexual orientation judgments.,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+0c2875bb47db3698dbbb3304aca47066978897a4,Recurrent Models for Situation Recognition,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+0c6fa98b7b99d807df7c027e8e97751f1bbb9140,Data programming with DDLite: putting humans in a different part of the loop,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+0c663a7224a60488502a937ed3bc2b869260b6c0,Activity Auto-Completion: Predicting Human Activities from Partial Videos,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+0c663a7224a60488502a937ed3bc2b869260b6c0,Activity Auto-Completion: Predicting Human Activities from Partial Videos,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+0c663a7224a60488502a937ed3bc2b869260b6c0,Activity Auto-Completion: Predicting Human Activities from Partial Videos,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+0cbc4dcf2aa76191bbf641358d6cecf38f644325,Visage: A Face Interpretation Engine for Smartphone Applications,Dartmouth College,Dartmouth College,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA",43.70479270,-72.29259090,edu,
+0c7aac75ccd17d696cff2e1ce95db0493f5c18a2,VideoMatch: Matching Based Video Object Segmentation,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+0cbbbfac2fe925479c6b34712e056f840a10fa4d,Quality Evaluation Methods for Crowdsourced Image Segmentation,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+0cbbbfac2fe925479c6b34712e056f840a10fa4d,Quality Evaluation Methods for Crowdsourced Image Segmentation,Facebook,Facebook,"250 Bryant St, Mountain View, CA 94041, USA",37.39367170,-122.08072620,company,"Facebook, Mountain View, CA"
+0cbbbfac2fe925479c6b34712e056f840a10fa4d,Quality Evaluation Methods for Crowdsourced Image Segmentation,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+0ce8a45a77e797e9d52604c29f4c1e227f604080,Zernike Moment-based Feature Extraction for Facial Recognition of Identical Twins,Amirkabir University of Technology,Amirkabir University of Technology,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎",35.70451400,51.40972058,edu,
+0ce3a786aed896d128f5efdf78733cc675970854,Learning the Face Prior for Bayesian Face Recognition,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+0c7844b63a05ec086fba231ad9eb3114ffb4139e,Automated Facial Trait Judgment and Election Outcome Prediction: Social Dimensions of Face,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+0c7844b63a05ec086fba231ad9eb3114ffb4139e,Automated Facial Trait Judgment and Election Outcome Prediction: Social Dimensions of Face,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+0c7844b63a05ec086fba231ad9eb3114ffb4139e,Automated Facial Trait Judgment and Election Outcome Prediction: Social Dimensions of Face,Facebook,Facebook,"250 Bryant St, Mountain View, CA 94041, USA",37.39367170,-122.08072620,company,"Facebook, Mountain View, CA"
+0c882588ed7436f7122af2b324c598adbede49c1,Random mesh projectors for inverse problems,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+0c882588ed7436f7122af2b324c598adbede49c1,Random mesh projectors for inverse problems,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+0c882588ed7436f7122af2b324c598adbede49c1,Random mesh projectors for inverse problems,Rice University,Rice University,"Rice University, Stockton Drive, Houston, Harris County, Texas, 77005-1890, USA",29.71679145,-95.40478113,edu,
+0c882588ed7436f7122af2b324c598adbede49c1,Random mesh projectors for inverse problems,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+0c8769bf0501fdd7fbc94ca81601de4a40679295,On Duality Of Multiple Target Tracking and Segmentation,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+0cc5804c5f113c60ee5894f25ab7078364eef986,Epitomize Your Photos,Yonsei University,Yonsei University,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.56004060,126.93692480,edu,
+0c5f74c8e323861d18d6090d8cce05dde22660d0,Enhancing Person Re-identification by Robust Structural Metric Learning,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+0c59071ddd33849bd431165bc2d21bbe165a81e0,Person Recognition in Personal Photo Collections,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+0c12cbb9b9740dfa2816b8e5cde69c2f5a715c58,Memory-Augmented Attribute Manipulation Networks for Interactive Fashion Search,Southwest Jiaotong University,Southwest Jiaotong University,"西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国",30.69784700,104.05208110,edu,
+0c12cbb9b9740dfa2816b8e5cde69c2f5a715c58,Memory-Augmented Attribute Manipulation Networks for Interactive Fashion Search,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+0ce08f1cc6684495d12c2da157a056c7b88ffcd9,Multi-Modality Feature Transform: An Interactive Image Segmentation Approach,Alexandria University,Alexandria University,"جامعة الإسكندرية, الكورنيش, إبراهيمية, الإسكندرية, 21522, مصر",31.21051105,29.91314562,edu,
+0cd7ff53729dafe9175009d7f04570dbbf41a608,Modelling the Effect of View Angle Variation on Appearance-Based Gait Recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+0c4d99f49654fe04a8e229a20a6e0e0f0d81337b,Multi-Scale Human Pose Tracking in 2D Monocular Images,Curtin University,Curtin University,"Curtin University, Brand Drive, Waterford, Perth, Western Australia, 6102, Australia",-32.00686365,115.89691775,edu,
+0c5b03a6083950aacd9aee2d276a232e6ce3213c,The Main Memory System: Challenges and Opportunities,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+0c5b03a6083950aacd9aee2d276a232e6ce3213c,The Main Memory System: Challenges and Opportunities,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+0c5b03a6083950aacd9aee2d276a232e6ce3213c,The Main Memory System: Challenges and Opportunities,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+0c5b03a6083950aacd9aee2d276a232e6ce3213c,The Main Memory System: Challenges and Opportunities,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+0c124734ce6015bd4c506b101038aebc1412da49,Human Face Reconstruction Using Bayesian Deformable Models,Queensland University of Technology,Queensland University of Technology,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.47715625,153.02841004,edu,
+0c678d593cec6ff51c18bde3847fffbf58a66282,"TOTAL BREGMAN DIVERGENCE, A ROBUST DIVERGENCE MEASURE, AND ITS APPLICATIONS By MEIZHU LIU A DISSERTATION PRESENTED TO THE GRADUATE SCHOOL OF THE UNIVERSITY OF FLORIDA IN PARTIAL FULFILLMENT OF THE REQUIREMENTS FOR THE DEGREE OF DOCTOR OF PHILOSOPHY",University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+0c6e29d82a5a080dc1db9eeabbd7d1529e78a3dc,Learning Bayesian Network Classifiers for Facial Expression Recognition using both Labeled and Unlabeled Data,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+0c940ccba1bd9380a0ac723d791777fc1746a060,Scheduling for HPC Systems with Process Variation Heterogeneity,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+0c61d6a33b9d3c190b4adc15658cfe969dedfbdf,Self-supervised Learning of Motion Capture,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+0c61d6a33b9d3c190b4adc15658cfe969dedfbdf,Self-supervised Learning of Motion Capture,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+0c6602439185ad8268ebcd99d1ac4afd66fb4c7b,"Learning Robust, Transferable Sentence Representations for Text Classification","University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+0c0dc5c307483642e15283d0d52a4159483d3df6,Multicamera Video Summarization from Optimal Reconstruction,"University of California, Santa Barbara","University of California, Santa Barbara","UCSB, Santa Barbara County, California, 93106, USA",34.41459370,-119.84581950,edu,
+0cbc3221a07db517c30b9d6605cbe9d103e19955,How Smart Does Your Profile Image Look?: Estimating Intelligence from Social Network Profile Images,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+0cbc3221a07db517c30b9d6605cbe9d103e19955,How Smart Does Your Profile Image Look?: Estimating Intelligence from Social Network Profile Images,University of Bath,University of Bath,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK",51.37914420,-2.32523320,edu,
+0c05f60998628884a9ac60116453f1a91bcd9dda,Optimizing Open-Ended Crowdsourcing: The Next Frontier in Crowdsourced Data Management,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+0c0f353dbac84311ea4f1485d4a8ac0b0459be8c,Nexus : A GPU Cluster for Accelerating Neural Networks for Video Analysis,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+66284b8894bab0165c4210cd2df749f0b015c88e,Semi-Supervised Ranking for Re-identification with Few Labeled Image Pairs,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+663cca096b98c8f0444608b188e464028ee34368,CASENet: Deep Category-Aware Semantic Edge Detection,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+66ef0364f2e865c35ce5003e129ba6fc57a2afa4,Semantic Segmentation Using Multiple Graphs with Block-Diagonal Constraints,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+66aad5b42b7dda077a492e5b2c7837a2a808c2fa,A Novel PCA-Based Bayes Classifier and Face Analysis,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+66b9d954dd8204c3a970d86d91dd4ea0eb12db47,Evaluation of Gabor-Wavelet-Based Facial Action Unit Recognition in Image Sequences of Increasing Complexity,IBM Thomas J. Watson Research Center,IBM Thomas J. Watson Research Center,"IBM Yorktown research lab, Adams Road, Millwood, Town of New Castle, Westchester County, New York, 10562, USA",41.21002475,-73.80407056,company,
+66b9d954dd8204c3a970d86d91dd4ea0eb12db47,Evaluation of Gabor-Wavelet-Based Facial Action Unit Recognition in Image Sequences of Increasing Complexity,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+66b9d954dd8204c3a970d86d91dd4ea0eb12db47,Evaluation of Gabor-Wavelet-Based Facial Action Unit Recognition in Image Sequences of Increasing Complexity,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+66dcd855a6772d2731b45cfdd75f084327b055c2,Quality Classified Image Analysis with Application to Face Detection and Recognition,Shenzhen University,Shenzhen University,"深圳大学, 3688, 南海大道, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518060, 中国",22.53521465,113.93159110,edu,
+666939690c564641b864eed0d60a410b31e49f80,What Visual Attributes Characterize an Object Class?,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+66330846a03dcc10f36b6db9adf3b4d32e7a3127,Polylingual Multimodal Learning,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+66cd8e7338b20999786343651658520ca9544006,Pedestrian detection aided by deep learning semantic tasks,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+666300af8ffb8c903223f32f1fcc5c4674e2430b,Changing Fashion Cultures,Tokyo Denki University,Tokyo Denki University,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本",35.65729570,139.54255868,edu,
+66029f1be1a5cee9a4e3e24ed8fcb65d5d293720,HWANG AND GRAUMAN: ACCOUNTING FOR IMPORTANCE IN IMAGE RETRIEVAL 1 Accounting for the Relative Importance of Objects in Image Retrieval,University of Texas,The University of Texas,"The University of Texas at Tyler, 3900, University Boulevard, Tyler, Smith County, Texas, 75799, USA",32.31630780,-95.25369944,edu,
+669b9fd79eb39f712527ee616e35e50eea7fd2fa,Human Pose Estimation Using Body Parts Dependent Joint Regressors,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+6691dfa1a83a04fdc0177d8d70e3df79f606b10f,Illumination Modeling and Normalization for Face Recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+66fb1e7a65abbfa171a3fd92dc67006490df7450,Design of Continuous Authentication using Face Recognition,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+665265289471d08a4b472329eb42965b51ac485a,Fairness GAN,"IBM Research, North Carolina",IBM Research,"IBM, East Cornwallis Road, Research Triangle Park, Nelson, Durham County, North Carolina, 27709, USA",35.90422720,-78.85565763,company,
+6625349c4705d25844ab6eb019e5962b012e9256,Identifying Emotions Using Topographic Conditioning Maps,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+665f0763ad7f320cb59fcb6a745906d3d6799d99,Deep Multitask Gaze Estimation with a Constrained Landmark-Gaze Model,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+66652367a369d18e1845dd14220dc94a9748c9fd,Learning Spatial Regularization with Image-Level Supervisions for Multi-label Image Classification,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+66652367a369d18e1845dd14220dc94a9748c9fd,Learning Spatial Regularization with Image-Level Supervisions for Multi-label Image Classification,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+66652367a369d18e1845dd14220dc94a9748c9fd,Learning Spatial Regularization with Image-Level Supervisions for Multi-label Image Classification,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+66a4a03ea58792e4be90b20c60ddc65de736537e,Learning auxiliary dictionaries for undersampled face recognition,"Academia Sinica, Taiwan","Research Center for Institute of Information Science, Academia Sinica, Taiwan","115, Taiwan, Taipei City, Nangang District, 研究院路二段128號",25.04117270,121.61465180,edu,
+669ddd0b5f742876fe84cfb3dd7ff30bcaab52be,Learning image similarities via Probabilistic Feature Matching,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+66cc90ea586c914e6a3b50fe703f4379d530fad7,Automatic integration of social information in emotion recognition.,University of Geneva,University of Geneva,"University of Chicago-Yerkes Observatory, 373, West Geneva Street, Williams Bay, Walworth County, Wisconsin, 53191, USA",42.57054745,-88.55578627,edu,
+668d39ceb83d06c61ab58cb689a1b744ff520669,Fast Video Classification via Adaptive Cascading of Deep Models,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+66c84abd01fdd84d9cd241dea8e487580f4f8922,Bridging Languages through Images with Deep Partial Canonical Correlation Analysis,Technion,Technion,"Haifa, 3200003, Israel",32.77677830,35.02312710,edu,
+66c84abd01fdd84d9cd241dea8e487580f4f8922,Bridging Languages through Images with Deep Partial Canonical Correlation Analysis,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+66986f4359c3507d671bad021d6fb2d6fa6aa2c0,Appearance Sharing for Collective Human Pose Estimation,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+66f02fbcad13c6ee5b421be2fc72485aaaf6fcb5,"Using Co-Captured Face, Gaze and Verbal Reactions to Images of Using Co-Captured Face, Gaze and Verbal Reactions to Images of",Muhlenberg College,Muhlenberg College,"Muhlenberg College, 2400, West Chew Street, Rose Garden, Allentown, Lehigh County, Pennsylvania, 18104, USA",40.59676370,-75.51240620,edu,
+66f02fbcad13c6ee5b421be2fc72485aaaf6fcb5,"Using Co-Captured Face, Gaze and Verbal Reactions to Images of Using Co-Captured Face, Gaze and Verbal Reactions to Images of",Rochester Institute of Technology,Rochester Institute of Technology,"Rochester Institute of Technology (RIT), 1, Lomb Memorial Drive, Bailey, Henrietta Town, Monroe County, New York, 14623, USA",43.08250655,-77.67121663,edu,
+66f02fbcad13c6ee5b421be2fc72485aaaf6fcb5,"Using Co-Captured Face, Gaze and Verbal Reactions to Images of Using Co-Captured Face, Gaze and Verbal Reactions to Images of",Rochester Institute of Technology,Rochester Institute of Technology,"Rochester Institute of Technology (RIT), 1, Lomb Memorial Drive, Bailey, Henrietta Town, Monroe County, New York, 14623, USA",43.08250655,-77.67121663,edu,
+6681ec516067747a4576f737f10f8d9bbca2d8d1,Perturbative Neural Networks ( Supplementary Material ),Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+6681ec516067747a4576f737f10f8d9bbca2d8d1,Perturbative Neural Networks ( Supplementary Material ),Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+6681ec516067747a4576f737f10f8d9bbca2d8d1,Perturbative Neural Networks ( Supplementary Material ),Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+6604fd47f92ce66dd0c669dd66b347b80e17ebc9,Simultaneous Cascaded Regression,Institute of Systems and Robotics,Institute of Systems and Robotics,"Institut für Robotik und Kognitive Systeme, 160, Ratzeburger Allee, Strecknitz, Sankt Jürgen, Strecknitz, Lübeck, Schleswig-Holstein, 23562, Deutschland",53.83383710,10.70359390,edu,
+66e9fb4c2860eb4a15f713096020962553696e12,A New Urban Objects Detection Framework Using Weakly Annotated Sets,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+666aa18ed45a0a92959d91d0f9a4c928aceb1450,Material : Modelling and unsupervised learning of symmetric deformable object categories,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+666aa18ed45a0a92959d91d0f9a4c928aceb1450,Material : Modelling and unsupervised learning of symmetric deformable object categories,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+66080593dc4ea2347d4ff8c10e4b4dedf0d16ad2,Improving biometric identification through quality-based face and fingerprint biometric fusion,GE Global Research Center,GE Global Research Center,"GE Global Research Center, Aqueduct, Niskayuna, Schenectady County, New York, USA",42.82982480,-73.87719385,edu,
+663981cfc5fc843ec2682b77ae427ac351bc2180,Detecting Repeating Objects Using Patch Correlation Analysis,Hebrew University of Jerusalem,The Hebrew University of Jerusalem,"האוניברסיטה העברית בירושלים, Reagan Plaza, קרית מנחם בגין, הר הצופים, ירושלים, מחוז ירושלים, NO, ישראל",31.79185550,35.24472300,edu,
+3e69ed088f588f6ecb30969bc6e4dbfacb35133e,Improving Performance of Texture Based Face Recognition Systems by Segmenting Face Region,Manonmaniam Sundaranar University,Manonmaniam Sundaranar University,"Manonmaniam Sundaranar University, Tenkasi-Tirunelveli, Gandhi Nagar, Tirunelveli, Tirunelveli Kattabo, Tamil Nadu, 627808, India",8.76554685,77.65100445,edu,
+3e56cbce67d312af2b3a7d0981e9cb33d2236bea,Boosting attribute recognition with latent topics by matrix factorization,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+3e56cbce67d312af2b3a7d0981e9cb33d2236bea,Boosting attribute recognition with latent topics by matrix factorization,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+3e56cbce67d312af2b3a7d0981e9cb33d2236bea,Boosting attribute recognition with latent topics by matrix factorization,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+3e56cbce67d312af2b3a7d0981e9cb33d2236bea,Boosting attribute recognition with latent topics by matrix factorization,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+3e0a1884448bfd7f416c6a45dfcdfc9f2e617268,Understanding and Controlling User Linkability in Decentralized Learning,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+3efb04a9847284680b48214855eb0a962efa5c7b,De-identification for Privacy Protection in Surveillance Videos,International Institute of Information Technology,International Institute of Information Technology,"International Institute of Information Technology, Hyderabad, Campus Road, Ward 105 Gachibowli, Greater Hyderabad Municipal Corporation West Zone, Hyderabad, Rangareddy District, Telangana, 500032, India",17.44549570,78.34854698,edu,
+3e211a93388dcf29dda4cd6d3d515042f2cffee7,Breaking the Chain: Liberation from the Temporal Markov Assumption for Tracking Human Poses,Oak Ridge National Laboratory,Oak Ridge National Laboratory,"Oak Ridge National Laboratory, Oak Ridge, Roane County, Tennessee, USA",35.93006535,-84.31240032,edu,
+3e211a93388dcf29dda4cd6d3d515042f2cffee7,Breaking the Chain: Liberation from the Temporal Markov Assumption for Tracking Human Poses,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+3ebce6710135d1f9b652815e59323858a7c60025,Component-based Face Detection,University of Siena,University of Siena,"大學 University, 澤祥街 Chak Cheung Street, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.41338620,114.21005800,edu,
+3e2ec9cea926bd02072aa41bd81eb4c593e205e9,Adversarial Information Factorization,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+3e2ec9cea926bd02072aa41bd81eb4c593e205e9,Adversarial Information Factorization,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+3e2ec9cea926bd02072aa41bd81eb4c593e205e9,Adversarial Information Factorization,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+3eb5f1d466228c1345d92f906ab31ab93c160837,Single-Pedestrian Detection Aided by Multi-pedestrian Detection,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+3eb5f1d466228c1345d92f906ab31ab93c160837,Single-Pedestrian Detection Aided by Multi-pedestrian Detection,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+3ef997bf6306d157c062f0744ea0d8ce8f390e2a,Visual Choice of Plausible Alternatives: An Evaluation of Image-based Commonsense Causal Reasoning,Yonsei University,Yonsei University,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.56004060,126.93692480,edu,
+3ef997bf6306d157c062f0744ea0d8ce8f390e2a,Visual Choice of Plausible Alternatives: An Evaluation of Image-based Commonsense Causal Reasoning,Yonsei University,Yonsei University,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.56004060,126.93692480,edu,
+3ef997bf6306d157c062f0744ea0d8ce8f390e2a,Visual Choice of Plausible Alternatives: An Evaluation of Image-based Commonsense Causal Reasoning,Yonsei University,Yonsei University,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.56004060,126.93692480,edu,
+3e2e9ac490726c37a0797792dd2aa9d20404b9b0,Learning Invariant Riemannian Geometric Representations Using Deep Nets,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+3e2e9ac490726c37a0797792dd2aa9d20404b9b0,Learning Invariant Riemannian Geometric Representations Using Deep Nets,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+3ec653164169c1a1b5c12ece2130326606a24e6c,A Phase Discrepancy Analysis of Object Motion,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+3ec653164169c1a1b5c12ece2130326606a24e6c,A Phase Discrepancy Analysis of Object Motion,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+3ec653164169c1a1b5c12ece2130326606a24e6c,A Phase Discrepancy Analysis of Object Motion,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+3eca8ed3164324698d0171e62dec24e8abda9e26,Human emotion recognition using real 3D visual features from Gabor library,Ryerson University,Ryerson University,"Ryerson University, Gould Street, Downtown Yonge, Old Toronto, Toronto, Ontario, M5B 2G9, Canada",43.65815275,-79.37908010,edu,
+3e4bd67a10e291ad6d5614a6e97efb69b2dd051a,Learning Spatio-temporal Features with Partial Expression Sequences for on-the-Fly Prediction,Korea Advanced Institute of Science and Technology,Korea Advanced Institute of Science and Technology,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.36971910,127.36253700,edu,
+3e42e336d67dad79ab6355c02f1f045f8a71a18f,Autism spectrum traits in normal individuals: a preliminary VBM analysis,University of Texas at Dallas,University of Texas at Dallas,"University of Texas at Dallas, Richardson, Dallas County, Texas, 78080, USA",32.98207990,-96.75662780,edu,
+3e42e336d67dad79ab6355c02f1f045f8a71a18f,Autism spectrum traits in normal individuals: a preliminary VBM analysis,University of Texas at Dallas,University of Texas at Dallas,"University of Texas at Dallas, Richardson, Dallas County, Texas, 78080, USA",32.98207990,-96.75662780,edu,
+3e98719cc0b570c7a0c7c903efb010075dd267e7,Real Time Person Tracking and Behavior Interpretation in Multi Camera Scenarios Applying Homography and Coupled HMMs,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+3ed9730e5ec8716e8cdf55f207ef973a9c854574,Visual Compiler: Synthesizing a Scene-Specific Pedestrian Detector and Pose Estimator,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+3ed81d552fb33fe64c766407570f3d8b062fb292,Data-Driven Edge Computing Resource Scheduling for Protest Crowds Incident Management,University of Missouri,University of Missouri,"L1, Maguire Boulevard, Lemone Industrial Park, Columbia, Boone County, Missouri, 65201, USA",38.92676100,-92.29193783,edu,
+3ed81d552fb33fe64c766407570f3d8b062fb292,Data-Driven Edge Computing Resource Scheduling for Protest Crowds Incident Management,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+3e90a70d768415e28fbf0dd56e53f8933784c416,Highly Efficient Regression for Scalable Person Re-Identification,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+3eb298bfcc33f6e40bfd2e8788b13b256d2c0391,Towards Unified Human Parsing and Pose Estimation,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+3e421fd1775413bf89abd8e39a35e5e29d1a4dab,Addressing bias in machine learning algorithms: A pilot study on emotion recognition for intelligent systems,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+3e421fd1775413bf89abd8e39a35e5e29d1a4dab,Addressing bias in machine learning algorithms: A pilot study on emotion recognition for intelligent systems,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+3e58fbb8cb96880e018ca18a60e2d86e3cb0c10a,Generative Partition Networks for Multi-Person Pose Estimation,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+3e58fbb8cb96880e018ca18a60e2d86e3cb0c10a,Generative Partition Networks for Multi-Person Pose Estimation,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+3ec77809aaa7bd30858a4274e3c28a2a0259b30c,Latent trees for estimating intensity of Facial Action Units,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+3ec77809aaa7bd30858a4274e3c28a2a0259b30c,Latent trees for estimating intensity of Facial Action Units,Oregon State University,Oregon State University,"OSU Beaver Store, 538, Southwest 6th Avenue, Portland Downtown, Portland, Multnomah County, Oregon, 97204, USA",45.51982890,-122.67797964,edu,
+3ec77809aaa7bd30858a4274e3c28a2a0259b30c,Latent trees for estimating intensity of Facial Action Units,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+3ed2ebfd783298a9a2e412529ffabdeb98bd552d,Modelling of Orthogonal Craniofacial Profiles,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+3e7ffb5658cf99968633ede18785c5cfdd6aa9eb,Semi-Supervised Deep Learning for Monocular Depth Map Prediction,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+3e03435792619833d4e2aa14344761b003c10c67,A Simultaneously Calibration Approach for Installation and Attitude Errors of an INS/GPS/LDS Target Tracker,Harbin Engineering University,Harbin Engineering University,"哈尔滨工程大学, 文庙街 - Wenmiao Street, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.77445695,126.67684917,edu,
+3e03435792619833d4e2aa14344761b003c10c67,A Simultaneously Calibration Approach for Installation and Attitude Errors of an INS/GPS/LDS Target Tracker,Aalto University,Aalto University,"Aalto, 24, Otakaari, Otaniemi, Suur-Tapiola, Espoo, Helsingin seutukunta, Uusimaa, Etelä-Suomi, Manner-Suomi, 02150, Suomi",60.18558755,24.82427330,edu,
+3eda9f9c29cec4f44e210d40b54810de525d75fb,"Image Annotation Incorporating Low-Rankness, Tag and Visual Correlation and Inhomogeneous Errors",Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+3e51d634faacf58e7903750f17111d0d172a0bf1,A compressible template protection scheme for face recognition based on sparse representation,Tokyo Metropolitan University,Tokyo Metropolitan University,"首都大学東京, 由木緑道, 八王子市, 東京都, 関東地方, 1920364, 日本",35.62009250,139.38296706,edu,
+3ebbacf0bfe95781e70ee37085bb2addf30a40a7,Scalable Vision System for Mouse Homecage Ethology,National Institutes of Health,National Institutes of Health,"NIH, Pooks Hill, Bethesda, Montgomery County, Maryland, USA",39.00041165,-77.10327775,edu,
+3ebbacf0bfe95781e70ee37085bb2addf30a40a7,Scalable Vision System for Mouse Homecage Ethology,George Mason University,George Mason University,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA",38.83133325,-77.30798839,edu,
+3e40991ab1daa2a4906eb85a5d6a01a958b6e674,LipNet: End-to-End Sentence-level Lipreading,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+3eadf02a7ac57a2a0cc794180bf0b46b45a9e0a2,Discriminant Mutual Subspace Learning for Indoor and Outdoor Face Recognition,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+3eadf02a7ac57a2a0cc794180bf0b46b45a9e0a2,Discriminant Mutual Subspace Learning for Indoor and Outdoor Face Recognition,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+3eadf02a7ac57a2a0cc794180bf0b46b45a9e0a2,Discriminant Mutual Subspace Learning for Indoor and Outdoor Face Recognition,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+3ed60f021fe469f2423d04917e69864251d23e08,Metadata of the chapter that will be visualized in SpringerLink,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+3e78402eab72d87eda1f0b44ca7ff54ba0b6b914,Hierarchical object groups for scene classification,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+3e3a87eb24628ab075a3d2bde3abfd185591aa4c,Effects of sparseness and randomness of pairwise distance matrix on t-SNE results,Aalto University,Aalto University,"Aalto, 24, Otakaari, Otaniemi, Suur-Tapiola, Espoo, Helsingin seutukunta, Uusimaa, Etelä-Suomi, Manner-Suomi, 02150, Suomi",60.18558755,24.82427330,edu,
+3ea0bc7cd58d4214d4ed20e8acfa76054f73654d,Recycled linear classifiers for multiclass classification,University of Minnesota,University of Minnesota,"WeismanArt, 333, East River Parkway, Marcy-Holmes, Phillips, Minneapolis, Hennepin County, Minnesota, 55455, USA",44.97308605,-93.23708813,edu,
+3e76496aa3840bca2974d6d087bfa4267a390768,Dictionary Learning in Optimal Metric Subspace,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+3e76496aa3840bca2974d6d087bfa4267a390768,Dictionary Learning in Optimal Metric Subspace,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+3e682d368422ff31632760611039372a07eeabc6,Articulated Multi-person Tracking in the Wild,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+3e207c05f438a8cef7dd30b62d9e2c997ddc0d3f,Objects as context for detecting their semantic parts,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+5017d635ba8dd630fc0375bfa71cf2a3397fae8d,Multiset Feature Learning for Highly Imbalanced Data Classification,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+5017d635ba8dd630fc0375bfa71cf2a3397fae8d,Multiset Feature Learning for Highly Imbalanced Data Classification,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+5017d635ba8dd630fc0375bfa71cf2a3397fae8d,Multiset Feature Learning for Highly Imbalanced Data Classification,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+5017d635ba8dd630fc0375bfa71cf2a3397fae8d,Multiset Feature Learning for Highly Imbalanced Data Classification,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+5040f7f261872a30eec88788f98326395a44db03,Generalised Scalable Robust Principal Component Analysis,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+505ea4493e4b733352c921401a96d92b4e6d4448,Coupled Discriminative Feature Learning for Heterogeneous Face Recognition,Beijing Jiaotong University,Beijing Jiaotong University,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国",39.94976005,116.33629046,edu,
+505ea4493e4b733352c921401a96d92b4e6d4448,Coupled Discriminative Feature Learning for Heterogeneous Face Recognition,"Advanced Digital Sciences Center, Singapore","Advanced Digital Sciences Center, Singapore","1 Create Way, 14-02 Create Tower, Singapore 138602",1.30372570,103.77377630,edu,
+500fbe18afd44312738cab91b4689c12b4e0eeee,ChaLearn looking at people 2015 new competitions: Age estimation and cultural event recognition,University of Barcelona,University of Barcelona,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España",41.38689130,2.16352385,edu,
+500fbe18afd44312738cab91b4689c12b4e0eeee,ChaLearn looking at people 2015 new competitions: Age estimation and cultural event recognition,Universitat Oberta de Catalunya,Universitat Oberta de Catalunya,"Universitat Oberta de Catalunya, 156, Rambla del Poblenou, Provençals del Poblenou, Sant Martí, Barcelona, BCN, CAT, 08018, España",41.40657415,2.19453410,edu,
+500fbe18afd44312738cab91b4689c12b4e0eeee,ChaLearn looking at people 2015 new competitions: Age estimation and cultural event recognition,University of Barcelona,University of Barcelona,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España",41.38689130,2.16352385,edu,
+500fbe18afd44312738cab91b4689c12b4e0eeee,ChaLearn looking at people 2015 new competitions: Age estimation and cultural event recognition,University of Barcelona,University of Barcelona,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España",41.38689130,2.16352385,edu,
+500fbe18afd44312738cab91b4689c12b4e0eeee,ChaLearn looking at people 2015 new competitions: Age estimation and cultural event recognition,University of Venezia,University of Venezia,"University, Fondamenta Toffetti, Dorsoduro, Venezia-Murano-Burano, Venezia, VE, VEN, 30123, Italia",45.43127420,12.32653770,edu,
+50c4ece0f07f2fbc3cf2fef98df24aeea0145899,Pedestrian Counting with Occlusion Handling Using Stereo Thermal Cameras,Aalborg University,Aalborg University,"AAU, Pontoppidanstræde, Sønder Tranders, Aalborg, Aalborg Kommune, Region Nordjylland, 9220, Danmark",57.01590275,9.97532827,edu,
+506c2fbfa9d16037d50d650547ad3366bb1e1cde,Convolutional Channel Features : Tailoring CNN to Diverse Tasks,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+50137d663802224e683951c48970496b38b02141,DETRAC: A New Benchmark and Protocol for Multi-Object Tracking,Hanyang University,Hanyang University,"한양대, 206, 왕십리로, 사근동, 성동구, 서울특별시, 04763, 대한민국",37.55572710,127.04366420,edu,
+504028218290d68859f45ec686f435f473aa326c,Multi-Fiber Networks for Video Recognition,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+50e5aac9037380108099c09ac53f8cc3f1b31bf3,Jointly Optimize Data Augmentation and Network Training: Adversarial Data Augmentation in Human Pose Estimation,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+50e5aac9037380108099c09ac53f8cc3f1b31bf3,Jointly Optimize Data Augmentation and Network Training: Adversarial Data Augmentation in Human Pose Estimation,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+50e5aac9037380108099c09ac53f8cc3f1b31bf3,Jointly Optimize Data Augmentation and Network Training: Adversarial Data Augmentation in Human Pose Estimation,Facebook,Facebook,"250 Bryant St, Mountain View, CA 94041, USA",37.39367170,-122.08072620,company,"Facebook, Mountain View, CA"
+50e5aac9037380108099c09ac53f8cc3f1b31bf3,Jointly Optimize Data Augmentation and Network Training: Adversarial Data Augmentation in Human Pose Estimation,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+50a8dc4c1d40967a95b684eb421edd03415fb7ab,Nothing Else Matters: Model-Agnostic Explanations By Identifying Prediction Invariance,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+50a8dc4c1d40967a95b684eb421edd03415fb7ab,Nothing Else Matters: Model-Agnostic Explanations By Identifying Prediction Invariance,"University of California, Irvine","University of California, Irvine","University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA",33.64319010,-117.84016494,edu,
+50a8dc4c1d40967a95b684eb421edd03415fb7ab,Nothing Else Matters: Model-Agnostic Explanations By Identifying Prediction Invariance,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+509be79bd94d56ef7cd1af54e2be88983805bbe9,Thread Progress Equalization: Dynamically Adaptive Power-Constrained Performance Optimization of Multi-Threaded Applications,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+509be79bd94d56ef7cd1af54e2be88983805bbe9,Thread Progress Equalization: Dynamically Adaptive Power-Constrained Performance Optimization of Multi-Threaded Applications,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+509be79bd94d56ef7cd1af54e2be88983805bbe9,Thread Progress Equalization: Dynamically Adaptive Power-Constrained Performance Optimization of Multi-Threaded Applications,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+50399793a5334654dedcea635cad291dda77de96,Humanising GrabCut: Learning to segment humans using the Kinect,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+508d5e0ef6cbce1997d968c5d4534a7baba84948,Multi-view Pictorial Structures for 3D Human Pose Estimation,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+50b58becaf67e92a6d9633e0eea7d352157377c3,Dependency-Aware Attention Control for Unconstrained Face Recognition with Image Sets,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+50d1021392b6b226cb6a022b69b55396dfec99fa,Leveraging single for multi-target tracking using a novel trajectory overlap affinity measure,ETH Zurich,"Computer Vision Laboratory, ETH Zurich, Zurich, Switzerland","Sternwartstrasse 7, 8092 Zürich, Switzerland",47.37723980,8.55216180,edu,
+50fc40ec6166dc33c6e59ef5dd75230651076f44,Efficient feature selection for linear discriminant analysis and its application to face recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+50131e57e14eafd385d94fb31e63f86a5bab9b9f,Detection and Segmentation of Brain Metastases with Deep Convolutional Networks,"KTH Royal Institute of Technology, Stockholm","KTH Royal Institute of Technology, Stockholm","KTH, Teknikringen, Lärkstaden, Norra Djurgården, Östermalms stadsdelsområde, Sthlm, Stockholm, Stockholms län, Svealand, 114 28, Sverige",59.34986645,18.07063213,edu,
+50ed931266a22bc166afef38f4b217fe9b4d5d74,Efficient eye typing with 9-direction gaze estimation,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+504c1cc2ddea7db0f684269be3df05e9e95b6e2c,"Automatic 3D Face Detection, Normalization and Recognition",University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+5050807e90a925120cbc3a9cd13431b98965f4b9,Unsupervised Learning of Discriminative Relative Visual Attributes,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+5050807e90a925120cbc3a9cd13431b98965f4b9,Unsupervised Learning of Discriminative Relative Visual Attributes,Hacettepe University,Hacettepe University,"Hacettepe Üniversitesi Beytepe Kampüsü, Hacettepe-Beytepe Kampüs Yolu, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.86742125,32.73519072,edu,
+50894e607cd5eb616913b520c4e238a73f432b86,Neural correlates of eye gaze processing in the infant broader autism phenotype.,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+508702ed2bf7d1b0655ea7857dd8e52d6537e765,Saliency-Informed Spatio-Temporal Vector of Locally Aggregated Descriptors and Fisher Vectors for Visual Action Recognition,Northumbria University,Northumbria University,"Northumbria University, Birkdale Close, High Heaton, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE7 7TP, UK",55.00306320,-1.57463231,edu,
+5061f591aa8ff224cd20cdcb3b62d156fb187bed,One Model To Learn Them All,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+50e45e9c55c9e79aaae43aff7d9e2f079a2d787b,Unbiased Feature Selection in Learning Random Forests for High-Dimensional Data,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+50e45e9c55c9e79aaae43aff7d9e2f079a2d787b,Unbiased Feature Selection in Learning Random Forests for High-Dimensional Data,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+50e45e9c55c9e79aaae43aff7d9e2f079a2d787b,Unbiased Feature Selection in Learning Random Forests for High-Dimensional Data,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+50e45e9c55c9e79aaae43aff7d9e2f079a2d787b,Unbiased Feature Selection in Learning Random Forests for High-Dimensional Data,Shenzhen University,Shenzhen University,"深圳大学, 3688, 南海大道, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518060, 中国",22.53521465,113.93159110,edu,
+5003754070f3a87ab94a2abb077c899fcaf936a6,Evaluation of LC - KSVD on UCF 101 Action Dataset,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+50af3b6f7192951b42c2531ee931c8244e505a5c,Weakly Supervised Learning for Attribute Localization in Outdoor Scenes,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+504002dbd2de78f8d55c860a76a6ee322eb816a8,3D Pose from Motion for Cross-View Action Recognition via Non-linear Circulant Temporal Encoding,University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+5047cae1b6f47ac1715479abfa3daf1c1a063977,Predictor Combination at Test Time — Supplemental Document,University of Bath,University of Bath,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK",51.37914420,-2.32523320,edu,
+5047cae1b6f47ac1715479abfa3daf1c1a063977,Predictor Combination at Test Time — Supplemental Document,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+5047cae1b6f47ac1715479abfa3daf1c1a063977,Predictor Combination at Test Time — Supplemental Document,University of Bath,University of Bath,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK",51.37914420,-2.32523320,edu,
+503db524b9a99220d430e741c44cd9c91ce1ddf8,"Who's Better, Who's Best: Skill Determination in Video using Deep Ranking",University of Bristol,University of Bristol,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK",51.45848370,-2.60977520,edu,
+50953b9a15aca6ef3351e613e7215abdcae1435e,Learning coarse-to-fine sparselets for efficient object detection and scene classification,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+5060e2e7d94e002a5376f4edfd2e48ac01d6221f,"Automatic Description Generation from Images: A Survey of Models, Datasets, and Evaluation Measures",University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+5060e2e7d94e002a5376f4edfd2e48ac01d6221f,"Automatic Description Generation from Images: A Survey of Models, Datasets, and Evaluation Measures",Middle East Technical University,Middle East Technical University,"ODTÜ, 1, 1591.sk(315.sk), Çiğdem Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87549675,32.78553506,edu,
+5060e2e7d94e002a5376f4edfd2e48ac01d6221f,"Automatic Description Generation from Images: A Survey of Models, Datasets, and Evaluation Measures",University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+5060e2e7d94e002a5376f4edfd2e48ac01d6221f,"Automatic Description Generation from Images: A Survey of Models, Datasets, and Evaluation Measures",Hacettepe University,Hacettepe University,"Hacettepe Üniversitesi Beytepe Kampüsü, Hacettepe-Beytepe Kampüs Yolu, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.86742125,32.73519072,edu,
+5060e2e7d94e002a5376f4edfd2e48ac01d6221f,"Automatic Description Generation from Images: A Survey of Models, Datasets, and Evaluation Measures",University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+5060e2e7d94e002a5376f4edfd2e48ac01d6221f,"Automatic Description Generation from Images: A Survey of Models, Datasets, and Evaluation Measures",University of Malta,University of Malta,"University of Malta, Ring Road, Japanese Garden, L-Imsida, Malta, MSD 9027, Malta",35.90232260,14.48341890,edu,
+5060e2e7d94e002a5376f4edfd2e48ac01d6221f,"Automatic Description Generation from Images: A Survey of Models, Datasets, and Evaluation Measures",University of Copenhagen,University of Copenhagen,"Københavns Universitet, Krystalgade, Kødbyen, Vesterbro, København, Københavns Kommune, Region Hovedstaden, 1165, Danmark",55.68015020,12.57232700,edu,
+501bfe67683ddfecf3710f5946c3b77f1ffe9adf,Pillar Networks++: Distributed non-parametric deep and wide networks,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+50797bf1a167ba294f640d3ae237cee962427cf0,Harry Potter's Marauder's Map: Localizing and Tracking Multiple Persons-of-Interest by Nonnegative Discretization,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+50bf4f77d8b66ec838ad59a869630eace7e0e4a7,Deeply-Learned Part-Aligned Representations for Person Re-identification,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+50bf4f77d8b66ec838ad59a869630eace7e0e4a7,Deeply-Learned Part-Aligned Representations for Person Re-identification,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+50d80e2020698b4cf49e6b820df0aea497d8fdd3,Charades-Ego: A Large-Scale Dataset of Paired Third and First Person Videos,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+50d2fa4e3a4e961cf35cef6d11ea745f9d1b3839,Spotlight the Negatives: A Generalized Discriminative Latent Model,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+500c096c3be8c6dc084cdbf1b24288926b2dfefc,Using Psychophysical Methods to Understand Mechanisms of Face Identification in a Deep Neural Network,University of Glasgow,University of Glasgow,"University of Glasgow, University Avenue, Yorkhill, Hillhead, Glasgow, Glasgow City, Scotland, G, UK",55.87231535,-4.28921784,edu,
+500c096c3be8c6dc084cdbf1b24288926b2dfefc,Using Psychophysical Methods to Understand Mechanisms of Face Identification in a Deep Neural Network,University of Glasgow,University of Glasgow,"University of Glasgow, University Avenue, Yorkhill, Hillhead, Glasgow, Glasgow City, Scotland, G, UK",55.87231535,-4.28921784,edu,
+500c096c3be8c6dc084cdbf1b24288926b2dfefc,Using Psychophysical Methods to Understand Mechanisms of Face Identification in a Deep Neural Network,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+500c096c3be8c6dc084cdbf1b24288926b2dfefc,Using Psychophysical Methods to Understand Mechanisms of Face Identification in a Deep Neural Network,University of Glasgow,University of Glasgow,"University of Glasgow, University Avenue, Yorkhill, Hillhead, Glasgow, Glasgow City, Scotland, G, UK",55.87231535,-4.28921784,edu,
+500c096c3be8c6dc084cdbf1b24288926b2dfefc,Using Psychophysical Methods to Understand Mechanisms of Face Identification in a Deep Neural Network,University of Glasgow,University of Glasgow,"University of Glasgow, University Avenue, Yorkhill, Hillhead, Glasgow, Glasgow City, Scotland, G, UK",55.87231535,-4.28921784,edu,
+50bf19a06915778a0bcbdef700f91b56258a4e1f,Common and distinct neural features of social and non-social reward processing in autism and social anxiety disorder.,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+50bf19a06915778a0bcbdef700f91b56258a4e1f,Common and distinct neural features of social and non-social reward processing in autism and social anxiety disorder.,University of North Carolina at Chapel Hill,University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.91139710,-79.05045290,edu,
+507af6591900a7165c529eca9fd370008c1ac87c,"For Black men, being tall increases threat stereotyping and police stops.",University of North Carolina at Chapel Hill,University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.91139710,-79.05045290,edu,
+507af6591900a7165c529eca9fd370008c1ac87c,"For Black men, being tall increases threat stereotyping and police stops.",Yale University,Yale University,"Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA",41.25713055,-72.98966960,edu,
+50c0de2cccf7084a81debad5fdb34a9139496da0,"The Influence of Annotation, Corpus Design, and Evaluation on the Outcome of Automatic Classification of Human Emotions",Ulm University,Ulm University,"HNU, John-F.-Kennedy-Straße, Vorfeld, Wiley, Neu-Ulm, Landkreis Neu-Ulm, Schwaben, Bayern, 89231, Deutschland",48.38044335,10.01010115,edu,
+68c3e61cefcfe4812df54be12625dabe66fb06a4,A Compact Deep Learning Model for Robust Facial Expression Recognition,National Tsing Hua University,National Tsing Hua University,"國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.79254840,120.99511830,edu,
+68245f308f8049dc40f146e296d6e6a6bdba1ff4,Private and Shared Taste in Art and Face Appreciation,University of Vienna,University of Vienna,"Uni Wien, 1, Universitätsring, Schottenviertel, KG Innere Stadt, Innere Stadt, Wien, 1010, Österreich",48.21313020,16.36068653,edu,
+6861552bf6730529d3fac5d6f2bb7e0f491edea2,Neural Self Talk: Image Understanding via Continuous Questioning and Answering,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+68f89c1ee75a018c8eff86e15b1d2383c250529b,Final Report for Project Localizing Objects and Actions in Videos Using Accompanying Text,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+68f89c1ee75a018c8eff86e15b1d2383c250529b,Final Report for Project Localizing Objects and Actions in Videos Using Accompanying Text,George Mason University,George Mason University,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA",38.83133325,-77.30798839,edu,
+68f89c1ee75a018c8eff86e15b1d2383c250529b,Final Report for Project Localizing Objects and Actions in Videos Using Accompanying Text,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+68f89c1ee75a018c8eff86e15b1d2383c250529b,Final Report for Project Localizing Objects and Actions in Videos Using Accompanying Text,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+68f89c1ee75a018c8eff86e15b1d2383c250529b,Final Report for Project Localizing Objects and Actions in Videos Using Accompanying Text,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+68f89c1ee75a018c8eff86e15b1d2383c250529b,Final Report for Project Localizing Objects and Actions in Videos Using Accompanying Text,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+68f89c1ee75a018c8eff86e15b1d2383c250529b,Final Report for Project Localizing Objects and Actions in Videos Using Accompanying Text,George Mason University,George Mason University,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA",38.83133325,-77.30798839,edu,
+68f89c1ee75a018c8eff86e15b1d2383c250529b,Final Report for Project Localizing Objects and Actions in Videos Using Accompanying Text,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+68f89c1ee75a018c8eff86e15b1d2383c250529b,Final Report for Project Localizing Objects and Actions in Videos Using Accompanying Text,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+688550a6c72f14cb8f2d9d86802c7cfc3d3d800e,Discovering Influential Factors in Variational Autoencoder,University at Buffalo,State University of New York at Buffalo,"Buffalo, NY 14260, USA",43.00080930,-78.78896970,edu,
+68147c43ad2ddebf223bd14a7928cbe26c7f270e,RNN Encoder Decoder 3 D Skeleton Converter 2 D Pose Sequence Generator Input Image 2 D Pose Heatmaps 3 D Skeleton Hourglass network,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+68c5238994e3f654adea0ccd8bca29f2a24087fc,pLSA-based zero-shot learning,University of Malaya,University of Malaya,"UM, Lingkaran Wawasan, Bukit Pantai, Bangsar, KL, 50603, Malaysia",3.12267405,101.65356103,edu,
+68ce1572b18c95fe9c60bc11d9d33f8310902154,Budgeted Nonparametric Learning from Data Streams,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+68e9c837431f2ba59741b55004df60235e50994d,Detecting Faces Using Region-based Fully Convolutional Networks,Tencent,"Tencent AI Lab, China","Ke Ji Zhong Yi Lu, Nanshan Qu, Shenzhen Shi, Guangdong Sheng, China, 518057",22.54471540,113.93571640,company,"Keji Middle 1st Rd, Nanshan Qu, Shenzhen Shi, Guangdong Sheng, China, 518057"
+687e17db5043661f8921fb86f215e9ca2264d4d2,A robust elastic and partial matching metric for face recognition,Microsoft,"Microsoft Corporation, Redmond, WA, USA","One Microsoft Way, Redmond, WA 98052, USA",47.64233180,-122.13693020,company,
+68c6df1249e1ee56835f79e1877506a16d8418f4,Criteria for Human-Compatible AI in Two-Player Vision-Language Tasks,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+68c6df1249e1ee56835f79e1877506a16d8418f4,Criteria for Human-Compatible AI in Two-Player Vision-Language Tasks,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+68249064f7d5046abef785ada541244fa67b4346,"Contribution of Developmental Psychology to the Study of Social Interactions: Some Factors in Play, Joint Attention and Joint Action and Implications for Robotics",University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+68c4a1d438ea1c6dfba92e3aee08d48f8e7f7090,AgeNet: Deeply Learned Regressor and Classifier for Robust Apparent Age Estimation,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+68f69e6c6c66cfde3d02237a6918c9d1ee678e1b,Enhancing Concept Detection by Pruning Data with MCA-Based Transaction Weights,University of Miami,University of Miami,"University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA",25.71733390,-80.27866887,edu,
+68f69e6c6c66cfde3d02237a6918c9d1ee678e1b,Enhancing Concept Detection by Pruning Data with MCA-Based Transaction Weights,Florida International University,Florida International University,"FIU, Southwest 14th Street, Sweetwater, University Park, Miami-Dade County, Florida, 33199, USA",25.75533775,-80.37628897,edu,
+6880013eb0b91a2b334e0be0dced0a1a79943469,Discrimination-aware Channel Pruning for Deep Neural Networks,South China University of Technology,South China University of Technology,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.05020420,113.39880323,edu,
+6880013eb0b91a2b334e0be0dced0a1a79943469,Discrimination-aware Channel Pruning for Deep Neural Networks,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+682760f2f767fb47e1e2ca35db3becbb6153756f,The Effect of Pets on Happiness: A Large-Scale Multi-Factor Analysis Using Social Multimedia,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+682760f2f767fb47e1e2ca35db3becbb6153756f,The Effect of Pets on Happiness: A Large-Scale Multi-Factor Analysis Using Social Multimedia,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+682760f2f767fb47e1e2ca35db3becbb6153756f,The Effect of Pets on Happiness: A Large-Scale Multi-Factor Analysis Using Social Multimedia,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+683ec608442617d11200cfbcd816e86ce9ec0899,Dual Linear Regression Based Classification for Face Cluster Recognition,University of Northern British Columbia,University of Northern British Columbia,"UNBC, Campus Ring Road, College Heights, Prince George, Regional District of Fraser-Fort George, British Columbia, V2M 5K7, Canada",53.89256620,-122.81471592,edu,
+6890af11d4c0a3189e974ffe7cf03088cf532ab7,3D landmark model discovery from a registered set of organic shapes,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+683f5c838ea2c9c50f3f5c5fa064c00868751733,3D Visual Proxemics: Recognizing Human Interactions in 3D from a Single Image,SRI International,SRI International,"SRI International Building, West 1st Street, Menlo Park, San Mateo County, California, 94025, USA",37.45857960,-122.17560525,edu,
+6821113166b030d2123c3cd793dd63d2c909a110,Acquisition and Indexing of Rgb-d Recordings for Facial Expressions and Emotion Recognition1,Gdansk University of Technology,Gdansk University of Technology,"PG, Romualda Traugutta, Królewska Dolina, Wrzeszcz Górny, Gdańsk, pomorskie, 80-233, RP",54.37086525,18.61716016,edu,
+68eb46d2920d2e7568d543de9fa2fc42cb8f5cbb,FACE2GPS: Estimating geographic location from facial features,University of Kentucky,University of Kentucky,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA",38.03337420,-84.50177580,edu,
+6834a469562cb563bc91ae08f4e2aa6b03e27b1a,Diffusion Decision Making for Adaptive k-Nearest Neighbor Classification,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+6834a469562cb563bc91ae08f4e2aa6b03e27b1a,Diffusion Decision Making for Adaptive k-Nearest Neighbor Classification,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+68a677a326a290a82bc08686465019414ebe1d98,ImageSpirit: Verbal Guided Image Parsing,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+68a677a326a290a82bc08686465019414ebe1d98,ImageSpirit: Verbal Guided Image Parsing,Oxford Brookes University,Oxford Brookes University,"Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK",51.75552050,-1.22615970,edu,
+68a677a326a290a82bc08686465019414ebe1d98,ImageSpirit: Verbal Guided Image Parsing,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+68a04a3ae2086986877fee2c82ae68e3631d0356,Thermal & Reflectance Based Identification in Challenging Variable Illuminations,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+687fc9ffea3ca36d87817faf37492941ec6eb0b9,Making Better Use of the Crowd: How Crowdsourcing Can Advance Machine Learning Research,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+578e002828e5e106dd660c7273ebcb014e8068a6,Distantly Supervised Road Segmentation,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+57f5711ca7ee5c7110b7d6d12c611d27af37875f,Illumination invariance for face verification,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+57f5711ca7ee5c7110b7d6d12c611d27af37875f,Illumination invariance for face verification,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+570308801ff9614191cfbfd7da88d41fb441b423,Unsupervised Synchrony Discovery in Human Interaction,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+570308801ff9614191cfbfd7da88d41fb441b423,Unsupervised Synchrony Discovery in Human Interaction,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+570308801ff9614191cfbfd7da88d41fb441b423,Unsupervised Synchrony Discovery in Human Interaction,University of Miami,University of Miami,"University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA",25.71733390,-80.27866887,edu,
+57bf9888f0dfcc41c5ed5d4b1c2787afab72145a,Robust Facial Expression Recognition Based on Local Directional Pattern,Kyung Hee University,Kyung Hee University,"Kyung Hee Tae Kwon Do, Vons 2370 Truck Service Ramp, University City, San Diego, San Diego County, California, 92122, USA",32.85363330,-117.20352860,edu,
+57522ff758642e054d7c50753ec1c3fe598533f0,Information-Based Boundary Equilibrium Generative Adversarial Networks with Interpretable Representation Learning,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+57ebeff9273dea933e2a75c306849baf43081a8c,Deep Convolutional Network Cascade for Facial Point Detection,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+57ebeff9273dea933e2a75c306849baf43081a8c,Deep Convolutional Network Cascade for Facial Point Detection,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+57ebeff9273dea933e2a75c306849baf43081a8c,Deep Convolutional Network Cascade for Facial Point Detection,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+575d6a05bb27316ad677f19e79473e314e6c6f94,Stacked What-Where Auto-encoders,Courant Institute of Mathematical Sciences,Courant Institute of Mathematical Sciences,"Courant Institute of Mathematical Sciences, 251, Mercer Street, Washington Square Village, Greenwich Village, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72864840,-73.99568630,edu,
+572791e2f290dc0ecb05e56bfa714c4b7af79b08,Extended MHT algorithm for multiple object tracking,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+572791e2f290dc0ecb05e56bfa714c4b7af79b08,Extended MHT algorithm for multiple object tracking,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+5778d49c8d8d127351eee35047b8d0dc90defe85,Probabilistic Subpixel Temporal Registration for Facial Expression Analysis,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+579b2962ac567a39742601cafe3fc43cf7a7109c,Video Paragraph Captioning Using Hierarchical Recurrent Neural Networks,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+579b2962ac567a39742601cafe3fc43cf7a7109c,Video Paragraph Captioning Using Hierarchical Recurrent Neural Networks,Facebook,Facebook,"250 Bryant St, Mountain View, CA 94041, USA",37.39367170,-122.08072620,company,"Facebook, Mountain View, CA"
+57ee3a8b0cafe211d1e9b477d210bb78b9d43bc1,Modeling the joint density of two images under a variety of transformations,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+57ee3a8b0cafe211d1e9b477d210bb78b9d43bc1,Modeling the joint density of two images under a variety of transformations,University of Frankfurt,University of Frankfurt,"Frankfurt University of Applied Sciences, Kleiststraße, Nordend West, Frankfurt, Regierungsbezirk Darmstadt, Hessen, 60318, Deutschland",50.13053055,8.69234224,edu,
+57ee3a8b0cafe211d1e9b477d210bb78b9d43bc1,Modeling the joint density of two images under a variety of transformations,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+57df27685196fad070bd2da14ed865fda87d93a9,Determining the best attributes for surveillance video keywords generation,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+57bd01c042a5f64659b3a9f91c048b8594f762f6,Advances in fine-grained visual categorization,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+571f493c0ade12bbe960cfefc04b0e4607d8d4b2,Review on Content Based Image Retrieval: From Its Origin to the New Age,Mahatma Gandhi Institute of Technology,Mahatma Gandhi Institute of Technology,"Gandipet Main Rd, Kokapet, Hyderabad, Telangana 500075, India",17.39084720,78.32176670,edu,
+57412e2966a04c106657c926bcfdcb5c3842444d,Camera and microphone array for 3D audiovisual face data collection,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+57412e2966a04c106657c926bcfdcb5c3842444d,Camera and microphone array for 3D audiovisual face data collection,Beckman Institute,Beckman Institute,"Beckman Institute, The Presidents' Walk, Urbana, Champaign County, Illinois, 61801-2341, USA",40.11571585,-88.22750772,edu,
+57488aa24092fa7118aa5374c90b282a32473cf9,A Weakly Supervised Adaptive DenseNet for Classifying Thoracic Diseases and Identifying Abnormalities,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+57488aa24092fa7118aa5374c90b282a32473cf9,A Weakly Supervised Adaptive DenseNet for Classifying Thoracic Diseases and Identifying Abnormalities,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+57488aa24092fa7118aa5374c90b282a32473cf9,A Weakly Supervised Adaptive DenseNet for Classifying Thoracic Diseases and Identifying Abnormalities,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+574b62c845809fd54cc168492424c5fac145bc83,Learning Warped Guidance for Blind Face Restoration,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+574b62c845809fd54cc168492424c5fac145bc83,Learning Warped Guidance for Blind Face Restoration,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+574b62c845809fd54cc168492424c5fac145bc83,Learning Warped Guidance for Blind Face Restoration,University of Kentucky,University of Kentucky,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA",38.03337420,-84.50177580,edu,
+576bfffd7d58a9c70ff73e39033f31739e6f09b2,Using Both Latent and Supervised Shared Topics for Multitask Learning,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+576bfffd7d58a9c70ff73e39033f31739e6f09b2,Using Both Latent and Supervised Shared Topics for Multitask Learning,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+57ec50237ee588d3b40640c4f98410cbd996ee84,Toward Guaranteed Illumination Models for Non-convex Objects,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+57ec50237ee588d3b40640c4f98410cbd996ee84,Toward Guaranteed Illumination Models for Non-convex Objects,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+5727ac51ad6fb67d81cc3ef2c04440c179bd53ab,Oxytocin attenuates amygdala responses to emotional faces regardless of valence.,University of Zurich,University of Zurich,"ZHAW, Rosenstrasse, Heiligberg, Altstadt, Winterthur, Bezirk Winterthur, Zürich, 8400, Schweiz/Suisse/Svizzera/Svizra",47.49684760,8.72981767,edu,
+57d33c0f8d6998d665a7ec6672a56cf8e7729c14,Detection of facial characteristics based on edge information,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+57efdcf4d56f15846c9c5104ce2cd414532ced7d,"The development of the Athens Emotional States Inventory (AESI): collection, validation and automatic processing of emotionally loaded sentences.",University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+57efdcf4d56f15846c9c5104ce2cd414532ced7d,"The development of the Athens Emotional States Inventory (AESI): collection, validation and automatic processing of emotionally loaded sentences.",National Technical University of Athens,National Technical University of Athens,"Εθνικό Μετσόβιο Πολυτεχνείο, Στουρνάρη, Μουσείο, Αθήνα, Δήμος Αθηναίων, Π.Ε. Κεντρικού Τομέα Αθηνών, Περιφέρεια Αττικής, Αττική, 11250, Ελλάδα",37.98782705,23.73179733,edu,
+5793b25e2492d47f5faf9b93b8c0fe36802de8b6,Robust Optimization for Deep Regression,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+5793b25e2492d47f5faf9b93b8c0fe36802de8b6,Robust Optimization for Deep Regression,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+575141e42740564f64d9be8ab88d495192f5b3bc,Age Estimation Based on Multi-Region Convolutional Neural Network,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+575141e42740564f64d9be8ab88d495192f5b3bc,Age Estimation Based on Multi-Region Convolutional Neural Network,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+5789f8420d8f15e7772580ec373112f864627c4b,Efficient Global Illumination for Morphable Models,University of Basel,University of Basel,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra",47.56126510,7.57529610,edu,
+57588971cfef4be8e0706f30cfafbf6c293fed3b,Semantic Autoencoder for Zero-Shot Learning,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+577c1d59e43f04a4bfda95b0b9e3b41d893bc0a2,Faster Evaluation of Labor-Intensive Features,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+574ad7ef015995efb7338829a021776bf9daaa08,AdaScan: Adaptive Scan Pooling in Deep Convolutional Neural Networks for Human Action Recognition in Videos,SRI International,SRI International,"SRI International Building, West 1st Street, Menlo Park, San Mateo County, California, 94025, USA",37.45857960,-122.17560525,edu,
+5741255d30f4848273c921ad177b32ff1cfe0671,DLD Journal F01,University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+57632f82553f34bce21cc8419bc5381d50096592,A Weighted Variational Model for Simultaneous Reflectance and Illumination Estimation,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+57632f82553f34bce21cc8419bc5381d50096592,A Weighted Variational Model for Simultaneous Reflectance and Illumination Estimation,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+57632f82553f34bce21cc8419bc5381d50096592,A Weighted Variational Model for Simultaneous Reflectance and Illumination Estimation,Ryerson University,Ryerson University,"Ryerson University, Gould Street, Downtown Yonge, Old Toronto, Toronto, Ontario, M5B 2G9, Canada",43.65815275,-79.37908010,edu,
+57b052cf826b24739cd7749b632f85f4b7bcf90b,Fast Fashion Guided Clothing Image Retrieval: Delving Deeper into What Feature Makes Fashion,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+57f7d8c6ec690bd436e70d7761bc5f46e993be4c,Facial expression recognition using histogram variances faces,University of Aizu,University of Aizu,"会津大学, 磐越自動車道, 会津若松市, 福島県, 東北地方, 965-8580, 日本",37.52367280,139.93807246,edu,
+57db5b35f2473fc3608fe3519d6763c1d4984eed,Learning from interaction : models and applications,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+5746dcc8164ab95dcf8569bd4f37dec58e1112a3,Recognizing Artificial Faces Using Wavelet Based Adapted Median Binary Patterns,University of Louisville,University of Louisville,"University of Louisville, South Brook Street, Louisville, Jefferson County, Kentucky, 40208, USA",38.21675650,-85.75725023,edu,
+57417c4a523d93801c8901d6f3c3740eaa65c9ae,Inverse Visual Question Answering,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+57417c4a523d93801c8901d6f3c3740eaa65c9ae,Inverse Visual Question Answering,Southeast University,Southeast University,"SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国",32.05752790,118.78682252,edu,
+3ba5aa0995f129d2854d9690adb6d982bba4e675,Super-Resolution Person Re-Identification With Semi-Coupled Low-Rank Discriminant Dictionary Learning,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+3ba5aa0995f129d2854d9690adb6d982bba4e675,Super-Resolution Person Re-Identification With Semi-Coupled Low-Rank Discriminant Dictionary Learning,Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+3ba5aa0995f129d2854d9690adb6d982bba4e675,Super-Resolution Person Re-Identification With Semi-Coupled Low-Rank Discriminant Dictionary Learning,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+3b4ec8af470948a72a6ed37a9fd226719a874ebc,A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification,Shandong University,Shandong University,"山东大学, 泰安街, 鳌山卫街道, 即墨区, 青岛市, 山东省, 266200, 中国",36.36934730,120.67381800,edu,
+3b4ec8af470948a72a6ed37a9fd226719a874ebc,A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+3b964d6a527f24b1a1f8499b0f4dbb0ed982d5e2,GADAM: Genetic-Evolutionary ADAM for Deep Neural Network Optimization,Florida State University,Florida State University,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA",30.44235995,-84.29747867,edu,
+3b964d6a527f24b1a1f8499b0f4dbb0ed982d5e2,GADAM: Genetic-Evolutionary ADAM for Deep Neural Network Optimization,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+3b152bdeedb97d68dd69bbb806c60c205e6fa696,Patch-Based Principal Component Analysis for Face Recognition,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"Columbus, OH 43210, USA",40.01419050,-83.03091430,edu,
+3bb724ee496100e12087ced6564198d63d843259,Recognizing Degraded Handwritten Characters,Vienna University of Technology,Vienna University of Technology,"TU Wien, Hauptgebäude, Hoftrakt, Freihausviertel, KG Wieden, Wieden, Wien, 1040, Österreich",48.19853965,16.36986168,edu,
+3b6602e64e62e5703151d17475d4728bd2095256,Brief Communication Oxytocin Modulates Neural Circuitry for Social Cognition and Fear in Humans,National Institutes of Health,National Institutes of Health,"NIH, Pooks Hill, Bethesda, Montgomery County, Maryland, USA",39.00041165,-77.10327775,edu,
+3b6602e64e62e5703151d17475d4728bd2095256,Brief Communication Oxytocin Modulates Neural Circuitry for Social Cognition and Fear in Humans,National Institutes of Health,National Institutes of Health,"NIH, Pooks Hill, Bethesda, Montgomery County, Maryland, USA",39.00041165,-77.10327775,edu,
+3b092733f428b12f1f920638f868ed1e8663fe57,On the size of Convolutional Neural Networks and generalization performance,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+3b73f8a2b39751efb7d7b396bf825af2aaadee24,Connecting Pixels to Privacy and Utility: Automatic Redaction of Private Information in Images,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+3b2d5585af59480531616fe970cb265bbdf63f5b,Robust Face Recognition under Varying Light Based on 3D Recovery,Concordia University,Concordia University,"Concordia University, 2811, Northeast Holman Street, Concordia, Portland, Multnomah County, Oregon, 97211, USA",45.57022705,-122.63709346,edu,
+3b64efa817fd609d525c7244a0e00f98feacc8b4,A Comprehensive Survey on Pose-Invariant Face Recognition,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+3b5128bfe35875d0cead04b7d19024d841b605f9,Multispectral pedestrian detection: Benchmark dataset and baseline,Korea Advanced Institute of Science and Technology,Korea Advanced Institute of Science and Technology,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.36971910,127.36253700,edu,
+3b01a839d174dad6f2635cff7ebe7e1aaad701a4,Image Co-localization by Mimicking a Good Detector's Confidence Score Distribution,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+3be9286e5d6a9d9167c64b05be6fb0712ffbba35,Recurrent Autoregressive Networks for Online Multi-object Tracking,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+3b788fd0817336b3db3e111fa2ff50b665070e95,"Multi-view traffic sign detection, recognition, and 3D localisation",Katholieke Universiteit Leuven,Katholieke Universiteit Leuven,"Laboratorium voor Bos, natuur en landschap, 102, Vital Decosterstraat, Sint-Maartensdal, Leuven, Vlaams-Brabant, Vlaanderen, 3000, België / Belgique / Belgien",50.88306860,4.70195030,edu,
+3b7f6035a113b560760c5e8000540fc46f91fed5,Coupling Alignments with Recognition for Still-to-Video Face Recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+3b2a2357b12cf0a5c99c8bc06ef7b46e40dd888e,Learning Person Trajectory Representations for Team Activity Analysis,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+3bd1d41a656c8159305ba2aa395f68f41ab84f31,Entity-Based Opinion Mining from Text and Multimedia,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+3bcd72be6fbc1a11492df3d36f6d51696fd6bdad,Multi-Task Zero-Shot Action Recognition with Prioritised Data Augmentation,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+3b9c08381282e65649cd87dfae6a01fe6abea79b,CUHK & ETHZ & SIAT Submission to ActivityNet Challenge 2016,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+3b9c08381282e65649cd87dfae6a01fe6abea79b,CUHK & ETHZ & SIAT Submission to ActivityNet Challenge 2016,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+3b84d074b8622fac125f85ab55b63e876fed4628,End-to-End Localization and Ranking for Relative Attributes,"University of California, Davis","University of California, Davis","University of California, Davis, Apiary Drive, Yolo County, California, 95616-5270, USA",38.53363490,-121.79077264,edu,
+3b67645cd512898806aaf1df1811035f2d957f6b,SCNet: Learning Semantic Correspondence,University of Hong Kong,University of Hong Kong,"海洋科學研究所 The Swire Institute of Marine Science, 鶴咀道 Cape D'Aguilar Road, 鶴咀低電台 Cape D'Aguilar Low-Level Radio Station, 石澳 Shek O, 芽菜坑村 Nga Choy Hang Tsuen, 南區 Southern District, 香港島 Hong Kong Island, HK, 中国",22.20814690,114.25964115,edu,
+3b4fd2aec3e721742f11d1ed4fa3f0a86d988a10,"Demo: Glimpse - Continuous, Real-Time Object Recognition on Mobile Devices",MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+3b4fd2aec3e721742f11d1ed4fa3f0a86d988a10,"Demo: Glimpse - Continuous, Real-Time Object Recognition on Mobile Devices",Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+3b4fd2aec3e721742f11d1ed4fa3f0a86d988a10,"Demo: Glimpse - Continuous, Real-Time Object Recognition on Mobile Devices",MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+3b4fd2aec3e721742f11d1ed4fa3f0a86d988a10,"Demo: Glimpse - Continuous, Real-Time Object Recognition on Mobile Devices",Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+3b4fd2aec3e721742f11d1ed4fa3f0a86d988a10,"Demo: Glimpse - Continuous, Real-Time Object Recognition on Mobile Devices",MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+3be8f1f7501978287af8d7ebfac5963216698249,Deep Cascaded Regression for Face Alignment,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+3be8f1f7501978287af8d7ebfac5963216698249,Deep Cascaded Regression for Face Alignment,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+3bc376f29bc169279105d33f59642568de36f17f,Active shape models with SIFT descriptors and MARS,University of Cape Town,University of Cape Town,"University of Cape Town, Engineering Mall, Cape Town Ward 59, Cape Town, City of Cape Town, Western Cape, CAPE TOWN, South Africa",-33.95828745,18.45997349,edu,
+3b8a5be5508f809a2d68a78d21cbf1690db57d5c,Large Scale Sketch Based Image Retrieval Using Patch Hashing,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+3b6680a28c87dec9f369263b8428e41a3844ac5f,Action Recognition from a Small Number of Frames,Vienna University of Technology,Vienna University of Technology,"TU Wien, Hauptgebäude, Hoftrakt, Freihausviertel, KG Wieden, Wieden, Wien, 1040, Österreich",48.19853965,16.36986168,edu,
+3b6680a28c87dec9f369263b8428e41a3844ac5f,Action Recognition from a Small Number of Frames,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+3b38c06caf54f301847db0dd622a6622c3843957,Gender differences in emotion perception and self-reported emotional intelligence: A test of the emotion sensitivity hypothesis,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+3b5787604b619c273bf98232b0bd3bce5d4a34ee,Learning Discriminative Hidden Structural Parts for Visual Tracking,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+3b5787604b619c273bf98232b0bd3bce5d4a34ee,Learning Discriminative Hidden Structural Parts for Visual Tracking,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+3b9d48a09510ebd8bd5045ba455279abb0a9baf8,OPERATORS and THEIR APPLICATIONS,Warsaw University of Technology,Warsaw University of Technology,"Politechnika Warszawska, 1, Plac Politechniki, VIII, Śródmieście, Warszawa, mazowieckie, 00-661, RP",52.22165395,21.00735776,edu,
+3ba5e820f160dfd02544120ab6c1678421fb2c3b,Future Semantic Segmentation with Convolutional LSTM,University of Manitoba,University of Manitoba,"University of Manitoba, Gillson Street, Normand Park, Saint Vital, Winnipeg, Manitoba, R3T 2N2, Canada",49.80915360,-97.13304179,edu,
+3bebff841ce7d40f0309bbc0e8cc454694061e82,Segmenting Scenes by Matching Image Composites,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+3bebff841ce7d40f0309bbc0e8cc454694061e82,Segmenting Scenes by Matching Image Composites,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+3b0b706fc94b35a1eddd830685e07870315b9565,Task-Driven Dynamic Fusion: Reducing Ambiguity in Video Description,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+3b0b706fc94b35a1eddd830685e07870315b9565,Task-Driven Dynamic Fusion: Reducing Ambiguity in Video Description,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+3b0b706fc94b35a1eddd830685e07870315b9565,Task-Driven Dynamic Fusion: Reducing Ambiguity in Video Description,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+3b466bb66ee79c8e9bcdb6cf9acb54b864dda735,"Joint inference of groups, events and human roles in aerial videos",Oregon State University,Oregon State University,"OSU Beaver Store, 538, Southwest 6th Avenue, Portland Downtown, Portland, Multnomah County, Oregon, 97204, USA",45.51982890,-122.67797964,edu,
+3bdef2961f9572d2d0f35148a7fa8a3a81f50dea,Finding the weakest link in person detectors,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+3bdf9c8ba5f5cf1845fe69b3874f0036ea8c245a,Latent Space Optimal Transport for Generative Models,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+3bdf9c8ba5f5cf1845fe69b3874f0036ea8c245a,Latent Space Optimal Transport for Generative Models,Dalian University of Technology,Dalian University of Technology,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国",38.88140235,121.52281098,edu,
+3bdf9c8ba5f5cf1845fe69b3874f0036ea8c245a,Latent Space Optimal Transport for Generative Models,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+3b9b200e76a35178da940279d566bbb7dfebb787,Learning Channel Inter-dependencies at Multiple Scales on Dense Networks for Face Recognition,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+3b408a3ca6fb39b0fda4d77e6a9679003b2dc9ab,Improving Classification by Improving Labelling: Introducing Probabilistic Multi-Label Object Interaction Recognition,University of Bristol,University of Bristol,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK",51.45848370,-2.60977520,edu,
+3bac7069b9d3051f40ef4eecacc517d02107ba4a,Early Recognition of Human Activities from First-Person Videos Using Onset Representations,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+3bac7069b9d3051f40ef4eecacc517d02107ba4a,Early Recognition of Human Activities from First-Person Videos Using Onset Representations,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+3bdcc99f45b58e4ddf4ffde5f58bea1ddada2744,Conversational Image Editing: Incremental Intent Identification in a New Dialogue Task,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+3b02aaccc9f063ae696c9d28bb06a8cd84b2abb8,"Who Leads the Clothing Fashion: Style, Color, or Texture? A Computational Study",Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+3b02aaccc9f063ae696c9d28bb06a8cd84b2abb8,"Who Leads the Clothing Fashion: Style, Color, or Texture? A Computational Study",Shenzhen University,Shenzhen University,"深圳大学, 3688, 南海大道, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518060, 中国",22.53521465,113.93159110,edu,
+3b02aaccc9f063ae696c9d28bb06a8cd84b2abb8,"Who Leads the Clothing Fashion: Style, Color, or Texture? A Computational Study",Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+3b02aaccc9f063ae696c9d28bb06a8cd84b2abb8,"Who Leads the Clothing Fashion: Style, Color, or Texture? A Computational Study",University of South Carolina,University of South Carolina,"University of South Carolina, Wheat Street, Columbia, Richland County, South Carolina, 29205, USA",33.99282980,-81.02685168,edu,
+3bb6570d81685b769dc9e74b6e4958894087f3f1,Hu-Fu: Hardware and Software Collaborative Attack Framework Against Neural Networks,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+3bad18554678ab46bbbf9de41d36423bc8083c83,Weakly Supervised Object Boundaries,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+3b3482e735698819a6a28dcac84912ec01a9eb8a,Individual recognition using gait energy image,"University of California, Riverside","University of California, Riverside","University of California, Riverside, Linden Street, Riverside, Riverside County, California, 92521, USA",33.98071305,-117.33261035,edu,
+3b37d95d2855c8db64bd6b1ee5659f87fce36881,Adversarially Optimizing Intersection over Union for Object Localization Tasks,University of Illinois at Chicago,University of Illinois at Chicago,"University of Illinois at Chicago, West Taylor Street, Greektown, Chicago, Cook County, Illinois, 60607, USA",41.86898915,-87.64856256,edu,
+3b37d95d2855c8db64bd6b1ee5659f87fce36881,Adversarially Optimizing Intersection over Union for Object Localization Tasks,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+3b37d95d2855c8db64bd6b1ee5659f87fce36881,Adversarially Optimizing Intersection over Union for Object Localization Tasks,University of Illinois at Chicago,University of Illinois at Chicago,"University of Illinois at Chicago, West Taylor Street, Greektown, Chicago, Cook County, Illinois, 60607, USA",41.86898915,-87.64856256,edu,
+3be027448ad49a79816cd21dcfcce5f4e1cec8a8,Actively selecting annotations among objects and attributes,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+3bd56f4cf8a36dd2d754704bcb71415dcbc0a165,Robust Regression,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+3b23c39f21156f9ea86ad8bb2ca53b2cf56b4181,Predictable Performance and Fairness Through Accurate Slowdown Estimation in Shared Main Memory Systems,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+3b23c39f21156f9ea86ad8bb2ca53b2cf56b4181,Predictable Performance and Fairness Through Accurate Slowdown Estimation in Shared Main Memory Systems,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+3b470b76045745c0ef5321e0f1e0e6a4b1821339,Consensus of Regression for Occlusion-Robust Facial Feature Localization,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+6f9c01a9b861882c6676227942005cef13f3cb29,Cross Quality Distillation,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+6f71862aa00d61fc8fd7f205de35ee8af458ec0c,Semi - supervised Learning of Instance - level Recognition from Video,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+6f4a596c80b5ccaf44a076760761c4f132920b11,Integrating Visual and Linguistic Information to Describe Properties of Objects,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+6f4a671537c9e60f042808451ff0fc06032d1221,Play and Learn: Using Video Games to Train Computer Vision Models,University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+6f8de996c9659459d4dc6a10cb3d8a43cb846422,Explainable Neural Computation via Stack Neural Module Networks,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+6f8de996c9659459d4dc6a10cb3d8a43cb846422,Explainable Neural Computation via Stack Neural Module Networks,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+6faab65a009b36ee3f79d3e4afdf3cc84d57cd67,Adversarial Learning for Semi-supervised Semantic Segmentation,"University of California, Merced","University of California, Merced","University of California, Merced, Ansel Adams Road, Merced County, California, USA",37.36566745,-120.42158888,edu,
+6faab65a009b36ee3f79d3e4afdf3cc84d57cd67,Adversarial Learning for Semi-supervised Semantic Segmentation,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+6f68ca4cc05ef8db344f0bf1ee394e93d519e77e,Matrix Factorization as Search,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+6f44303f9664a4ceabd0f4bc74cb3886aad5012f,An Integral Pose Regression System for the ECCV2018 PoseTrack Challenge,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+6f44303f9664a4ceabd0f4bc74cb3886aad5012f,An Integral Pose Regression System for the ECCV2018 PoseTrack Challenge,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+6fa0c206873dcc5812f7ea74a48bb4bf4b273494,Real-Time Mobile Facial Expression Recognition System -- A Case Study,University of Texas at Dallas,University of Texas at Dallas,"University of Texas at Dallas, Richardson, Dallas County, Texas, 78080, USA",32.98207990,-96.75662780,edu,
+6f9824c5cb5ac08760b08e374031cbdabc953bae,Unconstrained human identification using comparative facial soft biometrics,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+6fc1e886659838b2ca08dbaca291420785fd51bd,Total Capture: 3D Human Pose Estimation Fusing Video and Inertial Sensors,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+6f491fcff042991767a8d5c3a919ce169e0e65f0,Dual-Agent Deep Reinforcement Learning for Deformable Face Tracking,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+6fed504da4e192fe4c2d452754d23d3db4a4e5e3,Learning Deep Features via Congenerous Cosine Loss for Person Recognition,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+6fee701352f0f5c4abea3e918ddcf078243253cc,Alcohol and Remembering Sexual,Loughborough University,Loughborough University,"Computer Science, University Road, Charnwood, Leicestershire, East Midlands, England, LE11 3TP, UK",52.76635770,-1.22924610,edu,
+6fbc58272384ede0efa72753d78f1ef6db381ad7,Robust Face Recognition with Deeply Normalized Depth Images,"Sichuan University, Chengdu","Sichuan Univ., Chengdu","四川大学(华西校区), 校东路, 武侯区, 武侯区 (Wuhou), 成都市 / Chengdu, 四川省, 610014, 中国",30.64276900,104.06751175,edu,
+6fb33358bc7e1a73e88b4a87fb0962366ab959c9,Fuzzy 3D Face Ethnicity Categorization,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+6f26ab7edd971148723d9b4dc8ddf71b36be9bf7,Differences in Abundances of Cell-Signalling Proteins in Blood Reveal Novel Biomarkers for Early Detection Of Clinical Alzheimer's Disease,University of Newcastle,University of Newcastle,"University of Newcastle Central Coast Campus, Technology Bridge, Ourimbah, Central Coast, NSW, 2258, Australia",-33.35788990,151.37834708,edu,
+6f73807e309b262c5761c7a73c6a5609679f9f02,Shadow and Specular Removal by Photometric Linearization based on PCA with Outlier Exclusion,Osaka University,Osaka University,"大阪大学清明寮, 服部西町四丁目, 豊中市, 大阪府, 近畿地方, 日本",34.80809035,135.45785218,edu,
+6f7a8b3e8f212d80f0fb18860b2495be4c363eac,Creating Capsule Wardrobes from Fashion Images,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+6f7a8b3e8f212d80f0fb18860b2495be4c363eac,Creating Capsule Wardrobes from Fashion Images,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+6f640d448f00321b9b3bddb3a787cacd2f45cd1a,Stereo 3D Object Trajectory Reconstruction,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+6ff9b66aec16d84b1133850e7e8ce188a5a9a7f4,Do-gooder derogation in children: the social costs of generosity,Yale University,Yale University,"Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA",41.25713055,-72.98966960,edu,
+6ff9b66aec16d84b1133850e7e8ce188a5a9a7f4,Do-gooder derogation in children: the social costs of generosity,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+6ff9b66aec16d84b1133850e7e8ce188a5a9a7f4,Do-gooder derogation in children: the social costs of generosity,Yale University,Yale University,"Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA",41.25713055,-72.98966960,edu,
+6fac6fb9b2fff94e2babc4906646cf6427c591a0,PKU-NEC @ TRECVid 2011 SED: Sequence-Based Event Detection in Surveillance Video*,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+6f0900a7fe8a774a1977c5f0a500b2898bcbe149,Quotient Based Multiresolution Image Fusion of Thermal and Visual Images Using Daubechies Wavelet Transform for Human Face Recognition,Jadavpur University,Jadavpur University,"Jadavpur University, Chingrighata Flyover, Basani Devi Colony, Kolkata, Hāora, West Bengal, 700098, India",22.56115370,88.41310194,edu,
+6fea198a41d2f6f73e47f056692f365c8e6b04ce,Video Captioning with Boundary-aware Hierarchical Language Decoding and Joint Video Prediction,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+6fea198a41d2f6f73e47f056692f365c8e6b04ce,Video Captioning with Boundary-aware Hierarchical Language Decoding and Joint Video Prediction,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+6fea198a41d2f6f73e47f056692f365c8e6b04ce,Video Captioning with Boundary-aware Hierarchical Language Decoding and Joint Video Prediction,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+6fea198a41d2f6f73e47f056692f365c8e6b04ce,Video Captioning with Boundary-aware Hierarchical Language Decoding and Joint Video Prediction,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+6fbb179a4ad39790f4558dd32316b9f2818cd106,Input Aggregated Network for Face Video Representation,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+6f84e61f33564e5188136474f9570b1652a0606f,Dual Motion GAN for Future-Flow Embedded Video Prediction,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+6f4ec006b6b9da4982169adea2914aa3d14ee753,Adversarial Robustness: Softmax versus Openmax,"University of Colorado, Colorado Springs",University of Colorado Colorado Springs,"Main Hall, The Spine, Colorado Springs, El Paso County, Colorado, 80907, USA",38.89207560,-104.79716389,edu,
+6f70e85442959079bfb67b925c660fe86cb4ba24,Person Re-Identification with Correspondence Structure Learning,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+6f70e85442959079bfb67b925c660fe86cb4ba24,Person Re-Identification with Correspondence Structure Learning,Zhengzhou University,Zhengzhou University,"科学大道, 中原区 (Zhongyuan), 郑州市 / Zhengzhou, 河南省, 450001, 中国",34.80881680,113.53526640,edu,
+6f70e85442959079bfb67b925c660fe86cb4ba24,Person Re-Identification with Correspondence Structure Learning,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+6f70e85442959079bfb67b925c660fe86cb4ba24,Person Re-Identification with Correspondence Structure Learning,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+6fda12c43b53c679629473806c2510d84358478f,A Training Model for Fuzzy Classification System,Islamic Azad University,Islamic Azad University,"دانشگاه آزاد اسلامی, همدان, بخش مرکزی شهرستان همدان, شهرستان همدان, استان همدان, ‏ایران‎",34.84529990,48.55962120,edu,
+6f813ccf106360cc9c3d6df849cc04d881d0a6e8,"360◦ User Profiling: Past, Future, and Applications",National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+6f813ccf106360cc9c3d6df849cc04d881d0a6e8,"360◦ User Profiling: Past, Future, and Applications",Facebook,Facebook,"250 Bryant St, Mountain View, CA 94041, USA",37.39367170,-122.08072620,company,"Facebook, Mountain View, CA"
+6fef65bd7287b57f0c3b36bf8e6bc987fd161b7d,Deep Discriminative Model for Video Classification,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+03a83517298203605b502648ded886fee5a7436e,"Extraction and recognition of periodically deforming objects by continuous, spatio-temporal shape description",University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+031716b430e4256c09d5b3559ca9f0be51cb30b6,ROAD: Reality Oriented Adaptation for Semantic Segmentation of Urban Scenes,ETH Zurich,"Computer Vision Laboratory, ETH Zurich, Zurich, Switzerland","Sternwartstrasse 7, 8092 Zürich, Switzerland",47.37723980,8.55216180,edu,
+036c41d67b49e5b0a578a401eb31e5f46b3624e0,The Tower Game Dataset: A multimodal dataset for analyzing social interaction predicates,SRI International,SRI International,"SRI International Building, West 1st Street, Menlo Park, San Mateo County, California, 94025, USA",37.45857960,-122.17560525,edu,
+036c41d67b49e5b0a578a401eb31e5f46b3624e0,The Tower Game Dataset: A multimodal dataset for analyzing social interaction predicates,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+03b03f5a301b2ff88ab3bb4969f54fd9a35c7271,Pillar Networks for action recognition,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+03ce2ff688f9b588b6f264ca79c6857f0d80ceae,Attention Clusters: Purely Attention Based Local Feature Integration for Video Classification,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+033c3114f4951d338e34af67e1699ef779ab258d,Prioritization of arbitrary faces associated to self: An EEG study,Monash University,Monash University,"Monash University, Mile Lane, Parkville, City of Melbourne, Victoria, 3000, Australia",-37.78397455,144.95867433,edu,
+03b99f5abe0e977ff4c902412c5cb832977cf18e,Of Gods and Goats: Weakly Supervised Learning of Figurative Art,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+0344f29da9641edc36bc4952e1f7a4bfd8dd9bb3,Facial expression at retrieval affects recognition of facial identity,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+0344f29da9641edc36bc4952e1f7a4bfd8dd9bb3,Facial expression at retrieval affects recognition of facial identity,Bournemouth University,Bournemouth University,"Bournemouth University, BU footpaths, Poole, South West England, England, BH10 4HX, UK",50.74223495,-1.89433739,edu,
+0344f29da9641edc36bc4952e1f7a4bfd8dd9bb3,Facial expression at retrieval affects recognition of facial identity,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+0344f29da9641edc36bc4952e1f7a4bfd8dd9bb3,Facial expression at retrieval affects recognition of facial identity,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+0344f29da9641edc36bc4952e1f7a4bfd8dd9bb3,Facial expression at retrieval affects recognition of facial identity,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+03167776e17bde31b50f294403f97ee068515578,Chapter 11. Facial Expression Analysis,IBM Thomas J. Watson Research Center,IBM Thomas J. Watson Research Center,"IBM Yorktown research lab, Adams Road, Millwood, Town of New Castle, Westchester County, New York, 10562, USA",41.21002475,-73.80407056,company,
+03167776e17bde31b50f294403f97ee068515578,Chapter 11. Facial Expression Analysis,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+03167776e17bde31b50f294403f97ee068515578,Chapter 11. Facial Expression Analysis,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+0353fe24ecd237f4d9ae4dbc277a6a67a69ce8ed,Discriminative Feature Representation for Person Re-identification by Batch-contrastive Loss,East China Normal University,East China Normal University,"华东师范大学, 3663, 中山北路, 曹家渡, 普陀区, 普陀区 (Putuo), 上海市, 200062, 中国",31.22849230,121.40211389,edu,
+0334a8862634988cc684dacd4279c5c0d03704da,FaceNet2ExpNet: Regularizing a Deep Face Recognition Net for Expression Recognition,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+032ede597491cfbdf7424d221bd74742b6707397,Spectral Latent Variable Models for Perceptual Inference,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+032ede597491cfbdf7424d221bd74742b6707397,Spectral Latent Variable Models for Perceptual Inference,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+032ede597491cfbdf7424d221bd74742b6707397,Spectral Latent Variable Models for Perceptual Inference,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+030c43389bafdfefb4d6c7db0d121d0335d71342,Unsupervised metric fusion by cross diffusion,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+030c43389bafdfefb4d6c7db0d121d0335d71342,Unsupervised metric fusion by cross diffusion,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+030ef31b51bd4c8d0d8f4a9a32b80b9192fe4c3f,Inhibition-Induced Forgetting Results from Resource Competition between Response Inhibition and Memory Encoding Processes.,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+030ef31b51bd4c8d0d8f4a9a32b80b9192fe4c3f,Inhibition-Induced Forgetting Results from Resource Competition between Response Inhibition and Memory Encoding Processes.,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+03f98c175b4230960ac347b1100fbfc10c100d0c,Supervised Descent Method and Its Applications to Face Alignment,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+0371b7cba37970f22040a10bd29219778dcc3947,Taming Social Tags: Computational Linguistic Analysis of Tags for Images in Museums,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+03264e2e2709d06059dd79582a5cc791cbef94b1,Convolutional Neural Networks for Facial Attribute-based Active Authentication on Mobile Devices,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+03264e2e2709d06059dd79582a5cc791cbef94b1,Convolutional Neural Networks for Facial Attribute-based Active Authentication on Mobile Devices,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+0385b65a4941899340ef59f605fb3e943d62330c,Representing 3D texture on mesh manifolds for retrieval and recognition applications,University of Florence,University of Florence,"Piazza di San Marco, 4, 50121 Firenze FI, Italy",43.77764260,11.25976500,edu,
+03dd7ca6fdf2f4785089e286969f7ee5ccea0a02,From interactive to semantic image segmentation,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+03fc466fdbc8a2efb6e3046fcc80e7cb7e86dc20,A real time system for model-based interpretation of the dynamics of facial expressions,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+03a721080a69be37db3a2d56c006c60f472b419d,Explaining Explanations: An Approach to Evaluating Interpretability of Machine Learning,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+03a24d15533dae78de78fd9d5f6c9050fb97f186,Pedestrian detection aided by scale-discriminative network,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+035886f58b550be140b1d4dbba0ea0479030589f,Trajectory bundle estimation For perception-driven planning,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+03ca829e8680ab4cdabd491b3b42639c58f4cdce,A graph-based algorithm for multi-target tracking with occlusion,University of South Carolina,University of South Carolina,"University of South Carolina, Wheat Street, Columbia, Richland County, South Carolina, 29205, USA",33.99282980,-81.02685168,edu,
+03eb382e04cca8cca743f7799070869954f1402a,CLEVR: A Diagnostic Dataset for Compositional Language and Elementary Visual Reasoning,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+032ab4966465facd284531865529b124ef173a0e,Web image prediction using multivariate point processes,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+032ab4966465facd284531865529b124ef173a0e,Web image prediction using multivariate point processes,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+032ab4966465facd284531865529b124ef173a0e,Web image prediction using multivariate point processes,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+03ea1c3f867703f840c0e65df86e09055ad6f774,Solving the Uncapacitated Facility Location Problem Using Message Passing Algorithms,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+0353903b504d6246edcdc6b2c7d32e59b5c0a863,Dynamic Processing Allocation in Video,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+03f98bfb129028b80ce98686c573830671ee1e3d,Examining Cooperation in Visual Dialog Models,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+03f98bfb129028b80ce98686c573830671ee1e3d,Examining Cooperation in Visual Dialog Models,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+03f98bfb129028b80ce98686c573830671ee1e3d,Examining Cooperation in Visual Dialog Models,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+03f98bfb129028b80ce98686c573830671ee1e3d,Examining Cooperation in Visual Dialog Models,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+03f98bfb129028b80ce98686c573830671ee1e3d,Examining Cooperation in Visual Dialog Models,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+03c48d8376990cff9f541d542ef834728a2fcda2,Temporal Action Localization in Untrimmed Videos via Multi-stage CNNs,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+0319332ded894bf1afe43f174f5aa405b49305f0,Shearlet Network-based Sparse Coding Augmented by Facial Texture Features for Face Recognition,"University of Sfax, Tunisia","REGIM-Labo: REsearch Groups in Intelligent Machines, University of Sfax, ENIS, BP 1173, Sfax, 3038, Tunisia","Université de Route de l'Aéroport Km 0.5 BP 1169 .3029 Sfax, Sfax, Tunisia",34.73610660,10.74272750,edu,"University of Sfax, Tunisia"
+0319332ded894bf1afe43f174f5aa405b49305f0,Shearlet Network-based Sparse Coding Augmented by Facial Texture Features for Face Recognition,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+03ce07616628ac7c7dac92ea714313b674217811,Deep Learning of Scene-Specific Classifier for Pedestrian Detection,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+03ce07616628ac7c7dac92ea714313b674217811,Deep Learning of Scene-Specific Classifier for Pedestrian Detection,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+0324a22f71927bee2a448f800287cde562dc2726,People detection in crowded scenes by context-driven label propagation,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+03593afd7976bae2c105277f61f335b64fc3cd19,Visual Discovery at Pinterest,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+03d89c8eac079df1ff9acbded0336352cdb04624,End-to-End Video Captioning with Multitask Reinforcement Learning,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+03d364f8545dbcf0d7240c5bb8dc39636c698ddb,Fusion of Head and Full-Body Detectors for Multi-Object Tracking,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+03baf00a3d00887dd7c828c333d4a29f3aacd5f5,Entropy Based Feature Selection for 3D Facial Expression Recognition,Eastern Mediterranean University,Eastern Mediterranean University,"Eastern Mediterranean University (EMU) - Stadium, Nehir Caddesi, Gazimağusa, Αμμόχωστος - Mağusa, Kuzey Kıbrıs, 99450, Κύπρος - Kıbrıs",35.14479945,33.90492318,edu,
+0359f7357ea8191206b9da45298902de9f054c92,Going deeper in facial expression recognition using deep neural networks,University of Denver,University of Denver,"University of Denver, Driscoll Bridge, Denver, Denver County, Colorado, 80208, USA",39.67665410,-104.96220300,edu,
+03563dfaf4d2cfa397d3c12d742e9669f4e95bab,Deep learning from temporal coherence in video,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+03c6a002268c066fd6947452533e6b316f8576a6,Toward Driving Scene Understanding: A Dataset for Learning Driver Behavior and Causal Reasoning,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+0369baf2366fca2f2afdf86efec4874dc8fad194,A scalable app for measuring autism risk behaviors in young children: A technical validity and feasibility study,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+0369baf2366fca2f2afdf86efec4874dc8fad194,A scalable app for measuring autism risk behaviors in young children: A technical validity and feasibility study,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+0369baf2366fca2f2afdf86efec4874dc8fad194,A scalable app for measuring autism risk behaviors in young children: A technical validity and feasibility study,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+0369baf2366fca2f2afdf86efec4874dc8fad194,A scalable app for measuring autism risk behaviors in young children: A technical validity and feasibility study,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+0369baf2366fca2f2afdf86efec4874dc8fad194,A scalable app for measuring autism risk behaviors in young children: A technical validity and feasibility study,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+03184ac97ebf0724c45a29ab49f2a8ce59ac2de3,Evaluation of output embeddings for fine-grained image classification,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+03184ac97ebf0724c45a29ab49f2a8ce59ac2de3,Evaluation of output embeddings for fine-grained image classification,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+0389a3b0fcdb4c244628e603ffaff620f6575bfc,Incorporating Deep Visual Features into Multiobjective based Multi-view Search Results Clustering,Dublin City University,DUBLIN CITY UNIVERSITY,"Dublin City University Glasnevin Campus, Lower Car Park, Wad, Whitehall A ED, Dublin 9, Dublin, County Dublin, Leinster, D09 FW22, Ireland",53.38522185,-6.25740874,edu,
+0389a3b0fcdb4c244628e603ffaff620f6575bfc,Incorporating Deep Visual Features into Multiobjective based Multi-view Search Results Clustering,Dublin City University,DUBLIN CITY UNIVERSITY,"Dublin City University Glasnevin Campus, Lower Car Park, Wad, Whitehall A ED, Dublin 9, Dublin, County Dublin, Leinster, D09 FW22, Ireland",53.38522185,-6.25740874,edu,
+03bd58a96f635059d4bf1a3c0755213a51478f12,Smoothed Low Rank and Sparse Matrix Recovery by Iteratively Reweighted Least Squares Minimization,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+03bd58a96f635059d4bf1a3c0755213a51478f12,Smoothed Low Rank and Sparse Matrix Recovery by Iteratively Reweighted Least Squares Minimization,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+0382a1301094c6ba60b18ffa8d12da6ca0863339,Online Learned Discriminative Part-Based Appearance Models for Multi-human Tracking,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+03bf59f6db62b5da617e42913e9cbb1e58b79f28,Automatic discovery of groups of objects for scene understanding,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+03bf59f6db62b5da617e42913e9cbb1e58b79f28,Automatic discovery of groups of objects for scene understanding,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+03af78f067ed1f6ea0108a4d2ab7120e7ef852ac,Strong supervision from weak annotation: Interactive training of deformable part models,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+03af78f067ed1f6ea0108a4d2ab7120e7ef852ac,Strong supervision from weak annotation: Interactive training of deformable part models,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+0310d31020ae59bf3d6ac61b6206dfc0e79b4efe,A Differential Approach for Gaze Estimation with Calibration,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+03d10c88aebd7aabe603d455c7bafa9231c7cf51,Hyperconnectivity of the Right Posterior Temporo-parietal Junction Predicts Social Difficulties in Boys with Autism Spectrum Disorder.,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+03d10c88aebd7aabe603d455c7bafa9231c7cf51,Hyperconnectivity of the Right Posterior Temporo-parietal Junction Predicts Social Difficulties in Boys with Autism Spectrum Disorder.,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+03fe3d031afdcddf38e5cc0d908b734884542eeb,Engagement with Artificial Intelligence through Natural Interaction Models,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+03fe3d031afdcddf38e5cc0d908b734884542eeb,Engagement with Artificial Intelligence through Natural Interaction Models,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+03fe3d031afdcddf38e5cc0d908b734884542eeb,Engagement with Artificial Intelligence through Natural Interaction Models,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+9b9f3ec91e8ba185b0c7fd7545b0721e0cba9ba7,Regularity Guaranteed Human Pose Correction,Shanghai University,Shanghai University,"上海大学, 锦秋路, 大场镇, 宝山区 (Baoshan), 上海市, 201906, 中国",31.32235655,121.38400941,edu,
+9bbc952adb3e3c6091d45d800e806d3373a52bac,Learning Visual Classifiers using Human-centric Annotations,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+9b9b6d34deebb534de66017381be7578e13b761d,"Submitted to the Alfred P . Sloan School of Management in Partial Fulfillment of the Requirements for the Degree of DOCTOR OF PHILOSOPHY IN MANAGEMENT at the MASSACHUSETTS INSTITUTE OF TECHNOLOGY February , 2007",MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+9be94fa0330dd493f127d51e4ef7f9fd64613cfc,Effects of pose and image resolution on automatic face recognition,North Dakota State University,North Dakota State University,"North Dakota State University, 15th Avenue North, Fargo, Cass County, North Dakota, 58102, USA",46.89715500,-96.81827603,edu,
+9be94fa0330dd493f127d51e4ef7f9fd64613cfc,Effects of pose and image resolution on automatic face recognition,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+9bd35145c48ce172b80da80130ba310811a44051,Face Detection with End-to-End Integration of a ConvNet and a 3D Model,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+9bd35145c48ce172b80da80130ba310811a44051,Face Detection with End-to-End Integration of a ConvNet and a 3D Model,North Carolina State University,North Carolina State University,"North Carolina State University, Oval Drive, West Raleigh, Raleigh, Wake County, North Carolina, 27695, USA",35.77184965,-78.67408695,edu,
+9ba3e2b8b678910c4fdf379c278dbc007c19aa38,Face Verification via ECOC,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+9bd9050c53d90dfa86cb22501812afe6fc897406,Fine-Grained and Layered Object Recognition,Xi'an Jiaotong University,Xi'an Jiaotong University,"西安交通大学兴庆校区, 文治路, 乐居场, 碑林区 (Beilin), 西安市, 陕西省, 710048, 中国",34.24749490,108.97898751,edu,
+9bd9050c53d90dfa86cb22501812afe6fc897406,Fine-Grained and Layered Object Recognition,Kyoto University,Kyoto University,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.02749960,135.78154513,edu,
+9b0489f2d5739213ef8c3e2e18739c4353c3a3b7,Visual Data Augmentation through Learning,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+9b0489f2d5739213ef8c3e2e18739c4353c3a3b7,Visual Data Augmentation through Learning,Middlesex University,Middlesex University,"Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK",51.59029705,-0.22963221,edu,
+9b2a272d4526b3eeeda0beb0d399074d5380a2b3,Learning to Align Images Using Weak Geometric Supervision,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+9b2a272d4526b3eeeda0beb0d399074d5380a2b3,Learning to Align Images Using Weak Geometric Supervision,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+9b928c0c7f5e47b4480cb9bfdf3d5b7a29dfd493,Close the loop: Joint blind image restoration and recognition with sparse representation prior,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+9b928c0c7f5e47b4480cb9bfdf3d5b7a29dfd493,Close the loop: Joint blind image restoration and recognition with sparse representation prior,Beckman Institute,Beckman Institute,"Beckman Institute, The Presidents' Walk, Urbana, Champaign County, Illinois, 61801-2341, USA",40.11571585,-88.22750772,edu,
+9b75cc65a03e5d817c89d71b24404e791f79eb6a,TextureGAN: Controlling Deep Image Synthesis with Texture Patches,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+9be696618cfcea90879747a8512f21b10cceac48,Structural Consistency and Controllability for Diverse Colorization,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+9bfe2732a905cb0aab370d1146a29b9d4129321d,Social Judgments Are Influenced by Both Facial Expression and Direction of Eye Gaze,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+9bfe2732a905cb0aab370d1146a29b9d4129321d,Social Judgments Are Influenced by Both Facial Expression and Direction of Eye Gaze,University of Newcastle,University of Newcastle,"University of Newcastle Central Coast Campus, Technology Bridge, Ourimbah, Central Coast, NSW, 2258, Australia",-33.35788990,151.37834708,edu,
+9bd7f95a4c752a44e96d2205ceb6fcefe9232c8b,Fine-grained Video Captioning for Sports Narrative,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+9bd36eff633c52c6f6e8ead009367f6b6c43f16f,"Image tag refinement towards low-rank, content-tag prior and error sparsity",National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+9bd36eff633c52c6f6e8ead009367f6b6c43f16f,"Image tag refinement towards low-rank, content-tag prior and error sparsity","University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+9bd36eff633c52c6f6e8ead009367f6b6c43f16f,"Image tag refinement towards low-rank, content-tag prior and error sparsity",Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+9bdc406ad9e9fc0ce356e6d0e53780534f418849,DeepDiary: Automatic Caption Generation for Lifelogging Image Streams,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+9b2c359c36c38c289c5bacaeb5b1dd06b464f301,Dense Face Alignment,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+9b7878eb0681d107a3892c2a166beeb6c0e2d36f,A vision-grounded dataset for predicting typical locations for verbs,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+9b2607d4a8f7252bf13628afa1b5e5cb55ca65a6,Seeing the face through the eyes: a developmental perspective on face expertise.,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+9b8d73e83c111268745311e03f0c0f7f6c92c9f0,Incremental Convolutional Neural Network Training,Tampere University of Technology,Tampere University of Technology,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi",61.44964205,23.85877462,edu,
+9b7c922d00a6bebc60607168ebbda2ebdc703db7,Detection and Recognition in Natural Scene Images,Vienna University of Technology,Vienna University of Technology,"TU Wien, Hauptgebäude, Hoftrakt, Freihausviertel, KG Wieden, Wieden, Wien, 1040, Österreich",48.19853965,16.36986168,edu,
+9b1bcef8bfef0fb5eb5ea9af0b699aa0534fceca,Position-Squeeze and Excitation Block for Facial Attribute Analysis,East China Normal University,East China Normal University,"华东师范大学, 3663, 中山北路, 曹家渡, 普陀区, 普陀区 (Putuo), 上海市, 200062, 中国",31.22849230,121.40211389,edu,
+9b07084c074ba3710fee59ed749c001ae70aa408,Computational Models of Face Perception.,Ohio State University,The Ohio State University,"The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA",40.00471095,-83.02859368,edu,
+9babe1957e56fadebb32a64338d54fce794c7094,An Enhanced Sparse Representation Strategy for Signal Classification,University of Delaware,University of Delaware,"University of Delaware, South College Avenue, Newark, New Castle County, Delaware, 19713, USA",39.68103280,-75.75401840,edu,
+9be653e1bc15ef487d7f93aad02f3c9552f3ee4a,Computer Vision for Head Pose Estimation: Review of a Competition,Tampere University of Technology,Tampere University of Technology,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi",61.44964205,23.85877462,edu,
+9bfda2f5144867d5712a8fcbea9dd5fa69d3312b,Image Super-Resolution Using VDSR-ResNeXt and SRCGAN,University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.54724732,edu,
+9b246c88a0435fd9f6d10dc88f47a1944dd8f89e,PiCoDes: Learning a Compact Code for Novel-Category Recognition,Dartmouth College,Dartmouth College,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA",43.70479270,-72.29259090,edu,
+9b246c88a0435fd9f6d10dc88f47a1944dd8f89e,PiCoDes: Learning a Compact Code for Novel-Category Recognition,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+9b93406f3678cf0f16451140ea18be04784faeee,A Bayesian Approach to Alignment-Based Image Hallucination,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+9bac3639b2671dcdbdbbd36e8e9022d7334a3796,VSE++: Improving Visual-Semantic Embeddings with Hard Negatives,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+9bda68ea52bddf5365e3230761c95424ff1ddec5,SSP: Supervised Sparse Projections for Large-Scale Retrieval in High Dimensions,University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+9bd0c4082a13de0be6c7daba999b55061011f3a5,1-2011 Image Matching with Distinctive Visual Vocabulary,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+9bd0c4082a13de0be6c7daba999b55061011f3a5,1-2011 Image Matching with Distinctive Visual Vocabulary,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+9bd0c4082a13de0be6c7daba999b55061011f3a5,1-2011 Image Matching with Distinctive Visual Vocabulary,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+9bd0c4082a13de0be6c7daba999b55061011f3a5,1-2011 Image Matching with Distinctive Visual Vocabulary,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+9bd0c4082a13de0be6c7daba999b55061011f3a5,1-2011 Image Matching with Distinctive Visual Vocabulary,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+9b684e2e2bb43862f69b12c6be94db0e7a756187,Differentiating Objects by Motion: Joint Detection and Tracking of Small Flying Objects,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+9b684e2e2bb43862f69b12c6be94db0e7a756187,Differentiating Objects by Motion: Joint Detection and Tracking of Small Flying Objects,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+9b684e2e2bb43862f69b12c6be94db0e7a756187,Differentiating Objects by Motion: Joint Detection and Tracking of Small Flying Objects,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+9b401be9fe35b759ae48c9dd5e9b7e4382511a55,Incremental learning of object detectors using a visual shape alphabet,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+9b401be9fe35b759ae48c9dd5e9b7e4382511a55,Incremental learning of object detectors using a visual shape alphabet,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+9b48372c7adb3780873df7c6d4134f93c2b0aebb,Robust multilinear principal component analysis,Kyushu University,Kyushu University,"伊都ゲストハウス, 桜井太郎丸線, 西区, 福岡市, 福岡県, 九州地方, 819−0395, 日本",33.59914655,130.22359848,edu,
+9ea223c070ec9a00f4cb5ca0de35d098eb9a8e32,Exploring Temporal Preservation Networks for Precise Temporal Action Localization,"National University of Defense Technology, China","National University of Defence Technology, Changsha 410000, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.22902090,112.99483204,edu,
+9e42d44c07fbd800f830b4e83d81bdb9d106ed6b,Learning Discriminative Aggregation Network for Video-Based Face Recognition,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+9e97dc079139f009c5c98617c28825dca0d70ae3,A survey: face recognition techniques under partial occlusion,"COMSATS Institute of Information Technology, Lahore",COMSATS Institute of Information Technology,"COMSATS Institute of Information Technology, Ali Akbar Road, Dawood Residency, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54700, ‏پاکستان‎",31.40063320,74.21372960,edu,
+9e48808f283598edb5a78ec2590a35ff163cc8ed,Stochastic Segmentation Trees for Multiple Ground Truths,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+9e1c3c7f1dce662a877727a821bdf41c5cd906bb,Learning Disentangling and Fusing Networks for Face Completion Under Structured Occlusions,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+9eb86327c82b76d77fee3fd72e2d9eff03bbe5e0,Max-Margin Invariant Features from Transformed Unlabelled Data,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+9e3d697dfd0364314aac51522ce3778bc542b17a,Gabor-Based Kernel Partial-Least-Squares Discrimination Features for Face Recognition,University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.05015580,14.46907327,edu,
+9ea337ffdf652803c805074d61b2d6a8d7040e95,Ph.D. DISSERTATION PATTERN RECOGNITION USING COMPOSITE FEATURES,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+9e3db0bd1dfa9e033a2a055a9ac03728cd28e930,Coupled Marginalized Auto-Encoders for Cross-Domain Multi-View Learning,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+9ea25c97f3dee29f7861ab4110ca90b4ec0af01b,Robust Out-of-Sample Data Recovery,Anhui University,Anhui University,"安徽大学(磬苑校区), 111, 九龙路, 弘泰苑, 合肥国家级经济技术开发区, 芙蓉社区, 合肥经济技术开发区, 合肥市区, 合肥市, 安徽省, 230601, 中国",31.76909325,117.17795091,edu,
+9ea25c97f3dee29f7861ab4110ca90b4ec0af01b,Robust Out-of-Sample Data Recovery,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+9ed7d774684a1770445c1c53e276011a8364b9e2,Uncovering Temporal Context for Video Question and Answering,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+9ed7d774684a1770445c1c53e276011a8364b9e2,Uncovering Temporal Context for Video Question and Answering,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+9ed943f143d2deaac2efc9cf414b3092ed482610,Independent Subspace of Dynamic Gabor Features for Facial Expression Classification,Japan Advanced Institute of Science and Technology,Japan Advanced Institute of Science and Technology,"JAIST (北陸先端科学技術大学院大学), 石川県道55号小松辰口線, Ishikawa Science Park, 能美市, 石川県, 中部地方, 923-1206, 日本",36.44429490,136.59285870,edu,
+9e1c3b8b1653337094c1b9dba389e8533bc885b0,Demographic Classification with Local Binary Patterns,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+9e8f9786ea868f042f7d984cddbd9a6dc23969ee,Robust Clothing-Invariant Gait Recognition,University of Warwick,University of Warwick,"University of Warwick, University Road, Kirby Corner, Cannon Park, Coventry, West Midlands Combined Authority, West Midlands, England, CV4 7AL, UK",52.37931310,-1.56042520,edu,
+9e1a21c9af589fc2148ce96aa93c9df4a9e5ae02,Undoing the Damage of Dataset Bias,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+9e1a21c9af589fc2148ce96aa93c9df4a9e5ae02,Undoing the Damage of Dataset Bias,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+9e1a21c9af589fc2148ce96aa93c9df4a9e5ae02,Undoing the Damage of Dataset Bias,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+9e1a21c9af589fc2148ce96aa93c9df4a9e5ae02,Undoing the Damage of Dataset Bias,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+9e1a21c9af589fc2148ce96aa93c9df4a9e5ae02,Undoing the Damage of Dataset Bias,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+9e1a21c9af589fc2148ce96aa93c9df4a9e5ae02,Undoing the Damage of Dataset Bias,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+9e1a21c9af589fc2148ce96aa93c9df4a9e5ae02,Undoing the Damage of Dataset Bias,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+9ec74521d03d41f4157a458513c79017dd066a38,Semantic Stixels fusing LIDAR for Scene Perception,"KTH Royal Institute of Technology, Stockholm","KTH Royal Institute of Technology, Stockholm","KTH, Teknikringen, Lärkstaden, Norra Djurgården, Östermalms stadsdelsområde, Sthlm, Stockholm, Stockholms län, Svealand, 114 28, Sverige",59.34986645,18.07063213,edu,
+9e8f95503bebdfb623d4e5b51347f72677d89d99,Multi-dimensional local binary pattern texture descriptors and their application for medical image analysis,Loughborough University,Loughborough University,"Computer Science, University Road, Charnwood, Leicestershire, East Midlands, England, LE11 3TP, UK",52.76635770,-1.22924610,edu,
+9eb891b89443bdfc8434e4c9e08dda0253fd242c,Evaluation of Probabilistic Occupancy Map for Player Tracking in Team Sports,Czech Technical University,Czech Technical University,"České vysoké učení technické v Praze, Resslova, Nové Město, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 11121, Česko",50.07642960,14.41802312,edu,
+9e384187941e939453fc0c7585c1a8e76d535c02,A Robust Approach to Automatic Iris Localization,Myongji University,Myongji University,"명지대, 금학로, 역북동, 처인구, 용인시, 경기, 17144, 대한민국",37.23810230,127.19034310,edu,
+9e504d225a566fc57ff203f82cb1cb56b902a7f5,Gradient Local Auto-Correlations and Extreme Learning Machine for Depth-Based Activity Recognition,University of Texas at Dallas,University of Texas at Dallas,"University of Texas at Dallas, Richardson, Dallas County, Texas, 78080, USA",32.98207990,-96.75662780,edu,
+9e504d225a566fc57ff203f82cb1cb56b902a7f5,Gradient Local Auto-Correlations and Extreme Learning Machine for Depth-Based Activity Recognition,Changzhou University,"Changzhou University, Changzhou, China","1 Gehu Middle Rd, Wujin Qu, Changzhou Shi, Jiangsu Sheng, China",31.68423700,119.95514100,edu,
+9e504d225a566fc57ff203f82cb1cb56b902a7f5,Gradient Local Auto-Correlations and Extreme Learning Machine for Depth-Based Activity Recognition,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+9e1b3cf334aead8d2c29747f6ee7d1291dd83708,Netizen-Style Commenting on Fashion Photos: Dataset and Diversity Measures,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+9ef9046cc26946acedda3f515d9149a76e19cd6e,A Unified Multi-Faceted Video Summarization System,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+9ed4ad41cbad645e7109e146ef6df73f774cd75d,RPM: Random Points Matching for Pair wise Face-Similarity,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+9ed4ad41cbad645e7109e146ef6df73f774cd75d,RPM: Random Points Matching for Pair wise Face-Similarity,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+9e5378e7b336c89735d3bb15cf67eff96f86d39a,Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+9e5378e7b336c89735d3bb15cf67eff96f86d39a,Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+9e1a96e0fc5d8867e82e6262a8d9499b3ae806e5,Spoofing 2D face recognition systems with 3D masks,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+9e182e0cd9d70f876f1be7652c69373bcdf37fb4,Talking Face Generation by Adversarially Disentangled Audio-Visual Representation,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+9e8d87dc5d8a6dd832716a3f358c1cdbfa97074c,What makes an image popular?,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+9e4cd22ab92adcd74014709167b6cbb97baa3d1c,BEAR MOUSE DOG TIGER Compatibility function : Classification model : Inference function : Objective function : Optimization TRAINING TESTING Seen Objects Seen Labels Unseen Objects Unseen Labels Feature Embedding Semantic Embedding Feature Extraction Semantic Extraction,Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+9e8b20ac34f560ae12bb51f3e3713ea755d36c85,Learning to Write Stylized Chinese Characters by Reading a Handful of Examples,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+044d9a8c61383312cdafbcc44b9d00d650b21c70,300 Faces in-the-Wild Challenge: The First Facial Landmark Localization Challenge,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+044d9a8c61383312cdafbcc44b9d00d650b21c70,300 Faces in-the-Wild Challenge: The First Facial Landmark Localization Challenge,University of Lincoln,University of Lincoln,"University of Lincoln, Brayford Way, Whitton Park, New Boultham, Lincoln, Lincolnshire, East Midlands, England, LN6 7TS, UK",53.22853665,-0.54873472,edu,
+044d9a8c61383312cdafbcc44b9d00d650b21c70,300 Faces in-the-Wild Challenge: The First Facial Landmark Localization Challenge,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+04f0292d9a062634623516edd01d92595f03bd3f,Distribution-based iterative pairwise classification of emotions in the wild using LGBP-TOP,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+04f0292d9a062634623516edd01d92595f03bd3f,Distribution-based iterative pairwise classification of emotions in the wild using LGBP-TOP,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+04f0292d9a062634623516edd01d92595f03bd3f,Distribution-based iterative pairwise classification of emotions in the wild using LGBP-TOP,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+041115cb5509466f7449451709387268a008aba2,Teaching Machines to Understand Baseball Games: Large-Scale Baseball Video Database for Multiple Video Understanding Tasks,Yonsei University,Yonsei University,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.56004060,126.93692480,edu,
+04aca8c96971acce8ac4303bf514e83c87e692ce,A hand shape recognizer from simple sketches,University of Hong Kong,University of Hong Kong,"海洋科學研究所 The Swire Institute of Marine Science, 鶴咀道 Cape D'Aguilar Road, 鶴咀低電台 Cape D'Aguilar Low-Level Radio Station, 石澳 Shek O, 芽菜坑村 Nga Choy Hang Tsuen, 南區 Southern District, 香港島 Hong Kong Island, HK, 中国",22.20814690,114.25964115,edu,
+044e0d86e2db70d4c0b767bf0994913e90e105e3,Answer-Type Prediction for Visual Question Answering,Rochester Institute of Technology,Rochester Institute of Technology,"Rochester Institute of Technology (RIT), 1, Lomb Memorial Drive, Bailey, Henrietta Town, Monroe County, New York, 14623, USA",43.08250655,-77.67121663,edu,
+04522dc16114c88dfb0ebd3b95050fdbd4193b90,Minimum Bayes error features for visual recognition by sequential feature selection and extraction,University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+04522dc16114c88dfb0ebd3b95050fdbd4193b90,Minimum Bayes error features for visual recognition by sequential feature selection and extraction,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+0486214fb58ee9a04edfe7d6a74c6d0f661a7668,Patch-based probabilistic image quality assessment for face selection and improved video-based face recognition,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+043efe5f465704ced8d71a067d2b9d5aa5b59c29,Occlusion-aware 3D Morphable Face Models,University of Basel,University of Basel,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra",47.56126510,7.57529610,edu,
+040ec1bab630b4609cb55c3e0e2dbd4c3064d8c4,Detection of social signals for recognizing engagement in human-robot interaction,Kyoto University,Kyoto University,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.02749960,135.78154513,edu,
+04b2cddc8e04a02d685d6476f00d0d25d4dd5e72,The Scope and Limits of Simulation in Cognitive Models,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+04b2cddc8e04a02d685d6476f00d0d25d4dd5e72,The Scope and Limits of Simulation in Cognitive Models,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+04661729f0ff6afe4b4d6223f18d0da1d479accf,From Facial Parts Responses to Face Detection: A Deep Learning Approach,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+04661729f0ff6afe4b4d6223f18d0da1d479accf,From Facial Parts Responses to Face Detection: A Deep Learning Approach,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+043e7a08398b1d634fa2bf3ddb81942686effb30,Improved 3D Model Search for Facial Feature Location and Pose Estimation in 2D images,University of Manchester,University of Manchester,"University of Manchester - Main Campus, Brunswick Street, Curry Mile, Ardwick, Manchester, Greater Manchester, North West England, England, M13 9NR, UK",53.46600455,-2.23300881,edu,
+0493b82694d8754582bf54802c4dbf64586ab9c4,Symmetry-Factored Statistical Modelling of Craniofacial Shape,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+0471eb1882bb7f538b40a0f76c7073992e7bf213,X-GANs: Image Reconstruction Made Easy for Extreme Cases,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+04964e2697778dc843671c7764f0f912e46991ca,Are They Going to Cross? A Benchmark Dataset and Baseline for Pedestrian Crosswalk Behavior,York University,York University,"York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada",43.77439110,-79.50481085,edu,
+047cd38ebf2ce7eeb885f654ed64d405a0421fab,Face Detection Using Statistical and Multi-Resolution Texture Features,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+04b22b008669fa981602c7723b44cb4a5cb2d480,Facial responsiveness of psychopaths to the emotional expressions of others,Ulm University,Ulm University,"HNU, John-F.-Kennedy-Straße, Vorfeld, Wiley, Neu-Ulm, Landkreis Neu-Ulm, Schwaben, Bayern, 89231, Deutschland",48.38044335,10.01010115,edu,
+04c2cda00e5536f4b1508cbd80041e9552880e67,Hipster Wars: Discovering Elements of Fashion Styles,University of North Carolina at Chapel Hill,University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.91139710,-79.05045290,edu,
+04c2cda00e5536f4b1508cbd80041e9552880e67,Hipster Wars: Discovering Elements of Fashion Styles,Tohoku University,Tohoku University,"Tohoku University, 五橋通, 青葉区, 仙台市, 宮城県, 東北地方, 980-0811, 日本",38.25309450,140.87365930,edu,
+04ff69aa20da4eeccdabbe127e3641b8e6502ec0,Sequential Face Alignment via Person-Specific Modeling in the Wild,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+04ff69aa20da4eeccdabbe127e3641b8e6502ec0,Sequential Face Alignment via Person-Specific Modeling in the Wild,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+04ff69aa20da4eeccdabbe127e3641b8e6502ec0,Sequential Face Alignment via Person-Specific Modeling in the Wild,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+04ef0b28534cdac18a2059f73ecfe940d6bed277,Non-Linear Stationary Subspace Analysis -0.09cm with Application to Video Classification,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+046a694bbb3669f2ff705c6c706ca3af95db798c,Conditional Convolutional Neural Network for Modality-Aware Face Recognition,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+046a694bbb3669f2ff705c6c706ca3af95db798c,Conditional Convolutional Neural Network for Modality-Aware Face Recognition,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+0443b7a4372fb7bdcd69a0b55945f937c8b7d35b,Semi-supervised Coupled Dictionary Learning for Person Re-identification,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+0443b7a4372fb7bdcd69a0b55945f937c8b7d35b,Semi-supervised Coupled Dictionary Learning for Person Re-identification,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+040ab6a70bef710b61e3b6a183c2d81947ac8f88,"4D Cardiff Conversation Database (4D CCDb): a 4D database of natural, dyadic conversations",Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.48799610,-3.17969747,edu,
+048ff69503ea4937f10f69b1f29f655594253246,Isolating Sources of Disentanglement in Variational Autoencoders,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+0419726a00e16ea89868792ca94f5b1b262c5597,An analytical formulation of global occlusion reasoning for multi-target tracking,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+046865a5f822346c77e2865668ec014ec3282033,Discovering informative social subgraphs and predicting pairwise relationships from group photos,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+04df36ea27f14f96bb1b33d76103d1dee7c6e0ca,Blur invariant pattern recognition and registration in the Fourier domain,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+04eda7eee3e0282de50e54554f50870dd17defa1,How Hard Can It Be? Estimating the Difficulty of Visual Search in an Image,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+04be04189fe77a3bbd108b8c0ef78d63b0bd5118,EDeN: Ensemble of Deep Networks for Vehicle Classification,"University of California, Riverside","University of California, Riverside","University of California, Riverside, Linden Street, Riverside, Riverside County, California, 92521, USA",33.98071305,-117.33261035,edu,
+047bb1b1bd1f19b6c8d7ee7d0324d5ecd1a3efff,Unsupervised Training for 3D Morphable Model Regression,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+047bb1b1bd1f19b6c8d7ee7d0324d5ecd1a3efff,Unsupervised Training for 3D Morphable Model Regression,MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+0496d10bcdd29395846d05c2de711db62be10630,Grouplet: A structured image representation for recognizing human and object interactions,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+0480b71244b59ed13cfe844c8bac8883a0c40573,Efficient Second Order Multi-Target Tracking with Exclusion Constraints,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+0480b71244b59ed13cfe844c8bac8883a0c40573,Efficient Second Order Multi-Target Tracking with Exclusion Constraints,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+6a4ac9ac5ddfeeb8adcff1795eccd39de25a00c4,Composite Feature-Based Face Detection Using Skin Color Modeling and SVM Classification,Utah State University,Utah State University,"Utah State University, Champ Drive, Logan, Cache County, Utah, 84322, USA",41.74115040,-111.81223090,edu,
+6a4ac9ac5ddfeeb8adcff1795eccd39de25a00c4,Composite Feature-Based Face Detection Using Skin Color Modeling and SVM Classification,Utah State University,Utah State University,"Utah State University, Champ Drive, Logan, Cache County, Utah, 84322, USA",41.74115040,-111.81223090,edu,
+6ab6c1334c70db6e7705455a2db359e8d83042f9,Rationale for a 3D heterogeneous multi-core processor,North Carolina State University,North Carolina State University,"North Carolina State University, Oval Drive, West Raleigh, Raleigh, Wake County, North Carolina, 27695, USA",35.77184965,-78.67408695,edu,
+6a67e6fbbd9bcd3f724fe9e6cecc9d48d1b6ad4d,Cooperative Learning with Visual Attributes,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+6a2b83c4ae18651f1a3496e48a35b0cd7a2196df,Top Rank Supervised Binary Coding for Visual Search,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+6a2b83c4ae18651f1a3496e48a35b0cd7a2196df,Top Rank Supervised Binary Coding for Visual Search,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+6aaa77e241fe55ae0c4ad281e27886ea778f9e23,F-Formation Detection: Individuating Free-Standing Conversational Groups in Images,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+6a5fe819d2b72b6ca6565a0de117c2b3be448b02,Supervised and Projected Sparse Coding for Image Classification,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+6afeb764ee97fbdedfa8f66810dfc22feae3fa1f,Robust Principal Component Analysis with Complex Noise,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+6afeb764ee97fbdedfa8f66810dfc22feae3fa1f,Robust Principal Component Analysis with Complex Noise,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+6aa61d28750629febe257d1cb69379e14c66c67f,Kernel Hebbian Algorithm for Iterative Kernel Principal Component Analysis,Max Planck Institute for Biological Cybernetics,Max Planck Institute for Biological Cybernetics,"Max-Planck-Institut für Biologische Kybernetik, 8, Max-Planck-Ring, Max-Planck-Institut, Wanne, Tübingen, Landkreis Tübingen, Regierungsbezirk Tübingen, Baden-Württemberg, 72076, Deutschland",48.53691250,9.05922533,edu,
+6ae9f4dc7433ba3433b39ee932b22fd57922c2ee,Using Facially Expressive Robots to Calibrate Clinical Pain Perception,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+6ae9f4dc7433ba3433b39ee932b22fd57922c2ee,Using Facially Expressive Robots to Calibrate Clinical Pain Perception,University of Louisville,University of Louisville,"University of Louisville, South Brook Street, Louisville, Jefferson County, Kentucky, 40208, USA",38.21675650,-85.75725023,edu,
+6ae96f68187f1cdb9472104b5431ec66f4b2470f,Improving Task Performance in an Affect-mediated Computing System,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+6ae96f68187f1cdb9472104b5431ec66f4b2470f,Improving Task Performance in an Affect-mediated Computing System,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+6ae4f3ff909ae6171ad54e8c5d942d1c83706e45,Multi-Label Zero-Shot Learning with Structured Knowledge Graphs,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+6ae4f3ff909ae6171ad54e8c5d942d1c83706e45,Multi-Label Zero-Shot Learning with Structured Knowledge Graphs,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+6a4419ce2338ea30a570cf45624741b754fa52cb,Statistical transformer networks: learning shape and appearance models via self supervision,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+6af65e2a1eba6bd62843e7bf717b4ccc91bce2b8,A New Weighted Sparse Representation Based on MSLBP and Its Application to Face Recognition,Jiangnan University,Jiangnan University,"江南大学站, 蠡湖大道, 滨湖区, 南场村, 滨湖区 (Binhu), 无锡市 / Wuxi, 江苏省, 214121, 中国",31.48542550,120.27395810,edu,
+6a657995b02bc9dee130701138ea45183c18f4ae,The Timing of Facial Motion in posed and Spontaneous Smiles,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+6a0368b4e132f4aa3bbdeada8d894396f201358a,One-Class Multiple Instance Learning via Robust PCA for Common Object Discovery,Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+6a0368b4e132f4aa3bbdeada8d894396f201358a,One-Class Multiple Instance Learning via Robust PCA for Common Object Discovery,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+6ab33fa51467595f18a7a22f1d356323876f8262,Ordinal hyperplanes ranker with cost sensitivities for age estimation,Institute of Information Science,Institute of Information Science,"資訊科學研究所, 數理大道, 中研里, 南港子, 南港區, 臺北市, 11574, 臺灣",25.04107280,121.61475620,edu,
+6ab33fa51467595f18a7a22f1d356323876f8262,Ordinal hyperplanes ranker with cost sensitivities for age estimation,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+6ab33fa51467595f18a7a22f1d356323876f8262,Ordinal hyperplanes ranker with cost sensitivities for age estimation,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+6ab30c67fa966b6bcee61de6294245e2dd8604d8,3D Face Recognition Based on G-H Shape Variation,Hong Kong University of Science and Technology,Hong Kong University of Science and Technology,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.33863040,114.26203370,edu,
+6a2ac4f831bd0f67db45e7d3cdaeaaa075e7180a,Excitation Dropout: Encouraging Plasticity in Deep Neural Networks,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+6a8d382d34143143e98b040e006f473bd450502d,Object Relation Detection Based on One-shot Learning,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+6a4ebd91c4d380e21da0efb2dee276897f56467a,HOG active appearance models,University of Lincoln,University of Lincoln,"University of Lincoln, Brayford Way, Whitton Park, New Boultham, Lincoln, Lincolnshire, East Midlands, England, LN6 7TS, UK",53.22853665,-0.54873472,edu,
+6a7efb6f3471a2aff702d5e8080e066636335de4,Sparsely Grouped Multi-Task Generative Adversarial Networks for Facial Attribute Manipulation,Shandong University,Shandong University,"山东大学, 泰安街, 鳌山卫街道, 即墨区, 青岛市, 山东省, 266200, 中国",36.36934730,120.67381800,edu,
+6a7efb6f3471a2aff702d5e8080e066636335de4,Sparsely Grouped Multi-Task Generative Adversarial Networks for Facial Attribute Manipulation,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+6a7efb6f3471a2aff702d5e8080e066636335de4,Sparsely Grouped Multi-Task Generative Adversarial Networks for Facial Attribute Manipulation,Shandong University,Shandong University,"山东大学, 泰安街, 鳌山卫街道, 即墨区, 青岛市, 山东省, 266200, 中国",36.36934730,120.67381800,edu,
+6a7efb6f3471a2aff702d5e8080e066636335de4,Sparsely Grouped Multi-Task Generative Adversarial Networks for Facial Attribute Manipulation,Shandong University,Shandong University,"山东大学, 泰安街, 鳌山卫街道, 即墨区, 青岛市, 山东省, 266200, 中国",36.36934730,120.67381800,edu,
+6a7efb6f3471a2aff702d5e8080e066636335de4,Sparsely Grouped Multi-Task Generative Adversarial Networks for Facial Attribute Manipulation,Shandong University,Shandong University,"山东大学, 泰安街, 鳌山卫街道, 即墨区, 青岛市, 山东省, 266200, 中国",36.36934730,120.67381800,edu,
+32c801cb7fbeb742edfd94cccfca4934baec71da,Multi-source Multi-scale Counting in Extremely Dense Crowd Images,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+32c801cb7fbeb742edfd94cccfca4934baec71da,Multi-source Multi-scale Counting in Extremely Dense Crowd Images,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+32d8e555441c47fc27249940991f80502cb70bd5,Machine Learning Models that Remember Too Much,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+322eff0dbf5d7dc18688be29ad5fd7eb8c8d6d54,SLTP: A Fast Descriptor for People Detection in Depth Images,Shenzhen University,Shenzhen University,"深圳大学, 3688, 南海大道, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518060, 中国",22.53521465,113.93159110,edu,
+322eff0dbf5d7dc18688be29ad5fd7eb8c8d6d54,SLTP: A Fast Descriptor for People Detection in Depth Images,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+3294e27356c3b1063595885a6d731d625b15505a,Illumination Face Spaces Are Idiosyncratic,Colorado State University,Colorado State University,"Colorado State University, West Pitkin Street, Woodwest, Fort Collins, Larimer County, Colorado, 80526-2002, USA",40.57093580,-105.08655256,edu,
+32ac4e5a8dee203c0b99e15484893fd9d62de43a,Hallucinating Faces: Global Linear Modal Based Super-Resolution and Position Based Residue Compensation,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+320fa825e86f3b74ba3b3ebaef14e1186784f1ec,Exploit the Unknown Gradually : One-Shot Video-Based Person Re-Identification by Stepwise Learning,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+320fa825e86f3b74ba3b3ebaef14e1186784f1ec,Exploit the Unknown Gradually : One-Shot Video-Based Person Re-Identification by Stepwise Learning,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+32b7b671c786aa74f7d8f9817b12b3a59c0b84c2,A Bayesian Model of Grounded Color Semantics,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+32b7b671c786aa74f7d8f9817b12b3a59c0b84c2,A Bayesian Model of Grounded Color Semantics,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+32cd02519928aa91dca18074778a59b2cba19765,Forecasting crowd dynamics through coarse-grained data analysis,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+32cd02519928aa91dca18074778a59b2cba19765,Forecasting crowd dynamics through coarse-grained data analysis,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+321d1f8d13075275b207dd048e9b655aa8846d57,Thermal Tracking of Sports Players,Aalborg University,Aalborg University,"AAU, Pontoppidanstræde, Sønder Tranders, Aalborg, Aalborg Kommune, Region Nordjylland, 9220, Danmark",57.01590275,9.97532827,edu,
+3240c9359061edf7a06bfeb7cc20c103a65904c2,PPR-FCN: Weakly Supervised Visual Relation Detection via Parallel Pairwise R-FCN,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+3210666306517c4ef9a4c1a4463c728b0e3aeb72,Angle Tree: Nearest Neighbor Search in High Dimensions with Low Intrinsic Dimensionality,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+324608bf8fecc064bc491da21291465ab42fa6b6,Matching-CNN meets KNN: Quasi-parametric human parsing,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+32ecbbd76fdce249f9109594eee2d52a1cafdfc7,Object Specific Deep Learning Feature and Its Application to Face Detection,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+32ecbbd76fdce249f9109594eee2d52a1cafdfc7,Object Specific Deep Learning Feature and Its Application to Face Detection,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+32ecbbd76fdce249f9109594eee2d52a1cafdfc7,Object Specific Deep Learning Feature and Its Application to Face Detection,Shenzhen University,Shenzhen University,"深圳大学, 3688, 南海大道, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518060, 中国",22.53521465,113.93159110,edu,
+32ecbbd76fdce249f9109594eee2d52a1cafdfc7,Object Specific Deep Learning Feature and Its Application to Face Detection,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+32c20afb5c91ed7cdbafb76408c3a62b38dd9160,Viewing Real-World Faces in 3D,Open University of Israel,Open University of Israel,"האוניברסיטה הפתוחה, 15, אבא חושי, חיפה, גבעת דאונס, חיפה, מחוז חיפה, NO, ישראל",32.77824165,34.99565673,edu,
+32a40c43a9bc1f1c1ed10be3b9f10609d7e0cb6b,Lighting Aware Preprocessing for Face Recognition across Varying Illumination,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+32a40c43a9bc1f1c1ed10be3b9f10609d7e0cb6b,Lighting Aware Preprocessing for Face Recognition across Varying Illumination,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+325723a7fa69f9976feeab5ba9abd3c11e3f7c80,Beyond Textures: Learning from Multi-domain Artistic Images for Arbitrary Style Transfer,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+3283477ebd49488e1f3c78e6e828678ea2bb815b,Cauchy Principal Component Analysis,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+3239f9fb3c11cc29c65664254133beb339f13f40,Low-Resolution Vision for Autonomous Mobile Robots,Clemson University,Clemson University,"Clemson University, Old Stadium Road, Clemson Heights, Pickens County, South Carolina, 29631, USA",34.66869155,-82.83743476,edu,
+322a7dad274f440a92548faa8f2b2be666b2d01f,Pyramid Scene Parsing Network,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+322a7dad274f440a92548faa8f2b2be666b2d01f,Pyramid Scene Parsing Network,SenseTime,SenseTime,"China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1号-7",39.99300800,116.32988200,company,"1 Zhongguancun E Rd, Haidian Qu, China"
+329394480fc5e9e96de4250cc1a2b060c3677c94,Improved Dense Trajectory with Cross Streams,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+329394480fc5e9e96de4250cc1a2b060c3677c94,Improved Dense Trajectory with Cross Streams,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+329394480fc5e9e96de4250cc1a2b060c3677c94,Improved Dense Trajectory with Cross Streams,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+32eeba2ff1ef4259de7802c8ee8cecb6d6c581a3,Isometric Projection,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+32eeba2ff1ef4259de7802c8ee8cecb6d6c581a3,Isometric Projection,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+32c9ebd2685f522821eddfc19c7c91fd6b3caf22,Finding Correspondence from Multiple Images via Sparse and Low-Rank Decomposition,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+32c9ebd2685f522821eddfc19c7c91fd6b3caf22,Finding Correspondence from Multiple Images via Sparse and Low-Rank Decomposition,"Advanced Digital Sciences Center, Singapore","Advanced Digital Sciences Center, Singapore","1 Create Way, 14-02 Create Tower, Singapore 138602",1.30372570,103.77377630,edu,
+328bc4d5495723f9a1037660b5d9c1176713bf24,Unimpaired Attentional Disengagement and Social Orienting in Children With Autism,University of Minnesota,University of Minnesota,"WeismanArt, 333, East River Parkway, Marcy-Holmes, Phillips, Minneapolis, Hennepin County, Minnesota, 55455, USA",44.97308605,-93.23708813,edu,
+32f37cbc7806c37e8b618d935800bdcd6e7108cc,Rhythmic Gait Signatures from Video without Motion Capture,University of Calgary,University of Calgary,"University of Calgary, Service Tunnel, University Heights, Calgary, Alberta, T2N 1N7, Canada",51.07840380,-114.12870770,edu,
+32f37cbc7806c37e8b618d935800bdcd6e7108cc,Rhythmic Gait Signatures from Video without Motion Capture,University of Calgary,University of Calgary,"University of Calgary, Service Tunnel, University Heights, Calgary, Alberta, T2N 1N7, Canada",51.07840380,-114.12870770,edu,
+324c91551c3cde44bbcb9d97bc14db7ca6d31850,"Infancy and autism: progress, prospects, and challenges.",University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+3270b2672077cc345f188500902eaf7809799466,Multibiometric Systems: Fusion Strategies and Template Security,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+326b9c8391e89f5bd032aebd1b65e925083c269b,Automatic Pain Intensity Estimation with Heteroscedastic Conditional Ordinal Random Fields,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+326b9c8391e89f5bd032aebd1b65e925083c269b,Automatic Pain Intensity Estimation with Heteroscedastic Conditional Ordinal Random Fields,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+326b9c8391e89f5bd032aebd1b65e925083c269b,Automatic Pain Intensity Estimation with Heteroscedastic Conditional Ordinal Random Fields,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+323f9ae6bdd2a4e4dce4168f7f7e19c70585c9b5,Empirically Analyzing the Effect of Dataset Biases on Deep Face Recognition Systems,University of Basel,University of Basel,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra",47.56126510,7.57529610,edu,
+321c8ba38db118d8b02c0ba209be709e6792a2c7,Learn to Combine Multiple Hypotheses for Accurate Face Alignment,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+3213390558a08e35222eec6fb028c8cfaa0c80c2,Non-sparse linear representations for visual tracking with online reservoir metric learning,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+3209e3af49b7b9c253100b7a39fcf8d013fe36a4,Coherence Constraints in Facial Expression Recognition,University of Florence,University of Florence,"Piazza di San Marco, 4, 50121 Firenze FI, Italy",43.77764260,11.25976500,edu,
+3209e3af49b7b9c253100b7a39fcf8d013fe36a4,Coherence Constraints in Facial Expression Recognition,University of Siena,University of Siena,"大學 University, 澤祥街 Chak Cheung Street, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.41338620,114.21005800,edu,
+3209e3af49b7b9c253100b7a39fcf8d013fe36a4,Coherence Constraints in Facial Expression Recognition,University of Siena,University of Siena,"大學 University, 澤祥街 Chak Cheung Street, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.41338620,114.21005800,edu,
+32c7e4f6d7848676922705484a00c94dac803af9,Learning Articulated Object Models from Language and Vision,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+321dc2958e7874a3896e7df96213cd808d3b2b27,Towards 3D Human Pose Estimation in the Wild: A Weakly-Supervised Approach,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+321dc2958e7874a3896e7df96213cd808d3b2b27,Towards 3D Human Pose Estimation in the Wild: A Weakly-Supervised Approach,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+321dc2958e7874a3896e7df96213cd808d3b2b27,Towards 3D Human Pose Estimation in the Wild: A Weakly-Supervised Approach,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+32799cee51933ac4e1999358bad64817985826d7,Player Experience Extraction from Gameplay Video,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+329d58e8fb30f1bf09acb2f556c9c2f3e768b15c,Leveraging Intra and Inter-Dataset Variations for Robust Face Alignment,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+329d58e8fb30f1bf09acb2f556c9c2f3e768b15c,Leveraging Intra and Inter-Dataset Variations for Robust Face Alignment,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+32c45df9e11e6751bcea1b928f398f6c134d22c6,Towards Unified Object Detection and Semantic Segmentation,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+35b9f09ed66955765dc7703e9cada605948c71d0,Similarity Measure Using Local Phase Features and Its Application to Biometric Recognition,Tohoku University,Tohoku University,"Tohoku University, 五橋通, 青葉区, 仙台市, 宮城県, 東北地方, 980-0811, 日本",38.25309450,140.87365930,edu,
+353b6c1f431feac6edde12b2dde7e6e702455abd,Multi-scale Patch Based Collaborative Representation for Face Recognition with Margin Distribution Optimization,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+353b6c1f431feac6edde12b2dde7e6e702455abd,Multi-scale Patch Based Collaborative Representation for Face Recognition with Margin Distribution Optimization,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+35f2541ef1b5dc2df8283143b1b98c6309ed47dd,View Based Approach to Forensic Face Recognition,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+350da18d8f7455b0e2920bc4ac228764f8fac292,Automatic Detecting Neutral Face for Face Authentication and Facial Expression Analysis,IBM Thomas J. Watson Research Center,IBM Thomas J. Watson Research Center,"IBM Yorktown research lab, Adams Road, Millwood, Town of New Castle, Westchester County, New York, 10562, USA",41.21002475,-73.80407056,company,
+350af77e01e78e8e3534f42b80b5dd35a602e73c,Hierarchical Recurrent Neural Encoder for Video Representation with Application to Captioning,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+35fc0b28d0d674b28dd625d170bc641a36b17318,CSI: Composite Statistical Inference Techniques for Semantic Segmentation,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+35fc0b28d0d674b28dd625d170bc641a36b17318,CSI: Composite Statistical Inference Techniques for Semantic Segmentation,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+35fc0b28d0d674b28dd625d170bc641a36b17318,CSI: Composite Statistical Inference Techniques for Semantic Segmentation,Lund University,Lund University,"TEM at Lund University, 9, Klostergatan, Stadskärnan, Centrum, Lund, Skåne, Götaland, 22222, Sverige",55.70395710,13.19020110,edu,
+35498b80ee457e409c0962e03a6e170a917c83af,Look into Person: Self-Supervised Structure-Sensitive Learning and a New Benchmark for Human Parsing,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+35498b80ee457e409c0962e03a6e170a917c83af,Look into Person: Self-Supervised Structure-Sensitive Learning and a New Benchmark for Human Parsing,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+35700f9a635bd3c128ab41718b040a0c28d6361a,DeepGait: A Learning Deep Convolutional Representation for View-Invariant Gait Recognition Using Joint Bayesian,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+35700f9a635bd3c128ab41718b040a0c28d6361a,DeepGait: A Learning Deep Convolutional Representation for View-Invariant Gait Recognition Using Joint Bayesian,Zhejiang University of Technology,Zhejiang University of Technology,"浙江工业大学, 潮王路, 朝晖街道, 杭州市 Hangzhou, 浙江省, 310014, 中国",30.29315340,120.16204580,edu,
+3548cb9ee54bd4c8b3421f1edd393da9038da293,(Unseen) event recognition via semantic compositionality,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+3548415e23b536b9e41aa3d92c18880f38a1d80c,Superpixel-Based Feature for Aerial Image Scene Recognition,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+3548415e23b536b9e41aa3d92c18880f38a1d80c,Superpixel-Based Feature for Aerial Image Scene Recognition,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+3548415e23b536b9e41aa3d92c18880f38a1d80c,Superpixel-Based Feature for Aerial Image Scene Recognition,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+35f03f5cbcc21a9c36c84e858eeb15c5d6722309,Placing Broadcast News Videos in their Social Media Context Using Hashtags,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+356b431d4f7a2a0a38cf971c84568207dcdbf189,Recognize complex events from static images by fusing deep channels,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+356b431d4f7a2a0a38cf971c84568207dcdbf189,Recognize complex events from static images by fusing deep channels,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+35cdd4df9f039f475247bf03fdcc605e40683dce,Eye Detection and Face Recognition Using Evolutionary Computation,George Mason University,George Mason University,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA",38.83133325,-77.30798839,edu,
+351c7e6c2e2fd894626be20a480fa5749e016dc7,LinkNet: Exploiting encoder representations for efficient semantic segmentation,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+351c7e6c2e2fd894626be20a480fa5749e016dc7,LinkNet: Exploiting encoder representations for efficient semantic segmentation,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+35ab5978376ea8113ff476076f18a677b4136d92,RT-GENE: Real-Time Eye Gaze Estimation in Natural Environments,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+35cbf049074382e757bbfc8cc45ccbe467833a7a,A simple neural network module for relational reasoning,"London, United Kingdom","London, United Kingdom","London, Greater London, England, SW1A 2DU, UK",51.50732190,-0.12764740,edu,
+3573dd5b2982e1406f2ef6a1680149d4f9bd95d1,Learning Word Embeddings for Low-resource Languages by PU Learning,University of Virginia,University of Virginia,"University of Virginia, Rotunda Alley, Carr's Hill, Albemarle County, Virginia, 22904-4119, USA",38.03536820,-78.50353220,edu,
+3573dd5b2982e1406f2ef6a1680149d4f9bd95d1,Learning Word Embeddings for Low-resource Languages by PU Learning,"University of California, Davis","University of California, Davis","University of California, Davis, Apiary Drive, Yolo County, California, 95616-5270, USA",38.53363490,-121.79077264,edu,
+35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62,Exploring Models and Data for Image Question Answering,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+35f6e4a61bb3541348300be3347ab56d0be75744,Tinkering Under The Hood: Interactive Zero-Shot Learning with Pictorial Classifiers,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+35ec869dd0637c933d35ab823202c13b9b5d9aad,Effective Community Search for Large Attributed Graphs,University of Hong Kong,University of Hong Kong,"海洋科學研究所 The Swire Institute of Marine Science, 鶴咀道 Cape D'Aguilar Road, 鶴咀低電台 Cape D'Aguilar Low-Level Radio Station, 石澳 Shek O, 芽菜坑村 Nga Choy Hang Tsuen, 南區 Southern District, 香港島 Hong Kong Island, HK, 中国",22.20814690,114.25964115,edu,
+35f921def890210dda4b72247849ad7ba7d35250,Exemplar-Based Graph Matching for Robust Facial Landmark Localization,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+35c473bae9d146072625cc3d452c8f6b84c8cc47,ZoomNet: Deep Aggregation Learning for High-Performance Small Pedestrian Detection,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+35c473bae9d146072625cc3d452c8f6b84c8cc47,ZoomNet: Deep Aggregation Learning for High-Performance Small Pedestrian Detection,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+359fda4ff19dbd3634b867fbb3ef3cb6812691c5,Temporal Perception and Prediction in Ego-Centric Video,University of North Carolina at Chapel Hill,University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.91139710,-79.05045290,edu,
+35d7b5738350a1bbfd8d7a591433d1664f909009,VisemeNet: Audio-Driven Animator-Centric Speech Animation,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+35d7b5738350a1bbfd8d7a591433d1664f909009,VisemeNet: Audio-Driven Animator-Centric Speech Animation,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+35d7b5738350a1bbfd8d7a591433d1664f909009,VisemeNet: Audio-Driven Animator-Centric Speech Animation,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+35d7b5738350a1bbfd8d7a591433d1664f909009,VisemeNet: Audio-Driven Animator-Centric Speech Animation,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+35d7b5738350a1bbfd8d7a591433d1664f909009,VisemeNet: Audio-Driven Animator-Centric Speech Animation,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+35d7b5738350a1bbfd8d7a591433d1664f909009,VisemeNet: Audio-Driven Animator-Centric Speech Animation,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+35ec9b8811f2d755c7ad377bdc29741b55b09356,"Efficient, Robust and Accurate Fitting of a 3D Morphable Model",University of Basel,University of Basel,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra",47.56126510,7.57529610,edu,
+35b9af6057801fb2f28881840c8427c9cf648757,Deep Reinforcement Learning Attention Selection For Person Re-Identification,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+35472424eb5662d05928017942c32f4537cb5d5c,Robust object recognition via third-party collaborative representation,Kyoto University,Kyoto University,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.02749960,135.78154513,edu,
+352fbe52e9bcbcf8625a408dfca36b30460c8251,3D Facial Landmark Detection & Face Registration A 3D Facial Landmark Model & 3D Local Shape Descriptors Approach,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+3502544f66da8fdeda0daf8f6671a16c52e8e353,Learning to Reconstruct Shapes from Unseen Classes,MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+3502544f66da8fdeda0daf8f6671a16c52e8e353,Learning to Reconstruct Shapes from Unseen Classes,MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+3502544f66da8fdeda0daf8f6671a16c52e8e353,Learning to Reconstruct Shapes from Unseen Classes,MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+3502544f66da8fdeda0daf8f6671a16c52e8e353,Learning to Reconstruct Shapes from Unseen Classes,MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+3502544f66da8fdeda0daf8f6671a16c52e8e353,Learning to Reconstruct Shapes from Unseen Classes,MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+3502544f66da8fdeda0daf8f6671a16c52e8e353,Learning to Reconstruct Shapes from Unseen Classes,MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+3505c9b0a9631539e34663310aefe9b05ac02727,A Joint Discriminative Generative Model for Deformable Model Construction and Classification,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+3505c9b0a9631539e34663310aefe9b05ac02727,A Joint Discriminative Generative Model for Deformable Model Construction and Classification,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+3506518d616343d3083f4fe257a5ee36b376b9e1,Unsupervised Domain Adaptation for Personalized Facial Emotion Recognition,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+3506518d616343d3083f4fe257a5ee36b376b9e1,Unsupervised Domain Adaptation for Personalized Facial Emotion Recognition,University of Perugia,University of Perugia,"Caffe Perugia, 2350, Health Sciences Mall, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.26224210,-123.24500520,edu,
+3506518d616343d3083f4fe257a5ee36b376b9e1,Unsupervised Domain Adaptation for Personalized Facial Emotion Recognition,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+356a0c92b61a56699211d5c5d9e4d78c9373e819,Multiple Object Tracking Using Local Motion Patterns,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+35f84e0020c26715691825594e2cf5553467a0e4,Fast Bilateral Solver for Semantic Video Segmentation,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+35f84e0020c26715691825594e2cf5553467a0e4,Fast Bilateral Solver for Semantic Video Segmentation,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+3514f66f155c271981a734f1523572edcd8fd10e,A complementary local feature descriptor for face identification,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+3514f66f155c271981a734f1523572edcd8fd10e,A complementary local feature descriptor for face identification,University of Campinas,University of Campinas,"USJ, 97, Rua Sílvia Maria Fabro, Kobrasol, Campinas, São José, Microrregião de Florianópolis, Mesorregião da Grande Florianópolis, SC, Região Sul, 88102-130, Brasil",-27.59539950,-48.61542180,edu,
+35140ebfa0b6d75fd096aed72d40b16ea6a3828b,Support Discrimination Dictionary Learning for Image Classification,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+35140ebfa0b6d75fd096aed72d40b16ea6a3828b,Support Discrimination Dictionary Learning for Image Classification,Beijing Jiaotong University,Beijing Jiaotong University,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国",39.94976005,116.33629046,edu,
+35140ebfa0b6d75fd096aed72d40b16ea6a3828b,Support Discrimination Dictionary Learning for Image Classification,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+69ad645516fa5cd4cd45fc217edcbf83bc0f65be,Multi-camera People Tracking with Hierarchical Likelihood Grids,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+69556424ec4daaa2b932790dba7bc8b826abc574,"Variational Discriminator Bottleneck: Improving Imitation Learning, Inverse RL, and GANs by Constraining Information Flow","University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+6964af90cf8ac336a2a55800d9c510eccc7ba8e1,Temporal Relational Reasoning in Videos,MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+692bc33f7466278900dd73f7f40c563f72cb6754,Fourth-person Captioning: Describing Daily Events by Uni-supervised and Tri-regularized Training,Kyushu University,Kyushu University,"伊都ゲストハウス, 桜井太郎丸線, 西区, 福岡市, 福岡県, 九州地方, 819−0395, 日本",33.59914655,130.22359848,edu,
+692bc33f7466278900dd73f7f40c563f72cb6754,Fourth-person Captioning: Describing Daily Events by Uni-supervised and Tri-regularized Training,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+692bc33f7466278900dd73f7f40c563f72cb6754,Fourth-person Captioning: Describing Daily Events by Uni-supervised and Tri-regularized Training,Kyushu University,Kyushu University,"伊都ゲストハウス, 桜井太郎丸線, 西区, 福岡市, 福岡県, 九州地方, 819-0395, 日本",33.59914655,130.22359848,edu,
+694d831156293642e63103cd1921eed37e77a68f,Detection of Multiple Pedestrians using Motion Information and Adaboost Algorithm,Yeungnam University,Yeungnam University,"영남대, 대학로, 부적리, 경산시, 경북, 712-749, 대한민국",35.83654030,128.75343090,edu,
+69291d44eb4fdf848a06defe99a74cb75026c70b,Automatic Detection of a Driver's Complex Mental States,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+69291d44eb4fdf848a06defe99a74cb75026c70b,Automatic Detection of a Driver's Complex Mental States,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+690f5d35489c63ec7309b9e4d77c929815065257,Complementary effects of gaze direction and early saliency in guiding fixations during free viewing.,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+690f5d35489c63ec7309b9e4d77c929815065257,Complementary effects of gaze direction and early saliency in guiding fixations during free viewing.,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+690f5d35489c63ec7309b9e4d77c929815065257,Complementary effects of gaze direction and early saliency in guiding fixations during free viewing.,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+690f5d35489c63ec7309b9e4d77c929815065257,Complementary effects of gaze direction and early saliency in guiding fixations during free viewing.,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+69adbfa7b0b886caac15ebe53b89adce390598a3,Face hallucination using cascaded super-resolution and identity priors,University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.05015580,14.46907327,edu,
+69adbfa7b0b886caac15ebe53b89adce390598a3,Face hallucination using cascaded super-resolution and identity priors,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+693e0da15094071de5eebd2f36f8b4023f91f161,Can facial metrology predict gender?,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+69d29012d17cdf0a2e59546ccbbe46fa49afcd68,Subspace clustering of dimensionality-reduced data,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+69b5dd48d0f6f95f4dba5ad8b35b51de446b632f,MGGAN: Solving Mode Collapse using Manifold Guided Training,Yonsei University,Yonsei University,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.56004060,126.93692480,edu,
+697e6552326bc04a80b510f91e3a83c23159fa4b,Are You Talking to Me? Detecting Attention in First-Person Interactions,University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+69a68f9cf874c69e2232f47808016c2736b90c35,Learning Deep Representation for Imbalanced Classification,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+69a68f9cf874c69e2232f47808016c2736b90c35,Learning Deep Representation for Imbalanced Classification,SenseTime,SenseTime,"China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1号-7",39.99300800,116.32988200,company,"1 Zhongguancun E Rd, Haidian Qu, China"
+69a68f9cf874c69e2232f47808016c2736b90c35,Learning Deep Representation for Imbalanced Classification,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+696bfa059fcc459c30af21c84d116ad77fb11197,Isoradius Contours: New Representations and Techniques for 3D Face Registration and Matching,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+6926676e0b710717e373926e1302bfb441c5c503,Fisher Non-negative Matrix Factorization with Pairwise Weighting,University of Tsukuba,University of Tsukuba,"University of Tsukuba, つばき通り, Kananemoto-satsukabe village, つくば市, 茨城県, 関東地方, 305-8377, 日本",36.11120580,140.10551760,edu,
+69b18d62330711bfd7f01a45f97aaec71e9ea6a5,M-Track: A New Software for Automated Detection of Grooming Trajectories in Mice,SUNY Polytechnic Institute,State University of New York Polytechnic Institute,"State University of New York Polytechnic Institute, 100, Seymour Road, Maynard, Town of Marcy, Oneida County, New York, 13502, USA",43.13800205,-75.22943591,edu,
+6965de4410921cff014a48b071f2c4c52c1da0fd,Human object estimation via backscattered radio frequency signal,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+6965de4410921cff014a48b071f2c4c52c1da0fd,Human object estimation via backscattered radio frequency signal,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+69a7c8bca699ee4100fbe6a83b72459c132a6f10,Resource Aware Person Re-identification across Multiple Resolutions,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+6993bca2b3471f26f2c8a47adfe444bfc7852484,The Do’s and Don’ts for CNN-Based Face Verification,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+69b647afe6526256a93033eac14ce470204e7bae,Training Deep Neural Networks via Direct Loss Minimization,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+69b647afe6526256a93033eac14ce470204e7bae,Training Deep Neural Networks via Direct Loss Minimization,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+69eb6c91788e7c359ddd3500d01fb73433ce2e65,CAMGRAPH: Distributed Graph Processing for Camera Networks,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+691964c43bfd282f6f4d00b8b0310c554b613e3b,Temporal Hallucinating for Action Recognition with Few Still Images,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+69abd57a49c6b430a83d9a1e09dce5a347c9c63e,Face Recognition from Multiple Images per Subject,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+69abd57a49c6b430a83d9a1e09dce5a347c9c63e,Face Recognition from Multiple Images per Subject,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+69c2b7565e080740e2bdb664e6b00fd760609889,Enriched Long-Term Recurrent Convolutional Network for Facial Micro-Expression Recognition,Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.64185301,edu,
+69c2b7565e080740e2bdb664e6b00fd760609889,Enriched Long-Term Recurrent Convolutional Network for Facial Micro-Expression Recognition,Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.64185301,edu,
+69c2b7565e080740e2bdb664e6b00fd760609889,Enriched Long-Term Recurrent Convolutional Network for Facial Micro-Expression Recognition,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+69c2ac04693d53251500557316c854a625af84ee,"50 years of biometric research: Accomplishments, challenges, and opportunities",Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+696d114f57aa6798a5d16aaf847a78942ab9949f,Efficient Similarity Derived from Kernel-Based Transition Probability,National Institute of Advanced Industrial Science and Technology,National Institute of Advanced Industrial Science and Technology,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本",36.05238585,140.11852361,edu,
+69870df2c7a6d2e2bfef201968aecd24eb18794d,We are Humor Beings: Understanding and Predicting Visual Humor,Microsoft Research Asia,Microsoft Research Asia,"Microsoft Live Labs Research, China",35.86166000,104.19539700,company,
+69522bd70f1c64e9073753ccf335382be5aa1cd9,Geometric Feature Based Age Classification Using Facial Images,Middle East Technical University,Middle East Technical University,"ODTÜ, 1, 1591.sk(315.sk), Çiğdem Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87549675,32.78553506,edu,
+69522bd70f1c64e9073753ccf335382be5aa1cd9,Geometric Feature Based Age Classification Using Facial Images,Eastern Mediterranean University,Eastern Mediterranean University,"Eastern Mediterranean University (EMU) - Stadium, Nehir Caddesi, Gazimağusa, Αμμόχωστος - Mağusa, Kuzey Kıbrıs, 99450, Κύπρος - Kıbrıs",35.14479945,33.90492318,edu,
+69fb98e11df56b5d7ec7d45442af274889e4be52,Harnessing the Deep Net Object Models for Enhancing Human Action Recognition,University of Canberra,University of Canberra,"University of Canberra, University Drive, Bruce, Belconnen, Australian Capital Territory, 2617, Australia",-35.23656905,149.08446994,edu,
+69fb98e11df56b5d7ec7d45442af274889e4be52,Harnessing the Deep Net Object Models for Enhancing Human Action Recognition,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+69e054acbf09a4bebac1c4b14c3f6a1ac6d199b0,Can feature-based inductive transfer learning help person re-identification?,Kyoto University,Kyoto University,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.02749960,135.78154513,edu,
+69e054acbf09a4bebac1c4b14c3f6a1ac6d199b0,Can feature-based inductive transfer learning help person re-identification?,Kyoto University,Kyoto University,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.02749960,135.78154513,edu,
+3c78b642289d6a15b0fb8a7010a1fb829beceee2,Analysis of Facial Dynamics Using a Tensor Framework,University of Bristol,University of Bristol,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK",51.45848370,-2.60977520,edu,
+3c78b642289d6a15b0fb8a7010a1fb829beceee2,Analysis of Facial Dynamics Using a Tensor Framework,University of Bristol,University of Bristol,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK",51.45848370,-2.60977520,edu,
+3c1e1961db0f0a351d5a4e21cd30bcbd9f88be57,Discovering states and transformations in image collections,MIT,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+3cb488a3b71f221a8616716a1fc2b951dd0de549,Facial Age Estimation by Adaptive Label Distribution Learning,Southeast University,Southeast University,"SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国",32.05752790,118.78682252,edu,
+3cf1f89d73ca4b25399c237ed3e664a55cd273a2,Face Sketch Matching via Coupled Deep Transform Learning,"IIIT Delhi, India","IIIT Delhi, India","Okhla Industrial Estate, Phase III, Near Govind Puri Metro Station, New Delhi, Delhi 110020, India",28.54562820,77.27315050,edu,
+3c84e2ed018dd1d971b526f87e9d7c1f08e6230f,Accelerating Dynamic Time Warping Clustering with a Novel Admissible Pruning Strategy,"University of California, Riverside","University of California, Riverside","University of California, Riverside, Linden Street, Riverside, Riverside County, California, 92521, USA",33.98071305,-117.33261035,edu,
+3c84e2ed018dd1d971b526f87e9d7c1f08e6230f,Accelerating Dynamic Time Warping Clustering with a Novel Admissible Pruning Strategy,University of Texas at Dallas,University of Texas at Dallas,"University of Texas at Dallas, Richardson, Dallas County, Texas, 78080, USA",32.98207990,-96.75662780,edu,
+3c563542db664321aa77a9567c1601f425500f94,TV-GAN: Generative Adversarial Network Based Thermal to Visible Face Recognition,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+3c8cf97f00cd8b4303eccc4134fa79b15cc3d564,Data-driven image captioning via salient region discovery,Hacettepe University,Hacettepe University,"Hacettepe Üniversitesi Beytepe Kampüsü, Hacettepe-Beytepe Kampüs Yolu, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.86742125,32.73519072,edu,
+3c8cf97f00cd8b4303eccc4134fa79b15cc3d564,Data-driven image captioning via salient region discovery,Middle East Technical University,Middle East Technical University,"ODTÜ, 1, 1591.sk(315.sk), Çiğdem Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87549675,32.78553506,edu,
+3ce4a61ada2720713535d7262e8229b33c5df79f,Life-Long Disentangled Representation Learning with Cross-Domain Latent Homologies,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+3cb52304ec2aa2fd4437ce0e170a0b16409c0cdb,3D Segmentation in CT Imagery with Conditional Random Fields and Histograms of Oriented Gradients,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+3c6cac7ecf546556d7c6050f7b693a99cc8a57b3,Robust facial landmark detection in the wild,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+3c6cac7ecf546556d7c6050f7b693a99cc8a57b3,Robust facial landmark detection in the wild,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+3c57e28a4eb463d532ea2b0b1ba4b426ead8d9a0,Defeating Image Obfuscation with Deep Learning,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+3cd9b0a61bdfa1bb8a0a1bf0369515a76ecd06e3,Distance Metric Learning with Eigenvalue Optimization,University of Exeter,University of Exeter,"University of Exeter, Stocker Road, Exwick, Exeter, Devon, South West England, England, EX4 4QN, UK",50.73693020,-3.53647672,edu,
+3cd9b0a61bdfa1bb8a0a1bf0369515a76ecd06e3,Distance Metric Learning with Eigenvalue Optimization,University of Bristol,University of Bristol,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK",51.45848370,-2.60977520,edu,
+3cea3aba77649d718991d0cb30135887267c11e8,Adversarial Attack Type I: Generating False Positives,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+3cea3aba77649d718991d0cb30135887267c11e8,Adversarial Attack Type I: Generating False Positives,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+3cea3aba77649d718991d0cb30135887267c11e8,Adversarial Attack Type I: Generating False Positives,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+3cea3aba77649d718991d0cb30135887267c11e8,Adversarial Attack Type I: Generating False Positives,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+3cfc8c00d390abe5f94ba7a1251e085a794b35bb,A Convex Optimization Framework for Active Learning,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+3cfc8c00d390abe5f94ba7a1251e085a794b35bb,A Convex Optimization Framework for Active Learning,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+3cfc8c00d390abe5f94ba7a1251e085a794b35bb,A Convex Optimization Framework for Active Learning,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+3c97c32ff575989ef2869f86d89c63005fc11ba9,Face Detection with the Faster R-CNN,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+3c97c32ff575989ef2869f86d89c63005fc11ba9,Face Detection with the Faster R-CNN,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+3cd9411181cd4f12798c64f0442c199cc24a56a7,Leveraging from group classification for video concept detection,EURECOM,EURECOM,"Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France",43.61438600,7.07112500,edu,
+3cd40bfa1ff193a96bde0207e5140a399476466c,High Five: Recognising human interactions in TV shows,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+3c051c8721b65fca8c506de68068dc8fca6adcc5,It Takes (Only) Two: Adversarial Generator-Encoder Networks,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+3c8f916264e8d15ba1bc618c6adf395e86dd7b40,Generating Descriptions with Grounded and Co-referenced People,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+3c1aef7c2d32a219bdbc89a44d158bc2695e360a,Adversarial Attack Type I: Generating False Positives,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+3c1aef7c2d32a219bdbc89a44d158bc2695e360a,Adversarial Attack Type I: Generating False Positives,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+3c1aef7c2d32a219bdbc89a44d158bc2695e360a,Adversarial Attack Type I: Generating False Positives,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+3c1aef7c2d32a219bdbc89a44d158bc2695e360a,Adversarial Attack Type I: Generating False Positives,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+3c8db2ca155ce4e15ec8a2c4c4b979de654fb296,Holistically-Nested Edge Detection,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+3c8db2ca155ce4e15ec8a2c4c4b979de654fb296,Holistically-Nested Edge Detection,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+3ce92cac0f3694be2f2918bf122679c6664a1e16,Deep Relative Attributes,Sharif University of Technology,Sharif University of Technology,"دانشگاه صنعتی شریف, خیابان آزادی, زنجان, منطقه ۹ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, 14588, ‏ایران‎",35.70362270,51.35125097,edu,
+3ce92cac0f3694be2f2918bf122679c6664a1e16,Deep Relative Attributes,University of North Carolina at Chapel Hill,University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.91139710,-79.05045290,edu,
+3c11a1f2bd4b9ce70f699fb6ad6398171a8ad3bd,Simulating Pareidolia of Faces for Architectural Image Analysis,University of Newcastle,University of Newcastle,"University of Newcastle Central Coast Campus, Technology Bridge, Ourimbah, Central Coast, NSW, 2258, Australia",-33.35788990,151.37834708,edu,
+3c11a1f2bd4b9ce70f699fb6ad6398171a8ad3bd,Simulating Pareidolia of Faces for Architectural Image Analysis,University of Newcastle,University of Newcastle,"University of Newcastle Central Coast Campus, Technology Bridge, Ourimbah, Central Coast, NSW, 2258, Australia",-33.35788990,151.37834708,edu,
+3cd8ab6bb4b038454861a36d5396f4787a21cc68,Video-Based Facial Expression Recognition Using Hough Forest,National Tsing Hua University,National Tsing Hua University,"國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.79254840,120.99511830,edu,
+3ca5d3b8f5f071148cb50f22955fd8c1c1992719,Evaluating race and sex diversity in the world's largest companies using deep neural networks,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+3c9f2444b1de1bf960664d8c3109f8b8d5dee44b,Automatic Facial Feature Extraction for Face Recognition,Università degli Studi di Milano,Università degli Studi di Milano,"Università degli Studi di Milano, Via Camillo Golgi, Città Studi, Milano, MI, LOM, 20133, Italia",45.47567215,9.23336232,edu,
+3cafea5212ff4217beb293e2de8ca0f160ad623a,A Unified Feature Disentangler for Multi-Domain Image Translation and Manipulation,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+3cafea5212ff4217beb293e2de8ca0f160ad623a,A Unified Feature Disentangler for Multi-Domain Image Translation and Manipulation,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+3cafea5212ff4217beb293e2de8ca0f160ad623a,A Unified Feature Disentangler for Multi-Domain Image Translation and Manipulation,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+3c6be0034477b07222f41f6fc558a64f0222a192,Egocentric Video Biometrics,Hebrew University of Jerusalem,The Hebrew University of Jerusalem,"האוניברסיטה העברית בירושלים, Reagan Plaza, קרית מנחם בגין, הר הצופים, ירושלים, מחוז ירושלים, NO, ישראל",31.79185550,35.24472300,edu,
+3cc46bf79fb9225cf308815c7d41c8dd5625cc29,Age interval and gender prediction using PARAFAC2 applied to speech utterances,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+3cc46bf79fb9225cf308815c7d41c8dd5625cc29,Age interval and gender prediction using PARAFAC2 applied to speech utterances,Cyprus University of Technology,Cyprus University of Technology,"Mitropoli Building - Cyprus University of Technology, Anexartisias, Limasol - Λεμεσός, Limassol - Λεμεσός, Κύπρος - Kıbrıs, 3036, Κύπρος - Kıbrıs",34.67567405,33.04577648,edu,
+3c2bdfc703a77ecd0a991b03e620e8a911d5f8f4,Combining Facial Appearance and Dynamics for Face Recognition,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+3c90f2603ef99222697b76d7ab123f513a1f4baa,The Effects of Alcohol Intoxication on Accuracy and the Confidence–Accuracy Relationship in Photographic Simultaneous Line‐ups,Loughborough University,Loughborough University,"Computer Science, University Road, Charnwood, Leicestershire, East Midlands, England, LE11 3TP, UK",52.76635770,-1.22924610,edu,
+3c90f2603ef99222697b76d7ab123f513a1f4baa,The Effects of Alcohol Intoxication on Accuracy and the Confidence–Accuracy Relationship in Photographic Simultaneous Line‐ups,Edge Hill University,Edge Hill University,"Edge Hill University, St Helens Road, West Lancashire, Lancs, North West England, England, L39 4QP, UK",53.55821550,-2.86904651,edu,
+56c700693b63e3da3b985777da6d9256e2e0dc21,Global refinement of random forest,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+56c700693b63e3da3b985777da6d9256e2e0dc21,Global refinement of random forest,Microsoft Research Asia,Microsoft Research Asia,"Microsoft Live Labs Research, China",35.86166000,104.19539700,company,
+56359d2b4508cc267d185c1d6d310a1c4c2cc8c2,Shape driven kernel adaptation in Convolutional Neural Network for robust facial trait recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+56359d2b4508cc267d185c1d6d310a1c4c2cc8c2,Shape driven kernel adaptation in Convolutional Neural Network for robust facial trait recognition,Chinese Academy of Science,"Key Lab of Intelligent Information Processing, Institute of Computer Technology, Chinese Academy of Science (CAS), Beijing, 100190, China","Beijing, China",39.90419990,116.40739630,edu,
+56359d2b4508cc267d185c1d6d310a1c4c2cc8c2,Shape driven kernel adaptation in Convolutional Neural Network for robust facial trait recognition,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+56d831143008ec10f8122e5086f5a55aec770ea1,A DIKW Paradigm to Cognitive Engineering,University of Cape Town,University of Cape Town,"University of Cape Town, Engineering Mall, Cape Town Ward 59, Cape Town, City of Cape Town, Western Cape, CAPE TOWN, South Africa",-33.95828745,18.45997349,edu,
+56e079f4eb40744728fd1d7665938b06426338e5,Bayesian Approaches to Distribution Regression,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+56e079f4eb40744728fd1d7665938b06426338e5,Bayesian Approaches to Distribution Regression,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+56e079f4eb40744728fd1d7665938b06426338e5,Bayesian Approaches to Distribution Regression,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+56e079f4eb40744728fd1d7665938b06426338e5,Bayesian Approaches to Distribution Regression,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+5684d284310582ae0f69c5b7a4d6b791a13fcf49,Learning to Track at 100 FPS with Deep Regression Networks,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+56e6f472090030a6f172a3e2f46ef9daf6cad757,Asian Face Image Database PF 01 Intelligent Multimedia Lab,Pohang University of Science and Technology,Pohang University of Science and Technology,"포스텍, 77, 청암로, 효곡동, 남구, 포항시, 경북, 37673, 대한민국",36.01773095,129.32107509,edu,
+56d3df5ce2ffb695728c091252087979be31f0c7,RMPE: Regional Multi-person Pose Estimation,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+56f86bef26209c85f2ef66ec23b6803d12ca6cd6,Pyramidal RoR for image classification,North China Electric Power University,North China Electric Power University,"华北电力大学, 永华北大街, 莲池区, 保定市, 莲池区 (Lianchi), 保定市, 河北省, 071000, 中国",38.87604460,115.49738730,edu,
+567078a51ea63b70396dca5dabb50a10a736d991,Conditional Generative Adversarial Network for Structured Domain Adaptation,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+567078a51ea63b70396dca5dabb50a10a736d991,Conditional Generative Adversarial Network for Structured Domain Adaptation,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+567078a51ea63b70396dca5dabb50a10a736d991,Conditional Generative Adversarial Network for Structured Domain Adaptation,University at Buffalo,State University of New York at Buffalo,"Buffalo, NY 14260, USA",43.00080930,-78.78896970,edu,
+566a39d753c494f57b4464d6bde61bf3593f7ceb,A Critical Review of Action Recognition Benchmarks,Open University of Israel,Open University of Israel,"האוניברסיטה הפתוחה, 15, אבא חושי, חיפה, גבעת דאונס, חיפה, מחוז חיפה, NO, ישראל",32.77824165,34.99565673,edu,
+56c2fb2438f32529aec604e6fc3b06a595ddbfcc,Comparison of Recent Machine Learning Techniques for Gender Recognition from Facial Images,Central Washington University,Central Washington University,"Central Washington University, Dean Nicholson Boulevard, Ellensburg, Kittitas County, Washington, 98926, USA",47.00646895,-120.53673040,edu,
+56c2fb2438f32529aec604e6fc3b06a595ddbfcc,Comparison of Recent Machine Learning Techniques for Gender Recognition from Facial Images,Central Washington University,Central Washington University,"Central Washington University, Dean Nicholson Boulevard, Ellensburg, Kittitas County, Washington, 98926, USA",47.00646895,-120.53673040,edu,
+56c2fb2438f32529aec604e6fc3b06a595ddbfcc,Comparison of Recent Machine Learning Techniques for Gender Recognition from Facial Images,Central Washington University,Central Washington University,"Central Washington University, Dean Nicholson Boulevard, Ellensburg, Kittitas County, Washington, 98926, USA",47.00646895,-120.53673040,edu,
+56c2fb2438f32529aec604e6fc3b06a595ddbfcc,Comparison of Recent Machine Learning Techniques for Gender Recognition from Facial Images,Central Washington University,Central Washington University,"Central Washington University, Dean Nicholson Boulevard, Ellensburg, Kittitas County, Washington, 98926, USA",47.00646895,-120.53673040,edu,
+56f231fc40424ed9a7c93cbc9f5a99d022e1d242,Age Estimation Based on a Single Network with Soft Softmax of Aging Modeling,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+56f231fc40424ed9a7c93cbc9f5a99d022e1d242,Age Estimation Based on a Single Network with Soft Softmax of Aging Modeling,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+56f231fc40424ed9a7c93cbc9f5a99d022e1d242,Age Estimation Based on a Single Network with Soft Softmax of Aging Modeling,Macau University of Science and Technology,Macau University of Science and Technology,"Universidade de Ciência e Tecnologia de Macau 澳門科技大學 Macau University of Science and Technology, 偉龍馬路 Avenida Wai Long, 氹仔Taipa, 氹仔舊城區 Vila de Taipa, 嘉模堂區 Nossa Senhora do Carmo, 氹仔 Taipa, 澳門 Macau, 853, 中国",22.15263985,113.56803206,edu,
+56143653c9bb0f01fb8a58da02b7ef7241170eec,Best of Both Worlds: Transferring Knowledge from Discriminative Learning to a Generative Visual Dialog Model,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+56d5c8bee7d28d2fc6a2b1d00d80285f84618797,Multi-glimpse LSTM with color-depth feature fusion for human detection,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+56d5c8bee7d28d2fc6a2b1d00d80285f84618797,Multi-glimpse LSTM with color-depth feature fusion for human detection,HoHai University,HoHai University,"河海大学, 河海路, 小市桥, 鼓楼区, 南京市, 江苏省, 210013, 中国",32.05765485,118.75500040,edu,
+563143c5f4fed0184c1f3e661917da94cfed1d46,Informed Democracy: Voting-based Novelty Detection for Action Recognition,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+561ae67de137e75e9642ab3512d3749b34484310,DeepGestalt - Identifying Rare Genetic Syndromes Using Deep Learning,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+561ae67de137e75e9642ab3512d3749b34484310,DeepGestalt - Identifying Rare Genetic Syndromes Using Deep Learning,Rheinische-Friedrich-Wilhelms University,Rheinische-Friedrich-Wilhelms University,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+56c701467da819088c3f734f3ba36a793d645992,Title Underconnectivity of the Superior Temporal Sulcus Predicts Emotion Recognition Deficits in Autism Social Cognitive and Affective Neuroscience Advance Access Published Number of Words,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+56f942a738022cb9af243f3336ba1f035783f73c,A general framework for efficient clustering of large datasets based on activity detection,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+568cff415e7e1bebd4769c4a628b90db293c1717,Concepts Not Alone: Exploring Pairwise Relationships for Zero-Shot Video Activity Recognition,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+568cff415e7e1bebd4769c4a628b90db293c1717,Concepts Not Alone: Exploring Pairwise Relationships for Zero-Shot Video Activity Recognition,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+568cff415e7e1bebd4769c4a628b90db293c1717,Concepts Not Alone: Exploring Pairwise Relationships for Zero-Shot Video Activity Recognition,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+562989741c0627b2f966d3abd5f87047503d0fb8,From Same Photo: Cheating on Visual Kinship Challenges,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+562989741c0627b2f966d3abd5f87047503d0fb8,From Same Photo: Cheating on Visual Kinship Challenges,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+562989741c0627b2f966d3abd5f87047503d0fb8,From Same Photo: Cheating on Visual Kinship Challenges,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+560b46547720b3a892f90a337835875f74f4f4ec,Discriminating Color Faces for Recognition,New Jersey Institute of Technology,New Jersey Institute of Technology,"New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA",40.74230250,-74.17928172,edu,
+561ed7e47524fb3218e6a38f41cd877a9c33d3b9,StyleNet: Generating Attractive Visual Captions with Styles,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+561ed7e47524fb3218e6a38f41cd877a9c33d3b9,StyleNet: Generating Attractive Visual Captions with Styles,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+569dbb5c8a84d3b378cb2e38bb86ad7d826c8d10,Joint Detection and Recounting of Abnormal Events by Learning Deep Generic Knowledge,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+560e0e58d0059259ddf86fcec1fa7975dee6a868,Face recognition in unconstrained videos with matched background similarity,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+560e0e58d0059259ddf86fcec1fa7975dee6a868,Face recognition in unconstrained videos with matched background similarity,Open University of Israel,Open University of Israel,"האוניברסיטה הפתוחה, 15, אבא חושי, חיפה, גבעת דאונס, חיפה, מחוז חיפה, NO, ישראל",32.77824165,34.99565673,edu,
+563523dc73375693314c20e1fe2a65e34915cd8f,Human Pose Estimation from Monocular Images: A Comprehensive Survey,Shandong University,Shandong University,"山东大学, 泰安街, 鳌山卫街道, 即墨区, 青岛市, 山东省, 266200, 中国",36.36934730,120.67381800,edu,
+56a677c889e0e2c9f68ab8ca42a7e63acf986229,Mining Spatial and Spatio-Temporal ROIs for Action Recognition,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+56ffece2817a0363f551210733a611830ba1155d,Aligning where to see and what to tell: image caption with region-based attention and scene factorization,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+56e25056153a15eae2a6b10c109f812d2b753cee,Wing Loss for Robust Facial Landmark Localisation with Convolutional Neural Networks,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+56e25056153a15eae2a6b10c109f812d2b753cee,Wing Loss for Robust Facial Landmark Localisation with Convolutional Neural Networks,Jiangnan University,Jiangnan University,"江南大学站, 蠡湖大道, 滨湖区, 南场村, 滨湖区 (Binhu), 无锡市 / Wuxi, 江苏省, 214121, 中国",31.48542550,120.27395810,edu,
+56fcb57a328caf184c1634d934271b18b86b53e8,Multimodal Named Entity Recognition for Short Social Media Posts,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+56d162799d5e004723341492f776399693d76433,Learning Hypergraph-regularized Attribute Predictors,Chongqing University,Chongqing University,"重庆工商大学, 19, 翠林路, 重庆市, 重庆市中心, 南岸区 (Nan'an), 重庆市, 400067, 中国",29.50841740,106.57858552,edu,
+56d162799d5e004723341492f776399693d76433,Learning Hypergraph-regularized Attribute Predictors,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+56ae6d94fc6097ec4ca861f0daa87941d1c10b70,Distance Estimation of an Unknown Person from a Portrait,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+56f812661c3248ed28859d3b2b39e033b04ae6ae,Multiple feature fusion by subspace learning,Beckman Institute,Beckman Institute,"Beckman Institute, The Presidents' Walk, Urbana, Champaign County, Illinois, 61801-2341, USA",40.11571585,-88.22750772,edu,
+56f812661c3248ed28859d3b2b39e033b04ae6ae,Multiple feature fusion by subspace learning,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+56f812661c3248ed28859d3b2b39e033b04ae6ae,Multiple feature fusion by subspace learning,Beckman Institute,Beckman Institute,"Beckman Institute, The Presidents' Walk, Urbana, Champaign County, Illinois, 61801-2341, USA",40.11571585,-88.22750772,edu,
+56f812661c3248ed28859d3b2b39e033b04ae6ae,Multiple feature fusion by subspace learning,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+56bb321e0e180f72be9c4e9eb791b251073750e2,Labeling and modeling large databases of videos,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+5136f69da8a61447a300a50c67d80d84a31b1257,Deep Association Learning for Unsupervised Video Person Re-identification,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+512befa10b9b704c9368c2fbffe0dc3efb1ba1bf,Evidence and a computational explanation of cultural differences in facial expression recognition.,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+512befa10b9b704c9368c2fbffe0dc3efb1ba1bf,Evidence and a computational explanation of cultural differences in facial expression recognition.,Ritsumeikan University,Ritsumeikan University,"立命館大学 (Ritsumeikan University), 衣笠宇多野線, 北区, 京都市, 京都府, 近畿地方, 6038577, 日本",35.03332810,135.72491540,edu,
+512befa10b9b704c9368c2fbffe0dc3efb1ba1bf,Evidence and a computational explanation of cultural differences in facial expression recognition.,Kogakuin University,Kogakuin University,"工学院大学, 東通り, 新宿区, 東京都, 関東地方, 163-8677, 日本",35.69027840,139.69540096,edu,
+512befa10b9b704c9368c2fbffe0dc3efb1ba1bf,Evidence and a computational explanation of cultural differences in facial expression recognition.,Tohoku University,Tohoku University,"Tohoku University, 五橋通, 青葉区, 仙台市, 宮城県, 東北地方, 980-0811, 日本",38.25309450,140.87365930,edu,
+512befa10b9b704c9368c2fbffe0dc3efb1ba1bf,Evidence and a computational explanation of cultural differences in facial expression recognition.,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+51e2f8f402c3d972368483169503221fd3088383,End-to-end Recovery of Human Shape and Pose,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+516d0d9eb08825809e4618ca73a0697137ebabd5,Regularizing Long Short Term Memory with 3D Human-Skeleton Sequences for Action Recognition,Oregon State University,Oregon State University,"OSU Beaver Store, 538, Southwest 6th Avenue, Portland Downtown, Portland, Multnomah County, Oregon, 97204, USA",45.51982890,-122.67797964,edu,
+519a724426b5d9ad384d38aaf2a4632d3824f243,Learning Models for Object Recognition from Natural Language Descriptions,University of Leeds,University of Leeds,"University of Leeds, Inner Ring Road, Woodhouse, Leeds, Yorkshire and the Humber, England, LS2 9NS, UK",53.80387185,-1.55245712,edu,
+5180df9d5eb26283fb737f491623395304d57497,Scalable Angular Discriminative Deep Metric Learning for Face Recognition,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+51cb2116c5a32d076f54b1a192cf4e850390f665,On Machine Learning and Structure for Mobile Robots,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+518edcd112991a1717856841c1a03dd94a250090,Rice University Endogenous Sparse Recovery by Eva L . Dyer,Rice University,Rice University,"Rice University, Stockton Drive, Houston, Harris County, Texas, 77005-1890, USA",29.71679145,-95.40478113,edu,
+518edcd112991a1717856841c1a03dd94a250090,Rice University Endogenous Sparse Recovery by Eva L . Dyer,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+51c4ecf4539f56c4b1035b890f743b3a91dd758b,Situational object boundary detection,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+51c4ecf4539f56c4b1035b890f743b3a91dd758b,Situational object boundary detection,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+51e9630e2d3f353d43834d06ef5b75fbccf0243a,Random Forests of Local Experts for Pedestrian Detection,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+5191781bbfe562cfee0c57675a9fbe79a85473b9,Face Flow,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+51c02f135d6c960b1141bde539059a279f9beb78,Subspace clustering using a symmetric low-rank representation,"Sichuan University, Chengdu","Sichuan Univ., Chengdu","四川大学(华西校区), 校东路, 武侯区, 武侯区 (Wuhou), 成都市 / Chengdu, 四川省, 610014, 中国",30.64276900,104.06751175,edu,
+510d2879c03a2a0fa01ac6d6b95eb1067f2d1bf9,Multimodal Hierarchical Reinforcement Learning Policy for Task-Oriented Visual Dialog,"University of California, Davis","University of California, Davis","University of California, Davis, Apiary Drive, Yolo County, California, 95616-5270, USA",38.53363490,-121.79077264,edu,
+510d2879c03a2a0fa01ac6d6b95eb1067f2d1bf9,Multimodal Hierarchical Reinforcement Learning Policy for Task-Oriented Visual Dialog,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+510d2879c03a2a0fa01ac6d6b95eb1067f2d1bf9,Multimodal Hierarchical Reinforcement Learning Policy for Task-Oriented Visual Dialog,"University of California, Davis","University of California, Davis","University of California, Davis, Apiary Drive, Yolo County, California, 95616-5270, USA",38.53363490,-121.79077264,edu,
+51683eac8bbcd2944f811d9074a74d09d395c7f3,"Automatic Analysis of Facial Actions: Learning from Transductive, Supervised and Unsupervised Frameworks",Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+51683eac8bbcd2944f811d9074a74d09d395c7f3,"Automatic Analysis of Facial Actions: Learning from Transductive, Supervised and Unsupervised Frameworks",Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+51683eac8bbcd2944f811d9074a74d09d395c7f3,"Automatic Analysis of Facial Actions: Learning from Transductive, Supervised and Unsupervised Frameworks",Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+51faacfa4fb1e6aa252c6970e85ff35c5719f4ff,Zoom-Net: Mining Deep Feature Interactions for Visual Relationship Recognition,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+51faacfa4fb1e6aa252c6970e85ff35c5719f4ff,Zoom-Net: Mining Deep Feature Interactions for Visual Relationship Recognition,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+51faacfa4fb1e6aa252c6970e85ff35c5719f4ff,Zoom-Net: Mining Deep Feature Interactions for Visual Relationship Recognition,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+51e8e8c4cac8260ef21c25f9f2a0a68aedbc6d58,Deep Generative Adversarial Compression Artifact Removal,University of Florence,University of Florence,"Piazza di San Marco, 4, 50121 Firenze FI, Italy",43.77764260,11.25976500,edu,
+512ef8e228329e02b651e2963260f569a72b4dde,3D Signatures for Fast 3D Face Recognition,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+513b8dc73a9fbc467e1ac130fe8c842b5839ca51,Dissertation Scalable Visual Navigation for Micro Aerial Vehicles using Geometric Prior Knowledge,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+51cb09ee04831b95ae02e1bee9b451f8ac4526e3,Beyond short snippets: Deep networks for video classification,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+51cb09ee04831b95ae02e1bee9b451f8ac4526e3,Beyond short snippets: Deep networks for video classification,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+51cb09ee04831b95ae02e1bee9b451f8ac4526e3,Beyond short snippets: Deep networks for video classification,Google,"Google, Inc.","1600 Amphitheatre Pkwy, Mountain View, CA 94043, USA",37.42199990,-122.08405750,company,"Google, Mountain View, CA"
+514a74aefb0b6a71933013155bcde7308cad2b46,Carnegie Mellon University Optimal Classifier Ensembles for Improved Biometric Verification,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+514a74aefb0b6a71933013155bcde7308cad2b46,Carnegie Mellon University Optimal Classifier Ensembles for Improved Biometric Verification,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+512f7507034e35d7259845bc5e4e174ef2f652cf,SPIGAN: Privileged Adversarial Learning from Simulation,Toyota Research Institute,Toyota Research Institute,"Toyota Research Institute, 4440, West El Camino Real, Los Altos, Santa Clara County, California, 94022, USA",37.40253645,-122.11655107,edu,
+512f7507034e35d7259845bc5e4e174ef2f652cf,SPIGAN: Privileged Adversarial Learning from Simulation,Toyota Research Institute,Toyota Research Institute,"Toyota Research Institute, 4440, West El Camino Real, Los Altos, Santa Clara County, California, 94022, USA",37.40253645,-122.11655107,edu,
+512f7507034e35d7259845bc5e4e174ef2f652cf,SPIGAN: Privileged Adversarial Learning from Simulation,Toyota Research Institute,Toyota Research Institute,"Toyota Research Institute, 4440, West El Camino Real, Los Altos, Santa Clara County, California, 94022, USA",37.40253645,-122.11655107,edu,
+51273a7abfe2018ccf2789a8e25d0c2ae565bc77,Learning Detailed Face Reconstruction from a Single Image,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+51a8dabe4dae157aeffa5e1790702d31368b9161,Face recognition under generic illumination based on harmonic relighting,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+516668a41d6106232a7cd56d20d3b3da343e5f36,Predicting Deeper into the Future of Semantic Segmentation,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+512b4c8f0f3fb23445c0c2dab768bcd848fa8392,Analysis and Synthesis of Facial Expressions by Feature- Points Tracking and Deformable Model,University of Tabriz,University of Tabriz,"دانشگاه تبریز, شهید ایرج خلوتی, کوی انقلاب, تبریز, بخش مرکزی, شهرستان تبریز, استان آذربایجان شرقی, 5166616471, ‏ایران‎",38.06125530,46.32984840,edu,
+51eba481dac6b229a7490f650dff7b17ce05df73,Situation Recognition: Visual Semantic Role Labeling for Image Understanding,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+5173a20304ea7baa6bfe97944a5c7a69ea72530f,Best Basis Selection Method Using Learning Weights for Face Recognition,Yonsei University,Yonsei University,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.56004060,126.93692480,edu,
+51ed4c92cab9336a2ac41fa8e0293c2f5f9bf3b6,"A Survey of Face Detection, Extraction and Recognition",Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+51236676c3bba877d82c31b393db1af4846527ac,Improving Sampling from Generative Autoencoders with Markov Chains,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+5147c249aadf9dd20d24a025995e79f5d6e4e5f4,Systems Analysis of the WEKA Machine Learning Workbench for Affective Computing,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+5141cf2e59fb2ec9bb489b9c1832447d3cd93110,Learning Person Trajectory Representations for Team Activity Analysis,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+5185f2a40836a754baaa7419a1abdd1e7ffaf2ad,A Multimodality Framework for Creating Speaker/Non-Speaker Profile Databases for Real-World Video,Beckman Institute,Beckman Institute,"Beckman Institute, The Presidents' Walk, Urbana, Champaign County, Illinois, 61801-2341, USA",40.11571585,-88.22750772,edu,
+5185f2a40836a754baaa7419a1abdd1e7ffaf2ad,A Multimodality Framework for Creating Speaker/Non-Speaker Profile Databases for Real-World Video,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+5185f2a40836a754baaa7419a1abdd1e7ffaf2ad,A Multimodality Framework for Creating Speaker/Non-Speaker Profile Databases for Real-World Video,Beckman Institute,Beckman Institute,"Beckman Institute, The Presidents' Walk, Urbana, Champaign County, Illinois, 61801-2341, USA",40.11571585,-88.22750772,edu,
+5185f2a40836a754baaa7419a1abdd1e7ffaf2ad,A Multimodality Framework for Creating Speaker/Non-Speaker Profile Databases for Real-World Video,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+5185f2a40836a754baaa7419a1abdd1e7ffaf2ad,A Multimodality Framework for Creating Speaker/Non-Speaker Profile Databases for Real-World Video,Beckman Institute,Beckman Institute,"Beckman Institute, The Presidents' Walk, Urbana, Champaign County, Illinois, 61801-2341, USA",40.11571585,-88.22750772,edu,
+5185f2a40836a754baaa7419a1abdd1e7ffaf2ad,A Multimodality Framework for Creating Speaker/Non-Speaker Profile Databases for Real-World Video,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+511a8cdf2127ef8aa07cbdf9660fe9e0e2dfbde7,A Community Detection Approach to Cleaning Extremely Large Face Database,"National University of Defense Technology, China","National University of Defence Technology, Changsha 410000, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.22902090,112.99483204,edu,
+518f3cb2c9f2481cdce7741c5a821c26378b75e9,The Unreasonable Effectiveness of Noisy Data for Fine-Grained Recognition,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+51d048b92f6680aca4a8adf07deb380c0916c808,"State of the Art on Monocular 3D Face Reconstruction, Tracking, and Applications",Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+51d048b92f6680aca4a8adf07deb380c0916c808,"State of the Art on Monocular 3D Face Reconstruction, Tracking, and Applications",Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+517e5e6d8e17511fd74fc58ef53bdd57bb7b4651,TallyQA: Answering Complex Counting Questions,Rochester Institute of Technology,Rochester Institute of Technology,"Rochester Institute of Technology (RIT), 1, Lomb Memorial Drive, Bailey, Henrietta Town, Monroe County, New York, 14623, USA",43.08250655,-77.67121663,edu,
+51972609a7c0070bf517c29f108f3e7240b94e59,3D Extended Histogram of Oriented Gradients (3DHOG) for Classification of Road Users in Urban Scenes,Kingston University,Kingston University,"Kingston University, Kingston Hill, Kingston Vale, Kingston-upon-Thames, London, Greater London, England, KT2 7TF, UK",51.42930860,-0.26840440,edu,
+5134353bd01c4ea36bd007c460e8972b1541d0ad,Face Recognition with Multi-Resolution Spectral Feature Images,Anhui University,Anhui University,"安徽大学(磬苑校区), 111, 九龙路, 弘泰苑, 合肥国家级经济技术开发区, 芙蓉社区, 合肥经济技术开发区, 合肥市区, 合肥市, 安徽省, 230601, 中国",31.76909325,117.17795091,edu,
+5134353bd01c4ea36bd007c460e8972b1541d0ad,Face Recognition with Multi-Resolution Spectral Feature Images,University of Newcastle,University of Newcastle,"University of Newcastle Central Coast Campus, Technology Bridge, Ourimbah, Central Coast, NSW, 2258, Australia",-33.35788990,151.37834708,edu,
+5134353bd01c4ea36bd007c460e8972b1541d0ad,Face Recognition with Multi-Resolution Spectral Feature Images,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+5160569ca88171d5fa257582d161e9063c8f898d,Local binary patterns as an image preprocessing for face authentication,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+3dc0a3803c6e1c3c32192a5378100faa2a57ee3e,FlipDial: A Generative Model for Two-Way Visual Dialogue,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+3dc0a3803c6e1c3c32192a5378100faa2a57ee3e,FlipDial: A Generative Model for Two-Way Visual Dialogue,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+3dc0a3803c6e1c3c32192a5378100faa2a57ee3e,FlipDial: A Generative Model for Two-Way Visual Dialogue,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+3dc0a3803c6e1c3c32192a5378100faa2a57ee3e,FlipDial: A Generative Model for Two-Way Visual Dialogue,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+3d18ce183b5a5b4dcaa1216e30b774ef49eaa46f,Face Alignment in Full Pose Range: A 3D Total Solution,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+3d18ce183b5a5b4dcaa1216e30b774ef49eaa46f,Face Alignment in Full Pose Range: A 3D Total Solution,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+3ddfa1e5e57c8f439796d092b3059075600198b1,Linear Representation Learning Using Sphere Factor Analysis,Florida State University,Florida State University,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA",30.44235995,-84.29747867,edu,
+3ddfa1e5e57c8f439796d092b3059075600198b1,Linear Representation Learning Using Sphere Factor Analysis,Florida State University,Florida State University,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA",30.44235995,-84.29747867,edu,
+3d143cfab13ecd9c485f19d988242e7240660c86,Discriminative Collaborative Representation for Classification,Kyoto University,Kyoto University,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.02749960,135.78154513,edu,
+3d143cfab13ecd9c485f19d988242e7240660c86,Discriminative Collaborative Representation for Classification,Osaka University,Osaka University,"大阪大学清明寮, 服部西町四丁目, 豊中市, 大阪府, 近畿地方, 日本",34.80809035,135.45785218,edu,
+3d88b669e7a412f765f1dfa54724937b8f563611,Reasoning About Fine-Grained Attribute Phrases Using Reference Games,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+3d3fdeb8792859543d791e34af4005a80f348eed,Children's racial bias in perceptions of others' pain.,University of Virginia,University of Virginia,"University of Virginia, Rotunda Alley, Carr's Hill, Albemarle County, Virginia, 22904-4119, USA",38.03536820,-78.50353220,edu,
+3d3fdeb8792859543d791e34af4005a80f348eed,Children's racial bias in perceptions of others' pain.,University of Virginia,University of Virginia,"University of Virginia, Rotunda Alley, Carr's Hill, Albemarle County, Virginia, 22904-4119, USA",38.03536820,-78.50353220,edu,
+3d69d634f79dfcc717e18f73c886b854a157a3ef,Mix-and-Match Tuning for Self-Supervised Semantic Segmentation,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+3dabf7d853769cfc4986aec443cc8b6699136ed0,Data Mining Spontaneous Facial Behavior with Automatic Expression Coding,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+3dabf7d853769cfc4986aec443cc8b6699136ed0,Data Mining Spontaneous Facial Behavior with Automatic Expression Coding,Sabanci University,Sabanci University,"Sabanci Universitesi, Preveze Cad., Orta Mahallesi, Tepeören, Tuzla, İstanbul, Marmara Bölgesi, 34953, Türkiye",40.89271590,29.37863323,edu,
+3d0a787aac818909a01e039dd1878fbee52e8765,Viewpoint-aware Attentive Multiview Inference for Vehicle Re-identification,University of East Anglia,University of East Anglia,"Arts (Lower Walkway Level), The Square, Westfield View, Earlham, Norwich, Norfolk, East of England, England, NR4 7TJ, UK",52.62215710,1.24091360,edu,
+3d5575e9ba02128d94c20330f4525fc816411ec2,Learning Video Object Segmentation from Static Images,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+3d741315108b95cdb56d312648f5ad1c002c9718,Image-based face recognition under illumination and pose variations.,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+3dc6f72bda1707e6a96174ff943991bb2b7ff319,Visalogy: Answering Visual Analogy Questions,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+3dc6f72bda1707e6a96174ff943991bb2b7ff319,Visalogy: Answering Visual Analogy Questions,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+3dc6f72bda1707e6a96174ff943991bb2b7ff319,Visalogy: Answering Visual Analogy Questions,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+3d22f972448a2336677ae6ff2877fae010c7dfa2,What is the Role of Recurrent Neural Networks (RNNs) in an Image Caption Generator?,University of Malta,University of Malta,"University of Malta, Ring Road, Japanese Garden, L-Imsida, Malta, MSD 9027, Malta",35.90232260,14.48341890,edu,
+3d22f972448a2336677ae6ff2877fae010c7dfa2,What is the Role of Recurrent Neural Networks (RNNs) in an Image Caption Generator?,University of Malta,University of Malta,"University of Malta, Ring Road, Japanese Garden, L-Imsida, Malta, MSD 9027, Malta",35.90232260,14.48341890,edu,
+3d275a4e4f44d452f21e0e0ff6145a5e18e6cf87,CIDEr: Consensus-based image description evaluation,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+3d1a6a5fd5915e0efb953ede5af0b23debd1fc7f,Bimodal Human Emotion Classification in the Speaker-Dependent Scenario,University of Peshawar,University of Peshawar,"University of Peshawar, Road 2, JAHANGIR ABAD / جهانگیرآباد, پشاور‎, Peshāwar District, خیبر پختونخوا, 2500, ‏پاکستان‎",34.00920040,71.48774947,edu,
+3d1a6a5fd5915e0efb953ede5af0b23debd1fc7f,Bimodal Human Emotion Classification in the Speaker-Dependent Scenario,University of Peshawar,University of Peshawar,"University of Peshawar, Road 2, JAHANGIR ABAD / جهانگیرآباد, پشاور‎, Peshāwar District, خیبر پختونخوا, 2500, ‏پاکستان‎",34.00920040,71.48774947,edu,
+3d58204f9f89b66db916278dc2d269e1f79ffc43,Learning and recognizing faces: from still images to video sequences,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+3d0379688518cc0e8f896e30815d0b5e8452d4cd,Autotagging Facebook: Social network context improves photo annotation,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+3d0379688518cc0e8f896e30815d0b5e8452d4cd,Autotagging Facebook: Social network context improves photo annotation,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+3d6b71b359d5db96a69ca322a5336110d89fb10d,Partially-Supervised Image Captioning,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+3d24b386d003bee176a942c26336dbe8f427aadd,Sequential Person Recognition in Photo Albums with a Recurrent Network,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+3d0f9a3031bee4b89fab703ff1f1d6170493dc01,SVDD-Based Illumination Compensation for Face Recognition,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+3d0f9a3031bee4b89fab703ff1f1d6170493dc01,SVDD-Based Illumination Compensation for Face Recognition,Korea University,Korea University,"고려대, 안암로, 제기동, 동대문구, 서울특별시, 02796, 대한민국",37.59014110,127.03623180,edu,
+3d0c21d4780489bd624a74b07e28c16175df6355,Deep or Shallow Facial Descriptors? A Case for Facial Attribute Classification and Face Retrieval,Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.64185301,edu,
+3d0c21d4780489bd624a74b07e28c16175df6355,Deep or Shallow Facial Descriptors? A Case for Facial Attribute Classification and Face Retrieval,Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.64185301,edu,
+3d25eb8241345f86101fda145d95d89c27844fd1,Distributed Submodular Maximization: Identifying Representative Elements in Massive Data,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+3d204dbc13f59f1a1678c773b30a1d85e305f548,Modern Facial Attractiveness: Investigating Gendered Preferences for Dominance and Personality,"University of Colorado, Boulder","University of Colorado, Boulder","Naropa University, Arapahoe Avenue, The Hill, Boulder, Boulder County, Colorado, 80309, USA",40.01407945,-105.26695944,edu,
+3d0660e18c17db305b9764bb86b21a429241309e,Counting Everyday Objects in Everyday Scenes,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+3df8cc0384814c3fb05c44e494ced947a7d43f36,The Pose Knows: Video Forecasting by Generating Pose Futures,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+3dfd94d3fad7e17f52a8ae815eb9cc5471172bc0,Face2Text: Collecting an Annotated Image Description Corpus for the Generation of Rich Face Descriptions,University of Malta,University of Malta,"University of Malta, Ring Road, Japanese Garden, L-Imsida, Malta, MSD 9027, Malta",35.90232260,14.48341890,edu,
+3dfd94d3fad7e17f52a8ae815eb9cc5471172bc0,Face2Text: Collecting an Annotated Image Description Corpus for the Generation of Rich Face Descriptions,University of Copenhagen,University of Copenhagen,"Københavns Universitet, Krystalgade, Kødbyen, Vesterbro, København, Københavns Kommune, Region Hovedstaden, 1165, Danmark",55.68015020,12.57232700,edu,
+3dbfd2fdbd28e4518e2ae05de8374057307e97b3,Improving Face Detection,University of Coimbra,University of Coimbra,"Reitoria da Universidade de Coimbra, Rua de Entre-Colégios, Almedina, Alta, Almedina, Sé Nova, Santa Cruz, Almedina e São Bartolomeu, CBR, Coimbra, Baixo Mondego, Centro, 3000-062, Portugal",40.20759510,-8.42566148,edu,
+3d4e6fb9c238c490f57aed72bcf9a81ea5f28972,A Discriminative Model for Learning Semantic and Geometric Interactions in Indoor Scenes∗,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+3d68cedd80babfbb04ab197a0b69054e3c196cd9,Bimodal information analysis for emotion recognition,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+3dfb822e16328e0f98a47209d7ecd242e4211f82,Cross-Age LFW: A Database for Studying Cross-Age Face Recognition in Unconstrained Environments,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+3dd5a70191613e0867d32f368fad6ec25c63cfb4,Tensor-Based Cortical Surface Morphometry via Weighted Spherical Harmonic Representation,University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+3d00fad9ebc9c4cd13bef710de91f4c9d1870887,LETHA: Learning from High Quality Inputs for 3D Pose Estimation in Low Quality Images,"CSIC-UPC, Barcelona, Spain","Institut de Robòtica i Informàtica Industrial, CSIC-UPC, Barcelona, Spain","C/ Llorens i Artigas 4-6, 08028 Barcelona, Spain",41.38295500,2.11573820,edu,
+3d00fad9ebc9c4cd13bef710de91f4c9d1870887,LETHA: Learning from High Quality Inputs for 3D Pose Estimation in Low Quality Images,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+3d00fad9ebc9c4cd13bef710de91f4c9d1870887,LETHA: Learning from High Quality Inputs for 3D Pose Estimation in Low Quality Images,École Polytechnique Fédérale de Lausanne,"École Polytechnique Fédérale de Lausanne (EPFL), Switzerland","Bibliothèque de l'EPFL, Route des Noyerettes, Ecublens, District de l'Ouest lausannois, Vaud, 1024, Schweiz/Suisse/Svizzera/Svizra",46.51841210,6.56846540,edu,
+3d33f16ffb3f56e63b8b5c51147b1a07840d734a,Developing Cognitions about Race: White 5- to 10-Year-Olds’ Perceptions of Hardship and Pain Running head: DEVELOPING COGNITIONS ABOUT RACE,University of Delaware,University of Delaware,"University of Delaware, South College Avenue, Newark, New Castle County, Delaware, 19713, USA",39.68103280,-75.75401840,edu,
+3d33f16ffb3f56e63b8b5c51147b1a07840d734a,Developing Cognitions about Race: White 5- to 10-Year-Olds’ Perceptions of Hardship and Pain Running head: DEVELOPING COGNITIONS ABOUT RACE,University of Delaware,University of Delaware,"University of Delaware, South College Avenue, Newark, New Castle County, Delaware, 19713, USA",39.68103280,-75.75401840,edu,
+3d948e4813a6856e5b8b54c20e50cc5050e66abe,A Smart Phone Image Database for Single Image Recapture Detection,"A*STAR, Singapore","Institute for Infocomm Research, A*STAR, Singapore","1 Fusionopolis Way, #21-01 Connexis, Singapore 138632",1.29889260,103.78731070,edu,
+3d948e4813a6856e5b8b54c20e50cc5050e66abe,A Smart Phone Image Database for Single Image Recapture Detection,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+3d948e4813a6856e5b8b54c20e50cc5050e66abe,A Smart Phone Image Database for Single Image Recapture Detection,New Jersey Institute of Technology,New Jersey Institute of Technology,"New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA",40.74230250,-74.17928172,edu,
+3dafecf541e7aba8b6431f6deb50d37e7ea8a8ff,Random Exemplar Hashing,National Tsing Hua University,National Tsing Hua University,"國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.79254840,120.99511830,edu,
+3d855f0665a912ff2c7736cecf9b8eae3effc281,Evaluation of Deep Learning based Pose Estimation for Sign Language,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+3d855f0665a912ff2c7736cecf9b8eae3effc281,Evaluation of Deep Learning based Pose Estimation for Sign Language,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+3d855f0665a912ff2c7736cecf9b8eae3effc281,Evaluation of Deep Learning based Pose Estimation for Sign Language,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+3d9db1cacf9c3bb7af57b8112787b59f45927355,Improving Medical Students’ Awareness of Their Non-Verbal Communication through Automated Non-Verbal Behavior Feedback,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+3d9db1cacf9c3bb7af57b8112787b59f45927355,Improving Medical Students’ Awareness of Their Non-Verbal Communication through Automated Non-Verbal Behavior Feedback,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+3d9db1cacf9c3bb7af57b8112787b59f45927355,Improving Medical Students’ Awareness of Their Non-Verbal Communication through Automated Non-Verbal Behavior Feedback,Istanbul Technical University,Istanbul Technical University,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye",41.10427915,29.02231159,edu,
+3d9db1cacf9c3bb7af57b8112787b59f45927355,Improving Medical Students’ Awareness of Their Non-Verbal Communication through Automated Non-Verbal Behavior Feedback,Victoria University of Wellington,Victoria University of Wellington,"Victoria University of Wellington, Waiteata Road, Aro Valley, Wellington, Wellington City, Wellington, 6040, New Zealand/Aotearoa",-41.29052775,174.76846919,edu,
+3d5a4b31e6e25cd0cfefa0b5925674377cdaea7d,Training VAEs Under Structured Residuals,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+3d5a4b31e6e25cd0cfefa0b5925674377cdaea7d,Training VAEs Under Structured Residuals,University of Bath,University of Bath,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK",51.37914420,-2.32523320,edu,
+580f86f1ace1feed16b592d05c2b07f26c429b4b,Dense-Captioning Events in Videos,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+58622d45472f454ea64fd456d9b52ed9f7dad7f4,Web-scale computer vision using MapReduce for multimedia data mining,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+58d47c187b38b8a2bad319c789a09781073d052d,Factorizable Net: An Efficient Subgraph-based Framework for Scene Graph Generation,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+58d47c187b38b8a2bad319c789a09781073d052d,Factorizable Net: An Efficient Subgraph-based Framework for Scene Graph Generation,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+58d47c187b38b8a2bad319c789a09781073d052d,Factorizable Net: An Efficient Subgraph-based Framework for Scene Graph Generation,MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+58eba9930b63cc14715368acf40017293b8dc94f,What Do I See? Modeling Human Visual Perception for Multi-person Tracking,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+589951bd421e2b701225fe6626fe980d94ad2770,Overview of ImageCLEF 2018 Medical Domain Visual Question Answering Task,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+58f445fa45d4bdafac43893a55b21348f9e1e6c2,To Join or Not to Join?: Thinking Twice about Joins before Feature Selection,University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+58be1f5b9437d2da2240c71ef56cbc06b34acff3,Optimizing Matrix Mapping with Data Dependent Kernel for Image Classification,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+582edc19f2b1ab2ac6883426f147196c8306685a,Do We Really Need to Collect Millions of Faces for Effective Face Recognition?,Open University of Israel,Open University of Israel,"האוניברסיטה הפתוחה, 15, אבא חושי, חיפה, גבעת דאונס, חיפה, מחוז חיפה, NO, ישראל",32.77824165,34.99565673,edu,
+586f7cafee0456c25e850dcf42b38195a8a80055,Generic Instance Search and Re-identification from One Example via Attributes and Categories,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+586f7cafee0456c25e850dcf42b38195a8a80055,Generic Instance Search and Re-identification from One Example via Attributes and Categories,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+58c3e04e8105cf8f2721c4a4a6487db752bb8852,Choose Your Neuron: Incorporating Domain Knowledge Through Neuron-Importance,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+58c3e04e8105cf8f2721c4a4a6487db752bb8852,Choose Your Neuron: Incorporating Domain Knowledge Through Neuron-Importance,Facebook,Facebook,"250 Bryant St, Mountain View, CA 94041, USA",37.39367170,-122.08072620,company,"Facebook, Mountain View, CA"
+58ed094f1359394fa216e957bb48a726862165ce,Neural Arithmetic Logic Units,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+58ef0c54f01073e43ae5e9662f450002540355e9,Semi-Supervised Zero-Shot Classification with Label Representation Learning,Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+58ef0c54f01073e43ae5e9662f450002540355e9,Semi-Supervised Zero-Shot Classification with Label Representation Learning,University of Alberta,University of Alberta,"University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada",53.52385720,-113.52282665,edu,
+582519e667fe1520dedaa04ffacbb2161b6a5b84,On GANs and GMMs,Hebrew University of Jerusalem,The Hebrew University of Jerusalem,"האוניברסיטה העברית בירושלים, Reagan Plaza, קרית מנחם בגין, הר הצופים, ירושלים, מחוז ירושלים, NO, ישראל",31.79185550,35.24472300,edu,
+582519e667fe1520dedaa04ffacbb2161b6a5b84,On GANs and GMMs,Hebrew University of Jerusalem,The Hebrew University of Jerusalem,"האוניברסיטה העברית בירושלים, Reagan Plaza, קרית מנחם בגין, הר הצופים, ירושלים, מחוז ירושלים, NO, ישראל",31.79185550,35.24472300,edu,
+5806ff24d62e868b73312f704e7ad8d74eecfbc0,Estimating 3D Human Pose from Single Images Using Iterative Refinement of the Prior,Swansea University,Swansea University,"Swansea University, University Footbridge, Sketty, Swansea, Wales, SA2 8PZ, UK",51.60915780,-3.97934429,edu,
+589b1677d6de28c47693c5816c32698860c32d10,"Tri-modal Person Re-identification with RGB, Depth and Thermal Features",Aalborg University,Aalborg University,"AAU, Pontoppidanstræde, Sønder Tranders, Aalborg, Aalborg Kommune, Region Nordjylland, 9220, Danmark",57.01590275,9.97532827,edu,
+5812d8239d691e99d4108396f8c26ec0619767a6,GhostVLAD for set-based face recognition,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+5830e0816667e08bb0efca538d892ea329307daa,Filling the Joints : Completion and Recovery of Incomplete 3 D Human Poses †,University of Crete,University of Crete,"House of Europe, Μακεδονίας, Ρέθυμνο, Δήμος Ρεθύμνης, Περιφερειακή Ενότητα Ρεθύμνου, Περιφέρεια Κρήτης, Κρήτη, 930100, Ελλάδα",35.37130240,24.47544080,edu,
+5809d5eedbbc5d9ec7e64dbe1c4a9ed4f126ffb6,Face Recognition with L1-norm Subspaces,SUNY Buffalo,SUNY Buffalo,"SUNY College at Buffalo, Academic Drive, Elmwood Village, Buffalo, Erie County, New York, 14222, USA",42.93362780,-78.88394479,edu,
+58d76380d194248b3bb291b8c7c5137a0a376897,FaceID-GAN : Learning a Symmetry Three-Player GAN for Identity-Preserving Face Synthesis,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+58d76380d194248b3bb291b8c7c5137a0a376897,FaceID-GAN : Learning a Symmetry Three-Player GAN for Identity-Preserving Face Synthesis,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+589d06db45e2319b29fc96582ea6c8be369f57ed,Convolutional LSTM Networks for Video-based Person Re-identification ∗,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+584909d2220b52c0d037e8761d80cb22f516773f,OCR-Free Transcript Alignment,Open University,The Open University,"The Open University, East Lane, Walton, Monkston, Milton Keynes, South East, England, MK7 6AE, UK",52.02453775,-0.70927481,edu,
+584909d2220b52c0d037e8761d80cb22f516773f,OCR-Free Transcript Alignment,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+584909d2220b52c0d037e8761d80cb22f516773f,OCR-Free Transcript Alignment,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+58303432a25cc86bfe9c77cf4c04f91695a24304,Deforming Autoencoders: Unsupervised Disentangling of Shape and Appearance,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+58303432a25cc86bfe9c77cf4c04f91695a24304,Deforming Autoencoders: Unsupervised Disentangling of Shape and Appearance,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+586bfd960cbdba91eecbb06de994dacd38b9ab0f,Unsupervised Surveillance Video Retrieval Based on Human Action and Appearance,"KTH Royal Institute of Technology, Stockholm","KTH Royal Institute of Technology, Stockholm","KTH, Teknikringen, Lärkstaden, Norra Djurgården, Östermalms stadsdelsområde, Sthlm, Stockholm, Stockholms län, Svealand, 114 28, Sverige",59.34986645,18.07063213,edu,
+5853875ecc400b3b365f73cbf44e8680da2bc5ca,Harvesting visual concepts for image search with complex queries,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+584f9ccba8576ecab61fd4575da7484c8f9a7bf2,Modular Generative Adversarial Networks,University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+580e48d3e7fe1ae0ceed2137976139852b1755df,THE EFFECTS OF MOTION AND ORIENTATION ON PERCEPTION OF FACIAL EXPRESSIONS AND FACE RECOGNITION by,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+588a21c28ea77a71efab5b2ed4f307eda49b6d1b,Adaptive color transformation for person re-identification in camera networks,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+58a3f4d9e1611e29e6378bc2d7cbad7600fe806e,Ofa : an Optimized Fuzzy Approach to Solve Head Pose Estimation Problem,Islamic Azad University,Islamic Azad University,"دانشگاه آزاد اسلامی, همدان, بخش مرکزی شهرستان همدان, شهرستان همدان, استان همدان, ‏ایران‎",34.84529990,48.55962120,edu,
+585260468d023ffc95f0e539c3fa87254c28510b,Cardea: Context-Aware Visual Privacy Protection from Pervasive Cameras,Hong Kong University of Science and Technology,Hong Kong University of Science and Technology,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.33863040,114.26203370,edu,
+5849635e61ed7d6358f65f5a228a5148e4fea3b8,Deep Watershed Transform for Instance Segmentation,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+58ecbe5e7d10b4176ceaecc36ae05e15908289c2,Intelligent Health Recommendation System for Computer Users,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+58ecbe5e7d10b4176ceaecc36ae05e15908289c2,Intelligent Health Recommendation System for Computer Users,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+58628e64e61bd2776a2a7258012eabe3c79ca90c,Active Grounding of Visual Situations,Portland State University,Portland State University,"Portland State University, Southwest Park Avenue, University District, Portland Downtown, Portland, Multnomah County, Oregon, 97201, USA",45.51181205,-122.68492999,edu,
+58628e64e61bd2776a2a7258012eabe3c79ca90c,Active Grounding of Visual Situations,Santa Fe Institute,Santa Fe Institute,"Santa Fe Institute, Hyde Park Road, Santa Fe, Santa Fe County, New Mexico, 87501, USA",35.70028780,-105.90864847,edu,
+58bac838068df358b536850a84ff806a23f061fc,An information theoretic approach to joint probabilistic face detection and tracking,University of Thessaloniki,University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+580f3ef6e77753ce0b157ebc02656f346080d9a8,A Robust and Scalable Approach to Face Identification,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+676a136f5978783f75b5edbb38e8bb588e8efbbe,Matrix completion for resolving label ambiguity,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+6746292c46975ba575a48c2b05b09ab056c26967,The Relationship between Anxiety and the Social Judgements of Approachability And Trustworthiness,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+67545a21b41ec6dd60376aff84bc0945cdb79590,Person-specific expression recognition with transfer learning,GE Global Research Center,GE Global Research Center,"GE Global Research Center, Aqueduct, Niskayuna, Schenectady County, New York, USA",42.82982480,-73.87719385,edu,
+676f9eabf4cfc1fd625228c83ff72f6499c67926,Face Identification and Clustering,State University of New Jersey,The State University of New Jersey,"Rutgers New Brunswick: Livingston Campus, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA",40.51865195,-74.44099801,edu,
+679b7fa9e74b2aa7892eaea580def6ed4332a228,Communication and automatic interpretation of affect from facial expressions,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+679b7fa9e74b2aa7892eaea580def6ed4332a228,Communication and automatic interpretation of affect from facial expressions,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+679b7fa9e74b2aa7892eaea580def6ed4332a228,Communication and automatic interpretation of affect from facial expressions,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+67620ee24ddefbbbdfcb35e385795afc9cc30df9,Statutory Declaration,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+678166999912492688251a1ce98dfb79d3c60ddd,Estimation of Squared-Loss Mutual Information from Positive and Unlabeled Data,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+67538df7950dbba0ab7885a23b7abf6f56f39537,Person Re-identification by Salience Matching,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+67c25e73b89166563cf5b70ffc043bbff23a321c,Handling of False Stationary Detections in Background Subtraction in Video Preprocessing,Czech Technical University,Czech Technical University,"České vysoké učení technické v Praze, Resslova, Nové Město, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 11121, Česko",50.07642960,14.41802312,edu,
+67296e6cd0084c301339889c4ef1f71a04406b3d,The Periodic Table of Data Structures,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+67ab22dff1c21e8680f94948d80b77314b325d66,Learning for MultiTask Classification of Visual Attributes,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+67ab22dff1c21e8680f94948d80b77314b325d66,Learning for MultiTask Classification of Visual Attributes,University of Ioannina,University of Ioannina,"Πανεπιστήμιο Ιωαννίνων, Πανεπιστημίου, Κάτω Νεοχωρόπουλο, Νεοχωρόπουλο, Δήμος Ιωαννιτών, Π.Ε. Ιωαννίνων, Περιφέρεια Ηπείρου, Ήπειρος - Δυτική Μακεδονία, 45110, Ελλάδα",39.61623060,20.83963011,edu,
+67e3fac91c699c085d47774990572d8ccdc36f15,Multiple Skip Connections and Dilated Convolutions for Semantic Segmentation,Chubu University,Chubu University,"中部大学, 国道19号, 春日井市, 愛知県, 中部地方, 487-8501, 日本",35.27426550,137.01327841,edu,
+67e3fac91c699c085d47774990572d8ccdc36f15,Multiple Skip Connections and Dilated Convolutions for Semantic Segmentation,Chubu University,Chubu University,"中部大学, 国道19号, 春日井市, 愛知県, 中部地方, 487-8501, 日本",35.27426550,137.01327841,edu,
+67e3fac91c699c085d47774990572d8ccdc36f15,Multiple Skip Connections and Dilated Convolutions for Semantic Segmentation,Chubu University,Chubu University,"中部大学, 国道19号, 春日井市, 愛知県, 中部地方, 487-8501, 日本",35.27426550,137.01327841,edu,
+67e3fac91c699c085d47774990572d8ccdc36f15,Multiple Skip Connections and Dilated Convolutions for Semantic Segmentation,Chubu University,Chubu University,"中部大学, 国道19号, 春日井市, 愛知県, 中部地方, 487-8501, 日本",35.27426550,137.01327841,edu,
+67134d7bf637f7ac4e354bcb374d7c28c7740ab8,Scale-Adaptive Low-Resolution Person Re-Identification via Learning a Discriminating Surface,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+67134d7bf637f7ac4e354bcb374d7c28c7740ab8,Scale-Adaptive Low-Resolution Person Re-Identification via Learning a Discriminating Surface,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+673952e036b92617d56deac4166aea3064da7fed,Neural Aesthetic Image Reviewer,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+67c3c1194ee72c54bc011b5768e153a035068c43,Street Scenes: towards scene understanding in still images,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+673d4885370b27c863e11a4ece9189a6a45931cc,Recurrent Residual Module for Fast Inference in Videos,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+67a3cc056a539d17f00b0be550a2fc7cb2118dc5,Scalable Image Retrieval by Sparse Product Quantization,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+6754c98ba73651f69525c770fb0705a1fae78eb5,Joint Cascade Face Detection and Alignment,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+6754c98ba73651f69525c770fb0705a1fae78eb5,Joint Cascade Face Detection and Alignment,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+67b38b88f3b3acb4ebba3c1941cbab7290bf59fa,Object-Based Visual Sentiment Concept Analysis and Application,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+672fae3da801b2a0d2bad65afdbbbf1b2320623e,Pose-Selective Max Pooling for Measuring Similarity,Johns Hopkins University,"Johns Hopkins University, 3400 N. Charles St, Baltimore, MD 21218, USA","3400 N Charles St, Baltimore, MD 21218, USA",39.32905300,-76.61942500,edu,
+67598e0b447294ad7414b8c73819c7ff395eb63e,Fast Semantic Segmentation on Video Using Block Motion-Based Feature Interpolation,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+6729895ecfd8eed9e73e898b54d6c7f18c095a91,Learning Neural Network Classifiers with Low Model Complexity,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+676733fb6d457401962305204155d6f4b7df5059,An overview of face de-identification in still images and videos,University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.05015580,14.46907327,edu,
+6729fec79b6cb472b4326745a67c6dde5772ed95,Large-Scale Visual Active Learning with Deep Probabilistic Ensembles,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+677ebde61ba3936b805357e27fce06c44513a455,Facial Expression Recognition Based on Facial Components Detection and HOG Features,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+67e488d4d31d65a31d4bc2a3337c587720af2a12,Cross-Class Sample Synthesis for Zero-shot Learning,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+671697cf84dfbe53a1cb0bed29b9f649c653bbc5,Multispectral Deep Neural Networks for Pedestrian Detection,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+0bc53b338c52fc635687b7a6c1e7c2b7191f42e5,Loglet SIFT for Part Description in Deformable Part Models: Application to Face Alignment,University of Warwick,University of Warwick,"University of Warwick, University Road, Kirby Corner, Cannon Park, Coventry, West Midlands Combined Authority, West Midlands, England, CV4 7AL, UK",52.37931310,-1.56042520,edu,
+0bd949f948f8f7afc0578d23d065b36c5c03c509,Regional Gating Neural Networks for Multi-label Image Classification,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+0bd949f948f8f7afc0578d23d065b36c5c03c509,Regional Gating Neural Networks for Multi-label Image Classification,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+0bd949f948f8f7afc0578d23d065b36c5c03c509,Regional Gating Neural Networks for Multi-label Image Classification,Tongji University,Tongji University,"同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国",31.28473925,121.49694909,edu,
+0b5e4ac8a04c0ffd0f9045901525201db03c789d,Annotated reconstruction of 3D spaces using drones,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+0b5e4ac8a04c0ffd0f9045901525201db03c789d,Annotated reconstruction of 3D spaces using drones,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+0b5e4ac8a04c0ffd0f9045901525201db03c789d,Annotated reconstruction of 3D spaces using drones,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+0bc3e6618786c5133b7f8b0033f8917e61b42a91,Enhancing Gloss-Based Corpora with Facial Features Using Active Appearance Models,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+0b62ece314846fa257d76e84dd9d002d1fcd21ae,Pedestrian Recognition with a Learned Metric,Beckman Institute,Beckman Institute,"Beckman Institute, The Presidents' Walk, Urbana, Champaign County, Illinois, 61801-2341, USA",40.11571585,-88.22750772,edu,
+0b62ece314846fa257d76e84dd9d002d1fcd21ae,Pedestrian Recognition with a Learned Metric,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+0b8b8776684009e537b9e2c0d87dbd56708ddcb4,Adversarial Discriminative Heterogeneous Face Recognition,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+0bcde128b115af74d0986306184502ae7c8822f6,Learning Scalable Discriminative Dictionary with Sample Relatedness,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+0b0eb6363a0c5b80c544aff091d547122986131b,Remembering faces with emotional expressions,Bournemouth University,Bournemouth University,"Bournemouth University, BU footpaths, Poole, South West England, England, BH10 4HX, UK",50.74223495,-1.89433739,edu,
+0b0eb6363a0c5b80c544aff091d547122986131b,Remembering faces with emotional expressions,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+0b0eb6363a0c5b80c544aff091d547122986131b,Remembering faces with emotional expressions,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+0b0eb6363a0c5b80c544aff091d547122986131b,Remembering faces with emotional expressions,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+0b79f0999eab1e2ac586a97dfc9a71809e7ab262,Joint Modeling of Algorithm Behavior and Image Quality for Algorithm Performance Prediction,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+0b78fd881d0f402fd9b773249af65819e48ad36d,Analysis and Modeling of Affective Audio Visual Speech Based on PAD Emotion Space,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+0b835284b8f1f45f87b0ce004a4ad2aca1d9e153,Cartooning for Enhanced Privacy in Lifelogging and Streaming Videos,Indiana University Bloomington,Indiana University Bloomington,"Indiana University Bloomington, East 17th Street, Bloomington, Monroe County, Indiana, 47408, USA",39.17720475,-86.51540030,edu,
+0be936107834d08f381018f374979e0949e6b932,Scalable Learning Through Error-correcting Codes based Clustering in Autonomous Systems,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+0ba87571341beaf6a5c9a30e049be7b1fc9a4c60,Choosing Linguistics over Vision to Describe Images,International Institute of Information Technology,International Institute of Information Technology,"International Institute of Information Technology, Hyderabad, Campus Road, Ward 105 Gachibowli, Greater Hyderabad Municipal Corporation West Zone, Hyderabad, Rangareddy District, Telangana, 500032, India",17.44549570,78.34854698,edu,
+0b609b048b75d45fb17bf1e2763d83735db7d7da,Region-based representations for face recognition,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+0bcc4ccbe7b12166bb6e8669ab6b5c7edfe6294e,Beyond Object Recognition: Visual Sentiment Analysis with Deep Coupled Adjective and Noun Neural Networks,South China University of Technology,South China University of Technology,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.05020420,113.39880323,edu,
+0bcc4ccbe7b12166bb6e8669ab6b5c7edfe6294e,Beyond Object Recognition: Visual Sentiment Analysis with Deep Coupled Adjective and Noun Neural Networks,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+0bc695e580d41ad163d5ec601bdcf384a0bf91cd,A Linear Approach of 3D Face Shape and Texture Recovery using a 3D Morphable Model,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+0b51197109813d921835cb9c4153b9d1e12a9b34,The University of Chicago Jointly Learning Multiple Similarity Metrics from Triplet Constraints a Dissertation Submitted to the Faculty of the Division of the Physical Sciences in Candidacy for the Degree of Master of Science Department of Computer Science By,University of Chicago,THE UNIVERSITY OF CHICAGO,"University of Chicago, South Ellis Avenue, Woodlawn, Chicago, Cook County, Illinois, 60637, USA",41.78468745,-87.60074933,edu,
+0bf3513d18ec37efb1d2c7934a837dabafe9d091,Robust Subspace Clustering via Thresholding Ridge Regression,"Sichuan University, Chengdu","Sichuan Univ., Chengdu","四川大学(华西校区), 校东路, 武侯区, 武侯区 (Wuhou), 成都市 / Chengdu, 四川省, 610014, 中国",30.64276900,104.06751175,edu,
+0b574f70d0965d66986bb9e89df693126652a4a6,Discriminative learning of multiset integrated canonical correlation analysis for feature fusion,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+0b3be3656a90edf9d8e7c88c89927eb42e674aa6,Automatic landmark annotation and dense correspondence registration for 3D human facial images,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+0be2245b2b016de1dcce75ffb3371a5e4b1e731b,On the Variants of the Self-Organizing Map That Are Based on Order Statistics,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+0b6314e9e741d19346d936eaaa7d6fcf46dd3ed7,Deep Learning in the Wild,Ulm University,Ulm University,"HNU, John-F.-Kennedy-Straße, Vorfeld, Wiley, Neu-Ulm, Landkreis Neu-Ulm, Schwaben, Bayern, 89231, Deutschland",48.38044335,10.01010115,edu,
+0b85b50b6ff03a7886c702ceabad9ab8c8748fdc,Is there a dynamic advantage for facial expressions?,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+0b84f07af44f964817675ad961def8a51406dd2e,Person Re-identification in the Wild,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+0b9c5bfb4d8349bb3f6ddd6fb612b7f9657c93f8,Inverting and Visualizing Features for Object Detection,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+0b242d5123f79defd5f775d49d8a7047ad3153bc,How Important Is Weight Symmetry in Backpropagation?,McGovern Institute for Brain Research,McGovern Institute for Brain Research,"McGovern Institute for Brain Research (MIT), Main Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.36262950,-71.09144810,edu,
+0ba1d855cd38b6a2c52860ae4d1a85198b304be4,Variable-state latent conditional random fields for facial expression recognition and action unit detection,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+0ba1d855cd38b6a2c52860ae4d1a85198b304be4,Variable-state latent conditional random fields for facial expression recognition and action unit detection,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+0b6f64c78c44dc043e2972fa7bfe2a5753768609,A future for learning semantic models of man-made environments,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+0b50e223ad4d9465bb92dbf17a7b79eccdb997fb,Implicit elastic matching with random projections for pose-variant face recognition,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+0b50e223ad4d9465bb92dbf17a7b79eccdb997fb,Implicit elastic matching with random projections for pose-variant face recognition,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+0badf61e8d3b26a0d8b60fe94ba5c606718daf0b,Facial Expression Recognition Using Deep Belief Network,Northwestern University,Northwestern University,"Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA",42.05511640,-87.67581113,edu,
+0badf61e8d3b26a0d8b60fe94ba5c606718daf0b,Facial Expression Recognition Using Deep Belief Network,Northwestern University,Northwestern University,"Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA",42.05511640,-87.67581113,edu,
+0b4901e6724e533f6d5d2510e1c0199eea898c81,High Quality Bidirectional Generative Adversarial Networks,Yonsei University,Yonsei University,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.56004060,126.93692480,edu,
+0b2966101fa617b90510e145ed52226e79351072,Beyond verbs: Understanding actions in videos with text,University of Manitoba,University of Manitoba,"University of Manitoba, Gillson Street, Normand Park, Saint Vital, Winnipeg, Manitoba, R3T 2N2, Canada",49.80915360,-97.13304179,edu,
+0b2966101fa617b90510e145ed52226e79351072,Beyond verbs: Understanding actions in videos with text,University of Manitoba,University of Manitoba,"University of Manitoba, Gillson Street, Normand Park, Saint Vital, Winnipeg, Manitoba, R3T 2N2, Canada",49.80915360,-97.13304179,edu,
+0b55b31765f101535eac0d50b9da377f82136d2f,Biometric binary string generation with detection rate optimized bit allocation,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+0b48e1bd69c5b87f197397f933ce7f5261fa00bb,A Joint Model of Language and Perception for Grounded Attribute Learning,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+0ba0f000baf877bc00a9e144b88fa6d373db2708,Facial Expression Recognition Based on Local Directional Pattern Using SVM Decision-level Fusion,Normal University,Normal University,"云南师范大学, 一二一大街, 志城家园, 五华区, 五华区 (Wuhua), 昆明市 (Kunming), 云南省, 650030, 中国",25.05805090,102.69552410,edu,
+0be80da851a17dd33f1e6ffdd7d90a1dc7475b96,Weighted Feature Gaussian Kernel SVM for Emotion Recognition,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+0be8b12f194fb604be69c139a195799e8ab53fd3,Talking Heads: Detecting Humans and Recognizing Their Interactions,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+0b183f5260667c16ef6f640e5da50272c36d599b,Spatio-temporal Event Classification Using Time-Series Kernel Based Structured Sparsity,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+0b183f5260667c16ef6f640e5da50272c36d599b,Spatio-temporal Event Classification Using Time-Series Kernel Based Structured Sparsity,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+0b183f5260667c16ef6f640e5da50272c36d599b,Spatio-temporal Event Classification Using Time-Series Kernel Based Structured Sparsity,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+0be764800507d2e683b3fb6576086e37e56059d1,Learning from Geometry,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+0baee7f68c08f1a6b5190755adebc57145d18ccf,Unsupervised Discovery of Mid-Level Discriminative Patches,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+0b642f6d48a51df64502462372a38c50df2051b1,A domain adaptation approach to improve speaker turn embedding using face representation,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+0b7d1386df0cf957690f0fe330160723633d2305,Learning American English Accents Using Ensemble Learning with GMMs,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+0b7d1386df0cf957690f0fe330160723633d2305,Learning American English Accents Using Ensemble Learning with GMMs,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+0b6616f3ebff461e4b6c68205fcef1dae43e2a1a,Rectifying Self Organizing Maps for Automatic Concept Learning from Web Images,Bilkent University,Bilkent University,"Bilkent Üniversitesi, 3. Cadde, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87204890,32.75395155,edu,
+0b6616f3ebff461e4b6c68205fcef1dae43e2a1a,Rectifying Self Organizing Maps for Automatic Concept Learning from Web Images,Bilkent University,Bilkent University,"Bilkent Üniversitesi, 3. Cadde, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87204890,32.75395155,edu,
+0b90cd2e8abd6b23e7f8133f02e3e6d121cf4903,Advanced Steel Microstructural Classification by Deep Learning Methods,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+0ba402af3b8682e2aa89f76bd823ddffdf89fa0a,Squared Earth Mover's Distance-based Loss for Training Deep Neural Networks,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+0ba402af3b8682e2aa89f76bd823ddffdf89fa0a,Squared Earth Mover's Distance-based Loss for Training Deep Neural Networks,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+0ba402af3b8682e2aa89f76bd823ddffdf89fa0a,Squared Earth Mover's Distance-based Loss for Training Deep Neural Networks,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+0bf0029c9bdb0ac61fda35c075deb1086c116956,Modelling of Orthogonal Craniofacial Profiles,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+93cba94ff0ff96f865ce24ea01e9c006369d75ff,Knowledge Aided Consistency for Weakly Supervised Phrase Grounding,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+935a7793cbb8f102924fa34fce1049727de865c2,Age estimation under changes in image quality: An experimental study,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+93b623ffb25fc32898f3c876c9aba0f5ec22d3ac,Understanding Visual Ads by Aligning Symbols and Objects using Co-Attention,SRI International,SRI International,"SRI International Building, West 1st Street, Menlo Park, San Mateo County, California, 94025, USA",37.45857960,-122.17560525,edu,
+9326d1390e8601e2efc3c4032152844483038f3f,Landmark Based Facial Component Reconstruction for Recognition across Pose,National Taiwan University of Science and Technology,National Taiwan University of Science and Technology,"臺科大, 43, 基隆路四段, 學府里, 下內埔, 大安區, 臺北市, 10607, 臺灣",25.01353105,121.54173736,edu,
+93a66d470c1840d11eaa96ead3b600450b3cc9f8,Gaze aversion as a cognitive load management strategy in autism spectrum disorder and Williams syndrome,Northumbria University,Northumbria University,"Northumbria University, Birkdale Close, High Heaton, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE7 7TP, UK",55.00306320,-1.57463231,edu,
+9397e7acd062245d37350f5c05faf56e9cfae0d6,DeepFruits: A Fruit Detection System Using Deep Neural Networks,Queensland University of Technology,Queensland University of Technology,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.47715625,153.02841004,edu,
+93747de3d40376761d1ef83ffa72ec38cd385833,Team members' emotional displays as indicators of team functioning.,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+93747de3d40376761d1ef83ffa72ec38cd385833,Team members' emotional displays as indicators of team functioning.,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+93747de3d40376761d1ef83ffa72ec38cd385833,Team members' emotional displays as indicators of team functioning.,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+93747de3d40376761d1ef83ffa72ec38cd385833,Team members' emotional displays as indicators of team functioning.,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+938d9dd3e35cb8af5fb6b8b3f7c7ff9d6ba8b253,"Corrigendum: Does Seeing Faces of Young Black Boys Facilitate the
+Identification of Threatening Stimuli?",University of Iowa,University of Iowa,"University of Iowa, Hawkeye Court, Iowa City, Johnson County, Iowa, 52246, USA",41.66590000,-91.57310307,edu,
+93721023dd6423ab06ff7a491d01bdfe83db7754,Robust Face Alignment Using Convolutional Neural Networks,Orange,Orange Labs,"78 Rue Olivier de Serres, 75015 Paris, France",48.83321220,2.29421550,company,"78 Rue Olivier de Serres, Paris, 75015"
+935e639bebf905af2e35e8b1e7aa0538d7122185,A Network Structure to Explicitly Reduce Confusion Errors in Semantic Segmentation,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+93a8dbd0823cc1924bfe37d88af36d4f0545bb12,RGB-D multi-view object detection with object proposals and shape context,George Mason University,George Mason University,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA",38.83133325,-77.30798839,edu,
+936a4af585f9a7d3b95c078ad31e8e41e22cb406,Adaptive sampling for large scale boosting,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+93971a49ef6cc88a139420349a1dfd85fb5d3f5c,Scalable Probabilistic Models: Applied to Face Identification in the Wild,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+93420d9212dd15b3ef37f566e4d57e76bb2fab2f,An All-In-One Convolutional Neural Network for Face Analysis,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+93af36da08bf99e68c9b0d36e141ed8154455ac2,Additive Margin Softmax for Face Verification,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"Columbus, OH 43210, USA",40.01419050,-83.03091430,edu,
+93af36da08bf99e68c9b0d36e141ed8154455ac2,Additive Margin Softmax for Face Verification,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+93af36da08bf99e68c9b0d36e141ed8154455ac2,Additive Margin Softmax for Face Verification,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"Columbus, OH 43210, USA",40.01419050,-83.03091430,edu,
+93f9607034c9b7b7693c60e9d2631adc15a2a524,Learning to Model the Tail,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+9358123bab4c98f75ac9b0c59b574ea2d7ff6b5a,Blind Recognition of Touched Keys on Mobile Devices,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+9358123bab4c98f75ac9b0c59b574ea2d7ff6b5a,Blind Recognition of Touched Keys on Mobile Devices,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+9358123bab4c98f75ac9b0c59b574ea2d7ff6b5a,Blind Recognition of Touched Keys on Mobile Devices,Southeast University,Southeast University,"SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国",32.05752790,118.78682252,edu,
+9358123bab4c98f75ac9b0c59b574ea2d7ff6b5a,Blind Recognition of Touched Keys on Mobile Devices,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+9358123bab4c98f75ac9b0c59b574ea2d7ff6b5a,Blind Recognition of Touched Keys on Mobile Devices,University of Macau,University of Macau,"研究生宿舍 Residência de Estudantes de Pós-Graduação da Universidade de Macau, 澳門大學 Universidade de Macau, 嘉模堂區 Nossa Senhora do Carmo, 氹仔 Taipa, Universidade de Macau em Ilha de Montanha 澳門大學橫琴校區, 中国",22.12401870,113.54510901,edu,
+937ffb1c303e0595317873eda5ce85b1a17f9943,Eyes do not lie: spontaneous versus posed smiles,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+9340efcb976f6c28c7242480502e16f795895f28,Learning Deep Energy Models: Contrastive Divergence vs. Amortized MLE,Dartmouth College,Dartmouth College,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA",43.70479270,-72.29259090,edu,
+934350482f3f19d431f35960a14dc249bd069303,Visual Question Answering as a Meta Learning Task,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+937729cea19a955147e059a6f0ef0571cc6785c4,An Analysis on Invertibility of Cancelable Biometrics based on BioHashing,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+93f37c69dd92c4e038710cdeef302c261d3a4f92,Compressed Video Action Recognition,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+93f37c69dd92c4e038710cdeef302c261d3a4f92,Compressed Video Action Recognition,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+9363bf52a5bb2ac94bf247ca56e7cf55fb29ee4e,Online Multi-person Tracking by Tracker Hierarchy,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+93a28e5131a762aeb888b76bcc6689e8696ab8d2,Pose Embeddings: A Deep Architecture for Learning to Match Human Poses,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+936227f7483938097cc1cdd3032016df54dbd5b6,Learning to generalize to new compositions in image understanding,Bar-Ilan University,Bar-Ilan University,"אוניברסיטת בר אילן, כביש גהה, גבעת שמואל, קריית מטלון, גבעת שמואל, מחוז תל אביב, NO, ישראל",32.06932925,34.84334339,edu,
+936227f7483938097cc1cdd3032016df54dbd5b6,Learning to generalize to new compositions in image understanding,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+939123cf21dc9189a03671484c734091b240183e,Within- and cross- database evaluations for face gender classification via befit protocols,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+938acfc9001174fdf9007e5dea2cfc993a0b9a09,Disentangling Factors of Variation by Mixing Them,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+936a60174ccc8f9448d38b269a53bc212125370e,Adapted Deep Embeddings: A Synthesis of Methods for k-Shot Inductive Transfer Learning,"University of Colorado, Boulder","University of Colorado, Boulder","Naropa University, Arapahoe Avenue, The Hill, Boulder, Boulder County, Colorado, 80309, USA",40.01407945,-105.26695944,edu,
+938ae9597f71a21f2e47287cca318d4a2113feb2,Classifier Learning with Prior Probabilities for Facial Action Unit Recognition,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+938ae9597f71a21f2e47287cca318d4a2113feb2,Classifier Learning with Prior Probabilities for Facial Action Unit Recognition,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+945cd58065f923e3cdc46a28c2b3f0c22ebfca9e,"Mutual eye gaze facilitates person categorization for typically developing children, but not for children with autism.",University of Aberdeen,University of Aberdeen,"University of Aberdeen, High Street, Old Aberdeen, Aberdeen, Aberdeen City, Scotland, AB24 3EJ, UK",57.16461430,-2.10186013,edu,
+946017d5f11aa582854ac4c0e0f1b18b06127ef1,Tracking Persons-of-Interest via Adaptive Discriminative Features,Hanyang University,Hanyang University,"한양대, 206, 왕십리로, 사근동, 성동구, 서울특별시, 04763, 대한민국",37.55572710,127.04366420,edu,
+946017d5f11aa582854ac4c0e0f1b18b06127ef1,Tracking Persons-of-Interest via Adaptive Discriminative Features,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+946017d5f11aa582854ac4c0e0f1b18b06127ef1,Tracking Persons-of-Interest via Adaptive Discriminative Features,"University of California, Merced","University of California, Merced","University of California, Merced, Ansel Adams Road, Merced County, California, USA",37.36566745,-120.42158888,edu,
+94e008564e4f091a887fdda379e7d26d90920c54,Stereological Study of Amygdala Glial Populations in Adolescents and Adults with Autism Spectrum Disorder,"University of California, Davis","University of California, Davis","University of California, Davis, Apiary Drive, Yolo County, California, 95616-5270, USA",38.53363490,-121.79077264,edu,
+944ab8c7d73bf2ca439205543c906b7797c269f5,Efficient Construction of Neighborhood Graphs by the Multiple Sorting Method,Tokyo Institute of Technology,Tokyo Institute of Technology,"東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本",35.51675380,139.48342251,edu,
+94f093ce723a7112d5698a1e88f437503d2d40af,Identifying The Most Informative Features Using A Structurally Interacting Elastic Net,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+94f093ce723a7112d5698a1e88f437503d2d40af,Identifying The Most Informative Features Using A Structurally Interacting Elastic Net,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+94d5ebe936c101699e678f6f0cddd8a732986814,What you see is what you get: contextual modulation of face scanning in typical and atypical development,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+94d5ebe936c101699e678f6f0cddd8a732986814,What you see is what you get: contextual modulation of face scanning in typical and atypical development,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+94d5ebe936c101699e678f6f0cddd8a732986814,What you see is what you get: contextual modulation of face scanning in typical and atypical development,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+94d0c0ed5bb9c13c2c8231adfdd9d96cf837514a,Generalized Background Subtraction Using Superpixels with Label Integrated Motion Estimation,Hanyang University,Hanyang University,"한양대, 206, 왕십리로, 사근동, 성동구, 서울특별시, 04763, 대한민국",37.55572710,127.04366420,edu,
+9445d51fd7977fb11a34a0e522efdcdee0d5cd95,First-Person Activity Forecasting with Online Inverse Reinforcement Learning,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+9405a9180139f23f4dd9d90aa4e86944b35b8c88,Weakly-Supervised Visual Grounding of Phrases with Linguistic Structures,"University of California, Davis","University of California, Davis","University of California, Davis, Apiary Drive, Yolo County, California, 95616-5270, USA",38.53363490,-121.79077264,edu,
+9405a9180139f23f4dd9d90aa4e86944b35b8c88,Weakly-Supervised Visual Grounding of Phrases with Linguistic Structures,"University of California, Davis","University of California, Davis","University of California, Davis, Apiary Drive, Yolo County, California, 95616-5270, USA",38.53363490,-121.79077264,edu,
+943262361be04747aba71d45fb4854cf72019851,Pose-Sensitive Embedding by Nonlinear NCA Regression,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+94d48f61cea7ce848af500f4a02f3ea4459bce27,A Neural Compositional Paradigm for Image Captioning,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+94d48f61cea7ce848af500f4a02f3ea4459bce27,A Neural Compositional Paradigm for Image Captioning,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+94bf6b804dfcedb0bf6b0d5c711bb7fe305f3704,Looking at faces: autonomous perspective invariant facial gaze analysis,Rochester Institute of Technology,Rochester Institute of Technology,"Rochester Institute of Technology (RIT), 1, Lomb Memorial Drive, Bailey, Henrietta Town, Monroe County, New York, 14623, USA",43.08250655,-77.67121663,edu,
+94106ca511a60fb4fa8402fef4bf22b9ebef83e9,Invariant Object Recognition Using Radon-based Transform,AGH University of Science and Technology,AGH University of Science and Technology,"AGH, Władysława Reymonta, Czarna Wieś, Krowodrza, Kraków, małopolskie, 30-059, RP",50.06570330,19.91895867,edu,
+947bd44270618f5a1b046b68f1ada3c11d97b440,GazeDPM: Early Integration of Gaze Information in Deformable Part Models,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+943a1e218b917172199e524944006aa349f58968,Joint Learning of Intrinsic Images and Semantic Segmentation,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+9487cea80f23afe9bccc94deebaa3eefa6affa99,"Fast, Dense Feature SDM on an iPhone",Queensland University of Technology,Queensland University of Technology,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.47715625,153.02841004,edu,
+9487cea80f23afe9bccc94deebaa3eefa6affa99,"Fast, Dense Feature SDM on an iPhone",Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+94490145def938ca1f8bb265d10b66924937a367,Iterative Local Model Selection for tracking and mapping,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+94490145def938ca1f8bb265d10b66924937a367,Iterative Local Model Selection for tracking and mapping,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+949699d0b865ef35b36f11564f9a4396f5c9cddb,"Processing of facial identity and expression: a psychophysical, physiological, and computational perspective.",Max Planck Institute for Biological Cybernetics,Max Planck Institute for Biological Cybernetics,"Max-Planck-Institut für Biologische Kybernetik, 8, Max-Planck-Ring, Max-Planck-Institut, Wanne, Tübingen, Landkreis Tübingen, Regierungsbezirk Tübingen, Baden-Württemberg, 72076, Deutschland",48.53691250,9.05922533,edu,
+949699d0b865ef35b36f11564f9a4396f5c9cddb,"Processing of facial identity and expression: a psychophysical, physiological, and computational perspective.",University of Zurich,University of Zurich,"ZHAW, Rosenstrasse, Heiligberg, Altstadt, Winterthur, Bezirk Winterthur, Zürich, 8400, Schweiz/Suisse/Svizzera/Svizra",47.49684760,8.72981767,edu,
+94e259345e82fa3015a381d6e91ec6cded3971b4,Classification of Photometric Factors Based on Photometric Linearization,Osaka University,Osaka University,"大阪大学清明寮, 服部西町四丁目, 豊中市, 大阪府, 近畿地方, 日本",34.80809035,135.45785218,edu,
+94e259345e82fa3015a381d6e91ec6cded3971b4,Classification of Photometric Factors Based on Photometric Linearization,Okayama University,Okayama University,"岡山大学, 津高法界院停車場線, 津島東2, 津島東, 北区, 岡山市, 岡山県, 中国地方, 700-0081, 日本",34.68933930,133.92222720,edu,
+949d20c44387918cde21f800d8d1cdf53f016bb4,Image-Image Domain Adaptation with Preserved Self-Similarity and Domain-Dissimilarity for Person Re-identification,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+949d20c44387918cde21f800d8d1cdf53f016bb4,Image-Image Domain Adaptation with Preserved Self-Similarity and Domain-Dissimilarity for Person Re-identification,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+94ac7f52e2e94ecf1fd3bac53028967b7dd62f36,Maximum-Margin Structured Learning with Deep Networks for 3D Human Pose Estimation,City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+944efd74c6fd812c6c495a11e7b045c9b778702e,RCAA: Relational Context-Aware Agents for Person Search,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+944efd74c6fd812c6c495a11e7b045c9b778702e,RCAA: Relational Context-Aware Agents for Person Search,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+944efd74c6fd812c6c495a11e7b045c9b778702e,RCAA: Relational Context-Aware Agents for Person Search,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+0e2af97f07625cb3cf5e30f1c9d807124cbbc850,From Large Scale Image Categorization to Entry-Level Categories,University of North Carolina at Chapel Hill,University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.91139710,-79.05045290,edu,
+0ee59e5baed4271ab85c85332550ca1539733a19,Atypical Modulations of N170 Component during Emotional Processing and Their Links to Social Behaviors in Ex-combatants,Maastricht University,Maastricht University,"UNS60, Professor Ten Hoorlaan, Randwyck, Maastricht, Limburg, Nederland, 6229EV, Nederland",50.83367120,5.71589000,edu,
+0ee59e5baed4271ab85c85332550ca1539733a19,Atypical Modulations of N170 Component during Emotional Processing and Their Links to Social Behaviors in Ex-combatants,Heriot-Watt University,Heriot-Watt University,"Heriot-Watt University - Edinburgh Campus, Third Gait, Currie, Gogarbank, City of Edinburgh, Scotland, EH14 4AS, UK",55.91029135,-3.32345777,edu,
+0e08cf0b19f0600dadce0f6694420d643ea9828b,The Middle Child Problem: Revisiting Parametric Min-Cut and Seeds for Object Proposals,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+0e08cf0b19f0600dadce0f6694420d643ea9828b,The Middle Child Problem: Revisiting Parametric Min-Cut and Seeds for Object Proposals,Oregon State University,Oregon State University,"OSU Beaver Store, 538, Southwest 6th Avenue, Portland Downtown, Portland, Multnomah County, Oregon, 97204, USA",45.51982890,-122.67797964,edu,
+0e41bb49b2b3b1fc4fadce856f164af51549bcb4,Max-Margin Structured Output Regression for Spatio-Temporal Action Localization,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+0e5dcc6ae52625fd0637c6bba46a973e46d58b9c,Pareto Models for Multiclass Discriminative Linear Dimensionality Reduction,University of Alberta,University of Alberta,"University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada",53.52385720,-113.52282665,edu,
+0e5dcc6ae52625fd0637c6bba46a973e46d58b9c,Pareto Models for Multiclass Discriminative Linear Dimensionality Reduction,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+0e5dcc6ae52625fd0637c6bba46a973e46d58b9c,Pareto Models for Multiclass Discriminative Linear Dimensionality Reduction,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+0eae752f8949d97e41831e509da721ad673dfc2b,Beyond Tree Structure Models: A New Occlusion Aware Graphical Model for Human Pose Estimation,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+0e312aee6b03a697d112a1bd8d25d84d1a122d8e,An inner-loop free solution to inverse problems using deep neural networks,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+0e312aee6b03a697d112a1bd8d25d84d1a122d8e,An inner-loop free solution to inverse problems using deep neural networks,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+0e312aee6b03a697d112a1bd8d25d84d1a122d8e,An inner-loop free solution to inverse problems using deep neural networks,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+0e312aee6b03a697d112a1bd8d25d84d1a122d8e,An inner-loop free solution to inverse problems using deep neural networks,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+0e73d2b0f943cf8559da7f5002414ccc26bc77cd,Similarity Comparisons for Interactive Fine-Grained Categorization,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+0e73d2b0f943cf8559da7f5002414ccc26bc77cd,Similarity Comparisons for Interactive Fine-Grained Categorization,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+0ee7990e2ae054aab5f1fc08670fe5eddb96fb19,Learning Latent Sub-events in Activity Videos Using Temporal Attention Filters,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+0e67717484684d90ae9d4e1bb9cdceb74b194910,Mining Pixels: Weakly Supervised Semantic Segmentation Using Image Labels,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+0e25527a7df08c8cda5e86c7a255806289b0ff64,Automatic Eye Detection using Fast Corner Detector of North East Indian (NEI) Face Images,Jadavpur University,Jadavpur University,"Jadavpur University, Chingrighata Flyover, Basani Devi Colony, Kolkata, Hāora, West Bengal, 700098, India",22.56115370,88.41310194,edu,
+0e8defaafbbde9031fb2942eccaf980b2f20f04e,Regenerative morphing,Weizmann Institute of Science,Weizmann Institute of Science,"מכון ויצמן למדע, שדרת מרכוס זיו, מעונות שיין, אחוזות הנשיא, רחובות, מחוז המרכז, NO, ישראל",31.90784990,34.81334092,edu,
+0e8defaafbbde9031fb2942eccaf980b2f20f04e,Regenerative morphing,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+0ed0e48b245f2d459baa3d2779bfc18fee04145b,Semi-Supervised Dimensionality Reduction,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+0ed0e48b245f2d459baa3d2779bfc18fee04145b,Semi-Supervised Dimensionality Reduction,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+0e62b741d4421b284cc6a27cea0b1e95b799882e,Forest Walk Methods for Localizing Body Joints from Single Depth Image,Hankuk University of Foreign Studies,Hankuk University of Foreign Studies,"외대앞, 휘경로, 이문동, 이문2동, 동대문구, 서울특별시, 02407, 대한민국",37.59539790,127.06304990,edu,
+0e62b741d4421b284cc6a27cea0b1e95b799882e,Forest Walk Methods for Localizing Body Joints from Single Depth Image,Ajou University,Ajou University,"아주대학교, 성호대교, 이의동, 영통구, 수원시, 경기, 16499, 대한민국",37.28300030,127.04548469,edu,
+0ef96d97365899af797628e80f8d1020c4c7e431,Improving the Speed of Kernel PCA on Large Scale Datasets,Monash University,Monash University,"Monash University, Mile Lane, Parkville, City of Melbourne, Victoria, 3000, Australia",-37.78397455,144.95867433,edu,
+0e7f277538142fb50ce2dd9179cffdc36b794054,Combining image captions and visual analysis for image concept classification,Queen Mary University,Queen Mary University,"Universitatea Creștină Partium - Clădirea Sulyok, 27, Strada Primăriei, Orașul Nou, Oradea, Bihor, 410209, România",47.05702220,21.92270900,edu,
+0e7f277538142fb50ce2dd9179cffdc36b794054,Combining image captions and visual analysis for image concept classification,Queen Mary University,Queen Mary University,"Universitatea Creștină Partium - Clădirea Sulyok, 27, Strada Primăriei, Orașul Nou, Oradea, Bihor, 410209, România",47.05702220,21.92270900,edu,
+0e7d8ae484d8a0ecf65855dad9e7514730b4e07f,Knowing a Good HOG Filter When You See It: Efficient Selection of Filters for Detection,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+0e7d8ae484d8a0ecf65855dad9e7514730b4e07f,Knowing a Good HOG Filter When You See It: Efficient Selection of Filters for Detection,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+0ee91554aedcb2cc4e2d2a15eb07eed1bbbac2c2,Fast Energy Minimization Using Learned State Filters,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+0e46943b2b12a8df6a62202651555a1d464cebec,Person Re-identification by Efficient Impostor-Based Metric Learning,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+0e923b74fd41f73f57e22f66397feeea67e834f0,Invariant encoding schemes for visual recognition,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+0e1f6abdd24a4e929511740168e2f67351751302,Zero-Shot Recognition via Direct Classifier Learning with Transferred Samples and Pseudo Labels,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+0e1f6abdd24a4e929511740168e2f67351751302,Zero-Shot Recognition via Direct Classifier Learning with Transferred Samples and Pseudo Labels,Northumbria University,Northumbria University,"Northumbria University, Birkdale Close, High Heaton, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE7 7TP, UK",55.00306320,-1.57463231,edu,
+0e96646eb97bade66848b1fe50a9fc6ab946ed42,Learning Like a Child: Fast Novel Visual Concept Learning from Sentence Descriptions of Images,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+0ec0fc9ed165c40b1ef4a99e944abd8aa4e38056,The Role of Perspective-Taking on Ability to Recognize Fear,Virginia Polytechnic Institute and State University,Virginia Polytechnic Institute and State University,"Virginia Polytechnic Institute and State University, Duck Pond Drive, Blacksburg, Montgomery County, Virginia, 24061-9517, USA",37.21872455,-80.42542519,edu,
+0ec0fc9ed165c40b1ef4a99e944abd8aa4e38056,The Role of Perspective-Taking on Ability to Recognize Fear,Virginia Polytechnic Institute and State University,Virginia Polytechnic Institute and State University,"Virginia Polytechnic Institute and State University, Duck Pond Drive, Blacksburg, Montgomery County, Virginia, 24061-9517, USA",37.21872455,-80.42542519,edu,
+0ec0fc9ed165c40b1ef4a99e944abd8aa4e38056,The Role of Perspective-Taking on Ability to Recognize Fear,Virginia Tech Carilion Research Institute,Virginia Tech Carilion Research Institute,"Virginia Tech Carilion Research Institute, South Jefferson Street, Crystal Spring, Roanoke, Virginia, 24016, USA",37.25795480,-79.94233291,edu,
+0ec0fc9ed165c40b1ef4a99e944abd8aa4e38056,The Role of Perspective-Taking on Ability to Recognize Fear,Virginia Polytechnic Institute and State University,Virginia Polytechnic Institute and State University,"Virginia Polytechnic Institute and State University, Duck Pond Drive, Blacksburg, Montgomery County, Virginia, 24061-9517, USA",37.21872455,-80.42542519,edu,
+0e652a99761d2664f28f8931fee5b1d6b78c2a82,Making a Science of Model Search,MIT,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+0e8f56d7e0b639e182d1d9693b79653cfd98aaa3,Auto-colorization Exploiting Annotated Dataset,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+0e64a202a673ebb9265d600d97c2ccff8acf64c9,Side-View Face Recognition,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+0e0179eb4b43016691f0f1473a08089dda21f8f0,The Art of Detection,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+0e950d7ad2282d49e8cada91d5d6b50b42a23979,Attribute Recognition by Joint Recurrent Learning of Context and Correlation,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+0ea7b7fff090c707684fd4dc13e0a8f39b300a97,Integrated Face Analytics Networks through Cross-Dataset Hybrid Training,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+0ea7b7fff090c707684fd4dc13e0a8f39b300a97,Integrated Face Analytics Networks through Cross-Dataset Hybrid Training,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+0ea7b7fff090c707684fd4dc13e0a8f39b300a97,Integrated Face Analytics Networks through Cross-Dataset Hybrid Training,Beijing Institute of Technology,Beijing Institute of Technology University,"北京理工大学, 5, 中关村南大街, 中关村, 稻香园南社区, 海淀区, 北京市, 100872, 中国",39.95866520,116.30971281,edu,
+0e36ada8cb9c91f07c9dcaf196d036564e117536,Much Ado About Time: Exhaustive Annotation of Temporal Data,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+0eea197144b631b33857821559886b6ea063b68c,Robust Multi-resolution Pedestrian Detection in Traffic Scenes,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+0ebc50b6e4b01eb5eba5279ce547c838890b1418,Similarity-Preserving Binary Signature for Linear Subspaces,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+0ebc50b6e4b01eb5eba5279ce547c838890b1418,Similarity-Preserving Binary Signature for Linear Subspaces,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+0ebc50b6e4b01eb5eba5279ce547c838890b1418,Similarity-Preserving Binary Signature for Linear Subspaces,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+0e41758075d91e58412f012c2d03531c5baf7cdc,Visual category recognition using Spectral Regression and Kernel Discriminant Analysis,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+0e41758075d91e58412f012c2d03531c5baf7cdc,Visual category recognition using Spectral Regression and Kernel Discriminant Analysis,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+0ec1673609256b1e457f41ede5f21f05de0c054f,Blessing of Dimensionality: High-Dimensional Feature and Its Efficient Compression for Face Verification,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+0e815b773e480ef20a680dd35cd72ab26a141d2f,Person re-identification via efficient inference in fully connected CRF,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+0e815b773e480ef20a680dd35cd72ab26a141d2f,Person re-identification via efficient inference in fully connected CRF,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+0ee3aa2a78f9680bb65a823bd9195c879572ec1c,What Makes an Object Memorable?,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+0ee3aa2a78f9680bb65a823bd9195c879572ec1c,What Makes an Object Memorable?,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+0ee3aa2a78f9680bb65a823bd9195c879572ec1c,What Makes an Object Memorable?,"University of California, Merced","University of California, Merced","University of California, Merced, Ansel Adams Road, Merced County, California, USA",37.36566745,-120.42158888,edu,
+0edc70f3b5550f997d9011c6d4860feec136cea9,Face Recognition: Holistic Approaches an Analytical Survey,"COMSATS Institute of Information Technology, Lahore",COMSATS Institute of Information Technology,"COMSATS Institute of Information Technology, Ali Akbar Road, Dawood Residency, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54700, ‏پاکستان‎",31.40063320,74.21372960,edu,
+0edc70f3b5550f997d9011c6d4860feec136cea9,Face Recognition: Holistic Approaches an Analytical Survey,"COMSATS Institute of Information Technology, Lahore",COMSATS Institute of Information Technology,"COMSATS Institute of Information Technology, Ali Akbar Road, Dawood Residency, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54700, ‏پاکستان‎",31.40063320,74.21372960,edu,
+0ea38a5ba0c8739d1196da5d20efb13406bb6550,Relative attributes,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+0e21c9e5755c3dab6d8079d738d1188b03128a31,Constrained Clustering and Its Application to Face Clustering in Videos,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+0e1403f2182609fb64ed72913f7294fea7d02bd6,Learning Support Vectors for Face Verification and Recognition,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+0e1403f2182609fb64ed72913f7294fea7d02bd6,Learning Support Vectors for Face Verification and Recognition,Czech Technical University,Czech Technical University,"České vysoké učení technické v Praze, Resslova, Nové Město, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 11121, Česko",50.07642960,14.41802312,edu,
+0e93a5a7f6dbdb3802173dca05717d27d72bfec0,Attribute Recognition by Joint Recurrent Learning of Context and Correlation,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+0ed1c1589ed284f0314ed2aeb3a9bbc760dcdeb5,Max-Margin Early Event Detectors,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+0e4c87100aa7f585ccd969aa71dd5dfdf26e732d,Laplacian regularized low rank subspace clustering,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+0e9f55c0ff758a91c6764f833b14b09ca788db20,Locality preserving hashing,"University of California, Merced","University of California, Merced","University of California, Merced, Ansel Adams Road, Merced County, California, USA",37.36566745,-120.42158888,edu,
+0ec03a13063e5811ec9461cf7af04f4f3110ccaa,Visual Question Answering with Question Representation Update (QRU),Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+0ec2049a1dd7ae14c7a4c22c5bcd38472214f44d,Fast Subspace Search via Grassmannian Based Hashing,University of Minnesota,University of Minnesota,"WeismanArt, 333, East River Parkway, Marcy-Holmes, Phillips, Minneapolis, Hennepin County, Minnesota, 55455, USA",44.97308605,-93.23708813,edu,
+0ec2049a1dd7ae14c7a4c22c5bcd38472214f44d,Fast Subspace Search via Grassmannian Based Hashing,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+0ec2049a1dd7ae14c7a4c22c5bcd38472214f44d,Fast Subspace Search via Grassmannian Based Hashing,University of Minnesota,University of Minnesota,"WeismanArt, 333, East River Parkway, Marcy-Holmes, Phillips, Minneapolis, Hennepin County, Minnesota, 55455, USA",44.97308605,-93.23708813,edu,
+0e986ac9484e0587b6ccf01a5db735b9bf185157,Refining Architectures of Deep Convolutional Neural Networks,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+0ec67c69e0975cfcbd8ba787cc0889aec4cc5399,Locating Salient Object Features,Manchester University,Manchester University,"Manchester Metropolitan University – All Saints Campus, Lower Ormond Street, Hulme, Manchester, Greater Manchester, North West England, England, M15 6BX, UK",53.47020165,-2.23932183,edu,
+0e1983e9d0e8cb4cbffef7af06f6bc8e3f191a64,Estimating illumination parameters in real space with application to image relighting,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+0ef0db6b7bf2244459497a3bf24e56c7850cf369,Weakly Supervised Phrase Localization with Multi-Scale Anchored Transformer Network,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+0ee5c4112208995bf2bb0fb8a87efba933a94579,Fashion is Taking Shape: Understanding Clothing Preference Based on Body Shape From Online Sources,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+0e1a18576a7d3b40fe961ef42885101f4e2630f8,Automated Detection and Identification of Persons in Video,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+0e01db4197f71450118f81ae5a69ce4916b46421,Weakly-supervised Discovery of Visual Pattern Configurations,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+6044b30751c19b3231782fb0475c9ca438940690,Real-time Action Recognition with Dissimilarity-based Training of Specialized Module Networks,Alexandria University,Alexandria University,"جامعة الإسكندرية, الكورنيش, إبراهيمية, الإسكندرية, 21522, مصر",31.21051105,29.91314562,edu,
+60a006bdfe5b8bf3243404fae8a5f4a9d58fa892,A reference-based framework for pose invariant face recognition,University of North Carolina at Chapel Hill,University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.91139710,-79.05045290,edu,
+6043006467fb3fd1e9783928d8040ee1f1db1f3a,Face recognition with learning-based descriptor,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+6043006467fb3fd1e9783928d8040ee1f1db1f3a,Face recognition with learning-based descriptor,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+6043006467fb3fd1e9783928d8040ee1f1db1f3a,Face recognition with learning-based descriptor,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+6043006467fb3fd1e9783928d8040ee1f1db1f3a,Face recognition with learning-based descriptor,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+60eea6b85ba791ab85b198cfe7473adec29bcfd2,Video Question Answering via Attribute-Augmented Attention Network Learning,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+60aa6b163fd8bc16965807fdd47634bedb04989d,Autonomous exploration using rapid perception of low-resolution image information,Clemson University,Clemson University,"Clemson University, Old Stadium Road, Clemson Heights, Pickens County, South Carolina, 29631, USA",34.66869155,-82.83743476,edu,
+607bfdbf583c4dfa29491eedc3934f2293e1fa96,A common allele in the oxytocin receptor gene (OXTR) impacts prosocial temperament and human hypothalamic-limbic structure and function.,National Institutes of Health,National Institutes of Health,"NIH, Pooks Hill, Bethesda, Montgomery County, Maryland, USA",39.00041165,-77.10327775,edu,
+607bfdbf583c4dfa29491eedc3934f2293e1fa96,A common allele in the oxytocin receptor gene (OXTR) impacts prosocial temperament and human hypothalamic-limbic structure and function.,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+600f164c81dbaa0327e7bd659fd9eb7f511f9e9a,A benchmark study of large-scale unconstrained face recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+60c24e44fce158c217d25c1bae9f880a8bd19fc3,Controllable Image-to-Video Translation: A Case Study on Facial Expression Generation,MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+603bfd8e1230816526e213855c5de172443f9ee1,CAKE: a Compact and Accurate K-dimensional representation of Emotion,Orange,Orange Labs,"78 Rue Olivier de Serres, 75015 Paris, France",48.83321220,2.29421550,company,"78 Rue Olivier de Serres, Paris, 75015"
+601e0569028924cca9b5f1afbca6f52aa7212c39,Single View Stereo Matching,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+601e0569028924cca9b5f1afbca6f52aa7212c39,Single View Stereo Matching,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+60542b1a857024c79db8b5b03db6e79f74ec8f9f,Learning to Detect Human-Object Interactions,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+60d4cef56efd2f5452362d4d9ac1ae05afa970d1,Learning End-to-end Video Classification with Rank-Pooling,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+60d4cef56efd2f5452362d4d9ac1ae05afa970d1,Learning End-to-end Video Classification with Rank-Pooling,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+608dede56161fd5f76bcf9228b4dd8c639d65b02,SphereReID: Deep Hypersphere Manifold Embedding for Person Re-Identification,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+606cfdcc43203351dbb944a3bb3719695e557e37,Ex Paucis Plura : Learning Affordance Segmentation from Very Few Examples,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+60c699b9ec71f7dcbc06fa4fd98eeb08e915eb09,Long-term video interpolation with bidirectional predictive network,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+60970e124aa5fb964c9a2a5d48cd6eee769c73ef,Subspace Clustering for Sequential Data,Charles Sturt University,Charles Sturt University,"Charles Sturt University, Wagga Wagga, NSW, 2678, Australia",-35.06360710,147.35522340,edu,
+60593c176ba39e8cb63ba6a7bf936553984bb67c,From Categories to Individuals in Real Time -- A Unified Boosting Approach,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+604b79c12304af6826db4ea844dec6b2a2ca4e50,Faceted Navigation for Browsing Large Video Collection,Dublin City University,DUBLIN CITY UNIVERSITY,"Dublin City University Glasnevin Campus, Lower Car Park, Wad, Whitehall A ED, Dublin 9, Dublin, County Dublin, Leinster, D09 FW22, Ireland",53.38522185,-6.25740874,edu,
+603dee8661aa9bf0d7af6c61fe5fa2e85227f166,Customizing First Person Image Through Desired Actions,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+603dee8661aa9bf0d7af6c61fe5fa2e85227f166,Customizing First Person Image Through Desired Actions,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+603dee8661aa9bf0d7af6c61fe5fa2e85227f166,Customizing First Person Image Through Desired Actions,University of Minnesota,University of Minnesota,"WeismanArt, 333, East River Parkway, Marcy-Holmes, Phillips, Minneapolis, Hennepin County, Minnesota, 55455, USA",44.97308605,-93.23708813,edu,
+608305c25aae19dd346153dadedac851f0b7f9ff,A Fisher Kernel Approach for Multiple Instance Based Object Retrieval in Video Surveillance,University Politehnica of Bucharest,University Politehnica of Bucharest,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România",44.43918115,26.05044565,edu,
+60a20d5023f2bcc241eb9e187b4ddece695c2b9b,Invertible Nonlinear Dimensionality Reduction via Joint Dictionary Learning,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+60115d62f7d0e918af4d3040624df57353f76053,Are You Talking to Me? Reasoned Visual Dialog Generation through Adversarial Learning,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+60115d62f7d0e918af4d3040624df57353f76053,Are You Talking to Me? Reasoned Visual Dialog Generation through Adversarial Learning,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+6066d0a5f1123b9e158185113c1e18c4687610c4,Hierarchical Feature Pooling with Structure Learning: A New Method for Pedestrian Detection,University of Missouri,University of Missouri,"L1, Maguire Boulevard, Lemone Industrial Park, Columbia, Boone County, Missouri, 65201, USA",38.92676100,-92.29193783,edu,
+601c9ac5859021c5c1321adeb38b177ebad346f0,Salient Color Names for Person Re-identification,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+601c9ac5859021c5c1321adeb38b177ebad346f0,Salient Color Names for Person Re-identification,"University of California, Merced","University of California, Merced","University of California, Merced, Ansel Adams Road, Merced County, California, USA",37.36566745,-120.42158888,edu,
+60737db62fb5fab742371709485e4b2ddf64b7b2,Crowdsourced Selection on Multi-Attribute Data,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+60c3b9a6622e359a90c384bf81fc0d46caacf469,Multimodal Visual Concept Learning with Weakly Supervised Techniques,National Technical University of Athens,National Technical University of Athens,"Εθνικό Μετσόβιο Πολυτεχνείο, Στουρνάρη, Μουσείο, Αθήνα, Δήμος Αθηναίων, Π.Ε. Κεντρικού Τομέα Αθηνών, Περιφέρεια Αττικής, Αττική, 11250, Ελλάδα",37.98782705,23.73179733,edu,
+603ecf880ad770b566c4ffa49ffeb06340375194,An expectation maximization approach to the synergy between image segmentation and object categorization,National Technical University of Athens,National Technical University of Athens,"Εθνικό Μετσόβιο Πολυτεχνείο, Στουρνάρη, Μουσείο, Αθήνα, Δήμος Αθηναίων, Π.Ε. Κεντρικού Τομέα Αθηνών, Περιφέρεια Αττικής, Αττική, 11250, Ελλάδα",37.98782705,23.73179733,edu,
+6008213e4270e88cb414459de759c961469b92dd,"Multi-Evidence Filtering and Fusion for Multi-Label Classification, Object Detection and Semantic Segmentation Based on Weakly Supervised Learning",University of Hong Kong,University of Hong Kong,"海洋科學研究所 The Swire Institute of Marine Science, 鶴咀道 Cape D'Aguilar Road, 鶴咀低電台 Cape D'Aguilar Low-Level Radio Station, 石澳 Shek O, 芽菜坑村 Nga Choy Hang Tsuen, 南區 Southern District, 香港島 Hong Kong Island, HK, 中国",22.20814690,114.25964115,edu,
+60496b400e70acfbbf5f2f35b4a49de2a90701b5,Avoiding Boosting Overfitting by Removing Confusing Samples,Moscow State University,Moscow State University,"ul. Leninskiye Gory, 1, Moskva, Russia, 119991",55.70393490,37.52866960,edu,
+603e10c9dbadd51ad0938e32b730221c020d677d,It's all Relative: Monocular 3D Human Pose Estimation from Weakly Supervised Data,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+606e920681b6bd2910a1cccda2403ba7e361a3a9,Feedback Networks,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+344a5802999dddd0a6d1c4d511910af2eb922231,DroneFace: An Open Dataset for Drone Research,Feng Chia University,Feng Chia University,"逢甲大學, 100, 文華路, 西平里, 西屯區, 臺中市, 40724, 臺灣",24.18005755,120.64836072,edu,
+34ed02e82e9816e7491b1af9f6f65d7fff87ff84,Active nonrigid ICP algorithm,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+34ed02e82e9816e7491b1af9f6f65d7fff87ff84,Active nonrigid ICP algorithm,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+34bb11bad04c13efd575224a5b4e58b9249370f3,Towards Good Practices for Action Video Encoding,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+34bb11bad04c13efd575224a5b4e58b9249370f3,Towards Good Practices for Action Video Encoding,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+34bb11bad04c13efd575224a5b4e58b9249370f3,Towards Good Practices for Action Video Encoding,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+34fd4689d406d28100709b3be71958721d6ef11a,Object Skeleton Extraction in Natural Images by Fusing Scale-Associated Deep Side Outputs,Shanghai University,Shanghai University,"上海大学, 锦秋路, 大场镇, 宝山区 (Baoshan), 上海市, 201906, 中国",31.32235655,121.38400941,edu,
+34fd4689d406d28100709b3be71958721d6ef11a,Object Skeleton Extraction in Natural Images by Fusing Scale-Associated Deep Side Outputs,Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+34fd4689d406d28100709b3be71958721d6ef11a,Object Skeleton Extraction in Natural Images by Fusing Scale-Associated Deep Side Outputs,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+3411ef1ff5ad11e45106f7863e8c7faf563f4ee1,Image Retrieval and Ranking via Consistently Reconstructing Multi-attribute Queries,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+3411ef1ff5ad11e45106f7863e8c7faf563f4ee1,Image Retrieval and Ranking via Consistently Reconstructing Multi-attribute Queries,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+3411ef1ff5ad11e45106f7863e8c7faf563f4ee1,Image Retrieval and Ranking via Consistently Reconstructing Multi-attribute Queries,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+3411ef1ff5ad11e45106f7863e8c7faf563f4ee1,Image Retrieval and Ranking via Consistently Reconstructing Multi-attribute Queries,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+3445cc781ebdcf65840bd6314bc0c8c634f1ef5e,A Neural Autoregressive Approach to Attention-based Recognition,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+3445cc781ebdcf65840bd6314bc0c8c634f1ef5e,A Neural Autoregressive Approach to Attention-based Recognition,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+3445cc781ebdcf65840bd6314bc0c8c634f1ef5e,A Neural Autoregressive Approach to Attention-based Recognition,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+345cc31c85e19cea9f8b8521be6a37937efd41c2,Deep Manifold Traversal: Changing Labels with Convolutional Features,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+3434ba5677e5c98e82ee17a1f2d0ddef66d0b009,Interactive tracking and action retrieval to support human behavior analysis,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+3436b30d5c09a089252cea893fced7b3a5cbc675,The Singularity and the State of the Art in Artificial Intelligence: The technological singularity (Ubiquity symposium),New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+34ce703b7e79e3072eed7f92239a4c08517b0c55,What impacts skin color in digital photos?,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+345bea5f7d42926f857f395c371118a00382447f,Transfiguring portraits,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+349f305fff405c0f38b9df2e1648450eb841fcea,Multi-target Tracking with Motion Context in Tensor Power Iteration,Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+3410a1489d04ec6fcfbb3d76d39055117931ccf0,Learning Collections of Part Models for Object Recognition,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+349668b75c4398c075fc681f563a80ad7cf6b4f2,Real-time face pose estimation from single range images,ETH Zurich,"Computer Vision Laboratory, ETH Zurich, Zurich, Switzerland","Sternwartstrasse 7, 8092 Zürich, Switzerland",47.37723980,8.55216180,edu,
+3463f12ad434d256cd5f94c1c1bfd2dd6df36947,Facial Expression Recognition with Fusion Features Extracted from Salient Facial Areas,Shandong University,Shandong University,"山东大学, 泰安街, 鳌山卫街道, 即墨区, 青岛市, 山东省, 266200, 中国",36.36934730,120.67381800,edu,
+346c9100b2fab35b162d7779002c974da5f069ee,Photo search by face positions and facial attributes on touch devices,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+34863ecc50722f0972e23ec117f80afcfe1411a9,An efficient face recognition algorithm based on robust principal component analysis,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+34863ecc50722f0972e23ec117f80afcfe1411a9,An efficient face recognition algorithm based on robust principal component analysis,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+349c50a22c9f5b46f4ed0f03912706b2c9d484d5,Zero-Shot Learning Across Heterogeneous Overlapping Domains,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+3445e917f0712be391591442bfa1bca82b7ebd1a,Layout Estimation of Highly Cluttered Indoor Scenes Using Geometric and Semantic Cues,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+34108098e1a378bc15a5824812bdf2229b938678,Reconstructive Sparse Code Transfer for Contour Detection and Semantic Labeling,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+348a16b10d140861ece327886b85d96cce95711e,Finding Good Features for Object Recognition,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+3419af6331e4099504255a38de6f6b7b3b1e5c14,Modified Eigenimage Algorithm for Painting Image Retrieval,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+34a256eb89fde78d61c2184787f5c3183dae49cc,Convex Co-embedding,University of Alberta,University of Alberta,"University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada",53.52385720,-113.52282665,edu,
+34a256eb89fde78d61c2184787f5c3183dae49cc,Convex Co-embedding,Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+34a256eb89fde78d61c2184787f5c3183dae49cc,Convex Co-embedding,University of Alberta,University of Alberta,"University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada",53.52385720,-113.52282665,edu,
+34994e291f2ddfbb2938599730a5f7a79498dfe1,Single camera pose estimation using Bayesian filtering and Kinect motion priors,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+34ce6a2b0f4404ef4f2a7d3eb68718454840fb10,Affective Responses by Adults with Autism Are Reduced to Social Images but Elevated to Images Related to Circumscribed Interests,University of Texas at Dallas,University of Texas at Dallas,"University of Texas at Dallas, Richardson, Dallas County, Texas, 78080, USA",32.98207990,-96.75662780,edu,
+34ce6a2b0f4404ef4f2a7d3eb68718454840fb10,Affective Responses by Adults with Autism Are Reduced to Social Images but Elevated to Images Related to Circumscribed Interests,University of North Carolina at Chapel Hill,University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.91139710,-79.05045290,edu,
+34aa3dca30dc5cbf86c92d5035e35d264540a829,Person Re-identification by Attributes,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+34032cf0f94cc6645b7fb5df821c72039151c0fa,Feature Level Fusion Based Bimodal Biometric Using Transformation Domine Techniques,Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.02223470,77.56718325,edu,
+34c8de02a5064e27760d33b861b7e47161592e65,Video Action Recognition Based on Deeper Convolution Networks with Pair-Wise Frame Motion Concatenation,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+34c8de02a5064e27760d33b861b7e47161592e65,Video Action Recognition Based on Deeper Convolution Networks with Pair-Wise Frame Motion Concatenation,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+34f2aef5aa519d20379037259645d4c84526662c,An Anti-fraud System for Car Insurance Claim Based on Visual Evidence,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+34d8287c2c84b30ef056c0a07f13404ca5ec9471,Locating Facial Features with an Extended Active Shape Model,University of Cape Town,University of Cape Town,"University of Cape Town, Engineering Mall, Cape Town Ward 59, Cape Town, City of Cape Town, Western Cape, CAPE TOWN, South Africa",-33.95828745,18.45997349,edu,
+34626bed8996e105e562119e1b4aa290114c89bf,Visual to Sound: Generating Natural Sound for Videos in the Wild,University of North Carolina at Chapel Hill,University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.91139710,-79.05045290,edu,
+34ccdec6c3f1edeeecae6a8f92e8bdb290ce40fd,A Virtual Assistant to Help Dysphagia Patients Eat Safely at Home,SRI International,SRI International,"SRI International Building, West 1st Street, Menlo Park, San Mateo County, California, 94025, USA",37.45857960,-122.17560525,edu,
+34b42bcf84d79e30e26413f1589a9cf4b37076f9,Learning Sparse Representations of High Dimensional Data on Large Scale Dictionaries,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+342067ae1d5b52b62c2f31b1426bad933ef90e38,Pedestrian Detection with Semantic Regions of Interest,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+342067ae1d5b52b62c2f31b1426bad933ef90e38,Pedestrian Detection with Semantic Regions of Interest,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+342067ae1d5b52b62c2f31b1426bad933ef90e38,Pedestrian Detection with Semantic Regions of Interest,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+349db48589d9c2177b2067b112b8411513242e95,Object recognition with hierarchical kernel descriptors,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+34022637860443c052375c45c4f700afcb438cd0,Automatic Recognition of Emotions and Membership in Group Videos,Queen Mary University,Queen Mary University,"Universitatea Creștină Partium - Clădirea Sulyok, 27, Strada Primăriei, Orașul Nou, Oradea, Bihor, 410209, România",47.05702220,21.92270900,edu,
+34022637860443c052375c45c4f700afcb438cd0,Automatic Recognition of Emotions and Membership in Group Videos,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+34022637860443c052375c45c4f700afcb438cd0,Automatic Recognition of Emotions and Membership in Group Videos,Queen Mary University,Queen Mary University,"Universitatea Creștină Partium - Clădirea Sulyok, 27, Strada Primăriei, Orașul Nou, Oradea, Bihor, 410209, România",47.05702220,21.92270900,edu,
+34510d3b68b23cc829c5435ac12a5041a8adc50a,RTSeg: Real-Time Semantic Segmentation Comparative Study,University of Alberta,University of Alberta,"University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada",53.52385720,-113.52282665,edu,
+34b925a111ba29f73f5c0d1b363f357958d563c1,SAPPHIRE: An always-on context-aware computer vision system for portable devices,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+34b925a111ba29f73f5c0d1b363f357958d563c1,SAPPHIRE: An always-on context-aware computer vision system for portable devices,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+5ab2c97ada652ff8f641e1b30cc27050c0ffa7e0,Comparing Emotion Recognition Skills among Children with and without Jailed Parents,University of Minnesota,University of Minnesota,"WeismanArt, 333, East River Parkway, Marcy-Holmes, Phillips, Minneapolis, Hennepin County, Minnesota, 55455, USA",44.97308605,-93.23708813,edu,
+5ab2c97ada652ff8f641e1b30cc27050c0ffa7e0,Comparing Emotion Recognition Skills among Children with and without Jailed Parents,University of Tennessee,University of Tennessee,"University of Tennessee, Melrose Avenue, Fort Sanders, Knoxville, Knox County, Tennessee, 37916, USA",35.95424930,-83.93073950,edu,
+5a97dbd14958386aa0d969b5a926bb64cfd01b4a,Towards 3D Human Pose Estimation in the Wild: A Weakly-Supervised Approach,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+5a97dbd14958386aa0d969b5a926bb64cfd01b4a,Towards 3D Human Pose Estimation in the Wild: A Weakly-Supervised Approach,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+5a97dbd14958386aa0d969b5a926bb64cfd01b4a,Towards 3D Human Pose Estimation in the Wild: A Weakly-Supervised Approach,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+5a5ae31263517355d15b7b09d74cb03e40093046,Super Resolution and Face Recognition Based People Activity Monitoring Enhancement Using Surveillance Camera,University of Tartu,UNIVERSITY OF TARTU,"Paabel, University of Tartu, 17, Ülikooli, Kesklinn, Tartu linn, Tartu, Tartu linn, Tartu maakond, 53007, Eesti",58.38131405,26.72078081,edu,
+5a11ba25cd048f384a83882a5a4dc25db9493b80,Massive City-Scale Surface Condition Analysis Using Ground and Aerial Imagery,Tohoku University,Tohoku University,"Tohoku University, 五橋通, 青葉区, 仙台市, 宮城県, 東北地方, 980-0811, 日本",38.25309450,140.87365930,edu,
+5a11ba25cd048f384a83882a5a4dc25db9493b80,Massive City-Scale Surface Condition Analysis Using Ground and Aerial Imagery,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+5aa63f9c0310c4dd64801b379266b778f4778445,Brain Network Activity During Face Perception: The Impact of Perceptual Familiarity and Individual Differences in Childhood Experience.,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+5a87bc1eae2ec715a67db4603be3d1bb8e53ace2,A Novel Convergence Scheme for Active Appearance Models,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+5aad56cfa2bac5d6635df4184047e809f8fecca2,A visual dictionary attack on Picture Passwords,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+5a8ca0cfad32f04449099e2e3f3e3a1c8f6541c0,Automatic Frontal Face Reconstruction Approach for Pose Invariant Face Recognition,Anna University,Anna University,"Anna University, Nuclear Physics Road, Srinagar Colony, Ward 171, Zone 13 Adyar, Chennai, Chennai district, Tamil Nadu, 600025, India",13.01058380,80.23537360,edu,
+5a15eedcd836337b50a2bfab82ded7a9b939aca5,Perception of temporal asymmetries in dynamic facial expressions,University of Glasgow,University of Glasgow,"University of Glasgow, University Avenue, Yorkhill, Hillhead, Glasgow, Glasgow City, Scotland, G, UK",55.87231535,-4.28921784,edu,
+5a15eedcd836337b50a2bfab82ded7a9b939aca5,Perception of temporal asymmetries in dynamic facial expressions,Kyoto University,Kyoto University,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.02749960,135.78154513,edu,
+5ac80e0b94200ee3ecd58a618fe6afd077be0a00,Unifying Geometric Features and Facial Action Units for Improved Performance of Facial Expression Analysis,Kent State University,Kent State University,"Kent State University, Lester A. Lefton Esplanade, Whitehall Terrace, Kent, Portage County, Ohio, 44242-0001, USA",41.14435250,-81.33982833,edu,
+5a716a15b94a84ef3a76edce1e9dadc0f633e498,Crowd Counting with Deep Negative Correlation Learning,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+5a716a15b94a84ef3a76edce1e9dadc0f633e498,Crowd Counting with Deep Negative Correlation Learning,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+5aadd85e2a77e482d44ac2a215c1f21e4a30d91b,Face Recognition using Principle Components and Linear Discriminant Analysis,King Saud University,King Saud University,"King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية",24.72464030,46.62335012,edu,
+5ac63895a7d3371a739d066bb1631fc178d8276a,Learning Semantic Feature Map for Visual Content Recognition,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+5ac63895a7d3371a739d066bb1631fc178d8276a,Learning Semantic Feature Map for Visual Content Recognition,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+5a5f9e0ed220ce51b80cd7b7ede22e473a62062c,Videos as Space-Time Region Graphs,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+5acf8478d39c3e521436c66cfeec6187c0526e55,Aspects of cognitive understanding of the environment by vision-based semantic mapping,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+5a1c2ca8b81f924bc7584c2ea873c024cc979a1d,Deep Structured Energy-Based Image Inpainting,Tohoku University,Tohoku University,"Tohoku University, 五橋通, 青葉区, 仙台市, 宮城県, 東北地方, 980-0811, 日本",38.25309450,140.87365930,edu,
+5ac946fc6543a445dd1ee6d5d35afd3783a31353,Featureless: Bypassing feature extraction in action categorization,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+5a96f2bfa2deae2bc35b250251d5fbe82ef4932b,Tensor Fusion Network for Multimodal Sentiment Analysis,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+5a4ef8b9db11833e01ca8e715c6eec928bc80df0,Every Smile is Unique: Landmark-Guided Diverse Smile Generation,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+5a3800bee147ad58ab7d6c55d8a2be484c17a511,From Images to Sentences through Scene Description Graphs using Commonsense Reasoning and Knowledge,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+5a3800bee147ad58ab7d6c55d8a2be484c17a511,From Images to Sentences through Scene Description Graphs using Commonsense Reasoning and Knowledge,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+5aa7f33cdc00787284b609aa63f5eb5c0a3212f6,Multiplicative mixing of object identity and image attributes in single inferior temporal neurons,Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.02223470,77.56718325,edu,
+5a226afa04f03086e402b22ee2c43089b68fa3ba,Multiview RGB-D Dataset for Object Instance Detection,George Mason University,George Mason University,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA",38.83133325,-77.30798839,edu,
+5ad0e283c4c2aa7b9985012979835d0131fe73d8,Realtime Multi-person 2D Pose Estimation Using Part Affinity Fields,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+5a48999cf31b26191e2db60d80794163d5f8c43d,Recognition of Activities of Daily Living with Egocentric Vision: A Review,Kingston University,Kingston University,"Kingston University, Kingston Hill, Kingston Vale, Kingston-upon-Thames, London, Greater London, England, KT2 7TF, UK",51.42930860,-0.26840440,edu,
+5ae970294aaba5e0225122552c019eb56f20af74,Establishing Dense Correspondence of High Resolution 3D Faces via Möbius Transformations,"National University of Defense Technology, China","National University of Defence Technology, Changsha 410000, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.22902090,112.99483204,edu,
+5a672a0e5c95dd70041989d60672b7b2017f7018,Attention in Multimodal Neural Networks for Person Re-identification,Aalborg University,Aalborg University,"AAU, Pontoppidanstræde, Sønder Tranders, Aalborg, Aalborg Kommune, Region Nordjylland, 9220, Danmark",57.01590275,9.97532827,edu,
+5aa57a12444dbde0f5645bd9bcec8cb2f573c6a0,Face recognition using adaptive margin fisher's criterion and linear discriminant analysis (AMFC-LDA),"COMSATS Institute of Information Technology, Lahore",COMSATS Institute of Information Technology,"COMSATS Institute of Information Technology, Ali Akbar Road, Dawood Residency, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54700, ‏پاکستان‎",31.40063320,74.21372960,edu,
+5a07945293c6b032e465d64f2ec076b82e113fa6,Pulling Actions out of Context : Explicit Separation for Effective Combination,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+5f0facd360d54bc0d532c90ec2ced4c54043d15b,One-Shot Learning of Scene Locations via Feature Trajectory Transfer,University of Salzburg,University of Salzburg,"Universität Salzburg - Unipark, 1, Erzabt-Klotz-Straße, Nonntal, Salzburg, 5020, Österreich",47.79475945,13.05417525,edu,
+5f771fed91c8e4b666489ba2384d0705bcf75030,Understanding Humans in Crowded Scenes: Deep Nested Adversarial Learning and A New Benchmark for Multi-Human Parsing,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+5f771fed91c8e4b666489ba2384d0705bcf75030,Understanding Humans in Crowded Scenes: Deep Nested Adversarial Learning and A New Benchmark for Multi-Human Parsing,"National University of Defense Technology, China","National University of Defence Technology, Changsha 410000, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.22902090,112.99483204,edu,
+5fa04523ff13a82b8b6612250a39e1edb5066521,Dockerface: an easy to install and use Faster R-CNN face detector in a Docker container,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+5f94e354faeba1d330088b926d1f7886067bc93f,RefineNet: Multi-Path Refinement Networks with Identity Mappings for High-Resolution Semantic Segmentation,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+5fe89653d22d35cf98a6fe6e6793da82a55f5c9f,Collaborative Deep Reinforcement Learning for Multi-object Tracking,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+5fcbdbc0ffd5ce2c5eb3b4c18d7ad2edb00d85d1,Focus On What's Important: Self-Attention Model for Human Pose Estimation,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+5fd80e47d53c64512a0b85a4c7a0beb24bc35766,Semi-supervised Zero-Shot Learning by a Clustering-based Approach,Sharif University of Technology,Sharif University of Technology,"دانشگاه صنعتی شریف, خیابان آزادی, زنجان, منطقه ۹ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, 14588, ‏ایران‎",35.70362270,51.35125097,edu,
+5f790739bb4e11bdf4fef85c293edc04aae903a3,Using Genetic Programming for Multiclass Classification by Simultaneously Solving Component Binary Classification Problems,Victoria University of Wellington,Victoria University of Wellington,"Victoria University of Wellington, Waiteata Road, Aro Valley, Wellington, Wellington City, Wellington, 6040, New Zealand/Aotearoa",-41.29052775,174.76846919,edu,
+5ff708d399962a07f77c9bbc0d5efda52aa6915e,Pedestrian Prediction by Planning Using Deep Neural Networks,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+5f6ab4543cc38f23d0339e3037a952df7bcf696b,Video2vec: Learning semantic spatio-temporal embeddings for video representation,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+5f6ab4543cc38f23d0339e3037a952df7bcf696b,Video2vec: Learning semantic spatio-temporal embeddings for video representation,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+5f6ab4543cc38f23d0339e3037a952df7bcf696b,Video2vec: Learning semantic spatio-temporal embeddings for video representation,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+5f0b1f37fc9c65c56106438b9aa4c6e0909d6fc0,3D object class detection in the wild,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+5f7c4c20ae2731bfb650a96b69fd065bf0bb950e,A new fuzzy membership assignment and model selection approach based on dynamic class centers for fuzzy SVM family using the firefly algorithm,Islamic Azad University,Islamic Azad University,"دانشگاه آزاد اسلامی, همدان, بخش مرکزی شهرستان همدان, شهرستان همدان, استان همدان, ‏ایران‎",34.84529990,48.55962120,edu,
+5f2a8e79d02ac5bf91109f29f999aa13be0983bb,Recognizing Disguised Faces: Human and Machine Evaluation,"IIIT Delhi, India","IIIT Delhi, India","Okhla Industrial Estate, Phase III, Near Govind Puri Metro Station, New Delhi, Delhi 110020, India",28.54562820,77.27315050,edu,
+5fdd81fd5e4caa852b6be3e6bf7891578248d662,A Distributed Weighted Voting Approach for Accurate Eye Center Estimation,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+5f94969b9491db552ffebc5911a45def99026afe,Multimodal Learning and Reasoning for Visual Question Answering,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+5f94969b9491db552ffebc5911a45def99026afe,Multimodal Learning and Reasoning for Visual Question Answering,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+5f94969b9491db552ffebc5911a45def99026afe,Multimodal Learning and Reasoning for Visual Question Answering,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+5f593354fec6d6ab770a3e000684b9280cef5bbc,Active Shape Models Using Local Binary Patterns,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+5f593354fec6d6ab770a3e000684b9280cef5bbc,Active Shape Models Using Local Binary Patterns,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+5f758a29dae102511576c0a5c6beda264060a401,Fine-grained Video Attractiveness Prediction Using Multimodal Deep Learning on a Large Real-world Dataset,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+5fcaa87dbde0c4ac437a6b674843927c70f76a78,Scaling the Indian Buffet Process via Submodular Maximization,Cambridge University,Cambridge University,"University, Cambridge Road, Old Portsmouth, Portsmouth, South East, England, PO1 2HB, UK",50.79440260,-1.09717480,edu,
+5feacd9dd73827fb438a6bf6c8b406f4f11aa2fa,Slanted Stixels: Representing San Francisco's Steepest Streets,Universitat Autònoma de Barcelona,Universitat Autònoma de Barcelona,"Centre de Visió per Computador (CVC), Carrer de l'Albareda, Serraperera, UAB, Cerdanyola del Vallès, Vallès Occidental, BCN, CAT, 08214, España",41.50078110,2.11143663,edu,
+5feacd9dd73827fb438a6bf6c8b406f4f11aa2fa,Slanted Stixels: Representing San Francisco's Steepest Streets,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+5f9c3b25eaca97af3c86460d365a3dd485ecbf96,Presentation Attack Detection for Cadaver Iris,Warsaw University of Technology,Warsaw University of Technology,"Politechnika Warszawska, 1, Plac Politechniki, VIII, Śródmieście, Warszawa, mazowieckie, 00-661, RP",52.22165395,21.00735776,edu,
+5f9c3b25eaca97af3c86460d365a3dd485ecbf96,Presentation Attack Detection for Cadaver Iris,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+5feb1341a49dd7a597f4195004fe9b59f67e6707,A Deep Ranking Model for Spatio-Temporal Highlight Detection from a 360 Video,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+5fb23b55de1c613517f55a1b878bc68bd4b543e3,Multi-shot Person Re-identification through Set Distance with Visual Distributional Representation,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+5f0d4a0b5f72d8700cdf8cb179263a8fa866b59b,Memo No . 85 06 / 2018 Deep Regression Forests for Age Estimation,Shanghai University,Shanghai University,"上海大学, 锦秋路, 大场镇, 宝山区 (Baoshan), 上海市, 201906, 中国",31.32235655,121.38400941,edu,
+5f39d07dd39e5d7cfba535ada3a0ab9d5d0efb5b,Perceptual dehumanization of faces is activated by norm violations and facilitates norm enforcement.,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+5f57a1a3a1e5364792b35e8f5f259f92ad561c1f,Implicit Sparse Code Hashing,Institute of Information Science,Institute of Information Science,"資訊科學研究所, 數理大道, 中研里, 南港子, 南港區, 臺北市, 11574, 臺灣",25.04107280,121.61475620,edu,
+5f27ed82c52339124aa368507d66b71d96862cb7,"Semi-supervised Learning of Classifiers : Theory , Algorithms and Their Application to Human-Computer Interaction",University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+5f27ed82c52339124aa368507d66b71d96862cb7,"Semi-supervised Learning of Classifiers : Theory , Algorithms and Their Application to Human-Computer Interaction","University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+5fd6863a59e88b45f62e82bb72dff3fb52c49be1,Differentiating Objects by Motion: Joint Detection and Tracking of Small Flying Objects,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+5fd6863a59e88b45f62e82bb72dff3fb52c49be1,Differentiating Objects by Motion: Joint Detection and Tracking of Small Flying Objects,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+5fd6863a59e88b45f62e82bb72dff3fb52c49be1,Differentiating Objects by Motion: Joint Detection and Tracking of Small Flying Objects,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+5fea26746f3140b12317fcf3bc1680f2746e172e,Semantic Jitter: Dense Supervision for Visual Comparisons via Synthetic Images,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+5fea26746f3140b12317fcf3bc1680f2746e172e,Semantic Jitter: Dense Supervision for Visual Comparisons via Synthetic Images,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+5f7fd05f09dd6433cb273a1d33bdf75873509983,A Complete View Depended Volleyball Video Dataset under the Uncontrolled Conditions,Islamic Azad University,Islamic Azad University,"دانشگاه آزاد اسلامی, همدان, بخش مرکزی شهرستان همدان, شهرستان همدان, استان همدان, ‏ایران‎",34.84529990,48.55962120,edu,
+5f7fd05f09dd6433cb273a1d33bdf75873509983,A Complete View Depended Volleyball Video Dataset under the Uncontrolled Conditions,Islamic Azad University,Islamic Azad University,"دانشگاه آزاد اسلامی, همدان, بخش مرکزی شهرستان همدان, شهرستان همدان, استان همدان, ‏ایران‎",34.84529990,48.55962120,edu,
+5fc662287842e5cb2d23b5fa917354e957c573bf,DenseNet: Implementing Efficient ConvNet Descriptor Pyramids,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+5f676d6eca4c72d1a3f3acf5a4081c29140650fb,To skip or not to skip? A dataset of spontaneous affective response of online advertising (SARA) for audience behavior analysis,"Sichuan University, Chengdu","Sichuan Univ., Chengdu","四川大学(华西校区), 校东路, 武侯区, 武侯区 (Wuhou), 成都市 / Chengdu, 四川省, 610014, 中国",30.64276900,104.06751175,edu,
+5f676d6eca4c72d1a3f3acf5a4081c29140650fb,To skip or not to skip? A dataset of spontaneous affective response of online advertising (SARA) for audience behavior analysis,University of North Carolina at Chapel Hill,University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.91139710,-79.05045290,edu,
+5f26ab1b415e3cfa9d9f20cc93154939f3c28ebc,Classifying covert photographs,Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+5f9e6f2f84a6a9a64b1d5868e2782b4bae82b567,Robust 3D face recognition using adapted statistical models,University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.05015580,14.46907327,edu,
+5fb50e750f700f920f06b3982bd16ea920d11f68,Learning Temporal Transformations from Time-Lapse Videos,University of North Carolina at Chapel Hill,University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.91139710,-79.05045290,edu,
+5f4379b83102d1147876007e328e1b209e4b59af,REP-2008-451: Face Relighting for Recognition,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+5ffa8cfea2f5bea0ec7cecfdf76f9478ca87df89,Context-Aware Captions from Context-Agnostic Supervision,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+5f453a35d312debfc993d687fd0b7c36c1704b16,A Training Assistant Tool for the Automated Visual Inspection System,Clemson University,Clemson University,"Clemson University, Old Stadium Road, Clemson Heights, Pickens County, South Carolina, 29631, USA",34.66869155,-82.83743476,edu,
+5f453a35d312debfc993d687fd0b7c36c1704b16,A Training Assistant Tool for the Automated Visual Inspection System,Clemson University,Clemson University,"Clemson University, Old Stadium Road, Clemson Heights, Pickens County, South Carolina, 29631, USA",34.66869155,-82.83743476,edu,
+5f724a84647c5a70865509910070077962433dca,Reconstructive Memory for Abstract Selective Recall,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+5f50b2c187718e3ecba68a1eee492f6f1a0c3355,Simultaneous Edge Alignment and Learning,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+5f50b2c187718e3ecba68a1eee492f6f1a0c3355,Simultaneous Edge Alignment and Learning,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+5f50b2c187718e3ecba68a1eee492f6f1a0c3355,Simultaneous Edge Alignment and Learning,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+5fac62a3de11125fc363877ba347122529b5aa50,AMTnet: Action-Micro-Tube Regression by End-to-end Trainable Deep Architecture,Oxford Brookes University,Oxford Brookes University,"Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK",51.75552050,-1.22615970,edu,
+5f2989f2c323a4fb5de8720cdfbbae1887d8e6bb,A Belief Based Correlated Topic Model for Semantic Region Analysis in Far-Field Video Surveillance Systems,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+5f69d59ad195a69618231ad83c4ad6342a569074,Face Super-Resolution Through Wasserstein GANs,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+5f68e2131d9275d56092e9fca05bcfc65abea0d8,Cross-Modal Similarity Learning: A Low Rank Bilinear Formulation,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+5f68e2131d9275d56092e9fca05bcfc65abea0d8,Cross-Modal Similarity Learning: A Low Rank Bilinear Formulation,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+5f68e2131d9275d56092e9fca05bcfc65abea0d8,Cross-Modal Similarity Learning: A Low Rank Bilinear Formulation,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+5f68e2131d9275d56092e9fca05bcfc65abea0d8,Cross-Modal Similarity Learning: A Low Rank Bilinear Formulation,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+5fba1b179ac80fee80548a0795d3f72b1b6e49cd,Virtual U: Defeating Face Liveness Detection by Building Virtual Models from Your Public Photos,University of North Carolina at Chapel Hill,University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.91139710,-79.05045290,edu,
+33526226231cce669317ece44e6af262b8395dd9,CRF-CNN: Modeling Structured Information in Human Pose Estimation,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+33526226231cce669317ece44e6af262b8395dd9,CRF-CNN: Modeling Structured Information in Human Pose Estimation,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+33526226231cce669317ece44e6af262b8395dd9,CRF-CNN: Modeling Structured Information in Human Pose Estimation,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+33526226231cce669317ece44e6af262b8395dd9,CRF-CNN: Modeling Structured Information in Human Pose Estimation,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+33f7e78950455c37236b31a6318194cfb2c302a4,Parameterizing Object Detectors in the Continuous Pose Space,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+33f885a7e1369128534aa5f3b867bd42de9ec683,Annotation and taxonomy of gestures in lecture videos,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+33548531f9ed2ce6f87b3a1caad122c97f1fd2e9,Facial Expression Recognition in Video using Adaboost and SVM,Amity University,Amity University,"Amity University, Faizabad Road, Uttardhauna, Gomti Nagar, Tiwariganj, Lucknow, Uttar Pradesh, 226010, India",26.85095965,81.04950965,edu,
+33548531f9ed2ce6f87b3a1caad122c97f1fd2e9,Facial Expression Recognition in Video using Adaboost and SVM,Amity University,Amity University,"Amity University, Faizabad Road, Uttardhauna, Gomti Nagar, Tiwariganj, Lucknow, Uttar Pradesh, 226010, India",26.85095965,81.04950965,edu,
+33548531f9ed2ce6f87b3a1caad122c97f1fd2e9,Facial Expression Recognition in Video using Adaboost and SVM,Amity University,Amity University,"Amity University, Faizabad Road, Uttardhauna, Gomti Nagar, Tiwariganj, Lucknow, Uttar Pradesh, 226010, India",26.85095965,81.04950965,edu,
+33ac7fd3a622da23308f21b0c4986ae8a86ecd2b,Building an On-Demand Avatar-Based Health Intervention for Behavior Change,Florida International University,Florida International University,"FIU, Southwest 14th Street, Sweetwater, University Park, Miami-Dade County, Florida, 33199, USA",25.75533775,-80.37628897,edu,
+33ac7fd3a622da23308f21b0c4986ae8a86ecd2b,Building an On-Demand Avatar-Based Health Intervention for Behavior Change,University of Miami,University of Miami,"University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA",25.71733390,-80.27866887,edu,
+33f3b212d665d769a209b7a278dd9907ae2be952,Recognising Human-Object Interaction via Exemplar Based Modelling,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+33f3b212d665d769a209b7a278dd9907ae2be952,Recognising Human-Object Interaction via Exemplar Based Modelling,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+33f3b212d665d769a209b7a278dd9907ae2be952,Recognising Human-Object Interaction via Exemplar Based Modelling,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+331429a6994b73c25ca0c4d0e2794e9119ac870c,Sidestepping Intractable Inference with Structured Ensemble Cascades,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+33030c23f6e25e30b140615bb190d5e1632c3d3b,Toward a General Framework for Words and Pictures,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+33030c23f6e25e30b140615bb190d5e1632c3d3b,Toward a General Framework for Words and Pictures,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+33030c23f6e25e30b140615bb190d5e1632c3d3b,Toward a General Framework for Words and Pictures,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+33030c23f6e25e30b140615bb190d5e1632c3d3b,Toward a General Framework for Words and Pictures,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+33030c23f6e25e30b140615bb190d5e1632c3d3b,Toward a General Framework for Words and Pictures,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+33030c23f6e25e30b140615bb190d5e1632c3d3b,Toward a General Framework for Words and Pictures,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+33030c23f6e25e30b140615bb190d5e1632c3d3b,Toward a General Framework for Words and Pictures,University of Aberdeen,University of Aberdeen,"University of Aberdeen, High Street, Old Aberdeen, Aberdeen, Aberdeen City, Scotland, AB24 3EJ, UK",57.16461430,-2.10186013,edu,
+33030c23f6e25e30b140615bb190d5e1632c3d3b,Toward a General Framework for Words and Pictures,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+33030c23f6e25e30b140615bb190d5e1632c3d3b,Toward a General Framework for Words and Pictures,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+33ba256d59aefe27735a30b51caf0554e5e3a1df,Early Active Learning via Robust Representation and Structured Sparsity,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+33e1398b73c9789debed1168536c93632c6f3f10,Efficiently Scaling Out-of-Order Cores for Simultaneous Multithreading,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+33801601485eaa27b838a17e073d81796d8b78d9,A Method to Track Targets in Three-Dimensional Space Using an Imaging Sonar,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+33c3702b0eee6fc26fc49f79f9133f3dd7fa3f13,Machine learning techniques for automated analysis of facial expressions,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+33aff42530c2fd134553d397bf572c048db12c28,From Emotions to Action Units with Hidden and Semi-Hidden-Task Learning,Universitat Pompeu Fabra,Universitat Pompeu Fabra,"Dipòsit de les Aigües, Carrer de Wellington, la Vila Olímpica del Poblenou, Ciutat Vella, Barcelona, BCN, CAT, 08071, España",41.39044285,2.18891949,edu,
+33aff42530c2fd134553d397bf572c048db12c28,From Emotions to Action Units with Hidden and Semi-Hidden-Task Learning,Universitat Pompeu Fabra,Universitat Pompeu Fabra,"Dipòsit de les Aigües, Carrer de Wellington, la Vila Olímpica del Poblenou, Ciutat Vella, Barcelona, BCN, CAT, 08071, España",41.39044285,2.18891949,edu,
+334e65b31ad51b1c1f84ce12ef235096395f1ca7,2 Emotion in Human - Computer Interaction Acknowledgements,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+3328413ee9944de1cc7c9c1d1bf2fece79718ba1,Co-Training of Audio and Video Representations from Self-Supervised Temporal Synchronization,Dartmouth College,Dartmouth College,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA",43.70479270,-72.29259090,edu,
+3328413ee9944de1cc7c9c1d1bf2fece79718ba1,Co-Training of Audio and Video Representations from Self-Supervised Temporal Synchronization,Dartmouth College,Dartmouth College,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA",43.70479270,-72.29259090,edu,
+3399f8f0dff8fcf001b711174d29c9d4fde89379,Face R-CNN,Tencent,"Tencent AI Lab, China","Ke Ji Zhong Yi Lu, Nanshan Qu, Shenzhen Shi, Guangdong Sheng, China, 518057",22.54471540,113.93571640,company,"Keji Middle 1st Rd, Nanshan Qu, Shenzhen Shi, Guangdong Sheng, China, 518057"
+335af998fd86806422a4500ee6defc26df8a5388,Nonlinear Dimensionality Reduction Applied to the Binary Classification of Images,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+335af998fd86806422a4500ee6defc26df8a5388,Nonlinear Dimensionality Reduction Applied to the Binary Classification of Images,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+3394168ff0719b03ff65bcea35336a76b21fe5e4,Object Detection Combining Recognition and Segmentation,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+3394168ff0719b03ff65bcea35336a76b21fe5e4,Object Detection Combining Recognition and Segmentation,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+3394168ff0719b03ff65bcea35336a76b21fe5e4,Object Detection Combining Recognition and Segmentation,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+3394168ff0719b03ff65bcea35336a76b21fe5e4,Object Detection Combining Recognition and Segmentation,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+3394168ff0719b03ff65bcea35336a76b21fe5e4,Object Detection Combining Recognition and Segmentation,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+33595d1135d9eecbda62bc568d2545aa3161276d,Resampling for Face Recognition,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+336067ade694c79b5838b2e8158acf18546bc5a5,Visually-Aware Personalized Recommendation using Interpretable Image Representations,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+336067ade694c79b5838b2e8158acf18546bc5a5,Visually-Aware Personalized Recommendation using Interpretable Image Representations,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+33737f966cca541d5dbfb72906da2794c692b65b,Spotting Audio-Visual Inconsistencies (SAVI) in Manipulated Video,SRI International,SRI International,"SRI International Building, West 1st Street, Menlo Park, San Mateo County, California, 94025, USA",37.45857960,-122.17560525,edu,
+33737f966cca541d5dbfb72906da2794c692b65b,Spotting Audio-Visual Inconsistencies (SAVI) in Manipulated Video,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+332740a7ababbfa0dbc974433bd5a213197c0dd1,Spectral error correcting output codes for efficient multiclass recognition,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+332740a7ababbfa0dbc974433bd5a213197c0dd1,Spectral error correcting output codes for efficient multiclass recognition,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+334e559e8decadcedbe8e495b3f5430536cff32c,"The Attentional Suppressive Surround: Eccentricity, Location-Based and Feature-Based Effects and Interactions",York University,York University,"York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada",43.77439110,-79.50481085,edu,
+334e559e8decadcedbe8e495b3f5430536cff32c,"The Attentional Suppressive Surround: Eccentricity, Location-Based and Feature-Based Effects and Interactions",York University,York University,"York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada",43.77439110,-79.50481085,edu,
+334e559e8decadcedbe8e495b3f5430536cff32c,"The Attentional Suppressive Surround: Eccentricity, Location-Based and Feature-Based Effects and Interactions",University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.54724732,edu,
+334e559e8decadcedbe8e495b3f5430536cff32c,"The Attentional Suppressive Surround: Eccentricity, Location-Based and Feature-Based Effects and Interactions",University of Dundee,University of Dundee,"University of Dundee, Park Wynd, Law, Dundee, Dundee City, Scotland, DD1 4HN, UK",56.45796755,-2.98214831,edu,
+33f2761d08da1c5b1b6a8f65ee6930075cf9927e,Hyperspectral Image Classification Using Convolutional Neural Networks and Multiple Feature Learning,University of New South Wales,University of New South Wales,"UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia",-33.91758275,151.23124025,edu,
+33f2761d08da1c5b1b6a8f65ee6930075cf9927e,Hyperspectral Image Classification Using Convolutional Neural Networks and Multiple Feature Learning,University of New South Wales,University of New South Wales,"UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia",-33.91758275,151.23124025,edu,
+3312eb79e025b885afe986be8189446ba356a507,MOON: A Mixed Objective Optimization Network for the Recognition of Facial Attributes,University of Colorado at Colorado Springs,University of Colorado at Colorado Springs,"1420 Austin Bluffs Pkwy, Colorado Springs, CO 80918, USA",38.89646790,-104.80505940,edu,
+3303f6694fa1c48afbd6e104b72e98b7f52b1651,Perceptual Adversarial Networks for Image-to-Image Transformation,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+3303f6694fa1c48afbd6e104b72e98b7f52b1651,Perceptual Adversarial Networks for Image-to-Image Transformation,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+3342018e8defb402896d2133cda0417e49f1e9aa,Face Verification Across Age Progression,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+33402ee078a61c7d019b1543bb11cc127c2462d2,Self-Supervised Video Representation Learning with Odd-One-Out Networks,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+337dd4aaca2c5f9b5d2de8e0e2401b5a8feb9958,Data-specific Adaptive Threshold for Face Recognition and Authentication,Institute of Information Science,Institute of Information Science,"資訊科學研究所, 數理大道, 中研里, 南港子, 南港區, 臺北市, 11574, 臺灣",25.04107280,121.61475620,edu,
+33133bf1625a469b7c6ac6a2c05c6849584d87bf,Active Learning in Face Recognition: Using Tracking to Build a Face Model,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+33f2b44742cc828347ccc5ec488200c25838b664,Pooling the Convolutional Layers in Deep ConvNets for Action Recognition,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+33f2b44742cc828347ccc5ec488200c25838b664,Pooling the Convolutional Layers in Deep ConvNets for Action Recognition,Hefei University of Technology,Hefei University of Technology,"合肥工业大学(屯溪路校区), 193号, 南一环路, 航运南村, 包公街道, 合肥市区, 合肥市, 安徽省, 230009, 中国",31.84691800,117.29053367,edu,
+33867c617e9f264c9e857d73358e0fd5b60a149a,Face Reconstruction on Mobile Devices Using a Height Map Shape Model and Fast Regularization,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+33867c617e9f264c9e857d73358e0fd5b60a149a,Face Reconstruction on Mobile Devices Using a Height Map Shape Model and Fast Regularization,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+337e67c8c5247695bb384c35272beaf47d464c75,Deep Adaptive Learning for Writer Identification based on Single Handwritten Word Images,University of Groningen,University of Groningen,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland",53.21967825,6.56251482,edu,
+334ac2a459190b41923be57744aa6989f9a54a51,Apples to Oranges: Evaluating Image Annotations from Natural Language Processing Systems,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+33ef419dffef85443ec9fe89a93f928bafdc922e,SelfKin: Self Adjusted Deep Model For Kinship Verification,Bar-Ilan University,Bar-Ilan University,"אוניברסיטת בר אילן, כביש גהה, גבעת שמואל, קריית מטלון, גבעת שמואל, מחוז תל אביב, NO, ישראל",32.06932925,34.84334339,edu,
+33f73cf297065ace7f27e8d449765f1c51ef163c,Determining Interacting Objects in Human-Centric Activities via Qualitative Spatio-Temporal Reasoning,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+33ad23377eaead8955ed1c2b087a5e536fecf44e,Augmenting CRFs with Boltzmann Machine Shape Priors for Image Labeling,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+33ad23377eaead8955ed1c2b087a5e536fecf44e,Augmenting CRFs with Boltzmann Machine Shape Priors for Image Labeling,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+33594e1bfe93fc6c74c2bcfc1bc39c524fa9e2ca,Alternating Regression Forests for Object Detection and Pose Estimation,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+33d682c52eb24875c556ec007bc38068d3e682c0,VisDA: The Visual Domain Adaptation Challenge,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+33d682c52eb24875c556ec007bc38068d3e682c0,VisDA: The Visual Domain Adaptation Challenge,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+3373ca46fa2c19112aebd772983ce70183ac1690,Somatosensory Representations Link the Perception of Emotional Expressions and Sensory Experience123,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+05904c87cb1d0b1f17fcb018fa0344c020694f36,Modulation of the composite face effect by unintended emotion cues,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+053b263b4a4ccc6f9097ad28ebf39c2957254dfb,Cost-Effective HITs for Relative Similarity Comparisons,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+053b263b4a4ccc6f9097ad28ebf39c2957254dfb,Cost-Effective HITs for Relative Similarity Comparisons,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+053b263b4a4ccc6f9097ad28ebf39c2957254dfb,Cost-Effective HITs for Relative Similarity Comparisons,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+05a26e093a101a9e6d9cac4e39a29afd6f1ca77e,Computational modeling of social face perception in humans : Leveraging the active appearance model,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+05a26e093a101a9e6d9cac4e39a29afd6f1ca77e,Computational modeling of social face perception in humans : Leveraging the active appearance model,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+05a26e093a101a9e6d9cac4e39a29afd6f1ca77e,Computational modeling of social face perception in humans : Leveraging the active appearance model,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+056d5d942084428e97c374bb188efc386791e36d,Temporally Robust Global Motion Compensation by Keypoint-Based Congealing,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+05e45f61dc7577c50114a382abc6e952ae24cdac,"Object Detection and Recognition in Natural Settings by George William Dittmar A thesis submitted in partial fulfilment of the requirements of the degree Master of Science in Computer Science Thesis Committee: Melanie Mitchell, Chair",Portland State University,Portland State University,"Portland State University, Southwest Park Avenue, University District, Portland Downtown, Portland, Multnomah County, Oregon, 97201, USA",45.51181205,-122.68492999,edu,
+050836151004b1997972c3fcbff0b85de8308e38,Matching and Predicting Street Level Images,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+050836151004b1997972c3fcbff0b85de8308e38,Matching and Predicting Street Level Images,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+05bac1f503ad77b095730f3b55214f7785b3f65d,A Multi-Layer Feature-Assisted Approach in Crowd-Labelling,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+0595d18e8d8c9fb7689f636341d8a55cc15b3e6a,Discriminant Analysis on Riemannian Manifold of Gaussian Distributions for Face Recognition With Image Sets,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+0595d18e8d8c9fb7689f636341d8a55cc15b3e6a,Discriminant Analysis on Riemannian Manifold of Gaussian Distributions for Face Recognition With Image Sets,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+0573f3d2754df3a717368a6cbcd940e105d67f0b,Emotion recognition in the wild challenge 2013,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+0573f3d2754df3a717368a6cbcd940e105d67f0b,Emotion recognition in the wild challenge 2013,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+0573f3d2754df3a717368a6cbcd940e105d67f0b,Emotion recognition in the wild challenge 2013,University of Canberra,University of Canberra,"University of Canberra, University Drive, Bruce, Belconnen, Australian Capital Territory, 2617, Australia",-35.23656905,149.08446994,edu,
+0573f3d2754df3a717368a6cbcd940e105d67f0b,Emotion recognition in the wild challenge 2013,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+05a0d04693b2a51a8131d195c68ad9f5818b2ce1,Dual-reference Face Retrieval: What Does He/She Look Like at Age 'X'?,University of East Anglia,University of East Anglia,"Arts (Lower Walkway Level), The Square, Westfield View, Earlham, Norwich, Norfolk, East of England, England, NR4 7TJ, UK",52.62215710,1.24091360,edu,
+05a0d04693b2a51a8131d195c68ad9f5818b2ce1,Dual-reference Face Retrieval: What Does He/She Look Like at Age 'X'?,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+059bf35362d896dddd5ebcd5b1b93682efa9f46f,Additive Component Analysis,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+0593bd23851b9f545ff7218887c09f4c62b7aaad,Detectability prediction in dynamic scenes for enhanced environment perception,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+0535101387033bf0b5ddf662a8c4d98caa1adc52,Fast Neighborhood Graph Search Using Cartesian Concatenation,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+0535101387033bf0b5ddf662a8c4d98caa1adc52,Fast Neighborhood Graph Search Using Cartesian Concatenation,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+05a312478618418a2efb0a014b45acf3663562d7,Accelerated sampling for the Indian Buffet Process,Cambridge University,Cambridge University,"University, Cambridge Road, Old Portsmouth, Portsmouth, South East, England, PO1 2HB, UK",50.79440260,-1.09717480,edu,
+0568497f77d21122a6991e3d7147d5205451873a,Optimal feature selection for 3D facial expression recognition using coarse-to-fine classification,Eastern Mediterranean University,Eastern Mediterranean University,"Eastern Mediterranean University (EMU) - Stadium, Nehir Caddesi, Gazimağusa, Αμμόχωστος - Mağusa, Kuzey Kıbrıs, 99450, Κύπρος - Kıbrıs",35.14479945,33.90492318,edu,
+05818eddd8a35fed7f3041d591ef966f8e79bd9a,Web scale photo hash clustering on a single machine,Facebook,Facebook,"250 Bryant St, Mountain View, CA 94041, USA",37.39367170,-122.08072620,company,"Facebook, Mountain View, CA"
+055de0519da7fdf27add848e691087e0af166637,Joint Unsupervised Face Alignment and Behaviour Analysis,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+0515e43c92e4e52254a14660718a9e498bd61cf5,Machine Learning Systems for Detecting Driver Drowsiness,Sabanci University,Sabanci University,"Sabanci Universitesi, Preveze Cad., Orta Mahallesi, Tepeören, Tuzla, İstanbul, Marmara Bölgesi, 34953, Türkiye",40.89271590,29.37863323,edu,
+0515e43c92e4e52254a14660718a9e498bd61cf5,Machine Learning Systems for Detecting Driver Drowsiness,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+05891725f5b27332836cf058f04f18d74053803f,One-shot Action Localization by Learning Sequence Matching Network,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+05891725f5b27332836cf058f04f18d74053803f,One-shot Action Localization by Learning Sequence Matching Network,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+052651838d27835f39270101e140055e60a59d68,Enhancing Exemplar SVMs using Part Level Transfer Regularization,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+059582bee125512b127296364e7700ebd9f80436,Action-driven 3D indoor scene evolution,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+059582bee125512b127296364e7700ebd9f80436,Action-driven 3D indoor scene evolution,Shandong University,Shandong University,"山东大学, 泰安街, 鳌山卫街道, 即墨区, 青岛市, 山东省, 266200, 中国",36.36934730,120.67381800,edu,
+059582bee125512b127296364e7700ebd9f80436,Action-driven 3D indoor scene evolution,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+059582bee125512b127296364e7700ebd9f80436,Action-driven 3D indoor scene evolution,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+0568fc777081cbe6de95b653644fec7b766537b2,Learning Expressionlets on Spatio-temporal Manifold for Dynamic Facial Expression Recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+0568fc777081cbe6de95b653644fec7b766537b2,Learning Expressionlets on Spatio-temporal Manifold for Dynamic Facial Expression Recognition,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+0568fc777081cbe6de95b653644fec7b766537b2,Learning Expressionlets on Spatio-temporal Manifold for Dynamic Facial Expression Recognition,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+05929f5a20147fe4349b6fe76819c023e53ad8f6,Minimizing Supervision for Free-space Segmentation,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+05929f5a20147fe4349b6fe76819c023e53ad8f6,Minimizing Supervision for Free-space Segmentation,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+05fa7085663bbbd1057c0d240158091930c59c6a,MovieQA: Understanding Stories in Movies through Question-Answering,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+05d80c59c6fcc4652cfc38ed63d4c13e2211d944,On sampling-based approximate spectral decomposition,Courant Institute of Mathematical Sciences,Courant Institute of Mathematical Sciences,"Courant Institute of Mathematical Sciences, 251, Mercer Street, Washington Square Village, Greenwich Village, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72864840,-73.99568630,edu,
+05fdd29536d55fe3ad00689b6f60ada8bc761e91,HOGgles: Visualizing Object Detection Features,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+0574dc64c8275b09ed587dc3977f4d3c990bd4df,Context-Aware Visual Policy Network for Sequence-Level Image Captioning,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+05f0b43fe16282656cf1fdce919ac0f9d433f4a5,Future Person Localization in First-Person Videos,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+05f0b43fe16282656cf1fdce919ac0f9d433f4a5,Future Person Localization in First-Person Videos,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+05f0b43fe16282656cf1fdce919ac0f9d433f4a5,Future Person Localization in First-Person Videos,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+05f0b43fe16282656cf1fdce919ac0f9d433f4a5,Future Person Localization in First-Person Videos,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+05bb9a8877a82a474db3a0ee65772028a715e8cd,Learning mid-level features from object hierarchy for image classification,University of Manitoba,University of Manitoba,"University of Manitoba, Gillson Street, Normand Park, Saint Vital, Winnipeg, Manitoba, R3T 2N2, Canada",49.80915360,-97.13304179,edu,
+05bb9a8877a82a474db3a0ee65772028a715e8cd,Learning mid-level features from object hierarchy for image classification,IBM Thomas J. Watson Research Center,IBM Thomas J. Watson Research Center,"IBM Yorktown research lab, Adams Road, Millwood, Town of New Castle, Westchester County, New York, 10562, USA",41.21002475,-73.80407056,company,
+055530f7f771bb1d5f352e2758d1242408d34e4d,A Facial Expression Recognition System from Depth Video,SungKyunKwan University,SungKyunKwan University,"성균관대, 덕영대로, 천천동, 장안구, 수원시, 경기, 16357, 대한민국",37.30031270,126.97212300,edu,
+05812833afba3b2a5a4b54853b0a1ed1cc8932d1,Fast planar object detection and tracking via edgel templates,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+053a4e5a89716f3f9e71bd09718bd9021a5114e0,Privacy considerations for a pervasive eye tracking world,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+053a4e5a89716f3f9e71bd09718bd9021a5114e0,Privacy considerations for a pervasive eye tracking world,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+050eda213ce29da7212db4e85f948b812a215660,Combining Models and Exemplars for Face Recognition: An Illuminating Example,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+050eda213ce29da7212db4e85f948b812a215660,Combining Models and Exemplars for Face Recognition: An Illuminating Example,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+059dc8bbf912caed67f287ad8811d3fb41fa2eba,Exemplar-based linear discriminant analysis for robust object tracking,Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+05b6c32304dd1673c14f1e1efce4e4d5c4402275,What are the Visual Features Underlying Rapid Object Recognition?,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+05b6c32304dd1673c14f1e1efce4e4d5c4402275,What are the Visual Features Underlying Rapid Object Recognition?,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+05014b04223562c7c7485a1277552564d0ddc6de,Deep Captioning with Multimodal Recurrent Neural Networks (m-RNN),"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+05e3acc8afabc86109d8da4594f3c059cf5d561f,Actor-Action Semantic Segmentation with Grouping Process Models,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+053f5a00d58541c417693a4e08a76005e135486e,Generating Animations by Sketching in Conceptual Space,Victoria University of Wellington,Victoria University of Wellington,"Victoria University of Wellington, Waiteata Road, Aro Valley, Wellington, Wellington City, Wellington, 6040, New Zealand/Aotearoa",-41.29052775,174.76846919,edu,
+05455f5e3c3989be4991cb74b73cdfd0d6522622,Learning Warped Guidance for Blind Face Restoration,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+05455f5e3c3989be4991cb74b73cdfd0d6522622,Learning Warped Guidance for Blind Face Restoration,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+05455f5e3c3989be4991cb74b73cdfd0d6522622,Learning Warped Guidance for Blind Face Restoration,University of Kentucky,University of Kentucky,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA",38.03337420,-84.50177580,edu,
+052f994898c79529955917f3dfc5181586282cf8,Unsupervised Domain Adaptation for Face Recognition in Unlabeled Videos,Dalian University of Technology,Dalian University of Technology,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国",38.88140235,121.52281098,edu,
+05318a267226f6d855d83e9338eaa9e718b2a8dd,Age estimation from face images: challenging problem for audience measurement systems,Yaroslavl State University,Yaroslavl State University,"ЯрГУ им. Демидова (Экономический факультет), 3, Комсомольская улица, Кировский район, Ярославль, городской округ Ярославль, Ярославская область, ЦФО, 150000, РФ",57.62521030,39.88456560,edu,
+05f3cc64e640a9aca2d0e6086aa6efaf103a3fe2,Robot-centric Activity Recognition from First-Person RGB-D Videos,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+05f3cc64e640a9aca2d0e6086aa6efaf103a3fe2,Robot-centric Activity Recognition from First-Person RGB-D Videos,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+05b0383b4cfe007bbad92e72ee361f95e7e9a458,Fast Matching of Binary Features,University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+057d5f66a873ec80f8ae2603f937b671030035e6,Newtonian Image Understanding: Unfolding the Dynamics of Objects in Static Images,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+0549dc0290fe988ede74c4e030ae485c13eaa54a,Development of Vision Based Multiview Gait Recognition System with MMUGait Database,Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.64185301,edu,
+05f3f8f6f97db00bafa2efd2ac9aac570603c0c6,TGIF: A New Dataset and Benchmark on Animated GIF Description,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+05f3f8f6f97db00bafa2efd2ac9aac570603c0c6,TGIF: A New Dataset and Benchmark on Animated GIF Description,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+05b603d7c6004de3c028e40b4434804f752290b9,Combining Skeletal Pose with Local Motion for Human Activity Recognition,University at Buffalo,State University of New York at Buffalo,"Buffalo, NY 14260, USA",43.00080930,-78.78896970,edu,
+050a3346e44ca720a54afbf57d56b1ee45ffbe49,Multi-cue Zero-Shot Learning with Strong Supervision,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+0517d08da7550241fb2afb283fc05d37fce5d7b7,Combination of Local Multiple Patterns and Exponential Discriminant Analysis for Facial Recognition,Chongqing University,Chongqing University,"重庆工商大学, 19, 翠林路, 重庆市, 重庆市中心, 南岸区 (Nan'an), 重庆市, 400067, 中国",29.50841740,106.57858552,edu,
+053f9b10532a87e346fad281e0be81337cb525a5,Session Variability Modelling for Face Authentication,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+058d50af5456665dccda2b41b17bdfead72bdec8,Learning Non-Metric Visual Similarity for Image Retrieval,Aston University,Aston University,"Aston University, Aston Street, Digbeth, Birmingham, West Midlands Combined Authority, West Midlands, England, B4, UK",52.48620785,-1.88849915,edu,
+058d50af5456665dccda2b41b17bdfead72bdec8,Learning Non-Metric Visual Similarity for Image Retrieval,Aston University,Aston University,"Aston University, Aston Street, Digbeth, Birmingham, West Midlands Combined Authority, West Midlands, England, B4, UK",52.48620785,-1.88849915,edu,
+053931267af79a89791479b18d1b9cde3edcb415,Attributes for Improved Attributes: A Multi-Task Network Utilizing Implicit and Explicit Relationships for Facial Attribute Classification,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+05f3d1e9fb254b275354ca69018e9ed321dd8755,Face Recognition using Optimal Representation Ensemble,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+051f03bc25ec633592aa2ff5db1d416b705eac6c,Partial face recognition: An alignment free approach,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+9d4c467adc09fb50c5e799fc124f3e82da8c3c22,Temporal Sequence Distillation: Towards Few-Frame Action Recognition in Videos,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+9d4c467adc09fb50c5e799fc124f3e82da8c3c22,Temporal Sequence Distillation: Towards Few-Frame Action Recognition in Videos,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+9d8ff782f68547cf72b7f3f3beda9dc3e8ecfce6,Improved Pseudoinverse Linear Discriminant Analysis Method for Dimensionality Reduction,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+9d55ec73cab779403cd933e6eb557fb04892b634,Kernel principal component analysis network for image classification,Southeast University,Southeast University,"SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国",32.05752790,118.78682252,edu,
+9d5ec256ee5a6ee2d9602f651e88132f2669f690,A novel M-estimator for robust PCA,University of Minnesota,University of Minnesota,"WeismanArt, 333, East River Parkway, Marcy-Holmes, Phillips, Minneapolis, Hennepin County, Minnesota, 55455, USA",44.97308605,-93.23708813,edu,
+9d5ec256ee5a6ee2d9602f651e88132f2669f690,A novel M-estimator for robust PCA,University of Minnesota,University of Minnesota,"WeismanArt, 333, East River Parkway, Marcy-Holmes, Phillips, Minneapolis, Hennepin County, Minnesota, 55455, USA",44.97308605,-93.23708813,edu,
+9dfd1e9daea4c54a05b06df905bf8ee1faccaa72,"New l2, 1-Norm Relaxation of Multi-Way Graph Cut for Clustering",Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+9dfd1e9daea4c54a05b06df905bf8ee1faccaa72,"New l2, 1-Norm Relaxation of Multi-Way Graph Cut for Clustering",Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+9dfd1e9daea4c54a05b06df905bf8ee1faccaa72,"New l2, 1-Norm Relaxation of Multi-Way Graph Cut for Clustering",Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+9d422e2c318ab63e6b49c83053757b4636f8308b,Object localization in ImageNet by looking out of the window,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+9dd227edfeb472076346cbe2c49811d1778a43a8,Domain-Invariant Projection Learning for Zero-Shot Recognition,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+9ded64e83d3ba51513ea00de27c0c770a02b0cf4,Image Classification using Transfer Learning from Siamese Networks based on Text Metadata Similarity,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+9d66de2a59ec20ca00a618481498a5320ad38481,POP: Privacy-Preserving Outsourced Photo Sharing and Searching for Mobile Devices,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+9d66de2a59ec20ca00a618481498a5320ad38481,POP: Privacy-Preserving Outsourced Photo Sharing and Searching for Mobile Devices,Illinois Institute of Technology,Illinois Institute of Technology,"Illinois Institute of Technology, South State Street, Bronzeville, Chicago, Cook County, Illinois, 60616, USA",41.83619630,-87.62655913,edu,
+9d3472849dc2cadf194ae29adbf46bdda861d8b7,Learning to Ask: Neural Question Generation for Reading Comprehension,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+9d3472849dc2cadf194ae29adbf46bdda861d8b7,Learning to Ask: Neural Question Generation for Reading Comprehension,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+9db7af606e6eb6238ca900145c8270245e9d2959,PBGen: Partial Binarization of Deconvolution-Based Generators for Edge Intelligence,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+9db7af606e6eb6238ca900145c8270245e9d2959,PBGen: Partial Binarization of Deconvolution-Based Generators for Edge Intelligence,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+9d6c5dc5b212d8a8e94e7c52b0a2e4550aa2e117,Eye tracking studies of normative and atypical development q,University of Minnesota,University of Minnesota,"WeismanArt, 333, East River Parkway, Marcy-Holmes, Phillips, Minneapolis, Hennepin County, Minnesota, 55455, USA",44.97308605,-93.23708813,edu,
+9d4ebcd84c4ba2241cca3242e22888558b62a0e0,Demonstration of Santoku: Optimizing Machine Learning over Normalized Data,University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+9d9106d48e30c07d45ab07c21f8c35d11ae4d35d,Under Review as a Conference Paper at Iclr 2017 Learning to Draw Samples: with Application to Amortized Mle for Generative Adversar- Ial Learning,Dartmouth College,Dartmouth College,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA",43.70479270,-72.29259090,edu,
+9d7edd114f788763bb16280249fae97c4aa2c102,Image Quality Assessment Techniques Show Improved Training and Evaluation of Autoencoder Generative Adversarial Networks,Carleton University,Carleton University,"Carleton University, 1125, Colonel By Drive, Billings Bridge, Capital, Ottawa, Ontario, K1S 5B7, Canada",45.38608430,-75.69539267,edu,
+9de6b3f7a60cea9749ae38ad9b700a7350212350,Non-frontal view facial expression recognition based on ergodic hidden Markov model supervectors,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+9d146936d7d06622e271764c8a050a92bc168f3c,GAD: General Activity Detection for Fast Clustering on Large Data,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+9dcc6dde8d9f132577290d92a1e76b5decc6d755,Facial Expression Analysis Based on Optimized Gabor Features,Istanbul University,Istanbul University,"İstanbul Üniversitesi, Besim Ömerpaşa Caddesi, Süleymaniye, Fatih, İstanbul, Marmara Bölgesi, 34116, Türkiye",41.01324240,28.96376090,edu,
+9dcc6dde8d9f132577290d92a1e76b5decc6d755,Facial Expression Analysis Based on Optimized Gabor Features,Bahçeşehir University,Bahcesehir University,"BAU Galata, 24, Kemeraltı Caddesi, Müeyyedzade, Beyoğlu, İstanbul, Marmara Bölgesi, 34425, Türkiye",41.02451875,28.97697953,edu,
+9d57c4036a0e5f1349cd11bc342ac515307b6720,Landmark Weighting for 3DMM Shape Fitting,Jiangnan University,Jiangnan University,"江南大学站, 蠡湖大道, 滨湖区, 南场村, 滨湖区 (Binhu), 无锡市 / Wuxi, 江苏省, 214121, 中国",31.48542550,120.27395810,edu,
+9d57c4036a0e5f1349cd11bc342ac515307b6720,Landmark Weighting for 3DMM Shape Fitting,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+9d941a99e6578b41e4e32d57ece580c10d578b22,Illumination-Invariant and Deformation-Tolerant Inner Knuckle Print Recognition Using Portable Devices,South China University of Technology,South China University of Technology,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.05020420,113.39880323,edu,
+9d941a99e6578b41e4e32d57ece580c10d578b22,Illumination-Invariant and Deformation-Tolerant Inner Knuckle Print Recognition Using Portable Devices,Shenzhen University,Shenzhen University,"深圳大学, 3688, 南海大道, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518060, 中国",22.53521465,113.93159110,edu,
+9d941a99e6578b41e4e32d57ece580c10d578b22,Illumination-Invariant and Deformation-Tolerant Inner Knuckle Print Recognition Using Portable Devices,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+9d941a99e6578b41e4e32d57ece580c10d578b22,Illumination-Invariant and Deformation-Tolerant Inner Knuckle Print Recognition Using Portable Devices,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+9db7bc834ad534d48b22a87ab5f706833cc18d79,Applications of Scene Attributes,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+9d896605fbf93315b68d4ee03be0770077f84e40,Baby Talk: Understanding and Generating Image Descriptions,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+9d896605fbf93315b68d4ee03be0770077f84e40,Baby Talk: Understanding and Generating Image Descriptions,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+9d61b0beb3c5903fc3032655dc0fd834ec0b2af3,Learning a Locality Preserving Subspace for Visual Recognition,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+9d7b2d1f3d6705bc8a4656fa27fb6dde20033f25,A discriminative key pose sequence model for recognizing human interactions,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+9ded7bd7ee896de568cbb0281ec553c21de93131,Face recognition using a novel image representation scheme and multi-scale local features,"Sichuan University, Chengdu","Sichuan Univ., Chengdu","四川大学(华西校区), 校东路, 武侯区, 武侯区 (Wuhou), 成都市 / Chengdu, 四川省, 610014, 中国",30.64276900,104.06751175,edu,
+9ded7bd7ee896de568cbb0281ec553c21de93131,Face recognition using a novel image representation scheme and multi-scale local features,University of Nevada,University of Nevada,"Orange 1, Evans Avenue, Reno, Washoe County, Nevada, 89557, USA",39.54694490,-119.81346566,edu,
+9ded7bd7ee896de568cbb0281ec553c21de93131,Face recognition using a novel image representation scheme and multi-scale local features,King Saud University,King Saud University,"King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية",24.72464030,46.62335012,edu,
+9dd47158cd7ee3725be3aa7a2ce9b25a7d4aed74,Clustering-driven Deep Embedding with Pairwise Constraints,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+9dd47158cd7ee3725be3aa7a2ce9b25a7d4aed74,Clustering-driven Deep Embedding with Pairwise Constraints,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+9dd47158cd7ee3725be3aa7a2ce9b25a7d4aed74,Clustering-driven Deep Embedding with Pairwise Constraints,Bar-Ilan University,Bar-Ilan University,"אוניברסיטת בר אילן, כביש גהה, גבעת שמואל, קריית מטלון, גבעת שמואל, מחוז תל אביב, NO, ישראל",32.06932925,34.84334339,edu,
+9dd47158cd7ee3725be3aa7a2ce9b25a7d4aed74,Clustering-driven Deep Embedding with Pairwise Constraints,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+9d3aa3b7d392fad596b067b13b9e42443bbc377c,Facial Biometric Templates and Aging: Problems and Challenges for Artificial Intelligence,Cyprus University of Technology,Cyprus University of Technology,"Mitropoli Building - Cyprus University of Technology, Anexartisias, Limasol - Λεμεσός, Limassol - Λεμεσός, Κύπρος - Kıbrıs, 3036, Κύπρος - Kıbrıs",34.67567405,33.04577648,edu,
+9db4b25df549555f9ffd05962b5adf2fd9c86543,Nonlinear 3D Face Morphable Model,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+9d3a6e459e0cecda20a8afd69d182877ff0224cf,A Framework for Articulated Hand Pose Estimation and Evaluation,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+9d06d43e883930ddb3aa6fe57c6a865425f28d44,Clustering Appearances of Objects Under Varying Illumination Conditions,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+9c513b0f304b1bb29de478a1227ddb201ed50217,A Simple and Effective Technique for Face Clustering in TV Series,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+9c2aea0bd67c7fe232cca54ee2440b9d666479ea,Recycle-GAN: Unsupervised Video Retargeting,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+9c7d3d2a524aedb8bf687441f26dac5ed8c490c5,Visual Explanations from Hadamard Product in Multimodal Deep Networks,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+9c7d3d2a524aedb8bf687441f26dac5ed8c490c5,Visual Explanations from Hadamard Product in Multimodal Deep Networks,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+9ca0626366e136dac6bfd628cec158e26ed959c7,In-the-wild Facial Expression Recognition in Extreme Poses,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+9c9ef6a46fb6395702fad622f03ceeffbada06e5,Exchanging Faces in Images,University of Basel,University of Basel,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra",47.56126510,7.57529610,edu,
+9c1cdb795fd771003da4378f9a0585730d1c3784,Stacked Deformable Part Model with Shape Regression for Object Part Localization,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+9cd0d36af668c354b0ff17f2e21cdde2c16b0d4e,Generative One-Class Models for Text-based Person Retrieval in Forensic Applications,"KTH Royal Institute of Technology, Stockholm","KTH Royal Institute of Technology, Stockholm","KTH, Teknikringen, Lärkstaden, Norra Djurgården, Östermalms stadsdelsområde, Sthlm, Stockholm, Stockholms län, Svealand, 114 28, Sverige",59.34986645,18.07063213,edu,
+9c43b59177cb5539ea649c188387fe374663bbb1,Learning Discriminative Latent Attributes for Zero-Shot Classification,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+9c9ba660ff8fdf74a81ceaae5ee2e590c7659cf8,Real-Time Pedestrian Tracking with Bacterial Foraging Optimization,"University of California, Riverside","University of California, Riverside","University of California, Riverside, Linden Street, Riverside, Riverside County, California, 92521, USA",33.98071305,-117.33261035,edu,
+9c85d54a9f6c6a522a267bfdf375251947caef5e,Recognition of Blurred Faces via Facial Deblurring Combined with Blur-Tolerant Descriptors,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+9c85d54a9f6c6a522a267bfdf375251947caef5e,Recognition of Blurred Faces via Facial Deblurring Combined with Blur-Tolerant Descriptors,Institute of Industrial Science,Institute of Industrial Science,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本",36.05238585,140.11852361,edu,
+9c85d54a9f6c6a522a267bfdf375251947caef5e,Recognition of Blurred Faces via Facial Deblurring Combined with Blur-Tolerant Descriptors,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+9c25e89c80b10919865b9c8c80aed98d223ca0c6,Gender Prediction by Gait Analysis Based on Time Series Variation of Joint Positions,Meiji University,Meiji University,"明治大学, 錦華坂, 猿楽町1, 猿楽町, 東京, 千代田区, 東京都, 関東地方, 101-0051, 日本",35.69750290,139.76139175,edu,
+9c25e89c80b10919865b9c8c80aed98d223ca0c6,Gender Prediction by Gait Analysis Based on Time Series Variation of Joint Positions,Meiji University,Meiji University,"明治大学, 錦華坂, 猿楽町1, 猿楽町, 東京, 千代田区, 東京都, 関東地方, 101-0051, 日本",35.69750290,139.76139175,edu,
+9c09b9410da8b1c5f0e3f6b65502160734214782,Pedestrian Attribute Detection Using CNN,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+9c09b9410da8b1c5f0e3f6b65502160734214782,Pedestrian Attribute Detection Using CNN,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+9c7444c6949427994b430787a153d5cceff46d5c,Boosting Kernel Discriminative Common Vectors for Face Recognition,Bharathidasan University,Bharathidasan University,"Bharathidasan University Road, Kajamalai, Ponmalai, Ponmalai Zone, Tiruchchirāppalli, Tiruchchirappalli district, Tamil Nadu, 620020, India",10.77788450,78.69663190,edu,
+9c373438285101d47ab9332cdb0df6534e3b93d1,Occupancy Detection in Vehicles Using Fisher Vector Image Representation,Xerox Research Center,Xerox Research Center,"Xerox Research Centre of Canada, 2660, Speakman Drive, Sheridan Park, Erin Mills, Ont., Peel Region, Ontario, L5J 2M4, Canada",43.51291090,-79.66640762,edu,
+9c373438285101d47ab9332cdb0df6534e3b93d1,Occupancy Detection in Vehicles Using Fisher Vector Image Representation,Xerox Research Center,Xerox Research Center,"Xerox Research Centre of Canada, 2660, Speakman Drive, Sheridan Park, Erin Mills, Ont., Peel Region, Ontario, L5J 2M4, Canada",43.51291090,-79.66640762,edu,
+9cbb6e42a35f26cf1d19f4875cd7f6953f10b95d,Expression Recognition with Ri-HOG Cascade,Kobe University,Kobe University,"神戸大学, 灘三田線, 灘区, 神戸市, 兵庫県, 近畿地方, 657-00027, 日本",34.72757140,135.23710000,edu,
+9cbb6e42a35f26cf1d19f4875cd7f6953f10b95d,Expression Recognition with Ri-HOG Cascade,Kobe University,Kobe University,"神戸大学, 灘三田線, 灘区, 神戸市, 兵庫県, 近畿地方, 657-00027, 日本",34.72757140,135.23710000,edu,
+9cc43e3a756485b78b991605f44eec9be3530350,A Planar Light Probe,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+9c62f4b09ca590f74c75115184fc1a9833625edc,Empathic arousal and social understanding in individuals with autism: evidence from fMRI and ERP measurements.,University of Chicago,THE UNIVERSITY OF CHICAGO,"University of Chicago, South Ellis Avenue, Woodlawn, Chicago, Cook County, Illinois, 60637, USA",41.78468745,-87.60074933,edu,
+9c958322235a3ea1f239e3dde9bb865931cf34ed,Locality-Constrained Low-Rank Coding for Image Classification,"National University of Defense Technology, China","National University of Defence Technology, Changsha 410000, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.22902090,112.99483204,edu,
+9c71e6f4e27b3a6f0f872ec683b0f6dfe0966c05,"Latent Dirichlet Allocation (LDA) and Topic modeling: models, applications, a survey",Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+9c4cc11d0df2de42d6593f5284cfdf3f05da402a,Enhanced Fisher linear discriminant models for face recognition,George Mason University,George Mason University,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA",38.83133325,-77.30798839,edu,
+9cd6a81a519545bf8aa9023f6e879521f85d4cd1,Domain-invariant Face Recognition using Learned Low-rank Transformation,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+9cd6a81a519545bf8aa9023f6e879521f85d4cd1,Domain-invariant Face Recognition using Learned Low-rank Transformation,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+9cd6a81a519545bf8aa9023f6e879521f85d4cd1,Domain-invariant Face Recognition using Learned Low-rank Transformation,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+9c2e5e2ba7c5b3a555c6c72f518e3631aab23c19,RefineNet: Multi-path Refinement Networks for High-Resolution Semantic Segmentation,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+9cadd166893f1b8aaecb27280a0915e6694441f5,Multi-Modal Emotion Recognition Fusing Video and Audio,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+9cadd166893f1b8aaecb27280a0915e6694441f5,Multi-Modal Emotion Recognition Fusing Video and Audio,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+02607f5d3c7638d0207279d96f39d435f102bf4d,Assignment 4: Reading Comprehension,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+02b1a5d4b113211198e9c66d51153eb63ca680e2,Scene Invariant Crowd Segmentation and Counting Using Scale-Normalized Histogram of Moving Gradients (HoMG),University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.54724732,edu,
+02b1a5d4b113211198e9c66d51153eb63ca680e2,Scene Invariant Crowd Segmentation and Counting Using Scale-Normalized Histogram of Moving Gradients (HoMG),University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.54724732,edu,
+02c3432b5d97b4ed7b8522c1fc4388bd4eda8e67,Using context to improve cascaded pedestrian detection,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+02cc96ad997102b7c55e177ac876db3b91b4e72c,"MuseumVisitors: A dataset for pedestrian and group detection, gaze estimation and behavior understanding",University of Florence,University of Florence,"Piazza di San Marco, 4, 50121 Firenze FI, Italy",43.77764260,11.25976500,edu,
+02cc96ad997102b7c55e177ac876db3b91b4e72c,"MuseumVisitors: A dataset for pedestrian and group detection, gaze estimation and behavior understanding",Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+02c44e0a7bb179be03601a7abedb006a28ad4e23,Treepedia 2.0: Applying Deep Learning for Large-Scale Quantification of Urban Tree Cover,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+02227c94dd41fe0b439e050d377b0beb5d427cda,Reading Digits in Natural Images with Unsupervised Feature Learning,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+023ed32ac3ea6029f09b8c582efbe3866de7d00a,Discriminative learning from partially annotated examples,Czech Technical University,Czech Technical University,"České vysoké učení technické v Praze, Resslova, Nové Město, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 11121, Česko",50.07642960,14.41802312,edu,
+023ed32ac3ea6029f09b8c582efbe3866de7d00a,Discriminative learning from partially annotated examples,Czech Technical University,Czech Technical University,"České vysoké učení technické v Praze, Resslova, Nové Město, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 11121, Česko",50.07642960,14.41802312,edu,
+02d2c2b0ee77fec8d85c114c20fdae318e95a1bd,Human - Computer interaction for smart environment applications using hand gestures and facial expressions,University of Ottawa,University of Ottawa,"University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada",45.42580475,-75.68740118,edu,
+0290523cabea481e3e147b84dcaab1ef7a914612,Generated Motion Maps,Tokyo Denki University,Tokyo Denki University,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本",35.65729570,139.54255868,edu,
+0229829e9a1eed5769a2b5eccddcaa7cd9460b92,Pooled motion features for first-person videos,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+02e05ad42dbe99257eee1bff3e28feaa005e5924,Remembering Who Was Where: A Happy Expression Advantage for Face Identity-Location Binding in Working Memory,University of Aberdeen,University of Aberdeen,"University of Aberdeen, High Street, Old Aberdeen, Aberdeen, Aberdeen City, Scotland, AB24 3EJ, UK",57.16461430,-2.10186013,edu,
+02e05ad42dbe99257eee1bff3e28feaa005e5924,Remembering Who Was Where: A Happy Expression Advantage for Face Identity-Location Binding in Working Memory,University of Aberdeen,University of Aberdeen,"University of Aberdeen, High Street, Old Aberdeen, Aberdeen, Aberdeen City, Scotland, AB24 3EJ, UK",57.16461430,-2.10186013,edu,
+02b852e698dfe85df39c24e7dd39dedf484893dd,Collaborative Learning for Weakly Supervised Object Detection,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+025720574ef67672c44ba9e7065a83a5d6075c36,Unsupervised Learning of Video Representations using LSTMs,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+02c04a8b7c1232646ebc882caf3793327a510ba6,Cost-Effectiveness of Seven Approaches to Map Vegetation Communities - A Case Study from Northern Australia's Tropical Savannas,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+026e4ee480475e63ae68570d73388f8dfd4b4cde,Evaluating gender portrayal in Bangladeshi TV,Eastern University,Eastern University,"Eastern University, Huston Road, Radnor Township, Delaware County, Pennsylvania, 19087, USA",40.05056720,-75.37109326,edu,
+026e4ee480475e63ae68570d73388f8dfd4b4cde,Evaluating gender portrayal in Bangladeshi TV,Dhaka University,Dhaka University,"Faculty of Social Welfare, Dhaka University, Azimpur Koborsthan Road, বস্তি, হাজারীবাগ, ঢাকা, ঢাকা বিভাগ, 1950, বাংলাদেশ",23.73179150,90.38056250,edu,
+026e4ee480475e63ae68570d73388f8dfd4b4cde,Evaluating gender portrayal in Bangladeshi TV,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+02e628e99f9a1b295458cb453c09863ea1641b67,Two-Stage Convolutional Part Heatmap Regression for the 1st 3D Face Alignment in the Wild (3DFAW) Challenge,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+02f8c803fbf02bae0cd4ba8943fe3acccdf37402,The Bogazici face database: Standardized photographs of Turkish faces with supporting materials,Universitat Autònoma de Barcelona,Universitat Autònoma de Barcelona,"Centre de Visió per Computador (CVC), Carrer de l'Albareda, Serraperera, UAB, Cerdanyola del Vallès, Vallès Occidental, BCN, CAT, 08214, España",41.50078110,2.11143663,edu,
+02ddda27cef81c363ffffc4edfdfbd1dea14149e,Predicting People's 3D Poses from Short Sequences,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+02f4b900deabbe7efa474f2815dc122a4ddb5b76,Local and Global Optimization Techniques in Graph-based Clustering,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+02be981d45a8ca14e30e1cf9dfffd977f85e6ee4,Top-down saliency with Locality-constrained Contextual Sparse Coding,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+02fa3f9517bfede1c2b61570f792f6ed8de364f3,Detection of Human Rights Violations in Images: Can Convolutional Neural Networks Help?,University of Birmingham,University of Birmingham,"University of Birmingham Edgbaston Campus, Ring Road North, Bournbrook, Birmingham, West Midlands Combined Authority, West Midlands, England, B15 2TP, UK",52.45044325,-1.93196134,edu,
+02fa3f9517bfede1c2b61570f792f6ed8de364f3,Detection of Human Rights Violations in Images: Can Convolutional Neural Networks Help?,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+026b5b8062e5a8d86c541cfa976f8eee97b30ab8,MDLFace: Memorability augmented deep learning for video face recognition,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+0235b2d2ae306b7755483ac4f564044f46387648,Recognition of Facial Attributes Using Adaptive Sparse Representations of Random Patches,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+02a2fa826a348cc3bc46a1a31a49dce8d06ca366,Individual differences in the spontaneous recruitment of brain regions supporting mental state understanding when viewing natural social scenes.,Dartmouth College,Dartmouth College,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA",43.70479270,-72.29259090,edu,
+02a2fa826a348cc3bc46a1a31a49dce8d06ca366,Individual differences in the spontaneous recruitment of brain regions supporting mental state understanding when viewing natural social scenes.,Dartmouth College,Dartmouth College,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA",43.70479270,-72.29259090,edu,
+02218fcd3aece5a7bd19255d74b12f63dfa5c1a7,ShapeWorld - A new test methodology for multimodal language understanding,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+02218fcd3aece5a7bd19255d74b12f63dfa5c1a7,ShapeWorld - A new test methodology for multimodal language understanding,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+02467703b6e087799e04e321bea3a4c354c5487d,Grouper: Optimizing Crowdsourced Face Annotations,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+02e39f23e08c2cb24d188bf0ca34141f3cc72d47,Removing illumination artifacts from face images using the nuisance attribute projection,University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.05015580,14.46907327,edu,
+022c485c5617bbf0b7f40475f9758cddd11a91af,Describing Textures in the Wild,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+022c485c5617bbf0b7f40475f9758cddd11a91af,Describing Textures in the Wild,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+023be757b1769ecb0db810c95c010310d7daf00b,Face Alignment Assisted by Head Pose Estimation,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+023be757b1769ecb0db810c95c010310d7daf00b,Face Alignment Assisted by Head Pose Estimation,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+023be757b1769ecb0db810c95c010310d7daf00b,Face Alignment Assisted by Head Pose Estimation,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+0239d0af254c5304414db1672ea25ad2e1cdf2ee,Implicit Shape Kernel for Discriminative Learning of the Hough Transform Detector,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+0269312a7d49209fd9f1875e24df6a1d178fb15c,Subject centric group feature for person re-identification,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+0209389b8369aaa2a08830ac3b2036d4901ba1f1,DenseReg: Fully Convolutional Dense Shape Regression In-the-Wild,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+0209389b8369aaa2a08830ac3b2036d4901ba1f1,DenseReg: Fully Convolutional Dense Shape Regression In-the-Wild,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+02239ae5e922075a354169f75f684cad8fdfd5ab,Commonly Uncommon: Semantic Sparsity in Situation Recognition,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+02239ae5e922075a354169f75f684cad8fdfd5ab,Commonly Uncommon: Semantic Sparsity in Situation Recognition,University of Virginia,University of Virginia,"University of Virginia, Rotunda Alley, Carr's Hill, Albemarle County, Virginia, 22904-4119, USA",38.03536820,-78.50353220,edu,
+02aa54dbb461f6bde6fe8ba0591c3c5cabed7e59,Residual Attention Networks for Image Classification,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+02d650d8a3a9daaba523433fbe93705df0a7f4b1,How Does Aging Affect Facial Components?,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+0294f992f8dfd8748703f953925f9aee14e1b2a2,Blur-Robust Face Recognition via Transformation Learning,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+02820c1491b10a1ff486fed32c269e4077c36551,Active user authentication for smartphones: A challenge data set and benchmark results,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+02820c1491b10a1ff486fed32c269e4077c36551,Active user authentication for smartphones: A challenge data set and benchmark results,State University of New Jersey,The State University of New Jersey,"Rutgers New Brunswick: Livingston Campus, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA",40.51865195,-74.44099801,edu,
+027f769aed0cfcb3169ef60f182ce1decc0e99eb,Local Directional Pattern (LDP) for face recognition,Kyung Hee University,Kyung Hee University,"Kyung Hee Tae Kwon Do, Vons 2370 Truck Service Ramp, University City, San Diego, San Diego County, California, 92122, USA",32.85363330,-117.20352860,edu,
+02c8de83c3bd2226a918c925400628902b6f175a,"Size Matters! How Thumbnail Number, Size, and Motion Influence Mobile Video Retrieval",University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+021c3e8c3c64c25126315911f31cab1edca82ab3,DCAN: Deep Contour-Aware Networks for Accurate Gland Segmentation,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+a40edf6eb979d1ddfe5894fac7f2cf199519669f,Improving Facial Attribute Prediction Using Semantic Segmentation,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+a45ec771ca2db81088c52c173eed9ec2022a8a70,Impaired recognition of negative basic emotions in autism: a test of the amygdala theory.,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+a45ec771ca2db81088c52c173eed9ec2022a8a70,Impaired recognition of negative basic emotions in autism: a test of the amygdala theory.,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+a43c3ebeee65d44bbedac7548483485a14eacf52,Vocabulary-informed Extreme Value Learning,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+a43c3ebeee65d44bbedac7548483485a14eacf52,Vocabulary-informed Extreme Value Learning,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+a43c3ebeee65d44bbedac7548483485a14eacf52,Vocabulary-informed Extreme Value Learning,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+a4d5ff6f1fb8b304c3e6fd5f1a7abd9b5c52955c,Spoofing 2D Face Detection: Machines See People Who Aren't There,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+a40b4bf3a921f07f4d07838f9092416189e077b5,Local Binary Convolutional Neural Networks,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+a40b4bf3a921f07f4d07838f9092416189e077b5,Local Binary Convolutional Neural Networks,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+a40b4bf3a921f07f4d07838f9092416189e077b5,Local Binary Convolutional Neural Networks,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+a4876b7493d8110d4be720942a0f98c2d116d2a0,Multi-velocity neural networks for gesture recognition in videos,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+a4bb4474e38140d183241f0c8cee13167a6d2c60,Person Re-identification Using Clustering Ensemble Prototypes,"National Institute of Technology, Rourkela",National Institute of Technology Rourkela,"National Institute of Technology, inside the department, Koel Nagar, Rourkela, Sundargarh, Odisha, 769002, India",22.25015890,84.90668557,edu,
+a470a81f989d5354239f1044c90e07b78c6beed7,RPAN: An End-to-End Recurrent Pose-Attention Network for Action Recognition in Videos,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+a470a81f989d5354239f1044c90e07b78c6beed7,RPAN: An End-to-End Recurrent Pose-Attention Network for Action Recognition in Videos,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+a470a81f989d5354239f1044c90e07b78c6beed7,RPAN: An End-to-End Recurrent Pose-Attention Network for Action Recognition in Videos,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+a46086e210c98dcb6cb9a211286ef906c580f4e8,Fusing Multi-Stream Deep Networks for Video Classification,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+a4f29217d2120ed1490aea7e1c5b78c3b76e972f,Enhanced object detection via fusion with prior beliefs from image classification,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+a4acd75470152933faf9957f04579aa662a912a0,Energy efficient job scheduling in single-ISA heterogeneous chip-multiprocessors,Louisiana State University,Louisiana State University,"LSU, Gourrier Avenue, Baton Rouge, East Baton Rouge Parish, Louisiana, 70803, USA",30.40550035,-91.18620474,edu,
+a46d4f5bf9c5baca38b52874e74d1e3f9b3b12cd,Evolutionary Generative Adversarial Networks,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+a46d4f5bf9c5baca38b52874e74d1e3f9b3b12cd,Evolutionary Generative Adversarial Networks,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+a46d4f5bf9c5baca38b52874e74d1e3f9b3b12cd,Evolutionary Generative Adversarial Networks,University of Birmingham,University of Birmingham,"University of Birmingham Edgbaston Campus, Ring Road North, Bournbrook, Birmingham, West Midlands Combined Authority, West Midlands, England, B15 2TP, UK",52.45044325,-1.93196134,edu,
+a47f834281c39b1b851757b807c92f43dc975206,Multi task sequence learning for depression scale prediction from video,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+a47f834281c39b1b851757b807c92f43dc975206,Multi task sequence learning for depression scale prediction from video,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+a47f834281c39b1b851757b807c92f43dc975206,Multi task sequence learning for depression scale prediction from video,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+a47f834281c39b1b851757b807c92f43dc975206,Multi task sequence learning for depression scale prediction from video,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+a481e394f58f2d6e998aa320dad35c0d0e15d43c,Selectively guiding visual concept discovery,Colorado State University,Colorado State University,"Colorado State University, West Pitkin Street, Woodwest, Fort Collins, Larimer County, Colorado, 80526-2002, USA",40.57093580,-105.08655256,edu,
+a30d5b636086d80791578cbd0e0b02d87ab42d27,Actionness-Assisted Recognition of Actions,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+a39d318b68a2c262b6351a05f447dfcb0555da88,Facial Expression Recognition Based on Local Fourier Coefficients and Facial Fourier Descriptors,University of Electro-Communications,The University of Electro-Communications,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本",35.65729570,139.54255868,edu,
+a322479a6851f57a3d74d017a9cb6d71395ed806,Towards Pose Invariant Face Recognition in the Wild,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+a322479a6851f57a3d74d017a9cb6d71395ed806,Towards Pose Invariant Face Recognition in the Wild,"National University of Defense Technology, China","National University of Defence Technology, Changsha 410000, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.22902090,112.99483204,edu,
+a322479a6851f57a3d74d017a9cb6d71395ed806,Towards Pose Invariant Face Recognition in the Wild,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+a322479a6851f57a3d74d017a9cb6d71395ed806,Towards Pose Invariant Face Recognition in the Wild,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+a343bc9239a209af45c43f94b86651fd0074a364,Learning To Simulate,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+a35338cb4686cff66710b7f8102e5eabfc38adb8,Attribute-Augmented Semantic Hierarchy: Towards a Unified Framework for Content-Based Image Retrieval,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+a35338cb4686cff66710b7f8102e5eabfc38adb8,Attribute-Augmented Semantic Hierarchy: Towards a Unified Framework for Content-Based Image Retrieval,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+a35338cb4686cff66710b7f8102e5eabfc38adb8,Attribute-Augmented Semantic Hierarchy: Towards a Unified Framework for Content-Based Image Retrieval,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+a35338cb4686cff66710b7f8102e5eabfc38adb8,Attribute-Augmented Semantic Hierarchy: Towards a Unified Framework for Content-Based Image Retrieval,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+a35338cb4686cff66710b7f8102e5eabfc38adb8,Attribute-Augmented Semantic Hierarchy: Towards a Unified Framework for Content-Based Image Retrieval,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+a35338cb4686cff66710b7f8102e5eabfc38adb8,Attribute-Augmented Semantic Hierarchy: Towards a Unified Framework for Content-Based Image Retrieval,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+a301ddc419cbd900b301a95b1d9e4bb770afc6a3,DECK: Discovering Event Composition Knowledge from Web Images for Zero-Shot Event Detection and Recounting in Videos,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+a3c93737a4497350768b0dda08dbc0826670dc5b,Diagnosing state-of-the-art object proposal methods,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+a361e820a85fa91f23091068f8177c58489304b1,Hard to “tune in”: neural mechanisms of live face-to-face interaction with high-functioning autistic spectrum disorder,Nagoya University,Nagoya University,"SuperDARN (Hokkaido West), 太辛第1支線林道, 陸別町, 足寄郡, 十勝総合振興局, 北海道, 北海道地方, 日本",43.53750985,143.60768225,edu,
+a33edec6f5e544cde888409fd028b468a2e0bfba,Robust Subspace Clustering via Half-Quadratic Minimization,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+a34fb5c4b8b58ca19c376b1312e4d9955fe1d857,Deep Learning for Image Captioning,Rochester Institute of Technology,Rochester Institute of Technology,"Rochester Institute of Technology (RIT), 1, Lomb Memorial Drive, Bailey, Henrietta Town, Monroe County, New York, 14623, USA",43.08250655,-77.67121663,edu,
+a3f684930c5c45fcb56a2b407d26b63879120cbf,LPM for Fast Action Recognition with Large Number of Classes,University of Ottawa,University of Ottawa,"University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada",45.42580475,-75.68740118,edu,
+a3825a14676c5f88478af2cd254ba4c531d5e92c,Multi-modal Cycle-Consistent Generalized Zero-Shot Learning,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+a36f8947c7e33f73157d3ffb0660776403fc197c,3D Geometry-Aware Semantic Labeling of Outdoor Street Scenes,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+a353d425a602d04f1dfde2142650fe0fb5193159,An Incremental Structured Part Model for Image Classification,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+a312599f9a842cf686c6cf80b770e05840d32a5a,MTS: A Multiple Temporal Scale Tracker Handling Occlusion and Abrupt Motion Variation,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+a3d0ebb50d49116289fb176d28ea98a92badada6,Unsupervised Learning of Object Landmarks through Conditional Image Generation,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+a3d0ebb50d49116289fb176d28ea98a92badada6,Unsupervised Learning of Object Landmarks through Conditional Image Generation,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+a303fca91c181f1084d94948169ab73c45e2073e,Impact of Small Groups with Heterogeneous Preference on Behavioral Evolution in Population Evacuation,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+a303fca91c181f1084d94948169ab73c45e2073e,Impact of Small Groups with Heterogeneous Preference on Behavioral Evolution in Population Evacuation,Kyushu University,Kyushu University,"伊都ゲストハウス, 桜井太郎丸線, 西区, 福岡市, 福岡県, 九州地方, 819−0395, 日本",33.59914655,130.22359848,edu,
+a36aa784e00d479bb0e6cb8aa6b6cd2dfeadfe1b,Evaluation of different features for face recognition in video,University of Ottawa,University of Ottawa,"University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada",45.42580475,-75.68740118,edu,
+a360526696a2698ad31dfca4c529e098d2dbdbd1,Image Captioning with Semantic Attention,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+a3fa023d7355662d066882df8dead0cac6a8321e,Supplementary Material for “Adversarial Inverse Graphics Networks: Learning 2D-to-3D Lifting and Image-to-Image Translation from Unpaired Supervision”,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+a3f4f163e87b28901389e189bb7f0f655995793f,End-to-End Instance Segmentation and Counting with Recurrent Attention,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+a3f4f163e87b28901389e189bb7f0f655995793f,End-to-End Instance Segmentation and Counting with Recurrent Attention,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+a3f68fe7c296f6fa6ad508d1cf19d0f01f50e63f,Surveillance Face Super-Resolution via Shape Clustering and Subspace Learning,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+a3f68fe7c296f6fa6ad508d1cf19d0f01f50e63f,Surveillance Face Super-Resolution via Shape Clustering and Subspace Learning,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+a32d4195f7752a715469ad99cb1e6ebc1a099de6,The Potential of Using Brain Images for Authentication,"National University of Defense Technology, China","National University of Defence Technology, Changsha 410000, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.22902090,112.99483204,edu,
+a3d78bc94d99fdec9f44a7aa40c175d5a106f0b9,Recognizing Violence in Movies,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+a3d78bc94d99fdec9f44a7aa40c175d5a106f0b9,Recognizing Violence in Movies,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+a3d78bc94d99fdec9f44a7aa40c175d5a106f0b9,Recognizing Violence in Movies,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+a3d78bc94d99fdec9f44a7aa40c175d5a106f0b9,Recognizing Violence in Movies,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+a3dab83995f27ec8d09cbc06fe815ade88232d10,Probabilistic Group Testing under Sum Observations: A Parallelizable 2-Approximation for Entropy Loss,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+a3dab83995f27ec8d09cbc06fe815ade88232d10,Probabilistic Group Testing under Sum Observations: A Parallelizable 2-Approximation for Entropy Loss,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+a3dab83995f27ec8d09cbc06fe815ade88232d10,Probabilistic Group Testing under Sum Observations: A Parallelizable 2-Approximation for Entropy Loss,Portland State University,Portland State University,"Portland State University, Southwest Park Avenue, University District, Portland Downtown, Portland, Multnomah County, Oregon, 97201, USA",45.51181205,-122.68492999,edu,
+a308077e98a611a977e1e85b5a6073f1a9bae6f0,Intelligent Screening Systems for Cervical Cancer,University of Malaya,University of Malaya,"UM, Lingkaran Wawasan, Bukit Pantai, Bangsar, KL, 50603, Malaysia",3.12267405,101.65356103,edu,
+a31b862f1addbed64a2dac64d7d416e129cad6ad,DeepGarment : 3D Garment Shape Estimation from a Single Image,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+a35dd69d63bac6f3296e0f1d148708cfa4ba80f6,Audio Visual Emotion Recognition with Temporal Alignment and Perception Attention,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+a33c5b508c64d1b01f3d4567835de6a4242b6911,High-fidelity facial and speech animation for VR HMDs,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+a33c5b508c64d1b01f3d4567835de6a4242b6911,High-fidelity facial and speech animation for VR HMDs,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+a36c8a4213251d3fd634e8893ad1b932205ad1ca,Videos from the 2013 Boston Marathon : An Event Reconstruction Dataset for Synchronization and Localization,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+a3a2f3803bf403262b56ce88d130af15e984fff0,Building a Compact Relevant Sample Coverage for Relevance Feedback in Content-Based Image Retrieval,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+a3b70bf7e676f92ebb6dec3e2889c9131634f8b9,Use of 3D faces facilitates facial expression recognition in children,Shenzhen University,Shenzhen University,"深圳大学, 3688, 南海大道, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518060, 中国",22.53521465,113.93159110,edu,
+a3d11e98794896849ab2304a42bf83e2979e5fb5,In Defense of the Triplet Loss for Person Re-Identification,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+b56f3a7c50bfcd113d0ba84e6aa41189e262d7ae,Harvesting Motion Patterns in Still Images from the Internet,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+b5968e7bb23f5f03213178c22fd2e47af3afa04c,Multiple-Human Parsing in the Wild,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+b5968e7bb23f5f03213178c22fd2e47af3afa04c,Multiple-Human Parsing in the Wild,Beijing Jiaotong University,Beijing Jiaotong University,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国",39.94976005,116.33629046,edu,
+b5cd9e5d81d14868f1a86ca4f3fab079f63a366d,Tag-based video retrieval by embedding semantic content in a continuous word space,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+b5cd9e5d81d14868f1a86ca4f3fab079f63a366d,Tag-based video retrieval by embedding semantic content in a continuous word space,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+b5e3beb791cc17cdaf131d5cca6ceb796226d832,Novel Dataset for Fine-Grained Image Categorization: Stanford Dogs,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+b5184bd428b9a89255900dce50b4320741706744,Discovering Disentangled Representations with the F Statistic Loss,"University of Colorado, Boulder","University of Colorado, Boulder","Naropa University, Arapahoe Avenue, The Hill, Boulder, Boulder County, Colorado, 80309, USA",40.01407945,-105.26695944,edu,
+b56ffd4b244b2c3094cdb930ee569fb4e3bd95f0,SitNet: Discrete Similarity Transfer Network for Zero-shot Hashing,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+b56ffd4b244b2c3094cdb930ee569fb4e3bd95f0,SitNet: Discrete Similarity Transfer Network for Zero-shot Hashing,Lancaster University,Lancaster University,"Lancaster University, Library Avenue, Bowland College, Hala, Lancaster, Lancs, North West England, England, LA1 4AP, UK",54.00975365,-2.78757491,edu,
+b59df3832c3914c9aefc7f11017360a24bd11480,Identifying First-Person Camera Wearers in Third-Person Videos,Indiana University Bloomington,Indiana University Bloomington,"Indiana University Bloomington, East 17th Street, Bloomington, Monroe County, Indiana, 47408, USA",39.17720475,-86.51540030,edu,
+b59df3832c3914c9aefc7f11017360a24bd11480,Identifying First-Person Camera Wearers in Third-Person Videos,"University of California, Davis","University of California, Davis","University of California, Davis, Apiary Drive, Yolo County, California, 95616-5270, USA",38.53363490,-121.79077264,edu,
+b5c90dc06b63099c3d35c86c97fa24ebf9d41fb6,Learning to Classify Psychiatric Disorders based on fMR Images: Autism vs Healthy and ADHD vs Healthy,University of Alberta,University of Alberta,"University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada",53.52385720,-113.52282665,edu,
+b5c90dc06b63099c3d35c86c97fa24ebf9d41fb6,Learning to Classify Psychiatric Disorders based on fMR Images: Autism vs Healthy and ADHD vs Healthy,University of Alberta,University of Alberta,"University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada",53.52385720,-113.52282665,edu,
+b5354bcad6c11983f9614546371262c454c994ed,Maximum Similarity Based Feature Matching and Adaptive Multiple Kernel Learning for Object Recognition,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+b5354bcad6c11983f9614546371262c454c994ed,Maximum Similarity Based Feature Matching and Adaptive Multiple Kernel Learning for Object Recognition,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+b5f2846a506fc417e7da43f6a7679146d99c5e96,UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+b5da4943c348a6b4c934c2ea7330afaf1d655e79,Facial Landmarks Detection by Self-Iterative Regression based Landmarks-Attention Network,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+b5da4943c348a6b4c934c2ea7330afaf1d655e79,Facial Landmarks Detection by Self-Iterative Regression based Landmarks-Attention Network,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+b5402c03a02b059b76be829330d38db8e921e4b5,Hybridized KNN and SVM for gene expression data classification,Zhengzhou University,Zhengzhou University,"科学大道, 中原区 (Zhongyuan), 郑州市 / Zhengzhou, 河南省, 450001, 中国",34.80881680,113.53526640,edu,
+b5bd67ada6de799d96f65ef0f1b6ba1cb85e3dd8,Residual Codean Autoencoder for Facial Attribute Analysis,"IIIT Delhi, India","IIIT Delhi, India","Okhla Industrial Estate, Phase III, Near Govind Puri Metro Station, New Delhi, Delhi 110020, India",28.54562820,77.27315050,edu,
+b593f13f974cf444a5781bbd487e1c69e056a1f7,Query Image Query Image Retrievals Retrievals Transferred Poses Transferred Poses,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+b52c0faba5e1dc578a3c32a7f5cfb6fb87be06ad,Robust Face Recognition Technique under Varying Illumination,National Autonomous University of Mexico,Centro de Ciencias Aplicadas y Desarrollo Tecnológico,"University City, Mexico City, CDMX, Mexico",19.31888950,-99.18436760,edu,National Autonomous University of Mexico
+b56530be665b0e65933adec4cc5ed05840c37fc4,Reducing correspondence ambiguity in loosely labeled training data,University of Arizona,University of Arizona,"University of Arizona, North Highland Avenue, Rincon Heights, Barrio Viejo, Tucson, Pima County, Arizona, 85721, USA",32.23517260,-110.95095832,edu,
+b5fabc72ecdebd832fb02f1ea2e85672f2ef125e,Bregman Divergences for Infinite Dimensional Covariance Matrices,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+b5f4e617ac3fc4700ec8129fcd0dcf5f71722923,Hierarchical Wavelet Networks for Facial Feature Localization,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+b5f4e617ac3fc4700ec8129fcd0dcf5f71722923,Hierarchical Wavelet Networks for Facial Feature Localization,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+b5940250c0a136b85a4706b1bb13f52be0037837,3D Face Recognition Based on Multiple Keypoint Descriptors and Sparse Representation,Tongji University,Tongji University,"同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国",31.28473925,121.49694909,edu,
+b534163cf101520e0868c46a754748fd0e4e0ef9,Multiple-Shot Person Re-identification via Riemannian Discriminative Learning,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+b534163cf101520e0868c46a754748fd0e4e0ef9,Multiple-Shot Person Re-identification via Riemannian Discriminative Learning,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+b51b4ef97238940aaa4f43b20a861eaf66f67253,Unsupervised Modeling of Objects and Their Hierarchical Contextual Interactions,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+b5c945e04cdf204358e7964290867b38435ef458,Sliding-Window Optimization on an Ambiguity-Clearness Graph for Multi-object Tracking,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+b5d7c5aba7b1ededdf61700ca9d8591c65e84e88,Data pruning for template-based automatic speech recognition,Katholieke Universiteit Leuven,Katholieke Universiteit Leuven,"Laboratorium voor Bos, natuur en landschap, 102, Vital Decosterstraat, Sint-Maartensdal, Leuven, Vlaams-Brabant, Vlaanderen, 3000, België / Belgique / Belgien",50.88306860,4.70195030,edu,
+b5c749f98710c19b6c41062c60fb605e1ef4312a,Evaluating Two-Stream CNN for Video Classification,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+b53259a81dcfa9913495bb47f62627c51e20f086,DYAN: A Dynamical Atoms-Based Network for Video Prediction,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+b50eb94ab9c9a6ecb76a40a0043a74fc48d5f554,Tackling the Story Ending Biases in The Story Cloze Test,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+b56e4f61bfd6da098941d2aee8a3ab1221ce834a,Augmented Reality Meets Deep Learning for Car Instance Segmentation in Urban Scenes,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+b5b620774304e6245a660b14c1207386d3abad17,SketchNet: Sketch Classification with Web Images,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+b5b620774304e6245a660b14c1207386d3abad17,SketchNet: Sketch Classification with Web Images,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+b5930275813a7e7a1510035a58dd7ba7612943bc,Face Recognition Using L-Fisherfaces,Institute of Information Science,Institute of Information Science,"資訊科學研究所, 數理大道, 中研里, 南港子, 南港區, 臺北市, 11574, 臺灣",25.04107280,121.61475620,edu,
+b5930275813a7e7a1510035a58dd7ba7612943bc,Face Recognition Using L-Fisherfaces,Beijing Jiaotong University,Beijing Jiaotong University,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国",39.94976005,116.33629046,edu,
+b5930275813a7e7a1510035a58dd7ba7612943bc,Face Recognition Using L-Fisherfaces,Shandong University of Science and Technology,Shandong University of Science and Technology,"山东科技大学, 579, 前湾港路, 牛王庙, 北下庄, 黄岛区 (Huangdao), 青岛市, 山东省, 266500, 中国",36.00146435,120.11624057,edu,
+b59c8b44a568587bc1b61d130f0ca2f7a2ae3b88,An Enhanced Intelligent Agent with Image Description Generation,Northumbria University,Northumbria University,"Northumbria University, Birkdale Close, High Heaton, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE7 7TP, UK",55.00306320,-1.57463231,edu,
+b59cee1f647737ec3296ccb3daa25c890359c307,Continuously Reproducing Toolchains in Pattern Recognition and Machine Learning Experiments,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+b249f10a30907a80f2a73582f696bc35ba4db9e2,Improved graph-based SFA: Information preservation complements the slowness principle,Ruhr-University Bochum,Ruhr-University Bochum,"RUB, 150, Universitätsstraße, Ruhr-Universität, Querenburg, Bochum-Süd, Bochum, Regierungsbezirk Arnsberg, Nordrhein-Westfalen, 44801, Deutschland",51.44415765,7.26096541,edu,
+b281f6cf99eeb8dbb9bb0c31a57827c8c0493e7f,Multi-target Tracking by Rank-1 Tensor Approximation,Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+b2fde24de782d2979b946c49986cabdb12e84eba,Latent Hough Transform for Object Detection,ETH Zurich,"Computer Vision Laboratory, ETH Zurich, Zurich, Switzerland","Sternwartstrasse 7, 8092 Zürich, Switzerland",47.37723980,8.55216180,edu,
+b28eb219db9370cf20063288225cc2f3e6e5f984,Fast and Accurate Head Pose Estimation via Random Projection Forests,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+b2a0e5873c1a8f9a53a199eecae4bdf505816ecb,Hybrid VAE: Improving Deep Generative Models using Partial Observations,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+b2cd92d930ed9b8d3f9dfcfff733f8384aa93de8,"HyperFace: A Deep Multi-task Learning Framework for Face Detection, Landmark Localization, Pose Estimation, and Gender Recognition",University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+b2b28eeeaa2b613bf30b5bfee5ec4272ce184bf3,Measuring Collectiveness via Refined Topological Similarity,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+b2b28eeeaa2b613bf30b5bfee5ec4272ce184bf3,Measuring Collectiveness via Refined Topological Similarity,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+b2d4ed138816c671c3f698290557d26600377025,Image Caption Validation,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+b29c83b6ee10857e09ac3503916ae1b129642cae,Mo2Cap2: Real-time Mobile 3D Motion Capture with a Cap-mounted Fisheye Camera,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+b234cd7788a7f7fa410653ad2bafef5de7d5ad29,Unsupervised Temporal Ensemble Alignment for Rapid Annotation,Queensland University of Technology,Queensland University of Technology,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.47715625,153.02841004,edu,
+b234cd7788a7f7fa410653ad2bafef5de7d5ad29,Unsupervised Temporal Ensemble Alignment for Rapid Annotation,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+b2c60061ad32e28eb1e20aff42e062c9160786be,Diverse and Controllable Image Captioning with Part-of-Speech Guidance,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+b255474d62f082fa97f50ea1174bf339522f6c99,Facial mimicry in its social setting,University of Oslo,University of Oslo,"UiO, Moltke Moes vei, Blindern, Nordre Aker, Oslo, 0851, Norge",59.93891665,10.72170765,edu,
+b239b39c08a08d9c3b1da68a7bce162b580a746e,Gaze selection in complex social scenes,University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+b239b39c08a08d9c3b1da68a7bce162b580a746e,Gaze selection in complex social scenes,University of Alberta,University of Alberta,"University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada",53.52385720,-113.52282665,edu,
+b2d9877443ec7da2490027ccc932468f05c7bf85,Robust Canonical Time Warping for the Alignment of Grossly Corrupted Sequences,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+b2d9877443ec7da2490027ccc932468f05c7bf85,Robust Canonical Time Warping for the Alignment of Grossly Corrupted Sequences,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+b288a369c6f05443cb794048065b7a86139733d3,Convolutional gated recurrent networks for video segmentation,University of Alberta,University of Alberta,"University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada",53.52385720,-113.52282665,edu,
+b2180fc4f5cb46b5b5394487842399c501381d67,Learning a Deep Compact Image Representation for Visual Tracking,Hong Kong University of Science and Technology,Hong Kong University of Science and Technology,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.33863040,114.26203370,edu,
+b29fa452d737e2b6aa16d6f82a9a8daaea655287,Spontaneous Facial Actions Map onto Emotional Experiences in a Non-social Context: Toward a Component-Based Approach,Hiroshima University,Hiroshima University,"Hiroshima University 広島大学 東広島キャンパス, 出会いの道 Deai-no-michi Str., 西条下見, 東広島市, 広島県, 中国地方, 739-0047, 日本",34.40197660,132.71231950,edu,
+b29fa452d737e2b6aa16d6f82a9a8daaea655287,Spontaneous Facial Actions Map onto Emotional Experiences in a Non-social Context: Toward a Component-Based Approach,University of Vienna,University of Vienna,"Uni Wien, 1, Universitätsring, Schottenviertel, KG Innere Stadt, Innere Stadt, Wien, 1010, Österreich",48.21313020,16.36068653,edu,
+b29fa452d737e2b6aa16d6f82a9a8daaea655287,Spontaneous Facial Actions Map onto Emotional Experiences in a Non-social Context: Toward a Component-Based Approach,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+b2e7b1a8bd7375a043ad4eb1c88dbc7d436d9634,Effective face recognition using bag of features with additive kernels,Southeast University,Southeast University,"SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国",32.05752790,118.78682252,edu,
+b2e7b1a8bd7375a043ad4eb1c88dbc7d436d9634,Effective face recognition using bag of features with additive kernels,University of Nevada,University of Nevada,"Orange 1, Evans Avenue, Reno, Washoe County, Nevada, 89557, USA",39.54694490,-119.81346566,edu,
+b29b42f7ab8d25d244bfc1413a8d608cbdc51855,Effective face landmark localization via single deep network,"Sichuan University, Chengdu","Sichuan Univ., Chengdu","四川大学(华西校区), 校东路, 武侯区, 武侯区 (Wuhou), 成都市 / Chengdu, 四川省, 610014, 中国",30.64276900,104.06751175,edu,
+b29f348e8675f75ff160ec65ebeeb3f3979b65d8,An objective and subjective evaluation of content-based privacy protection of face images in video surveillance systems using JPEG XR,Korea Advanced Institute of Science and Technology,Korea Advanced Institute of Science and Technology,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.36971910,127.36253700,edu,
+b29f348e8675f75ff160ec65ebeeb3f3979b65d8,An objective and subjective evaluation of content-based privacy protection of face images in video surveillance systems using JPEG XR,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+b2e5df82c55295912194ec73f0dca346f7c113f6,CUHK&SIAT Submission for THUMOS15 Action Recognition Challenge,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+b2e5df82c55295912194ec73f0dca346f7c113f6,CUHK&SIAT Submission for THUMOS15 Action Recognition Challenge,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+b2e6944bebab8e018f71f802607e6e9164ad3537,Mixed Error Coding for Face Recognition with Mixed Occlusions,Zhejiang University of Technology,Zhejiang University of Technology,"浙江工业大学, 潮王路, 朝晖街道, 杭州市 Hangzhou, 浙江省, 310014, 中国",30.29315340,120.16204580,edu,
+b2e308649c7a502456a8e3c95ac7fbe6f8216e51,Recurrent Regression for Face Recognition,Southeast University,Southeast University,"SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国",32.05752790,118.78682252,edu,
+b285337ba61c2bb54181dbbb4f4863efe1aa6ec2,Realtime 3D eye gaze animation using a single RGB camera,Texas A&M University,Texas A&M University,"Texas A&M University, Horticulture Street, Park West, College Station, Brazos County, Texas, 77841, USA",30.61083650,-96.35212800,edu,
+b239a756f22201c2780e46754d06a82f108c1d03,Robust multimodal recognition via multitask multivariate low-rank representations,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+b242124e3bf1ebfc57b5279d4d75ade924a5d1e3,Vision-based Navigation of Autonomous Vehicle in Roadway Environments with Unexpected Hazards,Clemson University,Clemson University,"Clemson University, Old Stadium Road, Clemson Heights, Pickens County, South Carolina, 29631, USA",34.66869155,-82.83743476,edu,
+b242124e3bf1ebfc57b5279d4d75ade924a5d1e3,Vision-based Navigation of Autonomous Vehicle in Roadway Environments with Unexpected Hazards,Clemson University,Clemson University,"Clemson University, Old Stadium Road, Clemson Heights, Pickens County, South Carolina, 29631, USA",34.66869155,-82.83743476,edu,
+b242124e3bf1ebfc57b5279d4d75ade924a5d1e3,Vision-based Navigation of Autonomous Vehicle in Roadway Environments with Unexpected Hazards,Clemson University,Clemson University,"Clemson University, Old Stadium Road, Clemson Heights, Pickens County, South Carolina, 29631, USA",34.66869155,-82.83743476,edu,
+b242124e3bf1ebfc57b5279d4d75ade924a5d1e3,Vision-based Navigation of Autonomous Vehicle in Roadway Environments with Unexpected Hazards,Clemson University,Clemson University,"Clemson University, Old Stadium Road, Clemson Heights, Pickens County, South Carolina, 29631, USA",34.66869155,-82.83743476,edu,
+b20cfbb2348984b4e25b6b9174f3c7b65b6aed9e,Learning with Ambiguous Label Distribution for Apparent Age Estimation,Tampere University of Technology,Tampere University of Technology,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi",61.44964205,23.85877462,edu,
+d9912256502b9578cea7d149142832e0998d97ff,Metric Embedded Discriminative Vocabulary Learning for High-Level Person Representation,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+d949fadc9b6c5c8b067fa42265ad30945f9caa99,Rethinking Feature Discrimination and Polymerization for Large-scale Recognition,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+d93baa5ecf3e1196b34494a79df0a1933fd2b4ec,Precise Temporal Action Localization by Evolving Temporal Proposals,East China Normal University,East China Normal University,"华东师范大学, 3663, 中山北路, 曹家渡, 普陀区, 普陀区 (Putuo), 上海市, 200062, 中国",31.22849230,121.40211389,edu,
+d93baa5ecf3e1196b34494a79df0a1933fd2b4ec,Precise Temporal Action Localization by Evolving Temporal Proposals,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+d93baa5ecf3e1196b34494a79df0a1933fd2b4ec,Precise Temporal Action Localization by Evolving Temporal Proposals,East China Normal University,East China Normal University,"华东师范大学, 3663, 中山北路, 曹家渡, 普陀区, 普陀区 (Putuo), 上海市, 200062, 中国",31.22849230,121.40211389,edu,
+d93baa5ecf3e1196b34494a79df0a1933fd2b4ec,Precise Temporal Action Localization by Evolving Temporal Proposals,East China Normal University,East China Normal University,"华东师范大学, 3663, 中山北路, 曹家渡, 普陀区, 普陀区 (Putuo), 上海市, 200062, 中国",31.22849230,121.40211389,edu,
+d961617db4e95382ba869a7603006edc4d66ac3b,Experimenting Motion Relativity for Action Recognition with a Large Number of Classes,East China Normal University,East China Normal University,"华东师范大学, 3663, 中山北路, 曹家渡, 普陀区, 普陀区 (Putuo), 上海市, 200062, 中国",31.22849230,121.40211389,edu,
+d9bda208addf00a55df23821f6d4abdb85e73599,Pedestrian Detection and Tracking from Low-Resolution Unmanned Aerial Vehicle Thermal Imagery,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+d925540a8cdedb92c7f20ebfd9b8baf36fe6caa4,The utility of 3D landmarks for arbitrary pose face recognition,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+d92c9295a050b09db921b8ef986264dc5d7eba22,On the Flip Side: Identifying Counterexamples in Visual Question Answering,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+d92c9295a050b09db921b8ef986264dc5d7eba22,On the Flip Side: Identifying Counterexamples in Visual Question Answering,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+d92c9295a050b09db921b8ef986264dc5d7eba22,On the Flip Side: Identifying Counterexamples in Visual Question Answering,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+d9358de0d80f4f4d89c91b2b16fd52279b4834e4,Exploring Correlations for Multiple Facial Attributes Recognition through Graph Attention Network,East China Normal University,East China Normal University,"华东师范大学, 3663, 中山北路, 曹家渡, 普陀区, 普陀区 (Putuo), 上海市, 200062, 中国",31.22849230,121.40211389,edu,
+d934eec76a2588934094098987de72bdf1214d48,One-Shot Unsupervised Cross Domain Translation,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+d95213a0ef820c93bf0a41e1ce24aea1dc9f137d,Pose-Guided Human Parsing by an AND/OR Graph Using Pose-Context Features,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+d91d1fc3f54fafaa66df12b9db2f83b477992e37,SPIN: Seamless Operating System Integration of Peer-to-Peer DMA Between SSDs and GPUs,Technion,Technion,"Haifa, 3200003, Israel",32.77677830,35.02312710,edu,
+d91d1fc3f54fafaa66df12b9db2f83b477992e37,SPIN: Seamless Operating System Integration of Peer-to-Peer DMA Between SSDs and GPUs,Technion,Technion,"Haifa, 3200003, Israel",32.77677830,35.02312710,edu,
+d91d1fc3f54fafaa66df12b9db2f83b477992e37,SPIN: Seamless Operating System Integration of Peer-to-Peer DMA Between SSDs and GPUs,Technion,Technion,"Haifa, 3200003, Israel",32.77677830,35.02312710,edu,
+d9c4586269a142faee309973e2ce8cde27bda718,Contextual Visual Similarity,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+d9c4586269a142faee309973e2ce8cde27bda718,Contextual Visual Similarity,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+d912b8d88d63a2f0cb5d58164e7414bfa6b41dfa,Facial identification problem: A tracking based approach,University of Milan,University of Milan,"Milan Avenue, Ray Mar Terrace, University City, St. Louis County, Missouri, 63130, USA",38.67966620,-90.32628160,edu,
+d9533bede70753bf1fba1e4cc7ad492b88ccf373,STA: Spatial-Temporal Attention for Large-Scale Video-based Person Re-Identification,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+d972b4da29aebf5da7c02e77a9118b0f60895985,Embedding Network for Visual Relation Detection,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+d94969ec95d4c8cd7d0d4da3e83131b6f76cd7c4,Non-local NetVLAD Encoding for Video Classification,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+d9584adbbb214465e4f2d4dfae1b12d33de7630b,Context Embedding Networks,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+d9b69093a82ee7cb9ad499c76c9b0d30aa377454,"PReMVOS : Proposal-generation , Refinement and Merging for the YouTube-VOS Challenge on Video Object Segmentation 2018",RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+d9bbaa38d7997f334ef8d662fd2ce380d495545a,Face Recognition System Based on Spectral Graph Wavelet Theory,Anna University,Anna University,"Anna University, Nuclear Physics Road, Srinagar Colony, Ward 171, Zone 13 Adyar, Chennai, Chennai district, Tamil Nadu, 600025, India",13.01058380,80.23537360,edu,
+d963e640d0bf74120f147329228c3c272764932b,Image Processing for Face Recognition Rate Enhancement,Hefei University of Technology,Hefei University of Technology,"合肥工业大学(屯溪路校区), 193号, 南一环路, 航运南村, 包公街道, 合肥市区, 合肥市, 安徽省, 230009, 中国",31.84691800,117.29053367,edu,
+d95ce873ed42b7c7facaa4c1e9c72b57b4e279f6,Generalizing a Person Retrieval Model Hetero- and Homogeneously,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+d95ce873ed42b7c7facaa4c1e9c72b57b4e279f6,Generalizing a Person Retrieval Model Hetero- and Homogeneously,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+d95ce873ed42b7c7facaa4c1e9c72b57b4e279f6,Generalizing a Person Retrieval Model Hetero- and Homogeneously,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+d9fdc9c63bb4838031eac017ba9b8e9bda3cb845,Trading-off performance and complexity in identification problem,University of Geneva,University of Geneva,"University of Chicago-Yerkes Observatory, 373, West Geneva Street, Williams Bay, Walworth County, Wisconsin, 53191, USA",42.57054745,-88.55578627,edu,
+d99ec70dac11292c63b7726c58c24dfacddb2889,Learning visual attribute from image and text,Tohoku University,Tohoku University,"Tohoku University, 五橋通, 青葉区, 仙台市, 宮城県, 東北地方, 980-0811, 日本",38.25309450,140.87365930,edu,
+d915e634aec40d7ee00cbea96d735d3e69602f1a,Two-Stream convolutional nets for action recognition in untrimmed video,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+d915e634aec40d7ee00cbea96d735d3e69602f1a,Two-Stream convolutional nets for action recognition in untrimmed video,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+d90026a9ca2489707aff2807617f3782f78097be,"Survey on audiovisual emotion recognition: databases, features, and data fusion strategies",Institute of Information Science,Institute of Information Science,"資訊科學研究所, 數理大道, 中研里, 南港子, 南港區, 臺北市, 11574, 臺灣",25.04107280,121.61475620,edu,
+d94395882da6da17cee0a6ea6f1058314f091f05,Inter-BMV: Interpolation with Block Motion Vectors for Fast Semantic Segmentation on Video,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+ac1d97a465b7cc56204af5f2df0d54f819eef8a6,A Look at Eye Detection for Unconstrained Environments,University of Colorado at Colorado Springs,University of Colorado at Colorado Springs,"1420 Austin Bluffs Pkwy, Colorado Springs, CO 80918, USA",38.89646790,-104.80505940,edu,
+ac2e44622efbbab525d4301c83cb4d5d7f6f0e55,"A 3D Morphable Model Learnt from 10,000 Faces",University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+acc550d31b50c8d95794dc35dd1e271f979a0854,Optimized Kernel-based Projection Space of Riemannian Manifolds,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+acc550d31b50c8d95794dc35dd1e271f979a0854,Optimized Kernel-based Projection Space of Riemannian Manifolds,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+acc550d31b50c8d95794dc35dd1e271f979a0854,Optimized Kernel-based Projection Space of Riemannian Manifolds,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+ac6c3b3e92ff5fbcd8f7967696c7aae134bea209,Deep Cascaded Bi-Network for Face Hallucination,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+ac6c3b3e92ff5fbcd8f7967696c7aae134bea209,Deep Cascaded Bi-Network for Face Hallucination,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+ac6c3b3e92ff5fbcd8f7967696c7aae134bea209,Deep Cascaded Bi-Network for Face Hallucination,"University of California, Merced","University of California, Merced","University of California, Merced, Ansel Adams Road, Merced County, California, USA",37.36566745,-120.42158888,edu,
+ac855f0de9086e9e170072cb37400637f0c9b735,Fast Geometrically-Perturbed Adversarial Faces,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+acdc333f7b32d987e65ce15f21db64e850ca9471,Direct Loss Minimization for Training Deep Neural Nets,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+acdc333f7b32d987e65ce15f21db64e850ca9471,Direct Loss Minimization for Training Deep Neural Nets,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+ac207f5e368e285b0dd54387e3a898c550249b20,Stacked RNNs for Encoder-Decoder Networks: Accurate Machine Understanding of Images,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+ac21c8aceea6b9495574f8f9d916e571e2fc497f,Pose-Independent Identity-based Facial Image Retrieval using Contextual Similarity,"King Abdullah University of Science and Technology, Saudi Arabia",King Abdullah University of Science and Technology 4700,"KAUST, Collaboration Avenue, ثول, منطقة مكة المكرمة, 23955, السعودية",22.31055485,39.10515486,edu,
+ac768ff426a3a04a835cdc627481afc898a138f9,Learning Joint Feature Adaptation for Zero-Shot Recognition,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+aca75c032cfb0b2eb4c0ae56f3d060d8875e43f9,Co-Regularized Ensemble for Feature Selection,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+aca75c032cfb0b2eb4c0ae56f3d060d8875e43f9,Co-Regularized Ensemble for Feature Selection,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+ac5d9753a53b0d69308596908032f85b416c0056,Selectivity of Face Distortion Aftereffects for Differences in Expression or Gender,University of Nevada,University of Nevada,"Orange 1, Evans Avenue, Reno, Washoe County, Nevada, 89557, USA",39.54694490,-119.81346566,edu,
+ac5d9753a53b0d69308596908032f85b416c0056,Selectivity of Face Distortion Aftereffects for Differences in Expression or Gender,Dartmouth College,Dartmouth College,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA",43.70479270,-72.29259090,edu,
+ac5d9753a53b0d69308596908032f85b416c0056,Selectivity of Face Distortion Aftereffects for Differences in Expression or Gender,University of Nevada,University of Nevada,"Orange 1, Evans Avenue, Reno, Washoe County, Nevada, 89557, USA",39.54694490,-119.81346566,edu,
+ac9516a589901f1421e8ce905dd8bc5b689317ca,A Practical Framework for Executing Complex Queries over Encrypted Multimedia Data,University of Texas at Dallas,University of Texas at Dallas,"University of Texas at Dallas, Richardson, Dallas County, Texas, 78080, USA",32.98207990,-96.75662780,edu,
+ac9bdf668852dea5fa8ec4262f10562eda9bedd2,Deep Structured Learning for Facial Expression Intensity Estimation,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+ac9bdf668852dea5fa8ec4262f10562eda9bedd2,Deep Structured Learning for Facial Expression Intensity Estimation,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+ac9227ff4262405b8eec8ebe4802b763bd6f55e1,Surface Matching and Registration by Landmark Curve-Driven Canonical Quasiconformal Mapping,Florida International University,Florida International University,"FIU, Southwest 14th Street, Sweetwater, University Park, Miami-Dade County, Florida, 33199, USA",25.75533775,-80.37628897,edu,
+ac9227ff4262405b8eec8ebe4802b763bd6f55e1,Surface Matching and Registration by Landmark Curve-Driven Canonical Quasiconformal Mapping,Shandong University,Shandong University,"山东大学, 泰安街, 鳌山卫街道, 即墨区, 青岛市, 山东省, 266200, 中国",36.36934730,120.67381800,edu,
+acfb90f474209f56455c4d1ae60d524d8c4c9df8,Pixel-wise Attentional Gating for Parsimonious Pixel Labeling,"University of California, Irvine","University of California, Irvine","University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA",33.64319010,-117.84016494,edu,
+aca57cd3f1f4edea9918814aabd0460c682cd56e,Discriminant Projective Non-Negative Matrix Factorization,"National University of Defense Technology, China","National University of Defence Technology, Changsha 410000, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.22902090,112.99483204,edu,
+aca57cd3f1f4edea9918814aabd0460c682cd56e,Discriminant Projective Non-Negative Matrix Factorization,"National University of Defense Technology, China","National University of Defence Technology, Changsha 410000, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.22902090,112.99483204,edu,
+ac68dacd66ebe1c7eab56aaee9a8bef478be9a23,Sparkle: adaptive sample based scheduling for cluster computing,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+ac51d9ddbd462d023ec60818bac6cdae83b66992,An Efficient Robust Eye Localization by Learning the Convolution Distribution Using Eye Template,"National University of Defense Technology, China","National University of Defence Technology, Changsha 410000, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.22902090,112.99483204,edu,
+ac51d9ddbd462d023ec60818bac6cdae83b66992,An Efficient Robust Eye Localization by Learning the Convolution Distribution Using Eye Template,"National University of Defense Technology, China","National University of Defence Technology, Changsha 410000, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.22902090,112.99483204,edu,
+ac32cdb3e0a996c75d5df2973c1a2444a81c0a5e,Defense Against Adversarial Attacks with Saak Transform,Singapore University of Technology and Design,Singapore University of Technology and Design,"Singapore University of Technology and Design, Simpang Bedok, Changi Business Park, Southeast, 486041, Singapore",1.34021600,103.96508900,edu,
+ac32cdb3e0a996c75d5df2973c1a2444a81c0a5e,Defense Against Adversarial Attacks with Saak Transform,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+acc548285f362e6b08c2b876b628efceceeb813e,Objectifying Facial Expressivity Assessment of Parkinson's Patients: Preliminary Study,Vrije Universiteit Brussel,Vrije Universiteit Brussel,"Vrije Universiteit Brussel, 170, Quai de l'Industrie - Nijverheidskaai, Anderlecht, Brussel-Hoofdstad - Bruxelles-Capitale, Région de Bruxelles-Capitale - Brussels Hoofdstedelijk Gewest, 1070, België / Belgique / Belgien",50.84110070,4.32377555,edu,
+acc548285f362e6b08c2b876b628efceceeb813e,Objectifying Facial Expressivity Assessment of Parkinson's Patients: Preliminary Study,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+acc548285f362e6b08c2b876b628efceceeb813e,Objectifying Facial Expressivity Assessment of Parkinson's Patients: Preliminary Study,Vrije Universiteit Brussel,Vrije Universiteit Brussel,"Vrije Universiteit Brussel, 170, Quai de l'Industrie - Nijverheidskaai, Anderlecht, Brussel-Hoofdstad - Bruxelles-Capitale, Région de Bruxelles-Capitale - Brussels Hoofdstedelijk Gewest, 1070, België / Belgique / Belgien",50.84110070,4.32377555,edu,
+acc548285f362e6b08c2b876b628efceceeb813e,Objectifying Facial Expressivity Assessment of Parkinson's Patients: Preliminary Study,Vrije Universiteit Brussel,Vrije Universiteit Brussel,"Vrije Universiteit Brussel, 170, Quai de l'Industrie - Nijverheidskaai, Anderlecht, Brussel-Hoofdstad - Bruxelles-Capitale, Région de Bruxelles-Capitale - Brussels Hoofdstedelijk Gewest, 1070, België / Belgique / Belgien",50.84110070,4.32377555,edu,
+ac8269a3033ede3c1ce4381b1fef61375a54cfb9,Coordinating the Design and Management of Heterogeneous Datacenter Resources,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+acee2201f8a15990551804dd382b86973eb7c0a8,To boost or not to boost? On the limits of boosted trees for object detection,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+ac9a331327cceda4e23f9873f387c9fd161fad76,Deep Convolutional Neural Network for Age Estimation based on VGG-Face Model,University of Bridgeport,University of Bridgeport,"University of Bridgeport, Park Avenue, Bridgeport Downtown South Historic District, Bridgeport, Fairfield County, Connecticut, 06825, USA",41.16648580,-73.19205640,edu,
+ac9a331327cceda4e23f9873f387c9fd161fad76,Deep Convolutional Neural Network for Age Estimation based on VGG-Face Model,University of Bridgeport,University of Bridgeport,"University of Bridgeport, Park Avenue, Bridgeport Downtown South Historic District, Bridgeport, Fairfield County, Connecticut, 06825, USA",41.16648580,-73.19205640,edu,
+ac56b4d6f9775211dfc966e9151862fd508d3142,Three-dimensional information in face recognition: an eye-tracking study.,University of Oslo,University of Oslo,"UiO, Moltke Moes vei, Blindern, Nordre Aker, Oslo, 0851, Norge",59.93891665,10.72170765,edu,
+ac56b4d6f9775211dfc966e9151862fd508d3142,Three-dimensional information in face recognition: an eye-tracking study.,University of Oslo,University of Oslo,"UiO, Moltke Moes vei, Blindern, Nordre Aker, Oslo, 0851, Norge",59.93891665,10.72170765,edu,
+acaa781f353c769ae5f6101aab140f51b2d33cd2,Recent advances in correlation filter theory and applications,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+acaa781f353c769ae5f6101aab140f51b2d33cd2,Recent advances in correlation filter theory and applications,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+acc9821b61ea804bd1e0b0e23a45f08fbf760a37,Smile Detection in the Wild Based on Transfer Learning,University of Delaware,University of Delaware,"University of Delaware, South College Avenue, Newark, New Castle County, Delaware, 19713, USA",39.68103280,-75.75401840,edu,
+ac752e998ea646411438fd517c36e1e8c6507d15,Guided saccades modulate object and face-specific activity in the fusiform gyrus.,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+acfa01182d18d8f3fbbd7df6be0998269116ba6d,Character Image Patterns as Big Data,Kyushu University,Kyushu University,"伊都ゲストハウス, 桜井太郎丸線, 西区, 福岡市, 福岡県, 九州地方, 819−0395, 日本",33.59914655,130.22359848,edu,
+acfe5b5c99be70fa3120d410e7be55b9fe299f40,Factorizable Net: An Efficient Subgraph-Based Framework for Scene Graph Generation,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+acfe5b5c99be70fa3120d410e7be55b9fe299f40,Factorizable Net: An Efficient Subgraph-Based Framework for Scene Graph Generation,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+acfe5b5c99be70fa3120d410e7be55b9fe299f40,Factorizable Net: An Efficient Subgraph-Based Framework for Scene Graph Generation,MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+ac86ccc16d555484a91741e4cb578b75599147b2,Morphable Face Models - An Open Framework,University of Basel,University of Basel,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra",47.56126510,7.57529610,edu,
+ac5b3e24a7dd2970c323ca7679625a7d29602480,Warsaw set of emotional facial expression pictures: a validation study of facial display photographs,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+ac5b3e24a7dd2970c323ca7679625a7d29602480,Warsaw set of emotional facial expression pictures: a validation study of facial display photographs,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+ac75c662568cbb7308400cc002469a14ff25edfd,Regularization studies on LDA for face recognition,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+ac0e595afa57db8c9310d72b2a2eb0758dc1e48f,Discriminative Learning of Latent Features for Zero-Shot Recognition,University of Dundee,University of Dundee,"University of Dundee, Park Wynd, Law, Dundee, Dundee City, Scotland, DD1 4HN, UK",56.45796755,-2.98214831,edu,
+ac9dfbeb58d591b5aea13d13a83b1e23e7ef1fea,From Gabor Magnitude to Gabor Phase Features: Tackling the Problem of Face Recognition under Severe Illumination Changes,University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.05015580,14.46907327,edu,
+ac5a1b5a90dfeb4d22c37d806385cb9046e5edcb,Modeling Camera Effects to Improve Visual Learning from Synthetic Data,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+ac7031769c08423774ae4346f7492f6814176268,Enabling Pedestrian Safety using Computer Vision Techniques: A Case Study of the 2018 Uber Inc. Self-driving Car Crash,Texas A&M University,Texas A&M University,"Texas A&M University, Horticulture Street, Park West, College Station, Brazos County, Texas, 77841, USA",30.61083650,-96.35212800,edu,
+ac7031769c08423774ae4346f7492f6814176268,Enabling Pedestrian Safety using Computer Vision Techniques: A Case Study of the 2018 Uber Inc. Self-driving Car Crash,Texas A&M University,Texas A&M University,"Texas A&M University, Horticulture Street, Park West, College Station, Brazos County, Texas, 77841, USA",30.61083650,-96.35212800,edu,
+ac392dba43796a25c2eee5f08671537634a77029,CyCADA: Cycle-Consistent Adversarial Domain Adaptation,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+ac61f5e442e653e2503aea85425f0b9dba9f768a,MagnifyMe: Aiding Cross Resolution Face Recognition via Identity Aware Synthesis,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+ac559888f996923c06b1cf90db6b57b12e582289,Benchmarking neuromorphic vision: lessons learnt from computer vision,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+ac559888f996923c06b1cf90db6b57b12e582289,Benchmarking neuromorphic vision: lessons learnt from computer vision,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+ac559888f996923c06b1cf90db6b57b12e582289,Benchmarking neuromorphic vision: lessons learnt from computer vision,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+ac559888f996923c06b1cf90db6b57b12e582289,Benchmarking neuromorphic vision: lessons learnt from computer vision,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+ac2881bdf7b57dc1672a17b221d68a438d79fce8,Learning a High Fidelity Pose Invariant Model for High-resolution Face Frontalization,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+acfecef9e56ff36455aed13f8e6be1a79b42f20f,Hit or Run: Exploring Aggressive and Avoidant Reactions to Interpersonal Provocation Using a Novel Fight-or-Escape Paradigm (FOE),University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+ad01687649d95cd5b56d7399a9603c4b8e2217d7,Investigating Open-World Person Re-identification Using a Drone,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+ada53a115e1551f3fbad3dc5930c1187473a78a4,Efficient Object Category Recognition Using Classemes,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+ad1f223e83338b0b08779b3736d5a3b7ccfec592,Deep Kinematic Pose Regression,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+ad1f223e83338b0b08779b3736d5a3b7ccfec592,Deep Kinematic Pose Regression,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+ad1f223e83338b0b08779b3736d5a3b7ccfec592,Deep Kinematic Pose Regression,Tongji University,Tongji University,"同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国",31.28473925,121.49694909,edu,
+ad8540379884ec03327076b562b63bc47e64a2c7,Bee royalty offspring algorithm for improvement of facial expressions classification model,Islamic Azad University,Islamic Azad University,"دانشگاه آزاد اسلامی, همدان, بخش مرکزی شهرستان همدان, شهرستان همدان, استان همدان, ‏ایران‎",34.84529990,48.55962120,edu,
+adaed4e92c93eb005198e41f87cf079e46050b5a,Discriminative Invariant Kernel Features: A Bells-and-Whistles-Free Approach to Unsupervised Face Recognition and Pose Estimation,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+adaed4e92c93eb005198e41f87cf079e46050b5a,Discriminative Invariant Kernel Features: A Bells-and-Whistles-Free Approach to Unsupervised Face Recognition and Pose Estimation,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+adaed4e92c93eb005198e41f87cf079e46050b5a,Discriminative Invariant Kernel Features: A Bells-and-Whistles-Free Approach to Unsupervised Face Recognition and Pose Estimation,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+adce9902dca7f4e8a9b9cf6686ec6a7c0f2a0ba6,"Two Birds, One Stone: Jointly Learning Binary Code for Large-Scale Face Image Retrieval and Attributes Prediction",Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+adce9902dca7f4e8a9b9cf6686ec6a7c0f2a0ba6,"Two Birds, One Stone: Jointly Learning Binary Code for Large-Scale Face Image Retrieval and Attributes Prediction",University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+adc0b5d9f010f8b7d9900fcb1703c3882e340d65,Nasal Oxytocin Treatment Biases Dogs’ Visual Attention and Emotional Response toward Positive Human Facial Expressions,University of Manitoba,University of Manitoba,"University of Manitoba, Gillson Street, Normand Park, Saint Vital, Winnipeg, Manitoba, R3T 2N2, Canada",49.80915360,-97.13304179,edu,
+ad6b23435649d3d88a6b33154b9e6e3e5648a33d,Visual Curiosity: Learning to Ask Questions to Learn Visual Recognition,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+ad9ecacca5c28b098096ad0cbd81fe84405924e3,1 Face Recognition by Sparse Representation,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+ad7dfaadf9d99eadbb001ff0e0974f53704012b1,Single Image Action Recognition Using Semantic Body Part Actions,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+adf423f2a76301e34aed59d4e6d6f5378dcdadb4,A Soft Computing Based Approach for Multi-Accent Classification in IVR Systems,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+add50a7d882eb38e35fe70d11cb40b1f0059c96f,High-fidelity Pose and Expression Normalization for face recognition in the wild,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+ad5d2146a629b786712eb21d4dbfa31394ca07b4,Sputnik Tracker: Having a Companion Improves Robustness of the Tracker,Czech Technical University,Czech Technical University,"České vysoké učení technické v Praze, Resslova, Nové Město, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 11121, Česko",50.07642960,14.41802312,edu,
+ad784332cc37720f03df1c576e442c9c828a587a,Face recognition based on face-specific subspace,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+ad784332cc37720f03df1c576e442c9c828a587a,Face recognition based on face-specific subspace,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+ad836360812f87e45795f8345de3bdc6b13add81,Kernelized structural SVM learning for supervised object segmentation,Google,"Google, Inc.","1600 Amphitheatre Pkwy, Mountain View, CA 94043, USA",37.42199990,-122.08405750,company,"Google, Mountain View, CA"
+ada42b99f882ba69d70fff68c9ccbaff642d5189,Semantic Image Segmentation and Web-Supervised Visual Learning,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+ad467baa4f59d18ed998757bcba3df3c2a753df8,Realtime Multilevel Crowd Tracking Using Reciprocal Velocity Obstacles,University of North Carolina at Chapel Hill,University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.91139710,-79.05045290,edu,
+ada1a5f2d2a3fb471de4a561ed13c52d0904b578,InverseFaceNet : Deep Monocular Inverse Face Rendering — Supplemental Material —,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+ada1a5f2d2a3fb471de4a561ed13c52d0904b578,InverseFaceNet : Deep Monocular Inverse Face Rendering — Supplemental Material —,University of Bath,University of Bath,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK",51.37914420,-2.32523320,edu,
+ad0d4d5c61b55a3ab29764237cd97be0ebb0ddff,Weakly Supervised Action Localization by Sparse Temporal Pooling Network,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+addbddc42462975a02f4933d36f430b874b3d52b,"Social attention and real-world scenes: the roles of action, competition and social content.",University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+addbddc42462975a02f4933d36f430b874b3d52b,"Social attention and real-world scenes: the roles of action, competition and social content.",University of Alberta,University of Alberta,"University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada",53.52385720,-113.52282665,edu,
+addbddc42462975a02f4933d36f430b874b3d52b,"Social attention and real-world scenes: the roles of action, competition and social content.",University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+addbddc42462975a02f4933d36f430b874b3d52b,"Social attention and real-world scenes: the roles of action, competition and social content.",University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+ad5950257e053b08657ea298f7b89ba358b8bfcf,Textually Enriched Neural Module Networks for Visual Question Answering,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+ad46f1de2001474cce1047d88703f61580c8a5de,Face Recognition,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+ada13fd37da7a28e74aaed4a413533fa4f4b3b37,Answerer in Questioner's Mind: Information Theoretic Approach to Goal-Oriented Visual Dialog,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+ad9cb522cc257e3c5d7f896fe6a526f6583ce46f,Real-Time Recognition of Facial Expressions for Affective Computing Applications,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+ad62c6e17bc39b4dec20d32f6ac667ae42d2c118,A Synchronization Ground Truth for the Jiku Mobile Video Dataset,Institute of Information Technology,Institute of Information Technology,"Institute of Information Technology, Sir Sayed Road, ফকিরাপুল, সিদ্দিক বাজার, ঢাকা, ঢাকা বিভাগ, 1000, বাংলাদেশ",23.72898990,90.39826820,edu,
+ad08c97a511091e0f59fc6a383615c0cc704f44a,Towards the improvement of self-service systems via emotional virtual agents,University of Abertay,University of Abertay,"Abertay University, Bell Street, City Centre, Dundee, Dundee City, Scotland, DD1 1HG, UK",56.46323375,-2.97447512,edu,
+ad08c97a511091e0f59fc6a383615c0cc704f44a,Towards the improvement of self-service systems via emotional virtual agents,University of Abertay,University of Abertay,"Abertay University, Bell Street, City Centre, Dundee, Dundee City, Scotland, DD1 1HG, UK",56.46323375,-2.97447512,edu,
+ad08c97a511091e0f59fc6a383615c0cc704f44a,Towards the improvement of self-service systems via emotional virtual agents,University of Abertay,University of Abertay,"Abertay University, Bell Street, City Centre, Dundee, Dundee City, Scotland, DD1 1HG, UK",56.46323375,-2.97447512,edu,
+ad08c97a511091e0f59fc6a383615c0cc704f44a,Towards the improvement of self-service systems via emotional virtual agents,University of Abertay,University of Abertay,"Abertay University, Bell Street, City Centre, Dundee, Dundee City, Scotland, DD1 1HG, UK",56.46323375,-2.97447512,edu,
+ad2339c48ad4ffdd6100310dcbb1fb78e72fac98,Video Fill In the Blank Using LR/RL LSTMs with Spatial-Temporal Attentions,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+ad7e2dd9ce31c2093d5b611372c44654d8d594de,Improving Consistency-Based Semi-Supervised Learning with Weight Averaging,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+ad2cb5c255e555d9767d526721a4c7053fa2ac58,Longitudinal Study of Child Face Recognition,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+ad2cb5c255e555d9767d526721a4c7053fa2ac58,Longitudinal Study of Child Face Recognition,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+ad247138e751cefa3bb891c2fe69805da9c293d7,A Novel Hybrid Method for Face Recognition Based on 2d Wavelet and Singular Value Decomposition,Islamic Azad University,Islamic Azad University,"دانشگاه آزاد اسلامی, همدان, بخش مرکزی شهرستان همدان, شهرستان همدان, استان همدان, ‏ایران‎",34.84529990,48.55962120,edu,
+adc2a323af5f8be790b7fe5ded8b5b276f0a1b31,Taking A Closer Look at Domain Shift: Category-level Adversaries for Semantics Consistent Domain Adaptation,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+adc2a323af5f8be790b7fe5ded8b5b276f0a1b31,Taking A Closer Look at Domain Shift: Category-level Adversaries for Semantics Consistent Domain Adaptation,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+ad8f0b5dd8d89c0b0805a77dc27a9ce22caf6c59,"The computational magic of the ventral stream : sketch of a theory ( and why some deep architectures work ) . December 30 , 2012 DRAFT",McGovern Institute for Brain Research,McGovern Institute for Brain Research,"McGovern Institute for Brain Research (MIT), Main Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.36262950,-71.09144810,edu,
+adef82b510dd72999bb04e13660c9a77b5abeb4c,Face recognition by fractal transformations,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+ad84f49b2cd1b85a6d7df2304144a093f5b610a8,Learning from Noisy Labels with Deep Neural Networks,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+bbf56398dba5593a2aed1c3857fa011442b3aed6,Mind Your Language: Learning Visually Grounded Dialog in a Multi-Agent Setting,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+bbc4bdab563b8b4cea55dfd6a7ea32680e082933,Normalized Autobinomial Markov Channels For Pedestrian Detection,University Politehnica of Bucharest,University Politehnica of Bucharest,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România",44.43918115,26.05044565,edu,
+bb31312a7f07486676cae4f7a2ad7da43b0700e2,The impact of privacy protection filters on gender recognition,EURECOM,EURECOM,"Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France",43.61438600,7.07112500,edu,
+bb31312a7f07486676cae4f7a2ad7da43b0700e2,The impact of privacy protection filters on gender recognition,Orange,Orange Labs,"78 Rue Olivier de Serres, 75015 Paris, France",48.83321220,2.29421550,company,"78 Rue Olivier de Serres, Paris, 75015"
+bbc4b376ebd296fb9848b857527a72c82828fc52,Attributes for Improved Attributes,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+bbc4b376ebd296fb9848b857527a72c82828fc52,Attributes for Improved Attributes,University of Maryland College Park,University of Maryland College Park,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",38.99203005,-76.94610290,edu,
+bb4362bd6f0bc5bb467fc8f169723243caa97d1d,"Joint learning of visual attributes, object classes and visual saliency","University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+bb4362bd6f0bc5bb467fc8f169723243caa97d1d,"Joint learning of visual attributes, object classes and visual saliency","University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+bbd9b5e4d4761d923d21a060513e826bf5bfc620,Harvesting Multiple Views for Marker-Less 3D Human Pose Annotations,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+bbd9b5e4d4761d923d21a060513e826bf5bfc620,Harvesting Multiple Views for Marker-Less 3D Human Pose Annotations,Ryerson University,Ryerson University,"Ryerson University, Gould Street, Downtown Yonge, Old Toronto, Toronto, Ontario, M5B 2G9, Canada",43.65815275,-79.37908010,edu,
+bbb71cbca731295758563acdc67273b99618e1c0,SwapNet: Image Based Garment Transfer,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+bbb71cbca731295758563acdc67273b99618e1c0,SwapNet: Image Based Garment Transfer,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+bb06ef67a49849c169781657be0bb717587990e0,Impact of temporal subsampling on accuracy and performance in practical video classification,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+bb06ef67a49849c169781657be0bb717587990e0,Impact of temporal subsampling on accuracy and performance in practical video classification,University of Bologna,Università di Bologna,"Via Zamboni, 33, 40126 Bologna BO, Italy",44.49623180,11.35415700,edu,
+bbf28f39e5038813afd74cf1bc78d55fcbe630f1,Style Aggregated Network for Facial Landmark Detection,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+bbe949c06dc4872c7976950b655788555fe513b8,Automatic Frequency Band Selection for Illumination Robust Face Recognition,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+bbcf6f54d3e991f85a949544abf20b781d5ba2ed,Weighted principal component extraction with genetic algorithms,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+bbcb4920b312da201bf4d2359383fb4ee3b17ed9,Robust Face Recognition via Multi-Scale Patch-Based Matrix Regression,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+bb6bf94bffc37ef2970410e74a6b6dc44a7f4feb,Situation Recognition with Graph Neural Networks Supplementary Material,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+bb1bc9df5e9cec3e8a03a027b8016b8fc25be73a,Improving Bi-directional Generation between Different Modalities with Variational Autoencoders,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+bba153ebdf11e6fb8716e35749c671ac96c14176,Image Crowd Counting Using Convolutional Neural Network and Markov Random Field,Shanghai University,Shanghai University,"上海大学, 锦秋路, 大场镇, 宝山区 (Baoshan), 上海市, 201906, 中国",31.32235655,121.38400941,edu,
+bba153ebdf11e6fb8716e35749c671ac96c14176,Image Crowd Counting Using Convolutional Neural Network and Markov Random Field,Shanghai University,Shanghai University,"上海大学, 锦秋路, 大场镇, 宝山区 (Baoshan), 上海市, 201906, 中国",31.32235655,121.38400941,edu,
+bb21a57edd10c042bd137b713fcbf743021ab232,The More You Know: Using Knowledge Graphs for Image Classification,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+bb491d3bd43d8fb018cb7f14ca4a17738225bafb,De-genderization by body contours reshaping,EURECOM,EURECOM,"Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France",43.61438600,7.07112500,edu,
+bb6f922cc6f94beacc93aead7af53e9bcb9fe3b4,A Multi-scale CNN for Affordance Segmentation in RGB Images,Oregon State University,Oregon State University,"OSU Beaver Store, 538, Southwest 6th Avenue, Portland Downtown, Portland, Multnomah County, Oregon, 97204, USA",45.51982890,-122.67797964,edu,
+bb65479d3f38b73826596e5da9dbf5ee4199d42c,Scale Aggregation Network for Accurate and Efficient Crowd Counting,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+bb030eaf7c25953369ee111dc1555f4f85409bb4,Scenarios: A New Representation for Complex Scene Understanding,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+bbfe095e11ecfdb9d9e8577e119bbd67170d6925,DeepSIC: Deep Semantic Image Compression,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+bbfe095e11ecfdb9d9e8577e119bbd67170d6925,DeepSIC: Deep Semantic Image Compression,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+bbfe095e11ecfdb9d9e8577e119bbd67170d6925,DeepSIC: Deep Semantic Image Compression,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+bb0071e21e1f00568ea80dd22c5bcef06bdebe2c,Surface Reconstruction of Rotating Objects from Monocular Video,University of Warwick,University of Warwick,"University of Warwick, University Road, Kirby Corner, Cannon Park, Coventry, West Midlands Combined Authority, West Midlands, England, CV4 7AL, UK",52.37931310,-1.56042520,edu,
+bbc5f4052674278c96abe7ff9dc2d75071b6e3f3,Nonlinear Hierarchical Part-Based Regression for Unconstrained Face Alignment,State University of New Jersey,The State University of New Jersey,"Rutgers New Brunswick: Livingston Campus, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA",40.51865195,-74.44099801,edu,
+bbfe0527e277e0213aafe068113d719b2e62b09c,Dog Breed Classification Using Part Localization,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+bbfe0527e277e0213aafe068113d719b2e62b09c,Dog Breed Classification Using Part Localization,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+bbb274a7f79c94eb2862ca99dcb23de43b9ff8ae,A Geometric Morphometric Approach to the Analysis of Lip Shape during Speech: Development of a Clinical Outcome Measure,Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.48799610,-3.17969747,edu,
+bbb274a7f79c94eb2862ca99dcb23de43b9ff8ae,A Geometric Morphometric Approach to the Analysis of Lip Shape during Speech: Development of a Clinical Outcome Measure,Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.48799610,-3.17969747,edu,
+d74e14de664be4b784813d93e260abe379e2602d,Supplementary Material for : Video Prediction with Appearance and Motion Conditions,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+d73d2c9a6cef79052f9236e825058d5d9cdc1321,Cutting the visual world into bigger slices for improved video concept detection. (Amélioration de la détection des concepts dans les vidéos en coupant de plus grandes tranches du monde visuel),EURECOM,EURECOM,"Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France",43.61438600,7.07112500,edu,
+d794ffece3533567d838f1bd7f442afee13148fd,Hand Detection and Tracking in Videos for Fine-Grained Action Recognition,University of Electro-Communications,The University of Electro-Communications,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本",35.65729570,139.54255868,edu,
+d7593148e4319df7a288180d920f2822eeecea0b,A Differential Approach for Gaze Estimation with Calibration,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+d7cbedbee06293e78661335c7dd9059c70143a28,MobileFaceNets: Efficient CNNs for Accurate Real-time Face Verification on Mobile Devices,Beijing Jiaotong University,Beijing Jiaotong University,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国",39.94976005,116.33629046,edu,
+d7d9c1fa77f3a3b3c2eedbeb02e8e7e49c955a2f,Automating Image Analysis by Annotating Landmarks with Deep Neural Networks,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+d7d9c1fa77f3a3b3c2eedbeb02e8e7e49c955a2f,Automating Image Analysis by Annotating Landmarks with Deep Neural Networks,University of North Carolina at Chapel Hill,University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.91139710,-79.05045290,edu,
+d7d6f1b1e832bc7f52ed34131e3f200badb601e3,EC : A Uniform Platform for Security Analysis of Deep Learning Model,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+d7d6f1b1e832bc7f52ed34131e3f200badb601e3,EC : A Uniform Platform for Security Analysis of Deep Learning Model,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+d7c094f5be41a13a579d8922ec4d50c70be1c276,Image-Based Multi-Target Tracking through Multi-Bernoulli Filtering with Interactive Likelihoods,Marquette University,Marquette University,"Marquette University, West Wisconsin Avenue, University Hill, Milwaukee, Milwaukee County, Wisconsin, 53226, USA",43.03889625,-87.93155450,edu,
+d7f5f0066cecaf8760433e7dfb0eaaaf61aa6ef6,Relaxed collaborative representation for pattern classification,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+d78734c54f29e4474b4d47334278cfde6efe963a,Exploring Disentangled Feature Representation Beyond Face Identification,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+d78734c54f29e4474b4d47334278cfde6efe963a,Exploring Disentangled Feature Representation Beyond Face Identification,SenseTime,SenseTime,"China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1号-7",39.99300800,116.32988200,company,"1 Zhongguancun E Rd, Haidian Qu, China"
+d785fcf71cb22f9c33473cba35f075c1f0f06ffc,Learning active facial patches for expression analysis,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+d785fcf71cb22f9c33473cba35f075c1f0f06ffc,Learning active facial patches for expression analysis,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+d785fcf71cb22f9c33473cba35f075c1f0f06ffc,Learning active facial patches for expression analysis,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+d79365336115661b0e8dbbcd4b2aa1f504b91af6,Variational methods for conditional multimodal deep learning,Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.02223470,77.56718325,edu,
+d75a9e646500d543094f7c0ab80c9f5c30808304,Features for Multi-Target Multi-Camera Tracking and Re-Identification,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+d7221695df4de3f34d5e4a877b71c14bc88760d2,Proposal Incorporating Structural Bias into Neural Networks,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+d7f11ebb73bfe74a57c33f7e75f7981ad9385580,Real-Time Resource Allocation for Tracking Systems,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+d7f11ebb73bfe74a57c33f7e75f7981ad9385580,Real-Time Resource Allocation for Tracking Systems,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+d7f11ebb73bfe74a57c33f7e75f7981ad9385580,Real-Time Resource Allocation for Tracking Systems,University of Liverpool,University of Liverpool,"Victoria Building, Brownlow Hill, Knowledge Quarter, Liverpool, North West England, England, L3, UK",53.40617900,-2.96670819,edu,
+d7f11ebb73bfe74a57c33f7e75f7981ad9385580,Real-Time Resource Allocation for Tracking Systems,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+d787f691af05a56eb0e91437fc6b1dfe5fbccbb9,The Effect of Affective Context on Visuocortical Processing of Neutral Faces in Social Anxiety,University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.54724732,edu,
+d7ebd31d4616c297292a36785727f1bc5b470290,Sensitivity to eye gaze in autism: is it normal? Is it automatic? Is it social?,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+d73a0c3db0b347bc6f3796eb89d1342bf9ccee9b,Fast Multi-aspect 2D Human Detection,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+d7d01406bf8bec7e48b70e886d93e935b8885815,The perception of emotion in artificial agents,University of Glasgow,University of Glasgow,"University of Glasgow, University Avenue, Yorkhill, Hillhead, Glasgow, Glasgow City, Scotland, G, UK",55.87231535,-4.28921784,edu,
+d7d01406bf8bec7e48b70e886d93e935b8885815,The perception of emotion in artificial agents,University of Glasgow,University of Glasgow,"University of Glasgow, University Avenue, Yorkhill, Hillhead, Glasgow, Glasgow City, Scotland, G, UK",55.87231535,-4.28921784,edu,
+d7cbd030b282a7b0fe397df04a6a3c860608982f,Gaussian Descriptor Based on Local Features for Person Re-identification,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+d78fbd11f12cbc194e8ede761d292dc2c02d38a2,Enhancing Gray Scale Images for Face Detection under Unstable Lighting Condition,University of Dschang,University of Dschang,"Université de Dschang, Départementale 65, Fokoué, Menoua, OU, Cameroun",5.44094480,10.07120561,edu,
+d78fbd11f12cbc194e8ede761d292dc2c02d38a2,Enhancing Gray Scale Images for Face Detection under Unstable Lighting Condition,University of Dschang,University of Dschang,"Université de Dschang, Départementale 65, Fokoué, Menoua, OU, Cameroun",5.44094480,10.07120561,edu,
+d72973a72b5d891a4c2d873daeb1bc274b48cddf,A New Supervised Dimensionality Reduction Algorithm Using Linear Discriminant Analysis and Locality Preserving Projection,Guangdong Medical College,Guangdong Medical College,"医学院, 真如路, 凤凰新村, 天河区, 广州市, 广东省, 510635, 中国",23.12944890,113.34376110,edu,
+d72973a72b5d891a4c2d873daeb1bc274b48cddf,A New Supervised Dimensionality Reduction Algorithm Using Linear Discriminant Analysis and Locality Preserving Projection,South China University of Technology,South China University of Technology,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.05020420,113.39880323,edu,
+d7c9bd2587204071b87feaad01d631e7ea591c6b,Cmu - Ucr - Bosch @ Trecvid 2017 : Video to Text Retrieval,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+d700aedcb22a4be374c40d8bee50aef9f85d98ef,Rethinking Spatiotemporal Feature Learning: Speed-Accuracy Trade-offs in Video Classification,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+d7431a266a151fb92abb7ff93fd458f21c6c3c41,Probabilistic sequence models for image sequence processing and recognition,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+d01067340615131f9109f71590ff66f418ce8f97,Deep View-Sensitive Pedestrian Attribute Inference in an end-to-end Model,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+d0e895a272d684a91c1b1b1af29747f92919d823,Classification of Mouth Action Units using Local Binary Patterns,American University in Cairo,The American University in Cairo,"الجامعة الأمريكية بالقاهرة, شارع القصر العينى, القاهرة القديمة, جاردن سيتي, القاهرة, محافظة القاهرة, 11582, مصر",30.04287695,31.23664139,edu,
+d0e895a272d684a91c1b1b1af29747f92919d823,Classification of Mouth Action Units using Local Binary Patterns,American University in Cairo,The American University in Cairo,"الجامعة الأمريكية بالقاهرة, شارع القصر العينى, القاهرة القديمة, جاردن سيتي, القاهرة, محافظة القاهرة, 11582, مصر",30.04287695,31.23664139,edu,
+d082f35534932dfa1b034499fc603f299645862d,"TAMING WILD FACES: WEB-SCALE, OPEN-UNIVERSE FACE IDENTIFICATION IN STILL AND VIDEO IMAGERY by ENRIQUE",University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+d0ac9913a3b1784f94446db2f1fb4cf3afda151f,Exploiting Multi-modal Curriculum in Noisy Web Data for Large-scale Concept Learning,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+d0471d5907d6557cf081edf4c7c2296c3c221a38,A Constrained Deep Neural Network for Ordinal Regression,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+d0f81c31e11af1783644704321903a3d2bd83fd6,3D Façade Labeling over Complex Scenarios: A Case Study Using Convolutional Neural Network and Structure-From-Motion,University of Stuttgart,University of Stuttgart,"Pädagogische Hochschule Ludwigsburg, 46, Reuteallee, Ludwigsburg-Nord, Ludwigsburg, Landkreis Ludwigsburg, Regierungsbezirk Stuttgart, Baden-Württemberg, 71634, Deutschland",48.90953380,9.18318920,edu,
+d0f81c31e11af1783644704321903a3d2bd83fd6,3D Façade Labeling over Complex Scenarios: A Case Study Using Convolutional Neural Network and Structure-From-Motion,University of Exeter,University of Exeter,"University of Exeter, Stocker Road, Exwick, Exeter, Devon, South West England, England, EX4 4QN, UK",50.73693020,-3.53647672,edu,
+d0a188debff9baca296787dfb207f151cb78300a,Physical Representation-based Predicate Optimization for a Visual Analytics Database,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+d0a188debff9baca296787dfb207f151cb78300a,Physical Representation-based Predicate Optimization for a Visual Analytics Database,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+d0a188debff9baca296787dfb207f151cb78300a,Physical Representation-based Predicate Optimization for a Visual Analytics Database,Toyota Research Institute,Toyota Research Institute,"Toyota Research Institute, 4440, West El Camino Real, Los Altos, Santa Clara County, California, 94022, USA",37.40253645,-122.11655107,company,
+d0a188debff9baca296787dfb207f151cb78300a,Physical Representation-based Predicate Optimization for a Visual Analytics Database,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+d09d663055b3b6d588bf4de2f386bb144d09aea8,Deep Adaptive Temporal Pooling for Activity Recognition,Singapore University of Technology and Design,Singapore University of Technology and Design,"Singapore University of Technology and Design, Simpang Bedok, Changi Business Park, Southeast, 486041, Singapore",1.34021600,103.96508900,edu,
+d09d663055b3b6d588bf4de2f386bb144d09aea8,Deep Adaptive Temporal Pooling for Activity Recognition,Singapore University of Technology and Design,Singapore University of Technology and Design,"Singapore University of Technology and Design, Simpang Bedok, Changi Business Park, Southeast, 486041, Singapore",1.34021600,103.96508900,edu,
+d095bafdecbae3a234d92ee96005b45cb5b1f55f,Finger Vein Recognition based on Personalized Discriminative Bit Map,Shandong University,Shandong University,"山东大学, 泰安街, 鳌山卫街道, 即墨区, 青岛市, 山东省, 266200, 中国",36.36934730,120.67381800,edu,
+d0e684f9614ab97a8f4ec47775124242ce493f26,Group Tracking: Exploring Mutual Relations for Multiple Object Tracking,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+d0d58e1885db56bdaa3890a1cd32c6d6a42f5f49,Person Identification Using Face and Iris Multimodal Biometric System,Eastern Mediterranean University,Eastern Mediterranean University,"Eastern Mediterranean University (EMU) - Stadium, Nehir Caddesi, Gazimağusa, Αμμόχωστος - Mağusa, Kuzey Kıbrıs, 99450, Κύπρος - Kıbrıs",35.14479945,33.90492318,edu,
+d06c8e3c266fbae4026d122ec9bd6c911fcdf51d,Role for 2D image generated 3D face models in the rehabilitation of facial palsy,Northumbria University,Northumbria University,"Northumbria University, Birkdale Close, High Heaton, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE7 7TP, UK",55.00306320,-1.57463231,edu,
+d074b33afd95074d90360095b6ecd8bc4e5bb6a2,Human-Robot Collaboration: a Survey,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+d0137881f6c791997337b9cc7f1efbd61977270d,"University of Dundee An automated pattern recognition system for classifying indirect immunofluorescence images for HEp-2 cells and specimens Manivannan,",University of Dundee,University of Dundee,"University of Dundee, Park Wynd, Law, Dundee, Dundee City, Scotland, DD1 4HN, UK",56.45796755,-2.98214831,edu,
+d04d5692461d208dd5f079b98082eda887b62323,Subspace learning with frequency regularizer: Its application to face recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+d05513c754966801f26e446db174b7f2595805ba,Everything is in the Face? Represent Faces with Object Bank,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+d05513c754966801f26e446db174b7f2595805ba,Everything is in the Face? Represent Faces with Object Bank,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+d05513c754966801f26e446db174b7f2595805ba,Everything is in the Face? Represent Faces with Object Bank,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+d0d1a18469bd41b9464b393b56d209d53869bd77,Long-Term On-Board Prediction of People in Traffic Scenes under Uncertainty,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+d060a7a715f2e233dd09777bf651be10fa19f3d3,Versatile Auxiliary Classifier + Generative Adversarial Network (VAC+GAN); Training Conditional Generators,National University of Ireland Galway,National University of Ireland Galway,"National University of Ireland, Galway, Earl's Island, Townparks, Nun's Island, Galway Municipal District, Cathair na Gaillimhe, County Galway, Connacht, H91 F5TE, Ireland",53.27639715,-9.05829961,edu,
+d060a7a715f2e233dd09777bf651be10fa19f3d3,Versatile Auxiliary Classifier + Generative Adversarial Network (VAC+GAN); Training Conditional Generators,National University of Ireland Galway,National University of Ireland Galway,"National University of Ireland, Galway, Earl's Island, Townparks, Nun's Island, Galway Municipal District, Cathair na Gaillimhe, County Galway, Connacht, H91 F5TE, Ireland",53.27639715,-9.05829961,edu,
+d060a7a715f2e233dd09777bf651be10fa19f3d3,Versatile Auxiliary Classifier + Generative Adversarial Network (VAC+GAN); Training Conditional Generators,National University of Ireland Galway,National University of Ireland Galway,"National University of Ireland, Galway, Earl's Island, Townparks, Nun's Island, Galway Municipal District, Cathair na Gaillimhe, County Galway, Connacht, H91 F5TE, Ireland",53.27639715,-9.05829961,edu,
+d0509afe9c2c26fe021889f8efae1d85b519452a,Visual Psychophysics for Making Face Recognition Algorithms More Explainable,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+d0509afe9c2c26fe021889f8efae1d85b519452a,Visual Psychophysics for Making Face Recognition Algorithms More Explainable,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+d02bf4082850a667bf0b7b6205df1cf9c1899233,Quantifying the visual concreteness of words and topics in multimodal datasets,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+d02bf4082850a667bf0b7b6205df1cf9c1899233,Quantifying the visual concreteness of words and topics in multimodal datasets,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+d02bf4082850a667bf0b7b6205df1cf9c1899233,Quantifying the visual concreteness of words and topics in multimodal datasets,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+d02e27e724f9b9592901ac1f45830341d37140fe,DA-GAN: Instance-level Image Translation by Deep Attention Generative Adversarial Networks (with Supplementary Materials),Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+d02e27e724f9b9592901ac1f45830341d37140fe,DA-GAN: Instance-level Image Translation by Deep Attention Generative Adversarial Networks (with Supplementary Materials),Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+d02b32b012ffba2baeb80dca78e7857aaeececb0,Human Pose Estimation : Extension and Application,International Institute of Information Technology,International Institute of Information Technology,"International Institute of Information Technology, Hyderabad, Campus Road, Ward 105 Gachibowli, Greater Hyderabad Municipal Corporation West Zone, Hyderabad, Rangareddy District, Telangana, 500032, India",17.44549570,78.34854698,edu,
+d055f36e7975fa5b7785575dd64b5f95b9088465,PixelNet: Towards a General Pixel-level Architecture,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+d0933550b75237c285c8bb2393185475014dbc2d,An Intelligent Automated Door Control System Based on a Smart Camera,National Taiwan University of Science and Technology,National Taiwan University of Science and Technology,"臺科大, 43, 基隆路四段, 學府里, 下內埔, 大安區, 臺北市, 10607, 臺灣",25.01353105,121.54173736,edu,
+d0fe63de22729bcecf12a84554cdfbccdb44c391,SpatialVOC2K: A Multilingual Dataset of Images with Annotations and Features for Spatial Relations between Objects,University of Malta,University of Malta,"University of Malta, Ring Road, Japanese Garden, L-Imsida, Malta, MSD 9027, Malta",35.90232260,14.48341890,edu,
+d0d7671c816ed7f37b16be86fa792a1b29ddd79b,Exploring Semantic Inter-Class Relationships (SIR) for Zero-Shot Action Recognition,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+d0d7671c816ed7f37b16be86fa792a1b29ddd79b,Exploring Semantic Inter-Class Relationships (SIR) for Zero-Shot Action Recognition,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+d0d7671c816ed7f37b16be86fa792a1b29ddd79b,Exploring Semantic Inter-Class Relationships (SIR) for Zero-Shot Action Recognition,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+d0d7671c816ed7f37b16be86fa792a1b29ddd79b,Exploring Semantic Inter-Class Relationships (SIR) for Zero-Shot Action Recognition,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+d01303062b21cd9ff46d5e3ff78897b8499480de,Multi-task Learning by Maximizing Statistical Dependence,University of Bath,University of Bath,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK",51.37914420,-2.32523320,edu,
+d01303062b21cd9ff46d5e3ff78897b8499480de,Multi-task Learning by Maximizing Statistical Dependence,University of Bath,University of Bath,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK",51.37914420,-2.32523320,edu,
+d01303062b21cd9ff46d5e3ff78897b8499480de,Multi-task Learning by Maximizing Statistical Dependence,University of Bath,University of Bath,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK",51.37914420,-2.32523320,edu,
+d0dcef424ab6b32d00bdc66e8d4a61ebe911fff8,Working Memory Capacity and Fluid Intelligence,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+d0f54b72e3a3fe7c0e65d7d5a3b30affb275f4c5,Towards Universal Representation for Unseen Action Recognition,"University of California, Merced","University of California, Merced","University of California, Merced, Ansel Adams Road, Merced County, California, USA",37.36566745,-120.42158888,edu,
+d0f54b72e3a3fe7c0e65d7d5a3b30affb275f4c5,Towards Universal Representation for Unseen Action Recognition,Newcastle University,Newcastle University,"Newcastle University, Claremont Walk, Haymarket, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE1 7RU, UK",54.98023235,-1.61452627,edu,
+be86d88ecb4192eaf512f29c461e684eb6c35257,Automatic Attribute Discovery and Characterization from Noisy Web Data,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+be86d88ecb4192eaf512f29c461e684eb6c35257,Automatic Attribute Discovery and Characterization from Noisy Web Data,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+beb49072f5ba79ed24750108c593e8982715498e,GeneGAN: Learning Object Transfiguration and Attribute Subspace from Unpaired Data,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+beb49072f5ba79ed24750108c593e8982715498e,GeneGAN: Learning Object Transfiguration and Attribute Subspace from Unpaired Data,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+beb49072f5ba79ed24750108c593e8982715498e,GeneGAN: Learning Object Transfiguration and Attribute Subspace from Unpaired Data,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+be4a20113bc204019ea79c6557a0bece23da1121,DeepCache: Principled Cache for Mobile Deep Vision,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+be4a20113bc204019ea79c6557a0bece23da1121,DeepCache: Principled Cache for Mobile Deep Vision,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+be4a20113bc204019ea79c6557a0bece23da1121,DeepCache: Principled Cache for Mobile Deep Vision,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+be4a20113bc204019ea79c6557a0bece23da1121,DeepCache: Principled Cache for Mobile Deep Vision,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+be4a20113bc204019ea79c6557a0bece23da1121,DeepCache: Principled Cache for Mobile Deep Vision,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+be4a20113bc204019ea79c6557a0bece23da1121,DeepCache: Principled Cache for Mobile Deep Vision,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+be4a20113bc204019ea79c6557a0bece23da1121,DeepCache: Principled Cache for Mobile Deep Vision,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+be4a20113bc204019ea79c6557a0bece23da1121,DeepCache: Principled Cache for Mobile Deep Vision,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+bec2c65a8419b9ecaf04e8c854b5ad391894a6f1,Construction of a bird image dataset for ecological investigations,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+be22647956f1bc8cf6f936ae3c85f5637492b6b8,Ambiguity Helps: Classification with Disagreements in Crowdsourced Annotations,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+becd5fd62f6301226b8e150e1a5ec3180f748ff8,Robust and Practical Face Recognition via Structured Sparsity,"Advanced Digital Sciences Center, Singapore","Advanced Digital Sciences Center, Singapore","1 Create Way, 14-02 Create Tower, Singapore 138602",1.30372570,103.77377630,edu,
+becd5fd62f6301226b8e150e1a5ec3180f748ff8,Robust and Practical Face Recognition via Structured Sparsity,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+becd5fd62f6301226b8e150e1a5ec3180f748ff8,Robust and Practical Face Recognition via Structured Sparsity,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+bebb8a97b2940a4e5f6e9d3caf6d71af21585eda,Mapping Emotional Status to Facial Expressions,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+be4f7679797777f2bc1fd6aad8af67cce5e5ce87,Interestingness Prediction by Robust Learning to Rank,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+be4f7679797777f2bc1fd6aad8af67cce5e5ce87,Interestingness Prediction by Robust Learning to Rank,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+beeadf57a976f23f4fd6fa8a330eac6c81d3e3cd,ESGM : Event Enrichment and Summarization by Graph Model,Hefei University of Technology,Hefei University of Technology,"合肥工业大学(屯溪路校区), 193号, 南一环路, 航运南村, 包公街道, 合肥市区, 合肥市, 安徽省, 230009, 中国",31.84691800,117.29053367,edu,
+beeadf57a976f23f4fd6fa8a330eac6c81d3e3cd,ESGM : Event Enrichment and Summarization by Graph Model,EURECOM,EURECOM,"Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France",43.61438600,7.07112500,edu,
+beeadf57a976f23f4fd6fa8a330eac6c81d3e3cd,ESGM : Event Enrichment and Summarization by Graph Model,East China Normal University,East China Normal University,"华东师范大学, 3663, 中山北路, 曹家渡, 普陀区, 普陀区 (Putuo), 上海市, 200062, 中国",31.22849230,121.40211389,edu,
+be28ed1be084385f5d389db25fd7f56cd2d7f7bf,Exploring computation-communication tradeoffs in camera systems,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+be28ed1be084385f5d389db25fd7f56cd2d7f7bf,Exploring computation-communication tradeoffs in camera systems,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+beecaf2d6e9d102b6b2459ea38e15179a4b55ffd,Surveillance Video Parsing with Single Frame Supervision,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+bea0bb77c0d75c3d70fefc274bfbff93a3eff015,Video Captioning with Transferred Semantic Attributes,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+bea0bb77c0d75c3d70fefc274bfbff93a3eff015,Video Captioning with Transferred Semantic Attributes,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+bee512a8117ef26e5c9fbcc36da8d0d0fabcc5d5,Online Adaptative Curriculum Learning for GANs,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+bee512a8117ef26e5c9fbcc36da8d0d0fabcc5d5,Online Adaptative Curriculum Learning for GANs,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+bee512a8117ef26e5c9fbcc36da8d0d0fabcc5d5,Online Adaptative Curriculum Learning for GANs,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+bea56c0e615e6cea496f52331432bbc344d55192,Pose - Invariant Multimodal ( 2 D + 3 D ) Face Recognition using Geodesic Distance Map,Amirkabir University of Technology,Amirkabir University of Technology,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎",35.70451400,51.40972058,edu,
+bea56c0e615e6cea496f52331432bbc344d55192,Pose - Invariant Multimodal ( 2 D + 3 D ) Face Recognition using Geodesic Distance Map,Griffith University,Griffith University,"Griffith University Nathan Campus, Johnson Path, Nathan, Nathan Heights, QLD, 4111, Australia",-27.55339750,153.05336234,edu,
+be7bb84581b09f47668966d0cb70df0876c84a21,Fixation detection for head-mounted eye tracking based on visual similarity of gaze targets,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+be7bb84581b09f47668966d0cb70df0876c84a21,Fixation detection for head-mounted eye tracking based on visual similarity of gaze targets,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+be7bb84581b09f47668966d0cb70df0876c84a21,Fixation detection for head-mounted eye tracking based on visual similarity of gaze targets,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+bec31269632c17206deb90cd74367d1e6586f75f,Large-scale Datasets: Faces with Partial Occlusions and Pose Variations in the Wild,Wayne State University,Wayne State University,"Parking Structure 3, East Warren Avenue, New Center, Detroit, Wayne County, Michigan, 48236, USA",42.35775700,-83.06286711,edu,
+be5276e9744c4445fe5b12b785650e8f173f56ff,Spatio-Temporal VLAD Encoding for Human Action Recognition in Videos,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+be5276e9744c4445fe5b12b785650e8f173f56ff,Spatio-Temporal VLAD Encoding for Human Action Recognition in Videos,University Politehnica of Bucharest,University Politehnica of Bucharest,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România",44.43918115,26.05044565,edu,
+be5276e9744c4445fe5b12b785650e8f173f56ff,Spatio-Temporal VLAD Encoding for Human Action Recognition in Videos,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+bef6daed8cd1ac90ee1c0a42e5c019bbf523491c,"Total Variation, Cheeger Cuts","University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+bed8a74e75ed96868ce81ed1080433ef5be66a52,Supervising the New with the Old: Learning SFM from SFM,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+bec26ea7335ed723a1c4360d6365f2dd846161e7,A method to Suppress Facial Expression in Posed and Spontaneous Videos,University of South Florida,University of South Florida,"University of South Florida, Leroy Collins Boulevard, Tampa, Hillsborough County, Florida, 33620, USA",28.05999990,-82.41383619,edu,
+be9cab9e9040b667e7902a4d9fbf1a358b350d60,Pedestrian Detection and Tracking Using HOG and Oriented-LBP Features,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+bec3c3e6bb9c738dad942f00fc69848018c3b1cc,Part-Activated Deep Reinforcement Learning for Action Prediction,Tianjin University,Tianjin University,"天津大学, 92, 卫津路, 南开区, 天津市, 300072, 中国",39.10770000,117.17000000,edu,
+bec3c3e6bb9c738dad942f00fc69848018c3b1cc,Part-Activated Deep Reinforcement Learning for Action Prediction,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+beb0239feac388e4ee04492159a45f7e2c71e1e3,POI: Multiple Object Tracking with High Performance Detection and Appearance Feature,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+beb0239feac388e4ee04492159a45f7e2c71e1e3,POI: Multiple Object Tracking with High Performance Detection and Appearance Feature,SenseTime,SenseTime,"China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1号-7",39.99300800,116.32988200,company,"1 Zhongguancun E Rd, Haidian Qu, China"
+be57d2aaab615ec8bc1dd2dba8bee41a4d038b85,Automatic Analysis of Naturalistic Hand-Over-Face Gestures,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+be4f18e25b06f430e2de0cc8fddcac8585b00beb,A New Face Recognition Algorithm based on Dictionary Learning for a Single Training Sample per Person,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+be2ce56434c8cf50c08f8be6f4f9b9f7c716eabd,A Convnet for Non-maximum Suppression,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+b38c7a58b4c5298705b8f63dcb6a1c21ee297af8,"Fusing Deep Learned and Hand-Crafted Features of Appearance, Shape, and Dynamics for Automatic Pain Estimation",University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+b38c7a58b4c5298705b8f63dcb6a1c21ee297af8,"Fusing Deep Learned and Hand-Crafted Features of Appearance, Shape, and Dynamics for Automatic Pain Estimation",University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+b3b532e8ea6304446b1623e83b0b9a96968f926c,Joint Network based Attention for Action Recognition,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+b3b532e8ea6304446b1623e83b0b9a96968f926c,Joint Network based Attention for Action Recognition,Beijing Institute of Technology,Beijing Institute of Technology University,"北京理工大学, 5, 中关村南大街, 中关村, 稻香园南社区, 海淀区, 北京市, 100872, 中国",39.95866520,116.30971281,edu,
+b370eb9839be558e7db8390ce342312bd4835be9,Object Localization Does Not Imply Awareness of Object Category at the Break of Continuous Flash Suppression,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+b306bd9b485c6a6c1e4550beb1910ed9b6585359,Learning generative models of mid-level structure in natural images,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+b3154d981eca98416074538e091778cbc031ca29,Pedestrian Attribute Analysis Using a Top-View Camera in a Public Space,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+b3154d981eca98416074538e091778cbc031ca29,Pedestrian Attribute Analysis Using a Top-View Camera in a Public Space,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+b3cc2554449fb10002250bbc178e1009fc2fdb70,Face Recognition Based on Local Zernike Moments,Eastern Mediterranean University,Eastern Mediterranean University,"Eastern Mediterranean University (EMU) - Stadium, Nehir Caddesi, Gazimağusa, Αμμόχωστος - Mağusa, Kuzey Kıbrıs, 99450, Κύπρος - Kıbrıs",35.14479945,33.90492318,edu,
+b3e50a64709a62628105546e392cf796f95ea0fb,Clustering via Boundary Erosion,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+b3e50a64709a62628105546e392cf796f95ea0fb,Clustering via Boundary Erosion,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+b352bd38298608afab5df341857313c146c1418c,One-Shot Learning of Sketch Categories with Co-regularized Sparse Coding,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+b352bd38298608afab5df341857313c146c1418c,One-Shot Learning of Sketch Categories with Co-regularized Sparse Coding,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+b3200539538eca54a85223bf0ec4f3ed132d0493,Action Anticipation with RBF Kernelized Feature Mapping RNN,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+b33e062f36dec4e49558133426b50c2536cb0a1b,A Learning-Style Theory for Understanding Autistic Behaviors,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+b33e062f36dec4e49558133426b50c2536cb0a1b,A Learning-Style Theory for Understanding Autistic Behaviors,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+b33e062f36dec4e49558133426b50c2536cb0a1b,A Learning-Style Theory for Understanding Autistic Behaviors,Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.48799610,-3.17969747,edu,
+b33e062f36dec4e49558133426b50c2536cb0a1b,A Learning-Style Theory for Understanding Autistic Behaviors,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+b395e6a9f28c5acbf81a58599283753c033b9540,Online Face Recognition System Based on Local Binary Patterns and Facial Landmark Tracking,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+b3b467961ba66264bb73ffe00b1830d7874ae8ce,Finding Tiny Faces,Robotics Institute,Robotics Institute,"Robotics Institute, Carnegie Mellon University, 5000, Forbes Avenue, Squirrel Hill North, Pittsburgh, Allegheny County, Pennsylvania, 15213, USA",40.44351700,-79.94538900,edu,
+b3b467961ba66264bb73ffe00b1830d7874ae8ce,Finding Tiny Faces,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+b3ba7ab6de023a0d58c741d6abfa3eae67227caf,Zero-Shot Activity Recognition with Verb Attribute Induction,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+b3330adb131fb4b6ebbfacce56f1aec2a61e0869,Emotion recognition using facial images,SASTRA University,SASTRA University,"SASTRA University, SRC Campus, Big Bazaar Street, கும்பகோணம், Thanjavur district, Tamil Nadu, 612001, India",10.96286550,79.38530651,edu,
+b3c8752cada163af9f72d37d2781ecd49b4c8c52,Nonparametric guidance of autoencoder representations using label information,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+b3c8752cada163af9f72d37d2781ecd49b4c8c52,Nonparametric guidance of autoencoder representations using label information,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+b3345c179be86c3fa7f3fece7d1f0db93e2cf8dc,Perceptual Differences between Men and Women: A 3D Facial Morphometric Perspective,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+b3ee1a0ff6cb36621c65c4a7b05a5179db280d35,Neural Caption Generation for News Images,Aston University,Aston University,"Aston University, Aston Street, Digbeth, Birmingham, West Midlands Combined Authority, West Midlands, England, B4, UK",52.48620785,-1.88849915,edu,
+b3f3d6be11ace907c804c2d916830c85643e468d,A Logical Framework for Trust - Related Emotions : Formal and Behavioral Results by Manh Hung NGUYEN Co - supervisors,University of Toulouse,University of Toulouse,"Université de Toulouse, Toulouse, Haute-Garonne, Occitanie, France",43.60446620,1.44420190,edu,
+b3e856729f89b082b4108561479ff09394bb6553,Pose Robust Video - Based Face Recognition,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+b3b920c797259d1340fcd2cee619203821dabe23,Alpha-Beta Divergences Discover Micro and Macro Structures in Data,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+b3a82f7df6d19898da0d0a01285b8331e099cea4,Gait-Based Person Identification Using Motion Interchange Patterns,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+b3376f115f8b13695f1b8c1a7f00f4cfea4cae53,Human Body Orientation Estimation in Multiview Scenarios,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+b3658514a0729694d86a8b89c875a66cde20480c,Improving the Robustness of Subspace Learning Techniques for Facial Expression Recognition,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+b357576afb70465e47144aef96955b1e4b9cc1f7,Oriented Response Networks,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+b357576afb70465e47144aef96955b1e4b9cc1f7,Oriented Response Networks,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+b3d592bfbdeddd4074cf7aa8a832f13cd9d3be0d,Visual Relationship Prediction via Label Clustering and Incorporation of Depth Information,National Tsing Hua University,National Tsing Hua University,"國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.79254840,120.99511830,edu,
+b3416a5f7339c7e83d68ba1d00d00576880a8f04,Behavior-grounded multi-sensory object perception and exploration by a humanoid robot,Iowa State University,Iowa State University,"Iowa State University, Farm House Road, Ames, Story County, Iowa, 50014, USA",42.02791015,-93.64464415,edu,
+b3b4a7e29b9186e00d2948a1d706ee1605fe5811,Image Preprocessing for Illumination Invariant Face Verification,Warsaw University of Technology,Warsaw University of Technology,"Politechnika Warszawska, 1, Plac Politechniki, VIII, Śródmieście, Warszawa, mazowieckie, 00-661, RP",52.22165395,21.00735776,edu,
+b36faeba2383cef082f9f3f509dd2098a926e2f5,Speed Up Learning based Descriptor for Face Verification,Pohang University of Science and Technology,Pohang University of Science and Technology,"포스텍, 77, 청암로, 효곡동, 남구, 포항시, 경북, 37673, 대한민국",36.01773095,129.32107509,edu,
+b33e8db8ccabdfc49211e46d78d09b14557d4cba,Face Expression Recognition and Analysis: The State of the Art,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+b3a45118534144f50a56653dac8109c73fc2c0e8,A Dataset for Persistent Multi-target Multi-camera Tracking in RGB-D,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+b341a33f098ce9dc6dbf5c50e8a1f7fe43fb21f2,Deep learning evaluation using deep linguistic processing,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+b341a33f098ce9dc6dbf5c50e8a1f7fe43fb21f2,Deep learning evaluation using deep linguistic processing,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+b3014317cea72345a711d82d27f2c03c53932a31,Model-based Human Pose Estimation Using Labelled Voxels by ICP,Hunan University,Hunan University,"湖南大学, 麓山南路, 岳麓区, 长沙市, 湖南省, 410082, 中国",28.18203000,112.94630000,edu,
+b3014317cea72345a711d82d27f2c03c53932a31,Model-based Human Pose Estimation Using Labelled Voxels by ICP,Hunan University,Hunan University,"湖南大学, 麓山南路, 岳麓区, 长沙市, 湖南省, 410082, 中国",28.18203000,112.94630000,edu,
+dfd934ae448a1b8947d404b01303951b79b13801,The importance of internal facial features in learning new faces.,University of Plymouth,University of Plymouth,"Charles Seale-Hayne Library, Portland Square, Barbican, Plymouth, South West England, England, PL4 6AP, UK",50.37525010,-4.13927692,edu,
+dfd934ae448a1b8947d404b01303951b79b13801,The importance of internal facial features in learning new faces.,Bournemouth University,Bournemouth University,"Bournemouth University, BU footpaths, Poole, South West England, England, BH10 4HX, UK",50.74223495,-1.89433739,edu,
+dfd934ae448a1b8947d404b01303951b79b13801,The importance of internal facial features in learning new faces.,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+dfd934ae448a1b8947d404b01303951b79b13801,The importance of internal facial features in learning new faces.,Plymouth University,Plymouth University,"Plymouth University, Portland Square, Barbican, Plymouth, South West England, England, PL4 6AP, UK",50.37552690,-4.13937687,edu,
+df724040bf460858b3e325fab0a4dd3374a647a7,Capsules for Object Segmentation,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+df724040bf460858b3e325fab0a4dd3374a647a7,Capsules for Object Segmentation,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+dfe823d9851d222f299ad26283c7de4b4a3941e8,Kernel Fisher Discriminant Analysis in Full Eigenspace,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+df45ca54171804193c0b499e8f3d282cc8b06998,LVreID: Person Re-Identification with Long Sequence Videos,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+df45ca54171804193c0b499e8f3d282cc8b06998,LVreID: Person Re-Identification with Long Sequence Videos,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+df45ca54171804193c0b499e8f3d282cc8b06998,LVreID: Person Re-Identification with Long Sequence Videos,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+df0e280cae018cebd5b16ad701ad101265c369fa,Deep Attributes from Context-Aware Regional Neural Codes,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+df0e280cae018cebd5b16ad701ad101265c369fa,Deep Attributes from Context-Aware Regional Neural Codes,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+df7e3f5cb90230f6bed1473c4984f336b56615c8,A Multi-Stage Approach to Facial Feature Detection,University of Manchester,University of Manchester,"University of Manchester - Main Campus, Brunswick Street, Curry Mile, Ardwick, Manchester, Greater Manchester, North West England, England, M13 9NR, UK",53.46600455,-2.23300881,edu,
+dfe5849fc844bd7b747b3ecbe0f28ffb7e6ee917,Semantically Consistent Image Completion with Fine-grained Details,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+df9a08016fa553a169d893ce2d3fca375bab4781,Partially-Supervised Image Captioning,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+df2c685aa9c234783ab51c1aa1bf1cb5d71a3dbb,SREFI: Synthesis of realistic example face images,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+df8341b479434721d3738cc672cf976c080ab7e2,Learning Deep Networks from Noisy Labels with Dropout Regularization,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+df8341b479434721d3738cc672cf976c080ab7e2,Learning Deep Networks from Noisy Labels with Dropout Regularization,Wayne State University,Wayne State University,"Parking Structure 3, East Warren Avenue, New Center, Detroit, Wayne County, Michigan, 48236, USA",42.35775700,-83.06286711,edu,
+df8341b479434721d3738cc672cf976c080ab7e2,Learning Deep Networks from Noisy Labels with Dropout Regularization,Wayne State University,Wayne State University,"Parking Structure 3, East Warren Avenue, New Center, Detroit, Wayne County, Michigan, 48236, USA",42.35775700,-83.06286711,edu,
+df8717e1153c48e457ea5ace1aa97c30ee7374bb,RenderNet: A deep convolutional network for differentiable rendering from 3D shapes,University of Bath,University of Bath,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK",51.37914420,-2.32523320,edu,
+df8717e1153c48e457ea5ace1aa97c30ee7374bb,RenderNet: A deep convolutional network for differentiable rendering from 3D shapes,University of Bath,University of Bath,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK",51.37914420,-2.32523320,edu,
+df58cf3fe7502a91a7d319be11680ee5b1c78e6d,Eye Detection and Gaze Estimation,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+df58cf3fe7502a91a7d319be11680ee5b1c78e6d,Eye Detection and Gaze Estimation,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+df8a4d17bd48cd9c9f1e74396fa95cdf3381012b,Distinct Neurophysiological Mechanisms Support the Online Formation of Individual and Across-Episode Memory Representations.,University of Barcelona,University of Barcelona,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España",41.38689130,2.16352385,edu,
+df8a4d17bd48cd9c9f1e74396fa95cdf3381012b,Distinct Neurophysiological Mechanisms Support the Online Formation of Individual and Across-Episode Memory Representations.,University of Barcelona,University of Barcelona,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España",41.38689130,2.16352385,edu,
+dfbf941adeea19f5dff4a70a466ddd1b77f3b727,Models for supervised learning in sequence data,Delft University of Technology,Delft University of Technology,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland",51.99882735,4.37396037,edu,
+dfd8602820c0e94b624d02f2e10ce6c798193a25,Structured Analysis Dictionary Learning for Image Classification,North Carolina State University,North Carolina State University,"North Carolina State University, Oval Drive, West Raleigh, Raleigh, Wake County, North Carolina, 27695, USA",35.77184965,-78.67408695,edu,
+df01f6be9573a7864e86d960db7cf3cef3a8199d,Multi-Agent Reinforcement Learning for Multi-Object Tracking,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+dff838ba0567ef0a6c8fbfff9837ea484314efc6,"Progress Report, MSc. Dissertation: On-line Random Forest for Face Detection",University of Manchester,University of Manchester,"University of Manchester - Main Campus, Brunswick Street, Curry Mile, Ardwick, Manchester, Greater Manchester, North West England, England, M13 9NR, UK",53.46600455,-2.23300881,edu,
+df9a016950ffaaa8526e7332f0a6568ad43d054f,A Fast Stereo-based System for Detecting and Tracking Pedestrians from a Moving Vehicle,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+df7312cbabb7d75d915ba0d91dea77100ded5c56,Preliminary Studies on a Large Face Database,Yale University,Yale University,"Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA",41.25713055,-72.98966960,edu,
+df7312cbabb7d75d915ba0d91dea77100ded5c56,Preliminary Studies on a Large Face Database,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+df71a00071d5a949f9c31371c2e5ee8b478e7dc8,Using opportunistic face logging from smartphone to infer mental health: challenges and future directions,Dartmouth College,Dartmouth College,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA",43.70479270,-72.29259090,edu,
+df71a00071d5a949f9c31371c2e5ee8b478e7dc8,Using opportunistic face logging from smartphone to infer mental health: challenges and future directions,Dartmouth College,Dartmouth College,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA",43.70479270,-72.29259090,edu,
+df71a00071d5a949f9c31371c2e5ee8b478e7dc8,Using opportunistic face logging from smartphone to infer mental health: challenges and future directions,Dartmouth College,Dartmouth College,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA",43.70479270,-72.29259090,edu,
+df3c2ac15c71b6cdc07f4268ee83d4fc1984545f,Random field topic model for semantic region analysis in crowded scenes from tracklets,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+df3c2ac15c71b6cdc07f4268ee83d4fc1984545f,Random field topic model for semantic region analysis in crowded scenes from tracklets,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+df3c2ac15c71b6cdc07f4268ee83d4fc1984545f,Random field topic model for semantic region analysis in crowded scenes from tracklets,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+df9269657505fcdc1e10cf45bbb8e325678a40f5,Open-Domain Audio-Visual Speech Recognition: A Deep Learning Approach,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+dfb6aa168177d4685420fcb184def0aa7db7cddb,The Effect of Lighting Direction/Condition on the Performance of Face Recognition Algorithms,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+dfb6aa168177d4685420fcb184def0aa7db7cddb,The Effect of Lighting Direction/Condition on the Performance of Face Recognition Algorithms,University of Miami,University of Miami,"University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA",25.71733390,-80.27866887,edu,
+df2841a1d2a21a0fc6f14fe53b6124519f3812f9,Learning Image Attributes using the Indian Buffet Process,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+df2841a1d2a21a0fc6f14fe53b6124519f3812f9,Learning Image Attributes using the Indian Buffet Process,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+df31e9c882dfb3ea5a3abe3b139ceacb1d90a302,DeepGUM: Learning Deep Robust Regression with a Gaussian-Uniform Mixture Model,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+df9491cc46eacf66bb963a699b73ec1a82aec4eb,Learning Attributes from the Crowdsourced Relative Labels,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+df969647a0ee9ea25b23589f44be5240b5097236,How robust is familiar face recognition? A repeat detection study of more than 1000 faces,Victoria University of Wellington,Victoria University of Wellington,"Victoria University of Wellington, Waiteata Road, Aro Valley, Wellington, Wellington City, Wellington, 6040, New Zealand/Aotearoa",-41.29052775,174.76846919,edu,
+df969647a0ee9ea25b23589f44be5240b5097236,How robust is familiar face recognition? A repeat detection study of more than 1000 faces,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+dae0a4ef50b347f145ed6de8f6c7fb94d350f937,Managing Heterogeneous Datacenters with Tokens,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+dae0a4ef50b347f145ed6de8f6c7fb94d350f937,Managing Heterogeneous Datacenters with Tokens,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+dae0a4ef50b347f145ed6de8f6c7fb94d350f937,Managing Heterogeneous Datacenters with Tokens,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+dae0a4ef50b347f145ed6de8f6c7fb94d350f937,Managing Heterogeneous Datacenters with Tokens,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+dae0a4ef50b347f145ed6de8f6c7fb94d350f937,Managing Heterogeneous Datacenters with Tokens,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+daf74c34f7da0695b154f645c8b78a7397a98f16,ENet: A Deep Neural Network Architecture for Real-Time Semantic Segmentation,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+daf74c34f7da0695b154f645c8b78a7397a98f16,ENet: A Deep Neural Network Architecture for Real-Time Semantic Segmentation,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+da0b41c22e0918c99ba89a04eed4f8ed58cc1d66,Subspace Clustering using Ensembles of $K$-Subspaces,Portland State University,Portland State University,"Portland State University, Southwest Park Avenue, University District, Portland Downtown, Portland, Multnomah County, Oregon, 97201, USA",45.51181205,-122.68492999,edu,
+da0b41c22e0918c99ba89a04eed4f8ed58cc1d66,Subspace Clustering using Ensembles of $K$-Subspaces,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+da7fc2231134fef949882bc193bc1802b318c6ff,Qualitative Pose Estimation by Discriminative Deformable Part Models,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+da8eb0d7666d481ba0d50a03067dbc1913131495,Physics-based face database for color research,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+daf05febbe8406a480306683e46eb5676843c424,Robust Subspace Segmentation with Block-Diagonal Prior,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+daf05febbe8406a480306683e46eb5676843c424,Robust Subspace Segmentation with Block-Diagonal Prior,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+daf05febbe8406a480306683e46eb5676843c424,Robust Subspace Segmentation with Block-Diagonal Prior,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+da1049ae56eaca2e7d65946cf87b1e504d9fcb70,VisDA : A Synthetic-to-Real Benchmark for Visual Domain Adaptation,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+da1049ae56eaca2e7d65946cf87b1e504d9fcb70,VisDA : A Synthetic-to-Real Benchmark for Visual Domain Adaptation,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+da90c9ff02e76a7e686ffe13bcdedbf949c86dfa,Adaptive workload-aware task scheduling for single-ISA asymmetric multicore architectures,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+da115fc803e692d18802400940855eb6c78691e4,Deep Convolutional Neural Networks with Merge-and-Run Mappings,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+da115fc803e692d18802400940855eb6c78691e4,Deep Convolutional Neural Networks with Merge-and-Run Mappings,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+da44881db32c132eb9cdef524618e3c8ed340b47,Annotation-Free and One-Shot Learning for Instance Segmentation of Homogeneous Object Clusters,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+da11b7bba74c4abbfb181bd7d07c4e6480d6c3e2,Deep CNN Denoiser and Multi-layer Neighbor Component Embedding for Face Hallucination,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+da11b7bba74c4abbfb181bd7d07c4e6480d6c3e2,Deep CNN Denoiser and Multi-layer Neighbor Component Embedding for Face Hallucination,"National Institute of Informatics, Japan","National Institute of Informatics, Japan","2 Chome-1-2 Hitotsubashi, Chiyoda, Tokyo 100-0003, Japan",35.69248530,139.75825330,edu,
+da11b7bba74c4abbfb181bd7d07c4e6480d6c3e2,Deep CNN Denoiser and Multi-layer Neighbor Component Embedding for Face Hallucination,University of Electro-Communications,The University of Electro-Communications,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本",35.65729570,139.54255868,edu,
+da11b7bba74c4abbfb181bd7d07c4e6480d6c3e2,Deep CNN Denoiser and Multi-layer Neighbor Component Embedding for Face Hallucination,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+da67293fe8ab15539dd045675fa2395435f239b6,View-independent coding of face identity in frontal and temporal cortices is modulated by familiarity: an event-related fMRI study.,University of Geneva,University of Geneva,"University of Chicago-Yerkes Observatory, 373, West Geneva Street, Williams Bay, Walworth County, Wisconsin, 53191, USA",42.57054745,-88.55578627,edu,
+da67293fe8ab15539dd045675fa2395435f239b6,View-independent coding of face identity in frontal and temporal cortices is modulated by familiarity: an event-related fMRI study.,University of Geneva,University of Geneva,"University of Chicago-Yerkes Observatory, 373, West Geneva Street, Williams Bay, Walworth County, Wisconsin, 53191, USA",42.57054745,-88.55578627,edu,
+daa52dd09b61ee94945655f0dde216cce0ebd505,Recognizing Micro-Actions and Reactions from Paired Egocentric Videos,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+daa52dd09b61ee94945655f0dde216cce0ebd505,Recognizing Micro-Actions and Reactions from Paired Egocentric Videos,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+daa52dd09b61ee94945655f0dde216cce0ebd505,Recognizing Micro-Actions and Reactions from Paired Egocentric Videos,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+da5075fa79da6cd7b81e5d3dc24161217ef86368,ViP-CNN: A Visual Phrase Reasoning Convolutional Neural Network for Visual Relationship Detection,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+daba4ff9ad8015f6c9626dbdfee950fda401424f,IntroVAE: Introspective Variational Autoencoders for Photographic Image Synthesis,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+da4137396f26bf3e76d04eeed0c94e11b7824aa6,Transferable Semi-Supervised Semantic Segmentation,"National University of Defense Technology, China","National University of Defence Technology, Changsha 410000, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.22902090,112.99483204,edu,
+da4137396f26bf3e76d04eeed0c94e11b7824aa6,Transferable Semi-Supervised Semantic Segmentation,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+da4137396f26bf3e76d04eeed0c94e11b7824aa6,Transferable Semi-Supervised Semantic Segmentation,Beckman Institute,Beckman Institute,"Beckman Institute, The Presidents' Walk, Urbana, Champaign County, Illinois, 61801-2341, USA",40.11571585,-88.22750772,edu,
+daf9c461bc515736749e14da67045d8a542c24a1,Neighborhood MinMax Projections,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+b4843913e5ba0f1bfc12f179587d3789676c3310,4 DFAB : A Large Scale 4 DDatabase for Facial Expression Analysis and Biometric Applications,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+b4843913e5ba0f1bfc12f179587d3789676c3310,4 DFAB : A Large Scale 4 DDatabase for Facial Expression Analysis and Biometric Applications,Middlesex University,Middlesex University,"Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK",51.59029705,-0.22963221,edu,
+b4288f34528fbda2d2781454aadccae0d578d59a,Bayesian 3D Tracking from Monocular Video,University of Arizona,University of Arizona,"University of Arizona, North Highland Avenue, Rincon Heights, Barrio Viejo, Tucson, Pima County, Arizona, 85721, USA",32.23517260,-110.95095832,edu,
+b4288f34528fbda2d2781454aadccae0d578d59a,Bayesian 3D Tracking from Monocular Video,University of Arizona,University of Arizona,"University of Arizona, North Highland Avenue, Rincon Heights, Barrio Viejo, Tucson, Pima County, Arizona, 85721, USA",32.23517260,-110.95095832,edu,
+b4288f34528fbda2d2781454aadccae0d578d59a,Bayesian 3D Tracking from Monocular Video,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+b4d694961d3cde43ccef7d8fcf1061fe0d8f97f3,Rapid face recognition using hashing,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+b4d694961d3cde43ccef7d8fcf1061fe0d8f97f3,Rapid face recognition using hashing,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+b4ee1b468bf7397caa7396cfee2ab5f5ed6f2807,A short review and primer on electromyography in human computer interaction applications,Aalto University,Aalto University,"Aalto, 24, Otakaari, Otaniemi, Suur-Tapiola, Espoo, Helsingin seutukunta, Uusimaa, Etelä-Suomi, Manner-Suomi, 02150, Suomi",60.18558755,24.82427330,edu,
+b4ee1b468bf7397caa7396cfee2ab5f5ed6f2807,A short review and primer on electromyography in human computer interaction applications,Aalto University,Aalto University,"Aalto, 24, Otakaari, Otaniemi, Suur-Tapiola, Espoo, Helsingin seutukunta, Uusimaa, Etelä-Suomi, Manner-Suomi, 02150, Suomi",60.18558755,24.82427330,edu,
+b446bcd7fb78adfe346cf7a01a38e4f43760f363,To appear in ICB 2018 Longitudinal Study of Child Face Recognition,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+b446bcd7fb78adfe346cf7a01a38e4f43760f363,To appear in ICB 2018 Longitudinal Study of Child Face Recognition,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+b417b90fa0c288bbaab1aceb8ebc7ec1d3f33172,Face Aging with Contextual Generative Adversarial Nets,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+b417b90fa0c288bbaab1aceb8ebc7ec1d3f33172,Face Aging with Contextual Generative Adversarial Nets,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+b411448a978e48352e1959addb9ca8dc762262a3,Probabilistic Subspace Clustering Via Sparse Representations,Technion,Technion,"Haifa, 3200003, Israel",32.77677830,35.02312710,edu,
+b411448a978e48352e1959addb9ca8dc762262a3,Probabilistic Subspace Clustering Via Sparse Representations,Technion,Technion,"Haifa, 3200003, Israel",32.77677830,35.02312710,edu,
+b446cf353744a4b640af88d1848a1b958169c9f2,Multi-attribute sparse representation with group constraints for face recognition under different variations,National Tsing Hua University,National Tsing Hua University,"國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.79254840,120.99511830,edu,
+b42a97fb47bcd6bfa72e130c08960a77ee96f9ab,Based on Graph-preserving Sparse Non-negative Matrix Factorization,Institute of Information Science,Institute of Information Science,"資訊科學研究所, 數理大道, 中研里, 南港子, 南港區, 臺北市, 11574, 臺灣",25.04107280,121.61475620,edu,
+b42a97fb47bcd6bfa72e130c08960a77ee96f9ab,Based on Graph-preserving Sparse Non-negative Matrix Factorization,Beijing Jiaotong University,Beijing Jiaotong University,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国",39.94976005,116.33629046,edu,
+b4d209845e1c67870ef50a7c37abaf3770563f3e,"Video Time: Properties, Encoders and Evaluation",University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+b47602296ccda89bec7dfa592965dacf17ca1483,Conditional Image-to-Image Translation,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+b4720674dcd92d28978e24727d5b40edb363dfe9,Input Fast-Forwarding for Better Deep Learning,Virginia Polytechnic Institute and State University,Virginia Polytechnic Institute and State University,"Virginia Polytechnic Institute and State University, Duck Pond Drive, Blacksburg, Montgomery County, Virginia, 24061-9517, USA",37.21872455,-80.42542519,edu,
+b4720674dcd92d28978e24727d5b40edb363dfe9,Input Fast-Forwarding for Better Deep Learning,Alexandria University,Alexandria University,"جامعة الإسكندرية, الكورنيش, إبراهيمية, الإسكندرية, 21522, مصر",31.21051105,29.91314562,edu,
+b4720674dcd92d28978e24727d5b40edb363dfe9,Input Fast-Forwarding for Better Deep Learning,Benha University,Benha University,"كلية الهندسة بشبرا جامعة بنها, شارع اليازجي, روض الفرج, القاهرة, محافظة القاهرة, 2466, مصر",30.08187270,31.24454841,edu,
+b4ad2bdbf82c8bd1454f6d743b956bcfbad54101,Learning from Experience in Manipulation Planning: Setting the Right Goals,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+b4ad2bdbf82c8bd1454f6d743b956bcfbad54101,Learning from Experience in Manipulation Planning: Setting the Right Goals,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+b4ad2bdbf82c8bd1454f6d743b956bcfbad54101,Learning from Experience in Manipulation Planning: Setting the Right Goals,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+b4ad2bdbf82c8bd1454f6d743b956bcfbad54101,Learning from Experience in Manipulation Planning: Setting the Right Goals,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+b4ad2bdbf82c8bd1454f6d743b956bcfbad54101,Learning from Experience in Manipulation Planning: Setting the Right Goals,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+b41d2d4750ba7fdfe072d253f408e5b60c75eb1f,Single versus Multiple Sorting in All Pairs Similarity Search,Tokyo Institute of Technology,Tokyo Institute of Technology,"東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本",35.51675380,139.48342251,edu,
+b4e889af57295dff9498ba476893a359a91b8a3e,Improving Speaker Turn Embedding by Crossmodal Transfer Learning from Face Embedding,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+b438dc16bf97c1324ac66771efa67bdb9b853346,Evaluating asymmetric multiprocessing for mobile applications,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+b42a8325d5cabefd11cee59f4b2b5901eb7f18c6,Curriculum Learning of Visual Attribute Clusters for Multi-Task Classification,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+b42a8325d5cabefd11cee59f4b2b5901eb7f18c6,Curriculum Learning of Visual Attribute Clusters for Multi-Task Classification,University of Ioannina,University of Ioannina,"Πανεπιστήμιο Ιωαννίνων, Πανεπιστημίου, Κάτω Νεοχωρόπουλο, Νεοχωρόπουλο, Δήμος Ιωαννιτών, Π.Ε. Ιωαννίνων, Περιφέρεια Ηπείρου, Ήπειρος - Δυτική Μακεδονία, 45110, Ελλάδα",39.61623060,20.83963011,edu,
+b4c60fe73d1e788ebe0e24b0c8989e4fda110ac5,Multi-View Perceptron: a Deep Model for Learning Face Identity and View Representations,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+b4c60fe73d1e788ebe0e24b0c8989e4fda110ac5,Multi-View Perceptron: a Deep Model for Learning Face Identity and View Representations,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+b4c60fe73d1e788ebe0e24b0c8989e4fda110ac5,Multi-View Perceptron: a Deep Model for Learning Face Identity and View Representations,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+b4c60fe73d1e788ebe0e24b0c8989e4fda110ac5,Multi-View Perceptron: a Deep Model for Learning Face Identity and View Representations,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+b4a60cfe62d78e315ed4206d455022ead27ecbf0,Deep hashing with triplet quantization loss,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+b46e7d361a030f96d54a9717127f17d0cc833e32,Contextualized Bilinear Attention Networks,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+b47dae9d6499c6a777847a26297a647f0de49214,Aberrant Social Attention and Its Underlying Neural Correlates in Adults with Autism Spectrum Disorder,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+b4362cd87ad219790800127ddd366cc465606a78,A Smartphone-Based Automatic Diagnosis System for Facial Nerve Palsy,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+b4362cd87ad219790800127ddd366cc465606a78,A Smartphone-Based Automatic Diagnosis System for Facial Nerve Palsy,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+b4362cd87ad219790800127ddd366cc465606a78,A Smartphone-Based Automatic Diagnosis System for Facial Nerve Palsy,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+b4ee6b62f6a89feede06da5fb7e5ad6ec0265175,Recurrent 3D Pose Sequence Machines,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+b4a09b6a7c78c3d54a0ce59ae3ebb6d4ebfd7d06,"Weight, Sex, and Facial Expressions: On the Manipulation of Attributes in Generative 3D Face Models",University of Basel,University of Basel,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra",47.56126510,7.57529610,edu,
+b4fe4bdd0e42aadf3f7046e9c681d3585ba8a205,Improving dual-tree algorithms,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+b4fe4bdd0e42aadf3f7046e9c681d3585ba8a205,Improving dual-tree algorithms,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+b45549a95120a744e6b882216f8a86481fedd255,Local feature hierarchy for face recognition across pose and illumination,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+b4f4b0d39fd10baec34d3412d53515f1a4605222,Every Picture Tells a Story: Generating Sentences from Images,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+b41c90bce7fecdcf5980a9990f8693ff07997b65,Categorizing Concepts with Basic Level for Vision-to-Language,Tongji University,Tongji University,"同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国",31.28473925,121.49694909,edu,
+b41c90bce7fecdcf5980a9990f8693ff07997b65,Categorizing Concepts with Basic Level for Vision-to-Language,Tongji University,Tongji University,"同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国",31.28473925,121.49694909,edu,
+b47add32c0b26e72f5670644618076dfd8bc1404,Attribute-Guided Face Generation Using Conditional CycleGAN,Hong Kong University of Science and Technology,Hong Kong University of Science and Technology,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.33863040,114.26203370,edu,
+b43b6551ecc556557b63edb8b0dc39901ed0343b,ICA and Gabor representation for facial expression recognition,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+b4f5cf797a1c857f32e5740d53d9990bc925af2b,Review of Segmentation with Deep Learning and Discover Its Application in Ultrasound Images,University of Alberta,University of Alberta,"University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada",53.52385720,-113.52282665,edu,
+b4f67b4286ea99e7f0a57536282445e801b97847,Spatiotemporal KSVD Dictionary Learning for Online Multi-target Tracking,"University of Colorado, Denver",University of Colorado Denver,"University of Colorado (Denver Auraria campus), Lawrence Way, Auraria, Denver, Denver County, Colorado, 80217, USA",39.74287785,-105.00596398,edu,
+b4f67b4286ea99e7f0a57536282445e801b97847,Spatiotemporal KSVD Dictionary Learning for Online Multi-target Tracking,"University of Colorado, Denver",University of Colorado Denver,"University of Colorado (Denver Auraria campus), Lawrence Way, Auraria, Denver, Denver County, Colorado, 80217, USA",39.74287785,-105.00596398,edu,
+a255a54b8758050ea1632bf5a88a201cd72656e1,Nonparametric Facial Feature Localization,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+a2b9cee7a3866eb2db53a7d81afda72051fe9732,Reconstructing a Fragmented Face from an Attacked Secure Identification Protocol,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+a2bd81be79edfa8dcfde79173b0a895682d62329,Multi-Objective Vehicle Routing Problem Applied to Large Scale Post Office Deliveries,University of Campinas,University of Campinas,"USJ, 97, Rua Sílvia Maria Fabro, Kobrasol, Campinas, São José, Microrregião de Florianópolis, Mesorregião da Grande Florianópolis, SC, Região Sul, 88102-130, Brasil",-27.59539950,-48.61542180,edu,
+a20210d875221088d6428330787606e12605c68f,Person Independent Head Pose Estimation by Non-Linear Regression and Manifold Embedding,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+a287643d3eddca3dcc09b3532f2b070a28d4a022,Real-time Human Pose Estimation from Video with Convolutional Neural Networks,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+a287643d3eddca3dcc09b3532f2b070a28d4a022,Real-time Human Pose Estimation from Video with Convolutional Neural Networks,Aalto University,Aalto University,"Aalto, 24, Otakaari, Otaniemi, Suur-Tapiola, Espoo, Helsingin seutukunta, Uusimaa, Etelä-Suomi, Manner-Suomi, 02150, Suomi",60.18558755,24.82427330,edu,
+a2eb90e334575d9b435c01de4f4bf42d2464effc,A new sparse image representation algorithm applied to facial expression recognition,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+a251171bb335608b3019f7b05b167b7e49a8dc23,Subspace Network: Deep Multi-Task Censored Regression for Modeling Neurodegenerative Diseases,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+a251171bb335608b3019f7b05b167b7e49a8dc23,Subspace Network: Deep Multi-Task Censored Regression for Modeling Neurodegenerative Diseases,Texas A&M University,Texas A&M University,"Texas A&M University, Horticulture Street, Park West, College Station, Brazos County, Texas, 77841, USA",30.61083650,-96.35212800,edu,
+a2e29b757f4021ed5b9eb7eebf78a0bddb460790,Visual scenes are categorized by function.,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+a2e29b757f4021ed5b9eb7eebf78a0bddb460790,Visual scenes are categorized by function.,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+a2e29b757f4021ed5b9eb7eebf78a0bddb460790,Visual scenes are categorized by function.,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+a25106a76af723ba9b09308a7dcf4f76d9283589,Local Octal Pattern: A Proficient Feature Extraction for Face Recognition,Anna University,Anna University,"Anna University, Nuclear Physics Road, Srinagar Colony, Ward 171, Zone 13 Adyar, Chennai, Chennai district, Tamil Nadu, 600025, India",13.01058380,80.23537360,edu,
+a29a22878e1881d6cbf6acff2d0b209c8d3f778b,Benchmarking Still-to-Video Face Recognition via Partial and Local Linear Discriminant Analysis on COX-S2V Dataset,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+a29a22878e1881d6cbf6acff2d0b209c8d3f778b,Benchmarking Still-to-Video Face Recognition via Partial and Local Linear Discriminant Analysis on COX-S2V Dataset,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+a29a22878e1881d6cbf6acff2d0b209c8d3f778b,Benchmarking Still-to-Video Face Recognition via Partial and Local Linear Discriminant Analysis on COX-S2V Dataset,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+a2e86c23cde8899ac39d0df43d6c5e4dcf0ae2e6,Deep Collaborative Learning for Visual Recognition,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+a2429cc2ccbabda891cc5ae340b24ad06fcdbed5,Discovering the Signatures of Joint Attention in Child-Caregiver Interaction,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+a2429cc2ccbabda891cc5ae340b24ad06fcdbed5,Discovering the Signatures of Joint Attention in Child-Caregiver Interaction,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+a2429cc2ccbabda891cc5ae340b24ad06fcdbed5,Discovering the Signatures of Joint Attention in Child-Caregiver Interaction,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+a2429cc2ccbabda891cc5ae340b24ad06fcdbed5,Discovering the Signatures of Joint Attention in Child-Caregiver Interaction,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+a2e3c367995a238155f0b180743d5487ecdf8df5,Novel Modular Weightless Neural Architectures for Biometrics-based Recognition,University of Kent,University of Kent,"University of Kent, St. Stephen's Hill, Hackington, Canterbury, Kent, South East, England, CT2 7AS, UK",51.29753440,1.07296165,edu,
+a20f132a30e99541aa7ba6dddac86e6a393778e8,Self Attention Grid for Person Re-Identification,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"电子科技大学(清水河校区), 2006, 西源大道, 郫都区, 成都市, 四川省, 611731, 中国",30.74804000,103.92959000,edu,
+a2f69a94380ccfd463886d26f07c4dba791f84d4,5-HT1A-receptor agonist modified amygdala activity and amygdala-associated social behavior in a valproate-induced rat autism model.,National Cheng Kung University,National Cheng Kung University,"成大, 1, 大學路, 大學里, 前甲, 東區, 臺南市, 70101, 臺灣",22.99919160,120.21625134,edu,
+a2f69a94380ccfd463886d26f07c4dba791f84d4,5-HT1A-receptor agonist modified amygdala activity and amygdala-associated social behavior in a valproate-induced rat autism model.,National Cheng Kung University,National Cheng Kung University,"成大, 1, 大學路, 大學里, 前甲, 東區, 臺南市, 70101, 臺灣",22.99919160,120.21625134,edu,
+a2f69a94380ccfd463886d26f07c4dba791f84d4,5-HT1A-receptor agonist modified amygdala activity and amygdala-associated social behavior in a valproate-induced rat autism model.,National Cheng Kung University,National Cheng Kung University,"成大, 1, 大學路, 大學里, 前甲, 東區, 臺南市, 70101, 臺灣",22.99919160,120.21625134,edu,
+a2cdd215586701c883dc3959c80f53ee5c091fe7,FaceLooks: A Smart Headband for Signaling Face-to-Face Behavior,University of Tsukuba,University of Tsukuba,"University of Tsukuba, つばき通り, Kananemoto-satsukabe village, つくば市, 茨城県, 関東地方, 305-8377, 日本",36.11120580,140.10551760,edu,
+a2cdd215586701c883dc3959c80f53ee5c091fe7,FaceLooks: A Smart Headband for Signaling Face-to-Face Behavior,University of Tsukuba,University of Tsukuba,"University of Tsukuba, つばき通り, Kananemoto-satsukabe village, つくば市, 茨城県, 関東地方, 305-8377, 日本",36.11120580,140.10551760,edu,
+a2cdd215586701c883dc3959c80f53ee5c091fe7,FaceLooks: A Smart Headband for Signaling Face-to-Face Behavior,University of Tsukuba,University of Tsukuba,"University of Tsukuba, つばき通り, Kananemoto-satsukabe village, つくば市, 茨城県, 関東地方, 305-8377, 日本",36.11120580,140.10551760,edu,
+a29566375836f37173ccaffa47dea25eb1240187,Vehicle Re-Identification in Context,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+a21685146f68de1f87e206c0a22dbc0188d55b2d,Robust Tracking and Human Activity Recognition,University of Alberta,University of Alberta,"University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada",53.52385720,-113.52282665,edu,
+a2ab16c6eff749d2081d11ddc0b9e310eda62061,Attributes as Operators: Factorizing Unseen Attribute-Object Compositions,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+a2bcfba155c990f64ffb44c0a1bb53f994b68a15,The Photoface database,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+a2bcfba155c990f64ffb44c0a1bb53f994b68a15,The Photoface database,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+a22c372911680793c7f94e3fd0b3843a2019f085,Designing Deep Convolutional Neural Networks for Continuous Object Orientation Estimation,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+a247c12bc54f7792e381c6e71d98348f8059ca15,Learning Efficient Object Detection Models with Knowledge Distillation,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+a247c12bc54f7792e381c6e71d98348f8059ca15,Learning Efficient Object Detection Models with Knowledge Distillation,University of Missouri,University of Missouri,"L1, Maguire Boulevard, Lemone Industrial Park, Columbia, Boone County, Missouri, 65201, USA",38.92676100,-92.29193783,edu,
+a25f1b02c63857482dcaa621f3a52e2b34d8b022,A System for Multimodal Context-Awareness,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+a2d1818eb461564a5153c74028e53856cf0b40fd,Orthogonal Deep Features Decomposition for Age-Invariant Face Recognition,Tencent,"Tencent AI Lab, China","Ke Ji Zhong Yi Lu, Nanshan Qu, Shenzhen Shi, Guangdong Sheng, China, 518057",22.54471540,113.93571640,company,"Keji Middle 1st Rd, Nanshan Qu, Shenzhen Shi, Guangdong Sheng, China, 518057"
+a25d12d3eaaba6ec8ef4a2690068e9fbd74b977a,Morph: Flexible Acceleration for 3D CNN-based Video Understanding,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+a2183537ccf24eb95e8e7520b33f9aa8f190e80e,Subspace-Based Holistic Registration for Low-Resolution Facial Images,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+a2a17b7421bd46224127e35e3451b1af36528a6a,Nested multi-instance classification,Northwestern University,Northwestern University,"Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA",42.05511640,-87.67581113,edu,
+a2a17b7421bd46224127e35e3451b1af36528a6a,Nested multi-instance classification,Northwestern University,Northwestern University,"Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA",42.05511640,-87.67581113,edu,
+a59cdc49185689f3f9efdf7ee261c78f9c180789,A New Approach for Learning Discriminative Dictionary for Pattern Classification,Hanoi University of Science and Technology,Hanoi University of Science and Technology,"HUST, Trần Đại Nghĩa, Hai Bà Trưng, Hà Nội, 10999, Việt Nam",21.00395200,105.84360183,edu,
+a5ae7d662ed086bc5b0c9a2c1dc54fcb23635000,Relief R-CNN : Utilizing Convolutional Feature Interrelationship for Fast Object Detection Deployment,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+a5ae7d662ed086bc5b0c9a2c1dc54fcb23635000,Relief R-CNN : Utilizing Convolutional Feature Interrelationship for Fast Object Detection Deployment,Shenzhen University,Shenzhen University,"深圳大学, 3688, 南海大道, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518060, 中国",22.53521465,113.93159110,edu,
+a5bc960e186391ca0ba0718aec70069abb5134e5,Age Invariant Face Recognition Using Convolutional Neural Networks and Set Distances,George Mason University,George Mason University,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA",38.83133325,-77.30798839,edu,
+a5c8fc1ca4f06a344b53dc81ebc6d87f54896722,Learning to see people like people,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+a5c8fc1ca4f06a344b53dc81ebc6d87f54896722,Learning to see people like people,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+a5c8fc1ca4f06a344b53dc81ebc6d87f54896722,Learning to see people like people,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+a5c8fc1ca4f06a344b53dc81ebc6d87f54896722,Learning to see people like people,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+a5ade88747fa5769c9c92ffde9b7196ff085a9eb,Why is facial expression analysis in the wild challenging?,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+a5ade88747fa5769c9c92ffde9b7196ff085a9eb,Why is facial expression analysis in the wild challenging?,Istanbul Technical University,Istanbul Technical University,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye",41.10427915,29.02231159,edu,
+a5ade88747fa5769c9c92ffde9b7196ff085a9eb,Why is facial expression analysis in the wild challenging?,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+a54fcdcb02da0844d28b3191145bbc99675714df,"FATAUVA-Net: An Integrated Deep Learning Framework for Facial Attribute Recognition, Action Unit Detection, and Valence-Arousal Estimation",National Cheng Kung University,National Cheng Kung University,"成大, 1, 大學路, 大學里, 前甲, 東區, 臺南市, 70101, 臺灣",22.99919160,120.21625134,edu,
+a5219fff98dfe3ec81dee95c4ead69a8e24cc802,Dual-Glance Model for Deciphering Social Relationships,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+a5219fff98dfe3ec81dee95c4ead69a8e24cc802,Dual-Glance Model for Deciphering Social Relationships,University of Minnesota,University of Minnesota,"WeismanArt, 333, East River Parkway, Marcy-Holmes, Phillips, Minneapolis, Hennepin County, Minnesota, 55455, USA",44.97308605,-93.23708813,edu,
+a59800f16ad02f550c600fff4179167bad0b8654,Neonatal Pain Expression Recognition Using Transfer Learning,University of South Florida,University of South Florida,"University of South Florida, Leroy Collins Boulevard, Tampa, Hillsborough County, Florida, 33620, USA",28.05999990,-82.41383619,edu,
+a58bef564df2bebbcb24c58c4a69bc6c51ab2d39,Kernel Implicit Variational Inference,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+a58bef564df2bebbcb24c58c4a69bc6c51ab2d39,Kernel Implicit Variational Inference,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+a5f11c132eaab258a7cea2d681875af09cddba65,A spatiotemporal model with visual attention for video classification,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+a59978ac12815cada0936dce760a6ff6aef376d9,Multi-Scale Face Restoration With Sequential Gating Ensemble Network,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+a5ee4693668d976dbd79a753c62e0614af2f5060,Hybrid Knowledge Routed Modules for Large-scale Object Detection,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+a5ee4693668d976dbd79a753c62e0614af2f5060,Hybrid Knowledge Routed Modules for Large-scale Object Detection,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+a5ee4693668d976dbd79a753c62e0614af2f5060,Hybrid Knowledge Routed Modules for Large-scale Object Detection,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+a5766dd5f2efe0b44879799dd5499edfb6b44839,Illumination Quality Assessment for Face Images: A Benchmark and a Convolutional Neural Networks Based Model,Tongji University,Tongji University,"同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国",31.28473925,121.49694909,edu,
+a5766dd5f2efe0b44879799dd5499edfb6b44839,Illumination Quality Assessment for Face Images: A Benchmark and a Convolutional Neural Networks Based Model,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+a538b05ebb01a40323997629e171c91aa28b8e2f,Rectified Linear Units Improve Restricted Boltzmann Machines,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+a57ee5a8fb7618004dd1def8e14ef97aadaaeef5,Fringe Projection Techniques: Whither we are?,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+a576d19473a12e16262266989376ad1e77e8e817,Unmanned Aerial Vehicle Object Tracking by Correlation Filter with Adaptive Appearance Model,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+a576d19473a12e16262266989376ad1e77e8e817,Unmanned Aerial Vehicle Object Tracking by Correlation Filter with Adaptive Appearance Model,Aberystwyth University,Aberystwyth University,"Aberystwyth University, Llanbadarn Campus, Cefn Esgair, Waun Fawr, Comins Coch, Ceredigion, Wales, SY23 3JG, UK",52.41073580,-4.05295501,edu,
+a50099f5364d3d4e82991418647c727f0f9c297c,A Generic Bi-Layer Data-Driven Crowd Behaviors Modeling Approach,Beijing Jiaotong University,Beijing Jiaotong University,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国",39.94976005,116.33629046,edu,
+a5ee556c355392db1750df92ae2dc8867073e771,Improved Local Coordinate Coding using Local Tangents,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+a566780ffbaf2e1ee88a821be4d0ffade934c518,Greedy Representative Selection for Unsupervised Data Analysis,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+a51882cfd0706512bf50e12c0a7dd0775285030d,Cross-Modal Face Matching: Beyond Viewed Sketches,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+a51cdc57f35f536468325a40a7777954c864935b,Fast and Robust Realtime Storefront Logo Recognition,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+a52581a7b48138d7124afc7ccfcf8ec3b48359d0,Pose and Illumination Invariant Face Recognition Based on 3D Face Reconstruction,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+a5bec55a9668b103265bcf84ecca94128a6769cc,Accounting for Aliasing in Correlation Filters : Zero-Aliasing and Partial-Aliasing Correlation Filters,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+a5bec55a9668b103265bcf84ecca94128a6769cc,Accounting for Aliasing in Correlation Filters : Zero-Aliasing and Partial-Aliasing Correlation Filters,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+a5f094bd197126025cabc50b30e0f03d56d8c594,Online Motion Agreement Tracking,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+bdbaf77951b845859a7203a33d91b6a595f5f9f3,Tracking Revisited using RGBD Camera: Baseline and Benchmark,Hong Kong University of Science and Technology,Hong Kong University of Science and Technology,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.33863040,114.26203370,edu,
+bdbaf77951b845859a7203a33d91b6a595f5f9f3,Tracking Revisited using RGBD Camera: Baseline and Benchmark,MIT,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+bd0265ba7f391dc3df9059da3f487f7ef17144df,Data-Driven Sparse Sensor Placement,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+bd0265ba7f391dc3df9059da3f487f7ef17144df,Data-Driven Sparse Sensor Placement,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+bd0265ba7f391dc3df9059da3f487f7ef17144df,Data-Driven Sparse Sensor Placement,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+bd572e9cbec095bcf5700cb7cd73d1cdc2fe02f4,Deep Learning for Computer Vision: A Brief Review,National Technical University of Athens,National Technical University of Athens,"Εθνικό Μετσόβιο Πολυτεχνείο, Στουρνάρη, Μουσείο, Αθήνα, Δήμος Αθηναίων, Π.Ε. Κεντρικού Τομέα Αθηνών, Περιφέρεια Αττικής, Αττική, 11250, Ελλάδα",37.98782705,23.73179733,edu,
+bd6099429bb7bf248b1fd6a1739e744512660d55,"Regularized Discriminant Analysis, Ridge Regression and Beyond",Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+bd5802008156ed1ee6919ccaf21ba6c06bad2a4c,Robust eye contact detection in natural multi-person interactions using gaze and speaking behaviour,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+bd5802008156ed1ee6919ccaf21ba6c06bad2a4c,Robust eye contact detection in natural multi-person interactions using gaze and speaking behaviour,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+bd5802008156ed1ee6919ccaf21ba6c06bad2a4c,Robust eye contact detection in natural multi-person interactions using gaze and speaking behaviour,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+bd5802008156ed1ee6919ccaf21ba6c06bad2a4c,Robust eye contact detection in natural multi-person interactions using gaze and speaking behaviour,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+bd6158bed42b038863c8ace0c96700e87c1c0231,"Action Unit Detection with Region Adaptation, Multi-labeling Learning and Optimal Temporal Fusing",CUNY City College,CUNY City College,"CUNY City College, 205 East 42nd Street, New York, NY 10017",45.55466080,5.40652550,edu,
+bde3c1298d4136369c8607dd5dc3f0800a27a8df,Extracting adaptive contextual cues from unlabeled regions,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+bde3c1298d4136369c8607dd5dc3f0800a27a8df,Extracting adaptive contextual cues from unlabeled regions,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+bdf46e52b9cc967628f423b1a69555a1114cc3e3,Spontaneous vs. Posed Facial Expression Analysis Using Deformable Feature Models and Aggregated Classifiers,Khalifa University,Khalifa University,"Khalifa University, شارع طَوِي مُوَيلِح, قصر الشاطئ, حدبة الزَّعْفرانة, أبوظبي, أبو ظبي, 31757, الإمارات العربية المتحدة",24.44690250,54.39425630,edu,
+bdf46e52b9cc967628f423b1a69555a1114cc3e3,Spontaneous vs. Posed Facial Expression Analysis Using Deformable Feature Models and Aggregated Classifiers,Khalifa University,Khalifa University,"Khalifa University, شارع طَوِي مُوَيلِح, قصر الشاطئ, حدبة الزَّعْفرانة, أبوظبي, أبو ظبي, 31757, الإمارات العربية المتحدة",24.44690250,54.39425630,edu,
+bd8f3fef958ebed5576792078f84c43999b1b207,BUAA-iCC at ImageCLEF 2015 Scalable Concept Image Annotation Challenge,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+bd65efb5d1fbff19dcd3cd24452f359013eac188,Unsupervised Feature Learning With Symmetrically Connected Convolutional Denoising Auto-encoders,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+bd65efb5d1fbff19dcd3cd24452f359013eac188,Unsupervised Feature Learning With Symmetrically Connected Convolutional Denoising Auto-encoders,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+bd0201b32e7eca7818468f2b5cb1fb4374de75b9,Facial Emotion Expressions Recognition with Brain Activites Using Kinect Sensor V2,University Politehnica of Bucharest,University Politehnica of Bucharest,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România",44.43918115,26.05044565,edu,
+bd0201b32e7eca7818468f2b5cb1fb4374de75b9,Facial Emotion Expressions Recognition with Brain Activites Using Kinect Sensor V2,University Politehnica of Bucharest,University Politehnica of Bucharest,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România",44.43918115,26.05044565,edu,
+bd236913cfe07896e171ece9bda62c18b8c8197e,Deep Learning with Energy-efficient Binary Gradient Cameras,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+bd433d471af50b571d7284afb5ee435654ace99f,Going Deeper with Convolutional Neural Network for Intelligent Transportation,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+bd8a85acaa45d4068fca584e8d9e3bd3bb4eea4d,Toward Scene Recognition by Discovering Semantic Structures and Parts,Shandong University,Shandong University,"山东大学, 泰安街, 鳌山卫街道, 即墨区, 青岛市, 山东省, 266200, 中国",36.36934730,120.67381800,edu,
+bd8a85acaa45d4068fca584e8d9e3bd3bb4eea4d,Toward Scene Recognition by Discovering Semantic Structures and Parts,Shandong University,Shandong University,"山东大学, 泰安街, 鳌山卫街道, 即墨区, 青岛市, 山东省, 266200, 中国",36.36934730,120.67381800,edu,
+bd8a85acaa45d4068fca584e8d9e3bd3bb4eea4d,Toward Scene Recognition by Discovering Semantic Structures and Parts,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+bd1d466299f585f2f67500a6ceef19008c4cb637,Pose and Illumination Invariant Face Recognition Using Video Sequences,"University of California, Riverside","University of California, Riverside","University of California, Riverside, Linden Street, Riverside, Riverside County, California, 92521, USA",33.98071305,-117.33261035,edu,
+bd379f8e08f88729a9214260e05967f4ca66cd65,Learning Compositional Visual Concepts with Mutual Consistency,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+bd379f8e08f88729a9214260e05967f4ca66cd65,Learning Compositional Visual Concepts with Mutual Consistency,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+bd98a68ef57d60aa6c939504d06d95fe08e2aceb,ViS-HuD: Using Visual Saliency to Improve Human Detection with Convolutional Neural Networks,Ahmedabad University,Ahmedabad University,"School of Science and Technology, University Road, Gurukul, Gulbai tekra, Ahmedabad, Ahmedabad District, Gujarat, 380001, India",23.03787430,72.55180046,edu,
+bd21109e40c26af83c353a3271d0cd0b5c4b4ade,Attentive Sequence to Sequence Translation for Localizing Clips of Interest by Natural Language Descriptions,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+bd21109e40c26af83c353a3271d0cd0b5c4b4ade,Attentive Sequence to Sequence Translation for Localizing Clips of Interest by Natural Language Descriptions,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+bd21109e40c26af83c353a3271d0cd0b5c4b4ade,Attentive Sequence to Sequence Translation for Localizing Clips of Interest by Natural Language Descriptions,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+bd21109e40c26af83c353a3271d0cd0b5c4b4ade,Attentive Sequence to Sequence Translation for Localizing Clips of Interest by Natural Language Descriptions,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+bd8b7599acf53e3053aa27cfd522764e28474e57,Learning long term face aging patterns from partially dense aging databases,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+bd8b7599acf53e3053aa27cfd522764e28474e57,Learning long term face aging patterns from partially dense aging databases,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+bd8f77b7d3b9d272f7a68defc1412f73e5ac3135,SphereFace: Deep Hypersphere Embedding for Face Recognition,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+bd8f77b7d3b9d272f7a68defc1412f73e5ac3135,SphereFace: Deep Hypersphere Embedding for Face Recognition,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+bd8f77b7d3b9d272f7a68defc1412f73e5ac3135,SphereFace: Deep Hypersphere Embedding for Face Recognition,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+bd26dabab576adb6af30484183c9c9c8379bf2e0,SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception,South China University of Technology,South China University of Technology,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.05020420,113.39880323,edu,
+bd9c9729475ba7e3b255e24e7478a5acb393c8e9,Interpretable Partitioned Embedding for Customized Fashion Outfit Composition,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+bd9c9729475ba7e3b255e24e7478a5acb393c8e9,Interpretable Partitioned Embedding for Customized Fashion Outfit Composition,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+bdb1fca40fd98a966b627ba9b0f4a0ac801dffdc,"Multi-Scale , Multi-Temporal Vegetation Mapping and Assessment of Ecosystem Degradation at Gashaka Gumti National Park ( Nigeria )",University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+bd9157331104a0708aa4f8ae79b7651a5be797c6,SLAC: A Sparsely Labeled Dataset for Action Classification and Localization,MIT,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+bd4f2e7a196c0d6033a49390ee8836f4f551b7c8,ICDAR 2015 competition on Robust Reading,Czech Technical University,Czech Technical University,"České vysoké učení technické v Praze, Resslova, Nové Město, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 11121, Česko",50.07642960,14.41802312,edu,
+bd4f2e7a196c0d6033a49390ee8836f4f551b7c8,ICDAR 2015 competition on Robust Reading,Kyushu University,Kyushu University,"伊都ゲストハウス, 桜井太郎丸線, 西区, 福岡市, 福岡県, 九州地方, 819−0395, 日本",33.59914655,130.22359848,edu,
+bdaed05eefddd2829c937978852fcf3cedc84620,Multifactor Analysis for Face Recognition Based on Factor-Dependent Geometry,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+bdaed05eefddd2829c937978852fcf3cedc84620,Multifactor Analysis for Face Recognition Based on Factor-Dependent Geometry,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+bdaed05eefddd2829c937978852fcf3cedc84620,Multifactor Analysis for Face Recognition Based on Factor-Dependent Geometry,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+bd5c222323d6b46ea71f329cafe11d38533f6f3a,Repetition Suppression and Memory for Faces is Reduced in Adults with Autism Spectrum Conditions,Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.48799610,-3.17969747,edu,
+bd17484e0a6773a74c51c41e773e202080682b3b,2D-3D Pose Consistency-based Conditional Random Fields for 3D Human Pose Estimation,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+bd70f832e133fb87bae82dfaa0ae9d1599e52e4b,Combining Classifier for Face Identification at Unknown Views with a Single Model Image,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+d1c204da4e0ab653c32ae8fc325d5b69641b6ed7,Learning Globally Optimized Object Detector via Policy Gradient,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+d1c204da4e0ab653c32ae8fc325d5b69641b6ed7,Learning Globally Optimized Object Detector via Policy Gradient,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+d1c204da4e0ab653c32ae8fc325d5b69641b6ed7,Learning Globally Optimized Object Detector via Policy Gradient,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+d185f4f05c587e23c0119f2cdfac8ea335197ac0,"Facial Expression Analysis, Modeling and Synthesis: Overcoming the Limitations of Artificial Intelligence with the Art of the Soluble",Eindhoven University of Technology,Eindhoven University of Technology,"Technische Universiteit Eindhoven, 2, De Rondom, Villapark, Eindhoven, Noord-Brabant, Nederland, 5600 MB, Nederland",51.44866020,5.49039957,edu,
+d185f4f05c587e23c0119f2cdfac8ea335197ac0,"Facial Expression Analysis, Modeling and Synthesis: Overcoming the Limitations of Artificial Intelligence with the Art of the Soluble",Ritsumeikan University,Ritsumeikan University,"立命館大学 (Ritsumeikan University), 衣笠宇多野線, 北区, 京都市, 京都府, 近畿地方, 6038577, 日本",35.03332810,135.72491540,edu,
+d10f8d58bf50f5b097b4344dc8cccbbe0c330bd9,Hard-Aware Deeply Cascaded Embedding,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+d10f8d58bf50f5b097b4344dc8cccbbe0c330bd9,Hard-Aware Deeply Cascaded Embedding,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+d10f8d58bf50f5b097b4344dc8cccbbe0c330bd9,Hard-Aware Deeply Cascaded Embedding,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+d140c5add2cddd4a572f07358d666fe00e8f4fe1,Statistically Learned Deformable Eye Models,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+d1ba33106567c880bf99daba2bd31fe88df4ecba,Deep Adaptive Feature Embedding with Local Sample Distributions for Person Re-identification,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+d1ba33106567c880bf99daba2bd31fe88df4ecba,Deep Adaptive Feature Embedding with Local Sample Distributions for Person Re-identification,University of New South Wales,University of New South Wales,"UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia",-33.91758275,151.23124025,edu,
+d1ba33106567c880bf99daba2bd31fe88df4ecba,Deep Adaptive Feature Embedding with Local Sample Distributions for Person Re-identification,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+d10eff69699591d26dbb69ed17d8afe06bc581db,Wasserstein Introspective Neural Networks,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+d182c6d9ac4777b5ad73afdd64b7b68d76037212,Aligned Image-Word Representations Improve Inductive Transfer Across Vision-Language Tasks,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+d16968e5baac6d26b9cef5034f9d84bcc3ec627c,"Children Facial Expression Production: Influence of Age, Gender, Emotion Subtype, Elicitation Condition and Culture",Kyoto University,Kyoto University,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.02749960,135.78154513,edu,
+d16968e5baac6d26b9cef5034f9d84bcc3ec627c,"Children Facial Expression Production: Influence of Age, Gender, Emotion Subtype, Elicitation Condition and Culture",Hiroshima University,Hiroshima University,"Hiroshima University 広島大学 東広島キャンパス, 出会いの道 Deai-no-michi Str., 西条下見, 東広島市, 広島県, 中国地方, 739-0047, 日本",34.40197660,132.71231950,edu,
+d1d90bbc6bb4fdb0d928ff74bfd8671aaafa070e,Neural Person Search Machines,Hefei University of Technology,Hefei University of Technology,"合肥工业大学(屯溪路校区), 193号, 南一环路, 航运南村, 包公街道, 合肥市区, 合肥市, 安徽省, 230009, 中国",31.84691800,117.29053367,edu,
+d1d90bbc6bb4fdb0d928ff74bfd8671aaafa070e,Neural Person Search Machines,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+d1d90bbc6bb4fdb0d928ff74bfd8671aaafa070e,Neural Person Search Machines,Southwest Jiaotong University,Southwest Jiaotong University,"西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国",30.69784700,104.05208110,edu,
+d115c4a66d765fef596b0b171febca334cea15b5,Combining Stacked Denoising Autoencoders and Random Forests for Face Detection,Swansea University,Swansea University,"Swansea University, University Footbridge, Sketty, Swansea, Wales, SA2 8PZ, UK",51.60915780,-3.97934429,edu,
+d1f3fb7e8be9d8db50f29403ffbbf6ec58623e61,Embodied Question Answering,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+d1b0d2ec2f01c3aab06119192cf9ba23146cc662,"Explanatory Dialogs : Towards Actionable , Interactive Explanations Gagan Bansal",University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+d1d15291dadc44f0cd192dc88bab3b10e2b07ccd,"“I Look in Your Eyes, Honey”: Internal Face Features Induce Spatial Frequency Preference for Human Face Processing",University of Barcelona,University of Barcelona,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España",41.38689130,2.16352385,edu,
+d13e81c7a3d6f62948a68663acfecc3a480d9b1c,Scaling Distributed All-Pairs Algorithms: Manage Computation and Limit Data Replication with Quorums,Iowa State University,Iowa State University,"Iowa State University, Farm House Road, Ames, Story County, Iowa, 50014, USA",42.02791015,-93.64464415,edu,
+d13bb317e87f3f6da10da11059ebf4350b754814,"Survey of the State of the Art in Natural Language Generation: Core tasks, applications and evaluation",University of Malta,University of Malta,"University of Malta, Ring Road, Japanese Garden, L-Imsida, Malta, MSD 9027, Malta",35.90232260,14.48341890,edu,
+d1959ba4637739dcc6cc6995e10fd41fd6604713,Deep Learning for Semantic Video Understanding,Rochester Institute of Technology,Rochester Institute of Technology,"Rochester Institute of Technology (RIT), 1, Lomb Memorial Drive, Bailey, Henrietta Town, Monroe County, New York, 14623, USA",43.08250655,-77.67121663,edu,
+d1881993c446ea693bbf7f7d6e750798bf958900,Large-Scale YouTube-8M Video Understanding with Deep Neural Networks,"Institute for System Programming, Moscow",Institute for System Programming,"ИСП РАН, 25, улица Александра Солженицына, Швивая горка, Таганский район, Центральный административный округ, Москва, ЦФО, 109004, РФ",55.74498810,37.66450421,edu,
+d1881993c446ea693bbf7f7d6e750798bf958900,Large-Scale YouTube-8M Video Understanding with Deep Neural Networks,"Institute for System Programming, Moscow",Institute for System Programming,"ИСП РАН, 25, улица Александра Солженицына, Швивая горка, Таганский район, Центральный административный округ, Москва, ЦФО, 109004, РФ",55.74498810,37.66450421,edu,
+d1d9e6027288cdd64509ea62f88a3cbd9320c180,Automated Markerless Analysis of Human Gait Motion for Recognition and Classification,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+d1bca67dd26d719b3e7a51acecd7c54c7b78b34a,Spatial Pyramid Convolutional Neural Network for Social Event Detection in Static Image,Kumamoto University,Kumamoto University,"熊本大学黒髪キャンパス, 熊本菊陽線, 中央区, 熊本市, 熊本県, 九州地方, 860-0863, 日本",32.81641780,130.72703969,edu,
+d1e388269ea8ce7074f804f79e158038f629a0df,Batch-Based Activity Recognition from Egocentric Photo-Streams,University of Barcelona,University of Barcelona,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España",41.38689130,2.16352385,edu,
+d67277eb00d58d20eaa18c346761fe4eeaab9c49,Multi-Level Fusion based 3 D Object Detection from Monocular Images,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+d6b2d4f1bc08dc3d3922fb43b1b8e3614349f539,"Burçin Buket Oğul in partial fulfillment of the requirements for the degree of Master of Science in Information Systems , Middle East Technical",Middle East Technical University,Middle East Technical University,"ODTÜ, 1, 1591.sk(315.sk), Çiğdem Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87549675,32.78553506,edu,
+d6b1c0c2107abb01ee4241963eab26e261510f12,Weakly supervised learning of semantic colour terms,University of Bristol,University of Bristol,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK",51.45848370,-2.60977520,edu,
+d6b1b0e60e1764982ef95d4ade8fcaa10bfb156a,A Sketch-based Approach for Multimedia Retrieval,International Institute of Information Technology,International Institute of Information Technology,"International Institute of Information Technology, Hyderabad, Campus Road, Ward 105 Gachibowli, Greater Hyderabad Municipal Corporation West Zone, Hyderabad, Rangareddy District, Telangana, 500032, India",17.44549570,78.34854698,edu,
+d69719b42ee53b666e56ed476629a883c59ddf66,Learning Facial Action Units from Web Images with Scalable Weakly Supervised Clustering,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+d69719b42ee53b666e56ed476629a883c59ddf66,Learning Facial Action Units from Web Images with Scalable Weakly Supervised Clustering,Ohio State University,The Ohio State University,"The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA",40.00471095,-83.02859368,edu,
+d647099e571f9af3a1762f895fd8c99760a3916e,Exploring facial expressions with compositional features,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+d69271c7b77bc3a06882884c21aa1b609b3f76cc,FaceBoxes: A CPU real-time face detector with high accuracy,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+d69271c7b77bc3a06882884c21aa1b609b3f76cc,FaceBoxes: A CPU real-time face detector with high accuracy,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+d6ca3dc01de060871839d5536e8112b551a7f9ff,Sleep-deprived fatigue pattern analysis using large-scale selfies from social media,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+d6ca3dc01de060871839d5536e8112b551a7f9ff,Sleep-deprived fatigue pattern analysis using large-scale selfies from social media,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+d6ca3dc01de060871839d5536e8112b551a7f9ff,Sleep-deprived fatigue pattern analysis using large-scale selfies from social media,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+d6ca3dc01de060871839d5536e8112b551a7f9ff,Sleep-deprived fatigue pattern analysis using large-scale selfies from social media,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+d61e794ec22a4d4882181da17316438b5b24890f,Detecting Sensor Level Spoof Attacks Using Joint Encoding of Temporal and Spatial Features,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+d675a54dd5e353f99a1bec3b2ddab925a6563653,Thin-Slicing Network: A Deep Structured Model for Pose Estimation in Videos,ETH Zurich,"Computer Vision Laboratory, ETH Zurich, Zurich, Switzerland","Sternwartstrasse 7, 8092 Zürich, Switzerland",47.37723980,8.55216180,edu,
+d6b1e14d211145bbc083b230d1724826de430fb7,Eye detection in the Middle-Wave Infrared spectrum: Towards recognition in the dark,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+d6d7b7e882a65663fafe470f0582afb4279879b7,Active Learning & its Applications,International Institute of Information Technology,International Institute of Information Technology,"International Institute of Information Technology, Hyderabad, Campus Road, Ward 105 Gachibowli, Greater Hyderabad Municipal Corporation West Zone, Hyderabad, Rangareddy District, Telangana, 500032, India",17.44549570,78.34854698,edu,
+d67dcaf6e44afd30c5602172c4eec1e484fc7fb7,Illumination Normalization for Robust Face Recognition Using Discrete Wavelet Transform,Mahanakorn University of Technology,Mahanakorn University of Technology,"มหาวิทยาลัยเทคโนโลยีมหานคร, 140, ถนนเชื่อมสัมพันธ์, กรุงเทพมหานคร, เขตหนองจอก, กรุงเทพมหานคร, 10530, ประเทศไทย",13.84450465,100.85620818,edu,
+d6c4069044b976c48c384c4562338942a842cf55,3D Human Pose Estimation with 2D Marginal Heatmaps,La Trobe University,La Trobe University,"La Trobe University, Keck Street, Flora Hill, Bendigo, City of Greater Bendigo, Loddon Mallee, Victoria, 3550, Australia",-36.77847540,144.29804700,edu,
+d68dbb71b34dfe98dee0680198a23d3b53056394,VIVA Face-off Challenge: Dataset Creation and Balancing Privacy,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+d666ce9d783a2d31550a8aa47da45128a67304a7,On Relating Visual Elements to City Statistics,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+d666ce9d783a2d31550a8aa47da45128a67304a7,On Relating Visual Elements to City Statistics,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+d666ce9d783a2d31550a8aa47da45128a67304a7,On Relating Visual Elements to City Statistics,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+d6143ddbee74c10996d291c666fa17bd87f9d4e2,CT-GAN: Conditional Transformation Generative Adversarial Network for Image Attribute Modification,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+d6daaec16ac90de8f99640f687ad7e9e92a46840,Can gait biometrics be Spoofed?,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+d6daaec16ac90de8f99640f687ad7e9e92a46840,Can gait biometrics be Spoofed?,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+d6ad7334d6e2575d61f86f91b8edac8053af8c35,A framework for automatic question generation from text using deep reinforcement learning,Monash University,Monash University,"Monash University, Mile Lane, Parkville, City of Melbourne, Victoria, 3000, Australia",-37.78397455,144.95867433,edu,
+d670583c4065132282dbcb4387ee6a83e85f8af1,"A Study of Question Effectiveness Using Reddit ""Ask Me Anything"" Threads",University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+bc21bf4c733e117d2d969fd5605bba4251467243,Fusion of dynamic and static features for gait recognition over time,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+bce887343456e4344b8174b99cea641a97a7bfa6,On gait as a biometric: Progress and prospects,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+bcab55f8bf0623df71623e673c767eed2159f05a,Deep Hybrid Scattering Image Learning,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+bce36092b1910ff3d492f86aa3a39ed8faaf72d2,Chapter 17 Face Recognition Using 3 D Images,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+bce61a182c7a1028eed0c0f67e779753a86503c2,Soft Activation Mapping of Lung Nodules in Low-Dose CT images,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+bce61a182c7a1028eed0c0f67e779753a86503c2,Soft Activation Mapping of Lung Nodules in Low-Dose CT images,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+bce61a182c7a1028eed0c0f67e779753a86503c2,Soft Activation Mapping of Lung Nodules in Low-Dose CT images,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+bc7a3573a464bca2cdca71f6f32e798464b85ee6,Exploiting Semantic Contextualization for Interpretation of Human Activity in Videos,University of South Florida,University of South Florida,"University of South Florida, Leroy Collins Boulevard, Tampa, Hillsborough County, Florida, 33620, USA",28.05999990,-82.41383619,edu,
+bc7a3573a464bca2cdca71f6f32e798464b85ee6,Exploiting Semantic Contextualization for Interpretation of Human Activity in Videos,University of South Florida,University of South Florida,"University of South Florida, Leroy Collins Boulevard, Tampa, Hillsborough County, Florida, 33620, USA",28.05999990,-82.41383619,edu,
+bc7a3573a464bca2cdca71f6f32e798464b85ee6,Exploiting Semantic Contextualization for Interpretation of Human Activity in Videos,University of South Florida,University of South Florida,"University of South Florida, Leroy Collins Boulevard, Tampa, Hillsborough County, Florida, 33620, USA",28.05999990,-82.41383619,edu,
+bc9003ad368cb79d8a8ac2ad025718da5ea36bc4,Facial expression recognition with a three-dimensional face model,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+bc7e6175aed6538eca08962e133aede11fc75bcf,Multi-Scale Supervised Network for Human Pose Estimation,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+bc494a3442ec7adff4527e60947214c0015f3b3a,Convolutional Image Captioning,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+bcd59b43aaef7f466eda609e3f887a3db4ae3b41,Graph Correspondence Transfer for Person Re-identification,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+bcd59b43aaef7f466eda609e3f887a3db4ae3b41,Graph Correspondence Transfer for Person Re-identification,South China University of Technology,South China University of Technology,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.05020420,113.39880323,edu,
+bcd59b43aaef7f466eda609e3f887a3db4ae3b41,Graph Correspondence Transfer for Person Re-identification,Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+bcd59b43aaef7f466eda609e3f887a3db4ae3b41,Graph Correspondence Transfer for Person Re-identification,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+bca7d615cc143a255c0dc65235ba1acbac86ba32,Learning to Adapt Structured Output Space for Semantic Segmentation,"University of California, Merced","University of California, Merced","University of California, Merced, Ansel Adams Road, Merced County, California, USA",37.36566745,-120.42158888,edu,
+bcc0a12f8dbc3efcd3ef353b0173c49a8889e763,Automatic Face Anonymization in Visual Data: Are we really well protected?,EURECOM,EURECOM,"Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France",43.61438600,7.07112500,edu,
+bc15a2fd09df7046e7e8c7c5b054d7f06c3cefe9,Using deep autoencoders for facial expression recognition,"COMSATS Institute of Information Technology, Islamabad","COMSATS Institute of Information Technology, Islamabad","COMSATS Institute of Information Technology, Fence, Chak Shehzad, وفاقی دارالحکومت اسلام آباد, 45550, ‏پاکستان‎",33.65010145,73.15514949,edu,
+bc15a2fd09df7046e7e8c7c5b054d7f06c3cefe9,Using deep autoencoders for facial expression recognition,Information Technology University,Information Technology University (ITU),"Information Technology University (ITU), Ferozepur Road, Sher Shah Block, Garden Town, Al Noor Town, Lahore District, پنجاب, 54600, ‏پاکستان‎",31.47602990,74.34275260,edu,
+bc15a2fd09df7046e7e8c7c5b054d7f06c3cefe9,Using deep autoencoders for facial expression recognition,National University of Sciences and Technology,National University of Sciences and Technology,"National University of Sciences and Technology (NUST), Kashmir Highway, جی - 10, ICT, وفاقی دارالحکومت اسلام آباد, 44000, ‏پاکستان‎",33.64434700,72.98850790,edu,
+bc25f5e10c839d08ac8827fbe7724cd713008803,Properties of Patch Based Approaches for the Recognition of Visual Object Classes,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+bc3a7dcc237041aa4b0d70e07c9bc441dbbc9c97,Passive and Motivated Perception of Emotional Faces: Qualitative and Quantitative Changes in the Face Processing Network,University of Chicago,THE UNIVERSITY OF CHICAGO,"University of Chicago, South Ellis Avenue, Woodlawn, Chicago, Cook County, Illinois, 60637, USA",41.78468745,-87.60074933,edu,
+bc3a7dcc237041aa4b0d70e07c9bc441dbbc9c97,Passive and Motivated Perception of Emotional Faces: Qualitative and Quantitative Changes in the Face Processing Network,University of Chicago,THE UNIVERSITY OF CHICAGO,"University of Chicago, South Ellis Avenue, Woodlawn, Chicago, Cook County, Illinois, 60637, USA",41.78468745,-87.60074933,edu,
+bc27434e376db89fe0e6ef2d2fabc100d2575ec6,Faceless Person Recognition; Privacy Implications in Social Media,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+bc3e01016c4b2b75e9163c91fa65b64dcfb1acc9,Relaxed Softmax : Efficient Confidence Auto-Calibration for Safe Pedestrian Detection,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+bc2953c2d177b18f0870ff9e7439e00a904a0b33,Please smile,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+bc2953c2d177b18f0870ff9e7439e00a904a0b33,Please smile,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+bc2953c2d177b18f0870ff9e7439e00a904a0b33,Please smile,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+bc12715a1ddf1a540dab06bf3ac4f3a32a26b135,Tracking the Trackers: An Analysis of the State of the Art in Multiple Object Tracking,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+bc12715a1ddf1a540dab06bf3ac4f3a32a26b135,Tracking the Trackers: An Analysis of the State of the Art in Multiple Object Tracking,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+bc9f3c466c6f6b386f4ef1195853d498cf3c182e,Mapping Instructions and Visual Observations to Actions with Reinforcement Learning,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+bc9f3c466c6f6b386f4ef1195853d498cf3c182e,Mapping Instructions and Visual Observations to Actions with Reinforcement Learning,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+bc910ca355277359130da841a589a36446616262,Conditional High-Order Boltzmann Machine: A Supervised Learning Model for Relation Learning,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+bca6e77e7e0db8f632af7395e99028025854ea0d,A Multiview-Based Parameter Free Framework for Group Detection,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+bccfda60d53fd1ca114355f606fcfcc2bc9da529,Person Re-identification with Cascaded Pairwise Convolutions,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+bccfda60d53fd1ca114355f606fcfcc2bc9da529,Person Re-identification with Cascaded Pairwise Convolutions,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+bc871497626afb469d25c4975aa657159269aefe,Adaptive learning algorithm for pattern classification,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+bc871497626afb469d25c4975aa657159269aefe,Adaptive learning algorithm for pattern classification,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+bc27c0d99e6f21b8a4fac6a0cf1079f6755554cc,Adaptive Sparse Kernel Principal Component Analysis for Computation and Store Space Constrained-based Feature Extraction,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+bc27c0d99e6f21b8a4fac6a0cf1079f6755554cc,Adaptive Sparse Kernel Principal Component Analysis for Computation and Store Space Constrained-based Feature Extraction,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+bc99f98b5f1fd158cc31d693061c402a36222dbb,Recent advances in understanding the neural bases of autism spectrum disorder.,Yale University,Yale University,"Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA",41.25713055,-72.98966960,edu,
+bc866c2ced533252f29cf2111dd71a6d1724bd49,A Multi-Modal Face Recognition Method Using Complete Local Derivative Patterns and Depth Maps,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+bc8e11b8cdf0cfbedde798a53a0318e8d6f67e17,Deep Learning for Fixed Model Reuse,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+bc2856e70ad3c8fe439dec6cc6a2e03d6e090fb7,What value high level concepts in vision to language problems ?,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+bcfd771ee51f2813e910b339d08d10057af1e294,Analysis of face recognition under varying facial expression: a survey,"COMSATS Institute of Information Technology, Lahore",COMSATS Institute of Information Technology,"COMSATS Institute of Information Technology, Ali Akbar Road, Dawood Residency, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54700, ‏پاکستان‎",31.40063320,74.21372960,edu,
+bc6a0a107068b5a1715510e815c0103eaf80672a,Cross-pose Face Recognition by Canonical Correlation Analysis,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+bc6a0a107068b5a1715510e815c0103eaf80672a,Cross-pose Face Recognition by Canonical Correlation Analysis,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+bc6a0a107068b5a1715510e815c0103eaf80672a,Cross-pose Face Recognition by Canonical Correlation Analysis,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+bc6a0a107068b5a1715510e815c0103eaf80672a,Cross-pose Face Recognition by Canonical Correlation Analysis,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+ae5b2b449f59ae0f46f6a31ed4826d98241c394c,Accurate real-time people counting for crowded environments,University of Florence,University of Florence,"Piazza di San Marco, 4, 50121 Firenze FI, Italy",43.77764260,11.25976500,edu,
+ae5b2b449f59ae0f46f6a31ed4826d98241c394c,Accurate real-time people counting for crowded environments,University of Florence,University of Florence,"Piazza di San Marco, 4, 50121 Firenze FI, Italy",43.77764260,11.25976500,edu,
+ae5b2b449f59ae0f46f6a31ed4826d98241c394c,Accurate real-time people counting for crowded environments,University of Florence,University of Florence,"Piazza di San Marco, 4, 50121 Firenze FI, Italy",43.77764260,11.25976500,edu,
+aec46facf3131a5be4fc23db4ebfb5514e904ae3,Audio to the rescue,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+aea50d3414ecb20dc2ba77b0277d0df59bde2c2c,The #selfiestation: Design and Use of a Kiosk for Taking Selfies in the Enterprise,"IBM Research, North Carolina",IBM Research,"IBM, East Cornwallis Road, Research Triangle Park, Nelson, Durham County, North Carolina, 27709, USA",35.90422720,-78.85565763,company,
+ae0765ebdffffd6e6cc33c7705df33b7e8478627,Self-Reinforced Cascaded Regression for Face Alignment,Dalian University of Technology,Dalian University of Technology,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国",38.88140235,121.52281098,edu,
+ae0765ebdffffd6e6cc33c7705df33b7e8478627,Self-Reinforced Cascaded Regression for Face Alignment,Dalian University of Technology,Dalian University of Technology,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国",38.88140235,121.52281098,edu,
+aefc7c708269b874182a5c877fb6dae06da210d4,Deep Learning of Invariant Features via Simulated Fixations in Video,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+aefc7c708269b874182a5c877fb6dae06da210d4,Deep Learning of Invariant Features via Simulated Fixations in Video,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+ae233a6f07d61e2c032bd09d92bdf20c27305c1f,Assessment of Pain Using Facial Pictures Taken with a Smartphone,Marquette University,Marquette University,"Marquette University, West Wisconsin Avenue, University Hill, Milwaukee, Milwaukee County, Wisconsin, 53226, USA",43.03889625,-87.93155450,edu,
+ae233a6f07d61e2c032bd09d92bdf20c27305c1f,Assessment of Pain Using Facial Pictures Taken with a Smartphone,Marquette University,Marquette University,"Marquette University, West Wisconsin Avenue, University Hill, Milwaukee, Milwaukee County, Wisconsin, 53226, USA",43.03889625,-87.93155450,edu,
+aeaf5dbb3608922246c7cd8a619541ea9e4a7028,Weakly Supervised Facial Action Unit Recognition through Adversarial Training,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+ae836e2be4bb784760e43de88a68c97f4f9e44a1,Semi-SupervisedDimensionalityReduction ∗,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+ae836e2be4bb784760e43de88a68c97f4f9e44a1,Semi-SupervisedDimensionalityReduction ∗,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+aece472ba64007f2e86300cc3486c84597f02ec7,Analyzing Image-Text Relations for Semantic Media Adaptation and Personalization,Dublin City University,DUBLIN CITY UNIVERSITY,"Dublin City University Glasnevin Campus, Lower Car Park, Wad, Whitehall A ED, Dublin 9, Dublin, County Dublin, Leinster, D09 FW22, Ireland",53.38522185,-6.25740874,edu,
+ae5bb02599244d6d88c4fe466a7fdd80aeb91af4,"Analysis of Recognition Algorithms using Linear, Generalized Linear, and Generalized Linear Mixed Models",Colorado State University,Colorado State University,"Colorado State University, West Pitkin Street, Woodwest, Fort Collins, Larimer County, Colorado, 80526-2002, USA",40.57093580,-105.08655256,edu,
+ae5bb02599244d6d88c4fe466a7fdd80aeb91af4,"Analysis of Recognition Algorithms using Linear, Generalized Linear, and Generalized Linear Mixed Models",Colorado State University,Colorado State University,"Colorado State University, West Pitkin Street, Woodwest, Fort Collins, Larimer County, Colorado, 80526-2002, USA",40.57093580,-105.08655256,edu,
+ae18ccb35a1a5d7b22f2a5760f706b1c11bf39a9,Sensing Highly Non-Rigid Objects with RGBD Sensors for Robotic Systems,Clemson University,Clemson University,"Clemson University, Old Stadium Road, Clemson Heights, Pickens County, South Carolina, 29631, USA",34.66869155,-82.83743476,edu,
+aecce5d8e06da797c087fb361732e84e62c04c4f,Bilateral Ordinal Relevance Multi-instance Regression for Facial Action Unit Intensity Estimation,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+aecce5d8e06da797c087fb361732e84e62c04c4f,Bilateral Ordinal Relevance Multi-instance Regression for Facial Action Unit Intensity Estimation,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+aef59def2a65901de9d520d0442b42bb4a448f06,Facial Expression Recognition,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+aed124c053b9c510487d68e0faf32aff2a84c3b5,FERA 2017 - Addressing Head Pose in the Third Facial Expression Recognition and Analysis Challenge,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+aed124c053b9c510487d68e0faf32aff2a84c3b5,FERA 2017 - Addressing Head Pose in the Third Facial Expression Recognition and Analysis Challenge,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+aed124c053b9c510487d68e0faf32aff2a84c3b5,FERA 2017 - Addressing Head Pose in the Third Facial Expression Recognition and Analysis Challenge,Binghamton University,Binghamton University,"Binghamton University Downtown Center, Washington Street, Downtown, Binghamton, Broome County, New York, 13901, USA",42.09580770,-75.91455689,edu,
+aed124c053b9c510487d68e0faf32aff2a84c3b5,FERA 2017 - Addressing Head Pose in the Third Facial Expression Recognition and Analysis Challenge,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+aed124c053b9c510487d68e0faf32aff2a84c3b5,FERA 2017 - Addressing Head Pose in the Third Facial Expression Recognition and Analysis Challenge,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+aed124c053b9c510487d68e0faf32aff2a84c3b5,FERA 2017 - Addressing Head Pose in the Third Facial Expression Recognition and Analysis Challenge,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+ae1de0359f4ed53918824271c888b7b36b8a5d41,Low-cost Automatic Inpainting for Artifact Suppression in Facial Images,University of Groningen,University of Groningen,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland",53.21967825,6.56251482,edu,
+ae4390873485c9432899977499c3bf17886fa149,Facial Expression Recognition Using Digitalised Facial Features Based on Active Shape Model,Glyndwr University,Glyndwr University,"Glyndŵr University, Mold Road, Rhosrobin, Wrexham, Wales, LL11 2AW, UK",53.05373795,-3.00482075,edu,
+ae60fccb686272d12e909c9de99efb652e0934ec,The impact of internalizing symptoms on autistic traits in adolescents with restrictive anorexia nervosa,University of Pisa,University of Pisa,"Dipartimento di Fisica 'E. Fermi', 3, Largo Bruno Pontecorvo, San Francesco, Pisa, PI, TOS, 56127, Italia",43.72012990,10.40789760,edu,
+ae7ebf1c6111af9d00dfeceec4b48b528b437956,Exploring Human Cognition Using Large Image Databases,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+ae6e8851dfd9c97e37e1cbd61b21cc54d5e2b9c7,Paraphrasing Complex Network: Network Compression via Factor Transfer,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+ae85ff7fb5a7e7a232793c743ad11baf849a61bb,Exploring the Identity Manifold: Constrained Operations in Face Space,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+ae6193531d42fc20c9c991143ce323034d7aaa8d,Going Deeper with Semantics : Video Activity Interpretation using Semantic Contextualization,University of South Florida,University of South Florida,"University of South Florida, Leroy Collins Boulevard, Tampa, Hillsborough County, Florida, 33620, USA",28.05999990,-82.41383619,edu,
+ae6193531d42fc20c9c991143ce323034d7aaa8d,Going Deeper with Semantics : Video Activity Interpretation using Semantic Contextualization,University of South Florida,University of South Florida,"University of South Florida, Leroy Collins Boulevard, Tampa, Hillsborough County, Florida, 33620, USA",28.05999990,-82.41383619,edu,
+ae6193531d42fc20c9c991143ce323034d7aaa8d,Going Deeper with Semantics : Video Activity Interpretation using Semantic Contextualization,University of South Florida,University of South Florida,"University of South Florida, Leroy Collins Boulevard, Tampa, Hillsborough County, Florida, 33620, USA",28.05999990,-82.41383619,edu,
+aea4128ba18689ff1af27b90c111bbd34013f8d5,Efficient k-Support Matrix Pursuit,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+aea4128ba18689ff1af27b90c111bbd34013f8d5,Efficient k-Support Matrix Pursuit,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+aea4128ba18689ff1af27b90c111bbd34013f8d5,Efficient k-Support Matrix Pursuit,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+aea4128ba18689ff1af27b90c111bbd34013f8d5,Efficient k-Support Matrix Pursuit,South China Normal University,South China Normal University,"华师, 五山路, 华南理工大学南新村, 天河区, 广州市, 广东省, 510630, 中国",23.14319700,113.34009651,edu,
+ae587a4a8842fbe01b9a043b66f762a89dca5074,Multi-view Supervision for Single-View Reconstruction via Differentiable Ray Consistency,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+ae2c71080b0e17dee4e5a019d87585f2987f0508,Emotional Face Recognition in Children With Attention Deficit/Hyperactivity Disorder: Evidence From Event Related Gamma Oscillation,Amirkabir University of Technology,Amirkabir University of Technology,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎",35.70451400,51.40972058,edu,
+ae2c71080b0e17dee4e5a019d87585f2987f0508,Emotional Face Recognition in Children With Attention Deficit/Hyperactivity Disorder: Evidence From Event Related Gamma Oscillation,Amirkabir University of Technology,Amirkabir University of Technology,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎",35.70451400,51.40972058,edu,
+aefb110f14dd8d59c5465c7d91bd8b34a7c69597,A sequential guiding network with attention for image captioning,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+aefb110f14dd8d59c5465c7d91bd8b34a7c69597,A sequential guiding network with attention for image captioning,North China Electric Power University,North China Electric Power University,"华北电力大学, 永华北大街, 莲池区, 保定市, 莲池区 (Lianchi), 保定市, 河北省, 071000, 中国",38.87604460,115.49738730,edu,
+aec84e5aec1b6d83baeb4d447cde399864e25467,Automatic landmarking for non-cooperative 3D face recognition,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+ae47e73c25755427c9f5904425a35d7db737829b,Multi-scale convolutional neural networks for crowd counting,South China University of Technology,South China University of Technology,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.05020420,113.39880323,edu,
+ae5f32e489c4d52e7311b66060c7381d932f4193,Appearance-and-Relation Networks for Video Classification,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+ae71f69f1db840e0aa17f8c814316f0bd0f6fbbf,That personal profile image might jeopardize your rental opportunity! On the relative impact of the seller's facial expressions upon buying behavior on Airbnb™,Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.48799610,-3.17969747,edu,
+aeef1fcaeb3e5f3eac93ee275426a7f5eb586e0b,Alive Caricature from 2 D to 3 D,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+aeef1fcaeb3e5f3eac93ee275426a7f5eb586e0b,Alive Caricature from 2 D to 3 D,Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.48799610,-3.17969747,edu,
+aeef1fcaeb3e5f3eac93ee275426a7f5eb586e0b,Alive Caricature from 2 D to 3 D,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+aef8456577768f2ff029107149c9c6713e8707f6,Multiplexed fluorescence unmixing,Bar-Ilan University,Bar-Ilan University,"אוניברסיטת בר אילן, כביש גהה, גבעת שמואל, קריית מטלון, גבעת שמואל, מחוז תל אביב, NO, ישראל",32.06932925,34.84334339,edu,
+d893f75206b122973cdbf2532f506912ccd6fbe0,Facial Expressions with Some Mixed Expressions Recognition Using Neural Networks,Pondicherry Engineering College,Pondicherry Engineering College,"Pondicherry Engineering College, PEC MAIN ROAD, Sri Ma, Puducherry, Puducherry district, Puducherry, 605001, India",12.01486930,79.84809104,edu,
+d802ed7d8b7aea71a10bd0d700fd11fde5729993,Development of an Active Shape Model Using the Discrete Cosine Transform,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+d802ed7d8b7aea71a10bd0d700fd11fde5729993,Development of an Active Shape Model Using the Discrete Cosine Transform,Concordia University,Concordia University,"Concordia University, 2811, Northeast Holman Street, Concordia, Portland, Multnomah County, Oregon, 97211, USA",45.57022705,-122.63709346,edu,
+d814981606fe5954148e45c737f1debe7b5b36c4,Visual Textbook Network: Watch Carefully before Answering Visual Questions,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+d8f837265fe76e26c99052229c4997fbec20573a,View-Adaptive Metric Learning for Multi-view Person Re-identification,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+d8f837265fe76e26c99052229c4997fbec20573a,View-Adaptive Metric Learning for Multi-view Person Re-identification,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+d8b2e37eb9d2ee0e84bceafce84812cfa0b88211,Mgan: Training Generative Adversarial Nets with Multiple Generators,Deakin University,Deakin University,"Deakin University, Pigdons Lane, Waurn Ponds, Geelong, City of Greater Geelong, Barwon South West, Victoria, 3216, Australia",-38.19928505,144.30365229,edu,
+d84a48f7d242d73b32a9286f9b148f5575acf227,Global and Local Consistent Age Generative Adversarial Networks,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+d84dccf9afffaf4e0cbb73f1ade34362a9fbe770,Neural-Symbolic VQA: Disentangling Reasoning from Vision and Language Understanding,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+d84dccf9afffaf4e0cbb73f1ade34362a9fbe770,Neural-Symbolic VQA: Disentangling Reasoning from Vision and Language Understanding,MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+d84dccf9afffaf4e0cbb73f1ade34362a9fbe770,Neural-Symbolic VQA: Disentangling Reasoning from Vision and Language Understanding,MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+d84dccf9afffaf4e0cbb73f1ade34362a9fbe770,Neural-Symbolic VQA: Disentangling Reasoning from Vision and Language Understanding,MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+d8722ffbca906a685abe57f3b7b9c1b542adfa0c,Facial Expression Analysis for Human Computer Interaction,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+d8896861126b7fd5d2ceb6fed8505a6dff83414f,In-plane Rotational Alignment of Faces by Eye and Eye-pair Detection,University of Groningen,University of Groningen,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland",53.21967825,6.56251482,edu,
+d8d08bdcafdf892e3fc6ff3c38c2503ff9d41996,Learning image-to-image translation using paired and unpaired training samples,Tampere University of Technology,Tampere University of Technology,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi",61.44964205,23.85877462,edu,
+d8ddaddfd4843d483505718f3487e312310ba23e,Classification of Hematoxylin and Eosin Images Using Local Binary Patterns and 1-d Sift Algorithm,Middle East Technical University,Middle East Technical University,"ODTÜ, 1, 1591.sk(315.sk), Çiğdem Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87549675,32.78553506,edu,
+d8214e68bc7af0e24558fd9e79b2d777e46f2edc,Making Fisher Discriminant Analysis Scalable,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+d8214e68bc7af0e24558fd9e79b2d777e46f2edc,Making Fisher Discriminant Analysis Scalable,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+d8214e68bc7af0e24558fd9e79b2d777e46f2edc,Making Fisher Discriminant Analysis Scalable,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+d8214e68bc7af0e24558fd9e79b2d777e46f2edc,Making Fisher Discriminant Analysis Scalable,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+d8586c794456f88400231db046b0d33be7781185,Pedestrian Verification for Multi-Camera Detection,University of North Carolina at Charlotte,University of North Carolina at Charlotte,"Lot 20, Poplar Terrace Drive, Charlotte, Mecklenburg County, North Carolina, 28223, USA",35.31034410,-80.73261617,edu,
+d847d2b75bf301007a9e67889bdae5b147559ed3,Detangling People: Individuating Multiple Close People and Their Body Parts via Region Assembly,Boston College,Boston College,"Boston College, 140, Commonwealth Avenue, Chestnut Hill, Newton, Middlesex County, Massachusetts, 02467, USA",42.33544810,-71.16813864,edu,
+d847d2b75bf301007a9e67889bdae5b147559ed3,Detangling People: Individuating Multiple Close People and Their Body Parts via Region Assembly,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+d8b251cbcda6289bbeaff56692da963aa5a80cd2,Multi-Task Multi-Sample Learning,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+d8b3aafb25c235be5c62da07881807872ac3e831,AI Challenger : A Large-scale Dataset for Going Deeper in Image Understanding,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+d84230a2fc9950fccfd37f0291d65e634b5ffc32,Historical and Modern Image-to-Image Translation with Generative Adversarial Networks,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+d84230a2fc9950fccfd37f0291d65e634b5ffc32,Historical and Modern Image-to-Image Translation with Generative Adversarial Networks,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+d84230a2fc9950fccfd37f0291d65e634b5ffc32,Historical and Modern Image-to-Image Translation with Generative Adversarial Networks,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+d8bf148899f09a0aad18a196ce729384a4464e2b,Facial Expression Recognition and Expression Intensity Estimation,State University of New Jersey,The State University of New Jersey,"Rutgers New Brunswick: Livingston Campus, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA",40.51865195,-74.44099801,edu,
+d8ce4c55d04b93bdb94c1d0427cfe40431bef941,"Simultaneous Human Segmentation, Depth and Pose Estimation via Dual Decomposition",Oxford Brookes University,Oxford Brookes University,"Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK",51.75552050,-1.22615970,edu,
+d8ce4c55d04b93bdb94c1d0427cfe40431bef941,"Simultaneous Human Segmentation, Depth and Pose Estimation via Dual Decomposition",Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+d80a3d1f3a438e02a6685e66ee908446766fefa9,Quantifying Facial Age by Posterior of Age Comparisons,SenseTime,SenseTime,"China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1号-7",39.99300800,116.32988200,company,"1 Zhongguancun E Rd, Haidian Qu, China"
+d80a3d1f3a438e02a6685e66ee908446766fefa9,Quantifying Facial Age by Posterior of Age Comparisons,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+d822a13d173db2c5244b7f7d31babb513143f5a9,Unsupervised Feature Analysis with Class Margin Optimization,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+d822a13d173db2c5244b7f7d31babb513143f5a9,Unsupervised Feature Analysis with Class Margin Optimization,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+d822a13d173db2c5244b7f7d31babb513143f5a9,Unsupervised Feature Analysis with Class Margin Optimization,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+d822a13d173db2c5244b7f7d31babb513143f5a9,Unsupervised Feature Analysis with Class Margin Optimization,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+d81b0a79558cabaaf3db22caf89454f4e012f21b,iParaphrasing: Extracting Visually Grounded Paraphrases via an Image,Osaka University,Osaka University,"大阪大学清明寮, 服部西町四丁目, 豊中市, 大阪府, 近畿地方, 日本",34.80809035,135.45785218,edu,
+ab734bac3994b00bf97ce22b9abc881ee8c12918,Log-Euclidean Metric Learning on Symmetric Positive Definite Manifold with Application to Image Set Classification,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+ab734bac3994b00bf97ce22b9abc881ee8c12918,Log-Euclidean Metric Learning on Symmetric Positive Definite Manifold with Application to Image Set Classification,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+ab05988c3af93e7753de79996cc409be0a8d2bd1,Approximate LDA Technique for Dimensionality Reduction in the Small Sample Size Case,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+ab44a7585b45e72affe1746fc302baccd6412969,Multiview Depth-based Pose Estimation,Amirkabir University of Technology,Amirkabir University of Technology,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎",35.70451400,51.40972058,edu,
+ab44a7585b45e72affe1746fc302baccd6412969,Multiview Depth-based Pose Estimation,University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+abb396490ba8b112f10fbb20a0a8ce69737cd492,Robust Face Recognition Using Color Information,New Jersey Institute of Technology,New Jersey Institute of Technology,"New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA",40.74230250,-74.17928172,edu,
+ab0d227b63b702ba80f70fd053175cd1b2fd28cc,Boosting Pseudo Census Transform Features for Face Alignment,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+abac0fa75281c9a0690bf67586280ed145682422,Describable Visual Attributes for Face Images,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+abb7c66487009ea20967b9c6708f660fd4197bbb,In2I : Unsupervised Multi-Image-to-Image Translation Using Generative Adversarial Networks,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+ab1728e84ac682ca0c53435f712a512ac139e9c8,University of Groningen Comparative Study Between Deep Learning and Bag of Visual Words for Wild-Animal,University of Groningen,University of Groningen,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland",53.21967825,6.56251482,edu,
+ab368594b9bd569e8d0fcf5c6010f1c31e3aa39e,"Node-Adapt, Path-Adapt and Tree-Adapt: Model-Transfer Domain Adaptation for Random Forest",Sharif University of Technology,Sharif University of Technology,"دانشگاه صنعتی شریف, خیابان آزادی, زنجان, منطقه ۹ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, 14588, ‏ایران‎",35.70362270,51.35125097,edu,
+ab368594b9bd569e8d0fcf5c6010f1c31e3aa39e,"Node-Adapt, Path-Adapt and Tree-Adapt: Model-Transfer Domain Adaptation for Random Forest",Sharif University of Technology,Sharif University of Technology,"دانشگاه صنعتی شریف, خیابان آزادی, زنجان, منطقه ۹ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, 14588, ‏ایران‎",35.70362270,51.35125097,edu,
+aba742ca4edc7dd37ff481d12e4b94c153baae77,Pose-Guided Human Parsing with Deep Learned Features,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+abc618b5c4f69a34c655bbb93c6003cc671b0f72,Is Faster R-CNN Doing Well for Pedestrian Detection?,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+abc618b5c4f69a34c655bbb93c6003cc671b0f72,Is Faster R-CNN Doing Well for Pedestrian Detection?,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+ab6ed75e1b1952e4461fc603bcfd042bb462635f,Cross-View Person Identification by Matching Human Poses Estimated with Confidence on Each Body Joint,University of South Carolina,University of South Carolina,"University of South Carolina, Wheat Street, Columbia, Richland County, South Carolina, 29205, USA",33.99282980,-81.02685168,edu,
+ab6ed75e1b1952e4461fc603bcfd042bb462635f,Cross-View Person Identification by Matching Human Poses Estimated with Confidence on Each Body Joint,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+ab0715642330502d5efca948e4753651cb004d84,Soft-NMS — Improving Object Detection with One Line of Code,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+ab567ca60fc3f72f27746b4d9e505042ab282ca3,Guidelines for studying developmental prosopagnosia in adults and children.,University of Minnesota,University of Minnesota,"WeismanArt, 333, East River Parkway, Marcy-Holmes, Phillips, Minneapolis, Hennepin County, Minnesota, 55455, USA",44.97308605,-93.23708813,edu,
+ab567ca60fc3f72f27746b4d9e505042ab282ca3,Guidelines for studying developmental prosopagnosia in adults and children.,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+ab567ca60fc3f72f27746b4d9e505042ab282ca3,Guidelines for studying developmental prosopagnosia in adults and children.,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+abba1bf1348a6f1b70a26aac237338ee66764458,Facial Action Unit Detection Using Attention and Relation Learning,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+abba1bf1348a6f1b70a26aac237338ee66764458,Facial Action Unit Detection Using Attention and Relation Learning,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+abba1bf1348a6f1b70a26aac237338ee66764458,Facial Action Unit Detection Using Attention and Relation Learning,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+abba1bf1348a6f1b70a26aac237338ee66764458,Facial Action Unit Detection Using Attention and Relation Learning,East China Normal University,East China Normal University,"华东师范大学, 3663, 中山北路, 曹家渡, 普陀区, 普陀区 (Putuo), 上海市, 200062, 中国",31.22849230,121.40211389,edu,
+abdd17e411a7bfe043f280abd4e560a04ab6e992,Pose-Robust Face Recognition via Deep Residual Equivariant Mapping,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+ab134c9244c762f1429ccb7d737610d17d95f019,Efficient Interactive Annotation of Segmentation Datasets with Polygon-RNN++,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+ab84d00079d0a29e44bdc4c83037dc76b0fbef05,Visual Text Correction,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+ab427f0c7d4b0eb22c045392107509451165b2ba,Learning scale ranges for the extraction of regions of interest,Western Kentucky University,Western Kentucky University,"Western Kentucky University, Avenue of Champions, Bowling Green, Warren County, Kentucky, 42101, USA",36.98453170,-86.45764430,edu,
+ab1f057bfe02b80a14f4c011abb9ceb2a9c98b6c,Dress Like a Star: Retrieving Fashion Products from Videos,Aston University,Aston University,"Aston University, Aston Street, Digbeth, Birmingham, West Midlands Combined Authority, West Midlands, England, B4, UK",52.48620785,-1.88849915,edu,
+ab1f057bfe02b80a14f4c011abb9ceb2a9c98b6c,Dress Like a Star: Retrieving Fashion Products from Videos,Aston University,Aston University,"Aston University, Aston Street, Digbeth, Birmingham, West Midlands Combined Authority, West Midlands, England, B4, UK",52.48620785,-1.88849915,edu,
+ab39b26d623aee22cd43f78d9cb1f5e0e55808fd,Structured Low-Rank Matrix Factorization with Missing and Grossly Corrupted Observations,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+ab39b26d623aee22cd43f78d9cb1f5e0e55808fd,Structured Low-Rank Matrix Factorization with Missing and Grossly Corrupted Observations,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+ab39b26d623aee22cd43f78d9cb1f5e0e55808fd,Structured Low-Rank Matrix Factorization with Missing and Grossly Corrupted Observations,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+ab34ed858412b08441259374a83f4b3adb615789,Multimedia Annotation Through Search and Mining,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+e5954958314b2184d7c7017ef2b8e1be47da23e5,The Variational Homoencoder: Learning to learn high capacity generative models from few examples,MIT,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+e5e5f31b81ed6526c26d277056b6ab4909a56c6c,Revisit Multinomial Logistic Regression in Deep Learning: Data Dependent Model Initialization for Image Recognition,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+e506cdb250eba5e70c5147eb477fbd069714765b,Heterogeneous Face Recognition,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+e5ee85c412942bdfd9df8cc519d4af31d6d08a67,Improved Training with Curriculum GANs,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+e572c42d8ef2e0fadedbaae77c8dfe05c4933fbf,A Century of Portraits: A Visual Historical Record of American High School Yearbooks,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+e572c42d8ef2e0fadedbaae77c8dfe05c4933fbf,A Century of Portraits: A Visual Historical Record of American High School Yearbooks,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+e572c42d8ef2e0fadedbaae77c8dfe05c4933fbf,A Century of Portraits: A Visual Historical Record of American High School Yearbooks,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+e5a1864f6073f35920a8f7a0a368ff66b9dc6284,A Pose-Sensitive Embedding for Person Re-Identification with Expanded Cross Neighborhood Re-Ranking,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+e5687f9584deca1fafb68b50fa79b9fcfbd1d379,Zero-Shot Object Recognition Using Semantic Label Vectors,University of Manitoba,University of Manitoba,"University of Manitoba, Gillson Street, Normand Park, Saint Vital, Winnipeg, Manitoba, R3T 2N2, Canada",49.80915360,-97.13304179,edu,
+e59813940c5c83b1ce63f3f451d03d34d2f68082,A Real-Time Facial Expression Recognition System for Online Games,University of Wollongong,University of Wollongong,"University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia",-34.40505545,150.87834655,edu,
+e59813940c5c83b1ce63f3f451d03d34d2f68082,A Real-Time Facial Expression Recognition System for Online Games,University of Wollongong,University of Wollongong,"University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia",-34.40505545,150.87834655,edu,
+e59813940c5c83b1ce63f3f451d03d34d2f68082,A Real-Time Facial Expression Recognition System for Online Games,University of Wollongong,University of Wollongong,"University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia",-34.40505545,150.87834655,edu,
+e59813940c5c83b1ce63f3f451d03d34d2f68082,A Real-Time Facial Expression Recognition System for Online Games,University of Wollongong,University of Wollongong,"University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia",-34.40505545,150.87834655,edu,
+e59813940c5c83b1ce63f3f451d03d34d2f68082,A Real-Time Facial Expression Recognition System for Online Games,University of Wollongong,University of Wollongong,"University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia",-34.40505545,150.87834655,edu,
+e5b301ee349ba8e96ea6c71782295c4f06be6c31,The Case for Onloading Continuous High-Datarate Perception to the Phone,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+e5b301ee349ba8e96ea6c71782295c4f06be6c31,The Case for Onloading Continuous High-Datarate Perception to the Phone,Microsoft Research Asia,Microsoft Research Asia,"Microsoft Research Asia, 5, Dan Ling Street, Haidian District, Beijing, 100080, China",39.98085000,116.30494000,company,
+e534582cfc1b98001fa1ad17cc1df47aeab1257f,Are We Looking in the Wrong Place? Implications for Behavioural-Based Pain Assessment in Rabbits (Oryctolagus cuniculi) and Beyond?,Newcastle University,Newcastle University,"Newcastle University, Claremont Walk, Haymarket, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE1 7RU, UK",54.98023235,-1.61452627,edu,
+e59e1c43ee86e3e68b83d8a9916ebe6375606bb3,Zero-Shot Transfer VQA Dataset,"Baidu Research, USA","Baidu Research, USA","1195 Bordeaux Dr, Sunnyvale, CA 94089, USA",37.40922650,-122.02366150,company,
+e50682179979e32c8d916c6c289d12d35cc0d0b2,Adversarial Perturbations Against Real-Time Video Classification Systems,"University of California, Riverside","University of California, Riverside","University of California, Riverside, Linden Street, Riverside, Riverside County, California, 92521, USA",33.98071305,-117.33261035,edu,
+e50682179979e32c8d916c6c289d12d35cc0d0b2,Adversarial Perturbations Against Real-Time Video Classification Systems,"University of California, Riverside","University of California, Riverside","University of California, Riverside, Linden Street, Riverside, Riverside County, California, 92521, USA",33.98071305,-117.33261035,edu,
+e50682179979e32c8d916c6c289d12d35cc0d0b2,Adversarial Perturbations Against Real-Time Video Classification Systems,"University of California, Riverside","University of California, Riverside","University of California, Riverside, Linden Street, Riverside, Riverside County, California, 92521, USA",33.98071305,-117.33261035,edu,
+e50682179979e32c8d916c6c289d12d35cc0d0b2,Adversarial Perturbations Against Real-Time Video Classification Systems,"University of California, Riverside","University of California, Riverside","University of California, Riverside, Linden Street, Riverside, Riverside County, California, 92521, USA",33.98071305,-117.33261035,edu,
+e50682179979e32c8d916c6c289d12d35cc0d0b2,Adversarial Perturbations Against Real-Time Video Classification Systems,"University of California, Riverside","University of California, Riverside","University of California, Riverside, Linden Street, Riverside, Riverside County, California, 92521, USA",33.98071305,-117.33261035,edu,
+e50682179979e32c8d916c6c289d12d35cc0d0b2,Adversarial Perturbations Against Real-Time Video Classification Systems,"University of California, Riverside","University of California, Riverside","University of California, Riverside, Linden Street, Riverside, Riverside County, California, 92521, USA",33.98071305,-117.33261035,edu,
+e597aca96ea1c928f13d15b7c4b46e3d41861afe,Mitigation of Effects of Occlusion on Object Recognition with Deep Neural Networks through Low-Level Image Completion,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+e569f4bd41895028c4c009e5b46b935056188e91,"FISHER VECTOR FACES IN THE WILD 3 Facial landmark detection Aligned and cropped face Dense SIFT , GMM , and FV Discriminative dim",University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+e5b4700a615cde23b91be3eadf1c99642cd33e42,Joint Learning for Attribute-Consistent Person Re-Identification,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+e5fbffd3449a2bfe0acb4ec339a19f5b88fff783,Self-supervised learning of a facial attribute embedding from video,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+e5d53a335515107452a30b330352cad216f88fc3,Generalized Loss-Sensitive Adversarial Learning with Manifold Margins,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+e5533c70706109ee8d0b2a4360fbe73fd3b0f35d,"How Far are We from Solving the 2D & 3D Face Alignment Problem? (and a Dataset of 230,000 3D Facial Landmarks)",University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+e2e920dfcaab27528c6fa65b6613d9af24793cb0,A comprehensive evaluation of multiband-accelerated sequences and their effects on statistical outcome measures in fMRI,University of Arizona,University of Arizona,"University of Arizona, North Highland Avenue, Rincon Heights, Barrio Viejo, Tucson, Pima County, Arizona, 85721, USA",32.23517260,-110.95095832,edu,
+e2e920dfcaab27528c6fa65b6613d9af24793cb0,A comprehensive evaluation of multiband-accelerated sequences and their effects on statistical outcome measures in fMRI,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+e2e920dfcaab27528c6fa65b6613d9af24793cb0,A comprehensive evaluation of multiband-accelerated sequences and their effects on statistical outcome measures in fMRI,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+e2e920dfcaab27528c6fa65b6613d9af24793cb0,A comprehensive evaluation of multiband-accelerated sequences and their effects on statistical outcome measures in fMRI,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+e2e920dfcaab27528c6fa65b6613d9af24793cb0,A comprehensive evaluation of multiband-accelerated sequences and their effects on statistical outcome measures in fMRI,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+e22adcd2a6a7544f017ec875ce8f89d5c59e09c8,Gender Privacy: An Ensemble of Semi Adversarial Networks for Confounding Arbitrary Gender Classifiers,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+e2b2bb648cfb60ee18bd66bc6e8a6f9daf7c9d74,Improving Context Modelling in Multimodal Dialogue Generation,Heriot-Watt University,Heriot-Watt University,"Heriot-Watt University - Edinburgh Campus, Third Gait, Currie, Gogarbank, City of Edinburgh, Scotland, EH14 4AS, UK",55.91029135,-3.32345777,edu,
+e2b615e3b78aa18c293e7f03eb96591ccb721b55,Recurrent Segmentation for Variable Computational Budgets,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+e212b2bc41645fe467a73d004067fcf1ca77d87f,Deep Active Contours,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+e2888084b375163f7c956adff102fdbc9fe7fb40,Hierarchical Label Inference for Video Classification,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+e2888084b375163f7c956adff102fdbc9fe7fb40,Hierarchical Label Inference for Video Classification,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+e2888084b375163f7c956adff102fdbc9fe7fb40,Hierarchical Label Inference for Video Classification,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+e26b87ff2e1553f4ee0d8b657295187abc6f312c,Whodunnit? Crime Drama as a Case for Natural Language Understanding,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+e2fb33d0ba0fe5e0c33b576e090b10fa4741d12d,Anonymizing k Facial Attributes via Adversarial Perturbations,"IIIT Delhi, India","IIIT Delhi, India","Okhla Industrial Estate, Phase III, Near Govind Puri Metro Station, New Delhi, Delhi 110020, India",28.54562820,77.27315050,edu,
+e28915617dcad57c84f5feb2b93763548a44defd,Action-Affect Classification and Morphing using Multi-Task Representation Learning,SRI International,SRI International,"SRI International Building, West 1st Street, Menlo Park, San Mateo County, California, 94025, USA",37.45857960,-122.17560525,edu,
+e2d1e72fdb7e0b7a3ebb9ddc4cc161566ab74de2,Person Search via a Mask-Guided Two-Stream CNN Model,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+e2d1e72fdb7e0b7a3ebb9ddc4cc161566ab74de2,Person Search via a Mask-Guided Two-Stream CNN Model,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+e20ab84ac7fa0a5d36d4cf2266b7065c60e1c804,Stacked U-Nets for Ground Material Segmentation in Remote Sensing Imagery,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+e293a31260cf20996d12d14b8f29a9d4d99c4642,LR-GAN: Layered Recursive Generative Adversarial Networks for Image Generation,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+e2b4a1747e66f72baae9929f908ab064a4263f9e,WebCaricature: a benchmark for caricature face recognition,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+e2b4a1747e66f72baae9929f908ab064a4263f9e,WebCaricature: a benchmark for caricature face recognition,University of Manchester,University of Manchester,"University of Manchester - Main Campus, Brunswick Street, Curry Mile, Ardwick, Manchester, Greater Manchester, North West England, England, M13 9NR, UK",53.46600455,-2.23300881,edu,
+e24ab3b2a7b5938a48ea3c8c4bc29be2b02299fb,A Face Recognition System for Automated Door Opening with parallel Health Status Validation Using the Kinect v2,Keio University,Keio University,"綱島市民の森, けつわり坂, 港北区, 横浜市, 神奈川県, 関東地方, 223-0053, 日本",35.54169690,139.63471840,edu,
+e2113e6c136c87802a35e75122db7e4e57c9774d,Grounding Referring Expressions in Images by Variational Context,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+e24a5e843d2ea999393b9f278f4b5c80f8a651d1,Learning to Learn with Compound HD Models,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+e22022de2db3432b3d77a49180b58d29058750d2,3D GLOH features for human action recognition,Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.48799610,-3.17969747,edu,
+e22022de2db3432b3d77a49180b58d29058750d2,3D GLOH features for human action recognition,Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.48799610,-3.17969747,edu,
+e22022de2db3432b3d77a49180b58d29058750d2,3D GLOH features for human action recognition,Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.48799610,-3.17969747,edu,
+e2272f50ffa33b8e41509e4b795ad5a4eb27bb46,Region-based semantic segmentation with end-to-end training,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+e20abf7143f4a224824c3db7213049dee2573b4e,An investigation of the relationship between activation of a social cognitive neural network and social functioning.,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+e2b093c6ebe4352ba9a1b281c621b798aae8d71c,NNEval: Neural Network Based Evaluation Metric for Image Captioning,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+e225a7cbfce4f7c9c29507c04190e6d6b6b46f7f,Label Denoising Adversarial Network ( LDAN ) for Inverse Lighting of Faces,University of Maryland College Park,University of Maryland College Park,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",38.99203005,-76.94610290,edu,
+e20e2db743e8db1ff61279f4fda32bf8cf381f8e,Deep Cross Polarimetric Thermal-to-Visible Face Recognition,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+e205faa8febcb7e33c482b00f84939b153575292,An information theoretic formulation of the Dictionary Learning and Sparse Coding Problems on Statistical Manifolds,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+e217382ceed42605eb2a9b570c55f9622635e111,Efficient Clothing Retrieval with Semantic-Preserving Visual Phrases,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+e217382ceed42605eb2a9b570c55f9622635e111,Efficient Clothing Retrieval with Semantic-Preserving Visual Phrases,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+f46097e264c7b0e47c4b1d1b476e5e6c1db9cc30,Bird Nest Images Classification Based on Canny Edge Detection and Local Binary Pattern,University of Delaware,University of Delaware,"University of Delaware, South College Avenue, Newark, New Castle County, Delaware, 19713, USA",39.68103280,-75.75401840,edu,
+f437b3884a9e5fab66740ca2a6f1f3a5724385ea,Human identification technical challenges,DARPA,DARPA,"3701 Fairfax Dr, Arlington, VA 22203, USA",38.88334130,-77.10459770,mil,"3701 N. Fairfax Dr., Arlington, VA 22203"
+f42d3225afd9e463ddb7a355f64b54af8bd14227,Stacked U-Nets: A No-Frills Approach to Natural Image Segmentation,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+f43eeb578e0ca48abfd43397bbd15825f94302e4,Optical computer recognition of facial expressions associated with stress induced by performance demands.,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+f4b15fdcaaa3ad604b82df05f5d7f59dbcfe861d,An Event Reconstruction Tool for Conflict Monitoring Using Social Media,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+f4f9697f2519f1fe725ee7e3788119ed217dca34,Selfie-Presentation in Everyday Life: A Large-Scale Characterization of Selfie Contexts on Instagram,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+f4421adced24d729d5ed22559308c2b4719b44c2,End-to-end 3 D face reconstruction with deep neural networks,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+f4210309f29d4bbfea9642ecadfb6cf9581ccec7,An Agreement and Sparseness-based Learning Instance Selection and its Application to Subjective Speech Phenomena,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+f4210309f29d4bbfea9642ecadfb6cf9581ccec7,An Agreement and Sparseness-based Learning Instance Selection and its Application to Subjective Speech Phenomena,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+f4dc1ca2051dc191751eb92a753f028228134e62,In Defense of Single-column Networks for Crowd Counting,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+f4dc1ca2051dc191751eb92a753f028228134e62,In Defense of Single-column Networks for Crowd Counting,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+f4dc1ca2051dc191751eb92a753f028228134e62,In Defense of Single-column Networks for Crowd Counting,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+f4dc1ca2051dc191751eb92a753f028228134e62,In Defense of Single-column Networks for Crowd Counting,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+f4dc1ca2051dc191751eb92a753f028228134e62,In Defense of Single-column Networks for Crowd Counting,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+f499e84b489b0b4afe86e303803871700e561063,"A Framework for Fashion Data Gathering, Hierarchical-Annotation and Analysis for Social Media and Online Shop TOOLKIT FOR DETAILED STYLE ANNOTATIONS FOR ENHANCED FASHION RECOMMENDATION","KTH Royal Institute of Technology, Stockholm","KTH Royal Institute of Technology, Stockholm","KTH, Teknikringen, Lärkstaden, Norra Djurgården, Östermalms stadsdelsområde, Sthlm, Stockholm, Stockholms län, Svealand, 114 28, Sverige",59.34986645,18.07063213,edu,
+f4065d13bcad78b563108075f650c29a2f3f1917,Cost Effective Conceptual Design for Semantic Annotation,Oregon State University,Oregon State University,"OSU Beaver Store, 538, Southwest 6th Avenue, Portland Downtown, Portland, Multnomah County, Oregon, 97204, USA",45.51982890,-122.67797964,edu,
+f4065d13bcad78b563108075f650c29a2f3f1917,Cost Effective Conceptual Design for Semantic Annotation,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+f4557028562003c13eeca41b175dd4f4a03659bd,Part-based Deformable Object Detection with a Single Silhouette Sketch,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+f4557028562003c13eeca41b175dd4f4a03659bd,Part-based Deformable Object Detection with a Single Silhouette Sketch,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+f408ee71b9db38ec1b1b785057d50d6e0d9b30ba,LiDAR-Video Driving Dataset : Learning Driving Policies Effectively,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+f4d30896c5f808a622824a2d740b3130be50258e,"DS++: A flexible, scalable and provably tight relaxation for matching problems",Weizmann Institute of Science,Weizmann Institute of Science,"מכון ויצמן למדע, שדרת מרכוס זיו, מעונות שיין, אחוזות הנשיא, רחובות, מחוז המרכז, NO, ישראל",31.90784990,34.81334092,edu,
+f4aed1314b2d38fd8f1b9d2bc154295bbd45f523,Subspace Clustering using Ensembles of $K$-Subspaces,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+f4c029044afa6cb3b08d5e47701d532b3aed9a40,A Clustering Based Approach for Realistic and Efficient Data-Driven Crowd Simulation,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+f442e57e13d1da68723d68cb68d7c78e1788cc7f,Running head: AUTOMATIC MENTAL STATE DETECTION 1 Automated Mental State Detection for Mental Healthcare,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+f3ca2c43e8773b7062a8606286529c5bc9b3ce25,Deep Clustering via Joint Convolutional Autoencoder Embedding and Relative Entropy Minimization,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+f3ca2c43e8773b7062a8606286529c5bc9b3ce25,Deep Clustering via Joint Convolutional Autoencoder Embedding and Relative Entropy Minimization,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+f3ca2c43e8773b7062a8606286529c5bc9b3ce25,Deep Clustering via Joint Convolutional Autoencoder Embedding and Relative Entropy Minimization,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+f3ca2c43e8773b7062a8606286529c5bc9b3ce25,Deep Clustering via Joint Convolutional Autoencoder Embedding and Relative Entropy Minimization,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+f3a57b32a53db39b188879c4ce2c22d6929f43e0,SOT for MOT,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+f3a57b32a53db39b188879c4ce2c22d6929f43e0,SOT for MOT,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+f3a57b32a53db39b188879c4ce2c22d6929f43e0,SOT for MOT,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+f3a57b32a53db39b188879c4ce2c22d6929f43e0,SOT for MOT,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+f3a57b32a53db39b188879c4ce2c22d6929f43e0,SOT for MOT,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+f39b94e1ab8beeaf05f28c7bbc08664b7c37ed8c,Cross-domain attribute representation based on convolutional neural network,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+f39b94e1ab8beeaf05f28c7bbc08664b7c37ed8c,Cross-domain attribute representation based on convolutional neural network,Soochow University,Soochow University,"苏州大学(天赐庄校区), 清荫路, 钟楼社区, 双塔街道, 姑苏区, 苏州市, 江苏省, 215001, 中国",31.30709510,120.63573987,edu,
+f33ef5b2707078528f23e067565f992f4b03f4a7,Actor and Observer: Joint Modeling of First and Third-Person Videos,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+f3a9baff7b059c528a7f72dd458db569892ee29c,"Spectral Matching, Learning, and Inference using Pairwise Interactions",Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+f3a9baff7b059c528a7f72dd458db569892ee29c,"Spectral Matching, Learning, and Inference using Pairwise Interactions",Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+f3a9baff7b059c528a7f72dd458db569892ee29c,"Spectral Matching, Learning, and Inference using Pairwise Interactions",University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+f3fc96377dc3456948fd3431ce940258926ce04e,Advancing Bag-of-Visual-Words Representations for Lesion Classification in Retinal Images,Khalifa University,Khalifa University,"Khalifa University, شارع طَوِي مُوَيلِح, قصر الشاطئ, حدبة الزَّعْفرانة, أبوظبي, أبو ظبي, 31757, الإمارات العربية المتحدة",24.44690250,54.39425630,edu,
+f35acbb0b2870e5735561196d246463aec8ae7aa,Representations and Techniques for 3D Object Recognition and Scene Interpretation,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+f35acbb0b2870e5735561196d246463aec8ae7aa,Representations and Techniques for 3D Object Recognition and Scene Interpretation,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+f38ad869023c43b59431a3bb55f2fe8fb6ff0f05,A systematic review and meta-analysis of the fMRI investigation of autism spectrum disorders.,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+f38ad869023c43b59431a3bb55f2fe8fb6ff0f05,A systematic review and meta-analysis of the fMRI investigation of autism spectrum disorders.,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+f38ad869023c43b59431a3bb55f2fe8fb6ff0f05,A systematic review and meta-analysis of the fMRI investigation of autism spectrum disorders.,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+f3880d1067915bbbaa0c47a736f46f488185250e,Illumination Normalization for Outdoor Face Recognition by Using Ayofa-filters,Osaka University,Osaka University,"大阪大学清明寮, 服部西町四丁目, 豊中市, 大阪府, 近畿地方, 日本",34.80809035,135.45785218,edu,
+f3880d1067915bbbaa0c47a736f46f488185250e,Illumination Normalization for Outdoor Face Recognition by Using Ayofa-filters,Osaka University,Osaka University,"大阪大学清明寮, 服部西町四丁目, 豊中市, 大阪府, 近畿地方, 日本",34.80809035,135.45785218,edu,
+f3880d1067915bbbaa0c47a736f46f488185250e,Illumination Normalization for Outdoor Face Recognition by Using Ayofa-filters,Nagoya University,Nagoya University,"SuperDARN (Hokkaido West), 太辛第1支線林道, 陸別町, 足寄郡, 十勝総合振興局, 北海道, 北海道地方, 日本",43.53750985,143.60768225,edu,
+f3880d1067915bbbaa0c47a736f46f488185250e,Illumination Normalization for Outdoor Face Recognition by Using Ayofa-filters,Osaka University,Osaka University,"大阪大学清明寮, 服部西町四丁目, 豊中市, 大阪府, 近畿地方, 日本",34.80809035,135.45785218,edu,
+f3015be0f9dbc1a55b6f3dc388d97bb566ff94fe,A Study on the Effective Approach to Illumination-Invariant Face Recognition Based on a Single Image,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+f302185b1416d8b47620c67b3942a8675bbb4679,Domain Adapted Word Embeddings for Improved Sentiment Classification,University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+f3a59d85b7458394e3c043d8277aa1ffe3cdac91,Query-Free Attacks on Industry-Grade Face Recognition Systems under Resource Constraints,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+f3ec43a7b22f6e5414fec473acda8ffd843e7baf,A Coupled Evolutionary Network for Age Estimation,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+f39fc7c420277616eea29754d0d367297c6f02c1,Feature Extraction Based on Co-occurrence of Adjacent Local Binary Patterns,University of Tsukuba,University of Tsukuba,"University of Tsukuba, つばき通り, Kananemoto-satsukabe village, つくば市, 茨城県, 関東地方, 305-8377, 日本",36.11120580,140.10551760,edu,
+f3f77b803b375f0c63971b59d0906cb700ea24ed,Feature Extraction for Facial Expression Recognition based on Hybrid Face Regions,RMIT University,RMIT University,"RMIT University, 124, La Trobe Street, Melbourne City, City of Melbourne, Victoria, 3000, Australia",-37.80874650,144.96388750,edu,
+f39d3ed10131f986be5fb8a10b77d44bc9feada8,Boosting with Side Information,GE Global Research Center,GE Global Research Center,"GE Global Research Center, Aqueduct, Niskayuna, Schenectady County, New York, USA",42.82982480,-73.87719385,edu,
+f39d3ed10131f986be5fb8a10b77d44bc9feada8,Boosting with Side Information,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+f3df296de36b7c114451865778e211350d153727,Spatio-Temporal Facial Expression Recognition Using Convolutional Neural Networks and Conditional Random Fields,University of Denver,University of Denver,"University of Denver, Driscoll Bridge, Denver, Denver County, Colorado, 80208, USA",39.67665410,-104.96220300,edu,
+f3906c390e378ece7f785fb553e0b89c2cbfeeb2,Automatic 3D Facial Region Retrieval from Multi-pose Facial Datasets,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+f3fed71cc4fc49b02067b71c2df80e83084b2a82,Learning Sparse Latent Representations with the Deep Copula Information Bottleneck,University of Basel,University of Basel,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra",47.56126510,7.57529610,edu,
+f312fce73aabd97bf4fc02fe2829f6959e251b1e,Runtime Support for Human-in-the-Loop Feature Engineering System,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+f312fce73aabd97bf4fc02fe2829f6959e251b1e,Runtime Support for Human-in-the-Loop Feature Engineering System,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+f312fce73aabd97bf4fc02fe2829f6959e251b1e,Runtime Support for Human-in-the-Loop Feature Engineering System,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+f367feef8f486966916bd0769de8c7b5a59250b1,Direct Shot Correspondence Matching,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+f367feef8f486966916bd0769de8c7b5a59250b1,Direct Shot Correspondence Matching,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+f3e2fd2388c33b09df32c29f381e71b48dc227ab,Learning Hybrid Part Filters for Scene Recognition,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+f35a493afa78a671b9d2392c69642dcc3dd2cdc2,Automatic Attribute Discovery with Neural Activations,University of North Carolina at Chapel Hill,University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.91139710,-79.05045290,edu,
+f35a493afa78a671b9d2392c69642dcc3dd2cdc2,Automatic Attribute Discovery with Neural Activations,Tohoku University,Tohoku University,"Tohoku University, 五橋通, 青葉区, 仙台市, 宮城県, 東北地方, 980-0811, 日本",38.25309450,140.87365930,edu,
+f397b8c835425e4b18cc7d9088b7f810c6cf2563,Yimo Guo IMAGE AND VIDEO ANALYSIS BY LOCAL DESCRIPTORS AND DEFORMABLE IMAGE REGISTRATION,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+f328137ba1924c8b451be32c7bd8d1d9a5c392d6,Relative Attribute Learning with Deep Attentive Cross-image Representation,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+eb100638ed73b82e1cce8475bb8e180cb22a09a2,Temporal Action Detection with Structured Segment Networks,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+eb0cf5727275f89323e2cbb4c0f0515b8ece75f8,A hybrid probabilistic neural model for person tracking based on a ceiling-mounted camera,Universität Hamburg,Universität Hamburg,"Informatikum, 30, Vogt-Kölln-Straße, Stellingen, Eimsbüttel, Hamburg, 22527, Deutschland",53.59948200,9.93353436,edu,
+ebb527dad52f28610f9153952c10a95d8f01f5f9,How Can Selection of Biologically Inspired Features Improve the Performance of a Robust Object Recognition Model?,Amirkabir University of Technology,Amirkabir University of Technology,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎",35.70451400,51.40972058,edu,
+eb6ee56e085ebf473da990d032a4249437a3e462,Age/gender classification with whole-component convolutional neural networks (WC-CNN),University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+eb8519cec0d7a781923f68fdca0891713cb81163,Temporal Non-volume Preserving Approach to Facial Age-Progression and Age-Invariant Face Recognition,Concordia University,Concordia University,"Concordia University, 2811, Northeast Holman Street, Concordia, Portland, Multnomah County, Oregon, 97211, USA",45.57022705,-122.63709346,edu,
+eb8519cec0d7a781923f68fdca0891713cb81163,Temporal Non-volume Preserving Approach to Facial Age-Progression and Age-Invariant Face Recognition,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+ebb1c29145d31c4afa3c9be7f023155832776cd3,CASME II: An Improved Spontaneous Micro-Expression Database and the Baseline Evaluation,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+ebb1c29145d31c4afa3c9be7f023155832776cd3,CASME II: An Improved Spontaneous Micro-Expression Database and the Baseline Evaluation,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+ebb1c29145d31c4afa3c9be7f023155832776cd3,CASME II: An Improved Spontaneous Micro-Expression Database and the Baseline Evaluation,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+eb22aecb7b59ab01acdf498b33f5ba9ef1b64f64,A Multiple Motion Model Tracker Handling Occlusion and Rapid Motion Variation,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+eb43002e771de05db5e3e7e8eb6fcc75de0e30c4,Dataset Localization / Segmentation By dCNN Top Garments Bottom Garments Feature Extraction Feature Extraction Top Bottom A Table of Joint Distribution Inventory Dataset Localization / Segmentation By dCNN Feature Extraction Query garment Localization / Segmentation By dCNN Feature Extraction Recomm,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+eb43002e771de05db5e3e7e8eb6fcc75de0e30c4,Dataset Localization / Segmentation By dCNN Top Garments Bottom Garments Feature Extraction Feature Extraction Top Bottom A Table of Joint Distribution Inventory Dataset Localization / Segmentation By dCNN Feature Extraction Query garment Localization / Segmentation By dCNN Feature Extraction Recomm,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+ebb5eedb6ce41317971885ff33da17ae2c9e8f7a,Disguised Face Identification (DFI) with Facial KeyPoints Using Spatial Fusion Convolutional Network,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+ebb5eedb6ce41317971885ff33da17ae2c9e8f7a,Disguised Face Identification (DFI) with Facial KeyPoints Using Spatial Fusion Convolutional Network,Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.02223470,77.56718325,edu,
+eb2d1d406405537773e70f7e949df656ee8779aa,ShapeLearner: Towards Shape-Based Visual Knowledge Harvesting,Shandong University,Shandong University,"山东大学, 泰安街, 鳌山卫街道, 即墨区, 青岛市, 山东省, 266200, 中国",36.36934730,120.67381800,edu,
+ebca525383c4c451e97e801f2e2532d65e88dfeb,A Nonlinear Orthogonal Non-Negative Matrix Factorization Approach to Subspace Clustering,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+eb70c38a350d13ea6b54dc9ebae0b64171d813c9,"On Graph-Structured Discrete Labelling Problems in Computer Vision : Learning , Inference and Applications",Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+eb70c38a350d13ea6b54dc9ebae0b64171d813c9,"On Graph-Structured Discrete Labelling Problems in Computer Vision : Learning , Inference and Applications",Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+eb70c38a350d13ea6b54dc9ebae0b64171d813c9,"On Graph-Structured Discrete Labelling Problems in Computer Vision : Learning , Inference and Applications",Banaras Hindu University,Banaras Hindu University,"काशी हिन्दू विश्वविद्यालय, Semi Circle Road 2, ワーラーナシー, Jodhpur Colony, Vārānasi, Varanasi, Uttar Pradesh, 221005, India",25.26628870,82.99279690,edu,
+eb70c38a350d13ea6b54dc9ebae0b64171d813c9,"On Graph-Structured Discrete Labelling Problems in Computer Vision : Learning , Inference and Applications",Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+eb9c20e96f22b9c890f7978878c5479d9e64bb47,Learning descriptive models of objects and activities from egocentric video,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+eb1208a7f535de6c6180e4dbeb6eef2a27500c52,"To be or Not to be Threatening, but What was the Question? Biased Face Evaluation in Social Anxiety and Depression Depends on How You Frame the Query",Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+eb027969f9310e0ae941e2adee2d42cdf07d938c,VGGFace2: A Dataset for Recognising Faces across Pose and Age,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+ebe8408052d9bf05dc2007d01559dda6129840eb,Where to Focus: Deep Attention-based Spatially Recurrent Bilinear Networks for Fine-Grained Visual Recognition,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+ebe8408052d9bf05dc2007d01559dda6129840eb,Where to Focus: Deep Attention-based Spatially Recurrent Bilinear Networks for Fine-Grained Visual Recognition,University of New South Wales,University of New South Wales,"UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia",-33.91758275,151.23124025,edu,
+ebd28f04c2ab1e61430d309ecbf7c832173d65a5,Feedback based Neural Networks,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+ebd28f04c2ab1e61430d309ecbf7c832173d65a5,Feedback based Neural Networks,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+ebd28f04c2ab1e61430d309ecbf7c832173d65a5,Feedback based Neural Networks,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+eb7b387a3a006609b89ca5ed0e6b3a1d5ecb5e5a,Facial Expression Recognition using Neural Network,National Cheng Kung University,National Cheng Kung University,"成大, 1, 大學路, 大學里, 前甲, 東區, 臺南市, 70101, 臺灣",22.99919160,120.21625134,edu,
+eba3fd6a446cb043c0347c9b4ce40567f1ce9110,Multi-task Relative Attributes Prediction by Incorporating Local Context and Global Style Information Features,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+eba0510f6d34320857b0554627b5f2925553f820,MouseFree Vision-Based Human-Computer Interaction through Real-Time Hand Tracking and Gesture Recognition,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+eba0510f6d34320857b0554627b5f2925553f820,MouseFree Vision-Based Human-Computer Interaction through Real-Time Hand Tracking and Gesture Recognition,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+eba0510f6d34320857b0554627b5f2925553f820,MouseFree Vision-Based Human-Computer Interaction through Real-Time Hand Tracking and Gesture Recognition,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+ebad62ebe00fe0f0c19ce04c3f7250506137fc71,Evaluating Auto-adaptation Methods for Fine-Grained Adaptable Processors,Delft University of Technology,Delft University of Technology,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland",51.99882735,4.37396037,edu,
+eb91eb5912de3d15f052a94cd0a188f553df90e7,On Detecting Domestic Abuse via Faces,"IIIT Delhi, India","IIIT Delhi, India","Okhla Industrial Estate, Phase III, Near Govind Puri Metro Station, New Delhi, Delhi 110020, India",28.54562820,77.27315050,edu,
+eb4d7688cd03f3863a175149f5fa293140f9df30,On classification of distorted images with deep convolutional neural networks,Singapore University of Technology and Design,Singapore University of Technology and Design,"Singapore University of Technology and Design, Simpang Bedok, Changi Business Park, Southeast, 486041, Singapore",1.34021600,103.96508900,edu,
+ebe44c125f6d5c893df73d20b602e479a38e5b23,Algorithmic Identification of Looted Archaeological Sites from Space,Dartmouth College,Dartmouth College,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA",43.70479270,-72.29259090,edu,
+c71f36c9376d444075de15b1102b4974481be84d,"3D morphable models : data pre-processing, statistical analysis and fitting",University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+c7fc1a9dd3c0b2653b0c9ff668cafaff7670da92,An Image-Based Bayesian Framework for Face Detection,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+c7c53d75f6e963b403057d8ba5952e4974a779ad,Aging effects in automated face recognition,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+c7c53d75f6e963b403057d8ba5952e4974a779ad,Aging effects in automated face recognition,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+c79cf7f61441195404472102114bcf079a72138a,Pose-Invariant 2 D Face Recognition by Matching Using Graphical Models,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+c79cf7f61441195404472102114bcf079a72138a,Pose-Invariant 2 D Face Recognition by Matching Using Graphical Models,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+c73dd452c20460f40becb1fd8146239c88347d87,Manifold Constrained Low-Rank Decomposition,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+c79eb3b4d8324eb824493e53bfcb4d3980398523,Tracking people within groups with RGB-D data,University of Padova,University of Padova,"Via Giovanni Gradenigo, 6, 35131 Padova PD, Italy",45.40811720,11.89437860,edu,"University of Padova, Via Gradenigo, 6 - 35131- Padova, Italy"
+c79178a47403f317f837e4a8aa9fd03bfed1dfc7,Multiclass object detection by combining local appearances and context,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+c79178a47403f317f837e4a8aa9fd03bfed1dfc7,Multiclass object detection by combining local appearances and context,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+c72ac3dec0d0b2d5ca4945b07bd6b72c365bdc13,Shorter spontaneous fixation durations in infants with later emerging autism,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+c733e4a14b51623120da9b4571b4409bc99ab0cd,Mainstream: Dynamic Stem-Sharing for Multi-Tenant Video Processing,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+c72e6992f44ce75a40f44be4365dc4f264735cfb,Story Understanding in Video Advertisements,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+c74aba9a096379b3dbe1ff95e7af5db45c0fd680,Neuro-Fuzzy Analysis of Facial Action Units and Expressions,Sharif University of Technology,Sharif University of Technology,"دانشگاه صنعتی شریف, خیابان آزادی, زنجان, منطقه ۹ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, 14588, ‏ایران‎",35.70362270,51.35125097,edu,
+c7756864268459069a59c2276cf482377d5f997a,TorontoCity: Seeing the World with a Million Eyes,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+c784d4918ad33f4dd2991155ea583b4789ba3c11,Bimodal Vein Recognition Based on Task-Specific Transfer Learning,China University of Mining and Technology,China University of Mining and Technology,"China University of Mining and Technology, 1号, 大学路, 泉山区 (Quanshan), 徐州市 / Xuzhou, 江苏省, 221116, 中国",34.21525380,117.13985410,edu,
+c71db5d3546e22227662ee0f0ce586495ef18899,SALICON: Saliency in Context,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+c7dd846c0abc896e5fd0940ac07927553cc55734,Neurofunctional Underpinnings of Audiovisual Emotion Processing in Teens with Autism Spectrum Disorders,McMaster University,McMaster University,"McMaster University, Westdale, Hamilton, Ontario, Canada",43.26336945,-79.91809684,edu,
+c7dd846c0abc896e5fd0940ac07927553cc55734,Neurofunctional Underpinnings of Audiovisual Emotion Processing in Teens with Autism Spectrum Disorders,McMaster University,McMaster University,"McMaster University, Westdale, Hamilton, Ontario, Canada",43.26336945,-79.91809684,edu,
+c7dd846c0abc896e5fd0940ac07927553cc55734,Neurofunctional Underpinnings of Audiovisual Emotion Processing in Teens with Autism Spectrum Disorders,Yale University,Yale University,"Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA",41.25713055,-72.98966960,edu,
+c7dd846c0abc896e5fd0940ac07927553cc55734,Neurofunctional Underpinnings of Audiovisual Emotion Processing in Teens with Autism Spectrum Disorders,McMaster University,McMaster University,"McMaster University, Westdale, Hamilton, Ontario, Canada",43.26336945,-79.91809684,edu,
+c7c61d35025943031a0cefeece9a9215fd4019e5,Egocentric Visual Event Classification with Location-Based Priors,University of Bristol,University of Bristol,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK",51.45848370,-2.60977520,edu,
+c75ba6ef724c0c3a9c9510a70da4cc8729b59a35,FaceWarehouse: A 3D Facial Expression Database for Visual Computing,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+c75ba6ef724c0c3a9c9510a70da4cc8729b59a35,FaceWarehouse: A 3D Facial Expression Database for Visual Computing,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+c753521ba6fb06c12369d6fff814bb704c682ef5,Mancs: A Multi-task Attentional Network with Curriculum Sampling for Person Re-Identification,Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+c76b611a986a2e09df22603d93b2d9125aaff369,Generating Self-Guided Dense Annotations for Weakly Supervised Semantic Segmentation,National Tsing Hua University,National Tsing Hua University,"國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.79254840,120.99511830,edu,
+c7c5f0fe1fcaf3787c7f78f7dc62f3497dcfdf3c,The Impact of Product Photo on Online Consumer Purchase Intention: an Image-Processing Enabled Empirical Study,City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+c7c5f0fe1fcaf3787c7f78f7dc62f3497dcfdf3c,The Impact of Product Photo on Online Consumer Purchase Intention: an Image-Processing Enabled Empirical Study,City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+c7c5f0fe1fcaf3787c7f78f7dc62f3497dcfdf3c,The Impact of Product Photo on Online Consumer Purchase Intention: an Image-Processing Enabled Empirical Study,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+c7752341b28a0ff96e8b63986afc669fada6cd50,Thinking Outside the Box: Spatial Anticipation of Semantic Categories,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+c71217b2b111a51a31cf1107c71d250348d1ff68,One Network to Solve Them All — Solving Linear Inverse Problems Using Deep Projection Models,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+c72b063e23b8b45b57a42ebc2f9714297c539a6f,TieNet: Text-Image Embedding Network for Common Thorax Disease Classification and Reporting in Chest X-rays,National Institutes of Health,National Institutes of Health,"NIH, Pooks Hill, Bethesda, Montgomery County, Maryland, USA",39.00041165,-77.10327775,edu,
+c793a38c3d16b093c12ba8a9d12dfa88159ecd38,Neurons in the fusiform gyrus are fewer and smaller in autism.,Maastricht University,Maastricht University,"UNS60, Professor Ten Hoorlaan, Randwyck, Maastricht, Limburg, Nederland, 6229EV, Nederland",50.83367120,5.71589000,edu,
+c793a38c3d16b093c12ba8a9d12dfa88159ecd38,Neurons in the fusiform gyrus are fewer and smaller in autism.,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+c793a38c3d16b093c12ba8a9d12dfa88159ecd38,Neurons in the fusiform gyrus are fewer and smaller in autism.,Maastricht University,Maastricht University,"UNS60, Professor Ten Hoorlaan, Randwyck, Maastricht, Limburg, Nederland, 6229EV, Nederland",50.83367120,5.71589000,edu,
+c76f64e87f88475069f7707616ad9df1719a6099,T-RECS: Training for Rate-Invariant Embeddings by Controlling Speed for Action Recognition,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+c76eee7a6656664bc37890f3754ae202255ffff3,Matching 3D Faces with Partial Data,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+c7f0c0636d27a1d45b8fcef37e545b902195d937,Towards Around-Device Interaction using Corneal Imaging,Coburg University,Coburg University,"Hochschule für angewandte Wissenschaften Coburg, 2, Friedrich-Streib-Straße, Callenberg, Coburg, Oberfranken, Bayern, 96450, Deutschland",50.26506145,10.95196483,edu,
+c7f0c0636d27a1d45b8fcef37e545b902195d937,Towards Around-Device Interaction using Corneal Imaging,Coburg University,Coburg University,"Hochschule für angewandte Wissenschaften Coburg, 2, Friedrich-Streib-Straße, Callenberg, Coburg, Oberfranken, Bayern, 96450, Deutschland",50.26506145,10.95196483,edu,
+c71b0ed402437470f229b3fdabb88ad044c092ea,Dynamic Conditional Networks for Few-Shot Learning,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+c71b0ed402437470f229b3fdabb88ad044c092ea,Dynamic Conditional Networks for Few-Shot Learning,"National University of Defense Technology, China","National University of Defence Technology, Changsha 410000, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.22902090,112.99483204,edu,
+c74b1643a108939c6ba42ae4de55cb05b2191be5,Non-negative Matrix Factorization for Face Illumination Analysis,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+c74b1643a108939c6ba42ae4de55cb05b2191be5,Non-negative Matrix Factorization for Face Illumination Analysis,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+c74b1643a108939c6ba42ae4de55cb05b2191be5,Non-negative Matrix Factorization for Face Illumination Analysis,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+c75e6ce54caf17b2780b4b53f8d29086b391e839,"ExpNet: Landmark-Free, Deep, 3D Facial Expressions",Open University of Israel,Open University of Israel,"האוניברסיטה הפתוחה, 15, אבא חושי, חיפה, גבעת דאונס, חיפה, מחוז חיפה, NO, ישראל",32.77824165,34.99565673,edu,
+c747c45c2fb3d678954bf1a16a3d9cc4dd4b8f01,Allelic Variation in the Oxytocin Receptor Gene and Early- Emerging Social Behaviors in Boys and Girls,University of Tampere,University of Tampere,"Tampereen yliopisto, 4, Kalevantie, Ratinanranta, Tulli, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33100, Suomi",61.49412325,23.77920678,edu,
+c0723e0e154a33faa6ff959d084aebf07770ffaf,Interpolation Between Eigenspaces Using Rotation in Multiple Dimensions,Nagoya University,Nagoya University,"SuperDARN (Hokkaido West), 太辛第1支線林道, 陸別町, 足寄郡, 十勝総合振興局, 北海道, 北海道地方, 日本",43.53750985,143.60768225,edu,
+c02c914de25034ecd2c3287c2e731ab1130e7bee,Multi-Scale Structure-Aware Network for Human Pose Estimation,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+c0387e788a52f10bf35d4d50659cfa515d89fbec,MARS: A Video Benchmark for Large-Scale Person Re-Identification,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+c0825a62bbf6a906ec812d0f668478f001c24279,Recognition at a long distance: Very low resolution face recognition and hallucination,"Academia Sinica, Taiwan","Research Center for Institute of Information Science, Academia Sinica, Taiwan","115, Taiwan, Taipei City, Nangang District, 研究院路二段128號",25.04117270,121.61465180,edu,
+c02cc6af3cc93e86e86fb66412212babda8fb858,Interocularly merged face percepts eliminate binocular rivalry,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+c0ef854f4119a74b37211aa4cc36b8c1addd9057,Training object class detectors with click supervision,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+c03e01717b2d93f04cce9b5fd2dcfd1143bcc180,Locality-Constrained Active Appearance Model,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+c03e01717b2d93f04cce9b5fd2dcfd1143bcc180,Locality-Constrained Active Appearance Model,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+c031d1792f088c4feca14ed8ee05423a7f77fe8d,Cardinal sparse partial least square feature selection and its application in face recognition,Tampere University of Technology,Tampere University of Technology,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi",61.44964205,23.85877462,edu,
+c048513689fbba0a12a1ab9cb08ab3a533918519,Model Selection Within a Bayesian Approach to Extraction of Walker Motion,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+c0c80aeccb1628926738ea8f09d238061a8daa29,GBoost: A Generative Framework for Boosting with Applications to Real-Time Eye Coding,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+c06ca26b33ab2e9ce118ca02018be5834e8164a6,Robust Ear Recognition Using Gradient Ordinal Relationship Pattern,Indian Institute of Technology Kanpur,Indian Institute of Technology Kanpur,"Indian Institute of Technology Kanpur, 4th Avenue, Panki, Kanpur, Kanpur Nagar, Uttar Pradesh, 208016, India",26.51318800,80.23651945,edu,
+c074dcc5000320ebf13e7a974befced1ab70a08f,Attentional Pooling for Action Recognition,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+c05f9e4979cb33090db984226ff3cff6e2dc1950,Counting in Dense Crowds using Deep Features,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+c035c193eed5d72c7f187f0bc880a17d217dada0,"Local Gradient Gabor Pattern (LGGP) with Applications in Face Recognition, Cross-spectral Matching and Soft Biometrics",West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+c035c193eed5d72c7f187f0bc880a17d217dada0,"Local Gradient Gabor Pattern (LGGP) with Applications in Face Recognition, Cross-spectral Matching and Soft Biometrics",Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+c0262e24324a6a4e6af5bd99fc79e2eb802519b3,Learning Scene-specific Object Detectors Based on a Generative-Discriminative Model with Minimal Supervision,Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+c0262e24324a6a4e6af5bd99fc79e2eb802519b3,Learning Scene-specific Object Detectors Based on a Generative-Discriminative Model with Minimal Supervision,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+c0ead9bada2fb7cdebf7dadbc8548d08387966ae,Young Adults with Autism Spectrum Disorder Show Early Atypical Neural Activity during Emotional Face Processing,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+c0ead9bada2fb7cdebf7dadbc8548d08387966ae,Young Adults with Autism Spectrum Disorder Show Early Atypical Neural Activity during Emotional Face Processing,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+c0ead9bada2fb7cdebf7dadbc8548d08387966ae,Young Adults with Autism Spectrum Disorder Show Early Atypical Neural Activity during Emotional Face Processing,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+c0ead9bada2fb7cdebf7dadbc8548d08387966ae,Young Adults with Autism Spectrum Disorder Show Early Atypical Neural Activity during Emotional Face Processing,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+c0006a2268d299644e9f1b455601bcbe89ddc2b5,Semantic Video Segmentation by Gated Recurrent Flow Propagation,Lund University,Lund University,"TEM at Lund University, 9, Klostergatan, Stadskärnan, Centrum, Lund, Skåne, Götaland, 22222, Sverige",55.70395710,13.19020110,edu,
+c0cdaeccff78f49f4604a6d263dc6eb1bb8707d5,MLP Neural Network Based Approach for Facial Expression Analysis,Kent State University,Kent State University,"Kent State University, Lester A. Lefton Esplanade, Whitehall Terrace, Kent, Portage County, Ohio, 44242-0001, USA",41.14435250,-81.33982833,edu,
+c00f402b9cfc3f8dd2c74d6b3552acbd1f358301,Learning deep representation from coarse to fine for face alignment,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+c089c7d8d1413b54f59fc410d88e215902e51638,TVParser: An automatic TV video parsing method,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+c0ee89dc2dad76147780f96294de9e421348c1f4,Efficiently detecting outlying behavior in video-game players,Korea University,Korea University,"고려대, 안암로, 제기동, 동대문구, 서울특별시, 02796, 대한민국",37.59014110,127.03623180,edu,
+c0ee89dc2dad76147780f96294de9e421348c1f4,Efficiently detecting outlying behavior in video-game players,Korea University,Korea University,"고려대, 안암로, 제기동, 동대문구, 서울특별시, 02796, 대한민국",37.59014110,127.03623180,edu,
+c0ca6b992cbe46ea3003f4e9b48f4ef57e5fb774,A Two-Layer Representation For Large-Scale Action Recognition,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+c01876292b5d1ce6e746fd2e2053453847905bb2,DF-Net: Unsupervised Joint Learning of Depth and Flow Using Cross-Task Consistency,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+c00df53bd46f78ae925c5768d46080159d4ef87d,Learning Bag-of-Features Pooling for Deep Convolutional Neural Networks,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+c09cd44a4413de704bd74d825ca435b742b73ded,Illumination Compensation and Enhancement for Face Recognition,Ocean University of China,Ocean University of China,"中国海洋大学, 238, 松岭路 Sōnglǐng Road, 朱家洼, 崂山区 (Laoshan), 青岛市, 山东省, 266100, 中国",36.16161795,120.49355276,edu,
+c0e0b878ec8c56679faccb3c3f5e2ae968182da5,A Multifactor Extension of Linear Discriminant Analysis for Face Recognition under Varying Pose and Illumination,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+c05441dd1bc418fb912a6fafa84c0659a6850bf0,Face recognition under varying illumination based on adaptive homomorphic eight local directional patterns,Utah State University,Utah State University,"Utah State University, Champ Drive, Logan, Cache County, Utah, 84322, USA",41.74115040,-111.81223090,edu,
+ee0d7a1dd5f0821b6f48113a283b9196a38d1c6c,"Show, Attend and Translate: Unsupervised Image Translation with Self-Regularization and Attention",University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+eea931e63c523599ba75524938a0be9ea36e9c2b,A Latent Clothing Attribute Approach for Human Pose Estimation,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+eecd9a070ed333077a066bfdcf776c51c2c74406,Deep image representations using caption generators,Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.02223470,77.56718325,edu,
+ee6b503ab512a293e3088fdd7a1c893a77902acb,Automatic Name-Face Alignment to Enable Cross-Media News Retrieval,University of North Carolina at Charlotte,University of North Carolina at Charlotte,"Lot 20, Poplar Terrace Drive, Charlotte, Mecklenburg County, North Carolina, 28223, USA",35.31034410,-80.73261617,edu,
+ee6b503ab512a293e3088fdd7a1c893a77902acb,Automatic Name-Face Alignment to Enable Cross-Media News Retrieval,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+ee661eb1d6ebfdef0d0b0784529221c951cd1188,"3D Human Sensing, Action and Emotion Recognition in Robot Assisted Therapy of Children with Autism",Lund University,Lund University,"TEM at Lund University, 9, Klostergatan, Stadskärnan, Centrum, Lund, Skåne, Götaland, 22222, Sverige",55.70395710,13.19020110,edu,
+eeb6d084f9906c53ec8da8c34583105ab5ab8284,Generation of Facial Expression Map using Supervised and Unsupervised Learning,Akita Prefectural University,Akita Prefectural University,"秋田県立大学, 秋田天王線, 潟上市, 秋田県, 東北地方, 011-0946, 日本",39.80114990,140.04591160,edu,
+eeb6d084f9906c53ec8da8c34583105ab5ab8284,Generation of Facial Expression Map using Supervised and Unsupervised Learning,Akita University,Akita University,"秋田大学手形キャンパス, 秋田八郎潟線, 手形字扇田, 広面, 秋田市, 秋田県, 東北地方, 010-0864, 日本",39.72781420,140.13322566,edu,
+ee815f60dc4a090fa9fcfba0135f4707af21420d,EAC-Net: A Region-Based Deep Enhancing and Cropping Approach for Facial Action Unit Detection,CUNY City College,CUNY City College,"CUNY City College, 205 East 42nd Street, New York, NY 10017",40.75065080,-73.97202550,edu,
+eed7920682789a9afd0de4efd726cd9a706940c8,Computers to Help with Conversations: Affective Framework to Enhance Human Nonverbal Skills,MIT,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+ee7093e91466b81d13f4d6933bcee48e4ee63a16,Discovering Person Identity via Large-Scale Observations,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+ee7093e91466b81d13f4d6933bcee48e4ee63a16,Discovering Person Identity via Large-Scale Observations,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+ee7034a5ef168f6bcb1b5892177870fc2563a646,Probabilistic State Space Decomposition for Human Motion Capture,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+ee0f87a93fee7a7dc8d13760464dbd6ce1526626,Constrained Semi-Supervised Learning Using Attributes and Comparative Attributes,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+ee89b903af1d8f26a8894a3773915c74f038883e,Half-CNN: A General Framework for Whole-Image Regression,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+ee89b903af1d8f26a8894a3773915c74f038883e,Half-CNN: A General Framework for Whole-Image Regression,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+ee5fa8ac1c33fcf9a10a185ae23f0ea0534e770f,Morpho-MNIST: Quantitative Assessment and Diagnostics for Representation Learning,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+ee5fa8ac1c33fcf9a10a185ae23f0ea0534e770f,Morpho-MNIST: Quantitative Assessment and Diagnostics for Representation Learning,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+ee418372b0038bd3b8ae82bd1518d5c01a33a7ec,CSE 255 Winter 2015 Assignment 1: Eye Detection using Histogram of Oriented Gradients and Adaboost Classifier,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+eee06d68497be8bf3a8aba4fde42a13aa090b301,CR-GAN: Learning Complete Representations for Multi-view Generation,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+eee06d68497be8bf3a8aba4fde42a13aa090b301,CR-GAN: Learning Complete Representations for Multi-view Generation,University of North Carolina at Charlotte,University of North Carolina at Charlotte,"Lot 20, Poplar Terrace Drive, Charlotte, Mecklenburg County, North Carolina, 28223, USA",35.31034410,-80.73261617,edu,
+eee4cc389ca85d23700cba9627fa11e5ee65d740,Adversarial Open-World Person Re-Identification,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+eedf9480de99e3373d2321f61ee5b71ea3ebf493,Altered Social Reward and Attention in Anorexia Nervosa,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+eedf9480de99e3373d2321f61ee5b71ea3ebf493,Altered Social Reward and Attention in Anorexia Nervosa,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+eedf9480de99e3373d2321f61ee5b71ea3ebf493,Altered Social Reward and Attention in Anorexia Nervosa,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+eedf9480de99e3373d2321f61ee5b71ea3ebf493,Altered Social Reward and Attention in Anorexia Nervosa,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+ee04c6c9c672fedf39f601a466f64a98541cbe19,Analysis and Improvement of Low Rank Representation for Subspace segmentation,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+eee2d2ac461f46734c8e674ae14ed87bbc8d45c6,Generalized Rank Pooling for Activity Recognition,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+eed93d2e16b55142b3260d268c9e72099c53d5bc,ICFVR 2017: 3rd international competition on finger vein recognition,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+c919a9f61656cdcd3a26076057ee006c48e8f609,High-Value Target Detection,University of Tartu,UNIVERSITY OF TARTU,"Paabel, University of Tartu, 17, Ülikooli, Kesklinn, Tartu linn, Tartu, Tartu linn, Tartu maakond, 53007, Eesti",58.38131405,26.72078081,edu,
+c94fd258a8f1e8f4033a7fe491f1372dcf7d3cd6,TS^2C: Tight Box Mining with Surrounding Segmentation Context for Weakly Supervised Object Detection,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+c94fd258a8f1e8f4033a7fe491f1372dcf7d3cd6,TS^2C: Tight Box Mining with Surrounding Segmentation Context for Weakly Supervised Object Detection,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+c96bd1a584d0e5d86148cfcab0f573825bc3fb5b,Single-Shot Multi-Person 3D Body Pose Estimation From Monocular RGB Input,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+c96bd1a584d0e5d86148cfcab0f573825bc3fb5b,Single-Shot Multi-Person 3D Body Pose Estimation From Monocular RGB Input,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+c94b3a05f6f41d015d524169972ae8fd52871b67,The Fastest Deformable Part Model for Object Detection,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+c9424d64b12a4abe0af201e7b641409e182babab,"Which, When, and How: Hierarchical Clustering with Human-Machine Cooperation",Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+c9c1255057652584603945508b7151206e9e9069,On Sampling and Greedy MAP Inference of Constrained Determinantal Point Processes,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+c97a5f2241cc6cd99ef0c4527ea507a50841f60b,Person Search in Videos with One Portrait Through Visual and Temporal Links,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+c97a5f2241cc6cd99ef0c4527ea507a50841f60b,Person Search in Videos with One Portrait Through Visual and Temporal Links,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+c95cd36779fcbe45e3831ffcd3314e19c85defc5,Face recognition using multi-modal low-rank dictionary learning,University of Alberta,University of Alberta,"University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada",53.52385720,-113.52282665,edu,
+c9e955cb9709f16faeb0c840f4dae92eb875450a,Proposal of Novel Histogram Features for Face Detection,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+c9b98c98357a154bceb2287c427c5fa9c17b4a07,Virtual CNN Branching: Efficient Feature Ensemble for Person Re-Identification,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+c904fb8be3e9948ccbf4f3c2549f0390a1f4903d,Towards social pattern characterization in egocentric photo-streams,University of Barcelona,University of Barcelona,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España",41.38689130,2.16352385,edu,
+c904fb8be3e9948ccbf4f3c2549f0390a1f4903d,Towards social pattern characterization in egocentric photo-streams,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+c9987af05f7df6539c5742072c027dfcf0394354,"DS++: a flexible, scalable and provably tight relaxation for matching problems",Weizmann Institute of Science,Weizmann Institute of Science,"מכון ויצמן למדע, שדרת מרכוס זיו, מעונות שיין, אחוזות הנשיא, רחובות, מחוז המרכז, NO, ישראל",31.90784990,34.81334092,edu,
+c92bb26238f6e30196b0c4a737d8847e61cfb7d4,Beyond Context: Exploring Semantic Similarity for Tiny Face Detection,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+c92bb26238f6e30196b0c4a737d8847e61cfb7d4,Beyond Context: Exploring Semantic Similarity for Tiny Face Detection,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+c92bb26238f6e30196b0c4a737d8847e61cfb7d4,Beyond Context: Exploring Semantic Similarity for Tiny Face Detection,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+c92f26b4a7116ab923e84e351662d1c8a6048b47,Illuminating Pedestrians via Simultaneous Detection and Segmentation,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+c9e4e1bb544a892fe07c99cc9a999f0762237cc3,Natural Language Person Retrieval,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+c9bbd7828437e70cc3e6863b278aa56a7d545150,Unconstrained Fashion Landmark Detection via Hierarchical Recurrent Transformer Networks,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+c9bbd7828437e70cc3e6863b278aa56a7d545150,Unconstrained Fashion Landmark Detection via Hierarchical Recurrent Transformer Networks,SenseTime,SenseTime,"China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1号-7",39.99300800,116.32988200,company,"1 Zhongguancun E Rd, Haidian Qu, China"
+c9f588d295437009994ddaabb64fd4e4c499b294,Predicting Professions through Probabilistic Model under Social Context,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+c9d9cb2c647c6489814098438a9fbd916a8a1918,ALMN: Deep Embedding Learning with Geometrical Virtual Point Generating,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+c92da368a6a886211dc759fe7b1b777a64d8b682,Face Recognition System based on Face Pose Estimation and Frontal Face Pose Synthesis,National Chiao Tung University,National Chiao Tung University,"NCTU;交大;交通大學;交大光復校區;交通大學光復校區, 1001, 大學路, 光明里, 赤土崎, 東區, 新竹市, 30010, 臺灣",24.78676765,120.99724412,edu,
+c92da368a6a886211dc759fe7b1b777a64d8b682,Face Recognition System based on Face Pose Estimation and Frontal Face Pose Synthesis,National Chiao Tung University,National Chiao Tung University,"NCTU;交大;交通大學;交大光復校區;交通大學光復校區, 1001, 大學路, 光明里, 赤土崎, 東區, 新竹市, 30010, 臺灣",24.78676765,120.99724412,edu,
+c98983592777952d1751103b4d397d3ace00852d,Face Synthesis from Facial Identity Features,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+c9367ed83156d4d682cefc59301b67f5460013e0,Geometry-Contrastive Generative Adversarial Network for Facial Expression Synthesis,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+c9baea734a14e4302829769ac39fe8c48fbae5a1,Multiple Object Tracking in Urban Traffic Scenes with a Multiclass Object Detector,Polytechnique Montreal,Polytechnique Montréal,"2900 Boulevard Edouard-Montpetit, Montréal, QC H3T 1J4, Canada",45.50438400,-73.61288290,edu,"Polytechnique Montreal, Montreal, Quebec, Canada"
+c901524f01c7a0db3bb01afa1d5828913c84628a,Image Region Selection and Ensemble for Face Recognition,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+fc8d33f351111ed43f56ba6809558d5227d4dcbe,Attention-Aware Compositional Network for Person Re-identification,SenseTime,SenseTime,"China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1号-7",39.99300800,116.32988200,company,"1 Zhongguancun E Rd, Haidian Qu, China"
+fc8d33f351111ed43f56ba6809558d5227d4dcbe,Attention-Aware Compositional Network for Person Re-identification,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+fc3ee00a751ca4871e3ba40b81120b1bc3a57fc0,How2: A Large-scale Dataset for Multimodal Language Understanding,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+fc3ee00a751ca4871e3ba40b81120b1bc3a57fc0,How2: A Large-scale Dataset for Multimodal Language Understanding,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+fc3ee00a751ca4871e3ba40b81120b1bc3a57fc0,How2: A Large-scale Dataset for Multimodal Language Understanding,University of Copenhagen,University of Copenhagen,"Københavns Universitet, Krystalgade, Kødbyen, Vesterbro, København, Københavns Kommune, Region Hovedstaden, 1165, Danmark",55.68015020,12.57232700,edu,
+fc3ee00a751ca4871e3ba40b81120b1bc3a57fc0,How2: A Large-scale Dataset for Multimodal Language Understanding,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+fcd9df8238605f70a54492fb0c6bdc9f29afda98,3D Vehicle Trajectory Reconstruction in Monocular Video Data Using Environment Structure Constraints,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+fcc82154067dfe778423c2df4ed69f0bec6e1534,Automatic Analysis of Affect and Membership in Group Settings,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+fcc82154067dfe778423c2df4ed69f0bec6e1534,Automatic Analysis of Affect and Membership in Group Settings,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+fc857cebd4150e3fe3aee212f128241b178f0d0a,Amygdala damage impairs eye contact during conversations with real people.,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+fc857cebd4150e3fe3aee212f128241b178f0d0a,Amygdala damage impairs eye contact during conversations with real people.,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+fc5bdb98ff97581d7c1e5eb2d24d3f10714aa192,Initialization Strategies of Spatio-Temporal Convolutional Neural Networks,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+fc20149dfdff5fdf020647b57e8a09c06e11434b,Local Discriminant Wavelet Packet Coordinates for Face Recognition,City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+fc0f5859a111fb17e6dcf6ba63dd7b751721ca61,Design of an Automatic Facial Expression Detector,University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.54724732,edu,
+fcbec158e6a4ace3d4311b26195482b8388f0ee9,Face Recognition from Still Images and Videos,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+fcc1ae9761926e9e7dbd23c2cb95ca39b0a71073,Assistive tagging: A survey of multimedia tagging with human-computer joint exploration,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+fcc1ae9761926e9e7dbd23c2cb95ca39b0a71073,Assistive tagging: A survey of multimedia tagging with human-computer joint exploration,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+fc450e42aa2a491ff0afda144718d4f73d4d89f2,An Analysis of Visual Question Answering Algorithms,Rochester Institute of Technology,Rochester Institute of Technology,"Rochester Institute of Technology (RIT), 1, Lomb Memorial Drive, Bailey, Henrietta Town, Monroe County, New York, 14623, USA",43.08250655,-77.67121663,edu,
+fc027fccb19512a439fc17181c34ee1c3aad51b5,Joint Multi-person Pose Estimation and Semantic Part Segmentation,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+fcf91995dc4d9b0cee84bda5b5b0ce5b757740ac,Asymmetric Discrete Graph Hashing,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+fc950b230a0189cc63b2e2295b2dc761d5b2270c,Health care providers' judgments in chronic pain: the influence of gender and trustworthiness.,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+fc950b230a0189cc63b2e2295b2dc761d5b2270c,Health care providers' judgments in chronic pain: the influence of gender and trustworthiness.,University of Northern British Columbia,University of Northern British Columbia,"UNBC, Campus Ring Road, College Heights, Prince George, Regional District of Fraser-Fort George, British Columbia, V2M 5K7, Canada",53.89256620,-122.81471592,edu,
+fc950b230a0189cc63b2e2295b2dc761d5b2270c,Health care providers' judgments in chronic pain: the influence of gender and trustworthiness.,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+fc5a100c117cd7291d626f1ec3402bec235f2635,IQA: Visual Question Answering in Interactive Environments,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+fc798314994bf94d1cde8d615ba4d5e61b6268b6,"Face Recognition : face in video , age invariance , and facial marks",Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+fc7cd432db404e7724df7671d6e010109fe0c944,Pedestrian Detection Image Processing with FPGA,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+fc7cd432db404e7724df7671d6e010109fe0c944,Pedestrian Detection Image Processing with FPGA,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+fc23a386c2189f221b25dbd0bb34fcd26ccf60fa,A Discriminative Latent Model of Object Classes and Attributes,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+fc72b2bb34f6a8216767df80ae13e09d1ef0ebda,Combating Human Trafficking with Deep Multimodal Models,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+fc68c5a3ab80d2d31e6fd4865a7ff2b4ab66ca9f,Evaluation Criteria for Affect-Annotated Databases,Gdansk University of Technology,Gdansk University of Technology,"PG, Romualda Traugutta, Królewska Dolina, Wrzeszcz Górny, Gdańsk, pomorskie, 80-233, RP",54.37086525,18.61716016,edu,
+fc2bad3544c7c8dc7cd182f54888baf99ed75e53,Efficient Retrieval for Large Scale Metric Learning,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+fccd0406749aecf76741460de7499689ebf4c676,Integrating Egocentric Videos in Top-View Surveillance Videos: Joint Identification and Temporal Alignment,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+fc18642d17785ef1853749b5323bf87adb329537,Exploring Prior Knowledge for Pedestrian Detection,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+fca9ebaa30d69ccec8bb577c31693c936c869e72,Look Across Elapse: Disentangled Representation Learning and Photorealistic Cross-Age Face Synthesis for Age-Invariant Face Recognition,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+fca9ebaa30d69ccec8bb577c31693c936c869e72,Look Across Elapse: Disentangled Representation Learning and Photorealistic Cross-Age Face Synthesis for Age-Invariant Face Recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+fd9d7efd0ecff49249844a0096e77b2f864fae0d,Language Guided Fashion Image Manipulation with Feature-wise Transformations,Hacettepe University,Hacettepe University,"Hacettepe Üniversitesi Beytepe Kampüsü, Hacettepe-Beytepe Kampüs Yolu, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.86742125,32.73519072,edu,
+fdff2da5bdca66e0ab5874ef58ac2205fb088ed7,Continuous Supervised Descent Method for Facial Landmark Localisation,Universitat Oberta de Catalunya,Universitat Oberta de Catalunya,"Universitat Oberta de Catalunya, 156, Rambla del Poblenou, Provençals del Poblenou, Sant Martí, Barcelona, BCN, CAT, 08018, España",41.40657415,2.19453410,edu,
+fdff2da5bdca66e0ab5874ef58ac2205fb088ed7,Continuous Supervised Descent Method for Facial Landmark Localisation,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+fdff2da5bdca66e0ab5874ef58ac2205fb088ed7,Continuous Supervised Descent Method for Facial Landmark Localisation,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+fdfd57d4721174eba288e501c0c120ad076cdca8,An Analysis of Action Recognition Datasets for Language and Vision Tasks,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+fd33df02f970055d74fbe69b05d1a7a1b9b2219b,Single Shot Temporal Action Detection,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+fd33df02f970055d74fbe69b05d1a7a1b9b2219b,Single Shot Temporal Action Detection,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+fdc0754852b9c8366341972f1b5b4320b48d64a9,Visual Relationship Detection with Internal and External Linguistic Knowledge Distillation,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+fd15e397629e0241642329fc8ee0b8cd6c6ac807,Semi-Supervised Clustering with Neural Networks,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+fdfceb0fd9561723e604bed586bca9a8450c207e,Graph R-CNN for Scene Graph Generation,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+fde41dc4ec6ac6474194b99e05b43dd6a6c4f06f,Multi-Expert Gender Classification on Age Group by Integrating Deep Neural Networks,Yonsei University,Yonsei University,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.56004060,126.93692480,edu,
+fde5e4538967f325916c1f944242304466edb41d,Urban Vehicle Tracking Using a Combined 3D Model Detector and Classifier,Kingston University,Kingston University,"Kingston University, Kingston Hill, Kingston Vale, Kingston-upon-Thames, London, Greater London, England, KT2 7TF, UK",51.42930860,-0.26840440,edu,
+fd9feb21b3d1fab470ff82e3f03efce6a0e67a1f,Deep Verification Learning,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+fd67d0efbd94c9d8f9d2f0a972edd7320bc7604f,Real-Time Semantic Clothing Segmentation,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+fde6d64175c459a26037a249e31c34cc0c9e3f7a,Unsupervised Learning of Monocular Depth Estimation and Visual Odometry with Deep Feature Reconstruction,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+fd36e838ffc2f56afdbd87a98f1dc4e05d20ed33,Robot-Centric Activity Recognition 'in the Wild',University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+fd53be2e0a9f33080a9db4b5a5e416e24ae8e198,Apparent Age Estimation Using Ensemble of Deep Learning Models,Istanbul Technical University,Istanbul Technical University,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye",41.10427915,29.02231159,edu,
+fd71ae9599e8a51d8a61e31e6faaaf4a23a17d81,Action Detection from a Robot-Car Perspective,Oxford Brookes University,Oxford Brookes University,"Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK",51.75552050,-1.22615970,edu,
+fdabbcb3b49201a942fd36836563ef4ead86bc28,End-to-end learning potentials for structured attribute prediction,Tohoku University,Tohoku University,"Tohoku University, 五橋通, 青葉区, 仙台市, 宮城県, 東北地方, 980-0811, 日本",38.25309450,140.87365930,edu,
+fd18475cf9165b33de1587a303fc68c5e77ed630,Visual Question Answering Using Various Methods,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+fd10b0c771a2620c0db294cfb82b80d65f73900d,Identifying The Most Informative Features Using A Structurally Interacting Elastic Net,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+fd10b0c771a2620c0db294cfb82b80d65f73900d,Identifying The Most Informative Features Using A Structurally Interacting Elastic Net,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+fd7b6c77b46420c27725757553fcd1fb24ea29a8,MEXSVMs: Mid-level Features for Scalable Action Recognition,Dartmouth College,Dartmouth College,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA",43.70479270,-72.29259090,edu,
+fd7173634abac857405c78564e366c311a1cf4b3,Sliced-Wasserstein Autoencoder: An Embarrassingly Simple Generative Model,University of Virginia,University of Virginia,"University of Virginia, Rotunda Alley, Carr's Hill, Albemarle County, Virginia, 22904-4119, USA",38.03536820,-78.50353220,edu,
+fdbacf2ff0fc21e021c830cdcff7d347f2fddd8e,Recognizing Frustration of Drivers From Face Video Recordings and Brain Activation Measurements With Functional Near-Infrared Spectroscopy,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+fd892e912149e3f5ddd82499e16f9ea0f0063fa3,Isyn Initialization Minimizing E ( Φ ) Analysis Synthesis Fitted model Redirection optical ow Warp eyelids Overlay eyeballs Stage 1 : Eye region tracking Stage 2 : Eye gaze redirection Input image Iobs New gaze target g ’ Iobs,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+fd892e912149e3f5ddd82499e16f9ea0f0063fa3,Isyn Initialization Minimizing E ( Φ ) Analysis Synthesis Fitted model Redirection optical ow Warp eyelids Overlay eyeballs Stage 1 : Eye region tracking Stage 2 : Eye gaze redirection Input image Iobs New gaze target g ’ Iobs,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+fdcc1e66697a724bd2d0d2da368de04a7eaf9209,The Devil is in the Decoder,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+fdcc1e66697a724bd2d0d2da368de04a7eaf9209,The Devil is in the Decoder,Google,"Google, Inc.","1600 Amphitheatre Pkwy, Mountain View, CA 94043, USA",37.42199990,-122.08405750,company,"Google, Mountain View, CA"
+fdf8e293a7618f560e76bd83e3c40a0788104547,Interspecies Knowledge Transfer for Facial Keypoint Detection,"University of California, Davis","University of California, Davis","University of California, Davis, Apiary Drive, Yolo County, California, 95616-5270, USA",38.53363490,-121.79077264,edu,
+fdf8e293a7618f560e76bd83e3c40a0788104547,Interspecies Knowledge Transfer for Facial Keypoint Detection,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+fdf8e293a7618f560e76bd83e3c40a0788104547,Interspecies Knowledge Transfer for Facial Keypoint Detection,"University of California, Davis","University of California, Davis","University of California, Davis, Apiary Drive, Yolo County, California, 95616-5270, USA",38.53363490,-121.79077264,edu,
+fd451222670d2f185ae3211b5450fd6951e6af51,Surface Normals with Modular Approach and Weighted Voting Scheme in 3D Facial Expression Classification,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+fd451222670d2f185ae3211b5450fd6951e6af51,Surface Normals with Modular Approach and Weighted Voting Scheme in 3D Facial Expression Classification,University of Birmingham,University of Birmingham,"University of Birmingham Edgbaston Campus, Ring Road North, Bournbrook, Birmingham, West Midlands Combined Authority, West Midlands, England, B15 2TP, UK",52.45044325,-1.93196134,edu,
+f27b8b8f2059248f77258cf8595e9434cf0b0228,Deep Alignment Network: A Convolutional Neural Network for Robust Face Alignment,Warsaw University of Technology,Warsaw University of Technology,"Politechnika Warszawska, 1, Plac Politechniki, VIII, Śródmieście, Warszawa, mazowieckie, 00-661, RP",52.22165395,21.00735776,edu,
+f2bc0ab0cdf34a1df441ed9678489cb810474c84,The Imaginary Part of Coherency in Autism: Differences in Cortical Functional Connectivity in Preschool Children,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+f2bc0ab0cdf34a1df441ed9678489cb810474c84,The Imaginary Part of Coherency in Autism: Differences in Cortical Functional Connectivity in Preschool Children,York University,York University,"York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada",43.77439110,-79.50481085,edu,
+f2bc0ab0cdf34a1df441ed9678489cb810474c84,The Imaginary Part of Coherency in Autism: Differences in Cortical Functional Connectivity in Preschool Children,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+f22b157fb9f9963b21a82860cb47585556bd79d5,3 D GLOH Features for Human Action Recognition,Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.48799610,-3.17969747,edu,
+f22b157fb9f9963b21a82860cb47585556bd79d5,3 D GLOH Features for Human Action Recognition,Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.48799610,-3.17969747,edu,
+f22b157fb9f9963b21a82860cb47585556bd79d5,3 D GLOH Features for Human Action Recognition,Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.48799610,-3.17969747,edu,
+f254cbfe9710de5e41589f8b7898112b06872ed2,DenseNet : Implementing Efficient ConvNet,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+f2da70f632db70eb42cf5bc5e2428f4bc53909ad,Association of Genetic Variation in the Promoter Region of OXTR with Differences in Social Affective Neural Processing,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+f2da70f632db70eb42cf5bc5e2428f4bc53909ad,Association of Genetic Variation in the Promoter Region of OXTR with Differences in Social Affective Neural Processing,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+f2da70f632db70eb42cf5bc5e2428f4bc53909ad,Association of Genetic Variation in the Promoter Region of OXTR with Differences in Social Affective Neural Processing,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+f2eb8b38e5366dd98350af304c678c42d858017c,Support Neighbor Loss for Person Re-Identification,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+f2eb8b38e5366dd98350af304c678c42d858017c,Support Neighbor Loss for Person Re-Identification,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+f2843da00dc202eb8748b1b690f7b5dd0849af20,Regularized Bayesian Metric Learning for Person Re-identification,"Advanced Digital Sciences Center, Singapore","Advanced Digital Sciences Center, Singapore","1 Create Way, 14-02 Create Tower, Singapore 138602",1.30372570,103.77377630,edu,
+f2843da00dc202eb8748b1b690f7b5dd0849af20,Regularized Bayesian Metric Learning for Person Re-identification,Chongqing University,Chongqing University,"重庆工商大学, 19, 翠林路, 重庆市, 重庆市中心, 南岸区 (Nan'an), 重庆市, 400067, 中国",29.50841740,106.57858552,edu,
+f22d6d59e413ee255e5e0f2104f1e03be1a6722e,Lattice Long Short-Term Memory for Human Action Recognition,Hong Kong University of Science and Technology,Hong Kong University of Science and Technology,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.33863040,114.26203370,edu,
+f22d6d59e413ee255e5e0f2104f1e03be1a6722e,Lattice Long Short-Term Memory for Human Action Recognition,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+f22d6d59e413ee255e5e0f2104f1e03be1a6722e,Lattice Long Short-Term Memory for Human Action Recognition,South China University of Technology,South China University of Technology,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.05020420,113.39880323,edu,
+f24e379e942e134d41c4acec444ecf02b9d0d3a9,Analysis of Facial Images across Age Progression by Humans,Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+f24e379e942e134d41c4acec444ecf02b9d0d3a9,Analysis of Facial Images across Age Progression by Humans,Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+f24e379e942e134d41c4acec444ecf02b9d0d3a9,Analysis of Facial Images across Age Progression by Humans,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+f2b5177d7c4f568295f6c2b9e02078e36d9ed286,Challenges on Large Scale Surveillance Video Analysis,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+f2b5177d7c4f568295f6c2b9e02078e36d9ed286,Challenges on Large Scale Surveillance Video Analysis,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+f2b5177d7c4f568295f6c2b9e02078e36d9ed286,Challenges on Large Scale Surveillance Video Analysis,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+f2b5177d7c4f568295f6c2b9e02078e36d9ed286,Challenges on Large Scale Surveillance Video Analysis,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+f2b13946d42a50fa36a2c6d20d28de2234aba3b4,Adaptive facial expression recognition using inter-modal top-down context,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+f2b13946d42a50fa36a2c6d20d28de2234aba3b4,Adaptive facial expression recognition using inter-modal top-down context,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+f2c30594d917ea915028668bc2a481371a72a14d,Scene Understanding Using Internet Photo Collections,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+f27bdc4f7ec2006425f999055df071d64640836e,Preserved Crossmodal Integration of Emotional Signals in Binge Drinking,Universitat de València,Universitat de València,"Campus dels Tarongers, Plaza de Manuel Broseta i Pont, Ciutat Jardí, Algirós, València, Comarca de València, València / Valencia, Comunitat Valenciana, 46022, España",39.47787665,-0.34257711,edu,
+f2ad9b43bac8c2bae9dea694f6a4e44c760e63da,A Study on Illumination Invariant Face Recognition Methods Based on Multiple Eigenspaces,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+f2ad9b43bac8c2bae9dea694f6a4e44c760e63da,A Study on Illumination Invariant Face Recognition Methods Based on Multiple Eigenspaces,North Dakota State University,North Dakota State University,"North Dakota State University, 15th Avenue North, Fargo, Cass County, North Dakota, 58102, USA",46.89715500,-96.81827603,edu,
+f2977284cc3c6653df957d886101cc485de1a9f9,Learning Robust Objective Functions with Application to Face Model Fitting,Waseda University,Waseda University,"早稲田大学 北九州キャンパス, 2-2, 有毛引野線, 八幡西区, 北九州市, 福岡県, 九州地方, 808-0135, 日本",33.88987280,130.70856205,edu,
+f2977284cc3c6653df957d886101cc485de1a9f9,Learning Robust Objective Functions with Application to Face Model Fitting,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+f26b3a916aaa50fe6ef554fff744559815ccf954,Serotonin transporter genotype impacts amygdala habituation in youth with autism spectrum disorders.,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+f26b3a916aaa50fe6ef554fff744559815ccf954,Serotonin transporter genotype impacts amygdala habituation in youth with autism spectrum disorders.,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+f2d07a77711a8d74bbfa48a0436dae18a698b05a,Composite Statistical Learning and Inference for Semantic Segmentation,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+f2d07a77711a8d74bbfa48a0436dae18a698b05a,Composite Statistical Learning and Inference for Semantic Segmentation,University of Coimbra,University of Coimbra,"Reitoria da Universidade de Coimbra, Rua de Entre-Colégios, Almedina, Alta, Almedina, Sé Nova, Santa Cruz, Almedina e São Bartolomeu, CBR, Coimbra, Baixo Mondego, Centro, 3000-062, Portugal",40.20759510,-8.42566148,edu,
+f2d07a77711a8d74bbfa48a0436dae18a698b05a,Composite Statistical Learning and Inference for Semantic Segmentation,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+f2d07a77711a8d74bbfa48a0436dae18a698b05a,Composite Statistical Learning and Inference for Semantic Segmentation,Lund University,Lund University,"TEM at Lund University, 9, Klostergatan, Stadskärnan, Centrum, Lund, Skåne, Götaland, 22222, Sverige",55.70395710,13.19020110,edu,
+f257300b2b4141aab73f93c146bf94846aef5fa1,Eigen Evolution Pooling for Human Action Recognition,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+f249c266321d661ae398c26ddb8c7409f6455ba1,Revisiting Faster R-CNN: A Deeper Look at Region Proposal Network,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+f2e9616577a0eb866e78e6fd68c67809e4fce11c,Digital innovations in L 2 motivation : Harnessing the power of the Ideal L 2 Self,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+f2e9616577a0eb866e78e6fd68c67809e4fce11c,Digital innovations in L 2 motivation : Harnessing the power of the Ideal L 2 Self,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+f2abaa1476fe1f00358f3eaa77dde2f348f58982,Towards an Unequivocal Representation of Actions,University of Bristol,University of Bristol,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK",51.45848370,-2.60977520,edu,
+f2abaa1476fe1f00358f3eaa77dde2f348f58982,Towards an Unequivocal Representation of Actions,University of Bristol,University of Bristol,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK",51.45848370,-2.60977520,edu,
+f2abaa1476fe1f00358f3eaa77dde2f348f58982,Towards an Unequivocal Representation of Actions,University of Bristol,University of Bristol,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK",51.45848370,-2.60977520,edu,
+f22a8c28a6de723e5451ce577a3ef8dfb26f5e2a,A CNN-Based Method of Vehicle Detection from Aerial Images Using Hard Example Mining,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+f2e70cc1603100548df96eef6cd9e28c547801b8,Submodular Optimzation via Reinforcement Learning,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+f28b7d62208fdaaa658716403106a2b0b527e763,Clustering-driven Deep Embedding with Pairwise Constraints,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+f28b7d62208fdaaa658716403106a2b0b527e763,Clustering-driven Deep Embedding with Pairwise Constraints,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+f28b7d62208fdaaa658716403106a2b0b527e763,Clustering-driven Deep Embedding with Pairwise Constraints,Bar-Ilan University,Bar-Ilan University,"אוניברסיטת בר אילן, כביש גהה, גבעת שמואל, קריית מטלון, גבעת שמואל, מחוז תל אביב, NO, ישראל",32.06932925,34.84334339,edu,
+f28b7d62208fdaaa658716403106a2b0b527e763,Clustering-driven Deep Embedding with Pairwise Constraints,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+f5149fb6b455a73734f1252a96a9ce5caa95ae02,Low-Rank-Sparse Subspace Representation for Robust Regression,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+f5149fb6b455a73734f1252a96a9ce5caa95ae02,Low-Rank-Sparse Subspace Representation for Robust Regression,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+f5149fb6b455a73734f1252a96a9ce5caa95ae02,Low-Rank-Sparse Subspace Representation for Robust Regression,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+f524b1aac4f2a29dab45d7e8726517798dbc9782,Anger superiority effect: The importance of dynamic emotional facial expressions,University of Florence,University of Florence,"Piazza di San Marco, 4, 50121 Firenze FI, Italy",43.77764260,11.25976500,edu,
+f524b1aac4f2a29dab45d7e8726517798dbc9782,Anger superiority effect: The importance of dynamic emotional facial expressions,University of Florence,University of Florence,"Piazza di San Marco, 4, 50121 Firenze FI, Italy",43.77764260,11.25976500,edu,
+f58d584c4ac93b4e7620ef6e5a8f20c6f6da295e,Feature Selection Guided Auto-Encoder,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+f5eb0cf9c57716618fab8e24e841f9536057a28a,Rethinking Feature Distribution for Loss Functions in Image Classification,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+f55deed4fa5d6d806790610dad9cf7505c1adde8,Goal Driven Detection in Natural Scenes Anonymous,Rochester Institute of Technology,Rochester Institute of Technology,"Rochester Institute of Technology (RIT), 1, Lomb Memorial Drive, Bailey, Henrietta Town, Monroe County, New York, 14623, USA",43.08250655,-77.67121663,edu,
+f571fe3f753765cf695b75b1bd8bed37524a52d2,Submodular Attribute Selection for Action Recognition in Video,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+f571fe3f753765cf695b75b1bd8bed37524a52d2,Submodular Attribute Selection for Action Recognition in Video,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+f571fe3f753765cf695b75b1bd8bed37524a52d2,Submodular Attribute Selection for Action Recognition in Video,National Institute of Standards and Technology,National Institute of Standards and Technology,"National Institute of Standards and Technology, Summer Walk Drive, Diamond Farms, Gaithersburg, Montgomery County, Maryland, 20878, USA",39.12549380,-77.22293475,edu,
+f5fba67fa306d8692525c6f9d034ea6e99ad17f7,Vision-Based Intersection Monitoring : Behavior Analysis & Safety Issues,University of Nevada,University of Nevada,"Orange 1, Evans Avenue, Reno, Washoe County, Nevada, 89557, USA",39.54694490,-119.81346566,edu,
+f53b8e719dbbdacf7365e4a0e5ecae875d00c3a9,StarGAN: Unified Generative Adversarial Networks for Multi-Domain Image-to-Image Translation,Korea University,Korea University,"고려대, 안암로, 제기동, 동대문구, 서울특별시, 02796, 대한민국",37.59014110,127.03623180,edu,
+f54d9dbad1f60de83485232707c945f209af867e,Vision as an Interlingua: Learning Multilingual Semantic Embeddings of Untranscribed Speech,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+f5fae7810a33ed67852ad6a3e0144cb278b24b41,Multilingual Gender Classification with Multi-view Deep Learning: Notebook for PAN at CLEF 2018,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+f5af4e9086b0c3aee942cb93ece5820bdc9c9748,Enhancing Person Annotation,Dublin City University,DUBLIN CITY UNIVERSITY,"Dublin City University Glasnevin Campus, Lower Car Park, Wad, Whitehall A ED, Dublin 9, Dublin, County Dublin, Leinster, D09 FW22, Ireland",53.38522185,-6.25740874,edu,
+f565ca9590820c341f1d29084e2d54ae490ffd41,Improving Deep Learning with Generic Data Augmentation,University of Cape Town,University of Cape Town,"University of Cape Town, Engineering Mall, Cape Town Ward 59, Cape Town, City of Cape Town, Western Cape, CAPE TOWN, South Africa",-33.95828745,18.45997349,edu,
+f565ca9590820c341f1d29084e2d54ae490ffd41,Improving Deep Learning with Generic Data Augmentation,University of Cape Town,University of Cape Town,"University of Cape Town, Engineering Mall, Cape Town Ward 59, Cape Town, City of Cape Town, Western Cape, CAPE TOWN, South Africa",-33.95828745,18.45997349,edu,
+f5db05adb6e89986d9ae2da0b81e1ce7c8efd9ba,Making Archetypal Analysis Practical,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+f563ef1a0fd024edb91a889b17b64aca84624be6,Gait-Based Pedestrian Detection for Automated Surveillance,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+f5aee1529b98136194ef80961ba1a6de646645fe,Large-scale learning of discriminative image representations,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+f52efc206432a0cb860155c6d92c7bab962757de,Mugshot Database Acquisition in Video Surveillance Networks Using Incremental Auto-Clustering Quality Measures,University of Kentucky,University of Kentucky,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA",38.03337420,-84.50177580,edu,
+f568eff0b3d8b9ae527e6b4483e2bc2ce5fd01bb,Multi-context Attention for Human Pose Estimation,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+f568eff0b3d8b9ae527e6b4483e2bc2ce5fd01bb,Multi-context Attention for Human Pose Estimation,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+f57364601b020dccca729c967b11c4a5da43f3f6,Robust Learning from Normals for 3D Face Recognition,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+f57364601b020dccca729c967b11c4a5da43f3f6,Robust Learning from Normals for 3D Face Recognition,University of Lincoln,University of Lincoln,"University of Lincoln, Brayford Way, Whitton Park, New Boultham, Lincoln, Lincolnshire, East Midlands, England, LN6 7TS, UK",53.22853665,-0.54873472,edu,
+f519723238701849f1160d5a9cedebd31017da89,Impact of multi-focused images on recognition of soft biometric traits,EURECOM,EURECOM,"Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France",43.61438600,7.07112500,edu,
+f5350ef1d45574e33f5b0f1c013a5bb00e1b1c55,Decoding Strategies for Neural Referring Expression Generation,Bielefeld University,Bielefeld University,"Fachhochschule Bielefeld FB Gestaltung, 3, Lampingstraße, Mitte, Bielefeld, Regierungsbezirk Detmold, Nordrhein-Westfalen, 33615, Deutschland",52.02804210,8.51148270,edu,
+f523c55a0a8057c5b08add761353ca79946feb07,Visual Translation Embedding Network for Visual Relation Detection,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+f5eb411217f729ad7ae84bfd4aeb3dedb850206a,Tackling Low Resolution for Better Scene Understanding,International Institute of Information Technology,International Institute of Information Technology,"International Institute of Information Technology, Hyderabad, Campus Road, Ward 105 Gachibowli, Greater Hyderabad Municipal Corporation West Zone, Hyderabad, Rangareddy District, Telangana, 500032, India",17.44549570,78.34854698,edu,
+f58ee95c2c4bdb1432e15d981dcbdb2038a55184,Multi-View Clustering via Deep Matrix Factorization,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+f58ee95c2c4bdb1432e15d981dcbdb2038a55184,Multi-View Clustering via Deep Matrix Factorization,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+f50b6aba0254809ba83c55d2b144508007c23c58,Online learning of robust object detectors during unstable tracking,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+f5af3c28b290dc797c499283e2d0662570f9ed02,GenLR-Net : Deep framework for very low resolution face and object recognition with generalization to unseen categories,Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.02223470,77.56718325,edu,
+e3fae8109ff2f91ebfa1bced01452a3998c40ade,Kernel-Based Nonparametric Fisher Classifier for Hyperspectral Remote Sensing Imagery,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+e35b09879a7df814b2be14d9102c4508e4db458b,Optimal Sensor Placement and Enhanced Sparsity for Classification,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+e35b09879a7df814b2be14d9102c4508e4db458b,Optimal Sensor Placement and Enhanced Sparsity for Classification,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+e3b324101157daede3b4d16bdc9c2388e849c7d4,"Robust Real-Time 3 D Face Tracking from RGBD Videos under Extreme Pose , Depth , and Expression Variations",Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+e344cfff1ec2a46e230983157ef34efba5d65340,What makes an Image Iconic? A Fine-Grained Case Study,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+e3faabdc800d2400f072eb5b48e9ad6dc94d7625,Locally Linear Embedded Eigenspace Analysis,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+e37f0b9dadc0bc6dc56ab0fb2c348dcca436bcc0,Preschool negative emotionality predicts activity and connectivity of the fusiform face area and amygdala in later childhood,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+e37f0b9dadc0bc6dc56ab0fb2c348dcca436bcc0,Preschool negative emotionality predicts activity and connectivity of the fusiform face area and amygdala in later childhood,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+e3cc5d86b2032d01c1b40de0da3b7f4458c9c0ee,Globally Consistent Multi-People Tracking using Motion Patterns,Beckman Institute,Beckman Institute,"Beckman Institute, The Presidents' Walk, Urbana, Champaign County, Illinois, 61801-2341, USA",40.11571585,-88.22750772,edu,
+e3f0e0dbc8e14e3dfb8fe9f9ecf6dab2c4713823,Human Action Recognition Based on Oriented Motion Salient Regions,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+e3f0e0dbc8e14e3dfb8fe9f9ecf6dab2c4713823,Human Action Recognition Based on Oriented Motion Salient Regions,Beijing Jiaotong University,Beijing Jiaotong University,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国",39.94976005,116.33629046,edu,
+e3c011d08d04c934197b2a4804c90be55e21d572,How to Train Triplet Networks with 100K Identities?,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+e3c011d08d04c934197b2a4804c90be55e21d572,How to Train Triplet Networks with 100K Identities?,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+e3c011d08d04c934197b2a4804c90be55e21d572,How to Train Triplet Networks with 100K Identities?,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+e3c433ab9608d7329f944552ba1721e277a42d74,Transferring Rich Feature Hierarchies for Robust Visual Tracking,Hong Kong University of Science and Technology,Hong Kong University of Science and Technology,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.33863040,114.26203370,edu,
+e3c433ab9608d7329f944552ba1721e277a42d74,Transferring Rich Feature Hierarchies for Robust Visual Tracking,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+e3bb83684817c7815f5005561a85c23942b1f46b,Face Verification using Correlation Filters,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+e3bb83684817c7815f5005561a85c23942b1f46b,Face Verification using Correlation Filters,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+e3bb83684817c7815f5005561a85c23942b1f46b,Face Verification using Correlation Filters,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+e3a7fca5f94d85814b600e870b90259eefedaf6e,Composable Unpaired Image to Image Translation,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+e3a7fca5f94d85814b600e870b90259eefedaf6e,Composable Unpaired Image to Image Translation,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+e3e2c106ccbd668fb9fca851498c662add257036,"Appearance, context and co-occurrence ensembles for identity recognition in personal photo collections",University of Colorado at Colorado Springs,University of Colorado at Colorado Springs,"1420 Austin Bluffs Pkwy, Colorado Springs, CO 80918, USA",38.89646790,-104.80505940,edu,
+e3b20cf421812dc96477a2074d0bb1ee83e6c98b,Mapping Urban Tree Species Using Very High Resolution Satellite Imagery: Comparing Pixel-Based and Object-Based Approaches,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+e379e73e11868abb1728c3acdc77e2c51673eb0d,Face Databases,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+e39a66a6d1c5e753f8e6c33cd5d335f9bc9c07fa,Weakly Supervised Learning for Unconstrained Face Processing,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+e39a66a6d1c5e753f8e6c33cd5d335f9bc9c07fa,Weakly Supervised Learning for Unconstrained Face Processing,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+e3a6e9ddbbfc4c5160082338d46808cea839848a,Vision-Based Classification of Developmental Disorders Using Eye-Movements,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+e3a6e9ddbbfc4c5160082338d46808cea839848a,Vision-Based Classification of Developmental Disorders Using Eye-Movements,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+e3a6e9ddbbfc4c5160082338d46808cea839848a,Vision-Based Classification of Developmental Disorders Using Eye-Movements,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+e3a6e9ddbbfc4c5160082338d46808cea839848a,Vision-Based Classification of Developmental Disorders Using Eye-Movements,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+e3a6e9ddbbfc4c5160082338d46808cea839848a,Vision-Based Classification of Developmental Disorders Using Eye-Movements,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+e34840e4b952444d291619c784cb1f02dfae1e1d,Label Efficient Learning of Transferable Representations across Domains and Tasks,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+e34840e4b952444d291619c784cb1f02dfae1e1d,Label Efficient Learning of Transferable Representations across Domains and Tasks,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+e35f4238bbc6c4acf4fce9591fa5cebf64fd0c2e,2017 / 2018 Mini-Project Creating Spaces that Understand People Employing Sensor Technologies to Inform the Design and Operation of Human-centred Spaces,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+e35f4238bbc6c4acf4fce9591fa5cebf64fd0c2e,2017 / 2018 Mini-Project Creating Spaces that Understand People Employing Sensor Technologies to Inform the Design and Operation of Human-centred Spaces,Maastricht University,Maastricht University,"UNS60, Professor Ten Hoorlaan, Randwyck, Maastricht, Limburg, Nederland, 6229EV, Nederland",50.83367120,5.71589000,edu,
+e3c8e49ffa7beceffca3f7f276c27ae6d29b35db,Families in the Wild (FIW): Large-Scale Kinship Image Database and Benchmarks,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+e3c8e49ffa7beceffca3f7f276c27ae6d29b35db,Families in the Wild (FIW): Large-Scale Kinship Image Database and Benchmarks,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+e33b5e91eb12ee3d7a5d134669994cbde6673df9,Automatic learning of British Sign Language from signed TV broadcasts,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+e37c8e2823cc3429caca4420f19adf329c62d313,Unsupervised Learning for Large-Scale Fiber Detection and Tracking in Microscopic Material Images,University of South Carolina,University of South Carolina,"University of South Carolina, Wheat Street, Columbia, Richland County, South Carolina, 29205, USA",33.99282980,-81.02685168,edu,
+e38c7d4f8a4399f402ab6bb364ec662fe897bed1,"PReMVOS: Proposal-generation, Refinement and Merging for Video Object Segmentation",RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+e3e98557ece5247661d849dc2d168f7498209e59,Learning Feature Hierarchies for Object Recognition,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+e3906b524a18cfa329c20cc422de78ed66d05f01,The Facial Appearance of CEOs: Faces Signal Selection but Not Performance,University of Groningen,University of Groningen,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland",53.21967825,6.56251482,edu,
+e3906b524a18cfa329c20cc422de78ed66d05f01,The Facial Appearance of CEOs: Faces Signal Selection but Not Performance,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+e38371b69be4f341baa95bc854584e99b67c6d3a,DYAN: A Dynamical Atoms-Based Network for Video Prediction,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+e351beaa000aa5875d00fef12eb14e1cb91530bf,Learning Pain from Action Unit Combinations: A Weakly Supervised Approach via Multiple Instance Learning,University of Illinois at Chicago,University of Illinois at Chicago,"University of Illinois at Chicago, West Taylor Street, Greektown, Chicago, Cook County, Illinois, 60607, USA",41.86898915,-87.64856256,edu,
+e351beaa000aa5875d00fef12eb14e1cb91530bf,Learning Pain from Action Unit Combinations: A Weakly Supervised Approach via Multiple Instance Learning,University of Illinois at Chicago,University of Illinois at Chicago,"University of Illinois at Chicago, West Taylor Street, Greektown, Chicago, Cook County, Illinois, 60607, USA",41.86898915,-87.64856256,edu,
+e351beaa000aa5875d00fef12eb14e1cb91530bf,Learning Pain from Action Unit Combinations: A Weakly Supervised Approach via Multiple Instance Learning,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+e328d19027297ac796aae2470e438fe0bd334449,Automatic Micro-expression Recognition from Long Video Using a Single Spotted Apex,University of Malaya,University of Malaya,"UM, Lingkaran Wawasan, Bukit Pantai, Bangsar, KL, 50603, Malaysia",3.12267405,101.65356103,edu,
+e328d19027297ac796aae2470e438fe0bd334449,Automatic Micro-expression Recognition from Long Video Using a Single Spotted Apex,Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.64185301,edu,
+e328d19027297ac796aae2470e438fe0bd334449,Automatic Micro-expression Recognition from Long Video Using a Single Spotted Apex,Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.64185301,edu,
+e3e36ccd836458d51676789fb133b092d42dac16,Deep learning prototype domains for person re-identification,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+e3a6e5a573619a97bd6662b652ea7d088ec0b352,Compare and Contrast: Learning Prominent Visual Differences,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+e3b3ab8ccb2c2998e4a6f326a4d4ac5f9b99dc7b,Video2Shop: Exact Matching Clothes in Videos to Online Shopping Images,Southwest Jiaotong University,Southwest Jiaotong University,"西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国",30.69784700,104.05208110,edu,
+cfc14272b915828a232e29dfc2099f842b144974,Challenging Images For Minds and Machines,York University,York University,"York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada",43.77439110,-79.50481085,edu,
+cfeb26245b57dd10de8f187506d4ed5ce1e2b7dd,CapsNet comparative performance evaluation for image classification,University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.54724732,edu,
+cff0e53006c6145d96322e6401e840f405b6ed02,Guest Editorial: Apparent Personality Analysis,University of Barcelona,University of Barcelona,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España",41.38689130,2.16352385,edu,
+cf18432bb77bf41377c477b5aaab9abd0f1f306c,ReabsNet: Detecting and Revising Adversarial Examples,University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+cf393385803f4a8501d0690250c848c7149338ac,A Neural Multi-sequence Alignment TeCHnique (NeuMATCH),University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+cf94200a476dc15d6da95db809349db4cfd8e92c,Leveraging Motion Priors in Videos for Improving Human Segmentation,National Tsing Hua University,National Tsing Hua University,"國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.79254840,120.99511830,edu,
+cf4e94d0337744280da87ff351412bbe702af2b7,An Informed Framework for Training Classifiers from Social Media,Hankuk University of Foreign Studies,Hankuk University of Foreign Studies,"외대앞, 휘경로, 이문동, 이문2동, 동대문구, 서울특별시, 02407, 대한민국",37.59539790,127.06304990,edu,
+cffebdf88e406c27b892857d1520cb2d7ccda573,Learning from Large-scale Visual Data for Robots,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+cfb1b2006d24a81bc3f489ca0eb391e7f03788d6,Nonlinear 3D Face Morphable Model,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+cf80b4f78e639504cbf056f29bc1efecf31b1bb2,Joint Flow: Temporal Flow Fields for Multi Person Tracking,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+cf54d15a176ac0d8e30eb0af2fdbb3a9908064f8,Implicit models for automatic pose estimation in static images,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+cf54d15a176ac0d8e30eb0af2fdbb3a9908064f8,Implicit models for automatic pose estimation in static images,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+cf75d967bb47e1085fd120d8373e32db835d515b,Pictorial Human Spaces: How Well Do Humans Perceive a 3D Articulated Pose?,Lund University,Lund University,"TEM at Lund University, 9, Klostergatan, Stadskärnan, Centrum, Lund, Skåne, Götaland, 22222, Sverige",55.70395710,13.19020110,edu,
+cfd700cb28529a9119824389451ddde9c041275e,Sub-Selective Quantization for Large-Scale Image Search,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+cfd700cb28529a9119824389451ddde9c041275e,Sub-Selective Quantization for Large-Scale Image Search,IBM Thomas J. Watson Research Center,IBM Thomas J. Watson Research Center,"IBM Yorktown research lab, Adams Road, Millwood, Town of New Castle, Westchester County, New York, 10562, USA",41.21002475,-73.80407056,company,
+cf5c9b521c958b84bb63bea9d5cbb522845e4ba7,Towards Arbitrary-View Face Alignment by Recommendation Trees,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+cf5c9b521c958b84bb63bea9d5cbb522845e4ba7,Towards Arbitrary-View Face Alignment by Recommendation Trees,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+cfa931e6728a825caada65624ea22b840077f023,Deformable Generator Network: Unsupervised Disentanglement of Appearance and Geometry,Harbin Engineering University,Harbin Engineering University,"哈尔滨工程大学, 文庙街 - Wenmiao Street, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.77445695,126.67684917,edu,
+cf814b618fcbc9a556cdce225e74a8806867ba84,Facial Expression Recognition Using 3D Facial Feature Distances,Eastern Mediterranean University,Eastern Mediterranean University,"Eastern Mediterranean University (EMU) - Stadium, Nehir Caddesi, Gazimağusa, Αμμόχωστος - Mağusa, Kuzey Kıbrıs, 99450, Κύπρος - Kıbrıs",35.14479945,33.90492318,edu,
+cff911786b5ac884bb71788c5bc6acf6bf569eff,Multi-task Learning of Cascaded CNN for Facial Attribute Classification,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+cf528f9fe6588b71efa94c219979ce111fc9c1c9,On Evaluation of 6D Object Pose Estimation,Czech Technical University,Czech Technical University,"České vysoké učení technické v Praze, Resslova, Nové Město, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 11121, Česko",50.07642960,14.41802312,edu,
+cf09e2cb82961128302b99a34bff91ec7d198c7c,Office Entrance Control with Face Recognition,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+cf09e2cb82961128302b99a34bff91ec7d198c7c,Office Entrance Control with Face Recognition,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+cfc4aa456d9da1a6fabd7c6ca199332f03e35b29,"Searching Video , Detecting Events and Describing Video",University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+cfc4aa456d9da1a6fabd7c6ca199332f03e35b29,"Searching Video , Detecting Events and Describing Video",Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+cfc4aa456d9da1a6fabd7c6ca199332f03e35b29,"Searching Video , Detecting Events and Describing Video","Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+cf64cdc889a4edaf641a307aa2b11d89d4d10a09,High-performance and energy-efficient mobile web browsing on big/little systems,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+cfdc4d0f8e1b4b9ced35317d12b4229f2e3311ab,Quaero at TRECVID 2010: Semantic Indexing,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+ca50b25eaad0c9146fc5a4a2cd4c472c77b970ba,Face Recognition Using Histogram-based Features in Spatial and Frequency Domains,Kogakuin University,Kogakuin University,"工学院大学, 東通り, 新宿区, 東京都, 関東地方, 163-8677, 日本",35.69027840,139.69540096,edu,
+ca50b25eaad0c9146fc5a4a2cd4c472c77b970ba,Face Recognition Using Histogram-based Features in Spatial and Frequency Domains,Tohoku University,Tohoku University,"Tohoku University, 五橋通, 青葉区, 仙台市, 宮城県, 東北地方, 980-0811, 日本",38.25309450,140.87365930,edu,
+ca50b25eaad0c9146fc5a4a2cd4c472c77b970ba,Face Recognition Using Histogram-based Features in Spatial and Frequency Domains,Tohoku University,Tohoku University,"Tohoku University, 五橋通, 青葉区, 仙台市, 宮城県, 東北地方, 980-0811, 日本",38.25309450,140.87365930,edu,
+ca11dc3a8064583aaf79061866bbcf04caece162,Disentangled Representations in Neural Models,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+caab1c1d53718315f54bc4df42eb9a727fa18483,"Show, Tell and Discriminate: Image Captioning by Self-retrieval with Partially Labeled Data",Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+ca42b7f881437976a6c60de0229ebbf31b58c3bd,Learn the Distribution ? S Ome T Heory and E Mpirics,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+ca42b7f881437976a6c60de0229ebbf31b58c3bd,Learn the Distribution ? S Ome T Heory and E Mpirics,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+ca42b7f881437976a6c60de0229ebbf31b58c3bd,Learn the Distribution ? S Ome T Heory and E Mpirics,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+ca8c296c5c74d351d866ac317d9680626b0bc6a7,LiveBot: Generating Live Video Comments Based on Visual and Textual Contexts,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+caa2ded6d8d5de97c824d29b0c7a18d220c596c8,Learning to Segment Breast Biopsy Whole Slide Images,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+caa2ded6d8d5de97c824d29b0c7a18d220c596c8,Learning to Segment Breast Biopsy Whole Slide Images,University of Vermont,University of Vermont,"University of Vermont, Colchester Avenue, Burlington, Chittenden County, Vermont, 05401, USA",44.48116865,-73.20021790,edu,
+ca22c95ccea3e5ceaf95956811cb507af1bdd672,Electroconvulsive therapy selectively enhanced feedforward connectivity from fusiform face area to amygdala in major depressive disorder,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"Columbus, OH 43210, USA",40.01419050,-83.03091430,edu,
+ca4580c5c5d8475801de42e493c5f97096677927,Face Metamorphosis and Face Caricature: A User’s Guide,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+cabc9a1fef57fb2cad91bdb0a84e18934ee5bdbe,Virtual to Real Reinforcement Learning for Autonomous Driving,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+cabc9a1fef57fb2cad91bdb0a84e18934ee5bdbe,Virtual to Real Reinforcement Learning for Autonomous Driving,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+cabc9a1fef57fb2cad91bdb0a84e18934ee5bdbe,Virtual to Real Reinforcement Learning for Autonomous Driving,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+cabc9a1fef57fb2cad91bdb0a84e18934ee5bdbe,Virtual to Real Reinforcement Learning for Autonomous Driving,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+ca56ac26cd7e1fdc35033228b4936bf70a090825,Score Level Fusion of Ear and Face Local 3D Features for Fast and Expression-Invariant Human Recognition,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+caf912b716905ccbf46d6d00d6a0b622834a7cd9,Measuring Machine Intelligence Through Visual Question Answering,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+ca54d0a128b96b150baef392bf7e498793a6371f,Improve Pedestrian Attribute Classification by Weighted Interactions from Other Attributes,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+ca283c6fdf43b7ad59949207834a6a573381a9c9,Facial Identity Recognition in the Broader Autism Phenotype,University of Glasgow,University of Glasgow,"University of Glasgow, University Avenue, Yorkhill, Hillhead, Glasgow, Glasgow City, Scotland, G, UK",55.87231535,-4.28921784,edu,
+ca283c6fdf43b7ad59949207834a6a573381a9c9,Facial Identity Recognition in the Broader Autism Phenotype,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+ca6b78e3d12134e12305fa4bcdf050ac102781df,OCNet: Object Context Network for Scene Parsing,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+cada850299f0aa71ecd9b37a2496802ad8d48455,Cost-effective conceptual design using taxonomies,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+cada850299f0aa71ecd9b37a2496802ad8d48455,Cost-effective conceptual design using taxonomies,Oregon State University,Oregon State University,"OSU Beaver Store, 538, Southwest 6th Avenue, Portland Downtown, Portland, Multnomah County, Oregon, 97204, USA",45.51982890,-122.67797964,edu,
+ca1c710c14f95c3b0cf027fb068d53d595809a5c,Two-Stage Synthesis Networks for Transfer Learning in Machine Comprehension,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+ca1c710c14f95c3b0cf027fb068d53d595809a5c,Two-Stage Synthesis Networks for Transfer Learning in Machine Comprehension,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+e48fb3ee27eef1e503d7ba07df8eb1524c47f4a6,Illumination invariant face recognition and impostor rejection using different MINACE filter algorithms,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+e483f644eb20d79402bab1f5d96025598e101f82,Heterogeneous Multilayer Generalized Operational Perceptron,Tampere University of Technology,Tampere University of Technology,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi",61.44964205,23.85877462,edu,
+e483f644eb20d79402bab1f5d96025598e101f82,Heterogeneous Multilayer Generalized Operational Perceptron,Qatar University,Qatar University,"Qatar University, Roindabout 3, Al Tarfa (68), أم صلال, 24685, ‏قطر‎",25.37461295,51.48980354,edu,
+e4433daf01a4e55ffca764c1e161f83552db081f,Exposure Is Not Enough: Suppressing Stimuli from Awareness Can Abolish the Mere Exposure Effect,University of New South Wales,University of New South Wales,"UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia",-33.91758275,151.23124025,edu,
+e4bc529ced68fae154e125c72af5381b1185f34e,Perceptual Goal Specifications for Reinforcement Learning,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+e4450b61f1ccbe5bbec1e777baad5dd69fd6edbe,Neuro-IoU: Learning a Surrogate Loss for Semantic Segmentation,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+e4183e539b90ac02f55ccf16eb154bc269576290,The Unusual Effectiveness of Averaging in GAN Training,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+e4183e539b90ac02f55ccf16eb154bc269576290,The Unusual Effectiveness of Averaging in GAN Training,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+e4183e539b90ac02f55ccf16eb154bc269576290,The Unusual Effectiveness of Averaging in GAN Training,Singapore University of Technology and Design,Singapore University of Technology and Design,"Singapore University of Technology and Design, Simpang Bedok, Changi Business Park, Southeast, 486041, Singapore",1.34021600,103.96508900,edu,
+e483482e19b022a1cd7081dc2757bb8a85774ed7,"HOAI, LADICKÝ, ZISSERMAN: ACTION FROMWEAK ALIGNMENT OF BODY PARTS 1 Action Recognition From Weak Alignment of Body Parts",University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+e483482e19b022a1cd7081dc2757bb8a85774ed7,"HOAI, LADICKÝ, ZISSERMAN: ACTION FROMWEAK ALIGNMENT OF BODY PARTS 1 Action Recognition From Weak Alignment of Body Parts",Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+e483482e19b022a1cd7081dc2757bb8a85774ed7,"HOAI, LADICKÝ, ZISSERMAN: ACTION FROMWEAK ALIGNMENT OF BODY PARTS 1 Action Recognition From Weak Alignment of Body Parts",ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+e42d055d59f6b5b0bf677975d21544aad26a5417,Learning to Appreciate the Aesthetic Effects of Clothing,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+e42d055d59f6b5b0bf677975d21544aad26a5417,Learning to Appreciate the Aesthetic Effects of Clothing,"Sichuan University, Chengdu","Sichuan Univ., Chengdu","四川大学(华西校区), 校东路, 武侯区, 武侯区 (Wuhou), 成都市 / Chengdu, 四川省, 610014, 中国",30.64276900,104.06751175,edu,
+e42998bbebddeeb4b2bedf5da23fa5c4efc976fa,Generic Active Appearance Models Revisited,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+e42998bbebddeeb4b2bedf5da23fa5c4efc976fa,Generic Active Appearance Models Revisited,University of Lincoln,University of Lincoln,"University of Lincoln, Brayford Way, Whitton Park, New Boultham, Lincoln, Lincolnshire, East Midlands, England, LN6 7TS, UK",53.22853665,-0.54873472,edu,
+e4b88898d8ac1086e82ecc2fba82fb174bf9adaa,PacGAN: The power of two samples in generative adversarial networks,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+e4a6d0ce979c2067c6d0aec9e7a22113b8d3b7d7,Fine-grained Activity Recognition in Baseball Videos,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+e4df83b7424842ff5864c10fa55d38eae1c45fac,Locally Linear Discriminate Embedding for Face Recognition,Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.64185301,edu,
+e459158c2217904d5fe9a409896bd49622f17ebe,Face Video Retrieval via Deep Learning of Binary Hash Representations,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+e459158c2217904d5fe9a409896bd49622f17ebe,Face Video Retrieval via Deep Learning of Binary Hash Representations,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+e4cb27d2a3e1153cb517d97d61de48ff0483c988,Viktoria Plemakova Vehicle Detection Based on Convolutional Neural Networks,University of Tartu,UNIVERSITY OF TARTU,"Paabel, University of Tartu, 17, Ülikooli, Kesklinn, Tartu linn, Tartu, Tartu linn, Tartu maakond, 53007, Eesti",58.38131405,26.72078081,edu,
+e4e3faa47bb567491eaeaebb2213bf0e1db989e1,Empirical Risk Minimization for Metric Learning Using Privileged Information,Hefei University of Technology,Hefei University of Technology,"合肥工业大学(屯溪路校区), 193号, 南一环路, 航运南村, 包公街道, 合肥市区, 合肥市, 安徽省, 230009, 中国",31.84691800,117.29053367,edu,
+e4e3faa47bb567491eaeaebb2213bf0e1db989e1,Empirical Risk Minimization for Metric Learning Using Privileged Information,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+e405c59d9e13c4d72050535f00cd3696ac004740,Robust Estimation via Robust Gradient Estimation,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+e4236c286787cc608ec42abba2e51eb36f108b14,Deep Word Embeddings for Visual Speech Recognition,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+e4444820fb3f6d1f41c6ea51c6b2ab8ceb04a3a5,View-Driven Deduplication with Active Learning,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+e43045a061421bd79713020bc36d2cf4653c044d,A New Representation of Skeleton Sequences for 3D Action Recognition,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+e475deadd1e284428b5e6efd8fe0e6a5b83b9dcd,Are you eligible? Predicting adulthood from face images via class specific mean autoencoder,"IIIT Delhi, India","IIIT Delhi, India","Okhla Industrial Estate, Phase III, Near Govind Puri Metro Station, New Delhi, Delhi 110020, India",28.54562820,77.27315050,edu,
+e417e88c13e0f3d5bbd02e6682823b0514f4bc78,Deep Bi-Dense Networks for Image Super-Resolution,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+fe9a6a93af9c32f6b0454a7cf6897409124514bd,Designing a smart card face verification system,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+fe9a6a93af9c32f6b0454a7cf6897409124514bd,Designing a smart card face verification system,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+fef3efeffade0e39f2c279653b4785b372be410e,Near infrared face recognition: A literature survey,Islamic Azad University,Islamic Azad University,"دانشگاه آزاد اسلامی, همدان, بخش مرکزی شهرستان همدان, شهرستان همدان, استان همدان, ‏ایران‎",34.84529990,48.55962120,edu,
+fef3efeffade0e39f2c279653b4785b372be410e,Near infrared face recognition: A literature survey,Islamic Azad University,Islamic Azad University,"دانشگاه آزاد اسلامی, همدان, بخش مرکزی شهرستان همدان, شهرستان همدان, استان همدان, ‏ایران‎",34.84529990,48.55962120,edu,
+fe5c43aa19da5cbbf5a42e4697659875f7389b91,Tracking People in Broadcast Sports,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+febff0f6faa8dde77848845e4b3e6f6c91180d33,Embedding Deep Metric for Person Re-identification: A Study Against Large Variations,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+febff0f6faa8dde77848845e4b3e6f6c91180d33,Embedding Deep Metric for Person Re-identification: A Study Against Large Variations,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+febff0f6faa8dde77848845e4b3e6f6c91180d33,Embedding Deep Metric for Person Re-identification: A Study Against Large Variations,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+fe68d6fe52df8c28f7cf81b338c491e5bac6e33c,SCAN: Self-and-Collaborative Attention Network for Video Person Re-identification,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+fe68d6fe52df8c28f7cf81b338c491e5bac6e33c,SCAN: Self-and-Collaborative Attention Network for Video Person Re-identification,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+fe60d81f726c8e20948b927b456a94a96d78fa26,"Multimodal Utterance-level Affect Analysis using Visual, Audio and Text Features",Hong Kong University of Science and Technology,Hong Kong University of Science and Technology,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.33863040,114.26203370,edu,
+fe60d81f726c8e20948b927b456a94a96d78fa26,"Multimodal Utterance-level Affect Analysis using Visual, Audio and Text Features","University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+fe464b2b54154d231671750053861f5fd14454f5,Multi Joint Action in CoTeSys-Setup and Challenges-Technical report CoTeSys-TR-1001,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+fe464b2b54154d231671750053861f5fd14454f5,Multi Joint Action in CoTeSys-Setup and Challenges-Technical report CoTeSys-TR-1001,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+fe464b2b54154d231671750053861f5fd14454f5,Multi Joint Action in CoTeSys-Setup and Challenges-Technical report CoTeSys-TR-1001,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+fe464b2b54154d231671750053861f5fd14454f5,Multi Joint Action in CoTeSys-Setup and Challenges-Technical report CoTeSys-TR-1001,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+fe464b2b54154d231671750053861f5fd14454f5,Multi Joint Action in CoTeSys-Setup and Challenges-Technical report CoTeSys-TR-1001,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+fe5df5fe0e4745d224636a9ae196649176028990,Using Context to Enhance the Understanding of Face Images,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+fe5df5fe0e4745d224636a9ae196649176028990,Using Context to Enhance the Understanding of Face Images,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+feb4367aafc60159c8dedcaba2d5a66fdd64066c,Explaining unexplainable food choices,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+fe961cbe4be0a35becd2d722f9f364ec3c26bd34,"Computer-based Tracking, Analysis, and Visualization of Linguistically Significant Nonmanual Events in American Sign Language (ASL)",Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+fe961cbe4be0a35becd2d722f9f364ec3c26bd34,"Computer-based Tracking, Analysis, and Visualization of Linguistically Significant Nonmanual Events in American Sign Language (ASL)",Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+feb6e267923868bff6e2108603d00fdfd65251ca,Unsupervised Discovery of Visual Face Categories,Southeast University,Southeast University,"SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国",32.05752790,118.78682252,edu,
+feb6e267923868bff6e2108603d00fdfd65251ca,Unsupervised Discovery of Visual Face Categories,University of Nevada,University of Nevada,"Orange 1, Evans Avenue, Reno, Washoe County, Nevada, 89557, USA",39.54694490,-119.81346566,edu,
+feb6e267923868bff6e2108603d00fdfd65251ca,Unsupervised Discovery of Visual Face Categories,King Saud University,King Saud University,"King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية",24.72464030,46.62335012,edu,
+feb6e267923868bff6e2108603d00fdfd65251ca,Unsupervised Discovery of Visual Face Categories,King Saud University,King Saud University,"King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية",24.72464030,46.62335012,edu,
+feb6e267923868bff6e2108603d00fdfd65251ca,Unsupervised Discovery of Visual Face Categories,King Saud University,King Saud University,"King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية",24.72464030,46.62335012,edu,
+feb5b8bf315a6b6222f62dd9533b1e0f891a27bd,The Nature and Consequences of Essentialist Beliefs About Race in Early Childhood.,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+feb5b8bf315a6b6222f62dd9533b1e0f891a27bd,The Nature and Consequences of Essentialist Beliefs About Race in Early Childhood.,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+fe8a8c4133698e4b68018d99c6a2bcec870c5464,A New Large Scale Dynamic Texture Dataset with Application to ConvNet Understanding,York University,York University,"York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada",43.77439110,-79.50481085,edu,
+fecad388186269e3d8d71a75c42f56e661861c3e,Discovering Geo-Informative Attributes for Location Recognition and Exploration,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+fe5e1e869510d18d4c771b1fe924fca0a01f7222,Towards Energy-Efficient Mobile Sensing: Architectures and Frameworks for Heterogeneous Sensing and Computing,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+fe82d072a8d13cfefcd575db893f3374251f04a8,Multi-fiber Networks for Video Recognition,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+fe710adb0e9e647d7ede0583b40d2aeb36c1fc7f,Human Appearance Transfer,Lund University,Lund University,"TEM at Lund University, 9, Klostergatan, Stadskärnan, Centrum, Lund, Skåne, Götaland, 22222, Sverige",55.70395710,13.19020110,edu,
+fe6409e8e09d47758d4e71981ad951423bdce212,Camera-based vehicle velocity estimation from monocular video,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+fec2a5a06a3aab5efe923a78d208ec747d5e4894,Generalizing to Unseen Domains via Adversarial Data Augmentation,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+fec2a5a06a3aab5efe923a78d208ec747d5e4894,Generalizing to Unseen Domains via Adversarial Data Augmentation,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+fec2a5a06a3aab5efe923a78d208ec747d5e4894,Generalizing to Unseen Domains via Adversarial Data Augmentation,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+feeb0fd0e254f38b38fe5c1022e84aa43d63f7cc,Search Pruning with Soft Biometric Systems: Efficiency-Reliability Tradeoff,EURECOM,EURECOM,"Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France",43.61438600,7.07112500,edu,
+fe0cf8eaa5a5f59225197ef1bb8613e603cd96d4,Improved Face Verification with Simple Weighted Feature Combination,Tongji University,Tongji University,"同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国",31.28473925,121.49694909,edu,
+fed618637ac9d2fbdb0711f64ea752370dfaca61,Human Body Poses Recognition Using Neural Networks with Class Based Data Augmentation,University of Tartu,UNIVERSITY OF TARTU,"Paabel, University of Tartu, 17, Ülikooli, Kesklinn, Tartu linn, Tartu, Tartu linn, Tartu maakond, 53007, Eesti",58.38131405,26.72078081,edu,
+fe95b902eb362ad39f91e2325300d3f7a9119c48,Modeling invariant object processing based on tight integration of simulated and empirical data in a Common Brain Space,Maastricht University,Maastricht University,"UNS60, Professor Ten Hoorlaan, Randwyck, Maastricht, Limburg, Nederland, 6229EV, Nederland",50.83367120,5.71589000,edu,
+fe95b902eb362ad39f91e2325300d3f7a9119c48,Modeling invariant object processing based on tight integration of simulated and empirical data in a Common Brain Space,Maastricht University,Maastricht University,"UNS60, Professor Ten Hoorlaan, Randwyck, Maastricht, Limburg, Nederland, 6229EV, Nederland",50.83367120,5.71589000,edu,
+c89ddb0e978b78c062fbf9ea992da83e4b38778e,2D and 3D Multimodal Hybrid Face Recognition,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+c8be6a59b1c29a1a44a0792985baf365298123e2,Visual Surveillance on DSP-Based Embedded Platforms,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+c8a5c5c8e1293b7e877a848b7a9e5426c5400651,FaceShop: Deep Sketch-based Face Image Editing,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+c8e20a4981e907c77ccbfe6ae39673aa43249f41,Neuromorphic Hardware Accelerated Adaptive Authentication System,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+c87f7ee391d6000aef2eadb49f03fc237f4d1170,A real-time and unsupervised face Re-Identification system for Human-Robot Interaction,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+c8c714c100a754baf7d86d240ec35207fcf84b06,Person Re-identification Meets Image Search,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+c8c714c100a754baf7d86d240ec35207fcf84b06,Person Re-identification Meets Image Search,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+c87035f4b5cdb8597db20e9dc319c2a06d752197,Learning Latent Subspaces in Variational Autoencoders,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+c8ca6a2dc41516c16ea0747e9b3b7b1db788dbdd,Track Facial Points in Unconstrained Videos,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+c8292aa152a962763185e12fd7391a1d6df60d07,Camera Distance from Face Images,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+c8c83ab64d99b16ef3248cbeccc95f7049e324d5,Nearest Prime Simplicial Complex for Object Recognition,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+c8c83ab64d99b16ef3248cbeccc95f7049e324d5,Nearest Prime Simplicial Complex for Object Recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+c829be73584966e3162f7ccae72d9284a2ebf358,shuttleNet: A biologically-inspired RNN with loop connection and parameter sharing,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+c829be73584966e3162f7ccae72d9284a2ebf358,shuttleNet: A biologically-inspired RNN with loop connection and parameter sharing,Beijing Institute of Technology,Beijing Institute of Technology University,"北京理工大学, 5, 中关村南大街, 中关村, 稻香园南社区, 海淀区, 北京市, 100872, 中国",39.95866520,116.30971281,edu,
+c87d5036d3a374c66ec4f5870df47df7176ce8b9,Temporal Dynamics of Natural Static Emotional Facial Expressions Decoding: A Study Using Event- and Eye Fixation-Related Potentials,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+c88173aac29baa13d615c5be858290a14f0493c9,Generic Object Recognition with Local Receptive Fields Based Extreme Learning Machine,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+c8dc902b82831e1f1b587c590cdc34b5d12bdc5c,DeepFuse: A Deep Unsupervised Approach for Exposure Fusion with Extreme Exposure Image Pairs,Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.02223470,77.56718325,edu,
+c8a22550297a25dadd283089f009015bc0df5eed,Neural circuits in the brain that are activated when mitigating criminal sentences,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+c8da81fb5551941295ad815051d39dc461008751,Hybrid forests for left ventricle segmentation using only the first slice label,Moulay Ismail University,Moulay Ismail University,"Marjane 2, BP: 298، Meknes 50050, Morocco",33.85611100,-5.57439100,edu,Moulay Ismail University
+c8e84cdff569dd09f8d31e9f9ba3218dee65e961,Dictionaries for image and video-based face recognition [Invited].,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+c8e84cdff569dd09f8d31e9f9ba3218dee65e961,Dictionaries for image and video-based face recognition [Invited].,National Institute of Standards and Technology,National Institute of Standards and Technology,"National Institute of Standards and Technology, Summer Walk Drive, Diamond Farms, Gaithersburg, Montgomery County, Maryland, 20878, USA",39.12549380,-77.22293475,edu,
+c86ce9fc2bd5aea98869cf1f31d03e05e7ec672c,FOIL it! Find One mismatch between Image and Language caption,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+c8829013bbfb19ccb731bd54c1a885c245b6c7d7,Flexible Template and Model Matching Using Image Intensity,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+c84b2cda2d645475f25d8b8f34b8f21ad3aa059c,Human Face Detection and Eye Localization in Video Using Wavelets,Bilkent University,Bilkent University,"Bilkent Üniversitesi, 3. Cadde, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87204890,32.75395155,edu,
+c8b4df94686ae4d308e859eddc0e00921a17fe75,GraphBit : Bitwise Interaction Mining via Deep Reinforcement Learning,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+c8fc81a54ccef6d8111e7253283fc55e7e0f8ebd,High Resolution Face Completion with Multiple Controllable Attributes via Fully End-to-End Progressive Generative Adversarial Networks,North Carolina State University,North Carolina State University,"North Carolina State University, Oval Drive, West Raleigh, Raleigh, Wake County, North Carolina, 27695, USA",35.77184965,-78.67408695,edu,
+c8fc81a54ccef6d8111e7253283fc55e7e0f8ebd,High Resolution Face Completion with Multiple Controllable Attributes via Fully End-to-End Progressive Generative Adversarial Networks,North Carolina State University,North Carolina State University,"North Carolina State University, Oval Drive, West Raleigh, Raleigh, Wake County, North Carolina, 27695, USA",35.77184965,-78.67408695,edu,
+c8fc81a54ccef6d8111e7253283fc55e7e0f8ebd,High Resolution Face Completion with Multiple Controllable Attributes via Fully End-to-End Progressive Generative Adversarial Networks,North Carolina State University,North Carolina State University,"North Carolina State University, Oval Drive, West Raleigh, Raleigh, Wake County, North Carolina, 27695, USA",35.77184965,-78.67408695,edu,
+c8fc81a54ccef6d8111e7253283fc55e7e0f8ebd,High Resolution Face Completion with Multiple Controllable Attributes via Fully End-to-End Progressive Generative Adversarial Networks,North Carolina State University,North Carolina State University,"North Carolina State University, Oval Drive, West Raleigh, Raleigh, Wake County, North Carolina, 27695, USA",35.77184965,-78.67408695,edu,
+c88ce5ef33d5e544224ab50162d9883ff6429aa3,Face Match for Family Reunification: Real-World Face Image Retrieval,Central Washington University,Central Washington University,"Central Washington University, Dean Nicholson Boulevard, Ellensburg, Kittitas County, Washington, 98926, USA",47.00646895,-120.53673040,edu,
+c822bd0a005efe4ec1fea74de534900a9aa6fb93,Face recognition committee machines: dynamic vs. static structures,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+c88c21eb9a8e08b66c981db35f6556f4974d27a8,Attribute Learning using Joint Human and Machine Computation,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+c88c21eb9a8e08b66c981db35f6556f4974d27a8,Attribute Learning using Joint Human and Machine Computation,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+c8d15a58794e4b383424d2d057a518689a278b8d,Field Effect Deep Networks for Image Recognition with Incomplete Data,Shenzhen University,Shenzhen University,"深圳大学, 3688, 南海大道, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518060, 中国",22.53521465,113.93159110,edu,
+c8d15a58794e4b383424d2d057a518689a278b8d,Field Effect Deep Networks for Image Recognition with Incomplete Data,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+c8d15a58794e4b383424d2d057a518689a278b8d,Field Effect Deep Networks for Image Recognition with Incomplete Data,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+c8c5944ec503744304e026284182fce26d74cd92,Pose Guided Visual Attention for Action Recognition,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+fbedfe317e60e5ec83c8fd0554bc345404ca90f5,Scene Graph Parsing as Dependency Parsing,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+fbedfe317e60e5ec83c8fd0554bc345404ca90f5,Scene Graph Parsing as Dependency Parsing,Hong Kong University of Science and Technology,Hong Kong University of Science and Technology,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.33863040,114.26203370,edu,
+fb210da5526e967a6aaaa1a4cc1134fa0976ad11,DRPose3D: Depth Ranking in 3D Human Pose Estimation,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+fb210da5526e967a6aaaa1a4cc1134fa0976ad11,DRPose3D: Depth Ranking in 3D Human Pose Estimation,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+fb210da5526e967a6aaaa1a4cc1134fa0976ad11,DRPose3D: Depth Ranking in 3D Human Pose Estimation,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+fb210da5526e967a6aaaa1a4cc1134fa0976ad11,DRPose3D: Depth Ranking in 3D Human Pose Estimation,SenseTime,SenseTime,"China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1号-7",39.99300800,116.32988200,company,"1 Zhongguancun E Rd, Haidian Qu, China"
+fb210da5526e967a6aaaa1a4cc1134fa0976ad11,DRPose3D: Depth Ranking in 3D Human Pose Estimation,East China Normal University,East China Normal University,"华东师范大学, 3663, 中山北路, 曹家渡, 普陀区, 普陀区 (Putuo), 上海市, 200062, 中国",31.22849230,121.40211389,edu,
+fb2cc3501fc89f92f5ee130d66e69854f8a9ddd1,Learning Discriminative Features via Label Consistent Neural Network,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+fba539b41786f837acc5e5d876aaa7c6f3fc376c,Neural Generative Models for 3D Faces with Application in 3D Texture Free Face Recognition,University of Bridgeport,University of Bridgeport,"University of Bridgeport, Park Avenue, Bridgeport Downtown South Historic District, Bridgeport, Fairfield County, Connecticut, 06825, USA",41.16648580,-73.19205640,edu,
+fba539b41786f837acc5e5d876aaa7c6f3fc376c,Neural Generative Models for 3D Faces with Application in 3D Texture Free Face Recognition,University of Bridgeport,University of Bridgeport,"University of Bridgeport, Park Avenue, Bridgeport Downtown South Historic District, Bridgeport, Fairfield County, Connecticut, 06825, USA",41.16648580,-73.19205640,edu,
+fbb6ee4f736519f7231830a8e337b263e91f06fe,Illumination Robust Facial Feature Detection via Decoupled Illumination and Texture Features,University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.54724732,edu,
+fb62fae47f2ccef2e11eefb112765cdbbe4f0400,Tensor-Variate Restricted Boltzmann Machines,Deakin University,Deakin University,"Deakin University, Pigdons Lane, Waurn Ponds, Geelong, City of Greater Geelong, Barwon South West, Victoria, 3216, Australia",-38.19928505,144.30365229,edu,
+fb62fae47f2ccef2e11eefb112765cdbbe4f0400,Tensor-Variate Restricted Boltzmann Machines,Curtin University,Curtin University,"Curtin University, Brand Drive, Waterford, Perth, Western Australia, 6102, Australia",-32.00686365,115.89691775,edu,
+fb87045600da73b07f0757f345a937b1c8097463,Reflective Regression of 2D-3D Face Shape Across Large Pose,University of Hong Kong,University of Hong Kong,"海洋科學研究所 The Swire Institute of Marine Science, 鶴咀道 Cape D'Aguilar Road, 鶴咀低電台 Cape D'Aguilar Low-Level Radio Station, 石澳 Shek O, 芽菜坑村 Nga Choy Hang Tsuen, 南區 Southern District, 香港島 Hong Kong Island, HK, 中国",22.20814690,114.25964115,edu,
+fb85867c989b9ee6b7899134136f81d6372526a9,Learning to Align Images using Weak Geometric Supervision,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+fb85867c989b9ee6b7899134136f81d6372526a9,Learning to Align Images using Weak Geometric Supervision,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+fbfb0de017d57c5f282050dadb77797d97785ba5,Enabling EBGM Face Authentication on mobile devices,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+fb3bce3a6221eb65451584efa898ecbe211bdab6,Video to Text Summary: Joint Video Summarization and Captioning with Recurrent Neural Networks,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+fbd5c9bbfb43aa4734cde7863897600fd42eb8ff,Person Detection in the Restaurant of the Future,University of Groningen,University of Groningen,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland",53.21967825,6.56251482,edu,
+fb193923274c9b028254075c3b6decdae70b2ec0,Learning Social Image Embedding with Deep Multimodal Attention Networks,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+fb193923274c9b028254075c3b6decdae70b2ec0,Learning Social Image Embedding with Deep Multimodal Attention Networks,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+fb193923274c9b028254075c3b6decdae70b2ec0,Learning Social Image Embedding with Deep Multimodal Attention Networks,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+fb193923274c9b028254075c3b6decdae70b2ec0,Learning Social Image Embedding with Deep Multimodal Attention Networks,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+fb193923274c9b028254075c3b6decdae70b2ec0,Learning Social Image Embedding with Deep Multimodal Attention Networks,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+fb193923274c9b028254075c3b6decdae70b2ec0,Learning Social Image Embedding with Deep Multimodal Attention Networks,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+fb193923274c9b028254075c3b6decdae70b2ec0,Learning Social Image Embedding with Deep Multimodal Attention Networks,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+fb193923274c9b028254075c3b6decdae70b2ec0,Learning Social Image Embedding with Deep Multimodal Attention Networks,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+fb193923274c9b028254075c3b6decdae70b2ec0,Learning Social Image Embedding with Deep Multimodal Attention Networks,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+fb54d3c37dc82891ff9dc7dd8caf31de00c40d6a,Beauty and the Burst: Remote Identification of Encrypted Video Streams,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+fb54d3c37dc82891ff9dc7dd8caf31de00c40d6a,Beauty and the Burst: Remote Identification of Encrypted Video Streams,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+fba95853ca3135cc52a4b2bc67089041c2a9408c,Disguised Faces in the Wild,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+fba95853ca3135cc52a4b2bc67089041c2a9408c,Disguised Faces in the Wild,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+fb3844521f9719e4904e5d3d7e1e549e5881b1f4,An Event-Related Potential Study on the Effects of Cannabis on Emotion Processing.,Colorado State University,Colorado State University,"Colorado State University, West Pitkin Street, Woodwest, Fort Collins, Larimer County, Colorado, 80526-2002, USA",40.57093580,-105.08655256,edu,
+fb3844521f9719e4904e5d3d7e1e549e5881b1f4,An Event-Related Potential Study on the Effects of Cannabis on Emotion Processing.,University of Miami,University of Miami,"University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA",25.71733390,-80.27866887,edu,
+fb22404e46dd46b2c2cb9a85227a1ab6a8ae4f52,Micro-analytics for Student Performance Prediction Leveraging fine-grained learning analytics to predict performance,Dublin City University,DUBLIN CITY UNIVERSITY,"Dublin City University Glasnevin Campus, Lower Car Park, Wad, Whitehall A ED, Dublin 9, Dublin, County Dublin, Leinster, D09 FW22, Ireland",53.38522185,-6.25740874,edu,
+fb35a3dadbe6d9a1823eb12e33fccf9a3db3c2a2,Avoidant Responses to Interpersonal Provocation Are Associated with Increased Amygdala and Decreased Mentalizing Network Activity,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+fb084b1fe52017b3898c871514cffcc2bdb40b73,Illumination Normalization of Face Image Based on Illuminant Direction Estimation and Improved Retinex,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+fb084b1fe52017b3898c871514cffcc2bdb40b73,Illumination Normalization of Face Image Based on Illuminant Direction Estimation and Improved Retinex,University Politehnica Timisoara,University POLITEHNICA Timisoara,"UPT, Bulevardul Vasile Pârvan, Elisabetin, Timișoara, Timiș, 300223, România",45.74618900,21.22755075,edu,
+ed74afbd3e36f0fdf54da1e4fcb773c21b5de9b9,An Overview of Computational Approaches for Analyzing Interpretation,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+ed0cf5f577f5030ac68ab62fee1cf065349484cc,Revisiting data normalization for appearance-based gaze estimation,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+ed0cf5f577f5030ac68ab62fee1cf065349484cc,Revisiting data normalization for appearance-based gaze estimation,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+eddc4989cdb20c8cdfb22e989bdb2cb9031d0439,Binge Watching: Scaling Affordance Learning from Sitcoms,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+edde81b2bdd61bd757b71a7b3839b6fef81f4be4,Part Localization using Multi-Proposal Consensus for Fine-Grained Categorization,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+eda6da71c261df17b4b9da5e72aad7893a871a84,Moonshine: Distilling with Cheap Convolutions,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+eda6da71c261df17b4b9da5e72aad7893a871a84,Moonshine: Distilling with Cheap Convolutions,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+eda6da71c261df17b4b9da5e72aad7893a871a84,Moonshine: Distilling with Cheap Convolutions,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+edf98a925bb24e39a6e6094b0db839e780a77b08,Simplex Representation for Subspace Clustering,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+ed173a39f4cd980eef319116b6ba39cec1b37c42,Associative Embedding: End-to-End Learning for Joint Detection and Grouping,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+ed173a39f4cd980eef319116b6ba39cec1b37c42,Associative Embedding: End-to-End Learning for Joint Detection and Grouping,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+ed173a39f4cd980eef319116b6ba39cec1b37c42,Associative Embedding: End-to-End Learning for Joint Detection and Grouping,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+ed9d11e995baeec17c5d2847ec1a8d5449254525,Efficient Gender Classification Using a Deep LDA-Pruned Net,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+edb5813a32ce1167feb263ca2803d0ae934d902c,Invisible Steganography via Generative Adversarial Networks,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+ed432ecd59021a96d8995269a34678c4c2774507,End-to-end Learning of Multi-sensor 3D Tracking by Detection,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+ed07856461da6c7afa4f1782b5b607b45eebe9f6,D Morphable Models as Spatial Transformer Networks,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+ed07856461da6c7afa4f1782b5b607b45eebe9f6,D Morphable Models as Spatial Transformer Networks,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+ed2f711cf9bcd9d7ab039d746af109ed9573421a,Pixel-Wise Classification Method for High Resolution Remote Sensing Imagery Using Deep Neural Networks,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+ed2f711cf9bcd9d7ab039d746af109ed9573421a,Pixel-Wise Classification Method for High Resolution Remote Sensing Imagery Using Deep Neural Networks,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+ed7f167c84372512dcbf9dd38d39879edde6819e,Iterative Visual Reasoning Beyond Convolutions,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+eda8796530fd9ba23b39d50cf349fee01ccee144,Interactive Sketch-Driven Image Synthesis,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+ed1886e233c8ecef7f414811a61a83e44c8bbf50,Deep Alignment Network: A Convolutional Neural Network for Robust Face Alignment,Warsaw University of Technology,Warsaw University of Technology,"Politechnika Warszawska, 1, Plac Politechniki, VIII, Śródmieście, Warszawa, mazowieckie, 00-661, RP",52.22165395,21.00735776,edu,
+edd7504be47ebc28b0d608502ca78c0aea6a65a2,Recurrent Residual Learning for Action Recognition,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+ed388878151a3b841f95a62c42382e634d4ab82e,DenseImage Network: Video Spatial-Temporal Evolution Encoding and Understanding,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+ed388878151a3b841f95a62c42382e634d4ab82e,DenseImage Network: Video Spatial-Temporal Evolution Encoding and Understanding,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+edbb8cce0b813d3291cae4088914ad3199736aa0,Efficient Subspace Segmentation via Quadratic Programming,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+edbb8cce0b813d3291cae4088914ad3199736aa0,Efficient Subspace Segmentation via Quadratic Programming,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+edbb8cce0b813d3291cae4088914ad3199736aa0,Efficient Subspace Segmentation via Quadratic Programming,Singapore Management University,Singapore Management University,"Singapore Management University, Fort Canning Tunnel, Clarke Quay, City Hall, Singapore, Central, 178895, Singapore",1.29500195,103.84909214,edu,
+edff76149ec44f6849d73f019ef9bded534a38c2,Privacy-Preserving Visual Learning Using Doubly Permuted Homomorphic Encryption,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+edff76149ec44f6849d73f019ef9bded534a38c2,Privacy-Preserving Visual Learning Using Doubly Permuted Homomorphic Encryption,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+edff76149ec44f6849d73f019ef9bded534a38c2,Privacy-Preserving Visual Learning Using Doubly Permuted Homomorphic Encryption,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+edff76149ec44f6849d73f019ef9bded534a38c2,Privacy-Preserving Visual Learning Using Doubly Permuted Homomorphic Encryption,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+ed96f2eb1771f384df2349879970065a87975ca7,Adversarial Attacks on Face Detectors using Neural Net based Constrained Optimization,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+ed96f2eb1771f384df2349879970065a87975ca7,Adversarial Attacks on Face Detectors using Neural Net based Constrained Optimization,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+c180f22a9af4a2f47a917fd8f15121412f2d0901,Facial Expression Recognition by ICA with Selective Prior,Japan Advanced Institute of Science and Technology,Japan Advanced Institute of Science and Technology,"JAIST (北陸先端科学技術大学院大学), 石川県道55号小松辰口線, Ishikawa Science Park, 能美市, 石川県, 中部地方, 923-1206, 日本",36.44429490,136.59285870,edu,
+c1a18684feeb2b966e2f03c2622f9a702e14204c,Eye Detection using Wavelets and ANN,University of Nevada,University of Nevada,"Orange 1, Evans Avenue, Reno, Washoe County, Nevada, 89557, USA",39.54694490,-119.81346566,edu,
+c1a18684feeb2b966e2f03c2622f9a702e14204c,Eye Detection using Wavelets and ANN,University of Nevada,University of Nevada,"Orange 1, Evans Avenue, Reno, Washoe County, Nevada, 89557, USA",39.54694490,-119.81346566,edu,
+c146aa6d56233ce700032f1cb179700778557601,3D Morphable Models as Spatial Transformer Networks,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+c146aa6d56233ce700032f1cb179700778557601,3D Morphable Models as Spatial Transformer Networks,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+c1f07ec629be1c6fe562af0e34b04c54e238dcd1,A Novel Facial Feature Localization Method Using Probabilistic-like Output,University of Miami,University of Miami,"University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA",25.71733390,-80.27866887,edu,
+c1bf99570889a43ba2b16e6141b365d74608973d,Comparing social attention in autism and amygdala lesions: effects of stimulus and task condition.,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+c1bf99570889a43ba2b16e6141b365d74608973d,Comparing social attention in autism and amygdala lesions: effects of stimulus and task condition.,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+c1bf99570889a43ba2b16e6141b365d74608973d,Comparing social attention in autism and amygdala lesions: effects of stimulus and task condition.,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+c127ac138a22c155a79f362562a52c070e2b4022,Describing Natural Images Containing Novel Objects with Knowledge Guided Assitance,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+c127ac138a22c155a79f362562a52c070e2b4022,Describing Natural Images Containing Novel Objects with Knowledge Guided Assitance,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+c127ac138a22c155a79f362562a52c070e2b4022,Describing Natural Images Containing Novel Objects with Knowledge Guided Assitance,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+c127ac138a22c155a79f362562a52c070e2b4022,Describing Natural Images Containing Novel Objects with Knowledge Guided Assitance,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+c1ee2e1d53f9ffc9fca5e3e8da7c89dc2a2133d9,A Multifaceted Independent Performance Analysis of Facial Subspace Recognition Algorithms,"COMSATS Institute of Information Technology, Lahore",COMSATS Institute of Information Technology,"COMSATS Institute of Information Technology, Ali Akbar Road, Dawood Residency, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54700, ‏پاکستان‎",31.40063320,74.21372960,edu,
+c1ee2e1d53f9ffc9fca5e3e8da7c89dc2a2133d9,A Multifaceted Independent Performance Analysis of Facial Subspace Recognition Algorithms,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+c1cc2a2a1ab66f6c9c6fabe28be45d1440a57c3d,Dual-Agent GANs for Photorealistic and Identity Preserving Profile Face Synthesis,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+c1cc2a2a1ab66f6c9c6fabe28be45d1440a57c3d,Dual-Agent GANs for Photorealistic and Identity Preserving Profile Face Synthesis,"National University of Defense Technology, China","National University of Defence Technology, Changsha 410000, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.22902090,112.99483204,edu,
+c17ed26650a67e80151f5312fa15b5c423acc797,Multiple-Kernel Based Vehicle Tracking Using 3D Deformable Model and Camera Self-Calibration,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+c17ed26650a67e80151f5312fa15b5c423acc797,Multiple-Kernel Based Vehicle Tracking Using 3D Deformable Model and Camera Self-Calibration,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+c17ed26650a67e80151f5312fa15b5c423acc797,Multiple-Kernel Based Vehicle Tracking Using 3D Deformable Model and Camera Self-Calibration,Institute of Industrial Science,Institute of Industrial Science,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本",36.05238585,140.11852361,edu,
+c17ed26650a67e80151f5312fa15b5c423acc797,Multiple-Kernel Based Vehicle Tracking Using 3D Deformable Model and Camera Self-Calibration,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+c17ed26650a67e80151f5312fa15b5c423acc797,Multiple-Kernel Based Vehicle Tracking Using 3D Deformable Model and Camera Self-Calibration,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+c136e338606acb0e3a0752a75cf1cef7db5de0a6,Combining features and decisions for face detection,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+c13291eaf9ca1b91ef3feb9d58a9a894130631e3,Relation Networks for Object Detection,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+c1aa52ad21d0ec20102eb5402c60ac91c49612bb,Spatial-Temporal Granularity-Tunable Gradients Partition (STGGP) Descriptors for Human Detection,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+c1586ee25e660f31cba0ca9ba5bf39ffcc020aab,A Modulation Module for Multi-task Learning with Applications in Image Retrieval,Northwestern University,Northwestern University,"Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA",42.05511640,-87.67581113,edu,
+c1586ee25e660f31cba0ca9ba5bf39ffcc020aab,A Modulation Module for Multi-task Learning with Applications in Image Retrieval,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+c17a332e59f03b77921942d487b4b102b1ee73b6,Learning an appearance-based gaze estimator from one million synthesised images,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+c17a332e59f03b77921942d487b4b102b1ee73b6,Learning an appearance-based gaze estimator from one million synthesised images,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+c17a332e59f03b77921942d487b4b102b1ee73b6,Learning an appearance-based gaze estimator from one million synthesised images,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+c11c89e303a6b46de324efa01a0f749b4246c516,A Novel Approach to Design a Customized Image Editor and Real-Time Control of Hand-Gesture Mimicking Robotic Movements on an I-Robot Create,Oklahoma State University,Oklahoma State University,"Walmart East Bus Stop, East Virginia Avenue, Stillwater, Payne County, Oklahoma, 74075, USA",36.12447560,-97.05004383,edu,
+c1980e5d5c998ddec31cda9da148c354406a5eca,Jointly Optimizing 3D Model Fitting and Fine-Grained Classification,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+c1980e5d5c998ddec31cda9da148c354406a5eca,Jointly Optimizing 3D Model Fitting and Fine-Grained Classification,University of Maryland College Park,University of Maryland College Park,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",38.99203005,-76.94610290,edu,
+c1cf5dda56c72b65e86f3a678f76644f22212748,Face Hallucination via Semi-kernel Partial Least Squares,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+c16479cfa79fe9996ca16fc30add9099815abb04,Robust Face Recognition after Plastic Surgery Using Local Region Analysis,George Mason University,George Mason University,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA",38.83133325,-77.30798839,edu,
+c10b0a6ba98aa95d740a0d60e150ffd77c7895ad,Deep Fisher Faces,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+c1298120e9ab0d3764512cbd38b47cd3ff69327b,Disguised Faces in the Wild,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+c1298120e9ab0d3764512cbd38b47cd3ff69327b,Disguised Faces in the Wild,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+c64da9bbdc9942decc4566f89e13d991a6303683,Multi-Task Learning Using Uncertainty to Weigh Losses for Scene Geometry and Semantics,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+c64da9bbdc9942decc4566f89e13d991a6303683,Multi-Task Learning Using Uncertainty to Weigh Losses for Scene Geometry and Semantics,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+c64da9bbdc9942decc4566f89e13d991a6303683,Multi-Task Learning Using Uncertainty to Weigh Losses for Scene Geometry and Semantics,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+c65e4ffa2c07a37b0bb7781ca4ec2ed7542f18e3,Recurrent Neural Networks for Facial Action Unit Recognition from Image Sequences,University of Witwatersrand,University of Witwatersrand,"University of the Witwatersrand, Empire Road, Johannesburg Ward 60, Johannesburg, City of Johannesburg Metropolitan Municipality, Gauteng, 2001, South Africa",-26.18888130,28.02479073,edu,
+c65e4ffa2c07a37b0bb7781ca4ec2ed7542f18e3,Recurrent Neural Networks for Facial Action Unit Recognition from Image Sequences,University of the Western Cape,University of the Western Cape,"University of the Western Cape, Park Road, Cape Town Ward 9, Bellville, City of Cape Town, Western Cape, 7493, South Africa",-33.93277620,18.62915407,edu,
+c65e4ffa2c07a37b0bb7781ca4ec2ed7542f18e3,Recurrent Neural Networks for Facial Action Unit Recognition from Image Sequences,Middle East Technical University,Middle East Technical University,"ODTÜ, 1, 1591.sk(315.sk), Çiğdem Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87549675,32.78553506,edu,
+c69ea9367e1244bfa5d3fc290b8a33be3abd8c24,"Many faces, one rule: the role of perceptual expertise in infants’ sequential rule learning",Northwestern University,Northwestern University,"Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA",42.05511640,-87.67581113,edu,
+c644a4fb7f8d30b7c7c0358e2b66a53553fb534c,Image Information Distance Analysis and Applications,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+c6096986b4d6c374ab2d20031e026b581e7bf7e9,A Framework for Using Context to Understand Images of People,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+c6e99ff40ccae0d7ce8e32666ed7f75e3a381d9b,How does the topic of conversation affect verbal exchange and eye gaze? A comparison between typical development and high-functioning autism.,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+c6e99ff40ccae0d7ce8e32666ed7f75e3a381d9b,How does the topic of conversation affect verbal exchange and eye gaze? A comparison between typical development and high-functioning autism.,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+c6608fdd919f2bc4f8d7412bab287527dcbcf505,Unsupervised Alignment of Natural Language with Video,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+c63b614865bd9e5b4944894083e5e9d4aba82d86,Large Scale Similarity Learning Using Similar Pairs for Person Verification,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+c6bceb0eb8aded28edbe2607ecbe2f5ee2b57bdc,Random projections on manifolds of Symmetric Positive Definite matrices for image classification,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+c6bceb0eb8aded28edbe2607ecbe2f5ee2b57bdc,Random projections on manifolds of Symmetric Positive Definite matrices for image classification,Queensland University of Technology,Queensland University of Technology,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.47715625,153.02841004,edu,
+c6657c1263bac59b006d1da1174ec4bcea0dff3d,Global-local visual processing in high functioning children with autism: structural vs. implicit task biases.,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+c6657c1263bac59b006d1da1174ec4bcea0dff3d,Global-local visual processing in high functioning children with autism: structural vs. implicit task biases.,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+c6657c1263bac59b006d1da1174ec4bcea0dff3d,Global-local visual processing in high functioning children with autism: structural vs. implicit task biases.,McMaster University,McMaster University,"McMaster University, Westdale, Hamilton, Ontario, Canada",43.26336945,-79.91809684,edu,
+c6657c1263bac59b006d1da1174ec4bcea0dff3d,Global-local visual processing in high functioning children with autism: structural vs. implicit task biases.,University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+c6657c1263bac59b006d1da1174ec4bcea0dff3d,Global-local visual processing in high functioning children with autism: structural vs. implicit task biases.,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+c66eb0e17076bff559d8f94a8f967d52db2bab01,Video Classification System for Moments in Time Challenge 2018,University of New South Wales,University of New South Wales,"UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia",-33.91758275,151.23124025,edu,
+c66eb0e17076bff559d8f94a8f967d52db2bab01,Video Classification System for Moments in Time Challenge 2018,University of New South Wales,University of New South Wales,"UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia",-33.91758275,151.23124025,edu,
+c62c910264658709e9bf0e769e011e7944c45c90,Recent Progress of Face Image Synthesis,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+c6e2641d99c72bbffef8a97ec019dd9379dd8b3a,Temporal Action Detection by Joint Identification-Verification,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+c678920facffd35853c9d185904f4aebcd2d8b49,Learning to Anonymize Faces for Privacy Preserving Action Detection,"University of California, Davis","University of California, Davis","University of California, Davis, Apiary Drive, Yolo County, California, 95616-5270, USA",38.53363490,-121.79077264,edu,
+c660f261615f4a0185fda548b0ffb0e997a918ea,Finding Human Poses in Videos Using Concurrent Matching and Segmentation,Boston College,Boston College,"Boston College, 140, Commonwealth Avenue, Chestnut Hill, Newton, Middlesex County, Massachusetts, 02467, USA",42.33544810,-71.16813864,edu,
+c660500b49f097e3af67bb14667de30d67db88e3,Facial Asymmetry Quantification for Expression Invariant Human Identification,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+c660500b49f097e3af67bb14667de30d67db88e3,Facial Asymmetry Quantification for Expression Invariant Human Identification,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+c660500b49f097e3af67bb14667de30d67db88e3,Facial Asymmetry Quantification for Expression Invariant Human Identification,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+c6241e6fc94192df2380d178c4c96cf071e7a3ac,Action recognition with trajectory-pooled deep-convolutional descriptors,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+c6241e6fc94192df2380d178c4c96cf071e7a3ac,Action recognition with trajectory-pooled deep-convolutional descriptors,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+c6ce8eb37dafed09e1c55735fd1f1e9dc9c6bfe2,Joint background reconstruction and foreground segmentation via a two-stage convolutional neural network,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+c6ce8eb37dafed09e1c55735fd1f1e9dc9c6bfe2,Joint background reconstruction and foreground segmentation via a two-stage convolutional neural network,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8,Anxiety promotes memory for mood-congruent faces but does not alter loss aversion.,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8,Anxiety promotes memory for mood-congruent faces but does not alter loss aversion.,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+c6f58adf4a5ee8499cbc9b9bc1e6f1c39f1f8eae,Learn to Pay Attention,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+c6d78245ab09c5690e483962dd51e0408fbf5cc7,Neural responses to emotional expression information in high- and low-spatial frequency in autism: evidence for a cortical dysfunction,University of Geneva,University of Geneva,"University of Chicago-Yerkes Observatory, 373, West Geneva Street, Williams Bay, Walworth County, Wisconsin, 53191, USA",42.57054745,-88.55578627,edu,
+c6481bdef3a75f74b7c28bb957755f75003d869d,"MAPTrack - A Probabilistic Real Time Tracking Framework by Integrating Motion, Appearance and Position Models",Louisiana State University,Louisiana State University,"LSU, Gourrier Avenue, Baton Rouge, East Baton Rouge Parish, Louisiana, 70803, USA",30.40550035,-91.18620474,edu,
+c666aea88c48b287080de410d4830f64f0b5ca2a,Improved Object Detection and Pose Using Part-Based Models,Lund University,Lund University,"TEM at Lund University, 9, Klostergatan, Stadskärnan, Centrum, Lund, Skåne, Götaland, 22222, Sverige",55.70395710,13.19020110,edu,
+c6df59e1d77d84f418666235979cbce6d400d3ca,Spectral Clustering Based on Local PCA,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+c6df59e1d77d84f418666235979cbce6d400d3ca,Spectral Clustering Based on Local PCA,University of Minnesota,University of Minnesota,"WeismanArt, 333, East River Parkway, Marcy-Holmes, Phillips, Minneapolis, Hennepin County, Minnesota, 55455, USA",44.97308605,-93.23708813,edu,
+c6df59e1d77d84f418666235979cbce6d400d3ca,Spectral Clustering Based on Local PCA,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+c6542d17b212d808cba48cd2b1536446b14e38b3,You said that?,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+c65a394118d34beda5dd01ae0df163c3db88fceb,Finding the Best Picture: Cross-Media Retrieval of Content,Katholieke Universiteit Leuven,Katholieke Universiteit Leuven,"Laboratorium voor Bos, natuur en landschap, 102, Vital Decosterstraat, Sint-Maartensdal, Leuven, Vlaams-Brabant, Vlaanderen, 3000, België / Belgique / Belgien",50.88306860,4.70195030,edu,
+ec8ec2dfd73cf3667f33595fef84c95c42125945,Pose-Invariant Face Alignment with a Single CNN,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+ec792ad2433b6579f2566c932ee414111e194537,Person Transfer GAN to Bridge Domain Gap for Person Re-Identification,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+ec792ad2433b6579f2566c932ee414111e194537,Person Transfer GAN to Bridge Domain Gap for Person Re-Identification,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+ec9f036195ccfdac51b6daf241c45ce7010d0d78,"Towards Open Ended Learning: Budgets, Model Selection, and Representation",California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+ec0177cfdee435c6522ca4ee8a5f97ac0412472e,Reconstruction of images from Gabor graphs with applications in facial image processing,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+ec0177cfdee435c6522ca4ee8a5f97ac0412472e,Reconstruction of images from Gabor graphs with applications in facial image processing,Ruhr-University Bochum,Ruhr-University Bochum,"RUB, 150, Universitätsstraße, Ruhr-Universität, Querenburg, Bochum-Süd, Bochum, Regierungsbezirk Arnsberg, Nordrhein-Westfalen, 44801, Deutschland",51.44415765,7.26096541,edu,
+ec83c63e28ae2a658bc76a6750e078c3a54b9760,Deep Descriptor Transforming for Image Co-Localization,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+ec83c63e28ae2a658bc76a6750e078c3a54b9760,Deep Descriptor Transforming for Image Co-Localization,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+ec488139105565477bb8a3c6cb3c874c35fcb2b6,Generative Adversarial Talking Head: Bringing Portraits to Life with a Weakly Supervised Neural Network,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+ece80165040e9d8304c5dd808a6cdb29c8ecbf5b,Looking at People Using Partial Least Squares,University of Maryland College Park,University of Maryland College Park,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",38.99203005,-76.94610290,edu,
+ece80165040e9d8304c5dd808a6cdb29c8ecbf5b,Looking at People Using Partial Least Squares,University of Maryland College Park,University of Maryland College Park,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",38.99203005,-76.94610290,edu,
+ecd473cfbce5f058a3c9388b220b21de1ece8eb8,Trajectory Shape Analysis and Anomaly Detection Utilizing Information Theory Tools,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+ecc2ea05877d720b725fb89bc3b0586a51cabdc7,Object Recognition in 3D Point Clouds Using Web Data and Domain Adaptation,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+ec2ee72168368537ddb0eaac50f9e8c1b1d52a8c,Classification and Representation via Separable Subspaces: Performance Limits and Algorithms,Wayne State University,Wayne State University,"Parking Structure 3, East Warren Avenue, New Center, Detroit, Wayne County, Michigan, 48236, USA",42.35775700,-83.06286711,edu,
+ec949cb716fb33cb9273fc90f36b0351056ef0e0,An Abnormal Crowd Behavior Detection Algorithm Based on Fluid Mechanics,"Sichuan University, Chengdu","Sichuan Univ., Chengdu","四川大学(华西校区), 校东路, 武侯区, 武侯区 (Wuhou), 成都市 / Chengdu, 四川省, 610014, 中国",30.64276900,104.06751175,edu,
+ec949cb716fb33cb9273fc90f36b0351056ef0e0,An Abnormal Crowd Behavior Detection Algorithm Based on Fluid Mechanics,"Sichuan University, Chengdu","Sichuan Univ., Chengdu","四川大学(华西校区), 校东路, 武侯区, 武侯区 (Wuhou), 成都市 / Chengdu, 四川省, 610014, 中国",30.64276900,104.06751175,edu,
+ec9c20ed6cce15e9b63ac96bb5a6d55e69661e0b,Robust Pedestrian Detection for Semi-automatic Construction of a Crowded Person Re-Identification Dataset,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+ec9c20ed6cce15e9b63ac96bb5a6d55e69661e0b,Robust Pedestrian Detection for Semi-automatic Construction of a Crowded Person Re-Identification Dataset,Jiangnan University,Jiangnan University,"江南大学站, 蠡湖大道, 滨湖区, 南场村, 滨湖区 (Binhu), 无锡市 / Wuxi, 江苏省, 214121, 中国",31.48542550,120.27395810,edu,
+ece390deb6576dbc1fdf132f182a1cc75eb67832,Interleaved Group Convolutions for Deep Neural Networks,Microsoft Research Asia,"Microsoft Live Labs Research, China","Microsoft Research Asia, 5, Danling Street, Haidian District, Beijing, 100080, 中国",39.98074600,116.30591600,company,
+ece390deb6576dbc1fdf132f182a1cc75eb67832,Interleaved Group Convolutions for Deep Neural Networks,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+ec5f89e822d9fcbc7b7422dc401478fc29f9c02d,Those Virtual People all Look the Same to me: Computer-Rendered Faces Elicit a Higher False Alarm Rate Than Real Human Faces in a Recognition Memory Task,Maastricht University,Maastricht University,"UNS60, Professor Ten Hoorlaan, Randwyck, Maastricht, Limburg, Nederland, 6229EV, Nederland",50.83367120,5.71589000,edu,
+ec5f89e822d9fcbc7b7422dc401478fc29f9c02d,Those Virtual People all Look the Same to me: Computer-Rendered Faces Elicit a Higher False Alarm Rate Than Real Human Faces in a Recognition Memory Task,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+ec3472acc24fe5ef9eb07a31697f2cd446c8facc,"PixelNet: Representation of the pixels, by the pixels, and for the pixels",Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+ec05078be14a11157ac0e1c6b430ac886124589b,Longitudinal Face Aging in the Wild - Recent Deep Learning Approaches,Concordia University,Concordia University,"Concordia University, 2811, Northeast Holman Street, Concordia, Portland, Multnomah County, Oregon, 97211, USA",45.57022705,-122.63709346,edu,
+ec05078be14a11157ac0e1c6b430ac886124589b,Longitudinal Face Aging in the Wild - Recent Deep Learning Approaches,Concordia University,Concordia University,"Concordia University, 2811, Northeast Holman Street, Concordia, Portland, Multnomah County, Oregon, 97211, USA",45.57022705,-122.63709346,edu,
+ec05078be14a11157ac0e1c6b430ac886124589b,Longitudinal Face Aging in the Wild - Recent Deep Learning Approaches,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+ec05078be14a11157ac0e1c6b430ac886124589b,Longitudinal Face Aging in the Wild - Recent Deep Learning Approaches,Concordia University,Concordia University,"Concordia University, 2811, Northeast Holman Street, Concordia, Portland, Multnomah County, Oregon, 97211, USA",45.57022705,-122.63709346,edu,
+4e7ed13e541b8ed868480375785005d33530e06d,Face recognition using deep multi-pose representations,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+4e7ed13e541b8ed868480375785005d33530e06d,Face recognition using deep multi-pose representations,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+4e7ed13e541b8ed868480375785005d33530e06d,Face recognition using deep multi-pose representations,Open University,The Open University,"The Open University, East Lane, Walton, Monkston, Milton Keynes, South East, England, MK7 6AE, UK",52.02453775,-0.70927481,edu,
+4e1ade72128a6e530577dbbe69bd0afa0ef0e140,Pose Partition Networks for Multi-person Pose Estimation,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+4e1ade72128a6e530577dbbe69bd0afa0ef0e140,Pose Partition Networks for Multi-person Pose Estimation,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+4e8b61165c8908284619acc62c46c7afac85d8a0,Deep unsupervised multi-view detection of video game stream highlights,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+4e8b61165c8908284619acc62c46c7afac85d8a0,Deep unsupervised multi-view detection of video game stream highlights,"London, United Kingdom","London, United Kingdom","London, Greater London, England, SW1A 2DU, UK",51.50732190,-0.12764740,edu,
+4e8b61165c8908284619acc62c46c7afac85d8a0,Deep unsupervised multi-view detection of video game stream highlights,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+4e8b61165c8908284619acc62c46c7afac85d8a0,Deep unsupervised multi-view detection of video game stream highlights,"London, United Kingdom","London, United Kingdom","London, Greater London, England, SW1A 2DU, UK",51.50732190,-0.12764740,edu,
+4eba5f6824f29533e0cd2660e49f2699c7e6501f,Gradient Band-based Adversarial Training for Generalized Attack Immunity of A3C Path Finding,Beijing Jiaotong University,Beijing Jiaotong University,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国",39.94976005,116.33629046,edu,
+4e30107ee6a2e087f14a7725e7fc5535ec2f5a5f,Представление новостных сюжетов с помощью событийных фотографий (News Stories Representation Using Event Photos),Lomonosov Moscow State University,Lomonosov Moscow State University,"МГУ, улица Академика Хохлова, Московский государственный университет им. М. В. Ломоносова, район Раменки, Западный административный округ, Москва, ЦФО, 119234, РФ",55.70229715,37.53179777,edu,
+4e5dc3b397484326a4348ccceb88acf309960e86,Secure Access Control and Large Scale Robust Representation for Online Multimedia Event Detection,South China University of Technology,South China University of Technology,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.05020420,113.39880323,edu,
+4e5dc3b397484326a4348ccceb88acf309960e86,Secure Access Control and Large Scale Robust Representation for Online Multimedia Event Detection,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+4e5dc3b397484326a4348ccceb88acf309960e86,Secure Access Control and Large Scale Robust Representation for Online Multimedia Event Detection,South China University of Technology,South China University of Technology,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.05020420,113.39880323,edu,
+4e9dd2f7982dc71db5505dba7d7264d263dd93d6,Learning a sequential search for landmarks,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+4e6c17966efae956133bf8f22edeffc24a0470c1,Face Classification: A Specialized Benchmark Study,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+4e6c17966efae956133bf8f22edeffc24a0470c1,Face Classification: A Specialized Benchmark Study,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+4e6c17966efae956133bf8f22edeffc24a0470c1,Face Classification: A Specialized Benchmark Study,Macau University of Science and Technology,Macau University of Science and Technology,"Universidade de Ciência e Tecnologia de Macau 澳門科技大學 Macau University of Science and Technology, 偉龍馬路 Avenida Wai Long, 氹仔Taipa, 氹仔舊城區 Vila de Taipa, 嘉模堂區 Nossa Senhora do Carmo, 氹仔 Taipa, 澳門 Macau, 853, 中国",22.15263985,113.56803206,edu,
+4e1836914bbcf94dc00e604b24b1b0d6d7b61e66,Dynamic Facial Expression Recognition Using Boosted Component-Based Spatiotemporal Features and Multi-classifier Fusion,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+4e1836914bbcf94dc00e604b24b1b0d6d7b61e66,Dynamic Facial Expression Recognition Using Boosted Component-Based Spatiotemporal Features and Multi-classifier Fusion,Southeast University,Southeast University,"SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国",32.05752790,118.78682252,edu,
+4ed4143034fc6303737c7ad5118a72d9a5d12cf2,Web Survey Gamification - Increasing Data Quality in Web Surveys by using Game Design Elements,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+4ed4143034fc6303737c7ad5118a72d9a5d12cf2,Web Survey Gamification - Increasing Data Quality in Web Surveys by using Game Design Elements,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+4ed4143034fc6303737c7ad5118a72d9a5d12cf2,Web Survey Gamification - Increasing Data Quality in Web Surveys by using Game Design Elements,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+4e4fa167d772f34dfffc374e021ab3044566afc3,Learning Low-Rank Representations with Classwise Block-Diagonal Structure for Robust Face Recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+4e4fa167d772f34dfffc374e021ab3044566afc3,Learning Low-Rank Representations with Classwise Block-Diagonal Structure for Robust Face Recognition,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+4e4fa167d772f34dfffc374e021ab3044566afc3,Learning Low-Rank Representations with Classwise Block-Diagonal Structure for Robust Face Recognition,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+4e5e6d405331aa4aafc88e3ab31c7f45720c00b2,Pose Tolerant Surface Alignment for 3D Face Verification with Symmetry Test Reject Option,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+4ed54d5093d240cc3644e4212f162a11ae7d1e3b,Learning Visual Compound Models from Parallel Image-Text Datasets,Bielefeld University,Bielefeld University,"Fachhochschule Bielefeld FB Gestaltung, 3, Lampingstraße, Mitte, Bielefeld, Regierungsbezirk Detmold, Nordrhein-Westfalen, 33615, Deutschland",52.02804210,8.51148270,edu,
+4ed54d5093d240cc3644e4212f162a11ae7d1e3b,Learning Visual Compound Models from Parallel Image-Text Datasets,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+4efaa2a1a14ba6e8bea779eae49d6220fc771f2a,"Individual Differences in the Speed of Facial Emotion Recognition Show Little Specificity but Are Strongly Related with General Mental Speed: Psychometric, Neural and Genetic Evidence",Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+4efaa2a1a14ba6e8bea779eae49d6220fc771f2a,"Individual Differences in the Speed of Facial Emotion Recognition Show Little Specificity but Are Strongly Related with General Mental Speed: Psychometric, Neural and Genetic Evidence",University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+4efaa2a1a14ba6e8bea779eae49d6220fc771f2a,"Individual Differences in the Speed of Facial Emotion Recognition Show Little Specificity but Are Strongly Related with General Mental Speed: Psychometric, Neural and Genetic Evidence",Universität Hamburg,Universität Hamburg,"Informatikum, 30, Vogt-Kölln-Straße, Stellingen, Eimsbüttel, Hamburg, 22527, Deutschland",53.59948200,9.93353436,edu,
+4e8c608fc4b8198f13f8a68b9c1a0780f6f50105,How Related Exemplars Help Complex Event Detection in Web Videos?,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+4e8c608fc4b8198f13f8a68b9c1a0780f6f50105,How Related Exemplars Help Complex Event Detection in Web Videos?,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+4e8c608fc4b8198f13f8a68b9c1a0780f6f50105,How Related Exemplars Help Complex Event Detection in Web Videos?,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+4e559f23bcf502c752f2938ad7f0182047b8d1e4,A Fast Approximate AIB Algorithm for Distributional Word Clustering,University of Wollongong,University of Wollongong,"University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia",-34.40505545,150.87834655,edu,
+4ed2d7ecb34a13e12474f75d803547ad2ad811b2,Common Action Discovery and Localization in Unconstrained Videos,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+4ee9efbeb26f684557fd8d39afc8e90e9958a495,Multimodal Unsupervised Image-to-Image Translation,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+4e8168fbaa615009d1618a9d6552bfad809309e9,Deep Convolutional Neural Network Features and the Original Image,University of Texas at Dallas,University of Texas at Dallas,"University of Texas at Dallas, Richardson, Dallas County, Texas, 78080, USA",32.98207990,-96.75662780,edu,
+4e8168fbaa615009d1618a9d6552bfad809309e9,Deep Convolutional Neural Network Features and the Original Image,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+4e370791915e4b56603451b4fd1bd0105f1bcefb,Palmprint and Face Multi-Modal Biometric Recognition Based on SDA-GSVD and Its Kernelization,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+4e370791915e4b56603451b4fd1bd0105f1bcefb,Palmprint and Face Multi-Modal Biometric Recognition Based on SDA-GSVD and Its Kernelization,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+4e370791915e4b56603451b4fd1bd0105f1bcefb,Palmprint and Face Multi-Modal Biometric Recognition Based on SDA-GSVD and Its Kernelization,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+4e99eaf58aa5fb4665dffec0009e2464feb0f66c,An Indexing Method for Efficient Model-Based Search,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+4e0636a1b92503469b44e2807f0bb35cc0d97652,Adversarial Localization Network,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+4e0636a1b92503469b44e2807f0bb35cc0d97652,Adversarial Localization Network,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+4e0636a1b92503469b44e2807f0bb35cc0d97652,Adversarial Localization Network,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+4eb2903ecfc5dee98c5671c9459bcea71c59c79d,Appearance-Based 3D Gaze Estimation with Personal Calibration,"KTH Royal Institute of Technology, Stockholm","KTH Royal Institute of Technology, Stockholm","KTH, Teknikringen, Lärkstaden, Norra Djurgården, Östermalms stadsdelsområde, Sthlm, Stockholm, Stockholms län, Svealand, 114 28, Sverige",59.34986645,18.07063213,edu,
+4efb9e426e349968523e1b1cdbbdbfd3e1912f84,Mean Box Pooling: A Rich Image Representation and Output Embedding for the Visual Madlibs Task,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+4e29533438d5c612ab24b80c840446eafcb5995f,Tradeoffs in Neural Variational Inference,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+4ea4116f57c5d5033569690871ba294dc3649ea5,Multi-View Face Alignment Using 3D Shape Model for View Estimation,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+4ea9bfcb7791cc07882f78b4747b8c8064ec6f7d,Exemplar Cut,"University of California, Merced","University of California, Merced","University of California, Merced, Ansel Adams Road, Merced County, California, USA",37.36566745,-120.42158888,edu,
+4e47a5eee68b2828bf7d36e7ef70e1d0f6920678,Analysis of the CMU Localization Algorithm Under Varied Conditions,Robotics Institute,Robotics Institute,"Robotics Institute, Carnegie Mellon University, 5000, Forbes Avenue, Pittsburgh, Allegheny County, Pennsylvania, 15213, USA",40.44331600,-79.94550600,edu,
+4e47a5eee68b2828bf7d36e7ef70e1d0f6920678,Analysis of the CMU Localization Algorithm Under Varied Conditions,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+4e4d034caa72dce6fca115e77c74ace826884c66,Sex differences in facial emotion recognition across varying expression intensity levels from videos,University of Bath,University of Bath,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK",51.37914420,-2.32523320,edu,
+4e2873a2ea525507f5cd08e54ba363b06bc10e0a,Multi-Modal Information Extraction in a Question-Answer Framework,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+4e2873a2ea525507f5cd08e54ba363b06bc10e0a,Multi-Modal Information Extraction in a Question-Answer Framework,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+4e2873a2ea525507f5cd08e54ba363b06bc10e0a,Multi-Modal Information Extraction in a Question-Answer Framework,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+4ed613b6f0427d3ec4cad6c51dcc451786812959,Spatio-Temporal Attention Models for Grounded Video Captioning,Lund University,Lund University,"TEM at Lund University, 9, Klostergatan, Stadskärnan, Centrum, Lund, Skåne, Götaland, 22222, Sverige",55.70395710,13.19020110,edu,
+4e614e344ecbb36770d45fc14d3b5152b653aa97,Exploration on Grounded Word Embedding: Matching Words and Images with Image-Enhanced Skip-Gram Model,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+4ed727bfef7d61023d391fdcb95cfa1df901be5e,Face Recognition using Simplified Probabilistic Linear Discriminant Analysis,University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.05015580,14.46907327,edu,
+4e4ba3783e7fe7dcf4a3b4de1fe1d5b603029f3a,Efficient Iris Spoof Detection via Boosted Local Binary Patterns,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+4e39951c2f8b4600239dec7e10b7ee1ba3a000dd,The Body as a Tool for Anger Awareness—Differential Effects of Angry Facial and Bodily Expressions on Suppression from Awareness,Maastricht University,Maastricht University,"UNS60, Professor Ten Hoorlaan, Randwyck, Maastricht, Limburg, Nederland, 6229EV, Nederland",50.83367120,5.71589000,edu,
+4e71ac257b104bbc161331ab2a66e86515427146,Deep Bimodal Regression for Apparent Personality Analysis,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+4e6305b9c9ec58db62548c666357521fb4b3f6f9,Iterative Crowd Counting,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+4e91defcc0b5ddf18fa70c34d91ce94a0be0f4d7,Causalgan: Learning Causal Implicit Gener-,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+4e6ee936eb50dd032f7138702fa39b7c18ee8907,The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set,Dartmouth College,Dartmouth College,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA",43.70479270,-72.29259090,edu,
+4e6ee936eb50dd032f7138702fa39b7c18ee8907,The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+2081f94fb82ab8d05ca92742fe949fc97147f926,CoQA: A Conversational Question Answering Challenge,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+205e895e03969c96f3c482b0bd26308b16a12bd0,Image Captioning with an Intermediate Attributes Layer,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+20d397c8d8865133ca7bbbd824e217e9fbf5a51a,Binarized Convolutional Neural Networks for Efficient Inference on GPUs,Tampere University of Technology,Tampere University of Technology,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi",61.44964205,23.85877462,edu,
+20d17ffeb8adcbbe7cfe7b73cc998a1d20a91553,Unsupervised Class-Specific Deblurring,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+20f8057b602ae9e24ef4ee436250f35dd9757327,AMNet: Memorability Estimation with Attention,Kingston University,Kingston University,"Kingston University, Kingston Hill, Kingston Vale, Kingston-upon-Thames, London, Greater London, England, KT2 7TF, UK",51.42930860,-0.26840440,edu,
+20b994a78cd1db6ba86ea5aab7211574df5940b3,Enriched Long-Term Recurrent Convolutional Network for Facial Micro-Expression Recognition,Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.64185301,edu,
+20b994a78cd1db6ba86ea5aab7211574df5940b3,Enriched Long-Term Recurrent Convolutional Network for Facial Micro-Expression Recognition,Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.64185301,edu,
+20b994a78cd1db6ba86ea5aab7211574df5940b3,Enriched Long-Term Recurrent Convolutional Network for Facial Micro-Expression Recognition,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+2050847bc7a1a0453891f03aeeb4643e360fde7d,Accio: A Data Set for Face Track Retrieval in Movies Across Age,Istanbul Technical University,Istanbul Technical University,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye",41.10427915,29.02231159,edu,
+2050847bc7a1a0453891f03aeeb4643e360fde7d,Accio: A Data Set for Face Track Retrieval in Movies Across Age,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+2056fb4cfe4aaa8a5d833f7494589499c2c5e8f5,Dependent Choices in Employee Selection: Modeling Choice Compensation and Consistency,University of Geneva,University of Geneva,"Université de Genève, 24, Rue du Général-Dufour, Genève, 1211, Schweiz/Suisse/Svizzera/Svizra",46.19926000,6.14022000,edu,
+2056fb4cfe4aaa8a5d833f7494589499c2c5e8f5,Dependent Choices in Employee Selection: Modeling Choice Compensation and Consistency,University of Basel,University of Basel,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra",47.56126510,7.57529610,edu,
+2088d93e7f4fa27b8498428d2ed64f144ab8cf3e,Deep Regression Tracking with Shrinkage Loss,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+2088d93e7f4fa27b8498428d2ed64f144ab8cf3e,Deep Regression Tracking with Shrinkage Loss,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+20044724665208227ad54d9ea98b08dfb1420689,Evaluation of local features for person re-identification in image sequences,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+20a16efb03c366fa4180659c2b2a0c5024c679da,Screening Rules for Overlapping Group Lasso,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+20da5315cfe5eab69d99bbda270e73ab488a49ba,Attentive Sequence to Sequence Translation for Localizing Clips of Interest by Natural Language Descriptions,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+20da5315cfe5eab69d99bbda270e73ab488a49ba,Attentive Sequence to Sequence Translation for Localizing Clips of Interest by Natural Language Descriptions,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+20da5315cfe5eab69d99bbda270e73ab488a49ba,Attentive Sequence to Sequence Translation for Localizing Clips of Interest by Natural Language Descriptions,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+20da5315cfe5eab69d99bbda270e73ab488a49ba,Attentive Sequence to Sequence Translation for Localizing Clips of Interest by Natural Language Descriptions,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+203ea8ab1d9c48977be97e6caf3fdbcc84101354,Video Segmentation by Tracking Many Figure-Ground Segments,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+20e505cef6d40f896e9508e623bfc01aa1ec3120,Fast Online Incremental Attribute-based Object Classification using Stochastic Gradient Descent and Self- Organizing Incremental Neural Network,Tokyo Institute of Technology,Tokyo Institute of Technology,"東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本",35.51675380,139.48342251,edu,
+205e4d6e0de81c7dd6c83b737ffdd4519f4f7ffa,A model-based facial expression recognition algorithm using Principal Components Analysis,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+20adfee9f931b48ad6ae236dc50b8106573d03f7,"AREA Annotation , Recognition and Evaluation of Actions PROCEEDINGS",University of Groningen,University of Groningen,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland",53.21967825,6.56251482,edu,
+20b437dc4fc44c17f131713ffcbb4a8bd672ef00,Head Pose Tracking from RGBD Sensor Based on Direct Motion Estimation,Warsaw University of Technology,Warsaw University of Technology,"Politechnika Warszawska, 1, Plac Politechniki, VIII, Śródmieście, Warszawa, mazowieckie, 00-661, RP",52.22165395,21.00735776,edu,
+206e24f7d4b3943b35b069ae2d028143fcbd0704,Learning Structure and Strength of CNN Filters for Small Sample Size Training,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+2068f66a10254d457cdb5fab74b0128b24bfdb65,Learning Language-Visual Embedding for Movie Understanding with Natural-Language,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+20dbdf02497aa84510970d0f5e8b599073bca1bc,Ask Me Anything: Free-Form Visual Question Answering Based on Knowledge from External Sources,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+20fa38fca576d983b1658127d5cf058962b23179,Image as Data: Automated Visual Content Analysis for Political Science,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+208a2c50edb5271a050fa9f29d3870f891daa4dc,The resolution of facial expressions of emotion.,Ohio State University,The Ohio State University,"The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA",40.00471095,-83.02859368,edu,
+208a2c50edb5271a050fa9f29d3870f891daa4dc,The resolution of facial expressions of emotion.,Ohio State University,The Ohio State University,"The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA",40.00471095,-83.02859368,edu,
+207798603e3089a1c807c93e5f36f7767055ec06,Modeling the correlation between modality semantics and facial expressions,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+207798603e3089a1c807c93e5f36f7767055ec06,Modeling the correlation between modality semantics and facial expressions,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+207798603e3089a1c807c93e5f36f7767055ec06,Modeling the correlation between modality semantics and facial expressions,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+2031b062f4c41f43a32835430b1d55a422baa564,VNect: real-time 3D human pose estimation with a single RGB camera,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+20be15dac7d8a5ba4688bf206ad24cab57d532d6,Face Shape Recovery and Recognition Using a Surface Gradient Based Statistical Model,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+2042aed660796b14925db17c0a8b9fbdd7f3ebac,Saliency in Crowd,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+20daf06cea3dbc0b92f7ba4adb8fe7d95d27455e,"Robust Face Recognition using Wavelet and DCT based Lighting Normalization, and Shifting-mean LDA",Kumamoto University,Kumamoto University,"熊本大学黒髪キャンパス, 熊本菊陽線, 中央区, 熊本市, 熊本県, 九州地方, 860-0863, 日本",32.81641780,130.72703969,edu,
+20daf06cea3dbc0b92f7ba4adb8fe7d95d27455e,"Robust Face Recognition using Wavelet and DCT based Lighting Normalization, and Shifting-mean LDA",Kyoto University,Kyoto University,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.02749960,135.78154513,edu,
+20b5cfa2d35ff437bcc81d4c7f82f8b1f69dcec3,Jointly Discovering Visual Objects and Spoken Words from Raw Sensory Input,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+2011d4da646f794456bebb617d1500ddf71989ed,Transductive Centroid Projection for Semi-supervised Large-Scale Recognition,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+2011d4da646f794456bebb617d1500ddf71989ed,Transductive Centroid Projection for Semi-supervised Large-Scale Recognition,SenseTime,SenseTime,"China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1号-7",39.99300800,116.32988200,company,"1 Zhongguancun E Rd, Haidian Qu, China"
+20d320529adf99aff7ca7bd562123caeaa8e7af7,Faithful Multimodal Explanation for Visual Question Answering,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+203a1ecdf7e488d81e5661a6735b767c4fe2b37d,Integrating Relevance Feedback in Boosting for Content-Based Image Retrieval,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+203a1ecdf7e488d81e5661a6735b767c4fe2b37d,Integrating Relevance Feedback in Boosting for Content-Based Image Retrieval,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+202dc3c6fda654aeb39aee3e26a89340fb06802a,Spatio-Temporal Instance Learning: Action Tubes from Class Supervision,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+20ebbcb6157efaacf7a1ceb99f2f3e2fdf1384e6,Comparative Assessment of Independent Component Analysis (ICA) for Face Recognition,George Mason University,George Mason University,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA",38.83133325,-77.30798839,edu,
+20405902028e631e239cbc0ff6148f5f1d8050a0,Sherlock: Modeling Structured Knowledge in Images,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+207e0ac5301a3c79af862951b70632ed650f74f7,Learning a Discriminative Null Space for Person Re-identification,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+20b4a81c0aeafb891f9888797eac78e242db9aeb,"Affective Computing and Interaction : Psychological , Cognitive and Neuroscientific Perspectives",Middle East Technical University,Middle East Technical University,"ODTÜ, 1, 1591.sk(315.sk), Çiğdem Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87549675,32.78553506,edu,
+20b4a81c0aeafb891f9888797eac78e242db9aeb,"Affective Computing and Interaction : Psychological , Cognitive and Neuroscientific Perspectives",Middle East Technical University,Middle East Technical University,"ODTÜ, 1, 1591.sk(315.sk), Çiğdem Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87549675,32.78553506,edu,
+20388099cc415c772926e47bcbbe554e133343d1,The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+20388099cc415c772926e47bcbbe554e133343d1,The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+20523cbb076af203ae2a293074a0445fe95309e9,Classification of weather situations on single color images,Eindhoven University of Technology,Eindhoven University of Technology,"Technische Universiteit Eindhoven, 2, De Rondom, Villapark, Eindhoven, Noord-Brabant, Nederland, 5600 MB, Nederland",51.44866020,5.49039957,edu,
+20b038c50cc7148dfb364e2de51cde120c907c9f,Integrated perception with recurrent multi-task neural networks,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+205cbac63de77af22e003c0c98c1a4a351747708,Attribute Guided Dictionary Learning,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+205cbac63de77af22e003c0c98c1a4a351747708,Attribute Guided Dictionary Learning,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+205cbac63de77af22e003c0c98c1a4a351747708,Attribute Guided Dictionary Learning,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+20cfb4136c1a984a330a2a9664fcdadc2228b0bc,Sparse Coding Trees with application to emotion classification,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+20c02e98602f6adf1cebaba075d45cef50de089f,Video Jigsaw: Unsupervised Learning of Spatiotemporal Context for Video Action Recognition,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+20c02e98602f6adf1cebaba075d45cef50de089f,Video Jigsaw: Unsupervised Learning of Spatiotemporal Context for Video Action Recognition,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+20c02e98602f6adf1cebaba075d45cef50de089f,Video Jigsaw: Unsupervised Learning of Spatiotemporal Context for Video Action Recognition,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+2020e8c0be8fa00d773fd99b6da55029a6a83e3d,An Evaluation of the Invariance Properties of a Biologically-Inspired System for Unconstrained Face Recognition,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+209df2d7724bc6defe87618b502e1d7c800a819f,Beyond KernelBoost,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+20289282fedfd60d9d4a7153f460f5c8e0a502b8,Goal Driven Detection in Natural Scenes,Rochester Institute of Technology,Rochester Institute of Technology,"Rochester Institute of Technology (RIT), 1, Lomb Memorial Drive, Bailey, Henrietta Town, Monroe County, New York, 14623, USA",43.08250655,-77.67121663,edu,
+189619de93e83cdc26e275bc7652463328ab3f5c,Privacy-Aware Database System for Retrieving Facial Images,Kyoto University,Kyoto University,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.02749960,135.78154513,edu,
+189b1859f77ddc08027e1e0f92275341e5c0fdc6,Sparse Representations and Distance Learning for Attribute Based Category Recognition,Rochester Institute of Technology,Rochester Institute of Technology,"Rochester Institute of Technology (RIT), 1, Lomb Memorial Drive, Bailey, Henrietta Town, Monroe County, New York, 14623, USA",43.08250655,-77.67121663,edu,
+18a9f3d855bd7728ed4f988675fa9405b5478845,An Illumination Invariant Texture Based Face Recognition,Manonmaniam Sundaranar University,Manonmaniam Sundaranar University,"Manonmaniam Sundaranar University, Tenkasi-Tirunelveli, Gandhi Nagar, Tirunelveli, Tirunelveli Kattabo, Tamil Nadu, 627808, India",8.76554685,77.65100445,edu,
+18166432309000d9a5873f989b39c72a682932f5,Learning a Warped Subspace Model of Faces with Images of Unknown Pose and Illumination,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+1862f2df2e278505c9ca970f9c5a25ea3aeb9686,Merging Deep Neural Networks for Mobile Devices,Institute of Information Science,Institute of Information Science,"資訊科學研究所, 數理大道, 中研里, 南港子, 南港區, 臺北市, 11574, 臺灣",25.04107280,121.61475620,edu,
+18095a530b532a70f3b615fef2f59e6fdacb2d84,Deep Structured Scene Parsing by Learning with Image Descriptions,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+18095a530b532a70f3b615fef2f59e6fdacb2d84,Deep Structured Scene Parsing by Learning with Image Descriptions,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+18c6c92c39c8a5a2bb8b5673f339d3c26b8dcaae,Learning invariant representations and applications to face verification,McGovern Institute for Brain Research,McGovern Institute for Brain Research,"McGovern Institute for Brain Research (MIT), Main Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.36262950,-71.09144810,edu,
+18c6c92c39c8a5a2bb8b5673f339d3c26b8dcaae,Learning invariant representations and applications to face verification,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+18dd8e04ecb5421b13aac39c288cd8dc3a541178,Unsupervised Selective Transfer Learning for Object Recognition,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+182496e9533ad3a5eef6a06b815a276c18eaea2e,High autistic trait individuals do not modulate gaze behaviour in response to social presence but look away more when actively engaged in an interaction,Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.48799610,-3.17969747,edu,
+184c3e66a746376716d5e816d95e1a7cb8e04390,Unsupervised learning of a scene-specific coarse gaze estimator,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+185263189a30986e31566394680d6d16b0089772,Efficient Annotation of Objects for Video Analysis,International Institute of Information Technology,International Institute of Information Technology,"International Institute of Information Technology, Hyderabad, Campus Road, Ward 105 Gachibowli, Greater Hyderabad Municipal Corporation West Zone, Hyderabad, Rangareddy District, Telangana, 500032, India",17.44549570,78.34854698,edu,
+186d6d47855cb00c5bc99497932422b8963510cd,Image Retrieval with a Bayesian Model of Relevance Feedback,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+186d6d47855cb00c5bc99497932422b8963510cd,Image Retrieval with a Bayesian Model of Relevance Feedback,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+186370ecc1f05ef8d3f611873a039fcde3af68b5,Machine Learning with Interdependent and Non-identically Distributed Data (Dagstuhl Seminar 15152),University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+188e012533977266355bfabc62d6adbf0f92d6b1,Fast Neural Architecture Search of Compact Semantic Segmentation Models via Auxiliary Cells,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+18b9dc55e5221e704f90eea85a81b41dab51f7da,Attention-Based Temporal Weighted Convolutional Neural Network for Action Recognition,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+18fe63c013983bea53be7d559ef36a1f385ca6ea,Supervision Beyond Human Annotations for Learning Visual Representations,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+185aaed9d48f42463791726f1ddf4e1be64a47d9,Person Re-Identification with Vision and Language,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+185aaed9d48f42463791726f1ddf4e1be64a47d9,Person Re-Identification with Vision and Language,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+185aaed9d48f42463791726f1ddf4e1be64a47d9,Person Re-Identification with Vision and Language,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+18078e72bddefffc24a6e882790aca8531773bed,Sublinear scaling of country attractiveness observed from Flickr dataset,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+18f70d8e1697bc0b85753db2d4d64aeb696b052a,Evolutionary Discriminant Feature Extraction with Application to Face Recognition,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+18f70d8e1697bc0b85753db2d4d64aeb696b052a,Evolutionary Discriminant Feature Extraction with Application to Face Recognition,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+18c515d42666c95079f9a98eab59ac1cdfb10859,An ASM fitting method based on machine learning that provides a robust parameter initialization for AAM fitting,Waseda University,Waseda University,"早稲田大学 北九州キャンパス, 2-2, 有毛引野線, 八幡西区, 北九州市, 福岡県, 九州地方, 808-0135, 日本",33.88987280,130.70856205,edu,
+18c515d42666c95079f9a98eab59ac1cdfb10859,An ASM fitting method based on machine learning that provides a robust parameter initialization for AAM fitting,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+180953265b3ad550682c8f0dc693eda87b82ec91,Thinking of Images as What They Are: Compound Matrix Regression for Image Classification,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+180953265b3ad550682c8f0dc693eda87b82ec91,Thinking of Images as What They Are: Compound Matrix Regression for Image Classification,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+183c8da12a07e2002fd71edbabeca5b3bfb45d66,Grounding Natural Language Instructions with Unknown Object References using Learned Visual Attributes,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+182470fd0c18d0c5979dff75d089f1da176ceeeb,A Multimodal Annotation Schema for Non-Verbal Affective Analysis in the Health-Care Domain,Ulm University,Ulm University,"HNU, John-F.-Kennedy-Straße, Vorfeld, Wiley, Neu-Ulm, Landkreis Neu-Ulm, Schwaben, Bayern, 89231, Deutschland",48.38044335,10.01010115,edu,
+182470fd0c18d0c5979dff75d089f1da176ceeeb,A Multimodal Annotation Schema for Non-Verbal Affective Analysis in the Health-Care Domain,Information Technologies Institute,Information Technologies Institute,"公益財団法人九州先端科学技術研究所, Fukuoka SRP Center Building 7F, 百道ランプ下り入り口, 早良区, 福岡市, 福岡県, 九州地方, 814-0001, 日本",33.59345390,130.35578370,edu,
+18cf63b20521964f2115f6c939f70e582999bff5,Analysing False Positives and 3D Structure to Create Intelligent Thresholding and Weighting Functions for SIFT Features,University of Manchester,University of Manchester,"University of Manchester - Main Campus, Brunswick Street, Curry Mile, Ardwick, Manchester, Greater Manchester, North West England, England, M13 9NR, UK",53.46600455,-2.23300881,edu,
+1862cb5728990f189fa91c67028f6d77b5ac94f6,Speeding Up Tracking by Ignoring Features,Delft University of Technology,Delft University of Technology,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland",51.99882735,4.37396037,edu,
+18219d85bb14f851fc4714df19cc7f38dff8ddc3,Online Adaptation of Convolutional Neural Networks for the 2017 DAVIS Challenge on Video Object Segmentation,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+1862bfca2f105fddfc79941c90baea7db45b8b16,Annotator rationales for visual recognition,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+187d4d9ba8e10245a34f72be96dd9d0fb393b1aa,Mining Visual Actions from Movies,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+182f3aa4b02248ff9c0f9816432a56d3c8880706,Sparse Coding for Classification via Discrimination Ensemble,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+1839830486082578d2612e46a89e0e727ea1773a,Learning Hash Codes with Listwise Supervision,IBM Thomas J. Watson Research Center,IBM Thomas J. Watson Research Center,"IBM Yorktown research lab, Adams Road, Millwood, Town of New Castle, Westchester County, New York, 10562, USA",41.21002475,-73.80407056,company,
+1839830486082578d2612e46a89e0e727ea1773a,Learning Hash Codes with Listwise Supervision,IBM Thomas J. Watson Research Center,IBM Thomas J. Watson Research Center,"IBM Yorktown research lab, Adams Road, Millwood, Town of New Castle, Westchester County, New York, 10562, USA",41.21002475,-73.80407056,company,
+1839830486082578d2612e46a89e0e727ea1773a,Learning Hash Codes with Listwise Supervision,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+1839830486082578d2612e46a89e0e727ea1773a,Learning Hash Codes with Listwise Supervision,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+188951263d3140b3b5f5579e7a745317356e75ce,Face-space architectures: evidence for the use of independent color-based features.,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+18c57ddc9c0164ee792661f43a5578f7a00d0330,ChestX-Ray8: Hospital-Scale Chest X-Ray Database and Benchmarks on Weakly-Supervised Classification and Localization of Common Thorax Diseases,National Institutes of Health,National Institutes of Health,"NIH, Pooks Hill, Bethesda, Montgomery County, Maryland, USA",39.00041165,-77.10327775,edu,
+187480101af3fb195993da1e2c17d917df24eb23,Unsupervised Visual Representation Learning by Context Prediction,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+187480101af3fb195993da1e2c17d917df24eb23,Unsupervised Visual Representation Learning by Context Prediction,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+18200e8db6fc63f16d5ed098b5abc17bf0939333,The Fastest Pedestrian Detector in the West,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+18200e8db6fc63f16d5ed098b5abc17bf0939333,The Fastest Pedestrian Detector in the West,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+18804d8e981fa66135c0ffa6fdb2b8b3fec6d753,Predicting human gaze beyond pixels.,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+18804d8e981fa66135c0ffa6fdb2b8b3fec6d753,Predicting human gaze beyond pixels.,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+18804d8e981fa66135c0ffa6fdb2b8b3fec6d753,Predicting human gaze beyond pixels.,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+18804d8e981fa66135c0ffa6fdb2b8b3fec6d753,Predicting human gaze beyond pixels.,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+18804d8e981fa66135c0ffa6fdb2b8b3fec6d753,Predicting human gaze beyond pixels.,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+1875b2325b3efcb49dec51c6416f40862db4fe74,Functional abnormalities of the default network during self- and other-reflection in autism.,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+18d8c54c1977f41b7ed71c1eeebf162298323c6f,Spatial Frequency Information Modulates Response Inhibition and Decision-Making Processes,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+18941b52527e6f15abfdf5b86a0086935706e83b,DeepGUM: Learning Deep Robust Regression with a Gaussian-Uniform Mixture Model,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+188d26a005b6aac1448b9c52529b93a186c33685,Predictive network with leveraging clinical measures as auxiliary task,Wayne State University,Wayne State University,"Parking Structure 3, East Warren Avenue, New Center, Detroit, Wayne County, Michigan, 48236, USA",42.35775700,-83.06286711,edu,
+188d26a005b6aac1448b9c52529b93a186c33685,Predictive network with leveraging clinical measures as auxiliary task,Wayne State University,Wayne State University,"Parking Structure 3, East Warren Avenue, New Center, Detroit, Wayne County, Michigan, 48236, USA",42.35775700,-83.06286711,edu,
+188d26a005b6aac1448b9c52529b93a186c33685,Predictive network with leveraging clinical measures as auxiliary task,Wayne State University,Wayne State University,"Parking Structure 3, East Warren Avenue, New Center, Detroit, Wayne County, Michigan, 48236, USA",42.35775700,-83.06286711,edu,
+18e1863e70cc93759a041b8aa745d0c0da51ad31,IBVis: Interactive Visual Analytics for Information Bottleneck Based Trajectory Clustering,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+181d0534f2c0233804a6f90c75c919d868fd58e1,Distinguishing Posed and Spontaneous Smiles by Facial Dynamics,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+18ab9be9af94f2bf4d3828161ffb232d1462526a,SHaPE: A Novel Graph Theoretic Algorithm for Making Consensus-Based Decisions in Person Re-identification Systems,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+18ab9be9af94f2bf4d3828161ffb232d1462526a,SHaPE: A Novel Graph Theoretic Algorithm for Making Consensus-Based Decisions in Person Re-identification Systems,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+18ced9c7bab4d7fa69ccf2d3c8783317ba94e59f,Pitfalls in Designing Zero-Effort Deauthentication: Opportunistic Human Observation Attacks,Aalto University,Aalto University,"Aalto, 24, Otakaari, Otaniemi, Suur-Tapiola, Espoo, Helsingin seutukunta, Uusimaa, Etelä-Suomi, Manner-Suomi, 02150, Suomi",60.18558755,24.82427330,edu,
+18193194b7000f442c9df5ab16735a1f3ccbb630,Do Explanations make VQA Models more Predictable to a Human?,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+18aaeaba26d95482fc40d560c49f0a7f22ea0870,Automatic Detection of Learning-Centered Affective States in the Wild,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+18aaeaba26d95482fc40d560c49f0a7f22ea0870,Automatic Detection of Learning-Centered Affective States in the Wild,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+18aaeaba26d95482fc40d560c49f0a7f22ea0870,Automatic Detection of Learning-Centered Affective States in the Wild,Florida State University,Florida State University,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA",30.44235995,-84.29747867,edu,
+277c41ce2a485f09a842d793e599553ad751d34a,Robust Person Detection by Classifier Cubes and Local Verification,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+2744d19a3026a516431ad92f1b60a9237aa2ef6d,"The Development of Visuo-spatial Processing in Children with Autism, down Syndrome and Williams Syndrome",University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+2744d19a3026a516431ad92f1b60a9237aa2ef6d,"The Development of Visuo-spatial Processing in Children with Autism, down Syndrome and Williams Syndrome",University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+27a00f2490284bc0705349352d36e9749dde19ab,VoxCeleb2: Deep Speaker Recognition,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+271e2856e332634eccc5e80ba6fa9bbccf61f1be,3D Spatio-Temporal face recognition using dynamic range model sequences,SUNY Binghamton,State University of New York at Binghamton,"State University of New York at Binghamton, East Drive, Hinman, Willow Point, Vestal Town, Broome County, New York, 13790, USA",42.08779975,-75.97066066,edu,
+27b9e75bcaf9e12127f7181bcb7f1fcb105462c4,Local frequency descriptor for low-resolution face recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+27b9e75bcaf9e12127f7181bcb7f1fcb105462c4,Local frequency descriptor for low-resolution face recognition,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+272b364f0ed647dbdbc4ae80f10ddaf8ada3a07d,A Novel Method for Tracking Individuals of Fruit Fly Swarms Flying in a Laboratory Flight Arena,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+27846b464369095f4909f093d11ed481277c8bba,Real-Time Face Detection and Recognition in Complex Background,Illinois Institute of Technology,Illinois Institute of Technology,"Illinois Institute of Technology, South State Street, Bronzeville, Chicago, Cook County, Illinois, 60616, USA",41.83619630,-87.62655913,edu,
+27eb7a6e1fb6b42516041def6fe64bd028b7614d,Joint Unsupervised Deformable Spatio-Temporal Alignment of Sequences,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+27eb7a6e1fb6b42516041def6fe64bd028b7614d,Joint Unsupervised Deformable Spatio-Temporal Alignment of Sequences,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+2717998d89d34f45a1cca8b663b26d8bf10608a9,Real-Time Action Recognition with Enhanced Motion Vector CNNs,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+2717998d89d34f45a1cca8b663b26d8bf10608a9,Real-Time Action Recognition with Enhanced Motion Vector CNNs,Tongji University,Tongji University,"同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国",31.28473925,121.49694909,edu,
+27405836469652ca9bfaf948c0c9dadd6465a566,"The Use of Lexical Basis Functions to Characterize Faces, and to Measure Their Perceived Similarity",Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+27c66b87e0fbb39f68ddb783d11b5b7e807c76e8,Fast Simplex-HMM for One-Shot Learning Activity Recognition,Zaragoza University,Zaragoza University,"Colegio Mayor Universitario Santa Isabel, Calle de Domingo Miral, Romareda, Zaragoza, Aragón, 50009, España",41.64062180,-0.90079399,edu,
+27c66b87e0fbb39f68ddb783d11b5b7e807c76e8,Fast Simplex-HMM for One-Shot Learning Activity Recognition,Kingston University,Kingston University,"Kingston University, Kingston Hill, Kingston Vale, Kingston-upon-Thames, London, Greater London, England, KT2 7TF, UK",51.42930860,-0.26840440,edu,
+279edb192f630f057516d8e56eae61713b6a1895,"SfSNet : Learning Shape, Reflectance and Illuminance of Faces in the Wild",University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+2753c410a4072d40a8eea5de392414999b7f4b6a,Deep Convolutional Poses for Human Interaction Recognition in Monocular Videos,Heriot-Watt University,Heriot-Watt University,"Heriot-Watt University - Edinburgh Campus, Third Gait, Currie, Gogarbank, City of Edinburgh, Scotland, EH14 4AS, UK",55.91029135,-3.32345777,edu,
+27eb092a9adbfcb3aea1b13bde580f1fd5c7b8f0,Oxytocin Increases Gaze to the Eye Region of Human Faces,University of New South Wales,University of New South Wales,"UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia",-33.91758275,151.23124025,edu,
+27eb092a9adbfcb3aea1b13bde580f1fd5c7b8f0,Oxytocin Increases Gaze to the Eye Region of Human Faces,University of New South Wales,University of New South Wales,"UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia",-33.91758275,151.23124025,edu,
+27fb07897db51ce23db4ef93e2621717ee1db64c,Affect Infusion and Detection through Faces in Computer-mediated Knowledge-sharing Decisions,Monash University,Monash University,"Monash University, Mile Lane, Parkville, City of Melbourne, Victoria, 3000, Australia",-37.78397455,144.95867433,edu,
+27c4369463ff28f4ab16e9d9eba6f48102c8793e,Triangle Generative Adversarial Networks,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+271df16f789bd2122f0268c3e2fa46bc0cb5f195,Mining discriminative co-occurrence patterns for visual recognition,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+271df16f789bd2122f0268c3e2fa46bc0cb5f195,Mining discriminative co-occurrence patterns for visual recognition,Northwestern University,Northwestern University,"Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA",42.05511640,-87.67581113,edu,
+27ccf0cdf0c7a74640697dfb5d1cf85969a5da2e,Multilingual Image Description with Neural Sequence Models,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+27ccf0cdf0c7a74640697dfb5d1cf85969a5da2e,Multilingual Image Description with Neural Sequence Models,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+275b5091c50509cc8861e792e084ce07aa906549,Leveraging the User's Face as a Known Object in Handheld Augmented Reality,University of Munich,Universität München,"Geschwister-Scholl-Platz 1, 80539 München, Germany",48.15080600,11.58043000,edu,
+27218ff58c3f0e7d7779fba3bb465d746749ed7c,Active Learning for Image Ranking Over Relative Visual Attributes,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+270f029b03ee1bdfeae4ff4c5167b450d185a981,Combining local appearance and holistic view: Dual-Source Deep Neural Networks for human pose estimation,University of South Carolina,University of South Carolina,"University of South Carolina, Wheat Street, Columbia, Richland County, South Carolina, 29205, USA",33.99282980,-81.02685168,edu,
+272c6b6ccf144954a154b83bf5789341ee3f9ed2,A brain-computer interface for potential non-verbal facial communication based on EEG signals related to specific emotions,University of Tokushima,The University of Tokushima,"大学前, 国道11号, 徳島市, 徳島県, 四国地方, 770-0815, 日本",34.07880680,134.55898100,edu,
+272c6b6ccf144954a154b83bf5789341ee3f9ed2,A brain-computer interface for potential non-verbal facial communication based on EEG signals related to specific emotions,University of Tokushima,The University of Tokushima,"大学前, 国道11号, 徳島市, 徳島県, 四国地方, 770-0815, 日本",34.07880680,134.55898100,edu,
+27c6cd568d0623d549439edc98f6b92528d39bfe,Regressive Tree Structured Model for Facial Landmark Localization,National Taiwan University of Science and Technology,National Taiwan University of Science and Technology,"臺科大, 43, 基隆路四段, 學府里, 下內埔, 大安區, 臺北市, 10607, 臺灣",25.01353105,121.54173736,edu,
+2788f382e4396290acfc8b21df45cc811586e66e,Deep Attributes Driven Multi-camera Person Re-identification,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+2788f382e4396290acfc8b21df45cc811586e66e,Deep Attributes Driven Multi-camera Person Re-identification,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+2788f382e4396290acfc8b21df45cc811586e66e,Deep Attributes Driven Multi-camera Person Re-identification,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+273b0511588ab0a81809a9e75ab3bd93d6a0f1e3,Recognition of facial expressions based on salient geometric features and support vector machines,Korea Electronics Technology Institute,Korea Electronics Technology Institute,"South Korea, Gyeonggi-do, Seongnam-si, Bundang-gu, 새나리로 25 (야탑동) KETI 전자부품연구원",37.40391700,127.15978600,edu,
+273b0511588ab0a81809a9e75ab3bd93d6a0f1e3,Recognition of facial expressions based on salient geometric features and support vector machines,Chonbuk National University,Chonbuk National University,"전북대학교, 567, 백제대로, 금암동, 덕진구, 전주시, 전북, 54896, 대한민국",35.84658875,127.13501330,edu,
+273b0511588ab0a81809a9e75ab3bd93d6a0f1e3,Recognition of facial expressions based on salient geometric features and support vector machines,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+27961bc8173ac84fdbecacd01e5ed6f7ed92d4bd,Automatic multi-view face recognition via 3D model based pose regularization,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+2776d11afa421ec7403606f902dc757de95583b2,Label Propagation from ImageNet to 3D Point Clouds,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+2784d9212dee2f8a660814f4b85ba564ec333720,Learning class-specific image transformations with higher-order Boltzmann machines,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+2717b044ae9933f9ab87f16d6c611352f66b2033,GNAS: A Greedy Neural Architecture Search Method for Multi-Attribute Learning,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+279acfde0286bb76dd7717abebc3c8acf12d2c5f,Local Gradient Order Pattern for Face Representation and Recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+27d90cdd54bcc8f8ecfa60d886143288977a5c63,On the possibility of instance-based stroke recovery,Kyushu University,Kyushu University,"伊都ゲストハウス, 桜井太郎丸線, 西区, 福岡市, 福岡県, 九州地方, 819−0395, 日本",33.59914655,130.22359848,edu,
+274e8c0c513ff82713f2f332694cf2b29b7c3bb1,Multi Channel-Kernel Canonical Correlation Analysis for Cross-View Person Re-Identification,University of Florence,University of Florence,"Piazza di San Marco, 4, 50121 Firenze FI, Italy",43.77764260,11.25976500,edu,
+274e8c0c513ff82713f2f332694cf2b29b7c3bb1,Multi Channel-Kernel Canonical Correlation Analysis for Cross-View Person Re-Identification,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+274e8c0c513ff82713f2f332694cf2b29b7c3bb1,Multi Channel-Kernel Canonical Correlation Analysis for Cross-View Person Re-Identification,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+274e8c0c513ff82713f2f332694cf2b29b7c3bb1,Multi Channel-Kernel Canonical Correlation Analysis for Cross-View Person Re-Identification,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+27bcdd21f1be3d0990f86a231d29d46a5537e5cd,Rendering Portraitures from Monocular Camera and Beyond,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+27bcdd21f1be3d0990f86a231d29d46a5537e5cd,Rendering Portraitures from Monocular Camera and Beyond,SenseTime,SenseTime,"China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1号-7",39.99300800,116.32988200,company,"1 Zhongguancun E Rd, Haidian Qu, China"
+274959f26d04848f71a355c09500fd7ebc271d69,Two-Stream Flow-Guided Convolutional Attention Networks for Action Recognition,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+27883967d3dac734c207074eed966e83afccb8c3,Two-Dimensional Maximum Local Variation Based on Image Euclidean Distance for Face Recognition,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+27883967d3dac734c207074eed966e83afccb8c3,Two-Dimensional Maximum Local Variation Based on Image Euclidean Distance for Face Recognition,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+270e5266a1f6e76954dedbc2caf6ff61a5fbf8d0,EmotioNet Challenge: Recognition of facial expressions of emotion in the wild,Ohio State University,The Ohio State University,"The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA",40.00471095,-83.02859368,edu,
+2742a61d32053761bcc14bd6c32365bfcdbefe35,Learning transformations for clustering and classification,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+2742a61d32053761bcc14bd6c32365bfcdbefe35,Learning transformations for clustering and classification,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+2781de1aa6f4c9621ad3af38fc58b894696f1791,FollowMe: Efficient Online Min-Cost Flow Tracking with Bounded Memory and Computation,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+2781de1aa6f4c9621ad3af38fc58b894696f1791,FollowMe: Efficient Online Min-Cost Flow Tracking with Bounded Memory and Computation,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+27c6b0883e51ec901e587963070eb2ad96871a33,Performance Modeling of Multithreaded Programs for Mobile Asymmetric Chip Multiprocessors,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+27c6b0883e51ec901e587963070eb2ad96871a33,Performance Modeling of Multithreaded Programs for Mobile Asymmetric Chip Multiprocessors,University of New South Wales,University of New South Wales,"UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia",-33.91758275,151.23124025,edu,
+2788a2461ed0067e2f7aaa63c449a24a237ec341,Random Erasing Data Augmentation,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+27dafedccd7b049e87efed72cabaa32ec00fdd45,Unsupervised visual alignment with similarity graphs,Tampere University of Technology,Tampere University of Technology,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi",61.44964205,23.85877462,edu,
+273b9b7c63ac9196fb12734b49b74d0523ca4df4,The Secrets of Salient Object Segmentation,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+273b9b7c63ac9196fb12734b49b74d0523ca4df4,The Secrets of Salient Object Segmentation,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+27caf667432ad7dbb01921696857303641b34f83,Entropy Driven Hierarchical Search for 3D Human Pose Estimation,Swansea University,Swansea University,"Swansea University, University Footbridge, Sketty, Swansea, Wales, SA2 8PZ, UK",51.60915780,-3.97934429,edu,
+273dc39c3e7a18aac3cbd5f2db93969e9cc7613f,Exemplar-based Human Interaction Recognition: Features and Key Pose Sequence Model,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+27a299b834a18e45d73e0bf784bbb5b304c197b3,Social Role Discovery in Human Events,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+27a299b834a18e45d73e0bf784bbb5b304c197b3,Social Role Discovery in Human Events,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+277bfd1bc89044b4a523ef23f48bd053d5560657,Large-Scale Object Discovery and Detector Adaptation from Unlabeled Video,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+2701bd6850dc1b811ef7697cc1cd19405b99f990,Privacy Preserving Multi-target Tracking,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+4b1682da96af72ce0ddaa9384ce294611807a8b3,Graph Distillation for Action Detection with Privileged Information,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+4bab23e4ce9b6c65a067953fe202c20c387f00c8,Sparse Patch-Histograms for Object Classification in Cluttered Images,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+4b70374555c32c6a1e0db43674a7183170083450,Kernelized View Adaptive Subspace Learning for Person Re-identification,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+4b70374555c32c6a1e0db43674a7183170083450,Kernelized View Adaptive Subspace Learning for Person Re-identification,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+4b70374555c32c6a1e0db43674a7183170083450,Kernelized View Adaptive Subspace Learning for Person Re-identification,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+4b8d80f91d271f61b26db5ad627e24e59955c56a,Learning Long-Range Vision for an Offroad Robot,Courant Institute of Mathematical Sciences,Courant Institute of Mathematical Sciences,"Courant Institute of Mathematical Sciences, 251, Mercer Street, Washington Square Village, Greenwich Village, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72864840,-73.99568630,edu,
+4b8d80f91d271f61b26db5ad627e24e59955c56a,Learning Long-Range Vision for an Offroad Robot,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+4bb03b27bc625e53d8d444c0ba3ee235d2f17e86,Reading between the Lines: Object Localization Using Implicit Cues from Image Tags,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+4b39b981133a91052956cc42d2967f349a95cd89,Towards Understanding Articulated Objects,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+4bc9a767d7e63c5b94614ebdc24a8775603b15c9,Understanding Visual Information: from Unsupervised Discovery to Minimal Effort Domain Adaptation,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+4b519e2e88ccd45718b0fc65bfd82ebe103902f7,A Discriminative Model for Age Invariant Face Recognition,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+4b519e2e88ccd45718b0fc65bfd82ebe103902f7,A Discriminative Model for Age Invariant Face Recognition,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+4b519e2e88ccd45718b0fc65bfd82ebe103902f7,A Discriminative Model for Age Invariant Face Recognition,Korea University,Korea University,"고려대, 안암로, 제기동, 동대문구, 서울특별시, 02796, 대한민국",37.59014110,127.03623180,edu,
+4b3f425274b0c2297d136f8833a31866db2f2aec,Toward Open-Set Face Recognition,"University of Colorado, Colorado Springs",University of Colorado Colorado Springs,"Main Hall, The Spine, Colorado Springs, El Paso County, Colorado, 80907, USA",38.89207560,-104.79716389,edu,
+4b7c110987c1d89109355b04f8597ce427a7cd72,Feature- and Face-Exchange illusions: new insights and applications for the study of the binding problem,American University,American University,"American University, 4400, Massachusetts Avenue Northwest, Spring Valley, American University Park, D.C., 20016, USA",38.93804505,-77.08939224,edu,
+4b7c110987c1d89109355b04f8597ce427a7cd72,Feature- and Face-Exchange illusions: new insights and applications for the study of the binding problem,University of Nevada,University of Nevada,"Orange 1, Evans Avenue, Reno, Washoe County, Nevada, 89557, USA",39.54694490,-119.81346566,edu,
+4b7c110987c1d89109355b04f8597ce427a7cd72,Feature- and Face-Exchange illusions: new insights and applications for the study of the binding problem,American University,American University,"American University, 4400, Massachusetts Avenue Northwest, Spring Valley, American University Park, D.C., 20016, USA",38.93804505,-77.08939224,edu,
+4bd088ba3f42aa1e43ae33b1988264465a643a1f,Multiview Face Detection Using Gabor Filters and Support Vector Machine,Halmstad University,Halmstad University,"Högskolan i Halmstad, 3, Kristian IV:s väg, Larsfrid, Nyhem, Halmstad, Hallands län, Götaland, 301 18, Sverige",56.66340325,12.87929727,edu,
+4b4d8169664dcfc87cf7ab68d4a49ecd160d89f3,The pursuit of social acceptance: aberrant conformity in social anxiety disorder,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+4b4d8169664dcfc87cf7ab68d4a49ecd160d89f3,The pursuit of social acceptance: aberrant conformity in social anxiety disorder,Beijing Normal University,Beijing Normal University,"北京师范大学, 19, 新街口外大街, 西城区, 100875, 中国",39.96014155,116.35970438,edu,
+4b4d8169664dcfc87cf7ab68d4a49ecd160d89f3,The pursuit of social acceptance: aberrant conformity in social anxiety disorder,Beijing Normal University,Beijing Normal University,"北京师范大学, 19, 新街口外大街, 西城区, 100875, 中国",39.96014155,116.35970438,edu,
+4b4d8169664dcfc87cf7ab68d4a49ecd160d89f3,The pursuit of social acceptance: aberrant conformity in social anxiety disorder,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+4b4d8169664dcfc87cf7ab68d4a49ecd160d89f3,The pursuit of social acceptance: aberrant conformity in social anxiety disorder,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+4bfce41cc72be315770861a15e467aa027d91641,Active Annotation Translation,University of Iceland,University of Iceland,"Háskóli Íslands, Sturlugata, Háskóli, Reykjavík, Reykjavíkurborg, Höfuðborgarsvæðið, 121, Ísland",64.13727400,-21.94561454,edu,
+4b61d8490bf034a2ee8aa26601d13c83ad7f843a,A Modulation Module for Multi-task Learning with Applications in Image Retrieval,Northwestern University,Northwestern University,"Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA",42.05511640,-87.67581113,edu,
+4b61d8490bf034a2ee8aa26601d13c83ad7f843a,A Modulation Module for Multi-task Learning with Applications in Image Retrieval,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+4bd3de97b256b96556d19a5db71dda519934fd53,Latent Factor Guided Convolutional Neural Networks for Age-Invariant Face Recognition,South China University of Technology,South China University of Technology,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.05020420,113.39880323,edu,
+4bd3de97b256b96556d19a5db71dda519934fd53,Latent Factor Guided Convolutional Neural Networks for Age-Invariant Face Recognition,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+4b1abc5b52db2ba854101b137d1fe3aed9e21274,The role of dictionary learning on sparse representation-based classification,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+4b18303edf701e41a288da36f8f1ba129da67eb7,An embarrassingly simple approach to zero-shot learning,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+4b18303edf701e41a288da36f8f1ba129da67eb7,An embarrassingly simple approach to zero-shot learning,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+4b86e711658003a600666d3ccfa4a9905463df1c,Fusion of Appearance Image and Passive Stereo Depth Map for Face Recognition Based on the Bilateral 2DLDA,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+4b3c1af4369c9ed6714451643ef9c06969849e73,Geometry-Consistent Adversarial Networks for One-Sided Unsupervised Domain Mapping,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+4b3c1af4369c9ed6714451643ef9c06969849e73,Geometry-Consistent Adversarial Networks for One-Sided Unsupervised Domain Mapping,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+4b3c1af4369c9ed6714451643ef9c06969849e73,Geometry-Consistent Adversarial Networks for One-Sided Unsupervised Domain Mapping,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+4b421db0f57608470ac1e26077ecb8a6cdccade5,Adaptive Semantic Segmentation with a Strategic Curriculum of Proxy Labels,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+4b1e80211f34b731667a31f0f27937376866993a,Online Metric-Weighted Linear Representations for Robust Visual Tracking,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+4b1e80211f34b731667a31f0f27937376866993a,Online Metric-Weighted Linear Representations for Robust Visual Tracking,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+4ba3f9792954ee3ba894e1e330cd77da4668fa22,Nearest Neighbor Discriminant Analysis,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+4ba3f9792954ee3ba894e1e330cd77da4668fa22,Nearest Neighbor Discriminant Analysis,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+4b74f2d56cd0dda6f459319fec29559291c61bff,Person-Specific Subspace Analysis for Unconstrained Familiar Face Identification,University of Campinas,University of Campinas,"USJ, 97, Rua Sílvia Maria Fabro, Kobrasol, Campinas, São José, Microrregião de Florianópolis, Mesorregião da Grande Florianópolis, SC, Região Sul, 88102-130, Brasil",-27.59539950,-48.61542180,edu,
+4b74f2d56cd0dda6f459319fec29559291c61bff,Person-Specific Subspace Analysis for Unconstrained Familiar Face Identification,Rowland Institute,Rowland Institute,"Rowland Research Institute, Land Boulevard, East Cambridge, Cambridge, Middlesex County, Massachusetts, 02142, USA",42.36398620,-71.07782930,edu,
+4b74f2d56cd0dda6f459319fec29559291c61bff,Person-Specific Subspace Analysis for Unconstrained Familiar Face Identification,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+4b74f2d56cd0dda6f459319fec29559291c61bff,Person-Specific Subspace Analysis for Unconstrained Familiar Face Identification,McGovern Institute for Brain Research,McGovern Institute for Brain Research,"McGovern Institute for Brain Research (MIT), Main Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.36262950,-71.09144810,edu,
+4b74f2d56cd0dda6f459319fec29559291c61bff,Person-Specific Subspace Analysis for Unconstrained Familiar Face Identification,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+4ba38262fe20fab3e4c80215147b498f83843b93,Obtaining the Shape of a Moving Object with a Specular Surface,Cambridge Research Laboratory,Cambridge Research Laboratory,"Strangeways Research Laboratory, Babraham Road, Romsey, Cambridge, Cambridgeshire, East of England, England, CB1 8RN, UK",52.17333465,0.14989946,edu,
+4ba38262fe20fab3e4c80215147b498f83843b93,Obtaining the Shape of a Moving Object with a Specular Surface,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+4bbe460ab1b279a55e3c9d9f488ff79884d01608,GAGAN: Geometry-Aware Generative Adversarial Networks,Middlesex University,Middlesex University,"Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK",51.59029705,-0.22963221,edu,
+4bbe460ab1b279a55e3c9d9f488ff79884d01608,GAGAN: Geometry-Aware Generative Adversarial Networks,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+4b3eaedac75ac419c2609e131ea9377ba8c3d4b8,Fast Newton active appearance models,University of Lincoln,University of Lincoln,"University of Lincoln, Brayford Way, Whitton Park, New Boultham, Lincoln, Lincolnshire, East Midlands, England, LN6 7TS, UK",53.22853665,-0.54873472,edu,
+4b3eaedac75ac419c2609e131ea9377ba8c3d4b8,Fast Newton active appearance models,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+4b507a161af8a7dd41e909798b9230f4ac779315,A Theory of Multiplexed Illumination,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+4b02387c2db968a70b69d98da3c443f139099e91,Detecting facial landmarks in the video based on a hybrid framework,Guangdong University of Technology,Guangdong University of Technology,"广东工业大学, 东风东路, 黄花岗街道, 越秀区 (Yuexiu), 广州市, 广东省, 510080, 中国",23.13538360,113.29470496,edu,
+4b02387c2db968a70b69d98da3c443f139099e91,Detecting facial landmarks in the video based on a hybrid framework,Guangdong University of Technology,Guangdong University of Technology,"广东工业大学, 东风东路, 黄花岗街道, 越秀区 (Yuexiu), 广州市, 广东省, 510080, 中国",23.13538360,113.29470496,edu,
+4b71d1ff7e589b94e0f97271c052699157e6dc4a,Pose-Encoded Spherical Harmonics for Face Recognition and Synthesis Using a Single Image,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+4b0a2937f64df66cadee459a32ad7ae6e9fd7ed2,"Quo Vadis, Action Recognition? A New Model and the Kinetics Dataset",University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+4b4ecc1cb7f048235605975ab37bb694d69f63e5,Nonlinear Embedding Transform for Unsupervised Domain Adaptation,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+4be774af78f5bf55f7b7f654f9042b6e288b64bd,Variational methods for Conditional Multimodal Learning: Generating Human Faces from Attributes,Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.02223470,77.56718325,edu,
+4b004f3c524778d524bfb0cda923bc6e895f9ea9,Quantifying and Detecting Collective Motion by Manifold Learning,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+4b004f3c524778d524bfb0cda923bc6e895f9ea9,Quantifying and Detecting Collective Motion by Manifold Learning,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+4b321065f6a45e55cb7f9d7b1055e8ac04713b41,Affective Computing Models for Character Animation,Liverpool John Moores University,Liverpool John Moores University,"John Lennon Art and Design Building, Duckinfield Street, Knowledge Quarter, Liverpool, North West England, England, L3 5YD, UK",53.40507470,-2.97030029,edu,
+4b605e6a9362485bfe69950432fa1f896e7d19bf,A Comparison of Human and Automated Face Verification Accuracy on Unconstrained Image Sets,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+4be79ee47771c670aa63bcdaff870f9dd8575a0d,phi-LSTM: A Phrase-based Hierarchical LSTM Model for Image Captioning,University of Malaya,University of Malaya,"UM, Lingkaran Wawasan, Bukit Pantai, Bangsar, KL, 50603, Malaysia",3.12267405,101.65356103,edu,
+4b3dd18882ff2738aa867b60febd2b35ab34dffc,Facial Feature Analysis of Spontaneous Facial Expression,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+4b3dd18882ff2738aa867b60febd2b35ab34dffc,Facial Feature Analysis of Spontaneous Facial Expression,American University in Cairo,The American University in Cairo,"الجامعة الأمريكية بالقاهرة, شارع القصر العينى, القاهرة القديمة, جاردن سيتي, القاهرة, محافظة القاهرة, 11582, مصر",30.04287695,31.23664139,edu,
+11a2ef92b6238055cf3f6dcac0ff49b7b803aee3,Towards reduction of the training and search running time complexities for non-rigid object segmentation,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+11dc744736a30a189f88fa81be589be0b865c9fa,A Unified Multiplicative Framework for Attribute Learning,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+11dc744736a30a189f88fa81be589be0b865c9fa,A Unified Multiplicative Framework for Attribute Learning,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+114b34fd2a2a2acd4a968cbaeb5e0d2251fb2835,Combining Appearance and Structure from Motion Features for Road Scene Understanding,Oxford Brookes University,Oxford Brookes University,"Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK",51.75552050,-1.22615970,edu,
+11a210835b87ccb4989e9ba31e7559bb7a9fd292,A fuzzy approximator with Gaussian membership functions to estimate a human's head pose,Islamic Azad University,Islamic Azad University,"دانشگاه آزاد اسلامی, همدان, بخش مرکزی شهرستان همدان, شهرستان همدان, استان همدان, ‏ایران‎",34.84529990,48.55962120,edu,
+11a210835b87ccb4989e9ba31e7559bb7a9fd292,A fuzzy approximator with Gaussian membership functions to estimate a human's head pose,Ferdowsi University of Mashhad,Ferdowsi University of Mashhad,"دانشگاه فردوسی مشهد, بولوار دانش, رضاشهر, منطقه ۹, مشهد, شهرستان مشهد, استان خراسان رضوی, 9177146164, ‏ایران‎",36.30766160,59.52690511,edu,
+118ca3b2e7c08094e2a50137b1548ada7935e505,A Dataset To Evaluate The Representations Learned By Video Prediction Models,Toyota Research Institute,Toyota Research Institute,"Toyota Research Institute, 4440, West El Camino Real, Los Altos, Santa Clara County, California, 94022, USA",37.40253645,-122.11655107,edu,
+11aa527c01e61ec3a7a67eef8d7ffe9d9ce63f1d,"Automated measurement of mouse social behaviors using depth sensing, video tracking, and machine learning.",California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+11aa527c01e61ec3a7a67eef8d7ffe9d9ce63f1d,"Automated measurement of mouse social behaviors using depth sensing, video tracking, and machine learning.",California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+111ac5d013ac59aa8da919a470cdf83b437f9721,Improved Class-Specific Codebook with Two-Step Classification for Scene-Level Classification of High Resolution Remote Sensing Images,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+114907f89466987b3c41c8d530e50b2ac67179cf,Face Identification by a Cascade of Rejection Classifiers,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+113c22eed8383c74fe6b218743395532e2897e71,MODEC: Multimodal Decomposable Models for Human Pose Estimation,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+11408af8861fb0a977412e58c1a23d61b8df458c,A robust learning algorithm based on SURF and PSM for facial expression recognition,Kobe University,Kobe University,"神戸大学, 灘三田線, 灘区, 神戸市, 兵庫県, 近畿地方, 657-00027, 日本",34.72757140,135.23710000,edu,
+11408af8861fb0a977412e58c1a23d61b8df458c,A robust learning algorithm based on SURF and PSM for facial expression recognition,Kobe University,Kobe University,"神戸大学, 灘三田線, 灘区, 神戸市, 兵庫県, 近畿地方, 657-00027, 日本",34.72757140,135.23710000,edu,
+11cc0774365b0cc0d3fa1313bef3d32c345507b1,Face Recognition Using Active Near-IR Illumination,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+117aae1dc5b3aee679a690f7dab84e9a23add930,Age and Video Captioning,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+115808104b2a9c3ab6e2e60582ab7e33b937b754,Visual7W: Grounded Question Answering in Images,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+11269e98f072095ff94676d3dad34658f4876e0e,Facial expression recognition with multithreaded cascade of rotation-invariant HOG,Kobe University,Kobe University,"神戸大学, 灘三田線, 灘区, 神戸市, 兵庫県, 近畿地方, 657-00027, 日本",34.72757140,135.23710000,edu,
+11269e98f072095ff94676d3dad34658f4876e0e,Facial expression recognition with multithreaded cascade of rotation-invariant HOG,Kobe University,Kobe University,"神戸大学, 灘三田線, 灘区, 神戸市, 兵庫県, 近畿地方, 657-00027, 日本",34.72757140,135.23710000,edu,
+11269e98f072095ff94676d3dad34658f4876e0e,Facial expression recognition with multithreaded cascade of rotation-invariant HOG,Kobe University,Kobe University,"神戸大学, 灘三田線, 灘区, 神戸市, 兵庫県, 近畿地方, 657-00027, 日本",34.72757140,135.23710000,edu,
+116170983869d56780343823621f2f30f62aa38e,"4D Cardiff Conversation Database (4D CCDb): a 4D database of natural, dyadic conversations",Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.48799610,-3.17969747,edu,
+113e5678ed8c0af2b100245057976baf82fcb907,Facing Imbalanced Data--Recommendations for the Use of Performance Metrics,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+113e5678ed8c0af2b100245057976baf82fcb907,Facing Imbalanced Data--Recommendations for the Use of Performance Metrics,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+117c7cf24b9310ed785ef6fb84e95c73186f61e6,Historical Heterogeneity Predicts Smiling: Evidence from Large-Scale Observational Analyses,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+117c7cf24b9310ed785ef6fb84e95c73186f61e6,Historical Heterogeneity Predicts Smiling: Evidence from Large-Scale Observational Analyses,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+113ced4a8c5ecb6da1b2eb63c1300cd8df982917,Deep Convolutional Neural Networks for Efficient Pose Estimation in Gesture Videos,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+113ced4a8c5ecb6da1b2eb63c1300cd8df982917,Deep Convolutional Neural Networks for Efficient Pose Estimation in Gesture Videos,University of Leeds,University of Leeds,"University of Leeds, Inner Ring Road, Woodhouse, Leeds, Yorkshire and the Humber, England, LS2 9NS, UK",53.80387185,-1.55245712,edu,
+11c04c4f0c234a72f94222efede9b38ba6b2306c,Real-time human action recognition by luminance field trajectory analysis,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+11c04c4f0c234a72f94222efede9b38ba6b2306c,Real-time human action recognition by luminance field trajectory analysis,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+1128a4f57148cec96c0ef4ae3b5a0fbf07efbad9,Action Recognition by Learning Deep Multi-Granular Spatio-Temporal Video Representation,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+1128a4f57148cec96c0ef4ae3b5a0fbf07efbad9,Action Recognition by Learning Deep Multi-Granular Spatio-Temporal Video Representation,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+1128a4f57148cec96c0ef4ae3b5a0fbf07efbad9,Action Recognition by Learning Deep Multi-Granular Spatio-Temporal Video Representation,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+11824658170994e4d4655e8f688bace16a0d3e48,Multi-person Head Segmentation in Low Resolution Crowd Scenes Using Convolutional Encoder-Decoder Framework,Qatar University,Qatar University,"Qatar University, Roindabout 3, Al Tarfa (68), أم صلال, 24685, ‏قطر‎",25.37461295,51.48980354,edu,
+11824658170994e4d4655e8f688bace16a0d3e48,Multi-person Head Segmentation in Low Resolution Crowd Scenes Using Convolutional Encoder-Decoder Framework,University of Warwick,University of Warwick,"University of Warwick, University Road, Kirby Corner, Cannon Park, Coventry, West Midlands Combined Authority, West Midlands, England, CV4 7AL, UK",52.37931310,-1.56042520,edu,
+11138173fa5e72a6bba314881d8d5dd74c1ac83f,Optimizing Mean Reciprocal Rank for person re-identification,Kyoto University,Kyoto University,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.02749960,135.78154513,edu,
+110e44112bb0b742ca2c8ee607fc359698ee1198,Semantic Label Sharing for Learning with Many Categories,Courant Institute of Mathematical Sciences,Courant Institute of Mathematical Sciences,"Courant Institute of Mathematical Sciences, 251, Mercer Street, Washington Square Village, Greenwich Village, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72864840,-73.99568630,edu,
+110e44112bb0b742ca2c8ee607fc359698ee1198,Semantic Label Sharing for Learning with Many Categories,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+116888b8f08419f027f5047f0ff1557b16f69d5a,Fearful contextual expression impairs the encoding and recognition of target faces: an ERP study,University of Geneva,University of Geneva,"University of Chicago-Yerkes Observatory, 373, West Geneva Street, Williams Bay, Walworth County, Wisconsin, 53191, USA",42.57054745,-88.55578627,edu,
+116888b8f08419f027f5047f0ff1557b16f69d5a,Fearful contextual expression impairs the encoding and recognition of target faces: an ERP study,University of Geneva,University of Geneva,"University of Chicago-Yerkes Observatory, 373, West Geneva Street, Williams Bay, Walworth County, Wisconsin, 53191, USA",42.57054745,-88.55578627,edu,
+11a1e99fc65fb8567d7f52dce941231ea949db0a,Subgraph decomposition for multi-target tracking,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+11a47a91471f40af5cf00449954474fd6e9f7694,NIRFaceNet: A Convolutional Neural Network for Near-Infrared Face Identification,Southwest University,Southwest University,"西南大学, 天生路, 北碚区 (Beibei), 北碚区, 北碚区 (Beibei), 重庆市, 400711, 中国",29.82366295,106.42050016,edu,
+118e87ee5a8e0faa71b6ca5af6ff38f875132464,Pose Invariant Embedding for Deep Person Re-identification,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+11b45236b2798091ddab35c572a35f447bb8d717,The Case for Personal Data-Driven Decision Making,MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+11a6af9b32a93c4053dc12f70afac64a4138b2d1,Multi-hypothesis motion planning for visual object tracking,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+11a6af9b32a93c4053dc12f70afac64a4138b2d1,Multi-hypothesis motion planning for visual object tracking,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+11fa5abb5d5d09efbf9dacae6a6ceb9b2647f877,DCTNet: A simple learning-free approach for face recognition,Yonsei University,Yonsei University,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.56004060,126.93692480,edu,
+11b3877df0213271676fa8aa347046fd4b1a99ad,Unsupervised Identification of Multiple Objects of Interest from Multiple Images: dISCOVER,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+1167136efcb52cf49e89b90949149312bab19cc3,Multi-camera Pedestrian Tracking using Group Structure,"University of California, Riverside","University of California, Riverside","University of California, Riverside, Linden Street, Riverside, Riverside County, California, 92521, USA",33.98071305,-117.33261035,edu,
+1167136efcb52cf49e89b90949149312bab19cc3,Multi-camera Pedestrian Tracking using Group Structure,"University of California, Riverside","University of California, Riverside","University of California, Riverside, Linden Street, Riverside, Riverside County, California, 92521, USA",33.98071305,-117.33261035,edu,
+1188c925d90e93a205c5fc15d11fb2ae02660f2e,Deep Photovoltaic Nowcasting,Kyoto University,Kyoto University,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.02749960,135.78154513,edu,
+1130c38e88108cf68b92ecc61a9fc5aeee8557c9,Dynamically encoded actions based on spacetime saliency,York University,York University,"York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada",43.77439110,-79.50481085,edu,
+11b89011298e193d9e6a1d99302221c1d8645bda,Structured Feature Selection,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+11fa30ccbf62a64f650844b9cc39797e5faa82d5,A Spatial and Temporal Features Mixture Model with Body Parts for Video-based Person Re-Identification,Beijing Jiaotong University,Beijing Jiaotong University,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国",39.94976005,116.33629046,edu,
+11fa30ccbf62a64f650844b9cc39797e5faa82d5,A Spatial and Temporal Features Mixture Model with Body Parts for Video-based Person Re-Identification,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+111ae23b60284927f2545dfc59b0147bb3423792,Classroom Data Collection and Analysis using Computer Vision,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+11f476a5da2366cfa6e4b4e2654a0833fa7d4fa4,Weakly Supervised Learning of Mid-Level Features with Beta-Bernoulli Process Restricted Boltzmann Machines,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+1177977134f6663fff0137f11b81be9c64c1f424,Multi-manifold deep metric learning for image set classification,"Advanced Digital Sciences Center, Singapore","Advanced Digital Sciences Center, Singapore","1 Create Way, 14-02 Create Tower, Singapore 138602",1.30372570,103.77377630,edu,
+1177977134f6663fff0137f11b81be9c64c1f424,Multi-manifold deep metric learning for image set classification,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+1177977134f6663fff0137f11b81be9c64c1f424,Multi-manifold deep metric learning for image set classification,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+1177977134f6663fff0137f11b81be9c64c1f424,Multi-manifold deep metric learning for image set classification,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+1177977134f6663fff0137f11b81be9c64c1f424,Multi-manifold deep metric learning for image set classification,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+11f515ead5b4a7259668f2620e808fa8ba5ea65c,Phrase Localization and Visual Relationship Detection with Comprehensive Image-Language Cues,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+1190cba0cae3c8bb81bf80d6a0a83ae8c41240bc,Squared Earth Mover's Distance Loss for Training Deep Neural Networks on Ordered-Classes,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+114d6a2503847a72afeb38e79243ad10abc7e123,Building Unified Human Descriptors For Multi-Type Activity Recognition,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+11ac88aebe0230e743c7ea2c2a76b5d4acbfecd0,Hybrid Cascade Model for Face Detection in the Wild Based on Normalized Pixel Difference and a Deep Convolutional Neural Network,University of Zagreb,"University of Zagreb, Faculty of Electrical Engineering and Computing, Croatia","Unska ul. 3, 10000, Zagreb, Croatia",45.80112100,15.97084090,edu,
+117f164f416ea68e8b88a3005e55a39dbdf32ce4,Neuroaesthetics in fashion: Modeling the perception of fashionability,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+7d520f474f2fc59422d910b980f8485716ce0a3e,Designing Convolutional Neural Networks for Urban Scene Understanding,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+7d520f474f2fc59422d910b980f8485716ce0a3e,Designing Convolutional Neural Networks for Urban Scene Understanding,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+7dda2eb0054eb1aeda576ed2b27a84ddf09b07d4,Face Recognition and Representation by Tensor-based MPCA Approach,Chosun University,Chosun University,"조선대역, 서남로, 남동, 동구, 광주, 61473, 대한민국",35.14410310,126.92578580,edu,
+7d2556d674ad119cf39df1f65aedbe7493970256,Now You Shake Me: Towards Automatic 4D Cinema,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+7d94fd5b0ca25dd23b2e36a2efee93244648a27b,Convolutional Network for Attribute-driven and Identity-preserving Human Face Generation,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+7d94fd5b0ca25dd23b2e36a2efee93244648a27b,Convolutional Network for Attribute-driven and Identity-preserving Human Face Generation,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+7d8c2d29deb80ceed3c8568100376195ce0914cb,Identity-Aware Textual-Visual Matching with Latent Co-attention,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+7d306512b545df98243f87cb8173df83b4672b18,Flag Manifolds for the Characterization of Geometric Structure in Large Data Sets,Colorado State University,Colorado State University,"Colorado State University, West Pitkin Street, Woodwest, Fort Collins, Larimer County, Colorado, 80526-2002, USA",40.57093580,-105.08655256,edu,
+7db5404feaa08e3e53bbf4fea7d89bcf509cfdbd,An Enhanced Default Approach Bias Following Human Amygdala Lesions,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+7db5404feaa08e3e53bbf4fea7d89bcf509cfdbd,An Enhanced Default Approach Bias Following Human Amygdala Lesions,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+7d52c9da079a4929faa0b39d8acb92240eb3a1f4,Vision-based real estate price estimation,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+7d52c9da079a4929faa0b39d8acb92240eb3a1f4,Vision-based real estate price estimation,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+7d7b564aba3161231c789169cafec38342a18ea7,Forecasting user attention during everyday mobile interactions using device-integrated and wearable sensors,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+7d7b564aba3161231c789169cafec38342a18ea7,Forecasting user attention during everyday mobile interactions using device-integrated and wearable sensors,Osaka University,Osaka University,"大阪大学清明寮, 服部西町四丁目, 豊中市, 大阪府, 近畿地方, 日本",34.80809035,135.45785218,edu,
+7d7b564aba3161231c789169cafec38342a18ea7,Forecasting user attention during everyday mobile interactions using device-integrated and wearable sensors,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+7d7b564aba3161231c789169cafec38342a18ea7,Forecasting user attention during everyday mobile interactions using device-integrated and wearable sensors,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+7d41b67a641426cb8c0f659f0ba74cdb60e7159a,Soft biometric retrieval to describe and identify surveillance images,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+7d7ab791ae3cfa72b4feacf1e09a4493c1a5a87c,Individualness and Determinantal Point Processes for Pedestrian Detection,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+7d1688ce0b48096e05a66ead80e9270260cb8082,Real vs. Fake Emotion Challenge: Learning to Rank Authenticity from Facial Activity Descriptors,Otto von Guericke University,Otto von Guericke University,"Otto-von-Guericke-Universität Magdeburg, 2, Universitätsplatz, Krökentorviertel/Breiter Weg NA, Alte Neustadt, Magdeburg, Sachsen-Anhalt, 39106, Deutschland",52.14005065,11.64471248,edu,
+7d53678ef6009a68009d62cd07c020706a2deac3,Facial Feature Point Extraction Using the Adaptive Mean Shape in Active Shape Model,Hanyang University,Hanyang University,"한양대, 206, 왕십리로, 사근동, 성동구, 서울특별시, 04763, 대한민국",37.55572710,127.04366420,edu,
+7d2df9b943a666caa9154dbc1a0ba3dda8cf423b,Automatic extraction of facial interest points based on 2D and 3D data,EURECOM,EURECOM,"Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France",43.61438600,7.07112500,edu,
+7d7be6172fc2884e1da22d1e96d5899a29831ad2,L2GSCI: Local to Global Seam Cutting and Integrating for Accurate Face Contour Extraction,South China University of Technology,South China University of Technology,"华工站, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.04900470,113.39715710,edu,
+7d7be6172fc2884e1da22d1e96d5899a29831ad2,L2GSCI: Local to Global Seam Cutting and Integrating for Accurate Face Contour Extraction,South China University of Technology,South China University of Technology,"华工站, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.04900470,113.39715710,edu,
+7d7be6172fc2884e1da22d1e96d5899a29831ad2,L2GSCI: Local to Global Seam Cutting and Integrating for Accurate Face Contour Extraction,Education University of Hong Kong,The Education University of Hong Kong,"香港教育大學 The Education University of Hong Kong, 露屏路 Lo Ping Road, 鳳園 Fung Yuen, 下坑 Ha Hang, 新界 New Territories, HK, DD5 1119, 中国",22.46935655,114.19474194,edu,
+7d7be6172fc2884e1da22d1e96d5899a29831ad2,L2GSCI: Local to Global Seam Cutting and Integrating for Accurate Face Contour Extraction,South China University of Technology,South China University of Technology,"华工站, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.04900470,113.39715710,edu,
+7d0ff6d0621b3846e8543bc162fd0215d8adfaf0,Efficient Large-Scale Similarity Search Using Matrix Factorization,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+7ddb2e298acbe29ccaea131e8a6475d451eb90ad,A Deep-Learning-Based Fashion Attributes Detection Model,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+7dacb063f783df07f89934c962c3e170acb166cc,Multi-modal Factorized Bilinear Pooling with Co-attention Learning for Visual Question Answering,Hangzhou Dianzi University,Hangzhou Dianzi University,"杭州电子科技大学, 2号大街, 白杨街道, 江干区 (Jianggan), 杭州市 Hangzhou, 浙江省, 310018, 中国",30.31255250,120.34309460,edu,
+7dacb063f783df07f89934c962c3e170acb166cc,Multi-modal Factorized Bilinear Pooling with Co-attention Learning for Visual Question Answering,University of North Carolina at Charlotte,University of North Carolina at Charlotte,"Lot 20, Poplar Terrace Drive, Charlotte, Mecklenburg County, North Carolina, 28223, USA",35.31034410,-80.73261617,edu,
+7dacb063f783df07f89934c962c3e170acb166cc,Multi-modal Factorized Bilinear Pooling with Co-attention Learning for Visual Question Answering,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+7df4f96138a4e23492ea96cf921794fc5287ba72,A Jointly Learned Deep Architecture for Facial Attribute Analysis and Face Detection in the Wild,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+7dd578878e84337d6d0f5eb593f22cabeacbb94c,Classifiers for Driver Activity Monitoring,University of Minnesota,University of Minnesota,"WeismanArt, 333, East River Parkway, Marcy-Holmes, Phillips, Minneapolis, Hennepin County, Minnesota, 55455, USA",44.97308605,-93.23708813,edu,
+7d941dbab0bb645af81781bd3867ebde11c3641d,Handwritten Hangul recognition using deep convolutional neural networks,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+7d1ac241fb603a4237cb681dbcf163a9f89e906a,Supplementary Material : Switching Convolutional Neural Network for Crowd Counting,Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.02223470,77.56718325,edu,
+7df268a3f4da7d747b792882dfb0cbdb7cc431bc,Semi-supervised Adversarial Learning to Generate Photorealistic Face Images of New Identities from 3D Morphable Model,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+7df268a3f4da7d747b792882dfb0cbdb7cc431bc,Semi-supervised Adversarial Learning to Generate Photorealistic Face Images of New Identities from 3D Morphable Model,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+7d621ec871a03a01f5aa65253e9ae6c8aadaf798,Converting Static Image Datasets to Spiking Neuromorphic Datasets Using Saccades,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+7d621ec871a03a01f5aa65253e9ae6c8aadaf798,Converting Static Image Datasets to Spiking Neuromorphic Datasets Using Saccades,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+7de386bf2a1b2436c836c0cc1f1f23fccb24aad6,Finding What the Driver Does Final Report Prepared by : Harini Veeraraghavan,University of Minnesota,University of Minnesota,"WeismanArt, 333, East River Parkway, Marcy-Holmes, Phillips, Minneapolis, Hennepin County, Minnesota, 55455, USA",44.97308605,-93.23708813,edu,
+7de1d463fef3c63cb228f5b4a6a72e62f66630e6,Cascaded Interactional Targeting Network for Egocentric Video Analysis,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+7de1d463fef3c63cb228f5b4a6a72e62f66630e6,Cascaded Interactional Targeting Network for Egocentric Video Analysis,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+7de1d463fef3c63cb228f5b4a6a72e62f66630e6,Cascaded Interactional Targeting Network for Egocentric Video Analysis,Hefei University of Technology,Hefei University of Technology,"合肥工业大学(屯溪路校区), 193号, 南一环路, 航运南村, 包公街道, 合肥市区, 合肥市, 安徽省, 230009, 中国",31.84691800,117.29053367,edu,
+29ed326a7da1678880db02e5d0e7cb7376dffb98,A square-root sampling approach to fast histogram-based search,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+29ed326a7da1678880db02e5d0e7cb7376dffb98,A square-root sampling approach to fast histogram-based search,National Tsing Hua University,National Tsing Hua University,"國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.79254840,120.99511830,edu,
+2914e8c62f0432f598251fae060447f98141e935,Activity Analysis of Spectator Performer Videos Using Motion Trajectories,University of Nebraska - Lincoln,University of Nebraska - Lincoln,"Sheldon Museum of Art, North 12th Street, West Lincoln, Lincoln, Lancaster County, Nebraska, 68588-0300, USA",40.81747230,-96.70444680,edu,
+291dba3baa7d42f1e30b26a714e525cb73c05af1,Domain Adaptation for Semantic Segmentation via Class-Balanced Self-Training,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+29954bf080407f23c8ac140202bd2ae5a48fdede,Abnormal Event Detection Based on Saliency Information,Shanghai University,Shanghai University,"上海大学, 锦秋路, 大场镇, 宝山区 (Baoshan), 上海市, 201906, 中国",31.32235655,121.38400941,edu,
+29b96e41948e35a5bc4a9e7ae978808bc5b0c841,Automatic collection of Web video shots corresponding to specific actions using Web images,University of Electro-Communications,The University of Electro-Communications,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本",35.65729570,139.54255868,edu,
+2902f62457fdf7e8e8ee77a9155474107a2f423e,Non-rigid 3D Shape Registration using an Adaptive Template,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+29d3ed0537e9ef62fd9ccffeeb72c1beb049e1ea,Parametric Dictionaries and Feature Augmentation for Continuous Domain Adaptation,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+29d3ed0537e9ef62fd9ccffeeb72c1beb049e1ea,Parametric Dictionaries and Feature Augmentation for Continuous Domain Adaptation,University of Maryland College Park,University of Maryland College Park,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",38.99203005,-76.94610290,edu,
+29d2b60bdd998479df7f088859905379e30967a5,Toward a Taxonomy and Computational Models of Abnormalities in Images,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+29d2b60bdd998479df7f088859905379e30967a5,Toward a Taxonomy and Computational Models of Abnormalities in Images,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+29d2b60bdd998479df7f088859905379e30967a5,Toward a Taxonomy and Computational Models of Abnormalities in Images,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+29d2b60bdd998479df7f088859905379e30967a5,Toward a Taxonomy and Computational Models of Abnormalities in Images,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+297c27c74e5cc731b5bd1ad95726b4192e3b902d,Face Super-Resolution Guided by Facial Component Heatmaps,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+29b3be93a60bbc5fe842826030853f99753b08bd,Hierarchical Scene Annotation,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+29fc4de6b680733e9447240b42db13d5832e408f,Recognition of Facial Expressions Based on Tracking and Selection of Discriminative Geometric Features,Korea Electronics Technology Institute,Korea Electronics Technology Institute,"South Korea, Gyeonggi-do, Seongnam-si, Bundang-gu, 새나리로 25 (야탑동) KETI 전자부품연구원",37.40391700,127.15978600,edu,
+29fc4de6b680733e9447240b42db13d5832e408f,Recognition of Facial Expressions Based on Tracking and Selection of Discriminative Geometric Features,Chonbuk National University,Chonbuk National University,"전북대학교, 567, 백제대로, 금암동, 덕진구, 전주시, 전북, 54896, 대한민국",35.84658875,127.13501330,edu,
+29fc4de6b680733e9447240b42db13d5832e408f,Recognition of Facial Expressions Based on Tracking and Selection of Discriminative Geometric Features,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+29a8492f5aaa212ad81c2e903c73937e7ced73ee,High-precision Immune Computation for Secure Face Recognition,Donghua University,Donghua University,"东华大学, 新华路, 长宁区, 上海市, 210011, 中国",31.20619390,121.41047101,edu,
+29a8492f5aaa212ad81c2e903c73937e7ced73ee,High-precision Immune Computation for Secure Face Recognition,Donghua University,Donghua University,"东华大学, 新华路, 长宁区, 上海市, 210011, 中国",31.20619390,121.41047101,edu,
+29a8492f5aaa212ad81c2e903c73937e7ced73ee,High-precision Immune Computation for Secure Face Recognition,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+29936a4dcc91adf9708b938f0d3fc0f38409eaff,MT-VAE: Learning Motion Transformations to Generate Multimodal Human Dynamics,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+29f21bf4aa648f0996b41b03fc11b07a0e550f46,3D Face Recognition using Mapped Depth Images,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+2912c3ea67678a1052d7d5cbe734a6ad90fc360e,Facial Feature Detection using a Virtual Structuring Element,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+29f4ac49fbd6ddc82b1bb697820100f50fa98ab6,The benefits and challenges of collecting richer object annotations,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+2910fcd11fafee3f9339387929221f4fc1160973,Evaluating Open-Universe Face Identification on the Web,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+2910fcd11fafee3f9339387929221f4fc1160973,Evaluating Open-Universe Face Identification on the Web,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+29a606ba5b9ae9bc16d05a832d4e54d769c63dae,Activation of mGluR2/3 underlies the effects of N-acetylcystein on amygdala-associated autism-like phenotypes in a valproate-induced rat model of autism,National Cheng Kung University,National Cheng Kung University,"成大, 1, 大學路, 大學里, 前甲, 東區, 臺南市, 70101, 臺灣",22.99919160,120.21625134,edu,
+29a606ba5b9ae9bc16d05a832d4e54d769c63dae,Activation of mGluR2/3 underlies the effects of N-acetylcystein on amygdala-associated autism-like phenotypes in a valproate-induced rat model of autism,National Cheng Kung University,National Cheng Kung University,"成大, 1, 大學路, 大學里, 前甲, 東區, 臺南市, 70101, 臺灣",22.99919160,120.21625134,edu,
+29732d196d199acdb9d5122207b4613bb3aedf8e,Neural correlates of affective context in facial expression analysis: A simultaneous EEG-fNIRS study,New Jersey Institute of Technology,New Jersey Institute of Technology,"New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA",40.74230250,-74.17928172,edu,
+29732d196d199acdb9d5122207b4613bb3aedf8e,Neural correlates of affective context in facial expression analysis: A simultaneous EEG-fNIRS study,Drexel University,Drexel University,"Drexel University, Arch Street, Powelton Village, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.95740000,-75.19026706,edu,
+29479bb4fe8c04695e6f5ae59901d15f8da6124b,Multiple instance learning for labeling faces in broadcasting news video,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+29479bb4fe8c04695e6f5ae59901d15f8da6124b,Multiple instance learning for labeling faces in broadcasting news video,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+29479bb4fe8c04695e6f5ae59901d15f8da6124b,Multiple instance learning for labeling faces in broadcasting news video,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+293cefbe481a5a472d830a88ff140dfcc1869c31,Virtual Immortality: Reanimating Characters from TV Shows,University of Leeds,University of Leeds,"University of Leeds, Inner Ring Road, Woodhouse, Leeds, Yorkshire and the Humber, England, LS2 9NS, UK",53.80387185,-1.55245712,edu,
+291f527598c589fb0519f890f1beb2749082ddfd,Seeing People in Social Context: Recognizing People and Social Relationships,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+291265db88023e92bb8c8e6390438e5da148e8f5,MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+29c340c83b3bbef9c43b0c50b4d571d5ed037cbd,Stacked Dense U-Nets with Dual Transformers for Robust Face Alignment,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+291e5377df2eec4835b5c6889896941831a11c69,Recovering 6D Object Pose: Multi-modal Analyses on Challenges,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+291e5377df2eec4835b5c6889896941831a11c69,Recovering 6D Object Pose: Multi-modal Analyses on Challenges,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+2976605dc3b73377696537291d45f09f1ab1fbf5,Cross-Stitch Networks for Multi-task Learning,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+29693ce8b14c552e4e46d05d55cbff3942f95c30,Machine Perception for Occupational Therapy,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+29693ce8b14c552e4e46d05d55cbff3942f95c30,Machine Perception for Occupational Therapy,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+2953fa360c79f2c77bbc53c8154f49136333bfa6,Compact Tensor Pooling for Visual Question Answering,"University of California, Irvine","University of California, Irvine","University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA",33.64319010,-117.84016494,edu,
+2953fa360c79f2c77bbc53c8154f49136333bfa6,Compact Tensor Pooling for Visual Question Answering,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+297d3df0cf84d24f7efea44f87c090c7d9be4bed,Appearance-Based 3-D Face Recognition from Video,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+297d3df0cf84d24f7efea44f87c090c7d9be4bed,Appearance-Based 3-D Face Recognition from Video,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+29b86534d4b334b670914038c801987e18eb5532,Total Cluster: A person agnostic clustering method for broadcast videos,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+29b86534d4b334b670914038c801987e18eb5532,Total Cluster: A person agnostic clustering method for broadcast videos,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+29b86534d4b334b670914038c801987e18eb5532,Total Cluster: A person agnostic clustering method for broadcast videos,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+299b65d5d3914dad9aae2f936165dcebcf78db88,Weakly-and Semi-Supervised Learning of a Deep Convolutional Network for Semantic Image Segmentation,Google,"Google, Inc.","1600 Amphitheatre Pkwy, Mountain View, CA 94043, USA",37.42199990,-122.08405750,company,"Google, Mountain View, CA"
+299b65d5d3914dad9aae2f936165dcebcf78db88,Weakly-and Semi-Supervised Learning of a Deep Convolutional Network for Semantic Image Segmentation,Google,"Google, Inc.","1600 Amphitheatre Pkwy, Mountain View, CA 94043, USA",37.42199990,-122.08405750,company,"Google, Mountain View, CA"
+29db046dd1f8100b279c3f5f5c5ef19bdbf5af9a,Recent Progress of Face Image Synthesis,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+29631ca6cff21c9199c70bcdbbcd5f812d331a96,Error Rates in Users of Automatic Face Recognition Software,University of New South Wales,University of New South Wales,"UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia",-33.91758275,151.23124025,edu,
+29631ca6cff21c9199c70bcdbbcd5f812d331a96,Error Rates in Users of Automatic Face Recognition Software,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+2954deae38c40a244f6a9c0714987d786c69db7c,Human detection and pose estimation for motion picture logging and visualisation,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+2983efadb1f2980ab5ef20175f488f77b6f059d7,Emotion in Human–computer Interaction,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+291de30ceecb5dcf0644c35e2b5935d341ea148b,Explainable Black-Box Attacks Against Model-based Authentication,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+291de30ceecb5dcf0644c35e2b5935d341ea148b,Explainable Black-Box Attacks Against Model-based Authentication,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+291de30ceecb5dcf0644c35e2b5935d341ea148b,Explainable Black-Box Attacks Against Model-based Authentication,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+291de30ceecb5dcf0644c35e2b5935d341ea148b,Explainable Black-Box Attacks Against Model-based Authentication,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+29be6e76d9ed777ca032c40a6ab374a44bde38bd,Latent SVMs for Human Detection with a Locally Affine Deformation Field,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+29be6e76d9ed777ca032c40a6ab374a44bde38bd,Latent SVMs for Human Detection with a Locally Affine Deformation Field,Oxford Brookes University,Oxford Brookes University,"Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK",51.75552050,-1.22615970,edu,
+29fa7b334543b6b6a4927ea2c7ae4c6fa8f6a7c4,Latent Boosting for Action Recognition,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+29fa7b334543b6b6a4927ea2c7ae4c6fa8f6a7c4,Latent Boosting for Action Recognition,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+2911e7f0fb6803851b0eddf8067a6fc06e8eadd6,Joint Fine-Tuning in Deep Neural Networks for Facial Expression Recognition,Korea Advanced Institute of Science and Technology,Korea Advanced Institute of Science and Technology,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.36971910,127.36253700,edu,
+291e6f2a365913100de8bd1071810b8155095f08,Efficient and Exact MAP-MRF Inference using Branch and Bound,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+2917808d9018386af42e249ba4fb94bafcda54e5,Compact CNN for indexing egocentric videos,Hebrew University of Jerusalem,The Hebrew University of Jerusalem,"האוניברסיטה העברית בירושלים, Reagan Plaza, קרית מנחם בגין, הר הצופים, ירושלים, מחוז ירושלים, NO, ישראל",31.79185550,35.24472300,edu,
+29fc5339e299b47c3d4f871974069a2971b4b8b6,Personalized Automatic Estimation of Self-Reported Pain Intensity from Facial Expressions,MIT,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+29fc5339e299b47c3d4f871974069a2971b4b8b6,Personalized Automatic Estimation of Self-Reported Pain Intensity from Facial Expressions,MIT,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+29445acb03961fb27ac9221875c0a25171502144,An Efficient Face Tracker Using Active Shape Model,Anna University,Anna University,"Anna University, Nuclear Physics Road, Srinagar Colony, Ward 171, Zone 13 Adyar, Chennai, Chennai district, Tamil Nadu, 600025, India",13.01058380,80.23537360,edu,
+29445acb03961fb27ac9221875c0a25171502144,An Efficient Face Tracker Using Active Shape Model,Anna University,Anna University,"Anna University, Nuclear Physics Road, Srinagar Colony, Ward 171, Zone 13 Adyar, Chennai, Chennai district, Tamil Nadu, 600025, India",13.01058380,80.23537360,edu,
+29921072d8628544114f68bdf84deaf20a8c8f91,Multi-task Curriculum Transfer Deep Learning of Clothing Attributes,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+29b737cdb317e47e7cc219b438ea38e8fdceb45c,Approximate Distribution Matching for Sequence-to-Sequence Learning,"University of California, Santa Barbara","University of California, Santa Barbara","UCSB, Santa Barbara County, California, 93106, USA",34.41459370,-119.84581950,edu,
+2969f822b118637af29d8a3a0811ede2751897b5,Cascaded Shape Space Pruning for Robust Facial Landmark Detection,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+290a96c9aa653eb6dd64d5b0fa5bae7bf208ae14,Joint Pose and Expression Modeling for Facial Expression Recognition,Jiangsu University,Jiangsu University,"江苏大学, 301, 学府路, 京口区, 象山街道, 京口区 (Jingkou), 镇江市 / Zhenjiang, 江苏省, 212013, 中国",32.20302965,119.50968362,edu,
+29f0414c5d566716a229ab4c5794eaf9304d78b6,Biometric Template Security,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+7cab6a3247f56e0e2fc38133ea0fb89c48dadda7,Spatiotemporal Modeling for Crowd Counting in Videos,Hong Kong University of Science and Technology,Hong Kong University of Science and Technology,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.33863040,114.26203370,edu,
+7cdd8c0f3c672000506696a6f8b96b9a99e778ae,Video Monitoring and Analysis of Human Behavior for Diagnosis of Obstructive Sleep Apnoea,University of Lincoln,University of Lincoln,"University of Lincoln, Brayford Way, Whitton Park, New Boultham, Lincoln, Lincolnshire, East Midlands, England, LN6 7TS, UK",53.22853665,-0.54873472,edu,
+7c02a578734af8008177bb3f27549198b2503178,Segmenting video into classes of algorithm-suitability,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+7c61d21446679776f7bdc7afd13aedc96f9acac1,Hierarchical Label Inference for Video Classification,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+7c61d21446679776f7bdc7afd13aedc96f9acac1,Hierarchical Label Inference for Video Classification,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+7c61d21446679776f7bdc7afd13aedc96f9acac1,Hierarchical Label Inference for Video Classification,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+7c7ab59a82b766929defd7146fd039b89d67e984,Improving multiview face detection with multi-task deep convolutional neural networks,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+7ca337735ec4c99284e7c98f8d61fb901dbc9015,Driver activity monitoring through supervised and unsupervised learning,University of Minnesota,University of Minnesota,"WeismanArt, 333, East River Parkway, Marcy-Holmes, Phillips, Minneapolis, Hennepin County, Minnesota, 55455, USA",44.97308605,-93.23708813,edu,
+7ca08c7a1b61258a8f36435be7a96abde64be081,Focal Visual-Text Attention for Visual Question Answering,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+7c1cfab6b60466c13f07fe028e5085a949ec8b30,Deep Feature Consistent Variational Autoencoder,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+7c1cfab6b60466c13f07fe028e5085a949ec8b30,Deep Feature Consistent Variational Autoencoder,Shenzhen University,Shenzhen University,"深圳大学, 3688, 南海大道, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518060, 中国",22.53521465,113.93159110,edu,
+7c1cfab6b60466c13f07fe028e5085a949ec8b30,Deep Feature Consistent Variational Autoencoder,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+7c1cfab6b60466c13f07fe028e5085a949ec8b30,Deep Feature Consistent Variational Autoencoder,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+7c0bd7ce51c62671d5ffc1506786b0b7861ce00a,Utility-based acceleration of multithreaded applications on asymmetric CMPs,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+7c0bd7ce51c62671d5ffc1506786b0b7861ce00a,Utility-based acceleration of multithreaded applications on asymmetric CMPs,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+7cd3062284a9f93df05cb11161d16114be945a5b,Deep Spatiotemporal Representation of the Face for Automatic Pain Intensity Estimation,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+7ce25a0852e2345be1a1bd02b8eb4cefb9d47073,Composite Cores: Pushing Heterogeneity Into a Core,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+7c17280c9193da3e347416226b8713b99e7825b8,VideoCapsuleNet: A Simplified Network for Action Detection,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+7cffcb4f24343a924a8317d560202ba9ed26cd0b,The unconstrained ear recognition challenge,University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.05015580,14.46907327,edu,
+7cffcb4f24343a924a8317d560202ba9ed26cd0b,The unconstrained ear recognition challenge,"University of Colorado, Colorado Springs",University of Colorado Colorado Springs,"Main Hall, The Spine, Colorado Springs, El Paso County, Colorado, 80907, USA",38.89207560,-104.79716389,edu,
+7cffcb4f24343a924a8317d560202ba9ed26cd0b,The unconstrained ear recognition challenge,Islamic Azad University,Islamic Azad University,"دانشگاه آزاد اسلامی, همدان, بخش مرکزی شهرستان همدان, شهرستان همدان, استان همدان, ‏ایران‎",34.84529990,48.55962120,edu,
+7cffcb4f24343a924a8317d560202ba9ed26cd0b,The unconstrained ear recognition challenge,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+7c5dde400571fd357d1093e1829a8bd7917d8fcd,Retrospective Higher-Order Markov Processes for User Trails,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+7c5dde400571fd357d1093e1829a8bd7917d8fcd,Retrospective Higher-Order Markov Processes for User Trails,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+7c79d3a40c1a1f5b9692ed23396b0f13453c225c,The influence of vocal training and acting experience on measures of voice quality and emotional genuineness,Ryerson University,Ryerson University,"Ryerson University, Gould Street, Downtown Yonge, Old Toronto, Toronto, Ontario, M5B 2G9, Canada",43.65815275,-79.37908010,edu,
+7c79d3a40c1a1f5b9692ed23396b0f13453c225c,The influence of vocal training and acting experience on measures of voice quality and emotional genuineness,University of Vienna,University of Vienna,"Uni Wien, 1, Universitätsring, Schottenviertel, KG Innere Stadt, Innere Stadt, Wien, 1010, Österreich",48.21313020,16.36068653,edu,
+7c79d3a40c1a1f5b9692ed23396b0f13453c225c,The influence of vocal training and acting experience on measures of voice quality and emotional genuineness,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+7c79d3a40c1a1f5b9692ed23396b0f13453c225c,The influence of vocal training and acting experience on measures of voice quality and emotional genuineness,University of Vienna,University of Vienna,"Uni Wien, 1, Universitätsring, Schottenviertel, KG Innere Stadt, Innere Stadt, Wien, 1010, Österreich",48.21313020,16.36068653,edu,
+7c79d3a40c1a1f5b9692ed23396b0f13453c225c,The influence of vocal training and acting experience on measures of voice quality and emotional genuineness,Ryerson University,Ryerson University,"Ryerson University, Gould Street, Downtown Yonge, Old Toronto, Toronto, Ontario, M5B 2G9, Canada",43.65815275,-79.37908010,edu,
+7c62f5e4a62758f44bd98f087f92b6b6b1f2043b,Combination features and models for human detection,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+7c4c442e9c04c6b98cd2aa221e9d7be15efd8663,Classifier learning with hidden information,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+7c132e0a2b7e13c78784287af38ad74378da31e5,Salient Parts based Multi-people Tracking,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+7c2ec6f4ab3eae86e0c1b4f586e9c158fb1d719d,Dissimilarity-Based Classifications in Eigenspaces,Myongji University,Myongji University,"명지대, 금학로, 역북동, 처인구, 용인시, 경기, 17144, 대한민국",37.23810230,127.19034310,edu,
+7cd5d7f8295b219b029a4231ae5cffb261e00ebe,Early Active Learning with Pairwise Constraint for Person Re-identification,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+7cd5d7f8295b219b029a4231ae5cffb261e00ebe,Early Active Learning with Pairwise Constraint for Person Re-identification,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+7cf8a841aad5b7bdbea46a7bb820790e9ce12d0b,Supervised Heat Kernel Lpp Method for Face Recognition,Utah State University,Utah State University,"Utah State University, Champ Drive, Logan, Cache County, Utah, 84322, USA",41.74115040,-111.81223090,edu,
+7c9f884137a22c3bb5cefcd7dfc55e3a83979771,Person tracking-by-detection with efficient selection of part-detectors,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+7c9622ad1d8971cd74cc9e838753911fe27ccac4,Representation Learning with Smooth Autoencoder,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+7c622df16f06d9f1c1af7262e91c54906e1b7e0e,Locating Facial Features and Pose Estimation Using a 3D Shape Model,University of Manchester,University of Manchester,"University of Manchester - Main Campus, Brunswick Street, Curry Mile, Ardwick, Manchester, Greater Manchester, North West England, England, M13 9NR, UK",53.46600455,-2.23300881,edu,
+7c2c9b083817f7a779d819afee383599d2e97ed8,"Disentangling Motion, Foreground and Background Features in Videos",Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+7c2c9b083817f7a779d819afee383599d2e97ed8,"Disentangling Motion, Foreground and Background Features in Videos","Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+7c2c9b083817f7a779d819afee383599d2e97ed8,"Disentangling Motion, Foreground and Background Features in Videos",Facebook,Facebook,"250 Bryant St, Mountain View, CA 94041, USA",37.39367170,-122.08072620,company,"Facebook, Mountain View, CA"
+7c45339253841b6f0efb28c75f2c898c79dfd038,Unsupervised Joint Alignment of Complex Images,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+7c825562b3ff4683ed049a372cb6807abb09af2a,Finding Tiny Faces Supplementary Materials,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+7c825562b3ff4683ed049a372cb6807abb09af2a,Finding Tiny Faces Supplementary Materials,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+7c36afc9828379de97f226e131390af719dbc18d,Unsupervised face-name association via commute distance,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+7c36afc9828379de97f226e131390af719dbc18d,Unsupervised face-name association via commute distance,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+7ca7255c2e0c86e4adddbbff2ce74f36b1dc522d,Stereo Matching for Unconstrained Face Recognition Ph . D . Proposal,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+7c428ce264662bceae0b78f915d4d4797a2492f2,Transductive Unbiased Embedding for Zero-Shot Learning,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+7c428ce264662bceae0b78f915d4d4797a2492f2,Transductive Unbiased Embedding for Zero-Shot Learning,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+7c428ce264662bceae0b78f915d4d4797a2492f2,Transductive Unbiased Embedding for Zero-Shot Learning,"Alibaba Group, Hangzhou, China","Alibaba Group, Hangzhou, China","Alibaba Group, 五常街道, 余杭区 (Yuhang), 杭州市 Hangzhou, 浙江省, 中国",30.28106540,120.02139087,company,
+7c42371bae54050dbbf7ded1e7a9b4109a23a482,Optimized features selection using hybrid PSO-GA for multi-view gender classification,Foundation University Rawalpindi Campus,Foundation University Rawalpindi Campus,"Foundation University Rawalpindi Campus, Main Parking Road, Police Lines, راولپنڈی, Rawalpindi Cantt, پنجاب, 46600, ‏پاکستان‎",33.56095040,73.07125966,edu,
+7c42371bae54050dbbf7ded1e7a9b4109a23a482,Optimized features selection using hybrid PSO-GA for multi-view gender classification,University of Central Punjab,University of Central Punjab,"University of Central Punjab, Khyaban-e-Jinnah, PECHS, Wapda Town, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54000, ‏پاکستان‎",31.44661490,74.26797620,edu,
+7c42371bae54050dbbf7ded1e7a9b4109a23a482,Optimized features selection using hybrid PSO-GA for multi-view gender classification,University of Dammam,University of Dammam,"University of Dammam, King Faisal Rd, العقربية, الخبر, المنطقة الشرقية, ٣١٩٥٢, السعودية",26.39793625,50.19807924,edu,
+7c953868cd51f596300c8231192d57c9c514ae17,Detecting and Aligning Faces by Image Retrieval,Northwestern University,Northwestern University,"Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA",42.05511640,-87.67581113,edu,
+7cf8440b1c02c021f6ba8543ad490b4788bbe280,"Unsupervised Decoding of Long-Term, Naturalistic Human Neural Recordings with Automated Video and Audio Annotations",University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+7cf8440b1c02c021f6ba8543ad490b4788bbe280,"Unsupervised Decoding of Long-Term, Naturalistic Human Neural Recordings with Automated Video and Audio Annotations",University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+7cf8440b1c02c021f6ba8543ad490b4788bbe280,"Unsupervised Decoding of Long-Term, Naturalistic Human Neural Recordings with Automated Video and Audio Annotations",University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+7cf8440b1c02c021f6ba8543ad490b4788bbe280,"Unsupervised Decoding of Long-Term, Naturalistic Human Neural Recordings with Automated Video and Audio Annotations",University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+7cf8440b1c02c021f6ba8543ad490b4788bbe280,"Unsupervised Decoding of Long-Term, Naturalistic Human Neural Recordings with Automated Video and Audio Annotations",University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+7c7a23e8e846c1e1a6c63925d73d0d0806a040ef,Visual Analysis of Tag Co-occurrence on Nouns and Adjectives,University of Electro-Communications,The University of Electro-Communications,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本",35.65729570,139.54255868,edu,
+7c6dbaebfe14878f3aee400d1378d90d61373921,A Novel Biometric Feature Extraction Algorithm using Two Dimensional Fisherface in 2DPCA subspace for Face Recognition,University of Newcastle,University of Newcastle,"University of Newcastle Central Coast Campus, Technology Bridge, Ourimbah, Central Coast, NSW, 2258, Australia",-33.35788990,151.37834708,edu,
+7c2f6424b0bb2c28f282fbc0b4e98bf85d5584eb,Relief R-CNN: Utilizing Convolutional Feature Interrelationship for Fast Object Detection Deployment,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+7c2f6424b0bb2c28f282fbc0b4e98bf85d5584eb,Relief R-CNN: Utilizing Convolutional Feature Interrelationship for Fast Object Detection Deployment,Shenzhen University,Shenzhen University,"深圳大学, 3688, 南海大道, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518060, 中国",22.53521465,113.93159110,edu,
+7cd097f3866d56114c1778c0d9ac1c4a9a35cff9,MODELLING MULTI-OBJECT ACTIVITY BY GAUSSIAN PROCESSES 3 2 Activity Modelling and Anomaly Detection 2 . 1 Activity Representation,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+7c80d91db5977649487388588c0c823080c9f4b4,DocFace: Matching ID Document Photos to Selfies,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+7c30ea47f5ae1c5abd6981d409740544ed16ed16,Informed Democracy: Voting-based Novelty Detection for Action Recognition,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+1693e615c3a7a843880eb5bbf4e3f1beb0580f5c,Nonparametric scene parsing: Label transfer via dense scene alignment,MIT,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+1648cf24c042122af2f429641ba9599a2187d605,Boosting cross-age face verification via generative age normalization,EURECOM,EURECOM,"Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France",43.61438600,7.07112500,edu,
+166d069ea056fbb42b10ca660956fee881e6c875,Inverse Rendering with a Morphable Model: A Multilinear Approach,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+16fdd6d842475e6fbe58fc809beabbed95f0642e,Learning Temporal Embeddings for Complex Video Analysis,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+16dd9ea784a862c45d1d2af6d2fb83198f567719,Human Pose Estimation with Parsing Induced Learner,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+16dd9ea784a862c45d1d2af6d2fb83198f567719,Human Pose Estimation with Parsing Induced Learner,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+16bce9f940bb01aa5ec961892cc021d4664eb9e4,Mutual Component Analysis for Heterogeneous Face Recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+16bce9f940bb01aa5ec961892cc021d4664eb9e4,Mutual Component Analysis for Heterogeneous Face Recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+16bce9f940bb01aa5ec961892cc021d4664eb9e4,Mutual Component Analysis for Heterogeneous Face Recognition,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+16bce9f940bb01aa5ec961892cc021d4664eb9e4,Mutual Component Analysis for Heterogeneous Face Recognition,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+16bce9f940bb01aa5ec961892cc021d4664eb9e4,Mutual Component Analysis for Heterogeneous Face Recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+16bce9f940bb01aa5ec961892cc021d4664eb9e4,Mutual Component Analysis for Heterogeneous Face Recognition,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+16bce9f940bb01aa5ec961892cc021d4664eb9e4,Mutual Component Analysis for Heterogeneous Face Recognition,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+16727cd69372019267589a27574147e8cf3b25f8,Human Attribute Recognition by Rich Appearance Dictionary,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+16727cd69372019267589a27574147e8cf3b25f8,Human Attribute Recognition by Rich Appearance Dictionary,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+1665fe64f8439a1854595e2e73394517d44c35b4,An improved LBP algorithm for avatar face recognition,University of Louisville,University of Louisville,"University of Louisville, South Brook Street, Louisville, Jefferson County, Kentucky, 40208, USA",38.21675650,-85.75725023,edu,
+1633c30909f4f3d91ea4256c76c71abf62a52bd8,Deep Feature Learning for Hyperspectral Image Classification and Land Cover Estimation,University of Crete,University of Crete,"House of Europe, Μακεδονίας, Ρέθυμνο, Δήμος Ρεθύμνης, Περιφερειακή Ενότητα Ρεθύμνου, Περιφέρεια Κρήτης, Κρήτη, 930100, Ελλάδα",35.37130240,24.47544080,edu,
+1677d29a108a1c0f27a6a630e74856e7bddcb70d,Efficient Misalignment-Robust Representation for Real-Time Face Recognition,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+16eb964a5a0f5fa3692440f07dd60b23354f5f58,Observing the Natural World with Flickr,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+1601ac8e682622f489b4a18792025c0843d47b86,Transferring a generic pedestrian detector towards specific scenes,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+16c3f99f8f48d4ccb71b782f79601d5efeab8461,Pedestrian Detection Based on Informed Haar-like Features and Switchable Deep Network,Anhui Polytechnic University,Anhui Polytechnic University,"安徽工程大学, 鸠江北路, 芜湖市, 芜湖市区, 芜湖市 / Wuhu, 安徽省, 241000, 中国",31.34185955,118.40739712,edu,
+16b9d258547f1eccdb32111c9f45e2e4bbee79af,NormFace: L2 Hypersphere Embedding for Face Verification,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"Columbus, OH 43210, USA",40.01419050,-83.03091430,edu,
+16b9d258547f1eccdb32111c9f45e2e4bbee79af,NormFace: L2 Hypersphere Embedding for Face Verification,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"Columbus, OH 43210, USA",40.01419050,-83.03091430,edu,
+162ea969d1929ed180cc6de9f0bf116993ff6e06,Deep Face Recognition,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+16d3954f0418bd9a2ac20a2be6db93d49213c680,CSRNet: Dilated Convolutional Neural Networks for Understanding the Highly Congested Scenes,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+16d3954f0418bd9a2ac20a2be6db93d49213c680,CSRNet: Dilated Convolutional Neural Networks for Understanding the Highly Congested Scenes,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+16f76f040f08448cf0a3984168d69197ea4af039,"Now you see race, now you don’t: Verbal cues influence children’s racial stability judgments",University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+162dfd0d2c9f3621d600e8a3790745395ab25ebc,Head Pose Estimation Based on Multivariate Label Distribution,Southeast University,Southeast University,"SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国",32.05752790,118.78682252,edu,
+16f940b4b5da79072d64a77692a876627092d39c,A framework for automated measurement of the intensity of non-posed Facial Action Units,University of Denver,University of Denver,"University of Denver, Driscoll Bridge, Denver, Denver County, Colorado, 80208, USA",39.67665410,-104.96220300,edu,
+16f940b4b5da79072d64a77692a876627092d39c,A framework for automated measurement of the intensity of non-posed Facial Action Units,University of Miami,University of Miami,"University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA",25.71733390,-80.27866887,edu,
+16f940b4b5da79072d64a77692a876627092d39c,A framework for automated measurement of the intensity of non-posed Facial Action Units,University of Miami,University of Miami,"University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA",25.71733390,-80.27866887,edu,
+16f940b4b5da79072d64a77692a876627092d39c,A framework for automated measurement of the intensity of non-posed Facial Action Units,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+16a431d87d0f01c6d70d2b7476dfb3948064b740,Face recognition on smartphones via optimised Sparse Representation Classification,University of New South Wales,University of New South Wales,"UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia",-33.91758275,151.23124025,edu,
+16344102d21291483d1fa7484be28b563df434ce,Comparison of 2D/3D Features and Their Adaptive Score Level Fusion for 3D Face Recognition,"University of Sfax, Tunisia","REGIM-Labo: REsearch Groups in Intelligent Machines, University of Sfax, ENIS, BP 1173, Sfax, 3038, Tunisia","Université de Route de l'Aéroport Km 0.5 BP 1169 .3029 Sfax, Sfax, Tunisia",34.73610660,10.74272750,edu,"University of Sfax, Tunisia"
+16701e3cbd43b52e32d567649a194245dcd31829,Crossing-Line Crowd Counting with Two-Phase Deep Neural Networks,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+166e7fd811d104254155c90506f2f7e77947534c,Hidden Hands: Tracking Hands with an Occlusion Aware Tracker,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+16572c545384174f8136d761d2b0866e968120a8,Sequential Max-Margin Event Detectors,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+16820ccfb626dcdc893cc7735784aed9f63cbb70,Real-time embedded age and gender classification in unconstrained video,University of Ottawa,University of Ottawa,"University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada",45.42580475,-75.68740118,edu,
+16f341786f7fa8b117e8812a58742771c089e68f,Reducing Overfitting in Deep Networks by Decorrelating Representations,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+164b0e2a03a5a402f66c497e6c327edf20f8827b,Sparse Deep Transfer Learning for Convolutional Neural Network,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+164b0e2a03a5a402f66c497e6c327edf20f8827b,Sparse Deep Transfer Learning for Convolutional Neural Network,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+1667a77db764e03a87a3fd167d88b060ef47bb56,Alternative Semantic Representations for Zero-Shot Human Action Recognition,University of Manchester,University of Manchester,"University of Manchester - Main Campus, Brunswick Street, Curry Mile, Ardwick, Manchester, Greater Manchester, North West England, England, M13 9NR, UK",53.46600455,-2.23300881,edu,
+169618b8dc9b348694a31c6e9e17b989735b4d39,Unsupervised Representation Learning by Sorting Sequences,"University of California, Merced","University of California, Merced","University of California, Merced, Ansel Adams Road, Merced County, California, USA",37.36566745,-120.42158888,edu,
+160da454cc64c1117c3a164b9bf375d73fb81720,Scalable Metric Learning via Weighted Approximate Rank Component Analysis,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+16d2ead2c3e98aa1ee9c948855a027e1da2b8eea,Multi-view Deep Network for Cross-View Classification,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+16e95a907b016951da7c9327927bb039534151da,3D Face Recognition Using Spherical Vector Norms Map,Beijing Union University,Beijing Union University,"北京联合大学, 北四环东路, 飘亮阳光广场, 太阳宫乡, 朝阳区 / Chaoyang, 北京市, 100012, 中国",39.98900680,116.42067718,edu,
+16e95a907b016951da7c9327927bb039534151da,3D Face Recognition Using Spherical Vector Norms Map,Beijing Union University,Beijing Union University,"北京联合大学, 北四环东路, 飘亮阳光广场, 太阳宫乡, 朝阳区 / Chaoyang, 北京市, 100012, 中国",39.98900680,116.42067718,edu,
+16e95a907b016951da7c9327927bb039534151da,3D Face Recognition Using Spherical Vector Norms Map,Capital Normal University,Capital Normal University,"首都师范大学, 岭南路, 西冉村, 海淀区, 100048, 中国",39.92864575,116.30104052,edu,
+163e07487115641046022d57fcbc6dc9fd2669f2,Complementary feature extraction for branded handbag recognition,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+16d6737b50f969247339a6860da2109a8664198a,Convolutional Neural Networks for Age and Gender Classification,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+16815ef660ef9e4091a81044d430591348df72ee,Combining Texture and Shape Cues for Object Recognition with Minimal Supervision,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+16283efecc7332e363c9419d7129bbd5d95cbf4d,Recognizing actions from still images,Bilkent University,Bilkent University,"Bilkent Üniversitesi, 3. Cadde, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87204890,32.75395155,edu,
+1610d2d4947c03a89c0fda506a74ba1ae2bc54c2,"Robust Real-Time 3D Face Tracking from RGBD Videos under Extreme Pose, Depth, and Expression Variation",Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+164f3b9740d9ceb14658237fddede0f86b5e0c47,CASENet: Deep Category-Aware Semantic Edge Detection,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+1659a8b91c3f428f1ba6aeba69660f2c9d0a85c6,Recent developments in social signal processing,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+1659a8b91c3f428f1ba6aeba69660f2c9d0a85c6,Recent developments in social signal processing,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+1659a8b91c3f428f1ba6aeba69660f2c9d0a85c6,Recent developments in social signal processing,University of Glasgow,University of Glasgow,"University of Glasgow, University Avenue, Yorkhill, Hillhead, Glasgow, Glasgow City, Scotland, G, UK",55.87231535,-4.28921784,edu,
+164251f012186767d9f00a3baf8735dd2180fee7,ReSet: Learning Recurrent Dynamic Routing in ResNet-like Neural Networks,Moscow State University,Moscow State University,"ul. Leninskiye Gory, 1, Moskva, Russia, 119991",55.70393490,37.52866960,edu,
+166d8f840c502c5095c8651540dd393743d63ce9,Kernel Descriptors for Visual Recognition,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+167736556bea7fd57cfabc692ec4ae40c445f144,Improved Motion Description for Action Classification,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+167ea1631476e8f9332cef98cf470cb3d4847bc6,Visual Search at Pinterest,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+16243557482241171beccbbd694976103cc941ef,Learning Multiple Tasks with Deep Relationship Networks,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+16243557482241171beccbbd694976103cc941ef,Learning Multiple Tasks with Deep Relationship Networks,University of Illinois at Chicago,University of Illinois at Chicago,"University of Illinois at Chicago, West Taylor Street, Greektown, Chicago, Cook County, Illinois, 60607, USA",41.86898915,-87.64856256,edu,
+16161051ee13dd3d836a39a280df822bf6442c84,Learning Efficient Object Detection Models with Knowledge Distillation,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+16161051ee13dd3d836a39a280df822bf6442c84,Learning Efficient Object Detection Models with Knowledge Distillation,University of Missouri,University of Missouri,"L1, Maguire Boulevard, Lemone Industrial Park, Columbia, Boone County, Missouri, 65201, USA",38.92676100,-92.29193783,edu,
+16fda65f258ca22d856bb0252891deecc59efc3d,Is Enough Enough? What Is Sufficiency in Biometric Data?,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+16c7c31a7553d99f1837fc6e88e77b5ccbb346b8,Person Re-identification by Descriptive and Discriminative Classification,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+420782499f38c1d114aabde7b8a8104c9e40a974,Fashion Style in 128 Floats: Joint Ranking and Classification Using Weak Data for Feature Extraction,Waseda University,Waseda University,"早稲田大学 北九州キャンパス, 2-2, 有毛引野線, 八幡西区, 北九州市, 福岡県, 九州地方, 808-0135, 日本",33.88987280,130.70856205,edu,
+42f422a9a67ba71a9ac699205940d8cc2dca8317,On-demand Learning for Deep Image Restoration,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+422e46aa845435822b7d93c2fb9103cd94128a21,PKU-NEC @ TRECVid 2012 SED: Uneven-Sequence Based Event Detection in Surveillance Video,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+423b941641728a21e37f41359a691815cdd84ceb,Reversible Recursive Instance-Level Object Segmentation,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+426f20cf5f836f410b6ed31a990ed1bbaaf6733b,Unlocking the urban photographic record through 4D scene modeling,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+4217473596b978f13a211cdf47b7d3f6588c785f,An efficient approach for clustering face images,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+42efcb8cac3889ac25368770058e000249f68d13,Analysing Soft Clothing Biometrics for Retrieval,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+4209f140d64ce6fb891eb6ada26eaeb40af123e2,Deep Fully-Connected Part-Based Models for Human Pose Estimation,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+4209f140d64ce6fb891eb6ada26eaeb40af123e2,Deep Fully-Connected Part-Based Models for Human Pose Estimation,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+4209f140d64ce6fb891eb6ada26eaeb40af123e2,Deep Fully-Connected Part-Based Models for Human Pose Estimation,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+4209f140d64ce6fb891eb6ada26eaeb40af123e2,Deep Fully-Connected Part-Based Models for Human Pose Estimation,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+4223666d1b0b1a60c74b14c2980069905088edc6,A Convergent Incoherent Dictionary Learning Algorithm for Sparse Coding,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+42afe6d016e52c99e2c0d876052ade9c192d91e7,Spontaneous vs. posed facial behavior: automatic analysis of brow actions,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+42afe6d016e52c99e2c0d876052ade9c192d91e7,Spontaneous vs. posed facial behavior: automatic analysis of brow actions,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+42afe6d016e52c99e2c0d876052ade9c192d91e7,Spontaneous vs. posed facial behavior: automatic analysis of brow actions,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+42d9ddd942ec89a3fc6a7beed174fd75c3dabff7,The Conditional Lucas & Kanade Algorithm,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+42d9ddd942ec89a3fc6a7beed174fd75c3dabff7,The Conditional Lucas & Kanade Algorithm,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+420c46d7cafcb841309f02ad04cf51cb1f190a48,Multi-Scale Context Aggregation by Dilated Convolutions,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+4233b07033a1ef8af188383f30602a5fd0aa2181,Keep it SMPL: Automatic Estimation of 3D Human Pose and Shape from a Single Image,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+42765c170c14bd58e7200b09b2e1e17911eed42b,Feature Extraction Based on Wavelet Moments and Moment Invariants in Machine Vision Systems,Democritus University of Thrace,Democritus University of Thrace,"Δημοκρίτειο Πανεπιστήμιο Θράκης, Μάκρη - Αλεξανδρούπολη, Αλεξανδρούπολη, Δήμος Αλεξανδρούπολης, Περιφερειακή Ενότητα Έβρου, Περιφέρεια Ανατολικής Μακεδονίας και Θράκης, Μακεδονία - Θράκη, 68100, Ελλάδα",40.84941785,25.83444939,edu,
+4226c9b155ef3c5c78bd122d870fec42ae695ad7,What is the Right Illumination Normalization for Face Recognition?,American University in Cairo,The American University in Cairo,"الجامعة الأمريكية بالقاهرة, شارع القصر العينى, القاهرة القديمة, جاردن سيتي, القاهرة, محافظة القاهرة, 11582, مصر",30.04287695,31.23664139,edu,
+4226c9b155ef3c5c78bd122d870fec42ae695ad7,What is the Right Illumination Normalization for Face Recognition?,American University in Cairo,The American University in Cairo,"الجامعة الأمريكية بالقاهرة, شارع القصر العينى, القاهرة القديمة, جاردن سيتي, القاهرة, محافظة القاهرة, 11582, مصر",30.04287695,31.23664139,edu,
+428818a9edfb547431be6d7ec165c6af576c83d5,Recurrent Topic-Transition GAN for Visual Paragraph Generation,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+428818a9edfb547431be6d7ec165c6af576c83d5,Recurrent Topic-Transition GAN for Visual Paragraph Generation,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+428818a9edfb547431be6d7ec165c6af576c83d5,Recurrent Topic-Transition GAN for Visual Paragraph Generation,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+428818a9edfb547431be6d7ec165c6af576c83d5,Recurrent Topic-Transition GAN for Visual Paragraph Generation,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+428818a9edfb547431be6d7ec165c6af576c83d5,Recurrent Topic-Transition GAN for Visual Paragraph Generation,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+42e640fc7d37c51b157e7007117eacb78d7789a9,Emotional Speech of Mentally and Physically Disabled Individuals: Introducing the EmotAsS Database and First Findings,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+42e640fc7d37c51b157e7007117eacb78d7789a9,Emotional Speech of Mentally and Physically Disabled Individuals: Introducing the EmotAsS Database and First Findings,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+4203f10b41e7931a63598989aa14478c04b725c9,Using LIP to Gloss Over Faces in Single-Stage Face Detection Networks,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+42fc202713cb5205bba8be8a3b85a8be1e65d63f,QCC: A novel cluster algorithm based on Quasi-Cluster Centers,Chongqing University,Chongqing University,"重庆工商大学, 19, 翠林路, 重庆市, 重庆市中心, 南岸区 (Nan'an), 重庆市, 400067, 中国",29.50841740,106.57858552,edu,
+42e9bfa84eecbafe32e1d2f5d52acfd617b57d18,Exploiting Temporal Information for 3D Human Pose Estimation,University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+4223917177405eaa6bdedca061eb28f7b440ed8e,B-spline Shape from Motion & Shading: An Automatic Free-form Surface Modeling for Face Reconstruction,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+4223917177405eaa6bdedca061eb28f7b440ed8e,B-spline Shape from Motion & Shading: An Automatic Free-form Surface Modeling for Face Reconstruction,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+4223917177405eaa6bdedca061eb28f7b440ed8e,B-spline Shape from Motion & Shading: An Automatic Free-form Surface Modeling for Face Reconstruction,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+428cfbd3c237d04edb06690a7e9e9a40c62fc8da,"Algorithmic clothing: hybrid recommendation, from street-style-to-shop",University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+428cfbd3c237d04edb06690a7e9e9a40c62fc8da,"Algorithmic clothing: hybrid recommendation, from street-style-to-shop",Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+42eda7c20db9dc0f42f72bb997dd191ed8499b10,Gaze Embeddings for Zero-Shot Image Classification,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+42eda7c20db9dc0f42f72bb997dd191ed8499b10,Gaze Embeddings for Zero-Shot Image Classification,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+42c9394ca1caaa36f535721fa9a64b2c8d4e0dee,Label Efficient Learning of Transferable Representations across Domains and Tasks,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+42c9394ca1caaa36f535721fa9a64b2c8d4e0dee,Label Efficient Learning of Transferable Representations across Domains and Tasks,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+423be52973dab29c31a845ea54c9050aba0d650a,Walking on Minimax Paths for k-NN Search,Pohang University of Science and Technology,Pohang University of Science and Technology,"포스텍, 77, 청암로, 효곡동, 남구, 포항시, 경북, 37673, 대한민국",36.01773095,129.32107509,edu,
+428d1777846efa8e86b694791b8dbf114e188f30,Towards 4D Coupled Models of Conversational Facial Expression Interactions,Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.48799610,-3.17969747,edu,
+428d1777846efa8e86b694791b8dbf114e188f30,Towards 4D Coupled Models of Conversational Facial Expression Interactions,Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.48799610,-3.17969747,edu,
+42854a0175d866f190378a3034406e11cd160568,Joint Graph Decomposition and Node Labeling by Local Search,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+421a7b0b6cf45ccba3df41a99fbb272d324489d9,Facilitating Autism Research.,University of Connecticut,University of Connecticut,"University of Connecticut, Glenbrook Road, Storrs, Tolland County, Connecticut, 06269, USA",41.80937790,-72.25364140,edu,
+4205cb47ba4d3c0f21840633bcd49349d1dc02c1,Action recognition with gradient boundary convolutional network,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+4205cb47ba4d3c0f21840633bcd49349d1dc02c1,Action recognition with gradient boundary convolutional network,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+4205cb47ba4d3c0f21840633bcd49349d1dc02c1,Action recognition with gradient boundary convolutional network,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+42bb241681c4bec1fa36211a204fa0dc8158e5ff,Localizing Objects While Learning Their Appearance,ETH Zurich,"Computer Vision Laboratory, ETH Zurich, Zurich, Switzerland","Sternwartstrasse 7, 8092 Zürich, Switzerland",47.37723980,8.55216180,edu,
+42ded74d4858bea1070dadb08b037115d9d15db5,Exigent: An Automatic Avatar Generation System,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+421046882e42a0572c8654ae1df06bc789088c2f,Human Semantic Parsing for Person Re-identification,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+421046882e42a0572c8654ae1df06bc789088c2f,Human Semantic Parsing for Person Re-identification,Istanbul Technical University,Istanbul Technical University,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye",41.10427915,29.02231159,edu,
+42ea8a96eea023361721f0ea34264d3d0fc49ebd,Parameterized Principal Component Analysis,Florida State University,Florida State University,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA",30.44235995,-84.29747867,edu,
+42f6f5454dda99d8989f9814989efd50fe807ee8,Conditional generative adversarial nets for convolutional face generation,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+42c1111c9cbb74b2755f58c6e9e84e7d1d11cc6e,Object Recognition Based on Amounts of Unlabeled Data,Beijing Institute of Technology,Beijing Institute of Technology University,"北京理工大学, 5, 中关村南大街, 中关村, 稻香园南社区, 海淀区, 北京市, 100872, 中国",39.95866520,116.30971281,edu,
+42c1111c9cbb74b2755f58c6e9e84e7d1d11cc6e,Object Recognition Based on Amounts of Unlabeled Data,Beijing Institute of Technology,Beijing Institute of Technology University,"北京理工大学, 5, 中关村南大街, 中关村, 稻香园南社区, 海淀区, 北京市, 100872, 中国",39.95866520,116.30971281,edu,
+42a712dbfe07262ba2b479e800008f08ad1c1388,Learning to Sample Using Stein Discrepancy,Dartmouth College,Dartmouth College,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA",43.70479270,-72.29259090,edu,
+425833b5fe892b00dcbeb6e3975008e9a73a5a72,A Review of Performance Evaluation for Biometrics Systems,Yonsei University,Yonsei University,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.56004060,126.93692480,edu,
+4276eb27e2e4fc3e0ceb769eca75e3c73b7f2e99,Face Recognition From Video,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+429961112a9b4f08f6b68acce8868b3468d72c6e,Learning Dilation Factors for Semantic Segmentation of Street Scenes,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+89dcf3d6f42f1a2fcdb0c81982ac1ea9e4ce2339,Convolutional Neural Networks for Detecting and Mapping Crowds in First Person Vision Applications,Eindhoven University of Technology,Eindhoven University of Technology,"Technische Universiteit Eindhoven, 2, De Rondom, Villapark, Eindhoven, Noord-Brabant, Nederland, 5600 MB, Nederland",51.44866020,5.49039957,edu,
+89c7f6a765aec6e7c754063bd723b1313f058948,Automatic Naming of Speakers in Video via Name-Face Mapping,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+8916cbd3eb66475182a177ade018ed8a3eed26b7,Fashion apparel detection: The role of deep convolutional neural network and pose-dependent priors,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+89002a64e96a82486220b1d5c3f060654b24ef2a,PIEFA: Personalized Incremental and Ensemble Face Alignment,University of North Carolina at Charlotte,University of North Carolina at Charlotte,"Lot 20, Poplar Terrace Drive, Charlotte, Mecklenburg County, North Carolina, 28223, USA",35.31034410,-80.73261617,edu,
+8973910c8acfd296922d9691a533b3c5061ec815,Supplementary Material for Efficient Online Local Metric Adaptation via Negative Samples for Person Re-Identification,Northwestern University,Northwestern University,"Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA",42.05511640,-87.67581113,edu,
+89e7d23e0c6a1d636f2da68aaef58efee36b718b,Lucas-Kanade Scale Invariant Feature Transform for Uncontrolled Viewpoint Face Recognition,Chonbuk National University,Chonbuk National University,"전북대학교, 567, 백제대로, 금암동, 덕진구, 전주시, 전북, 54896, 대한민국",35.84658875,127.13501330,edu,
+89f4bcbfeb29966ab969682eae235066a89fc151,A comparison of photometric normalisation algorithms for face verification,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+891ecd15c285aeb7286762b8a02e9897cd9df5a1,Driving recorder based on-road pedestrian tracking using visual SLAM and Constrained Multiple-Kernel,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+891ecd15c285aeb7286762b8a02e9897cd9df5a1,Driving recorder based on-road pedestrian tracking using visual SLAM and Constrained Multiple-Kernel,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+89f9fcc6b6bbc3c8c13f37d602d42a5c7196bcdd,Video Captioning via Hierarchical Reinforcement Learning,"University of California, Santa Barbara","University of California, Santa Barbara","UCSB, Santa Barbara County, California, 93106, USA",34.41459370,-119.84581950,edu,
+892db59add66fc581ae1a7338ff8bd6b7aa0f2b4,FPGA-based Normalization for Modified Gram-Schmidt Orthogonalization,New Jersey Institute of Technology,New Jersey Institute of Technology,"New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA",40.74230250,-74.17928172,edu,
+892c911ca68f5b4bad59cde7eeb6c738ec6c4586,"The Ryerson Audio-Visual Database of Emotional Speech and Song (RAVDESS): A dynamic, multimodal set of facial and vocal expressions in North American English",Ryerson University,Ryerson University,"Ryerson University, Gould Street, Downtown Yonge, Old Toronto, Toronto, Ontario, M5B 2G9, Canada",43.65815275,-79.37908010,edu,
+8986585975c0090e9ad97bec2ba6c4b437419dae,Unsupervised Hard Example Mining from Videos for Improved Object Detection,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+89cabb60aa369486a1ebe586dbe09e3557615ef8,Bayesian Networks as Generative Models for Face Recognition,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+89887e95169efb35726cbeecf6a252de6fbcac3b,GroupCap : Group-based Image Captioning with Structured Relevance and Diversity Constraints,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+89887e95169efb35726cbeecf6a252de6fbcac3b,GroupCap : Group-based Image Captioning with Structured Relevance and Diversity Constraints,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+8983485996d5d9d162e70d66399047c5d01ac451,Deep feature-based face detection on mobile devices,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+8983485996d5d9d162e70d66399047c5d01ac451,Deep feature-based face detection on mobile devices,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+89e4f5a1eb6a97459bb748f4f7bc5c2696354aad,Semantics from Sound: Modeling Audio and Text Thesis Proposal,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+8981be3a69cd522b4e57e9914bf19f034d4b530c,Fast Automatic Video Retrieval using Web Images,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+891433740bf6d318782c468638722aebf8bef2f5,Multi-Frame Video Super-Resolution Using Convolutional Neural Networks,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+891433740bf6d318782c468638722aebf8bef2f5,Multi-Frame Video Super-Resolution Using Convolutional Neural Networks,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+89f73328e509e3ab2df01481cf55cb53050f6343,Cortical Surface Thickness as a Classifier: Boosting for Autism Classification,University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+89f73328e509e3ab2df01481cf55cb53050f6343,Cortical Surface Thickness as a Classifier: Boosting for Autism Classification,University at Buffalo,State University of New York at Buffalo,"Buffalo, NY 14260, USA",43.00080930,-78.78896970,edu,
+89cdedb35b487bcf07d6f53aa91463ea2de8da66,Sketch and Match: Scene Montage Using a Huge Image Collection,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+89cdedb35b487bcf07d6f53aa91463ea2de8da66,Sketch and Match: Scene Montage Using a Huge Image Collection,Hong Kong University of Science and Technology,Hong Kong University of Science and Technology,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.33863040,114.26203370,edu,
+89b4111f14cdf342188f96d3962581fd0afa042f,A Study and Comparison of Human and Deep Learning Recognition Performance under Visual Distortions,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+89dbfb9b75d3902748d73bfb5965e7d11e83c10e,Learning Discriminative Appearance-Based Models Using Partial Least Squares,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+893292315f5ecc73e84c5585900c53072de38550,Uncertainty Flow Facilitates Zero-Shot Multi-Label Learning in Affective Facial Analysis,Kobe University,Kobe University,"神戸大学, 灘三田線, 灘区, 神戸市, 兵庫県, 近畿地方, 657-00027, 日本",34.72757140,135.23710000,edu,
+898ef892b4cb9c206afc2daae04eacb1a7c7f956,Dump Truck Recognition Based on SCPSR in Videos,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+894a3ab0a3ef82352b2c294dd2bde2bd3403da8c,Recommending Outfits from Personal Closet,Tohoku University,Tohoku University,"Tohoku University, 五橋通, 青葉区, 仙台市, 宮城県, 東北地方, 980-0811, 日本",38.25309450,140.87365930,edu,
+896f4d87257abd0f628c1ffbbfdac38c86a56f50,Action and Gesture Temporal Spotting with Super Vector Representation,Southwest Jiaotong University,Southwest Jiaotong University,"西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国",30.69784700,104.05208110,edu,
+896f4d87257abd0f628c1ffbbfdac38c86a56f50,Action and Gesture Temporal Spotting with Super Vector Representation,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+896f4d87257abd0f628c1ffbbfdac38c86a56f50,Action and Gesture Temporal Spotting with Super Vector Representation,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+89c45ace90d377502dc84825e5039290927ae9e2,"Changes in vegetation persistence across global savanna landscapes , 1982 – 2010",University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+89c45ace90d377502dc84825e5039290927ae9e2,"Changes in vegetation persistence across global savanna landscapes , 1982 – 2010",University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+451b6409565a5ad18ea49b063561a2645fa4281b,Action Sets: Weakly Supervised Action Segmentation without Ordering Constraints,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+4576b59a44f75120f6a2d17a4e9c52e894297661,Learning Geo-Temporal Image Features,University of Kentucky,University of Kentucky,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA",38.03337420,-84.50177580,edu,
+45483f17551d9c6b550474dc7168ec31302e5d7b,Face recognition via collaborative representation based multiple one-dimensional embedding,University of Macau,University of Macau,"研究生宿舍 Residência de Estudantes de Pós-Graduação da Universidade de Macau, 澳門大學 Universidade de Macau, 嘉模堂區 Nossa Senhora do Carmo, 氹仔 Taipa, Universidade de Macau em Ilha de Montanha 澳門大學橫琴校區, 中国",22.12401870,113.54510901,edu,
+45a44e61236f7c144d9ec11561e236b2960c7cf6,Multi-object Tracking with Neural Gating Using Bilinear LSTM,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+45a44e61236f7c144d9ec11561e236b2960c7cf6,Multi-object Tracking with Neural Gating Using Bilinear LSTM,Oregon State University,Oregon State University,"OSU Beaver Store, 538, Southwest 6th Avenue, Portland Downtown, Portland, Multnomah County, Oregon, 97204, USA",45.51982890,-122.67797964,edu,
+454f403857e487d6a885180e0e0f7216a342fb0e,Unsupervised Learning of Multi-Level Descriptors for Person Re-Identification,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+455204fa201e9936b42756d362f62700597874c4,A Region Based Methodology for Facial Expression Recognition,University of Ioannina,University of Ioannina,"Πανεπιστήμιο Ιωαννίνων, Πανεπιστημίου, Κάτω Νεοχωρόπουλο, Νεοχωρόπουλο, Δήμος Ιωαννιτών, Π.Ε. Ιωαννίνων, Περιφέρεια Ηπείρου, Ήπειρος - Δυτική Μακεδονία, 45110, Ελλάδα",39.61623060,20.83963011,edu,
+455204fa201e9936b42756d362f62700597874c4,A Region Based Methodology for Facial Expression Recognition,University of Ioannina,University of Ioannina,"Πανεπιστήμιο Ιωαννίνων, Πανεπιστημίου, Κάτω Νεοχωρόπουλο, Νεοχωρόπουλο, Δήμος Ιωαννιτών, Π.Ε. Ιωαννίνων, Περιφέρεια Ηπείρου, Ήπειρος - Δυτική Μακεδονία, 45110, Ελλάδα",39.61623060,20.83963011,edu,
+456abee9c8d31f004b2f0a3b47222043e20f5042,Unsupervised Visual Sense Disambiguation for Verbs using Multimodal Embeddings,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+4541c9b4b7e6f7a232bdd62ae653ba5ec0f8bbf6,The role of structural facial asymmetry in asymmetry of peak facial expressions.,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+4541c9b4b7e6f7a232bdd62ae653ba5ec0f8bbf6,The role of structural facial asymmetry in asymmetry of peak facial expressions.,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+4541c9b4b7e6f7a232bdd62ae653ba5ec0f8bbf6,The role of structural facial asymmetry in asymmetry of peak facial expressions.,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+4558338873556d01fd290de6ddc55721c633a1ad,Training Constrained Deconvolutional Networks for Road Scene Semantic Segmentation,Cambridge University,Cambridge University,"University, Cambridge Road, Old Portsmouth, Portsmouth, South East, England, PO1 2HB, UK",50.79440260,-1.09717480,edu,
+4552f4d46a2cc67ccc4dd8568e5c95aa2eedb4ec,Disentangling Features in 3D Face Shapes for Joint Face Reconstruction and Recognition,"Sichuan University, Chengdu","Sichuan Univ., Chengdu","四川大学(华西校区), 校东路, 武侯区, 武侯区 (Wuhou), 成都市 / Chengdu, 四川省, 610014, 中国",30.64276900,104.06751175,edu,
+4552f4d46a2cc67ccc4dd8568e5c95aa2eedb4ec,Disentangling Features in 3D Face Shapes for Joint Face Reconstruction and Recognition,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+45619a2b7b41fea02345badf880530519d3d4c8f,Learning Generalized Linear Models Over Normalized Data,University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+459960be65dd04317dd325af5b7cbb883d822ee4,The Meme Quiz: A Facial Expression Game Combining Human Agency and Machine Involvement,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+45f858f9e8d7713f60f52618e54089ba68dfcd6d,What Actions are Needed for Understanding Human Actions in Videos?,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+45518c2350b9e727adf59f1626610917f71aea1e,Cross-Layer Design Space Exploration of Heterogeneous Multicore Processors With Predictive Models,"University of California, Irvine","University of California, Irvine","University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA",33.64319010,-117.84016494,edu,
+45518c2350b9e727adf59f1626610917f71aea1e,Cross-Layer Design Space Exploration of Heterogeneous Multicore Processors With Predictive Models,"University of California, Irvine","University of California, Irvine","University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA",33.64319010,-117.84016494,edu,
+45f4b06b7c9fa4cf548d33e40b2295b2d6ff806e,3D Generic Elastic Models for 2D Pose Synthesis and Face Recognition,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+45f4b06b7c9fa4cf548d33e40b2295b2d6ff806e,3D Generic Elastic Models for 2D Pose Synthesis and Face Recognition,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+45f4b06b7c9fa4cf548d33e40b2295b2d6ff806e,3D Generic Elastic Models for 2D Pose Synthesis and Face Recognition,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+45f4b06b7c9fa4cf548d33e40b2295b2d6ff806e,3D Generic Elastic Models for 2D Pose Synthesis and Face Recognition,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+45215e330a4251801877070c85c81f42c2da60fb,Domain Adaptive Dictionary Learning,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+45215e330a4251801877070c85c81f42c2da60fb,Domain Adaptive Dictionary Learning,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+457cf73263d80a1a1338dc750ce9a50313745d1d,Decomposing Motion and Content for Natural Video Sequence Prediction,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+457cf73263d80a1a1338dc750ce9a50313745d1d,Decomposing Motion and Content for Natural Video Sequence Prediction,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+45e616093a92e5f1e61a7c6037d5f637aa8964af,Fine-grained evaluation on face detection in the wild,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+45ff38add61df32a027048624f58952a67a7c5f5,Deep Context Convolutional Neural Networks for Semantic Segmentation,Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+451eed7fd8ae281d1cc76ca8cdecbaf47816e55a,Close Yet Distinctive Domain Adaptation,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+459b4d0ed3031e2fe5b3b3f176a5204dfb28157a,3D face landmark labelling,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+459b4d0ed3031e2fe5b3b3f176a5204dfb28157a,3D face landmark labelling,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+459b4d0ed3031e2fe5b3b3f176a5204dfb28157a,3D face landmark labelling,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+454dd76eb0a82286c054a6dd9d9413e09ad66801,Graph-Structured Representations for Visual Question Answering,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+4597b7c4f13e1dfc456d156c6c05502fc5d38eec,Human Action Adverb Recognition: ADHA Dataset and A Three-Stream Hybrid Model,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+45c31cde87258414f33412b3b12fc5bec7cb3ba9,Coding Facial Expressions with Gabor Wavelets,Kyushu University,Kyushu University,"伊都ゲストハウス, 桜井太郎丸線, 西区, 福岡市, 福岡県, 九州地方, 819−0395, 日本",33.59914655,130.22359848,edu,
+45e8ef229fae18b0a2ab328037d8e520866c3c81,Learning Feature Pyramids for Human Pose Estimation,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+45e8ef229fae18b0a2ab328037d8e520866c3c81,Learning Feature Pyramids for Human Pose Estimation,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+4542273a157bfd4740645a6129d1784d1df775d2,FaceRipper Automatic Face Indexer and Tagger for Personal Albums and Videos,Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.02223470,77.56718325,edu,
+4543052aeaf52fdb01fced9b3ccf97827582cef5,Quantized Densely Connected U-Nets for Efficient Landmark Localization,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+4543052aeaf52fdb01fced9b3ccf97827582cef5,Quantized Densely Connected U-Nets for Efficient Landmark Localization,Binghamton University,Binghamton University,"Binghamton University Downtown Center, Washington Street, Downtown, Binghamton, Broome County, New York, 13901, USA",42.09580770,-75.91455689,edu,
+4543052aeaf52fdb01fced9b3ccf97827582cef5,Quantized Densely Connected U-Nets for Efficient Landmark Localization,SenseTime,SenseTime,"China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1号-7",39.99300800,116.32988200,company,"1 Zhongguancun E Rd, Haidian Qu, China"
+45513d0f2f5c0dac5b61f9ff76c7e46cce62f402,Face Discovery with Social Context,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+45a3ba54fc2210cf8a4fba0cbdce9dad3cefc826,Complete Cross-Validation for Nearest Neighbor Classifiers,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+45c182f8d003a2d505e4d1d491b5d03159a70b81,Training Generative Adversarial Networks Via Turing Test,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+450c6a57f19f5aa45626bb08d7d5d6acdb863b4b,Towards Interpretable Face Recognition,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+1f03f21ba6c1bf66b025029b10d4bc9bd7f65a81,VISCERAL: Towards Large Data in Medical Imaging - Challenges and Directions,Vienna University of Technology,Vienna University of Technology,"TU Wien, Hauptgebäude, Hoftrakt, Freihausviertel, KG Wieden, Wieden, Wien, 1040, Österreich",48.19853965,16.36986168,edu,
+1f9b2f70c24a567207752989c5bd4907442a9d0f,Deep Representations to Model User 'Likes',Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+1f9b2f70c24a567207752989c5bd4907442a9d0f,Deep Representations to Model User 'Likes',"Institute for Infocomm Research, Singapore","Institute for Infocomm Research, Singapore","1 Fusionopolis Way, #21-01 Connexis, Singapore 138632",1.29889260,103.78731070,edu,
+1fe1bd6b760e3059fff73d53a57ce3a6079adea1,Fast-BoW: Scaling Bag-of-Visual-Words Generation,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+1f05473c587e2a3b587f51eb808695a1c10bc153,Towards Good Practices for Very Deep Two-Stream ConvNets,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+1f05473c587e2a3b587f51eb808695a1c10bc153,Towards Good Practices for Very Deep Two-Stream ConvNets,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+1fa3948af1c338f9ae200038c45adadd2b39a3e4,Computational Explorations of Split Architecture in Modeling Face and Object Recognition,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+1fa3948af1c338f9ae200038c45adadd2b39a3e4,Computational Explorations of Split Architecture in Modeling Face and Object Recognition,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+1f89439524e87a6514f4fbe7ed34bda4fd1ce286,Devising Face Authentication System and Performance Evaluation Based on Statistical Models,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+1f89439524e87a6514f4fbe7ed34bda4fd1ce286,Devising Face Authentication System and Performance Evaluation Based on Statistical Models,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+1f89439524e87a6514f4fbe7ed34bda4fd1ce286,Devising Face Authentication System and Performance Evaluation Based on Statistical Models,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+1f89439524e87a6514f4fbe7ed34bda4fd1ce286,Devising Face Authentication System and Performance Evaluation Based on Statistical Models,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+1f89439524e87a6514f4fbe7ed34bda4fd1ce286,Devising Face Authentication System and Performance Evaluation Based on Statistical Models,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+1f9102f425f28552e477cf71af0846550f3f9ed9,Incremental Domain Adaptation of Deformable Part-based Models,Universitat Autònoma de Barcelona,Universitat Autònoma de Barcelona,"Centre de Visió per Computador (CVC), Carrer de l'Albareda, Serraperera, UAB, Cerdanyola del Vallès, Vallès Occidental, BCN, CAT, 08214, España",41.50078110,2.11143663,edu,
+1fd6004345245daf101c98935387e6ef651cbb55,Learning Symmetry Features for Face Detection Based on Sparse Group Lasso,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+1fc867b43092fe83c4e0bfa38a9a45ffaea86deb,Deep Speaker Embeddings for Short-Duration Speaker Verification,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+1f7b2087dd0784a04ba4d2a68c2db9588f36c33a,Modeling Sub-Event Dynamics in First-Person Action Recognition,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+1f7b2087dd0784a04ba4d2a68c2db9588f36c33a,Modeling Sub-Event Dynamics in First-Person Action Recognition,National University of Sciences and Technology,National University of Sciences and Technology,"National University of Sciences and Technology (NUST), Kashmir Highway, جی - 10, ICT, وفاقی دارالحکومت اسلام آباد, 44000, ‏پاکستان‎",33.64434700,72.98850790,edu,
+1fd7e1f5dd4c514bfb3d77fceb454bc01de83ec8,Holistic Shape-Based Object Recognition Using Bottom-Up Image Structures,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+1fd7e1f5dd4c514bfb3d77fceb454bc01de83ec8,Holistic Shape-Based Object Recognition Using Bottom-Up Image Structures,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+1f61c1ba961c6328923f4c6219c6889ccb538506,"Kernel analysis over Riemannian manifolds for visual recognition of actions, pedestrians and textures","CSIRO, Australia","NICTA, PO Box 6020, St Lucia, QLD 4067, Australia","Research Way, Clayton VIC 3168, Australia",-37.90627370,145.13194490,edu,f.k.a. NICTA
+1f61c1ba961c6328923f4c6219c6889ccb538506,"Kernel analysis over Riemannian manifolds for visual recognition of actions, pedestrians and textures",University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+1f27f9c0da385080f05f8cfaf0771e5aee6d9ab2,Towards Robust 3D Face Verification using Gaussian Mixture Models,University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.05015580,14.46907327,edu,
+1f4b741a9da2fb7623ff68c8e1df3f3cce5e2542,Activity Recognition in Egocentric Life-Logging Videos,Singapore University of Technology and Design,Singapore University of Technology and Design,"Singapore University of Technology and Design, Simpang Bedok, Changi Business Park, Southeast, 486041, Singapore",1.34021600,103.96508900,edu,
+1f4b741a9da2fb7623ff68c8e1df3f3cce5e2542,Activity Recognition in Egocentric Life-Logging Videos,Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.02223470,77.56718325,edu,
+1fe59275142844ce3ade9e2aed900378dd025880,Facial Landmark Detection via Progressive Initialization,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+1f1b4e91c6e6699a2191d1d62a0304870163e48e,Attention on Attention: Architectures for Visual Question Answering (VQA),Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+1f1b4e91c6e6699a2191d1d62a0304870163e48e,Attention on Attention: Architectures for Visual Question Answering (VQA),Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+1f1b4e91c6e6699a2191d1d62a0304870163e48e,Attention on Attention: Architectures for Visual Question Answering (VQA),Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+1f527b5406356018e6dc401a4be8098a5a451891,Multiple Vehicle-like Target Tracking Based on the Velodyne LiDAR ?,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+1f527b5406356018e6dc401a4be8098a5a451891,Multiple Vehicle-like Target Tracking Based on the Velodyne LiDAR ?,Shenzhen University,Shenzhen University,"深圳大学, 3688, 南海大道, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518060, 中国",22.53521465,113.93159110,edu,
+1f527b5406356018e6dc401a4be8098a5a451891,Multiple Vehicle-like Target Tracking Based on the Velodyne LiDAR ?,Jacobs University,Jacobs University,"Liverpool Hope University, Shaw Street, Everton, Liverpool, North West England, England, L6 1HP, UK",53.41291480,-2.96897915,edu,
+1f877687022f7b222c7ae1ec4ec21655a290220d,Soft-Gated Warping-GAN for Pose-Guided Person Image Synthesis,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+1f877687022f7b222c7ae1ec4ec21655a290220d,Soft-Gated Warping-GAN for Pose-Guided Person Image Synthesis,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+1f877687022f7b222c7ae1ec4ec21655a290220d,Soft-Gated Warping-GAN for Pose-Guided Person Image Synthesis,South China Normal University,South China Normal University,"华师, 五山路, 华南理工大学南新村, 天河区, 广州市, 广东省, 510630, 中国",23.14319700,113.34009651,edu,
+1fe121925668743762ce9f6e157081e087171f4c,Unsupervised learning of overcomplete face descriptors,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+1fefb2f8dd1efcdb57d5c2966d81f9ab22c1c58d,vExplorer: A Search Method to Find Relevant YouTube Videos for Health Researchers,"IBM Research, North Carolina",IBM Research,"IBM, East Cornwallis Road, Research Triangle Park, Nelson, Durham County, North Carolina, 27709, USA",35.90422720,-78.85565763,company,
+1fdeba9c4064b449231eac95e610f3288801fd3e,Fine-Grained Head Pose Estimation Without Keypoints,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+1f2497491ee465f299eaa8a769640cf4f084ee09,Crowd Counting via Adversarial Cross-Scale Consistency Pursuit,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+1f56d8442452c527140909d9f5b857b7eb7c997d,A Robust and Compact Descriptor Based on Center-Symmetric LBP,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+1f745215cda3a9f00a65166bd744e4ec35644b02,Facial cosmetics database and impact analysis on automatic face recognition,EURECOM,EURECOM,"Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France",43.61438600,7.07112500,edu,
+1fd3dbb6e910708fa85c8a86e17ba0b6fef5617c,Age interval and gender prediction using PARAFAC2 on speech recordings and face images,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+1f4aa88107d7c4b91b1436b721b7630b93ce7d06,Deeply Learned Compositional Models for Human Pose Estimation,Northwestern University,Northwestern University,"Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA",42.05511640,-87.67581113,edu,
+1f24cef78d1de5aa1eefaf344244dcd1972797e8,Outlier-Robust Tensor PCA,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+1fe990ca6df273de10583860933d106298655ec8,A Wavelet-Based Image Preprocessing Method or Illumination Insensitive Face Recognition,Hunan University,Hunan University,"Yejin University for Employees, 冶金西路, 和平乡, 珠晖区, 衡阳市 / Hengyang, 湖南省, 中国",26.88111275,112.62850666,edu,
+1feeab271621128fe864e4c64bab9b2e2d0ed1f1,Perception-Link Behavior Model: Supporting a Novel Operator Interface for a Customizable Anthropomorphic Telepresence Robot,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+73b90573d272887a6d835ace89bfaf717747c59b,Feature Disentangling Machine - A Novel Approach of Feature Selection and Disentangling in Facial Expression Analysis,University of South Carolina,University of South Carolina,"University of South Carolina, Wheat Street, Columbia, Richland County, South Carolina, 29205, USA",33.99282980,-81.02685168,edu,
+7363cc7e0c5b43ec12ba47bca587a325f719398a,Improving Occlusion and Hard Negative Handling for Single-Stage Pedestrian Detectors,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+73298c5610004a8337baeb79f33c1519c0ba59e4,Computing Egomotion with Local Loop Closures for Egocentric Videos,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+73298c5610004a8337baeb79f33c1519c0ba59e4,Computing Egomotion with Local Loop Closures for Egocentric Videos,"IIIT Delhi, India","IIIT Delhi, India","Okhla Industrial Estate, Phase III, Near Govind Puri Metro Station, New Delhi, Delhi 110020, India",28.54562820,77.27315050,edu,
+73acee80dbcf4ed119d863e4ad6c7bf1bcc542ca,Using Human Knowledge to Judge Part Goodness: Interactive Part Selection,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+737f3cf354f40a6a7fd8a2058fe2803b8dd6c56b,Image Crowd Counting Using Convolutional Neural Network and Markov Random Field,Shanghai University,Shanghai University,"上海大学, 锦秋路, 大场镇, 宝山区 (Baoshan), 上海市, 201906, 中国",31.32235655,121.38400941,edu,
+737f3cf354f40a6a7fd8a2058fe2803b8dd6c56b,Image Crowd Counting Using Convolutional Neural Network and Markov Random Field,Shanghai University,Shanghai University,"上海大学, 锦秋路, 大场镇, 宝山区 (Baoshan), 上海市, 201906, 中国",31.32235655,121.38400941,edu,
+735418826055951ba8660bb008d92bfe6910330e,An Evaluation of Local Action Descriptors for Human Action Classification in the Presence of Occlusion,Dublin City University,DUBLIN CITY UNIVERSITY,"Dublin City University Glasnevin Campus, Lower Car Park, Wad, Whitehall A ED, Dublin 9, Dublin, County Dublin, Leinster, D09 FW22, Ireland",53.38522185,-6.25740874,edu,
+73f4be4b6e56f5bde875a8987f90ba799dde35b2,Deep Spatial Regression Model for Image Crowd Counting,Shanghai University,Shanghai University,"上海大学, 锦秋路, 大场镇, 宝山区 (Baoshan), 上海市, 201906, 中国",31.32235655,121.38400941,edu,
+73f4be4b6e56f5bde875a8987f90ba799dde35b2,Deep Spatial Regression Model for Image Crowd Counting,Shanghai University,Shanghai University,"上海大学, 锦秋路, 大场镇, 宝山区 (Baoshan), 上海市, 201906, 中国",31.32235655,121.38400941,edu,
+73ea06787925157df519a15ee01cc3dc1982a7e0,Fast Face Image Synthesis with Minimal Training,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+7372c1e9cb87dad88bc160536263e461bb7ab04c,Trajectory Energy Minimisation for Cell Growth Tracking and Genealogy Analysis,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+7357f37a193992f06eba68ee71faef8c093e8aba,Pose-Invariant Face Recognition in Hyperspectral Images,"University of California, Irvine","University of California, Irvine","University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA",33.64319010,-117.84016494,edu,
+7358fe63042e186c03df0fb2d5f933eda94cb36a,It Takes Two to Tango: Towards Theory of AI's Mind,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+739d400cb6fb730b894182b29171faaae79e3f01,A New Regularized Orthogonal Local Fisher Discriminant Analysis for Image Feature Extraction,Beijing Jiaotong University,Beijing Jiaotong University,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国",39.94976005,116.33629046,edu,
+73d15a975b0595e0cc2e0981a9396a89c474dc7e,Gender Effect on Face Recognition for a Large Longitudinal Database,University of North Carolina Wilmington,University of North Carolina Wilmington,"Kenan House, 1705, Market Street, Wilmington, New Hanover County, North Carolina, 28403, USA",34.23755810,-77.92701290,edu,
+732e4016225280b485c557a119ec50cffb8fee98,Are all training examples equally valuable?,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+732e4016225280b485c557a119ec50cffb8fee98,Are all training examples equally valuable?,Universitat Oberta de Catalunya,Universitat Oberta de Catalunya,"Universitat Oberta de Catalunya, 156, Rambla del Poblenou, Provençals del Poblenou, Sant Martí, Barcelona, BCN, CAT, 08018, España",41.40657415,2.19453410,edu,
+732e4016225280b485c557a119ec50cffb8fee98,Are all training examples equally valuable?,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+732e4016225280b485c557a119ec50cffb8fee98,Are all training examples equally valuable?,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+732e4016225280b485c557a119ec50cffb8fee98,Are all training examples equally valuable?,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+73e4076a532ec6a0633aed5cf6009414cdaf1f6a,Illumination Normalization Using Self-lighting Ratios for 3D2D Face Recognition,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+738a985fba44f9f5acd516e07d0d9578f2ffaa4e,Machine Learning Techniques for Face Analysis,Delft University of Technology,Delft University of Technology,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland",51.99882735,4.37396037,edu,
+73fd7e74457e0606704c5c3d3462549f1b2de1ad,Learning Predictable and Discriminative Attributes for Visual Recognition,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+73c5bab5c664afa96b1c147ff21439135c7d968b,Whitened LDA for face recognition,Kyung Hee University,Kyung Hee University,"Kyung Hee Tae Kwon Do, Vons 2370 Truck Service Ramp, University City, San Diego, San Diego County, California, 92122, USA",32.85363330,-117.20352860,edu,
+73c5bab5c664afa96b1c147ff21439135c7d968b,Whitened LDA for face recognition,Kyung Hee University,Kyung Hee University,"Kyung Hee Tae Kwon Do, Vons 2370 Truck Service Ramp, University City, San Diego, San Diego County, California, 92122, USA",32.85363330,-117.20352860,edu,
+73c5bab5c664afa96b1c147ff21439135c7d968b,Whitened LDA for face recognition,SungKyunKwan University,SungKyunKwan University,"성균관대, 덕영대로, 천천동, 장안구, 수원시, 경기, 16357, 대한민국",37.30031270,126.97212300,edu,
+738fadaf40249146f33da5b9efbb72a1fdf8767d,Unsupervised Learning of Invariant Representations in Hierarchical Architectures,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+739e67fe178d1f96419846b34d6b2a90e6f7d3c1,A Pursuit Method for Video Annotation by Zoltan Foley-Fisher A THESIS SUBMITTED IN PARTIAL FULFILLMENT OF THE REQUIREMENTS FOR THE DEGREE OF MASTER OF APPLIED SCIENCE,University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+87defac1045bfa9af0162cd248d193e9be6eb25b,Out of Time: Automated Lip Sync in the Wild,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+87a4711136040f5d6929d7e31d8dae881afa5d3f,Hand-tremor frequency estimation in videos,Delft University of Technology,Delft University of Technology,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland",51.99882735,4.37396037,edu,
+87cf55164a7cc676b68e84b7f39fcdbf7610ece4,Choosing Multi-illumination training Images based on the degree of linear independency,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+877100f430b72c5d60de199603ab5c65f611ce17,Within-person variability in men’s facial width-to-height ratio,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+8797c870c0881cd30fda186affee4bdec54aeecd,Binary Biometric Representation through Pairwise Adaptive Phase Quantization,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+87e05a26fb4c45dbe2b0b10c8ab20e7662d46912,Face engagement during infancy predicts later face recognition ability in younger siblings of children with autism.,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+870433ba89d8cab1656e57ac78f1c26f4998edfb,Regressing Robust and Discriminative 3D Morphable Models with a Very Deep Neural Network,Open University of Israel,Open University of Israel,"האוניברסיטה הפתוחה, 15, אבא חושי, חיפה, גבעת דאונס, חיפה, מחוז חיפה, NO, ישראל",32.77824165,34.99565673,edu,
+872dfdeccf99bbbed7c8f1ea08afb2d713ebe085,L2-constrained Softmax Loss for Discriminative Face Verification,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+87e6cb090aecfc6f03a3b00650a5c5f475dfebe1,Holistically Constrained Local Model: Going Beyond Frontal Poses for Facial Landmark Detection,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+87e6cb090aecfc6f03a3b00650a5c5f475dfebe1,Holistically Constrained Local Model: Going Beyond Frontal Poses for Facial Landmark Detection,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+87d1283ccc9bfb0c550ebed8ec0b025dc14b160f,"TVQA: Localized, Compositional Video Question Answering",University of North Carolina at Chapel Hill,University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.91139710,-79.05045290,edu,
+87b5d74ae97a991bf5b45f0f947525234c37d370,Noise Modelling for Denoising and 3 D Face Recognition Algorithms Performance Evaluation,University of Bath,University of Bath,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK",51.37914420,-2.32523320,edu,
+87e4d8e0fc4019405001683678cd199fc9936369,Kernelized Multiview Projection,Northumbria University,Northumbria University,"Northumbria University, Birkdale Close, High Heaton, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE7 7TP, UK",55.00306320,-1.57463231,edu,
+878634c30842b5812c56fe772719424bab69e7ad,Dynamic Neural Turing Machine with Soft and Hard Addressing Schemes,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+8758775ff9fa05b05f98a43cf5effe6b08cc1241,Deep Hashing via Discrepancy Minimization,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+8758775ff9fa05b05f98a43cf5effe6b08cc1241,Deep Hashing via Discrepancy Minimization,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+8758775ff9fa05b05f98a43cf5effe6b08cc1241,Deep Hashing via Discrepancy Minimization,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+87b763bdb23ed72e849f25a19879722dc2255ab1,Unsupervised Adversarial Depth Estimation Using Cycled Generative Networks,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+87b763bdb23ed72e849f25a19879722dc2255ab1,Unsupervised Adversarial Depth Estimation Using Cycled Generative Networks,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+8724fc4d6b91eebb79057a7ce3e9dfffd3b1426f,Ordered Pooling of Optical Flow Sequences for Action Recognition,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+8748c232a93cfe595de6938f209a170fca51c1d5,Eye Tracking for Everyone,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+87dd1c52a3805c59eeab527b8c8c1214415026a6,A Generative-Discriminative Hybrid Method for Multi-View Object Detection,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+87dd1c52a3805c59eeab527b8c8c1214415026a6,A Generative-Discriminative Hybrid Method for Multi-View Object Detection,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+87309bdb2b9d1fb8916303e3866eca6e3452c27d,Kernel Coding: General Formulation and Special Cases,"Australian National University, Canberra","Australian National University, Canberra","Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia",-35.28121335,149.11665331,edu,
+87d0c3359a9a99fddfbc5a388e211762a79ed5d7,Why Did the Person Cross the Road (There)? Scene Understanding Using Probabilistic Logic Models and Common Sense Reasoning,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+87cd95dbe885762ec0f733bc9d232eb4d63cc995,Multilinear Hyperplane Hashing,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+87cd95dbe885762ec0f733bc9d232eb4d63cc995,Multilinear Hyperplane Hashing,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+87cd95dbe885762ec0f733bc9d232eb4d63cc995,Multilinear Hyperplane Hashing,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+87a66ccc68374ffb704ee6fb9fa7df369718095c,Multi-person Pose Estimation with Local Joint-to-Person Associations,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+871e6c1de2e0ba86bad8975b8411ad76a6a9aef9,Geometric Modeling of 3D-Face Features and Its Applications,Indian Institute of Technology Kanpur,Indian Institute of Technology Kanpur,"Indian Institute of Technology Kanpur, 4th Avenue, Panki, Kanpur, Kanpur Nagar, Uttar Pradesh, 208016, India",26.51318800,80.23651945,edu,
+87747b6a1bff0944fc3e4891de9c3ba8868aef66,Body pose based pedestrian tracking in a particle filtering framework,Kingston University,Kingston University,"Kingston University, Kingston Hill, Kingston Vale, Kingston-upon-Thames, London, Greater London, England, KT2 7TF, UK",51.42930860,-0.26840440,edu,
+87147418f863e3d8ff8c97db0b42695a1c28195b,Attributes for Improved Attributes: A Multi-Task Network for Attribute Classification,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+87204e4e1a96b8f59cb91828199dacd192292231,Towards Real-Time Detection and Tracking of Basketball Players using Deep Neural Networks,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+8732d702aeb08e9c604b36dcaa5933aea91a228d,Development of social skills in children: neural and behavioral evidence for the elaboration of cognitive models,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+8732d702aeb08e9c604b36dcaa5933aea91a228d,Development of social skills in children: neural and behavioral evidence for the elaboration of cognitive models,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+87b9d7d4f5fcef5680b9e74ce50c76be504c70a5,Scene specific people detection by simple human interaction,University of Padova,University of Padova,"Via Giovanni Gradenigo, 6, 35131 Padova PD, Italy",45.40811720,11.89437860,edu,"University of Padova, Via Gradenigo, 6 - 35131- Padova, Italy"
+87b9d7d4f5fcef5680b9e74ce50c76be504c70a5,Scene specific people detection by simple human interaction,University of Padova,University of Padova,"Via Giovanni Gradenigo, 6, 35131 Padova PD, Italy",45.40811720,11.89437860,edu,"University of Padova, Via Gradenigo, 6 - 35131- Padova, Italy"
+87e207fa31099d20450e60f056a0b1304dbc1bfa,Probabilistic fusion of regional scores in 3D face recognition,EURECOM,EURECOM,"Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France",43.61438600,7.07112500,edu,
+877397982198554e9294f0ddddd8d971cc87cefe,Understanding Degeneracies and Ambiguities in Attribute Transfer,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+80488ff21f7b69c1c9d20d88514a42bdad2602f4,"Unsupervised Depth Estimation, 3D Face Rotation and Replacement",Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+806f466034e0c3e609e672559e23d5d8bea6fe3d,Adaptive memory: The mnemonic value of contamination,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+8052bc5f9beb389b3144d423e7b5d6fcf5d0cc4f,Adapting attributes by selecting features similar across domains,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+806d7b97c3535a3c62ce243fe7008149062d14c1,Learning to Count with CNN Boosting,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+80433f3c41f383abf495ff2b368616af6d545694,Robust Scene Text Detection with Convolution Neural Network Induced MSER Trees,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+80433f3c41f383abf495ff2b368616af6d545694,Robust Scene Text Detection with Convolution Neural Network Induced MSER Trees,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+8042b633b35aee9402bc2369b5c25413d2abc271,Nonnegative matrix factorization with α-divergence,Pohang University of Science and Technology,Pohang University of Science and Technology,"포스텍, 77, 청암로, 효곡동, 남구, 포항시, 경북, 37673, 대한민국",36.01773095,129.32107509,edu,
+80471bb250eca1be53a455489e187c0152ac78b9,DecideNet: Counting Varying Density Crowds Through Attention Guided Detection and Density Estimation,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+808b685d09912cbef4a009e74e10476304b4cccf,From Understanding to Controlling Privacy against Automatic Person Recognition in Social Media,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+80c8f118f37f990905205eee4f3b3811e0488bf9,Spatio-temporal Matching for Human Detection in Video,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+80c8f118f37f990905205eee4f3b3811e0488bf9,Spatio-temporal Matching for Human Detection in Video,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+8050f9b0f9ee0953e6125cd9b8211bb792953642,Pose and Shape Estimation with Discriminatively Learned Parts,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+80f53cece53b82915e096f3ad1730f9ce7ee5808,Ensemble of Randomized Linear Discriminant Analysis for face recognition with single sample per person,York University,York University,"York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada",43.77439110,-79.50481085,edu,
+80f53cece53b82915e096f3ad1730f9ce7ee5808,Ensemble of Randomized Linear Discriminant Analysis for face recognition with single sample per person,Shanghai University,Shanghai University,"上海大学, 锦秋路, 大场镇, 宝山区 (Baoshan), 上海市, 201906, 中国",31.32235655,121.38400941,edu,
+80f53cece53b82915e096f3ad1730f9ce7ee5808,Ensemble of Randomized Linear Discriminant Analysis for face recognition with single sample per person,Shanghai University,Shanghai University,"上海大学, 锦秋路, 大场镇, 宝山区 (Baoshan), 上海市, 201906, 中国",31.32235655,121.38400941,edu,
+80277fb3a8a981933533cf478245f262652a33b5,Synergy-Based Learning of Facial Identity,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+80f5443b0204f28c44ee2dd94e72f8dbfa22910d,Visual Object Categorization using Topic Models,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+80840df0802399838fe5725cce829e1b417d7a2e,Fast Approximate L_infty Minimization: Speeding Up Robust Regression,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+80840df0802399838fe5725cce829e1b417d7a2e,Fast Approximate L_infty Minimization: Speeding Up Robust Regression,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+80eab89ff1c5c2cfc1ea62e2088cfc9b62de8d35,Emergent Translation in Multi-Agent Communication,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+80eab89ff1c5c2cfc1ea62e2088cfc9b62de8d35,Emergent Translation in Multi-Agent Communication,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+809ea255d144cff780300440d0f22c96e98abd53,ArcFace: Additive Angular Margin Loss for Deep Face Recognition,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+809ea255d144cff780300440d0f22c96e98abd53,ArcFace: Additive Angular Margin Loss for Deep Face Recognition,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+805b42d42a52e1e5e20de8950dc18ec9323575a7,Recurrent Neural Networks for Person Re-identification Revisited,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+805b42d42a52e1e5e20de8950dc18ec9323575a7,Recurrent Neural Networks for Person Re-identification Revisited,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+80fcc4e66906d04e14f5ebc68b0a17d4e5ff0194,Predictor Combination at Test Time,University of Bath,University of Bath,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK",51.37914420,-2.32523320,edu,
+80fcc4e66906d04e14f5ebc68b0a17d4e5ff0194,Predictor Combination at Test Time,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+80fcc4e66906d04e14f5ebc68b0a17d4e5ff0194,Predictor Combination at Test Time,University of Bath,University of Bath,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK",51.37914420,-2.32523320,edu,
+8027f50bbcee3938196c6d5519464df16c275f8d,On Human Motion Prediction Using Recurrent Neural Networks,University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+80be8624771104ff4838dcba9629bacfe6b3ea09,Simultaneous Feature and Dictionary Learning for Image Set Based Face Recognition,"Advanced Digital Sciences Center, Singapore","Advanced Digital Sciences Center, Singapore","1 Create Way, 14-02 Create Tower, Singapore 138602",1.30372570,103.77377630,edu,
+80be8624771104ff4838dcba9629bacfe6b3ea09,Simultaneous Feature and Dictionary Learning for Image Set Based Face Recognition,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+80be8624771104ff4838dcba9629bacfe6b3ea09,Simultaneous Feature and Dictionary Learning for Image Set Based Face Recognition,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+80be8624771104ff4838dcba9629bacfe6b3ea09,Simultaneous Feature and Dictionary Learning for Image Set Based Face Recognition,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+8000c4f278e9af4d087c0d0895fff7012c5e3d78,Multi-task warped Gaussian process for personalized age estimation,Hong Kong University of Science and Technology,Hong Kong University of Science and Technology,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.33863040,114.26203370,edu,
+80bd795930837330e3ced199f5b9b75398336b87,Relative Forest for Attribute Prediction,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+74fcbd059e6749ee5073b7323d121132799f97a1,Gait-Assisted Person Re-identification in Wide Area Surveillance,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+74cefa1d796c84dc4343fdf383f15ca1e8ebb6ba,Low Resolution Camera for Human Detection and Tracking,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+74de03923a069ffc0fb79e492ee447299401001f,On Film Character Retrieval in Feature-Length Films,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+74de03923a069ffc0fb79e492ee447299401001f,On Film Character Retrieval in Feature-Length Films,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+74f643579949ccd566f2638b85374e7a6857a9fc,Monogenic Binary Pattern (MBP): A Novel Feature Extraction and Representation Model for Face Recognition,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+7481b7d5272326f4e9efcd49d31c7f42adb8ec4b,Weighted Bilinear Coding over Salient Body Parts for Person Re-identification,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+7481b7d5272326f4e9efcd49d31c7f42adb8ec4b,Weighted Bilinear Coding over Salient Body Parts for Person Re-identification,Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+7481b7d5272326f4e9efcd49d31c7f42adb8ec4b,Weighted Bilinear Coding over Salient Body Parts for Person Re-identification,South China University of Technology,South China University of Technology,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.05020420,113.39880323,edu,
+7481b7d5272326f4e9efcd49d31c7f42adb8ec4b,Weighted Bilinear Coding over Salient Body Parts for Person Re-identification,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+74ce7e5e677a4925489897665c152a352c49d0a2,SPG-Net: Segmentation Prediction and Guidance Network for Image Inpainting,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+74e1efe5e3564c4c6a9aebcb18103e941e31e335,High Fidelity Semantic Shape Completion for Point Clouds using Latent Optimization,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+74e1efe5e3564c4c6a9aebcb18103e941e31e335,High Fidelity Semantic Shape Completion for Point Clouds using Latent Optimization,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+741e2682b45a3dccab341cf272312a3c75c4b49a,A Diverse Dataset for Pedestrian Detection,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+741e2682b45a3dccab341cf272312a3c75c4b49a,A Diverse Dataset for Pedestrian Detection,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+740e095a65524d569244947f6eea3aefa3cca526,Towards Human-like Performance Face Detection: A Convolutional Neural Network Approach,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+74e869bc7c99093a5ff9f8cfc3f533ccf1b135d8,Context and Subcategories for SlidingWindowObject Recognition,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+74e869bc7c99093a5ff9f8cfc3f533ccf1b135d8,Context and Subcategories for SlidingWindowObject Recognition,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+741485741734a99e933dd0302f457158c6842adf,A Novel Automatic Facial Expression Recognition Method Based on AAM,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+747fdee12e633addeae3b74c12643cbac2c925ec,Deep Differential Recurrent Neural Networks,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+747fdee12e633addeae3b74c12643cbac2c925ec,Deep Differential Recurrent Neural Networks,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+747fdee12e633addeae3b74c12643cbac2c925ec,Deep Differential Recurrent Neural Networks,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+74cce11cfd25618b0fee0bcceb2f23376121a1f6,Exploring Inter-Observer Differences in First-Person Object Views Using Deep Learning Models,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+74cce11cfd25618b0fee0bcceb2f23376121a1f6,Exploring Inter-Observer Differences in First-Person Object Views Using Deep Learning Models,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+74113bb67eef4cfa28ebfa8bd38a614c82bdfdea,Neural responses to facial expressions support the role of the amygdala in processing threat.,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+74113bb67eef4cfa28ebfa8bd38a614c82bdfdea,Neural responses to facial expressions support the role of the amygdala in processing threat.,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+748e72af01ba4ee742df65e9c030cacec88ce506,Discriminative Regions Selection for Facial Expression Recognition,"University of Sfax, Tunisia","REGIM-Labo: REsearch Groups in Intelligent Machines, University of Sfax, ENIS, BP 1173, Sfax, 3038, Tunisia","Université de Route de l'Aéroport Km 0.5 BP 1169 .3029 Sfax, Sfax, Tunisia",34.73610660,10.74272750,edu,"University of Sfax, Tunisia"
+748e72af01ba4ee742df65e9c030cacec88ce506,Discriminative Regions Selection for Facial Expression Recognition,"University of Sfax, Tunisia","REGIM-Labo: REsearch Groups in Intelligent Machines, University of Sfax, ENIS, BP 1173, Sfax, 3038, Tunisia","Université de Route de l'Aéroport Km 0.5 BP 1169 .3029 Sfax, Sfax, Tunisia",34.73610660,10.74272750,edu,"University of Sfax, Tunisia"
+74875368649f52f74bfc4355689b85a724c3db47,Object detection by labeling superpixels,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+744499b779a751bcc3a43a45eab6f7704140a701,Propagating LSTM: 3D Pose Estimation Based on Joint Interdependency,Yonsei University,Yonsei University,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.56004060,126.93692480,edu,
+7492c611b1df6bce895bee6ba33737e7fc7f60a6,The 3D Menpo Facial Landmark Tracking Challenge,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+7492c611b1df6bce895bee6ba33737e7fc7f60a6,The 3D Menpo Facial Landmark Tracking Challenge,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+7492c611b1df6bce895bee6ba33737e7fc7f60a6,The 3D Menpo Facial Landmark Tracking Challenge,University of Exeter,University of Exeter,"University of Exeter, Stocker Road, Exwick, Exeter, Devon, South West England, England, EX4 4QN, UK",50.73693020,-3.53647672,edu,
+74ba4ba7a2c97826690b9d45edcc82532d1039bc,Gait Gate: An Online Walk-Through Multimodal Biometric Verification System Using a Single RGB-D Sensor,Osaka University,Osaka University,"大阪大学清明寮, 服部西町四丁目, 豊中市, 大阪府, 近畿地方, 日本",34.80809035,135.45785218,edu,
+74ba4ba7a2c97826690b9d45edcc82532d1039bc,Gait Gate: An Online Walk-Through Multimodal Biometric Verification System Using a Single RGB-D Sensor,Benha University,Benha University,"كلية الهندسة بشبرا جامعة بنها, شارع اليازجي, روض الفرج, القاهرة, محافظة القاهرة, 2466, مصر",30.08187270,31.24454841,edu,
+74dbcc09a3456ddacf5cece640b84045ebdf6be1,Characterizing Adversarial Examples Based on Spatial Consistency Information for Semantic Segmentation,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+74dbcc09a3456ddacf5cece640b84045ebdf6be1,Characterizing Adversarial Examples Based on Spatial Consistency Information for Semantic Segmentation,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+743c7e1aef6461d6582cf8deeb5d518e45215f89,Looking you in the mouth: abnormal gaze in autism resulting from impaired top-down modulation of visual attention.,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+743c7e1aef6461d6582cf8deeb5d518e45215f89,Looking you in the mouth: abnormal gaze in autism resulting from impaired top-down modulation of visual attention.,University of North Carolina,University of North Carolina,"University of North Carolina, Emergency Room Drive, Chapel Hill, Orange County, North Carolina, 27599, USA",35.90503535,-79.04775327,edu,
+74b9d1e80d3df707963fad57c50d7c25936da535,Reward Learning from Narrated Demonstrations,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+7489990ea3d6ab4c1c86c9ed9f049399961dfaef,Normalized cutswith soft must-link constraints for image segmentation and clustering,Rochester Institute of Technology,Rochester Institute of Technology,"Rochester Institute of Technology (RIT), 1, Lomb Memorial Drive, Bailey, Henrietta Town, Monroe County, New York, 14623, USA",43.08250655,-77.67121663,edu,
+74f17647637fe068e237d8d5a8cc37e081ec03d0,Semantic Edge Based Disparity Estimation Using Adaptive Dynamic Programming for Binocular Sensors,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+74f17647637fe068e237d8d5a8cc37e081ec03d0,Semantic Edge Based Disparity Estimation Using Adaptive Dynamic Programming for Binocular Sensors,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+1a229f1d21abe442520cba31a6e08663b3d31777,The heterogeneous block architecture,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+1afdc0b42d25df25a7cd4b304493e9b521c84f0f,Algorithm 1 Dynamic Graph Matching ( DGM ) Input : Unlabelled features,Hong Kong Baptist University,Hong Kong Baptist University,"香港浸會大學 Hong Kong Baptist University, 安明街 On Ming Street, 石門 Shek Mun, 石古壟 Shek Kwu Lung, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1132, 中国",22.38742010,114.20822220,edu,
+1afdc0b42d25df25a7cd4b304493e9b521c84f0f,Algorithm 1 Dynamic Graph Matching ( DGM ) Input : Unlabelled features,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+1a41e5d93f1ef5b23b95b7163f5f9aedbe661394,Alignment-Free and High-Frequency Compensation in Face Hallucination,Ritsumeikan University,Ritsumeikan University,"立命館大学 (Ritsumeikan University), 衣笠宇多野線, 北区, 京都市, 京都府, 近畿地方, 6038577, 日本",35.03332810,135.72491540,edu,
+1ac20a7a76f7b83ccd8ea0aab64e2b24ecd23915,Impaired social brain network for processing dynamic facial expressions in autism spectrum disorders,Kyoto University,Kyoto University,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.02749960,135.78154513,edu,
+1a65cc5b2abde1754b8c9b1d932a68519bcb1ada,Parsing Semantic Parts of Cars Using Graphical Models and Segment Appearance Consistency,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+1a65cc5b2abde1754b8c9b1d932a68519bcb1ada,Parsing Semantic Parts of Cars Using Graphical Models and Segment Appearance Consistency,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+1aa766bbd49bac8484e2545c20788d0f86e73ec2,"Baseline face detection, head pose estimation, and coarse direction detection for facial data in the SHRP2 naturalistic driving study",Oak Ridge National Laboratory,Oak Ridge National Laboratory,"Oak Ridge National Laboratory, Oak Ridge, Roane County, Tennessee, USA",35.93006535,-84.31240032,edu,
+1a46d3a9bc1e4aff0ccac6403b49a13c8a89fc1d,Online robust image alignment via iterative convex optimization,Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+1a46d3a9bc1e4aff0ccac6403b49a13c8a89fc1d,Online robust image alignment via iterative convex optimization,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+1a5340212809bbbce6e0d61720209179dcaa8a26,Backing Off: Hierarchical Decomposition of Activity for 3D Novel Pose Recovery,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+1a5340212809bbbce6e0d61720209179dcaa8a26,Backing Off: Hierarchical Decomposition of Activity for 3D Novel Pose Recovery,University of Manchester,University of Manchester,"University of Manchester - Main Campus, Brunswick Street, Curry Mile, Ardwick, Manchester, Greater Manchester, North West England, England, M13 9NR, UK",53.46600455,-2.23300881,edu,
+1ab56eb6128da34027242b1314e51b9b18b960db,Object Detection by 3D Aspectlets and Occlusion Reasoning,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+1ab56eb6128da34027242b1314e51b9b18b960db,Object Detection by 3D Aspectlets and Occlusion Reasoning,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+1a257ff73b1dd95f905dbbce9bb233033d09e959,A New Gabor Phase Difference Pattern for Face and Ear Recognition,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+1a30e44d6b70d11f7b270c87eac099b75b2263f1,Understanding the Nature of First-Person Videos: Characterization and Classification Using Low-Level Features,"Institute for Infocomm Research, Singapore","Institute for Infocomm Research, Singapore","1 Fusionopolis Way, #21-01 Connexis, Singapore 138632",1.29889260,103.78731070,edu,
+1a86eb42952412ee02e3f6da06f874f1946eff6b,Deep Cross-Modal Projection Learning for Image-Text Matching,Dalian University of Technology,Dalian University of Technology,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国",38.88140235,121.52281098,edu,
+1a5d52e026f877f682bec19d0edb81aedc6e14a1,Robust Gaze Estimation via Normalized Iris Center-Eye Corner Vector,University of Portsmouth,University of Portsmouth,"University of Portsmouth - North Zone, Portland Street, Portsea, Portsmouth, South East, England, PO1 3DE, UK",50.79805775,-1.09834911,edu,
+1a5d52e026f877f682bec19d0edb81aedc6e14a1,Robust Gaze Estimation via Normalized Iris Center-Eye Corner Vector,City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+1ac2882559a4ff552a1a9956ebeadb035cb6df5b,How much training data for facial action unit detection?,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+1ac2882559a4ff552a1a9956ebeadb035cb6df5b,How much training data for facial action unit detection?,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+1a7a17c4f97c68d68fbeefee1751d349b83eb14a,Iterative Hessian Sketch: Fast and Accurate Solution Approximation for Constrained Least-Squares,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+1a39ce3f624844a1288b6deff545f6c4d79c4fae,DeePM: A Deep Part-Based Model for Object Detection and Semantic Part Localization,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+1aef6f7d2e3565f29125a4871cd60c4d86c48361,Subhashini VenugopalanProposal,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+1adbcca753c7b4f22cf3d6bc3a9579573d4d5846,Sample-Specific SVM Learning for Person Re-identification,Dalian University of Technology,Dalian University of Technology,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国",38.88140235,121.52281098,edu,
+1a167e10fe57f6d6eff0bb9e45c94924d9347a3e,Boosting VLAD with double assignment using deep features for action recognition in videos,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+1a167e10fe57f6d6eff0bb9e45c94924d9347a3e,Boosting VLAD with double assignment using deep features for action recognition in videos,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+1a167e10fe57f6d6eff0bb9e45c94924d9347a3e,Boosting VLAD with double assignment using deep features for action recognition in videos,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+1a167e10fe57f6d6eff0bb9e45c94924d9347a3e,Boosting VLAD with double assignment using deep features for action recognition in videos,University Politehnica of Bucharest,University Politehnica of Bucharest,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România",44.43918115,26.05044565,edu,
+1a167e10fe57f6d6eff0bb9e45c94924d9347a3e,Boosting VLAD with double assignment using deep features for action recognition in videos,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+1a86568fdba2b85a9f0b69d563dd22aa5a8d3562,Perceptual Fidelity Aware Mean Squared Error,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+1a86568fdba2b85a9f0b69d563dd22aa5a8d3562,Perceptual Fidelity Aware Mean Squared Error,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+1adb6341dd9bfe88d631009992fe8a4ef80e2f2b,A Simple and Fast Word Spotting Method,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+1a140d9265df8cf50a3cd69074db7e20dc060d14,Face Parts Localization Using Structured-Output Regression Forests,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+1ac85387b1d5a05f752ddf671763f02e923a2a03,Deep Learning with Sets and Point Clouds,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+1a85956154c170daf7f15f32f29281269028ff69,Active Pictorial Structures,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+1aad2da473888cb7ebc1bfaa15bfa0f1502ce005,First-Person Activity Recognition: What Are They Doing to Me?,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+1a1f63cbd8465d1bbee9bca24124b52ea4ec2762,Multilinear Multitask Learning,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+1a1f63cbd8465d1bbee9bca24124b52ea4ec2762,Multilinear Multitask Learning,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+1a1f63cbd8465d1bbee9bca24124b52ea4ec2762,Multilinear Multitask Learning,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+1a5a79b4937b89420049bc279a7b7f765d143881,Are Rich People Perceived as More Trustworthy? Perceived Socioeconomic Status Modulates Judgments of Trustworthiness and Trust Behavior Based on Facial Appearance,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+1a5a79b4937b89420049bc279a7b7f765d143881,Are Rich People Perceived as More Trustworthy? Perceived Socioeconomic Status Modulates Judgments of Trustworthiness and Trust Behavior Based on Facial Appearance,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+1a5a79b4937b89420049bc279a7b7f765d143881,Are Rich People Perceived as More Trustworthy? Perceived Socioeconomic Status Modulates Judgments of Trustworthiness and Trust Behavior Based on Facial Appearance,Virginia Commonwealth University,Virginia Commonwealth University,"Virginia Commonwealth University, The Compass, Oregon Hill, Richmond, Richmond City, Virginia, 23284, USA",37.54821500,-77.45306424,edu,
+1a12eec3ceb1c81cde4ae6e8f27aac08b36317d4,Real-time Distracted Driver Posture Classification,American University in Cairo,The American University in Cairo,"الجامعة الأمريكية بالقاهرة, شارع القصر العينى, القاهرة القديمة, جاردن سيتي, القاهرة, محافظة القاهرة, 11582, مصر",30.04287695,31.23664139,edu,
+1a12eec3ceb1c81cde4ae6e8f27aac08b36317d4,Real-time Distracted Driver Posture Classification,American University in Cairo,The American University in Cairo,"الجامعة الأمريكية بالقاهرة, شارع القصر العينى, القاهرة القديمة, جاردن سيتي, القاهرة, محافظة القاهرة, 11582, مصر",30.04287695,31.23664139,edu,
+1a790c7669943af5868e49d15cf282cbbd506f02,An Overview of Recent Progress in Volumetric Semantic 3 D Reconstruction ( Invited Paper ),ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+1a4b6ee6cd846ef5e3030a6ae59f026e5f50eda6,Deep Learning for Video Classification and Captioning,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+1af52c853ff1d0ddb8265727c1d70d81b4f9b3a9,Face Recognition Under Illumination Variation Using Shadow Compensation and Pixel Selection,Dankook University,Dankook University,"단국대학교 치과병원, 죽전로, 죽전동, 수지구, 용인시, 경기, 16900, 대한민국",37.32195750,127.12507230,edu,
+1a0a06e659eb075d414286d61bd36931770db799,Relaxed Pairwise Learned Metric for Person Re-identification,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+1a398504e8822e4d079167be9684096fe862c0d5,Handling Data Imbalance in Automatic Facial Action Intensity Estimation,Otto von Guericke University,Otto von Guericke University,"Otto-von-Guericke-Universität Magdeburg, 2, Universitätsplatz, Krökentorviertel/Breiter Weg NA, Alte Neustadt, Magdeburg, Sachsen-Anhalt, 39106, Deutschland",52.14005065,11.64471248,edu,
+1a40092b493c6b8840257ab7f96051d1a4dbfeb2,Sports Videos in the Wild (SVW): A video dataset for sports analysis,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+1a6c9ef99bf0ab9835a91fe5f1760d98a0606243,ConceptMap: Mining Noisy Web Data for Concept Learning,Bilkent University,Bilkent University,"Bilkent Üniversitesi, 3. Cadde, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87204890,32.75395155,edu,
+1ae3dd081b93c46cda4d72100d8b1d59eb585157,Online Motion Agreement Tracking,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+1a660d8576ed749610e0e040076d27973aee44ee,Tracking by Identification Using Computer Vision and Radio,University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.05015580,14.46907327,edu,
+1a2e9a56e5f71bf95a2f68b6e67e2aaa1c6bf91e,FPM: Fine Pose Parts-Based Model with 3D CAD Models,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+1a07d9213a6082d69f40bb5373da60ba0d19f2d6,Video Person Re-identification with Competitive Snippet-similarity Aggregation and Co-attentive Snippet Embedding,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+1ab881ec87167af9071b2ad8ff6d4ce3eee38477,Finding Happiest Moments in a Social Context,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+1ab881ec87167af9071b2ad8ff6d4ce3eee38477,Finding Happiest Moments in a Social Context,University of Canberra,University of Canberra,"University of Canberra, University Drive, Bruce, Belconnen, Australian Capital Territory, 2617, Australia",-35.23656905,149.08446994,edu,
+1afdedba774f6689eb07e048056f7844c9083be9,Markov Random Field Structures for Facial Action Unit Intensity Estimation,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+1afdedba774f6689eb07e048056f7844c9083be9,Markov Random Field Structures for Facial Action Unit Intensity Estimation,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+1a2b3fa1b933042687eb3d27ea0a3fcb67b66b43,Max-margin Latent Dirichlet Allocation for Image Classification and Annotation,Fraser University,Fraser University,"Fraser, 3333, University Avenue Southeast, Prospect Park - East River Road, Minneapolis, Hennepin County, Minnesota, 55414, USA",44.96898360,-93.20941629,edu,
+285356448b8d6e4bd84c67758502a76336f30b0e,A Dataset and Architecture for Visual Reasoning with a Working Memory,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+285356448b8d6e4bd84c67758502a76336f30b0e,A Dataset and Architecture for Visual Reasoning with a Working Memory,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+2878b06f3c416c98496aad6fc2ddf68d2de5b8f6,Two-Stage Optimal Component Analysis,Florida State University,Florida State University,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA",30.44235995,-84.29747867,edu,
+2878b06f3c416c98496aad6fc2ddf68d2de5b8f6,Two-Stage Optimal Component Analysis,Florida State University,Florida State University,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA",30.44235995,-84.29747867,edu,
+2878b06f3c416c98496aad6fc2ddf68d2de5b8f6,Two-Stage Optimal Component Analysis,Florida State University,Florida State University,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA",30.44235995,-84.29747867,edu,
+28b8d20162f007eab1acd9d7cdb8baac914de820,Unlimited Road-scene Synthetic Annotation (URSA) Dataset,University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.54724732,edu,
+28a900a07c7cbce6b6297e4030be3229e094a950,Local directional pattern variance (ldpv): a robust feature descriptor for facial expression recognition,Kyung Hee University,Kyung Hee University,"Kyung Hee Tae Kwon Do, Vons 2370 Truck Service Ramp, University City, San Diego, San Diego County, California, 92122, USA",32.85363330,-117.20352860,edu,
+282503fa0285240ef42b5b4c74ae0590fe169211,Feeding Hand-Crafted Features for Enhancing the Performance of Convolutional Neural Networks,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+282503fa0285240ef42b5b4c74ae0590fe169211,Feeding Hand-Crafted Features for Enhancing the Performance of Convolutional Neural Networks,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+28dac6b73df69f35b11f8f10ef023674a2f39af5,Deep Learning of Graph Matching,Lund University,Lund University,"TEM at Lund University, 9, Klostergatan, Stadskärnan, Centrum, Lund, Skåne, Götaland, 22222, Sverige",55.70395710,13.19020110,edu,
+286f443fa85bc9d892ab54878c0ace0264d0dcff,Principled Parallel Mean-Field Inference for Discrete Random Fields,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+283d226e346ac3e7685dd9a4ba8ae55ee4f2fe43,Bayesian Data Association for Temporal Scene Understanding,University of Arizona,University of Arizona,"University of Arizona, North Highland Avenue, Rincon Heights, Barrio Viejo, Tucson, Pima County, Arizona, 85721, USA",32.23517260,-110.95095832,edu,
+2829288498cf03d87301f12a5bebf7f9faca0884,Face recognition using OPRA-faces,University of Minnesota,University of Minnesota,"WeismanArt, 333, East River Parkway, Marcy-Holmes, Phillips, Minneapolis, Hennepin County, Minnesota, 55455, USA",44.97308605,-93.23708813,edu,
+28f5138d63e4acafca49a94ae1dc44f7e9d84827,MahNMF: Manhattan Non-negative Matrix Factorization,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+28f5138d63e4acafca49a94ae1dc44f7e9d84827,MahNMF: Manhattan Non-negative Matrix Factorization,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+28f5138d63e4acafca49a94ae1dc44f7e9d84827,MahNMF: Manhattan Non-negative Matrix Factorization,"National University of Defense Technology, China","National University of Defence Technology, Changsha 410000, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.22902090,112.99483204,edu,
+28f5138d63e4acafca49a94ae1dc44f7e9d84827,MahNMF: Manhattan Non-negative Matrix Factorization,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+28c4103d1e27b4312115d3a6baacf3afbba01a55,Learning by Asking Questions,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+28e287d5aae3eb2c2ddbe3791e76a4cafdca7ef1,Recovering the Missing Link: Predicting Class-Attribute Associations for Unsupervised Zero-Shot Learning,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+289d833a35c2156b7e332e67d1cb099fd0683025,HICO: A Benchmark for Recognizing Human-Object Interactions in Images,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+28e1668d7b61ce21bf306009a62b06593f1819e3,"Correction: Validation of the Amsterdam Dynamic Facial Expression Set – Bath Intensity Variations (ADFES-BIV): A Set of Videos Expressing Low, Intermediate, and High Intensity Emotions",University of Bath,University of Bath,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK",51.37914420,-2.32523320,edu,
+28737575297a20d431dd2b777a79a8be2c9c2bbd,Object Ranking on Deformable Part Models with Bagged LambdaMART,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+289c6413c9b1d37c0608ee0027d28466ef3a552f,Facial Action Unit Recognition and Inference for Facial Expression Analysis,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+28b59fcd3d642f8d92a7c868c0076b00bd7f55cf,Multi-target tracking in team-sports videos via multi-level context-conditioned latent behaviour models,University of Birmingham,University of Birmingham,"University of Birmingham Edgbaston Campus, Ring Road North, Bournbrook, Birmingham, West Midlands Combined Authority, West Midlands, England, B15 2TP, UK",52.45044325,-1.93196134,edu,
+28b59fcd3d642f8d92a7c868c0076b00bd7f55cf,Multi-target tracking in team-sports videos via multi-level context-conditioned latent behaviour models,University of Birmingham,University of Birmingham,"University of Birmingham Edgbaston Campus, Ring Road North, Bournbrook, Birmingham, West Midlands Combined Authority, West Midlands, England, B15 2TP, UK",52.45044325,-1.93196134,edu,
+28b59fcd3d642f8d92a7c868c0076b00bd7f55cf,Multi-target tracking in team-sports videos via multi-level context-conditioned latent behaviour models,University of Birmingham,University of Birmingham,"University of Birmingham Edgbaston Campus, Ring Road North, Bournbrook, Birmingham, West Midlands Combined Authority, West Midlands, England, B15 2TP, UK",52.45044325,-1.93196134,edu,
+286adff6eff2f53e84fe5b4d4eb25837b46cae23,Single-Image Depth Perception in the Wild,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+2804e97b5c9dbaf4cb057c14478600cb2f9984de,Facial Model Fitting Based on Perturbation Learning and It's Evaluation on Challenging Real-World Diversities Images,Nagoya University,Nagoya University,"SuperDARN (Hokkaido West), 太辛第1支線林道, 陸別町, 足寄郡, 十勝総合振興局, 北海道, 北海道地方, 日本",43.53750985,143.60768225,edu,
+286812ade95e6f1543193918e14ba84e5f8e852e,Robust 3D Face Shape Reconstruction from Single Images via Two-Fold Coupled Structure Learning,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+284bf12324805f23b920bec0174be003c248cc9b,Lower Sensitivity to Happy and Angry Facial Emotions in Young Adults with Psychiatric Problems,University of Groningen,University of Groningen,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland",53.21967825,6.56251482,edu,
+284bf12324805f23b920bec0174be003c248cc9b,Lower Sensitivity to Happy and Angry Facial Emotions in Young Adults with Psychiatric Problems,University of Groningen,University of Groningen,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland",53.21967825,6.56251482,edu,
+28f311b16e4fe4cc0ff6560aae3bbd0cb6782966,Learning Language from Perceptual Context,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+28d06fd508d6f14cd15f251518b36da17909b79e,What's in a Name? First Names as Facial Attributes,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+28d06fd508d6f14cd15f251518b36da17909b79e,What's in a Name? First Names as Facial Attributes,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+28d06fd508d6f14cd15f251518b36da17909b79e,What's in a Name? First Names as Facial Attributes,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+281486d172cf0c78d348ce7d977a82ff763efccd,A Cost-Sensitive Visual Question-Answer Framework for Mining a Deep And-OR Object Semantics from Web Images,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+288964068cd87d97a98b8bc927d6e0d2349458a2,Mean-Variance Loss for Deep Age Estimation from a Face,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+288964068cd87d97a98b8bc927d6e0d2349458a2,Mean-Variance Loss for Deep Age Estimation from a Face,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+28af188e26836934c9beea8b2bc8cd53447197fa,Variational Gaussian Process Auto-Encoder for Ordinal Prediction of Facial Action Units,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+28af188e26836934c9beea8b2bc8cd53447197fa,Variational Gaussian Process Auto-Encoder for Ordinal Prediction of Facial Action Units,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+28bf3aee9eecc2f7a7b4ac71bfe89534d3fe5f19,Occlusion-Aware R-CNN: Detecting Pedestrians in a Crowd,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+28bf3aee9eecc2f7a7b4ac71bfe89534d3fe5f19,Occlusion-Aware R-CNN: Detecting Pedestrians in a Crowd,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+28bf3aee9eecc2f7a7b4ac71bfe89534d3fe5f19,Occlusion-Aware R-CNN: Detecting Pedestrians in a Crowd,GE Global Research Center,GE Global Research Center,"GE Global Research Center, Aqueduct, Niskayuna, Schenectady County, New York, USA",42.82982480,-73.87719385,edu,
+28bf3aee9eecc2f7a7b4ac71bfe89534d3fe5f19,Occlusion-Aware R-CNN: Detecting Pedestrians in a Crowd,Macau University of Science and Technology,Macau University of Science and Technology,"Universidade de Ciência e Tecnologia de Macau 澳門科技大學 Macau University of Science and Technology, 偉龍馬路 Avenida Wai Long, 氹仔Taipa, 氹仔舊城區 Vila de Taipa, 嘉模堂區 Nossa Senhora do Carmo, 氹仔 Taipa, 澳門 Macau, 853, 中国",22.15263985,113.56803206,edu,
+28646c6220848db46c6944967298d89a6559c700,It takes two to tango : Cascading off-the-shelf face detectors,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+28633f80f1eae857d670cb245fbeb5d4e6e47a58,Explicit Reasoning over End-to-End Neural Architectures for Visual Question Answering,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+28c0cb56e7f97046d6f3463378d084e9ea90a89a,Automatic face recognition for film character retrieval in feature-length films,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+28be652db01273289499bc6e56379ca0237506c0,FaLRR: A fast low rank representation solver,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+286b5b80bc76dbb63094a85951bb8e8895ee9f14,TriKon: A hypervisor aware manycore processor,Indian Institute of Technology Delhi,Indian Institute of Technology Delhi,"Indian Institute of Technology Delhi, Hauz Khas, South West Delhi, Delhi, 110016, India",28.54561000,77.19266000,edu,
+2862615e5767a8a81257138f04de6c5bd33e2984,Egocentric Future Localization,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+28226aedf1972af2008509cf3d1e7c6646c77f7b,Nuclear-L1 Norm Joint Regression for Face Reconstruction and Recognition,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+2836d68c86f29bb87537ea6066d508fde838ad71,Personalized Age Progression with Aging Dictionary,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+2836d68c86f29bb87537ea6066d508fde838ad71,Personalized Age Progression with Aging Dictionary,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+28de411a5b3eb8411e7bcb0003c426aa91f33e97,Emotion Detection Using Facial Expressions -A Review,Kurukshetra University,Kurukshetra University,"Kurukshetra University, SH6, Kurukshetra, Haryana, 132118, India",29.95826275,76.81563045,edu,
+28de411a5b3eb8411e7bcb0003c426aa91f33e97,Emotion Detection Using Facial Expressions -A Review,Kurukshetra University,Kurukshetra University,"Kurukshetra University, SH6, Kurukshetra, Haryana, 132118, India",29.95826275,76.81563045,edu,
+28df3f11894ce0c48dd8aee65a6ec76d9009cbbd,Recurrent Flow-Guided Semantic Forecasting,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+28b26597a7237f9ea6a9255cde4e17ee18122904,Network Interactions Explain Sensitivity to Dynamic Faces in the Superior Temporal Sulcus,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+281e961f0d8dd6251e3124b43944820faba8a53f,Improved Fusion of Visual and Language Representations by Dense Symmetric Co-Attention for Visual Question Answering,Tohoku University,Tohoku University,"Tohoku University, 五橋通, 青葉区, 仙台市, 宮城県, 東北地方, 980-0811, 日本",38.25309450,140.87365930,edu,
+280d632ef3234c5ab06018c6eaccead75bc173b3,Efficient Image and Video Co-localization with Frank-Wolfe Algorithm,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+28d99dc2d673d62118658f8375b414e5192eac6f,Using Ranking-CNN for Age Estimation,Wayne State University,Wayne State University,"Parking Structure 3, East Warren Avenue, New Center, Detroit, Wayne County, Michigan, 48236, USA",42.35775700,-83.06286711,edu,
+28d99dc2d673d62118658f8375b414e5192eac6f,Using Ranking-CNN for Age Estimation,Wayne State University,Wayne State University,"Parking Structure 3, East Warren Avenue, New Center, Detroit, Wayne County, Michigan, 48236, USA",42.35775700,-83.06286711,edu,
+28ce99940265407517faf7c45755675054ef78c4,Distinct facial expressions represent pain and pleasure across cultures,University of Glasgow,University of Glasgow,"University of Glasgow, University Avenue, Yorkhill, Hillhead, Glasgow, Glasgow City, Scotland, G, UK",55.87231535,-4.28921784,edu,
+28ce99940265407517faf7c45755675054ef78c4,Distinct facial expressions represent pain and pleasure across cultures,University of Glasgow,University of Glasgow,"University of Glasgow, University Avenue, Yorkhill, Hillhead, Glasgow, Glasgow City, Scotland, G, UK",55.87231535,-4.28921784,edu,
+28ce99940265407517faf7c45755675054ef78c4,Distinct facial expressions represent pain and pleasure across cultures,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+280bc9751593897091015aaf2cab39805768b463,Gender Perception From Faces Using Boosted LBPH (Local Binary Patten Histograms),"COMSATS Institute of Information Technology, Lahore",COMSATS Institute of Information Technology,"COMSATS Institute of Information Technology, Ali Akbar Road, Dawood Residency, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54700, ‏پاکستان‎",31.40063320,74.21372960,edu,
+282cee05661a690aa525f21b47c6ee39fb26a7c2,Build a Robust Learning Feature Descriptor by Using a New Image Visualization Method for Indoor Scenario Recognition,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+28ff4c98b7a922f4502c69003f686fe0f94083a6,On the regularization of image semantics by modal expansion,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+286e4e6b0360c06f659d351ac885aafb62a6b73d,Gait Verification Using Probabilistic Methods,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+2803c3247c11a30a8075dbc2db6ff96f58c2ae97,Perspective-Adjusting Appearance Model for Distributed Multi-View Person Tracking,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+28bdaf9b7fc5af73482e324d45acf91722f07340,Joint Object and Part Segmentation Using Deep Learned Potentials,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+28858a6e956d712331986b31d1646d6b497ff1a9,Independent Neural Computation of Value from Other People's Confidence.,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+28858a6e956d712331986b31d1646d6b497ff1a9,Independent Neural Computation of Value from Other People's Confidence.,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+2850aa5324998b6d656d9d9c20f0eaf9d8946e2f,Indoor-outdoor classification with human accuracies: Image or edge gist?,Boston College,Boston College,"Boston College, 140, Commonwealth Avenue, Chestnut Hill, Newton, Middlesex County, Massachusetts, 02467, USA",42.33544810,-71.16813864,edu,
+2850aa5324998b6d656d9d9c20f0eaf9d8946e2f,Indoor-outdoor classification with human accuracies: Image or edge gist?,Boston College,Boston College,"Boston College, 140, Commonwealth Avenue, Chestnut Hill, Newton, Middlesex County, Massachusetts, 02467, USA",42.33544810,-71.16813864,edu,
+288d2704205d9ca68660b9f3a8fda17e18329c13,Studying Very Low Resolution Recognition Using Deep Networks,Beckman Institute,Beckman Institute,"Beckman Institute, The Presidents' Walk, Urbana, Champaign County, Illinois, 61801-2341, USA",40.11571585,-88.22750772,edu,
+289fb3709475f5c87df8d97f129af54029d27fee,Compositional Attention Networks for Machine Reasoning,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+289fb3709475f5c87df8d97f129af54029d27fee,Compositional Attention Networks for Machine Reasoning,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+17b46e2dad927836c689d6787ddb3387c6159ece,GeoFaceExplorer: exploring the geo-dependence of facial attributes,University of Kentucky,University of Kentucky,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA",38.03337420,-84.50177580,edu,
+17b46e2dad927836c689d6787ddb3387c6159ece,GeoFaceExplorer: exploring the geo-dependence of facial attributes,University of Kentucky,University of Kentucky,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA",38.03337420,-84.50177580,edu,
+178b55ded04d351c5a7df2e94a81aa3051d7fd8b,Visual Question Answering with Memory-Augmented Networks,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+1768909f779869c0e83d53f6c91764f41c338ab5,A large-scale car dataset for fine-grained categorization and verification,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+1768909f779869c0e83d53f6c91764f41c338ab5,A large-scale car dataset for fine-grained categorization and verification,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+1768909f779869c0e83d53f6c91764f41c338ab5,A large-scale car dataset for fine-grained categorization and verification,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+171ca25bc2cdfc79cad63933bcdd420d35a541ab,Calibration-Free Gaze Estimation Using Human Gaze Patterns,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+176bd61cc843d0ed6aa5af83c22e3feb13b89fe1,Investigating Spontaneous Facial Action Recognition through AAM Representations of the Face,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+171a4ef673e40d09d7091082c7fd23b3758fc3c2,Video-based face recognition using ensemble of haar-like deep convolutional neural networks,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+17555c227941654bc19d613742e2508f209c6d86,Albumentations: fast and flexible image augmentations,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+17d01f34dfe2136b404e8d7f59cebfb467b72b26,Riemannian Similarity Learning,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+17d4fd92352baf6f0039ec64d43ca572c8252384,MoE-SPNet: A mixture-of-experts scene parsing network,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+1774b5a76d139a5532284f797ea7a36318bbcefd,Recognizing Complex Events Using Large Margin Joint Low-Level Event Model,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+176f26a6a8e04567ea71677b99e9818f8a8819d0,MEG: Multi-Expert Gender Classification from Face Images in a Demographics-Balanced Dataset,Sapienza University of Rome,Sapienza University of Rome,"Piazzale Aldo Moro, 5, 00185 Roma RM, Italy",41.90376260,12.51443840,edu,
+176f26a6a8e04567ea71677b99e9818f8a8819d0,MEG: Multi-Expert Gender Classification from Face Images in a Demographics-Balanced Dataset,University of Naples Federico II,University of Naples Federico II,"Corso Umberto I, 40, 80138 Napoli NA, Italy",40.84549200,14.25780580,edu,
+1727601f148b937a49df10194edcee4800852a97,Deep Mutual Learning,Dalian University of Technology,Dalian University of Technology,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国",38.88140235,121.52281098,edu,
+1727601f148b937a49df10194edcee4800852a97,Deep Mutual Learning,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+1727601f148b937a49df10194edcee4800852a97,Deep Mutual Learning,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+173a1110e3f5fe6a5518d7ceb025730b073bad62,Divided Local Binary Pattern ( DLBP ) Features Description Method For Facial Expression Recognition ⋆,Shandong University,Shandong University,"山东大学, 泰安街, 鳌山卫街道, 即墨区, 青岛市, 山东省, 266200, 中国",36.36934730,120.67381800,edu,
+17f9b24a4871d29ca1a83fae12e4b96bce0fba63,Person Re-Identification Using Kernel-Based Metric Learning Methods,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+17f3358d219c05f3cb8d68bdfaf6424567d66984,Adversarial Examples for Generative Models,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+17f3358d219c05f3cb8d68bdfaf6424567d66984,Adversarial Examples for Generative Models,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+17370f848801871deeed22af152489e39b6e1454,Undersampled face recognition with one-pass dictionary learning,"Academia Sinica, Taiwan","Research Center for Institute of Information Science, Academia Sinica, Taiwan","115, Taiwan, Taipei City, Nangang District, 研究院路二段128號",25.04117270,121.61465180,edu,
+17479e015a2dcf15d40190e06419a135b66da4e0,Predicting First Impressions With Deep Learning,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+173a38768848cfe57a6b20b5ae019ce613e58781,Knowledge Acquisition for Visual Question Answering via Iterative Querying,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+173a38768848cfe57a6b20b5ae019ce613e58781,Knowledge Acquisition for Visual Question Answering via Iterative Querying,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+171042ba12818238e3c0994ff08d71f8c28d4134,Learning to Describe E-Commerce Images from Noisy Online Data,Tohoku University,Tohoku University,"Tohoku University, 五橋通, 青葉区, 仙台市, 宮城県, 東北地方, 980-0811, 日本",38.25309450,140.87365930,edu,
+17fa1c2a24ba8f731c8b21f1244463bc4b465681,Deep multi-scale video prediction beyond mean square error,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+17579791ead67262fcfb62ed8765e115fb5eca6f,Real-Time Fashion-guided Clothing Semantic Parsing: a Lightweight Multi-Scale Inception Neural Network and Benchmark,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+17579791ead67262fcfb62ed8765e115fb5eca6f,Real-Time Fashion-guided Clothing Semantic Parsing: a Lightweight Multi-Scale Inception Neural Network and Benchmark,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+17e7a53456539dac2c9cf8631174c6388f64e24b,Learning to Detect Multiple Photographic Defects,University of Virginia,University of Virginia,"University of Virginia, Rotunda Alley, Carr's Hill, Albemarle County, Virginia, 22904-4119, USA",38.03536820,-78.50353220,edu,
+175e9bb50cc062c6c1742a5d90c8dfe31d2e4e22,Where to Look: Focus Regions for Visual Question Answering,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+17614bcb0f96d576dee34e1349f8be3d56786dd2,Detecting Partially Occluded Objects with an Implicit Shape Model Random Field,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+177d1e7bbea4318d379f46d8d17720ecef3086ac,Learning Multi-channel Deep Feature Representations for Face Recognition,Wayne State University,Wayne State University,"Parking Structure 3, East Warren Avenue, New Center, Detroit, Wayne County, Michigan, 48236, USA",42.35775700,-83.06286711,edu,
+177d1e7bbea4318d379f46d8d17720ecef3086ac,Learning Multi-channel Deep Feature Representations for Face Recognition,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+17f472a7cb25bf1e76ff29181b1d40585e2ae5c1,Fusing binary templates for multi-biometric cryptosystems,Hong Kong Baptist University,Hong Kong Baptist University,"香港浸會大學 Hong Kong Baptist University, 安明街 On Ming Street, 石門 Shek Mun, 石古壟 Shek Kwu Lung, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1132, 中国",22.38742010,114.20822220,edu,
+170a5f5da9ac9187f1c88f21a88d35db38b4111a,Online Real-Time Multiple Spatiotemporal Action Localisation and Prediction,Oxford Brookes University,Oxford Brookes University,"Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK",51.75552050,-1.22615970,edu,
+170a5f5da9ac9187f1c88f21a88d35db38b4111a,Online Real-Time Multiple Spatiotemporal Action Localisation and Prediction,Oxford University,Oxford University,"University College, Logic Lane, Grandpont, Oxford, Oxon, South East, England, OX1 4EX, UK",51.75208490,-1.25166460,edu,
+17b6eb93b41baeb5e1b0a16ecb0673a72368a34b,Generic Object Recognition,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+17a8d1b1b4c23a630b051f35e47663fc04dcf043,Differential Angular Imaging for Material Recognition,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+17a8d1b1b4c23a630b051f35e47663fc04dcf043,Differential Angular Imaging for Material Recognition,Drexel University,Drexel University,"Drexel University, Arch Street, Powelton Village, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.95740000,-75.19026706,edu,
+171d8a39b9e3d21231004f7008397d5056ff23af,"Simultaneous Facial Landmark Detection, Pose and Deformation Estimation Under Facial Occlusion",Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+171d8a39b9e3d21231004f7008397d5056ff23af,"Simultaneous Facial Landmark Detection, Pose and Deformation Estimation Under Facial Occlusion",Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+171d8a39b9e3d21231004f7008397d5056ff23af,"Simultaneous Facial Landmark Detection, Pose and Deformation Estimation Under Facial Occlusion",Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+17045163860fc7c38a0f7d575f3e44aaa5fa40d7,Boosting VLAD with Supervised Dictionary Learning and High-Order Statistics,Southwest Jiaotong University,Southwest Jiaotong University,"西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国",30.69784700,104.05208110,edu,
+17045163860fc7c38a0f7d575f3e44aaa5fa40d7,Boosting VLAD with Supervised Dictionary Learning and High-Order Statistics,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+17045163860fc7c38a0f7d575f3e44aaa5fa40d7,Boosting VLAD with Supervised Dictionary Learning and High-Order Statistics,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+17045163860fc7c38a0f7d575f3e44aaa5fa40d7,Boosting VLAD with Supervised Dictionary Learning and High-Order Statistics,Hengyang Normal University,Hengyang Normal University,"衡阳师范学院, 黄白路, 雁峰区, 衡阳市 / Hengyang, 湖南省, 中国",26.86611360,112.62092122,edu,
+170862138b7be1b8d92c3abf7cf2466bc435f1ec,Alive Caricature from 2D to 3D,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+170862138b7be1b8d92c3abf7cf2466bc435f1ec,Alive Caricature from 2D to 3D,Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.48799610,-3.17969747,edu,
+170862138b7be1b8d92c3abf7cf2466bc435f1ec,Alive Caricature from 2D to 3D,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+17e563af203d469c456bb975f3f88a741e43fb71,Naming TV characters by watching and analyzing dialogs,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+171389529df11cc5a8b1fbbe659813f8c3be024d,Manifold Estimation in View-Based Feature Space for Face Synthesis across Poses,University of Kentucky,University of Kentucky,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA",38.03337420,-84.50177580,edu,
+17f29dba3809527c3b9533247045a488417ec21c,Removal of 3D facial expressions: A learning-based approach,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+17f8f5fe7a6730ee8d735d055ccc12231aff4435,A Large-scale Distributed Video Parsing and Evaluation Platform,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+17d5e5c9a9ee4cf85dfbb9d9322968a6329c3735,Study on Parameter Selection Using SampleBoost,University of North Texas,University of North Texas,"University of North Texas, West Highland Street, Denton, Denton County, Texas, 76201, USA",33.20988790,-97.15147488,edu,
+17cf6195fd2dfa42670dc7ada476e67b381b8f69,Automatic Face Region Tracking for Highly Accurate Face Recognition in Unconstrained Environments,Chung-Ang University,Chung-Ang University,"중앙대학교, 서달로15길, 흑석동, 동작구, 서울특별시, 06981, 대한민국",37.50882000,126.96190000,edu,
+17cf6195fd2dfa42670dc7ada476e67b381b8f69,Automatic Face Region Tracking for Highly Accurate Face Recognition in Unconstrained Environments,University of Tennessee,University of Tennessee,"University of Tennessee, Melrose Avenue, Fort Sanders, Knoxville, Knox County, Tennessee, 37916, USA",35.95424930,-83.93073950,edu,
+174f46eccb5852c1f979d8c386e3805f7942bace,The Shape-Time Random Field for Semantic Video Labeling,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+17113b0f647ce05b2e50d1d40c856370f94da7de,Zoom Better to See Clearer: Human Part Segmentation with Auto Zoom Net,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+172cd5d213cefd99e93039eaf3d8824b3ba203e4,Learned vs. Hand-Crafted Features for Pedestrian Gender Recognition,EURECOM,EURECOM,"Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France",43.61438600,7.07112500,edu,
+17670b60dcfb5cbf8fdae0b266e18cf995f6014c,Longitudinal Face Modeling via Temporal Deep Restricted Boltzmann Machines,Concordia University,Concordia University,"Concordia University, 1455, Boulevard De Maisonneuve Ouest, Ville-Marie, Montréal, Québec, H3G 1M8, Canada",45.49715600,-73.57887800,edu,
+17670b60dcfb5cbf8fdae0b266e18cf995f6014c,Longitudinal Face Modeling via Temporal Deep Restricted Boltzmann Machines,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+17027a05c1414c9a06a1c5046899abf382a1142d,Articulated motion discovery using pairs of trajectories,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+17ded725602b4329b1c494bfa41527482bf83a6f,Compact Convolutional Neural Network Cascade for Face Detection,Tomsk Polytechnic University,Tomsk Polytechnic University,"Томский политехнический университет, улица Пирогова, Южная, Кировский район, Томск, городской округ Томск, Томская область, СФО, 634034, РФ",56.46255985,84.95565495,edu,
+17ded725602b4329b1c494bfa41527482bf83a6f,Compact Convolutional Neural Network Cascade for Face Detection,Tomsk Polytechnic University,Tomsk Polytechnic University,"Томский политехнический университет, улица Пирогова, Южная, Кировский район, Томск, городской округ Томск, Томская область, СФО, 634034, РФ",56.46255985,84.95565495,edu,
+179ae598004d76c56dcc95c5aab3419ec8996af1,Person independent 3D gaze estimation from remote RGB-D cameras,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+17423fe480b109e1d924314c1dddb11b084e8a42,Deep Disguised Faces Recognition,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+177bc509dd0c7b8d388bb47403f28d6228c14b5c,"Deep Learning Face Representation from Predicting 10,000 Classes",Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+177bc509dd0c7b8d388bb47403f28d6228c14b5c,"Deep Learning Face Representation from Predicting 10,000 Classes",Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+177bc509dd0c7b8d388bb47403f28d6228c14b5c,"Deep Learning Face Representation from Predicting 10,000 Classes",Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+171585599fcc0cb2c2c190a3ff395c2f5bd331dc,3-D–2-D spatiotemporal registration for sports motion analysis,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+7b4e0a98dcb4ba34afcc5901f51384ba727473a0,Introduction to Emotion Recognition,Jadavpur University,Jadavpur University,"Jadavpur University, Chingrighata Flyover, Basani Devi Colony, Kolkata, Hāora, West Bengal, 700098, India",22.56115370,88.41310194,edu,
+7b63ed54345d8c06523f6b03c41a09b5c8f227e2,Facial expression recognition based on combination of spatio-temporal and spectral features in local facial regions,Islamic Azad University,Islamic Azad University,"دانشگاه آزاد اسلامی, همدان, بخش مرکزی شهرستان همدان, شهرستان همدان, استان همدان, ‏ایران‎",34.84529990,48.55962120,edu,
+7b3231245a3d518085c8e747e2c2232963f49bc5,Tracking millions of humans in crowded spaces,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+7bf0a1aa1d0228a51d24c0c3a83eceb937a6ae25,"Video-based Car Surveillance: License Plate, Make, and Model Recognition","University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+7b3b2912c1d7a70839bc71a150e33f8634d0fff3,Convolutional Neural Network-Based Embarrassing Situation Detection under Camera for Social Robot in Smart Homes,Oklahoma State University,Oklahoma State University,"Walmart East Bus Stop, East Virginia Avenue, Stillwater, Payne County, Oklahoma, 74075, USA",36.12447560,-97.05004383,edu,
+7b21db9efc3403fa054739921e29aedcc81b1fb1,Exploring Correlations in Multiple Facial Attributes through Graph Attention Network,East China Normal University,East China Normal University,"华东师范大学, 3663, 中山北路, 曹家渡, 普陀区, 普陀区 (Putuo), 上海市, 200062, 中国",31.22849230,121.40211389,edu,
+7bcb505d93175d0b89ff7aca76caf579ddf12339,PixelNN: Example-based Image Synthesis,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+7b8baf809d9b643145e089b7a1650923487cf451,Do Deep Convolutional Nets Really Need to be Deep and Convolutional?,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+7b8baf809d9b643145e089b7a1650923487cf451,Do Deep Convolutional Nets Really Need to be Deep and Convolutional?,University of Alberta,University of Alberta,"University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada",53.52385720,-113.52282665,edu,
+7b8baf809d9b643145e089b7a1650923487cf451,Do Deep Convolutional Nets Really Need to be Deep and Convolutional?,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+7b8baf809d9b643145e089b7a1650923487cf451,Do Deep Convolutional Nets Really Need to be Deep and Convolutional?,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+7bc1318403cdb4895a4437993d288068a8e85f5f,Fast-Converging Conditional Generative Adversarial Networks for Image Synthesis,University of Tennessee,University of Tennessee,"University of Tennessee, Melrose Avenue, Fort Sanders, Knoxville, Knox County, Tennessee, 37916, USA",35.95424930,-83.93073950,edu,
+7b4d985d03ebf8465757877f0eeaea00fa77676b,Dyadic Dynamics: The Impact of Emotional Responses to Facial Expressions on the Perception of Power,University of Haifa,University of Haifa,"אוניברסיטת חיפה, חיפה, מחוז חיפה, ישראל",32.76162915,35.01986304,edu,
+7b4d985d03ebf8465757877f0eeaea00fa77676b,Dyadic Dynamics: The Impact of Emotional Responses to Facial Expressions on the Perception of Power,University of Haifa,University of Haifa,"אוניברסיטת חיפה, חיפה, מחוז חיפה, ישראל",32.76162915,35.01986304,edu,
+7b4d985d03ebf8465757877f0eeaea00fa77676b,Dyadic Dynamics: The Impact of Emotional Responses to Facial Expressions on the Perception of Power,Kyoto University,Kyoto University,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.02749960,135.78154513,edu,
+7bc8d81a38899b60704681125ec4fc584a3e7ba4,Look me in the eyes: constraining gaze in the eye-region provokes abnormally high subcortical activation in autism,Cambridge University,Cambridge University,"University, Cambridge Road, Old Portsmouth, Portsmouth, South East, England, PO1 2HB, UK",50.79440260,-1.09717480,edu,
+7b43326477795a772c08aee750d3e433f00f20be,Computational Methods for Behavior Analysis,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+7bf04c79f2659a404c9b9b91e0375e1450c3adbe,Mahalanobis Distance Based Non-negative Sparse Representation for Face Recognition,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+7b79a754a8583f887857c539895a9dda6331ca2e,Binary-Decomposed DCNN for Accelerating Computation and Compressing Model Without Retraining,Chubu University,Chubu University,"中部大学, 国道19号, 春日井市, 愛知県, 中部地方, 487-8501, 日本",35.27426550,137.01327841,edu,
+7b79a754a8583f887857c539895a9dda6331ca2e,Binary-Decomposed DCNN for Accelerating Computation and Compressing Model Without Retraining,Chubu University,Chubu University,"中部大学, 国道19号, 春日井市, 愛知県, 中部地方, 487-8501, 日本",35.27426550,137.01327841,edu,
+7b79a754a8583f887857c539895a9dda6331ca2e,Binary-Decomposed DCNN for Accelerating Computation and Compressing Model Without Retraining,Chubu University,Chubu University,"中部大学, 国道19号, 春日井市, 愛知県, 中部地方, 487-8501, 日本",35.27426550,137.01327841,edu,
+7b79a754a8583f887857c539895a9dda6331ca2e,Binary-Decomposed DCNN for Accelerating Computation and Compressing Model Without Retraining,Chubu University,Chubu University,"中部大学, 国道19号, 春日井市, 愛知県, 中部地方, 487-8501, 日本",35.27426550,137.01327841,edu,
+7bdab6e725ab1bbf8fcd6d7c451f6c4cc215ada9,Complex Wavelet Transform-Based Face Recognition,Eastern Mediterranean University,Eastern Mediterranean University,"Eastern Mediterranean University (EMU) - Stadium, Nehir Caddesi, Gazimağusa, Αμμόχωστος - Mağusa, Kuzey Kıbrıs, 99450, Κύπρος - Kıbrıs",35.14479945,33.90492318,edu,
+7be60f8c34a16f30735518d240a01972f3530e00,Facial expression recognition with temporal modeling of shapes,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+7b3fe45f887a37f78bb356874702adae91dda105,High Distortion and Non-Structural Image Matching via Feature Co-occurrence,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+7b45aa509184b05064eafb362f80ba5778566a4e,High-Level Interpretation of Urban Road Maps Fusing Deep Learning-Based Pixelwise Scene Segmentation and Digital Navigation Maps,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+7bd837a934c6cd6ee858bdfd4ee0f8fa3663fed7,A Generic Model to Compose Vision Modules for Holistic Scene Understanding,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+7bd837a934c6cd6ee858bdfd4ee0f8fa3663fed7,A Generic Model to Compose Vision Modules for Holistic Scene Understanding,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+7b8b1571639f901275da22ee8f1de852350bf38e,Improved Deep Learning of Object Category Using Pose Information,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+7b74b65983ae0abb09a540b6413a5a36b2df027a,Gated Transfer Network for Transfer Learning,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+7b0c88bc555e3ced093e5cecb5dc1996f42eeeec,Solving Linear Inverse Problems Using Gan Priors: An Algorithm with Provable Guarantees,Iowa State University,Iowa State University,"Iowa State University, Farm House Road, Ames, Story County, Iowa, 50014, USA",42.02791015,-93.64464415,edu,
+8fe99c3d5ad9af54641dcd6b55e2b083a363d515,Fashion and Apparel Classification using Convolutional Neural Networks,Vienna University of Technology,Vienna University of Technology,"TU Wien, Hauptgebäude, Hoftrakt, Freihausviertel, KG Wieden, Wieden, Wien, 1040, Österreich",48.19853965,16.36986168,edu,
+8fe38962c24300129391f6d7ac24d7783e0fddd0,Visual Text Correction,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+8f3e120b030e6c1d035cb7bd9c22f6cc75782025,Bayesian Networks and the Imprecise Dirichlet Model Applied to Recognition Problems,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+8f0b6845689a0b6adda2feb52b9345f9d9a2a8b3,Social Attention in the Two Species of Pan: Bonobos Make More Eye Contact than Chimpanzees,Kyoto University,Kyoto University,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.02749960,135.78154513,edu,
+8f0b6845689a0b6adda2feb52b9345f9d9a2a8b3,Social Attention in the Two Species of Pan: Bonobos Make More Eye Contact than Chimpanzees,Kyoto University,Kyoto University,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.02749960,135.78154513,edu,
+8f0b6845689a0b6adda2feb52b9345f9d9a2a8b3,Social Attention in the Two Species of Pan: Bonobos Make More Eye Contact than Chimpanzees,University of St Andrews,University of St Andrews,"University of St Andrews, North Street, Albany Park Student accommodation, Carngour, St Andrews, Fife, Scotland, KY16 9AJ, UK",56.34119840,-2.79309380,edu,
+8f0b6845689a0b6adda2feb52b9345f9d9a2a8b3,Social Attention in the Two Species of Pan: Bonobos Make More Eye Contact than Chimpanzees,Kyoto University,Kyoto University,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.02749960,135.78154513,edu,
+8f48c9ca2ea3101083be19344633372fe1a2efcd,Distinctive-attribute Extraction for Image Captioning,Korea Electronics Technology Institute,Korea Electronics Technology Institute,"South Korea, Gyeonggi-do, Seongnam-si, Bundang-gu, 새나리로 25 (야탑동) KETI 전자부품연구원",37.40391700,127.15978600,edu,
+8f5a2750f7ed015efa85887db3f6c6d2c0cb7b11,Social perception in synaesthesia for colour,Dartmouth College,Dartmouth College,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA",43.70479270,-72.29259090,edu,
+8f5a2750f7ed015efa85887db3f6c6d2c0cb7b11,Social perception in synaesthesia for colour,Victoria University of Wellington,Victoria University of Wellington,"Victoria University of Wellington, Waiteata Road, Aro Valley, Wellington, Wellington City, Wellington, 6040, New Zealand/Aotearoa",-41.29052775,174.76846919,edu,
+8f5a2750f7ed015efa85887db3f6c6d2c0cb7b11,Social perception in synaesthesia for colour,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+8f5a2750f7ed015efa85887db3f6c6d2c0cb7b11,Social perception in synaesthesia for colour,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+8fed5ea3b69ea441a8b02f61473eafee25fb2374,Two-Dimensional PCA with F-Norm Minimization,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+8fed5ea3b69ea441a8b02f61473eafee25fb2374,Two-Dimensional PCA with F-Norm Minimization,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+8f1ac3c8fe6bcb1da2cdef60bc218ba1e264074f,Unsupervised Template Learning for Fine-Grained Object Recognition,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+8f1ac3c8fe6bcb1da2cdef60bc218ba1e264074f,Unsupervised Template Learning for Fine-Grained Object Recognition,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+8f3da45ff0c3e1777c3a7830f79c10f5896bcc21,Riding Role Agent Vehicle Place Role Agent Vehicle Place Value Man Horse outside Value Dog Skateboard,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+8ff8c64288a2f7e4e8bf8fda865820b04ab3dbe8,Age Estimation Using Expectation of Label Distribution Learning,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+8ff8c64288a2f7e4e8bf8fda865820b04ab3dbe8,Age Estimation Using Expectation of Label Distribution Learning,Southeast University,Southeast University,"SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国",32.05752790,118.78682252,edu,
+8faa4f2e287ff1bcaba2e0cd84d82a66bb2982f5,Pedestrian Detection with RCNN,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+8f08b2101d43b1c0829678d6a824f0f045d57da5,Supplementary Material for: Active Pictorial Structures,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+8f81eb82cd046891c88163bc7b472dcc779f5f08,TokyoTechCanon at TRECVID 2012,Tokyo Institute of Technology,Tokyo Institute of Technology,"東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本",35.51675380,139.48342251,edu,
+8f0b2d24dab016764eebeaa2070d31801948f6f5,Unified Perceptual Parsing for Scene Understanding,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+8f0b2d24dab016764eebeaa2070d31801948f6f5,Unified Perceptual Parsing for Scene Understanding,MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+8ffc49aead99fdacb0b180468a36984759f2fc1e,Sparse Label Smoothing for Semi-supervised Person Re-Identification,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"Columbus, OH 43210, USA",40.01419050,-83.03091430,edu,
+8ff967ed7130f81abc896d7b84f7c629aed5cf49,Unified probabilistic framework for simultaneous detection and tracking of multiple objects with application to bio-image sequences,"University of California, Santa Barbara","University of California, Santa Barbara","UCSB, Santa Barbara County, California, 93106, USA",34.41459370,-119.84581950,edu,
+8f3d5f9ad240c186971edc652f8385dc2a53d2eb,Model-Based Background Subtraction System Application Domain: Pedestrian Tracking,Aalborg University,Aalborg University,"AAU, Pontoppidanstræde, Sønder Tranders, Aalborg, Aalborg Kommune, Region Nordjylland, 9220, Danmark",57.01590275,9.97532827,edu,
+8fbec9105d346cd23d48536eb20c80b7c2bbbe30,The effectiveness of face detection algorithms in unconstrained crowd scenes,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+8f8a5be9dc16d73664285a29993af7dc6a598c83,Neural Network based Face Recognition with Gabor Filters,Jahangirnagar University,Jahangirnagar University,"Jahangirnagar University, 1342, University Main Road, সাভার, সাভার উপজেলা, ঢাকা জেলা, ঢাকা বিভাগ, 1342, বাংলাদেশ",23.88331200,90.26939210,edu,
+8f5ce25e6e1047e1bf5b782d045e1dac29ca747e,A Novel Discriminant Non-Negative Matrix Factorization Algorithm With Applications to Facial Image Characterization Problems,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+8f92cccacf2c84f5d69db3597a7c2670d93be781,Facial expression synthesis through facial expressions statistical analysis,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+8f48b2da711417d1f1f39069501577c84abb8d37,Elevated amygdala response to faces and gaze aversion in autism spectrum disorder.,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+8f6263e4d3775757e804796e104631c7a2bb8679,Characterizing Visual Representations within Convolutional Neural Networks: Toward a Quantitative Approach,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+8f6263e4d3775757e804796e104631c7a2bb8679,Characterizing Visual Representations within Convolutional Neural Networks: Toward a Quantitative Approach,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+8f60c343f76913c509ce623467bf086935bcadac,Joint 3D Face Reconstruction and Dense Alignment with Position Map Regression Network,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+8fa290b5d92c1f427edb62d29988056383e02047,Absence of preferential unconscious processing of eye contact in adolescents with autism spectrum disorder.,Tokyo Denki University,Tokyo Denki University,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本",35.65729570,139.54255868,edu,
+8fa290b5d92c1f427edb62d29988056383e02047,Absence of preferential unconscious processing of eye contact in adolescents with autism spectrum disorder.,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+8fa290b5d92c1f427edb62d29988056383e02047,Absence of preferential unconscious processing of eye contact in adolescents with autism spectrum disorder.,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+8fa290b5d92c1f427edb62d29988056383e02047,Absence of preferential unconscious processing of eye contact in adolescents with autism spectrum disorder.,University of Tampere,University of Tampere,"Tampereen yliopisto, 4, Kalevantie, Ratinanranta, Tulli, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33100, Suomi",61.49412325,23.77920678,edu,
+8fa290b5d92c1f427edb62d29988056383e02047,Absence of preferential unconscious processing of eye contact in adolescents with autism spectrum disorder.,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+8f27df2d4fb7dd7ed5587640dcbe4dc1eb37acfb,Unseen Action Recognition with Multimodal Learning,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+8f27df2d4fb7dd7ed5587640dcbe4dc1eb37acfb,Unseen Action Recognition with Multimodal Learning,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+8fe0a35dc47698b45f3812bb502b0921b349ae56,Online multi-person tracking via robust collaborative model,Concordia University,Concordia University,"Concordia University, 2811, Northeast Holman Street, Concordia, Portland, Multnomah County, Oregon, 97211, USA",45.57022705,-122.63709346,edu,
+8f5facdc0a2a79283864aad03edc702e2a400346,Estimation Framework using Bio - Inspired Features for Facial Image,Bangalore Institute of Technology,Bangalore Institute of Technology,"Bangalore Institute of Technology, Krishna Rajendra Road, Mavalli, Vishveshwara Puram, South Zone, Bengaluru, Bangalore Urban, Karnataka, 560004, India",12.95512590,77.57419850,edu,
+8a3c5507237957d013a0fe0f082cab7f757af6ee,Facial Landmark Detection by Deep Multi-task Learning,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+8af411697e73f6cfe691fe502d4bfb42510b4835,Dynamic Local Ternary Pattern for Face Recognition and Verification,Institute of Information Technology,Institute of Information Technology,"Institute of Information Technology, Sir Sayed Road, ফকিরাপুল, সিদ্দিক বাজার, ঢাকা, ঢাকা বিভাগ, 1000, বাংলাদেশ",23.72898990,90.39826820,edu,
+8af411697e73f6cfe691fe502d4bfb42510b4835,Dynamic Local Ternary Pattern for Face Recognition and Verification,University of Dhaka,University of Dhaka,"World War Memorial, Shahid Minar Rd, Jagannath Hall, DU, জিগাতলা, ঢাকা, ঢাকা বিভাগ, 1000, বাংলাদেশ",23.73169570,90.39652750,edu,
+8af411697e73f6cfe691fe502d4bfb42510b4835,Dynamic Local Ternary Pattern for Face Recognition and Verification,Hankuk University of Foreign Studies,Hankuk University of Foreign Studies,"외대앞, 휘경로, 이문동, 이문2동, 동대문구, 서울특별시, 02407, 대한민국",37.59539790,127.06304990,edu,
+8ad12d3ee186403b856639b58d7797aa4b89a6c7,Temporal Relational Reasoning in Videos,MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+8a1ed5e23231e86216c9bdd62419c3b05f1e0b4d,Facial Keypoint Detection,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+8a6b52fd31ebaf00e7abe57c4c50dee4683aee4b,VOP: Architecture of a Processor for Vector Operations in On-Line Learning of Neural Networks,Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.02223470,77.56718325,edu,
+8ae92b73020dac2c98c72cbaf823cff1567bd91b,Semantic Image Inpainting with Perceptual and Contextual Losses,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+8a7bd4202e49fcdb947d71c9f2da0e7a953c7021,Privacy and security assessment of biometric template protection,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+8a3f85c80c698f15639ced90b4e9d4baa23b572e,MCBoost: Multiple Classifier Boosting for Perceptual Co-clustering of Images and Visual Features,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+8a3f85c80c698f15639ced90b4e9d4baa23b572e,MCBoost: Multiple Classifier Boosting for Perceptual Co-clustering of Images and Visual Features,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+8a26431833b0ea8659ef1d24bff3ac9e56dcfcd0,VoxCeleb: a large-scale speaker identification dataset,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+8a3eaaef13bdaee26142fd2784de07e1d24926ca,Design and evaluation of photometric image quality measures for effective face recognition,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+8a8861ad6caedc3993e31d46e7de6c251a8cda22,StreetStyle: Exploring world-wide clothing styles from millions of photos,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+8aff946f5d678f689cc9476e48d8b122671205ae,"Neuron numbers increase in the human amygdala from birth to adulthood, but not in autism",New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+8a2a77062770bbdbfdbfe06ad7c3ab1728a4c59a,Video Action Detection with Relational Dynamic-Poselets,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+8a2a77062770bbdbfdbfe06ad7c3ab1728a4c59a,Video Action Detection with Relational Dynamic-Poselets,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+8a2a77062770bbdbfdbfe06ad7c3ab1728a4c59a,Video Action Detection with Relational Dynamic-Poselets,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+8a6ee59cda77eeb7e126e3bc3d82e742ae1b3e58,"DeeperCut: A Deeper, Stronger, and Faster Multi-person Pose Estimation Model",Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+8a6ee59cda77eeb7e126e3bc3d82e742ae1b3e58,"DeeperCut: A Deeper, Stronger, and Faster Multi-person Pose Estimation Model",Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+8a3bb63925ac2cdf7f9ecf43f71d65e210416e17,ShearFace: Efficient Extraction of Anisotropic Features for Face Recognition,"University of Sfax, Tunisia","REGIM-Labo: REsearch Groups in Intelligent Machines, University of Sfax, ENIS, BP 1173, Sfax, 3038, Tunisia","Université de Route de l'Aéroport Km 0.5 BP 1169 .3029 Sfax, Sfax, Tunisia",34.73610660,10.74272750,edu,"University of Sfax, Tunisia"
+8a0159919ee4e1a9f4cbfb652a1be212bf0554fd,"Application of power laws to biometrics, forensics and network traffic analysis",University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+8ad0d8cf4bcb5c7eccf09f23c8b7d25439c4ae2b,Predicting the Future with Transformational States,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+8a722c17e6bda2df13f03ca522119f4c8b5bfff8,Connecting Missing Links: Object Discovery from Sparse Observations Using 5 Million Product Images,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+8a722c17e6bda2df13f03ca522119f4c8b5bfff8,Connecting Missing Links: Object Discovery from Sparse Observations Using 5 Million Product Images,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+8a722c17e6bda2df13f03ca522119f4c8b5bfff8,Connecting Missing Links: Object Discovery from Sparse Observations Using 5 Million Product Images,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+8a722c17e6bda2df13f03ca522119f4c8b5bfff8,Connecting Missing Links: Object Discovery from Sparse Observations Using 5 Million Product Images,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+8a722c17e6bda2df13f03ca522119f4c8b5bfff8,Connecting Missing Links: Object Discovery from Sparse Observations Using 5 Million Product Images,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+8acbf69f5877dac506bf04dc1802f327247cc27e,FD-GAN: Pose-guided Feature Distilling GAN for Robust Person Re-identification,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+8acbf69f5877dac506bf04dc1802f327247cc27e,FD-GAN: Pose-guided Feature Distilling GAN for Robust Person Re-identification,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+8a37b56c92fa18c1129ca029935db0e837b73675,Co-occurrence flow for pedestrian detection,Cambridge Research Laboratory,Cambridge Research Laboratory,"Strangeways Research Laboratory, Babraham Road, Romsey, Cambridge, Cambridgeshire, East of England, England, CB1 8RN, UK",52.17333465,0.14989946,edu,
+8a37b56c92fa18c1129ca029935db0e837b73675,Co-occurrence flow for pedestrian detection,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+8a5099b2ae6912b4df22534a1b3065e147c38b9c,Face Hallucination with Tiny Unaligned Images by Transformative Discriminative Neural Networks,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+8a091254ba45ab9fe7d72c8104409bee5aa8f199,Efficient Object Detection Using Orthogonal NMF Descriptor Hierarchies,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+8af9f7c920a87acb3ae127756f498a51b535790a,Exploring Contextual Engagement for Trauma Recovery,University of Colorado at Colorado Springs,University of Colorado at Colorado Springs,"1420 Austin Bluffs Pkwy, Colorado Springs, CO 80918, USA",38.89646790,-104.80505940,edu,
+8a0d10a7909b252d0e11bf32a7f9edd0c9a8030b,Animals on the Web,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+8a0d10a7909b252d0e11bf32a7f9edd0c9a8030b,Animals on the Web,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+8a8224266b8ab1483f6548307ab96227147f34da,Zero-Shot Visual Question Answering,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+8a1294d2093b7f339e3d33da46e008aca3528893,FoveaNet: Perspective-Aware Urban Scene Parsing,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+8a05ac5e375ebf80b5fb88f207a9d33d5765c27b,2D-3D Hybrid Face Recognition Based on PCA and Feature Modelling,Queensland University of Technology,Queensland University of Technology,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.47715625,153.02841004,edu,
+8a7882f765822ecc1f72610277037228c24e7bf7,Examining Performance of Sketch-to-Image Translation Models with Multiclass Automatically Generated Paired Training Data,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+8aef5b3cfc80fafdcefc24c72a4796ca40f4bc8b,Person Re-Identification by Support Vector Ranking,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+8a12edaf81fd38f81057cf9577c822eb09ff6fc1,Measuring and mitigating targeted biometric impersonation,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+8a12edaf81fd38f81057cf9577c822eb09ff6fc1,Measuring and mitigating targeted biometric impersonation,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+7e1e189fff0b0cef3c231e6b2d01b65bef6027e7,A Concept of Bimodal Visual Emotion Recognition in Computer Users,AGH University of Science and Technology,AGH University of Science and Technology,"AGH, Władysława Reymonta, Czarna Wieś, Krowodrza, Kraków, małopolskie, 30-059, RP",50.06570330,19.91895867,edu,
+7ed3b79248d92b255450c7becd32b9e5c834a31e,L 1-regularized Logistic Regression Stacking and Transductive CRF Smoothing for Action Recognition in Video,University of Florence,University of Florence,"Piazza di San Marco, 4, 50121 Firenze FI, Italy",43.77764260,11.25976500,edu,
+7ed3b79248d92b255450c7becd32b9e5c834a31e,L 1-regularized Logistic Regression Stacking and Transductive CRF Smoothing for Action Recognition in Video,University of Florence,University of Florence,"Piazza di San Marco, 4, 50121 Firenze FI, Italy",43.77764260,11.25976500,edu,
+7ed3b79248d92b255450c7becd32b9e5c834a31e,L 1-regularized Logistic Regression Stacking and Transductive CRF Smoothing for Action Recognition in Video,University of Florence,University of Florence,"Piazza di San Marco, 4, 50121 Firenze FI, Italy",43.77764260,11.25976500,edu,
+7ed3b79248d92b255450c7becd32b9e5c834a31e,L 1-regularized Logistic Regression Stacking and Transductive CRF Smoothing for Action Recognition in Video,University of Florence,University of Florence,"Piazza di San Marco, 4, 50121 Firenze FI, Italy",43.77764260,11.25976500,edu,
+7e8016bef2c180238f00eecc6a50eac473f3f138,Immersive Interactive Data Mining and Machine Learning Algorithms for Big Data Visualization,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+7e02b0bce72a88f2f70b199c5dc87a01fe217832,Learning Multi-target Tracking with Quadratic Object Interactions,"University of California, Irvine","University of California, Irvine","University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA",33.64319010,-117.84016494,edu,
+7ed2c84fdfc7d658968221d78e745dfd1def6332,Evaluation of linear combination of views for object recognition on real and synthetic datasets,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+7e49a6f11a8843b2ff5bdbf7cf95617c6219f757,Multi-Modal Fusion for Moment in Time Video Classification,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+7ea35b35392c6ef5738635cec7d17b24fe3e4f04,Deep Forest,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+7eaa97be59019f0d36aa7dac27407b004cad5e93,Sampling Generative Networks,Victoria University of Wellington,Victoria University of Wellington,"Victoria University of Wellington, Waiteata Road, Aro Valley, Wellington, Wellington City, Wellington, 6040, New Zealand/Aotearoa",-41.29052775,174.76846919,edu,
+7eb895e7de883d113b75eda54389460c61d63f67,Can You Tell a Face from a HEVC Bitstream?,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+7e467e686f9468b826133275484e0a1ec0f5bde6,Efficient On-the-fly Category Retrieval using ConvNets and GPUs,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+7e4b638e028498e900747b600f46cd723f1f231e,Data Augmentation for Visual Question Answering,Rochester Institute of Technology,Rochester Institute of Technology,"Rochester Institute of Technology (RIT), 1, Lomb Memorial Drive, Bailey, Henrietta Town, Monroe County, New York, 14623, USA",43.08250655,-77.67121663,edu,
+7e988b6f688f248d803be9846a4cbd4126afc785,Learning Locality-Constrained Collaborative Representation for Face Recognition,"Sichuan University, Chengdu","Sichuan Univ., Chengdu","四川大学(华西校区), 校东路, 武侯区, 武侯区 (Wuhou), 成都市 / Chengdu, 四川省, 610014, 中国",30.64276900,104.06751175,edu,
+7e988b6f688f248d803be9846a4cbd4126afc785,Learning Locality-Constrained Collaborative Representation for Face Recognition,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+7ee610cd384cf1950d6254562e00490ad05eec57,Selecting Optimal Orientations of Gabor Wavelet Filters for Facial Image Analysis,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+7ee610cd384cf1950d6254562e00490ad05eec57,Selecting Optimal Orientations of Gabor Wavelet Filters for Facial Image Analysis,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+7e984bbad042b145d1ff8351c4a7c5fb6a81e0b1,Graininess-Aware Deep Feature Learning for Pedestrian Detection,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+7e3147a01108607fa65ace289094e5b5b525929c,Automatic Beautification for Group-Photo Facial Expressions Using Novel Bayesian GANs,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+7e00fb79576fe213853aeea39a6bc51df9fdca16,Online multi-face detection and tracking using detector confidence and structured SVMs,Eindhoven University of Technology,Eindhoven University of Technology,"Technische Universiteit Eindhoven, 2, De Rondom, Villapark, Eindhoven, Noord-Brabant, Nederland, 5600 MB, Nederland",51.44866020,5.49039957,edu,
+7ee17d2001c9fcef63e3a56610cacc743861d944,Extracting Databases from Dark Data with DeepDive,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+7ee17d2001c9fcef63e3a56610cacc743861d944,Extracting Databases from Dark Data with DeepDive,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+7e435d78693aec1b87b6f690a8716a60a5e5ff8c,Multimodal sentiment analysis with word-level fusion and reinforcement learning,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+7e435d78693aec1b87b6f690a8716a60a5e5ff8c,Multimodal sentiment analysis with word-level fusion and reinforcement learning,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+7e435d78693aec1b87b6f690a8716a60a5e5ff8c,Multimodal sentiment analysis with word-level fusion and reinforcement learning,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+7e435d78693aec1b87b6f690a8716a60a5e5ff8c,Multimodal sentiment analysis with word-level fusion and reinforcement learning,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+7e435d78693aec1b87b6f690a8716a60a5e5ff8c,Multimodal sentiment analysis with word-level fusion and reinforcement learning,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+7e435d78693aec1b87b6f690a8716a60a5e5ff8c,Multimodal sentiment analysis with word-level fusion and reinforcement learning,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+7e2cfbfd43045fbd6aabd9a45090a5716fc4e179,Global Norm-Aware Pooling for Pose-Robust Face Recognition at Low False Positive Rate,Beijing Jiaotong University,Beijing Jiaotong University,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国",39.94976005,116.33629046,edu,
+7e18b5f5b678aebc8df6246716bf63ea5d8d714e,Increased Loss Aversion in Unmedicated Patients with Obsessive–Compulsive Disorder,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+7e18b5f5b678aebc8df6246716bf63ea5d8d714e,Increased Loss Aversion in Unmedicated Patients with Obsessive–Compulsive Disorder,Southwest University,Southwest University,"西南大学, 天生路, 北碚区 (Beibei), 北碚区, 北碚区 (Beibei), 重庆市, 400711, 中国",29.82366295,106.42050016,edu,
+7e18b5f5b678aebc8df6246716bf63ea5d8d714e,Increased Loss Aversion in Unmedicated Patients with Obsessive–Compulsive Disorder,Beijing Normal University,Beijing Normal University,"北京师范大学, 19, 新街口外大街, 西城区, 100875, 中国",39.96014155,116.35970438,edu,
+7e18b5f5b678aebc8df6246716bf63ea5d8d714e,Increased Loss Aversion in Unmedicated Patients with Obsessive–Compulsive Disorder,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+7e526c32a9ba12d3aeb69c70ee38b178ba203d6e,Mixture of Heterogeneous Attribute Analyzers for Human Action Detection,"Advanced Digital Sciences Center, Singapore","Advanced Digital Sciences Center, Singapore","1 Create Way, 14-02 Create Tower, Singapore 138602",1.30372570,103.77377630,edu,
+7eb85bcb372261bad707c05e496a09609e27fdb3,A Compute-Efficient Algorithm for Robust Eyebrow Detection,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+7ef41e2be5116912fe8a4906b4fb89ac9dcf819d,A hybrid face recognition method using Markov random fields,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+7e2a443cb069f1e3b0d7c41fecf55774ac584895,The Role of Color in Face Processing and Autism Spectrum Disorders,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+7e2a443cb069f1e3b0d7c41fecf55774ac584895,The Role of Color in Face Processing and Autism Spectrum Disorders,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+7e2a443cb069f1e3b0d7c41fecf55774ac584895,The Role of Color in Face Processing and Autism Spectrum Disorders,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+7e01ce7a1c14971088afa3ee73f92db451e2c536,A Task-Oriented Approach for Cost-Sensitive Recognition,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+7e6f7ce8ec6f62c4bf68f84207973914fc8e79b9,Exploring bounding box context for multi-object tracker fusion,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+7e6b11674e989d6a86afda241a51f7fa3790b93e,Optimized Kernel-based Projection Space of Riemannian Manifolds,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+7e6b11674e989d6a86afda241a51f7fa3790b93e,Optimized Kernel-based Projection Space of Riemannian Manifolds,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+7e6b11674e989d6a86afda241a51f7fa3790b93e,Optimized Kernel-based Projection Space of Riemannian Manifolds,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+7ebb153704706e457ab57b432793d2b6e5d12592,Faces in Places: compound query retrieval,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+7e225e3e61527f35b7bf44d47e12cbadfc9441f8,Generating High-Quality Crowd Density Maps Using Contextual Pyramid CNNs,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+7ec7163ec1bc237c4c2f2841c386f2dbfd0cc922,Skiing and Thinking About It: Moment-to-Moment and Retrospective Analysis of Emotions in an Extreme Sport,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+7e19f7a82528fa79349f1fc61c7f0d35a9ad3a5e,Face Recognition: A Hybrid Neural Network Approach,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+7e19f7a82528fa79349f1fc61c7f0d35a9ad3a5e,Face Recognition: A Hybrid Neural Network Approach,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+7e0c75ce731131e613544e1a85ae0f2c28ee4c1f,Regression-based Estimation of Pain and Facial Expression Intensity,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+7eff2b26a16e0898ebdd141e930d011a3d3e4e8b,Clothing retrieval with visual attention model,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+7eff2b26a16e0898ebdd141e930d011a3d3e4e8b,Clothing retrieval with visual attention model,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+7ef44b7c2b5533d00001ae81f9293bdb592f1146,Détection des émotions à partir de vidéos dans un environnement non contrôlé Detection of emotions from video in non-controlled environment,Aalborg University,Aalborg University,"AAU, Pontoppidanstræde, Sønder Tranders, Aalborg, Aalborg Kommune, Region Nordjylland, 9220, Danmark",57.01590275,9.97532827,edu,
+7ed27f10ff2961611bb8604096a64adfa38c9022,Deep Structured Learning for Facial Action Unit Intensity Estimation,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+7ed27f10ff2961611bb8604096a64adfa38c9022,Deep Structured Learning for Facial Action Unit Intensity Estimation,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+7ed27f10ff2961611bb8604096a64adfa38c9022,Deep Structured Learning for Facial Action Unit Intensity Estimation,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+7e186b41f5d2cfdf1940009e61d4e34a47b33c7c,Orientation Invariant Feature Embedding and Spatial Temporal Regularization for Vehicle Re-identification,SenseTime,SenseTime,"China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1号-7",39.99300800,116.32988200,company,"1 Zhongguancun E Rd, Haidian Qu, China"
+7e1ea2679a110241ed0dd38ff45cd4dfeb7a8e83,Extensions of Hierarchical Slow Feature Analysis for Efficient Classification and Regression on High-Dimensional Data,Ruhr-University Bochum,Ruhr-University Bochum,"RUB, 150, Universitätsstraße, Ruhr-Universität, Querenburg, Bochum-Süd, Bochum, Regierungsbezirk Arnsberg, Nordrhein-Westfalen, 44801, Deutschland",51.44415765,7.26096541,edu,
+7e51a42049193726e9ac547b76e929d803e441f3,Holistic processing of the mouth but not the eyes in developmental prosopagnosia.,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+7e51a42049193726e9ac547b76e929d803e441f3,Holistic processing of the mouth but not the eyes in developmental prosopagnosia.,Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+7e45b27ec7339dc557866b31e74c71a52e99fd32,Statistical Inference of Motion in the Invisible,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+10b36c003542545f1e2d73e8897e022c0c260c32,Towards a Principled Integration of Multi-camera Re-identification and Tracking Through Optimal Bayes Filters,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+108d44bee37d4190883a268274ff78a8fd20de54,Competitive Sparse Representation Classification for Face Recognition,Chongqing University of Posts and Telecommunications,Chongqing University of Posts and Telecommunications,"重庆邮电大学, 崇文路, 渝中区, 黄桷垭, 重庆市中心, 南岸区 (Nan'an), 重庆市, 400065, 中国",29.53570460,106.60482474,edu,
+108d44bee37d4190883a268274ff78a8fd20de54,Competitive Sparse Representation Classification for Face Recognition,Chongqing University of Posts and Telecommunications,Chongqing University of Posts and Telecommunications,"重庆邮电大学, 崇文路, 渝中区, 黄桷垭, 重庆市中心, 南岸区 (Nan'an), 重庆市, 400065, 中国",29.53570460,106.60482474,edu,
+108d44bee37d4190883a268274ff78a8fd20de54,Competitive Sparse Representation Classification for Face Recognition,Chongqing University of Posts and Telecommunications,Chongqing University of Posts and Telecommunications,"重庆邮电大学, 崇文路, 渝中区, 黄桷垭, 重庆市中心, 南岸区 (Nan'an), 重庆市, 400065, 中国",29.53570460,106.60482474,edu,
+108d44bee37d4190883a268274ff78a8fd20de54,Competitive Sparse Representation Classification for Face Recognition,Chongqing University of Posts and Telecommunications,Chongqing University of Posts and Telecommunications,"重庆邮电大学, 崇文路, 渝中区, 黄桷垭, 重庆市中心, 南岸区 (Nan'an), 重庆市, 400065, 中国",29.53570460,106.60482474,edu,
+10ad82949b65bae59410aaab5aac88d2caa6a3d7,Detecting and Synthesizing Synchronous Joint Action in Human-Robot Teams,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+102280e80470ace006e14d6ec9adda082603dea1,Transformation on Computer-Generated Facial Image to Avoid Detection by Spoofing Detector,"National Institute of Informatics, Japan","National Institute of Informatics, Japan","2 Chome-1-2 Hitotsubashi, Chiyoda, Tokyo 100-0003, Japan",35.69248530,139.75825330,edu,
+102280e80470ace006e14d6ec9adda082603dea1,Transformation on Computer-Generated Facial Image to Avoid Detection by Spoofing Detector,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+10550ee13855bd7403946032354b0cd92a10d0aa,Accelerating neuromorphic vision algorithms for recognition,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+102e7bd7660357e1814c821c7f697f2eccececa4,Predicting Motivations of Actions by Leveraging Text,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+102e7bd7660357e1814c821c7f697f2eccececa4,Predicting Motivations of Actions by Leveraging Text,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+10793d1475607929fedc6d9a677911ad16843e58,Unsupervised Learning of Edges,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+10e12d11cb98ffa5ae82343f8904cfe321ae8004,A New Simplex Sparse Learning Model to Measure Data Similarity for Clustering,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+10464196584476a7f3d887fda42444d08f5f8ad4,Generalized Local Binary Patterns for Texture Classification,University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.54724732,edu,
+10126b467391e153d36f1a496ef5618097775ad1,An Active Age Estimation of Facial image using Anthropometric Model and Fast ICA,Bangalore Institute of Technology,Bangalore Institute of Technology,"Bangalore Institute of Technology, Krishna Rajendra Road, Mavalli, Vishveshwara Puram, South Zone, Bengaluru, Bangalore Urban, Karnataka, 560004, India",12.95512590,77.57419850,edu,
+100105d6c97b23059f7aa70589ead2f61969fbc3,Frontal to profile face verification in the wild,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+100105d6c97b23059f7aa70589ead2f61969fbc3,Frontal to profile face verification in the wild,State University of New Jersey,The State University of New Jersey,"Rutgers New Brunswick: Livingston Campus, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA",40.51865195,-74.44099801,edu,
+10a36dea0167511b66deca65fdca978aa9afdb11,Simple Baseline for Visual Question Answering,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+100da509d4fa74afc6e86a49352751d365fceee5,Multiclass recognition and part localization with humans in the loop,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+100da509d4fa74afc6e86a49352751d365fceee5,Multiclass recognition and part localization with humans in the loop,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+101d1cff1aa5590a1f79bc485cbfec094a995f74,Persuasive Faces: Generating Faces in Advertisements (Supplementary Material),University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+10ffdfdbc0aafb89d94528f359425de0c7a81986,Interacting HiddenMarkovModels for Video Understanding,Colorado State University,Colorado State University,"Colorado State University, West Pitkin Street, Woodwest, Fort Collins, Larimer County, Colorado, 80526-2002, USA",40.57093580,-105.08655256,edu,
+10af69f11301679b6fbb23855bf10f6af1f3d2e6,Beyond Gaussian Pyramid: Multi-skip Feature Stacking for action recognition,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+100428708e4884300e4c1ac1f84cbb16e7644ccf,Regularized Shearlet Network for face recognition using single sample per person,"University of Sfax, Tunisia","REGIM-Labo: REsearch Groups in Intelligent Machines, University of Sfax, ENIS, BP 1173, Sfax, 3038, Tunisia","Université de Route de l'Aéroport Km 0.5 BP 1169 .3029 Sfax, Sfax, Tunisia",34.73610660,10.74272750,edu,"University of Sfax, Tunisia"
+100428708e4884300e4c1ac1f84cbb16e7644ccf,Regularized Shearlet Network for face recognition using single sample per person,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+1091ee239b2344a526a5617233914345389b04fe,Transferable Joint Attribute-Identity Deep Learning for Unsupervised Person Re-Identification,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+10f17534dba06af1ddab96c4188a9c98a020a459,People-LDA: Anchoring Topics to People using Face Recognition,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+102c02bd78c2a4d9a028b779933ff2f164e1e927,Instance Map Based Image Synthesis With a Denoising Generative Adversarial Network,Ocean University of China,Ocean University of China,"中国海洋大学, 238, 松岭路 Sōnglǐng Road, 朱家洼, 崂山区 (Laoshan), 青岛市, 山东省, 266100, 中国",36.16161795,120.49355276,edu,
+10e0e6f1ec00b20bc78a5453a00c792f1334b016,Temporal Selective Max Pooling Towards Practical Face Recognition,Johns Hopkins University,"Johns Hopkins University, 3400 N. Charles St, Baltimore, MD 21218, USA","3400 N Charles St, Baltimore, MD 21218, USA",39.32905300,-76.61942500,edu,
+102b968d836177f9c436141e382915a4f8549276,Affective multimodal human-computer interaction,Delft University of Technology,Delft University of Technology,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland",51.99882735,4.37396037,edu,
+102b968d836177f9c436141e382915a4f8549276,Affective multimodal human-computer interaction,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+102b968d836177f9c436141e382915a4f8549276,Affective multimodal human-computer interaction,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+102b968d836177f9c436141e382915a4f8549276,Affective multimodal human-computer interaction,Beckman Institute,Beckman Institute,"Beckman Institute, The Presidents' Walk, Urbana, Champaign County, Illinois, 61801-2341, USA",40.11571585,-88.22750772,edu,
+10f3d78cf8ae69a5889bea88d3ebe2c6507e5720,LDMNet: Low Dimensional Manifold Regularized Neural Networks,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+10f3d78cf8ae69a5889bea88d3ebe2c6507e5720,LDMNet: Low Dimensional Manifold Regularized Neural Networks,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+1096445f1185265c56edb1be3bde6ac4e8d91386,Aspects of facial biometrics for verification of personal identity,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+1096445f1185265c56edb1be3bde6ac4e8d91386,Aspects of facial biometrics for verification of personal identity,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+108961c7366e36825ffed94ac9eab603e05b6bc6,Deep Visual-Semantic Alignments for Generating Image Descriptions,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+102caab9bdf31c1bb4838529be45608ef29efbbd,Nebula feature: A space-time feature for posed and spontaneous 4D facial behavior analysis,SUNY Binghamton,State University of New York at Binghamton,"State University of New York at Binghamton, East Drive, Hinman, Willow Point, Vestal Town, Broome County, New York, 13790, USA",42.08779975,-75.97066066,edu,
+107c5030f2c55e0a7cf4c6159cbbd4f719b0d9fb,ViP-CNN: Visual Phrase Guided Convolutional Neural Network,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+108c973b51514f54cf2a078ca243ff0cde091f4b,3D face recognition on low-cost depth sensors,Tokyo Institute of Technology,Tokyo Institute of Technology,"東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本",35.51675380,139.48342251,edu,
+10a98632ed618c23c58af93e17d90ef654b1845f,Performance Evaluation of Illumination Invariant Face Recognition Algorthims,Punjabi University Patiala,Punjabi University Patiala,"Punjabi University Patiala, Rajpura Road, Patiala, Punjab, 147001, India",30.35689810,76.45512720,edu,
+10d255fb0bb651b6e9cc69855a970c44f121f2c9,Learning Pose Grammar to Encode Human Body Configuration for 3D Pose Estimation,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+10723c39f9dcfcbd45d4ed7460006dba78c6b67f,An accurate 3D human face model reconstruction scheme,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+101d4cfbd6f8a7a10bd33505e2b183183f1d8770,The 2013 SESAME Multimedia Event Detection and Recounting System,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+106092fafb53e36077eba88f06feecd07b9e78e7,Attend and Interact: Higher-Order Object Interactions for Video Understanding,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+10bbdbf86b3dd9a60f9be01401e0585250c97477,"ROY, MARCEL: HLBP FEATURE FOR FAST ILLUMINATION INVARIANT FACE DETECTION1 Haar Local Binary Pattern Feature for Fast Illumination Invariant Face Detection",IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+10b1794dae6128480e5c56ee83f0113930c101cf,Learning Instance-Aware Object Detection Using Determinantal Point Processes,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+10b1794dae6128480e5c56ee83f0113930c101cf,Learning Instance-Aware Object Detection Using Determinantal Point Processes,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+10b1794dae6128480e5c56ee83f0113930c101cf,Learning Instance-Aware Object Detection Using Determinantal Point Processes,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+103c8eaca2a2176babab2cc6e9b25d48870d6928,Panning for gold: finding relevant semantic content for grounded language learning,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+10d85459ab6a9350931fcb4709bba171cd31bbde,Two-person interaction detection using body-pose features and multiple instance learning,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+10d85459ab6a9350931fcb4709bba171cd31bbde,Two-person interaction detection using body-pose features and multiple instance learning,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+10f66f6550d74b817a3fdcef7fdeba13ccdba51c,Benchmarking Face Alignment,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+10d003ba5062c048f0e324c897f849b0c9bc2aab,What to Transfer? High-Level Semantics in Transfer Metric Learning for Action Similarity,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+10f3ead41bf8de97aee9c25b345b8b7495a99aab,Sparseness Meets Deepness: 3D Human Pose Estimation from Monocular Video,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+10f3ead41bf8de97aee9c25b345b8b7495a99aab,Sparseness Meets Deepness: 3D Human Pose Estimation from Monocular Video,Ryerson University,Ryerson University,"Ryerson University, Gould Street, Downtown Yonge, Old Toronto, Toronto, Ontario, M5B 2G9, Canada",43.65815275,-79.37908010,edu,
+10156890bc53cb6be97bd144a68fde693bf13612,Face Recognition Using Sparse Representation-Based Classification on K-Nearest Subspace,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+107fc60a6c7d58a6e2d8572ad8c19cc321a9ef53,Hollywood in Homes: Crowdsourcing Data Collection for Activity Understanding,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+107fc60a6c7d58a6e2d8572ad8c19cc321a9ef53,Hollywood in Homes: Crowdsourcing Data Collection for Activity Understanding,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+1057137d8ebbbfc4e816d74edd7ab04f61a893f8,Craniofacial Aging,University of North Carolina Wilmington,University of North Carolina Wilmington,"Kenan House, 1705, Market Street, Wilmington, New Hanover County, North Carolina, 28403, USA",34.23755810,-77.92701290,edu,
+1057137d8ebbbfc4e816d74edd7ab04f61a893f8,Craniofacial Aging,University of North Carolina Wilmington,University of North Carolina Wilmington,"Kenan House, 1705, Market Street, Wilmington, New Hanover County, North Carolina, 28403, USA",34.23755810,-77.92701290,edu,
+1057137d8ebbbfc4e816d74edd7ab04f61a893f8,Craniofacial Aging,Virginia Commonwealth University,Virginia Commonwealth University,"Virginia Commonwealth University, The Compass, Oregon Hill, Richmond, Richmond City, Virginia, 23284, USA",37.54821500,-77.45306424,edu,
+10c49dc22d5c7d885cba238634390013aeda6e0e,Machine-based Multimodal Pain Assessment Tool for Infants: A Review,University of South Florida,University of South Florida,"University of South Florida, Leroy Collins Boulevard, Tampa, Hillsborough County, Florida, 33620, USA",28.05999990,-82.41383619,edu,
+1037664753b281543ce300fed0852a64d24334ba,Binary - Feature Based Recognition and Cryptographic Key Generation from Face Biometrics,Queensland University of Technology,Queensland University of Technology,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.47715625,153.02841004,edu,
+1037664753b281543ce300fed0852a64d24334ba,Binary - Feature Based Recognition and Cryptographic Key Generation from Face Biometrics,Queensland University of Technology,Queensland University of Technology,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.47715625,153.02841004,edu,
+1037664753b281543ce300fed0852a64d24334ba,Binary - Feature Based Recognition and Cryptographic Key Generation from Face Biometrics,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+1037664753b281543ce300fed0852a64d24334ba,Binary - Feature Based Recognition and Cryptographic Key Generation from Face Biometrics,University of New South Wales,University of New South Wales,"UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia",-33.91758275,151.23124025,edu,
+103a7c3eba36792886ae8005f6492332e6b05bad,Facial Recognition with Encoded Local Projections,University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.54724732,edu,
+103a7c3eba36792886ae8005f6492332e6b05bad,Facial Recognition with Encoded Local Projections,University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.54724732,edu,
+107231a511fa981ac8d13723d7aea52847580930,Manipulated Object Proposal: A Discriminative Object Extraction and Feature Fusion Framework for First-Person Daily Activity Recognition,Hefei University of Technology,Hefei University of Technology,"合肥工业大学(屯溪路校区), 193号, 南一环路, 航运南村, 包公街道, 合肥市区, 合肥市, 安徽省, 230009, 中国",31.84691800,117.29053367,edu,
+107231a511fa981ac8d13723d7aea52847580930,Manipulated Object Proposal: A Discriminative Object Extraction and Feature Fusion Framework for First-Person Daily Activity Recognition,"Advanced Digital Sciences Center, Singapore","Advanced Digital Sciences Center, Singapore","1 Create Way, 14-02 Create Tower, Singapore 138602",1.30372570,103.77377630,edu,
+107231a511fa981ac8d13723d7aea52847580930,Manipulated Object Proposal: A Discriminative Object Extraction and Feature Fusion Framework for First-Person Daily Activity Recognition,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+107231a511fa981ac8d13723d7aea52847580930,Manipulated Object Proposal: A Discriminative Object Extraction and Feature Fusion Framework for First-Person Daily Activity Recognition,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+10ca2e03ff995023a701e6d8d128455c6e8db030,Modeling Stylized Character Expressions via Deep Learning,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+10b987b076fe56e08c89693cdb7207c13b870540,Anticipating Visual Representations from Unlabeled Video,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+10b987b076fe56e08c89693cdb7207c13b870540,Anticipating Visual Representations from Unlabeled Video,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+1921e0a97904bdf61e17a165ab159443414308ed,Informatics Bachelor Thesis Retrieval of Web Images for Computer Vision Research,Bielefeld University,Bielefeld University,"Fachhochschule Bielefeld FB Gestaltung, 3, Lampingstraße, Mitte, Bielefeld, Regierungsbezirk Detmold, Nordrhein-Westfalen, 33615, Deutschland",52.02804210,8.51148270,edu,
+19150b001031cc6d964e83cd28553004f653cc24,Visual Relationship Detection with Language Priors,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+1927d01b6b9acf865401b544e25b62a7ddbac5fa,An Enhanced Region Proposal Network for object detection using deep learning method,Jilin University,Jilin University,"吉林大学珠海校区, 丹桂路, 圣堂村, 金湾区, 珠海市, 广东省, 中国",22.05356500,113.39913285,edu,
+1922ad4978ab92ce0d23acc4c7441a8812f157e5,Face alignment by coarse-to-fine shape searching,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+1922ad4978ab92ce0d23acc4c7441a8812f157e5,Face alignment by coarse-to-fine shape searching,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+19e62a56b6772bbd37dfc6b8f948e260dbb474f5,Cross-Domain Metric Learning Based on Information Theory,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+19e62a56b6772bbd37dfc6b8f948e260dbb474f5,Cross-Domain Metric Learning Based on Information Theory,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+1989a1f9ce18d8c2a0cee3196fe6fa363aab80c2,Robust online face tracking-by-detection,Eindhoven University of Technology,Eindhoven University of Technology,"Technische Universiteit Eindhoven, 2, De Rondom, Villapark, Eindhoven, Noord-Brabant, Nederland, 5600 MB, Nederland",51.44866020,5.49039957,edu,
+193debca0be1c38dabc42dc772513e6653fd91d8,Mnemonic Descent Method: A Recurrent Process Applied for End-to-End Face Alignment,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+193debca0be1c38dabc42dc772513e6653fd91d8,Mnemonic Descent Method: A Recurrent Process Applied for End-to-End Face Alignment,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+193debca0be1c38dabc42dc772513e6653fd91d8,Mnemonic Descent Method: A Recurrent Process Applied for End-to-End Face Alignment,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+192c7672216dcfb60da0e7953c1b044d1c209d3d,Marker-Less 3D Human Motion Capture with Monocular Image Sequence and Height-Maps,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+192c7672216dcfb60da0e7953c1b044d1c209d3d,Marker-Less 3D Human Motion Capture with Monocular Image Sequence and Height-Maps,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+192c7672216dcfb60da0e7953c1b044d1c209d3d,Marker-Less 3D Human Motion Capture with Monocular Image Sequence and Height-Maps,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+1987f56cb6bcba142f9a0a580c4351fb3e407b8c,GANerated Hands for Real-time 3D Hand Tracking from Monocular RGB,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+1921795408345751791b44b379f51b7dd54ebfa2,From Face Recognition to Models of Identity: A Bayesian Approach to Learning About Unknown Identities from Unsupervised Data,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+1921795408345751791b44b379f51b7dd54ebfa2,From Face Recognition to Models of Identity: A Bayesian Approach to Learning About Unknown Identities from Unsupervised Data,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+1903276bb462d3ccd4f1fac3a8e34a53045ef8a1,Attention-Aware Face Hallucination via Deep Reinforcement Learning,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+19cd6053bbb9b9c67da0c0881e31019f9ce28154,Random Laplace Feature Maps for Semigroup Kernels on Histograms,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+19cd6053bbb9b9c67da0c0881e31019f9ce28154,Random Laplace Feature Maps for Semigroup Kernels on Histograms,"IBM Research, North Carolina",IBM Research,"IBM, East Cornwallis Road, Research Triangle Park, Nelson, Durham County, North Carolina, 27709, USA",35.90422720,-78.85565763,company,
+19458454308a9f56b7de76bf7d8ff8eaa52b0173,Deep Features for Recognizing Disguised Faces in the Wild,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+19a3e5495b420c1f5da283bf39708a6e833a6cc5,Attributes and categories for generic instance search from one example,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+19a3e5495b420c1f5da283bf39708a6e833a6cc5,Attributes and categories for generic instance search from one example,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+19e0cc41b9f89492b6b8c2a8a58d01b8242ce00b,Improving Heterogeneous Face Recognition with Conditional Adversarial Networks,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+19bcd3bd41825a67f48db701a68030c5e6763152,Partial Person Re-Identification,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+19bcd3bd41825a67f48db701a68030c5e6763152,Partial Person Re-Identification,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+19bcd3bd41825a67f48db701a68030c5e6763152,Partial Person Re-Identification,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+19e7bdf8310f9038e1a9cf412b8dd2c77ff64c54,Facial Action Coding Using Multiple Visual Cues and a Hierarchy of Particle Filters,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+1938d85feafdaa8a65cb9c379c9a81a0b0dcd3c4,Monogenic Binary Coding: An Efficient Local Feature Extraction Approach to Face Recognition,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+19d1b811df60f86cbd5e04a094b07f32fff7a32a,Three-dimensional face recognition: an eigensurface approach,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+1958da636ce92d36c0985a6cb00696d90b2475f3,Upper Body Pose Estimation with Temporal Sequential Forests,University of Leeds,University of Leeds,"University of Leeds, Inner Ring Road, Woodhouse, Leeds, Yorkshire and the Humber, England, LS2 9NS, UK",53.80387185,-1.55245712,edu,
+1958da636ce92d36c0985a6cb00696d90b2475f3,Upper Body Pose Estimation with Temporal Sequential Forests,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+19fed85436eff43e60b9476e3d8742dfedba6384,A Novel Multiple Kernel Sparse Representation based Classification for Face Recognition,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+199c2df5f2847f685796c2523221c6436f022464,Self quotient image for face recognition,Bournemouth University,Bournemouth University,"Bournemouth University, BU footpaths, Poole, South West England, England, BH10 4HX, UK",50.74223495,-1.89433739,edu,
+19c0069f075b5b2d8ac48ad28a7409179bd08b86,Modifying the Memorability of Face Photographs,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+19676dd4422301a11aa5fe5e5316e2c412987302,Synthesizing Samples for Zero-shot Learning,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+19676dd4422301a11aa5fe5e5316e2c412987302,Synthesizing Samples for Zero-shot Learning,Lancaster University,Lancaster University,"Lancaster University, Library Avenue, Bowland College, Hala, Lancaster, Lancs, North West England, England, LA1 4AP, UK",54.00975365,-2.78757491,edu,
+1947791685597368400ca0429695658d1f68541c,Physiological responses to social and nonsocial stimuli in neurotypical adults with high and low levels of autistic traits: implications for understanding nonsocial drive in autism spectrum disorders.,University of Bath,University of Bath,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK",51.37914420,-2.32523320,edu,
+1947791685597368400ca0429695658d1f68541c,Physiological responses to social and nonsocial stimuli in neurotypical adults with high and low levels of autistic traits: implications for understanding nonsocial drive in autism spectrum disorders.,University of Bath,University of Bath,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK",51.37914420,-2.32523320,edu,
+19997d39447e570c7030a214eb4d81e3669ffd1f,Ultrasound confidence maps and applications in medical image processing,University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+19bfe748ec8957ec82a7fef0f2585bb14ab8bdd4,Cross-connected Networks for Multi-task Learning of Detection and Segmentation,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+19d1855e021561d6da9d0200bb18e47f51cddda6,Visual Storytelling,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+19d1855e021561d6da9d0200bb18e47f51cddda6,Visual Storytelling,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+19be4580df2e76b70a39af6e749bf189e1ca3975,Adversarial Binary Coding for Efficient Person Re-identification,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+19a9f658ea14701502d169dc086651b1d9b2a8ea,Structural models for face detection,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+19242af1c54b2c876b3a930f2406b9553f294fba,Learning Subjective Adjectives from Images by Stacked Convolutional Auto-Encoders,Kyoto University,Kyoto University,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.02749960,135.78154513,edu,
+197c406b95340dfcdef542db532e0f7a967b9cda,Softer-NMS: Rethinking Bounding Box Regression for Accurate Object Detection,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+19d3b02185ad36fb0b792f2a15a027c58ac91e8e,Im2Text: Describing Images Using 1 Million Captioned Photographs,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+19da9f3532c2e525bf92668198b8afec14f9efea,Challenge: Face verification across age progression using real-world data,University of Delaware,University of Delaware,"University of Delaware, South College Avenue, Newark, New Castle County, Delaware, 19713, USA",39.68103280,-75.75401840,edu,
+19868a469dc25ee0db00947e06c804b88ea94fd0,SP-SVM: Large Margin Classifier for Data on Multiple Manifolds,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+19868a469dc25ee0db00947e06c804b88ea94fd0,SP-SVM: Large Margin Classifier for Data on Multiple Manifolds,Santa Clara University,Santa Clara University,"Cowell Center, Accolti Way, Santa Clara, Santa Clara County, California, 95053, USA",37.34820285,-121.93563541,edu,
+19868a469dc25ee0db00947e06c804b88ea94fd0,SP-SVM: Large Margin Classifier for Data on Multiple Manifolds,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+192235f5a9e4c9d6a28ec0d333e36f294b32f764,Reconfiguring the Imaging Pipeline for Computer Vision,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+192235f5a9e4c9d6a28ec0d333e36f294b32f764,Reconfiguring the Imaging Pipeline for Computer Vision,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+192235f5a9e4c9d6a28ec0d333e36f294b32f764,Reconfiguring the Imaging Pipeline for Computer Vision,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+19878141fbb3117d411599b1a74a44fc3daf296d,Eye-State Action Unit Detection by Gabor Wavelets,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+19878141fbb3117d411599b1a74a44fc3daf296d,Eye-State Action Unit Detection by Gabor Wavelets,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+19f076998ba757602c8fec04ce6a4ca674de0e25,Fast and de-noise support vector machine training method based on fuzzy clustering method for large real world datasets,Islamic Azad University,Islamic Azad University,"دانشگاه آزاد اسلامی, همدان, بخش مرکزی شهرستان همدان, شهرستان همدان, استان همدان, ‏ایران‎",34.84529990,48.55962120,edu,
+191f8b564c4f90d2ba7423fcce4efd7e902f4f77,"Weakly Supervised Learning of Objects, Attributes and Their Associations",University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+4c130b216126434c8cd857431c9c4a7a7c10aca8,Can Saliency Information Benefit Image Captioning Models?,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+4c130b216126434c8cd857431c9c4a7a7c10aca8,Can Saliency Information Benefit Image Captioning Models?,Aalto University,Aalto University,"Aalto, 24, Otakaari, Otaniemi, Suur-Tapiola, Espoo, Helsingin seutukunta, Uusimaa, Etelä-Suomi, Manner-Suomi, 02150, Suomi",60.18558755,24.82427330,edu,
+4cb8a691a15e050756640c0a35880cdd418e2b87,Class-Based Matching of Object Parts,Weizmann Institute of Science,Weizmann Institute of Science,"מכון ויצמן למדע, שדרת מרכוס זיו, מעונות שיין, אחוזות הנשיא, רחובות, מחוז המרכז, NO, ישראל",31.90784990,34.81334092,edu,
+4c16fe03bb96328b715acfe40491a90034858800,The development of emotion-related neural circuitry in health and psychopathology.,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+4cc681239c8fda3fb04ba7ac6a1b9d85b68af31d,Mining Spatial and Spatio-Temporal ROIs for Action Recognition,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+4cfd770ccecae1c0b4248bc800d7fd35c817bbbd,A Discriminative Feature Learning Approach for Deep Face Recognition,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+4cfd770ccecae1c0b4248bc800d7fd35c817bbbd,A Discriminative Feature Learning Approach for Deep Face Recognition,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+4c462a76517f70588a8406ad2a9fa290b7d77e5a,Zero-Shot Recognition via Structured Prediction,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+4c87aafa779747828054cffee3125fcea332364d,View-Constrained Latent Variable Model for Multi-view Facial Expression Classification,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+4c87aafa779747828054cffee3125fcea332364d,View-Constrained Latent Variable Model for Multi-view Facial Expression Classification,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+4cb36aea73a328da8ffcdc616407bae3c908aa07,Re-ranking Person Re-identification with k-Reciprocal Encoding,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+4cb36aea73a328da8ffcdc616407bae3c908aa07,Re-ranking Person Re-identification with k-Reciprocal Encoding,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+4cdae53cebaeeebc3d07cf6cd36fecb2946f3e56,Photorealistic Facial Texture Inference Using Deep Neural Networks,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+4c293a98e929edaff6ed70c22a844c04e604e9fc,Clustering by fast search and merge of local density peaks for gene expression microarray data,Beijing Normal University,Beijing Normal University,"北京师范大学, 19, 新街口外大街, 西城区, 100875, 中国",39.96014155,116.35970438,edu,
+4c293a98e929edaff6ed70c22a844c04e604e9fc,Clustering by fast search and merge of local density peaks for gene expression microarray data,University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.05015580,14.46907327,edu,
+4c8e5fc0877d066516bb63e6c31eb1b8b5f967eb,"MODI, KOVASHKA: CONFIDENCE AND DIVERSITY FOR ACTIVE SELECTION 1 Confidence and Diversity for Active Selection of Feedback in Image Retrieval",University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+4c02d6874a761182f3776a5a04142e713cd939fa,Crowd Counting using Deep Recurrent Spatial-Aware Network,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+4c02d6874a761182f3776a5a04142e713cd939fa,Crowd Counting using Deep Recurrent Spatial-Aware Network,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+4c8ef4f98c6c8d340b011cfa0bb65a9377107970,Sentiment Recognition in Egocentric Photostreams,University of Groningen,University of Groningen,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland",53.21967825,6.56251482,edu,
+4c8ef4f98c6c8d340b011cfa0bb65a9377107970,Sentiment Recognition in Egocentric Photostreams,University of Barcelona,University of Barcelona,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España",41.38689130,2.16352385,edu,
+4c822785c29ceaf67a0de9c699716c94fefbd37d,A Key Volume Mining Deep Framework for Action Recognition,SenseTime,SenseTime,"China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1号-7",39.99300800,116.32988200,company,"1 Zhongguancun E Rd, Haidian Qu, China"
+4c822785c29ceaf67a0de9c699716c94fefbd37d,A Key Volume Mining Deep Framework for Action Recognition,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+4c822785c29ceaf67a0de9c699716c94fefbd37d,A Key Volume Mining Deep Framework for Action Recognition,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+4c33746fc5688da61059daa93978ac887f04cce8,Trainable performance upper bounds for image and video captioning,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+4c971c934a3c56d08af92117cc8b505e03754262,Sparse composite quantization,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+4c971c934a3c56d08af92117cc8b505e03754262,Sparse composite quantization,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+4c971c934a3c56d08af92117cc8b505e03754262,Sparse composite quantization,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+4c971c934a3c56d08af92117cc8b505e03754262,Sparse composite quantization,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+4c7c1c2d6eebd227b1e768eaafa5a61e27552567,A Secure and Privacy Friendly 2D+3D Face Authentication System Robust Under Pose and Illumation Variation,University of Piraeus,University of Piraeus,"Πανεπιστήμιο Πειραιώς, 80, Καραολή και Δημητρίου, Απόλλωνας, Νέο Φάληρο, Πειραιάς, Δήμος Πειραιώς, Περιφερειακή Ενότητα Πειραιώς, Περιφέρεια Αττικής, Αττική, 185 34, Ελλάδα",37.94173275,23.65303262,edu,
+4c8c89670a55e65ad9b92327d3386b5701dddabb,Automatic Eye Detection Error as a Predictor of Face Recognition Performance,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+4cefd47f3327b6d30bf99e61651b18319c4ee829,JUST at VQA-Med: A VGG-Seq2Seq Model,Jordan University of Science and Technology,Jordan University of Science and Technology,"Jordan University of Science and Technology, شارع الأردن, إربد‎, إربد, الأردن",32.49566485,35.99160717,edu,
+4ccf64fc1c9ca71d6aefdf912caf8fea048fb211,Light-weight Head Pose Invariant Gaze Tracking,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+4cc0bcc342647693c21a0ca2cd1e4064faf2fb47,"THE SMILING BEHAVIOR OF INFANTS AT HIGH- AND LOW-RISK FOR AUTISM, THEIR MOTHERS, AND AN UNFAMILIAR ADULT: THE EFFECTS OF INTERACTION TASK, INFANT RISK-STATUS, AND INFANT AGE by",University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+4cdb6144d56098b819076a8572a664a2c2d27f72,Face Synthesis for Eyeglass-Robust Face Recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+4cdb6144d56098b819076a8572a664a2c2d27f72,Face Synthesis for Eyeglass-Robust Face Recognition,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+4c6233765b5f83333f6c675d3389bbbf503805e3,Real-time high performance deformable model for face detection in the wild,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+4c078c2919c7bdc26ca2238fa1a79e0331898b56,Unconstrained Facial Landmark Localization with Backbone-Branches Fully-Convolutional Networks,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+4cfa8755fe23a8a0b19909fa4dec54ce6c1bd2f7,Efficient likelihood Bayesian constrained local model,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+4cac9eda716a0addb73bd7ffea2a5fb0e6ec2367,Representing Videos based on Scene Layouts for Recognizing Agent-in-Place Actions,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+4c4236b62302957052f1bbfbd34dbf71ac1650ec,Semi-supervised face recognition with LDA self-training,EURECOM,EURECOM,"Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France",43.61438600,7.07112500,edu,
+4cca640761c980c77a696a64ad3c1e95b82109be,Evaluating New Variants of Motion Interchange Patterns,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+4cd0da974af9356027a31b8485a34a24b57b8b90,Binarized Convolutional Landmark Localizers for Human Pose Estimation and Face Alignment with Limited Resources,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+4c56f119ebf7c71f2a83e4d79e8d88314b8e6044,An other-race effect for face recognition algorithms,National Institute of Standards and Technology,National Institute of Standards and Technology,"National Institute of Standards and Technology, Summer Walk Drive, Diamond Farms, Gaithersburg, Montgomery County, Maryland, 20878, USA",39.12549380,-77.22293475,edu,
+4c56f119ebf7c71f2a83e4d79e8d88314b8e6044,An other-race effect for face recognition algorithms,University of Texas at Dallas,University of Texas at Dallas,"University of Texas at Dallas, Richardson, Dallas County, Texas, 78080, USA",32.98207990,-96.75662780,edu,
+4c302936f43c30430b0b07debd6ed6ef260b5225,Learning Sparse Basis Vectors in Small-Sample Datasets through Regularized Non-Negative Matrix Factorization,Texas A&M University,Texas A&M University,"Texas A&M University, Horticulture Street, Park West, College Station, Brazos County, Texas, 77841, USA",30.61083650,-96.35212800,edu,
+4c302936f43c30430b0b07debd6ed6ef260b5225,Learning Sparse Basis Vectors in Small-Sample Datasets through Regularized Non-Negative Matrix Factorization,Texas A&M University,Texas A&M University,"Texas A&M University, Horticulture Street, Park West, College Station, Brazos County, Texas, 77841, USA",30.61083650,-96.35212800,edu,
+4c2f3c6384888ee81453b01bb81f35871f618c3f,Automatic 3D modelling of craniofacial form,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+4c170a0dcc8de75587dae21ca508dab2f9343974,FaceTracer: A Search Engine for Large Collections of Images with Faces,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+4c6decd726d04b916d9a2cdd468c64a8a0fc2fdb,Semantic Part Segmentation with Deep Learning,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+4cf0c6d3da8e20d6f184a4eaa6865d61680982b8,Face recognition based on 3D mesh model,Hong Kong University of Science and Technology,Hong Kong University of Science and Technology,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.33863040,114.26203370,edu,
+4c5b38ac5d60ab0272145a5a4d50872c7b89fe1b,Facial expression recognition with emotion-based feature fusion,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+4c523db33c56759255b2c58c024eb6112542014e,Patch-based within-object classification,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+4c523db33c56759255b2c58c024eb6112542014e,Patch-based within-object classification,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+261a7be6c650de797c7490aeeefba98662acaa20,Shell PCA: Statistical Shape Modelling in Shell Space,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+261a7be6c650de797c7490aeeefba98662acaa20,Shell PCA: Statistical Shape Modelling in Shell Space,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+267bd60e442d87c44eaae3290610138e63d663ab,PoseTrack: Joint Multi-person Pose Estimation and Tracking,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+267bd60e442d87c44eaae3290610138e63d663ab,PoseTrack: Joint Multi-person Pose Estimation and Tracking,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+2607f0093fecd4fee5244d56fcf3f53ff22e949e,Attribute-augmented semantic hierarchy: towards bridging semantic gap and intention gap in image retrieval,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+2607f0093fecd4fee5244d56fcf3f53ff22e949e,Attribute-augmented semantic hierarchy: towards bridging semantic gap and intention gap in image retrieval,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+2607f0093fecd4fee5244d56fcf3f53ff22e949e,Attribute-augmented semantic hierarchy: towards bridging semantic gap and intention gap in image retrieval,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+26b906ad166ed81e59d999ed9bb577f30de81e97,Forecasting Human Dynamics from Static Images,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+261c3e30bae8b8bdc83541ffa9331b52fcf015e6,Shape-from-shading Driven 3D Morphable Models for Illumination Insensitive Face Recognition,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+2625314d30a8dfaf918e93a8e7b243b2e078d191,An Adversarial Approach to Hard Triplet Generation,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+2625314d30a8dfaf918e93a8e7b243b2e078d191,An Adversarial Approach to Hard Triplet Generation,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+26f03693c50eb50a42c9117f107af488865f3dc1,Eigenhill vs. Eigenface and Eigenedge,Istanbul Technical University,Istanbul Technical University,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye",41.10427915,29.02231159,edu,
+26029a63b2377ef81e3898f55bb204fd853c3e31,PRISM: PRincipled Implicit Shape Model,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+264a2b946fae4af23c646cc08fc56947b5be82cf,Robust object recognition in RGB-D egocentric videos based on Sparse Affine Hull Kernel,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+2637a5d0b677eb3145e5bc484337f99b8486014f,L0 Regularized Stationary Time Estimation for Crowd Group Analysis,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+269248eb8a44da5248cef840f7079b1294dbf237,The Effect of Computer-Generated Descriptions on Photo-Sharing Experiences of People with Visual Impairments,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+269248eb8a44da5248cef840f7079b1294dbf237,The Effect of Computer-Generated Descriptions on Photo-Sharing Experiences of People with Visual Impairments,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+2609079d682998da2bc4315b55a29bafe4df414e,On rank aggregation for face recognition from videos,Indraprastha Institute of Information Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+268e91262c85ff1ce99dfc5751e2b6e44c808325,Frequency Domain Face Recognition,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+26d407b911d1234e8e3601e586b49316f0818c95,[POSTER] Feasibility of Corneal Imaging for Handheld Augmented Reality,Coburg University,Coburg University,"Hochschule für angewandte Wissenschaften Coburg, 2, Friedrich-Streib-Straße, Callenberg, Coburg, Oberfranken, Bayern, 96450, Deutschland",50.26506145,10.95196483,edu,
+264175a074c56667f90db9780580368925944577,Constructing Unrestricted Adversarial Examples with Generative Models,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+264175a074c56667f90db9780580368925944577,Constructing Unrestricted Adversarial Examples with Generative Models,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+264175a074c56667f90db9780580368925944577,Constructing Unrestricted Adversarial Examples with Generative Models,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+264175a074c56667f90db9780580368925944577,Constructing Unrestricted Adversarial Examples with Generative Models,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+26a471a491c2fb162ad403ed932b481d386306c7,Fast Zero-Shot Image Tagging,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+26c58e24687ccbe9737e41837aab74e4a499d259,"Codemaps - Segment, Classify and Search Objects Locally",University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+264837a7c36ac409119cf71b22415d5c227a1870,Facial expression recognition under a wide range of head poses,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+2633ee01b41edf9df7bf399e55e14d0c7412523a,Robust Face Recognition through Local Graph Matching,York University,York University,"York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada",43.77439110,-79.50481085,edu,
+2633ee01b41edf9df7bf399e55e14d0c7412523a,Robust Face Recognition through Local Graph Matching,University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.54724732,edu,
+26a44feb7a64db7986473ca801c251aa88748477,Unsupervised Learning of Mixture Models with a Uniform Background Component,Florida State University,Florida State University,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA",30.44235995,-84.29747867,edu,
+26a44feb7a64db7986473ca801c251aa88748477,Unsupervised Learning of Mixture Models with a Uniform Background Component,Florida State University,Florida State University,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA",30.44235995,-84.29747867,edu,
+264f7ab36ff2e23a1514577a6404229d7fe1242b,Facial Expression Recognition by De-expression Residue Learning,SUNY Binghamton,State University of New York at Binghamton,"State University of New York at Binghamton, East Drive, Hinman, Willow Point, Vestal Town, Broome County, New York, 13790, USA",42.08779975,-75.97066066,edu,
+260fca0b9eb144fc54f1872b8cc418ae3fdce756,Class-specific nonlinear subspace learning based on optimized class representation,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+26acc572c644d57445170a309daf7765aca6ab45,Learning for Sequential Classification,Czech Technical University,Czech Technical University,"České vysoké učení technické v Praze, Resslova, Nové Město, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 11121, Česko",50.07642960,14.41802312,edu,
+263977d8867a68ac52ca4f7e19048ba2a51cda21,A Quantitative Assessment of 3D Facial Key Point Localization Fitting 2D Shape Models to Curvature Information,Dublin City University,DUBLIN CITY UNIVERSITY,"Dublin City University Glasnevin Campus, Lower Car Park, Wad, Whitehall A ED, Dublin 9, Dublin, County Dublin, Leinster, D09 FW22, Ireland",53.38522185,-6.25740874,edu,
+2670c4b556264605c32326f49ab4a8b4e83ab57f,Looking ahead: Anticipatory cueing of attention to objects others will look at.,Plymouth University,Plymouth University,"Plymouth University, Portland Square, Barbican, Plymouth, South West England, England, PL4 6AP, UK",50.37552690,-4.13937687,edu,
+2670c4b556264605c32326f49ab4a8b4e83ab57f,Looking ahead: Anticipatory cueing of attention to objects others will look at.,University of East Anglia,University of East Anglia,"Arts (Lower Walkway Level), The Square, Westfield View, Earlham, Norwich, Norfolk, East of England, England, NR4 7TJ, UK",52.62215710,1.24091360,edu,
+260928b80e6bb414f70aa8ed678d8808d214036b,Periocular Recognition Using CNN Features Off-the-Shelf,Halmstad University,Halmstad University,"Högskolan i Halmstad, 3, Kristian IV:s väg, Larsfrid, Nyhem, Halmstad, Hallands län, Götaland, 301 18, Sverige",56.66340325,12.87929727,edu,
+26aa0aff1ea1baf848a521363cc455044690e090,A 2D + 3D Rich Data Approach to Scene Understanding,MIT,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+260a975bb1562127634e3447890447d593e4d6dc,Tree-Structured Stick Breaking for Hierarchical Data,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+260a975bb1562127634e3447890447d593e4d6dc,Tree-Structured Stick Breaking for Hierarchical Data,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+266ed43dcea2e7db9f968b164ca08897539ca8dd,Beyond Principal Components: Deep Boltzmann Machines for face modeling,Concordia University,Concordia University,"Concordia University, 2811, Northeast Holman Street, Concordia, Portland, Multnomah County, Oregon, 97211, USA",45.57022705,-122.63709346,edu,
+266ed43dcea2e7db9f968b164ca08897539ca8dd,Beyond Principal Components: Deep Boltzmann Machines for face modeling,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+26eb2c900814707ae962184ad4173e754247a80a,Resolving Language and Vision Ambiguities Together: Joint Segmentation & Prepositional Attachment Resolution in Captioned Scenes,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+266b61c5696c83c069e67d242ad5b7d0f5f1dee9,DVQA: Understanding Data Visualizations via Question Answering,Rochester Institute of Technology,Rochester Institute of Technology,"Rochester Institute of Technology (RIT), 1, Lomb Memorial Drive, Bailey, Henrietta Town, Monroe County, New York, 14623, USA",43.08250655,-77.67121663,edu,
+26cd9c812c279347ae96db31cee1cbee0f646fa4,Harnessing ISA diversity: Design of a heterogeneous-ISA chip multiprocessor,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+260aed27abfe751b3d90aad9c0805d35c359ebd5,Efficient Learning of Domain-invariant Image Representations,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+2642810e6c74d900f653f9a800c0e6a14ca2e1c7,Projection Bank: From High-Dimensional Data to Medium-Length Binary Codes,Northumbria University,Northumbria University,"Northumbria University, Birkdale Close, High Heaton, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE7 7TP, UK",55.00306320,-1.57463231,edu,
+26c591cbb35d4d031d13e27a59adccb74bc89bc6,Learning to Forecast and Refine Residual Motion for Image-to-Video Generation,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+26c591cbb35d4d031d13e27a59adccb74bc89bc6,Learning to Forecast and Refine Residual Motion for Image-to-Video Generation,Binghamton University,Binghamton University,"Binghamton University Downtown Center, Washington Street, Downtown, Binghamton, Broome County, New York, 13901, USA",42.09580770,-75.91455689,edu,
+26dacf88181021939c09ffb3529ffd2854fc7ee6,A Layered Approach for Robust Spatial Virtual Human Pose Reconstruction Using a Still Image,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+26c9e57116061594ef843141a6a8bc49759f766c,Beyond Physical Connections: Tree Models in Human Pose Estimation,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+26d86dba4455e8322bd9ea53f490f3bbf95784d5,Geometry-Contrastive GAN for Facial Expression Transfer,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+2654ef92491cebeef0997fd4b599ac903e48d07a,Facial expression recognition from near-infrared video sequences,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+2679e4f84c5e773cae31cef158eb358af475e22f,Adaptive Deep Metric Learning for Identity-Aware Facial Expression Recognition,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+2679e4f84c5e773cae31cef158eb358af475e22f,Adaptive Deep Metric Learning for Identity-Aware Facial Expression Recognition,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+2679e4f84c5e773cae31cef158eb358af475e22f,Adaptive Deep Metric Learning for Identity-Aware Facial Expression Recognition,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+26919156cec1cc5bec03f63f566c934b55b682cd,From Pictorial Structures to deformable structures,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+26d3f57dd09efff6315ae0064cdad4877f5297d7,Multiple Object Tracking by Learning Feature Representation and Distance Metric Jointly,Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+26d3f57dd09efff6315ae0064cdad4877f5297d7,Multiple Object Tracking by Learning Feature Representation and Distance Metric Jointly,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+265c53ce3fbdb3f2623c4b20f38b94d3ed1d878c,Face Destylization,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+265c53ce3fbdb3f2623c4b20f38b94d3ed1d878c,Face Destylization,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+265c53ce3fbdb3f2623c4b20f38b94d3ed1d878c,Face Destylization,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+212165422ce25ccabb4d354fae2d2352b60f2b7d,Auto-Classification of Retinal Diseases in the Limit of Sparse Data Using a Two-Streams Machine Learning Model,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+212165422ce25ccabb4d354fae2d2352b60f2b7d,Auto-Classification of Retinal Diseases in the Limit of Sparse Data Using a Two-Streams Machine Learning Model,University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.54724732,edu,
+212165422ce25ccabb4d354fae2d2352b60f2b7d,Auto-Classification of Retinal Diseases in the Limit of Sparse Data Using a Two-Streams Machine Learning Model,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+21c99706bb26e9012bfb4d8d48009a3d45af59b2,Neural Module Networks,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+217a21d60bb777d15cd9328970cab563d70b5d23,Hidden Factor Analysis for Age Invariant Face Recognition,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+217a21d60bb777d15cd9328970cab563d70b5d23,Hidden Factor Analysis for Age Invariant Face Recognition,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+21a2f67b21905ff6e0afa762937427e92dc5aa0b,Extra Facial Landmark Localization via Global Shape Reconstruction,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"Columbus, OH 43210, USA",40.01419050,-83.03091430,edu,
+2179e34ef3cca174101f57e3cef8e2360fc64303,InverseNet: Solving Inverse Problems with Splitting Networks,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+215f26774779e260087c66eda49e22429619db94,Attributes Make Sense on Segmented Objects,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+21258aa3c48437a2831191b71cd069c05fb84cf7,A Robust and Efficient Doubly Regularized Metric Learning Approach,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+21f5652d4f88ac039c58aa530328e65a39eb7b38,Neural Processing of Facial Identity and Emotion in Infants at High-Risk for Autism Spectrum Disorders,MIT,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+21f5652d4f88ac039c58aa530328e65a39eb7b38,Neural Processing of Facial Identity and Emotion in Infants at High-Risk for Autism Spectrum Disorders,City University of New York,The City University of New York,"Lehman College of the City University of New York, 250, Bedford Park Boulevard West, Bedford Park, The Bronx, Bronx County, NYC, New York, 10468, USA",40.87228250,-73.89489171,edu,
+21f5652d4f88ac039c58aa530328e65a39eb7b38,Neural Processing of Facial Identity and Emotion in Infants at High-Risk for Autism Spectrum Disorders,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+21f5652d4f88ac039c58aa530328e65a39eb7b38,Neural Processing of Facial Identity and Emotion in Infants at High-Risk for Autism Spectrum Disorders,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+214eb90d0386379972cded05e9f57b884edb1675,Continuous Pain Intensity Estimation from Facial Expressions,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+212ffbe247d3cc3cb32a12c43a74a1146e3fe18c,3D Human Pose Estimation = 2D Pose Estimation + Matching,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+212ffbe247d3cc3cb32a12c43a74a1146e3fe18c,3D Human Pose Estimation = 2D Pose Estimation + Matching,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+21bd9374c211749104232db33f0f71eab4df35d5,Integrating facial makeup detection into multimodal biometric user verification system,EURECOM,EURECOM,"Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France",43.61438600,7.07112500,edu,
+214c966d1f9c2a4b66f4535d9a0d4078e63a5867,Brainwash: A Data System for Feature Engineering,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+21104bcf07ef0269ab133471a3200b9bf94b2948,Beyond Comparing Image Pairs: Setwise Active Learning for Relative Attributes,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+215f43a46ad30cf0574a2a10cd81fe7741768746,Virtual Human Bodies with Clothing and Hair: From Images to Animation,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+215f43a46ad30cf0574a2a10cd81fe7741768746,Virtual Human Bodies with Clothing and Hair: From Images to Animation,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+215f43a46ad30cf0574a2a10cd81fe7741768746,Virtual Human Bodies with Clothing and Hair: From Images to Animation,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+212211d642aa75f66f8ad3ec04da3a4cc089a5b3,Learning to Localize Little Landmarks,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+21bdcd9be2e9e75ec1d060d8d748a372d9ced230,Neural mechanisms in Williams syndrome: a unique window to genetic influences on cognition and behaviour,University of Louisville,University of Louisville,"University of Louisville, South Brook Street, Louisville, Jefferson County, Kentucky, 40208, USA",38.21675650,-85.75725023,edu,
+218982f0878a3de667fac2bb18b9f50949aefc1c,Multi-pose multi-target tracking for activity understanding,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+218982f0878a3de667fac2bb18b9f50949aefc1c,Multi-pose multi-target tracking for activity understanding,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+211435a4e14d00f4aaed191acfb548185ee800b9,Visual Saliency Based Multiple Objects Segmentation and its Parallel Implementation for Real-Time Vision Processing,Akita Prefectural University,Akita Prefectural University,"秋田県立大学, 秋田天王線, 潟上市, 秋田県, 東北地方, 011-0946, 日本",39.80114990,140.04591160,edu,
+2112edee4a60602e9e5dc5e4f9e352f983f0c8c1,Improved Foreground Detection via Block-Based Classifier Cascade With Probabilistic Decision Integration,"CSIRO, Australia","NICTA, PO Box 6020, St Lucia, QLD 4067, Australia","Research Way, Clayton VIC 3168, Australia",-37.90627370,145.13194490,edu,f.k.a. NICTA
+2112edee4a60602e9e5dc5e4f9e352f983f0c8c1,Improved Foreground Detection via Block-Based Classifier Cascade With Probabilistic Decision Integration,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+21f990f3bb8c7dfe57f31d912fb555819f1a64bd,Randomness and sparsity induced codebook learning with application to cancer image classification,Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+21f990f3bb8c7dfe57f31d912fb555819f1a64bd,Randomness and sparsity induced codebook learning with application to cancer image classification,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+214f552070a7eb5ef5efe0d6ffeaaa594a3c3535,Learning Everything about Anything: Webly-Supervised Visual Concept Learning,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+2106de484c3f1e3a21f2708effc181f51ca7d709,Social interaction detection using a multi-sensor approach,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+2161f6b7ee3c0acc81603b01dc0df689683577b9,End-to-End Deep Learning for Person Search,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+2161f6b7ee3c0acc81603b01dc0df689683577b9,End-to-End Deep Learning for Person Search,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+217de4ff802d4904d3f90d2e24a29371307942fe,"POOF: Part-Based One-vs.-One Features for Fine-Grained Categorization, Face Verification, and Attribute Estimation",Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+217de4ff802d4904d3f90d2e24a29371307942fe,"POOF: Part-Based One-vs.-One Features for Fine-Grained Categorization, Face Verification, and Attribute Estimation",Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+2103bb6772bf01e43a8a4e8e34f16baac7d7c331,Information theoretic sensor management for multi-target tracking with a single pan-tilt-zoom camera,University of Florence,University of Florence,"Piazza di San Marco, 4, 50121 Firenze FI, Italy",43.77764260,11.25976500,edu,
+21d1315761131ea6b3e2afe7a745b606341616fd,Generative Adversarial Network with Spatial Attention for Face Attribute Editing,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+21d1315761131ea6b3e2afe7a745b606341616fd,Generative Adversarial Network with Spatial Attention for Face Attribute Editing,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+211fe99400bde5116efea3b42719d00a34931dcd,Multimodal Differential Network for Visual Question Generation,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+21e158bcda4e10da88ee8da3799a6144b60d791f,Population Matching Discrepancy and Applications in Deep Learning,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+2135a3d9f4b8f5771fa5fc7c7794abf8c2840c44,Lessons from collecting a million biometric samples,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+2135a3d9f4b8f5771fa5fc7c7794abf8c2840c44,Lessons from collecting a million biometric samples,National Institute of Standards and Technology,National Institute of Standards and Technology,"National Institute of Standards and Technology, Summer Walk Drive, Diamond Farms, Gaithersburg, Montgomery County, Maryland, 20878, USA",39.12549380,-77.22293475,edu,
+210b98394c3be96e7fd75d3eb11a391da1b3a6ca,Spatiotemporal Derivative Pattern: A Dynamic Texture Descriptor for Video Matching,Tafresh University,Tafresh University,"دانشگاه تفرش, پاسداران, خرازان, بخش مرکزی, شهرستان تفرش, استان مرکزی, ‏ایران‎",34.68092465,50.05341352,edu,
+210b98394c3be96e7fd75d3eb11a391da1b3a6ca,Spatiotemporal Derivative Pattern: A Dynamic Texture Descriptor for Video Matching,Islamic Azad University,Islamic Azad University,"دانشگاه آزاد اسلامی, همدان, بخش مرکزی شهرستان همدان, شهرستان همدان, استان همدان, ‏ایران‎",34.84529990,48.55962120,edu,
+210b98394c3be96e7fd75d3eb11a391da1b3a6ca,Spatiotemporal Derivative Pattern: A Dynamic Texture Descriptor for Video Matching,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+21765df4c0224afcc25eb780bef654cbe6f0bc3a,Multi-channel Correlation Filters,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+21765df4c0224afcc25eb780bef654cbe6f0bc3a,Multi-channel Correlation Filters,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+21fa37258834c2e3f075a8465d8de1c178cdaaf5,Shape-based pedestrian parsing,Beijing Jiaotong University,Beijing Jiaotong University,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国",39.94976005,116.33629046,edu,
+216c6d29a6f57c37ef8f26f88b6ec9be5b855a66,From VQA to Multimodal CQA: Adapting Visual QA Models for Community QA Tasks,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+216c6d29a6f57c37ef8f26f88b6ec9be5b855a66,From VQA to Multimodal CQA: Adapting Visual QA Models for Community QA Tasks,Waseda University,Waseda University,"早稲田大学 北九州キャンパス, 2-2, 有毛引野線, 八幡西区, 北九州市, 福岡県, 九州地方, 808-0135, 日本",33.88987280,130.70856205,edu,
+21bebef8ced5d1e77667c667b54287782556eebc,Image processing and recognition for biological images,Kyushu University,Kyushu University,"伊都ゲストハウス, 桜井太郎丸線, 西区, 福岡市, 福岡県, 九州地方, 819−0395, 日本",33.59914655,130.22359848,edu,
+214959c01b73e2d6eb4a39607de6fdc062526047,Collaborative Sparse Approximation for Multiple-Shot Across-Camera Person Re-identification,Kyoto University,Kyoto University,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.02749960,135.78154513,edu,
+214959c01b73e2d6eb4a39607de6fdc062526047,Collaborative Sparse Approximation for Multiple-Shot Across-Camera Person Re-identification,Kyoto University,Kyoto University,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.02749960,135.78154513,edu,
+212608e00fc1e8912ff845ee7a4a67f88ba938fc,Coupled Deep Learning for Heterogeneous Face Recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+210fd81446006bf542b595fa0743b808cb86acbf,Combining Orientational Pooling Features for Scene Recognition,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+210fd81446006bf542b595fa0743b808cb86acbf,Combining Orientational Pooling Features for Scene Recognition,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+21cfe8372be299be84818b4bcbe07fa6736540b6,Articulated Pose Estimation Using Discriminative Armlet Classifiers,Facebook,Facebook,"250 Bryant St, Mountain View, CA 94041, USA",37.39367170,-122.08072620,company,"Facebook, Mountain View, CA"
+4dc056cfe5d06cb9e4cbf60ef5044f956ab92b91,Investigating Gait Recognition in the Short-Wave Infrared (SWIR) Spectrum: Dataset and Challenges,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+4dc056cfe5d06cb9e4cbf60ef5044f956ab92b91,Investigating Gait Recognition in the Short-Wave Infrared (SWIR) Spectrum: Dataset and Challenges,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+4d925db7c9e3cca2e8fed644f750d218a48cd081,Automatic Concept Discovery from Parallel Text and Visual Corpora,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+4d625677469be99e0a765a750f88cfb85c522cce,Understanding Hand-Object Manipulation with Grasp Types and Object Attributes,Institute of Industrial Science,Institute of Industrial Science,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本",36.05238585,140.11852361,edu,
+4d625677469be99e0a765a750f88cfb85c522cce,Understanding Hand-Object Manipulation with Grasp Types and Object Attributes,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+4d625677469be99e0a765a750f88cfb85c522cce,Understanding Hand-Object Manipulation with Grasp Types and Object Attributes,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+4d625677469be99e0a765a750f88cfb85c522cce,Understanding Hand-Object Manipulation with Grasp Types and Object Attributes,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+4d625677469be99e0a765a750f88cfb85c522cce,Understanding Hand-Object Manipulation with Grasp Types and Object Attributes,Institute of Industrial Science,Institute of Industrial Science,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本",36.05238585,140.11852361,edu,
+4d625677469be99e0a765a750f88cfb85c522cce,Understanding Hand-Object Manipulation with Grasp Types and Object Attributes,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+4dcc09fc3718721b41460dda559c1c6f507287b7,A Comprehensive Study on Upper-Body Detection with Deep Neural Networks,Tongji University,Tongji University,"同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国",31.28473925,121.49694909,edu,
+4dcc09fc3718721b41460dda559c1c6f507287b7,A Comprehensive Study on Upper-Body Detection with Deep Neural Networks,Tongji University,Tongji University,"同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国",31.28473925,121.49694909,edu,
+4d1757aacbc49c74a5d4e53259c92ab0e47544da,Weakly and Semi Supervised Human Body Part Parsing via Pose-Guided Knowledge Transfer,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+4dcd40005726e66a0e4ed33635b38bb8107a671a,Tasting families of features for image classification,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+4d15254f6f31356963cc70319ce416d28d8924a3,Quo vadis Face Recognition?,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+4d15254f6f31356963cc70319ce416d28d8924a3,Quo vadis Face Recognition?,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+4d15254f6f31356963cc70319ce416d28d8924a3,Quo vadis Face Recognition?,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+4d89a8228bcf17f444d82ea271a548cb16fd0786,Multiclass Object Recognition Inspired by the Ventral Visual Pathway,University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+4d89a8228bcf17f444d82ea271a548cb16fd0786,Multiclass Object Recognition Inspired by the Ventral Visual Pathway,University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+4dff129a6f988d78c457ece463b774c3d81ac5c7,Emotion recognition in the wild from videos using images,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+4dff129a6f988d78c457ece463b774c3d81ac5c7,Emotion recognition in the wild from videos using images,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+4dff129a6f988d78c457ece463b774c3d81ac5c7,Emotion recognition in the wild from videos using images,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+4dff129a6f988d78c457ece463b774c3d81ac5c7,Emotion recognition in the wild from videos using images,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+4d42d42de4445545b5e3045be296f917acd33ab5,Convolutional Neural Networks for Aerial Multi-Label Pedestrian Detection,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+4d9c64750ef4565dc47cec0c513458b53dd5c9a7,Unmanned Aerial Vehicle Images,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+4d9c64750ef4565dc47cec0c513458b53dd5c9a7,Unmanned Aerial Vehicle Images,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+4da5f0c1d07725a06c6b4a2646e31ea3a5f14435,End-to-End Training of Hybrid CNN-CRF Models for Semantic Segmentation using Structured Learning,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+4d6462fb78db88afff44561d06dd52227190689c,Face-to-Face Social Activity Detection Using Data Collected with a Wearable Device,University of Barcelona,University of Barcelona,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España",41.38689130,2.16352385,edu,
+4d6e98fb5fcb7b5983f615a45ac1d81d1b570ca0,Unsupervised Cross-Dataset Transfer Learning for Person Re-identification,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+4d6e98fb5fcb7b5983f615a45ac1d81d1b570ca0,Unsupervised Cross-Dataset Transfer Learning for Person Re-identification,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+4dd00e37f4129b0c62e111906fd8b239520c08e9,Learning to Separate Domains in Generalized Zero-Shot and Open Set Learning: a probabilistic perspective,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+4dd00e37f4129b0c62e111906fd8b239520c08e9,Learning to Separate Domains in Generalized Zero-Shot and Open Set Learning: a probabilistic perspective,University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+4dd71a097e6b3cd379d8c802460667ee0cbc8463,Real-time multi-view facial landmark detector learned by the structured output SVM,"National Institute of Informatics, Japan","National Institute of Informatics, Japan","2 Chome-1-2 Hitotsubashi, Chiyoda, Tokyo 100-0003, Japan",35.69248530,139.75825330,edu,
+4db0968270f4e7b3fa73e41c50d13d48e20687be,Fashion Forward: Forecasting Visual Style in Fashion,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+4db0968270f4e7b3fa73e41c50d13d48e20687be,Fashion Forward: Forecasting Visual Style in Fashion,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+4d803109f3d9cca7c514db21a0494972d5681faa,Attribute Adaptation for Personalized Image Search,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+4d9c02567e7b9e065108eb83ea3f03fcff880462,Towards Facial Expression Recognition in the Wild: A New Database and Deep Recognition System,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+4d3c4c3fe8742821242368e87cd72da0bd7d3783,Hybrid Deep Learning for Face Verification,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+4d3c4c3fe8742821242368e87cd72da0bd7d3783,Hybrid Deep Learning for Face Verification,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+4d3c4c3fe8742821242368e87cd72da0bd7d3783,Hybrid Deep Learning for Face Verification,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+4d01d78544ae0de3075304ff0efa51a077c903b7,ART Network based Face Recognition with Gabor Filters,Jahangirnagar University,Jahangirnagar University,"Jahangirnagar University, 1342, University Main Road, সাভার, সাভার উপজেলা, ঢাকা জেলা, ঢাকা বিভাগ, 1342, বাংলাদেশ",23.88331200,90.26939210,edu,
+4dd2be07b4f0393995b57196f8fc79d666b3aec5,Sparse localized facial motion dictionary learning for facial expression recognition,Yeungnam University,Yeungnam University,"영남대, 대학로, 부적리, 경산시, 경북, 712-749, 대한민국",35.83654030,128.75343090,edu,
+4db6456b6933d0ae60bd1d7bb7ae01cea2ca9a9d,Deep Learning in Information Security,Eindhoven University of Technology,Eindhoven University of Technology,"Technische Universiteit Eindhoven, 2, De Rondom, Villapark, Eindhoven, Noord-Brabant, Nederland, 5600 MB, Nederland",51.44866020,5.49039957,edu,
+4db6456b6933d0ae60bd1d7bb7ae01cea2ca9a9d,Deep Learning in Information Security,Eindhoven University of Technology,Eindhoven University of Technology,"Technische Universiteit Eindhoven, 2, De Rondom, Villapark, Eindhoven, Noord-Brabant, Nederland, 5600 MB, Nederland",51.44866020,5.49039957,edu,
+4d8ce7669d0346f63b20393ffaa438493e7adfec,Similarity Features for Facial Event Analysis,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+4d8ce7669d0346f63b20393ffaa438493e7adfec,Similarity Features for Facial Event Analysis,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+4d1e46b1dcec1c9cbc4e7ff80dbf73e5e7ebcd67,WebCaricature: a benchmark for caricature recognition,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+4d1e46b1dcec1c9cbc4e7ff80dbf73e5e7ebcd67,WebCaricature: a benchmark for caricature recognition,University of Manchester,University of Manchester,"University of Manchester - Main Campus, Brunswick Street, Curry Mile, Ardwick, Manchester, Greater Manchester, North West England, England, M13 9NR, UK",53.46600455,-2.23300881,edu,
+4d16337cc0431cd43043dfef839ce5f0717c3483,A Scalable and Privacy-Aware IoT Service for Live Video Analytics,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+4d16337cc0431cd43043dfef839ce5f0717c3483,A Scalable and Privacy-Aware IoT Service for Live Video Analytics,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+4d16337cc0431cd43043dfef839ce5f0717c3483,A Scalable and Privacy-Aware IoT Service for Live Video Analytics,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+4d16337cc0431cd43043dfef839ce5f0717c3483,A Scalable and Privacy-Aware IoT Service for Live Video Analytics,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+4d16337cc0431cd43043dfef839ce5f0717c3483,A Scalable and Privacy-Aware IoT Service for Live Video Analytics,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+4d0b3921345ae373a4e04f068867181647d57d7d,Learning Attributes from Human Gaze,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+4d58f886f5150b2d5e48fd1b5a49e09799bf895d,Texas 3D Face Recognition Database,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+4d7b66a123135d37689005816aa15ab31167b6d3,Evaluation of the Impetuses of Scan Path in Real Scene Searching,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+4d0e64da142299039841660ea03f24575174afa8,Deformation Analysis for 3D Face Matching,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+4d653b19ce1c7cba79fc2f11271fb90f7744c95c,Light-Weight RefineNet for Real-Time Semantic Segmentation,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+4d267098356dc4cfcd3f5aefcc26588ffb23b8dc,Smart Hashing Update for Fast Response,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+75aef130afb8c862575d457db6e168e8d77ae4f0,Content-based search and browsing in semantic multimedia retrieval,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+7574f999d2325803f88c4915ba8f304cccc232d1,Transfer Learning for Cross-Dataset Recognition: A Survey,University of Wollongong,University of Wollongong,"University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia",-34.40505545,150.87834655,edu,
+7574f999d2325803f88c4915ba8f304cccc232d1,Transfer Learning for Cross-Dataset Recognition: A Survey,University of Wollongong,University of Wollongong,"University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia",-34.40505545,150.87834655,edu,
+7574f999d2325803f88c4915ba8f304cccc232d1,Transfer Learning for Cross-Dataset Recognition: A Survey,University of Wollongong,University of Wollongong,"University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia",-34.40505545,150.87834655,edu,
+75fcbb01bc7e53e9de89cb1857a527f97ea532ce,"Detection of Facial Landmarks from Neutral, Happy, and Disgust Facial Images",University of Tampere,University of Tampere,"Tampereen yliopisto, 4, Kalevantie, Ratinanranta, Tulli, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33100, Suomi",61.49412325,23.77920678,edu,
+756eed9fe591cf53c7ebbaba05ceeb39b212f802,Learning to Refine Human Pose Estimation,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+75e5a1a64d9d27dbb054fc8b8d47f0e23cbbbfa4,The importance of internal facial features in learning new faces.,University of Plymouth,University of Plymouth,"Charles Seale-Hayne Library, Portland Square, Barbican, Plymouth, South West England, England, PL4 6AP, UK",50.37525010,-4.13927692,edu,
+75e5a1a64d9d27dbb054fc8b8d47f0e23cbbbfa4,The importance of internal facial features in learning new faces.,Bournemouth University,Bournemouth University,"Bournemouth University, BU footpaths, Poole, South West England, England, BH10 4HX, UK",50.74223495,-1.89433739,edu,
+75e5a1a64d9d27dbb054fc8b8d47f0e23cbbbfa4,The importance of internal facial features in learning new faces.,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+75e5a1a64d9d27dbb054fc8b8d47f0e23cbbbfa4,The importance of internal facial features in learning new faces.,Plymouth University,Plymouth University,"Plymouth University, Portland Square, Barbican, Plymouth, South West England, England, PL4 6AP, UK",50.37552690,-4.13937687,edu,
+756275128fae4ffe8389261e498f9bb49a8381b2,Designing and Testing an Anonymous Face Recognition System,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+759f4f7601292c37e2f1c4a5a9f53075e9e355ec,Instance Retrieval at Fine-grained Level Using Multi-Attribute Recognition,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+757e4cb981e807d83539d9982ad325331cb59b16,Demographics versus Biometric Automatic Interoperability,Sapienza University of Rome,Sapienza University of Rome,"Piazzale Aldo Moro, 5, 00185 Roma RM, Italy",41.90376260,12.51443840,edu,
+757e4cb981e807d83539d9982ad325331cb59b16,Demographics versus Biometric Automatic Interoperability,George Mason University,George Mason University,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA",38.83133325,-77.30798839,edu,
+75f302f1372136c5e43e523bacc0a2ddf04c3237,Schema Independent Relational Learning,Oregon State University,Oregon State University,"OSU Beaver Store, 538, Southwest 6th Avenue, Portland Downtown, Portland, Multnomah County, Oregon, 97204, USA",45.51982890,-122.67797964,edu,
+75f302f1372136c5e43e523bacc0a2ddf04c3237,Schema Independent Relational Learning,Oregon State University,Oregon State University,"OSU Beaver Store, 538, Southwest 6th Avenue, Portland Downtown, Portland, Multnomah County, Oregon, 97204, USA",45.51982890,-122.67797964,edu,
+75f302f1372136c5e43e523bacc0a2ddf04c3237,Schema Independent Relational Learning,Oregon State University,Oregon State University,"OSU Beaver Store, 538, Southwest 6th Avenue, Portland Downtown, Portland, Multnomah County, Oregon, 97204, USA",45.51982890,-122.67797964,edu,
+75f302f1372136c5e43e523bacc0a2ddf04c3237,Schema Independent Relational Learning,Oregon State University,Oregon State University,"OSU Beaver Store, 538, Southwest 6th Avenue, Portland Downtown, Portland, Multnomah County, Oregon, 97204, USA",45.51982890,-122.67797964,edu,
+75c30403bad798381afa70f225e402ee7d84cd34,Learning to generate images with perceptual similarity metrics,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+759f7f9e4a363223dc06903ef88fed27a3a64826,Modeling and Analysis of Dynamic Behaviors of Web Image Collections,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+759f7f9e4a363223dc06903ef88fed27a3a64826,Modeling and Analysis of Dynamic Behaviors of Web Image Collections,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+759f7f9e4a363223dc06903ef88fed27a3a64826,Modeling and Analysis of Dynamic Behaviors of Web Image Collections,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+759f7f9e4a363223dc06903ef88fed27a3a64826,Modeling and Analysis of Dynamic Behaviors of Web Image Collections,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+75fd9acf5e5b7ed17c658cc84090c4659e5de01d,Project-Out Cascaded Regression with an application to face alignment,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+75a54f49fccee29faca8931fa8ba700030dcaa75,Ringtail: A Generalized Nowcasting System,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+75a54f49fccee29faca8931fa8ba700030dcaa75,Ringtail: A Generalized Nowcasting System,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+75a54f49fccee29faca8931fa8ba700030dcaa75,Ringtail: A Generalized Nowcasting System,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+75a54f49fccee29faca8931fa8ba700030dcaa75,Ringtail: A Generalized Nowcasting System,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+75a54f49fccee29faca8931fa8ba700030dcaa75,Ringtail: A Generalized Nowcasting System,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+75b1d8339085ab03f45c0316b976755b6c5da9e9,SMD: A Locally Stable Monotonic Change Invariant Feature Descriptor,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+7523ead2a91191f0ecfb88fba5c0f2deeddaa256,Generating Chinese Captions for Flickr30K Images,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+7523ead2a91191f0ecfb88fba5c0f2deeddaa256,Generating Chinese Captions for Flickr30K Images,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+7552a6dbee4a915b578453ed9f35a4c6cc114aa1,Now You Shake Me : Towards Automatic 4 D Cinema,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+75bf3b6109d7a685236c8589f8ead7d769ea863f,Model Selection with Nonlinear Embedding for Unsupervised Domain Adaptation,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+75003c069da53911f714d8d28b121ed9b29e0911,SORT: Second-Order Response Transform for Visual Recognition,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+75003c069da53911f714d8d28b121ed9b29e0911,SORT: Second-Order Response Transform for Visual Recognition,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+759cf57215fcfdd8f59c97d14e7f3f62fafa2b30,Real-time Distracted Driver Posture Classification,American University in Cairo,The American University in Cairo,"الجامعة الأمريكية بالقاهرة, شارع القصر العينى, القاهرة القديمة, جاردن سيتي, القاهرة, محافظة القاهرة, 11582, مصر",30.04287695,31.23664139,edu,
+751970d4fb6f61d1b94ca82682984fd03c74f127,Array-based Electromyographic Silent Speech Interface,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+759b28cb6527f8820f1cffc3581884c5caa19091,Neighbor-Sensitive Hashing,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+759b28cb6527f8820f1cffc3581884c5caa19091,Neighbor-Sensitive Hashing,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+759b28cb6527f8820f1cffc3581884c5caa19091,Neighbor-Sensitive Hashing,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+756db84f76d745211464b5686a67bfdc23e18c19,How to generate realistic images using gated MRF’s,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+75776ce56e649dda68d9a8f13a9df911662e5b79,Face Modelling and Tracking from Range Scans,University of Glasgow,University of Glasgow,"University of Glasgow, University Avenue, Yorkhill, Hillhead, Glasgow, Glasgow City, Scotland, G, UK",55.87231535,-4.28921784,edu,
+75ebe1e0ae9d42732e31948e2e9c03d680235c39,Hello! My name is... Buffy'' -- Automatic Naming of Characters in TV Video,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+7573ff84d71de19fe7d387bb4a6de73cb28402f4,Zero-Shot Hashing via Transferring Supervised Knowledge,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+751e352fe52946ca3d0f51956706313ce521b658,Hierarchical power management for asymmetric multi-core in dark silicon era,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+754ee07789f6ff28fc121bb9f771895e971ac28c,Beyond Triplet Loss: A Deep Quadruplet Network for Person Re-identification,University of Dundee,University of Dundee,"University of Dundee, Park Wynd, Law, Dundee, Dundee City, Scotland, DD1 4HN, UK",56.45796755,-2.98214831,edu,
+75b20672a6290a8e2769ba0226d9187c0ccd5843,Development of response inhibition in the context of relevant versus irrelevant emotions,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+7566032327a19f9ba770022677de34d7e7aeaac8,What Makes Natural Scene Memorable?,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+7566032327a19f9ba770022677de34d7e7aeaac8,What Makes Natural Scene Memorable?,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+7566032327a19f9ba770022677de34d7e7aeaac8,What Makes Natural Scene Memorable?,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+7566032327a19f9ba770022677de34d7e7aeaac8,What Makes Natural Scene Memorable?,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+7566032327a19f9ba770022677de34d7e7aeaac8,What Makes Natural Scene Memorable?,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+7566032327a19f9ba770022677de34d7e7aeaac8,What Makes Natural Scene Memorable?,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+7566032327a19f9ba770022677de34d7e7aeaac8,What Makes Natural Scene Memorable?,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+7566032327a19f9ba770022677de34d7e7aeaac8,What Makes Natural Scene Memorable?,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+751b26e7791b29e4e53ab915bfd263f96f531f56,Mood meter: counting smiles in the wild,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+75da1df4ed319926c544eefe17ec8d720feef8c0,FDDB: A Benchmark for Face Detection in Unconstrained Settings,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+75da1df4ed319926c544eefe17ec8d720feef8c0,FDDB: A Benchmark for Face Detection in Unconstrained Settings,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+75259a613285bdb339556ae30897cb7e628209fa,Unsupervised Domain Adaptation for Zero-Shot Learning,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+754f7f3e9a44506b814bf9dc06e44fecde599878,Quantized Densely Connected U-Nets for Efficient Landmark Localization,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+754f7f3e9a44506b814bf9dc06e44fecde599878,Quantized Densely Connected U-Nets for Efficient Landmark Localization,Binghamton University,Binghamton University,"Binghamton University Downtown Center, Washington Street, Downtown, Binghamton, Broome County, New York, 13901, USA",42.09580770,-75.91455689,edu,
+754f7f3e9a44506b814bf9dc06e44fecde599878,Quantized Densely Connected U-Nets for Efficient Landmark Localization,SenseTime,SenseTime,"China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1号-7",39.99300800,116.32988200,company,"1 Zhongguancun E Rd, Haidian Qu, China"
+75b1790ffcf51489fcfbf14b11f1b90a076345cc,A Coarse-Fine Network for Keypoint Localization,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+75b1790ffcf51489fcfbf14b11f1b90a076345cc,A Coarse-Fine Network for Keypoint Localization,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+75064b7675553c22112b76b5687e0aed4089b0ea,COCO-Text: Dataset and Benchmark for Text Detection and Recognition in Natural Images,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+75064b7675553c22112b76b5687e0aed4089b0ea,COCO-Text: Dataset and Benchmark for Text Detection and Recognition in Natural Images,Czech Technical University,Czech Technical University,"České vysoké učení technické v Praze, Resslova, Nové Město, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 11121, Česko",50.07642960,14.41802312,edu,
+751223e9636f4624551b37d8891f0e06eeb64a5d,Multilinear Wavelets: A Statistical Shape Space for Human Faces,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+7536b6a9f3cb4ae810e2ef6d0219134b4e546dd0,Semi-Automatic Image Labelling Using Depth Information,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+755416b8d2080f5d9e894130e5115a471e9d8793,Learning to Recognize Objects by Retaining Other Factors of Variation,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+7525e1fed92a780b6cb78190da360a3a7b611885,Data-specific concept correlation estimation for video annotation refinement,Beijing Jiaotong University,Beijing Jiaotong University,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国",39.94976005,116.33629046,edu,
+75d2ecbbcc934563dff6b39821605dc6f2d5ffcc,Capturing Subtle Facial Motions in 3D Face Tracking,Beckman Institute,Beckman Institute,"Beckman Institute, The Presidents' Walk, Urbana, Champaign County, Illinois, 61801-2341, USA",40.11571585,-88.22750772,edu,
+75d2ecbbcc934563dff6b39821605dc6f2d5ffcc,Capturing Subtle Facial Motions in 3D Face Tracking,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+8102311b200c68e7928eb28563fd99cd5e8fbfc1,"Occlusion-Aware Object Localization, Segmentation and Pose Estimation",Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+81f63e7344cc242416e37d791f7eb83ec2c07681,Multimodal Co-Training for Selecting Good Examples from Webly Labeled Video,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+81363f85b2827c5d972b7b0691498464e922fdea,Transfer Learning via Unsupervised Task Discovery for Visual Question Answering,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+8150f267cd2852f27639d4d85c3a311360346c88,Salient Montages from Unconstrained Videos,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+81825711c2aaa1b9d3ead1a300e71c4353a41382,End-to-end training of object class detectors for mean average precision,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+81d0ab3201fbaef5aff57e9df2c12c7b4f228987,Talking Face Generation by Conditional Recurrent Adversarial Network,University of Tennessee,University of Tennessee,"University of Tennessee, Melrose Avenue, Fort Sanders, Knoxville, Knox County, Tennessee, 37916, USA",35.95424930,-83.93073950,edu,
+816bd8a7f91824097f098e4f3e0f4b69f481689d,Latent semantic analysis of facial action codes for automatic facial expression recognition,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+816bd8a7f91824097f098e4f3e0f4b69f481689d,Latent semantic analysis of facial action codes for automatic facial expression recognition,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+81ba5202424906f64b77f68afca063658139fbb2,Social Scene Understanding: End-to-End Multi-person Action Localization and Collective Activity Recognition,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+81ba5202424906f64b77f68afca063658139fbb2,Social Scene Understanding: End-to-End Multi-person Action Localization and Collective Activity Recognition,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+8145ff6adab3397a5ac52cc62a7c53dae59763db,ERP responses differentiate inverted but not upright face processing in adults with ASD.,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+81884e1de00e59f24bc20254584d73a1a1806933,Super-Identity Convolutional Neural Network for Face Hallucination,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+81884e1de00e59f24bc20254584d73a1a1806933,Super-Identity Convolutional Neural Network for Face Hallucination,SenseTime,SenseTime,"China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1号-7",39.99300800,116.32988200,company,"1 Zhongguancun E Rd, Haidian Qu, China"
+81884e1de00e59f24bc20254584d73a1a1806933,Super-Identity Convolutional Neural Network for Face Hallucination,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+810cb5228315ac027bb8fbca94f6f8faa6ff8016,A Unified Contour-Pixel Model for Figure-Ground Segmentation,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+810cb5228315ac027bb8fbca94f6f8faa6ff8016,A Unified Contour-Pixel Model for Figure-Ground Segmentation,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+81c7d56f1a77097c8fa14b76cb359d7f436741a0,Looking at Outfit to Parse Clothing,Tohoku University,Tohoku University,"Tohoku University, 五橋通, 青葉区, 仙台市, 宮城県, 東北地方, 980-0811, 日本",38.25309450,140.87365930,edu,
+81c7d56f1a77097c8fa14b76cb359d7f436741a0,Looking at Outfit to Parse Clothing,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+81c7d56f1a77097c8fa14b76cb359d7f436741a0,Looking at Outfit to Parse Clothing,Tohoku University,Tohoku University,"Tohoku University, 五橋通, 青葉区, 仙台市, 宮城県, 東北地方, 980-0811, 日本",38.25309450,140.87365930,edu,
+81e11e33fc5785090e2d459da3ac3d3db5e43f65,A Novel Face Recognition Approach Using a Multimodal Feature Vector,"National Institute of Technology, Durgapur","National Institute of Technology, Durgapur, India","National Institute Of Technology, Durgapur, Priyadarshini Indira Sarani, Durgapur, Bānkurā, West Bengal, 713209, India",23.54869625,87.29105712,edu,
+810baa46ed829553bdb478dad2782cef2278ca60,A Scalable Approach to Column-Based Low-Rank Matrix Approximation,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+810baa46ed829553bdb478dad2782cef2278ca60,A Scalable Approach to Column-Based Low-Rank Matrix Approximation,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+810baa46ed829553bdb478dad2782cef2278ca60,A Scalable Approach to Column-Based Low-Rank Matrix Approximation,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+810baa46ed829553bdb478dad2782cef2278ca60,A Scalable Approach to Column-Based Low-Rank Matrix Approximation,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+810baa46ed829553bdb478dad2782cef2278ca60,A Scalable Approach to Column-Based Low-Rank Matrix Approximation,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+8112972b8a6e0c7f9443dbcdfb4ed65c7484f8c2,Privacy-preserving Machine Learning through Data Obfuscation,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+8112972b8a6e0c7f9443dbcdfb4ed65c7484f8c2,Privacy-preserving Machine Learning through Data Obfuscation,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+8112972b8a6e0c7f9443dbcdfb4ed65c7484f8c2,Privacy-preserving Machine Learning through Data Obfuscation,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+8175f126198c126f9708fa8a04f57af830fba6aa,"DCGANs for image super-resolution, denoising and debluring",Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+8175f126198c126f9708fa8a04f57af830fba6aa,"DCGANs for image super-resolution, denoising and debluring",Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+81e366ed1834a8d01c4457eccae4d57d169cb932,Pose-Configurable Generic Tracking of Elongated Objects,Gdansk University of Technology,Gdansk University of Technology,"PG, Romualda Traugutta, Królewska Dolina, Wrzeszcz Górny, Gdańsk, pomorskie, 80-233, RP",54.37086525,18.61716016,edu,
+8189e4f5fc09ae691c77bbd0d4e09b8853b02edf,Pose estimation of anime/manga characters: a case for synthetic data,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+8189e4f5fc09ae691c77bbd0d4e09b8853b02edf,Pose estimation of anime/manga characters: a case for synthetic data,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+8164ebc07f51c9e0db4902980b5ac3f5a8d8d48c,Shuffle-Then-Assemble: Learning Object-Agnostic Visual Relationship Features,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+81fe36a1a49eabe38c7d98602447eec518af1aa2,Graph Regularised Hashing,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+81fc86e86980a32c47410f0ba7b17665048141ec,Segment-based Methods for Facial Attribute Detection from Partial Faces,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+818ecc8c8d4dc398b01a852df90cb8d972530fa5,Unsupervised Training for 3D Morphable Model Regression,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+818ecc8c8d4dc398b01a852df90cb8d972530fa5,Unsupervised Training for 3D Morphable Model Regression,MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+8170d124e78a3f127c2da291aa1116e85c13c02e,Automatic adaptation of fingerprint liveness detector to new spoof materials,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+81dbc36c38b820dff88bcca177bb644f55a4926f,Minimal Neighborhood Mean Projection Function and Its Application to Eye Location,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+81bf7a4b8b3c21d42cb82f946f762c94031e11b8,Segmentation of Nerve on Ultrasound Images Using Deep Adversarial Network,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+81bf7a4b8b3c21d42cb82f946f762c94031e11b8,Segmentation of Nerve on Ultrasound Images Using Deep Adversarial Network,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+815e77b8f2e8f17205e46162b3addd02b2ea8ff0,Marker-less Pose Estimation,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+81a8b2e55bcea9d9b26e67fcbb5a30ca8a8defc3,Database size effects on performance on a smart card face verification system,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+81e31899aa9f0f54db069f0f4c2a29ed9587fe89,MTLE: A Multitask Learning Encoder of Visual Feature Representations for Video and Movie Description,Ohio State University,The Ohio State University,"The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA",40.00471095,-83.02859368,edu,
+81e31899aa9f0f54db069f0f4c2a29ed9587fe89,MTLE: A Multitask Learning Encoder of Visual Feature Representations for Video and Movie Description,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+81e31899aa9f0f54db069f0f4c2a29ed9587fe89,MTLE: A Multitask Learning Encoder of Visual Feature Representations for Video and Movie Description,Ohio State University,The Ohio State University,"The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA",40.00471095,-83.02859368,edu,
+812e06a4cee26629e198a0a6d991616933ab14d8,Improving Multiple Object Tracking with Optical Flow and Edge Preprocessing,Polytechnique Montreal,Polytechnique Montréal,"2900 Boulevard Edouard-Montpetit, Montréal, QC H3T 1J4, Canada",45.50438400,-73.61288290,edu,"Polytechnique Montreal, Montreal, Quebec, Canada"
+812e06a4cee26629e198a0a6d991616933ab14d8,Improving Multiple Object Tracking with Optical Flow and Edge Preprocessing,Polytechnique Montreal,Polytechnique Montréal,"2900 Boulevard Edouard-Montpetit, Montréal, QC H3T 1J4, Canada",45.50438400,-73.61288290,edu,"Polytechnique Montreal, Montreal, Quebec, Canada"
+814a3acea78a7a79a499c52ff2efc57254f8d02c,3D Pick&Mix: Object Part Blending in Joint Shape and Image Manifolds,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+814a3acea78a7a79a499c52ff2efc57254f8d02c,3D Pick&Mix: Object Part Blending in Joint Shape and Image Manifolds,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+81da427270c100241c07143885ba3051ec4a2ecb,Learning the Synthesizability of Dynamic Texture Samples,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+8120e2d8233f5335b09e673e63395a76ae0e6bae,Recent Advances in Deep Learning: An Overview,Chittagong University of Engineering and Technology,Chittagong University of Engineering and Technology,"Shaheed Tareq Huda Hall, Goal Chattar, চট্টগ্রাম, চট্টগ্রাম জেলা, চট্টগ্রাম বিভাগ, 4349, বাংলাদেশ",22.46221665,91.96942263,edu,
+867596b7c4a2e108dc5a024f85cdfd77a574f5a7,Sparse kernel logistic regression based on L 1/2 regularization,University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+861c650f403834163a2c27467a50713ceca37a3e,Probabilistic Elastic Part Model for Unsupervised Face Detector Adaptation,Stevens Institute of Technology,Stevens Institute of Technology,"Stevens Institute of Technology, River Terrace, Hoboken, Hudson County, New Jersey, 07030, USA",40.74225200,-74.02709490,edu,
+86f191616423efab8c0d352d986126a964983219,Visual to Sound: Generating Natural Sound for Videos in the Wild,University of North Carolina at Chapel Hill,University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.91139710,-79.05045290,edu,
+8670fea0d92c6a0e767d089083a39d5896db8534,"Monocular Depth Estimation with Affinity, Vertical Pooling, and Label Enhancement",SenseTime,SenseTime,"China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1号-7",39.99300800,116.32988200,company,"1 Zhongguancun E Rd, Haidian Qu, China"
+8670fea0d92c6a0e767d089083a39d5896db8534,"Monocular Depth Estimation with Affinity, Vertical Pooling, and Label Enhancement",Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+8670fea0d92c6a0e767d089083a39d5896db8534,"Monocular Depth Estimation with Affinity, Vertical Pooling, and Label Enhancement",Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+86cf00b2c22200745276239d32451ff14ee65296,Boosting Associated Pairing Comparison Features for pedestrian detection,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+8656f48aa77f25462b3ad2edf2b1aa965b2b7b38,Paradigms for the Construction and Annotation of Emotional Corpora for Real-world Human-Computer-Interaction,Ulm University,Ulm University,"HNU, John-F.-Kennedy-Straße, Vorfeld, Wiley, Neu-Ulm, Landkreis Neu-Ulm, Schwaben, Bayern, 89231, Deutschland",48.38044335,10.01010115,edu,
+8656f48aa77f25462b3ad2edf2b1aa965b2b7b38,Paradigms for the Construction and Annotation of Emotional Corpora for Real-world Human-Computer-Interaction,Ulm University,Ulm University,"HNU, John-F.-Kennedy-Straße, Vorfeld, Wiley, Neu-Ulm, Landkreis Neu-Ulm, Schwaben, Bayern, 89231, Deutschland",48.38044335,10.01010115,edu,
+8627e6ccb42c909b5c1f94304af986472effb6f1,Understanding Convolutional Neural Networks in Terms of Category-Level Attributes,Tohoku University,Tohoku University,"Tohoku University, 五橋通, 青葉区, 仙台市, 宮城県, 東北地方, 980-0811, 日本",38.25309450,140.87365930,edu,
+863ffd74d39c33b6351dea90c6f7f1e2bdf2d97c,A Baseline Algorithm for Face Detection and Tracking in Video,University of South Florida,University of South Florida,"University of South Florida, Leroy Collins Boulevard, Tampa, Hillsborough County, Florida, 33620, USA",28.05999990,-82.41383619,edu,
+863ffd74d39c33b6351dea90c6f7f1e2bdf2d97c,A Baseline Algorithm for Face Detection and Tracking in Video,National Institute of Standards and Technology,National Institute of Standards and Technology,"National Institute of Standards and Technology, Summer Walk Drive, Diamond Farms, Gaithersburg, Montgomery County, Maryland, 20878, USA",39.12549380,-77.22293475,edu,
+863f2a473e9e60dbfffe9f7eb576b9bbe3d3a6b4,The Intelligent Robot Contents for Children with Speech-Language Disorder,SungKyunKwan University,SungKyunKwan University,"성균관대, 덕영대로, 천천동, 장안구, 수원시, 경기, 16357, 대한민국",37.30031270,126.97212300,edu,
+86c5478f21c4a9f9de71b5ffa90f2a483ba5c497,"Kernel Selection using Multiple Kernel Learning and Domain Adaptation in Reproducing Kernel Hilbert Space, for Face Recognition under Surveillance Scenario",Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+86c158ef6caaf247d5d14e07c5edded0147df8b7,Spatial Memory for Context Reasoning in Object Detection,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+8628edf89482aef7fba204f3f0a9e9f5b12ec477,Compositional Obverter Communication Learning From Raw Visual Input,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+86a84b3a61f67f59c5bc5545bc88296e46681ca5,Using Models of Objects with Deformable Parts for Joint Categorization and Segmentation of Objects,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+86ed5b9121c02bcf26900913f2b5ea58ba23508f,Actions ~ Transformations,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+86ed5b9121c02bcf26900913f2b5ea58ba23508f,Actions ~ Transformations,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+86204fc037936754813b91898377e8831396551a,Dense Face Alignment,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+86b6afc667bb14ff4d69e7a5e8bb2454a6bbd2cd,Attentional Alignment Networks,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+86b6afc667bb14ff4d69e7a5e8bb2454a6bbd2cd,Attentional Alignment Networks,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+86b6afc667bb14ff4d69e7a5e8bb2454a6bbd2cd,Attentional Alignment Networks,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+862d17895fe822f7111e737cbcdd042ba04377e8,Semi-Latent GAN: Learning to generate and modify facial images from attributes,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+860cc25e1cee40d70d001180ff665809c6e36594,Efficient Boosted Weak Classifiers for Object Detection,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+860cc25e1cee40d70d001180ff665809c6e36594,Efficient Boosted Weak Classifiers for Object Detection,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+86d0127e1fd04c3d8ea78401c838af621647dc95,A Novel Multi-Task Tensor Correlation Neural Network for Facial Attribute Prediction,Hunan University,Hunan University,"Yejin University for Employees, 冶金西路, 和平乡, 珠晖区, 衡阳市 / Hengyang, 湖南省, 中国",26.88111275,112.62850666,edu,
+86d0127e1fd04c3d8ea78401c838af621647dc95,A Novel Multi-Task Tensor Correlation Neural Network for Facial Attribute Prediction,"National University of Defense Technology, China","National University of Defence Technology, Changsha 410000, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.22902090,112.99483204,edu,
+86d0127e1fd04c3d8ea78401c838af621647dc95,A Novel Multi-Task Tensor Correlation Neural Network for Facial Attribute Prediction,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+86f17e74b905c8251223caf9b4e99784264c6252,Video retrieval by mimicking poses,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+86f17e74b905c8251223caf9b4e99784264c6252,Video retrieval by mimicking poses,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+86f17e74b905c8251223caf9b4e99784264c6252,Video retrieval by mimicking poses,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+86e005b54819ca54d35daa2ae7ead498f41d84ce,Weakly Supervised Object Recognition and Localization with Invariant High Order Features,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+86eab1845deb3614233360b6bc33ce1ff074458e,Learning Deep Neural Networks for Vehicle Re-ID with Visual-spatio-Temporal Path Proposals,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+86eab1845deb3614233360b6bc33ce1ff074458e,Learning Deep Neural Networks for Vehicle Re-ID with Visual-spatio-Temporal Path Proposals,SenseTime,SenseTime,"China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1号-7",39.99300800,116.32988200,company,"1 Zhongguancun E Rd, Haidian Qu, China"
+86f3552b822f6af56cb5079cc31616b4035ccc4e,Towards Miss Universe automatic prediction: The evening gown competition,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+86a8b3d0f753cb49ac3250fa14d277983e30a4b7,Exploiting Unlabeled Ages for Aging Pattern Analysis on a Large Database,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+860588fafcc80c823e66429fadd7e816721da42a,Unsupervised Discovery of Object Landmarks as Structural Representations,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+86a200647f89ed81db8031ccfbcb5368a32bed6c,SHOE: Sibling Hashing with Output Embeddings,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+861f4aac1178bf1c4dd1373dbf2794be54c195d4,Survey of Image Processing Techniques for Brain Pathology Diagnosis: Challenges and Opportunities,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+861f4aac1178bf1c4dd1373dbf2794be54c195d4,Survey of Image Processing Techniques for Brain Pathology Diagnosis: Challenges and Opportunities,University of Tartu,UNIVERSITY OF TARTU,"Paabel, University of Tartu, 17, Ülikooli, Kesklinn, Tartu linn, Tartu, Tartu linn, Tartu maakond, 53007, Eesti",58.38131405,26.72078081,edu,
+86374bb8d309ad4dbde65c21c6fda6586ae4147a,Detect-and-Track: Efficient Pose Estimation in Videos,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+86374bb8d309ad4dbde65c21c6fda6586ae4147a,Detect-and-Track: Efficient Pose Estimation in Videos,Dartmouth College,Dartmouth College,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA",43.70479270,-72.29259090,edu,
+86374bb8d309ad4dbde65c21c6fda6586ae4147a,Detect-and-Track: Efficient Pose Estimation in Videos,Facebook,Facebook,"250 Bryant St, Mountain View, CA 94041, USA",37.39367170,-122.08072620,company,"Facebook, Mountain View, CA"
+72282287f25c5419dc6fd9e89ec9d86d660dc0b5,A Rotation Invariant Latent Factor Model for Moveme Discovery from Static Poses,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+727067392502bb44cadcd55680156e9517a3fd65,Does this Smile Make me Look White ? Exploring the Effects of Emotional Expressions on the Categorization of Multiracial Children,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+72aa01cc6dbadc631407b4d2d0addec172dc5037,Low-rank matrix recovery with structural incoherence for robust face recognition,"Academia Sinica, Taiwan","Research Center for Institute of Information Science, Academia Sinica, Taiwan","115, Taiwan, Taipei City, Nangang District, 研究院路二段128號",25.04117270,121.61465180,edu,
+7237b27dac6dfe5c07a2c6c36ad848e6bcc7ac77,Person Depth ReID: Robust Person Re-identification with Commodity Depth Sensors,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+7237b27dac6dfe5c07a2c6c36ad848e6bcc7ac77,Person Depth ReID: Robust Person Re-identification with Commodity Depth Sensors,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+7237b27dac6dfe5c07a2c6c36ad848e6bcc7ac77,Person Depth ReID: Robust Person Re-identification with Commodity Depth Sensors,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+7237b27dac6dfe5c07a2c6c36ad848e6bcc7ac77,Person Depth ReID: Robust Person Re-identification with Commodity Depth Sensors,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+7292510a78ef9dfda8aa54dab318a7780b2e8faf,Hardware for machine learning: Challenges and opportunities,MIT,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+72a672cd20a3b7b2a123772ac0f9a27cfe96effe,Image-based Ear Biometric Smartphone App for Patient Identification in Field Settings,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+72969929a95227fd7f9a1abaa832097c0c93dd71,Appearance Descriptors for Person Re-identification: a Comprehensive Review,University of Cagliari,"University of Cagliari, Italy","Via Università, 40, 09124 Cagliari CA, Italy",39.21736570,9.11492180,edu,
+72e8010136460340683a52c2aee4edaee0b48559,Repulsion Loss: Detecting Pedestrians in a Crowd,Tongji University,Tongji University,"同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国",31.28473925,121.49694909,edu,
+72e8010136460340683a52c2aee4edaee0b48559,Repulsion Loss: Detecting Pedestrians in a Crowd,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+72e8010136460340683a52c2aee4edaee0b48559,Repulsion Loss: Detecting Pedestrians in a Crowd,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+72a5e181ee8f71b0b153369963ff9bfec1c6b5b0,Expression Recognition in Videos Using a Weighted Component-Based Feature Descriptor,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+72a5e181ee8f71b0b153369963ff9bfec1c6b5b0,Expression Recognition in Videos Using a Weighted Component-Based Feature Descriptor,Southeast University,Southeast University,"SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国",32.05752790,118.78682252,edu,
+72458e19f8561e74471449fb4cfd97c8b9b527e8,A Computational Model of Observer Stress,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+72eab1e61706519e8c05cc042f0597b439874413,Genetic Programming for Multiclass Object Classification,Victoria University of Wellington,Victoria University of Wellington,"Victoria University of Wellington, Waiteata Road, Aro Valley, Wellington, Wellington City, Wellington, 6040, New Zealand/Aotearoa",-41.29052775,174.76846919,edu,
+72903a6b9894f13facf46a81bd7b659740b488e5,Worldwide AI,Queensland University of Technology,Queensland University of Technology,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.47715625,153.02841004,edu,
+7224d58a7e1f02b84994b60dc3b84d9fe6941ff5,When Face Recognition Meets with Deep Learning: An Evaluation of Convolutional Neural Networks for Face Recognition,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+7224d58a7e1f02b84994b60dc3b84d9fe6941ff5,When Face Recognition Meets with Deep Learning: An Evaluation of Convolutional Neural Networks for Face Recognition,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+7224d58a7e1f02b84994b60dc3b84d9fe6941ff5,When Face Recognition Meets with Deep Learning: An Evaluation of Convolutional Neural Networks for Face Recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+720e06688e1038026070253891037652f5d0d9f5,Chess Q&A : Question Answering on Chess Games,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+72b34e9536665f776b0f282ddb63120afa21c84e,An experimental examination of catastrophizing-related interpretation bias for ambiguous facial expressions of pain using an incidental learning task,Maastricht University,Maastricht University,"UNS60, Professor Ten Hoorlaan, Randwyck, Maastricht, Limburg, Nederland, 6229EV, Nederland",50.83367120,5.71589000,edu,
+72e10a2a7a65db7ecdc7d9bd3b95a4160fab4114,Face alignment using cascade Gaussian process regression trees,Korea Advanced Institute of Science and Technology,Korea Advanced Institute of Science and Technology,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.36971910,127.36253700,edu,
+72160aae43cd9b2c3aae5574acc0d00ea0993b9e,Boosting Facial Expression Recognition in a Noisy Environment Using LDSP-Local Distinctive Star Pattern,Stamford University Bangladesh,Stamford University Bangladesh,"Stamford University Bangladesh, Siddeshwari Road, ফকিরাপুল, Paltan, ঢাকা, ঢাকা বিভাগ, 1217, বাংলাদেশ",23.74481660,90.40843514,edu,
+72160aae43cd9b2c3aae5574acc0d00ea0993b9e,Boosting Facial Expression Recognition in a Noisy Environment Using LDSP-Local Distinctive Star Pattern,Stamford University Bangladesh,Stamford University Bangladesh,"Stamford University Bangladesh, Siddeshwari Road, ফকিরাপুল, Paltan, ঢাকা, ঢাকা বিভাগ, 1217, বাংলাদেশ",23.74481660,90.40843514,edu,
+72160aae43cd9b2c3aae5574acc0d00ea0993b9e,Boosting Facial Expression Recognition in a Noisy Environment Using LDSP-Local Distinctive Star Pattern,Stamford University Bangladesh,Stamford University Bangladesh,"Stamford University Bangladesh, Siddeshwari Road, ফকিরাপুল, Paltan, ঢাকা, ঢাকা বিভাগ, 1217, বাংলাদেশ",23.74481660,90.40843514,edu,
+72cbbdee4f6eeee8b7dd22cea6092c532271009f,Masquer Hunter: Adversarial Occlusion-aware Face Detection,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+723f879b93097c22ffa4fe6b587d3a070a67136b,Learning Spatial Interest Regions from Videos to Inform Action Recognition in Still Images,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+72edfb91e4b3d42547591be9e8c6eb07e7190499,Do Children See in Black and White? Children's and Adults' Categorizations of Multiracial Individuals.,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+721e5ba3383b05a78ef1dfe85bf38efa7e2d611d,"BULAT, TZIMIROPOULOS: CONVOLUTIONAL AGGREGATION OF LOCAL EVIDENCE 1 Convolutional aggregation of local evidence for large pose face alignment",University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+72007faf3bc77e1d98d3552f36c0b6b74aa9e379,The Relationship between Amygdala Activation and Passive Exposure Time to an Aversive Cue during a Continuous Performance Task,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+72a4390a6c3b2bc2c3e7d83fc1f99e65e6137573,Collective Activity Localization with Contextual Spatial Pyramid,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+72c69a7a791ff86f84f082b73e09733bb90edfd7,Face photo retrieval by sketch example,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+72c69a7a791ff86f84f082b73e09733bb90edfd7,Face photo retrieval by sketch example,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+72635f4e479e234a9ceb9c836153830621b308c7,Exemplar-Based Colour Constancy,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+7249e94317ff7cb5dc39441f3473a2d4f1c1d30b,Action Attribute Detection from Sports Videos with Contextual Constraints,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+72ed3be320e435a1dc093c84071a22d3d64fd997,Eye Spy: Improving Vision through Dialog,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+721b890875660e87e7e3d9dd6917709b5fc5e34d,On optimizing subspaces for face recognition,GE Global Research Center,GE Global Research Center,"GE Global Research Center, Aqueduct, Niskayuna, Schenectady County, New York, USA",42.82982480,-73.87719385,edu,
+72a7eb68f0955564e1ceafa75aeeb6b5bbb14e7e,Face Recognition with Contrastive Convolution,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+72a7eb68f0955564e1ceafa75aeeb6b5bbb14e7e,Face Recognition with Contrastive Convolution,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+72fd97d21d6465d4bb407b6f8f3accd4419a2fb4,Automated Identification of Individual Great White Sharks from Unrestricted Fin Imagery,University of Bristol,University of Bristol,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK",51.45848370,-2.60977520,edu,
+7240aad3fa4adf65e401345c877ee58a01b76fb1,A new theoretical approach to improving face recognition in disorders of central vision: face caricaturing.,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+7240aad3fa4adf65e401345c877ee58a01b76fb1,A new theoretical approach to improving face recognition in disorders of central vision: face caricaturing.,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+7240aad3fa4adf65e401345c877ee58a01b76fb1,A new theoretical approach to improving face recognition in disorders of central vision: face caricaturing.,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+7240aad3fa4adf65e401345c877ee58a01b76fb1,A new theoretical approach to improving face recognition in disorders of central vision: face caricaturing.,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+7240aad3fa4adf65e401345c877ee58a01b76fb1,A new theoretical approach to improving face recognition in disorders of central vision: face caricaturing.,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+7240aad3fa4adf65e401345c877ee58a01b76fb1,A new theoretical approach to improving face recognition in disorders of central vision: face caricaturing.,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+7240aad3fa4adf65e401345c877ee58a01b76fb1,A new theoretical approach to improving face recognition in disorders of central vision: face caricaturing.,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+7240aad3fa4adf65e401345c877ee58a01b76fb1,A new theoretical approach to improving face recognition in disorders of central vision: face caricaturing.,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+72450d7e5cbe79b05839c30a4f0284af5aa80053,Natural Facial Expression Recognition Using Dynamic and Static Schemes,University of the Basque Country,University of the Basque Country,"Euskal Herriko Unibertsitatea, Ibaeta Campusa, Paseo Arriola pasealekua, Ibaeta, Donostia/San Sebastián, Donostialdea, Gipuzkoa, Euskadi, 20008, España",43.30927695,-2.01066785,edu,
+72b4b8f4a9f25cac5686231b44a2220945fd2ff6,Face Verification Using Modeled Eigenspectrum,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+725c3605c2d26d113637097358cd4c08c19ff9e1,Deep Reasoning with Knowledge Graph for Social Relationship Understanding,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+72dd9ecc3a1f32d53b4aeb03ea3db14236fbcb27,Let the Shape Speak - Discriminative Face Alignment using Conjugate Priors,Institute of Systems and Robotics,Institute of Systems and Robotics,"Institut für Robotik und Kognitive Systeme, 160, Ratzeburger Allee, Strecknitz, Sankt Jürgen, Strecknitz, Lübeck, Schleswig-Holstein, 23562, Deutschland",53.83383710,10.70359390,edu,
+72dd9ecc3a1f32d53b4aeb03ea3db14236fbcb27,Let the Shape Speak - Discriminative Face Alignment using Conjugate Priors,University of Coimbra,University of Coimbra,"Reitoria da Universidade de Coimbra, Rua de Entre-Colégios, Almedina, Alta, Almedina, Sé Nova, Santa Cruz, Almedina e São Bartolomeu, CBR, Coimbra, Baixo Mondego, Centro, 3000-062, Portugal",40.20759510,-8.42566148,edu,
+72e14386d0ef1aa09c52e07086fc310c440db16f,Gait Analysis of Gender and Age Using a Large-Scale Multi-view Gait Database,Osaka University,Osaka University,"大阪大学清明寮, 服部西町四丁目, 豊中市, 大阪府, 近畿地方, 日本",34.80809035,135.45785218,edu,
+72e9acdd64e71fc2084acaf177aafaa2e075bd8c,The 2017 Hands in the Million Challenge on 3D Hand Pose Estimation,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+445461a34adc4bcdccac2e3c374f5921c93750f8,Emotional Expression Classification Using Time-Series Kernels,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+445461a34adc4bcdccac2e3c374f5921c93750f8,Emotional Expression Classification Using Time-Series Kernels,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+446fbff6a2a7c9989b0a0465f960e236d9a5e886,Context Encoders: Feature Learning by Inpainting,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+44e6ce12b857aeade03a6e5d1b7fb81202c39489,VoxCeleb2: Deep Speaker Recognition,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+44508e337a90223e935485d87d6fda15aaddd77a,Text detection and recognition in natural scene images,Vienna University of Technology,Vienna University of Technology,"TU Wien, Hauptgebäude, Hoftrakt, Freihausviertel, KG Wieden, Wieden, Wien, 1040, Österreich",48.19853965,16.36986168,edu,
+443acd268126c777bc7194e185bec0984c3d1ae7,Retrieving relative soft biometrics for semantic identification,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+44054c64ae7ee16a8a8348bb57345aae95a8ddae,Social Orienting and Attention Is Influenced by the Presence of Competing Nonsocial Information in Adolescents with Autism,University of Texas at Dallas,University of Texas at Dallas,"University of Texas at Dallas, Richardson, Dallas County, Texas, 78080, USA",32.98207990,-96.75662780,edu,
+44054c64ae7ee16a8a8348bb57345aae95a8ddae,Social Orienting and Attention Is Influenced by the Presence of Competing Nonsocial Information in Adolescents with Autism,University of North Carolina at Chapel Hill,University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.91139710,-79.05045290,edu,
+44ae568a8cafbd4f4d495bf612bd6bb5c5116425,Accel: A Corrective Fusion Network for Efficient Semantic Segmentation on Video,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+449b87347fe7f9c3f17e969fab1617fbfd9ccb1b,Flat vs. Expressive Storytelling: Young Children’s Learning and Retention of a Social Robot’s Narrative,MIT,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+449b87347fe7f9c3f17e969fab1617fbfd9ccb1b,Flat vs. Expressive Storytelling: Young Children’s Learning and Retention of a Social Robot’s Narrative,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+449b87347fe7f9c3f17e969fab1617fbfd9ccb1b,Flat vs. Expressive Storytelling: Young Children’s Learning and Retention of a Social Robot’s Narrative,Plymouth University,Plymouth University,"Plymouth University, Portland Square, Barbican, Plymouth, South West England, England, PL4 6AP, UK",50.37552690,-4.13937687,edu,
+44f23600671473c3ddb65a308ca97657bc92e527,Convolutional Two-Stream Network Fusion for Video Action Recognition,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+44f23600671473c3ddb65a308ca97657bc92e527,Convolutional Two-Stream Network Fusion for Video Action Recognition,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+44f23600671473c3ddb65a308ca97657bc92e527,Convolutional Two-Stream Network Fusion for Video Action Recognition,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+442cf9b24661c9ea5c2a1dcabd4a5b8af1cd89da,Beyond One-hot Encoding: lower dimensional target embedding,University of Barcelona,University of Barcelona,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España",41.38689130,2.16352385,edu,
+44fbbd3def64d52c956277628a89aba77b24686b,Context Modulates Congruency Effects in Selective Attention to Social Cues,University of Nevada,University of Nevada,"Orange 1, Evans Avenue, Reno, Washoe County, Nevada, 89557, USA",39.54694490,-119.81346566,edu,
+44fbbd3def64d52c956277628a89aba77b24686b,Context Modulates Congruency Effects in Selective Attention to Social Cues,Teesside University,Teesside University,"Teesside University, Southfield Road, Southfield, Linthorpe, Middlesbrough, North East England, England, TS1 3BZ, UK",54.57036950,-1.23509662,edu,
+4467a1ae8ddf0bc0e970c18a0cdd67eb83c8fd6f,Learning features from Improved Dense Trajectories using deep convolutional networks for Human Activity Recognition,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+4467a1ae8ddf0bc0e970c18a0cdd67eb83c8fd6f,Learning features from Improved Dense Trajectories using deep convolutional networks for Human Activity Recognition,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+44bfa5311f0921664e9036f63cadd71049a35f35,Faster R-CNN-Based Glomerular Detection in Multistained Human Whole Slide Images,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+44bfa5311f0921664e9036f63cadd71049a35f35,Faster R-CNN-Based Glomerular Detection in Multistained Human Whole Slide Images,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+446f003afd16aa932aa87c73543348f62eba0e67,Suspect identification based on descriptive facial attributes,"Noblis, Falls Church, VA, U.S.A.","Noblis, Falls Church, VA, U.S.A.","2002 Edmund Halley Dr, Reston, VA 20191, USA",38.95187000,-77.36325900,company,
+446f003afd16aa932aa87c73543348f62eba0e67,Suspect identification based on descriptive facial attributes,Istanbul Technical University,Istanbul Technical University,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye",41.10427915,29.02231159,edu,
+446f003afd16aa932aa87c73543348f62eba0e67,Suspect identification based on descriptive facial attributes,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+448c24fbe400ac164f3b97bce3cefc1577f91cca,Incremental sparse Bayesian ordinal regression,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+4469d0d0ac2f6f0221dc865b132958df33faa95e,Region-Based Interactive Ranking Optimization for Person Re-identification,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+4469d0d0ac2f6f0221dc865b132958df33faa95e,Region-Based Interactive Ranking Optimization for Person Re-identification,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+44f48a4b1ef94a9104d063e53bf88a69ff0f55f3,Automatically Building Face Datasets of New Domains from Weakly Labeled Data with Pretrained Models,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+44482010dbd63fba4f7457cbdb7cf61e44c78617,Efficient activity detection with max-subgraph search,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+44ec89df8d9f42e323ea90599f23ae58e3a8925a,Recognizing human actions by attributes,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+44aeda8493ad0d44ca1304756cc0126a2720f07b,Face Alive Icons,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+4417258e4fe9e60d044a72197cb67471272991a5,Encoding Cortical Surface by Spherical Harmonics,University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+4417258e4fe9e60d044a72197cb67471272991a5,Encoding Cortical Surface by Spherical Harmonics,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+44bf7e376bf6d0a64134de99ac92df11546c055d,Perception driven 3D facial expression analysis based on reverse correlation and normal component,SUNY Binghamton,State University of New York at Binghamton,"State University of New York at Binghamton, East Drive, Hinman, Willow Point, Vestal Town, Broome County, New York, 13790, USA",42.08779975,-75.97066066,edu,
+44d23df380af207f5ac5b41459c722c87283e1eb,Human Attribute Recognition by Deep Hierarchical Contexts,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+44484d2866f222bbb9b6b0870890f9eea1ffb2d0,Human Reidentification with Transferred Metric Learning,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+44c9b5c55ca27a4313daf3760a3f24a440ce17ad,Revisiting hand-crafted feature for action recognition: a set of improved dense trajectories,Hiroshima University,Hiroshima University,"Hiroshima University 広島大学 東広島キャンパス, 出会いの道 Deai-no-michi Str., 西条下見, 東広島市, 広島県, 中国地方, 739-0047, 日本",34.40197660,132.71231950,edu,
+44c9b5c55ca27a4313daf3760a3f24a440ce17ad,Revisiting hand-crafted feature for action recognition: a set of improved dense trajectories,Hiroshima University,Hiroshima University,"Hiroshima University 広島大学 東広島キャンパス, 出会いの道 Deai-no-michi Str., 西条下見, 東広島市, 広島県, 中国地方, 739-0047, 日本",34.40197660,132.71231950,edu,
+44ce0051d9482d96169ff5564085fe9867eb3193,Differential activation of the amygdala and the 'social brain' during fearful face-processing in Asperger Syndrome.,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+44ce0051d9482d96169ff5564085fe9867eb3193,Differential activation of the amygdala and the 'social brain' during fearful face-processing in Asperger Syndrome.,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+44fbbaea6271e47ace47c27701ed05e15da8f7cf,Pupil Mimicry Correlates With Trust in In-Group Partners With Dilating Pupils.,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+4407cde7ab8fc38ccb22f2799ab6f0ff7ab65283,Face Verification Using Error Correcting Output Codes,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+4459593cf12181988b8cec7e43f834f6831826cc,Mid-level Elements for Object Detection,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+441bf5f7fe7d1a3939d8b200eca9b4bb619449a9,Head pose estimation in the wild using approximate view manifolds,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+441bf5f7fe7d1a3939d8b200eca9b4bb619449a9,Head pose estimation in the wild using approximate view manifolds,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+449808b7aa9ee6b13ad1a21d9f058efaa400639a,Recovering 3D facial shape via coupled 2D/3D space learning,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+449808b7aa9ee6b13ad1a21d9f058efaa400639a,Recovering 3D facial shape via coupled 2D/3D space learning,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+44787df6079918c6c4bf3dc871e2cad5a62c0e58,Super-Resolved Faces for Improved Face Recognition from Surveillance Video,Queensland University of Technology,Queensland University of Technology,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.47715625,153.02841004,edu,
+44f18ef0800e276617e458bc21502947f35a7f94,EgoCap: egocentric marker-less motion capture with two fisheye cameras,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+44f18ef0800e276617e458bc21502947f35a7f94,EgoCap: egocentric marker-less motion capture with two fisheye cameras,University of Bath,University of Bath,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK",51.37914420,-2.32523320,edu,
+2a65d7d5336b377b7f5a98855767dd48fa516c0f,Fast Supervised LDA for Discovering Micro-Events in Large-Scale Video Datasets,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+2af2b74c3462ccff3a6881ff7cf4f321b3242fa9,"Name-Face Association in Web Videos: A Large-Scale Dataset, Baselines, and Open Issues",Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+2af2b74c3462ccff3a6881ff7cf4f321b3242fa9,"Name-Face Association in Web Videos: A Large-Scale Dataset, Baselines, and Open Issues",City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+2af2b74c3462ccff3a6881ff7cf4f321b3242fa9,"Name-Face Association in Web Videos: A Large-Scale Dataset, Baselines, and Open Issues",Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+2af2b74c3462ccff3a6881ff7cf4f321b3242fa9,"Name-Face Association in Web Videos: A Large-Scale Dataset, Baselines, and Open Issues",Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+2a7e02ebdb7622815dbce8cf227189e2c92d026c,Zero-Shot Object Detection by Hybrid Region Embedding,Middle East Technical University,Middle East Technical University,"ODTÜ, 1, 1591.sk(315.sk), Çiğdem Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87549675,32.78553506,edu,
+2a7e02ebdb7622815dbce8cf227189e2c92d026c,Zero-Shot Object Detection by Hybrid Region Embedding,Hacettepe University,Hacettepe University,"Hacettepe Üniversitesi Beytepe Kampüsü, Hacettepe-Beytepe Kampüs Yolu, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.86742125,32.73519072,edu,
+2ad9e4596f38d58019a6f8073f238803f52a2773,Data-Driven 3D Primitives for Single Image Understanding,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+2a77e3221d0512aa5674cf6f9041c1ce81fc07f0,An Automatic Hybrid Segmentation Approach for Aligned Face Portrait Images,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+2aad85de05e8b9137558926678c94442371d37ec,Head Pose Estimation Using Sparse Representation,Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+2a4117849c88d4728c33b1becaa9fb6ed7030725,Memory Bounded Deep Convolutional Networks,University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+2a4117849c88d4728c33b1becaa9fb6ed7030725,Memory Bounded Deep Convolutional Networks,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+2ab8956fef9526741c1e68c94d9a9da74a87960c,Learning to Disambiguate by Asking Discriminative Questions,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+2ab8956fef9526741c1e68c94d9a9da74a87960c,Learning to Disambiguate by Asking Discriminative Questions,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+2af620e17d0ed67d9ccbca624250989ce372e255,Meta-class features for large-scale object categorization on a budget,Dartmouth College,Dartmouth College,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA",43.70479270,-72.29259090,edu,
+2a35d20b2c0a045ea84723f328321c18be6f555c,Boost Picking: A Universal Method on Converting Supervised Classification to Semi-supervised Classification,Beijing Institute of Technology,Beijing Institute of Technology University,"北京理工大学, 5, 中关村南大街, 中关村, 稻香园南社区, 海淀区, 北京市, 100872, 中国",39.95866520,116.30971281,edu,
+2a35d20b2c0a045ea84723f328321c18be6f555c,Boost Picking: A Universal Method on Converting Supervised Classification to Semi-supervised Classification,Beijing Institute of Technology,Beijing Institute of Technology University,"北京理工大学, 5, 中关村南大街, 中关村, 稻香园南社区, 海淀区, 北京市, 100872, 中国",39.95866520,116.30971281,edu,
+2a35d20b2c0a045ea84723f328321c18be6f555c,Boost Picking: A Universal Method on Converting Supervised Classification to Semi-supervised Classification,Beijing Institute of Technology,Beijing Institute of Technology University,"北京理工大学, 5, 中关村南大街, 中关村, 稻香园南社区, 海淀区, 北京市, 100872, 中国",39.95866520,116.30971281,edu,
+2a9b398d358cf04dc608a298d36d305659e8f607,Facial action unit recognition with sparse representation,University of Denver,University of Denver,"University of Denver, Driscoll Bridge, Denver, Denver County, Colorado, 80208, USA",39.67665410,-104.96220300,edu,
+2a9b398d358cf04dc608a298d36d305659e8f607,Facial action unit recognition with sparse representation,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+2af680736f32ae37d579a8b5656eec1c6b158dec,Biologically Significant Facial Landmarks: How Significant Are They for Gender Classification?,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+2a7b1257ec819688b46272024855c1858e031db6,Minimum Spectral Connectivity Projection Pursuit for Unsupervised Classification,Lancaster University,Lancaster University,"Lancaster University, Library Avenue, Bowland College, Hala, Lancaster, Lancs, North West England, England, LA1 4AP, UK",54.00975365,-2.78757491,edu,
+2a7b1257ec819688b46272024855c1858e031db6,Minimum Spectral Connectivity Projection Pursuit for Unsupervised Classification,Lancaster University,Lancaster University,"Lancaster University, Library Avenue, Bowland College, Hala, Lancaster, Lancs, North West England, England, LA1 4AP, UK",54.00975365,-2.78757491,edu,
+2a7b1257ec819688b46272024855c1858e031db6,Minimum Spectral Connectivity Projection Pursuit for Unsupervised Classification,Lancaster University,Lancaster University,"Lancaster University, Library Avenue, Bowland College, Hala, Lancaster, Lancs, North West England, England, LA1 4AP, UK",54.00975365,-2.78757491,edu,
+2ad7100498f3e4d00ec4424099b90fddb659e972,Another probabilistic method proposed to distinguish between fixations and saccades are Kalman filters,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+2ad7100498f3e4d00ec4424099b90fddb659e972,Another probabilistic method proposed to distinguish between fixations and saccades are Kalman filters,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+2ac21d663c25d11cda48381fb204a37a47d2a574,Interpreting Hand-Over-Face Gestures,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+2a73b610bd8d670f3b57debcbad7930db80f40e1,Stacking With Auxiliary Features: Improved Ensembling for Natural Language and Vision,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+2a4153655ad1169d482e22c468d67f3bc2c49f12,Face Alignment Across Large Poses: A 3D Solution,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+2a4153655ad1169d482e22c468d67f3bc2c49f12,Face Alignment Across Large Poses: A 3D Solution,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+2a869bc7c1488023c6e791e9c9071badfbad749d,NUS-WIDE: a real-world web image database from National University of Singapore,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+2a869bc7c1488023c6e791e9c9071badfbad749d,NUS-WIDE: a real-world web image database from National University of Singapore,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+2a3768ac4f6b3bfbcce4001c0c2fd35cfcc7679d,Face Recognition with Variation in Pose Angle Using Face Graphs,Rochester Institute of Technology,Rochester Institute of Technology,"Rochester Institute of Technology (RIT), 1, Lomb Memorial Drive, Bailey, Henrietta Town, Monroe County, New York, 14623, USA",43.08250655,-77.67121663,edu,
+2aa2b312da1554a7f3e48f71f2fce7ade6d5bf40,Estimating Sheep Pain Level Using Facial Action Unit Detection,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+2af9ee8ee3ab4a89ae0098a1f9caa1aa9dad4e8a,2D and 3D Pose Recovery from a Single Uncalibrated Video - A View and Activity Independent Framework,Kingston University,Kingston University,"Kingston University, Kingston Hill, Kingston Vale, Kingston-upon-Thames, London, Greater London, England, KT2 7TF, UK",51.42930860,-0.26840440,edu,
+2a4fc35acaf09517e9c63821cadd428a84832416,Learning object class detectors from weakly annotated video,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+2a5823a387e248c4e7312d49cfbb02a25519251a,Weakly-supervised Deep Convolutional Neural Network Learning for Facial Action Unit Intensity Estimation,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+2a5823a387e248c4e7312d49cfbb02a25519251a,Weakly-supervised Deep Convolutional Neural Network Learning for Facial Action Unit Intensity Estimation,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+2a3e19d7c54cba3805115497c69069dd5a91da65,Looking at Hands in Autonomous Vehicles: A ConvNet Approach using Part Affinity Fields,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+2a31b4bf2a294b6e67956a6cd5ed6d875af548e0,Learning Affinity via Spatial Propagation Networks,Dalian University of Technology,Dalian University of Technology,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国",38.88140235,121.52281098,edu,
+2af19b5ff2ca428fa42ef4b85ddbb576b5d9a5cc,Multi-Region Probabilistic Histograms for Robust and Scalable Identity Inference,"CSIRO, Australia","NICTA, PO Box 6020, St Lucia, QLD 4067, Australia","Research Way, Clayton VIC 3168, Australia",-37.90627370,145.13194490,edu,f.k.a. NICTA
+2af19b5ff2ca428fa42ef4b85ddbb576b5d9a5cc,Multi-Region Probabilistic Histograms for Robust and Scalable Identity Inference,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+2afde5e414aa94e20e2b30a5aa277ac36ca41d6a,Optimizing kd-trees for scalable visual descriptor indexing,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+2a14b6d9f688714dc60876816c4b7cf763c029a9,Combining multiple sources of knowledge in deep CNNs for action recognition,University of North Carolina at Chapel Hill,University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.91139710,-79.05045290,edu,
+2a31169e3a0f87987537220b743bbf6e79c440e5,Multi-view Feature Fusion Network for Vehicle Re- Identification,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+2a8d0125c8d27699ccd75c76bda774e065060709,A translational neuroscience framework for the development of socioemotional functioning in health and psychopathology.,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+2a92b610d2eed67b934ef2075264e243e6e1ea91,Learning Multi-Modal Navigation for Unmanned Ground Vehicles,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+2a92b610d2eed67b934ef2075264e243e6e1ea91,Learning Multi-Modal Navigation for Unmanned Ground Vehicles,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+2a92b610d2eed67b934ef2075264e243e6e1ea91,Learning Multi-Modal Navigation for Unmanned Ground Vehicles,University at Buffalo,State University of New York at Buffalo,"Buffalo, NY 14260, USA",43.00080930,-78.78896970,edu,
+2a88541448be2eb1b953ac2c0c54da240b47dd8a,Discrete Graph Hashing,IBM Thomas J. Watson Research Center,IBM Thomas J. Watson Research Center,"IBM Yorktown research lab, Adams Road, Millwood, Town of New Castle, Westchester County, New York, 10562, USA",41.21002475,-73.80407056,company,
+2a88541448be2eb1b953ac2c0c54da240b47dd8a,Discrete Graph Hashing,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+2a5903bdb3fdfb4d51f70b77f16852df3b8e5f83,The Effect of Computer-Generated Descriptions on Photo-Sharing Experiences of People with Visual Impairments,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+2a5903bdb3fdfb4d51f70b77f16852df3b8e5f83,The Effect of Computer-Generated Descriptions on Photo-Sharing Experiences of People with Visual Impairments,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+2a051c1f2787690fa9fa916fd548b62ce571f778,Dense CNN Learning with Equivalent Mappings,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+2a75f34663a60ab1b04a0049ed1d14335129e908,Web-based database for facial expression analysis,Delft University of Technology,Delft University of Technology,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland",51.99882735,4.37396037,edu,
+2a171f8d14b6b8735001a11c217af9587d095848,Learning Social Relation Traits from Face Images,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+2a0cec7f0f8b63f182ea0c52cb935580acabafcc,Dynamic Neural Turing Machine with Continuous and Discrete Addressing Schemes,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+2a4bbee0b4cf52d5aadbbc662164f7efba89566c,Pedestrian Attribute Recognition At Far Distance,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+2a547bf34d185f80a0d476148721b6f05c276256,"Detection, Description and Tracking of Ants in Video Sequences",Czech Technical University,Czech Technical University,"České vysoké učení technické v Praze, Resslova, Nové Město, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 11121, Česko",50.07642960,14.41802312,edu,
+2a0623ae989f2236f5e1fe3db25ab708f5d02955,3D Face Modelling for 2D+3D Face Recognition,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+2a0623ae989f2236f5e1fe3db25ab708f5d02955,3D Face Modelling for 2D+3D Face Recognition,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+2afdda6fb85732d830cea242c1ff84497cd5f3cb,Face image retrieval by using Haar features,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+2afdda6fb85732d830cea242c1ff84497cd5f3cb,Face image retrieval by using Haar features,Tamkang University,Tamkang University,"淡江大學, 151, 英專路, 中興里, 鬼仔坑, 淡水區, 新北市, 25137, 臺灣",25.17500615,121.45076751,edu,
+2a06b31e778bed978055cec7596bdf2690d13b49,Deformable part models revisited: A performance evaluation for object category pose estimation,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+2aa531b4aaf005db13ff93cc1bea7602d7fe2efb,Lidar-based Vehicle Localization in an Autonomous Valet Parking Scenario,Ohio State University,The Ohio State University,"The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA",40.00471095,-83.02859368,edu,
+2a2fd2538e19652721bc664f92056fbd08c604fd,Surveillance Video Analysis with External Knowledge and Internal Constraints,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+2a2fd2538e19652721bc664f92056fbd08c604fd,Surveillance Video Analysis with External Knowledge and Internal Constraints,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+2a2fd2538e19652721bc664f92056fbd08c604fd,Surveillance Video Analysis with External Knowledge and Internal Constraints,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+2a2fd2538e19652721bc664f92056fbd08c604fd,Surveillance Video Analysis with External Knowledge and Internal Constraints,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+2aa06417fd361832df384cf7c003ed1d3c5ee8df,Learning people detection models from few training samples,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+2ab034e1f54c37bfc8ae93f7320160748310dc73,Siamese Capsule Networks,University of Liverpool,University of Liverpool,"Victoria Building, Brownlow Hill, Knowledge Quarter, Liverpool, North West England, England, L3, UK",53.40617900,-2.96670819,edu,
+2a3991ae72740f3661f98d2ad58a0595bbcd07ad,Human Re-identification by Matching Compositional Template with Cluster Sampling,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+2a3991ae72740f3661f98d2ad58a0595bbcd07ad,Human Re-identification by Matching Compositional Template with Cluster Sampling,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+2fd96238a7e372146cdf6c2338edc932031dd1f0,Face Aging with Contextual Generative Adversarial Nets,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+2fd96238a7e372146cdf6c2338edc932031dd1f0,Face Aging with Contextual Generative Adversarial Nets,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+2ff9ffedfc59422a8c7dac418a02d1415eec92f1,Face Verification Using Boosted Cross-Image Features,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+2ff9ffedfc59422a8c7dac418a02d1415eec92f1,Face Verification Using Boosted Cross-Image Features,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+2ff9ffedfc59422a8c7dac418a02d1415eec92f1,Face Verification Using Boosted Cross-Image Features,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+2fad06ed34169a5b1f736112364c58140577a6b4,Pedestrian Color Naming via Convolutional Neural Network,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+2f20cf49eb6a0818313c29d64eb6d30ddfb8d747,Ranking Preserving Hashing for Fast Similarity Search,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+2f41c7ba65fa3d2819469fba450754266c98740e,Stixels Motion Estimation without Optical Flow Computation,Katholieke Universiteit Leuven,Katholieke Universiteit Leuven,"Laboratorium voor Bos, natuur en landschap, 102, Vital Decosterstraat, Sint-Maartensdal, Leuven, Vlaams-Brabant, Vlaanderen, 3000, België / Belgique / Belgien",50.88306860,4.70195030,edu,
+2f53b97f0de2194d588bc7fb920b89cd7bcf7663,Facial Expression Recognition Using Sparse Gaussian Conditional Random Field,Shiraz University,Shiraz University,"دانشگاه شیراز, میدان ارم, محدوده شهرداری منطقه یک - شهرداری شیراز, شیراز, بخش مرکزی شهرستان شیراز, شهرستان شیراز, استان فارس, 71348-34689, ‏ایران‎",29.63854740,52.52457060,edu,
+2f53b97f0de2194d588bc7fb920b89cd7bcf7663,Facial Expression Recognition Using Sparse Gaussian Conditional Random Field,Shiraz University,Shiraz University,"دانشگاه شیراز, میدان ارم, محدوده شهرداری منطقه یک - شهرداری شیراز, شیراز, بخش مرکزی شهرستان شیراز, شهرستان شیراز, استان فارس, 71348-34689, ‏ایران‎",29.63854740,52.52457060,edu,
+2f21c68ff9fbd82a3241f79f985ec7e1dcdac41a,Semantic Single-Image Dehazing,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+2f16baddac6af536451b3216b02d3480fc361ef4,Web-scale training for face identification,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+2f2aa67c5d6dbfaf218c104184a8c807e8b29286,Video analytics for surveillance camera networks,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+2fb71cb0f08102fe8c9ba5929c1dc96d87737039,Supervised and Unsupervised Transfer Learning for Question Answering,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+2f72cee2b9ae3d4271bda9f9bda1f11ad84ef616,End-to-End Detection and Re-identification Integrated Net for Person Search,Chongqing University,Chongqing University,"重庆工商大学, 19, 翠林路, 重庆市, 重庆市中心, 南岸区 (Nan'an), 重庆市, 400067, 中国",29.50841740,106.57858552,edu,
+2f72cee2b9ae3d4271bda9f9bda1f11ad84ef616,End-to-End Detection and Re-identification Integrated Net for Person Search,Hefei University of Technology,Hefei University of Technology,"合肥工业大学(屯溪路校区), 193号, 南一环路, 航运南村, 包公街道, 合肥市区, 合肥市, 安徽省, 230009, 中国",31.84691800,117.29053367,edu,
+2fc43c2c3f7ad1ca7a1ce32c5a9a98432725fb9a,Hierarchical Video Generation From Orthogonal Information: Optical Flow and Texture,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+2fc43c2c3f7ad1ca7a1ce32c5a9a98432725fb9a,Hierarchical Video Generation From Orthogonal Information: Optical Flow and Texture,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+2fc43c2c3f7ad1ca7a1ce32c5a9a98432725fb9a,Hierarchical Video Generation From Orthogonal Information: Optical Flow and Texture,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+2f951dcba9539270ca3feb9becc4539feb89e80a,A Generalized Probabilistic Framework for Compact Codebook Creation,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+2f951dcba9539270ca3feb9becc4539feb89e80a,A Generalized Probabilistic Framework for Compact Codebook Creation,University of Wollongong,University of Wollongong,"University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia",-34.40505545,150.87834655,edu,
+2f951dcba9539270ca3feb9becc4539feb89e80a,A Generalized Probabilistic Framework for Compact Codebook Creation,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+2f1b1cbdc1ea04be6f8c3ff08628b5eba9f01771,Boosted deformable model for human body alignment,GE Global Research Center,GE Global Research Center,"GE Global Research Center, Aqueduct, Niskayuna, Schenectady County, New York, USA",42.82982480,-73.87719385,company,
+2fa1037496dbcc04b705fcc4e9ed58cdc85df46e,Security Analysis of Deep Neural Networks Operating in the Presence of Cache Side-Channel Attacks,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+2fa1037496dbcc04b705fcc4e9ed58cdc85df46e,Security Analysis of Deep Neural Networks Operating in the Presence of Cache Side-Channel Attacks,Rochester Institute of Technology,Rochester Institute of Technology,"Rochester Institute of Technology (RIT), 1, Lomb Memorial Drive, Bailey, Henrietta Town, Monroe County, New York, 14623, USA",43.08250655,-77.67121663,edu,
+2fa941ed0f9546796499782e285a14cabf0186de,ClassMap: Efficient Multiclass Recognition via Embeddings,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+2fa941ed0f9546796499782e285a14cabf0186de,ClassMap: Efficient Multiclass Recognition via Embeddings,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+2fa04fc0bcbc92886902a62dbf538c490084efa4,Visual field bias in hearing and deaf adults during judgments of facial expression and identity,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+2fa04fc0bcbc92886902a62dbf538c490084efa4,Visual field bias in hearing and deaf adults during judgments of facial expression and identity,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+2fa04fc0bcbc92886902a62dbf538c490084efa4,Visual field bias in hearing and deaf adults during judgments of facial expression and identity,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+2f52b6cd87e6d72a11168fef0865743dde9ea0ae,Adversarial Attacks Beyond the Image Space,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+2f52b6cd87e6d72a11168fef0865743dde9ea0ae,Adversarial Attacks Beyond the Image Space,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+2f52b6cd87e6d72a11168fef0865743dde9ea0ae,Adversarial Attacks Beyond the Image Space,Hong Kong University of Science and Technology,Hong Kong University of Science and Technology,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.33863040,114.26203370,edu,
+2fb1ecd1451dc0c016cfe4cc43cb9620a766f1b2,Maximizing AUC with Deep Learning for Classification of Imbalanced Mammogram Datasets,"IBM Research, North Carolina",IBM Research,"IBM, East Cornwallis Road, Research Triangle Park, Nelson, Durham County, North Carolina, 27709, USA",35.90422720,-78.85565763,company,
+2f5aa539fb27962aa4ba5b264ee503e6921bf531,Transfer re-identification: From person to set-based verification,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+2f5aa539fb27962aa4ba5b264ee503e6921bf531,Transfer re-identification: From person to set-based verification,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+2f2406551c693d616a840719ae1e6ea448e2f5d3,Age estimation from face images: Human vs. machine performance,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+2fbe4ffef775bb9c2cea535a07ecd48ef30adcaf,Gradient Histogram Background Modeling for People Detection in Stationary Camera Environments,University of Barcelona,University of Barcelona,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España",41.38689130,2.16352385,edu,
+2fbe4ffef775bb9c2cea535a07ecd48ef30adcaf,Gradient Histogram Background Modeling for People Detection in Stationary Camera Environments,University of Barcelona,University of Barcelona,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España",41.38689130,2.16352385,edu,
+2fbe4ffef775bb9c2cea535a07ecd48ef30adcaf,Gradient Histogram Background Modeling for People Detection in Stationary Camera Environments,University of Barcelona,University of Barcelona,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España",41.38689130,2.16352385,edu,
+2fa2a186dfae16958bb3bc8752c57a749ccb4f41,Robust Depth Image Acquisition Using Modulated Pattern Projection and Probabilistic Graphical Models,University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.05015580,14.46907327,edu,
+2f7fc778e3dec2300b4081ba2a1e52f669094fcd,Action Representation Using Classifier Decision Boundaries,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+2f0e5a4b0ef89dd2cf55a4ef65b5c78101c8bfa1,Facial Expression Recognition Using a Hybrid CNN-SIFT Aggregator,Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.64185301,edu,
+2f5e057e35a97278a9d824545d7196c301072ebf,Capturing Long-Tail Distributions of Object Subcategories,"University of California, Irvine","University of California, Irvine","University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA",33.64319010,-117.84016494,edu,
+2f5e057e35a97278a9d824545d7196c301072ebf,Capturing Long-Tail Distributions of Object Subcategories,"University of California, Irvine","University of California, Irvine","University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA",33.64319010,-117.84016494,edu,
+2f04ba0f74df046b0080ca78e56898bd4847898b,Aggregate channel features for multi-view face detection,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+43694e7d5861a8bc8aa5884dba3efe2d387511c6,Supplementary Material: Annotating Object Instances with a Polygon-RNN,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+4300c7773a130b4995f60ba5ed920dd1782a3527,Support Vector Machines in face recognition with occlusions,Ohio State University,The Ohio State University,"The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA",40.00471095,-83.02859368,edu,
+43010792bf5cdb536a95fba16b8841c534ded316,Towards general motion-based face recognition,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+4320b0b7e65607e96326990675ac15880dc08b89,A Design Methodology for Efficient Implementation of Deconvolutional Neural Networks on an FPGA,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+438c4b320b9a94a939af21061b4502f4a86960e3,Reconstruction-Based Disentanglement for Pose-Invariant Face Recognition,State University of New Jersey,The State University of New Jersey,"Rutgers New Brunswick: Livingston Campus, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA",40.51865195,-74.44099801,edu,
+43f0e2207d628deba1f91c810c38f33a1978cd58,Learning with Marginalized Corrupted Features,Delft University of Technology,Delft University of Technology,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland",51.99882735,4.37396037,edu,
+43f0e2207d628deba1f91c810c38f33a1978cd58,Learning with Marginalized Corrupted Features,Washington University,Washington University,"Dero, Wallace Drive, St. Louis County, Missouri, MO 63130, USA",38.64804450,-90.30996670,edu,
+439b6a5b91f1c5a751846bed7dd27c698a7ee2c4,Depth Information Guided Crowd Counting for Complex Crowd Scenes,Zhengzhou University,Zhengzhou University,"科学大道, 中原区 (Zhongyuan), 郑州市 / Zhengzhou, 河南省, 450001, 中国",34.80881680,113.53526640,edu,
+439b6a5b91f1c5a751846bed7dd27c698a7ee2c4,Depth Information Guided Crowd Counting for Complex Crowd Scenes,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+43e99b76ca8e31765d4571d609679a689afdc99e,Learning Dense Facial Correspondences in Unconstrained Images,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+431f013143de3159c0c0033fee2fb4840d213b6f,Preferential attention to animals and people is independent of the amygdala.,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+431f013143de3159c0c0033fee2fb4840d213b6f,Preferential attention to animals and people is independent of the amygdala.,Monash University,Monash University,"Monash University, Mile Lane, Parkville, City of Melbourne, Victoria, 3000, Australia",-37.78397455,144.95867433,edu,
+431f013143de3159c0c0033fee2fb4840d213b6f,Preferential attention to animals and people is independent of the amygdala.,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+431f013143de3159c0c0033fee2fb4840d213b6f,Preferential attention to animals and people is independent of the amygdala.,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+4377b03bbee1f2cf99950019a8d4111f8de9c34a,Selective Encoding for Recognizing Unreliably Localized Faces,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+43a03cbe8b704f31046a5aba05153eb3d6de4142,Towards Robust Face Recognition from Video,Oak Ridge National Laboratory,Oak Ridge National Laboratory,"Oak Ridge National Laboratory, Oak Ridge, Roane County, Tennessee, USA",35.93006535,-84.31240032,edu,
+4307e8f33f9e6c07c8fc2aeafc30b22836649d8c,Supervised Earth Mover's Distance Learning and Its Computer Vision Applications,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+435642641312364e45f4989fac0901b205c49d53,Face Model Compression by Distilling Knowledge from Neurons,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+435642641312364e45f4989fac0901b205c49d53,Face Model Compression by Distilling Knowledge from Neurons,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+435642641312364e45f4989fac0901b205c49d53,Face Model Compression by Distilling Knowledge from Neurons,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+435bc494d3606d1137fb8b70d481bd6497f15090,Object Recognition by Integrated Information Using Web Images,Kobe University,Kobe University,"神戸大学, 灘三田線, 灘区, 神戸市, 兵庫県, 近畿地方, 657-00027, 日本",34.72757140,135.23710000,edu,
+435bc494d3606d1137fb8b70d481bd6497f15090,Object Recognition by Integrated Information Using Web Images,Kobe University,Kobe University,"神戸大学, 灘三田線, 灘区, 神戸市, 兵庫県, 近畿地方, 657-00027, 日本",34.72757140,135.23710000,edu,
+435bc494d3606d1137fb8b70d481bd6497f15090,Object Recognition by Integrated Information Using Web Images,Kobe University,Kobe University,"神戸大学, 灘三田線, 灘区, 神戸市, 兵庫県, 近畿地方, 657-00027, 日本",34.72757140,135.23710000,edu,
+439f3a865dfb7b42c600a095a6fcee1c1f4768ad,Applying deep learning to classify pornographic images and videos,American University in Cairo,The American University in Cairo,"الجامعة الأمريكية بالقاهرة, شارع القصر العينى, القاهرة القديمة, جاردن سيتي, القاهرة, محافظة القاهرة, 11582, مصر",30.04287695,31.23664139,edu,
+438f1841a0b09c96759dc870d663d837d07388e3,Emotional Context Influences Micro-Expression Recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+438f1841a0b09c96759dc870d663d837d07388e3,Emotional Context Influences Micro-Expression Recognition,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+4335805938a35a47cf86c985e993f73060405679,Automatic red-eye effect removal using combined intensity and colour information,"COMSATS Institute of Information Technology, Lahore",COMSATS Institute of Information Technology,"COMSATS Institute of Information Technology, Ali Akbar Road, Dawood Residency, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54700, ‏پاکستان‎",31.40063320,74.21372960,edu,
+4335805938a35a47cf86c985e993f73060405679,Automatic red-eye effect removal using combined intensity and colour information,Myongji University,Myongji University,"명지대, 금학로, 역북동, 처인구, 용인시, 경기, 17144, 대한민국",37.23810230,127.19034310,edu,
+435514bc2103deb604d762095d8faf77be544b9a,Feature Localisation in Three-Dimensional Faces,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+436b2f5bf23bc0bd80680ee2ed279cbd55939b86,Visual Saliency Maps Can Apply to Facial Expression Recognition,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+436b2f5bf23bc0bd80680ee2ed279cbd55939b86,Visual Saliency Maps Can Apply to Facial Expression Recognition,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+43e5817c18ec570d614669e3940d82791d285a10,Learning Class Prototypes via Structure Alignment for Zero-Shot Recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+43e5817c18ec570d614669e3940d82791d285a10,Learning Class Prototypes via Structure Alignment for Zero-Shot Recognition,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+43f37a725dd58015bdca53937518042d81ca1078,Probabilistic fusion of gait features for biometric verification,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+43f37a725dd58015bdca53937518042d81ca1078,Probabilistic fusion of gait features for biometric verification,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+43f37a725dd58015bdca53937518042d81ca1078,Probabilistic fusion of gait features for biometric verification,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+434d6726229c0f556841fad20391c18316806f73,Detecting Visual Relationships with Deep Relational Networks,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+431e80aeee80a74f41d8af1336016340cd8e4848,Mapping Brain-Behavior Partial Correlations: Application to Autism,University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+431e80aeee80a74f41d8af1336016340cd8e4848,Mapping Brain-Behavior Partial Correlations: Application to Autism,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+43b3dc931cd43a490de3206fd041e118e3651d8a,Learning Hierarchical Semantic Image Manipulation through Structured Representations,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+435dc062d565ce87c6c20a5f49430eb9a4b573c4,Lighting Condition Adaptation for Perceived Age Estimation,Tokyo Institute of Technology,Tokyo Institute of Technology,"東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本",35.51675380,139.48342251,edu,
+433a6d6d2a3ed8a6502982dccc992f91d665b9b3,Transferring Landmark Annotations for Cross-Dataset Face Alignment,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+433a6d6d2a3ed8a6502982dccc992f91d665b9b3,Transferring Landmark Annotations for Cross-Dataset Face Alignment,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+438e7999c937b94f0f6384dbeaa3febff6d283b6,"Face Detection, Bounding Box Aggregation and Pose Estimation for Robust Facial Landmark Localisation in the Wild",University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+438e7999c937b94f0f6384dbeaa3febff6d283b6,"Face Detection, Bounding Box Aggregation and Pose Estimation for Robust Facial Landmark Localisation in the Wild",Jiangnan University,Jiangnan University,"江南大学站, 蠡湖大道, 滨湖区, 南场村, 滨湖区 (Binhu), 无锡市 / Wuxi, 江苏省, 214121, 中国",31.48542550,120.27395810,edu,
+43776d1bfa531e66d5e9826ff5529345b792def7,Automatic Critical Event Extraction and Semantic Interpretation by Looking-Inside,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+4308bd8c28e37e2ed9a3fcfe74d5436cce34b410,Scalable Person Re-identification: A Benchmark,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+4308bd8c28e37e2ed9a3fcfe74d5436cce34b410,Scalable Person Re-identification: A Benchmark,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+432d8cba544bf7b09b0455561fea098177a85db1,Towards a Neural Statistician,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+432d8cba544bf7b09b0455561fea098177a85db1,Towards a Neural Statistician,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+437edf4b1e8939a3833d8eb814447d9132d7d758,Image matching with distinctive visual vocabulary,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+4348c8706b92a9bd90dbbd735f824ec79e96dd71,Pitfalls in Designing Zero-Effort Deauthentication: Opportunistic Human Observation Attacks,Aalto University,Aalto University,"Aalto, 24, Otakaari, Otaniemi, Suur-Tapiola, Espoo, Helsingin seutukunta, Uusimaa, Etelä-Suomi, Manner-Suomi, 02150, Suomi",60.18558755,24.82427330,edu,
+43bb4b073f7b2b9b626c7f3263cc61932271ab74,User-guided Hierarchical Attention Network for Multi-modal Social Image Popularity Prediction,East China Normal University,East China Normal University,"华东师范大学, 3663, 中山北路, 曹家渡, 普陀区, 普陀区 (Putuo), 上海市, 200062, 中国",31.22849230,121.40211389,edu,
+43bb4b073f7b2b9b626c7f3263cc61932271ab74,User-guided Hierarchical Attention Network for Multi-modal Social Image Popularity Prediction,East China Normal University,East China Normal University,"华东师范大学, 3663, 中山北路, 曹家渡, 普陀区, 普陀区 (Putuo), 上海市, 200062, 中国",31.22849230,121.40211389,edu,
+43bb4b073f7b2b9b626c7f3263cc61932271ab74,User-guided Hierarchical Attention Network for Multi-modal Social Image Popularity Prediction,East China Normal University,East China Normal University,"华东师范大学, 3663, 中山北路, 曹家渡, 普陀区, 普陀区 (Putuo), 上海市, 200062, 中国",31.22849230,121.40211389,edu,
+43bb4b073f7b2b9b626c7f3263cc61932271ab74,User-guided Hierarchical Attention Network for Multi-modal Social Image Popularity Prediction,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+432d5eca44ff558642491f3bb7f44f500993fd38,Accurate face alignment and adaptive patch selection for heart rate estimation from videos under realistic scenarios,Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+439647914236431c858535a2354988dde042ef4d,Face illumination normalization on large and small scale features,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+439647914236431c858535a2354988dde042ef4d,Face illumination normalization on large and small scale features,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+439647914236431c858535a2354988dde042ef4d,Face illumination normalization on large and small scale features,Hong Kong Baptist University,Hong Kong Baptist University,"香港浸會大學 Hong Kong Baptist University, 安明街 On Ming Street, 石門 Shek Mun, 石古壟 Shek Kwu Lung, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1132, 中国",22.38742010,114.20822220,edu,
+43d36a22629114e14a0952675e15c9c76f1f024c,Deep Lambertian Networks,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+439ca6ded75dffa5ddea203dde5e621dc4a88c3e,Robust real-time performance-driven 3D face tracking,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+439ca6ded75dffa5ddea203dde5e621dc4a88c3e,Robust real-time performance-driven 3D face tracking,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+88e090ffc1f75eed720b5afb167523eb2e316f7f,Attribute-Based Transfer Learning for Object Categorization with Zero/One Training Example,University of Maryland College Park,University of Maryland College Park,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",38.99203005,-76.94610290,edu,
+8877e0b2dc3d2e8538c0cfee86b4e8657499a7c4,Automatic facial expression recognition for affective computing based on bag of distances,National Chung Cheng University,National Chung Cheng University,"國立中正大學, 168, 鳳凰大道, 民雄鄉, 嘉義縣, 62102, 臺灣",23.56306355,120.47510531,edu,
+8877e0b2dc3d2e8538c0cfee86b4e8657499a7c4,Automatic facial expression recognition for affective computing based on bag of distances,National Taichung University of Science and Technology,National Taichung University of Science and Technology,"臺中科大, 129, 三民路三段, 錦平里, 賴厝廍, 北區, 臺中市, 40401, 臺灣",24.15031065,120.68325501,edu,
+886fc74b943011ce5ce192ff98d6ea9dcac7ef11,Atypical scanpaths in schizophrenia: evidence of a trait- or state-dependent phenomenon?,University of Aberdeen,University of Aberdeen,"University of Aberdeen, High Street, Old Aberdeen, Aberdeen, Aberdeen City, Scotland, AB24 3EJ, UK",57.16461430,-2.10186013,edu,
+886fc74b943011ce5ce192ff98d6ea9dcac7ef11,Atypical scanpaths in schizophrenia: evidence of a trait- or state-dependent phenomenon?,University of Aberdeen,University of Aberdeen,"University of Aberdeen, High Street, Old Aberdeen, Aberdeen, Aberdeen City, Scotland, AB24 3EJ, UK",57.16461430,-2.10186013,edu,
+886fc74b943011ce5ce192ff98d6ea9dcac7ef11,Atypical scanpaths in schizophrenia: evidence of a trait- or state-dependent phenomenon?,University of Aberdeen,University of Aberdeen,"University of Aberdeen, High Street, Old Aberdeen, Aberdeen, Aberdeen City, Scotland, AB24 3EJ, UK",57.16461430,-2.10186013,edu,
+88af2da57863b60ddd3776d61113b552e827d3b8,3D Face Recognition by Sliding Complex Wavelet Structural Similarity Index on Detail Geometry Images,Southeast University,Southeast University,"SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国",32.05752790,118.78682252,edu,
+88f7a3d6f0521803ca59fde45601e94c3a34a403,Semantic Aware Video Transcription Using Random Forest Classifiers,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+8812aef6bdac056b00525f0642702ecf8d57790b,A Unified Features Approach to Human Face Image Analysis and Interpretation,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+88f8519f442826f9b7b2649c1cfcbc5c82160428,Gender Classification Based on Support Vector Machine with Automatic Confidence,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+887502ea2d8a335d8e72deb23fec2784df713b8d,Nonlinear Local Metric Learning for Person Re-identification,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+887502ea2d8a335d8e72deb23fec2784df713b8d,Nonlinear Local Metric Learning for Person Re-identification,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+88e2574af83db7281c2064e5194c7d5dfa649846,A Robust Shape Reconstruction Method for Facial Feature Point Detection,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"电子科技大学, 建设北路, 成华区, 成都市, 四川省, 610054, 中国",30.67660000,104.10340000,edu,
+88bef50410cea3c749c61ed68808fcff84840c37,Sparse representations of image gradient orientations for visual recognition and tracking,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+88bef50410cea3c749c61ed68808fcff84840c37,Sparse representations of image gradient orientations for visual recognition and tracking,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+88a0ff6b180703a2d90bc86b40520e35a08fe02c,The Normalized Distance Preserving Binary Codes and Distance Table,Jilin University,Jilin University,"吉林大学珠海校区, 丹桂路, 圣堂村, 金湾区, 珠海市, 广东省, 中国",22.05356500,113.39913285,edu,
+8856fbf333b2aba7b9f1f746e16a2b7f083ee5b8,Analyzing animal behavior via classifying each video frame using convolutional neural networks,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+8820d1d3fa73cde623662d92ecf2e3faf1e3f328,Continuous Video to Simple Signals for Swimming Stroke Detection with Convolutional Neural Networks,La Trobe University,La Trobe University,"La Trobe University, Keck Street, Flora Hill, Bendigo, City of Greater Bendigo, Loddon Mallee, Victoria, 3550, Australia",-36.77847540,144.29804700,edu,
+8820d1d3fa73cde623662d92ecf2e3faf1e3f328,Continuous Video to Simple Signals for Swimming Stroke Detection with Convolutional Neural Networks,Australian Institute of Sport,Australian Institute of Sport,"Australian Institute of Sport, Glenn McGrath Street, Bruce, Belconnen, Australian Capital Territory, 2617, Australia",-35.24737535,149.10445427,edu,
+8818b12aa0ff3bf0b20f9caa250395cbea0e8769,Fashion Conversation Data on Instagram,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+8862a573a42bbaedd392e9e634c1ccbfd177a01d,Real-Time 3D Face Fitting and Texture Fusion on In-the-Wild Videos,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+8862a573a42bbaedd392e9e634c1ccbfd177a01d,Real-Time 3D Face Fitting and Texture Fusion on In-the-Wild Videos,Reutlingen University,Reutlingen University,"Campus Hohbuch, Campus Hochschule Reutlingen, Reutlingen, Landkreis Reutlingen, Regierungsbezirk Tübingen, Baden-Württemberg, 72762, Deutschland",48.48187645,9.18682404,edu,
+88e30a988d4a496d61eb241d4cafe5cc88688ae6,Using attributes for word spotting and recognition in polytonic greek documents,University of Ioannina,University of Ioannina,"Πανεπιστήμιο Ιωαννίνων, Πανεπιστημίου, Κάτω Νεοχωρόπουλο, Νεοχωρόπουλο, Δήμος Ιωαννιτών, Π.Ε. Ιωαννίνων, Περιφέρεια Ηπείρου, Ήπειρος - Δυτική Μακεδονία, 45110, Ελλάδα",39.61623060,20.83963011,edu,
+8824638e8077f62283d292804006ce94c92764bf,M2M-GAN: Many-to-Many Generative Adversarial Transfer Learning for Person Re-Identification,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+88cd4209db62a34d9cba0b9cbe9d45d1e57d21e5,Runtime Neural Pruning,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+88cd4209db62a34d9cba0b9cbe9d45d1e57d21e5,Runtime Neural Pruning,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+88cd4209db62a34d9cba0b9cbe9d45d1e57d21e5,Runtime Neural Pruning,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+88cd4209db62a34d9cba0b9cbe9d45d1e57d21e5,Runtime Neural Pruning,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+8895d6ae9f095a8413f663cc83f5b7634b3dc805,Incremental Tube Construction for Human Action Detection,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+8895d6ae9f095a8413f663cc83f5b7634b3dc805,Incremental Tube Construction for Human Action Detection,Oxford Brookes University,Oxford Brookes University,"Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK",51.75552050,-1.22615970,edu,
+883767948f535ea2bf8a0c03047ca9064e1b078f,A Combination of Object Recognition and Localisation for an Autonomous Racecar,"KTH Royal Institute of Technology, Stockholm","KTH Royal Institute of Technology, Stockholm","KTH, Teknikringen, Lärkstaden, Norra Djurgården, Östermalms stadsdelsområde, Sthlm, Stockholm, Stockholms län, Svealand, 114 28, Sverige",59.34986645,18.07063213,edu,
+88bf14cd272fda73e5bc8fb48102a93149792e37,Coarse-to-Fine Volumetric Prediction for Single-Image 3D Human Pose,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+88bf14cd272fda73e5bc8fb48102a93149792e37,Coarse-to-Fine Volumetric Prediction for Single-Image 3D Human Pose,Ryerson University,Ryerson University,"Ryerson University, Gould Street, Downtown Yonge, Old Toronto, Toronto, Ontario, M5B 2G9, Canada",43.65815275,-79.37908010,edu,
+8880af06d8497e9deda01e0a0eabacf9e1cf0490,Editable Generative Adversarial Networks: Generating and Editing Faces Simultaneously,Yonsei University,Yonsei University,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.56004060,126.93692480,edu,
+880760777e3671593ba50b7a17b0d30b655fc86d,"Visual Question Answering: Datasets, Methods, Challenges and Opportunities",Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+887745c282edf9af40d38425d5fdc9b3fe139c08,FAME: Face Association through Model Evolution,Bilkent University,Bilkent University,"Bilkent Üniversitesi, 3. Cadde, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87204890,32.75395155,edu,
+887745c282edf9af40d38425d5fdc9b3fe139c08,FAME: Face Association through Model Evolution,Bilkent University,Bilkent University,"Bilkent Üniversitesi, 3. Cadde, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87204890,32.75395155,edu,
+9f59d0a003558066d2ff4fc1c77f461b4d233663,Training Convolutional Networks with Noisy Labels,Courant Institute of Mathematical Sciences,Courant Institute of Mathematical Sciences,"Courant Institute of Mathematical Sciences, 251, Mercer Street, Washington Square Village, Greenwich Village, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72864840,-73.99568630,edu,
+9f59d0a003558066d2ff4fc1c77f461b4d233663,Training Convolutional Networks with Noisy Labels,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+9f59d0a003558066d2ff4fc1c77f461b4d233663,Training Convolutional Networks with Noisy Labels,Courant Institute of Mathematical Sciences,Courant Institute of Mathematical Sciences,"Courant Institute of Mathematical Sciences, 251, Mercer Street, Washington Square Village, Greenwich Village, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72864840,-73.99568630,edu,
+9f59d0a003558066d2ff4fc1c77f461b4d233663,Training Convolutional Networks with Noisy Labels,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+9fec253eb41438a9ab13bd5156a18c2c08ff610a,Yum-Me: A Personalized Nutrient-Based Meal Recommender System,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+9fec253eb41438a9ab13bd5156a18c2c08ff610a,Yum-Me: A Personalized Nutrient-Based Meal Recommender System,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+9fec253eb41438a9ab13bd5156a18c2c08ff610a,Yum-Me: A Personalized Nutrient-Based Meal Recommender System,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+9fec253eb41438a9ab13bd5156a18c2c08ff610a,Yum-Me: A Personalized Nutrient-Based Meal Recommender System,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+9fec253eb41438a9ab13bd5156a18c2c08ff610a,Yum-Me: A Personalized Nutrient-Based Meal Recommender System,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+9fec253eb41438a9ab13bd5156a18c2c08ff610a,Yum-Me: A Personalized Nutrient-Based Meal Recommender System,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+9fec253eb41438a9ab13bd5156a18c2c08ff610a,Yum-Me: A Personalized Nutrient-Based Meal Recommender System,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+9f949f6e40e604ef05ed690ad732a2f6625997b1,Understanding Everyday Hands in Action from RGB-D Images,"University of California, Irvine","University of California, Irvine","University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA",33.64319010,-117.84016494,edu,
+9f949f6e40e604ef05ed690ad732a2f6625997b1,Understanding Everyday Hands in Action from RGB-D Images,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+9f33fe98e70c049ddf932247a44b9c9af85cf9cb,Detection of Anchor Points for 3D Face Verification,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+9f5ce56dd0900368ff6f0bc4a4055e6f4ceb0bc7,Beauty-in-averageness and its contextual modulations : A Bayesian statistical account,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+9f5ce56dd0900368ff6f0bc4a4055e6f4ceb0bc7,Beauty-in-averageness and its contextual modulations : A Bayesian statistical account,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+9f6d04ce617d24c8001a9a31f11a594bd6fe3510,Attentional bias towards angry faces in trait-reappraisal,University of Alberta,University of Alberta,"University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada",53.52385720,-113.52282665,edu,
+9f0c6797560de7f23bd3b016c9c328787c4cebf9,Automating Generation of Low Precision Deep Learning Operators,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+9f0c6797560de7f23bd3b016c9c328787c4cebf9,Automating Generation of Low Precision Deep Learning Operators,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+9f0c6797560de7f23bd3b016c9c328787c4cebf9,Automating Generation of Low Precision Deep Learning Operators,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+9f0c6797560de7f23bd3b016c9c328787c4cebf9,Automating Generation of Low Precision Deep Learning Operators,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+9f499948121abb47b31ca904030243e924585d5f,Hierarchical Attention Network for Action Recognition in Videos,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+9f499948121abb47b31ca904030243e924585d5f,Hierarchical Attention Network for Action Recognition in Videos,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+9f499948121abb47b31ca904030243e924585d5f,Hierarchical Attention Network for Action Recognition in Videos,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+9f2d7b7f5d983cfc02dc3b06dadddc4902afdd83,Semi-supervised learning for scalable and robust visual search,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+9fb7a23910f6464902f1b653025f3aeaa20b90dd,CNN-Based cascaded multi-task learning of high-level prior and density estimation for crowd counting,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+9f61362052e7675b3053a9d1b682ad917ce0e3d1,Social relevance drives viewing behavior independent of low-level salience in rhesus macaques,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+9f61362052e7675b3053a9d1b682ad917ce0e3d1,Social relevance drives viewing behavior independent of low-level salience in rhesus macaques,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+9f61362052e7675b3053a9d1b682ad917ce0e3d1,Social relevance drives viewing behavior independent of low-level salience in rhesus macaques,Yale University,Yale University,"Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA",41.25713055,-72.98966960,edu,
+9f61362052e7675b3053a9d1b682ad917ce0e3d1,Social relevance drives viewing behavior independent of low-level salience in rhesus macaques,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+9f6ca02ade848368a5e762cc3cf55a881c082faa,Motion Feature Network: Fixed Motion Filter for Action Recognition,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+9fb93b7c2bae866608f26c4254e5bd69cc5031d6,Fast Geometrically-Perturbed Adversarial Faces,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+9fc04a13eef99851136eadff52e98eb9caac919d,Rethinking the Camera Pipeline for Computer Vision,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+9fc04a13eef99851136eadff52e98eb9caac919d,Rethinking the Camera Pipeline for Computer Vision,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+9fc04a13eef99851136eadff52e98eb9caac919d,Rethinking the Camera Pipeline for Computer Vision,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+9f4078773c8ea3f37951bf617dbce1d4b3795839,Leveraging Inexpensive Supervision Signals for Visual Learning,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+9f4078773c8ea3f37951bf617dbce1d4b3795839,Leveraging Inexpensive Supervision Signals for Visual Learning,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+9f829eb41c2ecb850fe20329e7da06eb369151f9,Deep Representation Learning with Target Coding,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+9f829eb41c2ecb850fe20329e7da06eb369151f9,Deep Representation Learning with Target Coding,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+9f829eb41c2ecb850fe20329e7da06eb369151f9,Deep Representation Learning with Target Coding,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+9f5a73e6282c8c1c569622ce9eb505be237c2971,Localizing Actions from Video Labels and Pseudo-Annotations,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+9f5a73e6282c8c1c569622ce9eb505be237c2971,Localizing Actions from Video Labels and Pseudo-Annotations,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+9f65319b8a33c8ec11da2f034731d928bf92e29d,Taking Roll: a Pipeline for Face Recognition,Louisiana State University,Louisiana State University,"LSU, Gourrier Avenue, Baton Rouge, East Baton Rouge Parish, Louisiana, 70803, USA",30.40550035,-91.18620474,edu,
+9f5383ec6ee5e810679e4a7e0a3f153f0ed3bb73,3D Shape and Pose Estimation of Face Images Using the Nonlinear Least-Squares Model,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+9f5383ec6ee5e810679e4a7e0a3f153f0ed3bb73,3D Shape and Pose Estimation of Face Images Using the Nonlinear Least-Squares Model,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+9fe55487c40983b1da71c073104cdc2d6f5cc7bf,"Hybrid Human-Machine Vision Systems: Image Annotation using Crowds, Experts and Machines",California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+9fa5d5b2cd6d625973d735e70d44824eb0118a33,Contour-Based Large Scale Image Retrieval,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+9fb31d0375552500bd494af20ab0c3109c9be3d2,Video Fill in the Blank with Merging LSTMs,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+9fbc0135e76b0fd972517e06e833593ecf6ac49a,Human emotions track changes in the acoustic environment.,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"电子科技大学, 建设北路, 成华区, 成都市, 四川省, 610054, 中国",30.67660000,104.10340000,edu,
+9fbc0135e76b0fd972517e06e833593ecf6ac49a,Human emotions track changes in the acoustic environment.,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+9f1b39e8d157b74181c666e85e5d55550d762409,Three-Stream Convolutional Networks for Video-based Person Re-Identification,Southwest Jiaotong University,Southwest Jiaotong University,"西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国",30.69784700,104.05208110,edu,
+9fae24003bbedecdb617f9779215d79d06b90dd8,Where Are the Blobs: Counting by Localization with Point Supervision,University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+9fdfe1695adac2380f99d3d5cb6879f0ac7f2bfd,Active Tracking and Cloning of Facial Expressions Using Spatio-Temporal Information,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+9fdfe1695adac2380f99d3d5cb6879f0ac7f2bfd,Active Tracking and Cloning of Facial Expressions Using Spatio-Temporal Information,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+6b011aa54aeabae8ac172a0cf0dd4333d1bfd327,Supervised algorithm selection for flow and other computer vision problems,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+6b3e360b80268fda4e37ff39b7f303e3684e8719,Face Recognition from Sketches Using Advanced Correlation Filters Using Hybrid Eigenanalysis for Face Synthesis,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+6bfb0f8dd1a2c0b44347f09006dc991b8a08559c,Multiview discriminative learning for age-invariant face recognition,Lomonosov Moscow State University,Lomonosov Moscow State University,"МГУ, улица Академика Хохлова, Московский государственный университет им. М. В. Ломоносова, район Раменки, Западный административный округ, Москва, ЦФО, 119234, РФ",55.70229715,37.53179777,edu,
+6bfb0f8dd1a2c0b44347f09006dc991b8a08559c,Multiview discriminative learning for age-invariant face recognition,"Advanced Digital Sciences Center, Singapore","Advanced Digital Sciences Center, Singapore","1 Create Way, 14-02 Create Tower, Singapore 138602",1.30372570,103.77377630,edu,
+6bfb0f8dd1a2c0b44347f09006dc991b8a08559c,Multiview discriminative learning for age-invariant face recognition,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+6bfb0f8dd1a2c0b44347f09006dc991b8a08559c,Multiview discriminative learning for age-invariant face recognition,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+6b6866fbb4354e30ab34db9d6a8a07da4bf25777,Biometrics of Next Generation : An Overview,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+6b6866fbb4354e30ab34db9d6a8a07da4bf25777,Biometrics of Next Generation : An Overview,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+6b359aefefe6b6c511c41afb873820462f5f42cc,Multi-view Gait Based Biometric System,Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.64185301,edu,
+6b359aefefe6b6c511c41afb873820462f5f42cc,Multi-view Gait Based Biometric System,Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.64185301,edu,
+6b1f12995c88412607d8c36b3d5b0aa6a5cba7a3,Learning semantic attributes via a common latent space,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+6b9e8acef979c13fa9ecc8fe9b635b312fedbcbe,Multiple Structured-Instance Learning for Semantic Segmentation with Uncertain Training Data,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+6bda5819d9bc2e174902d839a12127a57fdb43f7,A Precise Eye Localization Method Based on Ratio Local Binary Pattern,Shandong University,Shandong University,"山东大学, 泰安街, 鳌山卫街道, 即墨区, 青岛市, 山东省, 266200, 中国",36.36934730,120.67381800,edu,
+6bdfd62ae9eb026dbc37f6f2db897fbee5cf8a5d,Randomised Manifold Forests for Principal Angle-Based Face Recognition,Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.02223470,77.56718325,edu,
+6bdfd62ae9eb026dbc37f6f2db897fbee5cf8a5d,Randomised Manifold Forests for Principal Angle-Based Face Recognition,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+6bca0d1f46b0f7546ad4846e89b6b842d538ee4e,Face Recognition from Surveillance - Quality Video,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+6b6afc9557dc0670bf2792bde4c4389ac52c707f,What Action Causes This? Towards Naive Physical Action-Effect Prediction,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+6b6afc9557dc0670bf2792bde4c4389ac52c707f,What Action Causes This? Towards Naive Physical Action-Effect Prediction,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+6b8d0569fffce5cc221560d459d6aa10c4db2f03,Interlinked Convolutional Neural Networks for Face Parsing,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+6b68e3388ddecfcb0671dee6fba9a895aaf3d4e3,Fusing Shape and Appearance Information for Object Category Detection,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+6b68e3388ddecfcb0671dee6fba9a895aaf3d4e3,Fusing Shape and Appearance Information for Object Category Detection,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+6bcee7dba5ed67b3f9926d2ae49f9a54dee64643,Assessment of Time Dependency in Face Recognition: An Initial Study,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+6bb51f431f348b2b3e1db859827e80f97a576c30,Irregular Convolutional Neural Networks,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+6b5f32d129f73bd1e2aa8323bf78cec3ed12c539,Facial Expression Recognition Based on 3D Dynamic Range Model Sequences,SUNY Binghamton,State University of New York at Binghamton,"State University of New York at Binghamton, East Drive, Hinman, Willow Point, Vestal Town, Broome County, New York, 13790, USA",42.08779975,-75.97066066,edu,
+6b5cf028b9fa3191119067f087b189d97017d31f,Online Invigilation: A Holistic Approach: Process for Automated Online Invigilation,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+6b5cf028b9fa3191119067f087b189d97017d31f,Online Invigilation: A Holistic Approach: Process for Automated Online Invigilation,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+6b5cf028b9fa3191119067f087b189d97017d31f,Online Invigilation: A Holistic Approach: Process for Automated Online Invigilation,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+6b7f7817b2e5a7e7d409af2254a903fc0d6e02b6,Feature Extraction through Cross-Phase Congruency for Facial Expression Analysis,University of Oradea,University of Oradea,"Universitatea Creștină Partium - Clădirea Sulyok, 27, Strada Primăriei, Orașul Nou, Oradea, Bihor, 410209, România",47.05702220,21.92270900,edu,
+6bb8a5f9e2ddf1bdcd42aa7212eb0499992c1e9e,A Siamese Long Short-Term Memory Architecture for Human Re-identification,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+6bb8a5f9e2ddf1bdcd42aa7212eb0499992c1e9e,A Siamese Long Short-Term Memory Architecture for Human Re-identification,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+6bb8a5f9e2ddf1bdcd42aa7212eb0499992c1e9e,A Siamese Long Short-Term Memory Architecture for Human Re-identification,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3,DeepReID: Deep Filter Pairing Neural Network for Person Re-identification,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+6bb0425baac448297fbd29a00e9c9b9926ce8870,Facial Expression Recognition Using Log-Gabor Filters and Local Binary Pattern Operators,RMIT University,RMIT University,"RMIT University, 124, La Trobe Street, Melbourne City, City of Melbourne, Victoria, 3000, Australia",-37.80874650,144.96388750,edu,
+6bccfe8068da78fe3caa43bba686919513fe451f,"Statistical Part-Based Models: Theory and Applications in Image Similarity, Object Detection and Region Labeling",Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+6bc459c548bba7a04e2e255845b28060ec390407,The red one!: On learning to refer to things based on their discriminative properties,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+6b17b219bd1a718b5cd63427032d93c603fcf24f,Videos from the 2013 Boston Marathon: An Event Reconstruction Dataset for Synchronization and Localization,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+6b17b219bd1a718b5cd63427032d93c603fcf24f,Videos from the 2013 Boston Marathon: An Event Reconstruction Dataset for Synchronization and Localization,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+6b17b219bd1a718b5cd63427032d93c603fcf24f,Videos from the 2013 Boston Marathon: An Event Reconstruction Dataset for Synchronization and Localization,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+6b17b219bd1a718b5cd63427032d93c603fcf24f,Videos from the 2013 Boston Marathon: An Event Reconstruction Dataset for Synchronization and Localization,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+6b17b219bd1a718b5cd63427032d93c603fcf24f,Videos from the 2013 Boston Marathon: An Event Reconstruction Dataset for Synchronization and Localization,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+6b17b219bd1a718b5cd63427032d93c603fcf24f,Videos from the 2013 Boston Marathon: An Event Reconstruction Dataset for Synchronization and Localization,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+6bb19408458dbae075be7f1612b969b565b4767a,Approximate Log-Hilbert-Schmidt Distances between Covariance Operators for Image Classification,Dartmouth College,Dartmouth College,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA",43.70479270,-72.29259090,edu,
+6b47b1c5a628ddb939d0088b36753ca29b3f9b76,Real-time Three-stage Eye Feature Extraction,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+6b47b1c5a628ddb939d0088b36753ca29b3f9b76,Real-time Three-stage Eye Feature Extraction,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+07377c375ac76a34331c660fe87ebd7f9b3d74c4,Detailed Human Avatars from Monocular Video,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+0729628db4bb99f1f70dd6cb2353d7b76a9fce47,Separating pose and expression in face images: a manifold learning approach,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+07f4ba45b771ed123b08261d88acda19406a7987,Real-Time Multiple People Tracking with Deeply Learned Candidate Selection and Person Re-Identification,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+0744143542ffcb45b1ad83078c23efa9d3ec2be4,Multispectral Pedestrian Detection via Simultaneous Detection and Segmentation,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+0790c400bfe6fbefe88ef7791476e1abf1952089,Deep Gaussian Conditional Random Field Network: A Model-Based Deep Network for Discriminative Denoising,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+07c90e85ac0f74b977babe245dea0f0abcf177e3,An Image Preprocessing Algorithm for Illumination Invariant Face Recognition,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1,Large scale unconstrained open set face database,University of Colorado at Colorado Springs,University of Colorado at Colorado Springs,"1420 Austin Bluffs Pkwy, Colorado Springs, CO 80918, USA",38.89646790,-104.80505940,edu,
+076d3fc800d882445c11b9af466c3af7d2afc64f,Face attribute classification using attribute-aware correlation map and gated convolutional neural networks,Korea Advanced Institute of Science and Technology,Korea Advanced Institute of Science and Technology,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.36971910,127.36253700,edu,
+07ed099e7d9c88d8e272d7191a4c7c5a68e3a6bd,Exploring Local Context for Multi-target Tracking in Wide Area Aerial Surveillance,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+0773c320713dae62848fceac5a0ac346ba224eca,Digital facial augmentation for interactive entertainment,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+07a29f43713833da42b24e3915b63601c39d7627,Action Recognition and Localization by Hierarchical Space-Time Segments,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+07a29f43713833da42b24e3915b63601c39d7627,Action Recognition and Localization by Hierarchical Space-Time Segments,Hacettepe University,Hacettepe University,"Hacettepe Üniversitesi Beytepe Kampüsü, Hacettepe-Beytepe Kampüs Yolu, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.86742125,32.73519072,edu,
+0701f2ee5a06e9ab760ab9326a33b1d4b8d83414,How many pixels make an image?,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+071777bc168e9940bb04b207d3b061bbd5a0c01a,Improving Point of View Scene Recognition by Considering Textual Data,Kyushu University,Kyushu University,"伊都ゲストハウス, 桜井太郎丸線, 西区, 福岡市, 福岡県, 九州地方, 819−0395, 日本",33.59914655,130.22359848,edu,
+070de852bc6eb275d7ca3a9cdde8f6be8795d1a3,A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling,University of Bath,University of Bath,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK",51.37914420,-2.32523320,edu,
+070de852bc6eb275d7ca3a9cdde8f6be8795d1a3,A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling,Jacobs University,Jacobs University,"Liverpool Hope University, Shaw Street, Everton, Liverpool, North West England, England, L6 1HP, UK",53.41291480,-2.96897915,edu,
+070de852bc6eb275d7ca3a9cdde8f6be8795d1a3,A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+076fd6fd85b93858155a1c775f1897f83d52b4c2,Improving an Object Detector and Extracting Regions Using Superpixels,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+07191c2047b5b643dd72a0583c1d537ba59f977a,Interactive Segmentation from 1-Bit Feedback,National Tsing Hua University,National Tsing Hua University,"國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.79254840,120.99511830,edu,
+074acb048b09fc95a2201ff00f67fd743b73e1fd,Looking around the backyard helps to recognize faces and digits,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+07050b9fcf949487e32aa30d0534e46d7eea58b0,Audio-Video Biometric System with Liveness Checks,University of Canberra,University of Canberra,"University of Canberra, University Drive, Bruce, Belconnen, Australian Capital Territory, 2617, Australia",-35.23656905,149.08446994,edu,
+07a472ea4b5a28b93678a2dcf89028b086e481a2,Head Dynamic Analysis: A Multi-view Framework,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+079e20d0d870a5bade46cc9b4338a3d637399654,"Semantic Segmentation , Urban Navigation , and Research Directions",Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+07faf42fe021a0965a07ef7273d89a452aec6b90,End-to-End Eye Movement Detection Using Convolutional Neural Networks,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+07faf42fe021a0965a07ef7273d89a452aec6b90,End-to-End Eye Movement Detection Using Convolutional Neural Networks,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+075d9baf2ac827327a5fe63bb1f873c4f54f95df,Robust multi-pose face tracking by multi-stage tracklet association,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+075d9baf2ac827327a5fe63bb1f873c4f54f95df,Robust multi-pose face tracking by multi-stage tracklet association,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+07fa153b8e6196ee6ef6efd8b743de8485a07453,Action Prediction From Videos via Memorizing Hard-to-Predict Samples,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+07fa153b8e6196ee6ef6efd8b743de8485a07453,Action Prediction From Videos via Memorizing Hard-to-Predict Samples,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+07fa153b8e6196ee6ef6efd8b743de8485a07453,Action Prediction From Videos via Memorizing Hard-to-Predict Samples,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+0708059e3bedbea1cbfae1c8cd6b7259d4b56b5b,Graph-regularized multi-class support vector machines for face and action recognition,Tampere University of Technology,Tampere University of Technology,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi",61.44964205,23.85877462,edu,
+074af31bd9caa61fea3c4216731420bd7c08b96a,Face verification using sparse representations,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+074af31bd9caa61fea3c4216731420bd7c08b96a,Face verification using sparse representations,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+0740f71446e99273b89d89fa05ab439dc58c12e1,Reliable mapping and partitioning of performance-constrained openCL applications on CPU-GPU MPSoCs,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+074c658fac7d7ebd88be8a24b46f2b301a9aeeeb,Alternating Decision Forests,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+07f31bef7a7035792e3791473b3c58d03928abbf,Lessons from collecting a million biometric samples,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+07f31bef7a7035792e3791473b3c58d03928abbf,Lessons from collecting a million biometric samples,National Institute of Standards and Technology,National Institute of Standards and Technology,"National Institute of Standards and Technology, Summer Walk Drive, Diamond Farms, Gaithersburg, Montgomery County, Maryland, 20878, USA",39.12549380,-77.22293475,edu,
+07b8a9a225b738c4074a50cf80ee5fe516878421,Convolutional Simplex Projection Network for Weakly Supervised Semantic Segmentation,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+07b358a22cbfba084189d287ba1ba50055c3cd09,TernausNetV2: Fully Convolutional Network for Instance Segmentation,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+07de8371ad4901356145722aa29abaeafd0986b9,Towards Usable Multimedia Event Detection,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+07eb30d6bcb96d7d66192f0cf43038eabd6fdd13,Multivariate Amygdala Shape Modeling,University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+07e639abf1621ceff27c9e3f548fadfa2052c912,5-HTTLPR Expression Outside the Skin: An Experimental Test of the Emotional Reactivity Hypothesis in Children,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+07e639abf1621ceff27c9e3f548fadfa2052c912,5-HTTLPR Expression Outside the Skin: An Experimental Test of the Emotional Reactivity Hypothesis in Children,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+07da958db2e561cc7c24e334b543d49084dd1809,Dictionary learning based dimensionality reduction for classification,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+07d249512522ae946089460c086b98205bcd17f3,Complex loss optimization via dual decomposition,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+0700d9c983b9c52341a4e17b70bdaff59cb539e5,Discovering Semantic Vocabularies for Cross-Media Retrieval,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+07d986b1005593eda1aeb3b1d24078db864f8f6a,Facial Expression Recognition Using Local Facial Features,National University of Kaohsiung,National University of Kaohsiung,"國立高雄大學, 中央廣場, 藍田, 藍田里, 楠梓區, 高雄市, 811, 臺灣",22.73424255,120.28349755,edu,
+07d986b1005593eda1aeb3b1d24078db864f8f6a,Facial Expression Recognition Using Local Facial Features,National University of Kaohsiung,National University of Kaohsiung,"國立高雄大學, 中央廣場, 藍田, 藍田里, 楠梓區, 高雄市, 811, 臺灣",22.73424255,120.28349755,edu,
+07e6d293498c4f9048ee5a67ad32ca42d6af9b51,Video-based Side-view Face Recognition for Home Safety,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+07fd87460b3f454c2e7c971aca55df85a374bf8d,An Object-Based Bayesian Framework for Top-Down Visual Attention,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+07fd87460b3f454c2e7c971aca55df85a374bf8d,An Object-Based Bayesian Framework for Top-Down Visual Attention,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+07a17771ca169bc01deb8f7dac1ff0c574ddc512,User-generated Pornographic Video Detection Using Shot-based Sensor Pattern Noise,Korea Advanced Institute of Science and Technology,Korea Advanced Institute of Science and Technology,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.36971910,127.36253700,edu,
+38b9f2faaffbc7c6ad7fb3fb01c387f3155de68f,Part-Based Feature Synthesis for Human Detection,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+383fae9c0b9b13af0ce5c5e88fa8ad40c7a3e7aa,"An Indoor and Outdoor, Multimodal, Multispectral and Multi-Illuminant Database for Face Recognition",University of Tennessee,University of Tennessee,"University of Tennessee, Melrose Avenue, Fort Sanders, Knoxville, Knox County, Tennessee, 37916, USA",35.95424930,-83.93073950,edu,
+380d50f3ccc07fa4f41282395a78c51e33985c39,Deep Attention Neural Tensor Network for Visual Question Answering,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+380d50f3ccc07fa4f41282395a78c51e33985c39,Deep Attention Neural Tensor Network for Visual Question Answering,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+3802c97f925cb03bac91d9db13d8b777dfd29dcc,Non-parametric Bayesian Constrained Local Models,Institute of Systems and Robotics,Institute of Systems and Robotics,"Institut für Robotik und Kognitive Systeme, 160, Ratzeburger Allee, Strecknitz, Sankt Jürgen, Strecknitz, Lübeck, Schleswig-Holstein, 23562, Deutschland",53.83383710,10.70359390,edu,
+38a2661b6b995a3c4d69e7d5160b7596f89ce0e6,Randomized Intraclass-Distance Minimizing Binary Codes for face recognition,Colorado State University,Colorado State University,"Colorado State University, West Pitkin Street, Woodwest, Fort Collins, Larimer County, Colorado, 80526-2002, USA",40.57093580,-105.08655256,edu,
+38a2661b6b995a3c4d69e7d5160b7596f89ce0e6,Randomized Intraclass-Distance Minimizing Binary Codes for face recognition,National Institute of Standards and Technology,National Institute of Standards and Technology,"National Institute of Standards and Technology, Summer Walk Drive, Diamond Farms, Gaithersburg, Montgomery County, Maryland, 20878, USA",39.12549380,-77.22293475,edu,
+38c61c11554135e09a2353afa536d010c7a53cbb,Learning the Detection of Faces in Natural Images,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+3868c75855df640a73b1fcdfa5df1bb92b878099,Labelled pupils in the wild: a dataset for studying pupil detection in unconstrained environments,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+38525bca4b1c5f9b8108743f57fd468492713bca,A Joint Speaker-Listener-Reinforcer Model for Referring Expressions,University of North Carolina at Chapel Hill,University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.91139710,-79.05045290,edu,
+3836b6c5e29a7d0ff58c73e5d5c03dc7e8603819,Multimodal Neural Machine Translation for Low-resource Language Pairs using Synthetic Data,Dublin City University,DUBLIN CITY UNIVERSITY,"Dublin City University Glasnevin Campus, Lower Car Park, Wad, Whitehall A ED, Dublin 9, Dublin, County Dublin, Leinster, D09 FW22, Ireland",53.38522185,-6.25740874,edu,
+3836b6c5e29a7d0ff58c73e5d5c03dc7e8603819,Multimodal Neural Machine Translation for Low-resource Language Pairs using Synthetic Data,Dublin City University,DUBLIN CITY UNIVERSITY,"Dublin City University Glasnevin Campus, Lower Car Park, Wad, Whitehall A ED, Dublin 9, Dublin, County Dublin, Leinster, D09 FW22, Ireland",53.38522185,-6.25740874,edu,
+3836b6c5e29a7d0ff58c73e5d5c03dc7e8603819,Multimodal Neural Machine Translation for Low-resource Language Pairs using Synthetic Data,Dublin City University,DUBLIN CITY UNIVERSITY,"Dublin City University Glasnevin Campus, Lower Car Park, Wad, Whitehall A ED, Dublin 9, Dublin, County Dublin, Leinster, D09 FW22, Ireland",53.38522185,-6.25740874,edu,
+38b55d95189c5e69cf4ab45098a48fba407609b4,Locally Aligned Feature Transforms across Views,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+3837cd26a92e6c20b4351b3fd7e83a422e56cb89,Mobile Robots and Marching Humans: Measuring Synchronous Joint Action While in Motion,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+38f88655debf4bf32978a7b39fbd56aea6ee5752,Class Rectification Hard Mining for Imbalanced Deep Learning,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+38f88655debf4bf32978a7b39fbd56aea6ee5752,Class Rectification Hard Mining for Imbalanced Deep Learning,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+38787338ba659f0bfbeba11ec5b7748ffdbb1c3d,Evaluation of the discrimination power of features extracted from 2-D and 3-D facial images for facial expression analysis,University of Piraeus,University of Piraeus,"Πανεπιστήμιο Πειραιώς, 80, Καραολή και Δημητρίου, Απόλλωνας, Νέο Φάληρο, Πειραιάς, Δήμος Πειραιώς, Περιφερειακή Ενότητα Πειραιώς, Περιφέρεια Αττικής, Αττική, 185 34, Ελλάδα",37.94173275,23.65303262,edu,
+3857ffcf39ec6183f0cbbe8c5f565b1ccd0dce5d,Multi-Level Factorisation Net for Person Re-Identification,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+38eb71578f82477f4b032481bd401f19f14eaf25,Efficient Resource-constrained Retrospective Analysis of Long Video Sequences,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+3803b91e784922a2dacd6a18f61b3100629df932,Temporal Multimodal Fusion for Video Emotion Classification in the Wild,Orange,Orange Labs,"78 Rue Olivier de Serres, 75015 Paris, France",48.83321220,2.29421550,company,"78 Rue Olivier de Serres, Paris, 75015"
+3803b91e784922a2dacd6a18f61b3100629df932,Temporal Multimodal Fusion for Video Emotion Classification in the Wild,Orange,Orange Labs,"78 Rue Olivier de Serres, 75015 Paris, France",48.83321220,2.29421550,company,"78 Rue Olivier de Serres, Paris, 75015"
+3884b78a06ccfde3249c16ac450b5254d033126a,Dual Path Networks for Multi-Person Human Pose Estimation,University of Missouri,University of Missouri,"L1, Maguire Boulevard, Lemone Industrial Park, Columbia, Boone County, Missouri, 65201, USA",38.92676100,-92.29193783,edu,
+38c901a58244be9a2644d486f9a1284dc0edbf8a,Multi-Camera Action Dataset for Cross-Camera Action Recognition Benchmarking,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+38c901a58244be9a2644d486f9a1284dc0edbf8a,Multi-Camera Action Dataset for Cross-Camera Action Recognition Benchmarking,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+38c901a58244be9a2644d486f9a1284dc0edbf8a,Multi-Camera Action Dataset for Cross-Camera Action Recognition Benchmarking,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+38ede8e62e82d5012b3a165e55c9bd84442967db,Deep Analysis of Facial Behavioral Dynamics,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+38ede8e62e82d5012b3a165e55c9bd84442967db,Deep Analysis of Facial Behavioral Dynamics,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+38abaa549c4f398079dc5b1e5957315f66918e23,A fast method for estimating transient scene attributes,University of Kentucky,University of Kentucky,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA",38.03337420,-84.50177580,edu,
+3852968082a16db8be19b4cb04fb44820ae823d4,Unsupervised Learning of Long-Term Motion Dynamics for Videos,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+38cc2f1c13420170c7adac30f9dfac69b297fb76,Recognition of human activities and expressions in video sequences using shape context descriptor,Rochester Institute of Technology,Rochester Institute of Technology,"Rochester Institute of Technology (RIT), 1, Lomb Memorial Drive, Bailey, Henrietta Town, Monroe County, New York, 14623, USA",43.08250655,-77.67121663,edu,
+38cbb500823057613494bacd0078aa0e57b30af8,Deep Face Deblurring,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+38cbb500823057613494bacd0078aa0e57b30af8,Deep Face Deblurring,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+38f06a75eb0519ae1d4582a86ef4730cc8fb8d7f,Shrinkage Expansion Adaptive Metric Learning,Dalian University of Technology,Dalian University of Technology,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国",38.88140235,121.52281098,edu,
+38f06a75eb0519ae1d4582a86ef4730cc8fb8d7f,Shrinkage Expansion Adaptive Metric Learning,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+38f06a75eb0519ae1d4582a86ef4730cc8fb8d7f,Shrinkage Expansion Adaptive Metric Learning,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+38211dc39e41273c0007889202c69f841e02248a,ImageNet: A large-scale hierarchical image database,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+389db56845978baef0141b876774ea06cfb13e04,Information-theoretic criteria for the design of compressive subspace classifiers,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+384945abd53f6a6af51faf254ba8ef0f0fb3f338,Visual Recognition with Humans in the Loop,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+384945abd53f6a6af51faf254ba8ef0f0fb3f338,Visual Recognition with Humans in the Loop,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+381416c19b636c9bbab6ec5ebb1c1fa1be6faeca,Mirage cores: the illusion of many out-of-order cores using in-order hardware,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+381416c19b636c9bbab6ec5ebb1c1fa1be6faeca,Mirage cores: the illusion of many out-of-order cores using in-order hardware,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+381416c19b636c9bbab6ec5ebb1c1fa1be6faeca,Mirage cores: the illusion of many out-of-order cores using in-order hardware,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+381416c19b636c9bbab6ec5ebb1c1fa1be6faeca,Mirage cores: the illusion of many out-of-order cores using in-order hardware,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+38d8ff137ff753f04689e6b76119a44588e143f3,When 3D-Aided 2D Face Recognition Meets Deep Learning: An extended UR2D for Pose-Invariant Face Recognition,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+381d15951b5beb2456ac016ac7f15fd27aa07d1c,"The prodrome of autism: early behavioral and biological signs, regression, peri- and post-natal development and genetics.",Hebrew University of Jerusalem,The Hebrew University of Jerusalem,"האוניברסיטה העברית בירושלים, Reagan Plaza, קרית מנחם בגין, הר הצופים, ירושלים, מחוז ירושלים, NO, ישראל",31.79185550,35.24472300,edu,
+3859d584d3fb794c2b74b42f0f195d16ce8e3820,Combining Recognition and Geometry for Data - Driven 3 D Reconstruction,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+3896c62af5b65d7ba9e52f87505841341bb3e8df,Face Recognition from Still Images and Video,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+3813d74ddf2540c06aa48fc42468bd0d97f51708,Asynchronous Multi-task Learning,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+3813d74ddf2540c06aa48fc42468bd0d97f51708,Asynchronous Multi-task Learning,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+3813d74ddf2540c06aa48fc42468bd0d97f51708,Asynchronous Multi-task Learning,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+38bbca5f94d4494494860c5fe8ca8862dcf9676e,"Probabilistic , Features - based Object Recognition",California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+38b5a83f7941fea5fd82466f8ce1ce4ed7749f59,Improving multi-target tracking via social grouping,"University of California, Riverside","University of California, Riverside","University of California, Riverside, Linden Street, Riverside, Riverside County, California, 92521, USA",33.98071305,-117.33261035,edu,
+38183fe28add21693729ddeaf3c8a90a2d5caea3,Scale-Aware Face Detection,SenseTime,SenseTime,"China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1号-7",39.99300800,116.32988200,company,"1 Zhongguancun E Rd, Haidian Qu, China"
+38a9dfdf72d67cea75298cf29d3ea563e9ce3137,Temporal Segmentation of Egocentric Videos,Hebrew University of Jerusalem,The Hebrew University of Jerusalem,"האוניברסיטה העברית בירושלים, Reagan Plaza, קרית מנחם בגין, הר הצופים, ירושלים, מחוז ירושלים, NO, ישראל",31.79185550,35.24472300,edu,
+38283e35371f2a426305dee60e80cd28abb4f349,CMU-AML Submission to Moments in Time Challenge 2018,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+38283e35371f2a426305dee60e80cd28abb4f349,CMU-AML Submission to Moments in Time Challenge 2018,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+38a9ca2c49a77b540be52377784b9f734e0417e4,Face verification using large feature sets and one shot similarity,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+38a9ca2c49a77b540be52377784b9f734e0417e4,Face verification using large feature sets and one shot similarity,University of Maryland College Park,University of Maryland College Park,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",38.99203005,-76.94610290,edu,
+38a9ca2c49a77b540be52377784b9f734e0417e4,Face verification using large feature sets and one shot similarity,University of Campinas,University of Campinas,"USJ, 97, Rua Sílvia Maria Fabro, Kobrasol, Campinas, São José, Microrregião de Florianópolis, Mesorregião da Grande Florianópolis, SC, Região Sul, 88102-130, Brasil",-27.59539950,-48.61542180,edu,
+385e45a0b9e88929ffe8a341c886a6de41d372f3,Robust Pose Features for Action Recognition,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+38ea19546355e41ee1d57febc07613e7d3122607,Dynamic Functional Brain Connectivity for Face Perception,Delft University of Technology,Delft University of Technology,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland",51.99882735,4.37396037,edu,
+38ea19546355e41ee1d57febc07613e7d3122607,Dynamic Functional Brain Connectivity for Face Perception,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+38ea19546355e41ee1d57febc07613e7d3122607,Dynamic Functional Brain Connectivity for Face Perception,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+38ea19546355e41ee1d57febc07613e7d3122607,Dynamic Functional Brain Connectivity for Face Perception,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+00c4325c669c52db182390b2ab4a2b9c20f06b8d,A False Trail to Follow: Differential Effects of the Facial Feedback Signals From the Upper and Lower Face on the Recognition of Micro-Expressions,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+00f7f7b72a92939c36e2ef9be97397d8796ee07c,3D ConvNets with Optical Flow Based Regularization,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+00f5bfc2fb760249ba4e9c72b72eea4574068339,VQS: Linking Segmentations to Questions and Answers for Supervised Attention in VQA and Question-Focused Semantic Segmentation,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+00f5bfc2fb760249ba4e9c72b72eea4574068339,VQS: Linking Segmentations to Questions and Answers for Supervised Attention in VQA and Question-Focused Semantic Segmentation,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+0021f46bda27ea105d722d19690f5564f2b8869e,Deep Region and Multi-label Learning for Facial Action Unit Detection,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+0081e2188c8f34fcea3e23c49fb3e17883b33551,Training Deep Face Recognition Systems with Synthetic Data,University of Basel,University of Basel,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra",47.56126510,7.57529610,edu,
+00732bed67ca05a601afe8376b5121545d5c7450,Path Aggregation Network for Instance Segmentation,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+003927ec8deedf8cb515ad3b145ef2a5a556cbf4,On Autoencoders and Score Matching for Energy Based Models,University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+003927ec8deedf8cb515ad3b145ef2a5a556cbf4,On Autoencoders and Score Matching for Energy Based Models,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+004ec53d1f12cc4c0a7c809bf3b7acaee2180fd9,An Affectively Aware Virtual Therapist for Depression Counseling,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+004ec53d1f12cc4c0a7c809bf3b7acaee2180fd9,An Affectively Aware Virtual Therapist for Depression Counseling,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+00dc942f23f2d52ab8c8b76b6016d9deed8c468d,Advanced Correlation-Based Character Recognition Applied to the Archimedes Palimpsest,Rochester Institute of Technology,Rochester Institute of Technology,"Rochester Institute of Technology (RIT), 1, Lomb Memorial Drive, Bailey, Henrietta Town, Monroe County, New York, 14623, USA",43.08250655,-77.67121663,edu,
+000aac6ba1c67150d2d6fcc9acbe484b24de4c06,A Picture Is Worth a Thousand Tags: Automatic Web Based Image Tag Expansion,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+0000fcfd467a19cf0e59169c2f07d730a0f3a8b9,Exploring Visual Relationship for Image Captioning,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+0055c7f32fa6d4b1ad586d5211a7afb030ca08cc,Deep Learning for Detecting Multiple Space-Time Action Tubes in Videos,Oxford Brookes University,Oxford Brookes University,"Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK",51.75552050,-1.22615970,edu,
+0055c7f32fa6d4b1ad586d5211a7afb030ca08cc,Deep Learning for Detecting Multiple Space-Time Action Tubes in Videos,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+004aa2cb4b68850ee006af8a6807b3c1a6a198f0,Deep Classifiers from Image Tags in the Wild,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+004aa2cb4b68850ee006af8a6807b3c1a6a198f0,Deep Classifiers from Image Tags in the Wild,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+009cd18ff06ff91c8c9a08a91d2516b264eee48e,Face and Automatic Target Recognition Based on Super-Resolved Discriminant Subspace,Chulalongkorn University,Chulalongkorn University,"จุฬาลงกรณ์มหาวิทยาลัย, 254, ถนนพญาไท, สยาม, แขวงปทุมวัน, เขตปทุมวัน, กรุงเทพมหานคร, 10330, ประเทศไทย",13.74311795,100.53287901,edu,
+0005a23c0db792ac9d0f5d408c39240ffe4c1d57,Understanding Fake Faces,Waseda University,Waseda University,"早稲田大学 北九州キャンパス, 2-2, 有毛引野線, 八幡西区, 北九州市, 福岡県, 九州地方, 808-0135, 日本",33.88987280,130.70856205,edu,
+00514ba3949302705b3b88af5eeef2d05cf8497d,Region-based Segmentation and Object Detection,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+00514ba3949302705b3b88af5eeef2d05cf8497d,Region-based Segmentation and Object Detection,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+00b543d51bf6d16b4027ded325387518cb7fcfe1,Tracking Ants Through Occlusions,University of North Carolina at Charlotte,University of North Carolina at Charlotte,"Lot 20, Poplar Terrace Drive, Charlotte, Mecklenburg County, North Carolina, 28223, USA",35.31034410,-80.73261617,edu,
+00b543d51bf6d16b4027ded325387518cb7fcfe1,Tracking Ants Through Occlusions,University of Arizona,University of Arizona,"University of Arizona, North Highland Avenue, Rincon Heights, Barrio Viejo, Tucson, Pima County, Arizona, 85721, USA",32.23517260,-110.95095832,edu,
+0095564f9e0afe920324fc75cf0b76d3f4825206,Geometry Aware Constrained Optimization Techniques for Deep Learning ∗,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+007ab5528b3bd310a80d553cccad4b78dc496b02,Bidirectional Attention Flow for Machine Comprehension,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+008936436a5dada1366ccf239786f913a47c340d,Scribbler: Controlling Deep Image Synthesis with Sketch and Color,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+008936436a5dada1366ccf239786f913a47c340d,Scribbler: Controlling Deep Image Synthesis with Sketch and Color,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+005227ea30edc2907ca2c01d0729e247e2d9a350,A Semi-supervised Deep Generative Model for Human Body Analysis,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+00d0ad219577c70a3d6295e8839841b2f1898e29,Gang of GANs: Generative Adversarial Networks with Maximum Margin Ranking,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+00d0ad219577c70a3d6295e8839841b2f1898e29,Gang of GANs: Generative Adversarial Networks with Maximum Margin Ranking,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+00d0ad219577c70a3d6295e8839841b2f1898e29,Gang of GANs: Generative Adversarial Networks with Maximum Margin Ranking,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+00b08d22abc85361e1c781d969a1b09b97bc7010,Who is the hero? semi-supervised person re-identification in videos,Tampere University of Technology,Tampere University of Technology,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi",61.44964205,23.85877462,edu,
+007250c2dce81dd839a55f9108677b4f13f2640a,Advances in Component Based Face Detection,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+00e4f90555b98e2286d4d07c87220a6766c441f0,Localization of Multi-pose and Occluded Facial Features via Sparse Shape Representation,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+00e3957212517a252258baef833833921dd308d4,Adaptively Weighted Multi-task Deep Network for Person Attribute Classification,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+00e3957212517a252258baef833833921dd308d4,Adaptively Weighted Multi-task Deep Network for Person Attribute Classification,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+006af49a030aa5b17046cfaf40de8f9246b96adf,Super-Resolution on Image and Video,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+006af49a030aa5b17046cfaf40de8f9246b96adf,Super-Resolution on Image and Video,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+006af49a030aa5b17046cfaf40de8f9246b96adf,Super-Resolution on Image and Video,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+00e8968c5922b71bf3be2e9733fce82f3c40cf44,Neuronal fiber pathway abnormalities in autism: an initial MRI diffusion tensor tracking study of hippocampo-fusiform and amygdalo-fusiform pathways.,University of Kentucky,University of Kentucky,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA",38.03337420,-84.50177580,edu,
+009a18d04a5e3ec23f8ffcfc940402fd8ec9488f,Action Recognition by Weakly-Supervised Discriminative Region Localization,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+0066caed1238de95a431d836d8e6e551b3cde391,Filtered Component Analysis to Increase Robustness to Local Minima in Appearance Models,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+001973a77bf8fa82314de667af5b041d856b0069,Trajectory Factory: Tracklet Cleaving and Re-Connection by Deep Siamese Bi-GRU for Multiple Object Tracking,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+00075519a794ea546b2ca3ca105e2f65e2f5f471,"Generating a Large, Freely-Available Dataset for Face-Related Algorithms",Amherst College,Amherst College,"Amherst College, Boltwood Avenue, Amherst, Hampshire, Massachusetts, 01004, USA",42.37289000,-72.51881400,edu,
+0019925779bff96448f0c75492717e4473f88377,Deep Heterogeneous Face Recognition Networks Based on Cross-Modal Distillation and an Equitable Distance Metric,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+007e86cb55f0ba0415a7764a1e9f9566c1e8784b,Adversarial Feature Learning,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+00b202871ec41b8049e8393e463660525ecb61b5,Subspace clustering based on low rank representation and weighted nuclear norm minimization,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+00f1cccba86736cb6b6f39759ca6749f819252f0,Transfer Metric Learning for Kinship Verification with Locality-Constrained Sparse Features,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+00434a4491b6710308c653c430784872849d1f36,Evaluating Scientific Workflow Execution on an Asymmetric Multicore Processor,University of Manchester,University of Manchester,"University of Manchester - Main Campus, Brunswick Street, Curry Mile, Ardwick, Manchester, Greater Manchester, North West England, England, M13 9NR, UK",53.46600455,-2.23300881,edu,
+0074ccd17382bf077bf08d649a97541ad64478fd,Answer-Aware Attention on Grounded Question Answering in Images,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+00f1b6927785b6f4305cc35c1b0bfbbe2010c31f,Universal Conditional Machine,Lomonosov Moscow State University,Lomonosov Moscow State University,"МГУ, улица Академика Хохлова, Московский государственный университет им. М. В. Ломоносова, район Раменки, Западный административный округ, Москва, ЦФО, 119234, РФ",55.70229715,37.53179777,edu,
+00bc6570d7bec88593cdeafc0feafa32c81aeea9,3D facial expression recognition using swarm intelligence,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+00e9011f58a561500a2910a4013e6334627dee60,Facial expression recognition using angle-related information from facial meshes,University of Thessaloniki,University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+000cd8d20d91ded078949dfcde76817221ea96c8,Learning Visual Attributes from Image and Text,Tohoku University,Tohoku University,"Tohoku University, 五橋通, 青葉区, 仙台市, 宮城県, 東北地方, 980-0811, 日本",38.25309450,140.87365930,edu,
+0041ea67f32bef4949fedcef97562ad16fe5a7f9,Gradient based efficient feature selection,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+00bf7bcf31ee71f5f325ca5307883157ba3d580f,Efficient Online Local Metric Adaptation via Negative Samples for Person Re-identification,Northwestern University,Northwestern University,"Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA",42.05511640,-87.67581113,edu,
+00a967cb2d18e1394226ad37930524a31351f6cf,Fully-Adaptive Feature Sharing in Multi-Task Networks with Applications in Person Attribute Classification,"IBM Research, North Carolina",IBM Research,"IBM, East Cornwallis Road, Research Triangle Park, Nelson, Durham County, North Carolina, 27709, USA",35.90422720,-78.85565763,company,
+00a967cb2d18e1394226ad37930524a31351f6cf,Fully-Adaptive Feature Sharing in Multi-Task Networks with Applications in Person Attribute Classification,"IBM Research, North Carolina",IBM Research,"IBM, East Cornwallis Road, Research Triangle Park, Nelson, Durham County, North Carolina, 27709, USA",35.90422720,-78.85565763,company,
+00a967cb2d18e1394226ad37930524a31351f6cf,Fully-Adaptive Feature Sharing in Multi-Task Networks with Applications in Person Attribute Classification,"IBM Research, North Carolina",IBM Research,"IBM, East Cornwallis Road, Research Triangle Park, Nelson, Durham County, North Carolina, 27709, USA",35.90422720,-78.85565763,company,
+00dec7b4e082e9345e1b34e36d42669f12c129f2,Lost in binarization: query-adaptive ranking for similar image search with compact codes,IBM Thomas J. Watson Research Center,IBM Thomas J. Watson Research Center,"IBM Yorktown research lab, Adams Road, Millwood, Town of New Castle, Westchester County, New York, 10562, USA",41.21002475,-73.80407056,company,
+00f1e5e954f9eb7ffde3ca74009a8c3c27358b58,Unsupervised clustering for google searches of celebrity images,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+009678c2034cf4a9924a78d533d2ec81303a946e,"Connecting Gaze, Scene, and Attention: Generalized Attention Estimation via Joint Modeling of Gaze and Scene Saliency",Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+00a6d711f2bf7974384d2f4b5e61d0bbc493a6b7,Adaptive Margin Nearest Neighbor for Person Re-Identification,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+0058cbe110933f73c21fa6cc9ae0cd23e974a9c7,"Biswas, Jacobs: an Efficient Algorithm for Learning Distances",University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+0058cbe110933f73c21fa6cc9ae0cd23e974a9c7,"Biswas, Jacobs: an Efficient Algorithm for Learning Distances",University of Maryland College Park,University of Maryland College Park,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",38.99203005,-76.94610290,edu,
+00ebc3fa871933265711558fa9486057937c416e,Collaborative Representation based Classification for Face Recognition,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+00ebc3fa871933265711558fa9486057937c416e,Collaborative Representation based Classification for Face Recognition,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+00b29e319ff8b3a521b1320cb8ab5e39d7f42281,Towards Transparent Systems: Semantic Characterization of Failure Modes,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+00b29e319ff8b3a521b1320cb8ab5e39d7f42281,Towards Transparent Systems: Semantic Characterization of Failure Modes,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+00823e6c0b6f1cf22897b8d0b2596743723ec51c,Understanding and Comparing Deep Neural Networks for Age and Gender Classification,Singapore University of Technology and Design,Singapore University of Technology and Design,"Singapore University of Technology and Design, Simpang Bedok, Changi Business Park, Southeast, 486041, Singapore",1.34021600,103.96508900,edu,
+000f90380d768a85e2316225854fc377c079b5c4,Full-Resolution Residual Networks for Semantic Segmentation in Street Scenes,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+0068204e6f250c7e8a26e5dcccc37b36808bca32,Seafloor image compression with large tilesize vector quantization,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+0037d05fe2fc9553e58206f40ca39760396b5911,Automated Insect Identification through Concatenated Histograms of Local Appearance Features,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+0037d05fe2fc9553e58206f40ca39760396b5911,Automated Insect Identification through Concatenated Histograms of Local Appearance Features,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+6e60536c847ac25dba4c1c071e0355e5537fe061,Computer Vision and Natural Language Processing: Recent Approaches in Multimedia and Robotics,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+6e60536c847ac25dba4c1c071e0355e5537fe061,Computer Vision and Natural Language Processing: Recent Approaches in Multimedia and Robotics,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+6e60536c847ac25dba4c1c071e0355e5537fe061,Computer Vision and Natural Language Processing: Recent Approaches in Multimedia and Robotics,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+6e1c597fdad6c43ce6e404f14f336576d8373acd,Efficiently and Effectively Learning Models of Similarity from Human Feeback,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+6e1c597fdad6c43ce6e404f14f336576d8373acd,Efficiently and Effectively Learning Models of Similarity from Human Feeback,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+6e173ad91b288418c290aa8891193873933423b3,Are you from North or South India? A hard race classification task reveals systematic representational differences between humans and machines,Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.02223470,77.56718325,edu,
+6e91be2ad74cf7c5969314b2327b513532b1be09,Dimensionality Reduction with Subspace Structure Preservation,SUNY Buffalo,SUNY Buffalo,"SUNY College at Buffalo, Academic Drive, Elmwood Village, Buffalo, Erie County, New York, 14222, USA",42.93362780,-78.88394479,edu,
+6e61641c9a9cddb38948b6600c0ebc3d2057c697,Pyramid Center-Symmetric Local Binary/Trinary Patterns for Effective Pedestrian Detection,"National University of Defense Technology, China","National University of Defence Technology, Changsha 410000, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.22902090,112.99483204,edu,
+6e61641c9a9cddb38948b6600c0ebc3d2057c697,Pyramid Center-Symmetric Local Binary/Trinary Patterns for Effective Pedestrian Detection,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+6e7a9779dee831658e973ee26ac8bfed2d6da033,Human Pose Estimation for Multiple Frames,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+6e7a9779dee831658e973ee26ac8bfed2d6da033,Human Pose Estimation for Multiple Frames,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+6e7a9779dee831658e973ee26ac8bfed2d6da033,Human Pose Estimation for Multiple Frames,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+6eddea1d991e81c1c3024a6cea422bc59b10a1dc,Towards automatic analysis of gestures and body expressions in depression,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+6eddea1d991e81c1c3024a6cea422bc59b10a1dc,Towards automatic analysis of gestures and body expressions in depression,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+6e025c0415c9ff0705d4e4439a48e8fffe7d44c1,Dynamic Graph Generation Network: Generating Relational Knowledge from Diagrams,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+6e647a430d603f4d82e44b4a87de580a0fcaec88,BigSUR: large-scale structured urban reconstruction,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+6e647a430d603f4d82e44b4a87de580a0fcaec88,BigSUR: large-scale structured urban reconstruction,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+6eb8e193687c16f0edc3742d3549ad175ef648d1,Working memory load disrupts gaze-cued orienting of attention,University of East Anglia,University of East Anglia,"Arts (Lower Walkway Level), The Square, Westfield View, Earlham, Norwich, Norfolk, East of England, England, NR4 7TJ, UK",52.62215710,1.24091360,edu,
+6e97a99b2879634ecae962ddb8af7c1a0a653a82,Towards Context-aware Interaction Recognition,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+6e428db07a54a824f77a4c1a8fe9e70d6049e79c,Hierarchical Feature Hashing for Fast Dimensionality Reduction,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+6e9a8a34ab5b7cdc12ea52d94e3462225af2c32c,Fusing Aligned and Non-aligned Face Information for Automatic Affect Recognition in the Wild: A Deep Learning Approach,Korea Advanced Institute of Science and Technology,Korea Advanced Institute of Science and Technology,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.36971910,127.36253700,edu,
+6ebfec00388b6975c8c38aed1ebe006eae79bcfe,Modeling Instance Appearance for Recognition – Can We Do Better Than EM?,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+6ebfec00388b6975c8c38aed1ebe006eae79bcfe,Modeling Instance Appearance for Recognition – Can We Do Better Than EM?,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+6e968f74fd6b4b3b172c787f298b3d4746ec5cc9,A 3D Polygonal Line Chains Matching Method for Face Recognition,Griffith University,Griffith University,"Griffith University Nathan Campus, Johnson Path, Nathan, Nathan Heights, QLD, 4111, Australia",-27.55339750,153.05336234,edu,
+6e968f74fd6b4b3b172c787f298b3d4746ec5cc9,A 3D Polygonal Line Chains Matching Method for Face Recognition,Griffith University,Griffith University,"Griffith University Nathan Campus, Johnson Path, Nathan, Nathan Heights, QLD, 4111, Australia",-27.55339750,153.05336234,edu,
+6e209d7d33c0be8afae863f4e4e9c3e86826711f,Weakly-supervised segmentation by combining CNN feature maps and object saliency maps,University of Electro-Communications,The University of Electro-Communications,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本",35.65729570,139.54255868,edu,
+6eebd8762996501b28d3d94a7c166c79d37e7a57,Sequential Spectral Learning to Hash with Multiple Representations,Pohang University of Science and Technology,Pohang University of Science and Technology,"포스텍, 77, 청암로, 효곡동, 남구, 포항시, 경북, 37673, 대한민국",36.01773095,129.32107509,edu,
+6e69de19576ea2dfa4cb84a450ce18eccd183a95,Easy Minimax Estimation with Random Forests for Human Pose Estimation,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+6e12226cf0da453dc4b9879d7af6b43af3c31d2b,Efficient Action Detection in Untrimmed Videos via Multi-task Learning,"University of California, Merced","University of California, Merced","University of California, Merced, Ansel Adams Road, Merced County, California, USA",37.36566745,-120.42158888,edu,
+6ef190ad2c9c6e11d12bc1b51a4c8a11a4692fb8,Annotating Object Instances with a Polygon-RNN,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+6e00a406edb508312108f683effe6d3c1db020fb,Faces as Lighting Probes via Unsupervised Deep Highlight Extraction,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+6e00a406edb508312108f683effe6d3c1db020fb,Faces as Lighting Probes via Unsupervised Deep Highlight Extraction,"National University of Defense Technology, China","National University of Defence Technology, Changsha 410000, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.22902090,112.99483204,edu,
+6e00a406edb508312108f683effe6d3c1db020fb,Faces as Lighting Probes via Unsupervised Deep Highlight Extraction,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+6e40b120bf46807ef28ebdd8860e3109708bb888,Unsupervised Image-to-Image Translation with Stacked Cycle-Consistent Adversarial Networks,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+6e94c579097922f4bc659dd5d6c6238a428c4d22,Graph Based Multi-class Semi-supervised Learning Using Gaussian Process,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+6eeff23d6e0127cfbbd0374a83341173a418ba7f,Dual Attention Network for Visual Question Answering,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+6e6923a8b39cd22d714ae9364d18bec8178e5632,Generating Image Descriptions Using Semantic Similarities in the Output Space,International Institute of Information Technology,International Institute of Information Technology,"International Institute of Information Technology, Hyderabad, Campus Road, Ward 105 Gachibowli, Greater Hyderabad Municipal Corporation West Zone, Hyderabad, Rangareddy District, Telangana, 500032, India",17.44549570,78.34854698,edu,
+6eb1e006b7758b636a569ca9e15aafd038d2c1b1,Human Capabilities on Video-based Facial Expression Recognition,Waseda University,Waseda University,"早稲田大学 北九州キャンパス, 2-2, 有毛引野線, 八幡西区, 北九州市, 福岡県, 九州地方, 808-0135, 日本",33.88987280,130.70856205,edu,
+6eb1e006b7758b636a569ca9e15aafd038d2c1b1,Human Capabilities on Video-based Facial Expression Recognition,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+6eece104e430829741677cadc1dfacd0e058d60f,Use of Automated Facial Image Analysis for Measurement of Emotion Expression,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+6eece104e430829741677cadc1dfacd0e058d60f,Use of Automated Facial Image Analysis for Measurement of Emotion Expression,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+6eece104e430829741677cadc1dfacd0e058d60f,Use of Automated Facial Image Analysis for Measurement of Emotion Expression,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+6ee5dbbc167167105162abd888ca4824a048fae0,Face recognition using non-linear image reconstruction,Orange,Orange Labs,"78 Rue Olivier de Serres, 75015 Paris, France",48.83321220,2.29421550,company,"78 Rue Olivier de Serres, Paris, 75015"
+6ef78987104b7e66c1a71f87b94c4b0ebf34330e,Incorporating Side Information by Adaptive Convolution,City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+6eb1b5935b0613a41b72fd9e7e53a3c0b32651e9,LEGO Pictorial Scales for Assessing Affective Responses,University of Canterbury,University of Canterbury,"University of Canterbury, Uni-Cycle, Ilam, Christchurch, Christchurch City, Canterbury, 8040, New Zealand/Aotearoa",-43.52405280,172.58030625,edu,
+6eb1b5935b0613a41b72fd9e7e53a3c0b32651e9,LEGO Pictorial Scales for Assessing Affective Responses,Texas A&M University,Texas A&M University,"Texas A&M University, Horticulture Street, Park West, College Station, Brazos County, Texas, 77841, USA",30.61083650,-96.35212800,edu,
+6eb1b5935b0613a41b72fd9e7e53a3c0b32651e9,LEGO Pictorial Scales for Assessing Affective Responses,University of Canterbury,University of Canterbury,"University of Canterbury, Uni-Cycle, Ilam, Christchurch, Christchurch City, Canterbury, 8040, New Zealand/Aotearoa",-43.52405280,172.58030625,edu,
+6e12b8cb01abd5d6af6023e284009d417c53d160,Coarse-to-fine : A RNN-based hierarchical attention model for vehicle re-identification ?,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+6e12b8cb01abd5d6af6023e284009d417c53d160,Coarse-to-fine : A RNN-based hierarchical attention model for vehicle re-identification ?,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+6ea6be2c270f7d366e9e93ced7ea5a17d3a24c1a,Real-Time Semantic Segmentation Benchmarking Framework,University of Alberta,University of Alberta,"University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada",53.52385720,-113.52282665,edu,
+6e80ad43c5f383c1d87b1ced2a336fe5cd44e044,Faster R-CNN for Robust Pedestrian Detection Using Semantic Segmentation Network,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+6e782073a013ce3dbc5b9b56087fd0300c510f67,Real Time Facial Emotion Recognition using Kinect V2 Sensor,University Politehnica of Bucharest,University Politehnica of Bucharest,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România",44.43918115,26.05044565,edu,
+6e4e5ef25f657de8fb383c8dfeb8e229eea28bb9,RON: Reverse Connection with Objectness Prior Networks for Object Detection,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+6eb5db8e6a79ad59bf4f4a5fccdd5b10237408d7,Cross Talk: The Microbiota and Neurodevelopmental Disorders,University of Missouri,University of Missouri,"L1, Maguire Boulevard, Lemone Industrial Park, Columbia, Boone County, Missouri, 65201, USA",38.92676100,-92.29193783,edu,
+9a36bbabea698a9fe0e11e2cf77a013dd7769f42,Predicting Aggressive Tendencies by Visual Attention Bias Associated with Hostile Emotions,National Central University,National Central University,"NCU, 300, 中大路, 上三座屋, 五權里, 樹林子, 中壢區, 桃園市, 320, 臺灣",24.96841805,121.19139696,edu,
+9a36bbabea698a9fe0e11e2cf77a013dd7769f42,Predicting Aggressive Tendencies by Visual Attention Bias Associated with Hostile Emotions,National Central University,National Central University,"NCU, 300, 中大路, 上三座屋, 五權里, 樹林子, 中壢區, 桃園市, 320, 臺灣",24.96841805,121.19139696,edu,
+9a36bbabea698a9fe0e11e2cf77a013dd7769f42,Predicting Aggressive Tendencies by Visual Attention Bias Associated with Hostile Emotions,Tamkang University,Tamkang University,"淡江大學, 151, 英專路, 中興里, 鬼仔坑, 淡水區, 新北市, 25137, 臺灣",25.17500615,121.45076751,edu,
+9a6268d2bc1221ea154097feadea0c58f234d02f,Co-Attending Free-Form Regions and Detections With Multi-Modal Multiplicative Feature Embedding for Visual Question Answering,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+9a6268d2bc1221ea154097feadea0c58f234d02f,Co-Attending Free-Form Regions and Detections With Multi-Modal Multiplicative Feature Embedding for Visual Question Answering,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+9a6268d2bc1221ea154097feadea0c58f234d02f,Co-Attending Free-Form Regions and Detections With Multi-Modal Multiplicative Feature Embedding for Visual Question Answering,East China Normal University,East China Normal University,"华东师范大学, 3663, 中山北路, 曹家渡, 普陀区, 普陀区 (Putuo), 上海市, 200062, 中国",31.22849230,121.40211389,edu,
+9a81f46fcf8c6c0efbe34649552b5056ce419a3d,Deep person re-identification with improved embedding and efficient training,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+9a45abde5e2ad08dcb6c267fba30a02fcd2e516e,Realistic Texture Extraction for 3 D Face Models Robust to Self-Occlusion,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+9a0c7a4652c49a177460b5d2fbbe1b2e6535e50a,Automatic and quantitative evaluation of attribute discovery methods,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+9ac43a98fe6fde668afb4fcc115e4ee353a6732d,Survey of Face Detection on Low-Quality Images,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+9a5473662819063cb60c1b29e6544b9314b9b29f,Predicting Social Interactions for Visual Tracking,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+9a4c45e5c6e4f616771a7325629d167a38508691,A facial features detector integrating holistic facial information and part-based model,University of Louisville,University of Louisville,"University of Louisville, South Brook Street, Louisville, Jefferson County, Kentucky, 40208, USA",38.21675650,-85.75725023,edu,
+9a4c45e5c6e4f616771a7325629d167a38508691,A facial features detector integrating holistic facial information and part-based model,Alexandria University,Alexandria University,"جامعة الإسكندرية, الكورنيش, إبراهيمية, الإسكندرية, 21522, مصر",31.21051105,29.91314562,edu,
+9a4c45e5c6e4f616771a7325629d167a38508691,A facial features detector integrating holistic facial information and part-based model,Assiut University,Assiut University,"Assiut University, El Shaheed Ellwaa Hasn Kamel street, الوليدية, أسيوط, مصر",27.18794105,31.17009498,edu,
+9a5c896a527fb6b72508d7a6309c5c375cb2967c,The IJCB 2014 PaSC video face and person recognition competition,Colorado State University,Colorado State University,"Colorado State University, West Pitkin Street, Woodwest, Fort Collins, Larimer County, Colorado, 80526-2002, USA",40.57093580,-105.08655256,edu,
+9a5c896a527fb6b72508d7a6309c5c375cb2967c,The IJCB 2014 PaSC video face and person recognition competition,Stevens Institute of Technology,Stevens Institute of Technology,"Stevens Institute of Technology, River Terrace, Hoboken, Hudson County, New Jersey, 07030, USA",40.74225200,-74.02709490,edu,
+9a5c896a527fb6b72508d7a6309c5c375cb2967c,The IJCB 2014 PaSC video face and person recognition competition,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+9a5c896a527fb6b72508d7a6309c5c375cb2967c,The IJCB 2014 PaSC video face and person recognition competition,University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.05015580,14.46907327,edu,
+9af9a88c60d9e4b53e759823c439fc590a4b5bc5,Learning Deep Convolutional Embeddings for Face Representation Using Joint Sample- and Set-Based Supervision,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+9a27d5efc7b74ba23c07d3a45f20285998bf1577,MONET: Multiview Semi-supervised Keypoint via Epipolar Divergence,University of Minnesota,University of Minnesota,"WeismanArt, 333, East River Parkway, Marcy-Holmes, Phillips, Minneapolis, Hennepin County, Minnesota, 55455, USA",44.97308605,-93.23708813,edu,
+9a3535cabf5d0f662bff1d897fb5b777a412d82e,Large-scale geo-facial image analysis,University of Kentucky,University of Kentucky,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA",38.03337420,-84.50177580,edu,
+9a3535cabf5d0f662bff1d897fb5b777a412d82e,Large-scale geo-facial image analysis,University of Kentucky,University of Kentucky,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA",38.03337420,-84.50177580,edu,
+9a3535cabf5d0f662bff1d897fb5b777a412d82e,Large-scale geo-facial image analysis,University of Kentucky,University of Kentucky,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA",38.03337420,-84.50177580,edu,
+9a3535cabf5d0f662bff1d897fb5b777a412d82e,Large-scale geo-facial image analysis,University of North Carolina at Charlotte,University of North Carolina at Charlotte,"Lot 20, Poplar Terrace Drive, Charlotte, Mecklenburg County, North Carolina, 28223, USA",35.31034410,-80.73261617,edu,
+9a3535cabf5d0f662bff1d897fb5b777a412d82e,Large-scale geo-facial image analysis,University of Kentucky,University of Kentucky,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA",38.03337420,-84.50177580,edu,
+9a7b7d61481e3a5bca1ef809358d46ac87405f67,Neural circuitry of emotional face processing in autism spectrum disorders.,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+9a781a01b5a9c210dd2d27db8b73b7d62bc64837,An Attempt to Build Object Detection Models by Reusing Parts,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+9abd35b37a49ee1295e8197aac59bde802a934f3,Depth2Action: Exploring Embedded Depth for Large-Scale Action Recognition,"University of California, Merced","University of California, Merced","University of California, Merced, Ansel Adams Road, Merced County, California, USA",37.36566745,-120.42158888,edu,
+9a9570bfebd3c970879f8d99804e74093d9bb6e9,Living a discrete life in a continuous world: Reference with distributed representations,Universitat Pompeu Fabra,Universitat Pompeu Fabra,"Dipòsit de les Aigües, Carrer de Wellington, la Vila Olímpica del Poblenou, Ciutat Vella, Barcelona, BCN, CAT, 08071, España",41.39044285,2.18891949,edu,
+9a9570bfebd3c970879f8d99804e74093d9bb6e9,Living a discrete life in a continuous world: Reference with distributed representations,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+9ac625867c50ce839d56a52ade92d3b971caff43,Accelerating Machine Learning Kernel in Hadoop Using FPGAs,George Mason University,George Mason University,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA",38.83133325,-77.30798839,edu,
+9abd6bac662e8fdf4f71ccc26a89f7e360b7b879,Object Level Visual Reasoning in Videos,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+9a5a1763e0342d41cb1d1eef18a007be6e8dba89,Image Annotation with Discriminative Model and Annotation Refinement by Visual Similarity Matching,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+9a601fd18aea081d28408d133140ffb1f6dfcda6,Novel Pose-Variant Face Detection Method for Human-Robot Interaction Application,Yonsei University,Yonsei University,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.56004060,126.93692480,edu,
+9adbbd9dadaf7b15bb585555e7a2e2223e711296,Identity information content depends on the type of facial movement,Max Planck Institute for Biological Cybernetics,Max Planck Institute for Biological Cybernetics,"Max-Planck-Institut für Biologische Kybernetik, 8, Max-Planck-Ring, Max-Planck-Institut, Wanne, Tübingen, Landkreis Tübingen, Regierungsbezirk Tübingen, Baden-Württemberg, 72076, Deutschland",48.53691250,9.05922533,edu,
+9adbbd9dadaf7b15bb585555e7a2e2223e711296,Identity information content depends on the type of facial movement,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+9a7fcd09afd8c3ae227e621795168c94ffbac71d,Action unit recognition transfer across datasets,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+36597e65169d576d0a68dca7023c57efcfee5c4f,Multiclass transfer learning from unconstrained priors,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+363cb83220451baa9f785a1fa738e41178e015c7,SPID: Surveillance Pedestrian Image Dataset and Performance Evaluation for Pedestrian Detection,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+36f1110aed28165483f2dc7250fd187412467f61,Evaluating the WordsEye Text-to-Scene System: Imaginative and Realistic Sentences,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+364f357d4894aa004e442a5a92896a9b14a46862,Recovering 3-D Shape and Reflectance From a Small Number of Photographs,Yale University,Yale University,"Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA",41.25713055,-72.98966960,edu,
+36972f34d35acab784359ddac4789e19118ac6d4,Graph-based Inference with Constraints for Object Detection and Segmentation,Lehigh University,Lehigh University,"Lehigh University, Library Drive, Sayre Park, Bethlehem, Northampton County, Pennsylvania, 18015, USA",40.60680280,-75.37824880,edu,
+36d858eb19bba43244b92f7faabfce47b13f2403,Materialization optimizations for feature selection workloads,University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+36d858eb19bba43244b92f7faabfce47b13f2403,Materialization optimizations for feature selection workloads,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+364c79d2d98819b27641c651cf6553142ef747bf,Hedging your bets: Optimizing accuracy-specificity trade-offs in large scale visual recognition,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+36132cf4fea1717f7d39150d1a0cc79ad78b069e,Attribute-Centered Loss for Soft-Biometrics Guided Face Sketch-Photo Recognition,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+36fd702e5686f91b7e45434f8e2f6ef51feb2d54,Kernel-PCA Analysis of Surface Normals for Shape-from-Shading,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+36a0961cc64c4d3033aec820073d50c6470caa41,Indexing Methods for Efficient Multiclass Recognition Indexing Methods for Efficient Multiclass Recognition Indexing Methods for Efficient Multiclass Recognition,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+36d487129fd0b828255e417e0d10cf13d7f525cf,Reduced functional integration and segregation of distributed neural systems underlying social and emotional information processing in autism spectrum disorders.,Yale University,Yale University,"Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA",41.25713055,-72.98966960,edu,
+36a3a96ef54000a0cd63de867a5eb7e84396de09,Automatic Photo Orientation Detection with Convolutional Neural Networks,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+360bb1bafa00dd1fa90a89766f5ef75061cfde75,Common-near-neighbor analysis for person re-identification,Kyoto University,Kyoto University,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.02749960,135.78154513,edu,
+360bb1bafa00dd1fa90a89766f5ef75061cfde75,Common-near-neighbor analysis for person re-identification,Kyoto University,Kyoto University,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.02749960,135.78154513,edu,
+36ddf5d08ce753fe00efc844be3769f09dda9f91,Towards incremental and large scale face recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+3676c29babe1563ee64a1149d2ae2f9f1369fe25,Visual saliency computation for image analysis,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+3676c29babe1563ee64a1149d2ae2f9f1369fe25,Visual saliency computation for image analysis,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+360d66e210f7011423364327b7eccdf758b5fdd2,Local feature extraction methods for facial expression recognition,RMIT University,RMIT University,"RMIT University, 124, La Trobe Street, Melbourne City, City of Melbourne, Victoria, 3000, Australia",-37.80874650,144.96388750,edu,
+36c948efd76f58ff1a5e42a2b69fbdc04913f7c4,A Review on Image Texture Analysis Methods,Islamic Azad University,Islamic Azad University,"دانشگاه آزاد اسلامی, همدان, بخش مرکزی شهرستان همدان, شهرستان همدان, استان همدان, ‏ایران‎",34.84529990,48.55962120,edu,
+361c9ba853c7d69058ddc0f32cdbe94fbc2166d5,Deep Reinforcement Learning of Video Games,University of Groningen,University of Groningen,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland",53.21967825,6.56251482,edu,
+361c9ba853c7d69058ddc0f32cdbe94fbc2166d5,Deep Reinforcement Learning of Video Games,University of Groningen,University of Groningen,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland",53.21967825,6.56251482,edu,
+368e99f669ea5fd395b3193cd75b301a76150f9d,One-to-many face recognition with bilinear CNNs,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+362d884ff43d8c7cd6bce184944cfc04cdd57c18,Octopus-Man: QoS-driven task management for heterogeneous multicores in warehouse-scale computers,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+362d884ff43d8c7cd6bce184944cfc04cdd57c18,Octopus-Man: QoS-driven task management for heterogeneous multicores in warehouse-scale computers,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+361aa2cfbc51ece34be511986205095363db94c5,Automatic landmark detection and face recognition for side-view face images,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+3615bbdd4fe81acd9e5d166af731b5556b19a2cd,Efficient object localization using Convolutional Networks,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+36a95f1a9fbe518427bbf33293488c71161313a9,Multi-Context Label Embedding,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+36df81e82ea5c1e5edac40b60b374979a43668a5,On-the-fly specific person retrieval,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+36091ff6b5d5a53d9641f5c3388b8c31b9ad4b49,Temporal Modular Networks for Retrieving Complex Compositional Activities in Videos,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+3619a9b46ad4779d0a63b20f7a6a8d3d49530339,Fisher Vector Faces in the Wild,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+36e8ef2e5d52a78dddf0002e03918b101dcdb326,Multiview Active Shape Models with SIFT Descriptors for the 300-W Face Landmark Challenge,University of Cape Town,University of Cape Town,"University of Cape Town, Engineering Mall, Cape Town Ward 59, Cape Town, City of Cape Town, Western Cape, CAPE TOWN, South Africa",-33.95828745,18.45997349,edu,
+36e8ef2e5d52a78dddf0002e03918b101dcdb326,Multiview Active Shape Models with SIFT Descriptors for the 300-W Face Landmark Challenge,University of Cape Town,University of Cape Town,"University of Cape Town, Engineering Mall, Cape Town Ward 59, Cape Town, City of Cape Town, Western Cape, CAPE TOWN, South Africa",-33.95828745,18.45997349,edu,
+367f2668b215e32aff9d5122ce1f1207c20336c8,Speaker-Dependent Human Emotion Recognition in Unimodal and Bimodal Scenarios,University of Peshawar,University of Peshawar,"University of Peshawar, Road 2, JAHANGIR ABAD / جهانگیرآباد, پشاور‎, Peshāwar District, خیبر پختونخوا, 2500, ‏پاکستان‎",34.00920040,71.48774947,edu,
+367f2668b215e32aff9d5122ce1f1207c20336c8,Speaker-Dependent Human Emotion Recognition in Unimodal and Bimodal Scenarios,University of Peshawar,University of Peshawar,"University of Peshawar, Road 2, JAHANGIR ABAD / جهانگیرآباد, پشاور‎, Peshāwar District, خیبر پختونخوا, 2500, ‏پاکستان‎",34.00920040,71.48774947,edu,
+36c2715522c3df4237d8e034dfe49d67eafd6382,"Scene Graph Generation from Objects, Phrases and Region Captions",Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+36c2715522c3df4237d8e034dfe49d67eafd6382,"Scene Graph Generation from Objects, Phrases and Region Captions",University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+36c2715522c3df4237d8e034dfe49d67eafd6382,"Scene Graph Generation from Objects, Phrases and Region Captions",MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+36c2db5ff76864d289781f93cbb3e6351f11984c,One colored image based 2.5D human face reconstruction,Newcastle University,Newcastle University,"Newcastle University, Claremont Walk, Haymarket, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE1 7RU, UK",54.98023235,-1.61452627,edu,
+3686c59fac958de0b3911d5b08213994836ee96e,Efficient Articulated Trajectory Reconstruction Using Dynamic Programming and Filters,Queensland University of Technology,Queensland University of Technology,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.47715625,153.02841004,edu,
+3686c59fac958de0b3911d5b08213994836ee96e,Efficient Articulated Trajectory Reconstruction Using Dynamic Programming and Filters,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+36e21168155720d0210b8cc4ae031091d96701c8,Research Problems and Opportunities in Memory Systems,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+36e21168155720d0210b8cc4ae031091d96701c8,Research Problems and Opportunities in Memory Systems,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+36e21168155720d0210b8cc4ae031091d96701c8,Research Problems and Opportunities in Memory Systems,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+3661a34f302883c759b9fa2ce03de0c7173d2bb2,Peak-Piloted Deep Network for Facial Expression Recognition,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+3661a34f302883c759b9fa2ce03de0c7173d2bb2,Peak-Piloted Deep Network for Facial Expression Recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+36c473fc0bf3cee5fdd49a13cf122de8be736977,Temporal Segment Networks: Towards Good Practices for Deep Action Recognition,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+36c473fc0bf3cee5fdd49a13cf122de8be736977,Temporal Segment Networks: Towards Good Practices for Deep Action Recognition,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+361164861d8e4676079219f6d099358a31fc4025,Multiple Hypothesis Tracking Revisited,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+361164861d8e4676079219f6d099358a31fc4025,Multiple Hypothesis Tracking Revisited,Oregon State University,Oregon State University,"OSU Beaver Store, 538, Southwest 6th Avenue, Portland Downtown, Portland, Multnomah County, Oregon, 97204, USA",45.51982890,-122.67797964,edu,
+368d59cf1733af511ed8abbcbeb4fb47afd4da1c,To Frontalize or Not To Frontalize: A Study of Face Pre-Processing Techniques and Their Impact on Recognition,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+368d59cf1733af511ed8abbcbeb4fb47afd4da1c,To Frontalize or Not To Frontalize: A Study of Face Pre-Processing Techniques and Their Impact on Recognition,University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.05015580,14.46907327,edu,
+3632ac78294d39f8d51bb8f2ec270cf9c115d0f6,Typeface Completion with Generative Adversarial Networks,Korea University,Korea University,"고려대, 안암로, 제기동, 동대문구, 서울특별시, 02796, 대한민국",37.59014110,127.03623180,edu,
+36b9f46c12240898bafa10b0026a3fb5239f72f3,Collaborative Deep Reinforcement Learning for Joint Object Search,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+36b9f46c12240898bafa10b0026a3fb5239f72f3,Collaborative Deep Reinforcement Learning for Joint Object Search,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+36b9f46c12240898bafa10b0026a3fb5239f72f3,Collaborative Deep Reinforcement Learning for Joint Object Search,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+36b9f46c12240898bafa10b0026a3fb5239f72f3,Collaborative Deep Reinforcement Learning for Joint Object Search,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+360f8874e42894af71ede97cd153853e09238350,Extracting Moving People from Internet Videos,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+36e25994cfeab3dc487f9a82139c08f26cebf92f,Annealed Generative Adversarial Networks,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+361d6345919c2edc5c3ce49bb4915ed2b4ee49be,Models for supervised learning in sequence data,Delft University of Technology,Delft University of Technology,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland",51.99882735,4.37396037,edu,
+36d18202745ec9abb70d8f7e6a4f28a55871e657,Pairwise Body-Part Attention for Recognizing Human-Object Interactions,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+3605647befd040a819f00b1539a6e3cc5ffb53b8,Vision-based bicyclist detection and tracking for intelligent vehicles,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+3605647befd040a819f00b1539a6e3cc5ffb53b8,Vision-based bicyclist detection and tracking for intelligent vehicles,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+3634b4dd263c0f330245c086ce646c9bb748cd6b,Temporal Localization of Fine-Grained Actions in Videos by Domain Transfer from Web Images,Google,"Google, Inc.","1600 Amphitheatre Pkwy, Mountain View, CA 94043, USA",37.42199990,-122.08405750,company,"Google, Mountain View, CA"
+367a786cfe930455cd3f6bd2492c304d38f6f488,A Training Assistant Tool for the Automated Visual Inspection System,Clemson University,Clemson University,"Clemson University, Old Stadium Road, Clemson Heights, Pickens County, South Carolina, 29631, USA",34.66869155,-82.83743476,edu,
+5c812e8968b88c25d18a066f8a28c0421555d2c9,Highly-Economized Multi-view Binary Compression for Scalable Image Clustering,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+5c812e8968b88c25d18a066f8a28c0421555d2c9,Highly-Economized Multi-view Binary Compression for Scalable Image Clustering,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"Columbus, OH 43210, USA",40.01419050,-83.03091430,edu,
+5cd2a7ec2b47086b1e9ff6ebc096eae5e03d2f67,Simultaneous super-resolution and feature extraction for recognition of low-resolution faces,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+5ce84883ab78e7e61a4e84a80cce8c86265f6ae9,Deep Sparse Coding for Invariant Multimodal Halle Berry Neurons,Villanova University,Villanova University,"Villanova University, East Lancaster Avenue, Radnor Township, Delaware County, Pennsylvania, 19010, USA",40.03677740,-75.34202332,edu,
+5cbe1445d683d605b31377881ac8540e1d17adf0,On 3D face reconstruction via cascaded regression in shape space,"Sichuan University, Chengdu","Sichuan Univ., Chengdu","四川大学(华西校区), 校东路, 武侯区, 武侯区 (Wuhou), 成都市 / Chengdu, 四川省, 610014, 中国",30.64276900,104.06751175,edu,
+5ca23ceb0636dfc34c114d4af7276a588e0e8dac,Texture representation in AAM using Gabor wavelet and local binary patterns,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+5ca23ceb0636dfc34c114d4af7276a588e0e8dac,Texture representation in AAM using Gabor wavelet and local binary patterns,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+5ca23ceb0636dfc34c114d4af7276a588e0e8dac,Texture representation in AAM using Gabor wavelet and local binary patterns,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+5ca23ceb0636dfc34c114d4af7276a588e0e8dac,Texture representation in AAM using Gabor wavelet and local binary patterns,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+5c6215a32fa943d07cd2d0401d646f93faaf34e1,Latent Semantic Representation Learning for Scene Classification,Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+5cf549ca5680491f12a5ac5d42b171a64088da22,FRVT 2006: Quo Vidas Face Quality,Colorado State University,Colorado State University,"Colorado State University, West Pitkin Street, Woodwest, Fort Collins, Larimer County, Colorado, 80526-2002, USA",40.57093580,-105.08655256,edu,
+5cf549ca5680491f12a5ac5d42b171a64088da22,FRVT 2006: Quo Vidas Face Quality,Colorado State University,Colorado State University,"Colorado State University, West Pitkin Street, Woodwest, Fort Collins, Larimer County, Colorado, 80526-2002, USA",40.57093580,-105.08655256,edu,
+5cf549ca5680491f12a5ac5d42b171a64088da22,FRVT 2006: Quo Vidas Face Quality,National Institute of Standards and Technology,National Institute of Standards and Technology,"National Institute of Standards and Technology, Summer Walk Drive, Diamond Farms, Gaithersburg, Montgomery County, Maryland, 20878, USA",39.12549380,-77.22293475,edu,
+5cbc66ace06eb35a22a3196cc13f75ddb0b7cefa,SHADHO: Massively Scalable Hardware-Aware Distributed Hyperparameter Optimization,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+5c493c42bfd93e4d08517438983e3af65e023a87,Multimodal Keyless Attention Fusion for Video Classification,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+5c9cfeb77f7e5040a4ca3775e524247a0bcb73db,The truth about cats and dogs,International Institute of Information Technology,International Institute of Information Technology,"International Institute of Information Technology, Hyderabad, Campus Road, Ward 105 Gachibowli, Greater Hyderabad Municipal Corporation West Zone, Hyderabad, Rangareddy District, Telangana, 500032, India",17.44549570,78.34854698,edu,
+5c9cfeb77f7e5040a4ca3775e524247a0bcb73db,The truth about cats and dogs,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+5cac869f7e47c290ba14d27a5d6b5aadaddfaa69,Semantically Selective Augmentation for Deep Compact Person Re-Identification,University of Bristol,University of Bristol,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK",51.45848370,-2.60977520,edu,
+5cb83eba8d265afd4eac49eb6b91cdae47def26d,Face Recognition with Local Line Binary Pattern,Mahanakorn University of Technology,Mahanakorn University of Technology,"มหาวิทยาลัยเทคโนโลยีมหานคร, 140, ถนนเชื่อมสัมพันธ์, กรุงเทพมหานคร, เขตหนองจอก, กรุงเทพมหานคร, 10530, ประเทศไทย",13.84450465,100.85620818,edu,
+5c8672c0d2f28fd5d2d2c4b9818fcff43fb01a48,Robust Face Detection by Simple Means,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+5c3dce55c61ee86073575ac75cc882a215cb49e6,Neural Codes for Image Retrieval,"Moscow Institute of Physics and Technology, Russia","Moscow Institute of Physics and Technology, Russia","МФТИ, 9, Институтский переулок, Виноградовские Горки, Лихачёво, Долгопрудный, городской округ Долгопрудный, Московская область, ЦФО, 141700, РФ",55.92903500,37.51866808,edu,
+5ccb73fa509b4c56c765cf5ef850060ca8686bfa,Identification by a hybrid 3D/2D gait recognition algorithm,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+5c97cef9cebf101b74699f583f3e324aebccde32,Multi-view multi-modal person authentication from a single walking image sequence,Osaka University,Osaka University,"大阪大学清明寮, 服部西町四丁目, 豊中市, 大阪府, 近畿地方, 日本",34.80809035,135.45785218,edu,
+5cbef6da9c2cc630722f1e48a59c3aa84a00c44a,Binary Pattern Analysis for 3D Facial Action Unit Detection,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+5cbef6da9c2cc630722f1e48a59c3aa84a00c44a,Binary Pattern Analysis for 3D Facial Action Unit Detection,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+5c6841db352d54ba6e18f362b8cc6509a15a8fd3,Exploring object-centric and scene-centric CNN features and their complementarity for human rights violations recognition in images,University of Birmingham,University of Birmingham,"University of Birmingham Edgbaston Campus, Ring Road North, Bournbrook, Birmingham, West Midlands Combined Authority, West Midlands, England, B15 2TP, UK",52.45044325,-1.93196134,edu,
+5c820e47981d21c9dddde8d2f8020146e600368f,Extended Supervised Descent Method for Robust Face Alignment,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+5c07464391cda9440cf05c67ab5f3b2b777459d6,Person Re-Identification by Localizing Discriminative Regions,University of Manitoba,University of Manitoba,"University of Manitoba, Gillson Street, Normand Park, Saint Vital, Winnipeg, Manitoba, R3T 2N2, Canada",49.80915360,-97.13304179,edu,
+5c70bca2b3dd0a47b6259d384a709be55a60369e,Model-based approaches for predicting gait changes over time,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+5c7adde982efb24c3786fa2d1f65f40a64e2afbf,Ranking Domain-Specific Highlights by Analyzing Edited Videos,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+5c48d6ea9b022c077b1873ec48ea4f37a91ac77a,A Structural Filter Approach to Human Detection,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+5c48d6ea9b022c077b1873ec48ea4f37a91ac77a,A Structural Filter Approach to Human Detection,"OMRON Corporation, Kyoto, Japan","Core Technology Center, OMRON Corporation, Kyoto, Japan","Kyoto, Kyoto Prefecture, Japan",35.01163630,135.76802940,company,
+5c36d8bb0815fd4ff5daa8351df4a7e2d1b32934,GeePS: scalable deep learning on distributed GPUs with a GPU-specialized parameter server,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+5cfbeae360398de9e20e4165485837bd42b93217,Comparison Of Hog (Histogram of Oriented Gradients) and Haar Cascade Algorithms with a Convolutional Neural Network Based Face Detection Approaches,Firat University,Firat University,"Erzincan Üniversitesi Hukuk Fakültesi Dekanlığı, Sivas-Erzincan yolu, Üçkonak, Erzincan, Erzincan merkez, Erzincan, Doğu Anadolu Bölgesi, 24000, Türkiye",39.72750370,39.47127034,edu,
+5cfbeae360398de9e20e4165485837bd42b93217,Comparison Of Hog (Histogram of Oriented Gradients) and Haar Cascade Algorithms with a Convolutional Neural Network Based Face Detection Approaches,Firat University,Firat University,"Erzincan Üniversitesi Hukuk Fakültesi Dekanlığı, Sivas-Erzincan yolu, Üçkonak, Erzincan, Erzincan merkez, Erzincan, Doğu Anadolu Bölgesi, 24000, Türkiye",39.72750370,39.47127034,edu,
+5cb8fe6b51813600d5b43a63ca4b8c1cb1237793,PortraitGAN for Flexible Portrait Manipulation,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+5c3d8cf726f17bbb326551253c810429d332d3f3,Complementing the Execution of AI Systems with Human Computation,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+5c3d8cf726f17bbb326551253c810429d332d3f3,Complementing the Execution of AI Systems with Human Computation,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+5ca14fa73da37855bfa880b549483ee2aba26669,Face Recognition under Varying Illuminations Using Local Binary Pattern And Local Ternary Pattern Fusion,Punjabi University Patiala,Punjabi University Patiala,"Punjabi University Patiala, Rajpura Road, Patiala, Punjab, 147001, India",30.35689810,76.45512720,edu,
+5ca14fa73da37855bfa880b549483ee2aba26669,Face Recognition under Varying Illuminations Using Local Binary Pattern And Local Ternary Pattern Fusion,Punjabi University Patiala,Punjabi University Patiala,"Punjabi University Patiala, Rajpura Road, Patiala, Punjab, 147001, India",30.35689810,76.45512720,edu,
+5cd58501fd184a0fe5c05026ba1965ad12e68205,It’s All in the Eyes: Subcortical and Cortical Activation during Grotesqueness Perception in Autism,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+5c4c2e8181d50c74e26d2ad793d5aec668f61e23,Multi-view fusion for activity recognition using deep neural networks,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+5c4c2e8181d50c74e26d2ad793d5aec668f61e23,Multi-view fusion for activity recognition using deep neural networks,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+5c8ae37d532c7bb8d7f00dfde84df4ba63f46297,DiscrimNet: Semi-Supervised Action Recognition from Videos using Generative Adversarial Networks,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+5c8ae37d532c7bb8d7f00dfde84df4ba63f46297,DiscrimNet: Semi-Supervised Action Recognition from Videos using Generative Adversarial Networks,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+5ce2cb4c76b0cdffe135cf24b9cda7ae841c8d49,Facial Expression Intensity Estimation Using Ordinal Information,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+5ce2cb4c76b0cdffe135cf24b9cda7ae841c8d49,Facial Expression Intensity Estimation Using Ordinal Information,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+5c4d4fd37e8c80ae95c00973531f34a6d810ea3a,The Open World of Micro-Videos,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+09b80d8eea809529b08a8b0ff3417950c048d474,Adding Unlabeled Samples to Categories by Learned Attributes,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+09b80d8eea809529b08a8b0ff3417950c048d474,Adding Unlabeled Samples to Categories by Learned Attributes,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+09348656bbbe88881d1257650a170af5e22f1008,Parsing clothing in fashion photographs,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+09fd76c02abdd1bca7b98ab9fa66450cec33b9dc,User qualified ? N Feedback Pass ? Pass ? Annotator training Annotating images,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+09f58353e48780c707cf24a0074e4d353da18934,Unconstrained face recognition: Establishing baseline human performance via crowdsourcing,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+09f58353e48780c707cf24a0074e4d353da18934,Unconstrained face recognition: Establishing baseline human performance via crowdsourcing,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+09f58353e48780c707cf24a0074e4d353da18934,Unconstrained face recognition: Establishing baseline human performance via crowdsourcing,"Noblis, Falls Church, VA, U.S.A.","Noblis, Falls Church, VA, U.S.A.","2002 Edmund Halley Dr, Reston, VA 20191, USA",38.95187000,-77.36325900,company,
+09066d7d0bb6273bf996c8538d7b34c38ea6a500,"Yes, IoU loss is submodular - as a function of the mispredictions",Yonsei University,Yonsei University,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.56004060,126.93692480,edu,
+09b5b34d06fff4c76866d92516108ac68ac25ccf,Using Regression Techniques for Coping with the One-Sample-Size Problem of Face Recognition,University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.05015580,14.46907327,edu,
+0903b956a68073eee3760572059abd5b24b026da,Probabilistic Label Trees for Efficient Large Scale Image Classification,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+0969e0dc05fca21ff572ada75cb4b703c8212e80,Semi-Supervised Classification Based on Low Rank Representation,Southwest University,Southwest University,"西南大学, 天生路, 北碚区 (Beibei), 北碚区, 北碚区 (Beibei), 重庆市, 400711, 中国",29.82366295,106.42050016,edu,
+09dd01e19b247a33162d71f07491781bdf4bfd00,Efficiently Scaling Up Video Annotation with Crowdsourced Marketplaces,"University of California, Irvine","University of California, Irvine","University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA",33.64319010,-117.84016494,edu,
+09cf3f1764ab1029f3a7d57b70ae5d5954486d69,Comparison of ICA approaches for facial expression recognition,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+09fa54f1ab7aaa83124d2415bfc6eb51e4b1f081,Where to Buy It: Matching Street Clothing Photos in Online Shops,University of North Carolina at Chapel Hill,University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.91139710,-79.05045290,edu,
+09fa54f1ab7aaa83124d2415bfc6eb51e4b1f081,Where to Buy It: Matching Street Clothing Photos in Online Shops,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+09ca15b1c1d65012e5bc07e5a44bad7b72609a02,Discriminative Hough Forests for Object Detection,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+0959ef8fefe9e7041f508c2448fc026bc9e08393,Material recognition in the wild with the Materials in Context Database,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+0951f42abbf649bb564a21d4ff5dddf9a5ea54d9,Joint Estimation of Age and Gender from Unconstrained Face Images Using Lightweight Multi-Task CNN for Mobile Applications,Institute of Information Science,Institute of Information Science,"資訊科學研究所, 數理大道, 中研里, 南港子, 南港區, 臺北市, 11574, 臺灣",25.04107280,121.61475620,edu,
+09628e9116e7890bc65ebeabaaa5f607c9847bae,Semantically Consistent Regularization for Zero-Shot Recognition,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+09733129161ca7d65cf56a7ad63c17f493386027,Face Recognition under Varying Illumination,Vienna University of Technology,Vienna University of Technology,"TU Wien, Hauptgebäude, Hoftrakt, Freihausviertel, KG Wieden, Wieden, Wien, 1040, Österreich",48.19853965,16.36986168,edu,
+09733129161ca7d65cf56a7ad63c17f493386027,Face Recognition under Varying Illumination,Istanbul Technical University,Istanbul Technical University,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye",41.10427915,29.02231159,edu,
+09733129161ca7d65cf56a7ad63c17f493386027,Face Recognition under Varying Illumination,Vienna University of Technology,Vienna University of Technology,"TU Wien, Hauptgebäude, Hoftrakt, Freihausviertel, KG Wieden, Wieden, Wien, 1040, Österreich",48.19853965,16.36986168,edu,
+096e389c28cdd15b8765baa29ae55d98f8c3c4b4,Passive Profiling and Natural Interaction Metaphors for Personalized Multimedia Museum Experiences,University of Florence,University of Florence,"Piazza di San Marco, 4, 50121 Firenze FI, Italy",43.77764260,11.25976500,edu,
+097340d3ac939ce181c829afb6b6faff946cdce0,Adding New Tasks to a Single Network with Weight Trasformations using Binary Masks,Sapienza University of Rome,Sapienza University of Rome,"Piazzale Aldo Moro, 5, 00185 Roma RM, Italy",41.90376260,12.51443840,edu,
+097d3892f5f2ba7be43a81908279f42a618839ec,Dynamic context driven human detection and tracking in meeting scenarios,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+09507f1f1253101d04a975fc5600952eac868602,Motion Feature Network: Fixed Motion Filter for Action Recognition,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+09669da2fe4764196eb0e2eff240291d54607882,Deep Disentangled Representations for Volumetric Reconstruction,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+09a8ae8fc95bd3c9fb8022da2c32c519d5fc06bc,A Multiple Kernel Learning Approach to Joint Multi-class Object Detection,Max Planck Institute for Biological Cybernetics,Max Planck Institute for Biological Cybernetics,"Max-Planck-Institut für Biologische Kybernetik, 8, Max-Planck-Ring, Max-Planck-Institut, Wanne, Tübingen, Landkreis Tübingen, Regierungsbezirk Tübingen, Baden-Württemberg, 72076, Deutschland",48.53691250,9.05922533,edu,
+0914eb61b743300828c84f9e235ce6165a171be5,Analyzing the Behavior of Visual Question Answering Models,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+09edba9db405dde61630c70bba00ae9c5dd7ed37,Learning From Disagreements: Discriminative Performance Evaluation,Boston College,Boston College,"Boston College, 140, Commonwealth Avenue, Chestnut Hill, Newton, Middlesex County, Massachusetts, 02467, USA",42.33544810,-71.16813864,edu,
+09d03b792923695deb0492d8fc3582a50e5f1a1e,Band-Sifting Decomposition for Image-Based Material Editing,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+098a1ccc13b8d6409aa333c8a1079b2c9824705b,Attribute Pivots for Guiding Relevance Feedback in Image Search,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+09232b786e009655c5e03d2b3fcd7b40d75382bf,The Representation of Emotion in Autonomic and Central Nervous System Activity,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+091433bc8791bb66797b519811834a8a53af622d,simNet: Stepwise Image-Topic Merging Network for Generating Detailed and Comprehensive Image Captions,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+091433bc8791bb66797b519811834a8a53af622d,simNet: Stepwise Image-Topic Merging Network for Generating Detailed and Comprehensive Image Captions,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+097f482c548075305b5866d7d0fde7b67c30c52d,Unsupervised Learning of Generative Topic Saliency for Person Re-identification,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+09750ce4a8fa0a0fc596bdda8bf58db74fa9a0e1,Synthesizing Training Images for Boosting Human 3D Pose Estimation,Shandong University,Shandong University,"山东大学, 泰安街, 鳌山卫街道, 即墨区, 青岛市, 山东省, 266200, 中国",36.36934730,120.67381800,edu,
+090ff8f992dc71a1125636c1adffc0634155b450,Topic-Aware Deep Auto-Encoders (TDA) for Face Alignment,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+090ff8f992dc71a1125636c1adffc0634155b450,Topic-Aware Deep Auto-Encoders (TDA) for Face Alignment,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+090ff8f992dc71a1125636c1adffc0634155b450,Topic-Aware Deep Auto-Encoders (TDA) for Face Alignment,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+090b341def78df92d562e7d8e7f9d131a68ca769,A Novel Benchmark RGBD Dataset for Dormant Apple Trees and Its Application to Automatic Pruning,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+09b43b59879d59493df2a93c216746f2cf50f4ac,Deep Transfer Metric Learning,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+09c5fe448694eea3cf3166ccccb2c81048fe0601,Clustered Exemplar-SVM: Discovering sub-categories for visual recognition,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+0947c7c46943ebbb6a4b5c795c9b54552c8e0b5a,"QMAS: Querying, Mining and Summarization of Multi-modal Databases",Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+0947c7c46943ebbb6a4b5c795c9b54552c8e0b5a,"QMAS: Querying, Mining and Summarization of Multi-modal Databases",Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+0947c7c46943ebbb6a4b5c795c9b54552c8e0b5a,"QMAS: Querying, Mining and Summarization of Multi-modal Databases",Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+093da3310d98b3c09e2770c2a6aa49eeca58cebe,Trimmed Event Recognition : submission to ActivityNet Challenge 2018,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+09b0ef3248ff8f1a05b8704a1b4cf64951575be9,Recognizing Activities of Daily Living with a Wrist-Mounted Camera,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+090b3189391f3e1917649b3a62696febbf0429e1,Taking the Perfect Selfie: Investigating the Impact of Perspective on the Perception of Higher Cognitive Variables,University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.05015580,14.46907327,edu,
+090b3189391f3e1917649b3a62696febbf0429e1,Taking the Perfect Selfie: Investigating the Impact of Perspective on the Perception of Higher Cognitive Variables,La Trobe University,La Trobe University,"La Trobe University, Keck Street, Flora Hill, Bendigo, City of Greater Bendigo, Loddon Mallee, Victoria, 3550, Australia",-36.77847540,144.29804700,edu,
+098f1939afa5a071e133c767ca49703b16443b9a,Combining Face and Iris Biometrics for Identity Verification,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+098f1939afa5a071e133c767ca49703b16443b9a,Combining Face and Iris Biometrics for Identity Verification,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+096bd380a2f653dc5e43069b97e1505186c47d5b,Min Norm Point Algorithm for Higher Order MRF-MAP Inference,"IIIT Delhi, India","IIIT Delhi, India","Okhla Industrial Estate, Phase III, Near Govind Puri Metro Station, New Delhi, Delhi 110020, India",28.54562820,77.27315050,edu,
+094357c1a2ba3fda22aa6dd9e496530d784e1721,A Unified Probabilistic Approach Modeling Relationships between Attributes and Objects,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+09e1072c509e1d24a34dfbbaba1c3700e1eb1338,Visualization of Time-Series Data in Parameter Space for Understanding Facial Dynamics,Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.48799610,-3.17969747,edu,
+09e1072c509e1d24a34dfbbaba1c3700e1eb1338,Visualization of Time-Series Data in Parameter Space for Understanding Facial Dynamics,Swansea University,Swansea University,"Swansea University, University Footbridge, Sketty, Swansea, Wales, SA2 8PZ, UK",51.60915780,-3.97934429,edu,
+098a0bd7c948e9c94704ac5e8c768c8d430e1842,Cascaded Models for Articulated Pose Estimation,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+098a0bd7c948e9c94704ac5e8c768c8d430e1842,Cascaded Models for Articulated Pose Estimation,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+098a0bd7c948e9c94704ac5e8c768c8d430e1842,Cascaded Models for Articulated Pose Estimation,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+098a0bd7c948e9c94704ac5e8c768c8d430e1842,Cascaded Models for Articulated Pose Estimation,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+09ce14b84af2dc2f76ae1cf227356fa0ba337d07,Face reconstruction in the wild,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+09a106feed520651d785fd8a2df26910f5928f2e,"Think Leader, Think White? Capturing and Weakening an Implicit Pro-White Leadership Bias",University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+090e4713bcccff52dcd0c01169591affd2af7e76,What Do You Do? Occupation Recognition in a Photo via Social Context,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+090e4713bcccff52dcd0c01169591affd2af7e76,What Do You Do? Occupation Recognition in a Photo via Social Context,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+5d2a01e3a445a92ecdce5f20656fd87e65982708,Learning Convolutional Feature Hierarchies for Visual Recognition,Courant Institute of Mathematical Sciences,Courant Institute of Mathematical Sciences,"Courant Institute of Mathematical Sciences, 251, Mercer Street, Washington Square Village, Greenwich Village, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72864840,-73.99568630,edu,
+5da9a5367bd70c004ad9b7e8cee95059490e33fc,The TUM-DLR Multimodal Earth Observation Evaluation Benchmark,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+5d28a54b1b27280482463df85bb66bc2914ff893,Multi-Object Tracking with Correlation Filter for Autonomous Vehicle,"National University of Defense Technology, China","National University of Defence Technology, Changsha 410000, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.22902090,112.99483204,edu,
+5d58d4164493924906231a28153e50342fdf1198,Leveraging Long-Term Predictions and Online Learning in Agent-Based Multiple Person Tracking,City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+5d58d4164493924906231a28153e50342fdf1198,Leveraging Long-Term Predictions and Online Learning in Agent-Based Multiple Person Tracking,University of North Carolina,University of North Carolina,"University of North Carolina, Emergency Room Drive, Chapel Hill, Orange County, North Carolina, 27599, USA",35.90503535,-79.04775327,edu,
+5d494e5517a25365fe204eaae3c3247f7e57260e,Membership representation for detecting block-diagonal structure in low-rank or sparse subspace clustering,Hanyang University,Hanyang University,"한양대, 206, 왕십리로, 사근동, 성동구, 서울특별시, 04763, 대한민국",37.55572710,127.04366420,edu,
+5d494e5517a25365fe204eaae3c3247f7e57260e,Membership representation for detecting block-diagonal structure in low-rank or sparse subspace clustering,Ajou University,Ajou University,"아주대학교, 성호대교, 이의동, 영통구, 수원시, 경기, 16499, 대한민국",37.28300030,127.04548469,edu,
+5d494e5517a25365fe204eaae3c3247f7e57260e,Membership representation for detecting block-diagonal structure in low-rank or sparse subspace clustering,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+5d1ffb7ba3c53ecc5a90d40380ae235043c16344,On Label-Aware Community Search,University of Hong Kong,University of Hong Kong,"海洋科學研究所 The Swire Institute of Marine Science, 鶴咀道 Cape D'Aguilar Road, 鶴咀低電台 Cape D'Aguilar Low-Level Radio Station, 石澳 Shek O, 芽菜坑村 Nga Choy Hang Tsuen, 南區 Southern District, 香港島 Hong Kong Island, HK, 中国",22.20814690,114.25964115,edu,
+5de5848dc3fc35e40420ffec70a407e4770e3a8d,WebVision Database: Visual Learning and Understanding from Web Data,ETH Zurich,"Computer Vision Laboratory, ETH Zurich, Zurich, Switzerland","Sternwartstrasse 7, 8092 Zürich, Switzerland",47.37723980,8.55216180,edu,
+5df0fed3b37ffac6d0ae7c0a3ccce41c7044e8e8,Groupwise Tracking of Crowded Similar-Appearance Targets from Low-Continuity Image Sequences,University of South Carolina,University of South Carolina,"University of South Carolina, Wheat Street, Columbia, Richland County, South Carolina, 29205, USA",33.99282980,-81.02685168,edu,
+5d2d797ee4053dada784639d7462abbfb2220031,Guided Open Vocabulary Image Captioning with Constrained Beam Search,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+5da8e841871e4a97534d981ee20002b183b45508,BSN: Boundary Sensitive Network for Temporal Action Proposal Generation,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+5d92531e74c4c2cdce91fdcd3c7ff090c8c29504,Synthesizing Scenes for Instance Detection,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+5d92531e74c4c2cdce91fdcd3c7ff090c8c29504,Synthesizing Scenes for Instance Detection,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+5dc52c64991c655a12936867594326cf6352eb8e,Constructing Local Binary Pattern Statistics by Soft Voting,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+5d7f52a7d9814688c13b84ab35526fc9bf57d1bf,Large-Scale Category Structure Aware Image Categorization,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+5d7f52a7d9814688c13b84ab35526fc9bf57d1bf,Large-Scale Category Structure Aware Image Categorization,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+5d7f52a7d9814688c13b84ab35526fc9bf57d1bf,Large-Scale Category Structure Aware Image Categorization,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+5d3b6c9a0a8b71b875a565f5cd133d83817fdc38,3D facial expression recognition based on automatically selected features,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+5d02f269e3b9764a3bf5d254a385fd61759a84a7,AVEC 2011-The First International Audio/Visual Emotion Challenge,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+5d02f269e3b9764a3bf5d254a385fd61759a84a7,AVEC 2011-The First International Audio/Visual Emotion Challenge,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+5dd496e58cfedfc11b4b43c4ffe44ac72493bf55,Discriminative convolutional Fisher vector network for action recognition,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+5d2b396447fae5a64cbe6b5ef5e99ca2b88c2914,Vehicle Re-Identification with the Space-Time Prior,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+5d88702cdc879396b8b2cc674e233895de99666b,Exploiting Feature Hierarchies with Convolutional Neural Networks for Cultural Event Recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+5d88702cdc879396b8b2cc674e233895de99666b,Exploiting Feature Hierarchies with Convolutional Neural Networks for Cultural Event Recognition,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+5df93d7da8ab46f1d0e9deadd4e5e5568acd7651,Delving Deep into Multiscale Pedestrian Detection via Single Scale Feature Maps,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+5df93d7da8ab46f1d0e9deadd4e5e5568acd7651,Delving Deep into Multiscale Pedestrian Detection via Single Scale Feature Maps,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+5df93d7da8ab46f1d0e9deadd4e5e5568acd7651,Delving Deep into Multiscale Pedestrian Detection via Single Scale Feature Maps,East China Normal University,East China Normal University,"华东师范大学, 3663, 中山北路, 曹家渡, 普陀区, 普陀区 (Putuo), 上海市, 200062, 中国",31.22849230,121.40211389,edu,
+5d479f77ecccfac9f47d91544fd67df642dfab3c,"Linking People in Videos with ""Their"" Names Using Coreference Resolution",Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+5d479f77ecccfac9f47d91544fd67df642dfab3c,"Linking People in Videos with ""Their"" Names Using Coreference Resolution",Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+5d5533b8b95f25f63e07786cf3e063c8db356f1f,Human Observers and Automated Assessment of Dynamic Emotional Facial Expressions: KDEF-dyn Database Validation,Universität Hamburg,Universität Hamburg,"Informatikum, 30, Vogt-Kölln-Straße, Stellingen, Eimsbüttel, Hamburg, 22527, Deutschland",53.59948200,9.93353436,edu,
+5df376748fe5ccd87a724ef31d4fdb579dab693f,A Dashboard for Affective E-learning: Data Visualization for Monitoring Online Learner Emotions,Carleton University,Carleton University,"Carleton University, 1125, Colonel By Drive, Billings Bridge, Capital, Ottawa, Ontario, K1S 5B7, Canada",45.38608430,-75.69539267,edu,
+5dc7c33475b545271d1de726fd88bb68dfb7e11b,Generating Video Description using RNN with Semantic Attention,Tohoku University,Tohoku University,"Tohoku University, 五橋通, 青葉区, 仙台市, 宮城県, 東北地方, 980-0811, 日本",38.25309450,140.87365930,edu,
+5dc7c33475b545271d1de726fd88bb68dfb7e11b,Generating Video Description using RNN with Semantic Attention,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+5d56587ee5652fc9bd7e3bdf5a533b4f627b6487,A Graph-Based Algorithm for Supervised Image Classification,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+310dbc59aa3309f2a3813728783d81a9f7f1c939,Estimating contact dynamics,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+31ffe7b6447221ade78c71c36e8e86279a8478b6,Batch-normalized recurrent highway networks,Rochester Institute of Technology,Rochester Institute of Technology,"Rochester Institute of Technology (RIT), 1, Lomb Memorial Drive, Bailey, Henrietta Town, Monroe County, New York, 14623, USA",43.08250655,-77.67121663,edu,
+3167f415a861f19747ab5e749e78000179d685bc,RankBoost with l1 regularization for facial expression recognition and intensity estimation,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+3107316f243233d45e3c7e5972517d1ed4991f91,CVAE-GAN: Fine-Grained Image Generation through Asymmetric Training,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+3164189f84710de9f8150385a41a7079a57186df,Object detection using edge histogram of oriented gradient,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+31e0303d98fd1bb6a1074d4fe0b14228e91b388b,基於稀疏表示之語者識別 (Sparse Representation Based Speaker Identification) [In Chinese],National Central University,National Central University,"NCU, 300, 中大路, 上三座屋, 五權里, 樹林子, 中壢區, 桃園市, 320, 臺灣",24.96841805,121.19139696,edu,
+31e0303d98fd1bb6a1074d4fe0b14228e91b388b,基於稀疏表示之語者識別 (Sparse Representation Based Speaker Identification) [In Chinese],National Central University,National Central University,"NCU, 300, 中大路, 上三座屋, 五權里, 樹林子, 中壢區, 桃園市, 320, 臺灣",24.96841805,121.19139696,edu,
+31f5eebfebac54cf5817deea7da32994637a5b28,Multi-view Metric Learning for Multi-view Video Summarization,Anhui University,Anhui University,"安徽大学(磬苑校区), 111, 九龙路, 弘泰苑, 合肥国家级经济技术开发区, 芙蓉社区, 合肥经济技术开发区, 合肥市区, 合肥市, 安徽省, 230601, 中国",31.76909325,117.17795091,edu,
+31f5eebfebac54cf5817deea7da32994637a5b28,Multi-view Metric Learning for Multi-view Video Summarization,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+313d5eba97fe064bdc1f00b7587a4b3543ef712a,Compact Deep Aggregation for Set Retrieval,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+3167c8a1d0415eb7dc241e395f55d559c43a99f9,Noisy Sparse Subspace Clustering,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+3167c8a1d0415eb7dc241e395f55d559c43a99f9,Noisy Sparse Subspace Clustering,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+3150fd3b0065372f898b42a3628318210fcd566b,Beyond parametric score normalisation in biometric verification systems,University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.05015580,14.46907327,edu,
+3136cab00cfb223ceb9aff78af2c165b6e71a878,Open source biometric recognition,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+3136cab00cfb223ceb9aff78af2c165b6e71a878,Open source biometric recognition,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+3136cab00cfb223ceb9aff78af2c165b6e71a878,Open source biometric recognition,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+3137a3fedf23717c411483c7b4bd2ed646258401,Joint Learning of Discriminative Prototypes and Large Margin Nearest Neighbor Classifiers,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+31c34a5b42a640b824fa4e3d6187e3675226143e,Shape and texture based facial action and emotion recognition,Northumbria University,Northumbria University,"Northumbria University, Birkdale Close, High Heaton, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE7 7TP, UK",55.00306320,-1.57463231,edu,
+3154d7479881c7efd8a50909af921cfa8cff8e2e,Recognizing hand-object interactions in wearable camera videos,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+31531ff4f106d1e196e619b859d0dc510e01c5a8,A Convex Formulation for Spectral Shrunk Clustering,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+31531ff4f106d1e196e619b859d0dc510e01c5a8,A Convex Formulation for Spectral Shrunk Clustering,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+31531ff4f106d1e196e619b859d0dc510e01c5a8,A Convex Formulation for Spectral Shrunk Clustering,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+31531ff4f106d1e196e619b859d0dc510e01c5a8,A Convex Formulation for Spectral Shrunk Clustering,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+31531ff4f106d1e196e619b859d0dc510e01c5a8,A Convex Formulation for Spectral Shrunk Clustering,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+31ea88f29e7f01a9801648d808f90862e066f9ea,Deep Multi-task Representation Learning: A Tensor Factorisation Approach,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+31e1d021bd06054bbfcd915794e84448ae681000,Joint Deep Learning for Pedestrian Detection,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+31835472821c7e3090abb42e57c38f7043dc3636,Flow Counting Using Realboosted Multi-sized Window Detectors,Lund University,Lund University,"TEM at Lund University, 9, Klostergatan, Stadskärnan, Centrum, Lund, Skåne, Götaland, 22222, Sverige",55.70395710,13.19020110,edu,
+3163d481923cc75d53c2ca940e23a07e7c85069c,Kinship verification from videos using spatio-temporal texture features and deep learning,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+3163d481923cc75d53c2ca940e23a07e7c85069c,Kinship verification from videos using spatio-temporal texture features and deep learning,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+3147bb14bf4228735ecf4bc2a421590b3de86c0f,Efficient PSD Constrained Asymmetric Metric Learning for Person Re-Identification,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+31c5d8109f3110fc8b7eeb6265e832e809cdaa39,Recursive Fréchet Mean Computation on the Grassmannian and Its Applications to Computer Vision,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+312b2566e315dd6e65bd42cfcbe4d919159de8a1,An Accurate Algorithm for Generating a Music Playlist based on Facial Expressions,Amity University,Amity University,"Amity University, Faizabad Road, Uttardhauna, Gomti Nagar, Tiwariganj, Lucknow, Uttar Pradesh, 226010, India",26.85095965,81.04950965,edu,
+3152e89963b8a4028c4abf6e1dc19e91c4c5a8f4,Exploring Stereotypes and Biased Data with the Crowd,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+3152e89963b8a4028c4abf6e1dc19e91c4c5a8f4,Exploring Stereotypes and Biased Data with the Crowd,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+313e508202a6f4f2fc40a78b6237e52c2c0d22a2,Domain Adaptation for Ear Recognition Using Deep Convolutional Neural Networks,Istanbul Technical University,Istanbul Technical University,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye",41.10427915,29.02231159,edu,
+319aeaba5dfb4f7de44668bbedbbfdcb7ebc50fa,Gait Learning-Based Regenerative Model: A Level Set Approach,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+316d51aaa37891d730ffded7b9d42946abea837f,Unsupervised learning of clutter-resistant visual representations from natural videos,McGovern Institute for Brain Research,McGovern Institute for Brain Research,"McGovern Institute for Brain Research (MIT), Main Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.36262950,-71.09144810,edu,
+31d60b2af2c0e172c1a6a124718e99075818c408,Robust Facial Expression Recognition Using Near Infrared Cameras,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+316486bada6023816c785c0d4eb401658737be3f,QoS-Aware scheduling in heterogeneous datacenters with paragon,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+31f1e711fcf82c855f27396f181bf5e565a2f58d,Unconstrained Age Estimation with Deep Convolutional Neural Networks,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+31f1e711fcf82c855f27396f181bf5e565a2f58d,Unconstrained Age Estimation with Deep Convolutional Neural Networks,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+3127738c7a634d7b651405cb31fbc52ec7d5806a,Robust Subspace Recovery via Bi-Sparsity Pursuit,North Carolina State University,North Carolina State University,"North Carolina State University, Oval Drive, West Raleigh, Raleigh, Wake County, North Carolina, 27695, USA",35.77184965,-78.67408695,edu,
+315a90543d60a5b6c5d1716fe9076736f0e90d24,Illumination invariant human face recognition: frequency or resonance?,University of Windsor,University of Windsor,"Bridge AA, Ambassador Bridge, Windsor, Essex, Ontario, N9C 2J9, Canada",42.30791465,-83.07176915,edu,
+3107085973617bbfc434c6cb82c87f2a952021b7,Spatio-temporal human action localisation and instance segmentation in temporally untrimmed videos,Oxford Brookes University,Oxford Brookes University,"Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK",51.75552050,-1.22615970,edu,
+3107085973617bbfc434c6cb82c87f2a952021b7,Spatio-temporal human action localisation and instance segmentation in temporally untrimmed videos,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+315ff3853dd408b765fbf83344974eda9ac37705,Predicting the Category and Attributes of Visual Search Targets Using Deep Gaze Pooling,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+31182c5ffc8c5d8772b6db01ec98144cd6e4e897,3D Face Reconstruction with Region Based Best Fit Blending Using Mobile Phone for Virtual Reality Based Social Media,University of Tartu,UNIVERSITY OF TARTU,"Paabel, University of Tartu, 17, Ülikooli, Kesklinn, Tartu linn, Tartu, Tartu linn, Tartu maakond, 53007, Eesti",58.38131405,26.72078081,edu,
+312620fb93a30b0448ec8ffd728b8ee2858ef74c,Compact Real-time avoidance on a Humanoid Robot for Human-robot Interaction,Czech Technical University,Czech Technical University,"České vysoké učení technické v Praze, Resslova, Nové Město, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 11121, Česko",50.07642960,14.41802312,edu,
+312620fb93a30b0448ec8ffd728b8ee2858ef74c,Compact Real-time avoidance on a Humanoid Robot for Human-robot Interaction,Yale University,Yale University,"Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA",41.25713055,-72.98966960,edu,
+3146fabd5631a7d1387327918b184103d06c2211,Person-Independent 3D Gaze Estimation Using Face Frontalization,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+3146fabd5631a7d1387327918b184103d06c2211,Person-Independent 3D Gaze Estimation Using Face Frontalization,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+91fe43fd76571513c8caf3aca20a405f5d99f3fd,What is the Ground? Continuous Maps for Grounding Perceptual Primitives,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+91c184e7fb0c7cce5319b8db85c1488b3861976f,Visual Question Answer Diversity,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+91c184e7fb0c7cce5319b8db85c1488b3861976f,Visual Question Answer Diversity,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+91c184e7fb0c7cce5319b8db85c1488b3861976f,Visual Question Answer Diversity,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+91495c689e6e614247495c3f322d400d8098de43,A Deep-Learning Approach to Facial Expression Recognition with Candid Images,CUNY City College,CUNY City College,"CUNY City College, 205 East 42nd Street, New York, NY 10017",40.75000000,-73.97300000,edu,
+910524c0d0fe062bf806bb545627bf2c9a236a03,Master Thesis Improvement of Facial Expression Recognition through the Evaluation of Dynamic and Static Features in Video Sequences,Otto von Guericke University,Otto von Guericke University,"Otto-von-Guericke-Universität Magdeburg, 2, Universitätsplatz, Krökentorviertel/Breiter Weg NA, Alte Neustadt, Magdeburg, Sachsen-Anhalt, 39106, Deutschland",52.14005065,11.64471248,edu,
+910524c0d0fe062bf806bb545627bf2c9a236a03,Master Thesis Improvement of Facial Expression Recognition through the Evaluation of Dynamic and Static Features in Video Sequences,Otto von Guericke University,Otto von Guericke University,"Otto-von-Guericke-Universität Magdeburg, 2, Universitätsplatz, Krökentorviertel/Breiter Weg NA, Alte Neustadt, Magdeburg, Sachsen-Anhalt, 39106, Deutschland",52.14005065,11.64471248,edu,
+91df860368cbcebebd83d59ae1670c0f47de171d,"COCO Attributes: Attributes for People, Animals, and Objects",Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+91df860368cbcebebd83d59ae1670c0f47de171d,"COCO Attributes: Attributes for People, Animals, and Objects",Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+9151f229e7b4e318b0b12afe99993da0ee5e0e34,Adversarial Multi-task Learning for Text Classification,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+9151f229e7b4e318b0b12afe99993da0ee5e0e34,Adversarial Multi-task Learning for Text Classification,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+91bd017c1b19c36e430a22929d8de3af0795dfa4,Learning Linear Transformations for Fast Arbitrary Style Transfer,"University of California, Merced","University of California, Merced","University of California, Merced, Ansel Adams Road, Merced County, California, USA",37.36566745,-120.42158888,edu,
+9110c589c6e78daf4affd8e318d843dc750fb71a,Facial Expression Synthesis Based on Emotion Dimensions for Affective Talking Avatar,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+9110c589c6e78daf4affd8e318d843dc750fb71a,Facial Expression Synthesis Based on Emotion Dimensions for Affective Talking Avatar,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+9110c589c6e78daf4affd8e318d843dc750fb71a,Facial Expression Synthesis Based on Emotion Dimensions for Affective Talking Avatar,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+9184b0c04013bfdfd82f4f271b5f017396c2f085,Semantic Segmentation for Line Drawing Vectorization Using Neural Networks,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+919b1f80f818c2c1710a674536d4957890bbfd82,Targeted Kernel Networks: Faster Convolutions with Attentive Regularization,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+916498961a51f56a592c3551b0acc25978571fa7,Optimal landmark detection using shape models and branch and bound,University of Basel,University of Basel,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra",47.56126510,7.57529610,edu,
+916498961a51f56a592c3551b0acc25978571fa7,Optimal landmark detection using shape models and branch and bound,University of Basel,University of Basel,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra",47.56126510,7.57529610,edu,
+918ab79e963d5e339a2696ee4aed123599f291e6,Machine Learning Methods for Solving Assignment Problems in Multi-Target Tracking,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+918ab79e963d5e339a2696ee4aed123599f291e6,Machine Learning Methods for Solving Assignment Problems in Multi-Target Tracking,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+918ab79e963d5e339a2696ee4aed123599f291e6,Machine Learning Methods for Solving Assignment Problems in Multi-Target Tracking,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+918ab79e963d5e339a2696ee4aed123599f291e6,Machine Learning Methods for Solving Assignment Problems in Multi-Target Tracking,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+918ab79e963d5e339a2696ee4aed123599f291e6,Machine Learning Methods for Solving Assignment Problems in Multi-Target Tracking,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+918ab79e963d5e339a2696ee4aed123599f291e6,Machine Learning Methods for Solving Assignment Problems in Multi-Target Tracking,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+918ab79e963d5e339a2696ee4aed123599f291e6,Machine Learning Methods for Solving Assignment Problems in Multi-Target Tracking,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+91e435fe71861a8da569a089b4841522ac9aa369,ST-GAN: Unsupervised Facial Image Semantic Transformation Using Generative Adversarial Networks,Shandong University,Shandong University,"山东大学, 泰安街, 鳌山卫街道, 即墨区, 青岛市, 山东省, 266200, 中国",36.36934730,120.67381800,edu,
+91e435fe71861a8da569a089b4841522ac9aa369,ST-GAN: Unsupervised Facial Image Semantic Transformation Using Generative Adversarial Networks,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+91e435fe71861a8da569a089b4841522ac9aa369,ST-GAN: Unsupervised Facial Image Semantic Transformation Using Generative Adversarial Networks,Shandong University,Shandong University,"山东大学, 泰安街, 鳌山卫街道, 即墨区, 青岛市, 山东省, 266200, 中国",36.36934730,120.67381800,edu,
+919327d4f264775bd4ab2923d7786d5b2c859409,An X-T slice based method for action recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+91ea1d8f11c4e3a20234888f6ea5309678975563,Disconnected Manifold Learning for Generative Adversarial Networks,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+91ea1d8f11c4e3a20234888f6ea5309678975563,Disconnected Manifold Learning for Generative Adversarial Networks,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+911ee14fbd3f0b9ccbd91090fbe4aa65d73f46f5,AlignGAN: Learning to Align Cross-Domain Images with Conditional Generative Adversarial Networks,City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+911ee14fbd3f0b9ccbd91090fbe4aa65d73f46f5,AlignGAN: Learning to Align Cross-Domain Images with Conditional Generative Adversarial Networks,City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+911ee14fbd3f0b9ccbd91090fbe4aa65d73f46f5,AlignGAN: Learning to Align Cross-Domain Images with Conditional Generative Adversarial Networks,Education University of Hong Kong,The Education University of Hong Kong,"香港教育大學 The Education University of Hong Kong, 露屏路 Lo Ping Road, 鳳園 Fung Yuen, 下坑 Ha Hang, 新界 New Territories, HK, DD5 1119, 中国",22.46935655,114.19474194,edu,
+912f107002506ab8c7ae411c8d34c200ba567b02,Optimal UV spaces for facial morphable model construction,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+91e507d2d8375bf474f6ffa87788aa3e742333ce,Robust Face Recognition Using Probabilistic Facial Trait Code,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+91cc3981c304227e13ae151a43fbb124419bc0ce,Fast Person Re-identification via Cross-Camera Semantic Binary Transformation,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+91cc3981c304227e13ae151a43fbb124419bc0ce,Fast Person Re-identification via Cross-Camera Semantic Binary Transformation,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+91cc3981c304227e13ae151a43fbb124419bc0ce,Fast Person Re-identification via Cross-Camera Semantic Binary Transformation,University of East Anglia,University of East Anglia,"Arts (Lower Walkway Level), The Square, Westfield View, Earlham, Norwich, Norfolk, East of England, England, NR4 7TJ, UK",52.62215710,1.24091360,edu,
+91a4ebf1ca0314a74c436729700ef09bddaa6222,Detailed Human Avatars from Monocular Video,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+9114f5247562c0a71ea9aef23d474e06dd96d8cb,Neural Sign Language Translation,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+9114f5247562c0a71ea9aef23d474e06dd96d8cb,Neural Sign Language Translation,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+91d216e72a774b10c1eac9bce5b1046fac8c8a97,Garments Texture Design Class Identification Using Deep Convolutional Neural Network,University of Dhaka,University of Dhaka,"World War Memorial, Shahid Minar Rd, Jagannath Hall, DU, জিগাতলা, ঢাকা, ঢাকা বিভাগ, 1000, বাংলাদেশ",23.73169570,90.39652750,edu,
+91f9f6623abc51086183cf1d2ea9954f503061fe,A2-Nets: Double Attention Networks,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+91f9f6623abc51086183cf1d2ea9954f503061fe,A2-Nets: Double Attention Networks,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+91f9f6623abc51086183cf1d2ea9954f503061fe,A2-Nets: Double Attention Networks,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+91f9f6623abc51086183cf1d2ea9954f503061fe,A2-Nets: Double Attention Networks,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+9103148dd87e6ff9fba28509f3b265e1873166c9,Face Analysis using 3D Morphable Models,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+9103148dd87e6ff9fba28509f3b265e1873166c9,Face Analysis using 3D Morphable Models,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+91b8f1e4299b0f7ad716ece76565c6689d5d1b98,"How clever is the FiLM model, and how clever can it be?",University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+91b8f1e4299b0f7ad716ece76565c6689d5d1b98,"How clever is the FiLM model, and how clever can it be?",University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+91b8f1e4299b0f7ad716ece76565c6689d5d1b98,"How clever is the FiLM model, and how clever can it be?",University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+9194c206269a09c251cb3d1c878f9f11639b053a,A New Feature Extraction Method Based on the Information Fusion of Entropy Matrix and Covariance Matrix and Its Application in Face Recognition,Yunnan University,Yunnan University,"云南大学, 一二一大街, 志城家园, 五华区, 五华区 (Wuhua), 昆明市 (Kunming), 云南省, 650030, 中国",25.05703205,102.70027525,edu,
+91bf682708317b1c84365ce9589c4c1d9fc014e8,Style Separation and Synthesis via Generative Adversarial Networks,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+91bf682708317b1c84365ce9589c4c1d9fc014e8,Style Separation and Synthesis via Generative Adversarial Networks,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+91bf682708317b1c84365ce9589c4c1d9fc014e8,Style Separation and Synthesis via Generative Adversarial Networks,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+65b737e5cc4a565011a895c460ed8fd07b333600,Transfer Learning for Cross-Dataset Recognition: A Survey,University of Wollongong,University of Wollongong,"University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia",-34.40505545,150.87834655,edu,
+65b737e5cc4a565011a895c460ed8fd07b333600,Transfer Learning for Cross-Dataset Recognition: A Survey,University of Wollongong,University of Wollongong,"University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia",-34.40505545,150.87834655,edu,
+65b737e5cc4a565011a895c460ed8fd07b333600,Transfer Learning for Cross-Dataset Recognition: A Survey,University of Wollongong,University of Wollongong,"University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia",-34.40505545,150.87834655,edu,
+6555ef4e6f9582b5cb06199a70d4f54df04314ff,Part-based clothing segmentation for person retrieval,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+65f0b05052c3145a58c2653821e5429ca62555ce,Attacks Meet Interpretability: Attribute-steered Detection of Adversarial Samples,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+65b1760d9b1541241c6c0222cc4ee9df078b593a,Enhanced Pictorial Structures for Precise Eye Localization Under Uncontrolled Conditions,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+65355cbb581a219bd7461d48b3afd115263ea760,Recognition of ongoing complex activities by sequence prediction over a hierarchical label space,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+65bccb76384bc95c8fe53f2d2a8e3f048fd880bf,Bi-label Propagation for Generic Multiple Object Tracking,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+65bccb76384bc95c8fe53f2d2a8e3f048fd880bf,Bi-label Propagation for Generic Multiple Object Tracking,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+65d7f95fcbabcc3cdafc0ad38e81d1f473bb6220,Face Recognition for the Visually Impaired,King Saud University,King Saud University,"King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية",24.72464030,46.62335012,edu,
+65150ea455cf30ff75a73c1d25df84687d4930e4,3D Object Retrieval and Recognition,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+65bba9fba03e420c96ec432a2a82521ddd848c09,Connectionist Temporal Modeling for Weakly Supervised Action Labeling,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+656531036cee6b2c2c71954bb6540ef6b2e016d0,Jointly Learning Non-negative Projection and Dictionary with Discriminative Graph Constraints for Classification,Shenzhen University,Shenzhen University,"深圳大学, 3688, 南海大道, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518060, 中国",22.53521465,113.93159110,edu,
+656531036cee6b2c2c71954bb6540ef6b2e016d0,Jointly Learning Non-negative Projection and Dictionary with Discriminative Graph Constraints for Classification,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+656531036cee6b2c2c71954bb6540ef6b2e016d0,Jointly Learning Non-negative Projection and Dictionary with Discriminative Graph Constraints for Classification,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+65683bd97720bc18a022b23755b32c8c988e8d5c,Discovering social groups via latent structure learning.,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+65683bd97720bc18a022b23755b32c8c988e8d5c,Discovering social groups via latent structure learning.,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+652aac54a3caf6570b1c10c993a5af7fa2ef31ff,"Carnegie Mellon University Statistical Modeling for Networked Video : Coding Optimization , Error Concealment and Traffic Analysis",Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+652aac54a3caf6570b1c10c993a5af7fa2ef31ff,"Carnegie Mellon University Statistical Modeling for Networked Video : Coding Optimization , Error Concealment and Traffic Analysis",Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+6506f9a8a2e73eeaea185273df909feccb68f944,Research on Dynamic Facial Expressions Recognition,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+656ef752b363a24f84cc1aeba91e4fa3d5dd66ba,Robust Open-Set Face Recognition for Small-Scale Convenience Applications,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+656268296532913eb34929e82ee19808429de06a,Amygdala Engagement in Response to Subthreshold Presentations of Anxious Face Stimuli in Adults with Autism Spectrum Disorders: Preliminary Insights,McMaster University,McMaster University,"McMaster University, Westdale, Hamilton, Ontario, Canada",43.26336945,-79.91809684,edu,
+656268296532913eb34929e82ee19808429de06a,Amygdala Engagement in Response to Subthreshold Presentations of Anxious Face Stimuli in Adults with Autism Spectrum Disorders: Preliminary Insights,McMaster University,McMaster University,"McMaster University, Westdale, Hamilton, Ontario, Canada",43.26336945,-79.91809684,edu,
+65b51d3e0b46e80236d496b25b424d22c6de4348,Towards Highly Accurate and Stable Face Alignment for High-Resolution Videos,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+6577c76395896dd4d352f7b1ee8b705b1a45fa90,Towards computational models of kinship verification,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+6577c76395896dd4d352f7b1ee8b705b1a45fa90,Towards computational models of kinship verification,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+6577a11fc1e022670a0867ca2622b72ef225616e,Conservative learning for learning object detectors,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+65fbd8c6b6a8814c3d8b28b4e14b2e262e60c58c,Bridging Cognitive Programs and Machine Learning,York University,York University,"York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada",43.77439110,-79.50481085,edu,
+6583e5e9c01da5d70a9ccba799fd53bc4ec015d6,Deep Regression Bayesian Network and Its Applications,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+65c978a97f54cf255f01c6846d6c51b37c61f836,A Glimpse Far into the Future: Understanding Long-term Crowd Worker Accuracy,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+650bfe7acc3f03eb4ba91d9f93da8ef0ae8ba772,A Deep Learning Approach for Subject Independent Emotion Recognition from Facial Expressions,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+6577d30abd8bf5b21901572504bd82101a7eed75,Ear Biometrics in Human,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+65293ecf6a4c5ab037a2afb4a9a1def95e194e5f,"Face , Age and Gender Recognition using Local Descriptors",Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+65293ecf6a4c5ab037a2afb4a9a1def95e194e5f,"Face , Age and Gender Recognition using Local Descriptors",University of Ottawa,University of Ottawa,"University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada",45.42580475,-75.68740118,edu,
+658eb1fd14808d10e0f4fee99c5506a1bb0e351a,Multi-Discriminant Classification Algorithm for Face Verification,National Cheng Kung University,National Cheng Kung University,"成大, 1, 大學路, 大學里, 前甲, 東區, 臺南市, 70101, 臺灣",22.99919160,120.21625134,edu,
+6515fe829d0b31a5e1f4dc2970a78684237f6edb,Constrained Maximum Likelihood Learning of Bayesian Networks for Facial Action Recognition,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+6515fe829d0b31a5e1f4dc2970a78684237f6edb,Constrained Maximum Likelihood Learning of Bayesian Networks for Facial Action Recognition,GE Global Research Center,GE Global Research Center,"GE Global Research Center, Aqueduct, Niskayuna, Schenectady County, New York, USA",42.82982480,-73.87719385,edu,
+65086cbda9714c538417f7b25f9cf661e6d72833,Tracking Using Motion Patterns for Very Crowded Scenes,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+65821b839b8c6ecf6cba7be0ca132da59075e1b4,"Preliminary studies on the Good, the Bad, and the Ugly face recognition challenge problem",Colorado State University,Colorado State University,"Colorado State University, West Pitkin Street, Woodwest, Fort Collins, Larimer County, Colorado, 80526-2002, USA",40.57093580,-105.08655256,edu,
+6261eb75066f779e75b02209fbd3d0f02d3e1e45,Fudan-Huawei at MediaEval 2015: Detecting Violent Scenes and Affective Impact in Movies with Deep Learning,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+622daa25b5e6af69f0dac3a3eaf4050aa0860396,Greedy feature selection for subspace clustering,Rice University,Rice University,"Rice University, Stockton Drive, Houston, Harris County, Texas, 77005-1890, USA",29.71679145,-95.40478113,edu,
+622daa25b5e6af69f0dac3a3eaf4050aa0860396,Greedy feature selection for subspace clustering,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+622daa25b5e6af69f0dac3a3eaf4050aa0860396,Greedy feature selection for subspace clustering,Rice University,Rice University,"Rice University, Stockton Drive, Houston, Harris County, Texas, 77005-1890, USA",29.71679145,-95.40478113,edu,
+624706748e2e62a4e07ae761543da6d41e3f8fcd,Language Grounding in Massive Online Data,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+62d9750adb300cd53fb107b174cb6a07fb8b96b5,Using 3D Models to Recognize 2D Faces in the Wild,University of Florence,University of Florence,"Piazza di San Marco, 4, 50121 Firenze FI, Italy",43.77764260,11.25976500,edu,
+6293d33e176ba7ccd59e94f8a137876c1d581e1f,Holistic features for real-time crowd behaviour anomaly detection,Dublin City University,DUBLIN CITY UNIVERSITY,"Dublin City University Glasnevin Campus, Lower Car Park, Wad, Whitehall A ED, Dublin 9, Dublin, County Dublin, Leinster, D09 FW22, Ireland",53.38522185,-6.25740874,edu,
+628a3f027b7646f398c68a680add48c7969ab1d9,Plan for Final Year Project : HKU-Face : A Large Scale Dataset for Deep Face Recognition,Facebook,Facebook,"250 Bryant St, Mountain View, CA 94041, USA",37.39367170,-122.08072620,company,"Facebook, Mountain View, CA"
+626913b8fcbbaee8932997d6c4a78fe1ce646127,Learning from Millions of 3D Scans for Large-scale 3D Face Recognition,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+621227db6a9b0ad374cf737fea3760b49c4de42c,Hardware Trojan Attacks on Neural Networks,Clemson University,Clemson University,"Clemson University, Old Stadium Road, Clemson Heights, Pickens County, South Carolina, 29631, USA",34.66869155,-82.83743476,edu,
+6226f2ea345f5f4716ac4ddca6715a47162d5b92,Object Detection: Current and Future Directions,Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.02223470,77.56718325,edu,
+6226f2ea345f5f4716ac4ddca6715a47162d5b92,Object Detection: Current and Future Directions,Queensland University of Technology,Queensland University of Technology,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.47715625,153.02841004,edu,
+6226f2ea345f5f4716ac4ddca6715a47162d5b92,Object Detection: Current and Future Directions,University of Groningen,University of Groningen,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland",53.21967825,6.56251482,edu,
+6226f2ea345f5f4716ac4ddca6715a47162d5b92,Object Detection: Current and Future Directions,Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.02223470,77.56718325,edu,
+6226f2ea345f5f4716ac4ddca6715a47162d5b92,Object Detection: Current and Future Directions,Kyoto University,Kyoto University,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.02749960,135.78154513,edu,
+62e913431bcef5983955e9ca160b91bb19d9de42,Facial Landmark Detection with Tweaked Convolutional Neural Networks,Open University of Israel,Open University of Israel,"האוניברסיטה הפתוחה, 15, אבא חושי, חיפה, גבעת דאונס, חיפה, מחוז חיפה, NO, ישראל",32.77824165,34.99565673,edu,
+62beb92e4de7b682619eba0aa39c14a39c95718f,Towards Effective Deep Embedding for Zero-Shot Learning,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+62beb92e4de7b682619eba0aa39c14a39c95718f,Towards Effective Deep Embedding for Zero-Shot Learning,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+624e9d9d3d941bab6aaccdd93432fc45cac28d4b,Object-Scene Convolutional Neural Networks for event recognition in images,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+624e9d9d3d941bab6aaccdd93432fc45cac28d4b,Object-Scene Convolutional Neural Networks for event recognition in images,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+624496296af19243d5f05e7505fd927db02fd0ce,Gauss-Newton Deformable Part Models for Face Alignment In-the-Wild,University of Lincoln,University of Lincoln,"University of Lincoln, Brayford Way, Whitton Park, New Boultham, Lincoln, Lincolnshire, East Midlands, England, LN6 7TS, UK",53.22853665,-0.54873472,edu,
+624496296af19243d5f05e7505fd927db02fd0ce,Gauss-Newton Deformable Part Models for Face Alignment In-the-Wild,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+62f7de0e26f5716beb32b9d14e646e76b3a2e2af,Continuous Hyper-parameter Learning for Support Vector Machines,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+62fd622b3ca97eb5577fd423fb9efde9a849cbef,Turning a Blind Eye: Explicit Removal of Biases and Variation from Deep Neural Network Embeddings,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+62fd622b3ca97eb5577fd423fb9efde9a849cbef,Turning a Blind Eye: Explicit Removal of Biases and Variation from Deep Neural Network Embeddings,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+62fd622b3ca97eb5577fd423fb9efde9a849cbef,Turning a Blind Eye: Explicit Removal of Biases and Variation from Deep Neural Network Embeddings,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+621ff353960d5d9320242f39f85921f72be69dc8,Explicit occlusion detection based deformable fitting for facial landmark localization,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+62b83bf64f200ebb9fa16dfb7108b85e390b2207,Semantic Labeling in Very High Resolution Images via a Self-Cascaded Convolutional Neural Network,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+62b83bf64f200ebb9fa16dfb7108b85e390b2207,Semantic Labeling in Very High Resolution Images via a Self-Cascaded Convolutional Neural Network,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+62b83bf64f200ebb9fa16dfb7108b85e390b2207,Semantic Labeling in Very High Resolution Images via a Self-Cascaded Convolutional Neural Network,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+621e8882c41cdaf03a2c4a986a6404f0272ba511,On robust biometric identity verification via sparse encoding of faces: Holistic vs local approaches,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+621f656fedda378ceaa9c0096ebb1556a42e5e0f,Single Sample Face Recognition from Video via Stacked Supervised Auto-Encoder,Rio de Janeiro State University,Rio de Janeiro State University,"UERJ, 524, Rua São Francisco Xavier, Maracanã, Zona Norte do Rio de Janeiro, Rio de Janeiro, Microrregião Rio de Janeiro, Região Metropolitana do Rio de Janeiro, RJ, Região Sudeste, 20550-900, Brasil",-22.91117105,-43.23577971,edu,
+62aaa33c46a7c4c2d8a80c81954101576200799d,Making the V in VQA Matter: Elevating the Role of Image Understanding in Visual Question Answering,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+629b1bdf4d96bb41f7d3fce5c7d5617515303b71,Diving Deeper into IM2GPS,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+629b1bdf4d96bb41f7d3fce5c7d5617515303b71,Diving Deeper into IM2GPS,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+62dc594c1eac220d2116506d187d9fdd5ff8e795,Robust multi-view pedestrian tracking using neural networks,University of Dayton,University of Dayton,"University of Dayton, Caldwell Street, South Park Historic District, Dayton, Montgomery, Ohio, 45409, USA",39.73844400,-84.17918747,edu,
+62035628c85e13c10db4dfe2acedc5741874fc2e,Auto-Context R-CNN,North Carolina State University,North Carolina State University,"North Carolina State University, Oval Drive, West Raleigh, Raleigh, Wake County, North Carolina, 27695, USA",35.77184965,-78.67408695,edu,
+9635d5e2b33b2fec49b31cb80928c28763a90d85,Semantic Image Inpainting with Deep Generative Models,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+965f8bb9a467ce9538dec6bef57438964976d6d9,Recognizing human faces under disguise and makeup,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+961a5d5750f18e91e28a767b3cb234a77aac8305,Face Detection without Bells and Whistles,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+96fb791de077106501397151d5cb4f245330ddba,Recurrent Transformer Networks for Semantic Correspondence,Yonsei University,Yonsei University,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.56004060,126.93692480,edu,
+96f0e7416994035c91f4e0dfa40fd45090debfc5,Unsupervised Learning of Face Representations,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+9677d2f6a994f598c1d631038d49401c5f707ee0,"See, Hear, and Read: Deep Aligned Representations",MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+96fda2ce5803979ba0295413b2750e9733619dd5,Fast and Balanced: Efficient Label Tree Learning for Large Scale Object Recognition,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+96fda2ce5803979ba0295413b2750e9733619dd5,Fast and Balanced: Efficient Label Tree Learning for Large Scale Object Recognition,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+96fda2ce5803979ba0295413b2750e9733619dd5,Fast and Balanced: Efficient Label Tree Learning for Large Scale Object Recognition,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+96ad3c4455a9b05fb6db749495b4ae26a6fb2fab,HydraPlus-Net: Attentive Deep Features for Pedestrian Analysis,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+96ad3c4455a9b05fb6db749495b4ae26a6fb2fab,HydraPlus-Net: Attentive Deep Features for Pedestrian Analysis,SenseTime,SenseTime,"China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1号-7",39.99300800,116.32988200,company,"1 Zhongguancun E Rd, Haidian Qu, China"
+96416b1b44fb05302c6e9a8ab1b74d9204995e73,Learning Effective Binary Visual Representations with Deep Networks,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+96c866f07ff999ee11459519aa361fa4fdfc2139,Consensus-based Sequence Training for Video Captioning,"National Institute of Informatics, Japan","National Institute of Informatics, Japan","2 Chome-1-2 Hitotsubashi, Chiyoda, Tokyo 100-0003, Japan",35.69248530,139.75825330,edu,
+96921b313f4c8dd6cb2299de1a24d4e9803ffdc1,Discovery of Visual Semantics by Unsupervised and Self-Supervised Representation Learning,University of Chicago,THE UNIVERSITY OF CHICAGO,"University of Chicago, South Ellis Avenue, Woodlawn, Chicago, Cook County, Illinois, 60637, USA",41.78468745,-87.60074933,edu,
+963d0d40de8780161b70d28d2b125b5222e75596,Convolutional Experts Constrained Local Model for Facial Landmark Detection,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+963d0d40de8780161b70d28d2b125b5222e75596,Convolutional Experts Constrained Local Model for Facial Landmark Detection,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+963d0d40de8780161b70d28d2b125b5222e75596,Convolutional Experts Constrained Local Model for Facial Landmark Detection,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+968b983fa9967ff82e0798a5967920188a3590a8,Children's recognition of disgust in others.,Boston College,Boston College,"Boston College, 140, Commonwealth Avenue, Chestnut Hill, Newton, Middlesex County, Massachusetts, 02467, USA",42.33544810,-71.16813864,edu,
+968b983fa9967ff82e0798a5967920188a3590a8,Children's recognition of disgust in others.,Boston College,Boston College,"Boston College, 140, Commonwealth Avenue, Chestnut Hill, Newton, Middlesex County, Massachusetts, 02467, USA",42.33544810,-71.16813864,edu,
+969fd48e1a668ab5d3c6a80a3d2aeab77067c6ce,End-To-End Face Detection and Recognition,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+969fd48e1a668ab5d3c6a80a3d2aeab77067c6ce,End-To-End Face Detection and Recognition,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+96a9ca7a8366ae0efe6b58a515d15b44776faf6e,Grid Loss: Detecting Occluded Faces,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+969c4d89d7b22b36d8fc569156ca6e040b31565d,Soft Biometric Recognition from Comparative Crowdsourced Annotations,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+968ab65077c4be1c1071120052b2e4b4f3d3c59a,"""Seeing is believing: the quest for multimodal knowledge"" by Gerard de Melo and Niket Tandon, with Martin Vesely as coordinator",Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+968ab65077c4be1c1071120052b2e4b4f3d3c59a,"""Seeing is believing: the quest for multimodal knowledge"" by Gerard de Melo and Niket Tandon, with Martin Vesely as coordinator",Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+9612fd66fcd3902bc267a62c146398eb8d30830e,Classifying Actions and Measuring Action Similarity by Modeling the Mutual Context of Objects and Human Poses,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+96eacc464c0177efc4f802f220888c7f675f24af,Deep Semantic Face Deblurring Supplementary Material,"University of California, Merced","University of California, Merced","University of California, Merced, Ansel Adams Road, Merced County, California, USA",37.36566745,-120.42158888,edu,
+9648a3790c62cee4253299f21368ce8028e3c8a6,MESO: Perceptual Memory to Support Online Learning in Adaptive Software,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+96e1ccfe96566e3c96d7b86e134fa698c01f2289,Semi-adversarial Networks: Convolutional Autoencoders for Imparting Privacy to Face Images,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+96e1ccfe96566e3c96d7b86e134fa698c01f2289,Semi-adversarial Networks: Convolutional Autoencoders for Imparting Privacy to Face Images,International Institute of Information Technology,International Institute of Information Technology,"International Institute of Information Technology, Hyderabad, Campus Road, Ward 105 Gachibowli, Greater Hyderabad Municipal Corporation West Zone, Hyderabad, Rangareddy District, Telangana, 500032, India",17.44549570,78.34854698,edu,
+96bf907ec08df2d3176be66f369e3cc3d6cdc7f7,Environment Upgrade Reinforcement Learning for Non-differentiable Multi-stage Pipelines,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+96bf907ec08df2d3176be66f369e3cc3d6cdc7f7,Environment Upgrade Reinforcement Learning for Non-differentiable Multi-stage Pipelines,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+9696ad8b164f5e10fcfe23aacf74bd6168aebb15,4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+9696ad8b164f5e10fcfe23aacf74bd6168aebb15,4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications,Middlesex University,Middlesex University,"Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK",51.59029705,-0.22963221,edu,
+9627f28ea5f4c389350572b15968386d7ce3fe49,Load Balanced GANs for Multi-view Face Image Synthesis,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+966e36f15b05ef8436afecf57a97b73d6dcada94,Dimensionality Reduction using Relative Attributes,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+96b4124cf7626301ed3bb2d2b2233a490804e35e,Saliency-Based Deformable Model for Pedestrian Detection,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+96b4124cf7626301ed3bb2d2b2233a490804e35e,Saliency-Based Deformable Model for Pedestrian Detection,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+96b4124cf7626301ed3bb2d2b2233a490804e35e,Saliency-Based Deformable Model for Pedestrian Detection,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+961e411c99d8ca6807076c4fb18e2d63a383aa0f,Non-negative matrix factorization methods for face recognition under extreme lighting variations,University of Oradea,University of Oradea,"Universitatea Creștină Partium - Clădirea Sulyok, 27, Strada Primăriei, Orașul Nou, Oradea, Bihor, 410209, România",47.05702220,21.92270900,edu,
+965faca4b89047ca0c90df0f12c06bc4cb9ec2dc,Statistical binary pattern and post-competitive representation for pattern recognition,"University of Sfax, Tunisia","REGIM-Labo: REsearch Groups in Intelligent Machines, University of Sfax, ENIS, BP 1173, Sfax, 3038, Tunisia","Université de Route de l'Aéroport Km 0.5 BP 1169 .3029 Sfax, Sfax, Tunisia",34.73610660,10.74272750,edu,"University of Sfax, Tunisia"
+965faca4b89047ca0c90df0f12c06bc4cb9ec2dc,Statistical binary pattern and post-competitive representation for pattern recognition,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+96578785836d7416bf2e9c154f687eed8f93b1e4,Automated video-based facial expression analysis of neuropsychiatric disorders.,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+96858cea5e9c72a93d438b6ba8d9e027db5416a7,Cooperative Learning of Audio and Video Models from Self-Supervised Synchronization,Dartmouth College,Dartmouth College,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA",43.70479270,-72.29259090,edu,
+96858cea5e9c72a93d438b6ba8d9e027db5416a7,Cooperative Learning of Audio and Video Models from Self-Supervised Synchronization,Dartmouth College,Dartmouth College,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA",43.70479270,-72.29259090,edu,
+969ed0575736943c2db62793583f99365d10fbac,Elevated amygdala response to faces following early deprivation.,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+969ed0575736943c2db62793583f99365d10fbac,Elevated amygdala response to faces following early deprivation.,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+960870523484a7f66cf8afbe833afd7d343b68f5,Improving Gait Biometrics under Spoofing Attacks,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+960870523484a7f66cf8afbe833afd7d343b68f5,Improving Gait Biometrics under Spoofing Attacks,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+96094b030013ca2d9b6d5a14b6f1fbbc57eb8a89,What is in that picture ? Visual Question Answering System,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+96e0cfcd81cdeb8282e29ef9ec9962b125f379b0,The MegaFace Benchmark: 1 Million Faces for Recognition at Scale,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+96c6f50ce8e1b9e8215b8791dabd78b2bbd5f28d,Dynamic Attention-Controlled Cascaded Shape Regression Exploiting Training Data Augmentation and Fuzzy-Set Sample Weighting,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+96c6f50ce8e1b9e8215b8791dabd78b2bbd5f28d,Dynamic Attention-Controlled Cascaded Shape Regression Exploiting Training Data Augmentation and Fuzzy-Set Sample Weighting,Jiangnan University,Jiangnan University,"江南大学站, 蠡湖大道, 滨湖区, 南场村, 滨湖区 (Binhu), 无锡市 / Wuxi, 江苏省, 214121, 中国",31.48542550,120.27395810,edu,
+96e731e82b817c95d4ce48b9e6b08d2394937cf8,Unconstrained face verification using deep CNN features,State University of New Jersey,The State University of New Jersey,"Rutgers New Brunswick: Livingston Campus, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA",40.51865195,-74.44099801,edu,
+3a27d164e931c422d16481916a2fa6401b74bcef,Anti-Makeup: Learning A Bi-Level Adversarial Network for Makeup-Invariant Face Verification,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+3a345ab83d1bfe4a63d3d44bc4ed243e10255a59,JOANNEUM RESEARCH and Vienna University of Technology at TRECVID 2010,Vienna University of Technology,Vienna University of Technology,"TU Wien, Hauptgebäude, Hoftrakt, Freihausviertel, KG Wieden, Wieden, Wien, 1040, Österreich",48.19853965,16.36986168,edu,
+3af8d38469fb21368ee947d53746ea68cd64eeae,Multimodal Intelligent Affect Detection with Kinect (Doctoral Consortium),Northumbria University,Northumbria University,"Northumbria University, Birkdale Close, High Heaton, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE7 7TP, UK",55.00306320,-1.57463231,edu,
+3af8d38469fb21368ee947d53746ea68cd64eeae,Multimodal Intelligent Affect Detection with Kinect (Doctoral Consortium),Northumbria University,Northumbria University,"Northumbria University, Birkdale Close, High Heaton, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE7 7TP, UK",55.00306320,-1.57463231,edu,
+3af8d38469fb21368ee947d53746ea68cd64eeae,Multimodal Intelligent Affect Detection with Kinect (Doctoral Consortium),Northumbria University,Northumbria University,"Northumbria University, Birkdale Close, High Heaton, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE7 7TP, UK",55.00306320,-1.57463231,edu,
+3aa53e8bb0a1a7e6d5fe4de146af92cd816755f4,Machine Understanding of Human Behavior,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+3aa53e8bb0a1a7e6d5fe4de146af92cd816755f4,Machine Understanding of Human Behavior,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+3aa53e8bb0a1a7e6d5fe4de146af92cd816755f4,Machine Understanding of Human Behavior,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+3aa53e8bb0a1a7e6d5fe4de146af92cd816755f4,Machine Understanding of Human Behavior,Beckman Institute,Beckman Institute,"Beckman Institute, The Presidents' Walk, Urbana, Champaign County, Illinois, 61801-2341, USA",40.11571585,-88.22750772,edu,
+3a3a4408432408b62e2dc22de7820a5a2f7bbe9e,No Spare Parts: Sharing Part Detectors for Image Categorization,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+3a3a4408432408b62e2dc22de7820a5a2f7bbe9e,No Spare Parts: Sharing Part Detectors for Image Categorization,Delft University of Technology,Delft University of Technology,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland",51.99882735,4.37396037,edu,
+3aea679168c72c6df7ead45d4f7f1fd7f3680a11,Towards Accurate Multi-person Pose Estimation in the Wild,Google,"Google, Inc.","1600 Amphitheatre Pkwy, Mountain View, CA 94043, USA",37.42199990,-122.08405750,company,"Google, Mountain View, CA"
+3ad6bd5c34b0866019b54f5976d644326069cb3d,Towards Next Generation Touring: Personalized Group Tours,RMIT University,RMIT University,"RMIT University, 124, La Trobe Street, Melbourne City, City of Melbourne, Victoria, 3000, Australia",-37.80874650,144.96388750,edu,
+3a3f75e0ffdc0eef07c42b470593827fcd4020b4,Normal Similarity Network for Generative Modelling,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+3a76e9fc2e89bdd10a9818f7249fbf61d216efc4,Face Sketch Matching via Coupled Deep Transform Learning,"IIIT Delhi, India","IIIT Delhi, India","Okhla Industrial Estate, Phase III, Near Govind Puri Metro Station, New Delhi, Delhi 110020, India",28.54562820,77.27315050,edu,
+3a2c90e0963bfb07fc7cd1b5061383e9a99c39d2,End-to-End Deep Learning for Steering Autonomous Vehicles Considering Temporal Dependencies,American University in Cairo,The American University in Cairo,"الجامعة الأمريكية بالقاهرة, شارع القصر العينى, القاهرة القديمة, جاردن سيتي, القاهرة, محافظة القاهرة, 11582, مصر",30.04287695,31.23664139,edu,
+3a95bfa1d4a989b162e07fa69b85cb6d31a674ab,Linear Ranking Analysis,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+3a0ea368d7606030a94eb5527a12e6789f727994,Categorization by Learning and Combining Object Parts,University of Siena,University of Siena,"大學 University, 澤祥街 Chak Cheung Street, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.41338620,114.21005800,edu,
+3a04eb72aa64760dccd73e68a3b2301822e4cdc3,Scalable Sparse Subspace Clustering,"Sichuan University, Chengdu","Sichuan Univ., Chengdu","四川大学(华西校区), 校东路, 武侯区, 武侯区 (Wuhou), 成都市 / Chengdu, 四川省, 610014, 中国",30.64276900,104.06751175,edu,
+3a40059e9dc4b19ae7f49b8746d8dda22456767f,Geometry-Aware Face Completion and Editing,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+3a53bad58f8467092477857ff9c2ae904d7108d2,Simultaneous perceptual and response biases on sequential face attractiveness judgments.,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+3a53bad58f8467092477857ff9c2ae904d7108d2,Simultaneous perceptual and response biases on sequential face attractiveness judgments.,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+3a1c3307f57ef09577ac0dc8cd8b090a4fe8091f,Thermal-to-visible face recognition using partial least squares.,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+3af130e2fd41143d5fc49503830bbd7bafd01f8b,How Do We Evaluate the Quality of Computational Editing Systems?,University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+3a2cf589f5e11ca886417b72c2592975ff1d8472,Spontaneously Emerging Object Part Segmentation,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+3a2cf589f5e11ca886417b72c2592975ff1d8472,Spontaneously Emerging Object Part Segmentation,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+3ada7640b1c525056e6fcd37eea26cd638815cd6,Abnormal Object Recognition: A Comprehensive Study,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+3ada7640b1c525056e6fcd37eea26cd638815cd6,Abnormal Object Recognition: A Comprehensive Study,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+3a4779f3f73d2ebfaccbc0dad0bdbf7ac0570c0d,Zero-shot Recognition via Semantic Embeddings and Knowledge Graphs,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+3ab93fe26a46f8bc0999e68af71a0907a63a5e65,ESTHER: Extremely Simple Image Translation Through Self-Regularization,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+3a55188f8ee3abe6d179d16984885be6e3b6daf5,Learning Cross-Modal Deep Representations for Robust Pedestrian Detection,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+3a55188f8ee3abe6d179d16984885be6e3b6daf5,Learning Cross-Modal Deep Representations for Robust Pedestrian Detection,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+3a4a908350d856577ac48caec10c0809e8396acf,"Zero-effort payments: design, deployment, and lessons",Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+3a2d3514b5cdccdb4c13aadb3929f3a78c03f020,"Deep semantic segmentation for automated driving: Taxonomy, roadmap and challenges",University of Alberta,University of Alberta,"University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada",53.52385720,-113.52282665,edu,
+3a27bdb9925d5b247868950a9575823b3194ac8b,Adaptation across the cortical hierarchy: low-level curve adaptation affects high-level facial-expression judgments.,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+3a5f5aca6138abcf22ede1af5572e01eb0f761d1,Optimizing Multivariate Performance Measures from Multi-View Data,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+3ac78d0fd4f0c01650277bb25eab6957d4eeb655,Multimodal Memory Modelling for Video Captioning,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+3af1a375c7c1decbcf5c3a29774e165cafce390c,Quantifying Facial Expression Abnormality in Schizophrenia by Combining 2D and 3D Features,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+3af1a375c7c1decbcf5c3a29774e165cafce390c,Quantifying Facial Expression Abnormality in Schizophrenia by Combining 2D and 3D Features,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+3ad2b6c283b1c4687c4f782efb64a209d3cf4cfe,Weakly-Supervised Semantic Segmentation by Iteratively Mining Common Object Features,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+3ad2b6c283b1c4687c4f782efb64a209d3cf4cfe,Weakly-Supervised Semantic Segmentation by Iteratively Mining Common Object Features,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+3a2a37ca2bdc82bba4c8e80b45d9f038fe697c7d,Handling Uncertain Tags in Visual Recognition,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+3a888bf996bcac3feb7e130543df9ec8287db515,End-to-End Learning of Driving Models from Large-Scale Video Datasets,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+3a307b7e2e742dd71b6d1ca7fde7454f9ebd2811,Bilinear CNN Models for Fine-Grained Visual Recognition,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+3a818a5fe2c36ff29212b4da9f4fba3280dfd497,Mobile Product Image Search by Automatic Query Object Extraction,Northwestern University,Northwestern University,"Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA",42.05511640,-87.67581113,edu,
+3ab0b3c02f4fa7f1d14315599f4f91563ae565f8,Canonical Time Warping for Alignment of Human Behavior,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+3ab0b3c02f4fa7f1d14315599f4f91563ae565f8,Canonical Time Warping for Alignment of Human Behavior,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+3ab0b3c02f4fa7f1d14315599f4f91563ae565f8,Canonical Time Warping for Alignment of Human Behavior,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+3ab0b3c02f4fa7f1d14315599f4f91563ae565f8,Canonical Time Warping for Alignment of Human Behavior,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+3a6ebdfb6375093885e846153a48139ef1ecfae6,The treasure beneath convolutional layers: Cross-convolutional-layer pooling for image classification,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+3a39cb039fb0f569ab88dfb058d98650a17c9f5c,Structured max-margin learning for multi-label image annotation,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+3a39cb039fb0f569ab88dfb058d98650a17c9f5c,Structured max-margin learning for multi-label image annotation,East China Normal University,East China Normal University,"华东师范大学, 3663, 中山北路, 曹家渡, 普陀区, 普陀区 (Putuo), 上海市, 200062, 中国",31.22849230,121.40211389,edu,
+3a387304d18f2786ee83804bd38efecc2a5fd323,Person Re-Identification Using Multiple Experts with Random Subspaces,University of Hong Kong,University of Hong Kong,"海洋科學研究所 The Swire Institute of Marine Science, 鶴咀道 Cape D'Aguilar Road, 鶴咀低電台 Cape D'Aguilar Low-Level Radio Station, 石澳 Shek O, 芽菜坑村 Nga Choy Hang Tsuen, 南區 Southern District, 香港島 Hong Kong Island, HK, 中国",22.20814690,114.25964115,edu,
+3ad9f6c1d10a2d1e86c93a4182ee3b260a6f3edd,Object Detection and Tracking in Wide Area Surveillance Using Thermal Imagery,University of Nevada,University of Nevada,"Orange 1, Evans Avenue, Reno, Washoe County, Nevada, 89557, USA",39.54694490,-119.81346566,edu,
+54948ee407b5d32da4b2eee377cc44f20c3a7e0c,Right for the Right Reason: Training Agnostic Networks,University of Bristol,University of Bristol,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK",51.45848370,-2.60977520,edu,
+544519fa0794d41a04307973156016b6c679ffa5,Switchable Deep Network for Pedestrian Detection,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+544519fa0794d41a04307973156016b6c679ffa5,Switchable Deep Network for Pedestrian Detection,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+544519fa0794d41a04307973156016b6c679ffa5,Switchable Deep Network for Pedestrian Detection,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+545388d4a8e79cab605cea9b3e1ff1da0f848f8e,Fast Online Upper Body Pose Estimation from Video,GE Global Research Center,GE Global Research Center,"GE Global Research Center, Aqueduct, Niskayuna, Schenectady County, New York, USA",42.82982480,-73.87719385,edu,
+545388d4a8e79cab605cea9b3e1ff1da0f848f8e,Fast Online Upper Body Pose Estimation from Video,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+54ffc4c83974d5915025f80e54e350cd30ef96d7,Sparse dictionary-based representation and recognition of action attributes,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+54abfe3acd987b5041878c29ec74204a11e73ad1,Holistic Planimetric prediction to Local Volumetric prediction for 3D Human Pose Estimation,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+54abfe3acd987b5041878c29ec74204a11e73ad1,Holistic Planimetric prediction to Local Volumetric prediction for 3D Human Pose Estimation,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+54abfe3acd987b5041878c29ec74204a11e73ad1,Holistic Planimetric prediction to Local Volumetric prediction for 3D Human Pose Estimation,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+54bb25a213944b08298e4e2de54f2ddea890954a,"AgeDB: The First Manually Collected, In-the-Wild Age Database",Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+54bb25a213944b08298e4e2de54f2ddea890954a,"AgeDB: The First Manually Collected, In-the-Wild Age Database",Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+54bb25a213944b08298e4e2de54f2ddea890954a,"AgeDB: The First Manually Collected, In-the-Wild Age Database",Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+54bb25a213944b08298e4e2de54f2ddea890954a,"AgeDB: The First Manually Collected, In-the-Wild Age Database",Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+54bb25a213944b08298e4e2de54f2ddea890954a,"AgeDB: The First Manually Collected, In-the-Wild Age Database",Middlesex University,Middlesex University,"Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK",51.59029705,-0.22963221,edu,
+54bb25a213944b08298e4e2de54f2ddea890954a,"AgeDB: The First Manually Collected, In-the-Wild Age Database",Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+54171243dfae9d7343c78026c9b94004df3853bb,"ResnetCrowd: A residual deep learning architecture for crowd counting, violent behaviour detection and crowd density level classification",Dublin City University,DUBLIN CITY UNIVERSITY,"Dublin City University Glasnevin Campus, Lower Car Park, Wad, Whitehall A ED, Dublin 9, Dublin, County Dublin, Leinster, D09 FW22, Ireland",53.38522185,-6.25740874,edu,
+549d55a06c5402696e063ce36b411f341a64f8a9,Learning Deep Structure-Preserving Image-Text Embeddings,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+549d55a06c5402696e063ce36b411f341a64f8a9,Learning Deep Structure-Preserving Image-Text Embeddings,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+5484ad04ac0a256b51fd1a3eae48483480862ab1,A survey on ear biometrics,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+5484ad04ac0a256b51fd1a3eae48483480862ab1,A survey on ear biometrics,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+5484ad04ac0a256b51fd1a3eae48483480862ab1,A survey on ear biometrics,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+5484ad04ac0a256b51fd1a3eae48483480862ab1,A survey on ear biometrics,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+54bae57ed37ce50e859cbc4d94d70cc3a84189d5,Face recognition committee machine,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+54f442c7fa4603f1814ebd8eba912a00dceb5cb2,The Indian Buffet Process: Scalable Inference and Extensions,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+548318d42e251b3ed7d98748a07cfcfcd0594575,Automatic Object Detection and Segmentation of the Histocytology Images Using Reshapable Agents,Islamic Azad University,Islamic Azad University,"دانشگاه آزاد اسلامی, همدان, بخش مرکزی شهرستان همدان, شهرستان همدان, استان همدان, ‏ایران‎",34.84529990,48.55962120,edu,
+5418242dafa134e6021a30ecc8c566ac83823b56,Disentangled Person Image Generation,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+541d69fdf97e5ded611ad0dd46f62bb9d2e19a51,SHESHADRI ET AL.: EXEMPLAR DRIVEN CHARACTER RECOGNITION IN THE WILD 1 Exemplar Driven Character Recognition in the Wild,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+541d69fdf97e5ded611ad0dd46f62bb9d2e19a51,SHESHADRI ET AL.: EXEMPLAR DRIVEN CHARACTER RECOGNITION IN THE WILD 1 Exemplar Driven Character Recognition in the Wild,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+54fc3551b3b08767d5d731092f10ba4573a2c822,Assessment of H.264 Video Compression on Automated Face Recognition Performance in Surveillance and Mobile Video Scenarios,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+54a9ed950458f4b7e348fa78a718657c8d3d0e05,Learning Neural Models for End-to-End Clustering,Ulm University,Ulm University,"HNU, John-F.-Kennedy-Straße, Vorfeld, Wiley, Neu-Ulm, Landkreis Neu-Ulm, Schwaben, Bayern, 89231, Deutschland",48.38044335,10.01010115,edu,
+541f1436c8ffef1118a0121088584ddbfd3a0a8a,A Spatio-temporal Feature Based on Triangulation of Dense SURF,University of Electro-Communications,The University of Electro-Communications,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本",35.65729570,139.54255868,edu,
+54ce3ff2ab6e4465c2f94eb4d636183fa7878ab7,Local Centroids Structured Non-Negative Matrix Factorization,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+54ce3ff2ab6e4465c2f94eb4d636183fa7878ab7,Local Centroids Structured Non-Negative Matrix Factorization,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+5409d9ff06ad715ee6996e44c88f930b9dd074fa,See all by looking at a few: Sparse modeling for finding representative objects,University of Minnesota,University of Minnesota,"WeismanArt, 333, East River Parkway, Marcy-Holmes, Phillips, Minneapolis, Hennepin County, Minnesota, 55455, USA",44.97308605,-93.23708813,edu,
+542015e2c78c51203963b76632b7ea2a6c46aa74,DNA-GAN: Learning Disentangled Representations from Multi-Attribute Images,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+54b16b233e6130354e7d3f0d001cc5491f85e998,Real-time fMRI-based neurofeedback in depression,Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.48799610,-3.17969747,edu,
+5417bd72d1b787ade0c485f1188189474c199f4d,MAGAN: Margin Adaptation for Generative Adversarial Networks,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+54568bdce3405ffbe2a6f5820711f966e2d2faf3,How Do We Update Faces? Effects of Gaze Direction and Facial Expressions on Working Memory Updating,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+54d97ea9a5f92761dddd148fb0e602c2293e7c16,Associating Inter-image Salient Instances for Weakly Supervised Semantic Segmentation,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+54d97ea9a5f92761dddd148fb0e602c2293e7c16,Associating Inter-image Salient Instances for Weakly Supervised Semantic Segmentation,Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.48799610,-3.17969747,edu,
+545dc167a4879ce2d61836cb300479c305f8e096,Event-Centric Twitter Photo Summarization,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+544c06584c95bfdcafbd62e04fb796e575981476,Human Identification from Body Shape,National Institute of Standards and Technology,National Institute of Standards and Technology,"National Institute of Standards and Technology, Summer Walk Drive, Diamond Farms, Gaithersburg, Montgomery County, Maryland, 20878, USA",39.12549380,-77.22293475,edu,
+5495e224ac7b45b9edc5cfeabbb754d8a40a879b,Feature Reconstruction Disentangling for Pose-invariant Face Recognition Supplementary Material,State University of New Jersey,The State University of New Jersey,"Rutgers New Brunswick: Livingston Campus, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA",40.51865195,-74.44099801,edu,
+548a0523c9e66b793f2145dbd05dcb4d32fccfec,Joint Action Unit localisation and intensity estimation through heatmap regression,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+54204e28af73c7aca073835a14afcc5d8f52a515,Fine-Pruning: Defending Against Backdooring Attacks on Deep Neural Networks,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+540831094fd9b80469c8dacb9320b7e342b50e03,Emotion Recognition in Speech using Cross-Modal Transfer in the Wild,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+54c5fad54492650f6eccb90bafcab8c2b779ee2f,Real-time text tracking in natural scenes,University of Bristol,University of Bristol,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK",51.45848370,-2.60977520,edu,
+98324ad5027c6b163d7a670570ffe2f8df70717c,LSTM Pose Machines,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+98a18702cd9be53341f12c0e711df9d985120ad7,Feature Extraction and Localisation using Scale-Invariant Feature Transform on 2.5D Image,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+98142103c311b67eeca12127aad9229d56b4a9ff,GazeDirector: Fully Articulated Eye Gaze Redirection in Video,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+98142103c311b67eeca12127aad9229d56b4a9ff,GazeDirector: Fully Articulated Eye Gaze Redirection in Video,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+9820920d4544173e97228cb4ab8b71ecf4548475,Automated facial coding software outperforms people in recognizing neutral faces as neutral from standardized datasets,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+9820920d4544173e97228cb4ab8b71ecf4548475,Automated facial coding software outperforms people in recognizing neutral faces as neutral from standardized datasets,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+98143f005c6d18ecb9e5b21a8ac6fb9f0b6b5005,Coherent Object Detection with 3D Geometric Context from a Single Image,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+98b54eb04e531c34a20320e19b55f6721bd0d651,Recurrent CNN for 3D Gaze Estimation using Appearance and Shape Cues,University of Calgary,University of Calgary,"University of Calgary, Service Tunnel, University Heights, Calgary, Alberta, T2N 1N7, Canada",51.07840380,-114.12870770,edu,
+988aa2583c63ada43ca260dd8b5a4a543725a483,Choosing the Right Home Location Definition Method for the Given Dataset,MIT,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+98bb029afe2a1239c3fdab517323066f0957b81b,Person Re-identification by Video Ranking,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+98bb029afe2a1239c3fdab517323066f0957b81b,Person Re-identification by Video Ranking,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+98f7081ed27e250d1f974d89377d1fbd3f78a347,Towards Automated Inferencing of Emotional State from Face Images,University of Piraeus,University of Piraeus,"Πανεπιστήμιο Πειραιώς, 80, Καραολή και Δημητρίου, Απόλλωνας, Νέο Φάληρο, Πειραιάς, Δήμος Πειραιώς, Περιφερειακή Ενότητα Πειραιώς, Περιφέρεια Αττικής, Αττική, 185 34, Ελλάδα",37.94173275,23.65303262,edu,
+989332c5f1b22604d6bb1f78e606cb6b1f694e1a,Recurrent Face Aging,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+989332c5f1b22604d6bb1f78e606cb6b1f694e1a,Recurrent Face Aging,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+989332c5f1b22604d6bb1f78e606cb6b1f694e1a,Recurrent Face Aging,Southeast University,Southeast University,"SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国",32.05752790,118.78682252,edu,
+982f5c625d6ad0dac25d7acbce4dabfb35dd7f23,Facial Expression Recognition by SVM-based Two-stage Classifier on Gabor Features,Japan Advanced Institute of Science and Technology,Japan Advanced Institute of Science and Technology,"JAIST (北陸先端科学技術大学院大学), 石川県道55号小松辰口線, Ishikawa Science Park, 能美市, 石川県, 中部地方, 923-1206, 日本",36.44429490,136.59285870,edu,
+98af221afd64a23e82c40fd28d25210c352e41b7,Exploring visual features through Gabor representations for facial expression detection,Queensland University of Technology,Queensland University of Technology,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.47715625,153.02841004,edu,
+98af221afd64a23e82c40fd28d25210c352e41b7,Exploring visual features through Gabor representations for facial expression detection,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+98af221afd64a23e82c40fd28d25210c352e41b7,Exploring visual features through Gabor representations for facial expression detection,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+98cfbe37a68406ef194354de7e5ea453c4ea9adf,Generating Synthetic X-ray Images of a Person from the Surface Geometry,Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+98cfbe37a68406ef194354de7e5ea453c4ea9adf,Generating Synthetic X-ray Images of a Person from the Surface Geometry,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+9893865afdb1de55fdd21e5d86bbdb5daa5fa3d5,Illumination Normalization Using Logarithm Transforms for Face Authentication,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+9838ba7a31a096503def7b69bf48e5d327f95caa,Emotion-Based Crowd Representation for Abnormality Detection,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+98025d3d44e9379736adb1228919272ded9298ae,Visual Question Answering Dataset for Bilingual Image Understanding: A Study of Cross-Lingual Transfer Using Attention Maps,Tokyo Institute of Technology,Tokyo Institute of Technology,"東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本",35.51675380,139.48342251,edu,
+9887ab220254859ffc7354d5189083a87c9bca6e,Generic Image Classification Approaches Excel on Face Recognition,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+9887ab220254859ffc7354d5189083a87c9bca6e,Generic Image Classification Approaches Excel on Face Recognition,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+9802885e39e0847374a2efae801b8b719c09c64c,"An Effective Two-Finger, Two-Stage Biometric Strategy for the US-VISIT Program",Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+9899eb0ae24aa8c992244afe5f4455e9f96c1f18,"Characteristics of Brains in Autism Spectrum Disorder: Structure, Function and Connectivity across the Lifespan.",Yonsei University,Yonsei University,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.56004060,126.93692480,edu,
+985cd420c00d2f53965faf63358e8c13d1951fa8,Pixel-Level Hand Detection with Shape-Aware Structured Forests,University of Hong Kong,University of Hong Kong,"海洋科學研究所 The Swire Institute of Marine Science, 鶴咀道 Cape D'Aguilar Road, 鶴咀低電台 Cape D'Aguilar Low-Level Radio Station, 石澳 Shek O, 芽菜坑村 Nga Choy Hang Tsuen, 南區 Southern District, 香港島 Hong Kong Island, HK, 中国",22.20814690,114.25964115,edu,
+98d04187f091f402a90a6a9a2108393ca5f91563,ADVIO: An Authentic Dataset for Visual-Inertial Odometry,Aalto University,Aalto University,"Aalto, 24, Otakaari, Otaniemi, Suur-Tapiola, Espoo, Helsingin seutukunta, Uusimaa, Etelä-Suomi, Manner-Suomi, 02150, Suomi",60.18558755,24.82427330,edu,
+98d04187f091f402a90a6a9a2108393ca5f91563,ADVIO: An Authentic Dataset for Visual-Inertial Odometry,Tampere University of Technology,Tampere University of Technology,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi",61.44964205,23.85877462,edu,
+989ca38616b52f23c2720ba5c6df2493dc025d0a,Markerless Feature Extraction for Gait Analysis,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+983f03659e42407b1779e407388ea86fa58043c6,Transferring activities: Updating human behavior analysis,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+98bda8768fd4a384695ecc736876a87f51c4ca0e,Pedestrian-Synthesis-GAN: Generating Pedestrian Data in Real Scene and Beyond,Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+98bda8768fd4a384695ecc736876a87f51c4ca0e,Pedestrian-Synthesis-GAN: Generating Pedestrian Data in Real Scene and Beyond,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+987a649cb33302c41412419f8eeb77048aa5513e,Visual Psychophysics for Making Face Recognition Algorithms More Explainable,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+987a649cb33302c41412419f8eeb77048aa5513e,Visual Psychophysics for Making Face Recognition Algorithms More Explainable,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+9821669a989a3df9d598c1b4332d17ae8e35e294,Minimal Correlation Classification,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+9865fe20df8fe11717d92b5ea63469f59cf1635a,Wildest Faces: Face Detection and Recognition in Violent Settings,Hacettepe University,Hacettepe University,"Hacettepe Üniversitesi Beytepe Kampüsü, Hacettepe-Beytepe Kampüs Yolu, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.86742125,32.73519072,edu,
+9865fe20df8fe11717d92b5ea63469f59cf1635a,Wildest Faces: Face Detection and Recognition in Violent Settings,Middle East Technical University,Middle East Technical University,"ODTÜ, 1, 1591.sk(315.sk), Çiğdem Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87549675,32.78553506,edu,
+98c2053e0c31fab5bcb9ce5386335b647160cc09,A Distributed Framework for Spatio-Temporal Analysis on Large-Scale Camera Networks,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+98c2053e0c31fab5bcb9ce5386335b647160cc09,A Distributed Framework for Spatio-Temporal Analysis on Large-Scale Camera Networks,University of Stuttgart,University of Stuttgart,"Pädagogische Hochschule Ludwigsburg, 46, Reuteallee, Ludwigsburg-Nord, Ludwigsburg, Landkreis Ludwigsburg, Regierungsbezirk Stuttgart, Baden-Württemberg, 71634, Deutschland",48.90953380,9.18318920,edu,
+98c2053e0c31fab5bcb9ce5386335b647160cc09,A Distributed Framework for Spatio-Temporal Analysis on Large-Scale Camera Networks,SUNY Buffalo,SUNY Buffalo,"SUNY College at Buffalo, Academic Drive, Elmwood Village, Buffalo, Erie County, New York, 14222, USA",42.93362780,-78.88394479,edu,
+983bfb10fa228ecd1047ab4ac1d78c96448de059,Towards Person Identification and Re-identification with Attributes,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+989282f579fdca0ebdc890cf05cac88c29f9eb49,Benchmarking and Error Diagnosis in Multi-instance Pose Estimation,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+98fb3890c565f1d32049a524ec425ceda1da5c24,A Robust Learning Framework Using PSM and Ameliorated SVMs for Emotional Recognition,Kobe University,Kobe University,"神戸大学, 灘三田線, 灘区, 神戸市, 兵庫県, 近畿地方, 657-00027, 日本",34.72757140,135.23710000,edu,
+9841df3cc4dc89379039092816ef19af949257a8,LBP-based Hierarchical Sparse Patch Learning for Face Recognition,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+989c7cdafa9b90ab2ea0a9d8fa60634cc698f174,YoloFlow Real - time Object Tracking in Video CS 229 Course Project,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+9825c4dddeb2ed7eaab668b55403aa2c38bc3320,Aerial Imagery for Roof Segmentation: A Large-Scale Dataset towards Automatic Mapping of Buildings,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+9825c4dddeb2ed7eaab668b55403aa2c38bc3320,Aerial Imagery for Roof Segmentation: A Large-Scale Dataset towards Automatic Mapping of Buildings,University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.54724732,edu,
+985d032bd45c3b1a6434d19526f9209ade72691a,Robust Low-Rank Regularized Regression for Face Recognition with Occlusion,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+985d032bd45c3b1a6434d19526f9209ade72691a,Robust Low-Rank Regularized Regression for Face Recognition with Occlusion,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+980266ad6807531fea94252e8f2b771c20e173b3,Continuous Regression for Non-rigid Image Alignment,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+535ed3850e79ccd51922601546ef0fc48c5fb468,A feature embedding strategy for high-level CNN representations from multiple convnets,University of Windsor,University of Windsor,"Bridge AA, Ambassador Bridge, Windsor, Essex, Ontario, N9C 2J9, Canada",42.30791465,-83.07176915,edu,
+535ed3850e79ccd51922601546ef0fc48c5fb468,A feature embedding strategy for high-level CNN representations from multiple convnets,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+53d78c8dbac7c9be8eb148c6a9e1d672f1dd72f9,"Discriminative vs . Generative Object Recognition : Objects , Faces , and the Web",California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+533d14e539ae5cdca0ece392487a2b19106d468a,Bidirectional Multirate Reconstruction for Temporal Modeling in Videos,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+53eeb83d2c8085d5457b364354525730805b4332,Seeing 3D objects in a single 2D image,University of Minnesota,University of Minnesota,"WeismanArt, 333, East River Parkway, Marcy-Holmes, Phillips, Minneapolis, Hennepin County, Minnesota, 55455, USA",44.97308605,-93.23708813,edu,
+532f070082eb565704a2f6481ed64bdbc7e6aa24,Learning to Track at 100 FPS with Deep Regression Networks,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+537328af75f50d49696972a6c34bca97c14bc762,Exploiting Unintended Feature Leakage in Collaborative Learning,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+532c089b43983935e1001c5e35aa35440263beaf,G-Distillation: Reducing Overconfident Errors on Novel Samples,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+53698b91709112e5bb71eeeae94607db2aefc57c,Two-Stream Convolutional Networks for Action Recognition in Videos,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+536b1db1b1db8d4cfef813575304421ebe8332f7,A Procrustean Markov Process for Non-rigid Structure Recovery,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+53cf087dbcbe0c4b145297fb0a32732ab2b18b66,PSANet: Point-wise Spatial Attention Network for Scene Parsing,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+53cf087dbcbe0c4b145297fb0a32732ab2b18b66,PSANet: Point-wise Spatial Attention Network for Scene Parsing,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+5369b021f2abf5daa77fa5602569bb3b8bb18546,GMMCP tracker: Globally optimal Generalized Maximum Multi Clique problem for multiple object tracking,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+533d91cbb5e306c96b71b6f776382f3956e5dc7d,Faster Feature Engineering by Approximate Evaluation,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+533d91cbb5e306c96b71b6f776382f3956e5dc7d,Faster Feature Engineering by Approximate Evaluation,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+538c6000369594084b122c37b3219ad15b58cb37,Hierarchical Cascade of Classifiers for Efficient Poselet Evaluation,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+5394d42fd27b7e14bd875ec71f31fdd2fcc8f923,Visual Recognition Using Directional Distribution Distance,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+532f91d59d96d28379e09043592903d143218f4b,Cross-Domain Hallucination Network for Fine-Grained Object Recognition,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+532f91d59d96d28379e09043592903d143218f4b,Cross-Domain Hallucination Network for Fine-Grained Object Recognition,GE Global Research Center,GE Global Research Center,"GE Global Research Center, Aqueduct, Niskayuna, Schenectady County, New York, USA",42.82982480,-73.87719385,edu,
+5397c34a5e396658fa57e3ca0065a2878c3cced7,Lighting normalization with generic intrinsic illumination subspace for face recognition,Institute of Information Science,Institute of Information Science,"資訊科學研究所, 數理大道, 中研里, 南港子, 南港區, 臺北市, 11574, 臺灣",25.04107280,121.61475620,edu,
+539287d8967cdeb3ef60d60157ee93e8724efcac,Learning Deep $\ell_0$ Encoders,Beckman Institute,Beckman Institute,"Beckman Institute, The Presidents' Walk, Urbana, Champaign County, Illinois, 61801-2341, USA",40.11571585,-88.22750772,edu,
+539287d8967cdeb3ef60d60157ee93e8724efcac,Learning Deep $\ell_0$ Encoders,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+532f7ec8e0c8f7331417dd4a45dc2e8930874066,Semi-supervised dimensionality reduction on data with multiple representations for label propagation on facial images,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+532837c431617d37c03361ba5a7d5fdb082c55f4,Connecting Language and Vision to Actions,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+532837c431617d37c03361ba5a7d5fdb082c55f4,Connecting Language and Vision to Actions,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+5388638c7801b11958d937c89ece764bc769e298,Identity processing in multiple-face tracking.,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+5388638c7801b11958d937c89ece764bc769e298,Identity processing in multiple-face tracking.,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+5388638c7801b11958d937c89ece764bc769e298,Identity processing in multiple-face tracking.,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+5388638c7801b11958d937c89ece764bc769e298,Identity processing in multiple-face tracking.,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+5388638c7801b11958d937c89ece764bc769e298,Identity processing in multiple-face tracking.,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+5388638c7801b11958d937c89ece764bc769e298,Identity processing in multiple-face tracking.,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+5388638c7801b11958d937c89ece764bc769e298,Identity processing in multiple-face tracking.,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+539923c8f2f4641f71056b71e5628d1b9b633835,Mining actionlet ensemble for action recognition with depth cameras,Northwestern University,Northwestern University,"Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA",42.05511640,-87.67581113,edu,
+539923c8f2f4641f71056b71e5628d1b9b633835,Mining actionlet ensemble for action recognition with depth cameras,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+539923c8f2f4641f71056b71e5628d1b9b633835,Mining actionlet ensemble for action recognition with depth cameras,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+5375149a74361b51d734613be5d2ccba0c6b6955,Boundary-Seeking Generative Adversarial Networks,University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.54724732,edu,
+5375149a74361b51d734613be5d2ccba0c6b6955,Boundary-Seeking Generative Adversarial Networks,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4,Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis,Binghamton University,Binghamton University,"Binghamton University Downtown Center, Washington Street, Downtown, Binghamton, Broome County, New York, 13901, USA",42.09580770,-75.91455689,edu,
+534f41985a7350261a03b8c0dc54e218115dc4a5,A Hierarchical Model of Shape and Appearance for Human Action Classification,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+534f41985a7350261a03b8c0dc54e218115dc4a5,A Hierarchical Model of Shape and Appearance for Human Action Classification,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+53d0227c40b354cc438c035951da801c9dcd87b7,Fully-Coupled Two-Stream Spatiotemporal Networks for Extremely Low Resolution Action Recognition,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+53d0227c40b354cc438c035951da801c9dcd87b7,Fully-Coupled Two-Stream Spatiotemporal Networks for Extremely Low Resolution Action Recognition,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+534ac5cd8e2503b333efcc94d92e5359b54190b1,3d Face Reconstruction Using Stereo Vision a Thesis Submitted to the Graduate School of Natural and Applied Sciences of Middle East Technical University by Mehmet Di̇kmen in Partial Fullfillment of the Requirements for the Degree of Master of Science,Middle East Technical University,Middle East Technical University,"ODTÜ, 1, 1591.sk(315.sk), Çiğdem Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87549675,32.78553506,edu,
+53c14feecdf23c40c594c25a0075c7150fa2f9e2,Blockwise Parallel Decoding for Deep Autoregressive Models,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+53bfe2ab770e74d064303f3bd2867e5bf7b86379,Learning to Synthesize and Manipulate Natural Images,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+53ee7e9839e1ac76e1168480a7e3227d568f4062,An Adaptive Descriptor Design for Object Recognition in the Wild,University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+536b5739c2162301bff19730a65bfbe8b86179b6,Posebits for Monocular Human Pose Estimation,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+537d8c4c53604fd419918ec90d6ef28d045311d0,Active collaborative ensemble tracking,Kyoto University,Kyoto University,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.02749960,135.78154513,edu,
+53df7d12472ee0c466a2bb59c4a17274858345de,Fine-Grained Visual Categorization with 2D-Warping,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+53ce84598052308b86ba79d873082853022aa7e9,Optimized Method for Real-Time Face Recognition System Based on PCA and Multiclass Support Vector Machine,Islamic Azad University,Islamic Azad University,"دانشگاه آزاد اسلامی, همدان, بخش مرکزی شهرستان همدان, شهرستان همدان, استان همدان, ‏ایران‎",34.84529990,48.55962120,edu,
+3fe4109ded039ac9d58eb9f5baa5327af30ad8b6,Spatio-Temporal GrabCut human segmentation for face and pose recovery,University of Barcelona,University of Barcelona,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España",41.38689130,2.16352385,edu,
+3fefc856a47726d19a9f1441168480cee6e9f5bb,Perceptually Valid Dynamics for Smiles and Blinks,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+3fefc856a47726d19a9f1441168480cee6e9f5bb,Perceptually Valid Dynamics for Smiles and Blinks,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+3f6474bc611ec790444ffa6e644a258f3d2aed37,Variational Capsules for Image Analysis and Synthesis,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+3f7cf52fb5bf7b622dce17bb9dfe747ce4a65b96,Person Identity Label Propagation in Stereo Videos,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+3f42db34a79cf600b416a246ad3fd146a4afbdf4,Context-Sensitive Decision Forests for Object Detection,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+3f4684674d2f62e24b46140e2c5df29d061ffea1,Deep Ordinal Regression Network for Monocular Depth Estimation,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+3f4684674d2f62e24b46140e2c5df29d061ffea1,Deep Ordinal Regression Network for Monocular Depth Estimation,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+3f4684674d2f62e24b46140e2c5df29d061ffea1,Deep Ordinal Regression Network for Monocular Depth Estimation,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+3f0c51989c516a7c5dee7dec4d7fb474ae6c28d9,Not Afraid of the Dark: NIR-VIS Face Recognition via Cross-Spectral Hallucination and Low-Rank Embedding,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+3f3ce530fe7e75c648b6959980008b0b1f99727a,Multi-Instance Visual-Semantic Embedding,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+3fb26f3abcf0d287243646426cd5ddeee33624d4,Joint Training of Cascaded CNN for Face Detection,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+3f9ca2526013e358cd8caeb66a3d7161f5507cbc,Improving Sparse Representation-Based Classification Using Local Principal Component Analysis,"University of California, Davis","University of California, Davis","University of California, Davis, Apiary Drive, Yolo County, California, 95616-5270, USA",38.53363490,-121.79077264,edu,
+3f57c3fc2d9d4a230ccb57eed1d4f0b56062d4d5,Face Recognition across Poses Using a Single 3D Reference Model,National Taiwan University of Science and Technology,National Taiwan University of Science and Technology,"臺科大, 43, 基隆路四段, 學府里, 下內埔, 大安區, 臺北市, 10607, 臺灣",25.01353105,121.54173736,edu,
+3feb69531653e83d0986a0643e4a6210a088e3e5,Using Group Prior to Identify People in Consumer Images,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+3feb69531653e83d0986a0643e4a6210a088e3e5,Using Group Prior to Identify People in Consumer Images,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+3f5741b49573122d278d1bff416ec34e1067a75a,A systemic approach to automatic metadata extraction from multimedia content,National Technical University of Athens,National Technical University of Athens,"Εθνικό Μετσόβιο Πολυτεχνείο, Στουρνάρη, Μουσείο, Αθήνα, Δήμος Αθηναίων, Π.Ε. Κεντρικού Τομέα Αθηνών, Περιφέρεια Αττικής, Αττική, 11250, Ελλάδα",37.98782705,23.73179733,edu,
+3f5741b49573122d278d1bff416ec34e1067a75a,A systemic approach to automatic metadata extraction from multimedia content,University of Lincoln,University of Lincoln,"University of Lincoln, Brayford Way, Whitton Park, New Boultham, Lincoln, Lincolnshire, East Midlands, England, LN6 7TS, UK",53.22853665,-0.54873472,edu,
+3f12701449a82a5e01845001afab3580b92da858,Joint Object Class Sequencing and Trajectory Triangulation (JOST),University of North Carolina,University of North Carolina,"University of North Carolina, Emergency Room Drive, Chapel Hill, Orange County, North Carolina, 27599, USA",35.90503535,-79.04775327,edu,
+3fde656343d3fd4223e08e0bc835552bff4bda40,Character Identification Using Graph Matching Algorithm,Anna University,Anna University,"Anna University, Nuclear Physics Road, Srinagar Colony, Ward 171, Zone 13 Adyar, Chennai, Chennai district, Tamil Nadu, 600025, India",13.01058380,80.23537360,edu,
+3fee5c6343c969f33a7db4c7f7da1e152effd911,Patterns of fixation during face recognition: Differences in autism across age.,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+3fee5c6343c969f33a7db4c7f7da1e152effd911,Patterns of fixation during face recognition: Differences in autism across age.,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+3fee5c6343c969f33a7db4c7f7da1e152effd911,Patterns of fixation during face recognition: Differences in autism across age.,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+3fb054dbdff35b7ac3940c167e7292c7646e1ad9,Dictionary Learning and Sparse Coding on Statistical Manifolds,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+3ff4784d3f28c87f41c82ed9778c8c919b486cd4,Neural Baby Talk,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+3f9f5a8966c035dc179a60c042b160aee2bf8f53,Deep Second-Order Siamese Network for Pedestrian Re-identification,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+3f9f5a8966c035dc179a60c042b160aee2bf8f53,Deep Second-Order Siamese Network for Pedestrian Re-identification,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+3f957142ef66f2921e7c8c7eadc8e548dccc1327,Merging SVMs with Linear Discriminant Analysis: A Combined Model,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+3f957142ef66f2921e7c8c7eadc8e548dccc1327,Merging SVMs with Linear Discriminant Analysis: A Combined Model,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+3fdfd6fa7a1cc9142de1f53e4ac7c2a7ac64c2e3,Intensity-Depth Face Alignment Using Cascade Shape Regression,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+3fdfd6fa7a1cc9142de1f53e4ac7c2a7ac64c2e3,Intensity-Depth Face Alignment Using Cascade Shape Regression,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+3faedba96bd6b72c6669bdcb82ae0788cdcb3a43,A Study of Identification Performance of Facial Regions from CCTV Images,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+3f540faf85e1f8de6ce04fb37e556700b67e4ad3,Face Verification with Multi-Task and Multi-Scale Feature Fusion,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+3f4ba94a2964e62c52e7f283bea764ac19cffd40,A master-slave approach for object detection and matching with fixed and mobile cameras,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+3f4bfa4e3655ef392eb5ad609d31c05f29826b45,Robust multi-camera view face recognition,"Dr. B. C. Roy Engineering College, India",Dr. B. C. Roy Engineering College,"Dr. B. C. Roy Engineering College, Lenin Sarani, Durgapur, Bānkurā, West Bengal, 713200, India",23.54409755,87.34269707,edu,
+3f4bfa4e3655ef392eb5ad609d31c05f29826b45,Robust multi-camera view face recognition,"National Institute of Technology, Rourkela",National Institute of Technology Rourkela,"National Institute of Technology, inside the department, Koel Nagar, Rourkela, Sundargarh, Odisha, 769002, India",22.25015890,84.90668557,edu,
+3f4bfa4e3655ef392eb5ad609d31c05f29826b45,Robust multi-camera view face recognition,Indian Institute of Technology Kanpur,Indian Institute of Technology Kanpur,"Indian Institute of Technology Kanpur, 4th Avenue, Panki, Kanpur, Kanpur Nagar, Uttar Pradesh, 208016, India",26.51318800,80.23651945,edu,
+3f4bfa4e3655ef392eb5ad609d31c05f29826b45,Robust multi-camera view face recognition,Jadavpur University,Jadavpur University,"Jadavpur University, Chingrighata Flyover, Basani Devi Colony, Kolkata, Hāora, West Bengal, 700098, India",22.56115370,88.41310194,edu,
+3f0f97d8256c6fe22a346bc54f8df67f6f674f22,Through-Wall Human Pose Estimation Using Radio Signals,MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+3fb4bf38d34f7f7e5b3df36de2413d34da3e174a,Persuasive Faces: Generating Faces in Advertisements,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+3f2dbb34932edcb69295e57d4b8d6a8f68e28df4,Real-Time Compressive Tracking,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+3f3d0852249ff7924e152efe948d0aee87d4238f,Learning a mixture of sparse distance metrics for classification and dimensionality reduction,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+3f688723fb984bce9b60329f8f1ec3346be7f7e3,"Patterns of eye movements when male and female observers judge female attractiveness, body fat and waist-to-hip ratio",University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+3f688723fb984bce9b60329f8f1ec3346be7f7e3,"Patterns of eye movements when male and female observers judge female attractiveness, body fat and waist-to-hip ratio",Newcastle University,Newcastle University,"Newcastle University, Claremont Walk, Haymarket, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE1 7RU, UK",54.98023235,-1.61452627,edu,
+3f4607f71888df5b69719cc926e8d07988f82dd9,A socio-technical approach for event detection in security critical infrastructure,Vienna University of Technology,Vienna University of Technology,"TU Wien, Hauptgebäude, Hoftrakt, Freihausviertel, KG Wieden, Wieden, Wien, 1040, Österreich",48.19853965,16.36986168,edu,
+3f623bb0c9c766a5ac612df248f4a59288e4d29f,"Genetic Programming for Region Detection, Feature Extraction, Feature Construction and Classification in Image Data",Victoria University of Wellington,Victoria University of Wellington,"Victoria University of Wellington, Waiteata Road, Aro Valley, Wellington, Wellington City, Wellington, 6040, New Zealand/Aotearoa",-41.29052775,174.76846919,edu,
+3f4798c7701da044bdb7feb61ebdbd1d53df5cfe,Vector quantization with constrained likelihood for face recognition,University of Geneva,University of Geneva,"University of Chicago-Yerkes Observatory, 373, West Geneva Street, Williams Bay, Walworth County, Wisconsin, 53191, USA",42.57054745,-88.55578627,edu,
+3f4c262d836b2867a53eefb959057350bf7219c9,Recognizing Faces under Facial Expression Variations and Partial Occlusions,Eastern Mediterranean University,Eastern Mediterranean University,"Eastern Mediterranean University (EMU) - Stadium, Nehir Caddesi, Gazimağusa, Αμμόχωστος - Mağusa, Kuzey Kıbrıs, 99450, Κύπρος - Kıbrıs",35.14479945,33.90492318,edu,
+3f4377109a92cf4e422b7e2ae95ef3144323ea72,Bridging the Gap Between Synthetic and Real Data,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+3f8b082c10561edd3ffc5d67a3d675cfdff6d94c,Information Bottleneck Learning Using Privileged Information for Visual Recognition,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+3f5e8f884e71310d7d5571bd98e5a049b8175075,Making a Science of Model Search: Hyperparameter Optimization in Hundreds of Dimensions for Vision Architectures,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+3f3a483402a3a2b800cf2c86506a37f6ef1a5332,DeepCut: Joint Subset Partition and Labeling for Multi Person Pose Estimation,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+3f3a483402a3a2b800cf2c86506a37f6ef1a5332,DeepCut: Joint Subset Partition and Labeling for Multi Person Pose Estimation,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+3f4b67309e6a2a9a1e303fbc0606225df0d3c2ab,Human-Object Interactions Are More than the Sum of Their Parts.,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+3f4b67309e6a2a9a1e303fbc0606225df0d3c2ab,Human-Object Interactions Are More than the Sum of Their Parts.,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+3fd7bfd90f0dfc3369bfe718e27aff30cf268c23,Learning Mid-level Filters for Person Re-identification,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+3f0126f467802562505d5f551dfb6bd138180268,Occluded Pedestrian Detection Through Guided Attention in CNNs,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+3f0126f467802562505d5f551dfb6bd138180268,Occluded Pedestrian Detection Through Guided Attention in CNNs,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+3f5693584d7dab13ffc12122d6ddbf862783028b,Ranking CGANs: Subjective Control over Semantic Image Attributes,University of Bath,University of Bath,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK",51.37914420,-2.32523320,edu,
+30193451e552286645baa00db7dcd05780d9e1da,On Available Corpora for Empirical Methods in Vision & Language,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+309dd5555dad9dfc3f3889cf11b5dec8ab797da6,Optimal Scheduling for Asymmetric Multi-core Server Processors,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+30caeca74168cd841759cef951c947f44ef0f547,Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+30caeca74168cd841759cef951c947f44ef0f547,Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,SenseTime,SenseTime,"China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1号-7",39.99300800,116.32988200,company,"1 Zhongguancun E Rd, Haidian Qu, China"
+3039627fa612c184228b0bed0a8c03c7f754748c,Robust regression on image manifolds for ordered label denoising,University of North Carolina at Charlotte,University of North Carolina at Charlotte,"Lot 20, Poplar Terrace Drive, Charlotte, Mecklenburg County, North Carolina, 28223, USA",35.31034410,-80.73261617,edu,
+30e2b97b06590b7e39e6e53976c5b8265ed7392c,Zero-Shot Event Detection by Multimodal Distributional Semantic Embedding of Videos,SRI International,SRI International,"SRI International Building, West 1st Street, Menlo Park, San Mateo County, California, 94025, USA",37.45857960,-122.17560525,edu,
+30d8fbb9345cdf1096635af7d39a9b04af9b72f9,Watching plants grow - a position paper on computer vision and Arabidopsis thaliana,Aberystwyth University,Aberystwyth University,"Aberystwyth University, Llanbadarn Campus, Cefn Esgair, Waun Fawr, Comins Coch, Ceredigion, Wales, SY23 3JG, UK",52.41073580,-4.05295501,edu,
+30654fd93360a339e271d4b194b7f7463b2c5dac,COSTA: Co-Occurrence Statistics for Zero-Shot Classification,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+303a7099c01530fa0beb197eb1305b574168b653,Occlusion-Free Face Alignment: Deep Regression Networks Coupled with De-Corrupt AutoEncoders,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+303a7099c01530fa0beb197eb1305b574168b653,Occlusion-Free Face Alignment: Deep Regression Networks Coupled with De-Corrupt AutoEncoders,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+3027727790598d913a8ff9a1bab4538176ad9fc8,BlinkML : Approximate Machine Learning with Probabilistic Guarantees,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+30cd39388b5c1aae7d8153c0ab9d54b61b474ffe,Deep Cascaded Regression for Face Alignment,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+30cd39388b5c1aae7d8153c0ab9d54b61b474ffe,Deep Cascaded Regression for Face Alignment,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+303517dfc327c3004ae866a6a340f16bab2ee3e3,Using Locality Preserving Projections in Face Recognition,DIT University,DIT UNIVERSITY,"DIT University, Dehradun-Mussoorie Road, Rājpur, Kincraig, Dehra Dūn, Uttarakhand, 248009, India",30.39833960,78.07534550,edu,
+30fd1363fa14965e3ab48a7d6235e4b3516c1da1,A Deep Semi-NMF Model for Learning Hidden Representations,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+30e28beb92239447aff0718119195c0539aa58d8,Data Summarization at Scale: A Two-Stage Submodular Approach,Yale University,Yale University,"Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA",41.25713055,-72.98966960,edu,
+303be881f6cd4907c5e357bc1bb5547d8ea1da5a,Individual Differences in the Recognition of Enjoyment Smiles: No Role for Perceptual–Attentional Factors and Autistic-Like Traits,Katholieke Universiteit Leuven,Katholieke Universiteit Leuven,"Laboratorium voor Bos, natuur en landschap, 102, Vital Decosterstraat, Sint-Maartensdal, Leuven, Vlaams-Brabant, Vlaanderen, 3000, België / Belgique / Belgien",50.88306860,4.70195030,edu,
+303be881f6cd4907c5e357bc1bb5547d8ea1da5a,Individual Differences in the Recognition of Enjoyment Smiles: No Role for Perceptual–Attentional Factors and Autistic-Like Traits,University of Aberdeen,University of Aberdeen,"University of Aberdeen, High Street, Old Aberdeen, Aberdeen, Aberdeen City, Scotland, AB24 3EJ, UK",57.16461430,-2.10186013,edu,
+305887fe0fce91470c6cb042616cb36486dc0e3b,SelfKin: Self Adjusted Deep Model For Kinship Verification,Bar-Ilan University,Bar-Ilan University,"אוניברסיטת בר אילן, כביש גהה, גבעת שמואל, קריית מטלון, גבעת שמואל, מחוז תל אביב, NO, ישראל",32.06932925,34.84334339,edu,
+3002b5180c4b4fbf9c07145b5b435846c729c724,Reconstruction of Partially Occluded Face by Fast Recursive PCA,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+30e18a16d4c7092694d55743ff92965e5dec2692,"Hormonal contraceptives, menstrual cycle and brain response to faces.",University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+30e18a16d4c7092694d55743ff92965e5dec2692,"Hormonal contraceptives, menstrual cycle and brain response to faces.",University of Aberdeen,University of Aberdeen,"University of Aberdeen, High Street, Old Aberdeen, Aberdeen, Aberdeen City, Scotland, AB24 3EJ, UK",57.16461430,-2.10186013,edu,
+30052dfa6397cf9732a7385dc55f207a0ad24ca4,Energy-Efficient Run-Time Mapping and Thread Partitioning of Concurrent OpenCL Applications on CPU-GPU MPSoCs,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+30052dfa6397cf9732a7385dc55f207a0ad24ca4,Energy-Efficient Run-Time Mapping and Thread Partitioning of Concurrent OpenCL Applications on CPU-GPU MPSoCs,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+30052dfa6397cf9732a7385dc55f207a0ad24ca4,Energy-Efficient Run-Time Mapping and Thread Partitioning of Concurrent OpenCL Applications on CPU-GPU MPSoCs,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+30052dfa6397cf9732a7385dc55f207a0ad24ca4,Energy-Efficient Run-Time Mapping and Thread Partitioning of Concurrent OpenCL Applications on CPU-GPU MPSoCs,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+30052dfa6397cf9732a7385dc55f207a0ad24ca4,Energy-Efficient Run-Time Mapping and Thread Partitioning of Concurrent OpenCL Applications on CPU-GPU MPSoCs,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+30052dfa6397cf9732a7385dc55f207a0ad24ca4,Energy-Efficient Run-Time Mapping and Thread Partitioning of Concurrent OpenCL Applications on CPU-GPU MPSoCs,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+309acdd149f5f0ea12acb103b36bb59e6e631671,Lifting from the Deep: Convolutional 3D Pose Estimation from a Single Image,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+309acdd149f5f0ea12acb103b36bb59e6e631671,Lifting from the Deep: Convolutional 3D Pose Estimation from a Single Image,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+309acdd149f5f0ea12acb103b36bb59e6e631671,Lifting from the Deep: Convolutional 3D Pose Estimation from a Single Image,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+3026722b4cbe9223eda6ff2822140172e44ed4b1,Jointly estimating demographics and height with a calibrated camera,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+30a3eee5e9302108416f6234d739373dde68d373,Learning to Count Objects in Natural Images for Visual Question Answering,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+30d21b5baf9514d26da749c6683c49b4fa55f2b5,Towards a unified account of face (and maybe object) processing,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+304b1f14ca6a37552dbfac443f3d5b36dbe1a451,Collaborative Low-Rank Subspace Clustering,Charles Sturt University,Charles Sturt University,"Charles Sturt University, Wagga Wagga, NSW, 2678, Australia",-35.06360710,147.35522340,edu,
+304b1f14ca6a37552dbfac443f3d5b36dbe1a451,Collaborative Low-Rank Subspace Clustering,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+304b1f14ca6a37552dbfac443f3d5b36dbe1a451,Collaborative Low-Rank Subspace Clustering,Western Sydney University,Western Sydney University,"Western Sydney University, Parramatta City Campus, Smith Street, Parramatta, Sydney, Parramatta, NSW, 2150, Australia",-33.81608480,151.00560034,edu,
+306127c3197eb5544ab1e1bf8279a01e0df26120,Sparse Coding and Dictionary Learning with Linear Dynamical Systems,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+3073eff17368262d7c605bbcaf3b2fb015754d39,Voice conversion versus speaker verification: an overview,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+3073eff17368262d7c605bbcaf3b2fb015754d39,Voice conversion versus speaker verification: an overview,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+30723ada764c6ec186927522d666eaa8eeae35b1,Deep Covariance Descriptors for Facial Expression Recognition,University of Florence,University of Florence,"Piazza di San Marco, 4, 50121 Firenze FI, Italy",43.77764260,11.25976500,edu,
+30180f66d5b4b7c0367e4b43e2b55367b72d6d2a,Template Adaptation for Face Verification and Identification,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+30a29f6c407749e97bc7c2db5674a62773af9d27,Tracking and Visual Quality Inspection in Harsh Environments (print-version),Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+303225eaedd489f61ac36e1f39cd04db7fd8bd41,Facial Affect Recognition for Cognitive-behavioural Therapy,University of Geneva,University of Geneva,"University of Chicago-Yerkes Observatory, 373, West Geneva Street, Williams Bay, Walworth County, Wisconsin, 53191, USA",42.57054745,-88.55578627,edu,
+304fa45de90874e89b7a5511c88551994ea8c89d,Charting the typical and atypical development of the social brain.,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+302bf028487b50bed33bc6d36971b8ecf06393ab,Landmark Localisation in 3D Face Data,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+30a4637cbc461838c151073b265fb08e00492ff4,Weakly Supervised Object Localization with Progressive Domain Adaptation,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+30fcfc6b7fe1809d79ea6ce08f50e2e53c203800,Deep Manifold Learning of Symmetric Positive Definite Matrices with Application to Face Recognition,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+30998485c920f62c307c29c4832b70bbce748eaf,Local Similarity-Aware Deep Feature Embedding,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+301662c2a6ed86e48f21c1d24bfc67b403201b0c,Repetition Suppression in Ventral Visual Cortex Is Diminished as a Function of Increasing Autistic Traits,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+301662c2a6ed86e48f21c1d24bfc67b403201b0c,Repetition Suppression in Ventral Visual Cortex Is Diminished as a Function of Increasing Autistic Traits,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+3083d2c6d4f456e01cbb72930dc2207af98a6244,Perceived Age Estimation from Face Images,Tokyo Institute of Technology,Tokyo Institute of Technology,"東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本",35.51675380,139.48342251,edu,
+30722904751c2e1cf287f268befdec2e4223b086,Accurate Eye Center Localization via Hierarchical Adaptive Convolution,University of Portsmouth,University of Portsmouth,"University of Portsmouth - North Zone, Portland Street, Portsea, Portsmouth, South East, England, PO1 3DE, UK",50.79805775,-1.09834911,edu,
+30722904751c2e1cf287f268befdec2e4223b086,Accurate Eye Center Localization via Hierarchical Adaptive Convolution,University of Plymouth,University of Plymouth,"Charles Seale-Hayne Library, Portland Square, Barbican, Plymouth, South West England, England, PL4 6AP, UK",50.37525010,-4.13927692,edu,
+30def55b6277f1e636dfebe12799b12a1b3f48a2,Recurrent Neural Network for Learning Dense Depth and Ego-Motion from Video,University of North Carolina at Chapel Hill,University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.91139710,-79.05045290,edu,
+30cbd41e997445745b6edd31f2ebcc7533453b61,What Makes a Video a Video : Analyzing Temporal Information in Video Understanding Models and Datasets,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+30a14aabde46aa236a7b437a4942a92d417f3653,Incremental Learning Framework for Indoor Scene Recognition,Tokyo Institute of Technology,Tokyo Institute of Technology,"東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本",35.51675380,139.48342251,edu,
+30e547dfab832ea0428b137d9e4824a22d8efd0b,Lazier Than Lazy Greedy,Yale University,Yale University,"Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA",41.25713055,-72.98966960,edu,
+30f49d6595359a4a18c728ec83f99346d1e16348,Intact Reflexive but Deficient Voluntary Social Orienting in Autism Spectrum Disorder,Rice University,Rice University,"Rice University, Stockton Drive, Houston, Harris County, Texas, 77005-1890, USA",29.71679145,-95.40478113,edu,
+30f49d6595359a4a18c728ec83f99346d1e16348,Intact Reflexive but Deficient Voluntary Social Orienting in Autism Spectrum Disorder,Yale University,Yale University,"Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA",41.25713055,-72.98966960,edu,
+30f49d6595359a4a18c728ec83f99346d1e16348,Intact Reflexive but Deficient Voluntary Social Orienting in Autism Spectrum Disorder,University of Miami,University of Miami,"University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA",25.71733390,-80.27866887,edu,
+301486e8dad7a41a1a99fd6fba28ce153fe1e56e,Are Elephants Bigger than Butterflies? Reasoning about Sizes of Objects,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+3075baf2abc1849d2dc2f1448c272ca2f8b7694d,Learning Segmentation Masks with the Independence Prior,Shanghai University,Shanghai University,"上海大学, 锦秋路, 大场镇, 宝山区 (Baoshan), 上海市, 201906, 中国",31.32235655,121.38400941,edu,
+3075baf2abc1849d2dc2f1448c272ca2f8b7694d,Learning Segmentation Masks with the Independence Prior,Shanghai University,Shanghai University,"上海大学, 锦秋路, 大场镇, 宝山区 (Baoshan), 上海市, 201906, 中国",31.32235655,121.38400941,edu,
+3070a1bd503c3767def898bbd50c7eea2bbf29c9,Wider or Deeper: Revisiting the ResNet Model for Visual Recognition,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+5e59193a0fc22a0c37301fb05b198dd96df94266,Example-Based Modeling of Facial Texture from Deficient Data,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+5e8677eb21c3a5d24c52bcb93404416f7eeebc31,Enhancing Probabilistic Appearance-Based Object Tracking with Depth Information: Object Tracking under Occlusion,Kyoto University,Kyoto University,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.02749960,135.78154513,edu,
+5e7e055ef9ba6e8566a400a8b1c6d8f827099553,On the role of cortex-basal ganglia interactions for category learning: A neuro-computational approach.,MIT,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+5e74d92d841d1bc1c9c2d80219f98bf892f239c4,Developmental changes in face visual scanning in autism spectrum disorder as assessed by data-based analysis,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+5ee96d5c4d467d00909472e3bc0d2c2d82ccb961,Jointly Attentive Spatial-Temporal Pooling Networks for Video-Based Person Re-identification,Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+5ee96d5c4d467d00909472e3bc0d2c2d82ccb961,Jointly Attentive Spatial-Temporal Pooling Networks for Video-Based Person Re-identification,"IBM Research, North Carolina",IBM Research,"IBM, East Cornwallis Road, Research Triangle Park, Nelson, Durham County, North Carolina, 27709, USA",35.90422720,-78.85565763,company,
+5ee96d5c4d467d00909472e3bc0d2c2d82ccb961,Jointly Attentive Spatial-Temporal Pooling Networks for Video-Based Person Re-identification,Northwestern University,Northwestern University,"Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA",42.05511640,-87.67581113,edu,
+5e84fd5c73dfeea9d51e1cf59ea6f8ecf2097603,Lending A Hand: Detecting Hands and Recognizing Activities in Complex Egocentric Interactions,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+5e84fd5c73dfeea9d51e1cf59ea6f8ecf2097603,Lending A Hand: Detecting Hands and Recognizing Activities in Complex Egocentric Interactions,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+5e09a85527a2c471ce35b21a3b22ae1620c80176,Facial image analysis based on two-dimensional linear discriminant analysis exploiting symmetry,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+5e3c8cd50301a13ad53a3fc9e3567ede63a76215,Learning to Cluster for Proposal-Free Instance Segmentation,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+5e3c8cd50301a13ad53a3fc9e3567ede63a76215,Learning to Cluster for Proposal-Free Instance Segmentation,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+5e3c8cd50301a13ad53a3fc9e3567ede63a76215,Learning to Cluster for Proposal-Free Instance Segmentation,University of Maryland College Park,University of Maryland College Park,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",38.99203005,-76.94610290,edu,
+5ec7e6b9cf06ab90cca7bda8e7a4b54ecb6859ac,CoDeL: A Human Co-detection and Labeling Framework,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+5e16f10f2d667d17c029622b9278b6b0a206d394,Learning to Rank Binary Codes,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+5e16f10f2d667d17c029622b9278b6b0a206d394,Learning to Rank Binary Codes,IBM Thomas J. Watson Research Center,IBM Thomas J. Watson Research Center,"IBM Yorktown research lab, Adams Road, Millwood, Town of New Castle, Westchester County, New York, 10562, USA",41.21002475,-73.80407056,company,
+5e16f10f2d667d17c029622b9278b6b0a206d394,Learning to Rank Binary Codes,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+5e525d319af3739ccb205a890d0eb8bbed811d20,Learning from Synthetic Data: Addressing Domain Shift for Semantic Segmentation,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+5e525d319af3739ccb205a890d0eb8bbed811d20,Learning from Synthetic Data: Addressing Domain Shift for Semantic Segmentation,GE Global Research Center,GE Global Research Center,"GE Global Research Center, Aqueduct, Niskayuna, Schenectady County, New York, USA",42.82982480,-73.87719385,edu,
+5ef3e7a2c8d2876f3c77c5df2bbaea8a777051a7,Rendering or normalization? An analysis of the 3D-aided pose-invariant face recognition,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+5ea165d2bbd305dc125415487ef061bce75dac7d,Efficient human action recognition by luminance field trajectory and geometry information,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+5ed062553280d48a42b688bc63ed3f81f3507dbc,Parallel Distributed Face Search System for National and Border Security,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+5ed062553280d48a42b688bc63ed3f81f3507dbc,Parallel Distributed Face Search System for National and Border Security,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+5ea9cba00f74d2e113a10c484ebe4b5780493964,Automated Drowsiness Detection For Improved Driving Safety,Sabanci University,Sabanci University,"Sabanci Universitesi, Preveze Cad., Orta Mahallesi, Tepeören, Tuzla, İstanbul, Marmara Bölgesi, 34953, Türkiye",40.89271590,29.37863323,edu,
+5ea9cba00f74d2e113a10c484ebe4b5780493964,Automated Drowsiness Detection For Improved Driving Safety,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+5e1514de6d20d3b1d148d6925edc89a6c891ce47,Consistent-Aware Deep Learning for Person Re-identification in a Camera Network,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+5e1514de6d20d3b1d148d6925edc89a6c891ce47,Consistent-Aware Deep Learning for Person Re-identification in a Camera Network,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+5e821a4d0e26db7ee41a7f0f25036ba4ec094ac8,"Overview of ImageCLEF 2018: Challenges, Datasets and Evaluation",University Politehnica of Bucharest,University Politehnica of Bucharest,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România",44.43918115,26.05044565,edu,
+5e821a4d0e26db7ee41a7f0f25036ba4ec094ac8,"Overview of ImageCLEF 2018: Challenges, Datasets and Evaluation",Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+5e821a4d0e26db7ee41a7f0f25036ba4ec094ac8,"Overview of ImageCLEF 2018: Challenges, Datasets and Evaluation",Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+5e821a4d0e26db7ee41a7f0f25036ba4ec094ac8,"Overview of ImageCLEF 2018: Challenges, Datasets and Evaluation",Dublin City University,DUBLIN CITY UNIVERSITY,"Dublin City University Glasnevin Campus, Lower Car Park, Wad, Whitehall A ED, Dublin 9, Dublin, County Dublin, Leinster, D09 FW22, Ireland",53.38522185,-6.25740874,edu,
+5e821a4d0e26db7ee41a7f0f25036ba4ec094ac8,"Overview of ImageCLEF 2018: Challenges, Datasets and Evaluation",University of Oslo,University of Oslo,"UiO, Moltke Moes vei, Blindern, Nordre Aker, Oslo, 0851, Norge",59.93891665,10.72170765,edu,
+5ef25b78dc947f1f4674da44945b050e3f4b9e17,3D Face Recognition Based on Multiple Keypoint Descriptors and Sparse Representation,Tongji University,Tongji University,"同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国",31.28473925,121.49694909,edu,
+5edfa28559c054b23acc43ce0f975a04ae27b331,Multiple Tree Models for Occlusion and Spatial Constraints in Human Pose Estimation,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+5eec4db50ad8237d881562d036c275d87dd14683,End-to-End Deep Kronecker-Product Matching for Person Re-identification,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+5e80e2ffb264b89d1e2c468fbc1b9174f0e27f43,Naming every individual in news video monologues,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+5e706ba3d5c7237a580716aacda350b867c85e5f,Predicting Useful Neighborhoods for Lazy Local Learning,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+5e706ba3d5c7237a580716aacda350b867c85e5f,Predicting Useful Neighborhoods for Lazy Local Learning,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+5e0e516226413ea1e973f1a24e2fdedde98e7ec0,The Invariance Hypothesis and the Ventral Stream,MIT,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+5e55d9dabe06ee6b4d4b31dfd3723f6016a6c937,Visualization of Automated and Manual Trajectories in Wide-Area Motion Imagery,University of Missouri,University of Missouri,"L1, Maguire Boulevard, Lemone Industrial Park, Columbia, Boone County, Missouri, 65201, USA",38.92676100,-92.29193783,edu,
+5e07d6951b7bc0c4113313a9586ce8178eacdf57,Learning to Reason: End-to-End Module Networks for Visual Question Answering,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+5e07d6951b7bc0c4113313a9586ce8178eacdf57,Learning to Reason: End-to-End Module Networks for Visual Question Answering,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+5e7cb894307f36651bdd055a85fdf1e182b7db30,A Comparison of Multi-class Support Vector Machine Methods for Face Recognition,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+5b693cb3bedaa2f1e84161a4261df9b3f8e77353,"Robust Face Localisation Using Motion, Colour and Fusion",Queensland University of Technology,Queensland University of Technology,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.47715625,153.02841004,edu,
+5b73b7b335f33cda2d0662a8e9520f357b65f3ac,Intensity Rank Estimation of Facial Expressions Based on a Single Image,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+5b73b7b335f33cda2d0662a8e9520f357b65f3ac,Intensity Rank Estimation of Facial Expressions Based on a Single Image,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+5b6d05ce368e69485cb08dd97903075e7f517aed,Robust Active Shape Model for Landmarking Frontal Faces,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+5b0bf1063b694e4b1575bb428edb4f3451d9bf04,Facial Shape Tracking via Spatio-Temporal Cascade Shape Regression,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+5b249cf39370503f22fc7d4b257d735555d647ce,DeepID-Net: Deformable deep convolutional neural networks for object detection,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+5b59e6b980d2447b2f3042bd811906694e4b0843,Two-stage cascade model for unconstrained face detection,University of Zagreb,"University of Zagreb, Faculty of Electrical Engineering and Computing, Croatia","Unska ul. 3, 10000, Zagreb, Croatia",45.80112100,15.97084090,edu,
+5b4abeb466a2c97a99b9621e0c83c95f4326e99b,Adversarial Examples: Attacks and Defenses for Deep Learning,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+5bb53fb36a47b355e9a6962257dd465cd7ad6827,Mask-off: Synthesizing Face Images in the Presence of Head-mounted Displays,University of Kentucky,University of Kentucky,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA",38.03337420,-84.50177580,edu,
+5bb53fb36a47b355e9a6962257dd465cd7ad6827,Mask-off: Synthesizing Face Images in the Presence of Head-mounted Displays,North Carolina Central University,North Carolina Central University,"North Carolina Central University, George Street, Hayti, Durham, Durham County, North Carolina, 27707, USA",35.97320905,-78.89755054,edu,
+5b16f0870546cd57a934f2ee039136a09abb96b9,Versatile Auxiliary Regressor with Generative Adversarial network (VAR+GAN),National University of Ireland Galway,National University of Ireland Galway,"National University of Ireland, Galway, Earl's Island, Townparks, Nun's Island, Galway Municipal District, Cathair na Gaillimhe, County Galway, Connacht, H91 F5TE, Ireland",53.27639715,-9.05829961,edu,
+5b24ef13fc9a51a9892f164bc142ffefc0b7a8ee,You said that?,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+5b9dc0f10704b5663c06c7dde2732d4a6076de55,Is Sparsity Really Relevant for Image Classification?,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+5b89744d2ac9021f468b3ffd32edf9c00ed7fed7,Beyond Mahalanobis metric: Cayley-Klein metric learning,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+5b7cb9b97c425b52b2e6f41ba8028836029c4432,Smooth Representation Clustering,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+5b7cb9b97c425b52b2e6f41ba8028836029c4432,Smooth Representation Clustering,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+5b2bc289b607ca1a0634555158464f28fe68a6d3,Where's Waldo: Matching people in images of crowds,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+5b2bc289b607ca1a0634555158464f28fe68a6d3,Where's Waldo: Matching people in images of crowds,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+5b2bc289b607ca1a0634555158464f28fe68a6d3,Where's Waldo: Matching people in images of crowds,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+5b9d41e2985fa815c0f38a2563cca4311ce82954,Exploitation of 3D images for face authentication under pose and illumination variations,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+5bd91c5aa3468d3435ff33d03b3d8348724f96da,3D Human Pose Estimation from Monocular Image Sequences,Aalborg University,Aalborg University,"AAU, Pontoppidanstræde, Sønder Tranders, Aalborg, Aalborg Kommune, Region Nordjylland, 9220, Danmark",57.01590275,9.97532827,edu,
+5b6593a6497868a0d19312952d2b753232414c23,Face Recognition by 3D Registration for the Visually Impaired Using a RGB-D Sensor,City College of New York,"The City College of New York, New York, NY 10031, USA","CCNY, 160, Convent Avenue, Manhattanville, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10031, USA",40.81819805,-73.95100898,edu,
+5b6593a6497868a0d19312952d2b753232414c23,Face Recognition by 3D Registration for the Visually Impaired Using a RGB-D Sensor,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+5bf2132de8be99547af4aee6013fec8226c763b5,Deep Predictive Coding Networks for Video Prediction and Unsupervised Learning,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+5bf2132de8be99547af4aee6013fec8226c763b5,Deep Predictive Coding Networks for Video Prediction and Unsupervised Learning,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+5bf2132de8be99547af4aee6013fec8226c763b5,Deep Predictive Coding Networks for Video Prediction and Unsupervised Learning,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+5b9693d2f6b7b731f9abdbfa5c35d641b881daff,Modeling Human Motion Using Manifold Learning and Factorized Generative Models,State University of New Jersey,The State University of New Jersey,"Rutgers New Brunswick: Livingston Campus, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA",40.51865195,-74.44099801,edu,
+5b70beeadc31ac8421bd9fe54fbe696b90eba1cf,Three-dimensional proxies for hand-drawn characters,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+5bfec86bb67a1c49359e8a171917311d48688068,Natural Language Understanding with Distributed Representation,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+5b719410e7829c98c074bc2947697fac3b505b64,Active Appearance Models for Affect Recognition Using Facial Expressions,University of North Carolina Wilmington,University of North Carolina Wilmington,"Kenan House, 1705, Market Street, Wilmington, New Hanover County, North Carolina, 28403, USA",34.23755810,-77.92701290,edu,
+5b0008ba87667085912ea474025d2323a14bfc90,SoS-RSC : A Sum-of-Squares Polynomial Approach to Robustifying Subspace Clustering Algorithms,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+5b0008ba87667085912ea474025d2323a14bfc90,SoS-RSC : A Sum-of-Squares Polynomial Approach to Robustifying Subspace Clustering Algorithms,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+5b97e997b9b654373bd129b3baf5b82c2def13d1,3D Face Tracking and Texture Fusion in the Wild,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+5b97e997b9b654373bd129b3baf5b82c2def13d1,3D Face Tracking and Texture Fusion in the Wild,Reutlingen University,Reutlingen University,"Campus Hohbuch, Campus Hochschule Reutlingen, Reutlingen, Landkreis Reutlingen, Regierungsbezirk Tübingen, Baden-Württemberg, 72762, Deutschland",48.48187645,9.18682404,edu,
+5bd3d08335bb4e444a86200c5e9f57fd9d719e14,"3D Face Morphable Models “In-the-Wild”",Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+5bd3d08335bb4e444a86200c5e9f57fd9d719e14,"3D Face Morphable Models “In-the-Wild”",University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+5b79ae0d1fd60d61d1b7e37ffe499f50088554c0,"Semantic Segmentation via Structured Patch Prediction, Context CRF and Guidance CRF",Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+5b79ae0d1fd60d61d1b7e37ffe499f50088554c0,"Semantic Segmentation via Structured Patch Prediction, Context CRF and Guidance CRF",National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+5b600cfabfb3c99085ca949fc432684e7ac86471,Representation Independent Analytics Over Structured Data,Oregon State University,Oregon State University,"OSU Beaver Store, 538, Southwest 6th Avenue, Portland Downtown, Portland, Multnomah County, Oregon, 97204, USA",45.51982890,-122.67797964,edu,
+5b600cfabfb3c99085ca949fc432684e7ac86471,Representation Independent Analytics Over Structured Data,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+5b600cfabfb3c99085ca949fc432684e7ac86471,Representation Independent Analytics Over Structured Data,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+5bf70c1afdf4c16fd88687b4cf15580fd2f26102,Residual Codean Autoencoder for Facial Attribute Analysis,"IIIT Delhi, India","IIIT Delhi, India","Okhla Industrial Estate, Phase III, Near Govind Puri Metro Station, New Delhi, Delhi 110020, India",28.54562820,77.27315050,edu,
+5b4fddc0b86deea2fc139c43ee07892ad211a2dd,Compositional Human Pose Regression,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+5b4b84ce3518c8a14f57f5f95a1d07fb60e58223,Diagnosing Error in Object Detectors,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+5b042a76c6e61d411f68b8193ec67ad8dd1abc5e,iSAX 2.0: Indexing and Mining One Billion Time Series,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+5b7cf29f164ec59a15ddb55b4af84ca07231f35d,"The role of features, algorithms and data in visual recognition",Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+5bce272adc5bd6934fe31ae3c648b4b62191353d,The Evolution of First Person Vision Methods: A Survey,Eindhoven University of Technology,Eindhoven University of Technology,"Technische Universiteit Eindhoven, 2, De Rondom, Villapark, Eindhoven, Noord-Brabant, Nederland, 5600 MB, Nederland",51.44866020,5.49039957,edu,
+5b7856b7669a5746b7f14d2ae5452aa2dc89d454,Domain-Adaptive Discriminative One-Shot Learning of Gestures,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+5b7856b7669a5746b7f14d2ae5452aa2dc89d454,Domain-Adaptive Discriminative One-Shot Learning of Gestures,University of Leeds,University of Leeds,"University of Leeds, Inner Ring Road, Woodhouse, Leeds, Yorkshire and the Humber, England, LS2 9NS, UK",53.80387185,-1.55245712,edu,
+5b6ecbf5f1eecfe1a9074d31fe2fb030d75d9a79,Improving 3D Face Details Based on Normal Map of Hetero-source Images,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+5b86c36e3eb59c347b81125d5dd57dd2a2c377a9,Name Identification of People in News Video by Face Matching,Nagoya University,Nagoya University,"SuperDARN (Hokkaido West), 太辛第1支線林道, 陸別町, 足寄郡, 十勝総合振興局, 北海道, 北海道地方, 日本",43.53750985,143.60768225,edu,
+5ba8bb7d204e7a5a29a043792546577500e2e5c1,Background Appearance Modeling with Applications to Visual Object Detection in an Open-Pit Mine,Queensland University of Technology,Queensland University of Technology,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.47715625,153.02841004,edu,
+5ba8bb7d204e7a5a29a043792546577500e2e5c1,Background Appearance Modeling with Applications to Visual Object Detection in an Open-Pit Mine,Queensland University of Technology,Queensland University of Technology,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.47715625,153.02841004,edu,
+5bc0a89f4f73523967050374ed34d7bc89e4d9e1,The role of emotion transition for the perception of social dominance and affiliation.,University of Haifa,University of Haifa,"אוניברסיטת חיפה, חיפה, מחוז חיפה, ישראל",32.76162915,35.01986304,edu,
+5bc0a89f4f73523967050374ed34d7bc89e4d9e1,The role of emotion transition for the perception of social dominance and affiliation.,Humboldt University,Humboldt University,"Humboldt-Universität zu Berlin, Dorotheenstraße, Spandauer Vorstadt, Mitte, Berlin, 10117, Deutschland",52.51875685,13.39356049,edu,
+5bde1718253ec28a753a892b0ba82d8e553b6bf3,Variational Relevance Vector Machine for Tabular Data,Lomonosov Moscow State University,Lomonosov Moscow State University,"МГУ, улица Академика Хохлова, Московский государственный университет им. М. В. Ломоносова, район Раменки, Западный административный округ, Москва, ЦФО, 119234, РФ",55.70229715,37.53179777,edu,
+5bde1718253ec28a753a892b0ba82d8e553b6bf3,Variational Relevance Vector Machine for Tabular Data,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+5bde1718253ec28a753a892b0ba82d8e553b6bf3,Variational Relevance Vector Machine for Tabular Data,Open University of Israel,Open University of Israel,"האוניברסיטה הפתוחה, 15, אבא חושי, חיפה, גבעת דאונס, חיפה, מחוז חיפה, NO, ישראל",32.77824165,34.99565673,edu,
+37c8514df89337f34421dc27b86d0eb45b660a5e,Facial Landmark Tracking by Tree-Based Deformable Part Model Based Detector,Czech Technical University,Czech Technical University,"České vysoké učení technické v Praze, Resslova, Nové Město, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 11121, Česko",50.07642960,14.41802312,edu,
+371f40f6d32ece05cc879b6954db408b3d4edaf3,Mining semantic affordances of visual object categories,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+37007af698b990a3ea8592b11d264b14d39c843f,DCMSVM: Distributed parallel training for single-machine multiclass classifiers,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+374a0df2aa63b26737ee89b6c7df01e59b4d8531,Temporal Action Localization with Pyramid of Score Distribution Features,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+378ae5ca649f023003021f5a63e393da3a4e47f0,Multi-class object localization by combining local contextual interactions,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+37619564574856c6184005830deda4310d3ca580,A deep pyramid Deformable Part Model for face detection,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+37ce1d3a6415d6fc1760964e2a04174c24208173,Pose-Invariant 3D Face Alignment,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+372688ad240474724683703e65a02f30e8d293ff,Putting the Scientist in the Loop -- Accelerating Scientific Progress with Interactive Machine Learning,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+372688ad240474724683703e65a02f30e8d293ff,Putting the Scientist in the Loop -- Accelerating Scientific Progress with Interactive Machine Learning,University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.54724732,edu,
+372688ad240474724683703e65a02f30e8d293ff,Putting the Scientist in the Loop -- Accelerating Scientific Progress with Interactive Machine Learning,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+3737961f634876f59d4ffd5bbd198bf38b2cfdda,Study the Behavior of Autistic Patients and Analysis of Amygdala Region of Brain to Explore Autism,Jahangirnagar University,Jahangirnagar University,"Jahangirnagar University, 1342, University Main Road, সাভার, সাভার উপজেলা, ঢাকা জেলা, ঢাকা বিভাগ, 1342, বাংলাদেশ",23.88331200,90.26939210,edu,
+373813010983b274401b9b65157df57ce50f7011,"Focus on quality, predicting FRVT 2006 performance",Colorado State University,Colorado State University,"Colorado State University, West Pitkin Street, Woodwest, Fort Collins, Larimer County, Colorado, 80526-2002, USA",40.57093580,-105.08655256,edu,
+373813010983b274401b9b65157df57ce50f7011,"Focus on quality, predicting FRVT 2006 performance",Colorado State University,Colorado State University,"Colorado State University, West Pitkin Street, Woodwest, Fort Collins, Larimer County, Colorado, 80526-2002, USA",40.57093580,-105.08655256,edu,
+373813010983b274401b9b65157df57ce50f7011,"Focus on quality, predicting FRVT 2006 performance",National Institute of Standards and Technology,National Institute of Standards and Technology,"National Institute of Standards and Technology, Summer Walk Drive, Diamond Farms, Gaithersburg, Montgomery County, Maryland, 20878, USA",39.12549380,-77.22293475,edu,
+373813010983b274401b9b65157df57ce50f7011,"Focus on quality, predicting FRVT 2006 performance",Colorado State University,Colorado State University,"Colorado State University, West Pitkin Street, Woodwest, Fort Collins, Larimer County, Colorado, 80526-2002, USA",40.57093580,-105.08655256,edu,
+373813010983b274401b9b65157df57ce50f7011,"Focus on quality, predicting FRVT 2006 performance",Colorado State University,Colorado State University,"Colorado State University, West Pitkin Street, Woodwest, Fort Collins, Larimer County, Colorado, 80526-2002, USA",40.57093580,-105.08655256,edu,
+379aaada226a3629408dbb223c7a7252dcc425b8,Naturalistic Pain Synthesis for Virtual Patients,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+37e1fc37a3ee90f24d85ad6fd3e5c51d3f5ab4fd,Attentive Explanations: Justifying Decisions and Pointing to the Evidence,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+37ba12271d09d219dd1a8283bc0b4659faf3a6c6,Domain transfer for person re-identification,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+372fefe66aa693e271ec6298fac1695208f36aee,Face Deidentification with Generative Deep Neural Networks,University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.05015580,14.46907327,edu,
+372fefe66aa693e271ec6298fac1695208f36aee,Face Deidentification with Generative Deep Neural Networks,Istanbul Technical University,Istanbul Technical University,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye",41.10427915,29.02231159,edu,
+372fefe66aa693e271ec6298fac1695208f36aee,Face Deidentification with Generative Deep Neural Networks,University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.05015580,14.46907327,edu,
+37b3637dab65b91a5c91bb6a583e69c448823cc1,Learning a Hierarchical Latent-Variable Model of 3D Shapes,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+3759b4fa10eabe047ff417b3076458b44132dc8b,Person-of-interest detection system using cloud-supported computerized-eyewear,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+37bb9b45c6385789b819573b3716fe56a9e627db,Location Augmentation for CNN,University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.54724732,edu,
+377f2b65e6a9300448bdccf678cde59449ecd337,Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results,Johns Hopkins University,"Johns Hopkins University, 3400 N. Charles St, Baltimore, MD 21218, USA","3400 N Charles St, Baltimore, MD 21218, USA",39.32905300,-76.61942500,edu,
+377f2b65e6a9300448bdccf678cde59449ecd337,Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+3714a415e63204e9c331b919cff6a14f7121c902,Improving 3d Face Recognition Model Generation and Biometrics,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+370b6b83c7512419188f5373a962dd3175a56a9b,Face Alignment Refinement via Exploiting Low-Rank property and Temporal Stability,Bournemouth University,Bournemouth University,"Bournemouth University, BU footpaths, Poole, South West England, England, BH10 4HX, UK",50.74223495,-1.89433739,edu,
+370b6b83c7512419188f5373a962dd3175a56a9b,Face Alignment Refinement via Exploiting Low-Rank property and Temporal Stability,Bournemouth University,Bournemouth University,"Bournemouth University, BU footpaths, Poole, South West England, England, BH10 4HX, UK",50.74223495,-1.89433739,edu,
+370b6b83c7512419188f5373a962dd3175a56a9b,Face Alignment Refinement via Exploiting Low-Rank property and Temporal Stability,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+370b6b83c7512419188f5373a962dd3175a56a9b,Face Alignment Refinement via Exploiting Low-Rank property and Temporal Stability,Bournemouth University,Bournemouth University,"Bournemouth University, BU footpaths, Poole, South West England, England, BH10 4HX, UK",50.74223495,-1.89433739,edu,
+370b6b83c7512419188f5373a962dd3175a56a9b,Face Alignment Refinement via Exploiting Low-Rank property and Temporal Stability,Bournemouth University,Bournemouth University,"Bournemouth University, BU footpaths, Poole, South West England, England, BH10 4HX, UK",50.74223495,-1.89433739,edu,
+37a4199e63312f7901af853998951883e52ab062,Future Localization from an Egocentric Depth Image,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+37aa876f5202d1db6919f0a0dd5a0f76508c02fb,Occlusion-Aware Hand Pose Estimation Using Hierarchical Mixture Density Network,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+37d7cb06c0a1e632dedcc1f23db22cbdc130e6aa,Pyramid Person Matching Network for Person Re-identification,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+37d7cb06c0a1e632dedcc1f23db22cbdc130e6aa,Pyramid Person Matching Network for Person Re-identification,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+378b6d94bacffb0fcc1063476a7b9694e877ba12,Scalable Hardware Efficient Deep Spatio-Temporal Inference Networks,University of Tennessee,University of Tennessee,"University of Tennessee, Melrose Avenue, Fort Sanders, Knoxville, Knox County, Tennessee, 37916, USA",35.95424930,-83.93073950,edu,
+37ef18d71c1ca71c0a33fc625ef439391926bfbb,Extraction of Subject-Specific Facial Expression Categories and Generation of Facial Expression Feature Space using Self-Mapping,Akita Prefectural University,Akita Prefectural University,"秋田県立大学, 秋田天王線, 潟上市, 秋田県, 東北地方, 011-0946, 日本",39.80114990,140.04591160,edu,
+37ef18d71c1ca71c0a33fc625ef439391926bfbb,Extraction of Subject-Specific Facial Expression Categories and Generation of Facial Expression Feature Space using Self-Mapping,Akita University,Akita University,"秋田大学手形キャンパス, 秋田八郎潟線, 手形字扇田, 広面, 秋田市, 秋田県, 東北地方, 010-0864, 日本",39.72781420,140.13322566,edu,
+081189493ca339ca49b1913a12122af8bb431984,Supplemental Material for Photorealistic Facial Texture Inference Using Deep Neural Networks,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+08ee541925e4f7f376538bc289503dd80399536f,Runtime Neural Pruning,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+08ee541925e4f7f376538bc289503dd80399536f,Runtime Neural Pruning,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+08ee541925e4f7f376538bc289503dd80399536f,Runtime Neural Pruning,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+08ee541925e4f7f376538bc289503dd80399536f,Runtime Neural Pruning,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+08b28a8f2699501d46d87956cbaa37255000daa3,MaskReID: A Mask Based Deep Ranking Neural Network for Person Re-identification,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+08b28a8f2699501d46d87956cbaa37255000daa3,MaskReID: A Mask Based Deep Ranking Neural Network for Person Re-identification,University of Wollongong,University of Wollongong,"University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia",-34.40505545,150.87834655,edu,
+081e540e0f3b4741c1f27092f52fef01bb81f06d,Hashing Hyperplane Queries to Near Points with Applications to Large-Scale Active Learning,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+081e540e0f3b4741c1f27092f52fef01bb81f06d,Hashing Hyperplane Queries to Near Points with Applications to Large-Scale Active Learning,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+081e540e0f3b4741c1f27092f52fef01bb81f06d,Hashing Hyperplane Queries to Near Points with Applications to Large-Scale Active Learning,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+08c2fdbe89fda66ec26453c4ea3f190e3e3d794f,A Biophysical 3D Morphable Model of Face Appearance,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+08eaa845a72a2b78e08e58592d8785942fced649,What's in a Question: Using Visual Questions as a Form of Supervision,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+0824768412cf2c3e9f550025eee06bb34e5f3afd,Latent Data Association: Bayesian Model Selection for Multi-target Tracking,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+0824768412cf2c3e9f550025eee06bb34e5f3afd,Latent Data Association: Bayesian Model Selection for Multi-target Tracking,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+08f6ad0a3e75b715852f825d12b6f28883f5ca05,Face recognition: Some challenges in forensics,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+08aaaf56277f7f9897353a6b09a63ea90b4cc554,Chapter 15 MULTIMEDIA INFORMATION NETWORKS IN SOCIAL MEDIA,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+084bd02d171e36458f108f07265386f22b34a1ae,Face Alignment at 3000 FPS via Regressing Local Binary Features,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+084bd02d171e36458f108f07265386f22b34a1ae,Face Alignment at 3000 FPS via Regressing Local Binary Features,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+081cb09791e7ff33c5d86fd39db00b2f29653fa8,Square Loss based regularized LDA for face recognition using image sets,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+081cb09791e7ff33c5d86fd39db00b2f29653fa8,Square Loss based regularized LDA for face recognition using image sets,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+0816b525b03e47d995b3d97f1f9132a4f7a2cf9d,Gaze-enabled egocentric video summarization via constrained submodular maximization,University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+0816b525b03e47d995b3d97f1f9132a4f7a2cf9d,Gaze-enabled egocentric video summarization via constrained submodular maximization,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+087a507075819e5b7ad886fad3097b23470f35f2,Using false colors to protect visual privacy of sensitive content,Middle East Technical University,Middle East Technical University,"ODTÜ, 1, 1591.sk(315.sk), Çiğdem Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87549675,32.78553506,edu,
+08d2a558ea2deb117dd8066e864612bf2899905b,Person Re-identification with Deep Similarity-Guided Graph Neural Network,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+08e3a0f80f10fc40cc1c043cbc4c873a76a6f6e8,Enhanced Pavlovian aversive conditioning to positive emotional stimuli.,University of Geneva,University of Geneva,"University of Chicago-Yerkes Observatory, 373, West Geneva Street, Williams Bay, Walworth County, Wisconsin, 53191, USA",42.57054745,-88.55578627,edu,
+08e3a0f80f10fc40cc1c043cbc4c873a76a6f6e8,Enhanced Pavlovian aversive conditioning to positive emotional stimuli.,University of Geneva,University of Geneva,"University of Chicago-Yerkes Observatory, 373, West Geneva Street, Williams Bay, Walworth County, Wisconsin, 53191, USA",42.57054745,-88.55578627,edu,
+086131159999d79adf6b31c1e604b18809e70ba8,Deep Action Unit classification using a binned intensity loss and semantic context model,Villanova University,Villanova University,"Villanova University, East Lancaster Avenue, Radnor Township, Delaware County, Pennsylvania, 19010, USA",40.03677740,-75.34202332,edu,
+086131159999d79adf6b31c1e604b18809e70ba8,Deep Action Unit classification using a binned intensity loss and semantic context model,Villanova University,Villanova University,"Villanova University, East Lancaster Avenue, Radnor Township, Delaware County, Pennsylvania, 19010, USA",40.03677740,-75.34202332,edu,
+080d9658e40581c7ba8c0cc1d86d1157eda92a3e,Periocular biometric recognition using image sets,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+089513ca240c6d672c79a46fa94a92cde28bd567,RNN Fisher Vectors for Action Recognition and Image Annotation,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+089513ca240c6d672c79a46fa94a92cde28bd567,RNN Fisher Vectors for Action Recognition and Image Annotation,"IBM Research, North Carolina",IBM Research,"IBM, East Cornwallis Road, Research Triangle Park, Nelson, Durham County, North Carolina, 27709, USA",35.90422720,-78.85565763,company,
+086a91d8db2780a14a21335260e97a9b7b27f546,Iterative object and part transfer for fine-grained recognition,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+08d6aecf1ee531f8c62c22a256b2c2e58081df9d,Blocks That Shout: Distinctive Parts for Scene Classification,International Institute of Information Technology,International Institute of Information Technology,"International Institute of Information Technology, Hyderabad, Campus Road, Ward 105 Gachibowli, Greater Hyderabad Municipal Corporation West Zone, Hyderabad, Rangareddy District, Telangana, 500032, India",17.44549570,78.34854698,edu,
+08d6aecf1ee531f8c62c22a256b2c2e58081df9d,Blocks That Shout: Distinctive Parts for Scene Classification,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+089b5e8eb549723020b908e8eb19479ba39812f5,A Cross Benchmark Assessment of a Deep Convolutional Neural Network for Face Recognition,National Institute of Standards and Technology,National Institute of Standards and Technology,"National Institute of Standards and Technology, Summer Walk Drive, Diamond Farms, Gaithersburg, Montgomery County, Maryland, 20878, USA",39.12549380,-77.22293475,edu,
+0843ec2b76ef9401e60654fbfe71bac44ed19fae,A Two-Stage Approach for Bag Detection in Pedestrian Images,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+08a1fc55d03e4a73cad447e5c9ec79a6630f3e2d,Tom-vs-Pete Classifiers and Identity-Preserving Alignment for Face Verification,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+08e69a871487b52510699c07859b4aaec122d3df,Visual Coreference Resolution in Visual Dialog Using Neural Module Networks,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+08e69a871487b52510699c07859b4aaec122d3df,Visual Coreference Resolution in Visual Dialog Using Neural Module Networks,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+084352b63e98d3b3310521fb3bda8cb4a77a0254,Part-based multiple-person tracking with partial occlusion handling,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+084352b63e98d3b3310521fb3bda8cb4a77a0254,Part-based multiple-person tracking with partial occlusion handling,University of Nevada,University of Nevada,"Orange 1, Evans Avenue, Reno, Washoe County, Nevada, 89557, USA",39.54694490,-119.81346566,edu,
+087002ab569e35432cdeb8e63b2c94f1abc53ea9,Spatiotemporal analysis of RGB-D-T facial images for multimodal pain level recognition,Aalborg University,Aalborg University,"AAU, Pontoppidanstræde, Sønder Tranders, Aalborg, Aalborg Kommune, Region Nordjylland, 9220, Danmark",57.01590275,9.97532827,edu,
+087002ab569e35432cdeb8e63b2c94f1abc53ea9,Spatiotemporal analysis of RGB-D-T facial images for multimodal pain level recognition,Aalborg University,Aalborg University,"AAU, Pontoppidanstræde, Sønder Tranders, Aalborg, Aalborg Kommune, Region Nordjylland, 9220, Danmark",57.01590275,9.97532827,edu,
+08cb294a08365e36dd7ed4167b1fd04f847651a9,Examining visible articulatory features in clear and conversational speech,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+08cb294a08365e36dd7ed4167b1fd04f847651a9,Examining visible articulatory features in clear and conversational speech,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+081286ede247c5789081502a700b378b6223f94b,Neural Correlates of Facial Mimicry: Simultaneous Measurements of EMG and BOLD Responses during Perception of Dynamic Compared to Static Facial Expressions,University of Bologna,Università di Bologna,"Via Zamboni, 33, 40126 Bologna BO, Italy",44.49623180,11.35415700,edu,
+081286ede247c5789081502a700b378b6223f94b,Neural Correlates of Facial Mimicry: Simultaneous Measurements of EMG and BOLD Responses during Perception of Dynamic Compared to Static Facial Expressions,University of Vienna,University of Vienna,"Uni Wien, 1, Universitätsring, Schottenviertel, KG Innere Stadt, Innere Stadt, Wien, 1010, Österreich",48.21313020,16.36068653,edu,
+081286ede247c5789081502a700b378b6223f94b,Neural Correlates of Facial Mimicry: Simultaneous Measurements of EMG and BOLD Responses during Perception of Dynamic Compared to Static Facial Expressions,Ryerson University,Ryerson University,"Ryerson University, Gould Street, Downtown Yonge, Old Toronto, Toronto, Ontario, M5B 2G9, Canada",43.65815275,-79.37908010,edu,
+08e995c080a566fe59884a527b72e13844b6f176,A New KSVM + KFD Model for Improved Classification and Face Recognition,University of Windsor,University of Windsor,"Bridge AA, Ambassador Bridge, Windsor, Essex, Ontario, N9C 2J9, Canada",42.30791465,-83.07176915,edu,
+085ceda1c65caf11762b3452f87660703f914782,Large-Pose Face Alignment via CNN-Based Dense 3D Model Fitting,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+08d55271589f989d90a7edce3345f78f2468a7e0,Quality Aware Network for Set to Set Recognition,SenseTime,SenseTime,"China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1号-7",39.99300800,116.32988200,company,"1 Zhongguancun E Rd, Haidian Qu, China"
+08d55271589f989d90a7edce3345f78f2468a7e0,Quality Aware Network for Set to Set Recognition,SenseTime,SenseTime,"China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1号-7",39.99300800,116.32988200,company,"1 Zhongguancun E Rd, Haidian Qu, China"
+08d55271589f989d90a7edce3345f78f2468a7e0,Quality Aware Network for Set to Set Recognition,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+08ba1a7d91ce9b4ac26869bfe4bb7c955b0d1a24,Reducing JointBoost-based multiclass classification to proximity search,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+08ba1a7d91ce9b4ac26869bfe4bb7c955b0d1a24,Reducing JointBoost-based multiclass classification to proximity search,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+08a98822739bb8e6b1388c266938e10eaa01d903,SensorSift: balancing sensor data privacy and utility in automated face understanding,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+084bebc5c98872e9307cd8e7f571d39ef9c1b81e,A Discriminative Feature Learning Approach for Deep Face Recognition,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+084bebc5c98872e9307cd8e7f571d39ef9c1b81e,A Discriminative Feature Learning Approach for Deep Face Recognition,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+083ac08287af7df220d88dca2fbf5b1812e35ee8,Abnormal functional connectivity in autism spectrum disorders during face processing.,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+083ac08287af7df220d88dca2fbf5b1812e35ee8,Abnormal functional connectivity in autism spectrum disorders during face processing.,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+08bc0dd59187eaf919dfedf1d5849d1a875835df,On-line Hough Forests,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+08bc0dd59187eaf919dfedf1d5849d1a875835df,On-line Hough Forests,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+08f1e9e14775757298afd9039f46ec56e80677f9,Attentional Push: Augmenting Salience with Shared Attention Modeling,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+0862940a255d980d46ef041ab20f153276f96214,3D Object Representations for Fine-Grained Categorization,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+0862940a255d980d46ef041ab20f153276f96214,3D Object Representations for Fine-Grained Categorization,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+084cf3858b07d64fc29cb7f0f4dc0653c6246d3d,A tool for fast ground truth generation for object detection and tracking from video,Eindhoven University of Technology,Eindhoven University of Technology,"Technische Universiteit Eindhoven, 2, De Rondom, Villapark, Eindhoven, Noord-Brabant, Nederland, 5600 MB, Nederland",51.44866020,5.49039957,edu,
+08d41d2f68a2bf0091dc373573ca379de9b16385,Recursive Chaining of Reversible Image-to-image Translators For Face Aging,Aalto University,Aalto University,"Aalto, 24, Otakaari, Otaniemi, Suur-Tapiola, Espoo, Helsingin seutukunta, Uusimaa, Etelä-Suomi, Manner-Suomi, 02150, Suomi",60.18558755,24.82427330,edu,
+08aab46667dbcd875751f1e8ce2daed0df643b12,Query-adaptive late fusion for image search and person re-identification,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+08aab46667dbcd875751f1e8ce2daed0df643b12,Query-adaptive late fusion for image search and person re-identification,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+6d0fe30444c6f4e4db3ad8b02fb2c87e2b33c58d,Robust Deep Appearance Models,Concordia University,Concordia University,"Concordia University, 1455, Boulevard De Maisonneuve Ouest, Quartier Concordia, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3G 1M8, Canada",45.49715100,-73.57878700,edu,
+6d0fe30444c6f4e4db3ad8b02fb2c87e2b33c58d,Robust Deep Appearance Models,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+6dbdb07ce2991db0f64c785ad31196dfd4dae721,Seeing Small Faces from Robust Anchor's Perspective,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+6d7a32f594d46f4087b71e2a2bb66a4b25da5e30,Towards Person Authentication by Fusing Visual and Thermal Face Biometrics,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+6d43bac8348a76ca5e3b765ad5b4d8c302c186f1,i-RevNet: Deep Invertible Networks,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+6d2ca1ddacccc8c865112bd1fbf8b931c2ee8e75,ROC speak: semi-automated personalized feedback on nonverbal behavior from recorded videos,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+6d6834a094767356474d34b099a2f042ddb44e69,TripletGAN: Training Generative Model with Triplet Loss,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+6d6834a094767356474d34b099a2f042ddb44e69,TripletGAN: Training Generative Model with Triplet Loss,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+6d6834a094767356474d34b099a2f042ddb44e69,TripletGAN: Training Generative Model with Triplet Loss,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+6d6834a094767356474d34b099a2f042ddb44e69,TripletGAN: Training Generative Model with Triplet Loss,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+6d741691b7164b636678340dbb5823e437e1c5a9,"Beyond Controllers Human Segmentation, Pose, and Depth Estimation as Game Input Mechanisms",Oxford Brookes University,Oxford Brookes University,"Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK",51.75552050,-1.22615970,edu,
+6dbe18855b85bc6f218c53993cf289e2607518b1,Learning Policies to Forecast Agent Behavior with Visual Data,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+6dbe18855b85bc6f218c53993cf289e2607518b1,Learning Policies to Forecast Agent Behavior with Visual Data,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+6d0177bb1cd292a2ad4a14e7b9173fcc8b72569c,Leveraging Textural Features for Recognizing Actions in Low Quality Videos,Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.64185301,edu,
+6d1cfdb82122cefbc0f27ee7a02d6a22483d6a05,Static Pose Estimation from Depth Images using Random Regression Forests and Hough Voting,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+6d97e69bbba5d1f5c353f9a514d62aff63bc0fb1,Semi-supervised learning for facial expression recognition,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+6d97e69bbba5d1f5c353f9a514d62aff63bc0fb1,Semi-supervised learning for facial expression recognition,Beckman Institute,Beckman Institute,"Beckman Institute, The Presidents' Walk, Urbana, Champaign County, Illinois, 61801-2341, USA",40.11571585,-88.22750772,edu,
+6da446b59944db9b3d7412ad0efc6c189812d56a,Facial Expression Recognition Using Depth Information and Spatiotemporal Features,SungKyunKwan University,SungKyunKwan University,"성균관대, 덕영대로, 천천동, 장안구, 수원시, 경기, 16357, 대한민국",37.30031270,126.97212300,edu,
+6dd3a95bd46e3ab9c3f649a2034bf5ddba19c710,Learning deep representations for semantic image parsing: a comprehensive overview,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+6d3c8c5869b512090b1283fba28f01c2748b0ebc,Recurrent neural networks for object detection in video sequences,Aalto University,Aalto University,"Aalto, 24, Otakaari, Otaniemi, Suur-Tapiola, Espoo, Helsingin seutukunta, Uusimaa, Etelä-Suomi, Manner-Suomi, 02150, Suomi",60.18558755,24.82427330,edu,
+6d07e176c754ac42773690d4b4919a39df85d7ec,Face Attribute Prediction Using Off-The-Shelf Deep Learning Networks,"KTH Royal Institute of Technology, Stockholm","KTH Royal Institute of Technology, Stockholm","KTH, Teknikringen, Lärkstaden, Norra Djurgården, Östermalms stadsdelsområde, Sthlm, Stockholm, Stockholms län, Svealand, 114 28, Sverige",59.34986645,18.07063213,edu,
+6dd2a0f9ca8a5fee12edec1485c0699770b4cfdf,Webly-Supervised Video Recognition by Mutually Voting for Relevant Web Images and Web Video Frames,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+6dd2a0f9ca8a5fee12edec1485c0699770b4cfdf,Webly-Supervised Video Recognition by Mutually Voting for Relevant Web Images and Web Video Frames,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+6d8612f7856f569f5635ff07a6b94480a9c7c284,Ensemble perception of emotions in autistic and typical children and adolescents,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+6d8612f7856f569f5635ff07a6b94480a9c7c284,Ensemble perception of emotions in autistic and typical children and adolescents,Plymouth University,Plymouth University,"Plymouth University, Portland Square, Barbican, Plymouth, South West England, England, PL4 6AP, UK",50.37552690,-4.13937687,edu,
+6d8612f7856f569f5635ff07a6b94480a9c7c284,Ensemble perception of emotions in autistic and typical children and adolescents,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+6d8612f7856f569f5635ff07a6b94480a9c7c284,Ensemble perception of emotions in autistic and typical children and adolescents,University of Florence,University of Florence,"Piazza di San Marco, 4, 50121 Firenze FI, Italy",43.77764260,11.25976500,edu,
+6d8612f7856f569f5635ff07a6b94480a9c7c284,Ensemble perception of emotions in autistic and typical children and adolescents,University of Geneva,University of Geneva,"Université de Genève, 24, Rue du Général-Dufour, Cité, Genève, 1204, Suisse/Schweiz/Svizzera/Svizra",46.19928900,6.14297900,edu,
+6d8612f7856f569f5635ff07a6b94480a9c7c284,Ensemble perception of emotions in autistic and typical children and adolescents,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+6d8612f7856f569f5635ff07a6b94480a9c7c284,Ensemble perception of emotions in autistic and typical children and adolescents,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+6d77eef66324951d70d98d6dc99c0e95e5b2fdf6,Modelling Multi-object Activity by Gaussian Processes,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+6dff2bcbfad53238d24f4467a9504ce33ecdfa4d,Illumination Normalization for Color Face Images,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+6d60c427036e63957f1ce72930146964c5743749,Deep Convolutional Neural Networks for Smile Recognition,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+6de1299a192fdb852846e3cfa4a428b8fe81523f,Learning Inverse Mapping by AutoEncoder Based Generative Adversarial Nets,"Sichuan University, Chengdu","Sichuan Univ., Chengdu","四川大学(华西校区), 校东路, 武侯区, 武侯区 (Wuhou), 成都市 / Chengdu, 四川省, 610014, 中国",30.64276900,104.06751175,edu,
+6dc1f94b852538d572e4919238ddb10e2ee449a4,Objects as context for detecting their semantic parts,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+6dbe76f51091ca6a626a62846a946ce687c3dbe8,INCREMENTAL OBJECT MATCHING WITH PROBABILISTIC METHODS Doctoral dissertation,Aalto University,Aalto University,"Aalto, 24, Otakaari, Otaniemi, Suur-Tapiola, Espoo, Helsingin seutukunta, Uusimaa, Etelä-Suomi, Manner-Suomi, 02150, Suomi",60.18558755,24.82427330,edu,
+6d4559883ffb8cc611644dce9f1422a98139a7eb,Preserving Semantic Relations for Zero-Shot Learning,Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.02223470,77.56718325,edu,
+6d4e3616d0b27957c4107ae877dc0dd4504b69ab,Unsupervised Learning using Sequential Verification for Action Recognition,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+6d5125c9407c7762620eeea7570af1a8ee7d76f3,Video Frame Interpolation by Plug-and-Play Deep Locally Linear Embedding,Yonsei University,Yonsei University,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.56004060,126.93692480,edu,
+6d8e3f3a83514381f890ab7cd2a1f1c5be597b69,Improving Text Recognition in Images of Natural Scenes,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+6d8e3f3a83514381f890ab7cd2a1f1c5be597b69,Improving Text Recognition in Images of Natural Scenes,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+6d8eef8f8d6cd8436c55018e6ca5c5907b31ac19,Understanding Representations and Reducing their Redundancy in Deep Networks,Virginia Polytechnic Institute and State University,Virginia Polytechnic Institute and State University,"Virginia Polytechnic Institute and State University, Duck Pond Drive, Blacksburg, Montgomery County, Virginia, 24061-9517, USA",37.21872455,-80.42542519,edu,
+6d0b4f5b3391463376c013a6c00d76daf38da578,"A Simple, Fast Diverse Decoding Algorithm for Neural Generation",Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+6dc784e98680f417d8dd1a78a417b8ce803ec143,Deep Adaptive Attention for Joint Facial Action Unit Detection and Face Alignment,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+6dc784e98680f417d8dd1a78a417b8ce803ec143,Deep Adaptive Attention for Joint Facial Action Unit Detection and Face Alignment,Tianjin University,Tianjin University,"天津大学, 92, 卫津路, 南开区 (Nankai), 天津市, 300072, 中国",39.10732000,117.17057000,edu,
+6dc784e98680f417d8dd1a78a417b8ce803ec143,Deep Adaptive Attention for Joint Facial Action Unit Detection and Face Alignment,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+6d5e12ee5d75d5f8c04a196dd94173f96dc8603f,"Learning a similarity metric discriminatively, with application to face verification",Courant Institute of Mathematical Sciences,Courant Institute of Mathematical Sciences,"Courant Institute of Mathematical Sciences, 251, Mercer Street, Washington Square Village, Greenwich Village, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72864840,-73.99568630,edu,
+6d5e12ee5d75d5f8c04a196dd94173f96dc8603f,"Learning a similarity metric discriminatively, with application to face verification",New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+0159b548d04a21938f066adc44bd7ca95bcb226b,Spectral Clustering with a Convex Regularizer on Millions of Images,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+016800413ebd1a87730a5cf828e197f43a08f4b3,Learning Attributes Equals Multi-Source Domain Generalization,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+016800413ebd1a87730a5cf828e197f43a08f4b3,Learning Attributes Equals Multi-Source Domain Generalization,University of Iowa,University of Iowa,"University of Iowa, Hawkeye Court, Iowa City, Johnson County, Iowa, 52246, USA",41.66590000,-91.57310307,edu,
+0136d9114d62aaedcfbb50ed9594d18e10424179,Learning Visual Models of Social Engagement,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+01239e3c4dd6b7b271df08c17398ceb260979ced,A Signal Processing Approach To Malware Analysis,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+01c840aa27a6c234c0e55e9a5874719bb4d8fbe3,Probabilistic Label Relation Graphs with Ising Models,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+01c9dc5c677aaa980f92c4680229db482d5860db,Temporal Action Detection Using a Statistical Language Model,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+013909077ad843eb6df7a3e8e290cfd5575999d2,A Semi-automatic Methodology for Facial Landmark Annotation,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+013909077ad843eb6df7a3e8e290cfd5575999d2,A Semi-automatic Methodology for Facial Landmark Annotation,University of Lincoln,University of Lincoln,"University of Lincoln, Brayford Way, Whitton Park, New Boultham, Lincoln, Lincolnshire, East Midlands, England, LN6 7TS, UK",53.22853665,-0.54873472,edu,
+013909077ad843eb6df7a3e8e290cfd5575999d2,A Semi-automatic Methodology for Facial Landmark Annotation,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+01c7a778cde86ad1b89909ea809d55230e569390,A Supervised Low-Rank Method for Learning Invariant Subspaces,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+01085ce83c6e8781f3d59bf8fb6a2f14c7fda9d6,Nearest neighbor based collection OCR,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+0149b14428de816bd62f80bbfd89238b765edaf7,Classification via Minimum Incremental Coding Length (MICL),"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+01576b5fe525d8dee025fd3776337d74dacdf224,Regression using Gaussian Process manifold kernel dimensionality reduction,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+0115f260069e2e501850a14845feb400142e2443,"An On-Line Handwriting Recognizer with Fisher Matching, Hypotheses Propagation Network and Context Constraint Models",New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+013b72b0941eec78c6a23bb8e94b9447793b7833,Head pose estimation and its application in TV viewers' behavior analysis,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+013b72b0941eec78c6a23bb8e94b9447793b7833,Head pose estimation and its application in TV viewers' behavior analysis,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+01cc8a712e67384f9ef9f30580b7415bfd71e980,Failing to ignore: paradoxical neural effects of perceptual load on early attentional selection in normal aging.,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+01cc8a712e67384f9ef9f30580b7415bfd71e980,Failing to ignore: paradoxical neural effects of perceptual load on early attentional selection in normal aging.,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+013af5a24ec62b000d00d86c1a504573c0f35a3e,Image-guided Non-local Dense Matching with Three-steps Optimization,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+01dc1e03f39901e212bdf291209b7686266aeb13,Actionness Estimation Using Hybrid Fully Convolutional Networks,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+01dc1e03f39901e212bdf291209b7686266aeb13,Actionness Estimation Using Hybrid Fully Convolutional Networks,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+016f49a54b79ec787e701cc8c7d0280273f9b1ef,Self Organizing Maps for Reducing the Number of Clusters by One on Simplex Subspaces,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+01ae6eac1b235dc2057773d5e0bb7b08d7dda7aa,Depth Sweep Regression Forests for Estimating 3D Human Pose from Images,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+01ae6eac1b235dc2057773d5e0bb7b08d7dda7aa,Depth Sweep Regression Forests for Estimating 3D Human Pose from Images,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+01853c864e7eaf0c61cdb2315681224d6a14bde4,Discovering human interactions in videos with limited data labeling,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+01853c864e7eaf0c61cdb2315681224d6a14bde4,Discovering human interactions in videos with limited data labeling,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+0108504305468275985da608b77dbbbe4aee34c7,An efficient branch-and-bound algorithm for optimal human pose estimation,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+016eb7b32d1fdec0899151fb03799378bf59bbe5,Point Linking Network for Object Detection,Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+01a078cd25b7ce1049efc07bd754980771150775,Visual object detection by parts-based modeling using extended histogram of gradients,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+01a31f75c9c3296cf3cb45b7bad97acb300b7459,Part-Aligned Bilinear Representations for Person Re-identification,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+01a31f75c9c3296cf3cb45b7bad97acb300b7459,Part-Aligned Bilinear Representations for Person Re-identification,Microsoft Research Asia,"Microsoft Live Labs Research, China","Microsoft Research Asia, 5, 丹棱街, 海淀区, 北京市, 100080, 中国",39.97977000,116.30665000,company,
+01a8c7335f0d9321c95d6a57f2dd9f128735f1d7,One-Step Spectral Clustering via Dynamically Learning Affinity Matrix and Subspace,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"电子科技大学(清水河校区), 2006, 西源大道, 郫都区, 成都市, 四川省, 611731, 中国",30.75279000,103.93188000,edu,
+01125e3c68edb420b8d884ff53fb38d9fbe4f2b8,Large Pose 3D Face Reconstruction from a Single Image via Direct Volumetric CNN Regression,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+01125e3c68edb420b8d884ff53fb38d9fbe4f2b8,Large Pose 3D Face Reconstruction from a Single Image via Direct Volumetric CNN Regression,Kingston University,Kingston University,"Kingston University, Kingston Hill, Kingston Vale, Kingston-upon-Thames, London, Greater London, England, KT2 7TF, UK",51.42930860,-0.26840440,edu,
+01c09acf0c046296643de4c8b55a9330e9c8a419,Manifold Learning Using Euclidean -nearest Neighbor Graphs,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+01d23cbac762b0e46251f5dbde08f49f2d13b9f8,Combining Face Verification Experts,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+014143aa16604ec3f334c1407ceaa496d2ed726e,Large-scale manifold learning,Courant Institute of Mathematical Sciences,Courant Institute of Mathematical Sciences,"Courant Institute of Mathematical Sciences, 251, Mercer Street, Washington Square Village, Greenwich Village, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72864840,-73.99568630,edu,
+016b1080c108718fc59e58e47b4867baebd57d8e,Cepstral Methods for Image Feature Extraction,Bilkent University,Bilkent University,"Bilkent Üniversitesi, 3. Cadde, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87204890,32.75395155,edu,
+014e249422b6bd6ff32b3f7d385b5a0e8c4c9fcf,Attention driven person re-identification,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+014e249422b6bd6ff32b3f7d385b5a0e8c4c9fcf,Attention driven person re-identification,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+0182d090478be67241392df90212d6cd0fb659e6,Discovering localized attributes for fine-grained recognition,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+0182d090478be67241392df90212d6cd0fb659e6,Discovering localized attributes for fine-grained recognition,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+0106a2f6251dc9ffc90709c6f0d9b54c1e82326b,Applying scattering operators for face recognition: A comparative study,Institute of Information Science,Institute of Information Science,"資訊科學研究所, 數理大道, 中研里, 南港子, 南港區, 臺北市, 11574, 臺灣",25.04107280,121.61475620,edu,
+0106a2f6251dc9ffc90709c6f0d9b54c1e82326b,Applying scattering operators for face recognition: A comparative study,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+01d2b2bffd1c6d77398cfe7011d4cbd3a0bc7fd1,A Dual-Source Approach for 3D Pose Estimation from a Single Image,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+01d2b2bffd1c6d77398cfe7011d4cbd3a0bc7fd1,A Dual-Source Approach for 3D Pose Estimation from a Single Image,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+016a8ed8f6ba49bc669dbd44de4ff31a79963078,Face relighting for face recognition under generic illumination,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+015a5fde8c9c89ae9ae8349183018acb8f0e741f,Egocentric Height Estimation,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+015a5fde8c9c89ae9ae8349183018acb8f0e741f,Egocentric Height Estimation,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+019c5cb085dbbc8a0fc78645e385aa4e0b468fb8,Continuous Learning of Context-dependent Processing in Neural Networks,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+019c5cb085dbbc8a0fc78645e385aa4e0b468fb8,Continuous Learning of Context-dependent Processing in Neural Networks,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+011d461718c39c9d196cb84b2e881c1660ef8f55,Context Aware Active Learning of Activity Recognition Models,"University of California, Riverside","University of California, Riverside","University of California, Riverside, Linden Street, Riverside, Riverside County, California, 92521, USA",33.98071305,-117.33261035,edu,
+0141c695e4cf87cc58e0d552004bcb53258c4915,Lying Pose Recognition for Elderly Fall Detection,Orange,Orange Labs,"78 Rue Olivier de Serres, 75015 Paris, France",48.83321220,2.29421550,company,"78 Rue Olivier de Serres, Paris, 75015"
+0141c695e4cf87cc58e0d552004bcb53258c4915,Lying Pose Recognition for Elderly Fall Detection,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+0162e84bbe995ec06e8e59dd9023c67d8f0e8880,Learning to Hash with Partial Tags: Exploring Correlation between Tags and Hashing Bits for Large Scale Image Retrieval,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+016dd886d5cb01c55a0204e2988274cf9417b564,Strong Appearance and Expressive Spatial Models for Human Pose Estimation,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+01018a509f32601e1bbf7f0159aad1a513e23f92,Computers in the Human Interaction Loop,"IBM Research, North Carolina",IBM Research,"IBM, East Cornwallis Road, Research Triangle Park, Nelson, Durham County, North Carolina, 27709, USA",35.90422720,-78.85565763,company,
+012e281061126caf2e2c94ca6ba0116c8a8930fb,Human Body Detection and Safety Care System for a Flying Robot,Eastern Mediterranean University,Eastern Mediterranean University,"Eastern Mediterranean University (EMU) - Stadium, Nehir Caddesi, Gazimağusa, Αμμόχωστος - Mağusa, Kuzey Kıbrıs, 99450, Κύπρος - Kıbrıs",35.14479945,33.90492318,edu,
+01424c2510fbca67c3cb016ac919f6a58e37541f,2D Human Pose Estimation in TV Shows,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+015df3b57e44b8ddc51c87e5255fa4940bd91963,DSFD: Dual Shot Face Detector,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+01a903739564f575b81c87f7a9e2cb7b609f7ada,Image retrieval using scene graphs,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+0139eb62a87649bf7d259542b5afc6be121b094b,Unsupervised Feature Selection Using Nonnegative Spectral Analysis,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+0139eb62a87649bf7d259542b5afc6be121b094b,Unsupervised Feature Selection Using Nonnegative Spectral Analysis,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+014043cd53e4faf203e8938f1f32cc494bb414af,Domain Adaptive Subspace Clustering,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+01c687f0cb8c8e1002376f834c9b43b4b653a52f,Beyond shape: incorporating color invariance into a biologically inspired feedforward model of category recognition,Hefei University of Technology,Hefei University of Technology,"合肥工业大学(屯溪路校区), 193号, 南一环路, 航运南村, 包公街道, 合肥市区, 合肥市, 安徽省, 230009, 中国",31.84691800,117.29053367,edu,
+01c687f0cb8c8e1002376f834c9b43b4b653a52f,Beyond shape: incorporating color invariance into a biologically inspired feedforward model of category recognition,Hefei University of Technology,Hefei University of Technology,"合肥工业大学(屯溪路校区), 193号, 南一环路, 航运南村, 包公街道, 合肥市区, 合肥市, 安徽省, 230009, 中国",31.84691800,117.29053367,edu,
+01c687f0cb8c8e1002376f834c9b43b4b653a52f,Beyond shape: incorporating color invariance into a biologically inspired feedforward model of category recognition,Hefei University of Technology,Hefei University of Technology,"合肥工业大学(屯溪路校区), 193号, 南一环路, 航运南村, 包公街道, 合肥市区, 合肥市, 安徽省, 230009, 中国",31.84691800,117.29053367,edu,
+01c687f0cb8c8e1002376f834c9b43b4b653a52f,Beyond shape: incorporating color invariance into a biologically inspired feedforward model of category recognition,Hefei University of Technology,Hefei University of Technology,"合肥工业大学(屯溪路校区), 193号, 南一环路, 航运南村, 包公街道, 合肥市区, 合肥市, 安徽省, 230009, 中国",31.84691800,117.29053367,edu,
+01dfd60c0851c4e5a99176e99aa369e1b5f606b7,Disentangled Variational Representation for Heterogeneous Face Recognition,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+01dfd60c0851c4e5a99176e99aa369e1b5f606b7,Disentangled Variational Representation for Heterogeneous Face Recognition,Johns Hopkins University,"Johns Hopkins University, 3400 N. Charles St, Baltimore, MD 21218, USA","3400 N Charles St, Baltimore, MD 21218, USA",39.32905300,-76.61942500,edu,
+01efec88d36070dc3bc49f341a77476f74d373bc,Generation and Comprehension of Unambiguous Object Descriptions,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+01efec88d36070dc3bc49f341a77476f74d373bc,Generation and Comprehension of Unambiguous Object Descriptions,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+015d24c1a8621bcb6b6beac3c4d5a34af5589ec6,Classification and feature selection with human performance data,Boston College,Boston College,"Boston College, 140, Commonwealth Avenue, Chestnut Hill, Newton, Middlesex County, Massachusetts, 02467, USA",42.33544810,-71.16813864,edu,
+01aad32349489cabfcb619024b297d8f854e9d1f,From visual attributes to adjectives through decompositional distributional semantics,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+019e471667c72b5b3728b4a9ba9fe301a7426fb2,Cross-age face verification by coordinating with cross-face age verification,Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+064a79968f593d17934c1cd14def70aac56aecb9,Pose Transferrable Person Re-Identification,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+067126ce1f1a205f98e33db7a3b77b7aec7fb45a,On Improving Dissimilarity-Based Classifications Using a Statistical Similarity Measure,Myongji University,Myongji University,"명지대, 금학로, 역북동, 처인구, 용인시, 경기, 17144, 대한민국",37.23810230,127.19034310,edu,
+067126ce1f1a205f98e33db7a3b77b7aec7fb45a,On Improving Dissimilarity-Based Classifications Using a Statistical Similarity Measure,Delft University of Technology,Delft University of Technology,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland",51.99882735,4.37396037,edu,
+06466276c4955257b15eff78ebc576662100f740,Where is who: large-scale photo retrieval by facial attributes and canvas layout,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+06096a9927d45ff82eed34e6b3d6c8fbdc397756,Image In painter Mask Generator Object Classifier Real / Fake ? Is there a person ?,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+06b4522433beca98aea99f924fbaeb8f861df8cd,Selection and combination of local Gabor classifiers for robust face verification,Bogazici University,Bogazici University,"Boğaziçi Üniversitesi Kuzey Yerleşkesi, Okulaltı 1. Sokak, Rumelihisarı, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34467, Türkiye",41.08688410,29.04413167,edu,
+06b4522433beca98aea99f924fbaeb8f861df8cd,Selection and combination of local Gabor classifiers for robust face verification,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+06b4522433beca98aea99f924fbaeb8f861df8cd,Selection and combination of local Gabor classifiers for robust face verification,Istanbul Technical University,Istanbul Technical University,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye",41.10427915,29.02231159,edu,
+06f585a3a05dd3371cd600a40dc35500e2f82f9b,Better and Faster: Knowledge Transfer from Multiple Self-supervised Learning Tasks via Graph Distillation for Video Classification,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+06f8aa1f436a33014e9883153b93581eea8c5c70,Leaving Some Stones Unturned: Dynamic Feature Prioritization for Activity Detection in Streaming Video,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+065ffca373469c95db28891889289d79e873e2a2,Ensemble Methods for Robust 3D Face Recognition Using Commodity Depth Sensors,University of Groningen,University of Groningen,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland",53.21967825,6.56251482,edu,
+06aec820d7d4b15f8c49ac4b8246377015693abd,Content Based Image Retrieval Using Signature Representation,Queensland University of Technology,Queensland University of Technology,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.47715625,153.02841004,edu,
+06b6606e47e071bbe070093c78120207578126fd,Total Moving Face Reconstruction,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+06ba3492e3a9a2e98df2c81b91ec94787e3f97fb,"VQA-E: Explaining, Elaborating, and Enhancing Your Answers for Visual Questions",University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+061c84a4143e859a7caf6e6d283dfb30c23ee56e,DEEP-CARVING: Discovering visual attributes by carving deep neural nets,Cambridge University,Cambridge University,"University, Cambridge Road, Old Portsmouth, Portsmouth, South East, England, PO1 2HB, UK",50.79440260,-1.09717480,edu,
+061e29eae705f318eee703b9e17dc0989547ba0c,Enhancing Expression Recognition in the Wild with Unlabeled Reference Data,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+0612832338287cd6569cad32f147bed6df134223,A Comparative Study of Real-time Semantic Segmentation for Autonomous Driving,University of Alberta,University of Alberta,"University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada",53.52385720,-113.52282665,edu,
+0612832338287cd6569cad32f147bed6df134223,A Comparative Study of Real-time Semantic Segmentation for Autonomous Driving,University of Alberta,University of Alberta,"University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada",53.52385720,-113.52282665,edu,
+067a40d9fe0942abfc8a31342a95f165a88ca5d6,Optimal Neighborhood Preserving Visualization by Maximum Satisfiability,Aalto University,Aalto University,"Aalto, 24, Otakaari, Otaniemi, Suur-Tapiola, Espoo, Helsingin seutukunta, Uusimaa, Etelä-Suomi, Manner-Suomi, 02150, Suomi",60.18558755,24.82427330,edu,
+067a40d9fe0942abfc8a31342a95f165a88ca5d6,Optimal Neighborhood Preserving Visualization by Maximum Satisfiability,University of Tampere,University of Tampere,"Tampereen yliopisto, 4, Kalevantie, Ratinanranta, Tulli, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33100, Suomi",61.49412325,23.77920678,edu,
+067a40d9fe0942abfc8a31342a95f165a88ca5d6,Optimal Neighborhood Preserving Visualization by Maximum Satisfiability,Aalto University,Aalto University,"Aalto, 24, Otakaari, Otaniemi, Suur-Tapiola, Espoo, Helsingin seutukunta, Uusimaa, Etelä-Suomi, Manner-Suomi, 02150, Suomi",60.18558755,24.82427330,edu,
+067a40d9fe0942abfc8a31342a95f165a88ca5d6,Optimal Neighborhood Preserving Visualization by Maximum Satisfiability,Aalto University,Aalto University,"Aalto, 24, Otakaari, Otaniemi, Suur-Tapiola, Espoo, Helsingin seutukunta, Uusimaa, Etelä-Suomi, Manner-Suomi, 02150, Suomi",60.18558755,24.82427330,edu,
+06850b60e33baa4ea9473811d58c0d5015da079e,A Survey of the Trends in Facial and Expression Recognition Databases and Methods,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+06850b60e33baa4ea9473811d58c0d5015da079e,A Survey of the Trends in Facial and Expression Recognition Databases and Methods,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+06a6347ac14fd0c6bb3ad8190cbe9cdfa5d59efc,Active image clustering: Seeking constraints from humans to complement algorithms,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+06554235c2c9361a14c0569206b58a355a63f01b,Zero-Shot Learning Through Cross-Modal Transfer,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+06eafdc0b281edb8ab4d65012da5d0c94b55970b,Face Recognition with Disparity Corrected Gabor Phase Differences,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+0687e472b5accce40299a6dd109c38e4167fea94,Learning Image Representations for Efficient Recognition of Novel Classes,Dartmouth College,Dartmouth College,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA",43.70479270,-72.29259090,edu,
+06890d068a7fb82fa78443038ad26ca7623f7a98,Socially-Aware Large-Scale Crowd Forecasting,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+06890d068a7fb82fa78443038ad26ca7623f7a98,Socially-Aware Large-Scale Crowd Forecasting,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+06cf3f55b2bd959d9228d29e1aa3e71ba7cece94,Features for image retrieval: an experimental comparison,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+0630b3677323c8c987f16f37545ac6073293de8d,Enhancement and stylization of photographs by Vladimir Leonid,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+06bad0cdda63e3fd054e7b334a5d8a46d8542817,Sharing features between objects and their attributes,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+06bad0cdda63e3fd054e7b334a5d8a46d8542817,Sharing features between objects and their attributes,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+06ca83cc7def5b0d582f4d933057c4370a6345d7,Training Object Class Detectors with Click Supervision,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+061d303381266e1ee751f5b7551d25324c043bed,Parametric Image Segmentation of Humans with Structural Shape Priors,Lund University,Lund University,"TEM at Lund University, 9, Klostergatan, Stadskärnan, Centrum, Lund, Skåne, Götaland, 22222, Sverige",55.70395710,13.19020110,edu,
+0672976bb2c3b4bde4381f28bf4bbdeeabd3a22e,Mo2Cap2: Real-time Mobile 3D Motion Capture with a Cap-mounted Fisheye Camera,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+06d656c53b17ad7c4ca6345d19cbca271d93ef02,Social and Egocentric Image Classification for Scientific and Privacy Applications,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+0641dbee7202d07b6c78a39eecd312c17607412e,Null space clustering with applications to motion segmentation and face clustering,"Australian National University, Canberra","Australian National University, Canberra","Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia",-35.28121335,149.11665331,edu,
+06400a24526dd9d131dfc1459fce5e5189b7baec,Event Recognition in Photo Collections with a Stopwatch HMM,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+062d67af7677db086ef35186dc936b4511f155d7,They are Not Equally Reliable: Semantic Event Search Using Differentiated Concept Classifiers,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+062d67af7677db086ef35186dc936b4511f155d7,They are Not Equally Reliable: Semantic Event Search Using Differentiated Concept Classifiers,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+06c2086f7f72536bf970ca629151b16927104df3,Recurrent CNN for 3D Gaze Estimation using Appearance and Shape Cues,University of Calgary,University of Calgary,"University of Calgary, Service Tunnel, University Heights, Calgary, Alberta, T2N 1N7, Canada",51.07840380,-114.12870770,edu,
+0608313884bea3c286d6cf95ccf9bbff4c77c9f5,Discovering Groups of People in Images,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+0608313884bea3c286d6cf95ccf9bbff4c77c9f5,Discovering Groups of People in Images,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+066476a38f8751696f5f7b47c0fb7f1d8ecdac1a,Automatic adaptation of a generic pedestrian detector to a specific traffic scene,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+063f78c20405158d87114a8aef1bb7557230bd89,An improved deep learning architecture for person re-identification,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+060ea3f72ee63d909600caad168cb26b4777b19e,Fusion of Likelihood Ratio Classifier with ICP-based Matcher for 3D Face Recognition,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+060034b59275c13746413ca9c67d6304cba50da6,Ordered Trajectories for Large Scale Human Action Recognition,University of Canberra,University of Canberra,"University of Canberra, University Drive, Bruce, Belconnen, Australian Capital Territory, 2617, Australia",-35.23656905,149.08446994,edu,
+060034b59275c13746413ca9c67d6304cba50da6,Ordered Trajectories for Large Scale Human Action Recognition,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+0611dae4ae932e0c5f28f08676d234dd9233732f,Challenge of multi-camera tracking,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+06f2df0ec9ab6968411e34f581dd8f5d40500d7f,The fusiform face area: a cortical region specialized for the perception of faces.,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+06f2df0ec9ab6968411e34f581dd8f5d40500d7f,The fusiform face area: a cortical region specialized for the perception of faces.,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+063146e2b400cad120d41371a024de319eb67c05,Automatic recognition by gait: progress and prospects,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+06a632adff4f89e8ccb001bfffa1b8a558015938,BubbleView: An Interface for Crowdsourcing Image Importance Maps and Tracking Visual Attention,MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+06a632adff4f89e8ccb001bfffa1b8a558015938,BubbleView: An Interface for Crowdsourcing Image Importance Maps and Tracking Visual Attention,MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+06a632adff4f89e8ccb001bfffa1b8a558015938,BubbleView: An Interface for Crowdsourcing Image Importance Maps and Tracking Visual Attention,MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+061ffd3967540424ac4e4066f4a605d8318bab90,Dirichlet-Based Histogram Feature Transform for Image Classification,National Institute of Advanced Industrial Science and Technology,National Institute of Advanced Industrial Science and Technology,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本",36.05238585,140.11852361,edu,
+061bba574c7c2ef0ba9de91afc4fcab70feddd4f,Paying Attention to Descriptions Generated by Image Captioning Models,Aalto University,Aalto University,"Aalto, 24, Otakaari, Otaniemi, Suur-Tapiola, Espoo, Helsingin seutukunta, Uusimaa, Etelä-Suomi, Manner-Suomi, 02150, Suomi",60.18558755,24.82427330,edu,
+061bba574c7c2ef0ba9de91afc4fcab70feddd4f,Paying Attention to Descriptions Generated by Image Captioning Models,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+064cd41d323441209ce1484a9bba02a22b625088,Selective Transfer Machine for Personalized Facial Action Unit Detection,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+064cd41d323441209ce1484a9bba02a22b625088,Selective Transfer Machine for Personalized Facial Action Unit Detection,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+0670849811a6ba4fbfcbe11126b811dd94e06e66,Robust Metric and Alignment for Profile-Based Face Recognition: An Experimental Comparison,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+0688c0568f3ab418719260d443cc0d86c3af2914,Curriculum Domain Adaptation for Semantic Segmentation of Urban Scenes,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+06625b0c5747ccb8524fec9f44e4a8aa1ecc2151,Nuclear Norm Based Matrix Regression with Applications to Face Recognition with Occlusion and Illumination Changes,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+06c8fcb0429afd3aee153ba42e1fd8aa93f7214f,Social roles in hierarchical models for human activity recognition,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+06c2dfe1568266ad99368fc75edf79585e29095f,Bayesian Active Appearance Models,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+06f39834e870278243dda826658319be2d5d8ded,Recognizing unseen actions in a domain-adapted embedding space,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+06325345f9ffef958d9d7c704b28e6cbb3021b8c,Price theory based power management for heterogeneous multi-cores,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+0657da204bbd5f1e92882b2ccdf4f883659ccd37,Predicting Deep Zero-Shot Convolutional Neural Networks Using Textual Descriptions,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+06a6976d9a3deac30b0a571d31f85c11ae4eb8ad,A Novel Human Detection Approach Based on Depth Map via Kinect,Curtin University,Curtin University,"Curtin University, Brand Drive, Waterford, Perth, Western Australia, 6102, Australia",-32.00686365,115.89691775,edu,
+06a6976d9a3deac30b0a571d31f85c11ae4eb8ad,A Novel Human Detection Approach Based on Depth Map via Kinect,Shanghai University,Shanghai University,"上海大学, 锦秋路, 大场镇, 宝山区 (Baoshan), 上海市, 201906, 中国",31.32235655,121.38400941,edu,
+0691b9cd1b9b44bff297a62277be514ede9df01d,Inferring semantic concepts from community-contributed images and noisy tags,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+0691b9cd1b9b44bff297a62277be514ede9df01d,Inferring semantic concepts from community-contributed images and noisy tags,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+06d7ef72fae1be206070b9119fb6b61ce4699587,On One-Shot Similarity Kernels: Explicit Feature Maps and Properties,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+06d7ef72fae1be206070b9119fb6b61ce4699587,On One-Shot Similarity Kernels: Explicit Feature Maps and Properties,University of Patras,University of Patras,"Πανεπιστήμιο Πατρών, Λεωφ. Ιπποκράτους, κ. Ρίου (Αγίου Γεωργίου Ρίου), Πάτρα, Δήμος Πατρέων, Περιφερειακή Ενότητα Αχαΐας, Περιφέρεια Δυτικής Ελλάδας, Πελοπόννησος, Δυτική Ελλάδα και Ιόνιο, 26443, Ελλάδα",38.28994820,21.78864690,edu,
+06d7ef72fae1be206070b9119fb6b61ce4699587,On One-Shot Similarity Kernels: Explicit Feature Maps and Properties,Middlesex University,Middlesex University,"Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK",51.59029705,-0.22963221,edu,
+06cb7c6601b7ee0d89cccd5311dcda9e5316e02d,A system for large-scale analysis of distributed cameras,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+062d0813815c2b9864cd9bb4f5a1dc2c580e0d90,Encouraging LSTMs to Anticipate Actions Very Early,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+06a9ed612c8da85cb0ebb17fbe87f5a137541603,Deep Learning of Player Trajectory Representations for Team Activity Analysis,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+6ca21247f5963f6d459e09278812d60c35d10335,Appearance-Based Gaze Estimation via Evaluation-Guided Asymmetric Regression,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+6ca21247f5963f6d459e09278812d60c35d10335,Appearance-Based Gaze Estimation via Evaluation-Guided Asymmetric Regression,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+6ca21247f5963f6d459e09278812d60c35d10335,Appearance-Based Gaze Estimation via Evaluation-Guided Asymmetric Regression,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+6ce111fdfb7ebc8f1fe23ceaf859f7be799d5c91,Activity understanding and unusual event detection in surveillance videos,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+6c66ae815e7e508e852ecb122fb796abbcda16a8,Expression Recognition Databases and Methods,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+6ce1a8240b0eba18c40136370e143209dec4a5a7,Predicting Future Instance Segmentation by Forecasting Convolutional Features,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+6c3b2fd0cb23ddb6ed707d6c9986a78d6b76bf43,Interleaved Group Convolutions for Deep Neural Networks,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+6c3b2fd0cb23ddb6ed707d6c9986a78d6b76bf43,Interleaved Group Convolutions for Deep Neural Networks,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+6ca2c5ff41e91c34696f84291a458d1312d15bf2,LipNet: Sentence-level Lipreading,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+6c06ad0b4b7c981089b5a8037d5b9f9e5b928196,Image Retrieval with Structured Object Queries Using Latent Ranking SVM,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+6c06ad0b4b7c981089b5a8037d5b9f9e5b928196,Image Retrieval with Structured Object Queries Using Latent Ranking SVM,University of Manitoba,University of Manitoba,"University of Manitoba, Gillson Street, Normand Park, Saint Vital, Winnipeg, Manitoba, R3T 2N2, Canada",49.80915360,-97.13304179,edu,
+6c690af9701f35cd3c2f6c8d160b8891ad85822a,Multi-Task Learning with Low Rank Attribute Embedding for Person Re-Identification,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+6c690af9701f35cd3c2f6c8d160b8891ad85822a,Multi-Task Learning with Low Rank Attribute Embedding for Person Re-Identification,University of Maryland College Park,University of Maryland College Park,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",38.99203005,-76.94610290,edu,
+6c690af9701f35cd3c2f6c8d160b8891ad85822a,Multi-Task Learning with Low Rank Attribute Embedding for Person Re-Identification,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+6c5fbf156ef9fc782be0089309074cc52617b868,Controllable Video Generation with Sparse Trajectories,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+6ce23cf4f440021b7b05aa3c1c2700cc7560b557,Learning Local Convolutional Features for Face Recognition with 2D-Warping,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+6c0e0c3e66622023c64c664c3411a6fe1c87d5c5,Efficient Fine-Grained Classification and Part Localization Using One Compact Network,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+6c0e0c3e66622023c64c664c3411a6fe1c87d5c5,Efficient Fine-Grained Classification and Part Localization Using One Compact Network,University of Maryland College Park,University of Maryland College Park,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",38.99203005,-76.94610290,edu,
+6c0e0c3e66622023c64c664c3411a6fe1c87d5c5,Efficient Fine-Grained Classification and Part Localization Using One Compact Network,SRI International,SRI International,"SRI International Building, West 1st Street, Menlo Park, San Mateo County, California, 94025, USA",37.45857960,-122.17560525,edu,
+6c80c834d426f0bc4acd6355b1946b71b50cbc0b,Pose-Based Two-Stream Relational Networks for Action Recognition in Videos,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+6c032a08fba885960e531a02641d121b81cb7c32,Representation Of Objects In A Volumetric Frequency Domain With Application To Face Recognition,University of Illinois at Chicago,University of Illinois at Chicago,"University of Illinois at Chicago, West Taylor Street, Greektown, Chicago, Cook County, Illinois, 60607, USA",41.86898915,-87.64856256,edu,
+6c78add400f749c897dc3eb93996eda1c796e91c,Enhanced Random Forest with Image/Patch-Level Learning for Image Understanding,University of Malaya,University of Malaya,"UM, Lingkaran Wawasan, Bukit Pantai, Bangsar, KL, 50603, Malaysia",3.12267405,101.65356103,edu,
+6c78add400f749c897dc3eb93996eda1c796e91c,Enhanced Random Forest with Image/Patch-Level Learning for Image Understanding,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+6c78add400f749c897dc3eb93996eda1c796e91c,Enhanced Random Forest with Image/Patch-Level Learning for Image Understanding,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+6ca45b402a204351691c6f12a84cba3be1c5fd56,An overview of content-based image retrieval techniques ( CBIR ),Islamic Azad University,Islamic Azad University,"دانشگاه آزاد اسلامی, همدان, بخش مرکزی شهرستان همدان, شهرستان همدان, استان همدان, ‏ایران‎",34.84529990,48.55962120,edu,
+6cf9322009fb8f36c01fc54d213e9cd745e62468,Semi-supervised distance metric learning for person re-identification,Yunnan University,Yunnan University,"云南大学, 一二一大街, 志城家园, 五华区, 五华区 (Wuhua), 昆明市 (Kunming), 云南省, 650030, 中国",25.05703205,102.70027525,edu,
+6c388fc4503636245fd464a05a9f843b303ad79a,Cross camera people counting with perspective estimation and occlusion handling,"Academia Sinica, Taiwan","Research Center for Institute of Information Science, Academia Sinica, Taiwan","115, Taiwan, Taipei City, Nangang District, 研究院路二段128號",25.04117270,121.61465180,edu,
+6c388fc4503636245fd464a05a9f843b303ad79a,Cross camera people counting with perspective estimation and occlusion handling,Institute of Information Science,Institute of Information Science,"資訊科學研究所, 數理大道, 中研里, 南港子, 南港區, 臺北市, 11574, 臺灣",25.04107280,121.61475620,edu,
+6c97af4c5d9908c288626d833818d7095f635765,Multi-Task Learning Improves Disease Models from Web Search,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+6c97af4c5d9908c288626d833818d7095f635765,Multi-Task Learning Improves Disease Models from Web Search,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+6c97af4c5d9908c288626d833818d7095f635765,Multi-Task Learning Improves Disease Models from Web Search,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+6c26743e131a67b25738beffcee05da6af5d87d9,First Person Action Recognition Using Deep Learned Descriptors,"IIIT Delhi, India","IIIT Delhi, India","Okhla Industrial Estate, Phase III, Near Govind Puri Metro Station, New Delhi, Delhi 110020, India",28.54562820,77.27315050,edu,
+6c4e173fdafa89ac7b40e1dddf953dcc833db92d,Photometric Normalization for Face Recognition using Local discrete cosine Transform,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+6c9ed3378dd53a5ad9e30613ba2e1ef363bd1f9d,Atoms of recognition in human and computer vision.,Weizmann Institute of Science,Weizmann Institute of Science,"מכון ויצמן למדע, שדרת מרכוס זיו, מעונות שיין, אחוזות הנשיא, רחובות, מחוז המרכז, NO, ישראל",31.90784990,34.81334092,edu,
+6c9ed3378dd53a5ad9e30613ba2e1ef363bd1f9d,Atoms of recognition in human and computer vision.,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+6c6bb85a08b0bdc50cf8f98408d790ccdb418798,Recognition of facial expressions in presence of partial occlusion,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+6c705285c554985ecfe1117e854e1fe1323f8c21,DIY Human Action Data Set Generation,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+6caa275cc502513550bde0a32416a3b32470161b,Sparse shape modelling for 3D face analysis,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+6c7a36efbe07ab295ddebc60c834cf74ec30ba50,Group Consistent Similarity Learning via Deep CRF for Person Re-Identification,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+6c7a36efbe07ab295ddebc60c834cf74ec30ba50,Group Consistent Similarity Learning via Deep CRF for Person Re-Identification,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+6c182ec9bd85cf61b01c90955c81d71926b0198a,A Deeply-Recursive Convolutional Network For Crowd Counting,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+6c182ec9bd85cf61b01c90955c81d71926b0198a,A Deeply-Recursive Convolutional Network For Crowd Counting,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+6c70cad229cf3f02d3d490b42c7bd92c6eade1d1,Towards Good Practices on Building Effective CNN Baseline Model for Person Re-identification,"A*STAR, Singapore","Institute for Infocomm Research, A*STAR, Singapore","1 Fusionopolis Way, #21-01 Connexis, Singapore 138632",1.29889260,103.78731070,edu,
+6c61e496afd6577aa330b1f48ad0cec1d35b32d0,Deep Interactive Evolution,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+6c61e496afd6577aa330b1f48ad0cec1d35b32d0,Deep Interactive Evolution,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+6c61e496afd6577aa330b1f48ad0cec1d35b32d0,Deep Interactive Evolution,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+3920a205990abc7883c70cc96a0410a2d056c2a8,Fast Object Segmentation in Unconstrained Video,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+3920a205990abc7883c70cc96a0410a2d056c2a8,Fast Object Segmentation in Unconstrained Video,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+397f572e759aed28ffd4deb2d3acf18c991e8cf9,Associative Embeddings for Large-Scale Knowledge Transfer with Self-Assessment,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+397f343180dc4c9f40c1c706217956126a09d157,Face-Cap: Image Captioning using Facial Expression Analysis,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+391af839051826ec317a6ea61010734baf536551,Question-Guided Hybrid Convolution for Visual Question Answering,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+393a62cab9e2a1cc82c1663fdbbf1aefb781c36b,Chained Predictions Using Convolutional Neural Networks,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+390cc673792dbf47939f621aef5bb774ca01dc46,Isotropic Granularity-tunable gradients partition (IGGP) descriptors for human detection,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+396aacab076a3607429f58ce442d5d57b5aaa794,Semantic Instance Annotation of Street Scenes by 3D to 2D Label Transfer,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+3991223b1dc3b87883cec7af97cf56534178f74a,A unified framework for context assisted face clustering,"University of California, Irvine","University of California, Irvine","University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA",33.64319010,-117.84016494,edu,
+39db629b96eda72a23a49d54f32689e0651ca4ae,Applying artificial vision models to human scene understanding,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+39db629b96eda72a23a49d54f32689e0651ca4ae,Applying artificial vision models to human scene understanding,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+39db629b96eda72a23a49d54f32689e0651ca4ae,Applying artificial vision models to human scene understanding,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+39db629b96eda72a23a49d54f32689e0651ca4ae,Applying artificial vision models to human scene understanding,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+39db629b96eda72a23a49d54f32689e0651ca4ae,Applying artificial vision models to human scene understanding,Boston College,Boston College,"Boston College, 140, Commonwealth Avenue, Chestnut Hill, Newton, Middlesex County, Massachusetts, 02467, USA",42.33544810,-71.16813864,edu,
+3900fb44902396f94fb070be41199b4beecc9081,Bottom-Up Top-Down Cues for Weakly-Supervised Semantic Segmentation,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+396a19e29853f31736ca171a3f40c506ef418a9f,Real World Real-time Automatic Recognition of Facial Expressions,IBM Thomas J. Watson Research Center,IBM Thomas J. Watson Research Center,"IBM Yorktown research lab, Adams Road, Millwood, Town of New Castle, Westchester County, New York, 10562, USA",41.21002475,-73.80407056,company,
+39f48090df19dd0122590ef839226f8b2bcbe609,The MPI Emotional Body Expressions Database for Narrative Scenarios,Max Planck Institute for Biological Cybernetics,Max Planck Institute for Biological Cybernetics,"Max-Planck-Institut für Biologische Kybernetik, 8, Max-Planck-Ring, Max-Planck-Institut, Wanne, Tübingen, Landkreis Tübingen, Regierungsbezirk Tübingen, Baden-Württemberg, 72076, Deutschland",48.53691250,9.05922533,edu,
+39f48090df19dd0122590ef839226f8b2bcbe609,The MPI Emotional Body Expressions Database for Narrative Scenarios,Korea University,Korea University,"고려대, 안암로, 제기동, 동대문구, 서울특별시, 02796, 대한민국",37.59014110,127.03623180,edu,
+392d35bb359a3b61cca1360272a65690a97a2b3f,Multi-Task Transfer Methods to Improve One-Shot Learning for Multimedia Event Detection,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+398dbeafe5c96b90a243d408b1280524be5bbab2,VGAN-Based Image Representation Learning for Privacy-Preserving Facial Expression Recognition,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+398dbeafe5c96b90a243d408b1280524be5bbab2,VGAN-Based Image Representation Learning for Privacy-Preserving Facial Expression Recognition,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+398dbeafe5c96b90a243d408b1280524be5bbab2,VGAN-Based Image Representation Learning for Privacy-Preserving Facial Expression Recognition,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+39c48309b930396a5a8903fdfe781d3e40d415d0,Learning Spatial and Temporal Cues for Multi-Label Facial Action Unit Detection,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+39c48309b930396a5a8903fdfe781d3e40d415d0,Learning Spatial and Temporal Cues for Multi-Label Facial Action Unit Detection,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+39742f9b3a9f7adefbe936de68249148576b90da,Alcohol and remembering a hypothetical sexual assault: Can people who were under the influence of alcohol during the event provide accurate testimony?,Loughborough University,Loughborough University,"Computer Science, University Road, Charnwood, Leicestershire, East Midlands, England, LE11 3TP, UK",52.76635770,-1.22924610,edu,
+39bbe9885ad1e12e79bc620d83f7768d2fc04994,Autism is characterized by dorsal anterior cingulate hyperactivation during social target detection.,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+39b741be40e093f92519cd15cd2deb6e114d6200,Joint Probabilistic Data Association Revisited,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+39b741be40e093f92519cd15cd2deb6e114d6200,Joint Probabilistic Data Association Revisited,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+3947fe473d8cfa443ea4cf6571d2aebe7b2066b6,Evolutionary Architecture Search For Deep Multitask Networks,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+39c254cd706b9fb89b369b41b1c4d3949cb554f8,DNA-GAN: Learning Disentangled Representations from Multi-Attribute Images,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+39f525f3a0475e6bbfbe781ae3a74aca5b401125,Deep Joint Face Hallucination and Recognition,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+39f525f3a0475e6bbfbe781ae3a74aca5b401125,Deep Joint Face Hallucination and Recognition,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+39f525f3a0475e6bbfbe781ae3a74aca5b401125,Deep Joint Face Hallucination and Recognition,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+39f525f3a0475e6bbfbe781ae3a74aca5b401125,Deep Joint Face Hallucination and Recognition,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+3946b8f862ecae64582ef0912ca2aa6d3f6f84dc,Who and Where: People and Location Co-Clustering,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+3933416f88c36023a0cba63940eb92f5cef8001a,Learning Robust Subspace Clustering,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+39c118c4f3c02daf7edcf207dfc690814967e8e8,Simultaneous alignment and clustering for an image ensemble,GE Global Research Center,GE Global Research Center,"GE Global Research Center, Aqueduct, Niskayuna, Schenectady County, New York, USA",42.82982480,-73.87719385,edu,
+39150acac6ce7fba56d54248f9c0badbfaeef0ea,"Digital Signal Processing for in-Vehicle and mobile systems, Istanbul, Turkey, June 2007. MACHINE LEARNING SYSTEMS FOR DETECTING DRIVER DROWSINESS",Sabanci University,Sabanci University,"Sabanci Universitesi, Preveze Cad., Orta Mahallesi, Tepeören, Tuzla, İstanbul, Marmara Bölgesi, 34953, Türkiye",40.89271590,29.37863323,edu,
+39e55283e6eb3f0f9db07cf1b20e0de8d5aac10e,Diverse Image Captioning via GroupTalk,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+39c54d22a0f29605f96ab57720cde8c6aa743c10,Human Instance Segmentation from Video using Detector-based Conditional Random Fields,Oxford Brookes University,Oxford Brookes University,"Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK",51.75552050,-1.22615970,edu,
+39c54d22a0f29605f96ab57720cde8c6aa743c10,Human Instance Segmentation from Video using Detector-based Conditional Random Fields,Oxford University,Oxford University,"University College, Logic Lane, Grandpont, Oxford, Oxon, South East, England, OX1 4EX, UK",51.75208490,-1.25166460,edu,
+39f03d1dfd94e6f06c1565d7d1bb14ab0eee03bc,Simultaneous Local Binary Feature Learning and Encoding for Face Recognition,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+39f03d1dfd94e6f06c1565d7d1bb14ab0eee03bc,Simultaneous Local Binary Feature Learning and Encoding for Face Recognition,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+3983370efe7a7521bde255017171724d845b3383,Learning Discriminators as Energy Networks in Adversarial Learning,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+3983370efe7a7521bde255017171724d845b3383,Learning Discriminators as Energy Networks in Adversarial Learning,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+3983370efe7a7521bde255017171724d845b3383,Learning Discriminators as Energy Networks in Adversarial Learning,University of Iowa,University of Iowa,"University of Iowa, Hawkeye Court, Iowa City, Johnson County, Iowa, 52246, USA",41.66590000,-91.57310307,edu,
+3983370efe7a7521bde255017171724d845b3383,Learning Discriminators as Energy Networks in Adversarial Learning,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+395e007cf11dd3082d059b8c96dcffae628ffb4f,Learning-Based Incremental Creation of Web Image Databases,Alexandria University,Alexandria University,"جامعة الإسكندرية, الكورنيش, إبراهيمية, الإسكندرية, 21522, مصر",31.21051105,29.91314562,edu,
+394bf41cd8578ec10cd34452c688c3e3de1c16a7,Multi-view to Novel View: Synthesizing Novel Views With Self-learned Confidence,National Tsing Hua University,National Tsing Hua University,"國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.79254840,120.99511830,edu,
+397349476582198639abc7a8b933e350cbc24c37,2D&3D-ComFusFace: 2D and 3D Face Recognition by Scalable Fusion of Common Features,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+397349476582198639abc7a8b933e350cbc24c37,2D&3D-ComFusFace: 2D and 3D Face Recognition by Scalable Fusion of Common Features,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+3991c704ef1030c5bfead2b58463d39842b52985,Can Facial Uniqueness be Inferred from Impostor Scores?,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+3983637022992a329f1d721bed246ae76bc934f7,Wide-baseline stereo for face recognition with large pose variation,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+39ecdbad173e45964ffe589b9ced9f1ebfe2d44e,Automatic recognition of lower facial action units,Vrije Universiteit Brussel,Vrije Universiteit Brussel,"Vrije Universiteit Brussel, 170, Quai de l'Industrie - Nijverheidskaai, Anderlecht, Brussel-Hoofdstad - Bruxelles-Capitale, Région de Bruxelles-Capitale - Brussels Hoofdstedelijk Gewest, 1070, België / Belgique / Belgien",50.84110070,4.32377555,edu,
+99e9ae76ee720314a90968be5f889d233c67054c,A Window to Your Smartphone: Exploring Interaction and Communication in Immersive VR with Augmented Virtuality,Memorial University of Newfoundland,Memorial University of Newfoundland,"Memorial University of Newfoundland, Overpass, St. John's, Newfoundland and Labrador, A1B 5S7, Canada",47.57272510,-52.73305444,edu,
+995495e36f4a2af999875ea4f197ca98c5e5c8de,Dynamic Task Prioritization for Multitask Learning,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+999289b0ef76c4c6daa16a4f42df056bf3d68377,The Role of Color and Contrast in Facial Age Estimation,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+999289b0ef76c4c6daa16a4f42df056bf3d68377,The Role of Color and Contrast in Facial Age Estimation,Delft University of Technology,Delft University of Technology,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland",51.99882735,4.37396037,edu,
+99911a8effd2ab3af4b4ba802920f3e1720a83e6,Integral Human Pose Regression,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+99911a8effd2ab3af4b4ba802920f3e1720a83e6,Integral Human Pose Regression,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+99911a8effd2ab3af4b4ba802920f3e1720a83e6,Integral Human Pose Regression,Tongji University,Tongji University,"同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国",31.28473925,121.49694909,edu,
+9941a52ef4db2eb338eec061a950af6a95f82510,Encoding Neuroanatomical Information using Weighted Spherical Harmonic Representation,University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+99c69fe118efbc47efc91ceaa3b2e711405eef20,Scale-Adaptive Convolutions for Scene Parsing,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+99c69fe118efbc47efc91ceaa3b2e711405eef20,Scale-Adaptive Convolutions for Scene Parsing,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+999cdddf1ca23e4a72028d2a88537cf4a7aa9396,Hyperprior Induced Unsupervised Disentanglement of Latent Representations,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+9954f7ee5288724184f9420e39cca9165efa6822,Estimation of object functions using deformable part model,Kobe University,Kobe University,"神戸大学, 灘三田線, 灘区, 神戸市, 兵庫県, 近畿地方, 657-00027, 日本",34.72757140,135.23710000,edu,
+9954f7ee5288724184f9420e39cca9165efa6822,Estimation of object functions using deformable part model,Kobe University,Kobe University,"神戸大学, 灘三田線, 灘区, 神戸市, 兵庫県, 近畿地方, 657-00027, 日本",34.72757140,135.23710000,edu,
+9954f7ee5288724184f9420e39cca9165efa6822,Estimation of object functions using deformable part model,Kobe University,Kobe University,"神戸大学, 灘三田線, 灘区, 神戸市, 兵庫県, 近畿地方, 657-00027, 日本",34.72757140,135.23710000,edu,
+9937a4d3fa66c0eea48b2090b5a9b6c51a1cce66,Human Pose Estimation Using Global and Local Normalization,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+9937a4d3fa66c0eea48b2090b5a9b6c51a1cce66,Human Pose Estimation Using Global and Local Normalization,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+9937a4d3fa66c0eea48b2090b5a9b6c51a1cce66,Human Pose Estimation Using Global and Local Normalization,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+995d55fdf5b6fe7fb630c93a424700d4bc566104,The One Triangle Three Parallelograms Sampling Strategy and Its Application in Shape Regression,Lund University,Lund University,"TEM at Lund University, 9, Klostergatan, Stadskärnan, Centrum, Lund, Skåne, Götaland, 22222, Sverige",55.70395710,13.19020110,edu,
+993d189548e8702b1cb0b02603ef02656802c92b,Highly-Economized Multi-View Binary Compression for Scalable Image Clustering,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+993d189548e8702b1cb0b02603ef02656802c92b,Highly-Economized Multi-View Binary Compression for Scalable Image Clustering,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"Columbus, OH 43210, USA",40.01419050,-83.03091430,edu,
+992eca71ee8314ede9bf680b6966730f6bb77bc5,Likability’s Effect on Interpersonal Motor Coordination: Exploring Natural Gaze Direction,Shenzhen University,Shenzhen University,"深圳大学, 3688, 南海大道, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518060, 中国",22.53521465,113.93159110,edu,
+992eca71ee8314ede9bf680b6966730f6bb77bc5,Likability’s Effect on Interpersonal Motor Coordination: Exploring Natural Gaze Direction,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+99b41df501f25f4aee9c1f94a75510b2fbcc6bed,Impaired social brain network for processing dynamic facial expressions in autism spectrum disorders,Kyoto University,Kyoto University,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.02749960,135.78154513,edu,
+99bc96eea249e28b3e741fbe15757a38d52631bc,Streaming Behaviour : Live Streaming as a Paradigm for Multiview Analysis of Emotional and Social Signals,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+99bc96eea249e28b3e741fbe15757a38d52631bc,Streaming Behaviour : Live Streaming as a Paradigm for Multiview Analysis of Emotional and Social Signals,"London, United Kingdom","London, United Kingdom","London, Greater London, England, SW1A 2DU, UK",51.50732190,-0.12764740,edu,
+99bc96eea249e28b3e741fbe15757a38d52631bc,Streaming Behaviour : Live Streaming as a Paradigm for Multiview Analysis of Emotional and Social Signals,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+99bc96eea249e28b3e741fbe15757a38d52631bc,Streaming Behaviour : Live Streaming as a Paradigm for Multiview Analysis of Emotional and Social Signals,"London, United Kingdom","London, United Kingdom","London, Greater London, England, SW1A 2DU, UK",51.50732190,-0.12764740,edu,
+9952d6630a2fcadf34e356de07ebd2254651c95e,Dual-Feature Bayesian MAP Classification: Exploiting Temporal Information for Video-Based Face Recognition,Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.64185301,edu,
+9952d6630a2fcadf34e356de07ebd2254651c95e,Dual-Feature Bayesian MAP Classification: Exploiting Temporal Information for Video-Based Face Recognition,Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.64185301,edu,
+9947687ffe0bd2d6cd4fe717e534cfcb59302a4e,Data-driven photographic style using local transfer,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+994b52bf884c71a28b4f5be4eda6baaacad1beee,Categorizing Big Video Data on the Web: Challenges and Opportunities,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+99783c792947f17e41c94ddaac31766277809049,Switching Convolutional Neural Network for Crowd Counting,Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.02223470,77.56718325,edu,
+995dd15671993b2165860c54bf5acbbe421c5f45,Learning a Context Aware Dictionary for Sparse Representation,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+99029377dac51a3f60063f61cdc5471866c348be,Making Third Person Techniques Recognize First-Person Actions in Egocentric Videos,"IIIT Delhi, India","IIIT Delhi, India","Okhla Industrial Estate, Phase III, Near Govind Puri Metro Station, New Delhi, Delhi 110020, India",28.54562820,77.27315050,edu,
+99001ac9fdaf7649c0d0bd8d2078719bafd216d9,General Tensor Discriminant Analysis and Gabor Features for Gait Recognition,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+99001ac9fdaf7649c0d0bd8d2078719bafd216d9,General Tensor Discriminant Analysis and Gabor Features for Gait Recognition,University of Vermont,University of Vermont,"University of Vermont, Colchester Avenue, Burlington, Chittenden County, Vermont, 05401, USA",44.48116865,-73.20021790,edu,
+992b93ab9d016640551a8cebcaf4757288154f32,Nested Pictorial Structures,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+9901f473aeea177a55e58bac8fd4f1b086e575a4,Human and sheep facial landmarks localisation by triplet interpolated features,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+990bf0171deae7f788f4867c155a276fca5c891a,An Overview of First Person Vision and Egocentric Video Analysis for Personal Mobile Wearable Devices,Eindhoven University of Technology,Eindhoven University of Technology,"Technische Universiteit Eindhoven, 2, De Rondom, Villapark, Eindhoven, Noord-Brabant, Nederland, 5600 MB, Nederland",51.44866020,5.49039957,edu,
+99facca6fc50cc30f13b7b6dd49ace24bc94f702,VIPLFaceNet: an open source deep face recognition SDK,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+99facca6fc50cc30f13b7b6dd49ace24bc94f702,VIPLFaceNet: an open source deep face recognition SDK,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+99eddbd03e39c86260e282c7a0993617710d5cb1,An Adversarial Neuro-Tensorial Approach For Learning Disentangled Representations,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+99eddbd03e39c86260e282c7a0993617710d5cb1,An Adversarial Neuro-Tensorial Approach For Learning Disentangled Representations,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+99daa2839213f904e279aec7cef26c1dfb768c43,DocFace: Matching ID Document Photos to Selfies,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+523854a7d8755e944bd50217c14481fe1329a969,A Differentially Private Kernel Two-Sample Test,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+523854a7d8755e944bd50217c14481fe1329a969,A Differentially Private Kernel Two-Sample Test,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+52d9477a8293d44b0f8be5c07d56d468d035b0b0,The Power of Randomization: Distributed Submodular Maximization on Massive Datasets,University of Warwick,University of Warwick,"University of Warwick, University Road, Kirby Corner, Cannon Park, Coventry, West Midlands Combined Authority, West Midlands, England, CV4 7AL, UK",52.37931310,-1.56042520,edu,
+52d9477a8293d44b0f8be5c07d56d468d035b0b0,The Power of Randomization: Distributed Submodular Maximization on Massive Datasets,University of Warwick,University of Warwick,"University of Warwick, University Road, Kirby Corner, Cannon Park, Coventry, West Midlands Combined Authority, West Midlands, England, CV4 7AL, UK",52.37931310,-1.56042520,edu,
+52d9477a8293d44b0f8be5c07d56d468d035b0b0,The Power of Randomization: Distributed Submodular Maximization on Massive Datasets,University of Warwick,University of Warwick,"University of Warwick, University Road, Kirby Corner, Cannon Park, Coventry, West Midlands Combined Authority, West Midlands, England, CV4 7AL, UK",52.37931310,-1.56042520,edu,
+521cfbc1949289a7ffc3ff90af7c55adeb43db2a,Action Recognition with Coarse-to-Fine Deep Feature Integration and Asynchronous Fusion,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+521cfbc1949289a7ffc3ff90af7c55adeb43db2a,Action Recognition with Coarse-to-Fine Deep Feature Integration and Asynchronous Fusion,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+521cfbc1949289a7ffc3ff90af7c55adeb43db2a,Action Recognition with Coarse-to-Fine Deep Feature Integration and Asynchronous Fusion,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+529073b49fb28e8919d6862f2ae445477c7337bd,Low-Rank Embedded Ensemble Semantic Dictionary for Zero-Shot Learning,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+529073b49fb28e8919d6862f2ae445477c7337bd,Low-Rank Embedded Ensemble Semantic Dictionary for Zero-Shot Learning,University of Massachusetts Dartmouth,University of Massachusetts Dartmouth,"University of Massachusetts Dartmouth, University Ring Road, Dartmouth, Bristol County, Massachusetts, 02747, USA",41.62772475,-71.00724501,edu,
+529073b49fb28e8919d6862f2ae445477c7337bd,Low-Rank Embedded Ensemble Semantic Dictionary for Zero-Shot Learning,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+520d586b50ecaa9753f714c6e76e6b819663d1a4,On the Dimensionality Reduction for Sparse Representation Based Face Recognition,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+525eb080b158a492bfd02b421891c7383303dac5,Learning Deep Models for Face Anti-Spoofing: Binary or Auxiliary Supervision,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+5288e1e7e914f73bf65c745f328844907226cd3e,Learning Deep Binary Descriptor with Multi-quantization,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+5288e1e7e914f73bf65c745f328844907226cd3e,Learning Deep Binary Descriptor with Multi-quantization,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+520796fed11df39bba7ea03844f4f465a6bf0655,"Investigation of Multimodal Features, Classifiers and Fusion Methods for Emotion Recognition",Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+520796fed11df39bba7ea03844f4f465a6bf0655,"Investigation of Multimodal Features, Classifiers and Fusion Methods for Emotion Recognition",Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+520796fed11df39bba7ea03844f4f465a6bf0655,"Investigation of Multimodal Features, Classifiers and Fusion Methods for Emotion Recognition",Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+528c03761682f73eed7d736c19551856fe92b1e1,"Uncovering Interactions and Interactors: Joint Estimation of Head, Body Orientation and F-Formations from Surveillance Videos","University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+528c03761682f73eed7d736c19551856fe92b1e1,"Uncovering Interactions and Interactors: Joint Estimation of Head, Body Orientation and F-Formations from Surveillance Videos",University of Perugia,University of Perugia,"Caffe Perugia, 2350, Health Sciences Mall, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.26224210,-123.24500520,edu,
+528c03761682f73eed7d736c19551856fe92b1e1,"Uncovering Interactions and Interactors: Joint Estimation of Head, Body Orientation and F-Formations from Surveillance Videos","University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+5253c94f955146ba7d3566196e49fe2edea1c8f4,Internet Based Morphable Model,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+529b1f33aed49dbe025a99ac1d211c777ad881ec,Fast and exact bi-directional fitting of active appearance models,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+529b1f33aed49dbe025a99ac1d211c777ad881ec,Fast and exact bi-directional fitting of active appearance models,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+523b2cbc48decfabffb66ecaeced4fe6a6f2ac78,Photorealistic facial expression synthesis by the conditional difference adversarial autoencoder,Hong Kong University of Science and Technology,Hong Kong University of Science and Technology,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.33863040,114.26203370,edu,
+52472ec859131844f38fc7d57944778f01d109ac,Improving Speaker Turn Embedding by Crossmodal Transfer Learning from Face Embedding,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+52c89ca39a9fcad716e1e43c0bd4e40101c15d64,Robust Face Recognition via Sparse Representation,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+52c89ca39a9fcad716e1e43c0bd4e40101c15d64,Robust Face Recognition via Sparse Representation,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+5287d8fef49b80b8d500583c07e935c7f9798933,Generative Adversarial Text to Image Synthesis,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+5287d8fef49b80b8d500583c07e935c7f9798933,Generative Adversarial Text to Image Synthesis,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+529b933b0dc9c657cf829fd9bb7ff7c47d5e6d19,Integrating crowd simulation for pedestrian tracking in a multi-camera system,"University of California, Riverside","University of California, Riverside","University of California, Riverside, Linden Street, Riverside, Riverside County, California, 92521, USA",33.98071305,-117.33261035,edu,
+52c59f9f4993c8248dd3d2d28a4946f1068bcbbe,Structural similarity and distance in learning,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+52c59f9f4993c8248dd3d2d28a4946f1068bcbbe,Structural similarity and distance in learning,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+52c59f9f4993c8248dd3d2d28a4946f1068bcbbe,Structural similarity and distance in learning,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+52bf00df3b970e017e4e2f8079202460f1c0e1bd,Learning High-level Prior with Convolutional Neural Networks for Semantic Segmentation,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+52bf00df3b970e017e4e2f8079202460f1c0e1bd,Learning High-level Prior with Convolutional Neural Networks for Semantic Segmentation,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+52bf00df3b970e017e4e2f8079202460f1c0e1bd,Learning High-level Prior with Convolutional Neural Networks for Semantic Segmentation,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+52bf00df3b970e017e4e2f8079202460f1c0e1bd,Learning High-level Prior with Convolutional Neural Networks for Semantic Segmentation,Hong Kong University of Science and Technology,Hong Kong University of Science and Technology,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.33863040,114.26203370,edu,
+52c91fcf996af72d191520d659af44e310f86ef9,Interactive Image Search with Attribute-based Guidance and Personalization,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+52f8eb239997d9a324d4794529c60522db8d08bf,Learning Multi-scale Block Local Binary Patterns for Face Recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+52572058f015761f2113aa25a341c607a286fca4,Real-Time Simultaneous Pose and Shape Estimation for Articulated Objects Using a Single Depth Camera,University of Kentucky,University of Kentucky,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA",38.03337420,-84.50177580,edu,
+52885fa403efbab5ef21274282edd98b9ca70cbf,Discriminant Graph Structures for Facial Expression Recognition,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+52885fa403efbab5ef21274282edd98b9ca70cbf,Discriminant Graph Structures for Facial Expression Recognition,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+527dd9647c474490ac33ac5b0a19aa76b226610d,Intact perception but abnormal orientation towards face-like objects in young children with ASD,University of Toulouse,University of Toulouse,"Toulouse, Lake Charles, Calcasieu Parish, Louisiana, 70605, USA",30.17818160,-93.23605810,edu,
+5284e9d84ef74683c306314e7a79786438514c90,Exploring the Long Tail of Social Media Tags,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+52a152b985b298be4b382d0b6045e31f43850c6f,Rank Persistence: Assessing the Temporal Performance of Real-World Person Re-Identification,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+52b0104a43f55c5652001c06dfabfc4c327018bf,Hybrid-Indexing Multi-type Features for Large-Scale Image Search,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+52b0104a43f55c5652001c06dfabfc4c327018bf,Hybrid-Indexing Multi-type Features for Large-Scale Image Search,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+52bfa4a8b3e3b8e0c0031ae53caddb4c067c04e3,Procrustean Normal Distribution for Non-Rigid Structure from Motion,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+52049fb96156729ce0ad88f86fa617ecf7d237e1,Book chapter for Machine Learning for Human Motion Analysis: Theory and Practice,IBM Thomas J. Watson Research Center,IBM Thomas J. Watson Research Center,"IBM Yorktown research lab, Adams Road, Millwood, Town of New Castle, Westchester County, New York, 10562, USA",41.21002475,-73.80407056,company,
+52049fb96156729ce0ad88f86fa617ecf7d237e1,Book chapter for Machine Learning for Human Motion Analysis: Theory and Practice,"University of California, Santa Barbara","University of California, Santa Barbara","UCSB, Santa Barbara County, California, 93106, USA",34.41459370,-119.84581950,edu,
+52d7eb0fbc3522434c13cc247549f74bb9609c5d,WIDER FACE: A Face Detection Benchmark,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+528069963f0bd0861f380f53270c96c269a3ea1c,4D (3D Dynamic) statistical models of conversational expressions and the synthesis of highly-realistic 4D facial expression sequences,Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.48799610,-3.17969747,edu,
+52bc0f02e34ed1e2ce1f77d8f07aea2b87813e89,Face and Eye Detection for Person Authentication in Mobile Phones,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+523303e477d3b5f27373047c576b9b6dbe478f8d,Everyday Eye Contact Detection Using Unsupervised Gaze Target Discovery,Osaka University,Osaka University,"大阪大学清明寮, 服部西町四丁目, 豊中市, 大阪府, 近畿地方, 日本",34.80809035,135.45785218,edu,
+5240941af3b263609acaa168f96e1decdb0b3fe4,Action classification in still images using human eye movements,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+5257c447f9c50ee8bb2011fb72f8bd40bc0291d8,Automatic gait recognition using area-based metrics,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+556b9aaf1bc15c928718bc46322d70c691111158,Exploiting qualitative domain knowledge for learning Bayesian network parameters with incomplete data,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+55e4cf29055d1556baf72cd17d2bdb692c8554c0,Do deep features retrieve X?: A tool for quick inspection of deep visual similarities,University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+55089f9bc858ae7e9addf30502ac11be4347c05a,A Privacy-Preserving Deep Learning Approach for Face Recognition with Edge Computing,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+55089f9bc858ae7e9addf30502ac11be4347c05a,A Privacy-Preserving Deep Learning Approach for Face Recognition with Edge Computing,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+55089f9bc858ae7e9addf30502ac11be4347c05a,A Privacy-Preserving Deep Learning Approach for Face Recognition with Edge Computing,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+55089f9bc858ae7e9addf30502ac11be4347c05a,A Privacy-Preserving Deep Learning Approach for Face Recognition with Edge Computing,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+554f10d7b8933e9590551a4f891d034b9b8e8642,Learning Individualized Facial Expressions in an Avatar with PSO and Tabu Search,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+553ffb04c193eedde286c944f4816d46248d9822,"Hi, magic closet, tell me what to wear!",National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+558fc9a2bce3d3993a9c1f41b6c7f290cefcf92f,Efficient and Effective Solutions for Video Classification,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+558fc9a2bce3d3993a9c1f41b6c7f290cefcf92f,Efficient and Effective Solutions for Video Classification,University Politehnica of Bucharest,University Politehnica of Bucharest,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România",44.43918115,26.05044565,edu,
+55138c2b127ebdcc508503112bf1d1eeb5395604,Ensemble Nystrom Method,Courant Institute of Mathematical Sciences,Courant Institute of Mathematical Sciences,"Courant Institute of Mathematical Sciences, 251, Mercer Street, Washington Square Village, Greenwich Village, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72864840,-73.99568630,edu,
+55af4918d6b20d13c58c482d7e31e17db53c6ab5,When Fashion Meets Big Data: Discriminative Mining of Best Selling Clothing Features,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+55af4918d6b20d13c58c482d7e31e17db53c6ab5,When Fashion Meets Big Data: Discriminative Mining of Best Selling Clothing Features,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+55bb6235eaec0459183b5442f46501d29b824a9b,Re-identification of persons in multi-camera surveillance under varying viewpoints and illumination,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+55e18e0dde592258882134d2dceeb86122b366ab,Training a Multilingual Sportscaster: Using Perceptual Context to Learn Language,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+55206f0b5f57ce17358999145506cd01e570358c,O M 4 . 1 The Subject Database 4 . 2 Experiment Plan 5 . 1 Varying the Overlap 4 Experimental Setup 5 Parameterisation Results,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+55d0eaf7393bb6bd0483c98894f16269d275c2bd,MMGAN: Manifold Matching Generative Adversarial Network,University of North Carolina,University of North Carolina,"University of North Carolina, Emergency Room Drive, Chapel Hill, Orange County, North Carolina, 27599, USA",35.90503535,-79.04775327,edu,
+55d0eaf7393bb6bd0483c98894f16269d275c2bd,MMGAN: Manifold Matching Generative Adversarial Network,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+55d0eaf7393bb6bd0483c98894f16269d275c2bd,MMGAN: Manifold Matching Generative Adversarial Network,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+55d0eaf7393bb6bd0483c98894f16269d275c2bd,MMGAN: Manifold Matching Generative Adversarial Network,University of Maryland College Park,University of Maryland College Park,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",38.99203005,-76.94610290,edu,
+55d0eaf7393bb6bd0483c98894f16269d275c2bd,MMGAN: Manifold Matching Generative Adversarial Network,Korea University,Korea University,"고려대, 안암로, 제기동, 동대문구, 서울특별시, 02796, 대한민국",37.59014110,127.03623180,edu,
+55d0eaf7393bb6bd0483c98894f16269d275c2bd,MMGAN: Manifold Matching Generative Adversarial Network,"IIIT Delhi, India","IIIT Delhi, India","Okhla Industrial Estate, Phase III, Near Govind Puri Metro Station, New Delhi, Delhi 110020, India",28.54562820,77.27315050,edu,
+55eb5691479268718627a39237fadbe649b34ecc,Bayesian Optimization with an Empirical Hardness Model for approximate Nearest Neighbour Search,University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+55f2626b7250b3b24dd0d2bab3ef3c3bbd9b3758,Answering Image Riddles using Vision and Reasoning through Probabilistic Soft Logic,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+55f2626b7250b3b24dd0d2bab3ef3c3bbd9b3758,Answering Image Riddles using Vision and Reasoning through Probabilistic Soft Logic,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+5583a131ab89aef81cee3e60d32160685c24d694,Gabor-feature-based local generic representation for face recognition with single sample per person,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+5583a131ab89aef81cee3e60d32160685c24d694,Gabor-feature-based local generic representation for face recognition with single sample per person,Shenzhen University,Shenzhen University,"深圳大学, 3688, 南海大道, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518060, 中国",22.53521465,113.93159110,edu,
+5583a131ab89aef81cee3e60d32160685c24d694,Gabor-feature-based local generic representation for face recognition with single sample per person,Yonsei University,Yonsei University,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.56004060,126.93692480,edu,
+55966926e7c28b1eee1c7eb7a0b11b10605a1af0,Surpassing Human-Level Face Verification Performance on LFW with GaussianFace,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+552c55c71bccfc6de7ce1343a1cd12208e9a63b3,Accurate eye center location and tracking using isophote curvature,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+555f5ef266335af8189714297ccbcd6ab77d83f2,Marginalized CNN: Learning Deep Invariant Representations,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+5517b28795d7a68777c9f3b2b46845dcdb425b2c,Deep video gesture recognition using illumination invariants,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+55e87050b998eb0a8f0b16163ef5a28f984b01fa,Can you Find a Face in a HEVC Bitstream?,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+55bc7abcef8266d76667896bbc652d081d00f797,Impact of facial cosmetics on automatic gender and age estimation algorithms,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+55bc7abcef8266d76667896bbc652d081d00f797,Impact of facial cosmetics on automatic gender and age estimation algorithms,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+55b4b1168c734eeb42882082bd131206dbfedd5b,Learning to Align from Scratch,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+55b4b1168c734eeb42882082bd131206dbfedd5b,Learning to Align from Scratch,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+550c8162757b9fb649efab8529d86daa99700fb1,Athlete Pose Estimation by a Global-Local Network,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+555f75077a02f33a05841f9b63a1388ec5fbcba5,A Survey on Periocular Biometrics Research,Halmstad University,Halmstad University,"Högskolan i Halmstad, 3, Kristian IV:s väg, Larsfrid, Nyhem, Halmstad, Hallands län, Götaland, 301 18, Sverige",56.66340325,12.87929727,edu,
+55804f85613b8584d5002a5b0ddfe86b0d0e3325,Data Complexity in Machine Learning,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+557890cef6e9285909904fa141ccddddc0da90dd,Target Identity-aware Network Flow for online multiple target tracking,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+557890cef6e9285909904fa141ccddddc0da90dd,Target Identity-aware Network Flow for online multiple target tracking,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+55c0113534c62b7f3f238210cf501b42d91cc33a,Hand Keypoint Detection in Single Images Using Multiview Bootstrapping,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+55eb7ec9b9740f6c69d6e62062a24bfa091bbb0c,CAS(ME)2: A Database of Spontaneous Macro-expressions and Micro-expressions,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+55eb7ec9b9740f6c69d6e62062a24bfa091bbb0c,CAS(ME)2: A Database of Spontaneous Macro-expressions and Micro-expressions,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+55eb7ec9b9740f6c69d6e62062a24bfa091bbb0c,CAS(ME)2: A Database of Spontaneous Macro-expressions and Micro-expressions,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+55b9b1c1c5487f5f62b44340104a9c4cc2ed7c96,The Color of the Cat is Gray: 1 Million Full-Sentences Visual Question Answering (FSVQA),University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+558540d73fec6fd3856fe0695ad8d9c0b5fe1773,Type-hover-swipe in 96 bytes: a motion sensing mechanical keyboard,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+55249d73df3c38aca08f45a60ff54d9ac8b678a0,General Regression and Representation Model for Face Recognition,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+97463b5a0fef72a576367f55d46aa3eb7576ae01,Methodical Analysis of Western-Caucasian and East-Asian Basic Facial Expressions of Emotions Based on Specific Facial Regions,University of Electro-Communications,The University of Electro-Communications,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本",35.65729570,139.54255868,edu,
+97d3708dfcae89cbcbd260029601f2c1de4d7017,Semantic Localisation via Globally Unique Instance Segmentation,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+97ffadd639eb27d73b86fd5520d9d6b81772b891,Deep Generative Models for Distribution-Preserving Lossy Compression,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+970b4d2ed1249af97cdf2fffdc7b4beae458db89,HMDB: A large video database for human motion recognition,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+970b4d2ed1249af97cdf2fffdc7b4beae458db89,HMDB: A large video database for human motion recognition,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+97b3185d948c45a00a190ce0a26abd23e77c9edf,Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture,South China University of Technology,South China University of Technology,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.05020420,113.39880323,edu,
+9788b491ddc188941dadf441fc143a4075bff764,LOGAN: Membership Inference Attacks Against Generative Models∗,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+977beecdf0b5c3487d03738cff501c79770f0858,"Show, Reward and Tell: Automatic Generation of Narrative Paragraph from Photo Stream by Adversarial Training",Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+977beecdf0b5c3487d03738cff501c79770f0858,"Show, Reward and Tell: Automatic Generation of Narrative Paragraph from Photo Stream by Adversarial Training",Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+974b9f0af3af675c092b96e7ac68e391cffdcf49,Person Re-identification Using Data-Driven Metric Adaptation,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+974b9f0af3af675c092b96e7ac68e391cffdcf49,Person Re-identification Using Data-Driven Metric Adaptation,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+97711a255ead64265fe3736ce8a2392ef5c75ff0,Cross-Dataset Person Re-identification Using Similarity Preserved Generative Adversarial Networks,South China University of Technology,South China University of Technology,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.05020420,113.39880323,edu,
+970c0d6c0fd2ebe7c5921a45bc70f6345c844ff3,Discriminative Log-Euclidean Feature Learning for Sparse Representation-Based Recognition of Faces from Videos,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+97f665219a42faa8e625625257cc35f5dcbaf0ba,Multi-View Pose and Facial Expression Recognition,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+9716416a15e79a36e3481bcdad79cdc905603e6d,Gaussian Word Embedding with a Wasserstein Distance Loss,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+9716416a15e79a36e3481bcdad79cdc905603e6d,Gaussian Word Embedding with a Wasserstein Distance Loss,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+974cadd15684c96618d04f845794cec5568a86a6,Greedy Inference Algorithms for Structured and Neural Models,Virginia Polytechnic Institute and State University,Virginia Polytechnic Institute and State University,"Virginia Polytechnic Institute and State University, Duck Pond Drive, Blacksburg, Montgomery County, Virginia, 24061-9517, USA",37.21872455,-80.42542519,edu,
+97b930a4fa4670a609b6ee8811409090fe55b313,Integrating Gaze Tracking and Head-Motion Prediction for Mobile Device Authentication: A Proof of Concept,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+97b930a4fa4670a609b6ee8811409090fe55b313,Integrating Gaze Tracking and Head-Motion Prediction for Mobile Device Authentication: A Proof of Concept,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+97137d5154a9f22a5d9ecc32e8e2b95d07a5a571,Facial expression recognition based on local region specific features and support vector machines,Korea Electronics Technology Institute,Korea Electronics Technology Institute,"South Korea, Gyeonggi-do, Seongnam-si, Bundang-gu, 새나리로 25 (야탑동) KETI 전자부품연구원",37.40391700,127.15978600,edu,
+9730b9cd998c0a549601c554221a596deda8af5b,Spatio-Temporal Person Retrieval via Natural Language Queries,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+97265d64859e06900c11ae5bb5f03f3bd265f858,Multilabel Image Classification With Regional Latent Semantic Dependencies,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+97265d64859e06900c11ae5bb5f03f3bd265f858,Multilabel Image Classification With Regional Latent Semantic Dependencies,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+97265d64859e06900c11ae5bb5f03f3bd265f858,Multilabel Image Classification With Regional Latent Semantic Dependencies,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+978a219e07daa046244821b341631c41f91daccd,Emotional Intelligence: Giving Computers Effective Emotional Skills to Aid Interaction,University of Birmingham,University of Birmingham,"University of Birmingham Edgbaston Campus, Ring Road North, Bournbrook, Birmingham, West Midlands Combined Authority, West Midlands, England, B15 2TP, UK",52.45044325,-1.93196134,edu,
+976e0264bb57786952a987d4456850e274714fb8,Improving Semantic Concept Detection through the Dictionary of Visually-Distinct Elements,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+9758f3fd94239a8d974217fe12599f88fb413f3d,UC-HCC Submission to Thumos 2014,University of Canberra,University of Canberra,"University of Canberra, University Drive, Bruce, Belconnen, Australian Capital Territory, 2617, Australia",-35.23656905,149.08446994,edu,
+97e569159d5658760eb00ca9cb662e6882d2ab0e,Correlation Filters for Object Alignment,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+97e569159d5658760eb00ca9cb662e6882d2ab0e,Correlation Filters for Object Alignment,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+97e569159d5658760eb00ca9cb662e6882d2ab0e,Correlation Filters for Object Alignment,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+9772ccb519268f067da7707fc199ad942ac63c42,New approaches of ensemble learning and transfer learning for image classificaion,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+97295e92dfe49f37de65c5130097ccab84cfe2f7,Inner Space Preserving Generative Pose Machine,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+9785429538389146c8061ec856e74e957a246f2d,DARI: Distance Metric and Representation Integration for Person Verification,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+975978ee6a32383d6f4f026b944099e7739e5890,Privacy-Preserving Age Estimation for Content Rating,University of Manitoba,University of Manitoba,"University of Manitoba, Gillson Street, Normand Park, Saint Vital, Winnipeg, Manitoba, R3T 2N2, Canada",49.80915360,-97.13304179,edu,
+975978ee6a32383d6f4f026b944099e7739e5890,Privacy-Preserving Age Estimation for Content Rating,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+975978ee6a32383d6f4f026b944099e7739e5890,Privacy-Preserving Age Estimation for Content Rating,University of Manitoba,University of Manitoba,"University of Manitoba, Gillson Street, Normand Park, Saint Vital, Winnipeg, Manitoba, R3T 2N2, Canada",49.80915360,-97.13304179,edu,
+975978ee6a32383d6f4f026b944099e7739e5890,Privacy-Preserving Age Estimation for Content Rating,University of Manitoba,University of Manitoba,"University of Manitoba, Gillson Street, Normand Park, Saint Vital, Winnipeg, Manitoba, R3T 2N2, Canada",49.80915360,-97.13304179,edu,
+975978ee6a32383d6f4f026b944099e7739e5890,Privacy-Preserving Age Estimation for Content Rating,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+97304c55262bbd9354aa78d2f52eb73d0a13c9ff,Deep Disentangled Representations for Volumetric Reconstruction,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+975af82c9ce82a1fad760d58ba0a661217689aa9,Answerer in Questioner's Mind for Goal-Oriented Visual Dialogue,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+635e5b6219a655b73f47ae74751ae43577d22da6,Label Denoising Adversarial Network (LDAN) for Inverse Lighting of Face Images,University of Maryland College Park,University of Maryland College Park,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",38.99203005,-76.94610290,edu,
+63da9079437f6090b44eec60ec3986c25c13be73,Top down saliency estimation via superpixel-based discriminative dictionaries,Hacettepe University,Hacettepe University,"Hacettepe Üniversitesi Beytepe Kampüsü, Hacettepe-Beytepe Kampüs Yolu, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.86742125,32.73519072,edu,
+63d8110ac76f57b3ba8a5947bc6bdbb86f25a342,On Modeling Variations for Face Authentication,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+635e3ce6fb0b28f38fb77f25770911bf08f0ff03,Face-to-face interference in typical and atypical development,Newcastle University,Newcastle University,"Newcastle University, Claremont Walk, Haymarket, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE1 7RU, UK",54.98023235,-1.61452627,edu,
+635e3ce6fb0b28f38fb77f25770911bf08f0ff03,Face-to-face interference in typical and atypical development,Northumbria University,Northumbria University,"Northumbria University, Birkdale Close, High Heaton, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE7 7TP, UK",55.00306320,-1.57463231,edu,
+638e04272c312d64337b14f001529084f2c40bef,Modeling Naive Psychology of Characters in Simple Commonsense Stories,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+638e04272c312d64337b14f001529084f2c40bef,Modeling Naive Psychology of Characters in Simple Commonsense Stories,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+632b24ddd42fda4aebc5a8af3ec44f7fd3ecdc6c,Real-Time Facial Segmentation and Performance Capture from RGB Input,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+63b20102be65bbb3453152e504e79c2af2eb9059,"Automatic Discovery, Association Estimation and Learning of Semantic Attributes for a Thousand Categories",Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+63fd747875052931aed46a37c6da7d7ebb7768ec,Venues in Social Media: Examining Ambiance Perception Through Scene Semantics,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+63fd747875052931aed46a37c6da7d7ebb7768ec,Venues in Social Media: Examining Ambiance Perception Through Scene Semantics,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+63660c50e2669a5115c2379e622549d8ed79be00,Deep Salient Object Detection by Integrating Multi-level Cues,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+63660c50e2669a5115c2379e622549d8ed79be00,Deep Salient Object Detection by Integrating Multi-level Cues,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+634e02d6107529d672cbbdf5b97990966e289829,Cost-Effective Training of Deep CNNs with Active Model Adaptation,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+630af2eb466fac956f9a43bf877be0eae6d80027,CariGANs: Unpaired Photo-to-Caricature Translation,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+630af2eb466fac956f9a43bf877be0eae6d80027,CariGANs: Unpaired Photo-to-Caricature Translation,City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+630af2eb466fac956f9a43bf877be0eae6d80027,CariGANs: Unpaired Photo-to-Caricature Translation,City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+637de43801f26fab8f567787485c57ab92273ce5,Mask-aware Photorealistic Face Attribute Manipulation,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+637de43801f26fab8f567787485c57ab92273ce5,Mask-aware Photorealistic Face Attribute Manipulation,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+6324fada2fb00bd55e7ff594cf1c41c918813030,Uncertainty Reduction for Active Image Clustering via a Hybrid Global-Local Uncertainty Model,University at Buffalo,State University of New York at Buffalo,"Buffalo, NY 14260, USA",43.00080930,-78.78896970,edu,
+6308e9c991125ee6734baa3ec93c697211237df8,Learning the sparse representation for classification,Beckman Institute,Beckman Institute,"Beckman Institute, The Presidents' Walk, Urbana, Champaign County, Illinois, 61801-2341, USA",40.11571585,-88.22750772,edu,
+6345eef2ffe46da6d77d07446c1329da7ea00f45,Trajectory Ensemble: Multiple Persons Consensus Tracking Across Non-overlapping Multiple Cameras over Randomly Dropped Camera Networks,Nagoya University,Nagoya University,"SuperDARN (Hokkaido West), 太辛第1支線林道, 陸別町, 足寄郡, 十勝総合振興局, 北海道, 北海道地方, 日本",43.53750985,143.60768225,edu,
+6342a4c54835c1e14159495373ab18b4233d2d9b,Towards Pose-robust Face Recognition on Video,Queensland University of Technology,Queensland University of Technology,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.47715625,153.02841004,edu,
+6355f7fd956466e8e9f09b297e6cdd155d66740e,EgoReID: Cross-view Self-Identification and Human Re-identification in Egocentric and Surveillance Videos,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+63c7c8dff73ec6798e38ed7466a4f8ff8a87f879,Learning to Generate Images of Outdoor Scenes from Attributes and Semantic Layouts,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+63f367d50b248680138cb4b3aec3143fad3a7112,Ordinal Random Forests for Object Detection,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+63b29886577a37032c7e32d8899a6f69b11a90de,Image-Set Based Face Recognition Using Boosted Global and Local Principal Angles,University of Tsukuba,University of Tsukuba,"University of Tsukuba, つばき通り, Kananemoto-satsukabe village, つくば市, 茨城県, 関東地方, 305-8377, 日本",36.11120580,140.10551760,edu,
+632029daf2a667cb87cd3078a853d68412ea6896,Clustering-Based Joint Feature Selection for Semantic Attribute Prediction,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+63a6c256ec2cf2e0e0c9a43a085f5bc94af84265,Complexity of multiverse networks and their multilayer generalization,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+63213d080a43660ac59ea12e3c35e6953f6d7ce8,ActionVLAD: Learning Spatio-Temporal Aggregation for Action Classification,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+630d1728435a529d0b0bfecb0e7e335f8ea2596d,Facial Action Unit Detection by Cascade of Tasks,Southeast University,Southeast University,"SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国",32.05752790,118.78682252,edu,
+630d1728435a529d0b0bfecb0e7e335f8ea2596d,Facial Action Unit Detection by Cascade of Tasks,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+630d1728435a529d0b0bfecb0e7e335f8ea2596d,Facial Action Unit Detection by Cascade of Tasks,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+633101e794d7b80f55f466fd2941ea24595e10e6,Face Attribute Prediction with classification CNN,"KTH Royal Institute of Technology, Stockholm","KTH Royal Institute of Technology, Stockholm","KTH, Teknikringen, Lärkstaden, Norra Djurgården, Östermalms stadsdelsområde, Sthlm, Stockholm, Stockholms län, Svealand, 114 28, Sverige",59.34986645,18.07063213,edu,
+63a2e2155193dc2da9764ae7380cdbd044ff2b94,A Dense SURF and Triangulation Based Spatio-temporal Feature for Action Recognition,University of Electro-Communications,The University of Electro-Communications,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本",35.65729570,139.54255868,edu,
+633f4e4d1e29d336a5472a9cf43163fdceafecfa,PatchFCN for Intracranial Hemorrhage Detection,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+631c4ca00eaa65b801c63d32c0f564e974009ddd,Self-attention Learning for Person Re-identification,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+631c4ca00eaa65b801c63d32c0f564e974009ddd,Self-attention Learning for Person Re-identification,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+63d865c66faaba68018defee0daf201db8ca79ed,Deep Regression for Face Alignment,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+63cff99eff0c38b633c8a3a2fec8269869f81850,Feature Correlation Filter for Face Recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+6341274aca0c2977c3e1575378f4f2126aa9b050,A multi-scale cascade fully convolutional network face detector,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+632441c9324cd29489cee3da773a9064a46ae26b,Video-based Cardiac Physiological Measurements Using Joint Blind Source Separation Approaches,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+632441c9324cd29489cee3da773a9064a46ae26b,Video-based Cardiac Physiological Measurements Using Joint Blind Source Separation Approaches,University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+0fd53d7e1ab8f42c710cb77b5ec4cc2b22158a4c,Combined Data Association and Evolving Particle Filter for Tracking of Multiple Articulated Objects,Lancaster University,Lancaster University,"Lancaster University, Library Avenue, Bowland College, Hala, Lancaster, Lancs, North West England, England, LA1 4AP, UK",54.00975365,-2.78757491,edu,
+0f3b3688af4e87b27ad38bf70aeffb64288bfe27,Unsupervised Construction of Human Body Models Using Principles of Organic Computing,Ruhr-University Bochum,Ruhr-University Bochum,"RUB, 150, Universitätsstraße, Ruhr-Universität, Querenburg, Bochum-Süd, Bochum, Regierungsbezirk Arnsberg, Nordrhein-Westfalen, 44801, Deutschland",51.44415765,7.26096541,edu,
+0f9fe80fff218573a4805437ba7010fa823ca0e6,DIY Human Action Data Set Generation,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+0f92f0cf1fb1d37f7f723892976ca61419768995,Gabor-based gradient orientation pyramid for kinship verification under uncontrolled environments,Capital Normal University,Capital Normal University,"首都师范大学, 岭南路, 西冉村, 海淀区, 100048, 中国",39.92864575,116.30104052,edu,
+0f92f0cf1fb1d37f7f723892976ca61419768995,Gabor-based gradient orientation pyramid for kinship verification under uncontrolled environments,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+0fa24e602d65af82fc429edb4e0980dc534d4b16,Adaptive Patch Features for Object Class Recognition with Learned Hierarchical Models,University of Nevada,University of Nevada,"Orange 1, Evans Avenue, Reno, Washoe County, Nevada, 89557, USA",39.54694490,-119.81346566,edu,
+0f0a5d8a7a087204026a6b67000887dbf5b6a20f,Generating objects going well with the surroundings,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+0fe8b5503681128da84a8454a4cc94470adc09ea,Sparsity Potentials for Detecting Objects with the Hough Transform,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+0fe8b5503681128da84a8454a4cc94470adc09ea,Sparsity Potentials for Detecting Objects with the Hough Transform,Sharif University of Technology,Sharif University of Technology,"دانشگاه صنعتی شریف, خیابان آزادی, زنجان, منطقه ۹ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, 14588, ‏ایران‎",35.70362270,51.35125097,edu,
+0f7bf963a06682d69387c54632cec9e835423617,Gamesourcing to acquire labeled human pose estimation data,University of North Carolina at Charlotte,University of North Carolina at Charlotte,"Lot 20, Poplar Terrace Drive, Charlotte, Mecklenburg County, North Carolina, 28223, USA",35.31034410,-80.73261617,edu,
+0f4724cc069609a9544ca7d9a429b52cfe89c182,"PersonLab: Person Pose Estimation and Instance Segmentation with a Bottom-Up, Part-Based, Geometric Embedding Model",Google,"Google, Inc.","1600 Amphitheatre Pkwy, Mountain View, CA 94043, USA",37.42199990,-122.08405750,company,"Google, Mountain View, CA"
+0f6d068ca799e99100fa5ff7503163fd1c9ae581,Common Subspace for Model and Similarity: Phrase Learning for Caption Generation from Images,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+0fc254272db096a9305c760164520ad9914f4c9e,Unsupervised convolutional neural networks for motion estimation,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+0f96eee0407b9ce9ea01629ed01bcf6802f97272,Attribute Learning for Understanding Unstructured Social Activity,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+0fae5d9d2764a8d6ea691b9835d497dd680bbccd,Face Recognition using Canonical Correlation Analysis,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+0fae5d9d2764a8d6ea691b9835d497dd680bbccd,Face Recognition using Canonical Correlation Analysis,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+0f32df6ae76402b98b0823339bd115d33d3ec0a0,Emotion recognition from embedded bodily expressions and speech during dyadic interactions,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+0f32df6ae76402b98b0823339bd115d33d3ec0a0,Emotion recognition from embedded bodily expressions and speech during dyadic interactions,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+0f349677a24dce888851dcf44f5c886d9f4681ec,Running Head: FACIAL EXPRESSION ANIMATION 1 FACSGen 2.0 Animation Software: Generating 3D FACS-Valid Facial Expressions for Emotion Research,University of Geneva,University of Geneva,"University of Chicago-Yerkes Observatory, 373, West Geneva Street, Williams Bay, Walworth County, Wisconsin, 53191, USA",42.57054745,-88.55578627,edu,
+0f349677a24dce888851dcf44f5c886d9f4681ec,Running Head: FACIAL EXPRESSION ANIMATION 1 FACSGen 2.0 Animation Software: Generating 3D FACS-Valid Facial Expressions for Emotion Research,University of Geneva,University of Geneva,"University of Chicago-Yerkes Observatory, 373, West Geneva Street, Williams Bay, Walworth County, Wisconsin, 53191, USA",42.57054745,-88.55578627,edu,
+0f829fee12e86f980a581480a9e0cefccb59e2c5,Bird Part Localization Using Exemplar-Based Models with Enforced Pose and Subcategory Consistency,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+0f7fdd7f98ee5fbfa7d293e0f1fa399b7a4ec13a,Two-Granularity Tracking: Mediating Trajectory and Detection Graphs for Tracking under Occlusions,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+0f5700e8aa4cba32828ca12cd4e3732a33148951,Spatio-Temporal Modeling and Prediction of Visual Attention in Graphical User Interfaces,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+0faee699eccb2da6cf4307ded67ba8434368257b,TAIGMAN: MULTIPLE ONE-SHOTS FOR UTILIZING CLASS LABEL INFORMATION 1 Multiple One-Shots for Utilizing Class Label Information,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+0faee699eccb2da6cf4307ded67ba8434368257b,TAIGMAN: MULTIPLE ONE-SHOTS FOR UTILIZING CLASS LABEL INFORMATION 1 Multiple One-Shots for Utilizing Class Label Information,Open University of Israel,Open University of Israel,"האוניברסיטה הפתוחה, 15, אבא חושי, חיפה, גבעת דאונס, חיפה, מחוז חיפה, NO, ישראל",32.77824165,34.99565673,edu,
+0fabb4a40f2e3a2502cd935e54e090a304006c1c,Regularized Robust Coding for Face Recognition,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+0f0366070b46972fcb2976775b45681e62a94a26,Reliable Posterior Probability Estimation for Streaming Face Recognition,University of Colorado at Colorado Springs,University of Colorado at Colorado Springs,"1420 Austin Bluffs Pkwy, Colorado Springs, CO 80918, USA",38.89646790,-104.80505940,edu,
+0f0366070b46972fcb2976775b45681e62a94a26,Reliable Posterior Probability Estimation for Streaming Face Recognition,University of Colorado at Colorado Springs,University of Colorado at Colorado Springs,"1420 Austin Bluffs Pkwy, Colorado Springs, CO 80918, USA",38.89646790,-104.80505940,edu,
+0fd3a7ee228bbc3dd4a111dae04952a1ee58a8cd,Hair style retrieval by semantic mapping on informative patches,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+0f533bc9fdfb75a3680d71c84f906bbd59ee48f1,Illumination invariant feature extraction based on natural images statistics — Taking face images as an example,"Academia Sinica, Taiwan","Research Center for Institute of Information Science, Academia Sinica, Taiwan","115, Taiwan, Taipei City, Nangang District, 研究院路二段128號",25.04117270,121.61465180,edu,
+0f533bc9fdfb75a3680d71c84f906bbd59ee48f1,Illumination invariant feature extraction based on natural images statistics — Taking face images as an example,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+0f22005f8f2bc134f02c4a76cde30349e3389b8d,ShuffleSeg: Real-time Semantic Segmentation Network,University of Alberta,University of Alberta,"University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada",53.52385720,-113.52282665,edu,
+0f4eb63402a4f3bae8f396e12133684fb760def1,"LONG, LIU, SHAO: ATTRIBUTE EMBEDDING WITH VSAR FOR ZERO-SHOT LEARNING 1 Attribute Embedding with Visual-Semantic Ambiguity Removal for Zero-shot Learning",Northumbria University,Northumbria University,"Northumbria University, Birkdale Close, High Heaton, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE7 7TP, UK",55.00306320,-1.57463231,edu,
+0fbc9584cc276ba54d133730624199a631a2c6db,Extreme Clicking for Efficient Object Annotation,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+0f2f1e6e23e4bb9f16ba969d50582e0064ac471c,Basis mapping based boosting for object detection,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+0fba39bf12486c7684fd3d51322e3f0577d3e4e8,Task Specific Local Region Matching,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+0fb8317a8bf5feaf297af8e9b94c50c5ed0e8277,Detecting Hands in Egocentric Videos: Towards Action Recognition,University of Barcelona,University of Barcelona,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España",41.38689130,2.16352385,edu,
+0f2f4edb7599de34c97f680cf356943e57088345,Stacked Hourglass Networks for Human Pose Estimation,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+0fca9a022f4910dda7f8bdc92bbbe8a9c6e35303,Accelerating t-SNE using tree-based algorithms,Delft University of Technology,Delft University of Technology,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland",51.99882735,4.37396037,edu,
+0fe96806c009e8d095205e8f954d41b2b9fd5dcf,On-the-Job Learning with Bayesian Decision Theory,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+0fe96806c009e8d095205e8f954d41b2b9fd5dcf,On-the-Job Learning with Bayesian Decision Theory,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+0fe96806c009e8d095205e8f954d41b2b9fd5dcf,On-the-Job Learning with Bayesian Decision Theory,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+0fe96806c009e8d095205e8f954d41b2b9fd5dcf,On-the-Job Learning with Bayesian Decision Theory,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+0f6d6a67d4439c021dcbaaeab61b6b29e88d45d9,A Semi-Supervised Data Augmentation Approach using 3D Graphical Engines,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+0fb17e7f2bb70ca6ad66bb13599fc6a33be9916b,Deep Canonical Time Warping,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+0fb17e7f2bb70ca6ad66bb13599fc6a33be9916b,Deep Canonical Time Warping,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+0f22251fa9c4bb120f00767053430fbab141fac3,Support Vector Guided Dictionary Learning,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+0f22251fa9c4bb120f00767053430fbab141fac3,Support Vector Guided Dictionary Learning,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+0f22251fa9c4bb120f00767053430fbab141fac3,Support Vector Guided Dictionary Learning,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+0f22251fa9c4bb120f00767053430fbab141fac3,Support Vector Guided Dictionary Learning,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+0f42c64a74bc6e3e83821aa8ab5dd8e3a4b797cd,Controlled scanpath variation alters fusiform face activation.,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+0f1539368f90918fc3c4d5431e384986ad768506,Person Re-Identification by Deep Joint Learning of Multi-Loss Classification,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+0f940d2cdfefc78c92ec6e533a6098985f47a377,A hierarchical framework for simultaneous facial activity tracking,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+0fa88943665de1176b0fc6de4ed7469b40cdb08c,Learning to Draw Samples: With Application to Amortized MLE for Generative Adversarial Learning,Dartmouth College,Dartmouth College,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA",43.70479270,-72.29259090,edu,
+0f21a39fa4c0a19c4a5b4733579e393cb1d04f71,Evaluation of optimization components of a 3D to 2D landmark fitting algorithm for head pose estimation,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+0f21a39fa4c0a19c4a5b4733579e393cb1d04f71,Evaluation of optimization components of a 3D to 2D landmark fitting algorithm for head pose estimation,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+0faeec0d1c51623a511adb779dabb1e721a6309b,Seeing is Worse than Believing: Reading People's Minds Better than Computer-Vision Methods Recognize Actions,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+0faeec0d1c51623a511adb779dabb1e721a6309b,Seeing is Worse than Believing: Reading People's Minds Better than Computer-Vision Methods Recognize Actions,SUNY Buffalo,SUNY Buffalo,"SUNY College at Buffalo, Academic Drive, Elmwood Village, Buffalo, Erie County, New York, 14222, USA",42.93362780,-78.88394479,edu,
+0faeec0d1c51623a511adb779dabb1e721a6309b,Seeing is Worse than Believing: Reading People's Minds Better than Computer-Vision Methods Recognize Actions,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+0faeec0d1c51623a511adb779dabb1e721a6309b,Seeing is Worse than Believing: Reading People's Minds Better than Computer-Vision Methods Recognize Actions,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+0faeec0d1c51623a511adb779dabb1e721a6309b,Seeing is Worse than Believing: Reading People's Minds Better than Computer-Vision Methods Recognize Actions,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+0faeec0d1c51623a511adb779dabb1e721a6309b,Seeing is Worse than Believing: Reading People's Minds Better than Computer-Vision Methods Recognize Actions,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+0faeec0d1c51623a511adb779dabb1e721a6309b,Seeing is Worse than Believing: Reading People's Minds Better than Computer-Vision Methods Recognize Actions,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+0faeec0d1c51623a511adb779dabb1e721a6309b,Seeing is Worse than Believing: Reading People's Minds Better than Computer-Vision Methods Recognize Actions,National University of Ireland Maynooth,National University of Ireland Maynooth,"National University of Ireland Maynooth, River Apartments, Maynooth, Maynooth ED, Maynooth Municipal District, County Kildare, Leinster, KILDARE, Ireland",53.38469750,-6.60039458,edu,
+0f6911bc1e6abee8bbf9dd3f8d54d40466429da7,Zero-shot Learning with Semantic Output Codes,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+0f6911bc1e6abee8bbf9dd3f8d54d40466429da7,Zero-shot Learning with Semantic Output Codes,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+0f6911bc1e6abee8bbf9dd3f8d54d40466429da7,Zero-shot Learning with Semantic Output Codes,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+0f6911bc1e6abee8bbf9dd3f8d54d40466429da7,Zero-shot Learning with Semantic Output Codes,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+0fa956029110bd82b34208cd18a77ca34d2c5eed,"Query-Focused Video Summarization: Dataset, Evaluation, and a Memory Network Based Approach",University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+0f81b0fa8df5bf3fcfa10f20120540342a0c92e5,"Mirror, mirror on the wall, tell me, is the error small?",Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+0f81b0fa8df5bf3fcfa10f20120540342a0c92e5,"Mirror, mirror on the wall, tell me, is the error small?",Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+0fbdd4b8eb9e4c4cfbe5b76ab29ab8b0219fbdc0,Constrained Convolutional Neural Networks for Weakly Supervised Segmentation,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+0f0241124d6092a0bb56259ac091467c2c6938ca,Associating Faces and Names in Japanese Photo News Articles on the Web,University of Electro-Communications,The University of Electro-Communications,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本",35.65729570,139.54255868,edu,
+0fb3b63090f95af97723efe565893eb25ea9188c,Anticipating the future by watching unlabeled video,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+0f945f796a9343b51a3dc69941c0fa1a98c0f448,Local Hypersphere Coding Based on Edges between Visual Words,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+0f8b4a64eea40c1f0aa655d4e77e46543ff558b7,Curvilinear Structure Tracking by Low Rank Tensor Approximation with Model Propagation,Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+0f8b4a64eea40c1f0aa655d4e77e46543ff558b7,Curvilinear Structure Tracking by Low Rank Tensor Approximation with Model Propagation,University of Delaware,University of Delaware,"University of Delaware, South College Avenue, Newark, New Castle County, Delaware, 19713, USA",39.68103280,-75.75401840,edu,
+0fdb6823669959cb709fdb3070e7e5efeebb046a,Robust Recognition against Illumination Variations Based on SIFT,University of Tabriz,University of Tabriz,"دانشگاه تبریز, شهید ایرج خلوتی, کوی انقلاب, تبریز, بخش مرکزی, شهرستان تبریز, استان آذربایجان شرقی, 5166616471, ‏ایران‎",38.06125530,46.32984840,edu,
+0a52919e4473eb7bc20982094e8497570d797b13,Building Class Sensitive Models for Tracking Applications,University of Leeds,University of Leeds,"University of Leeds, Inner Ring Road, Woodhouse, Leeds, Yorkshire and the Humber, England, LS2 9NS, UK",53.80387185,-1.55245712,edu,
+0a808a17f5c86413bd552a324ee6ba180a12f46d,Improving Deep Visual Representation for Person Re-identification by Global and Local Image-language Association,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+0ae910ef0cb2f193a43d3a592b7b62ef8bd13058,Weakly Supervised Saliency Detection with A Category-Driven Map Generator,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+0a64f4fec592662316764283575d05913eb2135b,Joint Pixel and Feature-level Domain Adaptation in the Wild,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+0a0321785c8beac1cbaaec4d8ad0cfd4a0d6d457,Learning Invariant Deep Representation for NIR-VIS Face Recognition,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+0a2ddf88bd1a6c093aad87a8c7f4150bfcf27112,Patch-based models for visual object classes,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+0a2ddf88bd1a6c093aad87a8c7f4150bfcf27112,Patch-based models for visual object classes,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+0a789733ccb300d0dd9df6174faaa7e8c64e0409,High-Resolution Multispectral Dataset for Semantic Segmentation,Rochester Institute of Technology,Rochester Institute of Technology,"Rochester Institute of Technology (RIT), 1, Lomb Memorial Drive, Bailey, Henrietta Town, Monroe County, New York, 14623, USA",43.08250655,-77.67121663,edu,
+0a5718f6a60ca18e6b6de5660c49040ac0045d7a,Automatic classification of the Parkinson’s patient stiffness from a single videosequence,Czech Technical University,Czech Technical University,"České vysoké učení technické v Praze, Resslova, Nové Město, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 11121, Česko",50.07642960,14.41802312,edu,
+0a5718f6a60ca18e6b6de5660c49040ac0045d7a,Automatic classification of the Parkinson’s patient stiffness from a single videosequence,Czech Technical University,Czech Technical University,"České vysoké učení technické v Praze, Resslova, Nové Město, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 11121, Česko",50.07642960,14.41802312,edu,
+0a5ffc55b584da7918c2650f9d8602675d256023,Efficient Face Alignment via Locality-constrained Representation for Robust Recognition,South China University of Technology,South China University of Technology,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.05020420,113.39880323,edu,
+0a5ffc55b584da7918c2650f9d8602675d256023,Efficient Face Alignment via Locality-constrained Representation for Robust Recognition,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+0a5ffc55b584da7918c2650f9d8602675d256023,Efficient Face Alignment via Locality-constrained Representation for Robust Recognition,Shenzhen University,Shenzhen University,"深圳大学, 3688, 南海大道, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518060, 中国",22.53521465,113.93159110,edu,
+0a5ffc55b584da7918c2650f9d8602675d256023,Efficient Face Alignment via Locality-constrained Representation for Robust Recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+0a04d8b0099708fbceb63b58faa61ae0c772c8c4,Log-Gabor Weber Descriptor for Face Recognition,Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+0aa0e5f96d512fcd2357129ad4363d6ae961327e,Unsupervised Hard Example Mining from Videos for Improved Object Detection,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+0aeb5020003e0c89219031b51bd30ff1bceea363,Sparsifying Neural Network Connections for Face Recognition,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+0aeb5020003e0c89219031b51bd30ff1bceea363,Sparsifying Neural Network Connections for Face Recognition,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+0aeb5020003e0c89219031b51bd30ff1bceea363,Sparsifying Neural Network Connections for Face Recognition,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+0aa74ad36064906e165ac4b79dec298911a7a4db,Variational Inference for the Indian Buffet Process,Cambridge University,Cambridge University,"University, Cambridge Road, Old Portsmouth, Portsmouth, South East, England, PO1 2HB, UK",50.79440260,-1.09717480,edu,
+0aa74ad36064906e165ac4b79dec298911a7a4db,Variational Inference for the Indian Buffet Process,Cambridge University,Cambridge University,"University, Cambridge Road, Old Portsmouth, Portsmouth, South East, England, PO1 2HB, UK",50.79440260,-1.09717480,edu,
+0aa74ad36064906e165ac4b79dec298911a7a4db,Variational Inference for the Indian Buffet Process,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+0aa74ad36064906e165ac4b79dec298911a7a4db,Variational Inference for the Indian Buffet Process,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+0abf67e7bd470d9eb656ea2508beae13ca173198,Going Deeper into First-Person Activity Recognition,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+0af33f6b5fcbc5e718f24591b030250c6eec027a,Text Analysis for Automatic Image Annotation,Katholieke Universiteit Leuven,Katholieke Universiteit Leuven,"Laboratorium voor Bos, natuur en landschap, 102, Vital Decosterstraat, Sint-Maartensdal, Leuven, Vlaams-Brabant, Vlaanderen, 3000, België / Belgique / Belgien",50.88306860,4.70195030,edu,
+0ad6dc4554fd5c0212993677c160af31fd27e243,Measuring Crowd Collectiveness,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+0ad6dc4554fd5c0212993677c160af31fd27e243,Measuring Crowd Collectiveness,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+0ad6dc4554fd5c0212993677c160af31fd27e243,Measuring Crowd Collectiveness,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+0a850a9fc853c358aea1167e1f965cda8980b7fd,INDREX: in-database distributional relation extraction,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+0ae8377a984125802a69a93df7c9fe640b55aeac,Automated classification of tropical shrub species: a hybrid of leaf shape and machine learning approach,University of Malaya,University of Malaya,"UM, Lingkaran Wawasan, Bukit Pantai, Bangsar, KL, 50603, Malaysia",3.12267405,101.65356103,edu,
+0ae8377a984125802a69a93df7c9fe640b55aeac,Automated classification of tropical shrub species: a hybrid of leaf shape and machine learning approach,University of Malaya,University of Malaya,"UM, Lingkaran Wawasan, Bukit Pantai, Bangsar, KL, 50603, Malaysia",3.12267405,101.65356103,edu,
+0ae8377a984125802a69a93df7c9fe640b55aeac,Automated classification of tropical shrub species: a hybrid of leaf shape and machine learning approach,University of Malaya,University of Malaya,"UM, Lingkaran Wawasan, Bukit Pantai, Bangsar, KL, 50603, Malaysia",3.12267405,101.65356103,edu,
+0a814669f4a0198e46a3a0d91a1bbb81bb089216,"Deficits in facial, body movement and vocal emotional processing in autism spectrum disorders.",University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+0a814669f4a0198e46a3a0d91a1bbb81bb089216,"Deficits in facial, body movement and vocal emotional processing in autism spectrum disorders.",University of St Andrews,University of St Andrews,"University of St Andrews, North Street, Albany Park Student accommodation, Carngour, St Andrews, Fife, Scotland, KY16 9AJ, UK",56.34119840,-2.79309380,edu,
+0a814669f4a0198e46a3a0d91a1bbb81bb089216,"Deficits in facial, body movement and vocal emotional processing in autism spectrum disorders.",University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+0a814669f4a0198e46a3a0d91a1bbb81bb089216,"Deficits in facial, body movement and vocal emotional processing in autism spectrum disorders.",University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+0a814669f4a0198e46a3a0d91a1bbb81bb089216,"Deficits in facial, body movement and vocal emotional processing in autism spectrum disorders.",University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+0af7632427f70f2327cdf5188b814fa55d7551df,Hidden Markov models for face recognition,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+0a84a63acef89a0f632ef08cb0b00af77ed8e7f5,Amphisbaena: Modeling two orthogonal ways to hunt on heterogeneous many-cores,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+0a84a63acef89a0f632ef08cb0b00af77ed8e7f5,Amphisbaena: Modeling two orthogonal ways to hunt on heterogeneous many-cores,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+0a84a63acef89a0f632ef08cb0b00af77ed8e7f5,Amphisbaena: Modeling two orthogonal ways to hunt on heterogeneous many-cores,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+0ad1fefa54f69d9efa0112f2e60c19841d5e9346,ABC-CNN: An Attention Based Convolutional Neural Network for Visual Question Answering,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+0ad1fefa54f69d9efa0112f2e60c19841d5e9346,ABC-CNN: An Attention Based Convolutional Neural Network for Visual Question Answering,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+0a34fe39e9938ae8c813a81ae6d2d3a325600e5c,FacePoseNet: Making a Case for Landmark-Free Face Alignment,Open University of Israel,Open University of Israel,"האוניברסיטה הפתוחה, 15, אבא חושי, חיפה, גבעת דאונס, חיפה, מחוז חיפה, NO, ישראל",32.77824165,34.99565673,edu,
+0a24a16cb9f6d95453d4cd6d0bd5bdad4199e3cc,Training Deep Neural Networks with Different Datasets In-the-wild: The Emotion Recognition Paradigm,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+0a24a16cb9f6d95453d4cd6d0bd5bdad4199e3cc,Training Deep Neural Networks with Different Datasets In-the-wild: The Emotion Recognition Paradigm,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+0a24a16cb9f6d95453d4cd6d0bd5bdad4199e3cc,Training Deep Neural Networks with Different Datasets In-the-wild: The Emotion Recognition Paradigm,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+0ae80aa149764e91544bbe45b80bb50434e7bda9,Ambient Sound Provides Supervision for Visual Learning,MIT,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+0ad8149318912b5449085187eb3521786a37bc78,CP-mtML: Coupled Projection Multi-Task Metric Learning for Large Scale Face Retrieval,University of Caen,University of Caen,"Université de Caen Normandie, Esplanade de la Paix, Caen, Calvados, Normandie, 14032, France",49.18624000,-0.36591000,edu,
+0a9d204db13d395f024067cf70ac19c2eeb5f942,Viewpoint-aware Video Summarization,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+0aae88cf63090ea5b2c80cd014ef4837bcbaadd8,3D Face Structure Extraction from Images at Arbitrary Poses and under Arbitrary Illumination Conditions,Drexel University,Drexel University,"Drexel University, Arch Street, Powelton Village, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.95740000,-75.19026706,edu,
+0aa303109a3402aa5a203877847d549c4a24d933,Who Do I Look Like? Determining Parent-Offspring Resemblance via Gated Autoencoders,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+0ad119275960fd1b68004feeb84d41b91bc273c8,Object Detection Using Generalization and Efficiency Balanced Co-Occurrence Features,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+0a3a33b872c84dac88bcd6f5bd460ef03584e0f7,Abnormal Neural Activation to Faces in the Parents of Children with Autism.,University of North Carolina,University of North Carolina,"University of North Carolina, Emergency Room Drive, Chapel Hill, Orange County, North Carolina, 27599, USA",35.90503535,-79.04775327,edu,
+0a3a33b872c84dac88bcd6f5bd460ef03584e0f7,Abnormal Neural Activation to Faces in the Parents of Children with Autism.,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+0a3a33b872c84dac88bcd6f5bd460ef03584e0f7,Abnormal Neural Activation to Faces in the Parents of Children with Autism.,University of North Carolina at Chapel Hill,University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.91139710,-79.05045290,edu,
+0a3a33b872c84dac88bcd6f5bd460ef03584e0f7,Abnormal Neural Activation to Faces in the Parents of Children with Autism.,University of North Carolina,University of North Carolina,"University of North Carolina, Emergency Room Drive, Chapel Hill, Orange County, North Carolina, 27599, USA",35.90503535,-79.04775327,edu,
+0a82860d11fcbf12628724333f1e7ada8f3cd255,Action Temporal Localization in Untrimmed Videos via Multi-stage CNNs,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+0a267d927cfae039cf0a9c995a59ded563344eb6,Model Selection Management Systems: The Next Frontier of Advanced Analytics,University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+0a4fc9016aacae9cdf40663a75045b71e64a70c9,Illumination Normalization Based on Homomorphic Wavelet Filtering for Face Recognition,Beijing Jiaotong University,Beijing Jiaotong University,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国",39.94976005,116.33629046,edu,
+0a0d5283439f088c158fcec732e2593bb3cd57ad,Who Blocks Who: Simultaneous clothing segmentation for grouping images,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+0a85afebaa19c80fddb660110a4352fd22eb2801,Neural Animation and Reenactment of Human Actor Videos,University of Hong Kong,University of Hong Kong,"海洋科學研究所 The Swire Institute of Marine Science, 鶴咀道 Cape D'Aguilar Road, 鶴咀低電台 Cape D'Aguilar Low-Level Radio Station, 石澳 Shek O, 芽菜坑村 Nga Choy Hang Tsuen, 南區 Southern District, 香港島 Hong Kong Island, HK, 中国",22.20814690,114.25964115,edu,
+0a85afebaa19c80fddb660110a4352fd22eb2801,Neural Animation and Reenactment of Human Actor Videos,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+0a85afebaa19c80fddb660110a4352fd22eb2801,Neural Animation and Reenactment of Human Actor Videos,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+0a85afebaa19c80fddb660110a4352fd22eb2801,Neural Animation and Reenactment of Human Actor Videos,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+0a85afebaa19c80fddb660110a4352fd22eb2801,Neural Animation and Reenactment of Human Actor Videos,University of Hong Kong,University of Hong Kong,"海洋科學研究所 The Swire Institute of Marine Science, 鶴咀道 Cape D'Aguilar Road, 鶴咀低電台 Cape D'Aguilar Low-Level Radio Station, 石澳 Shek O, 芽菜坑村 Nga Choy Hang Tsuen, 南區 Southern District, 香港島 Hong Kong Island, HK, 中国",22.20814690,114.25964115,edu,
+0a85afebaa19c80fddb660110a4352fd22eb2801,Neural Animation and Reenactment of Human Actor Videos,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+0a85afebaa19c80fddb660110a4352fd22eb2801,Neural Animation and Reenactment of Human Actor Videos,University of Hong Kong,University of Hong Kong,"海洋科學研究所 The Swire Institute of Marine Science, 鶴咀道 Cape D'Aguilar Road, 鶴咀低電台 Cape D'Aguilar Low-Level Radio Station, 石澳 Shek O, 芽菜坑村 Nga Choy Hang Tsuen, 南區 Southern District, 香港島 Hong Kong Island, HK, 中国",22.20814690,114.25964115,edu,
+0a13581afdae66bcf52755bfb53410e6e54c1840,Restricting Greed in Training of Generative Adversarial Network,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+0ac442bb570b086d04c4d51a8410fcbfd0b1779d,WarpNet: Weakly Supervised Matching for Single-View Reconstruction,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+0acc526e1fbef5bed4c63623e370a4710206e997,Shape guided contour grouping with particle filters,Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+0acc526e1fbef5bed4c63623e370a4710206e997,Shape guided contour grouping with particle filters,University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+0acc526e1fbef5bed4c63623e370a4710206e997,Shape guided contour grouping with particle filters,Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+0acc526e1fbef5bed4c63623e370a4710206e997,Shape guided contour grouping with particle filters,Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+0af3c97068638ec2b79b93ff8b3fde9bd999f153,Deep Approximately Orthogonal Nonnegative Matrix Factorization for Clustering,Guangdong University of Technology,Guangdong University of Technology,"广东工业大学, 东风东路, 黄花岗街道, 越秀区 (Yuexiu), 广州市, 广东省, 510080, 中国",23.13538360,113.29470496,edu,
+0af3c97068638ec2b79b93ff8b3fde9bd999f153,Deep Approximately Orthogonal Nonnegative Matrix Factorization for Clustering,Guangdong University of Technology,Guangdong University of Technology,"广东工业大学, 东风东路, 黄花岗街道, 越秀区 (Yuexiu), 广州市, 广东省, 510080, 中国",23.13538360,113.29470496,edu,
+0af3c97068638ec2b79b93ff8b3fde9bd999f153,Deep Approximately Orthogonal Nonnegative Matrix Factorization for Clustering,Guangdong University of Technology,Guangdong University of Technology,"广东工业大学, 东风东路, 黄花岗街道, 越秀区 (Yuexiu), 广州市, 广东省, 510080, 中国",23.13538360,113.29470496,edu,
+0a01d9b6b468f3e25867a028244ce4376b5e8d82,Cross-modality pose-invariant facial expression,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+0ac664519b2b8abfb8966dafe60d093037275573,Facial action unit detection using kernel partial least squares,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+0a9345ea6e488fb936e26a9ba70b0640d3730ba7,Deep Bi-directional Cross-triplet Embedding for Cross-Domain Clothing Retrieval,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+0a9345ea6e488fb936e26a9ba70b0640d3730ba7,Deep Bi-directional Cross-triplet Embedding for Cross-Domain Clothing Retrieval,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+0a561e6f0aecd182ddaf526220acc75f6583816e,CollageParsing: Nonparametric Scene Parsing by Adaptive Overlapping Windows,University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+0a70401d161c6c180d84e8139ee8bfbaadb2baad,Image retagging,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+0a79d0ba1a4876086e64fc0041ece5f0de90fbea,Face Illumination Normalization with Shadow Consideration,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+0aa74a922604e200fb92194301d4a4786cc1a74c,Human Factors in Forensic Face Identification,University of New South Wales,University of New South Wales,"UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia",-33.91758275,151.23124025,edu,
+0aa74a922604e200fb92194301d4a4786cc1a74c,Human Factors in Forensic Face Identification,National Institute of Standards and Technology,National Institute of Standards and Technology,"National Institute of Standards and Technology, Summer Walk Drive, Diamond Farms, Gaithersburg, Montgomery County, Maryland, 20878, USA",39.12549380,-77.22293475,edu,
+0aa74a922604e200fb92194301d4a4786cc1a74c,Human Factors in Forensic Face Identification,University of Texas at Dallas,University of Texas at Dallas,"University of Texas at Dallas, Richardson, Dallas County, Texas, 78080, USA",32.98207990,-96.75662780,edu,
+0a602b85c80cef7d38209226188aaab94d5349e8,THE FLORIDA STATE UNIVERSITY COLLEGE OF ARTS AND SCIENCES AUTOMATED FACE TRACKING AND RECOGNITION By MATTHEW,Florida State University,Florida State University,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA",30.44235995,-84.29747867,edu,
+0a11b82aa207d43d1b4c0452007e9388a786be12,Feature Level Multiple Model Fusion Using Multilinear Subspace Analysis with Incomplete Training Set and Its Application to Face Image Analysis,Jiangnan University,Jiangnan University,"江南大学站, 蠡湖大道, 滨湖区, 南场村, 滨湖区 (Binhu), 无锡市 / Wuxi, 江苏省, 214121, 中国",31.48542550,120.27395810,edu,
+0a11b82aa207d43d1b4c0452007e9388a786be12,Feature Level Multiple Model Fusion Using Multilinear Subspace Analysis with Incomplete Training Set and Its Application to Face Image Analysis,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+0ae247153afd87f98829359a8b5df0f68d788d75,A Corpus for Reasoning About Natural Language Grounded in Photographs,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+0ae247153afd87f98829359a8b5df0f68d788d75,A Corpus for Reasoning About Natural Language Grounded in Photographs,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+0ae74fabc585cfd1cf60ea3f9e218c59a4539091,Learning Models for Actions and Person-Object Interactions with Transfer to Question Answering,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+0a71b71421d8a33c41625963d19d5df85685dffc,Analyzing Behavior Specialized Acceleration,University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+0ab1734693b15bd1aeae06c5736fc7ad12f90aa0,GLAD: Global-Local-Alignment Descriptor for Pedestrian Retrieval,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+0ab1734693b15bd1aeae06c5736fc7ad12f90aa0,GLAD: Global-Local-Alignment Descriptor for Pedestrian Retrieval,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+0ab1734693b15bd1aeae06c5736fc7ad12f90aa0,GLAD: Global-Local-Alignment Descriptor for Pedestrian Retrieval,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+0ab1734693b15bd1aeae06c5736fc7ad12f90aa0,GLAD: Global-Local-Alignment Descriptor for Pedestrian Retrieval,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+6424add0f4f99cb582ecc50c4a33ae18d9236021,Unconstrained Monocular 3D Human Pose Estimation by Action Detection and Cross-Modality Regression Forest,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+6424add0f4f99cb582ecc50c4a33ae18d9236021,Unconstrained Monocular 3D Human Pose Estimation by Action Detection and Cross-Modality Regression Forest,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+6424add0f4f99cb582ecc50c4a33ae18d9236021,Unconstrained Monocular 3D Human Pose Estimation by Action Detection and Cross-Modality Regression Forest,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+64b1de5ebd431354816ea2ebe04dd21b1953bd4f,The Phenomenology of Eye Movement Intentions and their Disruption in Goal-Directed Actions,Lund University,Lund University,"TEM at Lund University, 9, Klostergatan, Stadskärnan, Centrum, Lund, Skåne, Götaland, 22222, Sverige",55.70395710,13.19020110,edu,
+64b1de5ebd431354816ea2ebe04dd21b1953bd4f,The Phenomenology of Eye Movement Intentions and their Disruption in Goal-Directed Actions,Lund University,Lund University,"TEM at Lund University, 9, Klostergatan, Stadskärnan, Centrum, Lund, Skåne, Götaland, 22222, Sverige",55.70395710,13.19020110,edu,
+64d83ccbcb1d87bfafee57f0c2d49043ee3f565b,Super-Bit Locality-Sensitive Hashing,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+64d83ccbcb1d87bfafee57f0c2d49043ee3f565b,Super-Bit Locality-Sensitive Hashing,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+64d83ccbcb1d87bfafee57f0c2d49043ee3f565b,Super-Bit Locality-Sensitive Hashing,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+64d83ccbcb1d87bfafee57f0c2d49043ee3f565b,Super-Bit Locality-Sensitive Hashing,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+6409b8879c7e61acf3ca17bcc62f49edca627d4c,Learning Finite Beta-Liouville Mixture Models via Variational Bayes for Proportional Data Clustering,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+6409b8879c7e61acf3ca17bcc62f49edca627d4c,Learning Finite Beta-Liouville Mixture Models via Variational Bayes for Proportional Data Clustering,Concordia University,Concordia University,"Concordia University, 2811, Northeast Holman Street, Concordia, Portland, Multnomah County, Oregon, 97211, USA",45.57022705,-122.63709346,edu,
+6409b8879c7e61acf3ca17bcc62f49edca627d4c,Learning Finite Beta-Liouville Mixture Models via Variational Bayes for Proportional Data Clustering,Concordia University,Concordia University,"Concordia University, 2811, Northeast Holman Street, Concordia, Portland, Multnomah County, Oregon, 97211, USA",45.57022705,-122.63709346,edu,
+64b90c3220c43f58cb38da9af1a1b77da3dde63e,Recurrent Attention Models for Depth-Based Person Identification,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+645736f2027c5cc64e8ca98ef46f28ae9b1b0110,Distant Human Interaction Recognition with Kinect,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+6424574cb92b316928c37232869bfadcb5b4c20f,C-WSL: Count-Guided Weakly Supervised Localization,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+64c1d9a031ec0e6785dc92edc0d00cc0802e32b0,Key Person Aided Re-identification in Partially Ordered Pedestrian Set,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+64c1d9a031ec0e6785dc92edc0d00cc0802e32b0,Key Person Aided Re-identification in Partially Ordered Pedestrian Set,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+6425b6fb2465fbac50d084b66d93d5cc4fc81ae2,Priming Neural Networks,York University,York University,"York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada",43.77439110,-79.50481085,edu,
+64e053ab54c44968a1e6fa146a72f59f101bc951,Personalized 3D-Aided 2D Facial Landmark Localization,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+64afa85b79c7ad60d8f3f9265259c654c03a01e8,Multi-task Learning Using Multi-modal Encoder-Decoder Networks with Shared Skip Connections,Osaka University,Osaka University,"大阪大学清明寮, 服部西町四丁目, 豊中市, 大阪府, 近畿地方, 日本",34.80809035,135.45785218,edu,
+64afa85b79c7ad60d8f3f9265259c654c03a01e8,Multi-task Learning Using Multi-modal Encoder-Decoder Networks with Shared Skip Connections,National Institute of Advanced Industrial Science and Technology,National Institute of Advanced Industrial Science and Technology,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本",36.05238585,140.11852361,edu,
+64753fe167a46208e28237fa98db8daedbef83e4,Normal social cognition in developmental prosopagnosia.,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+64ec0c53dd1aa51eb15e8c2a577701e165b8517b,Online Regression with Feature Selection in Stochastic Data Streams,Florida State University,Florida State University,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA",30.44235995,-84.29747867,edu,
+64ec0c53dd1aa51eb15e8c2a577701e165b8517b,Online Regression with Feature Selection in Stochastic Data Streams,Florida State University,Florida State University,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA",30.44235995,-84.29747867,edu,
+640aa9d6b87d893d1a75e3c49067b9ca1a2babe6,Integration of colour and uniform interlaced derivative patterns for object tracking,Amirkabir University of Technology,Amirkabir University of Technology,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎",35.70451400,51.40972058,edu,
+64e0690dd176a93de9d4328f6e31fc4afe1e7536,Tracking Multiple People Online and in Real Time,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+6459f1e67e1ea701b8f96177214583b0349ed964,Generalized subspace based high dimensional density estimation,"University of California, Santa Barbara","University of California, Santa Barbara","UCSB, Santa Barbara County, California, 93106, USA",34.41459370,-119.84581950,edu,
+6459f1e67e1ea701b8f96177214583b0349ed964,Generalized subspace based high dimensional density estimation,"University of California, Santa Barbara","University of California, Santa Barbara","UCSB, Santa Barbara County, California, 93106, USA",34.41459370,-119.84581950,edu,
+64a5709d41f4c2ef0383cee9932e89bb58085588,Surgeon Technical Skill Assessment using Computer Vision based Analysis,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+64a5709d41f4c2ef0383cee9932e89bb58085588,Surgeon Technical Skill Assessment using Computer Vision based Analysis,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+64a5709d41f4c2ef0383cee9932e89bb58085588,Surgeon Technical Skill Assessment using Computer Vision based Analysis,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+6495d989fe33b19d2b7755f9077d8b5bf3190151,Joint 3D Face Reconstruction and Dense Alignment with Position Map Regression Network,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+64cf86ba3b23d3074961b485c16ecb99584401de,Single Image 3D Interpreter Network,MIT,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+64cf86ba3b23d3074961b485c16ecb99584401de,Single Image 3D Interpreter Network,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4,Deep Learning Face Attributes in the Wild,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4,Deep Learning Face Attributes in the Wild,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+645766177de2ef61619572bc09ce239c232d7d5c,Is the left hemisphere androcentric? Evidence of the learned categorical perception of gender,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+645766177de2ef61619572bc09ce239c232d7d5c,Is the left hemisphere androcentric? Evidence of the learned categorical perception of gender,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+64281b49a34786912085396bafd67429725f1bcf,Metaface learning for sparse representation based face recognition,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+64281b49a34786912085396bafd67429725f1bcf,Metaface learning for sparse representation based face recognition,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+6479b61ea89e9d474ffdefa71f068fbcde22cc44,Some topics on similarity metric learning,University of Exeter,University of Exeter,"University of Exeter, Stocker Road, Exwick, Exeter, Devon, South West England, England, EX4 4QN, UK",50.73693020,-3.53647672,edu,
+64b22e5af5dc07309c85a742728ff6f476bd71d1,Modeling Collective Crowd Behaviors in Video,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+64e75f53ff3991099c3fb72ceca55b76544374e5,Simultaneous Feature Selection and Classifier Training via Linear Programming: A Case Study for Face Expression Recognition,University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+642486fd468e818fddb8a2ec156535a9d74fa4dc,From Superpixel to Human Shape Modelling for Carried Object Detection,Polytechnique Montreal,Polytechnique Montréal,"2900 Boulevard Edouard-Montpetit, Montréal, QC H3T 1J4, Canada",45.50438400,-73.61288290,edu,"Polytechnique Montreal, Montreal, Quebec, Canada"
+64aeab4a2678efa0a60a4d57bf81e3ab640cd476,GenFace: Improving Cyber Security Using Realistic Synthetic Face Generation,University of Haifa,University of Haifa,"אוניברסיטת חיפה, חיפה, מחוז חיפה, ישראל",32.76162915,35.01986304,edu,
+64aeab4a2678efa0a60a4d57bf81e3ab640cd476,GenFace: Improving Cyber Security Using Realistic Synthetic Face Generation,University of Kent,University of Kent,"University of Kent, St. Stephen's Hill, Hackington, Canterbury, Kent, South East, England, CT2 7AS, UK",51.29753440,1.07296165,edu,
+64aeab4a2678efa0a60a4d57bf81e3ab640cd476,GenFace: Improving Cyber Security Using Realistic Synthetic Face Generation,University of Kent,University of Kent,"University of Kent, St. Stephen's Hill, Hackington, Canterbury, Kent, South East, England, CT2 7AS, UK",51.29753440,1.07296165,edu,
+64b14354afc0e33b1786c0c5ab1af46e76b4631c,Enhanced Fine-Form Perception Does Not Contribute to Gestalt Face Perception in Autism Spectrum Disorder,Kyushu University,Kyushu University,"伊都ゲストハウス, 桜井太郎丸線, 西区, 福岡市, 福岡県, 九州地方, 819−0395, 日本",33.59914655,130.22359848,edu,
+64a44e1d5cbefbb403811360a88f4d93e569ffbd,"Perspective distortion modeling, learning and compensation","University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+64b06918add69292c088455b62c4b0f06c727b1b,Virtual-to-Real: Learning to Control in Visual Semantic Segmentation,National Tsing Hua University,National Tsing Hua University,"國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.79254840,120.99511830,edu,
+64f9519f20acdf703984f02e05fd23f5e2451977,Learning Temporal Alignment Uncertainty for Efficient Event Detection,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+64da1bfef7db423f31ff92713fbbe1994ad4124d,Generic Learning-Based Ensemble Framework for Small Sample Size Face Recognition in Multi-Camera Networks,Kyoto University,Kyoto University,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.02749960,135.78154513,edu,
+64782a2bc5da11b1b18ca20cecf7bdc26a538d68,Facial Expression Recognition using Spectral Supervised Canonical Correlation Analysis,Institute of Information Science,Institute of Information Science,"資訊科學研究所, 數理大道, 中研里, 南港子, 南港區, 臺北市, 11574, 臺灣",25.04107280,121.61475620,edu,
+64782a2bc5da11b1b18ca20cecf7bdc26a538d68,Facial Expression Recognition using Spectral Supervised Canonical Correlation Analysis,Beijing Jiaotong University,Beijing Jiaotong University,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国",39.94976005,116.33629046,edu,
+64d5772f44efe32eb24c9968a3085bc0786bfca7,Morphable Displacement Field Based Image Matching for Face Recognition across Pose,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+64d7e62f46813b5ad08289aed5dc4825d7ec5cff,Mix and Match: Joint Model for Clothing and Attribute Recognition,Tohoku University,Tohoku University,"Tohoku University, 五橋通, 青葉区, 仙台市, 宮城県, 東北地方, 980-0811, 日本",38.25309450,140.87365930,edu,
+90298f9f80ebe03cb8b158fd724551ad711d4e71,A Pursuit of Temporal Accuracy in General Activity Detection,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+90e55d06f0c0234712bb133df05a24ccfe7fc87c,Recognizing human actions from still images with latent poses,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+90465149a7cb3f581697922f3c1b87de5be246cf,Interpolation-based Object Detection Using Motion Vectors for Embedded Real-time Tracking Systems,Kyoto University,Kyoto University,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.02749960,135.78154513,edu,
+900207b3bc3a4e5244cae9838643a9685a84fee0,Reconstructing Geometry from Its Latent Structures,Drexel University,Drexel University,"Drexel University, Arch Street, Powelton Village, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.95740000,-75.19026706,edu,
+9004a833c65b89c88d2f50835dc47f2319a2c3d5,Augmented Attribute Representations,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+9016d7e5461aa3328efcfb74a7624487c4db2ffa,Brain structure anomalies in autism spectrum disorder--a meta-analysis of VBM studies using anatomic likelihood estimation.,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+90a2c7db91c3a2ad1249a4c9e6d7d872529cae6a,Unsupervised construction of human body models,Ruhr-University Bochum,Ruhr-University Bochum,"RUB, 150, Universitätsstraße, Ruhr-Universität, Querenburg, Bochum-Süd, Bochum, Regierungsbezirk Arnsberg, Nordrhein-Westfalen, 44801, Deutschland",51.44415765,7.26096541,edu,
+90498b95fe8b299ce65d5cafaef942aa58bd68b7,Face Recognition: Primates in the Wild,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+90fbeb4c871d3916c2b428645a1e1482f05826e1,"Encode, Review, and Decode: Reviewer Module for Caption Generation",Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+90cc2f08a6c2f0c41a9dd1786bae097f9292105e,Top-down Attention Recurrent VLAD Encoding for Action Recognition in Videos,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+90621c2f4390d5fe75d16ec0ca1fa4eb190904b3,Exploiting Texture Cues for Clothing Parsing in Fashion Images,"IIIT Delhi, India","IIIT Delhi, India","Okhla Industrial Estate, Phase III, Near Govind Puri Metro Station, New Delhi, Delhi 110020, India",28.54562820,77.27315050,edu,
+90a4125974564a5ab6c2ce2ff685fc36e9cf0680,Object Region Mining with Adversarial Erasing: A Simple Classification to Semantic Segmentation Approach,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+90a4125974564a5ab6c2ce2ff685fc36e9cf0680,Object Region Mining with Adversarial Erasing: A Simple Classification to Semantic Segmentation Approach,Beijing Jiaotong University,Beijing Jiaotong University,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国",39.94976005,116.33629046,edu,
+90918dfd9d754e1cd07ed6acafec9001a4685ce5,Human detection using partial least squares analysis,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+90d07df2d165b034e38ec04b3f6343d483f6cb38,Using Generative Adversarial Networks to Design Shoes: The Preliminary Steps,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+90d07df2d165b034e38ec04b3f6343d483f6cb38,Using Generative Adversarial Networks to Design Shoes: The Preliminary Steps,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+90d07df2d165b034e38ec04b3f6343d483f6cb38,Using Generative Adversarial Networks to Design Shoes: The Preliminary Steps,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+90c4f15f1203a3a8a5bf307f8641ba54172ead30,A 2D Morphable Model of Craniofacial Profile and Its Application to Craniosynostosis,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+90282cd8e122e102124b765ecbb22025a238f249,Co-domain Embedding using Deep Quadruplet Networks for Unseen Traffic Sign Recognition,MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+903727c8d2973c98aa215a1143f851847a3d5e66,Sparse Exact PGA on Riemannian Manifolds,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+90ea3a35e946af97372c3f32a170b179fe8352aa,Discriminant Learning for Face Recognition,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+9078307c58d74ed6aab70363a5addc054db7fd1d,A Maternal Influence on Reading the Mind in the Eyes Mediated by Executive Function: Differential Parental Influences on Full and Half-Siblings,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+90ad0daa279c3e30b360f9fe9371293d68f4cebf,Spatio-temporal Framework and Algorithms for Video-based Face Recognition,Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.64185301,edu,
+904949e9bf204c275ce366237ec1d3ebcf864a1a,Generating captions without looking beyond objects,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+907189aacae7bff389d6c6592d6e2586dab5168d,A Framework for Applying Point Clouds Grabbed by Multi-Beam LIDAR in Perceiving the Driving Environment,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+907189aacae7bff389d6c6592d6e2586dab5168d,A Framework for Applying Point Clouds Grabbed by Multi-Beam LIDAR in Perceiving the Driving Environment,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+90fbcea84f621ee5d73482c5cb02479778aecccd,Pose-Invariant Face Recognition via RGB-D Images,"Sichuan University, Chengdu","Sichuan Univ., Chengdu","四川大学(华西校区), 校东路, 武侯区, 武侯区 (Wuhou), 成都市 / Chengdu, 四川省, 610014, 中国",30.64276900,104.06751175,edu,
+90fbcea84f621ee5d73482c5cb02479778aecccd,Pose-Invariant Face Recognition via RGB-D Images,EURECOM,EURECOM,"Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France",43.61438600,7.07112500,edu,
+90a754f597958a2717862fbaa313f67b25083bf9,A Review of Human Activity Recognition Methods,University of Ioannina,University of Ioannina,"Πανεπιστήμιο Ιωαννίνων, Πανεπιστημίου, Κάτω Νεοχωρόπουλο, Νεοχωρόπουλο, Δήμος Ιωαννιτών, Π.Ε. Ιωαννίνων, Περιφέρεια Ηπείρου, Ήπειρος - Δυτική Μακεδονία, 45110, Ελλάδα",39.61623060,20.83963011,edu,
+90a754f597958a2717862fbaa313f67b25083bf9,A Review of Human Activity Recognition Methods,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+90a754f597958a2717862fbaa313f67b25083bf9,A Review of Human Activity Recognition Methods,Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.02223470,77.56718325,edu,
+90a754f597958a2717862fbaa313f67b25083bf9,A Review of Human Activity Recognition Methods,University of Florence,University of Florence,"Piazza di San Marco, 4, 50121 Firenze FI, Italy",43.77764260,11.25976500,edu,
+90a754f597958a2717862fbaa313f67b25083bf9,A Review of Human Activity Recognition Methods,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+90a46cf5ca0f13154864aeefe3e8e30e9fde754c,Learning Hierarchical Feature Representation in Depth Image,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+90a46cf5ca0f13154864aeefe3e8e30e9fde754c,Learning Hierarchical Feature Representation in Depth Image,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+90d9209d5dd679b159051a8315423a7f796d704d,Temporal Sequence Distillation: Towards Few-Frame Action Recognition in Videos,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+90d9209d5dd679b159051a8315423a7f796d704d,Temporal Sequence Distillation: Towards Few-Frame Action Recognition in Videos,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+90b4470032f2796a347a0080bcd833c2db0e8bf0,Improving Image Clustering With Multiple Pretrained CNN Feature Extractors,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+90dd2a53236b058c79763459b9d8a7ba5e58c4f1,Capturing Correlations Among Facial Parts for Facial Expression Analysis,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+90c2d4d9569866a0b930e91713ad1da01c2a6846,Dimensionality Reduction Based on Low Rank Representation,Tongji University,Tongji University,"同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国",31.28473925,121.49694909,edu,
+9019286143f89561509506c3164f36f0e7e3a364,DeepNav: Learning to Navigate Large Cities,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+9019286143f89561509506c3164f36f0e7e3a364,DeepNav: Learning to Navigate Large Cities,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+9039b8097a78f460db9718bc961fdc7d89784092,3D Face Recognition Based on Local Shape Patterns and Sparse Representation Classifier,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+905ba09d4db4f5e150457599553610fc2cb7e105,Efficient Pose and Cell Segmentation using Column Generation,Universitat Pompeu Fabra,Universitat Pompeu Fabra,"Dipòsit de les Aigües, Carrer de Wellington, la Vila Olímpica del Poblenou, Ciutat Vella, Barcelona, BCN, CAT, 08071, España",41.39044285,2.18891949,edu,
+9067f14f5708b3ca1c6a8194b2d550fdffb3c1bd,A Pedestrian Detection Method Based on the HOG-LBP Feature and Gentle AdaBoost,Hunan University,Hunan University,"Yejin University for Employees, 冶金西路, 和平乡, 珠晖区, 衡阳市 / Hengyang, 湖南省, 中国",26.88111275,112.62850666,edu,
+90943f17cb224c287d1bf117441781d43d2f9b49,Unsupervised Learning of Depth and Ego-Motion from Monocular Video Using 3D Geometric Constraints,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+900bdd3fc700ebf9417c58df15a05eed8c52a90d,Comparative Deep Learning of Hybrid Representations for Image Recommendations,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+bfd3d184c3a9f5ee59cb2d1ab92cc1a7124319fb,Weakly-supervised Learning of Mid-level Features for Pedestrian Attribute Recognition and Localization,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+bfd3d184c3a9f5ee59cb2d1ab92cc1a7124319fb,Weakly-supervised Learning of Mid-level Features for Pedestrian Attribute Recognition and Localization,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+bfd3d184c3a9f5ee59cb2d1ab92cc1a7124319fb,Weakly-supervised Learning of Mid-level Features for Pedestrian Attribute Recognition and Localization,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+bfd3d184c3a9f5ee59cb2d1ab92cc1a7124319fb,Weakly-supervised Learning of Mid-level Features for Pedestrian Attribute Recognition and Localization,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+bfd3d184c3a9f5ee59cb2d1ab92cc1a7124319fb,Weakly-supervised Learning of Mid-level Features for Pedestrian Attribute Recognition and Localization,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+bfd3d184c3a9f5ee59cb2d1ab92cc1a7124319fb,Weakly-supervised Learning of Mid-level Features for Pedestrian Attribute Recognition and Localization,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+bfd3d184c3a9f5ee59cb2d1ab92cc1a7124319fb,Weakly-supervised Learning of Mid-level Features for Pedestrian Attribute Recognition and Localization,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+bfd3d184c3a9f5ee59cb2d1ab92cc1a7124319fb,Weakly-supervised Learning of Mid-level Features for Pedestrian Attribute Recognition and Localization,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+bff1e1ecf00c37ec91edc7c5c85c1390726c3687,Constrained Deep Metric Learning for Person Re-identification,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+bf03f0fe8f3ba5b118bdcbb935bacb62989ecb11,Effect of Facial Expressions on Feature-Based Landmark Localization in Static Grey Scale Images,University of Tampere,University of Tampere,"Tampereen yliopisto, 4, Kalevantie, Ratinanranta, Tulli, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33100, Suomi",61.49412325,23.77920678,edu,
+bf961e4a57a8f7e9d792e6c2513ee1fb293658e9,Robust Face Image Matching under Illumination Variations,National Tsing Hua University,National Tsing Hua University,"國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.79254840,120.99511830,edu,
+bf961e4a57a8f7e9d792e6c2513ee1fb293658e9,Robust Face Image Matching under Illumination Variations,National Tsing Hua University,National Tsing Hua University,"國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.79254840,120.99511830,edu,
+bf961e4a57a8f7e9d792e6c2513ee1fb293658e9,Robust Face Image Matching under Illumination Variations,National Tsing Hua University,National Tsing Hua University,"國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.79254840,120.99511830,edu,
+bf87e84403589f33b7dd076c6e34b0c7eb39a7a7,The First 3D Face Alignment in the Wild (3DFAW) Challenge,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+bf87e84403589f33b7dd076c6e34b0c7eb39a7a7,The First 3D Face Alignment in the Wild (3DFAW) Challenge,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+bf87e84403589f33b7dd076c6e34b0c7eb39a7a7,The First 3D Face Alignment in the Wild (3DFAW) Challenge,SUNY Binghamton,State University of New York at Binghamton,"State University of New York at Binghamton, East Drive, Hinman, Willow Point, Vestal Town, Broome County, New York, 13790, USA",42.08779975,-75.97066066,edu,
+bfc04ce7752fac884cf5a78b30ededfd5a0ad109,A Hybrid Model for Identity Obfuscation by Face Replacement,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+bf54b5586cdb0b32f6eed35798ff91592b03fbc4,Methodical Analysis of Western-Caucasian and East-Asian Basic Facial Expressions of Emotions Based on Specific Facial Regions,University of Electro-Communications,The University of Electro-Communications,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本",35.65729570,139.54255868,edu,
+bfdc7cde3bbfcba738a5eefe9143417ebf7d8f5c,"Composition Loss for Counting, Density Map Estimation and Localization in Dense Crowds",Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+bfdc7cde3bbfcba738a5eefe9143417ebf7d8f5c,"Composition Loss for Counting, Density Map Estimation and Localization in Dense Crowds",Qatar University,Qatar University,"Qatar University, Roindabout 3, Al Tarfa (68), أم صلال, 24685, ‏قطر‎",25.37461295,51.48980354,edu,
+bfdc7cde3bbfcba738a5eefe9143417ebf7d8f5c,"Composition Loss for Counting, Density Map Estimation and Localization in Dense Crowds",University of Warwick,University of Warwick,"University of Warwick, University Road, Kirby Corner, Cannon Park, Coventry, West Midlands Combined Authority, West Midlands, England, CV4 7AL, UK",52.37931310,-1.56042520,edu,
+bfdc7cde3bbfcba738a5eefe9143417ebf7d8f5c,"Composition Loss for Counting, Density Map Estimation and Localization in Dense Crowds",University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+bf6913250ed359fdf130d6465b90b2a0b6fae04e,Pragmatically Informative Image Captioning with Character-Level Reference,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+bf0f0eb0fb31ee498da4ae2ca9b467f730ea9103,Emotion Regulation in Adolescent Males with Attention-Deficit Hyperactivity Disorder: Testing the Effects of Comorbid Conduct Disorder,Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.48799610,-3.17969747,edu,
+bf0f0eb0fb31ee498da4ae2ca9b467f730ea9103,Emotion Regulation in Adolescent Males with Attention-Deficit Hyperactivity Disorder: Testing the Effects of Comorbid Conduct Disorder,Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.48799610,-3.17969747,edu,
+bf5c5c346e5d378731030edb53fd0c8a49781468,Bayesian Deep Generative Models for Semi-Supervised and Active Learning,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+bffe37791ee7aa277ba6d7c5ff2cb9bddddea09f,Neural correlates of emotion processing during observed self-face recognition in individuals with autism spectrum disorders,Osaka University,Osaka University,"大阪大学清明寮, 服部西町四丁目, 豊中市, 大阪府, 近畿地方, 日本",34.80809035,135.45785218,edu,
+bf47f87ccf1b2f9ad18cabf29a715114185648a0,A Component-based Framework for Face Detection and Identification,McGovern Institute for Brain Research,McGovern Institute for Brain Research,"McGovern Institute for Brain Research (MIT), Main Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.36262950,-71.09144810,edu,
+bf3f8726f2121f58b99b9e7287f7fbbb7ab6b5f5,Visual face scanning and emotion perception analysis between autistic and typically developing children,University of Dhaka,University of Dhaka,"World War Memorial, Shahid Minar Rd, Jagannath Hall, DU, জিগাতলা, ঢাকা, ঢাকা বিভাগ, 1000, বাংলাদেশ",23.73169570,90.39652750,edu,
+bf3f8726f2121f58b99b9e7287f7fbbb7ab6b5f5,Visual face scanning and emotion perception analysis between autistic and typically developing children,University of Dhaka,University of Dhaka,"World War Memorial, Shahid Minar Rd, Jagannath Hall, DU, জিগাতলা, ঢাকা, ঢাকা বিভাগ, 1000, বাংলাদেশ",23.73169570,90.39652750,edu,
+bf3a09f7598afe4e3ab925636f167e55f2b70a9e,Multiple Human Pose Estimation with Temporally Consistent 3D Pictorial Structures,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+bf9b34433bdf14e595a1ed89a23c416990639215,Smart Stadium for Smarter Living: Enriching the Fan Experience,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+bf8a520533f401347e2f55da17383a3e567ef6d8,Bounded-Distortion Metric Learning,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+bf8a520533f401347e2f55da17383a3e567ef6d8,Bounded-Distortion Metric Learning,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+bf8a520533f401347e2f55da17383a3e567ef6d8,Bounded-Distortion Metric Learning,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+bf8a520533f401347e2f55da17383a3e567ef6d8,Bounded-Distortion Metric Learning,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+bf3d0e41e4d0a2ef6dbdd3018e3c7f728b5efceb,Non-Euclidean dissimilarity data in pattern recognition,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+bff567c58db554858c7f39870cff7c306523dfee,Neural Task Graphs: Generalizing to Unseen Tasks from a Single Video Demonstration,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+bf179c196b321bbcd58291e52b8259c3f4c1190c,Panoptic Segmentation with a Joint Semantic and Instance Segmentation Network,Eindhoven University of Technology,Eindhoven University of Technology,"Technische Universiteit Eindhoven, 2, De Rondom, Villapark, Eindhoven, Noord-Brabant, Nederland, 5600 MB, Nederland",51.44866020,5.49039957,edu,
+bf179c196b321bbcd58291e52b8259c3f4c1190c,Panoptic Segmentation with a Joint Semantic and Instance Segmentation Network,Eindhoven University of Technology,Eindhoven University of Technology,"Technische Universiteit Eindhoven, 2, De Rondom, Villapark, Eindhoven, Noord-Brabant, Nederland, 5600 MB, Nederland",51.44866020,5.49039957,edu,
+bf179c196b321bbcd58291e52b8259c3f4c1190c,Panoptic Segmentation with a Joint Semantic and Instance Segmentation Network,Eindhoven University of Technology,Eindhoven University of Technology,"Technische Universiteit Eindhoven, 2, De Rondom, Villapark, Eindhoven, Noord-Brabant, Nederland, 5600 MB, Nederland",51.44866020,5.49039957,edu,
+bfe9560daea296350c9fb4a9b2b9bf9d10fc1a3e,DualNet: Domain-invariant network for visual question answering,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+bf107f242abea2e52d82dcd834e58b774205ec84,Crowd Counting by Adapting Convolutional Neural Networks with Side Information,City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+bf107f242abea2e52d82dcd834e58b774205ec84,Crowd Counting by Adapting Convolutional Neural Networks with Side Information,City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+bf107f242abea2e52d82dcd834e58b774205ec84,Crowd Counting by Adapting Convolutional Neural Networks with Side Information,City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+bffbd04ee5c837cd919b946fecf01897b2d2d432,Facial Feature Tracking and Occlusion Recovery in American Sign Language,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+bf63599a05692ba4c18476f696edf98bc28a4f3d,Fully Convolutional Neural Networks for Crowd Segmentation,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+d35534f3f59631951011539da2fe83f2844ca245,Semantically Decomposing the Latent Spaces of Generative Adversarial Networks,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+d35534f3f59631951011539da2fe83f2844ca245,Semantically Decomposing the Latent Spaces of Generative Adversarial Networks,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+d35534f3f59631951011539da2fe83f2844ca245,Semantically Decomposing the Latent Spaces of Generative Adversarial Networks,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+d35534f3f59631951011539da2fe83f2844ca245,Semantically Decomposing the Latent Spaces of Generative Adversarial Networks,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+d315396cf26613a552a41630a9698b71b6fb5f9a,On-the-fly global activity prediction and anomaly detection,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+d38af10096aa90dfccd7e4cec9757900bf6958bd,MultiPoseNet: Fast Multi-Person Pose Estimation Using Pose Residual Network,Middle East Technical University,Middle East Technical University,"ODTÜ, 1, 1591.sk(315.sk), Çiğdem Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87549675,32.78553506,edu,
+d3b832f3c4e8b6d81eac24d6e070f756b9e8a7a1,Examples-Rules Guided Deep Neural Network for Makeup Recommendation,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+d3edbfe18610ce63f83db83f7fbc7634dde1eb40,Large Graph Hashing with Spectral Rotation,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+d372629db7d6516c4729c847eb3f6484ee86de94,The VQA-Machine: Learning How to Use Existing Vision Algorithms to Answer New Questions,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+d3424761e06a8f5f3c1f042f1f1163a469872129,"Pose - invariant , model - based object recognition , using linear combination of views and Bayesian statistics . Vasileios",University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+d377e648734f429ae50c889c43b7b2e9c5ca2d66,"Development of face discrimination abilities, and relationship to magnocellular pathway development, between childhood and adulthood.",Ohio State University,The Ohio State University,"The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA",40.00471095,-83.02859368,edu,
+d33b26794ea6d744bba7110d2d4365b752d7246f,Transfer Feature Representation via Multiple Kernel Learning,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+d3015812feb640c79ca8a098e7e27c35f4355ede,Online Nearest Neighbor Search in Hamming Space,University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.54724732,edu,
+d3b5a52062e5f5415df527705cb24af9b0846617,Advances and Challenges in 3D and 2D+3D Human Face Recognition,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+d37fa0caee9b598149f73ccc593f54eb2e0ffb58,Application of Self-quotient ε- Filter to Impulse Noise Corrupted Image,University of Electro-Communications,The University of Electro-Communications,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本",35.65729570,139.54255868,edu,
+d3d5d86afec84c0713ec868cf5ed41661fc96edc,A Comprehensive Analysis of Deep Learning Based Representation for Face Recognition,Sabanci University,Sabanci University,"Sabanci Universitesi, Preveze Cad., Orta Mahallesi, Tepeören, Tuzla, İstanbul, Marmara Bölgesi, 34953, Türkiye",40.89271590,29.37863323,edu,
+d3d5d86afec84c0713ec868cf5ed41661fc96edc,A Comprehensive Analysis of Deep Learning Based Representation for Face Recognition,Istanbul Technical University,Istanbul Technical University,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye",41.10427915,29.02231159,edu,
+d392098688a999c70589c995bd4427c212eff69d,Object Repositioning Based on the Perspective in a Single Image,University of Tsukuba,University of Tsukuba,"University of Tsukuba, つばき通り, Kananemoto-satsukabe village, つくば市, 茨城県, 関東地方, 305-8377, 日本",36.11120580,140.10551760,edu,
+d3d37a44a7a0453445e6e433a527b0164ec99b88,Efficient Use of Geometric Constraints for Sliding-Window Object Detection in Video,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+d3e04963ff42284c721f2bc6a90b7a9e20f0242f,On Forensic Use of Biometrics,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+d35c82588645b94ce3f629a0b98f6a531e4022a3,Scalable Online Annotation & Object Localisation For Broadcast Media Production,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+d35c82588645b94ce3f629a0b98f6a531e4022a3,Scalable Online Annotation & Object Localisation For Broadcast Media Production,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+d3dcdd5bd1592ff8555629068e046ce0741d6062,Sparse Coding and Dictionary Learning for Symmetric Positive Definite Matrices: A Kernel Approach,"CSIRO, Australia","NICTA, PO Box 6020, St Lucia, QLD 4067, Australia","Research Way, Clayton VIC 3168, Australia",-37.90627370,145.13194490,edu,f.k.a. NICTA
+d3dcdd5bd1592ff8555629068e046ce0741d6062,Sparse Coding and Dictionary Learning for Symmetric Positive Definite Matrices: A Kernel Approach,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+d3dcdd5bd1592ff8555629068e046ce0741d6062,Sparse Coding and Dictionary Learning for Symmetric Positive Definite Matrices: A Kernel Approach,"Australian National University, Canberra","Australian National University, Canberra","Australian National University, Garran Road, Acton, Canberra, Canberra Central, Australian Capital Territory, 2601, Australia",-35.28121335,149.11665331,edu,
+d3a1322c988b50049986365c27dcfce42828d2ca,van Gent Clustering Faces from the National Archives of Estonia Bachelor ’ s Thesis ( 9 ECTS ),University of Tartu,UNIVERSITY OF TARTU,"Paabel, University of Tartu, 17, Ülikooli, Kesklinn, Tartu linn, Tartu, Tartu linn, Tartu maakond, 53007, Eesti",58.38131405,26.72078081,edu,
+d3516392214e7c0dde80a2ea8ba45e70e462fea6,In Defense of the Classification Loss for Person Re-Identification,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+d399a5dc23866e4590d7a76174154a582b93a18d,Guiding Optical Flow Estimation,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+d394bd9fbaad1f421df8a49347d4b3fca307db83,Recognizing facial expressions at low resolution,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+d31e827d7570de3088f7ce582a4be2dbd38dc1b0,Amygdala activity for the modulation of goal-directed behavior in emotional contexts,National Institutes of Health,National Institutes of Health,"NIH, Pooks Hill, Bethesda, Montgomery County, Maryland, USA",39.00041165,-77.10327775,edu,
+d350e3753756b1c6946d5d9150626b2de4f7a8e4,Toward Diverse Text Generation with Inverse Reinforcement Learning,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+d350e3753756b1c6946d5d9150626b2de4f7a8e4,Toward Diverse Text Generation with Inverse Reinforcement Learning,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+d3b550e587379c481392fb07f2cbbe11728cf7a6,Small Sample Size Face Recognition using Random Quad-Tree based Ensemble Algorithm,Kyoto University,Kyoto University,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.02749960,135.78154513,edu,
+d307a766cc9c728a24422313d4c3dcfdb0d16dd5,Deep Keyframe Detection in Human Action Videos,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+d307a766cc9c728a24422313d4c3dcfdb0d16dd5,Deep Keyframe Detection in Human Action Videos,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+d307a766cc9c728a24422313d4c3dcfdb0d16dd5,Deep Keyframe Detection in Human Action Videos,Hunan University,Hunan University,"Yejin University for Employees, 冶金西路, 和平乡, 珠晖区, 衡阳市 / Hengyang, 湖南省, 中国",26.88111275,112.62850666,edu,
+d307a766cc9c728a24422313d4c3dcfdb0d16dd5,Deep Keyframe Detection in Human Action Videos,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+d31af74425719a3840b496b7932e0887b35e9e0d,A Multimodal Deep Log-Based User Experience (UX) Platform for UX Evaluation,Kyung Hee University,Kyung Hee University,"Kyung Hee Tae Kwon Do, Vons 2370 Truck Service Ramp, University City, San Diego, San Diego County, California, 92122, USA",32.85363330,-117.20352860,edu,
+d3312da8c703ed7842285289c3d9478f333dbd48,See the Forest for the Trees: Joint Spatial and Temporal Recurrent Neural Networks for Video-Based Person Re-identification,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+d3b0839324d0091e70ce34f44c979b9366547327,Precise Box Score: Extract More Information from Datasets to Improve the Performance of Face Detection,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+d30050cfd16b29e43ed2024ae74787ac0bbcf2f7,Facial Expression Classification Using Convolutional Neural Network and Support Vector Machine,Marquette University,Marquette University,"Marquette University, West Wisconsin Avenue, University Hill, Milwaukee, Milwaukee County, Wisconsin, 53226, USA",43.03889625,-87.93155450,edu,
+d3faed04712b4634b47e1de0340070653546deb2,Neural Best-Buddies: Sparse Cross-Domain Correspondence,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+d3faed04712b4634b47e1de0340070653546deb2,Neural Best-Buddies: Sparse Cross-Domain Correspondence,Shandong University,Shandong University,"山东大学, 泰安街, 鳌山卫街道, 即墨区, 青岛市, 山东省, 266200, 中国",36.36934730,120.67381800,edu,
+d3faed04712b4634b47e1de0340070653546deb2,Neural Best-Buddies: Sparse Cross-Domain Correspondence,Shandong University,Shandong University,"山东大学, 泰安街, 鳌山卫街道, 即墨区, 青岛市, 山东省, 266200, 中国",36.36934730,120.67381800,edu,
+d3faed04712b4634b47e1de0340070653546deb2,Neural Best-Buddies: Sparse Cross-Domain Correspondence,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+d37013e4ce0f5dd6b61a4ffadecc401274966602,Reading affect in the face and voice: neural correlates of interpreting communicative intent in children and adolescents with autism spectrum disorders.,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+d4ea0438b6c0479a7d7611130a0dc242a22f93eb,Pose2Instance: Harnessing Keypoints for Person Instance Segmentation,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+d4a5eaf2e9f2fd3e264940039e2cbbf08880a090,An Occluded Stacked Hourglass Approach to Facial Landmark Localization and Occlusion Estimation,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+d45e856d22714d6ea7bd80a8c73d2be3b1f16f27,Learning Interpretable Spatial Operations in a Rich 3D Blocks World,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+d45e856d22714d6ea7bd80a8c73d2be3b1f16f27,Learning Interpretable Spatial Operations in a Rich 3D Blocks World,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+d453e0d308919867b5814beb1394cd0cc1cb2378,STOIC: A database of dynamic and static faces expressing highly recognizable emotions,University of Glasgow,University of Glasgow,"University of Glasgow, University Avenue, Yorkhill, Hillhead, Glasgow, Glasgow City, Scotland, G, UK",55.87231535,-4.28921784,edu,
+d4a925cb0ca66b1cacec325751f4a85e5b74790d,Adversarially Learned Inference,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+d444e010049944c1b3438c9a25ae09b292b17371,Structure Preserving Video Prediction,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+d462d514c0a177eb82aec8175bf431189218e393,Face recognition using regularised generalised discriminant locality preserving projections,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+d46fda4b49bbc219e37ef6191053d4327e66c74b,Facial Expression Recognition Based on Complexity Perception Classification Algorithm,South China University of Technology,South China University of Technology,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.05020420,113.39880323,edu,
+d448d67c6371f9abf533ea0f894ef2f022b12503,Weakly supervised collective feature learning from curated media,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+d406ec45ab1d1453cc207fff265077101154d613,Horizontal Pyramid Matching for Person Re-identification,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+d4b34e327b62b7f3fbddfc403e4642b17245a3b7,Partial Person Re-identification with Alignment and Hallucination,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+d492dbfaa42b4f8b8a74786d7343b3be6a3e9a1d,Deep Cost-Sensitive and Order-Preserving Feature Learning for Cross-Population Age Estimation,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+d492dbfaa42b4f8b8a74786d7343b3be6a3e9a1d,Deep Cost-Sensitive and Order-Preserving Feature Learning for Cross-Population Age Estimation,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+d428aa4c1c84da422f8c99eb0147a49439d16f0d,Audio-Visual Spontaneous Emotion Recognition,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+d46509935f7d485295587d4fc201c42108760379,Facial Image Analysis by CNN with Weighted Heterogeneous Learning,Chubu University,Chubu University,"中部大学, 国道19号, 春日井市, 愛知県, 中部地方, 487-8501, 日本",35.27426550,137.01327841,edu,
+d4448f8aa320f04066cc43201d55ddd023eb712e,Clothing Change Aware Person Identification,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+d4448f8aa320f04066cc43201d55ddd023eb712e,Clothing Change Aware Person Identification,University of South Carolina,University of South Carolina,"University of South Carolina, Wheat Street, Columbia, Richland County, South Carolina, 29205, USA",33.99282980,-81.02685168,edu,
+d46b4e6871fc9974542215f001e92e3035aa08d9,A Gabor Quotient Image for Face Recognition under Varying Illumination,Mahanakorn University of Technology,Mahanakorn University of Technology,"มหาวิทยาลัยเทคโนโลยีมหานคร, 140, ถนนเชื่อมสัมพันธ์, กรุงเทพมหานคร, เขตหนองจอก, กรุงเทพมหานคร, 10530, ประเทศไทย",13.84450465,100.85620818,edu,
+d4f1eb008eb80595bcfdac368e23ae9754e1e745,Unconstrained Face Detection and Open-Set Face Recognition Challenge,"University of Colorado, Colorado Springs",University of Colorado Colorado Springs,"Main Hall, The Spine, Colorado Springs, El Paso County, Colorado, 80907, USA",38.89207560,-104.79716389,edu,
+d454ad60b061c1a1450810a0f335fafbfeceeccc,Deep Regression Forests for Age Estimation,Shanghai University,Shanghai University,"上海大学, 锦秋路, 大场镇, 宝山区 (Baoshan), 上海市, 201906, 中国",31.32235655,121.38400941,edu,
+d40cd10f0f3e64fd9b0c2728089e10e72bea9616,Enhancing Face Identification Using Local Binary Patterns and K-Nearest Neighbors,Hangzhou Dianzi University,Hangzhou Dianzi University,"杭州电子科技大学, 2号大街, 白杨街道, 江干区 (Jianggan), 杭州市 Hangzhou, 浙江省, 310018, 中国",30.31255250,120.34309460,edu,
+d46a5bba21f897f1c4b3366dcb663820ef1c282d,Cerebral Hemodynamic Response to Faces,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+d476b357c5bbc7bfae06a3876a5c0852d31d1b6e,A Novel Visual Organization Based on Topological Perception,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+d476b357c5bbc7bfae06a3876a5c0852d31d1b6e,A Novel Visual Organization Based on Topological Perception,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+d46e793b945c4f391031656357625e902c4405e8,Face-off: automatic alteration of facial features,National Taiwan University of Science and Technology,National Taiwan University of Science and Technology,"臺科大, 43, 基隆路四段, 學府里, 下內埔, 大安區, 臺北市, 10607, 臺灣",25.01353105,121.54173736,edu,
+d4c2d26523f577e2d72fc80109e2540c887255c8,Face-space Action Recognition by Face-Object Interactions,Weizmann Institute of Science,Weizmann Institute of Science,"מכון ויצמן למדע, שדרת מרכוס זיו, מעונות שיין, אחוזות הנשיא, רחובות, מחוז המרכז, NO, ישראל",31.90784990,34.81334092,edu,
+d4d2014f05e17869b72f180fd0065358c722ac65,UNIVERSITY OF CALGARY A MULTIMODAL BIOMETRIC SYSTEM BASED ON RANK LEVEL FUSION by MD. MARUF MONWAR A THESIS SUBMITTED TO THE FACULTY OF GRADUATE STUDIES IN PARTIAL FULFILMENT OF THE REQUIREMENTS FOR THE DEGREE OF DOCTOR OF PHILOSOPHY,University of Calgary,University of Calgary,"University of Calgary, Service Tunnel, University Heights, Calgary, Alberta, T2N 1N7, Canada",51.07840380,-114.12870770,edu,
+d467035d83fb4e86c4a47b2ca87894388deb8c44,Relief R-CNN : Utilizing Convolutional Feature Interrelationship for Object Detection,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+d437a69d631b48583acc19c946b48e7d601d7853,Trace Norm Regularised Deep Multi-Task Learning,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+ba21fd28003994480f713b0a1276160fea2e89b5,Identification of Individuals from Ears in Real World Conditions,University of South Florida,University of South Florida,"University of South Florida, Leroy Collins Boulevard, Tampa, Hillsborough County, Florida, 33620, USA",28.05999990,-82.41383619,edu,
+ba21fd28003994480f713b0a1276160fea2e89b5,Identification of Individuals from Ears in Real World Conditions,University of South Florida,University of South Florida,"University of South Florida, Leroy Collins Boulevard, Tampa, Hillsborough County, Florida, 33620, USA",28.05999990,-82.41383619,edu,
+baaaf73ec28226d60d923bc639f3c7d507345635,Emotion Classification on face images,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+ba2bbef34f05551291410103e3de9e82fdf9dddd,A Study on Cross-Population Age Estimation,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+ba2bbef34f05551291410103e3de9e82fdf9dddd,A Study on Cross-Population Age Estimation,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+baff74e4a9880d7477799822d8e68224466f3e76,What Can Help Pedestrian Detection?,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+baff74e4a9880d7477799822d8e68224466f3e76,What Can Help Pedestrian Detection?,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+baff74e4a9880d7477799822d8e68224466f3e76,What Can Help Pedestrian Detection?,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+baff74e4a9880d7477799822d8e68224466f3e76,What Can Help Pedestrian Detection?,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+bafd1978d6a68db89b4b75008e1bb53aea81f632,DeMIAN: Deep Modality Invariant Adversarial Network,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+baa0fe4d0ac0c7b664d4c4dd00b318b6d4e09143,Facial Expression Analysis using Active Shape Model,University of Portsmouth,University of Portsmouth,"University of Portsmouth - North Zone, Portland Street, Portsea, Portsmouth, South East, England, PO1 3DE, UK",50.79805775,-1.09834911,edu,
+ba25c219b52d675b579941364ce6ee6700cea8e8,8D-THERMO CAM: Combination of Geometry with Physiological Information for Face Recognition,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+bacc83feb1146bb3d4cb3fa6304090c2ceb6e0d0,Attribute Learning for Network Intrusion Detection,University of Coimbra,University of Coimbra,"Reitoria da Universidade de Coimbra, Rua de Entre-Colégios, Almedina, Alta, Almedina, Sé Nova, Santa Cruz, Almedina e São Bartolomeu, CBR, Coimbra, Baixo Mondego, Centro, 3000-062, Portugal",40.20759510,-8.42566148,edu,
+baa1e6894024223a928aa00be698247ae253e7cb,"Patterns of eye movements when male and female observers judge female attractiveness , body fat and waist-to-hip ratio",University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+baa1e6894024223a928aa00be698247ae253e7cb,"Patterns of eye movements when male and female observers judge female attractiveness , body fat and waist-to-hip ratio",Newcastle University,Newcastle University,"Newcastle University, Claremont Walk, Haymarket, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE1 7RU, UK",54.98023235,-1.61452627,edu,
+ba82f4ebd5e62c049387dcb6a1bffbc5d23aea2b,Ordinal Depth Supervision for 3D Human Pose Estimation,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+ba82f4ebd5e62c049387dcb6a1bffbc5d23aea2b,Ordinal Depth Supervision for 3D Human Pose Estimation,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+badcfb7d4e2ef0d3e332a19a3f93d59b4f85668e,The Application of Extended Geodesic Distance in Head Poses Estimation,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+badcfb7d4e2ef0d3e332a19a3f93d59b4f85668e,The Application of Extended Geodesic Distance in Head Poses Estimation,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+badcfb7d4e2ef0d3e332a19a3f93d59b4f85668e,The Application of Extended Geodesic Distance in Head Poses Estimation,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+ba26bf9ffd328d23faca2deea9ebb3292bddcd93,Neural Styling for Interpretable Fair Representations,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+ba9e967208976f24a09730af94086e7ae0417067,An Open Source Framework for Standardized Comparisons of Face Recognition Algorithms,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+bac11ce0fb3e12c466f7ebfb6d036a9fe62628ea,Weakly Supervised Learning of Heterogeneous Concepts in Videos,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+ba0ac513d656eef49666ea2231b516bab286661b,"SUBSPACE CLUSTERING BY (k, k)-SPARSE MATRIX FACTORIZATION",Hong Kong University of Science and Technology,Hong Kong University of Science and Technology,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.33863040,114.26203370,edu,
+ba7b12c8e2ff3c5e4e0f70b58215b41b18ff8feb,Natural and Effective Obfuscation by Head Inpainting,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+bab65e5a5e0768fbddfaa0fa85f9fe9a51d38b6c,Personalized Modeling of Facial Action Unit Intensity,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+bab65e5a5e0768fbddfaa0fa85f9fe9a51d38b6c,Personalized Modeling of Facial Action Unit Intensity,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+bab65e5a5e0768fbddfaa0fa85f9fe9a51d38b6c,Personalized Modeling of Facial Action Unit Intensity,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+a05e84f77e1dacaa1c59ba0d92919bdcfe4debbb,Video Question Answering via Hierarchical Spatio-Temporal Attention Networks,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+a05e84f77e1dacaa1c59ba0d92919bdcfe4debbb,Video Question Answering via Hierarchical Spatio-Temporal Attention Networks,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+a01d22166ed62f5ad485ae32827c70d583a88564,Zero-Shot Learning by Convex Combination of Semantic Embeddings,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+a01d22166ed62f5ad485ae32827c70d583a88564,Zero-Shot Learning by Convex Combination of Semantic Embeddings,Google,"Google, Inc.","1600 Amphitheatre Pkwy, Mountain View, CA 94043, USA",37.42199990,-122.08405750,company,"Google, Mountain View, CA"
+a065080353d18809b2597246bb0b48316234c29a,FHEDN: A based on context modeling Feature Hierarchy Encoder-Decoder Network for face detection,Chongqing University,Chongqing University,"重庆工商大学, 19, 翠林路, 重庆市, 重庆市中心, 南岸区 (Nan'an), 重庆市, 400067, 中国",29.50841740,106.57858552,edu,
+a0dc8911c47c3d0e1643ecfaa7032cee6fb5eb64,Street-to-shop: Cross-scenario clothing retrieval via parts alignment and auxiliary set,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+a0f193c86e3dd7e0020c0de3ec1e24eaff343ce4,A New Classification Approach using Discriminant Functions,Sakarya University,Sakarya University,"Sakarya Üniversitesi Diş Hekimliği Fakültesi, Adnan Menderes Caddesi, Güneşler, Adapazarı, Sakarya, Marmara Bölgesi, 54050, Türkiye",40.76433515,30.39407875,edu,
+a07a894108b5ddc19d18e66e969f47a3b2a6e006,On-the-Fly Performance Evaluation of Large-Scale Fiber Track- ing,University of South Carolina,University of South Carolina,"University of South Carolina, Wheat Street, Columbia, Richland County, South Carolina, 29205, USA",33.99282980,-81.02685168,edu,
+a0dc68c546e0fc72eb0d9ca822cf0c9ccb4b4c4f,Fusing with context: A Bayesian approach to combining descriptive attributes,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+a0dc68c546e0fc72eb0d9ca822cf0c9ccb4b4c4f,Fusing with context: A Bayesian approach to combining descriptive attributes,University of North Carolina Wilmington,University of North Carolina Wilmington,"Kenan House, 1705, Market Street, Wilmington, New Hanover County, North Carolina, 28403, USA",34.23755810,-77.92701290,edu,
+a0c670e76594bb72992a92fd8d51b42cee868a50,Hierarchical Cross Network for Person Re-identification,Institute of Information Science,Institute of Information Science,"資訊科學研究所, 數理大道, 中研里, 南港子, 南港區, 臺北市, 11574, 臺灣",25.04107280,121.61475620,edu,
+a0021e3bbf942a88e13b67d83db7cf52e013abfd,Human concerned object detecting in video,Shandong University,Shandong University,"山东大学, 泰安街, 鳌山卫街道, 即墨区, 青岛市, 山东省, 266200, 中国",36.36934730,120.67381800,edu,
+a082b1ee9a5bafe678539e694197c0910d4a09b2,Point-pair descriptors for 3D facial landmark localisation,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+a0d6390dd28d802152f207940c7716fe5fae8760,Bayesian Face Revisited: A Joint Formulation,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+a0d6390dd28d802152f207940c7716fe5fae8760,Bayesian Face Revisited: A Joint Formulation,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+a0d6390dd28d802152f207940c7716fe5fae8760,Bayesian Face Revisited: A Joint Formulation,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+a04273851ae262e884b175c22decd56cbd24e14e,Correcting the Triplet Selection Bias for Triplet Loss,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+a04273851ae262e884b175c22decd56cbd24e14e,Correcting the Triplet Selection Bias for Triplet Loss,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+a04273851ae262e884b175c22decd56cbd24e14e,Correcting the Triplet Selection Bias for Triplet Loss,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+a0ddf6e9697631f771d73b721a3d871db6a04f6c,Multi-view Facial Expressions Recognition using Local Linear Regression of Sparse Codes,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+a0aa32bb7f406693217fba6dcd4aeb6c4d5a479b,Cascaded Regressor based 3D Face Reconstruction from a Single Arbitrary View Image,"Sichuan University, Chengdu","Sichuan Univ., Chengdu","四川大学(华西校区), 校东路, 武侯区, 武侯区 (Wuhou), 成都市 / Chengdu, 四川省, 610014, 中国",30.64276900,104.06751175,edu,
+a06f0b2d569cbef0822ae5e8625b4cb2a7f1d78c,Effective scaling registration approach by imposing the emphasis on the scale factor,Jiangnan University,Jiangnan University,"江南大学站, 蠡湖大道, 滨湖区, 南场村, 滨湖区 (Binhu), 无锡市 / Wuxi, 江苏省, 214121, 中国",31.48542550,120.27395810,edu,
+a0e9064d59cb3b23b425bb954dd8c77fdc8637c8,The Neural Painter: Multi-Turn Image Generation,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+a06b6d30e2b31dc600f622ab15afe5e2929581a7,Robust Joint and Individual Variance Explained,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+a06b6d30e2b31dc600f622ab15afe5e2929581a7,Robust Joint and Individual Variance Explained,Middlesex University,Middlesex University,"Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK",51.59029705,-0.22963221,edu,
+a0b1990dd2b4cd87e4fd60912cc1552c34792770,Deep Constrained Local Models for Facial Landmark Detection,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+a0b1990dd2b4cd87e4fd60912cc1552c34792770,Deep Constrained Local Models for Facial Landmark Detection,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+a0b1990dd2b4cd87e4fd60912cc1552c34792770,Deep Constrained Local Models for Facial Landmark Detection,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+a06a7b1236c16d3628b39e3c37d566499c3446f0,Global Binary Patterns: A Novel Shape Descriptor,Middle East Technical University,Middle East Technical University,"ODTÜ, 1, 1591.sk(315.sk), Çiğdem Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87549675,32.78553506,edu,
+a0e7f8771c7d83e502d52c276748a33bae3d5f81,Ensemble Nyström,Courant Institute of Mathematical Sciences,Courant Institute of Mathematical Sciences,"Courant Institute of Mathematical Sciences, 251, Mercer Street, Washington Square Village, Greenwich Village, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72864840,-73.99568630,edu,
+a0d1e2934e6fbf42175fe6f04c281a976dc33975,"Social Attention, Affective Arousal and Empathy in Men with Klinefelter Syndrome (47,XXY): Evidence from Eyetracking and Skin Conductance",Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.48799610,-3.17969747,edu,
+a08f09c5923dd2a114da1504379e57e8eb87ced6,Estimation of psychological stress levels using Facial Expression Spatial Charts,Akita Prefectural University,Akita Prefectural University,"秋田県立大学, 秋田天王線, 潟上市, 秋田県, 東北地方, 011-0946, 日本",39.80114990,140.04591160,edu,
+a0061dae94d916f60a5a5373088f665a1b54f673,Lensless computational imaging through deep learning,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+a0061dae94d916f60a5a5373088f665a1b54f673,Lensless computational imaging through deep learning,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+a02f070080d4bd0fcef8b3234ca6b8ee7c97fb50,A principled approach to remove false alarms by modelling the context of a face detector,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+a00a757b26d5c4f53b628a9c565990cdd0e51876,The BURCHAK corpus: a Challenge Data Set for Interactive Learning of Visually Grounded Word Meanings,Heriot-Watt University,Heriot-Watt University,"Heriot-Watt University - Edinburgh Campus, Third Gait, Currie, Gogarbank, City of Edinburgh, Scotland, EH14 4AS, UK",55.91029135,-3.32345777,edu,
+a00a757b26d5c4f53b628a9c565990cdd0e51876,The BURCHAK corpus: a Challenge Data Set for Interactive Learning of Visually Grounded Word Meanings,Heriot-Watt University,Heriot-Watt University,"Heriot-Watt University - Edinburgh Campus, Third Gait, Currie, Gogarbank, City of Edinburgh, Scotland, EH14 4AS, UK",55.91029135,-3.32345777,edu,
+a00a757b26d5c4f53b628a9c565990cdd0e51876,The BURCHAK corpus: a Challenge Data Set for Interactive Learning of Visually Grounded Word Meanings,University of Groningen,University of Groningen,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland",53.21967825,6.56251482,edu,
+a00a757b26d5c4f53b628a9c565990cdd0e51876,The BURCHAK corpus: a Challenge Data Set for Interactive Learning of Visually Grounded Word Meanings,Heriot-Watt University,Heriot-Watt University,"Heriot-Watt University - Edinburgh Campus, Third Gait, Currie, Gogarbank, City of Edinburgh, Scotland, EH14 4AS, UK",55.91029135,-3.32345777,edu,
+a0848d7b1bb43f4b4f1b4016e58c830f40944817,Face Matching for Post-Disaster Family Reunification,National Institutes of Health,National Institutes of Health,"NIH, Pooks Hill, Bethesda, Montgomery County, Maryland, USA",39.00041165,-77.10327775,edu,
+a0c81783ec60bd64aefc49285eb082a8185d49c1,TGIF-QA: Toward Spatio-Temporal Reasoning in Visual Question Answering,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+a7dab944b42c87c52df2abe016158eafb110b2af,A Python-Based Open Source System for Geographic Object-Based Image Analysis (GEOBIA) Utilizing Raster Attribute Tables,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+a7dab944b42c87c52df2abe016158eafb110b2af,A Python-Based Open Source System for Geographic Object-Based Image Analysis (GEOBIA) Utilizing Raster Attribute Tables,Aberystwyth University,Aberystwyth University,"Aberystwyth University, Llanbadarn Campus, Cefn Esgair, Waun Fawr, Comins Coch, Ceredigion, Wales, SY23 3JG, UK",52.41073580,-4.05295501,edu,
+a7dab944b42c87c52df2abe016158eafb110b2af,A Python-Based Open Source System for Geographic Object-Based Image Analysis (GEOBIA) Utilizing Raster Attribute Tables,University of New South Wales,University of New South Wales,"UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia",-33.91758275,151.23124025,edu,
+a75095fcfa78972dd222810fb3e39d77ff6493aa,Fusing Complementary Operators to Enhance Foreground/Background Segmentation,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+a71d0bf3b8fa6cf0069fe12f3fe6d695fac7dd44,DRAG: A Database for Recognition and Analysis of Gait,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+a71d0bf3b8fa6cf0069fe12f3fe6d695fac7dd44,DRAG: A Database for Recognition and Analysis of Gait,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+a754989741afb89e588b52de375054dffbeda39d,Max-Margin Multiple-Instance Dictionary Learning,Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+a77e462997e903fec8d831af11b7f61b209c27a6,Free Space Estimation using Occupancy Grids and Dynamic Object Detection,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+a77342abe136fdbef8da9b43055356e3596c570c,Revealing the Secret of FaceHashing,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+a77342abe136fdbef8da9b43055356e3596c570c,Revealing the Secret of FaceHashing,University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.54724732,edu,
+a70650358cc226e7f613b49f93d7eca044ca608e,Modeling multimodal cues in a deep learning-based framework for emotion recognition in the wild,EURECOM,EURECOM,"Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France",43.61438600,7.07112500,edu,
+a70650358cc226e7f613b49f93d7eca044ca608e,Modeling multimodal cues in a deep learning-based framework for emotion recognition in the wild,EURECOM,EURECOM,"Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France",43.61438600,7.07112500,edu,
+a7fc39214fe447f650441d033401ca73b45c6633,Weakly Supervised Learning of Affordances,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+a70e36daf934092f40a338d61e0fe27be633f577,Enhanced facial feature tracking of spontaneous and continuous expressions,American University in Cairo,The American University in Cairo,"الجامعة الأمريكية بالقاهرة, شارع القصر العينى, القاهرة القديمة, جاردن سيتي, القاهرة, محافظة القاهرة, 11582, مصر",30.04287695,31.23664139,edu,
+a72821008c41032e82f377b53bd96b5f7f8be025,Action Recognition Using Discriminative Structured Trajectory Groups,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+a7a66d713776e78ae60617eee2715443a8565a23,Semantic-aware Grad-GAN for Virtual-to-Real Urban Scene Adaption,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+a7191958e806fce2505a057196ccb01ea763b6ea,Convolutional Neural Network based Age Estimation from Facial Image and Depth Prediction from Single Image,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+a7191958e806fce2505a057196ccb01ea763b6ea,Convolutional Neural Network based Age Estimation from Facial Image and Depth Prediction from Single Image,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+a7e1327bd76945a315f2869bfae1ce55bb94d165,Kernel Fisher Discriminant Analysis with Locality Preserving for Feature Extraction and Recognition,Guangdong Medical College,Guangdong Medical College,"医学院, 真如路, 凤凰新村, 天河区, 广州市, 广东省, 510635, 中国",23.12944890,113.34376110,edu,
+a7e1327bd76945a315f2869bfae1ce55bb94d165,Kernel Fisher Discriminant Analysis with Locality Preserving for Feature Extraction and Recognition,Guangdong Medical College,Guangdong Medical College,"医学院, 真如路, 凤凰新村, 天河区, 广州市, 广东省, 510635, 中国",23.12944890,113.34376110,edu,
+a7c39a4e9977a85673892b714fc9441c959bf078,Automated Individualization of Deformable Eye Region Model and Its Application to Eye Motion Analysis,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+a7c39a4e9977a85673892b714fc9441c959bf078,Automated Individualization of Deformable Eye Region Model and Its Application to Eye Motion Analysis,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+a7a0099caf89bedbf4de1c61499f999ea4fc7d98,Combining Class Taxonomies and Multi Task Learning To Regularize Fine-grained Recognition,International Institute of Information Technology,International Institute of Information Technology,"International Institute of Information Technology, Hyderabad, Campus Road, Ward 105 Gachibowli, Greater Hyderabad Municipal Corporation West Zone, Hyderabad, Rangareddy District, Telangana, 500032, India",17.44549570,78.34854698,edu,
+a7e5c01e3dca9284f8acffad750cdbb29689d3fb,Introduction to the special issue on learning semantics,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+a7e5c01e3dca9284f8acffad750cdbb29689d3fb,Introduction to the special issue on learning semantics,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+a7e5c01e3dca9284f8acffad750cdbb29689d3fb,Introduction to the special issue on learning semantics,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+a7e5c01e3dca9284f8acffad750cdbb29689d3fb,Introduction to the special issue on learning semantics,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+a75edf8124f5b52690c08ff35b0c7eb8355fe950,Authentic Emotion Detection in Real-Time Video,"Sichuan University, Chengdu","Sichuan Univ., Chengdu","四川大学(华西校区), 校东路, 武侯区, 武侯区 (Wuhou), 成都市 / Chengdu, 四川省, 610014, 中国",30.64276900,104.06751175,edu,
+a75edf8124f5b52690c08ff35b0c7eb8355fe950,Authentic Emotion Detection in Real-Time Video,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+a74bd6c9c4631117a036ce0e1c8e3d2a0b1f1f5e,Accelerated Learning-Based Interactive Image Segmentation Using Pairwise Constraints,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+a765b506d29cb46420e125c86ab6ff442905e9d6,DPatch: An Adversarial Patch Attack on Object Detectors,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+a765b506d29cb46420e125c86ab6ff442905e9d6,DPatch: An Adversarial Patch Attack on Object Detectors,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+a7b4291b1feebaed4a36808df2a17a3e452b9efa,"Zero-Shot Learning — The Good, the Bad and the Ugly",Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+a7b4291b1feebaed4a36808df2a17a3e452b9efa,"Zero-Shot Learning — The Good, the Bad and the Ugly",University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+a77008329c785c0d5d4dcb3d9c79073df85a9b4e,Neural codes of seeing architectural styles,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+a77008329c785c0d5d4dcb3d9c79073df85a9b4e,Neural codes of seeing architectural styles,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+a776acc53591c3eb0b53501d9758d984e2e52a97,Weakly Supervised Instance Segmentation using Class Peak Response,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+a776acc53591c3eb0b53501d9758d984e2e52a97,Weakly Supervised Instance Segmentation using Class Peak Response,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+a7e9d230bc44dfbe56757f3025d5b4caa49032f3,Unity in Diversity: Discovering Topics from Words - Information Theoretic Co-clustering for Visual Categorization,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+a75dfb5a839f0eb4b613d150f54a418b7812aa90,Multibiometric secure system based on deep learning,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+b88b83d2ffd30bf3bc3be3fb7492fd88f633b2fe,Subcategory-Aware Object Classification,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+b8b2acc7e5bd94651c8bb025b6311c108c7a7d37,Iteratively Learning from the Best,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+b8b0f0ca35cb02334aaa3192559fb35f0c90f8fa,Face Recognition in Low-resolution Images by Using Local Zernike Moments,Istanbul Technical University,Istanbul Technical University,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye",41.10427915,29.02231159,edu,
+b88ceded6467e9b286f048bb1b17be5998a077bd,Sparse Subspace Clustering via Diffusion Process,Curtin University,Curtin University,"Curtin University, Brand Drive, Waterford, Perth, Western Australia, 6102, Australia",-32.00686365,115.89691775,edu,
+b85308870c2b6b8b46ec78908bfd3140ed1398ad,Learning Latent Super-Events to Detect Multiple Activities in Videos,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+b8375ff50b8a6f1a10dd809129a18df96888ac8b,Natural Video Sequence Prediction,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+b8375ff50b8a6f1a10dd809129a18df96888ac8b,Natural Video Sequence Prediction,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+b88d5e12089f6f598b8c72ebeffefc102cad1fc0,Robust 2DPCA and Its Application,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+b88d5e12089f6f598b8c72ebeffefc102cad1fc0,Robust 2DPCA and Its Application,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+b84b7b035c574727e4c30889e973423fe15560d7,Human Age Estimation Using Ranking SVM,HoHai University,HoHai University,"河海大学, 河海路, 小市桥, 鼓楼区, 南京市, 江苏省, 210013, 中国",32.05765485,118.75500040,edu,
+b84b7b035c574727e4c30889e973423fe15560d7,Human Age Estimation Using Ranking SVM,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+b849bfe51138d88f6cae2d602b5e2a42565fb1c7,Weakly Supervised Object Localization with Stable Segmentations,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+b85e71d4e68588211c877fff8cda267b3a6bb6c9,"End-to-end learning of motion , appearance and interaction cues for multi-target tracking",Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+b85e71d4e68588211c877fff8cda267b3a6bb6c9,"End-to-end learning of motion , appearance and interaction cues for multi-target tracking",Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+b85e71d4e68588211c877fff8cda267b3a6bb6c9,"End-to-end learning of motion , appearance and interaction cues for multi-target tracking",Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+b8a70bfba1cb51b92a3f168458f8b0af7f90df14,Action-Agnostic Human Pose Forecasting,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+b8d61dc56a4112e0317c6a7323417ee649476148,Cross Pixel Optical Flow Similarity for Self-Supervised Learning,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+b8349ba39a034d7bc693b6613f2bc173f0ac27b8,Dissertation Object Detection from Aerial Image NGUYEN,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+b83155a18b95dcb551a0787b135d61d99eb82ac5,Three-dimensional Face Imaging and Recognition: a Sensor Design and Comparative Study,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+b8caf1b1bc3d7a26a91574b493c502d2128791f6,As Far as the Eye Can See: Relationship between Psychopathic Traits and Pupil Response to Affective Stimuli,Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.48799610,-3.17969747,edu,
+b8caf1b1bc3d7a26a91574b493c502d2128791f6,As Far as the Eye Can See: Relationship between Psychopathic Traits and Pupil Response to Affective Stimuli,Swansea University,Swansea University,"Swansea University, University Footbridge, Sketty, Swansea, Wales, SA2 8PZ, UK",51.60915780,-3.97934429,edu,
+b8af24279c58a718091817236f878c805a7843e1,Context Aware Anomalous Behaviour Detection in Crowded Surveillance,Heriot-Watt University,Heriot-Watt University,"Heriot-Watt University - Edinburgh Campus, Third Gait, Currie, Gogarbank, City of Edinburgh, Scotland, EH14 4AS, UK",55.91029135,-3.32345777,edu,
+b8084d5e193633462e56f897f3d81b2832b72dff,DeepID3: Face Recognition with Very Deep Neural Networks,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+b8084d5e193633462e56f897f3d81b2832b72dff,DeepID3: Face Recognition with Very Deep Neural Networks,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+b8084d5e193633462e56f897f3d81b2832b72dff,DeepID3: Face Recognition with Very Deep Neural Networks,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+b8556e7ace156cee0199c057c5bf6eacaae45e7c,Automatic fetal face detection by locating fetal facial features from 3D ultrasound images for navigating fetoscopic tracheal occlusion surgeries,Waseda University,Waseda University,"早稲田大学 北九州キャンパス, 2-2, 有毛引野線, 八幡西区, 北九州市, 福岡県, 九州地方, 808-0135, 日本",33.88987280,130.70856205,edu,
+b8556e7ace156cee0199c057c5bf6eacaae45e7c,Automatic fetal face detection by locating fetal facial features from 3D ultrasound images for navigating fetoscopic tracheal occlusion surgeries,Osaka University,Osaka University,"大阪大学清明寮, 服部西町四丁目, 豊中市, 大阪府, 近畿地方, 日本",34.80809035,135.45785218,edu,
+b873246d9c474bf7799d6f45deb1155144dbd6b5,Image Analysis for Face Recognition,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+b81a5b676e5e8eee7dc99d5319ecb963f22d05c5,T ^2 2 Net: Synthetic-to-Realistic Translation for Solving Single-Image Depth Estimation Tasks,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+b8c08c1330779283b3fbf06d133faf8bd55ea941,Online Regression with Feature Selection in Stochastic Data Streams,Florida State University,Florida State University,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA",30.44235995,-84.29747867,edu,
+b8c08c1330779283b3fbf06d133faf8bd55ea941,Online Regression with Feature Selection in Stochastic Data Streams,Florida State University,Florida State University,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA",30.44235995,-84.29747867,edu,
+b8c08c1330779283b3fbf06d133faf8bd55ea941,Online Regression with Feature Selection in Stochastic Data Streams,Florida State University,Florida State University,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA",30.44235995,-84.29747867,edu,
+b8ebda42e272d3617375118542d4675a0c0e501d,Deep Hashing Network for Unsupervised Domain Adaptation,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+b87b0fa1ac0aad0ca563844daecaeecb2df8debf,Non-photorealistic rendering of portraits,Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.48799610,-3.17969747,edu,
+b87db5ac17312db60e26394f9e3e1a51647cca66,Semi-definite Manifold Alignment,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+b87db5ac17312db60e26394f9e3e1a51647cca66,Semi-definite Manifold Alignment,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+b8df2c2fa02a37d09b73277ca4edde654ac80953,Exploiting Facial Landmarks for Emotion Recognition in the Wild,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+b859d1fc1a7ad756815490527319d458fa9af3d2,Learning Structure and Strength of CNN Filters for Small Sample Size Training,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+b81cae2927598253da37954fb36a2549c5405cdb,Experiments on Visual Information Extraction with the Faces of Wikipedia,Polytechnique Montreal,Polytechnique Montréal,"2900 Boulevard Edouard-Montpetit, Montréal, QC H3T 1J4, Canada",45.50438400,-73.61288290,edu,"Polytechnique Montreal, Montreal, Quebec, Canada"
+b10427999fbde2d90e3541c477e2f6ba4c8f08cc,Bridge Video and Text with Cascade Syntactic Structure,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+b13b6e3dfdf6d708a923c547113d99047f1a0374,Neural activation to emotional faces in adolescents with autism spectrum disorders.,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+b13b6e3dfdf6d708a923c547113d99047f1a0374,Neural activation to emotional faces in adolescents with autism spectrum disorders.,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+b191aa2c5b8ece06c221c3a4a0914e8157a16129,Deep Spatio-temporal Manifold Network for Action Recognition,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+b191aa2c5b8ece06c221c3a4a0914e8157a16129,Deep Spatio-temporal Manifold Network for Action Recognition,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+b191aa2c5b8ece06c221c3a4a0914e8157a16129,Deep Spatio-temporal Manifold Network for Action Recognition,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+b13bf657ca6d34d0df90e7ae739c94a7efc30dc3,Attribute and Simile Classifiers for Face Verification (In submission please do not distribute.),Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+b13bf657ca6d34d0df90e7ae739c94a7efc30dc3,Attribute and Simile Classifiers for Face Verification (In submission please do not distribute.),Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+b13bf657ca6d34d0df90e7ae739c94a7efc30dc3,Attribute and Simile Classifiers for Face Verification (In submission please do not distribute.),Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+b13bf657ca6d34d0df90e7ae739c94a7efc30dc3,Attribute and Simile Classifiers for Face Verification (In submission please do not distribute.),Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+b1177aad0db8bd6b605ffe0d68addaf97b1f9a6b,Visual Representations and Models: From Latent SVM to Deep Learning,"KTH Royal Institute of Technology, Stockholm","KTH Royal Institute of Technology, Stockholm","KTH, Teknikringen, Lärkstaden, Norra Djurgården, Östermalms stadsdelsområde, Sthlm, Stockholm, Stockholms län, Svealand, 114 28, Sverige",59.34986645,18.07063213,edu,
+b13a882e6168afc4058fe14cc075c7e41434f43e,Recognition of Humans and Their Activities Using Video,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+b1665e1ddf9253dcaebecb48ac09a7ab4095a83e,Emotion Recognition Using Facial Expressions with Active Appearance Models,University of North Carolina Wilmington,University of North Carolina Wilmington,"Kenan House, 1705, Market Street, Wilmington, New Hanover County, North Carolina, 28403, USA",34.23755810,-77.92701290,edu,
+b1665e1ddf9253dcaebecb48ac09a7ab4095a83e,Emotion Recognition Using Facial Expressions with Active Appearance Models,University of North Carolina Wilmington,University of North Carolina Wilmington,"Kenan House, 1705, Market Street, Wilmington, New Hanover County, North Carolina, 28403, USA",34.23755810,-77.92701290,edu,
+b16580d27bbf4e17053f2f91bc1d0be12045e00b,Pose-Invariant Face Recognition with a Two-Level Dynamic Programming Algorithm,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+b13e819c48bcb2362614c18cdcd7a53d90944fea,3D Face Recognition in the Presence of Expression: A Guidance-based Constraint Deformation Approach,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+b1b993a1fbcc827bcb99c4cc1ba64ae2c5dcc000,Deep Variation-Structured Reinforcement Learning for Visual Relationship and Attribute Detection,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+b11bb6bd63ee6f246d278dd4edccfbe470263803,Joint Voxel and Coordinate Regression for Accurate 3D Facial Landmark Localization,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+b1e8476673ee55f3e33bfb7c5f309032522c4c1f,Context-Dependent Diffusion Network for Visual Relationship Detection,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+b1e8476673ee55f3e33bfb7c5f309032522c4c1f,Context-Dependent Diffusion Network for Visual Relationship Detection,Southeast University,Southeast University,"SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国",32.05752790,118.78682252,edu,
+b1d2001e877bb36c8ccc97bee62d9824a3b8874d,Top-Down Attention Recurrent VLAD Encoding for Action Recognition in Videos,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+b13e2e43672e66ba45d1b852a34737e4ce04226b,Face Painting: querying art with photos,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+b1e4f8c15ff30cc7d35ab25ff3eddaf854e0a87c,Conveying facial expressions to blind and visually impaired persons through a wearable vibrotactile device,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+b1e4f8c15ff30cc7d35ab25ff3eddaf854e0a87c,Conveying facial expressions to blind and visually impaired persons through a wearable vibrotactile device,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+b1e218046a28d10ec0be3272809608dea378eddc,Overview of the Multiple Biometrics Grand Challenge,National Institute of Standards and Technology,National Institute of Standards and Technology,"National Institute of Standards and Technology, Summer Walk Drive, Diamond Farms, Gaithersburg, Montgomery County, Maryland, 20878, USA",39.12549380,-77.22293475,edu,
+b18858ad6ec88d8b443dffd3e944e653178bc28b,Trojaning Attack on Neural Networks,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+b18858ad6ec88d8b443dffd3e944e653178bc28b,Trojaning Attack on Neural Networks,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+b18858ad6ec88d8b443dffd3e944e653178bc28b,Trojaning Attack on Neural Networks,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+b18858ad6ec88d8b443dffd3e944e653178bc28b,Trojaning Attack on Neural Networks,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+b18858ad6ec88d8b443dffd3e944e653178bc28b,Trojaning Attack on Neural Networks,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+b18858ad6ec88d8b443dffd3e944e653178bc28b,Trojaning Attack on Neural Networks,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+b10319193be303038a9f58e7552632791e3f1ada,From One-Trick Ponies to All-Rounders: On-Demand Learning for Image Restoration,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+b133b2d7df9b848253b9d75e2ca5c68e21eba008,"Kobe University, NICT and University of Siegen at TRECVID 2017 AVS Task",Kobe University,Kobe University,"神戸大学, 灘三田線, 灘区, 神戸市, 兵庫県, 近畿地方, 657-00027, 日本",34.72757140,135.23710000,edu,
+b133b2d7df9b848253b9d75e2ca5c68e21eba008,"Kobe University, NICT and University of Siegen at TRECVID 2017 AVS Task",Kobe University,Kobe University,"神戸大学, 灘三田線, 灘区, 神戸市, 兵庫県, 近畿地方, 657-00027, 日本",34.72757140,135.23710000,edu,
+b1df214e0f1c5065f53054195cd15012e660490a,Supplementary Material to Sparse Coding and Dictionary Learning with Linear Dynamical Systems,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+b136b5f3fb84867ba89ad5e2ef3266e09d54e232,Training Convolutional Neural Networks with Limited Training Data for Ear Recognition in the Wild,University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.05015580,14.46907327,edu,
+b136b5f3fb84867ba89ad5e2ef3266e09d54e232,Training Convolutional Neural Networks with Limited Training Data for Ear Recognition in the Wild,University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.05015580,14.46907327,edu,
+b11872621d9550ec2f1d09f2f02237182744e2ee,Less is More: Unified Model for Unsupervised Multi-Domain Image-to-Image Translation,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+b11872621d9550ec2f1d09f2f02237182744e2ee,Less is More: Unified Model for Unsupervised Multi-Domain Image-to-Image Translation,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+b11872621d9550ec2f1d09f2f02237182744e2ee,Less is More: Unified Model for Unsupervised Multi-Domain Image-to-Image Translation,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+b11872621d9550ec2f1d09f2f02237182744e2ee,Less is More: Unified Model for Unsupervised Multi-Domain Image-to-Image Translation,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+b11872621d9550ec2f1d09f2f02237182744e2ee,Less is More: Unified Model for Unsupervised Multi-Domain Image-to-Image Translation,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+b185f0a39384ceb3c4923196aeed6d68830a069f,Describing Clothing by Semantic Attributes,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+b185f0a39384ceb3c4923196aeed6d68830a069f,Describing Clothing by Semantic Attributes,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+b19e8bce7a3180456f8748caabade89dd802ea84,Inferring and Executing Programs for Visual Reasoning Supplementary Material,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+b1429e4d3dd3412e92a37d2f9e0721ea719a9b9e,Person re-identification using multiple first-person-views on wearable devices,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+b1fdd4ae17d82612cefd4e78b690847b071379d3,Supervised Descent Method,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+b1fdd4ae17d82612cefd4e78b690847b071379d3,Supervised Descent Method,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+dddfc10d9649a936cc440c1f3590b14e51a81daa,Bringing Background into the Foreground: Making All Classes Equal in Weakly-Supervised Video Semantic Segmentation,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+dda95e28395324aa87027d9692423b3a6f42dd4a,Improved Multi-Person Tracking with Active Occlusion Handling,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+dd7875abad93418e275825116e029766ada9b9c6,Kinect-based automatic 3D high-resolution face modeling,Shandong University,Shandong University,"山东大学, 泰安街, 鳌山卫街道, 即墨区, 青岛市, 山东省, 266200, 中国",36.36934730,120.67381800,edu,
+dd173dc349312810ec45ed4b346190ff2250ddd8,PaDNet: Pan-Density Crowd Counting,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+dd609e4bd83cfcdbf64fc794da73a36398076890,Recurrent Human Pose Estimation,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+dd609e4bd83cfcdbf64fc794da73a36398076890,Recurrent Human Pose Estimation,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+ddf25fe84789821d204fd09026bb02d891d50399,Multi-Shot Human Re-Identification Using Adaptive Fisher Discriminant Analysis,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+dd471f321ead8b405da6194057b2778ef3db7ea7,Multi-Task Adversarial Network for Disentangled Feature Learning,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+dda403e6d9b61e3fa84fafb3aa2f70884d03a944,Transductive Multi-view Embedding for Zero-Shot Recognition and Annotation,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+dd85b6fdc45bf61f2b3d3d92ce5056c47bd8d335,Unsupervised Learning and Segmentation of Complex Activities from Video,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+dda0b381c162695f21b8d1149aab22188b3c2bc0,Occluded Person Re-Identification,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+dda0b381c162695f21b8d1149aab22188b3c2bc0,Occluded Person Re-Identification,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+dd7faa9ebacb64bcf4210c3be76202c592e3d637,"Comparison of Visible, Thermal Infra-Red and Range Images for Face Recognition",University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+dd6b6beba7202deb1ceeb241438fdfd48e88b394,Multiple Granularity Descriptors for Fine-Grained Categorization,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+dda35768681f74dafd02a667dac2e6101926a279,Multi-layer temporal graphical model for head pose estimation in real-world videos,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+ddeececa11517bea0d21804e3f724612dac1a5c5,"""Factual"" or ""Emotional"": Stylized Image Captioning with Adaptive Learning and Attention",University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+ddeececa11517bea0d21804e3f724612dac1a5c5,"""Factual"" or ""Emotional"": Stylized Image Captioning with Adaptive Learning and Attention",Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+dd0aef0d44e740580212d6efb5286446494729ba,Multi-feature canonical correlation analysis for face photo-sketch image retrieval,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+dd0aef0d44e740580212d6efb5286446494729ba,Multi-feature canonical correlation analysis for face photo-sketch image retrieval,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+dd033d4886f2e687b82d893a2c14dae02962ea70,Facial Expression Recognition Using New Feature Extraction Algorithm,National Cheng Kung University,National Cheng Kung University,"成大, 1, 大學路, 大學里, 前甲, 東區, 臺南市, 70101, 臺灣",22.99919160,120.21625134,edu,
+dd0be14c30714c77421dfe6cba31ed0b523434ae,Reducing Physical-attractiveness bias in hiring decisions: An experimental investigation,Lund University,Lund University,"TEM at Lund University, 9, Klostergatan, Stadskärnan, Centrum, Lund, Skåne, Götaland, 22222, Sverige",55.70395710,13.19020110,edu,
+ddb49e36570af09d96059b3b6f08f9124aafe24f,A Non-Iterative Approach to Reconstruct Face Templates from Match Scores,University of South Florida,University of South Florida,"University of South Florida, Leroy Collins Boulevard, Tampa, Hillsborough County, Florida, 33620, USA",28.05999990,-82.41383619,edu,
+dd900526f95079e6532a26d0423357bf8ad43afc,Modeling Image Virality with Pairwise Spatial Transformer Networks,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+dd0262d63ab7e2a9ab90478394b9fb56d17ed71c,Triple consistency loss for pairing distributions in GAN-based face synthesis,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+dd30d7e32046c333de78a9380ac6b76f4ce307b0,Probabilistic Siamese Network for Learning Representations,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+ddaa8add8528857712424fd57179e5db6885df7c,Localizing Actions from Video Labels and Pseudo-Annotations,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+ddaa8add8528857712424fd57179e5db6885df7c,Localizing Actions from Video Labels and Pseudo-Annotations,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+dd096d3cac4a9f26d38e135f803621d932c84f83,Hallucinating very low-resolution and obscured face images,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+dda1be806ab56ca58187621a0c2e4d2b8ad429ac,Visual Tracking via Spatially Aligned Correlation Filters Network,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+dcea75410fefbe70a4736fabbf178a951b6743ed,Computer vision based interfaces for computer games,Oxford Brookes University,Oxford Brookes University,"Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK",51.75552050,-1.22615970,edu,
+dc947dc7a948aa8cb8b82f18c0de8707f6064a7d,"""But You Promised"": Methods to Improve Crowd Engagement In Non-Ground Truth Tasks",Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+dc947dc7a948aa8cb8b82f18c0de8707f6064a7d,"""But You Promised"": Methods to Improve Crowd Engagement In Non-Ground Truth Tasks",Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+dcca36085752eec824d489ed556378159464a0c8,Person Re-identification via Recurrent Feature Aggregation,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+dcca36085752eec824d489ed556378159464a0c8,Person Re-identification via Recurrent Feature Aggregation,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+dca12da787c023c97058cdb7d56e18ef287084f7,Zebrafish tracking using convolutional neural networks,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+dcf71245addaf66a868221041aabe23c0a074312,S^3FD: Single Shot Scale-Invariant Face Detector,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+dcf71245addaf66a868221041aabe23c0a074312,S^3FD: Single Shot Scale-Invariant Face Detector,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+dcc38db6c885444694f515d683bbb50521ff3990,Learning to Hallucinate Face Images via Component Generation and Enhancement,City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+dcc38db6c885444694f515d683bbb50521ff3990,Learning to Hallucinate Face Images via Component Generation and Enhancement,South China University of Technology,South China University of Technology,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.05020420,113.39880323,edu,
+dcc38db6c885444694f515d683bbb50521ff3990,Learning to Hallucinate Face Images via Component Generation and Enhancement,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+dc5cde7e4554db012d39fc41ac8580f4f6774045,Video Segmentation by Non-Local Consensus voting,Weizmann Institute of Science,Weizmann Institute of Science,"מכון ויצמן למדע, שדרת מרכוס זיו, מעונות שיין, אחוזות הנשיא, רחובות, מחוז המרכז, NO, ישראל",31.90784990,34.81334092,edu,
+dc7df544d7c186723d754e2e7b7217d38a12fcf7,Facial expression recognition using salient facial patches,"University of Sfax, Tunisia","REGIM-Labo: REsearch Groups in Intelligent Machines, University of Sfax, ENIS, BP 1173, Sfax, 3038, Tunisia","Université de Route de l'Aéroport Km 0.5 BP 1169 .3029 Sfax, Sfax, Tunisia",34.73610660,10.74272750,edu,"University of Sfax, Tunisia"
+dc7df544d7c186723d754e2e7b7217d38a12fcf7,Facial expression recognition using salient facial patches,"University of Sfax, Tunisia","REGIM-Labo: REsearch Groups in Intelligent Machines, University of Sfax, ENIS, BP 1173, Sfax, 3038, Tunisia","Université de Route de l'Aéroport Km 0.5 BP 1169 .3029 Sfax, Sfax, Tunisia",34.73610660,10.74272750,edu,"University of Sfax, Tunisia"
+dc83f97a2dd241bf1a9f53ad11d8f10eeb4f5dd6,Pixel Level Data Augmentation for Semantic Image Segmentation using Generative Adversarial Networks,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+dc83f97a2dd241bf1a9f53ad11d8f10eeb4f5dd6,Pixel Level Data Augmentation for Semantic Image Segmentation using Generative Adversarial Networks,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+dcb0afca54aa2bde50319ad5720d613a6eca36c3,Deep Pose Consensus Networks,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+dcb0afca54aa2bde50319ad5720d613a6eca36c3,Deep Pose Consensus Networks,Hanyang University,Hanyang University,"한양대, 206, 왕십리로, 사근동, 성동구, 서울특별시, 04763, 대한민국",37.55572710,127.04366420,edu,
+dc7203d64a985b86f2f44bf064220801ef279382,Multi-scale local Binary Pattern Histogram for Face Recognition,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+dc7203d64a985b86f2f44bf064220801ef279382,Multi-scale local Binary Pattern Histogram for Face Recognition,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+dc77287bb1fcf64358767dc5b5a8a79ed9abaa53,Fashion Conversation Data on Instagram,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+dc81be32ca84d43f99a4c94d4a686c84956d30fd,Visual Question Reasoning on General Dependency Tree,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+dcc1512561b342c003b489f9235c0fca527ac0b0,Cross-Task Contributions of Frontobasal Ganglia Circuitry in Response Inhibition and Conflict-Induced Slowing.,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+dcc1512561b342c003b489f9235c0fca527ac0b0,Cross-Task Contributions of Frontobasal Ganglia Circuitry in Response Inhibition and Conflict-Induced Slowing.,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+dcc1512561b342c003b489f9235c0fca527ac0b0,Cross-Task Contributions of Frontobasal Ganglia Circuitry in Response Inhibition and Conflict-Induced Slowing.,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+dcc1512561b342c003b489f9235c0fca527ac0b0,Cross-Task Contributions of Frontobasal Ganglia Circuitry in Response Inhibition and Conflict-Induced Slowing.,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+dced05d28f353be971ea2c14517e85bc457405f3,Multimodal Priority Verification of Face and Speech Using Momentum Back-Propagation Neural Network,Chung-Ang University,Chung-Ang University,"중앙대학교, 서달로15길, 흑석동, 동작구, 서울특별시, 06981, 대한민국",37.50882000,126.96190000,edu,
+dc22d96593e552700f98dd4bf76ee838f9f11145,A Review of Artificial Intelligence Algorithms Used for Smart Machine Tools,National Chung Hsing University,National Chung Hsing University,"國立中興大學, 145, 興大路, 積善里, 頂橋子頭, 南區, 臺中市, 402, 臺灣",24.12084345,120.67571165,edu,
+dce5e0a1f2cdc3d4e0e7ca0507592860599b0454,Facelet-Bank for Fast Portrait Manipulation,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+dcda558e15e309d8e3158bf2cf8e921cdb59cf5f,Target Aware Network Adaptation for Efficient Representation Learning,"KTH Royal Institute of Technology, Stockholm","KTH Royal Institute of Technology, Stockholm","KTH, Teknikringen, Lärkstaden, Norra Djurgården, Östermalms stadsdelsområde, Sthlm, Stockholm, Stockholms län, Svealand, 114 28, Sverige",59.34986645,18.07063213,edu,
+dce15becf620afd938818ce5ebd793c798782b70,A New Face Recognition Algorithm based on Dictionary Learning for a Single Training Sample per Person,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+dc9d62087ff93a821e6bb8a15a8ae2da3e39dcdd,Learning with Confident Examples: Rank Pruning for Robust Classification with Noisy Labels,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+dcce3d7e8d59041e84fcdf4418702fb0f8e35043,Probabilistic identity characterization for face recognition,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+dc72dd4690f4373a7dd14223a53ea4cc16bd5210,Framework for Objective Evaluation of Privacy Filters,EURECOM,EURECOM,"Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France",43.61438600,7.07112500,edu,
+dce3dff9216d63c4a77a2fcb0ec1adf6d2489394,Manifold Learning for Gender Classification from Face Sequences,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+dc3d7128e15ed8d97f9b29021216fc1d4053fbaa,Coarse-to-Fine Annotation Enrichment for Semantic Segmentation Learning,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+dc3d7128e15ed8d97f9b29021216fc1d4053fbaa,Coarse-to-Fine Annotation Enrichment for Semantic Segmentation Learning,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+dc3d7128e15ed8d97f9b29021216fc1d4053fbaa,Coarse-to-Fine Annotation Enrichment for Semantic Segmentation Learning,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+dc16b42a64741df2881604f28788f421e422d297,Cross-view image synthesis using geometry-guided conditional GANs,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+b64c2baf82c51a7538136c32f5193bdfef946297,UA-DETRAC 2017: Report of AVSS2017 & IWT4S Challenge on Advanced Traffic Monitoring,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+b64c2baf82c51a7538136c32f5193bdfef946297,UA-DETRAC 2017: Report of AVSS2017 & IWT4S Challenge on Advanced Traffic Monitoring,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+b6f758be954d34817d4ebaa22b30c63a4b8ddb35,A Proximity-Aware Hierarchical Clustering of Faces,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+b657702aed7aff8f1a86fa32d30a07197f8348c5,Adaptive Contour Fitting for Pose-Invariant 3D Face Shape Reconstruction,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+b6bf15f123a814538fff5db757a474be6fc0c72f,Event-Centric Twitter Photo Summarization,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+b69fbf046faf685655b5fa52fef07fb77e75eff4,Modeling guidance and recognition in categorical search: bridging human and computer object detection.,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+b69fbf046faf685655b5fa52fef07fb77e75eff4,Modeling guidance and recognition in categorical search: bridging human and computer object detection.,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+b69fbf046faf685655b5fa52fef07fb77e75eff4,Modeling guidance and recognition in categorical search: bridging human and computer object detection.,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+b69fbf046faf685655b5fa52fef07fb77e75eff4,Modeling guidance and recognition in categorical search: bridging human and computer object detection.,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+b69fbf046faf685655b5fa52fef07fb77e75eff4,Modeling guidance and recognition in categorical search: bridging human and computer object detection.,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+b62571691a23836b35719fc457e093b0db187956,A Novel approach for securing biometric template,Kurukshetra University,Kurukshetra University,"Kurukshetra University, SH6, Kurukshetra, Haryana, 132118, India",29.95826275,76.81563045,edu,
+b62571691a23836b35719fc457e093b0db187956,A Novel approach for securing biometric template,Kurukshetra University,Kurukshetra University,"Kurukshetra University, SH6, Kurukshetra, Haryana, 132118, India",29.95826275,76.81563045,edu,
+b69b239217d4e9a20fe4fe1417bf26c94ded9af9,A Temporally-Aware Interpolation Network for Video Frame Inpainting,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+b6052dc718c72f2506cfd9d29422642ecf3992ef,A Survey on Human Motion Analysis from Depth Data,University of Kentucky,University of Kentucky,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA",38.03337420,-84.50177580,edu,
+b6052dc718c72f2506cfd9d29422642ecf3992ef,A Survey on Human Motion Analysis from Depth Data,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+b6145d3268032da70edc9cfececa1f9ffa4e3f11,Face Recognition Using the Discrete Cosine Transform,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+b68881f3528fc39226ffa44220ddb41a467910b5,A preliminary investigation on the sensitivity of COTS face recognition systems to forensic analyst-style face processing for occlusions,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+b68881f3528fc39226ffa44220ddb41a467910b5,A preliminary investigation on the sensitivity of COTS face recognition systems to forensic analyst-style face processing for occlusions,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+b61a3f8b80bbd44f24544dc915f52fd30bbdf485,"Quo Vadis, Action Recognition? A New Model and the Kinetics Dataset",University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+b68e8550eb4be5f36b30b15487a15226729ae379,Extracting biometric binary strings with minimal area under the FRR curve for the hamming distance classifier,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+b61c0b11b1c25958d202b4f7ca772e1d95ee1037,Bridging Category-level and Instance-level Semantic Image Segmentation,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+b67fade804ad0ab12e484582190899fea14bc799,Making Better Use of the Crowd,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+b6555e6d52c3c9a7e04bf6debe6a6f476c1c79d5,Rotation Invariant Kernels and Their Application to Shape Analysis,Ohio State University,The Ohio State University,"The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA",40.00471095,-83.02859368,edu,
+b62ffb6a17d75363c8873a236f1d8c49d07c8a0e,An MRF-Poselets Model for Detecting Highly Articulated Humans,Singapore University of Technology and Design,Singapore University of Technology and Design,"Singapore University of Technology and Design, Simpang Bedok, Changi Business Park, Southeast, 486041, Singapore",1.34021600,103.96508900,edu,
+b6ef158d95042f39765df04373c01546524c9ccd,Im 2 vid : Future Video Prediction for Static Image Action Recognition,Virginia Polytechnic Institute and State University,Virginia Polytechnic Institute and State University,"Virginia Polytechnic Institute and State University, Duck Pond Drive, Blacksburg, Montgomery County, Virginia, 24061-9517, USA",37.21872455,-80.42542519,edu,
+b68150bfdec373ed8e025f448b7a3485c16e3201,Adversarial Image Perturbation for Privacy Protection A Game Theory Perspective,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+b6810adcfd507b2e019ebc8afe4f44f953faf946,ML-LocNet: Improving Object Localization with Multi-view Learning Network,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+b6810adcfd507b2e019ebc8afe4f44f953faf946,ML-LocNet: Improving Object Localization with Multi-view Learning Network,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"Columbus, OH 43210, USA",40.01419050,-83.03091430,edu,
+b613ea6c4fb5efdf17af090d64e9bdce41e28711,Where and When to Look? Spatio-temporal Attention for Action Recognition in Videos,University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+b613ea6c4fb5efdf17af090d64e9bdce41e28711,Where and When to Look? Spatio-temporal Attention for Action Recognition in Videos,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+b613ea6c4fb5efdf17af090d64e9bdce41e28711,Where and When to Look? Spatio-temporal Attention for Action Recognition in Videos,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+b613ea6c4fb5efdf17af090d64e9bdce41e28711,Where and When to Look? Spatio-temporal Attention for Action Recognition in Videos,University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+b603bcd53a045c7c991106423c79d5a2975b3da4,Unpaired Multi-Domain Image Generation via Regularized Conditional GANs,City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+b64cfb39840969b1c769e336a05a30e7f9efcd61,CRF-Based Context Modeling for Person Identification in Broadcast Videos,"National Institute of Informatics, Japan","National Institute of Informatics, Japan","2 Chome-1-2 Hitotsubashi, Chiyoda, Tokyo 100-0003, Japan",35.69248530,139.75825330,edu,
+b64cfb39840969b1c769e336a05a30e7f9efcd61,CRF-Based Context Modeling for Person Identification in Broadcast Videos,Nagoya University,Nagoya University,"SuperDARN (Hokkaido West), 太辛第1支線林道, 陸別町, 足寄郡, 十勝総合振興局, 北海道, 北海道地方, 日本",43.53750985,143.60768225,edu,
+b689d344502419f656d482bd186a5ee6b0140891,Structural resemblance to emotional expressions predicts evaluation of emotionally neutral faces.,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+b689d344502419f656d482bd186a5ee6b0140891,Structural resemblance to emotional expressions predicts evaluation of emotionally neutral faces.,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+b689d344502419f656d482bd186a5ee6b0140891,Structural resemblance to emotional expressions predicts evaluation of emotionally neutral faces.,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+b689d344502419f656d482bd186a5ee6b0140891,Structural resemblance to emotional expressions predicts evaluation of emotionally neutral faces.,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+b689d344502419f656d482bd186a5ee6b0140891,Structural resemblance to emotional expressions predicts evaluation of emotionally neutral faces.,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+b6d3caccdcb3fbce45ce1a68bb5643f7e68dadb3,Learning Spatio-Temporal Representation with Pseudo-3D Residual Networks,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+b6d3caccdcb3fbce45ce1a68bb5643f7e68dadb3,Learning Spatio-Temporal Representation with Pseudo-3D Residual Networks,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+b6d0e461535116a675a0354e7da65b2c1d2958d4,Deep Directional Statistics: Pose Estimation with Uncertainty Quantification,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+b6a3802075d460093977f8566c451f950edf7a47,Facilitating and Exploring Planar Homogeneous Texture for Indoor Scene Understanding,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+b6a01cd4572b5f2f3a82732ef07d7296ab0161d3,Kernel-Based Supervised Discrete Hashing for Image Retrieval,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+b6862bb11e7e72f7c2e71de9d8e5aa731f8a0df7,Probabilistic combination of static and dynamic gait features for verification,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+b6d977251b551471f5dddfb0a2e8f9c542e684d2,Recurrent Tubelet Proposal and Recognition Networks for Action Detection,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+b6d977251b551471f5dddfb0a2e8f9c542e684d2,Recurrent Tubelet Proposal and Recognition Networks for Action Detection,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+a9721f9680bb21a0849a912ed24eec9ba50def9e,Benchmarking face tracking,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+a9f652787e5669168c7b8f632c3a343dfbaa6f4b,Mining Spatial and Spatio-Temporal ROIs for Action Recognition,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+a9877bb6c56e32e3c3552e379fca67e5031ccce5,A HIERARCHICAL DYNAMIC MODEL FOR OBJECT RECOGNITION By RAKESH CHALASANI A DISSERTATION PRESENTED TO THE GRADUATE SCHOOL OF THE UNIVERSITY OF FLORIDA IN PARTIAL FULFILLMENT OF THE REQUIREMENTS FOR THE DEGREE OF DOCTOR OF PHILOSOPHY,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+a91c74e3bdbc560653e25fdb02d337a8d20186f4,Multiple Human Tracking in RGB-D Data: A Survey,University of Bristol,University of Bristol,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK",51.45848370,-2.60977520,edu,
+a9e99d6efadcf5d8f67949c5fd4e1f1c024868de,Human Action Recognition in Still Images using Bag of Latent Poselets,Amirkabir University of Technology,Amirkabir University of Technology,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎",35.70451400,51.40972058,edu,
+a90e6751ae32cb2983891ef2216293311cd6a8e9,Clustering using Ensemble Clustering Technique,Chongqing University,Chongqing University,"重庆工商大学, 19, 翠林路, 重庆市, 重庆市中心, 南岸区 (Nan'an), 重庆市, 400067, 中国",29.50841740,106.57858552,edu,
+a996f22a2d0c685f7e4972df9f45e99efc3cbb76,Towards the Success Rate of One: Real-Time Unconstrained Salient Object Detection,University of Maryland College Park,University of Maryland College Park,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",38.99203005,-76.94610290,edu,
+a9ab913cd7d2330b93e0cdab3d5fe6cc47d74513,Beyond Flickr: Not All Image Tagging Is Created Equal,University of Maryland College Park,University of Maryland College Park,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",38.99203005,-76.94610290,edu,
+a9ae68734f2a8116917f75a02dc9c1f432b6c8eb,HUMAN POSE ESTIMATION FROM A SINGLE VIEW POINT by,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+a941a3e8299fb7897fbba7467a52d14e13e7a706,Exploring Human-like Attention Supervision in Visual Question Answering,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+a941a3e8299fb7897fbba7467a52d14e13e7a706,Exploring Human-like Attention Supervision in Visual Question Answering,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+a941a3e8299fb7897fbba7467a52d14e13e7a706,Exploring Human-like Attention Supervision in Visual Question Answering,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+a96b6e645a8d3eb8efc7358a852cbfbaa32ae245,Small Group Detection in Crowds using Interaction Information,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"Columbus, OH 43210, USA",40.01419050,-83.03091430,edu,
+a9fc23d612e848250d5b675e064dba98f05ad0d9,Face Age Estimation Approach based on Deep Learning and Principle Component Analysis,Benha University,Benha University,"كلية الهندسة بشبرا جامعة بنها, شارع اليازجي, روض الفرج, القاهرة, محافظة القاهرة, 2466, مصر",30.08187270,31.24454841,edu,
+a9fc23d612e848250d5b675e064dba98f05ad0d9,Face Age Estimation Approach based on Deep Learning and Principle Component Analysis,Benha University,Benha University,"كلية الهندسة بشبرا جامعة بنها, شارع اليازجي, روض الفرج, القاهرة, محافظة القاهرة, 2466, مصر",30.08187270,31.24454841,edu,
+a910f0468ffaf85aad72c96a7214565945cd2819,Learning Comment Generation by Leveraging User-Generated Data,Hong Kong University of Science and Technology,Hong Kong University of Science and Technology,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.33863040,114.26203370,edu,
+a95c9d51b7fb53cf22cb13a806a780aa1f9d47e1,CNNs for Face Detection and Recognition,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+a95c9d51b7fb53cf22cb13a806a780aa1f9d47e1,CNNs for Face Detection and Recognition,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+a95c9d51b7fb53cf22cb13a806a780aa1f9d47e1,CNNs for Face Detection and Recognition,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+a96c3b0e4ba2949053a9e1e00751b76ef5b05816,Object Recognition with Hidden Attributes,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+a967426ec9b761a989997d6a213d890fc34c5fe3,Relative ranking of facial attractiveness,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+a947448d1db19d99abe6de2f6b6d67804786a8b1,Unsupervised Triplet Hashing for Fast Image Retrieval,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+a94cae786d515d3450d48267e12ca954aab791c4,YawDD: a yawning detection dataset,University of Ottawa,University of Ottawa,"University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada",45.42580475,-75.68740118,edu,
+a9e0e667537c9059b3050a64d22b8fe86787d913,"Detecting and Tracking Vehicles , Pedestrians , and Bicyclists at Intersections with a Stationary Lidar",Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+a9e0e667537c9059b3050a64d22b8fe86787d913,"Detecting and Tracking Vehicles , Pedestrians , and Bicyclists at Intersections with a Stationary Lidar",Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+a9a414604cff39f1a03c5547385dc421e6c8452e,Fully-Convolutional Siamese Networks for Object Tracking,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+a9be20954e9177d8b2bc39747acdea4f5496f394,Event-Specific Image Importance,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+a951f9b3aa95fe53cd9b19e15ebfdbde3fd5af62,Facial electromyographic responses to emotional information from faces and voices in individuals with pervasive developmental disorder.,Maastricht University,Maastricht University,"UNS60, Professor Ten Hoorlaan, Randwyck, Maastricht, Limburg, Nederland, 6229EV, Nederland",50.83367120,5.71589000,edu,
+d5afd7b76f1391321a1340a19ba63eec9e0f9833,Statistical Analysis of Human Facial Expressions,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+d5afd7b76f1391321a1340a19ba63eec9e0f9833,Statistical Analysis of Human Facial Expressions,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+d5375f51eeb0c6eff71d6c6ad73e11e9353c1f12,Manifold Ranking-Based Locality Preserving Projections,South China University of Technology,South China University of Technology,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.05020420,113.39880323,edu,
+d5607567305b690f914fe8b043f3ca48aed57fc9,A Fast Face Detection Method via Convolutional Neural Network,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+d5d7e89e6210fcbaa52dc277c1e307632cd91dab,DOTA: A Large-scale Dataset for Object Detection in Aerial Images,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+d5d7e89e6210fcbaa52dc277c1e307632cd91dab,DOTA: A Large-scale Dataset for Object Detection in Aerial Images,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+d58f1d2fc5ea941253ff71aac7683fd3909cc71f,A Unified Framework of Subspace and Distance Metric Learning for Face Recognition,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+d5fa9d98c8da54a57abf353767a927d662b7f026,Age Estimation based on Neural Networks using Face Features,"Islamic University of Gaza, Palestine",Islamic University of Gaza - Palestine,"The Islamic University of Gaza, Mostafa Hafez Street, South Remal, محافظة غزة, ‏قطاع غزة‎, PO BOX 108, الأراضي الفلسطينية",31.51368535,34.44019341,edu,
+d5440779ca69a2f010e57250f53a9be0116305e3,Emotional face expression modulates occipital-frontal effective connectivity during memory formation in a bottom-up fashion,University of Zurich,University of Zurich,"ZHAW, Rosenstrasse, Heiligberg, Altstadt, Winterthur, Bezirk Winterthur, Zürich, 8400, Schweiz/Suisse/Svizzera/Svizra",47.49684760,8.72981767,edu,
+d5440779ca69a2f010e57250f53a9be0116305e3,Emotional face expression modulates occipital-frontal effective connectivity during memory formation in a bottom-up fashion,University of Zurich,University of Zurich,"ZHAW, Rosenstrasse, Heiligberg, Altstadt, Winterthur, Bezirk Winterthur, Zürich, 8400, Schweiz/Suisse/Svizzera/Svizra",47.49684760,8.72981767,edu,
+d5440779ca69a2f010e57250f53a9be0116305e3,Emotional face expression modulates occipital-frontal effective connectivity during memory formation in a bottom-up fashion,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+d5440779ca69a2f010e57250f53a9be0116305e3,Emotional face expression modulates occipital-frontal effective connectivity during memory formation in a bottom-up fashion,University of Missouri,University of Missouri,"L1, Maguire Boulevard, Lemone Industrial Park, Columbia, Boone County, Missouri, 65201, USA",38.92676100,-92.29193783,edu,
+d5440779ca69a2f010e57250f53a9be0116305e3,Emotional face expression modulates occipital-frontal effective connectivity during memory formation in a bottom-up fashion,University of Zurich,University of Zurich,"ZHAW, Rosenstrasse, Heiligberg, Altstadt, Winterthur, Bezirk Winterthur, Zürich, 8400, Schweiz/Suisse/Svizzera/Svizra",47.49684760,8.72981767,edu,
+d522d63e0e8bdbee314b45085baf40caa08fd6b1,Survey of Hashing Techniques for Compact Bit Representations of Images,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+d517b13f2b152c913b81ce534a149493517dbdad,Big Data Deep Learning: Challenges and Perspectives,Wayne State University,Wayne State University,"Parking Structure 3, East Warren Avenue, New Center, Detroit, Wayne County, Michigan, 48236, USA",42.35775700,-83.06286711,edu,
+d517b13f2b152c913b81ce534a149493517dbdad,Big Data Deep Learning: Challenges and Perspectives,Oakland University,Oakland University,"Oakland University, 201, Meadow Brook Road, Rochester Hills, Oakland County, Michigan, 48309-4401, USA",42.66663325,-83.20655752,edu,
+d56c5f0a23ecef2eeaad1b882829d709fa172632,A Temporally-Aware Interpolation Network for Video Frame Inpainting,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+d5795049ff374404231e4d0aaa7725c2afcc73c3,Image-to-Video Person Re-Identification by Reusing Cross-modal Embeddings,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+d5b0e73b584be507198b6665bcddeba92b62e1e5,Multi-Region Ensemble Convolutional Neural Networks for High-Accuracy Age Estimation,Macau University of Science and Technology,Macau University of Science and Technology,"Universidade de Ciência e Tecnologia de Macau 澳門科技大學 Macau University of Science and Technology, 偉龍馬路 Avenida Wai Long, 氹仔Taipa, 氹仔舊城區 Vila de Taipa, 嘉模堂區 Nossa Senhora do Carmo, 氹仔 Taipa, 澳門 Macau, 853, 中国",22.15263985,113.56803206,edu,
+d5b0e73b584be507198b6665bcddeba92b62e1e5,Multi-Region Ensemble Convolutional Neural Networks for High-Accuracy Age Estimation,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+d5b0e73b584be507198b6665bcddeba92b62e1e5,Multi-Region Ensemble Convolutional Neural Networks for High-Accuracy Age Estimation,University of Dundee,University of Dundee,"University of Dundee, Park Wynd, Law, Dundee, Dundee City, Scotland, DD1 4HN, UK",56.45796755,-2.98214831,edu,
+d5d472266aae563010e12ae90fe5fe6f3c484cba,Demystifying MMD GANs,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+d5d472266aae563010e12ae90fe5fe6f3c484cba,Demystifying MMD GANs,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+d561f8bb5d09e47348c86b40b5f6e4fe524fed36,Hierarchical Context Modeling Using Incremental Deep Boltzmann Machines,Middle East Technical University,Middle East Technical University,"ODTÜ, 1, 1591.sk(315.sk), Çiğdem Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87549675,32.78553506,edu,
+d561f8bb5d09e47348c86b40b5f6e4fe524fed36,Hierarchical Context Modeling Using Incremental Deep Boltzmann Machines,Middle East Technical University,Middle East Technical University,"ODTÜ, 1, 1591.sk(315.sk), Çiğdem Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87549675,32.78553506,edu,
+d589e218cc3f1b10e77d272cca5df3525e06fc95,Multi-View Image Generation from a Single-View,Southwest Jiaotong University,Southwest Jiaotong University,"西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国",30.69784700,104.05208110,edu,
+d589e218cc3f1b10e77d272cca5df3525e06fc95,Multi-View Image Generation from a Single-View,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+d5525d00bc2099700711751e33f0fae9a58577ca,Beyond Holistic Object Recognition: Enriching Image Understanding with Part States,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+d58c44bd9b464d9ac1db1344445c31364925f75a,TBN: Convolutional Neural Network with Ternary Inputs and Binary Weights,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"Columbus, OH 43210, USA",40.01419050,-83.03091430,edu,
+d56b65d0f65afdfdc217c880e9c8fdcafb23bfbe,Face Image Relighting using Locally Constrained Global Optimization,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+d28d32af7ef9889ef9cb877345a90ea85e70f7f1,Local-Global Landmark Confidences for Face Recognition,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+d28d32af7ef9889ef9cb877345a90ea85e70f7f1,Local-Global Landmark Confidences for Face Recognition,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+d26891a7769397bce150a2619ddae1636eae8263,Dynamic Context for Tracking behind Occlusions,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+d2c9f842ed2e36b14b9ea2bb2253159cf5c495ed,Unbiasing Semantic Segmentation For Robot Perception using Synthetic Data Feature Transfer,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+d22a8bac307e1550a9542c3d4e316496b968bf4f,Advancing large scale object retrieval,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+d28d697b578867500632b35b1b19d3d76698f4a9,Face Recognition Using Shape and Texture,George Mason University,George Mason University,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA",38.83133325,-77.30798839,edu,
+d29279725abfae6bffb81e59296443f3d5f7a689,Post Processing Pedestrian Detection with Background Cues,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+d23ec100432d860b12308941f8539af82a28843f,Adversarial Semantic Scene Completion from a Single Depth Image,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+d29eec5e047560627c16803029d2eb8a4e61da75,Feature Transfer Learning for Deep Face Recognition with Long-Tail Data,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+d2d5d61dfdae1c6492d15eae5f0f37f460ba4030,Non-rigid object tracking in video sequences,Czech Technical University,Czech Technical University,"České vysoké učení technické v Praze, Resslova, Nové Město, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 11121, Česko",50.07642960,14.41802312,edu,
+d2d5d61dfdae1c6492d15eae5f0f37f460ba4030,Non-rigid object tracking in video sequences,Czech Technical University,Czech Technical University,"České vysoké učení technické v Praze, Resslova, Nové Město, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 11121, Česko",50.07642960,14.41802312,edu,
+d280bcbb387b1d548173917ae82cb6944e3ceca6,Facial grid transformation: A novel face registration approach for improving facial action unit recognition,University of South Carolina,University of South Carolina,"University of South Carolina, Wheat Street, Columbia, Richland County, South Carolina, 29205, USA",33.99282980,-81.02685168,edu,
+d22dd4a6752a5ffa40aebd260ff63d2c2a9e1da1,Pose Invariant 3D Face Reconstruction,Jiangnan University,Jiangnan University,"江南大学站, 蠡湖大道, 滨湖区, 南场村, 滨湖区 (Binhu), 无锡市 / Wuxi, 江苏省, 214121, 中国",31.48542550,120.27395810,edu,
+d22dd4a6752a5ffa40aebd260ff63d2c2a9e1da1,Pose Invariant 3D Face Reconstruction,Jiangnan University,Jiangnan University,"江南大学站, 蠡湖大道, 滨湖区, 南场村, 滨湖区 (Binhu), 无锡市 / Wuxi, 江苏省, 214121, 中国",31.48542550,120.27395810,edu,
+d20efdf05444a9d7509b85f6d5cd59359b1062f2,First Person Action Recognition,International Institute of Information Technology,International Institute of Information Technology,"International Institute of Information Technology, Hyderabad, Campus Road, Ward 105 Gachibowli, Greater Hyderabad Municipal Corporation West Zone, Hyderabad, Rangareddy District, Telangana, 500032, India",17.44549570,78.34854698,edu,
+d291569f332a216e2a12238a117d747b0f4ba880,Semantic Part Segmentation with Deep Learning,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+d23bf3200adece389d6e7c866ca9105d999b23fa,Skill Assessment using Computer Vision based Analysis,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+d23bf3200adece389d6e7c866ca9105d999b23fa,Skill Assessment using Computer Vision based Analysis,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+d23bf3200adece389d6e7c866ca9105d999b23fa,Skill Assessment using Computer Vision based Analysis,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+d2336dbae3916135bc26dd064514441ea94a8a2b,Pairwise Kernels for Human Interaction Recognition,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+d2044b92486248f87bafe937779cd2167efe170c,"Connecting Deep Neural Networks to Physical, Perceptual, and Electrophysiological Auditory Signals",University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+d2044b92486248f87bafe937779cd2167efe170c,"Connecting Deep Neural Networks to Physical, Perceptual, and Electrophysiological Auditory Signals",Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+d2b2cb1d5cc1aa30cf5be7bcb0494198934caabb,A Restricted Visual Turing Test for Deep Scene and Event Understanding,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+d26b443f87df76034ff0fa9c5de9779152753f0c,A GPU-Oriented Algorithm Design for Secant-Based Dimensionality Reduction,Colorado State University,Colorado State University,"Colorado State University, West Pitkin Street, Woodwest, Fort Collins, Larimer County, Colorado, 80526-2002, USA",40.57093580,-105.08655256,edu,
+d2cd9a7f19600370bce3ea29aba97d949fe0ceb9,Separability Oriented Preprocessing for Illumination-Insensitive Face Recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+d2cd9a7f19600370bce3ea29aba97d949fe0ceb9,Separability Oriented Preprocessing for Illumination-Insensitive Face Recognition,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+d2cd9a7f19600370bce3ea29aba97d949fe0ceb9,Separability Oriented Preprocessing for Illumination-Insensitive Face Recognition,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+d22b378fb4ef241d8d210202893518d08e0bb213,Random Faces Guided Sparse Many-to-One Encoder for Pose-Invariant Face Recognition,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+d22b378fb4ef241d8d210202893518d08e0bb213,Random Faces Guided Sparse Many-to-One Encoder for Pose-Invariant Face Recognition,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+aac39ca161dfc52aade063901f02f56d01a1693c,The Analysis of Parameters t and k of LPP on Several Famous Face Databases,Jilin University,Jilin University,"吉林大学珠海校区, 丹桂路, 圣堂村, 金湾区, 珠海市, 广东省, 中国",22.05356500,113.39913285,edu,
+aadf4b077880ae5eee5dd298ab9e79a1b0114555,Using Hankel matrices for dynamics-based facial emotion recognition and pain detection,University of Palermo,DICGIM - University of Palermo,"Edificio 8, Viale delle Scienze, 90128 Palermo PA, Italy",38.10427160,13.34723540,edu,
+aa127e6b2dc0aaccfb85e93e8b557f83ebee816b,Advancing human pose and gesture recognition,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+aa127e6b2dc0aaccfb85e93e8b557f83ebee816b,Advancing human pose and gesture recognition,Wolfson College,Wolfson College,"Wolfson College, Linton Road, Norham Manor, Oxford, Oxon, South East, England, OX2 6UD, UK",51.77110760,-1.25361700,edu,
+aabe235a028a4d533053d78034f85bea39690d4f,Active Learning with Cross-Class Similarity Transfer,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+aabe235a028a4d533053d78034f85bea39690d4f,Active Learning with Cross-Class Similarity Transfer,Northumbria University,Northumbria University,"Northumbria University, Birkdale Close, High Heaton, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE7 7TP, UK",55.00306320,-1.57463231,edu,
+aa299218f9b7cda78c440117f12f193c3c4a86cb,Learning Latent Sub-events in Activity Videos Using Temporal Attention Filters,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+aacd2fefca976b963701669a77808fde973c1d02,Landmark classification in large-scale image collections,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+aa8ef6ba6587c8a771ec4f91a0dd9099e96f6d52,Improved face tracking thanks to local features correspondence,University of Brescia,University of Brescia,"Università degli Studi di Brescia, Piazza del Mercato, 15, Brescia, Lombardia, 25121, Italia",45.53926000,10.21845000,edu,
+aab3561acbd19f7397cbae39dd34b3be33220309,Quantization Mimic: Towards Very Tiny CNN for Object Detection,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+aab3561acbd19f7397cbae39dd34b3be33220309,Quantization Mimic: Towards Very Tiny CNN for Object Detection,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+aab3561acbd19f7397cbae39dd34b3be33220309,Quantization Mimic: Towards Very Tiny CNN for Object Detection,SenseTime,SenseTime,"China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1号-7",39.99300800,116.32988200,company,"1 Zhongguancun E Rd, Haidian Qu, China"
+aab3561acbd19f7397cbae39dd34b3be33220309,Quantization Mimic: Towards Very Tiny CNN for Object Detection,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+aa912375eaf50439bec23de615aa8a31a3395ad3,Implementation of a New Methodology to Reduce the Effects of Changes of Illumination in Face Recognition-based Authentication,Howard University,Howard University,"Howard University, College Street Northwest, Howard University, Washington, D.C., 20001, USA",38.92152500,-77.01953566,edu,
+aa912375eaf50439bec23de615aa8a31a3395ad3,Implementation of a New Methodology to Reduce the Effects of Changes of Illumination in Face Recognition-based Authentication,Howard University,Howard University,"Howard University, College Street Northwest, Howard University, Washington, D.C., 20001, USA",38.92152500,-77.01953566,edu,
+aa4928142b99224e96536d402ef8869b8391cf79,Fusing Individual Algorithms and Humans Improves Face Recognition Accuracy,University of Texas at Dallas,University of Texas at Dallas,"University of Texas at Dallas, Richardson, Dallas County, Texas, 75080, USA",32.98207990,-96.75662780,edu,
+aa4928142b99224e96536d402ef8869b8391cf79,Fusing Individual Algorithms and Humans Improves Face Recognition Accuracy,National Institute of Standards and Technology,National Institute of Standards and Technology,"National Institute of Standards and Technology, Summer Walk Drive, Diamond Farms, Gaithersburg, Montgomery County, Maryland, 20878, USA",39.12549380,-77.22293475,edu,
+aaeb8b634bb96a372b972f63ec1dc4db62e7b62a,Facial Expression Recognition System: A Digital Printing Application,Jadavpur University,Jadavpur University,"Jadavpur University, Chingrighata Flyover, Basani Devi Colony, Kolkata, Hāora, West Bengal, 700098, India",22.56115370,88.41310194,edu,
+aaeb8b634bb96a372b972f63ec1dc4db62e7b62a,Facial Expression Recognition System: A Digital Printing Application,Jadavpur University,Jadavpur University,"Jadavpur University, Chingrighata Flyover, Basani Devi Colony, Kolkata, Hāora, West Bengal, 700098, India",22.56115370,88.41310194,edu,
+aaa29212a3f9e6a35f78600231a690b4a3c83fd5,Distributional Learning of Appearance,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+aadc142d4e216432899326c7162540955f8b5590,Middle-Out Decoding,University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+aadc142d4e216432899326c7162540955f8b5590,Middle-Out Decoding,University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+aa0c30bd923774add6e2f27ac74acd197b9110f2,Dynamic Probabilistic Linear Discriminant Analysis for video classification,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+aa0c30bd923774add6e2f27ac74acd197b9110f2,Dynamic Probabilistic Linear Discriminant Analysis for video classification,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+aa0c30bd923774add6e2f27ac74acd197b9110f2,Dynamic Probabilistic Linear Discriminant Analysis for video classification,Middlesex University,Middlesex University,"Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK",51.59029705,-0.22963221,edu,
+aa0c30bd923774add6e2f27ac74acd197b9110f2,Dynamic Probabilistic Linear Discriminant Analysis for video classification,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+aac934f2eed758d4a27562dae4e9c5415ff4cdb7,TS-LSTM and Temporal-Inception: Exploiting Spatiotemporal Dynamics for Activity Recognition,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+aa2ddae22760249729ac2c2c4e24c8b665bcd40e,Interpretable Basis Decomposition for Visual Explanation,MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+aaf4d938f2e66d158d5e635a9c1d279cdc7639c0,Toward visual understanding of everyday object,MIT,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+aa7e7637f3443a823ee799a560ab84103b0e9a7f,Autonomous Driving in Reality with Reinforcement Learning and Image Translation,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+aa54d0ac723c1a45e31df69433a72f6dc711706a,Robust 3D Face Recognition Using Learned Visual Codebook,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+aa3c9de34ef140ec812be85bb8844922c35eba47,Men Also Like Shopping: Reducing Gender Bias Amplification using Corpus-level Constraints,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+aff92784567095ee526a705e21be4f42226bbaab,Face recognition in uncontrolled environments,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+aff8705fb2f2ae460cb3980b47f2e85c2e6dd41a,Attributes in Multiple Facial Images,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+af3d8552d31843136acc8eae994842c0cd5262b5,Deep Dual Pyramid Network for Barcode Segmentation using Barcode-30k Database,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+af3d8552d31843136acc8eae994842c0cd5262b5,Deep Dual Pyramid Network for Barcode Segmentation using Barcode-30k Database,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+afe987d36438efaa2b5116c444b5fc47462f11d9,SALL-E: Situated Agent for Language Learning,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+af2531b3834b92275a3353e4b2426217ddc4a839,Probabilistic multiple face detection and tracking using entropy measures,University of Thessaloniki,University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+af2531b3834b92275a3353e4b2426217ddc4a839,Probabilistic multiple face detection and tracking using entropy measures,University of Thessaloniki,University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+af13c355a2a14bb74847aedeafe990db3fc9cbd4,Happy and agreeable?: multi-label classification of impressions in social video,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+af13c355a2a14bb74847aedeafe990db3fc9cbd4,Happy and agreeable?: multi-label classification of impressions in social video,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+af2c7b9adbf898b251d3d5d0659fd21fcd0197ba,Contextual Combination of Appearance and Motion for Intersection Videos with Vehicles and Pedestrians,University of Nevada,University of Nevada,"Orange 1, Evans Avenue, Reno, Washoe County, Nevada, 89557, USA",39.54694490,-119.81346566,edu,
+afb3bc6854003c7cc9e94cb16d62ef353b5a6569,Human layout estimation using structured output learning,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+afdf9a3464c3b015f040982750f6b41c048706f5,A Recurrent Encoder-Decoder Network for Sequential Face Alignment,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+afdf9a3464c3b015f040982750f6b41c048706f5,A Recurrent Encoder-Decoder Network for Sequential Face Alignment,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+af370cbe392b7fb2b9f26476a7e063e0f4c46815,Development of Neural Sensitivity to Face Identity Correlates with Perceptual Discriminability.,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+af370cbe392b7fb2b9f26476a7e063e0f4c46815,Development of Neural Sensitivity to Face Identity Correlates with Perceptual Discriminability.,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+afe9cfba90d4b1dbd7db1cf60faf91f24d12b286,Principal Directions of Synthetic Exact Filters for Robust Real-Time Eye Localization,University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.05015580,14.46907327,edu,
+afa84ff62c9f5b5c280de2996b69ad9fa48b7bc3,Two-Stream Flow-Guided Convolutional Attention Networks for Action Recognition,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+af58701bdd28a49d234ba87d8f1b90d1f001184e,Part-based pose estimation with local and non-local contextual information,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+af7cab9b4a2a2a565a3efe0a226c517f47289077,Deep Unsupervised Saliency Detection: A Multiple Noisy Labeling Perspective,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+af7cab9b4a2a2a565a3efe0a226c517f47289077,Deep Unsupervised Saliency Detection: A Multiple Noisy Labeling Perspective,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+af6c3c4826137ef638ded6ea1664e14a53d23798,Crowdsourcing Question-Answer Meaning Representations,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+af278274e4bda66f38fd296cfa5c07804fbc26ee,A Novel Maximum Entropy Markov Model for Human Facial Expression Recognition,SungKyunKwan University,SungKyunKwan University,"성균관대, 덕영대로, 천천동, 장안구, 수원시, 경기, 16357, 대한민국",37.30031270,126.97212300,edu,
+af278274e4bda66f38fd296cfa5c07804fbc26ee,A Novel Maximum Entropy Markov Model for Human Facial Expression Recognition,Kyung Hee University,Kyung Hee University,"경희대학교, 26, 경희대로, 회기동, 동대문구, 서울특별시, 02447, 대한민국",37.59660000,127.05210000,edu,
+afd492a598476b5a9b13e2b6d28a76b0707c0a35,Open-set Person Re-identification,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+af1a6c35f5d75122756d37faed062d5b5cd6bc71,Emotion Modelling and Facial Affect Recognition in Human-Computer and Human-Robot Interaction,National Technical University of Athens,National Technical University of Athens,"Εθνικό Μετσόβιο Πολυτεχνείο, Στουρνάρη, Μουσείο, Αθήνα, Δήμος Αθηναίων, Π.Ε. Κεντρικού Τομέα Αθηνών, Περιφέρεια Αττικής, Αττική, 11250, Ελλάδα",37.98782705,23.73179733,edu,
+afde6c95ca696de65599a27590b31112a3eb6f6d,A Framework for Sign Language Recognition using Support Vector Machines and Active Learning for Skin Segmentation and Boosted Temporal Sub-units,Dublin City University,Dublin City University,"Dublin City University Glasnevin Campus, Lower Car Park, Wad, Whitehall A ED, Dublin 9, Dublin, County Dublin, Leinster, D09 FW22, Ireland",53.38522185,-6.25740874,edu,
+afb3a84b7daa92d6e1894f5fefe9b38904976d7d,Generative adversarial network-based image super-resolution using perceptual content losses,Yonsei University,Yonsei University,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.56004060,126.93692480,edu,
+af1eab707e690e73a5b9073ed07a0436fd4e0b66,Adult Content Recognition from Images Using a Mixture of Convolutional Neural Networks,Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.64185301,edu,
+af1eab707e690e73a5b9073ed07a0436fd4e0b66,Adult Content Recognition from Images Using a Mixture of Convolutional Neural Networks,Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.64185301,edu,
+af1eab707e690e73a5b9073ed07a0436fd4e0b66,Adult Content Recognition from Images Using a Mixture of Convolutional Neural Networks,Yonsei University,Yonsei University,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.56004060,126.93692480,edu,
+af386bb1b5e8c9f65b3ae836198a93aa860d6331,Revisiting Dilated Convolution: A Simple Approach for Weakly- and Semi- Supervised Semantic Segmentation,IBM Thomas J. Watson Research Center,IBM Thomas J. Watson Research Center,"IBM Yorktown research lab, Adams Road, Millwood, Town of New Castle, Westchester County, New York, 10562, USA",41.21002475,-73.80407056,company,
+af654a7ec15168b16382bd604889ea07a967dac6,Face recognition committee machine,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+b76d6bb7aef87d03b6de039f01d3dba9224834b6,Crowd Counting by Adaptively Fusing Predictions from an Image Pyramid,City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+b7426836ca364603ccab0e533891d8ac54cf2429,A Review on Human Activity Recognition Using Vision-Based Method,Ocean University of China,Ocean University of China,"中国海洋大学, 238, 松岭路 Sōnglǐng Road, 朱家洼, 崂山区 (Laoshan), 青岛市, 山东省, 266100, 中国",36.16161795,120.49355276,edu,
+b7426836ca364603ccab0e533891d8ac54cf2429,A Review on Human Activity Recognition Using Vision-Based Method,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+b73ba189d0d1a3e2502716fee60c6865a7964d6e,Towards Open-Universe Image Parsing with Broad Coverage,University of North Carolina,University of North Carolina,"University of North Carolina, Emergency Room Drive, Chapel Hill, Orange County, North Carolina, 27599, USA",35.90503535,-79.04775327,edu,
+b73ba189d0d1a3e2502716fee60c6865a7964d6e,Towards Open-Universe Image Parsing with Broad Coverage,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+b73795963dc623a634d218d29e4a5b74dfbc79f1,Identity Preserving Face Completion for Large Ocular Region Occlusion,University of Kentucky,University of Kentucky,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA",38.03337420,-84.50177580,edu,
+b73795963dc623a634d218d29e4a5b74dfbc79f1,Identity Preserving Face Completion for Large Ocular Region Occlusion,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+b73795963dc623a634d218d29e4a5b74dfbc79f1,Identity Preserving Face Completion for Large Ocular Region Occlusion,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+b750b3d8c34d4e57ecdafcd5ae8a15d7fa50bc24,Unified Solution to Nonnegative Data Factorization Problems,Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+b750b3d8c34d4e57ecdafcd5ae8a15d7fa50bc24,Unified Solution to Nonnegative Data Factorization Problems,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+b787113305ddef424def20920b8b098c7f18bd98,Sensor-Based Detection Approach for Passenger Flow Safety in Chinese High-speed Railway Transport Hub,Beijing Jiaotong University,Beijing Jiaotong University,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国",39.94976005,116.33629046,edu,
+b787113305ddef424def20920b8b098c7f18bd98,Sensor-Based Detection Approach for Passenger Flow Safety in Chinese High-speed Railway Transport Hub,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+b769007cb6931464168f63ebb4571e46d8c804b7,Human Pose Estimation and Activity Classification Using Convolutional Neural Networks,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+b769007cb6931464168f63ebb4571e46d8c804b7,Human Pose Estimation and Activity Classification Using Convolutional Neural Networks,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+b7676adde75c6d1bcabf56c7e2f7fa484155e8a8,ChaLearn Looking at People Challenge 2014: Dataset and Results,University of Barcelona,University of Barcelona,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España",41.38689130,2.16352385,edu,
+b7676adde75c6d1bcabf56c7e2f7fa484155e8a8,ChaLearn Looking at People Challenge 2014: Dataset and Results,Microsoft Research Asia,Microsoft Research Asia,"微软亚洲研究院, 5, 丹棱街, 海淀区, 北京市, 100080, 中国",39.97927000,116.30535000,company,
+b7bc85b4f6a186e01365dd42993029ea06909c8f,"Pedestrian Detection, Tracking and Re-Identification for Search in Visual Surveillance Data",Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+b755e80ce1985fc300e1983adefc8f14830702c4,"Totally Looks Like - How Humans Compare, Compared to Machines",York University,York University,"York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada",43.77439110,-79.50481085,edu,
+b7b421be7c1dcbb8d41edb11180ba6ec87511976,A Deep Face Identification Network Enhanced by Facial Attributes Prediction,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+b7820f3d0f43c2ce613ebb6c3d16eb893c84cf89,Visual Data Synthesis via GAN for Zero-Shot Video Classification,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+b78e611c32dc0daf762cfa93044558cdb545d857,Temporal Action Detection with Structured Segment Networks Supplementary Materials,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+b76ddd8e9098c4b361ef72ddaef42bf3c85f5825,Finding Coherent Motions and Semantic Regions in Crowd Scenes: A Diffusion and Clustering Approach,Microsoft Research Asia,Microsoft Research Asia,"微软亚洲研究院, 5, 丹棱街, 海淀区, 北京市, 100080, 中国",39.97927000,116.30535000,company,
+b7c5f885114186284c51e863b58292583047a8b4,GAdaBoost: Accelerating Adaboost Feature Selection with Genetic Algorithms,American University in Cairo,The American University in Cairo,"الجامعة الأمريكية بالقاهرة, شارع القصر العينى, القاهرة القديمة, جاردن سيتي, القاهرة, محافظة القاهرة, 11582, مصر",30.04287695,31.23664139,edu,
+b7c8452ac9791563d9a739bd079b05e518b20aea,Web Video in Numbers - An Analysis of Web-Video Metadata,University of Basel,University of Basel,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra",47.56126510,7.57529610,edu,
+b73d82be8270db40577b002789a26e4a226df1ef,Lessons Learned in Multilingual Grounded Language Learning,University of Copenhagen,University of Copenhagen,"Københavns Universitet, Krystalgade, Kødbyen, Vesterbro, København, Københavns Kommune, Region Hovedstaden, 1165, Danmark",55.68015020,12.57232700,edu,
+b7407b2ea67b8c82246f013f4966c4cac1507e60,Object Detection via End-to-End Integration of Aspect Ratio and Context Aware Part-based Models and Fully Convolutional Networks,Jilin University,Jilin University,"吉林大学珠海校区, 丹桂路, 圣堂村, 金湾区, 珠海市, 广东省, 中国",22.05356500,113.39913285,edu,
+b7407b2ea67b8c82246f013f4966c4cac1507e60,Object Detection via End-to-End Integration of Aspect Ratio and Context Aware Part-based Models and Fully Convolutional Networks,North Carolina State University,North Carolina State University,"North Carolina State University, Oval Drive, West Raleigh, Raleigh, Wake County, North Carolina, 27695, USA",35.77184965,-78.67408695,edu,
+b73d9e1af36aabb81353f29c40ecdcbdf731dbed,Head Pose Estimation on Top of Haar-Like Face Detection: A Study Using the Kinect Sensor,King Saud University,King Saud University,"King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية",24.72464030,46.62335012,edu,
+b7cff2a6fb3861f36bc779984b312ebae9f1f365,On Learning Associations of Faces and Voices,MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+b72c72e2c3d140c3064eae3aff17e0c0c177c963,3D-Aware Scene Manipulation via Inverse Graphics,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+b72c72e2c3d140c3064eae3aff17e0c0c177c963,3D-Aware Scene Manipulation via Inverse Graphics,MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+b72c72e2c3d140c3064eae3aff17e0c0c177c963,3D-Aware Scene Manipulation via Inverse Graphics,MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+b72c72e2c3d140c3064eae3aff17e0c0c177c963,3D-Aware Scene Manipulation via Inverse Graphics,MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+b72c72e2c3d140c3064eae3aff17e0c0c177c963,3D-Aware Scene Manipulation via Inverse Graphics,MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+b72c72e2c3d140c3064eae3aff17e0c0c177c963,3D-Aware Scene Manipulation via Inverse Graphics,MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+b72c72e2c3d140c3064eae3aff17e0c0c177c963,3D-Aware Scene Manipulation via Inverse Graphics,MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+b711d50a6c467f3db266f2199a9031f7391b184f,Deep Multi-Modal Image Correspondence Learning,MIT,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+b711d50a6c467f3db266f2199a9031f7391b184f,Deep Multi-Modal Image Correspondence Learning,Microsoft Research Asia,"Microsoft Research Asia, Beijing, China","Microsoft Research Asia, 5, Danling Street, Haidian District, Beijing, 100080, China",39.98120000,116.30620000,company,
+b7a9fa746f22aa543c1e682554a834329b17d1c2,A Person Re-Identification System for Mobile Devices,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+b75c93c70e8534553006c084ddc72de39517ded4,Learnable Pooling Regions for Image Classification,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+b75c93c70e8534553006c084ddc72de39517ded4,Learnable Pooling Regions for Image Classification,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+db848c3c32464d12da33b2f4c3a29fe293fc35d1,Pose Guided Human Video Generation,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+dbb202b5dc073a2284044b4903a6057ac54c034f,Semi-Supervised Ground-to-Aerial Adaptation with Heterogeneous Features Learning for Scene Classification,"National University of Defense Technology, China","National University of Defence Technology, Changsha 410000, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.22902090,112.99483204,edu,
+dbd98092268bf3ebf8c63b2b40bdd01872358fa2,Deep Adversarial Subspace Clustering,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+db1f48a7e11174d4a724a4edb3a0f1571d649670,Joint Constrained Clustering and Feature Learning based on Deep Neural Networks,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+db1f48a7e11174d4a724a4edb3a0f1571d649670,Joint Constrained Clustering and Feature Learning based on Deep Neural Networks,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+dbbdb23bb2512ef0922c5396cb95f713257b6ac8,Infinite-Label Learning with Semantic Output Codes,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+dbbdb23bb2512ef0922c5396cb95f713257b6ac8,Infinite-Label Learning with Semantic Output Codes,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+dbbdb23bb2512ef0922c5396cb95f713257b6ac8,Infinite-Label Learning with Semantic Output Codes,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+dbbdb23bb2512ef0922c5396cb95f713257b6ac8,Infinite-Label Learning with Semantic Output Codes,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+dbb16032dd8f19bdfd045a1fc0fc51f29c70f70a,Deep Face Recognition,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+dbda7c3a09ada41ad45f6dfa1aa803e2a87ddbcd,From what we perceive to what we remember: Characterizing representational dynamics of visual memorability,MIT,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+dbda7c3a09ada41ad45f6dfa1aa803e2a87ddbcd,From what we perceive to what we remember: Characterizing representational dynamics of visual memorability,McGovern Institute for Brain Research,McGovern Institute for Brain Research,"McGovern Institute for Brain Research (MIT), Main Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.36262950,-71.09144810,edu,
+dbda7c3a09ada41ad45f6dfa1aa803e2a87ddbcd,From what we perceive to what we remember: Characterizing representational dynamics of visual memorability,MIT,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+db0a4af734dab1854c2e8dfe499fe0e353226e45,Hot Anchors: A Heuristic Anchors Sampling Method in RCNN-Based Object Detection,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+db0a4af734dab1854c2e8dfe499fe0e353226e45,Hot Anchors: A Heuristic Anchors Sampling Method in RCNN-Based Object Detection,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+dbd3b57b942cb860207c377ac41d777f51ceabfa,An Inverse Kinematic Mathematical Model Using Groebner Basis Theory for Arm Swing Movement in the Gait Cycle,University of Minnesota,University of Minnesota,"WeismanArt, 333, East River Parkway, Marcy-Holmes, Phillips, Minneapolis, Hennepin County, Minnesota, 55455, USA",44.97308605,-93.23708813,edu,
+dbbfb8ab9355d00ec3b2a9be12747e2e20458bb5,Data Analysis Project : Using Knowledge Graphs for Image Classification,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+dbc749490275db26337c7e3201027e8cef8e371c,Multi-band Gradient Component Pattern (MGCP): A New Statistical Feature for Face Recognition,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+db5a00984fa54b9d2a1caad0067a9ff0d0489517,Supplementary Material for Multi-Task Adversarial Network for Disentangled Feature Learning,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+dbd958ffedc3eae8032be67599ec281310c05630,Automated Restyling of Human Portrait Based on Facial Expression Recognition and 3 D Reconstruction,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+dbed26cc6d818b3679e46677abc9fa8e04e8c6a6,A Hierarchical Generative Model for Eye Image Synthesis and Eye Gaze Estimation,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+db3545a983ffd24c97c18bf7f068783102548ad7,Enriching the Student Model in an Intelligent Tutoring System,Indraprastha Institute of Information Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+db3545a983ffd24c97c18bf7f068783102548ad7,Enriching the Student Model in an Intelligent Tutoring System,Monash University,Monash University,"Monash University, Mile Lane, Parkville, City of Melbourne, Victoria, 3000, Australia",-37.78397455,144.95867433,edu,
+db84c6fd771a073023f2b42e48a68eb2d9d31e4a,A Deep Variational Autoencoder Approach for Robust Facial Symmetrization,Shandong University of Science and Technology,Shandong University of Science and Technology,"山东科技大学, 579, 前湾港路, 牛王庙, 北下庄, 黄岛区 (Huangdao), 青岛市, 山东省, 266500, 中国",36.00146435,120.11624057,edu,
+db84c6fd771a073023f2b42e48a68eb2d9d31e4a,A Deep Variational Autoencoder Approach for Robust Facial Symmetrization,Ocean University of China,Ocean University of China,"中国海洋大学, 238, 松岭路 Sōnglǐng Road, 朱家洼, 崂山区 (Laoshan), 青岛市, 山东省, 266100, 中国",36.16161795,120.49355276,edu,
+dba493caf6647214c8c58967a8251641c2bda4c2,Automatic 3D Facial Expression Editing in Videos,"University of California, Santa Barbara","University of California, Santa Barbara","UCSB, Santa Barbara County, California, 93106, USA",34.41459370,-119.84581950,edu,
+db49a5e6d73de616c66904138a8a19ce0a329c4d,Learning Multiple Categories on Deep Convolution Networks,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+dbcab35c43c78411da8ceba4bdebe69f79308568,Social Style Characterization from Egocentric Photo-streams,University of Barcelona,University of Barcelona,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España",41.38689130,2.16352385,edu,
+dbcab35c43c78411da8ceba4bdebe69f79308568,Social Style Characterization from Egocentric Photo-streams,Microsoft Research Asia,"Microsoft Research Asia, Beijing, China","Microsoft Research Asia, 5, Danling Street, Haidian District, Beijing, 100080, China",39.98120000,116.30620000,company,
+db36e682501582d1c7b903422993cf8d70bb0b42,Deep Trans-layer Unsupervised Networks for Representation Learning,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+db36e682501582d1c7b903422993cf8d70bb0b42,Deep Trans-layer Unsupervised Networks for Representation Learning,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+db299ad09f629a0fcd45b74fa567da476d83a4f3,Dilated Residual Networks,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+db299ad09f629a0fcd45b74fa567da476d83a4f3,Dilated Residual Networks,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+dbf6d2619bd41ce4c36488e15d114a2da31b51c9,Data-Driven Modeling of Group Entitativity in Virtual Environments,University of North Carolina,University of North Carolina,"University of North Carolina, Emergency Room Drive, Chapel Hill, Orange County, North Carolina, 27599, USA",35.90503535,-79.04775327,edu,
+dbf6d2619bd41ce4c36488e15d114a2da31b51c9,Data-Driven Modeling of Group Entitativity in Virtual Environments,University of North Carolina,University of North Carolina,"University of North Carolina, Emergency Room Drive, Chapel Hill, Orange County, North Carolina, 27599, USA",35.90503535,-79.04775327,edu,
+dbf6d2619bd41ce4c36488e15d114a2da31b51c9,Data-Driven Modeling of Group Entitativity in Virtual Environments,University of North Carolina,University of North Carolina,"University of North Carolina, Emergency Room Drive, Chapel Hill, Orange County, North Carolina, 27599, USA",35.90503535,-79.04775327,edu,
+dbf6d2619bd41ce4c36488e15d114a2da31b51c9,Data-Driven Modeling of Group Entitativity in Virtual Environments,University of North Carolina,University of North Carolina,"University of North Carolina, Emergency Room Drive, Chapel Hill, Orange County, North Carolina, 27599, USA",35.90503535,-79.04775327,edu,
+dbf6d2619bd41ce4c36488e15d114a2da31b51c9,Data-Driven Modeling of Group Entitativity in Virtual Environments,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+dbe0e533d715f8543bcf197f3b8e5cffa969dfc0,"A Comprehensive Comparative Performance Analysis of Eigenfaces, Laplacianfaces and Orthogonal Laplacianfaces for Face Recognition",Amity University,Amity University,"Amity University, Faizabad Road, Uttardhauna, Gomti Nagar, Tiwariganj, Lucknow, Uttar Pradesh, 226010, India",26.85095965,81.04950965,edu,
+dbe0e533d715f8543bcf197f3b8e5cffa969dfc0,"A Comprehensive Comparative Performance Analysis of Eigenfaces, Laplacianfaces and Orthogonal Laplacianfaces for Face Recognition",Amity University,Amity University,"Amity University, Faizabad Road, Uttardhauna, Gomti Nagar, Tiwariganj, Lucknow, Uttar Pradesh, 226010, India",26.85095965,81.04950965,edu,
+db056590e54d7be16fb1c96deb9e94914ea9f838,Awareness of Road Scene Participants for Autonomous Driving,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+db3ce18f318ee732dab2e2f574062c94f7398943,"Image Semantic Transformation: Faster, Lighter and Stronger",Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+db6344e4f8a41c619573c8579595612a7cdfb080,Research on Face Recognition based on Wavelet Transformation and Improved Sparse Representation,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+db3acf0653d6e69887d184c7ebb1958f74a4d0b1,Weighting Deep and Classic Representation via l 2 Regularization for Image Classification,University of Macau,University of Macau,"研究生宿舍 Residência de Estudantes de Pós-Graduação da Universidade de Macau, 澳門大學 Universidade de Macau, 嘉模堂區 Nossa Senhora do Carmo, 氹仔 Taipa, Universidade de Macau em Ilha de Montanha 澳門大學橫琴校區, 中国",22.12401870,113.54510901,edu,
+db3acf0653d6e69887d184c7ebb1958f74a4d0b1,Weighting Deep and Classic Representation via l 2 Regularization for Image Classification,University of Macau,University of Macau,"研究生宿舍 Residência de Estudantes de Pós-Graduação da Universidade de Macau, 澳門大學 Universidade de Macau, 嘉模堂區 Nossa Senhora do Carmo, 氹仔 Taipa, Universidade de Macau em Ilha de Montanha 澳門大學橫琴校區, 中国",22.12401870,113.54510901,edu,
+db3acf0653d6e69887d184c7ebb1958f74a4d0b1,Weighting Deep and Classic Representation via l 2 Regularization for Image Classification,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+db3acf0653d6e69887d184c7ebb1958f74a4d0b1,Weighting Deep and Classic Representation via l 2 Regularization for Image Classification,Jiangsu University,Jiangsu University,"江苏大学, 301, 学府路, 京口区, 象山街道, 京口区 (Jingkou), 镇江市 / Zhenjiang, 江苏省, 212013, 中国",32.20302965,119.50968362,edu,
+dbede5113e4e91a3a26058e8b7253438a1df04c9,Online Dictionary Learning for Approximate Archetypal Analysis,Microsoft Research Asia,"Microsoft Research Asia, Beijing, China","Microsoft Research Asia, 5, Danling Street, Haidian District, Beijing, 100080, China",39.98120000,116.30620000,company,
+db640eddc51258cf6b11e442745d9a4bd5d6995b,Simple Baselines for Human Pose Estimation and Tracking,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"电子科技大学 (清水河校区), 2006, 西源大道, 高新西区, 郫都区, 成都市, 四川省, 611731, 中国",30.75083000,103.93086000,edu,
+db82f9101f64d396a86fc2bd05b352e433d88d02,A Spatio-Temporal Probabilistic Framework for Dividing and Predicting Facial Action Units,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+db428d03e3dfd98624c23e0462817ad17ef14493,Oxford Trecvid 2006 – Notebook Paper 1 High-level Feature Extraction 1.1 Bag of Visual Word Representation,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+a8068de468ae9e1d6ebf021433467a449703acae,"Deep, Dense, and Low-Rank Gaussian Conditional Random Fields",University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+a8d665fa7357f696dcfd188b91fda88da47b964e,Scaling Video Analytics Systems to Large Camera Deployments,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+a8d665fa7357f696dcfd188b91fda88da47b964e,Scaling Video Analytics Systems to Large Camera Deployments,Microsoft Research Asia,"Microsoft Research Asia, Beijing, China","Microsoft Research Asia, 5, Danling Street, Haidian District, Beijing, 100080, China",39.98120000,116.30620000,company,
+a8d665fa7357f696dcfd188b91fda88da47b964e,Scaling Video Analytics Systems to Large Camera Deployments,Microsoft Research Asia,"Microsoft Research Asia, Beijing, China","Microsoft Research Asia, 5, Danling Street, Haidian District, Beijing, 100080, China",39.98120000,116.30620000,company,
+a8fa12c662447903fbb751eaa967f861ea33abff,"Finding People Using Scale, Rotation and Articulation Invariant Matching",Boston College,Boston College,"Boston College, 140, Commonwealth Avenue, Chestnut Hill, Newton, Middlesex County, Massachusetts, 02467, USA",42.33544810,-71.16813864,edu,
+a838a1184cb9ca86ae910509bb318266101ae656,Question Relevance in Visual Question Answering,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+a838a1184cb9ca86ae910509bb318266101ae656,Question Relevance in Visual Question Answering,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+a838a1184cb9ca86ae910509bb318266101ae656,Question Relevance in Visual Question Answering,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+a826646a8e4e8a746111d3a6915c8f0fcfcc3a00,Scheduling multithreaded applications onto heterogeneous composite cores architecture,George Mason University,George Mason University,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA",38.83133325,-77.30798839,edu,
+a84c039818d2abeba21f792c0522e9f75582518e,Prototypical Priors: From Improving Classification to Zero-Shot Learning,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+a8b63bede77e752ead39453838a8ab66aed7b970,The Unmanned Aerial Vehicle Benchmark: Object Detection and Tracking,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+a8b63bede77e752ead39453838a8ab66aed7b970,The Unmanned Aerial Vehicle Benchmark: Object Detection and Tracking,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+a8b63bede77e752ead39453838a8ab66aed7b970,The Unmanned Aerial Vehicle Benchmark: Object Detection and Tracking,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+a8b63bede77e752ead39453838a8ab66aed7b970,The Unmanned Aerial Vehicle Benchmark: Object Detection and Tracking,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+a80355dcee5156b064e31b39c6b72037044ed87c,L-Tree: A Local-Area-Learning-Based Tree Induction Algorithm for Image Classification,Yonsei University,Yonsei University,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.56004060,126.93692480,edu,
+a896ddeb0d253739c9aaef7fc1f170a2ba8407d3,SSH: Single Stage Headless Face Detector,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+a825680aeb853fc34c65b5844c4c4391148f18c3,SSD-6D: Making RGB-Based 3D Detection and 6D Pose Estimation Great Again,Toyota Research Institute,Toyota Research Institute,"Toyota Research Institute, 4440, West El Camino Real, Los Altos, Santa Clara County, California, 94022, USA",37.40253645,-122.11655107,edu,
+a820941eaf03077d68536732a4d5f28d94b5864a,Leveraging Datasets with Varying Annotations for Face Alignment via Deep Regression Network,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+a820941eaf03077d68536732a4d5f28d94b5864a,Leveraging Datasets with Varying Annotations for Face Alignment via Deep Regression Network,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+a86ea8041bcc91097a8bbb450cb94a616ee85ae6,Diverse and Coherent Paragraph Generation from Images,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+a87bc818f7409ac97c8719aa8fae2c40d214ebbc,Deep Computational Phenotyping,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+a8035ca71af8cc68b3e0ac9190a89fed50c92332,IIIT-CFW: A Benchmark Database of Cartoon Faces in the Wild,Indian Institute of Information Technology Sri City,"IIIT Chittoor, Sri City, India","630 Gnan Marg, Sri City, Andhra Pradesh 517646, India",13.55681710,80.02612830,edu,
+a89e1fc2681a9a399cc5008ea34b5ec3fe7ca845,Improving Fast Segmentation With Teacher-Student Learning,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+a89e1fc2681a9a399cc5008ea34b5ec3fe7ca845,Improving Fast Segmentation With Teacher-Student Learning,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+a803453edd2b4a85b29da74dcc551b3c53ff17f9,Pose Invariant Face Recognition Under Arbitrary Illumination Based on 3D Face Reconstruction,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+a8d52265649c16f95af71d6f548c15afc85ac905,Situation Recognition with Graph Neural Networks,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+a8583e80a455507a0f146143abeb35e769d25e4e,A Distance-accuracy Hybrid Weighted Voting Scheme for Partial Face Recognition,Feng Chia University,Feng Chia University,"逢甲大學, 100, 文華路, 西平里, 西屯區, 臺中市, 40724, 臺灣",24.18005755,120.64836072,edu,
+a8583e80a455507a0f146143abeb35e769d25e4e,A Distance-accuracy Hybrid Weighted Voting Scheme for Partial Face Recognition,National Chiao Tung University,National Chiao Tung University,"NCTU;交大;交通大學;交大光復校區;交通大學光復校區, 1001, 大學路, 光明里, 赤土崎, 東區, 新竹市, 30010, 臺灣",24.78676765,120.99724412,edu,
+a8896fa513ff0587e2e8dea0f3ef585d4d04feff,Production-level facial performance capture using deep convolutional neural networks,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+a8896fa513ff0587e2e8dea0f3ef585d4d04feff,Production-level facial performance capture using deep convolutional neural networks,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+a8896fa513ff0587e2e8dea0f3ef585d4d04feff,Production-level facial performance capture using deep convolutional neural networks,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+a8896fa513ff0587e2e8dea0f3ef585d4d04feff,Production-level facial performance capture using deep convolutional neural networks,Aalto University,Aalto University,"Aalto, 24, Otakaari, Otaniemi, Suur-Tapiola, Espoo, Helsingin seutukunta, Uusimaa, Etelä-Suomi, Manner-Suomi, 02150, Suomi",60.18558755,24.82427330,edu,
+a87e37d43d4c47bef8992ace408de0f872739efc,A Comprehensive Review on Handcrafted and Learning-Based Action Representation Approaches for Human Activity Recognition,Lancaster University,Lancaster University,"Lancaster University, Library Avenue, Bowland College, Hala, Lancaster, Lancs, North West England, England, LA1 4AP, UK",54.00975365,-2.78757491,edu,
+a87e37d43d4c47bef8992ace408de0f872739efc,A Comprehensive Review on Handcrafted and Learning-Based Action Representation Approaches for Human Activity Recognition,"COMSATS Institute of Information Technology, Lahore",COMSATS Institute of Information Technology,"COMSATS Institute of Information Technology, Ali Akbar Road, Dawood Residency, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54700, ‏پاکستان‎",31.40063320,74.21372960,edu,
+a8ef9a39e68bbc7f6f25a8155cab52aab6708886,Generative Compression,MIT,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+a8c8a96b78e7b8e0d4a4a422fcb083e53ad06531,3D Human Action Recognition using Hu Moment Invariants and Euclidean Distance Classifier,University of Arkansas at Little Rock,University of Arkansas at Little Rock,"University of Arkansas At Little Rock (UALR), 2801, U A L R Campus Drive, Little Rock, Pulaski County, Arkansas, 72204, USA",34.72236805,-92.33830255,edu,
+a8c8a96b78e7b8e0d4a4a422fcb083e53ad06531,3D Human Action Recognition using Hu Moment Invariants and Euclidean Distance Classifier,University of Arkansas at Little Rock,University of Arkansas at Little Rock,"University of Arkansas At Little Rock (UALR), 2801, U A L R Campus Drive, Little Rock, Pulaski County, Arkansas, 72204, USA",34.72236805,-92.33830255,edu,
+a8c8a96b78e7b8e0d4a4a422fcb083e53ad06531,3D Human Action Recognition using Hu Moment Invariants and Euclidean Distance Classifier,University of Arkansas at Little Rock,University of Arkansas at Little Rock,"University of Arkansas At Little Rock (UALR), 2801, U A L R Campus Drive, Little Rock, Pulaski County, Arkansas, 72204, USA",34.72236805,-92.33830255,edu,
+a84934a2db769b7523399c8eaf6d2d7582415c5c,Convolutional Pose Machines: A Deep Architecture for Estimating Articulated Poses,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+a84934a2db769b7523399c8eaf6d2d7582415c5c,Convolutional Pose Machines: A Deep Architecture for Estimating Articulated Poses,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+a89eae439dfa7cb727bd5193a5130ae6afcd42e8,On Recognizing Actions in Still Images via Multiple Features,Bilkent University,Bilkent University,"Bilkent Üniversitesi, 3. Cadde, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87204890,32.75395155,edu,
+a89eae439dfa7cb727bd5193a5130ae6afcd42e8,On Recognizing Actions in Still Images via Multiple Features,Hacettepe University,Hacettepe University,"Hacettepe Üniversitesi Beytepe Kampüsü, Hacettepe-Beytepe Kampüs Yolu, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.86742125,32.73519072,edu,
+a84f80ca4e29b49cab1035ed8c7877caf2dbe914,Effects of Facial Symmetry and Gaze Direction on Perception of Social Attributes: A Study in Experimental Art History,University of Oslo,University of Oslo,"UiO, Moltke Moes vei, Blindern, Nordre Aker, Oslo, 0851, Norge",59.93891665,10.72170765,edu,
+a84f80ca4e29b49cab1035ed8c7877caf2dbe914,Effects of Facial Symmetry and Gaze Direction on Perception of Social Attributes: A Study in Experimental Art History,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+a8748a79e8d37e395354ba7a8b3038468cb37e1f,Seeing the Forest from the Trees: A Holistic Approach to Near-Infrared Heterogeneous Face Recognition,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+a8748a79e8d37e395354ba7a8b3038468cb37e1f,Seeing the Forest from the Trees: A Holistic Approach to Near-Infrared Heterogeneous Face Recognition,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+a8a61badec9b8bc01f002a06e1426a623456d121,Joint Spatio-Temporal Action Localization in Untrimmed Videos with Per-Frame Segmentation,Microsoft Research Asia,"Microsoft Research Asia, Beijing, China","Microsoft Research Asia, 5, Danling Street, Haidian District, Beijing, 100080, China",39.98120000,116.30620000,company,
+a8682d432865a9417b30a482b462a9e07c66c0d7,Matching Pixels using Co-Occurrence Statistics,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+a8682d432865a9417b30a482b462a9e07c66c0d7,Matching Pixels using Co-Occurrence Statistics,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+a8682d432865a9417b30a482b462a9e07c66c0d7,Matching Pixels using Co-Occurrence Statistics,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+a8123a4e68642b602b5094f2f670ed7aefdd2f58,Online Video Object Detection Using Association LSTM,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+a8154d043f187c6640cb6aedeaa8385a323e46cf,Image Retrieval with Mixed Initiative and Multimodal Feedback,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+a8e2b2b1b76491336036005d81be57d256acdd0c,Fusing subcategory probabilities for texture classification,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+a812368fe1d4a186322bf72a6d07e1cf60067234,Gaussian processes for modeling of facial expressions,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+de7f5e4ccc2f38e0c8f3f72a930ae1c43e0fdcf0,Merge or Not? Learning to Group Faces via Imitation Learning,SenseTime,SenseTime,"China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1号-7",39.99300800,116.32988200,company,"1 Zhongguancun E Rd, Haidian Qu, China"
+de7f5e4ccc2f38e0c8f3f72a930ae1c43e0fdcf0,Merge or Not? Learning to Group Faces via Imitation Learning,SenseTime,SenseTime,"China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1号-7",39.99300800,116.32988200,company,"1 Zhongguancun E Rd, Haidian Qu, China"
+de7f5e4ccc2f38e0c8f3f72a930ae1c43e0fdcf0,Merge or Not? Learning to Group Faces via Imitation Learning,SenseTime,SenseTime,"China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1号-7",39.99300800,116.32988200,company,"1 Zhongguancun E Rd, Haidian Qu, China"
+de7f5e4ccc2f38e0c8f3f72a930ae1c43e0fdcf0,Merge or Not? Learning to Group Faces via Imitation Learning,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+de8381903c579a4fed609dff3e52a1dc51154951,Shape and Appearance Based Analysis of Facial Images for Assessing ICAO Compliance,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+de46cbf18c7da9efc9368241463919e22230b0b0,What We Have Learned about Autism Spectrum Disorder from Valproic Acid,University of Calgary,University of Calgary,"University of Calgary, Service Tunnel, University Heights, Calgary, Alberta, T2N 1N7, Canada",51.07840380,-114.12870770,edu,
+dedc7b080b8e13d72f8dc33e248e7637d191fdbf,Beyond Dataset Bias: Multi-task Unaligned Shared Knowledge Transfer,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+dedc7b080b8e13d72f8dc33e248e7637d191fdbf,Beyond Dataset Bias: Multi-task Unaligned Shared Knowledge Transfer,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+de0cfd94d16468cdaaa0fe725e214930587ed8ce,Scalable Person Re-identification on Supervised Smoothed Manifold,Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+de2e8127105a37ff1f59be13a010ab0d3f4fa650,Analyzing Hands with First-person Computer Vision,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+def569db592ed1715ae509644444c3feda06a536,Discovery and usage of joint attention in images,Weizmann Institute of Science,Weizmann Institute of Science,"מכון ויצמן למדע, שדרת מרכוס זיו, מעונות שיין, אחוזות הנשיא, רחובות, מחוז המרכז, NO, ישראל",31.90784990,34.81334092,edu,
+def569db592ed1715ae509644444c3feda06a536,Discovery and usage of joint attention in images,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+def569db592ed1715ae509644444c3feda06a536,Discovery and usage of joint attention in images,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+def569db592ed1715ae509644444c3feda06a536,Discovery and usage of joint attention in images,Weizmann Institute of Science,Weizmann Institute of Science,"מכון ויצמן למדע, שדרת מרכוס זיו, מעונות שיין, אחוזות הנשיא, רחובות, מחוז המרכז, NO, ישראל",31.90784990,34.81334092,edu,
+de3245c795bc50ebdb5d929c8da664341238264a,Generative Model With Coordinate Metric Learning for Object Recognition Based on 3D Models,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+decc4de8b6964ba473744741c3a46ac37f2d6e3e,A Pose Invariant 3 D Face Recognition Method,University of Tennessee,University of Tennessee,"University of Tennessee, Melrose Avenue, Fort Sanders, Knoxville, Knox County, Tennessee, 37916, USA",35.95424930,-83.93073950,edu,
+de8a01d36df1c3a881523c3748fcfa988710fa15,Physical adversarial examples for semantic image segmentation,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+de8303e9206096dd9f4ba9d876057345ff1f164a,Tell-and-Answer: Towards Explainable Visual Question Answering using Attributes and Captions,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+de8303e9206096dd9f4ba9d876057345ff1f164a,Tell-and-Answer: Towards Explainable Visual Question Answering using Attributes and Captions,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+de8303e9206096dd9f4ba9d876057345ff1f164a,Tell-and-Answer: Towards Explainable Visual Question Answering using Attributes and Captions,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+de92ac27693598254554531d8cadfd4728c423a1,Online Multi-object Tracking Using CNN-Based Single Object Tracker with Spatial-Temporal Attention Mechanism,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+de92ac27693598254554531d8cadfd4728c423a1,Online Multi-object Tracking Using CNN-Based Single Object Tracker with Spatial-Temporal Attention Mechanism,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+defbca385b48173d3dbd7bb8b8fbd35ba06239c3,Motion-Appearance Co-Memory Networks for Video Question Answering,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+def2983576001bac7d6461d78451159800938112,The Do’s and Don’ts for CNN-Based Face Verification,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+de15af84b1257211a11889b6c2adf0a2bcf59b42,Anomaly detection in non-stationary and distributed environments,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+de15af84b1257211a11889b6c2adf0a2bcf59b42,Anomaly detection in non-stationary and distributed environments,Institute for Communication Systems,Institute for Communication Systems,"Institute for Communication Systems, Spine Road, Woodbridge Hill, Guildford, Surrey, South East, England, GU2 7XS, UK",51.24336920,-0.59322090,edu,
+de15af84b1257211a11889b6c2adf0a2bcf59b42,Anomaly detection in non-stationary and distributed environments,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+de3285da34df0262a4548574c2383c51387a24bf,Two-Stream Convolutional Networks for Dynamic Texture Synthesis,York University,York University,"York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada",43.77439110,-79.50481085,edu,
+de0c4459c46c5efbad02cd9a1f4687a12883c5d7,Pedestrian Detection and Tracking in Urban Context Using a Mono-Camera,University of Tartu,UNIVERSITY OF TARTU,"Paabel, University of Tartu, 17, Ülikooli, Kesklinn, Tartu linn, Tartu, Tartu linn, Tartu maakond, 53007, Eesti",58.38131405,26.72078081,edu,
+de725093e13cdc90209d981bea69730c7f6ee03d,A Sparse Coding Based Transfer Learning Framework for Pedestrian Detection,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+de725093e13cdc90209d981bea69730c7f6ee03d,A Sparse Coding Based Transfer Learning Framework for Pedestrian Detection,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+dec0c26855da90876c405e9fd42830c3051c2f5f,Supplementary Material : Learning Compositional Visual Concepts with Mutual Consistency,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+dec0c26855da90876c405e9fd42830c3051c2f5f,Supplementary Material : Learning Compositional Visual Concepts with Mutual Consistency,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+ded2eaddaf214e63aae6be34f4f319df0a10c13e,Gender Classification of Consumer Face Images using Gabor Filters,University of Gujrat,University of Gujrat,"University of Gujrat, University Road, Chandhar, Gujrāt District, پنجاب, 50700, ‏پاکستان‎",32.63744845,74.16174558,edu,
+deb78e302c2efdac51b742f4d3e8041b5838e533,Learning to Evaluate Image Captioning,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+deae19c928571d3c1101660b0d643d7a7ee893b2,Improved Human Parsing with a Full Relational Model,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+b0c66e95b5eb36471bcdcdaad7d9368556110109,Fuzzy Rule Based Quality Measures for Adaptive Multimodal Biometric Fusion at Operation Time,University of Calgary,University of Calgary,"University of Calgary, Service Tunnel, University Heights, Calgary, Alberta, T2N 1N7, Canada",51.07840380,-114.12870770,edu,
+b0c66e95b5eb36471bcdcdaad7d9368556110109,Fuzzy Rule Based Quality Measures for Adaptive Multimodal Biometric Fusion at Operation Time,University of Calgary,University of Calgary,"University of Calgary, Service Tunnel, University Heights, Calgary, Alberta, T2N 1N7, Canada",51.07840380,-114.12870770,edu,
+b06254f76e13d5f6ac0230fd8bdac35b901f9480,Parameterizing Region Covariance: An Efficient Way To Apply Sparse Codes On Second Order Statistics,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+b06254f76e13d5f6ac0230fd8bdac35b901f9480,Parameterizing Region Covariance: An Efficient Way To Apply Sparse Codes On Second Order Statistics,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+b06254f76e13d5f6ac0230fd8bdac35b901f9480,Parameterizing Region Covariance: An Efficient Way To Apply Sparse Codes On Second Order Statistics,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+b06254f76e13d5f6ac0230fd8bdac35b901f9480,Parameterizing Region Covariance: An Efficient Way To Apply Sparse Codes On Second Order Statistics,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+b03b4d8b4190361ed2de66fcbb6fda0c9a0a7d89,Deep Alternative Neural Network: Exploring Contexts as Early as Possible for Action Recognition,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+b03b4d8b4190361ed2de66fcbb6fda0c9a0a7d89,Deep Alternative Neural Network: Exploring Contexts as Early as Possible for Action Recognition,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+b0bf1be8731c60b2caf3a27f1e95b73875c4220b,Submission to Moments in Time Challenge 2018,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+b0bf1be8731c60b2caf3a27f1e95b73875c4220b,Submission to Moments in Time Challenge 2018,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+b0bf1be8731c60b2caf3a27f1e95b73875c4220b,Submission to Moments in Time Challenge 2018,Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+b0bf1be8731c60b2caf3a27f1e95b73875c4220b,Submission to Moments in Time Challenge 2018,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+b0c820c6996b9cf9798d778a46860d28f1beae64,Dynamics Analysis of Facial Expressions for Person Identification,Keio University,Keio University,"綱島市民の森, けつわり坂, 港北区, 横浜市, 神奈川県, 関東地方, 223-0053, 日本",35.54169690,139.63471840,edu,
+b0a1f562a55aae189d6a5cb826582b2e7fb06d3c,Multi-modal Mean-Fields via Cardinality-Based Clamping,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+b0b07732a9ab9b2b9d9dba41e1b9811629fa43dc,An Efficient Vision-Based Pedestrian Detection and Tracking System for ITS Applications,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+b0b07732a9ab9b2b9d9dba41e1b9811629fa43dc,An Efficient Vision-Based Pedestrian Detection and Tracking System for ITS Applications,University of Ottawa,University of Ottawa,"University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada",45.42580475,-75.68740118,edu,
+b05633a18a48d9c18735fd0a186a2654297ae543,Development of holistic vs. featural processing in face recognition,Bournemouth University,Bournemouth University,"Bournemouth University, BU footpaths, Poole, South West England, England, BH10 4HX, UK",50.74223495,-1.89433739,edu,
+b04d06b737bc8e9543d5ac6a1afa33aaeb3619c0,A pr 2 01 4 Robust and Efficient Subspace Segmentation via Least Squares Regression,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+b04d06b737bc8e9543d5ac6a1afa33aaeb3619c0,A pr 2 01 4 Robust and Efficient Subspace Segmentation via Least Squares Regression,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+b04d06b737bc8e9543d5ac6a1afa33aaeb3619c0,A pr 2 01 4 Robust and Efficient Subspace Segmentation via Least Squares Regression,Hefei University of Technology,Hefei University of Technology,"合肥工业大学(屯溪路校区), 193号, 南一环路, 航运南村, 包公街道, 合肥市区, 合肥市, 安徽省, 230009, 中国",31.84691800,117.29053367,edu,
+b04d06b737bc8e9543d5ac6a1afa33aaeb3619c0,A pr 2 01 4 Robust and Efficient Subspace Segmentation via Least Squares Regression,Tongji University,Tongji University,"同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国",31.28473925,121.49694909,edu,
+b06f2949eb748331c40a8b2381517fa09757ad17,Illumination Normalization in Face Recognition Using DCT and Supporting Vector Machine (SVM),National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+b06f2949eb748331c40a8b2381517fa09757ad17,Illumination Normalization in Face Recognition Using DCT and Supporting Vector Machine (SVM),National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+b013cce42dd769db754a57351d49b7410b8e82ad,Automatic point-based facial trait judgments evaluation,Universitat Oberta de Catalunya,Universitat Oberta de Catalunya,"Universitat Oberta de Catalunya, 156, Rambla del Poblenou, Provençals del Poblenou, Sant Martí, Barcelona, BCN, CAT, 08018, España",41.40657415,2.19453410,edu,
+b013cce42dd769db754a57351d49b7410b8e82ad,Automatic point-based facial trait judgments evaluation,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+b017963d83b3edf71e1673d7ffdec13a6d350a87,View Independent Face Detection Based on Combination of Local and Global Kernels,University of Electro-Communications,The University of Electro-Communications,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本",35.65729570,139.54255868,edu,
+b01ed5c62abdc37c7318c155e12e366238bdc2f5,Multimodal Dual Attention Memory for Video Story Question Answering,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+b0abf048d97a7beffc75fec1480d9bfe04a838a7,Learning Robust Representations for Computer Vision,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+b0abf048d97a7beffc75fec1480d9bfe04a838a7,Learning Robust Representations for Computer Vision,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+b06b960293b1c7744580e03539713c9fd83c0b63,Distributed Submodular Cover: Succinctly Summarizing Massive Data,Yale University,Yale University,"Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA",41.25713055,-72.98966960,edu,
+b0724a7b4b63d58e249379b889656a899455e0c2,Easy Identification from Better Constraints : Multi-Shot Person Re-Identification from Reference Constraints,Northwestern University,Northwestern University,"Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA",42.05511640,-87.67581113,edu,
+b084683e5bab9b2bc327788e7b9a8e049d5fff8f,Using LIP to Gloss Over Faces in Single-Stage Face Detection Networks,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+b099dfc5c823be79f9ca96168263c40d0020b92e,Co-Training for Demographic Classification Using Deep Learning from Label Proportions,Illinois Institute of Technology,Illinois Institute of Technology,"Illinois Institute of Technology, South State Street, Bronzeville, Chicago, Cook County, Illinois, 60616, USA",41.83619630,-87.62655913,edu,
+b099dfc5c823be79f9ca96168263c40d0020b92e,Co-Training for Demographic Classification Using Deep Learning from Label Proportions,Illinois Institute of Technology,Illinois Institute of Technology,"Illinois Institute of Technology, South State Street, Bronzeville, Chicago, Cook County, Illinois, 60616, USA",41.83619630,-87.62655913,edu,
+b03446a2de01126e6a06eb5d526df277fa36099f,A Torch Library for Action Recognition and Detection Using CNNs and LSTMs,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+b018fa5cb9793e260b8844ae155bd06380988584,Project STAR IST - 2000 - 28764 Deliverable D 6 . 3 Enhanced face and arm / hand detector,Katholieke Universiteit Leuven,Katholieke Universiteit Leuven,"Laboratorium voor Bos, natuur en landschap, 102, Vital Decosterstraat, Sint-Maartensdal, Leuven, Vlaams-Brabant, Vlaanderen, 3000, België / Belgique / Belgien",50.88306860,4.70195030,edu,
+b05faf0ae510cbd7510a6242aafdda7de3088282,Multi-Task Learning Using Uncertainty to Weigh Losses for Scene Geometry and Semantics,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+b05faf0ae510cbd7510a6242aafdda7de3088282,Multi-Task Learning Using Uncertainty to Weigh Losses for Scene Geometry and Semantics,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+b05faf0ae510cbd7510a6242aafdda7de3088282,Multi-Task Learning Using Uncertainty to Weigh Losses for Scene Geometry and Semantics,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+b0a96954377390e80de59f0063e5703a21391eb3,Emotionally Representative Image Discovery for Social Events,University of Alberta,University of Alberta,"University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada",53.52385720,-113.52282665,edu,
+b082f440ee91e2751701401919584203b37e1e1a,SeedNet : Automatic Seed Generation with Deep Reinforcement Learning for Robust Interactive Segmentation,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+b051715249e47fa7e987e1a5504830af0521c38c,Sentribute: image sentiment analysis from a mid-level perspective,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+b051715249e47fa7e987e1a5504830af0521c38c,Sentribute: image sentiment analysis from a mid-level perspective,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+b051715249e47fa7e987e1a5504830af0521c38c,Sentribute: image sentiment analysis from a mid-level perspective,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+b051715249e47fa7e987e1a5504830af0521c38c,Sentribute: image sentiment analysis from a mid-level perspective,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+b0b4ce1962ad6732965aa7f4b3dd1bfd32f0ae5c,A Database of Morphed Facial Expressions of Emotions,Lund University,Lund University,"TEM at Lund University, 9, Klostergatan, Stadskärnan, Centrum, Lund, Skåne, Götaland, 22222, Sverige",55.70395710,13.19020110,edu,
+b0d61c3e9851bb83cda8bc079e92d73a43e479bc,A Thin Shell Approach to the Registration of Implicit Surfaces,University of Vienna,University of Vienna,"Uni Wien, 1, Universitätsring, Schottenviertel, KG Innere Stadt, Innere Stadt, Wien, 1010, Österreich",48.21313020,16.36068653,edu,
+b0d61c3e9851bb83cda8bc079e92d73a43e479bc,A Thin Shell Approach to the Registration of Implicit Surfaces,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+a6f81619158d9caeaa0863738ab400b9ba2d77c2,Face Recognition using Convolutional Neural Network and Simple Logistic Classifier,K.N. Toosi University of Technology,K.N. Toosi University of Technology,"دانشکده مهندسی عمران و نقشه برداری, ولی عصر, کاووسیه, منطقه ۳ شهر تهران, تجریش, بخش رودبارقصران, شهرستان شمیرانات, استان تهران, 1968653111, ‏ایران‎",35.76427925,51.40970276,edu,
+a67da2dd79c01e8cc4029ecc5a05b97967403862,On Selecting Helpful Unlabeled Data for Improving Semi-Supervised Support Vector Machines,Myongji University,Myongji University,"명지대, 금학로, 역북동, 처인구, 용인시, 경기, 17144, 대한민국",37.23810230,127.19034310,edu,
+a6ac6463b5c89ac9eb013c978f213b309cc6a5c7,iSPA-Net: Iterative Semantic Pose Alignment Network,Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.02223470,77.56718325,edu,
+a6ac6463b5c89ac9eb013c978f213b309cc6a5c7,iSPA-Net: Iterative Semantic Pose Alignment Network,Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.02223470,77.56718325,edu,
+a6ac6463b5c89ac9eb013c978f213b309cc6a5c7,iSPA-Net: Iterative Semantic Pose Alignment Network,Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.02223470,77.56718325,edu,
+a6ac6463b5c89ac9eb013c978f213b309cc6a5c7,iSPA-Net: Iterative Semantic Pose Alignment Network,Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.02223470,77.56718325,edu,
+a6d7cf29f333ea3d2aeac67cde39a73898e270b7,Gender Classification from Facial Images Using Texture Descriptors,King Saud University,King Saud University,"King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية",24.72464030,46.62335012,edu,
+a6d7cf29f333ea3d2aeac67cde39a73898e270b7,Gender Classification from Facial Images Using Texture Descriptors,King Saud University,King Saud University,"King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية",24.72464030,46.62335012,edu,
+a6d7cf29f333ea3d2aeac67cde39a73898e270b7,Gender Classification from Facial Images Using Texture Descriptors,King Saud University,King Saud University,"King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية",24.72464030,46.62335012,edu,
+a6f4d114dae7664a5161a21fc2bdc76c86a2d69b,A 2D Human Body Model Dressed in Eigen Clothing,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+a611c978e05d7feab01fb8a37737996ad6e88bd9,Benchmarking 3D Pose Estimation for Face Recognition,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+a63104ad235f98bc5ee0b44fefbcdb49e32c205a,Has My Algorithm Succeeded? An Evaluator for Human Pose Estimators,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+a63104ad235f98bc5ee0b44fefbcdb49e32c205a,Has My Algorithm Succeeded? An Evaluator for Human Pose Estimators,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+a62bcfa204fca20acc7b90aaac01b55d315fc971,Automatically Learning the Objective Function for Model Fitting,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+a6582fda1ddd10c210e367119e01dfbec4a65b16,Implementing a Robust Explanatory Bias in a Person Re-identification Network,Naval Research Laboratory,Naval Research Laboratory,"Naval Research Laboratory Post Office, 4555, Overlook Avenue Southwest, Washington, D.C., 20375, USA",38.82313810,-77.01789020,mil,
+a6582fda1ddd10c210e367119e01dfbec4a65b16,Implementing a Robust Explanatory Bias in a Person Re-identification Network,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+a6582fda1ddd10c210e367119e01dfbec4a65b16,Implementing a Robust Explanatory Bias in a Person Re-identification Network,Naval Research Laboratory,Naval Research Laboratory,"Naval Research Laboratory Post Office, 4555, Overlook Avenue Southwest, Washington, D.C., 20375, USA",38.82313810,-77.01789020,mil,
+a6e8a8bb99e30a9e80dbf80c46495cf798066105,Ranking Generative Adversarial Networks: Subjective Control over Semantic Image Attributes,University of Bath,University of Bath,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK",51.37914420,-2.32523320,edu,
+a663e729cb44cd02eda2d2a08d9117839dc67ca1,Deep Generative Models with Learnable Knowledge Constraints,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+a6ffe238eaf8632b4a8a6f718c8917e7f3261546,Dynamic facial prosthetics for sufferers of facial paralysis.,Nottingham Trent University,Nottingham Trent University,"Nottingham Trent University, Waverley Terrace, Lace Market, The Park, City of Nottingham, East Midlands, England, NG1 5JD, UK",52.95773220,-1.15617099,edu,
+a6ffe238eaf8632b4a8a6f718c8917e7f3261546,Dynamic facial prosthetics for sufferers of facial paralysis.,Nottingham University Hospital,Nottingham University Hospital,"Nottingham University Hospital, Central Route, Dunkirk, Wollaton, City of Nottingham, East Midlands, England, NG7 2UH, UK",52.94349670,-1.18631123,edu,
+a6ffe238eaf8632b4a8a6f718c8917e7f3261546,Dynamic facial prosthetics for sufferers of facial paralysis.,Nottingham Trent University,Nottingham Trent University,"Nottingham Trent University, Waverley Terrace, Lace Market, The Park, City of Nottingham, East Midlands, England, NG1 5JD, UK",52.95773220,-1.15617099,edu,
+a60540a8407fd117fd8e6857d4728e661f53dcc8,Deep Domain Generalization via Conditional Invariant Adversarial Networks,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+a60540a8407fd117fd8e6857d4728e661f53dcc8,Deep Domain Generalization via Conditional Invariant Adversarial Networks,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+a60540a8407fd117fd8e6857d4728e661f53dcc8,Deep Domain Generalization via Conditional Invariant Adversarial Networks,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+a60540a8407fd117fd8e6857d4728e661f53dcc8,Deep Domain Generalization via Conditional Invariant Adversarial Networks,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+a6e039f0b4f586c2014e42c36ea173e249636f28,Fusion of Depth and Vision Information for Human Detection ⋆,Donghua University,Donghua University,"东华大学, 新华路, 长宁区, 上海市, 210011, 中国",31.20619390,121.41047101,edu,
+a6e039f0b4f586c2014e42c36ea173e249636f28,Fusion of Depth and Vision Information for Human Detection ⋆,Wayne State University,Wayne State University,"Parking Structure 3, East Warren Avenue, New Center, Detroit, Wayne County, Michigan, 48236, USA",42.35775700,-83.06286711,edu,
+a62cbd84251d325ea9e91642a9b37f3026cd3e20,Domain Transfer Through Deep Activation Matching,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+a62cbd84251d325ea9e91642a9b37f3026cd3e20,Domain Transfer Through Deep Activation Matching,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+a660390654498dff2470667b64ea656668c98ecc,Facial expression recognition based on graph-preserving sparse non-negative matrix factorization,Institute of Information Science,Institute of Information Science,"資訊科學研究所, 數理大道, 中研里, 南港子, 南港區, 臺北市, 11574, 臺灣",25.04107280,121.61475620,edu,
+a660390654498dff2470667b64ea656668c98ecc,Facial expression recognition based on graph-preserving sparse non-negative matrix factorization,Beijing Jiaotong University,Beijing Jiaotong University,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国",39.94976005,116.33629046,edu,
+a60907b7ee346b567972074e3e03c82f64d7ea30,Head Motion Signatures from Egocentric Videos,Hebrew University of Jerusalem,The Hebrew University of Jerusalem,"האוניברסיטה העברית בירושלים, Reagan Plaza, קרית מנחם בגין, הר הצופים, ירושלים, מחוז ירושלים, NO, ישראל",31.79185550,35.24472300,edu,
+a60907b7ee346b567972074e3e03c82f64d7ea30,Head Motion Signatures from Egocentric Videos,"IIIT Delhi, India","IIIT Delhi, India","Okhla Industrial Estate, Phase III, Near Govind Puri Metro Station, New Delhi, Delhi 110020, India",28.54562820,77.27315050,edu,
+a6e43b73f9f87588783988333997a81b4487e2d5,Facial Age Estimation by Total Ordering Preserving Projection,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+a6496553fb9ab9ca5d69eb45af1bdf0b60ed86dc,Semi-supervised Neighborhood Preserving Discriminant Embedding: A Semi-supervised Subspace Learning Algorithm,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+a6b5ffb5b406abfda2509cae66cdcf56b4bb3837,One Shot Similarity Metric Learning for Action Recognition,Weizmann Institute of Science,Weizmann Institute of Science,"מכון ויצמן למדע, שדרת מרכוס זיו, מעונות שיין, אחוזות הנשיא, רחובות, מחוז המרכז, NO, ישראל",31.90784990,34.81334092,edu,
+a6b5ffb5b406abfda2509cae66cdcf56b4bb3837,One Shot Similarity Metric Learning for Action Recognition,Open University,The Open University,"The Open University, East Lane, Walton, Monkston, Milton Keynes, South East, England, MK7 6AE, UK",52.02453775,-0.70927481,edu,
+a6b5ffb5b406abfda2509cae66cdcf56b4bb3837,One Shot Similarity Metric Learning for Action Recognition,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+a6bc69831dea3efc5804b8ab65cf5a06688ddae0,Crossing Generative Adversarial Networks for Cross-View Person Re-identification,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+a6bc69831dea3efc5804b8ab65cf5a06688ddae0,Crossing Generative Adversarial Networks for Cross-View Person Re-identification,University of New South Wales,University of New South Wales,"UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia",-33.91758275,151.23124025,edu,
+a68c07cb446f63fa6b48eda04c93392219c09700,Averted eye-gaze disrupts configural face encoding,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+a6e25cab2251a8ded43c44b28a87f4c62e3a548a,Let's Dance: Learning From Online Dance Videos,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+a6270914cf5f60627a1332bcc3f5951c9eea3be0,Joint Attention in Driver-Pedestrian Interaction: from Theory to Practice,York University,York University,"York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada",43.77439110,-79.50481085,edu,
+a6c40d0fb4c0420d1d974f9fbfae83da514ebfbe,Individual and group tracking with the evaluation of social interactions,Middle East Technical University,Middle East Technical University,"ODTÜ, 1, 1591.sk(315.sk), Çiğdem Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87549675,32.78553506,edu,
+a6e43499f0884b4ec4d69460b798021b6e2ae73e,Spatial Bag of Features Learning for Large Scale Face Image Retrieval,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+a6b2c5c527557cc86ae2ce4332b18a7850ee4e1e,Exploring the Spatial Hierarchy of Mixture Models for Human Pose Estimation,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+a6b2c5c527557cc86ae2ce4332b18a7850ee4e1e,Exploring the Spatial Hierarchy of Mixture Models for Human Pose Estimation,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+a624f18087e663dbdbf176de45b863cc59bb2bb8,Aesthetic Evaluation of Facial Portraits Using Compositional Augmentation for Deep CNNs,Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.64185301,edu,
+a6b1d79bc334c74cde199e26a7ef4c189e9acd46,Deep recurrent neural network reveals a hierarchy of process memory during dynamic natural vision.,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+a6b1d79bc334c74cde199e26a7ef4c189e9acd46,Deep recurrent neural network reveals a hierarchy of process memory during dynamic natural vision.,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+a6bbd477851c5642a67817e43302d22bc4a95aaf,Density-Adaptive Kernel based Re-Ranking for Person Re-Identification,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+a6bd679c8a9346a39a003f536f36b7f77c0e09df,Enhanced Pavlovian Aversive Conditioning to Positive Emotional Stimuli,University of Geneva,University of Geneva,"University of Chicago-Yerkes Observatory, 373, West Geneva Street, Williams Bay, Walworth County, Wisconsin, 53191, USA",42.57054745,-88.55578627,edu,
+a6bd679c8a9346a39a003f536f36b7f77c0e09df,Enhanced Pavlovian Aversive Conditioning to Positive Emotional Stimuli,University of Geneva,University of Geneva,"University of Chicago-Yerkes Observatory, 373, West Geneva Street, Williams Bay, Walworth County, Wisconsin, 53191, USA",42.57054745,-88.55578627,edu,
+a6e21438695dbc3a184d33b6cf5064ddf655a9ba,PKU-MMD: A Large Scale Benchmark for Continuous Multi-Modal Human Action Understanding,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+b9bed097cb806ba48cd0245ab50d1a123022eafc,Living a discrete life in a continuous world: Reference in cross-modal entity tracking,Universitat Pompeu Fabra,Universitat Pompeu Fabra,"Dipòsit de les Aigües, Carrer de Wellington, la Vila Olímpica del Poblenou, Ciutat Vella, Barcelona, BCN, CAT, 08071, España",41.39044285,2.18891949,edu,
+b9bed097cb806ba48cd0245ab50d1a123022eafc,Living a discrete life in a continuous world: Reference in cross-modal entity tracking,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+b984c9815fd556cc845adae1f9a206d2a0fa2d33,Hierarchical Relational Networks for Group Activity Recognition and Retrieval,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+b959d5655a3b2f92c2c1a8a7896fecafafea979d,Ambientgan: Generative Models from Lossy Measurements,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+b959d5655a3b2f92c2c1a8a7896fecafafea979d,Ambientgan: Generative Models from Lossy Measurements,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+b959d5655a3b2f92c2c1a8a7896fecafafea979d,Ambientgan: Generative Models from Lossy Measurements,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+b9e6f9e22134d4b5ac66fbf2ec7b7b702c6f4eb7,MASON: A Model AgnoStic ObjectNess Framework,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+b9081856963ceb78dcb44ac410c6fca0533676a3,UntrimmedNets for Weakly Supervised Action Recognition and Detection,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+b986a535e45751cef684a30631a74476e911a749,Improved Person Re-Identification Based on Saliency and Semantic Parsing with Deep Neural Network Models,University of Campinas,University of Campinas,"USJ, 97, Rua Sílvia Maria Fabro, Kobrasol, Campinas, São José, Microrregião de Florianópolis, Mesorregião da Grande Florianópolis, SC, Região Sul, 88102-130, Brasil",-27.59539950,-48.61542180,edu,
+b97a155bdd86491c8d32f02d6dfe5b73aaef4549,Eliminating the mere exposure effect through changes in context between exposure and test.,University of New South Wales,University of New South Wales,"UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia",-33.91758275,151.23124025,edu,
+b97a155bdd86491c8d32f02d6dfe5b73aaef4549,Eliminating the mere exposure effect through changes in context between exposure and test.,University of Plymouth,University of Plymouth,"Charles Seale-Hayne Library, Portland Square, Barbican, Plymouth, South West England, England, PL4 6AP, UK",50.37525010,-4.13927692,edu,
+b94ec9abc3009cfcd1e45647926b4e5084d95136,Classifying Unseen Instances by Learning Class-Independent Similarity Functions,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+b9d73e86a98e93d558366fc3dd002393677808a3,Adversarial Scene Editing: Automatic Object Removal from Weak Supervision,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+b92f984f328851a5572e38ee816ebdcc515f2a0a,Deep Learning Based Surveillance System for Open Critical Areas,University of Florence,University of Florence,"Piazza di San Marco, 4, 50121 Firenze FI, Italy",43.77764260,11.25976500,edu,
+b907537c602b95948da809f7d4aff4bc959d8ba1,Superhuman Accuracy on the SNEMI3D Connectomics Challenge,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+b907537c602b95948da809f7d4aff4bc959d8ba1,Superhuman Accuracy on the SNEMI3D Connectomics Challenge,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+b93bf0a7e449cfd0db91a83284d9eba25a6094d8,Supplementary Material for : Active Pictorial Structures,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+b9c9c7ef82f31614c4b9226e92ab45de4394c5f6,Face Recognition under Varying Illumination,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+b9054aadbbb91f74d373cc82d70b7c513e47139c,Visual Decoding of Targets During Visual Search From Human Eye Fixations,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+b9054aadbbb91f74d373cc82d70b7c513e47139c,Visual Decoding of Targets During Visual Search From Human Eye Fixations,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+b9054aadbbb91f74d373cc82d70b7c513e47139c,Visual Decoding of Targets During Visual Search From Human Eye Fixations,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+b9eb55c2c573e2fffd686b00a39185f0142ef816,The participation payoff: challenges and opportunities for multimedia access in networked communities,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+b9eb55c2c573e2fffd686b00a39185f0142ef816,The participation payoff: challenges and opportunities for multimedia access in networked communities,Delft University of Technology,Delft University of Technology,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland",51.99882735,4.37396037,edu,
+b9262301b11a4d41c8346626a86b603cd2e63992,A Survey of Major Techniques of Clothing in Vision ⋆,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+a1af7ec84472afba0451b431dfdb59be323e35b7,LikeNet: A Siamese Motion Estimation Network Trained in an Unsupervised Way,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+a1f0188029436169002d75af8f23f7ebdad969dd,"Operational Automatic Remote Sensing Image Understanding Systems: Beyond Geographic Object-Based and Object-Oriented Image Analysis (GEOBIA/GEOOIA). Part 2: Novel system Architecture, Information/Knowledge Representation, Algorithm Design and Implementation",University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+a1dd806b8f4f418d01960e22fb950fe7a56c18f1,Interactively building a discriminative vocabulary of nameable attributes,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+a1f9ef1236ddb57efc1ebbd87a1a69db9bc38c4b,Neuroanatomical and neurofunctional markers of social cognition in autism spectrum disorder.,Florida International University,Florida International University,"FIU, Southwest 14th Street, Sweetwater, University Park, Miami-Dade County, Florida, 33199, USA",25.75533775,-80.37628897,edu,
+a1d6c9a03075848014e9bd9baa6edda25e512963,A Fully Convolutional Tri-Branch Network (FCTN) for Domain Adaptation,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+a1c1970f7c728cc96aea798d65d38df7c9ea61dc,Eye Location Using Genetic Algorithm,George Mason University,George Mason University,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA",38.83133325,-77.30798839,edu,
+a1fdf45e6649b0020eb533c70d6062b9183561ff,Where's YOUR focus: Personalized Attention,Hong Kong University of Science and Technology,Hong Kong University of Science and Technology,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.33863040,114.26203370,edu,
+a1fdf45e6649b0020eb533c70d6062b9183561ff,Where's YOUR focus: Personalized Attention,Hong Kong University of Science and Technology,Hong Kong University of Science and Technology,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.33863040,114.26203370,edu,
+a165619977bc69a910a771e1096551073122775b,Computational Crowd Camera : Enabling Remote - Vision via Sparse Collective Plenoptic Sampling,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+a14db48785d41cd57d4eac75949a6b79fc684e70,Fast High Dimensional Vector Multiplication Face Recognition,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+a14db48785d41cd57d4eac75949a6b79fc684e70,Fast High Dimensional Vector Multiplication Face Recognition,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+a14db48785d41cd57d4eac75949a6b79fc684e70,Fast High Dimensional Vector Multiplication Face Recognition,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+a14db48785d41cd57d4eac75949a6b79fc684e70,Fast High Dimensional Vector Multiplication Face Recognition,"IBM Research, North Carolina",IBM Research,"IBM, East Cornwallis Road, Research Triangle Park, Nelson, Durham County, North Carolina, 27709, USA",35.90422720,-78.85565763,company,
+a1ee55d529e04a80f4eae3b30d0961a985a64fa4,Enabling low bitrate mobile visual recognition: a performance versus bandwidth evaluation,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+a1b7bb2a4970b7c479aff3324cc7773c1daf3fc1,Longitudinal Study of Child Face Recognition,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+a1b7bb2a4970b7c479aff3324cc7773c1daf3fc1,Longitudinal Study of Child Face Recognition,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+a155fcf9063a7a33368488123578180a0d1a5a78,An Autonomous Indoor Navigation System Based on Visual Scene Recognition Using Deep Neural Networks,University of Groningen,University of Groningen,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland",53.21967825,6.56251482,edu,
+a1b6aed0b998f0e6e049fcc209287c8b2801d054,An Expression Deformation Approach to Non-rigid 3D Face Recognition,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+a1a49e2c1a424ef2dc6a5cf787d5eadf8421aaa1,Human Body Segmentation based on Background Estimation in Modified HLS Color Space,Dankook University,Dankook University,"단국대학교 치과병원, 죽전로, 죽전동, 수지구, 용인시, 경기, 16900, 대한민국",37.32195750,127.12507230,edu,
+a14ed872503a2f03d2b59e049fd6b4d61ab4d6ca,Attentional Pooling for Action Recognition,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+a1b89c488a723cc496cf931d97e2538ecf9b2991,Low Resolution Face Recognition in Surveillance Systems,Curtin University,Curtin University,"Curtin University, Brand Drive, Waterford, Perth, Western Australia, 6102, Australia",-32.00686365,115.89691775,edu,
+a125bc55bdf4bec7484111eea9ae537be314ec62,Real-time Facial Expression Recognition in Image Sequences Using an AdaBoost-based Multi-classifier,National Taiwan University of Science and Technology,National Taiwan University of Science and Technology,"臺科大, 43, 基隆路四段, 學府里, 下內埔, 大安區, 臺北市, 10607, 臺灣",25.01353105,121.54173736,edu,
+a125bc55bdf4bec7484111eea9ae537be314ec62,Real-time Facial Expression Recognition in Image Sequences Using an AdaBoost-based Multi-classifier,National Taiwan University of Science and Technology,National Taiwan University of Science and Technology,"臺科大, 43, 基隆路四段, 學府里, 下內埔, 大安區, 臺北市, 10607, 臺灣",25.01353105,121.54173736,edu,
+a1497db913ea4031315e24a1027177ad0c4b680a,Robust emotion recognition from low quality and low bit rate video: A deep learning approach,Beckman Institute,Beckman Institute,"Beckman Institute, The Presidents' Walk, Urbana, Champaign County, Illinois, 61801-2341, USA",40.11571585,-88.22750772,edu,
+a1497db913ea4031315e24a1027177ad0c4b680a,Robust emotion recognition from low quality and low bit rate video: A deep learning approach,Texas A&M University,Texas A&M University,"Texas A&M University, Horticulture Street, Park West, College Station, Brazos County, Texas, 77841, USA",30.61083650,-96.35212800,edu,
+a1497db913ea4031315e24a1027177ad0c4b680a,Robust emotion recognition from low quality and low bit rate video: A deep learning approach,University of Missouri,University of Missouri,"L1, Maguire Boulevard, Lemone Industrial Park, Columbia, Boone County, Missouri, 65201, USA",38.92676100,-92.29193783,edu,
+a1497db913ea4031315e24a1027177ad0c4b680a,Robust emotion recognition from low quality and low bit rate video: A deep learning approach,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+a1dd88f44d045b360569a9a8721f728afbd951c3,Relief Impression Image Detection : Unsupervised Extracting Objects Directly From Feature Arrangements of Deep CNN,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+a1e1e7e976c22af9de26d9b74c2ece282e20218c,Looking at My Own Face: Visual Processing Strategies in Self–Other Face Recognition,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+a19904e76b5ded44e6aeb9af85997d160de6bb22,TernausNet: U-Net with VGG11 Encoder Pre-Trained on ImageNet for Image Segmentation,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+a1bbe8b9eab55cdf58746fbf790eeaf626878615,Deep Poselets for Human Detection,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+a1be53dead395b2d83a4009bec76729fce95af83,Tree Identification from Images,Czech Technical University,Czech Technical University,"České vysoké učení technické v Praze, Resslova, Nové Město, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 11121, Česko",50.07642960,14.41802312,edu,
+a1653e88be986aee2f37792c3fb05f0ee7fbef94,Generative Semantic Manipulation with Mask-Contrasting GAN,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+a1653e88be986aee2f37792c3fb05f0ee7fbef94,Generative Semantic Manipulation with Mask-Contrasting GAN,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+a1ffab629c19b9c04fb047dae0471d3de73f3738,Leveraging Unlabeled Data for Crowd Counting by Learning to Rank,University of Florence,University of Florence,"Piazza di San Marco, 4, 50121 Firenze FI, Italy",43.77764260,11.25976500,edu,
+a1557a512ff254a27c11810d362609c237ff6e30,Predicting Images using Convolutional Networks : Visual Scene Understanding with Pixel Maps,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+a1ee0176a9c71863d812fe012b5c6b9c15f9aa8a,Affective recommender systems: the role of emotions in recommender systems,University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.05015580,14.46907327,edu,
+a1ee0176a9c71863d812fe012b5c6b9c15f9aa8a,Affective recommender systems: the role of emotions in recommender systems,University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.05015580,14.46907327,edu,
+a1ee0176a9c71863d812fe012b5c6b9c15f9aa8a,Affective recommender systems: the role of emotions in recommender systems,University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.05015580,14.46907327,edu,
+a1dd9038b1e1e59c9d564e252d3e14705872fdec,Attributes as Operators: Factorizing Unseen Attribute-Object Compositions,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+a16fb74ea66025d1f346045fda00bd287c20af0e,A Coupled Evolutionary Network for Age Estimation,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+effa69fab7c4fdb30265a4bb404f869d327ae326,Recognizing Human Actions by Their Pose,Czech Technical University,Czech Technical University,"České vysoké učení technické v Praze, Resslova, Nové Město, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 11121, Česko",50.07642960,14.41802312,edu,
+ef3b777cbe362a5e97c5ef27eb3289ebfdb70b53,Improving Reinforcement Learning Based Image Captioning with Natural Language Prior,"IBM Research, North Carolina",IBM Research,"IBM, East Cornwallis Road, Research Triangle Park, Nelson, Durham County, North Carolina, 27709, USA",35.90422720,-78.85565763,company,
+efd28eabebb9815e34031316624e7f095c7dfcfe,Combining Face with Face-Part Detectors under Gaussian Assumption,University of Salzburg,University of Salzburg,"Universität Salzburg - Unipark, 1, Erzabt-Klotz-Straße, Nonntal, Salzburg, 5020, Österreich",47.79475945,13.05417525,edu,
+eff87ecafed67cc6fc4f661cb077fed5440994bb,Evaluation of Expression Recognition Techniques,Beckman Institute,Beckman Institute,"Beckman Institute, The Presidents' Walk, Urbana, Champaign County, Illinois, 61801-2341, USA",40.11571585,-88.22750772,edu,
+eff87ecafed67cc6fc4f661cb077fed5440994bb,Evaluation of Expression Recognition Techniques,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+efa2aacb0fbee857015fad1dba72767f56be6f39,Aggregating Crowdsourced Image Segmentations,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+efa2aacb0fbee857015fad1dba72767f56be6f39,Aggregating Crowdsourced Image Segmentations,Facebook,Facebook,"250 Bryant St, Mountain View, CA 94041, USA",37.39367170,-122.08072620,company,"Facebook, Mountain View, CA"
+efa2aacb0fbee857015fad1dba72767f56be6f39,Aggregating Crowdsourced Image Segmentations,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+efc7a620e21abbc882d5a26f0e7a78ae6960be20,Feed-back Method Based on Image Processing for Detecting Human Body Via,Eastern Mediterranean University,Eastern Mediterranean University,"Eastern Mediterranean University (EMU) - Stadium, Nehir Caddesi, Gazimağusa, Αμμόχωστος - Mağusa, Kuzey Kıbrıs, 99450, Κύπρος - Kıbrıs",35.14479945,33.90492318,edu,
+ef458499c3856a6e9cd4738b3e97bef010786adb,Learning Type-Aware Embeddings for Fashion Compatibility,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+ef2a5a26448636570986d5cda8376da83d96ef87,Recurrent Neural Networks and Transfer Learning for Action Recognition,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+ef2a5a26448636570986d5cda8376da83d96ef87,Recurrent Neural Networks and Transfer Learning for Action Recognition,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+ef29e5515b9ae3af358e511a7faa8cdc69bd073b,Neural bases for impaired social cognition in schizophrenia and autism spectrum disorders.,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+ef2e36daf429899bb48d80ce6804731c3f99bb85,"Debnath, Banerjee, Namboodiri: Adapting Ransac-svm to Detect Outliers for Robust Classification",Indian Institute of Technology Kanpur,Indian Institute of Technology Kanpur,"Indian Institute of Technology Kanpur, 4th Avenue, Panki, Kanpur, Kanpur Nagar, Uttar Pradesh, 208016, India",26.51318800,80.23651945,edu,
+ef8de1bd92e9ee9d0d2dee73095d4d348dc54a98,Fine-grained Activity Recognition with Holistic and Pose based Features,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+ef8de1bd92e9ee9d0d2dee73095d4d348dc54a98,Fine-grained Activity Recognition with Holistic and Pose based Features,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+c391478faa3a8903678a7bbc4ab17c8f9601e273,Human Identification Based on Extracted Gait Features,Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.64185301,edu,
+c391478faa3a8903678a7bbc4ab17c8f9601e273,Human Identification Based on Extracted Gait Features,Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.64185301,edu,
+c37b5c43b58f2810bba78fcf2251d5b631428b48,Grounding affect recognition on a low-level description of body posture,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+c37b5c43b58f2810bba78fcf2251d5b631428b48,Grounding affect recognition on a low-level description of body posture,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+c39ef5554b9964f578572d403522380e95802650,Generative mixture of networks,University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.54724732,edu,
+c39ef5554b9964f578572d403522380e95802650,Generative mixture of networks,University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.54724732,edu,
+c39ef5554b9964f578572d403522380e95802650,Generative mixture of networks,University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.54724732,edu,
+c3fd3b9e0de036241d6e0f94fdc5364551e10b6b,The Amygdala Excitatory/Inhibitory Balance in a Valproate-Induced Rat Autism Model,National Cheng Kung University,National Cheng Kung University,"成大, 1, 大學路, 大學里, 前甲, 東區, 臺南市, 70101, 臺灣",22.99919160,120.21625134,edu,
+c3fd3b9e0de036241d6e0f94fdc5364551e10b6b,The Amygdala Excitatory/Inhibitory Balance in a Valproate-Induced Rat Autism Model,National Cheng Kung University,National Cheng Kung University,"成大, 1, 大學路, 大學里, 前甲, 東區, 臺南市, 70101, 臺灣",22.99919160,120.21625134,edu,
+c3f5cf5594e66dbbeb1af72ddfe7d5e24a4f56c0,Learning-Based Run-Time Power and Energy Management of Multi/Many-Core Systems: Current and Future Trends,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+c3f5cf5594e66dbbeb1af72ddfe7d5e24a4f56c0,Learning-Based Run-Time Power and Energy Management of Multi/Many-Core Systems: Current and Future Trends,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+c31fe741266d60177975754d23241879ade0279c,Self-supervised Learning of Geometrically Stable Features Through Probabilistic Introspection,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+c397acf6a2876afe25bb07824f2d6030816cb009,Video-based Person Re-identification via 3D Convolutional Networks and Non-local Attention,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+c397acf6a2876afe25bb07824f2d6030816cb009,Video-based Person Re-identification via 3D Convolutional Networks and Non-local Attention,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+c322706351370b598612dc1e73b8bee78e0e8a5e,Face-specific and domain-general visual processing deficits in children with developmental prosopagnosia.,University of Minnesota,University of Minnesota,"WeismanArt, 333, East River Parkway, Marcy-Holmes, Phillips, Minneapolis, Hennepin County, Minnesota, 55455, USA",44.97308605,-93.23708813,edu,
+c322706351370b598612dc1e73b8bee78e0e8a5e,Face-specific and domain-general visual processing deficits in children with developmental prosopagnosia.,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+c34532fe6bfbd1e6df477c9ffdbb043b77e7804d,A 3D Morphable Eye Region Model for Gaze Estimation,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+c34532fe6bfbd1e6df477c9ffdbb043b77e7804d,A 3D Morphable Eye Region Model for Gaze Estimation,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+c34532fe6bfbd1e6df477c9ffdbb043b77e7804d,A 3D Morphable Eye Region Model for Gaze Estimation,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+c394a5dfe5bea5fbab4c2b6b90d2d03e01fb29c0,Person Reidentification and Recognition in Video,University of South Florida,University of South Florida,"University of South Florida, Leroy Collins Boulevard, Tampa, Hillsborough County, Florida, 33620, USA",28.05999990,-82.41383619,edu,
+c34030a215f0731ead15b358d947f03c33e828bb,Identity Aware Synthesis for Cross Resolution Face Recognition,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+c34787b4708b34742774ba3abba8ace39c6b9052,Input Image : Smile Intensity Generated Responses : Input Question : Input,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+c34787b4708b34742774ba3abba8ace39c6b9052,Input Image : Smile Intensity Generated Responses : Input Question : Input,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+c37c3853ab428725f13906bb0ff4936ffe15d6af,Unsupervised Person Re-identification by Deep Learning Tracklet Association,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+c37c3853ab428725f13906bb0ff4936ffe15d6af,Unsupervised Person Re-identification by Deep Learning Tracklet Association,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+c30982d6d9bbe470a760c168002ed9d66e1718a2,Multi-camera head pose estimation using an ensemble of exemplars,University of North Carolina at Charlotte,University of North Carolina at Charlotte,"Lot 20, Poplar Terrace Drive, Charlotte, Mecklenburg County, North Carolina, 28223, USA",35.31034410,-80.73261617,edu,
+c39ffc56a41d436748b9b57bdabd8248b2d28a32,Residual Attention Network for Image Classification,SenseTime,SenseTime,"China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1号-7",39.99300800,116.32988200,company,"1 Zhongguancun E Rd, Haidian Qu, China"
+c39ffc56a41d436748b9b57bdabd8248b2d28a32,Residual Attention Network for Image Classification,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+c32cd207855e301e6d1d9ddd3633c949630c793a,On the Effect of Illumination and Face Recognition,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+c306c207ac7299872280b47c88f28db4811a319f,Adversarial Inverse Graphics Networks: Learning 2D-to-3D Lifting and Image-to-Image Translation from Unpaired Supervision,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+c3dd6c1ddbb9cfcc1bed6383ffaa0b1ce4d13625,TextSnake: A Flexible Representation for Detecting Text of Arbitrary Shapes,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+c3e1ab13bb3c64ed129e286cec17465fc6bff0e1,Neural Networks for Efficient Bayesian Decoding of Natural Images from Retinal Neurons,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+c3e1ab13bb3c64ed129e286cec17465fc6bff0e1,Neural Networks for Efficient Bayesian Decoding of Natural Images from Retinal Neurons,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+c3e1ab13bb3c64ed129e286cec17465fc6bff0e1,Neural Networks for Efficient Bayesian Decoding of Natural Images from Retinal Neurons,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+c3e1ab13bb3c64ed129e286cec17465fc6bff0e1,Neural Networks for Efficient Bayesian Decoding of Natural Images from Retinal Neurons,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+c3e1ab13bb3c64ed129e286cec17465fc6bff0e1,Neural Networks for Efficient Bayesian Decoding of Natural Images from Retinal Neurons,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+c3e1ab13bb3c64ed129e286cec17465fc6bff0e1,Neural Networks for Efficient Bayesian Decoding of Natural Images from Retinal Neurons,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+c3e1ab13bb3c64ed129e286cec17465fc6bff0e1,Neural Networks for Efficient Bayesian Decoding of Natural Images from Retinal Neurons,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+c32c8bfadda8f44d40c6cd9058a4016ab1c27499,Unconstrained Face Recognition From a Single Image,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+c3cdf580a667a7b91191bbe149cd27b2054cbc43,R-VQA: Learning Visual Relation Facts with Semantic Attention for Visual Question Answering,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+c3cdf580a667a7b91191bbe149cd27b2054cbc43,R-VQA: Learning Visual Relation Facts with Semantic Attention for Visual Question Answering,East China Normal University,East China Normal University,"华东师范大学, 3663, 中山北路, 曹家渡, 普陀区, 普陀区 (Putuo), 上海市, 200062, 中国",31.22849230,121.40211389,edu,
+c3cdf580a667a7b91191bbe149cd27b2054cbc43,R-VQA: Learning Visual Relation Facts with Semantic Attention for Visual Question Answering,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+c4f4122d16e1fdb77cb94152d0d1222b69ddc32b,Face Image Superresolution via Locality Preserving Projection and Sparse Coding,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+c42adbb77919328fad1fdbcc1ae7cdf12c118134,Privacy-Preserving Human Activity Recognition from Extreme Low Resolution,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+c42adbb77919328fad1fdbcc1ae7cdf12c118134,Privacy-Preserving Human Activity Recognition from Extreme Low Resolution,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+c418a3441f992fea523926f837f4bfb742548c16,A Computer Approach for Face Aging Problems,Concordia University,Concordia University,"Concordia University, 2811, Northeast Holman Street, Concordia, Portland, Multnomah County, Oregon, 97211, USA",45.57022705,-122.63709346,edu,
+c4fb2de4a5dc28710d9880aece321acf68338fde,Interactive Generative Adversarial Networks for Facial Expression Generation in Dyadic Interactions,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+c44177896137e5010a2b336b943c23df1f3f92d3,Deep Models Under the GAN: Information Leakage from Collaborative Deep Learning,Stevens Institute of Technology,Stevens Institute of Technology,"Stevens Institute of Technology, River Terrace, Hoboken, Hudson County, New Jersey, 07030, USA",40.74225200,-74.02709490,edu,
+c44177896137e5010a2b336b943c23df1f3f92d3,Deep Models Under the GAN: Information Leakage from Collaborative Deep Learning,Stevens Institute of Technology,Stevens Institute of Technology,"Stevens Institute of Technology, River Terrace, Hoboken, Hudson County, New Jersey, 07030, USA",40.74225200,-74.02709490,edu,
+c44177896137e5010a2b336b943c23df1f3f92d3,Deep Models Under the GAN: Information Leakage from Collaborative Deep Learning,Stevens Institute of Technology,Stevens Institute of Technology,"Stevens Institute of Technology, River Terrace, Hoboken, Hudson County, New Jersey, 07030, USA",40.74225200,-74.02709490,edu,
+c42b28d722dcf2b276fe41da1a811e6bf9e68010,Pose Normalization for Eye Gaze Estimation and Facial Attribute Description from Still Images,University of Basel,University of Basel,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra",47.56126510,7.57529610,edu,
+c44c84540db1c38ace232ef34b03bda1c81ba039,Cross-Age Reference Coding for Age-Invariant Face Recognition and Retrieval,Institute of Information Science,Institute of Information Science,"資訊科學研究所, 數理大道, 中研里, 南港子, 南港區, 臺北市, 11574, 臺灣",25.04107280,121.61475620,edu,
+c44c84540db1c38ace232ef34b03bda1c81ba039,Cross-Age Reference Coding for Age-Invariant Face Recognition and Retrieval,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+c4458cc521d8da6faeedc8c4f09505dace844a05,Automatic Detection of Emotion Valence on Faces Using Consumer Depth Cameras,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+c4458cc521d8da6faeedc8c4f09505dace844a05,Automatic Detection of Emotion Valence on Faces Using Consumer Depth Cameras,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+c4458cc521d8da6faeedc8c4f09505dace844a05,Automatic Detection of Emotion Valence on Faces Using Consumer Depth Cameras,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+c43d8a3d36973e3b830684e80a035bbb6856bcf7,Image Super-Resolution Using Very Deep Residual Channel Attention Networks,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+c43d8a3d36973e3b830684e80a035bbb6856bcf7,Image Super-Resolution Using Very Deep Residual Channel Attention Networks,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+c46a4db7247d26aceafed3e4f38ce52d54361817,A CNN Cascade for Landmark Guided Semantic Part Segmentation,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+c433ef13220c2ed4d2558283f8515b0e6e09bcad,A Public Video Dataset for Road Transportation Applications,Lund University,Lund University,"TEM at Lund University, 9, Klostergatan, Stadskärnan, Centrum, Lund, Skåne, Götaland, 22222, Sverige",55.70395710,13.19020110,edu,
+c433ef13220c2ed4d2558283f8515b0e6e09bcad,A Public Video Dataset for Road Transportation Applications,Lund University,Lund University,"TEM at Lund University, 9, Klostergatan, Stadskärnan, Centrum, Lund, Skåne, Götaland, 22222, Sverige",55.70395710,13.19020110,edu,
+c433ef13220c2ed4d2558283f8515b0e6e09bcad,A Public Video Dataset for Road Transportation Applications,Lund University,Lund University,"TEM at Lund University, 9, Klostergatan, Stadskärnan, Centrum, Lund, Skåne, Götaland, 22222, Sverige",55.70395710,13.19020110,edu,
+c433ef13220c2ed4d2558283f8515b0e6e09bcad,A Public Video Dataset for Road Transportation Applications,Lund University,Lund University,"TEM at Lund University, 9, Klostergatan, Stadskärnan, Centrum, Lund, Skåne, Götaland, 22222, Sverige",55.70395710,13.19020110,edu,
+c433ef13220c2ed4d2558283f8515b0e6e09bcad,A Public Video Dataset for Road Transportation Applications,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+c433ef13220c2ed4d2558283f8515b0e6e09bcad,A Public Video Dataset for Road Transportation Applications,Lund University,Lund University,"TEM at Lund University, 9, Klostergatan, Stadskärnan, Centrum, Lund, Skåne, Götaland, 22222, Sverige",55.70395710,13.19020110,edu,
+c41de506423e301ef2a10ea6f984e9e19ba091b4,Modeling Attributes from Category-Attribute Proportions,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+c41de506423e301ef2a10ea6f984e9e19ba091b4,Modeling Attributes from Category-Attribute Proportions,"IBM Research, North Carolina",IBM Research,"IBM, East Cornwallis Road, Research Triangle Park, Nelson, Durham County, North Carolina, 27709, USA",35.90422720,-78.85565763,company,
+c45681fa9d9c36a6a196017ef283ac38904f91bb,Pixel-wise object tracking,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+c437d0485217685f9ea42c33e492090b58de1db6,Mining Histopathological Images via Composite Hashing and Online Learning,University of Kentucky,University of Kentucky,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA",38.03337420,-84.50177580,edu,
+c437d0485217685f9ea42c33e492090b58de1db6,Mining Histopathological Images via Composite Hashing and Online Learning,IBM Thomas J. Watson Research Center,IBM Thomas J. Watson Research Center,"IBM Yorktown research lab, Adams Road, Millwood, Town of New Castle, Westchester County, New York, 10562, USA",41.21002475,-73.80407056,company,
+c4934d9f9c41dbc46f4173aad2775432fe02e0e6,Generalization to New Compositions of Known Entities in Image Understanding,Bar-Ilan University,Bar-Ilan University,"אוניברסיטת בר אילן, כביש גהה, גבעת שמואל, קריית מטלון, גבעת שמואל, מחוז תל אביב, NO, ישראל",32.06932925,34.84334339,edu,
+c4934d9f9c41dbc46f4173aad2775432fe02e0e6,Generalization to New Compositions of Known Entities in Image Understanding,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+c40c23e4afc81c8b119ea361e5582aa3adecb157,Coupled Marginal Fisher Analysis for Low-Resolution Face Recognition,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+c4be56287fd666f9cfff257018a42e00dc56499d,The role of the fusiform-amygdala system in the pathophysiology of autism.,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+c49aed65fcf9ded15c44f9cbb4b161f851c6fa88,Multiscale Facial Expression Recognition Using Convolutional Neural Networks,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+c458db5d616058fbd9de19acc6c82827396cf195,Person Re-Identification with Discriminatively Trained Viewpoint Invariant Dictionaries,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+c40174aeb1be3998a2f8faae28d6689611bb7aad,"Learning a dense multi-view representation for detection, viewpoint classification and synthesis of object categories",Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+c40174aeb1be3998a2f8faae28d6689611bb7aad,"Learning a dense multi-view representation for detection, viewpoint classification and synthesis of object categories",Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+c40174aeb1be3998a2f8faae28d6689611bb7aad,"Learning a dense multi-view representation for detection, viewpoint classification and synthesis of object categories",Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+c43ed9b34cad1a3976bac7979808eb038d88af84,Semi-supervised Adversarial Learning to Generate Photorealistic Face Images of New Identities from 3D Morphable Model,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+c43ed9b34cad1a3976bac7979808eb038d88af84,Semi-supervised Adversarial Learning to Generate Photorealistic Face Images of New Identities from 3D Morphable Model,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+c4ac98154efdd73fd3ec9954dfb5ed32b95f7ca5,Face Recognition Based on Improved Space Variant,Chonbuk National University,Chonbuk National University,"전북대학교, 567, 백제대로, 금암동, 덕진구, 전주시, 전북, 54896, 대한민국",35.84658875,127.13501330,edu,
+c48ee576130473efe6dc3ee47f552bc581aa68b2,Computational Intelligence Challenges and Applications on Large-Scale Astronomical Time Series Databases,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+c48ee576130473efe6dc3ee47f552bc581aa68b2,Computational Intelligence Challenges and Applications on Large-Scale Astronomical Time Series Databases,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+c466ad258d6262c8ce7796681f564fec9c2b143d,Pose-Invariant Face Recognition Using A Single 3D Reference Model,National Taiwan University of Science and Technology,National Taiwan University of Science and Technology,"臺科大, 43, 基隆路四段, 學府里, 下內埔, 大安區, 臺北市, 10607, 臺灣",25.01353105,121.54173736,edu,
+ea3b13e512c846e2bb29d99f5f97fcf8c7f52836,Adding the Third Dimension to Spatial Relation Detection in 2D Images,University of Malta,University of Malta,"University of Malta, Ring Road, Japanese Garden, L-Imsida, Malta, MSD 9027, Malta",35.90232260,14.48341890,edu,
+ea00489323104d70dd43bac5e15390ec4d6dfe8f,Transfer for Person Re-identification,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+ea00489323104d70dd43bac5e15390ec4d6dfe8f,Transfer for Person Re-identification,South China University of Technology,South China University of Technology,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.05020420,113.39880323,edu,
+ea00489323104d70dd43bac5e15390ec4d6dfe8f,Transfer for Person Re-identification,Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+ea00489323104d70dd43bac5e15390ec4d6dfe8f,Transfer for Person Re-identification,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+ea46951b070f37ad95ea4ed08c7c2a71be2daedc,Using phase instead of optical flow for action recognition,Delft University of Technology,Delft University of Technology,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland",51.99882735,4.37396037,edu,
+ea46951b070f37ad95ea4ed08c7c2a71be2daedc,Using phase instead of optical flow for action recognition,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+ea985e35b36f05156f82ac2025ad3fe8037be0cd,CERTH/CEA LIST at MediaEval Placing Task 2015,Information Technologies Institute,Information Technologies Institute,"公益財団法人九州先端科学技術研究所, Fukuoka SRP Center Building 7F, 百道ランプ下り入り口, 早良区, 福岡市, 福岡県, 九州地方, 814-0001, 日本",33.59345390,130.35578370,edu,
+ea3f9321d4609ac3a659b66aae204f0b0e2a8ba1,Distractor-Aware Siamese Networks for Visual Object Tracking,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+ea3f9321d4609ac3a659b66aae204f0b0e2a8ba1,Distractor-Aware Siamese Networks for Visual Object Tracking,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+ea3f9321d4609ac3a659b66aae204f0b0e2a8ba1,Distractor-Aware Siamese Networks for Visual Object Tracking,SenseTime,SenseTime,"China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1号-7",39.99300800,116.32988200,company,"1 Zhongguancun E Rd, Haidian Qu, China"
+ea80a050d20c0e24e0625a92e5c03e5c8db3e786,Face Verification and Face Image Synthesis under Illumination Changes using Neural Networks by,Hebrew University of Jerusalem,The Hebrew University of Jerusalem,"האוניברסיטה העברית בירושלים, Reagan Plaza, קרית מנחם בגין, הר הצופים, ירושלים, מחוז ירושלים, NO, ישראל",31.79185550,35.24472300,edu,
+eacba5e8fbafb1302866c0860fc260a2bdfff232,VOS-GAN: Adversarial Learning of Visual-Temporal Dynamics for Unsupervised Dense Prediction in Videos,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+ea8707f8d527018c063a688bbd5a88f74506b288,Be Your Own Prada: Fashion Synthesis with Structural Coherence,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+ea8707f8d527018c063a688bbd5a88f74506b288,Be Your Own Prada: Fashion Synthesis with Structural Coherence,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+ea050801199f98a1c7c1df6769f23f658299a3ae,The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions,Korea University,Korea University,"고려대, 안암로, 제기동, 동대문구, 서울특별시, 02796, 대한민국",37.59014110,127.03623180,edu,
+ea32e570ddf5661cdb030132e15e68e30ba6b24a,People Re-identification Based on Bags of Semantic Features,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+ea4a93a97bf0bff059034c707fa75a2ca13d8048,Extracting Minimalistic Corridor Geometry from Low-Resolution Images,Clemson University,Clemson University,"Clemson University, Old Stadium Road, Clemson Heights, Pickens County, South Carolina, 29631, USA",34.66869155,-82.83743476,edu,
+ead1db02b36146ef5c3ef29a1cc411a8f01bc56b,A Overview About Image Segmentation,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+eadbad2a715bd1b0822ec3790c65765c61924549,Scalable Deep $k$-Subspace Clustering,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+ea923da826b9e6f89159cc960db7aac91b5ecbd6,Approved by Major Professor(s): Approved by Head of Graduate Program: Date of Graduate Program Head's Approval: Abhilasha Bhargav-Spantzel Protocols and Systems for Privacy Preserving Protection of Digital Identity Doctor of Philosophy,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+ea4098d86802dff863fe9f91cbc75b195d452d34,"Tensorize, Factorize and Regularize: Robust Visual Relationship Learning",University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+ea4098d86802dff863fe9f91cbc75b195d452d34,"Tensorize, Factorize and Regularize: Robust Visual Relationship Learning",University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+ea7fbfd02bf17b310e1e7f4be12d106b4990c33d,Image Generation and Editing with Variational Info Generative AdversarialNetworks,Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.02223470,77.56718325,edu,
+ea6207e553a5c8a3e171a8f6b6297688ab43f92d,DeepObfuscation: Securing the Structure of Convolutional Neural Networks via Knowledge Distillation,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+ea6207e553a5c8a3e171a8f6b6297688ab43f92d,DeepObfuscation: Securing the Structure of Convolutional Neural Networks via Knowledge Distillation,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+ea890846912f16a0f3a860fce289596a7dac575f,Benefits of social vs. non-social feedback on learning and generosity. Results from the Tipping Game,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+ea890846912f16a0f3a860fce289596a7dac575f,Benefits of social vs. non-social feedback on learning and generosity. Results from the Tipping Game,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+eaaed082762337e7c3f8a1b1dfea9c0d3ca281bf,Algebraic Simplification of Genetic Programs during Evolution,Victoria University of Wellington,Victoria University of Wellington,"Victoria University of Wellington, Waiteata Road, Aro Valley, Wellington, Wellington City, Wellington, 6040, New Zealand/Aotearoa",-41.29052775,174.76846919,edu,
+ea218cebea2228b360680cb85ca133e8c2972e56,Recover Canonical-View Faces in the Wild with Deep Neural Networks,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+e1630014a5ae3d2fb7ff6618f1470a567f4d90f5,"Look, Listen and Learn - A Multimodal LSTM for Speaker Identification",SenseTime,SenseTime,"China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1号-7",39.99300800,116.32988200,company,"1 Zhongguancun E Rd, Haidian Qu, China"
+e1630014a5ae3d2fb7ff6618f1470a567f4d90f5,"Look, Listen and Learn - A Multimodal LSTM for Speaker Identification",University of Hong Kong,University of Hong Kong,"海洋科學研究所 The Swire Institute of Marine Science, 鶴咀道 Cape D'Aguilar Road, 鶴咀低電台 Cape D'Aguilar Low-Level Radio Station, 石澳 Shek O, 芽菜坑村 Nga Choy Hang Tsuen, 南區 Southern District, 香港島 Hong Kong Island, HK, 中国",22.20814690,114.25964115,edu,
+e1e2e32f29cf7d23881e98dfe018d9049bdb070d,Image Understanding using Vision and Reasoning through Scene Description Graph,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+e1e2e32f29cf7d23881e98dfe018d9049bdb070d,Image Understanding using Vision and Reasoning through Scene Description Graph,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+e19fb22b35c352f57f520f593d748096b41a4a7b,"Modeling Context for Image Understanding: When, For What, and How?",Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+e1d1ed79174cd8442409bcb3f296101852ddcb95,Theory and Practice of Globally Optimal Deformation Estimation,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+e1d1ed79174cd8442409bcb3f296101852ddcb95,Theory and Practice of Globally Optimal Deformation Estimation,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+e1d1ed79174cd8442409bcb3f296101852ddcb95,Theory and Practice of Globally Optimal Deformation Estimation,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+e1d1ed79174cd8442409bcb3f296101852ddcb95,Theory and Practice of Globally Optimal Deformation Estimation,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+e1e6a4146c082e5465cde38e9511de3d150b4ede,Targeting static and dynamic workloads with a reconfigurable VLIW processor,Delft University of Technology,Delft University of Technology,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland",51.99882735,4.37396037,edu,
+e1c59e00458b4dee3f0e683ed265735f33187f77,Spectral Rotation versus K-Means in Spectral Clustering,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+e1e490b5d0a179b8eea022b64e83bbd611114d4e,Gradient-based learning of higher-order image features,University of Frankfurt,University of Frankfurt,"Frankfurt University of Applied Sciences, Kleiststraße, Nordend West, Frankfurt, Regierungsbezirk Darmstadt, Hessen, 60318, Deutschland",50.13053055,8.69234224,edu,
+e121bf6f18e1cb114216a521df63c55030d10fbe,Robust Facial Component Detection for Face Alignment Applications,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+e103fa24d7fa297cd206b22b3bf670bfda6c65c4,Object Detection in Very High-Resolution Aerial Images Using One-Stage Densely Connected Feature Pyramid Network,Chonbuk National University,Chonbuk National University,"전북대학교, 567, 백제대로, 금암동, 덕진구, 전주시, 전북, 54896, 대한민국",35.84658875,127.13501330,edu,
+e103fa24d7fa297cd206b22b3bf670bfda6c65c4,Object Detection in Very High-Resolution Aerial Images Using One-Stage Densely Connected Feature Pyramid Network,Chonbuk National University,Chonbuk National University,"전북대학교, 567, 백제대로, 금암동, 덕진구, 전주시, 전북, 54896, 대한민국",35.84658875,127.13501330,edu,
+e11247abf2c359428d414a97ea21e0744e2ef9ac,Face Recognition from Sequential Sparse 3D data via Deep Registration,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+e188aa199d4307fdbbf60e9e6612bcb001e1cab6,Say Cheese: Personal Photography Layout Recommendation Using 3D Aesthetics Estimation,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+e1ab3b9dee2da20078464f4ad8deb523b5b1792e,Pre-Training CNNs Using Convolutional Autoencoders,TU Berlin,TU Berlin,"Franklinstraße 28-29, 10587 Berlin, Germany",52.51806410,13.32504250,edu,"Franklinstr. 28/29, 10587, Germany"
+e1ab3b9dee2da20078464f4ad8deb523b5b1792e,Pre-Training CNNs Using Convolutional Autoencoders,TU Berlin,TU Berlin,"Franklinstraße 28-29, 10587 Berlin, Germany",52.51806410,13.32504250,edu,"Franklinstr. 28/29, 10587, Germany"
+e1ab3b9dee2da20078464f4ad8deb523b5b1792e,Pre-Training CNNs Using Convolutional Autoencoders,TU Berlin,TU Berlin,"Franklinstraße 28-29, 10587 Berlin, Germany",52.51806410,13.32504250,edu,"Franklinstr. 28/29, 10587, Germany"
+e1ab3b9dee2da20078464f4ad8deb523b5b1792e,Pre-Training CNNs Using Convolutional Autoencoders,TU Berlin,TU Berlin,"Franklinstraße 28-29, 10587 Berlin, Germany",52.51806410,13.32504250,edu,"Franklinstr. 28/29, 10587, Germany"
+e1f6e2651b7294951b5eab5d2322336af1f676dc,Emotional Avatars: Appearance Augmentation and Animation based on Facial Expression Analysis,Sejong University,"Intelligent Media Laboratory, Digital Contents Research Institute, Sejong University, Seoul, South Korea","209 Neungdong-ro, Gunja-dong, Gwangjin-gu, Seoul, South Korea",37.55025960,127.07313900,edu,
+e1af55ad7bb26e5e1acde3ec6c5c43cffe884b04,Person Re-identification by Mid-level Attribute and Part-based Identity Learning,East China Normal University,East China Normal University,"华东师范大学, 3663, 中山北路, 曹家渡, 普陀区, 普陀区 (Putuo), 上海市, 200062, 中国",31.22849230,121.40211389,edu,
+e10ca043fae02972f19292efacddd8e0f216b70c,Zero-Shot Object Detection: Learning to Simultaneously Recognize and Localize Novel Concepts,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+e1256ff535bf4c024dd62faeb2418d48674ddfa2,Towards Open-Set Identity Preserving Face Synthesis,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+e1256ff535bf4c024dd62faeb2418d48674ddfa2,Towards Open-Set Identity Preserving Face Synthesis,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+e1bf18d2933e5f24d598fcaa5318c45cea373c39,Large-Scale Machine Learning for Classification and Search,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+cd6aaa37fffd0b5c2320f386be322b8adaa1cc68,Deep Face Recognition: A Survey,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+cd32d7383b1e987329d2412f2907b7db6dd8d396,Explaining the Unexplained: A CLass-Enhanced Attentive Response (CLEAR) Approach to Understanding Deep Neural Networks,University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.54724732,edu,
+cd32d7383b1e987329d2412f2907b7db6dd8d396,Explaining the Unexplained: A CLass-Enhanced Attentive Response (CLEAR) Approach to Understanding Deep Neural Networks,University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.54724732,edu,
+cdc7bd87a2c9983dab728dbc8aac74d8c9ed7e66,l 1 l 2 l 3 l 4 l 5 ( a ) Class-Agnostic Temporal,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+cd1349619415202e82475353e2b2a60da2e5bd65,Optimal Illumination for Three-Image Photometric Stereo using Sensitivity Analysis,Heriot-Watt University,Heriot-Watt University,"Heriot-Watt University - Edinburgh Campus, Third Gait, Currie, Gogarbank, City of Edinburgh, Scotland, EH14 4AS, UK",55.91029135,-3.32345777,edu,
+cd4941cbef1e27d7afdc41b48c1aff5338aacf06,MovieGraphs: Towards Understanding Human-Centric Situations from Videos,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+cdef0eaff4a3c168290d238999fc066ebc3a93e8,Contrastive-center loss for deep neural networks,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+cd23dc3227ee2a3ab0f4de1817d03ca771267aeb,Face Recognition via Deep Sparse Graph Neural Networks,Waseda University,Waseda University,"早稲田大学 北九州キャンパス, 2-2, 有毛引野線, 八幡西区, 北九州市, 福岡県, 九州地方, 808-0135, 日本",33.88987280,130.70856205,edu,
+cd323dc4b67965a4f16b5b0a55fcc1ff0396b375,Action 2 Vec : A Crossmodal Embedding Approach to Action Learning,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+cdb1d32bc5c1a9bb0d9a5b9c9222401eab3e9ca0,Functional Faces: Groupwise Dense Correspondence Using Functional Maps,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+cd9d654c6a4250e0cf8bcfddc2afab9e70ee6cae,Object Detection with Mask-based Feature Encoding,University of South Carolina,University of South Carolina,"University of South Carolina, Wheat Street, Columbia, Richland County, South Carolina, 29205, USA",33.99282980,-81.02685168,edu,
+cd9d654c6a4250e0cf8bcfddc2afab9e70ee6cae,Object Detection with Mask-based Feature Encoding,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+cdc535719aa041b7bbd529eab4582619a04b706e,Multi-channel Convolutional Neural Network Ensemble for Pedestrian Detection,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+cd0503a31a9f9040736ccfb24086dc934508cfc7,Maximizing Resource Utilization In Video Streaming Systems,Wayne State University,Wayne State University,"Parking Structure 3, East Warren Avenue, New Center, Detroit, Wayne County, Michigan, 48236, USA",42.35775700,-83.06286711,edu,
+cd0503a31a9f9040736ccfb24086dc934508cfc7,Maximizing Resource Utilization In Video Streaming Systems,Wayne State University,Wayne State University,"Parking Structure 3, East Warren Avenue, New Center, Detroit, Wayne County, Michigan, 48236, USA",42.35775700,-83.06286711,edu,
+cd2605b31feb84fb53a5a56b21499f4ebff20385,DEEPEYE: A Compact and Accurate Video Comprehension at Terminal Devices Compressed with Quantization and Tensorization,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+cd2605b31feb84fb53a5a56b21499f4ebff20385,DEEPEYE: A Compact and Accurate Video Comprehension at Terminal Devices Compressed with Quantization and Tensorization,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+cd2605b31feb84fb53a5a56b21499f4ebff20385,DEEPEYE: A Compact and Accurate Video Comprehension at Terminal Devices Compressed with Quantization and Tensorization,"University of California, Riverside","University of California, Riverside","University of California, Riverside, Linden Street, Riverside, Riverside County, California, 92521, USA",33.98071305,-117.33261035,edu,
+cda6c8904c324e4eb32e83cada17cd1a7d47a348,Unsupervised Multi-Domain Image Translation with Domain-Specific Encoders/Decoders,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+cd22375bf1d917b928aee006b65cd92c7bfe0927,FMCode: A 3D In-the-Air Finger Motion Based User Login Framework for Gesture Interface,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+cd22375bf1d917b928aee006b65cd92c7bfe0927,FMCode: A 3D In-the-Air Finger Motion Based User Login Framework for Gesture Interface,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+cd4850de71e4e858be5f5e6ef7f48d5bf7decea6,Distribution Entropy Boosted VLAD for Image Retrieval,Jilin University,Jilin University,"吉林大学珠海校区, 丹桂路, 圣堂村, 金湾区, 珠海市, 广东省, 中国",22.05356500,113.39913285,edu,
+cd4850de71e4e858be5f5e6ef7f48d5bf7decea6,Distribution Entropy Boosted VLAD for Image Retrieval,Jilin University,Jilin University,"吉林大学珠海校区, 丹桂路, 圣堂村, 金湾区, 珠海市, 广东省, 中国",22.05356500,113.39913285,edu,
+cd4850de71e4e858be5f5e6ef7f48d5bf7decea6,Distribution Entropy Boosted VLAD for Image Retrieval,Jilin University,Jilin University,"吉林大学珠海校区, 丹桂路, 圣堂村, 金湾区, 珠海市, 广东省, 中国",22.05356500,113.39913285,edu,
+cd4850de71e4e858be5f5e6ef7f48d5bf7decea6,Distribution Entropy Boosted VLAD for Image Retrieval,Jilin University,Jilin University,"吉林大学珠海校区, 丹桂路, 圣堂村, 金湾区, 珠海市, 广东省, 中国",22.05356500,113.39913285,edu,
+cd4850de71e4e858be5f5e6ef7f48d5bf7decea6,Distribution Entropy Boosted VLAD for Image Retrieval,Jilin University,Jilin University,"吉林大学珠海校区, 丹桂路, 圣堂村, 金湾区, 珠海市, 广东省, 中国",22.05356500,113.39913285,edu,
+cd4850de71e4e858be5f5e6ef7f48d5bf7decea6,Distribution Entropy Boosted VLAD for Image Retrieval,Jilin University,Jilin University,"吉林大学珠海校区, 丹桂路, 圣堂村, 金湾区, 珠海市, 广东省, 中国",22.05356500,113.39913285,edu,
+cd6cab9357f333ad9966abc76f830c190a1b7911,"Recognition, reorganisation, reconstruction and reinteraction for scene understanding",Oxford Brookes University,Oxford Brookes University,"Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK",51.75552050,-1.22615970,edu,
+cd87fea30b68ad1c9ebcb71a224c53cde3516adb,EXTRACTING THE X FACTOR IN HUMAN PARSING 3 Factored module Factored task Aggregation module Input Main task Shared features Silhouette Body parts The X Factor bottleneck layers bottleneck layers bottleneck layers Initial module bottleneck layers initial block,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+cd2221518485f829d3fad81e33ef4033ffa66f75,Multiple Images Recovery Using a Single Affine Transformation,Anhui University,Anhui University,"安徽大学(磬苑校区), 111, 九龙路, 弘泰苑, 合肥国家级经济技术开发区, 芙蓉社区, 合肥经济技术开发区, 合肥市区, 合肥市, 安徽省, 230601, 中国",31.76909325,117.17795091,edu,
+cd2221518485f829d3fad81e33ef4033ffa66f75,Multiple Images Recovery Using a Single Affine Transformation,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+cd44668fd6b7e8d2606f8c634a5b571d172693ff,Convolutional Neural Networks for Iris Presentation Attack Detection : Toward Cross-Dataset and Cross-Sensor Generalization,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+cdb293381ff396d6e9c0f5e9578d411e759347fd,3D-R2N2: A Unified Approach for Single and Multi-view 3D Object Reconstruction,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+cd4cce724c8a33f72b068a267cd6152c31851013,Scanning Strategies Do Not Modulate Face Identification: Eye-Tracking and Near-Infrared Spectroscopy Study,Tohoku University,Tohoku University,"Tohoku University, 五橋通, 青葉区, 仙台市, 宮城県, 東北地方, 980-0811, 日本",38.25309450,140.87365930,edu,
+cd000f4a7a64db5e00b200b93cc3f13c9e313c01,Attributes as Operators,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+cd436f05fb4aeeda5d1085f2fe0384526571a46e,Information Bottleneck Domain Adaptation with Privileged Information for Visual Recognition,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+cd468236213273b96d985dcc859f24c0a19e3077,Hopc : a Novel Similarity Metric Based on Geometric Structural Properties for Multi-modal Remote Sensing Image Matching,Southwest Jiaotong University,Southwest Jiaotong University,"西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国",30.69784700,104.05208110,edu,
+cd468236213273b96d985dcc859f24c0a19e3077,Hopc : a Novel Similarity Metric Based on Geometric Structural Properties for Multi-modal Remote Sensing Image Matching,Southwest Jiaotong University,Southwest Jiaotong University,"西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国",30.69784700,104.05208110,edu,
+cd010fc089c580c87c5cff4aa6a9b1d6d41e2470,"Digital Images Authentication Technique Based on DWT, DCT and Local Binary Patterns",University of Kent,University of Kent,"University of Kent, St. Stephen's Hill, Hackington, Canterbury, Kent, South East, England, CT2 7AS, UK",51.29753440,1.07296165,edu,
+cd1758d3b86c4f1caf01ec222b45daf15888d1a8,MMD GAN: Towards Deeper Understanding of Moment Matching Network,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+cd1758d3b86c4f1caf01ec222b45daf15888d1a8,MMD GAN: Towards Deeper Understanding of Moment Matching Network,"IBM Research, North Carolina",IBM Research,"IBM, East Cornwallis Road, Research Triangle Park, Nelson, Durham County, North Carolina, 27709, USA",35.90422720,-78.85565763,company,
+cd2c54705c455a4379f45eefdf32d8d10087e521,A Hybrid Model for Identity Obfuscation by Face Replacement,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+cd9f1f429b41c4c125df231bab8872e012ff5316,STEM inSight: Developing a Research Skills Course for First- and Second-Year Students,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+cd9f1f429b41c4c125df231bab8872e012ff5316,STEM inSight: Developing a Research Skills Course for First- and Second-Year Students,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+cdf2510d1fa51e911ef8f2618d41707b0c037d3f,Face Identification in Multimedia Archives,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+cde8186c38c04dacac2e1fac1c3c68cf46516b9f,Hierarchical Network for Facial Palsy Detection,National Taiwan University of Science and Technology,National Taiwan University of Science and Technology,"臺科大, 43, 基隆路四段, 學府里, 下內埔, 大安區, 臺北市, 10607, 臺灣",25.01353105,121.54173736,edu,
+cd023d2d067365c83d8e27431e83e7e66082f718,Real-Time Rotation-Invariant Face Detection with Progressive Calibration Networks,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+cd023d2d067365c83d8e27431e83e7e66082f718,Real-Time Rotation-Invariant Face Detection with Progressive Calibration Networks,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+cdde9cfb726e177b781dffbbb41d15cf58d7f888,A New SIFT-Based Image Descriptor Applicable for Content Based Image Retrieval,Islamic Azad University,Islamic Azad University,"دانشگاه آزاد اسلامی, همدان, بخش مرکزی شهرستان همدان, شهرستان همدان, استان همدان, ‏ایران‎",34.84529990,48.55962120,edu,
+cca9ae621e8228cfa787ec7954bb375536160e0d,Learning to Collaborate for User-Controlled Privacy,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+cca9ae621e8228cfa787ec7954bb375536160e0d,Learning to Collaborate for User-Controlled Privacy,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+ccc57280e2c50381a692a67ed53124ad1b735686,Self-Reflective Risk-Aware Artificial Cognitive Modeling for Robot Response to Human Behaviors,University of Tennessee,University of Tennessee,"University of Tennessee, Melrose Avenue, Fort Sanders, Knoxville, Knox County, Tennessee, 37916, USA",35.95424930,-83.93073950,edu,
+ccf413e4a730ee228769c82a8af1fddc2857fbe8,Deep Learning Based Multi-modal Addressee Recognition in Visual Scenes with Utterances,Tokyo Institute of Technology,Tokyo Institute of Technology,"東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本",35.51675380,139.48342251,edu,
+cc589c499dcf323fe4a143bbef0074c3e31f9b60,A 3D facial expression database for facial behavior research,SUNY Binghamton,State University of New York at Binghamton,"State University of New York at Binghamton, East Drive, Hinman, Willow Point, Vestal Town, Broome County, New York, 13790, USA",42.08779975,-75.97066066,edu,
+cc4fc9a309f300e711e09712701b1509045a8e04,Continuous Supervised Descent Method for Facial Landmark Localisation,Universitat Oberta de Catalunya,Universitat Oberta de Catalunya,"Universitat Oberta de Catalunya, 156, Rambla del Poblenou, Provençals del Poblenou, Sant Martí, Barcelona, BCN, CAT, 08018, España",41.40657415,2.19453410,edu,
+cc4fc9a309f300e711e09712701b1509045a8e04,Continuous Supervised Descent Method for Facial Landmark Localisation,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+cc4fc9a309f300e711e09712701b1509045a8e04,Continuous Supervised Descent Method for Facial Landmark Localisation,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+cc2eaa182f33defbb33d69e9547630aab7ed9c9c,Surpassing Humans and Computers with JELLYBEAN: Crowd-Vision-Hybrid Counting Algorithms,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+cc2eaa182f33defbb33d69e9547630aab7ed9c9c,Surpassing Humans and Computers with JELLYBEAN: Crowd-Vision-Hybrid Counting Algorithms,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+cc2eaa182f33defbb33d69e9547630aab7ed9c9c,Surpassing Humans and Computers with JELLYBEAN: Crowd-Vision-Hybrid Counting Algorithms,Ohio State University,The Ohio State University,"The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA",40.00471095,-83.02859368,edu,
+cc2eaa182f33defbb33d69e9547630aab7ed9c9c,Surpassing Humans and Computers with JELLYBEAN: Crowd-Vision-Hybrid Counting Algorithms,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+cc3ef62b4a7eb6c4e45302deb89df2e547b6efcc,Creating Picture Legends for Group Photos,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+cc353489ceaba1f58bd44f54316bc8319eba5fb9,Program Synthesis from Visual Specification,University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+cc5a1cf7ad9d644f21a5df799ffbcb8d1e24abe1,MonoPerfCap: Human Performance Capture from Monocular Video,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+ccdea57234d38c7831f1e9231efcb6352c801c55,Illumination Processing in Face Recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+ccd7a6b9f23e983a3fc6a70cc3b9c9673d70bf2c,Symmetrical Two-Dimensional PCA with Image Measures in Face Recognition,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"Columbus, OH 43210, USA",40.01419050,-83.03091430,edu,
+ccd7a6b9f23e983a3fc6a70cc3b9c9673d70bf2c,Symmetrical Two-Dimensional PCA with Image Measures in Face Recognition,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"Columbus, OH 43210, USA",40.01419050,-83.03091430,edu,
+ccb9ffa26b28dffc4f7d613821d1a9f0d60ea3f4,Online Adaptation of Convolutional Neural Networks for Video Object Segmentation,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+cc2cf8f69cd2d16c9bee2bb6c598548e7ff7cb05,Unsupervised Learning of Invariant Representations in Visual Cortex ( and in Deep Learning Architectures ),McGovern Institute for Brain Research,McGovern Institute for Brain Research,"McGovern Institute for Brain Research (MIT), Main Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.36262950,-71.09144810,edu,
+cc2cf8f69cd2d16c9bee2bb6c598548e7ff7cb05,Unsupervised Learning of Invariant Representations in Visual Cortex ( and in Deep Learning Architectures ),MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+ccd3dcbccae7d903608530bddf6381db8e723a7d,Unsupervised Domain Adaptation for Semantic Segmentation with GANs,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+ccd3dcbccae7d903608530bddf6381db8e723a7d,Unsupervised Domain Adaptation for Semantic Segmentation with GANs,GE Global Research Center,GE Global Research Center,"GE Global Research Center, Aqueduct, Niskayuna, Schenectady County, New York, USA",42.82982480,-73.87719385,edu,
+cc38942825d3a2c9ee8583c153d2c56c607e61a7,Database Cross Matching: A Novel Source of Fictitious Forensic Cases,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+cc94b423c298003f0f164e63e63177d443291a77,Multi-View Semantic Labeling of 3D Point Clouds for Automated Plant Phenotyping,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+ccebecf0e24f76262d85f55712010632ea04c0af,Stepwise Nearest Neighbor Discriminant Analysis,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+ccf119021cf246fd75d37863646ccb85accee6a8,Unsupervised Learning and Segmentation of Complex Activities from Video,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+ccf934a335793fe416b0115183783d2c355b64ed,Query Based Adaptive Re-ranking for Person Re-identification,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+cc70f4af018de5e5bdc8075dbdf1bbe49a6f0b4a,Generative adversarial network-based synthesis of visible faces from polarimetrie thermal faces,State University of New Jersey,The State University of New Jersey,"Rutgers New Brunswick: Livingston Campus, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA",40.51865195,-74.44099801,edu,
+ccb0353fd1aa19b50fca8d69f9b9c9f1752dd55b,Towards Better Understanding the Clothing Fashion Styles: A Multimodal Deep Learning Approach,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+ccb0353fd1aa19b50fca8d69f9b9c9f1752dd55b,Towards Better Understanding the Clothing Fashion Styles: A Multimodal Deep Learning Approach,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+ccb0353fd1aa19b50fca8d69f9b9c9f1752dd55b,Towards Better Understanding the Clothing Fashion Styles: A Multimodal Deep Learning Approach,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+ccdf24d85fc14b4710dcee268355548f166ba870,Recognition in-the-Tail: Training Detectors for Unusual Pedestrians with Synthetic Imposters,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+ccdf24d85fc14b4710dcee268355548f166ba870,Recognition in-the-Tail: Training Detectors for Unusual Pedestrians with Synthetic Imposters,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+ccdf24d85fc14b4710dcee268355548f166ba870,Recognition in-the-Tail: Training Detectors for Unusual Pedestrians with Synthetic Imposters,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+ccc65463198ee0a0db9b303a3dc903c762dbccaa,Adaptive Selection of Deep Learning Models on Embedded Systems,Lancaster University,Lancaster University,"Lancaster University, Library Avenue, Bowland College, Hala, Lancaster, Lancs, North West England, England, LA1 4AP, UK",54.00975365,-2.78757491,edu,
+ccc65463198ee0a0db9b303a3dc903c762dbccaa,Adaptive Selection of Deep Learning Models on Embedded Systems,Lancaster University,Lancaster University,"Lancaster University, Library Avenue, Bowland College, Hala, Lancaster, Lancs, North West England, England, LA1 4AP, UK",54.00975365,-2.78757491,edu,
+cceab479d37060b0952439d9bd6fbbba5de1d550,VizWiz Grand Challenge: Answering Visual Questions from Blind People,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+cc7e66f2ba9ac0c639c80c65534ce6031997acd7,Facial Descriptors for Identity-Preserving Multiple People Tracking,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+cc9057d2762e077c53e381f90884595677eceafa,On the Exploration of Joint Attribute Learning for Person Re-identification,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+ccf16bcf458e4d7a37643b8364594656287f5bfc,Cascade for Landmark Guided Semantic Part Segmentation,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+e64fa00da02cc774559db5be88bc2862afbfd432,Histogram of Oriented Normal Vectors for Object Recognition with a Depth Sensor,University of Missouri,University of Missouri,"L1, Maguire Boulevard, Lemone Industrial Park, Columbia, Boone County, Missouri, 65201, USA",38.92676100,-92.29193783,edu,
+e686e9a642880662e56558b13d3d32f051d549b3,Human face orientation detection using power spectrum based measurements,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+e6ac47a768188971d0b478182db9026221a0807d,Adaptation and Re-Identification Network: An Unsupervised Deep Transfer Learning Approach to Person Re-Identification,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+e6ac47a768188971d0b478182db9026221a0807d,Adaptation and Re-Identification Network: An Unsupervised Deep Transfer Learning Approach to Person Re-Identification,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+e69ac130e3c7267cce5e1e3d9508ff76eb0e0eef,Addressing the illumination challenge in two-dimensional face recognition: a survey,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+e6d9d3a2f1560e507a24b8cfe3d2f4369c79e0f6,Impact of eye detection error on face recognition performance,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+e6d9d3a2f1560e507a24b8cfe3d2f4369c79e0f6,Impact of eye detection error on face recognition performance,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+e659221538d256b2c3e0724deff749eda903fc7d,Fine-Grained Head Pose Estimation Without Keypoints,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+e6d4c0ac2352f108a078a4fd3f908a03b8571f2b,Racial Bias in Judgments of Physical Size and Formidability,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+e607b91f69ea2bff3194b07c5d22b4625bbe306e,Learning to See People Like People: Predicting Social Impressions of Faces,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+e64e5449d0d10cfcf63edc8a0b28fc96d09d3535,GLAC Net: GLocal Attention Cascading Networks for Multi-image Cued Story Generation,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+e67e757a3d94b71b94e16c5a6a90d77bf61e9aab,Limited-Memory Fast Gradient Descent Method for Graph Regularized Nonnegative Matrix Factorization,"National University of Defense Technology, China","National University of Defence Technology, Changsha 410000, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.22902090,112.99483204,edu,
+e6f20e7431172c68f7fce0d4595100445a06c117,Searching Action Proposals via Spatial Actionness Estimation and Temporal Path Inference and Tracking,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+e61a7a02cd2b68043012231f8da1d7077e665040,Utilization-aware load balancing for the energy efficient operation of the big.LITTLE processor,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+e608ccf3ac353cf7204ccf5659983d69bd09f515,Cross-Domain Image Retrieval with Attention Modeling,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+e65da9b728493c4619beca5728f622f6e91c9dd7,Histogram of Structure Tensors: Application to Pattern Clustering,"University of Sfax, Tunisia","REGIM-Labo: REsearch Groups in Intelligent Machines, University of Sfax, ENIS, BP 1173, Sfax, 3038, Tunisia","Université de Route de l'Aéroport Km 0.5 BP 1169 .3029 Sfax, Sfax, Tunisia",34.73610660,10.74272750,edu,"University of Sfax, Tunisia"
+e6eda2bfec3036cf431a45fa021070ab21bb3488,Mirror Representation for Modeling View-Specific Transform in Person Re-Identification,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+e6020915b9530fa585453f60a8934aed30558be4,Improving Neural Question Generation using Answer Separation,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+e6020915b9530fa585453f60a8934aed30558be4,Improving Neural Question Generation using Answer Separation,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+f9c563ec6c8238aaf420327bd7f9d8fbf8de3bce,An Evaluation on Color Invariant Based Local Spatiotemporal Features for Action Recognition,University of Campinas,University of Campinas,"USJ, 97, Rua Sílvia Maria Fabro, Kobrasol, Campinas, São José, Microrregião de Florianópolis, Mesorregião da Grande Florianópolis, SC, Região Sul, 88102-130, Brasil",-27.59539950,-48.61542180,edu,
+f97342323ec16d67fcdd8969e5312e43d4a6edf8,Physical Models of Human Motion for Estimation and Scene Analysis,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+f9a1132c777b24e9361b1bcbccb9fcfc737f3194,VIPL-HR: A Multi-modal Database for Pulse Estimation from Less-constrained Face Video,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+f9a1132c777b24e9361b1bcbccb9fcfc737f3194,VIPL-HR: A Multi-modal Database for Pulse Estimation from Less-constrained Face Video,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+f9717a0056ad863c5f9dc00916ab87bdf1cdf5f7,Pose Flow: Efficient Online Pose Tracking,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+f9d171019bfeb71733fe36f7fae14f342ca9e51c,Hough Forests Revisited: An Approach to Multiple Instance Tracking from Multiple Cameras,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+f942739b7f9bc3c0b84f760bb2fd4895e1363ec0,Students Wearing Police Uniforms Exhibit Biased Attention toward Individuals Wearing Hoodies,McMaster University,McMaster University,"McMaster University, Westdale, Hamilton, Ontario, Canada",43.26336945,-79.91809684,edu,
+f942739b7f9bc3c0b84f760bb2fd4895e1363ec0,Students Wearing Police Uniforms Exhibit Biased Attention toward Individuals Wearing Hoodies,Curtin University,Curtin University,"Curtin University, Brand Drive, Waterford, Perth, Western Australia, 6102, Australia",-32.00686365,115.89691775,edu,
+f942739b7f9bc3c0b84f760bb2fd4895e1363ec0,Students Wearing Police Uniforms Exhibit Biased Attention toward Individuals Wearing Hoodies,University of Hong Kong,University of Hong Kong,"海洋科學研究所 The Swire Institute of Marine Science, 鶴咀道 Cape D'Aguilar Road, 鶴咀低電台 Cape D'Aguilar Low-Level Radio Station, 石澳 Shek O, 芽菜坑村 Nga Choy Hang Tsuen, 南區 Southern District, 香港島 Hong Kong Island, HK, 中国",22.20814690,114.25964115,edu,
+f9b8539d48d6350435ab5550fd47451e779d2466,Accelerating image recognition on mobile devices using GPGPU,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+f9e92f5768998fbb876fb41facb1bba17b10c7af,ConceptFusion: A Flexible Scene Classification Framework,Bilkent University,Bilkent University,"Bilkent Üniversitesi, 3. Cadde, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87204890,32.75395155,edu,
+f9e92f5768998fbb876fb41facb1bba17b10c7af,ConceptFusion: A Flexible Scene Classification Framework,Bilkent University,Bilkent University,"Bilkent Üniversitesi, 3. Cadde, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87204890,32.75395155,edu,
+f9e92f5768998fbb876fb41facb1bba17b10c7af,ConceptFusion: A Flexible Scene Classification Framework,Bilkent University,Bilkent University,"Bilkent Üniversitesi, 3. Cadde, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87204890,32.75395155,edu,
+f984a5ad2d379b4e4b51005a73cdbd978ce3d810,ExplainGAN: Model Explanation via Decision Boundary Crossing Transformations,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+f9f01af981f8d25f0c96ea06d88be62dabb79256,Terahertz Image Detection with the Improved Faster Region-Based Convolutional Neural Network,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+f90b97008d921004487d1232ad20dcd9d678435f,Toward Marker-Free 3D Pose Estimation in Lifting: A Deep Multi-View Solution,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+f90b97008d921004487d1232ad20dcd9d678435f,Toward Marker-Free 3D Pose Estimation in Lifting: A Deep Multi-View Solution,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+f90b97008d921004487d1232ad20dcd9d678435f,Toward Marker-Free 3D Pose Estimation in Lifting: A Deep Multi-View Solution,North Carolina State University,North Carolina State University,"North Carolina State University, Oval Drive, West Raleigh, Raleigh, Wake County, North Carolina, 27695, USA",35.77184965,-78.67408695,edu,
+f9034d80a0c318a8c564ce3aa9d8545d871b9663,Facial Expression Recognition with Inconsistently Annotated Datasets,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+f9034d80a0c318a8c564ce3aa9d8545d871b9663,Facial Expression Recognition with Inconsistently Annotated Datasets,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+f91388f87e10674226f4def4cda411adc01da496,Failure to Affect Decision Criteria During Recognition Memory With Continuous Theta Burst Stimulation,University of Wollongong,University of Wollongong,"University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia",-34.40505545,150.87834655,edu,
+f95d53ff893305741d60e234772003ec8579828b,A 3D Morphable Model of the Eye Region,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+f95d53ff893305741d60e234772003ec8579828b,A 3D Morphable Model of the Eye Region,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+f95d53ff893305741d60e234772003ec8579828b,A 3D Morphable Model of the Eye Region,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+f963967e52a5fd97fa3ebd679fd098c3cb70340e,"Analysis, Interpretation, and Recognition of Facial Action Units and Expressions Using Neuro-Fuzzy Modeling",Sharif University of Technology,Sharif University of Technology,"دانشگاه صنعتی شریف, خیابان آزادی, زنجان, منطقه ۹ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, 14588, ‏ایران‎",35.70362270,51.35125097,edu,
+f9e0209dc9e72d64b290d0622c1c1662aa2cc771,Contributions to Biometric Recognition: Matching Identical Twins and Latent Fingerprints,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+f92ade569cbe54344ffd3bb25efd366dcd8ad659,Effect of Super Resolution on High Dimensional Features for Unsupervised Face Recognition in the Wild,University of Bridgeport,University of Bridgeport,"University of Bridgeport, Park Avenue, Bridgeport Downtown South Historic District, Bridgeport, Fairfield County, Connecticut, 06825, USA",41.16648580,-73.19205640,edu,
+f9cf3bbca1598a0309c1395c5a46f17f774f4094,Convex Class Model on Symmetric Positive Definite Manifolds,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+f9b90d3c1e2c3d0f3d9a94e6a0aea5e3047bca78,Analysis of photometric factors based on photometric linearization.,Osaka University,Osaka University,"大阪大学清明寮, 服部西町四丁目, 豊中市, 大阪府, 近畿地方, 日本",34.80809035,135.45785218,edu,
+f9b90d3c1e2c3d0f3d9a94e6a0aea5e3047bca78,Analysis of photometric factors based on photometric linearization.,Okayama University,Okayama University,"岡山大学, 津高法界院停車場線, 津島東2, 津島東, 北区, 岡山市, 岡山県, 中国地方, 700-0081, 日本",34.68933930,133.92222720,edu,
+f989a20fbcc2d576c0c4514a0e5085c741580778,Co-localization with Category-Consistent Features and Geodesic Distance Propagation,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+f989a20fbcc2d576c0c4514a0e5085c741580778,Co-localization with Category-Consistent Features and Geodesic Distance Propagation,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+f989a20fbcc2d576c0c4514a0e5085c741580778,Co-localization with Category-Consistent Features and Geodesic Distance Propagation,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+f970cc735d87ad8484a29a5bad69f529dd557471,000 Tiny Videos : A Large Dataset for Non-Parametric Content-Based Retrieval and Recognition,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+f94f366ce14555cf0d5d34248f9467c18241c3ee,Deep Convolutional Neural Network in Deformable Part Models for Face Detection,Japan Advanced Institute of Science and Technology,Japan Advanced Institute of Science and Technology,"JAIST (北陸先端科学技術大学院大学), 石川県道55号小松辰口線, Ishikawa Science Park, 能美市, 石川県, 中部地方, 923-1206, 日本",36.44429490,136.59285870,edu,
+f909d04c809013b930bafca12c0f9a8192df9d92,Single Image Subspace for Face Recognition,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+f909d04c809013b930bafca12c0f9a8192df9d92,Single Image Subspace for Face Recognition,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+f9570079f33ab11394175d57db0aa94251c48c61,Compositional GAN: Learning Conditional Image Composition,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+f9e388544ae371cdd1d73b2e444cb46d9532f530,Image Quality Assessment Guided Deep Neural Networks Training,City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+f9e388544ae371cdd1d73b2e444cb46d9532f530,Image Quality Assessment Guided Deep Neural Networks Training,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+f9e388544ae371cdd1d73b2e444cb46d9532f530,Image Quality Assessment Guided Deep Neural Networks Training,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+f94f79168c1cfaebb8eab5151e01d56478ab0b73,Optimizing Region Selection for Weakly Supervised Object Detection,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+f9ccfe000092121a2016639732cdb368378256d5,Cognitive behaviour analysis based on facial information using depth sensors,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+f08e425c2fce277aedb51d93757839900d591008,Neural Motifs: Scene Graph Parsing with Global Context,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+f08e425c2fce277aedb51d93757839900d591008,Neural Motifs: Scene Graph Parsing with Global Context,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+f02f0f6fcd56a9b1407045de6634df15c60a85cd,Learning Low-shot facial representations via 2D warping,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+f029e3fc47cab0b23da307dd2ec6d2a064091f83,Appearance and motion based data association for pedestrian tracking,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+f02feec9f8d15f929018e0f0aa14446f47112d22,Cross-Resolution Person Re-identification with Deep Antithetical Learning,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+f0c21345a13c0e1da2b74aef4e8b987feb266bb5,Deep Multitask Architecture for Integrated 2D and 3D Human Sensing,Lund University,Lund University,"TEM at Lund University, 9, Klostergatan, Stadskärnan, Centrum, Lund, Skåne, Götaland, 22222, Sverige",55.70395710,13.19020110,edu,
+f0864a4e2f7dc4b3bacc36a0617a1860bcb6aba1,Multi-pedestrian detection in crowded scenes: A global view,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+f0ca31fd5cad07e84b47d50dc07db9fc53482a46,Feature Patch Illumination Spaces and Karcher Compression for Face Recognition via Grassmannians,Colorado State University,Colorado State University,"Colorado State University, West Pitkin Street, Woodwest, Fort Collins, Larimer County, Colorado, 80526-2002, USA",40.57093580,-105.08655256,edu,
+f074e86e003d5b7a3b6e1780d9c323598d93f3bc,Characteristic Number: Theory and Its Application to Shape Analysis,Dalian University of Technology,Dalian University of Technology,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国",38.88140235,121.52281098,edu,
+f074e86e003d5b7a3b6e1780d9c323598d93f3bc,Characteristic Number: Theory and Its Application to Shape Analysis,Dalian University of Technology,Dalian University of Technology,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国",38.88140235,121.52281098,edu,
+f06b30bf5874ad6168615b4443d011dd44e1ceda,Sparsity-Based Occlusion Handling Method for Person Re-identification,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+f06b30bf5874ad6168615b4443d011dd44e1ceda,Sparsity-Based Occlusion Handling Method for Person Re-identification,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+f049ea3ca734c217c380a1802d15a6d85378f55d,Efficient misbehaving user detection in online video chat services,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+f049ea3ca734c217c380a1802d15a6d85378f55d,Efficient misbehaving user detection in online video chat services,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+f0a4a3fb6997334511d7b8fc090f9ce894679faf,Generative Face Completion,"University of California, Merced","University of California, Merced","University of California, Merced, Ansel Adams Road, Merced County, California, USA",37.36566745,-120.42158888,edu,
+f06f4bc74dc0be0a628e99a5c86ab3e00ed00276,Heated-Up Softmax Embedding,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+f0681fc08f4d7198dcde803d69ca62f09f3db6c5,Spatiotemporal Features for Effective Facial Expression Recognition,Bogazici University,Bogazici University,"Boğaziçi Üniversitesi Kuzey Yerleşkesi, Okulaltı 1. Sokak, Rumelihisarı, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34467, Türkiye",41.08688410,29.04413167,edu,
+f0f501e1e8726148d18e70c8e9f6feea9360d119,Jukka Komulainen SOFTWARE - BASED COUNTERMEASURES TO 2 D FACIAL,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+f0398ee5291b153b716411c146a17d4af9cb0edc,Learning Optical Flow via Dilated Networks and Occlusion Reasoning,"University of California, Merced","University of California, Merced","University of California, Merced, Ansel Adams Road, Merced County, California, USA",37.36566745,-120.42158888,edu,
+f0f0e94d333b4923ae42ee195df17c0df62ea0b1,Scaling Manifold Ranking Based Image Retrieval,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+f03b9b0895f5fb3351bbf3db4b1139af85650543,Where is Misty? Interpreting Spatial Descriptors by Modeling Regions in Space,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+f0ae665f5b4a9314c77dc9ec285a335ee6ecc15b,A Heuristic Deformable Pedestrian Detection Method,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+f0a3f12469fa55ad0d40c21212d18c02be0d1264,Sparsity Sharing Embedding for Face Verification,Korea Advanced Institute of Science and Technology,Korea Advanced Institute of Science and Technology,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.36971910,127.36253700,edu,
+f05ad40246656a977cf321c8299158435e3f3b61,Face Recognition Using Face Patch Networks,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+f0e26f749fb67182a5d3864e62a3460ac333e5e4,Spatial Knowledge Distillation to aid Visual Reasoning,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+f02a6bccdaee14ab55ad94263539f4f33f1b15bb,Segment-Tube: Spatio-Temporal Action Localization in Untrimmed Videos with Per-Frame Segmentation,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+f76a04bdc43f1e440b274b299b07ce2e423431e6,Face Recognition from Video: a Review,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+f7a91f74b0f8ac03459770bf4ba20af58a72a559,Visual Scan Paths and Recognition of Facial Identity in Autism Spectrum Disorder and Typical Development,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+f7bed3080668246b517a0c787698b53f67140a7d,Weighted Hausdorff Distance: A Loss Function For Object Localization,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+f781e50caa43be13c5ceb13f4ccc2abc7d1507c5,Towards Flexible and Intelligent Vision Systems -- From Thresholding to CHLAC --,National Institute of Advanced Industrial Science and Technology,National Institute of Advanced Industrial Science and Technology,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本",36.05238585,140.11852361,edu,
+f730990ad4f10e7ce09e7680b7864751787445dd,JointFlow : Temporal Flow Fields for Multi Person Pose Tracking,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+f7b4bc4ef14349a6e66829a0101d5b21129dcf55,Towards Light-weight Annotations: Fuzzy Interpolative Reasoning for Zero-shot Image Classificaiton,Newcastle University,Newcastle University,"Newcastle University, Claremont Walk, Haymarket, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE1 7RU, UK",54.98023235,-1.61452627,edu,
+f7e16e57b93b9dac11280427c7575a0a0ae4e0a8,Handcrafting vs Deep Learning: An Evaluation of NTraj+ Features for Pose Based Action Recognition,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+f7b422df567ce9813926461251517761e3e6cda0,Face aging with conditional generative adversarial networks,EURECOM,EURECOM,"Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France",43.61438600,7.07112500,edu,
+f7824758800a7b1a386db5bd35f84c81454d017a,KEPLER: Keypoint and Pose Estimation of Unconstrained Faces by Learning Efficient H-CNN Regressors,University of Maryland College Park,University of Maryland College Park,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",38.99203005,-76.94610290,edu,
+f74b62933362cce595ac247fc6f54ede68697d75,An Example-Based Two-Step Face Hallucination Method through Coefficient Learning,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+f74917fc0e55f4f5682909dcf6929abd19d33e2e,Gan Quality Index (gqi) by Gan-induced Classifier,City University of New York,The City University of New York,"Lehman College of the City University of New York, 250, Bedford Park Boulevard West, Bedford Park, The Bronx, Bronx County, NYC, New York, 10468, USA",40.87228250,-73.89489171,edu,
+f74917fc0e55f4f5682909dcf6929abd19d33e2e,Gan Quality Index (gqi) by Gan-induced Classifier,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+f74917fc0e55f4f5682909dcf6929abd19d33e2e,Gan Quality Index (gqi) by Gan-induced Classifier,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+f7040d2109cb42b373b1785ccb7a03faea824873,Human Detection in Video over Large Viewpoint Changes,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+f7040d2109cb42b373b1785ccb7a03faea824873,Human Detection in Video over Large Viewpoint Changes,"OMRON Corporation, Kyoto, Japan","Core Technology Center, OMRON Corporation, Kyoto, Japan","Kyoto, Kyoto Prefecture, Japan",35.01163630,135.76802940,company,
+f7ab8e56fc68575a0a5a94d315841f25630cf8a1,Exploiting Colour Information for Better Scene Text Recognition,Loughborough University,Loughborough University,"Computer Science, University Road, Charnwood, Leicestershire, East Midlands, England, LE11 3TP, UK",52.76635770,-1.22924610,edu,
+f7ab8e56fc68575a0a5a94d315841f25630cf8a1,Exploiting Colour Information for Better Scene Text Recognition,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+f7fdf862b7edeb5fd9d8fad7062c1f029b419769,Visual interpretability for deep learning: a survey,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+f73b7efa3bec07c582ec4e42fbc43a4f4993c6bb,Learning a Discriminative Feature Network for Semantic Segmentation,Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+f73b7efa3bec07c582ec4e42fbc43a4f4993c6bb,Learning a Discriminative Feature Network for Semantic Segmentation,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+f78fe101b21be36e98cd3da010051bb9b9829a1e,Unsupervised Domain Adaptation for Facial Expression Recognition Using Generative Adversarial Networks,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+f78fe101b21be36e98cd3da010051bb9b9829a1e,Unsupervised Domain Adaptation for Facial Expression Recognition Using Generative Adversarial Networks,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+f726738954e7055bb3615fa7e8f59f136d3e0bdc,Are you eligible? Predicting adulthood from face images via class specific mean autoencoder,"IIIT Delhi, India","IIIT Delhi, India","Okhla Industrial Estate, Phase III, Near Govind Puri Metro Station, New Delhi, Delhi 110020, India",28.54562820,77.27315050,edu,
+f7a271acccf9ec66c9b114d36eec284fbb89c7ef,Does attractiveness influence condom use intentions in heterosexual men? An experimental study,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+f7a271acccf9ec66c9b114d36eec284fbb89c7ef,Does attractiveness influence condom use intentions in heterosexual men? An experimental study,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+f7a271acccf9ec66c9b114d36eec284fbb89c7ef,Does attractiveness influence condom use intentions in heterosexual men? An experimental study,University of Bristol,University of Bristol,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK",51.45848370,-2.60977520,edu,
+f7093b138fd31956e30d411a7043741dcb8ca4aa,Hierarchical Clustering in Face Similarity Score Space,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+f7ed39dec6e9060dc3dc58656ddf823916a2a643,Delta-encoder: an effective sample synthesis method for few-shot object recognition,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+f7ed39dec6e9060dc3dc58656ddf823916a2a643,Delta-encoder: an effective sample synthesis method for few-shot object recognition,Technion,Technion,"Haifa, 3200003, Israel",32.77677830,35.02312710,edu,
+f7943ecda36b38725efda73d68b7ea70272451b8,Superimposition-guided Facial Reconstruction from Skull,Louisiana State University,Louisiana State University,"LSU, Gourrier Avenue, Baton Rouge, East Baton Rouge Parish, Louisiana, 70803, USA",30.40550035,-91.18620474,edu,
+f7943ecda36b38725efda73d68b7ea70272451b8,Superimposition-guided Facial Reconstruction from Skull,Louisiana State University,Louisiana State University,"LSU, Gourrier Avenue, Baton Rouge, East Baton Rouge Parish, Louisiana, 70803, USA",30.40550035,-91.18620474,edu,
+f76dee0d19c9ee8d59466ad1e3bb91cae5a17ac5,Beyond bag of words: image representation in sub-semantic space,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+f76dee0d19c9ee8d59466ad1e3bb91cae5a17ac5,Beyond bag of words: image representation in sub-semantic space,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+f76dee0d19c9ee8d59466ad1e3bb91cae5a17ac5,Beyond bag of words: image representation in sub-semantic space,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+f76dee0d19c9ee8d59466ad1e3bb91cae5a17ac5,Beyond bag of words: image representation in sub-semantic space,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+f76dee0d19c9ee8d59466ad1e3bb91cae5a17ac5,Beyond bag of words: image representation in sub-semantic space,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+f76dee0d19c9ee8d59466ad1e3bb91cae5a17ac5,Beyond bag of words: image representation in sub-semantic space,Dalian University of Technology,Dalian University of Technology,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国",38.88140235,121.52281098,edu,
+f76dee0d19c9ee8d59466ad1e3bb91cae5a17ac5,Beyond bag of words: image representation in sub-semantic space,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+f759880a3314850d3a712bcd96494b62f60d5ece,Pigeonring: A Principle for Faster Thresholded Similarity Search,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+f759880a3314850d3a712bcd96494b62f60d5ece,Pigeonring: A Principle for Faster Thresholded Similarity Search,Nagoya University,Nagoya University,"SuperDARN (Hokkaido West), 太辛第1支線林道, 陸別町, 足寄郡, 十勝総合振興局, 北海道, 北海道地方, 日本",43.53750985,143.60768225,edu,
+f7a2424eb5af9613544a945772addcf2e19b5f92,Multi-view gait based human identification system with covariate analysis,Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.64185301,edu,
+f7c50d2be9fba0e4527fd9fbe3095e9d9a94fdd3,Large Margin Multi-metric Learning for Face and Kinship Verification in the Wild,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+f7c50d2be9fba0e4527fd9fbe3095e9d9a94fdd3,Large Margin Multi-metric Learning for Face and Kinship Verification in the Wild,"Advanced Digital Sciences Center, Singapore","Advanced Digital Sciences Center, Singapore","1 Create Way, 14-02 Create Tower, Singapore 138602",1.30372570,103.77377630,edu,
+f7af6fe6fb6393f7780163ae37c5931ce566daac,Synthetically Trained 3 D Visual Tracker of Underwater Vehicles,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+f7f1c57d38748d718309d7d55ce79e41d60f0940,Palmprint Recognition Using Deep Scattering Convolutional Network,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+f76808d6811cb3790e7fc3ddb08c733febbdefba,Robust Object Categorization and Segmentation Motivated by Visual Contexts in the Human Visual System,Yeungnam University,Yeungnam University,"영남대, 대학로, 부적리, 경산시, 경북, 712-749, 대한민국",35.83654030,128.75343090,edu,
+f7ac8523770b5965aadc27cb5364d77853113be4,Face Authentication Based on Multiple Profiles Extracted from Range Data,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+f775be87ca71180d1cf97d81678f4fd713343e01,Curriculum Learning for Multi-task Classification of Visual Attributes,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+f775be87ca71180d1cf97d81678f4fd713343e01,Curriculum Learning for Multi-task Classification of Visual Attributes,University of Ioannina,University of Ioannina,"Πανεπιστήμιο Ιωαννίνων, Πανεπιστημίου, Κάτω Νεοχωρόπουλο, Νεοχωρόπουλο, Δήμος Ιωαννιτών, Π.Ε. Ιωαννίνων, Περιφέρεια Ηπείρου, Ήπειρος - Δυτική Μακεδονία, 45110, Ελλάδα",39.61623060,20.83963011,edu,
+f735188dbcb276cd1da248110712fde0d1b2aec7,Classification and clustering via dictionary learning with structured incoherence and shared features,University of Minnesota,University of Minnesota,"WeismanArt, 333, East River Parkway, Marcy-Holmes, Phillips, Minneapolis, Hennepin County, Minnesota, 55455, USA",44.97308605,-93.23708813,edu,
+e82360682c4da11f136f3fccb73a31d7fd195694,Online Face Recognition with Application to Proactive Augmented Reality,Aalto University,Aalto University,"Aalto, 24, Otakaari, Otaniemi, Suur-Tapiola, Espoo, Helsingin seutukunta, Uusimaa, Etelä-Suomi, Manner-Suomi, 02150, Suomi",60.18558755,24.82427330,edu,
+e8391fd7ef979a63c389ab0fa7c00fe67e4498f8,Multiple object tracking with combinatorial model based on appearance and local features,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+e88b1040dc8a546f181fcd973227ae6121f15b70,Segmentation of Floors in Corridor Images for Mobile Robot Navigation,Clemson University,Clemson University,"Clemson University, Old Stadium Road, Clemson Heights, Pickens County, South Carolina, 29631, USA",34.66869155,-82.83743476,edu,
+e8096f4f625441ddb4914b17d1b9da3f80bae92e,Transfer Learning of Artist Group Factors to Musical Genre Classification,Delft University of Technology,Delft University of Technology,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland",51.99882735,4.37396037,edu,
+e8096f4f625441ddb4914b17d1b9da3f80bae92e,Transfer Learning of Artist Group Factors to Musical Genre Classification,Universitat Pompeu Fabra,Universitat Pompeu Fabra,"Dipòsit de les Aigües, Carrer de Wellington, la Vila Olímpica del Poblenou, Ciutat Vella, Barcelona, BCN, CAT, 08071, España",41.39044285,2.18891949,edu,
+e8096f4f625441ddb4914b17d1b9da3f80bae92e,Transfer Learning of Artist Group Factors to Musical Genre Classification,Universitat Pompeu Fabra,Universitat Pompeu Fabra,"Dipòsit de les Aigües, Carrer de Wellington, la Vila Olímpica del Poblenou, Ciutat Vella, Barcelona, BCN, CAT, 08071, España",41.39044285,2.18891949,edu,
+e8096f4f625441ddb4914b17d1b9da3f80bae92e,Transfer Learning of Artist Group Factors to Musical Genre Classification,Delft University of Technology,Delft University of Technology,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland",51.99882735,4.37396037,edu,
+e8f0f9b74db6794830baa2cab48d99d8724e8cb6,Active Image Labeling and Its Application to Facial Action Labeling,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+e8f0f9b74db6794830baa2cab48d99d8724e8cb6,Active Image Labeling and Its Application to Facial Action Labeling,GE Global Research Center,GE Global Research Center,"GE Global Research Center, Aqueduct, Niskayuna, Schenectady County, New York, USA",42.82982480,-73.87719385,edu,
+e8875b317c2e0ed6fba0c908d599b3772a400bdd,Non-rigid 3D Shape Registration using an Adaptive Template,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+e87d6c284cdd6828dfe7c092087fbd9ff5091ee4,Unsupervised Creation of Parameterized Avatars,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+e8d8a42d0ee8849bbaf99c52cadeb2f1ebe564b0,Building Data-driven Models with Microstructural Images: Generalization and Interpretability,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+e8523c4ac9d7aa21f3eb4062e09f2a3bc1eedcf7,Toward End-to-End Face Recognition Through Alignment Learning,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+e85a255a970ee4c1eecc3e3d110e157f3e0a4629,Fusing Hierarchical Convolutional Features for Human Body Segmentation and Clothing Fashion Classification,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+e8327930af0c719e3084d0ffb284704888976515,Exemplar-SVMs for Action Recognition,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+e8327930af0c719e3084d0ffb284704888976515,Exemplar-SVMs for Action Recognition,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+e8c9dcbf56714db53063b9c367e3e44300141ff6,Get the FACS fast: Automated FACS face analysis benefits from the addition of velocity,University of Virginia,University of Virginia,"University of Virginia, Rotunda Alley, Carr's Hill, Albemarle County, Virginia, 22904-4119, USA",38.03536820,-78.50353220,edu,
+e8c9dcbf56714db53063b9c367e3e44300141ff6,Get the FACS fast: Automated FACS face analysis benefits from the addition of velocity,University of Virginia,University of Virginia,"University of Virginia, Rotunda Alley, Carr's Hill, Albemarle County, Virginia, 22904-4119, USA",38.03536820,-78.50353220,edu,
+e8c9dcbf56714db53063b9c367e3e44300141ff6,Get the FACS fast: Automated FACS face analysis benefits from the addition of velocity,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+e8d9f431ac20f0ef88291cf1b370fbbca028315a,Unravelling Robustness of Deep Learning Based Face Recognition Against Adversarial Attacks,"IIIT Delhi, India","IIIT Delhi, India","Okhla Industrial Estate, Phase III, Near Govind Puri Metro Station, New Delhi, Delhi 110020, India",28.54562820,77.27315050,edu,
+e883cf759c3abecf59bf9f13053b1eb59bde01a6,Deep Multitask Attribute-driven Ranking for Fine-grained Sketch-based Image Retrieval,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+e86e2106dbedbb6d8b1195b77540971b9d58a198,Violent Behaviour Detection using Local Trajectory Response,Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.48799610,-3.17969747,edu,
+e86e2106dbedbb6d8b1195b77540971b9d58a198,Violent Behaviour Detection using Local Trajectory Response,Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.48799610,-3.17969747,edu,
+e8f9a45fdd76fa33855d9a7a6e70ff1821d0e2e1,Parametric surface denoising,Iowa State University,Iowa State University,"Iowa State University, Farm House Road, Ames, Story County, Iowa, 50014, USA",42.02791015,-93.64464415,edu,
+e8b3a257a0a44d2859862cdec91c8841dc69144d,Liquid Pouring Monitoring via Rich Sensory Inputs,National Tsing Hua University,National Tsing Hua University,"國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.79254840,120.99511830,edu,
+e8b3a257a0a44d2859862cdec91c8841dc69144d,Liquid Pouring Monitoring via Rich Sensory Inputs,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+e8ef22b6da1dd3a4e014b96e6073a7b610fd97ea,Faces as Lighting Probes via Unsupervised Deep Highlight Extraction,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+e8ef22b6da1dd3a4e014b96e6073a7b610fd97ea,Faces as Lighting Probes via Unsupervised Deep Highlight Extraction,"National University of Defense Technology, China","National University of Defence Technology, Changsha 410000, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.22902090,112.99483204,edu,
+e8ef22b6da1dd3a4e014b96e6073a7b610fd97ea,Faces as Lighting Probes via Unsupervised Deep Highlight Extraction,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+fae76d0e710545972807f18e45936ec5c6f1fe5d,RPIfield : A New Dataset for Temporally Evaluating Person Re-Identification ∗,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+faefb598a66284e31154251b94cdb3e1bda53122,Deep Transfer Network: Unsupervised Domain Adaptation,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+faefb598a66284e31154251b94cdb3e1bda53122,Deep Transfer Network: Unsupervised Domain Adaptation,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+faabc70615649169b559403d7f15d45fca537cbd,HDFD - A High Deformation Facial Dynamics Benchmark for Evaluation of Non-Rigid Surface Registration and Classification,Swansea University,Swansea University,"Swansea University, University Footbridge, Sketty, Swansea, Wales, SA2 8PZ, UK",51.60915780,-3.97934429,edu,
+faabc70615649169b559403d7f15d45fca537cbd,HDFD - A High Deformation Facial Dynamics Benchmark for Evaluation of Non-Rigid Surface Registration and Classification,Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.48799610,-3.17969747,edu,
+faabc70615649169b559403d7f15d45fca537cbd,HDFD - A High Deformation Facial Dynamics Benchmark for Evaluation of Non-Rigid Surface Registration and Classification,Swansea University,Swansea University,"Swansea University, University Footbridge, Sketty, Swansea, Wales, SA2 8PZ, UK",51.60915780,-3.97934429,edu,
+fa72b7140f9fa4fb975344109e597e9566c65f4a,Automatic 3D Face Recognition Using Discriminant Common Vectors,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+fa50b5a54aa340d6fe7f46feb02229f1ab0f12c0,Joint Image Captioning and Question Answering,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+fa90b825346a51562d42f6b59a343b98ea2e501a,Modeling Naive Psychology of Characters in Simple Commonsense Stories,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+fa90b825346a51562d42f6b59a343b98ea2e501a,Modeling Naive Psychology of Characters in Simple Commonsense Stories,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+faeefc5da67421ecd71d400f1505cfacb990119c,PastVision+: Thermovisual Inference of Recent Medicine Intake by Detecting Heated Objects and Cooled Lips,Halmstad University,Halmstad University,"Högskolan i Halmstad, 3, Kristian IV:s väg, Larsfrid, Nyhem, Halmstad, Hallands län, Götaland, 301 18, Sverige",56.66340325,12.87929727,edu,
+faeefc5da67421ecd71d400f1505cfacb990119c,PastVision+: Thermovisual Inference of Recent Medicine Intake by Detecting Heated Objects and Cooled Lips,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+fa4f59397f964a23e3c10335c67d9a24ef532d5c,"DAP3D-Net: Where, what and how actions occur in videos?",Northumbria University,Northumbria University,"Northumbria University, Birkdale Close, High Heaton, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE7 7TP, UK",55.00306320,-1.57463231,edu,
+fa518a033b1f6299d1826389bd1520cf52291b56,Facial Age Simulation using Age-specific 3D Models and Recursive PCA,Cyprus University of Technology,Cyprus University of Technology,"Mitropoli Building - Cyprus University of Technology, Anexartisias, Limasol - Λεμεσός, Limassol - Λεμεσός, Κύπρος - Kıbrıs, 3036, Κύπρος - Kıbrıs",34.67567405,33.04577648,edu,
+fab04dfcb35a29a46504d2ad3acbc642c602c7e8,Trajectory-based 3 D Convolutional Descriptors for Action Recognition in Videos,Jiangsu University,Jiangsu University,"江苏大学, 301, 学府路, 京口区, 象山街道, 京口区 (Jingkou), 镇江市 / Zhenjiang, 江苏省, 212013, 中国",32.20302965,119.50968362,edu,
+fab2fc6882872746498b362825184c0fb7d810e4,Right wing authoritarianism is associated with race bias in face detection,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+fabbebafe1f7b1680f66edc8b4fff345658a58c3,Face recognition by fusion of local and global matching scores using DS theory: An evaluation with uni-classifier and multi-classifier paradigm,Jadavpur University,Jadavpur University,"Jadavpur University, Chingrighata Flyover, Basani Devi Colony, Kolkata, Hāora, West Bengal, 700098, India",22.56115370,88.41310194,edu,
+fac8cff9052fc5fab7d5ef114d1342daba5e4b82,Modeling Phase Spectra Using Gaussian Mixture Models for Human Face Identification,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+fac8cff9052fc5fab7d5ef114d1342daba5e4b82,Modeling Phase Spectra Using Gaussian Mixture Models for Human Face Identification,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+fac8cff9052fc5fab7d5ef114d1342daba5e4b82,Modeling Phase Spectra Using Gaussian Mixture Models for Human Face Identification,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+fac8cff9052fc5fab7d5ef114d1342daba5e4b82,Modeling Phase Spectra Using Gaussian Mixture Models for Human Face Identification,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+fac8cff9052fc5fab7d5ef114d1342daba5e4b82,Modeling Phase Spectra Using Gaussian Mixture Models for Human Face Identification,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+faa29975169ba3bbb954e518bc9814a5819876f6,Evolution-Preserving Dense Trajectory Descriptors,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+fa747db22e9e6cd7a64019eec6e0dd53e94be4b3,DGPose: Disentangled Semi-supervised Deep Generative Models for Human Body Analysis,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+faef5bedb0b1e92730febce4e6af33b803bd463a,GANimation: Anatomically-Aware Facial Animation from a Single Image,Ohio State University,The Ohio State University,"The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA",40.00471095,-83.02859368,edu,
+fa5aca45965e312362d2d75a69312a0678fdf5d7,Fast and Accurate Head Pose Estimation via Random Projection Forests: Supplementary Material,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+fa33e20a3265d9a506c11a392cde9c367c30284e,Commonsense Justification for Action Explanation,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+fa4e709a7008248869584feca81250a8da8291e4,Biometric Quantization through Detection Rate Optimized Bit Allocation,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+fa9aabaa364732ddfb1d228cb8e93fa12c3bf52c,Facial Features Extraction based on Active Shape Model,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+fa9aabaa364732ddfb1d228cb8e93fa12c3bf52c,Facial Features Extraction based on Active Shape Model,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+fa9aabaa364732ddfb1d228cb8e93fa12c3bf52c,Facial Features Extraction based on Active Shape Model,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+faacebeb542792fae28745f51a943892be8d36a6,A Simple Yet Effective Baseline for 3d Human Pose Estimation,University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+fa2d02343be1de448ac51c3a668c29f231b362f8,RAM: A Region-Aware Deep Model for Vehicle Re-Identification,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+fa2d02343be1de448ac51c3a668c29f231b362f8,RAM: A Region-Aware Deep Model for Vehicle Re-Identification,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+fae83b145e5eeda8327de9f19df286edfaf5e60c,Towards an Interactive E-learning System Based on Emotions and Affective Cognition,Ionian University,Ionian University,"Πανεπιστήμιο Πατρών, Λεωφ. Ιπποκράτους, κ. Ρίου (Αγίου Γεωργίου Ρίου), Πάτρα, Δήμος Πατρέων, Περιφερειακή Ενότητα Αχαΐας, Περιφέρεια Δυτικής Ελλάδας, Πελοπόννησος, Δυτική Ελλάδα και Ιόνιο, 26443, Ελλάδα",38.28994820,21.78864690,edu,
+fa3fb32fe0cd392960549b0adb7a535eb3656abd,The Devil is in the Middle: Exploiting Mid-level Representations for Cross-Domain Instance Matching,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+fa3fb32fe0cd392960549b0adb7a535eb3656abd,The Devil is in the Middle: Exploiting Mid-level Representations for Cross-Domain Instance Matching,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+fff755ee8522d5ab0931babeaded2f9113c44b95,A Hybrid Supervised-unsupervised Method on Image Topic Visualization with Convolutional Neural Network and LDA,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+ffea8775fc9c32f573d1251e177cd283b4fe09c9,Transformation on Computer-Generated Facial Image to Avoid Detection by Spoofing Detector,"National Institute of Informatics, Japan","National Institute of Informatics, Japan","2 Chome-1-2 Hitotsubashi, Chiyoda, Tokyo 100-0003, Japan",35.69248530,139.75825330,edu,
+ffea8775fc9c32f573d1251e177cd283b4fe09c9,Transformation on Computer-Generated Facial Image to Avoid Detection by Spoofing Detector,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+fff32fd598e41ec6dd6903082d77f43f16908cfd,Kernel Learning of Histogram of Local Gabor Phase Patterns for Face Recognition,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+ff9af51b07a7e80706361cd064a25d99cde64236,Prajna: Towards Recognizing Whatever You Want from Images without Image Labeling,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+ffc5a9610df0341369aa75c0331ef021de0a02a9,Transferred Dimensionality Reduction,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+ff948365684d3aa1a834deb49f326e264b56677a,"Animal, but not human, faces engage the distributed face network in adolescents with autism.",Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+ff061f7e46a6213d15ac2eb2c49d9d3003612e49,Morphable Human Face Modelling,Monash University,Monash University,"Monash University, Mile Lane, Parkville, City of Melbourne, Victoria, 3000, Australia",-37.78397455,144.95867433,edu,
+ff1f45bdad41d8b35435098041e009627e60d208,"NAGRANI, ZISSERMAN: FROM BENEDICT CUMBERBATCH TO SHERLOCK HOLMES 1 From Benedict Cumberbatch to Sherlock Holmes: Character Identification in TV series without a Script",University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+ff5a3b2fae2ee1cf4f1c32ff7e5fdccf72815578,Multi-Person Pose Estimation via Column Generation,Universitat Pompeu Fabra,Universitat Pompeu Fabra,"Dipòsit de les Aigües, Carrer de Wellington, la Vila Olímpica del Poblenou, Ciutat Vella, Barcelona, BCN, CAT, 08071, España",41.39044285,2.18891949,edu,
+ff5a3b2fae2ee1cf4f1c32ff7e5fdccf72815578,Multi-Person Pose Estimation via Column Generation,"University of California, Irvine","University of California, Irvine","University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA",33.64319010,-117.84016494,edu,
+ff5c698e1f451c7e6fc4f036fb79ba6ff899285f,Adaptive Unsupervised Multi-view Feature Selection for Visual Concept Recognition,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+ff5c698e1f451c7e6fc4f036fb79ba6ff899285f,Adaptive Unsupervised Multi-view Feature Selection for Visual Concept Recognition,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+ffe4bb47ec15f768e1744bdf530d5796ba56cfc1,AFIF4: Deep Gender Classification based on AdaBoost-based Fusion of Isolated Facial Features and Foggy Faces,York University,York University,"York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada",43.77439110,-79.50481085,edu,
+ffe4bb47ec15f768e1744bdf530d5796ba56cfc1,AFIF4: Deep Gender Classification based on AdaBoost-based Fusion of Isolated Facial Features and Foggy Faces,Assiut University,Assiut University,"Assiut University, El Shaheed Ellwaa Hasn Kamel street, الوليدية, أسيوط, مصر",27.18794105,31.17009498,edu,
+ffc7de9e2519f54b0c843879013e24cb7ee2a2ac,A Hierarchical Generative Model for Eye Image Synthesis and Eye Gaze Estimation,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+ffe0f43206169deef3a2bf64cec90fe35bb1a8e5,"Automated Processing of Imaging Data through Multi-tiered Classification of Biological Structures Illustrated Using Caenorhabditis elegans
+",Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+ffe0f43206169deef3a2bf64cec90fe35bb1a8e5,"Automated Processing of Imaging Data through Multi-tiered Classification of Biological Structures Illustrated Using Caenorhabditis elegans
+",Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+ffe0f43206169deef3a2bf64cec90fe35bb1a8e5,"Automated Processing of Imaging Data through Multi-tiered Classification of Biological Structures Illustrated Using Caenorhabditis elegans
+",University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+ff11cb09e409996020a2dc3a8afc3b535e6b2482,Faster Bounding Box Annotation for Object Detection in Indoor Scenes,Tampere University of Technology,Tampere University of Technology,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi",61.44964205,23.85877462,edu,
+ffaad0204f4af763e3390a2f6053c0e9875376be,Non-Convex Sparse and Low-Rank Based Robust Subspace Segmentation for Data Mining,Donghua University,Donghua University,"东华大学, 新华路, 长宁区, 上海市, 210011, 中国",31.20619390,121.41047101,edu,
+ffaad0204f4af763e3390a2f6053c0e9875376be,Non-Convex Sparse and Low-Rank Based Robust Subspace Segmentation for Data Mining,City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+ff825a46f0a4e9f6ad748aeefd18f34f6b4addfb,"The ""reading the mind in films"" task: complex emotion recognition in adults with and without autism spectrum conditions.",Cambridge University,Cambridge University,"University, Cambridge Road, Old Portsmouth, Portsmouth, South East, England, PO1 2HB, UK",50.79440260,-1.09717480,edu,
+fffa2943808509fdbd2fc817cc5366752e57664a,Combined Ordered and Improved Trajectories for Large Scale Human Action Recognition,University of Canberra,University of Canberra,"University of Canberra, University Drive, Bruce, Belconnen, Australian Capital Territory, 2617, Australia",-35.23656905,149.08446994,edu,
+fffa2943808509fdbd2fc817cc5366752e57664a,Combined Ordered and Improved Trajectories for Large Scale Human Action Recognition,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+ffd0ba45cc6b0c8f72a09617144786ffb26be771,Data-Free Knowledge Distillation for Deep Neural Networks,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+ffd0ba45cc6b0c8f72a09617144786ffb26be771,Data-Free Knowledge Distillation for Deep Neural Networks,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+ffd0ba45cc6b0c8f72a09617144786ffb26be771,Data-Free Knowledge Distillation for Deep Neural Networks,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+ffa4bbfc1981fb5c44b09fe22a38b91573814e11,A High Precision Feature Based on LBP and Gabor Theory for Face Recognition,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+ff311fbb5600234fd639c96522d1b450b6190cdd,AnchorNet: A Weakly Supervised Network to Learn Geometry-Sensitive Features for Semantic Matching,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+c5cad01443d4de135250d2784f0d070defd6120a,Large Graph Exploration via Subgraph Discovery and Decomposition,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+c5cad01443d4de135250d2784f0d070defd6120a,Large Graph Exploration via Subgraph Discovery and Decomposition,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+c5cad01443d4de135250d2784f0d070defd6120a,Large Graph Exploration via Subgraph Discovery and Decomposition,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+c5cad01443d4de135250d2784f0d070defd6120a,Large Graph Exploration via Subgraph Discovery and Decomposition,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+c5468665d98ce7349d38afb620adbf51757ab86f,Pose-Encoded Spherical Harmonics for Robust Face Recognition Using a Single Image,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+c5b6b81a75f7ec3211473eb1ca58897a6537a085,Exploiting Best Practice of Deep CNNs Features for National Costume Image Retrieval,Dalian University of Technology,Dalian University of Technology,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国",38.88140235,121.52281098,edu,
+c5d13e42071813a0a9dd809d54268712eba7883f,Face recognition robust to head pose changes based on the RGB-D sensor,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+c526f3e27c7d8ed5e07cf57ab378f17e1c548ebe,Learning Human Identity Using View-Invariant Multi-view Movement Representation,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+c5fcea39a6d3e0abdfcf15ff62cec0950813ed0a,Human Face Verification by Robust 3D Surface Alignment,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+c50d73557be96907f88b59cfbd1ab1b2fd696d41,Semiconductor sidewall shape estimation,Oak Ridge National Laboratory,Oak Ridge National Laboratory,"Oak Ridge National Laboratory, Oak Ridge, Roane County, Tennessee, USA",35.93006535,-84.31240032,edu,
+c51fb195bd9fe3b7d001179a3a39bb8252304f1b,A Survey of Advances in Biometric Gait Recognition,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+c595863b90b904a7b3197667b62efa16b0fd5ff6,Are Key-Foreign Key Joins Safe to Avoid when Learning High-Capacity Classifiers?,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+c595863b90b904a7b3197667b62efa16b0fd5ff6,Are Key-Foreign Key Joins Safe to Avoid when Learning High-Capacity Classifiers?,University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+c5ee2621e5a0692677890df9a10963293ab14fc2,Feature Engineering for Knowledge Base Construction,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+c5ee2621e5a0692677890df9a10963293ab14fc2,Feature Engineering for Knowledge Base Construction,University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+c5392bdc97e525403a38563ba19caef342879116,Multi-Instance Dynamic Ordinal Random Fields for Weakly-Supervised Pain Intensity Estimation,Universitat Pompeu Fabra,Universitat Pompeu Fabra,"Dipòsit de les Aigües, Carrer de Wellington, la Vila Olímpica del Poblenou, Ciutat Vella, Barcelona, BCN, CAT, 08071, España",41.39044285,2.18891949,edu,
+c5392bdc97e525403a38563ba19caef342879116,Multi-Instance Dynamic Ordinal Random Fields for Weakly-Supervised Pain Intensity Estimation,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+c5392bdc97e525403a38563ba19caef342879116,Multi-Instance Dynamic Ordinal Random Fields for Weakly-Supervised Pain Intensity Estimation,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+c5f71486c16add42c3394edb41b8c064b0123824,Ularized with a Unified Embedding,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+c5c4503a331b6fc09e01e66280a531bb9db0290d,Fast Bounding Box Estimation based Face Detection,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+c55b0bcd8081999f265468f87f281959bfc786f7,Extraction and Classification of Human Gait Features,Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.64185301,edu,
+c55b0bcd8081999f265468f87f281959bfc786f7,Extraction and Classification of Human Gait Features,Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.64185301,edu,
+c5935b92bd23fd25cae20222c7c2abc9f4caa770,Spatiotemporal Multiplier Networks for Video Action Recognition,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+c5935b92bd23fd25cae20222c7c2abc9f4caa770,Spatiotemporal Multiplier Networks for Video Action Recognition,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+c5935b92bd23fd25cae20222c7c2abc9f4caa770,Spatiotemporal Multiplier Networks for Video Action Recognition,York University,York University,"York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada",43.77439110,-79.50481085,edu,
+c5ddfc020a3d1a4cb5d83c725a683f54a7bf7f1d,The processing of dynamic faces in the human brain : Support for an integrated neural framework of face processing,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+c5ddfc020a3d1a4cb5d83c725a683f54a7bf7f1d,The processing of dynamic faces in the human brain : Support for an integrated neural framework of face processing,MIT,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+c5ddfc020a3d1a4cb5d83c725a683f54a7bf7f1d,The processing of dynamic faces in the human brain : Support for an integrated neural framework of face processing,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+c5ddfc020a3d1a4cb5d83c725a683f54a7bf7f1d,The processing of dynamic faces in the human brain : Support for an integrated neural framework of face processing,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+c5421a18583f629b49ca20577022f201692c4f5d,Facial Age Classification using Subpattern-based Approaches,Eastern Mediterranean University,Eastern Mediterranean University,"Eastern Mediterranean University (EMU) - Stadium, Nehir Caddesi, Gazimağusa, Αμμόχωστος - Mağusa, Kuzey Kıbrıs, 99450, Κύπρος - Kıbrıs",35.14479945,33.90492318,edu,
+c57579b067c1a5f5b764344d3c7df227eeba9155,"People, Penguins and Petri Dishes: Adapting Object Counting Models To New Visual Domains And Object Types Without Forgetting",Dublin City University,Dublin City University,"Dublin City University Glasnevin Campus, Lower Car Park, Wad, Whitehall A ED, Dublin 9, Dublin, County Dublin, Leinster, D09 FW22, Ireland",53.38522185,-6.25740874,edu,
+c5be0feacec2860982fbbb4404cf98c654142489,Semi-Qualitative Probabilistic Networks in Computer Vision Problems,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+c5be0feacec2860982fbbb4404cf98c654142489,Semi-Qualitative Probabilistic Networks in Computer Vision Problems,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+c5be0feacec2860982fbbb4404cf98c654142489,Semi-Qualitative Probabilistic Networks in Computer Vision Problems,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+c5be0feacec2860982fbbb4404cf98c654142489,Semi-Qualitative Probabilistic Networks in Computer Vision Problems,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+c58b7466f2855ffdcff1bebfad6b6a027b8c5ee1,Ultra-Resolving Face Images by Discriminative Generative Networks,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+c5c1575565e04cd0afc57d7ac7f7a154c573b38f,Face Refinement through a Gradient Descent Alignment Approach,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+c5ba4e0a8abadb68b3de135e3da522059a99b2cd,Performance evaluation of the 1st and 2nd generation Kinect for multimedia applications,University of Padova,University of Padova,"Via Giovanni Gradenigo, 6, 35131 Padova PD, Italy",45.40811720,11.89437860,edu,"University of Padova, Via Gradenigo, 6 - 35131- Padova, Italy"
+c51fbd2574e488e486483e39702a3d7754cc769b,Face Recognition from Still Images to Video Sequences: A Local-Feature-Based Framework,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+c5ab6895710b5eb7bb783456421dab70684c017c,Instance Segmentation and Object Detection with Bounding Shape Masks,Ajou University,Ajou University,"아주대학교, 성호대교, 이의동, 영통구, 수원시, 경기, 16499, 대한민국",37.28300030,127.04548469,edu,
+c5ab6895710b5eb7bb783456421dab70684c017c,Instance Segmentation and Object Detection with Bounding Shape Masks,Ajou University,Ajou University,"아주대학교, 성호대교, 이의동, 영통구, 수원시, 경기, 16499, 대한민국",37.28300030,127.04548469,edu,
+c5f1ae9f46dc44624591db3d5e9f90a6a8391111,Application of non-negative and local non negative matrix factorization to facial expression recognition,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+c53352a4239568cc915ad968aff51c49924a3072,Transfer Representation-Learning for Anomaly Detection,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+c53352a4239568cc915ad968aff51c49924a3072,Transfer Representation-Learning for Anomaly Detection,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+c563a1a197e8e9b5119063a8fd57fa5a7ca0da03,"Gaze cues in complex, real-world scenes direct the attention of high-functioning adults with autism",University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+c563a1a197e8e9b5119063a8fd57fa5a7ca0da03,"Gaze cues in complex, real-world scenes direct the attention of high-functioning adults with autism",University of Maryland College Park,University of Maryland College Park,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",38.99203005,-76.94610290,edu,
+c563a1a197e8e9b5119063a8fd57fa5a7ca0da03,"Gaze cues in complex, real-world scenes direct the attention of high-functioning adults with autism",MIT,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+c28745625f048d86f2ad0f38a41ddc0683d36a96,"Looking, seeing and believing in autism: Eye movements reveal how subtle cognitive processing differences impact in the social domain.",University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+c28745625f048d86f2ad0f38a41ddc0683d36a96,"Looking, seeing and believing in autism: Eye movements reveal how subtle cognitive processing differences impact in the social domain.",University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+c2c5206f6a539b02f5d5a19bdb3a90584f7e6ba4,Affective Computing: A Review,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+c2fa83e8a428c03c74148d91f60468089b80c328,Optimal Mean Robust Principal Component Analysis,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+c29ca739fa740b3155c755655d590582305ef9a8,Diverse and Accurate Image Description Using a Variational Auto-Encoder with an Additive Gaussian Encoding Space,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+c286b2539ce1cbc11338409062f0c28a37dbc4c0,Heterogeneous Multilayer Generalized Operational Perceptron Dat,Tampere University of Technology,Tampere University of Technology,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi",61.44964205,23.85877462,edu,
+c232d4989ad1bd9ee19d8309cf0fdec2a5c3895f,Point-Triplet Descriptors for 3D Facial Landmark Localisation,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+c2d102743e265d1b7c6073b087d030425786deb9,Time-varying-geometry Object Surveillance Using a Multi-camera Active-vision System,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+c268c0d62eac349468f786ac50342213ef7865e0,Visual Motif Discovery via First-Person Vision,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+c268c0d62eac349468f786ac50342213ef7865e0,Visual Motif Discovery via First-Person Vision,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+c2148f81ffffeaff3fed49448fa5485f65917865,Micro-Attention for Micro-Expression recognition,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+c2148f81ffffeaff3fed49448fa5485f65917865,Micro-Attention for Micro-Expression recognition,Southwest University,Southwest University,"西南大学, 天生路, 北碚区 (Beibei), 北碚区, 北碚区 (Beibei), 重庆市, 400711, 中国",29.82366295,106.42050016,edu,
+c2c058afe227f2099aae4f204688b22239d6837a,Threatening faces fail to guide attention for adults with autistic-like traits.,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+c2c058afe227f2099aae4f204688b22239d6837a,Threatening faces fail to guide attention for adults with autistic-like traits.,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+c2021ac068c23ba6a5360312fbfa0c0d2cfb47fd,Multi-modal fusion for flasher detection in a mobile video chat application,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+c23153aade9be0c941390909c5d1aad8924821db,Efficient and Accurate Tracking for Face Diarization via Periodical Detection,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+c207fd762728f3da4cddcfcf8bf19669809ab284,Face Alignment Using Boosting and Evolutionary Search,Southeast University,Southeast University,"SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国",32.05752790,118.78682252,edu,
+c207fd762728f3da4cddcfcf8bf19669809ab284,Face Alignment Using Boosting and Evolutionary Search,Southeast University,Southeast University,"SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国",32.05752790,118.78682252,edu,
+c207fd762728f3da4cddcfcf8bf19669809ab284,Face Alignment Using Boosting and Evolutionary Search,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+c2af954c89972a716968f97a67cc3841290937d3,Derivative Variation Pattern for Illumination-invariant Image Representation,Tafresh University,Tafresh University,"دانشگاه تفرش, پاسداران, خرازان, بخش مرکزی, شهرستان تفرش, استان مرکزی, ‏ایران‎",34.68092465,50.05341352,edu,
+c2af954c89972a716968f97a67cc3841290937d3,Derivative Variation Pattern for Illumination-invariant Image Representation,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+c2d7bc19196dd4a7ed1a08d60081d16e0c14f463,Gated Fusion Network for Joint Image Deblurring and Super-Resolution,"University of California, Merced","University of California, Merced","University of California, Merced, Ansel Adams Road, Merced County, California, USA",37.36566745,-120.42158888,edu,
+c2d054f0d7f455d94f1d92959e0e549443977c55,SdcNet: A Computation-Efficient CNN for Object Recognition,Concordia University,Concordia University,"Concordia University, 2811, Northeast Holman Street, Concordia, Portland, Multnomah County, Oregon, 97211, USA",45.57022705,-122.63709346,edu,
+c2d054f0d7f455d94f1d92959e0e549443977c55,SdcNet: A Computation-Efficient CNN for Object Recognition,Concordia University,Concordia University,"Concordia University, 2811, Northeast Holman Street, Concordia, Portland, Multnomah County, Oregon, 97211, USA",45.57022705,-122.63709346,edu,
+c2e03efd8c5217188ab685e73cc2e52c54835d1a,Deep tree-structured face: A unified representation for multi-task facial biometrics,University of Tennessee,University of Tennessee,"University of Tennessee, Melrose Avenue, Fort Sanders, Knoxville, Knox County, Tennessee, 37916, USA",35.95424930,-83.93073950,edu,
+c28461e266fe0f03c0f9a9525a266aa3050229f0,Automatic Detection of Facial Feature Points via HOGs and Geometric Prior Models,Universitat Oberta de Catalunya,Universitat Oberta de Catalunya,"Universitat Oberta de Catalunya, 156, Rambla del Poblenou, Provençals del Poblenou, Sant Martí, Barcelona, BCN, CAT, 08018, España",41.40657415,2.19453410,edu,
+c2fafcbbf334447e8e3a18a2339eaff63ed2b4e3,Gamma Activation in Young People with Autism Spectrum Disorders and Typically-Developing Controls When Viewing Emotions on Faces,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+c2fafcbbf334447e8e3a18a2339eaff63ed2b4e3,Gamma Activation in Young People with Autism Spectrum Disorders and Typically-Developing Controls When Viewing Emotions on Faces,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+c2b3bf311a9182b1452f5ade82fb6db6263e2ddc,Metric Learning-based Generative Adversarial Network,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+c2be88c6d99605abca7a7377935c8809eb8d328e,Open Set Chinese Character Recognition using Multi-typed Attributes,University of Groningen,University of Groningen,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland",53.21967825,6.56251482,edu,
+c253694c153cc016d745df089bae0220e7f297ee,Image Retrieval with Mixed Initiative and Multimodal Feedback,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+f60a85bd35fa85739d712f4c93ea80d31aa7de07,VisDA: The Visual Domain Adaptation Challenge,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+f60a85bd35fa85739d712f4c93ea80d31aa7de07,VisDA: The Visual Domain Adaptation Challenge,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+f6c814f6efff8031b9ebc62cdf0f3b343441e7d3,XOGAN: One-to-Many Unsupervised Image-to-Image Translation,Hong Kong University of Science and Technology,Hong Kong University of Science and Technology,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.33863040,114.26203370,edu,
+f69de2b6770f0a8de6d3ec1a65cb7996b3c99317,Face Recognition System Based on Sparse Codeword Analysis,Anna University,Anna University,"Anna University, Nuclear Physics Road, Srinagar Colony, Ward 171, Zone 13 Adyar, Chennai, Chennai district, Tamil Nadu, 600025, India",13.01058380,80.23537360,edu,
+f68263a6f541429a8645ca2f4b0658cdbbd66638,Setting a world record in 3D Face Recognition,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+f61b4aa14b052e143db302402cf976ee93cb4eee,Real-time Semantic Image Segmentation via Spatial Sparsity,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+f678f31e7bb5eda34098b0fed608cfad5e372509,Discriminative Kernel Feature Extraction and Learning for Object Recognition and Detection,Southeast University,Southeast University,"SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国",32.05752790,118.78682252,edu,
+f65c03004e3b2ef4b4224396f7a31ee75a252d85,End-to-End Multi-Task Learning with Attention,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+f68255269d509ff617c2532bd2da71edf9576efc,New chaff point based fuzzy vault for multimodal biometric cryptosystem using particle swarm optimization,King Saud University,King Saud University,"King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية",24.72464030,46.62335012,edu,
+f6149fc5b39fa6b33220ccee32a8ee3f6bbcaf4a,Syn2Real: A New Benchmark forSynthetic-to-Real Visual Domain Adaptation,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+f69a289a3bc6b61c612ba6ff4033f122100daccb,Morphing between expressions dissociates continuous from categorical representations of facial expression in the human brain.,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f,Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+f66f3d1e6e33cb9e9b3315d3374cd5f121144213,Top-down control of visual responses to fear by the amygdala.,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+f6c7f1cbfa412fb6244992b7fb2eda0a9e0d933e,Expertise Moderates Incidentally Learned Associations Between Words and Images,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+f6c7f1cbfa412fb6244992b7fb2eda0a9e0d933e,Expertise Moderates Incidentally Learned Associations Between Words and Images,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+f6c7f1cbfa412fb6244992b7fb2eda0a9e0d933e,Expertise Moderates Incidentally Learned Associations Between Words and Images,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+f6c7f1cbfa412fb6244992b7fb2eda0a9e0d933e,Expertise Moderates Incidentally Learned Associations Between Words and Images,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+f6c7f1cbfa412fb6244992b7fb2eda0a9e0d933e,Expertise Moderates Incidentally Learned Associations Between Words and Images,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+f6c7f1cbfa412fb6244992b7fb2eda0a9e0d933e,Expertise Moderates Incidentally Learned Associations Between Words and Images,Bielefeld University,Bielefeld University,"Fachhochschule Bielefeld FB Gestaltung, 3, Lampingstraße, Mitte, Bielefeld, Regierungsbezirk Detmold, Nordrhein-Westfalen, 33615, Deutschland",52.02804210,8.51148270,edu,
+f611f46455ed6ad9af85eeb22e294082dced9bed,Learning of Visual Attribute Clusters for MultiTask Classification,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+f611f46455ed6ad9af85eeb22e294082dced9bed,Learning of Visual Attribute Clusters for MultiTask Classification,University of Ioannina,University of Ioannina,"Πανεπιστήμιο Ιωαννίνων, Πανεπιστημίου, Κάτω Νεοχωρόπουλο, Νεοχωρόπουλο, Δήμος Ιωαννιτών, Π.Ε. Ιωαννίνων, Περιφέρεια Ηπείρου, Ήπειρος - Δυτική Μακεδονία, 45110, Ελλάδα",39.61623060,20.83963011,edu,
+f61d5f2a082c65d5330f21b6f36312cc4fab8a3b,Multi-Level Variational Autoencoder: Learning Disentangled Representations From Grouped Observations,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+f62d71e701c9fd021610e2076b5e0f5b2c7c86ca,Mahalanobis Distance Learning for Person Re-identification,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+f6328f02ab64c992d76967dbfd1a66d325173723,Mel- and Mellin-cepstral Feature Extraction Algorithms for Face Recognition,Bilkent University,Bilkent University,"Bilkent Üniversitesi, 3. Cadde, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87204890,32.75395155,edu,
+f6ff469fc4b3043530f64e8642ed822d119b42c9,"SeDAR - Semantic Detection and Ranging: Humans can Localise without LiDAR, can Robots?",University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+f6ff469fc4b3043530f64e8642ed822d119b42c9,"SeDAR - Semantic Detection and Ranging: Humans can Localise without LiDAR, can Robots?",University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+f6ff469fc4b3043530f64e8642ed822d119b42c9,"SeDAR - Semantic Detection and Ranging: Humans can Localise without LiDAR, can Robots?",University of Exeter,University of Exeter,"University of Exeter, Stocker Road, Exwick, Exeter, Devon, South West England, England, EX4 4QN, UK",50.73693020,-3.53647672,edu,
+f6ff469fc4b3043530f64e8642ed822d119b42c9,"SeDAR - Semantic Detection and Ranging: Humans can Localise without LiDAR, can Robots?",University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+f632790471b2bed7ba7c28b12cda9360ec586a63,Deep Binaries: Encoding Semantic-Rich Cues for Efficient Textual-Visual Cross Retrieval,University of East Anglia,University of East Anglia,"Arts (Lower Walkway Level), The Square, Westfield View, Earlham, Norwich, Norfolk, East of England, England, NR4 7TJ, UK",52.62215710,1.24091360,edu,
+f632790471b2bed7ba7c28b12cda9360ec586a63,Deep Binaries: Encoding Semantic-Rich Cues for Efficient Textual-Visual Cross Retrieval,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"Columbus, OH 43210, USA",40.01419050,-83.03091430,edu,
+f6f12e0fbfce067d02445abde76be0522e4db329,Online Multiple targets Detection and Tracking from Mobile robot in Cluttered indoor Environments with Depth Camera,Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+f6f12e0fbfce067d02445abde76be0522e4db329,Online Multiple targets Detection and Tracking from Mobile robot in Cluttered indoor Environments with Depth Camera,Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+f6ef7200c08170aa1bf68a2fafed10bb4296c595,Towards Understanding End-of-trip Instructions in a Taxi Ride Scenario,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+f6c7fbd84e6ac61af40e670e589ec52fa435f396,An Automated System for Garment Texture Design Class Identification,Institute of Information Technology,Institute of Information Technology,"Institute of Information Technology, Sir Sayed Road, ফকিরাপুল, সিদ্দিক বাজার, ঢাকা, ঢাকা বিভাগ, 1000, বাংলাদেশ",23.72898990,90.39826820,edu,
+f6e00d6430cbbaa64789d826d093f7f3e323b082,Visual Object Recognition,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+f6e00d6430cbbaa64789d826d093f7f3e323b082,Visual Object Recognition,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+e9b8f2ee742b32ae272c950cc6fa2d5a2d05f028,Hourglass-ShapeNetwork Based Semantic Segmentation for High Resolution Aerial Imagery,Vrije Universiteit Brussel,Vrije Universiteit Brussel,"Vrije Universiteit Brussel, 170, Quai de l'Industrie - Nijverheidskaai, Anderlecht, Brussel-Hoofdstad - Bruxelles-Capitale, Région de Bruxelles-Capitale - Brussels Hoofdstedelijk Gewest, 1070, België / Belgique / Belgien",50.84110070,4.32377555,edu,
+e9b8f2ee742b32ae272c950cc6fa2d5a2d05f028,Hourglass-ShapeNetwork Based Semantic Segmentation for High Resolution Aerial Imagery,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+e9a5a38e7da3f0aa5d21499149536199f2e0e1f7,A Bayesian Scene-Prior-Based Deep Network Model for Face Verification,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+e9a5a38e7da3f0aa5d21499149536199f2e0e1f7,A Bayesian Scene-Prior-Based Deep Network Model for Face Verification,Curtin University,Curtin University,"Curtin University, Brand Drive, Waterford, Perth, Western Australia, 6102, Australia",-32.00686365,115.89691775,edu,
+e94dfdc5581f6bc0338e21ad555b5f1734f8697e,Learning to Anonymize Faces for Privacy Preserving Action Detection,"University of California, Davis","University of California, Davis","University of California, Davis, Apiary Drive, Yolo County, California, 95616-5270, USA",38.53363490,-121.79077264,edu,
+e9f82ce15b332767c0d9e6326e46bdd6a15fc689,Deep Low-Resolution Person Re-Identification,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+e9f82ce15b332767c0d9e6326e46bdd6a15fc689,Deep Low-Resolution Person Re-Identification,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+e9f82ce15b332767c0d9e6326e46bdd6a15fc689,Deep Low-Resolution Person Re-Identification,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+e97d824b8e80670d49d53c402f99e0fbeaafacdb,Neural Best-Buddies: Sparse Cross-Domain Correspondence,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+e97d824b8e80670d49d53c402f99e0fbeaafacdb,Neural Best-Buddies: Sparse Cross-Domain Correspondence,Shandong University,Shandong University,"山东大学, 泰安街, 鳌山卫街道, 即墨区, 青岛市, 山东省, 266200, 中国",36.36934730,120.67381800,edu,
+e97d824b8e80670d49d53c402f99e0fbeaafacdb,Neural Best-Buddies: Sparse Cross-Domain Correspondence,Shandong University,Shandong University,"山东大学, 泰安街, 鳌山卫街道, 即墨区, 青岛市, 山东省, 266200, 中国",36.36934730,120.67381800,edu,
+e97d824b8e80670d49d53c402f99e0fbeaafacdb,Neural Best-Buddies: Sparse Cross-Domain Correspondence,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+e912481d2d885244b1c72e5d74932429394a5789,Adaptive Appearance Rendering,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+e90a925fea8456718527a73a3621fba9b848de28,D Eep L Earning with S Ets and P Oint C Louds,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+e90e12e77cab78ba8f8f657db2bf4ae3dabd5166,Nonconvex Sparse Spectral Clustering by Alternating Direction Method of Multipliers and Its Convergence Analysis,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+e90e12e77cab78ba8f8f657db2bf4ae3dabd5166,Nonconvex Sparse Spectral Clustering by Alternating Direction Method of Multipliers and Its Convergence Analysis,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+e90e12e77cab78ba8f8f657db2bf4ae3dabd5166,Nonconvex Sparse Spectral Clustering by Alternating Direction Method of Multipliers and Its Convergence Analysis,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+e9c7d47fb82de9b71bdde1ad9b81eb2b2970b8fa,DarkRank: Accelerating Deep Metric Learning via Cross Sample Similarities Transfer,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+e985e7ec130ce4552222d7fb4b2d2f923fd2a501,Orthogonal and Idempotent Transformations for Learning Deep Neural Networks,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+e985e7ec130ce4552222d7fb4b2d2f923fd2a501,Orthogonal and Idempotent Transformations for Learning Deep Neural Networks,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+e902bad28f1370d5252e44fe4b7d0563aa9a2383,Let Features Decide for Themselves: Feature Mask Network for Person Re-identification,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+e9c008d31da38d9eef67a28d2c77cb7daec941fb,Noisy Softmax: Improving the Generalization Ability of DCNN via Postponing the Early Softmax Saturation,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+e9c008d31da38d9eef67a28d2c77cb7daec941fb,Noisy Softmax: Improving the Generalization Ability of DCNN via Postponing the Early Softmax Saturation,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+e9f4624cc9c2d7e1b9fa2545982e7678b9a5aaae,Dynamic-structured Semantic Propagation Network,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+e9ccd438d6d55ba0d11a63eb95c773d63b3ea4e5,Will you remember me ? Cultural differences in own-group face recognition biases ☆,York University,York University,"York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada",43.77439110,-79.50481085,edu,
+e9e40e588f8e6510fa5537e0c9e083ceed5d07ad,Fast Face Detection Using Graphics Processor,"National Institute of Technology, Karnataka",National Institute of Technology Karnataka,"National Institute of Technology, Karnataka, NH66, ದಕ್ಷಿಣ ಕನ್ನಡ, Mangaluru taluk, Dakshina Kannada, Karnataka, 575025, India",13.01119095,74.79498825,edu,
+e996da9beadff6f6694540c6b1794312f814dbae,Age and gender classification from ear images,Istanbul Technical University,Istanbul Technical University,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye",41.10427915,29.02231159,edu,
+e959a426d02dd014c1346131ac38ed50114c17b7,A Focused Dynamic Attention Model for Visual Question Answering,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+e9e39e31419d9a22790b327bc1d6107fa832bdab,Face recognition using adaptively weighted patch PZM array from a single exemplar image per person,Griffith University,Griffith University,"Griffith University Nathan Campus, Johnson Path, Nathan, Nathan Heights, QLD, 4111, Australia",-27.55339750,153.05336234,edu,
+e90e23a757c346170df4f403d0c18bcea2874ed7,Conditional Inference in Pre-trained Variational Autoencoders via Cross-coding,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+e90e23a757c346170df4f403d0c18bcea2874ed7,Conditional Inference in Pre-trained Variational Autoencoders via Cross-coding,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+e9bb045e702ee38e566ce46cc1312ed25cb59ea7,Integrating Geometric and Textural Features for Facial Emotion Classification Using SVM Frameworks,Indian Institute of Technology Roorkee,"Indian Institute of Technology, Roorkee","Indian Institute of Technology (IIT), Roorkee, LBS Jogging Track, Roorkee, Haridwar, Uttarakhand, 247667, India",29.86624610,77.89587081,edu,
+e9bb045e702ee38e566ce46cc1312ed25cb59ea7,Integrating Geometric and Textural Features for Facial Emotion Classification Using SVM Frameworks,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+e984017c5849ea78e3f50e374a5539770989536d,Bilinear Discriminant Analysis for Face Recognition,École Centrale de Lyon,Laboratoire LIRIS,"40 Avenue Guy de Collongue, 69130 Écully, France",45.78359660,4.76789480,edu,
+e9835bb131287d711e5e5435a5df8ce5302acb31,Person Re-identification by Unsupervised `1 Graph Learning,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+e9c2b7677660820019ac5fe0fff9ac3409555b63,Multi-Entity Bayesian Networks for Knowledge-Driven Analysis of ICH Content,Information Technologies Institute,Information Technologies Institute,"公益財団法人九州先端科学技術研究所, Fukuoka SRP Center Building 7F, 百道ランプ下り入り口, 早良区, 福岡市, 福岡県, 九州地方, 814-0001, 日本",33.59345390,130.35578370,edu,
+e9bbe558c73de60e40ce2bd8c7cb7a47dacfe594,Can White children grow up to be Black? Children's reasoning about the stability of emotion and race.,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+e9bbe558c73de60e40ce2bd8c7cb7a47dacfe594,Can White children grow up to be Black? Children's reasoning about the stability of emotion and race.,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+e9bb5cf8eca585fb1b5b7e3ade05937cbb3ee040,Toward image-based facial hair modeling,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+e9bb5cf8eca585fb1b5b7e3ade05937cbb3ee040,Toward image-based facial hair modeling,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+e9bb5cf8eca585fb1b5b7e3ade05937cbb3ee040,Toward image-based facial hair modeling,University of Basel,University of Basel,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra",47.56126510,7.57529610,edu,
+e9ba629fd9533131735e2305929faf0c2c46538b,Holistic and Feature-based Information Towards Dynamic Multi-expressions Recognition,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+e93a65ff1c7c29736cef5701f079f75ecfb76f5f,From image statistics to scene gist: evoked neural activity reveals transition from low-level natural image structure to scene category.,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+e9f1cdd9ea95810efed306a338de9e0de25990a0,FEPS: An Easy-to-Learn Sensory Substitution System to Perceive Facial Expressions,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+e9f1cdd9ea95810efed306a338de9e0de25990a0,FEPS: An Easy-to-Learn Sensory Substitution System to Perceive Facial Expressions,University of Memphis,University of Memphis,"The University of Memphis, Desoto Avenue, Memphis, Shelby County, Tennessee, 38152, USA",35.11893870,-89.93721960,edu,
+e97f4151b67e0569df7e54063d7c198c911edbdc,A New Information Fusion Method for Bimodal Robotic Emotion Recognition,National Chiao Tung University,National Chiao Tung University,"NCTU;交大;交通大學;交大光復校區;交通大學光復校區, 1001, 大學路, 光明里, 赤土崎, 東區, 新竹市, 30010, 臺灣",24.78676765,120.99724412,edu,
+f1250900074689061196d876f551ba590fc0a064,Learning to Recognize Actions From Limited Training Examples Using a Recurrent Spiking Neural Model,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+f1a772608cae0d3189ad1293d5b7631435f02e44,Saliency-based Bayesian modeling of dynamic viewing of static scenes,Yale University,Yale University,"Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA",41.25713055,-72.98966960,edu,
+f1b4583c576d6d8c661b4b2c82bdebf3ba3d7e53,Faster than Real-Time Facial Alignment: A 3D Spatial Transformer Network Approach in Unconstrained Poses,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+f1368b0001e454381eafc35324740c928cb2ad1e,Automated audio captioning with recurrent neural networks,Tampere University of Technology,Tampere University of Technology,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi",61.44964205,23.85877462,edu,
+f1278b44acc73b41c2993574392047f8d10e997f,Skeleton-Based Pose Estimation of Human Figures Dual Degree Project Report,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+f193ca76a878af87603ae8ac823a3e6d1c2e3c7e,Recurrent Multi-frame Single Shot Detector for Video Object Detection,Northwestern University,Northwestern University,"Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA",42.05511640,-87.67581113,edu,
+f14872986435c015c562a92c6c0d142bbdf1b1fb,Action Completion: A Temporal Model for Moment Detection,University of Bristol,University of Bristol,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK",51.45848370,-2.60977520,edu,
+f187b0ed2224b2861442a73ad2966c1789afc09a,Zero-Shot Learning via Revealing Data Distribution,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+f187b0ed2224b2861442a73ad2966c1789afc09a,Zero-Shot Learning via Revealing Data Distribution,North Carolina State University,North Carolina State University,"North Carolina State University, Oval Drive, West Raleigh, Raleigh, Wake County, North Carolina, 27695, USA",35.77184965,-78.67408695,edu,
+f1cec5f837638efd8fd592cf5493f33ed1fb6995,Learning to detect violent videos using convolutional long short-term memory,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+f179f7888934b11dc5a2d8ff9205d1ca8b8a1599,Illuminant direction estimation for a single image based on local region complexity analysis and average gray value.,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+f10ddad356ac9376e8c982f96cead7f6bdee3251,Riemannian Set-level Common-Near-Neighbor Analysis for Multiple-shot Person Re-identification,Kyoto University,Kyoto University,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.02749960,135.78154513,edu,
+f10ddad356ac9376e8c982f96cead7f6bdee3251,Riemannian Set-level Common-Near-Neighbor Analysis for Multiple-shot Person Re-identification,Kyoto University,Kyoto University,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.02749960,135.78154513,edu,
+f1d3fee0a4dbd4cd30195d1218423bf22e23286d,Asking Friendly Strangers: Non-Semantic Attribute Transfer,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+f1245d318eb3d775e101355f5f085a9bc4a0339b,Face Verification with Disguise Variations via Deep Disguise,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+f106ff6b2dd497650e7e2096b24a23d620a2306b,Toward A Deep Understanding of What Makes a Scientific Visualization Memorable,Ohio State University,The Ohio State University,"The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA",40.00471095,-83.02859368,edu,
+f153cbec29d86a58b5f15231fd14e7037a210682,Lost in the categorical shuffle: evidence for the social non-prototypicality of black women.,Yale University,Yale University,"Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA",41.25713055,-72.98966960,edu,
+f153cbec29d86a58b5f15231fd14e7037a210682,Lost in the categorical shuffle: evidence for the social non-prototypicality of black women.,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+f147057dfe4bbb4f9499de432cb2393547f2f339,CNN-based Facial Affect Analysis on Mobile Devices,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+f147057dfe4bbb4f9499de432cb2393547f2f339,CNN-based Facial Affect Analysis on Mobile Devices,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+f121154f0a7625fbb1613bd4cc2e705f9de8fd0c,Boosted Regression Active Shape Models,University of Manchester,University of Manchester,"University of Manchester - Main Campus, Brunswick Street, Curry Mile, Ardwick, Manchester, Greater Manchester, North West England, England, M13 9NR, UK",53.46600455,-2.23300881,edu,
+f1a62862bf3ab26588f880ec8d6f04d14b6cc2e7,FADA: An Efficient Dimension Reduction Scheme for Image Classification,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+f1a62862bf3ab26588f880ec8d6f04d14b6cc2e7,FADA: An Efficient Dimension Reduction Scheme for Image Classification,Heriot-Watt University,Heriot-Watt University,"Heriot-Watt University - Edinburgh Campus, Third Gait, Currie, Gogarbank, City of Edinburgh, Scotland, EH14 4AS, UK",55.91029135,-3.32345777,edu,
+f110f7be74261469fe9b0cc5a3b4ef35e2092d5b,Somatosensory Representations Link the Perception of Emotional Expressions and Sensory Experience,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+f15b8efe8b9511207bb1261e218a54bcfa20349b,Sparse analysis model based dictionary learning and signal reconstruction,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+f15b8efe8b9511207bb1261e218a54bcfa20349b,Sparse analysis model based dictionary learning and signal reconstruction,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+f1ac9370bdf4c408fdc242719cceb3eae19b9a16,Face Recognition after Plastic Surgery: A Comprehensive Study,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+f1ac9370bdf4c408fdc242719cceb3eae19b9a16,Face Recognition after Plastic Surgery: A Comprehensive Study,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+e78fdd62f67c38fcc6ac1421f045c9437f352b86,Deep Imbalanced Attribute Classification Using Visual Attention Aggregation,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+e79bfb8dc4ebbdeb971545bd31ffc1392ea0ad4c,Action Recognition with Exemplar Based 2.5D Graph Matching,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+e78042d77765c0fd3c09651b679e15ffd6b7e8a1,Optimized Projection for Sparse Representation Based Classification,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+e78042d77765c0fd3c09651b679e15ffd6b7e8a1,Optimized Projection for Sparse Representation Based Classification,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+e793f8644c94b81b7a0f89395937a7f8ad428a89,LPM for Action Recognition in Temporally Untrimmed Videos,University of Ottawa,University of Ottawa,"University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada",45.42580475,-75.68740118,edu,
+e726174d516605f80ff359e71f68b6e8e6ec6d5d,3D Face Recognition Using Patched Locality Preserving Projections,Institute of Information Science,Institute of Information Science,"資訊科學研究所, 數理大道, 中研里, 南港子, 南港區, 臺北市, 11574, 臺灣",25.04107280,121.61475620,edu,
+e726174d516605f80ff359e71f68b6e8e6ec6d5d,3D Face Recognition Using Patched Locality Preserving Projections,Beijing Jiaotong University,Beijing Jiaotong University,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国",39.94976005,116.33629046,edu,
+e771661fa441f008c111ea786eb275153919da6e,Globally Optimal Object Tracking with Fully Convolutional Networks,Kyushu University,Kyushu University,"伊都ゲストハウス, 桜井太郎丸線, 西区, 福岡市, 福岡県, 九州地方, 819−0395, 日本",33.59914655,130.22359848,edu,
+e73a14bbf3d00fb72b710b6c62639d65bf4ee415,Scheduling and Tuning for Low Energy in Heterogeneous and Configurable Multicore Systems,University of Miami,University of Miami,"University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA",25.71733390,-80.27866887,edu,
+e73a14bbf3d00fb72b710b6c62639d65bf4ee415,Scheduling and Tuning for Low Energy in Heterogeneous and Configurable Multicore Systems,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+e7bd805c001e04b3c015b7ec11497cd5247a1a77,Facial Component Extraction and Face Recognition with Support Vector Machines,Korea University,Korea University,"고려대, 안암로, 제기동, 동대문구, 서울특별시, 02796, 대한민국",37.59014110,127.03623180,edu,
+e7397f7f0e83494825d63b75bdd40c3879f369cd,Open-world Person Re-Identification by Multi-Label Assignment Inference,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+e7b2b0538731adaacb2255235e0a07d5ccf09189,Learning Deep Representations with Probabilistic Knowledge Transfer,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+e74816bc0803460e20edbd30a44ab857b06e288e,Semi-Automated Annotation of Discrete States in Large Video Datasets,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+e74816bc0803460e20edbd30a44ab857b06e288e,Semi-Automated Annotation of Discrete States in Large Video Datasets,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+e7a922049a9bf54a0b13cd1d475a58e36c7c9b3e,The conceptual structure of face impressions.,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+e7a922049a9bf54a0b13cd1d475a58e36c7c9b3e,The conceptual structure of face impressions.,University of Basel,University of Basel,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra",47.56126510,7.57529610,edu,
+e7a922049a9bf54a0b13cd1d475a58e36c7c9b3e,The conceptual structure of face impressions.,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+e762f25f13d6dbb95dc59af5e6fbb2160fcf4d55,Zero-Shot Detection,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+e73b9b16adcf4339ff4d6723e61502489c50c2d9,An Efficient Feature Extraction Method with Local Region Zernike Moment for Facial Recognition of Identical Twins,Amirkabir University of Technology,Amirkabir University of Technology,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎",35.70451400,51.40972058,edu,
+e73b9b16adcf4339ff4d6723e61502489c50c2d9,An Efficient Feature Extraction Method with Local Region Zernike Moment for Facial Recognition of Identical Twins,Azad University,Azad University,"پل دانشگاه آزاد, باراجین, پونک ۳, قزوین, بخش مرکزی, شهرستان قزوین, استان قزوین, ‏ایران‎",36.31734320,50.03672860,edu,
+e7a8549865978b478699647bd259f71c516c4479,Multiple People Tracking-by-Detection in a Multi-camera Environment,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+cb985b696085fdfdc664c74114b841d58382397c,Recurrent Scene Parsing with Perspective Understanding in the Loop,"University of California, Irvine","University of California, Irvine","University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA",33.64319010,-117.84016494,edu,
+cb522158aa3c91fda3089d152b0005605056852b,3D Face Recognition Using Anthropometric and Curvelet Features Fusion,University of Wollongong,University of Wollongong,"University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia",-34.40505545,150.87834655,edu,
+cb522158aa3c91fda3089d152b0005605056852b,3D Face Recognition Using Anthropometric and Curvelet Features Fusion,University of Wollongong,University of Wollongong,"University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia",-34.40505545,150.87834655,edu,
+cb1e34d7fcb7fae914fcb65cb9cf25199d49cec9,SLAQ: quality-driven scheduling for distributed machine learning,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+cbbd13c29d042743f0139f1e044b6bca731886d0,Not-So-CLEVR: learning same-different relations strains feedforward neural networks.,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+cba45a87fc6cf12b3b0b6f57ba1a5282ef7fee7a,"Emotion AI, Real-Time Emotion Detection using CNN",Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+cba45a87fc6cf12b3b0b6f57ba1a5282ef7fee7a,"Emotion AI, Real-Time Emotion Detection using CNN",Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+cba130014e6cc590a09aaeca0590623b496f126b,HeteroVisor: Exploiting Resource Heterogeneity to Enhance the Elasticity of Cloud Platforms,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+cbf69bc3e4c9b7d8cd33be81686d45f6a5f2d544,Mouth Region Localization based on Gabor Features and Active Appearance Models,Tokyo Institute of Technology,Tokyo Institute of Technology,"東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本",35.51675380,139.48342251,edu,
+cbf69bc3e4c9b7d8cd33be81686d45f6a5f2d544,Mouth Region Localization based on Gabor Features and Active Appearance Models,Tokyo Institute of Technology,Tokyo Institute of Technology,"東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本",35.51675380,139.48342251,edu,
+cbf69bc3e4c9b7d8cd33be81686d45f6a5f2d544,Mouth Region Localization based on Gabor Features and Active Appearance Models,Tokyo Institute of Technology,Tokyo Institute of Technology,"東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本",35.51675380,139.48342251,edu,
+cb0c9a9882d8c3ef86cc8747b6ff8d68579dec61,Computer-Aided Detection of Acinar Shadows in Chest Radiographs,University of Alberta,University of Alberta,"University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada",53.52385720,-113.52282665,edu,
+cb0c9a9882d8c3ef86cc8747b6ff8d68579dec61,Computer-Aided Detection of Acinar Shadows in Chest Radiographs,University of Alberta,University of Alberta,"University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada",53.52385720,-113.52282665,edu,
+cb0c9a9882d8c3ef86cc8747b6ff8d68579dec61,Computer-Aided Detection of Acinar Shadows in Chest Radiographs,University of Alberta,University of Alberta,"University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada",53.52385720,-113.52282665,edu,
+cb2917413c9b36c3bb9739bce6c03a1a6eb619b3,MiCT: Mixed 3D/2D Convolutional Tube for Human Action Recognition,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+cbb141925e919aef18f9168b79b4c4aeb871ccff,A Study Of Statistical Methods For Facial Shape-from-shading,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+cb13e29fb8af6cfca568c6dc523da04d1db1fff5,"A Survey of Automatic Facial Micro-Expression Analysis: Databases, Methods, and Challenges",Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.64185301,edu,
+cb13e29fb8af6cfca568c6dc523da04d1db1fff5,"A Survey of Automatic Facial Micro-Expression Analysis: Databases, Methods, and Challenges",Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.64185301,edu,
+cb13e29fb8af6cfca568c6dc523da04d1db1fff5,"A Survey of Automatic Facial Micro-Expression Analysis: Databases, Methods, and Challenges",University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+cb13e29fb8af6cfca568c6dc523da04d1db1fff5,"A Survey of Automatic Facial Micro-Expression Analysis: Databases, Methods, and Challenges",Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.64185301,edu,
+cb13e29fb8af6cfca568c6dc523da04d1db1fff5,"A Survey of Automatic Facial Micro-Expression Analysis: Databases, Methods, and Challenges",Monash University Malaysia,Monash University Malaysia,"Monash University Malaysia, Jalan Lagoon Selatan, Kampung Lembah Kinrara, SS13, Subang Jaya, Selangor, 47500, Malaysia",3.06405715,101.60059740,edu,
+cb2bbc19ba323ac8a7d0530fb605462c8e608e1d,Concept Mask: Large-Scale Segmentation from Semantic Concepts,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+cb5dcd048b0eaa78a887a014be26a8a7b1325d36,Joint Learning of Set Cardinality and State Distribution,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+cb1b5e8b35609e470ce519303915236b907b13b6,On the vulnerability of ECG verification to online presentation attacks,University of Connecticut,University of Connecticut,"University of Connecticut, Glenbrook Road, Storrs, Tolland County, Connecticut, 06269, USA",41.80937790,-72.25364140,edu,
+cb1b5e8b35609e470ce519303915236b907b13b6,On the vulnerability of ECG verification to online presentation attacks,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+cb5ea214f4a3ddd50e821efea003340a8036408c,Jointly Feature Learning and Selection for Robust Tracking via a Gating Mechanism,Huaqiao University,Huaqiao University,"华侨大学站 HuaQiao University (BRT), 集美大道, 集美区, 集美区 (Jimei), 厦门市 / Xiamen, 福建省, 361024, 中国",24.60047120,118.08165740,edu,
+cbb27980eb04f68d9f10067d3d3c114efa9d0054,An Attention Model for Group-Level Emotion Recognition,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+cbb27980eb04f68d9f10067d3d3c114efa9d0054,An Attention Model for Group-Level Emotion Recognition,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+cbb27980eb04f68d9f10067d3d3c114efa9d0054,An Attention Model for Group-Level Emotion Recognition,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+cba3061a883cdfb45c6d26fdee7dd53e6614d388,Per-patch Descriptor Selection Using Surface and Scene Properties,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+cb9057a47f6d3367a6756507ceb1b1f9b596eb7a,Fearful faces have a sensory advantage in the competition for awareness.,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+cb9057a47f6d3367a6756507ceb1b1f9b596eb7a,Fearful faces have a sensory advantage in the competition for awareness.,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+cb9057a47f6d3367a6756507ceb1b1f9b596eb7a,Fearful faces have a sensory advantage in the competition for awareness.,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+cb3010ae04bb144b49eb0c1061b695998d3a7441,Scene Parsing with Global Context Embedding,"University of California, Merced","University of California, Merced","University of California, Merced, Ansel Adams Road, Merced County, California, USA",37.36566745,-120.42158888,edu,
+cba8b9949e71ff485a4ecba33128e2f206651cac,An RGBD segmentation model for robot vision learned from synthetic data,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+cbe1a5b67c1d19aa1fca10473c6e88b4a444f77b,MCGraph: multi-criterion representation for scene understanding,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+f86ddd6561f522d115614c93520faad122eb3b56,Visual Imagination from Texts,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+f8bebca34cc787dd2652deb182cf66d346d06094,Local Response Context Applied to Pedestrian Detection,University of Campinas,University of Campinas,"USJ, 97, Rua Sílvia Maria Fabro, Kobrasol, Campinas, São José, Microrregião de Florianópolis, Mesorregião da Grande Florianópolis, SC, Região Sul, 88102-130, Brasil",-27.59539950,-48.61542180,edu,
+f8bebca34cc787dd2652deb182cf66d346d06094,Local Response Context Applied to Pedestrian Detection,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+f8809a55945c283d249f4c4adb5d74e452cdfaa0,Being Negative but Constructively: Lessons Learnt from Creating Better Visual Question Answering Datasets,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+f8015e31d1421f6aee5e17fc3907070b8e0a5e59,Towards Usable Multimedia Event Detection from Web Videos,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+f83dd9ff002a40228bbe3427419b272ab9d5c9e4,Facial Features Matching using a Virtual Structuring Element,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+f8beb25e944004d283e1b347e3473089da244335,Diminished Medial Prefrontal Activity behind Autistic Social Judgments of Incongruent Information,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+f8beb25e944004d283e1b347e3473089da244335,Diminished Medial Prefrontal Activity behind Autistic Social Judgments of Incongruent Information,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+f8beb25e944004d283e1b347e3473089da244335,Diminished Medial Prefrontal Activity behind Autistic Social Judgments of Incongruent Information,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+f8daab1e4f63051b78eb43e98ab723f6c425a6b5,Speaker Naming in Movies,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+f8f2d2910ce8b81cb4bbf84239f9229888158b34,A Generative Model for Recognizing Mixed Group Activities in Still Images,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+f8599ad5332cdf2c9919988ba300bb4b438b5834,Transitive Invariance for Self-Supervised Visual Representation Learning,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+f832fdf1fac092b4140bf81d38e6bc6af5c1ea65,Instance-Level Human Parsing via Part Grouping Network,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+f88a0f44ff7ec5fe0facf0facac0a094c7bd6cb8,Augmenting Image Question Answering Dataset by Exploiting Image Captions,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+f8ddb2cac276812c25021b5b79bf720e97063b1e,A Comprehensive Empirical Study on Linear Subspace Methods for Facial Expression Analysis,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+f8e64dd25c3174dff87385db56abc48101b69009,Disentangling 3D Pose in A Dendritic CNN for Unconstrained 2D Face Alignment,University of Maryland College Park,University of Maryland College Park,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",38.99203005,-76.94610290,edu,
+f83e563288e5d7a54444bbcf28a28a37b72a0644,Fused DNN: A Deep Neural Network Fusion Approach to Fast and Robust Pedestrian Detection,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+f867cd63fa18da1c52061ba22954ee9d138906dd,Creating Body Shapes From Verbal Descriptions by Linking Similarity Spaces.,University of Texas at Dallas,University of Texas at Dallas,"University of Texas at Dallas, Richardson, Dallas County, Texas, 78080, USA",32.98207990,-96.75662780,edu,
+f8ae3654c41b6ef5c5035a6db65b80137ad9a267,Anticipation Effect after Implicit Distributional Learning,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+f89edc5a4d938bf6df0a780163b872b9edeef5d8,Unified Cortical Surface Morphometry and Its Application to Quantifying Amount of Gray Matter,University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+f87b22e7f0c66225824a99cada71f9b3e66b5742,Robust emotion recognition from low quality and low bit rate video: A deep learning approach,Beckman Institute,Beckman Institute,"Beckman Institute, The Presidents' Walk, Urbana, Champaign County, Illinois, 61801-2341, USA",40.11571585,-88.22750772,edu,
+f87b22e7f0c66225824a99cada71f9b3e66b5742,Robust emotion recognition from low quality and low bit rate video: A deep learning approach,Texas A&M University,Texas A&M University,"Texas A&M University, Horticulture Street, Park West, College Station, Brazos County, Texas, 77841, USA",30.61083650,-96.35212800,edu,
+f87b22e7f0c66225824a99cada71f9b3e66b5742,Robust emotion recognition from low quality and low bit rate video: A deep learning approach,University of Missouri,University of Missouri,"L1, Maguire Boulevard, Lemone Industrial Park, Columbia, Boone County, Missouri, 65201, USA",38.92676100,-92.29193783,edu,
+f87b22e7f0c66225824a99cada71f9b3e66b5742,Robust emotion recognition from low quality and low bit rate video: A deep learning approach,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+f89e5b1f61b221c7b00db55b64239a28f8ba9fe0,Ensemble Learning-Based Person Re-identification with Multiple Feature Representations,Yunnan University,Yunnan University,"云南大学, 一二一大街, 志城家园, 五华区, 五华区 (Wuhua), 昆明市 (Kunming), 云南省, 650030, 中国",25.05703205,102.70027525,edu,
+f89e5b1f61b221c7b00db55b64239a28f8ba9fe0,Ensemble Learning-Based Person Re-identification with Multiple Feature Representations,Yunnan University,Yunnan University,"云南大学, 一二一大街, 志城家园, 五华区, 五华区 (Wuhua), 昆明市 (Kunming), 云南省, 650030, 中国",25.05703205,102.70027525,edu,
+ce6d60b69eb95477596535227958109e07c61e1e,Unconstrained face verification using fisher vectors computed from frontalized faces,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+ceb763d6657a07b47e48e8a2956bcfdf2cf10818,An Efficient Feature Extraction Method with Pseudo-zernike Moment for Facial Recognition of Identical Twins,Amirkabir University of Technology,Amirkabir University of Technology,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎",35.70451400,51.40972058,edu,
+ceb763d6657a07b47e48e8a2956bcfdf2cf10818,An Efficient Feature Extraction Method with Pseudo-zernike Moment for Facial Recognition of Identical Twins,Islamic Azad University,Islamic Azad University,"دانشگاه آزاد اسلامی, همدان, بخش مرکزی شهرستان همدان, شهرستان همدان, استان همدان, ‏ایران‎",34.84529990,48.55962120,edu,
+ceac30061d8f7985987448f4712c49eeb98efad2,MemexQA: Visual Memex Question Answering,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+cefd9936e91885ba7af9364d50470f6cb54315a4,Expectation and surprise determine neural population responses in the ventral visual stream.,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+cefd9936e91885ba7af9364d50470f6cb54315a4,Expectation and surprise determine neural population responses in the ventral visual stream.,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+cefd9936e91885ba7af9364d50470f6cb54315a4,Expectation and surprise determine neural population responses in the ventral visual stream.,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+ce386ab4511f38a7671576a9cd32e5557853180e,"Comparatives, Quantifiers, Proportions: A Multi-Task Model for the Learning of Quantities from Vision",University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+ce386ab4511f38a7671576a9cd32e5557853180e,"Comparatives, Quantifiers, Proportions: A Multi-Task Model for the Learning of Quantities from Vision",University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+ce37e11f4046a4b766b0e3228870ae4f26dddd67,Learning One-Shot Exemplar SVM from the Web for Face Verification,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+ce0aa94c79f60c35073f434a7fd6987180f81527,Achieving Anonymity against Major Face Recognition Algorithms,Ruhr-University Bochum,Ruhr-University Bochum,"RUB, 150, Universitätsstraße, Ruhr-Universität, Querenburg, Bochum-Süd, Bochum, Regierungsbezirk Arnsberg, Nordrhein-Westfalen, 44801, Deutschland",51.44415765,7.26096541,edu,
+ce0aa94c79f60c35073f434a7fd6987180f81527,Achieving Anonymity against Major Face Recognition Algorithms,Ruhr-University Bochum,Ruhr-University Bochum,"RUB, 150, Universitätsstraße, Ruhr-Universität, Querenburg, Bochum-Süd, Bochum, Regierungsbezirk Arnsberg, Nordrhein-Westfalen, 44801, Deutschland",51.44415765,7.26096541,edu,
+ceca60c4bf1a5c4e5893ae6685e7a9f80ca47f27,Visual Question: Predicting If a Crowd Will Agree on the Answer,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+ce93f83d69ee6ee981124ed1f20102335caf7b09,Deep Residual Network with Enhanced Upscaling Module for Super-Resolution,Yonsei University,Yonsei University,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.56004060,126.93692480,edu,
+cee66bd89d1e25355e78573220adcd017a2d97d8,Spatio-temporal human action localisation and instance segmentation in temporally untrimmed videos,Oxford Brookes University,Oxford Brookes University,"Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK",51.75552050,-1.22615970,edu,
+cee66bd89d1e25355e78573220adcd017a2d97d8,Spatio-temporal human action localisation and instance segmentation in temporally untrimmed videos,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+ce5eac297174c17311ee28bda534faaa1d559bae,Automatic analysis of malaria infected red blood cell digitized microscope images,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+ce5eac297174c17311ee28bda534faaa1d559bae,Automatic analysis of malaria infected red blood cell digitized microscope images,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+ce4ad1ad4134d9131af21d4213e598f03475cfd3,A CNN Based Approach for Garments Texture Design Classification,Institute of Information Technology,Institute of Information Technology,"Institute of Information Technology, Sir Sayed Road, ফকিরাপুল, সিদ্দিক বাজার, ঢাকা, ঢাকা বিভাগ, 1000, বাংলাদেশ",23.72898990,90.39826820,edu,
+ce6dbde2ad8b5b9aee2ccf4a7e33e63ccfc3689a,Overcoming Language Priors in Visual Question Answering with Adversarial Regularization,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+ce300b006f42c1b64ca0e53d1cf28d11a98ece8f,Learning Multi-Instance Enriched Image Representations via Non-Greedy Ratio Maximization of the l 1-Norm Distances,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+ce2e1bb891ffc0b114855a92f78e8aed289073af,GazeGAN - Unpaired Adversarial Image Generation for Gaze Estimation,Technion,Technion,"Haifa, 3200003, Israel",32.77677830,35.02312710,edu,
+ce13682b1771c221f0e0ed36da1cc3aaddc52188,ESPNet: Efficient Spatial Pyramid of Dilated Convolutions for Semantic Segmentation,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+ce4df7862bbf7e70d0052470e4bced479bf83703,Generic Motion based Object Segmentation for Assisted Navigation,University of Bristol,University of Bristol,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK",51.45848370,-2.60977520,edu,
+ce4df7862bbf7e70d0052470e4bced479bf83703,Generic Motion based Object Segmentation for Assisted Navigation,University of Bristol,University of Bristol,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK",51.45848370,-2.60977520,edu,
+ce450e4849490924488664b44769b4ca57f1bc1a,Procedural Generation of Videos to Train Deep Action Recognition Networks,Toyota Research Institute,Toyota Research Institute,"Toyota Research Institute, 4440, West El Camino Real, Los Altos, Santa Clara County, California, 94022, USA",37.40253645,-122.11655107,edu,
+ceeb67bf53ffab1395c36f1141b516f893bada27,Face Alignment by Local Deep Descriptor Regression,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+ceeb67bf53ffab1395c36f1141b516f893bada27,Face Alignment by Local Deep Descriptor Regression,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+ceeb67bf53ffab1395c36f1141b516f893bada27,Face Alignment by Local Deep Descriptor Regression,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+ceeb67bf53ffab1395c36f1141b516f893bada27,Face Alignment by Local Deep Descriptor Regression,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+ced73382d686dee6232c313f014bc21ca7536db0,Detection of Tongue Protrusion Gestures from Video,Tokyo Institute of Technology,Tokyo Institute of Technology,"東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本",35.51675380,139.48342251,edu,
+ce1cbcf0f671423eada02e6699d637afbd9ef570,Max-Margin Boltzmann Machines for Object Segmentation,"University of California, Merced","University of California, Merced","University of California, Merced, Ansel Adams Road, Merced County, California, USA",37.36566745,-120.42158888,edu,
+ce83369da319607fe2832485913b0f30c00920aa,Human Detection Based on Large Feature Sets Using Graphics Processing Units,University of Campinas,University of Campinas,"USJ, 97, Rua Sílvia Maria Fabro, Kobrasol, Campinas, São José, Microrregião de Florianópolis, Mesorregião da Grande Florianópolis, SC, Região Sul, 88102-130, Brasil",-27.59539950,-48.61542180,edu,
+ce032dae834f383125cdd852e7c1bc793d4c3ba3,Motion Interchange Patterns for Action Recognition in Unconstrained Videos,Weizmann Institute of Science,Weizmann Institute of Science,"מכון ויצמן למדע, שדרת מרכוס זיו, מעונות שיין, אחוזות הנשיא, רחובות, מחוז המרכז, NO, ישראל",31.90784990,34.81334092,edu,
+ce032dae834f383125cdd852e7c1bc793d4c3ba3,Motion Interchange Patterns for Action Recognition in Unconstrained Videos,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+ce032dae834f383125cdd852e7c1bc793d4c3ba3,Motion Interchange Patterns for Action Recognition in Unconstrained Videos,Open University,The Open University,"The Open University, East Lane, Walton, Monkston, Milton Keynes, South East, England, MK7 6AE, UK",52.02453775,-0.70927481,edu,
+ce9e1dfa7705623bb67df3a91052062a0a0ca456,Deep Feature Interpolation for Image Content Changes,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+ce56be1acffda599dec6cc2af2b35600488846c9,Inferring Sentiment from Web Images with Joint Inference on Visual and Social Cues: A Regulated Matrix Factorization Approach,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+ce56be1acffda599dec6cc2af2b35600488846c9,Inferring Sentiment from Web Images with Joint Inference on Visual and Social Cues: A Regulated Matrix Factorization Approach,IBM Almaden Research Center,IBM Almaden Research Center,"IBM Almaden Research Center, San José, Santa Clara County, California, USA",37.21095605,-121.80748668,company,
+ce2fd44a8c43642b76f219fe32291c1b2644cb73,Human Pose Forecasting via Deep Markov Models,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+ce3edf04c9f0c9da462832cbf8c5a1982e3e6bf8,Learning Kinematic Descriptions using SPARE: Simulated and Physical ARticulated Extendable dataset,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+ce3edf04c9f0c9da462832cbf8c5a1982e3e6bf8,Learning Kinematic Descriptions using SPARE: Simulated and Physical ARticulated Extendable dataset,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+e03bda45248b4169e2a20cb9124ae60440cad2de,"Learning a Dictionary of Shape-Components in Visual Cortex : Comparison with Neurons , Humans and Machines by Thomas Serre",MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+e03e86ac61cfac9148b371d75ce81a55e8b332ca,Unsupervised Learning using Sequential Verification for Action Recognition,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+e088a2537492ed5a22885e871a51102a95c97cb6,On the effect of Batch Normalization and Weight Normalization in Generative Adversarial Networks,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+e06d0d7513a42755ad8b33c21ec4c1660f5e0cc5,Selective Zero-Shot Classification with Augmented Attributes,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+e06d0d7513a42755ad8b33c21ec4c1660f5e0cc5,Selective Zero-Shot Classification with Augmented Attributes,"Alibaba Group, Hangzhou, China","Alibaba Group, Hangzhou, China","Alibaba Group, 五常街道, 余杭区 (Yuhang), 杭州市 Hangzhou, 浙江省, 中国",30.28106540,120.02139087,edu,
+e06d0d7513a42755ad8b33c21ec4c1660f5e0cc5,Selective Zero-Shot Classification with Augmented Attributes,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+e00c26e3d16a44baf7be389e94ed0025a0ea3867,An Evaluation of Super-Resolution for Face Recognition,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+e0b5815b0d3d6c02a114ee27dc6ea2d2c40a4458,Videos as Space-Time Region Graphs,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+e06f94ebf10b511d121725c318cd289c55349c2d,Training an adaptive dialogue policy for interactive learning of visually grounded word meanings,Heriot-Watt University,Heriot-Watt University,"Heriot-Watt University - Edinburgh Campus, Third Gait, Currie, Gogarbank, City of Edinburgh, Scotland, EH14 4AS, UK",55.91029135,-3.32345777,edu,
+e06f94ebf10b511d121725c318cd289c55349c2d,Training an adaptive dialogue policy for interactive learning of visually grounded word meanings,Heriot-Watt University,Heriot-Watt University,"Heriot-Watt University - Edinburgh Campus, Third Gait, Currie, Gogarbank, City of Edinburgh, Scotland, EH14 4AS, UK",55.91029135,-3.32345777,edu,
+e06f94ebf10b511d121725c318cd289c55349c2d,Training an adaptive dialogue policy for interactive learning of visually grounded word meanings,Heriot-Watt University,Heriot-Watt University,"Heriot-Watt University - Edinburgh Campus, Third Gait, Currie, Gogarbank, City of Edinburgh, Scotland, EH14 4AS, UK",55.91029135,-3.32345777,edu,
+e0638e0628021712ac76e3472663ccc17bd8838c,Sign Language Recognition: State of the Art,Sharda University,Sharda University,"Sharda University, Yamuna Expressway, Greater Noida, Gautam Buddha Nagar, Uttar Pradesh, 201308, India",28.47375120,77.48361480,edu,
+e0da17d5a8460ab74d4e8db338779feb2bb9fbbe,Labelless Scene Classification with Semantic Matching,Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+e0da17d5a8460ab74d4e8db338779feb2bb9fbbe,Labelless Scene Classification with Semantic Matching,Carleton University,Carleton University,"Carleton University, 1125, Colonel By Drive, Billings Bridge, Capital, Ottawa, Ontario, K1S 5B7, Canada",45.38608430,-75.69539267,edu,
+e04428ce77d6d459b7063d6bda7a8f72a539f284,RecipeQA: A Challenge Dataset for Multimodal Comprehension of Cooking Recipes,Hacettepe University,Hacettepe University,"Hacettepe Üniversitesi Beytepe Kampüsü, Hacettepe-Beytepe Kampüs Yolu, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.86742125,32.73519072,edu,
+e0d878cc095eaae220ad1f681b33d7d61eb5e425,Temporal and Fine-Grained Pedestrian Action Recognition on Driving Recorder Database,Keio University,Keio University,"綱島市民の森, けつわり坂, 港北区, 横浜市, 神奈川県, 関東地方, 223-0053, 日本",35.54169690,139.63471840,edu,
+e0d878cc095eaae220ad1f681b33d7d61eb5e425,Temporal and Fine-Grained Pedestrian Action Recognition on Driving Recorder Database,Tokyo Metropolitan University,Tokyo Metropolitan University,"首都大学東京, 由木緑道, 八王子市, 東京都, 関東地方, 1920364, 日本",35.62009250,139.38296706,edu,
+e00d4e4ba25fff3583b180db078ef962bf7d6824,Face Verification with Multi-Task and Multi-Scale Features Fusion,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+e030697c19dd1919dbdd889b69df7ab002a8af19,The expectancy bias : Expectancy-violating faces evoke earlier pupillary dilation than neutral or negative faces,Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.48799610,-3.17969747,edu,
+e030697c19dd1919dbdd889b69df7ab002a8af19,The expectancy bias : Expectancy-violating faces evoke earlier pupillary dilation than neutral or negative faces,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+e030697c19dd1919dbdd889b69df7ab002a8af19,The expectancy bias : Expectancy-violating faces evoke earlier pupillary dilation than neutral or negative faces,Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.48799610,-3.17969747,edu,
+e01bb53b611c679141494f3ffe6f0b91953af658,FSRNet: End-to-End Learning Face Super-Resolution with Facial Priors,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+e01bb53b611c679141494f3ffe6f0b91953af658,FSRNet: End-to-End Learning Face Super-Resolution with Facial Priors,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+e01bb53b611c679141494f3ffe6f0b91953af658,FSRNet: End-to-End Learning Face Super-Resolution with Facial Priors,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+e042c4d038373a68cca109336598c0323e7a9b60,Culture moderates the relationship between interdependence and face recognition,York University,York University,"York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada",43.77439110,-79.50481085,edu,
+e042c4d038373a68cca109336598c0323e7a9b60,Culture moderates the relationship between interdependence and face recognition,Concordia University,Concordia University,"Concordia University, 2811, Northeast Holman Street, Concordia, Portland, Multnomah County, Oregon, 97211, USA",45.57022705,-122.63709346,edu,
+e042c4d038373a68cca109336598c0323e7a9b60,Culture moderates the relationship between interdependence and face recognition,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+e0bfcf965b402f3f209f26ae20ee88bc4d0002ab,AI Thinking for Cloud Education Platform with Personalized Learning,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+e0bfcf965b402f3f209f26ae20ee88bc4d0002ab,AI Thinking for Cloud Education Platform with Personalized Learning,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+e0bfcf965b402f3f209f26ae20ee88bc4d0002ab,AI Thinking for Cloud Education Platform with Personalized Learning,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+e0bfcf965b402f3f209f26ae20ee88bc4d0002ab,AI Thinking for Cloud Education Platform with Personalized Learning,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+e0bfcf965b402f3f209f26ae20ee88bc4d0002ab,AI Thinking for Cloud Education Platform with Personalized Learning,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+e032f5bbee040b3898170b3f9091384658caf0d2,Navigation Behavior Design and Representations for a People Aware Mobile Robot System,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+e096db52fc8316e66273b456c58b073f9b689074,Harnessing Repetitive Behaviours to Engage Attention and Learning in a Novel Therapy for Autism: An Exploratory Analysis,University of Chicago,THE UNIVERSITY OF CHICAGO,"University of Chicago, South Ellis Avenue, Woodlawn, Chicago, Cook County, Illinois, 60637, USA",41.78468745,-87.60074933,edu,
+e096db52fc8316e66273b456c58b073f9b689074,Harnessing Repetitive Behaviours to Engage Attention and Learning in a Novel Therapy for Autism: An Exploratory Analysis,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+e096db52fc8316e66273b456c58b073f9b689074,Harnessing Repetitive Behaviours to Engage Attention and Learning in a Novel Therapy for Autism: An Exploratory Analysis,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+e096db52fc8316e66273b456c58b073f9b689074,Harnessing Repetitive Behaviours to Engage Attention and Learning in a Novel Therapy for Autism: An Exploratory Analysis,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+e096db52fc8316e66273b456c58b073f9b689074,Harnessing Repetitive Behaviours to Engage Attention and Learning in a Novel Therapy for Autism: An Exploratory Analysis,Ohio State University,The Ohio State University,"The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA",40.00471095,-83.02859368,edu,
+e00d391d7943561f5c7b772ab68e2bb6a85e64c4,Robust continuous clustering.,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+e00d391d7943561f5c7b772ab68e2bb6a85e64c4,Robust continuous clustering.,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+e00241f00fb31c660df6c6f129ca38370e6eadb3,What have we learned from deep representations for action recognition?,York University,York University,"York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada",43.77439110,-79.50481085,edu,
+e00241f00fb31c660df6c6f129ca38370e6eadb3,What have we learned from deep representations for action recognition?,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+e0244a8356b57a5721c101ead351924bcfb2eef4,Power as an emotional liability: Implications for perceived authenticity and trust after a transgression.,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+e0244a8356b57a5721c101ead351924bcfb2eef4,Power as an emotional liability: Implications for perceived authenticity and trust after a transgression.,University of Haifa,University of Haifa,"אוניברסיטת חיפה, חיפה, מחוז חיפה, ישראל",32.76162915,35.01986304,edu,
+e0244a8356b57a5721c101ead351924bcfb2eef4,Power as an emotional liability: Implications for perceived authenticity and trust after a transgression.,American University,American University,"American University, 4400, Massachusetts Avenue Northwest, Spring Valley, American University Park, D.C., 20016, USA",38.93804505,-77.08939224,edu,
+e0244a8356b57a5721c101ead351924bcfb2eef4,Power as an emotional liability: Implications for perceived authenticity and trust after a transgression.,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+e0244a8356b57a5721c101ead351924bcfb2eef4,Power as an emotional liability: Implications for perceived authenticity and trust after a transgression.,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+e0244a8356b57a5721c101ead351924bcfb2eef4,Power as an emotional liability: Implications for perceived authenticity and trust after a transgression.,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+e0244a8356b57a5721c101ead351924bcfb2eef4,Power as an emotional liability: Implications for perceived authenticity and trust after a transgression.,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+e059650472dd7bfd6907b02de491e312a0cb6d4e,Parallel Genetic Algorithms and Machine Learning,University of Portsmouth,University of Portsmouth,"University of Portsmouth - North Zone, Portland Street, Portsea, Portsmouth, South East, England, PO1 3DE, UK",50.79805775,-1.09834911,edu,
+e0dc6f1b740479098c1d397a7bc0962991b5e294,Face Detection: a Survey,Beijing University of Technology,Beijing University of Technology,"北京工业大学, 银杏大道, 大郊亭村, 朝阳区 / Chaoyang, 北京市, 3208, 中国",39.87391435,116.47722285,edu,
+e0dc6f1b740479098c1d397a7bc0962991b5e294,Face Detection: a Survey,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+e083d6f5084d8a8582af797999185c4e0d2c841a,R-CNNs for Pose Estimation and Action Detection,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+e083d6f5084d8a8582af797999185c4e0d2c841a,R-CNNs for Pose Estimation and Action Detection,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+e083d6f5084d8a8582af797999185c4e0d2c841a,R-CNNs for Pose Estimation and Action Detection,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+e083d6f5084d8a8582af797999185c4e0d2c841a,R-CNNs for Pose Estimation and Action Detection,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+4678a2ae263e7952887df31f76ab404df74a4649,High Performance Human Face Recognition using Gabor Based Pseudo Hidden Markov Model,Jadavpur University,Jadavpur University,"Jadavpur University, Chingrighata Flyover, Basani Devi Colony, Kolkata, Hāora, West Bengal, 700098, India",22.56115370,88.41310194,edu,
+468c8f09d2ad8b558b65d11ec5ad49208c4da2f2,MSR-CNN: Applying motion salient region based descriptors for action recognition,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+468c8f09d2ad8b558b65d11ec5ad49208c4da2f2,MSR-CNN: Applying motion salient region based descriptors for action recognition,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+46d7f41189c5e262df9ad1165d5a40d2b685bb0f,Discriminative Multiple Target Tracking,University of Missouri,University of Missouri,"L1, Maguire Boulevard, Lemone Industrial Park, Columbia, Boone County, Missouri, 65201, USA",38.92676100,-92.29193783,edu,
+46d7f41189c5e262df9ad1165d5a40d2b685bb0f,Discriminative Multiple Target Tracking,University of Missouri,University of Missouri,"L1, Maguire Boulevard, Lemone Industrial Park, Columbia, Boone County, Missouri, 65201, USA",38.92676100,-92.29193783,edu,
+466184b10fb7ce9857e6b5bd6b4e5003e09a0b16,Extended Grassmann Kernels for Subspace-Based Learning,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+466184b10fb7ce9857e6b5bd6b4e5003e09a0b16,Extended Grassmann Kernels for Subspace-Based Learning,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+46773c8a2fa5012f7b3e16b44214de0da3f68859,DisturbLabel: Regularizing CNN on the Loss Layer,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+46773c8a2fa5012f7b3e16b44214de0da3f68859,DisturbLabel: Regularizing CNN on the Loss Layer,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+46773c8a2fa5012f7b3e16b44214de0da3f68859,DisturbLabel: Regularizing CNN on the Loss Layer,Hefei University of Technology,Hefei University of Technology,"合肥工业大学(屯溪路校区), 193号, 南一环路, 航运南村, 包公街道, 合肥市区, 合肥市, 安徽省, 230009, 中国",31.84691800,117.29053367,edu,
+46773c8a2fa5012f7b3e16b44214de0da3f68859,DisturbLabel: Regularizing CNN on the Loss Layer,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+46a70d4020609c175bfc9f19e99aebd1c8edb20b,Fast Human Pose Estimation,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"Columbus, OH 43210, USA",40.01419050,-83.03091430,edu,
+46a70d4020609c175bfc9f19e99aebd1c8edb20b,Fast Human Pose Estimation,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"Columbus, OH 43210, USA",40.01419050,-83.03091430,edu,
+46f698dacdb5f76d6b4dae67cb1ae4da2b789398,Deformable Distributed Multiple Detector Fusion for Multi-Person Tracking,Hong Kong Baptist University,Hong Kong Baptist University,"香港浸會大學 Hong Kong Baptist University, 安明街 On Ming Street, 石門 Shek Mun, 石古壟 Shek Kwu Lung, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1132, 中国",22.38742010,114.20822220,edu,
+4696031ddcdfab8b768817fd974b601b6b68c7f1,3D Pose Estimation from a Single Monocular Image,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+465c34c3334f29de28f973b7702a235509649429,Stereopsis via deep learning,University of Frankfurt,University of Frankfurt,"Frankfurt University of Applied Sciences, Kleiststraße, Nordend West, Frankfurt, Regierungsbezirk Darmstadt, Hessen, 60318, Deutschland",50.13053055,8.69234224,edu,
+46b7ee97d7dfbd61cc3745e8dfdd81a15ab5c1d4,3D facial geometric features for constrained local model,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+46ae4d593d89b72e1a479a91806c39095cd96615,A conditional random field approach for face identification in broadcast news using overlaid text,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+467b602a67cfd7c347fe7ce74c02b38c4bb1f332,Large Margin Local Metric Learning,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+46702e0127e16a4d6a1feda3ffc5f0f123957e87,Revisit Multinomial Logistic Regression in Deep Learning: Data Dependent Model Initialization for Image Recognition,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+466f80b066215e85da63e6f30e276f1a9d7c843b,Joint Head Pose Estimation and Face Alignment Framework Using Global and Local CNN Features,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+46e46dffe4f8b724ec51179b3be1ae321fdb2d39,Collaborative Deep Reinforcement Learning,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+46e46dffe4f8b724ec51179b3be1ae321fdb2d39,Collaborative Deep Reinforcement Learning,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+46e46dffe4f8b724ec51179b3be1ae321fdb2d39,Collaborative Deep Reinforcement Learning,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+46bdfee362a4de978d24d53fd704d64d82273718,Crowd Tracking with Dynamic Evolution of Group Structures,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+46bdfee362a4de978d24d53fd704d64d82273718,Crowd Tracking with Dynamic Evolution of Group Structures,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+46bdfee362a4de978d24d53fd704d64d82273718,Crowd Tracking with Dynamic Evolution of Group Structures,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+46ee0288c382c7af7fa4f3a5e3c74d60a12c519a,Memory Based Online Learning of Deep Representations from Video Streams,University of Florence,University of Florence,"Piazza di San Marco, 4, 50121 Firenze FI, Italy",43.77764260,11.25976500,edu,
+465d5bb11912005f0a4f0569c6524981df18a7de,IMOTION - Searching for Video Sequences Using Multi-Shot Sketch Queries,University of Basel,University of Basel,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra",47.56126510,7.57529610,edu,
+46c87fded035c97f35bb991fdec45634d15f9df2,Spatial-Aware Object Embeddings for Zero-Shot Localization and Classification of Actions,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+46e72046a9bb2d4982d60bcf5c63dbc622717f0f,Learning Discriminative Features with Class Encoder,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+46f32991ebb6235509a6d297928947a8c483f29e,Recognizing Expression Variant Faces from a Single Sample Image per Class,Ohio State University,The Ohio State University,"The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA",40.00471095,-83.02859368,edu,
+4610b1e9b18f913fbbdb5bee6502f55a47610ff5,Removing image artifacts due to dirty camera lenses and thin occluders,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+4610b1e9b18f913fbbdb5bee6502f55a47610ff5,Removing image artifacts due to dirty camera lenses and thin occluders,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+4610b1e9b18f913fbbdb5bee6502f55a47610ff5,Removing image artifacts due to dirty camera lenses and thin occluders,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+46149723fef89d3b04019b4f62e4c0ceff7c76a0,Diagnostics in Semantic Segmentation,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+4641986af5fc8836b2c883ea1a65278d58fe4577,Scene Graph Generation by Iterative Message Passing,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+4641986af5fc8836b2c883ea1a65278d58fe4577,Scene Graph Generation by Iterative Message Passing,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+464b3f0824fc1c3a9eaf721ce2db1b7dfe7cb05a,Deep Adaptive Temporal Pooling for Activity Recognition,Singapore University of Technology and Design,Singapore University of Technology and Design,"Singapore University of Technology and Design, Simpang Bedok, Changi Business Park, Southeast, 486041, Singapore",1.34021600,103.96508900,edu,
+464b3f0824fc1c3a9eaf721ce2db1b7dfe7cb05a,Deep Adaptive Temporal Pooling for Activity Recognition,Singapore University of Technology and Design,Singapore University of Technology and Design,"Singapore University of Technology and Design, Simpang Bedok, Changi Business Park, Southeast, 486041, Singapore",1.34021600,103.96508900,edu,
+46d85e1dc7057bef62647bd9241601e9896a1b02,Improving object proposals with multi-thresholding straddling expansion,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+46836605c2ef5f78796644da3d385f66825518ba,Action Detection in Crowd,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+468c97bdfb67614d458ba63eee04756add5631b3,Beyond Kmedoids: Sparse Model Based Medoids Algorithm for Representative Selection,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+468c97bdfb67614d458ba63eee04756add5631b3,Beyond Kmedoids: Sparse Model Based Medoids Algorithm for Representative Selection,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+46d29ee2b97362299ef83c06ffc4461906f1ccda,It’s Written All Over Your Face: Full-Face Appearance-Based Gaze Estimation,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+46d29ee2b97362299ef83c06ffc4461906f1ccda,It’s Written All Over Your Face: Full-Face Appearance-Based Gaze Estimation,Osaka University,Osaka University,"大阪大学清明寮, 服部西町四丁目, 豊中市, 大阪府, 近畿地方, 日本",34.80809035,135.45785218,edu,
+464cd3c5f0e9a05dd685a7b71fe88b913da520b4,Increasing CNN Robustness to Occlusions by Reducing Filter Support,Technion,Technion,"Haifa, 3200003, Israel",32.77677830,35.02312710,edu,
+464cd3c5f0e9a05dd685a7b71fe88b913da520b4,Increasing CNN Robustness to Occlusions by Reducing Filter Support,Technion,Technion,"Haifa, 3200003, Israel",32.77677830,35.02312710,edu,
+4698ed97f4a78e724c903ec1dd6e5538203237c8,Using phase instead of optical flow for action recognition,Delft University of Technology,Delft University of Technology,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland",51.99882735,4.37396037,edu,
+4698ed97f4a78e724c903ec1dd6e5538203237c8,Using phase instead of optical flow for action recognition,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+4657d87aebd652a5920ed255dca993353575f441,Image Normalization for Illumination Compensation in Facial Images,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+4622b82a8aff4ac1e87b01d2708a333380b5913b,Multi-label CNN based pedestrian attribute learning for soft biometrics,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+46e866f58419ff4259c65e8256c1d4f14927b2c6,On the Generalization Power of Face and Gait Gender Recognition Methods,University of Warwick,University of Warwick,"University of Warwick, University Road, Kirby Corner, Cannon Park, Coventry, West Midlands Combined Authority, West Midlands, England, CV4 7AL, UK",52.37931310,-1.56042520,edu,
+46072f872eee3413f9d05482be6446f6b96b6c09,Trace Quotient Problems Revisited,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+46072f872eee3413f9d05482be6446f6b96b6c09,Trace Quotient Problems Revisited,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+4698a599425c3a6bae1c698456029519f8f2befe,Transferring Rich Deep Features for Facial Beauty Prediction,University of North Texas,University of North Texas,"University of North Texas, West Highland Street, Denton, Denton County, Texas, 76201, USA",33.20988790,-97.15147488,edu,
+2cdb8df791cb15eef805443293319ec8690ff88f,An Effective Approach to Pose Invariant 3D Face Recognition,Singapore Management University,Singapore Management University,"Singapore Management University, Fort Canning Tunnel, Clarke Quay, City Hall, Singapore, Central, 178895, Singapore",1.29500195,103.84909214,edu,
+2cdb8df791cb15eef805443293319ec8690ff88f,An Effective Approach to Pose Invariant 3D Face Recognition,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+2cdb8df791cb15eef805443293319ec8690ff88f,An Effective Approach to Pose Invariant 3D Face Recognition,Singapore Management University,Singapore Management University,"Singapore Management University, Fort Canning Tunnel, Clarke Quay, City Hall, Singapore, Central, 178895, Singapore",1.29500195,103.84909214,edu,
+2cdb8df791cb15eef805443293319ec8690ff88f,An Effective Approach to Pose Invariant 3D Face Recognition,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+2ce38dbb4d4228ae4a7016b0422155a274b88659,Automatic 3D Face Extraction from Raw Scanned Triangle Mesh,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+2ce38dbb4d4228ae4a7016b0422155a274b88659,Automatic 3D Face Extraction from Raw Scanned Triangle Mesh,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+2c424f21607ff6c92e640bfe3da9ff105c08fac4,Learning Structured Output Representation using Deep Conditional Generative Models,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+2c13e0f614712c6a478adea3ce011750d5e77587,Identity Verification Via the 3Bid Face Alignment System,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+2c29f5245e20b49acad4c63220a17f3b1fb8cd00,Preserving Modes and Messages via Diverse Particle Selection,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+2c29f5245e20b49acad4c63220a17f3b1fb8cd00,Preserving Modes and Messages via Diverse Particle Selection,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+2cd9a7eefc126469b566fc429657bb889d13b4fa,Robust object tracking based on RGB-D camera,Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+2cd9a7eefc126469b566fc429657bb889d13b4fa,Robust object tracking based on RGB-D camera,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+2c258eec8e4da9e65018f116b237f7e2e0b2ad17,Deep Quantization: Encoding Convolutional Activations with Deep Generative Model,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+2c258eec8e4da9e65018f116b237f7e2e0b2ad17,Deep Quantization: Encoding Convolutional Activations with Deep Generative Model,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+2cb7b6c6456735e5cf778ef9864bf590f7813ccf,Generative Dual Adversarial Network for Generalized Zero-shot Learning,University of Illinois at Chicago,University of Illinois at Chicago,"University of Illinois at Chicago, West Taylor Street, Greektown, Chicago, Cook County, Illinois, 60607, USA",41.86898915,-87.64856256,edu,
+2cb7b6c6456735e5cf778ef9864bf590f7813ccf,Generative Dual Adversarial Network for Generalized Zero-shot Learning,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+2ce073da76e6ed87eda2da08da0e00f4f060f1a6,Deep Saliency with Encoded Low Level Distance Map and High Level Features,SenseTime,SenseTime,"China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1号-7",39.99300800,116.32988200,company,"1 Zhongguancun E Rd, Haidian Qu, China"
+2c203050a6cca0a0bff80e574bda16a8c46fe9c2,Discriminative Deep Hashing for Scalable Face Image Retrieval,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+2ce3dbf18c10e62f1ffdeba5f3b16cf6c4c53c6c,Crafting 3D faces using free form portrait sketching and plausible texture inference,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+2ce3dbf18c10e62f1ffdeba5f3b16cf6c4c53c6c,Crafting 3D faces using free form portrait sketching and plausible texture inference,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+2ce3dbf18c10e62f1ffdeba5f3b16cf6c4c53c6c,Crafting 3D faces using free form portrait sketching and plausible texture inference,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+2ce3dbf18c10e62f1ffdeba5f3b16cf6c4c53c6c,Crafting 3D faces using free form portrait sketching and plausible texture inference,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+2ce3dbf18c10e62f1ffdeba5f3b16cf6c4c53c6c,Crafting 3D faces using free form portrait sketching and plausible texture inference,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+2c3430e0cbe6c8d7be3316a88a5c13a50e90021d,Multi-feature Spectral Clustering with Minimax Optimization,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+2cde051e04569496fb525d7f1b1e5ce6364c8b21,Sparse 3D convolutional neural networks,University of Warwick,University of Warwick,"University of Warwick, University Road, Kirby Corner, Cannon Park, Coventry, West Midlands Combined Authority, West Midlands, England, CV4 7AL, UK",52.37931310,-1.56042520,edu,
+2c1ffb0feea5f707c890347d2c2882be0494a67a,The Variational Homoencoder: Learning to learn high capacity generative models from few examples,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+2cdc40f20b70ca44d9fd8e7716080ee05ca7924a,Real-time Convolutional Neural Networks for Emotion and Gender Classification,Heriot-Watt University,Heriot-Watt University,"Heriot-Watt University - Edinburgh Campus, Third Gait, Currie, Gogarbank, City of Edinburgh, Scotland, EH14 4AS, UK",55.91029135,-3.32345777,edu,
+2c963e79a88a3f8ba71cd8d5c9f9f92c925f534c,An Effective Approach for Point Clouds Registration Based on the Hard and Soft Assignments,University of North Carolina at Charlotte,University of North Carolina at Charlotte,"Lot 20, Poplar Terrace Drive, Charlotte, Mecklenburg County, North Carolina, 28223, USA",35.31034410,-80.73261617,edu,
+2c2371629ad7bcde46e62859b2e812f6e5fc64cf,Action Recognition in the Presence of One Egocentric and Multiple Static Cameras,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+2cac70f9c8140a12b6a55cef834a3d7504200b62,Reconstructing High Quality Face-Surfaces using Model Based Stereo,University of Basel,University of Basel,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra",47.56126510,7.57529610,edu,
+2cac70f9c8140a12b6a55cef834a3d7504200b62,Reconstructing High Quality Face-Surfaces using Model Based Stereo,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+2c1f8ddbfbb224271253a27fed0c2425599dfe47,Understanding and Comparing Deep Neural Networks for Age and Gender Classification,Singapore University of Technology and Design,Singapore University of Technology and Design,"Singapore University of Technology and Design, Simpang Bedok, Changi Business Park, Southeast, 486041, Singapore",1.34021600,103.96508900,edu,
+2ca43325a5dbde91af90bf850b83b0984587b3cc,For Your Eyes Only – Biometric Protection of PDF Documents,Gdansk University of Technology,Gdansk University of Technology,"PG, Romualda Traugutta, Królewska Dolina, Wrzeszcz Górny, Gdańsk, pomorskie, 80-233, RP",54.37086525,18.61716016,edu,
+2c2261212051ae0d2586b90715cc411344570916,Considerations for Evaluating Models of Language Understanding and Reasoning,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+2c7946d5d2f1572c20e9843eb2033b8eb9771bf3,THEORETICAL REVIEW Mechanisms for Widespread Hippocampal Involvement in Cognition,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+2c7946d5d2f1572c20e9843eb2033b8eb9771bf3,THEORETICAL REVIEW Mechanisms for Widespread Hippocampal Involvement in Cognition,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+2c7946d5d2f1572c20e9843eb2033b8eb9771bf3,THEORETICAL REVIEW Mechanisms for Widespread Hippocampal Involvement in Cognition,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+2c7946d5d2f1572c20e9843eb2033b8eb9771bf3,THEORETICAL REVIEW Mechanisms for Widespread Hippocampal Involvement in Cognition,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+2cf6d4a3481b7ec40b704472017493ec17565e6f,Deep cross-domain building extraction for selective depth estimation from oblique aerial imagery,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+2cfc28a96b57e0817cc9624a5d553b3aafba56f3,P2F2: Privacy-preserving face finder,New Jersey Institute of Technology,New Jersey Institute of Technology,"New Jersey Institute of Technology, Warren Street, University Heights, Newark, Essex County, New Jersey, 07103, USA",40.74230250,-74.17928172,edu,
+2cae619d0209c338dc94593892a787ee712d9db0,Selective hidden random fields: Exploiting domain-specific saliency for event classification,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+2c0acaec54ab2585ff807e18b6b9550c44651eab,Face Quality Assessment for Face Verification in Video,Lomonosov Moscow State University,Lomonosov Moscow State University,"МГУ, улица Академика Хохлова, Московский государственный университет им. М. В. Ломоносова, район Раменки, Западный административный округ, Москва, ЦФО, 119234, РФ",55.70229715,37.53179777,edu,
+2ceaa8d6ee74105a6b5561661db299c885f9135b,Learning to Decode for Future Success,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+2c7934a2f1671286370cd9adebc2872c6dd318f5,Visual Scene Understanding through Semantic Segmentation,George Mason University,George Mason University,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA",38.83133325,-77.30798839,edu,
+2c7934a2f1671286370cd9adebc2872c6dd318f5,Visual Scene Understanding through Semantic Segmentation,George Mason University,George Mason University,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA",38.83133325,-77.30798839,edu,
+2c62b9e64aeddf12f9d399b43baaefbca8e11148,Evaluation of Dense 3D Reconstruction from 2D Face Images in the Wild,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+2c62b9e64aeddf12f9d399b43baaefbca8e11148,Evaluation of Dense 3D Reconstruction from 2D Face Images in the Wild,Jiangnan University,Jiangnan University,"江南大学站, 蠡湖大道, 滨湖区, 南场村, 滨湖区 (Binhu), 无锡市 / Wuxi, 江苏省, 214121, 中国",31.48542550,120.27395810,edu,
+2c62b9e64aeddf12f9d399b43baaefbca8e11148,Evaluation of Dense 3D Reconstruction from 2D Face Images in the Wild,"Sichuan University, Chengdu","Sichuan Univ., Chengdu","四川大学(华西校区), 校东路, 武侯区, 武侯区 (Wuhou), 成都市 / Chengdu, 四川省, 610014, 中国",30.64276900,104.06751175,edu,
+2c62b9e64aeddf12f9d399b43baaefbca8e11148,Evaluation of Dense 3D Reconstruction from 2D Face Images in the Wild,Reutlingen University,Reutlingen University,"Campus Hohbuch, Campus Hochschule Reutlingen, Reutlingen, Landkreis Reutlingen, Regierungsbezirk Tübingen, Baden-Württemberg, 72762, Deutschland",48.48187645,9.18682404,edu,
+2c0c5c40f98d9b645549f235a680be5b729ebe48,A Scanner Darkly: Protecting User Privacy from Perceptual Applications,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+2c0c5c40f98d9b645549f235a680be5b729ebe48,A Scanner Darkly: Protecting User Privacy from Perceptual Applications,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+2c761495cf3dd320e229586f80f868be12360d4e,Revisiting Unreasonable Effectiveness of Data in Deep Learning Era,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+2cbb2c7c0f3f78574b5e8cf197774d5b556b1202,Self-Adaptive Matrix Completion for Heart Rate Estimation from Face Videos under Realistic Conditions,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+2cbb2c7c0f3f78574b5e8cf197774d5b556b1202,Self-Adaptive Matrix Completion for Heart Rate Estimation from Face Videos under Realistic Conditions,University of Perugia,University of Perugia,"Caffe Perugia, 2350, Health Sciences Mall, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.26224210,-123.24500520,edu,
+2cbb2c7c0f3f78574b5e8cf197774d5b556b1202,Self-Adaptive Matrix Completion for Heart Rate Estimation from Face Videos under Realistic Conditions,SUNY Binghamton,State University of New York at Binghamton,"State University of New York at Binghamton, East Drive, Hinman, Willow Point, Vestal Town, Broome County, New York, 13790, USA",42.08779975,-75.97066066,edu,
+2cbb2c7c0f3f78574b5e8cf197774d5b556b1202,Self-Adaptive Matrix Completion for Heart Rate Estimation from Face Videos under Realistic Conditions,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+2cbb2c7c0f3f78574b5e8cf197774d5b556b1202,Self-Adaptive Matrix Completion for Heart Rate Estimation from Face Videos under Realistic Conditions,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+2cff3c291e03dda9ed6cf9747eeffc5642762e52,A Survey: Face Recognition Techniques,"COMSATS Institute of Information Technology, Lahore",COMSATS Institute of Information Technology,"COMSATS Institute of Information Technology, Ali Akbar Road, Dawood Residency, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54700, ‏پاکستان‎",31.40063320,74.21372960,edu,
+2cff3c291e03dda9ed6cf9747eeffc5642762e52,A Survey: Face Recognition Techniques,National University of Sciences and Technology,National University of Sciences and Technology,"National University of Sciences and Technology (NUST), Kashmir Highway, جی - 10, ICT, وفاقی دارالحکومت اسلام آباد, 44000, ‏پاکستان‎",33.64434700,72.98850790,edu,
+2ca761938bd789b82d1a4ca85e7b8d5661093660,Enhancing Music Information Retrieval by Incorporating Image-Based Local Features,Open University,The Open University,"The Open University, East Lane, Walton, Monkston, Milton Keynes, South East, England, MK7 6AE, UK",52.02453775,-0.70927481,edu,
+2ca761938bd789b82d1a4ca85e7b8d5661093660,Enhancing Music Information Retrieval by Incorporating Image-Based Local Features,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+2c55ac6330ce91a24131a81807237807134ec371,Visual Phrase Learning and Its Application in Computed Tomographic Colonography,National Institutes of Health,National Institutes of Health,"NIH, Pooks Hill, Bethesda, Montgomery County, Maryland, USA",39.00041165,-77.10327775,edu,
+2c4def184f940e2dd4302bdc130999c27054de3e,A new spontaneous expression database and a study of classification-based expression analysis methods,Loughborough University,Loughborough University,"Computer Science, University Road, Charnwood, Leicestershire, East Midlands, England, LE11 3TP, UK",52.76635770,-1.22924610,edu,
+2c4def184f940e2dd4302bdc130999c27054de3e,A new spontaneous expression database and a study of classification-based expression analysis methods,Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.64185301,edu,
+2c19d3d35ef7062061b9e16d040cebd7e45f281d,End-to-end Video-level Representation Learning for Action Recognition,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+2c797d2daefba6cfceb8510219163dc7dcfa0a66,Discriminative Region Proposal Adversarial Networks for High-Quality Image-to-Image Translation,Ocean University of China,Ocean University of China,"中国海洋大学, 238, 松岭路 Sōnglǐng Road, 朱家洼, 崂山区 (Laoshan), 青岛市, 山东省, 266100, 中国",36.16161795,120.49355276,edu,
+2c3cac0f568ae9261ff9c80eeda55a13e83ae7fb,A discriminative framework for modelling object classes,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+2c17d36bab56083293456fe14ceff5497cc97d75,Unconstrained Face Alignment via Cascaded Compositional Learning,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+2c17d36bab56083293456fe14ceff5497cc97d75,Unconstrained Face Alignment via Cascaded Compositional Learning,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+2c17d36bab56083293456fe14ceff5497cc97d75,Unconstrained Face Alignment via Cascaded Compositional Learning,SenseTime,SenseTime,"China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1号-7",39.99300800,116.32988200,company,"1 Zhongguancun E Rd, Haidian Qu, China"
+2cd7821fcf5fae53a185624f7eeda007434ae037,Exploring the geo-dependence of human face appearance,University of Kentucky,University of Kentucky,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA",38.03337420,-84.50177580,edu,
+2cd7821fcf5fae53a185624f7eeda007434ae037,Exploring the geo-dependence of human face appearance,University of Kentucky,University of Kentucky,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA",38.03337420,-84.50177580,edu,
+2c3c72fffcbbf66cbb649b64aa51199722140ad1,TVT: Two-View Transformer Network for Video Captioning,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+7901a33fe442fca87be7f8bb295091feb25f69bc,Bayesian and Information-Theoretic Learning of High Dimensional Data,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+798d042a70b2c824998b3fc39a6e21799b588832,Face sketch recognition by Local Radon Binary Pattern: LRBP,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+794ddb1f3b7598985d4d289b5b0664be736a50c4,Exploiting Competition Relationship for Robust Visual Recognition,Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+79e1b9e425621dd5a683026b7158479c10f6780a,Vehicle Detection Method Based on Edge Information and Local Transform Histogram,Hanyang University,Hanyang University,"한양대, 206, 왕십리로, 사근동, 성동구, 서울특별시, 04763, 대한민국",37.55572710,127.04366420,edu,
+79e1b9e425621dd5a683026b7158479c10f6780a,Vehicle Detection Method Based on Edge Information and Local Transform Histogram,Hanyang University,Hanyang University,"한양대, 206, 왕십리로, 사근동, 성동구, 서울특별시, 04763, 대한민국",37.55572710,127.04366420,edu,
+793651f4cf210bd81922d173346b037d66f2b4a4,Bayes Optimality in Linear Discriminant Analysis,Ohio State University,The Ohio State University,"The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA",40.00471095,-83.02859368,edu,
+79d3cb01f4907e895a7afced8b090427c39b9b84,Spatial-Aware Object Embeddings for Zero-Shot Localization and Classification of Actions,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+794f76c111ba1a4ca718e84ae74ee8d2a67c4173,Ventromedial prefrontal cortex mediates visual attention during facial emotion recognition.,University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+794f76c111ba1a4ca718e84ae74ee8d2a67c4173,Ventromedial prefrontal cortex mediates visual attention during facial emotion recognition.,University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+794f76c111ba1a4ca718e84ae74ee8d2a67c4173,Ventromedial prefrontal cortex mediates visual attention during facial emotion recognition.,University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+794f76c111ba1a4ca718e84ae74ee8d2a67c4173,Ventromedial prefrontal cortex mediates visual attention during facial emotion recognition.,University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+794f76c111ba1a4ca718e84ae74ee8d2a67c4173,Ventromedial prefrontal cortex mediates visual attention during facial emotion recognition.,University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+79a36b19ea363c14af27a1f4112a9eccdd582837,The scope of social attention deficits in autism: prioritized orienting to people and animals in static natural scenes.,Yale University,Yale University,"Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA",41.25713055,-72.98966960,edu,
+79a36b19ea363c14af27a1f4112a9eccdd582837,The scope of social attention deficits in autism: prioritized orienting to people and animals in static natural scenes.,Yale University,Yale University,"Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA",41.25713055,-72.98966960,edu,
+79894ddf290d3c7a768d634eceb7888564b5cf19,Query-Guided Regression Network with Context Policy for Phrase Grounding,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+7934c91f09e5bf819519d4348aafdda7c99267bb,Discovering gender differences in facial emotion recognition via implicit behavioral cues,International Institute of Information Technology,International Institute of Information Technology,"International Institute of Information Technology, Hyderabad, Campus Road, Ward 105 Gachibowli, Greater Hyderabad Municipal Corporation West Zone, Hyderabad, Rangareddy District, Telangana, 500032, India",17.44549570,78.34854698,edu,
+7934c91f09e5bf819519d4348aafdda7c99267bb,Discovering gender differences in facial emotion recognition via implicit behavioral cues,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+7934c91f09e5bf819519d4348aafdda7c99267bb,Discovering gender differences in facial emotion recognition via implicit behavioral cues,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+799537fa855caf53a6a3a7cf20301a81e90da127,High-Order Attention Models for Visual Question Answering,Technion,Technion,"Haifa, 3200003, Israel",32.77677830,35.02312710,edu,
+799537fa855caf53a6a3a7cf20301a81e90da127,High-Order Attention Models for Visual Question Answering,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+799537fa855caf53a6a3a7cf20301a81e90da127,High-Order Attention Models for Visual Question Answering,Technion,Technion,"Haifa, 3200003, Israel",32.77677830,35.02312710,edu,
+79443a311d75fc0187314d21f8b065b33e5b41cd,The Association of Urban Greenness and Walking Behavior: Using Google Street View and Deep Learning Techniques to Estimate Residents’ Exposure to Urban Greenness,City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+79744fc71bea58d2e1918c9e254b10047472bd76,Disentangling 3D Pose in A Dendritic CNN for Unconstrained 2D Face Alignment,University of Maryland College Park,University of Maryland College Park,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",38.99203005,-76.94610290,edu,
+79cc0f893af976fe1052240518f47f3bee56c6f6,Template Matching for Wide-Baseline Panoramic Images from a Vehicle-Borne Multi-Camera Rig,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+79cc0f893af976fe1052240518f47f3bee56c6f6,Template Matching for Wide-Baseline Panoramic Images from a Vehicle-Borne Multi-Camera Rig,Capital Normal University,Capital Normal University,"首都师范大学, 岭南路, 西冉村, 海淀区, 100048, 中国",39.92864575,116.30104052,edu,
+794c0dc199f0bf778e2d40ce8e1969d4069ffa7b,Odd Leaf Out: Improving Visual Recognition with Games,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+794c0dc199f0bf778e2d40ce8e1969d4069ffa7b,Odd Leaf Out: Improving Visual Recognition with Games,University of Maryland College Park,University of Maryland College Park,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",38.99203005,-76.94610290,edu,
+79f9a15b4e838d6db91249a85d72fadb07aee927,Less is More: Zero-Shot Learning from Online Textual Documents with Noise Suppression,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+794d344d6aa97e3cb67a44739207aa9c1360db8d,Probabilistic Low-Rank Subspace Clustering,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+794d344d6aa97e3cb67a44739207aa9c1360db8d,Probabilistic Low-Rank Subspace Clustering,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+7965b2ce7d64991218515e20fc1fc0459fd20a38,Video-based Person Re-identification Using Spatial-Temporal Attention Networks,University of Manitoba,University of Manitoba,"University of Manitoba, Gillson Street, Normand Park, Saint Vital, Winnipeg, Manitoba, R3T 2N2, Canada",49.80915360,-97.13304179,edu,
+7965b2ce7d64991218515e20fc1fc0459fd20a38,Video-based Person Re-identification Using Spatial-Temporal Attention Networks,University of Manitoba,University of Manitoba,"University of Manitoba, Gillson Street, Normand Park, Saint Vital, Winnipeg, Manitoba, R3T 2N2, Canada",49.80915360,-97.13304179,edu,
+7965b2ce7d64991218515e20fc1fc0459fd20a38,Video-based Person Re-identification Using Spatial-Temporal Attention Networks,University of Manitoba,University of Manitoba,"University of Manitoba, Gillson Street, Normand Park, Saint Vital, Winnipeg, Manitoba, R3T 2N2, Canada",49.80915360,-97.13304179,edu,
+799c02a3cde2c0805ea728eb778161499017396b,PersonRank: Detecting Important People in Images,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+799c02a3cde2c0805ea728eb778161499017396b,PersonRank: Detecting Important People in Images,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+7958893d88c007d6569c1f2f9771d1c63b99422f,Structured Uncertainty Prediction Networks,University of Bath,University of Bath,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK",51.37914420,-2.32523320,edu,
+7958893d88c007d6569c1f2f9771d1c63b99422f,Structured Uncertainty Prediction Networks,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+795b76ebf17c559d82ea6976f1749096036d6817,Automatic Curation of Golf Highlights Using Multimodal Excitement Features,IBM Thomas J. Watson Research Center,IBM Thomas J. Watson Research Center,"IBM Yorktown research lab, Adams Road, Millwood, Town of New Castle, Westchester County, New York, 10562, USA",41.21002475,-73.80407056,company,
+79ef25ed4863311000975b955651c0515fe38f45,Pyramid Attention Network for Semantic Segmentation,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+79ef25ed4863311000975b955651c0515fe38f45,Pyramid Attention Network for Semantic Segmentation,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+79ef25ed4863311000975b955651c0515fe38f45,Pyramid Attention Network for Semantic Segmentation,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+79ef25ed4863311000975b955651c0515fe38f45,Pyramid Attention Network for Semantic Segmentation,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+792928e5e539dfbba334c36bee337449c4918d6a,Learning to Transfer: Transferring Latent Task Structures and Its Application to Person-Specific Facial Action Unit Detection,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+7966146d72f9953330556baa04be746d18702047,Harnessing Human Manipulation,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+79eb06c8acce1feef4a8654287d9cf5081e19600,Self-supervised learning of a facial attribute embedding from video,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+79fa57dedafddd3f3720ca26eb41c82086bfb332,Modeling facial expression space for recognition,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+79fa57dedafddd3f3720ca26eb41c82086bfb332,Modeling facial expression space for recognition,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+79fa57dedafddd3f3720ca26eb41c82086bfb332,Modeling facial expression space for recognition,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+79fa57dedafddd3f3720ca26eb41c82086bfb332,Modeling facial expression space for recognition,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+79fa57dedafddd3f3720ca26eb41c82086bfb332,Modeling facial expression space for recognition,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+79fa57dedafddd3f3720ca26eb41c82086bfb332,Modeling facial expression space for recognition,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+79a3a07661b8c6a36070fd767344e15c847a30ef,Contextual Pooling in Image Classification,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+79d50641eec7fbb6588909f96aeaef4a7b42c9e9,DSSLIC: Deep Semantic Segmentation-based Layered Image Compression,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+7933a312c4a4ba431eb0357fd05e8609ca66eaa7,Backpropagation for Implicit Spectral Densities,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+7969cc315bbafcd38a637eb8cd5d45ba897be319,An enhanced deep feature representation for person re-identification,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+79db191ca1268dc88271abef3179c4fe4ee92aed,Facial Expression Based Automatic Album Creation,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+79db191ca1268dc88271abef3179c4fe4ee92aed,Facial Expression Based Automatic Album Creation,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+79db191ca1268dc88271abef3179c4fe4ee92aed,Facial Expression Based Automatic Album Creation,University of Canberra,University of Canberra,"University of Canberra, University Drive, Bruce, Belconnen, Australian Capital Territory, 2617, Australia",-35.23656905,149.08446994,edu,
+2d2b1f9446e9b4cdb46327cda32a8d9621944e29,Information revelation and privacy in online social networks,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+2d990b04c2bd61d3b7b922b8eed33aeeeb7b9359,Discriminative Dictionary Learning with Pairwise Constraints,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+2d7aa6af536a703471c56cc94bfd99471963b305,Learning to Separate Object Sounds by Watching Unlabeled Video,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+2d25045ec63f9132371841c0beccd801d3733908,Multi-Layer Sparse Representation for Weighted LBP-Patches Based Facial Expression Recognition,Dalian University of Technology,Dalian University of Technology,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国",38.88140235,121.52281098,edu,
+2d4c5768a65f05f96ae71a269422d0c3d371b26a,Semantics-Aware Deep Correspondence Structure Learning for Robust Person Re-Identification,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+2d68cac1bd2f18631051cfbd4a46b67be1a939fe,PSSDL: Probabilistic Semi-supervised Dictionary Learning,Sharif University of Technology,Sharif University of Technology,"دانشگاه صنعتی شریف, خیابان آزادی, زنجان, منطقه ۹ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, 14588, ‏ایران‎",35.70362270,51.35125097,edu,
+2d4215a73e4cabc12a8ea5f49a3661d741add0c4,Unsupervised Detection of Regions of Interest Using Iterative Link Analysis,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+2d4215a73e4cabc12a8ea5f49a3661d741add0c4,Unsupervised Detection of Regions of Interest Using Iterative Link Analysis,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+2dd3dff173686d66af70e7180fabd8755dd1307d,Coupling detection and data association for multiple object tracking,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+2d080662a1653f523321974a57518e7cb67ecb41,On Constrained Local Model Feature Normalization for Facial Expression Recognition,Florida International University,Florida International University,"FIU, Southwest 14th Street, Sweetwater, University Park, Miami-Dade County, Florida, 33199, USA",25.75533775,-80.37628897,edu,
+2d4b9fe3854ccce24040074c461d0c516c46baf4,Temporal Action Localization by Structured Maximal Sums,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+2d4b9fe3854ccce24040074c461d0c516c46baf4,Temporal Action Localization by Structured Maximal Sums,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+2d64839bcf82e0a89d7e4874909c6114083c8a4f,Real-Time Multi-Person Tracking with Time-Constrained Detection,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+2d638ee3e358732f3c052b854dc16949fdd4a2c3,Challenges in Executing Data Intensive Biometric Workloads on a Desktop Grid,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+2d9e58ea582e054e9d690afca8b6a554c3687ce6,Learning local feature aggregation functions with backpropagation,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+2d164f88a579ba53e06b601d39959aaaae9016b7,Dynamic Facial Expression Recognition Using A Bayesian Temporal Manifold Model,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+2d96178c760b08a6892647fb53b0d46b113db163,Localization of Humans in Images Using Convolutional Networks,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+2d8001ffee6584b3f4d951d230dc00a06e8219f8,Feature Agglomeration Networks for Single Stage Face Detection,Singapore Management University,Singapore Management University,"Singapore Management University, Fort Canning Tunnel, Clarke Quay, City Hall, Singapore, Central, 178895, Singapore",1.29500195,103.84909214,edu,
+2d8001ffee6584b3f4d951d230dc00a06e8219f8,Feature Agglomeration Networks for Single Stage Face Detection,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+2d23fa205acca9c21e3e1a04674f1e5a9528550e,The Fast and the Flexible: Extended Pseudo Two-Dimensional Warping for Face Recognition,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+2d02cf53bc0f2d919b89bec8f9160b50916bb625,Joint Training of a Convolutional Network and a Graphical Model for Human Pose Estimation,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+2d244d70ed1a2ba03d152189f1f90ff2b4f16a79,An Analytical Mapping for LLE and Its Application in Multi-Pose Face Synthesis,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+2d31ab536b3c8a05de0d24e0257ca4433d5a7c75,Materials discovery: Fine-grained classification of X-ray scattering images,University of North Carolina at Chapel Hill,University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.91139710,-79.05045290,edu,
+2dbde64ca75e7986a0fa6181b6940263bcd70684,Pose Independent Face Recognition by Localizing Local Binary Patterns via Deformation Components,University of Florence,University of Florence,"Piazza di San Marco, 4, 50121 Firenze FI, Italy",43.77764260,11.25976500,edu,
+2dbde64ca75e7986a0fa6181b6940263bcd70684,Pose Independent Face Recognition by Localizing Local Binary Patterns via Deformation Components,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+2d0363a3ebda56d91d704d5ff5458a527775b609,Attribute2Image: Conditional Image Generation from Visual Attributes,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+2d48557e4107d93126e7f7b74fb04517697f6a52,Self-Training Ensemble Networks for Zero-Shot Image Recognition,Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+2d48557e4107d93126e7f7b74fb04517697f6a52,Self-Training Ensemble Networks for Zero-Shot Image Recognition,Carleton University,Carleton University,"Carleton University, 1125, Colonel By Drive, Billings Bridge, Capital, Ottawa, Ontario, K1S 5B7, Canada",45.38608430,-75.69539267,edu,
+2d93a9aa8bed51d0d1b940c73ac32c046ebf1eb8,Perceptual Reward Functions,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+2d93a9aa8bed51d0d1b940c73ac32c046ebf1eb8,Perceptual Reward Functions,Waseda University,Waseda University,"早稲田大学 北九州キャンパス, 2-2, 有毛引野線, 八幡西区, 北九州市, 福岡県, 九州地方, 808-0135, 日本",33.88987280,130.70856205,edu,
+2dd2c7602d7f4a0b78494ac23ee1e28ff489be88,Large scale metric learning from equivalence constraints,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+2d05fd37ef8f148711819d777757bdcacfaaf175,3D Multi-Spectrum Sensor System with Face Recognition,Yonsei University,Yonsei University,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.56004060,126.93692480,edu,
+2d84e30c61281d3d7cdd11676683d6e66a68aea6,Automatic Construction of Action Datasets Using Web Videos with Density-Based Cluster Analysis and Outlier Detection,University of Electro-Communications,The University of Electro-Communications,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本",35.65729570,139.54255868,edu,
+2d98a1cb0d1a37c79a7ebcb727066f9ccc781703,Coupled Support Vector Machines for Supervised Domain Adaptation,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+2def47989c6f9143184b5eaaf3aca3f2833f3e05,Learning from Unscripted Deictic Gesture and Language for Human-Robot Interactions,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+2dd46b83a1cf5c7c811a462728d9797c270c2cb4,Recurrent Human Pose Estimation,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+2dced31a14401d465cd115902bf8f508d79de076,Can a Humanoid Face be Expressive? A Psychophysiological Investigation,University of Pisa,University of Pisa,"Dipartimento di Fisica 'E. Fermi', 3, Largo Bruno Pontecorvo, San Francesco, Pisa, PI, TOS, 56127, Italia",43.72012990,10.40789760,edu,
+2dced31a14401d465cd115902bf8f508d79de076,Can a Humanoid Face be Expressive? A Psychophysiological Investigation,University of Pisa,University of Pisa,"Dipartimento di Fisica 'E. Fermi', 3, Largo Bruno Pontecorvo, San Francesco, Pisa, PI, TOS, 56127, Italia",43.72012990,10.40789760,edu,
+2dced31a14401d465cd115902bf8f508d79de076,Can a Humanoid Face be Expressive? A Psychophysiological Investigation,Qatar University,Qatar University,"Qatar University, Roindabout 3, Al Tarfa (68), أم صلال, 24685, ‏قطر‎",25.37461295,51.48980354,edu,
+2dced31a14401d465cd115902bf8f508d79de076,Can a Humanoid Face be Expressive? A Psychophysiological Investigation,University of Pisa,University of Pisa,"Dipartimento di Fisica 'E. Fermi', 3, Largo Bruno Pontecorvo, San Francesco, Pisa, PI, TOS, 56127, Italia",43.72012990,10.40789760,edu,
+2deed841cfde51ce3b4e90880894efbbfdc18f18,Privacy-Preserving Egocentric Activity Recognition from Extreme Low Resolution,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+2deed841cfde51ce3b4e90880894efbbfdc18f18,Privacy-Preserving Egocentric Activity Recognition from Extreme Low Resolution,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+2da0f99ae90ea3e6ccbd3f43e52dbf5aa1553363,To Track or To Detect? An Ensemble Framework for Optimal Selection,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+2dfe0e7e81f65716b09c590652a4dd8452c10294,Incongruence Between Observers’ and Observed Facial Muscle Activation Reduces Recognition of Emotional Facial Expressions From Video Stimuli,University of Bath,University of Bath,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK",51.37914420,-2.32523320,edu,
+2dfe0e7e81f65716b09c590652a4dd8452c10294,Incongruence Between Observers’ and Observed Facial Muscle Activation Reduces Recognition of Emotional Facial Expressions From Video Stimuli,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+2dd5f1d69e0e8a95a10f3f07f2c0c7fa172994b3,Machine Analysis of Facial Expressions,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+2d105eea4f594519bd337298c55b9af3da178293,Deep Randomized Ensembles for Metric Learning,Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+2d38fd1df95f5025e2cee5bc439ba92b369a93df,Scalable Object-Class Search via Sparse Retrieval Models and Approximate Ranking,Dartmouth College,Dartmouth College,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA",43.70479270,-72.29259090,edu,
+2d83ba2d43306e3c0587ef16f327d59bf4888dc3,Large-Scale Video Classification with Convolutional Neural Networks,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+2d79d338c114ece1d97cde1aa06ab4cf17d38254,iLab-20M: A Large-Scale Controlled Object Dataset to Investigate Deep Learning,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+2d79d338c114ece1d97cde1aa06ab4cf17d38254,iLab-20M: A Large-Scale Controlled Object Dataset to Investigate Deep Learning,Amirkabir University of Technology,Amirkabir University of Technology,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎",35.70451400,51.40972058,edu,
+2d3482dcff69c7417c7b933f22de606a0e8e42d4,Labeled Faces in the Wild : Updates and New Reporting Procedures,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+2d1e009a7b7a6304903ba183e39395c358f652e8,ResearchDoom and CocoDoom: Learning Computer Vision with Games,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+2d1e009a7b7a6304903ba183e39395c358f652e8,ResearchDoom and CocoDoom: Learning Computer Vision with Games,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+2d1e009a7b7a6304903ba183e39395c358f652e8,ResearchDoom and CocoDoom: Learning Computer Vision with Games,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+2d1e009a7b7a6304903ba183e39395c358f652e8,ResearchDoom and CocoDoom: Learning Computer Vision with Games,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+2dc7d439e99f15a499cd2dcbdfbc1c0c7648964d,Computational Understanding of Image Memorability by Zoya Bylinskii,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+2d4a3e9361505616fa4851674eb5c8dd18e0c3cf,Towards Privacy-Preserving Visual Recognition via Adversarial Training: A Pilot Study,Texas A&M University,Texas A&M University,"Texas A&M University, Horticulture Street, Park West, College Station, Brazos County, Texas, 77841, USA",30.61083650,-96.35212800,edu,
+2d21e6f8bd9e9f647f3517f51347ad89b4381a7f,Identifying Individual Facial Expressions by Deconstructing a Neural Network,Korea University,Korea University,"고려대, 안암로, 제기동, 동대문구, 서울특별시, 02796, 대한민국",37.59014110,127.03623180,edu,
+2df5e2adf01a803405341af1943651f6d8658bce,Taking mobile multi-object tracking to the next level,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+2d748f8ee023a5b1fbd50294d176981ded4ad4ee,Triplet Similarity Embedding for Face Verification,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+2dbff0b15221234e00bec4a00b4897c631904fcf,Learning Efficient Image Representation for Person Re-Identification,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+2d3c17ced03e4b6c4b014490fe3d40c62d02e914,Video-driven state-aware facial animation,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+41f26101fed63a8d149744264dd5aa79f1928265,Spot On: Action Localization from Pointly-Supervised Proposals,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+41f26101fed63a8d149744264dd5aa79f1928265,Spot On: Action Localization from Pointly-Supervised Proposals,Delft University of Technology,Delft University of Technology,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland",51.99882735,4.37396037,edu,
+416b559402d0f3e2b785074fcee989d44d82b8e5,Multi-view Super Vector for Action Recognition,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+416b559402d0f3e2b785074fcee989d44d82b8e5,Multi-view Super Vector for Action Recognition,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+416364cfdbc131d6544582e552daf25f585c557d,Synthesis and recognition of facial expressions in virtual 3D views,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+414315d44a489d09c6e1933033ffba6396974ee1,Video Visual Relation Detection,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+41199678ad9370ff8ca7e9e3c2617b62a297fac3,Multitask Deep Learning models for real-time deployment in embedded systems,"KTH Royal Institute of Technology, Stockholm","KTH Royal Institute of Technology, Stockholm","KTH, Teknikringen, Lärkstaden, Norra Djurgården, Östermalms stadsdelsområde, Sthlm, Stockholm, Stockholms län, Svealand, 114 28, Sverige",59.34986645,18.07063213,edu,
+411ee9236095f8f5ca3b9ef18fd3381c1c68c4b8,An Empirical Evaluation of the Local Texture Description Framework-Based Modified Local Directional Number Pattern with Various Classifiers for Face Recognition,Manonmaniam Sundaranar University,Manonmaniam Sundaranar University,"Manonmaniam Sundaranar University, Tenkasi-Tirunelveli, Gandhi Nagar, Tirunelveli, Tirunelveli Kattabo, Tamil Nadu, 627808, India",8.76554685,77.65100445,edu,
+411278b73afedca6976f02a8d3a38cdec3337f87,Cross-View Projective Dictionary Learning for Person Re-Identification,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+411278b73afedca6976f02a8d3a38cdec3337f87,Cross-View Projective Dictionary Learning for Person Re-Identification,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+411278b73afedca6976f02a8d3a38cdec3337f87,Cross-View Projective Dictionary Learning for Person Re-Identification,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+411318684bd2d42e4b663a37dcf0532a48f0146d,Improved Face Verification with Simple Weighted Feature Combination,Tongji University,Tongji University,"同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国",31.28473925,121.49694909,edu,
+41d9fbf55ea7142b13b68d8ddfe764896569cd32,Efficient Mining of Frequent and Distinctive Feature Configurations,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+4140498e96a5ff3ba816d13daf148fffb9a2be3f,Constrained Ensemble Initialization for Facial Landmark Tracking in Video,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+4168fd6fd9e672223fefc9706596121d653e39ff,Early and late stage processing abnormalities in autism spectrum disorders: An ERP study,Southwest University,Southwest University,"西南大学, 天生路, 北碚区 (Beibei), 北碚区, 北碚区 (Beibei), 重庆市, 400711, 中国",29.82366295,106.42050016,edu,
+4168fd6fd9e672223fefc9706596121d653e39ff,Early and late stage processing abnormalities in autism spectrum disorders: An ERP study,University of Macau,University of Macau,"研究生宿舍 Residência de Estudantes de Pós-Graduação da Universidade de Macau, 澳門大學 Universidade de Macau, 嘉模堂區 Nossa Senhora do Carmo, 氹仔 Taipa, Universidade de Macau em Ilha de Montanha 澳門大學橫琴校區, 中国",22.12401870,113.54510901,edu,
+41f8477a6be9cd992a674d84062108c68b7a9520,An Automated System for Visual Biometrics,Northwestern University,Northwestern University,"Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA",42.05511640,-87.67581113,edu,
+412a82b94129477d3cce2f737365219103715db2,A novel symbolization technique for time-series outlier detection,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+412a82b94129477d3cce2f737365219103715db2,A novel symbolization technique for time-series outlier detection,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+41be021880a916305c82199ddc2298eb271f6590,Benchmarks for Image Classification and Other High-dimensional Pattern Recognition Problems,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+419f05c5888804e0a9d9f2dc60839f2d8d65a7a6,Image Feature Extraction Using 2D Mel-Cepstrum,Bilkent University,Bilkent University,"Bilkent Üniversitesi, 3. Cadde, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87204890,32.75395155,edu,
+41aa8c1c90d74f2653ef4b3a2e02ac473af61e47,Compositional Structure Learning for Action Understanding,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+41aa8c1c90d74f2653ef4b3a2e02ac473af61e47,Compositional Structure Learning for Action Understanding,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+41971dfbf404abeb8cf73fea29dc37b9aae12439,Detection of Facial Feature Points Using Anthropometric Face Model,Concordia University,Concordia University,"Concordia University, 2811, Northeast Holman Street, Concordia, Portland, Multnomah County, Oregon, 97211, USA",45.57022705,-122.63709346,edu,
+41da7c52a09072fd9c5275f03f4fa6f6d41e1aed,Viewpoint Invariant 3D Human Pose Estimation with Recurrent Error Feedback,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+4157e45f616233a0874f54a59c3df001b9646cd7,Diagnostically relevant facial gestalt information from ordinary photos,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+4157e45f616233a0874f54a59c3df001b9646cd7,Diagnostically relevant facial gestalt information from ordinary photos,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+4157e45f616233a0874f54a59c3df001b9646cd7,Diagnostically relevant facial gestalt information from ordinary photos,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+416087bc2df1e5150231d85b8103d816fc32a2a3,Local normal binary patterns for 3D facial action unit detection,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+416087bc2df1e5150231d85b8103d816fc32a2a3,Local normal binary patterns for 3D facial action unit detection,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+41b2068f134adf9afb3dae2d8811e2d21f471e3d,Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence Models,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+412b3ef02c85087e5f1721176114672c722b17a4,A Taxonomy of Deep Convolutional Neural Nets for Computer Vision,Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.02223470,77.56718325,edu,
+412b3ef02c85087e5f1721176114672c722b17a4,A Taxonomy of Deep Convolutional Neural Nets for Computer Vision,University of Bath,University of Bath,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK",51.37914420,-2.32523320,edu,
+412b3ef02c85087e5f1721176114672c722b17a4,A Taxonomy of Deep Convolutional Neural Nets for Computer Vision,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+413c98ff2d95b5b945825268fd8ffdc65880f715,Human Pose Estimation in Videos,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+41cbf0750fa0d08880068f9a89be92232795f357,Can the Early Human Visual System Compete with Deep Neural Networks?,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+41612c66beaad320af9b7d34407c7d0f4ca7bfea,Inhibition or Ideology ? The Neural Mechanisms of Evaluating Race-Targeted Government Assistance,University of Arizona,University of Arizona,"University of Arizona, North Highland Avenue, Rincon Heights, Barrio Viejo, Tucson, Pima County, Arizona, 85721, USA",32.23517260,-110.95095832,edu,
+4136a4c4b24c9c386d00e5ef5dffdd31ca7aea2c,Multi-Modal Person-Profiles from Broadcast News Video,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+41e1084e74564ced3e1fa845250162d6d0f2b9c3,A Texture-based Approach to Face Detection,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+411dc8874fd7b3a9a4c1fd86bb5b583788027776,Direct Shape Regression Networks for End-to-End Face Alignment,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+411dc8874fd7b3a9a4c1fd86bb5b583788027776,Direct Shape Regression Networks for End-to-End Face Alignment,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+4189862b2ce9c71e1b451deb58dd42f50c7d04a1,Autistic trait interactions underlie sex-dependent facial recognition abilities in the normal population,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+4189862b2ce9c71e1b451deb58dd42f50c7d04a1,Autistic trait interactions underlie sex-dependent facial recognition abilities in the normal population,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+41c87d3342a85712a3591b6d49d99be8fc8d35d9,Face-trait inferences show robust child – adult agreement : Evidence from three types of faces,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+41aa209e9d294d370357434f310d49b2b0baebeb,Beyond caption to narrative: Video captioning with multiple sentences,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+4156746cdc99a509549c4028c7122eb6dc90b1a1,CausalGAN: Learning Causal Implicit Generative Models with Adversarial Training,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+4156f9fc5983b09eb97ad3d9abc248b15440b955,"2 Subspace Methods for Face Recognition : Singularity , Regularization , and Robustness",Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+41b1069c06735a20f9b4281001285ee2167da309,Intra-View and Inter-View Supervised Correlation Analysis for Multi-View Feature Learning,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+41b1069c06735a20f9b4281001285ee2167da309,Intra-View and Inter-View Supervised Correlation Analysis for Multi-View Feature Learning,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+41b1069c06735a20f9b4281001285ee2167da309,Intra-View and Inter-View Supervised Correlation Analysis for Multi-View Feature Learning,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+4115652b7fad7a474b5af1f4c063b1f9717b1bf8,Exploring the Feasibility of Prompting Daily Task Execution using the NAO Humanoid Robot with Children with Autism Spectrum Disorder,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+417df443367334351111a064a601355450b2531f,Building structural similarity database for metric learning,Northwestern University,Northwestern University,"Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA",42.05511640,-87.67581113,edu,
+419550e7b918c64785f087b17f7fde6c94bc6d4e,Distributional semantics with eyes: using image analysis to improve computational representations of word meaning,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+419550e7b918c64785f087b17f7fde6c94bc6d4e,Distributional semantics with eyes: using image analysis to improve computational representations of word meaning,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+839a2155995acc0a053a326e283be12068b35cb8,Handcrafted Local Features are Convolutional Neural Networks,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+83fd2d2d5ad6e4e153672c9b6d1a3785f754b60e,Quantifying naturalistic social gaze in fragile X syndrome using a novel eye tracking paradigm.,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+83fd2d2d5ad6e4e153672c9b6d1a3785f754b60e,Quantifying naturalistic social gaze in fragile X syndrome using a novel eye tracking paradigm.,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+837635f647c42d03812a7f4ab5f87c5a49372a0b,Gait Recognition Using Gait Entropy Image,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+8356832f883207187437872742d6b7dc95b51fde,Adversarial Perturbations Against Real-Time Video Classification Systems,"University of California, Riverside","University of California, Riverside","University of California, Riverside, Linden Street, Riverside, Riverside County, California, 92521, USA",33.98071305,-117.33261035,edu,
+8356832f883207187437872742d6b7dc95b51fde,Adversarial Perturbations Against Real-Time Video Classification Systems,"University of California, Riverside","University of California, Riverside","University of California, Riverside, Linden Street, Riverside, Riverside County, California, 92521, USA",33.98071305,-117.33261035,edu,
+8356832f883207187437872742d6b7dc95b51fde,Adversarial Perturbations Against Real-Time Video Classification Systems,"University of California, Riverside","University of California, Riverside","University of California, Riverside, Linden Street, Riverside, Riverside County, California, 92521, USA",33.98071305,-117.33261035,edu,
+8356832f883207187437872742d6b7dc95b51fde,Adversarial Perturbations Against Real-Time Video Classification Systems,"University of California, Riverside","University of California, Riverside","University of California, Riverside, Linden Street, Riverside, Riverside County, California, 92521, USA",33.98071305,-117.33261035,edu,
+8356832f883207187437872742d6b7dc95b51fde,Adversarial Perturbations Against Real-Time Video Classification Systems,"University of California, Riverside","University of California, Riverside","University of California, Riverside, Linden Street, Riverside, Riverside County, California, 92521, USA",33.98071305,-117.33261035,edu,
+8356832f883207187437872742d6b7dc95b51fde,Adversarial Perturbations Against Real-Time Video Classification Systems,"University of California, Riverside","University of California, Riverside","University of California, Riverside, Linden Street, Riverside, Riverside County, California, 92521, USA",33.98071305,-117.33261035,edu,
+83a811fd947415df2413d15386dbc558f07595cb,Fine-grained Discriminative Localization via Saliency-guided Faster R-CNN,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+83a811fd947415df2413d15386dbc558f07595cb,Fine-grained Discriminative Localization via Saliency-guided Faster R-CNN,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+835e510fcf22b4b9097ef51b8d0bb4e7b806bdfd,Unsupervised Learning of Sequence Representations by Autoencoders,Delft University of Technology,Delft University of Technology,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland",51.99882735,4.37396037,edu,
+832ed998ff123d4e0f86e6e3fd0d9f5428864600,"PReMVOS : Proposal-generation , Refinement and Merging for the DAVIS Challenge on Video Object Segmentation 2018",RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+83d16fb8f53156c9e2b28d75abb6532af515440f,Large-scale Document Labeling using Supervised Sequence Embedding,Drexel University,Drexel University,"Drexel University, Arch Street, Powelton Village, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.95740000,-75.19026706,edu,
+831d661d657d97a07894da8639a048c430c5536d,Weakly Supervised Facial Analysis with Dense Hyper-Column Features,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+8370912226ee7783c459368593bc3f88310b1414,"Biometrics : An Overview of the Technology , Issues and Applications",Anna University,Anna University,"Anna University, Nuclear Physics Road, Srinagar Colony, Ward 171, Zone 13 Adyar, Chennai, Chennai district, Tamil Nadu, 600025, India",13.01058380,80.23537360,edu,
+832b37fab195a8ed71614c87666b9f6e71e367c6,Robustness to Occlusions by Reducing Filter Support,Technion,Technion,"Haifa, 3200003, Israel",32.77677830,35.02312710,edu,
+832b37fab195a8ed71614c87666b9f6e71e367c6,Robustness to Occlusions by Reducing Filter Support,Technion,Technion,"Haifa, 3200003, Israel",32.77677830,35.02312710,edu,
+83295bce2340cb87901499cff492ae6ff3365475,Deep Multi-Center Learning for Face Alignment,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+83295bce2340cb87901499cff492ae6ff3365475,Deep Multi-Center Learning for Face Alignment,East China Normal University,East China Normal University,"华东师范大学, 3663, 中山北路, 曹家渡, 普陀区, 普陀区 (Putuo), 上海市, 200062, 中国",31.22849230,121.40211389,edu,
+83e96ed8a4663edaa3a5ca90b7ce75a1bb595b05,Recognition from Appearance Subspaces across Image Sets of Variable Scale,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+83e8bbbccb8613de490b1a362dd3fedc411cbfe0,A3FD: Accurate 3D Face Detection,University of Milan,University of Milan,"Milan Avenue, Ray Mar Terrace, University City, St. Louis County, Missouri, 63130, USA",38.67966620,-90.32628160,edu,
+831226405bb255527e9127b84e8eaedd7eb8e9f9,A Motion-Based Feature for Event-Based Pattern Recognition,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+831226405bb255527e9127b84e8eaedd7eb8e9f9,A Motion-Based Feature for Event-Based Pattern Recognition,Portland State University,Portland State University,"Portland State University, Southwest Park Avenue, University District, Portland Downtown, Portland, Multnomah County, Oregon, 97201, USA",45.51181205,-122.68492999,edu,
+8328ced86dffd1bfe300dca9e960ee328ae9ab0d,Gradient-Domain Techniques for Image & Video Processing,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+8384e104796488fa2667c355dd15b65d6d5ff957,A Discriminative Latent Model of Image Region and Object Tag Correspondence,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+8384e104796488fa2667c355dd15b65d6d5ff957,A Discriminative Latent Model of Image Region and Object Tag Correspondence,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+8323529cf37f955fb3fc6674af6e708374006a28,Evaluation of Face Resolution for Expression Analysis,IBM Thomas J. Watson Research Center,IBM Thomas J. Watson Research Center,"IBM Yorktown research lab, Adams Road, Millwood, Town of New Castle, Westchester County, New York, 10562, USA",41.21002475,-73.80407056,company,
+832377d50d133da3514ae3c51c0e6043ab856eea,Human Pose Estimation from Depth Images via Inference Embedded Multi-task Learning,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+832377d50d133da3514ae3c51c0e6043ab856eea,Human Pose Estimation from Depth Images via Inference Embedded Multi-task Learning,"National University of Defense Technology, China","National University of Defence Technology, Changsha 410000, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.22902090,112.99483204,edu,
+837792b672a3a4a06a22b2c26a8ecd3812fe8330,A Unified Bayesian Framework for Adaptive Visual Tracking,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+833c1c0180ca36ea07ecfe44caf2b739c94f511e,Predicting Ground-Level Scene Layout from Aerial Imagery,University of Kentucky,University of Kentucky,"University of Kentucky, Columbia Avenue, Sorority Circle, Lexington, Fayette County, Kentucky, 40508, USA",38.03337420,-84.50177580,edu,
+83c695de8b42e592b3f23948f90b699b82c0b068,Fusing Crowd Density Maps and Visual Object Trackers for People Tracking in Crowd Scenes,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+833b6e61468fe655b5067ca91608fc37246c767b,FERA 2015 - second Facial Expression Recognition and Analysis challenge,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+833b6e61468fe655b5067ca91608fc37246c767b,FERA 2015 - second Facial Expression Recognition and Analysis challenge,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+833b6e61468fe655b5067ca91608fc37246c767b,FERA 2015 - second Facial Expression Recognition and Analysis challenge,Binghamton University,Binghamton University,"Binghamton University Downtown Center, Washington Street, Downtown, Binghamton, Broome County, New York, 13901, USA",42.09580770,-75.91455689,edu,
+833b6e61468fe655b5067ca91608fc37246c767b,FERA 2015 - second Facial Expression Recognition and Analysis challenge,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+833b6e61468fe655b5067ca91608fc37246c767b,FERA 2015 - second Facial Expression Recognition and Analysis challenge,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+833b6e61468fe655b5067ca91608fc37246c767b,FERA 2015 - second Facial Expression Recognition and Analysis challenge,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+83acde484baf81ee3a09c30ec250c11c111d2c0a,Tracking articulated human movements witha component based approach to boosted multiple instance learning,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+838ed2aae603dec5851ebf5e4bc64b54db7f34be,Real-Time Ensemble Based Face Recognition System for Humanoid Robots,University of Tartu,UNIVERSITY OF TARTU,"Paabel, University of Tartu, 17, Ülikooli, Kesklinn, Tartu linn, Tartu, Tartu linn, Tartu maakond, 53007, Eesti",58.38131405,26.72078081,edu,
+8318f563f915031c677decc3d133c2aee803591d,Efficient Sparse Representation Classification Using Adaptive Clustering,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+83d956ed39127058e02395924f96b68e2f8289e0,Efficient video multicast in wireless surveillance networks for intelligent building,City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+83d956ed39127058e02395924f96b68e2f8289e0,Efficient video multicast in wireless surveillance networks for intelligent building,City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+836a4ee4bffafba259e2d824fc89020de86daab0,Identification of Structurally Damaged Areas in Airborne Oblique Images Using a Visual-Bag-of-Words Approach,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+8334da483f1986aea87b62028672836cb3dc6205,Fully Associative Patch-Based 1-to-N Matcher for Face Recognition,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+831b4d8b0c0173b0bac0e328e844a0fbafae6639,Consensus-Driven Propagation in Massive Unlabeled Data for Face Recognition,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+831b4d8b0c0173b0bac0e328e844a0fbafae6639,Consensus-Driven Propagation in Massive Unlabeled Data for Face Recognition,SenseTime,SenseTime,"China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1号-7",39.99300800,116.32988200,company,"1 Zhongguancun E Rd, Haidian Qu, China"
+831b4d8b0c0173b0bac0e328e844a0fbafae6639,Consensus-Driven Propagation in Massive Unlabeled Data for Face Recognition,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+1b8290ff2fe1b04df14f2504b38beb749e2e75ca,Classifying Unseen Instances by Learning Class-Independent Similarity Functions,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+1b7f9cc57ab8f3f551bdb0d5f153191ec403895e,Learning Multiple Tasks with Multilinear Relationship Networks,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+1be8aa30f905577c1d60150fb6ba84ddaabb2f6e,Motionlets: Mid-level 3D Parts for Human Motion Recognition,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+1be8aa30f905577c1d60150fb6ba84ddaabb2f6e,Motionlets: Mid-level 3D Parts for Human Motion Recognition,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+1b3e66bef13f114943d460b4f942e941b4761ba2,Subspace Approximation of Face Recognition Algorithms: An Empirical Study,University of South Florida,University of South Florida,"University of South Florida, Leroy Collins Boulevard, Tampa, Hillsborough County, Florida, 33620, USA",28.05999990,-82.41383619,edu,
+1b3e66bef13f114943d460b4f942e941b4761ba2,Subspace Approximation of Face Recognition Algorithms: An Empirical Study,National Institute of Standards and Technology,National Institute of Standards and Technology,"National Institute of Standards and Technology, Summer Walk Drive, Diamond Farms, Gaithersburg, Montgomery County, Maryland, 20878, USA",39.12549380,-77.22293475,edu,
+1b92fdffa3f87c1081e88c41b5fb0d7d31b3873e,Illumination Insensitive Face Representation for Face Recognition Based on Modified Weberface,Tokyo Institute of Technology,Tokyo Institute of Technology,"東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本",35.51675380,139.48342251,edu,
+1b92fdffa3f87c1081e88c41b5fb0d7d31b3873e,Illumination Insensitive Face Representation for Face Recognition Based on Modified Weberface,Tokyo Institute of Technology,Tokyo Institute of Technology,"東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本",35.51675380,139.48342251,edu,
+1b635f494eff2e5501607ebe55eda7bdfa8263b8,USC at THUMOS 2014,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+1b1d31dcd365c48ca39b4eadcdabf1c70104e490,On use of biometrics in forensics: Gait and ear,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+1bbe0371ca22c2fdb6e0d098049bbf6430324bdb,"Socializing the Semantic Gap: A Comparative Survey on Image Tag Assignment, Refinement and Retrieval",University of Florence,University of Florence,"Piazza di San Marco, 4, 50121 Firenze FI, Italy",43.77764260,11.25976500,edu,
+1bbe0371ca22c2fdb6e0d098049bbf6430324bdb,"Socializing the Semantic Gap: A Comparative Survey on Image Tag Assignment, Refinement and Retrieval",Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+1bbe0371ca22c2fdb6e0d098049bbf6430324bdb,"Socializing the Semantic Gap: A Comparative Survey on Image Tag Assignment, Refinement and Retrieval",University of Florence,University of Florence,"Piazza di San Marco, 4, 50121 Firenze FI, Italy",43.77764260,11.25976500,edu,
+1bbe0371ca22c2fdb6e0d098049bbf6430324bdb,"Socializing the Semantic Gap: A Comparative Survey on Image Tag Assignment, Refinement and Retrieval",University of Florence,University of Florence,"Piazza di San Marco, 4, 50121 Firenze FI, Italy",43.77764260,11.25976500,edu,
+1be9ee50f4d4f59b9761a366bba9127213dc4f33,You cannot gamble on others: Dissociable systems for strategic uncertainty and risk in the brain,University of Miami,University of Miami,"University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA",25.71733390,-80.27866887,edu,
+1bcc4f0f58848190ae0b2098eadf06002d5f70b4,Scalable object-class retrieval with approximate and top-k ranking,Dartmouth College,Dartmouth College,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA",43.70479270,-72.29259090,edu,
+1b2ad281ef74e366ec58221b13edc6eefdb170f8,Use and Usefulness of Dynamic Face Stimuli for Face Perception Studies—a Review of Behavioral Findings and Methodology,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+1b2ad281ef74e366ec58221b13edc6eefdb170f8,Use and Usefulness of Dynamic Face Stimuli for Face Perception Studies—a Review of Behavioral Findings and Methodology,Max Planck Institute for Biological Cybernetics,Max Planck Institute for Biological Cybernetics,"Max-Planck-Institut für Biologische Kybernetik, 8, Max-Planck-Ring, Max-Planck-Institut, Wanne, Tübingen, Landkreis Tübingen, Regierungsbezirk Tübingen, Baden-Württemberg, 72076, Deutschland",48.53691250,9.05922533,edu,
+1b2ad281ef74e366ec58221b13edc6eefdb170f8,Use and Usefulness of Dynamic Face Stimuli for Face Perception Studies—a Review of Behavioral Findings and Methodology,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+1b2ad281ef74e366ec58221b13edc6eefdb170f8,Use and Usefulness of Dynamic Face Stimuli for Face Perception Studies—a Review of Behavioral Findings and Methodology,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+1b2ad281ef74e366ec58221b13edc6eefdb170f8,Use and Usefulness of Dynamic Face Stimuli for Face Perception Studies—a Review of Behavioral Findings and Methodology,Universität Hamburg,Universität Hamburg,"Informatikum, 30, Vogt-Kölln-Straße, Stellingen, Eimsbüttel, Hamburg, 22527, Deutschland",53.59948200,9.93353436,edu,
+1b5866c5b3715b410bfb4ccca6d42b32162d4ef1,Now You See Me: Deep Face Hallucination for Unviewed Sketches,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+1b5866c5b3715b410bfb4ccca6d42b32162d4ef1,Now You See Me: Deep Face Hallucination for Unviewed Sketches,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+1bb652545b316701faf582d673a98060ee426f37,Robust Object Co-detection,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+1bb652545b316701faf582d673a98060ee426f37,Robust Object Co-detection,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+1b5875dbebc76fec87e72cee7a5263d325a77376,Learnt Quasi-Transitive Similarity for Retrieval from Large Collections of Faces,University of St Andrews,University of St Andrews,"University of St Andrews, North Street, Albany Park Student accommodation, Carngour, St Andrews, Fife, Scotland, KY16 9AJ, UK",56.34119840,-2.79309380,edu,
+1be10b1f05fe7a5dd28cbb63d61a992c5d9b611a,Light-weight Head Pose Invariant Gaze Tracking,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+1bd1645a629f1b612960ab9bba276afd4cf7c666,End-to-End People Detection in Crowded Scenes,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+1bd1645a629f1b612960ab9bba276afd4cf7c666,End-to-End People Detection in Crowded Scenes,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+1bdfb3deae6e6c0df6537efcd1d7edcb4d7a96e9,Groupwise Constrained Reconstruction for Subspace Clustering,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+1babf4bc962593593c83ac70f3b7ee64b3e5a680,Detecting Objects Using Deformation Dictionaries,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+1babf4bc962593593c83ac70f3b7ee64b3e5a680,Detecting Objects Using Deformation Dictionaries,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+1b21acf8daaf86c4f2228fa3f5e9aa38ab8ad30d,Cross-scene crowd counting via deep convolutional neural networks,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+1b21acf8daaf86c4f2228fa3f5e9aa38ab8ad30d,Cross-scene crowd counting via deep convolutional neural networks,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+1b21acf8daaf86c4f2228fa3f5e9aa38ab8ad30d,Cross-scene crowd counting via deep convolutional neural networks,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"Columbus, OH 43210, USA",40.01419050,-83.03091430,edu,
+1b73bd672c6abe6918f91812f4334db23189d1d6,Adversarial PoseNet: A Structure-Aware Convolutional Network for Human Pose Estimation,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+1b73bd672c6abe6918f91812f4334db23189d1d6,Adversarial PoseNet: A Structure-Aware Convolutional Network for Human Pose Estimation,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+1b73bd672c6abe6918f91812f4334db23189d1d6,Adversarial PoseNet: A Structure-Aware Convolutional Network for Human Pose Estimation,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+1b794b944fd462a2742b6c2f8021fecc663004c9,A Hierarchical Probabilistic Model for Facial Feature Detection,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+1bca1a09e2ef62b1960c23ff6653ae2d5aef5718,Comparison of human face matching behavior and computational image similarity measure,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+1bca1a09e2ef62b1960c23ff6653ae2d5aef5718,Comparison of human face matching behavior and computational image similarity measure,University of Manchester,University of Manchester,"University of Manchester - Main Campus, Brunswick Street, Curry Mile, Ardwick, Manchester, Greater Manchester, North West England, England, M13 9NR, UK",53.46600455,-2.23300881,edu,
+1b7ae509c8637f3c123cf6151a3089e6b8a0d5b2,From Few to Many: Generative Models for Recognition Under Variable Pose and Illumination,Beckman Institute,Beckman Institute,"Beckman Institute, The Presidents' Walk, Urbana, Champaign County, Illinois, 61801-2341, USA",40.11571585,-88.22750772,edu,
+1b7ae509c8637f3c123cf6151a3089e6b8a0d5b2,From Few to Many: Generative Models for Recognition Under Variable Pose and Illumination,Yale University,Yale University,"Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA",41.25713055,-72.98966960,edu,
+1b7ae509c8637f3c123cf6151a3089e6b8a0d5b2,From Few to Many: Generative Models for Recognition Under Variable Pose and Illumination,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+1b41d4ffb601d48d7a07dbbae01343f4eb8cc38c,Exploiting Temporal Information for DCNN-Based Fine-Grained Object Classification,Queensland University of Technology,Queensland University of Technology,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.47715625,153.02841004,edu,
+1b41d4ffb601d48d7a07dbbae01343f4eb8cc38c,Exploiting Temporal Information for DCNN-Based Fine-Grained Object Classification,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+1b41d4ffb601d48d7a07dbbae01343f4eb8cc38c,Exploiting Temporal Information for DCNN-Based Fine-Grained Object Classification,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+1b248ed8e7c9514648cd598960fadf9ab17e7fe8,"From apparent to real age: gender, age, ethnic, makeup, and expression bias analysis in real age estimation",University of Tartu,UNIVERSITY OF TARTU,"Paabel, University of Tartu, 17, Ülikooli, Kesklinn, Tartu linn, Tartu, Tartu linn, Tartu maakond, 53007, Eesti",58.38131405,26.72078081,edu,
+1b248ed8e7c9514648cd598960fadf9ab17e7fe8,"From apparent to real age: gender, age, ethnic, makeup, and expression bias analysis in real age estimation",University of Barcelona,University of Barcelona,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España",41.38689130,2.16352385,edu,
+1b248ed8e7c9514648cd598960fadf9ab17e7fe8,"From apparent to real age: gender, age, ethnic, makeup, and expression bias analysis in real age estimation",University of Barcelona,University of Barcelona,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España",41.38689130,2.16352385,edu,
+1bbf4275c1dbe3203b0e2261114850fbe8ca7e0e,Higher level techniques for the artistic rendering of images and video,University of Bath,University of Bath,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK",51.37914420,-2.32523320,edu,
+1b715b4cef51be6bd5dd73c0d30257d853411a52,Fourth-Person Sensing for Pro-active Services,Kyushu University,Kyushu University,"伊都ゲストハウス, 桜井太郎丸線, 西区, 福岡市, 福岡県, 九州地方, 819−0395, 日本",33.59914655,130.22359848,edu,
+1b60b8e70859d5c85ac90510b370b501c5728620,Using Detailed Independent 3D Sub-models to Improve Facial Feature Localisation and Pose Estimation,University of Manchester,University of Manchester,"University of Manchester - Main Campus, Brunswick Street, Curry Mile, Ardwick, Manchester, Greater Manchester, North West England, England, M13 9NR, UK",53.46600455,-2.23300881,edu,
+1bc5c938b79f23afb9931c99377d6ce7a99bf8fb,Multi-view 3D face reconstruction with deep recurrent neural networks,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+1bc9aaa41c08bbd0c01dd5d7d7ebf3e48ae78113,k-Same-Net: k-Anonymity with Generative Deep Neural Networks for Face Deidentification,University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.05015580,14.46907327,edu,
+1bc9aaa41c08bbd0c01dd5d7d7ebf3e48ae78113,k-Same-Net: k-Anonymity with Generative Deep Neural Networks for Face Deidentification,University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.05015580,14.46907327,edu,
+1b93698c1784db4abf72b500e51d4806e6430522,Re-id: Hunting Attributes in the Wild,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+1bcbf2a4500d27d036e0f9d36d7af71c72f8ab61,Recognizing facial expression: machine learning and application to spontaneous behavior,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+1b5baa2ff3b6f88865fd244d87d39d58282d8597,Large-Scale Image Classification using High Performance Clustering,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+1b72222651c5b0295981e26d1333fadfcfb6a480,High-Quality Face Image SR Using Conditional Generative Adversarial Networks,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+1b72222651c5b0295981e26d1333fadfcfb6a480,High-Quality Face Image SR Using Conditional Generative Adversarial Networks,National Chung Hsing University,National Chung Hsing University,"國立中興大學, 145, 興大路, 積善里, 頂橋子頭, 南區, 臺中市, 402, 臺灣",24.12084345,120.67571165,edu,
+1bad8a9640cdbc4fe7de12685651f44c4cff35ce,THETIS: Three Dimensional Tennis Shots a Human Action Dataset,National Technical University of Athens,National Technical University of Athens,"Εθνικό Μετσόβιο Πολυτεχνείο, Στουρνάρη, Μουσείο, Αθήνα, Δήμος Αθηναίων, Π.Ε. Κεντρικού Τομέα Αθηνών, Περιφέρεια Αττικής, Αττική, 11250, Ελλάδα",37.98782705,23.73179733,edu,
+1be0ce87bb5ba35fa2b45506ad997deef6d6a0a8,EXMOVES: Classifier-based Features for Scalable Action Recognition,Dartmouth College,Dartmouth College,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA",43.70479270,-72.29259090,edu,
+1b6f3139b1e59b90ab1aaf978359229b75985b49,Learning with a Wasserstein Loss,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+1b6f3139b1e59b90ab1aaf978359229b75985b49,Learning with a Wasserstein Loss,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+1b6f3139b1e59b90ab1aaf978359229b75985b49,Learning with a Wasserstein Loss,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+1badfeece64d1bf43aa55c141afe61c74d0bd25e,"OLÉ: Orthogonal Low-rank Embedding, A Plug and Play Geometric Loss for Deep Learning",Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+1bd65302bca0c1a593490088a0ce85988f3cc90a,"Ten Years of Pedestrian Detection, What Have We Learned?",Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+7710f1fc67f11a91afaa951f1b26e07e280391c5,Msee: Stochastic Cognitive Linguistic Behavior Models for Semantic Sensing,Stevens Institute of Technology,Stevens Institute of Technology,"Stevens Institute of Technology, River Terrace, Hoboken, Hudson County, New Jersey, 07030, USA",40.74225200,-74.02709490,edu,
+77fe6c859a59ac4438794d38d018d1e3c02d36dd,The µ-opioid system promotes visual attention to faces and eyes.,University of Oslo,University of Oslo,"UiO, Moltke Moes vei, Blindern, Nordre Aker, Oslo, 0851, Norge",59.93891665,10.72170765,edu,
+77fe6c859a59ac4438794d38d018d1e3c02d36dd,The µ-opioid system promotes visual attention to faces and eyes.,University of Oslo,University of Oslo,"UiO, Moltke Moes vei, Blindern, Nordre Aker, Oslo, 0851, Norge",59.93891665,10.72170765,edu,
+77fe6c859a59ac4438794d38d018d1e3c02d36dd,The µ-opioid system promotes visual attention to faces and eyes.,University of Oslo,University of Oslo,"UiO, Moltke Moes vei, Blindern, Nordre Aker, Oslo, 0851, Norge",59.93891665,10.72170765,edu,
+77fe6c859a59ac4438794d38d018d1e3c02d36dd,The µ-opioid system promotes visual attention to faces and eyes.,University of Oslo,University of Oslo,"UiO, Moltke Moes vei, Blindern, Nordre Aker, Oslo, 0851, Norge",59.93891665,10.72170765,edu,
+778ce81457383bd5e3fdb11b145ded202ebb4970,Semantic Compositional Networks for Visual Captioning,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+7778068b0ea08bf85824d49045a8facbf90c4803,Deep Subspace Clustering Networks,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+7735f63e5790006cb3d989c8c19910e40200abfc,Multispectral Imaging For Face Recognition Over Varying Illumination,University of Tennessee,University of Tennessee,"University of Tennessee, Melrose Avenue, Fort Sanders, Knoxville, Knox County, Tennessee, 37916, USA",35.95424930,-83.93073950,edu,
+7710232a3d8bb1ef4ab0b5b6042bed19380bf0de,Image description with a goal: Building efficient discriminating expressions for images,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+7710232a3d8bb1ef4ab0b5b6042bed19380bf0de,Image description with a goal: Building efficient discriminating expressions for images,National Cheng Kung University,National Cheng Kung University,"成大, 1, 大學路, 大學里, 前甲, 東區, 臺南市, 70101, 臺灣",22.99919160,120.21625134,edu,
+7710232a3d8bb1ef4ab0b5b6042bed19380bf0de,Image description with a goal: Building efficient discriminating expressions for images,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+7710232a3d8bb1ef4ab0b5b6042bed19380bf0de,Image description with a goal: Building efficient discriminating expressions for images,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+7710232a3d8bb1ef4ab0b5b6042bed19380bf0de,Image description with a goal: Building efficient discriminating expressions for images,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+77b1db2281292372c38926cc4aca32ef056011dc,Children’s Interpretation of Facial Expressions: The Long Path from Valence-Based to Specific Discrete Categories,Boston College,Boston College,"Boston College, 140, Commonwealth Avenue, Chestnut Hill, Newton, Middlesex County, Massachusetts, 02467, USA",42.33544810,-71.16813864,edu,
+77ee75f96e6498ccc7bb7ebcca2acd7cc4e33229,Sinkhorn AutoEncoders,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+77ee75f96e6498ccc7bb7ebcca2acd7cc4e33229,Sinkhorn AutoEncoders,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+77cfe37cd98910de3601795131305bea639a435a,Accelerated learning of Generalized Sammon Mappings,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+779db93204bee4a9540db1e79ceb0b45e5af77e9,Learning actions from the Web,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+779eb6a059990b9cee55e0add7bc34aed87b3733,3D Human Pose Estimation in the Wild by Adversarial Learning,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+779eb6a059990b9cee55e0add7bc34aed87b3733,3D Human Pose Estimation in the Wild by Adversarial Learning,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+779eb6a059990b9cee55e0add7bc34aed87b3733,3D Human Pose Estimation in the Wild by Adversarial Learning,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+77c53ec6ea448db4dad586e002a395c4a47ecf66,Face Recognition Based on Facial Features,National University of Sciences and Technology,National University of Sciences and Technology,"National University of Sciences and Technology (NUST), Kashmir Highway, جی - 10, ICT, وفاقی دارالحکومت اسلام آباد, 44000, ‏پاکستان‎",33.64434700,72.98850790,edu,
+77685c77a1fa39890006fe13f43738aac49a2c51,Attacking Visual Language Grounding with Adversarial Examples: A Case Study on Neural Image Captioning,"IBM Research, North Carolina",IBM Research,"IBM, East Cornwallis Road, Research Triangle Park, Nelson, Durham County, North Carolina, 27709, USA",35.90422720,-78.85565763,company,
+775be2fe9e6d7ca97209692ee3f85fb0f1b125af,ELEGANT: Exchanging Latent Encodings with GAN for Transferring Multiple Face Attributes,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+77a0e3e366e061b0ceb4a7a901ee18e420185447,Discriminative Bimodal Networks for Visual Localization and Detection with Natural Language Queries,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+77a53d4141a8081657ce08b13dc3328ac4a4e689,You'll never walk alone: Modeling social behavior for multi-target tracking,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+774cbb45968607a027ae4729077734db000a1ec5,From Bikers to Surfers: Visual Recognition of Urban Tribes,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+775c51b965e8ff37646a265aab64136b4a620526,Three viewpoints toward exemplar SVM,National Institute of Advanced Industrial Science and Technology,National Institute of Advanced Industrial Science and Technology,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本",36.05238585,140.11852361,edu,
+77acef6d0146465b9e9ad5817ad3e2c20ae64566,Informative Features for Model Comparison,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+77acef6d0146465b9e9ad5817ad3e2c20ae64566,Informative Features for Model Comparison,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+7729b0d1d3e26ce0eec1f019f3a98d6c7d926e10,Findings of the Second Shared Task on Multimodal Machine Translation and Multilingual Image Description,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+7729b0d1d3e26ce0eec1f019f3a98d6c7d926e10,Findings of the Second Shared Task on Multimodal Machine Translation and Multilingual Image Description,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+771431afa9b5c936dc970db8d02ae06f49d68638,TabletGaze : Dataset and Algorithm for Unconstrained Appearance-based Gaze Estimation in Mobile Tablets,Rice University,Rice University,"Rice University, Stockton Drive, Houston, Harris County, Texas, 77005-1890, USA",29.71679145,-95.40478113,edu,
+779e5beb515ed26c47dbfc08304fe49233063c1b,Generating more realistic images using gated MRF's,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+778eefd9f0f6189456fc25b7cdd2c3f4403a37a8,Audiovisual diarization of people in video content,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+77be85f6c3c465ef8e17d3ec6251794cf4ff5940,Generative Domain-Migration Hashing for Sketch-to-Image Retrieval,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"Columbus, OH 43210, USA",40.01419050,-83.03091430,edu,
+771e78ccea7a03dd94bca10a7215dfe3b0f4623b,Supervised Geodesic Propagation for Semantic Label Transfer,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+7792fbc59f3eafc709323cdb63852c5d3a4b23e9,Pose from Action: Unsupervised Learning of Pose Features based on Motion,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+7792fbc59f3eafc709323cdb63852c5d3a4b23e9,Pose from Action: Unsupervised Learning of Pose Features based on Motion,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+77fbbf0c5729f97fcdbfdc507deee3d388cd4889,Pose-Robust 3D Facial Landmark Estimation from a Single 2D Image,University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+776362314f1479f5319aaf989624ac604ba42c65,Attribute Learning in Large-Scale Datasets,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+773ecf8cfa7e544ac48cf146b71df19146e1400e,Improving Shape Deformation in Unsupervised Image-to-Image Translation,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+773ecf8cfa7e544ac48cf146b71df19146e1400e,Improving Shape Deformation in Unsupervised Image-to-Image Translation,University of Bath,University of Bath,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK",51.37914420,-2.32523320,edu,
+774b649c75078e10759b3b6c8ea581e68fc45a40,Robust Anchor Embedding for Unsupervised Video Person re-IDentification in the Wild,Hong Kong Baptist University,Hong Kong Baptist University,"香港浸會大學 Hong Kong Baptist University, 安明街 On Ming Street, 石門 Shek Mun, 石古壟 Shek Kwu Lung, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1132, 中国",22.38742010,114.20822220,edu,
+77eba8289e257df835e16ce8e0919acebd02f7e4,Face Mosaicing for Pose Robust Video-Based Recognition,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+7771807cd05f78a4591f2d0b094ddd3e0bd5339a,Adaptive Feeding: Achieving Fast and Accurate Detections by Adaptively Combining Object Detectors,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+77fb9e36196d7bb2b505340b6b94ba552a58b01b,Detecting the Moment of Completion: Temporal Models for Localising Action Completion,University of Bristol,University of Bristol,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK",51.45848370,-2.60977520,edu,
+48059da276a8a93e0bb3faaa8421589f09377559,Eigengaze - covert behavioral biometric exploiting visual attention characteristics,Queensland University of Technology,Queensland University of Technology,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.47715625,153.02841004,edu,
+486840f4f524e97f692a7f6b42cd19019ee71533,DeepVisage: Making Face Recognition Simple Yet With Powerful Generalization Skills,École Centrale de Lyon,Laboratoire LIRIS,"40 Avenue Guy de Collongue, 69130 Écully, France",45.78359660,4.76789480,edu,
+486840f4f524e97f692a7f6b42cd19019ee71533,DeepVisage: Making Face Recognition Simple Yet With Powerful Generalization Skills,Safran Identity and Security,Safran Identity & Security,"11 Boulevard Gallieni, 92130 Issy-les-Moulineaux, France",48.83249300,2.26747400,company,
+48810b60f1fe6fcb344538d5de8c54e5d64c20bb,Deep Sketch-Photo Face Recognition Assisted by Facial Attributes,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+48463a119f67ff2c43b7c38f0a722a32f590dfeb,Intelligent Method for Face Recognition of Infant,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+48463a119f67ff2c43b7c38f0a722a32f590dfeb,Intelligent Method for Face Recognition of Infant,Banaras Hindu University,Banaras Hindu University,"काशी हिन्दू विश्वविद्यालय, Semi Circle Road 2, ワーラーナシー, Jodhpur Colony, Vārānasi, Varanasi, Uttar Pradesh, 221005, India",25.26628870,82.99279690,edu,
+48463a119f67ff2c43b7c38f0a722a32f590dfeb,Intelligent Method for Face Recognition of Infant,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+48463a119f67ff2c43b7c38f0a722a32f590dfeb,Intelligent Method for Face Recognition of Infant,Banaras Hindu University,Banaras Hindu University,"काशी हिन्दू विश्वविद्यालय, Semi Circle Road 2, ワーラーナシー, Jodhpur Colony, Vārānasi, Varanasi, Uttar Pradesh, 221005, India",25.26628870,82.99279690,edu,
+48463a119f67ff2c43b7c38f0a722a32f590dfeb,Intelligent Method for Face Recognition of Infant,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+48463a119f67ff2c43b7c38f0a722a32f590dfeb,Intelligent Method for Face Recognition of Infant,Banaras Hindu University,Banaras Hindu University,"काशी हिन्दू विश्वविद्यालय, Semi Circle Road 2, ワーラーナシー, Jodhpur Colony, Vārānasi, Varanasi, Uttar Pradesh, 221005, India",25.26628870,82.99279690,edu,
+488d3e32d046232680cc0ba80ce3879f92f35cac,Facial Expression Recognition Using Texture Description of Displacement Image,Amirkabir University of Technology,Amirkabir University of Technology,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎",35.70451400,51.40972058,edu,
+488d3e32d046232680cc0ba80ce3879f92f35cac,Facial Expression Recognition Using Texture Description of Displacement Image,Amirkabir University of Technology,Amirkabir University of Technology,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎",35.70451400,51.40972058,edu,
+488d3e32d046232680cc0ba80ce3879f92f35cac,Facial Expression Recognition Using Texture Description of Displacement Image,Sharif University of Technology,Sharif University of Technology,"دانشگاه صنعتی شریف, خیابان آزادی, زنجان, منطقه ۹ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, 14588, ‏ایران‎",35.70362270,51.35125097,edu,
+482a31cd4705f3d56e468cc33486847fc100f568,Dynamic Probabilistic CCA for Analysis of Affective Behaviour,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+482a31cd4705f3d56e468cc33486847fc100f568,Dynamic Probabilistic CCA for Analysis of Affective Behaviour,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+482a31cd4705f3d56e468cc33486847fc100f568,Dynamic Probabilistic CCA for Analysis of Affective Behaviour,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+48381007b85e8a3b74e5401b2dfc1a5dfc897622,Sparse Representation and Dictionary Learning for Biometrics and Object Tracking,University of Miami,University of Miami,"University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA",25.71733390,-80.27866887,edu,
+48381007b85e8a3b74e5401b2dfc1a5dfc897622,Sparse Representation and Dictionary Learning for Biometrics and Object Tracking,University of Miami,University of Miami,"University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA",25.71733390,-80.27866887,edu,
+48b9f9ddf17bd29b957b09f9000576e53acf8719,Ringtail: Feature Selection For Easier Nowcasting,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+48b9f9ddf17bd29b957b09f9000576e53acf8719,Ringtail: Feature Selection For Easier Nowcasting,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+48b9f9ddf17bd29b957b09f9000576e53acf8719,Ringtail: Feature Selection For Easier Nowcasting,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+48b9f9ddf17bd29b957b09f9000576e53acf8719,Ringtail: Feature Selection For Easier Nowcasting,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+480040b64a972bf51f8debabc4f9421fd2c7b829,"Towards recognizing ""cool"": can end users help computer vision recognize subjective attributes of objects in images?",Oregon State University,Oregon State University,"OSU Beaver Store, 538, Southwest 6th Avenue, Portland Downtown, Portland, Multnomah County, Oregon, 97204, USA",45.51982890,-122.67797964,edu,
+48019c177ec1e650d0d67feaaf38ae12a74fa644,Markov Network-Based Unified Classifier for Face Identification,Korea Advanced Institute of Science and Technology,Korea Advanced Institute of Science and Technology,"카이스트, 291, 대학로, 온천2동, 온천동, 유성구, 대전, 34141, 대한민국",36.36971910,127.36253700,edu,
+48d66e07041b8aa042d7a3d263fddc624bbc1e32,Multiclass Learning with Simplex Coding,McGovern Institute for Brain Research,McGovern Institute for Brain Research,"McGovern Institute for Brain Research (MIT), Main Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.36262950,-71.09144810,edu,
+48fea82b247641c79e1994f4ac24cad6b6275972,Mining discriminative components with low-rank and sparsity constraints for face recognition,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+48734cb558b271d5809286447ff105fd2e9a6850,Facial Expression Recognition Using Enhanced Deep 3D Convolutional Neural Networks,University of Denver,University of Denver,"University of Denver, Driscoll Bridge, Denver, Denver County, Colorado, 80208, USA",39.67665410,-104.96220300,edu,
+48a417cfeba06feb4c7ab30f06c57ffbc288d0b5,Robust Dictionary Learning by Error Source Decomposition,Northwestern University,Northwestern University,"Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA",42.05511640,-87.67581113,edu,
+48c41ffab7ff19d24e8df3092f0b5812c1d3fb6e,Multi-modal Embedding for Main Product Detection in Fashion,Waseda University,Waseda University,"早稲田大学 北九州キャンパス, 2-2, 有毛引野線, 八幡西区, 北九州市, 福岡県, 九州地方, 808-0135, 日本",33.88987280,130.70856205,edu,
+488a61e0a1c3768affdcd3c694706e5bb17ae548,Fitting a 3D Morphable Model to Edges: A Comparison Between Hard and Soft Correspondences,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+48ffb705e94dde426b7241108ca915a5ecab6414,The Human Factor: Behavioral and Neural Correlates of Humanized Perception in Moral Decision Making,University of Vienna,University of Vienna,"Uni Wien, 1, Universitätsring, Schottenviertel, KG Innere Stadt, Innere Stadt, Wien, 1010, Österreich",48.21313020,16.36068653,edu,
+48910f9b6ccc40226cd4f105ed5291571271b39e,Learning Discriminative Fisher Kernels,Delft University of Technology,Delft University of Technology,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland",51.99882735,4.37396037,edu,
+48a9241edda07252c1aadca09875fabcfee32871,Convolutional Experts Constrained Local Model for Facial Landmark Detection,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+48a9241edda07252c1aadca09875fabcfee32871,Convolutional Experts Constrained Local Model for Facial Landmark Detection,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+48a9241edda07252c1aadca09875fabcfee32871,Convolutional Experts Constrained Local Model for Facial Landmark Detection,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+48f0055295be7b175a06df5bc6fa5c6b69725785,Facial Action Unit Recognition from Video Streams with Recurrent Neural Networks,University of the Witwatersrand,University of the Witwatersrand,"University of the Witwatersrand, Empire Road, Johannesburg Ward 60, Johannesburg, City of Johannesburg Metropolitan Municipality, Gauteng, 2001, South Africa",-26.18888130,28.02479073,edu,
+48729e4de8aa478ee5eeeb08a72a446b0f5367d5,Compressed face hallucination,"University of California, Merced","University of California, Merced","University of California, Merced, Ansel Adams Road, Merced County, California, USA",37.36566745,-120.42158888,edu,
+48e6c6d981efe2c2fb0ae9287376fcae59da9878,Sidekick Policy Learning for Active Visual Exploration,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+48174c414cfce7f1d71c4401d2b3d49ba91c5338,Robust Performance-driven 3D Face Tracking in Long Range Depth Scenes,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+48174c414cfce7f1d71c4401d2b3d49ba91c5338,Robust Performance-driven 3D Face Tracking in Long Range Depth Scenes,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+48174c414cfce7f1d71c4401d2b3d49ba91c5338,Robust Performance-driven 3D Face Tracking in Long Range Depth Scenes,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+48143b1270a2df096577e6681b1f1ceadacf73e8,An Improved Evaluation Framework for Generative Adversarial Networks,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+4895bc6e7ebb894e73c08b9dea50eea293c8dcbc,A Deep Learning Pipeline for Image Understanding and Acoustic Modeling,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+480888bad59b314236f2d947ebf308ae146c98e4,Zoom Better to See Clearer: Human and Object Parsing with Hierarchical Auto-Zoom Net,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+483e19f50ff47b0bf5e57b0cea65a7f084779b92,Annotation Artifacts in Natural Language Inference Data,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+483e19f50ff47b0bf5e57b0cea65a7f084779b92,Annotation Artifacts in Natural Language Inference Data,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+483e19f50ff47b0bf5e57b0cea65a7f084779b92,Annotation Artifacts in Natural Language Inference Data,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+483e19f50ff47b0bf5e57b0cea65a7f084779b92,Annotation Artifacts in Natural Language Inference Data,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+488375ae857a424febed7c0347cc9590989f01f7,Convolutional neural networks for the analysis of broadcasted tennis games,University of Crete,University of Crete,"House of Europe, Μακεδονίας, Ρέθυμνο, Δήμος Ρεθύμνης, Περιφερειακή Ενότητα Ρεθύμνου, Περιφέρεια Κρήτης, Κρήτη, 930100, Ελλάδα",35.37130240,24.47544080,edu,
+4836b084a583d2e794eb6a94982ea30d7990f663,Cascaded Face Alignment via Intimacy Definition Feature,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+4889d2927a9120931978ec487f55114d99eeb65d,Comparing Generative Adversarial Network Techniques for Image Creation and Modification,University of Groningen,University of Groningen,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland",53.21967825,6.56251482,edu,
+4889d2927a9120931978ec487f55114d99eeb65d,Comparing Generative Adversarial Network Techniques for Image Creation and Modification,University of Groningen,University of Groningen,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland",53.21967825,6.56251482,edu,
+482e8a9323fca1e27fccf03d2a58a36873d0ae10,Assessing Social Cognition of Persons with Schizophrenia in a Chinese Population: A Pilot Study,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+482e8a9323fca1e27fccf03d2a58a36873d0ae10,Assessing Social Cognition of Persons with Schizophrenia in a Chinese Population: A Pilot Study,University of Texas at Dallas,University of Texas at Dallas,"University of Texas at Dallas, Richardson, Dallas County, Texas, 78080, USA",32.98207990,-96.75662780,edu,
+482321a30da9edc4da8efb73f8e7d763c56811f2,Categorizing Turn-Taking Interactions,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+488fff23542ff397cdb1ced64db2c96320afc560,Weakly supervised localization of novel objects using appearance transfer,University of Manitoba,University of Manitoba,"University of Manitoba, Gillson Street, Normand Park, Saint Vital, Winnipeg, Manitoba, R3T 2N2, Canada",49.80915360,-97.13304179,edu,
+48cdb6a640b4259c61c476fb529d7c176e8345a9,Eyelid-openness and mouth curvature influence perceived intelligence beyond attractiveness.,University of St Andrews,University of St Andrews,"University of St Andrews, North Street, Albany Park Student accommodation, Carngour, St Andrews, Fife, Scotland, KY16 9AJ, UK",56.34119840,-2.79309380,edu,
+48cdb6a640b4259c61c476fb529d7c176e8345a9,Eyelid-openness and mouth curvature influence perceived intelligence beyond attractiveness.,University of St Andrews,University of St Andrews,"University of St Andrews, North Street, Albany Park Student accommodation, Carngour, St Andrews, Fife, Scotland, KY16 9AJ, UK",56.34119840,-2.79309380,edu,
+486c9a0e5eb1e0bf107c31c2bf9689b25e18383b,Face Recognition: Primates in the Wild,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+48d784e556646cf1a42eff051cb2083a2d8e3234,Automatic action unit detection in infants using convolutional neural network,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+48d784e556646cf1a42eff051cb2083a2d8e3234,Automatic action unit detection in infants using convolutional neural network,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+4813d9332a1f3ef2bf5846e81005895322310bed,3D Face Recognition,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+488370572904a8fd97f5bc68fbdf0b3b3984cc76,Alibaba-Venus at ActivityNet Challenge 2018-Task C Trimmed Event Recognition ( Moments in Time ),"Alibaba Group, Hangzhou, China","Alibaba Group, Hangzhou, China","Alibaba Group, 五常街道, 余杭区 (Yuhang), 杭州市 Hangzhou, 浙江省, 中国",30.28106540,120.02139087,edu,
+4896909796f9bd2f70a2cb24bf18daacd6a12128,Spatial Bag of Features Learning for Large Scale Face Image Retrieval,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+484c380c322b2b5cb5756c9e94608949fb5d5e4d,Temporal face embedding and propagation in photo collections,Technion,Technion,"Haifa, 3200003, Israel",32.77677830,35.02312710,edu,
+481fb0a74528fa7706669a5cce6a212ac46eaea3,Recognizing RGB Images by Learning from RGB-D Data,"Institute for Infocomm Research, Singapore","Institute for Infocomm Research, Singapore","1 Fusionopolis Way, #21-01 Connexis, Singapore 138632",1.29889260,103.78731070,edu,
+481fb0a74528fa7706669a5cce6a212ac46eaea3,Recognizing RGB Images by Learning from RGB-D Data,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+4822c1bf765cf99193a231c000c19ae5d0c10a00,Multi-object Tracking via Constrained Sequential Labeling,Oregon State University,Oregon State University,"OSU Beaver Store, 538, Southwest 6th Avenue, Portland Downtown, Portland, Multnomah County, Oregon, 97204, USA",45.51982890,-122.67797964,edu,
+480492ca998b3393b370d176d1f990a3db1c8e12,Factorized Binary Codes for Large-Scale Nearest Neighbor Search,University of British Columbia,University of British Columbia,"University of British Columbia, Eagles Drive, Hawthorn Place, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.25839375,-123.24658161,edu,
+481fde422a31e21ac12644e0df95cf66528f52c2,Integration of Local Image Cues for Probabilistic 2D Pose Recovery,Kingston University,Kingston University,"Kingston University, Kingston Hill, Kingston Vale, Kingston-upon-Thames, London, Greater London, England, KT2 7TF, UK",51.42930860,-0.26840440,edu,
+4823dcfb0bdc1af20e4da85035b8fc2c71a6add1,Exploring Structural Information and Fusing Multiple Features for Person Re-identification,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+706b1123217febf934ee5c33b4af27507a85771a,AVEC 2013: the continuous audio/visual emotion and depression recognition challenge,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+706b1123217febf934ee5c33b4af27507a85771a,AVEC 2013: the continuous audio/visual emotion and depression recognition challenge,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+706b1123217febf934ee5c33b4af27507a85771a,AVEC 2013: the continuous audio/visual emotion and depression recognition challenge,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+706b1123217febf934ee5c33b4af27507a85771a,AVEC 2013: the continuous audio/visual emotion and depression recognition challenge,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+706b1123217febf934ee5c33b4af27507a85771a,AVEC 2013: the continuous audio/visual emotion and depression recognition challenge,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+70eeacf9f86ba08fceb3dd703cf015016dac1930,Coupled information-theoretic encoding for face photo-sketch recognition,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+70eeacf9f86ba08fceb3dd703cf015016dac1930,Coupled information-theoretic encoding for face photo-sketch recognition,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+70eeacf9f86ba08fceb3dd703cf015016dac1930,Coupled information-theoretic encoding for face photo-sketch recognition,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+70eeacf9f86ba08fceb3dd703cf015016dac1930,Coupled information-theoretic encoding for face photo-sketch recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+70580ed8bc482cad66e059e838e4a779081d1648,Gender Classification using Multi-Level Wavelets on Real World Face Images,Shaheed Zulfikar Ali Bhutto Institute of Science and Technology,Shaheed Zulfikar Ali Bhutto Institute of,"Shaheed Zulfikar Ali Bhutto Institute of Science and Technology - Karachi Campus, Block 5, Clifton Block 5, CBC, ڪراچي Karachi, Karāchi District, سنڌ, 75600, ‏پاکستان‎",24.81865870,67.03165850,edu,
+70cbbf1ac971a89e18240e70d86fde2ac5190bad,Mammoth Data in the Cloud: Clustering Social Images,Indiana University Bloomington,Indiana University Bloomington,"Indiana University Bloomington, East 17th Street, Bloomington, Monroe County, Indiana, 47408, USA",39.17720475,-86.51540030,edu,
+703dc33736939f88625227e38367cfb2a65319fe,Trespassing the Boundaries: Labeling Temporal Bounds for Object Interactions in Egocentric Video,University of Bristol,University of Bristol,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK",51.45848370,-2.60977520,edu,
+7060f6062ba1cbe9502eeaaf13779aa1664224bb,A Glimpse Far into the Future: Understanding Long-term Crowd Worker Quality,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+7033b916a7f2510ca9766b7a8ed15920a9f9e2f3,Which concepts are worth extracting?,Oregon State University,Oregon State University,"OSU Beaver Store, 538, Southwest 6th Avenue, Portland Downtown, Portland, Multnomah County, Oregon, 97204, USA",45.51982890,-122.67797964,edu,
+7033b916a7f2510ca9766b7a8ed15920a9f9e2f3,Which concepts are worth extracting?,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+70db3a0d2ca8a797153cc68506b8650908cb0ada,An Overview of Research Activities in Facial Age Estimation Using the FG-NET Aging Database,Cyprus University of Technology,Cyprus University of Technology,"Mitropoli Building - Cyprus University of Technology, Anexartisias, Limasol - Λεμεσός, Limassol - Λεμεσός, Κύπρος - Kıbrıs, 3036, Κύπρος - Kıbrıs",34.67567405,33.04577648,edu,
+70c012367fd77d6b6dbd97620724fcdf72bb15ea,ImVerde: Vertex-Diminished Random Walk for Learning Network Representation from Imbalanced Data,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+70c012367fd77d6b6dbd97620724fcdf72bb15ea,ImVerde: Vertex-Diminished Random Walk for Learning Network Representation from Imbalanced Data,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+70c012367fd77d6b6dbd97620724fcdf72bb15ea,ImVerde: Vertex-Diminished Random Walk for Learning Network Representation from Imbalanced Data,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+701f56f0eac9f88387de1f556acef78016b05d52,Direct Shape Regression Networks for End-to-End Face Alignment,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+701f56f0eac9f88387de1f556acef78016b05d52,Direct Shape Regression Networks for End-to-End Face Alignment,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+7002d6fc3e0453320da5c863a70dbb598415e7aa,Understanding Discrete Facial Expressions in Video Using an Emotion Avatar Image,"University of California, Riverside","University of California, Riverside","University of California, Riverside, Linden Street, Riverside, Riverside County, California, 92521, USA",33.98071305,-117.33261035,edu,
+7071cd1ee46db4bc1824c4fd62d36f6d13cad08a,Face Detection through Scale-Friendly Deep Convolutional Networks,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+7004e0808b0905761b583d74524b932ba66c20dd,Paper Doll Parsing: Retrieving Similar Styles to Parse Clothing Items,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+707a542c580bcbf3a5a75cce2df80d75990853cc,Disentangled Variational Representation for Heterogeneous Face Recognition,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+707a542c580bcbf3a5a75cce2df80d75990853cc,Disentangled Variational Representation for Heterogeneous Face Recognition,Johns Hopkins University,"Johns Hopkins University, 3400 N. Charles St, Baltimore, MD 21218, USA","3400 N Charles St, Baltimore, MD 21218, USA",39.32905300,-76.61942500,edu,
+70569810e46f476515fce80a602a210f8d9a2b95,Apparent Age Estimation from Face Images Combining General and Children-Specialized Deep Learning Models,EURECOM,EURECOM,"Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France",43.61438600,7.07112500,edu,
+704d88168bdfabe31b6ff484507f4a2244b8c52b,MLtuner: System Support for Automatic Machine Learning Tuning,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+703f8f9ed65ab87e67716cbfbee0e323aed5b9f5,Fully Convolutional Adaptation Networks for Semantic Segmentation,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+703f8f9ed65ab87e67716cbfbee0e323aed5b9f5,Fully Convolutional Adaptation Networks for Semantic Segmentation,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+709198f1a7d42fb87d46a8f5dc48e23e6564df1c,Visual and semantic similarity in ImageNet,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+703c9c8f20860a1b1be63e6df1622b2021b003ca,Flip-Invariant Motion Representation,National Institute of Advanced Industrial Science and Technology,National Institute of Advanced Industrial Science and Technology,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本",36.05238585,140.11852361,edu,
+70ab0ec5358e40fdcf7247f31e6e927cb21442f1,Exploiting skeletal structure in computer vision annotation with Benders decomposition,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+70a69569ba61f3585cd90c70ca5832e838fa1584,Friendly Faces: Weakly Supervised Character Identification,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+7017a4c7a972d546ef2d59d29bf7c0ba6888e2ba,Human Pose and Shape Estimation from Multi-View Images for Virtual Dressing Rooms,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+704fe3839742e8d022fdc110f3a502e42a0ef89e,Running Head: RACIAL BIAS AND AGE 1 The Generalization of Implicit Racial Bias to Young Black Boys: Automatic Stereotyping or Automatic Prejudice?,University of Iowa,University of Iowa,"University of Iowa, Hawkeye Court, Iowa City, Johnson County, Iowa, 52246, USA",41.66590000,-91.57310307,edu,
+704fe3839742e8d022fdc110f3a502e42a0ef89e,Running Head: RACIAL BIAS AND AGE 1 The Generalization of Implicit Racial Bias to Young Black Boys: Automatic Stereotyping or Automatic Prejudice?,University of Iowa,University of Iowa,"University of Iowa, Hawkeye Court, Iowa City, Johnson County, Iowa, 52246, USA",41.66590000,-91.57310307,edu,
+70af9756f10bf6128a47fef4509df7e8bb9a290e,Sidekick Policy Learning for Active Visual Exploration,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+70be5432677c0fbe000ac0c28dda351a950e0536,Detecting Social Groups in Crowded Surveillance Videos Using Visual Attention,Heriot-Watt University,Heriot-Watt University,"Heriot-Watt University - Edinburgh Campus, Third Gait, Currie, Gogarbank, City of Edinburgh, Scotland, EH14 4AS, UK",55.91029135,-3.32345777,edu,
+70be5432677c0fbe000ac0c28dda351a950e0536,Detecting Social Groups in Crowded Surveillance Videos Using Visual Attention,Heriot-Watt University,Heriot-Watt University,"Heriot-Watt University - Edinburgh Campus, Third Gait, Currie, Gogarbank, City of Edinburgh, Scotland, EH14 4AS, UK",55.91029135,-3.32345777,edu,
+70a3bea7e9a4f7af6e80832d467a457c18d2389a,Generative Adversarial Forests for Better Conditioned Adversarial Learning,Monash University,Monash University,"Monash University, Mile Lane, Parkville, City of Melbourne, Victoria, 3000, Australia",-37.78397455,144.95867433,edu,
+70c9d11cad12dc1692a4507a97f50311f1689dbf,Video Frame Synthesis Using Deep Voxel Flow,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+70c9d11cad12dc1692a4507a97f50311f1689dbf,Video Frame Synthesis Using Deep Voxel Flow,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+7031d7fde9f184b72416759f8a9be4155616f456,Benchmarking Face Detection in a Mobile/Tablet Environment,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+1e3df3ca8feab0b36fd293fe689f93bb2aaac591,Multi-task Recurrent Neural Network for Immediacy Prediction,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+1e5ca4183929929a4e6f09b1e1d54823b8217b8e,Classification in the Presence of Heavy Label Noise: A Markov Chain Sampling Framework,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+1ef4815f41fa3a9217a8a8af12cc385f6ed137e1,Rendering of Eyes for Eye-Shape Registration and Gaze Estimation,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+1ef4815f41fa3a9217a8a8af12cc385f6ed137e1,Rendering of Eyes for Eye-Shape Registration and Gaze Estimation,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+1e8e3a954762f58501b970928071ed1b58b4fe40,Self Scaled Regularized Robust Regression,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+1e394f669c4f63c593677d2850c3d022a6fc1ac8,Multi-view Registration Based on Weighted Low Rank and Sparse Matrix Decomposition of Motions,Jiangnan University,Jiangnan University,"江南大学站, 蠡湖大道, 滨湖区, 南场村, 滨湖区 (Binhu), 无锡市 / Wuxi, 江苏省, 214121, 中国",31.48542550,120.27395810,edu,
+1ef368e79d1a33d700905221696e552745e1ec7f,On Crater Verification Using Mislocalized Crater Regions,University of Nevada,University of Nevada,"Orange 1, Evans Avenue, Reno, Washoe County, Nevada, 89557, USA",39.54694490,-119.81346566,edu,
+1ea74780d529a458123a08250d8fa6ef1da47a25,Videos from the 2013 Boston Marathon : An Event Reconstruction Dataset for Synchronization and Localization,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+1e07500b00fcd0f65cf30a11f9023f74fe8ce65c,Whole space subclass discriminant analysis for face recognition,"A*STAR, Singapore","Institute for Infocomm Research, A*STAR, Singapore","1 Fusionopolis Way, #21-01 Connexis, Singapore 138632",1.29889260,103.78731070,edu,
+1e8c87181ac8db93431a0c7470c71561e1ee565f,Convolutional Neural Networks for Aerial Vehicle Detection and Recognition,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+1e8c87181ac8db93431a0c7470c71561e1ee565f,Convolutional Neural Networks for Aerial Vehicle Detection and Recognition,University of Liverpool,University of Liverpool,"Victoria Building, Brownlow Hill, Knowledge Quarter, Liverpool, North West England, England, L3, UK",53.40617900,-2.96670819,edu,
+1e19ea6e7f1c04a18c952ce29386252485e4031e,MATLAB Based Face Recognition System Using PCA and Neural Network,Kurukshetra University,Kurukshetra University,"Kurukshetra University, SH6, Kurukshetra, Haryana, 132118, India",29.95826275,76.81563045,edu,
+1ed39db202e606f25aff93f3e4fe135283a50cc2,Video text detection and recognition: Dataset and benchmark,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+1ed39db202e606f25aff93f3e4fe135283a50cc2,Video text detection and recognition: Dataset and benchmark,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+1ec98785ac91808455b753d4bc00441d8572c416,Curriculum Learning for Facial Expression Recognition,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+1ed6c7e02b4b3ef76f74dd04b2b6050faa6e2177,Face Detection with a 3D Model,Florida State University,Florida State University,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA",30.44235995,-84.29747867,edu,
+1ed6c7e02b4b3ef76f74dd04b2b6050faa6e2177,Face Detection with a 3D Model,National Institutes of Health,National Institutes of Health,"NIH, Pooks Hill, Bethesda, Montgomery County, Maryland, USA",39.00041165,-77.10327775,edu,
+1efacaa0eaa7e16146c34cd20814d1411b35538e,Action Completion: A Temporal Model for Moment Detection,University of Bristol,University of Bristol,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK",51.45848370,-2.60977520,edu,
+1e3a9b0cfdeca614c5689a3419016c89bf9fbdfa,Facial color is an efficient mechanism to visually transmit emotion,Ohio State University,The Ohio State University,"The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA",40.00471095,-83.02859368,edu,
+1e3a9b0cfdeca614c5689a3419016c89bf9fbdfa,Facial color is an efficient mechanism to visually transmit emotion,MIT,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+1eba6fc35a027134aa8997413647b49685f6fbd1,Superpower glass: delivering unobtrusive real-time social cues in wearable systems,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+1eafc8e7316d7257955ef09f903d318d55fac1fc,Ensemble of furthest subspace pairs for enhanced image set matching,"CSIRO, Australia","NICTA, PO Box 6020, St Lucia, QLD 4067, Australia","Research Way, Clayton VIC 3168, Australia",-37.90627370,145.13194490,edu,f.k.a. NICTA
+1eafc8e7316d7257955ef09f903d318d55fac1fc,Ensemble of furthest subspace pairs for enhanced image set matching,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+1e1d7cbbef67e9e042a3a0a9a1bcefcc4a9adacf,A Multi-level Contextual Model for Person Recognition in Photo Albums,Stevens Institute of Technology,Stevens Institute of Technology,"Stevens Institute of Technology, River Terrace, Hoboken, Hudson County, New Jersey, 07030, USA",40.74225200,-74.02709490,edu,
+1e3739716e163fce6fded71eda078a18334aa83b,The HFB Face Database for Heterogeneous Face Biometrics research,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+1e54025a6b399bfc210a52a8c3314e8f570c2204,DenseCap: Fully Convolutional Localization Networks for Dense Captioning,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+1e92c074ab9082863a48fecdbf212f1897687a74,Improving Deep Learning using Generic Data Augmentation,University of Cape Town,University of Cape Town,"University of Cape Town, Engineering Mall, Cape Town Ward 59, Cape Town, City of Cape Town, Western Cape, CAPE TOWN, South Africa",-33.95828745,18.45997349,edu,
+1e92c074ab9082863a48fecdbf212f1897687a74,Improving Deep Learning using Generic Data Augmentation,University of Cape Town,University of Cape Town,"University of Cape Town, Engineering Mall, Cape Town Ward 59, Cape Town, City of Cape Town, Western Cape, CAPE TOWN, South Africa",-33.95828745,18.45997349,edu,
+1ef5ce743a44d8a454dbfc2657e1e2e2d025e366,Accurate Corner Detection Methods using Two Step Approach,Thapar University,Thapar University,"Thapar University, Hostel Road, Patiala, Punjab, 147001, India",30.35566105,76.36581641,edu,
+1eb249b515f7c09ae2663c1b5c49243906aabf22,Learning Structured Semantic Embeddings for Visual Recognition,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+1e58d7e5277288176456c66f6b1433c41ca77415,Bootstrapping Fine-Grained Classifiers: Active Learning with a Crowd in the Loop,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+1e5a1619fe5586e5ded2c7a845e73f22960bbf5a,Group Membership Prediction,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+1e747986c9efd481d380b28896115812eed54f8f,Bayesian Face Recognition Based on Markov Random Field Modeling,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+1ecd20f7fc34344e396825d27bc5a9871ab0d0c2,SG-One: Similarity Guidance Network for One-Shot Semantic Segmentation,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+1ed5d99fe46c0b5083f97e65841cd8535a9451c1,Dating Historical Color Images,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+1ed5d99fe46c0b5083f97e65841cd8535a9451c1,Dating Historical Color Images,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+1ed5d99fe46c0b5083f97e65841cd8535a9451c1,Dating Historical Color Images,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+1ed5d99fe46c0b5083f97e65841cd8535a9451c1,Dating Historical Color Images,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+1ed5d99fe46c0b5083f97e65841cd8535a9451c1,Dating Historical Color Images,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+1e0fc2e5537db53080ec9a875df614dd8018c873,Mixture of Counting CNNs: Adaptive Integration of CNNs Specialized to Specific Appearance for Crowd Counting,Hiroshima University,Hiroshima University,"Hiroshima University 広島大学 東広島キャンパス, 出会いの道 Deai-no-michi Str., 西条下見, 東広島市, 広島県, 中国地方, 739-0047, 日本",34.40197660,132.71231950,edu,
+1e9f1bbb751fe538dde9f612f60eb946747defaa,Identity-aware convolutional neural networks for facial expression recognition,Tampere University of Technology,Tampere University of Technology,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi",61.44964205,23.85877462,edu,
+1e917fe7462445996837934a7e46eeec14ebc65f,Expression Classification using Wavelet Packet Method on Asymmetry Faces,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+1e917fe7462445996837934a7e46eeec14ebc65f,Expression Classification using Wavelet Packet Method on Asymmetry Faces,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+1e43d706d38cbacac563de9d0659230de00d73f2,Paragon: QoS-aware scheduling for heterogeneous datacenters,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+1eab1ffed59092d6bf19900b7fb283e6dd0d01a2,Learning Socially Embedded Visual Representation from Scratch,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+1e7032e91fa01b90896c3cbfe5edf4f35ffd9628,A mixture model for aggregation of multiple pre-trained weak classifiers,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+1edc9eefb555e044f12d8c8cd56e8cc950abf8bb,Global Alignment for Dynamic 3D Morphable Model Construction,University of Bath,University of Bath,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK",51.37914420,-2.32523320,edu,
+1e0dd12f2bff234a4df71641bc95068733506858,Handwritten Word Spotting with Corrected Attributes,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+1e57ace361b941d9d210e59a9bbac7697b6bcff5,Maximizing all margins: Pushing face recognition with Kernel Plurality,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+1e57ace361b941d9d210e59a9bbac7697b6bcff5,Maximizing all margins: Pushing face recognition with Kernel Plurality,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+1ee896784275d0517963815b7c7ae1c788940409,A Causal And-Or Graph Model for Visibility Fluent Reasoning in Tracking Interacting Objects,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+1ee896784275d0517963815b7c7ae1c788940409,A Causal And-Or Graph Model for Visibility Fluent Reasoning in Tracking Interacting Objects,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+1e006cb837d4d01efcc92167443ccf3282329f89,Mask-guided Contrastive Attention Model for Person Re-Identification,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+1e006cb837d4d01efcc92167443ccf3282329f89,Mask-guided Contrastive Attention Model for Person Re-Identification,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+1e99c95ea015a0639448fdf60f9694fed5464500,Interactive Object Counting,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+1e516273554d87bbe1902fa0298179c493299035,Age Classification in Unconstrained Conditions Using LBP Variants,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+1e8ead5045c4b4de598c4eb570bfd9da14970129,A General Two-Step Approach to Learning-Based Hashing,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+1e95a366f9212654f79027894dbedf1ef44ca4c3,From Zero-Shot Learning to Conventional Supervised Classification: Unseen Visual Data Synthesis,University of East Anglia,University of East Anglia,"Arts (Lower Walkway Level), The Square, Westfield View, Earlham, Norwich, Norfolk, East of England, England, NR4 7TJ, UK",52.62215710,1.24091360,edu,
+1e95a366f9212654f79027894dbedf1ef44ca4c3,From Zero-Shot Learning to Conventional Supervised Classification: Unseen Visual Data Synthesis,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"Columbus, OH 43210, USA",40.01419050,-83.03091430,edu,
+1e95a366f9212654f79027894dbedf1ef44ca4c3,From Zero-Shot Learning to Conventional Supervised Classification: Unseen Visual Data Synthesis,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+1e95a366f9212654f79027894dbedf1ef44ca4c3,From Zero-Shot Learning to Conventional Supervised Classification: Unseen Visual Data Synthesis,Northumbria University,Northumbria University,"Northumbria University, Birkdale Close, High Heaton, Newcastle upon Tyne, Tyne and Wear, North East England, England, NE7 7TP, UK",55.00306320,-1.57463231,edu,
+1eb596303ce1f90e8070090be02c768e91fd75ed,Using Viseme Recognition to Improve a Sign Language Translation System,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+1e94cc91c5293c8fc89204d4b881552e5b2ce672,Unsupervised Alignment of Actions in Video with Text Descriptions,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+1e94cc91c5293c8fc89204d4b881552e5b2ce672,Unsupervised Alignment of Actions in Video with Text Descriptions,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+1e7995220c6f17dc649b0caeab34c617248aa167,Adversarial Geometry-Aware Human Motion Prediction,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+1e4822f64f105a3f27888cc463e7e49e95c1e0f0,Facial Expression Recognition Based on Anatomical Structure of Human Face,Amirkabir University of Technology,Amirkabir University of Technology,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎",35.70451400,51.40972058,edu,
+1e9758d282568763b209252bc3aeb7b47d269881,Learning Ordered Representations with Nested Dropout,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+1e9758d282568763b209252bc3aeb7b47d269881,Learning Ordered Representations with Nested Dropout,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+1e9758d282568763b209252bc3aeb7b47d269881,Learning Ordered Representations with Nested Dropout,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+1e944bd5a3907546d633691b8c83fec77d880657,Learning Kinematic Models for Articulated Objects,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+1e9461b2e48e11638b85c2f2dc7bca043f9d60a8,Gait Representation Using Flow Fields,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+1e254f77ccbf3bd796ac3f60001384f59eba4ec2,Human Context: Modeling Human-Human Interactions for Monocular 3D Pose Estimation,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+1e1ab3d08fd71ab7368464d9adf78be1170fa728,Non-parametric estimation of Jensen-Shannon Divergence in Generative Adversarial Network training,"IBM Research, North Carolina",IBM Research,"IBM, East Cornwallis Road, Research Triangle Park, Nelson, Durham County, North Carolina, 27709, USA",35.90422720,-78.85565763,company,
+1e0a5ce5204f3f7503c39df6d200627cc331efe2,Automatic Arabic Image Captioning using RNN-LSTM-Based Language Model and CNN,King Saud University,King Saud University,"King Saud University جامعة الملك سعود, road_16, King Saud University District, Al Maather Municipality, الرياض, منطقة الرياض, 12393 4057, السعودية",24.72464030,46.62335012,edu,
+1e8eec6fc0e4538e21909ab6037c228547a678ba,enVisage : Face Recognition in Videos,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+1e6ed6ca8209340573a5e907a6e2e546a3bf2d28,Pooling Faces: Template Based Face Recognition with Pooled Face Images,Open University of Israel,Open University of Israel,"האוניברסיטה הפתוחה, 15, אבא חושי, חיפה, גבעת דאונס, חיפה, מחוז חיפה, NO, ישראל",32.77824165,34.99565673,edu,
+84984c7201a7e5bc8ef4c01f0a7cfbe08c2c523b,GNAS: A Greedy Neural Architecture Search Method for Multi-Attribute Learning,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+84af45a22535589053d0b00c9d6050c1150f9eaf,Exploiting Processor Heterogeneity for Interactive Services,Florida International University,Florida International University,"FIU, Southwest 14th Street, Sweetwater, University Park, Miami-Dade County, Florida, 33199, USA",25.75533775,-80.37628897,edu,
+841855205818d3a6d6f85ec17a22515f4f062882,Low Resolution Face Recognition in the Wild,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+84e21140422935d0a18ef0a616ed1ce1541112b0,Probabilistic Joint Face-Skull Modelling for Facial Reconstruction,University of Basel,University of Basel,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra",47.56126510,7.57529610,edu,
+84c0f814951b80c3b2e39caf3925b56a9b2e1733,16 Computation and Palaeography : Potentials and Limits,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+841bf196ee0086c805bd5d1d0bddfadc87e424ec,Locally Kernel-based Nonlinear Regression for Face Recognition,Islamic Azad University,Islamic Azad University,"دانشگاه آزاد اسلامی, همدان, بخش مرکزی شهرستان همدان, شهرستان همدان, استان همدان, ‏ایران‎",34.84529990,48.55962120,edu,
+841bf196ee0086c805bd5d1d0bddfadc87e424ec,Locally Kernel-based Nonlinear Regression for Face Recognition,Amirkabir University of Technology,Amirkabir University of Technology,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎",35.70451400,51.40972058,edu,
+844bfcee3bc559960ae7a2b1fd68fcf7a926dc5a,SPICE: Semantic Propositional Image Caption Evaluation,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+842ad1d5b6ea8a982be544b562ec91d907f879bd,Synthesis-Based Low-Cost Gaze Analysis,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+84eac516ed3b75233c5110468d3fddaec83a2895,Test-Time Adaptation for 3D Human Pose Estimation,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+84eac516ed3b75233c5110468d3fddaec83a2895,Test-Time Adaptation for 3D Human Pose Estimation,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+84eac516ed3b75233c5110468d3fddaec83a2895,Test-Time Adaptation for 3D Human Pose Estimation,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+842d82081f4b27ca2d4bc05c6c7e389378f0c7b8,Usage of affective computing in recommender systems,University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.05015580,14.46907327,edu,
+84d665608c7c005d38290df392b0ba0157ba32ee,Social Cognitive Training Improves Emotional Processing and Reduces Aggressive Attitudes in Ex-combatants,Maastricht University,Maastricht University,"UNS60, Professor Ten Hoorlaan, Randwyck, Maastricht, Limburg, Nederland, 6229EV, Nederland",50.83367120,5.71589000,edu,
+84d665608c7c005d38290df392b0ba0157ba32ee,Social Cognitive Training Improves Emotional Processing and Reduces Aggressive Attitudes in Ex-combatants,Heriot-Watt University,Heriot-Watt University,"Heriot-Watt University - Edinburgh Campus, Third Gait, Currie, Gogarbank, City of Edinburgh, Scotland, EH14 4AS, UK",55.91029135,-3.32345777,edu,
+84d665608c7c005d38290df392b0ba0157ba32ee,Social Cognitive Training Improves Emotional Processing and Reduces Aggressive Attitudes in Ex-combatants,University of Basel,University of Basel,"Faculty of Psychology, University of Basel, 60-62, Missionsstrasse, Grossbasel, Am Ring, Basel, Basel-Stadt, 4055, Schweiz/Suisse/Svizzera/Svizra",47.56126510,7.57529610,edu,
+84efa16406c8838550cbbed48f0355b936bbe845,Online Hierarchical Sparse Representation of Multifeature for Robust Object Tracking,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+84676c330e4f8962703ca531db761c96bfda8067,Scalable misbehavior detection in online video chat services,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+84e6669b47670f9f4f49c0085311dce0e178b685,Face frontalization for Alignment and Recognition,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+84e6669b47670f9f4f49c0085311dce0e178b685,Face frontalization for Alignment and Recognition,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+8432320153aa3a348138e27ef80ae3e8631bb6f8,Optimization of neural networks via finite-value quantum fluctuations,Tohoku University,Tohoku University,"Tohoku University, 五橋通, 青葉区, 仙台市, 宮城県, 東北地方, 980-0811, 日本",38.25309450,140.87365930,edu,
+84cf838be40e2ab05732fbefbb93ccb2afb0cb48,Recognizing Handwritten Characters,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+8488610866b29f279461f67ae948a3cfc72f6961,Sign language recognition using dynamic time warping and hand shape distance based on histogram of oriented gradient features,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+8488610866b29f279461f67ae948a3cfc72f6961,Sign language recognition using dynamic time warping and hand shape distance based on histogram of oriented gradient features,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+8488610866b29f279461f67ae948a3cfc72f6961,Sign language recognition using dynamic time warping and hand shape distance based on histogram of oriented gradient features,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+84c01c9760cd294718bd7c4b4c93596db1e5e068,Unsupervised Monocular Depth Estimation with Left-Right Consistency,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+84e3629b1c1c169125f777870e2009d8bcfdc2d7,Low-Latency Video Semantic Segmentation,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+84e3629b1c1c169125f777870e2009d8bcfdc2d7,Low-Latency Video Semantic Segmentation,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+84a0f6db2b7155a83728101728794713898a859a,Learning 3D Keypoint Descriptors for Non-rigid Shape Matching,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+847e07387142c1bcc65035109ccce681ef88362c,Feature Synthesis Using Genetic Programming for Face Expression Recognition,"University of California, Riverside","University of California, Riverside","University of California, Riverside, Linden Street, Riverside, Riverside County, California, 92521, USA",33.98071305,-117.33261035,edu,
+8473ccaa87f506f3d27e52d04ec4078668d7fc2e,Object Recognition Using Junctions,Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+843e6f1e226480e8a6872d8fd7b7b2cd74b637a4,Palmprint Recognition Using Directional Representation and Compressed Sensing,Southwest Jiaotong University,Southwest Jiaotong University,"西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国",30.69784700,104.05208110,edu,
+84f904a71bee129a1cf00dc97f6cdbe1011657e6,Fashioning with Networks: Neural Style Transfer to Design Clothes,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+84f904a71bee129a1cf00dc97f6cdbe1011657e6,Fashioning with Networks: Neural Style Transfer to Design Clothes,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+84f904a71bee129a1cf00dc97f6cdbe1011657e6,Fashioning with Networks: Neural Style Transfer to Design Clothes,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+849f891973ad2b6c6f70d7d43d9ac5805f1a1a5b,ResNet Backbone Proposals Classification Loss Regression Loss Classification Loss Regression Loss RPN Classification Branch Box Regression Branch Conv Conv,Tencent,"Tencent AI Lab, China","Ke Ji Zhong Yi Lu, Nanshan Qu, Shenzhen Shi, Guangdong Sheng, China, 518057",22.54471540,113.93571640,company,"Keji Middle 1st Rd, Nanshan Qu, Shenzhen Shi, Guangdong Sheng, China, 518057"
+846c028643e60fefc86bae13bebd27341b87c4d1,Face Recognition Under Varying Illumination Based on MAP Estimation Incorporating Correlation Between Surface Points,Institute of Industrial Science,Institute of Industrial Science,"産業技術総合研究所;西事業所, 学園西大通り, Onogawa housing complex, つくば市, 茨城県, 関東地方, 305-0051, 日本",36.05238585,140.11852361,edu,
+8455d208f43ec69971eabfcb28fddf64c3c9896b,Quality-adaptive deep learning for pedestrian detection,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+8455d208f43ec69971eabfcb28fddf64c3c9896b,Quality-adaptive deep learning for pedestrian detection,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+845299d67c87dc7f5f610b0c4380feb4daa4d0cc,Zoom-Net: Mining Deep Feature Interactions for Visual Relationship Recognition,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+845299d67c87dc7f5f610b0c4380feb4daa4d0cc,Zoom-Net: Mining Deep Feature Interactions for Visual Relationship Recognition,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+845299d67c87dc7f5f610b0c4380feb4daa4d0cc,Zoom-Net: Mining Deep Feature Interactions for Visual Relationship Recognition,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+4a374a6fe2ecd5f4889d7141a0521dea087ee667,Dissertation S cene specific object detection and tracking,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+4a14a321a9b5101b14ed5ad6aa7636e757909a7c,Learning Semi-Supervised Representation Towards a Unified Optimization Framework for Semi-Supervised Learning,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+4a14a321a9b5101b14ed5ad6aa7636e757909a7c,Learning Semi-Supervised Representation Towards a Unified Optimization Framework for Semi-Supervised Learning,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+4aa286914f17cd8cefa0320e41800a99c142a1cd,Leveraging Context to Support Automated Food Recognition in Restaurants,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+4a9d906935c9de019c61aedc10b77ee10e3aec63,Cross Modal Distillation for Supervision Transfer,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+4aeebd1c9b4b936ed2e4d988d8d28e27f129e6f1,See the Difference: Direct Pre-Image Reconstruction and Pose Estimation by Differentiating HOG,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+4a44381f7c639451a797b2d3016b1d4cb54736dc,Pedestrian Travel Time Estimation in Crowded Scenes,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+4a44381f7c639451a797b2d3016b1d4cb54736dc,Pedestrian Travel Time Estimation in Crowded Scenes,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+4a8b8746def96caa3efd65548040c5c597c4312a,Building and using a semantivisual image hierarchy,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+4a8b8746def96caa3efd65548040c5c597c4312a,Building and using a semantivisual image hierarchy,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+4a07fe50742c31daffd77cdcac15eaca72070b2a,Examining CNN Representations With Respect to Dataset Bias,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+4ae59d2a28abd76e6d9fb53c9e7ece833dce7733,A Survey on Mobile Affective Computing,Hong Kong University of Science and Technology,Hong Kong University of Science and Technology,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.33863040,114.26203370,edu,
+4ab10174a4f98f7e2da7cf6ccfeb9bc64c8e7da8,Efficient Metric Learning for Real-World Face Recognition,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+4a484d97e402ed0365d6cf162f5a60a4d8000ea0,A Crowdsourcing Approach for Finding Misidentifications of Bibliographic Records,University of Tsukuba,University of Tsukuba,"University of Tsukuba, つばき通り, Kananemoto-satsukabe village, つくば市, 茨城県, 関東地方, 305-8377, 日本",36.11120580,140.10551760,edu,
+4a83d9d07cbac4a8a279073e3873d01f3215f2f8,Anticipating Accidents in Dashcam Videos,National Tsing Hua University,National Tsing Hua University,"國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.79254840,120.99511830,edu,
+4a83d9d07cbac4a8a279073e3873d01f3215f2f8,Anticipating Accidents in Dashcam Videos,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+4a3ba4a8f6945382b50d053b58aa0fc7c5199b4d,Efficient Evaluation of SVM Classifiers Using Error Space Encoding,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+4a8f521b929f72da2e9ee4af9f43e941f02bd114,Data-Driven Scene Understanding from 3D Models,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+4a8f521b929f72da2e9ee4af9f43e941f02bd114,Data-Driven Scene Understanding from 3D Models,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+4afded694bb067c45b591c98e0951e8988d7d2d6,3D RoI-aware U-Net for Accurate and Efficient Colorectal Tumor Segmentation,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+4afded694bb067c45b591c98e0951e8988d7d2d6,3D RoI-aware U-Net for Accurate and Efficient Colorectal Tumor Segmentation,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+4afdb1c53d0173030868a9fecee4c0216dc45c9e,An analysis-by-synthesis method based on sparse representation for heterogeneous face biometrics,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+4afdb1c53d0173030868a9fecee4c0216dc45c9e,An analysis-by-synthesis method based on sparse representation for heterogeneous face biometrics,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+4aa093d1986b4ad9b073ac9edfb903f62c00e0b0,Facial Recognition with Encoded Local Projections,University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.54724732,edu,
+4aa093d1986b4ad9b073ac9edfb903f62c00e0b0,Facial Recognition with Encoded Local Projections,University of Waterloo,University of Waterloo,"University of Waterloo, 200, University Avenue West, Northdale, Beechwood, Waterloo, Regional Municipality of Waterloo, Ontario, N2L 3G1, Canada",43.47061295,-80.54724732,edu,
+4aabd6db4594212019c9af89b3e66f39f3108aac,The Mere Exposure Effect and Classical Conditioning,"University of Colorado, Boulder","University of Colorado, Boulder","Naropa University, Arapahoe Avenue, The Hill, Boulder, Boulder County, Colorado, 80309, USA",40.01407945,-105.26695944,edu,
+4adb97b096b700af9a58d00e45a2f980136fcbb5,Exploring Temporal Preservation Networks for Precise Temporal Action Localization,"National University of Defense Technology, China","National University of Defence Technology, Changsha 410000, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.22902090,112.99483204,edu,
+4a2ba5d7b41ae1d8334c5b8bb1e76ce29e4367ee,Relational divergence based classification on Riemannian manifolds,"CSIRO, Australia","NICTA, PO Box 6020, St Lucia, QLD 4067, Australia","Research Way, Clayton VIC 3168, Australia",-37.90627370,145.13194490,edu,f.k.a. NICTA
+4a2ba5d7b41ae1d8334c5b8bb1e76ce29e4367ee,Relational divergence based classification on Riemannian manifolds,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+4ac61814d0f624ebda190b240ede72f0b156ff22,Face Recognition by Support Vector Machines,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+4a691bd830cd0fdbb4a13ba91160e973386250dd,Viewpoint Adaptation for Person Detection,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+4ae291b070ad7940b3c9d3cb10e8c05955c9e269,Automatic Detection of Naturalistic Hand-over-Face Gesture Descriptors,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+4a06ae9d41b384d6b1954b42a63385310b5d43fc,Facial feature detection and tracking with automatic template selection,University of Manchester,University of Manchester,"University of Manchester - Main Campus, Brunswick Street, Curry Mile, Ardwick, Manchester, Greater Manchester, North West England, England, M13 9NR, UK",53.46600455,-2.23300881,edu,
+4ae234a7eda3fc4e28fadbc75ee2603a0e078fcb,Understanding Human Motion : Recognition and Retrieval of Human Activities,Bilkent University,Bilkent University,"Bilkent Üniversitesi, 3. Cadde, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87204890,32.75395155,edu,
+4aa8db1a3379f00db2403bba7dade5d6e258b9e9,Recognizing Combinations of Facial Action Units with Different Intensity Using a Mixture of Hidden Markov Models and Neural Network,Sharif University of Technology,Sharif University of Technology,"دانشگاه صنعتی شریف, خیابان آزادی, زنجان, منطقه ۹ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, 14588, ‏ایران‎",35.70362270,51.35125097,edu,
+4ac4e8d17132f2d9812a0088594d262a9a0d339b,Rank Constrained Recognition under Unknown Illuminations,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+4a77c8ab5d538541ac5f37ae6200d34360ff36b6,Calibration in Eye Tracking Using Transfer Learning,"KTH Royal Institute of Technology, Stockholm","KTH Royal Institute of Technology, Stockholm","KTH, Teknikringen, Lärkstaden, Norra Djurgården, Östermalms stadsdelsområde, Sthlm, Stockholm, Stockholms län, Svealand, 114 28, Sverige",59.34986645,18.07063213,edu,
+4af50ecb45709829a840a75ddc84f56f288c5a64,Learning Detectors Quickly with Stationary Statistics,Queensland University of Technology,Queensland University of Technology,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.47715625,153.02841004,edu,
+4af50ecb45709829a840a75ddc84f56f288c5a64,Learning Detectors Quickly with Stationary Statistics,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+4aeb5520a941fb59f20093cbeaf4b84b35df78fc,Gait Representation Using Flow Fields,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+4ab4e283c6c635bee029b4857be670504fa9d1b9,Feature Extraction Based on Direct Calculation of Mutual Information,Ajou University,Ajou University,"아주대학교, 성호대교, 이의동, 영통구, 수원시, 경기, 16499, 대한민국",37.28300030,127.04548469,edu,
+4af4098deffc22cf901f38b4634d316df68975ab,Image segmentation with patch-pair density priors,Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+4af4098deffc22cf901f38b4634d316df68975ab,Image segmentation with patch-pair density priors,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+4ab69672e1116427d685bf7c1edb5b1fd0573b5e,Spatial pooling of heterogeneous features for image applications,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+4ab69672e1116427d685bf7c1edb5b1fd0573b5e,Spatial pooling of heterogeneous features for image applications,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+4acd683b5f91589002e6f50885df51f48bc985f4,Bridging computer vision and social science: A multi-camera vision system for social interaction training analysis,GE Global Research Center,GE Global Research Center,"GE Global Research Center, Aqueduct, Niskayuna, Schenectady County, New York, USA",42.82982480,-73.87719385,edu,
+4a1d640f5e25bb60bb2347d36009718249ce9230,Towards Multi-view and Partially-Occluded Face Alignment,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+4ae33d64f8515a023f10e20af20f62a2a5a76f13,Sketch Recognition with Deep Visual-Sequential Fusion Model,Southwest Jiaotong University,Southwest Jiaotong University,"西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国",30.69784700,104.05208110,edu,
+4ae33d64f8515a023f10e20af20f62a2a5a76f13,Sketch Recognition with Deep Visual-Sequential Fusion Model,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+4a0707131dab1c64c03bfa0809b050d34fafeeb5,Cell Lineage Tracing in Lens-Free Microscopy Videos,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+4a961dda9fc9a07d6a0bfbe59cc38b2605e61d2f,Robust model adaptation for tracking with online weighted color and shape feature,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+4a79923948c6ccda965077287dc6fd1d3728d680,Dropout Training for Support Vector Machines,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+4a1a7e5d52e097d1defb523575fb8de1a5b24171,Multicore Construction of k-d Trees for High Dimensional Point Data,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+4a9086cf2637b7ea54855187b978af7a89bfceff,Atypical neural specialization for social percepts in autism spectrum disorder.,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+2453dd38cde21f3248b55d281405f11d58168fa9,Multi-scale Patch Aggregation (MPA) for Simultaneous Detection and Segmentation,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+24b37016fee57057cf403fe2fc3dda78476a8262,Automatic Recognition of Eye Blinking in Spontaneously Occurring Behavior,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+24b37016fee57057cf403fe2fc3dda78476a8262,Automatic Recognition of Eye Blinking in Spontaneously Occurring Behavior,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+24fe0a4a2304da39b8ff5630ba9a64d505326d0e,Progressive Operational Perceptron with Memory,Tampere University of Technology,Tampere University of Technology,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi",61.44964205,23.85877462,edu,
+24fe0a4a2304da39b8ff5630ba9a64d505326d0e,Progressive Operational Perceptron with Memory,Qatar University,Qatar University,"Qatar University, Roindabout 3, Al Tarfa (68), أم صلال, 24685, ‏قطر‎",25.37461295,51.48980354,edu,
+247cab87b133bd0f4f9e8ce5e7fc682be6340eac,Robust Eye Center Localization through Face Alignment and Invariant Isocentric Patterns,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+247cab87b133bd0f4f9e8ce5e7fc682be6340eac,Robust Eye Center Localization through Face Alignment and Invariant Isocentric Patterns,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+247cab87b133bd0f4f9e8ce5e7fc682be6340eac,Robust Eye Center Localization through Face Alignment and Invariant Isocentric Patterns,"Joint Research Institute, Foshan, China","Joint Research Institute, Foshan, China","广东顺德中山大学卡内基梅隆大学国际联合研究院, 南国东路, 顺德区, 五村, 顺德区 (Shunde), 佛山市 / Foshan, 广东省, 0757, 中国",22.83388935,113.28541825,edu,
+24fc311970e097efc317c0f98d2df37b828bfbad,Semi-supervised hierarchical semantic object parsing,Amirkabir University of Technology,Amirkabir University of Technology,"دانشگاه صنعتی امیرکبیر, ولی عصر, میدان ولیعصر, منطقه ۶ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, نبش برادران مظفر, ‏ایران‎",35.70451400,51.40972058,edu,
+24cb375a998f4af278998f8dee1d33603057e525,Projection Metric Learning on Grassmann Manifold with Application to Video based Face Recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+24cb375a998f4af278998f8dee1d33603057e525,Projection Metric Learning on Grassmann Manifold with Application to Video based Face Recognition,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+247ca98c5a46616044cf6ae32b0d5b4140a7a161,High-performance Semantic Segmentation Using Very Deep Fully Convolutional Networks,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+24065d385bae5579be07607a1f63eb79cebf8773,Incremental Learning of NCM Forests for Large-Scale Image Classification,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+24f9248f01df3020351347c2a3f632e01de72090,Reconstructing a fragmented face from a cryptographic identification protocol,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+24662ce3f3499ec8c5ecc546dac69dbffad578c6,Sparse Representation Based Fisher Discrimination Dictionary Learning for Image Classification,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+24662ce3f3499ec8c5ecc546dac69dbffad578c6,Sparse Representation Based Fisher Discrimination Dictionary Learning for Image Classification,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+24959d1a9c9faf29238163b6bcaf523e2b05a053,High Accuracy Head Pose Tracking Survey,Warsaw University of Technology,Warsaw University of Technology,"Politechnika Warszawska, 1, Plac Politechniki, VIII, Śródmieście, Warszawa, mazowieckie, 00-661, RP",52.22165395,21.00735776,edu,
+24f1febcdf56cd74cb19d08010b6eb5e7c81c362,Synergistic methods for using language in robotics,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+24f1febcdf56cd74cb19d08010b6eb5e7c81c362,Synergistic methods for using language in robotics,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+24f1febcdf56cd74cb19d08010b6eb5e7c81c362,Synergistic methods for using language in robotics,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+24f1febcdf56cd74cb19d08010b6eb5e7c81c362,Synergistic methods for using language in robotics,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+248e2d3079f4f59789770a7f57244a434e8467d0,The Many Moods of Emotion,Orange,Orange Labs,"78 Rue Olivier de Serres, 75015 Paris, France",48.83321220,2.29421550,company,"78 Rue Olivier de Serres, Paris, 75015"
+24cf9fe9045f50c732fc9c602358af89ae40a9f7,Attribute Recognition from Adaptive Parts,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+24cf9fe9045f50c732fc9c602358af89ae40a9f7,Attribute Recognition from Adaptive Parts,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+24cf9fe9045f50c732fc9c602358af89ae40a9f7,Attribute Recognition from Adaptive Parts,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+24cf9fe9045f50c732fc9c602358af89ae40a9f7,Attribute Recognition from Adaptive Parts,Tongji University,Tongji University,"同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国",31.28473925,121.49694909,edu,
+24f022d807352abf071880877c38e53a98254dcd,Are screening methods useful in feature selection? An empirical study,Florida State University,Florida State University,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA",30.44235995,-84.29747867,edu,
+24475686f64825c6eb503e57636fc1fcda724407,Hough Regions for Joining Instance Localization and Segmentation,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+245f8b05bdd1ac65a09a476440dc4b05ac05d4a0,An Online Learning Approach to Generative Adversarial Networks,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+24dbe0a133908500d25753542bbb720d71678c42,Multi-modal Person Localization And Emergency Detection Using The Kinect,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+24dbe0a133908500d25753542bbb720d71678c42,Multi-modal Person Localization And Emergency Detection Using The Kinect,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+24dbe0a133908500d25753542bbb720d71678c42,Multi-modal Person Localization And Emergency Detection Using The Kinect,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+24078db5422dfddf14b00fa79c38efa553845a10,"RGBD Datasets: Past, Present and Future",University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+241d2c517dbc0e22d7b8698e06ace67de5f26fdf,"Online, Real-Time Tracking Using a Category-to-Individual Detector",California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+24d45df91ebcfac7a49cdfb7116e971e12880612,UNICITY: A depth maps database for people detection in security airlocks,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+24e6a28c133b7539a57896393a79d43dba46e0f6,Robust Bayesian method for simultaneous block sparse signal recovery with applications to face recognition,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+242dc739b7cc49a7f54967a7d75d8a82f92bef59,Affinity Derivation and Graph Merge for Instance Segmentation,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+242dc739b7cc49a7f54967a7d75d8a82f92bef59,Affinity Derivation and Graph Merge for Instance Segmentation,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+242dc739b7cc49a7f54967a7d75d8a82f92bef59,Affinity Derivation and Graph Merge for Instance Segmentation,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+248db911e3a6a63ecd5ff6b7397a5d48ac15e77a,Enriching Texture Analysis with Semantic Data,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+241b86d3c71d14b8cc6044a425b047a0724cfdc9,Following Gaze in Video,MIT,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+24f1e2b7a48c2c88c9e44de27dc3eefd563f6d39,Recognition of Action Units in the Wild with Deep Nets and a New Global-Local Loss,Ohio State University,The Ohio State University,"The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA",40.00471095,-83.02859368,edu,
+24ab0116bf4f56290aa8f8dd98524bb43fab6d85,Dual Attention Matching Network for Context-Aware Feature Sequence based Person Re-Identification,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+24ab0116bf4f56290aa8f8dd98524bb43fab6d85,Dual Attention Matching Network for Context-Aware Feature Sequence based Person Re-Identification,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+243e9d490fe98d139003bb8dc95683b366866c57,Distinctive Parts for Relative attributes,International Institute of Information Technology,International Institute of Information Technology,"International Institute of Information Technology, Hyderabad, Campus Road, Ward 105 Gachibowli, Greater Hyderabad Municipal Corporation West Zone, Hyderabad, Rangareddy District, Telangana, 500032, India",17.44549570,78.34854698,edu,
+24d630946023cb421b9d960dd9983b4b5dcb800d,Efficient Detector Adaptation for Object Detection in a Video,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+248291285074203eb9ee8e0b8b517ac4ce7dc4aa,The Way Dogs (Canis familiaris) Look at Human Emotional Faces Is Modulated by Oxytocin. An Eye-Tracking Study,University of Padova,University of Padova,"Via Giovanni Gradenigo, 6, 35131 Padova PD, Italy",45.40811720,11.89437860,edu,"University of Padova, Via Gradenigo, 6 - 35131- Padova, Italy"
+24c6240c511f4daa7cf51e28b0a9fb15e365d4cc,Can Ground Truth Label Propagation from Video Help Semantic Segmentation?,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+24be26a04906987e7958c1544834bf9f18a92571,Referring Image Segmentation via Recurrent Refinement Networks,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+240eb0b34872c431ecf9df504671281f59e7da37,Cutout-search: Putting a name to the picture,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+240eb0b34872c431ecf9df504671281f59e7da37,Cutout-search: Putting a name to the picture,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+23ed7f18100717ba814b2859196e10c5d4fed216,Incorporating External Knowledge to Answer Open-Domain Visual Questions with Dynamic Memory Networks,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+23e6e8ab8f62d8f67525313c823e3cb4424ac578,Exploiting Convolution Filter Patterns for Transfer Learning,Istanbul Technical University,Istanbul Technical University,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye",41.10427915,29.02231159,edu,
+231b769f2e13724754fa09e7e5ab7d4b843075a0,IOD-CNN: Integrating object detection networks for event recognition,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+23b22f10d3e0a5726f58ae10c494a28103979c6f,Scalable k-NN graph construction for visual descriptors,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+23b22f10d3e0a5726f58ae10c494a28103979c6f,Scalable k-NN graph construction for visual descriptors,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+23aef683f60cb8af239b0906c45d11dac352fb4e,Incorporating Context Information into Deep Neural Network Acoustic Models,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+235d5620d05bb7710f5c4fa6fceead0eb670dec5,Who's Doing What: Joint Modeling of Names and Verbs for Simultaneous Face and Pose Annotation,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+23afec5c3edf6c65fc28d360a82820d34bbdc8a8,Confidence Preserving Machine for Facial Action Unit Detection,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+23afec5c3edf6c65fc28d360a82820d34bbdc8a8,Confidence Preserving Machine for Facial Action Unit Detection,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+23afec5c3edf6c65fc28d360a82820d34bbdc8a8,Confidence Preserving Machine for Facial Action Unit Detection,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+23a84a4a77b6662d553c9252331e6b7920053125,Latent Model Ensemble with Auto-localization,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+23a84a4a77b6662d553c9252331e6b7920053125,Latent Model Ensemble with Auto-localization,University of Missouri,University of Missouri,"L1, Maguire Boulevard, Lemone Industrial Park, Columbia, Boone County, Missouri, 65201, USA",38.92676100,-92.29193783,edu,
+231e545fdb1a516e29604fbd740e207b6f25c7dc,Perception of dynamic changes in facial affect and identity in autism.,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+231e545fdb1a516e29604fbd740e207b6f25c7dc,Perception of dynamic changes in facial affect and identity in autism.,Yale University,Yale University,"Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA",41.25713055,-72.98966960,edu,
+231e545fdb1a516e29604fbd740e207b6f25c7dc,Perception of dynamic changes in facial affect and identity in autism.,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+234e3f821c31d0b5b7c59c3c013ad258fa6f5912,Attention Directs Emotion : Directed Attention Drives Emotional Intensity and Distinctiveness of Facial Perception,"University of Colorado, Boulder","University of Colorado, Boulder","Naropa University, Arapahoe Avenue, The Hill, Boulder, Boulder County, Colorado, 80309, USA",40.01407945,-105.26695944,edu,
+235f8e797bc10561ecd684023d2c980d990ea217,End-to-End Learning of Deformable Mixture of Parts and Deep Convolutional Neural Networks for Human Pose Estimation,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+23ce6f404c504592767b8bec7d844d87b462de71,A Deep Face Identification Network Enhanced by Facial Attributes Prediction,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+2313c827d3cb9a291b6a00d015c29580862bbdcc,Weakly- and Semi-supervised Panoptic Segmentation,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+2328b0b5d4e9d4b78b1b9002407a533c21ff66f1,Evaluation of dimensionality reduction methods for image auto-annotation,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+23a450a075d752f1ec2b1e5e225de13d3bc37636,Subspace Learning in Krein Spaces: Complete Kernel Fisher Discriminant Analysis with Indefinite Kernels,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+23a432a388552ab52437d428e5af2b6c195be635,D O D Eep C Onvolutional N Ets R Eally N Eed to Be D Eep and C Onvolutional ?,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+23a432a388552ab52437d428e5af2b6c195be635,D O D Eep C Onvolutional N Ets R Eally N Eed to Be D Eep and C Onvolutional ?,University of Alberta,University of Alberta,"University of Alberta, 87 Avenue NW, University of Alberta, Edmonton, Alberta, T6G, Canada",53.52385720,-113.52282665,edu,
+23a432a388552ab52437d428e5af2b6c195be635,D O D Eep C Onvolutional N Ets R Eally N Eed to Be D Eep and C Onvolutional ?,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+23a432a388552ab52437d428e5af2b6c195be635,D O D Eep C Onvolutional N Ets R Eally N Eed to Be D Eep and C Onvolutional ?,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+23fd653b094c7e4591a95506416a72aeb50a32b5,Emotion Recognition using Fuzzy Rule- based System,Amity University,Amity University,"Amity University, Faizabad Road, Uttardhauna, Gomti Nagar, Tiwariganj, Lucknow, Uttar Pradesh, 226010, India",26.85095965,81.04950965,edu,
+23fd653b094c7e4591a95506416a72aeb50a32b5,Emotion Recognition using Fuzzy Rule- based System,Amity University,Amity University,"Amity University, Faizabad Road, Uttardhauna, Gomti Nagar, Tiwariganj, Lucknow, Uttar Pradesh, 226010, India",26.85095965,81.04950965,edu,
+231a6d2ee1cc76f7e0c5912a530912f766e0b459,Shape Primitive Histogram: A Novel Low-Level Face Representation for Face Recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+23fc6c6e1cd52a77215a285a462840cbb96aec39,"Cortical patterns of category-selective activation for faces, places and objects in adults with autism.",New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+23fc6c6e1cd52a77215a285a462840cbb96aec39,"Cortical patterns of category-selective activation for faces, places and objects in adults with autism.",University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+23b466abb866e3f160f4573a69666f861aef59cc,"EmotiW 2018: Audio-Video, Student Engagement and Group-Level Affect Prediction",University of Canberra,University of Canberra,"University of Canberra, University Drive, Bruce, Belconnen, Australian Capital Territory, 2617, Australia",-35.23656905,149.08446994,edu,
+23b466abb866e3f160f4573a69666f861aef59cc,"EmotiW 2018: Audio-Video, Student Engagement and Group-Level Affect Prediction",Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+2359c3f763e96e0ee62b1119c897a32ce9715a77,Neural Computing on a Raspberry Pi : Applications to Zebrafish Behavior Monitoring,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+23bdd2d82068419bf4923e6a0198fc0fa4468807,Bird Species Categorization Using Pose Normalized Deep Convolutional Nets,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+23fc83c8cfff14a16df7ca497661264fc54ed746,Comprehensive Database for Facial Expression Analysis,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+23fc83c8cfff14a16df7ca497661264fc54ed746,Comprehensive Database for Facial Expression Analysis,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+23fc83c8cfff14a16df7ca497661264fc54ed746,Comprehensive Database for Facial Expression Analysis,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+23fc83c8cfff14a16df7ca497661264fc54ed746,Comprehensive Database for Facial Expression Analysis,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+23fc83c8cfff14a16df7ca497661264fc54ed746,Comprehensive Database for Facial Expression Analysis,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+23fc83c8cfff14a16df7ca497661264fc54ed746,Comprehensive Database for Facial Expression Analysis,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+23fc83c8cfff14a16df7ca497661264fc54ed746,Comprehensive Database for Facial Expression Analysis,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+233913367b5006277b04a8f7651f51425f13697e,Efficient Inference with Multiple Heterogeneous Part Detectors for Human Pose Estimation,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+23339e409363a89cb5fe64e18e78a36286724de0,Semi-interactive tracing of persons in real-life surveillance data,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+23339e409363a89cb5fe64e18e78a36286724de0,Semi-interactive tracing of persons in real-life surveillance data,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+23c7465c16ea9343f74a400f92b970e84878b65a,Automatic classification of Chinese female facial beauty using Support Vector Machine,South China University of Technology,South China University of Technology,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.05020420,113.39880323,edu,
+232b6e2391c064d483546b9ee3aafe0ba48ca519,Optimization Problems for Fast AAM Fitting in-the-Wild,University of Lincoln,University of Lincoln,"University of Lincoln, Brayford Way, Whitton Park, New Boultham, Lincoln, Lincolnshire, East Midlands, England, LN6 7TS, UK",53.22853665,-0.54873472,edu,
+232b6e2391c064d483546b9ee3aafe0ba48ca519,Optimization Problems for Fast AAM Fitting in-the-Wild,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+237eba4822744a9eabb121fe7b50fd2057bf744c,Facial Expression Synthesis Using PAD Emotional Parameters for a Chinese Expressive Avatar,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+237eba4822744a9eabb121fe7b50fd2057bf744c,Facial Expression Synthesis Using PAD Emotional Parameters for a Chinese Expressive Avatar,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+2311d2488707655b79cf2b115e3c720bd4791918,Multi-Channel Pyramid Person Matching Network for Person Re-Identification,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+2311d2488707655b79cf2b115e3c720bd4791918,Multi-Channel Pyramid Person Matching Network for Person Re-Identification,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+232d99697a18c3065f2ba7c5f2d93d87731690f5,Robustifying eye center localization by head pose cues,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+23ca7c4367f7317c61ebb0574e3d04cfd9bc3893,Aberrant brain activation during gaze processing in boys with fragile X syndrome.,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+23000287004800912e3469772f3a2a48704dd303,Power SVM: Generalization with exemplar classification uncertainty,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+23000287004800912e3469772f3a2a48704dd303,Power SVM: Generalization with exemplar classification uncertainty,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+23e75f5ce7e73714b63f036d6247fa0172d97cb6,Facial expression (mood) recognition from facial images using committee neural networks,University of Akron,University of Akron,"University of Akron, East State Street, Stadium District, Cascade Valley, Akron, Summit County, Ohio, 44308, USA",41.07890350,-81.51971272,edu,
+23e75f5ce7e73714b63f036d6247fa0172d97cb6,Facial expression (mood) recognition from facial images using committee neural networks,University of Akron,University of Akron,"University of Akron, East State Street, Stadium District, Cascade Valley, Akron, Summit County, Ohio, 44308, USA",41.07890350,-81.51971272,edu,
+23429ef60e7a9c0e2f4d81ed1b4e47cc2616522f,A Domain Based Approach to Social Relation Recognition,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+236171d2c673194045b4c2e2837ddcc4a2041b8a,A Hierarchical Pose-Based Approach to Complex Action Understanding Using Dictionaries of Actionlets and Motion Poselets,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+2385007824daaf9eac9476fccb1501b7ac166ceb,Task-driven Visual Saliency and Attention-based Visual Question Answering,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+23aba7b878544004b5dfa64f649697d9f082b0cf,Locality-constrained discriminative learning and coding,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+2303d07d839e8b20f33d6e2ec78d1353cac256cf,Squeeze-and-Excitation on Spatial and Temporal Deep Feature Space for Action Recognition,Institute of Information Science,Institute of Information Science,"資訊科學研究所, 數理大道, 中研里, 南港子, 南港區, 臺北市, 11574, 臺灣",25.04107280,121.61475620,edu,
+23e19cc9d2318b07eeaf8a9d34245131eb1a58be,Hierarchical Convolutional Deep Learning in Computer Vision,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+23b48110cead14510ebb22dc388324466fd56c95,Robust Principal Component Analysis with Missing Data,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+23b48110cead14510ebb22dc388324466fd56c95,Robust Principal Component Analysis with Missing Data,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+23c3eb6ad8e5f18f672f187a6e9e9b0d94042970,Deep domain adaptation for describing people based on fine-grained clothing attributes,"IBM Research, North Carolina",IBM Research,"IBM, East Cornwallis Road, Research Triangle Park, Nelson, Durham County, North Carolina, 27709, USA",35.90422720,-78.85565763,company,
+23e2b9d1ac20e114f48850ab32b3d9136bec6826,DeepSkeleton: Skeleton Map for 3D Human Pose Regression,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+23e2b9d1ac20e114f48850ab32b3d9136bec6826,DeepSkeleton: Skeleton Map for 3D Human Pose Regression,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+23e2b9d1ac20e114f48850ab32b3d9136bec6826,DeepSkeleton: Skeleton Map for 3D Human Pose Regression,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+232ff2dab49cb5a1dae1012fd7ba53382909ec18,Semantic Video Segmentation from Occlusion Relations within a Convex Optimization Framework,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+23a59bfb96c4f543673e05b3cf6dc01b4173745b,ReD-SFA: Relation Discovery Based Slow Feature Analysis for Trajectory Clustering,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+23a59bfb96c4f543673e05b3cf6dc01b4173745b,ReD-SFA: Relation Discovery Based Slow Feature Analysis for Trajectory Clustering,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+23dd8d17ce09c22d367e4d62c1ccf507bcbc64da,Deep Density Clustering of Unconstrained Faces ( Supplementary Material ),University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+23b8a55785318ce90957a392607e24f620c4fccc,Bayesian Optimization with Inequality Constraints,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+4fc3c9aa51cd7922820bfd5547cf544ff99b415b,Generalized Zero-Shot Learning with Deep Calibration Network,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+4fc3c9aa51cd7922820bfd5547cf544ff99b415b,Generalized Zero-Shot Learning with Deep Calibration Network,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+4f9e00aaf2736b79e415f5e7c8dfebda3043a97d,"Machine Audition : Principles , Algorithms and Systems",University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+4f93cd09785c6e77bf4bc5a788e079df524c8d21,On a large sequence-based human gait database,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+4f0d9200647042e41dea71c35eb59e598e6018a7,Experiments of Image Retrieval Using Weak Attributes,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+4ffd50725b9cdff4ab0f13c9182cf3fdb671e76c,Portable performance on Asymmetric Multicore Processors,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+4ffd50725b9cdff4ab0f13c9182cf3fdb671e76c,Portable performance on Asymmetric Multicore Processors,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+4f19d33e808a6675f11fb624499d303368deafa1,Learning Monocular Depth by Distilling Cross-Domain Stereo Networks,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+4f5e1e70f51b30e4606f991ed0e912c84af90251,Using maximum consistency context for multiple target association in wide area traffic scenes,Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+4f6e1fe403b13279cd4674615d6d07ce002c9dec,Is there a connection between face symmetry and face recognition?,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+4f7967158b257e86d66bdabfdc556c697d917d24,Guaranteed Parameter Estimation of Discrete Energy Minimization for 3D Scene Parsing,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+4f69ad0e52e37ba06db1c2b89c180f3ba331cc4a,Automatic Generation of Grounded Visual Questions,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+4f69ad0e52e37ba06db1c2b89c180f3ba331cc4a,Automatic Generation of Grounded Visual Questions,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+4f922f6602f39baae94f63954005776e1da05671,Peer-Mediated Theatrical Engagement for Improving Reciprocal Social Interaction in Autism Spectrum Disorder,Virginia Polytechnic Institute and State University,Virginia Polytechnic Institute and State University,"Virginia Polytechnic Institute and State University, Duck Pond Drive, Blacksburg, Montgomery County, Virginia, 24061-9517, USA",37.21872455,-80.42542519,edu,
+4f922f6602f39baae94f63954005776e1da05671,Peer-Mediated Theatrical Engagement for Improving Reciprocal Social Interaction in Autism Spectrum Disorder,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+4ffe93dfb895c86ebad874c70113c4870c9bd5e3,Pose Machines: Articulated Pose Estimation via Inference Machines,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+4f1a74cfa7c8383a5dea97cb48c197da5b4f5ee0,LINKS: Learning-Based Multi-source IntegratioN FrameworK for Segmentation of Infant Brain Images,University of North Carolina at Chapel Hill,University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.91139710,-79.05045290,edu,
+4f1a74cfa7c8383a5dea97cb48c197da5b4f5ee0,LINKS: Learning-Based Multi-source IntegratioN FrameworK for Segmentation of Infant Brain Images,University of North Carolina at Chapel Hill,University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.91139710,-79.05045290,edu,
+4f1a74cfa7c8383a5dea97cb48c197da5b4f5ee0,LINKS: Learning-Based Multi-source IntegratioN FrameworK for Segmentation of Infant Brain Images,University of North Carolina at Chapel Hill,University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.91139710,-79.05045290,edu,
+4f1a74cfa7c8383a5dea97cb48c197da5b4f5ee0,LINKS: Learning-Based Multi-source IntegratioN FrameworK for Segmentation of Infant Brain Images,University of North Carolina at Chapel Hill,University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.91139710,-79.05045290,edu,
+4f1a74cfa7c8383a5dea97cb48c197da5b4f5ee0,LINKS: Learning-Based Multi-source IntegratioN FrameworK for Segmentation of Infant Brain Images,Korea University,Korea University,"고려대, 안암로, 제기동, 동대문구, 서울특별시, 02796, 대한민국",37.59014110,127.03623180,edu,
+4f36755fd732684b977a041ee3b0acc3492e5b6e,Normalized Metadata Generation for Human Retrieval Using Multiple Video Surveillance Cameras,Chung-Ang University,Chung-Ang University,"중앙대학교, 서달로15길, 흑석동, 동작구, 서울특별시, 06981, 대한민국",37.50882000,126.96190000,edu,
+4fcd19b0cc386215b8bd0c466e42934e5baaa4b7,Human Action Recognition Using Factorized Spatio-Temporal Convolutional Networks,Hong Kong University of Science and Technology,Hong Kong University of Science and Technology,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.33863040,114.26203370,edu,
+4fcd19b0cc386215b8bd0c466e42934e5baaa4b7,Human Action Recognition Using Factorized Spatio-Temporal Convolutional Networks,Hong Kong University of Science and Technology,Hong Kong University of Science and Technology,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.33863040,114.26203370,edu,
+4fcd19b0cc386215b8bd0c466e42934e5baaa4b7,Human Action Recognition Using Factorized Spatio-Temporal Convolutional Networks,University of Macau,University of Macau,"研究生宿舍 Residência de Estudantes de Pós-Graduação da Universidade de Macau, 澳門大學 Universidade de Macau, 嘉模堂區 Nossa Senhora do Carmo, 氹仔 Taipa, Universidade de Macau em Ilha de Montanha 澳門大學橫琴校區, 中国",22.12401870,113.54510901,edu,
+4fa2b00f78b2a73b63ad014f3951ec902b8b24ae,Semi-supervised hashing for scalable image retrieval,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+4fa2b00f78b2a73b63ad014f3951ec902b8b24ae,Semi-supervised hashing for scalable image retrieval,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+4fcf1bfc2a8989412adb67c97ce1bee72a996fff,3D morphable model construction for robust ear and face recognition,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+4f81f0c0019862046710d70b6ea880f989949e9a,An Efficient Approach for Differentiating Alzheimer's Disease from Normal Elderly Based on Multicenter MRI Using Gray-Level Invariant Features,"Sichuan University, Chengdu","Sichuan Univ., Chengdu","四川大学(华西校区), 校东路, 武侯区, 武侯区 (Wuhou), 成都市 / Chengdu, 四川省, 610014, 中国",30.64276900,104.06751175,edu,
+4f81f0c0019862046710d70b6ea880f989949e9a,An Efficient Approach for Differentiating Alzheimer's Disease from Normal Elderly Based on Multicenter MRI Using Gray-Level Invariant Features,Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+4f9958946ad9fc71c2299847e9ff16741401c591,Facial Expression Recognition with Recurrent Neural Networks,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+4f773c8e7ca98ece9894ba3a22823127a70c6e6c,A Real-Time System for Head Tracking and Pose Estimation,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+4f3f08bcc36778d45dfd5c6f6b8aff070bcfe9a4,FBI-Pose: Towards Bridging the Gap between 2D Images and 3D Human Poses using Forward-or-Backward Information,South China University of Technology,South China University of Technology,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.05020420,113.39880323,edu,
+4fd74b807b47a5975e9b0ab354bfd780e0d921d2,Armadillo: An Open Source C++ Linear Algebra Library for Fast Prototyping and Computationally Intensive Experiments,"CSIRO, Australia","NICTA, PO Box 6020, St Lucia, QLD 4067, Australia","Research Way, Clayton VIC 3168, Australia",-37.90627370,145.13194490,edu,f.k.a. NICTA
+4ff11512e4fde3d1a109546d9c61a963d4391add,Selecting Vantage Points for an Autonomous Quadcopter Videographer,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+4f028efe6708fc252851eee4a14292b7ce79d378,An integrated shape and intensity coding scheme for face recognition,George Mason University,George Mason University,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA",38.83133325,-77.30798839,edu,
+4ff4c27e47b0aa80d6383427642bb8ee9d01c0ac,Deep Convolutional Neural Networks and Support Vector Machines for Gender Recognition,University of Groningen,University of Groningen,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland",53.21967825,6.56251482,edu,
+4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7,Fashion Landmark Detection in the Wild,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+4f10a7697fb2a2c626d1190db2afba83c4ffe856,Cartoon-to-Photo Facial Translation with Generative Adversarial Networks,South China University of Technology,South China University of Technology,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.05020420,113.39880323,edu,
+4f10a7697fb2a2c626d1190db2afba83c4ffe856,Cartoon-to-Photo Facial Translation with Generative Adversarial Networks,South China University of Technology,South China University of Technology,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.05020420,113.39880323,edu,
+4f10a7697fb2a2c626d1190db2afba83c4ffe856,Cartoon-to-Photo Facial Translation with Generative Adversarial Networks,South China University of Technology,South China University of Technology,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.05020420,113.39880323,edu,
+4f10a7697fb2a2c626d1190db2afba83c4ffe856,Cartoon-to-Photo Facial Translation with Generative Adversarial Networks,South China University of Technology,South China University of Technology,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.05020420,113.39880323,edu,
+4f10a7697fb2a2c626d1190db2afba83c4ffe856,Cartoon-to-Photo Facial Translation with Generative Adversarial Networks,South China University of Technology,South China University of Technology,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.05020420,113.39880323,edu,
+4f10a7697fb2a2c626d1190db2afba83c4ffe856,Cartoon-to-Photo Facial Translation with Generative Adversarial Networks,South China University of Technology,South China University of Technology,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.05020420,113.39880323,edu,
+4f0b8f730273e9f11b2bfad2415485414b96299f,BDD100K: A Diverse Driving Video Database with Scalable Annotation Tooling,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+4f0b8f730273e9f11b2bfad2415485414b96299f,BDD100K: A Diverse Driving Video Database with Scalable Annotation Tooling,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+4f3484a1b08b332479f0cc0197528e9007292a90,Stream-Based Active Unusual Event Detection,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+4fc96ad5c9c0155961ace769f3a73b728854fa98,Three-Dimensional Face Recognition Using Surface Space Combinations,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+4f0eab1ee02f015313ebbbfada22407d1badd5d4,Sliced Wasserstein Distance for Learning Gaussian Mixture Models,University of Virginia,University of Virginia,"University of Virginia, Rotunda Alley, Carr's Hill, Albemarle County, Virginia, 22904-4119, USA",38.03536820,-78.50353220,edu,
+4f7b92bd678772552b3c3edfc9a7c5c4a8c60a8e,Deep Density Clustering of Unconstrained Faces,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+4fde52cd3af5c698f0807bc3b821ebb3a270a986,Impaired fixation to eyes during facial emotion labelling in children with bipolar disorder or severe mood dysregulation.,National Institutes of Health,National Institutes of Health,"NIH, Pooks Hill, Bethesda, Montgomery County, Maryland, USA",39.00041165,-77.10327775,edu,
+4fde52cd3af5c698f0807bc3b821ebb3a270a986,Impaired fixation to eyes during facial emotion labelling in children with bipolar disorder or severe mood dysregulation.,University of Denver,University of Denver,"University of Denver, Driscoll Bridge, Denver, Denver County, Colorado, 80208, USA",39.67665410,-104.96220300,edu,
+4fde52cd3af5c698f0807bc3b821ebb3a270a986,Impaired fixation to eyes during facial emotion labelling in children with bipolar disorder or severe mood dysregulation.,National Institutes of Health,National Institutes of Health,"NIH, Pooks Hill, Bethesda, Montgomery County, Maryland, USA",39.00041165,-77.10327775,edu,
+4fde52cd3af5c698f0807bc3b821ebb3a270a986,Impaired fixation to eyes during facial emotion labelling in children with bipolar disorder or severe mood dysregulation.,University of Denver,University of Denver,"University of Denver, Driscoll Bridge, Denver, Denver County, Colorado, 80208, USA",39.67665410,-104.96220300,edu,
+4f36c14d1453fc9d6481b09c5a09e91d8d9ee47a,Video-Based Face Recognition Using the Intra/Extra-Personal Difference Dictionary,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+4f36c14d1453fc9d6481b09c5a09e91d8d9ee47a,Video-Based Face Recognition Using the Intra/Extra-Personal Difference Dictionary,University of Maryland College Park,University of Maryland College Park,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",38.99203005,-76.94610290,edu,
+4fb9f05dc03eb4983d8f9a815745bb47970f1b93,"On Robust Face Recognition via Sparse Encoding: the Good, the Bad, and the Ugly",National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+4fb9f05dc03eb4983d8f9a815745bb47970f1b93,"On Robust Face Recognition via Sparse Encoding: the Good, the Bad, and the Ugly",University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+4fb9f05dc03eb4983d8f9a815745bb47970f1b93,"On Robust Face Recognition via Sparse Encoding: the Good, the Bad, and the Ugly",Queensland University of Technology,Queensland University of Technology,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.47715625,153.02841004,edu,
+4f6ab8af1b059e6130d5fd8c4e4adee4079ae2e6,Selective Search for Object Recognition,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+4f09328793b907074adc8d4e10d2d763d7a4b513,NTUA-SLP at SemEval-2018 Task 1: Predicting Affective Content in Tweets with Deep Attentive RNNs and Transfer Learning,National Technical University of Athens,National Technical University of Athens,"Εθνικό Μετσόβιο Πολυτεχνείο, Στουρνάρη, Μουσείο, Αθήνα, Δήμος Αθηναίων, Π.Ε. Κεντρικού Τομέα Αθηνών, Περιφέρεια Αττικής, Αττική, 11250, Ελλάδα",37.98782705,23.73179733,edu,
+8d6344658fa9673b1f4ac0d0bad53617ee127aaa,Adolescent and adult risk-taking in virtual social contexts,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+8d6344658fa9673b1f4ac0d0bad53617ee127aaa,Adolescent and adult risk-taking in virtual social contexts,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+8d6344658fa9673b1f4ac0d0bad53617ee127aaa,Adolescent and adult risk-taking in virtual social contexts,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+8d6344658fa9673b1f4ac0d0bad53617ee127aaa,Adolescent and adult risk-taking in virtual social contexts,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+8d6344658fa9673b1f4ac0d0bad53617ee127aaa,Adolescent and adult risk-taking in virtual social contexts,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+8d7d02bdd3a6dfc01982468ed3eb4e66d99a302f,Data Curation APIs,University of New South Wales,University of New South Wales,"UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia",-33.91758275,151.23124025,edu,
+8d7d02bdd3a6dfc01982468ed3eb4e66d99a302f,Data Curation APIs,University of New South Wales,University of New South Wales,"UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia",-33.91758275,151.23124025,edu,
+8d8d333eb194bce847a4bbfc85fe332643622a34,Learning Hand Articulations by Hallucinating Heat Distribution,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+8d03e982bac627dfea7a785a597d5946c6b2c4bb,"Landmark localization, feature matching and biomarker discovery from magnetic resonance images",Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+8d03e982bac627dfea7a785a597d5946c6b2c4bb,"Landmark localization, feature matching and biomarker discovery from magnetic resonance images",Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+8d955b025495522e67e8cb6e29436001ebbd0abb,Disentangling Features in 3D Face Shapes for Joint Face Reconstruction and Recognition,"Sichuan University, Chengdu","Sichuan Univ., Chengdu","四川大学(华西校区), 校东路, 武侯区, 武侯区 (Wuhou), 成都市 / Chengdu, 四川省, 610014, 中国",30.64276900,104.06751175,edu,
+8d955b025495522e67e8cb6e29436001ebbd0abb,Disentangling Features in 3D Face Shapes for Joint Face Reconstruction and Recognition,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+8dbb08fdd8827383ce74dde937b74cf21b687cbb,Prediction and Unsupervised Learning,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+8d1253f315c821bd2b354550ae9ea6d3d7be1d31,Improved Low Resolution Heterogeneous Face Recognition Using Re-ranking,Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.02223470,77.56718325,edu,
+8dcc1e0f0215dd5fcb6d698c35180d40dadc8dac,VirtualWorlds as Proxy for Multi-object Tracking Analysis,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+8d4f12ed7b5a0eb3aa55c10154d9f1197a0d84f3,Cascaded pose regression,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+8d1b3fff760c2574a78a849f9b710f8880c94dd2,Improving Multi-Person Pose Estimation using Label Correction,Keio University,Keio University,"綱島市民の森, けつわり坂, 港北区, 横浜市, 神奈川県, 関東地方, 223-0053, 日本",35.54169690,139.63471840,edu,
+8da1b0834688edb311a803532e33939e9ecf8292,CornerNet: Detecting Objects as Paired Keypoints,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+8dea22172bd3008ec3c8008bc6edfdfe1e33e439,CityPersons: A Diverse Dataset for Pedestrian Detection,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+8de6deefb90fb9b3f7d451b9d8a1a3264b768482,Multibiometric Systems : Fusion Strategies and Template Security,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+8d2c0c9155a1ed49ba576ac0446ec67725468d87,A Study of Two Image Representations for Head Pose Estimation,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+8d203e9f7aa88ee167f5eb620a63dcf2dc64fb2e,A Knowledge-Grounded Multimodal Search-Based Conversational Agent,Heriot-Watt University,Heriot-Watt University,"Heriot-Watt University - Edinburgh Campus, Third Gait, Currie, Gogarbank, City of Edinburgh, Scotland, EH14 4AS, UK",55.91029135,-3.32345777,edu,
+8d0243b8b663ca0ab7cbe613e3b886a5d1c8c152,Development of Optical Computer Recognition (OCR) for Monitoring Stress and Emotions in Space,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+8d6c4af9d4c01ff47fe0be48155174158a9a5e08,"Labeling, discovering, and detecting objects in images",MIT,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+8d09e7c3c6b714574b2a4a7993ac94beb9d4f50d,Human eye localization using edge projections,Bilkent University,Bilkent University,"Bilkent Üniversitesi, 3. Cadde, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87204890,32.75395155,edu,
+8d09e7c3c6b714574b2a4a7993ac94beb9d4f50d,Human eye localization using edge projections,Bilkent University,Bilkent University,"Bilkent Üniversitesi, 3. Cadde, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87204890,32.75395155,edu,
+8dcc95debd07ebab1721c53fa50d846fef265022,MicroExpNet: An Extremely Small and Fast Model For Expression Recognition From Frontal Face Images,Middle East Technical University,Middle East Technical University,"ODTÜ, 1, 1591.sk(315.sk), Çiğdem Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87549675,32.78553506,edu,
+8d879f4aa3284aca8d671d8360c6b6f2f0f07a23,Non-local RoIs for Instance Segmentation,National Tsing Hua University,National Tsing Hua University,"國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.79254840,120.99511830,edu,
+8d879f4aa3284aca8d671d8360c6b6f2f0f07a23,Non-local RoIs for Instance Segmentation,National Tsing Hua University,National Tsing Hua University,"國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.79254840,120.99511830,edu,
+8d44aa6745ec0b30f1402531b3419f3310587dc7,Kernel Latent SVM for Visual Recognition,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+8d44aa6745ec0b30f1402531b3419f3310587dc7,Kernel Latent SVM for Visual Recognition,University of Manitoba,University of Manitoba,"University of Manitoba, Gillson Street, Normand Park, Saint Vital, Winnipeg, Manitoba, R3T 2N2, Canada",49.80915360,-97.13304179,edu,
+8d44aa6745ec0b30f1402531b3419f3310587dc7,Kernel Latent SVM for Visual Recognition,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+8d44aa6745ec0b30f1402531b3419f3310587dc7,Kernel Latent SVM for Visual Recognition,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+8dd9eafab9498d495f0f6bf487d6a9c3aa7f3c57,StNet: Local and Global Spatial-Temporal Modeling for Action Recognition,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+8dd9eafab9498d495f0f6bf487d6a9c3aa7f3c57,StNet: Local and Global Spatial-Temporal Modeling for Action Recognition,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+8d0dffcf36e76ebbb5ff9389750264d9fb77265f,Comparison of Visual Datasets for Machine Learning,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+8d0dffcf36e76ebbb5ff9389750264d9fb77265f,Comparison of Visual Datasets for Machine Learning,University of Miami,University of Miami,"University of Miami, Theo Dickenson Drive, Coral Gables, Miami-Dade County, Florida, 33124, USA",25.71733390,-80.27866887,edu,
+8d0dffcf36e76ebbb5ff9389750264d9fb77265f,Comparison of Visual Datasets for Machine Learning,Florida International University,Florida International University,"FIU, Southwest 14th Street, Sweetwater, University Park, Miami-Dade County, Florida, 33199, USA",25.75533775,-80.37628897,edu,
+8df05de407b829abb357e230bead5407cabe7305,U Can Touch This: How Tablets Can Be Used to Study Cognitive Development,Ruhr-University Bochum,Ruhr-University Bochum,"RUB, 150, Universitätsstraße, Ruhr-Universität, Querenburg, Bochum-Süd, Bochum, Regierungsbezirk Arnsberg, Nordrhein-Westfalen, 44801, Deutschland",51.44415765,7.26096541,edu,
+8dce6fa7a13cc94954cbc6be9a709a4ce696ead3,Vision and Language Integration: Moving beyond Objects,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+8d91f06af4ef65193f3943005922f25dbb483ee4,Facial Expression Classification Using Rotation Slepian-based Moment Invariants,University of Macau,University of Macau,"研究生宿舍 Residência de Estudantes de Pós-Graduação da Universidade de Macau, 澳門大學 Universidade de Macau, 嘉模堂區 Nossa Senhora do Carmo, 氹仔 Taipa, Universidade de Macau em Ilha de Montanha 澳門大學橫琴校區, 中国",22.12401870,113.54510901,edu,
+8d007d8d75cb84e3350889ad5e1cc6520688e65e,Optimizing Nondecomposable Loss Functions in Structured Prediction,Sharif University of Technology,Sharif University of Technology,"دانشگاه صنعتی شریف, خیابان آزادی, زنجان, منطقه ۹ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, 14588, ‏ایران‎",35.70362270,51.35125097,edu,
+8d007d8d75cb84e3350889ad5e1cc6520688e65e,Optimizing Nondecomposable Loss Functions in Structured Prediction,Sharif University of Technology,Sharif University of Technology,"دانشگاه صنعتی شریف, خیابان آزادی, زنجان, منطقه ۹ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, 14588, ‏ایران‎",35.70362270,51.35125097,edu,
+8d007d8d75cb84e3350889ad5e1cc6520688e65e,Optimizing Nondecomposable Loss Functions in Structured Prediction,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+8dc9de0c7324d098b537639c8214543f55392a6b,Pose-Invariant 3D Object Recognition Using Linear Combination of 2D Views and Evolutionary Optimisation,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+8dc10cb0c8f6e449c22bb11399aa886d850fc701,A Projected Gradient Descent Method for CRF Inference Allowing End-to-End Training of Arbitrary Pairwise Potentials,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+8dc10cb0c8f6e449c22bb11399aa886d850fc701,A Projected Gradient Descent Method for CRF Inference Allowing End-to-End Training of Arbitrary Pairwise Potentials,Lund University,Lund University,"TEM at Lund University, 9, Klostergatan, Stadskärnan, Centrum, Lund, Skåne, Götaland, 22222, Sverige",55.70395710,13.19020110,edu,
+8d228b4c0787d9e29b0c1fff05f15198bda911c9,Scalable and Effective Deep CCA via Soft Decorrelation,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+8d9ffe9f7bf1ff3ecc320afe50a92a867a12aeb7,Turning a Blind Eye: Explicit Removal of Biases and Variation from Deep Neural Network Embeddings,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+8d9ffe9f7bf1ff3ecc320afe50a92a867a12aeb7,Turning a Blind Eye: Explicit Removal of Biases and Variation from Deep Neural Network Embeddings,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+8d9ffe9f7bf1ff3ecc320afe50a92a867a12aeb7,Turning a Blind Eye: Explicit Removal of Biases and Variation from Deep Neural Network Embeddings,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+8d44ac33d768fdc436c1b8ce995e2a6dbc4ad74b,Face recognition across large pose variations via Boosted Tied Factor Analysis,Sharif University of Technology,Sharif University of Technology,"دانشگاه صنعتی شریف, خیابان آزادی, زنجان, منطقه ۹ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, 14588, ‏ایران‎",35.70362270,51.35125097,edu,
+8d95317d0e366cecae1dd3f7c1ba69fe3fc4a8e0,Riesz-based Volume Local Binary Pattern and A Novel Group Expression Model for Group Happiness Intensity Analysis,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+8d95317d0e366cecae1dd3f7c1ba69fe3fc4a8e0,Riesz-based Volume Local Binary Pattern and A Novel Group Expression Model for Group Happiness Intensity Analysis,University of Canberra,University of Canberra,"University of Canberra, University Drive, Bruce, Belconnen, Australian Capital Territory, 2617, Australia",-35.23656905,149.08446994,edu,
+8d95317d0e366cecae1dd3f7c1ba69fe3fc4a8e0,Riesz-based Volume Local Binary Pattern and A Novel Group Expression Model for Group Happiness Intensity Analysis,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+8d94e72ebcbc8f93dc60eb42ac7058d6a94e8683,D-LinkNet : LinkNet with Pretrained Encoder and Dilated Convolution for High Resolution Satellite Imagery Road Extraction,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+15fbe139cbfd19513763db06b8ffa2e21168ca4e,GestureGAN for Hand Gesture-to-Gesture Translation in the Wild,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+15fbe139cbfd19513763db06b8ffa2e21168ca4e,GestureGAN for Hand Gesture-to-Gesture Translation in the Wild,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+15638d4611867b8f105fb541dbb61669fde6ab2a,Object Detection via Aspect Ratio and Context Aware Region-based Convolutional Networks,Jilin University,Jilin University,"吉林大学珠海校区, 丹桂路, 圣堂村, 金湾区, 珠海市, 广东省, 中国",22.05356500,113.39913285,edu,
+15638d4611867b8f105fb541dbb61669fde6ab2a,Object Detection via Aspect Ratio and Context Aware Region-based Convolutional Networks,North Carolina State University,North Carolina State University,"North Carolina State University, Oval Drive, West Raleigh, Raleigh, Wake County, North Carolina, 27695, USA",35.77184965,-78.67408695,edu,
+159afb7fb0740f0b48b812ed5183c2229089044d,A Comparative Study on Multi-person Tracking Using Overlapping Cameras,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+150855fdaca2ff3ae5a51da4f82f120a92cac104,SmartSketcher: sketch-based image retrieval with dynamic semantic re-ranking,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+1505e0aea7f82488dad1448e79b22c3b0ebc65cf,"Fast, Approximate 3D Face Reconstruction from Multiple Views",University of Warwick,University of Warwick,"University of Warwick, University Road, Kirby Corner, Cannon Park, Coventry, West Midlands Combined Authority, West Midlands, England, CV4 7AL, UK",52.37931310,-1.56042520,edu,
+15b0e598d9692d77aa33370dd3a1a47ba5f99aa6,Learning Cooperative Visual Dialog Agents with Deep Reinforcement Learning,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+155199d7f10218e29ddaee36ebe611c95cae68c4,Towards Scalable Visual Navigation of Micro Aerial Vehicles,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+155199d7f10218e29ddaee36ebe611c95cae68c4,Towards Scalable Visual Navigation of Micro Aerial Vehicles,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+158452a25143013e4c406ee2d41a7399c34df3db,Detecting Snap Points in Egocentric Video with a Web Photo Prior,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+15db3bb041ee06a369f0cd478369c75618a35387,Pathological game use in adults with and without Autism Spectrum Disorder,University of Missouri,University of Missouri,"L1, Maguire Boulevard, Lemone Industrial Park, Columbia, Boone County, Missouri, 65201, USA",38.92676100,-92.29193783,edu,
+15db3bb041ee06a369f0cd478369c75618a35387,Pathological game use in adults with and without Autism Spectrum Disorder,University of Virginia,University of Virginia,"University of Virginia, Rotunda Alley, Carr's Hill, Albemarle County, Virginia, 22904-4119, USA",38.03536820,-78.50353220,edu,
+15db3bb041ee06a369f0cd478369c75618a35387,Pathological game use in adults with and without Autism Spectrum Disorder,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+158974dc6503cb4939b87a1fffe17871e8a48c91,Local Sparse Discriminant Analysis for Robust Face Recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+15affdcef4bb9d78b2d3de23c9459ee5b7a43fcb,Semi-Supervised Classification Using Linear Neighborhood Propagation,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+15affdcef4bb9d78b2d3de23c9459ee5b7a43fcb,Semi-Supervised Classification Using Linear Neighborhood Propagation,Hong Kong University of Science and Technology,Hong Kong University of Science and Technology,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.33863040,114.26203370,edu,
+15d653972d176963ef0ad2cc582d3b35ca542673,CSVideoNet: A Real-Time End-to-End Learning Framework for High-Frame-Rate Video Compressive Sensing,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+159e792096756b1ec02ec7a980d5ef26b434ff78,Signed Laplacian Embedding for Supervised Dimension Reduction,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+159e792096756b1ec02ec7a980d5ef26b434ff78,Signed Laplacian Embedding for Supervised Dimension Reduction,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+155959429a6f44e7b980ff00f2d5c0343d71c4dd,Patch-Based Segmentation without Registration: Application to Knee MRI,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+1586871a1ddfe031b885b94efdbff647cf03eff1,A Century of Portraits: A Visual Historical Record of American High School Yearbooks,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+1586871a1ddfe031b885b94efdbff647cf03eff1,A Century of Portraits: A Visual Historical Record of American High School Yearbooks,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+1586871a1ddfe031b885b94efdbff647cf03eff1,A Century of Portraits: A Visual Historical Record of American High School Yearbooks,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+1582c29d0f752f95a12f5a8ce08d5e5c752f6822,Developmental changes in infants' categorization of anger and disgust facial expressions.,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+15cf7bdc36ec901596c56d04c934596cf7b43115,Face Extraction from Image based on K-Means Clustering Algorithms,Islamic Azad University,Islamic Azad University,"دانشگاه آزاد اسلامی, همدان, بخش مرکزی شهرستان همدان, شهرستان همدان, استان همدان, ‏ایران‎",34.84529990,48.55962120,edu,
+1576ed0f3926c6ce65e0ca770475bca6adcfdbb4,Keep it accurate and diverse: Enhancing action recognition performance by ensemble learning,University of Barcelona,University of Barcelona,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España",41.38689130,2.16352385,edu,
+156cd2a0e2c378e4c3649a1d046cd080d3338bca,Exemplar based approaches on Face Fiducial Detection and Frontalization,International Institute of Information Technology,International Institute of Information Technology,"International Institute of Information Technology, Hyderabad, Campus Road, Ward 105 Gachibowli, Greater Hyderabad Municipal Corporation West Zone, Hyderabad, Rangareddy District, Telangana, 500032, India",17.44549570,78.34854698,edu,
+1574abc94d22b03f8c9630f0eb7ad1f8ed67880e,Cross-View Image Synthesis using Conditional GANs,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+15e1af79939dbf90790b03d8aa02477783fb1d0f,Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in Vitro,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+15a0546ee32ac391f342a6188446dd6699a1d7b8,Person re-ID while Crossing Different Cameras: Combination of Salient-Gaussian Weighted BossaNova and Fisher Vector Encodings,"University of Sfax, Tunisia","REGIM-Labo: REsearch Groups in Intelligent Machines, University of Sfax, ENIS, BP 1173, Sfax, 3038, Tunisia","Université de Route de l'Aéroport Km 0.5 BP 1169 .3029 Sfax, Sfax, Tunisia",34.73610660,10.74272750,edu,"University of Sfax, Tunisia"
+151481703aa8352dc78e2577f0601782b8c41b34,Appearance Manifold of Facial Expression,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+15037913b5d3f299da509218f0b914227d10b929,Towards the use of social interaction conventions as prior for gaze model adaptation,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+15037913b5d3f299da509218f0b914227d10b929,Towards the use of social interaction conventions as prior for gaze model adaptation,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+15037913b5d3f299da509218f0b914227d10b929,Towards the use of social interaction conventions as prior for gaze model adaptation,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+1565721ebdbd2518224f54388ed4f6b21ebd26f3,Face and landmark detection by using cascade of classifiers,Eskisehir Osmangazi University,Eskisehir Osmangazi University,"Eskişehir Osmangazi Üniversitesi Meşelik Yerleşkesi, Kütahya-Eskişehir yolu, Sazova Mahallesi, Karagözler, Tepebaşı, Eskişehir, İç Anadolu Bölgesi, 26160, Türkiye",39.74875160,30.47653071,edu,
+1565721ebdbd2518224f54388ed4f6b21ebd26f3,Face and landmark detection by using cascade of classifiers,Czech Technical University,Czech Technical University,"České vysoké učení technické v Praze, Resslova, Nové Město, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 11121, Česko",50.07642960,14.41802312,edu,
+15d2703ac86652aaa8182ff60da19fc1bccb22ce,"Measurement, Modeling, and Synthesis of Time-Varying Appearance of Natural Phenomena",Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+15d1582c8b65dbab5ca027467718a2c286ddce7a,"On robust face recognition via sparse coding: the good, the bad and the ugly",National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+15d1582c8b65dbab5ca027467718a2c286ddce7a,"On robust face recognition via sparse coding: the good, the bad and the ugly",University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+15d1582c8b65dbab5ca027467718a2c286ddce7a,"On robust face recognition via sparse coding: the good, the bad and the ugly",Queensland University of Technology,Queensland University of Technology,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.47715625,153.02841004,edu,
+15252b7af081761bb00535aac6bd1987391f9b79,Estimation of eye gaze direction angles based on active appearance models,National Technical University of Athens,National Technical University of Athens,"Εθνικό Μετσόβιο Πολυτεχνείο, Στουρνάρη, Μουσείο, Αθήνα, Δήμος Αθηναίων, Π.Ε. Κεντρικού Τομέα Αθηνών, Περιφέρεια Αττικής, Αττική, 11250, Ελλάδα",37.98782705,23.73179733,edu,
+1504eae5487e1e062fef96e1e424de5d3a5a3858,MSRC: multimodal spatial regression with semantic context for phrase grounding,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+1504eae5487e1e062fef96e1e424de5d3a5a3858,MSRC: multimodal spatial regression with semantic context for phrase grounding,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+1504eae5487e1e062fef96e1e424de5d3a5a3858,MSRC: multimodal spatial regression with semantic context for phrase grounding,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+1504eae5487e1e062fef96e1e424de5d3a5a3858,MSRC: multimodal spatial regression with semantic context for phrase grounding,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+152ca42d6701db43dbd8a37901d56a52e4a9e6f9,Social negative bootstrapping for visual categorization,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+15ee80e86e75bf1413dc38f521b9142b28fe02d1,Towards a deep learning framework for unconstrained face detection,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+15912abb1fe1457bb358d2d2b0e586c1987b6e25,Evaluation of the Pain Level from Speech: Introducing a Novel Pain Database and Benchmarks,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+15e27f968458bf99dd34e402b900ac7b34b1d575,Ranking 2DLDA features based on fisher discriminance,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+15f70a0ad8903017250927595ae2096d8b263090,Learning Robust Deep Face Representation,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+1564bf0a268662df752b68bee5addc4b08868739,With whom do I interact? Detecting social interactions in egocentric photo-streams,University of Barcelona,University of Barcelona,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España",41.38689130,2.16352385,edu,
+1564bf0a268662df752b68bee5addc4b08868739,With whom do I interact? Detecting social interactions in egocentric photo-streams,University of Barcelona,University of Barcelona,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España",41.38689130,2.16352385,edu,
+1564bf0a268662df752b68bee5addc4b08868739,With whom do I interact? Detecting social interactions in egocentric photo-streams,University of Barcelona,University of Barcelona,"Universitat de Barcelona, Carrer de la Diputació, l'Antiga Esquerra de l'Eixample, Eixample, Barcelona, BCN, CAT, 08013, España",41.38689130,2.16352385,edu,
+158e32579e38c29b26dfd33bf93e772e6211e188,Automated Real Time Emotion Recognition using Facial Expression Analysis,Carleton University,Carleton University,"Carleton University, 1125, Colonel By Drive, Billings Bridge, Capital, Ottawa, Ontario, K1S 5B7, Canada",45.38608430,-75.69539267,edu,
+12044354032fdee40405cb12e8bbebb6d073a768,Women Are Seen More than Heard in Online Newspapers,University of Bristol,University of Bristol,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK",51.45848370,-2.60977520,edu,
+12044354032fdee40405cb12e8bbebb6d073a768,Women Are Seen More than Heard in Online Newspapers,Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.48799610,-3.17969747,edu,
+122f51cee489ba4da5ab65064457fbe104713526,Long Short Term Memory Recurrent Neural Network based Multimodal Dimensional Emotion Recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+122f51cee489ba4da5ab65064457fbe104713526,Long Short Term Memory Recurrent Neural Network based Multimodal Dimensional Emotion Recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+122f51cee489ba4da5ab65064457fbe104713526,Long Short Term Memory Recurrent Neural Network based Multimodal Dimensional Emotion Recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+122f51cee489ba4da5ab65064457fbe104713526,Long Short Term Memory Recurrent Neural Network based Multimodal Dimensional Emotion Recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+121503705689f46546cade78ff62963574b4750b,We Don’t Need No Bounding-Boxes: Training Object Class Detectors Using Only Human Verification,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+12c6f3ae8f20a1473a89b9cbb82d0f02275ea62b,Hand detection using multiple proposals,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+12c6f3ae8f20a1473a89b9cbb82d0f02275ea62b,Hand detection using multiple proposals,Oxford Brookes University,Oxford Brookes University,"Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK",51.75552050,-1.22615970,edu,
+12811f1dc14c9377903d4c814e112071118071a5,I Have Seen Enough: Transferring Parts Across Categories,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+125d82fee1b9fbcc616622b0977f3d06771fc152,Hierarchical face parsing via deep learning,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+125d82fee1b9fbcc616622b0977f3d06771fc152,Hierarchical face parsing via deep learning,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+125d82fee1b9fbcc616622b0977f3d06771fc152,Hierarchical face parsing via deep learning,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+1255afbf86423c171349e874b3ac297de19f00cd,Robust Face Recognition by Computing Distances From Multiple Histograms of Oriented Gradients,University of Groningen,University of Groningen,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland",53.21967825,6.56251482,edu,
+1275d6a800f8cf93c092603175fdad362b69c191,Deep Face Recognition: A Survey,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+12dcb25f10d42ad2b4352ba9fe7a6a32ee2635a6,The Automatic Scientist will be a Data System,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+12c2f7cee1f6abff0d4de9b4b90caa3b5c6084a0,Adult Content Recognition from Images Using a Mixture of Convolutional Neural Networks,Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.64185301,edu,
+12e5ff3d6771d725f09bb0b2f14d17a64d4c1c25,The fear gasping face as a threat display in a Melanesian society.,Boston College,Boston College,"Boston College, 140, Commonwealth Avenue, Chestnut Hill, Newton, Middlesex County, Massachusetts, 02467, USA",42.33544810,-71.16813864,edu,
+12e5ff3d6771d725f09bb0b2f14d17a64d4c1c25,The fear gasping face as a threat display in a Melanesian society.,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+1235dd37312cb20aced0e97d953f6379d8a0c7d4,Grounded Textual Entailment,University of Malta,University of Malta,"University of Malta, Ring Road, Japanese Garden, L-Imsida, Malta, MSD 9027, Malta",35.90232260,14.48341890,edu,
+126535430845361cd7a3a6f317797fe6e53f5a3b,Robust Photometric Stereo via Low-Rank Matrix Completion and Recovery,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+126535430845361cd7a3a6f317797fe6e53f5a3b,Robust Photometric Stereo via Low-Rank Matrix Completion and Recovery,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+12c68afcd77584f3db55b42f38c3ac0e19389b60,Discriminative tag learning on YouTube videos with latent sub-tags,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+129d0e22d6b847c8002fd2c70bb508cdf3286fb8,"Investigating Audio, Visual, and Text Fusion Methods for End-to-End Automatic Personality Prediction",Hong Kong University of Science and Technology,Hong Kong University of Science and Technology,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.33863040,114.26203370,edu,
+12679cdcb4bc5e9c60a795c2418b40b5e1681652,Volterrafaces: Discriminant analysis using Volterra kernels,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+121fe33daf55758219e53249cf8bcb0eb2b4db4b,An Empirical Camera Model for Internet Color Vision,Middlebury College,Middlebury College,"Middlebury College, Old Chapel Road, Middlebury, Addison County, Vermont, 05753, USA",44.00907770,-73.17679460,edu,
+12408baf69419409d228d96c6f88b6bcde303505,Temporal Tessellation: A Unified Approach for Video Analysis,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+12408baf69419409d228d96c6f88b6bcde303505,Temporal Tessellation: A Unified Approach for Video Analysis,Open University of Israel,Open University of Israel,"האוניברסיטה הפתוחה, 15, אבא חושי, חיפה, גבעת דאונס, חיפה, מחוז חיפה, NO, ישראל",32.77824165,34.99565673,edu,
+120bcc9879d953de7b2ecfbcd301f72f3a96fb87,Report on the FG 2015 Video Person Recognition Evaluation,Colorado State University,Colorado State University,"Colorado State University, West Pitkin Street, Woodwest, Fort Collins, Larimer County, Colorado, 80526-2002, USA",40.57093580,-105.08655256,edu,
+120bcc9879d953de7b2ecfbcd301f72f3a96fb87,Report on the FG 2015 Video Person Recognition Evaluation,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+120bcc9879d953de7b2ecfbcd301f72f3a96fb87,Report on the FG 2015 Video Person Recognition Evaluation,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+120bcc9879d953de7b2ecfbcd301f72f3a96fb87,Report on the FG 2015 Video Person Recognition Evaluation,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+120bcc9879d953de7b2ecfbcd301f72f3a96fb87,Report on the FG 2015 Video Person Recognition Evaluation,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+120bcc9879d953de7b2ecfbcd301f72f3a96fb87,Report on the FG 2015 Video Person Recognition Evaluation,Stevens Institute of Technology,Stevens Institute of Technology,"Stevens Institute of Technology, River Terrace, Hoboken, Hudson County, New Jersey, 07030, USA",40.74225200,-74.02709490,edu,
+120bcc9879d953de7b2ecfbcd301f72f3a96fb87,Report on the FG 2015 Video Person Recognition Evaluation,University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.05015580,14.46907327,edu,
+120bcc9879d953de7b2ecfbcd301f72f3a96fb87,Report on the FG 2015 Video Person Recognition Evaluation,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+120bcc9879d953de7b2ecfbcd301f72f3a96fb87,Report on the FG 2015 Video Person Recognition Evaluation,National Institute of Standards and Technology,National Institute of Standards and Technology,"National Institute of Standards and Technology, Summer Walk Drive, Diamond Farms, Gaithersburg, Montgomery County, Maryland, 20878, USA",39.12549380,-77.22293475,edu,
+12095f9b35ee88272dd5abc2d942a4f55804b31e,DenseReg : Fully Convolutional Dense Shape Regression Inthe-Wild Rıza,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+12095f9b35ee88272dd5abc2d942a4f55804b31e,DenseReg : Fully Convolutional Dense Shape Regression Inthe-Wild Rıza,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+12fa75f90b0dcf254c33145fe08e7ce0f099066a,Active Learning with Cross-Class Knowledge Transfer,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+124d60fae338b1f87455d1fc4ede5fcfd806da1a,Multi-task Mid-level Feature Alignment Network for Unsupervised Cross-Dataset Person Re-Identification,University of Warwick,University of Warwick,"University of Warwick, University Road, Kirby Corner, Cannon Park, Coventry, West Midlands Combined Authority, West Midlands, England, CV4 7AL, UK",52.37931310,-1.56042520,edu,
+124d60fae338b1f87455d1fc4ede5fcfd806da1a,Multi-task Mid-level Feature Alignment Network for Unsupervised Cross-Dataset Person Re-Identification,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+124d60fae338b1f87455d1fc4ede5fcfd806da1a,Multi-task Mid-level Feature Alignment Network for Unsupervised Cross-Dataset Person Re-Identification,Charles Sturt University,Charles Sturt University,"Charles Sturt University, Wagga Wagga, NSW, 2678, Australia",-35.06360710,147.35522340,edu,
+12441a74e709ddab53f9039cf507491df7b3840a,SCA-CNN: Spatial and Channel-Wise Attention in Convolutional Networks for Image Captioning,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+12441a74e709ddab53f9039cf507491df7b3840a,SCA-CNN: Spatial and Channel-Wise Attention in Convolutional Networks for Image Captioning,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+12441a74e709ddab53f9039cf507491df7b3840a,SCA-CNN: Spatial and Channel-Wise Attention in Convolutional Networks for Image Captioning,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+12441a74e709ddab53f9039cf507491df7b3840a,SCA-CNN: Spatial and Channel-Wise Attention in Convolutional Networks for Image Captioning,Shandong University,Shandong University,"山东大学, 泰安街, 鳌山卫街道, 即墨区, 青岛市, 山东省, 266200, 中国",36.36934730,120.67381800,edu,
+12441a74e709ddab53f9039cf507491df7b3840a,SCA-CNN: Spatial and Channel-Wise Attention in Convolutional Networks for Image Captioning,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+12441a74e709ddab53f9039cf507491df7b3840a,SCA-CNN: Spatial and Channel-Wise Attention in Convolutional Networks for Image Captioning,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+1297ee7a41aa4e8499c7ddb3b1fed783eba19056,Effects of emotional expressions on persuasion,University of Nebraska - Lincoln,University of Nebraska - Lincoln,"Sheldon Museum of Art, North 12th Street, West Lincoln, Lincoln, Lancaster County, Nebraska, 68588-0300, USA",40.81747230,-96.70444680,edu,
+1297ee7a41aa4e8499c7ddb3b1fed783eba19056,Effects of emotional expressions on persuasion,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+1297ee7a41aa4e8499c7ddb3b1fed783eba19056,Effects of emotional expressions on persuasion,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+1297ee7a41aa4e8499c7ddb3b1fed783eba19056,Effects of emotional expressions on persuasion,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+1297ee7a41aa4e8499c7ddb3b1fed783eba19056,Effects of emotional expressions on persuasion,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+1297ee7a41aa4e8499c7ddb3b1fed783eba19056,Effects of emotional expressions on persuasion,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+126214ef0dcef2b456cb413905fa13160c73ec8e,Modelling human perception of static facial expressions,University of Siena,University of Siena,"大學 University, 澤祥街 Chak Cheung Street, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.41338620,114.21005800,edu,
+126250d6077a6a68ae06277352eb42c4fa4c8b10,Learning Patch-based Structural Element Models with Hierarchical Palettes Abstract Learning Patch-based Structural Element Models with Hierarchical Palettes,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+12bce6e2db10faa4f370f9e40a6084296080b5cb,Learning to rank in person re-identification with metric ensembles,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+12692fbe915e6bb1c80733519371bbb90ae07539,Object Bank: A High-Level Image Representation for Scene Classification & Semantic Feature Sparsification,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+12692fbe915e6bb1c80733519371bbb90ae07539,Object Bank: A High-Level Image Representation for Scene Classification & Semantic Feature Sparsification,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+12169ff906633e486599660ebf77dd73060640b9,Multi-stage Contextual Deep Learning for Pedestrian Detection,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+12f2325789febc95c9b453d12194bf4a778e60bd,Semantic Video Segmentation: A Review on Recent Approaches,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+1251deae1b4a722a2155d932bdfb6fe4ae28dd22,A Large-scale Attribute Dataset for Zero-shot Learning,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+1251deae1b4a722a2155d932bdfb6fe4ae28dd22,A Large-scale Attribute Dataset for Zero-shot Learning,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+12ccfc188de0b40c84d6a427999239c6a379cd66,Sparse Adversarial Perturbations for Videos,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+12169222eeee058578629e5097f250c3992530b1,Boosting relative spaces for categorizing objects with large intra-class variation,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+127b17fdd8860605680cfd053398fa95d12ccc03,Visual Question Generation as Dual Task of Visual Question Answering,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+127b17fdd8860605680cfd053398fa95d12ccc03,Visual Question Generation as Dual Task of Visual Question Answering,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+127b17fdd8860605680cfd053398fa95d12ccc03,Visual Question Generation as Dual Task of Visual Question Answering,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+127b17fdd8860605680cfd053398fa95d12ccc03,Visual Question Generation as Dual Task of Visual Question Answering,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+1270044a3fa1a469ec2f4f3bd364754f58a1cb56,Video-Based Face Recognition Using Probabilistic Appearance Manifolds,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+12003a7d65c4f98fb57587fd0e764b44d0d10125,Face recognition in the wild with the Probabilistic Gabor-Fisher Classifier,University of Ljubljana,University of Ljubljana,"UL Fakulteta za računalništvo in informatiko, 113, Večna pot, Vrtača, Rožna dolina, Ljubljana, Upravna Enota Ljubljana, Osrednjeslovenska, 1000, Slovenija",46.05015580,14.46907327,edu,
+12336e7d5d2ca4e1fdd2a52d50b2a5c987c08b0b,Assessing tracking assessment measures,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+124538b3db791e30e1b62f81d4101be435ee12ef,"Basic level scene understanding: categories, attributes and structures",Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+124538b3db791e30e1b62f81d4101be435ee12ef,"Basic level scene understanding: categories, attributes and structures",Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+124538b3db791e30e1b62f81d4101be435ee12ef,"Basic level scene understanding: categories, attributes and structures",University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+124538b3db791e30e1b62f81d4101be435ee12ef,"Basic level scene understanding: categories, attributes and structures",MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+124538b3db791e30e1b62f81d4101be435ee12ef,"Basic level scene understanding: categories, attributes and structures",MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+124538b3db791e30e1b62f81d4101be435ee12ef,"Basic level scene understanding: categories, attributes and structures",MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+124538b3db791e30e1b62f81d4101be435ee12ef,"Basic level scene understanding: categories, attributes and structures",Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+120c85cf69ea656b02262b4bc5761117fe35674e,Learning Exemplar-Represented Manifolds in Latent Space for Classification,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+124d43c5f76e70ec1f9eac62ef48f1dc2b547c04,Optimal Dimensionality Discriminant Analysis and Its Application to Image Recognition,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+8c8525e626c8857a4c6c385de34ffea31e7e41d1,Cross-Domain Image Retrieval with a Dual Attribute-Aware Ranking Network,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+8c8525e626c8857a4c6c385de34ffea31e7e41d1,Cross-Domain Image Retrieval with a Dual Attribute-Aware Ranking Network,"IBM Research, North Carolina",IBM Research,"IBM, East Cornwallis Road, Research Triangle Park, Nelson, Durham County, North Carolina, 27709, USA",35.90422720,-78.85565763,company,
+8c04688425fa3e03c24d08b09faad49e33f2cc30,Adversarial Dropout Regularization,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+8c66378df977606d332fc3b0047989e890a6ac76,Hierarchical-PEP model for real-world face recognition,Stevens Institute of Technology,Stevens Institute of Technology,"Stevens Institute of Technology, River Terrace, Hoboken, Hudson County, New Jersey, 07030, USA",40.74225200,-74.02709490,edu,
+8c8a61fc2c0e426aa64e50756b777475f3beb49b,Robust Marker-Based Tracking for Measuring Crowd Dynamics,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+8c9c8111e18f8798a612e7386e88536dfe26455e,Comparing Bayesian Networks to Classify Facial Expressions,Institute of Systems and Robotics,Institute of Systems and Robotics,"Institut für Robotik und Kognitive Systeme, 160, Ratzeburger Allee, Strecknitz, Sankt Jürgen, Strecknitz, Lübeck, Schleswig-Holstein, 23562, Deutschland",53.83383710,10.70359390,edu,
+8c9c8111e18f8798a612e7386e88536dfe26455e,Comparing Bayesian Networks to Classify Facial Expressions,University of Coimbra,University of Coimbra,"Reitoria da Universidade de Coimbra, Rua de Entre-Colégios, Almedina, Alta, Almedina, Sé Nova, Santa Cruz, Almedina e São Bartolomeu, CBR, Coimbra, Baixo Mondego, Centro, 3000-062, Portugal",40.20759510,-8.42566148,edu,
+8c9c8111e18f8798a612e7386e88536dfe26455e,Comparing Bayesian Networks to Classify Facial Expressions,Institute of Systems and Robotics,Institute of Systems and Robotics,"Institut für Robotik und Kognitive Systeme, 160, Ratzeburger Allee, Strecknitz, Sankt Jürgen, Strecknitz, Lübeck, Schleswig-Holstein, 23562, Deutschland",53.83383710,10.70359390,edu,
+8c9c8111e18f8798a612e7386e88536dfe26455e,Comparing Bayesian Networks to Classify Facial Expressions,University of Coimbra,University of Coimbra,"Reitoria da Universidade de Coimbra, Rua de Entre-Colégios, Almedina, Alta, Almedina, Sé Nova, Santa Cruz, Almedina e São Bartolomeu, CBR, Coimbra, Baixo Mondego, Centro, 3000-062, Portugal",40.20759510,-8.42566148,edu,
+8c9c8111e18f8798a612e7386e88536dfe26455e,Comparing Bayesian Networks to Classify Facial Expressions,Institute of Systems and Robotics,Institute of Systems and Robotics,"Institut für Robotik und Kognitive Systeme, 160, Ratzeburger Allee, Strecknitz, Sankt Jürgen, Strecknitz, Lübeck, Schleswig-Holstein, 23562, Deutschland",53.83383710,10.70359390,edu,
+8c9c8111e18f8798a612e7386e88536dfe26455e,Comparing Bayesian Networks to Classify Facial Expressions,University of Coimbra,University of Coimbra,"Reitoria da Universidade de Coimbra, Rua de Entre-Colégios, Almedina, Alta, Almedina, Sé Nova, Santa Cruz, Almedina e São Bartolomeu, CBR, Coimbra, Baixo Mondego, Centro, 3000-062, Portugal",40.20759510,-8.42566148,edu,
+8c357c8716e7a0606587cc67b209276b08483f3e,Care about you: towards large-scale human-centric visual relationship detection,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+8ccd6aaf1ee4b66c13fffbf560e3920f9bdf5f10,A multitask deep learning model for real-time deployment in embedded systems,"KTH Royal Institute of Technology, Stockholm","KTH Royal Institute of Technology, Stockholm","KTH, Teknikringen, Lärkstaden, Norra Djurgården, Östermalms stadsdelsområde, Sthlm, Stockholm, Stockholms län, Svealand, 114 28, Sverige",59.34986645,18.07063213,edu,
+8c618c038c60a385d220193f87b8b0759aab0fd7,A Hierarchical Association Framework for Multi-Object Tracking in Airborne Videos,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+8c618c038c60a385d220193f87b8b0759aab0fd7,A Hierarchical Association Framework for Multi-Object Tracking in Airborne Videos,Vrije Universiteit Brussel,Vrije Universiteit Brussel,"Vrije Universiteit Brussel, 170, Quai de l'Industrie - Nijverheidskaai, Anderlecht, Brussel-Hoofdstad - Bruxelles-Capitale, Région de Bruxelles-Capitale - Brussels Hoofdstedelijk Gewest, 1070, België / Belgique / Belgien",50.84110070,4.32377555,edu,
+8c99f35d6c3851513adb2c2d5c385c989879e05b,The Intelligent ICU Pilot Study: Using Artificial Intelligence Technology for Autonomous Patient Monitoring,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+8c99f35d6c3851513adb2c2d5c385c989879e05b,The Intelligent ICU Pilot Study: Using Artificial Intelligence Technology for Autonomous Patient Monitoring,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+8c99f35d6c3851513adb2c2d5c385c989879e05b,The Intelligent ICU Pilot Study: Using Artificial Intelligence Technology for Autonomous Patient Monitoring,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+8c99f35d6c3851513adb2c2d5c385c989879e05b,The Intelligent ICU Pilot Study: Using Artificial Intelligence Technology for Autonomous Patient Monitoring,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+8c99f35d6c3851513adb2c2d5c385c989879e05b,The Intelligent ICU Pilot Study: Using Artificial Intelligence Technology for Autonomous Patient Monitoring,University of Florida,University of Florida,"University of Florida, Southwest 16th Avenue, Diamond Village Apartments, City of Gainesville Municipal Boundaries, Alachua County, Florida, 32611, USA",29.63287840,-82.34901330,edu,
+8c81705e5e4a1e2068a5bd518adc6955d49ae434,3D Object Recognition with Enhanced Grassmann Discriminant Analysis,University of Tsukuba,University of Tsukuba,"University of Tsukuba, つばき通り, Kananemoto-satsukabe village, つくば市, 茨城県, 関東地方, 305-8377, 日本",36.11120580,140.10551760,edu,
+8c269412a8c9e646641750dce2a1b2ee7b9c6b2e,On MultiView Face Recognition Using Lytro Images,EURECOM,EURECOM,"Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France",43.61438600,7.07112500,edu,
+8c269412a8c9e646641750dce2a1b2ee7b9c6b2e,On MultiView Face Recognition Using Lytro Images,EURECOM,EURECOM,"Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France",43.61438600,7.07112500,edu,
+8c192cd39f90eb8ff2969f8916ef8967607c5298,"See, Hear, and Read: Deep Aligned Representations",MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+8ca0a7f2e5a7b1676f9a409c3ed5749c8a569b83,A new approach for pedestrian density estimation using moving sensors and computer vision,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+8ca0a7f2e5a7b1676f9a409c3ed5749c8a569b83,A new approach for pedestrian density estimation using moving sensors and computer vision,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+8cb403c733a5f23aefa6f583a17cf9b972e35c90,Learning the semantic structure of objects from Web supervision,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+8c1e828a4826a1fb3eb47ee432f5333b974fa141,Spatial Graph for Image Classification,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+8c522c293ffbb4d8f451789e3f05f5815bf40b92,An Efficient LBP-Based Descriptor for Facial Depth Images Applied to Gender Recognition Using RGB-D Face Data,EURECOM,EURECOM,"Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France",43.61438600,7.07112500,edu,
+8c6c743e21592304ee28ec073657bf128376ff8c,Power Normalizing Second-order Similarity Network for Few-shot Learning,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+8c6c743e21592304ee28ec073657bf128376ff8c,Power Normalizing Second-order Similarity Network for Few-shot Learning,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+8ccde9d80706a59e606f6e6d48d4260b60ccc736,RotDCF: Decomposition of Convolutional Filters for Rotation-Equivariant Deep Networks,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+8ccde9d80706a59e606f6e6d48d4260b60ccc736,RotDCF: Decomposition of Convolutional Filters for Rotation-Equivariant Deep Networks,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+8c6b9c9c26ead75ce549a57c4fd0a12b46142848,Facial expression recognition using shape and texture information,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+8c7284a0958c31f57b0558d3951d4486379ffacc,The role of napping on memory consolidation in preschool children,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+8c71e28a4ffb283a9cf3c5549e2fc64e9b0ecd5c,Metric Learning with Dynamically Generated Pairwise Constraints for Ear Recognition,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+8c4ea76e67a2a99339a8c4decd877fe0aa2d8e82,Gated Convolutional Neural Network for Semantic Segmentation in High-Resolution Images,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+8c4ea76e67a2a99339a8c4decd877fe0aa2d8e82,Gated Convolutional Neural Network for Semantic Segmentation in High-Resolution Images,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+8c5d1a334e7a88dc5e54383df1eef13188c2b6b5,Multi-Cue Correlation Filters for Robust Visual Tracking,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+8ce5425f20f2c9e27d954a7d86503b9a0a33c34c,A poselet based key frame searching approach in sports training videos,Beijing University of Technology,Beijing University of Technology,"北京工业大学, 银杏大道, 大郊亭村, 朝阳区 / Chaoyang, 北京市, 3208, 中国",39.87391435,116.47722285,edu,
+8ce9949b88726e117552ce3aa6901a5178db3bb2,Liberating the Biometric Menagerie Through Score Normalization Improvements,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+8c3cf0c579a28890e21428fcad7f09175e65e43d,Adding object detection skills to visual dialogue agents,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+8c7811c029905f4f3e9f31e925634a42e413f6d8,Face Matching Between Near Infrared and Visible Light Images,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+8c4bcbaee18aaae417e2f2da7a7b95bd8edaf063,Learning Convolutional Networks for Content-weighted Image Compression,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+8c4bcbaee18aaae417e2f2da7a7b95bd8edaf063,Learning Convolutional Networks for Content-weighted Image Compression,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+8c4bcbaee18aaae417e2f2da7a7b95bd8edaf063,Learning Convolutional Networks for Content-weighted Image Compression,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+8c4bcbaee18aaae417e2f2da7a7b95bd8edaf063,Learning Convolutional Networks for Content-weighted Image Compression,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+8c4bcbaee18aaae417e2f2da7a7b95bd8edaf063,Learning Convolutional Networks for Content-weighted Image Compression,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+8c5dcd5a0b3c9940e544993327eab6425ce645d5,Ensemble perception of emotions in autistic and typical children and adolescents,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+8c5dcd5a0b3c9940e544993327eab6425ce645d5,Ensemble perception of emotions in autistic and typical children and adolescents,Plymouth University,Plymouth University,"Plymouth University, Portland Square, Barbican, Plymouth, South West England, England, PL4 6AP, UK",50.37552690,-4.13937687,edu,
+8c5dcd5a0b3c9940e544993327eab6425ce645d5,Ensemble perception of emotions in autistic and typical children and adolescents,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+8c5dcd5a0b3c9940e544993327eab6425ce645d5,Ensemble perception of emotions in autistic and typical children and adolescents,University of Florence,University of Florence,"Piazza di San Marco, 4, 50121 Firenze FI, Italy",43.77764260,11.25976500,edu,
+8c5dcd5a0b3c9940e544993327eab6425ce645d5,Ensemble perception of emotions in autistic and typical children and adolescents,University of Geneva,University of Geneva,"University of Chicago-Yerkes Observatory, 373, West Geneva Street, Williams Bay, Walworth County, Wisconsin, 53191, USA",42.57054745,-88.55578627,edu,
+8c5dcd5a0b3c9940e544993327eab6425ce645d5,Ensemble perception of emotions in autistic and typical children and adolescents,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+8c5dcd5a0b3c9940e544993327eab6425ce645d5,Ensemble perception of emotions in autistic and typical children and adolescents,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+8cb55413f1c5b6bda943697bba1dc0f8fc880d28,Video-based Face Recognition on Real-World Data,University of Karlsruhe,University of Karlsruhe,"Karlshochschule International University, 36-38, Karlstraße, Innenstadt-West Westlicher Teil, Innenstadt-West, Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76133, Deutschland",49.00664235,8.39405152,edu,
+8552f6e3f73db564a2e625cceb1d1348d70b598c,Learning Compact Appearance Representation for Video-based Person Re-Identification,Shandong University,Shandong University,"山东大学, 泰安街, 鳌山卫街道, 即墨区, 青岛市, 山东省, 266200, 中国",36.36934730,120.67381800,edu,
+85304f24f5a1800e66de20ad05e20c8c032b7d03,Understanding and Discovering Deliberate Self-harm Content in Social Media,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+85faaad8eddebc960865e351c0e3ea81e25d42eb,Deep Group-shuffling Random Walk for Person Re-identification,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+85192977775e1f1001334a13de5d32736fbfd24c,Pedestrian Parsing via Deep Decompositional Network,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+85192977775e1f1001334a13de5d32736fbfd24c,Pedestrian Parsing via Deep Decompositional Network,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+85192977775e1f1001334a13de5d32736fbfd24c,Pedestrian Parsing via Deep Decompositional Network,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+85041e48b51a2c498f22850ce7228df4e2263372,Subspace Regression: Predicting a Subspace from One Sample,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+85c2de95080c1e8d955ac57f64a6b51ac186af32,Imputing human descriptions in semantic biometrics,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+85678d8aef7188bd59f18829de5b3980af7404b6,Deep Multi-task Learning to Recognise Subtle Facial Expressions of Mental States,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+85678d8aef7188bd59f18829de5b3980af7404b6,Deep Multi-task Learning to Recognise Subtle Facial Expressions of Mental States,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"Columbus, OH 43210, USA",40.01419050,-83.03091430,edu,
+85678d8aef7188bd59f18829de5b3980af7404b6,Deep Multi-task Learning to Recognise Subtle Facial Expressions of Mental States,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+857ad04fca2740b016f0066b152bd1fa1171483f,Sample Images can be Independently Restored from Face Recognition Templates,University of Ottawa,University of Ottawa,"University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada",45.42580475,-75.68740118,edu,
+8520da50e5e234c14272921868ff36d55e6c7837,Unsupervised Feature Selection on Data Streams,GE Global Research Center,GE Global Research Center,"GE Global Research Center, Aqueduct, Niskayuna, Schenectady County, New York, USA",42.82982480,-73.87719385,edu,
+8598e603438360884073fcf7b843ac489fad43b2,Emotion Recognition from Arbitrary View Facial Images,Southeast University,Southeast University,"SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国",32.05752790,118.78682252,edu,
+8598e603438360884073fcf7b843ac489fad43b2,Emotion Recognition from Arbitrary View Facial Images,Beckman Institute,Beckman Institute,"Beckman Institute, The Presidents' Walk, Urbana, Champaign County, Illinois, 61801-2341, USA",40.11571585,-88.22750772,edu,
+85f14bb2ed4b9d680ae4062cbd571752a1ff1dfa,Efficient 3D Face Recognition with Gabor Patched Spectral Regression,Institute of Information Science,Institute of Information Science,"資訊科學研究所, 數理大道, 中研里, 南港子, 南港區, 臺北市, 11574, 臺灣",25.04107280,121.61475620,edu,
+85f14bb2ed4b9d680ae4062cbd571752a1ff1dfa,Efficient 3D Face Recognition with Gabor Patched Spectral Regression,Beijing Jiaotong University,Beijing Jiaotong University,"北京交通大学, 银杏大道, 稻香园南社区, 海淀区, 北京市, 100044, 中国",39.94976005,116.33629046,edu,
+85af6c005df806b57b306a732dcb98e096d15bfb,Getting to Know Low-light Images with The Exclusively Dark Dataset,University of Malaya,University of Malaya,"UM, Lingkaran Wawasan, Bukit Pantai, Bangsar, KL, 50603, Malaysia",3.12267405,101.65356103,edu,
+8546885e83f7901340c7893fdfc017cef86d910a,Convolutional Long Short-Term Memory Networks for Recognizing First Person Interactions,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+85c7aab0f58f17816064699865cd0836bfbf2e82,A New Representation for Human Gait Recognition: Motion Silhouettes Image (MSI),Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+858adc1499e556dd4d2c65705dc62d2e3592b3bf,Semantic Feature Augmentation in Few-shot Learning,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+85b32c201ef787e9e28538f1bcbefe30ad785535,Recognition of Vehicles as Changes in Satellite Imagery,Middle East Technical University,Middle East Technical University,"ODTÜ, 1, 1591.sk(315.sk), Çiğdem Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87549675,32.78553506,edu,
+85b32c201ef787e9e28538f1bcbefe30ad785535,Recognition of Vehicles as Changes in Satellite Imagery,Middle East Technical University,Middle East Technical University,"ODTÜ, 1, 1591.sk(315.sk), Çiğdem Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87549675,32.78553506,edu,
+85b32c201ef787e9e28538f1bcbefe30ad785535,Recognition of Vehicles as Changes in Satellite Imagery,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+850d84e4c73a8f0762c8c798b2b7fd6f2787263a,The Discovery of Perceptual Structure from Visual Co - occurrences in Space and Time,Yale University,Yale University,"Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA",41.25713055,-72.98966960,edu,
+850d84e4c73a8f0762c8c798b2b7fd6f2787263a,The Discovery of Perceptual Structure from Visual Co - occurrences in Space and Time,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+853416c2a96ad46bdf3ef044f7a11e19d86fe073,Head Pose Classification in Crowded Scenes,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+85957b49896246bb416c0a182e52b355a8fa40b4,Feature Pyramid Network for Multi-Class Land Segmentation,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+85ac4459daedecf04c46c0fd90adf57238a5993a,MSRA-MM 2.0: A Large-Scale Web Multimedia Dataset,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+85ac4459daedecf04c46c0fd90adf57238a5993a,MSRA-MM 2.0: A Large-Scale Web Multimedia Dataset,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+856317f27248cdb20226eaae599e46de628fb696,A Method Based on Convex Cone Model for Image-Set Classification with CNN Features,University of Tsukuba,University of Tsukuba,"University of Tsukuba, つばき通り, Kananemoto-satsukabe village, つくば市, 茨城県, 関東地方, 305-8377, 日本",36.11120580,140.10551760,edu,
+852bdbcd091f48e07e9b989cb326e631e2932d7f,Visual scanning patterns and executive function in relation to facial emotion recognition in aging.,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+852bdbcd091f48e07e9b989cb326e631e2932d7f,Visual scanning patterns and executive function in relation to facial emotion recognition in aging.,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+852bdbcd091f48e07e9b989cb326e631e2932d7f,Visual scanning patterns and executive function in relation to facial emotion recognition in aging.,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+855184c789bca7a56bb223089516d1358823db0b,Automatic Procedure to Fix Closed-Eyes Image,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+85d8c16ccd76e2eec303f98f2d1ab239dc3947a2,Self Adversarial Training for Human Pose Estimation,National Tsing Hua University,National Tsing Hua University,"國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.79254840,120.99511830,edu,
+85476331edc9a9e3393f736f14aa80ad95f3c105,"""Wealth Makes Many Friends"": Children Expect More Giving From Resource-Rich Than Resource-Poor Individuals.",Yale University,Yale University,"Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA",41.25713055,-72.98966960,edu,
+85476331edc9a9e3393f736f14aa80ad95f3c105,"""Wealth Makes Many Friends"": Children Expect More Giving From Resource-Rich Than Resource-Poor Individuals.",Yale University,Yale University,"Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA",41.25713055,-72.98966960,edu,
+85639cefb8f8deab7017ce92717674d6178d43cc,Automatic Analysis of Spontaneous Facial Behavior: A Final Project Report,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+85674b1b6007634f362cbe9b921912b697c0a32c,Optimizing Facial Landmark Detection by Facial Attribute Learning,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+856c09ab10efbc8c61a84a951746654d947370f3,Human action recognition by learning bases of action attributes and parts,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+856c09ab10efbc8c61a84a951746654d947370f3,Human action recognition by learning bases of action attributes and parts,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+856c09ab10efbc8c61a84a951746654d947370f3,Human action recognition by learning bases of action attributes and parts,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+855cf31504da69daf03766b1357030dd07e485f7,Residual Dense Network for Image Super-Resolution,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+855cf31504da69daf03766b1357030dd07e485f7,Residual Dense Network for Image Super-Resolution,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+855cf31504da69daf03766b1357030dd07e485f7,Residual Dense Network for Image Super-Resolution,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+85dc159dd1eec52147b24f32f8ddab135abeb8ad,Visual Aesthetic Quality Assessment with Multi-task Deep Learning,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+851136b1d3f345d0d00c4ea36c66114444d04305,Sampling Representative Examples for Dimensionality Reduction and Recognition - Bootstrap Bumping LDA,Ohio State University,The Ohio State University,"The Ohio State University, Woody Hayes Drive, Columbus, Franklin County, Ohio, 43210, USA",40.00471095,-83.02859368,edu,
+1d21e5beef23eecff6fff7d4edc16247f0fd984a,Face Recognition from Video Using the Generic Shape-Illumination Manifold,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+1d2dab7790303bbe7894d0ff08ecf87d57b1fbca,A codebook-free and annotation-free approach for fine-grained image categorization,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+1d187e1d0e9eb874f85e3ecdb75ca0a7bd98d8bc,Aggression in young children with concurrent callous–unemotional traits: can the neurosciences,University of New South Wales,University of New South Wales,"UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia",-33.91758275,151.23124025,edu,
+1da7d851c8d6761b4e1ab3e037596969a295ae50,Fast search in Hamming space with multi-index hashing,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+1da2431a799f68888b7e035fe49fe47a4735b71b,Leveraging Video Descriptions to Learn Video Question Answering,National Tsing Hua University,National Tsing Hua University,"國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.79254840,120.99511830,edu,
+1da2431a799f68888b7e035fe49fe47a4735b71b,Leveraging Video Descriptions to Learn Video Question Answering,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+1dddfa634589e347648e79ae4e261af23553981e,Learning feed-forward one-shot learners,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+1dddfa634589e347648e79ae4e261af23553981e,Learning feed-forward one-shot learners,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+1dddfa634589e347648e79ae4e261af23553981e,Learning feed-forward one-shot learners,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+1dddfa634589e347648e79ae4e261af23553981e,Learning feed-forward one-shot learners,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+1dddfa634589e347648e79ae4e261af23553981e,Learning feed-forward one-shot learners,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+1d0ee1069bd433b5f754d70517d2e0fcc519515c,Propagative Hough Voting for Human Activity Recognition,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+1d1603a1ec73a9a0ff972f3898c94eed2c741e51,Pose Guided Person Image Generation,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+1d82e7736268917cc3d87a2ee0896b03e02a5ff6,The Promise of Premise: Harnessing Question Premises in Visual Question Answering,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+1dc35905a1deff8bc74688f2d7e2f48fd2273275,Pedestrian detection: A benchmark,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+1d3d05e294bb522b653bc6d11cb92d5c4140e41b,"AI Oriented Large-Scale Video Management for Smart City: Technologies, Standards and Beyond",Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+1d3d05e294bb522b653bc6d11cb92d5c4140e41b,"AI Oriented Large-Scale Video Management for Smart City: Technologies, Standards and Beyond",Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+1d3d05e294bb522b653bc6d11cb92d5c4140e41b,"AI Oriented Large-Scale Video Management for Smart City: Technologies, Standards and Beyond",City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+1d19c6857e798943cd0ecd110a7a0d514c671fec,Do Deep Neural Networks Learn Facial Action Units When Doing Expression Recognition?,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+1d1a7ef193b958f9074f4f236060a5f5e7642fc1,Ensemble of Patterns of Oriented Edge Magnitudes Descriptors For Face Recognition,University of Bologna,Università di Bologna,"Via Zamboni, 33, 40126 Bologna BO, Italy",44.49623180,11.35415700,edu,
+1dede3e0f2e0ed2984aca8cd98631b43c3f887b9,A vote of confidence based interest point detector,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+1da09ba7340c77b3f943c15f80ff40f6f9d14eeb,MRF-Based Background Initialisation for Improved Foreground Detection in Cluttered Surveillance Videos,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+1d696a1beb42515ab16f3a9f6f72584a41492a03,"Deeply learned face representations are sparse, selective, and robust",Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+1d696a1beb42515ab16f3a9f6f72584a41492a03,"Deeply learned face representations are sparse, selective, and robust",Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+1d696a1beb42515ab16f3a9f6f72584a41492a03,"Deeply learned face representations are sparse, selective, and robust",Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+1d1caaa2312390260f7d20ad5f1736099818d358,Resource-Allocating Codebook for patch-based face recognition,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+1df1aa9179506554744bf16b238d05ebd1e2d4d5,Abnormality in face scanning by children with autism spectrum disorder is limited to the eye region: evidence from multi-method analyses of eye tracking data.,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+1df1aa9179506554744bf16b238d05ebd1e2d4d5,Abnormality in face scanning by children with autism spectrum disorder is limited to the eye region: evidence from multi-method analyses of eye tracking data.,University of Delaware,University of Delaware,"University of Delaware, South College Avenue, Newark, New Castle County, Delaware, 19713, USA",39.68103280,-75.75401840,edu,
+1df1aa9179506554744bf16b238d05ebd1e2d4d5,Abnormality in face scanning by children with autism spectrum disorder is limited to the eye region: evidence from multi-method analyses of eye tracking data.,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+1df1aa9179506554744bf16b238d05ebd1e2d4d5,Abnormality in face scanning by children with autism spectrum disorder is limited to the eye region: evidence from multi-method analyses of eye tracking data.,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+1df1aa9179506554744bf16b238d05ebd1e2d4d5,Abnormality in face scanning by children with autism spectrum disorder is limited to the eye region: evidence from multi-method analyses of eye tracking data.,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+1df1aa9179506554744bf16b238d05ebd1e2d4d5,Abnormality in face scanning by children with autism spectrum disorder is limited to the eye region: evidence from multi-method analyses of eye tracking data.,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+1df1aa9179506554744bf16b238d05ebd1e2d4d5,Abnormality in face scanning by children with autism spectrum disorder is limited to the eye region: evidence from multi-method analyses of eye tracking data.,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+1dc241ee162db246882f366644171c11f7aed96d,Deep Action- and Context-Aware Sequence Learning for Activity Recognition and Anticipation,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+1d0128b9f96f4c11c034d41581f23eb4b4dd7780,Automatic construction Of robust spherical harmonic subspaces,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+1dbcf2181cb9890397c88c7cba20941af9019a20,Interpreting CNN Models for Apparent Personality Trait Regression,Universitat Oberta de Catalunya,Universitat Oberta de Catalunya,"Universitat Oberta de Catalunya, 156, Rambla del Poblenou, Provençals del Poblenou, Sant Martí, Barcelona, BCN, CAT, 08018, España",41.40657415,2.19453410,edu,
+1daf148a6d5d86e8cbe76a13311514f1338bdb0d,Image Inpainting via Generative Multi-column Convolutional Neural Networks,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+1d5b030747bd836aebf7a00ed061a2f7bdf0a84c,Discriminative Pose-Free Descriptors for Face and Object Matching,Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.02223470,77.56718325,edu,
+1d3dd9aba79a53390317ec1e0b7cd742cba43132,A maximum entropy feature descriptor for age invariant face recognition,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+1d3dd9aba79a53390317ec1e0b7cd742cba43132,A maximum entropy feature descriptor for age invariant face recognition,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+1d3dd9aba79a53390317ec1e0b7cd742cba43132,A maximum entropy feature descriptor for age invariant face recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+1d0dcb458aa4d30b51f7c74b159be687f39120a0,Pose-Driven Deep Convolutional Model for Person Re-identification,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+1d0dcb458aa4d30b51f7c74b159be687f39120a0,Pose-Driven Deep Convolutional Model for Person Re-identification,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+1d0dcb458aa4d30b51f7c74b159be687f39120a0,Pose-Driven Deep Convolutional Model for Person Re-identification,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+1d5aad4f7fae6d414ffb212cec1f7ac876de48bf,Face retriever: Pre-filtering the gallery via deep neural net,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+1db23a0547700ca233aef9cfae2081cd8c5a04d7,Comparative study and evaluation of various data classification techniques in data mining,Raipur Institute of Technology,Raipur Institute of Technology,"Raipur Institute of Technology, NH53, Raipur, Chhattisgarh, 492101, India",21.22622430,81.80136640,edu,
+1db23a0547700ca233aef9cfae2081cd8c5a04d7,Comparative study and evaluation of various data classification techniques in data mining,Raipur Institute of Technology,Raipur Institute of Technology,"Raipur Institute of Technology, NH53, Raipur, Chhattisgarh, 492101, India",21.22622430,81.80136640,edu,
+1d93f7de9f6d2daa77d844dd928aaa1e699ed312,Visual Concept Learning: Combining Machine Vision and Bayesian Generalization on Concept Hierarchies,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+1dfa8cce7b8dfd4b954d3fd90bef7bf569c87fb8,Robust Object Tracking in Crowd Dynamic Scenes Using Explicit Stereo Depth,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+1dfa8cce7b8dfd4b954d3fd90bef7bf569c87fb8,Robust Object Tracking in Crowd Dynamic Scenes Using Explicit Stereo Depth,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+1dea4f56c04d12abbc9e1ed7c48c7ccc09e7f5bb,How magic changes our expectations about autism.,Cardiff University,Cardiff University,"Cardiff University, Park Place, Castle, Cardiff, Wales, CF, UK",51.48799610,-3.17969747,edu,
+1d97735bb0f0434dde552a96e1844b064af08f62,Weber binary pattern and Weber ternary pattern for illumination-robust face recognition,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+1d81fe4a386a7d96b256eac41b99604cd132e019,Variational Walkback: Learning a Transition Operator as a Stochastic Recurrent Net,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+1d630cc482f7a261738eb8b3b2021cf27c38370e,Robust Pedestrian Classification Based on Hierarchical Kernel Sparse Representation,Hefei University of Technology,Hefei University of Technology,"合肥工业大学(屯溪路校区), 193号, 南一环路, 航运南村, 包公街道, 合肥市区, 合肥市, 安徽省, 230009, 中国",31.84691800,117.29053367,edu,
+1d630cc482f7a261738eb8b3b2021cf27c38370e,Robust Pedestrian Classification Based on Hierarchical Kernel Sparse Representation,Hefei University of Technology,Hefei University of Technology,"合肥工业大学(屯溪路校区), 193号, 南一环路, 航运南村, 包公街道, 合肥市区, 合肥市, 安徽省, 230009, 中国",31.84691800,117.29053367,edu,
+1d8c720c95096981edcdfe57941123dca515eb34,Video-Based Person Re-identification by Deep Feature Guided Pooling,Beijing University of Technology,Beijing University of Technology,"北京工业大学, 银杏大道, 大郊亭村, 朝阳区 / Chaoyang, 北京市, 3208, 中国",39.87391435,116.47722285,edu,
+1d8c720c95096981edcdfe57941123dca515eb34,Video-Based Person Re-identification by Deep Feature Guided Pooling,University of Texas at San Antonio,University of Texas at San Antonio,"UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA",29.58333105,-98.61944505,edu,
+1dcf08c37fe2e8e78d3f1857547a965a0ac29526,2D ear classification based on unsupervised clustering,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+1d730a452a5c03cc23f90d4fde71c08864f31c35,Using Machine Learning for Identification of Art Paintings,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+1d730a452a5c03cc23f90d4fde71c08864f31c35,Using Machine Learning for Identification of Art Paintings,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+1ddacefa549de21f734f43016115ce7d54ab3d94,Supervised hashing with latent factor models,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+1ddacefa549de21f734f43016115ce7d54ab3d94,Supervised hashing with latent factor models,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+1ddacefa549de21f734f43016115ce7d54ab3d94,Supervised hashing with latent factor models,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+1ddacefa549de21f734f43016115ce7d54ab3d94,Supervised hashing with latent factor models,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+1d9b6745c0fd793db6dda8975b498ca517961d25,Visual Reasoning with Natural Language,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+1df84bf495d15569258513f229325d922b91e045,Generalization Properties of hyper-RKHS and its Application to Out-of-Sample Extensions,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+1df84bf495d15569258513f229325d922b91e045,Generalization Properties of hyper-RKHS and its Application to Out-of-Sample Extensions,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+1df84bf495d15569258513f229325d922b91e045,Generalization Properties of hyper-RKHS and its Application to Out-of-Sample Extensions,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+1dacc2f4890431d867a038fd81c111d639cf4d7e,Using social outcomes to inform decision-making in schizophrenia: Relationships with symptoms and functioning.,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+1d911007a6f2832e006773f247fad1f729d1c6ae,Parametric T-Spline Face Morphable Model for Detailed Fitting in Shape Subspace,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+1de690714f143a8eb0d6be35d98390257a3f4a47,Face detection using spectral histograms and SVMs,Florida State University,Florida State University,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA",30.44235995,-84.29747867,edu,
+1d5f22e73aa0d8115af0be61fc8832de501f4a1b,Comparison of 3D Scanning Versus 2D Photography for the Identification of Facial Soft-Tissue Landmarks,University of Hong Kong,University of Hong Kong,"海洋科學研究所 The Swire Institute of Marine Science, 鶴咀道 Cape D'Aguilar Road, 鶴咀低電台 Cape D'Aguilar Low-Level Radio Station, 石澳 Shek O, 芽菜坑村 Nga Choy Hang Tsuen, 南區 Southern District, 香港島 Hong Kong Island, HK, 中国",22.20814690,114.25964115,edu,
+1de800d988f32380c54e430636ebf8913eadcc98,Predicting Images using Convolutional Networks: Visual Scene Understanding with Pixel Maps,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+1d7df3df839a6aa8f5392310d46b2a89080a3c25,Large-Margin Softmax Loss for Convolutional Neural Networks,South China University of Technology,South China University of Technology,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.05020420,113.39880323,edu,
+1d7df3df839a6aa8f5392310d46b2a89080a3c25,Large-Margin Softmax Loss for Convolutional Neural Networks,Shenzhen University,Shenzhen University,"深圳大学, 3688, 南海大道, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518060, 中国",22.53521465,113.93159110,edu,
+1d6c09019149be2dc84b0c067595f782a5d17316,Encoding Video and Label Priors for Multi-label Video Classification on YouTube-8M dataset,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+1d6c09019149be2dc84b0c067595f782a5d17316,Encoding Video and Label Priors for Multi-label Video Classification on YouTube-8M dataset,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+1d6c09019149be2dc84b0c067595f782a5d17316,Encoding Video and Label Priors for Multi-label Video Classification on YouTube-8M dataset,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+1d6c09019149be2dc84b0c067595f782a5d17316,Encoding Video and Label Priors for Multi-label Video Classification on YouTube-8M dataset,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+719969807953d7ea8bda0397b1aadbaa6e205718,Automatic Dataset Augmentation,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+719969807953d7ea8bda0397b1aadbaa6e205718,Automatic Dataset Augmentation,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+71b973c87965e4086e75fd2379dd1bd8e3f8231e,Progressive Attention Networks for Visual Attribute Prediction,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+71b07c537a9e188b850192131bfe31ef206a39a0,300 Faces In-The-Wild Challenge: database and results,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+71b07c537a9e188b850192131bfe31ef206a39a0,300 Faces In-The-Wild Challenge: database and results,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+71b07c537a9e188b850192131bfe31ef206a39a0,300 Faces In-The-Wild Challenge: database and results,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+7180cb0c2773be3c15cc2737fed0fe19b08e1538,Mapping the emotional face. How individual face parts contribute to successful emotion recognition,Bielefeld University,Bielefeld University,"Fachhochschule Bielefeld FB Gestaltung, 3, Lampingstraße, Mitte, Bielefeld, Regierungsbezirk Detmold, Nordrhein-Westfalen, 33615, Deutschland",52.02804210,8.51148270,edu,
+7180cb0c2773be3c15cc2737fed0fe19b08e1538,Mapping the emotional face. How individual face parts contribute to successful emotion recognition,Bielefeld University,Bielefeld University,"Fachhochschule Bielefeld FB Gestaltung, 3, Lampingstraße, Mitte, Bielefeld, Regierungsbezirk Detmold, Nordrhein-Westfalen, 33615, Deutschland",52.02804210,8.51148270,edu,
+71899cbc5b4b25b2e834919f58d7620484d7e848,Predicting Geo-informative Attributes in Large-Scale Image Collections Using Convolutional Neural Networks,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+7142ac9e4d5498037aeb0f459f278fd28dae8048,Semi-Supervised Learning for Optical Flow with Generative Adversarial Networks,"University of California, Merced","University of California, Merced","University of California, Merced, Ansel Adams Road, Merced County, California, USA",37.36566745,-120.42158888,edu,
+71f36c8e17a5c080fab31fce1ffea9551fc49e47,Predicting Failures of Vision Systems,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+71559cae0bc89398e75a2f24674d61cb51909390,Relighting Humans : Occlusion-Aware Inverse Rendering for Full-Body Human Images,University of Tsukuba,University of Tsukuba,"University of Tsukuba, つばき通り, Kananemoto-satsukabe village, つくば市, 茨城県, 関東地方, 305-8377, 日本",36.11120580,140.10551760,edu,
+71559cae0bc89398e75a2f24674d61cb51909390,Relighting Humans : Occlusion-Aware Inverse Rendering for Full-Body Human Images,University of Tsukuba,University of Tsukuba,"University of Tsukuba, つばき通り, Kananemoto-satsukabe village, つくば市, 茨城県, 関東地方, 305-8377, 日本",36.11120580,140.10551760,edu,
+7117ed0be436c0291bc6fb6ea6db18de74e2464a,Spatial Transformations,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+71797806fb9685a9a743c84c9e859948f7c6a77b,Learning to Describe Differences Between Pairs of Similar Images,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+71bf455415f283dc70a2f0343fa8387acbf00fb2,Multimodal Generative Models for Scalable Weakly-Supervised Learning,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+71bf455415f283dc70a2f0343fa8387acbf00fb2,Multimodal Generative Models for Scalable Weakly-Supervised Learning,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+71644fab2275cfd6a8f770a26aba4e6228e85dec,Multi-View Discriminant Analysis,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+71e6a46b32a8163c9eda69e1badcee6348f1f56a,Visually Interpreting Names as Demographic Attributes by Exploiting Click-Through Data,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+713594c18978b965be87651bb553c28f8501df0a,Fast Proximal Linearized Alternating Direction Method of Multiplier with Parallel Splitting,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+713594c18978b965be87651bb553c28f8501df0a,Fast Proximal Linearized Alternating Direction Method of Multiplier with Parallel Splitting,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+714a420173f328999c3b81fb70ce85be925b725f,Accelerating Dynamic Programs via Nested Benders Decomposition with Application to Multi-Person Pose Estimation,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+718824256b4461d62d192ab9399cfc477d3660b4,Selecting Training Data for Cross-Corpus Speech Emotion Recognition: Prototypicality vs. Generalization,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+719b741280607f258707d102feeb53dacf00ff8b,RAID: a relation-augmented image descriptor,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+719b741280607f258707d102feeb53dacf00ff8b,RAID: a relation-augmented image descriptor,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+718d3137adba9e3078fa1f698020b666449f3336,Accuracy Based Feature Ranking Metric for Multi-Label Text Classification,University of Gujrat,University of Gujrat,"University of Gujrat, University Road, Chandhar, Gujrāt District, پنجاب, 50700, ‏پاکستان‎",32.63744845,74.16174558,edu,
+718d3137adba9e3078fa1f698020b666449f3336,Accuracy Based Feature Ranking Metric for Multi-Label Text Classification,University of Gujrat,University of Gujrat,"University of Gujrat, University Road, Chandhar, Gujrāt District, پنجاب, 50700, ‏پاکستان‎",32.63744845,74.16174558,edu,
+71354f47df241ad2e8b6c065f89f1c5afe077530,Eyemotion: Classifying facial expressions in VR using eye-tracking cameras,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+716d6c2eb8a0d8089baf2087ce9fcd668cd0d4c0,Pose-Robust 3D Facial Landmark Estimation from a Single 2D Image,University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+7143518f847b0ec57a0ff80e0304c89d7e924d9a,Speeding-Up Age Estimation in Intelligent Demographics System via Network Optimization,Hefei University of Technology,Hefei University of Technology,"合肥工业大学(屯溪路校区), 193号, 南一环路, 航运南村, 包公街道, 合肥市区, 合肥市, 安徽省, 230009, 中国",31.84691800,117.29053367,edu,
+7143518f847b0ec57a0ff80e0304c89d7e924d9a,Speeding-Up Age Estimation in Intelligent Demographics System via Network Optimization,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+71de9b3b8f482863d544da0f26ac2876b4fc210a,Who Are Raising Their Hands ? Hand-Raiser Seeking Based on Object Detection and Pose Estimation,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+71912976a7a4a5321b7e7ea20163fe3928cc5b71,Predicting When Saliency Maps are Accurate and Eye Fixations Consistent,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+71912976a7a4a5321b7e7ea20163fe3928cc5b71,Predicting When Saliency Maps are Accurate and Eye Fixations Consistent,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+713db3874b77212492d75fb100a345949f3d3235,Deep Semantic Face Deblurring,"University of California, Merced","University of California, Merced","University of California, Merced, Ansel Adams Road, Merced County, California, USA",37.36566745,-120.42158888,edu,
+7150323712cb700a68e7365a9c627b55c2c262dc,SlideNet: Fast and Accurate Slide Quality Assessment Based on Deep Neural Networks,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+71969ee27916d545c63fe852946dd6dcc015d1a8,Who are the Devils Wearing Prada in New York City?,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+71969ee27916d545c63fe852946dd6dcc015d1a8,Who are the Devils Wearing Prada in New York City?,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+71969ee27916d545c63fe852946dd6dcc015d1a8,Who are the Devils Wearing Prada in New York City?,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+71969ee27916d545c63fe852946dd6dcc015d1a8,Who are the Devils Wearing Prada in New York City?,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+71969ee27916d545c63fe852946dd6dcc015d1a8,Who are the Devils Wearing Prada in New York City?,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+715b69575dadd7804b4f8ccb419a3ad8b7b7ca89,Testing Separability and Independence of Perceptual Dimensions with General Recognition Theory: A Tutorial and New R Package (grtools),Florida International University,Florida International University,"FIU, Southwest 14th Street, Sweetwater, University Park, Miami-Dade County, Florida, 33199, USA",25.75533775,-80.37628897,edu,
+715b69575dadd7804b4f8ccb419a3ad8b7b7ca89,Testing Separability and Independence of Perceptual Dimensions with General Recognition Theory: A Tutorial and New R Package (grtools),Florida International University,Florida International University,"FIU, Southwest 14th Street, Sweetwater, University Park, Miami-Dade County, Florida, 33199, USA",25.75533775,-80.37628897,edu,
+715c7187b27b452424379254f5dc55909913b339,The Amazing Mysteries of the Gutter: Drawing Inferences Between Panels in Comic Book Narratives,"University of Colorado, Boulder","University of Colorado, Boulder","Naropa University, Arapahoe Avenue, The Hill, Boulder, Boulder County, Colorado, 80309, USA",40.01407945,-105.26695944,edu,
+715c7187b27b452424379254f5dc55909913b339,The Amazing Mysteries of the Gutter: Drawing Inferences Between Panels in Comic Book Narratives,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+71167cf519940a7373adc221401c396198763ab0,"Scenes-Objects-Actions: A Multi-task, Multi-label Video Dataset",Dartmouth College,Dartmouth College,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA",43.70479270,-72.29259090,edu,
+71e56f2aebeb3c4bb3687b104815e09bb4364102,Video Co-segmentation for Meaningful Action Extraction,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+710c30c6c05ad1c9c0858f42364e9ca3f8e70bb4,"Classification of Land Use on Sand-Dune Topography by Object-Based Analysis, Digital Photogrammetry, and GIS Analysis in the Horqin Sandy Land, China",University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+710c30c6c05ad1c9c0858f42364e9ca3f8e70bb4,"Classification of Land Use on Sand-Dune Topography by Object-Based Analysis, Digital Photogrammetry, and GIS Analysis in the Horqin Sandy Land, China",Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+710c30c6c05ad1c9c0858f42364e9ca3f8e70bb4,"Classification of Land Use on Sand-Dune Topography by Object-Based Analysis, Digital Photogrammetry, and GIS Analysis in the Horqin Sandy Land, China",University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+7127f9e9a51236f213c5b7805be8714a3bcbfc28,Cross-Domain Self-supervised Multi-task Feature Learning using Synthetic Imagery,"University of California, Davis","University of California, Davis","University of California, Davis, Apiary Drive, Yolo County, California, 95616-5270, USA",38.53363490,-121.79077264,edu,
+71b038958df0b7855fc7b8b8e7dcde8537a7c1ad,Kernel Methods for Unsupervised Domain Adaptation by Boqing Gong,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+71ed20748c919cd261024b146992ced4c9c2157b,Learning Semantic Patterns with Discriminant Localized Binary Projections,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+71ed20748c919cd261024b146992ced4c9c2157b,Learning Semantic Patterns with Discriminant Localized Binary Projections,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+71ed20748c919cd261024b146992ced4c9c2157b,Learning Semantic Patterns with Discriminant Localized Binary Projections,Beckman Institute,Beckman Institute,"Beckman Institute, The Presidents' Walk, Urbana, Champaign County, Illinois, 61801-2341, USA",40.11571585,-88.22750772,edu,
+71f9bed14188d861f248fb426a26a3a0b400843a,Robot-Centric Activity Prediction from First-Person Videos: What Will They Do to Me',California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+71f9bed14188d861f248fb426a26a3a0b400843a,Robot-Centric Activity Prediction from First-Person Videos: What Will They Do to Me',University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+71392858b6af5b50b1cd7c740560697101f60e46,Classifiers Combination for Improved Motion Segmentation,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+71cbe1b52e2fdb8fa8a8278eb590f8065d3e7fcb,’ Actions dans des Vidéos Réalistes Structured Models for Action Recognition in Real-world Videos,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+719e58a8b256cdcc88f7980e4798fe8e6aa1a808,Confidence Intervals for Tracking Performance Scores,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+767936728b07238bbf38661fc3c2000d0c17b598,An Own-Age Bias in Recognizing Faces with Horizontal Information,Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+76f7664511917bb575081ad3555e383de54562f1,'Lighter' Can Still Be Dark: Modeling Comparative Color Descriptions,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+76f7664511917bb575081ad3555e383de54562f1,'Lighter' Can Still Be Dark: Modeling Comparative Color Descriptions,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+760a8a46089ca9fc7d06ea44b207b948569237ba,Learning a Deep Embedding Model for Zero-Shot Learning,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+76b6577f47d6782bf75aca04e361a7b7381b4a84,Measuring and Modifying the Intrinsic Memorability of Images,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+76b5ce50ab603a6d175fd21f4b1404dff3c897c2,Adult Image and Video Recognition by a Deep Multicontext Network and Fine-to-Coarse Strategy,Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+76b5ce50ab603a6d175fd21f4b1404dff3c897c2,Adult Image and Video Recognition by a Deep Multicontext Network and Fine-to-Coarse Strategy,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+76a2846d17521149a118fd54083f8a51646e2804,Context-aware Deep Feature Compression for High-speed Visual Tracking,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+76a2846d17521149a118fd54083f8a51646e2804,Context-aware Deep Feature Compression for High-speed Visual Tracking,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+76a2846d17521149a118fd54083f8a51646e2804,Context-aware Deep Feature Compression for High-speed Visual Tracking,University of Birmingham,University of Birmingham,"University of Birmingham Edgbaston Campus, Ring Road North, Bournbrook, Birmingham, West Midlands Combined Authority, West Midlands, England, B15 2TP, UK",52.45044325,-1.93196134,edu,
+7643861bb492bf303b25d0306462f8fb7dc29878,Speeding up 2D-warping for pose-invariant face recognition,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+76229d9e68bca10f3876f351856d6911857be827,Robust features for facial action recognition,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+76229d9e68bca10f3876f351856d6911857be827,Robust features for facial action recognition,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+76cd878c37bcdb8b3ae678e96c9b7700184ddb46,Athlete Pose Estimation from Monocular TV Sports Footage,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+76cd878c37bcdb8b3ae678e96c9b7700184ddb46,Athlete Pose Estimation from Monocular TV Sports Footage,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+76cd878c37bcdb8b3ae678e96c9b7700184ddb46,Athlete Pose Estimation from Monocular TV Sports Footage,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+76b227facbcd75cda35cb5bb8063d8d5cfcec4d0,Expression Recognition Using the Periocular Region: A Feasibility Study,Halmstad University,Halmstad University,"Högskolan i Halmstad, 3, Kristian IV:s väg, Larsfrid, Nyhem, Halmstad, Hallands län, Götaland, 301 18, Sverige",56.66340325,12.87929727,edu,
+76b227facbcd75cda35cb5bb8063d8d5cfcec4d0,Expression Recognition Using the Periocular Region: A Feasibility Study,Halmstad University,Halmstad University,"Högskolan i Halmstad, 3, Kristian IV:s väg, Larsfrid, Nyhem, Halmstad, Hallands län, Götaland, 301 18, Sverige",56.66340325,12.87929727,edu,
+76ad6daa899a8657c9c17480e5fc440fda53acec,A Multi-Task Deep Network for Person Re-Identification,University of Dundee,University of Dundee,"University of Dundee, Park Wynd, Law, Dundee, Dundee City, Scotland, DD1 4HN, UK",56.45796755,-2.98214831,edu,
+7697c8a0eea8b4f7e9b5c3378879cf34ba6d79b3,On Decomposing an Unseen 3D Face into Neutral Face and Expression Deformations,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+76ce3d35d9370f0e2e27cfd29ea0941f1462895f,Efficient Parallel Implementation of Active Appearance Model Fitting Algorithm on GPU,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+76616a2709c03ade176db31fa99c7c61970eba28,Learning Heterogeneous Dictionary Pair with Feature Projection Matrix for Pedestrian Video Retrieval via Single Query Image,Wuhan University of Technology,Wuhan University of Technology,"武汉理工大学-余家头校区, 交通二路, 杨园街道, 武昌区 (Wuchang), 武汉市, 湖北省, 430062, 中国",30.60903415,114.35142840,edu,
+76616a2709c03ade176db31fa99c7c61970eba28,Learning Heterogeneous Dictionary Pair with Feature Projection Matrix for Pedestrian Video Retrieval via Single Query Image,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+76616a2709c03ade176db31fa99c7c61970eba28,Learning Heterogeneous Dictionary Pair with Feature Projection Matrix for Pedestrian Video Retrieval via Single Query Image,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+76616a2709c03ade176db31fa99c7c61970eba28,Learning Heterogeneous Dictionary Pair with Feature Projection Matrix for Pedestrian Video Retrieval via Single Query Image,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+768c332650a44dee02f3d1d2be1debfa90a3946c,Bayesian face recognition using support vector machine and face clustering,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+769461ff717d987482b28b32b1e2a6e46570e3ff,MIC-TJU in MediaEval 2017 Emotional Impact of Movies Task,Tongji University,Tongji University,"同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国",31.28473925,121.49694909,edu,
+7605857f551d128e7c3babfc019950250f81bca9,Reciprocal Attention Fusion for Visual Question Answering,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+765094fa3cd745bed29c20dad92dbed8c4cfebea,Applying Biometric Principles to Avatar Recognition,University of Calgary,University of Calgary,"University of Calgary, Service Tunnel, University Heights, Calgary, Alberta, T2N 1N7, Canada",51.07840380,-114.12870770,edu,
+765094fa3cd745bed29c20dad92dbed8c4cfebea,Applying Biometric Principles to Avatar Recognition,University of Louisville,University of Louisville,"University of Louisville, South Brook Street, Louisville, Jefferson County, Kentucky, 40208, USA",38.21675650,-85.75725023,edu,
+76e48cd3b4b25cdb6c094ff660ed8e43be1e2f34,What-and-Where to Match: Deep Spatially Multiplicative Integration Networks for Person Re-identification,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+76d955e83b1d64de95f37336322cbbca0019e3b2,Robust and Efficient Subspace Segmentation via Least Squares Regression,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+76d955e83b1d64de95f37336322cbbca0019e3b2,Robust and Efficient Subspace Segmentation via Least Squares Regression,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+76d955e83b1d64de95f37336322cbbca0019e3b2,Robust and Efficient Subspace Segmentation via Least Squares Regression,Hefei University of Technology,Hefei University of Technology,"合肥工业大学(屯溪路校区), 193号, 南一环路, 航运南村, 包公街道, 合肥市区, 合肥市, 安徽省, 230009, 中国",31.84691800,117.29053367,edu,
+76d955e83b1d64de95f37336322cbbca0019e3b2,Robust and Efficient Subspace Segmentation via Least Squares Regression,Tongji University,Tongji University,"同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国",31.28473925,121.49694909,edu,
+765263556ce90c5c0d86d3e6d8a21e04a307b60d,Comparing Visual Feature Coding for Learning Disjoint Camera Dependencies,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+764882e6779fbee29c3d87e00302befc52d2ea8d,Deep Approximately Orthogonal Nonnegative Matrix Factorization for Clustering,Guangdong University of Technology,Guangdong University of Technology,"广东工业大学, 东风东路, 黄花岗街道, 越秀区 (Yuexiu), 广州市, 广东省, 510080, 中国",23.13538360,113.29470496,edu,
+764882e6779fbee29c3d87e00302befc52d2ea8d,Deep Approximately Orthogonal Nonnegative Matrix Factorization for Clustering,Guangdong University of Technology,Guangdong University of Technology,"广东工业大学, 东风东路, 黄花岗街道, 越秀区 (Yuexiu), 广州市, 广东省, 510080, 中国",23.13538360,113.29470496,edu,
+764882e6779fbee29c3d87e00302befc52d2ea8d,Deep Approximately Orthogonal Nonnegative Matrix Factorization for Clustering,Guangdong University of Technology,Guangdong University of Technology,"广东工业大学, 东风东路, 黄花岗街道, 越秀区 (Yuexiu), 广州市, 广东省, 510080, 中国",23.13538360,113.29470496,edu,
+76d939f73a327bf1087d91daa6a7824681d76ea1,A Thermal Facial Emotion Database and Its Analysis,Japan Advanced Institute of Science and Technology,Japan Advanced Institute of Science and Technology,"JAIST (北陸先端科学技術大学院大学), 石川県道55号小松辰口線, Ishikawa Science Park, 能美市, 石川県, 中部地方, 923-1206, 日本",36.44429490,136.59285870,edu,
+7673d5fa77770629d040fae54c214c60ba69574c,Moving Object Detection from Mobile Platforms Using Stereo Data Registration,University of the Basque Country,University of the Basque Country,"Euskal Herriko Unibertsitatea, Ibaeta Campusa, Paseo Arriola pasealekua, Ibaeta, Donostia/San Sebastián, Donostialdea, Gipuzkoa, Euskadi, 20008, España",43.30927695,-2.01066785,edu,
+76fd59062e563353097694d38855e94efbd53143,3D Face Reconstruction from Light Field Images: A Model-Free Approach,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+76fd59062e563353097694d38855e94efbd53143,3D Face Reconstruction from Light Field Images: A Model-Free Approach,Hunan University,Hunan University,"Yejin University for Employees, 冶金西路, 和平乡, 珠晖区, 衡阳市 / Hengyang, 湖南省, 中国",26.88111275,112.62850666,edu,
+766039c203f76009c5efabe7b24914cc66fe117f,"""BAM!"" Depth-Based Body Analysis in Critical Care",Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+76615d7bc69ef0e50338a8c3e59c75d361ef0db4,Learning a compact latent representation of the Bag-of-Parts model,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+7688187b1ce5cbb1413d075f435ff294ba09cadc,Robust Precise Eye Location by Adaboost and SVM Techniques,Dalian University of Technology,Dalian University of Technology,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国",38.88140235,121.52281098,edu,
+1c9a61c8ec255d033201fb9b394b283a6b6acacc,Structured Feature Learning for Pose Estimation,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+1cf53b650c4a3e212bd6f25e3c9fe8c757862a7d,Human Pose Estimation via Convolutional Part Heatmap Regression,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+1cc902dc999103c8ed27559affa5cdaed6fc2c38,Analysing comparative soft biometrics from crowdsourced annotations,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+1c6e067098fa86ee3f96365f28669b06f9ce0c7a,Object Detection from Video Tubelets with Convolutional Neural Networks,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+1c05dc0f73f424561c488a282c711827047459c4,Supervised trace lasso for robust face recognition,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+1c6690ab404b23d5026dd3ad0c7a49ce2875c1b3,Anchors: High-Precision Model-Agnostic Explanations,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+1c6690ab404b23d5026dd3ad0c7a49ce2875c1b3,Anchors: High-Precision Model-Agnostic Explanations,"University of California, Irvine","University of California, Irvine","University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA",33.64319010,-117.84016494,edu,
+1c6690ab404b23d5026dd3ad0c7a49ce2875c1b3,Anchors: High-Precision Model-Agnostic Explanations,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+1c9efb6c895917174ac6ccc3bae191152f90c625,Unifying Identification and Context Learning for Person Recognition,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+1c2724243b27a18a2302f12dea79d9a1d4460e35,Fisher+Kernel criterion for discriminant analysis,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+1c2724243b27a18a2302f12dea79d9a1d4460e35,Fisher+Kernel criterion for discriminant analysis,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+1c2724243b27a18a2302f12dea79d9a1d4460e35,Fisher+Kernel criterion for discriminant analysis,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+1ca8c09abb73a02519d8db77e4fe107acfc589b6,Automatic Understanding of Image and Video Advertisements,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+1cf4abbd052c94e63557b7922f7a5fc7e22c6e3f,Multimodal Similarity-Preserving Hashing,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+1c4ceae745fe812d8251fda7aad03210448ae25e,Optimization of Color Conversion for Face Recognition,Virginia Polytechnic Institute and State University,Virginia Polytechnic Institute and State University,"Virginia Polytechnic Institute and State University, Duck Pond Drive, Blacksburg, Montgomery County, Virginia, 24061-9517, USA",37.21872455,-80.42542519,edu,
+1c4ceae745fe812d8251fda7aad03210448ae25e,Optimization of Color Conversion for Face Recognition,Virginia Polytechnic Institute and State University,Virginia Polytechnic Institute and State University,"Virginia Polytechnic Institute and State University, Duck Pond Drive, Blacksburg, Montgomery County, Virginia, 24061-9517, USA",37.21872455,-80.42542519,edu,
+1c1aa29b709370f78cc485b14c18b89a53229b62,Topological Data Mapping for Improved Generalization Capabilities using Counter Propagation Networks,Akita Prefectural University,Akita Prefectural University,"秋田県立大学, 秋田天王線, 潟上市, 秋田県, 東北地方, 011-0946, 日本",39.80114990,140.04591160,edu,
+1c408790a7bc5cc8b0c2e23668ad326d0ccbebd4,Automatic detection of pain intensity,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+1c408790a7bc5cc8b0c2e23668ad326d0ccbebd4,Automatic detection of pain intensity,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+1cee993dc42626caf5dbc26c0a7790ca6571d01a,Optimal illumination for image and video relighting,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+1c71e653f86b06eb7d5b1d92694f34e6f57173de,Enhanced Attacks on Defensively Distilled Deep Neural Networks,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+1c147261f5ab1b8ee0a54021a3168fa191096df8,Face Recognition across Time Lapse Using Convolutional Neural Networks,George Mason University,George Mason University,"George Mason University, Aquia Creek Lane, Country Club View, Blue Oaks, Fairfax County, Virginia, 22030-9998, USA",38.83133325,-77.30798839,edu,
+1c17450c4d616e1e1eece248c42eba4f87de9e0d,Automatic Age Estimation from Face Images via Deep Ranking,Institute of Information Science,Institute of Information Science,"資訊科學研究所, 數理大道, 中研里, 南港子, 南港區, 臺北市, 11574, 臺灣",25.04107280,121.61475620,edu,
+1c686359a30e68183d1b23e069c56a7c0b1fdae3,3D Human Pose Estimation from Monocular Images with Deep Convolutional Neural Network,City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+1c41965c5e1f97b1504c1bdde8037b5e0417da5e,Interaction-aware Spatio-temporal Pyramid Attention Networks for Action Classification,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+1c1f21bf136fe2eec412e5f70fd918c27c5ccb0a,Object Detection and Viewpoint Estimation with Auto-masking Neural Network,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+1c1f21bf136fe2eec412e5f70fd918c27c5ccb0a,Object Detection and Viewpoint Estimation with Auto-masking Neural Network,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+1cbd3f96524ca2258fd2d5c504c7ea8da7fb1d16,Fusion of Audio-visual Features using Hierarchical Classifier Systems for the Recognition of Affective States and the State of Depression,Ulm University,Ulm University,"HNU, John-F.-Kennedy-Straße, Vorfeld, Wiley, Neu-Ulm, Landkreis Neu-Ulm, Schwaben, Bayern, 89231, Deutschland",48.38044335,10.01010115,edu,
+1cad5d682393ffbb00fd26231532d36132582bb4,"ZHENHENG YANG, JIYANG GAO, RAM NEVATIA: SPATIO-TEMPORAL ACTION DETECTION WITH CASCADE PROPOSAL AND LOCATION ANTICIPATION1 Spatio-Temporal Action Detection with Cascade Proposal and Location Anticipation",University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+1c2802c2199b6d15ecefe7ba0c39bfe44363de38,Personalizing Human Video Pose Estimation,University of Leeds,University of Leeds,"University of Leeds, Inner Ring Road, Woodhouse, Leeds, Yorkshire and the Humber, England, LS2 9NS, UK",53.80387185,-1.55245712,edu,
+1c2802c2199b6d15ecefe7ba0c39bfe44363de38,Personalizing Human Video Pose Estimation,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+1c2802c2199b6d15ecefe7ba0c39bfe44363de38,Personalizing Human Video Pose Estimation,University of Leeds,University of Leeds,"University of Leeds, Inner Ring Road, Woodhouse, Leeds, Yorkshire and the Humber, England, LS2 9NS, UK",53.80387185,-1.55245712,edu,
+1c2802c2199b6d15ecefe7ba0c39bfe44363de38,Personalizing Human Video Pose Estimation,University of Leeds,University of Leeds,"University of Leeds, Inner Ring Road, Woodhouse, Leeds, Yorkshire and the Humber, England, LS2 9NS, UK",53.80387185,-1.55245712,edu,
+1c2802c2199b6d15ecefe7ba0c39bfe44363de38,Personalizing Human Video Pose Estimation,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+1c42f8ab39e22225ffd3222baeba4863435220a0,Differentiable Learning-to-Normalize via Switchable Normalization,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+1c1a98df3d0d5e2034ea723994bdc85af45934db,Guided Unsupervised Learning of Mode Specific Models for Facial Point Detection in the Wild,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+1ca815327e62c70f4ee619a836e05183ef629567,Global supervised descent method,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+1c8d585fb7e82abf43f45014494018a843774d2b,Consistent Iterative Multi-view Transfer Learning for Person Re-identification,Tongji University,Tongji University,"同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国",31.28473925,121.49694909,edu,
+1c8d585fb7e82abf43f45014494018a843774d2b,Consistent Iterative Multi-view Transfer Learning for Person Re-identification,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+1c8d585fb7e82abf43f45014494018a843774d2b,Consistent Iterative Multi-view Transfer Learning for Person Re-identification,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+1c028833faf11dd565c749741eb97ce811b490de,Person re-identification by probabilistic relative distance comparison,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+1c028833faf11dd565c749741eb97ce811b490de,Person re-identification by probabilistic relative distance comparison,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+1c7a050394371bcb064868dfe681ff4c29ce2101,Expressive Models and Comprehensive Benchmark for 2D Human Pose Estimation,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+1c7a050394371bcb064868dfe681ff4c29ce2101,Expressive Models and Comprehensive Benchmark for 2D Human Pose Estimation,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+1cfca6b71b0ead87bbb79a8614ddec3a10100faa,Are screening methods useful in feature selection? An empirical study,Florida State University,Florida State University,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA",30.44235995,-84.29747867,edu,
+1c2db743b37306e50c4234da53510c113f50f9ff,Exploring Weak Stabilization for Motion Feature Extraction,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+1c2db743b37306e50c4234da53510c113f50f9ff,Exploring Weak Stabilization for Motion Feature Extraction,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+1caac27548cc7f98380e4e95ccbc8e6e164489c8,Human Pose Estimation Using Deep Consensus Voting,Weizmann Institute of Science,Weizmann Institute of Science,"מכון ויצמן למדע, שדרת מרכוס זיו, מעונות שיין, אחוזות הנשיא, רחובות, מחוז המרכז, NO, ישראל",31.90784990,34.81334092,edu,
+1cea72fb523432d80b77224433d57828da44828c,Distinct contributions of functional and deep neural network features to representational similarity of scenes in human brain and behavior,National Institutes of Health,National Institutes of Health,"NIH, Pooks Hill, Bethesda, Montgomery County, Maryland, USA",39.00041165,-77.10327775,edu,
+1cea72fb523432d80b77224433d57828da44828c,Distinct contributions of functional and deep neural network features to representational similarity of scenes in human brain and behavior,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+1cea72fb523432d80b77224433d57828da44828c,Distinct contributions of functional and deep neural network features to representational similarity of scenes in human brain and behavior,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+1cea72fb523432d80b77224433d57828da44828c,Distinct contributions of functional and deep neural network features to representational similarity of scenes in human brain and behavior,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+1cea72fb523432d80b77224433d57828da44828c,Distinct contributions of functional and deep neural network features to representational similarity of scenes in human brain and behavior,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+1cea72fb523432d80b77224433d57828da44828c,Distinct contributions of functional and deep neural network features to representational similarity of scenes in human brain and behavior,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+1c99e412666d63e46e1c6606841837d3c18f48e6,Unsupervised object learning from dense equivariant image labelling,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+1cd3250280a0703ba57bbc357287a7213f901b7e,Learning spatio-temporal models of facial expressions,Delft University of Technology,Delft University of Technology,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland",51.99882735,4.37396037,edu,
+1c530de1a94ac70bf9086e39af1712ea8d2d2781,Sparsity Conditional Energy Label Distribution Learning for Age Estimation,Southeast University,Southeast University,"SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国",32.05752790,118.78682252,edu,
+82f8652c2059187b944ce65e87bacb6b765521f6,Discriminative Object Categorization with External Semantic Knowledge,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+823db29d4c2a79e309ad2b394a4aaa83d9e15284,Online Multi-Object Tracking by Quadratic Pseudo-Boolean Optimization,"National University of Defense Technology, China","National University of Defence Technology, Changsha 410000, China","国防科学技术大学, 三一大道, 开福区, 开福区 (Kaifu), 长沙市 / Changsha, 湖南省, 410073, 中国",28.22902090,112.99483204,edu,
+823db29d4c2a79e309ad2b394a4aaa83d9e15284,Online Multi-Object Tracking by Quadratic Pseudo-Boolean Optimization,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+82d446206a3e9afba7e5b8c112227df681ef422a,Super-resolution from internet-scale scene matching,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+82d446206a3e9afba7e5b8c112227df681ef422a,Super-resolution from internet-scale scene matching,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+82729f984c514bd0a5157c28b75ff0236d609384,Deep Feature Flow for Video Recognition,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+82729f984c514bd0a5157c28b75ff0236d609384,Deep Feature Flow for Video Recognition,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+828cfe547a2c9719dea68698dfa168b0bdd22aed,Max-margin transforms for visual domain adaptation,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+8233c1d79ddad9d969b995d4ef2c6f8ea9acc646,Hamiltonian Streamline Guided Feature Extraction with Applications to Face Detection,University at Buffalo,State University of New York at Buffalo,"Buffalo, NY 14260, USA",43.00080930,-78.78896970,edu,
+8233c1d79ddad9d969b995d4ef2c6f8ea9acc646,Hamiltonian Streamline Guided Feature Extraction with Applications to Face Detection,University at Buffalo,State University of New York at Buffalo,"Buffalo, NY 14260, USA",43.00080930,-78.78896970,edu,
+824d1db06e1c25f7681e46199fd02cb5fc343784,Representing Relative Visual Attributes with a Reference-Point-Based Decision Model,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+824d1db06e1c25f7681e46199fd02cb5fc343784,Representing Relative Visual Attributes with a Reference-Point-Based Decision Model,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+82ccd62f70e669ec770daf11d9611cab0a13047e,Sparse Variation Pattern for Texture Classification,Tafresh University,Tafresh University,"دانشگاه تفرش, پاسداران, خرازان, بخش مرکزی, شهرستان تفرش, استان مرکزی, ‏ایران‎",34.68092465,50.05341352,edu,
+82ccd62f70e669ec770daf11d9611cab0a13047e,Sparse Variation Pattern for Texture Classification,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+82ccd62f70e669ec770daf11d9611cab0a13047e,Sparse Variation Pattern for Texture Classification,Azad University,Azad University,"پل دانشگاه آزاد, باراجین, پونک ۳, قزوین, بخش مرکزی, شهرستان قزوین, استان قزوین, ‏ایران‎",36.31734320,50.03672860,edu,
+82d9296eb2edc12f6cb830fba78d5bf9469a94b9,Pedestrian Detection Based on Sparse and Low-Rank Matrix Decomposition,Jiangsu University,Jiangsu University,"江苏大学, 301, 学府路, 京口区, 象山街道, 京口区 (Jingkou), 镇江市 / Zhenjiang, 江苏省, 212013, 中国",32.20302965,119.50968362,edu,
+829ddf932d7164ebc915095a4a94471049825410,Towards Around-Device Interaction using Corneal Imaging,Coburg University,Coburg University,"Hochschule für angewandte Wissenschaften Coburg, 2, Friedrich-Streib-Straße, Callenberg, Coburg, Oberfranken, Bayern, 96450, Deutschland",50.26506145,10.95196483,edu,
+829ddf932d7164ebc915095a4a94471049825410,Towards Around-Device Interaction using Corneal Imaging,Coburg University,Coburg University,"Hochschule für angewandte Wissenschaften Coburg, 2, Friedrich-Streib-Straße, Callenberg, Coburg, Oberfranken, Bayern, 96450, Deutschland",50.26506145,10.95196483,edu,
+82766851c790a5225f3b932239e831e1b60f5ee7,Combining motion and appearance for gender classification from video sequences,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+826f9e286eb0c9165c04bc5811aa7793050c7666,Joint regularized nearest points for image set based face recognition,Shenzhen University,Shenzhen University,"深圳大学, 3688, 南海大道, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518060, 中国",22.53521465,113.93159110,edu,
+826f9e286eb0c9165c04bc5811aa7793050c7666,Joint regularized nearest points for image set based face recognition,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+828a7b3122ebd5b8b0c617902bc04ac5a6c60240,"Show, Adapt and Tell: Adversarial Training of Cross-Domain Image Captioner",National Tsing Hua University,National Tsing Hua University,"國立清華大學, 101, 克恭橋, 光明里, 赤土崎, 東區, 新竹市, 30013, 臺灣",24.79254840,120.99511830,edu,
+828a7b3122ebd5b8b0c617902bc04ac5a6c60240,"Show, Adapt and Tell: Adversarial Training of Cross-Domain Image Captioner",Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+82d82272b365028294662ede914caf64e73495fb,Real-time Driver Drowsiness Detection for Android Application Using Deep Neural Networks Techniques,Qatar University,Qatar University,"Qatar University, Roindabout 3, Al Tarfa (68), أم صلال, 24685, ‏قطر‎",25.37461295,51.48980354,edu,
+82d82272b365028294662ede914caf64e73495fb,Real-time Driver Drowsiness Detection for Android Application Using Deep Neural Networks Techniques,State University of New Jersey,The State University of New Jersey,"Rutgers New Brunswick: Livingston Campus, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA",40.51865195,-74.44099801,edu,
+826f1ac8ef16abd893062fdf5058a09881aed516,Identity-Preserving Face Recovery from Portraits,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+821864bf264f924ac7d63c02ad3fdfff3cefd990,Guide Me: Interacting with Deep Networks,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+823a47273e0e6101be67858f5c5f08e235f2d58a,Access to Awareness for Faces during Continuous Flash Suppression Is Not Modulated by Affective Knowledge.,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+823a47273e0e6101be67858f5c5f08e235f2d58a,Access to Awareness for Faces during Continuous Flash Suppression Is Not Modulated by Affective Knowledge.,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+82a7bdc2ca2ba706446fb1b1c8696e0d0d7cc8d0,AUTOMATIC DETECTION AND INTENSITY ESTIMATION OF SPONTANEOUS SMILES by,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+82a7bdc2ca2ba706446fb1b1c8696e0d0d7cc8d0,AUTOMATIC DETECTION AND INTENSITY ESTIMATION OF SPONTANEOUS SMILES by,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+822c7bfebcc456e3598304f69eb8f4a2aee46f02,Robust Degraded Face Recognition Using Enhanced Local Frequency Descriptor and Multi-scale Competition,Shanghai University,Shanghai University,"上海大学, 锦秋路, 大场镇, 宝山区 (Baoshan), 上海市, 201906, 中国",31.32235655,121.38400941,edu,
+820e4727827646c79a9a5d862c510d26be5356f1,"Gaze Behaviour , Motivational Factors , and Knowledge Sharing Completed Research Paper",Monash University,Monash University,"Monash University, Mile Lane, Parkville, City of Melbourne, Victoria, 3000, Australia",-37.78397455,144.95867433,edu,
+820e4727827646c79a9a5d862c510d26be5356f1,"Gaze Behaviour , Motivational Factors , and Knowledge Sharing Completed Research Paper",Monash University,Monash University,"Monash University, Mile Lane, Parkville, City of Melbourne, Victoria, 3000, Australia",-37.78397455,144.95867433,edu,
+82b43bc9213230af9db17322301cbdf81e2ce8cc,Attention-Set based Metric Learning for Video Face Recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+82e66c4832386cafcec16b92ac88088ffd1a1bc9,OpenFace: A general-purpose face recognition library with mobile applications,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+82e66c4832386cafcec16b92ac88088ffd1a1bc9,OpenFace: A general-purpose face recognition library with mobile applications,Poznan University of Technology,Poznan University of Technology,"Dom Studencki nr 3, 3, Kórnicka, Święty Roch, Rataje, Poznań, wielkopolskie, 61-141, RP",52.40048370,16.95158083,edu,
+8241008f9d3d5e866f648eb454db2054202121ef,Heterogeneous Multi-task Learning for Human Pose Estimation with Deep Convolutional Neural Network,City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+8241008f9d3d5e866f648eb454db2054202121ef,Heterogeneous Multi-task Learning for Human Pose Estimation with Deep Convolutional Neural Network,City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+8241008f9d3d5e866f648eb454db2054202121ef,Heterogeneous Multi-task Learning for Human Pose Estimation with Deep Convolutional Neural Network,City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+82eb267b8e86be0b444e841b4b4ed4814b6f1942,Single Image 3D Interpreter Network,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+82eb267b8e86be0b444e841b4b4ed4814b6f1942,Single Image 3D Interpreter Network,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+823e57c126124254cf96c723fe1bace505271220,Dorsal stream: from algorithm to neuroscience,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+4996e64c24cef33d0f7e5a2b1c3baf00e51493e6,Deep Relative Distance Learning: Tell the Difference between Similar Vehicles,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+499f1d647d938235e9186d968b7bb2ab20f2726d,Face Recognition via Archetype Hull Ranking,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+499f1d647d938235e9186d968b7bb2ab20f2726d,Face Recognition via Archetype Hull Ranking,IBM Thomas J. Watson Research Center,IBM Thomas J. Watson Research Center,"IBM Yorktown research lab, Adams Road, Millwood, Town of New Castle, Westchester County, New York, 10562, USA",41.21002475,-73.80407056,company,
+49ecf784afddf7d5cf31c90340eef9380c261f04,FACSCaps : Pose-Independent Facial Action Coding with Capsules,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+49ecf784afddf7d5cf31c90340eef9380c261f04,FACSCaps : Pose-Independent Facial Action Coding with Capsules,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+4910c4d7eea372034339f21141550f6d7cb28665,Look Deeper into Depth: Monocular Depth Estimation with Semantic Booster and Attention-Driven Loss,City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+4910c4d7eea372034339f21141550f6d7cb28665,Look Deeper into Depth: Monocular Depth Estimation with Semantic Booster and Attention-Driven Loss,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+49ac9738b551d0f8d9c64d5b6e8b08c69e3b0421,3D Pictorial Structures for Multiple View Articulated Pose Estimation,"KTH Royal Institute of Technology, Stockholm","KTH Royal Institute of Technology, Stockholm","KTH, Teknikringen, Lärkstaden, Norra Djurgården, Östermalms stadsdelsområde, Sthlm, Stockholm, Stockholms län, Svealand, 114 28, Sverige",59.34986645,18.07063213,edu,
+497d46649af7dab664cdb9d47242df6dc06b1a48,Integral Channel Features,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+497d46649af7dab664cdb9d47242df6dc06b1a48,Integral Channel Features,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+495fa8f7d9d0e4d472c49de34a9d17343668f4a4,Automatic Event Detection for Signal-based Surveillance,Queensland University of Technology,Queensland University of Technology,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.47715625,153.02841004,edu,
+4990824c5ff6c993e0697e272026438c4a05c3d5,Innovative Sparse Representation Algorithms for Robust Face Recognition,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+4990824c5ff6c993e0697e272026438c4a05c3d5,Innovative Sparse Representation Algorithms for Robust Face Recognition,Curtin University,Curtin University,"Curtin University, Brand Drive, Waterford, Perth, Western Australia, 6102, Australia",-32.00686365,115.89691775,edu,
+49dcfbcb88139e4432cc0d3cfdd91af30f4d53dc,Multi-camera multi-object tracking by robust hough-based homography projections,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+49f70f707c2e030fe16059635df85c7625b5dc7e,Face recognition under illumination variations based on eight local directional patterns,Utah State University,Utah State University,"Utah State University, Champ Drive, Logan, Cache County, Utah, 84322, USA",41.74115040,-111.81223090,edu,
+4931562044a691fe41b638550b54a0a689674e83,Incorporating On-demand Stereo for Real Time Recognition,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+4934fd234db9a095e51d36e738e706886d1dfa0a,Robust Clustering as Ensembles of Affinity Relations,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+4934fd234db9a095e51d36e738e706886d1dfa0a,Robust Clustering as Ensembles of Affinity Relations,Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+49512d11c468dc2fe3fe832d8c4dc8e0a01b0a4b,The Long-Short Story of Movie Description,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+49820ae612b3c0590a8a78a725f4f378cb605cd1,Evaluation of Smile Detection Methods with Images in Real-World Scenarios,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+4948c1791412cf13b770d922399c625527b51a6f,Face Recognition Techniques and Approaches: a Survey,"COMSATS Institute of Information Technology, Lahore",COMSATS Institute of Information Technology,"COMSATS Institute of Information Technology, Ali Akbar Road, Dawood Residency, بحریہ ٹاؤن‬‎, Lahore District, پنجاب, 54700, ‏پاکستان‎",31.40063320,74.21372960,edu,
+49e975a4c60d99bcc42c921d73f8d89ec7130916,Human and computer recognition of facial expressions of emotion,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+49a038852b9e51af658405231045559d728e0970,DeepCache: Principled Cache for Mobile Deep Vision,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+49a038852b9e51af658405231045559d728e0970,DeepCache: Principled Cache for Mobile Deep Vision,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+49a038852b9e51af658405231045559d728e0970,DeepCache: Principled Cache for Mobile Deep Vision,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+49a038852b9e51af658405231045559d728e0970,DeepCache: Principled Cache for Mobile Deep Vision,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+49a038852b9e51af658405231045559d728e0970,DeepCache: Principled Cache for Mobile Deep Vision,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+49a038852b9e51af658405231045559d728e0970,DeepCache: Principled Cache for Mobile Deep Vision,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+49a038852b9e51af658405231045559d728e0970,DeepCache: Principled Cache for Mobile Deep Vision,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+49a038852b9e51af658405231045559d728e0970,DeepCache: Principled Cache for Mobile Deep Vision,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+4960ab1cef23e5ccd60173725ea280f462164a0e,Video Object Segmentation by Learning Location-Sensitive Embeddings,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+4960ab1cef23e5ccd60173725ea280f462164a0e,Video Object Segmentation by Learning Location-Sensitive Embeddings,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+4950ae0e837657b611113e219bf848f0c657dcf9,Efficient Unsupervised Learning for Localization and Detection in Object Categories,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+4950ae0e837657b611113e219bf848f0c657dcf9,Efficient Unsupervised Learning for Localization and Detection in Object Categories,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+49e541e0bbc7a082e5c952fc70716e66e5713080,Group expression intensity estimation in videos via Gaussian Processes,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+4934d44aa89b6d871eb6709dd1d1eebf16f3aaf1,A Deep Sum-Product Architecture for Robust Facial Attributes Analysis,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+4934d44aa89b6d871eb6709dd1d1eebf16f3aaf1,A Deep Sum-Product Architecture for Robust Facial Attributes Analysis,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+4934d44aa89b6d871eb6709dd1d1eebf16f3aaf1,A Deep Sum-Product Architecture for Robust Facial Attributes Analysis,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+49a2f3262958465c8cfd5a59bc0f9f4effd1936b,Global Semantic Consistency for Zero-Shot Learning,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+49a2f3262958465c8cfd5a59bc0f9f4effd1936b,Global Semantic Consistency for Zero-Shot Learning,Tongji University,Tongji University,"同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国",31.28473925,121.49694909,edu,
+49396502143e920f7208bfd27202d6fead39992f,Dense Semantic and Topological Correspondence of 3D Faces without Landmarks,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+49396502143e920f7208bfd27202d6fead39992f,Dense Semantic and Topological Correspondence of 3D Faces without Landmarks,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+49ead21ec51e7df53583ef5ade06606c8a75dfb8,Hallucinating Very Low-Resolution Unaligned and Noisy Face Images by Transformative Discriminative Autoencoders,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,
+497243ed80033921c3c82c278780381a7d9d783e,Think Visually: Question Answering through Virtual Imagery,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+49ae4afe91239a8259dc0c390179d47bc395beda,Saliency Based Opportunistic Search for Object Part Extraction and Labeling,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+495015d21c26eac9a6bd64c836ee3370283641ec,VisKE: Visual knowledge extraction and question answering by visual verification of relation phrases,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+499f2b005e960a145619305814a4e9aa6a1bba6a,Robust human face recognition based on locality preserving sparse over complete block approximation,University of Geneva,University of Geneva,"University of Chicago-Yerkes Observatory, 373, West Geneva Street, Williams Bay, Walworth County, Wisconsin, 53191, USA",42.57054745,-88.55578627,edu,
+49e2c1bae80e6b75233348102dc44671ee52b548,Age and gender recognition using informative features of various types,University of Ottawa,University of Ottawa,"University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada",45.42580475,-75.68740118,edu,
+497bf2df484906e5430aa3045cf04a40c9225f94,Hierarchical Recognition Scheme for Human Facial Expression Recognition Systems,Kyung Hee University,Kyung Hee University,"Kyung Hee Tae Kwon Do, Vons 2370 Truck Service Ramp, University City, San Diego, San Diego County, California, 92122, USA",32.85363330,-117.20352860,edu,
+497bf2df484906e5430aa3045cf04a40c9225f94,Hierarchical Recognition Scheme for Human Facial Expression Recognition Systems,Ajou University,Ajou University,"아주대학교, 성호대교, 이의동, 영통구, 수원시, 경기, 16499, 대한민국",37.28300030,127.04548469,edu,
+492f41e800c52614c5519f830e72561db205e86c,A Deep Regression Architecture with Two-Stage Re-initialization for High Performance Facial Landmark Detection,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+492f41e800c52614c5519f830e72561db205e86c,A Deep Regression Architecture with Two-Stage Re-initialization for High Performance Facial Landmark Detection,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+492f41e800c52614c5519f830e72561db205e86c,A Deep Regression Architecture with Two-Stage Re-initialization for High Performance Facial Landmark Detection,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+4953da81a1a93ab3a30152d4403c5e8fa79edc09,Fast obstacle detection using targeted optical flow,Carleton University,Carleton University,"Carleton University, 1125, Colonel By Drive, Billings Bridge, Capital, Ottawa, Ontario, K1S 5B7, Canada",45.38608430,-75.69539267,edu,
+4953da81a1a93ab3a30152d4403c5e8fa79edc09,Fast obstacle detection using targeted optical flow,Carleton University,Carleton University,"Carleton University, 1125, Colonel By Drive, Billings Bridge, Capital, Ottawa, Ontario, K1S 5B7, Canada",45.38608430,-75.69539267,edu,
+49435aab7cdf259335725acc96691f755e436f55,A database for fine grained activity detection of cooking activities,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+493ec9e567c5587c4cbeb5f08ca47408ca2d6571,Combining graph embedding and sparse regression with structure low-rank representation for semi-supervised learning,Jiangnan University,Jiangnan University,"江南大学站, 蠡湖大道, 滨湖区, 南场村, 滨湖区 (Binhu), 无锡市 / Wuxi, 江苏省, 214121, 中国",31.48542550,120.27395810,edu,
+49570b41bd9574bd9c600e24b269d945c645b7bd,A Framework for Performance Evaluation of Face Recognition Algorithms,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+49f01ad8e60882d0f3c450345251b6c6b499c3a2,Cryptic Emotions and the Emergence of a Metatheory of Mind in Popular Filmmaking,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+4946ba10a4d5a7d0a38372f23e6622bd347ae273,RONCHI AND PERONA: DESCRIBING COMMON HUMAN VISUAL ACTIONS IN IMAGES 1 Describing Common Human Visual Actions in Images,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+40883844c1ceab95cb92498a92bfdf45beaa288e,Improving Heterogeneous Face Recognition with Conditional Adversarial Networks,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+40b2652cf3bdee159dacb6e18c761003c31f4205,Database Learning: Toward a Database that Becomes Smarter Every Time,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+403ecc19291f21db6084db5c12f428e2af91ed3d,Semi-Supervised Classification Based on Mixture Graph,Southwest University,Southwest University,"西南大学, 天生路, 北碚区 (Beibei), 北碚区, 北碚区 (Beibei), 重庆市, 400711, 中国",29.82366295,106.42050016,edu,
+403ecc19291f21db6084db5c12f428e2af91ed3d,Semi-Supervised Classification Based on Mixture Graph,Jilin University,Jilin University,"吉林大学珠海校区, 丹桂路, 圣堂村, 金湾区, 珠海市, 广东省, 中国",22.05356500,113.39913285,edu,
+40a74eea514b389b480d6fe8b359cb6ad31b644a,Discrete Deep Feature Extraction: A Theory and New Architectures,University of Vienna,University of Vienna,"Uni Wien, 1, Universitätsring, Schottenviertel, KG Innere Stadt, Innere Stadt, Wien, 1010, Österreich",48.21313020,16.36068653,edu,
+403a108dec92363fd1f465340bd54dbfe65af870,Local Higher-Order Statistics (LHS) describing images with statistics of local non-binarized pixel patterns,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+40ee38d7ff2871761663d8634c3a4970ed1dc058,Three-Dimensional Face Recognition: A Fishersurface Approach,University of York,University of York,"University of York, Lakeside Way, Heslington, York, Yorkshire and the Humber, England, YO10 5FN, UK",53.94540365,-1.03138878,edu,
+404042a1dcfde338cf24bc2742c57c0fb1f48359,A Survey on Facial Features Localization,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+40757d94d6ef33555fc940d556ebfb0d32410fbb,Warmth and competence in your face! Visual encoding of stereotype content,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+40757d94d6ef33555fc940d556ebfb0d32410fbb,Warmth and competence in your face! Visual encoding of stereotype content,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+403d6a09c17268fb4bb0ae953107bf5f78ca9d05,Staining Pattern Classification of Antinuclear Autoantibodies Based on Block Segmentation in Indirect Immunofluorescence Images,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+4015e8195db6edb0ef8520709ca9cb2c46f29be7,Smile Detector Based on the Motion of Face Reference Points,University of Tartu,UNIVERSITY OF TARTU,"Paabel, University of Tartu, 17, Ülikooli, Kesklinn, Tartu linn, Tartu, Tartu linn, Tartu maakond, 53007, Eesti",58.38131405,26.72078081,edu,
+407bb798ab153bf6156ba2956f8cf93256b6910a,Fisher Pruning of Deep Nets for Facial Trait Classification,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+403530d1c418de29cbc595775ec45e16183950e5,Pioneer Networks: Progressively Growing Generative Autoencoder,Aalto University,Aalto University,"Aalto, 24, Otakaari, Otaniemi, Suur-Tapiola, Espoo, Helsingin seutukunta, Uusimaa, Etelä-Suomi, Manner-Suomi, 02150, Suomi",60.18558755,24.82427330,edu,
+40fa315150ddcaa1e0996046d140b8882f375f7d,Generative Image Inpainting with Contextual Attention,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+4091b6a3ab33e2aa923ee23c8db7e33d167ff67a,Transductive Multi-class and Multi-label Zero-shot Learning,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+407de9da58871cae7a6ded2f3a6162b9dc371f38,TraMNet - Transition Matrix Network for Efficient Action Tube Proposals,Oxford Brookes University,Oxford Brookes University,"Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK",51.75552050,-1.22615970,edu,
+405526dfc79de98f5bf3c97bf4aa9a287700f15d,MegaFace: A Million Faces for Recognition at Scale,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+40b7e590dfd1cdfa1e0276e9ca592e02c1bd2b5b,Beyond Trade-off: Accelerate FCN-based Face Detector with Higher Accuracy,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+40a5b32e261dc5ccc1b5df5d5338b7d3fe10370d,Feedback-Controlled Sequential Lasso Screening,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+40a9f3d73c622cceee5e3d6ca8faa56ed6ebef60,Automatic Lip Tracking and Action Units Classification using Two-Step Active Contours and Probabilistic Neural Networks,University of Tabriz,University of Tabriz,"دانشگاه تبریز, شهید ایرج خلوتی, کوی انقلاب, تبریز, بخش مرکزی, شهرستان تبریز, استان آذربایجان شرقی, 5166616471, ‏ایران‎",38.06125530,46.32984840,edu,
+40a9f3d73c622cceee5e3d6ca8faa56ed6ebef60,Automatic Lip Tracking and Action Units Classification using Two-Step Active Contours and Probabilistic Neural Networks,University of Ottawa,University of Ottawa,"University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada",45.42580475,-75.68740118,edu,
+40a9f3d73c622cceee5e3d6ca8faa56ed6ebef60,Automatic Lip Tracking and Action Units Classification using Two-Step Active Contours and Probabilistic Neural Networks,University of Tabriz,University of Tabriz,"دانشگاه تبریز, شهید ایرج خلوتی, کوی انقلاب, تبریز, بخش مرکزی, شهرستان تبریز, استان آذربایجان شرقی, 5166616471, ‏ایران‎",38.06125530,46.32984840,edu,
+40e30ba448a079152ccd13f9ba670aa272df66b3,Cross - Pose Facial Expression Recognition,Bilkent University,Bilkent University,"Bilkent Üniversitesi, 3. Cadde, Üniversiteler Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87204890,32.75395155,edu,
+4078c37c39dc5c7c65a5494651ba6dd443cf9269,Empirical Performance Upper Bounds for Im- Age and Video Captioning,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+40e1743332523b2ab5614bae5e10f7a7799161f4,Wing Loss for Robust Facial Landmark Localisation with Convolutional Neural Networks,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+40e1743332523b2ab5614bae5e10f7a7799161f4,Wing Loss for Robust Facial Landmark Localisation with Convolutional Neural Networks,Jiangnan University,Jiangnan University,"江南大学站, 蠡湖大道, 滨湖区, 南场村, 滨湖区 (Binhu), 无锡市 / Wuxi, 江苏省, 214121, 中国",31.48542550,120.27395810,edu,
+40c8cffd5aac68f59324733416b6b2959cb668fd,Pooling Facial Segments to Face: The Shallow and Deep Ends,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+4065d038ecbda579a0791aaf46fc62bbcba5b1f3,Real-time Factored ConvNets: Extracting the X Factor in Human Parsing,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+40bb3ef2b4e556a3646a8cd77364a89b8773e4a4,First-Person Animal Activity Recognition from Egocentric Videos,Kyushu University,Kyushu University,"伊都ゲストハウス, 桜井太郎丸線, 西区, 福岡市, 福岡県, 九州地方, 819−0395, 日本",33.59914655,130.22359848,edu,
+40bb3ef2b4e556a3646a8cd77364a89b8773e4a4,First-Person Animal Activity Recognition from Egocentric Videos,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+4061524d5867325aab871ecf25ba58acd7872192,Aspect-based Question Generation,University of Illinois at Chicago,University of Illinois at Chicago,"University of Illinois at Chicago, West Taylor Street, Greektown, Chicago, Cook County, Illinois, 60607, USA",41.86898915,-87.64856256,edu,
+4061524d5867325aab871ecf25ba58acd7872192,Aspect-based Question Generation,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+40273657e6919455373455bd9a5355bb46a7d614,Anonymizing k Facial Attributes via Adversarial Perturbations,"IIIT Delhi, India","IIIT Delhi, India","Okhla Industrial Estate, Phase III, Near Govind Puri Metro Station, New Delhi, Delhi 110020, India",28.54562820,77.27315050,edu,
+40629398c85c12432979379800c267d2a8c62bf8,Timing-Based Local Descriptor for Dynamic Surfaces,Kyoto University,Kyoto University,"京都大学, 今出川通, 吉田泉殿町, 左京区, 京都市, 京都府, 近畿地方, 606-8501, 日本",35.02749960,135.78154513,edu,
+4066f186ff58d300090c652925ed0aed3355efec,Solving Visual Madlibs with Multiple Cues,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+40559bd43d5480000e34e4fef3e8fe3782d1a688,Active query-driven visual search using probabilistic bisection and convolutional neural networks,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+40bb090a4e303f11168dce33ed992f51afe02ff7,Marginal Loss for Deep Face Recognition,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+40bb090a4e303f11168dce33ed992f51afe02ff7,Marginal Loss for Deep Face Recognition,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+40bb090a4e303f11168dce33ed992f51afe02ff7,Marginal Loss for Deep Face Recognition,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+40e7536f43c8a2623ce27e182a0e66028b58de89,Look Before You Leap: Bridging Model-Free and Model-Based Reinforcement Learning for Planned-Ahead Vision-and-Language Navigation,"University of California, Santa Barbara","University of California, Santa Barbara","UCSB, Santa Barbara County, California, 93106, USA",34.41459370,-119.84581950,edu,
+401a8272c60216d1ce8be58edc13b42b1bfdf912,Semi-Supervised Learning in Gigantic Image Collections,Courant Institute of Mathematical Sciences,Courant Institute of Mathematical Sciences,"Courant Institute of Mathematical Sciences, 251, Mercer Street, Washington Square Village, Greenwich Village, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72864840,-73.99568630,edu,
+406431d2286a50205a71f04e0b311ba858fc7b6c,3D facial expression classification using a statistical model of surface normals and a modular approach,University of Birmingham,University of Birmingham,"University of Birmingham Edgbaston Campus, Ring Road North, Bournbrook, Birmingham, West Midlands Combined Authority, West Midlands, England, B15 2TP, UK",52.45044325,-1.93196134,edu,
+406431d2286a50205a71f04e0b311ba858fc7b6c,3D facial expression classification using a statistical model of surface normals and a modular approach,University of Birmingham,University of Birmingham,"University of Birmingham Edgbaston Campus, Ring Road North, Bournbrook, Birmingham, West Midlands Combined Authority, West Midlands, England, B15 2TP, UK",52.45044325,-1.93196134,edu,
+40217a8c60e0a7d1735d4f631171aa6ed146e719,Part-Pair Representation for Part Localization,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+2e0bd693d12c43c2e86c7a4d8809445f380c5556,Webcam classification using simple features,Purdue University,Purdue University,"Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA",40.43197220,-86.92389368,edu,
+2e58ec57d71b2b2a3e71086234dd7037559cc17e,A Gender Recognition System from Facial Image,Institute of Information Technology,Institute of Information Technology,"Institute of Information Technology, Sir Sayed Road, ফকিরাপুল, সিদ্দিক বাজার, ঢাকা, ঢাকা বিভাগ, 1000, বাংলাদেশ",23.72898990,90.39826820,edu,
+2e58ec57d71b2b2a3e71086234dd7037559cc17e,A Gender Recognition System from Facial Image,Institute of Information Technology,Institute of Information Technology,"Institute of Information Technology, Sir Sayed Road, ফকিরাপুল, সিদ্দিক বাজার, ঢাকা, ঢাকা বিভাগ, 1000, বাংলাদেশ",23.72898990,90.39826820,edu,
+2e58ec57d71b2b2a3e71086234dd7037559cc17e,A Gender Recognition System from Facial Image,University of Dhaka,University of Dhaka,"World War Memorial, Shahid Minar Rd, Jagannath Hall, DU, জিগাতলা, ঢাকা, ঢাকা বিভাগ, 1000, বাংলাদেশ",23.73169570,90.39652750,edu,
+2e58ec57d71b2b2a3e71086234dd7037559cc17e,A Gender Recognition System from Facial Image,University of Dhaka,University of Dhaka,"World War Memorial, Shahid Minar Rd, Jagannath Hall, DU, জিগাতলা, ঢাকা, ঢাকা বিভাগ, 1000, বাংলাদেশ",23.73169570,90.39652750,edu,
+2e68f29f26f91985e0ad12b3229e46edefe1e871,Discovering Shades of Attribute Meaning with the Crowd,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+2ecb3e485b4935d3f7d25ebe8179724b9228bbec,Temporal-Coherency-Aware Human Pose Estimation in Video via Pre-trained Res-net and Flow-CNN,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+2ecb3e485b4935d3f7d25ebe8179724b9228bbec,Temporal-Coherency-Aware Human Pose Estimation in Video via Pre-trained Res-net and Flow-CNN,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+2e942d19333651bf6012374ea9e78d6937fd33ac,Detecting Faces Using Region-based Fully Convolutional Networks,Tencent,"Tencent AI Lab, China","Ke Ji Zhong Yi Lu, Nanshan Qu, Shenzhen Shi, Guangdong Sheng, China, 518057",22.54471540,113.93571640,company,"Keji Middle 1st Rd, Nanshan Qu, Shenzhen Shi, Guangdong Sheng, China, 518057"
+2e5ebb2ed819b97c6c54570d684576387dc55e93,Reasoning about Object Affordances in a Knowledge Base Representation,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+2e079604c7a00c43f06e214280cea18a89dcecef,Bayesian Optimization and Semiparametric Models with Applications to Assistive Technology,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+2e0addeffba4be98a6ad0460453fbab52616b139,Face View Synthesis Using A Single Image,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+2e0addeffba4be98a6ad0460453fbab52616b139,Face View Synthesis Using A Single Image,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+2e8a0cc071017845ee6f67bd0633b8167a47abed,Spatio-temporal covariance descriptors for action and gesture recognition,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+2e9739056c9d1fe7b37046328f00cae603f59441,A Video-Based Method for Automatically Rating Ataxia,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+2e9739056c9d1fe7b37046328f00cae603f59441,A Video-Based Method for Automatically Rating Ataxia,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+2e9739056c9d1fe7b37046328f00cae603f59441,A Video-Based Method for Automatically Rating Ataxia,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+2e9739056c9d1fe7b37046328f00cae603f59441,A Video-Based Method for Automatically Rating Ataxia,Yale University,Yale University,"Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA",41.25713055,-72.98966960,edu,
+2e9739056c9d1fe7b37046328f00cae603f59441,A Video-Based Method for Automatically Rating Ataxia,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+2e9739056c9d1fe7b37046328f00cae603f59441,A Video-Based Method for Automatically Rating Ataxia,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+2e9dc528c023a6634a51d5a74e95e5f432da9aaa,Eye movements while judging faces for trustworthiness and dominance,University of Lincoln,University of Lincoln,"University of Lincoln, Brayford Way, Whitton Park, New Boultham, Lincoln, Lincolnshire, East Midlands, England, LN6 7TS, UK",53.22853665,-0.54873472,edu,
+2e9dc528c023a6634a51d5a74e95e5f432da9aaa,Eye movements while judging faces for trustworthiness and dominance,University of Aberdeen,University of Aberdeen,"University of Aberdeen, High Street, Old Aberdeen, Aberdeen, Aberdeen City, Scotland, AB24 3EJ, UK",57.16461430,-2.10186013,edu,
+2e105974d58cdefcc866c5f6ca73ea033881ddd7,Question Type Guided Attention in Visual Question Answering,"University of California, Irvine","University of California, Irvine","University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA",33.64319010,-117.84016494,edu,
+2e105974d58cdefcc866c5f6ca73ea033881ddd7,Question Type Guided Attention in Visual Question Answering,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+2e105974d58cdefcc866c5f6ca73ea033881ddd7,Question Type Guided Attention in Visual Question Answering,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+2e222383bd75d3c3961ac073e8aabd3557946601,Unsupervised Video Adaptation for Parsing Human Motion,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+2e222383bd75d3c3961ac073e8aabd3557946601,Unsupervised Video Adaptation for Parsing Human Motion,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+2e222383bd75d3c3961ac073e8aabd3557946601,Unsupervised Video Adaptation for Parsing Human Motion,University of Queensland,University of Queensland,"University of Queensland, University Drive, Hill End, St Lucia, Brisbane, QLD, 4072, Australia",-27.49741805,153.01316956,edu,
+2e02597b9a8239700703920d5b74f765576d6f43,A model of the neural basis of predecisional processes : the fronto-limbic information acquisition network,University of Iowa,University of Iowa,"University of Iowa, Hawkeye Court, Iowa City, Johnson County, Iowa, 52246, USA",41.66590000,-91.57310307,edu,
+2e02597b9a8239700703920d5b74f765576d6f43,A model of the neural basis of predecisional processes : the fronto-limbic information acquisition network,University of Iowa,University of Iowa,"University of Iowa, Hawkeye Court, Iowa City, Johnson County, Iowa, 52246, USA",41.66590000,-91.57310307,edu,
+2e02597b9a8239700703920d5b74f765576d6f43,A model of the neural basis of predecisional processes : the fronto-limbic information acquisition network,University of Iowa,University of Iowa,"University of Iowa, Hawkeye Court, Iowa City, Johnson County, Iowa, 52246, USA",41.66590000,-91.57310307,edu,
+2e355890915492ddd46063828f8534b734b8f58f,Dynamic Label Graph Matching for Unsupervised Video Re-identification,Hong Kong Baptist University,Hong Kong Baptist University,"香港浸會大學 Hong Kong Baptist University, 安明街 On Ming Street, 石門 Shek Mun, 石古壟 Shek Kwu Lung, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1132, 中国",22.38742010,114.20822220,edu,
+2e355890915492ddd46063828f8534b734b8f58f,Dynamic Label Graph Matching for Unsupervised Video Re-identification,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+2e231f1e7e641dd3619bec59e14d02e91360ac01,Fusion Network for Face-Based Age Estimation,University of Bath,University of Bath,"University of Bath, Convocation Avenue, Claverton Down, Bath, Bath and North East Somerset, South West England, England, BA2 7PA, UK",51.37914420,-2.32523320,edu,
+2e231f1e7e641dd3619bec59e14d02e91360ac01,Fusion Network for Face-Based Age Estimation,Charles Sturt University,Charles Sturt University,"Charles Sturt University, Wagga Wagga, NSW, 2678, Australia",-35.06360710,147.35522340,edu,
+2e6cfeba49d327de21ae3186532e56cadeb57c02,Real Time Eye Gaze Tracking with 3D Deformable Eye-Face Model,Rensselaer Polytechnic Institute,Rensselaer Polytechnic Institute,"Rensselaer Polytechnic Institute, Sage Avenue, Downtown, City of Troy, Rensselaer County, New York, 12180, USA",42.72984590,-73.67950216,edu,
+2e480b3ef788512d647129509ea2e7d20464bf45,Scene image classification with biased spatial block and pLSA,Tianjin University,Tianjin University,"泰山航空港/天津大厦, 枣行路, 枣行 高王寺, 长城路, 大河, 岱岳区 (Daiyue), 泰安市, 山东省, 271000, 中国",36.20304395,117.05842113,edu,
+2e6e9d117b626e34ce0167f9d69cec6698b0eb05,Labeling of Human Motion Based on CBGA,City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+2e55a287328b234db16fb538eddbbc185d51582a,UTS-CMU-D 2 DCRC Submission at TRECVID 2016 Video Localization,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+2e55a287328b234db16fb538eddbbc185d51582a,UTS-CMU-D 2 DCRC Submission at TRECVID 2016 Video Localization,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+2ee817981e02c4709d65870c140665ed25b005cc,Sparse representations and Random Projections for robust and cancelable biometrics,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+2ee817981e02c4709d65870c140665ed25b005cc,Sparse representations and Random Projections for robust and cancelable biometrics,University of Maryland College Park,University of Maryland College Park,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",38.99203005,-76.94610290,edu,
+2e98329fdec27d4b3b9b894687e7d1352d828b1d,Using Affect Awareness to Modulate Task Experience: A Study Amongst Pre-elementary School Kids,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+2e59865aa2ddcecaf9275abcad9b134558c686c2,Joint Learning of Single-Image and Cross-Image Representations for Person Re-identification,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+2e59865aa2ddcecaf9275abcad9b134558c686c2,Joint Learning of Single-Image and Cross-Image Representations for Person Re-identification,Hong Kong Polytechnic University,Hong Kong Polytechnic University,"hong kong, 11, 育才道 Yuk Choi Road, 尖沙咀 Tsim Sha Tsui, 油尖旺區 Yau Tsim Mong District, 九龍 Kowloon, HK, 00000, 中国",22.30457200,114.17976285,edu,
+2e59865aa2ddcecaf9275abcad9b134558c686c2,Joint Learning of Single-Image and Cross-Image Representations for Person Re-identification,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+2eb1dc9d1cf571462f7bc616b0dc52c8e402e331,"Towards Speech Emotion Recognition ""in the wild"" using Aggregated Corpora and Deep Multi-Task Learning",University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+2e9d33cba9f547a2e3febe088bae443f1d74d594,PipeLayer: A Pipelined ReRAM-Based Accelerator for Deep Learning,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+2e9d33cba9f547a2e3febe088bae443f1d74d594,PipeLayer: A Pipelined ReRAM-Based Accelerator for Deep Learning,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+2e8eb9dc07deb5142a99bc861e0b6295574d1fbd,Analysis by Synthesis: 3D Object Recognition by Object Reconstruction,"University of California, Irvine","University of California, Irvine","University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA",33.64319010,-117.84016494,edu,
+2e8eb9dc07deb5142a99bc861e0b6295574d1fbd,Analysis by Synthesis: 3D Object Recognition by Object Reconstruction,"University of California, Irvine","University of California, Irvine","University of California, Irvine, East Peltason Drive, Turtle Rock, Irvine, Orange County, California, 92612, USA",33.64319010,-117.84016494,edu,
+2ec55c3fb5fa493ebfacc58115cf28f283a50a02,How to Transfer? Zero-Shot Object Recognition via Hierarchical Transfer of Semantic Attributes,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+2e17cf6a339fd071ad222062f868e882ef4120a4,Inferring and Executing Programs for Visual Reasoning,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+2e3c893ac11e1a566971f64ae30ac4a1f36f5bb5,Simultaneous Object Detection and Ranking with Weak Supervision,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+2ed3ce5cf9e262bcc48a6bd998e7fb70cf8a971c,Active AU Based Patch Weighting for Facial Expression Recognition,Shenzhen University,Shenzhen University,"深圳大学, 3688, 南海大道, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518060, 中国",22.53521465,113.93159110,edu,
+2edc6df161f6aadbef9c12408bdb367e72c3c967,Improved Spatiotemporal Local Monogenic Binary Pattern for Emotion Recognition in The Wild,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+2edc6df161f6aadbef9c12408bdb367e72c3c967,Improved Spatiotemporal Local Monogenic Binary Pattern for Emotion Recognition in The Wild,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+2edc6df161f6aadbef9c12408bdb367e72c3c967,Improved Spatiotemporal Local Monogenic Binary Pattern for Emotion Recognition in The Wild,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+2edc6df161f6aadbef9c12408bdb367e72c3c967,Improved Spatiotemporal Local Monogenic Binary Pattern for Emotion Recognition in The Wild,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+2edc6df161f6aadbef9c12408bdb367e72c3c967,Improved Spatiotemporal Local Monogenic Binary Pattern for Emotion Recognition in The Wild,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+2e67d919815a073d1dbc6db3153697578257a28d,Understanding how image quality affects deep neural networks,Arizona State University,Arizona State University,"Arizona State University Polytechnic campus, East Texas Avenue, Mesa, Maricopa County, Arizona, 85212, USA",33.30715065,-111.67653157,edu,
+2e38ff75a80ec92111261bf368781c7eef89eb14,Face hallucination VIA sparse coding,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+2e38ff75a80ec92111261bf368781c7eef89eb14,Face hallucination VIA sparse coding,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+2e1fd8d57425b727fd850d7710d38194fa6e2654,Learning Structured Appearance Models from Captioned Images of Cluttered Scenes,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+2e1fd8d57425b727fd850d7710d38194fa6e2654,Learning Structured Appearance Models from Captioned Images of Cluttered Scenes,Bielefeld University,Bielefeld University,"Fachhochschule Bielefeld FB Gestaltung, 3, Lampingstraße, Mitte, Bielefeld, Regierungsbezirk Detmold, Nordrhein-Westfalen, 33615, Deutschland",52.02804210,8.51148270,edu,
+2e786f3353667b537636fc1912118961e512be88,Visual Tracking of Multiple Humans with Machine Learning based Robustness Enhancement applied to Real-World Robotic Systems,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+2eb96e784d2b34ba56654ebd0f357f0b121f73cb,A study of impaired judgment of eye-gaze direction and related face-processing deficits in autism spectrum disorders.,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+2eb96e784d2b34ba56654ebd0f357f0b121f73cb,A study of impaired judgment of eye-gaze direction and related face-processing deficits in autism spectrum disorders.,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+2eb96e784d2b34ba56654ebd0f357f0b121f73cb,A study of impaired judgment of eye-gaze direction and related face-processing deficits in autism spectrum disorders.,University of Sheffield,The University of Sheffield,"University of Sheffield, Portobello, Port Mahon, Saint George's, Sheffield, Yorkshire and the Humber, England, S1 4DP, UK",53.38152480,-1.48068143,edu,
+2e243d59184f781755339f6b415fff87f63c5ca2,Fixation and Saccade Based Face Recognition from Single Image per Person with Various Occlusions and Expressions,University of Warwick,University of Warwick,"University of Warwick, University Road, Kirby Corner, Cannon Park, Coventry, West Midlands Combined Authority, West Midlands, England, CV4 7AL, UK",52.37931310,-1.56042520,edu,
+2b4512097cb0056886f2d4d2ca7f5b034a647237,3D Facial Expression Recognition Based on Primitive Surface Feature Distribution,SUNY Binghamton,State University of New York at Binghamton,"State University of New York at Binghamton, East Drive, Hinman, Willow Point, Vestal Town, Broome County, New York, 13790, USA",42.08779975,-75.97066066,edu,
+2be0ab87dc8f4005c37c523f712dd033c0685827,Relaxed local ternary pattern for face recognition,Institute of Media Innovation,Institute of Media Innovation,"Institute for Media Innovation, 50, Nanyang Drive, Pioneer, Southwest, 637553, Singapore",1.34339370,103.67933030,edu,
+2be0ab87dc8f4005c37c523f712dd033c0685827,Relaxed local ternary pattern for face recognition,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+2be0ab87dc8f4005c37c523f712dd033c0685827,Relaxed local ternary pattern for face recognition,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+2bb2ba7c96d40e269fc6a2d5384c739ff9fa16eb,Image-Based Recommendations on Styles and Substitutes,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+2bb2ba7c96d40e269fc6a2d5384c739ff9fa16eb,Image-Based Recommendations on Styles and Substitutes,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+2bdba44420e400eceab79f02a8552ee97e940225,Pornography detection using BossaNova video descriptor,University of Campinas,University of Campinas,"USJ, 97, Rua Sílvia Maria Fabro, Kobrasol, Campinas, São José, Microrregião de Florianópolis, Mesorregião da Grande Florianópolis, SC, Região Sul, 88102-130, Brasil",-27.59539950,-48.61542180,edu,
+2b10b0f309546878ec418ae6e6f0a993fd7f3293,Human Perambulation as a Self Calibrating Biometric,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+2b339ece73e3787f445c5b92078e8f82c9b1c522,"Human Re-identification in Crowd Videos Using Personal, Social and Environmental Constraints",University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+2b64a72d53f13417c6352d3e89fd27df91b5d697,Learning Human Interaction by Interactive Phrases,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+2bb53e66aa9417b6560e588b6235e7b8ebbc294c,Semantic embedding space for zero-shot action recognition,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+2be8e06bc3a4662d0e4f5bcfea45631b8beca4d0,Watch and learn: Semi-supervised learning of object detectors from videos,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+2b0e1a62d7168df5f29e2e9c7fc72ae43c39fdb2,Emotion expression modulates perception of animacy from faces,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+2bcec23ac1486f4106a3aa588b6589e9299aba70,An Uncertain Future: Forecasting from Static Images Using Variational Autoencoders,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+2b773fe8f0246536c9c40671dfa307e98bf365ad,Fast Discriminative Stochastic Neighbor Embedding Analysis,Zhejiang University of Technology,Zhejiang University of Technology,"浙江工业大学, 潮王路, 朝晖街道, 杭州市 Hangzhou, 浙江省, 310014, 中国",30.29315340,120.16204580,edu,
+2bcd59835528c583bb5b310522a5ba6e99c58b15,Multi-class Open Set Recognition Using Probability of Inclusion,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+2b9410889dc6870cc6e0476dbc681049b28ccacb,Learning to Detect Carried Objects with Minimal Supervision,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+2bab44d3a4c5ca79fb8f87abfef4456d326a0445,Player identification in soccer videos,University of Florence,University of Florence,"Piazza di San Marco, 4, 50121 Firenze FI, Italy",43.77764260,11.25976500,edu,
+2b0102d77d3d3f9bc55420d862075934f5c85bec,Slicing Convolutional Neural Network for Crowd Video Understanding,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+2b0102d77d3d3f9bc55420d862075934f5c85bec,Slicing Convolutional Neural Network for Crowd Video Understanding,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+2bce7f8a53fb8ec93dd218dbdf55b48ac54ae8b3,Predicting the Category and Attributes of Mental Pictures Using Deep Gaze Pooling,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+2b73e3d541b0208ae54b3920fef4bfd9fd0c84a7,Feature-based face representations and image reconstruction from behavioral and neural data.,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+2b73e3d541b0208ae54b3920fef4bfd9fd0c84a7,Feature-based face representations and image reconstruction from behavioral and neural data.,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+2b73e3d541b0208ae54b3920fef4bfd9fd0c84a7,Feature-based face representations and image reconstruction from behavioral and neural data.,MIT,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+2b435ee691718d0b55d057d9be4c3dbb8a81526e,SURF-Face: Face Recognition Under Viewpoint Consistency Constraints,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+2b4bed0fadee29a84a272d7c52adc4a70e1a2b52,Human Motion Detection Using Fuzzy Rule-base Classification Of Moving Blob Regions,Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.64185301,edu,
+2b4bed0fadee29a84a272d7c52adc4a70e1a2b52,Human Motion Detection Using Fuzzy Rule-base Classification Of Moving Blob Regions,Multimedia University,Multimedia University,"Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia",2.92749755,101.64185301,edu,
+2b79da19774861621b6a9d0c769f95d33e5b6eb6,Maximum Classifier Discrepancy for Unsupervised Domain Adaptation,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+2bb2abecb4fa7071bc2760784c6f7661e7e725da,StarMap for Category-Agnostic Keypoint and Viewpoint Estimation,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+2b5cb5466eecb131f06a8100dcaf0c7a0e30d391,A comparative study of active appearance model annotation schemes for the face,"UNCW, USA","UNCW, USA","601 S College Rd, Wilmington, NC 28403, USA",34.22398690,-77.87013250,edu,
+2b5cb5466eecb131f06a8100dcaf0c7a0e30d391,A comparative study of active appearance model annotation schemes for the face,"UNCW, USA","UNCW, USA","601 S College Rd, Wilmington, NC 28403, USA",34.22398690,-77.87013250,edu,
+2b5cb5466eecb131f06a8100dcaf0c7a0e30d391,A comparative study of active appearance model annotation schemes for the face,"UNCW, USA","UNCW, USA","601 S College Rd, Wilmington, NC 28403, USA",34.22398690,-77.87013250,edu,
+2b64a8c1f584389b611198d47a750f5d74234426,Deblurring Face Images with Exemplars,Dalian University of Technology,Dalian University of Technology,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国",38.88140235,121.52281098,edu,
+2b64a8c1f584389b611198d47a750f5d74234426,Deblurring Face Images with Exemplars,"University of California, Merced","University of California, Merced","University of California, Merced, Ansel Adams Road, Merced County, California, USA",37.36566745,-120.42158888,edu,
+2b6c031c61b78a9f9ee958d291d29c8ab359404e,Vision of a Visipedia,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+2b10a07c35c453144f22e8c539bf9a23695e85fc,Standardization of Face Image Sample Quality,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+2b10a07c35c453144f22e8c539bf9a23695e85fc,Standardization of Face Image Sample Quality,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+2bc16bf87ceec85822912ef612385e519a6f98b5,"Sequential Attend, Infer, Repeat: Generative Modelling of Moving Objects",University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+2bc16bf87ceec85822912ef612385e519a6f98b5,"Sequential Attend, Infer, Repeat: Generative Modelling of Moving Objects",University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+2ba23d9b46027e47b4483243871760e315213ffe,Energy-based Generative Adversarial Network,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+2b84630680e2c906f8d7ac528e2eb32c99ef203a,We are not All Equal: Personalizing Models for Facial Expression Analysis with Transductive Parameter Transfer,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+2b84630680e2c906f8d7ac528e2eb32c99ef203a,We are not All Equal: Personalizing Models for Facial Expression Analysis with Transductive Parameter Transfer,University of Perugia,University of Perugia,"Caffe Perugia, 2350, Health Sciences Mall, University Endowment Lands, Metro Vancouver, British Columbia, V6T, Canada",49.26224210,-123.24500520,edu,
+2bf8541199728262f78d4dced6fb91479b39b738,Clothing Co-parsing by Joint Image Segmentation and Labeling,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+2bf8541199728262f78d4dced6fb91479b39b738,Clothing Co-parsing by Joint Image Segmentation and Labeling,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+2b514d32318bb01ab04f75ef19ad1af63bce7943,ad-heap: an Efficient Heap Data Structure for Asymmetric Multicore Processors,University of Copenhagen,University of Copenhagen,"Københavns Universitet, Krystalgade, Kødbyen, Vesterbro, København, Københavns Kommune, Region Hovedstaden, 1165, Danmark",55.68015020,12.57232700,edu,
+2b7ef95822a4d577021df16607bf7b4a4514eb4b,Emergence of Object-Selective Features in Unsupervised Feature Learning,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+2b869d5551b10f13bf6fcdb8d13f0aa4d1f59fc4,Ring loss: Convex Feature Normalization for Face Recognition,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+2b42f83a720bd4156113ba5350add2df2673daf0,Action Recognition and Detection by Combining Motion and Appearance Features,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+2b42f83a720bd4156113ba5350add2df2673daf0,Action Recognition and Detection by Combining Motion and Appearance Features,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+2b42f83a720bd4156113ba5350add2df2673daf0,Action Recognition and Detection by Combining Motion and Appearance Features,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+2b86919eb8073d9b0e137b23cc9a14fab8bc601b,Joint Intensity and Spatial Metric Learning for Robust Gait Recognition,Osaka University,Osaka University,"大阪大学清明寮, 服部西町四丁目, 豊中市, 大阪府, 近畿地方, 日本",34.80809035,135.45785218,edu,
+2b2ba4857991c40fb854080dc5f9e48e60c35e68,"Data Hallucination , Falsification and Validation using Generative Models and Formal Methods by","University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+2b88c583cd62130f1e2c6921db9703a0c5746a90,Multicamera tracking of multiple humans based on colored visual hulls,University of Crete,University of Crete,"House of Europe, Μακεδονίας, Ρέθυμνο, Δήμος Ρεθύμνης, Περιφερειακή Ενότητα Ρεθύμνου, Περιφέρεια Κρήτης, Κρήτη, 930100, Ελλάδα",35.37130240,24.47544080,edu,
+2b852a4e5026ab962050a0ef23a6892e06abb152,EmojiGAN: learning emojis distributions with a generative model,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+2b852a4e5026ab962050a0ef23a6892e06abb152,EmojiGAN: learning emojis distributions with a generative model,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+2b852a4e5026ab962050a0ef23a6892e06abb152,EmojiGAN: learning emojis distributions with a generative model,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+472500bb0fc49354445b25f851905dda621a42d0,Understanding and Predicting Interestingness of Videos,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+4706d61276b953eadeac572bd449cfa70d2e0b82,Hierarchically Structured Reinforcement Learning for Topically Coherent Visual Story Generation,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+47fdbd64edd7d348713253cf362a9c21f98e4296,Facial point detection based on a convolutional neural network with optimal mini-batch procedure,Chubu University,Chubu University,"中部大学, 国道19号, 春日井市, 愛知県, 中部地方, 487-8501, 日本",35.27426550,137.01327841,edu,
+47382cb7f501188a81bb2e10cfd7aed20285f376,Articulated Pose Estimation Using Hierarchical Exemplar-Based Models,Columbia University ,Columbia University in the City of New York,"Columbia University In The City Of New York, College Walk, Morningside Heights, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10027, USA",40.80717720,-73.96252798,edu,
+473366f025c4a6e0783e6174ca914f9cb328fe70,Review of Action Recognition and Detection Methods,York University,York University,"York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada",43.77439110,-79.50481085,edu,
+478cab795c8bc62cb68d3ffa9b0dfc290201416c,Kernel Sharing With Joint Boosting For Multi-Class Concept Detection,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+478cab795c8bc62cb68d3ffa9b0dfc290201416c,Kernel Sharing With Joint Boosting For Multi-Class Concept Detection,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+4764257e844f11e57ff72159bdcfb3dbfe17816a,Towards Instance Segmentation with Object Priority: Prominent Object Detection and Recognition,Aalto University,Aalto University,"Aalto, 24, Otakaari, Otaniemi, Suur-Tapiola, Espoo, Helsingin seutukunta, Uusimaa, Etelä-Suomi, Manner-Suomi, 02150, Suomi",60.18558755,24.82427330,edu,
+4764257e844f11e57ff72159bdcfb3dbfe17816a,Towards Instance Segmentation with Object Priority: Prominent Object Detection and Recognition,Aalto University,Aalto University,"Aalto, 24, Otakaari, Otaniemi, Suur-Tapiola, Espoo, Helsingin seutukunta, Uusimaa, Etelä-Suomi, Manner-Suomi, 02150, Suomi",60.18558755,24.82427330,edu,
+47ecc0924c2a17a6664d9ff6c31e2b9b6e490294,An SVM Confidence-Based Approach to Medical Image Annotation,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+4793f11fbca4a7dba898b9fff68f70d868e2497c,Kinship Verification through Transfer Learning,SUNY Buffalo,SUNY Buffalo,"SUNY College at Buffalo, Academic Drive, Elmwood Village, Buffalo, Erie County, New York, 14222, USA",42.93362780,-78.88394479,edu,
+4745baf6c4ae7a088f03340fcc05ad7d18a0aca2,Multi-label Image Classification with A Probabilistic Label Enhancement Model,Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+4774b9853968b12156287bd42bb425d79f99e313,Online Multi-object Tracking Based on Hierarchical Association Framework,Korea University,Korea University,"고려대, 안암로, 제기동, 동대문구, 서울특별시, 02796, 대한민국",37.59014110,127.03623180,edu,
+472c8606e68b34f4cc796a11963155fe3c6bfaec,Evolution of Images with Diversity and Constraints Using a Generator Network,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+472c8606e68b34f4cc796a11963155fe3c6bfaec,Evolution of Images with Diversity and Constraints Using a Generator Network,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+472c8606e68b34f4cc796a11963155fe3c6bfaec,Evolution of Images with Diversity and Constraints Using a Generator Network,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+473031328c58b7461753e81251379331467f7a69,Exploring Fisher vector and deep networks for action spotting,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+473031328c58b7461753e81251379331467f7a69,Exploring Fisher vector and deep networks for action spotting,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+47788b7a4700d1bbc972178f3680a028874afdb5,Adaptive Scheduling for Systems with Asymmetric Memory Hierarchies,MIT,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+47f84928dd6e40797255fa1e1bbb3c12b2659a7c,Input selection for fast feature engineering,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+47f84928dd6e40797255fa1e1bbb3c12b2659a7c,Input selection for fast feature engineering,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+47dfddafed43bc5afef93ac90ea3376a02046151,Max Margin AND/OR Graph learning for parsing the human body,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+47dfddafed43bc5afef93ac90ea3376a02046151,Max Margin AND/OR Graph learning for parsing the human body,University of Science and Technology of China,University of Science and Technology of China,"中国科学技术大学 东校区, 96号, 金寨路, 江淮化肥厂小区, 芜湖路街道, 合肥市区, 合肥市, 安徽省, 230026, 中国",31.83907195,117.26420748,edu,
+47dfddafed43bc5afef93ac90ea3376a02046151,Max Margin AND/OR Graph learning for parsing the human body,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+47dfddafed43bc5afef93ac90ea3376a02046151,Max Margin AND/OR Graph learning for parsing the human body,"University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+47de0569259e6a420c3eda69cdebf01bf85a1acd,An Integrated Development Environment for Faster Feature Engineering,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+47638197d83a8f8174cdddc44a2c7101fa8301b7,Object-Centric Anomaly Detection by Attribute-Based Reasoning,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+47638197d83a8f8174cdddc44a2c7101fa8301b7,Object-Centric Anomaly Detection by Attribute-Based Reasoning,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+47638197d83a8f8174cdddc44a2c7101fa8301b7,Object-Centric Anomaly Detection by Attribute-Based Reasoning,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+476f177b026830f7b31e94bdb23b7a415578f9a4,Intra-class multi-output regression based subspace analysis,"University of California, Santa Barbara","University of California, Santa Barbara","UCSB, Santa Barbara County, California, 93106, USA",34.41459370,-119.84581950,edu,
+476f177b026830f7b31e94bdb23b7a415578f9a4,Intra-class multi-output regression based subspace analysis,"University of California, Santa Barbara","University of California, Santa Barbara","UCSB, Santa Barbara County, California, 93106, USA",34.41459370,-119.84581950,edu,
+472ba8dd4ec72b34e85e733bccebb115811fd726,Cosine Similarity Metric Learning for Face Verification,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+47eba2f95679e106e463e8296c1f61f6ddfe815b,Deep Co-occurrence Feature Learning for Visual Object Recognition,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+47eba2f95679e106e463e8296c1f61f6ddfe815b,Deep Co-occurrence Feature Learning for Visual Object Recognition,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+472541ccd941b9b4c52e1f088cc1152de9b3430f,Learning in an Uncertain World: Representing Ambiguity Through Multiple Hypotheses,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+477ca04e9c6b9fd8326af7e11c6d60b6ada2f42a,Adapting Models to Signal Degradation using Distillation,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+4767a0c9f7261a4265db650d3908c6dd1d10a076,Joint tracking and segmentation of multiple targets,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+47203943c86e4d9355ffd99cd3d75f37211fd805,Semi-Crowdsourced Clustering: Generalizing Crowd Labeling by Robust Distance Metric Learning,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+47203943c86e4d9355ffd99cd3d75f37211fd805,Semi-Crowdsourced Clustering: Generalizing Crowd Labeling by Robust Distance Metric Learning,GE Global Research Center,GE Global Research Center,"GE Global Research Center, Aqueduct, Niskayuna, Schenectady County, New York, USA",42.82982480,-73.87719385,edu,
+47190d213caef85e8b9dd0d271dbadc29ed0a953,The Devil of Face Recognition is in the Noise,"University of California, San Diego","University of California, San Diego","UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA",32.87935255,-117.23110049,edu,
+47190d213caef85e8b9dd0d271dbadc29ed0a953,The Devil of Face Recognition is in the Noise,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+47b6cd69c0746688f6e17b37d73fa12422826dbc,Self corrective Perturbations for Semantic Segmentation and Classification,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+47b6cd69c0746688f6e17b37d73fa12422826dbc,Self corrective Perturbations for Semantic Segmentation and Classification,University of Maryland College Park,University of Maryland College Park,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",38.99203005,-76.94610290,edu,
+47b6cd69c0746688f6e17b37d73fa12422826dbc,Self corrective Perturbations for Semantic Segmentation and Classification,GE Global Research Center,GE Global Research Center,"GE Global Research Center, Aqueduct, Niskayuna, Schenectady County, New York, USA",42.82982480,-73.87719385,edu,
+47b6cd69c0746688f6e17b37d73fa12422826dbc,Self corrective Perturbations for Semantic Segmentation and Classification,GE Global Research Center,GE Global Research Center,"GE Global Research Center, Aqueduct, Niskayuna, Schenectady County, New York, USA",42.82982480,-73.87719385,edu,
+479eb6579194d4d944671dfe5e90b122ca4b58fd,Structural inference embedded adversarial networks for scene parsing,Harbin Engineering University,Harbin Engineering University,"哈尔滨工程大学, 文庙街 - Wenmiao Street, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.77445695,126.67684917,edu,
+479eb6579194d4d944671dfe5e90b122ca4b58fd,Structural inference embedded adversarial networks for scene parsing,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+47d8d8ead70e26eb791c4dea5fe1a4d666ee2462,Single Image 3D without a Single 3D Image,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+47662d1a368daf70ba70ef2d59eb6209f98b675d,The CMU Face In Action (FIA) Database,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+47662d1a368daf70ba70ef2d59eb6209f98b675d,The CMU Face In Action (FIA) Database,GE Global Research Center,GE Global Research Center,"GE Global Research Center, Aqueduct, Niskayuna, Schenectady County, New York, USA",42.82982480,-73.87719385,edu,
+471635c61fffa75cd09121b14e4da155c667c5bf,Exploring the Design Space of Deep Convolutional Neural Networks at Large Scale,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+47dabb566f2bdd6b3e4fa7efc941824d8b923a13,Probabilistic Temporal Head Pose Estimation Using a Hierarchical Graphical Model,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+4747d169a5d6b48febfa111a8b28680159eb3bb2,Detecting People in Artwork with CNNs,University of Bristol,University of Bristol,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK",51.45848370,-2.60977520,edu,
+4780cece6d4adeb0b070fbefbd587b89f4acf3f7,Shared and idiosyncratic cortical activation patterns in autism revealed under continuous real-life viewing conditions.,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+4780cece6d4adeb0b070fbefbd587b89f4acf3f7,Shared and idiosyncratic cortical activation patterns in autism revealed under continuous real-life viewing conditions.,Weizmann Institute of Science,Weizmann Institute of Science,"מכון ויצמן למדע, שדרת מרכוס זיו, מעונות שיין, אחוזות הנשיא, רחובות, מחוז המרכז, NO, ישראל",31.90784990,34.81334092,edu,
+4780cece6d4adeb0b070fbefbd587b89f4acf3f7,Shared and idiosyncratic cortical activation patterns in autism revealed under continuous real-life viewing conditions.,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+4780cece6d4adeb0b070fbefbd587b89f4acf3f7,Shared and idiosyncratic cortical activation patterns in autism revealed under continuous real-life viewing conditions.,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+4780cece6d4adeb0b070fbefbd587b89f4acf3f7,Shared and idiosyncratic cortical activation patterns in autism revealed under continuous real-life viewing conditions.,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+4771af2eeb920bde146c74ee0f56bd421793cd33,ste-GAN-ography: Generating Steganographic Images via Adversarial Training,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+4771af2eeb920bde146c74ee0f56bd421793cd33,ste-GAN-ography: Generating Steganographic Images via Adversarial Training,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+479cd0af9f345bd44cd180a5e26f3e799391e31d,Supervised local subspace learning for continuous head pose estimation,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+479cd0af9f345bd44cd180a5e26f3e799391e31d,Supervised local subspace learning for continuous head pose estimation,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+479cd0af9f345bd44cd180a5e26f3e799391e31d,Supervised local subspace learning for continuous head pose estimation,University of Electronic Science and Technology of China,University of Electronic Science and Technology of China,"电子科技大学 清水河校区, 2006号, 西源大道, 高新西区, 郫都区, 成都市, 四川省, 611731, 中国",30.75286000,103.93085000,edu,
+47493ad6e6d5591086c8a2b812bfae85aae50193,On gradient regularizers for MMD GANs,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+47493ad6e6d5591086c8a2b812bfae85aae50193,On gradient regularizers for MMD GANs,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+47493ad6e6d5591086c8a2b812bfae85aae50193,On gradient regularizers for MMD GANs,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+47493ad6e6d5591086c8a2b812bfae85aae50193,On gradient regularizers for MMD GANs,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+473cbc5ec2609175041e1410bc6602b187d03b23,Semantic Audio-Visual Data Fusion for Automatic Emotion Recognition,Delft University of Technology,Delft University of Technology,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland",51.99882735,4.37396037,edu,
+47e388dcd33feda1dadac82cd60fd7f7fb599594,"Attributed Grammars for Joint Estimation of Human Attributes, Part and Pose","University of California, Los Angeles","University of California, Los Angeles","200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA",34.06877880,-118.44500940,edu,
+784731961819abc5a5a199be1573abd828bd9af1,Recognizing Emily and Latisha: Inconsistent Effects of Name Stereotypicality on the Other-Race Effect,Universität Hamburg,Universität Hamburg,"Informatikum, 30, Vogt-Kölln-Straße, Stellingen, Eimsbüttel, Hamburg, 22527, Deutschland",53.59948200,9.93353436,edu,
+786f2e480cb81c9df8d213ac156a5333946a2b8f,Violations of Personal Space by Individuals with Autism Spectrum Disorder,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+786f2e480cb81c9df8d213ac156a5333946a2b8f,Violations of Personal Space by Individuals with Autism Spectrum Disorder,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+7811fd02bb77e2f6644f34c0f445d096199c3c2f,Model-Free Head Pose Estimation Based on Shape Factorisation and Particle Filtering,University of Malta,University of Malta,"University of Malta, Ring Road, Japanese Garden, L-Imsida, Malta, MSD 9027, Malta",35.90232260,14.48341890,edu,
+7803281f4b94cb25ed17786fd63807d223cf7af4,Input Reconstruction Side and top down view Part Segmentation Input Reconstruction Side and top down view Part Segmentation,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+78f08cc9f845dc112f892a67e279a8366663e26d,Semi-Autonomous Data Enrichment and Optimisation for Intelligent Speech Analysis,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+78d645d5b426247e9c8f359694080186681f57db,Gender Classification by LUT Based Boosting of Overlapping Block Patterns,Tampere University of Technology,Tampere University of Technology,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi",61.44964205,23.85877462,edu,
+78d645d5b426247e9c8f359694080186681f57db,Gender Classification by LUT Based Boosting of Overlapping Block Patterns,IDIAP Research Institute,IDIAP Research Institute,"Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra",46.10923700,7.08453549,edu,
+78560fc9c224c1b605b3ed30cc3345863c5988e2,Boosted multiple kernel learning for first-person activity recognition,Middle East Technical University,Middle East Technical University,"ODTÜ, 1, 1591.sk(315.sk), Çiğdem Mahallesi, Ankara, Çankaya, Ankara, İç Anadolu Bölgesi, 06800, Türkiye",39.87549675,32.78553506,edu,
+78560fc9c224c1b605b3ed30cc3345863c5988e2,Boosted multiple kernel learning for first-person activity recognition,University of Birmingham,University of Birmingham,"University of Birmingham Edgbaston Campus, Ring Road North, Bournbrook, Birmingham, West Midlands Combined Authority, West Midlands, England, B15 2TP, UK",52.45044325,-1.93196134,edu,
+7862d40da0d4e33cd6f5c71bbdb47377e4c6b95a,Demography-based facial retouching detection using subclass supervised sparse autoencoder,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+78569509e61269f5d2276b80f4fd41c22617ccc4,Localization Guided Learning for Pedestrian Attribute Recognition,City University of Hong Kong,City University of Hong Kong,"香港城市大學 City University of Hong Kong, 達康路 Tat Hong Avenue, 大窩坪 Tai Wo Ping, 深水埗區 Sham Shui Po District, 九龍 Kowloon, HK, KIL 3348, 中国",22.34000115,114.16970291,edu,
+78569509e61269f5d2276b80f4fd41c22617ccc4,Localization Guided Learning for Pedestrian Attribute Recognition,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+78569509e61269f5d2276b80f4fd41c22617ccc4,Localization Guided Learning for Pedestrian Attribute Recognition,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+7859667ed6c05a467dfc8a322ecd0f5e2337db56,Web-Scale Transfer Learning for Unconstrained 1:N Face Identification,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+78c1ad33772237bf138084220d1ffab800e1200d,Decorrelated Batch Normalization,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+78c1ad33772237bf138084220d1ffab800e1200d,Decorrelated Batch Normalization,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+78436256ff8f2e448b28e854ebec5e8d8306cf21,Measuring and Understanding Sensory Representations within Deep Networks Using a Numerical Optimization Framework,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+78436256ff8f2e448b28e854ebec5e8d8306cf21,Measuring and Understanding Sensory Representations within Deep Networks Using a Numerical Optimization Framework,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+78436256ff8f2e448b28e854ebec5e8d8306cf21,Measuring and Understanding Sensory Representations within Deep Networks Using a Numerical Optimization Framework,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+78990bd69e12f7d123b6a0ce6b1674ea801f2319,Learning Joint Representations of Videos and Sentences with Web Image Search,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+78102141a1b78101515f93385e7b71a4aa1955c5,An Adaptation Framework for Head-Pose Classification in Dynamic Multi-view Scenarios,Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.02223470,77.56718325,edu,
+78102141a1b78101515f93385e7b71a4aa1955c5,An Adaptation Framework for Head-Pose Classification in Dynamic Multi-view Scenarios,University of Perugia,University of Perugia,"Università degli Studi di Perugia, 1, Piazza dell'Università, Perugia, Umbria, 06123, Italia",43.11219000,12.38884000,edu,
+78bdba66b1a5fb19824be37c4f5c2d20e0e3b34f,2 . Dilated Residual Networks,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+78bdba66b1a5fb19824be37c4f5c2d20e0e3b34f,2 . Dilated Residual Networks,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+78f62042bfb3bb49ba10e142d118a9bb058b2a19,WebSeg: Learning Semantic Segmentation from Web Searches,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+78f79c83b50ff94d3e922bed392737b47f93aa06,The computer expression recognition toolbox (CERT),University of Arizona,University of Arizona,"University of Arizona, North Highland Avenue, Rincon Heights, Barrio Viejo, Tucson, Pima County, Arizona, 85721, USA",32.23517260,-110.95095832,edu,
+78f79c83b50ff94d3e922bed392737b47f93aa06,The computer expression recognition toolbox (CERT),University of Buffalo,University at Buffalo,"University at Buffalo, North Campus, Amherst, Erie County, New York, 14260, USA",43.00080000,-78.78900000,edu,
+78fede85d6595e7a0939095821121f8bfae05da6,Discriminant Metric Learning Approach for Face Verification,National Cheng Kung University,National Cheng Kung University,"成大, 1, 大學路, 大學里, 前甲, 東區, 臺南市, 70101, 臺灣",22.99919160,120.21625134,edu,
+78598e7005f7c96d64cc47ff47e6f13ae52245b8,Hand2Face: Automatic synthesis and recognition of hand over face occlusions,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+78598e7005f7c96d64cc47ff47e6f13ae52245b8,Hand2Face: Automatic synthesis and recognition of hand over face occlusions,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+78598e7005f7c96d64cc47ff47e6f13ae52245b8,Hand2Face: Automatic synthesis and recognition of hand over face occlusions,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+78598e7005f7c96d64cc47ff47e6f13ae52245b8,Hand2Face: Automatic synthesis and recognition of hand over face occlusions,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+7862f646d640cbf9f88e5ba94a7d642e2a552ec9,Being John Malkovich,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+78bfa428adb237c5ba85eda35e6a304b679c5c8c,Deep Micro-Dictionary Learning and Coding Network,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+78bfa428adb237c5ba85eda35e6a304b679c5c8c,Deep Micro-Dictionary Learning and Coding Network,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+78bdaca41440f03b4d18a4caf9f0dace9afa08b0,The University of Passau Open Emotion Recognition System for the Multimodal Emotion Challenge,University of Passau,"Chair of Complex & Intelligent Systems, University of Passau, Passau, Germany","Innstraße 41, 94032 Passau, Germany",48.56704660,13.45178350,edu,
+78bdaca41440f03b4d18a4caf9f0dace9afa08b0,The University of Passau Open Emotion Recognition System for the Multimodal Emotion Challenge,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+78bdaca41440f03b4d18a4caf9f0dace9afa08b0,The University of Passau Open Emotion Recognition System for the Multimodal Emotion Challenge,Northwestern Polytechnical University,Northwestern Polytechnical University,"西北工业大学 友谊校区, 127号, 友谊西路, 长安路, 碑林区 (Beilin), 西安市, 陕西省, 710072, 中国",34.24691520,108.91061982,edu,
+7869d8b9899226132d410ad6d409746bafe58f77,Biases Associated with Vulnerability to Bipolar Disorder,University of North Texas,University of North Texas,"University of North Texas, West Highland Street, Denton, Denton County, Texas, 76201, USA",33.20988790,-97.15147488,edu,
+78f08685d44b6c6f82983d9b0f9c6ac2f7203a5e,An Adaptive Ensemble Approach to Ambient Intelligence Assisted People Search,Tongji University,Tongji University,"同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国",31.28473925,121.49694909,edu,
+787fe79e880ecb78ec6df797add20a8f93878b68,Unsupervised Cross-dataset Person Re-identification by Transfer Learning of Spatial-Temporal Patterns,South China University of Technology,South China University of Technology,"华南理工大学, 大学城中环东路, 广州大学城, 新造, 番禺区 (Panyu), 广州市, 广东省, 510006, 中国",23.05020420,113.39880323,edu,
+784ee59ea98a0878f1ba709f4385bffcdb4911d7,The iterative nature of person construal: Evidence from event-related potentials,University of Missouri,University of Missouri,"L1, Maguire Boulevard, Lemone Industrial Park, Columbia, Boone County, Missouri, 65201, USA",38.92676100,-92.29193783,edu,
+784ee59ea98a0878f1ba709f4385bffcdb4911d7,The iterative nature of person construal: Evidence from event-related potentials,University of Missouri,University of Missouri,"L1, Maguire Boulevard, Lemone Industrial Park, Columbia, Boone County, Missouri, 65201, USA",38.92676100,-92.29193783,edu,
+78a7b042dfdc8c062a1ae9b4b93195b434e91aca,Face Recognition Using a Time-of-Flight Camera,University of Wollongong,University of Wollongong,"University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia",-34.40505545,150.87834655,edu,
+78a4eb59ec98994bebcf3a5edf9e1d34970c45f6,Conveying shape and features with image-based relighting,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+78a4eb59ec98994bebcf3a5edf9e1d34970c45f6,Conveying shape and features with image-based relighting,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+78a4eb59ec98994bebcf3a5edf9e1d34970c45f6,Conveying shape and features with image-based relighting,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+78a4eb59ec98994bebcf3a5edf9e1d34970c45f6,Conveying shape and features with image-based relighting,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+78a4eb59ec98994bebcf3a5edf9e1d34970c45f6,Conveying shape and features with image-based relighting,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+78a4eb59ec98994bebcf3a5edf9e1d34970c45f6,Conveying shape and features with image-based relighting,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+78f4e29fafad7a0156cff6d14e9b92c8b8533d4c,Multiple Target Tracking Based on Undirected Hierarchical Relation Hypergraph,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+78174c2be084e67f48f3e8ea5cb6c9968615a42c,Periocular Recognition Using CNN Features Off-the-Shelf,Halmstad University,Halmstad University,"Högskolan i Halmstad, 3, Kristian IV:s väg, Larsfrid, Nyhem, Halmstad, Hallands län, Götaland, 301 18, Sverige",56.66340325,12.87929727,edu,
+78e9abfcee29491ffa53e7a988401ea06fbbe719,Deep Structured Learning for Visual Relationship Detection,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+78e9abfcee29491ffa53e7a988401ea06fbbe719,Deep Structured Learning for Visual Relationship Detection,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+780557daaa39a445b24c41f637d5fc9b216a0621,"Large Video Event Ontology Browsing, Search and Tagging (EventNet Demo)",Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+8ba67f45fbb1ce47a90df38f21834db37c840079,People search and activity mining in large-scale community-contributed photos,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+8bed7ff2f75d956652320270eaf331e1f73efb35,Emotion recognition in the wild using deep neural networks and Bayesian classifiers,University of Calabria,"Modeling, Electronics, and Systems, University of Calabria, Rende, Italy","Via Pietro Bucci, 87036 Arcavacata, Rende CS, Italy",39.36502160,16.22571770,edu,
+8bed7ff2f75d956652320270eaf331e1f73efb35,Emotion recognition in the wild using deep neural networks and Bayesian classifiers,Plymouth University,Plymouth University,"Plymouth University, Portland Square, Barbican, Plymouth, South West England, England, PL4 6AP, UK",50.37552690,-4.13937687,edu,
+8bed7ff2f75d956652320270eaf331e1f73efb35,Emotion recognition in the wild using deep neural networks and Bayesian classifiers,University of Calabria,"Modeling, Electronics, and Systems, University of Calabria, Rende, Italy","Via Pietro Bucci, 87036 Arcavacata, Rende CS, Italy",39.36502160,16.22571770,edu,
+8bed7ff2f75d956652320270eaf331e1f73efb35,Emotion recognition in the wild using deep neural networks and Bayesian classifiers,Plymouth University,Plymouth University,"Plymouth University, Portland Square, Barbican, Plymouth, South West England, England, PL4 6AP, UK",50.37552690,-4.13937687,edu,
+8b7191a2b8ab3ba97423b979da6ffc39cb53f46b,Search pruning in video surveillance systems: Efficiency-reliability tradeoff,EURECOM,EURECOM,"Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France",43.61438600,7.07112500,edu,
+8b2dd5c61b23ead5ae5508bb8ce808b5ea266730,The intrinsic memorability of face photographs.,MIT,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+8bf243817112ac0aa1348b40a065bb0b735cdb9c,Learning a Repression Network for Precise Vehicle Search,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+8bfada57140aa1aa22a575e960c2a71140083293,Can we match Ultraviolet Face Images against their Visible Counterparts?,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+8b0a4d41ee469547163ea154ad2b522d6d335671,The unique contributions of perceiver and target characteristics in person perception.,Ryerson University,Ryerson University,"Ryerson University, Gould Street, Downtown Yonge, Old Toronto, Toronto, Ontario, M5B 2G9, Canada",43.65815275,-79.37908010,edu,
+8b0a4d41ee469547163ea154ad2b522d6d335671,The unique contributions of perceiver and target characteristics in person perception.,York University,York University,"York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada",43.77439110,-79.50481085,edu,
+8b0a4d41ee469547163ea154ad2b522d6d335671,The unique contributions of perceiver and target characteristics in person perception.,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+8b0a4d41ee469547163ea154ad2b522d6d335671,The unique contributions of perceiver and target characteristics in person perception.,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+8b0a4d41ee469547163ea154ad2b522d6d335671,The unique contributions of perceiver and target characteristics in person perception.,Ryerson University,Ryerson University,"Ryerson University, Gould Street, Downtown Yonge, Old Toronto, Toronto, Ontario, M5B 2G9, Canada",43.65815275,-79.37908010,edu,
+8befcd91c24038e5c26df0238d26e2311b21719a,A Joint Sequence Fusion Model for Video Question Answering and Retrieval,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+8bf05e179d50def46b008147fd3cce6c582a542f,Marginalizing Corrupted Features,Delft University of Technology,Delft University of Technology,"TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland",51.99882735,4.37396037,edu,
+8bbbdff11e88327816cad3c565f4ab1bb3ee20db,Automatic Semantic Face Recognition,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+8b46df5e851e819473a726503a543a95e130e33d,The Role of Supervision and Geometry in Categorization,Graz University of Technology,Graz University of Technology,"TU Graz, Inffeldgasse, Harmsdorf, Jakomini, Graz, Steiermark, 8010, Österreich",47.05821000,15.46019568,edu,
+8bdf6f03bde08c424c214188b35be8b2dec7cdea,Inference Attacks Against Collaborative Learning,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+8b10383ef569ea0029a2c4a60cc2d8c87391b4db,Age classification using Radon transform and entropy based scaling SVM,University of Dundee,University of Dundee,"University of Dundee, Park Wynd, Law, Dundee, Dundee City, Scotland, DD1 4HN, UK",56.45796755,-2.98214831,edu,
+8b30259a8ab07394d4dac971f3d3bd633beac811,Representing Sets of Instances for Visual Recognition,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+8b61fdc47b5eeae6bc0a52523f519eaeaadbc8c8,Temporal Perceptive Network for Skeleton-Based Action Recognition,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+8b61fdc47b5eeae6bc0a52523f519eaeaadbc8c8,Temporal Perceptive Network for Skeleton-Based Action Recognition,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+8b302c0edefa8f6a2ce6a41c32fab0f1ef36e523,"Large-scale, Cross-lingual Trend Mining and Summarisation of Real-time Media Streams D3.1.2 Regression models of trends Tools for Mining Non-stationary Data: functional prototype",University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+8b302c0edefa8f6a2ce6a41c32fab0f1ef36e523,"Large-scale, Cross-lingual Trend Mining and Summarisation of Real-time Media Streams D3.1.2 Regression models of trends Tools for Mining Non-stationary Data: functional prototype",University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+8b302c0edefa8f6a2ce6a41c32fab0f1ef36e523,"Large-scale, Cross-lingual Trend Mining and Summarisation of Real-time Media Streams D3.1.2 Regression models of trends Tools for Mining Non-stationary Data: functional prototype",University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+8b2d224c8b69191c02dce750257c39d46b1c4a7b,A Reinforcement Learning Framework for Natural Question Generation using Bi-discriminators,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+8b2d224c8b69191c02dce750257c39d46b1c4a7b,A Reinforcement Learning Framework for Natural Question Generation using Bi-discriminators,Fudan University,Fudan University,"复旦大学, 220, 邯郸路, 五角场街道, 杨浦区, 上海市, 200433, 中国",31.30104395,121.50045497,edu,
+8b19efa16a9e73125ab973429eb769d0ad5a8208,SCAR: Dynamic Adaptation for Person Detection and Persistence Analysis in Unconstrained Videos,Stevens Institute of Technology,Stevens Institute of Technology,"Stevens Institute of Technology, River Terrace, Hoboken, Hudson County, New Jersey, 07030, USA",40.74225200,-74.02709490,edu,
+8b6fded4d08bf0b7c56966b60562ee096af1f0c4,A Neural Network based Facial Expression Recognition using Fisherface,Semarang State University,Semarang State University,"Mandiri University, Jalan Tambora, RW 10, Tegalsari, Candisari, Semarang, Jawa Tengah, 50252, Indonesia",-7.00349485,110.41774949,edu,
+8b2704a5218a6ef70e553eaf0a463bd55129b69d,Geometric Feature-Based Facial Expression Recognition in Image Sequences Using Multi-Class AdaBoost and Support Vector Machines,Chonbuk National University,Chonbuk National University,"전북대학교, 567, 백제대로, 금암동, 덕진구, 전주시, 전북, 54896, 대한민국",35.84658875,127.13501330,edu,
+8b162c2a15bc7aa56cdc1be9773611bc21536782,On Automating Basic Data Curation Tasks,University of New South Wales,University of New South Wales,"UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia",-33.91758275,151.23124025,edu,
+8b162c2a15bc7aa56cdc1be9773611bc21536782,On Automating Basic Data Curation Tasks,University of New South Wales,University of New South Wales,"UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia",-33.91758275,151.23124025,edu,
+8b162c2a15bc7aa56cdc1be9773611bc21536782,On Automating Basic Data Curation Tasks,University of New South Wales,University of New South Wales,"UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia",-33.91758275,151.23124025,edu,
+8b162c2a15bc7aa56cdc1be9773611bc21536782,On Automating Basic Data Curation Tasks,University of New South Wales,University of New South Wales,"UNSW, International Square, UNSW, Kensington, Bay Gardens, Sydney, Randwick, NSW, 2033, Australia",-33.91758275,151.23124025,edu,
+8b68db1af010f36f7e9d174d6ca0fcb24c1049ee,Part I : Computation of Invariant Representations in Visual Cortex and in Deep Convolutional Architectures,McGovern Institute for Brain Research,McGovern Institute for Brain Research,"McGovern Institute for Brain Research (MIT), Main Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.36262950,-71.09144810,edu,
+8b68db1af010f36f7e9d174d6ca0fcb24c1049ee,Part I : Computation of Invariant Representations in Visual Cortex and in Deep Convolutional Architectures,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+8b266e68cc71f98ee42b04dc8f3e336c47f199cb,Learning Face Age Progression: A Pyramid Architecture of GANs,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,
+8b266e68cc71f98ee42b04dc8f3e336c47f199cb,Learning Face Age Progression: A Pyramid Architecture of GANs,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+8b2e3805b37c18618b74b243e7a6098018556559,Variational Autoencoder with Deep Feature Consistent and Generative Adversarial Training,University of Nottingham,University of Nottingham,"University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK",52.93874280,-1.20029569,edu,
+8b2e3805b37c18618b74b243e7a6098018556559,Variational Autoencoder with Deep Feature Consistent and Generative Adversarial Training,Shenzhen University,Shenzhen University,"深圳大学, 3688, 南海大道, 蛇口, 同乐村, 南山区, 深圳市, 广东省, 518060, 中国",22.53521465,113.93159110,edu,
+8b4a10cfa107f3c6546caa32e5012d342d02212a,Learning and Exploiting Camera Geometry for Computer Vision,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+8b74252625c91375f55cbdd2e6415e752a281d10,Using Convolutional 3D Neural Networks for User-independent continuous gesture recognition,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+8b74252625c91375f55cbdd2e6415e752a281d10,Using Convolutional 3D Neural Networks for User-independent continuous gesture recognition,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+8b74252625c91375f55cbdd2e6415e752a281d10,Using Convolutional 3D Neural Networks for User-independent continuous gesture recognition,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+8b5122ea59d8d7f70e344ffb2553537b5ad07dd5,Image Translation by Domain-Adversarial Training,Zhejiang University of Technology,Zhejiang University of Technology,"浙江工业大学, 潮王路, 朝晖街道, 杭州市 Hangzhou, 浙江省, 310014, 中国",30.29315340,120.16204580,edu,
+8b0af7d056e7e8a5ef2bf1278fa0740771e23401,Correntropy Induced L2 Graph for Robust Subspace Clustering,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+8b0af7d056e7e8a5ef2bf1278fa0740771e23401,Correntropy Induced L2 Graph for Robust Subspace Clustering,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+8b0af7d056e7e8a5ef2bf1278fa0740771e23401,Correntropy Induced L2 Graph for Robust Subspace Clustering,Sun Yat-Sen University,Sun Yat-Sen University,"中大, 新港西路, 龙船滘, 康乐, 海珠区 (Haizhu), 广州市, 广东省, 510105, 中国",23.09461185,113.28788994,edu,
+8b0af7d056e7e8a5ef2bf1278fa0740771e23401,Correntropy Induced L2 Graph for Robust Subspace Clustering,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+8bad9d970664d4a9874ea71de9cc7b4360ba04ab,Person Independent Facial Expression Recognition Using 3D Facial Feature Positions,Eastern Mediterranean University,Eastern Mediterranean University,"Eastern Mediterranean University (EMU) - Stadium, Nehir Caddesi, Gazimağusa, Αμμόχωστος - Mağusa, Kuzey Kıbrıs, 99450, Κύπρος - Kıbrıs",35.14479945,33.90492318,edu,
+8b2064a6a535cd2b49e348560c4f9e2c3a8f4748,A Method Based on Convex Cone Model for Image-Set Classification With CNN Features,University of Tsukuba,University of Tsukuba,"University of Tsukuba, つばき通り, Kananemoto-satsukabe village, つくば市, 茨城県, 関東地方, 305-8377, 日本",36.11120580,140.10551760,edu,
+8b38124ff02a9cf8ad00de5521a7f8a9fa4d7259,Real-time 3D Face Fitting and Texture Fusion on In-the-wild Videos,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+8b38124ff02a9cf8ad00de5521a7f8a9fa4d7259,Real-time 3D Face Fitting and Texture Fusion on In-the-wild Videos,Reutlingen University,Reutlingen University,"Campus Hohbuch, Campus Hochschule Reutlingen, Reutlingen, Landkreis Reutlingen, Regierungsbezirk Tübingen, Baden-Württemberg, 72762, Deutschland",48.48187645,9.18682404,edu,
+8ba606d7667c50054d74083867230abbed755574,"ReXCam: Resource-Efficient, Cross-Camera Video Analytics at Enterprise Scale",Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+8ba606d7667c50054d74083867230abbed755574,"ReXCam: Resource-Efficient, Cross-Camera Video Analytics at Enterprise Scale",Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+8bc814c9653ef7fe248986788dd2a53375317a3a,Trace Ratio Criterion for Feature Selection,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+8bc814c9653ef7fe248986788dd2a53375317a3a,Trace Ratio Criterion for Feature Selection,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+13f74a94d492919a1ff13af16e2df2ab1bedf04a,Selecting the Best Performing Rotation Invariant Patterns in Local Binary/Ternary Patterns,University of Bologna,Università di Bologna,"Via Zamboni, 33, 40126 Bologna BO, Italy",44.49623180,11.35415700,edu,
+13af83892724343cfdf88debbf00ea1343a10db1,Impact of involuntary subject movement on 3D face scans,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+13af83892724343cfdf88debbf00ea1343a10db1,Impact of involuntary subject movement on 3D face scans,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+13a94d8f5eafbbf411c9a70e1b30937a532664ef,Non-rigid registration of 3D surfaces by deformable 2D triangular meshes,Bogazici University,Bogazici University,"Boğaziçi Üniversitesi Kuzey Yerleşkesi, Okulaltı 1. Sokak, Rumelihisarı, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34467, Türkiye",41.08688410,29.04413167,edu,
+134f1cee8408cca648d8b4ca44b38b0a7023af71,Partially Shared MultiTask Convolutional Neural Network with Local Constraint for Face Attribute Learning,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+13d53896f0ee30121c8dc75dcbfd5ff6c722199b,Building Context-Aware Object Detectors: Tying Objects and Context in a loop,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+13d6dde8767ac7176dcd6d4367974292bc627863,Multi-attribute Queries: To Merge or Not to Merge?,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+13d6dde8767ac7176dcd6d4367974292bc627863,Multi-attribute Queries: To Merge or Not to Merge?,Sharif University of Technology,Sharif University of Technology,"دانشگاه صنعتی شریف, خیابان آزادی, زنجان, منطقه ۹ شهر تهران, تهران, بخش مرکزی شهرستان تهران, شهرستان تهران, استان تهران, 14588, ‏ایران‎",35.70362270,51.35125097,edu,
+13d6dde8767ac7176dcd6d4367974292bc627863,Multi-attribute Queries: To Merge or Not to Merge?,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+13719bbb4bb8bbe0cbcdad009243a926d93be433,Deep LDA-Pruned Nets for Efficient Facial Gender Classification,McGill University,McGill University,"McGill University, Rue Sherbrooke Ouest, Quartier des Spectacles, Ville-Marie, Montréal, Agglomération de Montréal, Montréal (06), Québec, H3A 3P8, Canada",45.50397610,-73.57496870,edu,
+1328c0a8a357b303f6e853581360370ef2975612,Obtaining MPEG-4 compliant animatable 3D face models by using TPS method,EURECOM,EURECOM,"Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France",43.61438600,7.07112500,edu,
+133b0d480a8fac7c7e0c7511b5bdb0dc7d387d42,This Hand Is My Hand: A Probabilistic Approach to Hand Disambiguation in Egocentric Video,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+133b0d480a8fac7c7e0c7511b5bdb0dc7d387d42,This Hand Is My Hand: A Probabilistic Approach to Hand Disambiguation in Egocentric Video,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+1329206dbdb0a2b9e23102e1340c17bd2b2adcf5,Part-Based R-CNNs for Fine-Grained Category Detection,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+13bda03fc8984d5943ed8d02e49a779d27c84114,Efficient object detection using cascades of nearest convex model classifiers,Eskisehir Osmangazi University,Eskisehir Osmangazi University,"Eskişehir Osmangazi Üniversitesi Meşelik Yerleşkesi, Kütahya-Eskişehir yolu, Sazova Mahallesi, Karagözler, Tepebaşı, Eskişehir, İç Anadolu Bölgesi, 26160, Türkiye",39.74875160,30.47653071,edu,
+1337acf12805a24968c0518e695ca94f103e630f,A Topic Model Approach to Represent and Classify American Football Plays,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+13a994d489c15d440c1238fc1ac37dad06dd928c,Learning Discriminant Face Descriptor for Face Recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+13c0c418df650ad94ac368c81e2133ec9e166381,Mid-level deep pattern mining,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+131125a5aadb48ec3eceb404cedbff713c401feb,Building a Large-scale Multimodal Knowledge Base for Visual Question Answering,University of Wisconsin Madison,University of Wisconsin Madison,"University of Wisconsin-Madison, Marsh Lane, Madison, Dane County, Wisconsin, 53705-2221, USA",43.07982815,-89.43066425,edu,
+131125a5aadb48ec3eceb404cedbff713c401feb,Building a Large-scale Multimodal Knowledge Base for Visual Question Answering,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+131178dad3c056458e0400bed7ee1a36de1b2918,Visual Reranking through Weakly Supervised Multi-graph Learning,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+131178dad3c056458e0400bed7ee1a36de1b2918,Visual Reranking through Weakly Supervised Multi-graph Learning,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+131178dad3c056458e0400bed7ee1a36de1b2918,Visual Reranking through Weakly Supervised Multi-graph Learning,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+132527383890565d18f1b7ad50d76dfad2f14972,Facial Expression Classification Using PCA and Hierarchical Radial Basis Function Network,National Taipei University,National Taipei University,"國立臺北大學, 151, 大學路, 龍恩里, 隆恩埔, 三峽區, 新北市, 23741, 臺灣",24.94314825,121.36862979,edu,
+13604bbdb6f04a71dea4bd093794e46730b0a488,Robust Loss Functions under Label Noise for Deep Neural Networks,Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.02223470,77.56718325,edu,
+13604bbdb6f04a71dea4bd093794e46730b0a488,Robust Loss Functions under Label Noise for Deep Neural Networks,Indian Institute of Science Bangalore,Indian Institute of Science Bangalore,"IISc, Gulmohar Marg, RMV Stage II - 1st Block, Aramane Nagara Ward, West Zone, Bengaluru, Bangalore Urban, Karnataka, 560012, India",13.02223470,77.56718325,edu,
+1362b43a76412ed9ac67fd182a72b9457cae5aed,Delving into egocentric actions,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+137aa2f891d474fce1e7a1d1e9b3aefe21e22b34,Is the eye region more reliable than the face? A preliminary study of face-based recognition on a transgender dataset,University of North Carolina Wilmington,University of North Carolina Wilmington,"Kenan House, 1705, Market Street, Wilmington, New Hanover County, North Carolina, 28403, USA",34.23755810,-77.92701290,edu,
+13b1b18b9cfa6c8c44addb9a81fe10b0e89db32a,A Hierarchical Deep Temporal Model for Group Activity Recognition,Simon Fraser University,Simon Fraser University,"SFU Burnaby, South Campus Road, Barnet, Burnaby, Metro Vancouver, British Columbia, V5A 4X6, Canada",49.27674540,-122.91777375,edu,
+133477ccff666305d183cf1c35dcee40d0f2955a,Detach and Adapt: Learning Cross-Domain Disentangled Deep Representation,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+133477ccff666305d183cf1c35dcee40d0f2955a,Detach and Adapt: Learning Cross-Domain Disentangled Deep Representation,Northwestern University,Northwestern University,"Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA",42.05511640,-87.67581113,edu,
+133477ccff666305d183cf1c35dcee40d0f2955a,Detach and Adapt: Learning Cross-Domain Disentangled Deep Representation,National Chiao Tung University,National Chiao Tung University,"NCTU;交大;交通大學;交大光復校區;交通大學光復校區, 1001, 大學路, 光明里, 赤土崎, 東區, 新竹市, 30010, 臺灣",24.78676765,120.99724412,edu,
+1329bcac5ebd0b08ce33ae1af384bd3e7a0deaca,Dataset Issues in Object Recognition,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+1329bcac5ebd0b08ce33ae1af384bd3e7a0deaca,Dataset Issues in Object Recognition,Oxford University,Oxford University,"University College, Logic Lane, Grandpont, Oxford, Oxon, South East, England, OX1 4EX, UK",51.75208490,-1.25166460,edu,
+1329bcac5ebd0b08ce33ae1af384bd3e7a0deaca,Dataset Issues in Object Recognition,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+1329bcac5ebd0b08ce33ae1af384bd3e7a0deaca,Dataset Issues in Object Recognition,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+13c250fb740cb5616aeb474869db6ab11560e2a6,A thesis submitted in conformity with the requirements,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+13940d0cc90dbf854a58f92d533ce7053aac024a,Local learning by partitioning,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+13940d0cc90dbf854a58f92d533ce7053aac024a,Local learning by partitioning,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+13ac93950986fc023d45e9647197d80b86fa4867,Subspace clustering applied to face images,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+131bfa2ae6a04fd3b921ccb82b1c3f18a400a9c1,Elastic Graph Matching versus Linear Subspace Methods for Frontal Face Verification,Aristotle University of Thessaloniki,Aristotle University of Thessaloniki,"Αριστοτέλειο Πανεπιστήμιο Θεσσαλονίκης, Εγνατία, Σαράντα Εκκλησίες, Ευαγγελίστρια, Θεσσαλονίκη, Δήμος Θεσσαλονίκης, Περιφερειακή Ενότητα Θεσσαλονίκης, Περιφέρεια Κεντρικής Μακεδονίας, Μακεδονία - Θράκη, 54124, Ελλάδα",40.62984145,22.95889350,edu,
+137239cd29634465f35ce261718efece57cfc617,Video Surveillance Classification-based Multiple Instance Object Retrieval: Evaluation and Dataset,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+1389ba6c3ff34cdf452ede130c738f37dca7e8cb,A Convolution Tree with Deconvolution Branches: Exploiting Geometric Relationships for Single Shot Keypoint Detection,University of Maryland College Park,University of Maryland College Park,"University of Maryland, College Park, Farm Drive, Acredale, College Park, Prince George's County, Maryland, 20742, USA",38.99203005,-76.94610290,edu,
+131e9edbe4b0322a467b7e8c35f6b0c0ca750e21,Contextual Action Recognition with R*CNN,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+1395f0561db13cad21a519e18be111cbe1e6d818,Semantic segmentation using regions and parts,Facebook,Facebook,"250 Bryant St, Mountain View, CA 94041, USA",37.39367170,-122.08072620,company,"Facebook, Mountain View, CA"
+13aef395f426ca8bd93640c9c3f848398b189874,Image Preprocessing and Complete 2DPCA with Feature Extraction for Gender Recognition NSF REU 2017: Statistical Learning and Data Mining,University of North Carolina Wilmington,University of North Carolina Wilmington,"Kenan House, 1705, Market Street, Wilmington, New Hanover County, North Carolina, 28403, USA",34.23755810,-77.92701290,edu,
+133f1f2679892d408420d8092283539010723359,What Makes for Effective Detection Proposals?,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+13be4f13dac6c9a93f969f823c4b8c88f607a8c4,Families in the Wild (FIW): Large-Scale Kinship Image Database and Benchmarks,Northeastern University,Northeastern University,"Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA",42.33836680,-71.08793524,edu,
+136112d29f8abfd8804f9b9c0e15d00f7c013c6c,Space-time tree ensemble for action recognition,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+136112d29f8abfd8804f9b9c0e15d00f7c013c6c,Space-time tree ensemble for action recognition,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+135c957f6a80f250507c7707479e584c288f430f,Image-Based Synthesis and Re-synthesis of Viewpoints Guided by 3D Models,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+1343f43e231d793a0bb45eb13ae2560e99aff6e1,Measuring Image Distances via Embedding in a Semantic Manifold,Dartmouth College,Dartmouth College,"Dartmouth College, Tuck Mall, Hanover, Grafton County, New Hampshire, 03755, USA",43.70479270,-72.29259090,edu,
+13ea9a2ed134a9e238d33024fba34d3dd6a010e0,SVDNet for Pedestrian Retrieval,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+13ea9a2ed134a9e238d33024fba34d3dd6a010e0,SVDNet for Pedestrian Retrieval,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+131e00d1296a952ed236bc264dc16f7e486c6e79,Crowdsourcing Feature Discovery via Adaptively Chosen Comparisons,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+131e00d1296a952ed236bc264dc16f7e486c6e79,Crowdsourcing Feature Discovery via Adaptively Chosen Comparisons,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+1316296fae6485c1510f00b1b57fb171b9320ac2,FaceID-GAN: Learning a Symmetry Three-Player GAN for Identity-Preserving Face Synthesis,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+1316296fae6485c1510f00b1b57fb171b9320ac2,FaceID-GAN: Learning a Symmetry Three-Player GAN for Identity-Preserving Face Synthesis,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+132045bbf158060cbbb20f86f212ce89c4358eda,Role of Color in Face Recognition A Comparison Study Using Traditional and New Face Recognition Algorithms,Concordia University,Concordia University,"Concordia University, 2811, Northeast Holman Street, Concordia, Portland, Multnomah County, Oregon, 97211, USA",45.57022705,-122.63709346,edu,
+139ee1b1d98e7ac9d659a5d1bbe8c75588539b29,Identification of EFHC2 as a quantitative trait locus for fear recognition in Turner syndrome.,University College London,University College London,"UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK",51.52316070,-0.12820370,edu,
+7f57e9939560562727344c1c987416285ef76cda,Accessorize to a Crime: Real and Stealthy Attacks on State-of-the-Art Face Recognition,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+7f57e9939560562727344c1c987416285ef76cda,Accessorize to a Crime: Real and Stealthy Attacks on State-of-the-Art Face Recognition,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+7f57e9939560562727344c1c987416285ef76cda,Accessorize to a Crime: Real and Stealthy Attacks on State-of-the-Art Face Recognition,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+7f57e9939560562727344c1c987416285ef76cda,Accessorize to a Crime: Real and Stealthy Attacks on State-of-the-Art Face Recognition,University of North Carolina,University of North Carolina,"University of North Carolina, Emergency Room Drive, Chapel Hill, Orange County, North Carolina, 27599, USA",35.90503535,-79.04775327,edu,
+7fc5b6130e9d474dfb49d9612b6aa0297d481c8e,Dimensionality Reduction on Grassmannian via Riemannian Optimization: A Generalized Perspective,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+7fc5b6130e9d474dfb49d9612b6aa0297d481c8e,Dimensionality Reduction on Grassmannian via Riemannian Optimization: A Generalized Perspective,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+7f21a7441c6ded38008c1fd0b91bdd54425d3f80,Real Time System for Facial Analysis,Tampere University of Technology,Tampere University of Technology,"TTY, 10, Korkeakoulunkatu, Finninmäki, Hervanta, Tampere, Tampereen seutukunta, Pirkanmaa, Länsi- ja Sisä-Suomen aluehallintovirasto, Länsi-Suomi, Manner-Suomi, 33720, Suomi",61.44964205,23.85877462,edu,
+7fbdb1d05a34d28b7f93544248edf7a2e0b8cd15,POL-LWIR Vehicle Detection: Convolutional Neural Networks Meet Polarised Infrared Sensors,Heriot-Watt University,Heriot-Watt University,"Heriot-Watt University - Edinburgh Campus, Third Gait, Currie, Gogarbank, City of Edinburgh, Scotland, EH14 4AS, UK",55.91029135,-3.32345777,edu,
+7ffef9f26c39377ee937d29b8990580266a7a8a5,Deep Metric Learning with Hierarchical Triplet Loss,University of Hong Kong,University of Hong Kong,"海洋科學研究所 The Swire Institute of Marine Science, 鶴咀道 Cape D'Aguilar Road, 鶴咀低電台 Cape D'Aguilar Low-Level Radio Station, 石澳 Shek O, 芽菜坑村 Nga Choy Hang Tsuen, 南區 Southern District, 香港島 Hong Kong Island, HK, 中国",22.20814690,114.25964115,edu,
+7fce5769a7d9c69248178989a99d1231daa4fce9,Towards Face Recognition Using Eigenface,King Faisal University,King Faisal University,"University of Dammam, King Faisal Rd, العقربية, الخبر, المنطقة الشرقية, ٣١٩٥٢, السعودية",26.39777800,50.18305600,edu,
+7fa2605676c589a7d1a90d759f8d7832940118b5,A new approach to clothing classification using mid-level layers,Clemson University,Clemson University,"Clemson University, Old Stadium Road, Clemson Heights, Pickens County, South Carolina, 29631, USA",34.66869155,-82.83743476,edu,
+7fb5006b6522436ece5bedf509e79bdb7b79c9a7,Multi-Task Convolutional Neural Network for Face Recognition,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+7fac20f3908c69bd336ea252e28c79f5abaa6dbe,Speaking the Same Language: Matching Machine to Human Captions by Adversarial Training,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+7f05df12dff3defee495507abd4870a0a30c3590,Placing Images with Refined Language Models and Similarity Search with PCA-reduced VGG Features,Information Technologies Institute,Information Technologies Institute,"公益財団法人九州先端科学技術研究所, Fukuoka SRP Center Building 7F, 百道ランプ下り入り口, 早良区, 福岡市, 福岡県, 九州地方, 814-0001, 日本",33.59345390,130.35578370,edu,
+7f44f2d7b4a84b6d87dd6f7089ce3ee1e6359272,What's in the Chinese Babyface? Cultural Differences in Understanding the Babyface,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+7f44f2d7b4a84b6d87dd6f7089ce3ee1e6359272,What's in the Chinese Babyface? Cultural Differences in Understanding the Babyface,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+7f44f2d7b4a84b6d87dd6f7089ce3ee1e6359272,What's in the Chinese Babyface? Cultural Differences in Understanding the Babyface,Kobe University,Kobe University,"神戸大学, 灘三田線, 灘区, 神戸市, 兵庫県, 近畿地方, 657-00027, 日本",34.72757140,135.23710000,edu,
+7fa3d4be12e692a47b991c0b3d3eba3a31de4d05,Efficient Online Spatio-Temporal Filtering for Video Event Detection,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+7fa3d4be12e692a47b991c0b3d3eba3a31de4d05,Efficient Online Spatio-Temporal Filtering for Video Event Detection,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+7fa3d4be12e692a47b991c0b3d3eba3a31de4d05,Efficient Online Spatio-Temporal Filtering for Video Event Detection,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+7fda1edac608bc67e55ac3d7c9dc5a542d8f8aee,Multimodal Compact Bilinear Pooling for Visual Question Answering and Visual Grounding,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+7f445191fa0475ff0113577d95502a96dc702ef9,Towards an Unequivocal Representation of Actions,University of Bristol,University of Bristol,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK",51.45848370,-2.60977520,edu,
+7f445191fa0475ff0113577d95502a96dc702ef9,Towards an Unequivocal Representation of Actions,University of Bristol,University of Bristol,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK",51.45848370,-2.60977520,edu,
+7f445191fa0475ff0113577d95502a96dc702ef9,Towards an Unequivocal Representation of Actions,University of Bristol,University of Bristol,"Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK",51.45848370,-2.60977520,edu,
+7fa5ede4a34dbe604ce317d529eed78db6642bc0,Soft Proposal Networks for Weakly Supervised Object Localization,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+7fa5ede4a34dbe604ce317d529eed78db6642bc0,Soft Proposal Networks for Weakly Supervised Object Localization,Duke University,Duke University,"Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA",35.99905220,-78.92906290,edu,
+7f82f8a416170e259b217186c9e38a9b05cb3eb4,Multi-Attribute Robust Component Analysis for Facial UV Maps,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+7f82f8a416170e259b217186c9e38a9b05cb3eb4,Multi-Attribute Robust Component Analysis for Facial UV Maps,Middlesex University,Middlesex University,"Middlesex University, Greyhound Hill, Hendon, The Hyde, London Borough of Barnet, London, Greater London, England, NW4 4JP, UK",51.59029705,-0.22963221,edu,
+7f82f8a416170e259b217186c9e38a9b05cb3eb4,Multi-Attribute Robust Component Analysis for Facial UV Maps,University of London,University of London,"Birkbeck College, Malet Street, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1E 7HX, UK",51.52176680,-0.13019072,edu,
+7fd4e67938d02452e256c69822285778f95eb045,Genetic and Evolutionary Feature Extraction via X-TOOLSS,Clemson University,Clemson University,"Clemson University, Old Stadium Road, Clemson Heights, Pickens County, South Carolina, 29631, USA",34.66869155,-82.83743476,edu,
+7fab17ef7e25626643f1d55257a3e13348e435bd,Age Progression/Regression by Conditional Adversarial Autoencoder,University of Tennessee,University of Tennessee,"University of Tennessee, Melrose Avenue, Fort Sanders, Knoxville, Knox County, Tennessee, 37916, USA",35.95424930,-83.93073950,edu,
+7f6599e674a33ed64549cd512ad75bdbd28c7f6c,Kernel Alignment Inspired Linear Discriminant Analysis,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+7f33a5fcc5db4625c66972f0e6f06540b64d4f1e,Image Surveillance Assistant Architecture: Status and Planned Extensions,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+7f33a5fcc5db4625c66972f0e6f06540b64d4f1e,Image Surveillance Assistant Architecture: Status and Planned Extensions,Naval Research Laboratory,Naval Research Laboratory,"Naval Research Laboratory Post Office, 4555, Overlook Avenue Southwest, Washington, D.C., 20375, USA",38.82313810,-77.01789020,mil,
+7f9260c00a86a0d53df14469f1fa10e318ee2a3c,How iris recognition works,University of Cambridge,University of Cambridge,"Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK",52.17638955,0.14308882,edu,
+7f2a4cd506fe84dee26c0fb41848cb219305173f,Face Detection and Pose Estimation Based on Evaluating Facial Feature Selection,Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+7f836c047bc86d52e3a28098b53311cb2186acaa,Deep Compositional Question Answering with Neural Module Networks,"University of California, Berkeley","University of California, Berkeley","Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA",37.86871260,-122.25586815,edu,
+7fd700f4a010d765c506841de9884df394c1de1c,Correlational spectral clustering,Max Planck Institute for Biological Cybernetics,Max Planck Institute for Biological Cybernetics,"Max-Planck-Institut für Biologische Kybernetik, 8, Max-Planck-Ring, Max-Planck-Institut, Wanne, Tübingen, Landkreis Tübingen, Regierungsbezirk Tübingen, Baden-Württemberg, 72076, Deutschland",48.53691250,9.05922533,edu,
+7f201b4226d62bf449a68ebcc159acf8b95289be,PinterNet: A thematic label curation tool for large image datasets,Northwestern University,Northwestern University,"Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA",42.05511640,-87.67581113,edu,
+7f59657c883f77dc26393c2f9ed3d19bdf51137b,Facial expression recognition for multiplayer online games,University of Wollongong,University of Wollongong,"University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia",-34.40505545,150.87834655,edu,
+7f59657c883f77dc26393c2f9ed3d19bdf51137b,Facial expression recognition for multiplayer online games,University of Wollongong,University of Wollongong,"University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia",-34.40505545,150.87834655,edu,
+7f59657c883f77dc26393c2f9ed3d19bdf51137b,Facial expression recognition for multiplayer online games,University of Wollongong,University of Wollongong,"University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia",-34.40505545,150.87834655,edu,
+7f59657c883f77dc26393c2f9ed3d19bdf51137b,Facial expression recognition for multiplayer online games,University of Wollongong,University of Wollongong,"University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia",-34.40505545,150.87834655,edu,
+7f59657c883f77dc26393c2f9ed3d19bdf51137b,Facial expression recognition for multiplayer online games,University of Wollongong,University of Wollongong,"University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia",-34.40505545,150.87834655,edu,
+7fb74f5abab4830e3cdaf477230e5571d9e3ca57,Polyhedral Conic Classifiers for Visual Object Detection and Classification,Eskisehir Osmangazi University,Eskisehir Osmangazi University,"Eskişehir Osmangazi Üniversitesi Meşelik Yerleşkesi, Kütahya-Eskişehir yolu, Sazova Mahallesi, Karagözler, Tepebaşı, Eskişehir, İç Anadolu Bölgesi, 26160, Türkiye",39.74875160,30.47653071,edu,
+7f23a4bb0c777dd72cca7665a5f370ac7980217e,Improving Person Re-identification by Attribute and Identity Learning,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+7ff18900bf1d8acbcb81e2f6d8e77fe95e1ddbd0,CoDraw: Visual Dialog for Collaborative Drawing,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+7fc3442c8b4c96300ad3e860ee0310edb086de94,Similarity Scores Based on Background Samples,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+7fc3442c8b4c96300ad3e860ee0310edb086de94,Similarity Scores Based on Background Samples,Open University of Israel,Open University of Israel,"האוניברסיטה הפתוחה, 15, אבא חושי, חיפה, גבעת דאונס, חיפה, מחוז חיפה, NO, ישראל",32.77824165,34.99565673,edu,
+7f8d44e7fd2605d580683e47bb185de7f9ea9e28,Predicting Personal Traits from Facial Images Using Convolutional Neural Networks Augmented with Facial Landmark Information,Hebrew University of Jerusalem,The Hebrew University of Jerusalem,"האוניברסיטה העברית בירושלים, Reagan Plaza, קרית מנחם בגין, הר הצופים, ירושלים, מחוז ירושלים, NO, ישראל",31.79185550,35.24472300,edu,
+7f8d44e7fd2605d580683e47bb185de7f9ea9e28,Predicting Personal Traits from Facial Images Using Convolutional Neural Networks Augmented with Facial Landmark Information,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+7f8d44e7fd2605d580683e47bb185de7f9ea9e28,Predicting Personal Traits from Facial Images Using Convolutional Neural Networks Augmented with Facial Landmark Information,Cambridge University,Cambridge University,"University, Cambridge Road, Old Portsmouth, Portsmouth, South East, England, PO1 2HB, UK",50.79440260,-1.09717480,edu,
+7fd73c91462153e16d207faa8ec0e3f507c72ae5,Multi-Sample Fusion with Template Protection,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+7fbf1885f27fb72d5e553c4a2147375f928465ee,Not All Pixels Are Equal: Difficulty-Aware Semantic Segmentation via Deep Layer Cascade,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+7fbf1885f27fb72d5e553c4a2147375f928465ee,Not All Pixels Are Equal: Difficulty-Aware Semantic Segmentation via Deep Layer Cascade,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+7fad07a6cf4c0985c7146e12d8e6639234e447fd,Graph Distillation for Action Detection with Privileged Modalities,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+7f1f3d7b1a4e7fc895b77cb23b1119a6f13e4d3a,Multi-subregion based probabilistic approach toward pose-invariant face recognition,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+7fcfd72ba6bc14bbb90b31fe14c2c77a8b220ab2,Robust FEC-CNN: A High Accuracy Facial Landmark Detection System,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+7fcfd72ba6bc14bbb90b31fe14c2c77a8b220ab2,Robust FEC-CNN: A High Accuracy Facial Landmark Detection System,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+7fb8d9c36c23f274f2dd84945dd32ec2cc143de1,Semantic Segmentation with Second-Order Pooling,Institute of Systems and Robotics,Institute of Systems and Robotics,"Institut für Robotik und Kognitive Systeme, 160, Ratzeburger Allee, Strecknitz, Sankt Jürgen, Strecknitz, Lübeck, Schleswig-Holstein, 23562, Deutschland",53.83383710,10.70359390,edu,
+7fb8d9c36c23f274f2dd84945dd32ec2cc143de1,Semantic Segmentation with Second-Order Pooling,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+7fa4e972da46735971aad52413d17c4014c49e6e,How to Train Triplet Networks with 100K Identities?,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+7fa4e972da46735971aad52413d17c4014c49e6e,How to Train Triplet Networks with 100K Identities?,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+7fa4e972da46735971aad52413d17c4014c49e6e,How to Train Triplet Networks with 100K Identities?,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+7f310839e62c2623f6267b533047b323f61d2b27,Learning to Combine Kernels for Object Categorization,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+7f310839e62c2623f6267b533047b323f61d2b27,Learning to Combine Kernels for Object Categorization,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+7f310839e62c2623f6267b533047b323f61d2b27,Learning to Combine Kernels for Object Categorization,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+7f310839e62c2623f6267b533047b323f61d2b27,Learning to Combine Kernels for Object Categorization,Harbin Institute of Technology,Harbin Institute of Technology,"哈尔滨工业大学, 司令街, 南岗区, 哈尔滨市 / Harbin, 黑龙江省, 150000, 中国",45.74139210,126.62552755,edu,
+7f205b9fca7e66ac80758c4d6caabe148deb8581,A A Survey on Mobile Social Signal Processing,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+7f205b9fca7e66ac80758c4d6caabe148deb8581,A A Survey on Mobile Social Signal Processing,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+7f205b9fca7e66ac80758c4d6caabe148deb8581,A A Survey on Mobile Social Signal Processing,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+7f205b9fca7e66ac80758c4d6caabe148deb8581,A A Survey on Mobile Social Signal Processing,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+7fc76446d2b11fc0479df6e285723ceb4244d4ef,Laplacian MinMax Discriminant Projection and its Applications,Zhejiang Normal University,Zhejiang Normal University,"浙江师范大学, 688, 迎宾大道, 柳湖花园, 金华市, 婺城区 (Wucheng), 金华市 / Jinhua, 浙江省, 321004, 中国",29.13646725,119.63768652,edu,
+7fc76446d2b11fc0479df6e285723ceb4244d4ef,Laplacian MinMax Discriminant Projection and its Applications,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+7a04b5db3f589ac857b51effa1be3eae7fa8dd4e,Abnormal spatiotemporal processing of emotional facial expressions in childhood autism: dipole source analysis of event-related potentials.,University of Hong Kong,University of Hong Kong,"海洋科學研究所 The Swire Institute of Marine Science, 鶴咀道 Cape D'Aguilar Road, 鶴咀低電台 Cape D'Aguilar Low-Level Radio Station, 石澳 Shek O, 芽菜坑村 Nga Choy Hang Tsuen, 南區 Southern District, 香港島 Hong Kong Island, HK, 中国",22.20814690,114.25964115,edu,
+7a04b5db3f589ac857b51effa1be3eae7fa8dd4e,Abnormal spatiotemporal processing of emotional facial expressions in childhood autism: dipole source analysis of event-related potentials.,University of Hong Kong,University of Hong Kong,"海洋科學研究所 The Swire Institute of Marine Science, 鶴咀道 Cape D'Aguilar Road, 鶴咀低電台 Cape D'Aguilar Low-Level Radio Station, 石澳 Shek O, 芽菜坑村 Nga Choy Hang Tsuen, 南區 Southern District, 香港島 Hong Kong Island, HK, 中国",22.20814690,114.25964115,edu,
+7a04b5db3f589ac857b51effa1be3eae7fa8dd4e,Abnormal spatiotemporal processing of emotional facial expressions in childhood autism: dipole source analysis of event-related potentials.,University of Hong Kong,University of Hong Kong,"海洋科學研究所 The Swire Institute of Marine Science, 鶴咀道 Cape D'Aguilar Road, 鶴咀低電台 Cape D'Aguilar Low-Level Radio Station, 石澳 Shek O, 芽菜坑村 Nga Choy Hang Tsuen, 南區 Southern District, 香港島 Hong Kong Island, HK, 中国",22.20814690,114.25964115,edu,
+7a4ea124a971bdda4acea4b539092d4d22c0e169,Anticipating Traffic Accidents with Adaptive Loss and Large-scale Incident DB,Keio University,Keio University,"綱島市民の森, けつわり坂, 港北区, 横浜市, 神奈川県, 関東地方, 223-0053, 日本",35.54169690,139.63471840,edu,
+7ab02556d18d116228a964e38b7f454cf9f2b189,Findings of the E2E NLG Challenge,Heriot-Watt University,Heriot-Watt University,"Heriot-Watt University - Edinburgh Campus, Third Gait, Currie, Gogarbank, City of Edinburgh, Scotland, EH14 4AS, UK",55.91029135,-3.32345777,edu,
+7a7b386385ec5a458c6d45f58c399941c2f054d6,3D Model-Based Face Recognition in Video,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+7ab90189c9c66298c900fde3de4c8d77fd035d80,Long-Term On-Board Prediction of Pedestrians in Traffic Scenes,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+7a7b84b8d8c1edb07f16180ef2c243ef30d85e1d,TrustFA: TrustZone-Assisted Facial Authentication on Smartphone,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+7a2cee9a210e7b418fa6169f8cf027f7993a3ee5,LETTER TO THE EDITOR Spontaneous versus deliberate vicarious representations: different routes to empathy in psychopathy and autism,University of Birmingham,University of Birmingham,"University of Birmingham Edgbaston Campus, Ring Road North, Bournbrook, Birmingham, West Midlands Combined Authority, West Midlands, England, B15 2TP, UK",52.45044325,-1.93196134,edu,
+7a2cee9a210e7b418fa6169f8cf027f7993a3ee5,LETTER TO THE EDITOR Spontaneous versus deliberate vicarious representations: different routes to empathy in psychopathy and autism,University of Birmingham,University of Birmingham,"University of Birmingham Edgbaston Campus, Ring Road North, Bournbrook, Birmingham, West Midlands Combined Authority, West Midlands, England, B15 2TP, UK",52.45044325,-1.93196134,edu,
+7a9c317734acaf4b9bd8e07dd99221c457b94171,Lorentzian Discriminant Projection and Its Applications,Dalian University of Technology,Dalian University of Technology,"大连理工大学, 红凌路, 甘井子区, 凌水镇, 甘井子区 / Ganjingzi, 大连市 / Dalian, 辽宁省, 116023, 中国",38.88140235,121.52281098,edu,
+7a39a3ca168dfebb2e2d55b3fca0f750b32896da,BiSeNet: Bilateral Segmentation Network for Real-Time Semantic Segmentation,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+7a693500b5ac96f2f439989baf250e3305f69fa5,Bi-Sparsity Pursuit: A Paradigm for RobustSubspace Recovery,North Carolina State University,North Carolina State University,"North Carolina State University, Oval Drive, West Raleigh, Raleigh, Wake County, North Carolina, 27695, USA",35.77184965,-78.67408695,edu,
+7acbc7edfeee7c3a19b6f204e1c290172150db5c,On the Effects of Illumination Normalization with LBP-Based Watchlist Screening,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+7a6bacdbc04d06842dd68d27b9f065f472b5aa1f,Probabilistic AND-OR Attribute Grouping for Zero-Shot Learning,Bar-Ilan University,Bar-Ilan University,"אוניברסיטת בר אילן, כביש גהה, גבעת שמואל, קריית מטלון, גבעת שמואל, מחוז תל אביב, NO, ישראל",32.06932925,34.84334339,edu,
+7af8fa8897c6f1ec1e7f9eadb01f74b48c185588,Improving Global Multi-target Tracking with Local Updates,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+7af8fa8897c6f1ec1e7f9eadb01f74b48c185588,Improving Global Multi-target Tracking with Local Updates,Aalborg University,Aalborg University,"AAU, Pontoppidanstræde, Sønder Tranders, Aalborg, Aalborg Kommune, Region Nordjylland, 9220, Danmark",57.01590275,9.97532827,edu,
+7a3d46f32f680144fd2ba261681b43b86b702b85,Multi-label Learning Based Deep Transfer Neural Network for Facial Attribute Classification,Xiamen University,Xiamen University,"厦门大学, 思明南路 Siming South Road, 思明区, 思明区 (Siming), 厦门市 / Xiamen, 福建省, 361005, 中国",24.43994190,118.09301781,edu,
+7a3d46f32f680144fd2ba261681b43b86b702b85,Multi-label Learning Based Deep Transfer Neural Network for Facial Attribute Classification,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+7a7f2403e3cc7207e76475e8f27a501c21320a44,Emotion recognition from multi-modal information,National Cheng Kung University,National Cheng Kung University,"成大, 1, 大學路, 大學里, 前甲, 東區, 臺南市, 70101, 臺灣",22.99919160,120.21625134,edu,
+7aafeb9aab48fb2c34bed4b86755ac71e3f00338,Real Time 3D Facial Movement Tracking Using a Monocular Camera,Tongji University,Tongji University,"同济大学, 1239, 四平路, 江湾, 虹口区, 上海市, 200092, 中国",31.28473925,121.49694909,edu,
+7aafeb9aab48fb2c34bed4b86755ac71e3f00338,Real Time 3D Facial Movement Tracking Using a Monocular Camera,Kumamoto University,Kumamoto University,"熊本大学黒髪キャンパス, 熊本菊陽線, 中央区, 熊本市, 熊本県, 九州地方, 860-0863, 日本",32.81641780,130.72703969,edu,
+7a84368ebb1a20cc0882237a4947efc81c56c0c0,Robust and efficient parametric face alignment,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+7a84368ebb1a20cc0882237a4947efc81c56c0c0,Robust and efficient parametric face alignment,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+7a72ac1c77110d03dc0482f2556e9bdb36582fcb,Following Gaze: Gaze-Following Behavior as a Window into Social Cognition,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+7a72ac1c77110d03dc0482f2556e9bdb36582fcb,Following Gaze: Gaze-Following Behavior as a Window into Social Cognition,Yale University,Yale University,"Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA",41.25713055,-72.98966960,edu,
+7a9890cdbb62a60ba88a515655535151b568bc44,TAEF: A cross-distance/environment face recognition method,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+7a76c45fdaaa2756233d00b4b1f2e3a580df9870,Multi-view Gender Classification Using Local Binary Patterns and Support Vector Machines,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+7a0b78879a13bd42c63cd947f583129137b16830,A Multiresolution 3D Morphable Face Model and Fitting Framework,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+7a0b78879a13bd42c63cd947f583129137b16830,A Multiresolution 3D Morphable Face Model and Fitting Framework,Reutlingen University,Reutlingen University,"Campus Hohbuch, Campus Hochschule Reutlingen, Reutlingen, Landkreis Reutlingen, Regierungsbezirk Tübingen, Baden-Württemberg, 72762, Deutschland",48.48187645,9.18682404,edu,
+7a8e54033d166bb5bcb2acfc89c2659b45baa6e6,Creativity: Generating Diverse Questions Using Variational Autoencoders,Northwestern University,Northwestern University,"Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA",42.05511640,-87.67581113,edu,
+7ae8bca039d0d3de01001c3cd587f1961c4bbe22,Learning Visual Symbols for Parsing Human Poses in Images,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+7a65fc9e78eff3ab6062707deaadde024d2fad40,A Study on Apparent Age Estimation,West Virginia University,West Virginia University,"88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA",39.65404635,-79.96475355,edu,
+7ac9aaafe4d74542832c273acf9d631cb8ea6193,Deep Micro-Dictionary Learning and Coding Network,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+7ac9aaafe4d74542832c273acf9d631cb8ea6193,Deep Micro-Dictionary Learning and Coding Network,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+7a1ce696e260899688cb705f243adf73c679f0d9,Predicting Missing Demographic Information in Biometric Records Using Label Propagation Techniques,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+7a1ce696e260899688cb705f243adf73c679f0d9,Predicting Missing Demographic Information in Biometric Records Using Label Propagation Techniques,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+7a7d9cf8a6e28da11b71057948975fd179ef34be,Multicanonical Stochastic Variational Inference,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+7a7d9cf8a6e28da11b71057948975fd179ef34be,Multicanonical Stochastic Variational Inference,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+7a7d9cf8a6e28da11b71057948975fd179ef34be,Multicanonical Stochastic Variational Inference,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+7a7d9cf8a6e28da11b71057948975fd179ef34be,Multicanonical Stochastic Variational Inference,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+7a7d9cf8a6e28da11b71057948975fd179ef34be,Multicanonical Stochastic Variational Inference,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+7a923514c02872e9118b49f81d52f750a2c209a6,End-to-End Learning of Energy-Constrained Deep Neural Networks,University of Rochester,University of Rochester,"Memorial Art Gallery, 500, University Avenue, East End, Rochester, Monroe County, New York, 14607, USA",43.15769690,-77.58829158,edu,
+7a00365f9c7bced9ce47246794932f60564cb662,Converging evidence of configural processing of faces in high-functioning adults with autism spectrum disorders,McMaster University,McMaster University,"McMaster University, Westdale, Hamilton, Ontario, Canada",43.26336945,-79.91809684,edu,
+7add83ab4ec0e856d88f6e76ea4f585e80def1fa,People re-identification in camera networks based on probabilistic color histograms,EURECOM,EURECOM,"Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France",43.61438600,7.07112500,edu,
+7ade8aade0d464ea9a677c7c22a51d1f81edb6e9,Learning Behavior Patterns from Video: A Data-driven Framework for Agent-based Crowd Modeling,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+7ade8aade0d464ea9a677c7c22a51d1f81edb6e9,Learning Behavior Patterns from Video: A Data-driven Framework for Agent-based Crowd Modeling,Xidian University,Xidian University,"Xidian University (New Campus), 266号, 银杏大道, 南雷村, 长安区 (Chang'an), 西安市, 陕西省, 710126, 中国",34.12358250,108.83546000,edu,
+7af15295224c3ad69d56f17ff635763dd008a8a4,Learning Support Vectors for Face Authentication: Sensitivity to Mis-Registrations,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+7af15295224c3ad69d56f17ff635763dd008a8a4,Learning Support Vectors for Face Authentication: Sensitivity to Mis-Registrations,Czech Technical University,Czech Technical University,"České vysoké učení technické v Praze, Resslova, Nové Město, Praha, okres Hlavní město Praha, Hlavní město Praha, Praha, 11121, Česko",50.07642960,14.41802312,edu,
+7a0cd36d02ad962f628d9d504d02a850e27d5bfb,PoseTrack: A Benchmark for Human Pose Estimation and Tracking,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+7a131fafa7058fb75fdca32d0529bc7cb50429bd,Beyond Face Rotation: Global and Local Perception GAN for Photorealistic and Identity Preserving Frontal View Synthesis,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+14d5bd23667db4413a7f362565be21d462d3fc93,An Online Learned Elementary Grouping Model for Multi-target Tracking,"University of California, Riverside","University of California, Riverside","University of California, Riverside, Linden Street, Riverside, Riverside County, California, 92521, USA",33.98071305,-117.33261035,edu,
+1486f2e32deac2b61d37b52e48d07fcd5208a164,Occlusion Patterns for Object Class Detection,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+14f55f333c29871867b48e1a9084132542d88083,Human centric object detection in highly crowded scenes,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+14151238780bedb19c585ab3374b3240d61899b9,Appearance-Based Classification and Recognition Using Spectral Histogram Representations and Hierarchical Learning for Oca,Florida State University,Florida State University,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA",30.44235995,-84.29747867,edu,
+14151238780bedb19c585ab3374b3240d61899b9,Appearance-Based Classification and Recognition Using Spectral Histogram Representations and Hierarchical Learning for Oca,Florida State University,Florida State University,"Florida State University, 600, West College Avenue, Tallahassee, Leon County, Florida, 32306-1058, USA",30.44235995,-84.29747867,edu,
+14f0bce6645f39a44f5b0e695b5f28ea55fd9625,A-CCNN: Adaptive CCNN for Density Estimation and Crowd Counting,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+14f0bce6645f39a44f5b0e695b5f28ea55fd9625,A-CCNN: Adaptive CCNN for Density Estimation and Crowd Counting,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+14629c6989721e452fd9a49b5c20b8e849bce82a,Batch Algorithm with Additional Shape Constraints for Non-Rigid Factorization,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+141ee531d03fb6626043e33dd8f269a6f1f63a4b,How Robust is 3D Human Pose Estimation to Occlusion?,RWTH Aachen University,RWTH Aachen University,"RWTH Aachen, Mies-van-der-Rohe-Straße, Königshügel, Aachen-Mitte, Aachen, Städteregion Aachen, Regierungsbezirk Köln, Nordrhein-Westfalen, 52074, Deutschland",50.77917030,6.06728733,edu,
+14b87359f6874ff9b8ee234b18b418e57e75b762,Face Alignment Using a Ranking Model based on Regression Trees,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+14b87359f6874ff9b8ee234b18b418e57e75b762,Face Alignment Using a Ranking Model based on Regression Trees,Istanbul Technical University,Istanbul Technical University,"Istanbul Technical University, walking path from main road to Simit restaurant, İstanbul Teknik Üniversitesi, Maslak, F.S.M Mahallesi, Sarıyer, İstanbul, Marmara Bölgesi, 34469, Türkiye",41.10427915,29.02231159,edu,
+142e5b4492bc83b36191be4445ef0b8b770bf4b0,Discriminative Analysis of Brain Function at Resting-State for Attention-Deficit/Hyperactivity Disorder,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+142e5b4492bc83b36191be4445ef0b8b770bf4b0,Discriminative Analysis of Brain Function at Resting-State for Attention-Deficit/Hyperactivity Disorder,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+14401d4aae737a3ed118eca071f27f11dac7eda6,iVQA: Inverse Visual Question Answering,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+14401d4aae737a3ed118eca071f27f11dac7eda6,iVQA: Inverse Visual Question Answering,Southeast University,Southeast University,"SEU, 体育馆路, 新街口, 月季园, 玄武区, 南京市, 江苏省, 210008, 中国",32.05752790,118.78682252,edu,
+14b016c7a87d142f4b9a0e6dc470dcfc073af517,Modest proposals for improving biometric recognition papers,San Jose State University,San Jose State University,"SJSU, El Paseo de Cesar E. Chavez, Downtown Historic District, Japantown, San José, Santa Clara County, California, 95113, USA",37.33519080,-121.88126008,edu,
+1423037dd56f85453cd4257861821aeeb7478bc1,"Universal representations: The missing link between faces, text, planktons, and cat breeds",University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+1423037dd56f85453cd4257861821aeeb7478bc1,"Universal representations: The missing link between faces, text, planktons, and cat breeds",University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+14df5bbb6fc34819f4ef43bb2b8cb1ada35613fe,RED: Reinforced Encoder-Decoder Networks for Action Anticipation,University of Southern California,University of Southern California,"University of Southern California, Watt Way, Saint James Park, LA, Los Angeles County, California, 90089, USA",34.02241490,-118.28634407,edu,
+14abfe2c7a94bd882efb78da387d8973ace54c0b,Modularity Matters: Learning Invariant Relational Reasoning Tasks,Aalto University,Aalto University,"Aalto, 24, Otakaari, Otaniemi, Suur-Tapiola, Espoo, Helsingin seutukunta, Uusimaa, Etelä-Suomi, Manner-Suomi, 02150, Suomi",60.18558755,24.82427330,edu,
+147e699946e8c54d2176b4d868db03dd1c7bdb8f,Emotion and False Memory,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+147e699946e8c54d2176b4d868db03dd1c7bdb8f,Emotion and False Memory,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+1491c73713ff0b931e5bc1e990b9e762bfe7b60b,Fast and Simple Mixture of Softmaxes with BPE and Hybrid-LightRNN for Language Generation,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+14a3194bb454f1f2e3fc1452045ac18c69959368,Fast Object Detection Using Multistage Particle Window Deformable Part Model,National Chung Cheng University,National Chung Cheng University,"國立中正大學, 168, 鳳凰大道, 民雄鄉, 嘉義縣, 62102, 臺灣",23.56306355,120.47510531,edu,
+14a3194bb454f1f2e3fc1452045ac18c69959368,Fast Object Detection Using Multistage Particle Window Deformable Part Model,National Chung Cheng University,National Chung Cheng University,"國立中正大學, 168, 鳳凰大道, 民雄鄉, 嘉義縣, 62102, 臺灣",23.56306355,120.47510531,edu,
+14b66748d7c8f3752dca23991254fca81b6ee86c,A BoW-equivalent Recurrent Neural Network for Action Recognition,University of Bonn,University of Bonn,"Rheinische Friedrich-Wilhelms-Universität Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk Köln, Nordrhein-Westfalen, 53113, Deutschland",50.73381240,7.10224650,edu,
+14e8dbc0db89ef722c3c198ae19bde58138e88bf,HapFACS: An Open Source API/Software to Generate FACS-Based Expressions for ECAs Animation and for Corpus Generation,Florida International University,Florida International University,"FIU, Southwest 14th Street, Sweetwater, University Park, Miami-Dade County, Florida, 33199, USA",25.75533775,-80.37628897,edu,
+14e8dbc0db89ef722c3c198ae19bde58138e88bf,HapFACS: An Open Source API/Software to Generate FACS-Based Expressions for ECAs Animation and for Corpus Generation,Florida International University,Florida International University,"FIU, Southwest 14th Street, Sweetwater, University Park, Miami-Dade County, Florida, 33199, USA",25.75533775,-80.37628897,edu,
+14a7e7290f81e313804a000b125bcd1c341bf9b4,A Survey on Recent Advances of Computer Vision Algorithms for Egocentric Video,Indiana University,Indiana University,"Indiana University East, West Cart Road, Richmond, Wayne County, Indiana, 47374, USA",39.86948105,-84.87956905,edu,
+14e38bafe584fa0f3cf5899027c61247ff14204c,An overview of recent progress in volumetric semantic 3D reconstruction,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+146bbf00298ee1caecde3d74e59a2b8773d2c0fc,University of Groningen 4 D Unconstrained Real - time Face Recognition Using a Commodity Depthh Camera,University of Groningen,University of Groningen,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland",53.21967825,6.56251482,edu,
+14e9158daf17985ccbb15c9cd31cf457e5551990,ConvNets with Smooth Adaptive Activation Functions for Regression,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+14e9158daf17985ccbb15c9cd31cf457e5551990,ConvNets with Smooth Adaptive Activation Functions for Regression,Oak Ridge National Laboratory,Oak Ridge National Laboratory,"Oak Ridge National Laboratory, Oak Ridge, Roane County, Tennessee, USA",35.93006535,-84.31240032,edu,
+14e9158daf17985ccbb15c9cd31cf457e5551990,ConvNets with Smooth Adaptive Activation Functions for Regression,Stony Brook University Hospital,Stony Brook University Hospital,"Stony Brook University Hospital, 101, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.90826665,-73.11520891,edu,
+14ce7635ff18318e7094417d0f92acbec6669f1c,DeepFace: Closing the Gap to Human-Level Performance in Face Verification,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,
+14de80b1b86ea342ba44c584e9e39b9089472658,M-PACT: An Open Source Platform for Repeatable Activity Classification Research,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+14de80b1b86ea342ba44c584e9e39b9089472658,M-PACT: An Open Source Platform for Repeatable Activity Classification Research,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+14de80b1b86ea342ba44c584e9e39b9089472658,M-PACT: An Open Source Platform for Repeatable Activity Classification Research,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+140438a77a771a8fb656b39a78ff488066eb6b50,Localizing Parts of Faces Using a Consensus of Exemplars,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+140438a77a771a8fb656b39a78ff488066eb6b50,Localizing Parts of Faces Using a Consensus of Exemplars,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+14d6ddb48d1b8a593665576d7e25f17be1447b2e,Recognizing human actions using multiple features,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+14d6ddb48d1b8a593665576d7e25f17be1447b2e,Recognizing human actions using multiple features,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+14d6ddb48d1b8a593665576d7e25f17be1447b2e,Recognizing human actions using multiple features,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+143bee9120bcd7df29a0f2ad6f0f0abfb23977b8,Shared Gaussian Process Latent Variable Model for Multi-view Facial Expression Recognition,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+143bee9120bcd7df29a0f2ad6f0f0abfb23977b8,Shared Gaussian Process Latent Variable Model for Multi-view Facial Expression Recognition,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+14d72dc9f78d65534c68c3ed57305f14bd4b5753,Exploiting Multi-grain Ranking Constraints for Precisely Searching Visually-similar Vehicles,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+1450b9f2e69e2a4d0400bffaa535712b5fbab562,Different Visual Preference Patterns in Response to Simple and Complex Dynamic Social Stimuli in Preschool-Aged Children with Autism Spectrum Disorders,Soochow University,Soochow University,"苏州大学(天赐庄校区), 清荫路, 钟楼社区, 双塔街道, 姑苏区, 苏州市, 江苏省, 215001, 中国",31.30709510,120.63573987,edu,
+14b162c2581aea1c0ffe84e7e9273ab075820f52,Training Object Class Detectors from Eye Tracking Data,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+14ff9c89f00dacc8e0c13c94f9fadcd90e4e604d,Correlation filter cascade for facial landmark localization,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+1462fea6c71be9c442f443488fc7c45e1840e9ed,Learning a perceptual manifold for image set classification,Rochester Institute of Technology,Rochester Institute of Technology,"Rochester Institute of Technology (RIT), 1, Lomb Memorial Drive, Bailey, Henrietta Town, Monroe County, New York, 14623, USA",43.08250655,-77.67121663,edu,
+14421119527aa5882e1552a651fbd2d73bc94637,Searching for objects driven by context,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+14421119527aa5882e1552a651fbd2d73bc94637,Searching for objects driven by context,University of Edinburgh,University of Edinburgh,"New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK",55.94951105,-3.19534913,edu,
+14b69626b64106bff20e17cf8681790254d1e81c,Hybrid Super Vector with Improved Dense Trajectories for Action Recognition,Shenzhen Institutes of Advanced Technology,Shenzhen Institutes of Advanced Technology,"中国科学院深圳先进技术研究院, 1068, 科研路, 深圳大学城, 三坑村, 南山区, 深圳市, 广东省, 518000, 中国",22.59805605,113.98533784,edu,
+14b69626b64106bff20e17cf8681790254d1e81c,Hybrid Super Vector with Improved Dense Trajectories for Action Recognition,Southwest Jiaotong University,Southwest Jiaotong University,"西南交通大学 - Xinan Jiaotong University, 二环高架路, 沁园小区, 金牛区, 金牛区 (Jinniu), 成都市 / Chengdu, 四川省, 610084, 中国",30.69784700,104.05208110,edu,
+14b69626b64106bff20e17cf8681790254d1e81c,Hybrid Super Vector with Improved Dense Trajectories for Action Recognition,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+14070478b8f0d84e5597c3e67c30af91b5c3a917,Detecting Social Actions of Fruit Flies,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+14fb3283d4e37760b7dc044a1e2906e3cbf4d23a,Weak attributes for large-scale image retrieval,Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+14c37ea85ba8d74d053a34aedd7e484659fd54d4,Beyond trees: MRF inference via outer-planar decomposition,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+148316962e1ebb7086837e25cbee9ecbd71e5940,Efficient Multi-Person Pose Estimation with Provable Guarantees,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+148316962e1ebb7086837e25cbee9ecbd71e5940,Efficient Multi-Person Pose Estimation with Provable Guarantees,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+14811696e75ce09fd84b75fdd0569c241ae02f12,Margin-based discriminant dimensionality reduction for visual recognition,Eskisehir Osmangazi University,Eskisehir Osmangazi University,"Eskişehir Osmangazi Üniversitesi Meşelik Yerleşkesi, Kütahya-Eskişehir yolu, Sazova Mahallesi, Karagözler, Tepebaşı, Eskişehir, İç Anadolu Bölgesi, 26160, Türkiye",39.74875160,30.47653071,edu,
+14811696e75ce09fd84b75fdd0569c241ae02f12,Margin-based discriminant dimensionality reduction for visual recognition,University of Caen,University of Caen,"Université de Caen Normandie, Esplanade de la Paix, Calvaire Saint-Pierre, Caen, Calvados, Normandie, 14032, France",49.21400000,-0.36560000,edu,
+14811696e75ce09fd84b75fdd0569c241ae02f12,Margin-based discriminant dimensionality reduction for visual recognition,Rowan University,Rowan University,"Rowan University, Esbjornson Walk, Glassboro, Gloucester County, New Jersey, 08028, USA",39.71035260,-75.11932666,edu,
+14e759cb019aaf812d6ac049fde54f40c4ed1468,Subspace Methods,University of Tsukuba,University of Tsukuba,"University of Tsukuba, つばき通り, Kananemoto-satsukabe village, つくば市, 茨城県, 関東地方, 305-8377, 日本",36.11120580,140.10551760,edu,
+149c21e5f1c52429fb1585d30b50bc850a16edcd,A 3D Audio-visual Corpus for Speech Recognition,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+149c21e5f1c52429fb1585d30b50bc850a16edcd,A 3D Audio-visual Corpus for Speech Recognition,University of Western Australia,University of Western Australia,"UWA, 35, Underwood Avenue, Daglish, Perth, Western Australia, 6009, Australia",-31.95040445,115.79790037,edu,
+14f936e4eca8382ad835bf18b4a11d2e6682fd71,Simultaneous Children Recognition and Tracking for Childcare Assisting System by Using Kinect Sensors,University of Electro-Communications,The University of Electro-Communications,"電気通信大学, 甲州街道, 調布市, 東京都, 関東地方, 182-0026, 日本",35.65729570,139.54255868,edu,
+146a7ecc7e34b85276dd0275c337eff6ba6ef8c0,AFFACT: Alignment-free facial attribute classification technique,"University of Colorado, Colorado Springs",University of Colorado Colorado Springs,"Main Hall, The Spine, Colorado Springs, El Paso County, Colorado, 80907, USA",38.89207560,-104.79716389,edu,
+1432654a204391b6e2ec197138be0f7c8cb83ae5,Coreset-Based Neural Network Compression,MIT,Massachusetts Institute of Technology,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+1432654a204391b6e2ec197138be0f7c8cb83ae5,Coreset-Based Neural Network Compression,"University of Illinois, Urbana-Champaign","University of Illinois, Urbana-Champaign","B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA",40.11116745,-88.22587665,edu,
+148eb413bede35487198ce7851997bf8721ea2d6,People Search in Surveillance Videos,"IBM Research, North Carolina",IBM Research,"IBM, East Cornwallis Road, Research Triangle Park, Nelson, Durham County, North Carolina, 27709, USA",35.90422720,-78.85565763,company,
+148eb413bede35487198ce7851997bf8721ea2d6,People Search in Surveillance Videos,"IBM Research, North Carolina",IBM Research,"IBM, East Cornwallis Road, Research Triangle Park, Nelson, Durham County, North Carolina, 27709, USA",35.90422720,-78.85565763,company,
+148eb413bede35487198ce7851997bf8721ea2d6,People Search in Surveillance Videos,"IBM Research, North Carolina",IBM Research,"IBM, East Cornwallis Road, Research Triangle Park, Nelson, Durham County, North Carolina, 27709, USA",35.90422720,-78.85565763,company,
+147951fa2e0df54c5ddda0ff82dec057dcc61f66,Transforming neutral visual speech into expressive visual speech,University of East Anglia,University of East Anglia,"Arts (Lower Walkway Level), The Square, Westfield View, Earlham, Norwich, Norfolk, East of England, England, NR4 7TJ, UK",52.62215710,1.24091360,edu,
+14014a1bdeb5d63563b68b52593e3ac1e3ce7312,Expression-Invariant Age Estimation,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+14418ae9a6a8de2b428acb2c00064da129632f3e,Discovering the Spatial Extent of Relative Attributes,"University of California, Davis","University of California, Davis","University of California, Davis, Apiary Drive, Yolo County, California, 95616-5270, USA",38.53363490,-121.79077264,edu,
+14ba910c46d659871843b31d5be6cba59843a8b8,Face Recognition in Movie Trailers via Mean Sequence Sparse Representation-Based Classification,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+14d0afea52c4e9b7a488f6398e4a92bd4f4b93c7,Rethinking the Faster R-CNN Architecture for Temporal Action Localization,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+14318d2b5f2cf731134a6964d8193ad761d86942,FaceDNA: Intelligent Face Recognition System with Intel RealSense 3D Camera,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+14effecdf4e99a6ef3bb590582ca07e642d49632,Multiple-Gaze Geometry: Inferring Novel 3D Locations from Gazes Observed in Monocular Video,University of Arizona,University of Arizona,"University of Arizona, North Highland Avenue, Rincon Heights, Barrio Viejo, Tucson, Pima County, Arizona, 85721, USA",32.23517260,-110.95095832,edu,
+146f989d2cea0e6825543d45c073f90dd8ae9939,Zero-Shot Learning via Semantic Similarity Embedding,Boston University,Boston University,"BU, Bay State Road, Fenway, Boston, Suffolk County, Massachusetts, 02215, USA",42.35042530,-71.10056114,edu,
+14c0f9dc9373bea1e27b11fa0594c86c9e632c8d,Adaptive Exponential Smoothing for Online Filtering of Pixel Prediction Maps,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+1439bf9ba7ff97df9a2da6dae4784e68794da184,LGE-KSVD: Flexible Dictionary Learning for Optimized Sparse Representation Classification,Rochester Institute of Technology,Rochester Institute of Technology,"Rochester Institute of Technology (RIT), 1, Lomb Memorial Drive, Bailey, Henrietta Town, Monroe County, New York, 14623, USA",43.08250655,-77.67121663,edu,
+14819d286c9b46c8e57c7e809db879f9e1451226,Shape Anchors for Data-Driven Multi-view Reconstruction,MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+14819d286c9b46c8e57c7e809db879f9e1451226,Shape Anchors for Data-Driven Multi-view Reconstruction,Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+14819d286c9b46c8e57c7e809db879f9e1451226,Shape Anchors for Data-Driven Multi-view Reconstruction,MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+14819d286c9b46c8e57c7e809db879f9e1451226,Shape Anchors for Data-Driven Multi-view Reconstruction,MIT CSAIL,MIT CSAIL,"32 Vassar St, Cambridge, MA 02139, USA",42.36194070,-71.09043780,edu,
+141768ab49a5a9f5adcf0cf7e43a23471a7e5d82,Relative facial action unit detection,McMaster University,McMaster University,"McMaster University, Westdale, Hamilton, Ontario, Canada",43.26336945,-79.91809684,edu,
+14bca107bb25c4dce89210049bf39ecd55f18568,Emotion recognition from facial images with arbitrary views,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+1412f4024babbc01b671f7ee4a22d86db1545268,Proximity and gaze influences facial temperature: a thermal infrared imaging study,University of Portsmouth,University of Portsmouth,"University of Portsmouth - North Zone, Portland Street, Portsea, Portsmouth, South East, England, PO1 3DE, UK",50.79805775,-1.09834911,edu,
+147f31b603931c688687c6d64d330c9be2ab2f2f,Jointly Attentive Spatial-Temporal Pooling Networks for Video-Based Person Re-identification,Huazhong University of Science and Technology,Huazhong University of Science and Technology,"华中大, 珞喻路, 东湖新技术开发区, 关东街道, 东湖新技术开发区(托管), 洪山区 (Hongshan), 武汉市, 湖北省, 430074, 中国",30.50975370,114.40628810,edu,
+147f31b603931c688687c6d64d330c9be2ab2f2f,Jointly Attentive Spatial-Temporal Pooling Networks for Video-Based Person Re-identification,"IBM Research, North Carolina",IBM Research,"IBM, East Cornwallis Road, Research Triangle Park, Nelson, Durham County, North Carolina, 27709, USA",35.90422720,-78.85565763,company,
+147f31b603931c688687c6d64d330c9be2ab2f2f,Jointly Attentive Spatial-Temporal Pooling Networks for Video-Based Person Re-identification,Northwestern University,Northwestern University,"Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA",42.05511640,-87.67581113,edu,
+8eae6ed5fa66b5eb63bdb6cc23d3b385a7fee37c,A 3D Dynamic Database for Unconstrained Face Recognition,University of Florence,University of Florence,"Piazza di San Marco, 4, 50121 Firenze FI, Italy",43.77764260,11.25976500,edu,
+8ec82da82416bb8da8cdf2140c740e1574eaf84f,Lip Reading in Profile,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+8e0ede53dc94a4bfcf1238869bf1113f2a37b667,Joint patch and multi-label learning for facial action unit detection,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+8e0ede53dc94a4bfcf1238869bf1113f2a37b667,Joint patch and multi-label learning for facial action unit detection,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+8e3c97e420e0112c043929087d6456d8ab61e95c,Robust Global Motion Compensation in Presence of Predominant Foreground,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+8e0ab1b08964393e4f9f42ca037220fe98aad7ac,UV-GAN: Adversarial Facial UV Map Completion for Pose-invariant Face Recognition,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+8e63715d458ff79170a010c283c79427ce81ff0c,Demography-based facial retouching detection using subclass supervised sparse autoencoder,University of Notre Dame,University of Notre Dame,"University of Notre Dame du Lac, Holy Cross Drive, Notre Dame, Maple Lane, Saint Joseph County, Indiana, 46556, USA",41.70456775,-86.23822026,edu,
+8e610860a0a273d5a2676e9d53328820f2f59a85,Diversity in Fashion Recommendation Using Semantic Parsing,"IIIT Delhi, India","IIIT Delhi, India","Okhla Industrial Estate, Phase III, Near Govind Puri Metro Station, New Delhi, Delhi 110020, India",28.54562820,77.27315050,edu,
+8e8c141e06d52cee1917b7268abca315bf3af714,Random-Profiles-Based 3D Face Recognition System,Yonsei University,Yonsei University,"연세대, 연세로, 신촌동, 창천동, 서대문구, 서울특별시, 03789, 대한민국",37.56004060,126.93692480,edu,
+8e54329a35b11e48d398dd3df3b27c72f48f5b2b,SCPNet: Spatial-Channel Parallelism Network for Joint Holistic and Partial Person Re-Identification,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+8e54329a35b11e48d398dd3df3b27c72f48f5b2b,SCPNet: Spatial-Channel Parallelism Network for Joint Holistic and Partial Person Re-Identification,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+8eda955303623b68ab207abb233fac17b92c6632,Homographic Class Template for Logo Localization and Recognition,University Politehnica of Bucharest,University Politehnica of Bucharest,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România",44.43918115,26.05044565,edu,
+8e4808e71c9b9f852dc9558d7ef41566639137f3,Adversarial Generative Nets: Neural Network Attacks on State-of-the-Art Face Recognition,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+8e4808e71c9b9f852dc9558d7ef41566639137f3,Adversarial Generative Nets: Neural Network Attacks on State-of-the-Art Face Recognition,University of North Carolina at Chapel Hill,University of North Carolina at Chapel Hill,"University of North Carolina at Chapel Hill, East Cameron Avenue, Chapel Hill, Orange County, North Carolina, 27514, USA",35.91139710,-79.05045290,edu,
+8e4355225f0db7945952fbdf29e234e71313d30b,3D Human Pose Estimation with Relational Networks,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+8e0ad1ccddc7ec73916eddd2b7bbc0019d8a7958,Segment-based SVMs for Time Series Analysis,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+8e0ad1ccddc7ec73916eddd2b7bbc0019d8a7958,Segment-based SVMs for Time Series Analysis,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+8e457222d6f38847489d63557ec2e0de7356e2a5,Super-resolution pipeline for fast adjudication in watchlist screening,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+8eeac4ca19ddc919423c42447c28ce546a25c4f8,Image composition for object pop-out,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+8e861a7809905a003fffa821574e68ae0c0788e7,Dictionary Learning with Iterative Laplacian Regularisation for Unsupervised Person Re-identification,Queen Mary University of London,Queen Mary University of London,"Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK",51.52472720,-0.03931035,edu,
+8e7c647d8e8ba726b03f7e7c5cc395f86b9de9be,Parsing Pose of People with Interaction,California Institute of Technology,California Institute of Technology,"California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA",34.13710185,-118.12527487,edu,
+8e74244e220a1c9e89417caa1ad22f649884d311,ArtTrack: Articulated Multi-Person Tracking in the Wild,Max Planck Institute for Informatics,Max Planck Institute for Informatics,"MPII, E1 4, Campus, Universität, Sankt Johann, Bezirk Mitte, Saarbrücken, Regionalverband Saarbrücken, Saarland, 66123, Deutschland",49.25795660,7.04577417,edu,
+8e69534ae2f00025226c3a46dc6efb4faa3d396a,Object Class Detection Using Local Image Features and Point Pattern Matching Constellation Search,University of Surrey,University of Surrey,"University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK",51.24303255,-0.59001382,edu,
+8e36255da222c01a880c9b88d61f139f7bdba62f,Graph filtering for data reduction and reconstruction,University of Texas at Arlington,University of Texas at Arlington,"University of Texas at Arlington, South Nedderman Drive, Arlington, Tarrant County, Texas, 76010, USA",32.72836830,-97.11201835,edu,
+8e0a86634b286567433736a667e3a0bb7902470e,Dense Semantic Image Segmentation with Objects and Attributes,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+8e0a86634b286567433736a667e3a0bb7902470e,Dense Semantic Image Segmentation with Objects and Attributes,Oxford Brookes University,Oxford Brookes University,"Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK",51.75552050,-1.22615970,edu,
+8ed33184fccde677ec8413ae06f28ea9f2ca70f3,Multimodal Visual Concept Learning with Weakly Supervised Techniques,National Technical University of Athens,National Technical University of Athens,"Εθνικό Μετσόβιο Πολυτεχνείο, Στουρνάρη, Μουσείο, Αθήνα, Δήμος Αθηναίων, Π.Ε. Κεντρικού Τομέα Αθηνών, Περιφέρεια Αττικής, Αττική, 11250, Ελλάδα",37.98782705,23.73179733,edu,
+8e59851a9b59d818f2c0beaf23760e9326439a86,Graph Based Image Segmentation,Hong Kong University of Science and Technology,Hong Kong University of Science and Technology,"香港科技大學 Hong Kong University of Science and Technology, 大學道 University Road, 大埔仔 Tai Po Tsai, 大埔仔村 Tai Po Tsai Village, 新界 New Territories, HK, DD253 1209, 中国",22.33863040,114.26203370,edu,
+8e4ff1aa78f8997b683f873c46999f384db4de18,Renewing the respect for similarity,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+8e4ff1aa78f8997b683f873c46999f384db4de18,Renewing the respect for similarity,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+8ee5b1c9fb0bded3578113c738060290403ed472,Extending explicit shape regression with mixed feature channels and pose priors,Karlsruhe Institute of Technology,Karlsruhe Institute of Technology,"KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-Württemberg, 76351, Deutschland",49.10184375,8.43312560,edu,
+8e88a97e09a853cf768ca1c732ba5f008fff77ca,Regularized Residual Quantization: a multi-layer sparse dictionary learning approach,University of Geneva,University of Geneva,"University of Chicago-Yerkes Observatory, 373, West Geneva Street, Williams Bay, Walworth County, Wisconsin, 53191, USA",42.57054745,-88.55578627,edu,
+8e7548911c41b6f3a6ccbda6d3ab913eaa41e721,Feature Learning with Rank-Based Candidate Selection for Product Search,National Taiwan University,National Taiwan University,"臺大;台大, 1, 羅斯福路四段, 學府里, 大安區, 臺北市, 10617, 臺灣",25.01682835,121.53846924,edu,
+8efda5708bbcf658d4f567e3866e3549fe045bbb,Pre-trained Deep Convolutional Neural Networks for Face Recognition,University of Groningen,University of Groningen,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland",53.21967825,6.56251482,edu,
+8efda5708bbcf658d4f567e3866e3549fe045bbb,Pre-trained Deep Convolutional Neural Networks for Face Recognition,University of Groningen,University of Groningen,"Academiegebouw, Professorgang, Binnenstad, Groningen, Nederland, 9712EA, Nederland",53.21967825,6.56251482,edu,
+8e956117b2e22470814778fed6f4641e475efb44,Learning Attribute Representations with Localization for Flexible Fashion Search,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,
+8e956117b2e22470814778fed6f4641e475efb44,Learning Attribute Representations with Localization for Flexible Fashion Search,"A*STAR, Singapore","Institute for Infocomm Research, A*STAR, Singapore","1 Fusionopolis Way, #21-01 Connexis, Singapore 138632",1.29889260,103.78731070,edu,
+8e64d872a419f122f870026179ccbc5daa1645fd,Modified Local Binary Pattern (MLBP) for Robust Face Recognition,Old Dominion University,Old Dominion University,"Old Dominion University, Elkhorn Avenue, Lamberts Point, Norfolk, Virginia, 23508, USA",36.88568200,-76.30768579,edu,
+8e64d872a419f122f870026179ccbc5daa1645fd,Modified Local Binary Pattern (MLBP) for Robust Face Recognition,University of Dayton,University of Dayton,"University of Dayton, Caldwell Street, South Park Historic District, Dayton, Montgomery, Ohio, 45409, USA",39.73844400,-84.17918747,edu,
+22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b,A Database for Person Re-Identification in Multi-Camera Surveillance Networks,Queensland University of Technology,Queensland University of Technology,"Queensland University of Technology, Macgregor Lane, Merthyr, South Brisbane, Brisbane, QLD, 4000, Australia",-27.47715625,153.02841004,edu,
+2227f978f084ebb18cb594c0cfaf124b0df6bf95,Pillar Networks for action recognition,Imperial College London,Imperial College London,"Imperial College London, Exhibition Road, Brompton, Royal Borough of Kensington and Chelsea, London, Greater London, England, SW7 2AZ, UK",51.49887085,-0.17560797,edu,
+228db5326a10cd67605ce103a7948207a65feeb1,Stacked Attention Networks for Image Question Answering,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+22e2066acfb795ac4db3f97d2ac176d6ca41836c,Coarse-to-Fine Auto-Encoder Networks (CFAN) for Real-Time Face Alignment,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+22e2066acfb795ac4db3f97d2ac176d6ca41836c,Coarse-to-Fine Auto-Encoder Networks (CFAN) for Real-Time Face Alignment,University of Chinese Academy of Sciences,University of Chinese Academy of Sciences,"University of Chinese Academy of Sciences, UCAS, Yuquanlu, 玉泉路, 田村, 海淀区, 100049, 中国",39.90828040,116.24585270,edu,
+22dff8fa7cc57f7b4f2903c6fbf6ffa7f1bea0d7,3D Human Pose Estimation Using Convolutional Neural Networks with 2D Pose Information,Seoul National University,Seoul National University,"서울대학교, 서호동로, 서둔동, 권선구, 수원시, 경기, 16614, 대한민국",37.26728000,126.98411510,edu,
+22717ad3ad1dfcbb0fd2f866da63abbde9af0b09,A Learning-based Control Architecture for Socially Assistive Robots Providing Cognitive Interventions,University of Toronto,University of Toronto,"University of Toronto, St. George Street, Bloor Street Culture Corridor, Old Toronto, Toronto, Ontario, M5S 1A5, Canada",43.66333345,-79.39769975,edu,
+22bc3624a1e6d46f5b7c9208751d4f14fc87e946,"Book chapter for Artificial Intelligence for Maximizing Content Based Image Retrieval Event detection, query, and retrieval for video surveillance",IBM Thomas J. Watson Research Center,IBM Thomas J. Watson Research Center,"IBM Yorktown research lab, Adams Road, Millwood, Town of New Castle, Westchester County, New York, 10562, USA",41.21002475,-73.80407056,company,
+2288696b6558b7397bdebe3aed77bedec7b9c0a9,Action Recognition with Joint Attention on Multi-Level Deep Features,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+2288696b6558b7397bdebe3aed77bedec7b9c0a9,Action Recognition with Joint Attention on Multi-Level Deep Features,"Beijing, China","Beijing, China","北京市, 东城区, 北京市, 100010, 中国",39.90621700,116.39127570,edu,
+22bc12fb82db4c5a5f52bd1ba70e25ffac94f428,Transfer learning for object category detection,University of Oxford,University of Oxford,"Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK",51.75345380,-1.25400997,edu,
+22f1b026bd78fdc2e945bcf88a6d69d44b484ec6,The emperor's new masks: On demographic differences and disguises,University of Pennsylvania,University of Pennsylvania,"Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA",39.94923440,-75.19198985,edu,
+22493d8d4d7b4604cae23638dce4981b36e30147,Learning an Image-Based Motion Context for Multiple People Tracking,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+221c9fff1c25368a6b72ca679c67a3d6b35e2c00,Memory-Based Face Recognition for Visitor Identification,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+22dc91889312e796ad36b363bc5ed959714e4694,Deep Differential Recurrent Neural Networks,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+22dc91889312e796ad36b363bc5ed959714e4694,Deep Differential Recurrent Neural Networks,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+22dc91889312e796ad36b363bc5ed959714e4694,Deep Differential Recurrent Neural Networks,University of Central Florida,University of Central Florida,"University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA",28.59899755,-81.19712501,edu,
+22954dd92a795d7f381465d1b353bcc41901430d,Learning Visual Storylines with Skipping Recurrent Neural Networks,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+22259622612a839d97133d4809f80447dfeb5d56,Incremental Machine Learning Approach for Component-based Recognition,Tokyo Institute of Technology,Tokyo Institute of Technology,"東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本",35.51675380,139.48342251,edu,
+22a12ec4258f223b43761e5c4729787d1aaa623b,Optimal Bloom Filters and Adaptive Merging for LSM-Trees,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+22a12ec4258f223b43761e5c4729787d1aaa623b,Optimal Bloom Filters and Adaptive Merging for LSM-Trees,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+22a12ec4258f223b43761e5c4729787d1aaa623b,Optimal Bloom Filters and Adaptive Merging for LSM-Trees,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+22a12ec4258f223b43761e5c4729787d1aaa623b,Optimal Bloom Filters and Adaptive Merging for LSM-Trees,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+22a12ec4258f223b43761e5c4729787d1aaa623b,Optimal Bloom Filters and Adaptive Merging for LSM-Trees,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+22f21a5b230d6bcc2c4ab4e4d5ae57a20f09f348,How do we use our hands? Discovering a diverse set of common grasps,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+2296d79753118cfcd0fecefece301557f4cb66e2,Exploring Disentangled Feature Representation Beyond Face Identification,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+2296d79753118cfcd0fecefece301557f4cb66e2,Exploring Disentangled Feature Representation Beyond Face Identification,SenseTime,SenseTime,"China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1号-7",39.99300800,116.32988200,company,"1 Zhongguancun E Rd, Haidian Qu, China"
+221252be5d5be3b3e53b3bbbe7a9930d9d8cad69,Do We Need More Training Data or Better Models for Object Detection?,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+22db07c472b2d7bc7704b8c2bbd8f620e2e68ca9,MoDeep: A Deep Learning Framework Using Motion Features for Human Pose Estimation,New York University,New York University,"NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA",40.72925325,-73.99625394,edu,
+22f44121a6de3ff942c5fbf4ab1d6734315baf66,Sensor-assisted facial recognition: an enhanced biometric authentication system for smartphones,"University of California, Davis","University of California, Davis","University of California, Davis, Apiary Drive, Yolo County, California, 95616-5270, USA",38.53363490,-121.79077264,edu,
+22f44121a6de3ff942c5fbf4ab1d6734315baf66,Sensor-assisted facial recognition: an enhanced biometric authentication system for smartphones,"University of California, Davis","University of California, Davis","University of California, Davis, Apiary Drive, Yolo County, California, 95616-5270, USA",38.53363490,-121.79077264,edu,
+22f44121a6de3ff942c5fbf4ab1d6734315baf66,Sensor-assisted facial recognition: an enhanced biometric authentication system for smartphones,"University of California, Davis","University of California, Davis","University of California, Davis, Apiary Drive, Yolo County, California, 95616-5270, USA",38.53363490,-121.79077264,edu,
+22df6b6c87d26f51c0ccf3d4dddad07ce839deb0,Fast action proposals for human action detection and search,Nanyang Technological University,Nanyang Technological University,"NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore",1.34841040,103.68297965,edu,
+22f21d58c6aecdb4f57c50fa9eb4952643eec0e9,Domain Transfer Support Vector Ranking for Person Re-identification without Target Camera Label Information,Hong Kong Baptist University,Hong Kong Baptist University,"香港浸會大學 Hong Kong Baptist University, 安明街 On Ming Street, 石門 Shek Mun, 石古壟 Shek Kwu Lung, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1132, 中国",22.38742010,114.20822220,edu,
+22f2f77120cd28e9b2516179239380adef46b1be,Discovering Object Functionality,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+221c18238b829c12b911706947ab38fd017acef7,A Richly Annotated Dataset for Pedestrian Attribute Recognition,Chinese Academy of Sciences,Chinese Academy of Sciences,"中国科学院心理研究所, 16, 林萃路, 朝阳区 / Chaoyang, 北京市, 100101, 中国",40.00447950,116.37023800,edu,
+221c18238b829c12b911706947ab38fd017acef7,A Richly Annotated Dataset for Pedestrian Attribute Recognition,Temple University,Temple University,"Temple University School of Podiatric Medicine, Race Street, Chinatown, Philadelphia, Philadelphia County, Pennsylvania, 19103, USA",39.95472495,-75.15346905,edu,
+22344ddcae83e732ba0c2116d7ee9016aebb12be,Model-Based Feature Extraction for Gait Analysis and Recognition,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+22fe619996b59c09cb73be40103a123d2e328111,The German Traffic Sign Recognition Benchmark: A multi-class classification competition,University of Copenhagen,University of Copenhagen,"Københavns Universitet, Krystalgade, Kødbyen, Vesterbro, København, Københavns Kommune, Region Hovedstaden, 1165, Danmark",55.68015020,12.57232700,edu,
+227de3327012e8141cc58068fe9bc197773254b8,Unsupervised Adversarial Learning of 3D Human Pose from 2D Joint Locations,Keio University,Keio University,"綱島市民の森, けつわり坂, 港北区, 横浜市, 神奈川県, 関東地方, 223-0053, 日本",35.54169690,139.63471840,edu,
+227de3327012e8141cc58068fe9bc197773254b8,Unsupervised Adversarial Learning of 3D Human Pose from 2D Joint Locations,"National Institute of Informatics, Japan","National Institute of Informatics, Japan","2 Chome-1-2 Hitotsubashi, Chiyoda, Tokyo 100-0003, Japan",35.69248530,139.75825330,edu,
+22e678d3e915218a7c09af0d1602e73080658bb7,Adventures in archiving and using three years of webcam images,Washington University,Washington University,"Dero, Wallace Drive, St. Louis County, Missouri, MO 63130, USA",38.64804450,-90.30996670,edu,
+22b5bcd590f6d4c04b8de28217b001da9667ec33,Write a Classifier: Zero-Shot Learning Using Purely Textual Descriptions,Rutgers University,Rutgers University,"Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA",40.47913175,-74.43168868,edu,
+2201f187a7483982c2e8e2585ad9907c5e66671d,Joint Face Alignment and 3D Face Reconstruction,"Sichuan University, Chengdu","Sichuan Univ., Chengdu","四川大学(华西校区), 校东路, 武侯区, 武侯区 (Wuhou), 成都市 / Chengdu, 四川省, 610014, 中国",30.64276900,104.06751175,edu,
+2201f187a7483982c2e8e2585ad9907c5e66671d,Joint Face Alignment and 3D Face Reconstruction,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+22a5c5b366e56339b34a66ce2a4a106592656e40,A model of dynamic compilation for heterogeneous compute platforms,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+2241eda10b76efd84f3c05bdd836619b4a3df97e,One-to-many face recognition with bilinear CNNs,University of Massachusetts,University of Massachusetts,"University of Massachusetts, Hicks Way, Amherst, Hampshire, Massachusetts, 01003, USA",42.38897850,-72.52869870,edu,
+22267d537cbaed08c2005c42f251bb6097aa1505,Hierarchical Grid-based People Tracking with Multi-camera Setup,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+228408c76823355010bb13e5b3f32823b35a176c,Daily Living Activities Recognition via Efficient High and Low Level Cues Combination and Fisher Kernel Representation,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+228408c76823355010bb13e5b3f32823b35a176c,Daily Living Activities Recognition via Efficient High and Low Level Cues Combination and Fisher Kernel Representation,University Politehnica of Bucharest,University Politehnica of Bucharest,"Universitatea Politehnica din București, Novum Invest, București, Militari, Sector 6, Municipiul București, 060042, România",44.43918115,26.05044565,edu,
+22646cf884cc7093b0db2c1731bd52f43682eaa8,Human Action Adverb Recognition: ADHA Dataset and A Three-Stream Hybrid Model,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,
+22f94c43dd8b203f073f782d91e701108909690b,MovieScope: Movie trailer classification using Deep Neural Networks,University of Virginia,University of Virginia,"University of Virginia, Rotunda Alley, Carr's Hill, Albemarle County, Virginia, 22904-4119, USA",38.03536820,-78.50353220,edu,
+2297ead8a0000dab33ebc73b7d5781b3258322b6,Classifying and Visualizing Emotions with Emotional DAN,Warsaw University of Technology,Warsaw University of Technology,"Politechnika Warszawska, 1, Plac Politechniki, VIII, Śródmieście, Warszawa, mazowieckie, 00-661, RP",52.22165395,21.00735776,edu,
+220377caca34bed8a0081d48d153aecc11c211e1,Spectral-Pruning: Compressing deep neural network via spectral analysis,University of Tokyo,University of Tokyo,"東京大学 柏キャンパス, 学融合の道, 柏市, 千葉県, 関東地方, 277-8583, 日本",35.90204480,139.93622009,edu,
+2231f44be9a8472a46d8e8a628b4e52b9a8f44e0,Visual Dialog,Georgia Institute of Technology,Georgia Institute of Technology,"Georgia Tech, Atlantic Drive Northwest, Bellwood, Rockdale, Atlanta, Fulton County, Georgia, 30318, USA",33.77603300,-84.39884086,edu,
+22143664860c6356d3de3556ddebe3652f9c912a,Facial Expression Recognition for Human-Robot Interaction - A Prototype,Electrical and Computer Engineering,Electrical and Computer Engineering,"Electrical and Computer Engineering, Boston Avenue, South Overton, Lubbock, Lubbock County, Texas, 79409, USA",33.58667840,-101.87539204,edu,
+2271d554787fdad561fafc6e9f742eea94d35518,Multimodale Mensch-Roboter-Interaktion für Ambient Assisted Living,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+22f71559c88fe32b405a6fedf7ee099c32d9377e,Causal and compositional generative models in online perception,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+22f71559c88fe32b405a6fedf7ee099c32d9377e,Causal and compositional generative models in online perception,Korea University,Korea University,"고려대, 안암로, 제기동, 동대문구, 서울특별시, 02796, 대한민국",37.59014110,127.03623180,edu,
+224e78cc643e38c2cdcdaaa5123ecd7cf7a08674,Learning non-redundant codebooks for classifying complex objects,Oregon State University,Oregon State University,"OSU Beaver Store, 538, Southwest 6th Avenue, Portland Downtown, Portland, Multnomah County, Oregon, 97204, USA",45.51982890,-122.67797964,edu,
+22c169fa05a0d5710bc111e451161e9d9141c29d,A Novel Inference of a Restricted Boltzmann Machine,Tokyo Institute of Technology,Tokyo Institute of Technology,"東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本",35.51675380,139.48342251,edu,
+22c169fa05a0d5710bc111e451161e9d9141c29d,A Novel Inference of a Restricted Boltzmann Machine,Tokyo Institute of Technology,Tokyo Institute of Technology,"東京工業大学, 厚木街道, 緑区, 町田市, 神奈川県, 関東地方, 226-0026, 日本",35.51675380,139.48342251,edu,
+227cef669b362a7756564519be22c7d060348f66,Factors in Finetuning Deep Model for Object Detection with Long-Tail Distribution,Chinese University of Hong Kong,The Chinese University of Hong Kong,"中大 CUHK, NA梯 New Asia Stairs, 馬料水 Ma Liu Shui, 九肚村 Kau To Village, 沙田區 Sha Tin District, 新界 New Territories, HK, DD193 1191, 中国",22.42031295,114.20788644,edu,
+222db9e290b34ae30c39486697d8e8dac3175770,Stabilizing Training of Generative Adversarial Networks through Regularization,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+222db9e290b34ae30c39486697d8e8dac3175770,Stabilizing Training of Generative Adversarial Networks through Regularization,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+222db9e290b34ae30c39486697d8e8dac3175770,Stabilizing Training of Generative Adversarial Networks through Regularization,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+222db9e290b34ae30c39486697d8e8dac3175770,Stabilizing Training of Generative Adversarial Networks through Regularization,ETH Zürich,ETH Zürich,"ETH Zürich, 101, Rämistrasse, Hochschulen, Altstadt, Zürich, Bezirk Zürich, Zürich, 8092, Schweiz/Suisse/Svizzera/Svizra",47.37645340,8.54770931,edu,
+228c28bd18a2d58cd771a75e8718b14dc32051e0,An effective neutrosophic set-based preprocessing method for face recognition,Utah State University,Utah State University,"Utah State University, Champ Drive, Logan, Cache County, Utah, 84322, USA",41.74115040,-111.81223090,edu,
+220f6ef6f4bf4729871822e08080142359012e10,Implementation of Automatic Multiple Person Tracking System with Open Cv on Beagle Board,Anna University,Anna University,"Anna University, Nuclear Physics Road, Srinagar Colony, Ward 171, Zone 13 Adyar, Chennai, Chennai district, Tamil Nadu, 600025, India",13.01058380,80.23537360,edu,
+2520c3d5d114974167561591a57f80e89650f862,Direct Pose Estimation and Refinement,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+2520c3d5d114974167561591a57f80e89650f862,Direct Pose Estimation and Refinement,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+25ff865460c2b5481fa4161749d5da8501010aa0,Seeing What is Not There: Learning Context to Determine Where Objects are Missing,University of Maryland,University of Maryland,"The Grand Garage, 5, North Paca Street, Seton Hill, Baltimore, Maryland, 21201, USA",39.28996850,-76.62196103,edu,
+25d474ff23515eeccbc071897c144957edfbd7a5,Dual Swap Disentangling,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+25d474ff23515eeccbc071897c144957edfbd7a5,Dual Swap Disentangling,Stevens Institute of Technology,Stevens Institute of Technology,"Stevens Institute of Technology, River Terrace, Hoboken, Hudson County, New Jersey, 07030, USA",40.74225200,-74.02709490,edu,
+25d474ff23515eeccbc071897c144957edfbd7a5,Dual Swap Disentangling,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+25d474ff23515eeccbc071897c144957edfbd7a5,Dual Swap Disentangling,University of Sydney,University of Sydney,"USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia",-33.88890695,151.18943366,edu,
+25d474ff23515eeccbc071897c144957edfbd7a5,Dual Swap Disentangling,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,
+25e9a2ec45c34d4610359196dc505a72c3833336,Benchmarking KAZE and MCM for Multiclass Classification,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+25d514d26ecbc147becf4117512523412e1f060b,Annotated crowd video face database,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
+2534997443c7e183c9f8e370ea1e82989ecc940d,Skeleton Search: Category-Specific Object Recognition and Segmentation Using a Skeletal Shape Model,Brown University,Brown University,"Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA",41.82686820,-71.40123146,edu,
+2525483e2d899c435437bd874208071183223b46,Autism as an Infantile Post-trauma Stress Disorder : A Hypothesis,Peking University,Peking University,"北京大学, 5号, 颐和园路, 稻香园南社区, 海淀区, 北京市, 100871, 中国",39.99223790,116.30393816,edu,
+25b1a031a0559a0bc4079e9011bdf527e1a39d19,Modelling the Time-Variant Covariates for Gait Recognition,University of Southampton,University of Southampton,"Waterfront Campus, European Way, Port of Southampton, St Mary's, Southampton, South East, England, SO14 3JW, UK",50.89273635,-1.39464295,edu,
+25bdcfdcdd9a944ce5adb8d2663856f242c580a1,Goal-Oriented Visual Question Generation via Intermediate Rewards,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+25bdcfdcdd9a944ce5adb8d2663856f242c580a1,Goal-Oriented Visual Question Generation via Intermediate Rewards,University of Technology Sydney,University of Technology Sydney,"University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia",-33.88096510,151.20107299,edu,
+25bdcfdcdd9a944ce5adb8d2663856f242c580a1,Goal-Oriented Visual Question Generation via Intermediate Rewards,Nanjing University,Nanjing University,"NJU, 三江路, 鼓楼区, 南京市, 江苏省, 210093, 中国",32.05659570,118.77408833,edu,
+25642be46de0f2e74e0da81a14646f8bfcc9000a,"What Does Classifying More Than 10, 000 Image Categories Tell Us?",Princeton University,Princeton University,"Lot 9, University Place, Princeton Township, Mercer County, New Jersey, 08540, USA",40.34829285,-74.66308325,edu,
+25642be46de0f2e74e0da81a14646f8bfcc9000a,"What Does Classifying More Than 10, 000 Image Categories Tell Us?",Columbia University,Columbia University,"Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA",40.84198360,-73.94368971,edu,
+25642be46de0f2e74e0da81a14646f8bfcc9000a,"What Does Classifying More Than 10, 000 Image Categories Tell Us?",Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+25c3cdbde7054fbc647d8be0d746373e7b64d150,ForgetMeNot: Memory-Aware Forensic Facial Sketch Matching,Beijing University of Posts and Telecommunications,Beijing University of Posts and Telecommunications,"北京邮电大学, 西土城路, 海淀区, 北京市, 100082, 中国",39.96014880,116.35193921,edu,
+252e48be0fd63d3a786021efa8733f8891101a82,Unsupervised Feature Learning With Winner-Takes-All Based STDP,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+25bf288b2d896f3c9dab7e7c3e9f9302e7d6806b,Neural Networks with Smooth Adaptive Activation Functions for Regression,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+25bf288b2d896f3c9dab7e7c3e9f9302e7d6806b,Neural Networks with Smooth Adaptive Activation Functions for Regression,Stony Brook University,Stony Brook University,"Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA",40.91531960,-73.12706260,edu,
+25bf288b2d896f3c9dab7e7c3e9f9302e7d6806b,Neural Networks with Smooth Adaptive Activation Functions for Regression,Oak Ridge National Laboratory,Oak Ridge National Laboratory,"Oak Ridge National Laboratory, Oak Ridge, Roane County, Tennessee, USA",35.93006535,-84.31240032,edu,
+255bb1a38169c7b78fb4da747cde18a961755d7a,A Bayesian generative model for learning semantic hierarchies,University of Michigan,University of Michigan,"University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA",42.29421420,-83.71003894,edu,
+255bb1a38169c7b78fb4da747cde18a961755d7a,A Bayesian generative model for learning semantic hierarchies,University of Washington,University of Washington,"University of Washington, Rainier Vista, Montlake, University District, Seattle, King County, Washington, 98195, USA",47.65432380,-122.30800894,edu,
+255bb1a38169c7b78fb4da747cde18a961755d7a,A Bayesian generative model for learning semantic hierarchies,Stanford University,Stanford University,"Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA",37.43131385,-122.16936535,edu,
+255bb1a38169c7b78fb4da747cde18a961755d7a,A Bayesian generative model for learning semantic hierarchies,MIT,Massachusetts Institute,"MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA",42.35839610,-71.09567788,edu,
+25f5df29342a04936ba0d308b4d1b8245a7e8f5c,Convolutional Pose Machines,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+25f5df29342a04936ba0d308b4d1b8245a7e8f5c,Convolutional Pose Machines,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+253b9b36565b83d0196c3bd9bf05089d9aafa242,Human Body Part Classification in Monocular Soccer Images,Technical University Munich,Technical University Munich,"TUM, 21, Arcisstraße, Bezirksteil Königsplatz, Stadtbezirk 03 Maxvorstadt, München, Obb, Bayern, 80333, Deutschland",48.14955455,11.56775314,edu,
+25e979e3c2d4fde4f297bf845796664424ab4c29,Whitening and Coloring transform for GANs,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+25e979e3c2d4fde4f297bf845796664424ab4c29,Whitening and Coloring transform for GANs,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+25e979e3c2d4fde4f297bf845796664424ab4c29,Whitening and Coloring transform for GANs,University of Trento,University of Trento,"University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia",46.06588360,11.11598940,edu,
+2588acc7a730d864f84d4e1a050070ff873b03d5,Action Recognition by an Attention-Aware Temporal Weighted Convolutional Neural Network,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+25c108a56e4cb757b62911639a40e9caf07f1b4f,Recurrent Scale Approximation for Object Detection in CNN,SenseTime,SenseTime,"China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1号-7",39.99300800,116.32988200,company,"1 Zhongguancun E Rd, Haidian Qu, China"
+2594a77a3f0dd5073f79ba620e2f287804cec630,Regularizing face verification nets for pain intensity regression,Johns Hopkins University,"Johns Hopkins University, 3400 N. Charles St, Baltimore, MD 21218, USA","3400 N Charles St, Baltimore, MD 21218, USA",39.32905300,-76.61942500,edu,
+2594a77a3f0dd5073f79ba620e2f287804cec630,Regularizing face verification nets for pain intensity regression,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+2581a12189eb1a0b5b27a7fd1c2cbe44c88fcc20,Analyzing Classifiers: Fisher Vectors and Deep Neural Networks,TU Berlin,TU Berlin,"Franklinstraße 28-29, 10587 Berlin, Germany",52.51806410,13.32504250,edu,"Franklinstr. 28/29, 10587, Germany"
+2581a12189eb1a0b5b27a7fd1c2cbe44c88fcc20,Analyzing Classifiers: Fisher Vectors and Deep Neural Networks,TU Berlin,TU Berlin,"Franklinstraße 28-29, 10587 Berlin, Germany",52.51806410,13.32504250,edu,"Franklinstr. 28/29, 10587, Germany"
+256f09fe3163564958381d7f3727b5c27c19144c,Image2Emoji: Zero-shot Emoji Prediction for Visual Media,University of Amsterdam,University of Amsterdam,"Institute for Logic, Language and Computation (ILLC), 107, Science Park, Oost-Watergraafsmeer, Amsterdam, Oost, Amsterdam, Noord-Holland, Nederland, 1098XG, Nederland",52.35536550,4.95016440,edu,
+25e2d3122d4926edaab56a576925ae7a88d68a77,Communicative-Pragmatic Treatment in Schizophrenia: A Pilot Study,University of Oulu,University of Oulu,"Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi",65.05921570,25.46632601,edu,
+25e2d3122d4926edaab56a576925ae7a88d68a77,Communicative-Pragmatic Treatment in Schizophrenia: A Pilot Study,Harvard University,Harvard University,"Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA",42.36782045,-71.12666653,edu,
+2550df6b33260cbe6fd60331ca6c7a8c0b48e80d,Human detection using depth information by Kinect,University of Texas at Austin,University of Texas at Austin,"University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA",30.28415100,-97.73195598,edu,
+2563b2adba98788a217565ba5a648f83cb75eeeb,Weight-Optimal Local Binary Patterns,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+250252b9693778e0af653efebfe17f68d649c8a7,3D Face Recognition,University of Houston,University of Houston,"UH, 4800, Calhoun Road, Houston, Harris County, Texas, 77004, USA",29.72079020,-95.34406271,edu,
+25c56f52c528112da99d0ae7e559500ef7532d3a,Towards Literate Artificial Intelligence,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+25dba68e4db0ce361032126b91f734f9252cae7c,DeepSetNet: Predicting Sets with Deep Neural Networks,University of Adelaide,University of Adelaide,"University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia",-34.91892260,138.60423668,edu,
+257e008c01a32b9b642553f3f1e59e61efcac4a6,Gender discrimination of eyes and mouths by individuals with autism.,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+257e008c01a32b9b642553f3f1e59e61efcac4a6,Gender discrimination of eyes and mouths by individuals with autism.,University of Pittsburgh,University of Pittsburgh,"University of Pittsburgh, Sutherland Drive, West Oakland, PGH, Allegheny County, Pennsylvania, 15240, USA",40.44415295,-79.96243993,edu,
+2599445b0990979483db54c707c9a33b18231910,Binary Biometric Representation through Pairwise Polar Quantization,University of Twente,University of Twente,"University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland",52.23801390,6.85667610,edu,
+25ee08db14dca641d085584909b551042618b8bf,Learning to Segment Instances in Videos with Spatial Propagation Network,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,
+25ee08db14dca641d085584909b551042618b8bf,Learning to Segment Instances in Videos with Spatial Propagation Network,"University of California, Merced","University of California, Merced","University of California, Merced, Ansel Adams Road, Merced County, California, USA",37.36566745,-120.42158888,edu,
+2577211aeaaa1f2245ddc379564813bee3d46c06,Seeing through the Human Reporting Bias: Visual Classifiers from Noisy Human-Centric Labels,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+2577211aeaaa1f2245ddc379564813bee3d46c06,Seeing through the Human Reporting Bias: Visual Classifiers from Noisy Human-Centric Labels,Microsoft Research Asia,"Microsoft Live Labs Research, China",Microsoft Research Asia,35.86166000,104.19539700,company,
+25c3068e7964d3b894916a82b1fa93c9d6792886,Face Recognition with Histograms of Oriented Gradients,Robotics Institute,Robotics Institute,"Institute for Field Robotics, ประชาอุทิศ, กรุงเทพมหานคร, เขตราษฎร์บูรณะ, กรุงเทพมหานคร, 10140, ประเทศไทย",13.65450525,100.49423171,edu,
+25afe234435ede5fd95e47c3b58ed2c1da318f46,Towards Measuring and Inferring User Interest from Gaze,Cornell University,Cornell University,"Cornell University, Forest Home Drive, Forest Home, Tompkins County, New York, 14853, USA",42.45055070,-76.47835130,edu,
+258dda85eadcd2081d1e0131826aceac7f1e2415,Supervision Beyond Manual Annotations for Learning Visual Representations,Carnegie Mellon University,Carnegie Mellon University,"Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA",37.41021930,-122.05965487,edu,
+2544249e92b324a7f79da6eb556c387a4fa5226e,Monocular Video-Based Trailer Coupler Detection Using Multiplexer Convolutional Neural Network,Michigan State University,Michigan State University,"Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA",42.71856800,-84.47791571,edu,
+255971561c250d1ccee1402397586d2c7d0cd545,SmartBox : Benchmarking Adversarial Detection and Mitigation Algorithms for Face Recognition,Indian Institute of Technology Delhi,"IIIT-Delhi, India","IIIT-Delhi, Mathura Road, Friends Colony, South East Delhi, Delhi, 110020, India",28.54632595,77.27325504,edu,
diff --git a/scraper/reports/stats/no_separator_papers.csv b/scraper/reports/stats/no_separator_papers.csv
new file mode 100644
index 00000000..53e82bdd
--- /dev/null
+++ b/scraper/reports/stats/no_separator_papers.csv
@@ -0,0 +1,1264 @@
+614a547cb976fae955e276feb2ccc9a33f1c7806,Classifier-as-a-Service: Online Query of Cascades and Operating Points,,2012
+610c341985633b2d31368f8642519953c39ff7e8,Computational Load Balancing on the Edge in Absence of Cloud and Fog,Unknown,2018
+617a6935643615f09ef2b479609baa0d5f87cd67,To Be Taken At Face Value? Computerised Identification,,2002
+61acd4e07657094c2720bb60299dba0014ec89a6,Image annotation by kNN-sparse graph-based label propagation over noisily tagged web images,ACM TIST,2011
+0da2a7ee04092645867614db3574cb261f33b6e2,Watching Unlabeled Video Helps Learn New Human Actions from Very Few Labeled Snapshots,2013 IEEE Conference on Computer Vision and Pattern Recognition,2013
+0d30a662061a495e4b5aeb92a2edfac868b225ea,Quantification of Emotions for Facial Expression: Generation of Emotional Feature Space Using Self-Mapping,,2012
+0dd151d003ac9b7f3d6936ccdd5ff38fce76c29f,A Review and Comparison of Measures for Automatic Video Surveillance Systems,EURASIP J. Image and Video Processing,2008
+0dc34e186e8680336e88c3b5e73cde911a8774b8,Image Classification Using Naive Bayes Classifier With Pairwise Local Observations,J. Inf. Sci. Eng.,2017
+0d2e29f07275fe05a44b04f16cd3edd0c3f448f0,Development of the Korean Facial Emotion Stimuli: Korea University Facial Expression Collection 2nd Edition,,2017
+0d1d9a603b08649264f6e3b6d5a66bf1e1ac39d2,Effects of emotional expressions on persuasion,,2016
+0dd72a3522b99aedea83b47c5d7b33a1df058fd0,A Set of Selected SIFT Features for 3D Facial Expression Recognition,2010 20th International Conference on Pattern Recognition,2010
+0da4c3d898ca2fff9e549d18f513f4898e960aca,The Headscarf Effect Revisited: Further Evidence for a Culture-Based Internal Face Processing Advantage.,Perception,2015
+95f990600abb9c8879e4f5f7cd03f3d696fcdec4,An Online Algorithm for Constrained Face Clustering in Videos,Unknown,2018
+9501db000474dbd182579d311dfb1b1ab8fa871f,Supplementary of Multi-scale Deep Learning Architectures for Person Re-identification,,2017
+954af3d46d023d73c7ee97f2264451080f542084,The Interplay between Emotion and Cognition in Autism Spectrum Disorder: Implications for Developmental Theory,,2012
+5955bb0325ec4dd3b56759aeb96cc9c18b09bf3e,Self-Supervised Depth Learning Improves Semantic Segmentation,Unknown,2017
+59d10820e0a04d2d1acc43bb18a76c52e9946721,Attention to eyes and mouth in high-functioning children with autism.,Journal of autism and developmental disorders,2006
+592bbab1e073908c75584879bc00911e7246aebf,Exploiting feature Representations Through Similarity Learning and Ranking Aggregation for Person Re-identification,2017 12th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2017),2017
+92c2dd6b3ac9227fce0a960093ca30678bceb364,On Color Texture Normalization for Active Appearance Models,IEEE Transactions on Image Processing,2009
+927ba64123bd4a8a31163956b3d1765eb61e4426,Customer satisfaction measuring based on the most significant facial emotion,Unknown,2018
+92f0e02c9f4e95098452d0fd78ba46cd6e7b1f6d,Dynamic machine learning for supervised and unsupervised classification. (Apprentissage automatique dynamique pour la classification supervisée et non supervisée),Unknown,2016
+927ad0dceacce2bb482b96f42f2fe2ad1873f37a,Interest-Point based Face Recognition System,,2012
+0c3c83b7f030fe661548d362ddf33f37bb44043d,Crowd Motion Analysis Based on Social Force Graph with Streak Flow Attribute,J. Electrical and Computer Engineering,2015
+0c553e57cb6fe7bdf3212fbf86bcc869958db27f,Straight until proven gay: A systematic bias toward straight categorizations in sexual orientation judgments.,Journal of personality and social psychology,2016
+0c3f7272a68c8e0aa6b92d132d1bf8541c062141,Kruskal-Wallis-Based Computationally Efficient Feature Selection for Face Recognition,,2014
+0ce4110d4c3d8b19ca0f7f75bc680aa9ba8d239a,Genetic Algorithms for Classifiers’ Training Sets Optimisation Applied to Human Face Recognition,,2007
+0cc5804c5f113c60ee5894f25ab7078364eef986,Epitomize Your Photos,Int. J. Computer Games Technology,2011
+0c1d5801f2b86afa969524dc74708a78450300d9,12 : Conditional Random Fields,,2014
+0c5b03a6083950aacd9aee2d276a232e6ce3213c,The Main Memory System: Challenges and Opportunities,,2015
+6603e7de5b155c86407edc43099b46b974b7f0bb,Local Feature Based Face Recognition,Unknown,2018
+66cc90ea586c914e6a3b50fe703f4379d530fad7,Automatic integration of social information in emotion recognition.,Journal of experimental psychology. General,2015
+66533107f9abdc7d1cb8f8795025fc7e78eb1122,Visual Servoing for a User's Mouth with Effective Intention Reading in a Wheelchair-based Robotic Arm,,2001
+6681ec516067747a4576f737f10f8d9bbca2d8d1,Perturbative Neural Networks ( Supplementary Material ),Unknown,2018
+661da40b838806a7effcb42d63a9624fcd684976,An Illumination Invariant Accurate Face Recognition with Down Scaling of DCT Coefficients,CIT,2010
+660c8a9fa166c1d81e65192e011eacfec208ec00,Discrimination of visual pedestrians data by combining projection and prediction learning,Unknown,2014
+3e56cbce67d312af2b3a7d0981e9cb33d2236bea,Boosting attribute recognition with latent topics by matrix factorization,JASIST,2017
+3e4b38b0574e740dcbd8f8c5dfe05dbfb2a92c07,Facial Expression Recognition with Local Binary Patterns and Linear Programming,,2004
+3e0415f0e8c36f20042d6a1f8b7c216fb5543c3a,RGB-D Segmentation of Poultry Entrails,Unknown,2016
+3e42e336d67dad79ab6355c02f1f045f8a71a18f,Autism spectrum traits in normal individuals: a preliminary VBM analysis,,2015
+3e04feb0b6392f94554f6d18e24fadba1a28b65f,Subspace Image Representation for Facial Expression Analysis and Face Recognition and its Relation to the Human Visual System,,2007
+3ed60f021fe469f2423d04917e69864251d23e08,Metadata of the chapter that will be visualized in SpringerLink,Unknown,2012
+5087d9bdde0ba5440eb8658be7183bf5074a2a94,Object Detection via a Multi-region and Semantic Segmentation-Aware CNN Model,2015 IEEE International Conference on Computer Vision (ICCV),2015
+50a8dc4c1d40967a95b684eb421edd03415fb7ab,Nothing Else Matters: Model-Agnostic Explanations By Identifying Prediction Invariance,CoRR,2016
+50894e607cd5eb616913b520c4e238a73f432b86,Neural correlates of eye gaze processing in the infant broader autism phenotype.,Biological psychiatry,2009
+50eb2ee977f0f53ab4b39edc4be6b760a2b05f96,Emotion recognition based on texture analysis of facial expression,2011 International Conference on Image Information Processing,2011
+50e45e9c55c9e79aaae43aff7d9e2f079a2d787b,Unbiased Feature Selection in Learning Random Forests for High-Dimensional Data,,2015
+50af3b6f7192951b42c2531ee931c8244e505a5c,Weakly Supervised Learning for Attribute Localization in Outdoor Scenes,2013 IEEE Conference on Computer Vision and Pattern Recognition,2013
+5047cae1b6f47ac1715479abfa3daf1c1a063977,Predictor Combination at Test Time — Supplemental Document,,2017
+50984f8345a3120d0e6c0a75adc2ac1a13e37961,Impaired face processing in autism: fact or artifact?,Journal of autism and developmental disorders,2006
+50bf19a06915778a0bcbdef700f91b56258a4e1f,Common and distinct neural features of social and non-social reward processing in autism and social anxiety disorder.,Social cognitive and affective neuroscience,2014
+507af6591900a7165c529eca9fd370008c1ac87c,"For Black men, being tall increases threat stereotyping and police stops.",Proceedings of the National Academy of Sciences of the United States of America,2018
+50c0de2cccf7084a81debad5fdb34a9139496da0,"The Influence of Annotation, Corpus Design, and Evaluation on the Outcome of Automatic Classification of Human Emotions",Front. ICT,2016
+68ae4db6acf5361486f153ee0c0d540e0823682a,FlashReport Memory conformity for con fi dently recognized items : The power of social in fl uence on memory reports,Unknown,2012
+68e4ed4daa2ae94c789443ed222601a4a47f9a45,Building Extraction from Polarimetric Interferometric Sar Data Using Bayesian Network,,2009
+688754568623f62032820546ae3b9ca458ed0870,Resting high frequency heart rate variability is not associated with the recognition of emotional facial expressions in healthy human adults,,2016
+68249064f7d5046abef785ada541244fa67b4346,"Contribution of Developmental Psychology to the Study of Social Interactions: Some Factors in Play, Joint Attention and Joint Action and Implications for Robotics",,2018
+68c279d4fcc02710056e73a3b0d0d564a7615cad,Unified framework for fast exact and approximate search in dissimilarity spaces,ACM Trans. Database Syst.,2007
+68c17aa1ecbff0787709be74d1d98d9efd78f410,Gender Classification from Face Images Using Mutual Information and Feature Fusion,,2012
+68f61154a0080c4aae9322110c8827978f01ac2e,"Recognizing blurred , non-frontal , illumination and expression variant partially occluded faces",Unknown,2016
+6821113166b030d2123c3cd793dd63d2c909a110,Acquisition and Indexing of Rgb-d Recordings for Facial Expressions and Emotion Recognition1,,2015
+57bf9888f0dfcc41c5ed5d4b1c2787afab72145a,Robust Facial Expression Recognition Based on Local Directional Pattern,,
+57522ff758642e054d7c50753ec1c3fe598533f0,Information-Based Boundary Equilibrium Generative Adversarial Networks with Interpretable Representation Learning,,2018
+5740a5f9cbfe790afc0ba9a425cfb71197927470,Supplementary Material for Superpixel Sampling Networks,Unknown,2018
+57f8e1f461ab25614f5fe51a83601710142f8e88,Region Selection for Robust Face Verification using UMACE Filters,,2007
+57a1466c5985fe7594a91d46588d969007210581,A taxonomy of face-models for system evaluation,2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops,2010
+5727ac51ad6fb67d81cc3ef2c04440c179bd53ab,Oxytocin attenuates amygdala responses to emotional faces regardless of valence.,Biological psychiatry,2007
+577c1d59e43f04a4bfda95b0b9e3b41d893bc0a2,Faster Evaluation of Labor-Intensive Features,Unknown,2015
+5700291077b509b11fb227f84ee9fc2de8f2df99,Line search and trust region strategies for canonical decomposition of semi-nonnegative semi-symmetric 3rd order tensors,,2017
+57a14a65e8ae15176c9afae874854e8b0f23dca7,Seeing Mixed Emotions: The Specificity of Emotion Perception From Static and Dynamic Facial Expressions Across Cultures,,2018
+3b152bdeedb97d68dd69bbb806c60c205e6fa696,Patch-Based Principal Component Analysis for Face Recognition,,2017
+3b6602e64e62e5703151d17475d4728bd2095256,Brief Communication Oxytocin Modulates Neural Circuitry for Social Cognition and Fear in Humans,,2005
+3b7f6035a113b560760c5e8000540fc46f91fed5,Coupling Alignments with Recognition for Still-to-Video Face Recognition,2013 IEEE International Conference on Computer Vision,2013
+3bd1d41a656c8159305ba2aa395f68f41ab84f31,Entity-Based Opinion Mining from Text and Multimedia,,2015
+3b466bb66ee79c8e9bcdb6cf9acb54b864dda735,"Joint inference of groups, events and human roles in aerial videos",2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR),2015
+3b6310052026fc641d3fa639647342c45d8f5bd5,Eye Contact Modulates Cognitive Processing Differently in Children With Autism,,2015
+3b2f78a4edf5da876e52513d0e3960da7d3a253f,Qualitative Evaluation of Detection and Tracking Performance,2012 IEEE Ninth International Conference on Advanced Video and Signal-Based Surveillance,2012
+3b23c39f21156f9ea86ad8bb2ca53b2cf56b4181,Predictable Performance and Fairness Through Accurate Slowdown Estimation in Shared Main Memory Systems,CoRR,2018
+6f5d57460e0e156497c4667a875cc5fa83154e3a,Retinal Verification Using a Feature Points-Based Biometric Pattern,EURASIP J. Adv. Sig. Proc.,2009
+6f957df9a7d3fc4eeba53086d3d154fc61ae88df,Modélisation et suivi des déformations faciales : applications à la description des expressions du visage dans le contexte de la langue des signes,,2007
+6fee701352f0f5c4abea3e918ddcf078243253cc,Alcohol and Remembering Sexual,,
+6ff9b66aec16d84b1133850e7e8ce188a5a9a7f4,Do-gooder derogation in children: the social costs of generosity,,2015
+6f813ccf106360cc9c3d6df849cc04d881d0a6e8,"360◦ User Profiling: Past, Future, and Applications",,2016
+6f5151c7446552fd6a611bf6263f14e729805ec7,Facial Action Unit Recognition using Filtered Local Binary Pattern Features with Bootstrapped and Weighted ECOC Classi ers,,2010
+0344f29da9641edc36bc4952e1f7a4bfd8dd9bb3,Facial expression at retrieval affects recognition of facial identity,,2015
+03167776e17bde31b50f294403f97ee068515578,Chapter 11. Facial Expression Analysis,,2004
+032c1e19a59cdbeb3fb741a812980f52c1461ce1,"Mining textural knowledge in biological images: Applications, methods and trends",,2017
+030ef31b51bd4c8d0d8f4a9a32b80b9192fe4c3f,Inhibition-Induced Forgetting Results from Resource Competition between Response Inhibition and Memory Encoding Processes.,The Journal of neuroscience : the official journal of the Society for Neuroscience,2015
+033e3fe75da26d8d3dd3cb0f99640181655e6746,From generic to specific deep representations for visual recognition,2015 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW),2015
+03fc466fdbc8a2efb6e3046fcc80e7cb7e86dc20,A real time system for model-based interpretation of the dynamics of facial expressions,2008 8th IEEE International Conference on Automatic Face & Gesture Recognition,2008
+035886f58b550be140b1d4dbba0ea0479030589f,Trajectory bundle estimation For perception-driven planning,Unknown,2013
+03f14159718cb495ca50786f278f8518c0d8c8c9,Performance evaluation of HOG and Gabor features for vision-based vehicle detection,"2015 IEEE International Conference on Control System, Computing and Engineering (ICCSCE)",2015
+03ac1c694bc84a27621da6bfe73ea9f7210c6d45,Chapter 1 Introduction to information security foundations and applications,Unknown,2018
+0324a22f71927bee2a448f800287cde562dc2726,People detection in crowded scenes by context-driven label propagation,2016 IEEE Winter Conference on Applications of Computer Vision (WACV),2016
+03bd58a96f635059d4bf1a3c0755213a51478f12,Smoothed Low Rank and Sparse Matrix Recovery by Iteratively Reweighted Least Squares Minimization,IEEE Transactions on Image Processing,2015
+034f7fcf5a393ac3307ac3609c2b971df6efaff6,Can Synthetic Data Handle Unconstrained Gaze Estimation?,Unknown,2017
+03d10c88aebd7aabe603d455c7bafa9231c7cf51,Hyperconnectivity of the Right Posterior Temporo-parietal Junction Predicts Social Difficulties in Boys with Autism Spectrum Disorder.,Autism research : official journal of the International Society for Autism Research,2015
+03fe3d031afdcddf38e5cc0d908b734884542eeb,Engagement with Artificial Intelligence through Natural Interaction Models,Unknown,2017
+9b9b6d34deebb534de66017381be7578e13b761d,"Submitted to the Alfred P . Sloan School of Management in Partial Fulfillment of the Requirements for the Degree of DOCTOR OF PHILOSOPHY IN MANAGEMENT at the MASSACHUSETTS INSTITUTE OF TECHNOLOGY February , 2007",,2007
+9bd9050c53d90dfa86cb22501812afe6fc897406,Fine-Grained and Layered Object Recognition,IJPRAI,2012
+9b474d6e81e3b94e0c7881210e249689139b3e04,VG-RAM Weightless Neural Networks for Face Recognition,,2009
+9bfe2732a905cb0aab370d1146a29b9d4129321d,Social Judgments Are Influenced by Both Facial Expression and Direction of Eye Gaze,,2011
+9bfda2f5144867d5712a8fcbea9dd5fa69d3312b,Image Super-Resolution Using VDSR-ResNeXt and SRCGAN,CoRR,2018
+9e594ae4f549e0d838f497de31a5b597a6826d55,Recognition of Emotion from Facial Expressions with Direct or Averted Eye Gaze and Varying Expression Intensities in Children with Autism Disorder and Typically Developing Children,,2014
+9e1a21c9af589fc2148ce96aa93c9df4a9e5ae02,Undoing the Damage of Dataset Bias,,2012
+9e384187941e939453fc0c7585c1a8e76d535c02,A Robust Approach to Automatic Iris Localization,,2009
+9ed3e04586f311b1e2b5ded9c9c4bfeeecf27f0c,Understanding rapid category detection via multiply degraded images.,Journal of vision,2009
+9ef9046cc26946acedda3f515d9149a76e19cd6e,A Unified Multi-Faceted Video Summarization System,CoRR,2017
+9e8dd40aea9204ad670b312a46ba807bfc0c61ce,Distribution-sensitive learning for imbalanced datasets Citation,Unknown,2013
+9e1712ac91c7a882070a8e2740ed476d59d6d5d4,Expressive image manipulations for a variety of visual representations. (Manipulations d'image expressives pour une variété de représentations visuelles),Unknown,2009
+9e263d429c3b87aae2653b6fb925b32b63c172cd,Enhanced image and video representation for visual recognition,Unknown,2014
+048eb50c398fa01bd15329945113341102d96454,Addressing perceptual insensitivity to facial affect in violent offenders: first evidence for the efficacy of a novel implicit training approach.,Psychological medicine,2014
+044da4715e439b4f91cee8eec55299e30a615c56,Inducing a Concurrent Motor Load Reduces Categorization Precision for Facial Expressions,,2016
+040eb316cec08b36ae0b57fede86043ee0526686,Learning Reliable and Scalable Representations Using Multimodal Multitask Deep Learning,Unknown,2018
+047d7cf4301cae3d318468fe03a1c4ce43b086ed,Co-Localization of Audio Sources in Images Using Binaural Features and Locally-Linear Regression,"IEEE/ACM Transactions on Audio, Speech, and Language Processing",2015
+04317e63c08e7888cef480fe79f12d3c255c5b00,Face Recognition Using a Unified 3D Morphable Model,Unknown,2016
+0419726a00e16ea89868792ca94f5b1b262c5597,An analytical formulation of global occlusion reasoning for multi-target tracking,2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops),2011
+04df36ea27f14f96bb1b33d76103d1dee7c6e0ca,Blur invariant pattern recognition and registration in the Fourier domain,,2009
+0470b0ab569fac5bbe385fa5565036739d4c37f8,Automatic face naming with caption-based supervision,2008 IEEE Conference on Computer Vision and Pattern Recognition,2008
+6af35225cfd744b79577c126e553f549e5b5cdcc,Title Discriminative Hessian Eigenmaps for face recognition,Unknown,2010
+6a657995b02bc9dee130701138ea45183c18f4ae,The Timing of Facial Motion in posed and Spontaneous Smiles,IJWMIP,2004
+6af98f9843ba629ae1b0347e8b8d81a263f8d7f2,Does this recession make me look black? The effect of resource scarcity on the categorization of biracial faces.,Psychological science,2012
+6ad5a38df8dd4cdddd74f31996ce096d41219f72,Multi-cue onboard pedestrian detection,2009 IEEE Conference on Computer Vision and Pattern Recognition,2009
+6a9c460952a96a04e12caa7bae07ae2f7df1238e,Exploiting scene context for on-line object tracking in unconstrained environments. (Exploitation du contexte de scène pour le suivi d'objet en ligne dans des environnements non contraints),Unknown,2016
+324f39fb5673ec2296d90142cf9a909e595d82cf,Relationship Matrix Nonnegative Decomposition for Clustering,,2014
+32575ffa69d85bbc6aef5b21d73e809b37bf376d,Measuring Biometric Sample Quality in Terms of Biometric Information,2006 Biometrics Symposium: Special Session on Research at the Biometric Consortium Conference,2006
+32d6ee09bd8f1a7c42708d6dd8a5fb85ac4e08bc,Non-Interfering Effects of Active Post-Encoding Tasks on Episodic Memory Consolidation in Humans,,2017
+3535ba0cba9bf03443d52cbfc9a87090ca2e5d49,Supplementary Material : Synthesized Classifiers for Zero-Shot Learning,Unknown,2016
+35d7b5738350a1bbfd8d7a591433d1664f909009,VisemeNet: Audio-Driven Animator-Centric Speech Animation,ACM Trans. Graph.,2018
+35410a58514cd5fd66d9c43d42e8222526170c1b,Shared mechanism for emotion processing in adolescents with and without autism,,2017
+35f084ddee49072fdb6e0e2e6344ce50c02457ef,A bilinear illumination model for robust face recognition,Tenth IEEE International Conference on Computer Vision (ICCV'05) Volume 1,2005
+690f5d35489c63ec7309b9e4d77c929815065257,Complementary effects of gaze direction and early saliency in guiding fixations during free viewing.,Journal of vision,2014
+69dc87575b56ba7f60fa24bdd4fceabeeaf39a80,Decoding of nonverbal language in alcoholism: A perception or a labeling problem?,Psychology of addictive behaviors : journal of the Society of Psychologists in Addictive Behaviors,2016
+69c2ac04693d53251500557316c854a625af84ee,"50 years of biometric research: Accomplishments, challenges, and opportunities",Pattern Recognition Letters,2016
+3cb2841302af1fb9656f144abc79d4f3d0b27380,When 3 D-Aided 2 D Face Recognition Meets Deep Learning : An extended UR 2 D for Pose-Invariant Face Recognition,Unknown,2017
+3cc3cf57326eceb5f20a02aefae17108e8c8ab57,Benchmark for Evaluating Biological Image Analysis Tools,,2007
+3caf02979d7cd83d2f3894574c86babf3e201bf3,Seeing to hear? Patterns of gaze to speaking faces in children with autism spectrum disorders,,2014
+3c917f071bfc1244c75fca3ceed0a8c46bb975cc,Reduced acetylcholinesterase activity in the fusiform gyrus in adults with autism spectrum disorders.,Archives of general psychiatry,2011
+3c374cb8e730b64dacb9fbf6eb67f5987c7de3c8,Measuring Gaze Orientation for Human-Robot Interaction,,2009
+3c68763caa67dee55bca76f0f71dd4530f3fd57c,Ranking to Learn and Learning to Rank: On the Role of Ranking in Pattern Recognition Applications,CoRR,2017
+3c9f2444b1de1bf960664d8c3109f8b8d5dee44b,Automatic Facial Feature Extraction for Face Recognition,,2007
+3c8da376576938160cbed956ece838682fa50e9f,Aiding face recognition with social context association rule based re-ranking,IEEE International Joint Conference on Biometrics,2014
+3c90f2603ef99222697b76d7ab123f513a1f4baa,The Effects of Alcohol Intoxication on Accuracy and the Confidence–Accuracy Relationship in Photographic Simultaneous Line‐ups,,2017
+568067d7232c753e182dbc1d7075364560ffc363,Scope of physiological and behavioural pain assessment techniques in children – a review,,2018
+56c701467da819088c3f734f3ba36a793d645992,Title Underconnectivity of the Superior Temporal Sulcus Predicts Emotion Recognition Deficits in Autism Social Cognitive and Affective Neuroscience Advance Access Published Number of Words,,2013
+560b46547720b3a892f90a337835875f74f4f4ec,Discriminating Color Faces for Recognition,,2008
+56852a56dd830a6ee3882773c453025ddec652e2,Emotion recognition through static faces and moving bodies: a comparison between typically developed adults and individuals with high level of autistic traits,,2015
+56c5d08103c5bf4b263a81da73135455136bbe6d,Kernel MBPLS for a Scalable and Multi-Camera Person Re-Identification System,Unknown,2018
+56bb321e0e180f72be9c4e9eb791b251073750e2,Labeling and modeling large databases of videos,,2012
+512befa10b9b704c9368c2fbffe0dc3efb1ba1bf,Evidence and a computational explanation of cultural differences in facial expression recognition.,Emotion,2010
+51a8dabe4dae157aeffa5e1790702d31368b9161,Face recognition under generic illumination based on harmonic relighting,IJPRAI,2005
+51b70582fb0d536d4a235f91bf6ad382f29e2601,Detection of emotions from video in non-controlled environment. (Détection des émotions à partir de vidéos dans un environnement non contrôlé),Unknown,2013
+511a8cdf2127ef8aa07cbdf9660fe9e0e2dfbde7,A Community Detection Approach to Cleaning Extremely Large Face Database,,2018
+3d3fdeb8792859543d791e34af4005a80f348eed,Children's racial bias in perceptions of others' pain.,The British journal of developmental psychology,2014
+3d741315108b95cdb56d312648f5ad1c002c9718,Image-based face recognition under illumination and pose variations.,"Journal of the Optical Society of America. A, Optics, image science, and vision",2005
+3d67e97227846f579d1825e00d395d30e17f5d0e,Face and ECG Based Multi-Modal Biometric Authentication,,2012
+3d33f16ffb3f56e63b8b5c51147b1a07840d734a,Developing Cognitions about Race: White 5- to 10-Year-Olds’ Perceptions of Hardship and Pain Running head: DEVELOPING COGNITIONS ABOUT RACE,,2017
+3d67aa108e65e636158abc0f31b703af3d31baa6,Decorrelating Semantic Visual Attributes by Resisting the Urge to Share,,2013
+3d9db1cacf9c3bb7af57b8112787b59f45927355,Improving Medical Students’ Awareness of Their Non-Verbal Communication through Automated Non-Verbal Behavior Feedback,Front. ICT,2016
+58cbd5a31e92cff29e29e8b25ee79f30ff4e6d4b,Culture shapes spatial frequency tuning for face identification.,Journal of experimental psychology. Human perception and performance,2017
+587c48ec417be8b0334fa39075b3bfd66cc29dbe,Serial dependence in the perception of attractiveness,,2015
+6775c818b26263c885b0ce85c224dfd942c9652e,Pedestrian and Object Detection Using Learned Convolutional Filters,,2015
+67c3c1194ee72c54bc011b5768e153a035068c43,Street Scenes: towards scene understanding in still images,,2006
+67a3cc056a539d17f00b0be550a2fc7cb2118dc5,Scalable Image Retrieval by Sparse Product Quantization,IEEE Transactions on Multimedia,2017
+6757254d27b761ada5dbd88642bd0112fcb962cf,Gait Recognition Using Wearable Motion Recording Sensors,EURASIP J. Adv. Sig. Proc.,2009
+0b0eb6363a0c5b80c544aff091d547122986131b,Remembering faces with emotional expressions,,2014
+0b937abb3b356a2932d804f9fc4b463485f63d0e,Visual word disambiguation by semantic contexts,2011 International Conference on Computer Vision,2011
+0b24cca96ca61248a3fa3973525a967f94292835,Two Novel Face Recognition Approaches,Unknown,2018
+0b85b50b6ff03a7886c702ceabad9ab8c8748fdc,Is there a dynamic advantage for facial expressions?,Journal of vision,2011
+0b19177107a102ee81e5ef1bb9fb2f2881441503,Comparing Robustness of Pairwise and Multiclass Neural-Network Systems for Face Recognition,EURASIP J. Adv. Sig. Proc.,2008
+0b8ef6f5ec5dfc3eded5241fd3d636a596b94d26,Stereological analysis of amygdala neuron number in autism.,The Journal of neuroscience : the official journal of the Society for Neuroscience,2006
+0be80da851a17dd33f1e6ffdd7d90a1dc7475b96,Weighted Feature Gaussian Kernel SVM for Emotion Recognition,,2016
+0be8b12f194fb604be69c139a195799e8ab53fd3,Talking Heads: Detecting Humans and Recognizing Their Interactions,2014 IEEE Conference on Computer Vision and Pattern Recognition,2014
+0b6c10ea6bf8a6c254e00fcc2163c4b6fc0f1c3a,"Anti-Spoofing for Text-Independent Speaker Verification: An Initial Database, Comparison of Countermeasures, and Human Performance","IEEE/ACM Transactions on Audio, Speech, and Language Processing",2016
+93a66d470c1840d11eaa96ead3b600450b3cc9f8,Gaze aversion as a cognitive load management strategy in autism spectrum disorder and Williams syndrome,,2012
+93747de3d40376761d1ef83ffa72ec38cd385833,Team members' emotional displays as indicators of team functioning.,Cognition & emotion,2016
+93610676003ef1dcda3864b236bca3852cb05388,RECOGNIZING ACTIVITIES WITH CLUSTER-TREES OF TRACKLETS 1 Recognizing activities with cluster-trees of tracklets,Unknown,2015
+93a4c7ac0b09671db8cd3adbe62851d7befc4658,Machine Analysis of Facial Expressions,Unknown,2018
+94d5ebe936c101699e678f6f0cddd8a732986814,What you see is what you get: contextual modulation of face scanning in typical and atypical development,,2014
+949079cc466e875df1ee6bd6590052ba382a35cf,0 Large-Scale Face Image Retrieval :,Unknown,2012
+940865fc3f7ee5b386c4188c231eb6590db874e9,Security and Surveillance System for Drivers Based on User Profile and learning systems for Face Recognition,Network Protocols & Algorithms,2015
+0ee59e5baed4271ab85c85332550ca1539733a19,Atypical Modulations of N170 Component during Emotional Processing and Their Links to Social Behaviors in Ex-combatants,,2017
+0ec17d929f62660fb3d1bcdd791f9639034f5344,How Do We Evaluate Facial Emotion Recognition?,,2016
+0e73d2b0f943cf8559da7f5002414ccc26bc77cd,Similarity Comparisons for Interactive Fine-Grained Categorization,2014 IEEE Conference on Computer Vision and Pattern Recognition,2014
+0e1983e9d0e8cb4cbffef7af06f6bc8e3f191a64,Estimating illumination parameters in real space with application to image relighting,,2005
+607bfdbf583c4dfa29491eedc3934f2293e1fa96,A common allele in the oxytocin receptor gene (OXTR) impacts prosocial temperament and human hypothalamic-limbic structure and function.,Proceedings of the National Academy of Sciences of the United States of America,2010
+609ff585468ad0faba704dde1a69edb9f847c201,LogDet Rank Minimization with Application to Subspace Clustering,,2015
+60189e2b592056d43a28b6ffa491867f793ebe1e,Bağlamın Hiyerarşik Doğası,,2016
+60040e4eae81ab6974ce12f1c789e0c05be00303,Graphical Facial Expression Analysis and Design Method: An Approach to Determine Humanoid Skin Deformation,,2012
+60bffecd79193d05742e5ab8550a5f89accd8488,Proposal Classification using sparse representation and applications to skin lesion diagnosis,,
+346dbc7484a1d930e7cc44276c29d134ad76dc3f,Artists portray human faces with the Fourier statistics of complex natural scenes.,Network,2007
+34cd99528d873e842083abec429457233fdb3226,Person Re-identification using group context,Unknown,2018
+34d484b47af705e303fc6987413dc0180f5f04a9,RI:Medium: Unsupervised and Weakly-Supervised Discovery of Facial Events,,2010
+349668b75c4398c075fc681f563a80ad7cf6b4f2,Real-time face pose estimation from single range images,2008 IEEE Conference on Computer Vision and Pattern Recognition,2008
+341de07abfb89bf78f3a72513c8bce40d654e0a3,Sparse and Deep Generalizations of the FRAME Model,,2017
+5ab2c97ada652ff8f641e1b30cc27050c0ffa7e0,Comparing Emotion Recognition Skills among Children with and without Jailed Parents,,2016
+5a15eedcd836337b50a2bfab82ded7a9b939aca5,Perception of temporal asymmetries in dynamic facial expressions,,2015
+5aa7f33cdc00787284b609aa63f5eb5c0a3212f6,Multiplicative mixing of object identity and image attributes in single inferior temporal neurons,,2018
+5a226afa04f03086e402b22ee2c43089b68fa3ba,Multiview RGB-D Dataset for Object Instance Detection,2016 Fourth International Conference on 3D Vision (3DV),2016
+5ac8edd62fe23911e19d639287135f91e22421cc,Gender and 3D facial symmetry: What's the relationship?,2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG),2013
+5f02e49aa0fe467bbeb9de950e4abb6c99133feb,"Enhancing person re-identification by late fusion of low-, mid- and high-level features",IET Biometrics,2018
+5f39d07dd39e5d7cfba535ada3a0ab9d5d0efb5b,Perceptual dehumanization of faces is activated by norm violations and facilitates norm enforcement.,Journal of experimental psychology. General,2016
+5fea26746f3140b12317fcf3bc1680f2746e172e,Semantic Jitter: Dense Supervision for Visual Comparisons via Synthetic Images,2017 IEEE International Conference on Computer Vision (ICCV),2017
+5fc371760fd4c8abe94b91ae2ca03d428ac05faa,Fear-specific amygdala function in children and adolescents on the fragile x spectrum: a dosage response of the FMR1 gene.,Cerebral cortex,2014
+3394168ff0719b03ff65bcea35336a76b21fe5e4,Object Detection Combining Recognition and Segmentation,,2007
+334e559e8decadcedbe8e495b3f5430536cff32c,"The Attentional Suppressive Surround: Eccentricity, Location-Based and Feature-Based Effects and Interactions",,2018
+3369692338841f14ce032fc5d0b5b4fe7cc79f1a,Visualising mental representations: A primer on noise-based reverse correlation in social psychology,,2017
+05904c87cb1d0b1f17fcb018fa0344c020694f36,Modulation of the composite face effect by unintended emotion cues,,2017
+050fdbd2e1aa8b1a09ed42b2e5cc24d4fe8c7371,Spatio-Temporal Scale Selection in Video Data,Journal of Mathematical Imaging and Vision,2017
+05ef5efd9e42f49dbb9e50ec3fe367f275a94931,Biologically Inspired Processing for Lighting Robust Face Recognition,Unknown,2018
+05b6c32304dd1673c14f1e1efce4e4d5c4402275,What are the Visual Features Underlying Rapid Object Recognition?,,2011
+05fcbe4009543ec8943bdc418ee81e9594b899a4,Social perception in autism spectrum disorders: impaired category selectivity for dynamic but not static images in ventral temporal cortex.,Cerebral cortex,2014
+05e3acc8afabc86109d8da4594f3c059cf5d561f,Actor-Action Semantic Segmentation with Grouping Process Models,2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR),2016
+0559fb9f5e8627fecc026c8ee6f7ad30e54ee929,Facial Expression Recognition,,2011
+0549dc0290fe988ede74c4e030ae485c13eaa54a,Development of Vision Based Multiview Gait Recognition System with MMUGait Database,,2014
+05f3f8f6f97db00bafa2efd2ac9aac570603c0c6,TGIF: A New Dataset and Benchmark on Animated GIF Description,2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR),2016
+9d8ff782f68547cf72b7f3f3beda9dc3e8ecfce6,Improved Pseudoinverse Linear Discriminant Analysis Method for Dimensionality Reduction,IJPRAI,2012
+9d2ad0b408bddc9c5a713e250b52aa48f1786a46,Visual Recognition Using Local Quantized Patterns,Unknown,2012
+9d0bf3b351fb4d80cee5168af8367c5f6c8b2f3a,"The Tromso Infant Faces Database (TIF): Development, Validation and Application to Assess Parenting Experience on Clarity and Intensity Ratings",,2017
+9dc70aa3d51a9403e1894a7fa535ace99b527861,3 Bayesian Tracking by Online Co-Training and Sequential Evolutionary Importance Resampling,Unknown,2012
+9dd47158cd7ee3725be3aa7a2ce9b25a7d4aed74,Clustering-driven Deep Embedding with Pairwise Constraints,CoRR,2018
+9cf69de9e06e39f7f7ce643b3327bf69be8b9678,SHREC ’ 18 track : Recognition of geometric patterns over 3 D models,Unknown,2018
+9cf6d66a0b4e5a3347466a60caea411d67c4b5b7,Joint transfer component analysis and metric learning for person re-identification,Unknown,2018
+9cd8e1ccc5a410c7f31c7e404588597c0bb1952b,What's Your Type? Personalized Prediction of Facial Attractiveness,,
+9ce0d64125fbaf625c466d86221505ad2aced7b1,Recognizing expressions of children in real life scenarios View project PhD ( Doctor of Philosophy ) View project,Unknown,2017
+9cf07922cf91c4aea66c8d72606ca444f4607cc6,Distinct neural activation patterns underlie economic decisions in high and low psychopathy scorers.,Social cognitive and affective neuroscience,2014
+023ed32ac3ea6029f09b8c582efbe3866de7d00a,Discriminative learning from partially annotated examples,,2016
+02e05ad42dbe99257eee1bff3e28feaa005e5924,Remembering Who Was Where: A Happy Expression Advantage for Face Identity-Location Binding in Working Memory,,2018
+0252256fa23eceb54d9eea50c9fb5c775338d9ea,Application-driven Advances in Multi-biometric Fusion,Unknown,2018
+02a2fa826a348cc3bc46a1a31a49dce8d06ca366,Individual differences in the spontaneous recruitment of brain regions supporting mental state understanding when viewing natural social scenes.,Cerebral cortex,2011
+02b72a5a4389cb32a7dd784b1c9084e8412e2e78,Hierarchical Bayesian Image Models,Unknown,2018
+a45ec771ca2db81088c52c173eed9ec2022a8a70,Impaired recognition of negative basic emotions in autism: a test of the amygdala theory.,Social neuroscience,2006
+a4c430b7d849a8f23713dc283794d8c1782198b2,Video Concept Embedding,,2016
+a48c71153265d6da7fbc4b16327320a5cbfa6cba,Unite the People: Closing the loop between 3D and 2D Human Representations Supplementary Material,,2017
+a32f28156b47fd262e04426806037d138bb3ed0b,Fisher’s linear discriminant (FLD) and support vector machine (SVM) in non-negative matrix factorization (NMF) residual space for face recognition,,2010
+a361e820a85fa91f23091068f8177c58489304b1,Hard to “tune in”: neural mechanisms of live face-to-face interaction with high-functioning autistic spectrum disorder,,2012
+a3f684930c5c45fcb56a2b407d26b63879120cbf,LPM for Fast Action Recognition with Large Number of Classes,,2013
+a3177f82ea8391d9d733be47e4a0656a7b56e64c,The Roles of Emotions in the Law,,2016
+a36aa784e00d479bb0e6cb8aa6b6cd2dfeadfe1b,Evaluation of different features for face recognition in video,Unknown,2014
+a3fa023d7355662d066882df8dead0cac6a8321e,Supplementary Material for “Adversarial Inverse Graphics Networks: Learning 2D-to-3D Lifting and Image-to-Image Translation from Unpaired Supervision”,,2017
+a32d4195f7752a715469ad99cb1e6ebc1a099de6,The Potential of Using Brain Images for Authentication,,2014
+a308077e98a611a977e1e85b5a6073f1a9bae6f0,Intelligent Screening Systems for Cervical Cancer,,2014
+a3be57fc74460463f03c2a14e81e7e62c05c692e,Object Detection,,2014
+a35d3ba191137224576f312353e1e0267e6699a1,Increasing security in DRM systems through biometric authentication,,2001
+a3b70bf7e676f92ebb6dec3e2889c9131634f8b9,Use of 3D faces facilitates facial expression recognition in children,,2017
+b55489547790f7fb2c8b4689530b5660fbc8ee64,Face Scanning in Autism Spectrum Disorder and Attention Deficit/Hyperactivity Disorder: Human Versus Dog Face Scanning,,2015
+b5e3beb791cc17cdaf131d5cca6ceb796226d832,Novel Dataset for Fine-Grained Image Categorization: Stanford Dogs,,2012
+b55d0c9a022874fb78653a0004998a66f8242cad,Hybrid Facial Representations for Emotion Recognition Woo,,2013
+b5930275813a7e7a1510035a58dd7ba7612943bc,Face Recognition Using L-Fisherfaces,J. Inf. Sci. Eng.,2010
+b2b28eeeaa2b613bf30b5bfee5ec4272ce184bf3,Measuring Collectiveness via Refined Topological Similarity,TOMCCAP,2016
+b216040f110d2549f61e3f5a7261cab128cab361,Weighted Voting of Discriminative Regions for Face Recognition,IEICE Transactions,2017
+b255474d62f082fa97f50ea1174bf339522f6c99,Facial mimicry in its social setting,,2015
+b239b39c08a08d9c3b1da68a7bce162b580a746e,Gaze selection in complex social scenes,Unknown,2008
+b29fa452d737e2b6aa16d6f82a9a8daaea655287,Spontaneous Facial Actions Map onto Emotional Experiences in a Non-social Context: Toward a Component-Based Approach,,2017
+d9df2ed64494f54c0e2529f2c05a16423a57235c,A Novel Approach for Facial Expression Analysis in real time applications using SIFT flow and SVM,,2015
+d90026a9ca2489707aff2807617f3782f78097be,"Survey on audiovisual emotion recognition: databases, features, and data fusion strategies",,2014
+ac1d97a465b7cc56204af5f2df0d54f819eef8a6,A Look at Eye Detection for Unconstrained Environments,,2010
+ac5d9753a53b0d69308596908032f85b416c0056,Selectivity of Face Distortion Aftereffects for Differences in Expression or Gender,,2012
+accbd6cd5dd649137a7c57ad6ef99232759f7544,Facial Expression Recognition with Local Binary Patterns and Linear Programming,Unknown,2004
+ac51d9ddbd462d023ec60818bac6cdae83b66992,An Efficient Robust Eye Localization by Learning the Convolution Distribution Using Eye Template,,2015
+acc548285f362e6b08c2b876b628efceceeb813e,Objectifying Facial Expressivity Assessment of Parkinson's Patients: Preliminary Study,,2014
+ac56b4d6f9775211dfc966e9151862fd508d3142,Three-dimensional information in face recognition: an eye-tracking study.,Journal of vision,2011
+ac5b3e24a7dd2970c323ca7679625a7d29602480,Warsaw set of emotional facial expression pictures: a validation study of facial display photographs,,2014
+ac9dfbeb58d591b5aea13d13a83b1e23e7ef1fea,From Gabor Magnitude to Gabor Phase Features: Tackling the Problem of Face Recognition under Severe Illumination Changes,,2009
+ac559888f996923c06b1cf90db6b57b12e582289,Benchmarking neuromorphic vision: lessons learnt from computer vision,,2015
+acfecef9e56ff36455aed13f8e6be1a79b42f20f,Hit or Run: Exploring Aggressive and Avoidant Reactions to Interpersonal Provocation Using a Novel Fight-or-Escape Paradigm (FOE),,2017
+adc0b5d9f010f8b7d9900fcb1703c3882e340d65,Nasal Oxytocin Treatment Biases Dogs’ Visual Attention and Emotional Response toward Positive Human Facial Expressions,,2017
+ad9ecacca5c28b098096ad0cbd81fe84405924e3,1 Face Recognition by Sparse Representation,,2011
+ada1a5f2d2a3fb471de4a561ed13c52d0904b578,InverseFaceNet : Deep Monocular Inverse Face Rendering — Supplemental Material —,Unknown,2018
+addbddc42462975a02f4933d36f430b874b3d52b,"Social attention and real-world scenes: the roles of action, competition and social content.",Quarterly journal of experimental psychology,2008
+ad08c97a511091e0f59fc6a383615c0cc704f44a,Towards the improvement of self-service systems via emotional virtual agents,,2012
+adf62dfa00748381ac21634ae97710bb80fc2922,ViFaI : A trained video face indexing scheme Harsh,Unknown,2011
+bb1f4c8e4f310047e50b7dc41d87292025d42eb7,Intersubject Differences in False Nonmatch Rates for a Fingerprint-Based Authentication System,EURASIP J. Adv. Sig. Proc.,2009
+bb22104d2128e323051fb58a6fe1b3d24a9e9a46,Analyzing Facial Expression by Fusing Manifolds,,2007
+bbab2c3d0ebc0957c5e962298ffd8c6d4bc25c5a,Have we met before? Neural correlates of emotional learning in women with social phobia.,Journal of psychiatry & neuroscience : JPN,2014
+bbcf6f54d3e991f85a949544abf20b781d5ba2ed,Weighted principal component extraction with genetic algorithms,Appl. Soft Comput.,2012
+d74e14de664be4b784813d93e260abe379e2602d,Supplementary Material for : Video Prediction with Appearance and Motion Conditions,Unknown,2018
+d73d2c9a6cef79052f9236e825058d5d9cdc1321,Cutting the visual world into bigger slices for improved video concept detection. (Amélioration de la détection des concepts dans les vidéos en coupant de plus grandes tranches du monde visuel),,2014
+d7f7eb0fbe3339d13f5a6a23df0fd27fdb357d48,Intention-Aware Multi-Human Tracking for Human-Robot Interaction via Particle Filtering over Sets,,2014
+d708ce7103a992634b1b4e87612815f03ba3ab24,FCVID: Fudan-Columbia Video Dataset,,2016
+d787f691af05a56eb0e91437fc6b1dfe5fbccbb9,The Effect of Affective Context on Visuocortical Processing of Neutral Faces in Social Anxiety,,2015
+d79f9ada35e4410cd255db39d7cc557017f8111a,Evaluation of accurate eye corner detection methods for gaze estimation,,2014
+d06c8e3c266fbae4026d122ec9bd6c911fcdf51d,Role for 2D image generated 3D face models in the rehabilitation of facial palsy,,2017
+d074b33afd95074d90360095b6ecd8bc4e5bb6a2,Human-Robot Collaboration: a Survey,I. J. Humanoid Robotics,2008
+d0137881f6c791997337b9cc7f1efbd61977270d,"University of Dundee An automated pattern recognition system for classifying indirect immunofluorescence images for HEp-2 cells and specimens Manivannan,",,2016
+d0a9bbd3bd9dcb62f9874fc1378a7f1a17f44563,Prototype Generation Using Self-Organizing Maps for Informativeness-Based Classifier,,2017
+be48780eb72d9624a16dd211d6309227c79efd43,Interactive Visual and Semantic Image Retrieval,,2013
+be4a20113bc204019ea79c6557a0bece23da1121,DeepCache: Principled Cache for Mobile Deep Vision,Unknown,2017
+be6f29e129a99529f7ed854384d1f4da04c4ca1f,Spatially Consistent Nearest Neighbor Representations for Fine-Grained Classification. (Représentations d'images basées sur un principe de voisins partagés pour la classification fine),Unknown,2016
+be75a0ff3999754f20e63fde90f4c68b4af22d60,R4-A.1: Dynamics-Based Video Analytics,Unknown,2016
+b370eb9839be558e7db8390ce342312bd4835be9,Object Localization Does Not Imply Awareness of Object Category at the Break of Continuous Flash Suppression,,2017
+b37f57edab685dba5c23de00e4fa032a3a6e8841,Towards social interaction detection in egocentric photo-streams,,2015
+b3adc7617dff08d7427142837a326b95d2e83969,A Panoramic View of Performance,,2009
+b3f0a87043f7843b79744ec19dc0b93324d055d5,Improvements to Tracking Pedestrians in Video Streams Using a Pre-trained Convolutional Neural Network,Unknown,2017
+b32cf547a764a4efa475e9c99a72a5db36eeced6,Mimicry of ingroup and outgroup emotional expressions,Unknown,2018
+dfbf49ed66a9e48671964872c84f75d7f916c131,Supplementary Material for Sparsity Invariant CNNs,Unknown,2017
+dfbf941adeea19f5dff4a70a466ddd1b77f3b727,Models for supervised learning in sequence data,Unknown,2018
+df28cd627afe6d20eb198b8406ff25ece340653d,The Acquisition of Sign Language by Deaf Children with Autism Spectrum Disorder,Unknown,2013
+dff838ba0567ef0a6c8fbfff9837ea484314efc6,"Progress Report, MSc. Dissertation: On-line Random Forest for Face Detection",,2014
+df969647a0ee9ea25b23589f44be5240b5097236,How robust is familiar face recognition? A repeat detection study of more than 1000 faces,,2018
+da288fca6b3bcaee87a034529da5621bb90123d1,Aesthetics and Emotions in Images,IEEE Signal Processing Magazine,2011
+da1049ae56eaca2e7d65946cf87b1e504d9fcb70,VisDA : A Synthetic-to-Real Benchmark for Visual Domain Adaptation,Unknown,
+daef6fa60c7d79930ad0a341aab69f1f4fa80442,Supplement for BIER,,2017
+b4b1b39f8902208bbd37febfb68e08809098036d,TRECVid Semantic Indexing of Video : A 6-year Retrospective,Unknown,2016
+b47dae9d6499c6a777847a26297a647f0de49214,Aberrant Social Attention and Its Underlying Neural Correlates in Adults with Autism Spectrum Disorder,,2015
+a2e29b757f4021ed5b9eb7eebf78a0bddb460790,Visual scenes are categorized by function.,Journal of experimental psychology. General,2016
+a2a42aa37641490213b2de9eb8e83f3dab75f5ed,Multilinear Supervised Neighborhood Preserving Embedding Analysis of Local Descriptor Tensor,Unknown,2018
+a2183537ccf24eb95e8e7520b33f9aa8f190e80e,Subspace-Based Holistic Registration for Low-Resolution Facial Images,EURASIP J. Adv. Sig. Proc.,2010
+a2dd13729206a7434ef1f0cd016275c0d6f3bb6d,SFV: Reinforcement Learning of Physical Skills from Videos,CoRR,2018
+a59cdc49185689f3f9efdf7ee261c78f9c180789,A New Approach for Learning Discriminative Dictionary for Pattern Classification,J. Inf. Sci. Eng.,2016
+a57ee5a8fb7618004dd1def8e14ef97aadaaeef5,Fringe Projection Techniques: Whither we are?,,2009
+a50099f5364d3d4e82991418647c727f0f9c297c,A Generic Bi-Layer Data-Driven Crowd Behaviors Modeling Approach,IEICE Transactions,2017
+bd96c3af9c433b4eaf95c8a28f072e1b0fc2de1a,A Study on Facial Expression Recognition Model using an Adaptive Learning Capability,Unknown,2012
+bdb74f1b633b2c48d5e9d101e09bad2db8d68be6,Chapter 1 . Medical Image Annotation (,,
+bdbba95e5abc543981fb557f21e3e6551a563b45,Speeding up the Hyperparameter Optimization of Deep Convolutional Neural Networks,International Journal of Computational Intelligence and Applications,2018
+d16968e5baac6d26b9cef5034f9d84bcc3ec627c,"Children Facial Expression Production: Influence of Age, Gender, Emotion Subtype, Elicitation Condition and Culture",,2018
+d1295a93346411bb833305acc0e092c9e3b2eff1,The Empathy Imbalance Hypothesis of Autism: A Theoretical Approach to Cognitive and Emotional Empathy in Autistic Development,Unknown,2009
+d1d9e6027288cdd64509ea62f88a3cbd9320c180,Automated Markerless Analysis of Human Gait Motion for Recognition and Classification,,2011
+d68dbb71b34dfe98dee0680198a23d3b53056394,VIVA Face-off Challenge: Dataset Creation and Balancing Privacy,,2015
+bcab55f8bf0623df71623e673c767eed2159f05a,Deep Hybrid Scattering Image Learning,CoRR,2018
+bce36092b1910ff3d492f86aa3a39ed8faaf72d2,Chapter 17 Face Recognition Using 3 D Images,,2011
+bcf19b964e7d1134d00332cf1acf1ee6184aff00,Trajectory-Set Feature for Action Recognition,IEICE Transactions,2017
+bc99f98b5f1fd158cc31d693061c402a36222dbb,Recent advances in understanding the neural bases of autism spectrum disorder.,Current opinion in pediatrics,2011
+bc4537bc5834b41a631d9a807500d199b438fb27,Perceptual Integration Deficits in Autism Spectrum Disorders Are Associated with Reduced Interhemispheric Gamma-Band Coherence.,The Journal of neuroscience : the official journal of the Society for Neuroscience,2015
+bc8e1c2284008319ee325ff7ea19916726235f55,Autonomic responses to social and nonsocial pictures in adolescents with autism spectrum disorder.,Autism research : official journal of the International Society for Autism Research,2014
+aef59def2a65901de9d520d0442b42bb4a448f06,Facial Expression Recognition,,2009
+ae89b7748d25878c4dc17bdaa39dd63e9d442a0d,On evaluating face tracks in movies,2013 IEEE International Conference on Image Processing,2013
+ae60fccb686272d12e909c9de99efb652e0934ec,The impact of internalizing symptoms on autistic traits in adolescents with restrictive anorexia nervosa,,2015
+ae2c71080b0e17dee4e5a019d87585f2987f0508,Emotional Face Recognition in Children With Attention Deficit/Hyperactivity Disorder: Evidence From Event Related Gamma Oscillation,,2017
+ae71f69f1db840e0aa17f8c814316f0bd0f6fbbf,That personal profile image might jeopardize your rental opportunity! On the relative impact of the seller's facial expressions upon buying behavior on Airbnb™,Computers in Human Behavior,2017
+d82681348489f4f04690e65b9ffe21b68c89b5ff,Cross-Subject EEG Feature Selection for Emotion Recognition Using Transfer Recursive Feature Elimination,,2017
+ab87ab1cf522995510561cd9f494223704f1de91,Human Centric Facial Expression Recognition,Unknown,2018
+aba770a7c45e82b2f9de6ea2a12738722566a149,Face Recognition in the Scrambled Domain via Salience-Aware Ensembles of Many Kernels,IEEE Transactions on Information Forensics and Security,2016
+ab1728e84ac682ca0c53435f712a512ac139e9c8,University of Groningen Comparative Study Between Deep Learning and Bag of Visual Words for Wild-Animal,Unknown,2017
+ab2b09b65fdc91a711e424524e666fc75aae7a51,Multi-modal Biomarkers to Discriminate Cognitive State *,Unknown,2015
+ab567ca60fc3f72f27746b4d9e505042ab282ca3,Guidelines for studying developmental prosopagnosia in adults and children.,Wiley interdisciplinary reviews. Cognitive science,2016
+ab1f98b59fa98216f052ae19adce6fd94ebb800d,"Explaining First Impressions: Modeling, Recognizing, and Explaining Apparent Personality from Videos",CoRR,2018
+e597aca96ea1c928f13d15b7c4b46e3d41861afe,Mitigation of Effects of Occlusion on Object Recognition with Deep Neural Networks through Low-Level Image Completion,,2016
+e5d13afe956d8581a69e9dc2d1f43a43f1e2f311,Automatic Facial Feature Extraction for Face Recognition,Unknown,2018
+e2e920dfcaab27528c6fa65b6613d9af24793cb0,A comprehensive evaluation of multiband-accelerated sequences and their effects on statistical outcome measures in fMRI,,2016
+e2b615e3b78aa18c293e7f03eb96591ccb721b55,Recurrent Segmentation for Variable Computational Budgets,CoRR,2017
+e2e8db754b1ab4cd8aa07f5c5940f6921a1b7187,Interpretable visual models for human perception-based object retrieval,,2011
+e2af85dc41269bc7c50fcf2fb35bfeb75e3d6ee4,Oxytocin Improves “Mind-Reading” in Humans,Unknown,2007
+e20abf7143f4a224824c3db7213049dee2573b4e,An investigation of the relationship between activation of a social cognitive neural network and social functioning.,Schizophrenia bulletin,2008
+f412d9d7bc7534e7daafa43f8f5eab811e7e4148,Running Head : Anxiety and Emotional Faces in WS 2,Unknown,2014
+f43eeb578e0ca48abfd43397bbd15825f94302e4,Optical computer recognition of facial expressions associated with stress induced by performance demands.,"Aviation, space, and environmental medicine",2005
+f4b729d218139f1e93cc9d4df05fbf699d2e9d07,Introduction to the Special Issue on Recent Advances in Biometric Systems [Guest Editorial],"IEEE Trans. Systems, Man, and Cybernetics, Part B",2007
+f38ad869023c43b59431a3bb55f2fe8fb6ff0f05,A systematic review and meta-analysis of the fMRI investigation of autism spectrum disorders.,Neuroscience and biobehavioral reviews,2012
+f36647e63a11486ef9cf7a5a1c86a40fda5d408a,CS 229 Final Report: Artistic Style Transfer for Face Portraits,,2016
+f3cf10c84c4665a0b28734f5233d423a65ef1f23,Temporal Exemplar-based Bayesian Networks for facial expression recognition,Unknown,2008
+f397b8c835425e4b18cc7d9088b7f810c6cf2563,Yimo Guo IMAGE AND VIDEO ANALYSIS BY LOCAL DESCRIPTORS AND DEFORMABLE IMAGE REGISTRATION,Unknown,2013
+eb1208a7f535de6c6180e4dbeb6eef2a27500c52,"To be or Not to be Threatening, but What was the Question? Biased Face Evaluation in Social Anxiety and Depression Depends on How You Frame the Query",,2013
+ebe44c125f6d5c893df73d20b602e479a38e5b23,Algorithmic Identification of Looted Archaeological Sites from Space,Front. ICT,2017
+c757f6ee46208c1c26572265803068f8d837c384,Thermal imaging systems for real-time applications in smart cities,IJCAT,2016
+c72ac3dec0d0b2d5ca4945b07bd6b72c365bdc13,Shorter spontaneous fixation durations in infants with later emerging autism,,2015
+c784d4918ad33f4dd2991155ea583b4789ba3c11,Bimodal Vein Recognition Based on Task-Specific Transfer Learning,IEICE Transactions,2017
+c7dd846c0abc896e5fd0940ac07927553cc55734,Neurofunctional Underpinnings of Audiovisual Emotion Processing in Teens with Autism Spectrum Disorders,,2013
+c7d7cf88d2e9f3194aec2121eb19dbfed170dba8,Unconstrained Gaze Estimation Using Random Forest Regression Voting,,2016
+c793a38c3d16b093c12ba8a9d12dfa88159ecd38,Neurons in the fusiform gyrus are fewer and smaller in autism.,Brain : a journal of neurology,2008
+c719a718073128a985c957cdfa3f298706a180e6,Comparative Evaluations of Selected Tracking-by-Detection Approaches,Unknown,2018
+c02cc6af3cc93e86e86fb66412212babda8fb858,Interocularly merged face percepts eliminate binocular rivalry,,2017
+c08420b1bfa093e89e35e3b8d3a9e3e881f4f563,A Classification Framework for Large-Scale Face Recognition Systems,Unknown,2009
+c0de99c5f15898e2d28f9946436fec2b831d4eae,ClothCap: seamless 4D clothing capture and retargeting,ACM Trans. Graph.,2017
+c02847a04a99a5a6e784ab580907278ee3c12653,Fine Grained Video Classification for Endangered Bird Species Protection,,2017
+c0ead9bada2fb7cdebf7dadbc8548d08387966ae,Young Adults with Autism Spectrum Disorder Show Early Atypical Neural Activity during Emotional Face Processing,,2018
+c0e0b878ec8c56679faccb3c3f5e2ae968182da5,A Multifactor Extension of Linear Discriminant Analysis for Face Recognition under Varying Pose and Illumination,EURASIP J. Adv. Sig. Proc.,2010
+eee8a37a12506ff5df72c402ccc3d59216321346,Volume C,,2008
+eeb6d084f9906c53ec8da8c34583105ab5ab8284,Generation of Facial Expression Map using Supervised and Unsupervised Learning,,2012
+eed7920682789a9afd0de4efd726cd9a706940c8,Computers to Help with Conversations : Affective Framework to Enhance Human Nonverbal Skills,Unknown,2013
+ee3a905ec8cd2e62dc642fad33d6f5f8516968a8,It depends: Approach and avoidance reactions to emotional expressions are influenced by the contrast emotions presented in the task.,Journal of experimental psychology. Human perception and performance,2016
+eedf9480de99e3373d2321f61ee5b71ea3ebf493,Altered Social Reward and Attention in Anorexia Nervosa,,2010
+c94ae3d1c029a70cabdab906fe1460d84fd42acd,"Comparison of wavelet, Gabor and curvelet transform for face recognition",,2011
+fc857cebd4150e3fe3aee212f128241b178f0d0a,Amygdala damage impairs eye contact during conversations with real people.,The Journal of neuroscience : the official journal of the Society for Neuroscience,2007
+fc7627e57269e7035e4d56105358211076fe4f04,The Association of Quantitative Facial Color Features with Cold Pattern in Traditional East Asian Medicine,,2017
+fcbec158e6a4ace3d4311b26195482b8388f0ee9,Face Recognition from Still Images and Videos,,2004
+fc950b230a0189cc63b2e2295b2dc761d5b2270c,Health care providers' judgments in chronic pain: the influence of gender and trustworthiness.,Pain,2016
+fc64f43cdcf4898b15ddce8b441d2ab9daa324f0,Gabor Filter-based Face Recognition Technique,,2010
+fdb33141005ca1b208a725796732ab10a9c37d75,A connectionist computational method for face recognition,Applied Mathematics and Computer Science,2016
+fdbacf2ff0fc21e021c830cdcff7d347f2fddd8e,Recognizing Frustration of Drivers From Face Video Recordings and Brain Activation Measurements With Functional Near-Infrared Spectroscopy,,2018
+f24e379e942e134d41c4acec444ecf02b9d0d3a9,Analysis of Facial Images across Age Progression by Humans,,2011
+f27bdc4f7ec2006425f999055df071d64640836e,Preserved Crossmodal Integration of Emotional Signals in Binge Drinking,,2017
+f26b3a916aaa50fe6ef554fff744559815ccf954,Serotonin transporter genotype impacts amygdala habituation in youth with autism spectrum disorders.,Social cognitive and affective neuroscience,2014
+f2e9616577a0eb866e78e6fd68c67809e4fce11c,Digital innovations in L 2 motivation : Harnessing the power of the Ideal L 2 Self,Unknown,2018
+f231046d5f5d87e2ca5fae88f41e8d74964e8f4f,Perceived Age Estimation from Face Images,Unknown,2018
+f28b7d62208fdaaa658716403106a2b0b527e763,Clustering-driven Deep Embedding with Pairwise Constraints,CoRR,2018
+f524b1aac4f2a29dab45d7e8726517798dbc9782,Anger superiority effect: The importance of dynamic emotional facial expressions,,2013
+f5c83679b73ab59c2ada2b72610acdd63669b226,2d-3d Pose Invariant Face Recognition System for Multimedia Applications,,2009
+f558a3812106764fb1af854a02da080cc42c197f,Amygdala volume and nonverbal social impairment in adolescent and adult males with autism.,Archives of general psychiatry,2006
+e379e73e11868abb1728c3acdc77e2c51673eb0d,Face Databases,,2005
+e3660a13fcd75cf876a6ce355c2c1a578cfb57cb,2DHMM-Based Face Recognition Method,,2015
+cff0e53006c6145d96322e6401e840f405b6ed02,Guest Editorial: Apparent Personality Analysis,IEEE Trans. Affective Computing,2018
+cf875336d5a196ce0981e2e2ae9602580f3f6243,"What's It Mean for a Computer to ""Have"" Emotions?",,
+cf5a0115d3f4dcf95bea4d549ec2b6bdd7c69150,Detection of emotions from video in non-controlled environment. (Détection des émotions à partir de vidéos dans un environnement non contrôlé),Unknown,2013
+cffc94574c8796cbd8234422a979e57e67eca7b5,Multiracial Children's and Adults' Categorizations of Multiracial Individuals.,Journal of cognition and development : official journal of the Cognitive Development Society,2017
+cf814b618fcbc9a556cdce225e74a8806867ba84,Facial Expression Recognition Using 3D Facial Feature Distances,,2007
+ca11dc3a8064583aaf79061866bbcf04caece162,Disentangled Representations in Neural Models,CoRR,2016
+e4df83b7424842ff5864c10fa55d38eae1c45fac,Locally Linear Discriminate Embedding for Face Recognition,,2010
+e443cb55dcc54de848e9f0c11a6194568a875011,From passive to interactive object learning and recognition through self-identification on a humanoid robot,Auton. Robots,2016
+e48e94959c4ce799fc61f3f4aa8a209c00be8d7f,Design of an Efficient Real-Time Algorithm Using Reduced Feature Dimension for Recognition of Speed Limit Signs,,2013
+fef3efeffade0e39f2c279653b4785b372be410e,Near infrared face recognition: A literature survey,Computer Science Review,2016
+fe464b2b54154d231671750053861f5fd14454f5,Multi Joint Action in CoTeSys-Setup and Challenges-Technical report CoTeSys-TR-1001,Unknown,2010
+feb6e267923868bff6e2108603d00fdfd65251ca,Unsupervised Discovery of Visual Face Categories,International Journal on Artificial Intelligence Tools,2013
+feb5b8bf315a6b6222f62dd9533b1e0f891a27bd,The Nature and Consequences of Essentialist Beliefs About Race in Early Childhood.,Child development,2018
+feaedb6766f42e867aab7f1a33ba4d7ddacfc7aa,UvA-DARE ( Digital Academic Repository ) Tag-based Video Retrieval by Embedding Semantic Content in a Continuous Word,Unknown,2016
+fe95b902eb362ad39f91e2325300d3f7a9119c48,Modeling invariant object processing based on tight integration of simulated and empirical data in a Common Brain Space,,2012
+c8a5c5c8e1293b7e877a848b7a9e5426c5400651,FaceShop: Deep Sketch-based Face Image Editing,ACM Trans. Graph.,2018
+c87f7ee391d6000aef2eadb49f03fc237f4d1170,A real-time and unsupervised face Re-Identification system for Human-Robot Interaction,CoRR,2017
+c87d5036d3a374c66ec4f5870df47df7176ce8b9,Temporal Dynamics of Natural Static Emotional Facial Expressions Decoding: A Study Using Event- and Eye Fixation-Related Potentials,,2018
+c8a22550297a25dadd283089f009015bc0df5eed,Neural circuits in the brain that are activated when mitigating criminal sentences,,2012
+c8e84cdff569dd09f8d31e9f9ba3218dee65e961,Dictionaries for image and video-based face recognition [Invited].,"Journal of the Optical Society of America. A, Optics, image science, and vision",2014
+c81326a1ecb7e71ae38a665779b8d959d3938d1a,A Novel Neural Network Model Specified for Representing Logical Relations,CoRR,2017
+c8fc81a54ccef6d8111e7253283fc55e7e0f8ebd,High Resolution Face Completion with Multiple Controllable Attributes via Fully End-to-End Progressive Generative Adversarial Networks,CoRR,2018
+c8e32484bbbc63908080284790edafc4b66008d2,Suivi par ré-identification dans un réseau de caméras à champs disjoints,Traitement du Signal,2012
+fbd047862ea869973ecf8fc35ae090ca00ff06d8,Literature review of fingerprint quality assessment and its evaluation,IET Biometrics,2016
+fbd17af24e86fe487e28f99ba3e402dd6cfcd16a,Towards Detailed Recognition of Visual Categories,,2013
+edf98a925bb24e39a6e6094b0db839e780a77b08,Simplex Representation for Subspace Clustering,CoRR,2018
+c18d80d00f2a7107bfe780eeec21b51a634ea925,Computational perspectives on the other-race effect,,2013
+c175f1666f3444e407660c5935a05b2a53f346f0,Modifying the Memorability of Face,Unknown,2013
+c1dd69df9dfbd7b526cc89a5749f7f7fabc1e290,Unconstrained face identification with multi-scale block-based correlation,Unknown,2017
+c69ea9367e1244bfa5d3fc290b8a33be3abd8c24,"Many faces, one rule: the role of perceptual expertise in infants’ sequential rule learning",,2015
+c6e99ff40ccae0d7ce8e32666ed7f75e3a381d9b,How does the topic of conversation affect verbal exchange and eye gaze? A comparison between typical development and high-functioning autism.,Neuropsychologia,2010
+c6657c1263bac59b006d1da1174ec4bcea0dff3d,Global-local visual processing in high functioning children with autism: structural vs. implicit task biases.,Journal of autism and developmental disorders,2006
+c694b397a3a0950cd20699a687fe6c8a3173b107,Explaining autism spectrum disorders: central coherence vs. predictive coding theories.,Journal of neurophysiology,2014
+c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8,Anxiety promotes memory for mood-congruent faces but does not alter loss aversion.,Scientific reports,2016
+c6dab0aba7045f078313a4186cd507ff8eb8ce32,Atypical disengagement from faces and its modulation by the control of eye fixation in children with autism spectrum disorder.,Journal of autism and developmental disorders,2011
+ec0177cfdee435c6522ca4ee8a5f97ac0412472e,Reconstruction of images from Gabor graphs with applications in facial image processing,IJWMIP,2015
+ecdf8e5393eead0b63c5bc4fbe426db5a70574eb,Linear Subspace Learning for Facial Expression Analysis,Unknown,2012
+ec6855acd0871d3e000872a5dd89db97c1554e18,Contrasting emotion processing and executive functioning in attention-deficit/hyperactivity disorder and bipolar disorder.,Behavioral neuroscience,2016
+ec5f89e822d9fcbc7b7422dc401478fc29f9c02d,Those Virtual People all Look the Same to me: Computer-Rendered Faces Elicit a Higher False Alarm Rate Than Real Human Faces in a Recognition Memory Task,,2018
+4e5dc3b397484326a4348ccceb88acf309960e86,Secure Access Control and Large Scale Robust Representation for Online Multimedia Event Detection,,2014
+4ed4143034fc6303737c7ad5118a72d9a5d12cf2,Web Survey Gamification - Increasing Data Quality in Web Surveys by using Game Design Elements,,2017
+4efaa2a1a14ba6e8bea779eae49d6220fc771f2a,"Individual Differences in the Speed of Facial Emotion Recognition Show Little Specificity but Are Strongly Related with General Mental Speed: Psychometric, Neural and Genetic Evidence",,2017
+4e613c9342d6e90f7af5fd3f246c6d82a33fe98d,Estimating Human Pose in Images,,2009
+2004afb2276a169cdb1f33b2610c5218a1e47332,Deep Convolutional Neural Network Used in Single Sample per Person Face Recognition,,2018
+20a16efb03c366fa4180659c2b2a0c5024c679da,Screening Rules for Overlapping Group Lasso,CoRR,2014
+20c59a55795eaa4f2629cc83fb556dc8c5bcfc1f,Modeling and visual recognition of human actions and interactions,Unknown,2013
+208a2c50edb5271a050fa9f29d3870f891daa4dc,The resolution of facial expressions of emotion.,Journal of vision,2011
+2031b062f4c41f43a32835430b1d55a422baa564,VNect: real-time 3D human pose estimation with a single RGB camera,ACM Trans. Graph.,2017
+20eaa3ebe2b6e1aff7c4585733c9fb0cfc941919,Image similarity using Deep CNN and Curriculum Learning,CoRR,2017
+20388099cc415c772926e47bcbbe554e133343d1,The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults,,2014
+182496e9533ad3a5eef6a06b815a276c18eaea2e,High autistic trait individuals do not modulate gaze behaviour in response to social presence but look away more when actively engaged in an interaction,,2017
+18f70d8e1697bc0b85753db2d4d64aeb696b052a,Evolutionary Discriminant Feature Extraction with Application to Face Recognition,EURASIP J. Adv. Sig. Proc.,2009
+183c8da12a07e2002fd71edbabeca5b3bfb45d66,Grounding Natural Language Instructions with Unknown Object References using Learned Visual Attributes,,2017
+18804d8e981fa66135c0ffa6fdb2b8b3fec6d753,Predicting human gaze beyond pixels.,Journal of vision,2014
+1875b2325b3efcb49dec51c6416f40862db4fe74,Functional abnormalities of the default network during self- and other-reflection in autism.,Social cognitive and affective neuroscience,2008
+27a0a7837f9114143717fc63294a6500565294c2,Face Recognition in Unconstrained Environments: A Comparative Study,,2015
+27421586a04584d38dd961b37d0ca85408acfe59,Large brains in autism: the challenge of pervasive abnormality.,"The Neuroscientist : a review journal bringing neurobiology, neurology and psychiatry",2005
+27eb092a9adbfcb3aea1b13bde580f1fd5c7b8f0,Oxytocin Increases Gaze to the Eye Region of Human Faces,,2007
+270733d986a1eb72efda847b4b55bc6ba9686df4,Recognizing Facial Expressions Using Model-Based Image Interpretation,Unknown,2008
+272c6b6ccf144954a154b83bf5789341ee3f9ed2,A brain-computer interface for potential non-verbal facial communication based on EEG signals related to specific emotions,,2014
+27169761aeab311a428a9dd964c7e34950a62a6b,Face Recognition Using 3D Head Scan Data Based on Procrustes Distance,2008 International Conference on Intelligent Engineering Systems,2008
+27a299b834a18e45d73e0bf784bbb5b304c197b3,Social Role Discovery in Human Events,2013 IEEE Conference on Computer Vision and Pattern Recognition,2013
+4b7c110987c1d89109355b04f8597ce427a7cd72,Feature- and Face-Exchange illusions: new insights and applications for the study of the binding problem,,2014
+4b86e711658003a600666d3ccfa4a9905463df1c,Fusion of Appearance Image and Passive Stereo Depth Map for Face Recognition Based on the Bilateral 2DLDA,EURASIP J. Image and Video Processing,2007
+4ba3f9792954ee3ba894e1e330cd77da4668fa22,Nearest Neighbor Discriminant Analysis,IJPRAI,2006
+4b71d1ff7e589b94e0f97271c052699157e6dc4a,Pose-Encoded Spherical Harmonics for Face Recognition and Synthesis Using a Single Image,EURASIP J. Adv. Sig. Proc.,2008
+11943efec248fcac57ff6913424e230d0a02e977,Auxiliary Tasks in Multi-task Learning,CoRR,2018
+11aa527c01e61ec3a7a67eef8d7ffe9d9ce63f1d,"Automated measurement of mouse social behaviors using depth sensing, video tracking, and machine learning.",Proceedings of the National Academy of Sciences of the United States of America,2015
+116888b8f08419f027f5047f0ff1557b16f69d5a,Fearful contextual expression impairs the encoding and recognition of target faces: an ERP study,,2015
+7dce05b7765541b3fb49a144fb39db331c14fdd1,Modélisation et suivi des déformations faciales : applications à la description des expressions du visage dans le contexte de la langue des signes,Unknown,2007
+7d1ac241fb603a4237cb681dbcf163a9f89e906a,Supplementary Material : Switching Convolutional Neural Network for Crowd Counting,,2017
+7d621ec871a03a01f5aa65253e9ae6c8aadaf798,Converting Static Image Datasets to Spiking Neuromorphic Datasets Using Saccades,,2015
+29ce6b54a87432dc8371f3761a9568eb3c5593b0,Age Sensitivity of Face Recognition Algorithms,2013 Fourth International Conference on Emerging Security Technologies,2013
+295266d09fde8f85e6e577b5181cbc73a1594b6b,Parallel effects of processing fluency and positive affect on familiarity-based recognition decisions for faces,,2014
+2933da06df9e47da8e855266f5ff50e03c0ccd27,Combination of RGB-D Features for Head and Upper Body Orientation Classification,Unknown,2016
+29c7dfbbba7a74e9aafb6a6919629b0a7f576530,Automatic Facial Expression Analysis and Emotional Classification,,2004
+29230bbb447b39b7fc3de7cb34b313cc3afe0504,Face Detection and Recognition Using Maximum Likelihood Classifiers on Gabor Graphs,IJPRAI,2009
+29a606ba5b9ae9bc16d05a832d4e54d769c63dae,Activation of mGluR2/3 underlies the effects of N-acetylcystein on amygdala-associated autism-like phenotypes in a valproate-induced rat model of autism,,2014
+2939169aed69aa2626c5774d9b20e62c905e479b,Fast Exact HyperGraph Matching with Dynamic Programming for Spatio-Temporal Data,Unknown,2017
+29c6b06ac98dbdaf25e4cc9a05b4ab314923cccd,Assessment of the communicative and coordination skills of children with Autism Spectrum Disorders and typically developing children using social signal processing,,2013
+2983efadb1f2980ab5ef20175f488f77b6f059d7,Emotion in Human–computer Interaction,,2011
+29f0414c5d566716a229ab4c5794eaf9304d78b6,Biometric Template Security,EURASIP J. Adv. Sig. Proc.,2008
+7c79d3a40c1a1f5b9692ed23396b0f13453c225c,The influence of vocal training and acting experience on measures of voice quality and emotional genuineness,,2014
+7c825562b3ff4683ed049a372cb6807abb09af2a,Finding Tiny Faces Supplementary Materials,Unknown,2017
+7cf8440b1c02c021f6ba8543ad490b4788bbe280,"Unsupervised Decoding of Long-Term, Naturalistic Human Neural Recordings with Automated Video and Audio Annotations",Frontiers in human neuroscience,2016
+1696f6861c208b6a7cac95fbeba524867ad3e8d6,Using deep learning to quantify the beauty of outdoor places,,2017
+16f76f040f08448cf0a3984168d69197ea4af039,"Now you see race, now you don’t: Verbal cues influence children’s racial stability judgments",,2017
+16c884be18016cc07aec0ef7e914622a1a9fb59d,Exploiting Multimodal Data for Image Understanding,,2010
+16e95a907b016951da7c9327927bb039534151da,3D Face Recognition Using Spherical Vector Norms Map,J. Inf. Sci. Eng.,2017
+164f3b9740d9ceb14658237fddede0f86b5e0c47,CASENet: Deep Category-Aware Semantic Edge Detection,2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR),2017
+167736556bea7fd57cfabc692ec4ae40c445f144,Improved Motion Description for Action Classification,Front. ICT,2016
+4213502d0f226b9845b00c2882851ba4c57742ab,Does Rabbit Antithymocyte Globulin (Thymoglobuline®) Have a Role in Avoiding Delayed Graft Function in the Modern Era of Kidney Transplantation?,,2018
+426b47af132293e9ffe6071a3ede59cfdc1aa3fb,Promoting social behavior with oxytocin in high-functioning autism spectrum disorders.,Proceedings of the National Academy of Sciences of the United States of America,2010
+42765c170c14bd58e7200b09b2e1e17911eed42b,Feature Extraction Based on Wavelet Moments and Moment Invariants in Machine Vision Systems,,2012
+42e793b1dd6669b74ad106071c432aa5015b8631,How do people think about interdependence? A multidimensional model of subjective outcome interdependence.,Journal of personality and social psychology,2018
+42dc36550912bc40f7faa195c60ff6ffc04e7cd6,Visible and Infrared Face Identification via Sparse Representation,,2013
+425833b5fe892b00dcbeb6e3975008e9a73a5a72,A Review of Performance Evaluation for Biometrics Systems,Int. J. Image Graphics,2005
+4276eb27e2e4fc3e0ceb769eca75e3c73b7f2e99,Face Recognition From Video,,2008
+421b3a33ec70af2d733310f6c83ad713a314951d,Using nasal curves matching for expression robust 3D nose recognition,"2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)",2013
+89c84628b6f63554eec13830851a5d03d740261a,Image Enhancement and Automated Target Recognition Techniques for Underwater Electro-Optic Imagery,,2010
+8973910c8acfd296922d9691a533b3c5061ec815,Supplementary Material for Efficient Online Local Metric Adaptation via Negative Samples for Person Re-Identification,,2017
+893239f17dc2d17183410d8a98b0440d98fa2679,UvA-DARE ( Digital Academic Repository ) Expression-Invariant Age Estimation,Unknown,2017
+89e4f5a1eb6a97459bb748f4f7bc5c2696354aad,Semantics from Sound: Modeling Audio and Text Thesis Proposal,,2006
+89c45ace90d377502dc84825e5039290927ae9e2,"Changes in vegetation persistence across global savanna landscapes , 1982 – 2010",Unknown,2016
+45483f17551d9c6b550474dc7168ec31302e5d7b,Face recognition via collaborative representation based multiple one-dimensional embedding,IJWMIP,2016
+4541c9b4b7e6f7a232bdd62ae653ba5ec0f8bbf6,The role of structural facial asymmetry in asymmetry of peak facial expressions.,Laterality,2006
+45518c2350b9e727adf59f1626610917f71aea1e,Cross-Layer Design Space Exploration of Heterogeneous Multicore Processors With Predictive Models,,2014
+451d777ee33833a3b5eb6ba5292fae162c6d265f,Exploiting Feature Correlations by Brownian Statistics for People Detection and Recognition,"IEEE Transactions on Systems, Man, and Cybernetics: Systems",2017
+453e311c6de1285cd5ea6d93fd78a636eac0ba82,Multi patches 3D facial representation for person authentication using AdaBoost,2010 5th International Symposium On I/V Communications and Mobile Network,2010
+45ca696076e9c073e6cf699766f808899589bc88,Aalborg Universitet Thermal Tracking of Sports Players,Unknown,2017
+1f89439524e87a6514f4fbe7ed34bda4fd1ce286,Devising Face Authentication System and Performance Evaluation Based on Statistical Models,,2015
+1fa9c5af78b3ca04476f4ee6910684dc19008f5e,Supplementary Material : Cross-Dataset Adaptation for Visual Question Answering,Unknown,2018
+1fb2082d3f772933b586cca65af2099512b9c68b,Comparison of Spectral-Only and Spectral/Spatial Face Recognition for Personal Identity Verification,EURASIP J. Adv. Sig. Proc.,2009
+1fe990ca6df273de10583860933d106298655ec8,A Wavelet-Based Image Preprocessing Method or Illumination Insensitive Face Recognition,J. Inf. Sci. Eng.,2015
+73bbbfac7b144f835840fe7f7b5139283bf4f3f1,Do we spontaneously form stable trustworthiness impressions from facial appearance?,Journal of personality and social psychology,2016
+73c13ba142588f45aaa92805fe75ca2691ac981b,A Comparative Study of Social Scene Parsing Strategies between Children with and without Autism Spectrum Disorder,,2016
+7372c1e9cb87dad88bc160536263e461bb7ab04c,Trajectory Energy Minimisation for Cell Growth Tracking and Genealogy Analysis,,2017
+735c38361d77e707ac48f0d040493c65ca559d3c,Machine Learning for Simplifying the Use of Cardiac Image Databases. (Apprentissage automatique pour simplifier l'utilisation de banques d'images cardiaques),Unknown,2015
+7373c4a23684e2613f441f2236ed02e3f9942dd4,Feature extraction through Binary Pattern of Phase Congruency for facial expression recognition,2012 12th International Conference on Control Automation Robotics & Vision (ICARCV),2012
+73599349402bf8f0d97f51862d11d128cdba44ef,Affective analysis of videos: detecting emotional content in real-life scenarios,Unknown,2017
+738fadaf40249146f33da5b9efbb72a1fdf8767d,Unsupervised Learning of Invariant Representations in Hierarchical Architectures,CoRR,2013
+8797c870c0881cd30fda186affee4bdec54aeecd,Binary Biometric Representation through Pairwise Adaptive Phase Quantization,EURASIP J. Information Security,2011
+8722ab37a03336f832e4098224cb63cd02cdfe0a,Face recognition with 3 D face asymmetry,Unknown,2016
+87bdafbcf3569c06eef4a397beffc451f5101f94,Facial expression: An under-utilised tool for the assessment of welfare in mammals.,ALTEX,2017
+8732d702aeb08e9c604b36dcaa5933aea91a228d,Development of social skills in children: neural and behavioral evidence for the elaboration of cognitive models,,2015
+806f466034e0c3e609e672559e23d5d8bea6fe3d,Adaptive memory: The mnemonic value of contamination,,2017
+80265d7c9fe6a948dd8c975bd4d696fb7ba099c9,Face Recognition Based on Human Visual Perception Theories and Unsupervised ANN,,2009
+801b0ae343a11a15fd7abc5720831afea6f0a61d,Similarity Learning with Listwise Ranking for Person Re-Identification,Unknown,2018
+74de03923a069ffc0fb79e492ee447299401001f,On Film Character Retrieval in Feature-Length Films,,2005
+744fa8062d0ae1a11b79592f0cd3fef133807a03,Deep Pain: Exploiting Long Short-Term Memory Networks for Facial Expression Classification.,IEEE transactions on cybernetics,2017
+74113bb67eef4cfa28ebfa8bd38a614c82bdfdea,Neural responses to facial expressions support the role of the amygdala in processing threat.,Social cognitive and affective neuroscience,2014
+743c7e1aef6461d6582cf8deeb5d518e45215f89,Looking you in the mouth: abnormal gaze in autism resulting from impaired top-down modulation of visual attention.,Social cognitive and affective neuroscience,2006
+1a229f1d21abe442520cba31a6e08663b3d31777,The heterogeneous block architecture,2014 IEEE 32nd International Conference on Computer Design (ICCD),2014
+1a41e5d93f1ef5b23b95b7163f5f9aedbe661394,Alignment-Free and High-Frequency Compensation in Face Hallucination,,2014
+1ac20a7a76f7b83ccd8ea0aab64e2b24ecd23915,Impaired social brain network for processing dynamic facial expressions in autism spectrum disorders,,2012
+1a9e0bf9f7a9495bcdf1aeb214ccc9df9f2a9030,Challenges and Opportunities The Main Memory System : Challenges and Opportunities,Unknown,2015
+1a515f0b852c2e93272677dbf6ecb05c7be0ea2e,Reduced serotonin receptor subtypes in a limbic and a neocortical region in autism.,Autism research : official journal of the International Society for Autism Research,2013
+1ad823bf77c691f1d2b572799f8a8c572d941118,Précis of “Towards The Deep Model : Understanding Visual Recognition Through Computational Models”,,
+1a5a79b4937b89420049bc279a7b7f765d143881,Are Rich People Perceived as More Trustworthy? Perceived Socioeconomic Status Modulates Judgments of Trustworthiness and Trust Behavior Based on Facial Appearance,,2018
+1afd481036d57320bf52d784a22dcb07b1ca95e2,Automated Content Metadata Extraction Services Based on MPEG Standards,Comput. J.,2013
+1a4b6ee6cd846ef5e3030a6ae59f026e5f50eda6,Deep Learning for Video Classification and Captioning,CoRR,2016
+1a2431e3b35a4a4794dc38ef16e9eec2996114a1,Automated Face Recognition: Challenges and Solutions,Unknown,2018
+1a51bc5f9f12f6794297a426739350ae57c87731,Image classification with CNN-based Fisher vector coding,2016 Visual Communications and Image Processing (VCIP),2016
+284bf12324805f23b920bec0174be003c248cc9b,Lower Sensitivity to Happy and Angry Facial Emotions in Young Adults with Psychiatric Problems,,2016
+28b26597a7237f9ea6a9255cde4e17ee18122904,Network Interactions Explain Sensitivity to Dynamic Faces in the Superior Temporal Sulcus,,2015
+28c9198d30447ffe9c96176805c1cd81615d98c8,No evidence that a range of artificial monitoring cues influence online donations to charity in an MTurk sample,,2016
+28ce99940265407517faf7c45755675054ef78c4,Distinct facial expressions represent pain and pleasure across cultures,,2018
+28858a6e956d712331986b31d1646d6b497ff1a9,Independent Neural Computation of Value from Other People's Confidence.,The Journal of neuroscience : the official journal of the Society for Neuroscience,2017
+17a85799c59c13f07d4b4d7cf9d7c7986475d01c,Extending Procrustes Analysis: Building Multi-view 2-D Models from 3-D Human Shape Samples,,2015
+176bd61cc843d0ed6aa5af83c22e3feb13b89fe1,Investigating Spontaneous Facial Action Recognition through AAM Representations of the Face,,2007
+17dea513763c57dcd0e62085045fb5be6770c600,"Dynamic thread mapping for high-performance, power-efficient heterogeneous many-core systems",2013 IEEE 31st International Conference on Computer Design (ICCD),2013
+175e9bb50cc062c6c1742a5d90c8dfe31d2e4e22,Where to Look: Focus Regions for Visual Question Answering,2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR),2016
+17db741725b9f8406f69b27a117e99bee1a9a323,Person Re-identification with a Body Orientation-Specific Convolutional Neural Network,Unknown,2018
+17e769ef3d86e74c21f2616c7f7a6f20a4e2fbaa,Bag of Machine Learning Concepts for Visual Concept Recognition in Images,,2013
+7b4e0a98dcb4ba34afcc5901f51384ba727473a0,Introduction to Emotion Recognition,Unknown,2014
+7b3231245a3d518085c8e747e2c2232963f49bc5,Tracking millions of humans in crowded space in crowded spaces,,2017
+7b4d985d03ebf8465757877f0eeaea00fa77676b,Dyadic Dynamics: The Impact of Emotional Responses to Facial Expressions on the Perception of Power,,2018
+7bc8d81a38899b60704681125ec4fc584a3e7ba4,Look me in the eyes: constraining gaze in the eye-region provokes abnormally high subcortical activation in autism,,2017
+7bdab6e725ab1bbf8fcd6d7c451f6c4cc215ada9,Complex Wavelet Transform-Based Face Recognition,EURASIP J. Adv. Sig. Proc.,2008
+7b45aa509184b05064eafb362f80ba5778566a4e,High-Level Interpretation of Urban Road Maps Fusing Deep Learning-Based Pixelwise Scene Segmentation and Digital Navigation Maps,Unknown,2018
+7b9ebcc8b9c05ef661182fe73438b7725584817d,Restoring effects of oxytocin on the attentional preference for faces in autism,,2017
+8f6d05b8f9860c33c7b1a5d704694ed628db66c7,Non-linear dimensionality reduction and sparse representation models for facial analysis. (Réduction de la dimension non-linéaire et modèles de la représentations parcimonieuse pour l'analyse du visage),Unknown,2014
+8f5a2750f7ed015efa85887db3f6c6d2c0cb7b11,Social perception in synaesthesia Social perception in synaesthesia for colour,Unknown,2018
+8f08b2101d43b1c0829678d6a824f0f045d57da5,Supplementary Material for: Active Pictorial Structures,,2015
+8f81eb82cd046891c88163bc7b472dcc779f5f08,TokyoTechCanon at TRECVID 2012,,2012
+8f5566fa00f8c79f4720e14084489e784688ab0b,The role of the amygdala in atypical gaze on emotional faces in autism spectrum disorders.,The Journal of neuroscience : the official journal of the Society for Neuroscience,2012
+8f48b2da711417d1f1f39069501577c84abb8d37,Elevated amygdala response to faces and gaze aversion in autism spectrum disorder.,Social cognitive and affective neuroscience,2014
+8fa290b5d92c1f427edb62d29988056383e02047,Absence of preferential unconscious processing of eye contact in adolescents with autism spectrum disorder.,Autism research : official journal of the International Society for Autism Research,2014
+8a91cb96dd520ba3e1f883aa6d57d4d716c5d1c8,Low Cost Eye Tracking: The Current Panorama,,2016
+8a7bd4202e49fcdb947d71c9f2da0e7a953c7021,Privacy and security assessment of biometric template protection,,2012
+8a7726e58c2e24b0a738b48ae35185aaaacb8fe9,PILOT ASSESSMENT OF NONVERBAL PRAGMATIC ABILITY IN PEOPLE WITH ASPERGER SYNDROME Introduction,,2014
+8a54f8fcaeeede72641d4b3701bab1fe3c2f730a,What do you think of my picture? Investigating factors of influence in profile images context perception,,2015
+8aff946f5d678f689cc9476e48d8b122671205ae,"Neuron numbers increase in the human amygdala from birth to adulthood, but not in autism",,2018
+8a722c17e6bda2df13f03ca522119f4c8b5bfff8,Connecting Missing Links: Object Discovery from Sparse Observations Using 5 Million Product Images,,2012
+8aa5f1b2639da73c2579ea9037a4ebf4579fdc4f,A Steerable multitouch Display for Surface Computing and its Evaluation,International Journal on Artificial Intelligence Tools,2013
+8aef5b3cfc80fafdcefc24c72a4796ca40f4bc8b,Person Re-Identification by Support Vector Ranking,,2010
+7ed2c84fdfc7d658968221d78e745dfd1def6332,Evaluation of linear combination of views for object recognition on real and synthetic datasets,,2007
+7ef0cc4f3f7566f96f168123bac1e07053a939b2,Triangular Similarity Metric Learning: a Siamese Architecture Approach. ( L'apprentissage de similarité triangulaire en utilisant des réseaux siamois),Unknown,2016
+7e18b5f5b678aebc8df6246716bf63ea5d8d714e,Increased Loss Aversion in Unmedicated Patients with Obsessive–Compulsive Disorder,,2017
+7ec7163ec1bc237c4c2f2841c386f2dbfd0cc922,Skiing and Thinking About It: Moment-to-Moment and Retrospective Analysis of Emotions in an Extreme Sport,,2018
+7ef44b7c2b5533d00001ae81f9293bdb592f1146,Détection des émotions à partir de vidéos dans un environnement non contrôlé Detection of emotions from video in non-controlled environment,Unknown,2003
+7e51a42049193726e9ac547b76e929d803e441f3,Holistic processing of the mouth but not the eyes in developmental prosopagnosia.,Cognitive neuropsychology,2012
+101d1cff1aa5590a1f79bc485cbfec094a995f74,Persuasive Faces: Generating Faces in Advertisements (Supplementary Material),Unknown,2018
+10ffdfdbc0aafb89d94528f359425de0c7a81986,Interacting HiddenMarkovModels for Video Understanding,Unknown,2018
+10ce3a4724557d47df8f768670bfdd5cd5738f95,Fisher Light-Fields for Face Recognition across Pose and Illumination,,2002
+1037664753b281543ce300fed0852a64d24334ba,Binary - Feature Based Recognition and Cryptographic Key Generation from Face Biometrics,,2007
+190d8bd39c50b37b27b17ac1213e6dde105b21b8,Mining Weakly Labeled Web Facial Images for Search-Based Face Annotation,IEEE Transactions on Knowledge and Data Engineering,2011
+19a30ad283f2ab2d84f1c666d17492da14056d75,Visuomotor Coordination in Reach-To-Grasp Tasks: From Humans to Humanoids and Vice Versa,,2015
+19da9f3532c2e525bf92668198b8afec14f9efea,Challenge: Face verification across age progression using real-world data,,2011
+4c293a98e929edaff6ed70c22a844c04e604e9fc,Clustering by fast search and merge of local density peaks for gene expression microarray data,,2017
+4cf17bca0e19070fbe9bb25644787f65fa6ebe1a,Human Pose Estimation,,2014
+4c56f119ebf7c71f2a83e4d79e8d88314b8e6044,An other-race effect for face recognition algorithms,TAP,2011
+4cfa2fe87c250534fd2f285c2300e7ca2cd9e325,"Visual, Auditory, and Cross Modal Sensory Processing in Adults with Autism: An EEG Power and BOLD fMRI Investigation",Frontiers in human neuroscience,2016
+269248eb8a44da5248cef840f7079b1294dbf237,The Effect of Computer-Generated Descriptions on Photo-Sharing Experiences of People with Visual Impairments,PACMHCI,2017
+2677a79b6381f3e7787c5dca884fa53d0b28dfe2,Supplementary Document : Single-Shot Multi-Person 3 D Pose Estimation From Monocular RGB 1,Unknown,2018
+268e91262c85ff1ce99dfc5751e2b6e44c808325,Frequency Domain Face Recognition,,2007
+265644f1b6740ca34bfbe9762b90b33021adde62,Deep Learning in Medical Imaging: General Overview.,Korean journal of radiology,2017
+269c1f9df4a36b361d32bfdc81457b0a32b60966,Dimensionality Reduction of Visual Features for Efficient Retrieval and Classification,,2016
+2670c4b556264605c32326f49ab4a8b4e83ab57f,Looking ahead: Anticipatory cueing of attention to objects others will look at.,Cognitive neuroscience,2016
+2663fa2f1777dc779a73d678c7919cce37b5fb61,Relevance - Weighted ( 2 D ) 2 LDA Image Projection Technique for Face Recognition,,
+26c884829897b3035702800937d4d15fef7010e4,Facial Expression Recognition by Supervised Independent Component Analysis Using MAP Estimation,IEICE Transactions,2008
+26919156cec1cc5bec03f63f566c934b55b682cd,From Pictorial Structures to deformable structures,2012 IEEE Conference on Computer Vision and Pattern Recognition,2012
+21679eb7e953bd132803703c27dcd56484d497e6,"Autism, oxytocin and interoception",Unknown,2014
+21a2f67b21905ff6e0afa762937427e92dc5aa0b,Extra Facial Landmark Localization via Global Shape Reconstruction,,2017
+21f5652d4f88ac039c58aa530328e65a39eb7b38,Neural Processing of Facial Identity and Emotion in Infants at High-Risk for Autism Spectrum Disorders,,2013
+2155739f578e33449546f45a0b4cf64dbd614025,what is facereader ?,,
+21bebef8ced5d1e77667c667b54287782556eebc,Image processing and recognition for biological images,,2013
+4dade6faf6d5d6db53d5bcb2e107311da1ad48ac,Facial Expression Biometrics Using Statistical Shape Models,EURASIP J. Adv. Sig. Proc.,2009
+4d6e7d73f5226142ffc42b4e8380882d5071e187,Discretion Within Constraint: Homophily and Structure in a Formal Organization,Organization Science,2013
+4d803109f3d9cca7c514db21a0494972d5681faa,Attribute Adaptation for Personalized Image Search,2013 IEEE International Conference on Computer Vision,2013
+4d231311cdfe3aba13766bd0b358d4db0a9af3d3,Processing and Recognising Faces in 3D Images,Unknown,2018
+75aef130afb8c862575d457db6e168e8d77ae4f0,Content-based search and browsing in semantic multimedia retrieval,,2006
+7574f999d2325803f88c4915ba8f304cccc232d1,Transfer Learning for Cross-Dataset Recognition: A Survey,Unknown,2017
+758572c5779a47e898caff7232af76eda253163b,Csr: Medium: Collaborative Research: Architecture and System Support for Power-agile Computing,,2015
+7538ad235caf4dbc64a8b94a6146e1212d4de1ff,Amygdala dysfunction in men with the fragile X premutation.,Brain : a journal of neurology,2007
+75b20672a6290a8e2769ba0226d9187c0ccd5843,Development of response inhibition in the context of relevant versus irrelevant emotions,,2013
+812725dc3968aaff6429ec7c3f44ba1ca2116013,Acoplamiento de micro multitudes para el desarrollo de videojuegos controlados por movimiento,Research in Computing Science,2014
+8145ff6adab3397a5ac52cc62a7c53dae59763db,ERP responses differentiate inverted but not upright face processing in adults with ASD.,Social cognitive and affective neuroscience,2012
+81c03eda1d175fbe351980ac4cffe42c5dec47b0,User observation & dataset collection for robot training,,2011
+816eff5e92a6326a8ab50c4c50450a6d02047b5e,fLRR: Fast Low-Rank Representation Using Frobenius Norm,,2014
+86c5478f21c4a9f9de71b5ffa90f2a483ba5c497,"Kernel Selection using Multiple Kernel Learning and Domain Adaptation in Reproducing Kernel Hilbert Space, for Face Recognition under Surveillance Scenario",CoRR,2016
+86e5f81bde496549e9df2b1abdef0879a3135adb,The Visual QA Devil in the Details: The Impact of Early Fusion and Batch Norm on CLEVR,CoRR,2018
+86cdc6ae46f53ac86b9e0ace2763c5fe15633055,Experimental Force-Torque Dataset for Robot Learning of Multi-Shape Insertion,CoRR,2018
+861f4aac1178bf1c4dd1373dbf2794be54c195d4,Survey of Image Processing Techniques for Brain Pathology Diagnosis: Challenges and Opportunities,Unknown,2018
+72a87f509817b3369f2accd7024b2e4b30a1f588,Fault diagnosis of a railway device using semi-supervised independent factor analysis with mixing constraints,Pattern Analysis and Applications,2011
+72903a6b9894f13facf46a81bd7b659740b488e5,Worldwide AI,,2012
+72b34e9536665f776b0f282ddb63120afa21c84e,An experimental examination of catastrophizing-related interpretation bias for ambiguous facial expressions of pain using an incidental learning task,,2014
+7249b263d0a84d2d9d03f2f7b378778d129f9af9,Research Statement Research Focus,Unknown,2012
+72edfb91e4b3d42547591be9e8c6eb07e7190499,Do Children See in Black and White? Children's and Adults' Categorizations of Multiracial Individuals.,Child development,2015
+72944b4266523effe97708bff89e1d57d6aebf50,"A Multi-Sensory, Automated and Accelerated Sensory Integration Program",,2016
+7240aad3fa4adf65e401345c877ee58a01b76fb1,A new theoretical approach to improving face recognition in disorders of central vision: face caricaturing.,Journal of vision,2014
+44054c64ae7ee16a8a8348bb57345aae95a8ddae,Social Orienting and Attention Is Influenced by the Presence of Competing Nonsocial Information in Adolescents with Autism,,2016
+44442a26062c20dab7db4a9862349b598efca119,Modelling errors in a biometric re-identification system,IET Biometrics,2015
+449b87347fe7f9c3f17e969fab1617fbfd9ccb1b,Flat vs. Expressive Storytelling: Young Children’s Learning and Retention of a Social Robot’s Narrative,,2017
+44fbbd3def64d52c956277628a89aba77b24686b,Context Modulates Congruency Effects in Selective Attention to Social Cues,,2018
+2a218c17944d72bfdc7f078f0337cab67536e501,Detection bank: an object detection based video representation for multimedia event recognition,,2012
+2a12c72b0328a23b0d7ea63db1f93abf3054beec,Extended Feature Descriptor and Vehicle Motion Model with Tracking-by-Detection for Pedestrian Active Safety,IEICE Transactions,2014
+2a3768ac4f6b3bfbcce4001c0c2fd35cfcc7679d,Face Recognition with Variation in Pose Angle Using Face Graphs,,2009
+2af9ee8ee3ab4a89ae0098a1f9caa1aa9dad4e8a,2D and 3D Pose Recovery from a Single Uncalibrated Video - A View and Activity Independent Framework,,2011
+2a5903bdb3fdfb4d51f70b77f16852df3b8e5f83,The Effect of Computer-Generated Descriptions on Photo-Sharing Experiences of People with Visual Impairments,PACMHCI,2017
+2a87f95e36938ca823b33c72a633d8d902d5cb86,Oxytocin Improves “Mind-Reading” in Humans,,2006
+2aa06417fd361832df384cf7c003ed1d3c5ee8df,Learning people detection models from few training samples,CVPR 2011,2011
+2a6327a8bdbd31e2c08863b96c4f09245db8cab7,Targets ' facial width-to-height ratio biases pain judgments ☆,Unknown,2017
+2f23f7d08c7b8670289cfedd1e571f44a3bace8b,Contextual Information and Covariance Descriptors for People Surveillance: An Application for Safety of Construction Workers,EURASIP J. Image and Video Processing,2011
+2f3f4e0c8a9c63e714a10a6711c67f5e84e4c7c1,IoT Based Embedded Smart Lock Control System,Unknown,2016
+2fa04fc0bcbc92886902a62dbf538c490084efa4,Visual field bias in hearing and deaf adults during judgments of facial expression and identity,,2013
+43694e7d5861a8bc8aa5884dba3efe2d387511c6,Supplementary Material: Annotating Object Instances with a Polygon-RNN,,2017
+432be99dde7d93001044048501c72c70e4ea2927,People and Mobile Robot Classification Through Spatio-Temporal Analysis of Optical Flow,IJPRAI,2015
+431f013143de3159c0c0033fee2fb4840d213b6f,Preferential attention to animals and people is independent of the amygdala.,Social cognitive and affective neuroscience,2015
+43d4927f5113c5e376ab05d41e33063a6d06d727,Pedestrian Detection: Exploring Virtual Worlds,,2012
+886fc74b943011ce5ce192ff98d6ea9dcac7ef11,Atypical scanpaths in schizophrenia: evidence of a trait- or state-dependent phenomenon?,Journal of psychiatry & neuroscience : JPN,2011
+88dc2b2f6d033b290ed56b844c98c3ee6efde80b,Experimental manipulation of face-evoked activity in the fusiform gyrus of individuals with autism.,Social neuroscience,2011
+88e2574af83db7281c2064e5194c7d5dfa649846,A Robust Shape Reconstruction Method for Facial Feature Point Detection,,2017
+88a0ff6b180703a2d90bc86b40520e35a08fe02c,The Normalized Distance Preserving Binary Codes and Distance Table,J. Inf. Sci. Eng.,2017
+8856fbf333b2aba7b9f1f746e16a2b7f083ee5b8,Analyzing animal behavior via classifying each video frame using convolutional neural networks,,2015
+887cd2271ca5a58501786d49afa53139f48c66f3,"Visual orienting in children with autism: Hyper‐responsiveness to human eyes presented after a brief alerting audio‐signal, but hyporesponsiveness to eyes presented without sound",,2017
+88132a786442ab8a5038d81164384c1c1f7231c8,Limited attentional bias for faces in toddlers with autism spectrum disorders.,Archives of general psychiatry,2010
+9f22e0749405dfc3e3211474b933aa7514722e4b,Theory of mind - not emotion recognition - mediates the relationship between executive functions and social functioning in patients with schizophrenia.,Psychiatria Danubina,2018
+9f6d04ce617d24c8001a9a31f11a594bd6fe3510,Attentional bias towards angry faces in trait-reappraisal,,2011
+9f61362052e7675b3053a9d1b682ad917ce0e3d1,Social relevance drives viewing behavior independent of low-level salience in rhesus macaques,,2014
+9fb1bd7d98a2fa79e1b9cb21b865ec7af0c1283f,Not All Distraction Is Bad: Working Memory Vulnerability to Implicit Socioemotional Distraction Correlates with Negative Symptoms and Functional Impairment in Psychosis,,2014
+9fdfe1695adac2380f99d3d5cb6879f0ac7f2bfd,Active Tracking and Cloning of Facial Expressions Using Spatio-Temporal Information,Unknown,2002
+6b7f7817b2e5a7e7d409af2254a903fc0d6e02b6,Feature Extraction through Cross-Phase Congruency for Facial Expression Analysis,IJPRAI,2009
+6b17b219bd1a718b5cd63427032d93c603fcf24f,Videos from the 2013 Boston Marathon: An Event Reconstruction Dataset for Synchronization and Localization,,2017
+07d49098ada2d8e1ca0608c70e559dd517ca3432,Modélisation de contextes pour l'annotation sémantique de vidéos. (Context based modeling for video semantic annotation),Unknown,2013
+073c9ec4ff069218f358b9dd8451a040cf1a4a82,Object Classification and Detection in High Dimensional Feature Space,,2013
+38eb71578f82477f4b032481bd401f19f14eaf25,Efficient Resource-constrained Retrospective Analysis of Long Video Sequences,,2009
+3885cfd634c025c6e27c4db8211d72f54f864f90,Implications of holistic face processing in autism and schizophrenia,,2013
+381d15951b5beb2456ac016ac7f15fd27aa07d1c,"The prodrome of autism: early behavioral and biological signs, regression, peri- and post-natal development and genetics.","Journal of child psychology and psychiatry, and allied disciplines",2010
+3859d584d3fb794c2b74b42f0f195d16ce8e3820,Combining Recognition and Geometry for Data - Driven 3 D Reconstruction,,2013
+3896c62af5b65d7ba9e52f87505841341bb3e8df,Face Recognition from Still Images and Video,,2011
+38ea19546355e41ee1d57febc07613e7d3122607,Dynamic Functional Brain Connectivity for Face Perception,,2015
+00c4325c669c52db182390b2ab4a2b9c20f06b8d,A False Trail to Follow: Differential Effects of the Facial Feedback Signals From the Upper and Lower Face on the Recognition of Micro-Expressions,,2018
+009cd18ff06ff91c8c9a08a91d2516b264eee48e,Face and Automatic Target Recognition Based on Super-Resolved Discriminant Subspace,,2012
+00d14af37bc75b6477b4846f6ab561cdc89c96a2,"UvA-DARE ( Digital Academic Repository ) Infants ’ Temperament and Mothers ’ , and Fathers ’ Depression Predict Infants ’ Attention to Objects Paired with Emotional",Unknown,2018
+00e39fad9846084eb435b6cddd675ee11f2dfb90,Person Re-identification Using Haar-based and DCD-based Signature,2010 7th IEEE International Conference on Advanced Video and Signal Based Surveillance,2010
+000cd8d20d91ded078949dfcde76817221ea96c8,Learning Visual Attributes from Image and Text,,2015
+002d1619748a99aa683b5c30b7eafebdfe6adfc4,Nearest feature line embedding for face hallucination,,2013
+00796052277d41e2bb3a1284d445c1747aed295f,Performance and Energy Consumption Characterization and Modeling of Video Decoding on Multi-core Heterogenous SoC and their Applications,,2015
+6ed738ff03fd9042965abdfaa3ed8322de15c116,K-MEAP: Generating Specified K Clusters with Multiple Exemplars by Efficient Affinity Propagation,2014 IEEE International Conference on Data Mining,2014
+6ee1f57cbf7daa37576efca7e7d24040a5c94ee2,Multimodal Neural Network for Overhead Person Re-Identification,2017 International Conference of the Biometrics Special Interest Group (BIOSIG),2017
+6eb8e193687c16f0edc3742d3549ad175ef648d1,Working memory load disrupts gaze-cued orienting of attention,,2015
+6e209d7d33c0be8afae863f4e4e9c3e86826711f,Weakly-supervised segmentation by combining CNN feature maps and object saliency maps,2016 23rd International Conference on Pattern Recognition (ICPR),2016
+6e7d799497b94954dc4232d840628c3a00263e42,Deep Multimodal Pain Recognition: A Database and Comparision of Spatio-Temporal Visual Modalities,,2018
+6ee64c19efa89f955011531cde03822c2d1787b8,Table S1: Review of Existing Facial Expression Databases That Are Often Used in Social Psycholgy,,
+6eece104e430829741677cadc1dfacd0e058d60f,Use of Automated Facial Image Analysis for Measurement of Emotion Expression,,2004
+6e93fd7400585f5df57b5343699cb7cda20cfcc2,Comparing a novel model based on the transferable belief model with humans during the recognition of partially occluded facial expressions.,Journal of vision,2009
+6e80ad43c5f383c1d87b1ced2a336fe5cd44e044,Faster R-CNN for Robust Pedestrian Detection Using Semantic Segmentation Network,,2018
+6eb5db8e6a79ad59bf4f4a5fccdd5b10237408d7,Cross Talk: The Microbiota and Neurodevelopmental Disorders,,2017
+9ac82909d76b4c902e5dde5838130de6ce838c16,Recognizing Facial Expressions Automatically from Video,,2010
+9a3535cabf5d0f662bff1d897fb5b777a412d82e,Large-scale geo-facial image analysis,EURASIP J. Image and Video Processing,2015
+9a7b7d61481e3a5bca1ef809358d46ac87405f67,Neural circuitry of emotional face processing in autism spectrum disorders.,Journal of psychiatry & neuroscience : JPN,2010
+9a88d23234ee41965ac17fc5774348563448a94d,3021977 GI P_212 Cover.indd,,2013
+9a08459b0cb133f0f4352c58225446f9dc95ecc4,Metadata of the chapter that will be visualized in SpringerLink,Unknown,2015
+9a5a1763e0342d41cb1d1eef18a007be6e8dba89,Image Annotation with Discriminative Model and Annotation Refinement by Visual Similarity Matching,,2011
+9adbbd9dadaf7b15bb585555e7a2e2223e711296,Identity information content depends on the type of facial movement,,2016
+36d8cc038db71a473d0c94c21f2b68a840dff21c,Unsupervised Detector Adaptation by Joint Dataset Feature Learning,,2014
+36fe39ed69a5c7ff9650fd5f4fe950b5880760b0,Tracking von Gesichtsmimik mit Hilfe von Gitterstrukturen zur Klassifikation von schmerzrelevanten Action Units,,2010
+36d487129fd0b828255e417e0d10cf13d7f525cf,Reduced functional integration and segregation of distributed neural systems underlying social and emotional information processing in autism spectrum disorders.,Cerebral cortex,2012
+3674f3597bbca3ce05e4423611d871d09882043b,Facial Expression Spacial Charts for Describing Dynamic Diversity of Facial Expressions,Journal of Multimedia,2012
+36f039e39efde3558531b99d85cd9e3ab7d396b3,Efficiency of Recognition Methods for Single Sample per Person Based Face Recognition,,2011
+365b72a225a18a930b96e7c0b215b9fede8a0968,Storyline Reconstruction for Unordered Images,,2016
+361d6345919c2edc5c3ce49bb4915ed2b4ee49be,Models for supervised learning in sequence data,Unknown,2018
+5cc9fdd3a588f6e62e46d7884c1dbeef92a782f2,Spontaneous attention to faces in Asperger syndrome using ecologically valid static stimuli.,Autism : the international journal of research and practice,2013
+5c7db2907c586f4f2d6ae5937b0dc0f4d1bc834a,Deliverable D2.1 Audio-visual Algorithms for Person Tracking and Characterization (baseline),,2017
+5c81048593a6729b2d0b948a1129a97bdbf82f11,Moving Object Localization Using Optical Flow for Pedestrian Detection from a Moving Vehicle,,2014
+5c8672c0d2f28fd5d2d2c4b9818fcff43fb01a48,Robust Face Detection by Simple Means,,2012
+5c48f97a8a8217025abafeababaef6288fd7ded6,Model syndromes for investigating social cognitive and affective neuroscience: a comparison of Autism and Williams syndrome.,Social cognitive and affective neuroscience,2006
+5ce40105e002f9cb428a029e8dec6efe8fad380e,Co-design of architectures and algorithms for mobile robot localization and model-based detection of obstacles. (Co-conception d'architectures et d'algorithmes pour la localisation de robots mobiles et la détection d'obstacles basée sur des modèles),Unknown,2017
+09d03b792923695deb0492d8fc3582a50e5f1a1e,Band-Sifting Decomposition for Image-Based Material Editing,ACM Trans. Graph.,2015
+0947c7c46943ebbb6a4b5c795c9b54552c8e0b5a,"QMAS: Querying, Mining and Summarization of Multi-modal Databases",2010 IEEE International Conference on Data Mining,2010
+090b3189391f3e1917649b3a62696febbf0429e1,Taking the Perfect Selfie: Investigating the Impact of Perspective on the Perception of Higher Cognitive Variables,,2017
+09e3967a34cca8dc0f00c9ee7a476a96812a55e0,1 Machine Learning Methods for Social Signal Processing,,2014
+098a0bd7c948e9c94704ac5e8c768c8d430e1842,Cascaded Models for Articulated Pose Estimation,,2010
+5d1608e03ab9c529d0b05631f9d2a3afcbf1c3e3,Sparsity and Robustness in Face Recognition,CoRR,2011
+5d09d5257139b563bd3149cfd5e6f9eae3c34776,Pattern recognition with composite correlation filters designed with multi-objective combinatorial optimization,,2014
+5d5533b8b95f25f63e07786cf3e063c8db356f1f,Human Observers and Automated Assessment of Dynamic Emotional Facial Expressions: KDEF-dyn Database Validation,,2018
+31e0303d98fd1bb6a1074d4fe0b14228e91b388b,基於稀疏表示之語者識別 (Sparse Representation Based Speaker Identification) [In Chinese],,2014
+31786e6d5187d7bc41678cbd2d1bf8edf1ddfed9,Capture de mouvements humains par capteurs RGB-D. (Capture human motions by RGB-D sensor ),Unknown,2015
+3152e89963b8a4028c4abf6e1dc19e91c4c5a8f4,Exploring Stereotypes and Biased Data with the Crowd,CoRR,2018
+910da5e0afef96c8acca3c6a4314a9ab5121b1e4,Détection d'obstacles multi-capteurs supervisée par stéréovision. (Multi-sensor road obstacle deetection controled by stereovision),Unknown,2008
+65b737e5cc4a565011a895c460ed8fd07b333600,Transfer Learning for Cross-Dataset Recognition: A Survey,,2017
+65683bd97720bc18a022b23755b32c8c988e8d5c,Discovering social groups via latent structure learning.,Journal of experimental psychology. General,2018
+656a5d4d84c450792402b3c69eecbdbca4cad4cb,2.1. Imagenet and Related Datasets,,
+656b6133fd671f129fce0091a8dab39c97e604f2,Multiview Discriminative Geometry Preserving Projection for Image Classification,,2014
+628a3f027b7646f398c68a680add48c7969ab1d9,Plan for Final Year Project : HKU-Face : A Large Scale Dataset for Deep Face Recognition,Unknown,2017
+6226f2ea345f5f4716ac4ddca6715a47162d5b92,Object Detection: Current and Future Directions,Front. Robotics and AI,2015
+629722342f719ee413e9bb07072a2fc2b4f09a26,Gender Classification by Information Fusion of Hair and Face,,2008
+62dd66f9f4995cfdaafb479de50363ce0255b1bd,2 Feature Extraction Based on Wavelet Moments and Moment Invariants in Machine Vision Systems,Unknown,2012
+968b983fa9967ff82e0798a5967920188a3590a8,Children's recognition of disgust in others.,Psychological bulletin,2013
+968ab65077c4be1c1071120052b2e4b4f3d3c59a,"""Seeing is believing: the quest for multimodal knowledge"" by Gerard de Melo and Niket Tandon, with Martin Vesely as coordinator",SIGWEB Newsletter,2016
+96eacc464c0177efc4f802f220888c7f675f24af,Deep Semantic Face Deblurring Supplementary Material,Unknown,2018
+96723b42451c42ec396381596490143aac8f85cd,A Computer Vision Approach for the Eye Accessing Cue Model Used in Neuro-linguistic Programming,Unknown,2013
+966e36f15b05ef8436afecf57a97b73d6dcada94,Dimensionality Reduction using Relative Attributes,,2014
+96094b030013ca2d9b6d5a14b6f1fbbc57eb8a89,What is in that picture ? Visual Question Answering System,,2017
+3a53bad58f8467092477857ff9c2ae904d7108d2,Simultaneous perceptual and response biases on sequential face attractiveness judgments.,Journal of experimental psychology. General,2015
+3a1c3307f57ef09577ac0dc8cd8b090a4fe8091f,Thermal-to-visible face recognition using partial least squares.,"Journal of the Optical Society of America. A, Optics, image science, and vision",2015
+3acb6b3e3f09f528c88d5dd765fee6131de931ea,Novel representation for driver emotion recognition in motor vehicle videos,2017 IEEE International Conference on Image Processing (ICIP),2017
+3a27bdb9925d5b247868950a9575823b3194ac8b,Adaptation across the cortical hierarchy: low-level curve adaptation affects high-level facial-expression judgments.,The Journal of neuroscience : the official journal of the Society for Neuroscience,2008
+3af0400c011700f3958062edfdfed001e592391c,The Intense World Theory – A Unifying Theory of the Neurobiology of Autism,,2010
+3af28e9e9e883c235b6418a68bda519b08f9ae26,Implications of Adult Facial Aging on Biometrics,Unknown,2018
+5432392d916e730c53962be202c115133e6d7777,Face processing in a case of high functioning autism with developmental prosopagnosia.,Acta neurobiologiae experimentalis,2018
+54568bdce3405ffbe2a6f5820711f966e2d2faf3,How Do We Update Faces? Effects of Gaze Direction and Facial Expressions on Working Memory Updating,,2012
+545dc167a4879ce2d61836cb300479c305f8e096,Event-Centric Twitter Photo Summarization,,2014
+5495e224ac7b45b9edc5cfeabbb754d8a40a879b,Feature Reconstruction Disentangling for Pose-invariant Face Recognition Supplementary Material,,2017
+54756f824befa3f0c2af404db0122f5b5bbf16e0,Computer Vision — Visual Recognition,,2009
+9820920d4544173e97228cb4ab8b71ecf4548475,Automated facial coding software outperforms people in recognizing neutral faces as neutral from standardized datasets,,2015
+987dd3dd6079e5fa8a10a1c53b2580fd71e27ede,Concept-Based Video Retrieval By Cees,Unknown,2009
+9802885e39e0847374a2efae801b8b719c09c64c,"An Effective Two-Finger, Two-Stage Biometric Strategy for the US-VISIT Program",Operations Research,2009
+9899eb0ae24aa8c992244afe5f4455e9f96c1f18,"Characteristics of Brains in Autism Spectrum Disorder: Structure, Function and Connectivity across the Lifespan.",Experimental neurobiology,2015
+98960be5ae51d30118f091f7091299a49f2f34bb,Global and Feature Based Gender Classification of Faces: a Comparison of Human Performance and Computational Models,,2004
+98582edd6029c94844f5a40d246eaa86f74d8512,Learning Visual Scene Attributes,,2013
+98126d18be648640fc3cfeb7ffc640a2ec1d5f6f,Supplemental Material: Discovering Groups of People in Images,,2014
+98a60b218ff8addaf213e97e2f4b54d39e45f5b9,Benchmarking Real World Object Recognition,,2005
+984ecfbda7249e67eca8d9b1697e81f80e2e483d,Visual object categorization with new keypoint-based adaBoost features,2009 IEEE Intelligent Vehicles Symposium,2009
+5388638c7801b11958d937c89ece764bc769e298,Identity processing in multiple-face tracking.,Journal of vision,2009
+3fee5c6343c969f33a7db4c7f7da1e152effd911,Patterns of fixation during face recognition: Differences in autism across age.,Autism : the international journal of research and practice,2018
+3f600008dd9745e8357f5b7b3c1a69b8be6b7767,Atypical reflexive gaze patterns on emotional faces in autism spectrum disorders.,The Journal of neuroscience : the official journal of the Society for Neuroscience,2010
+3f4377109a92cf4e422b7e2ae95ef3144323ea72,Bridging the Gap Between Synthetic and Real Data,,2015
+303be881f6cd4907c5e357bc1bb5547d8ea1da5a,Individual Differences in the Recognition of Enjoyment Smiles: No Role for Perceptual–Attentional Factors and Autistic-Like Traits,,2011
+30e18a16d4c7092694d55743ff92965e5dec2692,"Hormonal contraceptives, menstrual cycle and brain response to faces.",Social cognitive and affective neuroscience,2014
+30256c10cb7ec139b4245855850998c39b297975,Functional magnetic resonance imaging of autism spectrum disorders,,2012
+30d21b5baf9514d26da749c6683c49b4fa55f2b5,Towards a unified account of face (and maybe object) processing,,2012
+307a810d1bf6f747b1bd697a8a642afbd649613d,An affordable contactless security system access for restricted area,,2016
+3073eff17368262d7c605bbcaf3b2fb015754d39,Voice conversion versus speaker verification: an overview,,2014
+301662c2a6ed86e48f21c1d24bfc67b403201b0c,Repetition Suppression in Ventral Visual Cortex Is Diminished as a Function of Increasing Autistic Traits,,2015
+30f84c48bdf2f6152075dd9651a761a84b2f2166,"No fear, no panic: probing negation as a means for emotion regulation.",Social cognitive and affective neuroscience,2013
+3083d2c6d4f456e01cbb72930dc2207af98a6244,Perceived Age Estimation from Face Images,,2011
+30f49d6595359a4a18c728ec83f99346d1e16348,Intact Reflexive but Deficient Voluntary Social Orienting in Autism Spectrum Disorder,,2015
+5eee9c417157916ee66689718af65965c423b2b7,Autism and Asperger’s Syndrome: A Cognitive Neuroscience Perspective,,2009
+5e74d92d841d1bc1c9c2d80219f98bf892f239c4,Developmental changes in face visual scanning in autism spectrum disorder as assessed by data-based analysis,,2015
+5eae1a3e0dfd0834be6a003b979bf5b3dc923453,"Far-Field, Multi-Camera, Video-to-Video Face Recognition",,2007
+5eefe98aafffe665b19de515e3ba90c9c0b7219c,Trimmed Event Recognition Submission to ActivityNet Challenge 2018,Unknown,
+5e0e516226413ea1e973f1a24e2fdedde98e7ec0,The Invariance Hypothesis and the Ventral Stream,,2013
+5be6340c55d4a45e96e811bdeac3972328ca9247,People Identification and Tracking Through Fusion of Facial and Gait Features,,2014
+5b6bdf478860b1e3f797858e71abd14f98684b61,Distributed neural computation for the visual perception of motion. (Calcul neuronal distribué pour la perception visuelle du mouvement),,2011
+5b94093939ac42aba54ab41eb1725aeba1bd5c34,RGB-D Segmentation of Poultry Entrails,,2016
+5bc0a89f4f73523967050374ed34d7bc89e4d9e1,The role of emotion transition for the perception of social dominance and affiliation.,Cognition & emotion,2016
+3765df816dc5a061bc261e190acc8bdd9d47bec0,Presentation and validation of the Radboud Faces Database,Unknown,2010
+37a23e76674e606ce779131d2c93496e8a53bb2f,The discrete cosine transform (DCT) plus local normalization: a novel two-stage method for de-illumination in face recognition,,2012
+081189493ca339ca49b1913a12122af8bb431984,Supplemental Material for Photorealistic Facial Texture Inference Using Deep Neural Networks,,2017
+0834dff6e1d37ecb36137e019f8e2c933d5e74f6,Building Part-Based Object Detectors via 3D Geometry,2013 IEEE International Conference on Computer Vision,2013
+08e3a0f80f10fc40cc1c043cbc4c873a76a6f6e8,Enhanced Pavlovian aversive conditioning to positive emotional stimuli.,Journal of experimental psychology. General,2018
+081286ede247c5789081502a700b378b6223f94b,Neural Correlates of Facial Mimicry: Simultaneous Measurements of EMG and BOLD Responses during Perception of Dynamic Compared to Static Facial Expressions,,2018
+083ac08287af7df220d88dca2fbf5b1812e35ee8,Abnormal functional connectivity in autism spectrum disorders during face processing.,Brain : a journal of neurology,2008
+6d84d92d9ed6c226f0cc6401bc425a23432c9f96,Autism spectrum disorders: clinical and research frontiers.,Archives of disease in childhood,2008
+6d432962055a8c521e6b388d5a0a2140a0019a5e,Sensor network reconfiguration and big multimedia data fusion for situational awareness in smart environments,,2014
+6d8612f7856f569f5635ff07a6b94480a9c7c284,Ensemble perception of emotions in autistic and typical children and adolescents,,2017
+6d7ba173121edd5defadfde04f7c1e7bc72859c2,The study of autism as a distributed disorder.,Mental retardation and developmental disabilities research reviews,2007
+01cc8a712e67384f9ef9f30580b7415bfd71e980,Failing to ignore: paradoxical neural effects of perceptual load on early attentional selection in normal aging.,The Journal of neuroscience : the official journal of the Society for Neuroscience,2010
+01ababc0985143ad57320b0599fb2f581d79d3c2,Unobtrusive Low Cost Pupil Size Measurements using Web cameras,CoRR,2013
+01018a509f32601e1bbf7f0159aad1a513e23f92,Computers in the Human Interaction Loop,,2009
+06b4522433beca98aea99f924fbaeb8f861df8cd,Selection and combination of local Gabor classifiers for robust face verification,"2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)",2012
+06f8aa1f436a33014e9883153b93581eea8c5c70,Leaving Some Stones Unturned: Dynamic Feature Prioritization for Activity Detection in Streaming Video,,2016
+06e768d74f076b251d53b0c86fc9910d7243bdc6,Effective and efficient visual description based on local binary patterns and gradient distribution for object recognition,Unknown,2016
+066d71fcd997033dce4ca58df924397dfe0b5fd1,Iranian Face Database and Evaluation with a New Detection Algorithm,,2007
+06f7e0aee7fc5807ab862432a4e5ade2cda73c4b,Flowing ConvNets for Human Pose Estimation in Videos,2015 IEEE International Conference on Computer Vision (ICCV),2015
+0630b3677323c8c987f16f37545ac6073293de8d,Enhancement and stylization of photographs by Vladimir Leonid,,2013
+066476a38f8751696f5f7b47c0fb7f1d8ecdac1a,Automatic adaptation of a generic pedestrian detector to a specific traffic scene,CVPR 2011,2011
+06f2df0ec9ab6968411e34f581dd8f5d40500d7f,The fusiform face area: a cortical region specialized for the perception of faces.,"Philosophical transactions of the Royal Society of London. Series B, Biological sciences",2006
+06680961e99aadb366968e5f515da58864ecd784,ENabler for Design Specifications FP 6 - IST - 2005 - 27916,,2006
+06c8fcb0429afd3aee153ba42e1fd8aa93f7214f,Social roles in hierarchical models for human activity recognition,2012 IEEE Conference on Computer Vision and Pattern Recognition,2012
+06f969d3858b6d14425fcbe7ff12b72e213ee240,Recognizing Cardiac Magnetic Resonance Acquisition Planes,,2014
+6c4e173fdafa89ac7b40e1dddf953dcc833db92d,Photometric Normalization for Face Recognition using Local discrete cosine Transform,IJPRAI,2013
+6c9ed3378dd53a5ad9e30613ba2e1ef363bd1f9d,Atoms of recognition in human and computer vision.,Proceedings of the National Academy of Sciences of the United States of America,2016
+6cfc337069868568148f65732c52cbcef963f79d,Audio-Visual Speaker Localization via Weighted Clustering Israel -,Unknown,2018
+6c70cad229cf3f02d3d490b42c7bd92c6eade1d1,Towards Good Practices on Building Effective CNN Baseline Model for Person Re-identification,CoRR,2018
+39db629b96eda72a23a49d54f32689e0651ca4ae,Applying artificial vision models to human scene understanding,,2015
+39bce1d5e4b31a555f12f0a44e92abcad73aab4f,"Explorer "" Here ' s looking at you , kid """,Unknown,2017
+39742f9b3a9f7adefbe936de68249148576b90da,Alcohol and remembering a hypothetical sexual assault: Can people who were under the influence of alcohol during the event provide accurate testimony?,Memory,2016
+39bbe9885ad1e12e79bc620d83f7768d2fc04994,Autism is characterized by dorsal anterior cingulate hyperactivation during social target detection.,Social cognitive and affective neuroscience,2009
+39b0bce87eec467adfe5bebcfe628ff5bd397fc7,"R4-A.2: Rapid Similarity Prediction, Forensic Search & Retrieval in Video",,2015
+397349476582198639abc7a8b933e350cbc24c37,2D&3D-ComFusFace: 2D and 3D Face Recognition by Scalable Fusion of Common Features,,2012
+992eca71ee8314ede9bf680b6966730f6bb77bc5,Likability’s Effect on Interpersonal Motor Coordination: Exploring Natural Gaze Direction,,2017
+99b41df501f25f4aee9c1f94a75510b2fbcc6bed,Title Impaired social brain network for processing dynamic facialexpressions in autism spectrum disorders,Unknown,2017
+9947687ffe0bd2d6cd4fe717e534cfcb59302a4e,Data-driven photographic style using local transfer,,2015
+992ebd81eb448d1eef846bfc416fc929beb7d28b,Exemplar-Based Face Parsing Supplementary Material,,2013
+9963af1199679e176f0836e6d63572b3a69fa7da,23 Generating Facial Expressions with Deep Belief Nets,Unknown,2008
+52f71cc9c312aa845867ad1695c25a6d1d94ba0e,The invariance assumption in process-dissociation models: an evaluation across three domains.,Journal of experimental psychology. General,2015
+52884a0c7913be319c1a2395f009cea47b03f128,Explorer Learning Grounded Meaning Representations with Autoencoders,Unknown,2015
+52b6df1fe810d36fd615eb7c47aa1fd29376e769,Graph Mining for Object Tracking in Videos,Unknown,2012
+52417b0406886154f0b4e2343ad6ac18c0484ec4,Ecological legacies of civil war: 35-year increase in savanna tree cover following wholesale large-mammal declines,,2015
+526ce5c72af5e1f93b8029a26e2eed7d1ac009f5,0 Constructing Kernel Machines in the Empirical Kernel Feature Space,Unknown,2018
+527dd9647c474490ac33ac5b0a19aa76b226610d,Intact perception but abnormal orientation towards face-like objects in young children with ASD,,2016
+52049fb96156729ce0ad88f86fa617ecf7d237e1,Book chapter for Machine Learning for Human Motion Analysis: Theory and Practice,,2011
+55ba5e4c07f6ecf827bfee04e96de35a170f7485,This Dissertation entitled MODELING THE HUMAN FACE THROUGH MULTIPLE VIEW THREE-DIMENSIONAL STEREOPSIS: A SURVEY AND COMPARATIVE ANALYSIS OF FACIAL RECOGNITION OVER MULTIPLE MODALITIES,,2006
+5592574c82eec9367e9173b7820ff329a27b6c21,Image Enhancement and Automated Target Recognition Techniques for Underwater Electro-Optic Imagery,Unknown,2007
+97bcf007516cb70d8cb17b7de6452aa06c4b9c76,GABAergic neurotransmission alterations in autism spectrum disorders,,2015
+978a219e07daa046244821b341631c41f91daccd,Emotional Intelligence: Giving Computers Effective Emotional Skills to Aid Interaction,,2008
+970e723404885e94e77780766b39ee951dd7abb3,Multimodal Learning of Geometry-Preserving Binary Codes for Semantic Image Retrieval,IEICE Transactions,2017
+630af2eb466fac956f9a43bf877be0eae6d80027,CariGANs: Unpaired Photo-to-Caricature Translation,Unknown,2018
+63344dee49a1ab7e27ac34eefc30fb948a0bf9bb,Geometry and Illumination Modelling for Scene Understanding,,2011
+0fd53d7e1ab8f42c710cb77b5ec4cc2b22158a4c,Combined Data Association and Evolving Particle Filter for Tracking of Multiple Articulated Objects,EURASIP J. Image and Video Processing,2011
+0f5e10cfca126682e1bad1a07848919489df6a65,Facial emotion processing in patients with social anxiety disorder and Williams-Beuren syndrome: an fMRI study.,Journal of psychiatry & neuroscience : JPN,2016
+0f5bf2a208d262aa0469bd3185f6e2e56acada81,Pose Estimation and Segmentation of People in 3D Movies,2013 IEEE International Conference on Computer Vision,2013
+0f829fee12e86f980a581480a9e0cefccb59e2c5,Bird Part Localization Using Exemplar-Based Models with Enforced Pose and Subcategory Consistency,2013 IEEE International Conference on Computer Vision,2013
+0f395a49ff6cbc7e796656040dbf446a40e300aa,The Change of Expression Configuration Affects Identity-Dependent Expression Aftereffect but Not Identity-Independent Expression Aftereffect,,2015
+0f42c64a74bc6e3e83821aa8ab5dd8e3a4b797cd,Controlled scanpath variation alters fusiform face activation.,Social cognitive and affective neuroscience,2007
+0f21a39fa4c0a19c4a5b4733579e393cb1d04f71,Evaluation of optimization components of a 3D to 2D landmark fitting algorithm for head pose estimation,Unknown,2018
+0f2ffd582674bd856247bc5482d85e6db3b49b8f,A neural signature of the creation of social evaluation.,Social cognitive and affective neuroscience,2014
+0a55e4191c90ec1edb8d872237a2dacd5f6eda90,"Intentional Minds: A Philosophical Analysis of Intention Tested through fMRI Experiments Involving People with Schizophrenia, People with Autism, and Healthy Individuals",,2011
+0a60e76e6983e1647469172a50907023913b0c9f,Longitudinal study of amygdala volume and joint attention in 2- to 4-year-old children with autism.,Archives of general psychiatry,2009
+0a814669f4a0198e46a3a0d91a1bbb81bb089216,"Deficits in facial, body movement and vocal emotional processing in autism spectrum disorders.",Psychological medicine,2010
+0a3a33b872c84dac88bcd6f5bd460ef03584e0f7,Abnormal Neural Activation to Faces in the Parents of Children with Autism.,Cerebral cortex,2015
+0a4fc9016aacae9cdf40663a75045b71e64a70c9,Illumination Normalization Based on Homomorphic Wavelet Filtering for Face Recognition,J. Inf. Sci. Eng.,2013
+0a85afebaa19c80fddb660110a4352fd22eb2801,Neural Animation and Reenactment of Human Actor Videos,CoRR,2018
+0ac664519b2b8abfb8966dafe60d093037275573,Facial action unit detection using kernel partial least squares,2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops),2011
+0acf23485ded5cb9cd249d1e4972119239227ddb,Dual coordinate solvers for large-scale structural SVMs,CoRR,2013
+641f0989b87bf7db67a64900dcc9568767b7b50f,Reconstructing faces from their signatures using RBF regression,Unknown,2013
+64753fe167a46208e28237fa98db8daedbef83e4,Normal social cognition in developmental prosopagnosia.,Cognitive neuropsychology,2009
+645766177de2ef61619572bc09ce239c232d7d5c,Is the left hemisphere androcentric? Evidence of the learned categorical perception of gender,,2015
+64782a2bc5da11b1b18ca20cecf7bdc26a538d68,Facial Expression Recognition using Spectral Supervised Canonical Correlation Analysis,J. Inf. Sci. Eng.,2013
+90ce227ec08053ea6acf9f9f9f53d8b7169574f2,An Introduction to Evaluating Biometric Systems,IEEE Computer,2000
+903210406f14a12b481524d543b14f16114797e2,Pretest of images for the beauty dimension,Unknown,2015
+90fbcea84f621ee5d73482c5cb02479778aecccd,Pose-Invariant Face Recognition via RGB-D Images,,2016
+90a754f597958a2717862fbaa313f67b25083bf9,A Review of Human Activity Recognition Methods,Front. Robotics and AI,2015
+bf961e4a57a8f7e9d792e6c2513ee1fb293658e9,Robust Face Image Matching under Illumination Variations,EURASIP J. Adv. Sig. Proc.,2004
+bf96a0f037e7472e4b6cb1dae192a5fedbbbd88a,Visual Listening In: Extracting Brand Image Portrayed on Social Media,Unknown,2018
+bffe37791ee7aa277ba6d7c5ff2cb9bddddea09f,Neural correlates of emotion processing during observed self-face recognition in individuals with autism spectrum disorders,Unknown,2017
+bfffcd2818a1679ac7494af63f864652d87ef8fa,Neural Importance Sampling,CoRR,2018
+bffbd04ee5c837cd919b946fecf01897b2d2d432,Facial Feature Tracking and Occlusion Recovery in American Sign Language,,2006
+d3e04963ff42284c721f2bc6a90b7a9e20f0242f,On Forensic Use of Biometrics,,2014
+d3faed04712b4634b47e1de0340070653546deb2,Neural Best-Buddies: Sparse Cross-Domain Correspondence,ACM Trans. Graph.,2018
+d37013e4ce0f5dd6b61a4ffadecc401274966602,Reading affect in the face and voice: neural correlates of interpreting communicative intent in children and adolescents with autism spectrum disorders.,Archives of general psychiatry,2007
+d41c11ebcb06c82b7055e2964914b9af417abfb2,CDI-Type I: Unsupervised and Weakly-Supervised Discovery of Facial Events,,2011
+d4001826cc6171c821281e2771af3a36dd01ffc0,Modélisation de contextes pour l'annotation sémantique de vidéos. (Context based modeling for video semantic annotation),,2013
+d4e4369babdba158bfdce1b605f92d6b1b665be4,The amygdala and the relevance detection theory of autism: an evolutionary perspective,,2013
+d4f8168242f688af29bcbbe1cc5aec7cd12a601c,Edinburgh Research Explorer Visually Grounded Meaning Representations,Unknown,2017
+d46a5bba21f897f1c4b3366dcb663820ef1c282d,Cerebral Hemodynamic Response to Faces,,2012
+d409d8978034de5e5e8f9ee341d4a00441e3d05f,Annual research review: re-thinking the classification of autism spectrum disorders.,"Journal of child psychology and psychiatry, and allied disciplines",2012
+bad7254ae08f8bf1305e70c7de28374f67f151fd,Ré-identification de personnes à partir des séquences vidéo. (Person re-identification from video sequence),Unknown,2014
+ba25c219b52d675b579941364ce6ee6700cea8e8,8D-THERMO CAM: Combination of Geometry with Physiological Information for Face Recognition,,2005
+ba8a99d35aee2c4e5e8a40abfdd37813bfdd0906,Uporaba emotivno pogojenega računalništva v priporočilnih sistemih,,2011
+bab47c7bf80c9310f947cbdaf71b3c983c497b68,Systematic Parameter Optimization and Application of Automated Tracking in Pedestrian Dominant Situations Date of submission : 2014-0801,Unknown,2014
+a022eff5470c3446aca683eae9c18319fd2406d5,Deep learning for semantic description of visual human traits. (Apprentissage profond pour la description sémantique des traits visuels humains),Unknown,2017
+a0f193c86e3dd7e0020c0de3ec1e24eaff343ce4,A New Classification Approach using Discriminant Functions,J. Inf. Sci. Eng.,2005
+a0e7f8771c7d83e502d52c276748a33bae3d5f81,Ensemble Nyström,,2012
+a0061dae94d916f60a5a5373088f665a1b54f673,Lensless computational imaging through deep learning,CoRR,2017
+a758b744a6d6962f1ddce6f0d04292a0b5cf8e07,"Study on Human Face Recognition under Invariant Pose, Illumination and Expression using LBP, LoG and SVM",Unknown,2017
+a7e5c01e3dca9284f8acffad750cdbb29689d3fb,Introduction to the special issue on learning semantics,Machine Learning,2013
+a760ce8baddf2da7946d2ed6f02ac3927f39a9da,Face Recognition Using a Unified 3D Morphable Model,Unknown,2016
+a77008329c785c0d5d4dcb3d9c79073df85a9b4e,Neural codes of seeing architectural styles,,2017
+b8b46df1b013c30d791972ee109425a94e3adc06,"Automaticity, Control, and the Social Brain",,2014
+b8471908880c916ebc70ac900e9446705ed258f4,Transitional and translational studies of risk for anxiety.,Depression and anxiety,2011
+b13b6e3dfdf6d708a923c547113d99047f1a0374,Neural activation to emotional faces in adolescents with autism spectrum disorders.,"Journal of child psychology and psychiatry, and allied disciplines",2011
+b13254c2c9ca90f57e385d34abc7fe78d74e5222,Real-Time Multi-object Tracking with Occlusion and Stationary Objects Handling for Conveying Systems,Unknown,2016
+b18858ad6ec88d8b443dffd3e944e653178bc28b,Trojaning Attack on Neural Networks,,2017
+b1df214e0f1c5065f53054195cd15012e660490a,Supplementary Material to Sparse Coding and Dictionary Learning with Linear Dynamical Systems,,2016
+b19e8bce7a3180456f8748caabade89dd802ea84,Inferring and Executing Programs for Visual Reasoning Supplementary Material,,2017
+dd609e4bd83cfcdbf64fc794da73a36398076890,Recurrent Human Pose Estimation,,2016
+dca12da787c023c97058cdb7d56e18ef287084f7,Zebrafish tracking using convolutional neural networks,,2017
+dcba9cd587be2ed5437370e12e3591bdde86dc3c,Template for Regular Entry,,2008
+dc23beb1e5c7402b1a9d5a7c854e62a253d0815e,Microscopic crowd simulation : evaluation and development of algorithms. (Simulation microscopique de foules : évaluation et développement d'algorithmes),Unknown,2016
+dc22de0ed56958013234cf7128952390fb47345a,Towards dense object tracking in a 2D honeybee hive,CoRR,2017
+b6bf15f123a814538fff5db757a474be6fc0c72f,Event-Centric Twitter Photo Summarization,Unknown,2014
+b69fbf046faf685655b5fa52fef07fb77e75eff4,Modeling guidance and recognition in categorical search: bridging human and computer object detection.,Journal of vision,2013
+b62486261104d5136aea782ee8596425b5f228da,Modelling perceptions of criminality and remorse from faces using a data-driven computational approach.,Cognition & emotion,2017
+b64cfb39840969b1c769e336a05a30e7f9efcd61,CRF-Based Context Modeling for Person Identification in Broadcast Videos,Front. ICT,2016
+b689d344502419f656d482bd186a5ee6b0140891,Structural resemblance to emotional expressions predicts evaluation of emotionally neutral faces.,Emotion,2009
+a96b6e645a8d3eb8efc7358a852cbfbaa32ae245,Small Group Detection in Crowds using Interaction Information,IEICE Transactions,2017
+a94b832facb57ea37b18927b13d2dd4c5fa3a9ea,Domain transfer convolutional attribute embedding,J. Exp. Theor. Artif. Intell.,2018
+a9e0e667537c9059b3050a64d22b8fe86787d913,"Detecting and Tracking Vehicles , Pedestrians , and Bicyclists at Intersections with a Stationary Lidar",Unknown,2018
+a975f1aea5dbb748955da0e17eef8d2270a49f25,Object Recognition,,
+a951f9b3aa95fe53cd9b19e15ebfdbde3fd5af62,Facial electromyographic responses to emotional information from faces and voices in individuals with pervasive developmental disorder.,"Journal of child psychology and psychiatry, and allied disciplines",2007
+d5813a4a0cca115b05e03d8d8c1ac8bf07176e96,Supplementary Material: Reinforced Video Captioning with Entailment Rewards,,2017
+d5440779ca69a2f010e57250f53a9be0116305e3,Emotional face expression modulates occipital-frontal effective connectivity during memory formation in a bottom-up fashion,,2015
+d5fe9c84710b71a754676b2ee67cec63e8cd184b,FPGA Implementation of a HOG-based Pedestrian Recognition System,,2010
+d2044b92486248f87bafe937779cd2167efe170c,"Connecting Deep Neural Networks to Physical, Perceptual, and Electrophysiological Auditory Signals",,2018
+d2b86b6dc93631990e21a12278e77f002fb4b116,Aalborg Universitet Attention in Multimodal Neural Networks for Person Re-identification,Unknown,2018
+aa420d32c48a3fd526a91285673cd55ca9fe2447,R 4-A . 1 : Dynamics-Based Video Analytics,Unknown,2015
+aaf4d938f2e66d158d5e635a9c1d279cdc7639c0,Toward visual understanding of everyday object,,2015
+aa94f214bb3e14842e4056fdef834a51aecef39c,Reconhecimento de padrões faciais: Um estudo,,2015
+afa57e50570a6599508ee2d50a7b8ca6be04834a,Motion in action : optical flow estimation and action localization in videos. (Le mouvement en action : estimation du flot optique et localisation d'actions dans les vidéos),Unknown,2016
+af370cbe392b7fb2b9f26476a7e063e0f4c46815,Development of Neural Sensitivity to Face Identity Correlates with Perceptual Discriminability.,The Journal of neuroscience : the official journal of the Society for Neuroscience,2016
+af8f59ceed0392159c3475c58af5b7ca8e4f6412,Facial Expression Recognition,Unknown,2018
+af8cd04bbe4902123d7042985159a6a5da9d9fb9,Représenter pour suivre : Exploitation de représentations parcimonieuses pour le suivi multi-objets. (Representing to follow: Exploitation of parsimonious representations for multi-object tracking),Unknown,2017
+af1a6c35f5d75122756d37faed062d5b5cd6bc71,Emotion Modelling and Facial Affect Recognition in Human-Computer and Human-Robot Interaction,,2009
+afb51f0e173cd9ab1d41075862945ae6bc593cde,Large databases of real and synthetic images for feature evaluation and prediction,,2012
+b7426836ca364603ccab0e533891d8ac54cf2429,A Review on Human Activity Recognition Using Vision-Based Method,,2017
+b73ba189d0d1a3e2502716fee60c6865a7964d6e,Towards Open-Universe Image Parsing with Broad Coverage,,2013
+b7774c096dc18bb0be2acef07ff5887a22c2a848,Distance metric learning for image and webpage comparison. (Apprentissage de distance pour la comparaison d'images et de pages Web),Unknown,2015
+b78e611c32dc0daf762cfa93044558cdb545d857,Temporal Action Detection with Structured Segment Networks Supplementary Materials,,2017
+a8638a07465fe388ae5da0e8a68e62a4ee322d68,How to predict the global instantaneous feeling induced by a facial picture?,,2017
+a84f80ca4e29b49cab1035ed8c7877caf2dbe914,Effects of Facial Symmetry and Gaze Direction on Perception of Social Attributes: A Study in Experimental Art History,,2016
+de46cbf18c7da9efc9368241463919e22230b0b0,What We Have Learned about Autism Spectrum Disorder from Valproic Acid,,2013
+de95fa1dd69a2d0d2b76539357062062f8b1e7b8,Face to Age,,2016
+decc4de8b6964ba473744741c3a46ac37f2d6e3e,A Pose Invariant 3 D Face Recognition Method,Unknown,2005
+dec0c26855da90876c405e9fd42830c3051c2f5f,Supplementary Material : Learning Compositional Visual Concepts with Mutual Consistency,Unknown,2018
+b05633a18a48d9c18735fd0a186a2654297ae543,Development of holistic vs. featural processing in face recognition,,2014
+b05ac3b2286c30fcab385f682b3519a823857112,UvA-DARE ( Digital Academic Repository ) Spatial frequency information modulates response inhibition and decision-making processes,Unknown,2017
+b0fafe26b03243a22e12b021266872afdb96572c,Factors of Transferability for a Generic ConvNet Representation,IEEE Transactions on Pattern Analysis and Machine Intelligence,2016
+b018fa5cb9793e260b8844ae155bd06380988584,Project STAR IST - 2000 - 28764 Deliverable D 6 . 3 Enhanced face and arm / hand detector,,
+a67d54cf585c9491ab8a3e2d58d9c4b223359602,Spatial information and end-to-end learning for visual recognition. (Informations spatiales et apprentissage bout-en-bout pour la reconnaissance visuelle),Unknown,2014
+a68c07cb446f63fa6b48eda04c93392219c09700,Averted eye-gaze disrupts configural face encoding,,2014
+a6b1d79bc334c74cde199e26a7ef4c189e9acd46,Deep recurrent neural network reveals a hierarchy of process memory during dynamic natural vision.,Human brain mapping,2018
+b97a155bdd86491c8d32f02d6dfe5b73aaef4549,Eliminating the mere exposure effect through changes in context between exposure and test.,Cognition & emotion,2013
+b93bf0a7e449cfd0db91a83284d9eba25a6094d8,Supplementary Material for : Active Pictorial Structures,Unknown,2015
+b9c9c7ef82f31614c4b9226e92ab45de4394c5f6,Face Recognition under Varying Illumination,,
+b941d4a85be783a6883b7d41c1afa7a9db451831,Radiofrequency ablation planning for cardiac arrhythmia treatment using modeling and machine learning approaches,Unknown,2017
+a13a4e4cc8f4744b40668fe7cca660ae0e88537d,Explorer Multi 30 K : Multilingual English-German Image Descriptions,Unknown,2017
+a165619977bc69a910a771e1096551073122775b,Computational Crowd Camera : Enabling Remote - Vision via Sparse Collective Plenoptic Sampling,,2013
+a15c728d008801f5ffc7898568097bbeac8270a4,ForgetIT Deliverable Template,,2016
+a1132e2638a8abd08bdf7fc4884804dd6654fa63,Real-Time Video Face Recognition for Embedded Devices,Unknown,2012
+a1e1e7e976c22af9de26d9b74c2ece282e20218c,Looking at My Own Face: Visual Processing Strategies in Self–Other Face Recognition,,2018
+a1af05502eac70296ee22e5ab7e066420f5fe447,A Probabilistic Approach for Breast Boundary Extraction in Mammograms,,2013
+a18c8f76f2599d6d61f26cb1d4025ea386919dfe,Video Event Detection: From Subvolume Localization To Spatio-Temporal Path Search.,IEEE transactions on pattern analysis and machine intelligence,2013
+ef940b76e40e18f329c43a3f545dc41080f68748,A Face Recognition and Spoofing Detection Adapted to Visually-Impaired People,Unknown,2017
+ef2084979a3191403c1b8b48f503d06f346afb8f,Une méthode de reconnaissance des expressions du visage basée sur la perception,Unknown,2017
+ef75007cd6e5b990d09e7f3c4ba119be6c2546fb,Lecture 20: Object Recognition 20.1 Introduction 20.2.1 Neocognitron,,
+c3a1a3d13bf1cb2b9c054857b857c3fb9d7176f6,Détection de marqueurs affectifs et attentionnels de personnes âgées en interaction avec un robot. (Audio-visual detection of emotional (laugh and smile) and attentional markers for elderly people in social interaction with a robot),Unknown,2015
+c3dc704790e1a170919087baab0ad10d7df6c24e,Oxytocin in the socioemotional brain: implications for psychiatric disorders,,2015
+c317181fa1de2260e956f05cd655642607520a4f,Objective Classes for Micro-Facial Expression Recognition,CoRR,2017
+c32c8bfadda8f44d40c6cd9058a4016ab1c27499,Unconstrained Face Recognition From a Single Image,,2008
+c46bcb02f92612cf525fd84c6cc79b0638c2eac9,New Fuzzy LBP Features for Face Recognition,CoRR,2015
+c42a8969cd76e9f54d43f7f4dd8f9b08da566c5f,Towards Unconstrained Face Recognition Using 3D Face Model,,2012
+c433ef13220c2ed4d2558283f8515b0e6e09bcad,A Public Video Dataset for Road Transportation Applications,Unknown,2013
+c4be56287fd666f9cfff257018a42e00dc56499d,The role of the fusiform-amygdala system in the pathophysiology of autism.,Archives of general psychiatry,2010
+c48ee576130473efe6dc3ee47f552bc581aa68b2,Computational Intelligence Challenges and Applications on Large-Scale Astronomical Time Series Databases,IEEE Computational Intelligence Magazine,2014
+ea923da826b9e6f89159cc960db7aac91b5ecbd6,Approved by Major Professor(s): Approved by Head of Graduate Program: Date of Graduate Program Head's Approval: Abhilasha Bhargav-Spantzel Protocols and Systems for Privacy Preserving Protection of Digital Identity Doctor of Philosophy,Unknown,2007
+eafda8a94e410f1ad53b3e193ec124e80d57d095,Observer-Based Measurement of Facial Expression With the Facial Action Coding System,Unknown,2006
+ea890846912f16a0f3a860fce289596a7dac575f,Benefits of social vs. non-social feedback on learning and generosity. Results from the Tipping Game,,2014
+ea5eaaadb8bc928fb7543d6fa24f9f4a229ff979,Mirror Neuron Forum.,Perspectives on psychological science : a journal of the Association for Psychological Science,2011
+e1e5d64318ec0a493995fb83ef4f433ddde82e77,Affects the Gaze-cueing Effect,,2013
+e1e6a4146c082e5465cde38e9511de3d150b4ede,Targeting static and dynamic workloads with a reconfigurable VLIW processor,Unknown,2018
+e10662a59b5f8e1f5684409023f11ca727647320,Performance Evaluation of Deep Learning Networks for Semantic Segmentation of Traffic Stereo-Pair Images,CoRR,2018
+cd0f7b3f545cc4bfa5e2d7185789e8ead7e3cee2,"Children’s and Adults’ Predictions of Black, White, and Multiracial Friendship Patterns",,2017
+cd596a2682d74bdfa7b7160dd070b598975e89d9,Mood Detection: Implementing a facial expression recognition system,,2009
+ccd5bd5ce40640ebc6665b97a86ba3d28e457d11,Contributions to a fast and robust object recognition in images. (Contributions à une reconnaissance d'objet rapide et robuste en images),Unknown,2011
+cc5a1cf7ad9d644f21a5df799ffbcb8d1e24abe1,MonoPerfCap: Human Performance Capture from Monocular Video,,2017
+ccdea57234d38c7831f1e9231efcb6352c801c55,Illumination Processing in Face Recognition,IJPRAI,2014
+ccc65463198ee0a0db9b303a3dc903c762dbccaa,Adaptive Selection of Deep Learning Models on Embedded Systems,CoRR,2018
+e6d4c0ac2352f108a078a4fd3f908a03b8571f2b,Racial Bias in Judgments of Physical Size and Formidability,,2017
+e6868f172df3736e052fec4c00b63780b3d739fe,Effects of a Common Variant in the CD38 Gene on Social Processing in an Oxytocin Challenge Study: Possible Links to Autism,Neuropsychopharmacology,2012
+f9f08511f77c29ff948e146434dfb23608d3deb5,Question Answering Using Match-LSTM and Answer Pointer,,2017
+f942739b7f9bc3c0b84f760bb2fd4895e1363ec0,Students Wearing Police Uniforms Exhibit Biased Attention toward Individuals Wearing Hoodies,,2017
+f9028b47a4755a7349108b1dc281f13add5c6c12,Atypical gaze patterns in children and adults with autism spectrum disorders dissociated from developmental changes in gaze behaviour,,2010
+f91388f87e10674226f4def4cda411adc01da496,Failure to Affect Decision Criteria During Recognition Memory With Continuous Theta Burst Stimulation,,2018
+f935225e7811858fe9ef6b5fd3fdd59aec9abd1a,Spatiotemporal dynamics and connectivity pattern differences between centrally and peripherally presented faces.,NeuroImage,2006
+f9b90d3c1e2c3d0f3d9a94e6a0aea5e3047bca78,Analysis of photometric factors based on photometric linearization.,"Journal of the Optical Society of America. A, Optics, image science, and vision",2007
+f93606d362fcbe62550d0bf1b3edeb7be684b000,Nearest Neighbor Classifier Based on Nearest Feature Decisions,Comput. J.,2012
+f006161327d3ea3484064c1a86e4c87c729fd7b8,Rough Sets Methods in Feature Reduction and Classification,,2001
+f0f501e1e8726148d18e70c8e9f6feea9360d119,Jukka Komulainen SOFTWARE - BASED COUNTERMEASURES TO 2 D FACIAL,,2015
+f76a04bdc43f1e440b274b299b07ce2e423431e6,Face Recognition from Video: a Review,IJPRAI,2012
+f78fe101b21be36e98cd3da010051bb9b9829a1e,Unsupervised Domain Adaptation for Facial Expression Recognition Using Generative Adversarial Networks,,2018
+f79c97e7c3f9a98cf6f4a5d2431f149ffacae48f,Title On color texture normalization for active appearance models,Unknown,2017
+f76808d6811cb3790e7fc3ddb08c733febbdefba,Robust Object Categorization and Segmentation Motivated by Visual Contexts in the Human Visual System,EURASIP J. Adv. Sig. Proc.,2011
+e8304700fd89461ec9ecf471179ad87f08f3c2f7,Chapter 1 . Learning to Learn New Models of Human Activities in Indoor Settings (,,2013
+e8fdacbd708feb60fd6e7843b048bf3c4387c6db,Deep Learning,Unknown,2014
+e810ddd9642db98492bd6a28b08a8655396c1555,Facing facts: neuronal mechanisms of face perception.,Acta neurobiologiae experimentalis,2008
+e8dda897372e6b4cf903234c7a9c40117711d8d8,What do you think of my picture? Investigating factors of influence in profile images context perception,Unknown,2015
+e8c6c3fc9b52dffb15fe115702c6f159d955d308,Linear Subspace Learning for Facial Expression Analysis,Unknown,2012
+e8867f819f39c1838bba7d446934258035d4101c,Face recognition performance with superresolution.,Applied optics,2012
+fab83bf8d7cab8fe069796b33d2a6bd70c8cefc6,Draft: Evaluation Guidelines for Gender Classification and Age Estimation,,2011
+faeefc5da67421ecd71d400f1505cfacb990119c,PastVision+: Thermovisual Inference of Recent Medicine Intake by Detecting Heated Objects and Cooled Lips,Front. Robotics and AI,2017
+fa08a4da5f2fa39632d90ce3a2e1688d147ece61,Supplementary material for “ Unsupervised Creation of Parameterized Avatars ” 1 Summary of Notations,,
+fab04dfcb35a29a46504d2ad3acbc642c602c7e8,Trajectory-based 3 D Convolutional Descriptors for Action Recognition in Videos,Unknown,2018
+faf40ce28857aedf183e193486f5b4b0a8c478a2,Automated Human Identification Using Ear Imaging,Unknown,2015
+fac8cff9052fc5fab7d5ef114d1342daba5e4b82,Modeling Phase Spectra Using Gaussian Mixture Models for Human Face Identification,Unknown,2005
+fa5aca45965e312362d2d75a69312a0678fdf5d7,Fast and Accurate Head Pose Estimation via Random Projection Forests: Supplementary Material,,2015
+fa4e709a7008248869584feca81250a8da8291e4,Biometric Quantization through Detection Rate Optimized Bit Allocation,EURASIP J. Adv. Sig. Proc.,2009
+fac0151ed0494caf10c7d778059f176ba374e29c,Recognising Complex Mental States from Naturalistic Human-Computer Interactions,Unknown,2014
+fff32fd598e41ec6dd6903082d77f43f16908cfd,Kernel Learning of Histogram of Local Gabor Phase Patterns for Face Recognition,EURASIP J. Adv. Sig. Proc.,2008
+ff2e25cb67209de8ae922abdfc31f922b130276e,Information Granulation and Pattern Recognition,,2004
+ff70cfaf3e085a6c32bfa7ebedb98adfb7658210,TABULA RASA Trusted Biometrics under Spoofing Attacks,,2011
+ff9195f99a1a28ced431362f5363c9a5da47a37b,Serial dependence in the perception of attractiveness,,2016
+ff825a46f0a4e9f6ad748aeefd18f34f6b4addfb,"The ""reading the mind in films"" task: complex emotion recognition in adults with and without autism spectrum conditions.",Social neuroscience,2006
+c51fbd2574e488e486483e39702a3d7754cc769b,Face Recognition from Still Images to Video Sequences: A Local-Feature-Based Framework,EURASIP J. Image and Video Processing,2011
+c28745625f048d86f2ad0f38a41ddc0683d36a96,"Looking, seeing and believing in autism: Eye movements reveal how subtle cognitive processing differences impact in the social domain.",Autism research : official journal of the International Society for Autism Research,2016
+c2adfc55e0ab9be6e8f5e4ebeb20770dca307cef,"The effect of diagnosis, age, and symptom severity on cortical surface area in the cingulate cortex and insula in autism spectrum disorders.",Journal of child neurology,2013
+c2d065bc8067384c40b3e8146cadc9a0c4c1d633,SLC25A12 expression is associated with neurite outgrowth and is upregulated in the prefrontal cortex of autistic subjects,Molecular Psychiatry,2008
+c2c058afe227f2099aae4f204688b22239d6837a,Threatening faces fail to guide attention for adults with autistic-like traits.,Autism research : official journal of the International Society for Autism Research,2017
+c223b2b7d38dc4e0ad418c404b2d3c43c62213bc,Trade-off Between GPGPU based Implementations of Multi Object Tracking Particle Filter,Unknown,2017
+c2b1007824fa7ce3a7a94209f0be0902a3454bae,Project Description 1 Introduction,,
+c2eed73654b544a705b194ade58cd82488c6c5b9,"Scene Understanding by Labeling Pixels Key Insights ˽ Recent Progress on Image Understanding, a Long-standing Challenge of Ai, Is Enabling Numerous New Applications in Robot Perception, Surveillance and Environmental Monitoring, Content- Based Image Search, and Social-media Summarization",,2014
+f68263a6f541429a8645ca2f4b0658cdbbd66638,Setting a world record in 3D Face Recognition,,2015
+f663ad5467721159263c1cde261231312893f45d,UvA-DARE ( Digital Academic Repository ) Gaze Embeddings for Zero-Shot Image Classification,Unknown,
+f69a289a3bc6b61c612ba6ff4033f122100daccb,Morphing between expressions dissociates continuous from categorical representations of facial expression in the human brain.,Proceedings of the National Academy of Sciences of the United States of America,2012
+f66f3d1e6e33cb9e9b3315d3374cd5f121144213,Top-down control of visual responses to fear by the amygdala.,The Journal of neuroscience : the official journal of the Society for Neuroscience,2013
+f6c7f1cbfa412fb6244992b7fb2eda0a9e0d933e,Expertise Moderates Incidentally Learned Associations Between Words and Images,,2018
+f6328f02ab64c992d76967dbfd1a66d325173723,Mel- and Mellin-cepstral Feature Extraction Algorithms for Face Recognition,Comput. J.,2011
+f6f12e0fbfce067d02445abde76be0522e4db329,Online Multiple targets Detection and Tracking from Mobile robot in Cluttered indoor Environments with Depth Camera,IJPRAI,2014
+f68f20868a6c46c2150ca70f412dc4b53e6a03c2,Differential Evolution to Optimize Hidden Markov Models Training: Application to Facial Expression Recognition,CIT,2015
+e97d824b8e80670d49d53c402f99e0fbeaafacdb,Neural Best-Buddies: Sparse Cross-Domain Correspondence,ACM Trans. Graph.,2018
+e9ccd438d6d55ba0d11a63eb95c773d63b3ea4e5,Will you remember me ? Cultural differences in own-group face recognition biases ☆,Unknown,2016
+e9e39e31419d9a22790b327bc1d6107fa832bdab,Face recognition using adaptively weighted patch PZM array from a single exemplar image per person,Pattern Recognition,2008
+e9bbe558c73de60e40ce2bd8c7cb7a47dacfe594,Can White children grow up to be Black? Children's reasoning about the stability of emotion and race.,Developmental psychology,2016
+e93a65ff1c7c29736cef5701f079f75ecfb76f5f,From image statistics to scene gist: evoked neural activity reveals transition from low-level natural image structure to scene category.,The Journal of neuroscience : the official journal of the Society for Neuroscience,2013
+f193ca76a878af87603ae8ac823a3e6d1c2e3c7e,Recurrent Multi-frame Single Shot Detector for Video Object Detection,Unknown,2018
+f179f7888934b11dc5a2d8ff9205d1ca8b8a1599,Illuminant direction estimation for a single image based on local region complexity analysis and average gray value.,Applied optics,2014
+f153cbec29d86a58b5f15231fd14e7037a210682,Lost in the categorical shuffle: evidence for the social non-prototypicality of black women.,Cultural diversity & ethnic minority psychology,2014
+f16921c1c6e8bce89bce7679cbd824d65b494e4d,The face of love: spontaneous accommodation as social emotion regulation.,Personality & social psychology bulletin,2011
+e7f00f6e5994c5177ec114ee353cc7064d40a78f,Back to Basic: Do Children with Autism Spontaneously Look at Screen Displaying a Face or an Object?,,2013
+e726174d516605f80ff359e71f68b6e8e6ec6d5d,3D Face Recognition Using Patched Locality Preserving Projections,J. Inf. Sci. Eng.,2010
+e78394213ae07b682ce40dc600352f674aa4cb05,Expression-invariant three-dimensional face recognition,,2005
+e7f4951c1106bff0460665ef67d11fb9c2d07c41,Machine Vision-Based Analysis of Gaze and Visual Context: an Application to Visual Behavior of Children with Autism Spectrum Disorders,,2011
+e7a922049a9bf54a0b13cd1d475a58e36c7c9b3e,The conceptual structure of face impressions.,Proceedings of the National Academy of Sciences of the United States of America,2018
+cb4fc4d49783f2049c48a062169f04eb744443ec,Paying More Attention to Saliency: Image Captioning with Saliency and Context Attention,TOMCCAP,2018
+cb1e34d7fcb7fae914fcb65cb9cf25199d49cec9,SLAQ: quality-driven scheduling for distributed machine learning,Unknown,2017
+cb310356d1c5f567b2a8796b708f6e1e10fa1917,Serotonin and the neural processing of facial emotions in adults with autism: an fMRI study using acute tryptophan depletion.,Archives of general psychiatry,2012
+cb3ba84146d1324e1cdbde3764ca3b354ee09a2a,"On the Interplay Between Throughput, Fairness and Energy Efficiency on Asymmetric Multicore Processors",Comput. J.,2018
+cb2e10d1a6792354bc0ce24ee99ecf2142d16f9b,Enhancing Real-Time Human Detection Based on Histograms of Oriented Gradients,,2008
+f8eedcca6263062b6bab11ead255f719452f1c81,Motion in action : optical flow estimation and action localization in videos. (Le mouvement en action : estimation du flot optique et localisation d'actions dans les vidéos),Unknown,2016
+f8b26b2ec62cf76f58f95938233bc22ae1902144,UvA-DARE ( Digital Academic Repository ) Visual Tracking : An Experimental Survey Smeulders,Unknown,2018
+f8a5bc2bd26790d474a1f6cc246b2ba0bcde9464,"KDEF-PT: Valence, Emotional Intensity, Familiarity and Attractiveness Ratings of Angry, Neutral, and Happy Faces",,2017
+f89e5b1f61b221c7b00db55b64239a28f8ba9fe0,Ensemble Learning-Based Person Re-identification with Multiple Feature Representations,Complexity,2018
+ce57cc478421adf85a9058a0cc8fad8ebfd81c52,Multimodal Attribute Extraction,CoRR,2017
+cefd9936e91885ba7af9364d50470f6cb54315a4,Expectation and surprise determine neural population responses in the ventral visual stream.,The Journal of neuroscience : the official journal of the Society for Neuroscience,2010
+ceca60c4bf1a5c4e5893ae6685e7a9f80ca47f27,Visual Question: Predicting If a Crowd Will Agree on the Answer,CoRR,2016
+ceb4040acf7f27b4ca55da61651a14e3a1ef26a8,Angry Crowds: Detecting Violent Events in Videos,Unknown,2016
+ced73382d686dee6232c313f014bc21ca7536db0,Detection of Tongue Protrusion Gestures from Video,IEICE Transactions,2011
+ce83369da319607fe2832485913b0f30c00920aa,Human Detection Based on Large Feature Sets Using Graphics Processing Units,Informatica (Slovenia),2011
+e03bda45248b4169e2a20cb9124ae60440cad2de,"Learning a Dictionary of Shape-Components in Visual Cortex : Comparison with Neurons , Humans and Machines by Thomas Serre",Unknown,2006
+e0dedb6fc4d370f4399bf7d67e234dc44deb4333,Supplementary Material: Multi-Task Video Captioning with Video and Entailment Generation,,2017
+e043d79f4dc41c9decaf637d8ffdd11f8ed59f2b,Distance metric learning for image and webpage comparison. (Apprentissage de distance pour la comparaison d'images et de pages Web),Unknown,2015
+e030697c19dd1919dbdd889b69df7ab002a8af19,The expectancy bias : Expectancy-violating faces evoke earlier pupillary dilation than neutral or negative faces,Unknown,2017
+e042c4d038373a68cca109336598c0323e7a9b60,Culture moderates the relationship between interdependence and face recognition,,2015
+e096db52fc8316e66273b456c58b073f9b689074,Harnessing Repetitive Behaviours to Engage Attention and Learning in a Novel Therapy for Autism: An Exploratory Analysis,,2012
+e00d391d7943561f5c7b772ab68e2bb6a85e64c4,Robust continuous clustering.,Proceedings of the National Academy of Sciences of the United States of America,2017
+e0244a8356b57a5721c101ead351924bcfb2eef4,Power as an emotional liability: Implications for perceived authenticity and trust after a transgression.,Journal of experimental psychology. General,2017
+e059650472dd7bfd6907b02de491e312a0cb6d4e,Parallel Genetic Algorithms and Machine Learning,,1996
+e0515dc0157a89de48e1120662afdd7fe606b544,Perception Science in the Age of Deep Neural Networks,,2017
+46d7f41189c5e262df9ad1165d5a40d2b685bb0f,Discriminative Multiple Target Tracking,,2010
+46a01565e6afe7c074affb752e7069ee3bf2e4ef,Local Descriptors Encoded by Fisher Vectors for Person Re-identification,,2012
+46282f10271875647219b641dac2cc01c7dc8ab2,Psychopathic traits are associated with reduced fixations to the eye region of fearful faces.,Journal of abnormal psychology,2018
+46b031a3e368f25dd1e42f70f21165fef7b16de2,"Faces in the mirror, from the neuroscience of mimicry to the emergence of mentalizing.",Journal of anthropological sciences = Rivista di antropologia : JASS,2016
+46c3e8c2b2042b193659c0a613adc72100a2f301,Vision for Robotics By Danica Kragic and Markus Vincze,,2009
+4679f4a7da1cf45323c1c458b30d95dbed9c8896,Recognizing Facial Expressions Using Model-Based Image Interpretation,Unknown,2008
+2cdb8df791cb15eef805443293319ec8690ff88f,An Effective Approach to Pose Invariant 3D Face Recognition,,2011
+2cc4ae2e864321cdab13c90144d4810464b24275,Face Recognition Using Optimized 3D Information from Stereo Images,,2005
+2c883977e4292806739041cf8409b2f6df171aee,Are Haar-Like Rectangular Features for Biometric Recognition Reducible?,,2013
+2cdd9e445e7259117b995516025fcfc02fa7eebb,Temporal Exemplar-Based Bayesian Networks for Facial Expression Recognition,2008 Seventh International Conference on Machine Learning and Applications,2008
+2c07d9a383e0bb7e1c8ba07084ba8bcf71af2aad,Robust Ear Recognition via Nonnegative Sparse Representation of Gabor Orientation Information,,2014
+2cac70f9c8140a12b6a55cef834a3d7504200b62,Reconstructing High Quality Face-Surfaces using Model Based Stereo,2007 IEEE 11th International Conference on Computer Vision,2007
+2c7946d5d2f1572c20e9843eb2033b8eb9771bf3,THEORETICAL REVIEW Mechanisms for Widespread Hippocampal Involvement in Cognition,,2013
+2c7934a2f1671286370cd9adebc2872c6dd318f5,Visual Scene Understanding through Semantic Segmentation,,2015
+794f76c111ba1a4ca718e84ae74ee8d2a67c4173,Ventromedial prefrontal cortex mediates visual attention during facial emotion recognition.,Brain : a journal of neurology,2014
+79a36b19ea363c14af27a1f4112a9eccdd582837,The scope of social attention deficits in autism: prioritized orienting to people and animals in static natural scenes.,Neuropsychologia,2010
+793e896c2f66fb66bfc6c834f2678cf349af4e20,Incorporating Computation Time Measures During Heterogeneous Features Selection in a Boosted Cascade People Detector,IJPRAI,2016
+79335495e54446541a3655d145911beba7c29d7d,The face inversion effect in opponent-stimulus rivalry,,2014
+2dced31a14401d465cd115902bf8f508d79de076,Can a Humanoid Face be Expressive? A Psychophysiological Investigation,,2015
+2dfe0e7e81f65716b09c590652a4dd8452c10294,Incongruence Between Observers’ and Observed Facial Muscle Activation Reduces Recognition of Emotional Facial Expressions From Video Stimuli,,2018
+2dd5f1d69e0e8a95a10f3f07f2c0c7fa172994b3,Machine Analysis of Facial Expressions,,2007
+2dc7d439e99f15a499cd2dcbdfbc1c0c7648964d,Computational Understanding of Image Memorability by Zoya Bylinskii,Unknown,2015
+2d6d4899c892346a9bc8902481212d7553f1bda4,Neural Face Editing with Intrinsic Image Disentangling SUPPLEMENTARY MATERIAL,,2017
+4129e1075c7856d8bebbf0655ae00a4843109429,A Tale of Two Losses : Discriminative Deep Feature Learning for Person Re-Identification,Unknown,2017
+41ddd29d9e56bb87b9f988afc75cd597657b2600,R4-A.3: Human Detection & Re-Identification for Mass Transit Environments,,2017
+413160257096b9efcd26d8de0d1fa53133b57a3d,Customer satisfaction measuring based on the most significant facial emotion,Unknown,2018
+412b3ef02c85087e5f1721176114672c722b17a4,A Taxonomy of Deep Convolutional Neural Nets for Computer Vision,Front. Robotics and AI,2016
+41ed93fd97aa76b4abfda7a09168ad1799f34664,Video Event Detection: From Subvolume Localization to Spatiotemporal Path Search,IEEE Transactions on Pattern Analysis and Machine Intelligence,2013
+41612c66beaad320af9b7d34407c7d0f4ca7bfea,Inhibition or Ideology ? The Neural Mechanisms of Evaluating Race-Targeted Government Assistance,Unknown,2017
+41e1084e74564ced3e1fa845250162d6d0f2b9c3,A Texture-based Approach to Face Detection,,2004
+41fafb5392ad5e33e5169d870812ab5edca301a1,Tree-Structured Stick Breaking Processes for Hierarchical Data,Unknown,2010
+4189862b2ce9c71e1b451deb58dd42f50c7d04a1,Autistic trait interactions underlie sex-dependent facial recognition abilities in the normal population,,2013
+41c87d3342a85712a3591b6d49d99be8fc8d35d9,Face-trait inferences show robust child – adult agreement : Evidence from three types of faces,Unknown,2015
+4156f9fc5983b09eb97ad3d9abc248b15440b955,"2 Subspace Methods for Face Recognition : Singularity , Regularization , and Robustness",,2012
+83fd2d2d5ad6e4e153672c9b6d1a3785f754b60e,Quantifying naturalistic social gaze in fragile X syndrome using a novel eye tracking paradigm.,"American journal of medical genetics. Part B, Neuropsychiatric genetics : the official publication of the International Society of Psychiatric Genetics",2015
+8387c58a5a3fd847f9b03760842dd49fec7cbb0e,Two-year-olds with autism orient to nonsocial contingencies rather than biological motion,,2009
+831226405bb255527e9127b84e8eaedd7eb8e9f9,A Motion-Based Feature for Event-Based Pattern Recognition,,2016
+832aae00e16c647716f1be38de233c9c15af9a28,Feature fusion for facial landmark detection,Pattern Recognition,2014
+83e7c51c4d6f04049f5a3dbf4ac9e129ed96caee,Spatio-temporal Pain Recognition in CNN-Based Super-Resolved Facial Images,Unknown,2016
+83b700f0777a408eb36eef4b1660beb3f6dc1982,Violent behaviour detection using local trajectory response,,2016
+1b02b9413b730b96b91d16dcd61b2420aef97414,Détection de marqueurs affectifs et attentionnels de personnes âgées en interaction avec un robot. (Audio-visual detection of emotional (laugh and smile) and attentional markers for elderly people in social interaction with a robot),Unknown,2015
+1bb14ddc0326a8e5b44eafd915738c2b1342f392,Title On color texture normalization for active appearance models,Unknown,2017
+1b6394178dbc31d0867f0b44686d224a19d61cf4,EPML: Expanded Parts Based Metric Learning for Occlusion Robust Face Verification,,2014
+1bbec7190ac3ba34ca91d28f145e356a11418b67,Explorer Action Recognition with Dynamic Image Networks,Unknown,2017
+1be9ee50f4d4f59b9761a366bba9127213dc4f33,You cannot gamble on others: Dissociable systems for strategic uncertainty and risk in the brain,,2013
+1b2ad281ef74e366ec58221b13edc6eefdb170f8,Use and Usefulness of Dynamic Face Stimuli for Face Perception Studies—a Review of Behavioral Findings and Methodology,,2018
+1bca1a09e2ef62b1960c23ff6653ae2d5aef5718,Comparison of human face matching behavior and computational image similarity measure,Science in China Series F: Information Sciences,2009
+771431afa9b5c936dc970db8d02ae06f49d68638,TabletGaze : Dataset and Algorithm for Unconstrained Appearance-based Gaze Estimation in Mobile Tablets,Unknown,2015
+77dc158a979731d2ed01145b1d3ead34a6c33487,Preference for geometric patterns early in life as a risk factor for autism.,Archives of general psychiatry,2011
+77052654a37b88719c014c5afd3db89cb2288aeb,Lung Cancer Prediction Using Neural Network Ensemble with Histogram of Oriented Gradient Genomic Features,,2015
+77d31d2ec25df44781d999d6ff980183093fb3de,The Multiverse Loss for Robust Transfer Learning,2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR),2016
+77d4843a177031b2b5721824280033e2e601334c,Comparative Evaluation of 3D versus 2D Modality for Automatic Detection of Facial Action Units,,2011
+48b4f49ec708677fc9f70edc74fd0f92ef986406,CS168: The Modern Algorithmic Toolbox Lecture #6: Stochastic Gradient Descent and Regularization,,2016
+48a5b6ee60475b18411a910c6084b3a32147b8cd,Pedestrian Attribute Recognition with Part-based CNN and Combined Feature Representations,Unknown,2018
+482e8a9323fca1e27fccf03d2a58a36873d0ae10,Assessing Social Cognition of Persons with Schizophrenia in a Chinese Population: A Pilot Study,,2017
+48cdb6a640b4259c61c476fb529d7c176e8345a9,Eyelid-openness and mouth curvature influence perceived intelligence beyond attractiveness.,Journal of experimental psychology. General,2016
+4813d9332a1f3ef2bf5846e81005895322310bed,3D Face Recognition,,2007
+48319e611f0daaa758ed5dcf5a6496b4c6ef45f2,Non Binary Local Gradient Contours for Face Recognition,CoRR,2014
+70990e1b13cec2b3e4831a00c6ac901dae76b27a,"Mareckova , Klara ( 2013 ) Sex differences and the role of sex hormones in face development and face processing",Unknown,2016
+7031d7fde9f184b72416759f8a9be4155616f456,Benchmarking Face Detection in a Mobile/Tablet Environment,,2011
+70b42bbd76e6312d39ea06b8a0c24beb4a93e022,Solving Multiple People Tracking in a Minimum Cost Arborescence,2015 IEEE Winter Applications and Computer Vision Workshops,2015
+1e2087908e6ce34032c821c7fb6629f2d0733086,Affective Embodied Conversational Agents for Natural Interaction,Unknown,2008
+1e3a9b0cfdeca614c5689a3419016c89bf9fbdfa,Facial color is an efficient mechanism to visually transmit emotion,,2018
+1ecb56e7c06a380b3ce582af3a629f6ef0104457,"A New Way of Discovery of Belief, Desire and Intention in the BDI Agent-Based Software Modeling",JACIII,2004
+1ee27c66fabde8ffe90bd2f4ccee5835f8dedbb9,9 Entropy Regularization,,
+1e0dd12f2bff234a4df71641bc95068733506858,Handwritten Word Spotting with Corrected Attributes,2013 IEEE International Conference on Computer Vision,2013
+84fd7c00243dc4f0df8ab1a8c497313ca4f8bd7b,Perceived Age Estimation from Face Images,Unknown,2018
+84d665608c7c005d38290df392b0ba0157ba32ee,Social Cognitive Training Improves Emotional Processing and Reduces Aggressive Attitudes in Ex-combatants,,2017
+84efa16406c8838550cbbed48f0355b936bbe845,Online Hierarchical Sparse Representation of Multifeature for Robust Object Tracking,,2016
+4af997701ce14ba689f7f964a72bcae0a2432435,The role of gaze direction in face memory in autism spectrum disorder.,Autism research : official journal of the International Society for Autism Research,2013
+4a8085987032e85ac8017d9977a4b76b0d8fa4ac,Object Recognition using Template Matching,,2008
+4ad51a99e489939755f1d4f5d1f5bc509c49e96d,Preferences for facially communicated big five personality traits and their relation to self-reported big five personality,Unknown,2018
+4a9086cf2637b7ea54855187b978af7a89bfceff,Atypical neural specialization for social percepts in autism spectrum disorder.,Social neuroscience,2011
+24b6d839662e5d56f17fc26eab4d2901f6835ddf,Real Time Lip Motion Analysis for a Person Authentication System using Near Infrared Illumination,2006 International Conference on Image Processing,2006
+247df1d4fca00bc68e64af338b84baaecc34690b,Evaluation of Gender Classification Methods with Automatically Detected and Aligned Faces,IEEE Transactions on Pattern Analysis and Machine Intelligence,2008
+2475ad865b2102cef83a87adfe0d2e71d4791e53,A Supervised Clustering Algorithm for the Initialization of RBF Neural Network Classifiers,2007 IEEE 15th Signal Processing and Communications Applications,2007
+248291285074203eb9ee8e0b8b517ac4ce7dc4aa,The Way Dogs (Canis familiaris) Look at Human Emotional Faces Is Modulated by Oxytocin. An Eye-Tracking Study,,2017
+24585f90bdf30583733841f70430d36948f16ae2,An efficient method for human face recognition using nonsubsampled contourlet transform and support vector machine,,2009
+231e545fdb1a516e29604fbd740e207b6f25c7dc,Perception of dynamic changes in facial affect and identity in autism.,Social cognitive and affective neuroscience,2007
+23fc6c6e1cd52a77215a285a462840cbb96aec39,"Cortical patterns of category-selective activation for faces, places and objects in adults with autism.",Autism research : official journal of the International Society for Autism Research,2008
+23ba9e462151a4bf9dfc3be5d8b12dbcfb7fe4c3,Determining Mood from Facial Expressions,,2014
+23ca7c4367f7317c61ebb0574e3d04cfd9bc3893,Aberrant brain activation during gaze processing in boys with fragile X syndrome.,Archives of general psychiatry,2008
+2312bc2d48a0f68bd5ab1b024d5726786455da3a,Learning Deep Context-Aware Features over Body and Latent Parts for Person Re-identification,2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR),2017
+23b93f3b237481bd1d36941ca3312bb16f4beb58,Reconnaissance d'événements et d'actions à partir de la profondeur thermique 3D. (Event and action recognition from thermal and 3D depth Sensing),Unknown,2016
+23120f9b39e59bbac4438bf4a8a7889431ae8adb,Improved RGB-D-T based face recognition,IET Biometrics,2016
+23dd8d17ce09c22d367e4d62c1ccf507bcbc64da,Deep Density Clustering of Unconstrained Faces ( Supplementary Material ),Unknown,2018
+4f922f6602f39baae94f63954005776e1da05671,Peer-Mediated Theatrical Engagement for Improving Reciprocal Social Interaction in Autism Spectrum Disorder,,2014
+4f8bd3519a6e8a05db9e35b027c0c65c91d2ff62,Brain Oxytocin is a Main Regulator of Prosocial Behaviour - Link to Psychopathology,Unknown,2018
+4f1a74cfa7c8383a5dea97cb48c197da5b4f5ee0,LINKS: Learning-Based Multi-source IntegratioN FrameworK for Segmentation of Infant Brain Images,NeuroImage,2014
+4f15b1e750007465024181dd002dfc6d1baa48c9,Face Recognition and Computer Graphics for Modelling,,2014
+4fde52cd3af5c698f0807bc3b821ebb3a270a986,Impaired fixation to eyes during facial emotion labelling in children with bipolar disorder or severe mood dysregulation.,Journal of psychiatry & neuroscience : JPN,2013
+8d6344658fa9673b1f4ac0d0bad53617ee127aaa,Adolescent and adult risk-taking in virtual social contexts,,2014
+8de2dbe2b03be8a99628ffa000ac78f8b66a1028,Action Recognition in Videos,,2008
+8d3fbdb9783716c1832a0b7ab1da6390c2869c14,Discriminant Subspace Analysis for Uncertain Situation in Facial Recognition,,2008
+8d09c8c6b636ef70633a3f1bb8ff6b4d4136b5cf,3D Twins Expression Challenge,,2011
+8d6c4af9d4c01ff47fe0be48155174158a9a5e08,"Labeling, discovering, and detecting objects in images",,2008
+8df05de407b829abb357e230bead5407cabe7305,U Can Touch This: How Tablets Can Be Used to Study Cognitive Development,,2016
+8d007d8d75cb84e3350889ad5e1cc6520688e65e,Optimizing Nondecomposable Loss Functions in Structured Prediction,IEEE Transactions on Pattern Analysis and Machine Intelligence,2013
+8dffbb6d75877d7d9b4dcde7665888b5675deee1,Emotion Recognition with Deep-Belief Networks,,2010
+15affdcef4bb9d78b2d3de23c9459ee5b7a43fcb,Semi-Supervised Classification Using Linear Neighborhood Propagation,2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06),2006
+1542b8a1805d73a755d4b2eb402c5c861e6acd02,PMCTrack: Delivering Performance Monitoring Counter Support to the OS Scheduler,Comput. J.,2017
+1582c29d0f752f95a12f5a8ce08d5e5c752f6822,Developmental changes in infants' categorization of anger and disgust facial expressions.,Developmental psychology,2017
+15d1326f054f4fadea463f217ce54bad6908705a,Sensor fusion in smart camera networks for Ambient Intelligence - Report on PhD Thesis and Defense,JAISE,2013
+124476c2815bbfb523c77943c74356f94f79b580,Recognition of Faces in Unconstrained Environments: A Comparative Study,EURASIP J. Adv. Sig. Proc.,2009
+12e5ff3d6771d725f09bb0b2f14d17a64d4c1c25,The fear gasping face as a threat display in a Melanesian society.,Proceedings of the National Academy of Sciences of the United States of America,2016
+120b22e7a47923e42a123b9b68a93ccac5aaea6d,Paper on Ear Biometric Authentication,Unknown,2016
+1297ee7a41aa4e8499c7ddb3b1fed783eba19056,Effects of emotional expressions on persuasion,Unknown,2016
+124538b3db791e30e1b62f81d4101be435ee12ef,"Basic level scene understanding: categories, attributes and structures",,2013
+8ca0a7f2e5a7b1676f9a409c3ed5749c8a569b83,A new approach for pedestrian density estimation using moving sensors and computer vision,Unknown,2018
+8c5852530abaefcdce805d1e339677351c6ec7fe,Lernen situationsunabhängiger Personenerkennung,Informatik-Spektrum,2012
+8c244417db2082f4d5897548e72ef304ae886e52,Tree Based Space Partition of Trajectory Pattern Mining For Frequent Item Sets,,2016
+8c6b9c9c26ead75ce549a57c4fd0a12b46142848,Facial expression recognition using shape and texture information,,2006
+8c5dcd5a0b3c9940e544993327eab6425ce645d5,nsemble perception of emotions in autistic and typical children and dolescents,Unknown,2017
+850d84e4c73a8f0762c8c798b2b7fd6f2787263a,The Discovery of Perceptual Structure from Visual Co - occurrences in Space and Time,,2016
+852bdbcd091f48e07e9b989cb326e631e2932d7f,Visual scanning patterns and executive function in relation to facial emotion recognition in aging.,"Neuropsychology, development, and cognition. Section B, Aging, neuropsychology and cognition",2013
+85476331edc9a9e3393f736f14aa80ad95f3c105,"""Wealth Makes Many Friends"": Children Expect More Giving From Resource-Rich Than Resource-Poor Individuals.",Child development,2017
+1d187e1d0e9eb874f85e3ecdb75ca0a7bd98d8bc,Aggression in young children with concurrent callous–unemotional traits: can the neurosciences,,2008
+1d0a6759de0d55d15439b0367f0aa49c1e248c5c,"Networking in Autism: Leveraging Genetic, Biomarker and Model System Findings in the Search for New Treatments",Neuropsychopharmacology,2012
+1d9497450f60b874eb6ecbf82e3d0808a6fe236c,Nonconvex proximal splitting with computational errors∗,,2016
+1df1aa9179506554744bf16b238d05ebd1e2d4d5,Abnormality in face scanning by children with autism spectrum disorder is limited to the eye region: evidence from multi-method analyses of eye tracking data.,Journal of vision,2013
+1dea4f56c04d12abbc9e1ed7c48c7ccc09e7f5bb,How magic changes our expectations about autism.,Psychological science,2010
+1dff919e51c262c22630955972968f38ba385d8a,Toward an Affect-Sensitive Multimodal Human–Computer Interaction,,2001
+1dc07322715e093c560b30fdf1e168e58e9a9409,DRBF and IRBF Based Face Recognition and Extraction of Facial Expressions from the Blur Image,,2014
+1dacc2f4890431d867a038fd81c111d639cf4d7e,Using social outcomes to inform decision-making in schizophrenia: Relationships with symptoms and functioning.,Journal of abnormal psychology,2016
+1d729693a888a460ee855040f62bdde39ae273af,Photorealistic Face De-Identification by Aggregating Donors' Face Components,Unknown,2014
+71b07c537a9e188b850192131bfe31ef206a39a0,300 Faces In-The-Wild Challenge: database and results,Image Vision Comput.,2016
+71559cae0bc89398e75a2f24674d61cb51909390,Relighting Humans : Occlusion-Aware Inverse Rendering for Full-Body Human Images,Unknown,2018
+71f1e72670e676b6902cce0d6fc0b4f63b46ca28,Survey paper: Face Detection and Face Recognition,,2004
+710ce8cf25f31df8547b888519b414187e989257,Amygdala activation predicts gaze toward fearful eyes.,The Journal of neuroscience : the official journal of the Society for Neuroscience,2009
+71cbe1b52e2fdb8fa8a8278eb590f8065d3e7fcb,’ Actions dans des Vidéos Réalistes Structured Models for Action Recognition in Real-world Videos,Unknown,2012
+767936728b07238bbf38661fc3c2000d0c17b598,An Own-Age Bias in Recognizing Faces with Horizontal Information,,2016
+76b6577f47d6782bf75aca04e361a7b7381b4a84,Measuring and Modifying the Intrinsic Memorability of Images,,2015
+76ce3d35d9370f0e2e27cfd29ea0941f1462895f,Efficient Parallel Implementation of Active Appearance Model Fitting Algorithm on GPU,,2014
+7606a74de57f67257c77a8bb0295ff4593566040,Content-based Image Retrieval Using Constrained Independent Component Analysis : Facial Image Retrieval Based on Compound Queries,Unknown,2012
+760ba44792a383acd9ca8bef45765d11c55b48d4,Class-specific classifier: avoiding the curse of dimensionality,IEEE Aerospace and Electronic Systems Magazine,2004
+1ca9ab2c1b5e8521cba20f78dcf1895b3e1c36ac,"Explorer "" Here ' s looking at you , kid",,2017
+1ce4587e27e2cf8ba5947d3be7a37b4d1317fbee,Deep fusion of visual signatures for client-server facial analysis,,2016
+1c4ceae745fe812d8251fda7aad03210448ae25e,Optimization of Color Conversion for Face Recognition,EURASIP J. Adv. Sig. Proc.,2004
+1cee993dc42626caf5dbc26c0a7790ca6571d01a,Optimal illumination for image and video relighting,,2005
+1c51aeece7a3c30302ebd83bdcaa65df0bfc48fe,Unsupervised Video Indexing based on Audiovisual Characterization of Persons. (Indexation vidéo non-supervisée basée sur la caractérisation des personnes),,2010
+1c7a050394371bcb064868dfe681ff4c29ce2101,Expressive Models and Comprehensive Benchmark for 2D Human Pose Estimation,,2014
+1ca40e1d0ae377296ac6804c81c1e5bcbc5475c8,RVM-Based Human Action Classification in Crowd through Projection and Star Skeletonization,EURASIP J. Image and Video Processing,2009
+1c0e8c3fb143eb5eb5af3026eae7257255fcf814,Weakly Supervised Deep Detection Networks,2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR),2016
+8239e4a37825979f66ff0419ccd50a08aebfbadf,Tracing the Colors of Clothing in Paintings with Image Analysis,,2016
+82a922e775ec3a83d2d5637030860f587697ae42,Dense Multiperson Tracking with Robust Hierarchical Linear Assignment,IEEE Transactions on Cybernetics,2015
+82d781b7b6b7c8c992e0cb13f7ec3989c8eafb3d,Robust Facial Expression Recognition Using a State-based Model of Spatially-localized Facial,,
+49a038852b9e51af658405231045559d728e0970,DeepCache: Principled Cache for Mobile Deep Vision,Unknown,2018
+40757d94d6ef33555fc940d556ebfb0d32410fbb,Warmth and competence in your face! Visual encoding of stereotype content,,2013
+4091b6a3ab33e2aa923ee23c8db7e33d167ff67a,Transductive Multi-class and Multi-label Zero-shot Learning,CoRR,2014
+40389b941a6901c190fb74e95dc170166fd7639d,Automatic Facial Expression Recognition,,2014
+40377a1bc15a9ec28ea54cc53d5cf0699365634f,Некооперативная Биометрическая Идентификация По 3d- Моделям Лица С Использованием Видеокамер Высокого Разрешения,,
+40f2b3af6b55efae7992996bd0c474a9c1574008,xytocin Increases Retention of Social Cognition n Autism,,2006
+403e7fed4fa1785af8309b1c4c736d98fa75be5b,Social status gates social attention in monkeys,Current Biology,2006
+40f6c9355dbf01a240b4c26b0fd00b5cfbd5f67d,An eye-tracking method to reveal the link between gazing patterns and pragmatic abilities in high functioning autism spectrum disorders,,2014
+2eb37a3f362cffdcf5882a94a20a1212dfed25d9,Local Feature Based Face Recognition,,2012
+2ea8029283e6bbb03c023070d042cb19647f06af,Neurobiological mechanisms associated with facial affect recognition deficits after traumatic brain injury,Brain Imaging and Behavior,2015
+2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e,3DPeS: 3D people dataset for surveillance and forensics,,2011
+2e708431df3e7a9585a338e1571f078ddbe93a71,Deep Pain: Exploiting Long Short-Term Memory Networks for Facial Expression Classification.,IEEE transactions on cybernetics,2017
+2ba64deeb3e170e4776e2d2704771019cf9c8639,Differences between Old and Young Adults’ Ability to Recognize Human Faces Underlie Processing of Horizontal Information,,2012
+2b4d092d70efc13790d0c737c916b89952d4d8c7,Robust Facial Expression Recognition using Local Haar Mean Binary Pattern,,2017
+2b0e1a62d7168df5f29e2e9c7fc72ae43c39fdb2,Emotion expression modulates perception of animacy from faces,,2017
+2b773fe8f0246536c9c40671dfa307e98bf365ad,Fast Discriminative Stochastic Neighbor Embedding Analysis,,2013
+2b73e3d541b0208ae54b3920fef4bfd9fd0c84a7,Feature-based face representations and image reconstruction from behavioral and neural data.,Proceedings of the National Academy of Sciences of the United States of America,2016
+2b8a61184b6423e3d5285803eb1908ff955db1a8,Processing and analysis of 2 . 5 D face models for non-rigid mapping based face recognition using differential geometry tools,Unknown,2012
+2b8667df1a0332386d8d799fbac0327496ce02c9,Stranger danger: Parenthood increases the envisioned bodily formidability of menacing men,,2014
+470b89e2c5248eb58e09129aa9b4d8bc77497e7e,Neurobiology of Disease Cortical Folding Abnormalities in Autism Revealed by Surface-Based Morphometry,,2007
+4780cece6d4adeb0b070fbefbd587b89f4acf3f7,Shared and idiosyncratic cortical activation patterns in autism revealed under continuous real-life viewing conditions.,Autism research : official journal of the International Society for Autism Research,2009
+47440f514318b438ebf04d9932f5dafdb488a536,Emotion Recognition from Facial Images Using Binary Face Relevance Maps,,2016
+784731961819abc5a5a199be1573abd828bd9af1,Recognizing Emily and Latisha: Inconsistent Effects of Name Stereotypicality on the Other-Race Effect,,2018
+789c76749a15614d97ac8f4ec18b3ce7d80a2d28,Explorer Multiplicative LSTM for sequence modelling,Unknown,2017
+783f3fccde99931bb900dce91357a6268afecc52,Adapted Active Appearance Models,EURASIP J. Image and Video Processing,2009
+8bdbb685174d6023e63c55fdf9ad9b2ac78e79bd,Learning Human Poses from Actions-Supplementary Material,Unknown,2018
+8b0a4d41ee469547163ea154ad2b522d6d335671,The unique contributions of perceiver and target characteristics in person perception.,Journal of personality and social psychology,2017
+8b8b3375bc51ae357528a1f015c4d094418c9f71,"An Efficient Feature Extraction Method, Global Between Maximum and Local Within Minimum, and Its Applications",,2014
+8bb21b1f8d6952d77cae95b4e0b8964c9e0201b0,Multimodal Interaction on a Social Robotic Platform,,2013
+8b5122ea59d8d7f70e344ffb2553537b5ad07dd5,Image Translation by Domain-Adversarial Training,,2018
+13f03aab62fc29748114a0219426613cf3ba76ae,MORPH-II: Feature Vector Documentation,Unknown,2018
+13ec6666b8b722ad9eb68a21a302e3f2f1ab4df7,Biometric Human Identification of Hand Geometry Features Using Discrete Wavelet Transform,,2011
+132527383890565d18f1b7ad50d76dfad2f14972,Facial Expression Classification Using PCA and Hierarchical Radial Basis Function Network,J. Inf. Sci. Eng.,2006
+1394ca71fc52db972366602a6643dc3e65ee8726,EmoReact: a multimodal approach and dataset for recognizing emotional responses in children,,2016
+138778d75fc4e2fd490897ac064b9ac84b6b9f04,Generation and visualization of emotional states in virtual characters,Journal of Visualization and Computer Animation,2008
+13e348264fe1077caa44e1b59c71e67a8e4b5ad9,Effect of Eyes Detection and Position Estimation Methods on the Accuracy of Comparative Testing of Face Detection Algorithms,,2011
+139ee1b1d98e7ac9d659a5d1bbe8c75588539b29,Identification of EFHC2 as a quantitative trait locus for fear recognition in Turner syndrome.,Human molecular genetics,2007
+7f21a7441c6ded38008c1fd0b91bdd54425d3f80,Real Time System for Facial Analysis,CoRR,2018
+7f44f2d7b4a84b6d87dd6f7089ce3ee1e6359272,What's in the Chinese Babyface? Cultural Differences in Understanding the Babyface,,2016
+7f0fadae16cc74b6176ba940aa2f8b5a0a67e09e,An Expert Local Mesh Correlation Histograms for Biomedical Image Indexing and Retrieval,,2015
+7f3c6bf191a8633d10fad32e23fa06a3c925ffee,The benefits of simply observing: mindful attention modulates the link between motivation and behavior.,Journal of personality and social psychology,2015
+7fc76446d2b11fc0479df6e285723ceb4244d4ef,Laplacian MinMax Discriminant Projection and its Applications,Journal of Research and Practice in Information Technology,2010
+7a2cee9a210e7b418fa6169f8cf027f7993a3ee5,LETTER TO THE EDITOR Spontaneous versus deliberate vicarious representations: different routes to empathy in psychopathy and autism,,2014
+7a72ac1c77110d03dc0482f2556e9bdb36582fcb,Following Gaze: Gaze-Following Behavior as a Window into Social Cognition,,2010
+7a00365f9c7bced9ce47246794932f60564cb662,Converging evidence of configural processing of faces in high-functioning adults with autism spectrum disorders,,2008
+143e3ec5a5a11547da2d77a17d0ca7b1940280b5,"People detection, tracking and re-identification through a video camera network. (Détection, suivi et ré-identification de personnes à travers un réseau de caméra vidéo)",Unknown,2013
+147e699946e8c54d2176b4d868db03dd1c7bdb8f,Emotion and False Memory,,2016
+146bbf00298ee1caecde3d74e59a2b8773d2c0fc,University of Groningen 4 D Unconstrained Real - time Face Recognition Using a Commodity Depthh Camera,,2017
+14e759cb019aaf812d6ac049fde54f40c4ed1468,Subspace Methods,,2014
+14418ae9a6a8de2b428acb2c00064da129632f3e,Discovering the Spatial Extent of Relative Attributes,2015 IEEE International Conference on Computer Vision (ICCV),2015
+1412f4024babbc01b671f7ee4a22d86db1545268,Proximity and gaze influences facial temperature: a thermal infrared imaging study,,2014
+8e33183a0ed7141aa4fa9d87ef3be334727c76c0,Robustness of Face Recognition to Image Manipulations,,2018
+8edb2219370a86c4277549813d36a6c139503fb4,Facial feature units’ localization using horizontal information of most significant bit planes,,2011
+8eb2e7c9017b4a110978a1bb504accbc7b9ba211,Marching into battle: synchronized walking diminishes the conceptualized formidability of an antagonist in men.,Biology letters,2014
+8e8c511ebc12a093d3f73a4717ec71c32e4dbd49,The use of visual information in the recognition of posed and spontaneous facial expressions.,Journal of vision,2018
+8e4ff1aa78f8997b683f873c46999f384db4de18,Renewing the respect for similarity,,2012
+8e88a97e09a853cf768ca1c732ba5f008fff77ca,Regularized Residual Quantization: a multi-layer sparse dictionary learning approach,CoRR,2017
+2258e01865367018ed6f4262c880df85b94959f8,Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics,EURASIP J. Image and Video Processing,2008
+2270c94d3f9d9451b3d337aa5ba2d5681cb98497,Evaluation of GIST descriptors for web-scale image search,,2009
+22f21d58c6aecdb4f57c50fa9eb4952643eec0e9,Domain Transfer Support Vector Ranking for Person Re-identification without Target Camera Label Information,2013 IEEE International Conference on Computer Vision,2013
+22532c6e38ded690dc1420f05c18e23f6f24804d,Chapter 5 Genetic & Evolutionary Biometrics,Unknown,2017
+2251a1efad0cef802fd64fc79cc1b7007b64f425,Estimating 3D Pose via Stochastic Search and Expectation Maximization,,2010
+2534997443c7e183c9f8e370ea1e82989ecc940d,Skeleton Search: Category-Specific Object Recognition and Segmentation Using a Skeletal Shape Model,International Journal of Computer Vision,2009
+25474c21613607f6bb7687a281d5f9d4ffa1f9f3,Recognizing disguised faces,,2012
+252e48be0fd63d3a786021efa8733f8891101a82,Unsupervised Feature Learning With Winner-Takes-All Based STDP,,2018
+253d2fd2891a97d4caa49d87094dac1ec18c7752,Bio-authentication for Layered Remote Health Monitor Framework,Unknown,2014
+25e2d3122d4926edaab56a576925ae7a88d68a77,Communicative-Pragmatic Treatment in Schizophrenia: A Pilot Study,,2016
+257e008c01a32b9b642553f3f1e59e61efcac4a6,Gender discrimination of eyes and mouths by individuals with autism.,Autism research : official journal of the International Society for Autism Research,2010
diff --git a/scraper/reports/stats/unknown_papers.csv b/scraper/reports/stats/unknown_papers.csv
new file mode 100644
index 00000000..2e064804
--- /dev/null
+++ b/scraper/reports/stats/unknown_papers.csv
@@ -0,0 +1,61110 @@
+6163381244823241373f6741a282f2c4a868b59c,Multimodal biometrics for identity documents (MBioID).,"Multimodal Biometrics for Identity
+Documents 1
+State-of-the-Art
+Research Report
+PFS 341-08.05
+(Version 2.0)
+Damien Dessimoz
+Prof. Christophe Champod
+Jonas Richiardi
+Dr. Andrzej Drygajlo
+{damien.dessimoz,
+{jonas.richiardi,
+June 2006
+This project was sponsored by the Foundation Banque Cantonale Vaudoise."
+610e0bee525a6573932e077f091505f54a5c4ede,"The Wisdom of MaSSeS: Majority, Subjectivity, and Semantic Similarity in the Evaluation of VQA","Majority, Subjectivity, and Semantic Similarity in the Evaluation of VQA
+The Wisdom of MaSSeS:
+Shailza Jolly∗
+SAP SE, Berlin
+TU Kaiserslautern
+Sandro Pezzelle∗
+SAP SE, Berlin
+CIMeC - University of Trento
+Tassilo Klein
+SAP SE, Berlin
+Andreas Dengel
+DFKI, Kaiserslautern
+CS Department, TU Kaiserslautern
+Moin Nabi
+SAP SE, Berlin"
+61c4969c78cff37357ac794af5ac8e439751b39f,Midrange Geometric Interactions for Semantic Segmentation,"Int J Comput Vis
+DOI 10.1007/s11263-015-0828-7
+Midrange Geometric Interactions for Semantic Segmentation
+Constraints for Continuous Multi-label Optimization
+Julia Diebold1 · Claudia Nieuwenhuis2 · Daniel Cremers1
+Received: 1 June 2014 / Accepted: 15 May 2015
+© Springer Science+Business Media New York 2015"
+610a4451423ad7f82916c736cd8adb86a5a64c59,A Survey on Search Based Face Annotation Using Weakly Labelled Facial Images,"Volume 4, Issue 11, November 2014 ISSN: 2277 128X
+International Journal of Advanced Research in
+Computer Science and Software Engineering
+Research Paper
+Available online at: www.ijarcsse.com
+A Survey on Search Based Face Annotation Using Weakly
+Labelled Facial Images
+Shital A. Shinde*, Prof. Archana Chaugule
+Department of Computer Engg, DYPIET Pimpri,
+Savitri Bai Phule Pune University, Maharashtra India"
+61366c2eed49519e3adef44e8b7146db1fcc2113,Convex NMF on Non-Convex Massiv Data,"Convex NMF on Non-Convex Massiv Data
+Kristian Kersting1 and Mirwaes Wahabzada1 and Christian Thurau2 and Christian Bauckhage2
+Knowledge Discovery Department, 2Vision and Social Media Group
+Fraunhofer IAIS, Schloss Birlinghoven, 53754 Sankt Augustin, Germany"
+617c4e23fc7ca51d98dacb28779214b3e79e9720,Open-Ended Visual Question-Answering,"Open-Ended Visual
+Question-Answering
+Escola T`ecnica Superior d’Enginyeria de Telecomunicaci´o de Barcelona
+Submitted to the Faculty of the
+A Degree Thesis
+In partial fulfilment
+of the requirements for the degree in
+SCIENCE AND TELECOMMUNICATION TECHNOLOGIES
+ENGINEERING
+Author:
+Advisors: Xavier Gir´o i Nieto, Santiago Pascual de la Puente
+Issey Masuda Mora
+Universitat Polit`ecnica de Catalunya (UPC)
+June 2016"
+61e97d8440627bdc9772b3b2083c65f44a51107d,Oxytocin and vasopressin in the human brain: social neuropeptides for translational medicine,"R E V I E W S
+Oxytocin and vasopressin in the
+human brain: social neuropeptides
+for translational medicine
+Andreas Meyer‑Lindenberg*, Gregor Domes‡, Peter Kirsch* and Markus Heinrichs‡"
+618c13f1e13cc5346ed5c069a77acaa720b6a1a8,Learning More Universal Representations for Transfer-Learning,"SUBMISSION TO PAMI, SEPTEMBER 2018
+Learning More Universal Representations
+for Transfer-Learning
+Youssef Tamaazousti, Hervé Le Borgne, Céline Hudelot, Mohamed-El-Amine Seddik
+nd Mohamed Tamaazousti"
+619eaaa60f0194d456591983a6f26b04cd9e9a52,"Munafo, M. (2017). Impaired Recognition of Basic Emotions from Facial Expressions in Young People with Autism Spectrum Disorder: Assessing the Importance of Expression","Griffiths, S. L., Jarrold, C., Penton-Voak, I., Woods, A., Skinner, A., &
+Munafo, M. (2017). Impaired Recognition of Basic Emotions from Facial
+Expressions in Young People with Autism Spectrum Disorder: Assessing the
+Importance of Expression Intensity. Journal of Autism and Developmental
+Disorders. DOI: 10.1007/s10803-017-3091-7
+Publisher's PDF, also known as Version of record
+Link to published version (if available):
+0.1007/s10803-017-3091-7
+Link to publication record in Explore Bristol Research
+PDF-document
+This is the final published version of the article (version of record). It first appeared online via Springer at
+http://link.springer.com/article/10.1007%2Fs10803-017-3091-7. Please refer to any applicable terms of use of
+the publisher.
+University of Bristol - Explore Bristol Research
+General rights
+This document is made available in accordance with publisher policies. Please cite only the published
+version using the reference above. Full terms of use are available:
+http://www.bristol.ac.uk/pure/about/ebr-terms"
+61f4e08b938986ea80f711c73cadbc84e1811181,Development of an Efficient Face Recognition System based on Linear and Nonlinear Algorithms,"Development of an Efficient Face Recognition System based on Linear and Nonlinear Algorithms
+{tag} {/tag}
+International Journal of Computer Applications
+Foundation of Computer Science (FCS), NY, USA
+Volume 134
+Number 7
+Year of Publication: 2016
+Authors:
+Filani Araoluwa S., Adetunmbi Adebayo O.
+10.5120/ijca2016907932
+{bibtex}2016907932.bib{/bibtex}"
+61764c068ad7d2ec988e6ec315d6ed2ed7489c2e,PhD Forum: Dynamic Camera Positioning and Reconfiguration for Multi Camera Networks,"Dynamic Camera Positioning and
+Reconfiguration for Multi Camera
+Networks
+Krishna Reddy Konda
+Advisor: Dr Nicola Conci
+February 2015"
+610c341985633b2d31368f8642519953c39ff7e8,Computational Load Balancing on the Edge in Absence of Cloud and Fog,"Computational Load Balancing on the Edge in Absence of Cloud
+nd Fog
+Citation for published version:
+Sthapit, S, Thompson, J, Robertson, NM & Hopgood, J 2018, 'Computational Load Balancing on the Edge
+in Absence of Cloud and Fog' IEEE Transactions on Mobile Computing. DOI: 10.1109/TMC.2018.2863301
+Digital Object Identifier (DOI):
+0.1109/TMC.2018.2863301
+Link:
+Link to publication record in Edinburgh Research Explorer
+Document Version:
+Peer reviewed version
+Published In:
+IEEE Transactions on Mobile Computing
+General rights
+Copyright for the publications made accessible via the Edinburgh Research Explorer is retained by the author(s)
+nd / or other copyright owners and it is a condition of accessing these publications that users recognise and
+bide by the legal requirements associated with these rights.
+Take down policy
+The University of Edinburgh has made every reasonable effort to ensure that Edinburgh Research Explorer
+ontent complies with UK legislation. If you believe that the public display of this file breaches copyright please"
+6180bc0816b1776ca4b32ced8ea45c3c9ce56b47,Fast Randomized Algorithms for Convex Optimization and Statistical Estimation,"Fast Randomized Algorithms for Convex Optimization and
+Statistical Estimation
+Mert Pilanci
+Electrical Engineering and Computer Sciences
+University of California at Berkeley
+Technical Report No. UCB/EECS-2016-147
+http://www.eecs.berkeley.edu/Pubs/TechRpts/2016/EECS-2016-147.html
+August 14, 2016"
+61f04606528ecf4a42b49e8ac2add2e9f92c0def,Deep Deformation Network for Object Landmark Localization,"Deep Deformation Network for Object Landmark
+Localization
+Xiang Yu, Feng Zhou and Manmohan Chandraker
+NEC Laboratories America, Department of Media Analytics"
+61c4b35443b152679c923d5db6c26daaec304172,Fast and stable human detection using multiple classifiers based on subtraction stereo with HOG features,"Fast and Stable Human Detection Using Multiple Classifiers
+Based on Subtraction Stereo with HOG Features
+Makoto Arie, Alessandro Moro, Yuma Hoshikawa, Toru Ubukata, Kenji Terabayashi, Kazunori Umeda"
+6106028c73d22570a01212814e1e4f4edb4abed6,Counting moving people in crowds using motion statistics of feature-points,"Multimed Tools Appl
+DOI 10.1007/s11042-013-1367-2
+Counting moving people in crowds using motion
+statistics of feature-points
+Mahdi Hashemzadeh· Gang Pan· Min Yao
+© Springer Science+Business Media New York 2013"
+617a6935643615f09ef2b479609baa0d5f87cd67,To Be Taken At Face Value? Computerised Identification,"Information and Communications Technology Law
+To Be Taken At Face Value?
+Computerised Identification
+Michael Bromby
+Joseph Bell Centre for Forensic Statistics and Legal Reasoning
+Glasgow Caledonian University and University of Edinburgh
+Scientific evidence such as fingerprints, blood, hair and DNA samples are often
+presented during legal proceedings. Without such evidence, a description provided by
+the victim or any eyewitnesses is often the only means to identify a suspect. With the
+dvent of closed circuit television (CCTV), many crimes are now recorded by
+ameras in the public or private domain, leading to a new form of forensic
+identification – facial biometrics. Decisions on how to view and interpret biometric
+evidence are important for both prosecution and defence, not least for the judge and
+jury who must decide the case. A jury may accept eyewitnesses as reliable sources of
+evidence more readily
+False
+eyewitness accounts appear reliable when confidently presented to a mock jury. The
+decision-making process of the judge and jury may be seriously flawed if an
+eyewitness has made a genuine mistake. Using computerised recognition, the judicial
+decision of whether to accept an alibi or whether to accept the eyewitness account"
+614a7c42aae8946c7ad4c36b53290860f6256441,Joint Face Detection and Alignment Using Multitask Cascaded Convolutional Networks,"Joint Face Detection and Alignment using
+Multi-task Cascaded Convolutional Networks
+Kaipeng Zhang, Zhanpeng Zhang, Zhifeng Li, Senior Member, IEEE, and Yu Qiao, Senior Member, IEEE"
+617b719e6c31cdfe7c5c485a755435b95f0c4991,Visual Classification of Images by Learning Geometric Appearances Through Boosting,"Visual Classification of Images by Learning
+Geometric Appearances through Boosting
+Martin Antenreiter, Christian Savu-Krohn, and Peter Auer
+Chair of Information Technology (CiT)
+University of Leoben, Austria"
+6155d504d59c52dc3a6b8ad6aeae8bf249afd5ac,Analysis of Feature Fusion Based on HIK SVM and Its Application for Pedestrian Detection,Hindawi Publishing Corporation
+61c07d7387dcbfb8fa697f15316e3b265d78a2fa,Multi-modal Approach for Affective Computing,"Multi-modal Approach for Affective Computing
+Siddharth1,2, Tzyy-Ping Jung2 and Terrence J. Sejnowski2"
+619f9c1552f8f4f7c5927a7369c79e34d6294083,A Volumetric / Iconic Frequency DomainRepresentation,"AVolumetric/IconicFrequencyDomain
+RepresentationforObjects
+withapplicationfor
+PoseInvariantFaceRecognition
+AppearedinIEEETrans.onPatternAnalysisandMachineIntelligence
+Vol.
+JezekielBen-ArieandDibyenduNandy
+DepartmentofElectricalEngineeringandComputerScience
+TheUniversityofIllinoisatChicago
+ContactAddress:
+Dr.JezekielBen-Arie
+TheUniversityofIllinoisatChicago
+DepartmentofElectricalEngineeringandComputerScience(M/C)
+SouthMorganStreetChicago,IL
+Phone:() -
+Fax:() -
+ThisworkwassupportedbytheNationalScienceFoundationunderGrantNo.IRI-  
+ndGrantNo.IRI-  ."
+61b0cfd75f5bce59cf79abb7b602e404fa5584e7,Person Re-Identification by Semantic Region Representation and Topology Constraint,"IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY
+Person Re-Identification by Semantic Region
+Representation and Topology Constraint
+Jianjun Lei, Senior Member, IEEE, Lijie Niu, Huazhu Fu, Senior Member, IEEE, Bo Peng,
+Qingming Huang, Fellow, IEEE, and Chunping Hou"
+614f4f8fe47e7c0bcf64aa0ad39dc371e4b4ab7b,promoting access to White Rose research papers,"promoting access to White Rose research papers
+Universities of Leeds, Sheffield and York
+http://eprints.whiterose.ac.uk/
+This is an author produced version of a paper published in Journal of Autism
+nd Developmental Disorders.
+White Rose Research Online URL for this paper:
+http://eprints.whiterose.ac.uk/10325
+Published paper
+Freeth, M., Chapman, P., Ropar, D., Mitchell, P. (2010) Do gaze cues in complex
+scenes capture and direct the attention of high functioning adolescents with ASD?
+evidence from eye-tracking, Journal of Autism and Developmental Disorders (In
+Press)
+http://dx.doi.org/10.1007/s10803-009-0893-2
+White Rose Research Online"
+617253f275f14490c61dc9d8cb23ceb9c9d4ba35,A coarse-to-fine curvature analysis-based rotation invariant 3D face landmarking,"A coarse-to-fine curvature analysis-based rotation invariant 3D face
+landmarking
+Przemyslaw Szeptycki, Mohsen Ardabilian and Liming Chen"
+61f0cb2e3fdc6a5d0719184e51d2dc483a945ac1,Bilinear Attention Networks,"Bilinear Attention Networks
+Jin-Hwa Kim1∗, Jaehyun Jun2, Byoung-Tak Zhang2,3
+SK T-Brain, 2Seoul National University, 3Surromind Robotics"
+61b17f719bab899dd50bcc3be9d55673255fe102,Detecting Sarcasm in Multimodal Social Platforms,"Detecting Sarcasm in Multimodal Social Platforms
+Rossano Schifanella
+University of Turin
+Corso Svizzera 185
+0149, Turin, Italy
+Paloma de Juan
+Yahoo
+29 West 43rd Street
+New York, NY 10036
+Joel Tetreault
+Yahoo
+29 West 43rd Street
+New York, NY 10036
+Liangliang Cao
+Yahoo
+29 West 43rd Street
+New York, NY 10036
+inc.com"
+61bab86023de164bca3e35fc22944a7262970e1d,Child Facial Expression Detection,"CHILD FACIAL EXPRESSION
+DETECTION
+Eden Benhamou
+Deborah Wolhandler
+Supervisors:
+Alon Zvirin
+Michal Zivan
+Spring 2018"
+61dfebbb02dad16b56cd9e6c54b5da3ab41caf1c,Exploiting Local Class Information in Extreme Learning Machine,"Iosifidis, A., Tefas, A., & Pitas, I. (2014). Exploiting Local Class Information
+in Extreme Learning Machine. Paper presented at International Joint
+Conference on Computational Intelligence (IJCCI), Rome, Italy.
+Peer reviewed version
+Link to publication record in Explore Bristol Research
+PDF-document
+University of Bristol - Explore Bristol Research
+General rights
+This document is made available in accordance with publisher policies. Please cite only the published
+version using the reference above. Full terms of use are available:
+http://www.bristol.ac.uk/pure/about/ebr-terms"
+611f9faa6f3aeff3ccd674d779d52c4f9245376c,Multiresolution Models for Object Detection,"Multiresolution models for object detection
+Dennis Park, Deva Ramanan, and Charless Fowlkes
+UC Irvine, Irvine CA 92697, USA,"
+0d1a87dad1e4538cc7bd3c923767c8bf1a9b779f,The Riemannian Geometry of Deep Generative Models,"The Riemannian Geometry of Deep Generative Models
+Hang Shao
+University of Utah
+Salt Lake City, UT
+Abhishek Kumar
+IBM Research AI
+Yorktown Heights, NY
+P. Thomas Fletcher
+University of Utah
+Salt Lake City, UT"
+0db787317ba0d63ec8f9918905e7db181a489026,Automatic Eye Localization in Color Images,"Automatic Eye Localization in Color Images
+José Gilvan Rodrigues Maia1, Fernando de Carvalho Gomes1, Osvaldo de Souza2
+Departamento de Computação – Universidade Federal do Ceará (UFC)
+Depto de Engenharia de Teleinformática – Universidade Federal do Ceará (UFC)
+60455-760 – Fortaleza – CE – Brasil
+{gilvan,"
+0d88ab0250748410a1bc990b67ab2efb370ade5d,Error handling in multimodal biometric systems using reliability measures,"Author(s) :
+ERROR HANDLING IN MULTIMODAL BIOMETRIC SYSTEMS USING
+RELIABILITY MEASURES (ThuPmOR6)
+(EPFL, Switzerland)
+(EPFL, Switzerland)
+(EPFL, Switzerland)
+(EPFL, Switzerland)
+Krzysztof Kryszczuk
+Jonas Richiardi
+Plamen Prodanov
+Andrzej Drygajlo"
+0d82013cbe9f65ddb34e5d99eab730fce4f0effe,A system based on sequence learning for event detection in surveillance video,"978-1-4799-2341-0/13/$31.00 ©2013 IEEE
+ICIP 2013"
+0d538084f664b4b7c0e11899d08da31aead87c32,Deformable Part Descriptors for Fine-Grained Recognition and Attribute Prediction,"Deformable Part Descriptors for
+Fine-grained Recognition and Attribute Prediction
+Ning Zhang1
+Ryan Farrell1,2
+Forrest Iandola1
+ICSI / UC Berkeley 2Brigham Young University
+Trevor Darrell1"
+0dcdef6b8d97483f4d4dab461e1cb5b3c4d1fe1a,Probabilistic Semantic Inpainting with Pixel Constrained CNNs,"Probabilistic Semantic Inpainting with Pixel Constrained CNNs
+Emilien Dupont
+Suhas Suresha
+Schlumberger Software Technology Innovation Center"
+0dccc881cb9b474186a01fd60eb3a3e061fa6546,Effective face frontalization in unconstrained images,"Effective Face Frontalization in Unconstrained Images
+Tal Hassner1, Shai Harel1 †, Eran Paz1 † and Roee Enbar2
+The open University of Israel. 2Adience.
+Figure 1: Frontalized faces. Top: Input photos; bottom: our frontalizations,
+obtained without estimating 3D facial shapes.
+“Frontalization” is the process of synthesizing frontal facing views of faces
+ppearing in single unconstrained photos. Recent reports have suggested
+that this process may substantially boost the performance of face recogni-
+tion systems. This, by transforming the challenging problem of recognizing
+faces viewed from unconstrained viewpoints to the easier problem of rec-
+ognizing faces in constrained, forward facing poses. Previous frontalization
+methods did this by attempting to approximate 3D facial shapes for each
+query image. We observe that 3D face shape estimation from unconstrained
+photos may be a harder problem than frontalization and can potentially in-
+troduce facial misalignments. Instead, we explore the simpler approach of
+using a single, unmodified, 3D surface as an approximation to the shape of
+ll input faces. We show that this leads to a straightforward, efficient and
+easy to implement method for frontalization. More importantly, it produces
+esthetic new frontal views and is surprisingly effective when used for face
+recognition and gender estimation."
+0d96c9d14f079b7b8b6b56b4fa86f611a4ff237f,Semi-supervised low-rank mapping learning for multi-label classification,"Semi-supervised Low-Rank Mapping Learning for Multi-label Classification
+Liping Jing1, Liu Yang1, Jian Yu1, Michael K. Ng2
+Beijing Key Lab of Traffic Data Analysis and Mining, Beijing Jiaotong University. 2Department of Mathematics, Hong Kong Baptist University.
+With the rapid growth of online content such as images, videos, web pages,
+it is crucial to design a scalable and effective classification system to au-
+tomatically organize, store, and search the content. In conventional clas-
+sification, each instance is assumed to belong to exactly one class among
+finite number of candidate classes. However, in modern applications, an
+instance can have multiple labels. For example, an image can be annotated
+y many conceptual tags in semantic scene classification. Multi-label data
+have ubiquitously occurred in many application domains: multimedia infor-
+mation retrieval, tag recommendation, query categorization, gene function
+prediction, medical diagnosis, drug discovery and marketing. An important
+nd challenging research problem [1, 4] in multi-label learning is how to
+exploit and make use of label correlations.
+In this paper, we develop a novel method for multi-label learning when
+there is only a small number of labeled data. Our main idea is to design
+Semi-supervised Low-Rank Mapping (SLRM) from a feature space to a
+label space based on given multi-label data. More specifically, the SLRM
+model can be formularized as"
+0d6b28691e1aa2a17ffaa98b9b38ac3140fb3306,Review of Perceptual Resemblance of Local Plastic Surgery Facial Images using Near Sets,"Review of Perceptual Resemblance of Local
+Plastic Surgery Facial Images using Near Sets
+Prachi V. Wagde1, Roshni Khedgaonkar2
+,2 Department of Computer Technology,
+YCCE Nagpur, India"
+0dc2fdf1b97c76de1e7380e8126f8acc7d87e23a,Robust PCA Via Nonconvex Rank Approximation,"Robust PCA via Nonconvex Rank Approximation
+Department of Computer Science, Southern Illinois University, Carbondale, IL 62901, USA
+Zhao Kang, Chong Peng, Qiang Cheng
+{zhao.kang, pchong,"
+0d2a9f3357717e0a44eb82d5eabfc047cc4d46f1,Classifier Ensembles with Trajectory Under-Sampling for Face Re-Identification,"Classifier Ensembles with Trajectory Under-Sampling
+for Face Re-Identification
+Roghayeh Soleymani1, Eric Granger1 and Giorgio Fumera2
+Laboratoire d’imagerie, de vision et d’intelligence artificielle, École de technologie supérieure,
+Pattern Recognition and Applications Group, Dept. of Electrical and Electronic Engineering, University of
+Université du Québec, Montreal, Canada
+Cagliari,Cagliari, Italy
+Keywords:
+Person Re-Identification, Class Imbalance, Ensemble Methods."
+0dab1ab19a44b73ce0fdd15014b635eb7362af3c,Reinforcement Cutting-Agent Learning for Video Object Segmentation,"Reinforcement Cutting-Agent Learning for Video Object Segmentation
+Junwei Han1, Le Yang1, Dingwen Zhang1
+, Xiaojun Chang3, Xiaodan Liang3
+Northwestern Polytechincal University, 2Xidian University, 3Carnegie Mellon University"
+0d7ddcf97b1341d8d4bbc4718f4ca3094e994a1f,Homographic Active Shape Models for View-Independent Facial Analysis,"Homographic Active Shape Models for View-Independent
+Facial Analysis
+Federico M. Sukno12 and Jos´e J. Guerrero32 and Alejandro F. Frangi1
+Department of Technology, Pompeu Fabra University, Barcelona, Spain;
+Aragon Institute of Engineering Research, University of Zaragoza, Spain;
+Computer Science and System Engineering Department, University of Zaragoza, Spain"
+0dd74bbda5dd3d9305636d4b6f0dad85d6e19572,Heterogeneous Face Attribute Estimation: A Deep Multi-Task Learning Approach,"Heterogeneous Face Attribute Estimation:
+A Deep Multi-Task Learning Approach
+Hu Han, Member, IEEE, Anil K. Jain, Fellow, IEEE, Fang Wang,
+Shiguang Shan, Senior Member, IEEE and Xilin Chen, Fellow, IEEE"
+0d07db3510c7f9c2ceab65444cb8fc8ec49197b2,Learning-based Composite Metrics for Improved Caption Evaluation,"Learning-based Composite Metrics for Improved Caption Evaluation
+Naeha Sharif, Lyndon White, Mohammed Bennamoun and Syed Afaq Ali Shah,
+{naeha.sharif,
+nd {mohammed.bennamoun,
+The University of Western Australia.
+5 Stirling Highway, Crawley, Western Australia"
+0d130b5536bb1b909ff9a62737d768d4b4fab2f6,Semantic Segmentation with Scarce Data,"Semantic Segmentation with Scarce Data
+Isay Katsman * 1 Rohun Tripathi * 1 Andreas Veit 1 Serge Belongie 1"
+0d3882b22da23497e5de8b7750b71f3a4b0aac6b,Context is routinely encoded during emotion perception.,"Research Article
+Context Is Routinely Encoded
+During Emotion Perception
+1(4) 595 –599
+© The Author(s) 2010
+Reprints and permission:
+sagepub.com/journalsPermissions.nav
+DOI: 10.1177/0956797610363547
+http://pss.sagepub.com
+Lisa Feldman Barrett1,2,3 and Elizabeth A. Kensinger1,3
+Boston College; 2Psychiatric Neuroimaging Program, Massachusetts General Hospital, Harvard Medical School; and 3Athinoula A. Martinos
+Center for Biomedical Imaging, Massachusetts General Hospital, Harvard Medical School"
+0d185e6de595bd3844909d3606e9218a498a9bd8,Trace optimization and eigenproblems in dimension reduction methods,"TRACE OPTIMIZATION AND EIGENPROBLEMS IN DIMENSION
+REDUCTION METHODS
+E. KOKIOPOULOU∗, J. CHEN†, AND Y. SAAD†"
+0d90d046db16d3d5ce70590e6dab32cdd58928f6,A robust feature extraction algorithm based on class-Modular Image Principal Component Analysis for face verification,"978-1-4577-0539-7/11/$26.00 ©2011 IEEE
+ICASSP 2011"
+0d52f1ae438a395fadebf04990d0d1750cdd0218,Face Recognition in Various Illuminations,"Saurabh D. Parmar et al Int. Journal of Engineering Research and Applications www.ijera.com
+ISSN : 2248-9622, Vol. 4, Issue 5( Version 5), May 2014, pp.98-102
+RESEARCH ARTICLE
+Face Recognition in Various Illuminations
+Saurabh D. Parmar, Vaishali J. Kalariya
+Research Scholar, CE/IT Department-School of Engineering, R.K. University, Rajkot
+Professor, CE/IT Department-School of Engineering, R.K. University, Rajkot
+OPEN ACCESS"
+0d760e7d762fa449737ad51431f3ff938d6803fe,LCDet: Low-Complexity Fully-Convolutional Neural Networks for Object Detection in Embedded Systems,"LCDet: Low-Complexity Fully-Convolutional Neural Networks for
+Object Detection in Embedded Systems
+Subarna Tripathi
+UC San Diego ∗
+Gokce Dane
+Qualcomm Inc.
+Byeongkeun Kang
+UC San Diego
+Vasudev Bhaskaran
+Qualcomm Inc.
+Truong Nguyen
+UC San Diego"
+0d30a662061a495e4b5aeb92a2edfac868b225ea,Quantification of Emotions for Facial Expression: Generation of Emotional Feature Space Using Self-Mapping,"Chapter 7
+Quantification of Emotions for Facial Expression:
+Generation of Emotional Feature Space Using Self-
+Mapping
+Masaki Ishii, Toshio Shimodate, Yoichi Kageyama,
+Tsuyoshi Takahashi and Makoto Nishida
+Additional information is available at the end of the chapter
+http://dx.doi.org/10.5772/51136
+. Introduction
+Facial expression recognition for the purpose of emotional communication between humans
+nd machines has been investigated in recent studies [1-7].
+The shape (static diversity) and motion (dynamic diversity) of facial components, such as
+the eyebrows, eyes, nose, and mouth, manifest expression. From the viewpoint of static di‐
+versity, owing to the individual variation in facial configurations, it is presumed that a facial
+expression pattern due to the manifestation of a facial expression includes subject-specific
+features. In addition, from the viewpoint of dynamic diversity, because the dynamic
+hanges in facial expressions originate from subject-specific facial expression patterns, it is
+presumed that the displacement vector of facial components has subject-specific features.
+On the other hand, although an emotionally generated facial expression pattern of an indi‐
+vidual is unique, internal emotions expressed and recognized by humans via facial expres‐"
+0d48c282737793b234c56382053cc69cdddeccb0,A Poodle or a Dog? Evaluating Automatic Image Annotation Using Human Descriptions at Different Levels of Granularity,"Proceedings of the 25th International Conference on Computational Linguistics, pages 38–45,
+Dublin, Ireland, August 23-29 2014."
+0dd151d003ac9b7f3d6936ccdd5ff38fce76c29f,A Review and Comparison of Measures for Automatic Video Surveillance Systems,"Hindawi Publishing Corporation
+EURASIP Journal on Image and Video Processing
+Volume 2008, Article ID 824726, 30 pages
+doi:10.1155/2008/824726
+Research Article
+A Review and Comparison of Measures for
+Automatic Video Surveillance Systems
+Axel Baumann, Marco Boltz, Julia Ebling, Matthias Koenig, Hartmut S. Loos, Marcel Merkel,
+Wolfgang Niem, Jan Karl Warzelhan, and Jie Yu
+Corporate Research, Robert Bosch GmbH, D-70049 Stuttgart, Germany
+Correspondence should be addressed to Julia Ebling,
+Received 30 October 2007; Revised 28 February 2008; Accepted 12 June 2008
+Recommended by Andrea Cavallaro
+Today’s video surveillance systems are increasingly equipped with video content analysis for a great variety of applications.
+However, reliability and robustness of video content analysis algorithms remain an issue. They have to be measured against
+ground truth data in order to quantify the performance and advancements of new algorithms. Therefore, a variety of measures
+have been proposed in the literature, but there has neither been a systematic overview nor an evaluation of measures for
+specific video analysis tasks yet. This paper provides a systematic review of measures and compares their effectiveness for specific
+spects, such as segmentation, tracking, and event detection. Focus is drawn on details like normalization issues, robustness, and
+representativeness. A software framework is introduced for continuously evaluating and documenting the performance of video"
+0d0cee830772c3b2b274bfb5c3ad0ee42d8a0a57,Multimodal Convolutional Neural Networks for Matching Image and Sentence,"Multimodal Convolutional Neural Networks for Matching Image and Sentence
+Lin Ma
+Zhengdong Lu
+Lifeng Shang
+Hang Li
+{Lu.Zhengdong, Shang.Lifeng,
+Noah’s Ark Lab, Huawei Technologies"
+0dd72887465046b0f8fc655793c6eaaac9c03a3d,Real-Time Head Orientation from a Monocular Camera Using Deep Neural Network,"Real-time Head Orientation from a Monocular
+Camera using Deep Neural Network
+Byungtae Ahn, Jaesik Park, and In So Kweon
+KAIST, Republic of Korea"
+0dc34e186e8680336e88c3b5e73cde911a8774b8,Image Classification Using Naive Bayes Classifier With Pairwise Local Observations,"JOURNAL OF INFORMATION SCIENCE AND ENGINEERING 32, XXXX-XXXX (2017)
+Image Classification Using Naive Bayes Classifier With
+Pairwise Local Observations
+SHIH-CHUNG HSU1, I-CHIEH CHEN1 AND CHUNG-LIN HUANG2
+Department of Electrical Engineering, National Tsing-Hua University, Hsin-Chu, Taiwan
+Department of M-Commerce and Multimedia Applications, Asia Univ., Tai-Chung, Taiwan
+E-mail:
+We propose a pairwise local observation-based Naive Bayes (NBPLO) classifier for
+image classification. First, we find the salient regions (SRs) and the Keypoints (KPs) as
+the local observations. Second, we describe the discriminative pairwise local observations
+using Bag-of-features (BoF) histogram. Third, we train the object class models by using
+random forest to develop the NBPLO classifier for image classification. The two major
+ontributions in this paper are multiple pairwise local observations and regression object
+lass model training for NBPLO classifier. In the experiments, we test our method using
+Scene-15 and Caltech-101 database and compare the results with the other methods.
+Keywords: Local observation-based Naive Bayes classifier (NBPLO), Salient Region(SR),
+Keypoint(KP), Bag-of-feature(BoF).
+. INTRODUCTION
+Image classification has been a challenging unsolved problem due to the complexity of
+image contents. It has been a popular research subject of many recently published re-"
+0d0199e48d22ff4b80c983e3b28532f908467da7,Linear regression motion analysis for unsupervised temporal segmentation of human actions,"Linear Regression Motion Analysis for Unsupervised Temporal
+Segmentation of Human Actions
+Simon Jones, Ling Shao
+Department of Electronic and Electrical Engineering
+The University of Shef‌f‌ield, Mappin St, Shef‌f‌ield, S1 3JD, UK"
+0d30066576c029cd888d7c759349379bdb0e88c2,"How Much Information Kinect Facial Depth Data Can Reveal About Identity, Gender and Ethnicity?","How Much Information Kinect Facial Depth
+Data Can Reveal about Identity, Gender and
+Ethnicity?
+Elhocine Boutellaaa;b, Messaoud Bengherabia, Samy Ait-Aoudiab, Abdenour
+Hadidc
+Centre de D(cid:19)eveloppement des Technologies Avanc(cid:19)ees (DZ),
+Ecole Nationale Sup(cid:18)erieure d’Informatique (DZ),
+University of Oulu (FI)"
+0d076edd62e258316bc310fafcec88db3ab85434,Automatic detection and tracking of pedestrians from a moving stereo rig,"Automatic detection and tracking of pedestrians from a
+moving stereo rig
+Konrad Schindlera, Andreas Essb, Bastian Leibec, Luc Van Goolb,d
+Photogrammetry and Remote Sensing, ETH Z¨urich, Switzerland
+Computer Vision Lab, ETH Z¨urich, Switzerland
+UMIC research centre, RWTH Aachen, Germany
+dESAT/PSI–VISICS, IBBT, KU Leuven, Belgium"
+0da611ca979327840161df87564fd07299c268b5,Bodyprint: Biometric User Identification on Mobile Devices Using the Capacitive Touchscreen to Scan Body Parts,"Bodyprint
+Biometric User Identification on Mobile Devices
+Using the Capacitive Touchscreen to Scan Body Parts
+Christian Holz
+Senaka Buthpitiya
+Marius Knaust"
+0d82ac80275283c3dd26aca9e629ee6a9ca8a07a,An object-based semantic world model for long-term change detection and semantic querying,"An Object-Based Semantic World Model for
+Long-Term Change Detection and Semantic Querying
+Julian Mason and Bhaskara Marthi"
+0dfb47e206c762d2f4caeb99fd9019ade78c2c98,Custom Pictorial Structures for Re-identification,"CHENG et al.: CUSTOM PICTORIAL STRUCTURES FOR RE-IDENTIFICATION
+Custom Pictorial Structures for
+Re-identification
+Dong Seon Cheng1
+Marco Cristani1,2
+Michele Stoppa2
+Loris Bazzani1
+Vittorio Murino1,2
+http://profs.sci.univr.it/~swan
+Dipartimento di Informatica
+University of Verona
+Italy
+Istituto Italiano di Tecnologia
+Via Morego, 30
+6163 Genova, Italy"
+0d8e7cda7d8a2ff737c0ad72f31dfd4d80d3a09a,Network Structure & Information Advantage,"A research and education initiative at the MIT
+Sloan School of Management
+Network Structure & Information Advantage
+Paper 235
+Sinan Aral
+Marshall Van Alstyne
+July 2007
+For more information,
+please visit our website at http://digital.mit.edu
+or contact the Center directly at
+or 617-253-7054"
+0d21472dbf20d4c1bd48a15267b4a59eff80e309,Multi-component Models for Object Detection,"Multi-component Models for Object Detection
+Chunhui Gu1, Pablo Arbel´aez2, Yuanqing Lin3, Kai Yu4, and Jitendra Malik2
+Google Inc., Mountain View, CA, USA
+UC Berkeley, Berkeley, CA, USA
+NEC Labs America, Cupertino, CA, USA
+Baidu Inc., Beijing, China"
+0d0041aefb16c5f7b1e593b440bb3df7b05b411c,Secure JPEG scrambling enabling privacy in photo sharing,"Secure JPEG Scrambling Enabling
+Privacy in Photo Sharing
+Lin Yuan, Pavel Korshunov, Touradj Ebrahimi
+Multimedia Signal Processing Group, EPFL
+De-ID workshop, Ljubljana, Slovenia
+8/14/2015
+Workshop on De-identification for Privacy Protection in Multimedia"
+0d33b6c8b4d1a3cb6d669b4b8c11c2a54c203d1a,Detection and Tracking of Faces in Videos: A Review of Related Work,"Detection and Tracking of Faces in Videos: A Review
+© 2016 IJEDR | Volume 4, Issue 2 | ISSN: 2321-9939
+of Related Work
+Seema Saini, 2 Parminder Sandal
+Student, 2Assistant Professor
+, 2Dept. of Electronics & Comm., S S I E T, Punjab, India
+________________________________________________________________________________________________________"
+0d8a2034bbdefa214d8debecc704cadc5b9ec6e8,Submitted for the Degree of Doctor of Philosophy at the University of Sussex,"A University of Sussex DPhil thesis
+Available online via Sussex Research Online:
+http://sro.sussex.ac.uk/
+This thesis is protected by copyright which belongs to the author.
+This thesis cannot be reproduced or quoted extensively from without first
+obtaining permission in writing from the Author
+The content must not be changed in any way or sold commercially in any
+format or medium without the formal permission of the Author
+When referring to this work, full bibliographic details including the
+uthor, title, awarding institution and date of the thesis must be given
+Please visit Sussex Research Online for more information and further details"
+0dd72a3522b99aedea83b47c5d7b33a1df058fd0,A Set of Selected SIFT Features for 3D Facial Expression Recognition,"A Set of Selected SIFT Features for 3D Facial
+Expression Recognition
+Stefano Berretti, Alberto Del Bimbo, Pietro Pala, Boulbaba Ben Amor,
+Daoudi Mohamed
+To cite this version:
+Stefano Berretti, Alberto Del Bimbo, Pietro Pala, Boulbaba Ben Amor, Daoudi Mohamed. A Set
+of Selected SIFT Features for 3D Facial Expression Recognition. 20th International Conference on
+Pattern Recognition, Aug 2010, Istanbul, Turkey. pp.4125 - 4128, 2010. <hal-00829354>
+HAL Id: hal-00829354
+https://hal.archives-ouvertes.fr/hal-00829354
+Submitted on 3 Jun 2013
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,"
+0da4c3d898ca2fff9e549d18f513f4898e960aca,The Headscarf Effect Revisited: Further Evidence for a Culture-Based Internal Face Processing Advantage.,"Wang, Y., Thomas, J., Weissgerber, S. C., Kazemini, S., Ul-Haq, I., &
+Quadflieg, S. (2015). The Headscarf Effect Revisited: Further Evidence for a
+36. 10.1068/p7940
+Peer reviewed version
+Link to published version (if available):
+0.1068/p7940
+Link to publication record in Explore Bristol Research
+PDF-document
+University of Bristol - Explore Bristol Research
+General rights
+This document is made available in accordance with publisher policies. Please cite only the published
+version using the reference above. Full terms of use are available:
+http://www.bristol.ac.uk/pure/about/ebr-terms.html
+Take down policy
+Explore Bristol Research is a digital archive and the intention is that deposited content should not be
+removed. However, if you believe that this version of the work breaches copyright law please contact
+nd include the following information in your message:
+• Your contact details
+• Bibliographic details for the item, including a URL
+• An outline of the nature of the complaint"
+95ace502ba23a8a5543b882937de23b892112cca,Facial Dynamics Interpreter Network: What Are the Important Relations Between Local Dynamics for Facial Trait Estimation?,"Facial Dynamics Interpreter Network: What are
+the Important Relations between Local
+Dynamics for Facial Trait Estimation?
+Seong Tae Kim and Yong Man Ro*
+School of Electrical Engineering, KAIST, Daejeon, Republic of Korea"
+95f990600abb9c8879e4f5f7cd03f3d696fcdec4,An Online Algorithm for Constrained Face Clustering in Videos,"Manuscript version: Author’s Accepted Manuscript
+The version presented in WRAP is the author’s accepted manuscript and may differ from the
+published version or Version of Record.
+Persistent WRAP URL:
+http://wrap.warwick.ac.uk/109574
+How to cite:
+Please refer to published version for the most recent bibliographic citation information.
+If a published version is known of, the repository item page linked to above, will contain
+details on accessing it.
+Copyright and reuse:
+The Warwick Research Archive Portal (WRAP) makes this work by researchers of the
+University of Warwick available open access under the following conditions.
+Copyright © and all moral rights to the version of the paper presented here belong to the
+individual author(s) and/or other copyright owners. To the extent reasonable and
+practicable the material made available in WRAP has been checked for eligibility before
+eing made available.
+Copies of full items can be used for personal research or study, educational, or not-for-profit
+purposes without prior permission or charge. Provided that the authors, title and full
+ibliographic details are credited, a hyperlink and/or URL is given for the original metadata
+page and the content is not changed in any way."
+956317de62bd3024d4ea5a62effe8d6623a64e53,Lighting Analysis and Texture Modification of 3D Human Face Scans,"Lighting Analysis and Texture Modification of 3D Human
+Face Scans
+Author
+Zhang, Paul, Zhao, Sanqiang, Gao, Yongsheng
+Published
+Conference Title
+Digital Image Computing Techniques and Applications
+https://doi.org/10.1109/DICTA.2007.4426825
+Copyright Statement
+© 2007 IEEE. Personal use of this material is permitted. However, permission to reprint/
+republish this material for advertising or promotional purposes or for creating new collective
+works for resale or redistribution to servers or lists, or to reuse any copyrighted component of
+this work in other works must be obtained from the IEEE.
+Downloaded from
+http://hdl.handle.net/10072/17889
+Link to published version
+http://www.ieee.org/
+Griffith Research Online
+https://research-repository.griffith.edu.au"
+9501db000474dbd182579d311dfb1b1ab8fa871f,Supplementary of Multi-scale Deep Learning Architectures for Person Re-identification,"Supplementary of Multi-scale Deep Learning Architectures for Person
+Re-identification
+Xuelin Qian1 Yanwei Fu2,5,* Yu-Gang Jiang1,3 Tao Xiang4 Xiangyang Xue1,2
+Shanghai Key Lab of Intelligent Info. Processing, School of Computer Science, Fudan University;
+School of Data Science, Fudan University; 3Tencent AI Lab;
+Queen Mary University of London; 5University of Technology Sydney;
+. Multi-scale stream layers
+Multi-scale-A layer (Fig. 1), analyses the data stream with
+the size 1 × 1, 3 × 3 and 5 × 5 of receptive field. Further-
+more, in order to increase both depth and width of this layer,
+we split the filter size of 5 × 5 into two 3 × 3 streams cas-
+aded (i.e. stream-4 and stream-3 in Tab 1 and Fig. 1). The
+weights of each stream are also tied with the corresponding
+stream in another branch. Such a design art is, in general,
+inspired by, and yet different from the inception architec-
+tures [11, 12, 10]. The key difference lies in the weights
+which are not tied between any two streams from the same
+ranch, but are tied between the two corresponding streams
+of different branches.
+Reduction layer (Fig. 2) further passes the data stream"
+95296302a7fc82edf782cece082d7319cfa584b7,Detection-free Bayesian Multi-object Tracking via Spatio-Temporal Video Bundles Grouping,"Detection-free Bayesian Multi-object Tracking
+via Spatio-Temporal Video Bundles Grouping
+Technical Report, November 2013
+Yongyi Lu, Liang Lin, Yuanlu Xu, Zefeng Lai"
+9595a267de2b0ecf7e4e2962a606c8854551e203,On the Relation between Color Image Denoising and Classification,"On the Relation between Color Image Denoising
+nd Classification
+Jiqing Wu, Radu Timofte, Member, IEEE, Zhiwu Huang, Member, IEEE, and Luc Van Gool, Member, IEEE"
+959bcb16afdf303c34a8bfc11e9fcc9d40d76b1c,Temporal Coherency based Criteria for Predicting Video Frames using Deep Multi-stage Generative Adversarial Networks,"Temporal Coherency based Criteria for Predicting
+Video Frames using Deep Multi-stage Generative
+Adversarial Networks
+Prateep Bhattacharjee1, Sukhendu Das2
+Visualization and Perception Laboratory
+Department of Computer Science and Engineering
+Indian Institute of Technology Madras, Chennai, India"
+95be490aef44da67ca1cef76b16df14b6e40c421,Learning Cross-View Binary Identities for Fast Person Re-Identification,"Proceedings of the Twenty-Fifth International Joint Conference on Artificial Intelligence (IJCAI-16)
+Learning Cross-View Binary Identities
+for Fast Person Re-Identification
+Feng Zheng1, Ling Shao2
+Department of Electronic and Electrical Engineering, The University of Sheffield.
+Department of Computer Science and Digital Technologies, Northumbria University."
+95593fb20df8ce1273cebe0690cf2cdab054b9b5,Robust Multi-image HDR Reconstruction for the Modulo Camera,
+951f21a5671a4cd14b1ef1728dfe305bda72366f,Use of l2/3-norm Sparse Representation for Facial Expression Recognition,"International Journal of Science and Research (IJSR)
+ISSN (Online): 2319-7064
+Impact Factor (2012): 3.358
+Use of ℓ2/3-norm Sparse Representation for Facial
+Expression Recognition
+Sandeep Rangari1, Sandeep Gonnade2
+MATS University, MATS School of Engineering and Technology, Arang, Raipur, India
+MATS University, MATS School of Engineering and Technology, Arang, Raipur, India
+three
+to discriminate
+represents emotion,"
+95aef5184b89daebd0c820c8102f331ea7cae1ad,Recognising facial expressions in video sequences,"Recognising facial expressions in video sequences
+Jos´e M. Buenaposada1, Enrique Mu˜noz2⋆, Luis Baumela2
+ESCET, Universidad Rey Juan Carlos
+C/Tulip´an s/n, 28933 M´ostoles, Spain
+Facultad Inform´atica, Universidad Polit´ecnica de Madrid
+Campus Montegancedo s/n, 28660 Boadilla del Monte, Spain
+http://www.dia.fi.upm.es/~pcr
+Received: 7 Jan 2007 / Accepted: 10 July 2007/ Online: 18 Oct 2007"
+95225bab187483e37823daab5c503f6b327fb008,Improved MinMax Cut Graph Clustering with Nonnegative Relaxation,"Improved MinMax Cut Graph Clustering with
+Nonnegative Relaxation
+Feiping Nie, Chris Ding, Dijun Luo, and Heng Huang
+Department of Computer Science and Engineering,
+University of Texas, Arlington, America"
+9588a42bff63fb36015e10fac9f3121154c3ab1d,Explaining Potential Risks in Traffic Scenes by Combining Logical Inference and Physical Simulation,"International Journal of Machine Learning and Computing, Vol. 6, No. 5, October 2016
+Explaining Potential Risks in Traffic Scenes by Combining
+Logical Inference and Physical Simulation
+Ryo Takahashi, Naoya Inoue, Yasutaka Kuriya, Sosuke Kobayashi, and Kentaro Inui
+from observation and"
+9547a7bce2b85ef159b2d7c1b73dea82827a449f,Facial expression recognition using Gabor motion energy filters,"Facial Expression Recognition Using Gabor Motion Energy Filters
+Tingfan Wu
+Dept. Computer Science Engineering
+UC San Diego
+Marian S. Bartlett
+Javier R. Movellan
+Institute for Neural Computation
+UC San Diego"
+95a9e256c8f8bbce0d86199cacea92b15004dd45,Using Semantic Similarity for Multi-Label Zero-Shot Classification of Text Documents,"Using Semantic Similarity for Multi-Label Zero-Shot
+Classification of Text Documents
+Jinseok Nam2,3
+Sappadla Prateek Veeranna1
+Johannes F¨urnkranz2 ∗
+Eneldo Loza Menc´ıa2
+- Birla Institute of Technology and Science - Pilani - India
+- Knowledge Engineering Group - TU Darmstadt - Germany
+- Knowledge Discovery in Scientific Literature - DIPF - Germany"
+9513503867b29b10223f17c86e47034371b6eb4f,Comparison of Optimisation Algorithms for Deformable Template Matching,"Comparison of optimisation algorithms for
+deformable template matching
+Vasileios Zografos
+Link¨oping University, Computer Vision Laboratory
+ISY, SE-581 83 Link¨oping, SWEDEN"
+95ed2269c4a13771cc8dfe0ff2d4d6a7f4d73033,Deep Learning for Domain Adaption: Engagement Recognition,"Engagement Recognition using Deep Learning and Facial Expression
+Omid Mohamad Nezami , Len Hamey , Deborah Richards , and Mark Dras
+Macquarie University, Sydney, NSW, Australia"
+956c634343e49319a5e3cba4f2bd2360bdcbc075,A novel incremental principal component analysis and its application for face recognition,"IEEE TRANSACTIONS ON SYSTEMS, MAN, AND CYBERNETICS—PART B: CYBERNETICS, VOL. 36, NO. 4, AUGUST 2006
+A Novel Incremental Principal Component Analysis
+nd Its Application for Face Recognition
+Haitao Zhao, Pong Chi Yuen, Member, IEEE, and James T. Kwok, Member, IEEE"
+95a835cdb5dc46e4de071865f9dccdaf9ec944ad,Euclidean and geodesic distance between a facial feature points in two-dimensional face recognition system,"The International Arab Journal of Information Technology, Vol. 14, No. 4A, Special Issue 2017 565
+Euclidean and Geodesic Distance between a Facial
+Feature Points in Two-Dimensional Face
+Recognition System
+Rachid Ahdid1,2, Said Safi1, and Bouzid Manaut2
+Department of Mathematics and Informatics, Sultan Moulay Slimane University, Morocco
+Poladisciplinary Faculty, Sultan Moulay Slimane University, Morocco"
+95deb62b82ede5c6732c5c498d3f9452866eaba7,Unsupervised Video Understanding by Reconciliation of Posture Similarities,"Unsupervised Video Understanding by Reconciliation of Posture Similarities
+Timo Milbich, Miguel Bautista, Ekaterina Sutter, Bj¨orn Ommer
+Heidelberg Collaboratory for Image Processing
+IWR, Heidelberg University, Germany
+{timo.milbich, miguel.bautista, ekaterina.sutter,"
+951af0494e8812fdb7d578b68c342ab876acb27e,THÈSE DE DOCTORAT DE L’ÉCOLE NORMALE SUPÉRIEURE DE CACHAN présentée par JULIEN MAIRAL pour obtenir le grade de DOCTEUR DE L’ÉCOLE NORMALE,"THÈSEDEDOCTORATDEL’ÉCOLENORMALESUPÉRIEUREDECACHANprésentéeparJULIENMAIRALpourobtenirlegradedeDOCTEURDEL’ÉCOLENORMALESUPÉRIEUREDECACHANDomaine:MATHÉMATIQUESAPPLIQUÉESSujetdelathèse:Représentationsparcimonieusesenapprentissagestatistique,traitementd’imageetvisionparordinateur—Sparsecodingformachinelearning,imageprocessingandcomputervisionThèseprésentéeetsoutenueàCachanle30novembre2010devantlejurycomposéde:FrancisBACHDirecteurderecherche,INRIAParis-RocquencourtDirecteurdethèseStéphaneMALLATProfesseur,EcolePolytechnique,New-YorkUniversityRapporteurEricMOULINESProfesseur,Télécom-ParisTechExaminateurBrunoOLSHAUSENProfesseur,UniversityofCalifornia,BerkeleyRapporteurJeanPONCEProfesseur,EcoleNormaleSupérieure,ParisDirecteurdethèseGuillermoSAPIROProfesseur,UniversityofMinnesotaExaminateurJean-PhilippeVERTDirecteurderecherche,EcolesdesMines-ParisTechExaminateurThèsepréparéeauseindel’équipeWillowdulaboratored’informatiquedel’ÉcoleNormaleSupérieure,Paris.(INRIA/ENS/CNRSUMR8548).23avenued’Italie,75214Paris."
+95ea564bd983129ddb5535a6741e72bb1162c779,Multi-Task Learning by Deep Collaboration and Application in Facial Landmark Detection,"Multi-Task Learning by Deep Collaboration and
+Application in Facial Landmark Detection
+Ludovic Trottier
+Philippe Giguère
+Brahim Chaib-draa
+Laval University, Québec, Canada"
+9561c7ef4f89019eb7fb779a7b18ef810964b491,Real-Time Object Segmentation Using a Bag of Features Approach,"Real-Time Object Segmentation Using a
+Bag of Features Approach
+David ALDAVERT a,1, Arnau RAMISA c,b, Ramon LOPEZ DE MANTARAS b and
+Ricardo TOLEDO a
+Computer Vision Center, Dept. Ciencies de la Computació, Universitat Autonòma de
+Barcelona, Catalunya, Spain
+Institut d’Investigació d’Inteligencia Artificial (IIIA-CSIC), Campus UAB, Catalunya,
+Spain
+INRIA-Grenoble, LEAR Team, France"
+95029b1041a169e5b4e1ad79f60bfedb7a6844d0,Learning Superpixels with Segmentation-Aware Affinity Loss,"Learning Superpixels with Segmentation-Aware Affinity Loss
+Wei-Chih Tu1 Ming-Yu Liu2 Varun Jampani2 Deqing Sun2 Shao-Yi Chien1 Ming-Hsuan Yang2
+Jan Kautz2
+National Taiwan University 2NVIDIA 3UC Merced"
+958c599a6f01678513849637bec5dc5dba592394,Generalized Zero-Shot Learning for Action Recognition with Web-Scale Video Data,"Noname manuscript No.
+(will be inserted by the editor)
+Generalized Zero-Shot Learning for Action
+Recognition with Web-Scale Video Data
+Kun Liu · Wu Liu · Huadong Ma ·
+Wenbing Huang · Xiongxiong Dong
+Received: date / Accepted: date"
+950cfcbaafad1e2aaae43728fe499d8a4c90f6ec,Object Instance Detection and Dynamics Modeling in a Long-Term Mobile Robot Context,"Object Instance Detection and Dynamics Modeling in
+Long-Term Mobile Robot Context
+NILS BORE
+Doctoral Thesis
+Stockholm, Sweden 2017"
+955dc25def91eff6bfa5698249bb189ccfa83367,Geometric Model for Human Body Orientation Classification,"CommIT (Communication and Information Technology) Journal, Vol. 9 No. 1, pp. 29–33
+GEOMETRIC MODEL FOR HUMAN
+BODY ORIENTATION CLASSIFICATION
+Igi Ardiyanto
+Department of Electrical Engineering and Information Technology,
+Faculty of Engineering, Gadjah Mada University
+Yogyakarta 55281, Indonesia
+Email:"
+95aa80cf672771730393e1d7d263ab6f6d6e535d,Learning articulated body models for people re-identification,"Learning Articulated Body Models
+for People Re-identification
+Davide Baltieri, Roberto Vezzani, Rita Cucchiara
+University of Modena and Reggio Emilia
+Via Vignolese 905, 41125 Modena - Italy
+{davide.baltieri, roberto.vezzani,"
+59b11427853b7892a9f0d8ab6683d96ce59c2ff2,A Multi-Face Challenging Dataset for Robust Face Recognition,"A Multi-Face Challenging Dataset for Robust Face Recognition
+Shiv Ram Dubey and Snehasis Mukherjee"
+59fc69b3bc4759eef1347161e1248e886702f8f7,Final Report of Final Year Project HKU-Face : A Large Scale Dataset for Deep Face Recognition,"Final Report of Final Year Project
+HKU-Face: A Large Scale Dataset for
+Deep Face Recognition
+Haoyu Li
+035141841
+COMP4801 Final Year Project
+Project Code: 17007"
+59bdd317abe8d87fb525eb4e3197a9311e2766e7,Demystifying Unsupervised Feature Learning a Dissertation Submitted to the Department of Computer Science and the Committee on Graduate Studies of Stanford University in Partial Fulfillment of the Requirements for the Degree of Doctor of Philosophy,"DEMYSTIFYING UNSUPERVISED FEATURE LEARNING
+A DISSERTATION
+SUBMITTED TO THE DEPARTMENT OF COMPUTER SCIENCE
+AND THE COMMITTEE ON GRADUATE STUDIES
+OF STANFORD UNIVERSITY
+IN PARTIAL FULFILLMENT OF THE REQUIREMENTS
+FOR THE DEGREE OF
+DOCTOR OF PHILOSOPHY
+Adam Coates
+September 2012"
+59ef1efb9239a101c2782fab8adc09b7af07d336,Cross-Domain Image Matching with Deep Feature Maps,"Cross-Domain Image Matching with Deep Feature Maps
+Bailey Kong · James Supan˘ci˘c, III · Deva Ramanan · Charless C.
+Fowlkes
+Received: date / Accepted: date"
+59b71e19819c1c6aee98020b34bf92e605f33819,Max-min convolutional neural networks for image classification,"MAX-MIN CONVOLUTIONAL NEURAL NETWORKS FOR IMAGE CLASSIFICATION
+Michael Blot, Matthieu Cord, Nicolas Thome
+Sorbonne Universit´es, UPMC Univ Paris 06, CNRS, LIP6 UMR 7606, 4 place Jussieu 75005 Paris"
+59cca46a0442fc6bd0525e5f13cef5b5a9747d34,Cross-Domain Shoe Retrieval With a Semantic Hierarchy of Attribute Classification Network,"Cross-Domain Shoe Retrieval With a Semantic
+Hierarchy of Attribute Classification Network
+Huijing Zhan, Student Member, IEEE, Boxin Shi, Member, IEEE, and Alex C. Kot, Fellow, IEEE"
+59bfeac0635d3f1f4891106ae0262b81841b06e4,Face Verification Using the LARK Face Representation,"Face Verification Using the LARK Face
+Representation
+Hae Jong Seo, Student Member, IEEE, Peyman Milanfar, Fellow, IEEE,"
+59efb1ac77c59abc8613830787d767100387c680,DIF : Dataset of Intoxicated Faces for Drunk Person Identification,"DIF : Dataset of Intoxicated Faces for Drunk Person
+Identification
+Devendra Pratap Yadav
+Indian Institute of Technology Ropar
+Abhinav Dhall
+Indian Institute of Technology Ropar"
+59b21f61ac46e1f982cbd9f49cb855ba5fcd3c45,CCNY at TRECVID 2014: Surveillance Event Detection,"CCNY at TRECVID 2014: Surveillance Event Detection
+Yang Xian, Xuejian Rong, Xiaodong Yang, and Yingli Tian
+Graduate Center and City College
+City University of New York
+{xrong, xyang02,"
+59f8d0e79eb02c30a5f872038129c4b5dd9bc73a,Design of a Face Recognition System for Security Control,"International Conference on African Development Issues (CU-ICADI) 2015: Information and Communication Teclmology Track
+Design of a Face Recognition System for Security
+Control
+Ambrose A. Azeta, Nicholas A. Omoregbe, Adewole Adewumi, Dolapo Oguntade
+Department of Computer and Information Sciences,
+Covenant University,
+Ota, Ogun-State, Nigeria"
+598f330fc061852162f2aaaf59ea9a3a55d3f6f7,A new strategy based on spatiogram similarity association for multi-pedestrian tracking,"A NEW STRATEGY BASED ON SPATIOGRAM
+SIMILARITY ASSOCIATION FOR
+MULTI-PEDESTRIAN TRACKING
+Nabila MANSOURI1 5, Yousra BEN JEMAA2, Cina MOTAMED 3, Antonio PINTI 4 and Eric WATELAIN1 6
+University of Lille North of France, UVHC, LAMIH laboratory
+e-mail:
+University of Sfax-Tunisie, U2S laboratory
+e-mail:
+University of Lille North of France, ULCO, LISIC laboratory
+e-mail:
+University of Orleans -France, I3MTO laboratory
+e-mail:
+5 University of Sfax-Tunisie, ReDCAD laboratory
+6 University of south Toulon-Var, HandiBio laboratory"
+595d0fe1c259c02069075d8c687210211908c3ed,A Survey on Learning to Hash,"A Survey on Learning to Hash
+Jingdong Wang, Ting Zhang, Jingkuan Song, Nicu Sebe, and Heng Tao Shen"
+5921d9a8e143b6d82a2722d9ee27bafa363475f0,Driving Policy Transfer via Modularity and Abstraction,
+599b7e1b4460c8ad77def2330ec76a2e0dfedb84,Robust Subspace Clustering via Smoothed Rank Approximation,"Robust Subspace Clustering via Smoothed Rank
+Approximation
+Zhao Kang, Chong Peng, and Qiang Cheng∗"
+59eefa01c067a33a0b9bad31c882e2710748ea24,Fast Landmark Localization with 3D Component Reconstruction and CNN for Cross-Pose Recognition,"IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY
+Fast Landmark Localization
+with 3D Component Reconstruction and CNN for
+Cross-Pose Recognition
+Gee-Sern (Jison) Hsu, Hung-Cheng Shie, Cheng-Hua Hsieh"
+59e266adc3525b4325156f0cc0052c1d76b1c9ae,Contextual Spatial Analysis and Processing for Visual Surveillance Applications,"Contextual Spatial Analysis and Processing
+for Visual Surveillance Applications
+Vikas Reddy
+A thesis submitted for the degree of Doctor of Philosophy at
+The University of Queensland in September 2011
+(revised in March 2012)
+School of Information Technology and Electrical Engineering"
+5911dcef05ffec02cc1dd88ec6feb1f1e0e8bdcb,Happy Companion: A System of Multimodal Human-Computer Affective Interaction,"Happy Companion: A System of Multimodal Human-Computer
+Affective Interaction
+Jia Jia1,2,3, Lianhong Cai1,2,3, Sirui Wang4, Xiaolan Fu4
+State Key Laboratory on Intelligent Technology and Systems"
+5955bb0325ec4dd3b56759aeb96cc9c18b09bf3e,Self-Supervised Depth Learning Improves Semantic Segmentation,"Self-Supervised Depth Learning Improves Semantic Segmentation
+Huaizu Jiang, Erik Learned-Miller
+Univ. of Massachusetts, Amherst
+Amherst MA 01003
+. Introduction
+How does a newborn agent learn about the world?
+When an animal (or robot) moves, its visual system is
+exposed to a shower of information. Usually, the speed
+with which something moves in the image is inversely
+proportional to its depth.1 As an agent continues to
+experience visual stimuli under its own motion, it is
+natural for it to form associations between the appear-
+nce of objects and their relative motion in the image.
+For example, an agent may learn that objects that look
+like mountains typically don’t move in the image (or
+hange appearance much) as the agent moves. Objects
+like nearby cars and people, however, appear to move
+rapidly in the image as the agent changes position rel-
+tive to them. This continuous pairing of images with
+motion acts as a kind of “automatic” supervision that"
+591bd78a06814e75cae7cdef50ad91cf22e66c23,3D face recognition based on evolution of iso-geodesic distance curves,"978-1-4244-4296-6/10/$25.00 ©2010 IEEE
+ICASSP 2010"
+59d225486161b43b7bf6919b4a4b4113eb50f039,Complex Event Recognition from Images with Few Training Examples,"Complex Event Recognition from Images with Few Training Examples
+Unaiza Ahsan∗
+Chen Sun∗∗
+James Hays∗
+Irfan Essa∗
+*Georgia Institute of Technology
+**University of Southern California1"
+59945763707557baace208253c029265b4b6e0a9,Face Recognition under Partial Occlusion and Small Dense Noise a Thesis Submitted in Partial Fulfillment of the Requirements for the Degree of Master of Technology,"FACE RECOGNITION UNDER PARTIAL
+OCCLUSION AND SMALL DENSE NOISE
+A THESIS SUBMITTED IN PARTIAL FULFILLMENT OF THE
+REQUIREMENTS FOR THE DEGREE OF
+MASTER OF TECHNOLOGY
+ELECTRONIC SYSTEMS AND COMMUNICATIONS
+ROHIT KUMAR
+ROLL NO. -212EE1210
+Department of Electrical Engineering
+National Institute of Technology, Rourkela-769008
+| P a g e"
+5945464d47549e8dcaec37ad41471aa70001907f,Every Moment Counts: Dense Detailed Labeling of Actions in Complex Videos,"Noname manuscript No.
+(will be inserted by the editor)
+Every Moment Counts: Dense Detailed Labeling of Actions in Complex
+Videos
+Serena Yeung · Olga Russakovsky · Ning Jin · Mykhaylo Andriluka · Greg Mori ·
+Li Fei-Fei
+Received: date / Accepted: date"
+59c9d416f7b3d33141cc94567925a447d0662d80,Matrix factorization over max-times algebra for data mining,"Universität des Saarlandes
+Max-Planck-Institut für Informatik
+Matrix factorization over max-times
+lgebra for data mining
+Masterarbeit im Fach Informatik
+Master’s Thesis in Computer Science
+von / by
+Sanjar Karaev
+ngefertigt unter der Leitung von / supervised by
+Dr. Pauli Miettinen
+egutachtet von / reviewers
+Dr. Pauli Miettinen
+Prof. Gerhard Weikum
+November 2013
+UNIVERSITASSARAVIENSIS"
+59a35b63cf845ebf0ba31c290423e24eb822d245,The FaceSketchID System: Matching Facial Composites to Mugshots,"The FaceSketchID System: Matching Facial
+Composites to Mugshots
+Scott J. Klum, Student Member, IEEE, Hu Han, Member, IEEE, Brendan F. Klare, Member, IEEE,
+nd Anil K. Jain, Fellow, IEEE
+tedious, and may not"
+598ccf73ba504a31d65b50c7ede8982c3b1d9192,Learning a Family of Detectors,"LEARNING A FAMILY OF DETECTORS
+QUAN YUAN
+Dissertation submitted in partial fulfillment
+of the requirements for the degree of
+Doctor of Philosophy
+BOSTON
+UNIVERSITY"
+59f325e63f21b95d2b4e2700c461f0136aecc171,Kernel sparse representation with local patterns for face recognition,"978-1-4577-1302-6/11/$26.00 ©2011 IEEE
+FOR FACE RECOGNITION
+. INTRODUCTION"
+59b202ccc01bae85a88ad0699da7a8ae6aa50fef,"Looking at Vehicles on the Road: A Survey of Vision-Based Vehicle Detection, Tracking, and Behavior Analysis","Looking at Vehicles on the Road: A Survey of
+Vision-Based Vehicle Detection, Tracking,
+nd Behavior Analysis
+Sayanan Sivaraman, Member, IEEE, and Mohan Manubhai Trivedi, Fellow, IEEE"
+590065c40574dc797e5aeb380d6e6dab79fad6e5,Face Detection Using Boosted Jaccard Distance-based Regression,"FACE DETECTION USING BOOSTED
+JACCARD DISTANCE-BASED REGRESSION
+Cosmin Atanasoaei Chris McCool
+Sébastien Marcel
+Idiap-RR-02-2012
+JANUARY 2012
+Centre du Parc, Rue Marconi 19, P.O. Box 592, CH - 1920 Martigny
+T +41 27 721 77 11 F +41 27 721 77 12 www.idiap.ch"
+590a52702bdf7f9522cff02f477de1fa98fc2ff3,"Visual tracking of hands, faces and facial features of multiple persons","DOI 10.1007/s00138-012-0409-5
+ORIGINAL PAPER
+Visual tracking of hands, faces and facial features
+of multiple persons
+Haris Baltzakis · Maria Pateraki · Panos Trahanias
+Received: 17 November 2010 / Revised: 9 December 2011 / Accepted: 18 January 2012
+© Springer-Verlag 2012"
+590c277e8ca10f2c2d7e32eb4a9dc61078a67b96,Statistical Approaches to Face Recognition a Qualifying Examination Report,"StatisticalApproachesTo
+FaceRecognition
+AQualifyingExaminationReport
+AraV.Ne(cid:12)an
+PresentedtotheQualifyingExaminationCommittee
+InPartialFul(cid:12)llmentoftheRequirementsforthe
+DegreeofDoctorofPhilosophyinElectricalEngineering
+Dr.AlbinJ.Gasiewski
+Dr.Je(cid:11)Geronimo
+Dr.MonsonH.HayesIII
+Dr.RussellM.Mersereau
+Dr.RonaldW.Schafer
+GeorgiaInstituteofTechnology
+SchoolofElectricalEngineering
+December, "
+59031a35b0727925f8c47c3b2194224323489d68,Sparse Variation Dictionary Learning for Face Recognition with a Single Training Sample per Person,"Sparse Variation Dictionary Learning for Face Recognition with A Single
+Training Sample Per Person
+Meng Yang, Luc Van Gool
+ETH Zurich
+Switzerland"
+59ee327192c270fc727c5f6d2ef90058ed072b14,Motion Models for People Tracking,"Motion Models for People Tracking
+David J. Fleet"
+926c67a611824bc5ba67db11db9c05626e79de96,Enhancing Bilinear Subspace Learning by Element Rearrangement,"Enhancing Bilinear Subspace Learning
+y Element Rearrangement
+Dong Xu, Shuicheng Yan, Stephen Lin,
+Thomas S. Huang, and
+Shih-Fu Chang"
+923412acb90ed2acbb29290147a567f39d2dfc95,FACSGen: A Tool to Synthesize Emotional Facial Expressions Through Systematic Manipulation of Facial Action Units,"J Nonverbal Behav
+DOI 10.1007/s10919-010-0095-9
+O R I G I N A L P A P E R
+FACSGen: A Tool to Synthesize Emotional Facial
+Expressions Through Systematic Manipulation of Facial
+Action Units
+Etienne B. Roesch • Lucas Tamarit •
+Lionel Reveret • Didier Grandjean •
+David Sander • Klaus R. Scherer
+Ó Springer Science+Business Media, LLC 2010"
+923ede53b0842619831e94c7150e0fc4104e62f7,Masked correlation filters for partially occluded face recognition,"978-1-4799-9988-0/16/$31.00 ©2016 IEEE
+ICASSP 2016"
+92b61b09d2eed4937058d0f9494d9efeddc39002,BoxCars: Improving Vehicle Fine-Grained Recognition using 3D Bounding Boxes in Traffic Surveillance,"Under review in IJCV manuscript No.
+(will be inserted by the editor)
+BoxCars: Improving Vehicle Fine-Grained Recognition using
+D Bounding Boxes in Traf‌f‌ic Surveillance
+Jakub Sochor · Jakub ˇSpaˇnhel · Adam Herout
+Received: date / Accepted: date"
+923e9b437a55853120f1778f55fcd956d81260f8,Zoom Out-and-In Network with Map Attention Decision for Region Proposal and Object Detection,"Noname manuscript No.
+(will be inserted by the editor)
+Zoom Out-and-In Network with Map Attention Decision
+for Region Proposal and Object Detection
+Hongyang Li · Yu Liu · Wanli Ouyang · Xiaogang Wang
+Received: date / Accepted: date"
+92020e6540fe9feb38616334645a0ba28dcac69d,Face Recognition Based on Local Derivative Tetra Pattern,"ISSN: 0976-9102 (ONLINE)
+ICTACT JOURNAL ON IMAGE AND VIDEO PROCESSING, FEBRUARY 2017, VOLUME: 07, ISSUE: 03
+FACE RECOGNITION BASED ON LOCAL DERIVATIVE TETRA PATTERN
+A. Geetha1, M. Mohamed Sathik2 and Y. Jacob Vetharaj3
+Department of Computer Applications, Nesamony Memorial Christian College, India
+Department of Computer Science, Sadakathullah Appa College, India
+Department of Computer Science, Nesamony Memorial Christian College, India"
+92b748f2629b3227a9c56bc9e580f45eb5bdfba5,Novel Adaptive Eye Detection and Tracking for Challenging Lighting Conditions,"Version
+This is the Accepted Manuscript version. This version is defined in the NISO
+recommended practice RP-8-2008 http://www.niso.org/publications/rp/
+Suggested Reference
+Rezaei, M., & Klette, R. (2013). Novel Adaptive Eye Detection and Tracking for
+Challenging Lighting Conditions. In Lecture Notes in Computer Science Vol. 7729
+(pp. 427-440). Daejeon, Korea: Springer Berlin Heidelberg.
+The final publication is available at Springer via http://dx.doi.org/10.1007/978-3-
+642-37484-5_35
+Copyright
+Items in ResearchSpace are protected by copyright, with all rights reserved, unless
+otherwise indicated. Previously published items are made available in accordance
+with the copyright policy of the publisher.
+http://www.sherpa.ac.uk/romeo/issn/0302-9743/
+https://researchspace.auckland.ac.nz/docs/uoa-docs/rights.htm"
+920a92900fbff22fdaaef4b128ca3ca8e8d54c3e,Learning Pattern Transformation Manifolds with Parametric Atom Selection,"LEARNING PATTERN TRANSFORMATION MANIFOLDS WITH PARAMETRIC ATOM
+SELECTION
+Elif Vural and Pascal Frossard
+Ecole Polytechnique F´ed´erale de Lausanne (EPFL)
+Signal Processing Laboratory (LTS4)
+Switzerland-1015 Lausanne"
+9207671d9e2b668c065e06d9f58f597601039e5e,Face Detection Using a 3D Model on Face Keypoints,"Face Detection Using a 3D Model on
+Face Keypoints
+Adrian Barbu, Gary Gramajo"
+9282239846d79a29392aa71fc24880651826af72,Classification of extreme facial events in sign language videos,"Antonakos et al. EURASIP Journal on Image and Video Processing 2014, 2014:14
+http://jivp.eurasipjournals.com/content/2014/1/14
+RESEARCH
+Open Access
+Classification of extreme facial events in sign
+language videos
+Epameinondas Antonakos1,2*, Vassilis Pitsikalis1 and Petros Maragos1"
+92115b620c7f653c847f43b6c4ff0470c8e55dab,Training Deformable Object Models for Human Detection Based on Alignment and Clustering,"Training Deformable Object Models for Human
+Detection Based on Alignment and Clustering
+Benjamin Drayer and Thomas Brox
+Department of Computer Science,
+Centre of Biological Signalling Studies (BIOSS),
+University of Freiburg, Germany"
+927ac98da38db528b780f14996bb02b05009c9cc,Hand pose estimation through semi-supervised and weakly-supervised learning,"Hand Pose Estimation through Semi-Supervised and Weakly-Supervised Learning
+Natalia Neverovaa,∗, Christian Wolfa, Florian Neboutb, Graham W. Taylorc
+Universit´e de Lyon, INSA-Lyon, CNRS, LIRIS, F-69621, France
+Awabot SAS, France
+School of Engineering, University of Guelph, Canada"
+92c2dd6b3ac9227fce0a960093ca30678bceb364,On Color Texture Normalization for Active Appearance Models,"Provided by the author(s) and NUI Galway in accordance with publisher policies. Please cite the published
+version when available.
+Title
+On color texture normalization for active appearance models
+Author(s)
+Ionita, Mircea C.; Corcoran, Peter M.; Buzuloiu, Vasile
+Publication
+009-05-12
+Publication
+Information
+Ionita, M. C., Corcoran, P., & Buzuloiu, V. (2009). On Color
+Texture Normalization for Active Appearance Models. Image
+Processing, IEEE Transactions on, 18(6), 1372-1378.
+Publisher
+Link to
+publisher's
+version
+http://dx.doi.org/10.1109/TIP.2009.2017163
+Item record
+http://hdl.handle.net/10379/1350"
+92679c8cff92442f39de3405c21c8028162fe56a,Temporal 3 D ConvNets using Temporal Transition Layer,"Temporal 3D ConvNets using Temporal Transition Layer
+Ali Diba1
+, Mohsen Fayyaz2, Vivek Sharma3, A.Hossein Karami4, M.Mahdi Arzani4,
+Rahman Yousefzadeh4, Luc Van Gool1
+ESAT-PSI, KU Leuven, 2University of Bonn, 3CV:HCI, KIT, Karlsruhe, 4Sensifai"
+92373095869f1b9e93823f0bd16bb8527c1665dc,How face blurring affects body language processing of static gestures in women and men,"Social Cognitive and Affective Neuroscience, 2018, 590–603
+doi: 10.1093/scan/nsy033
+Advance Access Publication Date: 14 May 2018
+Original article
+How face blurring affects body language processing
+of static gestures in women and men
+Alice Mado Proverbio, Laura Ornaghi, and Veronica Gabaro
+Department of Psychology, Neuro-MI Center for Neuroscience, University of Milano-Bicocca, Milano, Italy
+Correspondence should be addressed to Alice Mado Proverbio, Department of Psychology, University of Milano-Bicocca, piazza dell’Ateneo Nuovo 1, U6
+Building, Milano, Italy. E-mail:"
+92a93693f43a49a7b320d5771c6afaff98b27864,Audio-visual signal processing in a multimodal assisted living environment,"INTERSPEECH 2014
+Audio-Visual Signal Processing in a Multimodal Assisted Living Environment
+Alexey Karpov 1,5, Lale Akarun 2, Hülya Yalçın 3, Alexander Ronzhin 1, Barış Evrim Demiröz 2,
+Aysun Çoban 2 and Miloš Železný 4
+St. Petersburg Institute for Informatics and Automation of Russian Academy of Sciences, Russia
+Boğaziçi University, İstanbul, Turkey
+İstanbul Technical University, İstanbul, Turkey
+University of West Bohemia, Pilsen, Czech Republic
+5 University ITMO, St. Petersburg, Russia"
+927ba64123bd4a8a31163956b3d1765eb61e4426,Customer satisfaction measuring based on the most significant facial emotion,"Customer satisfaction measuring based on the most
+significant facial emotion
+Mariem Slim, Rostom Kachouri, Ahmed Atitallah
+To cite this version:
+Mariem Slim, Rostom Kachouri, Ahmed Atitallah. Customer satisfaction measuring based on the
+most significant facial emotion. 15th IEEE International Multi-Conference on Systems, Signals
+Devices (SSD 2018), Mar 2018, Hammamet, Tunisia. <hal-01790317>
+HAL Id: hal-01790317
+https://hal-upec-upem.archives-ouvertes.fr/hal-01790317
+Submitted on 11 May 2018
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de"
+92f0e02c9f4e95098452d0fd78ba46cd6e7b1f6d,Dynamic machine learning for supervised and unsupervised classification. (Apprentissage automatique dynamique pour la classification supervisée et non supervisée),"Dynamic machine learning for supervised and
+unsupervised classification
+Adela-Maria Sîrbu
+To cite this version:
+Adela-Maria Sîrbu. Dynamic machine learning for supervised and unsupervised classification. Machine
+Learning [cs.LG]. INSA de Rouen, 2016. English. <NNT : 2016ISAM0002>. <tel-01402052>
+HAL Id: tel-01402052
+https://tel.archives-ouvertes.fr/tel-01402052
+Submitted on 24 Nov 2016
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de
+recherche français ou étrangers, des laboratoires"
+9263ca6211ec39469f0daa8790ccaecbd5898423,Exploring Models and Data for Remote Sensing Image Caption Generation,"Exploring Models and Data for
+Remote Sensing Image Caption Generation
+Xiaoqiang Lu, Senior Member, IEEE, Binqiang Wang, Xiangtao Zheng, and Xuelong Li, Fellow, IEEE"
+927ad0dceacce2bb482b96f42f2fe2ad1873f37a,Interest-Point based Face Recognition System,"Interest-Point based Face Recognition System
+Interest-Point based Face Recognition System
+Cesar Fernandez and Maria Asuncion Vicente
+Miguel Hernandez University
+Spain
+. Introduction
+Among all applications of face recognition systems, surveillance is one of the most
+hallenging ones. In such an application, the goal is to detect known criminals in crowded
+environments, like airports or train stations. Some attempts have been made, like those of
+Tokio (Engadget, 2006) or Mainz (Deutsche Welle, 2006), with limited success.
+The first task to be carried out in an automatic surveillance system involves the detection of
+ll the faces in the images taken by the video cameras. Current face detection algorithms are
+highly reliable and thus, they will not be the focus of our work. Some of the best performing
+examples are the Viola-Jones algorithm (Viola & Jones, 2004) or the Schneiderman-Kanade
+lgorithm (Schneiderman & Kanade, 2000).
+The second task to be carried out involves the comparison of all detected faces among the
+database of known criminals. The ideal behaviour of an automatic system performing this
+task would be to get a 100% correct identification rate, but this behaviour is far from the
+apabilities of current face recognition algorithms. Assuming that there will be false
+identifications, supervised surveillance systems seem to be the most realistic option: the"
+92a044df6c37571aac25756252dda27676492bb5,Implementation of Real-time System on Fpga Board for Human's Face Detection and Tracking Author Mohd,"IMPLEMENTATION OF REAL-TIME SYSTEM ON FPGA BOARD FOR HUMAN'S
+FACE DETECTION AND TRACKING AUTHOR
+MOHD NORHAFIZ HASHIM
+A project report submitted in partial
+Fulfillment of the requirement for the award of the
+Degree of Master Electrical Engineering
+Fakulti Kejuruteraan Elektrik dan Elektronik
+Universiti Tun Hussein Onn Malaysia
+JANUARY 2014"
+921aaac9b33ec6a417bfc8bb0e21e11e743342c2,Image enhancement for improving face detection under non-uniform lighting conditions,"978-1-4244-1764-3/08/$25.00 ©2008 IEEE
+ICIP 2008"
+929bd1d11d4f9cbc638779fbaf958f0efb82e603,"Improving the Performance of Facial Expression Recognition Using Dynamic, Subtle and Regional Features","This is the author’s version of a work that was submitted/accepted for pub-
+lication in the following source:
+Zhang, Ligang & Tjondronegoro, Dian W. (2010) Improving the perfor-
+mance of facial expression recognition using dynamic, subtle and regional
+features.
+In Kok, WaiWong, B. Sumudu, U. Mendis, & Abdesselam ,
+Bouzerdoum (Eds.) Neural Information Processing. Models and Applica-
+tions, Lecture Notes in Computer Science, Sydney, N.S.W, pp. 582-589.
+This file was downloaded from: http://eprints.qut.edu.au/43788/
+(cid:13) Copyright 2010 Springer-Verlag
+Conference proceedings published, by Springer Verlag, will be available
+via Lecture Notes in Computer Science http://www.springer.de/comp/lncs/
+Notice: Changes introduced as a result of publishing processes such as
+opy-editing and formatting may not be reflected in this document. For a
+definitive version of this work, please refer to the published source:
+http://dx.doi.org/10.1007/978-3-642-17534-3_72"
+92980965514210b4f6dd074d122078d54684f724,Track Everything: Limiting Prior Knowledge in Online Multi-Object Recognition,"Track Everything: Limiting Prior Knowledge in
+Online Multi-Object Recognition
+Sebastien C. Wong∗, Senior Member, IEEE, Victor Stamatescu†, Member, IEEE, Adam Gatt‡, Member, IEEE,
+David Kearney†, Ivan Lee† Senior Member, IEEE and Mark D. McDonnell†, Senior Member, IEEE ∗ Defence
+Science and Technology Group, Edinburgh, SA, Australia † Computational Learning Systems Laboratory, School
+of Information Technology and Mathematical Sciences, University of South Australia, Mawson Lakes, SA,
+Australia ‡ Australian Defence Force, Edinburgh, SA, Australia
+An important practical consideration in the design of online
+object recognition systems is the finite amount of labeled and
+nnotated data available for training. When scarce, this can de-
+grade classification performance due to overfitting and reduce
+the detection probability of highly tuned object detectors. Even
+when larger data sets are available, these may be biased in such
+way that their image statistics do not accurately reflect the
+data encountered by the system at run time [2]. In the case
+of classifier-based object recognition [3] and detection [4], the
+use of features, which are higher-level representations of an
+object than the raw image, can mitigate these problems by
+providing a degree of invariance across different data sets.
+In the case of tracking and object detection algorithms, the"
+926ca7ce14332f9f848c28565d0f2f9a2d1e35a8,Impaired facial and vocal emotion decoding in schizophrenia is underpinned by basic perceptivo-motor deficits,"Cognitive Neuropsychiatry
+ISSN: 1354-6805 (Print) 1464-0619 (Online) Journal homepage: http://www.tandfonline.com/loi/pcnp20
+Impaired facial and vocal emotion decoding in
+schizophrenia is underpinned by basic perceptivo-
+motor deficits
+C. Mangelinckx, J. B. Belge, P. Maurage & E. Constant
+To cite this article: C. Mangelinckx, J. B. Belge, P. Maurage & E. Constant (2017): Impaired facial
+nd vocal emotion decoding in schizophrenia is underpinned by basic perceptivo-motor deficits,
+Cognitive Neuropsychiatry, DOI: 10.1080/13546805.2017.1382342
+To link to this article: http://dx.doi.org/10.1080/13546805.2017.1382342
+Published online: 03 Oct 2017.
+Submit your article to this journal
+View related articles
+View Crossmark data
+Full Terms & Conditions of access and use can be found at
+http://www.tandfonline.com/action/journalInformation?journalCode=pcnp20
+Download by: [University of Virginia, Charlottesville]
+Date: 06 October 2017, At: 09:26"
+0cb7e4c2f6355c73bfc8e6d5cdfad26f3fde0baf,F Acial E Xpression R Ecognition Based on Wapa and Oepa F Ast Ica,"International Journal of Artificial Intelligence & Applications (IJAIA), Vol. 5, No. 3, May 2014
+FACIAL EXPRESSION RECOGNITION BASED ON
+WAPA AND OEPA FASTICA
+Humayra Binte Ali1 and David M W Powers2
+Computer Science, Engineering and Mathematics School, Flinders University, Australia
+Computer Science, Engineering and Mathematics School, Flinders University, Australia"
+0c5a2bb5d1a1e9bb332207be61e13d0afb8f278c,A Supervised Learning Methodology for Real-Time Disguised Face Recognition in the Wild,"A Supervised Learning Methodology for Real-Time Disguised Face
+Recognition in the Wild
+Saumya Kumaar3, Abhinandan Dogra4, Abrar Majeedi4, Hanan Gani4, Ravi M. Vishwanath2 and S N Omkar1"
+0c24ccc6d6c386a8d555a81166eaf6e8d4dfccc3,Person count localization in videos from noisy foreground and detections,"Person Count Localization in Videos from Noisy Foreground and Detections
+Sheng Chen1, Alan Fern1, Sinisa Todorovic1
+Oregon State University.
+In this paper, we introduce a new problem, person count localization from
+noisy foreground and person detections. Our formulation strikes a middle-
+ground between person detection and frame-level counting. Given a video,
+our goal is to output for each frame a set of:
+. Detections optimally covering both isolated individuals and crowds
+of people in the video; and
+. Counts assigned to each detection indicating the number of people
+inside.
+The problem of detecting people in videos of crowded scenes, where
+people frequently appear under severe occlusion by other people in the
+rowd is an important line of research, since detecting people in video frames
+has become the standard initial step of many approaches to activity recogni-
+tion [1, 3, 4], and multi-object tracking by detection [6, 8, 9]. They typically
+use as input human appearance, pose, and orientation, and thus critically
+depend on robust person detections. In many domains, however, such as
+videos of American football or public spaces crowded with pedestrians, de-
+tecting every individual person is highly unreliable, and remains an open"
+0c8a0a81481ceb304bd7796e12f5d5fa869ee448,A Spatial Regularization of LDA for Face Recognition,"International Journal of Fuzzy Logic and Intelligent Systems, vol. 10, no. 2, June 2010, pp. 95-100
+A Spatial Regularization of LDA for Face Recognition
+Lae-Jeong Park
+Department of Electronics Engineering, Gangnung-Wonju National University
+23 Chibyun-Dong, Kangnung, 210-702, Korea
+Tel : +82-33-640-2389, Fax : +82-33-646-0740, E-mail :"
+0c36c988acc9ec239953ff1b3931799af388ef70,Face Detection Using Improved Faster RCNN,"Face Detection Using Improved Faster RCNN
+Changzheng Zhang, Xiang Xu, Dandan Tu*
+Huawei Cloud BU, China
+{zhangzhangzheng, xuxiang12,
+Figure1.Face detection results of FDNet1.0"
+0c5ddfa02982dcad47704888b271997c4de0674b,Model-driven and Data-driven Approaches for some Object Recognition Problems,
+0c069a870367b54dd06d0da63b1e3a900a257298,Weakly Supervised Learning of Foreground-Background Segmentation Using Masked RBMs,"Author manuscript, published in ""ICANN 2011 - International Conference on Artificial Neural Networks (2011)"""
+0c75c7c54eec85e962b1720755381cdca3f57dfb,Face Landmark Fitting via Optimized Part Mixtures and Cascaded Deformable Model,"Face Landmark Fitting via Optimized Part
+Mixtures and Cascaded Deformable Model
+Xiang Yu, Member, IEEE, Junzhou Huang, Member, IEEE,
+Shaoting Zhang, Senior Member, IEEE, and Dimitris N. Metaxas, Fellow, IEEE"
+0c769c19d894e0dbd6eb314781dc1db3c626df57,Joint Detection and Identification Feature Learning for Person Search,"Joint Detection and Identification Feature Learning for Person Search
+Tong Xiao1∗ Shuang Li1∗ Bochao Wang2 Liang Lin2 Xiaogang Wang1
+The Chinese University of Hong Kong 2Sun Yat-Sen University"
+0cdac46ec42be2d81f64ec4ee53d88be43290d52,Temporal Poselets for Collective Activity Detection and Recognition,"Temporal Poselets for Collective Activity Detection and Recognition
+Moin Nabi
+Alessio Del Bue
+Vittorio Murino
+Pattern Analysis and Computer Vision (PAVIS)
+Istituto Italiano di Tecnologia (IIT)
+Via Morego 30, Genova, Italy"
+0c3c469e46668ea2c38a6de610d675975f337522,Self-tuned Visual Subclass Learning with Shared Samples An Incremental Approach,"Self-tuned Visual Subclass Learning with Shared Samples
+An Incremental Approach
+Updated ICCV 2013 Submission
+Hossein Azizpour
+Royal Insitute of Technology(KTH)
+Stefan Carlsson
+Royal Insitute of Technology(KTH)"
+0c95ff762bdf6a20609f49f1eb5248de3f748866,Fine-Grained Walking Activity Recognition via Driving Recorder Dataset,"Fine-grained Walking Activity Recognition
+via Driving Recorder Dataset
+Hirokatsu Kataoka (AIST), Yoshimitsu Aoki (Keio Univ.), Yutaka Satoh (AIST)
+Shoko Oikawa (NTSEL), Yasuhiro Matsui (NTSEL)
+Email:
+http://hirokatsukataoka.net/"
+0ca96dc1557032ff9259562a5b8fc026334997a6,Spectral Graph-Based Method of Multimodal Word Embedding,"Proceedings of TextGraphs-11: the Workshop on Graph-based Methods for Natural Language Processing, ACL 2017, pages 39–44,
+Vancouver, Canada, August 3, 2017. c(cid:13)2017 Association for Computational Linguistics"
+0c049cc7320f9b92f91210ab6961aa6644c867cd,Delving Deep Into Coarse-to-Fine Framework for Facial Landmark Localization,"Delving Deep into Coarse-to-fine Framework
+for Facial Landmark Localization
+Xi Chen, Erjin Zhou, Yuchen Mo, Jiancheng Liu, Zhimin Cao
+Megvii Research
+{chenxi, zej, moyuchen, liujiancheng,"
+0ca36ecaf4015ca4095e07f0302d28a5d9424254,Improving Bag-of-Visual-Words Towards Effective Facial Expressive Image Classification,"Improving Bag-of-Visual-Words Towards Effective Facial Expressive
+Image Classification
+Dawood Al Chanti1 and Alice Caplier1
+Univ. Grenoble Alpes, CNRS, Grenoble INP∗ , GIPSA-lab, 38000 Grenoble, France
+Keywords:
+BoVW, k-means++, Relative Conjunction Matrix, SIFT, Spatial Pyramids, TF.IDF."
+0cc2fc148eef46c1141edd276d903853052fc19d,Estado del arte en reconocimiento facial,"Estado del arte en reconocimiento facial
+Martín Adrián Garduño Santana, L. E. Díaz-Sánchez, Israel Tabarez Paz,
+Marcelo Romero Huertas
+Universidad Autónoma del Estado de México, Toluca, México
+Resumen. En este trabajo se resumen los métodos más utilizados para el
+reconocimiento facial, incluyendo las ventajas y desventajas de los sistemas
+desarrollados hasta ahora. También se describen las futuras líneas de
+investigación y se discute el rumbo del reconocimiento facial en los próximos
+ños. Esta revisión es relevante pues se busca la implementación de un novedoso
+sistema de reconocimiento facial.
+Palabras clave: reconocimiento facial, sistemas biométricos, ciudades
+inteligentes, imágenes 2D y 3D.
+Face Recognition: a Survey"
+0c8d675bcd4489e886f35bee2a347c948ffee270,Semantic bottleneck for computer vision tasks,"Semantic bottleneck for computer vision tasks
+Maxime Bucher1,2, St´ephane Herbin1, and Fr´ed´eric Jurie2
+ONERA, Universit´e Paris-Saclay, FR-91123 Palaiseau, France
+Normandie Univ, UNICAEN, ENSICAEN, CNRS"
+0c3c83b7f030fe661548d362ddf33f37bb44043d,Crowd Motion Analysis Based on Social Force Graph with Streak Flow Attribute,"Hindawi Publishing Corporation
+Journal of Electrical and Computer Engineering
+Volume 2015, Article ID 492051, 12 pages
+http://dx.doi.org/10.1155/2015/492051
+Research Article
+Crowd Motion Analysis Based on Social Force Graph with
+Streak Flow Attribute
+Shaonian Huang,1,2 Dongjun Huang,1 and Mansoor Ahmed Khuhro1
+School of Information Science and Engineering, Central South University, Changsha 410083, China
+School of Computer and Information Engineering, Hunan University of Commerce, Changsha 420005, China
+Correspondence should be addressed to Shaonian Huang;
+Received 28 July 2015; Accepted 27 September 2015
+Academic Editor: Stefano Basagni
+Copyright © 2015 Shaonian Huang et al. This is an open access article distributed under the Creative Commons Attribution
+License, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly
+ited.
+Over the past decades, crowd management has attracted a great deal of attention in the area of video surveillance. Among various
+tasks of video surveillance analysis, crowd motion analysis is the basis of numerous subsequent applications of surveillance video.
+In this paper, a novel social force graph with streak flow attribute is proposed to capture the global spatiotemporal changes and
+the local motion of crowd video. Crowd motion analysis is hereby implemented based on the characteristics of social force graph."
+0c17c42d71eacd2244e43fa55a8ed96607337cca,Automatic Face Reenactment,"Automatic Face Reenactment
+Pablo Garrido1
+Thorsten Thorm¨ahlen2
+Levi Valgaerts1
+Patrick P´erez3
+Ole Rehmsen1
+Christian Theobalt1
+Philipps-Universit¨at Marburg
+Technicolor
+MPI for Informatics"
+0cfca73806f443188632266513bac6aaf6923fa8,Predictive Uncertainty in Large Scale Classification using Dropout - Stochastic Gradient Hamiltonian Monte Carlo,"Predictive Uncertainty in Large Scale Classification
+using Dropout - Stochastic Gradient Hamiltonian
+Monte Carlo.
+Vergara, Diego∗1, Hern´andez, Sergio∗2, Valdenegro-Toro, Mat´ıas∗∗3 and Jorquera, Felipe∗4.
+Laboratorio de Procesamiento de Informaci´on Geoespacial, Universidad Cat´olica del Maule, Chile.
+German Research Centre for Artificial Intelligence, Bremen, Germany.
+Email:"
+0cd032a93890d61b9bd187119abee0d6aeb899f7,Iterative Quantization: A Procrustean Approach to Learning Binary Codes for Large-Scale Image Retrieval,"IEEE TRANSACTION ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE
+Iterative Quantization:
+A Procrustean Approach to Learning Binary
+Codes for Large-scale Image Retrieval
+Yunchao Gong, Svetlana Lazebnik, Albert Gordo, Florent Perronnin"
+0c5f9f5083b9fca4dcdbc4b122099ac1f630728b,Visual Semantic Role Labeling,"Visual Semantic Role Labeling
+Saurabh Gupta
+UC Berkeley
+Jitendra Malik
+UC Berkeley"
+0cec42a1593a02ce3f4a44d375e3b95f5797aa21,Recognizing Scene Categories of Historical Postcards,"Recognizing Scene Categories of Historical
+Postcards
+Rene Grzeszick, Gernot A. Fink
+{rene.grzeszick,
+Department of Computer Science, TU Dortmund"
+0cff123a31dcc115377ecca6ba137bebca909ff8,Anxiety dissociates the adaptive functions of sensory and motor response enhancements to social threats,"RESEARCH ARTICLE
+Anxiety dissociates the adaptive functions
+of sensory and motor response
+enhancements to social threats
+Marwa El Zein1,2*, Valentin Wyart1†, Julie Gre` zes1†
+Laboratoire de Neurosciences Cognitives, De´ partement d’Etudes Cognitives, Ecole
+Normale Supe´ rieure, PSL Research University, Paris, France; 2Universite´ Pierre et
+Marie Curie, Paris, France"
+0c3f7272a68c8e0aa6b92d132d1bf8541c062141,Kruskal-Wallis-Based Computationally Efficient Feature Selection for Face Recognition,"Hindawi Publishing Corporation
+e Scientific World Journal
+Volume 2014, Article ID 672630, 6 pages
+http://dx.doi.org/10.1155/2014/672630
+Research Article
+Kruskal-Wallis-Based Computationally Efficient Feature
+Selection for Face Recognition
+Sajid Ali Khan,1,2 Ayyaz Hussain,3 Abdul Basit,1 and Sheeraz Akram1
+Department of Software Engineering, Foundation University, Rawalpindi 46000, Pakistan
+Department of Computer Science, Shaheed Zulfikar Ali Bhutto Institute of Science and Technology Islamabad,
+Islamabad 44000, Pakistan
+Department of Computer Science and Software Engineering, International Islamic University, Islamabad 44000, Pakistan
+Correspondence should be addressed to Sajid Ali Khan;
+Received 5 December 2013; Accepted 10 February 2014; Published 21 May 2014
+Academic Editors: S. Balochian, V. Bhatnagar, and Y. Zhang
+Copyright © 2014 Sajid Ali Khan et al. This is an open access article distributed under the Creative Commons Attribution License,
+which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
+Face recognition in today’s technological world, and face recognition applications attain much more importance. Most of the
+existing work used frontal face images to classify face image. However these techniques fail when applied on real world face images.
+The proposed technique effectively extracts the prominent facial features. Most of the features are redundant and do not contribute"
+0c87f5a6deba422c0db261c4497b9b013b4ef5b8,Robust Face Detection using Convolutional Neural Network,"International Journal of Computer Applications (0975 – 8887)
+Volume 170 – No.6, July 2017
+Robust Face Detection using Convolutional
+Robert Yao Aaronson
+Sch. of Comp. Sci.& Tech
+Jiangsu Univ. of Sci. & Tech.
+No. 2 Mengxi Road Jingkou
+District Zhenjiang Prov. 212003
+Neural Network
+Wu Chen
+Sch. of Comp. Sci. & Tech
+Jiangsu Univ. of Sci. & Tech.
+No. 2 Mengxi Road Jingkou
+District Zhenjiang Prov. 212003
+Ben-Bright Benuwa
+Sch. of Comp. Sci. & Comm.
+Eng. Jiangsu Univ. Xuefu Road
+01 Jingkou District Zhenjiang
+Prov. 212003
+supported by"
+0ceda9dae8b9f322df65ca2ef02caca9758aec6f,Context-Aware CNNs for Person Head Detection,"Context-aware CNNs for person head detection
+Tuan-Hung Vu∗
+Anton Osokin†
+INRIA/ENS
+Ivan Laptev∗"
+0c990e779067c563a79ae17c9d36094a745d7ed8,Model interpolation for eye localization using the Discriminative Generalized Hough Transform,"Model Interpolation for Eye Localization Using the
+Discriminative Generalized Hough Transform
+Ferdinand Hahmann, Heike Ruppertshofen, Gordon B¨oer, Hauke Schramm
+Institute of Applied Computer Science
+University of Applied Sciences Kiel
+Grenzstraße 3
+4149 Kiel"
+0cfcc1cd8bae5f5899cef0995debd7b38c46e817,Discrete texture traces: Topological representation of geometric context,"Discrete Texture Traces: Topological Representation of Geometric Context
+Jan Ernst∗ and Maneesh K. Singh
+Siemens Corporation, Corporate Research and Technology, Princeton, NJ, USA
+Department of Computer Science and Mathematics, Goethe University, Frankfurt am Main, Germany
+Visvanathan Ramesh†"
+0cbefba0f41982bdff091d0e5f0d5ef93185a55c,"Challenges in Monocular Visual Odometry: Photometric Calibration, Motion Bias, and Rolling Shutter Effect","Challenges in Monocular Visual Odometry:
+Photometric Calibration, Motion Bias and
+Rolling Shutter Effect
+Nan Yang1,2,∗, Rui Wang1,2,∗, Xiang Gao1 and Daniel Cremers1,2"
+0ce4110d4c3d8b19ca0f7f75bc680aa9ba8d239a,Genetic Algorithms for Classifiers’ Training Sets Optimisation Applied to Human Face Recognition,"JOURNAL OF MEDICAL INFORMATICS & TECHNOLOGIES Vol. 11/2007, ISSN 1642-6037
+Michał KAWULOK*
+GENETIC ALGORITHMS FOR CLASSIFIERS’ TRAINING SETS
+OPTIMISATION APPLIED TO HUMAN FACE RECOGNITION
+support vector machines,
+genetic algorithms,
+human face recognition
+Human face recognition is a multi-stage process within which many classification problems must be
+solved. This is performed by learning machines which elaborate classification rules based on a given training set.
+Therefore, one of the most important issues is selection of a training set which would properly represent the data
+that will be further classified. This paper presents an approach which utilizes genetic algorithms for selecting
+lassifiers’ training sets. This approach was implemented for the Support Vector Machines which is applied in
+two areas of automatic human face recognition: face verification and feature vectors comparison. Effectiveness
+of the presented concept was confirmed with appropriate experiments which results are described in this paper.
+. INTRODUCTION
+Face recognition [7, 13, 14] is among the most popular biometric techniques which are
+eing developed nowadays and it is worth noticing that this is the method which is the most
+frequently used naturally by humans. Automatic face recognition is characterized by a low
+level of required interaction with a person who is being recognized, but offers relatively low
+effectiveness comparing to other biometric methods [4, 9]. A face recognition system"
+0c1d5801f2b86afa969524dc74708a78450300d9,12 : Conditional Random Fields,"0-708: Probabilistic Graphical Models 10-708, Spring 2014
+2 : Conditional Random Fields
+Lecturer: Eric P. Xing
+Scribes: Qin Gao, Siheng Chen
+Hidden Markov Model
+.1 General parametric form
+In hidden Markov model (HMM), we have three sets of parameters,
+t = 1|yi
+transition probability matrix A : p(yj
+initialprobabilities : p(y1) ∼ Multinomial(π1, π2, ..., πM ),
+emission probabilities : p(xt|yi
+t) ∼ Multinomial(bi,1, bi,2, ..., bi,K).
+t−1 = 1) = ai,j,
+.2 Inference
+The inference can be done with forward algorithm which computes αk
+) recursively by
+t ≡ µt−1→t(k) = P (x1, ..., xt−1, xt, yk
+nd the backward algorithm which computes βk
+t = 1) recursively by
+(cid:88)"
+0c5afb209b647456e99ce42a6d9d177764f9a0dd,Recognizing Action Units for Facial Expression Analysis,"Recognizing Action Units for
+Facial Expression Analysis
+Ying-li Tian, Member, IEEE, Takeo Kanade, Fellow, IEEE, and Jeffrey F. Cohn, Member, IEEE"
+0c98defb5a83ea5dc5d90538d1cc8c4b6267a1cb,Perception of Dynamic Facial Expressions of Emotion: Electrophysiological Evidence,"Humboldt-Universität zu Berlin
+Dissertation
+Perception of Dynamic Facial Expressions
+of Emotion: Electrophysiological Evidence
+zur Erlangung des akademischen Grades Doctor rerum naturalium im Fach Psychologie
+Mathematisch-Naturwisseschafttlichen Fakultät II
+Guillermo Recio
+Dekan: Prof. Dr. Dr. Elmar Kulke
+Gutachter/in: 1. Prof. Dr. Werner Sommer
+2. Prof. Dr. Annekathrin Schacht
+3. Prof. Dr. Birgit Stürmer
+Datum der Einreichung:
+7.09.2012
+Datum der Promotion:
+07.03.2013"
+0c377fcbc3bbd35386b6ed4768beda7b5111eec6,A Unified Probabilistic Framework for Spontaneous Facial Action Modeling and Understanding,"A Unified Probabilistic Framework
+for Spontaneous Facial Action Modeling
+nd Understanding
+Yan Tong, Member, IEEE, Jixu Chen, Student Member, IEEE, and Qiang Ji, Senior Member, IEEE"
+0cb2dd5f178e3a297a0c33068961018659d0f443,IARPA Janus Benchmark-B Face Dataset,"© 2017 Noblis, Inc. IARPA Janus Benchmark-B Face Dataset Cameron Whitelam, Emma Taborsky*, Austin Blanton, Brianna Maze*, Jocelyn Adams*, Tim Miller*, Nathan Kalka*, Anil K. Jain**, James A. Duncan*, Kristen Allen, Jordan Cheney*, Patrick Grother*** Noblis* Michigan State University** NIST*** 21 July 2017"
+0c1d40de56698e672d3906b96f47ae1361fc3912,Face recognition using kernel principal component analysis,"Advances in Vision Computing: An International Journal (AVC) Vol.1, No.1, March 2014
+Face Recognition Using Kernel
+PrincipalComponent Analysis
+Jayanthi T and 2Dr. Aji S
+Assistant Professor,Department of Computer Applications,
+Mohandas College of Engineering and Technology, Anad, Nedumangad
+Thiruvananthapuram, India
+Assistant Professor,Department of Computer Science,University of Kerala
+Kariyavattom,Thiruvananthapuram, India"
+0cd8895b4a8f16618686f622522726991ca2a324,Discrete Choice Models for Static Facial Expression Recognition,"Discrete Choice Models for Static Facial Expression
+Recognition
+Gianluca Antonini1, Matteo Sorci1, Michel Bierlaire2, and Jean-Philippe Thiran1
+Ecole Polytechnique Federale de Lausanne, Signal Processing Institute
+Ecole Polytechnique Federale de Lausanne, Operation Research Group
+Ecublens, 1015 Lausanne, Switzerland
+Ecublens, 1015 Lausanne, Switzerland"
+0cf7da0df64557a4774100f6fde898bc4a3c4840,Shape matching and object recognition using low distortion correspondences,"Shape Matching and Object Recognition using Low Distortion Correspondences
+Alexander C. Berg Tamara L. Berg
+Jitendra Malik
+Department of Electrical Engineering and Computer Science
+U.C. Berkeley"
+0cbe059c181278a373292a6af1667c54911e7925,'Owl' and 'Lizard': patterns of head pose and eye pose in driver gaze classification,"Owl and Lizard: Patterns of Head Pose and Eye
+Pose in Driver Gaze Classification
+Lex Fridman1, Joonbum Lee1, Bryan Reimer1, and Trent Victor2
+Massachusetts Institute of Technology (MIT)
+Chalmers University of Technology, SAFER"
+0c4659b35ec2518914da924e692deb37e96d6206,Registering a MultiSensor Ensemble of Images,"Registering a MultiSensor Ensemble of Images
+Jeff Orchard, Member, IEEE, and Richard Mann"
+0c53b45321131e61d1266cb960fc47c401f856f1,Space-Time Body Pose Estimation in Uncontrolled Environments,"Space-time Body Pose Estimation in Uncontrolled Environments
+Marcel Germann
+ETH Zurich
+Switzerland
+Tiberiu Popa
+ETH Zurich
+Switzerland
+Remo Ziegler
+LiberoVision AG
+Switzerland
+Richard Keiser
+LiberoVision AG
+Switzerland
+Markus Gross
+ETH Zurich
+Switzerland"
+0cd8fabfc8e22be8275c317e7ccd37e640711c62,Experiments on an RGB-D Wearable Vision System for Egocentric Activity Recognition,"Experiments on an RGB-D Wearable Vision System
+for Egocentric Activity Recognition
+Mohammad Moghimi1, Pablo Azagra2, Luis Montesano2, Ana C. Murillo1,2 and Serge Belongie3
+UC San Diego
+La Jolla, CA
+DIIS - I3A
+University of Zaragoza, Spain
+{montesano,
+Cornell Tech
+New York, NY
+tech.cornell.edu"
+0cdf238fd44684b49302c22b062772e7c66ea182,Autonomous Decision Making Robots,"International Journal of Artificial Intelligence and Applications (IJAIA), Vol.9, No.2, March 2018
+UNSUPERVISED ROBOTIC SORTING: TOWARDS
+AUTONOMOUS DECISION MAKING ROBOTS
+Joris Gu´Erin, St´Ephane Thiery, Eric Nyiri And Olivier Gibaru
+Arts et M´etiers ParisTech, Lille, FRANCE"
+0ca475433d74abb3c0f38fbe9d212058dc771570,Learning pairwise feature dissimilarities for person re-identification,"Learning Pairwise Feature Dissimilarities
+for Person Re-Identification
+Niki Martinel
+University of Udine
+Udine, Italy
+Christian Micheloni
+University of Udine
+Udine, Italy
+Claudio Piciarelli
+University of Udine
+Udine, Italy"
+0c03bb741972c99b71d8d733b92e5fa9430cbede,Learning rank reduced interpolation with principal component analysis,"Learning Rank Reduced Interpolation
+with Principal Component Analysis
+Matthias Ochs1, Henry Bradler1 and Rudolf Mester1,2"
+0c2c53d71942ad3171b693f565812f1db43215e0,Descriptive visual words and visual phrases for image applications,"Descriptive Visual Words and Visual Phrases for Image
+Shiliang Zhang1, Qi Tian2, Gang Hua3, Qingming Huang4, Shipeng Li2
+Applications
+Key Lab of Intelli. Info.
+Process., Inst. of Comput.
+Tech., CAS, Beijing 100080,
+China
+Microsoft Research Asia,
+Beijing 100080, China
+Microsoft Live Labs
+Research, Redmond, WA
+78052, U.S.A.
+Graduate University of
+Chinese Academy of
+Sciences, Beijing 100049,
+China
+{slzhang, {qitian, ganghua,"
+0c30850067c296a01b72cf4803c9712926ae5a95,Text-Dependent Audiovisual Synchrony Detection for Spoofing Detection in Mobile Person Recognition,"INTERSPEECH 2016
+September 8–12, 2016, San Francisco, USA
+Text-Dependent Audiovisual Synchrony Detection for Spoofing Detection in
+Mobile Person Recognition
+Amit Aides1,2, Hagai Aronowitz1
+Dept of Electrical Engineering,Technion - Israel Institute of Technology, Haifa, Israel
+IBM Research - Haifa, Israel
+{amitaid,"
+0cf333cab1a9ccf671cebf31b78180f863c1caa7,Automated Evaluation of Semantic Segmentation Robustness for Autonomous Driving,"Automated Evaluation of Semantic Segmentation
+Robustness for Autonomous Driving
+Wei Zhou, Member, IEEE, Julie Stephany Berrio, Member, IEEE,
+Stewart Worrall, Member, IEEE, and Eduardo Nebot, Member, IEEE"
+0c25a4636ebde18e229f7e459f1adaab1e9a2db9,Multi - class Classification and Clustering based Multi - object Tracking,"Multi-class Classification and Clustering based
+Multi-object Tracking
+Nii Longdon Sowah, Qingbo Wu, Fanman Meng"
+0ced7b814ec3bb9aebe0fcf0cac3d78f36361eae,Central Local Directional Pattern Value Flooding Co-occurrence Matrix based Features for Face Recognition,"Dr. P Chandra Sekhar Reddy, International Journal of Computer Science and Mobile Computing, Vol.6 Issue.1, January- 2017, pg. 221-227
+Available Online at www.ijcsmc.com
+International Journal of Computer Science and Mobile Computing
+A Monthly Journal of Computer Science and Information Technology
+ISSN 2320–088X
+IMPACT FACTOR: 6.017
+IJCSMC, Vol. 6, Issue. 1, January 2017, pg.221 – 227
+Central Local Directional Pattern Value
+Flooding Co-occurrence Matrix based
+Features for Face Recognition
+Dr. P Chandra Sekhar Reddy
+Professor, CSE Department, Gokaraju Rangaraju Institute of Engineering and Technology, Hyderabad"
+0ccd410b6ae977a945a84bad1c2785cef4c73214,Pseudo two-dimensional Hidden Markov Models for face detection in colour images,"Pseudo two-dimensional Hidden Markov Models
+for face detection in colour images
+ephane Marchand-Maillet
+Bernard M
+erialdo
+Department of Multimedia Communications
+EURECOM Institute
+
+http:www.eurecom.fr~marchand
+To be presented in the
+nd Int. Conf. on Audio- and Video-based Biometric Person Authentication"
+0c9d9ebecfce885f3b1e7bd82ec1b74e9f17b9de,Attribute expansion with sequential learning for object classification,"ATTRIBUTE EXPANSION WITH SEQUENTIAL LEARNING FOR OBJECT
+CLASSIFICATION
+Biao Niuy, Bin Liz, Peng Liy, Xi Zhangy, Jian Chengy, Hanqing Luy
+National Laboratory of Pattern Recognition, CASIA, Beijing, China 100190
+ShiJiaZhuang Vocational Technology Institute, Hebei, China 050000
+{bniu, pli, xi.zhang, jcheng,"
+0c922f8be9f0368c1abd53b8d9554f06b73a56cf,High-Level Fusion of Depth and Intensity for Pedestrian Classification,"High-Level Fusion of Depth and
+Intensity for Pedestrian Classification
+Marcus Rohrbach1,3,(cid:2), Markus Enzweiler2,(cid:2), and Dariu M. Gavrila1,4
+Environment Perception, Group Research, Daimler AG, Ulm, Germany
+Image & Pattern Analysis Group, Dept. of Math.
+nd Computer Science, Univ. of Heidelberg, Germany
+Dept. of Computer Science, TU Darmstadt, Germany
+Intelligent Systems Lab, Fac. of Science, Univ. of Amsterdam, The Netherlands"
+0c79485f64733bd128ef8c395034b6bc77abf94d,Fully automatic expression-invariant face correspondence,"Fully Automatic Expression-Invariant Face Correspondence
+Augusto Salazar∗†
+Stefanie Wuhrer†‡
+Chang Shu‡
+Flavio Prieto §
+February 1, 2013"
+0c53ef79bb8e5ba4e6a8ebad6d453ecf3672926d,Weakly Supervised PatchNets: Describing and Aggregating Local Patches for Scene Recognition,"SUBMITTED TO JOURNAL
+Weakly Supervised PatchNets: Describing and
+Aggregating Local Patches for Scene Recognition
+Zhe Wang, Limin Wang, Yali Wang, Bowen Zhang, and Yu Qiao, Senior Member, IEEE"
+6601a0906e503a6221d2e0f2ca8c3f544a4adab7,Detection of Ancient Settlement Mounds : Archaeological Survey Based on the SRTM Terrain Model,"SRTM-2 2/9/06 3:27 PM Page 321
+Detection of Ancient Settlement Mounds:
+Archaeological Survey Based on the
+SRTM Terrain Model
+B.H. Menze, J.A. Ur, and A.G. Sherratt"
+660b73b0f39d4e644bf13a1745d6ee74424d4a16,Constructing Kernel Machines in the Empirical Kernel Feature Space,",250+OPEN ACCESS BOOKS106,000+INTERNATIONALAUTHORS AND EDITORS113+ MILLIONDOWNLOADSBOOKSDELIVERED TO151 COUNTRIESAUTHORS AMONGTOP 1%MOST CITED SCIENTIST12.2%AUTHORS AND EDITORSFROM TOP 500 UNIVERSITIESSelection of our books indexed in theBook Citation Index in Web of Science™Core Collection (BKCI)Chapter from the book Reviews, Refinements and New Ideas in Face RecognitionDownloaded from: http://www.intechopen.com/books/reviews-refinements-and-new-ideas-in-face-recognitionPUBLISHED BYWorld's largest Science,Technology & Medicine Open Access book publisherInterested in publishing with InTechOpen?Contact us at"
+66d512342355fb77a4450decc89977efe7e55fa2,Learning Non-linear Transform with Discrim- Inative and Minimum Information Loss Priors,"Under review as a conference paper at ICLR 2018
+LEARNING NON-LINEAR TRANSFORM WITH DISCRIM-
+INATIVE AND MINIMUM INFORMATION LOSS PRIORS
+Anonymous authors
+Paper under double-blind review"
+661be86559295d3b2cbabcd31cc90848f601f55c,Learning to Steer by Mimicking Features from Heterogeneous Auxiliary Networks,"Learning to Steer by Mimicking Features from Heterogeneous Auxiliary Networks
+The Chinese University of Hong Kong 2SenseTime Group Limited 3Nanyang Technological University
+Yuenan Hou1, Zheng Ma2, Chunxiao Liu2, and Chen Change Loy3
+{mazheng,"
+661c16658db873efeee3621603fe6bd53eaffac1,LLE Score: A New Filter-Based Unsupervised Feature Selection Method Based on Nonlinear Manifold Embedding and Its Application to Image Recognition,"LLE score: a new filter-based unsupervised feature
+selection method based on nonlinear manifold
+embedding and its application to image recognition
+Chao Yao, Ya-Feng Liu, Member, IEEE, Bo Jiang, Jungong Han, and Junwei Han, Senior Member, IEEE."
+6643a7feebd0479916d94fb9186e403a4e5f7cbf,Chapter 8 3 D Face Recognition,"Chapter 8
+D Face Recognition
+Ajmal Mian and Nick Pears"
+66c792b7e9946f8cb92fac185267d03371437451,Adaptive Affinity Fields for Semantic Segmentation,"Adaptive Affinity Fields for Semantic Segmentation
+Tsung-Wei Ke*, Jyh-Jing Hwang*, Ziwei Liu, and Stella X. Yu
+UC Berkeley / ICSI"
+661ca4bbb49bb496f56311e9d4263dfac8eb96e9,Datasheets for Datasets,"Datasheets for Datasets
+Timnit Gebru 1 Jamie Morgenstern 2 Briana Vecchione 3 Jennifer Wortman Vaughan 1 Hanna Wallach 1
+Hal Daumé III 1 4 Kate Crawford 1 5"
+6668ca5ab57d68070f90671a4f92a6bc25f80470,Measuring cues for stand-off deception detection based on full-body non-verbal features in body-worn cameras,"Measuring cues for stand-off deception detection based on full-body
+non-verbal features in body-worn cameras
+Henri Bouma 1, Gertjan Burghouts, Richard den Hollander, Sophie Van Der Zee, Jan Baan,
+Johan-Martijn ten Hove, Sjaak van Diepen, Paul van den Haak, Jeroen van Rest
+TNO, Oude Waalsdorperweg 63, 2597 AK The Hague, The Netherlands"
+66b37797286952e7735901e152b4cdea171e8567,Recovering 3D Planes from a Single Image via Convolutional Neural Networks,"Recovering 3D Planes from a Single Image via
+Convolutional Neural Networks
+Fengting Yang and Zihan Zhou
+The Pennsylvania State University
+{fuy34,"
+66f55dc04aaf4eefdecef202211ad7563f7a703b,Synthesizing Programs for Images using Reinforced Adversarial Learning,"Synthesizing Programs for Images using Reinforced Adversarial Learning
+Yaroslav Ganin 1 Tejas Kulkarni 2 Igor Babuschkin 2 S. M. Ali Eslami 2 Oriol Vinyals 2"
+66d087f3dd2e19ffe340c26ef17efe0062a59290,Dog Breed Identification,"Dog Breed Identification
+Whitney LaRow
+Brian Mittl
+Vijay Singh"
+66660f5e8b2a4a695abe0f9e1df32d230126f773,Applying Deep Learning to Improve Maritime Situational Awareness,"Applying Deep Learning to Improve
+Maritime Situational Awareness
+Kathy Tang
+Stottler Henke Associates, Inc.
+650 S. Amphlett Blvd. Ste. 300
+San Mateo, CA 94402
+Intelligence"
+6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c,Ordinal Regression with Multiple Output CNN for Age Estimation,"Ordinal Regression with Multiple Output CNN for Age Estimation
+Zhenxing Niu1
+Gang Hua3
+Xidian University 2Xi’an Jiaotong University 3Microsoft Research Asia
+Xinbo Gao1
+Mo Zhou1
+Le Wang2"
+66719918aa6562d14ea53286bf248d6f1a7d6b14,Perceive Your Users in Depth: Learning Universal User Representations from Multiple E-commerce Tasks,"Perceive Your Users in Depth: Learning Universal User
+Representations from Multiple E-commerce Tasks
+Yabo Ni∗, Dan Ou∗, Shichen Liu, Xiang Li, Wenwu Ou, Anxiang Zeng, Luo Si
+Search Algorithm Team, Alibaba Group, Seattle & Hangzhou, China"
+66b9e9d488ef2bad9bf0d2fb98f73f38fec2bff8,Context-aware Cascade Attention-based RNN for Video Emotion Recognition,"Context-aware Cascade Attention-based RNN for
+Video Emotion Recognition
+Man-Chin Sun
+Emotibot Inc.
+Taipei, Taiwan
+Shih-Huan Hsu
+Emotibot Inc.
+Taipei, Taiwan
+Min-Chun Yang
+Emotibot Inc.
+Taipei, Taiwan
+Jen-Hsien Chien
+Emotibot Inc.
+Taipei, Taiwan"
+669727b3258bb3edc38709147f348dc67e3fcac4,A Lightweight approach for biometric template protection,"A Lightweight approach for biometric template protection*
+Hisham Al-Assam, Harin Sellahewa, & Sabah Jassim
+University of Buckingham, Buckingham MK18 1EG, U.K.
+{hisham.al-assam , harin.sellahewa,"
+66837b29270f3e03df64941a081d70c687c7955c,ActionXPose: A Novel 2D Multi-view Pose-based Algorithm for Real-time Human Action Recognition,"ActionXPose: A Novel 2D Multi-view Pose-based
+Algorithm for Real-time Human Action Recognition
+Federico Angelini, Student Member, IEEE, Zeyu Fu, Student Member, IEEE, Yang Long, Senior Member, IEEE,
+Ling Shao, Senior Member, IEEE, and Syed Mohsen Naqvi, Senior Member, IEEE"
+66c92c9145c2b6a304eb1b3a58e2a717884fe064,Emotions in Pervasive Computing Environments,"IJCSI International Journal of Computer Science Issues, Vol. 6, No. 1, 2009
+ISSN (Online): 1694-0784
+ISSN (Print): 1694-0814
+Emotions in Pervasive Computing Environments
+Nevin VUNKA JUNGUM1 and Éric LAURENT2
+1 Computer Science and Engineering Department,
+University of Mauritius
+Réduit, Mauritius
+Laboratoire de Psychologie, ENACT-MCA,
+University of Franche-Comté
+Besançon, France"
+66a2c229ac82e38f1b7c77a786d8cf0d7e369598,A Probabilistic Adaptive Search System for Exploring the Face Space,"Proceedings of the 2016 Industrial and Systems Engineering Research Conference
+H. Yang, Z. Kong, and MD Sarder, eds.
+A Probabilistic Adaptive Search System
+for Exploring the Face Space
+Andres G. Abad and Luis I. Reyes Castro
+Escuela Superior Politecnica del Litoral (ESPOL)
+Guayaquil-Ecuador"
+669ae4a3a21b5800829ac9ee7e780fa42f9bc5ad,LDADEEP+: Latent aspect discovery with deep representations,"978-1-4799-9988-0/16/$31.00 ©2016 IEEE
+ICASSP 2016"
+6603e7de5b155c86407edc43099b46b974b7f0bb,Local Feature Based Face Recognition,"We are IntechOpen,
+the world’s leading publisher of
+Open Access books
+Built by scientists, for scientists
+,800
+16,000
+Open access books available
+International authors and editors
+Downloads
+Our authors are among the
+Countries delivered to
+TOP 1%
+2.2%
+most cited scientists
+Contributors from top 500 universities
+Selection of our books indexed in the Book Citation Index
+in Web of Science™ Core Collection (BKCI)
+Interested in publishing with us?
+Contact
+Numbers displayed above are based on latest data collected."
+66ee33bf0064eee159f3563e32b15c5bbd4140a0,Face Recognition Under Varying Viewing Conditions with Subspace Distance,"Face Recognition Under Varying Viewing Conditions with Subspace Distance
+Jen-Mei Chang
+Department of Mathematics and Statistics
+California State University, Long Beach
+250 Bellflower Blvd.
+Long Beach, California 90840-1001"
+66a9935e958a779a3a2267c85ecb69fbbb75b8dc,Fast and Robust Fixed-Rank Matrix Recovery,"FAST AND ROBUST FIXED-RANK MATRIX RECOVERY
+Fast and Robust Fixed-Rank Matrix
+Recovery
+German Ros*, Julio Guerrero, Angel Sappa, Daniel Ponsa and
+Antonio Lopez"
+66533107f9abdc7d1cb8f8795025fc7e78eb1122,Visual Servoing for a User's Mouth with Effective Intention Reading in a Wheelchair-based Robotic Arm,"Vi a
+i a Whee
+W y g Sgy Dae i iy g S g iz ad Ze ga Biey
+y EECS AST 373 1  g Dg Y g G  Taej 305 701 REA
+z VR Cee ETR 161 ajg Dg Y g G  Taej 305 350 REA
+Abac
+Thee exi he c eaive aciviy bewee a h
+a beig ad ehabi
+a eae ehabi
+e ad ha he bee(cid:12) f ehabi
+ ch a ai
+eadig i e f he eeia
+fied
+f ad afey f a
+
+bic a ye ARES  ad i h a b
+ieaci ech
+ech
+
+vi a
+66810438bfb52367e3f6f62c24f5bc127cf92e56,Face Recognition of Illumination Tolerance in 2D Subspace Based on the Optimum Correlation Filter,"Face Recognition of Illumination Tolerance in 2D
+Subspace Based on the Optimum Correlation
+Filter
+Xu Yi
+Department of Information Engineering, Hunan Industry Polytechnic, Changsha, China
+images will be tested to project"
+668e93e89835ec662d21cf695b7347339ce74c78,Likelihood Ratio Fusion within Scores of Independent Component Analysis Features Based Face Biometrics Verification Systems,"June. 2015. Vol. 6, No.3
+ISSN 2305-1493
+International Journal of Scientific Knowledge
+Computing and Information Technology
+© 2012 - 2015 IJSK & K.A.J. All rights reserved
+www.ijsk.org/ijsk
+LIKELIHOOD RATIO FUSION WITHIN SCORES OF
+INDEPENDENT COMPONENT ANALYSIS FEATURES BASED
+FACE BIOMETRICS VERIFICATION SYSTEMS
+SOLTANE MOHAMED
+Electrical Engineering & Computing Department, Faculty of Sciences & Technology,
+DOCTOR YAHIA FARES UNIVERSITY OF MEDEA, 26000 MEDEA, ALGERIA
+Laboratoire des Systèmes Électroniques Avancées (LSEA)"
+66c0fcf637bede76a6ea61b58655c5fc7e890630,Improving the Generalization of Neural Networks by Changing the Structure of Artificial Neuron,"Improving the Generalization of Neural Networks by Changing the Structure of Artificial Neuron. pp 195-204
+IMPROVING THE GENERALIZATION OF NEURAL NETWORKS BY CHANGING THE STRUCTURE OF
+ARTIFICIAL NEURON
+Mohammad Reza Daliri1, Mehdi Fatan2
+Biomedical Engineering Department and Iran Neural Technology Center,
+Faculty of Electrical Engineering, Iran University of Science and Technology (IUST),
+Narmak, 16846-13114 Tehran, Iran (Email:
+Mechatronics Group, Faculty of Electrical Engineering,
+Qazvin Islamic Azad University, Qazvin, Iran (Email:
+Corresponding author: M.R. Daliri, Email:"
+66e2c3d23af8ed76b116121827b9bc5e99cf4acc,Video Prediction with Appearance and Motion Conditions,"Video Prediction with Appearance and Motion Conditions
+Yunseok Jang 1 2 Gunhee Kim 2 Yale Song 3"
+66af2afd4c598c2841dbfd1053bf0c386579234e,Context-assisted face clustering framework with human-in-the-loop,"Noname manuscript No.
+(will be inserted by the editor)
+Context Assisted Face Clustering Framework with
+Human-in-the-Loop
+Liyan Zhang · Dmitri V. Kalashnikov ·
+Sharad Mehrotra
+Received: date / Accepted: date"
+66e6f08873325d37e0ec20a4769ce881e04e964e,The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding,"Int J Comput Vis (2014) 108:59–81
+DOI 10.1007/s11263-013-0695-z
+The SUN Attribute Database: Beyond Categories for Deeper Scene
+Understanding
+Genevieve Patterson · Chen Xu · Hang Su ·
+James Hays
+Received: 27 February 2013 / Accepted: 28 December 2013 / Published online: 18 January 2014
+© Springer Science+Business Media New York 2014"
+665e6aa652b99350a08090faaf9d4bcc7800186e,Detection-Free Multiobject Tracking by Reconfigurable Inference With Bundle Representations,"Detection-Free Multiobject Tracking by
+Reconfigurable Inference With
+Bundle Representations
+Liang Lin, Yongyi Lu, Chenglong Li, Hui Cheng, and Wangmeng Zuo, Senior Member, IEEE"
+661da40b838806a7effcb42d63a9624fcd684976,An Illumination Invariant Accurate Face Recognition with Down Scaling of DCT Coefficients,"An Illumination Invariant Accurate
+Face Recognition with Down Scaling
+of DCT Coefficients
+Virendra P. Vishwakarma, Sujata Pandey and M. N. Gupta
+Department of Computer Science and Engineering, Amity School of Engineering and Technology, New Delhi, India
+In this paper, a novel approach for illumination normal-
+ization under varying lighting conditions is presented.
+Our approach utilizes the fact that discrete cosine trans-
+form (DCT) low-frequency coefficients correspond to
+illumination variations in a digital image. Under varying
+illuminations, the images captured may have low con-
+trast; initially we apply histogram equalization on these
+for contrast stretching. Then the low-frequency DCT
+oefficients are scaled down to compensate the illumi-
+nation variations. The value of scaling down factor and
+the number of low-frequency DCT coefficients, which
+re to be rescaled, are obtained experimentally. The
+lassification is done using k−nearest neighbor classi-
+fication and nearest mean classification on the images
+obtained by inverse DCT on the processed coefficients."
+66886f5af67b22d14177119520bd9c9f39cdd2e6,Learning Additive Kernel For Feature Transformation and Its Application to CNN Features,"T. KOBAYASHI: LEARNING ADDITIVE KERNEL
+Learning Additive Kernel For Feature
+Transformation and Its Application to CNN
+Features
+Takumi Kobayashi
+National Institute of Advanced Industrial
+Science and Technology
+Tsukuba, Japan"
+664ccdcc614a8ecfbfedadc7b42b9537fe43d3f1,Probabilistic integration of sparse audio-visual cues for identity tracking,"Probabilistic Integration of Sparse Audio-Visual Cues for
+Identity Tracking
+Keni Bernardin
+Universität Karlsruhe, ITI
+Am Fasanengarten 5
+76131, Karlsruhe, Germany
+Rainer Stiefelhagen
+Universität Karlsruhe, ITI
+Am Fasanengarten 5
+76131, Karlsruhe, Germany
+Alex Waibel
+Universität Karlsruhe, ITI
+Am Fasanengarten 5
+76131, Karlsruhe, Germany"
+660c8a9fa166c1d81e65192e011eacfec208ec00,Discrimination of visual pedestrians data by combining projection and prediction learning,"Discrimination of visual pedestrians data by combining
+projection and prediction learning
+Mathieu Lefort, Alexander Gepperth
+To cite this version:
+Mathieu Lefort, Alexander Gepperth. Discrimination of visual pedestrians data by combining
+projection and prediction learning. ESANN - European Symposium on Artificial Neural Net-
+works, Computational Intelligence and Machine Learning, Apr 2014, Bruges, Belgium. 2014.
+<hal-01061654>
+HAL Id: hal-01061654
+https://hal.inria.fr/hal-01061654
+Submitted on 8 Sep 2014
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destin´ee au d´epˆot et `a la diffusion de documents
+scientifiques de niveau recherche, publi´es ou non,"
+66f8115136a11684e3b95c5aaa1476a871d58a66,Face recognition using multiple image view line segments,"JAMESCOOKUNIVERSITY
+FaceRecognitionusingMultiple
+ImageViewLineSegments
+StefanAeberhardandOlivierdeVel
+TR /
+DEPARTMENTOFCOMPUTERSCIENCETOWNSVILLE
+QUEENSLAND
+AUSTRALIA"
+66b955311ab6841c4644414d8ce2faf6ca721602,RoboCupRescue 2009 - Robot League Team Darmstadt Rescue Robot,"RoboCupRescue 2009 - Robot League Team
+Darmstadt Rescue Robot Team (Germany)
+Micha Andriluka1, Martin Friedmann1, Stefan Kohlbrecher1, Johannes Meyer2,
+Karen Petersen1, Christian Reinl1, Peter Schauß1, Paul Schnitzspan1, Armin
+Strobel2, Dirk Thomas1, Anguelina Vatcheva1, Oskar von Stryk1(cid:63)
+Department of Computer Science (1) and Department of Mechanical Engineering (2),
+Technische Universit¨at Darmstadt,
+Karolinenplatz 5, D-64289 Darmstadt, Germany
+E-Mail:
+Web: www.gkmm.tu-darmstadt.de/rescue"
+66860100a3355f26ffcb9dcbf27e27e4757d641d,Feature Selection in Supervised Saliency Prediction,"Feature Selection in Supervised Saliency Prediction
+Ming Liang, Student Member, IEEE, and Xiaolin Hu, Senior Member, IEEE"
+3edb0fa2d6b0f1984e8e2c523c558cb026b2a983,Automatic Age Estimation Based on Facial Aging Patterns,"Automatic Age Estimation Based on
+Facial Aging Patterns
+Xin Geng, Zhi-Hua Zhou, Senior Member, IEEE,
+Kate Smith-Miles, Senior Member, IEEE"
+3e4b38b0574e740dcbd8f8c5dfe05dbfb2a92c07,Facial Expression Recognition with Local Binary Patterns and Linear Programming,"FACIAL EXPRESSION RECOGNITION WITH LOCAL BINARY PATTERNS
+AND LINEAR PROGRAMMING
+Xiaoyi Feng1, 2, Matti Pietikäinen1, Abdenour Hadid1
+Machine Vision Group, Infotech Oulu and Dept. of Electrical and Information Engineering
+P. O. Box 4500 Fin-90014 University of Oulu, Finland
+2 College of Electronics and Information, Northwestern Polytechnic University
+710072 Xi’an, China
+In this work, we propose a novel approach to recognize facial expressions from static
+images. First, the Local Binary Patterns (LBP) are used to efficiently represent the facial
+images and then the Linear Programming (LP) technique is adopted to classify the seven
+facial expressions anger, disgust, fear, happiness, sadness, surprise and neutral.
+Experimental results demonstrate an average recognition accuracy of 93.8% on the JAFFE
+database, which outperforms the rates of all other reported methods on the same database.
+Introduction
+Facial expression recognition from static
+images is a more challenging problem
+than from image sequences because less
+information for expression actions
+vailable. However, information in a
+single image is sometimes enough for"
+3e6b70e5be3dbe688866d8dd4382ce05b201fd28,Evaluation of Face Recognition Techniques,"PIAGENG 2009: Image Processing and Photonics for Agricultural Engineering, edited by Honghua Tan, Qi Luo,
+Proc. of SPIE Vol. 7489, 74890M · © 2009 SPIE · CCC code: 0277-786X/09/$18 · doi: 10.1117/12.836686
+Proc. of SPIE Vol. 7489 74890M-1
+Downloaded from SPIE Digital Library on 24 Jan 2010 to 130.194.78.137. Terms of Use: http://spiedl.org/terms"
+3e6fa6cf1fe2e23fdf7716f89b160333c7a93b26,A Performance Evaluation of Single and Multi-feature People Detection,"A Performance Evaluation of Single and
+Multi-Feature People Detection
+Christian Wojek, Bernt Schiele
+{wojek,
+Computer Science Department
+TU Darmstadt"
+3e4acf3f2d112fc6516abcdddbe9e17d839f5d9b,Deep Value Networks Learn to Evaluate and Iteratively Refine Structured Outputs,"Deep Value Networks Learn to
+Evaluate and Iteratively Refine Structured Outputs
+Michael Gygli 1 * Mohammad Norouzi 2 Anelia Angelova 2"
+3e0415f0e8c36f20042d6a1f8b7c216fb5543c3a,RGB-D Segmentation of Poultry Entrails,"Aalborg Universitet
+RGB-D Segmentation of Poultry Entrails
+Philipsen, Mark Philip; Jørgensen, Anders; Guerrero, Sergio Escalera; Moeslund, Thomas B.
+Published in:
+IX International Conference on Articulated Motion and Deformable Objects
+DOI (link to publication from Publisher):
+0.1007/978-3-319-41778-3_17
+Publication date:
+Document Version
+Accepted author manuscript, peer reviewed version
+Link to publication from Aalborg University
+Citation for published version (APA):
+Philipsen, M. P., Jørgensen, A., Guerrero, S. E., & Moeslund, T. B. (2016). RGB-D Segmentation of Poultry
+Entrails. In IX International Conference on Articulated Motion and Deformable Objects (pp. 168-174). Springer.
+Lecture Notes in Computer Science, Vol.. 9756, DOI: 10.1007/978-3-319-41778-3_17
+General rights
+Copyright and moral rights for the publications made accessible in the public portal are retained by the authors and/or other copyright owners
+nd it is a condition of accessing publications that users recognise and abide by the legal requirements associated with these rights.
+? Users may download and print one copy of any publication from the public portal for the purpose of private study or research.
+? You may not further distribute the material or use it for any profit-making activity or commercial gain"
+3efea06ad6398f9db07acf34479c81a99479e80b,Localizing Moments in Video with Natural Language,"Localizing Moments in Video with Natural Language
+Lisa Anne Hendricks1
+, Oliver Wang2, Eli Shechtman2, Josef Sivic2
+, Trevor Darrell1, Bryan Russell2
+UC Berkeley, 2Adobe Research, 3INRIA
+https://people.eecs.berkeley.edu/˜lisa_anne/didemo.html
+Figure 1: We consider localizing moments in video with natural language and demonstrate that incorporating local and
+global video features is important for this task. To train and evaluate our model, we collect the Distinct Describable Moments
+(DiDeMo) dataset which consists of over 40,000 pairs of localized video moments and corresponding natural language."
+3e0a12352fe3e9fb9246ee0f81ff7fbf0600f818,Facial Surface Analysis using Iso-Geodesic Curves in Three Dimensional Face Recognition System,"Facial Surface Analysis using Iso-Geodesic Curves
+in Three Dimensional Face Recognition System
+Rachid AHDID, El Mahdi BARRAH, Said SAFI and Bouzid MANAUT"
+3e3f305dac4fbb813e60ac778d6929012b4b745a,Feature sampling and partitioning for visual vocabulary generation on large action classification datasets,"Feature sampling and partitioning for visual vocabulary
+generation on large action classification datasets.
+Michael Sapienza1, Fabio Cuzzolin1, and Philip H.S. Torr2
+Department of Computing and Communications Technology, Oxford Brookes University.
+Department of Engineering Science, University of Oxford."
+3ea8a6dc79d79319f7ad90d663558c664cf298d4,Automatic Facial Expression Recognition from Video Sequences,"(cid:13) Copyright by Ira Cohen, 2000"
+3e8de2f904dea8368477daebab0c0dc97e0229f4,Detection and Classification of Vehicles from Omnidirectional Videos using Temporal Average of Silhouettes,"Detection and Classification of Vehicles from Omnidirectional Videos
+using Temporal Average of Silhouettes
+Computer Vision Research Group, Department of Computer Engineering, Izmir Institute of Technology, 35430,
+Hakki Can Karaimer and Yalin Bastanlar
+Izmir, Turkey
+{cankaraimer,
+Keywords:
+Omnidirectional Camera, Omnidirectional Video, Object Detection, Vehicle Detection, Vehicle
+Classification."
+3eff18934f5870b27f80c8b1d7104967460e3035,Driver hand localization and grasp analysis: A vision-based real-time approach,
+3e4f84ce00027723bdfdb21156c9003168bc1c80,A co-training approach to automatic face recognition,"© EURASIP, 2011 - ISSN 2076-1465
+9th European Signal Processing Conference (EUSIPCO 2011)
+INTRODUCTION"
+3e56a9b6c6aced2cb14f9cd7f89d145851c44113,Zero and Few Shot Learning with Semantic Feature Synthesis and Competitive Learning,"Zero and Few Shot Learning with Semantic
+Feature Synthesis and Competitive Learning
+Zhiwu Lu, Jiechao Guan, Aoxue Li, Tao Xiang, An Zhao, and Ji-Rong Wen"
+3e08d000ba3dd382c16e4295435ef8264235ccbc,Multiple People Tracking in Smart Camera Networks by Greedy Joint-Likelihood Maximization,
+3e2588aaa719c63e48fe599a7f0dbea10a41b4eb,Using Sparse Semantic Embeddings Learned from Multimodal Text and Image Data to Model Human Conceptual Knowledge,"Using sparse semantic embeddings learned from multimodal text and
+image data to model human conceptual knowledge
+Steven Derby1
+Paul Miller1
+Brian Murphy1,2
+Barry Devereux1
+Queen’s University Belfast, Belfast, United Kingdom
+{sderby02, p.miller, brian.murphy,
+BrainWaveBank Ltd., Belfast, United Kingdom"
+3edf3a996790fef8957e21c68ddf48b52238e662,Product of tracking experts for visual tracking of surgical tools,"Product of Tracking Experts for Visual Tracking of Surgical Tools
+Suren Kumar, Madusudanan Sathia Narayanan, Pankaj Singhal, Jason J. Corso and Venkat Krovi
+State University of New York (SUNY) at Buffalo"
+3eec9e8d5051e84624ea7e009a8947403dee99d1,"Material Recognition Meets 3D Reconstruction: Novel Tools for Efficient, Automatic Acquisition Systems","Material Recognition Meets 3D
+Reconstruction: Novel Tools for Efficient,
+Automatic Acquisition Systems
+Dissertation
+Erlangung des Doktorgrades (Dr. rer. nat.)
+Mathematisch-Naturwissenschaftlichen Fakultät
+der Rheinischen Friedrich-Wilhelms-Universität Bonn
+vorgelegt von
+Dipl.-Ing. Michael Weinmann
+us Karlsruhe
+Bonn, Dezember 2015"
+3e04feb0b6392f94554f6d18e24fadba1a28b65f,Subspace Image Representation for Facial Expression Analysis and Face Recognition and its Relation to the Human Visual System,"Subspace Image Representation for Facial
+Expression Analysis and Face Recognition
+nd its Relation to the Human Visual System
+Ioan Buciu1,2 and Ioannis Pitas1
+Department of Informatics, Aristotle University of Thessaloniki GR-541 24,
+Thessaloniki, Box 451, Greece.
+Electronics Department, Faculty of Electrical Engineering and Information
+Technology, University of Oradea 410087, Universitatii 1, Romania.
+Summary. Two main theories exist with respect to face encoding and representa-
+tion in the human visual system (HVS). The first one refers to the dense (holistic)
+representation of the face, where faces have “holon”-like appearance. The second one
+claims that a more appropriate face representation is given by a sparse code, where
+only a small fraction of the neural cells corresponding to face encoding is activated.
+Theoretical and experimental evidence suggest that the HVS performs face analysis
+(encoding, storing, face recognition, facial expression recognition) in a structured
+and hierarchical way, where both representations have their own contribution and
+goal. According to neuropsychological experiments, it seems that encoding for face
+recognition, relies on holistic image representation, while a sparse image represen-
+tation is used for facial expression analysis and classification. From the computer
+vision perspective, the techniques developed for automatic face and facial expres-"
+3ed186b4337f48e263ef60acffb49f16d5a85511,Discriminatively learned filter bank for acoustic features,"978-1-4799-9988-0/16/$31.00 ©2016 IEEE
+ICASSP 2016"
+3eebe8a5adaa49e54ea909b4e2aeb436025c84d5,3D Face Recognition Using Radon Transform and Symbolic Factorial Discriminant Analysis,"Proc. of Int. Conf. onMultimedia Processing, Communication and Info. Tech., MPCIT
+3D Face Recognition Using Radon Transform and
+Symbolic Factorial Discriminant Analysis
+P. S. Hiremath, Manjunath Hiremath
+Department of Computer Science Gulbarga University, Gulbarga 585106 Karnataka, India
+Email:"
+3ede3ed28329bf48fbd06438a69c4f855bef003f,Large-scale geo-facial image analysis,"Islam et al. EURASIP Journal on Image and Video Processing (2015) 2015:17
+DOI 10.1186/s13640-015-0070-9
+RESEARCH
+Open Access
+Large-scale geo-facial image analysis
+Mohammad T. Islam1, Connor Greenwell1, Richard Souvenir2 and Nathan Jacobs1*"
+3e685704b140180d48142d1727080d2fb9e52163,Single Image Action Recognition by Predicting Space-Time Saliency,"Single Image Action Recognition by Predicting
+Space-Time Saliency
+Marjaneh Safaei and Hassan Foroosh"
+3efb04937f6d87ab9540700e04d8133102c67bc0,Ask Your Neurons: A Deep Learning Approach to Visual Question Answering,"myjournal
+Ask Your Neurons:
+A Deep Learning Approach to Visual Question Answering
+Mateusz Malinowski · Marcus Rohrbach · Mario Fritz
+Received: date / Accepted: date"
+3ee522805e16bf7816ec4abfaf0c7648b5cb5c95,From Numerical Sensor Data to Semantic Representations :,"From Numerical Sensor Data to Semantic Representations:
+A Data-driven Approach for Generating Linguistic Descriptions
+Hadi Banaee
+Akademisk avhandling
+Avhandling för filosofie doktorsexamen i datavetenskap,
+som kommer att försvaras offentligt
+fredag den 20 april 2018 kl. 13.15,
+Hörsal T, Örebro universitet, Örebro
+Opponent: Prof. Antonio Chella
+University of Palermo
+Italy
+Örebro universitet
+Institutionen för Naturvetenskap och Teknik
+701 82 Örebro"
+3e67058c6ddd0afae692b7665f82124945ea2c5a,On the Learning of Deep Local Features for Robust Face Spoofing Detection,"On the Learning of Deep Local Features for
+Robust Face Spoofing Detection
+Gustavo Botelho de Souza1, Jo˜ao Paulo Papa2 and Aparecido Nilceu Marana2 - in Proc. of SIBGRAPI 2018
+UFSCar - Federal University of S˜ao Carlos. Rod. Washington Lu´ıs, Km 235. S˜ao Carlos (SP), Brazil. 13565-905.
+UNESP - S˜ao Paulo State University. Av. Eng. Luiz Edmundo Carrijo Coube, 14-01. Bauru (SP), Brazil. 17033-360.
+E-mail: {papa,"
+3e4ec7bdd279573d328a26b720854894e68230ed,Efficient Relative Attribute Learning Using Graph Neural Networks,"Ef‌f‌icient Relative Attribute Learning using
+Graph Neural Networks
+Zihang Meng1, Nagesh Adluru1, Hyunwoo J. Kim1⋆,
+Glenn Fung2, and Vikas Singh1
+University of Wisconsin – Madison
+American Family Insurance"
+3e3ba138edbcf594cd0479ac2cddd5a8e3ee6a18,Edge detection for facial expression recognition,"Edge Detection for Facial Expression Recognition
+Jesús García-Ramírez, Ivan Olmos-Pineda, J. Arturo Olvera-López, Manuel Martín
+Ortíz
+Faculty of Computer Science, Benemérita Universidad Autónoma de Puebla, Av. San Claudio
+olvera,
+y 14 sur. Puebla, Pue. C.P. 72570, México"
+3e309126c78261f242d21826bfac37412f5437cd,Attribute CNNs for Word Spotting in Handwritten,"International Journal on Document Analysis and Recognition manuscript No.
+(will be inserted by the editor)
+Attribute CNNs for Word Spotting in Handwritten
+Documents
+Sebastian Sudholt · Gernot A. Fink
+Received: date / Accepted: date"
+3e687d5ace90c407186602de1a7727167461194a,Photo Tagging by Collection-Aware People Recognition,"Photo Tagging by Collection-Aware People Recognition
+Cristina Nader Vasconcelos
+Vinicius Jardim
+Asla S´a
+Paulo Cezar Carvalho"
+3e0db33884ca8c756b26dc0df85c498c18d5f2ec,Exploiting Pedestrian Interaction via Global Optimization and Social Behaviors,"Exploiting pedestrian interaction via global optimization
+and social behaviors
+Laura Leal-Taix´e, Gerard Pons-Moll, and Bodo Rosenhahn
+Leibniz Universit¨at Hannover, Appelstr. 9A, Hannover, Germany"
+3e18b439a6fff09a0e4c245eb1298531cc766a72,"Semi-automatic Face Image Finding Method , Which Uses the 3 D Model of the Head for Recognising an Unknown Face","Technologies of Computer Control
+doi: 10.7250/tcc.2015.001
+______________________________________________________________________________________________ 2015 / 16
+Semi-automatic Face Image Finding Method, Which
+Uses the 3D Model of the Head for Recognising an
+Olga Krutikova1, Aleksandrs Glazs2
+, 2 Riga Technical University"
+3e159084e12ece3664a17bf4dd0eed8c5f06a33f,Deep Neural Networks with Inexact Matching for Person Re-Identification,"Deep Neural Networks with Inexact Matching for
+Person Re-Identification
+Arulkumar Subramaniam
+Indian Institute of Technology Madras
+Chennai, India 600036
+Moitreya Chatterjee
+Indian Institute of Technology Madras
+Chennai, India 600036
+Anurag Mittal
+Indian Institute of Technology Madras
+Chennai, India 600036"
+3e7b5b07da3465103929b4347852d456c0f0ed58,Video Processing From Electro-Optical Sensors for Object Detection and Tracking in a Maritime Environment: A Survey,"Video Processing from Electro-optical Sensors for
+Object Detection and Tracking in Maritime
+Environment: A Survey
+Dilip K. Prasad1,∗, Deepu Rajan2, Lily Rachmawati3, Eshan Rajabally4, and Chai Quek2"
+3e4bd583795875c6550026fc02fb111daee763b4,Convolutional Sketch Inversion,"Convolutional Sketch Inversion
+Ya˘gmur G¨u¸cl¨ut¨urk∗, Umut G¨u¸cl¨u∗, Rob van Lier, and Marcel A. J.
+van Gerven
+Radboud University, Donders Institute for Brain, Cognition and
+Behaviour, Nijmegen, the Netherlands
+Figure 1: Example results of our convolutional sketch inversion models. Our models
+invert face sketches to synthesize photorealistic face images. Each row shows the sketch
+inversion / photo synthesis pipeline that transforms a different sketch of the same face
+to a different image of the same face via a different deep neural network. Each deep
+neural network layer is represented by the top three principal components of its feature
+maps."
+50f0c495a214b8d57892d43110728e54e413d47d,Pairwise support vector machines and their application to large scale problems,"Submitted 8/11; Revised 3/12; Published 8/12
+Pairwise Support Vector Machines and their Application to Large
+Scale Problems
+Carl Brunner
+Andreas Fischer
+Institute for Numerical Mathematics
+Technische Universit¨at Dresden
+01062 Dresden, Germany
+Klaus Luig
+Thorsten Thies
+Cognitec Systems GmbH
+Grossenhainer Str. 101
+01127 Dresden, Germany
+Editor: Corinna Cortes"
+506f744801c97f005fa04a09e4a4ae5fdabe94d7,MARCOnI&#x2014;ConvNet-Based MARker-Less Motion Capture in Outdoor and Indoor Scenes,"Local Submodularization
+for Binary Pairwise Energies
+Lena Gorelick, Yuri Boykov, Olga Veksler, Ismail Ben Ayed, and Andrew Delong"
+501096cca4d0b3d1ef407844642e39cd2ff86b37,Illumination Invariant Face Image Representation Using Quaternions,"Illumination Invariant Face Image
+Representation using Quaternions
+Dayron Rizo-Rodr´ıguez, Heydi M´endez-V´azquez, and Edel Garc´ıa-Reyes
+Advanced Technologies Application Center. 7a # 21812 b/ 218 and 222,
+Rpto. Siboney, Playa, P.C. 12200, La Habana, Cuba."
+501eda2d04b1db717b7834800d74dacb7df58f91,Discriminative Sparse Representation for Expression Recognition,"Pedro Miguel Neves Marques Discriminative Sparse Representation for Expression Recognition Master Thesis in Electrical and Computer Engineering September, 2014"
+50da9965104d944a8ae648c9aaec43be8ea1c501,Improving the Correspondence Establishment Based on Interactive Homography Estimation,"Improving the Correspondence Establishment
+Based on Interactive Homography Estimation*
+Xavier Cortés, Carlos Moreno, and Francesc Serratosa
+Universitat Rovira i Virgili, Departament d’Enginyeria Informàtica i Matemàtiques, Spain"
+5083c6be0f8c85815ead5368882b584e4dfab4d1,Automated Face Analysis for Affective Computing Jeffrey,"Please do not quote. In press, Handbook of affective computing. New York, NY: Oxford
+Automated Face Analysis for Affective Computing
+Jeffrey F. Cohn & Fernando De la Torre"
+5080655990fe0e0446bcb038b3e0adad0218bd29,Quantum Cuts A Quantum Mechanical Spectral Graph Partitioning Method for Salient Object Detection Julkaisu,"Çağlar Aytekin
+Quantum Cuts
+A Quantum Mechanical Spectral Graph Partitioning Method for Salient
+Object Detection
+Julkaisu 1440 • Publication 1440
+Tampere 2016"
+50c5a552c191bff34ca74e0f8dbac159e3814533,"Overview of the ImageCLEF 2015 Scalable Image Annotation, Localization and Sentence Generation task","Overview of the ImageCLEF 2015 Scalable
+Image Annotation, Localization and Sentence
+Generation task
+Andrew Gilbert, Luca Piras, Josiah Wang, Fei Yan, Emmanuel Dellandrea,
+Robert Gaizauskas, Mauricio Villegas and Krystian Mikolajczyk"
+5056186a5001921d0a24587e26167a7ee9d88cf9,Optimizing the Capacity of a Convolutional Neural Network for Image Segmentation and Pattern Recognition,"World Academy of Science, Engineering and Technology
+International Journal of Computer and Information Engineering
+Vol:12, No:10, 2018
+Optimizing the Capacity of a Convolutional Neural
+Network for Image Segmentation and Pattern
+Recognition
+Yalong Jiang, Zheru Chi"
+5087d9bdde0ba5440eb8658be7183bf5074a2a94,Object Detection via a Multi-region and Semantic Segmentation-Aware CNN Model,"Object detection via a multi-region
+semantic segmentation-aware CNN model
+Spyros Gidaris, Nikos Komodakis
+To cite this version:
+Spyros Gidaris, Nikos Komodakis. Object detection via a multi-region
+semantic segmentation-aware CNN model. ICCV 2015, Dec 2015, Santiago, Chile. ICCV 2015, 2016,
+<10.1109/ICCV.2015.135>. <hal-01245664>
+HAL Id: hal-01245664
+https://hal.archives-ouvertes.fr/hal-01245664
+Submitted on 17 Dec 2015
+HAL is a multi-disciplinary open access
+archive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+abroad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de"
+50b6d2db19fb71ff5cfde8e2bfa484b10fbb39fe,Perception of Suicide Risk in Mental Health Professionals.,"RESEARCH ARTICLE
+Perception of Suicide Risk in Mental Health
+Professionals
+Tim M. Gale1,2*, Christopher J. Hawley3, John Butler4, Adrian Morton5, Ankush Singhal6
+Department of Research, Hertfordshire Partnership University NHS Foundation Trust, Hatfield, United
+Kingdom, 2 Department of Psychology, University of Hertfordshire, Hatfield, United Kingdom, 3 Department
+of Post-graduate Medicine, University of Hertfordshire, Hatfield, United Kingdom, 4 School of Health,
+University of Central Lancaster, Preston, United Kingdom, 5 Reigate Psychology Service, Reigate, Surrey,
+United Kingdom, 6 Psychological Medicine Service, The Royal Oldham Hospital, Oldham, United Kingdom
+11111"
+5090e374a0d505040ca6fe957936a12026f5347a,Human Emotion Classification From Videos,"Human Emotion Classification From Videos
+Maria Soledad Elli (mselli) - Dhvani Kotak (dkotak)"
+50bc8a4e7e6ab9837c6244b29ff800f523494d65,Learning to Answer Questions from Image Using Convolutional Neural Network,"Learning to Answer Questions From Image Using Convolutional Neural Network
+Noah’s Ark Lab, Huawei Technologies
+Lin Ma
+Zhengdong Lu
+Hang Li"
+506e2850a564b6085d8f0af4834a97ddd301d423,Alexandra Teynor Visual Object Class Recognition using Local Descriptions,"Alexandra Teynor
+Visual Object Class Recognition
+using Local Descriptions
+Dissertation zur Erlangung des Doktorgrades
+der Fakultät für Angewandte Wissenschaften
+der Albert-Ludwigs-Universität Freiburg im Breisgau
+August 2008"
+5058a7ec68c32984c33f357ebaee96c59e269425,A Comparative Evaluation of Regression Learning Algorithms for Facial Age Estimation,"A Comparative Evaluation of Regression Learning
+Algorithms for Facial Age Estimation
+Carles Fern´andez1, Ivan Huerta2, and Andrea Prati2
+Herta Security
+Pau Claris 165 4-B, 08037 Barcelona, Spain
+DPDCE, University IUAV
+Santa Croce 1957, 30135 Venice, Italy"
+50ff21e595e0ebe51ae808a2da3b7940549f4035,Age Group and Gender Estimation in the Wild With Deep RoR Architecture,"IEEE TRANSACTIONS ON LATEX CLASS FILES, VOL. XX, NO. X, AUGUST 2017
+Age Group and Gender Estimation in the Wild with
+Deep RoR Architecture
+Ke Zhang, Member, IEEE, Ce Gao, Liru Guo, Miao Sun, Student Member, IEEE, Xingfang Yuan, Student
+Member, IEEE, Tony X. Han, Member, IEEE, Zhenbing Zhao, Member, IEEE and Baogang Li"
+5042b358705e8d8e8b0655d07f751be6a1565482,Review on Emotion Detection in Image,"International Journal of
+Emerging Research in Management &Technology
+ISSN: 2278-9359 (Volume-4, Issue-8)
+Research Article
+August
+Review on Emotion Detection in Image
+Aswinder Kaur* Kapil Dewan
+CSE & PCET, PTU HOD, CSE & PCET, PTU
+Punjab, India Punj ab, India"
+50e47857b11bfd3d420f6eafb155199f4b41f6d7,3D Human Face Reconstruction Using a Hybrid of Photometric Stereo and Independent Component Analysis,"International Journal of Computer, Consumer and Control (IJ3C), Vol. 2, No.1 (2013)
+3D Human Face Reconstruction Using a Hybrid of Photometric
+Stereo and Independent Component Analysis
+*Cheng-Jian Lin, 2Shyi-Shiun Kuo, 1Hsueh-Yi Lin, 2Shye-Chorng Kuo and 1Cheng-Yi Yu"
+50eb75dfece76ed9119ec543e04386dfc95dfd13,Learning Visual Entities and Their Visual Attributes from Text Corpora,"Learning Visual Entities and their Visual Attributes from Text Corpora
+Erik Boiy
+Dept. of Computer Science
+K.U.Leuven, Belgium
+Koen Deschacht
+Dept. of Computer Science
+K.U.Leuven, Belgium
+Marie-Francine Moens
+Dept. of Computer Science
+K.U.Leuven, Belgium"
+50a0930cb8cc353e15a5cb4d2f41b365675b5ebf,Robust Facial Landmark Detection and Face Tracking in Thermal Infrared Images using Active Appearance Models,
+507660f778fe913f6e1957fe39a87cbf50a52b2e,Sparse Camera Network for Visual Surveillance -- A Comprehensive Survey,"Sparse Camera Network for Visual
+Surveillance – A Comprehensive Survey
+Mingli Song, Member, IEEE, Dacheng Tao, Senior Member, IEEE,
+and Stephen J. Maybank, Fellow, IEEE"
+50fb5e2f0c2fe8679c218ff88d4906e5a0812d34,"Sketch-editing games: human-machine communication, game theory and applications","Sketch-Editing Games: Human-Machine Communication,
+Game Theory and Applications
+Andre Ribeiro
+Takeo Igarashi
+JST, Erato, Igarashi
+Design Interface Project,
+-28-1-7F, Koishikawa
+JST, Erato, Igarashi
+Design Interface Project,
+-28-1-7F, Koishikawa
+sketches). We argue"
+50eb2ee977f0f53ab4b39edc4be6b760a2b05f96,Emotion recognition based on texture analysis of facial expression,"Australian Journal of Basic and Applied Sciences, 11(5) April 2017, Pages: 1-11
+AUSTRALIAN JOURNAL OF BASIC AND
+APPLIED SCIENCES
+ISSN:1991-8178 EISSN: 2309-8414
+Journal home page: www.ajbasweb.com
+Emotion Recognition Based on Texture Analysis of Facial Expressions
+Using Wavelets Transform
+Suhaila N. Mohammed and 2Loay E. George
+Assistant Lecturer, Computer Science Department, College of Science, Baghdad University, Baghdad, Iraq,
+Assistant Professor, Computer Science Department, College of Science, Baghdad University, Baghdad, Iraq,
+Address For Correspondence:
+Suhaila N. Mohammed, Baghdad University, Computer Science Department, College of Science, Baghdad, Iraq.
+A R T I C L E I N F O
+Article history:
+Received 18 January 2017
+Accepted 28 March 2017
+Available online 15 April 2017
+Keywords:
+Facial Emotion, Face Detection,
+Template Based Methods, Texture"
+50d15cb17144344bb1879c0a5de7207471b9ff74,"Divide, Share, and Conquer: Multi-task Attribute Learning with Selective Sharing","Divide, Share, and Conquer: Multi-task
+Attribute Learning with Selective Sharing
+Chao-Yeh Chen*, Dinesh Jayaraman*, Fei Sha, and Kristen Grauman"
+505942c5f9b5779bda2859e22e9ed0b1c0c7b54a,Towards 3D Face Recognition in the Real: A Registration-Free Approach Using Fine-Grained Matching of 3D Keypoint Descriptors,"Int J Comput Vis
+DOI 10.1007/s11263-014-0785-6
+Towards 3D Face Recognition in the Real: A Registration-Free
+Approach Using Fine-Grained Matching of 3D Keypoint
+Descriptors
+Huibin Li · Di Huang · Jean-Marie Morvan ·
+Yunhong Wang · Liming Chen
+Received: 26 April 2013 / Accepted: 27 October 2014
+© Springer Science+Business Media New York 2014"
+503c16d9cb1560f13a7d6baedf8c9f889b22459d,Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation,"Encoder-Decoder with Atrous Separable
+Convolution for Semantic Image Segmentation
+Liang-Chieh Chen, Yukun Zhu, George Papandreou, Florian Schroff, and
+Hartwig Adam
+{lcchen, yukun, gpapan, fschroff,
+Google Inc."
+50d961508ec192197f78b898ff5d44dc004ef26d,A Low Indexed Content Based Neural Network Approach for Natural Objects Recognition,"International Journal of Computer science & Information Technology (IJCSIT), Vol 1, No 2, November 2009
+A LOW INDEXED CONTENT BASED
+NEURAL NETWORK APPROACH FOR
+NATURAL OBJECTS RECOGNITION
+G.Shyama Chandra Prasad1 and Dr. A.Govardhan 2 Dr. T.V.Rao 3
+Research Scholar, JNTUH, Hyderabad, AP. India
+Principal, JNTUH College of Engineering, jagitial, Karimnagar, AP, India
+Principal, Chaithanya Institute of Engineering and Technology, Kakinada, AP, India"
+50ccc98d9ce06160cdf92aaf470b8f4edbd8b899,Towards robust cascaded regression for face alignment in the wild,"Towards Robust Cascaded Regression for Face Alignment in the Wild
+Chengchao Qu1,2 Hua Gao3
+Eduardo Monari2
+J¨urgen Beyerer2,1
+Jean-Philippe Thiran3
+Vision and Fusion Laboratory (IES), Karlsruhe Institute of Technology (KIT)
+Fraunhofer Institute of Optronics, System Technologies and Image Exploitation (Fraunhofer IOSB)
+Signal Processing Laboratory (LTS5), ´Ecole Polytechnique F´ed´erale de Lausanne (EPFL)"
+5028c0decfc8dd623c50b102424b93a8e9f2e390,Revisiting Classifier Two-sample Tests,"Published as a conference paper at ICLR 2017
+REVISITING CLASSIFIER TWO-SAMPLE TESTS
+David Lopez-Paz1, Maxime Oquab1,2
+Facebook AI Research, 2WILLOW project team, Inria / ENS / CNRS"
+500993a8852f766d4bac7b5039b9072b587e4d09,HARRISON: A Benchmark on HAshtag Recommendation for Real-world Images in Social Networks,"PARK, LI, KIM: HARRISON: A BENCHMARK FOR IMAGE HASHTAG RECOMMENDATION1
+HARRISON: A Benchmark on HAshtag
+Recommendation for Real-world Images in
+SOcial Networks
+School of Electrical Engineering
+KAIST
+South Korea
+Minseok Park
+Hanxiang Li
+Junmo Kim"
+50984f8345a3120d0e6c0a75adc2ac1a13e37961,Impaired face processing in autism: fact or artifact?,"DOI 10.1007/s10803-005-0050-5
+Published Online: February 14, 2006
+Impaired Face Processing in Autism: Fact or Artifact?
+Boutheina Jemel,1,3–5 Laurent Mottron,2–4 and Michelle Dawson2
+Within the last 10 years, there has been an upsurge of interest in face processing abilities in
+autism which has generated a proliferation of new empirical demonstrations employing a
+variety of measuring techniques. Observably atypical social behaviors early in the develop-
+ment of children with autism have led to the contention that autism is a condition where the
+processing of social
+is impaired. While several empirical
+sources of evidence lend support to this hypothesis, others suggest that there are conditions
+under which autistic individuals do not differ from typically developing persons. The present
+paper reviews this bulk of empirical evidence, and concludes that the versatility and abilities of
+face processing in persons with autism have been underestimated.
+information, particularly faces,
+KEY WORDS: Autism; face processing; FFA; configural; local bias.
+Impaired face processing is one of the most
+the social cognition
+commonly cited aspects of
+deficits observed among persons with autism spec-"
+505e55d0be8e48b30067fb132f05a91650666c41,A Model of Illumination Variation for Robust Face Recognition,"A Model of Illumination Variation for Robust Face Recognition
+Florent Perronnin and Jean-Luc Dugelay
+Institut Eur´ecom
+Multimedia Communications Department
+BP 193, 06904 Sophia Antipolis Cedex, France
+fflorent.perronnin,"
+507c9672e3673ed419075848b4b85899623ea4b0,Multi-View Facial Expression Classification,"Faculty of Informatics
+Institute for Anthropomatics
+Chair Prof. Dr.-Ing. R. Stiefelhagen
+Facial Image Processing and Analysis Group
+Multi-View Facial Expression
+Classification
+DIPLOMA THESIS OF
+Nikolas Hesse
+ADVISORS
+Dr.-Ing. Hazım Kemal Ekenel
+Dipl.-Inform. Hua Gao
+Dipl.-Inform. Tobias Gehrig
+MARCH 2011
+KIT – University of the State of Baden-Württemberg and National Laboratory of the Helmholtz Association
+www.kit.edu"
+5020a75c45416073d0b07b1deb7382bc80de1779,Human Detection Using Learned Part Alphabet and Pose Dictionary,"Human Detection using Learned Part Alphabet
+and Pose Dictionary
+Anonymous ECCV submission
+Paper ID 895"
+50e5dd45a94a56cb973e51dc3347e621266db7e4,3D Face Recognition Using Concurrent Neural Modules,"D Face Recognition Using Concurrent Neural Modules
+VICTOR-EMIL NEAGOE , IONUT MITRACHE, AND DANIEL CARAUSU
+Depart. Electronics, Telecommunications & Information Technology
+Polytechnic University of Bucharest
+Splaiul Independentei No. 313, Sector 6, Bucharest
+ROMANIA
+Email:"
+684c8acd49148020e9bf9c4f4aefc03708a6dac0,Video-Based Person Re-Identification With Accumulative Motion Context,"Video-based Person Re-identification with
+Accumulative Motion Context
+Hao Liu, Zequn Jie, Karlekar Jayashree, Meibin Qi, Jianguo Jiang and Shuicheng Yan, Fellow, IEEE, Jiashi Feng"
+68df1f746a3434ee8bcc8918d46809ddaad38b12,Subspace learning in minimax detection,"014 IEEE International Conference on Acoustic, Speech and Signal Processing (ICASSP)
+978-1-4799-2893-4/14/$31.00 ©2014 IEEE
+Email: {raja.fazliza, david.mary,
+SUBSPACE LEARNING IN MINIMAX DETECTION
+Raja Fazliza R. Suleiman, David Mary and Andr´e Ferrari
+Campus Valrose, 06108 Nice Cedex 02, FRANCE
+Laboratoire J.-L. Lagrange, UMR7293,
+1. INTRODUCTION AND PRIOR WORKS
+(cid:26) H0"
+680d662c30739521f5c4b76845cb341dce010735,Part and Attribute Discovery from Relative Annotations,"Int J Comput Vis (2014) 108:82–96
+DOI 10.1007/s11263-014-0716-6
+Part and Attribute Discovery from Relative Annotations
+Subhransu Maji · Gregory Shakhnarovich
+Received: 25 February 2013 / Accepted: 14 March 2014 / Published online: 26 April 2014
+© Springer Science+Business Media New York 2014"
+68ae4db6acf5361486f153ee0c0d540e0823682a,FlashReport Memory conformity for con fi dently recognized items : The power of social in fl uence on memory reports,"Journal of Experimental Social Psychology 48 (2012) 783–786
+Contents lists available at SciVerse ScienceDirect
+Journal of Experimental Social Psychology
+j o u r n a l h o m e pa ge : w ww . e l s e v i e r . c o m/ l o c a t e / j e s p
+FlashReport
+Memory conformity for confidently recognized items: The power of social influence
+on memory reports
+Ruth Horry ⁎, Matthew A. Palmer 1, Michelle L. Sexton, Neil Brewer
+Flinders University, Australia
+r t i c l e
+i n f o
+b s t r a c t
+Article history:
+Received 14 September 2011
+Revised 9 December 2011
+Available online 22 December 2011
+Keywords:
+Memory conformity
+Confidence
+Face recognition"
+68e4ed4daa2ae94c789443ed222601a4a47f9a45,Building Extraction from Polarimetric Interferometric Sar Data Using Bayesian Network,"BUILDING EXTRACTION FROM POLARIMETRIC INTERFEROMETRIC SAR DATA
+USING BAYESIAN NETWORK
+Wenju He and Olaf Hellwich
+Berlin University of Technology
+{wenjuhe,
+1. INTRODUCTION
+Many researches have been done to extract buildings from high resolution Synthetic Aperture Radar (SAR) data. The extraction
+problem is far from solved due to many constraints, e.g. SAR side-look imaging, speckle, and lack of object extent in SAR
+images. Building detection algorithms usually use intensity information or textures. Layovers and shadows can be discriminated
+from other objects since they have distinct appearances. The detection is hindered by the small geometric extent of buildings
+in SAR images and the orientation dependency of reflections. Many buildings are occluded with surrounding environments.
+The interactions between radar and various buildings are hard to model. Polarimetric SAR data can resolve some ambiguities
+because polarimetry can be used to analyze physical scattering properties. Scatterers formed by buildings have strong double-
+bounce reflections. Polarimetric SAR data also allow us to extract rich features for object detection. Polarimetric interferometric
+SAR (PolinSAR) data are more promising since they are able to provide object height information. Furthermore, coherent
+scatterer and permanent scatterer analysis using interferometric SAR (InSAR) data are powerful in urban change detection
+applications. As to building localization, a height map retrieved from PolinSAR data is very advantageous. PolinSAR data are
+expected to further resolve ambiguities in building detection problems.
+For meter-resolution PolinSAR data, however, it is hard to retrieve phases of building roofs from interferometric phase
+because of complex scattering mechanisms and building geometries. Building height image was derived from InSAR digital"
+683260bf133c282439b91ac4427d42d73a5988b5,"Optimizing Program Performance via Similarity, Using Feature-aware and Feature-agnostic Characterization Approaches","UNIVERSITY OF CALIFORNIA,
+IRVINE
+Optimizing Program Performance via Similarity,
+Using Feature-aware and Feature-agnostic Characterization Approaches
+DISSERTATION
+submitted in partial satisfaction of the requirements
+for the degree of
+DOCTOR OF PHILOSOPHY
+in Information and Computer Science
+Rosario Cammarota
+Dissertation Committee:
+Professor Alexander V. Veidenbaum, Chair
+Professor Alexandru Nicolau
+Professor Nikil Dutt"
+68a2ee5c5b76b6feeb3170aaff09b1566ec2cdf5,Age Classification Based on Simple Lbp Transitions,"AGE CLASSIFICATION BASED ON
+SIMPLE LBP TRANSITIONS
+Research Scholar & Assoc Professor, Aditya institute of Technology and Management, Tekkalli-532 201, A.P.,
+Gorti Satyanarayana Murty
+India,
+Dr. V.Vijaya Kumar
+A. Obulesu
+Dean-Computer Sciences (CSE & IT), Anurag Group of Institutions, Hyderabad – 500088, A.P., India.,
+3Asst. Professor, Dept. Of CSE, Anurag Group of Institutions, Hyderabad – 500088, A.P., India."
+6821a3fa67d9d58655c26e24b568fda1229ac5be,Fast and robust object segmentation with the Integral Linear Classifier,"Fast and Robust Object Segmentation with the Integral Linear Classifier
+David Aldavert
+Computer Vision Center
+Dept. Computer Science
+Arnau Ramisa
+INRIA-Grenoble
+Artificial Intelligence Research
+Univ. Aut`onoma de Barcelona
+Institute (IIIA-CSIC)
+Ramon Lopez de Mantaras
+Artificial Intelligence Research
+Institute (IIIA-CSIC)
+Campus UAB
+Ricardo Toledo
+Computer Vision Center
+Dept. Computer Science
+Univ. Aut`onoma de Barcelona"
+68d2afd8c5c1c3a9bbda3dd209184e368e4376b9,Representation Learning by Rotating Your Faces,"Representation Learning by Rotating Your Faces
+Luan Tran, Xi Yin, and Xiaoming Liu, Member, IEEE"
+688cb9fd33769b152806c04ef6fc276629a9f300,LocNet: Improving Localization Accuracy for Object Detection,"LocNet: Improving Localization Accuracy for Object Detection
+Spyros Gidaris
+Universite Paris Est, Ecole des Ponts ParisTech
+Nikos Komodakis
+Universite Paris Est, Ecole des Ponts ParisTech"
+68eb5404a22fcca595cc6360e9a77a4b09156eb2,Appearance-based person reidentification in camera networks: problem overview and current approaches,"J Ambient Intell Human Comput (2011) 2:127–151
+DOI 10.1007/s12652-010-0034-y
+O R I G I N A L R E S E A R C H
+Appearance-based person reidentification in camera networks:
+problem overview and current approaches
+Gianfranco Doretto • Thomas Sebastian •
+Peter Tu • Jens Rittscher
+Received: 30 January 2010 / Accepted: 4 October 2010 / Published online: 14 January 2011
+Ó Springer-Verlag 2011"
+6872615b0298aa01affa3b8d71e4d5547244278f,Weighted Fourier Image Analysis and Modeling,"WEIGHTED FOURIER IMAGE ANALYSIS
+AND MODELING
+Shubing Wang
+A dissertation submitted in partial fulfillment of the
+requirements for the degree of
+Doctor of Philosophy
+(Statistics)
+at the
+UNIVERSITY OF WISCONSIN – MADISON"
+6859b891a079a30ef16f01ba8b85dc45bd22c352,"2D Face Recognition Based on PCA & Comparison of Manhattan Distance, Euclidean Distance & Chebychev Distance","International Journal of Emerging Technology and Advanced Engineering
+Website: www.ijetae.com (ISSN 2250-2459, ISO 9001:2008 Certified Journal, Volume 4, Issue 10, October 2014)
+2D Face Recognition Based on PCA & Comparison of
+Manhattan Distance, Euclidean Distance & Chebychev
+Distance
+Rajib Saha1, Sayan Barman2
+RCC Institute of Information Technology, Kolkata, India"
+68d08ed9470d973a54ef7806318d8894d87ba610,Drive Video Analysis for the Detection of Traffic Near-Miss Incidents,"Drive Video Analysis for the Detection of Traffic Near-Miss Incidents
+Hirokatsu Kataoka1, Teppei Suzuki1
+, Shoko Oikawa3, Yasuhiro Matsui4 and Yutaka Satoh1"
+68caf5d8ef325d7ea669f3fb76eac58e0170fff0,Long-term face tracking in the wild using deep learning,
+68003e92a41d12647806d477dd7d20e4dcde1354,Fuzzy Based Image Dimensionality Reduction Using Shape Primitives for Efficient Face Recognition,"ISSN: 0976-9102 (ONLINE)
+DOI: 10.21917/ijivp.2013.0101
+ICTACT JOURNAL ON IMAGE AND VIDEO PROCESSING, NOVEMBER 2013, VOLUME: 04, ISSUE: 02
+FUZZY BASED IMAGE DIMENSIONALITY REDUCTION USING SHAPE
+PRIMITIVES FOR EFFICIENT FACE RECOGNITION
+P. Chandra Sekhar Reddy1, B. Eswara Reddy2 and V. Vijaya Kumar3
+Deprtment of Computer Science and Engineering, Nalla Narasimha Reddy Education Society’s Group of Institutions, India
+E-Mail:
+Deprtment of Computer Science and Engineering, JNTUA College of Engineering, India
+Deprtment of Computer Science and Engineering, Anurag Group of Institutions, India
+E-mail:
+E-mail:"
+68d4056765c27fbcac233794857b7f5b8a6a82bf,Example-Based Face Shape Recovery Using the Zenith Angle of the Surface Normal,"Example-Based Face Shape Recovery Using the
+Zenith Angle of the Surface Normal
+Mario Castel´an1, Ana J. Almaz´an-Delf´ın2, Marco I. Ram´ırez-Sosa-Mor´an3,
+and Luz A. Torres-M´endez1
+CINVESTAV Campus Saltillo, Ramos Arizpe 25900, Coahuila, M´exico
+Universidad Veracruzana, Facultad de F´ısica e Inteligencia Artificial, Xalapa 91000,
+ITESM, Campus Saltillo, Saltillo 25270, Coahuila, M´exico
+Veracruz, M´exico"
+684f5166d8147b59d9e0938d627beff8c9d208dd,Discriminative Block-Diagonal Representation Learning for Image Recognition,"IEEE TRANS. NNLS, JUNE 2017
+Discriminative Block-Diagonal Representation
+Learning for Image Recognition
+Zheng Zhang, Yong Xu, Senior Member, IEEE, Ling Shao, Senior Member, IEEE, Jian Yang, Member, IEEE"
+683fbd7593cf5c22ef54004bb89c469eab2f656e,URJC&UNED at ImageCLEF 2013 Photo Annotation Task,"URJCyUNED at ImageCLEF 2012 Photo
+Annotation task⋆
+Jes´us S´anchez-Oro1, Soto Montalvo1, Antonio S. Montemayor1, Ra´ul Cabido1,
+Juan J. Pantrigo1, Abraham Duarte1, V´ıctor Fresno2, and Raquel Mart´ınez2
+Universidad Rey Juan Carlos, M(cid:19)ostoles, Spain
+Universidad Nacional de Educaci(cid:19)on a Distancia, Madrid, Spain"
+68333b73613c59914bfe1264a440b3cf854dc15c,Mugeetion: Musical Interface Using Facial Gesture and Emotion,"Mugeetion: Musical Interface Using Facial Gesture and Emotion
+Eunjeong Stella Koh
+Music Department
+UC San Diego"
+6864b089c8586b0e3f6bd6736cabea96b1c4a28a,Robust classification for occluded ear via Gabor scale feature-based non-negative sparse representation,"Robust classification for occluded ear via
+Gabor scale feature-based non-negative
+sparse representation
+Baoqing Zhang
+Zhichun Mu
+Chen Li
+Hui Zeng
+Downloaded From: http://opticalengineering.spiedigitallibrary.org/ on 01/02/2016 Terms of Use: http://spiedigitallibrary.org/ss/TermsOfUse.aspx"
+68becbe61cf30ef93b2679866d3a511e919ffb2f,"Motor, emotional, and cognitive empathy in children and adolescents with autism spectrum disorder and conduct disorder.","J Abnorm Child Psychol (2013) 41:425–443
+DOI 10.1007/s10802-012-9689-5
+Motor, Emotional, and Cognitive Empathy in Children
+nd Adolescents with Autism Spectrum Disorder
+nd Conduct Disorder
+Danielle Bons & Egon van den Broek & Floor Scheepers &
+Pierre Herpers & Nanda Rommelse & Jan K. Buitelaaar
+Published online: 25 October 2012
+# Springer Science+Business Media New York 2012"
+688680d9902f688b9ac2d47c399ceebd1014d785,GIS-supported people tracking re-acquisition in a multi-camera environment,"GIS-supported People Tracking Re-Acquisition in a Multi-Camera
+Environment
+Anastasios Dimou1, Vasileios Lovatsis1, Andreas Papadakis2, Stelios Pantelopoulos2 and Petros
+Daras1
+CERTH-ITI, 6th kilometer Harilaou-Thermi, Thessaloniki, Greece
+SingularLogic, Athens, Greece
+Keywords:
+GIS, Re-Identification, Multi-camera."
+685f8df14776457c1c324b0619c39b3872df617b,Face Recognition with Preprocessing and Neural Networks,"Master of Science Thesis in Electrical Engineering
+Department of Electrical Engineering, Linköping University, 2016
+Face Recognition with
+Preprocessing and Neural
+Networks
+David Habrman"
+68484ae8a042904a95a8d284a7f85a4e28e37513,Spoofing Deep Face Recognition with Custom Silicone Masks,"Spoofing Deep Face Recognition with Custom Silicone Masks
+Sushil Bhattacharjee Amir Mohammadi
+S´ebastien Marcel
+Idiap Research Institute. Centre du Parc, Rue Marconi 19, Martigny (VS), Switzerland
+{sushil.bhattacharjee; amir.mohammadi;"
+688754568623f62032820546ae3b9ca458ed0870,Resting high frequency heart rate variability is not associated with the recognition of emotional facial expressions in healthy human adults,"ioRxiv preprint first posted online Sep. 27, 2016;
+http://dx.doi.org/10.1101/077784
+The copyright holder for this preprint (which was not
+peer-reviewed) is the author/funder. It is made available under a
+CC-BY-NC-ND 4.0 International license
+Resting high frequency heart rate variability is not associated with the
+recognition of emotional facial expressions in healthy human adults.
+Brice Beffara1,2,3, Nicolas Vermeulen3,4, Martial Mermillod1,2
+Univ. Grenoble Alpes, LPNC, F-38040, Grenoble, France
+CNRS, LPNC UMR 5105, F-38040, Grenoble, France
+IPSY, Université Catholique de Louvain, Louvain-la-Neuve, Belgium
+Fund for Scientific Research (FRS-FNRS), Brussels, Belgium
+Correspondence concerning this article should be addressed to Brice Beffara, Of‌f‌ice E250, Institut
+de Recherches en Sciences Psychologiques, IPSY - Place du Cardinal Mercier, 10 bte L3.05.01 B-1348
+Louvain-la-Neuve, Belgium. E-mail:
+Author note
+This study explores whether the myelinated vagal connection between the heart and the brain
+is involved in emotion recognition. The Polyvagal theory postulates that the activity of the
+myelinated vagus nerve underlies socio-emotional skills. It has been proposed that the perception
+of emotions could be one of this skills dependent on heart-brain interactions. However, this"
+688ae87c5e40583ecf9ec6d06d4d15a3e62f5556,A New Angle on L2 Regularization,"A New Angle on L2 Regularization
+(interactive version available at https://thomas-tanay.github.io/post--L2-regularization/)
+Thomas Tanay
+Lewis D Grif‌f‌in
+CoMPLEX, UCL
+CoMPLEX, UCL
+Deep neural networks have been shown to be vulnerable to the
+dversarial example phenomenon: all models tested so far can have their
+lassifications dramatically altered by small image perturbations [1, 2].
+The following predictions were for instance made by a state-of-the-art
+network trained to recognize celebrities [3]:"
+68b44eb4c7440046783146064ae9e715a72766dc,An Investigation of Physiological Arousal in Children with Autism and Co-morbid Challenging Behaviour,"An Investigation of Physiological Arousal in Children with
+Autism and Co-morbid Challenging Behaviour
+Sinéad Lydon
+A thesis submitted to Trinity College Dublin, the University of Dublin,
+in partial fulfillment of the requirements for the Degree of Doctor of
+Philosophy (PhD) in Psychology
+Supervisors: Dr. Olive Healy (Trinity College Dublin) and
+Professor Brian Hughes (National University of Ireland, Galway)."
+688f5cb02dc6c779fa9fd18f44b792f9626bdcd0,Visual pattern discovery in image and video data: a brief survey,"Visual Pattern Discovery in Image and Video Data:
+A Brief Survey
+Hongxing Wang, Gangqiang Zhao and Junsong Yuan"
+68f9cb5ee129e2b9477faf01181cd7e3099d1824,ALDA Algorithms for Online Feature Extraction,"ALDA Algorithms for Online Feature Extraction
+Youness Aliyari Ghassabeh, Hamid Abrishami Moghaddam"
+68b01afed57ed7130d993dffc03dcbfa36d4e038,Adversarial Learning with Local Coordinate Coding,"Adversarial Learning with Local Coordinate Coding
+Jiezhang Cao * 1 Yong Guo * 1 Qingyao Wu * 1 Chunhua Shen 2 Junzhou Huang 3 Mingkui Tan 1"
+687ef116d7115498f12dff1b3338d959f164ef6b,Using Thought-Provoking Children's Questions to Drive Artificial Intelligence Research,"Using Thought-Provoking Children’s Questions
+to Drive Artificial Intelligence Research
+Erik T. Mueller and Henry Minsky
+Minsky Institute for Artificial Intelligence
+http://minskyinstitute.org/
+September 14, 2015 00:09"
+68ba19afe924699b4a0c84af91c05deb5b03e3bd,Do Characteristics of Faces That Convey Trustworthiness and Dominance Underlie Perceptions of Criminality?,"Do Characteristics of Faces That Convey Trustworthiness
+nd Dominance Underlie Perceptions of Criminality?
+Heather D. Flowe*
+College of Medicine, Biological Sciences and Psychology, University of Leicester, Leicester, United Kingdom"
+68415682aa3e25178c9504866f64cf4b2a32273e,Capturing Complex 3D Human Motions with Kernelized Low-Rank Representation from Monocular RGB Camera,"Article
+Capturing Complex 3D Human Motions with
+Kernelized Low-Rank Representation from
+Monocular RGB Camera
+Xuan Wang 1,2,3,4, Fei Wang 1,2,3,4,* and Yanan Chen 1,2,3,4
+The Institute of Artificial Intelligence and Robotics, Xi’an Jiaotong University, No.28 Xianning West Road,
+Xi’an 710048, China; (X.W.); (Y.C.)
+The School of Software Engineering, Xi’an Jiaotong University, No.28 Xianning West Road,
+Xi’an 710048, China
+National Engineering Laboratory for Visual Information Processing and Application, Xi’an Jiaotong
+University, No.28 Xianning West Road, Xi’an 710048, China
+Shaanxi Digital Technology and Intelligent System Key Laboratory, Xi’an Jiaotong University, No.28
+Xianning West Road, Xi’an 710048, China
+* Correspondence:
+Received: 5 July 2017; Accepted: 24 August 2017; Published: 3 September 2017"
+68d40176e878ebffbc01ffb0556e8cb2756dd9e9,Locality Repulsion Projection and Minutia Extraction Based Similarity Measure for Face Recognition,"International Journal of Engineering Research and Applications (IJERA) ISSN: 2248-9622
+International Conference on Humming Bird ( 01st March 2014)
+RESEARCH ARTICLE
+OPEN ACCESS
+Locality Repulsion Projection and Minutia Extraction Based
+Similarity Measure for Face Recognition
+Agnel AnushyaP.1,RamyaP.2
+AgnelAnushya P. is currently pursuing M.E (Computer Science and engineering) at Vins Christian college of
+Ramya P. is currently working as an Asst. Professor in the dept. of Information Technology at Vins Christian
+Engineering.
+ollege of Engineering."
+68c279d4fcc02710056e73a3b0d0d564a7615cad,Unified framework for fast exact and approximate search in dissimilarity spaces,"Unified Framework for Fast Exact and
+Approximate Search in Dissimilarity Spaces
+TOM´AˇS SKOPAL
+Charles University in Prague
+In multimedia systems we usually need to retrieve DB objects based on their similarity to a query
+object, while the similarity assessment is provided by a measure which defines a (dis)similarity
+score for every pair of DB objects. In most existing applications, the similarity measure is required
+to be a metric, where the triangle inequality is utilized to speedup the search for relevant objects
+y use of metric access methods (MAMs), e.g. the M-tree. A recent research has shown, however,
+that non-metric measures are more appropriate for similarity modeling due to their robustness and
+ease to model a made-to-measure similarity. Unfortunately, due to the lack of triangle inequality,
+the non-metric measures cannot be directly utilized by MAMs. From another point of view, some
+sophisticated similarity measures could be available in a black-box non-analytic form (e.g. as an
+lgorithm or even a hardware device), where no information about their topological properties is
+provided, so we have to consider them as non-metric measures as well. From yet another point
+of view, the concept of similarity measuring itself is inherently imprecise and we often prefer fast
+ut approximate retrieval over an exact but slower one.
+To date, the mentioned aspects of similarity retrieval have been solved separately, i.e. exact
+vs. approximate search or metric vs. non-metric search. In this paper we introduce a similarity
+retrieval framework which incorporates both of the aspects into a single unified model. Based on"
+6889d649c6bbd9c0042fadec6c813f8e894ac6cc,Analysis of Robust Soft Learning Vector Quantization and an application to Facial Expression Recognition,"Analysis of Robust Soft Learning Vector
+Quantization and an application to Facial
+Expression Recognition"
+680402e42c874c14a32146865d985588985744a4,Detection and Tracking of Multiple Humans in High-density Crowds,"DETECTION AND TRACKING OF MULTIPLE HUMANS IN
+HIGH-DENSITY CROWDS
+Irshad Ali
+A research study submitted in partial fulfillment of the requirements for the
+degree of Master of Engineering in
+Computer Science
+Examination Committee: Dr. Matthew N. Dailey (Chairperson)
+Dr. Manukid Parnichkun (Member)
+Dr. Nitin V. Afzulpurkar (Member)
+Nationality: Pakistani
+Previous Degree: Bachelor of Science in Computer Engineering
+Samara State Technical University, Russia
+Scholarship Donor: Higher Education Commission (HEC), Pakistan - AIT
+Fellowship
+Asian Institute of Technology
+School of Engineering and Technology
+Thailand
+May 2009"
+68c17aa1ecbff0787709be74d1d98d9efd78f410,Gender Classification from Face Images Using Mutual Information and Feature Fusion,"International Journal of Optomechatronics, 6: 92–119, 2012
+Copyright # Taylor & Francis Group, LLC
+ISSN: 1559-9612 print=1559-9620 online
+DOI: 10.1080/15599612.2012.663463
+GENDER CLASSIFICATION FROM FACE IMAGES
+USING MUTUAL INFORMATION AND FEATURE
+FUSION
+Claudio Perez, Juan Tapia, Pablo Este´vez, and Claudio Held
+Department of Electrical Engineering and Advanced Mining Technology
+Center, Universidad de Chile, Santiago, Chile
+In this article we report a new method for gender classification from frontal face images
+using feature selection based on mutual information and fusion of features extracted from
+intensity, shape, texture, and from three different spatial scales. We compare the results of
+three different mutual information measures: minimum redundancy and maximal relevance
+(mRMR), normalized mutual information feature selection (NMIFS), and conditional
+mutual information feature selection (CMIFS). We also show that by fusing features
+extracted from six different methods we significantly improve the gender classification
+results relative to those previously published, yielding 99.13% of the gender classification
+rate on the FERET database.
+Keywords: Feature fusion, feature selection, gender classification, mutual information, real-time gender"
+68f61154a0080c4aae9322110c8827978f01ac2e,"Recognizing blurred , non-frontal , illumination and expression variant partially occluded faces","Research Article
+Journal of the Optical Society of America A
+Recognizing blurred, non-frontal, illumination and
+expression variant partially occluded faces
+ABHIJITH PUNNAPPURATH1* AND AMBASAMUDRAM NARAYANAN RAJAGOPALAN1
+Department of Electrical Engineering, Indian Institute of Technology Madras, Chennai 600036, India.
+*Corresponding author:
+Compiled June 26, 2016
+The focus of this paper is on the problem of recognizing faces across space-varying motion blur, changes
+in pose, illumination, and expression, as well as partial occlusion, when only a single image per subject
+is available in the gallery. We show how the blur incurred due to relative motion between the camera and
+the subject during exposure can be estimated from the alpha matte of pixels that straddle the boundary
+etween the face and the background. We also devise a strategy to automatically generate the trimap re-
+quired for matte estimation. Having computed the motion via the matte of the probe, we account for pose
+variations by synthesizing from the intensity image of the frontal gallery, a face image that matches the
+pose of the probe. To handle illumination and expression variations, and partial occlusion, we model the
+probe as a linear combination of nine blurred illumination basis images in the synthesized non-frontal
+pose, plus a sparse occlusion. We also advocate a recognition metric that capitalizes on the sparsity of the
+occluded pixels. The performance of our method is extensively validated on synthetic as well as real face
+data. © 2016 Optical Society of America"
+6844a700aee36bd809d1188f6f9e81707c513f19,Interactive model-based reconstruction of the human head using an RGB-D sensor,"Interactive Model-based Reconstruction of the
+Human Head using an RGB-D Sensor
+M. Zollh¨ofer, J. Thies, M. Colaianni, M. Stamminger, G. Greiner
+Computer Graphics Group, University Erlangen-Nuremberg, Germany"
+682f735ef796370f510218eb7afb4d2a36cd1256,On Offline Evaluation of Vision-Based Driving Models,
+6888f3402039a36028d0a7e2c3df6db94f5cb9bb,Classifier-to-generator Attack: Estimation,"Under review as a conference paper at ICLR 2018
+CLASSIFIER-TO-GENERATOR ATTACK: ESTIMATION
+OF TRAINING DATA DISTRIBUTION FROM CLASSIFIER
+Anonymous authors
+Paper under double-blind review"
+68b6ec13d06facacf5637f90828ab5b6e352be60,Neural Proximal Gradient Descent for Compressive Imaging,"Neural Proximal Gradient Descent for Compressive
+Imaging
+Morteza Mardani1,2, Qingyun Sun4, Shreyas Vasawanala2, Vardan Papyan3,
+Hatef Monajemi3, John Pauly1, and David Donoho3
+Depts. of Electrical Eng., Radiology, Statistics, and Mathematics; Stanford University"
+6898b0934d2bc34acc61a3c63fbb20337d7b9a95,Learning Styles and Emotion Recognition in a Fuzzy Expert System,"Learning Styles and Emotion Recognition in a Fuzzy
+Expert System
+Ramón Zatarain-Cabada, M. Lucía Barrón-Estrada, Rosalío Zatarain-Cabada
+Instituto Tecnológico de Culiacán, Juan de Dios Bátiz s/n, Col. Guadalupe, Culiacán Sinaloa,
+80220, Mexico
+{rzatarain,"
+5782d17ad87262739d69dcbe76cadfa881179a91,Data Analysis Project: What Makes Paris Look like Paris?,"Data Analysis Project: What Makes Paris Look like
+Paris?
+Machine Learning Department
+Carnegie-Mellon University
+Pittsburgh, PA 15213
+Carl Doersch⇤"
+57235f22abcd6bb928007287b17e235dbef83347,Exemplar Guided Unsupervised Image-to-Image Translation with Semantic Consistency,"EXEMPLAR GUIDED UNSUPERVISED
+IMAGE-TO-
+IMAGE TRANSLATION WITH SEMANTIC CONSISTENCY
+Liqian Ma1 Xu Jia2
+KU-Leuven/PSI, TRACE (Toyota Res in Europe)
+{liqian.ma, xu.jia, tinne.tuytelaars,
+{georgous,
+Stamatios Georgoulis1,3 Tinne Tuytelaars2 Luc Van Gool1,3
+KU-Leuven/PSI, IMEC 3ETH Zurich"
+57165586f65f25edd9d14f0173c4c35dab8c2e66,Aligning plot synopses to videos for story-based retrieval,"Noname manuscript No.
+(will be inserted by the editor)
+Aligning Plot Synopses to Videos for Story-based Retrieval
+Makarand Tapaswi · Martin B¨auml · Rainer Stiefelhagen
+Received: date / Accepted: date"
+572785b5d6f6fa4b174d79725f82c056b0fb4565,"Computer Vision for Autonomous Vehicles: Problems, Datasets and State-of-the-Art","Computer Vision for Autonomous Vehicles:
+Problems, Datasets and State-of-the-Art
+Joel Janaia,∗, Fatma G¨uneya,∗, Aseem Behla,∗, Andreas Geigera,b
+Autonomous Vision Group, Max Planck Institute for Intelligent Systems, Spemannstr. 41, D-72076 T¨ubingen, Germany
+Computer Vision and Geometry Group, ETH Z¨urich, Universit¨atstrasse 6, CH-8092 Z¨urich, Switzerland"
+576372383bfd6ce6944d885e60b19151efdffc99,Can we unify monocular detectors for autonomous driving by using the pixel-wise semantic segmentation of CNNs?,"Can we unify monocular detectors for autonomous driving
+y using the pixel-wise semantic segmentation of CNNs?
+Eduardo Romera, Luis M. Bergasa, Roberto Arroyo"
+57fd229097e4822292d19329a17ceb013b2cb648,Fast Structural Binary Coding,"Proceedings of the Twenty-Fifth International Joint Conference on Artificial Intelligence (IJCAI-16)
+Fast Structural Binary Coding
+⇤Department of Electrical and Computer Engineering,University of California, San Diego
+Dongjin Song⇤, Wei Liu], and David A. Meyer†
+La Jolla, USA, 92093-0409. Email:
+] Didi Research, Didi Kuaidi, Beijing, China. Email:
+Department of Mathematics,University of California, San Diego
+La Jolla, USA, 92093-0112. Email:"
+57c59011614c43f51a509e10717e47505c776389,Unsupervised Human Action Detection by Action Matching,"Unsupervised Human Action Detection by Action Matching
+Basura Fernando∗ Sareh Shirazi† Stephen Gould∗
+The Australian National University †Queensland University of Technology"
+5725c06b406b5291915a6bef8b5c3d20b2873aa0,Face Recognition Using Principal Component Analysis Based Feature Space by Incorporating with Probabilistic Neural Network,"International Journal of Computer Science Trends and Technology (IJCST) – Volume 4 Issue 2, Mar - Apr 2016
+RESEARCH ARTICLE
+OPEN ACCESS
+Face Recognition Using Principal Component Analysis
+Based Feature Space By Incorporating With Probabilistic
+Muhammad Tahir, Shahid Akbar, Shahzad, Maqsood Hayat, Nazia Azim
+Neural Network
+Department of Computer Science
+Abdul Wali Khan University
+Mardan - Pakistan"
+5740a5f9cbfe790afc0ba9a425cfb71197927470,Supplementary Material for Superpixel Sampling Networks,"Supplementary Material for
+Superpixel Sampling Networks
+Varun Jampani1, Deqing Sun1, Ming-Yu Liu1,
+Ming-Hsuan Yang1,2, Jan Kautz1
+NVIDIA
+UC Merced
+In Section 1, we formally define the Acheivable Segmentation Accuracy (ASA)
+used for evaluating superpixels. Then, in Section 2, we report F-measure and
+Compactness scores with more visual results on different datasets. We also in-
+lude a supplementary video1 that gives an overview of Superpixel Sampling
+Networks (SSN) with a glimpse of experimental results.
+Evaluation Metrics
+Here, we formally define the Achievable Segmentation Accuracy (ASA) met-
+ric that is used in the main paper. Given an image I with n pixels, let H ∈
+{0, 1,··· , m}n×1 denotes the superpixel segmentation with m superpixels. H is
+j=1 H j, where jth segment is repre-
+sented as H j. Similarly, let G ∈ {0, 1,··· , w}n×1 denotes ground-truth (GT)
+l=1 Gl, where Gl denotes lth GT segment.
+ASA Score. The ASA score between a given superpixel segmentation H and
+the GT segmentation G is defined as"
+573c11e7e00389a033787984223ced536e15c904,Pictorial structures revisited: People detection and articulated pose estimation,"Pictorial Structures Revisited: People Detection and Articulated Pose Estimation
+Mykhaylo Andriluka, Stefan Roth, and Bernt Schiele
+Department of Computer Science, TU Darmstadt"
+5720784b7e45693109b867992e3f93e4c747e536,Sparse Methods for Robust and Efficient Visual Recognition,
+57f8e1f461ab25614f5fe51a83601710142f8e88,Region Selection for Robust Face Verification using UMACE Filters,"Region Selection for Robust Face Verification using UMACE Filters
+Salina Abdul Samad*, Dzati Athiar Ramli, Aini Hussain
+Department of Electrical, Electronic and Systems Engineering, Faculty of Engineering,
+Universiti Kebangsaan Malaysia, 43600 Bangi, Selangor, Malaysia.
+In this paper, we investigate the verification performances of four subdivided face images with varying expressions. The
+objective of this study is to evaluate which part of the face image is more tolerant to facial expression and still retains its personal
+haracteristics due to the variations of the image. The Unconstrained Minimum Average Correlation Energy (UMACE) filter is
+implemented to perform the verification process because of its advantages such as shift–invariance, ability to trade-off between
+discrimination and distortion tolerance, e.g. variations in pose, illumination and facial expression. The database obtained from the
+facial expression database of Advanced Multimedia Processing (AMP) Lab at CMU is used in this study. Four equal
+sizes of face regions i.e. bottom, top, left and right halves are used for the purpose of this study. The results show that the bottom
+half of the face region gives the best performance in terms of the PSR values with zero false accepted rate (FAR) and zero false
+rejection rate (FRR) compared to the other three regions.
+. Introduction
+Face recognition is a well established field of research,
+nd a large number of algorithms have been proposed in the
+literature. Various classifiers have been explored to improve
+the accuracy of face classification. The basic approach is to
+use distance-base methods which measure Euclidean distance
+etween any two vectors and then compare it with the preset"
+57a1466c5985fe7594a91d46588d969007210581,A taxonomy of face-models for system evaluation,"A Taxonomy of Face-models for System Evaluation
+Vijay N. Iyer, Shane. R. Kirkbride, Brian C. Parks, Walter J. Scheirer and Terrance. E. Boult
+Motivation and Data Types
+Synthetic Data Types
+Unverified – Have no underlying physical or
+statistical basis
+Physics -Based – Based on structure and
+materials combined with the properties
+formally modeled in physics.
+Statistical – Use statistics from real
+data/experiments to estimate/learn model
+parameters. Generally have measurements
+of accuracy
+Guided Synthetic – Individual models based
+on individual people. No attempt to capture
+properties of large groups, a unique model
+per person. For faces, guided models are
+omposed of 3D structure models and skin
+textures, capturing many artifacts not
+easily parameterized. Can be combined with"
+57246142814d7010d3592e3a39a1ed819dd01f3b,Verification of Very Low-Resolution Faces Using An Identity-Preserving Deep Face Super-resolution Network,"MITSUBISHI ELECTRIC RESEARCH LABORATORIES
+http://www.merl.com
+Verification of Very Low-Resolution Faces Using An
+Identity-Preserving Deep Face Super-resolution Network
+Ataer-Cansizoglu, E.; Jones, M.J.; Zhang, Z.; Sullivan, A.
+TR2018-116 August 24, 2018"
+57680f0d53392178bb3c431e03bcd8626c12f620,Semantic Image Segmentation,"Workshop track - ICLR 2017
+ADVERSARIAL EXAMPLES FOR
+SEMANTIC IMAGE SEGMENTATION
+Volker Fischer1, Mummadi Chaithanya Kumar2, Jan Hendrik Metzen1 & Thomas Brox2
+Bosch Center for Artificial Intelligence, Robert Bosch GmbH
+University of Freiburg
+{volker.fischer,"
+57ff1222a78a230c46fc81f22daa57981b0fa306,Face recognition in multi-camera surveillance videos using Dynamic Bayesian Network,"Face Recognition
+in Multi-Camera
+Surveillance
+Videos using Dynamic Bayesian Network
+Center for Research
+Le An, Mehran Kafai, Bir
+Bhanu
+in Intelligent
+Systems,
+University
+of California,
+Riverside
+.edu, mkafai bhanu"
+57e9b0d3ab6295e914d5a30cfaa3b2c81189abc1,Self-Learning Scene-Specific Pedestrian Detectors Using a Progressive Latent Model,"Self-learning Scene-specific Pedestrian Detectors
+using a Progressive Latent Model
+Qixiang Ye1,4, Tianliang Zhang 1, Qiang Qiu4, Baochang Zhang2, Jie Chen3, and Guillermo Sapiro4
+EECE, University of Chinese Academy of Sciences.
+ASEE, Beihang University. 3CMV, Oulu University. 4ECE, Duke University."
+57b55a7a1adc8ec06285ebaf93995d67cf80c719,External Data Overcomplete Dictionary Similarity Graph ≈ + Probeimage Gallery Compressed Dictionary With Coefficient Design Phase : Operational Phase : CD Compressed Dictionary,
+574705812f7c0e776ad5006ae5e61d9b071eebdb,A Novel Approach for Face Recognition Using PCA and Artificial Neural Network,"Karthik G et al, International Journal of Computer Science and Mobile Computing, Vol.3 Issue.5, May- 2014, pg. 780-787
+Available Online at www.ijcsmc.com
+International Journal of Computer Science and Mobile Computing
+A Monthly Journal of Computer Science and Information Technology
+ISSN 2320–088X
+IJCSMC, Vol. 3, Issue. 5, May 2014, pg.780 – 787
+RESEARCH ARTICLE
+A Novel Approach for Face Recognition
+Using PCA and Artificial Neural Network
+Karthik G1, Sateesh Kumar H C2
+¹Deptartment of Telecommunication Engg., Dayananda Sagar College of Engg., India
+²Department of Telecommunication Engg., Dayananda Sagar College of Engg., India
+email : 2 email :"
+57e8e226e605fe6491111c5dc9461527c5fce56c,Articulated Object Detection,"Articulated Object Detection
+Maciej Halber
+MEng Computer Science
+Submission Date: 26th April 2013
+Supervisors
+Niloy J. Mitra
+Simon Julier
+This report is submitted as part requirement for the MEng Degree in Computer
+Science at UCL. It is substantially the result of my own work except where ex-
+plicitly indicated in the text. The report may be freely copied and distributed
+provided the source is explicitly acknowledged."
+5712cfc11c561c453da6a31d515f4340dacc91a4,3D Facial Expression Reconstruction using Cascaded Regression,"SUBMITTED TO PATTERN RECOGNITION LETTERS
+Cascaded Regression using Landmark
+Displacement for 3D Face Reconstruction
+Fanzi Wu, Songnan Li, Tianhao Zhao, and King Ngi Ngan,Lv Sheng"
+571b83f7fc01163383e6ca6a9791aea79cafa7dd,SeqFace: Make full use of sequence information for face recognition,"SeqFace: Make full use of sequence information for face recognition
+Wei Hu1 ∗
+Yangyu Huang2
+Guodong Yuan2
+Fan Zhang1
+Ruirui Li1
+Wei Li1
+College of Information Science and Technology,
+Beijing University of Chemical Technology, China
+YUNSHITU Corp., China"
+5700291077b509b11fb227f84ee9fc2de8f2df99,Line search and trust region strategies for canonical decomposition of semi-nonnegative semi-symmetric 3rd order tensors,"Line search and trust region strategies for canonical
+decomposition of semi-nonnegative semi-symmetric 3rd
+Julie Coloigner, Ahmad Karfoul, Laurent Albera, Pierre Comon
+order tensors
+To cite this version:
+Julie Coloigner, Ahmad Karfoul, Laurent Albera, Pierre Comon. Line search and trust region
+strategies for canonical decomposition of semi-nonnegative semi-symmetric 3rd order tensors.
+Linear Algebra and Applications, Elsevier - Academic Press, 2014, 450, pp.334-374.
+HAL Id: hal-00945606
+https://hal.archives-ouvertes.fr/hal-00945606
+Submitted on 12 Feb 2014
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destin´ee au d´epˆot et `a la diffusion de documents
+scientifiques de niveau recherche, publi´es ou non,"
+57a14a65e8ae15176c9afae874854e8b0f23dca7,Seeing Mixed Emotions: The Specificity of Emotion Perception From Static and Dynamic Facial Expressions Across Cultures,"UvA-DARE (Digital Academic Repository)
+Seeing mixed emotions: The specificity of emotion perception from static and dynamic
+facial expressions across cultures
+Fang, X.; Sauter, D.A.; van Kleef, G.A.
+Published in:
+Journal of Cross-Cultural Psychology
+0.1177/0022022117736270
+Link to publication
+Citation for published version (APA):
+Fang, X., Sauter, D. A., & van Kleef, G. A. (2018). Seeing mixed emotions: The specificity of emotion perception
+from static and dynamic facial expressions across cultures. Journal of Cross-Cultural Psychology, 49(1), 130-
+48. DOI: 10.1177/0022022117736270
+General rights
+It is not permitted to download or to forward/distribute the text or part of it without the consent of the author(s) and/or copyright holder(s),
+other than for strictly personal, individual use, unless the work is under an open content license (like Creative Commons).
+Disclaimer/Complaints regulations
+If you believe that digital publication of certain material infringes any of your rights or (privacy) interests, please let the Library know, stating
+your reasons. In case of a legitimate complaint, the Library will make the material inaccessible and/or remove it from the website. Please Ask
+the Library: http://uba.uva.nl/en/contact, or a letter to: Library of the University of Amsterdam, Secretariat, Singel 425, 1012 WP Amsterdam,
+The Netherlands. You will be contacted as soon as possible."
+573b687ad970e1931debbf366004c0983de28718,A Corpus for Investigating the Multimodal Nature of Multi-Speaker Spontaneous Conversations – EVA Corpus,"A Corpus for Investigating the Multimodal Nature of Multi-Speaker
+Spontaneous Conversations – EVA Corpus
+IZIDOR MLAKAR, ZDRAVKO KAČIČ, MATEJ ROJC
+Faculty of Electrical Engineering and Computer Science, University of Maribor
+SLOVENIA"
+57126589b3fe62c35a36a2646dac3045d095ecf5,Adversarial Defense based on Structure-to-Signal Autoencoders,"Adversarial Defense based on
+Structure-to-Signal Autoencoders
+Joachim Folz(cid:63), Sebastian Palacio(cid:63), Joern Hees, Damian Borth, and Andreas
+Dengel
+German Research Center for Artificial Intelligence (DFKI)
+TU Kaiserslautern"
+57fd8bafa4526b9a56fe43fac22dd62b2ab94563,Beyond Shared Hierarchies: Deep Multitask Learning through Soft Layer Ordering,"Under review as a conference paper at ICLR 2018
+BEYOND SHARED HIERARCHIES: DEEP MULTITASK
+LEARNING THROUGH SOFT LAYER ORDERING
+Anonymous authors
+Paper under double-blind review"
+57d37ad025b5796457eee7392d2038910988655a,Aeaeêêìáîî Áåèääååaeììáçae Çç Àááêêêàáááä Aeçîîäìì Ììììçê,"GEERATVEEETATF
+
+DagaEha
+UdeheS eviif
+f.DahaWeiha
+ATheiS biediaia
+Re ieefheDegeef
+aefSciece
+TheSch
+
+Decebe2009"
+57e562cf99b3dfbb6baa5bbf665aa6fd97ffe8ca,Expression-Compensated 3D Face Recognition with Geodesically Aligned Bilinear Models,"Expression-Compensated 3D Face Recognition with Geodesically
+Aligned Bilinear Models
+Iordanis Mpiperis1,2,Sotiris Malassiotis1 and Michael G. Strintzis1,2"
+3b319645bfdc67da7d02db766e17a3e0a37be47b,On the relationship between visual attributes and convolutional networks,"On the Relationship between Visual Attributes and Convolutional Networks
+Victor Escorcia1,2, Juan Carlos Niebles2, Bernard Ghanem1
+King Abdullah University of Science and Technology (KAUST), Saudi Arabia. 2Universidad del Norte, Colombia.
+The seminal work of Krizhevsky et al. [3] that trained a large convo-
+lutional network (conv-net) for image-level object recognition on the Ima-
+geNet challenge is considered a major stepping stone for subsequent work in
+onv-net based visual recognition. Such a network is able to automatically
+learn a hierarchy of nonlinear features that richly describe image content as
+well as discriminate between object classes. Recent work [4] has shown that
+features extracted from a conv-net trained on ImageNet are general purpose
+(or black-box) enough to achieve state-of-the-art results in various other
+recognition tasks, including scene, fine-grained, and even action recogni-
+tion. However, unlike hand-crafted features, those learned by a conv-net
+re usually not visually intuitive and straightforward to interpret. Despite
+their excellent recognition performance, understanding and interpreting the
+inner workings of conv-nets remains mostly elusive to the community. It
+is this lack of deep understanding that is currently motivating researchers
+to look under the hood and comprehend how and why these deep networks
+work so well in practice. Inspired by recent observations on the analysis of
+onv-nets [1], this paper takes another step in a similar direction, namely"
+3b1aaac41fc7847dd8a6a66d29d8881f75c91ad5,Sparse Representation-Based Open Set Recognition,"Sparse Representation-based Open Set Recognition
+He Zhang, Student Member, IEEE and Vishal M. Patel, Senior Member, IEEE"
+3b311a1ce30f9c0f3dc1d9c0cf25f13127a5e48c,A Coarse-to-fine Pyramidal Model for Person Re-identification via Multi-Loss Dynamic Training,"A Coarse-to-fine Pyramidal Model for Person Re-identification via Multi-Loss
+Dynamic Training
+Feng Zheng, Xing Sun, Xinyang Jiang, Xiaowei Guo, Zongqiao Yu, Feiyue Huang
+{winfredsun, sevjiang, scorpioguo, quentinyu,
+YouTu Lab, Tencent
+Shanghai, China"
+3b4177556f1c9f5a8f8e1b2e8d824dee20e388e4,Spatial Weighting for Bag-of-Features,"Spatial Weighting for Bag-of-Features
+Marcin Marsza(cid:7)ek
+Cordelia Schmid
+INRIA Rh(cid:136)one-Alpes, LEAR - GRAVIR
+665 av de l’Europe, 38330 Montbonnot, France"
+3ba3ef6d8394055d43bf4fe62227fbae8ab9b195,Finding images of difficult entities in the long tail,"Finding Images of Difficult Entities in the Long Tail
+Bilyana Taneva
+Max-Planck Institute for
+Informatics
+Saarbrücken, Germany
+Mouna Kacimi
+Free University of
+Bozen-Bolzano
+Italy
+Gerhard Weikum
+Max-Planck Institute for
+Informatics
+Saarbrücken, Germany"
+3bc776eb1f4e2776f98189e17f0d5a78bb755ef4,View Synthesis from Image and Video for Object Recognition Applications,
+3bfa75238e15e869b902ceb62b31ffddbe8ccb0d,Describing Images using Inferred Visual Dependency Representations,"Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics
+nd the 7th International Joint Conference on Natural Language Processing, pages 42–52,
+Beijing, China, July 26-31, 2015. c(cid:13)2015 Association for Computational Linguistics"
+3b14bdb0b1a7353d94973ef4c1578e1bd4a4e35e,Three dimensional binary edge feature representation for pain expression analysis,"Three Dimensional Binary Edge
+Feature Representation for Pain
+Expression Analysis
+Xing Zhang1, Lijun Yin1, Jeffrey F. Cohn2
+State University of New York at Binghamton; 2University of Pittsburgh"
+3beb94f61b5909fca8917b0475983ea2c66f1df2,Shape model fitting algorithm without point correspondence,"0th European Signal Processing Conference (EUSIPCO 2012)
+© EURASIP, 2012 - ISSN 2076-1465
+. INTRODUCTION"
+3b1b94441010615195a5c404409ce2416860508c,Image Captioning and Visual Question Answering Based on Attributes and External Knowledge,"MANUSCRIPT, 2016
+Image Captioning and Visual Question
+Answering Based on Attributes
+nd External Knowledge
+Qi Wu, Chunhua Shen, Peng Wang, Anthony Dick, Anton van den Hengel"
+3b304585d5af0afe98a85d6e0559315fbf3a7807,An Improved Labelling for the INRIA Person Data Set for Pedestrian Detection,"An Improved Labelling for the INRIA Person
+Data Set for Pedestrian Detection
+Matteo Taiana, Jacinto Nascimento, and Alexandre Bernardino(cid:63)
+Institute for Systems and Robotics, IST, Lisboa, Portugal,
+WWW home page: http://users.isr.ist.utl.pt/~mtaiana"
+3b15a48ffe3c6b3f2518a7c395280a11a5f58ab0,On knowledge transfer in object class recognition,"On Knowledge Transfer in
+Object Class Recognition
+A dissertation approved by
+TECHNISCHE UNIVERSITÄT DARMSTADT
+Fachbereich Informatik
+for the degree of
+Doktor-Ingenieur (Dr.-Ing.)
+presented by
+MICHAEL STARK
+Dipl.-Inform.
+orn in Mainz, Germany
+Prof. Dr.-Ing. Michael Goesele, examiner
+Prof. Martial Hebert, Ph.D., co-examiner
+Prof. Dr. Bernt Schiele, co-examiner
+Date of Submission: 12th of August, 2010
+Date of Defense: 23rd of September, 2010
+Darmstadt, 2010"
+3bbdfa097a4c39012cb322b23051e360c2f7f023,Learning Race from Face: A Survey,"Learning Race from Face: A Survey
+Siyao Fu, Member, IEEE, Haibo He, Senior Member, IEEE, and Zeng-Guang Hou, Senior Member, IEEE"
+3baa3d5325f00c7edc1f1427fcd5bdc6a420a63f,Enhancing convolutional neural networks for face recognition with occlusion maps and batch triplet loss,"Enhancing Convolutional Neural Networks for Face Recognition with
+Occlusion Maps and Batch Triplet Loss
+Daniel S´aez Triguerosa,b, Li Menga,∗, Margaret Hartnettb
+School of Engineering and Technology, University of Hertfordshire, Hatfield AL10 9AB, UK
+IDscan Biometrics (a GBG company), London E14 9QD, UK"
+3b1ba9818e2ee6a54e7ec033c5b2ec8bdbe2935f,Social Signaling Descriptor for Group Behaviour Analysis,"Social Signaling Descriptor for Group
+Behaviour Analysis
+Eduardo M. Pereira1,2(B), Lucian Ciobanu1, and Jaime S. Cardoso1,2
+Faculty of Engineering of the University of Porto, Rua Dr. Roberto Frias, 378,
+INESC TEC, Porto, Portugal
+200 - 465 Porto, Portugal"
+3b996a2e641be7bd395620d30364a27d1558cbad,Tracking Related Multiple Targets in Videos,"Tracking Related Multiple Targets
+in Videos
+DISSERTATION
+zur Erlangung des akademischen Grades
+Doktor/in der technischen Wissenschaften
+eingereicht von
+Nicole M. Artner
+Matrikelnummer 0727746
+n der
+Fakultät für Informatik der Technischen Universität Wien
+Betreuung: O.Univ.Prof. Dipl.Ing. Dr.techn. Walter G. Kropatsch
+Diese Dissertation haben begutachtet:
+(O.Univ.Prof. Dipl.Ing. Dr.techn.
+(Prof. Em. Dr. Horst Bunke)
+Walter G. Kropatsch)
+Wien, 10.10.2013
+(Nicole M. Artner)
+A-1040 Wien (cid:2) Karlsplatz 13 (cid:2) Tel. +43-1-58801-0 (cid:2) www.tuwien.ac.at
+Technische Universität Wien"
+3b6310052026fc641d3fa639647342c45d8f5bd5,Eye Contact Modulates Cognitive Processing Differently in Children With Autism,"Child Development, xxxx 2014, Volume 00, Number 0, Pages 1–11
+Eye Contact Modulates Cognitive Processing Differently in
+Children With Autism
+Terje Falck-Ytter
+Karolinska Institutet and Uppsala University
+Christoffer Carlstr€om and Martin Johansson
+Uppsala University
+In humans, effortful cognitive processing frequently takes place during social interaction, with eye contact
+eing an important component. This study shows that the effect of eye contact on memory for nonsocial infor-
+mation is different in children with typical development than in children with autism, a disorder of social
+ommunication. Direct gaze facilitated memory performance in children with typical development (n = 25,
+6 years old), but no such facilitation was seen in the clinical group (n = 10, 6 years old). Eye tracking con-
+ducted during the cognitive test revealed strikingly similar patterns of eye movements, indicating that the
+results cannot be explained by differences in overt attention. Collectively, these findings have theoretical sig-
+nificance and practical implications for testing practices in children.
+Being looked at is a strong signal, indicating that
+the other person is attending to you and processing
+information about you. In many nonhuman species,
+direct gaze functions as an aversive stimulus, likely
+ecause of the threat value associated with eye con-"
+3b92916dd9d772cf1d167461a548115013a954a8,Unsupervised Framework for Interactions Modeling between Multiple Objects,
+3ba8f8b6bfb36465018430ffaef10d2caf3cfa7e,Local Directional Number Pattern for Face Analysis: Face and Expression Recognition,"Local Directional Number Pattern for Face
+Analysis: Face and Expression Recognition
+Adin Ramirez Rivera, Student Member, IEEE, Jorge Rojas Castillo, Student Member, IEEE,
+nd Oksam Chae, Member, IEEE"
+3b38dc6d4f676ace52672f6788b66c9abb10d702,Ph.D. Showcase: Measuring Terrain Distances Through Extracted Channel Networks,"Ph.D. Showcase: Measuring Terrain Distances Through
+Extracted Channel Networks
+PhD Student:
+Christopher Stuetzle
+Dept. Computer Science
+PhD Superviser:
+W. Randolph Franklin
+Dept. Electrical Engineering
+PhD Superviser:
+Barbara Cutler
+Dept. Computer Science
+Mehrad Kamalzare
+Dept. Civil Engineering
+Zhongxian Chen
+Dept. Computer Science
+Thomas Zimmie
+Dept. Civil Engineering"
+3b9ee03255eb5a0040676eead1767db431e83562,Conference on Computer Vision and Pattern Recognition,"013 IEEE Conference on Computer Vision and Pattern Recognition
+013 IEEE Conference on Computer Vision and Pattern Recognition
+013 IEEE Conference on Computer Vision and Pattern Recognition
+063-6919/13 $26.00 © 2013 IEEE
+063-6919/13 $26.00 © 2013 IEEE
+063-6919/13 $26.00 © 2013 IEEE
+DOI 10.1109/CVPR.2013.236
+DOI 10.1109/CVPR.2013.236
+DOI 10.1109/CVPR.2013.236"
+3b9d94752f8488106b2c007e11c193f35d941e92,"Appearance, Visual and Social Ensembles for Face Recognition in Personal Photo Collections","#2052
+CVPR 2013 Submission #2052. CONFIDENTIAL REVIEW COPY. DO NOT DISTRIBUTE.
+#2052
+Appearance, Visual and Social Ensembles for
+Face Recognition in Personal Photo Collections
+Anonymous CVPR submission
+Paper ID 2052"
+3bd63bea64c770df5049879f4398e65f958ebd23,Predicting an Object Location Using a Global Image Representation,"Predicting an Object Location using a Global Image Representation
+Jose A. Rodriguez-Serrano and Diane Larlus
+Computer Vision Group, Xerox Research Centre Europe"
+3b47e618c5ceb1c16db7f709dd1cfe53d7417b54,Discrimination on the Grassmann Manifold: Fundamental Limits of Subspace Classifiers,"Discrimination on the Grassmann Manifold:
+Fundamental Limits of Subspace Classifiers
+Matthew Nokleby, Member, IEEE, Miguel Rodrigues, Member, IEEE, and Robert Calderbank, Fellow,"
+3b557c4fd6775afc80c2cf7c8b16edde125b270e,Face recognition: Perspectives from the real world,"Face Recognition: Perspectives from the
+Real-World
+Bappaditya Mandal
+Institute for Infocomm Research, A*STAR,
+Fusionopolis Way, #21-01 Connexis (South Tower), Singapore 138632.
+Phone: +65 6408 2071; Fax: +65 6776 1378;
+E-mail:"
+3b2f78a4edf5da876e52513d0e3960da7d3a253f,Qualitative Evaluation of Detection and Tracking Performance,"Qualitative Evaluation of Detection and Tracking
+Performance
+Swaminathan Sankaranarayanan, Francois Bremond, David Tax
+To cite this version:
+Swaminathan Sankaranarayanan, Francois Bremond, David Tax. Qualitative Evaluation of Detection
+nd Tracking Performance. 9th IEEE International Conference On Advanced Video and Signal Based
+Surveillance (AVSS 12), Sep 2012, Beijing, China. IEEE, pp.362-367, 2012, 2012 IEEE Ninth Inter-
+national Conference on Advanced Video and Signal-Based Surveillance. <10.1109/AVSS.2012.57>.
+<hal-00763587>
+HAL Id: hal-00763587
+https://hal.inria.fr/hal-00763587
+Submitted on 14 Dec 2012
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents"
+3b2697d76f035304bfeb57f6a682224c87645065,ImageNet Large Scale Visual Recognition Challenge,"Noname manuscript No.
+(will be inserted by the editor)
+ImageNet Large Scale Visual Recognition Challenge
+Olga Russakovsky* · Jia Deng* · Hao Su · Jonathan Krause ·
+Sanjeev Satheesh · Sean Ma · Zhiheng Huang · Andrej Karpathy ·
+Aditya Khosla · Michael Bernstein · Alexander C. Berg · Li Fei-Fei
+Received: date / Accepted: date"
+3b8ad690f8d43d189ea2f2559c41b6eebac8dcc8,Mobile 3D object detection in clutter,"Mobile 3D Object Detection in Clutter
+David Meger and James J. Little"
+3bf66814817f582510e0f0a717112b78aca075a0,UNIVERSITY OF CALIFORNIA RIVERSIDE Bio-Image Analysis for Understanding Plant Development and Mosquito Behaviors A Dissertation submitted in partial satisfaction of the requirements for the degree of Doctor of Philosophy,"UNIVERSITY OF CALIFORNIA
+RIVERSIDE
+Bio-Image Analysis for Understanding Plant Development and Mosquito Behaviors
+A Dissertation submitted in partial satisfaction
+of the requirements for the degree of
+Doctor of Philosophy
+Computer Science
+Katya Mkrtchyan
+March 2017
+Dissertation Committee:
+Professor Amit Roy-Chowdhury, Chairperson
+Professor Eamonn Keogh
+Professor Stefano Lonardi
+Professor Tamar Shinar"
+3b2df7d70ecbe3d0d65d27801d159ddaa150bf42,Doubly Sparse Relevance Vector Machine for Continuous Facial Behavior Estimation,"Doubly Sparse Relevance Vector Machine for
+Continuous Facial Behavior Estimation
+Sebastian Kaltwang, Sinisa Todorovic, Member, IEEE and Maja Pantic, Fellow, IEEE"
+3b410ae97e4564bc19d6c37bc44ada2dcd608552,Scalability Analysis of Audio-Visual Person Identity Verification,"Scalability Analysis of Audio-Visual Person
+Identity Verification
+Jacek Czyz1, Samy Bengio2, Christine Marcel2, and Luc Vandendorpe1
+Communications Laboratory,
+Universit´e catholique de Louvain, B-1348 Belgium,
+IDIAP, CH-1920 Martigny,
+Switzerland"
+6f42cb23262066b4034aba99bf674783ed6cac8b,An Empirical Evaluation of various Deep Learning Architectures for Bi-Sequence Classification Tasks,"Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers,
+pages 2762–2773, Osaka, Japan, December 11-17 2016."
+6f5d57460e0e156497c4667a875cc5fa83154e3a,Retinal Verification Using a Feature Points-Based Biometric Pattern,"Hindawi Publishing Corporation
+EURASIP Journal on Advances in Signal Processing
+Volume 2009, Article ID 235746, 13 pages
+doi:10.1155/2009/235746
+Research Article
+Retinal Verification Using a Feature Points-Based
+Biometric Pattern
+M. Ortega,1 M. G. Penedo,1 J. Rouco,1 N. Barreira,1 and M. J. Carreira2
+VARPA Group, Faculty of Informatics, Department of Computer Science, University of Coru˜na, 15071 A Coru˜na, Spain
+Department of Electronics and Computer Science, University of Santiago de Compostela, 15782 Santiago de Compostela, Spain
+Correspondence should be addressed to M. Ortega,
+Received 14 October 2008; Accepted 12 February 2009
+Recommended by Natalia A. Schmid
+Biometrics refer to identity verification of individuals based on some physiologic or behavioural characteristics. The typical
+uthentication process of a person consists in extracting a biometric pattern of him/her and matching it with the stored pattern
+for the authorised user obtaining a similarity value between patterns. In this work an ef‌f‌icient method for persons authentication
+is showed. The biometric pattern of the system is a set of feature points representing landmarks in the retinal vessel tree. The
+pattern extraction and matching is described. Also, a deep analysis of similarity metrics performance is presented for the biometric
+system. A database with samples of retina images from users on different moments of time is used, thus simulating a hard and real
+environment of verification. Even in this scenario, the system allows to establish a wide confidence band for the metric threshold"
+6fc129d384431d17eb7aa22afd6ab68f1084f038,Development of an Efficient Face Recognition System based on Linear and Nonlinear Algorithms,"Development of an Efficient Face Recognition System based on Linear and Nonlinear Algorithms
+{tag} {/tag}
+International Journal of Computer Applications
+Foundation of Computer Science (FCS), NY, USA
+Volume 134
+Number 7
+Year of Publication: 2016
+Authors:
+Filani Araoluwa S., Adetunmbi Adebayo O.
+10.5120/ijca2016907932
+{bibtex}2016907932.bib{/bibtex}"
+6f5ce5570dc2960b8b0e4a0a50eab84b7f6af5cb,Low Resolution Face Recognition Using a Two-Branch Deep Convolutional Neural Network Architecture,"Low Resolution Face Recognition Using a
+Two-Branch Deep Convolutional Neural Network
+Architecture
+Erfan Zangeneh, Mohammad Rahmati, and Yalda Mohsenzadeh"
+6fd3bafa25bf6d376bc9d1cc1311eb260d10d024,Facial Recognition Utilizing Patch Based Game Theory,"International Journal of Machine Learning and Computing, Vol. 5, No. 4, August 2015
+Facial Recognition Utilizing Patch Based Game Theory
+Foysal Ahmad, Kaushik Roy, Brian O‟Connor, Joseph Shelton, Pablo Arias, Albert Esterline, and Gerry
+Dozier
+theory. Texture based"
+6f8fa219ea82ded79757de59250b7213f9f5a104,OriNet: A Fully Convolutional Network for 3D Human Pose Estimation,"Chenxu Luo1
+Xiao Chu2
+Alan Yuille1
+Department of Computer Science
+The Johns Hopkins University
+Baltimore, MD 21218, USA
+Baidu Research (USA)
+Sunnyvale, CA 94089, USA
+LUO ET AL.: ORINET: A FULLY CONVOLUTIONAL NETWORK FOR 3D HUMAN POSE
+OriNet: A Fully Convolutional Network for 3D
+Human Pose Estimation"
+6f288a12033fa895fb0e9ec3219f3115904f24de,Learning Expressionlets via Universal Manifold Model for Dynamic Facial Expression Recognition,"Learning Expressionlets via Universal Manifold
+Model for Dynamic Facial Expression Recognition
+Mengyi Liu, Student Member, IEEE, Shiguang Shan, Senior Member, IEEE, Ruiping Wang, Member, IEEE,
+Xilin Chen, Senior Member, IEEE"
+6feb0d42232c31eecee5d90290287afe803e88a5,Recognizing Challenging Handwritten Annotations with Fully Convolutional Networks,"Recognizing Challenging Handwritten Annotations
+with Fully Convolutional Networks
+Andreas K¨olsch∗†, Ashutosh Mishra∗, Saurabh Varshneya∗†, Muhammad Zeshan Afzal∗†, Marcus Liwicki∗†‡§
+{a koelsch12, a ashutosh16, s
+MindGarage, University of Kaiserslautern, Germany
+Insiders Technologies GmbH, Kaiserslautern, Germany
+University of Fribourg, Switzerland
+§Lule˚a, University of Technology, Sweden"
+6f41b528abc34c249038f612a6c1033790ace628,Discriminant Subspace Analysis: An Adaptive Approach for Image Classification,"Discriminant Subspace Analysis: An Adaptive
+Approach for Image Classification
+Yijuan Lu, Member, IEEE, and Qi Tian, Senior Member, IEEE"
+6f957df9a7d3fc4eeba53086d3d154fc61ae88df,Modélisation et suivi des déformations faciales : applications à la description des expressions du visage dans le contexte de la langue des signes,"Mod´elisation et suivi des d´eformations faciales :
+pplications `a la description des expressions du visage
+dans le contexte de la langue des signes
+Hugo Mercier
+To cite this version:
+Hugo Mercier. Mod´elisation et suivi des d´eformations faciales : applications `a la description
+des expressions du visage dans le contexte de la langue des signes. Interface homme-machine
+[cs.HC]. Universit´e Paul Sabatier - Toulouse III, 2007. Fran¸cais. <tel-00185084>
+HAL Id: tel-00185084
+https://tel.archives-ouvertes.fr/tel-00185084
+Submitted on 5 Nov 2007
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destin´ee au d´epˆot et `a la diffusion de documents
+scientifiques de niveau recherche, publi´es ou non,"
+6f3a8528841ea323d965d558195710fd8f916ffd,Knowledge Factorization,"Knowledge Factorization
+Anubhav Ashok
+Khushi Gupta
+Nishant Agrawal"
+6f089f9959cc711e16f1ebe0c6251aaf8a65959a,Improvement in object detection using Super Pixels,"International Journal of Engineering Research in Electronic and Communication
+Engineering (IJERECE) Vol 3, Issue 5, May 2016
+Improvement in object detection using Super Pixels
+[1] Shruti D Kadam [2] H.Mallika
+Dept. of Electronics and communication
+M. S. Ramaiah Institute of Technology, Bangalore, Karnataka
+[1] [2]"
+6f5a3c34360caad4644aea897b8fe7dd72076d0f,Self-calibrating Marker Tracking in 3D with Event-Based Vision Sensors,"Self-Calibrating Marker Tracking in 3D
+with Event-Based Vision Sensors
+Georg R. Müller, Jörg Conradt
+Technische Universität München, Arcisstr. 21,
+80290 München, Germany"
+6f1a784ebb8df0689361afe26a2e5f7a1f4c66ca,A unified probabilistic framework for measuring the intensity of spontaneous facial action units,"A Unified Probabilistic Framework For Measuring The Intensity of
+Spontaneous Facial Action Units
+Yongqiang Li1, S. Mohammad Mavadati2, Mohammad H. Mahoor and Qiang Ji
+(AU),"
+6f7d06ced04ead3b9a5da86b37e7c27bfcedbbdd,Multi-Scale Fully Convolutional Network for Fast Face Detection,"Pages 51.1-51.12
+DOI: https://dx.doi.org/10.5244/C.30.51"
+6f9873e2a7bc279c4f0a45c1a6e831ef3ba78ae7,Improving GAN Training via Binarized Representation Entropy (BRE) Regularization,"Published as a conference paper at ICLR 2018
+IMPROVING GAN TRAINING VIA
+BINARIZED REPRESENTATION ENTROPY (BRE)
+REGULARIZATION
+Yanshuai Cao, Gavin Weiguang Ding, Kry Yik-Chau Lui, Ruitong Huang
+Borealis AI
+Canada"
+6fa9bae381274518d3972294d81e460f0c63900b,Personalized Recommendations in Police Photo Lineup Assembling Task,"S. Krajˇci (ed.): ITAT 2018 Proceedings, pp. 157–160
+CEUR Workshop Proceedings Vol. 2203, ISSN 1613-0073, c(cid:13) 2018 Ladislav Peška and Hana Trojanová"
+6f1be86c77492af422e936028858c9180b52b698,Indoor Scene Understanding in 2.5/3D: A Survey,"JOURNAL OF LATEX CLASS FILES, VOL. 6, NO. 1, JULY 2015
+Indoor Scene Understanding in 2.5/3D: A Survey
+Muzammal Naseer, Salman H Khan, Fatih Porikli"
+6f6b4e2885ea1d9bea1bb2ed388b099a5a6d9b81,"Structured Output SVM Prediction of Apparent Age, Gender and Smile from Deep Features","Structured Output SVM Prediction of Apparent Age,
+Gender and Smile From Deep Features
+Michal Uˇriˇc´aˇr
+CMP, Dept. of Cybernetics
+FEE, CTU in Prague
+Radu Timofte
+Computer Vision Lab
+D-ITET, ETH Zurich
+Rasmus Rothe
+Computer Vision Lab
+D-ITET, ETH Zurich
+Luc Van Gool
+PSI, ESAT, KU Leuven
+CVL, D-ITET, ETH Zurich
+Jiˇr´ı Matas
+CMP, Dept. of Cybernetics
+FEE, CTU in Prague"
+6f3391fda6b25796b5e051f822f91243f69276cb,Performance Comparison of Various Face Detection Techniques,"International Journal of Scientific Research Engineering & Technology (IJSRET)
+Volume 2 Issue1 pp 019-0027 April 2013
+ISSN 2278 - 0882
+www.ijsret.org
+Performance Comparison of Various Face Detection Techniques
+Mohammed Javed, 2Bhaskar Gupta
+M.Tech. Student, Jamia Hamdard, New Delhi
+Associate Professor,ECE,BBDIT,Ghaziabad,UP
+Corresponding Author"
+6f08885b980049be95a991f6213ee49bbf05c48d,Author's Personal Copy Multi-kernel Appearance Model ☆,"This article appeared in a journal published by Elsevier. The attached
+opy is furnished to the author for internal non-commercial research
+nd education use, including for instruction at the authors institution
+nd sharing with colleagues.
+Other uses, including reproduction and distribution, or selling or
+licensing copies, or posting to personal, institutional or third party
+websites are prohibited.
+In most cases authors are permitted to post their version of the
+rticle (e.g. in Word or Tex form) to their personal website or
+institutional repository. Authors requiring further information
+regarding Elsevier’s archiving and manuscript policies are
+encouraged to visit:
+http://www.elsevier.com/authorsrights"
+6fc8c988dd841c6c4f5e96b1b1458b6aa564b2de,Crowd Counting via Scale-Adaptive Convolutional Neural Network,"Crowd counting via scale-adaptive convolutional neural network
+Lu Zhang∗†
+Tencent Youtu
+Miaojing Shi∗
+Qiaobo Chen†
+Inria Rennes & Tencent Youtu
+Shanghai Jiaotong University"
+6fa39c0221c8bcae9146d31646cd9f70aba7190c,Review on Histopathological Slide Analysis using Digital Microscopy,"International Journal of Advanced Science and Technology
+Vol.62, (2014), pp.65-96
+http://dx.doi.org/10.14257/ijast.2014.62.06
+Review on Histopathological Slide Analysis using Digital Microscopy
+Sangita Bhattacharjee1, Jashojit Mukherjee1, Sanjay Nag1, Indra Kanta Maitra2 and
+Samir K. Bandyopadhyay1
+Department of Computer Science and Engineering, University of Calcutta, India
+B. P. Poddar Institute of Management and Technology, Kolkata, India"
+6f41e2ba877ec690bd1c9e5e8742c4088f95c346,Clockwork Convnets for Video Semantic Segmentation,"Clockwork Convnets for Video Semantic Segmentation
+Evan Shelhamer(cid:63)
+Kate Rakelly(cid:63)
+Judy Hoffman(cid:63)
+Trevor Darrell
+UC Berkeley"
+6f8fc12004fa068c424369793fd39426e772b07d,Demystifying Core Ranking in Pinterest Image Search,"Demystifying Core Ranking in Pinterest Image Search
+Linhong Zhu
+Pinterest & USC/ISI"
+6fe149e588a5bf15bf89edfedb1a29cc31384ddc,Fully Convolutional Networks for Automated Segmentation of Abdominal Adipose Tissue Depots in Multicenter Water-Fat MRI,"Fully Convolutional Networks for Automated Segmentation
+of Abdominal Adipose Tissue Depots in Multicenter
+Water-Fat MRI
+Taro Langner1*, Anders Hedstr¨om2, Katharina Paulmichl3,4, Daniel Weghuber3,4,
+Anders Forslund5, Peter Bergsten5,6, H˚akan Ahlstr¨om1,2, Joel Kullberg1,2
+Dept. of Radiology, Uppsala University, Uppsala, Sweden
+Antaros Medical, BioVenture Hub, M¨olndal, Sweden
+Dept. of Pediatrics, Paracelsus Medical University, 5020 Salzburg, Austria
+Obesity Research Unit, Paracelsus Medical University, 5020 Salzburg, Austria
+5Dept. of Women’s and Children’s Health, Uppsala University, Uppsala, SE 751 05, Sweden
+6Dept. of Medical Cell Biology, Uppsala University, Uppsala, SE 751 23, Sweden"
+6f35b6e2fa54a3e7aaff8eaf37019244a2d39ed3,Learning probabilistic classifiers for human–computer interaction applications,"DOI 10.1007/s00530-005-0177-4
+R E G U L A R PA P E R
+Nicu Sebe · Ira Cohen · Fabio G. Cozman ·
+Theo Gevers · Thomas S. Huang
+Learning probabilistic classifiers for human–computer
+interaction applications
+Published online: 10 May 2005
+(cid:1) Springer-Verlag 2005
+intelligent
+interaction,"
+6f3054f182c34ace890a32fdf1656b583fbc7445,Age Estimation Robust to Optical and Motion Blurring by Deep Residual CNN,"Article
+Age Estimation Robust to Optical and Motion
+Blurring by Deep Residual CNN
+Jeon Seong Kang, Chan Sik Kim, Young Won Lee, Se Woon Cho and Kang Ryoung Park *
+Division of Electronics and Electrical Engineering, Dongguk University, 30 Pildong-ro, 1-gil, Jung-gu,
+Seoul 100-715, Korea; (J.S.K.); (C.S.K.);
+(Y.W.L.); (S.W.C.)
+* Correspondence: Tel.: +82-10-3111-7022; Fax: +82-2-2277-8735
+Received: 9 March 2018; Accepted: 10 April 2018; Published: 13 April 2018"
+6f53466b17a2f9da4dbd1d870e822a1f8e837044,Image Aesthetic Assessment: An experimental survey,"Image Aesthetic Assessment:
+An Experimental Survey
+Yubin Deng, Chen Change Loy, Member, IEEE, and Xiaoou Tang, Fellow, IEEE"
+6fa3857faba887ed048a9e355b3b8642c6aab1d8,Face Recognition in Challenging Environments: An Experimental and Reproducible Research Survey,"Face Recognition in Challenging Environments:
+An Experimental and Reproducible Research
+Survey
+Manuel G¨unther and Laurent El Shafey and S´ebastien Marcel"
+6f8ea33c29de7ef94f674c4c847185a127c6ea2f,Cue Integration by Similarity Rank List Coding - Application to Invariant Object Recognition,"nd IEEE International Workshops on Foundations and Applications of Self* Systems
+nd IEEE International Workshops on Foundations and Applications of Self* Systems
+Cue Integration by Similarity Rank List Coding —
+Application to Invariant Object Recognition
+Raul Grieben and Rolf P. W¨urtz
+Institut f¨ur Neuroinformatik, Ruhr-Universit¨at Bochum,44780 Bochum, Germany"
+6f79c4b82f9ccdee918659a8f7091b8ab99fe889,Mono-Camera 3D Multi-Object Tracking Using Deep Learning Detections and PMBM Filtering,"Mono-Camera 3D Multi-Object Tracking Using
+Deep Learning Detections and PMBM Filtering
+Samuel Scheidegger∗†, Joachim Benjaminsson∗†, Emil Rosenberg†, Amrit Krishnan∗, Karl Granstr¨om†
+Zenuity, †Department of Electrical Engineering, Chalmers University of Technology"
+6f7ce89aa3e01045fcd7f1c1635af7a09811a1fe,A novel rank order LoG filter for interest point detection,"978-1-4673-0046-9/12/$26.00 ©2012 IEEE
+ICASSP 2012"
+6fe2efbcb860767f6bb271edbb48640adbd806c3,Soft Biometrics; Human Identification Using Comparative Descriptions,"SOFT BIOMETRICS: HUMAN IDENTIFICATION USING COMPARATIVE DESCRIPTIONS
+Soft Biometrics; Human Identification using
+Comparative Descriptions
+Daniel A. Reid, Mark S. Nixon, Sarah V. Stevenage"
+6fdc0bc13f2517061eaa1364dcf853f36e1ea5ae,DAISEE: Dataset for Affective States in E-Learning Environments,"DAISEE: Dataset for Affective States in
+E-Learning Environments
+Abhay Gupta1, Richik Jaiswal2, Sagar Adhikari2, Vineeth Balasubramanian2
+Microsoft India R&D Pvt. Ltd.
+Department of Computer Science, IIT Hyderabad
+{cs12b1032, cs12b1034,"
+6f5151c7446552fd6a611bf6263f14e729805ec7,Facial Action Unit Recognition using Filtered Local Binary Pattern Features with Bootstrapped and Weighted ECOC Classi ers,".=?E= )?JE 7EJ 4A?CEJE KIEC
+?= *E=HO 2=JJAH .A=JKHAI MEJD
+-++ +=IIEAHI
+55EJD
++AJHA BH 8EIE 5FAA?D 5EC= 2H?AIIEC 7ELAHIEJO B 5KHHAO
+5KHHAO /7 %:0 7
+)>IJH=?J 9EJDE JDA ?JANJ B=?A ANFHAIIE ?=IIE?=JE KIEC JDA
+B=?E= =?JE IOIJA .)+5 MA JDA FH>A B
+EC B=?E= =?JE KEJI )7I 6DA EI J JH=E = IECA
+AHHH?HHA?JEC KJFKJ -++ KJE?=II ?=IIEAH J AIJE=JA JDA
+FH>=>EEJEAI JD=J A=?D A B IALAH= ?O ??KHHEC )7 CHKFI EI
+FHAIAJ E JDA FH>A E=CA 2=JJ I?=EC EI J ?=E>H=JA JDA -++
+KJFKJI J FH>=>EEJEAI =FFHFHE=JA IKI B JDAIA FH>=>EEJEAI =HA
+J=A J >J=E = IAF=H=JA FH>=>EEJO BH A=?D )7 .A=JKHA
+ANJH=?JE EI >O CAAH=JEC = =HCA K>AH B ?= >E=HO F=J
+JAH *2 BA=JKHAI JDA IAA?JEC BH JDAIA KIEC B=IJ ?HHA=JE
+JAHEC .+*. 6DA >E=I L=HE=?A FHFAHJEAI B JDA ?=IIEAH
+=HA MA IDM JD=J >JD JDAIA IKH?AI B AHHH ?= >A HA
+>O AD=?EC -++ JDHKCD JDA =FFE?=JE B >JIJH=FFEC
+?=IIIAF=H=>EEJO MAECDJEC"
+03e83659f0fc98dd03c354a2cc7a90d585ff9cf5,Face Recognition Using Holistic Features and Within Class Scatter-Based PCA,"GSTF JOURNAL ON COMPUTING, VOL. 3, NO. 2, JUNE 2013
+(cid:2)(cid:3)(cid:4)(cid:5)(cid:1)(cid:6)(cid:7)(cid:8)(cid:9)(cid:10)(cid:7)(cid:11)(cid:8)(cid:12)(cid:13)(cid:7)(cid:11)(cid:14)(cid:1)(cid:15)(cid:13)(cid:16)(cid:10)(cid:7)(cid:11)(cid:14)(cid:1)(cid:13)(cid:7)(cid:1)(cid:17)(cid:13)(cid:18)(cid:19)(cid:16)(cid:8)(cid:12)(cid:7)(cid:20)(cid:1)(cid:21)(cid:15)(cid:13)(cid:17)(cid:22)(cid:23)(cid:1)(cid:24)(cid:13)(cid:14)(cid:25)(cid:1)(cid:26)(cid:1)(cid:27)(cid:13)(cid:25)(cid:1)(cid:28)(cid:23)(cid:1)(cid:15)(cid:16)(cid:14)(cid:29)(cid:1)(cid:28)(cid:30)(cid:31)(cid:26)
+DOI 10.7603/s40601-013-0002-4
+Face Recognition Using Holistic Features and
+Within Class Scatter-Based PCA
+I Gede Pasek Suta Wijaya, Non-Member, IEEE, Keiichi Uchimura, Non-Member, IEEE,
+Gou Koutaki, Non-Member, IEEE"
+034050422f90938a43e9cfd292187aef124fef61,Race recognition from face images using Weber local descriptor,"Paper 1569528513
+IWSSIP 2012, 11-13 April 2012, Vienna, Austria
+. INTRODUCTION"
+03c56c176ec6377dddb6a96c7b2e95408db65a7a,A Novel Geometric Framework on Gram Matrix Trajectories for Human Behavior Understanding,"A Novel Geometric Framework on Gram Matrix
+Trajectories for Human Behavior Understanding
+Anis Kacem, Mohamed Daoudi, Boulbaba Ben Amor, Stefano Berretti, and Juan Carlos Alvarez-Paiva"
+031d22b08d9e8235f46679b89e273ab8723d3e67,Zero-Aliasing Correlation Filters for Object Recognition,"Zero-Aliasing Correlation Filters for Object
+Recognition
+Joseph A. Fernandez, Student Member, IEEE, Vishnu Naresh Boddeti, Member, IEEE, Andres Rodriguez,
+Member, IEEE, B. V. K. Vijaya Kumar, Fellow, IEEE"
+0322e69172f54b95ae6a90eb3af91d3daa5e36ea,Face Classification using Adjusted Histogram in Grayscale,"Face Classification using Adjusted Histogram in
+Grayscale
+Weenakorn Ieosanurak, and Watcharin Klongdee"
+03f7041515d8a6dcb9170763d4f6debd50202c2b,Clustering Millions of Faces by Identity,"Clustering Millions of Faces by Identity
+Charles Otto, Student Member, IEEE, Dayong Wang, Member, IEEE, and Anil K. Jain, Fellow, IEEE"
+038ce930a02d38fb30d15aac654ec95640fe5cb0,Approximate structured output learning for Constrained Local Models with application to real-time facial feature detection and tracking on low-power devices,"Approximate Structured Output Learning for Constrained Local
+Models with Application to Real-time Facial Feature Detection and
+Tracking on Low-power Devices
+Shuai Zheng, Paul Sturgess and Philip H. S. Torr"
+03df507b31691baeb7343d3eb70d048943e2d4f4,Exploring the Use of Local Descriptors for Fish Recognition in LifeCLEF 2015,"Exploring the use of local descriptors for fish
+recognition in LifeCLEF 2015
+Jorge Cabrera-G´amez, Modesto Castrill´on-Santana, Antonio Dom´ınguez-Brito,
+Daniel Hern´andez-Sosa, Josep Isern-Gonz´alez, and Javier Lorenzo-Navarro
+Universidad de Las Palmas de Gran Canaria
+SIANI
+Spain
+http://berlioz.dis.ulpgc.es/roc-siani"
+03c1fc9c3339813ed81ad0de540132f9f695a0f8,Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification,"Proceedings of Machine Learning Research 81:1–15, 2018
+Conference on Fairness, Accountability, and Transparency
+Gender Shades: Intersectional Accuracy Disparities in
+Commercial Gender Classification∗
+Joy Buolamwini
+MIT Media Lab 75 Amherst St. Cambridge, MA 02139
+Timnit Gebru
+Microsoft Research 641 Avenue of the Americas, New York, NY 10011
+Editors: Sorelle A. Friedler and Christo Wilson"
+032c1e19a59cdbeb3fb741a812980f52c1461ce1,"Mining textural knowledge in biological images: Applications, methods and trends","Computational and Structural Biotechnology Journal 15 (2017) 56–67
+j o u r n a l h o m e p a g e : w w w . e l s e v i e r . c o m / l o c a t e / c s b j
+Mining textural knowledge in biological images: Applications, methods
+nd trends
+Santa Di Cataldo*, Elisa Ficarra
+Dept. of Computer and Control Engineering, Politecnico di Torino, Cso Duca degli Abruzzi 24, Torino 10129, Italy
+A R T I C L E
+I N F O
+A B S T R A C T
+Article history:
+Received 25 August 2016
+Received in revised form 14 November 2016
+Accepted 15 November 2016
+Available online 24 November 2016
+Keywords:
+Textural analysis
+Bioimaging
+Textural features extraction
+Texture classification
+Feature encoding"
+035c606bc6a05e2018e57859737877043673b7b9,Fine-Grained Image Classification by Exploring Bipartite-Graph Labels,"Fine-grained Image Classification by Exploring Bipartite-Graph Labels
+Feng Zhou
+NEC Labs
+Yuanqing Lin
+NEC Labs
+www.f-zhou.com"
+0339459a5b5439d38acd9c40a0c5fea178ba52fb,Multimodal recognition of emotions in car environments,"D|C|I&I 2009 Prague
+Multimodal recognition of emotions in car
+environments
+Dragoş DatcuA and Léon J.M. RothkrantzB"
+0393723dff4c00262c1daf34c26d27fa6fc52ab6,Pedestrian detection in outdoor images using color and gradients,"Pedestrian Detection in Outdoor Images using Color and Gradients
+Marcel H¨aselich
+Michael Klostermann
+Dietrich Paulus
+Active Vision Group, University of Koblenz-Landau, 56070 Koblenz, Germany
+{mhaeselich, michaelk,"
+030ff7012b92b805a60976f8dbd6a08c1cecebe6,DCAN: Dual Channel-Wise Alignment Networks for Unsupervised Scene Adaptation,
+0315c68902edca77d2c15cfc1f1335d55343c715,Towards optimal distortion-based visual privacy filters,"TOWARDS OPTIMAL DISTORTION-BASED VISUAL PRIVACY FILTERS
+Pavel Korshunov and Touradj Ebrahimi
+Multimedia Signal Processing Group, EPFL, Lausanne, Switzerland"
+03889b0e8063532ae56d36dd9c54c3784a69e4d4,Learning to Play Guess Who? and Inventing a Grounded Language as a Consequence,"Learning to Play Guess Who? and Inventing a
+Grounded Language as a Consequence
+Emilio Jorge1, Mikael Kågebäck2, and Emil Gustavsson1
+Fraunhofer-Chalmers Centre , Göteborg, Sweden ,
+Computer Science & Engineering , Chalmers University of Technology , Göteborg, Sweden ,"
+033e3fe75da26d8d3dd3cb0f99640181655e6746,From generic to specific deep representations for visual recognition,"Factors of Transferability for a Generic ConvNet Representation
+Hossein Azizpour, Ali Sharif Razavian, Josephine Sullivan, Atsuto Maki, Stefan Carlsson
+{azizpour, razavian, sullivan, atsuto,
+Computer Vision and Active Perception (CVAP), Royal Institute of Technology (KTH), Stockholm, SE-10044 Sweden
+Evidence is mounting that Convolutional Networks (ConvNets) are the most effective representation learning method for visual
+recognition tasks. In the common scenario, a ConvNet is trained on a large labeled dataset (source) and the feed-forward units
+ctivation of the trained network, at a certain layer of the network, is used as a generic representation of an input image for a
+task with relatively smaller training set (target). Recent studies have shown this form of representation transfer to be suitable for a
+wide range of target visual recognition tasks. This paper introduces and investigates several factors affecting the transferability of
+such representations. It includes parameters for training of the source ConvNet such as its architecture, distribution of the training
+data, etc. and also the parameters of feature extraction such as layer of the trained ConvNet, dimensionality reduction, etc. Then,
+y optimizing these factors, we show that significant improvements can be achieved on various (17) visual recognition tasks. We
+further show that these visual recognition tasks can be categorically ordered based on their distance from the source task such that
+correlation between the performance of tasks and their distance from the source task w.r.t. the proposed factors is observed.
+Index Terms—Convolutional Neural Networks, Transfer Learning, Representation Learning, Deep Learning, Visual Recognition
+I. INTRODUCTION
+C ONVOLUTIONAL NETWORKS (ConvNets) trace back
+to the early works on digit and character recognition
+[11], [23]. Prior to 2012, though, in computer vision field,
+neural networks were more renowned for their propensity to"
+032825000c03b8ab4c207e1af4daeb1f225eb025,A Novel Approach for Human Face Detection in Color Images Using Skin Color and Golden Ratio,"J. Appl. Environ. Biol. Sci., 7(10)159-164, 2017
+ISSN: 2090-4274
+© 2017, TextRoad Publication
+Journal of Applied Environmental
+nd Biological Sciences
+www.textroad.com
+A Novel Approach for Human Face Detection in Color Images Using Skin
+Color and Golden Ratio
+Faizan Ullah*1, Dilawar Shah1, Sabir Shah1, Abdus Salam2, Shujaat Ali1
+Department of Computer Science, Bacha Khan University, Charsadda, KPK, Pakistan1
+Department of Computer Science, Abdul WaliKhan University, Mardan, KPK, Pakistan2
+Received: May 9, 2017
+Accepted: August 2, 2017"
+03a8f53058127798bc2bc0245d21e78354f6c93b,Max-margin additive classifiers for detection,"Max-Margin Additive Classifiers for Detection
+Subhransu Maji and Alexander C. Berg
+Sam Hare
+VGG Reading Group
+October 30, 2009"
+034f7d5b3878f8b2db92a7cb7f12edcd5681eca7,FAST Pre-Filtering-Based Real Time Road Sign Detection for Low-Cost Vehicle Localization,"Article
+FAST Pre-Filtering-Based Real Time Road Sign
+Detection for Low-Cost Vehicle Localization
+Kyoungtaek Choi 1, Jae Kyu Suhr 2
+Department of Electronic Engineering, Korea National University of Transportation, 50 Daehak-ro,
+nd Ho Gi Jung 1,*
+Chungju-si 27469, Korea;
+School of Intelligent Mechatronics Engineering, Sejong University, 209 Neungdong-ro, Gwangjin-gu,
+Seoul 05006, Korea;
+* Correspondence: Tel. +82-43-841-5366
+Received: 11 September 2018; Accepted: 16 October 2018; Published: 22 October 2018"
+0313924b600ebb8f608705d96c06b133b3b9627a,Deciphering the Crowd: Modeling and Identification of Pedestrian Group Motion,"Sensors 2013, 13, 875-897; doi:10.3390/s130100875
+OPEN ACCESS
+sensors
+ISSN 1424-8220
+www.mdpi.com/journal/sensors
+Article
+Deciphering the Crowd: Modeling and Identification of
+Pedestrian Group Motion
+Zeynep Y¨ucel *, Francesco Zanlungo, Tetsushi Ikeda, Takahiro Miyashita and Norihiro Hagita
+Intelligent Robotics and Communication Laboratories, Advanced Telecommunications Research
+Institute International, Kyoto 619-0288, Japan; E-Mails: (F.Z.); (T.I.);
+(T.M.); (N.H.)
+* Author to whom correspondence should be addressed; E-Mail:
+Tel.: +81-774-95-1405.
+Received: 14 December 2012; in revised form: 20 December 2012 / Accepted: 4 January 2013 /
+Published: 14 January 2013"
+03b98b4a2c0b7cc7dae7724b5fe623a43eaf877b,Acume: A Novel Visualization Tool for Understanding Facial Expression and Gesture Data,"Acume: A Novel Visualization Tool for Understanding Facial
+Expression and Gesture Data"
+0306a275e80d11d65c4261b8f3d45317a49c1bf7,Optimal Architecture for Deep Neural Networks with Heterogeneous Sensitivity,"Optimal Architecture for Deep Neural Networks
+with Heterogeneous Sensitivity
+Hyunjoong Cho, Jinhyeok Jang, Chanhyeok Lee, and Seungjoon Yang"
+035ef7b25991b0f7ea841a2270ed053198aab09e,"Retrieval of Images with Objects of Specific Size, Location, and Spatial Configuration","Retrieval of images with objects of specific size, location and spatial configuration
+Niloufar Pourian
+B.S. Manjunath
+Department of Electrical and Computer Engineering
+University of California, Santa Barbara, United States"
+036fac2b87cf04c3d93e8a59da618d56a483a97d,Query Adaptive Late Fusion for Image Retrieval,"MANUSCRIPT
+Query Adaptive Late Fusion for Image Retrieval
+Zhongdao Wang, Liang Zheng, Shengjin Wang"
+038b8b2b629a8ba1e2ad6f9319e16b68e83e518a,Assessing Water Stress of Desert Tamarugo Trees Using in situ Data and Very High Spatial Resolution Remote Sensing,"Remote Sens. 2013, 5, 5064-5088; doi:10.3390/rs5105064
+OPEN ACCESS
+ISSN 2072-4292
+www.mdpi.com/journal/remotesensing
+Article
+Assessing Water Stress of Desert Tamarugo Trees Using in situ
+Roberto O. Chávez 1,*, Jan G. P. W. Clevers 1, Martin Herold 1, Edmundo Acevedo 2
+nd Mauricio Ortiz 2,3
+6700 AA Wageningen, The Netherlands; E-Mails: (J.G.P.W.C.);
+(M.H.)
+Laboratorio de Relación Suelo-Agua-Planta, Facultad de Ciencias Agronómicas,
+Universidad de Chile, Casilla 1004, Santiago, Chile; E-Mail: (E.A.);
+(M.O.)
+Centro de Estudios Avanzados en Fruticultura (CEAF), Conicyt-Regional R08I1001,
+Av. Salamanca s/n, Rengo, Chile
+* Author to whom correspondence should be addressed; E-Mails: or
+Tel.: +31-317-481-552; Fax: +31-317-419-000.
+Received: 24 July 2013; in revised form: 12 September 2013 / Accepted: 9 October 2013 /
+Published: 15 October 2013"
+03f6d738f9b916f80ce22c3ba605a0fa4d7830c1,Automated Reconstruction of Evolving Curvilinear Tree Structures,"POUR L'OBTENTION DU GRADE DE DOCTEUR ÈS SCIENCESacceptée sur proposition du jury:Prof. P. Dillenbourg, président du juryProf. P. Fua, directeur de thèseDr F. Moreno-Noguer, rapporteurDr R. Sznitman, rapporteurProf. S. Süsstrunk, rapporteuseAutomated Reconstruction of Evolving Curvilinear Tree StructuresTHÈSE NO 6930 (2016)ÉCOLE POLYTECHNIQUE FÉDÉRALE DE LAUSANNEPRÉSENTÉE LE 18 MARS 2016À LA FACULTÉ INFORMATIQUE ET COMMUNICATIONSLABORATOIRE DE VISION PAR ORDINATEURPROGRAMME DOCTORAL EN INFORMATIQUE ET COMMUNICATIONS Suisse2016PAR(cid:51)(cid:85)(cid:93)(cid:72)(cid:80)(cid:92)(cid:86)(cid:227)(cid:68)(cid:90)(cid:3)(cid:53)(cid:68)(cid:73)(cid:68)(cid:227)(cid:3)(cid:42)(cid:226)(cid:50)(cid:58)(cid:36)(cid:38)(cid:46)(cid:44)"
+034516f37171e7e6cffb8afa84c1f5d6d12d887f,Comparative Analysis of Content Based Image Retrieval using Texture Features for Plant Leaf Diseases,"International Journal of Applied Engineering Research ISSN 0973-4562 Volume 11, Number 9 (2016) pp6244-6249
+© Research India Publications. http://www.ripublication.com
+Comparative Analysis of Content Based Image Retrieval using Texture
+Features for Plant Leaf Diseases
+Ph.D. Scholar, Bharati Vidyapeeth Deemed University College of Engineering Pune, Maharashtra, India
+Jayamala K.Patil
+Professor, Defense Institute of Advanced Tech., Deemed University, Girinagar
+Raj Kumar"
+03adcf58d947a412f3904a79f2ab51cfdf0e838a,Video-based face recognition: a survey,"World Journal of Science and Technology 2012, 2(4):136-139
+ISSN: 2231 – 2587
+Available Online: www.worldjournalofscience.com
+_________________________________________________________________
+Proceedings of ""Conference on Advances in Communication and Computing (NCACC'12)”
+Held at R.C.Patel Institute of Technology, Shirpur, Dist. Dhule,Maharastra,India.
+April 21, 2012
+Video-based face recognition: a survey
+Shailaja A Patil1 and Pramod J Deore2
+Department of Electronics and Telecommunication, R.C.Patel Institute of Technology,Shirpur,Dist.Dhule.Maharashtra,India."
+03ae36b2ed0215b15c5bc7d42fbe20b1491e551a,Learning scene-specific pedestrian detectors without real data,"Learning Scene-Specific Pedestrian Detectors without Real Data
+Hironori Hattori1, Vishnu Naresh Boddeti2, Kris Kitani2, Takeo Kanade2
+Sony Corporation. 2Carnegie Mellon University.
+Figure 1: Overview: For every grid location, geometrically correct renderings of pedestrian are synthetically generated using known scene information
+such as camera calibration parameters, obstacles (red), walls (blue) and walkable areas (green). All location-specific pedestrian detectors are trained
+jointly to learn a smoothly varying appearance model. Multiple scene-and-location-specific detectors are run in parallel at every grid location.
+Consider the scenario in which a new surveillance system is installed
+in a novel location and an image-based pedestrian detector must be trained
+without access to real scene-specific pedestrian data. A similar situation
+may arise when a new imaging system (i.e., a custom camera with unique
+lens distortion) has been designed and must be able to detect pedestrians
+without the expensive process of collecting data with the new imaging de-
+vice. One can use a generic pedestrian detection algorithm trained over co-
+pious amounts of real data to work robustly across many scenes. However,
+generic models are not always best-suited for detection in specific scenes.
+In many surveillance scenarios, it is more important to have a customized
+pedestrian detection model that is optimized for a single scene. Optimiz-
+ing for a single scene however often requires a labor intensive process of
+ollecting labeled data – drawing bounding boxes of pedestrians taken with
+particular camera in a specific scene. The process also takes time, as"
+03f14159718cb495ca50786f278f8518c0d8c8c9,Performance evaluation of HOG and Gabor features for vision-based vehicle detection,"015 IEEE International Conference on Control System, Computing and Engineering, Nov 27 – Nov 29, 2015 Penang, Malaysia
+015 IEEE International Conference on Control System,
+Computing and Engineering (ICCSCE2015)
+Technical Session 1A – DAY 1 – 27th Nov 2015
+Time: 3.00 pm – 4.30 pm
+Venue: Jintan
+Topic: Signal and Image Processing
+.00 pm – 3.15pm
+.15 pm – 3.30pm
+.30 pm – 3.45pm
+.45 pm – 4.00pm
+.00 pm – 4.15pm
+.15 pm – 4.30pm
+.30 pm – 4.45pm
+A 01 ID3
+Can Subspace Based Learning Approach Perform on Makeup Face
+Recognition?
+Khor Ean Yee, Pang Ying Han, Ooi Shih Yin and Wee Kuok Kwee
+A 02 ID35
+Performance Evaluation of HOG and Gabor Features for Vision-based"
+0394040749195937e535af4dda134206aa830258,Geodesic entropic graphs for dimension and entropy estimation in manifold learning,"Geodesic Entropic Graphs for Dimension and
+Entropy Estimation in Manifold Learning
+Jose A. Costa and Alfred O. Hero III
+December 16, 2003"
+03f3bde03f83c3ff4f346d761fde4ce031dd4c69,Deep Models Calibration with Bayesian Neural Networks,"Under review as a conference paper at ICLR 2019
+DEEP MODELS CALIBRATION WITH BAYESIAN NEURAL
+NETWORKS
+Anonymous authors
+Paper under double-blind review"
+0365ea467c169134e858bb668a8e19bd251019e7,Orthogonal Neighborhood Preserving Projections: A Projection-Based Dimensionality Reduction Technique,"Orthogonal Neighborhood Preserving Projections: A
+projection-based dimensionality reduction technique ∗
+E. Kokiopoulou †
+Y. Saad‡
+March 21, 2006"
+03161081b47eba967fd3e663c57ec2f99f66eebd,Face and Facial Feature Localization,"Face and facial feature localization
+Paola Campadelli?, Raffaella Lanzarotti??, Giuseppe Lipori, and Eleonora Salvi
+Dipartimento di Scienze dell’Informazione
+Universit(cid:30)a degli Studi di Milano
+Via Comelico, 39/41 - 20135 Milano, Italy
+fcampadelli, lanzarotti,
+http://homes.dsi.unimi.it/(cid:24)campadel/LAIV/"
+031532cc5c4e64e02e796360a16f89580a0ba552,Nonnegative Decompositions for Dynamic Visual Data Analysis,"Nonnegative Decompositions for
+Dynamic Visual Data Analysis
+Lazaros Zafeiriou, Member, IEEE, Yannis Panagakis, Member, IEEE,
+Maja Pantic, Fellow, IEEE, and Stefanos Zafeiriou, Member, IEEE"
+03ea398fcefc53a1bd041346c895aadcffed0261,Learning an Alphabet of Shape and Appearance for Multi-Class Object Detection,"Int J Comput Vis
+DOI 10.1007/s11263-008-0139-3
+Learning an Alphabet of Shape and Appearance for Multi-Class
+Object Detection
+Andreas Opelt · Axel Pinz · Andrew Zisserman
+Received: 28 February 2007 / Accepted: 4 April 2008
+© The Author(s) 2008"
+03ed6f09a29fe5d0dbf6d59798f88a5311c966d3,Re-identification with RGB-D Sensors,"Re-identi(cid:12)cation with RGB-D sensors
+Igor Barros Barbosa1;3, Marco Cristani1;2, Alessio Del Bue1,
+Loris Bazzani1, and Vittorio Murino1
+Pattern Analysis and Computer Vision (PAVIS) - Istituto Italiano di Tecnologia
+(IIT), Via Morego 30, 16163 Genova, Italy
+Dipartimento di Informatica, University of Verona,
+Strada Le Grazie 15, 37134 Verona, Italy
+Universit(cid:19)e de Bourgogne, 720 Avenue de lEurope, 71200 Le Creusot, France"
+036a8cb922a30d766b0fc0ba5954098a1d2a09f5,Learning Similarities for Rigid and Non-rigid Object Detection,"Learning Similarities for Rigid and Non-Rigid Object Detection
+Asako Kanezaki
+The Univ. of Tokyo
+Emanuele Rodol`a
+TU Munich
+Daniel Cremers
+TU Munich
+Tatsuya Harada
+The Univ. of Tokyo"
+037e17ac0272b4db0d4761067dbf0ee56d91e6dd,A New Multi-modal Dataset for Human Affect Analysis,"A New Multi-Modal Dataset for Human Affect
+Analysis
+nonymous for review
+nonymous for review"
+03ac1c694bc84a27621da6bfe73ea9f7210c6d45,Chapter 1 Introduction to information security foundations and applications,"Chapter 1
+Introduction to information security
+foundations and applications
+Ali Ismail Awad1,2
+.1 Background
+Information security has extended to include several research directions like user
+uthentication and authorization, network security, hardware security, software secu-
+rity, and data cryptography. Information security has become a crucial need for
+protecting almost all information transaction applications. Security is considered as
+n important science discipline whose many multifaceted complexities deserve the
+synergy of the computer science and engineering communities.
+Recently, due to the proliferation of Information and Communication Tech-
+nologies, information security has started to cover emerging topics such as cloud
+omputing security, smart cities’ security and privacy, healthcare and telemedicine,
+the Internet-of-Things (IoT) security [1], the Internet-of-Vehicles security, and sev-
+eral types of wireless sensor networks security [2,3]. In addition, information security
+has extended further to cover not only technical security problems but also social and
+organizational security challenges [4,5].
+Traditional systems’ development approaches were focusing on the system’s
+usability where security was left to the last stage with less priority. However, the"
+03c53fb96a9acd2ec6ba52a2497410f980793bfa,Trainable Convolution Filters and Their Application to Face Recognition,"This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication.
+Trainable Convolution Filters and their
+Application to Face Recognition
+Ritwik Kumar, Member, IEEE, Arunava Banerjee, Member, IEEE,
+Baba C. Vemuri, Fellow, IEEE, and Hanspeter Pfister, Senior Member, IEEE"
+0394e684bd0a94fc2ff09d2baef8059c2652ffb0,Median Robust Extended Local Binary Pattern for Texture Classification,"Median Robust Extended Local Binary Pattern
+for Texture Classification
+Li Liu, Songyang Lao, Paul W. Fieguth, Member, IEEE, Yulan Guo,
+Xiaogang Wang, and Matti Pietikäinen, Fellow, IEEE
+Index Terms— Texture descriptors, rotation invariance, local
+inary pattern (LBP), feature extraction, texture analysis.
+how the texture recognition process works in humans as
+well as in the important role it plays in the wide variety of
+pplications of computer vision and image analysis [1], [2].
+The many applications of texture classification include medical
+image analysis and understanding, object recognition, biomet-
+rics, content-based image retrieval, remote sensing, industrial
+inspection, and document classification.
+As a classical pattern recognition problem, texture classifi-
+ation primarily consists of two critical subproblems: feature
+extraction and classifier designation [1], [2]. It is generally
+greed that the extraction of powerful texture features plays a
+relatively more important role, since if poor features are used
+even the best classifier will fail to achieve good recognition
+results. Consequently, most research in texture classification"
+038277dbfcd767b0a0899de42d3277b5b253cc8e,Review and Implementation of High-Dimensional Local Binary Patterns and Its Application to Face Recognition,"TR-IIS-14-003
+Review and Implementation of
+High-Dimensional Local Binary
+Patterns and Its Application to
+Face Recognition
+Bor-Chun Chen, Chu-Song Chen, Winston Hsu
+July. 24, 2014 || Technical Report No. TR-IIS-14-003
+http://www.iis.sinica.edu.tw/page/library/TechReport/tr2014/tr14.html"
+03f4c0fe190e5e451d51310bca61c704b39dcac8,CHEAVD: a Chinese natural emotional audio-visual database,"J Ambient Intell Human Comput
+DOI 10.1007/s12652-016-0406-z
+O R I G I N A L R E S E A R C H
+CHEAVD: a Chinese natural emotional audio–visual database
+Ya Li1
+• Jianhua Tao1,2,3
+• Linlin Chao1
+• Wei Bao1,4
+• Yazhu Liu1,4
+Received: 30 March 2016 / Accepted: 22 August 2016
+Ó Springer-Verlag Berlin Heidelberg 2016"
+03de6b2a3c81b26eecbec2705173da3dba25ecbb,FineTag: Multi-attribute Classification at Fine-grained Level in Images,"FineTag: Multi-attribute Classification at
+Fine-grained Level in Images
+Roshanak Zakizadeh, Michele Sasdelli, Yu Qian and Eduard Vazquez
+Cortexica Vision Systems, London, UK"
+033fde43e6ff235fd560435bc060d5ffd14fb827,Pose Estimation and Tracking of Eating Persons in Real-life Settings,"ASCI { IPA { SIKS tracks, ICT.OPEN, Veldhoven, November 14{15, 2011
+Pose Estimation and Tracking of Eating Persons in Real-life Settings
+Lu Zhang
+EWI-TUDelft
+Laurens van der Maaten
+EWI-TUDelft
+Nicole Koenderink
+Wageningen UR, FBR
+Franck Golbach
+Wageningen UR, FBR
+Emile Hendriks
+EWI-TUDelft"
+031055c241b92d66b6984643eb9e05fd605f24e2,Multi-fold MIL Training for Weakly Supervised Object Localization,"Multi-fold MIL Training for Weakly Supervised Object Localization
+Ramazan Gokberk Cinbis
+Jakob Verbeek Cordelia Schmid
+Inria∗"
+0332ae32aeaf8fdd8cae59a608dc8ea14c6e3136,Large Scale 3D Morphable Models,"Int J Comput Vis
+DOI 10.1007/s11263-017-1009-7
+Large Scale 3D Morphable Models
+James Booth1
+Stefanos Zafeiriou1
+· Anastasios Roussos1,3 · Allan Ponniah2 · David Dunaway2 ·
+Received: 15 March 2016 / Accepted: 24 March 2017
+© The Author(s) 2017. This article is an open access publication"
+03650399cbf53d916d10a507852c9e94a02ee13f,3D faces in motion: Fully automatic registration and statistical analysis,"D Faces in Motion: Fully Automatic Registration and Statistical Analysis
+Timo Bolkarta,∗, Stefanie Wuhrera
+Saarland University, Saarbr¨ucken, Germany"
+034f7fcf5a393ac3307ac3609c2b971df6efaff6,Can Synthetic Data Handle Unconstrained Gaze Estimation?,"Can Synthetic Data Handle Unconstrained Gaze
+Estimation ?
+Amine Kacete, Renaud Séguier, Michel Collobert, Jérôme Royan
+To cite this version:
+Amine Kacete, Renaud Séguier, Michel Collobert, Jérôme Royan. Can Synthetic Data Handle Uncon-
+strained Gaze Estimation ?. Conférence Nationale sur les Applications Pratiques de l’Intelligence Ar-
+tificielle, Jul 2017, Caen, France. Conférence Nationale sur les Applications Pratiques de l’Intelligence
+Artificielle. <hal-01561526>
+HAL Id: hal-01561526
+https://hal.archives-ouvertes.fr/hal-01561526
+Submitted on 12 Jul 2017
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,"
+034addac4637121e953511301ef3a3226a9e75fd,Implied Feedback: Learning Nuances of User Behavior in Image Search,"Implied Feedback: Learning Nuances of User Behavior in Image Search
+Devi Parikh
+Virginia Tech"
+03701e66eda54d5ab1dc36a3a6d165389be0ce79,Improved Principal Component Regression for Face Recognition Under Illumination Variations,"Improved Principal Component Regression for Face
+Recognition Under Illumination Variations
+Shih-Ming Huang and Jar-Ferr Yang, Fellow, IEEE"
+9b5b2fd938a9337475cb90a143cf7568f8f63709,Illumination Processing in Face Recognition,"Illumination Processing in Face Recognition187Illumination Processing in Face RecognitionYongping Li, Chao Wang and Xinyu AoX Illumination Processing in Face Recognition Yongping Li, Chao Wang and Xinyu Ao Shanghai Institute of Applied Physics, Chinese Academy of Sciences China 1. Introduction Driven by the demanding of public security, face recognition has emerged as a viable solution and achieved comparable accuracies to fingerprint system under controlled lightning environment. In recent years, with wide installing of camera in open area, the automatic face recognition in watch-list application is facing a serious problem. Under the open environment, lightning changes is unpredictable, and the performance of face recognition degrades seriously. Illumination processing is a necessary step for face recognition to be useful in the uncontrolled environment. NIST has started a test called FRGC to boost the research in improving the performance under changing illumination. In this chapter, we will focus on the research effort made in this direction and the influence on face recognition caused by illumination. First of all, we will discuss the quest on the image formation mechanism under various illumination situations, and the corresponding mathematical modelling. The Lambertian lighting model, bilinear illuminating model and some recent model are reviewed. Secondly, under different state of face, like various head pose and different facial expression, how illumination influences the recognition result, where the different pose and illuminating will be examined carefully. Thirdly, the current methods researcher employ to counter the change of illumination to maintain good performance on face recognition are assessed briefly. 
The processing technique in video and how it will improve face recognition on video, where Wang’s (Wang & Li, 2009) work will be discussed to give an example on the related advancement in the fourth part. And finally, the current state-of-art of illumination processing and its future trends will be discussed. 2. The formation of camera imaging and its difference from the human visual system With the camera invented in 1814 by Joseph N, recording of human face began its new era. Since we do not need to hire a painter to draw our figures, as the nobles did in the middle age. And the machine recorded our image as it is, if the camera is in good condition. Currently, the imaging system is mostly to be digital format. The central part is CCD (charge-coupled device) or CMOS (complimentary metal-oxide semiconductor). The CCD/CMOS operates just like the human eyes. Both CCD and CMOS image sensors operate 11www.intechopen.com"
+9b318098f3660b453fbdb7a579778ab5e9118c4c,Joint Patch and Multi-label Learning for Facial Action Unit and Holistic Expression Recognition,"Joint Patch and Multi-label Learning for Facial
+Action Unit and Holistic Expression Recognition
+Kaili Zhao, Wen-Sheng Chu, Student Member, IEEE, Fernando De la Torre,
+Jeffrey F. Cohn, and Honggang Zhang, Senior Member, IEEE
+lassifiers without"
+9b69ea8034a24db2bb1a1eef73ec11b6367d2f2e,Face Recognition System Using PCA and DCT in HMM,"International Journal of Advanced Research in Computer and Communication Engineering
+Vol. 4, Issue 1, January 2015
+Face Recognition System Using PCA and DCT
+ISSN (Online) : 2278-1021
+ISSN (Print) : 2319-5940
+in HMM
+SamerKais Jameel
+Lecturer, Computer Science, University of Raparin, Sulaimaniya, Iraq"
+9b74de11c62ce16d0b4509554556e6b6b0d4f5c0,Bayesian Probabilistic Co-Subspace Addition,"Bayesian Probabilistic Co-Subspace Addition
+Lei Shi
+Baidu.com, Inc"
+9b3ed8190d99b107837de142324e4aa2be8b7eb2,An Efficient Multimodal 2D-3D Hybrid Approach to Automatic Face Recognition,"An Efficient Multimodal 2D-3D Hybrid
+Approach to Automatic Face Recognition
+Ajmal S. Mian, Mohammed Bennamoun, and Robyn Owens"
+9b19be86280c8dbb3fdccc24297449290bd2b6aa,Robust Compressive Phase Retrieval via Deep Generative Priors,"Robust Compressive Phase Retrieval via Deep Generative
+Priors
+Fahad Shamshad, Ali Ahmed
+Dept. of Electrical Engg., Information Technology University, Lahore, Pakistan.
+{fahad.shamshad,"
+9bcfa6d23ea628ccfabf6900ef05437e7cecb1c6,A Hybrid Approach for Secure Biometric Authentication Using Fusion of Iris and Ear,"Volume 5, Issue 8, August 2015 ISSN: 2277 128X
+International Journal of Advanced Research in
+Computer Science and Software Engineering
+Research Paper
+Available online at: www.ijarcsse.com
+A Hybrid Approach for Secure Biometric Authentication Using
+Fusion of Iris and Ear
+Pamalpreet Kaur*, Er. Nirvair Neeru
+CSE Deptt. Punjabi University,
+Patiala, India"
+9b474d6e81e3b94e0c7881210e249689139b3e04,VG-RAM Weightless Neural Networks for Face Recognition,"VG-RAM Weightless Neural Networks for
+Face Recognition
+Alberto F. De Souza, Claudine Badue, Felipe Pedroni, Stiven Schwanz Dias,
+Hallysson Oliveira and Soterio Ferreira de Souza
+Departamento de Inform´atica
+Universidade Federal do Esp´ırito Santo
+Av. Fernando Ferrari, 514, 29075-910 - Vit´oria-ES
+Brazil
+. Introduction
+Computerized human face recognition has many practical applications, such as access control,
+security monitoring, and surveillance systems, and has been one of the most challenging and
+ctive research areas in computer vision for many decades (Zhao et al.; 2003). Even though
+urrent machine recognition systems have reached a certain level of maturity, the recognition
+of faces with different facial expressions, occlusions, and changes in illumination and/or pose
+is still a hard problem.
+A general statement of the problem of machine recognition of faces can be formulated as fol-
+lows: given an image of a scene, (i) identify or (ii) verify one or more persons in the scene
+using a database of faces. In identification problems, given a face as input, the system reports
+ack the identity of an individual based on a database of known individuals; whereas in veri-
+fication problems, the system confirms or rejects the claimed identity of the input face. In both"
+9bf6fbccfdf013cfd076f9357a05fb00b50735ee,JAR-Aibo: A Multi-view Dataset for Evaluation of Model-Free Action Recognition Systems,"JAR-Aibo: A Multi-View Dataset for Evaluation
+of Model-Free Action Recognition Systems
+Marco K¨orner and Joachim Denzler
+Friedrich Schiller University of Jena
+Computer Vision Group
+Ernst-Abbe-Platz 3, 07743 Jena, Germany
+http://www.inf-cv.uni-jena.de"
+9be5129fec3b6f1efc22e19dae3ae684961f5efb,Probability based Extended Direct Attribute Prediction,"Probability based Extended Direct Attribute Prediction
+International Journal of Computer Applications (0975 – 8887)
+Volume 155 – No 5, December 2016
+Manju
+Research Scholar,
+Department of computer science,
+Baba Mastnath University, Rohtak"
+9b95153e4d3972d59fabef0fddce9b7207836b1b,Nonlinear Discrete Hashing,"Nonlinear Discrete Hashing
+Zhixiang Chen, Jiwen Lu, Senior Member, IEEE, Jianjiang Feng, Member, IEEE, and Jie Zhou, Senior Member, IEEE"
+9bcfadd22b2c84a717c56a2725971b6d49d3a804,How to Detect a Loss of Attention in a Tutoring System using Facial Expressions and Gaze Direction,"How to Detect a Loss of Attention in a Tutoring System
+using Facial Expressions and Gaze Direction
+Mark ter Maat"
+9bdd3ce1879f8fd32d2a3f2c4cedcadcf292a1a5,Geometric Active Learning via Enclosing Ball Boundary,"IEEE TRANSACTIONS ON KNOWLEDGE AND DATA ENGINEERING
+Geometric Active Learning via Enclosing Ball
+Boundary
+Xiaofeng Cao, Ivor W. Tsang, Jianliang Xu, Zenglin Shi, Guandong Xu"
+9bd973e64750a94dcf528da402b39e3a53118312,An FPGA-Accelerated Design for Deep Learning Pedestrian Detection in Self-Driving Vehicles,"An FPGA-Accelerated Design for Deep
+Learning Pedestrian Detection in Self-Driving
+Vehicles
+Abdallah Moussawi, Kamal Haddad, and Anthony Chahine
+Department of Electrical and Computer Engineering
+American University of Beirut
+Beirut, Lebanon
+Email:"
+9b30771968b577ea1b71c0cfaee31f3824bfa027,Capturing Form of Non-verbal Conversational Behavior for Recreation on Synthetic Conversational Agent EVA,"Capturing Form of Non-verbal Conversational Behavior for Recreation
+on Synthetic Conversational Agent EVA
+IZIDOR MLAKAR, 2MATEJ ROJC
+Roboti c.s. d.o.o, 2Faculty of Electrical Engineering and Computer Science, University of Maribor
+Tržaška cesta 23, 2Smetanova ulica 17
+SLOVENIA"
+9badcba793a54dd90383a55d7dfee1281c510f75,Local Gradients Smoothing: Defense against localized adversarial attacks,"Local Gradients Smoothing: Defense against localized adversarial attacks
+Muzammal Naseer
+Australian National University (ANU)
+Salman H. Khan
+Data61, CSIRO
+Fatih Porikli
+Australian National University (ANU)"
+9b6d61491120bdd579f53e8c5f7cbe1e05cbc91e,Modeling Multimodal Behaviors from Speech Prosody,"Modeling Multimodal Behaviors From Speech
+Prosody
+Yu Ding1, Catherine Pelachaud1, and Thierry Arti`eres2
+CNRS-LTCI, Institut Mines-TELECOM, TELECOM ParisTech, Paris, France
+{yu.ding,
+Universit´e Pierre et Marie Curie (LIP6), Paris, France"
+9b555d8c8f518d907fa273d8691b008d55aedd92,Reasoning with shapes: profiting cognitive susceptibilities to infer linear mapping transformations between shapes,"REASONING WITH SHAPES
+Reasoning with shapes: profiting cognitive
+susceptibilities to infer linear mapping
+transformations between shapes
+Vahid Jalili"
+9be0de78bb69e7b243e92ab7530f9fd5a08c62cc,Spontaneous Trait Inferences on Social Media,"Article
+Spontaneous Trait Inferences
+on Social Media
+Ana Levordashka1 and Sonja Utz1
+Social Psychological and
+Personality Science
+017, Vol. 8(1) 93-101
+ª The Author(s) 2016
+Reprints and permission:
+sagepub.com/journalsPermissions.nav
+DOI: 10.1177/1948550616663803
+journals.sagepub.com/home/spp"
+9b678aa28facf4f90081d41c2c484c6addddb86d,Fully Convolutional Attention Networks for Fine-Grained Recognition,"Fully Convolutional Attention Networks for Fine-Grained Recognition
+Xiao Liu, Tian Xia, Jiang Wang, Yi Yang, Feng Zhou and Yuanqing Lin
+Baidu Research
+{liuxiao12,xiatian,wangjiang03,yangyi05, zhoufeng09,"
+9b164cef4b4ad93e89f7c1aada81ae7af802f3a4,A Fully Automatic and Haar like Feature Extraction-Based Method for Lip Contour Detection,"Research Journal of Recent Sciences _________________________________________________ ISSN 2277-2502
+Vol. 2(1), 17-20, January (2013)
+Res.J.Recent Sci.
+A Fully Automatic and Haar like Feature Extraction-Based Method for Lip
+Contour Detection
+Zahedi Morteza and Mohamadian Zahra
+School of Computer Engineering, Shahrood University of Technology, Shahrood, IRAN
+Received 26th September 2012, revised 27th October 2012, accepted 6th November 2012
+Available online at: www.isca.in"
+9bac481dc4171aa2d847feac546c9f7299cc5aa0,Matrix Product State for Higher-Order Tensor Compression and Classification,"Matrix Product State for Higher-Order Tensor
+Compression and Classification
+Johann A. Bengua1, Ho N. Phien1, Hoang D. Tuan1 and Minh N. Do2"
+9b7c6ef333c6e64f2dfa97a1a3614d0775d81a8a,A New Evaluation Protocol and Benchmarking Results for Extendable Cross-media Retrieval,"A New Evaluation Protocol and Benchmarking
+Results for Extendable Cross-media Retrieval
+Ruoyu Liu, Yao Zhao, Liang Zheng, Shikui Wei, and Yi Yang"
+9b4e90866c1f096a57383fb7320ac9d516a2f88d,Towards lightweight convolutional neural networks for object detection,"TOWARDS LIGHTWEIGHT CONVOLUTIONAL NEURAL
+NETWORKS FOR OBJECT DETECTION
+Dmitriy Anisimov, Tatiana Khanova
+Intel
+Nizhny Novgorod, Russia"
+9b7974d9ad19bb4ba1ea147c55e629ad7927c5d7,Faical Expression Recognition by Combining Texture and Geometrical Features,"Faical Expression Recognition by Combining
+Texture and Geometrical Features
+Renjie Liu, Ruofei Du, Bao-Liang Lu*"
+9b6d0b3fbf7d07a7bb0d86290f97058aa6153179,"NII , Japan at the first THUMOS Workshop 2013","NII, Japan at the first THUMOS Workshop 2013
+Sang Phan, Duy-Dinh Le, Shin’ichi Satoh
+National Institute of Informatics
+-1-2 Hitotsubashi, Chiyoda-ku, Tokyo, Japan 101-8430"
+9e8637a5419fec97f162153569ec4fc53579c21e,Segmentation and Normalization of Human Ears Using Cascaded Pose Regression,"Segmentation and Normalization of Human Ears
+using Cascaded Pose Regression
+Anika Pflug and Christoph Busch
+University of Applied Sciences Darmstadt - CASED,
+Haardtring 100,
+64295 Darmstadt, Germany
+http://www.h-da.de"
+9ebe5d78163a91239f10c453d76082dfa329851d,Teacher's Perception in the Classroom,"Teachers’ Perception in the Classroom
+¨Omer S¨umer1
+Patricia Goldberg1
+Kathleen St¨urmer1
+Tina Seidel3
+Peter Gerjets2 Ulrich Trautwein1
+Enkelejda Kasneci1
+University of T¨ubingen, Germany
+Leibniz-Institut f¨ur Wissensmedien, Germany
+Technical University of Munich, Germany"
+9e4b052844d154c3431120ec27e78813b637b4fc,Local gradient pattern - A novel feature representation for facial expression recognition,"Journal of AI and Data Mining
+Vol. 2, No .1, 2014, 33-38.
+Local gradient pattern - A novel feature representation for facial
+expression recognition
+M. Shahidul Islam
+Department of Computer Science, School of Applied Statistics, National Institute of Development Administration, Bangkok, Thailand.
+Received 23 April 2013; accepted 16 June 2013
+*Corresponding author: (M.Shahidul Islam)"
+9e6c15150179ce848402e89bd245831d9935f4f9,Bi-modal Face Recognition - How combining 2D and 3D Clues Can Increase the Precision,"Bi-modal face recognition
+How combining 2D and 3D clues can increase the precision
+Amel Aissaoui1, Jean Martinet2
+USTHB, Algeria
+Lille 1 University, France
+issaoui
+Keywords:
+Face recognition, multimodal, 2D, 3D, LBP, RGB-depth."
+9e594ae4f549e0d838f497de31a5b597a6826d55,Recognition of Emotion from Facial Expressions with Direct or Averted Eye Gaze and Varying Expression Intensities in Children with Autism Disorder and Typically Developing Children,"Hindawi Publishing Corporation
+Autism Research and Treatment
+Volume 2014, Article ID 816137, 11 pages
+http://dx.doi.org/10.1155/2014/816137
+Research Article
+Recognition of Emotion from Facial Expressions with Direct or
+Averted Eye Gaze and Varying Expression Intensities in Children
+with Autism Disorder and Typically Developing Children
+Dina Tell,1 Denise Davidson,2 and Linda A. Camras3
+Department of Health Promotion, Loyola University Chicago, Marcella Niehoff School of Nursing, 2160 S. First Avenue,
+Maywood, IL 60153, USA
+Department of Psychology, Loyola University Chicago, 1032 W. Sheridan Road, Chicago, IL 60660, USA
+Department of Psychology, DePaul University, 2219 N. Kenmore Avenue, Chicago, IL 60614, USA
+Correspondence should be addressed to Denise Davidson;
+Received 8 November 2013; Revised 7 February 2014; Accepted 12 February 2014; Published 3 April 2014
+Academic Editor: Geraldine Dawson
+Copyright © 2014 Dina Tell et al. This is an open access article distributed under the Creative Commons Attribution License, which
+permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
+Eye gaze direction and expression intensity effects on emotion recognition in children with autism disorder and typically developing
+hildren were investigated. Children with autism disorder and typically developing children identified happy and angry expressions"
+9ea73660fccc4da51c7bc6eb6eedabcce7b5cead,Talking head detection by likelihood-ratio test,"Talking Head Detection by Likelihood-Ratio Test†
+Carl Quillen, Kara Greenfield, and William Campbell
+MIT Lincoln Laboratory,
+Lexington MA 02420, USA"
+9e9052256442f4e254663ea55c87303c85310df9,Review On Attribute - assisted Reranking for Image Search,"International Journal of Advanced Research in Computer Engineering & Technology (IJARCET)
+Volume 4 Issue 10, October 2015
+Review On Attribute-assisted Reranking for
+Image Search
+Waghmare Supriya, Wavhal Archana, Patil Nital, Tapkir Yogita, Prof. Yogesh Thorat"
+9eeada49fc2cba846b4dad1012ba8a7ee78a8bb7,A New Facial Expression Recognition Method Based on Local Gabor Filter Bank and PCA plus LDA,"Hong-Bo Deng, Lian-Wen Jin, Li-Xin Zhen, Jian-Cheng Huang
+A New Facial Expression Recognition Method Based on Local Gabor Filter Bank and PCA plus LDA
+A New Facial Expression Recognition Method Based on
+Local Gabor Filter Bank and PCA plus LDA
+Hong-Bo Deng1, Lian-Wen Jin1, Li-Xin Zhen2, Jian-Cheng Huang2
+School of Electronic and Information Engineering, South China
+University of Technology, Guangzhou, 510640, P.R.China
+Motorola China Research Center, Shanghai, 210000, P.R.China
+{hbdeng,
+{Li-Xin.Zhen,"
+9ef2b2db11ed117521424c275c3ce1b5c696b9b3,Robust Face Alignment Using a Mixture of Invariant Experts,"Robust Face Alignment Using a Mixture of Invariant Experts
+Oncel Tuzel†
+Salil Tambe‡∗
+Tim K. Marks†
+Intel Corporation
+Mitsubishi Electric Research Labs (MERL)
+{oncel,"
+9e5acdda54481104aaf19974dca6382ed5ff21ed,Automatic localization of facial landmarks from expressive images of high complexity,"Yulia Gizatdinova and Veikko Surakka
+Automatic localization of facial
+landmarks from expressive images
+of high complexity
+DEPARTMENT OF COMPUTER SCIENCES
+UNIVERSITY OF TAMPERE
+D‐2008‐9
+TAMPERE 2008"
+9e9c600919332dcabbd32bbe81a00d1e47449193,Automatic 3D face verification from range data,"- 1330-7803-7663-3/03/$17.00 ©2003 IEEEThis paper was originally published in the Proceedings of the 2003 IEEEInternational Conference on Acoustics, Speech, & Signal Processing,April 6-10, 2003, Hong Kong (cancelled). Reprinted with permission.(cid:224)"
+9e0285debd4b0ba7769b389181bd3e0fd7a02af6,From Face Images and Attributes to Attributes,"From face images and attributes to attributes
+Robert Torfason, Eirikur Agustsson, Rasmus Rothe, Radu Timofte
+Computer Vision Laboratory, ETH Zurich, Switzerland"
+9ed3e04586f311b1e2b5ded9c9c4bfeeecf27f0c,Understanding rapid category detection via multiply degraded images.,"http://journalofvision.org/9/6/19/
+Understanding rapid category detection via multiply
+degraded images
+Chetan Nandakumar
+Vision Science Graduate Program,
+University of California, Berkeley, Berkeley, CA, USA
+Jitendra Malik
+Department of Electrical Engineering and
+Computer Science, University of California,
+Berkeley, Berkeley, CA, USA
+Rapid category detection, as discovered by S. Thorpe, D. Fize, and C. Marlot (1996), demonstrated that the human visual
+system can detect object categories in natural images in as little as 150 ms. To gain insight into this phenomenon and to
+determine its relevance to naturally occurring conditions, we degrade the stimulus set along various image dimensions and
+investigate the effects on perception. To investigate how well modern-day computer vision algorithms cope with
+degradations, we conduct an analog of this same experiment with state-of-the-art object recognition algorithms. We
+discover that rapid category detection in humans is quite robust to naturally occurring degradations and is mediated by a
+non-linear interaction of visual features. In contrast, modern-day object recognition algorithms are not as robust.
+Keywords: rapid category detection, degraded images, object recognition, eye tracking
+Citation: Nandakumar, C., & Malik, J. (2009). Understanding rapid category detection via multiply degraded images. Journal
+of Vision, 9(6):19, 1–8, http://journalofvision.org/9/6/19/, doi:10.1167/9.6.19."
+9e6ecc12794f1d3215f93376a32b350a0492ceb0,Modeling and Predicting Face Recognition System Performance Based on Analysis of Similarity Scores,"Modeling and Predicting Face
+Recognition System Performance
+Based on Analysis of Similarity Scores
+Peng Wang, Member, IEEE,
+Qiang Ji, Sr. Member, IEEE, and
+James L. Wayman, Sr. Member, IEEE"
+9edd7c738171b0f36b65ae771711c38ed1dc38ad,Long-Term Multi-Cue Tracking of Hands in Vehicles,"Long-Term Multi-Cue Tracking of Hands in Vehicles
+Akshay Rangesh, Eshed Ohn-Bar, and Mohan Manubhai Trivedi, Fellow, IEEE"
+9e759860762d40505f25d6fc5c4f4c1f6500d68b,Elastic Net Hypergraph Learning for Image Clustering and Semi-Supervised Classification,"Elastic Net Hypergraph Learning for Image
+Clustering and Semi-supervised Classification
+Qingshan Liu, Seninor Member, IEEE, Yubao Sun, Cantian Wang, Tongliang Liu and Dacheng Tao, Fellow, IEEE"
+9ef73533507b46278d0d27c41e16af2b8ecf23ef,A comparative assessment of appearance based feature extraction techniques and their susceptibility to image degradations in face recognition systems,"A comparative assessment of appearance based
+feature extraction techniques and their susceptibility
+to image degradations in face recognition systems
+Vitomir ˇStruc and Nikola Paveˇsi´c, Member, IEEE"
+9eb111f6990d1494a3904f22be9836c202efd7d1,Exploiting workload similarities for efficient scheduling in diverse asymmetric chip multiprocessing Research,Exploiting workload similarities for efficient scheduling in diverse asymmetric chip multiprocessing Dani Shaket
+9e8dd40aea9204ad670b312a46ba807bfc0c61ce,Distribution-sensitive learning for imbalanced datasets Citation,"Distribution-sensitive learning for imbalanced datasets
+The MIT Faculty has made this article openly available. Please share
+how this access benefits you. Your story matters.
+Citation
+As Published
+Publisher
+Version
+Accessed
+Citable Link
+Terms of Use
+Detailed Terms
+Song, Yale, Louis-Philippe Morency, and Randall Davis.
+“Distribution-Sensitive Learning for Imbalanced Datasets.” 2013
+0th IEEE International Conference and Workshops on
+Automatic Face and Gesture Recognition (FG) (n.d.).
+http://dx.doi.org/10.1109/FG.2013.6553715
+Institute of Electrical and Electronics Engineers (IEEE)
+Author's final manuscript
+Fri Jan 08 19:33:51 EST 2016
+http://hdl.handle.net/1721.1/86107"
+9ee5218a2a74fafbc4227f6c7c587b72e141bd33,Iris Compression and Recognition using Spherical Geometry Image,"(IJARAI) International Journal of Advanced Research in Artificial Intelligence,
+Vol. 4, No.6, 2015
+Iris Compression and Recognition using Spherical
+Geometry Image
+College of Computers and Information Technology University of Tabuk Tabuk, KSA
+Rabab M. Ramadan
+in 3D domain to test"
+9e2120e48d497b373c53563275c3786c11749883,Topological and metric robot localization through computer vision techniques,"Topological and metric robot localization through computer vision
+techniques
+A. C. Murillo, J. J. Guerrero and C. Sag¨u´es
+DIIS - I3A, University of Zaragoza, Spain"
+9ee4d3c173c41ffb6f5aa3c40951aefe3da11d5b,Forming A Random Field via Stochastic Cliques: From Random Graphs to Fully Connected Random Fields,"Forming A Random Field via Stochastic
+Cliques: From Random Graphs to Fully
+Connected Random Fields
+M. J. Shafiee, A. Wong and P. Fieguth"
+9e1712ac91c7a882070a8e2740ed476d59d6d5d4,Expressive image manipulations for a variety of visual representations. (Manipulations d'image expressives pour une variété de représentations visuelles),"Expressive image manipulations for a variety of visual
+representations
+Adrien Bousseau
+To cite this version:
+Adrien Bousseau. Expressive image manipulations for a variety of visual representations. Human-
+Computer Interaction [cs.HC]. Université Joseph-Fourier - Grenoble I, 2009. English. <tel-00429151>
+HAL Id: tel-00429151
+https://tel.archives-ouvertes.fr/tel-00429151
+Submitted on 31 Oct 2009
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de
+recherche français ou étrangers, des laboratoires"
+9e263d429c3b87aae2653b6fb925b32b63c172cd,Enhanced image and video representation for visual recognition,"Enhanced image and video representation for visual
+recognition
+Mihir Jain
+To cite this version:
+Mihir Jain. Enhanced image and video representation for visual recognition. Computer Vision
+nd Pattern Recognition [cs.CV]. Universit´e Rennes 1, 2014. English. <tel-00996793>
+HAL Id: tel-00996793
+https://tel.archives-ouvertes.fr/tel-00996793
+Submitted on 27 May 2014
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destin´ee au d´epˆot et `a la diffusion de documents
+scientifiques de niveau recherche, publi´es ou non,
+´emanant des ´etablissements d’enseignement et de
+recherche fran¸cais ou ´etrangers, des laboratoires"
+040dc119d5ca9ea3d5fc39953a91ec507ed8cc5d,Large-scale Bisample Learning on ID vs. Spot Face Recognition,"Noname manuscript No.
+(will be inserted by the editor)
+Large-scale Bisample Learning on ID vs. Spot Face Recognition
+Xiangyu Zhu∗ · Hao Liu∗ · Zhen Lei · Hailin Shi · Fan Yang · Dong
+Yi · Stan Z. Li
+Received: date / Accepted: date"
+0422a9bc1bde71d3b4fc4f52b4a62b15f2fb101f,A Customized Vision System for Tracking Humans Wearing Reflective Safety Clothing from Industrial Vehicles and Machinery,"Sensors 2014, 14, 17952-17980; doi:10.3390/s141017952
+OPEN ACCESS
+sensors
+ISSN 1424-8220
+www.mdpi.com/journal/sensors
+Article
+A Customized Vision System for Tracking Humans
+Wearing Reflective Safety Clothing from Industrial
+Vehicles and Machinery
+Rafael Mosberger *, Henrik Andreasson and Achim J. Lilienthal
+AASS Research Centre, Örebro University, 70182 Örebro, Sweden;
+E-Mails: (H.A.); (A.J.L.)
+* Author to whom correspondence should be addressed; E-Mail:
+Tel.: +46-1930-1113; Fax: +46-1930-3463.
+External Editor: Vittorio M.N. Passaro
+Received: 8 July 2014; in revised form: 5 September 2014 / Accepted: 9 September 2014 /
+Published: 26 September 2014"
+04adf2e51df06a03b6decf520b0952a54a538a18,Randomized Robust Subspace Recovery and Outlier Detection for High Dimensional Data Matrices,"Randomized Robust Subspace Recovery for High Dimensional
+Data Matrices
+Mostafa Rahmani, Student Member, IEEE and George K. Atia, Member, IEEE"
+047f6afa87f48de7e32e14229844d1587185ce45,An Improvement of Energy-Transfer Features Using DCT for Face Detection,"An Improvement of Energy-Transfer Features
+Using DCT for Face Detection
+Radovan Fusek, Eduard Sojka, Karel Mozdˇreˇn, and Milan ˇSurkala
+Technical University of Ostrava, FEECS, Department of Computer Science,
+7. listopadu 15, 708 33 Ostrava-Poruba, Czech Republic"
+0485e96bb0c1276fe2a27271b939b6e67997acfc,Active Learning for Structured Probabilistic Models,"Active Learning for Structured Probabilistic Models
+Qing Sun
+Virginia Tech
+Ankit Laddha ∗
+Virginia Tech
+Dhruv Batra
+Virginia Tech"
+04afb510e11e963fb18e3271ac966164db806120,Harvesting Social Images for Bi-Concept Search,"Harvesting Social Images for Bi-Concept Search
+Xirong Li, Cees G. M. Snoek, Senior Member, IEEE, Marcel Worring, Member, IEEE, and
+Arnold W. M. Smeulders, Member, IEEE"
+04b851f25d6d49e61a528606953e11cfac7df2b2,Optical Flow Guided Feature: A Fast and Robust Motion Representation for Video Action Recognition,"Optical Flow Guided Feature: A Fast and Robust Motion Representation for
+Video Action Recognition
+Shuyang Sun1,2, Zhanghui Kuang2, Lu Sheng3, Wanli Ouyang1, Wei Zhang2
+The University of Sydney 2SenseTime Research 3The Chinese University of Hong Kong
+{shuyang.sun
+{wayne.zhang"
+0447bdb71490c24dd9c865e187824dee5813a676,Manifold Estimation in View-based Feature Space for Face Synthesis Across Pose,"Manifold Estimation in View-based Feature
+Space for Face Synthesis Across Pose
+Paper 27"
+04bb0a1ccca86a4c1084fc7472ea07189c110aa7,Tracking Interacting Objects Using Intertwined Flows,"Tracking Interacting Objects Using
+Intertwined Flows
+Xinchao Wang∗ , Engin T¨uretken∗, Franc¸ois Fleuret, and Pascal Fua, Fellow, IEEE"
+0435a34e93b8dda459de49b499dd71dbb478dc18,"VEGAC: Visual Saliency-based Age, Gender, and Facial Expression Classification Using Convolutional Neural Networks","VEGAC: Visual Saliency-based Age, Gender, and Facial Expression Classification
+Using Convolutional Neural Networks
+Ayesha Gurnani£1, Vandit Gajjar£1, Viraj Mavani£1, Yash Khandhediya£1
+Department of Electronics and Communication Engineering and
+Computer Vision Group, L. D. College of Engineering, Ahmedabad, India
+{gurnani.ayesha.52, gajjar.vandit.381, mavani.viraj.604,
+the need for handcrafted facial descriptors and data
+preprocessing. D-CNN models have been not only
+successfully applied to human face analysis, but also for
+the visual saliency detection [21, 22, 23]. Visual Saliency
+is fundamentally an intensity map where higher intensity
+signifies regions, where a general human being would
+look, and lower intensities mean decreasing level of visual
+ttention. It’s a measure of visual attention of humans
+ased on the content of the image. It has numerous
+pplications in computer vision and image processing
+tasks. It is still an open problem when considering the MIT
+Saliency Benchmark [24].
+In previous five years, considering age estimation,
+gender classification and facial expression classification"
+041ac91c85276f61bec3f0f3c42782e4f9a31f88,Detailed Dense Inference with Convolutional Neural Networks via Discrete Wavelet Transform,"Detailed Dense Inference with Convolutional Neural Networks
+via Discrete Wavelet Transform
+Lingni Ma1, J¨org St¨uckler2, Tao Wu1 and Daniel Cremers1"
+04f7eab5d03ac6ad678f2fc8adf29bc1a84a2084,Tree based object matching using multi-scale covariance descriptor,"Tree based object matching using multi-scale covariance
+descriptor
+Walid AYEDI1,2, Hichem SNOUSSI1, Fethi SMACH2 and Mohamed ABID2
+Charles Delaunay Institute (FRE CNRS 2848), University of Technology of Troyes, 10010 Troyes, France
+Sfax University, National Engineering School of Sfax, 3052 Sfax, Tunisia"
+044ba70e6744e80c6a09fa63ed6822ae241386f2,Early Prediction for Physical Human Robot Collaboration in the Operating Room,"TO APPEAR IN AUTONOMOUS ROBOTS, SPECIAL ISSUE IN LEARNING FOR HUMAN-ROBOT COLLABORATION
+Early Prediction for Physical Human Robot
+Collaboration in the Operating Room
+Tian Zhou, Student Member, IEEE, and Juan Wachs, Member, IEEE"
+0462aa8b7120a34f111e81f77acd1cc7d81680a6,Color Emotions in Large Scale Content Based Image Indexing,"Link¨oping Studies in Science and Technology
+Dissertations, No. 1362
+Color Emotions in Large Scale Content Based
+Image Indexing
+Martin Solli
+Department of Science and Technology
+Link¨oping University, SE-601 74 Norrk¨oping, Sweden
+Norrk¨oping, March 2011"
+04741341e26bdcd9ed1de18e5a95c31d7b64fa36,Adversarial Action Prediction Networks,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, FEBRUARY 2018
+Adversarial Action Prediction Networks
+Yu Kong, Member, IEEE, Zhiqiang Tao, Student Member, IEEE and Yun Fu, Senior Member, IEEE"
+045b45adbcb83a34d087c917b79274858a878937,A Methodology for Extracting Standing Human Bodies From Single Images,"Invention Journal of Research Technology in Engineering & Management (IJRTEM) ISSN: 2455-3689
+www.ijrtem.com ǁ Volume 1 ǁ Issue 8 ǁ
+A Methodology for Extracting Standing Human Bodies from Single Images
+Dr. Y. Raghavender Rao1, N. Devadas Naik2
+Head ECE JNTUHCEJ Jagtityal
+Asst professor Sri Chaitanya engineering college"
+04dca7c7f85d607cba64ca56de3364a4085effa1,ExprGAN: Facial Expression Editing with Controllable Expression Intensity,"ExprGAN: Facial Expression Editing with Controllable Expression Intensity
+Hui Ding,1 Kumar Sricharan2, Rama Chellappa3
+,3University of Maryland, College Park
+PARC, Palo Alto"
+048eb50c398fa01bd15329945113341102d96454,Addressing perceptual insensitivity to facial affect in violent offenders: first evidence for the efficacy of a novel implicit training approach.,"doi:10.1017/S0033291713001517
+O R I G I N A L A R T I C L E
+Addressing perceptual insensitivity to facial affect
+in violent offenders: first evidence for the efficacy
+of a novel implicit training approach
+M. Schönenberg*, S. Christian, A.-K. Gaußer, S. V. Mayer, M. Hautzinger and A. Jusyte
+Department of Clinical Psychology and Psychotherapy, University of Tübingen, Germany
+Background. Although impaired recognition of affective facial expressions has been conclusively linked to antisocial
+ehavior, little is known about the modifiability of this deficit. This study investigated whether and under which circum-
+stances the proposed perceptual insensitivity can be addressed with a brief implicit training approach.
+Method. Facial affect recognition was assessed with an animated morph task, in which the participants (44 male incar-
+erated violent offenders and 43 matched controls) identified the onset of emotional expressions in animated morph clips
+that gradually changed from neutral to one of the six basic emotions. Half of the offenders were then implicitly trained to
+direct attention to salient face regions (attention training, AT) using a modified dot-probe task. The other half underwent
+the same protocol but the intensity level of the presented expressions was additionally manipulated over the course of
+training sessions (sensitivity to emotional expressions training, SEE training). Subsequently, participants were reassessed
+with the animated morph task.
+Results. Facial affect recognition was significantly impaired in violent offenders as compared with controls. Further, our
+results indicate that only the SEE training group exhibited a pronounced improvement in emotion recognition.
+Conclusions. We demonstrated for the first time that perceptual insensitivity to facial affect can be addressed by an"
+040601d28b683c3c8b48b29e93b6aa3c26dbdf5f,"Facial Expression Recognition for Color Images using Gabor, Log Gabor Filters and PCA","International Journal of Computer Applications (0975 – 8887)
+Volume 113 – No. 4, March 2015
+Facial Expression Recognition for Color Images using
+Gabor, Log Gabor Filters and PCA
+Shail Kumari Shah
+PG Scholar,
+Computer Engg. Dept.
+Vineet Khanna
+Assistant Professor,
+Computer Engg. Dept.
+Rajasthan College of Engineering for Women
+Rajasthan Technical University, Jaipur, India
+Rajasthan College of Engineering for Women
+Rajasthan Technical University, Jaipur, India"
+04bf170753cee3d1da1b9ab41a5b0874685142fa,Casualty Detection for Mobile Rescue Robots via Ground-Projected Point Clouds,"TAROS2018, 037, v5 (final): ’Casualty Detection for Mobile Rescue Robots via Ground- . . ."
+0480b458439069687ec41c90178ba7e9a056bcca,Gender Classification Using Gradient Direction Pattern,"Sci.Int(Lahore),25(4),797-799,2013
+ISSN 1013-5316; CODEN: SINTE 8
+GENDER CLASSIFICATION USING GRADIENT DIRECTION PATTERN
+Department of Computer Science, School of Applied Statistics,
+National Institute of Development Administration, Bangkok, Thailand.
+Mohammad Shahidul Islam"
+0449b56b6b19a3c42766962782bfb88576b5bd62,Spontaneous and cued gaze-following in autism and Williams syndrome,"Spontaneous and cued gaze-following in autism
+nd Williams syndrome
+Riby et al.
+Riby et al. Journal of Neurodevelopmental Disorders 2013, 5:13
+http://www.jneurodevdisorders.com/content/5/1/13"
+04b29b6f1210f4309f3d5ab9e6bd2c8a026ce244,Face Recognition in the Presence of Expressions,"Journal of Software Engineering and Applications, 2012, 5, 321-329
+http://dx.doi.org/10.4236/jsea.2012.55038 Published Online May 2012 (http://www.SciRP.org/journal/jsea)
+Face Recognition in the Presence of Expressions
+Xia Han1*, Moi Hoon Yap2, Ian Palmer3
+Centre for Visual Computing, University of Bradford, Bradford, UK; 2School of Computing, Mathematics, and Digital Technology,
+Manchester Metropolitan University (MMU), Manchester, UK; 3School of Computing, Informatics and Media, University of
+Bradford, Bradford, UK.
+Email:
+Received February 21st, 2012; revised March 25th, 2012; accepted April 27th, 2012"
+04dcdb7cb0d3c462bdefdd05508edfcff5a6d315,Assisting the training of deep neural networks with applications to computer vision,"Assisting the training of deep neural networks
+with applications to computer vision
+Adriana Romero
+tesi doctoral està subjecta a
+Aquesta
+CompartirIgual 4.0. Espanya de Creative Commons.
+Esta tesis doctoral está sujeta a la licencia Reconocimiento - NoComercial – CompartirIgual
+.0. España de Creative Commons.
+This doctoral thesis is licensed under the Creative Commons Attribution-NonCommercial-
+ShareAlike 4.0. Spain License.
+llicència Reconeixement- NoComercial –"
+044fdb693a8d96a61a9b2622dd1737ce8e5ff4fa,Dynamic Texture Recognition Using Local Binary Patterns with an Application to Facial Expressions,"Dynamic Texture Recognition Using Local Binary
+Patterns with an Application to Facial Expressions
+Guoying Zhao and Matti Pietik¨ainen, Senior Member, IEEE"
+0410659b6a311b281d10e0e44abce9b1c06be462,"A Gift from Knowledge Distillation: Fast Optimization, Network Minimization and Transfer Learning","A Gift from Knowledge Distillation:
+Fast Optimization, Network Minimization and Transfer Learning
+Junho Yim1
+Donggyu Joo1
+Jihoon Bae2
+Junmo Kim1
+School of Electrical Engineering, KAIST, South Korea
+Electronics and Telecommunications Research Institute
+{junho.yim, jdg105,"
+04b08a2735eff524f17d3f1a63eb7fc6484d4f83,Facial emotion detection using deep learning,IT 16 040Examensarbete 30 hpJuni 2016Facial emotion detection using deep learning Daniel Llatas SpiersInstitutionen för informationsteknologiDepartment of Information Technology
+04cdc847f3b10d894582969feee0f37fbd3745e5,Compressed Sensing with Deep Image Prior and Learned Regularization,"Compressed Sensing with Deep Image Prior
+nd Learned Regularization
+David Van Veen∗†
+Ajil Jalal∗†
+Eric Price ‡
+Sriram Vishwanath †
+Alexandros G. Dimakis †
+June 19, 2018"
+04ff060369c86ccb07414935bd3e3b85e4896261,Object detection can be improved using human-derived contextual expectations,"Object detection can be improved using
+human-derived contextual expectations
+Harish Katti, Marius V. Peelen, and S. P. Arun"
+04f55f81bbd879773e2b8df9c6b7c1d324bc72d8,Multi-view Face Analysis Based on Gabor Features,"Multi-view Face Analysis Based on Gabor Features
+Hongli Liu, Weifeng Liu, Yanjiang Wang
+College of Information and Control Engineering in China University of Petroleum,
+Qingdao 266580, China"
+046f1c194a09fc84f535c27a3373622223a80c67,Memory-efficient groupby-aggregate using compressed buffer trees,"Memory-Efficient GroupBy-Aggregate using
+Compressed Buffer Trees
+Hrishikesh Amur†, Wolfgang Richter(cid:63), David G. Andersen(cid:63),
+Michael Kaminsky‡, Karsten Schwan†, Athula Balachandran(cid:63), Erik Zawadzki(cid:63)
+(cid:63)Carnegie Mellon University, †Georgia Institute of Technology, ‡Intel Labs Pittsburgh"
+04f6a747cba48be1cabbf5efe6ce3eb85e061395,Discriminative Detection and Alignment in Volumetric Data,"Discriminative Detection
+nd Alignment in Volumetric Data
+Dominic Mai1,2, Philipp Fischer1, Thomas Blein4, Jasmin D¨urr3,
+Klaus Palme2,3, Thomas Brox1,2, and Olaf Ronneberger1,2
+Lehrstuhl f¨ur Mustererkennung und Bildverabeitung, Institut f¨ur Informatik
+BIOSS Centre of Biological Signalling Studies
+Institut f¨ur Biologie II, Albert-Ludwigs-Universit¨at Freiburg
+INRA Versailles"
+04d9abdae728f09e1d1f78e36a5de551c3a690f5,Color Local Texture Features Based Face Recognition,"International Journal of Innovations in Engineering and Technology (IJIET)
+Color Local Texture Features Based Face
+Recognition
+Priyanka V. Bankar
+Department of Electronics and Communication Engineering
+SKN Sinhgad College of Engineering, Korti, Pandharpur, Maharashtra, India
+Department of Electronics and Communication Engineering
+SKN Singhgad College of Engineering, Korti, Pandharpur, Maharashtra, India
+Anjali C. Pise"
+04743c503620baffd75f93f8e4583fcba369ac9d,Proofread Sentence Generation as Multi-Task Learning with Editing Operation Prediction,"Proceedings of the The 8th International Joint Conference on Natural Language Processing, pages 436–441,
+Taipei, Taiwan, November 27 – December 1, 2017 c(cid:13)2017 AFNLP"
+04f4679765d2f71576dd77c1b00a2fd92e5c6da4,Part Detector Discovery in Deep Convolutional Neural Networks,"Part Detector Discovery in Deep Convolutional
+Neural Networks
+Marcel Simon, Erik Rodner, and Joachim Denzler
+Computer Vision Group, Friedrich Schiller University of Jena, Germany
+www.inf-cv.uni-jena.de"
+0431e8a01bae556c0d8b2b431e334f7395dd803a,Learning Localized Perceptual Similarity Metrics for Interactive Categorization,"Learning Localized Perceptual Similarity Metrics for Interactive Categorization
+Catherine Wah ∗
+Google Inc.
+google.com"
+04b4c779b43b830220bf938223f685d1057368e9,Video retrieval based on deep convolutional neural network,"Video retrieval based on deep convolutional
+neural network
+Yajiao Dong
+School of Information and Electronics,
+Beijing Institution of Technology, Beijing, China
+Jianguo Li
+School of Information and Electronics,
+Beijing Institution of Technology, Beijing, China"
+044da4715e439b4f91cee8eec55299e30a615c56,Inducing a Concurrent Motor Load Reduces Categorization Precision for Facial Expressions,"Journal of Experimental Psychology:
+Human Perception and Performance
+016, Vol. 42, No. 5, 706 –718
+0096-1523/16/$12.00
+© 2015 The Author(s)
+http://dx.doi.org/10.1037/xhp0000177
+Inducing a Concurrent Motor Load Reduces Categorization Precision for
+Facial Expressions
+Alberta Ipser and Richard Cook
+City University London
+Motor theories of expression perception posit that observers simulate facial expressions within their own
+motor system, aiding perception and interpretation. Consistent with this view, reports have suggested that
+locking facial mimicry induces expression labeling errors and alters patterns of ratings. Crucially,
+however, it is unclear whether changes in labeling and rating behavior reflect genuine perceptual
+phenomena (e.g., greater internal noise associated with expression perception or interpretation) or are
+products of response bias. In an effort to advance this literature, the present study introduces a new
+psychophysical paradigm for investigating motor contributions to expression perception that overcomes
+some of the limitations inherent in simple labeling and rating tasks. Observers were asked to judge
+whether smiles drawn from a morph continuum were sincere or insincere, in the presence or absence of
+motor load induced by the concurrent production of vowel sounds. Having confirmed that smile"
+04616814f1aabe3799f8ab67101fbaf9fd115ae4,UNIVERSITÉ DE CAEN BASSE NORMANDIE U . F . R . de Sciences,"UNIVERSIT´EDECAENBASSENORMANDIEU.F.R.deSciences´ECOLEDOCTORALESIMEMTH`ESEPr´esent´eeparM.GauravSHARMAsoutenuele17D´ecembre2012envuedel’obtentionduDOCTORATdel’UNIVERSIT´EdeCAENSp´ecialit´e:InformatiqueetapplicationsArrˆet´edu07aoˆut2006Titre:DescriptionS´emantiquedesHumainsPr´esentsdansdesImagesVid´eo(SemanticDescriptionofHumansinImages)TheworkpresentedinthisthesiswascarriedoutatGREYC-UniversityofCaenandLEAR–INRIAGrenobleJuryM.PatrickPEREZDirecteurdeRechercheINRIA/Technicolor,RennesRapporteurM.FlorentPERRONNINPrincipalScientistXeroxRCE,GrenobleRapporteurM.JeanPONCEProfesseurdesUniversit´esENS,ParisExaminateurMme.CordeliaSCHMIDDirectricedeRechercheINRIA,GrenobleDirectricedeth`eseM.Fr´ed´ericJURIEProfesseurdesUniversit´esUniversit´edeCaenDirecteurdeth`ese"
+045fbe21ea8e501d443fa2d297c1292264712c62,Links between multisensory processing and autism,"Exp Brain Res
+DOI 10.1007/s00221-012-3223-4
+R E S E A R C H A R T I C L E
+Links between multisensory processing and autism
+Sarah E. Donohue • Elise F. Darling •
+Stephen R. Mitroff
+Received: 1 June 2012 / Accepted: 7 August 2012
+Ó Springer-Verlag 2012"
+04241ba56d4499a00beb6991d2460d571a218d85,Learning appearance in virtual scenarios for pedestrian detection,"Learning Appearance in Virtual Scenarios for Pedestrian Detection
+Javier Mar´ın, David V´azquez, David Ger´onimo and Antonio M. L´opez
+Computer Vision Center and Computer Science Dpt. UAB, 08193 Bellaterra, Barcelona, Spain
+{jmarin, dvazquez, dgeronimo,"
+041d3eedf5e45ce5c5229f0181c5c576ed1fafd6,How to Take a Good Selfie?,"How to Take a Good Selfie?
+Mahdi M. Kalayeh(cid:63) Misrak Seifu◦ Wesna LaLanne(cid:5) Mubarak Shah(cid:63)
+(cid:63)Center for Research in Computer Vision at University of Central Florida
+◦Jackson State University
+(cid:5)University of Central Florida"
+040eb316cec08b36ae0b57fede86043ee0526686,Learning Reliable and Scalable Representations Using Multimodal Multitask Deep Learning,"Learning Reliable and Scalable Representations
+Using Multimodal Multitask Deep Learning
+Abhinav Valada, and Wolfram Burgard
+Department of Computer Science, University of Freiburg, Germany
+I. INTRODUCTION
+Modality 1
+Modality 2
+Unimodal Seg.
+Multimodal Seg.
+Fifties - in 5 years robots would be everywhere.
+Sixties - in 10 years robots would be everywhere.
+Seventies - in 20 years robots would be everywhere.
+Eighties - in 40 years robots would be everywhere.
+-Marvin Minsky
+Those were the words from one of the pioneers of AI
+when asked to comment on the progress of robotics in the
+twentieth century. This shows the high expectations and
+unforeseen challenges that we are faced with for deploying
+robots in complex real-world environments. One of the primary
+impediments has been the robustness of scene understanding"
+047d7cf4301cae3d318468fe03a1c4ce43b086ed,Co-Localization of Audio Sources in Images Using Binaural Features and Locally-Linear Regression,"Co-Localization of Audio Sources in Images Using
+Binaural Features and Locally-Linear Regression
+Antoine Deleforge, Radu Horaud, Yoav Y. Schechner, Laurent Girin
+To cite this version:
+Antoine Deleforge, Radu Horaud, Yoav Y. Schechner, Laurent Girin. Co-Localization of Audio
+Sources in Images Using Binaural Features and Locally-Linear Regression. IEEE Transactions
+on Audio Speech and Language Processing, 2015, 15p. <hal-01112834>
+HAL Id: hal-01112834
+https://hal.inria.fr/hal-01112834
+Submitted on 3 Feb 2015
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destin´ee au d´epˆot et `a la diffusion de documents
+scientifiques de niveau recherche, publi´es ou non,
+´emanant des ´etablissements d’enseignement et de"
+04317e63c08e7888cef480fe79f12d3c255c5b00,Face Recognition Using a Unified 3D Morphable Model,"Face Recognition Using a Unified 3D Morphable Model
+Hu, G., Yan, F., Chan, C-H., Deng, W., Christmas, W., Kittler, J., & Robertson, N. M. (2016). Face Recognition
+Using a Unified 3D Morphable Model. In Computer Vision – ECCV 2016: 14th European Conference,
+Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part VIII (pp. 73-89). (Lecture Notes in
+Computer Science; Vol. 9912). Springer Verlag. DOI: 10.1007/978-3-319-46484-8_5
+Published in:
+Computer Vision – ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14,
+016, Proceedings, Part VIII
+Document Version:
+Peer reviewed version
+Queen's University Belfast - Research Portal:
+Link to publication record in Queen's University Belfast Research Portal
+Publisher rights
+The final publication is available at Springer via http://dx.doi.org/10.1007/978-3-319-46484-8_5
+General rights
+Copyright for the publications made accessible via the Queen's University Belfast Research Portal is retained by the author(s) and / or other
+opyright owners and it is a condition of accessing these publications that users recognise and abide by the legal requirements associated
+with these rights.
+Take down policy
+The Research Portal is Queen's institutional repository that provides access to Queen's research output. Every effort has been made to"
+047f8d5d5134dd12c67038623417f05ab9885056,Motion Synthesis In : Static Scan + Expression Out : Best Fitting Sequence + Angry Out : Animated Sequence Statistical Analysis Expression Recognition,"D Faces in Motion: Fully Automatic Registration and Statistical Analysis
+Timo Bolkarta,∗, Stefanie Wuhrera
+Saarland University, Saarbr¨ucken, Germany"
+0464b56c5beee717b074ed950abcc959372256a6,Fast and Robust Optimization Approaches for Pedestrian Detection,"Fast and Robust Optimization Approaches for
+Pedestrian Detection
+Victor Hugo Cunha de Melo∗, David Menotti (Co-advisor)†, William Robson Schwartz (Advisor)∗
+Computer Science Department, Universidade Federal de Minas Gerais, Belo Horizonte, Brazil
+Computer Science Department, Universidade Federal de Ouro Preto, Ouro Preto, Brazil
+Email:"
+040806bc41c0dd50273921d8d839fda58d20b01e,Socio-affective touch expression database,"RESEARCH ARTICLE
+Socio-affective touch expression database
+Haemy Lee Masson*, Hans Op de Beeck*
+Department of Brain and Cognition, KU Leuven, Leuven, Belgium
+* (HLM); (HOB)"
+040033d73d1efe316c8f0a8ed702b833a0550d83,Generating Expressions that Refer to Visible Objects,"Atlanta, Georgia, 9–14 June 2013. c(cid:13)2013 Association for Computational Linguistics
+Proceedings of NAACL-HLT 2013, pages 1174–1184,"
+04379f40d2a26dd769c53488b7b08a5123f89347,3D Facial Expression Recognition Based on Histograms of Surface Differential Quantities,"D Facial Expression Recognition Based on
+Histograms of Surface Differential Quantities
+Huibin Li1,2, Jean-Marie Morvan1,3,4, and Liming Chen1,2
+Universit´e de Lyon, CNRS
+Ecole Centrale de Lyon, LIRIS UMR5205, F-69134, Lyon, France
+Universit´e Lyon 1, Institut Camille Jordan,
+3 blvd du 11 Novembre 1918, F-69622 Villeurbanne - Cedex, France
+King Abdullah University of Science and Technology, GMSV Research Center,
+Bldg 1, Thuwal 23955-6900, Saudi Arabia"
+04bd29ec1ae0b64367ec37ddde51a0d8f8b7f670,Few-shot Object Detection,"SUBMITTED TO IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, 2017.
+Few-shot Object Detection
+Xuanyi Dong, Liang Zheng, Fan Ma, Yi Yang, Deyu Meng"
+042510b39c6cdb463610fdda2081b36ff469a353,Human Pose Estimation from Video and IMUs,"Human Pose Estimation from Video and IMUs
+Timo von Marcard, Gerard Pons-Moll, and Bodo Rosenhahn"
+0470b0ab569fac5bbe385fa5565036739d4c37f8,Automatic face naming with caption-based supervision,"Automatic Face Naming with Caption-based Supervision
+Matthieu Guillaumin, Thomas Mensink, Jakob Verbeek, Cordelia Schmid
+To cite this version:
+Matthieu Guillaumin, Thomas Mensink, Jakob Verbeek, Cordelia Schmid. Automatic Face Naming
+with Caption-based Supervision. CVPR 2008 - IEEE Conference on Computer Vision
+Pattern Recognition,
+iety,
+<10.1109/CVPR.2008.4587603>. <inria-00321048v2>
+008,
+pp.1-8,
+008, Anchorage, United
+<http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=4587603>.
+IEEE Computer
+States.
+HAL Id: inria-00321048
+https://hal.inria.fr/inria-00321048v2
+Submitted on 11 Apr 2011
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-"
+6a3cbe2bb27b2a7d32c358e0be4ed268f7d4455c,Shape Tracking with Occlusions via Coarse-to-Fine Region-Based Sobolev Descent,"Modeling Shape, Appearance and Self-Occlusions
+for Articulated Object Tracking
+Yanchao Yang and Ganesh Sundaramoorthi"
+6a951df76a56fc89e5df3fbba2e5699ccad4f199,Relative Pairwise Relationship Constrained Non-negative Matrix Factorisation,"IEEE TRANSACTIONS ON KNOWLEDGE AND DATA ENGINEERING
+Relative Pairwise Relationship Constrained
+Non-negative Matrix Factorisation
+Shuai Jiang, Kan Li, and Richard Yida Xu"
+6a3a07deadcaaab42a0689fbe5879b5dfc3ede52,Learning to Estimate Pose by Watching Videos,"Learning to Estimate Pose by Watching Videos
+Prabuddha Chakraborty and Vinay P. Namboodiri
+Department of Computer Science and Engineering
+IIT Kanpur
+{prabudc, vinaypn}"
+6afed8dc29bc568b58778f066dc44146cad5366c,Kernel Hebbian Algorithm for Single-Frame Super-Resolution,"Kernel Hebbian Algorithm for Single-Frame
+Super-Resolution
+Kwang In Kim1, Matthias O. Franz1, and Bernhard Sch¨olkopf1
+Max Planck Institute f¨ur biologische Kybernetik
+Spemannstr. 38, D-72076 T¨ubingen, Germany
+{kimki, mof,
+http://www.kyb.tuebingen.mpg.de/"
+6a951a47aa545e08508b0b2c6a2bef45e154a3a9,DeepCoder: Semi-Parametric Variational Autoencoders for Automatic Facial Action Coding,"DeepCoder: Semi-parametric Variational Autoencoders
+for Automatic Facial Action Coding
+Dieu Linh Tran∗, Robert Walecki, Ognjen (Oggi) Rudovic*, Stefanos Eleftheriadis,
+Bj¨orn Schuller and Maja Pantic
+{linh.tran, r.walecki14, bjoern.schuller,"
+6ad32b70ee21b6fc16ff4caf7b4ada2aaf13cabc,Efficient Subwindow Search: A Branch and Bound Framework for Object Localization,"Efficient Subwindow Search: A Branch and Bound
+Framework for Object Localization
+Christoph H. Lampert, Matthew B. Blaschko, and Thomas Hofmann
+n image of as low resolution as 320×240 contains more than
+one billion rectangular subimages. In general, the number of
+subimages grows quadratically with the number of image pix-
+els, which makes it computationally too expensive to evaluate
+the quality function exhaustively for all of these. Instead, one
+typically uses heuristics to speed up the search that introduce
+the risk of mispredicting the location of an object or even
+missing it."
+6a16b91b2db0a3164f62bfd956530a4206b23fea,A Method for Real-Time Eye Blink Detection and Its Application,"A Method for Real-Time Eye Blink Detection and Its Application
+Chinnawat Devahasdin Na Ayudhya
+Mahidol Wittayanusorn School
+Puttamonton, Nakornpatom 73170, Thailand"
+6a41ba9db0affa701ea125e09a2fe7eb583e3ac9,Frontal imgelerden otomatik yüz tanıma Automatic face recognition from frontal images,"Frontal imgelerden otomatik yüz tanıma
+Automatic face recognition from frontal images
+Hasan Serhan Yavuz, Hakan Çevikalp, Rıfat Edizkan
+Elektrik ve Elektronik Mühendisliği Bölümü
+Eskişehir Osmangazi Üniversitesi
+Eskişehir, Türkiye
+fotoğraflanan
+laboratuarımızda
+Özetçe—Yüz tanıma basitçe kişilere ait olan yüz imgelerinden
+kimlik tespit edilmesi olarak tanımlanabilir. Bu çalışmada,
+sayısal kamera
+frontal
+imgeler kullanılarak yüz tanıma yapılmıştır. Otomatik yüz
+tanıma süreci sırasıyla yüz sezme, göz sezme, sezilen gözlerin orta
+noktalarını kullanarak belirlenen standart bir yüz şablonuna
+uyacak biçimde haritalama yapma ve sonrasında hizalanan yüz
+imgelerini sınıflandırma basamaklarından oluşur. Literatürde
+yüz imgesi hazırlama süreci genellikle elle yapılmaktadır. Yüz
+imgelerinin tamamı birebir aynı biçimde kesildiği için çok
+yüksek tanıma oranları elde edilir ancak bir otomatik yüz tanıma"
+6ada03f390f92704f3df1556846697c54c00f7da,Human-Machine Cooperation in Large-Scale Multimedia Retrieval: A Survey,"Human-Machine Cooperation in Large-Scale
+Multimedia Retrieval: A Survey
+Kimiaki Shirahama,1 Marcin Grzegorzek,1 and Bipin Indurkhya2
+University of Siegen, 2AGH University of Science and Technology
+Correspondence:
+Correspondence concerning this
+rticle should be addressed to Kimiaki
+Shirahama, Pattern Recognition Group,
+University of Siegen, Hoelderlinstrasse 3,
+57076 Siegen, Germany, or via email to
+Keywords:
+large-scale multimedia retrieval, human-
+machine cooperation, machine-based
+methods, human-based methods
+Large-Scale Multimedia Retrieval (LSMR) is the task to fast analyze a large amount of multi-
+media data like images or videos and accurately find the ones relevant to a certain semantic
+meaning. Although LSMR has been investigated for more than two decades in the fields
+of multimedia processing and computer vision, a more interdisciplinary approach is neces-
+sary to develop an LSMR system that is really meaningful for humans. To this end, this paper
+ims to stimulate attention to the LSMR problem from diverse research fields. By explaining"
+6a1e5f4dbabf451122bf35228c8b25c79c7d235f,Learning to See the Invisible: End-to-End Trainable Amodal Instance Segmentation,"Learning to See the Invisible: End-to-End
+Trainable Amodal Instance Segmentation
+Patrick Follmann, Rebecca K¨onig, Philipp H¨artinger, Michael Klostermann
+MVTec Software GmbH,
+www.mvtec.com,"
+6a806978ca5cd593d0ccd8b3711b6ef2a163d810,Facial Feature Tracking for Emotional Dynamic Analysis,"Facial feature tracking for Emotional Dynamic
+Analysis
+Thibaud Senechal1, Vincent Rapp1, and Lionel Prevost2
+ISIR, CNRS UMR 7222
+Univ. Pierre et Marie Curie, Paris
+{rapp,
+LAMIA, EA 4540
+Univ. of Fr. West Indies & Guyana"
+6a27ffd788a0db64fef74e673786763c82902a26,Discriminative deep transfer metric learning for cross-scenario person re-identification,"Downloaded From: https://www.spiedigitallibrary.org/journals/Journal-of-Electronic-Imaging on 9/13/2018
+Terms of Use: https://www.spiedigitallibrary.org/terms-of-use
+Discriminativedeeptransfermetriclearningforcross-scenariopersonre-identificationTongguangNiXiaoqingGuHongyuanWangZhongbaoZhangShoubingChenCuiJinTongguangNi,XiaoqingGu,HongyuanWang,ZhongbaoZhang,ShoubingChen,CuiJin,“Discriminativedeeptransfermetriclearningforcross-scenariopersonre-identification,”J.Electron.Imaging27(4),043026(2018),doi:10.1117/1.JEI.27.4.043026."
+6a8a3c604591e7dd4346611c14dbef0c8ce9ba54,An Affect-Responsive Interactive Photo Frame,"ENTERFACE’10, JULY 12TH - AUGUST 6TH, AMSTERDAM, THE NETHERLANDS.
+An Affect-Responsive Interactive Photo Frame
+Hamdi Dibeklio˘glu, Ilkka Kosunen, Marcos Ortega Hortas, Albert Ali Salah, Petr Zuz´anek"
+6a1fd51107770edbdd832a1934ff5461e891f2e1,A Robust and Dominant Local Binary Pattern and Its Application,"IJSRD - International Journal for Scientific Research & Development| Vol. 2, Issue 10, 2014 | ISSN (online): 2321-0613
+A Robust and Dominant Local Binary Pattern and Its Application
+Keerthana A.V1 Ashwin M2
+Student of M.E 2Associate Professor
+,2Department of Computer Science & Engineering
+,2Adhiyamaan College of Engineering, Krishnagiri, Tamilnadu, India
+Local
+ternary
+Pattern, modified"
+6a0b70abb9a81a96d4baa9b396deb9da4cc20f8f,Clustering through ranking on manifolds,"Clustering Through Ranking On Manifolds
+Markus Breitenbach
+Dept. of Computer Science; University of Colorado, Boulder, USA
+Gregory Z. Grudic
+Dept. of Computer Science; University of Colorado, Boulder, USA"
+6a52e6fce541126ff429f3c6d573bc774f5b8d89,Role of Facial Emotion in Social Correlation,"Role of Facial Emotion in Social Correlation
+Pankaj Mishra, Rafik Hadfi, and Takayuki Ito
+Department of Computer Science and Engineering
+Nagoya Institute of Technology, Gokiso, Showa-ku, Nagoya, 466-8555 Japan
+{pankaj.mishra,"
+6a14652508138fcf0aa8c518109165f65c88fd3f,Programming a humanoid robot in natural language: an experiment with description logics,"Programming a humanoid robot in natural language:
+n experiment with description logics
+Nicola Vitucci , Alessio Mauro Franchi, Giuseppina Gini
+DEIB, Politecnico di Milano
+Milano, Italy"
+6ae13c7dcd1d10d2dfe58546a49da09b0b471d68,Person-independent facial expression recognition based on compound local binary pattern (CLBP),"The International Arab Journal of Information Technology, Vol. 11, No. 2, March 2014 195
+Person-Independent Facial Expression Recognition
+Based on Compound Local Binary Pattern (CLBP)
+Department of Computer Science and Engineering, Islamic University of Technology, Bangladesh
+Faisal Ahmed1, Hossain Bari2, and Emam Hossain3
+2Samsung Bangladesh R & D Center Ltd, Bangladesh
+3Department of Computer Science and Engineering, Ahsanullah University of Science and Technology,
+Bangladesh"
+6af35225cfd744b79577c126e553f549e5b5cdcc,Title Discriminative Hessian Eigenmaps for face recognition,"Title
+Discriminative Hessian Eigenmaps for face recognition
+Author(s)
+Si, S; Tao, D; Chan, KP
+Citation
+The 2010 IEEE International Conference on Acoustics, Speech
+nd Signal Processing (ICASSP), Dallas, TX., 14-19 March 2010.
+In IEEE International Conference on Acoustics, Speech and
+Signal Processing Proceedings, 2010, p. 5586-5589
+Issued Date
+http://hdl.handle.net/10722/125723
+Rights
+IEEE International Conference on Acoustics, Speech and Signal
+Processing Proceedings. Copyright © IEEE.; ©2010 IEEE.
+Personal use of this material is permitted. However, permission
+to reprint/republish this material for advertising or promotional
+purposes or for creating new collective works for resale or
+redistribution to servers or lists, or to reuse any copyrighted
+omponent of this work in other works must be obtained from
+the IEEE.; This work is licensed under a Creative Commons"
+6a553f7ef42000001f407e95f4955e7ddde46a83,A Dataset of Laryngeal Endoscopic Images with Comparative Study on Convolution Neural Network Based Semantic Segmentation,"IJCARS manuscript No.
+(will be inserted by the editor)
+A Dataset of Laryngeal Endoscopic Images with
+Comparative Study on Convolution Neural Network
+Based Semantic Segmentation
+Max-Heinrich Laves · Jens Bicker · Lüder
+A. Kahrs · Tobias Ortmaier
+Received: date / Accepted: date"
+6a6280189ead63b2eec733b8e8ac507e830928fd,Face localization in color images with complex background,"Face localization in color images with complex
+ackground
+Paola Campadelli, Raffaella Lanzarotti, Giuseppe Lipori
+Dipartimento di Scienze dell’Informazione
+Universit(cid:30)a degli Studi di Milano
+Via Comelico, 39/41 20135 Milano, Italy
+fcampadelli, lanzarotti,"
+6ac7fe3a292dc5e0f7d27e11b85ed8277905e9ba,Detecting Traffic Lights by Single Shot Detection,"Detecting Traffic Lights by Single Shot Detection
+Julian M¨uller1 and Klaus Dietmayer1"
+6a55d6db1b31f44c9bb37b070fbf7c8f64a31f13,Aging and Emotion Recognition : An Examination of Stimulus and Attentional Mechanisms,"Cleveland State University
+ETD Archive
+Aging and Emotion Recognition: An Examination
+of Stimulus and Attentional Mechanisms
+Stephanie Nicole Sedall
+Follow this and additional works at: http://engagedscholarship.csuohio.edu/etdarchive
+Part of the Experimental Analysis of Behavior Commons
+How does access to this work benefit you? Let us know!
+Recommended Citation
+Sedall, Stephanie Nicole, ""Aging and Emotion Recognition: An Examination of Stimulus and Attentional Mechanisms"" (2016). ETD
+Archive. 903.
+http://engagedscholarship.csuohio.edu/etdarchive/903
+This Thesis is brought to you for free and open access by It has been accepted for inclusion in ETD Archive by an
+uthorized administrator of For more information, please contact"
+6aa21d78af359853ee07288cfc8d047e914ce458,Facial Expression Recognition using Log-Euclidean Statistical Shape Models,"FACIAL EXPRESSION RECOGNITION USING
+LOG-EUCLIDEAN STATISTICAL SHAPE MODELS
+Bartlomiej W. Papiez, Bogdan J. Matuszewski, Lik-Kwan Shark and Wei Quan
+Applied Digital Signal and Image Processing Research Centre, University of Central Lancashire, PR1 2HE Preston, U.K.
+Keywords:
+Facial expression representation, Facial expression recognition, Vectorial log-Euclidean statistics, Statistical
+shape modelling."
+6a75ef6b36489cb59c61f21f3cd09c50ad5b2995,MVTec D2S: Densely Segmented Supermarket Dataset,"MVTec D2S: Densely Segmented Supermarket
+Dataset
+Patrick Follmann1,2[0000−0001−5400−2384], Tobias B¨ottger1,2[0000−0002−5404−8662],
+Philipp H¨artinger1[0000−0002−7093−6280], Rebecca K¨onig1[0000−0002−4169−6759],
+nd Markus Ulrich1[0000−0001−8457−5554]
+MVTec Software GmbH, 80634 Munich, Germany
+https://www.mvtec.com/research
+Technical University of Munich, 80333 Munich, Germany"
+6ac1dc59e823d924e797afaf5c4a960ed7106f2a,Deep Facial Expression Recognition: A Survey,"Deep Facial Expression Recognition: A Survey
+Shan Li and Weihong Deng∗, Member, IEEE"
+6ae47c7793e2f0f684ae07357335c7cf338d66ef,Optimistic and pessimistic neural networks for object recognition,"published in: International Conference on Image Processing (ICIP) 2017
+OPTIMISTIC AND PESSIMISTIC NEURAL NETWORKS FOR OBJECT RECOGNITION
+Rene Grzeszick
+Sebastian Sudholt
+Gernot A. Fink
+email:
+TU Dortmund University, Germany"
+6acc92f30c7a141384b9b1bbec8dffe16b08a438,Improving Bag-of-Visual-Words Towards Effective Facial Expressive Image Classification,"Improving Bag-of-Visual-Words Towards Effective Facial Expressive
+Image Classification
+Dawood Al Chanti1 and Alice Caplier1
+Univ. Grenoble Alpes, CNRS, Grenoble INP∗ , GIPSA-lab, 38000 Grenoble, France
+Keywords:
+BoVW, k-means++, Relative Conjunction Matrix, SIFT, Spatial Pyramids, TF.IDF."
+6af98f9843ba629ae1b0347e8b8d81a263f8d7f2,Does this recession make me look black? The effect of resource scarcity on the categorization of biracial faces.,"Short Report
+Does This Recession Make Me Look Black?
+The Effect of Resource Scarcity on the
+Categorization of Biracial Faces
+3(12) 1476 –1478
+© The Author(s) 2012
+Reprints and permission:
+sagepub.com/journalsPermissions.nav
+DOI: 10.1177/0956797612450892
+http://pss.sagepub.com
+Christopher D. Rodeheffer, Sarah E. Hill, and Charles G. Lord
+Texas Christian University
+Received 2/27/12; Revision accepted 5/10/12
+Prosperity makes friends; adversity tries them.
+—Publilius Syrus (Lyman, 1856, p. 73)
+In-group biases are a ubiquitous feature of human social life
+(e.g., Brewer, 1979; Halevy, Bornstein, & Sagiv, 2008; Mullen,
+Dovidio, Johnson, & Copper, 1992; Tajfel, 1982). One explana-
+tion offered for these biases is that they arise from resource
+ompetition between groups (e.g., Kurzban & Neuberg, 2005;"
+6aefe7460e1540438ffa63f7757c4750c844764d,Non-rigid Segmentation Using Sparse Low Dimensional Manifolds and Deep Belief Networks,"Non-rigid Segmentation using Sparse Low Dimensional Manifolds and
+Deep Belief Networks ∗
+Jacinto C. Nascimento
+Instituto de Sistemas e Rob´otica
+Instituto Superior T´ecnico, Portugal"
+6ad5a38df8dd4cdddd74f31996ce096d41219f72,Multi-cue onboard pedestrian detection,"Objectives
+Implementation details
+Experiments on TUD-Brussels
+Conclusion
+{wojek, walk,
+Multi-Cue Onboard Pedestrian Detection
+Christian Wojek, Stefan Walk, Bernt Schiele
+Computer Science Department, TU Darmstadt, Germany
+Detect pedestrians from a moving platform
+• Exploit motion information
+• Leverage complementarity of features
+• Evaluate different classifiers
+• New datasets with image pairs
+Features
+• HOG [1]
+8× 8 pixel cells, 2× 2 blocks
+9-bin histograms, unsigned gradients
+• Haar wavelets [2]
+2 and 16 pixel masks
+horizontal, vertical and diagonal re-"
+6a1da83440c7685f5a03e7bda17be9025e0892e3,Semantic Match Consistency for Long-Term Visual Localization,"Semantic Match Consistency for Long-Term
+Visual Localization
+Carl Toft1, Erik Stenborg1, Lars Hammarstrand1, Lucas Brynte1, Marc
+Pollefeys2,3, Torsten Sattler2, Fredrik Kahl1
+Department of Electrical Engineering, Chalmers University of Technology, Sweden
+Department of Computer Science, ETH Z¨urich, Switzerland
+Microsoft, Switzerland"
+6a7ec333ccabd41b9d20f05c145b3377f6045f43,Face Recognition under Varying,(cid:13) 2010 Zihan Zhou
+6a9c460952a96a04e12caa7bae07ae2f7df1238e,Exploiting scene context for on-line object tracking in unconstrained environments. (Exploitation du contexte de scène pour le suivi d'objet en ligne dans des environnements non contraints),"Exploiting scene context for on-line object tracking in
+unconstrained environments
+Salma Moujtahid
+To cite this version:
+Salma Moujtahid. Exploiting scene context for on-line object tracking in unconstrained environments.
+Modeling and Simulation. Université de Lyon, 2016. English. <NNT : 2016LYSEI110>. <tel-
+01783935>
+HAL Id: tel-01783935
+https://tel.archives-ouvertes.fr/tel-01783935
+Submitted on 2 May 2018
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de"
+6ade1e0d4744d2eb5bf7bab97289ffd7eeb5a661,Simulated+unsupervised Learning with Adaptive Data Generation and Bidirectional Mappings,"Published as a conference paper at ICLR 2018
+SIMULATED+UNSUPERVISED LEARNING WITH
+ADAPTIVE DATA GENERATION AND
+BIDIRECTIONAL MAPPINGS
+Kangwook Lee∗, Hoon Kim∗& Changho Suh
+School of Electrical Engineering
+KAIST
+Daejeon, South Korea"
+6a536aa4ecd6359d54a34aca7eff828e4df02730,Multimodal Observation and Interpretation of Subjects Engaged in Problem Solving,"Multimodal Observation and Interpretation of Subjects Engaged
+in Problem Solving
+Univ. Grenoble Alpes, Inria, CNRS, Grenoble INP*, LIG,
+Univ. Grenoble Alpes, Inria, CNRS, Grenoble INP*, LIG,
+(cid:140)omas Guntz
+F-38000 Grenoble, France
+Dominique Vaufreydaz
+F-38000 Grenoble, France
+Ra(cid:130)aella Balzarini
+F-38000 Grenoble, France
+James Crowley
+F-38000 Grenoble, France
+Univ. Grenoble Alpes, Inria, CNRS, Grenoble INP*, LIG,
+Univ. Grenoble Alpes, Inria, CNRS, Grenoble INP*, LIG,"
+6a69b790a7ec5a396607eb717da2b271a750faaa,Stacked Latent Attention for Multimodal Reasoning,"Stacked Latent Attention for Multimodal Reasoning
+Haoqi Fan
+Jiatong Zhou
+Facebook Research
+Facebook Research
+Hacker Way
+Hacker Way"
+6a7e464464f70afea78552c8386f4d2763ea1d9c,Facial Landmark Localization – A Literature Survey,"Review Article
+International Journal of Current Engineering and Technology
+E-ISSN 2277 – 4106, P-ISSN 2347 - 5161
+©2014 INPRESSCO
+, All Rights Reserved
+Available at http://inpressco.com/category/ijcet
+Facial Landmark Localization – A Literature Survey
+Dhananjay RathodȦ*, Vinay A, Shylaja SSȦ and S NatarajanȦ
+ȦDepartment of Information Science and Engineering, PES Institute of Technology, Bangalore, Karnataka, India
+Accepted 25 May 2014, Available online 01 June2014, Vol.4, No.3 (June 2014)"
+32925200665a1bbb4fc8131cd192cb34c2d7d9e3,An Active Appearance Model with a Derivative-Free Optimization,"MVA2009 IAPR Conference on Machine Vision Applications, May 20-22, 2009, Yokohama, JAPAN
+An Active Appearance Model with a Derivative-Free
+Optimization
+Jixia ZHANG‡, Franck DAVOINE†, Chunhong PAN‡
+CNRS†, Institute of Automation of the Chinese Academy of Sciences‡
+95, Zhongguancun Dong Lu, PO Box 2728 − Beijing 100190 − PR China
+LIAMA Sino-French IT Lab."
+322c063e97cd26f75191ae908f09a41c534eba90,Improving Image Classification Using Semantic Attributes,"Noname manuscript No.
+(will be inserted by the editor)
+Improving Image Classification using Semantic Attributes
+Yu Su · Fr´ed´eric Jurie
+Received: date / Accepted: date"
+320e2c950d5b31cb371208a6b752a94585ac6665,Context-Patch Face Hallucination Based on Thresholding Locality-constrained Representation and Reproducing Learning,"Context-Patch Face Hallucination Based on
+Thresholding Locality-constrained Representation
+nd Reproducing Learning
+Junjun Jiang, Member, IEEE, Yi Yu, Suhua Tang, Member, IEEE, Jiayi Ma, Member, IEEE, Akiko Aizawa, and
+Kiyoharu Aizawa, Fellow, IEEE"
+329c06c00c627c0b041d330f3c0142a88b7cb1e5,Bayesian Sparsification of Gated Recurrent Neural Networks,"Bayesian Sparsification of Gated Recurrent Neural
+Networks
+Ekaterina Lobacheva1∗, Nadezhda Chirkova1∗, Dmitry Vetrov1,2
+Samsung-HSE Laboratory, National Research University Higher School of Economics
+Samsung AI Center
+Moscow, Russia
+{elobacheva, nchirkova,"
+32bd968e6cf31e69ee5fca14d3eadeec7f4187c6,Monocular Pedestrian Detection: Survey and Experiments,"Monocular Pedestrian Detection:
+Survey and Experiments
+Markus Enzweiler, Student Member, IEEE, and Dariu M. Gavrila"
+325b048ecd5b4d14dce32f92bff093cd744aa7f8,Multi-Image Graph Cut Clothing Segmentation for Recognizing People,"#2670
+CVPR 2008 Submission #2670. CONFIDENTIAL REVIEW COPY. DO NOT DISTRIBUTE.
+#2670
+Multi-Image Graph Cut Clothing Segmentation for Recognizing People
+Anonymous CVPR submission
+Paper ID 2670"
+327f3d65a380f70bc39fe99c7ad55d76a5f7fff4,A data-synthesis-driven method for detecting and extracting vague cognitive regions,"International Journal of Geographical Information
+Science
+ISSN: 1365-8816 (Print) 1362-3087 (Online) Journal homepage: http://www.tandfonline.com/loi/tgis20
+A data-synthesis-driven method for detecting and
+extracting vague cognitive regions
+Song Gao, Krzysztof Janowicz, Daniel R. Montello, Yingjie Hu, Jiue-An Yang,
+Grant McKenzie, Yiting Ju, Li Gong, Benjamin Adams & Bo Yan
+To cite this article: Song Gao, Krzysztof Janowicz, Daniel R. Montello, Yingjie Hu, Jiue-An
+Yang, Grant McKenzie, Yiting Ju, Li Gong, Benjamin Adams & Bo Yan (2017): A data-synthesis-
+driven method for detecting and extracting vague cognitive regions, International Journal of
+Geographical Information Science, DOI: 10.1080/13658816.2016.1273357
+To link to this article: http://dx.doi.org/10.1080/13658816.2016.1273357
+Published online: 08 Jan 2017.
+Submit your article to this journal
+View related articles
+View Crossmark data
+Full Terms & Conditions of access and use can be found at
+http://www.tandfonline.com/action/journalInformation?journalCode=tgis20
+Download by: [UC Santa Barbara Library]
+Date: 09 January 2017, At: 09:44"
+32f7e1d7fa62b48bedc3fcfc9d18fccc4074d347,Hierarchical Sparse and Collaborative Low-Rank representation for emotion recognition,"HIERARCHICAL SPARSE AND COLLABORATIVE LOW-RANK REPRESENTATION FOR
+EMOTION RECOGNITION
+Xiang Xiang, Minh Dao, Gregory D. Hager, Trac D. Tran
+Johns Hopkins University, 3400 N. Charles Street, Baltimore, MD 21218, USA
+{xxiang, minh.dao, ghager1,"
+32743e72cdb481b7a30a3d81a96569dcbea4e409,Ultra Power-Efficient CNN Domain Specific Accelerator with 9.3TOPS/Watt for Mobile and Embedded Applications,"Ultra Power-Efficient CNN Domain Specific Accelerator with 9.3TOPS/Watt for
+Mobile and Embedded Applications
+Baohua Sun,
+Lin Yang,
+Patrick Dong, Wenhan Zhang,
+Gyrfalcon Technology Inc.
+Jason Dong, Charles Young
+900 McCarthy Blvd. Milpitas, CA 95035"
+32a6f6aa50ce2a631bf4de7432f830b29b6b05f2,Through the eyes of a child: preschoolers' identification of emotional expressions from the child affective facial expression (CAFE) set.,"Cognition and Emotion
+ISSN: 0269-9931 (Print) 1464-0600 (Online) Journal homepage: http://www.tandfonline.com/loi/pcem20
+Through the eyes of a child: preschoolers’
+identification of emotional expressions from the
+child affective facial expression (CAFE) set
+Vanessa LoBue, Lewis Baker & Cat Thrasher
+To cite this article: Vanessa LoBue, Lewis Baker & Cat Thrasher (2017): Through the eyes of a
+child: preschoolers’ identification of emotional expressions from the child affective facial expression
+(CAFE) set, Cognition and Emotion, DOI: 10.1080/02699931.2017.1365046
+To link to this article: http://dx.doi.org/10.1080/02699931.2017.1365046
+Published online: 10 Aug 2017.
+Submit your article to this journal
+View related articles
+View Crossmark data
+Full Terms & Conditions of access and use can be found at
+http://www.tandfonline.com/action/journalInformation?journalCode=pcem20
+Download by: [173.56.101.121]
+Date: 10 August 2017, At: 05:46"
+324f39fb5673ec2296d90142cf9a909e595d82cf,Relationship Matrix Nonnegative Decomposition for Clustering,"Hindawi Publishing Corporation
+Mathematical Problems in Engineering
+Volume 2011, Article ID 864540, 15 pages
+doi:10.1155/2011/864540
+Research Article
+Relationship Matrix Nonnegative
+Decomposition for Clustering
+Ji-Yuan Pan and Jiang-She Zhang
+Faculty of Science and State Key Laboratory for Manufacturing Systems Engineering, Xi’an Jiaotong
+University, Xi’an Shaanxi Province, Xi’an 710049, China
+Correspondence should be addressed to Ji-Yuan Pan,
+Received 18 January 2011; Revised 28 February 2011; Accepted 9 March 2011
+Academic Editor: Angelo Luongo
+Copyright q 2011 J.-Y. Pan and J.-S. Zhang. This is an open access article distributed under
+the Creative Commons Attribution License, which permits unrestricted use, distribution, and
+reproduction in any medium, provided the original work is properly cited.
+Nonnegative matrix factorization (cid:2)NMF(cid:3) is a popular tool for analyzing the latent structure of non-
+negative data. For a positive pairwise similarity matrix, symmetric NMF (cid:2)SNMF(cid:3) and weighted
+NMF (cid:2)WNMF(cid:3) can be used to cluster the data. However, both of them are not very ef‌f‌icient
+for the ill-structured pairwise similarity matrix. In this paper, a novel model, called relationship"
+32cde90437ab5a70cf003ea36f66f2de0e24b3ab,The Cityscapes Dataset for Semantic Urban Scene Understanding,"The Cityscapes Dataset for Semantic Urban Scene Understanding
+Marius Cordts1,2
+Markus Enzweiler1
+Mohamed Omran3
+Rodrigo Benenson3
+Sebastian Ramos1,4
+Timo Rehfeld1,2
+Uwe Franke1
+Stefan Roth2
+Bernt Schiele3
+Daimler AG R&D, 2TU Darmstadt, 3MPI Informatics, 4TU Dresden
+www.cityscapes-dataset.net
+train/val – fine annotation – 3475 images
+train – coarse annotation – 20 000 images
+test – fine annotation – 1525 images"
+323d6d93b059372bbe26a86bad1b9d94b076f50e,(A) Vision for 2050 - Context-Based Image Understanding for a Human-Robot Soccer Match,"Electronic Communications of the EASST
+Volume 62 (2013)
+Specification, Transformation, Navigation
+Special Issue dedicated to Bernd Krieg-Br¨uckner
+on the Occasion of his 60th Birthday
+(A) Vision for 2050 – Context-Based Image Understanding for a
+Human-Robot Soccer Match
+Udo Frese, Tim Laue, Oliver Birbach, and Thomas R¨ofer
+9 pages
+Guest Editors: Till Mossakowski, Markus Roggenbach, Lutz Schr¨oder
+Managing Editors: Tiziana Margaria, Julia Padberg, Gabriele Taentzer
+ISSN 1863-2122"
+3274a13562029f36e2f0fad3270e3ecb9ca013bd,Real-time UAV Target Tracking System Based on Optical Flow and Particle Filter Integration,"Real-time UAV Target Tracking System Based on Optical Flow and
+Particle Filter Integration
+WESAM ASKAR
+Electrical Engineering
+Military Tech. College
+EGYPT
+OSAMA ELMOWAFY
+Computer Engineering
+New Cairo Academy
+ALIAA YOUSSIF
+Computer Engineering
+Helwan University
+GAMAL ELNASHAR
+Electrical Engineering
+Military Tech. College
+EGYPT
+EGYPT
+EGYPT"
+325c9f6f848407a22b86e3253cb7f29fac19e40c,Change Detection in Crowded Underwater Scenes - Via an Extended Gaussian Switch Model Combined with a Flux Tensor Pre-segmentation,"Change Detection in Crowded Underwater Scenes
+via an Extended Gaussian Switch Model combined with a Flux Tensor
+Pre-Segmentation
+Martin Radolko1,2, Fahimeh Farhadifard1,2 and Uwe von Lukas1,2
+Institute for Computer Science, University Rostock, Rostock, Germany
+Fraunhofer Institute for Computer Graphics Research IGD , Rostock, Germany
+{Martin.Radolko,
+Keywords:
+Change Detection, Background Subtraction, Video Segmentation, Video Segregation, Underwater Segmenta-
+tion, Flux Tensor"
+32d8194269faf6ae505a8d7937a3423e4830187e,Big Five Personality Recognition from Multiple Text Genres,"Big Five Personality Recognition from
+Multiple Text Genres
+Vitor Garcia dos Santos, Ivandr´e Paraboni, and Barbara Barbosa Claudino Silva
+University of S˜ao Paulo, School of Arts, Sciences and Humanities, S˜ao Paulo, Brazil"
+324cf94743359df3ada2f86ee8cd3bb6dccae695,FERA 2015 - Second Facial Expression Recognition and Analysis Challenge,"FG 2015
+FG 2015 Submission. CONFIDENTIAL REVIEW COPY. DO NOT DISTRIBUTE.
+FG 2015
+FERA 2015 - Second Facial Expression Recognition and Analysis
+Challenge
+Anonymous FG 2015 submission
+– DO NOT DISTRIBUTE –"
+321bd4d5d80abb1bae675a48583f872af3919172,Entropy-weighted feature-fusion method for head-pose estimation,"Wang et al. EURASIP Journal on Image and Video Processing (2016) 2016:44
+DOI 10.1186/s13640-016-0152-3
+EURASIP Journal on Image
+and Video Processing
+R EV I E W
+Entropy-weighted feature-fusion method
+for head-pose estimation
+Xiao-Meng Wang*, Kang Liu and Xu Qian
+Open Access"
+32575ffa69d85bbc6aef5b21d73e809b37bf376d,Measuring Biometric Sample Quality in Terms of Biometric Information,"-)5741/ *1-641+ 5)2- 37)16; 1 6-45 . *1-641+ 1.4)61
+;K=H=
+5?D B 1BH=JE 6A?DCO -CEAAHEC
+7ELAHIEJO B JJ=M=
+J=HE
+)*564)+6
+6DEI F=FAH = AM =FFH=?D J A=
+IKHA L=HE=JEI E >EAJHE? I=FA GK=EJO 9A >ACE MEJD
+JDA EJKEJE JD=J J = >EAJHE? I=FA ME HA
+JDA =KJ B EBH=JE =L=E=>A 1 H
+J A=IKHA JDA =KJ B EBH=JE MA
+>EAJHE? EBH=JE =I JDA E K?AHJ=EJO
+=>KJ JDA B = FAHI J = IAJ B >EAJHE? A=
+IKHAAJI 9A JDA IDM JD=J JDA >EAJHE? EBH=JE BH
+= FAHI =O >A >O JDA HA=JELA AJHFO D(p(cid:107)q)
+>AJMAA JDA FFK=JE BA=JKHA q JDA FAHII
+BA=JKHA p 6DA >EAJHE? EBH=JE BH = IOI
+JA EI JDA A= D(p(cid:107)q) BH = FAHII E JDA FFK=JE 1
+J FH=?JE?=O A=IKHA D(p(cid:107)q) MEJD I=
+FAI MA = =CHEJD MDE?D HACK=HEAI = /=KIIE="
+320ea4748b1f7e808eabedbedb75cce660122d26,"Detecting Avocados to Zucchinis: What Have We Done, and Where Are We Going?","Detecting avocados to zucchinis: what have we done, and where are we going?
+Olga Russakovsky1, Jia Deng1, Zhiheng Huang1, Alexander C. Berg2, Li Fei-Fei1
+Stanford University1 , UNC Chapel Hill2"
+325000c2ebe4fcfd08946aef91aee8bec22026a5,Multi-Label Learning With Fused Multimodal Bi-Relational Graph,"Multi-Label Learning With Fused
+Multimodal Bi-Relational Graph
+Jiejun Xu, Vignesh Jagadeesh, and B. S. Manjunath, Fellow, IEEE"
+32b9be86de4f82c5a43da2a1a0a892515da8910d,Robust False Positive Detection for Real-Time Multi-target Tracking,"Robust False Positive Detection for Real-Time
+Multi-Target Tracking
+Henrik Brauer, Christos Grecos, and Kai von Luck
+University of the West of Scotland
+University of Applied Sciences Hamburg"
+3265c7799f9d14e29de37b1e37aec4330cd1d747,Class-Specific Binary Correlograms for Object Recognition,"Class-Specific Binary Correlograms for Object
+Recognition
+Jaume Amores1, Nicu Sebe2, Petia Radeva3
+IMEDIA Research Group, INRIA, France
+Univ. of Amsterdam, The Netherlands
+Computer Vision Center, UAB, Spain"
+323fabb6cb4e74518fd4c7ad6ea5a1b2674e63d3,Object recognition based on radial basis function neural networks: Experiments with RGB-D camera embedded on mobile robots,"Object Recognition Based on Radial Basis Function
+Neural Networks: experiments with RGB-D camera
+embedded on mobile robots
+Saeed Gholami Shahbandi
+LISA - University of Angers
+Philippe Lucidarme
+LISA - University of Angers
+62 av. Notre Dame du Lac, 49000 Angers, France
+62 av. Notre Dame du Lac, 49000 Angers, France"
+3214ce1c8c86c0c4670e3f8b8f4351d8fa44434d,Deep Semantic Pyramids for Human Attributes and Action Recognition,"Deep Semantic Pyramids for Human Attributes
+and Action Recognition
+Fahad Shahbaz Khan1(B), Rao Muhammad Anwer2, Joost van de Weijer3,
+Michael Felsberg1, and Jorma Laaksonen2
+Computer Vision Laboratory, Link¨oping University, Link¨oping, Sweden
+Department of Information and Computer Science,
+Aalto University School of Science, Aalto, Finland
+Computer Vision Center, CS Department, Universitat Autonoma de Barcelona,
+Barcelona, Spain"
+32d6ee09bd8f1a7c42708d6dd8a5fb85ac4e08bc,Non-Interfering Effects of Active Post-Encoding Tasks on Episodic Memory Consolidation in Humans,"ORIGINAL RESEARCH
+published: 29 March 2017
+doi: 10.3389/fnbeh.2017.00054
+Non-Interfering Effects of Active
+Post-Encoding Tasks on Episodic
+Memory Consolidation in Humans
+Samarth Varma 1*, Atsuko Takashima 1,2, Sander Krewinkel 1, Maaike van Kooten 1,
+Lily Fu 1, W. Pieter Medendorp 1, Roy P. C. Kessels 1 and Sander M. Daselaar 1
+Donders Institute for Brain, Cognition and Behaviour, Radboud University, Nijmegen, Netherlands, 2Department of
+Neurobiology of Language, Max Planck Institute for Psycholinguistics, Nijmegen, Netherlands
+So far, studies that investigated interference effects of post-learning processes on
+episodic memory consolidation in humans have used tasks involving only complex and
+meaningful information. Such tasks require reallocation of general or encoding-specific
+resources away from consolidation-relevant activities. The possibility that interference
+can be elicited using a task that heavily taxes our limited brain resources, but has
+low semantic and hippocampal related long-term memory processing demands, has
+never been tested. We address this question by investigating whether consolidation
+could persist in parallel with an active, encoding-irrelevant, minimally semantic task,
+regardless of its high resource demands for cognitive processing. We distinguish the
+impact of such a task on consolidation based on whether it engages resources that"
+32f0c95cee39eba143452d6a0fe93283575257e6,Generative Adversarial Networks for Extreme Learned Image Compression,"GENERATIVE ADVERSARIAL NETWORKS FOR
+EXTREME LEARNED IMAGE COMPRESSION
+Eirikur Agustsson∗, Michael Tschannen∗, Fabian Mentzer∗, Radu Timofte & Luc Van Gool
+{aeirikur, mentzerf, radu.timofte,
+ETH Zurich"
+32728e1eb1da13686b69cc0bd7cce55a5c963cdd,Automatic Facial Emotion Recognition Method Based on Eye Region Changes,"Automatic Facial Emotion Recognition Method Based on Eye
+Region Changes
+Faculty of Electrical and Computer Engineering, Tarbiat Modares University, Tehran, Iran
+Mina Navraan
+Nasrollah Moghadam Charkari*
+Faculty of Electrical and Computer Engineering, Tarbiat Modares University, Tehran, Iran
+Muharram Mansoorizadeh
+Faculty of Electrical and Computer Engineering, Bu-Ali Sina University, Hamadan, Iran
+Received: 19/Apr/2015 Revised: 19/Mar/2016 Accepted: 19/Apr/2016"
+32ef19e90e7834ec09ef19fcef7cd2aa6eff85a9,Modeling Natural Images Using Gated MRFs,"JOURNAL OF PAMI, VOL. ?, NO. ?, JANUARY 20??
+Modeling Natural Images Using Gated MRFs
+Marc’Aurelio Ranzato, Volodymyr Mnih, Joshua M. Susskind, Geoffrey E. Hinton"
+326df1b94624b7958cff0f7e3d16e612ea9d7e4d,Similarity Rank Correlation for Face Recognition Under Unenrolled Pose,"Similarity Rank Correlation for Face
+Recognition Under Unenrolled Pose
+Marco K. M¨uller, Alexander Heinrichs, Andreas H.J. Tewes,
+Achim Sch¨afer, and Rolf P. W¨urtz
+Institut f¨ur Neuroinformatik, Ruhr-Universit¨at, D–44780 Bochum, Germany"
+323cd51bc18c700fa88044dd24ae663a7eabaa68,Utilizing student activity patterns to predict performance,"Casey and Azcona International Journal of Educational Technology
+in Higher Education (2017) 14:4
+DOI 10.1186/s41239-017-0044-3
+R ES EAR CH A R T I C LE
+Utilizing student activity patterns to predict
+performance
+Kevin Casey1* and David Azcona2
+Open Access
+* Correspondence:
+Maynooth University, Maynooth,
+Ireland
+Full list of author information is
+available at the end of the article"
+32c6086b1605698c8b775b6920741981e85b217d,Designing and sharing activity recognition systems across platforms: methods from wearable computing,"IEEE RAM - SPECIAL ISSUE TOWARDS A WWW FOR ROBOTS
+Designing and sharing activity recognition systems
+across platforms: methods from wearable computing
+Daniel Roggen, Member, IEEE, and St´ephane Magnenat, Member, IEEE, and Markus Waibel, Member, IEEE,
+and Gerhard Tr¨oster, Senior Member, IEEE"
+321fbbe7da848b602f376219ed9aed6a7f4b7f57,Effective Use of Frequent Itemset Mining for Image Classification,"Effective Use of Frequent Itemset Mining for
+Image Classification
+Basura Fernando1, Elisa Fromont2, and Tinne Tuytelaars1
+KU Leuven, ESAT-PSI, IBBT (Belgium)
+University of Saint-Etienne(France)"
+324d82129642f84838be71bd7401f38c80fb87d7,Adaptive Mixtures of Factor Analyzers,"Adaptive Mixtures of Factor Analyzers
+Heysem Kayaa,∗, Albert Ali Salaha
+Department of Computer Engineering
+Bo˘gazi¸ci University, 34342, Bebek, ˙Istanbul"
+324b9369a1457213ec7a5a12fe77c0ee9aef1ad4,Dynamic Facial Analysis: From Bayesian Filtering to Recurrent Neural Network,"Dynamic Facial Analysis: From Bayesian Filtering to Recurrent Neural Network
+Jinwei Gu Xiaodong Yang Shalini De Mello Jan Kautz
+NVIDIA"
+3295ec2e52cd83cec75fc7c7064a843756b4d1ee,An Efficient Pedestrian Detection Approach Using a Novel Split Function of Hough Forests,"Regular Paper
+Journal of Computing Science and Engineering,
+Vol. 8, No. 4, December 2014, pp. 207-214
+An Efficient Pedestrian Detection Approach Using a Novel Split
+Function of Hough Forests
+Trung Dung Do, Thi Ly Vu, Van Huan Nguyen, Hakil Kim*, and Chongho Lee
+School of Information and Communication Engineering, Inha University, Incheon, Korea
+{dotrungdung, vuthily, {hikim,"
+32df63d395b5462a8a4a3c3574ae7916b0cd4d1d,Facial expression recognition using ensemble of classifiers,"978-1-4577-0539-7/11/$26.00 ©2011 IEEE
+ICASSP 2011"
+35570297681daa3973498eabead361d0be961672,Configuration Estimates Improve Pedestrian Finding,"Configuration Estimates Improve Pedestrian Finding
+Duan Tran∗
+U.Illinois at Urbana-Champaign
+Urbana, IL 61801 USA
+D.A. Forsyth
+U.Illinois at Urbana-Champaign
+Urbana, IL 61801 USA"
+35af45f799c65d21bbb3cd24f666de861bad33b0,Multi-Target Tracking by Discrete-Continuous Energy Minimization,"Multi-Target Tracking by
+Discrete-Continuous Energy Minimization
+Anton Milan, Member, IEEE, Konrad Schindler, Senior Member, IEEE and
+Stefan Roth, Member, IEEE,"
+35308a3fd49d4f33bdbd35fefee39e39fe6b30b7,Efficient and effective human action recognition in video through motion boundary description with a compact set of trajectories,"biblio.ugent.be The UGent Institutional Repository is the electronic archiving and dissemination platform for allUGent research publications. Ghent University has implemented a mandate stipulating that allacademic publications of UGent researchers should be deposited and archived in this repository.Except for items where current copyright restrictions apply, these papers are available in OpenAccess. This item is the archived peer-reviewed author-version of: Efficient and effective human action recognition in video through motion boundary description witha compact set of trajectories Jeong-Jik Seo, Jisoo Son, Hyung-Il Kim, Wesley De Neve, and Yong Man Ro In: 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition,1, 1-6, 2015. To refer to or to cite this work, please use the citation to the published version: Seo, J., Son, J., Kim, H., De Neve, W., and Ro, Y. M. (2015). Efficient and effective human actionrecognition in video through motion boundary description with a compact set of trajectories. 11thIEEE International Conference and Workshops on Automatic Face and Gesture Recognition 1 1-6.http://dx.doi.org/10.1109/FG.2015.7163123"
+3535ba0cba9bf03443d52cbfc9a87090ca2e5d49,Supplementary Material : Synthesized Classifiers for Zero-Shot Learning,"Supplementary Material:
+Synthesized Classifiers for Zero-Shot Learning
+Soravit Changpinyo∗, Wei-Lun Chao∗
+U. of Southern California
+Los Angeles, CA
+Boqing Gong
+U. of Central Florida
+Orlando, FL
+schangpi,
+Fei Sha
+U. of California
+Los Angeles, CA
+In this Supplementary Material, we provide details
+omitted in the main text.
+• Section 1: cross-validation strategies (Section 3.2
+of the main paper).
+• Section 2: learning metrics for semantic similarity
+(Section 3.1 of the main paper).
+• Section 3: details on experimental setup (Sec-
+tion 4.1 of the main paper)."
+35c0220ab8a8281129a00ac32ef2f488fb562eb7,Part Annotations via Pairwise Correspondence,"Part Annotations via Pairwise Correspondence
+Subhransu Maji Gregory Shakhnarovich
+{smaji,
+Toyota Technological Institute at Chicago, Chicago, IL"
+3514140d9c2e692abed0aebe0531f78c250f5806,Discriminative Transformation Learning for Fuzzy Sparse Subspace Clustering,"Discriminative Transformation Learning for Fuzzy
+Sparse Subspace Clustering
+Zaidao Wen, Biao Hou, Member, IEEE, Qian Wu and Licheng Jiao, Senior Member, IEEE"
+352d61eb66b053ae5689bd194840fd5d33f0e9c0,Analysis Dictionary Learning based Classification: Structure for Robustness,"Analysis Dictionary Learning based
+Classification: Structure for Robustness
+Wen Tang, Ashkan Panahi, Hamid Krim, and Liyi Dai"
+3538d2b5f7ab393387ce138611ffa325b6400774,A DSP-based approach for the implementation of face recognition algorithms,"A DSP-BASED APPROACH FOR THE IMPLEMENTATION OF FACE RECOGNITION
+ALGORITHMS
+A. U. Batur
+B. E. Flinchbaugh
+M. H. Hayes IIl
+Center for Signal and Image Proc.
+Georgia Inst. Of Technology
+Atlanta, GA
+Imaging and Audio Lab.
+Texas Instruments
+Dallas, TX
+Center for Signal and Image Proc.
+Georgia Inst. Of Technology
+Atlanta, CA"
+3504907a2e3c81d78e9dfe71c93ac145b1318f9c,Unconstrained Still/Video-Based Face Verification with Deep Convolutional Neural Networks,"Noname manuscript No.
+(will be inserted by the editor)
+Unconstrained Still/Video-Based Face Verification with Deep
+Convolutional Neural Networks
+Jun-Cheng Chen∗
+Kumar∗ · Ching-Hui Chen∗ · Vishal M. Patel · Carlos D. Castillo ·
+Rama Chellappa
+· Rajeev Ranjan∗ · Swami Sankaranarayanan∗ · Amit
+Received: date / Accepted: date"
+35692e80fa2fc17a1d37a40b3d4ffca28a1bcc7b,Appearance-based people recognition by local dissimilarity representations,"Appearance-based People Recognition by Local
+Dissimilarity Representations
+Riccardo Satta, Giorgio Fumera, Fabio Roli
+Dept. of Electrical and Electronic Engineering, University of Cagliari
+Piazza d’Armi, 09123 Cagliari, Italy
+riccardo.satta, fumera,"
+35fe83665c61adb513781c7208b92706ae2a1578,Answering Visual What-If Questions: From Actions to Predicted Scene Descriptions,
+35b1c1f2851e9ac4381ef41b4d980f398f1aad68,Geometry Guided Convolutional Neural Networks for Self-Supervised Video Representation Learning,"Geometry Guided Convolutional Neural Networks for
+Self-Supervised Video Representation Learning
+Chuang Gan1, Boqing Gong2, Kun Liu3, Hao Su 4, Leonidas J. Guibas 5
+MIT-IBM Watson AI Lab , 2 Tencent AI Lab, 3 BUPT, 4 UCSD, 5 Stanford University"
+359a4142f6a55a58a3e18628e3ee52c76744fcb0,Prevalence of face recognition deficits in middle childhood.,"ISSN: 1747-0218 (Print) 1747-0226 (Online) Journal homepage: http://www.tandfonline.com/loi/pqje20
+Prevalence of face recognition deficits in middle
+childhood
+Rachel J Bennetts, Ebony Murray, Tian Boyce & Sarah Bate
+To cite this article: Rachel J Bennetts, Ebony Murray, Tian Boyce & Sarah Bate (2016):
+Prevalence of face recognition deficits in middle childhood, The Quarterly Journal of
+Experimental Psychology, DOI: 10.1080/17470218.2016.1167924
+To link to this article: http://dx.doi.org/10.1080/17470218.2016.1167924
+View supplementary material
+Accepted author version posted online: 21
+Mar 2016.
+Submit your article to this journal
+View related articles
+View Crossmark data
+Full Terms & Conditions of access and use can be found at
+http://www.tandfonline.com/action/journalInformation?journalCode=pqje20
+Download by: [Rachel Bennetts]
+Date: 22 March 2016, At: 07:06"
+35058a8166a8fa4479167ba33b3010cc8c839f44,A Study on Gait-Based Gender Classification,"A Study on Gait-Based Gender Classification
+Shiqi Yu, Member, IEEE, Tieniu Tan, Fellow, IEEE,
+Kaiqi Huang, Member, IEEE, Kui Jia, and Xinyu Wu"
+351c02d4775ae95e04ab1e5dd0c758d2d80c3ddd,ActionSnapping: Motion-Based Video Synchronization,"ActionSnapping: Motion-based Video
+Synchronization
+Jean-Charles Bazin and Alexander Sorkine-Hornung
+Disney Research"
+35c0954acde9c86df8bbcb6edccbcd702796f5eb,"Multimodal Database of Emotional Speech , Video and Gestures","World Academy of Science, Engineering and Technology
+International Journal of Computer and Information Engineering
+Vol:12, No:10, 2018
+Multimodal Database of Emotional Speech, Video
+and Gestures
+Tomasz Sapi´nski, Dorota Kami´nska, Adam Pelikant, Egils Avots, Cagri Ozcinar, Gholamreza Anbarjafari"
+35e4b6c20756cd6388a3c0012b58acee14ffa604,Gender Classification in Large Databases,"Gender Classification in Large Databases
+E. Ram´on-Balmaseda, J. Lorenzo-Navarro, and M. Castrill´on-Santana (cid:63)
+Universidad de Las Palmas de Gran Canaria
+SIANI
+Spain"
+357df3ee0f0c30d5c8abc5a1bdf70122322d6fbd,Object Detectors Emerge in Deep Scene CNNs,"Under review as a conference paper at ICLR 2015
+OBJECT DETECTORS EMERGE IN DEEP SCENE CNNS
+Bolei Zhou, Aditya Khosla, Agata Lapedriza, Aude Oliva, Antonio Torralba
+Department of Computer Science and Artificial Intelligence, MIT"
+35be5bea87c465c97127c64919d115e235d62e82,"The automatic detection of chronic pain-related expression : requirements , challenges and a multimodal dataset","IEEE TRANSACTIONS ON JOURNAL NAME, MANUSCRIPT ID
+The automatic detection of chronic pain-
+related expression: requirements, challenges
+and a multimodal dataset
+Min S. H. Aung, Sebastian Kaltwang, Bernardino Romera-Paredes, Brais Martinez, Aneesha
+Singh, Matteo Cella, Michel Valstar, Hongying Meng, Andrew Kemp, Moshen Shafizadeh, Aaron
+C. Elkins, Natalie Kanakam, Amschel de Rothschild, Nick Tyler, Paul J. Watson, Amanda C. de C.
+Williams, Maja Pantic, and Nadia Bianchi-Berthouze*
+face videos, head mounted and room audio signals,"
+35f3c4012e802332faf0a1426e9acf8365601551,Bidirectional Conditional Generative Adversarial Networks,"Bidirectional Conditional
+Generative Adversarial Networks
+Ayush Jaiswal, Wael AbdAlmageed, Yue Wu, and Premkumar Natarajan
+USC Information Sciences Institute, Marina del Rey, CA, USA
+{ajaiswal, wamageed, yue wu,"
+355de7460120ddc1150d9ce3756f9848983f7ff4,Midge: Generating Image Descriptions From Computer Vision Detections,"Proceedings of the 13th Conference of the European Chapter of the Association for Computational Linguistics, pages 747–756,
+Avignon, France, April 23 - 27 2012. c(cid:13)2012 Association for Computational Linguistics"
+35e808424317cf03b51516df7d083f45791311ae,A Survey for Action Recognition Research,"A Survey for Action Recognition Research
+Yuancheng Ye"
+355c8c0dbd80de9d23affb37ac102179b6b2a908,“A Distorted Skull Lies in the Bottom Center...” Identifying Paintings from Text Descriptions,"Anupam Guha, Mohit Iyyer, and Jordan Boyd-Graber. A Distorted Skull Lies in the Bottom Center:
+Identifying Paintings from Text Descriptions. NAACL Human-Computer Question Answering Workshop, 2016.
+Title = {A Distorted Skull Lies in the Bottom Center: Identifying Paintings from Text Descriptions},
+Author = {Anupam Guha and Mohit Iyyer and Jordan Boyd-Graber},
+Booktitle = {NAACL Human-Computer Question Answering Workshop},
+Year = {2016},
+Location = {San Diego, CA},
+Url = {docs/2016_naacl_paintings.pdf},
+Links:
+• Data [http://www.cs.umd.edu/~aguha/data/paintdata.rar]
+Downloaded from http://cs.colorado.edu/~jbg/docs/2016_naacl_paintings.pdf"
+35035f79256a3f19a111fff34df6d14876d83fab,Satyam: Democratizing Groundtruth for Machine Vision,"SATYAM: DEMOCRATIZING GROUNDTRUTH FOR MACHINE VISION
+Hang Qiu?, Krishna Chintalapudi†, Ramesh Govindan?"
+35457de70ea13415b8abd3898a4a83021946501f,Learning Robust and Discriminative Subspace With Low-Rank Constraints,"Calhoun: The NPS Institutional Archive
+Faculty and Researcher Publications
+Funded by Naval Postgraduate School
+Learning Robust and Discriminative Subspace
+With Low-Rank Constraints
+Sheng Li
+http://hdl.handle.net/10945/52406"
+3506ef7168e07840187ec978b47f3a05a753101d,Robust 3D Face Landmark Localization Based on Local Coordinate Coding,"Robust 3D Face Landmark Localization based on
+Local Coordinate Coding
+Mingli Song, Senior Member, IEEE, Dacheng Tao, Senior Member, IEEE, Shengpeng Sun, Chun Chen, and
+Stephen J. Maybank Fellow, IEEE,"
+3575d74eb548c3187ec5b0d27383ac966b9d7110,Feature Extraction and Face Recognition through Neural Network,"International Journal of Advanced Computer Technology (IJACT)
+ISSN:2319-7900
+Feature Extraction and Face Recognition through Neural
+Network
+Sanjay Kumar Dekate,Research scholar, Dr. C. V. Raman University, Bilaspur, India
+Dr. Anupam Shukla,Professor, ABV-IIITM, Gwalior, India"
+353480b21d5745590db5f70b016a27e25f5b9aec,Cross-Modal and Hierarchical Modeling of Video and Text,"Cross-Modal and Hierarchical Modeling of Video
+and Text
+Bowen Zhang(cid:63)1, Hexiang Hu(cid:63)1, and Fei Sha2
+Dept. of Computer Science, U. of Southern California, Los Angeles, CA 90089
+Netflix, 5808 Sunset Blvd, Los Angeles, CA 90028"
+35410a58514cd5fd66d9c43d42e8222526170c1b,Shared mechanism for emotion processing in adolescents with and without autism,"Received: 04 August 2016
+Accepted: 05 January 2017
+Published: 20 February 2017
+Shared mechanism for emotion
+processing in adolescents with and
+without autism
+Christina Ioannou1, Marwa El Zein1, Valentin Wyart1, Isabelle Scheid2,3,
+Frédérique Amsellem3,4, Richard Delorme3,4, Coralie Chevallier1,* & Julie Grèzes1,*
+Although, the quest to understand emotional processing in individuals with Autism Spectrum Disorders
+(ASD) has led to an impressive number of studies, the picture that emerges from this research remains
+inconsistent. Some studies find that Typically Developing (TD) individuals outperform those with
+ASD in emotion recognition tasks, others find no such difference. In this paper, we move beyond
+focusing on potential group differences in behaviour to answer what we believe is a more pressing
+question: do individuals with ASD use the same mechanisms to process emotional cues? To this end,
+we rely on model-based analyses of participants’ accuracy during an emotion categorisation task in
+which displays of anger and fear are paired with direct vs. averted gaze. Behavioural data of 20 ASD
+and 20 TD adolescents revealed that the ASD group displayed lower overall performance. Yet, gaze
+direction had a similar impact on emotion categorisation in both groups, i.e. improved accuracy for
+salient combinations (anger-direct, fear-averted). Critically, computational modelling of participants’
+behaviour reveals that the same mechanism, i.e. increased perceptual sensitivity, underlies the"
+3596c23a0f13c36d2c71c4cba4351363954dd02a,PathFinder: An autonomous mobile robot guided by Computer Vision,"PathFinder: An autonomous mobile robot guided by Computer
+Vision
+Andre R. de Geus1,2, Marcelo H. Stoppa1, Sergio F. da Silva1,2
+Modeling and Optimization Program, Federal University of Goias, Catalao, Goias, Brazil
+Biotechnology Institute, Federal University of Goias, Catalao, Goias, Brazil
+Email:"
+35f084ddee49072fdb6e0e2e6344ce50c02457ef,A bilinear illumination model for robust face recognition,"A Bilinear Illumination Model
+for Robust Face Recognition
+The Harvard community has made this
+article openly available. Please share how
+this access benefits you. Your story matters
+Citation
+Lee, Jinho, Baback Moghaddam, Hanspeter Pfister, and Raghu
+Machiraju. 2005. A bilinear illumination model for robust face
+recognition. Proceedings of the Tenth IEEE International Conference
+on Computer Vision: October 17-21, 2005, Beijing, China. 1177-1184.
+Los Alamitos, C.A.: IEEE Computer Society.
+Published Version
+doi:10.1109/ICCV.2005.5
+Citable link
+http://nrs.harvard.edu/urn-3:HUL.InstRepos:4238979
+Terms of Use
+This article was downloaded from Harvard University’s DASH
+repository, and is made available under the terms and conditions
+applicable to Other Posted Material, as set forth at http://
+nrs.harvard.edu/urn-3:HUL.InstRepos:dash.current.terms-of-"
+3533a7714b19396bba8297e0ca22f85ac68ca18a,Dense Captioning with Joint Inference and Visual Context,"Dense Captioning with Joint Inference and Visual Context
+Linjie Yang
+Kevin Tang
+Jianchao Yang
+Li-Jia Li
+{linjie.yang, kevin.tang,
+Snap Inc."
+35e730f7967155b9394f9e5d3cadf2b955ce9a7b,Deep Affinity Network for Multiple Object Tracking,"JOURNAL OF LATEX CLASS FILES, VOL. 13, NO. 9, SEPTEMBER 2017
+Deep Affinity Network
+for Multiple Object Tracking
+ShiJie Sun, Naveed Akhtar, HuanSheng Song, Ajmal Mian, Mubarak Shah"
+3521904cced380b849325d6fda2a4d855edbe405,Finding Images of Rare and Ambiguous Entities,"Finding Images of Rare and
+Ambiguous Entities
+Bilyana Taneva
+Mouna Kacimi
+Gerhard Weikum
+MPI–I–2011–5–002
+May 2011"
+353a89c277cca3e3e4e8c6a199ae3442cdad59b5,Learning from Multiple Views of Data,
+35e0256b33212ddad2db548484c595334f15b4da,Attentive Fashion Grammar Network for Fashion Landmark Detection and Clothing Category Classification,"Attentive Fashion Grammar Network for
+Fashion Landmark Detection and Clothing Category Classification
+Wenguan Wang∗1,2, Yuanlu Xu∗2, Jianbing Shen†1, and Song-Chun Zhu2
+Beijing Lab of Intelligent Information Technology, School of Computer Science, Beijing Institute of Technology, China
+Department of Computer Science and Statistics, University of California, Los Angeles, USA"
+35d94887e4eb075f2603b2c69b19d31471351ff7,People detection and tracking from aerial thermal views,
+3555d849b85e9416e9496c9976084b0e692b63cd,Towards Effective Gans,"Under review as a conference paper at ICLR 2018
+TOWARDS EFFECTIVE GANS
+FOR DATA DISTRIBUTIONS WITH DIVERSE MODES
+Anonymous authors
+Paper under double-blind review"
+3597ca03bded3717f5c88273e4b7dbf24545ff83,Mouse Pose Estimation From Depth Images,"Mouse Pose Estimation From Depth Images
+Ashwin Nanjappa1, Li Cheng∗1, Wei Gao1, Chi Xu1, Adam Claridge-Chang2, and
+Zoe Bichler3
+Bioinformatics Institute, A*STAR, Singapore
+Institute of Molecular and Cell Biology, A*STAR, Singapore
+National Neuroscience Institute, Singapore"
+35e6f6e5f4f780508e5f58e87f9efe2b07d8a864,Summarization of User-Generated Sports Video by Using Deep Action Recognition Features,"This paper is a preprint (IEEE accepted status). IEEE copyright notice. 2018 IEEE.
+Personal use of this material is permitted. Permission from IEEE must be obtained for all
+other uses, in any current or future media, including reprinting/republishing this material for
+dvertising or promotional purposes, creating new collective works, for resale or redistribu-
+tion to servers or lists, or reuse of any copyrighted.
+A. Tejero-de-Pablos, Y. Nakashima, T. Sato, N. Yokoya, M. Linna and E. Rahtu, ”Sum-
+marization of User-Generated Sports Video by Using Deep Action Recognition Features,” in
+doi: 10.1109/TMM.2018.2794265
+keywords: Cameras; Feature extraction; Games; Hidden Markov models; Semantics;
+Three-dimensional displays; 3D convolutional neural networks; Sports video summarization;
+ction recognition; deep learning; long short-term memory; user-generated video,
+URL: http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8259321&isnumber=4456689"
+35800a537017803dd08274710388734db66b54f0,Sliced Wasserstein Generative Models,"Sliced Wasserstein Generative Models
+Jiqing Wu * 1 Zhiwu Huang * 1 Wen Li 1 Janine Thoma 1 Luc Van Gool 1 2"
+35e87e06cf19908855a16ede8c79a0d3d7687b5c,Strategies for Multi-View Face Recognition for Identification of Human Faces: A Review,"Strategies for Multi-View Face Recognition for
+Identification of Human Faces: A Review
+Pritesh G. Shah
+Department of Computer Science
+Mahatma Gandhi Shikshan Mandal’s,
+Arts, Science and Commerce College, Chopda
+Dist: Jalgaon (M.S)
+Dr. R.R.Manza
+Department of Computer Science and IT
+Dr. Babasaheb Ambedkar Marathwada University
+Aurangabad."
+352110778d2cc2e7110f0bf773398812fd905eb1,Matrix Completion for Weakly-Supervised Multi-Label Image Classification,"TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. X, NO. X, JUNE 2014
+Matrix Completion for Weakly-supervised
+Multi-label Image Classification
+Ricardo Cabral, Fernando De la Torre, João P. Costeira, Alexandre Bernardino"
+354ddc8976a762ee03fb78b73adc3b5312e5f2a5,Accurate Eye Center Location through Invariant Isocentric Patterns,"This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication.
+Accurate Eye Center Location through Invariant
+Isocentric Patterns
+Roberto Valenti, Student Member, IEEE, and Theo Gevers, Member, IEEE,"
+351de1f7862bd13a82fcfcaa698b4efd53bc2c35,Automatic 3D face verification from range data,- 1930-7803-7663-3/03/$17.00 ©2003 IEEEICASSP 2003(cid:224)
+35b9ded80ce2b30ee115b8198d146890b9028d51,Regularizing max-margin exemplars by reconstruction and generative models,"Regularizing Max-Margin Exemplars by Reconstruction and Generative Models
+Jose C. Rubio and Bj¨orn Ommer
+Heidelberg Collaboratory for Image Processing
+IWR, Heidelberg University, Germany"
+694dda2a9f6d86c4bf3f57d85dfd376e2067ec62,How Much Face Information Is Needed?,"HOW MUCH FACE INFORMATION IS NEEDED?
+P2CA:
+Davide Onofrio*, Antonio Rama+, Francesc Tarres+, Stefano Tubaro*
+*Dipartimento di Elettronica e Informazione - Politecnico di Milano
++Department Teoria del Senyal i Comunicacions de la Universitat Politècnica de Catalunya"
+69c8b0ec77d3164df2069a5133780a36ec8e91ad,Unsupervised 3D Reconstruction from a Single Image via Adversarial Learning,"Unsupervised 3D Reconstruction from a Single Image via Adversarial Learning
+Lingjing Wang
+NYU Multimedia and Visual Computing Lab
+Courant Institute of Mathematical Science
+NYU Tandon School of Engineering, USA
+Yi Fang ∗
+NYU Multimedia and Visual Computing Lab
+Dept. of ECE, NYU Abu Dhabi, UAE
+Dept. of ECE, NYU Tandon School of Engineering, USA"
+693905c29feb7f9be3517308c8a9c2dc68aa8682,Self-supervised CNN for Unconstrained 3D Facial Performance Capture from an RGB-D Camera,"Self-supervised CNN for Unconstrained 3D Facial
+Performance Capture from an RGB-D Camera
+Yudong Guo, Juyong Zhang†, Lin Cai, Jianfei Cai and Jianmin Zheng"
+699a7c88a6d226f59c7a5619b3cfad714415c31a,"Incorporating Luminance, Depth and Color Information by Fusion-based Networks for Semantic Segmentation","Incorporating Luminance, Depth and Color Information by
+Fusion-based Networks for Semantic Segmentation
+Shao-Yuan Lo
+Shang-Wei Hung
+National Chiao Ting University, UC San Diego
+National Chiao Ting University
+Figure 1: Flowchart of the proposed semantic segmentation
+system. Y: luminance information.
+omplexity. Lately, DenseNet [11] designs the invention of
+dding dense connections between each layer, which
+enhances the information flow in networks, and thus it
+previously
+outperforms many
+network
+rchitectures including ResNet [12].
+proposed
+With the help of depth sensors such as Kinect, depth
+maps can be obtained along with RGB images. Since the
+depth channel provides complementary information to the
+RGB channels, containing the depth information is believed"
+6911686f00c99c51c21f057c45d561c88027f676,Articulated pose estimation with parts connectivity using discriminative local oriented contours,"Articulated Pose Estimation with Parts Connectivity
+using Discriminative Local Oriented Contours
+Norimichi Ukita
+Nara Institute of Science and Technology"
+6937fe93e6238ee21904c172809bea0086da4570,Contour Grouping Based on Contour-Skeleton Duality,"Int J Comput Vis (2009) 83: 12–29
+DOI 10.1007/s11263-009-0208-2
+Contour Grouping Based on Contour-Skeleton Duality
+Nagesh Adluru · Longin Jan Latecki
+Received: 30 May 2008 / Accepted: 6 January 2009 / Published online: 27 January 2009
+© Springer Science+Business Media, LLC 2009"
+6903496ee5d4c24ca5f3f18211f406e0ba8442d6,Multi-Mapping Image-to-Image Translation with Central Biasing Normalization,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2018
+Multi-Mapping Image-to-Image Translation with
+Central Biasing Normalization
+Xiaoming Yu, Zhenqiang Ying, Student Member, IEEE, Thomas Li, Shan Liu, and Ge Li, Member, IEEE,"
+69ff40fd5ce7c3e6db95a2b63d763edd8db3a102,Human Age Estimation via Geometric and Textural Features,"HUMAN AGE ESTIMATION VIA GEOMETRIC AND TEXTURAL
+FEATURES
+Merve KILINC1 and Yusuf Sinan AKGUL2
+TUBITAK BILGEM UEKAE, Anibal Street, 41470, Gebze, Kocaeli, Turkey
+GIT Vision Lab, http://vision.gyte.edu.tr/, Department of Computer Engineering, Gebze Institute of Technology, 41400,
+Kocaeli, Turkey
+Keywords:
+Age estimation:age classification:geometric features:LBP:Gabor:LGBP:cross ratio:FGNET:MORPH"
+6900bb437679dd0b0c5cea0acdaa9429d0127d38,Self-Erasing Network for Integral Object Attention,"Self-Erasing Network for Integral Object Attention
+Qibin Hou
+Peng-Tao Jiang
+Colledge of Computer Science, Nankai University
+Yunchao Wei
+Urbana-Champaign, IL, USA
+Colledge of Computer Science, Nankai University
+Ming-Ming Cheng ∗"
+69447482c6d7d0fde4001231ca84c31f866a2d5d,Survey of Advanced Facial Feature Tracking and Facial Expression Recognition,"ISSN (Print) : 2319-5940
+ISSN (Online) : 2278-1021
+International Journal of Advanced Research in Computer and Communication Engineering
+Vol. 2, Issue 10, October 2013
+Survey of Advanced Facial Feature Tracking and
+Facial Expression Recognition
+Karthick.K1, J.Jasmine2
+PG Scholar, Department of Computer science and Technology, Kalaignar Karunanidhi Institute of Technology,
+Coimbatore, Tamilnadu, India1
+Assistant Professor, Department of Computer science and Technology, Kalaignar Karunanidhi Institute of Technology,
+Coimbatore, Tamilnadu, India2"
+6957baa0db5576997aef9de43b93fe8fd4d07632,Identifica\c{c}\~ao autom\'atica de picha\c{c}\~ao a partir de imagens urbanas,"Identificac¸˜ao autom´atica de pichac¸˜ao a partir de
+imagens urbanas
+Eric K. Tokuda and Roberto M. Cesar-Jr.
+Institute of Mathematics and Statistics
+University of S˜ao Paulo (USP)
+Brazil
+Claudio Silva
+Tandon School of Engineering
+New York University (NYU)"
+69a55c30c085ad1b72dd2789b3f699b2f4d3169f,Automatic Happiness Strength Analysis of a Group of People using Facial Expressions,"International Journal of Computer Trends and Technology (IJCTT) – Volume 34 Number 3 - April 2016
+Automatic Happiness Strength Analysis of a
+Group of People using Facial Expressions
+Sagiri Prasanthi#1, Maddali M.V.M. Kumar*2,
+#1PG Student, #2Assistant Professor
+#1, #2Department of MCA, St. Ann’s College of Engineering & Technology, Andhra Pradesh, India
+is a collective concern"
+695e4c975740d2aedcfc42d7ec445b4b2b56cbeb,Principal Component Analysis: An Efficient Facial Feature Extraction Technique,"SSRG International Journal of Electronics and Communication Engineering - (ICRTESTM) - Special Issue – April 2017
+Principal Component Analysis: An Efficient
+Facial Feature Extraction Technique
+Research scholar, ECE Dept, JJTU, Rajasthan, India, 333001(Associate Professor, SVIT, Secunderabad-500
+Drakshayani Desai, 2Dr. Ramakrishna Seemakurti.
+Research Guide (Pricipal,, SVIT, Secunderabd, India, 500003) (Approved Research Guide, JJTU, Jhunjhunu-
+33001, Rajasthan, India)"
+69aef3ce50967a00c568849fed630c573f6cd1eb,3-D Face Analysis and Identification Based on Statistical Shape Modelling,"-D Face Analysis and Identification Based on Statistical Shape
+Modelling
+Wei Quan*, Charlie Frowd †
+*School of Computing, Engineering and Physical Sciences
+University of Central Lancashire, Preston PR1 2HE, UK.
+Department of Psychology
+University of Winchester, Winchester SO22 4NR, UK.
+Keywords: shape modelling, face analysis, identification."
+69d9b79757d76b73ed940754f4d05288b76eb8c3,Preschool Externalizing Behavior Predicts Gender-Specific Variation in Adolescent Neural Structure,"RESEARCH ARTICLE
+Preschool Externalizing Behavior Predicts
+Gender-Specific Variation in Adolescent
+Neural Structure
+Jessica Z. K. Caldwell1*¤, Jeffrey M. Armstrong2, Jamie L. Hanson1, Matthew J. Sutterer1,
+Diane E. Stodola1, Michael Koenigs2, Ned H. Kalin2, Marilyn J. Essex2☯, Richard
+J. Davidson1,2,3☯
+Department of Psychology, University of Wisconsin–Madison, Madison, Wisconsin, United States of
+America, 2 Department of Psychiatry, University of Wisconsin–Madison, Madison, Wisconsin, United States
+of America, 3 Center for Investigating Healthy Minds, University of Wisconsin–Madison, Madison,
+Wisconsin, United States of America
+☯ These authors contributed equally to this work.
+¤. Current address: Marquette General Hospital/Michigan State University, Marquette, MI, United States of
+America"
+6953911c6756ca70de1555df14a06f13305e1926,Author Profiling based on Text and Images: Notebook for PAN at CLEF 2018,"Author Profiling based on Text and Images
+Notebook for PAN at CLEF 2018
+Luka Stout, Robert Musters, and Chris Pool
+Anchormen, The Netherlands"
+69526cdf6abbfc4bcd39616acde544568326d856,Face Verification Using Template Matching,"[17] B. Moghaddam, T. Jebara, and A. Pentland, “Bayesian face recogni-
+tion,” Pattern Recognit., vol. 33, no. 11, pp. 1771–1782, Nov. 2000.
+[18] A. Nefian, “A hidden Markov model-based approach for face detection
+nd recognition,” Ph.D. dissertation, Dept. Elect. Comput. Eng. Elect.
+Eng., Georgia Inst. Technol., Atlanta, 1999.
+[19] P. J. Phillips et al., “Overview of the face recognition grand challenge,”
+presented at the IEEE CVPR, San Diego, CA, Jun. 2005.
+[20] H. T. Tanaka, M. Ikeda, and H. Chiaki, “Curvature-based face surface
+recognition using spherical correlation-principal direction for curved
+object recognition,” in Proc. Int. Conf. Automatic Face and Gesture
+Recognition, 1998, pp. 372–377.
+[21] M. Turk and A. Pentland, “Eigenfaces for recognition,” J. Cognit. Sci.,
+pp. 71–86, 1991.
+[22] V. N. Vapnik, Statistical Learning Theory. New York: Wiley, 1998.
+[23] W. Zhao, R. Chellappa, A. Rosenfeld, and P. Phillips, “Face recogni-
+tion: A literature survey,” ACM Comput. Surveys, vol. 35, no. 44, pp.
+99–458, 2003.
+[24] W. Zhao, R. Chellappa, and P. J. Phillips, “Subspace linear discrimi-
+nant analysis for face recognition,” UMD TR4009, 1999.
+Face Verification Using Template Matching"
+6971bdac5119c4cc1b6d92adac605e13f1bcd80f,Limiting the reconstruction capability of generative neural network using negative learning,"LIMITING THE RECONSTRUCTION CAPABILITY OF GENERATIVE NEURAL NETWORK
+USING NEGATIVE LEARNING
+Asim Munawar, Phongtharin Vinayavekhin and Giovanni De Magistris
+IBM Research - Tokyo"
+69dc87575b56ba7f60fa24bdd4fceabeeaf39a80,Decoding of nonverbal language in alcoholism: A perception or a labeling problem?,"tapraid5/ze6-adb/ze6-adb/ze600216/ze62965d15z
+xppws S⫽1
+/8/16
+6:36 Art: 2015-0668
+APA NLM
+Psychology of Addictive Behaviors
+016, Vol. 30, No. 2, 175–183
+0893-164X/16/$12.00
+© 2016 American Psychological Association
+http://dx.doi.org/10.1037/adb0000147
+Decoding of Nonverbal Language in Alcoholism:
+A Perception or a Labeling Problem?
+Université Libre de Bruxelles and Centre Hospitalier
+Charles Kornreich
+Universitaire Brugmann
+Géraldine Petit and Heidi Rolin
+Université Libre de Bruxelles
+Elsa Ermer
+University of Maryland Baltimore
+Salvatore Campanella and Paul Verbanck"
+69ee78388e0f40941496ab92efe3e0fa065ad22e,Person Re-Identification with RGB-D Camera in Top-View Configuration through Multiple Nearest Neighbor Classifiers and Neighborhood Component Features Selection,"Article
+Person Re-Identification with RGB-D Camera in
+Top-View Configuration through Multiple Nearest
+Neighbor Classifiers and Neighborhood Component
+Features Selection
+Marina Paolanti *
+Emanuele Frontoni
+, Luca Romeo, Daniele Liciotti
+, Rocco Pietrini, Annalisa Cenci,
+nd Primo Zingaretti
+Department of Information Engineering, Universitá Politecnica delle Marche, I-60131 Ancona, Italy;
+(L.R.); (D.L.); (R.P.);
+(A.C.); (E.F.); (P.Z.)
+* Correspondence:
+Received: 30 August 2018 ; Accepted: 11 October 2018 ; Published: 15 October 2018"
+690d669115ad6fabd53e0562de95e35f1078dfbb,"Progressive versus Random Projections for Compressive Capture of Images, Lightfields and Higher Dimensional Visual Signals","Progressive versus Random Projections for Compressive Capture of Images,
+Lightfields and Higher Dimensional Visual Signals
+Rohit Pandharkar
+MIT Media Lab
+75 Amherst St, Cambridge, MA
+Ashok Veeraraghavan
+01 Broadway, Cambridge MA
+Ramesh Raskar
+MIT Media Lab
+75 Amherst St, Cambridge, MA"
+695f6dc7165aa3fca15d1b1deb4c496fc093ac19,Learning Discriminative Visual N-grams from Mid-level Image Features,"GUPTA, PANDEY, CHIA: VISUAL N-GRAMS
+Learning Discriminative Visual N-grams
+from Mid-level Image Features
+Raj Kumar Gupta
+Megha Pandey
+Alex YS Chia
+Institute of High Performance
+Computing (A*STAR)
+Singapore
+Institute of Infocomm Research
+(A*STAR)
+Singapore
+Rakuten Institute of Technology
+Singapore"
+698812f7d37e148c0a99e768f0a7d24e7b9605ab,Image Classification and Retrieval from User-Supplied Tags,"Image Classification and Retrieval from User-Supplied Tags
+Hamid Izadinia
+Univ. of Washington
+Ali Farhadi
+Univ. of Washington
+Aaron Hertzmann
+Adobe Research
+Matthew D. Hoffman
+Adobe Research"
+699b6cbd72ee0274699b939863813499c377ea00,Enlightening Deep Neural Networks with Knowledge of Confounding Factors,"Enlightening Deep Neural Networks
+with Knowledge of Confounding Factors
+Yu Zhong
+Gil Ettinger
+{yu.zhong,
+Systems & Technology Research"
+69d1b055807ef35a8f9490775348cce899421841,An Improved ABC Algorithm Approach Using SURF for Face Identification,"An Improved ABC Algorithm Approach Using
+SURF for Face Identification
+Chidambaram Chidambaram1,2, Marlon Subtil Mar¸cal2, Leyza Baldo Dorini2,
+Hugo Vieira Neto2, and Heitor Silv´erio Lopes2
+State University of Santa Catarina-UDESC, Brazil
+Federal University of Technology - Paran´a - UTFPR, Brazil
+http://www.sbs.udesc.br
+http://www.utfpr.edu.br"
+6960bfc668aad1b537fbf3f1b48328e7d440b80b,Fully Automatic Recognition of the Temporal Phases of Facial Actions,"IEEE TRANSACTIONS ON SYSTEMS, MAN, AND CYBERNETICS—PART B: CYBERNETICS, VOL. 42, NO. 1, FEBRUARY 2012
+Fully Automatic Recognition of the
+Temporal Phases of Facial Actions
+Michel F. Valstar, Member, IEEE, and Maja Pantic, Senior Member, IEEE"
+69063f7e0a60ad6ce16a877bc8f11b59e5f7348e,Class-Specific Image Deblurring,"Class-Specific Image Deblurring
+Saeed Anwar1, Cong Phuoc Huynh1
+, Fatih Porikli1
+The Australian National University∗ Canberra ACT 2601, Australia
+NICTA, Locked Bag 8001, Canberra ACT 2601, Australia"
+691eb8eb9f5d5fbf5d76349098b78e5d6fc25ccc,Deep Learning of Part-Based Representation of Data Using Sparse Autoencoders With Nonnegativity Constraints,"Deep Learning of Part-based Representation of Data
+Using Sparse Autoencoders with Nonnegativity
+Constraints
+Ehsan Hosseini-Asl, Member, IEEE, Jacek M. Zurada, Life Fellow, IEEE, Olfa Nasraoui, Senior Member, IEEE"
+69f27ca2f1280587004c8fae6b3b0021305e52eb,Title of dissertation : Scene and Video Understanding,
+695b040a9550a46b5ffe31e4a6abbadfac02c1ad,Face recognition with illumination distinction description,"1st International Conference on Pattern Recognition (ICPR 2012)
+November 11-15, 2012. Tsukuba, Japan
+978-4-9906441-1-6 ©2012 IAPR"
+69f49bae5b1c15adc644b47e6c3b6c3f7aa84171,Variational Bayesian Inference for Audio-Visual Tracking of Multiple Speakers,"Variational Bayesian Inference for Audio-Visual
+Tracking of Multiple Speakers
+Yutong Ban, Xavier Alameda-Pineda, Laurent Girin and Radu Horaud"
+692aecba13add2b8c1d82db303f5b2ec743ceb44,FaceForensics: A Large-scale Video Dataset for Forgery Detection in Human Faces,"FaceForensics: A Large-scale Video Dataset for Forgery
+Detection in Human Faces
+Andreas R¨ossler1 Davide Cozzolino2 Luisa Verdoliva2 Christian Riess3
+Justus Thies1
+Matthias Nießner1
+Technical University of Munich
+University Federico II of Naples
+University of Erlangen-Nuremberg"
+6997039127d9b262d4a9aa9467c4f4fa3d596085,Classification of Vehicle Types in Car Parks using Computer Vision Techniques,"Classification of Vehicle Types in Car Parks using
+Computer Vision Techniques
+Chadly Marouane
+Research & Development
+VIRALITY GmbH
+Rauchstraße 7
+81679 Munich, Germany
+Lorenz Schauer
+Ludwig-Maximilians-
+Universität
+München
+Philipp Bauer
+Ludwig-Maximilians-
+Universität
+München
+Oettingenstraße 67
+80538 München, Germany
+Oettingenstraße 67
+80538 München, Germany"
+6946acb595095407871992da62298254658f8d84,An Efficient Method for Face Recognition System In Various Assorted Conditions,"An Efficient Method for Face Recognition System
+In Various Assorted Conditions
+V.Karthikeyan
+K.Vijayalakshmi
+P.Jeyakumar
+finding"
+69a605b2ef38c59e0c8da284d6f27d33e3573620,Automated Multi - Modal Search and Rescue Using Boosted Histogram of Oriented Gradients,"AUTOMATED MULTI-MODAL SEARCH AND RESCUE USING BOOSTED
+HISTOGRAM OF ORIENTED GRADIENTS
+A Thesis
+presented to
+the Faculty of California Polytechnic State University,
+San Luis Obispo
+In Partial Fulfillment
+of the Requirements for the Degree
+Master of Science in Electrical Engineering
+Matthew Lienemann
+December 2015"
+3c3eb65a936296d6ae5058b564f6d0e0c07772cf,A metric for sets of trajectories that is practical and mathematically consistent,"A metric for sets of trajectories that is
+practical and mathematically consistent
+Jos´e Bento
+Jia Jie Zhu"
+3cb2841302af1fb9656f144abc79d4f3d0b27380,When 3 D-Aided 2 D Face Recognition Meets Deep Learning : An extended UR 2 D for Pose-Invariant Face Recognition,"See discussions, stats, and author profiles for this publication at: https://www.researchgate.net/publication/319928941
+When 3D-Aided 2D Face Recognition Meets Deep
+Learning: An extended UR2D for Pose-Invariant
+Face Recognition
+Article · September 2017
+CITATIONS
+authors:
+READS
+Xiang Xu
+University of Houston
+Pengfei Dou
+University of Houston
+8 PUBLICATIONS 10 CITATIONS
+9 PUBLICATIONS 29 CITATIONS
+SEE PROFILE
+SEE PROFILE
+Ha Le
+University of Houston
+7 PUBLICATIONS 2 CITATIONS
+Ioannis A Kakadiaris"
+3cc3cf57326eceb5f20a02aefae17108e8c8ab57,Benchmark for Evaluating Biological Image Analysis Tools,"BENCHMARK FOR EVALUATING BIOLOGICAL IMAGE ANALYSIS TOOLS
+Elisa Drelie Gelasca, Jiyun Byun, Boguslaw Obara, B.S. Manjunath
+Center for Bio-Image Informatics, Electrical and Computer Engineering Department,
+University of California, Santa Barbara 93106-9560,
+http://www.bioimage.ucsb.edu
+Biological images are critical components for a detailed understanding of the structure and functioning of cells and proteins.
+Image processing and analysis tools increasingly play a significant role in better harvesting this vast amount of data, most of
+which is currently analyzed manually and qualitatively. A number of image analysis tools have been proposed to automatically
+extract the image information. As the studies relying on image analysis tools have become widespread, the validation of
+these methods, in particular, segmentation methods, has become more critical. There have been very few efforts at creating
+enchmark datasets in the context of cell and tissue imaging, while, there have been successful benchmarks in other fields, such
+s the Berkeley segmentation dataset [1], the handwritten digit recognition dataset MNIST [2] and face recognition dataset [3, 4].
+In the field of biomedical image processing, most of standardized benchmark data sets concentrates on macrobiological images
+such as mammograms and magnet resonance imaging (MRI) images [5], however, there is still a lack of a standardized dataset
+for microbiological structures (e.g. cells and tissues) and it is well known in biomedical imaging [5].
+We propose a benchmark for biological images to: 1) provide image collections with well defined ground truth; 2) provide
+image analysis tools and evaluation methods to compare and validate analysis tools. We include a representative dataset of
+microbiological structures whose scales range from a subcellular level (nm) to a tissue level (µm), inheriting intrinsic challenges
+in the domain of biomedical image analysis (Fig. 1). The dataset is acquired through two of the main microscopic imaging
+techniques: transmitted light microscopy and confocal laser scanning microscopy. The analysis tools1in the benchmark are"
+3cec488a0910b69f50811cebe8c655dca22078d5,Evidence Extraction for Machine Reading Comprehension with Deep Probabilistic Logic,"Confidential TACL submission. DO NOT DISTRIBUTE.
+Evidence Extraction for Machine Reading Comprehension
+with Deep Probabilistic Logic
+Anonymous TACL submission"
+3c1c8e171450a9b279df939d4c9209d8dbf6b2fe,Large scale mining and retrieval of visual data in a multimodal context,"Diss. ETH No. 18190
+Large-Scale Mining and Retrieval of Visual Data in
+Multimodal Context
+A dissertation submitted to the
+SWISS FEDERAL INSTITUTE OF TECHNOLOGY ZURICH
+for the degree of
+Doctor of Technical Sciences
+presented by
+Till Quack
+MSc. ETH Zuerich
+orn 15. September 1978
+itizen of Germany
+ccepted on the recommendation of
+Prof. Dr. Luc Van Gool, examiner
+Prof. Dr. Andrew Zisserman, co-examiner
+September 2008"
+3cfbe1f100619a932ba7e2f068cd4c41505c9f58,A Realistic Simulation Tool for Testing Face Recognition Systems under Real-World Conditions,"A Realistic Simulation Tool for Testing Face Recognition
+Systems under Real-World Conditions∗
+M. Correa, J. Ruiz-del-Solar, S. Parra-Tsunekawa, R. Verschae
+Department of Electrical Engineering, Universidad de Chile
+Advanced Mining Technology Center, Universidad de Chile"
+3caebf3075e52483c7a7179b3491882af0aaaa37,Lateralization of Cognitive Functions: The Visual Half-Field Task Revisited,"Lateralization of Cognitive Functions: The Visual Half-Field
+Task Revisited
+Ark Verma
+Promotor: Prof. Dr. Marc Brysbaert
+Proefschrift ingediend tot het behalen van de academische graad
+van Doctor in de Psychologie"
+3ca983d40b9de7dc12b989fce213b4abee652c9e,Will the Pedestrian Cross? A Study on Pedestrian Path Prediction,"Will the Pedestrian Cross?
+A Study on Pedestrian Path Prediction
+Christoph G. Keller and Dariu M. Gavrila"
+3caf02979d7cd83d2f3894574c86babf3e201bf3,Seeing to hear? Patterns of gaze to speaking faces in children with autism spectrum disorders,"ORIGINAL RESEARCH ARTICLE
+published: 08 May 2014
+doi: 10.3389/fpsyg.2014.00397
+Seeing to hear? Patterns of gaze to speaking faces in
+hildren with autism spectrum disorders
+Julia R. Irwin1,2* and Lawrence Brancazio1,2
+Haskins Laboratories, New Haven, CT, USA
+Department of Psychology, Southern Connecticut State University, New Haven, CT, USA
+Edited by:
+Jean-Luc Schwartz, National Centre
+for Scientific Research, France
+Reviewed by:
+Satu Saalasti, Brain and Mind
+Laboratory, Aalto University School of
+Science, Finland
+David House, Royal Institute of
+Technology, Sweden
+*Correspondence:
+Julia R. Irwin, Haskins Laboratories,
+00 George Street, New Haven,"
+3cd7b15f5647e650db66fbe2ce1852e00c05b2e4,"ACTIVE, an Extensible Cataloging Platform for Automatic Indexing of Audiovisual Content",
+3ceef6572b00bef961c0246a220edcc48553ed2d,Descriptor Learning for Omnidirectional Image Matching,"Descriptor learning for omnidirectional image matching
+Jonathan Masci1,2,3
+Davide Migliore1,4
+Michael M. Bronstein2
+J¨urgen Schmidhuber1,2,3
+Istituto Dalle Molle di Studi sull’Intelligenza Artificiale (IDSIA), Manno, Switzerland
+Faculty of Informatics, Universit`a della Svizzera Italiana (USI), Lugano, Switzerland
+Scuola Universitaria Professionale della Svizzera Italiana (SUPSI), Lugano, Switzerland
+Evidence Srl, Pisa, Italy"
+3c70360a4ba30b860d337308633842acbb908ee4,Multi-aspect object detection with Boosted Hough Forest,"REDONDO-CABRERA ET AL.: OBJECT DETECTION WITH BOOSTED HOUGH FOREST
+Because better detections are still possible:
+Multi-aspect Object Detection with Boosted
+Hough Forest
+Carolina Redondo-Cabrera
+Roberto López-Sastre
+University of Alcalá
+Alcalá de Henares, ES"
+3c5f390f99272c59fcf822ab78c90ee6bfa7926a,iCub : Learning Emotion Expressions using Human Reward,"iCub: Learning Emotion Expressions using Human Reward
+Nikhil Churamani, Francisco Cruz, Sascha Griffiths and Pablo Barros"
+3c77e4ce48d1bbcdb682cdc790806e2d5f2d2e1a,Recognition of Genuine Smiles,"Recognition of Genuine Smiles
+Hamdi Dibeklioğlu, Member, IEEE, Albert Ali Salah, Member, IEEE, and Theo Gevers, Member, IEEE"
+3ca4ce8ab704b44701bf7ef8dda01c8dbb226fac,On-the-fly hand detection training with application in egocentric action recognition,"On-the-Fly Hand Detection Training with Application in Egocentric Action
+Recognition
+Jayant Kumar∗, Qun Li∗, Survi Kyal, Edgar A. Bernal, and Raja Bala
+{Jayant.Kumar, Qun.Li, Survi.Kyal, Edgar.Bernal,
+PARC, A Xerox Company
+800 Phillips Road, Webster, NY 14580"
+3c917f071bfc1244c75fca3ceed0a8c46bb975cc,Reduced acetylcholinesterase activity in the fusiform gyrus in adults with autism spectrum disorders.,"ORIGINAL ARTICLE
+Reduced Acetylcholinesterase Activity
+in the Fusiform Gyrus in Adults With Autism
+Spectrum Disorders
+Katsuaki Suzuki, MD, PhD; Genichi Sugihara, MD, PhD; Yasuomi Ouchi, MD, PhD; Kazuhiko Nakamura, MD, PhD;
+Masatsugu Tsujii, MA; Masami Futatsubashi, BS; Yasuhide Iwata, MD, PhD; Kenji J. Tsuchiya, MD, PhD;
+Kaori Matsumoto, MA; Kiyokazu Takebayashi, MD, PhD; Tomoyasu Wakuda, MD, PhD; Yujiro Yoshihara, MD, PhD;
+Shiro Suda, MD, PhD; Mitsuru Kikuchi, MD, PhD; Nori Takei, MD, PhD, MSc; Toshirou Sugiyama, MD, PhD;
+Toshiaki Irie, PhD; Norio Mori, MD, PhD
+Context: Both neuropsychological and functional mag-
+netic resonance imaging studies have shown deficien-
+ies in face perception in subjects with autism spectrum
+disorders (ASD). The fusiform gyrus has been regarded
+s the key structure in face perception. The cholinergic
+system is known to regulate the function of the visual
+pathway, including the fusiform gyrus.
+Objectives: To determine whether central acetylcho-
+linesterase activity, a marker for the cholinergic system,
+is altered in ASD and whether the alteration in acetyl-
+holinesterase activity, if any, is correlated with their so-"
+3c9ad25e91cace6ac93069480745d4578b7f29f5,Automatic Article Commenting: the Task and Dataset,"Automatic Article Commenting: the Task and Dataset
+Lianhui Qin1∗, Lemao Liu2, Victoria Bi2, Yan Wang2,
+Xiaojiang Liu2, Zhiting Hu, Hai Zhao1, Shuming Shi2
+Department of Computer Science and Engineering, Shanghai Jiao Tong University1, Tencent AI Lab2,"
+3ce8a74b47f81ec66046f2486afa1a89e3165dfd,LSH banding for large-scale retrieval with memory and recall constraints,"978-1-4244-2354-5/09/$25.00 ©2009 IEEE
+ICASSP 2009"
+3cb8128b41b419a1fdc7a95bf8e65a37aff79676,Shifting the Baseline: Single Modality Performance on Visual Navigation&QA,"Single Modality Performance on Visual Navigation & QA
+Shifting the Baseline:
+Jesse Thomason
+Yonatan Bisk
+Paul G. Allen School of Computer Science and Engineering
+Daniel Gordan"
+3c2819dae899559f1c61b3b34aeb5d41a6398440,A Stable and Invariant Three-polar Surface Representation: Application to 3D Face Description,"A Stable and Invariant Three-polar Surface Representation:
+Application to 3D Face Description
+Majdi Jribi
+Faouzi Ghorbel
+CRISTAL Laboratory,
+GRIFT research group
+ENSI,La Manouba
+University
+010, La manouba,
+Tunisia
+CRISTAL Laboratory,
+GRIFT research group
+ENSI,La Manouba
+University
+010, La manouba,
+Tunisia"
+3c793fa4d7f673f1e9f6799729ec266ce573ec60,Margin Sample Mining Loss: A Deep Learning Based Method for Person Re-identification,"Margin Sample Mining Loss: A Deep Learning Based Method for Person
+Re-identification
+Qiqi Xiao , Hao Luo , Chi Zhang"
+3c374cb8e730b64dacb9fbf6eb67f5987c7de3c8,Measuring Gaze Orientation for Human-Robot Interaction,"Measuring Gaze Orientation for Human-Robot
+Interaction
+R. Brochard∗, B. Burger∗, A. Herbulot∗†, F. Lerasle∗†
+CNRS; LAAS; 7 avenue du Colonel Roche, 31077 Toulouse Cedex, France
+Universit´e de Toulouse; UPS; LAAS-CNRS : F-31077 Toulouse, France
+Introduction
+In the context of Human-Robot interaction estimating gaze orientation brings
+useful information about human focus of attention. This is a contextual infor-
+mation : when you point something you usually look at it. Estimating gaze
+orientation requires head pose estimation. There are several techniques to esti-
+mate head pose from images, they are mainly based on training [3, 4] or on local
+face features tracking [6]. The approach described here is based on local face
+features tracking in image space using online learning, it is a mixed approach
+since we track face features using some learning at feature level. It uses SURF
+features [2] to guide detection and tracking. Such key features can be matched
+etween images, used for object detection or object tracking [10]. Several ap-
+proaches work on fixed size images like training techniques which mainly work
+on low resolution images because of computation costs whereas approaches based
+on local features tracking work on high resolution images. Tracking face features
+such as eyes, nose and mouth is a common problem in many applications such as"
+3c0bbfe664fb083644301c67c04a7f1331d9515f,The Role of Color and Contrast in Facial Age Estimation,"The Role of Color and Contrast in Facial Age Estimation
+Paper ID: 7
+No Institute Given"
+3c4f6d24b55b1fd3c5b85c70308d544faef3f69a,A Hybrid Deep Learning Architecture for Privacy-Preserving Mobile Analytics,"A Hybrid Deep Learning Architecture for
+Privacy-Preserving Mobile Analytics
+Seyed Ali Ossia(cid:63), Ali Shahin Shamsabadi(cid:63), Ali Taheri(cid:63), Hamid R. Rabiee(cid:63),
+Nic Lane‡, Hamed Haddadi†
+(cid:63)Sharif University of Technology, ‡University College London, †Queen Mary University of London"
+3cb0ef5aabc7eb4dd8d32a129cb12b3081ef264f,Absolute Head Pose Estimation From Overhead Wide-Angle Cameras,"Absolute Head Pose Estimation From Overhead Wide-Angle Cameras
+Ying-Li Tian, Lisa Brown, Jonathan Connell,
+Sharat Pankanti, Arun Hampapur, Andrew Senior, Ruud Bolle
+IBM T.J. Watson Research Center
+9 Skyline Drive, Hawthorne, NY 10532 USA
+{ yltian,lisabr,jconnell,sharat,arunh,aws,bolle"
+3cc0d9c1f690addd2c82e60f2a460e3c557ff242,Sort Story: Sorting Jumbled Images and Captions into Stories,"Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 925–931,
+Austin, Texas, November 1-5, 2016. c(cid:13)2016 Association for Computational Linguistics"
+3c0420a0dd90d0900613ac1f1a1174b626df26d9,Learning Discriminative Chamfer Regularization,"YARLAGADDA ∗, EIGENSTETTER ∗, OMMER: CHAMFER REGULARIZATION
+Learning Discriminative Chamfer
+Regularization
+Pradeep Yarlagadda ∗
+Angela Eigenstetter ∗
+Björn Ommer
+Interdisciplinary Center for Scientific
+Computing (IWR)
+University of Heidelberg
+Germany"
+3c68763caa67dee55bca76f0f71dd4530f3fd57c,Ranking to Learn and Learning to Rank: On the Role of Ranking in Pattern Recognition Applications,"Ranking to Learn and Learning to Rank:
+On the Role of Ranking in Pattern Recognition Applications
+Giorgio Roffo
+Submitted to the Department of Computer Science
+in partial fulfillment of the requirements for the degree of
+European Doctor of Philosophy
+S.S.D. ING-INF05
+Cycle XXIX/2014
+t the
+Universit`a degli Studi di Verona
+May 2017
+(cid:13) Universit`a degli Studi di Verona 2017. All rights reserved.
+Author . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+Department of Computer Science
+May 25, 2017
+Certified by . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+Prof. Marco Cristani
+Associate Professor
+Thesis Tutor
+Accepted by . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . ."
+3c49dafc82ee24e70e338b896868cd9f82f0edd7,Biologically Motivated 3 D Face Recognition,"BIOLOGICALLY MOTIVATED 3D FACE RECOGNITION
+Albert Ali Salah
+B.S, in Computer Engineering, Bo˘gazi¸ci University, 1998
+M.S, in Computer Engineering, Bo˘gazi¸ci University, 2000
+Submitted to the Institute for Graduate Studies in
+Science and Engineering in partial fulfillment of
+the requirements for the degree of
+Doctor of Philosophy
+Graduate Program in
+Bo˘gazi¸ci University"
+3c56acaa819f4e2263638b67cea1ec37a226691d,Body Joint Guided 3-D Deep Convolutional Descriptors for Action Recognition,"Body Joint guided 3D Deep Convolutional
+Descriptors for Action Recognition
+Congqi Cao, Yifan Zhang, Member, IEEE, Chunjie Zhang, Member, IEEE, and Hanqing Lu, Senior Member, IEEE"
+3c8da376576938160cbed956ece838682fa50e9f,Aiding face recognition with social context association rule based re-ranking,"Chapter 4
+Aiding Face Recognition with
+Social Context Association Rule
+ased Re-Ranking
+Humans are very ef‌f‌icient at recognizing familiar face images even in challenging condi-
+tions. One reason for such capabilities is the ability to understand social context between
+individuals. Sometimes the identity of the person in a photo can be inferred based on the
+identity of other persons in the same photo, when some social context between them is
+known. This chapter presents an algorithm to utilize the co-occurrence of individuals as
+the social context to improve face recognition. Association rule mining is utilized to infer
+multi-level social context among subjects from a large repository of social transactions.
+The results are demonstrated on the G-album and on the SN-collection pertaining to 4675
+identities prepared by the authors from a social networking website. The results show that
+ssociation rules extracted from social context can be used to augment face recognition and
+improve the identification performance.
+Introduction
+Face recognition capabilities of humans have inspired several researchers to understand
+the science behind it and use it in developing automated algorithms. Recently, it is also
+rgued that encoding social context among individuals can be leveraged for improved
+utomatic face recognition [175]. As shown in Figure 4.1, often times a person’s identity"
+3ca1e06dfbaeed0f8dc49bf345369fb8e43da53d,Cross-View Asymmetric Metric Learning for Unsupervised Person Re-Identification,"Cross-view Asymmetric Metric Learning for
+Unsupervised Person Re-identification
+Hong-Xing Yu, Ancong Wu, Wei-Shi Zheng
+Code is available at the project page:
+https://github.com/KovenYu/CAMEL
+For reference of this work, please cite:
+Hong-Xing Yu, Ancong Wu, Wei-Shi Zheng. “Cross-view Asymmetric
+Metric Learning for Unsupervised Person Re-identification.” Proceedings
+of the IEEE International Conference on Computer Vision. 2017.
+title={Cross-view Asymmetric Metric Learning for Unsupervised Person
+Re-identification},
+uthor={Yu, Hong-Xing and Wu, Ancong and Zheng, Wei-Shi},
+ooktitle={Proceedings of the IEEE International Conference on Computer
+Vision},
+year={2017}"
+56e95fa26fb417776824e5adf6d6d511e5b30110,Object and Action Classification with Latent Window Parameters,"Int J Comput Vis
+DOI 10.1007/s11263-013-0646-8
+Object and Action Classification with Latent Window Parameters
+Hakan Bilen · Vinay P. Namboodiri · Luc J. Van Gool
+Received: 1 October 2012 / Accepted: 18 July 2013
+© Springer Science+Business Media New York 2013"
+56e4dead93a63490e6c8402a3c7adc493c230da5,Face Recognition Techniques: A Survey,"World Journal of Computer Application and Technology 1(2): 41-50, 2013
+DOI: 10.13189/wjcat.2013.010204
+http://www.hrpub.org
+Face Recognition Techniques: A Survey
+V.Vijayakumari
+Department of Electronics and Communication, Sri krishna College of Technology, Coimbatore, India
+*Corresponding Author:
+Copyright © 2013 Horizon Research Publishing All rights reserved."
+56b9c6efe0322f0087d2f82b52129cc6b41ab356,"Acquire, Augment, Segment & Enjoy: Weakly Supervised Instance Segmentation of Supermarket Products","Acquire, Augment, Segment & Enjoy:
+Weakly Supervised Instance Segmentation of
+Supermarket Products
+Patrick Follmann+*, Bertram Drost+, and Tobias B¨ottger+*
++MVTec Software GmbH, Munich, Germany
+Technical University of Munich (TUM)
+July 9, 2018"
+56bc524d7cc1ff2fad8f27c0414cac437fc2b4f0,Protest Activity Detection and Perceived Violence Estimation from Social Media Images,"To appear in Proceedings of the 25th ACM International Conference on Multimedia 2017
+Protest Activity Detection and Perceived Violence Estimation
+from Social Media Images
+Donghyeon Won
+Zachary C. Steinert-Threlkeld
+Jungseock Joo"
+56e885b9094391f7d55023a71a09822b38b26447,Face Retrieval using Frequency Decoded Local Descriptor,"FREQUENCY DECODED LOCAL BINARY PATTERN
+Face Retrieval using Frequency Decoded Local
+Descriptor
+Shiv Ram Dubey, Member, IEEE"
+568727a76dc1242e3d48392f9c19678a27c63482,High Entropy Ensembles for Holistic Figure-ground Segmentation,"GALLO et al.: HEE FOR HOLISTIC FIGURE-GROUND SEGMENTATION
+High Entropy Ensembles for Holistic
+Figure-ground Segmentation
+Ignazio Gallo
+Alessandro Zamberletti
+Simone Albertini
+Lucia Noce
+Applied Recognition Technology
+Laboratory
+Department of Theoretical and Applied
+Science
+University of Insubria
+Varese, Italy"
+56d4eeb7fcdfd4f3156b9bdd20a9f35c995ebcac,Local Similarity Based Linear Discriminant Analysis for Face Recognition with Single Sample per Person,"Local Similarity based Linear Discriminant
+Analysis for Face Recognition with Single
+Sample per Person
+Fan Liu1, Ye Bi1, Yan Cui2, Zhenmin Tang1
+School of Computer Science and Engineering, Nanjing University of Science and
+Key Laboratory of Broadband Wireless Communication and Sensor Network
+Technology, Nanjing University of Posts and Telecommunications, China
+Technology, China"
+56fcc0ef7c10ff322626fec29f532af1860ff2f7,Occlusion and Abandoned Object Detection for Surveillance Applications,"International Journal of Computer Applications Technology and Research
+Volume 2– Issue 6, 708 - 713, 2013, ISSN: 2319–8656
+Occlusion and Abandoned Object Detection for
+Surveillance Applications
+M. Chitra
+RVS college of Engineering
+nd Technology
+Karaikal, India
+M.Kalaiselvi Geetha
+Annamalai University
+Chidambaram, India
+L.Menaka
+RVS college of Engineering
+nd Technology
+Karaikal, India
+is challenging and"
+568067d7232c753e182dbc1d7075364560ffc363,Scope of physiological and behavioural pain assessment techniques in children – a review,"Scope of physiological and behavioural pain assessment techniques
+in children – a review
+Saranya Devi Subramaniam1, Brindha Doss1 ✉, Lakshmi Deepika Chanderasekar2, Aswini Madhavan1,
+Antony Merlin Rosary2
+Department of Biomedical Engineering, PSG College of Technology, Coimbatore 641004, India
+Department of Electronics & Communication Engineering, PSG College of Technology, Coimbatore, 641004, India
+✉ E-mail:
+Published in Healthcare Technology Letters; Received on 7th February 2018; Accepted on 10th May 2018
+Pain is an unpleasant subjective experience. At present, clinicians are using self-report or pain scales to recognise and monitor pain in children.
+However, these techniques are not efficient to observe the pain in children having cognitive disorder and also require highly skilled observers
+to measure pain. Using these techniques it is also difficult to choose the analgesic drug dosages to the patients after surgery. Thus, this
+onceptual work explains the demand for automatic coding techniques to evaluate pain and also it documents some evidence of
+techniques that act as an alternative approach for objectively determining pain in children. In this review, some good indicators of pain in
+hildren are explained in detail; they are facial expressions from an RGB image, thermal image and also feature from well proven
+physiological signals such as electrocardiogram, skin conductance, body temperature, surgical pleth index, pupillary reflex dilation,
+nalgesia nociception index, photoplethysmography, perfusion index etc.
+. Introduction: The children will encounter pain resulting from
+injuries, disease, after surgery and other health problems. The
+‘International Association for the Study of Pain (IASP)’, an
+interdisciplinary organisation created in 1973 to study pain and"
+564babec16b895d385d06d38545febd66ef02f35,Robust Statistics for Feature-based Active Appearance Models,
+562f35a662545d839876deeb605ca2c864507a82,Revealing Variations in Perception of Mental States from Dynamic Facial Expressions: A Cautionary Note,"Revealing Variations in Perception of Mental States from
+Dynamic Facial Expressions: A Cautionary Note
+Elisa Back1*, Timothy R. Jordan2
+Department of Psychology, Kingston University London, Kingston upon Thames, United Kingdom, 2 Department of Psychology, Zayed University, Dubai, United Arab
+Emirates"
+564d4ee76c0511bc395dfc8ef8e3b3867fc34a6d,Robust group sparse representation via half-quadratic optimization for face recognition,"Robust Group Sparse Representation via Half-Quadratic Optimization
+for Face Recognition
+Yong Peng and Bao-Liang Lu(cid:3), Senior Member, IEEE"
+56a653fea5c2a7e45246613049fb16b1d204fc96,Quaternion Collaborative and Sparse Representation With Application to Color Face Recognition,"Quaternion Collaborative and Sparse Representation
+With Application to Color Face Recognition
+Cuiming Zou, Kit Ian Kou, Member, IEEE, and Yulong Wang, Student Member, IEEE
+representation-based"
+5666ed763698295e41564efda627767ee55cc943,Relatively-Paired Space Analysis: Learning a Latent Common Space From Relatively-Paired Observations,"Manuscript
+Click here to download Manuscript: template.tex
+Click here to view linked References
+Noname manuscript No.
+(will be inserted by the editor)
+Relatively-Paired Space Analysis: Learning a Latent Common
+Space from Relatively-Paired Observations
+Zhanghui Kuang · Kwan-Yee K. Wong
+Received: date / Accepted: date"
+564555b7fdc45938d813650de7a7b1cd40005aa8,Implementation of SIFT In Various Applications,"International Journal of Engineering Research and Development
+e-ISSN: 2278-067X, p-ISSN: 2278-800X, www.ijerd.com
+Volume 7, Issue 4 (May 2013), PP. 59-64
+Implementation of SIFT In Various Applications
+,2,3Deen Bandhu Chotu Ram University of Science and Technology Murthal, Haryana, India.
+Ritu Rani1, S. K. Grewal 2, Indiwar 3"
+5615d6045301ecbc5be35e46cab711f676aadf3a,Discriminatively Learned Hierarchical Rank Pooling Networks,"Discriminatively Learned Hierarchical Rank Pooling Networks
+Basura Fernando · Stephen Gould
+Received: date / Accepted: date"
+56cf859363f1b5231418b40b957a9132a78ea546,VLASE: Vehicle Localization by Aggregating Semantic Edges,"VLASE: Vehicle Localization by Aggregating Semantic Edges
+Xin Yu1∗, Sagar Chaturvedi1∗, Chen Feng2, Yuichi Taguchi2, Teng-Yok Lee2, Clinton Fernandes1, Srikumar Ramalingam1"
+56f5a94047966eac4b2f97ded4b50513f9a09951,Is the Kidney Donor Risk Index a Useful Tool in Non-US Patients?,"791148 CJKXXX10.1177/2054358118791148Canadian Journal of Kidney Health and DiseaseYoung et al
+research-article20182018
+Original Research Article
+Is the Kidney Donor Risk Index a
+Useful Tool in Non-US Patients?
+Ann Young1, Greg A. Knoll2,3, Eric McArthur2,
+Stephanie N. Dixon2,4, Amit X. Garg2,5,
+Charmaine E. Lok1,2,6, Ngan N. Lam7, and S. Joseph Kim1,2,6,8
+Canadian Journal of Kidney Health
+nd Disease
+Volume 5: 1 –10
+© The Author(s) 2018
+Reprints and permissions:
+sagepub.com/journals-permissions
+DOI: 10.1177/2054358118791148
+https://doi.org/10.1177/2054358118791148
+journals.sagepub.com/home/cjk"
+56852a56dd830a6ee3882773c453025ddec652e2,Emotion recognition through static faces and moving bodies: a comparison between typically developed adults and individuals with high level of autistic traits,"ORIGINAL RESEARCH
+published: 23 October 2015
+doi: 10.3389/fpsyg.2015.01570
+Emotion recognition through static
+faces and moving bodies: a
+omparison between typically
+developed adults and individuals
+with high level of autistic traits†
+Rossana Actis-Grosso1,2*, Francesco Bossi1 and Paola Ricciardelli1,2
+Department of Psychology, University of Milano-Bicocca, Milano, Italy, 2 Milan Centre for Neuroscience, Milano, Italy
+We investigated whether the type of stimulus (pictures of static faces vs. body motion)
+ontributes differently to the recognition of emotions. The performance (accuracy and
+response times) of 25 Low Autistic Traits (LAT group) young adults (21 males) and 20
+young adults (16 males) with either High Autistic Traits or with High Functioning Autism
+Spectrum Disorder (HAT group) was compared in the recognition of four emotions
+(Happiness, Anger, Fear, and Sadness) either shown in static faces or conveyed by
+moving body patch-light displays (PLDs). Overall, HAT individuals were as accurate as
+LAT ones in perceiving emotions both with faces and with PLDs. Moreover, they correctly
+described non-emotional actions depicted by PLDs, indicating that they perceived the
+motion conveyed by the PLDs per se. For LAT participants, happiness proved to be"
+56a0ead811a1bf15e42be8a9a007b0299636f213,Talk the Walk: Navigating New York City through Grounded Dialogue,"Talk the Walk: Navigating New York City through
+Grounded Dialogue
+Harm de Vries1, Kurt Shuster3, Dhruv Batra3,2, Devi Parikh3,2, Jason Weston3 & Douwe Kiela3
+MILA, Université de Montréal; 2Georgia Institute of Technology; 3Facebook AI Research"
+566038a3c2867894a08125efe41ef0a40824a090,Face recognition and gender classification in personal memories,"978-1-4244-2354-5/09/$25.00 ©2009 IEEE
+ICASSP 2009"
+56dca23481de9119aa21f9044efd7db09f618704,Riemannian Dictionary Learning and Sparse Coding for Positive Definite Matrices,"Riemannian Dictionary Learning and Sparse
+Coding for Positive Definite Matrices
+Anoop Cherian
+Suvrit Sra"
+560447750f45ea18cb21f202e30344c4fe12c52e,Removal Of Blurred And Illuminated Face Image With Different Poses,"International Journal of Scientific & Engineering Research, Volume 5, Issue 3, March-2014 33
+ISSN 2229-5518
+Removal Of Blurred And Illuminated
+Face Image With Different Poses
+C.Indhumathi, C.Dhanamani"
+56c5d08103c5bf4b263a81da73135455136bbe6d,Kernel MBPLS for a Scalable and Multi-Camera Person Re-Identification System,"Kernel MBPLS for a Scalable and Multi-Camera Person
+Re-Identification System
+Raphael Pratesa,*, William Robson Schwartza
+Smart Surveillance Interest Group, Computer Science Department, Universidade Federal de Minas Gerais, Minas
+Gerais, Brazil
+Person re-identification aims at establishing global identities for individuals as they move
+cross a camera network.
+It is a challenging task due to the drastic appearance changes that
+occur between cameras as consequence of different pose and illumination conditions. Pairwise
+matching models yield state-of-the-art results in most of the person re-identification datasets by
+apturing nuances that are robust and discriminative for a specific pair of cameras. Nonetheless,
+pairwise models are not scalable with the number of surveillance cameras. Therefore, elegant solu-
+tions combining scalability with high matching rates are crucial for the person re-identification in
+real-world scenarios. In this work, we tackle this problem proposing a multi-camera nonlinear re-
+gression model called Kernel Multiblock Partial Least Squares (Kernel MBPLS), a single subspace
+model for the entire camera network that uses all the labeled information. In this subspace, probe
+nd gallery individual can be successfully matched. Experimental results in three multi-camera
+person re-identification datasets (WARD, RAID and SAIVT-SoftBIO) demonstrate that the Ker-
+nel MBPLS presents favorable aspects such as the scalability and robustness with respect to the
+number of cameras combined with the high matching rates."
+5665d98136cc39322d47cb782b8e49d141c5a29e,An Agile Framework for Real-time Visual Tracking in Videos,"REPORT DOCUMENTATION PAGE
+Form Approved OMB NO. 0704-0188
+this collection of
+information
+is estimated
+instructions,
+The public reporting burden
+Send comments
+searching existing data sources, gathering and maintaining
+to Washington
+regarding
+this burden estimate or any other aspect of
+Information Operations and Reports, 1215 Jefferson Davis Highway, Suite 1204, Arlington VA, 22202-4302.
+Headquarters Services, Directorate
+Respondents should be aware that notwithstanding any other provision of law, no person shall be subject to any oenalty for failing to comply with a collection of
+information if it does not display a currently valid OMB control number.
+PLEASE DO NOT RETURN YOUR FORM TO THE ABOVE ADDRESS.
+. REPORT DATE (DD-MM-YYYY)
+the data needed, and completing and reviewing
+this collection of"
+516a27d5dd06622f872f5ef334313350745eadc3,Fine-Grained Facial Expression Analysis Using Dimensional Emotion Model,"> REPLACE THIS LINE WITH YOUR PAPER IDENTIFICATION NUMBER (DOUBLE-CLICK HERE TO EDIT) <
+Fine-Grained Facial Expression Analysis Us-
+ing Dimensional Emotion Model
+ǂFeng Zhou, ǂShu Kong, Charless C. Fowlkes, Tao Chen, *Baiying Lei, Member, IEEE"
+513d9d0fdc9efa0f042ed1a3c8eab1fbb564f67b,Efficient Processing of Deep Neural Networks: A Tutorial and Survey,"Efficient Processing of Deep Neural Networks:
+A Tutorial and Survey
+Vivienne Sze, Senior Member, IEEE, Yu-Hsin Chen, Student Member, IEEE, Tien-Ju Yang, Student
+Member, IEEE, Joel Emer, Fellow, IEEE"
+51c3050fb509ca685de3d9ac2e965f0de1fb21cc,Fantope Regularization in Metric Learning,"Fantope Regularization in Metric Learning
+Marc T. Law
+Nicolas Thome
+Matthieu Cord
+Sorbonne Universit´es, UPMC Univ Paris 06, UMR 7606, LIP6, F-75005, Paris, France"
+51c7c5dfda47647aef2797ac3103cf0e108fdfb4,Cs 395t: Celebrity Look-alikes *,"CS 395T: Celebrity Look-Alikes ∗
+Adrian Quark"
+511dda02d39dc8107ac385ea8a572970e2eb9b7b,"Face recognition using distributed, mobile computing","014 IEEE International Conference on Acoustic, Speech and Signal Processing (ICASSP)
+978-1-4799-2893-4/14/$31.00 ©2014 IEEE
+Klipsch School of Electrical and Computer Engineering
+Gregorio Hinojos and Phillip L. De Leon
+Las Cruces, New Mexico, U.S.A.
+New Mexico State University
+. INTRODUCTION"
+519f4eb5fe15a25a46f1a49e2632b12a3b18c94d,Non-Lambertian Reflectance Modeling and Shape Recovery of Faces Using Tensor Splines,"Non-Lambertian Reflectance Modeling and
+Shape Recovery of Faces using Tensor Splines
+Ritwik Kumar, Student Member, IEEE, Angelos Barmpoutis, Member, IEEE,
+Arunava Banerjee, Member, IEEE, and Baba C. Vemuri, Fellow, IEEE"
+5171157c2c09a85ad6558c5c03da6b75b0cf5fe6,Dynamic Coattention Networks For Question Answering,"Published as a conference paper at ICLR 2017
+DYNAMIC COATTENTION NETWORKS
+FOR QUESTION ANSWERING
+Caiming Xiong∗, Victor Zhong∗, Richard Socher
+Salesforce Research
+Palo Alto, CA 94301, USA
+{cxiong, vzhong,"
+518439ba2895c84ba686db5b83674c440e637c0b,The Price of Fair PCA: One Extra Dimension,"The Price of Fair PCA: One Extra Dimension
+Samira Samadi
+Georgia Tech
+Uthaipon Tantipongpipat
+Georgia Tech
+Jamie Morgenstern
+Georgia Tech
+Mohit Singh
+Georgia Tech
+Santosh Vempala
+Georgia Tech"
+519db7bb7d1778bddfbe3725220756627373d69a,A Comparative Study of Local Matching Approach for Face Recognition,"A Comparative Study of Local Matching
+Approach for Face Recognition
+Jie Zou, Member, IEEE, Qiang Ji, Senior Member, IEEE, and George Nagy, Fellow, IEEE
+to holistic methods,"
+516a014f4654c90a22ae3d363b6e80bda68a084d,Adaptive human-centered representation for activity recognition of multiple individuals from 3D point cloud sequences,"Adaptive Human-Centered Representation for Activity Recognition of
+Multiple Individuals from 3D Point Cloud Sequences
+Hao Zhang1, Christopher Reardon2, Chi Zhang2, and Lynne E. Parker2"
+51c7236feaa2ae23cef78c7bca75c69d7081e24a,Deep multi-frame face super-resolution,"Deep multi-frame face super-resolution
+Evgeniya Ustinova, Victor Lempitsky
+October 17, 2017"
+51cc78bc719d7ff2956b645e2fb61bab59843d2b,Face and Facial Expression Recognition with an Embedded System for Human-Robot Interaction,"Face and Facial Expression Recognition with an
+Embedded System for Human-Robot Interaction
+Yang-Bok Lee1, Seung-Bin Moon1, and Yong-Guk Kim 1*
+School of Computer Engineering, Sejong University, Seoul, Korea"
+517cc1084952133b6d2ecd0a535cdc3ddf8955d7,A Graphical Social Topology Model for Multi-Object Tracking,"A Graphical Social Topology Model for
+Multi-Object Tracking
+Shan Gao, Xiaogang Chen, Qixiang Ye, Senior Member, IEEE, Arjan Kuijper, Member, IEEE,
+Xiangyang Ji, Member, IEEE,"
+511b06c26b0628175c66ab70dd4c1a4c0c19aee9,Face Recognition using Laplace Beltrami Operator by Optimal Linear Approximations,"International Journal of Engineering Research and General ScienceVolume 2, Issue 5, August – September 2014
+ISSN 2091-2730
+Face Recognition using Laplace Beltrami Operator by Optimal Linear
+Approximations
+Tapasya Sinsinwar1, P.K.Dwivedi2
+Professor and Director Academics, Institute of Engineering and Technology, Alwar, Rajasthan Technical University, Kota(Raj.)
+Research Scholar (M.Tech, IT), Institute of Engineering and Technology"
+5122a5d4bdf58b4f413d4de1fb250d4ab5e0608a,Gender Classification from Pose-Based GEIs,"Gender Classification from Pose-Based GEIs(cid:2)
+Ra´ul Mart´ın-F´elez, Ram´on A. Mollineda, and J. Salvador S´anchez
+Institute of New Imaging Technologies (INIT)
+Universitat Jaume I. Av. Sos Baynat s/n, 12071, Castell´o de la Plana, Spain"
+5146832515ba8b4ad48372967d9fb7dcdea61869,CUNI System for WMT16 Automatic Post-Editing and Multimodal Translation Tasks,"Proceedings of the First Conference on Machine Translation, Volume 2: Shared Task Papers, pages 646–654,
+Berlin, Germany, August 11-12, 2016. c(cid:13)2016 Association for Computational Linguistics"
+51a81a17328ad36f1bbc15e240076b68d3271c0c,Laplacian object: One-shot object detection by locality preserving projection,"LAPLACIAN OBJECT: ONE-SHOT OBJECT DETECTION BY LOCALITY PRESERVING
+PROJECTION
+Sujoy Kumar Biswas and Peyman Milanfar
+Electrical Engineering Department
+University of California, Santa Cruz
+156 High Street, Santa Cruz, CA, 95064"
+5193328862366e114781cb6b196ae958c1553357,Incremental Learning in Person Re-Identification,"Incremental Learning in Person Re-Identification
+Prajjwal Bhargava
+SRM University
+Chennai"
+511662e02373433c8c9e27d1425707069e3695b7,Effects of image compression on ear biometrics,"Engineering and Technology Copyright. The copy of record is available at IET Digital Library.
+Research Article
+Effects of image compression on ear
+iometrics
+ISSN 2047-4938
+Received on 23rd October 2015
+Revised on 27th January 2016
+Accepted on 15th February 2016
+doi: 10.1049/iet-bmt.2015.0098
+www.ietdl.org
+Christian Rathgeb1 ✉, Anika Pflug2, Johannes Wagner1, Christoph Busch1
+da/sec – Biometrics and Internet Security Research Group, Hochschule Darmstadt, Germany
+Media Security and IT Forensics – Fraunhofer Institute for Secure Information Technology, Germany
+✉ E-mail:"
+5120fb7db8eadb26118847d0553fca1c22ed6f07,Deep Extreme Tracker Based on Bootstrap Particle Filter,"Journal of Theoretical and Applied Information Technology
+31st August 2014. Vol. 66 No.3
+© 2005 - 2014 JATIT & LLS. All rights reserved.
+ISSN: 1992-8645 www.jatit.org E-ISSN: 1817-3195
+DEEP EXTREME TRACKER BASED ON
+BOOTSTRAP PARTICLE FILTER
+ALEXANDER A S GUNAWAN,
+2 MOHAMAD IVAN FANANY,
+WISNU JATMIKO
+Bina Nusantara University, Mathematics Department, School of Computer Science, Jakarta, Indonesia
+, 3 Universitas Indonesia, Faculty of Computer Science, Depok, Indonesia
+E-mail: 1 2 3"
+51b70582fb0d536d4a235f91bf6ad382f29e2601,Detection of emotions from video in non-controlled environment. (Détection des émotions à partir de vidéos dans un environnement non contrôlé),"Detection of emotions from video in non-controlled
+environment
+Rizwan Ahmed Khan
+To cite this version:
+Rizwan Ahmed Khan. Detection of emotions from video in non-controlled environment. Image
+Processing. Universit´e Claude Bernard - Lyon I, 2013. English. <NNT : 2013LYO10227>.
+<tel-01166539v2>
+HAL Id: tel-01166539
+https://tel.archives-ouvertes.fr/tel-01166539v2
+Submitted on 23 Jun 2015
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destin´ee au d´epˆot et `a la diffusion de documents
+scientifiques de niveau recherche, publi´es ou non,
+´emanant des ´etablissements d’enseignement et de"
+51319bb12c67fb5b11cbf2012a7e2059718b52eb,Local Fisher Discriminant Analysis for Pedestrian Re-identification,"Local Fisher Discriminant Analysis for Pedestrian Re-identification
+Sateesh Pedagadi, James Orwell
+Kingston University London
+Sergio Velastin
+Universidad de Santiago de Chile
+Boghos Boghossian
+Ipsotek Ltd, UK"
+5161e38e4ea716dcfb554ccb88901b3d97778f64,SSPP-DAN: Deep domain adaptation network for face recognition with single sample per person,"SSPP-DAN: DEEP DOMAIN ADAPTATION NETWORK FOR
+FACE RECOGNITION WITH SINGLE SAMPLE PER PERSON
+Sungeun Hong, Woobin Im, Jongbin Ryu, Hyun S. Yang
+School of Computing, KAIST, Republic of Korea"
+5121f42de7cb9e41f93646e087df82b573b23311,Classifying Online Dating Profiles on Tinder using FaceNet Facial Embeddings,"CLASSIFYING ONLINE DATING PROFILES ON TINDER USING FACENET FACIAL
+EMBEDDINGS
+Charles F. Jekel and Raphael T. Haftka
+Department of Mechanical & Aerospace Engineering - University of Florida - Gainesville, FL 32611"
+51cf3fa26b7c31c10427317fb5d72a6712023279,What Shape Is Your Conjugate? A Survey of Computational Convex Analysis and Its Applications,"A SURVEY OF COMPUTATIONAL CONVEX ANALYSIS AND ITS APPLICATIONS
+WHAT SHAPE IS YOUR CONJUGATE?
+YVES LUCET"
+51d1a6e15936727e8dd487ac7b7fd39bd2baf5ee,"A Fast and Accurate System for Face Detection, Identification, and Verification","JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+A Fast and Accurate System for Face Detection,
+Identification, and Verification
+Rajeev Ranjan, Ankan Bansal, Jingxiao Zheng, Hongyu Xu, Joshua Gleason, Boyu Lu, Anirudh Nanduri,
+Jun-Cheng Chen, Carlos D. Castillo, Rama Chellappa"
+5194a8acc87dd05a92a21f94fea966a2815f9b38,Noise aware analysis operator learning for approximately cosparse signals,"978-1-4673-0046-9/12/$26.00 ©2012 IEEE
+ICASSP 2012"
+51e43578ad761c7c4d58cb159eee0f8e6cf0f7a4,Incremental indexing and distributed image search using shared randomized vocabularies,"Introduction
+Method
+Results
+Incremental Indexing and Distributed Image Search
+using Shared Randomized Vocabularies
+Rapha¨el Mar´ee, Philippe Denis, Louis Wehenkel, Pierre Geurts
+GIGA Bioinformatics
+GIGA Research ; Dept. EE & CS (Montefiore Institute)
+University of Li`ege, Belgium
+MIR 2010
+March 29–31, 2010
+Philadelphia, Pennsylvania, USA
+Mar´ee et al.
+Shared Randomized Vocabularies
+(1 / 44)"
+51d97f4e4385a3da78bf9277a5426216198698c3,Improving the Accuracy of Face Detection for Damaged Video and Distant Targets,"Improving the Accuracy of Face Detection for Damaged Video and
+Distant Targets
+Department of Communication Engineering, Oriental Institute of Technology, New Taiepi City, Taiwan
+Jun-Horng Chen
+Keywords:
+Error Concealment, Face Detection, Super-resolution."
+514fdf2152dda3a39fc05eb6e1c80314837d96a2,Detailed 3D Representations for Object Recognition and Modeling,"Detailed 3D Representations for
+Object Recognition and Modeling
+M. Zeeshan Zia, Student Member, IEEE, Michael Stark, Member, IEEE,
+Bernt Schiele, Member, IEEE, and Konrad Schindler, Member, IEEE"
+51bfc693d170b4171f5bd9f9aed51f1fe8b5304d,Zero-Shot Recognition via Direct Classifier Learning with Transferred Samples and Pseudo Labels,"Zero-shot Recognition via Direct Classifier Learning
+with Transferred Samples and Pseudo Labels
+AAAI Anonymous Submission 182"
+5157dde17a69f12c51186ffc20a0a6c6847f1a29,Evolutionary Cost-Sensitive Extreme Learning Machine,"Evolutionary Cost-sensitive Extreme Learning
+Machine
+Lei Zhang, Member, IEEE, and David Zhang, Fellow, IEEE"
+3dec830b2514e82c714162622b3077966660112f,Statistical Evaluation of Face Recognition Techniques under Variable Environmental Constraints,"International Journal of Statistics and Probability; Vol. 4, No. 4; 2015
+ISSN 1927-7032 E-ISSN 1927-7040
+Published by Canadian Center of Science and Education
+Statistical Evaluation of Face Recognition Techniques under Variable
+Environmental Constraints
+Louis Asiedu1, Atinuke O. Adebanji2, Francis Oduro3
+& Felix O. Mettle4
+Department of Statistics, University of Ghana, Legon-Accra, Ghana
+Department of Mathematics, Kwame Nkrumah University of Science and Technology, Kumasi, Ghana
+Department of Mathematics, Kwame Nkrumah University of Science and Technology, Kumasi, Ghana
+Department of Statistics, University of Ghana, Legon-Accra, Ghana
+Correspondence: Louis Asiedu, Department of Statistics, University of Ghana, Legon-Accra, Ghana. Tel:
+33-543-426-707. E-mail:
+Received: August 1, 2015 Accepted: August 19, 2015 Online Published: October 9, 2015
+doi:10.5539/ijsp.v4n4p93 URL: http://dx.doi.org/10.5539/ijsp.v4n4p93"
+3d74d4177f5c1444b73221c12f359e858625a691,Composite-ISA Cores : Enabling Multi-ISA Heterogeneity Using a Single ISA,"ISCA 2018 Submission #283
+Confidential Draft: DO NOT DISTRIBUTE
+Composite-ISA Cores: Enabling Multi-ISA Heterogeneity
+Using a Single ISA"
+3d6229044f6605604818f39f08c5270a5a132a03,Projective Nonnegative Matrix Factorization based on α-Divergence,"Projective Nonnegative Matrix Factorization based on
+-Divergence
+Zhirong Yang and Erkki Oja
+Department of Information and Computer Science∗
+Aalto University School of Science and Technology
+P.O.Box 15400, FI-00076, Aalto, Finland"
+3dbb2ca6942eb49538d92823fe22c7475e866ca1,Institutionen För Systemteknik Department of Electrical Engineering Examensarbete Autonomous Morphometrics Using Depth Cameras for Object Classification and Identification Autonomous Morphometrics Using Depth Cameras for Object Classification and Identification Examensarbete Utfört I Datorseende Vid Tekniska Högskolan Vid Linköpings Universitet Av,"Institutionen för systemteknik
+Department of Electrical Engineering
+Examensarbete
+Autonomous Morphometrics using Depth Cameras for
+Object Classification and Identification
+Examensarbete utfört i Datorseende
+vid Tekniska högskolan vid Linköpings universitet
+Felix Björkeson
+LiTH-ISY-EX--13/4680--SE
+Linköping 2013
+Department of Electrical Engineering
+Linköpings universitet
+SE-581 83 Linköping, Sweden
+Linköpings tekniska högskola
+Linköpings universitet
+581 83 Linköping"
+3da97d97b12fcf22208c36f471119f33a08d9b6f,Multi-modal Biometric system using ear and face(2D+3D) Modalities,"Multi-modal Biometric system using ear and
+face(2D+3D) Modalities
+M.Pujitha Raj
+Computer Science and engineering
+Amrita University
+Coimbatore, India
+B.Achyut Sarma
+Computer Science and engineering
+Amrita University
+Coimbatore, India"
+3daafe6389d877fe15d8823cdf5ac15fd919676f,Human Action Localization with Sparse Spatial Supervision,"Human Action Localization
+with Sparse Spatial Supervision
+Philippe Weinzaepfel, Xavier Martin, and Cordelia Schmid, Fellow, IEEE"
+3daf1191d43e21a8302d98567630b0e2025913b0,Can Autism be Catered with Artificial Intelligence-Assisted Intervention Technology? A Literature Review,"Can Autism be Catered with Artificial Intelligence-Assisted Intervention
+Technology? A Literature Review
+Muhammad Shoaib Jaliawala∗, Rizwan Ahmed Khan∗†
+Faculty of Information Technology, Barrett Hodgson University, Karachi, Pakistan
+Universit´e Claude Bernard Lyon 1, France"
+3dcc51a37f2e5e91d77ff00f18178484c4e938cb,Excitation Dropout: Encouraging Plasticity,"Under review as a conference paper at ICLR 2019
+EXCITATION DROPOUT: ENCOURAGING PLASTICITY
+IN DEEP NEURAL NETWORKS
+Anonymous authors
+Paper under double-blind review"
+3d36f941d8ec613bb25e80fb8f4c160c1a2848df,Out-of-Sample Generalizations for Supervised Manifold Learning for Classification,"Out-of-sample generalizations for supervised
+manifold learning for classification
+Elif Vural and Christine Guillemot"
+3d7a5d1fbec861542631fcb10f58e38f4f51a04c,Face Recognition Application of Blur-Robust,"International Journal of Science and Research (IJSR)
+ISSN (Online): 2319-7064
+Impact Factor (2012): 3.358
+Face Recognition Application of Blur-Robust
+Pitta Santhosh Kumar1, Ankush Jain2
+M.Tech student, Department of CSE, Anurag Group of Institutions, Hyderabad, India
+Assistant professor, Department of CSE, Anurag Group of Institutions, Hyderabad, India"
+3d5a1be4c1595b4805a35414dfb55716e3bf80d8,Hidden Two-Stream Convolutional Networks for Action Recognition,"Hidden Two-Stream Convolutional Networks for
+Action Recognition
+Yi Zhu, Zhenzhong Lan, Shawn Newsam, Alexander G. Hauptmann"
+3de3c479164312ab3a1795ee84f20c16632c04c4,Scalable Deep Learning Logo Detection,"Scalable Deep Learning Logo Detection
+Hang Su∗, Shaogang Gong†, Xiatian Zhu‡
+† Queen Mary University of London ‡ Vision Semantics Ltd."
+3d62b2f9cef997fc37099305dabff356d39ed477,Joint Face Alignment and 3D Face Reconstruction with Application to Face Recognition,"Joint Face Alignment and 3D Face
+Reconstruction with Application to Face
+Recognition
+Feng Liu, Qijun Zhao, Member, IEEE, Xiaoming Liu, Member, IEEE and Dan Zeng"
+3d97f739ae76c8db1146da4aaeb0dc1ef3d31c33,Données multimodales pour l ’ analyse d ’ image,"UNIVERSITÉDEGRENOBLENoattribuéparlabibliothèqueTHÈSEpourobtenirlegradedeDOCTEURDEL’UNIVERSITÉDEGRENOBLESpécialité:MathématiquesetInformatiquepréparéeauLaboratoireJeanKuntzmanndanslecadredel’ÉcoleDoctoraleMathématiques,SciencesetTechnologiesdel’Information,InformatiqueprésentéeetsoutenuepubliquementparMatthieuGuillauminle27septembre2010ExploitingMultimodalDataforImageUnderstandingDonnéesmultimodalespourl’analysed’imageDirecteursdethèse:CordeliaSchmidetJakobVerbeekJURYM.ÉricGaussierUniversitéJosephFourierPrésidentM.AntonioTorralbaMassachusettsInstituteofTechnologyRapporteurMmeTinneTuytelaarsKatholiekeUniversiteitLeuvenRapporteurM.MarkEveringhamUniversityofLeedsExaminateurMmeCordeliaSchmidINRIAGrenobleExaminatriceM.JakobVerbeekINRIAGrenobleExaminateur"
+3d91ba69bfbb2ba018419342d279f2d7571530f6,Qualitative Tracking Performance Evaluation without Ground-Truth,"Qualitative Tracking Performance Evaluation without Ground-Truth∗
+Dept. of Computer Science and Engineering
+Dept. of Computer Science and Engineering
+Jihun Hamm
+Bohyung Han
+POSTECH, Korea"
+3da4fa2365c01f53180050c7d332107089d913c0,Face Recognition Using Parzenfaces,"Face Recognition Using Parzenfaces
+Zhirong Yang and Jorma Laaksonen
+Laboratory of Computer and Information Science ⋆
+Helsinki University of Technology
+P.O. Box 5400, FI-02015 TKK, Espoo, Finland
+{zhirong.yang,"
+3dd4d719b2185f7c7f92cc97f3b5a65990fcd5dd,Ensemble of Hankel Matrices for Face Emotion Recognition,"Ensemble of Hankel Matrices for
+Face Emotion Recognition
+Liliana Lo Presti and Marco La Cascia
+DICGIM, Universit´a degli Studi di Palermo,
+V.le delle Scienze, Ed. 6, 90128 Palermo, Italy,
+DRAFT
+To appear in ICIAP 2015"
+3da12b99cd8040bb374eed160f8016b3fe492967,Multiperson Tracking by Online Learned Grouping Model With Nonlinear Motion Context,"Multi-person Tracking by Online Learned Grouping
+Model with Non-linear Motion Context
+Xiaojing Chen, Zhen Qin, Le An, Member, IEEE, and Bir Bhanu, Fellow, IEEE"
+3d1b0c7e9ef0e31dd635041539e795dc07ebee86,Tracking people in 3D using a bottom-up top-down detector,"Tracking People in 3D Using a Bottom-Up Top-Down Detector
+Luciano Spinello, Matthias Luber and Kai O. Arras
+Social Robotics Lab, University of Freiburg, Germany
+{spinello, luber,"
+3d88180732d63a4babf3a4b1a82dd7fdf27a7520,"Facial expression, size, and clutter: Inferences from movie structure to emotion judgments and back.","23Attention, Perception, &Psychophysics ISSN 1943-3921Volume 78Number 3 Atten Percept Psychophys (2016)78:891-901DOI 10.3758/s13414-015-1003-5Facial expression, size, and clutter:Inferences from movie structure to emotionjudgments and backJames E. Cutting & Kacie L. Armstrong"
+3db123d094c7ba33bbd3c4ccbea77e2093ad6174,Online Visual Multi-Object Tracking via Labeled Random Finite Set Filtering,"JOURNAL OF LATEX CLASS FILES, VOL. X, NO. X, X XXXX
+A Labeled Random Finite Set Online
+Multi-Object Tracker for Video Data
+Du Yong Kim, Ba-Ngu Vo, Member, IEEE, and Ba-Tuong Vo, Member, IEEE"
+3dc3f0b64ef80f573e3a5f96e456e52ee980b877,Maximum Likelihood Training of the Embedded HMM for Face Detection and Recognition,"AXU E
+DETECT AD RECGT
+Aa V. e(cid:12)a ad 
+Cee f Siga
+Sch
+Gegia i e f Tech
+faa"
+3d67e97227846f579d1825e00d395d30e17f5d0e,Face and ECG Based Multi-Modal Biometric Authentication,"Face and ECG Based Multi-Modal
+Biometric Authentication
+Ognian Boumbarov1, Yuliyan Velchev1, Krasimir Tonchev1
+nd Igor Paliy2
+Technical University of Sofia
+Ternopil National Economic University
+Bulgaria
+Ukraine
+. Introduction
+A biometric system is essentially a pattern recognition system. This system measures
+nd analyses human body physiological characteristics, such as face and facial features,
+fingerprints, eye, retinas, irises, voice patterns or behavioral characteristic for enrollment,
+verification or identification (Bolle & Pankanti, 1998). Uni-modal biometric systems have
+poor performance and accuracy, and over last few decades the multi-modal biometric systems
+have become very popular. The main objective of multi biometrics is to reduce one or more
+false accept rate, false reject rate and failure to enroll rate. Face Recognition (FR) is still
+onsidered as one of the most challenging problems in pattern recognition. The FR systems
+try to recognize the human face in video sequences as 3D object (Chang et al., 2003; 2005), in
+unconstrained conditions, in comparison to the early attempts of 2D frontal faces in controlled
+onditions. Despite the effort spent on research today there is not a single, clearly defined,"
+3dcebd4a1d66313dcd043f71162d677761b07a0d,Local binary pattern domain local appearance face recognition,"Yerel Đkili Örüntü Ortamında Yerel Görünüme Dayalı Yüz Tanıma
+Local Binary Pattern Domain Local Appearance Face Recognition
+Hazım K. Ekenel1, Mika Fischer1, Erkin Tekeli2, Rainer Stiefelhagen1, Aytül Erçil2
+Institut für Theorestische Informatik, Universität Karlsruhe (TH), Karlsruhe, Germany
+Faculty of Engineering and Natural Sciences, Sabancı University, Đstanbul, Turkey
+Özetçe
+Bu bildiride, ayrık kosinüs dönüşümü tabanlı yerel görünüme
+dayalı yüz tanıma algoritması ile yüz imgelerinin yerel ikili
+örüntüye (YĐÖ) dayalı betimlemesini birleştiren hızlı bir yüz
+tanıma algoritması sunulmuştur. Bu tümleştirmedeki amaç,
+yerel ikili örüntünün dayanıklı imge betimleme yeteneği ile
+yrık kosinüs dönüşümünün derli-toplu veri betimleme
+yeteneğinden yararlanmaktır. Önerilen yaklaşımda, yerel
+görünümün modellenmesinden önce girdi yüz imgesi yerel
+ikili örüntü ile betimlenmiştir. Elde edilen YĐÖ betimlemesi,
+irbirleri ile örtüşmeyen bloklara ayrılmış ve her blok
+üzerinde yerel özniteliklerin çıkartımı için ayrık kosinüs
+dönüşümü uygulanmıştır. Çıkartımı yapılan yerel öznitelikler
+daha sonra arka arkaya eklenerek global öznitelik vektörü
+oluşturulmuştur. Önerilen algoritma, CMU PIE ve FRGC"
+3d7fce66c1880f4b29171e415cfad57d8b96ced2,Exploiting Ambiguities in the Analysis of Cumulative Matching Curves for Person Re-identification,
+3df5e17e87144b1e84b5ab9467bc2c2f233b66c7,Convolutional Architecture Exploration for Action Recognition and Image Classification,"Convolutional Architecture Exploration for
+Action Recognition and Image Classification
+JT Turner∗1,2, David Aha1, Leslie Smith1, and Kalyan Moy Gupta2
+Knexus Research Corporation;
+74 Waterfront Street Suite 310; National Harbor, MD 20745
+Navy Center for Applied Research in Artificial Intelligence;
+Naval Research Laboratory (Code 5514); Washington, DC 20375"
+3d42e17266475e5d34a32103d879b13de2366561,The Global Dimensionality of Face Space,"Proc.4thIEEEInt’lConf.AutomaticFace&GestureRecognition,Grenoble,France,pp264–270
+The Global Dimensionality of Face Space
+(cid:3)
+http://venezia.rockefeller.edu/
+The Rockefeller University
+Penio S. Penev
+Laboratory of Computational Neuroscience
+Lawrence Sirovich
+Laboratory for Applied Mathematics
+Mount Sinai School of Medicine
+(cid:13) IEEE2000
+230 York Avenue, New York, NY 10021
+One Gustave L. Levy Place, New York, NY 10029"
+3d8c8acb8c59e9f23f048f44a23f36ffd791cdf5,Visual tracking over multiple temporal scales,"Khan, Muhammad Haris (2015) Visual tracking over
+multiple temporal scales. PhD thesis, University of
+Nottingham.
+Access from the University of Nottingham repository:
+http://eprints.nottingham.ac.uk/33056/1/Thesis.pdf
+Copyright and reuse:
+The Nottingham ePrints service makes this work by researchers of the University of
+Nottingham available open access under the following conditions.
+This article is made available under the University of Nottingham End User licence and may
+e reused according to the conditions of the licence. For more details see:
+http://eprints.nottingham.ac.uk/end_user_agreement.pdf
+For more information, please contact"
+3dba6c86541aad3ec8f54c55d57eca9aa98f4ed2,PAC-Bayesian Majority Vote for Late Classifier Fusion,"PAC-Bayesian Majority Vote for Late Classifier Fusion∗
+Aix-Marseille Univ., LIF-QARMA, CNRS, UMR 7279, F-13013, Marseille, France
+Emilie Morvant
+St´ephane Ayache
+Amaury Habrard
+Univ. of St-Etienne, Lab. Hubert Curien, CNRS, UMR 5516, F-42000, St-Etienne, France
+May 2, 2014"
+3df7401906ae315e6aef3b4f13126de64b894a54,Robust learning of discriminative projection for multicategory classification on the Stiefel manifold,"Robust Learning of Discriminative Projection for Multicategory Classification on
+the Stiefel Manifold
+Duc-Son Pham and Svetha Venkatesh
+Dept. of Computing, Curtin University of Technology
+GPO Box U1987, Perth, WA 6845, Australia"
+3dd1338a5d0aa47fa2aef31654ee1392b8089991,Crowdsourcing the construction of a 3D object recognition database for robotic grasping,"014 IEEE International Conference on Robotics & Automation (ICRA)
+Hong Kong Convention and Exhibition Center
+May 31 - June 7, 2014. Hong Kong, China
+978-1-4799-3685-4/14/$31.00 ©2014 IEEE"
+3d1382fa43c31e594ed2d84dda9984b1db047b0e,Compositional Memory for Visual Question Answering,"Compositional Memory for Visual Question Answering
+Aiwen Jiang1,2
+Fang Wang2
+Fatih Porikli2
+Yi Li∗ 2,3
+NICTA and ANU
+{fang.wang,
+Toyota Research Institute North America
+feature as the first word to initialize the sequential learning.
+While the use of holistic approach is straightforward and
+onvenient, it is, however, debatably problematic. For ex-
+mple, in the VQA problems many answers are directly re-
+lated to the contents of some image regions. Therefore, it
+is dubious if the holistic features are rich enough to provide
+the information only available at regions. Also, it may hin-
+der the exploration of finer-grained local features for VQA.
+In this paper we propose a Compositional Memory for
+n end-to-end training framework. Our approach takes the
+dvantage of the recent progresses in image captioning [3,
+], natural language processing [5], and computer vision to"
+3d21b7b4f48e614bc2f2b87eb110aa329b7d66d8,Recognizing Human Actions by Using Effective Codebooks and Tracking,"Recognizing Human Actions by using Effective
+Codebooks and Tracking
+Lamberto Ballan, Lorenzo Seidenari, Giuseppe Serra, Marco Bertini and Alberto
+Del Bimbo"
+3d1af6c531ebcb4321607bcef8d9dc6aa9f0dc5a,Random Multispace Quantization as an Analytic Mechanism for BioHashing of Biometric and Random Identity Inputs,"Random Multispace Quantization as
+n Analytic Mechanism for BioHashing
+of Biometric and Random Identity Inputs
+Andrew B.J. Teoh, Member, IEEE, Alwyn Goh, and David C.L. Ngo, Member, IEEE"
+3dffacda086689c1bcb01a8dad4557a4e92b8205,Multiple Object Tracking: A Literature Review,"Multiple Object Tracking: A Literature Review
+Wenhan Luo, Junliang Xing, Anton Milan, Xiaoqin Zhang, Wei Liu, Xiaowei Zhao and Tae-Kyun Kim"
+3d67aa108e65e636158abc0f31b703af3d31baa6,Decorrelating Semantic Visual Attributes by Resisting the Urge to Share,"Decorrelating Semantic Visual Attributes by Resisting the Urge
+Supplementary material for CVPR 2014 submission ID 0824
+to Share
+In this document, we provide supplementary material for our CVPR 2014 submission “Decorrelating Semantic
+Visual Attributes by Resisting the Urge to Share”(Paper ID 0824). Sec 1 gives additional details for our experi-
+mental setup (Sec 4 of the paper). Sec 1.1 lists the groups used in all three datasets in our experiments. Sec 1.2
+discusses the details of the image descriptors used for each dataset. Sec 2 discusses how attributes are localized
+for our experiments in Sec 4.1 in the paper. Sec 3 discusses how it is posible to set parameters that generalize well
+to novel test sets, using only training data. Sec 4 discusses the details of the optimization of our formulation (Eq 4
+in the paper).
+Datasets
+.1 Groups
+(see para on Semantic groups in Sec 4 in the paper)
+Fig 1, 2 and 3 show the attribute groups used in our experiments on the CUB, AwA and aPY datasets
+respectively. The 28 CUB groups come pre-specified with the dataset [6]. The groups on AwA match exactly the
+groups specified in [5]. Those on aPY also match the groups outlined in [5] on the 25 attributes (see paper) used
+in our experiments (aPY-25). In each figure, attribute groups are enclosed in shaded boxes, and phrases in larger
+font labeling the boxes indicate the rationale for the grouping.
+.2 Features
+(see also Sec 3.2 and para on Features in Sec 4 in the paper)"
+3dc78b41ed926b88c9cc4d40c6c5250bfafad74a,A pilot study for mood-based classification of TV programmes,"Research & Development
+White Paper
+WHP 231
+September 2012
+A Pilot Study for
+Mood-based Classification of TV Programmes
+Jana Eggink, Penelope Allen, Denise Bland
+BRITISH BROADCASTING CORPORATION"
+3d94f81cf4c3a7307e1a976dc6cb7bf38068a381,Data-Dependent Label Distribution Learning for Age Estimation,"Data-Dependent Label Distribution Learning
+for Age Estimation
+Zhouzhou He, Xi Li, Zhongfei Zhang, Fei Wu, Xin Geng, Yaqing Zhang, Ming-Hsuan Yang, and Yueting Zhuang"
+3d5187a957cc90f4143e6302786d65dbedf7d9bb,Stacking With Auxiliary Features for Visual Question Answering,"To Appear In Proceedings of the 16th Annual Conference of the North American
+Chapter of the Association for Computational Linguistics: Human Language
+Technologies 2018."
+3d9d1f8075ebdd03f86b4e40b9a5d08447ade8d3,Comparison of Illumination Normalization Methods for Face Recognition∗,"COMPARISON OF ILLUMINATION NORMALIZATION METHODS FOR
+FACE RECOGNITION(cid:3)
+Mauricio Villegas Santamar·(cid:17)a and Roberto Paredes Palacios
+Instituto Tecnol·ogico de Inform·atica
+Universidad Polit·ecnica de Valencia
+Camino de Vera s/n, 46022 Valencia (Spain)"
+3d5b8127ce57279f9fd77d3a24d8034b485163a4,System ( tm ) for Image and Vision Computing Manuscript Draft Manuscript Number : IMAVIS-D16-00270 R 2 Title : Extended three-dimensional rotation invariant local binary patterns,"Elsevier Editorial System(tm) for Image and
+Vision Computing
+Manuscript Draft
+Manuscript Number: IMAVIS-D-16-00270R2
+Title: Extended three-dimensional rotation invariant local binary
+patterns
+Article Type: Full Length Article
+Keywords: Local binary patterns (LBP); Three-dimensions; Rotation
+invariance; Texture classification
+Corresponding Author: Mr. Leonardo Citraro, MSc.
+Corresponding Author's Institution: University of Southampton
+First Author: Leonardo Citraro, MSc.
+Order of Authors: Leonardo Citraro, MSc.; Sasan Mahmoodi, Professor, Phd;
+Angela Darekar, Phd; Brigitte Vollmer, Professor, Phd"
+3db588f1e58c1207685771d8015fa9427d731a53,An automatic 3D expression recognition framework based on sparse representation of conformal images,"An Automatic 3D Expression Recognition Framework based on Sparse
+Representation of Conformal Images
+Wei Zeng, Huibin Li, Liming Chen, Jean-Marie Morvan, Xianfeng David Gu"
+3d740c4f2246ce8e63d0eacc2cc1a5c31259e9ee,Discovering Attribute Shades of Meaning with the Crowd,"http://dx.doi.org/10.1007/s11263-014-0798-1
+Discovering Attribute Shades of Meaning with the Crowd
+Adriana Kovashka · Kristen Grauman
+Received: date / Accepted: date"
+3da9a9091cfa8f4bf625829faf7a4c35a8fe91e0,Working memory network alterations in high-functioning adolescents with an autism spectrum disorder.,"PDF hosted at the Radboud Repository of the Radboud University
+Nijmegen
+The following full text is a publisher's version.
+For additional information about this publication click this link.
+http://hdl.handle.net/2066/183247
+Please be advised that this information was generated on 2018-05-20 and may be subject to
+hange."
+3d42aedd347f927a6bce28d0fa509c6d2132c11f,3D Hand Pose Detection in Egocentric RGB-D Images,"International Journal of Computer Vision manuscript No.
+(will be inserted by the editor)
+D Hand Pose Detection in Egocentric RGB-D Images
+Gr´egory Rogez · J. S. Supanˇciˇc III · Maryam Khademi ·
+J. M. M. Montiel · Deva Ramanan
+Received: date / Accepted: date"
+58b80f0e484d32c9fe5b57648848e048270d435b,Development of an Efficient Face Recognition System based on Linear and Nonlinear Algorithms,"Development of an Efficient Face Recognition System based on Linear and Nonlinear Algorithms
+{tag} {/tag}
+International Journal of Computer Applications
+Foundation of Computer Science (FCS), NY, USA
+Volume 134
+Number 7
+Year of Publication: 2016
+Authors:
+Filani Araoluwa S., Adetunmbi Adebayo O.
+10.5120/ijca2016907932
+{bibtex}2016907932.bib{/bibtex}"
+58cbd5a31e92cff29e29e8b25ee79f30ff4e6d4b,Culture shapes spatial frequency tuning for face identification.,"Journal of Experimental Psychology:
+Human Perception and Performance
+017, Vol. 43, No. 2, 294 –306
+0096-1523/17/$12.00
+© 2016 American Psychological Association
+http://dx.doi.org/10.1037/xhp0000288
+Culture Shapes Spatial Frequency Tuning for Face Identification
+Université de Montréal and Université du Québec en Outaouais
+Jessica Tardif
+Daniel Fiset
+Université du Québec en Outaouais
+Ye Zhang
+Hangzhou Normal University
+Amanda Estéphan
+Université du Québec en Outaouais
+Qiuju Cai, Canhuang Luo, and Dan Sun
+Hangzhou Normal University
+Frédéric Gosselin
+Université de Montréal
+Caroline Blais"
+58d16e23e1192be4acaf6a29c1f5995817146554,Bringing back simplicity and lightliness into neural image captioning,"Bringing back simplicity and lightliness into neural image captioning
+Jean-Benoit Delbrouck and St´ephane Dupont
+{jean-benoit.delbrouck,
+TCTS Lab, University of Mons, Belgium"
+5834555d239c27369e7a4167bb0c0fed725d761e,Improved illumination invariant homomorphic filtering using the dual tree complex wavelet transform,"978-1-4799-9988-0/16/$31.00 ©2016 IEEE
+ICASSP 2016"
+5801690199c1917fa58c35c3dead177c0b8f9f2d,Application of Object Based Classification and High Resolution Satellite Imagery for Savanna Ecosystem Analysis,"Remote Sens. 2010, 2, 2748-2772; doi:10.3390/rs2122748
+OPEN ACCESS
+Article
+Application of Object Based Classification and High Resolution
+Satellite Imagery for Savanna Ecosystem Analysis
+ISSN 2072-4292
+www.mdpi.com/journal/remotesensing
+Cerian Gibbes *, Sanchayeeta Adhikari, Luke Rostant, Jane Southworth, and Youliang Qiu
+Department of Geography & Land Use and Environmental Change Institute (LUECI), University of
+Florida, 3141 Turlington Hall, P. O. Box 117315, Gainesville, FL 32611, USA;
+E-Mails: (S.A.); (L.R.); (J.S.);
+(Y.Q.)
+* Author to whom correspondence should be addressed; E-Mail:
+Tel.: +1-352-392-0494; Fax: +1-352-392-8855.
+Received: 16 October 2010; in revised form: 7 December 2010 / Accepted: 8 December 2010 /
+Published: 10 December 2010"
+58a6eb3584b2f5df2f25d39a218904d510cae516,The UAVid Dataset for Video Semantic Segmentation,"The UAVid Dataset for Video Semantic Segmentation
+Ye Lyu1, George Vosselman1, Guisong Xia2, Alper Yilmaz3, Michael Ying Yang1∗"
+5892f8367639e9c1e3cf27fdf6c09bb3247651ed,Estimating Missing Features to Improve Multimedia Information Retrieval,"Estimating Missing Features to Improve Multimedia Information Retrieval
+Abraham Bagherjeiran
+Nicole S. Love
+Chandrika Kamath (cid:3)"
+58cb6677b77d5a79fc5b8058829693ca30b36ac5,Learning Similarity Metrics by Factorising Adjacency Matrices,"Learning Similarity Metrics by Factorising Adjacency Matrices
+Henry Gouk†
+Bernhard Pfahringer†
+Michael Cree‡
+Department of Computer Science, University of Waikato, Hamilton, New Zealand
+School of Engineering, University of Waikato, Hamilton, New Zealand"
+587f81ae87b42c18c565694c694439c65557d6d5,DeepFace: Face Generation using Deep Learning,"DeepFace: Face Generation using Deep Learning
+Hardie Cate
+Fahim Dalvi
+Zeshan Hussain"
+580054294ca761500ada71f7d5a78acb0e622f19,A Subspace Model-Based Approach to Face Relighting Under Unknown Lighting and Poses,"A Subspace Model-Based Approach to Face
+Relighting Under Unknown Lighting and Poses
+Hyunjung Shim, Student Member, IEEE, Jiebo Luo, Senior Member, IEEE, and Tsuhan Chen, Fellow, IEEE"
+58abb5001087f51dd2e9ab17b9fb8fb3567988e8,Array of Multilayer Perceptrons with No-class Resampling Training for Face Recognition,"Inteligencia Artificial 44(2009), 5-13
+doi: 10.4114/ia.v13i44.1041
+INTELIGENCIA ARTIFICIAL
+http://erevista.aepia.org/
+Array of Multilayer Perceptrons with No-class
+Resampling Training for Face Recognition
+D. Capello1, C. Mart´ınez2,3, D. Milone2 and G. Stegmayer1
+CIDISI-UTN-FRSF, CONICET, Lavaise 610 - Santa Fe (Argentina)
+Sinc(i)-FICH-UNL, CONICET, Ciudad Universitaria UNL - Santa Fe (Argentina)
+Laboratorio de Cibern´etica-FI-UNER, C.C. 47 Suc. 3-3100, Entre R´ıos (Argentina)"
+587c48ec417be8b0334fa39075b3bfd66cc29dbe,Serial dependence in the perception of attractiveness,"Journal of Vision (2016) 16(15):28, 1–8
+Serial dependence in the perception of attractiveness
+Ye Xia
+Department of Psychology, University of California,
+Berkeley, CA, USA
+Allison Yamanashi Leib
+Department of Psychology, University of California,
+Berkeley, CA, USA
+David Whitney
+Department of Psychology, University of California,
+Berkeley, CA, USA
+Helen Wills Neuroscience Institute, University of
+California, Berkeley, CA, USA
+Vision Science Group, University of California,
+Berkeley, CA, USA
+The perception of attractiveness is essential for choices
+of food, object, and mate preference. Like perception of
+other visual features, perception of attractiveness is
+stable despite constant changes of image properties due
+to factors like occlusion, visual noise, and eye"
+58081cb20d397ce80f638d38ed80b3384af76869,Embedded Real-Time Fall Detection Using Deep Learning For Elderly Care,"Embedded Real-Time Fall Detection Using Deep
+Learning For Elderly Care
+Hyunwoo Lee∗
+Jooyoung Kim
+Dojun Yang
+Joon-Ho Kim
+Samsung Research, Samsung Electronics
+{hyun0772.lee, joody.kim, dojun.yang,"
+58a5c2f9f60bdc6ab640767cb21fd6ba04eef5d7,Towards a Unified 3D Affective Model,"Towards a Unified 3D Affective Model
+Kuderna-Iulian Benţa1, Hannelore-Inge Lisei2, Marcel Cremene1
+Technical University of Cluj-Napoca, 400016 Cluj-Napoca, România,
+“Babeş-Bolyai“ University, 400084 Cluj-Napoca, România,
+{Iulian.Benta, Marcel.Cremene,"
+581e920ddb6ecfc2a313a3aa6fed3d933b917ab0,Automatic Mapping of Remote Crowd Gaze to Stimuli in the Classroom,"Automatic Mapping of Remote Crowd Gaze to
+Stimuli in the Classroom
+Thiago Santini1, Thomas K¨ubler1, Lucas Draghetti1, Peter Gerjets2, Wolfgang
+Wagner3, Ulrich Trautwein3, and Enkelejda Kasneci1
+University of T¨ubingen, T¨ubingen, Germany
+Leibniz-Institut f¨ur Wissensmedien, T¨ubingen, Germany
+Hector Research Institute of Education Sciences and Psychology, T¨ubingen,
+Germany"
+58fa85ed57e661df93ca4cdb27d210afe5d2cdcd,Facial expression recognition by re-ranking with global and local generic features,"Cancún Center, Cancún, México, December 4-8, 2016
+978-1-5090-4847-2/16/$31.00 ©2016 IEEE"
+58888b30e9123c1b1709be1efa92898e090d7bd2,Person Re-Identification by Discriminative Selection in Video Ranking,"Person Re-Identification by Discriminative
+Selection in Video Ranking
+Taiqing Wang, Shaogang Gong, Xiatian Zhu, and Shengjin Wang"
+5860cf0f24f2ec3f8cbc39292976eed52ba2eafd,COMPUTATION EvaBio: A TOOL FOR PERFORMANCE EVALUATION IN BIOMETRICS,"International Journal of Automated Identification Technology, 3(2), July-December 2011, pp. 51-60
+COMPUTATION EvaBio: A TOOL FOR PERFORMANCE
+EVALUATION IN BIOMETRICS
+Julien Mahier, Baptiste Hemery, Mohamad El-Abed*, Mohamed T. El-Allam, Mohamed Y.
+Bouhaddaoui and Christophe Rosenberger
+GREYC Laboratory, ENSICAEN - University of Caen Basse Normandie - CNRS,
+6 Boulevard Maréchal Juin, 14000 Caen Cedex - France"
+5882e62866fe1fcf7f8458e0bd0bcb39057afce3,Attention to Head Locations for Crowd Counting,"Attention to Head Locations for Crowd Counting
+Youmei Zhang, Chunluan Zhou, Faliang Chang, and Alex C. Kot, Fellow Member, IEEE"
+5872a8ae1879c3f20d94e7cc5a4fcef47b654c7e,Sparse Matching of Salient Facial Curves for Recognizing 3 D Faces,"International Journal of Science and Research (IJSR)
+ISSN (Online): 2319-7064
+Index Copernicus Value (2015): 78.96 | Impact Factor (2015): 6.391
+Sparse Matching of Salient Facial Curves for
+Recognizing 3D Faces
+Madhura Patil1, L. J. Sankpal2
+Pune University, Sinhgad Academy of Engineering, Kondhwa, Pune 411048, India
+Professor, Pune University, Sinhgad Academy of Engineering, Kondhwa, Pune 411048
+cknowledgment
+unique mark
+cknowledgment.
+increase acquisition commotion
+furthermore"
+589b30ebdb76659ce5d3a19cd9fa0e7a3466d85d,Very Low Resolution Face Recognition Problem,"Very Low Resolution Face Recognition Problem
+Wilman ZOU
+Pong C. Yuen"
+58bf72750a8f5100e0c01e55fd1b959b31e7dbce,PyramidBox: A Context-assisted Single Shot Face Detector,"PyramidBox: A Context-assisted Single Shot
+Face Detector.
+Xu Tang∗, Daniel K. Du∗, Zeqiang He, and Jingtuo Liu†
+Baidu Inc."
+58542eeef9317ffab9b155579256d11efb4610f2,"Face Recognition Revisited On Pose , Alignment , Color , Illumination And Expression-Pyten","International Journal of Science and Research (IJSR)
+ISSN (Online): 2319-7064
+Index Copernicus Value (2013): 6.14 | Impact Factor (2014): 5.611
+Face Recognition Revisited on Pose, Alignment,
+Color, Illumination and Expression-PyTen
+Mugdha Tripathi
+Computer Science, BIT Noida, India"
+58823377757e7dc92f3b70a973be697651089756,Automatic facial expression analysis,"Technical Report
+UCAM-CL-TR-861
+ISSN 1476-2986
+Number 861
+Computer Laboratory
+Automatic facial expression analysis
+Tadas Baltrusaitis
+October 2014
+5 JJ Thomson Avenue
+Cambridge CB3 0FD
+United Kingdom
+phone +44 1223 763500
+http://www.cl.cam.ac.uk/"
+58f7b9ebdb9b380cdfbef12b8abefceee0160a58,Public Document Document Evolution Executive Summary,"Project N° IST-2002-507634 - BioSecure
+D7.2.2 – Revision: b3
+Contract Number:
+Project Acronym:
+Project Title:
+Instrument:
+Start Date of Project:
+Duration:
+Deliverable Number:
+Title of Deliverable:
+8 April 2005
+IST-2002-507634
+BioSecure
+Biometrics for Secure Authentication
+Network of Excellence
+01 June, 2004
+6 months
+D7.2.2
+Report on the face state of the art
+Contractual Due Date:"
+5865e824e3d8560e07840dd5f75cfe9bf68f9d96,Embodied conversational agents for multimodal automated social skills training in people with autism spectrum disorders,"RESEARCH ARTICLE
+Embodied conversational agents for
+multimodal automated social skills training in
+people with autism spectrum disorders
+Hiroki Tanaka1*, Hideki Negoro2, Hidemi Iwasaka3, Satoshi Nakamura1
+Graduate School of Information Science, Nara Institute of Science and Technology, Ikoma-shi, Nara, 630-
+0101, Japan, 2 Center for Special Needs Education, Nara University of Education, Nara-shi, Nara, 630-8538,
+Japan, 3 Developmental Center for Child and Adult, Shigisan Hospital, Ikoma-gun, Nara, 636-0815, Japan"
+58bb77dff5f6ee0fb5ab7f5079a5e788276184cc,Facial expression recognition with PCA and LBP features extracting from active facial patches,"Facial Expression Recognition with PCA and LBP
+Features Extracting from Active Facial Patches
+Yanpeng Liua, Yuwen Caoa, Yibin Lia, Ming Liu, Rui Songa
+Yafang Wang, Zhigang Xu , Xin Maa†"
+585efe3c8efd1a4fa2ed8221c278997521668bc1,Recognizing Face Images with Disguise Variations,
+58db008b204d0c3c6744f280e8367b4057173259,Facial Expression Recognition,"International Journal of Current Engineering and Technology
+ISSN 2277 - 4106
+© 2012 INPRESSCO. All Rights Reserved.
+Available at http://inpressco.com/category/ijcet
+Research Article
+Facial Expression Recognition
+Riti Kushwahaa and Neeta Naina*
+Department of Computer Engineering Malaviya National Institute of Technology, Jaipur, Rajasthan, India
+Accepted 3June 2012, Available online 8 June 2012"
+677585ccf8619ec2330b7f2d2b589a37146ffad7,A flexible model for training action localization with varying levels of supervision,"A flexible model for training action localization
+with varying levels of supervision
+Guilhem Chéron∗ 1 2
+Jean-Baptiste Alayrac∗ 1
+Ivan Laptev1
+Cordelia Schmid2"
+67a6bd37e91f2c334b1092fd9e9b16be93f82377,Data Driven Visual Recognition,"Data Driven Visual Recognition
+OMID AGHAZADEH
+Doctoral Thesis
+Stockholm, Sweden, 2014"
+6720edcea05b31a9b9a6db98ee71e8ed31efdc38,Practices in source code sharing in astrophysics,"Practices
+source
+sharing
+astrophysics
+Shamir1,
+Wallin2,
+Alice
+Allen3,
+Bruce
+Berriman4,
+Peter
+Teuben5,
+Robert
+Nemiroff6,
+Jessica
+Mink7,
+Robert
+Hanisch8,
+Kimberly
+DuPrie3"
+6768b558cc58e113096540c123ef3b2c2d2469a1,Maximum Margin Linear Classifiers in Unions of Subspaces,"LYU, ZEPEDA, PÉREZ: US-SVM
+Maximum Margin Linear Classifiers in
+Unions of Subspaces
+Xinrui Lyu1,2
+Joaquin Zepeda1
+Patrick Pérez1
+Technicolor
+5576, Cesson-Sevigne, France
+École Polytechnique Fédérale de
+Lausanne (EPFL)
+CH-1015, Lausanne, Switzerland"
+67bf0b6bc7d09b0fe7a97469f786e26f359910ef,Abnormal use of facial information in high-functioning autism.,"J Autism Dev Disord
+DOI 10.1007/s10803-006-0232-9
+O R I G I N A L P A P E R
+Abnormal Use of Facial Information in High-Functioning
+Autism
+Michael L. Spezio Æ Ralph Adolphs Æ
+Robert S. E. Hurley Æ Joseph Piven
+Ó Springer Science+Business Media, LLC 2006"
+6789bddbabf234f31df992a3356b36a47451efc7,Unsupervised Generation of Free-Form and Parameterized Avatars.,"Unsupervised Generation of Free-Form and
+Parameterized Avatars
+Adam Polyak, Yaniv Taigman, and Lior Wolf, Member, IEEE"
+6733adb12458678c606759233f6f55782bace372,Photogenic Facial Expression Discrimination,"PHOTOGENIC FACIAL EXPRESSION DISCRIMINATION
+Luana Bezerra Batista and Herman Martins Gomes
+Departamento de Sistemas e Computação
+João Marques de Carvalho
+Departamento de Engenharia Elétrica
+Universidade Federal de Campina Grande
+Campina Grande, Paraíba, Brasil, 58.109-970
+Keywords:
+Facial Expression Recognition, Photogeny, Principal Component Analysis, Multi-Layer Perceptron."
+67490b6f34c827f107b046adeef0f5476132d4f8,"How good are detection proposals, really?","J. HOSANG ET AL.: HOW GOOD ARE DETECTION PROPOSALS, REALLY?
+How good are detection proposals, really?
+Jan Hosang
+http://mpi-inf.mpg.de/~jhosang
+Rodrigo Benenson
+http://mpi-inf.mpg.de/~benenson
+Bernt Schiele
+http://mpi-inf.mpg.de/~schiele
+MPI Informatics
+Saarbrücken, Germany"
+674fcadf1b895e3a79380d3ac5afb43d406fd31a,Facial Asymmetry Assessment from 3D Shape Sequences: The Clinical Case of Facial Paralysis,
+675b2caee111cb6aa7404b4d6aa371314bf0e647,AVA: A Video Dataset of Spatio-temporally Localized Atomic Visual Actions,"AVA: A Video Dataset of Spatio-temporally Localized Atomic Visual Actions
+Chunhui Gu∗
+Yeqing Li∗
+Chen Sun∗
+David A. Ross∗
+Sudheendra Vijayanarasimhan∗
+Carl Vondrick∗
+George Toderici∗
+Caroline Pantofaru∗
+Susanna Ricco∗
+Rahul Sukthankar∗
+Cordelia Schmid† ∗
+Jitendra Malik‡ ∗"
+67dca0d4b87ab2a4f18b5a1ef76f6ba17b599245,Top-Down Regularization of Deep Belief Networks,"Top-Down Regularization of Deep Belief Networks
+Hanlin Goh∗, Nicolas Thome, Matthieu Cord
+Laboratoire d’Informatique de Paris 6
+UPMC – Sorbonne Universit´es, Paris, France
+Joo-Hwee Lim†
+Institute for Infocomm Research
+A*STAR, Singapore"
+67a56dd94906a5460c263e1a1b87fa3a52c4b453,Face Analysis by Local Directional Number Pattern,"International Journal of Engineering Research and General Science Volume 3, Issue 1, January-February, 2015
+ISSN 2091-2730
+FACE ANALYSIS BY LOCAL DIRECTIONAL NUMBER PATTERN
+Manjunatha S B, Guruprasad A M, Vineesh P
+Coorg Institute of Technology, Ponnampet, Coorg-District, Karnataka, 9611962024"
+67f88f37e4853b870debef2bd29b257b5b19f255,EgoSampling: Wide View Hyperlapse from Single and Multiple Egocentric Videos,"EgoSampling: Wide View Hyperlapse from
+Single and Multiple Egocentric Videos
+Tavi Halperin Yair Poleg Chetan Arora Shmuel Peleg"
+67484723e0c2cbeb936b2e863710385bdc7d5368,Anchor Cascade for Efficient Face Detection,"Anchor Cascade for Efficient Face Detection
+Baosheng Yu and Dacheng Tao, Fellow, IEEE"
+678b367b2d5250f278c994238bbf816098252d9d,IrisDenseNet: Robust Iris Segmentation Using Densely Connected Fully Convolutional Networks in the Images by Visible Light and Near-Infrared Light Camera Sensors,"Article
+IrisDenseNet: Robust Iris Segmentation Using
+Densely Connected Fully Convolutional Networks in
+the Images by Visible Light and Near-Infrared Light
+Camera Sensors
+Muhammad Arsalan, Rizwan Ali Naqvi, Dong Seop Kim, Phong Ha Nguyen, Muhammad Owais
+nd Kang Ryoung Park *
+Division of Electronics and Electrical Engineering, Dongguk University, 30 Pildong-ro 1-gil, Jung-gu,
+Seoul 100-715, Korea; (M.A.); (R.A.N.);
+(D.S.K.); (P.H.N.); (M.O.)
+* Correspondence: Tel.: +82-10-3111-7022; Fax: +82-2-2277-8735
+Received: 2 April 2018; Accepted: 8 May 2018; Published: 10 May 2018"
+670637d0303a863c1548d5b19f705860a23e285c,Face swapping: automatically replacing faces in photographs,"Face Swapping: Automatically Replacing Faces in Photographs
+Dmitri Bitouk
+Neeraj Kumar
+Samreen Dhillon∗
+Columbia University†
+Peter Belhumeur
+Shree K. Nayar
+Figure 1: We have developed a system that automatically replaces faces in an input image with ones selected from a large collection of
+face images, obtained by applying face detection to publicly available photographs on the internet. In this example, the faces of (a) two
+people are shown after (b) automatic replacement with the top three ranked candidates. Our system for face replacement can be used for face
+de-identification, personalized face replacement, and creating an appealing group photograph from a set of “burst” mode images. Original
+images in (a) used with permission from Retna Ltd. (top) and Getty Images Inc. (bottom).
+Rendering, Computational Photography
+Introduction
+Advances in digital photography have made it possible to cap-
+ture large collections of high-resolution images and share them
+on the internet. While the size and availability of these col-
+lections is leading to many exciting new applications,
+lso creating new problems. One of the most
+important of"
+6742c0a26315d7354ab6b1fa62a5fffaea06da14,What does 2D geometric information really tell us about 3D face shape?,"BAS AND SMITH: WHAT DOES 2D GEOMETRIC INFORMATION REALLY TELL US ABOUT 3D FACE SHAPE?
+What does 2D geometric information
+really tell us about 3D face shape?
+Anil Bas and William A. P. Smith, Member, IEEE"
+6775c818b26263c885b0ce85c224dfd942c9652e,Pedestrian and Object Detection Using Learned Convolutional Filters,"U.P.B. Sci. Bull., Series C, Vol. 77, Iss. 2, 2015
+ISSN 2286-3540
+PEDESTRIAN AND OBJECT DETECTION USING LEARNED
+CONVOLUTIONAL FILTERS
+Anamaria R ˘ADOI1 , Dan Alexandru STOICHESCU2
+Object detection is still a very active field in Computer Vision. Until now, part
+ased models proved to be one of the most interesting and successful approaches
+in object and pedestrian detection. The method applies a machine learning ap-
+proach not to the input images themselves, but to histograms of gradients. How-
+ever, its performances are still limited when compared to what humans can do.
+The purpose of the present paper is to show that sparse representations can be
+successfully used in object detection. The main advantage of using this method is
+related to the possibility of learning only those filters that are able to express the
+most frequent patterns that appear in the analyzed images. The experiments are
+arried out on two widely used datasets, namely VOC2007 and INRIA Person.
+Keywords: learned filterbanks, stochastic gradient descent, pedestrian detection,
+object detection, Histogram of Oriented Gradients.
+. Introduction
+Object detection is a major challenge for many areas of research, starting
+from medicine and going to applications such as street surveillance or video appli-"
+67bee729d046662c6ebd9d3d695823c9d820343a,Generating Factoid Questions With Recurrent Neural Networks: The 30M Factoid Question-Answer Corpus,"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics, pages 588–598,
+Berlin, Germany, August 7-12, 2016. c(cid:13)2016 Association for Computational Linguistics"
+67c703a864aab47eba80b94d1935e6d244e00bcb,Face Retrieval Based On Local Binary Pattern and Its Variants: A Comprehensive Study,"(IJACSA) International Journal of Advanced Computer Science and Applications
+Vol. 7, No. 6, 2016
+Face Retrieval Based On Local Binary Pattern and Its
+Variants: A Comprehensive Study
+Department of Computer Vision and Robotics, University of Science, VNU-HCM, Viet Nam
+Phan Khoi, Lam Huu Thien, Vo Hoai Viet
+face searching,"
+6752b59da83c03e64c73f9248a67304713b6efa9,Chapter 3 Re - identification by Covariance Descriptors,"Chapter 3
+Re-identification by Covariance Descriptors
+Sławomir B ˛ak and François Brémond"
+67c30688bd46d305c610a83a0b28e86e10ef5cc4,Ship Detection in Harbour Surveillance based on Large-Scale Data and CNNs,
+67e00f7e928e6eab0faf1917252778b36bf64e39,Sparse radial sampling LBP for writer identification,"Sparse Radial Sampling LBP for Writer
+Identification
+Anguelos Nicolaou∗, Andrew D. Bagdanov∗, Marcus Liwicki†, and Dimosthenis Karatzas∗
+Computer Vision Center, Edifici O, Universitad Autonoma de Barcelona,Bellaterra, Spain
+DIVA research group, Department of Informatics, University of Fribourg, Switzerland
+Email:"
+6737a429dd615a0d9ac78d836c6b65bfd9ec36e8,Image Classification by Transfer Learning Based on the Predictive Ability of Each Attribute,"Image Classification by Transfer Learning Based
+on the Predictive Ability of Each Attribute
+Masahiro Suzuki, Haruhiko Sato, Satoshi Oyama, and Masahito Kurihara"
+6757254d27b761ada5dbd88642bd0112fcb962cf,Gait Recognition Using Wearable Motion Recording Sensors,"Hindawi Publishing Corporation
+EURASIP Journal on Advances in Signal Processing
+Volume 2009, Article ID 415817, 16 pages
+doi:10.1155/2009/415817
+Research Article
+Gait Recognition Using Wearable Motion Recording Sensors
+Davrondzhon Gafurov and Einar Snekkenes
+Norwegian Information Security Laboratory, Gjøvik University College, P.O. Box 191, 2802 Gjøvik, Norway
+Correspondence should be addressed to Davrondzhon Gafurov,
+Received 1 October 2008; Revised 26 January 2009; Accepted 26 April 2009
+Recommended by Natalia A. Schmid
+This paper presents an alternative approach, where gait is collected by the sensors attached to the person’s body. Such wearable
+sensors record motion (e.g. acceleration) of the body parts during walking. The recorded motion signals are then investigated for
+person recognition purposes. We analyzed acceleration signals from the foot, hip, pocket and arm. Applying various methods,
+the best EER obtained for foot-, pocket-, arm- and hip- based user authentication were 5%, 7%, 10% and 13%, respectively.
+Furthermore, we present the results of our analysis on security assessment of gait. Studying gait-based user authentication (in case
+of hip motion) under three attack scenarios, we revealed that a minimal effort mimicking does not help to improve the acceptance
+hances of impostors. However, impostors who know their closest person in the database or the genders of the users can be a
+threat to gait-based authentication. We also provide some new insights toward the uniqueness of gait in case of foot motion. In
+particular, we revealed the following: a sideway motion of the foot provides the most discrimination, compared to an up-down or"
+67fd4f209aa6e8359fc86bdc12c62bbdb0529077,Scalable Nearest Neighbor Algorithms for High Dimensional Data,"Scalable Nearest Neighbor Algorithms
+for High Dimensional Data
+Marius Muja, Member, IEEE and David G. Lowe, Member, IEEE"
+67ba3524e135c1375c74fe53ebb03684754aae56,A compact pairwise trajectory representation for action recognition,"978-1-5090-4117-6/17/$31.00 ©2017 IEEE
+ICASSP 2017"
+679136c2844eeddca34e98e483aca1ff6ef5e902,Scene-Specific Pedestrian Detection Based on Parallel Vision,"Scene-Specific Pedestrian Detection Based on
+Parallel Vision
+Wenwen Zhang, Kunfeng Wang, Member, IEEE, Hua Qu, Jihong Zhao, and Fei-Yue Wang, Fellow, IEEE"
+676c76c4e3ac2f91a2209ecdae8d20be4de7c9c0,Performance of Gabor mean Feature Extraction Techniques for Ear Biometrics Recognition System,"International Journal of Computer Applications (0975 – 8887)
+Volume 168 – No.12, June 2017
+Performance of Gabor mean Feature Extraction
+Techniques for Ear Biometrics Recognition System
+Bhanu Vadhwani
+Rajasthan College of Engg.
+for Women, India
+Vineet Khanna
+JaipuRajasthan College of
+Engg. for Women
+Shubhlakshmi Agarwal
+The ICFAI University, Jaipur, India
+Sandeep Kumar Gupta
+Machine Learning Research
+Lab, Jaipur, India"
+67751b7ce7f934ffadcf095f4189b31f890e9fdc,Pilot Comparative Study of Different Deep Features for Palmprint Identification in Low-Quality Images,"Ninth Hungarian Conference on Computer Graphics and Geometry, Budapest, 2018
+Pilot Comparative Study of Different Deep Features
+for Palmprint Identification in Low-Quality Images
+A.S. Tarawneh1, D. Chetverikov1,2 and A.B. Hassanat3
+Eötvös Loránd University, Budapest, Hungary
+Institute for Computer Science and Control, Budapest, Hungary
+Mutah University, Karak, Jordan"
+6769cfbd85329e4815bb1332b118b01119975a95,Tied factor analysis for face recognition across large pose changes,"Tied factor analysis for face recognition across
+large pose changes"
+0b4189d874ee67f259a1a366ac93740d500064a5,Single-Shot Multi-person 3D Pose Estimation from Monocular RGB,"Single-Shot Multi-Person 3D Pose Estimation From Monocular RGB
+Dushyant Mehta[1,2], Oleksandr Sotnychenko[1,2], Franziska Mueller[1,2],
+Weipeng Xu[1,2], Srinath Sridhar[3], Gerard Pons-Moll[1,2], Christian Theobalt[1,2]
+[1] MPI For Informatics
+[2] Saarland Informatics Campus
+[3] Stanford University"
+0be43cf4299ce2067a0435798ef4ca2fbd255901,Title A temporal latent topic model for facial expression recognition,"Title
+A temporal latent topic model for facial expression recognition
+Author(s)
+Shang, L; Chan, KP
+Citation
+The 10th Asian Conference on Computer Vision (ACCV 2010),
+Queenstown, New Zealand, 8-12 November 2010. In Lecture
+Notes in Computer Science, 2010, v. 6495, p. 51-63
+Issued Date
+http://hdl.handle.net/10722/142604
+Rights
+Creative Commons: Attribution 3.0 Hong Kong License"
+0b6bd0a6f396e1479dc30318102bf49c12959783,Face Recognition Using Local Binary Decisions,"Face recognition using local binary decisions
+Author
+James, Alex, Dimitrijev, Sima
+Published
+Journal Title
+https://doi.org/10.1109/LSP.2008.2006339
+Copyright Statement
+© 2008 IEEE. Personal use of this material is permitted. Permission from IEEE must be
+obtained for all other uses, in any current or future media, including reprinting/republishing this
+material for advertising or promotional purposes, creating new collective works, for resale or
+redistribution to servers or lists, or reuse of any copyrighted component of this work in other
+works.
+Downloaded from
+http://hdl.handle.net/10072/23556
+Griffith Research Online
+https://research-repository.griffith.edu.au"
+0b2277a0609565c30a8ee3e7e193ce7f79ab48b0,Cost-Sensitive Semi-Supervised Discriminant Analysis for Face Recognition,"Cost-Sensitive Semi-Supervised Discriminant
+Analysis for Face Recognition
+Jiwen Lu, Member, IEEE, Xiuzhuang Zhou, Member, IEEE, Yap-Peng Tan, Senior Member, IEEE,
+Yuanyuan Shang, Member, IEEE, and Jie Zhou, Senior Member, IEEE"
+0b2c543e0c47454c4512569175094e6cb6ae02a9,The VizWiz Grand Challenge: A Large Visual Question Answering Dataset from Blind People,"#1687
+CVPR 2016 Submission #1687. CONFIDENTIAL REVIEW COPY. DO NOT DISTRIBUTE.
+The VizWiz Grand Challenge:
+A Large Visual Question Answering Dataset from Blind People
+Anonymous CVPR submission
+Paper ID 1687"
+0b57eb772ad9129ea4011c7fcb16c57967409018,“A Distorted Skull Lies in the Bottom Center...” Identifying Paintings from Text Descriptions,"Proceedings of 2016 NAACL Human-Computer Question Answering Workshop, pages 43–47,
+San Diego, California, June 12-17, 2016. c(cid:13)2016 Association for Computational Linguistics"
+0b0b0d9b15613a6e3c4f9a4dd1c17c0313ca4303,Evaluation of 3D Face Recognition in the presence of facial expressions: an Annotated Deformable Model approach,"D face recognition
+in the presence of facial expressions:
+An annotated deformable model approach
+I.A. Kakadiaris, Member, IEEE, G. Passalis, G. Toderici, N. Murtuza, Y. Lu,
+N. Karampatziakis, and T. Theoharis
+August 15, 2006
+DRAFT"
+0b9ce839b3c77762fff947e60a0eb7ebbf261e84,Logarithmic Fourier Pca: a New Approach to Face Recognition,"Proceedings of the IASTED International Conference
+Computer Vision (CV 2011)
+June 1 - 3, 2011 Vancouver, BC, Canada
+LOGARITHMIC FOURIER PCA: A NEW APPROACH TO FACE
+RECOGNITION
+Lakshmiprabha Nattamai Sekar,
+Jhilik Bhattacharya,
+omjyoti
+Majumder
+Surface Robotics Lab
+Central Mechanical Engineering Research Institute
+Mahatma Gandhi Avenue,
+Durgapur - 713209, West Bengal, India.
+email: 1 n prabha 2 3"
+0bcd89b356dc78aaf3573086f13e94b8e7b5bee6,Comparative Testing of Face Detection Algorithms,"Comparative Testing of Face Detection
+Algorithms⋆
+Nikolay Degtyarev and Oleg Seredin
+Tula State University
+http://lda.tsu.tula.ru"
+0bf26d2fd1b375f50c0a6bef086f09f7698c3156,Predicting Entry-Level Categories,"Noname manuscript No.
+(will be inserted by the editor)
+Predicting Entry-Level Categories
+Vicente Ordonez · Wei Liu · Jia Deng · Yejin Choi ·
+Alexander C. Berg · Tamara L. Berg
+Received: date / Accepted: date"
+0b278c9dc9b16b46ed602eab884ad7a37a988031,Robust Face-Name Graph Matching for Movie Character Identification,"International Journal of Science and Research (IJSR)
+ISSN (Online): 2319-7064
+Index Copernicus Value (2015): 78.96 | Impact Factor (2015): 6.391
+Robust Face-Name Graph Matching for Movie
+Character Identification
+Jonnadula Narasimha1, S Nishanth Kumar2, Chiluka Shiva Kumar3, D Vamshi Krishna Rao4
+Associate Professor, Department of Computer Science and Engineering, CMR Technical Campus,
+Medchal, Hyderabad, Telangana, India
+, 3, 4Department of Computer Science and Engineering, CMR Technical Campus, Medchal, Hyderabad, Telangana, India"
+0b6a5200c33434cbfa9bf24ba482f6e06bf5fff7,"The use of deep learning in image segmentation, classification and detection","The Use of Deep Learning in Image
+Segmentation, Classification and Detection
+Mihai-Sorin Badea, Iulian-Ionuț Felea, Laura Maria Florea, Constantin Vertan
+The Image Processing and Analysis Lab (LAPI), Politehnica University of Bucharest, Romania"
+0b605b40d4fef23baa5d21ead11f522d7af1df06,Label-Embedding for Attribute-Based Classification,"Label-Embedding for Attribute-Based Classification
+Zeynep Akataa,b, Florent Perronnina, Zaid Harchaouib and Cordelia Schmidb
+Computer Vision Group∗, XRCE, France
+LEAR†, INRIA, France"
+0b61cad6ae6e7ab99f2e3c187bd8530da71f10ae,Gameplay Genre Video Classification by Using Mid-Level Video Representation,"Gameplay genre video classification by using
+mid-level video representation
+Renato Augusto de Souza‡, Raquel Pereira de Almeida‡, Arghir-Nicolae Moldovan∗,
+Zenilton Kleber G. do Patrocínio Jr.‡, Silvio Jamil F. Guimarães‡
+Audio-Visual Information Proc. Lab. (VIPLAB)
+Computer Science Department – ICEI – PUC Minas
+School of Computing, National College of Ireland, Dublin, Ireland
+named GameGenre, consists of 700 videos (more than 116
+hours), classified into 7 game genres."
+0b0535fbdc468d1fd6ff32545a717a8af14f634f,The Discriminative Generalized Hough Transform as a Proposal Generator for a Deep Network in Automatic Pedestrian Localization,
+0b0eb562d7341231c3f82a65cf51943194add0bb,Line with Your Paper Identification Number ( Double - Click Here to Edit,"> REPLACE THIS LINE WITH YOUR PAPER IDENTIFICATION NUMBER (DOUBLE-CLICK HERE TO EDIT) <
+Facial Image Analysis Based on Local Binary
+Patterns: A Survey
+Di Huang, Caifeng Shan, Mohsen Ardebilian, Liming Chen"
+0b3a146c474166bba71e645452b3a8276ac05998,Whos In the Picture,"Who’s in the Picture?
+Tamara L. Berg, Alexander C. Berg, Jaety Edwards and D.A. Forsyth
+Berkeley, CA 94720
+Computer Science Division
+U.C. Berkeley"
+0b937abb3b356a2932d804f9fc4b463485f63d0e,Visual word disambiguation by semantic contexts,"Visual word disambiguation by semantic contexts
+Yu Su, Frédéric Jurie
+To cite this version:
+Yu Su, Frédéric Jurie. Visual word disambiguation by semantic contexts. IEEE Intenational Confer-
+ence on Computer Vision (ICCV), 2011, Spain. pp.311-318, 2011, <10.1109/ICCV.2011.6126257>.
+<hal-00808655>
+HAL Id: hal-00808655
+https://hal.archives-ouvertes.fr/hal-00808655
+Submitted on 5 Apr 2013
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de
+recherche français ou étrangers, des laboratoires"
+0b6f810f287561ff694a9406c7b319fd8549ca68,Face Recognition Based on Texture Features using Local Ternary Patterns,"I.J. Image, Graphics and Signal Processing, 2015, 10, 37-46
+Published Online September 2015 in MECS (http://www.mecs-press.org/)
+DOI: 10.5815/ijigsp.2015.10.05
+Face Recognition Based on Texture Features
+using Local Ternary Patterns
+Associate Professor, Dept. of CSE, BVRIT Hyderabad College of Engineering for Women, Hyderabad, T.S., India.
+K. Srinivasa Reddy
+Director-CACR, Dean-Computer Sciences (CSE & IT), Anurag Group of Institutions, Hyderabad, T.S., India.
+Email:
+V. Vijaya Kumar
+Email:
+B. Eswara Reddy
+Professor, Dept. of CSE, JNTUA, Ananthapuram, A.P., India.
+Email:"
+0bb574ad77f55f395450b4a9f863ecfdd4880bcd,Learning the Base Distribution in Implicit Generative Models,"Learning the Base Distribution in Implicit Generative Models
+Y. Cem Subakan(cid:91), Oluwasanmi Koyejo(cid:91), Paris Smaragdis(cid:91),(cid:93)
+(cid:91)UIUC, (cid:93)Adobe Inc."
+0b0958493e43ca9c131315bcfb9a171d52ecbb8a,A Unified Neural Based Model for Structured Output Problems,"A Unified Neural Based Model for Structured Output Problems
+Soufiane Belharbi∗1, Cl´ement Chatelain∗1, Romain H´erault∗1, and S´ebastien Adam∗2
+LITIS EA 4108, INSA de Rouen, Saint ´Etienne du Rouvray 76800, France
+LITIS EA 4108, UFR des Sciences, Universit´e de Rouen, France.
+April 13, 2015"
+0b20f75dbb0823766d8c7b04030670ef7147ccdd,Feature selection using nearest attributes,"Feature selection using nearest attributes
+Alex Pappachen James, Member, IEEE, and Sima Dimitrijev, Senior Member, IEEE"
+0b5a82f8c0ee3640503ba24ef73e672d93aeebbf,On Learning 3D Face Morphable Model from In-the-wild Images,"On Learning 3D Face Morphable Model
+from In-the-wild Images
+Luan Tran, and Xiaoming Liu, Member, IEEE"
+0b174d4a67805b8796bfe86cd69a967d357ba9b6,A Survey on Face Detection and Recognition Approaches,"Research Journal of Recent Sciences _________________________________________________ ISSN 2277-2502
+Vol. 3(4), 56-62, April (2014)
+Res.J.Recent Sci."
+0ba6f4fb548d8289fb42d68ac64d55f9e3a274ca,Auto-Context and Its Application to High-Level Vision Tasks and 3D Brain Image Segmentation,"Auto-context and Its Application to High-level Vision Tasks
+nd 3D Brain Image Segmentation
+Lab of Neuro Imaging, University of California, Los Angeles
+Zhuowen Tu and Xiang Bai
+July 9, 2009"
+0b87d91fbda61cdea79a4b4dcdcb6d579f063884,Research on Theory and Method for Facial Expression Recognition Sys- tem Based on Dynamic Image Sequence,"The Open Automation and Control Systems Journal, 2015, 7, 569-579
+Open Access
+Research on Theory and Method for Facial Expression Recognition Sys-
+tem Based on Dynamic Image Sequence
+Send Orders for Reprints to
+Yang Xinfeng1,* and Jiang Shan2
+School of Computer & Information Engineering, Nanyang Institute of Technology, Henan, Nanyang, 473000, P.R.
+China
+Henan University of Traditional Chinese Medicine, Henan, Zhengzhou, 450000, P.R. China"
+0b24cca96ca61248a3fa3973525a967f94292835,Two Novel Face Recognition Approaches,"We are IntechOpen,
+the world’s leading publisher of
+Open Access books
+Built by scientists, for scientists
+,800
+16,000
+Open access books available
+International authors and editors
+Downloads
+Our authors are among the
+Countries delivered to
+TOP 1%
+2.2%
+most cited scientists
+Contributors from top 500 universities
+Selection of our books indexed in the Book Citation Index
+in Web of Science™ Core Collection (BKCI)
+Interested in publishing with us?
+Contact
+Numbers displayed above are based on latest data collected."
+0b70facac4d10c7c73e7fdf3a85848ce429d98ab,"Segmentation features, visibility modeling and shared parts for object detection","Segmentation Features, Visibility Modeling and
+Shared Parts for Object Detection
+Patrick Ott
+Submitted in accordance with the requirements
+for the degree of Doctor of Philosophy.
+The University of Leeds
+School of Computing
+February 2012
+The candidate confirms that the work submitted is his own and that the appropriate
+redit has been given where reference has been made to the work of others.
+This copy has been supplied on the understanding that it is copyright material
+nd that no quotation from the thesis may be published without proper
+cknowledgment."
+0b79356e58a0df1d0efcf428d0c7c4651afa140d,Bayesian Modeling of Facial Similarity,"Appears In: Advances in Neural Information Processing Systems , MIT Press,  .
+Bayesian Modeling of Facial Similarity
+Baback Moghaddam
+Mitsubishi Electric Research Laboratory
+
+Cambridge, MA
+Tony Jebara and Alex Pentland
+Massachusettes Institute of Technology
+
+Cambridge, MA
+0b572a2b7052b15c8599dbb17d59ff4f02838ff7,Automatic Subspace Learning via Principal Coefficients Embedding,"Automatic Subspace Learning via Principal
+Coefficients Embedding
+Xi Peng, Jiwen Lu, Senior Member, IEEE, Zhang Yi, Fellow, IEEE and Rui Yan, Member, IEEE,"
+0bc9f1749e23b37ea5b5588c5bfe23879174d343,Pythia v0.1: the Winning Entry to the VQA Challenge 2018,"Pythia v0.1: the Winning Entry to the VQA Challenge 2018
+Yu Jiang∗, Vivek Natarajan∗, Xinlei Chen∗, Marcus Rohrbach, Dhruv Batra, Devi Parikh
+Facebook AI Research"
+0b888196dda951287dddb60bd44798aab16d6fca,Learning Common Sense through Visual Abstraction,
+0ba544ff0d837ba5279b03eb91246d00f2c78817,Direct Prediction of 3D Body Poses from Motion Compensated Sequences,"Direct Prediction of 3D Body Poses from Motion Compensated Sequences
+Bugra Tekin1
+Artem Rozantsev1
+Vincent Lepetit1,2
+Pascal Fua1
+CVLab, EPFL, Lausanne, Switzerland,
+TU Graz, Graz, Austria,"
+0bc7d8e269a8c8018a7cb120ff25adf02d45c7ed,Exploiting Dissimilarity Representations for Person Re-identification,"Exploiting Dissimilarity Representations for
+Person Re-Identification
+Riccardo Satta, Giorgio Fumera, and Fabio Roli
+Dept. of Electrical and Electronic Engineering, University of Cagliari
+Piazza d’Armi, 09123 Cagliari, Italy"
+0b02bfa5f3a238716a83aebceb0e75d22c549975,Learning Probabilistic Models for Recognizing Faces under Pose Variations,"Learning Probabilistic Models for Recognizing Faces
+under Pose Variations
+M. Saquib Sarfraz and Olaf Hellwich
+Computer vision and Remote Sensing, Berlin university of Technology
+Sekr. FR-3-1, Franklinstr. 28/29, Berlin, Germany"
+0beaf17d42b1171dd245131825d2de67000f45ac,Expert Gate: Lifelong Learning with a Network of Experts,"Expert Gate: Lifelong Learning with a Network of Experts
+Rahaf Aljundi
+Punarjay Chakravarty
+Tinne Tuytelaars
+KU Leuven, ESAT-PSI, iMinds, Belgium
+{rahaf.aljundi, Punarjay.Chakravarty,"
+0bce54bfbd8119c73eb431559fc6ffbba741e6aa,Recurrent Neural Networks,"Published as a conference paper at ICLR 2018
+SKIP RNN: LEARNING TO SKIP STATE UPDATES IN
+RECURRENT NEURAL NETWORKS
+V´ıctor Campos∗†, Brendan Jou‡, Xavier Gir´o-i-Nieto§, Jordi Torres†, Shih-Fu ChangΓ
+Barcelona Supercomputing Center, ‡Google Inc,
+§Universitat Polit`ecnica de Catalunya, ΓColumbia University
+{victor.campos,"
+0b19177107a102ee81e5ef1bb9fb2f2881441503,Comparing Robustness of Pairwise and Multiclass Neural-Network Systems for Face Recognition,"Hindawi Publishing Corporation
+EURASIP Journal on Advances in Signal Processing
+Volume 2008, Article ID 468693, 7 pages
+doi:10.1155/2008/468693
+Research Article
+Comparing Robustness of Pairwise and Multiclass
+Neural-Network Systems for Face Recognition
+J. Uglov, L. Jakaite, V. Schetinin, and C. Maple
+Computing and Information System Department, University of Bedfordshire, Luton LU1 3JU, UK
+Correspondence should be addressed to V. Schetinin,
+Received 16 June 2007; Revised 28 August 2007; Accepted 19 November 2007
+Recommended by Konstantinos N. Plataniotis
+Noise, corruptions, and variations in face images can seriously hurt the performance of face-recognition systems. To make these
+systems robust to noise and corruptions in image data, multiclass neural networks capable of learning from noisy data have been
+suggested. However on large face datasets such systems cannot provide the robustness at a high level. In this paper, we explore a
+pairwise neural-network system as an alternative approach to improve the robustness of face recognition. In our experiments, the
+pairwise recognition system is shown to outperform the multiclass-recognition system in terms of the predictive accuracy on the
+test face images.
+Copyright © 2008 J. Uglov et al. This is an open access article distributed under the Creative Commons Attribution License, which
+permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited."
+0b1cf351a4a6758606bea32d29c7d529e79ab7ce,Fake Face Detection System Using Pupil Reflection 양재준,"한국지능시스템학회 논문지 2010, Vol. 20, No. 5, pp. 645-651
+동공의 반사특징을 이용한 얼굴위조판별 시스템
+Fake Face Detection System Using Pupil Reflection
+양재준*․조성원*․정선태**
+JaeJun Yang, Seongwon Cho and Sun-Tae Chung
+* 홍익대학교 전기정보제어공학과
+**숭실대학교 정보통신전자공학부
+최근 지능형 범죄가 늘면서 첨단 보안 기술에 대한 요구가 점차 늘어나고 있다. 현재까지 보고된 위조영상검출방법은 실용
+화를 위하여 정확도 개선이 요구된다. 본 논문에서는 사람의 얼굴에 대하여 동공의 반사광을 이용한 얼굴위조판별 시스템
+을 제안한다. 제안된 시스템은 먼저 다중 스케일 가버특징 벡터를 기반으로 눈의 위치를 찾은 후 2단계의 템플릿 매칭을
+통해서 설정된 적용범위를 벗어나는 눈에 대하여 위조판별을 고려하지 않음으로써 정확도를 높이는 방법을 사용한다. 신뢰
+도가 확보된 눈의 위치를 기반으로 적외선 조명에 반사되는 동공의 특징을 이용하여 눈위치 근처에서의
+화소값을 계산하
+여 위조 여부를 판단한다. 실험을 통하여 본 논문에서 제안한 방법이 더욱 신뢰성 높은 위조판별시스템임을 확인하였다.
+키워드 : 변조영상 검출, 얼굴 검출, EBGM, 템플릿 매칭, 얼굴 식별"
+0b8ef6f5ec5dfc3eded5241fd3d636a596b94d26,Stereological analysis of amygdala neuron number in autism.,"7674 • The Journal of Neuroscience, July 19, 2006 • 26(29):7674 –7679
+Neurobiology of Disease
+Stereological Analysis of Amygdala Neuron Number
+in Autism
+Cynthia Mills Schumann and David G. Amaral
+Department of Psychiatry and Behavioral Sciences and The M.I.N.D. Institute, University of California, Davis, Sacramento, California 95817
+The amygdala is one of several brain regions suspected to be pathological in autism. Previously, we found that young children with autism
+have a larger amygdala than typically developing children. Past qualitative observations of the autistic brain suggest increased cell density
+in some nuclei of the postmortem autistic amygdala. In this first, quantitative stereological study of the autistic brain, we counted and
+measured neurons in several amygdala subdivisions of 9 autism male brains and 10 age-matched male control brains. Cases with
+omorbid seizure disorder were excluded from the study. The amygdaloid complex was outlined on coronal sections then partitioned into
+five reliably defined subdivisions: (1) lateral nucleus, (2) basal nucleus, (3) accessory basal nucleus, (4) central nucleus, and (5) remaining
+nuclei. There is no difference in overall volume of the amygdala or in individual subdivisions. There are also no changes in cell size.
+However, there are significantly fewer neurons in the autistic amygdala overall and in its lateral nucleus. In conjunction with the findings
+from previous magnetic resonance imaging studies, the autistic amygdala appears to undergo an abnormal pattern of postnatal devel-
+opment that includes early enlargement and ultimately a reduced number of neurons. It will be important to determine in future studies
+whether neuron loss in the amygdala is a consistent characteristic of autism and whether cell loss occurs in other brain regions as well.
+Key words: autism; neuropathology; stereology; neuronal density; medial temporal lobe; neuroanatomy; amygdaloid complex
+Introduction
+Autism is a lifelong neurodevelopmental disorder characterized"
+0bdd8f824fa4d4e770e34268a78dca12fb6a135b,Compact Hash Codes for Efficient Visual Descriptors Retrieval in Large Scale Databases,"Compact Hash Codes for Efficient Visual Descriptors
+Retrieval in Large Scale Databases
+Simone Ercoli, Marco Bertini and Alberto Del Bimbo
+Media Integration and Communication Center, Università degli Studi di Firenze
+Viale Morgagni 65 - 50134 Firenze, Italy"
+0bdfc21178347ed4f137d4c7d0ba14c996c66b6e,Automated X-Ray Object Recognition Using an Efficient Search Algorithm in Multiple Views,"Automated X-ray object recognition using
+n efficient search algorithm in multiple views
+Domingo Mery, Vladimir Riffo, Irene Zuccar, Christian Pieringer
+Department of Computer Science – Pontificia Universidad Cat´olica de Chile
+Av. Vicu˜na Mackenna 4860(143) – Santiago de Chile
+http://dmery.ing.puc.cl"
+0b4c4ea4a133b9eab46b217e22bda4d9d13559e6,MORF: Multi-Objective Random Forests for face characteristic estimation,"MORF: Multi-Objective Random Forests for Face Characteristic Estimation
+Dario Di Fina1
+MICC - University of Florence
+Svebor Karaman1,3
+Andrew D. Bagdanov2
+{dario.difina,
+CVC - Universitat Autonoma de Barcelona
+Alberto Del Bimbo1
+DVMM Lab - Columbia University"
+0b9db62b26b811e8c24eb9edc37901a4b79a897f,Structured Face Hallucination,"Structured Face Hallucination
+Chih-Yuan Yang Sifei Liu Ming-Hsuan Yang
+Electrical Engineering and Computer Science
+University of California at Merced
+{cyang35, sliu32,"
+0b6c10ea6bf8a6c254e00fcc2163c4b6fc0f1c3a,"Anti-Spoofing for Text-Independent Speaker Verification: An Initial Database, Comparison of Countermeasures, and Human Performance","Anti-Spoofing for Text-Independent Speaker Verification: An
+Initial Database, Comparison of Countermeasures, and Human
+Performance
+Citation for published version:
+Wu, Z, De Leon, P, Demiroglu, C, Khodabakhsh, A, King, S, Ling, Z, Saito, D, Stewart, B, Toda, T, Wester,
+M & Yamagishi, J 2016, 'Anti-Spoofing for Text-Independent Speaker Verification: An Initial Database,
+Comparison of Countermeasures, and Human Performance' IEEE/ACM Transactions on Audio, Speech,
+nd Language Processing, vol. 24, no. 4, pp. 768 - 783. DOI: 10.1109/TASLP.2016.2526653
+Digital Object Identifier (DOI):
+0.1109/TASLP.2016.2526653
+Link:
+Link to publication record in Edinburgh Research Explorer
+Document Version:
+Peer reviewed version
+Published In:
+General rights
+Copyright for the publications made accessible via the Edinburgh Research Explorer is retained by the author(s)
+nd / or other copyright owners and it is a condition of accessing these publications that users recognise and
+bide by the legal requirements associated with these rights.
+Take down policy"
+0b4d3e59a0107f0dad22e74054bab1cf1ad9c32e,Visual Genome: Connecting Language and Vision Using Crowdsourced Dense Image Annotations,"Int J Comput Vis
+DOI 10.1007/s11263-016-0981-7
+Visual Genome: Connecting Language and Vision Using
+Crowdsourced Dense Image Annotations
+· Yuke Zhu1 · Oliver Groth2 · Justin Johnson1 · Kenji Hata1 ·
+Ranjay Krishna1
+Joshua Kravitz1 · Stephanie Chen1 · Yannis Kalantidis3 · Li-Jia Li4 ·
+David A. Shamma5 · Michael S. Bernstein1 · Li Fei-Fei1
+Received: 23 February 2016 / Accepted: 12 September 2016
+© The Author(s) 2017. This article is published with open access at Springerlink.com"
+0b2d49cb2d2de06b022e2c636e337d294171dc22,New features and insights for pedestrian detection,"New Features and Insights for Pedestrian Detection
+Stefan Walk1 Nikodem Majer1 Konrad Schindler1 Bernt Schiele1,2
+Computer Science Department, TU Darmstadt
+MPI Informatics, Saarbr¨ucken"
+0bf2765d431c16de7b8f9c644684e69fa52598eb,Integrating Remote PPG in Facial Expression Analysis Framework,"Integrating Remote PPG in Facial Expression Analysis
+Framework
+H. Emrah Tasli
+Marten den Uyl
+Vicarious Perception Technologies, Amsterdam, The Netherlands
+Amogh Gudi"
+0b8c92463f8f5087696681fb62dad003c308ebe2,On matching sketches with digital face images,"On Matching Sketches with Digital Face Images
+Himanshu S. Bhatt, Samarth Bharadwaj, Richa Singh, and Mayank Vatsa
+in local"
+0bc0f9178999e5c2f23a45325fa50300961e0226,Recognizing facial expressions from videos using Deep Belief Networks,"Recognizing facial expressions from videos using Deep
+Belief Networks
+CS 229 Project
+Advisor: Prof. Andrew Ng
+Adithya Rao Narendran Thiagarajan"
+0babc4af06d210cf38bdf8324c339b6cf3f424fa,A Predictive Model of Patient Readmission Using Combined ICD-9 Codes as Engineered Features,"A Predictive Model of Patient Readmission Using Combined ICD-9
+Codes as Engineered Features"
+0b5c3cf7c8c643cb09d55a08b15de22e134081be,Online Tracking and Offline Recognition Using Scale Invariant Feature Transform,"IJMTES | International Journal of Modern Trends in Engineering and Science ISSN: 2348-3121
+Online Tracking and Offline Recognition Using Scale
+Invariant Feature Transform
+A. Bahmidha Banu1; Dr. V. Venkatesa kumar2
+PG Scholar, Department of CSE, Anna University Regional Centre, Tamilnadu,
+Assistant Professor, Department of CSE, Anna University Regional Centre, , Tamilnadu,
+________________________________________________________________________________________________________"
+0bfabcf5c74cc17fe8b5777093699789411868b9,Predictive Tagging of Social Media Images using Unsupervised Learning,"International Journal of Computer Applications (0975 – 8887)
+Volume 65– No.24, March 2013
+Predictive Tagging of Social Media Images using
+Unsupervised Learning
+Nishchol Mishra
+Asstt. Professor
+School of IT
+RGPV, Bhopal
+India
+Sanjay Silakari, PhD.
+Professor, Deptt. Of CSE
+UIT- RGPV
+Bhopal
+India"
+0bc82ec532228427a497ac47391d524e3b4537ae,Fluid Annotation: A Human-Machine Collaboration Interface for Full Image Annotation,"Fluid Annotation: A Human-Machine Collaboration Interface
+for Full Image Annotation
+Mykhaylo Andriluka∗
+Jasper R. R. Uijlings∗
+Google Research
+Z¨urich, Switzerland
+Vi(cid:138)orio Ferrari"
+0b4453df81091bcdafedc07b64bea946bf3441b2,Fast and Accurate 3D Face Recognition Using Registration to an Intrinsic Coordinate System and Fusion of Multiple Region Classifiers,"Int J Comput Vis
+DOI 10.1007/s11263-011-0426-2
+Fast and Accurate 3D Face Recognition
+Using Registration to an Intrinsic Coordinate System and Fusion of Multiple Region
+Classifiers
+Luuk Spreeuwers
+Received: 20 September 2010 / Accepted: 7 February 2011
+© The Author(s) 2011. This article is published with open access at Springerlink.com"
+0b4b6932d5df74b366d9235b40334bc40d719c72,Temporal Ensembling for Semi-Supervised Learning,"Temporal Ensembling for Semi-Supervised Learning
+Samuli Laine
+NVIDIA
+Timo Aila
+NVIDIA"
+93cfc6fd29d50fe6589f9506b503f32f6d0372f4,A Face-to-Face Neural Conversation Model,"A Face-to-Face Neural Conversation Model
+Hang Chu1,2 Daiqing Li1 Sanja Fidler1,2
+University of Toronto 2Vector Institute
+{chuhang1122, daiqing,"
+9391618c09a51f72a1c30b2e890f4fac1f595ebd,Globally Tuned Cascade Pose Regression via Back Propagation with Application in 2D Face Pose Estimation and Heart Segmentation in 3D CT Images,"Globally Tuned Cascade Pose Regression via
+Back Propagation with Application in 2D Face
+Pose Estimation and Heart Segmentation in 3D
+CT Images
+Peng Sun
+James K Min
+Guanglei Xiong
+Dalio Institute of Cardiovascular Imaging, Weill Cornell Medical College
+April 1, 2015
+This work was submitted to ICML 2015 but got rejected. We put the initial
+submission ”as is” in Page 2 - 11 and add updated contents at the tail. The
+ode of this work is available at https://github.com/pengsun/bpcpr5."
+93498110032a458fddebfae80d7a93991e11673d,Brownian descriptor: A rich meta-feature for appearance matching,"Brownian descriptor: a Rich Meta-Feature for Appearance Matching
+Sławomir B ˛ak
+Ratnesh Kumar
+François Brémond
+INRIA Sophia Antipolis, STARS group
+004, route des Lucioles, BP93
+06902 Sophia Antipolis Cedex - France"
+93675f86d03256f9a010033d3c4c842a732bf661,Localized Growth and Characterization of Silicon Nanowires,Universit´edesSciencesetTechnologiesdeLilleEcoleDoctoraleSciencesPourl’ing´enieurUniversit´eLilleNord-de-FranceTHESEPr´esent´ee`al’Universit´edesSciencesetTechnologiesdeLillePourobtenirletitredeDOCTEURDEL’UNIVERSIT´ESp´ecialit´e:MicroetNanotechnologieParTaoXULocalizedgrowthandcharacterizationofsiliconnanowiresSoutenuele25Septembre2009Compositiondujury:Pr´esident:TuamiLASRIRapporteurs:ThierryBARONHenriMARIETTEExaminateurs:EricBAKKERSXavierWALLARTDirecteurdeth`ese:BrunoGRANDIDIER
+938566dc8ee83a12d07e4d26bbb75e65ca7963cd,Multi-Scale Singularity Trees (MSSTs),"Multi-Scale Singularity Trees
+(MSSTs)
+Kerawit Somchaipeng"
+936c7406de1dfdd22493785fc5d1e5614c6c2882,Detecting Visual Text,"012 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 762–772,
+Montr´eal, Canada, June 3-8, 2012. c(cid:13)2012 Association for Computational Linguistics"
+93d3f2e546314305e8102538c4714e30e9146858,Image categorization combining neighborhood methods and boosting,"Image Categorization Combining Neighborhood Methods
+nd Boosting
+Matthew Cooper
+FX Palo Alto Laboratory
+Palo Alto, CA 94304 USA"
+93610676003ef1dcda3864b236bca3852cb05388,RECOGNIZING ACTIVITIES WITH CLUSTER-TREES OF TRACKLETS 1 Recognizing activities with cluster-trees of tracklets,"Recognizing activities with cluster-trees of tracklets
+Adrien Gaidon, Zaid Harchaoui, Cordelia Schmid
+To cite this version:
+Adrien Gaidon, Zaid Harchaoui, Cordelia Schmid. Recognizing activities with cluster-trees of
+tracklets. Richard Bowden and John P. Collomosse and Krystian Mikolajczyk. BMVC 2012
+- British Machine Vision Conference, Sep 2012, Guildford, United Kingdom. BMVA Press,
+pp.30.1-30.13, 2012, <10.5244/C.26.30>. <hal-00722955v2>
+HAL Id: hal-00722955
+https://hal.inria.fr/hal-00722955v2
+Submitted on 7 Aug 2012
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destin´ee au d´epˆot et `a la diffusion de documents
+scientifiques de niveau recherche, publi´es ou non,
+´emanant des ´etablissements d’enseignement et de"
+93cbb3b3e40321c4990c36f89a63534b506b6daf,Learning from examples in the small sample case: face expression recognition,"IEEE TRANSACTIONS ON SYSTEMS, MAN, AND CYBERNETICS—PART B: CYBERNETICS, VOL. 35, NO. 3, JUNE 2005
+Learning From Examples in the Small Sample Case:
+Face Expression Recognition
+Guodong Guo and Charles R. Dyer, Fellow, IEEE"
+93a4c7ac0b09671db8cd3adbe62851d7befc4658,Machine Analysis of Facial Expressions,"We are IntechOpen,
+the world’s leading publisher of
+Open Access books
+Built by scientists, for scientists
+,800
+16,000
+Open access books available
+International authors and editors
+Downloads
+Our authors are among the
+Countries delivered to
+TOP 1%
+2.2%
+most cited scientists
+Contributors from top 500 universities
+Selection of our books indexed in the Book Citation Index
+in Web of Science™ Core Collection (BKCI)
+Interested in publishing with us?
+Contact
+Numbers displayed above are based on latest data collected."
+93ed1c9274906f1916d58cd618a9a82858448a3f,Deep Learning for Accurate Population Counting in Aerial Imagery,"Deep Learning for Accurate Population Counting in
+Aerial Imagery
+Matt Epperson, James Rotenberg, Eric Lo, Sebastian Afshari & Brian Kim"
+931a70ec0bfc1d86894ff37a6f702a033e0129e3,ParlAI: A Dialog Research Software Platform,"ParlAI: A Dialog Research Software Platform
+Alexander H. Miller, Will Feng, Adam Fisch, Jiasen Lu,
+Dhruv Batra, Antoine Bordes, Devi Parikh and Jason Weston
+Facebook AI Research"
+93dce341666b6a57f8888dddb25a3fd37df69b02,Deep Layer Aggregation,"Deep Layer Aggregation
+Fisher Yu Dequan Wang
+Evan Shelhamer
+Trevor Darrell
+UC Berkeley"
+934a77d099a38374ef1babe02d95952c089cce5f,Set of texture descriptors for music genre classification,"Set of texture descriptors for music genre classification
+Loris Nanni
+Yandre Costa
+Department of
+Information Engineering
+University of Padua
+viale Gradenigo 6
+5131, Padua, Italy
+State University of
+Maringa (UEM)
+Av. Colombo, 5790
+87020-900, Maringa,
+Parana, Brazil"
+93e962f8886eae13b02ad2aa98bdedfbd7e68709,Dual Conditional GANs for Face Aging and Rejuvenation,"Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intelligence (IJCAI-18)
+Source: datasets(b)Target: our outputs are a series of images belonging to the same person010Input FaceOutputs2 78x/Age groupy/Personality 2178Non-sequential facial imagesSequential facial imagesFigure1:Anillustrationofourfaceagingandrejuvenationpro-cess.As(a)shows,ourtrainingexamplesarenon-sequentialandun-paired,andweaimtosimultaneouslyrenderaseriesofage-changedfacialimagesofapersonandpreservepersonality,asshownin(b).specificallydescribethechangesoffacesindifferentages.Thesemethodsparametricallymodelshapeandtexturepa-rametersfordifferentfeaturesofeachagegroup,e.g.,mus-cles[Suoetal.,2012],wrinkles[RamanathanandChellappa,2008;Suoetal.,2010]andfacialstructure[RamanathanandChellappa,2006;Lanitisetal.,2002].Ontheotherhand,prototype-basedmethods[KemelmacherShlizermanetal.,2014;Tiddemanetal.,2001]dividefacesintogroupsbyage,andthenconstructanaveragefaceasitsprototypeforeachagegroup.Afterthat,thesemethodscantransferthetexturedifferencebetweentheprototypestotheinputfacialimage.Morerecently,thedeeplearning-basedmethod[Wangetal.,2016;Liuetal.,2017]achievedthestate-of-the-artper-formance.In[Wangetal.,2016],RNNisappliedonthecoefficientsofeigenfacesforagepatterntransition.Itper-formsthegroup-basedlearningwhichrequiresthetrueageoftestingfacestolocalizethetransitionstatewhichmightnotbeconvenient.Inaddition,theseapproachesonlypro-videageprogressionfromyoungerfacetoolderones.Toachieveflexiblebidirectionalagechanges,itmayneedtoretrainthemodelinversely.GenerativeAdversarialNet-"
+935ce31268232b25c9f685128ae0ae9e5c3a0e8e,Implementation of Human detection system using DM 3730,"International Journal of Science and Research (IJSR)
+ISSN (Online): 2319-7064
+Impact Factor (2012): 3.358
+Implementation of Human detection system using
+DM3730
+Amaraneni Srilaxmi1, Shaik Khaddar Sharif2
+VNR Vignana Jyothi Institute of Engineering & Technology, Bachupally, Hyderabad, India
+VNR Vignana Jyothi Institute of Engineering & Technology, Bachupally, Hyderabad, India
+digital
+ontent management,"
+93798ead90afe86636ca582a92cadd846905a95d,Learning Visual Classifiers From Limited Labeled Images,
+930663a0812a7a53963563b647c5957807d3d97d,A unified view of non-monotonic core selection and application steering in heterogeneous chip multiprocessors,"A Unified View of Non-monotonic Core Selection
+nd Application Steering in Heterogeneous
+Chip Multiprocessors
+Sandeep Navada*, Niket K. Choudhary*,
+Salil V. Wadhavkar*
+CPU Design Center
+Qualcomm
+Raleigh, NC, USA
+{snavada, niketc,"
+930a6ea926d1f39dc6a0d90799d18d7995110862,Privacy-preserving photo sharing based on a secure JPEG,"Privacy-Preserving Photo Sharing
+ased on a Secure JPEG
+Lin Yuan, Pavel Korshunov, and Touradj Ebrahimi
+Multimedia Signal Processing Group, EPFL, Lausanne, Switzerland
+Email: {lin.yuan, pavel.korshunov,"
+94b9c0a6515913bad345f0940ee233cdf82fffe1,Face Recognition using Local Ternary Pattern for Low Resolution Image,"International Journal of Science and Research (IJSR)
+ISSN (Online): 2319-7064
+Impact Factor (2012): 3.358
+Face Recognition using Local Ternary Pattern for
+Low Resolution Image
+Vikas1, Amanpreet Kaur2
+Research Scholar, CGC Group of Colleges, Gharuan, Punjab, India
+Assistant Professor, Department of Computer Science Engineering, Chandigarh University, Gharuan, Punjab, India"
+94826cb68980e3b89118569c93cfd36f3945fa99,Computer face-matching technology using two-dimensional photographs accurately matches the facial gestalt of unrelated individuals with the same syndromic form of intellectual disability,"Dudding-Byth et al. BMC Biotechnology (2017) 17:90
+DOI 10.1186/s12896-017-0410-1
+Open Access
+R ES EAR CH A R T I C LE
+Computer face-matching technology using
+two-dimensional photographs accurately
+matches the facial gestalt of unrelated
+individuals with the same syndromic form
+of intellectual disability
+Tracy Dudding-Byth1,2,3,11*†
+Susan M. White5,6, John Attia3,4, Han Brunner7, Bert de Vries7, David Koolen7, Tjitske Kleefstra7, Seshika Ratwatte4,8,
+Carlos Riveros3, Steve Brain9 and Brian C. Lovell9,10
+, Anne Baxter1†, Elizabeth G. Holliday3,4, Anna Hackett1,4,11, Sheridan O’Donnell1,"
+94eeae23786e128c0635f305ba7eebbb89af0023,On the Emergence of Invariance and Disentangling in Deep Representations,"Journal of Machine Learning Research 18 (2018) 1-34
+Submitted 01/17; Revised 4/18; Published 6/18
+Emergence of Invariance and Disentanglement
+in Deep Representations∗
+Alessandro Achille
+Department of Computer Science
+University of California
+Los Angeles, CA 90095, USA
+Stefano Soatto
+Department of Computer Science
+University of California
+Los Angeles, CA 90095, USA
+Editor: Yoshua Bengio"
+940ab36a8b2cdf6cb6a08093bd382ad375717942,Human violence recognition and detection in surveillance videos,"Human Violence Recognition and Detection in Surveillance Videos
+Piotr Bilinski
+nd Francois Bremond
+INRIA Sophia Antipolis, STARS team
+004 Route des Lucioles, BP93, 06902 Sophia Antipolis, France"
+9499b8367a84fccb3651a95e4391d6e17fd92ec5,Face Recognition Issues in a Border Control Environment,"Face Recognition Issues in a Border Control
+Environment
+Marijana Kosmerlj, Tom Fladsrud, Erik Hjelm˚as, and Einar Snekkenes
+Department of Computer Science and Media Technology
+NISlab
+Gjøvik University College
+P. O. Box 191, N-2802 Gjøvik, Norway"
+942bb63e78d9edfe3b8d0a4bf9a3511c736a6930,"Implementing Efficient, Portable Computations for Machine Learning","Implementing Efficient, Portable Computations for Machine
+Learning
+Matthew Walter Moskewicz
+Electrical Engineering and Computer Sciences
+University of California at Berkeley
+Technical Report No. UCB/EECS-2017-37
+http://www2.eecs.berkeley.edu/Pubs/TechRpts/2017/EECS-2017-37.html
+May 9, 2017"
+9432e1157f252ee626511b2270126436b0e80b73,A set theoretic approach to object-based image restoration,"Image Processing: Algorithms and Systems IV, edited by Edward R. Dougherty,
+Jaakko T. Astola, Karen O. Egiazarian, Proc. of SPIE-IS&T Electronic Imaging,
+SPIE Vol. 5672 © 2005 SPIE and IS&T · 0277-786X/05/$15"
+944faf7f14f1bead911aeec30cc80c861442b610,Action Tubelet Detector for Spatio-Temporal Action Localization,"Action Tubelet Detector for Spatio-Temporal Action Localization
+Vicky Kalogeiton1,2
+Philippe Weinzaepfel3
+Vittorio Ferrari2
+Cordelia Schmid1"
+9458c518a6e2d40fb1d6ca1066d6a0c73e1d6b73,A Benchmark and Comparative Study of Video-Based Face Recognition on COX Face Database,"A Benchmark and Comparative Study of
+Video-Based Face Recognition
+on COX Face Database
+Zhiwu Huang, Student Member, IEEE, Shiguang Shan, Senior Member, IEEE,
+Ruiping Wang, Member, IEEE, Haihong Zhang, Member, IEEE,
+Shihong Lao, Member, IEEE, Alifu Kuerban,
+nd Xilin Chen, Senior Member, IEEE"
+940a675de8a48b54bac6b420f551529d2bc53b99,"Advances , Challenges , and Opportunities in Automatic Facial Expression Recognition","Advances, Challenges, and Opportunities in
+Automatic Facial Expression Recognition
+Brais Martinez and Michel F. Valstar"
+9434524669777d281a8a7358f20181c9e157942e,VSEM: An open library for visual semantics representation,"Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics, pages 187–192,
+Sofia, Bulgaria, August 4-9 2013. c(cid:13)2013 Association for Computational Linguistics"
+948af4b04b4a9ae4bff2777ffbcb29d5bfeeb494,Face Recognition From Single Sample Per Person by Learning of Generic Discriminant Vectors,"Available online at www.sciencedirect.com
+Procedia Engineering 41 ( 2012 ) 465 – 472
+International Symposium on Robotics and Intelligent Sensors 2012 (IRIS 2012)
+Face Recognition From Single Sample Per Person by Learning of
+Generic Discriminant Vectors
+Fadhlan Hafiza*, Amir A. Shafieb, Yasir Mohd Mustafahb
+Faculty of Electrical Engineering, University of Technology MARA, Shah Alam, 40450 Selangor, Malaysia
+Faculty of Engineering, International Islamic University, Jalan Gombak, 53100 Kuala Lumpur, Malaysia"
+947399fef66bd8c536c6f784a0501b34e4e094bf,Towards Recovery of Conditional Vectors from Conditional Generative Adversarial Networks,"Towards Recovery of Conditional Vectors from
+Conditional Generative Adversarial Networks
+Sihao Ding
+Andreas Wallin
+{sihao.ding,"
+9458642e7645bfd865911140ee8413e2f5f9fcd6,Efficient Multiple People Tracking Using Minimum Cost Arborescences,"Ef‌f‌icient Multiple People Tracking Using
+Minimum Cost Arborescences
+Roberto Henschel1, Laura Leal-Taix´e2, Bodo Rosenhahn1
+Institut f¨ur Informationsverarbeitung, Leibniz Universit¨at Hannover,
+Institute of Geodesy and Photogrammetry, ETH Zurich,"
+949079cc466e875df1ee6bd6590052ba382a35cf,0 Large-Scale Face Image Retrieval :,"We are IntechOpen,
+the world’s leading publisher of
+Open Access books
+Built by scientists, for scientists
+,800
+16,000
+Open access books available
+International authors and editors
+Downloads
+Our authors are among the
+Countries delivered to
+TOP 1%
+2.2%
+most cited scientists
+Contributors from top 500 universities
+Selection of our books indexed in the Book Citation Index
+in Web of Science™ Core Collection (BKCI)
+Interested in publishing with us?
+Contact
+Numbers displayed above are based on latest data collected."
+94686d5df14875ed800a9f710bfa43ba4eb19b75,Occlusion Handling for Pedestrian Tracking Using Partial Object Template-based Component Particle Filter,"IADIS International Journal on Computer Science and Information Systems
+Vol. 8, No. 2, pp. 40-50
+ISSN: 1646-3692
+OCCLUSION HANDLING FOR PEDESTRIAN
+TRACKING USING PARTIAL OBJECT
+TEMPLATE-BASED COMPONENT PARTICLE
+FILTER
+Daw-Tung Lin. Department of Computer Science and Information Engineering, National Taipei
+University, Taiwan.
+Yen-Hsiang Chang. Department of Computer Science and Information Engineering, National Taipei
+University, Taiwan."
+941166547968081463398c9eb041f00eb04304f7,Structure-Preserving Sparse Decomposition for Facial Expression Analysis,"Structure-Preserving Sparse Decomposition for
+Facial Expression Analysis
+Sima Taheri, Student Member, IEEE, Qiang Qiu, Student Member, IEEE, and Rama Chellappa, Fellow, IEEE"
+940865fc3f7ee5b386c4188c231eb6590db874e9,Security and Surveillance System for Drivers Based on User Profile and learning systems for Face Recognition,"Network Protocols and Algorithms
+ISSN 1943-3581
+015, Vol. 7, No. 1
+Security and Surveillance System for Drivers based on
+User Profile and Learning Systems for Face
+Recognition
+Loubna Cherrat
+Mathematic and Application Laboratory, FSTT of Tangier
+Tangier (Morocco)
+Tel: 06-64-43-39-18 E-mail:
+Mostafa Ezziyyani
+Mathematic and Application Laboratory, FSTT of Tangier
+Tangier (Morocco)
+Tel: 06-61-63-03-01 E-mail:
+Annas EL Mouden
+Mathematic and Application Laboratory, FSTT of Tangier
+Tangier (Morocco)
+Tel: 06-66-63-73-63 E-mail:
+Mohammed Hassar
+Mathematic and Application Laboratory, FSTT of Tangier"
+9441253b638373a0027a5b4324b4ee5f0dffd670,A Novel Scheme for Generating Secure Face Templates Using BDA,"A Novel Scheme for Generating Secure Face
+Templates Using BDA
+Shraddha S. Shinde
+Prof. Anagha P. Khedkar
+P.G. Student, Department of Computer Engineering,
+Associate Professor, Department of Computer
+MCERC,
+Nashik (M.S.), India
+e-mail:"
+948853c269cf97251ba5082db0481ce6f96cf886,Efficient Distributed Training of Vehicle Vision Systems,"Efficient Distributed Training of Vehicle Vision Systems
+Sung-Li Chiang
+Xinlei Pan
+Electrical Engineering and Computer Sciences
+University of California at Berkeley
+Technical Report No. UCB/EECS-2016-195
+http://www2.eecs.berkeley.edu/Pubs/TechRpts/2016/EECS-2016-195.html
+December 11, 2016"
+94a11b601af77f0ad46338afd0fa4ccbab909e82,"Title of dissertation : EFFICIENT SENSING , SUMMARIZATION AND CLASSIFICATION OF VIDEOS",
+0e23229289b1fbea14bc425718bc0a227d100b8e,Survey of Recent Advances in Visual Question Answering,"Survey of Recent Advances in Visual Question Answering
+Supriya Pandhre∗
+Indian Institute of Technology Hyderabad
+Hyderabad, India
+Shagun Sodhani
+Adobe Systems
+Noida, India"
+0efdd82a4753a8309ff0a3c22106c570d8a84c20,Lda with Subgroup Pca Method for Facial Image Retrieval,"LDA WITH SUBGROUP PCA METHOD FOR FACIAL IMAGE RETRIEVAL
+Wonjun Hwang, Tae-Kyun Kim, Seokcheol Kee
+Human Computer Interaction Lab., Samsung Advanced Institute of Technology, Korea."
+0ed78b9562661c550e382ed30de252d877a04cdc,An Evaluation of Video-to-Video Face Verification,"An Evaluation of Video-to-Video Face Verification
+Norman Poh, Member, IEEE, Chi Ho Chan, Josef Kittler, Sébastien Marcel, Christopher Mc Cool,
+Enrique Argones Rúa, José Luis Alba Castro, Mauricio Villegas, Student Member, IEEE, Roberto Paredes,
+Vitomir ˇStruc, Member, IEEE, Nikola Paveˇsic´, Albert Ali Salah, Hui Fang, and Nicholas Costen
+features,"
+0ec17d929f62660fb3d1bcdd791f9639034f5344,How Do We Evaluate Facial Emotion Recognition?,"Psychology & Neuroscience
+016, Vol. 9, No. 2, 153–175
+983-3288/16/$12.00
+© 2016 American Psychological Association
+http://dx.doi.org/10.1037/pne0000047
+How Do We Evaluate Facial Emotion Recognition?
+Ana Idalina de Paiva-Silva
+Universidade de Brasília and Universidade Federal
+de Goiás
+Marta Kerr Pontes,
+Juliana Silva Rocha Aguiar, and
+Wânia Cristina de Souza
+Universidade de Brasília
+The adequate interpretation of facial expressions of emotion is crucial for social
+functioning and human interaction. New methods are being applied, and a review of the
+methods that are used to evaluate facial emotion recognition is timely for the field. An
+extensive review was conducted using the Web of Science, PsycINFO, and PubMed
+databases. The following keywords were used to identify articles that were published
+within the past 20 years: emotion recognition, face, expression, and assessment. The
+initial search yielded 291 articles. After applying the exclusion criteria, 115 articles"
+0e9f7d8554e065a586163845dd2bfba26e55cefb,Registration of 3D Face Scans with Average Face Models,"Registration of 3D Face Scans with Average Face Models
+Albert Ali Salah1,2, Ne¸se Aly¨uz1, Lale Akarun1
+{salah, nese.alyuz,
+Bo˘gazi¸ci University,
+4342 Bebek, ˙Istanbul, Turkey
+Phone: +90 212 359 4523-24
+Fax: +90 212 287 2461
+Centrum voor Wiskunde en Informatica,
+Kruislaan 413, 1098 SJ, 94079, The Netherlands
+Phone: +31 020 592 4214
+Fax: +31 020 592 4199"
+0ef40a21edf2b48c73fd51c21d213ee69ca30a4b,Hidden Markov model as a framework for situational awareness,
+0eac652139f7ab44ff1051584b59f2dc1757f53b,Efficient Branching Cascaded Regression for Face Alignment under Significant Head Rotation,"Efficient Branching Cascaded Regression
+for Face Alignment under Significant Head Rotation
+Brandon M. Smith
+Charles R. Dyer
+University of Wisconsin–Madison"
+0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a,The Pascal Visual Object Classes (VOC) Challenge,"Int J Comput Vis (2010) 88: 303–338
+DOI 10.1007/s11263-009-0275-4
+The PASCAL Visual Object Classes (VOC) Challenge
+Mark Everingham · Luc Van Gool ·
+Christopher K. I. Williams · John Winn ·
+Andrew Zisserman
+Received: 30 July 2008 / Accepted: 16 July 2009 / Published online: 9 September 2009
+© Springer Science+Business Media, LLC 2009"
+0e13f7fc698cbe78ddbf3412b13ca27a4d878fa8,Greater need to belong predicts a stronger preference for extraverted faces,"See discussions, stats, and author profiles for this publication at: https://www.researchgate.net/publication/306357929
+Greater need to belong predicts a stronger
+preference for extraverted faces ☆
+Article in Personality and Individual Differences · January 2017
+DOI: 10.1016/j.paid.2016.08.012
+CITATION
+authors, including:
+READS
+Mitch Brown
+University of Southern Mississippi
+6 PUBLICATIONS 5 CITATIONS
+SEE PROFILE
+Some of the authors of this publication are also working on these related projects:
+Metaphor and Disease View project
+Limbal Rings View project
+All content following this page was uploaded by Mitch Brown on 10 November 2016.
+The user has requested enhancement of the downloaded file."
+0e031312cb6e1634e3115e428505e2be9ef46b75,Explicit Knowledge-based Reasoning for Visual Question Answering,"Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+giraffe people people Attributes: glass house room standing walking wall zoo Scenes: museum indoor Visual Question: How many giraffes are there in the image? Answer: Two. Common-Sense Question: Is this image related to zoology? Answer: Yes. Reason: Object/Giraffe --> Herbivorous animals --> Animal --> Zoology; Attribute/Zoo --> Zoology. KB-Knowledge Question: What are the common properties between the animal in this image and zebra? Answer: Herbivorous animals; Animals; Megafauna of Africa. Figure1:ArealexampleoftheproposedKB-VQAdatasetandtheresultsgivenbyAhab,theproposedVQAapproach.Ourapproachanswersquestionsbyextractingseveraltypesofvisualconceptsfromanimageandaligningthemtolarge-scalestructuredknowl-edgebases.Apartfromanswers,ourapproachcanalsoproviderea-sonsandexplanationsforcertaintypesofquestions.itisansweringthequestionbasedonimageinformation,orjusttheprevalenceofaparticularanswerinthetrainingset.Thesecondproblemisthatbecausethemodelistrainedonindividualquestion/answerpairs,therangeofquestionsthatcanbeaccuratelyansweredislimited.Answeringgeneralquestionsposedbyhumansaboutimagesinevitablyrequiresreferencetoadiversevarietyofinformationnotcontainedintheimageitself.CapturingsuchlargeamountofinformationwouldrequireanimplausiblylargeLSTM,andacompletelyimpracticalamountoftrainingdata.Thethird,andmajor,problemwiththeLSTMapproachisthatitisincapableofex-plicitreasoningexceptinverylimitedsituations[Rockt¨ascheletal.,2016].OurmaincontributionisamethodwecallAhab1foran-sweringawidevarietyofquestionsaboutimagesthatrequire1Ahab,thecaptaininthenovelMobyDick,iseitherabrilliantvisionary,oradeludedfanatic,dependingonyourperspective."
+0eb45876359473156c0d4309f548da63470d30ee,A Deeply-Initialized Coarse-to-fine Ensemble of Regression Trees for Face Alignment,"A Deeply-initialized Coarse-to-fine Ensemble of
+Regression Trees for Face Alignment
+Roberto Valle1[0000−0003−1423−1478], Jos´e M.
+Buenaposada2[0000−0002−4308−9653], Antonio Vald´es3, and Luis Baumela1
+Univ. Polit´ecnica de Madrid, Spain.
+Univ. Rey Juan Carlos, Spain.
+Univ. Complutense de Madrid, Spain."
+0e50fe28229fea45527000b876eb4068abd6ed8c,Angle Principal Component Analysis,Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+0eff410cd6a93d0e37048e236f62e209bc4383d1,Learning discriminative MspLBP features based on Ada-LDA for multi-class pattern classification,"Anchorage Convention District
+May 3-8, 2010, Anchorage, Alaska, USA
+978-1-4244-5040-4/10/$26.00 ©2010 IEEE"
+0ecaabbf846bbc78c91bf7ff71b998b61c0082d8,Automated Visual Fin Identification of Individual Great White Sharks,"Noname manuscript No.
+(will be inserted by the editor)
+Automated Visual Fin Identification
+of Individual Great White Sharks
+Benjamin Hughes and Tilo Burghardt
+Received: date / Accepted: date"
+0ee737085af468f264f57f052ea9b9b1f58d7222,SiGAN: Siamese Generative Adversarial Network for Identity-Preserving Face Hallucination,"SiGAN: Siamese Generative Adversarial Network
+for Identity-Preserving Face Hallucination
+Chih-Chung Hsu, Member, IEEE, Chia-Wen Lin, Fellow, IEEE, Weng-Tai Su, Student Member, IEEE,
+and Gene Cheung, Senior Member, IEEE,"
+0ee661a1b6bbfadb5a482ec643573de53a9adf5e,On the Use of Discriminative Cohort Score Normalization for Unconstrained Face Recognition,"JOURNAL OF LATEX CLASS FILES, VOL. X, NO. X, MONTH YEAR
+On the Use of Discriminative Cohort Score
+Normalization for Unconstrained Face Recognition
+Massimo Tistarelli, Senior Member, IEEE, Yunlian Sun, and Norman Poh, Member, IEEE"
+0e986f51fe45b00633de9fd0c94d082d2be51406,"Face detection, pose estimation, and landmark localization in the wild","Face Detection, Pose Estimation, and Landmark Localization in the Wild
+Xiangxin Zhu Deva Ramanan
+Dept. of Computer Science, University of California, Irvine"
+0e36bf238d2db6c970ade0b5f68811ed6debc4e8,Recognizing Partial Biometric Patterns,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 4, AUGUST 2018
+Recognizing Partial Biometric Patterns
+Lingxiao He, Student Member, IEEE, Zhenan Sun, Member, IEEE, Yuhao Zhu and Yunbo Wang"
+0e49a23fafa4b2e2ac097292acf00298458932b4,Unsupervised Detection of Outlier Images Using Multi-Order Image Transforms,"Theory and Applications of Mathematics & Computer Science 3 (1) (2013) 13–31
+Unsupervised Detection of Outlier Images Using Multi-Order
+Image Transforms
+Lior Shamira,∗
+Lawrence Technological University, 21000 W Ten Mile Rd., Southfield, MI 48075, United States."
+0e95f68171b27621a39e393afb7c74ef1506fe85,Content Based Image Retrieval Using Enhanced Local Tetra Patterns,"CONTENT BASED IMAGE RETRIEVAL USING
+ENHANCED LOCAL TETRA PATTERNS
+Divya Gupta1, Anjali Jindal2
+Assistant Professor, Computer Science Department
+SRM University, Delhi NCR Campus, India
+M.Tech Student (Computer Science and Engineering)
+SRM University, Delhi NCR Campus, India"
+0ed91520390ebdee13a0ac13d028f65d959bdc10,Hard Example Mining with Auxiliary Embeddings,"Hard Example Mining with Auxiliary Embeddings
+Evgeny Smirnov
+Speech Technology Center
+Aleksandr Melnikov
+ITMO University
+Andrei Oleinik
+ITMO University
+melnikov
+Elizaveta Ivanova
+Ilya Kalinovskiy
+Speech Technology Center
+Speech Technology Center
+Eugene Luckyanets
+ITMO University"
+0e78af9bd0f9a0ce4ceb5f09f24bc4e4823bd698,Spontaneous Subtle Expression Recognition: Imbalanced Databases & Solutions,"Spontaneous Subtle Expression Recognition:
+Imbalanced Databases & Solutions (cid:63)
+Anh Cat Le Ngo1, Raphael Chung-Wei Phan1, John See2
+Faculty of Engineering,
+Multimedia University (MMU), Cyberjaya, Malaysia
+Faculty of Computing & Informatics,
+Multimedia University (MMU), Cyberjaya, Malaysia"
+0e2ea7af369dbcaeb5e334b02dd9ba5271b10265,Multi-Level Feature Abstraction from Convolutional Neural Networks for Multimodal Biometric Identification,
+0e7fdc0b03a1481b2fa1b5d592125f41b6cb7ad7,Dual CNN Models for Unsupervised Monocular Depth Estimation,"Dual CNN Models for Unsupervised Monocular Depth Estimation
+Computer Vision Group,
+Indian Institute of Information Technology, Sri City,
+Vamshi Krishna Repala
+Shiv Ram Dubey
+Andhra Pradesh-517646, India
+vamshi.r14,"
+0e7c70321462694757511a1776f53d629a1b38f3,2012 Proceedings of the Performance Metrics for Intelligent Systems (PerMI'12) Workshop,"NIST Special Publication 1136
+2012 Proceedings of the
+Performance Metrics for Intelligent
+Systems (PerMI ‘12) Workshop
+Rajmohan Madhavan
+Elena R. Messina
+Brian A. Weiss
+http://dx.doi.org/10.6028/NIST.SP.1136"
+0e5640677feb2e1d78639b516f7977e80d9d394f,Volume-based Human Re-identification with RGB-D Cameras,"Cosar, S., Coppola, C. and Bellotto, N.
+Volume-based Human Re-identification with RGB-D Cameras.
+In Proceedings of the 12th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications (VISIGRAPP 2017) - Volume 4: VISAPP, pages
+389-397
+ISBN: 978-989-758-225-7
+Copyright c(cid:13) 2017 by SCITEPRESS – Science and Technology Publications, Lda. All rights reserved"
+0efb7d1413ada560ab1aee1ea4cc94d80737e662,Performance Analysis of Eye localization Methods for Real Time Vision Interface using Low Grade Video Camera,"International Journal of Computer Applications (0975 – 8887)
+Volume 114 – No. 2, March 2015
+Performance Analysis of Eye localization Methods for
+Real Time Vision Interface using Low Grade Video
+Krupa Jariwala
+Assistant Professor
+Computer Engineering Department
+SVNIT, Surat
+Camera
+Upena Dalal, Ph.D.
+Associate Professor
+Electronics Engineering Department
+SVNIT, Surat"
+0edd3517579a110da989405309e4235e47dd8937,Performance and security analysis of Gait-based user authentication,"Performance and Security Analysis
+of Gait-based User Authentication
+Doctoral Dissertation by
+Davrondzhon Gafurov
+Submitted to the Faculty of Mathematics and Natural Sciences at the
+University of Oslo in partial fulfillment of the requirements for the degree
+Philosophiae Doctor (PhD) in Computer Science"
+607850dc8e640c25f027f2eee202dee5605cf27c,A Survey on Face Detection and Recognition Techniques in Different Application Domain,"I.J. Modern Education and Computer Science, 2014, 8, 34-44
+Published Online August 2014 in MECS (http://www.mecs-press.org/)
+DOI: 10.5815/ijmecs.2014.08.05
+A Survey on Face Detection and Recognition
+Techniques in Different Application Domain
+Subrat Kumar Rath, Siddharth Swarup Rautaray
+School of Computer Engineering, KIIT University, Bhubaneswar, Odisha, India
+related
+technology
+recognition,
+to biometric science
+the popularity and"
+600025c9a13ff09c6d8b606a286a79c823d89db8,A Review on Linear and Non-linear Dimensionality Reduction Techniques,"Machine Learning and Applications: An International Journal (MLAIJ) Vol.1, No.1, September 2014
+A REVIEW ON LINEAR AND NON-LINEAR
+DIMENSIONALITY REDUCTION
+TECHNIQUES
+Arunasakthi. K, 2KamatchiPriya. L
+Assistant Professor
+Department of Computer Science and Engineering
+Ultra College of Engineering and Technology for Women,India.
+Assistant Professor
+Department of Computer Science and Engineering
+Vickram College of Engineering, Enathi, Tamil Nadu, India."
+602ff4fd0f5bd10c9fb971ecd2317e542f070883,Object Detection from the Satellite Images using Divide and Conquer Model,"SSRG International Journal of Computer Science and Engineering (SSRG-IJCSE) – volume1 issue10 Dec 2014
+Object Detection from the Satellite Images
+using Divide and Conquer Model
+Lakhwinder Kaur, Guru Kashi University
+Er.Vinod Kumar Sharma (Assistant professor), Guru Kashi University"
+60fb007eef153fdf9c3d6620c419bef1c657c555,A soft-biometrics dataset for person tracking and re-identification,"A Soft-Biometrics Dataset for Person Tracking and Re-Identification
+Arne Schumann, Eduardo Monari
+Fraunhofer Institute for Optronics, System Technologies and Image Exploitation
+{arne.schumann,"
+60f7de07de4d090990120483bd5407369b29a120,ℓ₁-Norm Heteroscedastic Discriminant Analysis Under Mixture of Gaussian Distributions.,"L1-Norm Heteroscedastic Discriminant Analysis
+under Mixture of Gaussian Distributions
+Wenming Zheng, Member, IEEE, Cheng Lu, Zhouchen Lin, Fellow, IEEE, Tong Zhang, Zhen Cui, Wankou Yang"
+60ec284f67c1012419e5dea508d1bae4bc144bb2,Curvelet Based Multiresolution Analysis of Face Images for Recognition using Robust Local Binary Pattern Descriptor,"Proc. of Int. Conf. on Recent Trends in Signal Processing, Image Processing and VLSI, ICrtSIV
+Curvelet Based Multiresolution Analysis of Face
+Images for Recognition using Robust Local Binary
+Pattern Descriptor
+Nagaraja S. and Prabhakar C.J
+Department of P.G. Studies and Research in Computer Science,
+Kuvempu University, Karnataka, India
+Email: { nagarajas27, psajjan"
+604a4f7c0958c5cac017b853a7d0f5f5b4a4c509,Can We Teach Empathy ? Techniques Using Standardized Patients to Assist Learners with Empathy ( Submission # 1039 ) Gayle,
+60ea05df719973ac4d9d70d3141e671131a55db5,A Practical Subspace Approach To Landmarking,"A Practical Subspace Approach To Landmarking
+Signals and systems group, Faculty of Electrical Engineering, Mathematics and Computer Science, University of
+G. M. Beumer, and R.N.J. Veldhuis
+Twente, Enschede, The Netherlands
+Email:"
+60e2b9b2e0db3089237d0208f57b22a3aac932c1,Frankenstein: Learning Deep Face Representations Using Small Data,"Frankenstein: Learning Deep Face Representations
+using Small Data
+Guosheng Hu, Member, IEEE, Xiaojiang Peng, Yongxin Yang, Timothy M. Hospedales, and Jakob Verbeek"
+6097c33a382c62a44379926ee96b23b51dba49c4,From Depth Data to Head Pose Estimation: a Siamese approach,"From Depth Data to Head Pose Estimation: a Siamese approach
+Marco Venturelli, Guido Borghi, Roberto Vezzani, Rita Cucchiara
+University of Modena and Reggio Emilia, DIEF
+{marco.venturelli, guido.borghi, roberto.vezzani,
+Via Vivarelli 10, Modena, Italy
+Keywords:
+Head Pose Estimation, Deep Learning, Depth Maps, Automotive"
+6025f0761024006e0ea5782a7cea29ed69231fbf,Neural Mechanisms of Qigong Sensory Training Massage for Children With Autism Spectrum Disorder: A Feasibility Study,"Original Article
+Neural Mechanisms of Qigong Sensory
+Training Massage for Children With Autism
+Spectrum Disorder: A Feasibility Study
+Global Advances in Health and Medicine
+Volume 7: 1–10
+! The Author(s) 2018
+Reprints and permissions:
+sagepub.com/journalsPermissions.nav
+DOI: 10.1177/2164956118769006
+journals.sagepub.com/home/gam
+Kristin K Jerger, MD, LMBT1, Laura Lundegard2, Aaron Piepmeier, PhD1,
+Keturah Faurot, PA, MPH, PhD1, Amanda Ruffino, BA1,
+Margaret A Jerger, PhD, CCC-SLP1, and Aysenil Belger, PhD3"
+60ab5c64375c4f5f8949a184fd9bfb68778ae6ea,Understanding and Verifying Kin Relationships in a Photo,"N. S. Syed et al Int. Journal of Engineering Research and Applications www.ijera.com
+ISSN : 2248-9622, Vol. 3, Issue 6, Nov-Dec 2013, pp.1225-1229
+RESEARCH ARTICLE OPEN ACCESS
+Understanding and Verifying Kin Relationships in a Photo
+Ms.N.S.Syed, 2mr.B.K.Patil, 3mr.Zafar Ul Hasan
+(Department of Computer Science, Everest College of Engg. & Tech., Aurangabad, M.S., India )
+(Department of Computer Science, Everest College of Engg. & Tech., Aurangabad, M.S., India )
+(Department of Computer Science, Sandip Institute of Technology and Research Centre, Nashik, M.S,India)"
+60ce4a9602c27ad17a1366165033fe5e0cf68078,Combination of Face Regions in Forensic Scenarios.,"TECHNICAL NOTE
+DIGITAL & MULTIMEDIA SCIENCES
+J Forensic Sci, 2015
+doi: 10.1111/1556-4029.12800
+Available online at: onlinelibrary.wiley.com
+Pedro Tome,1 Ph.D.; Julian Fierrez,1 Ph.D.; Ruben Vera-Rodriguez,1 Ph.D.; and Javier Ortega-Garcia,1
+Ph.D.
+Combination of Face Regions in Forensic
+Scenarios*"
+609ff585468ad0faba704dde1a69edb9f847c201,LogDet Rank Minimization with Application to Subspace Clustering,"Hindawi Publishing Corporation
+Computational Intelligence and Neuroscience
+Volume 2015, Article ID 824289, 10 pages
+http://dx.doi.org/10.1155/2015/824289
+Research Article
+LogDet Rank Minimization with Application to
+Subspace Clustering
+Zhao Kang,1 Chong Peng,1 Jie Cheng,2 and Qiang Cheng1
+Computer Science Department, Southern Illinois University, Carbondale, IL 62901, USA
+Department of Computer Science and Engineering, University of Hawaii at Hilo, Hilo, HI 96720, USA
+Correspondence should be addressed to Qiang Cheng;
+Received 25 March 2015; Revised 15 June 2015; Accepted 18 June 2015
+Academic Editor: Jos´e Alfredo Hernandez
+Copyright © 2015 Zhao Kang et al. This is an open access article distributed under the Creative Commons Attribution License,
+which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
+Low-rank matrix is desired in many machine learning and computer vision problems. Most of the recent studies use the nuclear
+norm as a convex surrogate of the rank operator. However, all singular values are simply added together by the nuclear norm, and
+thus the rank may not be well approximated in practical problems. In this paper, we propose using a log-determinant (LogDet)
+function as a smooth and closer, though nonconvex, approximation to rank for obtaining a low-rank representation in subspace
+clustering. Augmented Lagrange multipliers strategy is applied to iteratively optimize the LogDet-based nonconvex objective"
+60efdb2e204b2be6701a8e168983fa666feac1be,Transferring Deep Object and Scene Representations for Event Recognition in Still Images,"Int J Comput Vis
+DOI 10.1007/s11263-017-1043-5
+Transferring Deep Object and Scene Representations for Event
+Recognition in Still Images
+Limin Wang1
+· Zhe Wang2 · Yu Qiao3 · Luc Van Gool1
+Received: 31 March 2016 / Accepted: 1 September 2017
+© Springer Science+Business Media, LLC 2017"
+60189e2b592056d43a28b6ffa491867f793ebe1e,Bağlamın Hiyerarşik Doğası,"Ba˘glamın Hiyerar¸sik Do˘gası
+Fethiye Irmak Do˘gan, Sinan Kalkan
+Bilgisayar Mühendisli˘gi Bölümü
+Orta Do˘gu Teknik Üniversitesi
+Ankara, Türkiye
+Email:
+Özetçe —Ba˘glam, insan bili¸si için oldukça elzemdir ve du-
+ru¸s, davranı¸s, konu¸sma biçimi gibi gündelik insan hayatı için
+önemli pek çok sürece etki etmektedir. Yakın zamanda hay-
+atımızda yer edinmesini bekledi˘gimiz robotların da i¸slevlerini
+yerine do˘gru ve verimli bir biçimde getirebilmesi için, ba˘glamı
+algılama ve kullanma yetene˘gine sahip olması beklenmektedir.
+Ancak ba˘glam, yapay veya do˘gal bili¸s için ne kadar elzem
+olsa da, ba˘glamın yapısı yeterince çalı¸sılmı¸s ve çözümlenebilmi¸s
+de˘gildir. Bu çalı¸smada, ba˘glamın çözümlenememi¸s ö˘gelerinden
+bir tanesine, ba˘glamın yapısının hiyerar¸sik olup olmadı˘gına
+odaklanılmaktadır. Yaptı˘gımız irdelemeye göre, ba˘glama ait
+muhtelif sosyal, uzamsal ve zamansal özellikler ve olgular,
+ba˘glamın hiyerar¸sik bir yapıya sahip oldu˘gunu önermektedir. Bu
+konudaki sinirbilim, psikoloji bulguları ve bili¸simsel modelleme"
+60824ee635777b4ee30fcc2485ef1e103b8e7af9,Cascaded Collaborative Regression for Robust Facial Landmark Detection Trained Using a Mixture of Synthetic and Real Images With Dynamic Weighting,"Cascaded Collaborative Regression for Robust Facial
+Landmark Detection Trained using a Mixture of Synthetic and
+Real Images with Dynamic Weighting
+Zhen-Hua Feng, Student Member, IEEE, Guosheng Hu, Student Member, IEEE, Josef Kittler,
+Life Member, IEEE, William Christmas, and Xiao-Jun Wu"
+60c06e5884a672e0ba3bf1d3488307489583b7e5,Audiovisual speech perception and eye gaze behavior of adults with asperger syndrome.,"J Autism Dev Disord
+DOI 10.1007/s10803-011-1400-0
+O R I G I N A L P A P E R
+Audiovisual Speech Perception and Eye Gaze Behavior of Adults
+with Asperger Syndrome
+Satu Saalasti • Jari Ka¨tsyri • Kaisa Tiippana •
+Mari Laine-Hernandez • Lennart von Wendt •
+Mikko Sams
+Ó Springer Science+Business Media, LLC 2011"
+60c12b3a1bfd547f5a165c95774a1a17d18a5941,People recognition by mobile robots,"People Recognition by Mobile Robots
+Grzegorz Cielniak and Tom Duckett
+Centre for Applied Autonomous Sensor Systems
+Dept. of Technology, ¨Orebro University
+SE-70182 ¨Orebro, Sweden
+Phone: +46 19 30 11 13, +46 19 30 34 83
+Email:
+Telefax: +46 19 30 34 63"
+60bc358296ae11ac8f11286bba0a49ac7e797d26,Diverse Image-to-Image Translation via Disentangled Representations,"Diverse Image-to-Image Translation via
+Disentangled Representations
+Hsin-Ying Lee(cid:63)1, Hung-Yu Tseng(cid:63)1, Jia-Bin Huang2, Maneesh Singh3,
+Ming-Hsuan Yang1,4
+University of California, Merced 2Virginia Tech 3Verisk Analytics 4Google Cloud
+Photo to van Gogh
+Content
+Attribute Generated
+Winter to summer
+Photograph to portrait
+Input
+Output
+Input
+Output
+Fig. 1: Unpaired diverse image-to-image translation. (Lef t) Our model
+learns to perform diverse translation between two collections of images without
+aligned training pairs. (Right) Example-guided translation."
+60d75d32d345c519fa5c0d8d6b6eb62e633a8d13,Person reidentification by semisupervised dictionary rectification learning with retraining module,"Downloaded From: https://www.spiedigitallibrary.org/journals/Journal-of-Electronic-Imaging on 9/13/2018
+Terms of Use: https://www.spiedigitallibrary.org/terms-of-use
+PersonreidentificationbysemisuperviseddictionaryrectificationlearningwithretrainingmoduleHongyuanWangZongyuanDingJiZhangSuolanLiuTongguangNiFuhuaChenHongyuanWang,ZongyuanDing,JiZhang,SuolanLiu,TongguangNi,FuhuaChen,“Personreidentificationbysemisuperviseddictionaryrectificationlearningwithretrainingmodule,”J.Electron.Imaging27(4),043043(2018),doi:10.1117/1.JEI.27.4.043043."
+60ffc8db53b02e95d852f5a06f97686486f72195,Video Matching Using DC-image and Local Features,"Video Matching Using DC-image and Local
+Features
+Saddam Bekhet, Amr Ahmed and Andrew Hunter"
+6084cac63fe6fcc1436610f1db4a3764ec2e3692,TST/BTD: An End-to-End Visual Recognition System,"TST/BTD: An End-to-End Visual Recognition System
+Taehee Lee
+Stefano Soatto
+Technical Report UCLA-CSD100008
+February 8, 2010, Revised March 18, 2010"
+60161c712a491764b6f227d72e9d01e956caa873,"Wrong Today, Right Tomorrow: Experience-Based Classification for Robot Perception","Wrong Today, Right Tomorrow:
+Experience-Based Classification for
+Robot Perception
+Jeffrey Hawke†, Corina Gur˘au†, Chi Hay Tong and Ingmar Posner"
+60cc2e8abc20c145727e7089c55bdba5722436d0,Higher Order Matching for Consistent Multiple Target Tracking,"Higher Order Matching for Consistent Multiple Target Tracking
+Chetan Arora
+Amir Globerson
+School of Computer Science and Engineering
+The Hebrew University
+http://www.cs.huji.ac.il/˜chetan/"
+604d7533bdcfb06f4ae217a2cd9fd2e1467192f8,Gender Recognition using Hog with Maximized Inter-Class Difference,
+60cdcf75e97e88638ec973f468598ae7f75c59b4,Face Annotation Using Transductive Kernel Fisher Discriminant,"Face Annotation Using Transductive
+Kernel Fisher Discriminant
+Jianke Zhu, Steven C.H. Hoi, and Michael R. Lyu"
+60a33bcfe4b40cf46772e6aa1ead10489e924847,Bayesian representation learning with oracle constraints,"When crowds hold privileges: Bayesian unsupervised
+representation learning with oracle constraints
+Theofanis Karaletsos
+Computational Biology Program, Sloan Kettering Institute
+275 York Avenue, New York, USA
+Serge Belongie
+Cornell Tech
+11 Eighth Avenue #302, New York, USA
+Gunnar R¨atsch
+Computational Biology Program, Sloan Kettering Institute
+275 York Avenue, New York, USA"
+60040e4eae81ab6974ce12f1c789e0c05be00303,Graphical Facial Expression Analysis and Design Method: An Approach to Determine Humanoid Skin Deformation,"Yonas Tadesse1,2
+e-mail:
+Shashank Priya
+e-mail:
+Center for Energy Harvesting
+Materials and Systems (CEHMS),
+Bio-Inspired Materials and
+Devices Laboratory (BMDL),
+Center for Intelligent Material
+Systems and Structure (CIMSS),
+Department of Mechanical Engineering,
+Virginia Tech,
+Blacksburg, VA 24061
+Graphical Facial Expression
+Analysis and Design Method:
+An Approach to Determine
+Humanoid Skin Deformation
+The architecture of human face is complex consisting of 268 voluntary muscles that perform
+coordinated action to create real-time facial expression. In order to replicate facial expres-
+sion on humanoid face by utilizing discrete actuators, the first and foremost step is the identi-"
+60b3601d70f5cdcfef9934b24bcb3cc4dde663e7,Binary Gradient Correlation Patterns for Robust Face Recognition,"SUBMITTED TO IEEE TRANS. ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE
+Binary Gradient Correlation Patterns
+for Robust Face Recognition
+Weilin Huang, Student Member, IEEE, and Hujun Yin, Senior Member, IEEE"
+60bd1d33d74619f08baf0d7477b3f8cb8fc711e5,Amygdala Connectivity during Involuntary Attention to Emotional Faces in Typical Development and Autism Spectrum Disorders,"AMYGDALA CONNECTIVITY DURING INVOLUNTARY ATTENTION TO EMOTIONAL FACES
+IN TYPICAL DEVELOPMENT AND AUTISM SPECTRUM DISORDERS
+A Dissertation
+Submitted to the Faculty of the
+Graduate School of Arts and Sciences
+of Georgetown University
+in partial fulfillment of the requirement for the
+degree of
+Doctor of Philosophy
+in Psychology
+Eric R. Murphy, M.A.
+Washington, DC
+August 27th, 2013"
+60b66ec51ddadd132453f700d1781e8e7a8f78c8,Self-Validated Labeling of Markov Random Fields for Image Segmentation,"Self-Validated Labeling of Markov Random
+Fields for Image Segmentation
+Wei Feng, Jiaya Jia, Member, IEEE, and Zhi-Qiang Liu"
+60c7711bf9a00f697fff61474433da01f8550bf4,A Hybrid Approach of Facial Emotion Detection using Genetic Algorithm along with Artificial Neural Network,"A Hybrid Approach of Facial Emotion Detection using Genetic Algorithm along with Artificial Neural Network
+{tag} {/tag}
+International Journal of Computer Applications
+Foundation of Computer Science (FCS), NY, USA
+Volume 175
+Number 4
+Year of Publication: 2017
+Authors:
+Amrendra Sharan, Sunil Kumar Chhillar
+10.5120/ijca2017915494
+{bibtex}2017915494.bib{/bibtex}"
+6047e9af00dcffbd2effbfa600735eb111f7de65,A Discriminative Representation of Convolutional Features for Indoor Scene Recognition,"A Discriminative Representation of Convolutional
+Features for Indoor Scene Recognition
+S. H. Khan, M. Hayat, M. Bennamoun, Member, IEEE, R. Togneri, and F. Sohel, Senior Member, IEEE"
+60bffecd79193d05742e5ab8550a5f89accd8488,Proposal Classification using sparse representation and applications to skin lesion diagnosis,"PhD Thesis Proposal
+Classification using sparse representation and applications to skin
+lesion diagnosis
+I. Description
+In only a few decades, sparse representation modeling has undergone a tremendous expansion with
+successful applications in many fields including signal and image processing, computer science,
+machine learning, statistics. Mathematically, it can be considered as the problem of finding the
+sparsest solution (the one with the fewest non-zeros entries) to an underdetermined linear system
+of equations [1]. Based on the observation for natural images (or images rich in textures) that small
+scale structures tend to repeat themselves in an image or in a group of similar images, a signal
+source can be sparsely represented over some well-chosen redundant basis (a dictionary). In other
+words, it can be approximately representable by a linear combination of a few elements (also called
+atoms or basis vectors) of a redundant/over-complete dictionary.
+Such models have been proven successful in many tasks including denoising [2]-[5], compression
+[6],[7], super-resolution [8],[9], classification and pattern recognition [10]-[16]. In the context of
+classification, the objective is to find the class to which a test signal belongs, given training data
+from multiple classes. Sparse representation has become a powerful technique in classification and
+applications, including texture classification [16], face recognition [12], object detection [10], and
+segmentation of medical images [17], [18]. In conventional Sparse Representation Classification
+(SRC) schemes, learned dictionaries and sparse representation are involved to classify image pixels"
+60e065dbb795cc0d76ec187116eb87d1f42b5485,A General Framework for Density Based Time Series Clustering Exploiting a Novel Admissible Pruning Strategy,"IEEE TRANSACTIONS ON KNOWLEDGE AND DATA ENGINEERING, MANUSCRIPT ID
+A General Framework for Density Based
+Time Series Clustering Exploiting a Novel
+Admissible Pruning Strategy
+Nurjahan Begum1, Liudmila Ulanova1, Hoang Anh Dau1, Jun Wang2, and Eamonn Keogh1"
+601834a4150e9af028df90535ab61d812c45082c,A short review and primer on using video for psychophysiological observations in human-computer interaction applications,"A short review and primer on using video for
+psychophysiological observations in
+human-computer interaction applications
+Teppo Valtonen1
+Quantified Employee unit, Finnish Institute of Occupational Health,
+teppo. valtonen fi,
+POBox 40, 00250, Helsinki, Finland"
+60978f66eac568ae65d3acdc6559273fc30bc8c4,GReTA-A Novel Global and Recursive Tracking Algorithm in Three Dimensions,"GReTA – a novel Global and Recursive
+Tracking Algorithm in three dimensions
+Alessandro Attanasi, Andrea Cavagna, Lorenzo Del Castello, Irene Giardina, Asja Jeli´c,
+Stefania Melillo, Leonardo Parisi, Fabio Pellacini, Edward Shen, Edmondo Silvestri, Massimiliano Viale"
+346dbc7484a1d930e7cc44276c29d134ad76dc3f,Artists portray human faces with the Fourier statistics of complex natural scenes.,"This article was downloaded by:[University of Toronto]
+On: 21 November 2007
+Access Details: [subscription number 785020433]
+Publisher: Informa Healthcare
+Informa Ltd Registered in England and Wales Registered Number: 1072954
+Registered office: Mortimer House, 37-41 Mortimer Street, London W1T 3JH, UK
+Systems
+Publication details, including instructions for authors and subscription information:
+http://www.informaworld.com/smpp/title~content=t713663148
+Artists portray human faces with the Fourier statistics of
+complex natural scenes
+Christoph Redies a; Jan Hänisch b; Marko Blickhan a; Joachim Denzler b
+Institute of Anatomy I, School of Medicine, Friedrich Schiller University, Germany
+Department of Computer Science, Friedrich Schiller University, D-07740 Jena,
+Germany
+First Published on: 28 August 2007
+To cite this Article: Redies, Christoph, Hänisch, Jan, Blickhan, Marko and Denzler,
+Joachim (2007) 'Artists portray human faces with the Fourier statistics of complex
+To link to this article: DOI: 10.1080/09548980701574496
+URL: http://dx.doi.org/10.1080/09548980701574496"
+34b124ecdc3471167cea1675a74a0232a881bc69,Infrared face recognition based on LBP co-occurrence matrix,"Int. J. Wireless and Mobile Computing, Vol. 8, No. 1, 2015
+Infrared face recognition based on LBP
+co-occurrence matrix and partial least squares
+Zhihua Xie and Guodong Liu*
+Key Lab of Optic-Electronic and Communication,
+Jiangxi Sciences and Technology Normal University,
+Nanchang, China
+Email:
+Email:
+*Corresponding author"
+343d21ae54b45ef219ac4ba024265eeabf4d6edd,Where Will They Go? Predicting Fine-Grained Adversarial Multi-agent Motion Using Conditional Variational Autoencoders,"Where Will They Go? Predicting Fine-Grained
+Adversarial Multi-Agent Motion using
+Conditional Variational Autoencoders
+Panna Felsen1,2, Patrick Lucey2, and Sujoy Ganguly2
+BAIR, UC Berkeley
+STATS
+{plucey,"
+34d53d2a418051c56cad9e0c90ea793af6cbb729,Structured Multi-class Feature Selection for Effective Face Recognition,"Structured multi-class feature selection for
+effective face recognition
+Giovanni Fusco, Luca Zini, Nicoletta Noceti, and Francesca Odone
+DIBRIS - Universit`a di Genova
+via Dodecaneso, 35
+6146-IT, Italy"
+34c7254d2f420df6309260b2bb461a9c107dfd5a,Semi-supervised image classification based on a multi-feature image query language,"University of Huddersfield Repository
+Pein, Raoul Pascal
+Semi-Supervised Image Classification based on a Multi-Feature Image Query Language
+Original Citation
+Pein, Raoul Pascal (2010) Semi-Supervised Image Classification based on a Multi-Feature Image
+Query Language. Doctoral thesis, University of Huddersfield.
+This version is available at http://eprints.hud.ac.uk/9244/
+The University Repository is a digital collection of the research output of the
+University, available on Open Access. Copyright and Moral Rights for the items
+on this site are retained by the individual author and/or other copyright owners.
+Users may access full items free of charge; copies of full text items generally
+can be reproduced, displayed or performed and given to third parties in any
+format or medium for personal research or study, educational or not-for-profit
+purposes without prior permission or charge, provided:
+• The authors, title and full bibliographic details is credited in any copy;
+• A hyperlink and/or URL is included for the original metadata page; and
+• The content is not changed in any way.
+For more information, including our policy and submission procedure, please
+contact the Repository Team at:
+http://eprints.hud.ac.uk/"
+34b3b14b4b7bfd149a0bd63749f416e1f2fc0c4c,The AXES submissions at TrecVid 2013,"The AXES submissions at TrecVid 2013
+Robin Aly1, Relja Arandjelovi´c3, Ken Chatfield3, Matthijs Douze6, Basura Fernando4, Zaid Harchaoui6,
+Kevin McGuinness2, Noel E. O’Conner2, Dan Oneata6, Omkar M. Parkhi3, Danila Potapov6, Jérôme Revaud6,
+Cordelia Schmid6, Jochen Schwenninger5, David Scott2, Tinne Tuytelaars4, Jakob Verbeek6, Heng Wang6,
+Andrew Zisserman3
+University of Twente 2Dublin City University 3Oxford University
+KU Leuven 5Fraunhofer Sankt Augustin 6INRIA Grenoble"
+34cd99528d873e842083abec429457233fdb3226,Person Re-identification using group context,"Person Re-identification using group context
+Yiqiang Chen, Stefan Duffner, Andrei Stoian, Jean-Yves Dufour, Atilla
+Baskurt
+To cite this version:
+Yiqiang Chen, Stefan Duffner, Andrei Stoian, Jean-Yves Dufour, Atilla Baskurt. Person Re-
+identification using group context. Advanced Concepts for Intelligent Vision systems, Sep 2018,
+Poitiers, France. <hal-01895373>
+HAL Id: hal-01895373
+https://hal.archives-ouvertes.fr/hal-01895373
+Submitted on 15 Oct 2018
+HAL is a multi-disciplinary open access
+archive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+abroad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de"
+344f647463ef160956143ebc8ce370cca144961a,Confidence-Aware Probability Hypothesis Density Filter for Visual Multi-Object Tracking,
+3413af6c689eedb4fe3e7d6c5dc626647976307a,Horizontally Scalable Submodular Maximization,"Horizontally Scalable Submodular Maximization
+Mario Lucic1
+Olivier Bachem1
+Morteza Zadimoghaddam2
+Andreas Krause1
+Department of Computer Science, ETH Zurich, Switzerland
+Google Research, New York"
+34d484b47af705e303fc6987413dc0180f5f04a9,RI:Medium: Unsupervised and Weakly-Supervised Discovery of Facial Events,"RI:Medium: Unsupervised and Weakly-Supervised
+Discovery of Facial Events
+Introduction
+The face is one of the most powerful channels of nonverbal communication. Facial expression has been a
+focus of emotion research for over a hundred years [11]. It is central to several leading theories of emotion
+[16, 28, 44] and has been the focus of at times heated debate about issues in emotion science [17, 23, 40].
+Facial expression figures prominently in research on almost every aspect of emotion, including psychophys-
+iology [30], neural correlates [18], development [31], perception [4], addiction [24], social processes [26],
+depression [39] and other emotion disorders [46], to name a few. In general, facial expression provides cues
+about emotional response, regulates interpersonal behavior, and communicates aspects of psychopathology.
+While people have believed for centuries that facial expressions can reveal what people are thinking and
+feeling, it is relatively recently that the face has been studied scientifically for what it can tell us about
+internal states, social behavior, and psychopathology.
+Faces possess their own language. Beginning with Darwin and his contemporaries, extensive efforts
+have been made to manually describe this language. A leading approach, the Facial Action Coding System
+(FACS) [19] , segments the visible effects of facial muscle activation into ”action units.” Because of its
+descriptive power, FACS has become the state of the art in manual measurement of facial expression and is
+widely used in studies of spontaneous facial behavior. The FACS taxonomy was develop by manually ob-
+serving graylevel variation between expressions in images and to a lesser extent by recording the electrical
+ctivity of underlying facial muscles [9]. Because of its importance to human social dynamics, person per-"
+3402b5e354eebcf443789f3c8d3c97eccd3ae55e,Multimodal Machine Learning: A Survey and Taxonomy,"Multimodal Machine Learning:
+A Survey and Taxonomy
+Tadas Baltruˇsaitis, Chaitanya Ahuja, and Louis-Philippe Morency"
+341002fac5ae6c193b78018a164d3c7295a495e4,von Mises-Fisher Mixture Model-based Deep learning: Application to Face Verification,"von Mises-Fisher Mixture Model-based Deep
+learning: Application to Face Verification
+Md. Abul Hasnat, Julien Bohn´e, Jonathan Milgram, St´ephane Gentric and Liming Chen"
+34ae449ae64cd2c6bfc2f102eac82bd606cd12f7,A Unified Model with Structured Output for Fashion Images Classification,"A Unified Model with Structured Output for Fashion Images
+Classification
+Beatriz Quintino Ferreira
+ISR, Instituto Superior Técnico, Universidade de Lisboa,
+Portugal
+João Faria
+Farfetch"
+34ec83c8ff214128e7a4a4763059eebac59268a6,Action Anticipation By Predicting Future Dynamic Images,"Action Anticipation By Predicting Future
+Dynamic Images
+Cristian Rodriguez, Basura Fernando and Hongdong Li
+Australian Centre for Robotic Vision, ANU, Canberra, Australia
+{cristian.rodriguez, basura.fernando,"
+34128e93f4af820cea65477526645cdc82e0e59b,Decomposed Learning for Joint Object Segmentation and Categorization,"TSAI et al.: DECOMPOSED LEARNING FOR OBJECT RECOGNITION
+Decomposed Learning for Joint Object
+Segmentation and Categorization
+Yi-Hsuan Tsai
+Jimei Yang
+Ming-Hsuan Yang
+Electrical Engineering and Computer
+Science
+University of California
+Merced, USA"
+34c594abba9bb7e5813cfae830e2c4db78cf138c,Transport-based single frame super resolution of very low resolution face images,"Transport-Based Single Frame Super Resolution of Very Low Resolution Face Images
+Soheil Kolouri1, Gustavo K. Rohde1,2
+Department of Biomedical Engineering, Carnegie Mellon University. 2Department of Electrical and Computer Engineering, Carnegie Mellon University.
+We describe a single-frame super-resolution method for reconstructing high-
+resolution (abbr. high-res) faces from very low-resolution (abbr. low-res)
+face images (e.g. smaller than 16× 16 pixels) by learning a nonlinear La-
+grangian model for the high-res face images. Our technique is based on the
+mathematics of optimal transport, and hence we denote it as transport-based
+SFSR (TB-SFSR). In the training phase, a nonlinear model of high-res fa-
+ial images is constructed based on transport maps that morph a reference
+image into the training face images. In the testing phase, the resolution of
+degraded image is enhanced by finding the model parameters that best fit
+the given low resolution data.
+Generally speaking, most SFSR methods [2, 3, 4, 5] are based on a
+linear model for the high-res images. Hence, ultimately, the majority of
+SFSR models in the literature can be written as, Ih(x) = ∑i wiψi(x), where
+Ih is a high-res image or a high-res image patch, w’s are weight coefficients,
+nd ψ’s are high-res images (or image patches), which are learned from the
+training images using a specific model. Here we propose a fundamentally
+different approach toward modeling high-res images. In our approach the"
+3412d9f3c620155bf3eb203f5817a310000f0c63,Biomarkers in autism spectrum disorder: the old and the new,"DOI 10.1007/s00213-013-3290-7
+REVIEW
+Biomarkers in autism spectrum disorder: the old and the new
+Barbara Ruggeri & Ugis Sarkans & Gunter Schumann &
+Antonio M. Persico
+Received: 15 April 2013 /Accepted: 7 September 2013
+# Springer-Verlag Berlin Heidelberg 2013"
+3490683560ca18d19884949dccca0ad7c98d4749,Content-Based Filtering for Video Sharing Social Networks,"Content-Based Filtering for Video Sharing Social Networks
+Eduardo Valle1, Sandra Avila2, Fillipe de Souza2,
+Marcelo Coelho2,3, Arnaldo de A. Araújo2
+RECOD Lab — DCA / FEEC / UNICAMP, Campinas, SP, Brazil
+NPDI Lab — DCC / UFMG, Belo Horizonte, MG, Brazil
+Preparatory School of Air Cadets — EPCAR, Barbacena, MG, Brazil
+{sandra, fdms, mcoelho,"
+340798e6b7a9863005863f38c1bbfda5cf85d201,"Image Retrieval, Object Recognition, and Discriminative Models","Image Retrieval, Object Recognition,
+nd Discriminative Models
+Von der Fakult¨at f¨ur Mathematik, Informatik und Naturwissenschaften der
+RWTH Aachen University zur Erlangung des akademischen Grades eines
+Doktors der Naturwissenschaften genehmigte Dissertation
+vorgelegt von
+Diplom-Informatiker Thomas Deselaers
+us Aachen
+Berichter:
+Universit¨atsprofessor Dr.-Ing. Hermann Ney
+Universit¨atsprofessor Dr. Bernt Schiele
+Tag der m¨undlichen Pr¨ufung: 2. Dezember 2008
+Diese Dissertation ist auf den Internetseiten der Hochschulbibliothek online verf¨ugbar."
+348035720dba98ff54f2ff8c375ace09287c89f6,3D Human Pose Estimation in RGBD Images for Robotic Task Learning,"D Human Pose Estimation in RGBD Images for Robotic Task Learning
+Christian Zimmermann*, Tim Welschehold*, Christian Dornhege, Wolfram Burgard and Thomas Brox"
+341ed69a6e5d7a89ff897c72c1456f50cfb23c96,"DAGER: Deep Age, Gender and Emotion Recognition Using Convolutional Neural Network","DAGER: Deep Age, Gender and Emotion
+Recognition Using Convolutional Neural
+Networks
+Afshin Dehghan
+Enrique G. Ortiz
+Guang Shu
+Syed Zain Masood
+{afshindehghan, egortiz, guangshu,
+Computer Vision Lab, Sighthound Inc., Winter Park, FL"
+3493b2232449635aff50fc17e03163cb4b66f1b5,Visual exploration of machine learning results using data cube analysis,"Visual Exploration of Machine Learning Results
+using Data Cube Analysis
+Minsuk Kahng
+Georgia Tech
+Atlanta, GA, USA
+Dezhi Fang
+Georgia Tech
+Atlanta, GA, USA
+Duen Horng (Polo) Chau
+Georgia Tech
+Atlanta, GA, USA"
+341de07abfb89bf78f3a72513c8bce40d654e0a3,Sparse and Deep Generalizations of the FRAME Model,"Annals of Mathematical Sciences and Applications
+Volume 3, Number 1, 211–254, 2018
+Sparse and deep generalizations of the
+FRAME model
+Ying Nian Wu, Jianwen Xie, Yang Lu, and Song-Chun Zhu
+In the pattern theoretical framework developed by Grenander and
+dvocated by Mumford for computer vision and pattern recog-
+nition, different patterns are represented by statistical generative
+models. The FRAME (Filters, Random fields, And Maximum En-
+tropy) model is such a generative model for texture patterns. It
+is a Markov random field model (or a Gibbs distribution, or an
+energy-based model) of stationary spatial processes. The log prob-
+bility density function of the model (or the energy function of the
+Gibbs distribution) is the sum of translation-invariant potential
+functions that are one-dimensional non-linear transformations of
+linear filter responses. In this paper, we review two generalizations
+of this model. One is a sparse FRAME model for non-stationary
+patterns such as objects, where the potential functions are loca-
+tion specific, and they are non-zero only at a selected collection of
+locations. The other generalization is a deep FRAME model where"
+341633ccce0f8c055dfc633765d905c269e28f82,Collaborative Representation for Face Recognition based on Bilateral Filtering,"Collaborative Representation for Face
+Recognition based on Bilateral Filtering
+Rokan Khaji1, Hong Li2, Ramadan Abdo Musleh3, Hongfeng Li4, Qabas Ali5
+School of Mathematics and Statistics,
+Huazhong University of Science & Technology , Wuhan, 430074, China
+Department of Mathematics, College of Science, Diyala University, Diyala, 32001 ,Iraq
+,3,4School of Mathematics and Statistics,
+Huazhong University of Science & Technology , Wuhan, 430074, China
+5Department of Electronics and Information Engineering,
+Huazhong University of Science & Technology , Wuhan, 430074, China."
+34b6466e3e69547f6d464ad6b5660b1e629a5c35,Similar and Class Based Image Retrieval Using Hash Code,"IJCSNS International Journal of Computer Science and Network Security, VOL.15 No.3, March 2015
+Similar and Class Based Image Retrieval Using Hash Code
+B.Bharathi 1, Nagarjuna Reddy Akkim2
+Faculty of computing, Sathyabama University, Chennai, India
+Introduction"
+34f8086eb67eb2cd332cd2d6bca0dd8f1e8f1062,Face Recognition and Growth Prediction using a 3D Morphable Face Model,"Saarland University
+Faculty of Natural Sciences and Technology I
+Department of Computer Science
+Master’s Program in Computer Science
+Master’s Thesis
+Face Recognition and
+Growth Prediction using
+3D Morphable Face Model
+submitted by Kristina Scherbaum
+on October 30, 2007
+Supervisor
+Prof. Dr. Hans-Peter Seidel
+Saarland University – Computer Science Department
+Advisor
+Prof. Dr. Volker Blanz
+Universit¨at Siegen – Dekanat FB 12
+Reviewers
+Prof. Dr. Hans-Peter Seidel
+Prof. Dr. Volker Blanz"
+34e23b934794a5abff251698df09cbac5ad2dd56,Towards Engineering a Web-Scale Multimedia Service: A Case Study Using Spark,"Towards Engineering a Web-Scale Multimedia Service:
+A Case Study Using Spark∗
+Gylfi Þór Guðmundsson
+Reykjavik University
+Reykjavík, Iceland
+Björn Þór Jónsson
+Reykjavik University, Iceland
+IT University of Copenhagen, Denmark
+Laurent Amsaleg
+IRISA–CNRS
+Rennes, France
+Michael J. Franklin
+University of Chicago
+Chicago, IL, USA"
+3423f3dcb0edee1c5c6a5505b9e8c0bbdcffbd51,Nurses' Reactions to Patient Weight: Effects on Clinical Decisions,"University of Wisconsin Milwaukee
+UWM Digital Commons
+Theses and Dissertations
+May 2017
+Nurses' Reactions to Patient Weight: Effects on
+Clinical Decisions
+Heidi M. Pfeiffer
+University of Wisconsin-Milwaukee
+Follow this and additional works at: http://dc.uwm.edu/etd
+Part of the Psychology Commons
+Recommended Citation
+Pfeiffer, Heidi M., ""Nurses' Reactions to Patient Weight: Effects on Clinical Decisions"" (2017). Theses and Dissertations. 1524.
+http://dc.uwm.edu/etd/1524
+This Dissertation is brought to you for free and open access by UWM Digital Commons. It has been accepted for inclusion in Theses and Dissertations
+y an authorized administrator of UWM Digital Commons. For more information, please contact"
+344682f69dd9bec68d89a79b0b7f28a3891ab857,Perception of Social Cues of Danger in Autism Spectrum Disorders,"Perception of Social Cues of Danger in Autism Spectrum
+Disorders
+Nicole R. Zu¨ rcher1,2, Ophe´ lie Rogier1, Jasmine Boshyan2, Loyse Hippolyte1, Britt Russo1, Nanna Gillberg3,
+Adam Helles3, Torsten Ruest1, Eric Lemonnier4, Christopher Gillberg3, Nouchine Hadjikhani1,2,3*
+Brain Mind Institute, EPFL, Lausanne, Switzerland, 2 Athinoula A. Martinos Center for Biomedical Imaging, Harvard Medical School, Massachusetts General Hospital,
+Charlestown, Massachusetts, United States of America, 3 Gillberg Centrum, University of Gothenburg, Gothenburg, Sweden, 4 Laboratoire de Neurosciences, Universite´ de
+Brest, Brest, France"
+340d1a9852747b03061e5358a8d12055136599b0,Audio-Visual Recognition System Insusceptible to Illumination Variation over Internet Protocol _ICIE_28_,"Audio-Visual Recognition System Insusceptible
+to Illumination Variation over Internet Protocol
+Yee Wan Wong, Kah Phooi Seng, and Li-Minn Ang"
+3468740e4a9fc72a269f4f0ca8470ccd60925f92,Robustness Analysis of Visual QA Models by Basic Questions,"Robustness Analysis of Visual QA Models by Basic Questions
+Jia-Hong Huang
+Bernard Ghanem
+Cuong Duc Dao* Modar Alfadly*
+C. Huck Yang
+King Abdullah University of Science and Technology ; Georgia Institute of Technology
+{jiahong.huang, dao.cuong, modar.alfadly, ;"
+34b4f264578fc674dd2bf8d478ec1314739a5629,3D Novel Face Sample Modeling for Face Recognition,"D Novel Face Sample Modeling for Face
+Recognition
+Yun Ge, Yanfeng Sun, Baocai Yin, Hengliang Tang
+Beijing Key Laboratory of Multimedia and Intelligent Software Technology
+College of Computer Science and Technology, BJUT, Beijing, China
+Email:"
+34df09a9445089c8f23eff5b2a43a822c9713f6e,Boosting Chamfer Matching by Learning Chamfer Distance Normalization,"Boosting Chamfer Matching by Learning
+Chamfer Distance Normalization
+Tianyang Ma, Xingwei Yang, and Longin Jan Latecki
+Dept. of Computer and Information Sciences,Temple Unviersity, Philadelphia.
+{tianyang.ma,xingwei,latecki}.temple.edu"
+3410136b86b813b075a258842450835906d58600,A facial expression image database and norm for Asian population: A preliminary report,"Image Quality and System Performance VI, edited by Susan P. Farnand, Frans Gaykema,
+Proc. of SPIE-IS&T Electronic Imaging, SPIE Vol. 7242, 72421D · © 2009 SPIE-IS&T
+CCC code: 0277-786X/09/$18 · doi: 10.1117/12.806130
+SPIE-IS&T/ Vol. 7242 72421D-1
+Downloaded from SPIE Digital Library on 07 Oct 2009 to 140.112.113.225. Terms of Use: http://spiedl.org/terms"
+5a9126f4478384f6615bf57b6da7299dc17b9a6b,3-D Facial Landmark Localization With Asymmetry Patterns and Shape Regression from Incomplete Local Features,"JOURNAL OF LATEX CLASS FILES, VOL. 11, NO. 4, DECEMBER 2012
+D Facial Landmark Localization with Asymmetry
+Patterns and Shape Regression from Incomplete
+Local Features
+Federico M. Sukno, John L. Waddington, and Paul F. Whelan"
+5a3da29970d0c3c75ef4cb372b336fc8b10381d7,CNN-based Real-time Dense Face Reconstruction with Inverse-rendered Photo-realistic Face Images.,"CNN-based Real-time Dense Face Reconstruction
+with Inverse-rendered Photo-realistic Face Images
+Yudong Guo, Juyong Zhang†, Jianfei Cai, Boyi Jiang and Jianmin Zheng"
+5a93f9084e59cb9730a498ff602a8c8703e5d8a5,Face Recognition using Local Quantized Patterns,"HUSSAIN ET. AL: FACE RECOGNITION USING LOCAL QUANTIZED PATTERNS
+Face Recognition using Local Quantized
+Patterns
+Sibt ul Hussain
+Thibault Napoléon
+Fréderic Jurie
+GREYC — CNRS UMR 6072,
+University of Caen Basse-Normandie,
+Caen, France"
+5ad65c6474c135a6c15e7127d8bb91de8c8a55a1,Designing Empathetic Animated Agents for a B-Learning Training Environment within the Electrical Domain,"Hernández, Y., Pérez-Ramírez, M., Zatarain-Cabada, R., Barrón-Estrada, L., & Alor-Hernández, G. (2016). Designing
+Empathetic Animated Agents for a B-Learning Training Environment within the Electrical Domain. Educational Technology &
+Society, 19 (2), 116–131.
+Designing Empathetic Animated Agents for a B-Learning Training
+Environment within the Electrical Domain
+Yasmín Hernández1*, Miguel Pérez-Ramírez1, Ramón Zatarain-Cabada2, Lucía Barrón-
+Estrada2 and Giner Alor-Hernández3
+Instituto de Investigaciones Eléctricas, Gerencia de Tecnologías de la Información, Cuernavaca, México // 2Instituto
+Tecnológico de Culiacán, Departamento de Posgrado, Culiacán, México // 3Instituto Tecnológico de Orizaba,
+División de Estudios de Posgrado e Investigación, Orizaba, México // // //
+// //
+*Corresponding author"
+5a14209a5241877f92743d04282598f41fd3e50f,From BoW to CNN: Two Decades of Texture Representation for Texture Classification,"From BoW to CNN: Two Decades of Texture Representation for Texture
+Classification
+Li Liu 1,2 · Jie Chen 2 · Paul Fieguth 3 ·
+Guoying Zhao 2 · Rama Chellappa 4 · Matti Pietik¨ainen 2
+Received: date / Accepted: date"
+5afd6c5eb5cc1e8496bb78b8f7b3a00b2900deb3,Self-Supervised Learning of Pose Embeddings from Spatiotemporal Relations in Videos,"Self-supervised Learning of Pose Embeddings
+from Spatiotemporal Relations in Videos
+¨Omer S¨umer∗
+Tobias Dencker∗
+Bj¨orn Ommer
+Heidelberg Collaboratory for Image Processing
+IWR, Heidelberg University, Germany"
+5ac18d505ed6d10e8692cbb7d33f6852e6782692,"The Open Images Dataset V4: Unified image classification, object detection, and visual relationship detection at scale","IJCV submission in review
+The Open Images Dataset V4
+Unified image classification, object detection, and visual relationship detection at scale
+Alina Kuznetsova Hassan Rom Neil Alldrin
+Shahab Kamali
+Stefan Popov Matteo Malloci Tom Duerig Vittorio Ferrari
+Jasper Uijlings
+Ivan Krasin
+Jordi Pont-Tuset"
+5ad4e9f947c1653c247d418f05dad758a3f9277b,WLFDB: Weakly Labeled Face Databases,"IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE (TPAMI)
+WLFDB: Weakly Labeled Face Databases
+Dayong Wang†, Steven C.H. Hoi∗, and Jianke Zhu‡"
+5ac707ab88c565b1ed34fac89939f0cd2451eb22,Automated Object Recognition in Baggage Screening using Multiple X-ray Views,"Automated Object Recognition in Baggage Screening
+using Multiple X-ray Views
+Domingo Mery and Vladimir Riffo
+Department of Computer Science – Pontificia Universidad Cat´olica de Chile
+Av. Vicu˜na Mackenna 4860(143) – Santiago de Chile
+http://dmery.ing.puc.cl"
+5aeaee0e3a324970c02ae8463e1b358597457d03,Towards a Types-As-Classifiers Approach to Dialogue Processing in Human-Robot Interaction,"Towards a Types-As-Classifiers Approach to Dialogue Processing in
+Human-Robot Interaction
+HOUGH, J; JAMONE, L; Schlangen, D; Walck, G; Haschke, R; Workshop on Dialogue and
+Perception (DaP 2018)
+© The Author(s) 2018
+For additional information about this publication click this link.
+http://qmro.qmul.ac.uk/xmlui/handle/123456789/45947
+Information about this research object was correct at the time of download; we occasionally
+make corrections to records, please therefore check the published record when citing. For
+more information contact"
+5a34a9bb264a2594c02b5f46b038aa1ec3389072,Label-Embedding for Image Classification,"Label-Embedding for Image Classification
+Zeynep Akata, Member, IEEE, Florent Perronnin, Member, IEEE, Zaid Harchaoui, Member, IEEE,
+nd Cordelia Schmid, Fellow, IEEE"
+5af5802cc6128bafbde1ae12e0ab41612aee9e3b,An object tracking method using extreme learning machine with online learning,"An Object Tracking Method Using Extreme
+Learning Machine with Online Learning
+Yuanlong Yu, Liyan Xie, and Zhiyong Huang
+College of Mathematics and Computer Science
+Fuzhou University
+Fuzhou, Fujian, 350116, China
+Emails: hzy"
+5ade87a54c8baec555c37d59071c6fb4a9a55cf7,Deep Learning For Video Saliency Detection,"Deep Learning For Video Saliency Detection
+Wenguan Wang, and Jianbing Shen, Senior Member, IEEE, and Ling Shao, Senior Member,"
+5a6b2f3a542322be153fc9104f3064f2a1bc76eb,"A French-Spanish Multimodal Speech Communication Corpus Incorporating Acoustic Data, Facial, Hands and Arms Gestures Information","Interspeech 2018
+-6 September 2018, Hyderabad
+0.21437/Interspeech.2018-2212"
+5a0209515ab62e008efeca31f80fa0a97031cd9d,Dataset fingerprints: Exploring image collections through data mining,"Dataset Fingerprints: Exploring Image Collections Through Data Mining
+Konstantinos Rematas1, Basura Fernando1, Frank Dellaert2, and Tinne Tuytelaars1
+KU Leuven, ESAT-PSI, iMinds
+Georgia Tech
+Figure 1: Given an image collection, our system extracts patterns of discriminative mid level features and uses the connection
+etween them to enable structure specific browsing."
+5a1669abdc4f958c589843cff2f4d83a11fe8007,Robust Recognition via ` 1-Minimization April,"Robust Recognition via ‘1-Minimization
+April 13, 2007"
+5a8d20ecd92d22bf077208a5e7b1bb008a9b7dbc,A new manifold distance measure for visual object categorization,"A New Manifold Distance Measure for Visual Object
+Categorization
+Fengfu Li, Xiayuan Huang, Hong Qiao and Bo Zhang
+index. The proposed distance is more robust"
+5aaa84090c50da903ea1d61495c0fe96a5470909,Image-embodied Knowledge Representation Learning,"Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+of armourhas partFigure1:Examplesofentityimages.Fig.1demonstratessomeexamplesofentityimages.Eachentityhasmultipleimageswhichcanprovidesignificantvisu-alinformationthatintuitivelydescribestheappearancesandbehavioursofthisentity.Toutilizetherichinformationinimages,weproposetheImage-embodiedKnowledgeRepre-sentationLearningmodel(IKRL).Morespecifically,wefirstproposeanimageencoderwhichconsistsofaneuralrep-resentationmoduleandaprojectionmoduletogeneratetheimage-basedrepresentationforeachimageinstance.Second,weconstructtheaggregatedimage-basedrepresentationforeachentityjointlyconsideringallitsimageinstanceswithanattention-basedmethod.Finally,wejointlylearntheknowl-edgerepresentationswithtranslation-basedmethods.WeevaluatetheIKRLmodelonknowledgegraphcom-pletionandtripleclassification.Experimentalresultsdemon-stratethatourmodelachievesthestate-of-the-artperfor-mancesonbothtasks,whichconfirmsthesignificanceofvi-sualinformationinknowledgerepresentationlearning.ItalsoindicatesthatourIKRLmodeliscapableofencodingimageinformationwellintoknowledgerepresentations.Wedemon-stratethemaincontributionsofthisworkasfollows:(cid:15)WeproposeanovelIKRLmodelconsideringvisualin-formationinentityimagesforknowledgerepresentationlearning.Tothebestofourknowledge,thisisthefirstattempttocombineimageswithknowledgegraphsforknowledgerepresentationlearning.(cid:15)Weevaluateourmodelsonareal-worlddatasetandre-ceivepromisingperformancesonbothknowledgegraph"
+5af1e8a38b64c6694b9a34cd0b1596f2c905d3ff,Context-based trajectory descriptor for human activity profiling,"Context-based Trajectory Descriptor for Human
+Activity Profiling
+Eduardo M. Pereira
+INESC TEC and
+Faculty of Engineering
+of the University of Porto
+Rua Dr. Roberto Frias, 378
+Porto, Portugal 4200 - 465
+Email:
+Lucian Ciobanu
+INESC TEC
+Rua Dr. Roberto Frias, 378
+Porto, Portugal 4200 - 465
+Email:
+Jaime S. Cardoso
+INESC TEC and
+Faculty of Engineering
+of the University of Porto
+Rua Dr. Roberto Frias, 378
+Porto, Portugal 4200 - 465"
+5a4c6246758c522f68e75491eb65eafda375b701,Contourlet structural similarity for facial expression recognition,"978-1-4244-4296-6/10/$25.00 ©2010 IEEE
+ICASSP 2010"
+5aad5e7390211267f3511ffa75c69febe3b84cc7,Driver Gaze Region Estimation Without Using Eye Movement,"Driver Gaze Estimation
+Without Using Eye Movement
+Lex Fridman, Philipp Langhans, Joonbum Lee, Bryan Reimer
+MIT AgeLab"
+5a86842ab586de9d62d5badb2ad8f4f01eada885,Facial Emotion Recognition and Classification Using Hybridization Method,"International Journal of Engineering Research and General Science Volume 3, Issue 3, May-June, 2015
+ISSN 2091-2730
+Facial Emotion Recognition and Classification Using Hybridization
+Method
+Anchal Garg , Dr. Rohit Bajaj
+Deptt. of CSE, Chandigarh Engg. College, Mohali, Punjab, India.
+07696449500"
+5ad88a16e2efe9bb67c20cdbd9b003ffb79c12ef,Real-time video event detection in crowded scenes using MPEG derived features: A multiple instance learning approach,"Manuscript Draft
+Manuscript Number: PRLETTERS-D-13-00222R2
+Title: Real-Time Video Event Detection in Crowded Scenes using MPEG Derived Features: a Multiple
+Instance Learning Approach
+Article Type: Special Issue: SIPRCA
+Keywords: Event Detection; Crowded Scene; Multiple Instance Learning;
+MPEG domain; Sparse Approximation; Random Matrix; Traffic
+Surveillance; Naive Bayes Model
+Corresponding Author: Mr. Jingxin Xu, M.D
+Corresponding Author's Institution: Queensland University of Technology
+First Author: Jingxin Xu, M.D
+Order of Authors: Jingxin Xu, M.D; Simon Denman, PhD; Vikas Reddy, PhD; Clinton Fookes, PhD;
+Sridha Sridhran, PhD"
+5ac8edd62fe23911e19d639287135f91e22421cc,Gender and 3D facial symmetry: What's the relationship?,"Gender and 3D Facial Symmetry: What’s the
+Relationship?
+Baiqiang Xia, Boulbaba Ben Amor, Hassen Drira, Mohamed Daoudi,
+Lahoucine Ballihi
+To cite this version:
+Baiqiang Xia, Boulbaba Ben Amor, Hassen Drira, Mohamed Daoudi, Lahoucine Ballihi. Gender
+nd 3D Facial Symmetry: What’s the Relationship?. 10th IEEE Conference on Automatic Face and
+Gesture Recognition (FG 2013), Apr 2013, shanghai, China. 2013. <hal-00771988>
+HAL Id: hal-00771988
+https://hal.archives-ouvertes.fr/hal-00771988
+Submitted on 9 Jan 2013
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,"
+5a4ec5c79f3699ba037a5f06d8ad309fb4ee682c,Automatic age and gender classification using supervised appearance model,"Downloaded From: https://www.spiedigitallibrary.org/journals/Journal-of-Electronic-Imaging on 12/17/2017 Terms of Use: https://www.spiedigitallibrary.org/terms-of-use
+AutomaticageandgenderclassificationusingsupervisedappearancemodelAliMainaBukarHassanUgailDavidConnahAliMainaBukar,HassanUgail,DavidConnah,“Automaticageandgenderclassificationusingsupervisedappearancemodel,”J.Electron.Imaging25(6),061605(2016),doi:10.1117/1.JEI.25.6.061605."
+5aed0f26549c6e64c5199048c4fd5fdb3c5e69d6,Human Expression Recognition using Facial Features,"International Journal of Computer Applications® (IJCA) (0975 – 8887)
+International Conference on Knowledge Collaboration in Engineering, ICKCE-2014
+Human Expression Recognition using Facial Features
+G.Saranya
+Post graduate student, Dept. of ECE
+Parisutham Institute of Technology & Science
+Thanjavur.
+Affiliated to Anna university, Chennai
+recognition can be used"
+5a7520380d9960ff3b4f5f0fe526a00f63791e99,The Indian Spontaneous Expression Database for Emotion Recognition,"The Indian Spontaneous Expression
+Database for Emotion Recognition
+S L Happy, Student Member, IEEE, Priyadarshi Patnaik, Aurobinda Routray, Member, IEEE,
+nd Rajlakshmi Guha"
+5f4a873118e033e5e168ee99d64474b4cc4d94a3,Lessons Learned from Crime Caught on Camera,"Article
+Lessons Learned
+from Crime Caught
+on Camera
+Marie Rosenkrantz Lindegaard1,2
+nd Wim Bernasco1,3
+Journal of Research in Crime and
+Delinquency
+018, Vol. 55(1) 155-186
+ª The Author(s) 2018
+Reprints and permission:
+sagepub.com/journalsPermissions.nav
+DOI: 10.1177/0022427817727830
+journals.sagepub.com/home/jrc"
+5fff61302adc65d554d5db3722b8a604e62a8377,Additive Margin Softmax for Face Verification,"Additive Margin Softmax for Face Verification
+Feng Wang
+UESTC
+Weiyang Liu
+Georgia Tech
+Haijun Liu
+UESTC
+Jian Cheng
+UESTC
+haijun"
+5f943f9bfe3154fbd368034903ea11620d2946eb,Cascade Category-Aware Visual Search,"MiniManuscript.com
+The one stop shop for academic literature.
+07:00am 8 Dec, 2018
+Cascade Category-Aware Visual Search.
+Authors Zhang S, Tian Q, Huang Q, Gao W, Rui Y
+Volume
+Issue
+Pages"
+5fa6e4a23da0b39e4b35ac73a15d55cee8608736,RED-Net: A Recurrent Encoder–Decoder Network for Video-Based Face Alignment,"IJCV special issue (Best papers of ECCV 2016) manuscript No.
+(will be inserted by the editor)
+RED-Net:
+A Recurrent Encoder-Decoder Network for Video-based Face Alignment
+Xi Peng · Rogerio S. Feris · Xiaoyu Wang · Dimitris N. Metaxas
+Submitted: April 19 2017 / Revised: December 12 2017"
+5f871838710a6b408cf647aacb3b198983719c31,Locally Linear Regression for Pose-Invariant Face Recognition,"Locally Linear Regression for Pose-Invariant
+Face Recognition
+Xiujuan Chai, Shiguang Shan, Member, IEEE, Xilin Chen, Member, IEEE, and Wen Gao, Senior Member, IEEE"
+5fc621cdef59c38ef898a2adc2b4472a8396119a,Synthesizing Samples for Zero-shot Learning,"Synthesizing Samples for Zero-shot Learning
+IJCAI Anonymous Submission 2625"
+5f34c96ddcf992e1b8660b5cb01e3c311b05023c,Towards Online Iris and Periocular Recognition Under Relaxed Imaging Constraints,"IEEE Trans. Image Processing, 2013
+Towards Online Iris and Periocular Recognition under
+Relaxed Imaging Constraints
+Chun-Wei Tan, Ajay Kumar"
+5f58bf2c25826cb6ee927a1461aa72bd623157ff,Tasting Families of Features for Image Classification,"ICCV 2011 Submission #549. CONFIDENTIAL REVIEW COPY. DO NOT DISTRIBUTE.
+Tasting Families of Features for Image Classification
+Anonymous ICCV submission
+Paper ID 549"
+5f92de3683b4fee28ad3f431c889e7c8bff604f8,"Performance study of Face Recognition systems using LBP and ICA descriptors with sparse representation-MRLSR and KNN Classifiers , respectively","International Journal of Computer Trends and Technology (IJCTT) – Volume 42 Number 1 – December 2016
+Performance study of Face Recognition
+systems using LBP and ICA descriptors
+with sparse representation - MRLSR and
+KNN Classifiers, respectively
+K Sarath1 and G. Sreenivasulu2
+PG scholar, Department of Electronics and Communication Engineering, SVU College of Engineering,
+Professor, Department of Electronics and Communication Engineering, SVU College of Engineering,
+Tirupathi, India
+Tirupathi, India
+sparse
+representation"
+5f344a4ef7edfd87c5c4bc531833774c3ed23542,Semisupervised Learning of Classifiers with Application to Human-computer Interaction,"
+5f02e49aa0fe467bbeb9de950e4abb6c99133feb,"Enhancing person re-identification by late fusion of low-, mid- and high-level features","Aalborg Universitet
+Enhancing Person Re-identification by Late Fusion of Low-, Mid-, and High-Level
+Features
+Lejbølle, Aske Rasch; Nasrollahi, Kamal; Moeslund, Thomas B.
+Published in:
+DOI (link to publication from Publisher):
+0.1049/iet-bmt.2016.0200
+Publication date:
+Document Version
+Accepted author manuscript, peer reviewed version
+Link to publication from Aalborg University
+Citation for published version (APA):
+Lejbølle, A. R., Nasrollahi, K., & Moeslund, T. B. (2018). Enhancing Person Re-identification by Late Fusion of
+General rights
+Copyright and moral rights for the publications made accessible in the public portal are retained by the authors and/or other copyright owners
+nd it is a condition of accessing publications that users recognise and abide by the legal requirements associated with these rights.
+? Users may download and print one copy of any publication from the public portal for the purpose of private study or research.
+? You may not further distribute the material or use it for any profit-making activity or commercial gain
+? You may freely distribute the URL identifying the publication in the public portal ?
+Take down policy"
+5f19b98e5cd22198d25660d609cbd3f4a69c94e7,Combining Head Pose and Eye Location Information for Gaze Estimation,"Combining Head Pose and Eye Location Information
+for Gaze Estimation
+Roberto Valenti, Member, IEEE, Nicu Sebe, Member, IEEE, and Theo Gevers, Member, IEEE"
+5fc15baee1383d502775fab8ee91d56f4875429c,Factorial Discriminant Analysis for 3 D Face Recognition System using SVM Classifier,"International Journal of Computer Applications (0975 – 8887)
+International Conference on Information and Communication Technologies (ICICT-2014)
+Factorial Discriminant Analysis for
+D Face Recognition System using SVM Classifier
+P. S. Hiremath
+Department of P. G. Studies and Research in
+Computer Science,
+Gulbarga University, Gulbarga-585106
+Karnataka, India
+turned"
+5f6116b6e5f21da66a304e9f59f3e224e188caef,Behavior Is Everything: Towards Representing Concepts with Sensorimotor Contingencies,"Behavior is Everything – Towards Representing Concepts
+with Sensorimotor Contingencies
+Nicholas Hay, Michael Stark, Alexander Schlegel, Carter Wendelken,
+Dennis Park, Eric Purdy, Tom Silver, D. Scott Phoenix, and Dileep George
+Vicarious AI, San Francisco, CA, USA"
+5f0b7245bedfc984b327b8e144c3cba9d9b2a807,Morphological Primitive Patterns with Grain Components on LDP for Child and Adult Age Classification,"International Journal of Computer Applications (0975 – 8887)
+Volume 21– No.3, May 2011
+Morphological Primitive Patterns with Grain Components
+on LDP for Child and Adult Age Classification
+B.Sujatha
+Dr.V.Vijaya Kumar
+Associate Professor
+G.I.E.T, Rajahmundry
+Dean, Dept. of Comp. Sciences
+Head, SRRF-G.I.E.T
+JNTUK,Kakinada
+Andhra Pradesh, India
+Rajahmundry
+Andhra Pradesh, India
+M.Rama Bai
+Associate Professor
+M.G.I.T, JNTUH
+Hyderabad
+Andhra Pradesh, India"
+5f7354634e13c9fad64163d53beb0a8eb5df30e1,Sketch-Based Image Retrieval: Benchmark and Bag-of-Features Descriptors,"Sketch-Based Image Retrieval: Benchmark
+nd Bag-of-Features Descriptors
+Mathias Eitz, Kristian Hildebrand, Tamy Boubekeur and Marc Alexa"
+5ffd74d2873b7cba2cbc5fd295cc7fbdedca22a2,The Cityscapes Dataset,"The Cityscapes Dataset
+Marius Cordts1,2
+Mohamed Omran3
+Rodrigo Benenson3
+Sebastian Ramos1,4
+Uwe Franke1
+Timo Scharw¨achter1,2
+Markus Enzweiler1
+Stefan Roth2
+Bernt Schiele3
+Daimler AG R&D, 2TU Darmstadt, 3MPI Informatics, 4TU Dresden
+www.cityscapes-dataset.net"
+5f534bacc658f620a15b5647adecb0ea813286c8,Reliable object detection and segmentation using inpainting,"Reliable Object Detection and Segmentation using Inpainting
+Ji Hoon Joung, M. S. Ryoo, Sunglok Choi, and Sung-Rak Kim"
+5f5906168235613c81ad2129e2431a0e5ef2b6e4,A Unified Framework for Compositional Fitting of Active Appearance Models,"Noname manuscript No.
+(will be inserted by the editor)
+A Unified Framework for Compositional Fitting of
+Active Appearance Models
+Joan Alabort-i-Medina · Stefanos Zafeiriou
+Received: date / Accepted: date"
+5fb5d9389e2a2a4302c81bcfc068a4c8d4efe70c,Multiple Facial Attributes Estimation Based on Weighted Heterogeneous Learning,"Multiple Facial Attributes Estimation based on
+Weighted Heterogeneous Learning
+H.Fukui* T.Yamashita* Y.Kato* R.Matsui*
+T. Ogata** Y.Yamauchi* H.Fujiyoshi*
+*Chubu University
+**Abeja Inc.
+200, Matuoto-cho, Kasugai,
+-1-20, Toranomon, Minato-ku,
+Aichi, Japan
+Tokyo, Japan"
+5f769ba95ffea0ce76ac9d8e7cd47e2d1c91e1bf,Using Geometry to Detect Grasps in 3D Point Clouds,"Localizing antipodal grasps in point clouds
+Andreas ten Pas and Robert Platt"
+5f0e9cc18374a670dfea4698424c9d48494f3093,Online Domain Adaptation for Multi-Object Tracking,"GAIDON & VIG: ONLINE DOMAIN ADAPTATION FOR MULTI-OBJECT TRACKING
+Online Domain Adaptation for Multi-Object Tracking
+Computer Vision Group
+Xerox Research Centre Europe
+Meylan, France
+Adrien Gaidon
+Eleonora Vig"
+5fc371760fd4c8abe94b91ae2ca03d428ac05faa,Fear-specific amygdala function in children and adolescents on the fragile x spectrum: a dosage response of the FMR1 gene.,"doi:10.1093/cercor/bhs341
+Fear-Specific Amygdala Function in Children and Adolescents on the Fragile X Spectrum:
+A Dosage Response of the FMR1 Gene
+So-Yeon Kim1, Jessica Burris1, Frederick Bassal1, Kami Koldewyn5, Sumantra Chattarji6, Flora Tassone2, David Hessl2,3 and
+Susan M. Rivera1,2,4
+Center for Mind and Brain, University of California, Davis, CA 95618, USA, 2MIND Institute, University of California, Davis, CA
+95817, USA, 3Department of Psychiatry and Behavioral Sciences, University of California, Davis, CA 95817, USA, 4Department of
+Psychology, University of California, Davis, CA 95616, USA, 5McGovern Institute for Brain Research, MIT, MA 02139, USA and
+6National Center for Biological Sciences, Bangalore 560065, India
+Address correspondence to Susan M. Rivera, Center for Mind and Brain, University of California, Davis, 267 Cousteau Place, Davis, CA 95618,
+USA. Email:
+Mutations of the fragile X mental retardation 1 (FMR1) gene are the
+genetic cause of fragile X syndrome (FXS). The presence of signifi-
+ant socioemotional problems has been well documented in FXS
+lthough the brain basis of those deficits remains unspecified. Here,
+we investigated amygdala dysfunction and its relation to socioemo-
+tional deficits and FMR1 gene expression in children and adoles-
+ents on the FX spectrum (i.e., individuals whose trinucleotide CGG
+repeat expansion from 55 to over 200 places them somewhere
+within the fragile X diagnostic range from premutation to full"
+5f107c92dd1c3f294b53627a5de1c7c46d996994,Complex Eye Movement Pattern Biometrics: The Effects of Environment and Stimulus,"Complex Eye Movement Pattern Biometrics:
+The Effects of Environment and Stimulus
+Corey D. Holland, Student Member, IEEE and Oleg V. Komogortsev, Member, IEEE"
+5fd147f57fc087b35650f7f3891d457e4c745d48,Coulomb GANs: Provably Optimal Nash Equilibria via Potential Fields,"Published as a conference paper at ICLR 2018
+COULOMB GANS: PROVABLY OPTIMAL NASH EQUI-
+LIBRIA VIA POTENTIAL FIELDS
+Thomas Unterthiner1
+Bernhard Nessler1
+Calvin Seward1,2
+Günter Klambauer1
+Martin Heusel1
+Hubert Ramsauer1
+Sepp Hochreiter1
+LIT AI Lab & Institute of Bioinformatics, Johannes Kepler University Linz, Austria
+Zalando Research, Mühlenstraße 25, 10243 Berlin, Germany"
+5fc664202208aaf01c9b62da5dfdcd71fdadab29,Automatic Face Recognition from Video,rXiv:1504.05308v1 [cs.CV] 21 Apr 2015
+5fcde9236d654a0f92a76c1a3f07c0cad954985c,Personality-Dependent Referring Expression Generation,"Personality-dependent Referring Expression Generation
+Ivandr´e Paraboni, Danielle Sampaio Monteiro, and Alex Gwo Jen Lan
+University of S˜ao Paulo, School of Arts, Sciences and Humanities, S˜ao Paulo, Brazil"
+5f5164cf998a10d2bef37741adb562ab07fac413,A Comprehensive Study on Cross-View Gait Based Human Identification with Deep CNNs,"This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TPAMI.2016.2545669, IEEE
+Transactions on Pattern Analysis and Machine Intelligence
+A Comprehensive Study on Cross-View Gait Based
+Human Identification with Deep CNNs
+Zifeng Wu, Yongzhen Huang, Liang Wang, Xiaogang Wang, and Tieniu Tan"
+5f0f8c9acc3e8eb50ca6e7d9c33cf3d9a8a54985,Structured Inhomogeneous Density Map Learning for Crowd Counting,"Structured Inhomogeneous Density Map Learning
+for Crowd Counting
+Hanhui Li, Xiangjian He, Hefeng Wu, Saeed Amirgholipour Kasmani, Ruomei Wang, Xiaonan Luo, Liang Lin"
+5fa1724a79a9f7090c54925f6ac52f1697d6b570,The Development of Multimodal Lexical Resources,"Proceedings of the Workshop on Grammar and Lexicon: Interactions and Interfaces,
+pages 41–47, Osaka, Japan, December 11 2016."
+5ff64afd70434b12e043ff39a91271eab6391124,Building Extraction in Very High Resolution Remote Sensing Imagery Using Deep Learning and Guided Filters,"Article
+Building Extraction in Very High Resolution
+nd Guided Filters
+Yongyang Xu 1 ID , Liang Wu 1,2, Zhong Xie 1,2,* and Zhanlong Chen 1
+Department of Information Engineering, China University of Geosciences, Wuhan 430074, China;
+(Y.X.); (L.W.); (Z.C.)
+National Engineering Research Center of Geographic Information System, Wuhan 430074, China
+* Correspondence:
+Received: 19 December 2017; Accepted: 16 January 2018; Published: 19 January 2018"
+33919313bb3cf09b00f9fa2253b30af33a52bc51,Minding the Gaps for Block Frank-Wolfe Optimization of Structured SVMs,"Minding the Gaps for Block Frank-Wolfe Optimization of Structured SVMs
+Anton Osokin∗,1 Jean-Baptiste Alayrac∗,1
+Isabella Lukasewitz1 Puneet K. Dokania2 Simon Lacoste-Julien1
+INRIA – ´Ecole Normale Sup´erieure, Paris, France
+Both authors contributed equally.
+INRIA – CentraleSup´elec, Chˆatenay-Malabry, France"
+33236cd0b9454ab88ec9deddfb8ce8e492056770,Salient social cues are prioritized in autism spectrum disorders despite overall decrease in social attention.,"J Autism Dev Disord
+DOI 10.1007/s10803-012-1710-x
+O R I G I N A L P A P E R
+Salient Social Cues are Prioritized in Autism Spectrum Disorders
+Despite Overall Decrease in Social Attention
+Coralie Chevallier • Pascal Huguet •
+Francesca Happe´ • Nathalie George •
+Laurence Conty
+Ó Springer Science+Business Media New York 2012"
+33a1a049d15e22befc7ddefdd3ae719ced8394bf,An Efficient Approach to Facial Feature Detection for Expression Recognition,"FULL PAPER
+International Journal of Recent Trends in Engineering, Vol 2, No. 1, November 2009
+An Efficient Approach to Facial Feature Detection
+for Expression Recognition
+S.P. Khandait1, P.D. Khandait2 and Dr.R.C.Thool2
+Deptt. of Info.Tech., K.D.K.C.E., Nagpur, India
+2Deptt.of Electronics Engg., K.D.K.C.E., Nagpur, India, 2Deptt. of Info.Tech., SGGSIET, Nanded"
+33d045b39bc4645ff2a8bffd83a49697631ff968,Learning Discrete Representations via Information Maximizing Self-Augmented Training,"Learning Discrete Representations via Information Maximizing
+Self Augmented Training
+Weihua Hu 1 Takeru Miyato 2 3 Seiya Tokui 2 1 Eiichi Matsumoto 2 1 Masashi Sugiyama 4 1"
+332339c32d41cc8176d360082b4d9faa90dadffa,"UberNet: Training a Universal Convolutional Neural Network for Low-, Mid-, and High-Level Vision Using Diverse Datasets and Limited Memory","UberNet : Training a ‘Universal’ Convolutional Neural Network for Low-, Mid-,
+nd High-Level Vision using Diverse Datasets and Limited Memory
+Iasonas Kokkinos
+CentraleSup´elec - INRIA"
+333aa36e80f1a7fa29cf069d81d4d2e12679bc67,Suggesting Sounds for Images from Video Collections,"Suggesting Sounds for Images
+from Video Collections
+Matthias Sol`er1, Jean-Charles Bazin2, Oliver Wang2, Andreas Krause1 and
+Alexander Sorkine-Hornung2
+Computer Science Department, ETH Z¨urich, Switzerland
+Disney Research, Switzerland"
+33ea400ca2105b9a3cd0e3c7c147e06c2d3c6d79,Vision based Decision-Support and Safety Systems for Robotic Surgery,"Vision based Decision-Support and Safety Systems for
+Robotic Surgery
+Suren Kumar
+PhD Candidate
+Madusudanan Sathia
+Narayanan*
+PhD Candidate
+Sukumar Misra
+Surgical Intern
+Sudha Garimella
+Assistant Professor
+Pankaj Singhal
+Director of Robotic Surgery
+Jason J. Corso
+Assistant Professor"
+33891ca0f8fab0eab503f4b4bcee009a1cf3b880,A video database of human faces under near Infra-Red illumination for human computer interaction applications,"A Video Database of Human Faces under Near Infra-Red
+Illumination for Human Computer Interaction Aplications
+S L Happy, Anirban Dasgupta, Anjith George, and Aurobinda Routray
+Department of Electrical Engineering
+Indian Institute of Technology Kharagpur"
+33792bb27ef392973e951ca5a5a3be4a22a0d0c6,Two-Dimensional Whitening Reconstruction for Enhancing Robustness of Principal Component Analysis,"Two-dimensional Whitening Reconstruction for
+Enhancing Robustness of Principal Component
+Analysis
+Xiaoshuang Shi, Zhenhua Guo, Feiping Nie, Lin Yang, Jane You, and Dacheng Tao"
+3328674d71a18ed649e828963a0edb54348ee598,A face and palmprint recognition approach based on discriminant DCT feature extraction,"IEEE TRANSACTIONS ON SYSTEMS, MAN, AND CYBERNETICS—PART B: CYBERNETICS, VOL. 34, NO. 6, DECEMBER 2004
+A Face and Palmprint Recognition Approach Based
+on Discriminant DCT Feature Extraction
+Xiao-Yuan Jing and David Zhang"
+3355aff37b5e4ba40fc689119fb48d403be288be,Deep Private-Feature Extraction,"Deep Private-Feature Extraction
+Seyed Ali Osia, Ali Taheri, Ali Shahin Shamsabadi, Kleomenis Katevas, Hamed Haddadi, Hamid R. Rabiee"
+339937141ffb547af8e746718fbf2365cc1570c8,Facial Emotion Recognition in Real Time,"Facial Emotion Recognition in Real Time
+Dan Duncan
+Gautam Shine
+Chris English"
+33ae696546eed070717192d393f75a1583cd8e2c,Subspace selection to suppress confounding source domain information in AAM transfer learning,
+33c485b59249af2d763d6951cd11e4080f3bbb3d,Learning to Fuse 2D and 3D Image Cues for Monocular Body Pose Estimation,"Fusing 2D Uncertainty and 3D Cues for Monocular Body Pose Estimation
+Bugra Tekin
+Pablo M´arquez-Neila
+Mathieu Salzmann
+Pascal Fua
+EPFL, Switzerland"
+3316521a5527c7700af8ae6aef32a79a8b83672c,People-tracking-by-detection and people-detection-by-tracking,"People-Tracking-by-Detection and People-Detection-by-Tracking
+Mykhaylo Andriluka
+Stefan Roth
+Bernt Schiele
+Computer Science Department
+TU Darmstadt, Germany
+{andriluka, sroth,"
+3393459600368be2c4c9878a3f65a57dcc0c2cfa,Eigen-PEP for Video Face Recognition,"Eigen-PEP for Video Face Recognition
+Haoxiang Li†, Gang Hua†, Xiaohui Shen‡, Zhe Lin‡, Jonathan Brandt‡
+Stevens Institute of Technology ‡Adobe Systems Inc."
+330bcf952a5a20aac0e334aad1de4cd6ba6ed6eb,Pedestrian Detection at Day/Night Time with Visible and FIR Cameras: A Comparison,"Article
+Pedestrian Detection at Day/Night Time with Visible
+nd FIR Cameras: A Comparison
+Alejandro González 1,2,*, Zhijie Fang 1,2, Yainuvis Socarras 1,2, Joan Serrat 1,2, David Vázquez 1,2,
+Jiaolong Xu 1,2 and Antonio M. López 1,2
+Autonomous University of Barcelona, Cerdanyola, Barcelona 08193, Spain; (Z.F.);
+(Y.S.); (J.S.); (D.V.); (J.X.);
+(A.M.L.)
+Computer Vision Center, Cerdanyola, Barcelona 08193, Spain
+* Correspondence: Tel.: +34-622-605-455
+Academic Editor: Vittorio M. N. Passaro
+Received: 17 March 2016; Accepted: 30 May 2016; Published: 4 June 2016"
+3323a905a3960a663a9884540e8c3586cf362ba9,Face Hallucination Using Sparse Representation Algorithm,"International Journal of Advanced Research in Computer Engineering & Technology (IJARCET)
+Volume 4 Issue 9, September 2015
+Face Hallucination Using Sparse Representation
+Algorithm
+Sudhir Kumar Vikram Mutneja"
+336b2ae3e4db996538f930b754f7d233af56a628,Learning local descriptors by optimizing the keypoint-correspondence criterion,"Learning Local Descriptors by Optimizing the
+Keypoint-Correspondence Criterion: Applications to
+Face Matching, Learning from Unlabeled Videos
+nd 3D-Shape Retrieval
+Nenad Markuˇs†, Igor S. Pandˇzi´c†, and J¨orgen Ahlberg‡
+University of Zagreb, Faculty of Electrical Engineering and Computing, Unska 3, 10000 Zagreb, Croatia
+Computer Vision Laboratory, Dept. of Electrical Engineering, Link¨oping University, SE-581 83 Link¨oping, Sweden"
+3352426a67eabe3516812cb66a77aeb8b4df4d1b,Joint Multi-view Face Alignment in the Wild,"JOURNAL OF LATEX CLASS FILES, VOL. 4, NO. 5, APRIL 2015
+Joint Multi-view Face Alignment in the Wild
+Jiankang Deng, Student Member, IEEE, George Trigeorgis, Yuxiang Zhou, and Stefanos Zafeiriou, Member, IEEE"
+333be4858994e6d9364341aeb520f7800a0f6a07,Unsupervised Pixel-Level Domain Adaptation with Generative Adversarial Networks,"Unsupervised Pixel–Level Domain Adaptation
+with Generative Adversarial Networks
+Konstantinos Bousmalis
+Google Brain
+San Francisco, CA
+Nathan Silberman
+Google Research
+New York, NY
+David Dohan
+Google Brain
+Mountain View, CA
+Dumitru Erhan
+Google Brain
+San Francisco, CA
+Dilip Krishnan
+Google Research
+Cambridge, MA"
+334d6c71b6bce8dfbd376c4203004bd4464c2099,Biconvex Relaxation for Semidefinite Programming in Computer Vision,"BICONVEX RELAXATION FOR SEMIDEFINITE PROGRAMMING IN
+COMPUTER VISION
+SOHIL SHAH*, ABHAY KUMAR*, DAVID JACOBS,
+CHRISTOPH STUDER, AND TOM GOLDSTEIN"
+33695e0779e67c7722449e9a3e2e55fde64cfd99,Riemannian coding and dictionary learning: Kernels to the rescue,"Riemannian Coding and Dictionary Learning: Kernels to the Rescue
+Mehrtash Harandi, Mathieu Salzmann
+Australian National University & NICTA
+While sparse coding on non-flat Riemannian manifolds has recently become
+increasingly popular, existing solutions either are dedicated to specific man-
+ifolds, or rely on optimization problems that are difficult to solve, especially
+when it comes to dictionary learning. In this paper, we propose to make use
+of kernels to perform coding and dictionary learning on Riemannian man-
+ifolds. To this end, we introduce a general Riemannian coding framework
+with its kernel-based counterpart. This lets us (i) generalize beyond the spe-
+ial case of sparse coding; (ii) introduce efficient solutions to two coding
+schemes; (iii) learn the kernel parameters; (iv) perform unsupervised and
+supervised dictionary learning in a much simpler manner than previous Rie-
+mannian coding approaches.
+i=1, di ∈ M, be a dictionary on a Rie-
+mannian manifold M, and x ∈ M be a query point on the manifold. We
+(cid:17)
+define a general Riemannian coding formulation as
+More specifically, let D = {di}N
+(cid:93)N"
+330dda431e0343a96f9d630a0b4ee526bd93ad11,Domain Adaptation for Visual Applications: A Comprehensive Survey,"Domain Adaptation for Visual Applications: A Comprehensive
+Survey
+Gabriela Csurka"
+33e20449aa40488c6d4b430a48edf5c4b43afdab,The Faces of Engagement: Automatic Recognition of Student Engagementfrom Facial Expressions,"TRANSACTIONS ON AFFECTIVE COMPUTING
+The Faces of Engagement: Automatic
+Recognition of Student Engagement from Facial
+Expressions
+Jacob Whitehill, Zewelanji Serpell, Yi-Ching Lin, Aysha Foster, and Javier R. Movellan"
+333e7ad7f915d8ee3bb43a93ea167d6026aa3c22,3D Assisted Face Recognition: Dealing With Expression Variations,"This is the author's version of an article that has been published in this journal. Changes were made to this version by the publisher prior to publication.
+The final version of record is available at http://dx.doi.org/10.1109/TIFS.2014.2309851
+DRAFT
+D Assisted Face Recognition: Dealing With
+Expression Variations
+Nesli Erdogmus, Member, IEEE, Jean-Luc Dugelay, Fellow Member, IEEE"
+334166a942acb15ccc4517cefde751a381512605,Facial Expression Analysis using Deep Learning,"International Research Journal of Engineering and Technology (IRJET) e-ISSN: 2395-0056
+Volume: 04 Issue: 10 | Oct -2017 www.irjet.net p-ISSN: 2395-0072
+Facial Expression Analysis using Deep Learning
+Hemanth Singh1, Raman Patel2
+,2 M.Tech Student, SSG Engineering College, Odisha, India
+---------------------------------------------------------------------***---------------------------------------------------------------------
+examination structures need to analyse the facial exercises"
+335486cb9bb326e2b33fb03a74d0f9d671490ae7,Real-time pedestrian detection with deformable part models,"Real-time Pedestrian Detection with Deformable Part Models
+Hyunggi Cho, Paul E. Rybski, Aharon Bar-Hillel and Wende Zhang"
+3369692338841f14ce032fc5d0b5b4fe7cc79f1a,Visualising mental representations: A primer on noise-based reverse correlation in social psychology,"European Review of Social Psychology
+ISSN: 1046-3283 (Print) 1479-277X (Online) Journal homepage: http://www.tandfonline.com/loi/pers20
+Visualising mental representations: A primer
+on noise-based reverse correlation in social
+psychology
+L. Brinkman, A. Todorov & R. Dotsch
+To cite this article: L. Brinkman, A. Todorov & R. Dotsch (2017) Visualising mental
+representations: A primer on noise-based reverse correlation in social psychology, European
+Review of Social Psychology, 28:1, 333-361, DOI: 10.1080/10463283.2017.1381469
+To link to this article: http://dx.doi.org/10.1080/10463283.2017.1381469
+© 2017 The Author(s). Published by Informa
+UK Limited, trading as Taylor & Francis
+Group.
+Published online: 16 Oct 2017.
+Submit your article to this journal
+View related articles
+View Crossmark data
+Full Terms & Conditions of access and use can be found at
+http://www.tandfonline.com/action/journalInformation?journalCode=pers20
+Download by: [Princeton University]"
+3347d3e9f8a2da66e1c00f6a1e56bb37d27145ae,devant le jury composé de:,"Spécialité: Informatique et Télécommunications Ecole doctorale: Informatique, Télécommunications et Electronique de Paris Présentée par Raluca-Diana ŞAMBRA-PETRE Pour obtenir le grade de DOCTEUR DE TELECOM SUDPARIS MODELISATION ET INFERENCE 2D/3D DE CONNAISSANCES POUR L'ACCES INTELLIGENT AUX CONTENUS VISUELS ENRICHIS Soutenue le 18 Juin 2013 à Paris devant le jury composé de : Président de jury: Madame le Maître de Conférences, HDR Catherine ACHARD Rapporteur: Monsieur le Professeur Marc ANTONINI Rapporteur: Monsieur le Professeur Constantin VERTAN Examinateur: Monsieur le Professeur Miroslaw BOBER Examinateur: Monsieur le Docteur Olivier MARTINOT Directeur de thèse: Monsieur le Professeur Titus ZAHARIA Thèse n°: 2013TELE0012 THESE DE DOCTORAT CONJOINT TELECOM SUDPARIS et L'UNIVERSITE PIERRE ET MARIE CURIE"
+3389fa2f292b72320f4554261eae34d57e2db7b6,Morphable Reflectance Fields for enhancing face recognition,"MITSUBISHI ELECTRIC RESEARCH LABORATORIES
+http://www.merl.com
+Morphable Reflectance Fields for Enhancing
+Face Recognition
+Ritwik Kumar, Michael Jones, Tim Marks
+TR2010-039
+July 2010"
+330126c9dd71b3b0319d6429737186f1f20057a7,Deep Ordinal Regression Based on Data Relationship for Small Datasets,Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+33e5d1c93e4195a1bfd303a94f0fc3f1c5e233bd,3D Face Recognition Under Expression Variations using Similarity Metrics Fusion,"(cid:176)2007 IEEE. Personal use of this material is permitted.
+However, permission to reprint/republish this material for ad-
+vertising or promotional purposes or for creating new collec-
+tive works for resale or redistribution to servers or lists, or to
+reuse any copyrighted component of this work in other works
+must be obtained from the IEEE."
+3387805b752dadfa34cb8eb63d9dc86aff49934a,"UNIVERSITY OF CALIFORNIA RIVERSIDE Exploration of Contextual Relationships for Robust Video Analysis: Applications in Camera Networks, Bio-image Analysis and Activity Forecasting A Dissertation submitted in partial satisfaction of the requirements for the degree of Doctor of Philosophy in Electrical Engineering","UNIVERSITY OF CALIFORNIA
+RIVERSIDE
+Exploration of Contextual Relationships for Robust Video Analysis:
+Applications in Camera Networks, Bio-image Analysis and Activity Forecasting
+A Dissertation submitted in partial satisfaction
+of the requirements for the degree of
+Doctor of Philosophy
+Electrical Engineering
+Anirban Chakraborty
+August 2014
+Dissertation Committee:
+Dr. Amit K. Roy-Chowdhury, Chairperson
+Dr. Ertem Tuncel
+Dr. Stefano Lonardi"
+33e7bc26047de3c1b607f04a644c2c03920201fd,Learning to Navigate Autonomously in Outdoor Environments : MAVNet,"Learning to Navigate Autonomously in Outdoor Environments :
+MAVNet
+Saumya Kumaar2, Arpit Sangotra3, Sudakshin Kumar3, Mayank Gupta3, Navaneethkrishnan B2 and S N Omkar1"
+05ce0e4e9ae2c7b2320decb3bb29e066f1dd96d3,Patch-wise low-dimensional probabilistic linear discriminant analysis for Face Recognition,"PATCH-WISE LOW-DIMENSIONAL PROBABILISTIC LINEAR DISCRIMINANT ANALYSIS
+FOR FACE RECOGNITION
+Vitomir ˇStruc, Nikola Paveˇsi´c
+Jerneja ˇZganec-Gros, Boˇstjan Vesnicer
+Faculty of Electrical Engineering UL
+Trˇzaˇska cesta 25, 1000 Ljubljana, Slovenia
+Alpineon Ltd., Ulica Iga Grudna 15
+000 Ljubljana, Slovenia"
+05b8673d810fadf888c62b7e6c7185355ffa4121,A Comprehensive Survey to Face Hallucination,"(will be inserted by the editor)
+A Comprehensive Survey to Face Hallucination
+Nannan Wang · Dacheng Tao · Xinbo Gao · Xuelong Li · Jie Li
+Received: date / Accepted: date"
+05e658fed4a1ce877199a4ce1a8f8cf6f449a890,Domain Transfer Learning for Object and Action Recognition,
+0569d7d3d8f96140adc8ec5a6016fdc97e7ef8aa,Random tree walk toward instantaneous 3D human pose estimation,"Random Tree Walk toward Instantaneous 3D Human Pose Estimation
+Ho Yub Jung1, Soochahn Lee2, Yong Seok Heo3, Il Dong Yun1
+Div. of Comp. & Elect. Sys. Eng., Hankuk University of Foreign Studies. 2Dept. of Elect. Eng., Soonchunghyang University. 3Dept. of Elect. & Comp. Eng.,
+Ajou University.
+Figure 1: The red lines represents the random tree walks trained to find
+the head position. The random walk starts from the body center in (a). In
+(b), the head position is found with fewer steps by starting from the chest,
+which is much closer than the body center. (c) illustrates the kinematic tree
+implemented along with RTW. The adjacent joint positions can be used as
+the starting positions for new RTW. (d) shows the RTW path examples.
+Figure 2: Example results of the RTW from EVAL db [1]. Proposed ap-
+proach achieves the state-of-the-art accuracy without using the temporal
+prior. 64 RTW steps are taken for each joint to estimate human pose from
+single depth image. The RTW paths are drawn, and the expectations of
+RTW steps are used to find the joint positions. The pose estimation from a
+single frame takes less than 1 millisecond.
+The availability of accurate depth cameras have made real-time human
+pose estimation possible; however, there are still demands for faster algo-
+rithms on low power processors. This paper introduces 1000 frames per
+second pose estimation method on a single core 3.20 GHz CPU with no"
+05e3167206bc440d5aacf2256fd2e2e421b0808c,People Detection and Re-identification for Multi Surveillance Cameras,"People detection and re-identification for multi surveillance cameras
+Etienne Corvee, Slawomir Bak and Francois Bremond
+INRIA, Sophia Antipolis, Pulsar Team
+{etienne.corvee, slawomir.bak,
+Keywords:
+people detection, people tracking, people re-identification, local binary pattern, mean Riemannian covariance"
+05ad478ca69b935c1bba755ac1a2a90be6679129,Attribute Dominance: What Pops Out?,"Attribute Dominance: What Pops Out?
+Naman Turakhia
+Georgia Tech"
+050e7e32fdc48150f66cb5edf166790c69652b8b,Land Cover Segmentation of Airborne LiDAR Data Using Stochastic Atrous Network,"Article
+Land Cover Segmentation of Airborne LiDAR Data
+Using Stochastic Atrous Network
+Hasan Asy’ari Arief 1,* ID , Geir-Harald Strand 1,2 ID , Håvard Tveite 1 ID and Ulf Geir Indahl 1
+Faculty of Science and Technology, Norwegian University of Life Sciences, 1432 Ås, Norway;
+(G.H.S.); (H.T.); (U.G.I.)
+Division of Survey and Statistics, Norwegian Institute of Bioeconomy Research, 1431 Ås, Norway
+* Correspondence: Tel.: +47-453-91-706
+Received: 30 April 2018; Accepted: 17 June 2018; Published: 19 June 2018"
+051d8bbf12877c46ae9a598a386c5b72d1b103ac,Object Detection using Geometrical Context Feedback,"Int J Comput Vis (2012) 100:154–169
+DOI 10.1007/s11263-012-0547-2
+Object Detection using Geometrical Context Feedback
+Min Sun · Sid Yingze Bao · Silvio Savarese
+Received: 17 December 2010 / Accepted: 16 July 2012 / Published online: 2 August 2012
+© Springer Science+Business Media, LLC 2012"
+054738ce39920975b8dcc97e01b3b6cc0d0bdf32,Towards the design of an end-to-end automated system for image and video-based recognition,"Towards the Design of an End-to-End Automated
+System for Image and Video-based Recognition
+Rama Chellappa1, Jun-Cheng Chen3, Rajeev Ranjan1, Swami Sankaranarayanan1, Amit Kumar1,
+Vishal M. Patel2 and Carlos D. Castillo4"
+05a22ebec697cfa5e8e2883d68e6f4762bbdebd7,Few-Example Object Detection with Model Communication.,"Few-Example Object Detection
+with Model Communication
+Xuanyi Dong, Liang Zheng, Fan Ma, Yi Yang, Deyu Meng"
+05e03c48f32bd89c8a15ba82891f40f1cfdc7562,Scalable Robust Principal Component Analysis Using Grassmann Averages,"Scalable Robust Principal Component
+Analysis using Grassmann Averages
+Søren Hauberg, Aasa Feragen, Raffi Enficiaud, and Michael J. Black"
+05ce73c39368aca1d10ab48dbe0dee80ee084bdb,Multi-label Learning with the Rnns for Fashion Search,"Under review as a conference paper at ICLR 2017
+MULTI-LABEL LEARNING WITH THE RNNS
+FOR FASHION SEARCH
+Se-Yeoung Kim, Sang-Il Na, Ha-Yoon Kim, Moon-Ki Kim, Byoung-Ki Jeon
+Machine Intelligence Lab., SK Planet
+Seongnam City, South Korea
+Taewan Kim ∗
+Naver Labs, Naver Corp.
+Seongnam City, South Korea"
+056ba488898a1a1b32daec7a45e0d550e0c51ae4,Cascaded Continuous Regression for Real-Time Incremental Face Tracking,"Cascaded Continuous Regression for Real-time
+Incremental Face Tracking
+Enrique S´anchez-Lozano, Brais Martinez,
+Georgios Tzimiropoulos, and Michel Valstar
+Computer Vision Laboratory. University of Nottingham"
+050fdbd2e1aa8b1a09ed42b2e5cc24d4fe8c7371,Spatio-Temporal Scale Selection in Video Data,"Contents
+Scale Space and PDE Methods
+Spatio-Temporal Scale Selection in Video Data . . . . . . . . . . . . . . . . . . . . .
+Tony Lindeberg
+Dynamic Texture Recognition Using Time-Causal Spatio-Temporal
+Scale-Space Filters . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+Ylva Jansson and Tony Lindeberg
+Corner Detection Using the Affine Morphological Scale Space . . . . . . . . . . .
+Luis Alvarez
+Nonlinear Spectral Image Fusion. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+Martin Benning, Michael Möller, Raz Z. Nossek, Martin Burger,
+Daniel Cremers, Guy Gilboa, and Carola-Bibiane Schönlieb
+Tubular Structure Segmentation Based on Heat Diffusion. . . . . . . . . . . . . . .
+Fang Yang and Laurent D. Cohen
+Analytic Existence and Uniqueness Results for PDE-Based Image
+Reconstruction with the Laplacian . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+Laurent Hoeltgen, Isaac Harris, Michael Breuß, and Andreas Kleefeld
+Combining Contrast Invariant L1 Data Fidelities with Nonlinear
+Spectral Image Decomposition . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+Leonie Zeune, Stephan A. van Gils, Leon W.M.M. Terstappen,"
+05a6a40c840c069631a825509f3095697592e1c4,IAN: The Individual Aggregation Network for Person Search,"IAN: The Individual Aggregation Network for
+Person Search
+Jimin XIAO, Member, IEEE, Yanchun XIE, Tammam TILLO, Senior Member, IEEE, Kaizhu HUANG, Senior
+Member, IEEE, Yunchao WEI, Member, IEEE, Jiashi FENG"
+052880031be0a760a5b606b2ad3d22f237e8af70,Datasets on object manipulation and interaction: a survey,"Datasets on object manipulation and interaction: a survey
+Yongqiang Huang and Yu Sun"
+05bba1f1626f02ef4ca497090b4a04d47f36ebb6,Social projection increases for positive targets: ascertaining the effect and exploring its antecedents.,"545039 PSPXXX10.1177/0146167214545039Personality and Social Psychology BulletinMachunsky et al.
+research-article2014
+Article
+Social Projection Increases for
+Positive Targets: Ascertaining the
+Effect and Exploring Its Antecedents
+Maya Machunsky1, Claudia Toma2, Vincent Yzerbyt3,
+nd Olivier Corneille3
+Personality and Social
+Psychology Bulletin
+014, Vol. 40(10) 1373 –1388
+© 2014 by the Society for Personality
+nd Social Psychology, Inc
+Reprints and permissions:
+sagepub.com/journalsPermissions.nav
+DOI: 10.1177/0146167214545039
+pspb.sagepub.com"
+053c2f592a7f153e5f3746aa5ab58b62f2cf1d21,Performance Evaluation of Illumination Normalization Techniques for Face Recognition,"International Journal of Research in
+Engineering & Technology (IJRET)
+ISSN 2321-8843
+Vol. 1, Issue 2, July 2013, 11-20
+© Impact Journals
+PERFORMANCE EVALUATION OF ILLUMINATION NORMALIZATION TECHNIQUES
+FOR FACE RECOGNITION
+A. P. C. SARATHA DEVI & V. MAHESH
+Department of Information Technology, PSG College of Technology, Coimbatore, Tamil Nadu, India"
+05ef5efd9e42f49dbb9e50ec3fe367f275a94931,Biologically Inspired Processing for Lighting Robust Face Recognition,"We are IntechOpen,
+the world’s leading publisher of
+Open Access books
+Built by scientists, for scientists
+,800
+16,000
+Open access books available
+International authors and editors
+Downloads
+Our authors are among the
+Countries delivered to
+TOP 1%
+2.2%
+most cited scientists
+Contributors from top 500 universities
+Selection of our books indexed in the Book Citation Index
+in Web of Science™ Core Collection (BKCI)
+Interested in publishing with us?
+Contact
+Numbers displayed above are based on latest data collected."
+05487784c1c94e17c26862e342c1b81acfe11258,Spontaneous facial expression analysis based on temperature changes and head motions,"Spontaneous Facial Expression Analysis
+Based on Temperature
+Changes and Head Motions
+Peng Liu and Lijun Yin
+State University of New York-at Binghamton"
+051830b0ea58d1568f19ec3297e301d9789c9a76,Bringing Semantics into Focus Using Visual Abstraction,
+05ea7930ae26165e7e51ff11b91c7aa8d7722002,Learning And-Or Model to Represent Context and Occlusion for Car Detection and Viewpoint Estimation,"Learning And-Or Model to Represent Context and
+Occlusion for Car Detection and Viewpoint Estimation
+Tianfu Wu∗, Bo Li∗ and Song-Chun Zhu"
+05384ac77be3211fb7d221802bc79eb3c9fa2873,A Novel Image Classification System Based on Evidence Probabilistic Transformation,"International Journal of Research in Computer and
+Communication Technology, Vol 4,Issue 2 ,February -2015
+ISSN (Online) 2278- 5841
+ISSN (Print) 2320- 5156
+A Novel Image Classification System Based on Evidence
+Probabilistic Transformation
+Department of Computer Science, Mansoura University, Mansoura 35516, Egypt
+A.E. Amin
+information
+different
+identity
+paper
+evidence"
+056892b7e573608e64c3c9130e8ce33353a94de2,Semantic Image Segmentation with Task-Specific Edge Detection Using CNNs and a Discriminatively Trained Domain Transform,"Semantic Image Segmentation with Task-Specific Edge Detection Using CNNs
+and a Discriminatively Trained Domain Transform
+Liang-Chieh Chen∗
+Jonathan T. Barron, George Papandreou, Kevin Murphy
+{barron, gpapan,
+Alan L. Yuille"
+056e2c82db905b93f7762a2ee7778d3aacc5a1f0,Bag of Attributes for Video Event Retrieval,"Bag of Attributes for Video Event Retrieval
+Leonardo A. Duarte1, Ot´avio A. B. Penatti2, and Jurandy Almeida1
+Institute of Science and Technology
+Federal University of S˜ao Paulo – UNIFESP
+2247-014, S˜ao Jos´e dos Campos, SP – Brazil
+Email: {leonardo.assuane,
+Advanced Technologies
+SAMSUNG Research Institute
+3097-160, Campinas, SP – Brazil
+Email:"
+05fcbe4009543ec8943bdc418ee81e9594b899a4,Social perception in autism spectrum disorders: impaired category selectivity for dynamic but not static images in ventral temporal cortex.,"doi:10.1093/cercor/bhs276
+Social Perception in Autism Spectrum Disorders: Impaired Category Selectivity
+for Dynamic but not Static Images in Ventral Temporal Cortex
+Jill Weisberg1, Shawn C. Milleville1, Lauren Kenworthy1,2, Gregory L. Wallace1, Stephen J. Gotts1,
+Michael S. Beauchamp3 and Alex Martin1
+NIMH, Laboratory of Brain and Cognition, Bethesda, MD 20850, 2Children’s National Medical Center, Center for Autism
+Spectrum Disorders, Rockville, MD 20850 and 3Department of Neurobiology and Anatomy, University of Texas Medical School
+at Houston, Houston, TX 77030, USA
+Address correspondence to Jill Weisberg, San Diego State University Research Foundation, Laboratory for Language and Cognitive Neuroscience,
+6495 Alvarado Rd, Suite 200, San Diego, CA 92120, USA. Email:
+Studies of autism spectrum disorders (ASDs) reveal dysfunction in
+the neural systems mediating object processing (particularly faces)
+and social cognition, but few investigations have systematically as-
+sessed the specificity of the dysfunction. We compared cortical
+responses in typically developing adolescents and those with ASD
+to stimuli from distinct conceptual domains known to elicit cat-
+egory-related activity in separate neural systems. In Experiment 1,
+subjects made category decisions to photographs, videos, and
+point-light displays of people and tools. In Experiment 2, subjects
+interpreted displays of simple, geometric shapes in motion depicting"
+051a84f0e39126c1ebeeb379a405816d5d06604d,Biometric Recognition Performing in a Bioinspired System,"Cogn Comput (2009) 1:257–267
+DOI 10.1007/s12559-009-9018-7
+Biometric Recognition Performing in a Bioinspired System
+Joan Fa`bregas Æ Marcos Faundez-Zanuy
+Published online: 20 May 2009
+Ó Springer Science+Business Media, LLC 2009"
+053ff27aba868c64823dbbe2167a762dd3f33b53,Probabilistic Slow Features for Behavior Analysis,"This article has been accepted for inclusion in a future issue of this journal. Content is final as presented, with the exception of pagination.
+Probabilistic Slow Features for Behavior Analysis
+Lazaros Zafeiriou, Student Member, IEEE, Mihalis A. Nicolaou, Member, IEEE,
+Stefanos Zafeiriou, Member, IEEE, Symeon Nikitidis,
+and Maja Pantic, Fellow, IEEE
+feature"
+0559fb9f5e8627fecc026c8ee6f7ad30e54ee929,Facial Expression Recognition,"Facial Expression Recognition
+Bogdan J. Matuszewski, Wei Quan and Lik-Kwan Shark
+ADSIP Research Centre, University of Central Lancashire
+1. Introduction
+Facial expressions are visible signs of a person’s affective state, cognitive activity and
+personality. Humans can perform expression recognition with a remarkable robustness
+without conscious effort even under a variety of adverse conditions such as partially
+occluded faces, different appearances and poor illumination. Over the last two decades, the
+advances in imaging technology and ever increasing computing power have opened up a
+possibility of automatic facial expression recognition and this has led to significant research
+efforts from the computer vision and pattern recognition communities. One reason for this
+growing interest is due to a wide spectrum of possible applications in diverse areas, such as
+more engaging human-computer interaction (HCI) systems, video conferencing, augmented
+reality. Additionally from the biometric perspective, automatic recognition of facial
+expressions has been investigated in the context of monitoring patients in the intensive care
+and neonatal units for signs of pain and anxiety, behavioural research, identifying level of
+concentration, and improving face recognition.
+Automatic facial expression recognition is a difficult task due to its inherent subjective
+nature, which is additionally hampered by usual difficulties encountered in pattern
+recognition and computer vision research. The vast majority of the current state-of-the-art"
+05a7be10fa9af8fb33ae2b5b72d108415519a698,Multilayer and Multimodal Fusion of Deep Neural Networks for Video Classification,"Multilayer and Multimodal Fusion of Deep Neural Networks
+for Video Classification
+Xiaodong Yang Pavlo Molchanov Jan Kautz
+{xiaodongy, pmolchanov,
+NVIDIA"
+057d879fe2d6c40ef79fe901cc62625a3b2ea8ba,EgoSampling: Fast-forward and stereo for egocentric videos,"EgoSampling: Fast-Forward and Stereo for Egocentric Videos
+Yair Poleg
+Tavi Halperin
+The Hebrew University
+The Hebrew University
+Jerusalem, Israel
+Jerusalem, Israel
+Chetan Arora
+Delhi, India
+Shmuel Peleg
+The Hebrew University
+Jerusalem, Israel"
+056be8a896f71be4a1dee67b01f4d59e3e982304,Generative Models of Visually Grounded Imagination,"Published as a conference paper at ICLR 2018
+GENERATIVE MODELS OF VISUALLY GROUNDED
+IMAGINATION
+Ramakrishna Vedantam∗
+Georgia Tech
+Ian Fischer
+Google Inc.
+Jonathan Huang
+Google Inc.
+Kevin Murphy
+Google Inc."
+050a149051a5d268fcc5539e8b654c2240070c82,Magisterské a doktorské studijnı́ programy,MAGISTERSKÉ A DOKTORSKÉSTUDIJNÍ PROGRAMY31. 5. 2018SBORNÍKSTUDENTSKÁ VĚDECKÁ KONFERENCE
+05fd17673f1500d46196b0e38857eb3eaf09296e,Fourier Descriptors Based on the Structure of the Human Primary Visual Cortex with Applications to Object Recognition,"(will be inserted by the editor)
+Fourier descriptors based on the structure of the human
+primary visual cortex with applications to object recognition
+Amine Bohi · Dario Prandi · Vincente Guis · Fr´ed´eric Bouchara ·
+Jean-Paul Gauthier
+Received: date / Accepted: date"
+0580edbd7865414c62a36da9504d1169dea78d6f,Baseline CNN structure analysis for facial expression recognition,"Baseline CNN structure analysis for facial expression recognition
+Minchul Shin1, Munsang Kim2 and Dong-Soo Kwon1"
+05a2547d976420f7d1de19907e16280d15199008,Semantic Road Layout Understanding by Generative Adversarial Inpainting,"Road layout understanding by generative
+adversarial inpainting
+Lorenzo Berlincioni, Federico Becattini, Leonardo Galteri, Lorenzo Seidenari,
+Alberto Del Bimbo"
+0534304bc09e92b2cfa0a8da59cfcf0be84d70a4,Towards reliable real-time person detection,"Towards Reliable Real-Time Person Detection
+Silviu-Tudor SERBAN1, Srinidhi MUKANAHALLIPATNA SIMHA1, Vasanth
+BATHRINARAYANAN1, Etienne CORVEE1 and Francois BREMOND1
+INRIA Sophia Antipolis - Mediterranee, 2004 route des Lucioles, Sophia Antipolis, France
+{silviu-tudor.serban,srinidhi.mukanahallipatna
+Keywords:
+Random sampling, Adaboost, Soft cascade, LBP channel features"
+0582d338a5e5b325c282e2ff13bfd62cf4d08108,Affordance Research in Developmental Robotics: A Survey,"Affordance Research in Developmental
+Robotics: A Survey
+Huaqing Min, Chang’an Yi, Ronghua Luo, Jinhui Zhu, and Sheng Bi
+apture"
+051aa14e0b7dd4231636db39398c0c15b2687682,Robust Subspace Clustering via Thresholding,"Robust Subspace Clustering via Thresholding
+Reinhard Heckel and Helmut B¨olcskei
+Dept. of IT & EE, ETH Zurich, Switzerland
+July 2013; last revised August 2015"
+054953d915f65b66485b653cd2ffbf61568b2849,Face Description with Local Invariant Features: Application to Face Recognition,"Face Description with Local Invariant Features: Application to Face Recognition
+{tag} {/tag}
+International Journal of Computer Applications
+© 2010 by IJCA Journal
+Number 24 - Article 12
+Year of Publication: 2010
+Authors:
+Sanjay A. Pardeshi
+Dr. S.N. Talbar
+10.5120/555-726"
+9d58e8ab656772d2c8a99a9fb876d5611fe2fe20,Beyond Temporal Pooling: Recurrence and Temporal Convolutions for Gesture Recognition in Video,"Beyond Temporal Pooling: Recurrence and Temporal
+Convolutions for Gesture Recognition in Video
+Lionel Pigou, A¨aron van den Oord∗ , Sander Dieleman∗ ,
+{lionel.pigou,aaron.vandenoord,sander.dieleman,
+Mieke Van Herreweghe & Joni Dambre
+mieke.vanherreweghe,
+Ghent University
+February 11, 2016"
+9d42df42132c3d76e3447ea61e900d3a6271f5fe,AutoCAP: An Automatic Caption Generation System based on the Text Knowledge Power Series Representation Model,"International Journal of Computer Applications (0975 – 8887)
+Advanced Computing and Communication Techniques for High Performance Applications (ICACCTHPA-2014)
+AutoCAP: An Automatic Caption Generation System
+Based on the Text Knowledge Power Series
+Representation Model
+Krishnapriya P S
+M.Tech Dept of CSE
+NSS College of Engineering
+Palakkad, Kerala"
+9d4c05c7c9284c8e303641b95e997f11df2dd1a7,Misalignment-robust Face Recognition via Efficient Locality-constrained Representation,"Misalignment-robust Face Recognition via Effi-
+cient Locality-constrained Representation
+Yandong Wen, Weiyang Liu, Meng Yang, Member, IEEE, Yuli Fu, Zhifeng Li, Senior Member, IEEE"
+9d8fd639a7aeab0dd1bc6eef9d11540199fd6fe2,L Earning to C Luster,"Workshop track - ICLR 2018
+LEARNING TO CLUSTER
+Benjamin B. Meier, Thilo Stadelmann & Oliver D¨urr
+ZHAW Datalab, Zurich University of Applied Sciences
+Winterthur, Switzerland"
+9d2ad0b408bddc9c5a713e250b52aa48f1786a46,Visual Recognition Using Local Quantized Patterns,"Visual Recognition using Local Quantized Patterns
+Sibt Ul Hussain, Bill Triggs
+To cite this version:
+Sibt Ul Hussain, Bill Triggs. Visual Recognition using Local Quantized Patterns. Andrew Fitzgibbon,
+Svetlana Lazebnik, Pietro Perona, Yoichi Sato, and Cordelia Schmid. ECCV 2012 - 12th European
+Conference on Computer Vision, Oct 2012, Florence, Italy. Springer, 7573, pp.716-729, 2012, Lecture
+Notes in Computer Science. <10.1007/978-3-642-33709-3_51>. <hal-00695627>
+HAL Id: hal-00695627
+https://hal.archives-ouvertes.fr/hal-00695627
+Submitted on 9 May 2012
+HAL is a multi-disciplinary open access
+archive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+abroad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de"
+9d357bbf014289fb5f64183c32aa64dc0bd9f454,Face Identification by Fitting a 3D Morphable Model Using Linear Shape and Texture Error Functions,"Face Identification by Fitting a 3D Morphable Model
+using Linear Shape and Texture Error Functions
+Sami Romdhani, Volker Blanz, and Thomas Vetter
+University of Freiburg, Instit¨ut f¨ur Informatik,
+Georges-K¨ohler-Allee 52, 79110 Freiburg, Germany,
+fromdhani, volker,"
+9d0bf3b351fb4d80cee5168af8367c5f6c8b2f3a,"The Tromso Infant Faces Database (TIF): Development, Validation and Application to Assess Parenting Experience on Clarity and Intensity Ratings","METHODS
+published: 24 March 2017
+doi: 10.3389/fpsyg.2017.00409
+The Tromso Infant Faces Database
+(TIF): Development, Validation and
+Application to Assess Parenting
+Experience on Clarity and Intensity
+Ratings
+Jana K. Maack†, Agnes Bohne†, Dag Nordahl, Lina Livsdatter, Åsne A. W. Lindahl,
+Morten Øvervoll, Catharina E. A. Wang and Gerit Pfuhl*
+Department of Psychology, UiT – The Arctic University of Norway, Tromsø, Norway
+Newborns and infants are highly depending on successfully communicating their needs;
+e.g., through crying and facial expressions. Although there is a growing interest in
+the mechanisms of and possible influences on the recognition of facial expressions in
+infants, heretofore there exists no validated database of emotional infant faces. In the
+present article we introduce a standardized and freely available face database containing
+Caucasian infant face images from 18 infants 4 to 12 months old. The development
+and validation of the Tromsø Infant Faces (TIF) database is presented in Study 1. Over
+700 adults categorized the photographs by seven emotion categories (happy, sad,
+disgusted, angry, afraid, surprised, neutral) and rated intensity, clarity and their valance."
+9d6a2180a5f452356526edd8b4833180fa09cb3f,Photo Aesthetics Analysis via DCNN Feature Encoding,"Photo Aesthetics Analysis
+via DCNN Feature Encoding
+Hui-Jin Lee, Ki-Sang Hong, Henry Kang, and Seungyong Lee"
+9d67af2158807aa815b5a4485b076f7a18ce6ab4,Model Adaptation with Synthetic and Real Data for Semantic Dense Foggy Scene Understanding,"Model Adaptation with Synthetic and Real Data
+for Semantic Dense Foggy Scene Understanding
+Christos Sakaridis1(
+ETH Z¨urich, Z¨urich, Switzerland
+KU Leuven, Leuven, Belgium"
+9df7ea3eed6b0c9c067521119698cfa79cc1f91d,Representations and Matching Techniques for 3D Free-form Object and Face Recognition,"Representations and Matching
+Techniques for 3D Free-form Object and
+Face Recognition
+Ajmal Saeed Mian
+This thesis is presented for the degree of
+Doctor of Philosophy
+of The University of Western Australia
+School of Computer Science and Software Engineering.
+March 2006"
+9dc263210770e7e836040c8e9d0edff40814254b,A track before detect approach for sequential Bayesian tracking of multiple speech sources,"978-1-4244-4296-6/10/$25.00 ©2010 IEEE
+ICASSP 2010"
+9da9ee38d5845d39497b10b0ab442580e75ee4d3,Dynamic Video Segmentation Network,"Dynamic Video Segmentation Network
+Yu-Syuan Xu, Tsu-Jui Fu∗, Hsuan-Kung Yang∗, Student Member, IEEE and Chun-Yi Lee, Member, IEEE
+Elsa Lab, Department of Computer Science, National Tsing Hua Uiversity
+{yusean0118, rayfu1996ozig,"
+9d8978ee319d671283a90761aaed150c7cc9154b,Fader Networks: Manipulating Images by Sliding Attributes,"Fader Networks:
+Manipulating Images by Sliding Attributes
+Guillaume Lample1,2, Neil Zeghidour1,3, Nicolas Usunier1,
+Antoine Bordes1, Ludovic Denoyer2, Marc’Aurelio Ranzato1"
+9d839dfc9b6a274e7c193039dfa7166d3c07040b,Augmented faces,"Augmented Faces
+Matthias Dantone1
+Lukas Bossard1
+Till Quack1,2
+Luc van Gool1,3
+ETH Z¨urich
+Kooaba AG
+K.U. Leuven"
+9d1940f843c448cc378214ff6bad3c1279b1911a,Shape-aware Instance Segmentation,"Shape-aware Instance Segmentation
+Zeeshan Hayder1,2, Xuming He2,1
+Australian National University & 2Data61/CSIRO ∗
+Mathieu Salzmann2,3
+CVLab, EPFL, Switzerland"
+9da2abae3072fd9fcff0e13b8f00fc21f22d0085,NOKMeans: Non-Orthogonal K-means Hashing,"NOKMeans: Non-Orthogonal K-means Hashing
+Xiping Fu, Brendan McCane, Steven Mills, and Michael Albert
+Dep. of Computer Science, University of Otago, Dunedin, NZ"
+9d3ac3d29164c2665c371a3c71de75bea753eb47,Skeleton-Aided Articulated Motion Generation,"Skeleton-aided Articulated Motion Generation
+Yichao Yan, Jingwei Xu, Bingbing Ni, Xiaokang Yang"
+9d35d4fba9217404a7aab84a7d09e53c324710be,Biometrics Project: Bayesian Face Recognition,"Biometrics Project: Bayesian Face Recognition
+Jinwei Gu
+Computer Science Department"
+9d36c81b27e67c515df661913a54a797cd1260bb,3d Face Recognition Techniques - a Review,"Preeti.B.Sharma, Mahesh M. Goyani / International Journal of Engineering Research and
+Applications (IJERA) ISSN: 2248-9622 www.ijera.com
+Vol. 2, Issue 1,Jan-Feb 2012, pp.787-793
+3D FACE RECOGNITION TECHNIQUES - A REVIEW
+Preeti B. Sharma*, Mahesh M. Goyani**
+*(Department of Information Technology, Gujarat Technological University, India)
+**( Department of Computer Engineering, Gujarat Technological University, India)
+security at many places"
+9d743bbef448e7c145aeb11e55cc05fdbafe9d6d,Person tracking and gesture recognition in challenging visibility conditions using 3D thermal sensing,"Person Tracking and Gesture Recognition
+in Challenging Visibility Conditions
+Using 3D Thermal Sensing
+Ariel Kapusta and Patrick Beeson
+IEEE International Symposium on Robot and Human Interactive Communication (RO-MAN)
+August, 30, 2016"
+9dc70aa3d51a9403e1894a7fa535ace99b527861,3 Bayesian Tracking by Online Co-Training and Sequential Evolutionary Importance Resampling,"We are IntechOpen,
+the world’s leading publisher of
+Open Access books
+Built by scientists, for scientists
+,700
+08,500
+.7 M
+Open access books available
+International authors and editors
+Downloads
+Our authors are among the
+Countries delivered to
+TOP 1%
+2.2%
+most cited scientists
+Contributors from top 500 universities
+Selection of our books indexed in the Book Citation Index
+in Web of Science™ Core Collection (BKCI)
+Interested in publishing with us?
+Contact"
+9d757c0fede931b1c6ac344f67767533043cba14,Search Based Face Annotation Using PCA and Unsupervised Label Refinement Algorithms,"Search Based Face Annotation Using PCA and
+Unsupervised Label Refinement Algorithms
+Shital Shinde1, Archana Chaugule2
+Computer Department, Savitribai Phule Pune University
+D.Y.Patil Institute of Engineering and Technology, Pimpri, Pune-18
+Mahatma Phulenagar, 120/2 Mahaganpati soc, Chinchwad, Pune-19, MH, India
+D.Y.Patil Institute of Engineering and Technology, Pimpri, Pune-18
+Computer Department, D.Y.PIET, Pimpri, Pune-18, MH, India
+presents"
+9d1e32f6af50354b64ca8f004746073473559056,A visual surveillance system for person re-identification,"International Conference on Quality Control by Artificial Vision 2017, edited by Hajime Nagahara,Kazunori Umeda, Atsushi Yamashita, Proc. of SPIE Vol. 10338, 103380D · © 2017 SPIECCC code: 0277-786X/17/$18 · doi: 10.1117/12.2266509Proc. of SPIE Vol. 10338 103380D-1"
+9d5db7427b44d83bf036ff4cff382c23c6c7b6d8,Video redaction: a survey and comparison of enabling technologies,"Downloaded From: https://biomedicaloptics.spiedigitallibrary.org/journals/Journal-of-Electronic-Imaging on 10/14/2018
+Terms of Use: https://biomedicaloptics.spiedigitallibrary.org/terms-of-use
+Videoredaction:asurveyandcomparisonofenablingtechnologiesShaganSahAmeyaShringiRaymondPtuchaAaronBurryRobertLoceShaganSah,AmeyaShringi,RaymondPtucha,AaronBurry,RobertLoce,“Videoredaction:asurveyandcomparisonofenablingtechnologies,”J.Electron.Imaging26(5),051406(2017),doi:10.1117/1.JEI.26.5.051406."
+9d60ad72bde7b62be3be0c30c09b7d03f9710c5f,A Survey: Face Recognition Techniques,"A Survey: Face Recognition Techniques
+Arun Agrawal
+Assistant Professor, ITM GOI
+Ranjana Sikarwar
+M Tech, ITM GOI
+video
+(Eigen
+passport-verification,"
+9d138bc60593c2770d968ba56172332773e02fa5,GPLAC: Generalizing Vision-Based Robotic Skills Using Weakly Labeled Images,
+9d24179aa33a94c8c61f314203bf9e906d6b64de,Searching for People through Textual and Visual Attributes,"Searching for People through
+Textual and Visual Attributes
+Junior Fabian, Ramon Pires, Anderson Rocha
+Institute of Computing
+University of Campinas (Unicamp)
+Campinas-SP, Brazil
+Fig. 1. The proposed approach aims at searching for people using textual and visual attributes. Given an image database of faces, we extract the points of
+interest (PoIs) to construct a visual dictionary that allow us to obtain the feature vectors by a quantization process (top). Then we train attribute classifiers to
+generate a score for each image (middle). Finally, given a textual query (e.g., male), we fusion obtained scores to return a unique final rank (bottom)."
+9d9166e1d9e80bbe772423384af53a3d5da898ae,Object Geolocation Using MRF Based Multi-Sensor Fusion,"OBJECT GEOLOCATION USING MRF BASED MULTI-SENSOR FUSION
+Vladimir A. Krylov and Rozenn Dahyot
+ADAPT Centre, School of Computer Science and Statistics, Trinity College Dublin, Dublin, Ireland"
+9d518344d5c7d889f9c90c6193be4757fa584770,3 D registration based on a multi-references local parametrisation : Application to 3 D faces,"3D registration based on a multi-references local parametrisation:
+Application to 3D faces
+Wieme Gadacha1, Faouzi Ghorbel1
+CRISTAL laboratory, GRIFT research group
+National School of Computer Sciences (NSCS), La Manouba 2010, Tunisia"
+9da2b79c6942852e8076cdaa4d4c93eb1ae363f1,Constraint-Based Visual Generation,"Constraint-Based Visual Generation
+Giuseppe Marra
+Francesco Giannini
+Marco Gori
+Michelangelo Diligenti
+Department of Information Engineering and Mathematical Sciences
+http://sailab.diism.unisi.it/
+October 9, 2018"
+9cabbb686883635d8755706ee4f1349d812d7ccb,Detection and Tracking of General Movable Objects in Large 3D Maps,"Detection and Tracking of General
+Movable Objects in Large 3D Maps
+Nils Bore, Johan Ekekrantz, Patric Jensfelt and John Folkesson
+Robotics, Perception and Learning Lab
+Royal Institute of Technology (KTH)
+Stockholm, SE-100 44, Sweden
+Email: {nbore, ekz, patric,"
+9cb152758ee57f2abcc0b59348752e528a2ed2f7,Full Video Processing for Mobile Audio-Visual Identity Verification,
+9cdb83ed96f5aa74bc4e2e9edacfbb5263e8fc37,Learning Mutual Visibility Relationship for Pedestrian Detection with a Deep Model,"Manuscript
+Click here to download Manuscript: Mutual-DBN-J2.pdf
+Click here to view linked References
+Noname manuscript No.
+(will be inserted by the editor)
+Learning Mutual Visibility Relationship for Pedestrian Detection with a
+Deep Model
+Wanli Ouyang · Xingyu Zeng · Xiaogang Wang
+Received: date / Accepted: date"
+9c1305383ce2c108421e9f5e75f092eaa4a5aa3c,Speaker Retrieval for Tv Show Videos by Associating Audio Speaker Recognition Result to Visual Faces∗,"SPEAKER RETRIEVAL FOR TV SHOW VIDEOS BY ASSOCIATING AUDIO SPEAKER
+RECOGNITION RESULT TO VISUAL FACES∗
+Yina Han*’, Joseph Razik’, Gerard Chollet’, and Guizhong Liu*
+*School of Electrical and Information Engineering, Xi’an Jiaotong University, Xi’an, China
+’CNRS-LTCI, TELECOM-ParisTech, Paris, France"
+9cd7487e0eed11dabc94dd867178204c53eb2270,Self-Organizing Traffic Lights : A Pedestrian Oriented Approach,"Self-Organizing Traffic Lights: A Pedestrian
+Oriented Approach
+Jessica S. Souza1, Cesar A. M. Ferreira2, Cassio E. dos Santos Jr3, Victor H. C. Melo4, William Robson Schwartz4
+Computer Science Department, Federal University of Minas Gerais, Belo Horizonte, Brazil
+the vehicular and pedestrian traffic. One of"
+9ca82f5936723a773fb44336cd66c315f2024d34,Latent-Class Hough Forests for 3D Object Detection and Pose Estimation,"Latent-Class Hough Forests for 3D Object Detection
+and Pose Estimation
+Alykhan Tejani, Danhang Tang, Rigas Kouskouridas, and Tae-Kyun Kim
+Imperial Collge London"
+9c1860de6d6e991a45325c997bf9651c8a9d716f,3D reconstruction and face recognition using kernel-based ICA and neural networks,"3D Reconstruction and Face Recognition Using Kernel-Based
+ICA and Neural Networks
+Cheng-Jian Lin Ya-Tzu Huang
+Chi-Yung Lee
+Dept. of Electrical Dept. of CSIE Dept. of CSIE
+Engineering Chaoyang University Nankai Institute of
+National University of Technology Technology
+of Kaohsiung"
+9c341221e19fac7a5e38b9fe5c62361f780a7f08,Productivity Effects of Information Diffusion in Networks Paper 234,"A research and education initiative at the MIT
+Sloan School of Management
+Productivity Effects of Information
+Diffusion in Networks
+Paper 234
+July 2007
+Sinan Aral
+Erik Brynjolfsson
+Marshall Van Alstyne
+For more information,
+please visit our website at http://digital.mit.edu
+or contact the Center directly at
+or 617-253-7054"
+9c2f3e9c223153b70f37ee84224d67b5a577bd58,Towards unlocking web video: Automatic people tracking and clustering,"Towards Unlocking Web Video: Automatic People Tracking and Clustering
+Alex Holub*, Pierre Moreels*, Atiq Islam*, Andrei Makhanov*, Rui Yang*
+Ooyala Inc, 800 W. El Camino Real, Suite 350, Mountain View, CA 94040
+*All authors contributed equally to this work"
+9cc4abd2ec10e5fa94ff846c5ee27377caf17cf0,Improved Techniques for GAN based Facial Inpainting,"Improved Techniques for GAN based Facial
+Inpainting
+Avisek Lahiri*, Arnav Jain*, Divyasri Nadendla and Prabir Kumar Biswas, Senior Member, IEEE"
+9cf69de9e06e39f7f7ce643b3327bf69be8b9678,SHREC ’ 18 track : Recognition of geometric patterns over 3 D models,"SHREC’18 track: Recognition of geometric patterns
+over 3D models
+S Biasotti, E. Moscoso Thompson, L Bathe, S Berretti, A. Giachetti, T
+Lejemble, N Mellado, K Moustakas, Iason Manolas, Dimitrios Dimou, et al.
+To cite this version:
+S Biasotti, E. Moscoso Thompson, L Bathe, S Berretti, A. Giachetti, et al.. SHREC’18 track: Recog-
+nition of geometric patterns over 3D models. Eurographics Workshop on 3D Object Retrieval, 2018.
+<hal-01774423>
+https://hal-mines-paristech.archives-ouvertes.fr/hal-01774423
+HAL Id: hal-01774423
+Submitted on 30 Apr 2018
+HAL is a multi-disciplinary open access
+archive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+abroad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,"
+9c576520ed9c960270715f790a62b9337ce88bd2,Beyond Pixels: Leveraging Geometry and Shape Cues for Online Multi-Object Tracking,"Beyond Pixels: Leveraging Geometry and Shape Cues for Online
+Multi-Object Tracking
+Sarthak Sharma1∗, Junaid Ahmed Ansari1∗, J. Krishna Murthy2, K. Mahdava Krishna1
+Robotics Research Center, KCIS, IIIT Hyderabad, India
+Mila, Universite de Montreal, Canada
+denotes equal contribution
+Fig. 1. An illustration of the proposed method. The first two rows show objects tracks in frames t and t + 1. The bottom row depicts how 3D position
+and orientation information is propagated from frame t to frame t + 1. This information is used to specify search areas for each object in the subsequent
+frame, and this greatly reduces the number of pairwise costs that are to be computed."
+9ca7899338129f4ba6744f801e722d53a44e4622,Deep neural networks regularization for structured output prediction,"Deep Neural Networks Regularization for Structured
+Output Prediction
+Soufiane Belharbi∗
+INSA Rouen, LITIS
+76000 Rouen, France
+Clément Chatelain
+INSA Rouen, LITIS
+76000 Rouen, France
+Romain Hérault
+INSA Rouen, LITIS
+76000 Rouen, France
+Sébastien Adam
+INSA Rouen, LITIS
+76000 Rouen, France
+Normandie Univ, UNIROUEN, UNIHAVRE,
+Normandie Univ, UNIROUEN, UNIHAVRE,
+Normandie Univ, UNIROUEN, UNIHAVRE,
+Normandie Univ, UNIROUEN, UNIHAVRE,"
+9c3b9dee9da817134325357afbebbd1a0d67cab2,Deep Learning for Saliency Prediction in Natural Video,"Deep Learning for Saliency Prediction in Natural Video
+Souad CHAABOUNIa,b, Jenny BENOIS-PINEAUa, Ofer HADARc, Chokri
+BEN AMARb
+Universit´e de Bordeaux, Laboratoire Bordelais de Recherche en Informatique, Bˆatiment
+Sfax university, Research Groups in Intelligent Machines, National Engineering School of
+A30, F-33405 Talence cedex, France
+Communication Systems Engineering department, Ben Gurion University of the Nagev
+Sfax (ENIS), Tunisia"
+9c731b820c495904a6f7d255d7e6a3bf9e5fc365,Geometric inpainting of 3D structures,"Geometric inpainting of 3D structures
+Pratyush Sahay, A.N. Rajagopalan
+Indian Institute of Technology Madras
+Chennai, India"
+9c889616034adce2af05d74eac44cf43a8106468,Binary Quadratic Programing for Online Tracking of Hundreds of People in Extremely Crowded Scenes,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+Binary Quadratic Programing for Online Tracking
+of Hundreds of People in Extremely Crowded
+Scenes
+Afshin Dehghan, Member, IEEE, and Mubarak Shah, Fellow, IEEE"
+9cf6d66a0b4e5a3347466a60caea411d67c4b5b7,Joint transfer component analysis and metric learning for person re-identification,"Joint transfer component analysis and
+metric learning for person re-identification
+Yixiu Liu, Yunzhou Zhang✉, Sonya Coleman and
+Jianning Chi
+and efficient metric
+A novel
+learning strategy for person
+re-identification is proposed. Person re-identification is formulated as
+multi-domain learning problem. The assumption that the feature dis-
+tributions from different camera views are the same is overthrown in
+this Letter. ID-based transfer component analysis (IDB-TCA) is pro-
+posed to learn a shared subspace, in which the differences in the
+feature distribution between source domain and target domain are sig-
+nificantly reduced. Experimental evaluation on the CUHK01 dataset
+demonstrates that metric learning with IDB-TCA embedded outper-
+forms state-of-art metric methods for person re-identification.
+Introduction: Person re-identification, aiming to finding the images that
+match the target person in a large-scale image library, greatly reduces the
+time cost of human search. Due to its great significance to visual super-
+vision, it has rapidly become a research hotspot in the field of computer"
+9c93512df188d7dbab63ebe47586a930559e6279,Development of an Efficient Face Recognition System based on Linear and Nonlinear Algorithms,"Development of an Efficient Face Recognition System based on Linear and Nonlinear Algorithms
+{tag} {/tag}
+International Journal of Computer Applications
+Foundation of Computer Science (FCS), NY, USA
+Volume 134
+Number 7
+Year of Publication: 2016
+Authors:
+Filani Araoluwa S., Adetunmbi Adebayo O.
+10.5120/ijca2016907932
+{bibtex}2016907932.bib{/bibtex}"
+9cd8e1ccc5a410c7f31c7e404588597c0bb1952b,Whats Your Type ? Personalized Prediction of Facial Attractiveness,"Whats Your Type? Personalized Prediction of
+Facial Attractiveness
+Sam Crognale, Computer Science, Danish Shabbir Electrical Engineering
+INTRODUCTION
+Attempts to obtain a universal model of facial beauty by
+the way of symmetry, golden ratios, and measured
+placement of various facial features fall short in explaining
+the varied attraction that is actually witnessed in the world.
+In this investigation, we devise an application to give a user
+some insight about their ‘type’ as users swipe yes or no on a
+large dataset of images
+There is a wealth of interesting literature attempting to
+map the psychophysics of attraction. For example, Johnston
+and Franklin (1993) use a genetic algorithm which evolves a
+“most beautiful” female face according to interactive user
+selections. They sought to mimic the way humans filter for
+features they find the most attractive.
+Our approach builds on Kagian et. al (2007), where it was
+shown that feature selection and training procedure with the
+original geometric features instead of the eigenfeatures fails"
+9c1664f69d0d832e05759e8f2f001774fad354d6,Action Representations in Robotics: A Taxonomy and Systematic Classification,"Action representations in robotics: A
+taxonomy and systematic classification
+Journal Title
+XX(X):1–32
+(cid:13)The Author(s) 2016
+Reprints and permission:
+sagepub.co.uk/journalsPermissions.nav
+DOI: 10.1177/ToBeAssigned
+www.sagepub.com/
+Philipp Zech, Erwan Renaudo, Simon Haller, Xiang Zhang and Justus Piater"
+9caa7f125d3e861450bc3685699fceeaebea04d8,Designing Video Surveillance Systems as Services,"Designing Video Surveillance Systems as
+Services
+R. Cucchiara and A. Prati and R. Vezzani"
+9c2039d036c01e421176d33c1436633d03be4678,Review of person re-identification techniques,"Received on 21st February 2013
+Revised on 14th November 2013
+Accepted on 18th December 2013
+doi: 10.1049/iet-cvi.2013.0180
+www.ietdl.org
+ISSN 1751-9632
+Review of person re-identification techniques
+Mohammad Ali Saghafi1, Aini Hussain1, Halimah Badioze Zaman2,
+Mohamad Hanif Md. Saad1
+Faculty of Engineering and Built Environment, Universiti Kebangsaan Malaysia (UKM), Bangi, Malaysia
+Institute of Visual Informatics, Universiti Kebangsaan Malaysia (UKM), Bangi, Malaysia
+E-mail:"
+9c07704226e536834c4a8c01e1eb428584bacec6,Benchmarking Single-Image Dehazing and Beyond,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+Benchmarking Single Image Dehazing and Beyond
+Boyi Li*, Wenqi Ren*, Member, IEEE, Dengpan Fu*, Dacheng Tao, Fellow, IEEE, Dan Feng, Member, IEEE,
+Wenjun Zeng, Fellow, IEEE and Zhangyang Wang†, Member, IEEE."
+9cc3172efb42d2f9fa1b9ae7b7eef9cc349cdef9,Imbalanced Deep Learning by Minority Class Incremental Rectification,"Imbalanced Deep Learning by Minority Class
+Incremental Rectification
+Qi Dong, Shaogang Gong, and Xiatian Zhu"
+9c59304a619b7d503be95bd560f90be976a5309a,DenseASPP for Semantic Segmentation in Street Scenes,"DenseASPP for Semantic Segmentation in Street Scenes
+Maoke Yang
+Kun Yu
+Chi Zhang
+DeepMotion
+Zhiwei Li
+Kuiyuan Yang
+{maokeyang, kunyu, chizhang, zhiweili,"
+9cd3ea5cbbe0716fe19ff750940222cdedb22fc8,Learning to Attend On Essential Terms: An Enhanced Retriever-Reader Model for Scientific Question Answering,"Learning to Attend On Essential Terms: An Enhanced Retriever-Reader
+Model for Scientific Question Answering
+Jianmo Ni1,2∗, Chenguang Zhu1, Weizhu Chen1, Julian McAuley2
+Microsoft Business Applications Group AI Research
+Department of Computer Science, UC San Diego"
+9c065dfb26ce280610a492c887b7f6beccf27319,Learning from Video and Text via Large-Scale Discriminative Clustering,"Learning from Video and Text via Large-Scale Discriminative Clustering
+Antoine Miech1,2
+Jean-Baptiste Alayrac1,2
+Piotr Bojanowski2
+Ivan Laptev 1,2
+Josef Sivic1,2,3
+´Ecole Normale Sup´erieure
+Inria
+CIIRC"
+9c781f7fd5d8168ddae1ce5bb4a77e3ca12b40b6,Attribute Based Face Classification Using Support Vector Machine,"International Research Journal of Engineering and Technology (IRJET) e-ISSN: 2395 -0056
+Volume: 03 Issue: 07 | July-2016 www.irjet.net p-ISSN: 2395-0072
+Attribute Based Face Classification Using Support Vector Machine
+Brindha.M1, Amsaveni.R2
+Research Scholar, Dept. of Computer Science, PSGR Krishnammal College for Women, Coimbatore
+Assistant Professor, Dept. of Information Technology, PSGR Krishnammal College for Women, Coimbatore."
+9c8da385750db215dc0728dc310251b320d319af,Deep embodiment: grounding semantics in perceptual modalities,"Technical Report
+UCAM-CL-TR-899
+ISSN 1476-2986
+Number 899
+Computer Laboratory
+Deep embodiment:
+grounding semantics
+in perceptual modalities
+Douwe Kiela
+February 2017
+5 JJ Thomson Avenue
+Cambridge CB3 0FD
+United Kingdom
+phone +44 1223 763500
+http://www.cl.cam.ac.uk/"
+9c8a2d66b8fd6973751b8ee2fe6738327968cfcb,Exploring a model of far-from-equilibrium computation,"Exploring a model of far-from-equilibrium
+computation
+R˘azvan V. Florian
+Center for Cognitive and Neural Studies (Coneural)
+Str. Saturn 24, 400504 Cluj-Napoca, Romania
+July 10, 2005"
+9c49e4ba8ad0ba4634fe9306fb612695ed2b8cae,Satellite Imagery Feature Detection using Deep Convolutional Neural Network: A Kaggle Competition,"Satellite Imagery Feature Detection using
+Deep Convolutional Neural Network: A Kaggle Competition
+Vladimir Iglovikov
+True Accord
+Sergey Mushinskiy
+Open Data Science
+Vladimir Osin
+AeroState"
+9ce0d64125fbaf625c466d86221505ad2aced7b1,Recognizing expressions of children in real life scenarios View project PhD ( Doctor of Philosophy ) View project,"Saliency Based Framework for Facial Expression
+Recognition
+Rizwan Ahmed Khan, Alexandre Meyer, Hubert Konik, Saïda Bouakaz
+To cite this version:
+Rizwan Ahmed Khan, Alexandre Meyer, Hubert Konik, Saïda Bouakaz. Saliency Based Framework for
+Facial Expression Recognition. Frontiers of Computer Science, 2017, <10.1007/s11704-017-6114-9>.
+<hal-01546192>
+HAL Id: hal-01546192
+https://hal.archives-ouvertes.fr/hal-01546192
+Submitted on 23 Jun 2017
+HAL is a multi-disciplinary open access
+archive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+abroad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de"
+9c6d92f3d796242332ebf419a4f9b584864cfa15,Genetic Model Optimization for Hausdorff Distance-Based Face Localization,"(cid:176) In Proc. International ECCV 2002 Workshop on Biometric Authentication,
+Springer, Lecture Notes in Computer Science, LNCS-2359, pp. 103{111,
+Copenhagen, Denmark, June 2002.
+Genetic Model Optimization
+for Hausdorfi Distance-Based Face Localization
+Klaus J. Kirchberg, Oliver Jesorsky, and Robert W. Frischholz
+BioID AG, Germany
+WWW home page: http://www.bioid.com"
+9ca2dfe8a6265c4f6ea12bae0e7ff6ffc9128226,Dialog-based Interactive Image Retrieval,"Dialog-based Interactive Image Retrieval
+Xiaoxiao Guo†
+IBM Research AI
+Hui Wu†
+IBM Research AI
+Steven Rennie
+Fusemachines Inc.
+Gerald Tesauro
+IBM Research AI"
+9cf07922cf91c4aea66c8d72606ca444f4607cc6,Distinct neural activation patterns underlie economic decisions in high and low psychopathy scorers.,"doi:10.1093/scan/nst093
+SCAN (2014) 9,1099^1107
+Distinct neural activation patterns underlie economic
+decisions in high and low psychopathy scorers
+Joana B. Vieira,1,2,3 Pedro R. Almeida,1,4 Fernando Ferreira-Santos,1 Fernando Barbosa,1 Joa˜o Marques-Teixeira,1
+and Abigail A. Marsh3
+Laboratory of Neuropsychophysiology, Faculty of Psychology and Educational Sciences, 2Faculty of Medicine, University of Porto, 4200-135
+Porto, Portugal, 3Department of Psychology, Georgetown University, Washington, DC 20057, USA, and 4School of Criminology, Faculty of Law,
+University of Porto, 4200-135 Porto, Portugal
+Psychopathic traits affect social functioning and the ability to make adaptive decisions in social interactions. This study investigated how psychopathy
+affects the neural mechanisms that are recruited to make decisions in the ultimatum game. Thirty-five adult participants recruited from the community
+underwent functional magnetic resonance imaging scanning while they performed the ultimatum game under high and low cognitive load. Across load
+conditions, high psychopathy scorers rejected unfair offers in the same proportion as low scorers, but perceived them as less unfair. Among low
+scorers, the perceived fairness of offers predicted acceptance rates, whereas in high scorers no association was found. Imaging results revealed
+that responses in each group were associated with distinct patterns of brain activation, indicating divergent decision mechanisms. Acceptance of
+unfair offers was associated with dorsolateral prefrontal cortex activity in low scorers and ventromedial prefrontal cortex activity in high scorers. Overall,
+our findings point to distinct motivations for rejecting unfair offers in individuals who vary in psychopathic traits, with rejections in high psychopathy
+scorers being probably induced by frustration. Implications of these results for models of ventromedial prefrontal cortex dysfunction in psychopathy
+are discussed.
+Keywords: psychopathy; functional magnetic resonance imaging; ultimatum game; ventromedial prefrontal cortex"
+022edc074693c52d4e689947bd2def8b2117fa8b,A super-resolution method for low-quality face image through RBF-PLS regression and neighbor embedding,"978-1-4673-0046-9/12/$26.00 ©2012 IEEE
+ICASSP 2012"
+022d74ae2f8680e780b18e0cbb041d5c5a57c7a5,Video Salient Object Detection via Fully Convolutional Networks,"Video Salient Object Detection via
+Fully Convolutional Networks
+Wenguan Wang, Jianbing Shen, Senior Member, IEEE, and Ling Shao, Senior Member, IEEE"
+02601d184d79742c7cd0c0ed80e846d95def052e,Graphical Representation for Heterogeneous Face Recognition,"Graphical Representation for Heterogeneous
+Face Recognition
+Chunlei Peng, Xinbo Gao, Senior Member, IEEE, Nannan Wang, Member, IEEE, and Jie Li"
+02fbf86b975c3f45b04de8288d1565cce8b53f62,A real-time pedestrian detection system based on structure and appearance classification,"Anchorage Convention District
+May 3-8, 2010, Anchorage, Alaska, USA
+978-1-4244-5040-4/10/$26.00 ©2010 IEEE"
+02e43d9ca736802d72824892c864e8cfde13718e,Transferring a semantic representation for person re-identification and search,"Transferring a Semantic Representation for Person Re-Identification and
+Search
+Shi, Z; Yang, Y; Hospedales, T; XIANG, T; IEEE Conference on Computer Vision and
+Pattern Recognition
+© 2015 IEEE. Personal use of this material is permitted. Permission from IEEE must be
+obtained for all other uses, in any current or future media, including reprinting/republishing
+this material for advertising or promotional purposes, creating new collective works, for resale
+or redistribution to servers or lists, or reuse of any copyrighted component of this work in
+other works.
+For additional information about this publication click this link.
+http://qmro.qmul.ac.uk/xmlui/handle/123456789/10075
+Information about this research object was correct at the time of download; we occasionally
+make corrections to records, please therefore check the published record when citing. For
+more information contact"
+02fda07735bdf84554c193811ba4267c24fe2e4a,Illumination Invariant Face Recognition Using Near-Infrared Images,"Illumination Invariant Face Recognition
+Using Near-Infrared Images
+Stan Z. Li, Senior Member, IEEE, RuFeng Chu, ShengCai Liao, and Lun Zhang"
+02ccd5f0eb9a48a6af088197b950fb30a8e3abcc,Scaling for Multimodal 3D Object Detection,"Scaling for Multimodal 3D Object Detection
+Andrej Karpathy
+Stanford"
+02a99a43670ab83e77de9d935eb8d3d164e1972c,Joint Segmentation and Pose Tracking of Human in Natural Videos,"Joint Segmentation and Pose Tracking of Human in Natural Videos∗
+Taegyu Lim1,2
+Seunghoon Hong2
+Bohyung Han2
+Joon Hee Han2
+DMC R&D Center, Samsung Electronics, Korea
+Department of Computer Science and Engineering, POSTECH, Korea"
+0241513eeb4320d7848364e9a7ef134a69cbfd55,Supervised translation-invariant sparse coding,"Supervised Translation-Invariant Sparse
+Coding
+¹Jianchao Yang, ²Kai Yu, and ¹Thomas Huang
+¹University of Illinois at Urbana Champaign
+²NEC Laboratories America at Cupertino"
+02dd0af998c3473d85bdd1f77254ebd71e6158c6,PPP: Joint Pointwise and Pairwise Image Label Prediction,"PPP: Joint Pointwise and Pairwise Image Label Prediction
+Yilin Wang1 Suhang Wang1
+Jiliang Tang2 Huan Liu1 Baoxin Li1
+Department of Computer Science, Arizona State Univerity
+Yahoo Research"
+026ca771bd3995748b477e100ed4283a9bf8215a,Predicting performance of a face recognition system based on image quality,"Predicting Performance of a Face
+Recognition System Based on
+Image Quality
+Abhishek Dutta"
+023da8828f9c039c20ac9267a6b37813b74d4824,Free supervision from video games,"Free supervision from video games
+Philipp Kr¨ahenb¨uhl
+UT Austin"
+02086be014c4a276663e66ffde4d14f9c4cebe7e,BiggerPicture: data-driven image extrapolation using graph matching,"This is an Open Access document downloaded from ORCA, Cardiff University's institutional
+repository: http://orca.cf.ac.uk/67868/
+This is the author’s version of a work that was submitted to / accepted for publication.
+Citation for final published version:
+Wang, Miao, Lai, Yukun, Liang, Yuan, Martin, Ralph Robert and Hu, Shi-Min 2014. Biggerpicture:
+data-driven image extrapolation using graph matching. ACM Transactions on Graphics 33 (6) , 173.
+10.1145/2661229.2661278 file
+Publishers page: http://dx.doi.org/10.1145/2661229.2661278
+<http://dx.doi.org/10.1145/2661229.2661278>
+Changes made as a result of publishing processes such as copy-editing, formatting and page
+numbers may not be reflected in this version. For the definitive version of this publication, please
+refer to the published source. You are advised to consult the publisher’s version if you wish to cite
+Please note:
+this paper.
+This version is being made available in accordance with publisher policies. See
+http://orca.cf.ac.uk/policies.html for usage policies. Copyright and moral rights for publications
+made available in ORCA are retained by the copyright holders."
+02b0bf28f34c3c403abecd2fb4fb7d4969c0e0db,Learning Disentangled Joint Continuous and Discrete Representations,"Learning Disentangled Joint Continuous and Discrete
+Representations
+Schlumberger Software Technology Innovation Center
+Emilien Dupont
+Menlo Park, CA, USA"
+0252256fa23eceb54d9eea50c9fb5c775338d9ea,Application-driven Advances in Multi-biometric Fusion,"Application-driven Advances
+in Multi-biometric Fusion
+dem Fachbereich Informatik
+der Technischen Universität Darmstadt
+vorzulegende
+DISSERTATION
+zur Erlangung des akademischen Grades eines
+Doktor-Ingenieurs (Dr.-Ing.)
+M.Sc. Naser Damer
+geboren in Amman, Jordanien
+Referenten der Arbeit:
+Prof. Dr. Arjan Kuijper
+Technische Universität Darmstadt
+Prof. Dr. Dieter W. Fellner
+Technische Universität Darmstadt
+Prof. Dr. Raghavendra Ramachandra
+Norwegian University of Science and Technology
+Tag der Einreichung:
+Tag der mündlichen Prüfung:
+2/01/2018"
+020d97ca2bf617b7ffed5a31aa8a27ffa5efadbb,An Efficient and Flexible FPGA Implementation of a Face Detection System,"Fekih, H. B., Elhossini, A., & Juurlink, B.
+An Efficient and Flexible FPGA
+Implementation of a Face Detection
+System.
+Chapter in book |
+This version is available at https://doi.org/10.14279/depositonce-6778
+Accepted manuscript (Postprint)
+This is a post-peer-review, pre-copyedit version of an article published in Lecture Notes in Computer
+Science. The final authenticated version is available online at:
+http://dx.doi.org/10.1007/978-3-319-16214-0_20.
+Fekih, H. B., Elhossini, A., & Juurlink, B. (2015). An Efficient and Flexible FPGA Implementation of a Face
+Detection System. In Lecture Notes in Computer Science (pp. 243–254). Springer International
+Publishing. https://doi.org/10.1007/978-3-319-16214-0_20
+Terms of Use
+Copyright applies. A non-exclusive, non-transferable and
+limited right to use is granted. This document is intended
+solely for personal, non-commercial use."
+028dc6a134f1204bd9ae28213e2e6665e82ddcb0,Integral Normalized Gradient Image A Novel Illumination Insensitive Representation,"Integral Normalized Gradient Image
+A Novel Illumination Insensitive
+Representation
+Samsung Advanced Institute of Technology
+E-mail:"
+029317f260b3303c20dd58e8404a665c7c5e7339,Character Identification in Feature-Length Films Using Global Face-Name Matching,"Character Identification in Feature-Length Films
+Using Global Face-Name Matching
+Yi-Fan Zhang, Student Member, IEEE, Changsheng Xu, Senior Member, IEEE, Hanqing Lu, Senior Member, IEEE,
+and Yeh-Min Huang, Member, IEEE"
+0273414ba7d56ab9ff894959b9d46e4b2fef7fd0,Photographic home styles in Congress: a computer vision approach,"Photographic home styles in Congress: a
+computer vision approach∗
+L. Jason Anastasopoulos†.
+Dhruvil Badani‡
+Crystal Lee§
+Shiry Ginosar¶
+Jake Williams(cid:107)
+December 1, 2016"
+02aff7faf2f6b775844809805424417eed30f440,"A Tale of Three Probabilistic Families: Discriminative, Descriptive and Generative Models","QUARTERLY OF APPLIED MATHEMATICS
+VOLUME , NUMBER 0
+XXXX XXXX, PAGES 000–000
+A TALE OF THREE PROBABILISTIC FAMILIES: DISCRIMINATIVE,
+DESCRIPTIVE AND GENERATIVE MODELS
+YING NIAN WU (Department of Statistics, University of California, Los Angeles),
+RUIQI GAO (Department of Statistics, University of California, Los Angeles),
+TIAN HAN (Department of Statistics, University of California, Los Angeles),
+SONG-CHUN ZHU (Department of Statistics, University of California, Los Angeles)"
+02e133aacde6d0977bca01ffe971c79097097b7f,Convolutional Neural Fabrics,
+02567fd428a675ca91a0c6786f47f3e35881bcbd,Deep Label Distribution Learning With Label Ambiguity,"ACCEPTED BY IEEE TIP
+Deep Label Distribution Learning
+With Label Ambiguity
+Bin-Bin Gao, Chao Xing, Chen-Wei Xie, Jianxin Wu, Member, IEEE, and Xin Geng, Member, IEEE"
+0296fc4d042ca8657a7d9dd02df7eb7c0a0017ad,Subspace Learning from Image Gradient Orientations,"This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication.
+Subspace Learning from Image Gradient
+Orientations
+Georgios Tzimiropoulos, Member, IEEE, Stefanos Zafeiriou Member, IEEE, and Maja Pantic Fellow, IEEE"
+02bee2cef6b04e6b57cfa3fd54cabc756f0c2e8d,Data-driven methods for interactive visual content creation and manipulation,"Data-driven Methods for
+Interactive Visual Content Creation
+and Manipulation
+Dissertation zur Erlangung des Grades des
+Doktors der Ingenieurwissenschaften der
+Naturwissenschaftlich-Technischen Fakultäten der
+Universität des Saarlandes
+Vorgelegt durch
+Arjun Jain
+Max-Planck-Institut Informatik
+Campus E1 4
+66123 Saarbrücken
+Germany
+am 4. February 2013 in Saarbrücken"
+02e9f1bb203a5ade98308eaff4f6a5c96a2c11e0,Self-Supervised Relative Depth Learning for Urban Scene Understanding,"Self-Supervised Relative Depth Learning for
+Urban Scene Understanding
+Huaizu Jiang1,
+Erik Learned-Miller1
+Gustav Larsson2, Michael Maire3, Greg Shakhnarovich3
+UMass Amherst
+University of Chicago
+TTI-Chicago"
+02af5e40653b5a545b62aa6aebfaca6557f4173d,Sensor fusion for human safety in industrial workcells,"Sensor Fusion for Human Safety in Industrial Workcells*
+Paul Rybski1, Peter Anderson-Sprecher1, Daniel Huber1, Chris Niessl1, Reid Simmons1
+Figure 1: An example of our approach. (a) The workcell as seen
+by one of the 3D sensors. The red region indicates the adaptive
+danger zone surrounding the moving robot arm. (b) As the person
+enters the workcell, the green region indicates the adaptive safety
+zone surrounding the person. (c) When the person gets too close
+to the robot, the safety zone and danger zones intersect (shown
+with a red circle), and the robot automatically halts. LIGHTEN THE
+CONTRAST ON THESE FIGURES TO MAKE THEM EASIER TO SEE"
+029fa43a49a2f5df4bee8aa6a9574f8da5098f98,"Learning event representation: As sparse as possible, but not sparser","Learning event representation: As sparse as possible, but not sparser
+Tuan Do and James Pustejovsky
+Department of Computer Science
+Brandeis University
+Waltham, MA 02453 USA"
+027beed800f7d5e20194caf6d689345045e8d0d4,Smoothed Dilated Convolutions for Improved Dense Prediction,"Smoothed Dilated Convolutions for Improved Dense Prediction
+Zhengyang Wang
+Washington State University
+Pullman, Washington, USA
+Shuiwang Ji
+Washington State University
+Pullman, Washington, USA"
+02a2c5b332d883d726929474060a7e62411c010a,Totally Corrective Multiclass Boosting with Binary Weak Learners,"SEPTEMBER 2010
+with Binary Weak Learners
+Zhihui Hao, Chunhua Shen, Nick Barnes, and Bo Wang"
+02f038ed453de0551813159284746126168f5e15,Multi Channel-Kernel Canonical Correlation Analysis for Cross-View Person Re-Identification,"This is a pre-print version, the final version of the manuscript with more experiments can be found at:
+https://doi.org/10.1145/3038916
+Multi Channel-Kernel Canonical Correlation
+Analysis for Cross-View Person Re-Identification
+Giuseppe Lisanti, Svebor Karaman, Iacopo Masi"
+02e4025fd63f168810724156fb6b20b0b14dccdc,Local inter-session variability modelling for object classification,"This is the author’s version of a work that was submitted/accepted for pub-
+lication in the following source:
+Anantharajah, Kaneswaran, Ge, ZongYuan, McCool, Christopher, Den-
+man, Simon, Fookes, Clinton B., Corke, Peter, Tjondronegoro, Dian W., &
+Sridharan, Sridha
+(2014)
+Local inter-session variability modelling for object classification. In
+2014), 24-26 March 2014, Steamboat Springs, CO.
+This file was downloaded from: https://eprints.qut.edu.au/67786/
+(cid:13) Copyright 2014 [please consult the author]
+Notice: Changes introduced as a result of publishing processes such as
+copy-editing and formatting may not be reflected in this document. For a
+definitive version of this work, please refer to the published source:"
+02b72a5a4389cb32a7dd784b1c9084e8412e2e78,Hierarchical Bayesian Image Models,"We are IntechOpen,
+the world’s leading publisher of
+Open Access books
+Built by scientists, for scientists
+,700
+08,500
+.7 M
+Open access books available
+International authors and editors
+Downloads
+Our authors are among the
+Countries delivered to
+TOP 1%
+2.2%
+most cited scientists
+Contributors from top 500 universities
+Selection of our books indexed in the Book Citation Index
+in Web of Science™ Core Collection (BKCI)
+Interested in publishing with us?
+Contact"
+02e97e65fd0ec9a6d98a255d0396eb796a5e444a,Online Multiple View Tracking: Targets Association Across Cameras,"Q.LE, D.CONTE, M.HIDANE: COLLABORATIVE TRACKING
+Online Multiple View Tracking:
+Targets Association Across Cameras
+Quoc Cuong LE1
+Donatello CONTE1
+Moncef HIDANE2
+LIFAT
+University of Tours,
+Tours, France
+Computer Science Department
+INSA Centre Val de Loire,
+Blois, France"
+0278acdc8632f463232e961563e177aa8c6d6833,Selective Transfer Machine for Personalized Facial Expression Analysis,"Selective Transfer Machine for Personalized
+Facial Expression Analysis
+Wen-Sheng Chu, Fernando De la Torre, and Jeffrey F. Cohn
+INTRODUCTION
+Index Terms—Facial expression analysis, personalization, domain adaptation, transfer learning, support vector machine (SVM)
+A UTOMATIC facial AU detection confronts a number of"
+0291b43490e02303c9414f03980e606950ec7261,Pose-conditioned joint angle limits for 3D human pose reconstruction,"Pose-Conditioned Joint Angle Limits for 3D Human Pose Reconstruction
+Ijaz Akhter, Michael J. Black
+Max Planck Institute for Intelligent Systems, Tübingen, Germany
+Figure 1: Joint-limit dataset. We captured a new dataset for learning pose-
+dependent joint angle limits. This includes an extensive variety of stretching
+poses. A few sample images are shown here. We use this dataset to learn
+pose-conditioned joint-angle limits. The dataset and the learned joint-angle
+model will be made publicly available.
+Figure 2: We use our joint-angle-limit prior for 3D pose estimation given
+2D joint locations in an image. The proposed prior helps in reducing the
+space of possible solutions to only valid 3D human poses. Our prior can
+be also used for many other problems where estimating 3D human pose is
+ambiguous.
+Accurate modeling of priors over 3D human pose is fundamental to many
+problems in computer vision. Most previous priors are either not general
+enough for the diverse nature of human poses or not restrictive enough to
+avoid invalid 3D poses. We propose a physically-motivated prior that only
+allows anthropometrically valid poses and restricts the ones that are invalid.
+One can use joint-angle limits to evaluate whether two connected bones
+are valid or not. However, it is established in biomechanics that there are"
+02bee6bf61566cfc3963fe42b320a740a9458920,Efficient Pedestrian Detection via Rectangular Features Based on a Statistical Shape Model,"This article has been accepted for inclusion in a future issue of this journal. Content is final as presented, with the exception of pagination.
+Efficient Pedestrian Detection via Rectangular
+Features Based on a Statistical Shape Model
+Shanshan Zhang, Student Member, IEEE, Christian Bauckhage, Member, IEEE, and Armin B. Cremers"
+02a88a2f2765b17c9ea76fe13148b4b8a9050b95,DeepPose: Human Pose Estimation via Deep Neural Networks,"DeepPose: Human Pose Estimation via Deep Neural Networks
+Alexander Toshev
+Christian Szegedy
+Google
+600 Amphitheatre Pkwy
+Mountain View, CA 94043
+mainly by the first challenge, the need to search in the large
+space of all possible articulated poses. Part-based models
+lend themselves naturally to model articulations ([16, 8])
+and in the recent years a variety of models with efficient
+inference have been proposed ([6, 19]).
+The above efficiency, however, is achieved at the cost of
+limited expressiveness – the use of local detectors, which
+reason in many cases about a single part, and most impor-
+tantly by modeling only a small subset of all interactions
+between body parts. These limitations, as exemplified in
+Fig. 1, have been recognized and methods reasoning about
+pose in a holistic manner have been proposed [15, 21] but
+with limited success in real-world problems.
+In this work we ascribe to this holistic view of human"
+02d6df5060281cf13fbef68a8f1ddc29983fe8b3,An Enhanced Default Approach Bias Following Amygdala Lesions in Humans.,"583804 PSSXXX10.1177/0956797615583804Harrison et al.Default Approach Bias Following Amygdala Lesions
+research-article2015
+Research Article
+An Enhanced Default Approach Bias
+Following Amygdala Lesions in Humans
+1 –13
+© The Author(s) 2015
+Reprints and permissions:
+sagepub.com/journalsPermissions.nav
+DOI: 10.1177/0956797615583804
+pss.sagepub.com
+Laura A. Harrison1, Rene Hurlemann2, and Ralph Adolphs1
+California Institute of Technology and 2University of Bonn"
+02cce8b08e4839d16f2142c5723fc009ccb4e3e1,Improving spatial codification in semantic segmentation,"IMPROVING SPATIAL CODIFICATION IN SEMANTIC SEGMENTATION
+Carles Ventura(cid:63)
+Kevin McGuinness†
+Xavier Gir´o-i-Nieto(cid:63)
+Ferran Marqu´es(cid:63)
+Ver´onica Vilaplana(cid:63)
+Noel E. O’Connor†
+(cid:63) Universitat Polit`ecnica de Catalunya (UPC), Barcelona, Spain
+Insight Centre for Data Analytics, Dublin City University (DCU), Ireland"
+026050f71175d235f3f91ca0e99e994c00f9b5a6,Supervised Discrete Hashing,"Supervised Discrete Hashing
+Fumin Shen1, Chunhua Shen2, Wei Liu3, Heng Tao Shen4
+University of Electronic Science and Technology of China. 2 University of Adelaide; and Australian Centre for Robotic Vision. 3IBM Research.
+The University of Queensland.
+Recently, learning based hashing techniques have attracted broad research
+interests due to the resulting efficient storage and retrieval of images, videos,
+documents, etc. However, a major difficulty of learning to hash lies in han-
+dling the discrete constraints imposed on the needed hash codes. In general,
+the discrete constraints imposed on the binary codes that the target hash
+functions generate lead to mixed-integer optimization problems—which is
+generally NP hard. To simplify the optimization involved in a binary code
+learning procedure, most of the aforementioned methods choose to first
+solve a relaxed problem through directly discarding the discrete constraints,
+and then threshold the continuous outputs to be binary. This greatly simpli-
+fies the optimization but, unfortunately, the approximated solution is typi-
+cally of low quality and often makes the final hash functions less effective,
+possibly due to the accumulated quantization errors. This is especially the
+case when long-length codes are needed.
+Directly learning the binary codes without relaxations would be pre-
+ferred if (and only if) a tractable and scalable solver is available. The impor-"
+026509ad687f9cdaba8f2dac0fe5720e0553a8bd,Integrated pedestrian classification and orientation estimation,"Integrated Pedestrian Classification
+and Orientation Estimation
+Markus Enzweiler1
+Dariu M. Gavrila2,3
+Image & Pattern Analysis Group, Univ. of Heidelberg, Germany
+Environment Perception, Group Research, Daimler AG, Ulm, Germany
+Intelligent Autonomous Systems Group, Univ. of Amsterdam, The Netherlands"
+02f1d5c896ced7f6f002eb7514ba49eca940b75c,A Comparison of Efficient Global Image Features for Localizing Small Mobile Robots,"A Comparison of Efficient Global Image Features
+for Localizing Small Mobile Robots
+Marius Hofmeister, Philipp Vorst and Andreas Zell
+Computer Science Department, University of Tübingen, Tübingen, Germany"
+a49b661e42aea6f205e543a80106fc9c6ff0f9d4,Deep Virtual Stereo Odometry: Leveraging Deep Depth Prediction for Monocular Direct Sparse Odometry,"Deep Virtual Stereo Odometry:
+Leveraging Deep Depth Prediction for
+Monocular Direct Sparse Odometry
+Nan Yang1,2, Rui Wang1,2, J¨org St¨uckler1, and Daniel Cremers1,2
+Technical University of Munich
+Artisense"
+a45450824c6e8e6b42fd9bbf52871104b6c6ce8b,Optimizing the Latent Space of Generative Networks,"Optimizing the Latent Space of Generative Networks
+Piotr Bojanowski, Armand Joulin, David Lopez-Paz, Arthur Szlam
+{bojanowski, ajoulin, dlp,
+Facebook AI Research"
+a46f285b928aa547df8d8d8d63d2f9256a73aae7,Networked Decision Making for Poisson Processes With Applications to Nuclear Detection,"[16] E. D. Sontag, “Input-to-state stability: Basic concepts and results,” in
+Nonlinear and Optimal Control Theory, P. Nistri and G. Stefani, Eds.
+Berlin, Germany: Springer–Verlag, 2006, pp. 163–220.
+[17] Z.-P. Jiang, A. R. Teel, and L. Praly, “Small-gain theorem for ISS sys-
+tems and applications,” Mathem. of Control, Signals, and Syst., vol. 7,
+pp. 95–120, 1994.
+[18] A. R. Teel, “A nonlinear small gain theorem for the analysis of control
+systems with saturation,” IEEE Trans. Autom. Control, vol. AC-41, no.
+9, pp. 1256–1270, Sep. 1996.
+[19] Z.-P. Jiang and I. M. Y. Mareels, “A small-gain control method for
+nonlinear cascaded systems with dynamic uncertainties,” IEEE Trans.
+Autom. Control, vol. 42, no. 3, pp. 292–308, Mar. 1997.
+[20] S. Dashkovskiy, Z.-P. Jiang, and B. Rüffer, “Special issue on robust sta-
+bility and control of large-scale nonlinear systems,” Mathem. of Con-
+trol, Signals, and Syst., vol. 24, no. 1, pp. 1–2, 2012.
+[21] H. K. Khalil, Nonlinear Systems, third ed. Upper Saddle River, NJ:
+Prentice–Hall, 2002.
+[22] R. A. Horn and C. R. Johnson, Matrix Analysis. Cambridge, U.K.:
+Cambridge University Press, 1985.
+[23] W. Ren and R. W. Beard, “Consensus seeking in multiagent systems"
+a49acd70550c209965a6d39d7ff92d11f0a5b1b6,"YouTube Scale, Large Vocabulary Video Annotation","YouTube Scale, Large Vocabulary
+Video Annotation
+Nicholas Morsillo, Gideon Mann and Christopher Pal"
+a427ee25ef515ddd9cf50b4cc3a7376f57d58926,Human-Drone-Interaction: A Case Study to Investigate the Relation Between Autonomy and User Experience,"Human-Drone-Interaction: A Case Study to
+Investigate the Relation between Autonomy and
+User Experience
+Patrick Ferdinand Christ1,3(cid:63), Florian Lachner2,3(cid:63), Axel H¨osl3, Bjoern Menze1,
+Klaus Diepold3, and Andreas Butz2
+Image-based Biomedical Modeling Group,
+Technical University of Munich (TUM)
+{patrick.christ,
+Chair for Human-Computer-Interaction,
+University of Munich (LMU)
+{florian.lachner, axel.hoesl,
+Center for Digital and Technology Management,
+TUM and LMU
+Chair for Data Processing,
+Technical University of Munich (TUM)"
+a4a90a2db209db2d5c49adfd2091ede2d4130f60,Interactive Grounded Language Acquisition and Generalization in a 2D World,"Published as a conference paper at ICLR 2018
+INTERACTIVE GROUNDED LANGUAGE ACQUISITION
+AND GENERALIZATION IN A 2D WORLD
+Haonan Yu1, Haichao Zhang1 & Wei Xu1,2
+Baidu Research, Sunnyvale USA
+National Engineering Laboratory for Deep Learning Technology and Applications, Beijing China"
+a4a5ad6f1cc489427ac1021da7d7b70fa9a770f2,Gated spatio and temporal convolutional neural network for activity recognition: towards gated multimodal deep learning,"Yudistira and Kurita EURASIP Journal on Image and Video
+Processing (2017) 2017:85
+DOI 10.1186/s13640-017-0235-9
+EURASIP Journal on Image
+and Video Processing
+RESEARCH
+Open Access
+Gated spatio and temporal convolutional
+neural network for activity recognition:
+towards gated multimodal deep learning
+Novanto Yudistira1* and Takio Kurita2"
+a4f38e32c23fd1f5a1e1157a4e62b38731f2e5d8,Online Learning for Ship Detection in Maritime Surveillance,"Online Learning for Ship Detection
+in Maritime Surveillance
+Rob Wijnhoven1
+ViNotion1
+, Kris van Rens1, Egbert G. T. Jaspers1, Peter H. N. de With2
+University of Technol. Eindhoven2 CycloMedia Technol.3
+P.O. Box 2346
+5600 CH Eindhoven
+The Netherlands
+P.O. Box 513
+5600 MB Eindhoven
+The Netherlands"
+a416513aaf97060287bf3e64ccdc1ccf85106c07,Seasonal Separation of African Savanna Components Using Worldview-2 Imagery: A Comparison of Pixel- and Object-Based Approaches and Selected Classification Algorithms,"Article
+Seasonal Separation of African Savanna Components
+Using Worldview-2 Imagery: A Comparison of Pixel-
+and Object-Based Approaches and Selected
+Classification Algorithms
+˙Zaneta Kaszta 1,2,*, Ruben Van De Kerchove 1,3, Abel Ramoelo 4, Moses Azong Cho 4,
+Sabelo Madonsela 4, Renaud Mathieu 4,5 and Eléonore Wolff 1
+Institut de Gestion de l’Environnement et d’Aménagement de Territoire (IGEAT),
+Université Libre de Bruxelles, Brussels 1050, Belgium;
+School of Applied Environmental Sciences, Pietermaritzburg 3209, South Africa
+Mol 2400, Belgium;
+Council for Scientific and Industrial Research, Pretoria 0001, South Africa; (A.R.);
+(M.A.C.); (S.M.); (R.M.)
+5 Department of Geography, Geoinformatics and Meteorology, University of Pretoria,
+Pretoria 0028, South Africa
+* Correspondence: Tel.: +32-02-650-68-20
+Academic Editors: Giles M. Foody, Magaly Koch, Clement Atzberger and Prasad S. Thenkabail
+Received: 15 May 2016; Accepted: 8 September 2016; Published: 16 September 2016"
+a4bab165158b9627280fb3052b1c731210f2a901,"Pedestrian Localization, Tracking and Behavior Analysis from Multiple Cameras","Pedestrian Localization, Tracking and Behavior Analysis
+from Multiple Cameras
+THÈSE NO 4629 (2010)
+PRÉSENTÉE LE 9 AVRIL 2010
+À LA FACULTÉ INFORMATIQUE ET COMMUNICATIONS
+LABORATOIRE DE VISION PAR ORDINATEUR
+PROGRAMME DOCTORAL EN INFORMATIQUE, COMMUNICATIONS ET INFORMATION
+ÉCOLE POLYTECHNIQUE FÉDÉRALE DE LAUSANNE
+POUR L'OBTENTION DU GRADE DE DOCTEUR ÈS SCIENCES
+Jérôme BERCLAZ
+cceptée sur proposition du jury:
+Prof. P. Thiran, président du jury
+Prof. P. Fua, Dr F. Fleuret, directeurs de thèse
+Prof. M. Bierlaire, rapporteur
+Prof. H. Bischof, rapporteur
+Dr J. Ferryman, rapporteur
+Suisse"
+a40f8881a36bc01f3ae356b3e57eac84e989eef0,"End-to-end semantic face segmentation with conditional random fields as convolutional, recurrent and adversarial networks","End-to-end semantic face segmentation with conditional
+random fields as convolutional, recurrent and adversarial
+networks
+Umut Güçlü*, 1, Yağmur Güçlütürk*, 1,
+Meysam Madadi2, Sergio Escalera3, Xavier Baró4, Jordi González2,
+Rob van Lier1, Marcel van Gerven1"
+a4a0b5f08198f6d7ea2d1e81bd97fea21afe3fc3,Efficient Recurrent Residual Networks Improved by Feature Transfer,"E
+Feature Transfer
+MSc Thesis
+written by
+Yue Liu
+under the supervision of Dr. Silvia-Laura Pintea, Dr. Jan van Gemert,
+nd Dr. Ildiko Suveg and submitted to the Board of Examiners for the
+degree of
+Master of Science
+t the Delft University of Technology.
+Date of the public defense: Members of the Thesis Committee:
+August 31, 2017
+Prof. Marcel Reinders
+Dr. Jan van Gemert
+Dr. Julian Urbano Merino
+Dr. Silvia-Laura Pintea
+Dr. Ildiko Suveg (Bosch)
+Dr. Gonzalez Adrlana (Bosch)"
+a4ee9f089ab9a48a6517a6967281247339a51747,Resembled Generative Adversarial Networks: Two Domains with Similar Attributes,"DUHYEON BANG, HYUNJUNG SHIM: RESEMBLED GAN
+Resembled Generative Adversarial Networks:
+Two Domains with Similar Attributes
+School of Integrated Technology, Yonsei
+University, South Korea
+Duhyeon Bang
+Hyunjung Shim"
+a47e51dd3f73817679ff0e987a0064d43db25060,Grad-CAM: Why did you say that? Visual Explanations from Deep Networks via Gradient-based Localization,"Visual Explanations from Deep Networks via Gradient-based Localization
+Grad-CAM: Why did you say that?
+Ramprasaath R. Selvaraju
+Abhishek Das
+Devi Parikh
+Ramakrishna Vedantam
+Dhruv Batra
+Virginia Tech
+Michael Cogswell
+{ram21, abhshkdz, vrama91, cogswell, parikh,
+(a) Original Image
+(b) Guided Backprop ‘Cat’
+(c) Grad-CAM for ‘Cat’
+(d) Guided Grad-CAM ‘Cat’
+(e) Occlusion Map ‘Cat’
+(f) ResNet Grad-CAM ‘Cat’
+(g) Original Image
+(h) Guided Backprop ‘Dog’
+(i) Grad-CAM for ‘Dog’
+(l) ResNet Grad-CAM ‘Dog’"
+a44b91f46ba66c8279b93caab6842444de0c9343,Frequency-domain Tracking Spatial-domain Detection Generic Object Proposal Histogram based Representation Detection Result Tracking State Estimation Spatial Regressor Correlation Model IFFT Search Space Feature Extraction Correlation Map Correlation Model FFT,"Monocular Long-term Target Following on UAVs
+Rui Li ∗
+Minjian Pang†
+Cong Zhao ‡
+Guyue Zhou ‡
+Lu Fang †§"
+a493a731dadababb6f2ae0b4b6233d861206345b,Studio2Shop: from studio photo shoots to fashion articles,"Studio2Shop: from studio photo shoots to fashion articles
+Julia Lasserre1, Katharina Rasch1 and Roland Vollgraf
+Zalando Research, Muehlenstr. 25, 10243 Berlin, Germany
+Keywords:
+omputer vision, deep learning, fashion, item recognition, street-to-shop"
+a44590528b18059b00d24ece4670668e86378a79,Learning the Hierarchical Parts of Objects by Deep Non-Smooth Nonnegative Matrix Factorization,"Learning the Hierarchical Parts of Objects by Deep
+Non-Smooth Nonnegative Matrix Factorization
+Jinshi Yu, Guoxu Zhou, Andrzej Cichocki
+IEEE Fellow, and Shengli Xie IEEE Senior Member"
+a453863082a7fb42c9b402023294390eb4167fbe,Identifying Where to Focus in Reading Comprehension for Neural Question Generation,"Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 2067–2073
+Copenhagen, Denmark, September 7–11, 2017. c(cid:13)2017 Association for Computational Linguistics"
+a472d59cff9d822f15f326a874e666be09b70cfd,Visual Learning with Weakly Labeled Video a Dissertation Submitted to the Department of Computer Science and the Committee on Graduate Studies of Stanford University in Partial Fulfillment of the Requirements for the Degree of Doctor of Philosophy,"VISUAL LEARNING WITH WEAKLY LABELED VIDEO
+A DISSERTATION
+SUBMITTED TO THE DEPARTMENT OF COMPUTER SCIENCE
+AND THE COMMITTEE ON GRADUATE STUDIES
+OF STANFORD UNIVERSITY
+IN PARTIAL FULFILLMENT OF THE REQUIREMENTS
+FOR THE DEGREE OF
+DOCTOR OF PHILOSOPHY
+Kevin Tang
+May 2015"
+a47ac8569ab1970740cff9f1643f77e9143a62d4,Associative Compression Networks for Representation Learning,"Associative Compression Networks for Representation Learning
+Alex Graves 1 Jacob Menick 1 A¨aron van den Oord 1"
+a4c430b7d849a8f23713dc283794d8c1782198b2,Video Concept Embedding,"Video Concept Embedding
+Anirudh Vemula
+Rahul Nallamothu
+Syed Zahir Bokhari
+. Introduction
+In the area of natural language processing, there has been
+much success in learning distributed representations for
+words as vectors. Doing so has an advantage over using
+simple labels, or a one-hot coding scheme for representing
+individual words. In learning distributed vector representa-
+tions for words, we manage to capture semantic relatedness
+of words in vector distance. For example, the word vector
+for ”car” and ”road” should end up being closer together in
+the vector space representation than ”car” and ”penguin”.
+This has been very useful in NLP areas of machine transla-
+tion and semantic understanding.
+In the computer vision domain, video understanding is a
+very important topic.
+It is made hard due to the large
+mount of high dimensional data in videos. One strategy"
+a48c71153265d6da7fbc4b16327320a5cbfa6cba,Unite the People: Closing the loop between 3D and 2D Human Representations Supplementary Material,"Unite the People: Closing the loop between 3D and 2D Human Representations
+Supplementary Material
+Christoph Lassner1,2
+Javier Romero2
+Martin Kiefel2
+Federica Bogo2,3
+Michael J. Black2
+Peter V. Gehler1,2
+Bernstein Center for Comp. Neuroscience1
+Max-Planck Institute for Intelligent Systems2
+Microsoft3
+Otfried-M¨uller-Str. 25, T¨ubingen
+Spemannstr. 41, T¨ubingen
+1 Station Rd., Cambridge
+. Introduction
+We have obtained human segmentation labels to inte-
+grate shape information into the SMPLify 3D fitting pro-
+edure and for the evaluation of methods introduced in the
+main paper. The labels consist of foreground segmentation
+for multiple human pose datasets and six body part segmen-"
+a4f37cfdde3af723336205b361aefc9eca688f5c,Recent Advances in Face Recognition,"Recent Advances
+in Face Recognition"
+a32ebfa79097fdf5c9d44d2f74e33b7c8343425c,A Deeper Look at Dataset Bias,"Chapter 2
+A Deeper Look at Dataset Bias
+Tatiana Tommasi, Novi Patricia, Barbara Caputo and Tinne Tuytelaars"
+a30869c5d4052ed1da8675128651e17f97b87918,Fine-Grained Comparisons with Attributes,"Fine-Grained Comparisons with Attributes
+Aron Yu and Kristen Grauman"
+a32f28156b47fd262e04426806037d138bb3ed0b,Fisher’s linear discriminant (FLD) and support vector machine (SVM) in non-negative matrix factorization (NMF) residual space for face recognition,"Optica Applicata, Vol. XL, No. 3, 2010
+Fisher’s linear discriminant (FLD)
+nd support vector machine (SVM)
+in non-negative matrix factorization (NMF)
+residual space for face recognition
+CHANGJUN ZHOU, XIAOPENG WEI*, QIANG ZHANG, XIAOYONG FANG
+Key Laboratory of Advanced Design and Intelligent Computing, Dalian University,
+Ministry of Education, Dalian, 116622, China
+*Corresponding author:
+A novel method of Fisher’s linear discriminant (FLD) in the residual space is put forward for
+the representation of face images for face recognition, which is robust to the slight local
+feature changes. The residual images are computed by subtracting the reconstructed images from
+the original face images, and the reconstructed images are obtained by performing non-negative
+matrix factorization (NMF) on original images. FLD is applied to the residual images for extracting
+FLD subspace and the corresponding coefficient matrices. Furthermore, features are obtained by
+mapping the residual image to FLD subspace. Finally, the features are utilized to train and test
+support vector machines (SVMs) for face recognition. The computer simulation illustrates that
+this method is effective on the ORL database and the extended Yale face database B.
+Keywords: face recognition, Fisher linear discriminant (FLD), non-negative matrix factorization (NMF),
+residual image."
+a3ebacd8bcbc7ddbd5753935496e22a0f74dcf7b,"First International Workshop on Adaptive Shot Learning for Gesture Understanding and Production ASL4GUP 2017 Held in conjunction with IEEE FG 2017, in May 30, 2017, Washington DC, USA","First International Workshop on Adaptive Shot Learning
+for Gesture Understanding and Production
+ASL4GUP 2017
+Held in conjunction with IEEE FG 2017, in May 30, 2017,
+Washington DC, USA"
+a3d8b5622c4b9af1f753aade57e4774730787a00,Pose-Aware Person Recognition,"Pose-Aware Person Recognition
+Vijay Kumar (cid:63)
+Anoop Namboodiri (cid:63)
+(cid:63) CVIT, IIIT Hyderabad, India
+Manohar Paluri †
+Facebook AI Research
+C. V. Jawahar (cid:63)"
+a3fdba7975494c34552b33cf839f21d62734e6f0,Excavate Condition-invariant Space by Intrinsic Encoder,"Excavate Condition-invariant Space by Intrinsic Encoder
+Jian Xu, Chunheng Wang, Cunzhao Shi, and Baihua Xiao
+Institute of Automation, Chinese Academy of Sciences (CASIA)"
+a3017bb14a507abcf8446b56243cfddd6cdb542b,Face Localization and Recognition in Varied Expressions and Illumination,"Face Localization and Recognition in Varied
+Expressions and Illumination
+Hui-Yu Huang, Shih-Hang Hsu"
+a3c8c7da177cd08978b2ad613c1d5cb89e0de741,A Spatio-temporal Approach for Multiple Object Detection in Videos Using Graphs and Probability Maps,"A Spatio-temporal Approach for Multiple
+Object Detection in Videos Using Graphs
+nd Probability Maps
+Henrique Morimitsu1(B), Roberto M. Cesar Jr.1, and Isabelle Bloch2
+University of S˜ao Paulo, S˜ao Paulo, Brazil
+Institut Mines T´el´ecom, T´el´ecom ParisTech, CNRS LTCI, Paris, France"
+a3ccf7fa5c130c8bcd20cbcd356ad7a47cdd4296,SymNMF: nonnegative low-rank approximation of a similarity matrix for graph clustering,"Journal of Global Optimization manuscript No.
+(will be inserted by the editor)
+SymNMF: Nonnegative Low-Rank Approximation of
+Similarity Matrix for Graph Clustering
+Da Kuang · Sangwoon Yun · Haesun Park
+The final publication is available at Springer via http://dx.doi.org/10.1007/s10898-014-0247-2."
+a378fc39128107815a9a68b0b07cffaa1ed32d1f,Determining a Suitable Metric when Using Non-Negative Matrix Factorization,"Determining a Suitable Metric When using Non-negative Matrix Factorization∗
+David Guillamet and Jordi Vitri`a
+Computer Vision Center, Dept. Inform`atica
+Universitat Aut`onoma de Barcelona
+08193 Bellaterra, Barcelona, Spain"
+a32dadf343f811e6837b8ac5bab873674fa626b3,Moving Object Detection and Tracking in Forward Looking Infra-Red Aerial Imagery,"Moving Object Detection and Tracking
+in Forward Looking Infra-Red Aerial Imagery
+Subhabrata Bhattacharya, Haroon Idrees, Imran Saleemi, Saad Ali
+nd Mubarak Shah"
+a34d75da87525d1192bda240b7675349ee85c123,Naive-Deep Face Recognition: Touching the Limit of LFW Benchmark or Not?,"Naive-Deep Face Recognition: Touching the Limit of LFW Benchmark or Not?
+Erjin Zhou
+Face++, Megvii Inc.
+Zhimin Cao
+Face++, Megvii Inc.
+Qi Yin
+Face++, Megvii Inc."
+a3dc109b1dff3846f5a2cc1fe2448230a76ad83f,Active Appearance Model and Pca Based Face Recognition System,"J.Savitha et al, International Journal of Computer Science and Mobile Computing, Vol.4 Issue.4, April- 2015, pg. 722-731
+Available Online at www.ijcsmc.com
+International Journal of Computer Science and Mobile Computing
+A Monthly Journal of Computer Science and Information Technology
+ISSN 2320–088X
+IJCSMC, Vol. 4, Issue. 4, April 2015, pg.722 – 731
+RESEARCH ARTICLE
+ACTIVE APPEARANCE MODEL AND PCA
+BASED FACE RECOGNITION SYSTEM
+Mrs. J.Savitha M.Sc., M.Phil.
+Ph.D Research Scholar, Karpagam University, Coimbatore, Tamil Nadu, India
+Email:
+Dr. A.V.Senthil Kumar
+Director, Hindustan College of Arts and Science, Coimbatore, Tamil Nadu, India
+Email:"
+a3f69a073dcfb6da8038607a9f14eb28b5dab2db,3D-Aided Deep Pose-Invariant Face Recognition,Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intelligence (IJCAI-18)
+a38045ed82d6800cbc7a4feb498e694740568258,African American and Caucasian males ' evaluation of racialized female facial averages,"UNLV Theses, Dissertations, Professional Papers, and Capstones
+5-2010
+African American and Caucasian males' evaluation
+of racialized female facial averages
+Rhea M. Watson
+University of Nevada Las Vegas
+Follow this and additional works at: http://digitalscholarship.unlv.edu/thesesdissertations
+Part of the Cognition and Perception Commons, Race and Ethnicity Commons, and the Social
+Psychology Commons
+Repository Citation
+Watson, Rhea M., ""African American and Caucasian males' evaluation of racialized female facial averages"" (2010). UNLV Theses,
+Dissertations, Professional Papers, and Capstones. 366.
+http://digitalscholarship.unlv.edu/thesesdissertations/366
+This Thesis is brought to you for free and open access by Digital It has been accepted for inclusion in UNLV Theses, Dissertations,
+Professional Papers, and Capstones by an authorized administrator of Digital For more information, please contact"
+a357bc79b1ac6f2474ff6b9f001419745a8bc21c,Toward More Realistic Face Recognition Evaluation Protocols for the YouTube Faces Database,"Toward More Realistic Face Recognition Evaluation Protocols
+for the YouTube Faces Database
+Yoanna Mart´ınez-D´ıaz, Heydi M´endez-V´azquez, Leyanis L´opez-Avila
+Advanced Technologies Application Center (CENATAV)
+7A ♯21406 Siboney, Playa, P.C. 12200, Havana, Cuba
+Leonardo Chang
+L. Enrique Sucar
+Massimo Tistarelli
+Tecnol´ogico de Monterrey,
+Estado de Mexico, Mexico
+INAOE,
+University of Sassari,
+Puebla, Mexico
+Sassari, Italy"
+a3f78cc944ac189632f25925ba807a0e0678c4d5,Action Recognition in Realistic Sports Videos,"Action Recognition in Realistic Sports Videos
+Khurram Soomro and Amir Roshan Zamir"
+a3177f82ea8391d9d733be47e4a0656a7b56e64c,The Roles of Emotions in the Law,"Emotion Researcher | ISRE's Sourcebook for Research on Emotion and Affect
+Emotion Researcher
+ISRE's Sourcebook for Research on Emotion and Affect
+Interviews
+Articles
+Spotlight
+Contact
+How To Cite ER
+Table of Contents
+New Editor Search
+THE ROLES OF EMOTIONS IN THE LAW
+Time for new blood at the helm of Emotion
+Researcher! ISRE is seeking one or more new
+editors, who should take over in April 2017. It
+is a fun and highly rewarding job. Nominations
+of suitable candidates are also encouraged.
+Editor’s Column
+In this issue of Emotion Researcher, we focus on the roles emotions play in the law. We will explore
+the emotions of jurors, judges, defendants, attorneys and other legal actors.
+Call for Papers"
+a3fd234763844663f72a8fa22a076eeadce7245c,DelugeNets: Deep Networks with Efficient and Flexible Cross-Layer Information Inflows,"DelugeNets: Deep Networks with Efficient and Flexible Cross-layer Information
+Inflows
+Jason Kuen1
+Xiangfei Kong1
+Gang Wang2
+Yap-Peng Tan1
+Nanyang Technological University1 Alibaba Group2"
+a30e987e9909a4e307c35809275cf80431211f22,Automatic Sapstain Detection in Processed Timber Through Image Feature Analysis,"Automatic Sapstain Detection in Processed
+Timber Through Image Feature Analysis
+Jeremiah Deng
+The Information Science
+Discussion Paper Series
+Number 2009/04
+April 2009
+ISSN 1177-455X"
+a3fe284b029269ad5f071dd37bb137593c67dfc2,Feature Learning for the Image Retrieval Task,"Feature Learning for the Image Retrieval Task
+Aakanksha Rana, Joaquin Zepeda, Patrick Perez
+Technicolor R&I, 975 avenue des Champs Blancs, CS 17616, 35576 Cesson Sevigne, France"
+a3a6e3cadfed3c0a520e4417fc27da561324fbc6,Facing the challenge of teaching emotions to individuals with low- and high-functioning autism using a new Serious game: a pilot study,"Serret et al. Molecular Autism 2014, 5:37
+http://www.molecularautism.com/content/5/1/37
+R ES EAR CH
+Facing the challenge of teaching emotions to
+individuals with low- and high-functioning autism
+using a new Serious game: a pilot study
+Sylvie Serret1*, Stephanie Hun1, Galina Iakimova2, Jose Lozada3, Margarita Anastassova3, Andreia Santos1,
+Stephanie Vesperini1 and Florence Askenazy4
+Open Access"
+a32f693e98ae35da5508c8eee245a876b6e130a1,Small Sample Scene Categorization from Perceptual Relations Ilan Kadar and,"Small Sample Scene Categorization from Perceptual Relations
+Ilan Kadar and Ohad Ben-Shahar
+Dept. of Computer Science, Ben-Gurion University
+Beer-Sheva, Israel"
+a3fcf3d32a5a4fcc83027e3d367ecc0df3ec4f64,Iris Recognition: On the Segmentation of Degraded Images Acquired in the Visible Wavelength,"Iris Recognition: On the Segmentation
+of Degraded Images Acquired
+in the Visible Wavelength
+Hugo Proenc¸ a"
+a3ed080262f130051d2a02e846f5d227a440b294,ContextNet: Exploring Context and Detail for Semantic Segmentation in Real-time,"ContextNet: Exploring Context and Detail
+for Semantic Segmentation in Real-time
+Rudra P K Poudel, Ujwal Bonde, Stephan Liwicki, and Christopher Zach
+Toshiba Research, Cambridge, UK"
+a35d85c2efd1fb090267980ebb3fd7b6381e3b74,Very Low Resolution Image Classification,"Very Low Resolution Image Classification
+Adam Vest1
+Muhammadabdullah Jamal2
+Boqing Gong2
+University of Louisville 2 University of Central Florida"
+a3a6a6a2eb1d32b4dead9e702824375ee76e3ce7,Multiple Local Curvature Gabor Binary Patterns for Facial Action Recognition,"Multiple Local Curvature Gabor Binary
+Patterns for Facial Action Recognition
+Anıl Y¨uce, Nuri Murat Arar and Jean-Philippe Thiran
+Signal Processing Laboratory (LTS5),
+´Ecole Polytechnique F´ed´erale de Lausanne, Switzerland"
+a33262933df8534de571027d78ccd936bb9ec263,Real-Time Deep Learning Method for Abandoned Luggage Detection in Video,"Real-Time Deep Learning Method for Abandoned Luggage Detection in Video
+University of Bucharest, 14 Academiei, Bucharest, Romania
+Sorina Smeureanu∗‡, Radu Tudor Ionescu∗‡
+SecurifAI, 24 Mircea Vod˘a, Bucharest, Romania
+E-mails:"
+a32c5138c6a0b3d3aff69bcab1015d8b043c91fb,Video redaction: a survey and comparison of enabling technologies,"Downloaded From: https://www.spiedigitallibrary.org/journals/Journal-of-Electronic-Imaging on 9/19/2018
+Terms of Use: https://www.spiedigitallibrary.org/terms-of-use
+Videoredaction:asurveyandcomparisonofenablingtechnologiesShaganSahAmeyaShringiRaymondPtuchaAaronBurryRobertLoceShaganSah,AmeyaShringi,RaymondPtucha,AaronBurry,RobertLoce,“Videoredaction:asurveyandcomparisonofenablingtechnologies,”J.Electron.Imaging26(5),051406(2017),doi:10.1117/1.JEI.26.5.051406."
+a3bf7248e38ed6f9456f0f309b36470c5c0dabd0,Predicting the Driver's Focus of Attention: the DR(eye)VE Project,"Predicting the Driver’s Focus of Attention:
+the DR(eye)VE Project
+Andrea Palazzi∗, Davide Abati∗, Simone Calderara, Francesco Solera, and Rita Cucchiara"
+a3eab933e1b3db1a7377a119573ff38e780ea6a3,Sparse Representation for accurate classification of corrupted and occluded facial expressions,"978-1-4244-4296-6/10/$25.00 ©2010 IEEE
+ICASSP 2010"
+a308ad39f3cc25096f493280319621a25c2c7f46,Monocular 3D Scene Modeling and Inference: Understanding Multi-Object Traffic Scenes,"Monocular 3D Scene Modeling and Inference:
+Understanding Multi-Object Traffic Scenes
+Christian Wojek1,2, Stefan Roth1, Konrad Schindler1,3, and Bernt Schiele1,2
+Computer Science Department, TU Darmstadt
+MPI Informatics, Saarbr¨ucken
+Photogrammetry and Remote Sensing Group, ETH Z¨urich"
+a3be57fc74460463f03c2a14e81e7e62c05c692e,Object Detection,"Object Detection
+Yali Amit and Pedro Felzenszwalb, University of Chicago
+Related Concepts
+– Object Recognition
+– Image Classification
+Definition
+Object detection involves detecting instances of objects from a particular
+lass in an image.
+Background
+The goal of object detection is to detect all instances of objects from a known
+lass, such as people, cars or faces in an image. Typically only a small number
+of instances of the object are present in the image, but there is a very large
+number of possible locations and scales at which they can occur and that need
+to somehow be explored.
+Each detection is reported with some form of pose information. This could
+e as simple as the location of the object, a location and scale, or the extent
+of the object defined in terms of a bounding box. In other situations the pose
+information is more detailed and contains the parameters of a linear or non-linear
+transformation. For example a face detector may compute the locations of the
+eyes, nose and mouth, in addition to the bounding box of the face. An example"
+a3b87364aa68b371ca9831d333b934402fbc3713,Which neural mechanisms mediate the effects of a parenting intervention program on parenting behavior: design of a randomized controlled trial,"Kolijn et al. BMC Psychology (2017) 5:9
+DOI 10.1186/s40359-017-0177-0
+Open Access
+ST UD Y P R O T O C O L
+Which neural mechanisms mediate the
+effects of a parenting intervention program
+on parenting behavior: design of a
+randomized controlled trial
+Laura Kolijn1,2,3, Saskia Euser1,2,3, Bianca G. van den Bulk1,2,3, Renske Huffmeijer1,2,3,
+Marinus H. van IJzendoorn1,2,3 and Marian J. Bakermans-Kranenburg1,2,3*"
+a3a34c1b876002e0393038fcf2bcb00821737105,Face Identification across Different Poses and Illuminations with a 3D Morphable Model,"Face Identification across Different Poses and Illuminations
+with a 3D Morphable Model
+V. Blanz, S. Romdhani, and T. Vetter
+University of Freiburg
+Georges-K¨ohler-Allee 52, 79110 Freiburg, Germany
+fvolker, romdhani,"
+a3f1db123ce1818971a57330d82901683d7c2b67,Poselets and Their Applications in High-Level Computer Vision,"Poselets and Their Applications in High-Level
+Computer Vision
+Lubomir Bourdev
+Electrical Engineering and Computer Sciences
+University of California at Berkeley
+Technical Report No. UCB/EECS-2012-52
+http://www.eecs.berkeley.edu/Pubs/TechRpts/2012/EECS-2012-52.html
+May 1, 2012"
+a3d071d2a5c11329aa324b2cae6b7b6ca7800213,C-VQA: A Compositional Split of the Visual Question Answering (VQA) v1.0 Dataset,"C-VQA: A Compositional Split of the
+Visual Question Answering (VQA) v1.0 Dataset
+Aishwarya Agrawal∗, Aniruddha Kembhavi†, Dhruv Batra‡, Devi Parikh‡
+Virginia Tech, †Allen Institute for Artificial Intelligence, ‡Georgia Institute of Technology
+{dbatra,"
+a3a97bb5131e7e67316b649bbc2432aaa1a6556e,Role of the hippocampus and orbitofrontal cortex during the disambiguation of social cues in working memory.,"Cogn Affect Behav Neurosci
+DOI 10.3758/s13415-013-0170-x
+Role of the hippocampus and orbitofrontal cortex
+during the disambiguation of social cues in working memory
+Robert S. Ross & Matthew L. LoPresti & Karin Schon &
+Chantal E. Stern
+# Psychonomic Society, Inc. 2013"
+a35d3ba191137224576f312353e1e0267e6699a1,Increasing security in DRM systems through biometric authentication,"Javier Ortega-Garcia, Josef Bigun, Douglas Reynolds,
+nd Joaquin Gonzalez-Rodriguez
+Increasing security in DRM systems
+through biometric authentication.
+ecuring the exchange
+of intellectual property
+nd providing protection
+to multimedia contents in
+distribution systems have enabled the
+dvent of digital rights management
+(DRM) systems [5], [14], [21], [47],
+[51], [53]. Rights holders should be able to
+license, monitor, and track the usage of rights
+in a dynamic digital trading environment, espe-
+ially in the near future when universal multimedia
+ccess (UMA) becomes a reality, and any multimedia
+ontent will be available anytime, anywhere. In such
+DRM systems, encryption algorithms, access control,
+key management strategies, identification and tracing
+of contents, or copy control will play a prominent role"
+a3d8887625040d3c07f779ac5353452fd48058e4,A Study of Activity Recognition and Questionable Observer Detection,"International Journal of Computer Applications (0975 – 8887)
+Volume 182 – No. 15, September 2018
+A Study of Activity Recognition and Questionable
+Observer Detection
+D. M. Anisuzzaman
+Department of Computer Science and Engineering,
+Ahsanullah University of Science and Technology,
+Dhaka, Bangladesh"
+b55489547790f7fb2c8b4689530b5660fbc8ee64,Face Scanning in Autism Spectrum Disorder and Attention Deficit/Hyperactivity Disorder: Human Versus Dog Face Scanning,"ORIGINAL RESEARCH
+published: 23 October 2015
+doi: 10.3389/fpsyt.2015.00150
+Face scanning in autism spectrum
+disorder and attention deficit/
+hyperactivity disorder: human
+versus dog face scanning
+Mauro Muszkat 1, Claudia Berlim de Mello 2, Patricia de Oliveira Lima Muñoz 3,
+Tania Kiehl Lucci 3, Vinicius Frayze David 3, José de Oliveira Siqueira 3 and Emma Otta 3*
+Departamento de Psicobiologia, Universidade Federal de São Paulo, São Paulo, Brazil, 2 Programa de Pós Graduação em
+Educação e Saúde, Universidade Federal de São Paulo, São Paulo, Brazil, 3 Departamento de Psicologia Experimental,
+Instituto de Psicologia, Universidade de São Paulo, São Paulo, Brazil
+This study used eye tracking to explore attention allocation to human and dog faces in chil-
+dren and adolescents with autism spectrum disorder (ASD), attention deficit/hyperactivity
+disorder (ADHD), and typical development (TD). Significant differences were found among
+the three groups. TD participants looked longer at the eyes than ASD and ADHD ones,
+irrespective of the faces presented. In spite of this difference, groups were similar in that
+they looked more to the eyes than to the mouth areas of interest. The ADHD group gazed
+longer at the mouth region than the other groups. Furthermore, groups were also similar
+in that they looked more to the dog than to the human faces. The eye-tracking tech-"
+b50f2ad8d7f08f99d4ba198120120f599f98095e,Spatiotemporal data fusion for precipitation nowcasting,"Spatiotemporal data fusion for precipitation
+nowcasting
+Vladimir Ivashkin
+Yandex, Moscow, Russia
+Vadim Lebedev
+Yandex, Moscow, Russia"
+b5f5781cba3c3da807359a6f600aa19c666a3f81,Comparing Attention to Socially-Relevant Stimuli in Autism Spectrum Disorder and Developmental Coordination Disorder,"Journal of Abnormal Child Psychology
+https://doi.org/10.1007/s10802-017-0393-3
+Comparing Attention to Socially-Relevant Stimuli in Autism
+Spectrum Disorder and Developmental Coordination Disorder
+Emma Sumner 1
+& Hayley C. Leonard 2 & Elisabeth L. Hill 3
+# The Author(s) 2018. This article is an open access publication"
+b58672881dd8112cd3e6dedebcf8367ce2c9d78b,Mechanistic Analytical Modeling of Superscalar In-Order Processor Performance,"Mechanistic Analytical Modeling of Superscalar In-Order
+Processor Performance
+MAXIMILIEN B. BREUGHE, STIJN EYERMAN, and LIEVEN EECKHOUT,
+Ghent University, Belgium
+Superscalar in-order processors form an interesting alternative to out-of-order processors because of their
+energy efficiency and lower design complexity. However, despite the reduced design complexity, it is nontrivial
+to get performance estimates or insight in the application–microarchitecture interaction without running
+slow, detailed cycle-level simulations, because performance highly depends on the order of instructions within
+the application’s dynamic instruction stream, as in-order processors stall on interinstruction dependences
+nd functional unit contention. To limit the number of detailed cycle-level simulations needed during design
+space exploration, we propose a mechanistic analytical performance model that is built from understanding
+the internal mechanisms of the processor.
+The mechanistic performance model for superscalar in-order processors is shown to be accurate with an
+verage performance prediction error of 3.2% compared to detailed cycle-accurate simulation using gem5. We
+lso validate the model against hardware, using the ARM Cortex-A8 processor and show that it is accurate
+within 10% on average. We further demonstrate the usefulness of the model through three case studies:
+(1) design space exploration, identifying the optimum number of functional units for achieving a given
+performance target; (2) program–machine interactions, providing insight into microarchitecture bottlenecks;
+nd (3) compiler–architecture interactions, visualizing the impact of compiler optimizations on performance.
+Categories and Subject Descriptors: C.0 [Computer Systems Organization]: General—Modeling of com-"
+b569f22ce779d221ec008c0baa354796d71e3d80,Image Classification for Arabic: Assessing the Accuracy of Direct English to Arabic Translations,"Image Classification for Arabic: Assessing the Accuracy of
+Direct English to Arabic Translations
+Information Systems Department, Prince Sattam Bin Abdulaziz university, Al Kharj, Saudi Arabia
+Abdulkareem Alsudais"
+b558be7e182809f5404ea0fcf8a1d1d9498dc01a,Bottom-up and top-down reasoning with convolutional latent-variable models,"Bottom-up and top-down reasoning with convolutional latent-variable models
+Peiyun Hu
+UC Irvine
+Deva Ramanan
+UC Irvine"
+b5fffbc0e590ce67d485f1602c8158befcef9fa8,The use of hidden Markov models to verify the identity based on facial asymmetry,"Kubanek and Bobulski EURASIP Journal on Image and Video
+Processing (2017) 2017:45
+DOI 10.1186/s13640-017-0193-2
+EURASIP Journal on Image
+nd Video Processing
+RESEARCH
+Open Access
+The use of hidden Markov models to
+verify the identity based on facial asymmetry
+Mariusz Kubanek and Janusz Bobulski*"
+b5cd8151f9354ee38b73be1d1457d28e39d3c2c6,Finding Celebrities in Video,"Finding Celebrities in Video
+Nazli Ikizler
+Jai Vasanth
+Linus Wong
+David Forsyth
+Electrical Engineering and Computer Sciences
+University of California at Berkeley
+Technical Report No. UCB/EECS-2006-77
+http://www.eecs.berkeley.edu/Pubs/TechRpts/2006/EECS-2006-77.html
+May 23, 2006"
+b5476afccf97fc498f51170e65ac9cd9665fd2ce,Wide Range Face Pose Estimation by Modelling the 3D Arrangement of Robustly Detectable Sub-parts,"Wide Range Face Pose Estimation
+y Modelling the 3D Arrangement
+of Robustly Detectable Sub-Parts
+Thiemo Wiedemeyer1, Martin Stommel2 and Otthein Herzog3
+TZI Center for Computing and Communication Technologies,
+University Bremen, Am Fallturm 1, 28359 Bremen, Germany"
+b5fc4f9ad751c3784eaf740880a1db14843a85ba,Significance of image representation for face verification,"SIViP (2007) 1:225–237
+DOI 10.1007/s11760-007-0016-5
+ORIGINAL PAPER
+Significance of image representation for face verification
+Anil Kumar Sao · B. Yegnanarayana ·
+B. V. K. Vijaya Kumar
+Received: 29 August 2006 / Revised: 28 March 2007 / Accepted: 28 March 2007 / Published online: 1 May 2007
+© Springer-Verlag London Limited 2007"
+b525a863eab597055e02351acfeab64754d22690,Pictorial Structures Revisited : Multiple Human Pose Estimation,"IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE
+3D Pictorial Structures Revisited:
+Multiple Human Pose Estimation
+Vasileios Belagiannis, Sikandar Amin, Mykhaylo Andriluka,
+Bernt Schiele, Nassir Navab, and Slobodan Ilic"
+b5af4b9d68f1b9b2c2999a726f6d2fbb2a49a3bf,Modulating early visual processing by language,"Modulating early visual processing by language
+Harm de Vries∗
+University of Montreal
+Florian Strub∗
+Univ. Lille, CNRS, Centrale Lille,
+Jérémie Mary†
+Univ. Lille, CNRS, Centrale Lille,
+Inria, UMR 9189 CRIStAL
+Inria, UMR 9189 CRIStAL
+Hugo Larochelle
+Google Brain
+Olivier Pietquin
+DeepMind
+Aaron Courville
+University of Montreal, CIFAR Fellow"
+b5f9c5af707f55d96b1d3d65d970270d35a60987,Comparison of face Recognition Algorithms on Dummy Faces,"The International Journal of Multimedia & Its Applications (IJMA) Vol.4, No.4, August 2012
+Comparison of face Recognition Algorithms on
+Dummy Faces
+Aruni Singh, Sanjay Kumar Singh, Shrikant Tiwari
+Department of Computer Engineering, IT-BHU, Varanasi-India"
+b5ba0c50cfe2559f4197bb35cf50441118b768c8,audEERING's approach to the One-Minute-Gradual Emotion Challenge,"audEERING’s approach to the One-Minute-Gradual Emotion Challenge
+Andreas Triantafyllopoulos, Hesam Sagha, Florian Eyben, Bj¨orn Schuller
+audEERING GmbH, Gilching, Germany"
+b5cf931cf0bd606575bc793c0c8ec6d913d08bc6,"Geometric primitive feature extraction - concepts, algorithms, and applications","GEOMETRIC PRIMITIVE FEATURE EXTRACTION –
+CONCEPTS, ALGORITHMS, AND APPLICATIONS
+DILIP KUMAR PRASAD
+School of Computer Engineering
+A Thesis submitted to the Nanyang Technological University
+in fulfillment of the requirement for the degree of
+Doctor of Philosophy"
+b506aa23949b6d1f0c868ad03aaaeb5e5f7f6b57,Modeling Social and Temporal Context for Video Analysis,"UNIVERSITY OF CALIFORNIA
+RIVERSIDE
+Modeling Social and Temporal Context for Video Analysis
+A Dissertation submitted in partial satisfaction
+of the requirements for the degree of
+Doctor of Philosophy
+Computer Science
+Zhen Qin
+June 2015
+Dissertation Committee:
+Dr. Christian R. Shelton, Chairperson
+Dr. Tao Jiang
+Dr. Stefano Lonardi
+Dr. Amit Roy-Chowdhury"
+b599f323ee17f12bf251aba928b19a09bfbb13bb,Autonomous Quadcopter Videographer,"AUTONOMOUS QUADCOPTER VIDEOGRAPHER
+REY R. COAGUILA
+B.S. Universidad Peruana de Ciencias Aplicadas, 2009
+A thesis submitted in partial fulfillment of the requirements
+for the degree of Master of Science in Computer Science
+in the Department of Electrical Engineering and Computer Science
+in the College of Engineering and Computer Science
+at the University of Central Florida
+Orlando, Florida
+Spring Term
+Major Professor: Gita R. Sukthankar"
+b55853483873d3947e8c962f1152128059369d93,DoShiCo challenge: Domain shift in control prediction,"DoShiCo challenge:
+Domain Shift in Control prediction
+Klaas Kelchtermans∗ and Tinne Tuytelaars∗"
+b58e71a3336193bed5785b2818a4fec85dd5f5ff,Object Detection and Tracking for Autonomous Navigation in Dynamic Environments,"Object detection and tracking for autonomous navigation
+in dynamic environments
+Andreas Ess · Konrad Schindler · Bastian Leibe · Luc Van Gool"
+b5160e95192340c848370f5092602cad8a4050cd,Video Classification With CNNs: Using The Codec As A Spatio-Temporal Activity Sensor,"IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY, TO APPEAR
+Video Classification With CNNs: Using The Codec
+As A Spatio-Temporal Activity Sensor
+Aaron Chadha, Alhabib Abbas and Yiannis Andreopoulos, Senior Member, IEEE"
+b501361ad3ad4f78a3966830a40d2b4f68466c80,Night-time Vehicle Detection for Automatic Headlight Beam Control,"International Journal of Computer Applications (0975 – 8887)
+Volume 157 – No 7, January 2017
+Night-time Vehicle Detection for Automatic Headlight
+Beam Control
+Pushkar Sevekar
+Student, Department of
+Electronics Engineering
+A.I.S.S.M.S. Institute of
+Information Technology,
+Pune, India"
+b58417561ea400b60bd976104e43b1361e1314ba,Target Tracking In Real Time Surveillance Cameras and Videos,"Target Tracking In Real Time Surveillance
+Cameras and Videos
+Nayyab Naseem Mehreen Sirshar
+Department of Software Engineering Department of Software Engineering
+Fatima Jinnah Women University Fatima Jinnah Women University"
+b52886610eda6265a2c1aaf04ce209c047432b6d,Microexpression Identification and Categorization Using a Facial Dynamics Map,"Microexpression Identification and Categorization
+using a Facial Dynamics Map
+Feng Xu, Junping Zhang, James Z. Wang"
+b5790f1bc586a77ff2cbea002b7ad2646e32af6b,Person Re-Identification Ranking Optimisation by Discriminant Context Information Analysis,"Person Re-Identification Ranking Optimisation by
+Discriminant Context Information Analysis
+Jorge Garc´ıa1, Niki Martinel2, Christian Micheloni2 and Alfredo Gardel1
+Department of Electronics, University of Alcala, Alcal´a de Henares, Spain
+Department of Mathematics and Computer Science, University of Udine, Udine, Italy"
+b573a57b3da678631bd78f25ecdeac7cd36fa617,A Multi-view RGB-D Approach for Human Pose Estimation in Operating Rooms,"A Multi-view RGB-D Approach for Human Pose Estimation in Operating Rooms
+Abdolrahim Kadkhodamohammadi1, Afshin Gangi1,2, Michel de Mathelin1, Nicolas Padoy1
+ICube, University of Strasbourg, CNRS, IHU Strasbourg, France
+Radiology Department, University Hospital of Strasbourg, France
+{kadkhodamohammad, gangi, demathelin,"
+b5f9d5be7561bb6eacee9012275b17c75696c388,A Teacher Student Network for Faster Video Classification,"Under review as a conference paper at ICLR 2019
+A TEACHER STUDENT NETWORK FOR FASTER VIDEO
+CLASSIFICATION
+Anonymous authors
+Paper under double-blind review"
+b5793958cd1654b4817ebb57f5484dfd8861f916,Recurrent Image Captioner: Describing Images with Spatial-Invariant Transformation and Attention Filtering,"Recurrent Image Captioner: Describing Images with Spatial-Invariant
+Transformation and Attention Filtering
+Hao Liu
+UESTC, China
+Yang Yang
+UESTC, China
+Fumin Shen
+UESTC, China
+Lixin Duan
+UESTC, China
+Heng Tao Shen
+UESTC, China"
+b5c5a57f5ecd8e11cd47814d584daba53aa14d3c,SOSVR Team Description Paper Robocup 2017 Rescue Virtual Robot League,"SOSVR Team Description Paper
+Robocup 2017 Rescue Virtual Robot League
+Mahdi Taherahmadi, Sajjad Azami, MohammadHossein GohariNejad, Mostafa
+Ahmadi, and Saeed Shiry Ghidary
+Cognitive Robotics Lab, Amirkabir University of Technology (Tehran Polytechnic),
+No. 424, Hafez Ave., Tehran, Iran. P. O. Box"
+b5857b5bd6cb72508a166304f909ddc94afe53e3,SSIG and IRISA at Multimodal Person Discovery,"SSIG and IRISA at Multimodal Person Discovery
+Cassio E. dos Santos Jr1, Guillaume Gravier2, William Robson Schwartz1
+Department of Computer Science, Universidade Federal de Minas Gerais, Belo Horizonte, Brazil
+IRISA & Inria Rennes , CNRS, Rennes, France"
+b5050d74dd8f0384506bcd365b31044c80d476c0,Discriminative Multimetric Learning for Kinship Verification,"Discriminative Multimetric Learning
+for Kinship Verification
+Haibin Yan, Jiwen Lu, Member, IEEE, Weihong Deng, and Xiuzhuang Zhou, Member, IEEE"
+b51e3d59d1bcbc023f39cec233f38510819a2cf9,"Can a biologically-plausible hierarchy effectively replace face detection, alignment, and recognition pipelines?","CBMM Memo No. 003
+March 27, 2014
+Can a biologically-plausible hierarchy effectively
+replace face detection, alignment, and
+recognition pipelines?
+Qianli Liao1, Joel Z Leibo1, Youssef Mroueh1, Tomaso Poggio1"
+b54c477885d53a27039c81f028e710ca54c83f11,Semi-Supervised Kernel Mean Shift Clustering,"Semi-Supervised Kernel Mean Shift Clustering
+Saket Anand, Member, IEEE, Sushil Mittal, Member, IEEE, Oncel Tuzel, Member, IEEE,
+and Peter Meer, Fellow, IEEE"
+b503f481120e69b62e076dcccf334ee50559451e,Recognition of Facial Action Units with Action Unit Classifiers and an Association Network,"Recognition of Facial Action Units with Action
+Unit Classifiers and An Association Network
+Junkai Chen1, Zenghai Chen1, Zheru Chi1 and Hong Fu1,2
+Department of Electronic and Information Engineering, The Hong Kong Polytechnic
+University, Hong Kong
+Department of Computer Science, Chu Hai College of Higher Education, Hong Kong"
+b55d0c9a022874fb78653a0004998a66f8242cad,Hybrid Facial Representations for Emotion Recognition Woo,"Hybrid Facial Representations
+for Emotion Recognition
+Woo-han Yun, DoHyung Kim, Chankyu Park, and Jaehong Kim
+Automatic facial expression recognition is a widely
+studied problem in computer vision and human-robot
+interaction. There has been a range of studies for
+representing facial descriptors for facial expression
+recognition. Some prominent descriptors were presented
+in the first facial expression recognition and analysis
+challenge (FERA2011). In that competition, the Local
+Gabor Binary Pattern Histogram Sequence descriptor
+showed the most powerful description capability. In this
+paper, we introduce hybrid facial representations for facial
+expression recognition, which have more powerful
+description capability with lower dimensionality. Our
+descriptors consist of a block-based descriptor and a pixel-
+based descriptor. The block-based descriptor represents
+the micro-orientation and micro-geometric structure
+information. The pixel-based descriptor represents texture
+information. We validate our descriptors on two public"
+b5f7b17b0feb3a1f3af60dce61fd9a9c6b067368,The Benefits of Dense Stereo for Pedestrian Detection,"The Benefits of Dense Stereo
+for Pedestrian Detection
+Christoph G. Keller, Markus Enzweiler, Marcus Rohrbach, David Fernández Llorca,
+Christoph Schnörr, and Dariu M. Gavrila"
+b22b4817757778bdca5b792277128a7db8206d08,SCAN: Learning Hierarchical Compositional Visual Concepts,"Published as a conference paper at ICLR 2018
+SCAN: LEARNING HIERARCHICAL
+COMPOSITIONAL VISUAL CONCEPTS
+Irina Higgins, Nicolas Sonnerat, Loic Matthey, Arka Pal,
+Christopher P Burgess, Matko Bošnjak, Murray Shanahan,
+Matthew Botvinick, Demis Hassabis, Alexander Lerchner
+DeepMind, London, UK
+{irinah,sonnerat,lmatthey,arkap,cpburgess,"
+b26f6e3cad2b3d129c0e70e9307ce9197cad2123,Robust Wearable Camera Localization as a Target Tracking Problem on SE(3),"G.BOURMAUD ET AL.: ROBUST WEARABLE CAMERA LOCALIZATION
+Robust Wearable Camera Localization as a
+Target Tracking Problem on SE(3)
+Guillaume Bourmaud
+Audrey Giremus
+IMS Laboratory CNRS UMR 5218
+University of Bordeaux
+France"
+b266be4d9fab8bf307ee2e6fdd6180ac7f6ef893,Look into Person: Joint Body Parsing&Pose Estimation Network and A New Benchmark,"Look into Person: Joint Body Parsing & Pose
+Estimation Network and A New Benchmark
+Xiaodan Liang, Ke Gong, Xiaohui Shen, and Liang Lin"
+b2e2260b8d811948e71898d3adfa8aa6b64fe125,Learning Arbitrary Potentials in CRFs with Gradient Descent,"Learning Arbitrary Potentials in CRFs with Gradient Descent
+M˚ans Larsson1
+Fredrik Kahl1,2
+Chalmers Univ. of Technology 2Lund Univ.
+Shuai Zheng3 Anurag Arnab3
+Oxford Univ.
+Philip Torr3 Richard Hartley4
+Australian National Univ."
+b2444e837095706998b03fa5fed223411b9d4d55,Color Based Tracing in Real-Life Surveillance Data,"Color Based Tracing in Real-life Surveillance
+Michael J. Metternich, Marcel Worring, and Arnold W.M. Smeulders
+ISLA-University of Amsterdam,
+Science Park 107, 1098 XG Amsterdam, The Netherlands
+http://www.science.uva.nl/research/isla/"
+b2046c78d4e2f00a72ee9a76875746d2d3f47e1c,Variational Infinite Hidden Conditional Random Fields,"This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI
+IEEE TRANSACTION ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE
+Variational Infinite
+Hidden Conditional Random Fields
+Konstantinos Bousmalis, Student Member, IEEE, Stefanos Zafeiriou, Member, IEEE,
+Louis-Philippe Morency, Member, IEEE, Maja Pantic, Fellow, IEEE,
+and Zoubin Ghahramani, Member, IEEE"
+b216040f110d2549f61e3f5a7261cab128cab361,Weighted Voting of Discriminative Regions for Face Recognition,"IEICE TRANS. INF. & SYST., VOL.E100–D, NO.11 NOVEMBER 2017
+LETTER
+Weighted Voting of Discriminative Regions for Face Recognition∗
+Wenming YANG†, Member, Riqiang GAO†a), and Qingmin LIAO†, Nonmembers
+SUMMARY
+This paper presents a strategy, Weighted Voting of Dis-
+riminative Regions (WVDR), to improve the face recognition perfor-
+mance, especially in Small Sample Size (SSS) and occlusion situations.
+In WVDR, we extract the discriminative regions according to facial key
+points and abandon the rest parts. Considering different regions of face
+make different contributions to recognition, we assign weights to regions
+for weighted voting. We construct a decision dictionary according to the
+recognition results of selected regions in the training phase, and this dic-
+tionary is used in a self-defined loss function to obtain weights. The final
+identity of test sample is the weighted voting of selected regions. In this
+paper, we combine the WVDR strategy with CRC and SRC separately, and
+extensive experiments show that our method outperforms the baseline and
+some representative algorithms.
+key words: discriminative regions, small sample size, occlusion, weighted
+strategy, face recognition"
+b28e142376a2dd639f58935f2f63a9dc7651131e,Investigation of Gait Representations in Lower Knee Gait Recognition,
+b261439b5cde39ec52d932a222450df085eb5a91,Facial Expression Recognition using Analytical Hierarchy Process,"International Journal of Computer Trends and Technology (IJCTT) – volume 24 Number 2 – June 2015
+Facial Expression Recognition using Analytical Hierarchy
+Process
+MTech Student 1 , Assistant Professor 2 , Department of Computer Science and Engineeringt1, 2, Disha Institute of
+Management and Technology, Raipur Chhattisgarh, India1, 2
+Vinita Phatnani1, Akash Wanjari2,
+its significant contribution"
+b29e60ddcabff5002c3ddec135ec94dd991d8d5a,Compressing deep convolutional neural networks in visual emotion recognition,"Compressing deep convolutional neural networks in visual emotion
+recognition
+A.G. Rassadin1, A.V. Savchenko1
+National Research University Higher School of Economics, Laboratory of Algorithms and Technologies for Network Analysis, 25/12 Bolshaya Pecherskaya
+Street, 603155, Nizhny Novgorod, Russia"
+b277bde51641d6b08693c171aea761beb14af800,Face Kernel Extraction from Local Features,"FACE KERNEL EXTRACTION FROM
+LOCAL FEATURES
+A thesis submitted to the University of Manchester
+for the degree of Doctor of Philosophy
+in the Faculty of Engineering and Physical Sciences
+Maria Pavlou
+School of Electrical Engineering and Electronics"
+b2e67e67e5bbb19a02524afcc217929b0a76a9a7,Chapter 12 Using Ocular Data for Unconstrained Biometric Recognition,"Face Recognition in Adverse ConditionsMaria De MarsicoSapienza University of Rome, ItalyMichele NappiUniversity of Salerno, ItalyMassimo TistarelliUniversity of Sassari, ItalyA volume in the Advances in Computational Intelligence and Robotics (ACIR) Book Series"
+b2b535118c5c4dfcc96f547274cdc05dde629976,Automatic Recognition of Facial Displays of Unfelt Emotions,"JOURNAL OF IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. X, XXX 2017
+Automatic Recognition of Facial Displays of
+Unfelt Emotions
+Kaustubh Kulkarni*, Ciprian Adrian Corneanu*, Ikechukwu Ofodile*, Student Member, IEEE, Sergio
+Escalera, Xavier Bar´o, Sylwia Hyniewska, Member, IEEE, J¨uri Allik,
+and Gholamreza Anbarjafari, Senior Member, IEEE"
+b235b4ccd01a204b95f7408bed7a10e080623d2e,Regularizing Flat Latent Variables with Hierarchical Structures,"Regularizing Flat Latent Variables with Hierarchical Structures
+Rongcheng Lin(cid:117) , Huayu Li(cid:117) , Xiaojun Quan† , Richang Hong(cid:63) , Zhiang Wu∓ , Yong Ge(cid:117)
+(cid:117)UNC Charlotte. Email: {rlin4, hli38,
+(cid:63) Hefei University of Technology. Email:
+Institute for Infocomm Research. Email:
+∓ Nanjing University of Finance and Economics. Email:"
+b20a5427d79c660fe55282da2533071629bfc533,Deep Learning Advances on Different 3D Data Representations: A Survey,"Deep Learning Advances on Different 3D Data
+Representations: A Survey
+Eman Ahmed, Alexandre Saint, Abd El Rahman Shabayek, Kseniya Cherenkova, Rig Das, Gleb Gusev,
+Djamila Aouada and Bj¨orn Ottersten"
+b2504b0b2a7e06eab02a3584dd46d94a3f05ffdf,Conditional Neural Processes,"Conditional Neural Processes
+Marta Garnelo 1 Dan Rosenbaum 1 Chris J. Maddison 1 Tiago Ramalho 1 David Saxton 1 Murray Shanahan 1 2
+Yee Whye Teh 1 Danilo J. Rezende 1 S. M. Ali Eslami 1"
+b285e50220fb6c09cf3c724c7e48093373df3c58,Semisupervised Classifier Evaluation and Recalibration,"Semisupervised Classifier Evaluation
+and Recalibration
+Peter Welinder∗, Max Welling†, and Pietro Perona‡
+October 7, 2012"
+b2c25af8a8e191c000f6a55d5f85cf60794c2709,A novel dimensionality reduction technique based on kernel optimization through graph embedding,"Noname manuscript No.
+(will be inserted by the editor)
+A Novel Dimensionality Reduction Technique based on
+Kernel Optimization Through Graph Embedding
+N. Vretos, A. Tefas and I. Pitas
+the date of receipt and acceptance should be inserted later"
+b2f4871cf9f61c44b16c733369d8730e90d9cc0d,The role of emotion in problem solving: first results from observing chess,"The Role of Emotion in Problem Solving: First Results
+from Observing Chess
+Thomas Guntz1, James L. Crowley1, Dominique Vaufreydaz1, Raffaella Balzarini1,
+Philippe Dessus1,2
+Univ. Grenoble Alpes, CNRS, Inria, Grenoble INP, LIG, 38000 Grenoble, France
+Univ. Grenoble Alpes, LaRAC, 38000 Grenoble, France
+Author version"
+b2624c3cb508bf053e620a090332abce904099a1,Dynamic Memory Networks for Visual and Textual Question Answering,"Dynamic Memory Networks for Visual and Textual Question Answering
+Caiming Xiong*, Stephen Merity*, Richard Socher
+MetaMind, Palo Alto, CA USA
+{CMXIONG,SMERITY,RICHARD}METAMIND.IO
+*indicates equal contribution."
+b2abaffc4d68ebf910dd85c0f7a367895ab90e2a,Iris recognition using scattering transform and textural features,"IRIS RECOGNITION USING SCATTERING TRANSFORM AND TEXTURAL FEATURES
+Shervin Minaee, AmirAli Abdolrashidi and Yao Wang
+ECE Department, NYU Polytechnic School of Engineering, USA
+{shervin.minaee, abdolrashidi,"
+d904f945c1506e7b51b19c99c632ef13f340ef4c,0 ° 15 ° 30 ° 45 ° 60 ° 75 ° 90 °,"A scalable 3D HOG model for fast object detection and viewpoint estimation
+Marco Pedersoli
+Tinne Tuytelaars
+KU Leuven, ESAT/PSI - iMinds
+Kasteelpark Arenberg 10 B-3001 Leuven, Belgium"
+d914c53cdf26acc64259d381fbd45c4e150633ee,Pedestrian Tracking in the Compressed Domain Using Thermal Images,"Pedestrian Tracking in the Compressed Domain
+Using Thermal Images
+Ichraf Lahouli1,2,3, Robby Haelterman1, Zied Chtourou2, Geert De Cubber1,
+and Rabah Attia3
+Royal Military Academy,
+Brussels, Belgium
+VRIT Lab, Military Academy of Tunisia,
+Nabeul, Tunisia
+SERCOM Lab, Tunisia Polytechnic School,
+La Marsa, Tunisia"
+d9f0640716ec25278e6f1a4fdda5596660504c54,A Correlated Parts Model for Object Detection in Large 3D Scans,"EUROGRAPHICS 2013 / I. Navazo, P. Poulin
+(Guest Editors)
+Volume 32 (2013), Number 2
+A Correlated Parts Model for Object Detection in Large 3D
+Scans
+M. Sunkel1, S. Jansen1, M. Wand1,2, H.-P. Seidel1
+MPI Informatik
+Saarland University
+Figure 1: Based on sparse user annotations a shape model is learned. The detected instances are transformed into descriptors
+for the second hierarchy level. Hierarchical detections shown on the right are obtained using only the example marked red."
+d9810786fccee5f5affaef59bc58d2282718af9b,Adaptive Frame Selection for Enhanced Face Recognition in Low-Resolution Videos,"Adaptive Frame Selection for
+Enhanced Face Recognition in
+Low-Resolution Videos
+Raghavender Reddy Jillela
+Thesis submitted to the
+College of Engineering and Mineral Resources
+at West Virginia University
+in partial fulfillment of the requirements
+for the degree of
+Master of Science
+Electrical Engineering
+Arun Ross, PhD., Chair
+Xin Li, PhD.
+Donald Adjeroh, PhD.
+Lane Department of Computer Science and Electrical Engineering
+Morgantown, West Virginia
+Keywords: Face Biometrics, Super-Resolution, Optical Flow, Super-Resolution using
+Optical Flow, Adaptive Frame Selection, Inter-Frame Motion Parameter, Image Quality,
+Image-Level Fusion, Score-Level Fusion
+Copyright 2008 Raghavender Reddy Jillela"
+d929534024614e3153c986e55d758ea7471d3fff,How Not to Evaluate a Developmental System,"How Not to Evaluate a Developmental System
+Frederick Shic and Brian Scassellati"
+d94d7ff6f46ad5cab5c20e6ac14c1de333711a0c,Face Album: Towards automatic photo management based on person identity on mobile phones,"978-1-5090-4117-6/17/$31.00 ©2017 IEEE
+ICASSP 2017"
+d930ec59b87004fd172721f6684963e00137745f,Face Pose Estimation using a Tree of Boosted Classifiers,"Face Pose Estimation using a
+Tree of Boosted Classifiers
+Javier Cruz Mota
+Project Assistant: Julien Meynet
+Professor: Jean-Philippe Thiran
+Signal Processing Institute,
+´Ecole Polytechnique F´ed´erale de Lausanne (EPFL)
+September 11, 2006"
+d951ff5f378b2a5f878423029123ad6b3491b444,Foveal Vision for Instance Segmentation of Road Images,"Foveal Vision for Instance Segmentation of Road Images
+Benedikt Ortelt1, Christian Herrmann2,3, Dieter Willersinn2, J¨urgen Beyerer2,3
+Robert Bosch GmbH, Leonberg, Germany
+Fraunhofer IOSB, Karlsruhe, Germany
+Karlsruhe Institute of Technology KIT, Vision and Fusion Lab, Karlsruhe, Germany
+Keywords:
+Instance Segmentation, Multi-Scale Analysis, Foveated Imaging, Cityscapes."
+d9fe0b257ec50a12ba1af749fad56a6f705d16a4,High Frequency Regions for Face Recognition,"The International Journal of Multimedia & Its Applications (IJMA) Vol.4, No.1, February 2012
+FEATURE IMAGE GENERATION USING LOW, MID
+AND HIGH FREQUENCY REGIONS FOR FACE
+RECOGNITION
+Vikas Maheshkar1, Sushila Kamble2, Suneeta Agarwal3 and Vinay Kumar
+Srivastava4
+-3Department of Computer Science and Engineering, MNNIT, Allahabad
+Department of Electronics & Communication Engineering, MNNIT, Allahabad"
+d9318c7259e394b3060b424eb6feca0f71219179,Face Matching and Retrieval Using Soft Biometrics,"Face Matching and Retrieval Using Soft Biometrics
+Unsang Park, Member, IEEE, and Anil K. Jain, Fellow, IEEE"
+d9ee64038aea3a60120e9f7de16eb4130940a103,Message Passing Multi-Agent GANs,"Message Passing Multi-Agent GANs
+Arnab Ghosh∗, Viveka Kulharia∗, Vinay Namboodiri
+IIT Kanpur"
+d97e7799142e2c66b63fe63bc52632fdf305f313,Lanczos Vectors versus Singular Vectors for Effective Dimension Reduction,"Lanczos Vectors versus Singular Vectors for
+Effective Dimension Reduction
+Jie Chen and Yousef Saad"
+d9fda0030ca349da7b1dafca015bea95a6aabea0,ISA2: Intelligent Speed Adaptation from Appearance,"ISA2: Intelligent Speed Adaptation from Appearance
+Carlos Herranz-Perdiguero1 and Roberto J. L´opez-Sastre1"
+d950af49c44bc5d9f4a5cc1634e606004790b1e5,Divide and Fuse: A Re-ranking Approach for Person Re-identification,"YU ET AL.: DIVIDE AND FUSE: A RE-RANKING APPROACH FOR PERSON RE-ID
+Divide and Fuse: A Re-ranking Approach for
+Person Re-identification
+Huazhong University of Science and
+Technology
+Wuhan, China
+Rui Yu
+Zhichao Zhou
+Song Bai
+Xiang Bai ∗"
+d9ef1a80738bbdd35655c320761f95ee609b8f49,A Research - Face Recognition by Using Near Set Theory,"Volume 5, Issue 4, 2015 ISSN: 2277 128X
+International Journal of Advanced Research in
+Computer Science and Software Engineering
+Research Paper
+Available online at: www.ijarcsse.com
+A Research - Face Recognition by Using Near Set Theory
+Manisha V. Borkar, Bhakti Kurhade
+Department of Computer Science and Engineering
+Abha Gaikwad -Patil College of Engineering, Nagpur, Maharashtra, India"
+d930d20ba42a5d868dd78dd73bac0f72110e0bc5,Multivariate Shape Modeling and Its Application to Characterizing Abnormal Amygdala Shape in Autism,"Multivariate Shape Modeling and Its Application to
+Characterizing Abnormal Amygdala Shape in Autism
+Moo K. Chunga,b∗,Keith J. Worsleyd, Brendon, M. Nacewiczb,
+Kim M. Daltonb, Richard J. Davidsonb,c
+Department of Biostatistics and Medical Informatics
+Waisman Laboratory for Brain Imaging and Behavior
+Department of Psychology and Psychiatry
+University of Wisconsin, Madison, WI 53706, USA
+dDepartment of Statistics
+University of Chicago, Chicago, IL 60637, USA
+September 22, 2009"
+d94b37958657aa703d8a3d02a66ee251b4c3f597,Learning deep features from body and parts for person re-identification in camera networks,"Zhang and Si EURASIP Journal on Wireless Communications and
+Networking (2018) 2018:52
+https://doi.org/10.1186/s13638-018-1060-2
+RESEARCH
+Open Access
+Learning deep features from body and
+parts for person re-identification in camera
+networks
+Zhong Zhang1,2* and Tongzhen Si1,2"
+d9df2ed64494f54c0e2529f2c05a16423a57235c,A Novel Approach for Facial Expression Analysis in real time applications using SIFT flow and SVM,"Australian Journal of Basic and Applied Sciences, 9(21) Special 2015, Pages: 1-6
+ISSN:1991-8178
+Australian Journal of Basic and Applied Sciences
+Journal home page: www.ajbasweb.com
+A Novel Approach for Facial Expression Analysis in real time applications using SIFT
+flow and SVM
+K. Suganya Devi and 2P. Srinivasan
+Department of Computer Science and Engineering, University college of Engg Panruti, Panruti 607106, Tamilnadu, India
+Department of Physics, University college of Engg Panruti, Panruti 607106, Tamilnadu, India
+A R T I C L E I N F O
+Article history:
+Article Received : 12 January 2015
+Revised: 1 May 2015
+Accepted: 8 May 2015
+Keywords:
+Expression recognition, Facial region
+selection, Facial expression, Sparse
+learning technique, Scale Invariant
+Feature Transform flow, SVM
+A B S T R A C T"
+d9c4b1ca997583047a8721b7dfd9f0ea2efdc42c,Learning Inference Models for Computer Vision,Learning Inference Models for Computer Vision
+d94c7a89adf6f568bbe1510910850d5083a58b4f,Deep Cross Modal Learning for Caricature Verification and Identification (CaVINet),"Deep Cross Modal Learning for Caricature Verification and
+Identification(CaVINet)
+https://lsaiml.github.io/CaVINet/
+Jatin Garg∗
+Indian Institute of Technology Ropar
+Himanshu Tolani∗
+Indian Institute of Technology Ropar
+Skand Vishwanath Peri∗
+Indian Institute of Technology Ropar
+Narayanan C Krishnan
+Indian Institute of Technology Ropar"
+d9bc16dcbc13502389704e4a0bdd8ee7af618069,Learning pullback HMM distances for action recognition,Learning pullback HMM distances for action recognition
+d9bad7c3c874169e3e0b66a031c8199ec0bc2c1f,"It All Matters: Reporting Accuracy, Inference Time and Power Consumption for Face Emotion Recognition on Embedded Systems","It All Matters:
+Reporting Accuracy, Inference Time and Power Consumption
+for Face Emotion Recognition on Embedded Systems
+Jelena Milosevic
+Institute of Telecommunications, TU Wien
+Andrew Forembsky
+Movidius an Intel Company
+Dexmont Pe˜na
+Movidius an Intel Company
+David Moloney
+Movidius an Intel Company
+Miroslaw Malek
+ALaRI, Faculty of Informatics, USI"
+d9327b9621a97244d351b5b93e057f159f24a21e,Laplacian smoothing transform for face recognition,"SCIENCE CHINA
+Information Sciences
+. RESEARCH PAPERS .
+December 2010 Vol. 53 No. 12: 2415–2428
+doi: 10.1007/s11432-010-4099-1
+Laplacian smoothing transform for face recognition
+GU SuiCheng, TAN Ying
+& HE XinGui
+Key Laboratory of Machine Perception (MOE); Department of Machine Intelligence,
+School of Electronics Engineering and Computer Science; Peking University, Beijing 100871, China
+Received March 16, 2009; accepted April 1, 2010"
+d92581c452e780710938cfbfa0f1ca2ffccc5d5e,Facial Feature Extraction Based on Local Color and Texture for Face Recognition using Neural Network,"International Journal of Science and Engineering Applications
+Volume 2 Issue 4, 2013, ISSN-2319-7560 (Online)
+Facial Feature Extraction Based on Local Color and Texture
+for Face Recognition using Neural Network
+S.Cynthia Christabel
+M.Annalakshmi
+Sethu Institute of Technology.
+Sethu Institute of Technology.
+Kariapatti.
+Kariapatti.
+Mr.D.Prince Winston
+Aruppukottai."
+aca232de87c4c61537c730ee59a8f7ebf5ecb14f,Ebgm Vs Subspace Projection for Face Recognition,"EBGM VS SUBSPACE PROJECTION FOR FACE RECOGNITION
+Andreas Stergiou, Aristodemos Pnevmatikakis, Lazaros Polymenakos
+9.5 Km Markopoulou Avenue, P.O. Box 68, Peania, Athens, Greece
+Athens Information Technology
+Keywords:
+Human-Machine Interfaces, Computer Vision, Face Recognition."
+ac7f898ff5789914d423526c392ee61b979fdd8e,"Target Tracking with Kalman Filtering, KNN and LSTMs","Target Tracking with Kalman Filtering, KNN and LSTMs
+Dan Iter
+Jonathan Kuck
+Philip Zhuang
+December 17, 2016"
+ac6a9f80d850b544a2cbfdde7002ad5e25c05ac6,Privacy-Protected Facial Biometric Verification Using Fuzzy Forest Learning,"Privacy-Protected Facial Biometric Verification
+Using Fuzzy Forest Learning
+Richard Jiang, Ahmed Bouridane, Senior Member, IEEE, Danny Crookes, Senior Member, IEEE,
+M. Emre Celebi, Senior Member, IEEE, and Hua-Liang Wei"
+aca8c4a62ed6e590889f1e859d7bc79311fa6f4d,Beyond Universal Saliency: Personalized Saliency Prediction with Multi-task CNN,"Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+Semantic labels Observer A Observer B Observer C Figure1:AnillustrationofPSMdataset.Ourdatasetprovidesbotheyefixationsofdifferentsubjectsandsemanticlabels.Duetothelargeamountofobjectsinourdataset,foreachimage,wedidn’tful-lysegmentitandonlylabelledobjectsthatcoveratleastthreegazepointsfromeachindividual.AnotabledifferencebetweenPSManditspredecessorsisthateachsubjectslooks4timesonPSMdatatoderivesolidfixationgroundtruthmaps.Bothcommonalityanddis-tinctivenessexistforPSMsviewedbydifferentparticipant.ThismotivatesustomodelPSMbasedonUSM.recognizingheterogeneityacrossindividuals.ExamplesinFig.1illustratethatwhilemultipleobjectsaredeemedhigh-lysalientwithinthesameimage(eg,humanface(firstrow),text(lasttowrows)andobjectof(highcolorcontrast),differ-entindividualshaveverydifferentfixationpreferenceswhenviewingtheimage.Fortherestofthepaper,weusetermuniversalsaliencytodescribesalientregionsthatincurhighfixationsacrossallsubjectsandtermpersonalizedsaliencytodescribetheheterogeneousones.Motivation.Infact,heterogeneityinsaliencypreferencehasbeenwidelyrecognizedinpsychology:”Interestingnessishighlysubjectiveandthereareindividualswhodidnotconsideranyimageinterestinginsomesequences”[Gyglietal.,2013].Therefore,onceweknowaperson’spersonal-izedinterestingnessovereachimage(personalizedsaliency),weshalldesigntailoredalgorithmstocatertohim/herneed-s.Forexample,intheapplicationofimageretargeting,thetextsonthetableinthefourthrowinFig.1shouldbepre-"
+ac83b9ad20ecf63c7818ff1e43a99b4c626fac12,Accuracy and Security Evaluation of Multi-Factor Biometric Authentication,"Accuracy and Security Evaluation of Multi-Factor Biometric Authentication
+Hisham Al-Assam, Harin Sellahewa, Sabah Jassim
+Department of Applied Computing
+University of Buckingham
+Buckingham, MK18 1EG, United Kingdom
+{hisham.al-assam, harin.sellahewa,"
+ac57b04359818c17d416ee53ae05a5f126eca4db,Detection and classification of the behavior of people in an intelligent building by camera,"Detection and classification of the behavior of people in an
+intelligent building by camera
+Henni Sid Ahmed1, Belbachir Mohamed Faouzi2, Jean Caelen3
+Universite of sciences and technology USTO in Oran Algeria, laboratory LSSD, Faculty genie
+electrique, department electronique, BP 1505 el menouar Oran 31000 Algeria
+Universite of sciences and technology USTO in Oran Algeria, laboratory LSSD, Faculty genie
+electrique, department electronique, BP 1505 el menouar Oran 31000 Algeria
+Universite Joseph Fourier, Grenoble, F , LIG Grenoble computer laboratory ,domaine
+universitaire BP 53, 220 rue de la chimie 38041 Grenoble cedex 9 France
+Emails: 1
+Submitted: Apr. 10, 2013 Accepted: July 30, 2013 Published: Sep. 3, 2013"
+accbd6cd5dd649137a7c57ad6ef99232759f7544,Facial Expression Recognition with Local Binary Patterns and Linear Programming,"FACIAL EXPRESSION RECOGNITION WITH LOCAL BINARY PATTERNS
+AND LINEAR PROGRAMMING
+Xiaoyi Feng1, 2, Matti Pietikäinen1, Abdenour Hadid1
+Machine Vision Group, Infotech Oulu and Dept. of Electrical and Information Engineering
+P. O. Box 4500 Fin-90014 University of Oulu, Finland
+2 College of Electronics and Information, Northwestern Polytechnic University
+710072 Xi’an, China
+In this work, we propose a novel approach to recognize facial expressions from static
+images. First, the Local Binary Patterns (LBP) are used to efficiently represent the facial
+images and then the Linear Programming (LP) technique is adopted to classify the seven
+facial expressions anger, disgust, fear, happiness, sadness, surprise and neutral.
+Experimental results demonstrate an average recognition accuracy of 93.8% on the JAFFE
+database, which outperforms the rates of all other reported methods on the same database.
+Introduction
+Facial expression recognition from static
+images is a more challenging problem
+than from image sequences because less
+information for expression actions
+vailable. However, information in a
+single image is sometimes enough for"
+ac88405d34b7b6fa701e25d9fbdb56126cc9a8c3,On the Diversity of Realistic Image Synthesis,"On the Diversity of Realistic Image Synthesis
+Zichen Yang, Haifeng Liu, Member, IEEE and Deng Cai, Member, IEEE"
+ac4c19e52a58aea27593b99f0ebe5316339b9646,A Probabilistic Approach for Image Retrieval Using Descriptive Textual Queries,"A Probabilistic Approach for Image Retrieval Using
+Descriptive Textual Queries
+Yashaswi Verma
+CVIT, IIIT Hyderabad, India
+C. V. Jawahar
+CVIT, IIIT Hyderabad, India"
+ac479607e6b44c69022a56b5847a055535ae63ed,Cross-domain fashion image retrieval,"Cross-domain fashion image retrieval
+Bojana Gaji´c, Ramon Baldrich
+Computer Vision Center
+Universitat Autnoma de Barcelona
+Edifici O. UAB. Bellaterra, Spain.
+{bgajic,"
+ac968bf321f1dfa2d216dccc22fa5315de63d7bd,Face Template Protection using Deep Convolutional Neural Network,"Face Template Protection using Deep Convolutional Neural Network
+Arun Kumar Jindal, Srinivas Chalamala, Santosh Kumar Jami
+TCS Research, Tata Consultancy Services, India
+{jindal.arun, chalamala.srao,"
+acaa89fb6263aef7ad58a37d9cac79c8fcaa29ca,Person Re-identification in Identity Regression Space,"Noname manuscript No.
+(will be inserted by the editor)
+Person Re-Identification in Identity Regression Space
+Hanxiao Wang · Xiatian Zhu · Shaogang Gong · Tao Xiang
+Received: date / Accepted: date"
+acee1e7700e9f084ff64805a2c67d16fe69e63a8,250 years Lambert surface: does it really exist?,"250 years Lambert surface: does it really
+exist?
+Institut f¨ur Lasertechnologien in der Medizin und Meßtechnik, Helmholtzstr.12, D-89081 Ulm,
+Alwin Kienle∗ and Florian Foschum
+Germany"
+ac26166857e55fd5c64ae7194a169ff4e473eb8b,Personalized Age Progression with Bi-Level Aging Dictionary Learning,"Personalized Age Progression with Bi-level
+Aging Dictionary Learning
+Xiangbo Shu, Jinhui Tang, Senior Member, IEEE, Zechao Li, Hanjiang Lai, Liyan Zhang
+nd Shuicheng Yan, Fellow, IEEE"
+ac559873b288f3ac28ee8a38c0f3710ea3f986d9,Team DEEP-HRI Moments in Time Challenge 2018 Technical Report,"Team DEEP-HRI Moments in Time Challenge 2018 Technical Report
+Chao Li, Zhi Hou, Jiaxu Chen, Yingjia Bu, Jiqiang Zhou, Qiaoyong Zhong, Di Xie and Shiliang Pu
+Hikvision Research Institute"
+ac8e09128e1e48a2eae5fa90f252ada689f6eae7,Leolani: A Reference Machine with a Theory of Mind for Social Communication,"Leolani: a reference machine with a theory of
+mind for social communication
+Piek Vossen, Selene Baez, Lenka Baj˘ceti´c, and Bram Kraaijeveld
+VU University Amsterdam, Computational Lexicology and Terminology Lab, De
+Boelelaan 1105, 1081HV Amsterdam, The Netherlands
+www.cltl.nl"
+acc5318592303852feba755a1202fb3c683b3b53,Correction of AI systems by linear discriminants: Probabilistic foundations,"Correction of AI systems by linear discriminants: Probabilistic foundations
+A.N. Gorbana,b,∗, A. Golubkovc, B. Grechuka, E.M. Mirkesa,b, I.Y. Tyukina,b
+Department of Mathematics, University of Leicester, Leicester, LE1 7RH, UK
+Lobachevsky University, Nizhni Novgorod, Russia
+Saint-Petersburg State Electrotechnical University, Saint-Petersburg, Russia"
+ac9feef881ed00a5a5e53bddb88f135a9cffe048,A General Method for Appearance-Based People Search Based on Textual Queries,"A general method for appearance-based people
+search based on textual queries
+Riccardo Satta, Giorgio Fumera, and Fabio Roli
+Dept. of Electrical and Electronic Engineering, University of Cagliari
+Piazza d’Armi, 09123 Cagliari, Italy"
+ac8441e30833a8e2a96a57c5e6fede5df81794af,Hierarchical Representation Learning for Kinship Verification,"IEEE TRANSACTIONS ON IMAGE PROCESSING
+Hierarchical Representation Learning for Kinship
+Verification
+Naman Kohli, Student Member, IEEE, Mayank Vatsa, Senior Member, IEEE, Richa Singh, Senior Member, IEEE,
+Afzel Noore, Senior Member, IEEE, and Angshul Majumdar, Senior Member, IEEE"
+acc37d228f6cb2205497df81532c582ed71dd9fe,Deep Ordinal Ranking for Multi-Category Diagnosis of Alzheimer's Disease using Hippocampal MRI data,"Deep Ordinal Ranking for Multi-Category Diagnosis of Alzheimer’s
+Disease using Hippocampal MRI data
+Hongming Li, Mohamad Habes, Yong Fan
+nd for the Alzheimer's Disease Neuroimaging Initiative*
+Section for Biomedical Image Analysis (SBIA), Center for Biomedical Image Computing and
+Analytics (CBICA), Department of Radiology, Perelman School of Medicine, University of
+Pennsylvania, Philadelphia, PA, 19104, USA
+*Data used in preparation of this article were obtained from the Alzheimer’s Disease Neuroimaging Initiative (ADNI) database
+(adni.loni.usc.edu). As such, the investigators within the ADNI contributed to the design and implementation of ADNI and/or
+provided data but did not participate in analysis or writing of this report. A complete listing of ADNI investigators can be found
+t: http://adni.loni.usc.edu/wp-content/uploads/how_to_apply/ADNI_Acknowledgement_List.pdf"
+acf13c52c86a3b38642ba0c6cbcd1b771778965c,NAACL HLT 2018 Generalization in the Age of Deep Learning Proceedings of the Workshop,"NAACLHLT2018GeneralizationintheAgeofDeepLearningProceedingsoftheWorkshopJune5,2018NewOrleans,Louisiana"
+ac5c93b789bdd557b90ce77221f1c01ead63041f,Robust People Detection using Computer Vision Spring Term 2013,"Autonomous Systems Lab
+Prof. Roland Siegwart
+Master-Thesis
+Robust People Detection
+using Computer Vision
+Spring Term 2013
+Supervised by:
+Jerome Maye
+Paul Beardsley
+Author:
+Endri Dibra"
+ac12ba5bf81de83991210b4cd95b4ad048317681,Combining Deep Facial and Ambient Features for First Impression Estimation,"Combining Deep Facial and Ambient Features
+for First Impression Estimation
+Furkan G¨urpınar1, Heysem Kaya2, Albert Ali Salah3
+Program of Computational Science and Engineering, Bo˘gazi¸ci University,
+Bebek, Istanbul, Turkey
+Department of Computer Engineering, Namık Kemal University,
+C¸ orlu, Tekirda˘g, Turkey
+Department of Computer Engineering, Bo˘gazi¸ci University,
+Bebek, Istanbul, Turkey"
+ac0d88ca5f75a4a80da90365c28fa26f1a26d4c4,MOT16: A Benchmark for Multi-Object Tracking,"MOT16: A Benchmark for Multi-Object Tracking
+Anton Milan∗, Laura Leal-Taix´e∗, Ian Reid, Stefan Roth, and Konrad Schindler"
+acb83d68345fe9a6eb9840c6e1ff0e41fa373229,"Kernel methods in computer vision: object localization, clustering, and taxonomy discovery","Kernel Methods in Computer Vision:
+Object Localization, Clustering,
+and Taxonomy Discovery
+vorgelegt von
+Matthew Brian Blaschko, M.S.
+us La Jolla
+Von der Fakult¨at IV - Elektrotechnik und Informatik
+der Technischen Universit¨at Berlin
+zur Erlangung des akademischen Grades
+Doktor der Naturwissenschaften
+Dr. rer. nat.
+genehmigte Dissertation
+Promotionsausschuß:
+Vorsitzender: Prof. Dr. O. Hellwich
+Berichter: Prof. Dr. T. Hofmann
+Berichter: Prof. Dr. K.-R. M¨uller
+Berichter: Prof. Dr. B. Sch¨olkopf
+Tag der wissenschaftlichen Aussprache: 23.03.2009
+Berlin 2009"
+ade1034d5daec9e3eba1d39ae3f33ebbe3e8e9a7,Multimodal Caricatural Mirror,"Multimodal Caricatural Mirror
+Martin O.(1), Adell J.(2), Huerta A.(3), Kotsia I.(4), Savran A.(5), Sebbe R.(6)
+(1) : Université catholique de Louvain, Belgium
+(2) Universitat Polytecnica de Barcelona, Spain
+(3) Universidad Polytècnica de Madrid, Spain
+(4) Aristotle University of Thessaloniki, Greece
+(5) Bogazici University, Turkey
+(6) Faculté Polytechnique de Mons, Belgium"
+adf7ccb81b8515a2d05fd3b4c7ce5adf5377d9be,Apprentissage de métrique appliqué à la détection de changement de page Web et aux attributs relatifs,"Apprentissage de métrique appliqué à la
+détection de changement de page Web et
+ux attributs relatifs
+Marc T. Law* — Nicolas Thome* — Stéphane Gançarski* — Mat-
+thieu Cord*
+* Sorbonne Universités, UPMC Univ Paris 06, UMR 7606, LIP6, F-75005, Paris,
+France
+RÉSUMÉ. Nous proposons dans cet article un nouveau schéma d’apprentissage de métrique.
+Basé sur l’exploitation de contraintes qui impliquent des quadruplets d’images, notre approche
+vise à modéliser des relations sémantiques de similarités riches ou complexes. Nous étudions
+omment ce schéma peut être utilisé dans des contextes tels que la détection de régions impor-
+tantes dans des pages Web ou la reconnaissance à partir d’attributs relatifs."
+ad3caae50feee550b047e17699cfe7bb9e243cf5,Sparse similarity-preserving hashing,"Sparse similarity-preserving hashing
+Jonathan Masci
+Alex M. Bronstein
+Michael M. Bronstein
+Pablo Sprechmann
+Guillermo Sapiro"
+ad7a7f70e460d4067d7170bcc0f1ea62eedd7234,CBinfer: Exploiting Frame-to-Frame Locality for Faster Convolutional Network Inference on Video Streams,"CBinfer: Exploiting Frame-to-Frame Locality for Faster
+Convolutional Network Inference on Video Streams
+Lukas Cavigelli, Luca Benini"
+adb2d1e241933ef363bcf03d865a9219d2911780,Classification of Age from Facial Features of Humans,"International Journal of Science and Research (IJSR)
+ISSN (Online): 2319-7064
+Index Copernicus Value (2013): 6.14 | Impact Factor (2014): 5.611
+Classification of Age from Facial Features of
+Poonam Shirode1, S. M. Handore2
+, 2Department of E&TC, K.J’s Educational Institute’s TCOER, Pune, Maharashtra, India
+Humans"
+ade18cf978e4b00fb74352a7eba90b4f4509d645,Articulated Multi-body Tracking under Egomotion,"Articulated Multi-body Tracking Under Egomotion
+S. Gammeter1, A. Ess1, T. J¨aggli1, K. Schindler1, B. Leibe1,2, and L. Van Gool1,3
+ETH Z¨urich
+RWTH Aachen
+KU Leuven, IBBT"
+ad30152944a42975f16a53cf0e0666e9937e9d73,Dyadic Interaction Detection from Pose and Flow,"Dyadic interaction detection from pose and flow
+Anonymous ECCV submission
+Paper ID 17"
+ada73060c0813d957576be471756fa7190d1e72d,VRPBench: A Vehicle Routing Benchmark Tool,"VRPBench: A Vehicle Routing Benchmark Tool
+October 19, 2016
+Guilherme A. Zeni1 , Mauro Menzori1, P. S. Martins1, Luis A. A. Meira1"
+adaff7ff015b4be77e8c0bdb9d002b614d6e2851,A Hybrid Method for Face Recognition using LLS CLAHE Method,"International Journal of Computer Applications (0975 – 8887)
+Volume 152 – No.7, October 2016
+A Hybrid Method for Face Recognition using LLS
+CLAHE Method
+Mohandas College of Engineering and
+A. Thamizharasi
+Assistant Professor,
+Department of Computer
+Science & Engineering,
+Technology,
+Anad, Nedumangad P.O.,
+Trivandrum, Kerala, India"
+adca02d4b34a9851d1c9c0a7c1bb8d5178b59b85,Modeling the dynamics of individual behaviors for group detection in crowds using low-level features,"Modeling the dynamics of individual behaviors for group
+detection in crowds using low-level features
+Omar Adair Islas Ram´ırez
+Giovanna Varni
+Mihai Andries
+Mohamed Chetouani
+Raja Chatila"
+ad01c5761c89fdf523565cc0dec77b9a6ec8e694,Global and Local Consistent Wavelet-domain Age Synthesis,"Global and Local Consistent Wavelet-domain Age
+Synthesis
+Peipei Li†, Yibo Hu†, Ran He Member, IEEE and Zhenan Sun Member, IEEE"
+ada4901e0022b4fdeb9ec3ae26b986199f7ae3be,Human Face Recognition based on Improved PCA Algorithm,"Human Face Recognition based on Improved
+PCA Algorithm
+Xu Yue
+College of art and design, LanZhou JiaoTong University, Lanzhou, China
+Email:
+Linhao Li
+AT&T Labs, 200 South Laurel Ave, #D4-3C05, NJ, USA
+Email:"
+ad9937ff6c5bff4dae72ca90eddc4dd77751b3fa,FusionNet and AugmentedFlowNet: Selective Proxy Ground Truth for Training on Unlabeled Images,"FusionNet and AugmentedFlowNet:
+Selective Proxy Ground Truth
+for Training on Unlabeled Images
+Osama Makansi*, Eddy Ilg*, and Thomas Brox
+University of Freiburg, Germany"
+ad2afeb4c1975c637291bc3f7087d665c3f501c8,WebVision Challenge: Visual Learning and Understanding With Web Data,"WebVision Challenge: Visual Learning and
+Understanding With Web Data
+Wen Li, Limin Wang, Wei Li, Eirikur Agustsson, Jesse Berent, Abhinav Gupta, Rahul Sukthankar,
+and Luc Van Gool"
+adfaf01773c8af859faa5a9f40fb3aa9770a8aa7,Large Scale Visual Recognition,"LARGE SCALE VISUAL RECOGNITION
+JIA DENG
+A DISSERTATION
+PRESENTED TO THE FACULTY
+OF PRINCETON UNIVERSITY
+IN CANDIDACY FOR THE DEGREE
+OF DOCTOR OF PHILOSOPHY
+RECOMMENDED FOR ACCEPTANCE
+BY THE DEPARTMENT OF
+COMPUTER SCIENCE
+ADVISER: FEI-FEI LI
+JUNE 2012"
+adf5caca605e07ee40a3b3408f7c7c92a09b0f70,Line-Based PCA and LDA Approaches for Face Recognition,"Line-based PCA and LDA approaches for Face Recognition
+Vo Dinh Minh Nhat, and Sungyoung Lee
+Kyung Hee University – South of Korea
+{vdmnhat,"
+adaf2b138094981edd615dbfc4b7787693dbc396,Statistical methods for facial shape-from-shading and recognition,"Statistical Methods For Facial
+Shape-from-shading and Recognition
+William A. P. Smith
+Submitted for the degree of Doctor of Philosophy
+Department of Computer Science
+0th February 2007"
+adf1b20cffb0ab12d20f878d07373efc4c1bc6c4,Image Retagging Using Collaborative Tag Propagation,"Image Retagging Using Collaborative
+Tag Propagation
+Dong Liu, Shuicheng Yan, Senior Member, IEEE, Xian-Sheng Hua, Member, IEEE, and
+Hong-Jiang Zhang, Fellow, IEEE"
+ad88fcfd12b62d607259db8d98e2a1a0a9642ca0,Real-time tracking-with-detection for coping with viewpoint change,"Real-Time Tracking-with-Detection for Coping With Viewpoint Change
+Shaul Oron · Aharon Bar-Hillel · Shai Avidan
+Received: 11 May 2014 / Revised: 02 Nov 2014 / Accepted: 09 Mar 2015"
+ad75879082132a73fe173a890a0f414f2c279739,A comparison of CNN-based face and head detectors for real-time video surveillance applications,"A Comparison of CNN-based Face and Head Detectors for
+Real-Time Video Surveillance Applications
+Le Thanh Nguyen-Meidine1, Eric Granger 1, Madhu Kiran1 and Louis-Antoine Blais-Morin2
+´Ecole de technologie sup´erieure, Universit´e du Qu´ebec, Montreal, Canada
+Genetec Inc., Montreal, Canada"
+adefabe194863b4f764ec982e3120554165c841c,Radius based Block Local Binary Pattern on T-Zone Face Area for Face Recognition,"Journal of Computer Science 11 (1): 96-108, 2015
+ISSN: 1549-3636
+© 2015 Science Publications
+RADIUS BASED BLOCK LOCAL BINARY PATTERN ON T-
+ZONE FACE AREA FOR FACE RECOGNITION
+Md. Jan Nordin, 2Abdul Aziz K. Abdul Hamid,
+Sumazly Ulaiman and 2R.U. Gobithaasan
+Center for Artificial Intelligent Technology, Universiti Kebangsaan Malaysia, Bangi, Selangor, Malaysia
+School of Informatics and App. Maths, Universiti Malaysia Terengganu, Terengganu, Malaysia
+Received 2014-02-20; Revised 2014-04-29; Accepted 2014-08-04"
+adf62dfa00748381ac21634ae97710bb80fc2922,ViFaI : A trained video face indexing scheme Harsh,"ViFaI: A trained video face indexing scheme
+Harsh Nayyar
+Audrey Wei
+. Introduction
+With the increasing prominence of inexpensive
+video recording devices (e.g., digital camcorders and
+video recording smartphones),
+the average user’s
+video collection today is increasing rapidly. With this
+development, there arises a natural desire to rapidly
+ccess a subset of one’s collection of videos. The solu-
+tion to this problem requires an effective video index-
+ing scheme. In particular, we must be able to easily
+process a video to extract such indexes.
+Today, there also exist large sets of labeled (tagged)
+face images. One important example is an individual’s
+Facebook profile. Such a set of of tagged images of
+one’s self, family, friends, and colleagues represents
+n extremely valuable potential training set.
+In this work, we explore how to leverage the afore-"
+add85ee833e2a1c5cdbcd206d5423d63f20cda24,International Journal of Advanced Robotic Systems Embedded Face Detection and Recognition Regular Paper,"International Journal of Advanced Robotic Systems
+Embedded Face Detection
+and Recognition
+Regular Paper
+Göksel Günlü
+Department of Electrical and Electronics Engineering Turgut Özal University, Ankara, Turkey
+* Corresponding author E-mail:
+Received 07 May 2012; Accepted 28 Jun 2012
+DOI: 10.5772/51132
+© 2012 Günlü; licensee InTech. This is an open access article distributed under the terms of the Creative
+Commons Attribution License (http://creativecommons.org/licenses/by/3.0), which permits unrestricted use,
+distribution, and reproduction in any medium, provided the original work is properly cited."
+bb2944569a2b3d3b8340b36d4903c8cddf20047f,Improving Regression Performance with Distributional Losses,"Improving Regression Performance with Distributional Losses
+Ehsan Imani 1 Martha White 1"
+bb06c12e83255b2c3afca1e3e115e721c53b46b3,Beyond Local Appearance: Category Recognition from Pairwise Interactions of Simple Features,"Beyond Local Appearance: Category Recognition from Pairwise Interactions of
+Simple Features
+Marius Leordeanu1
+Martial Hebert1
+Rahul Sukthankar2,1
+Carnegie Mellon University 2Intel Research Pittsburgh"
+bb7c5a521607a02e7a291dca7fc33b595c3b7bff,Texture Classification using Local Binary Patterns and Modular PCA,"ISSN: 2278 – 1323
+International Journal of Advanced Research in Computer Engineering & Technology (IJARCET)
+Volume 5, Issue 5, May 2016
+Texture Classification using Local Binary
+Patterns and Modular PCA
+Sayanshree Ghosh, Srimanta Kundu and Sayantari Ghosh
+www.ijarcet.org"
+bb35ef89addbbc28d960bc0cab70d8a29fdf6eee,A Survey on Multi-Task Learning,"A Survey on Multi-Task Learning
+Yu Zhang and Qiang Yang"
+bb489e4de6f9b835d70ab46217f11e32887931a2,Everything You Wanted to Know about Deep Learning for Computer Vision but Were Afraid to Ask,"Everything you wanted to know about Deep Learning for Computer Vision but were
+afraid to ask
+Moacir A. Ponti, Leonardo S. F. Ribeiro, Tiago S. Nazare
+ICMC – University of S˜ao Paulo
+S˜ao Carlos/SP, 13566-590, Brazil
+Tu Bui, John Collomosse
+CVSSP – University of Surrey
+Guildford, GU2 7XH, UK
+Email: [ponti, leonardo.sampaio.ribeiro,
+Email: [t.bui,
+tools,"
+bb97664df153ac563e46ec2233346129cafe601b,A study on the use of Boundary Equilibrium GAN for Approximate Frontalization of Unconstrained Faces to aid in Surveillance,"A study on the use of Boundary Equilibrium GAN for Approximate
+Frontalization of Unconstrained Faces to aid in Surveillance
+Wazeer Zulfikar, Sebastin Santy, Sahith Dambekodi and Tirtharaj Dash
+BITS Pilani - KK Birla Goa Campus, Goa, India
+{f20150003, f20150357, f20150192,"
+bba281fe9c309afe4e5cc7d61d7cff1413b29558,An unpleasant emotional state reduces working memory capacity: electrophysiological evidence,"Social Cognitive and Affective Neuroscience, 2017, 984–992
+doi: 10.1093/scan/nsx030
+Advance Access Publication Date: 11 April 2017
+Original article
+An unpleasant emotional state reduces working
+memory capacity: electrophysiological evidence
+Jessica S. B. Figueira,1 Leticia Oliveira,1 Mirtes G. Pereira,1 Luiza B. Pacheco,1
+Isabela Lobo,1,2 Gabriel C. Motta-Ribeiro,3 and Isabel A. David1
+Laboratorio de Neurofisiologia do Comportamento, Departamento de Fisiologia e Farmacologia, Instituto
+Biome´dico, Universidade Federal Fluminense, Niteroi, Brazil, 2MograbiLab, Departamento de Psicologia,
+Pontifıcia Universidade Catolica do Rio de Janeiro, Rio de Janeiro, Brazil, and 3Laboratorio de Engenharia
+Pulmonar, Programa de Engenharia Biome´dica, COPPE, Universidade Federal do Rio de Janeiro, Rio de Janeiro, Brazil
+Correspondence should be addressed to Isabel A. David, Departamento de Fisiologia e Farmacologia, Instituto Biome´dico, Universidade Federal
+Fluminense, Rua Hernani Pires de Mello, 101, Niteroi, RJ 24210-130, Brazil. E-mail:"
+bb79bb04e569f9319fbc9d8e1f275bbb2cf8d32e,NMT-Keras: a Very Flexible Toolkit with a Focus on Interactive NMT and Online Learning,"NMT-Keras: a Very Flexible Toolkit with a Focus
+on Interactive NMT and Online Learning
+Álvaro Peris, Francisco Casacuberta
+Pattern Recognition and Human Language Technology Research Center, Universitat Politècnica de València, Spain"
+bbc76f0e50ab96e7318816e24c65fd3459d0497c,Survey of Pedestrian Detection for Advanced Driver Assistance Systems,"JULY 2010
+Survey of Pedestrian Detection for
+Advanced Driver Assistance Systems
+David Gero´ nimo, Antonio M. Lo´ pez, Angel D. Sappa, Member, IEEE, and Thorsten Graf"
+bb131650627cf2d1da570589f6c540041df1ae92,Improving the Intra Class Distance using RBSQI Technique for Facial Images with Illumination Variations,"Volume 2 Special Issue ISSN 2079-8407
+Journal of Emerging Trends in Computing and Information Sciences
+©2010-11 CIS Journal. All rights reserved.
+http://www.cisjournal.org
+Improving the Intra Class Distance using RBSQI Technique for Facial
+Images with Illumination Variations
+K. R. Singh1, M. A. Zaveri2, M.M. Raghuwanshi3
+,2Computer Engineering Department, S.V.National Institute of Technology, Surat, 329507, India.
+NYSS College of Engineering and Research, Nagpur, 441 110, India."
+bb1f4c8e4f310047e50b7dc41d87292025d42eb7,Intersubject Differences in False Nonmatch Rates for a Fingerprint-Based Authentication System,"Hindawi Publishing Corporation
+EURASIP Journal on Advances in Signal Processing
+Volume 2009, Article ID 896383, 9 pages
+doi:10.1155/2009/896383
+Research Article
+Intersubject Differences in False Nonmatch Rates for
+Fingerprint-Based Authentication System
+Jeroen Breebaart, Ton Akkermans, and Emile Kelkboom
+Philips Research, HTC 34 MS61, 5656 AE Eindhoven, The Netherlands
+Correspondence should be addressed to Jeroen Breebaart,
+Received 4 September 2008; Accepted 7 July 2009
+Recommended by Jonathon Phillips
+The intersubject dependencies of false nonmatch rates were investigated for a minutiae-based biometric authentication process
+using single enrollment and verification measurements. A large number of genuine comparison scores were subjected to statistical
+inference tests that indicated that the number of false nonmatches depends on the subject and finger under test. This result was also
+observed if subjects associated with failures to enroll were excluded from the test set. The majority of the population (about 90%)
+showed a false nonmatch rate that was considerably smaller than the average false nonmatch rate of the complete population.
+The remaining 10% could be characterized as “goats” due to their relatively high probability for a false nonmatch. The image
+quality reported by the template extraction module only weakly correlated with the genuine comparison scores. When multiple
+verification attempts were investigated, only a limited benefit was observed for “goats,” since the conditional probability for a false"
+bba22e04fbe124bf58330e5d911d873a80afa0eb,Probabilistic Global Scale Estimation for MonoSLAM Based on Generic Object Detection,"Probabilistic Global Scale Estimation for MonoSLAM
+Based on Generic Object Detection
+Centro de Investigaci´on en Matem´aticas - Universidad de Guanajuato
+Jalisco S/N, Col. Valenciana CP: 36023 Guanajuato, Gto, Mxico
+Edgar Sucar, Jean-Bernard Hayet"
+bb22104d2128e323051fb58a6fe1b3d24a9e9a46,Analyzing Facial Expression by Fusing Manifolds,")=OEC .=?E= -NFHAIIE >O .KIEC
+9A;= +D=C1,2 +DK5C +DA1,3 ;E2EC 0KC1,2,3
+1IJEJKJA B 1BH=JE 5?EA?A 5EE?= 6=EM=
+,AFJ B +FKJAH 5?EA?A 1BH=JE -CEAAHEC =JE= 6=EM= 7ELAHIEJO
+IJEJKJA B AJMHEC =JE= 6=EM= 7ELAHIEJO
+{wychang,
+)>IJH=?J .A=JKHA HAFHAIAJ=JE ?=IIE?=JE =HA JM =H EIIKAI E B=?E=
+ANFHAIIE ==OIEI 1 JDA F=IJ IJ AEJDAH DEIJE? H ?= HAFHA
+IAJ=JE BH ==OIEI 1 AIIA?A ?= EBH=JE =EO B?KIAI  JDA IK>JA
+L=HE=JEI B ANFHAIIEI DEIJE? HAFHAIAJ=JE IJHAIIAI  C>=
+JEAI 6 J=A JDA B >JD = HAFHAIAJ=JE EI E JDEI
+F=FAH A=HEC EI J ?D=H=?JAHEA C>= ?= EBH=
+JE 7EA IA KIEC A=H
+EC =FFH=?DAI B JDA HAFHAIAJ=JE =HA >O
+= A=HEC JA?DEGKA 6 EJACH=JA JDAIA
+ABBA?JELAO = BKIE ?=IIEAH EI MDE?D ?= DAF J AFO IKEJ=>A
+?>E=JE MAECDJI B B=?E= ?FAJI J = ANFHAIIE +FHADA
+IELA ?F=HEII  B=?E= ANFHAIIE HA?CEJE =HA J JDA
+ABBA?JELAAII B KH =CHEJD
+A=EEC DK= AJEI F=OI = EFHJ=J HA E DK= ?KE?=JE 6"
+bbab2c3d0ebc0957c5e962298ffd8c6d4bc25c5a,Have we met before? Neural correlates of emotional learning in women with social phobia.,"Research Paper
+Have we met before? Neural correlates of emotional
+learning in women with social phobia
+Inga Laeger, MA; Kati Keuper, MA; Carina Heitmann, MA; Harald Kugel, PhD;
+Christian Dobel, PhD; Annuschka Eden, MA; Volker Arolt, MD; Pienie Zwitserlood, PhD;
+Udo Dannlowski, MD, PhD*; Peter Zwanzger, MD*
+Laeger, Heitmann, Arolt, Dannlowski, Zwanzger — Department of Psychiatry, University of Muenster, Germany; Keuper,
+Dobel, Eden — Institute for Biomagnetism and Biosignalanalysis, University of Muenster, Germany; Kugel — Department of
+Clinical Radiology, University of Muenster, Germany; Zwitserlood — Institute for Psychology, University of Muenster, Ger-
+many; Dannlowski — Department of Psychiatry, University of Marburg, Germany
+Background: Altered memory processes are thought to be a key mechanism in the etiology of anxiety disorders, but little is known about
+the neural correlates of fear learning and memory biases in patients with social phobia. The present study therefore examined whether pa-
+tients with social phobia exhibit different patterns of neural activation when confronted with recently acquired emotional stimuli. Methods:
+Patients with social phobia and a group of healthy controls learned to associate pseudonames with pictures of persons displaying either a
+fearful or a neutral expression. The next day, participants read the pseudonames in the magnetic resonance imaging scanner. Afterwards,
+memory tests were carried out. Results: We enrolled 21 patients and 21 controls in our study. There were no group differences for
+learning performance, and results of the memory tests were mixed. On a neural level, patients showed weaker amygdala activation than
+ontrols for the contrast of names previously associated with fearful versus neutral faces. Social phobia severity was negatively related to
+mygdala activation. Moreover, a detailed psychophysiological interaction analysis revealed an inverse correlation between disorder
+severity and frontolimbic connectivity for the emotional > neutral pseudonames contrast. Limitations: Our sample included only women."
+bbf534b8ee9455b8e492a252bef26f9293d4f91a,Effects of cannabis use and subclinical depression on the P3 event-related potential in an emotion processing task,"Observational Study
+Medicine®
+Effects of cannabis use and subclinical
+depression on the P3 event-related potential
+in an emotion processing task
+Lucy J. Troup, PhD
+, Robert D. Torrence, MS, Jeremy A. Andrzejewski, BSc, Jacob T. Braunwalder, BSc"
+bb7f2c5d84797742f1d819ea34d1f4b4f8d7c197,From Images to 3D Shape Attributes.,"TO APPEAR IN TPAMI
+From Images to 3D Shape Attributes
+David F. Fouhey, Abhinav Gupta, Andrew Zisserman"
+bb893fac40eb901229567abb507a8cb82553d198,Will the Pedestrian Cross? Probabilistic Path Prediction Based on Learned Motion Features,"Will the Pedestrian Cross?
+Probabilistic Path Prediction Based on Learned Motion Features
+Christoph G. Keller1, Christoph Hermes2, and Dariu M. Gavrila3,4
+Image & Pattern Analysis Group, Univ. of Heidelberg, Germany
+Applied Informatics Group, Univ. of Bielefeld, Germany
+Environment Perception, Group Research, Daimler AG, Ulm, Germany
+Intelligent Systems Lab, Fac. of Science, Univ. of Amsterdam, The Netherlands"
+bb7c093c41fcec269b6a7a950902cc95429bb289,Robust video object tracking via Bayesian model averaging based feature fusion,"Robust video object tracking via Bayesian model
+averaging based feature fusion
+Yi Dai, Bin Liu, Member, IEEE"
+bbf5575f0d20b79b61c8c0d8b7c2a57224c359de,Emotion Recognition from Decision Level Fusion of Visual and Acoustic Features using Hausdorff Classifier,"Emotion Recognition from Decision Level Fusion
+of Visual and Acoustic Features using Hausdorff
+Classifier
+H.D.Vankayallapati1, K.R.Anne2, and K. Kyamakya1
+Institute of Smart System Technologies, Transportation Informatics Group
+University of Klagenfurt, Klagenfurt, Austria.
+Department of Information Technology, TIFAC-CORE in Telematics
+VR Siddhartha Engineering College, Vijayawada, India."
+bb667cbbf050040fa39cd9e756cd5bf485fccf32,Effective Deterministic Initialization for $k$-Means-Like Methods via Local Density Peaks Searching,"Effective Deterministic Initialization for
+k-Means-Like Methods via Local Density Peaks
+Searching
+Fengfu Li, Hong Qiao, and Bo Zhang"
+bb021f58f8822d12f5747d583a46005ade4a0b10,Breaking Microsoft’s CAPTCHA,"Breaking Microsoft’s CAPTCHA
+Colin Hong Bokil Lopez-Pineda Karthik Rajendran Adri`a Recasens
+May 2015"
+bb6ac4e26499dea5bdedb05b269f40f56247b4c6,An Action Unit based Hierarchical Random Forest Model to Facial Expression Recognition,
+bbc4bbf7aa80a8108d62644fea24e6f70a805df9,Inducing Wavelets into Random Fields via Generative Boosting,"Inducing Wavelets into Random Fields via Generative
+Boosting
+Jianwen Xie, Yang Lu, Song-Chun Zhu, and Ying Nian Wu∗
+Department of Statistics, University of California, Los Angeles, USA"
+bb980dd94463b03c6584513bcccf780e43f089b2,Prediction Error Meta Classification in Semantic Segmentation: Detection via Aggregated Dispersion Measures of Softmax Probabilities,"Prediction Error Meta Classification in Semantic
+Segmentation: Detection via Aggregated Dispersion
+Measures of Softmax Probabilities
+Matthias Rottmann∗, Pascal Colling∗, Thomas Paul Hack†,
+Fabian H¨uger‡, Peter Schlicht‡ and Hanno Gottschalk∗"
+bb451dc2420e1a090c4796c19716f93a9ef867c9,A Review on: Automatic Movie Character Annotation by Robust Face-Name Graph Matching,"International Journal of Computer Applications (0975 – 8887)
+Volume 104 – No.5, October 2014
+A Review on: Automatic Movie Character Annotation
+by Robust Face-Name Graph Matching
+Bhandare P.S.
+Research Scholar
+Sinhgad College of
+Engineering, korti, Pandharpur,
+Solapur University, INDIA
+Gadekar P.R.
+Assistant Professor
+Sinhgad College of
+Engineering, korti, Pandharpur,
+Solapur University, INDIA
+Bandgar Vishal V.
+Assistant Professor
+College of Engineering (Poly),
+Pandharpur, Solapur, INDIA
+Bhise Avdhut S.
+HOD, Department of"
+bbd1eb87c0686fddb838421050007e934b2d74ab,Look at Boundary: A Boundary-Aware Face Alignment Algorithm,"(68 points) COFW (29 points) AFLW (19 points) Figure1:Thefirstcolumnshowsthefaceimagesfromdifferentdatasetswithdifferentnumberoflandmarks.Thesecondcolumnillustratestheuniversallydefinedfacialboundariesestimatedbyourmethods.Withthehelpofboundaryinformation,ourapproachachieveshighaccuracylocalisationresultsacrossmultipledatasetsandannotationprotocols,asshowninthethirdcolumn.Differenttofacedetection[45]andrecognition[75],facealignmentidentifiesgeometrystructureofhumanfacewhichcanbeviewedasmodelinghighlystructuredout-put.Eachfaciallandmarkisstronglyassociatedwithawell-definedfacialboundary,e.g.,eyelidandnosebridge.However,comparedtoboundaries,faciallandmarksarenotsowell-defined.Faciallandmarksotherthancornerscanhardlyremainthesamesemanticallocationswithlargeposevariationandocclusion.Besides,differentannotationschemesofexistingdatasetsleadtoadifferentnumberoflandmarks[28,5,66,30](19/29/68/194points)andanno-tationschemeoffuturefacealignmentdatasetscanhardlybedetermined.Webelievethereasoningofauniquefacial"
+d745cf8c51032996b5fee6b19e1b5321c14797eb,Viewpoint Invariant Pedestrian Recognition with an Ensemble of Localized Features,"Viewpoint Invariant Pedestrian Recognition
+with an Ensemble of Localized Features
+Douglas Gray and Hai Tao
+University of California, Santa Cruz
+{dgray,
+http://vision.soe.ucsc.edu/"
+d79121a03584123fad02c4f2607f0e63d08ff7c2,Tracking Occluded Objects and Recovering Incomplete Trajectories by Reasoning About Containment Relations and Human Actions,"Tracking Occluded Objects and Recovering Incomplete Trajectories
+y Reasoning about Containment Relations and Human Actions
+Wei Liang1,2
+Yixin Zhu2
+Song-Chun Zhu2
+Beijing Laboratory of Intelligent Information Technology, Beijing Institute of Technology, China
+Center for Vision, Cognition, Learning, and Autonomy, University of California, Los Angeles, USA"
+d7ed878c08c90186e3bf607c20ff943834ad0d68,Semantic Data Integration,"Semantic Data Integration
+Michelle Cheatham and Catia Pesquita"
+d78dde04ac4215ed0ed6f2bd5d85094b389d7f5e,A Warping Window Approach to Real-time Vision-based Pedestrian Detection in a Truck's Blind Spot Zone,"A warping window approach to real-time vision-based pedestrian
+detection in a truck’s blind spot zone
+Kristof Van Beeck1, Toon Goedem´e1;2 and Tinne Tuytelaars2
+IIW/EAVISE, Lessius Mechelen - Campus De Nayer, J. De Nayerlaan 5, 2860, Sint-Katelijne-Waver, Belgium
+ESAT/PSI-VISICS, KU Leuven, IBBT, Kasteelpark Arenberg 10, 3100, Heverlee, Belgium
+fkristof.vanbeeck,
+Keywords:
+Computer vision: Pedestrian tracking: Real-time: Active safety systems"
+d74c6e6fbd8952cbad96013e227374c903797162,With Great Training Comes Great Vulnerability: Practical Attacks against Transfer Learning,"With Great Training Comes Great Vulnerability:
+Practical Attacks against Transfer Learning
+Bolun Wang
+Yuanshun Yao
+Bimal Viswanath
+Haitao Zheng
+UC Santa Barbara
+University of Chicago
+Virginia Tech
+University of Chicago
+Ben Y. Zhao
+University of Chicago"
+d7c6e4348542fd2b5e64a73d9c1fd0172e2b1774,Grounding language acquisition by training semantic parsers using captioned videos,"Grounding language acquisition by training semantic parsers
+using captioned videos
+Candace Ross
+CSAIL, MIT
+Andrei Barbu
+CSAIL, MIT
+Yevgeni Berzak
+BCS, MIT
+Battushig Myanganbayar
+CSAIL, MIT"
+d7f7eb0fbe3339d13f5a6a23df0fd27fdb357d48,Intention-Aware Multi-Human Tracking for Human-Robot Interaction via Particle Filtering over Sets,"Intention-Aware Multi-Human Tracking for
+Human-Robot Interaction via Particle Filtering over Sets
+Aijun Bai
+Univ. of Sci. & Tech. of China
+Reid Simmons
+Carnegie Mellon Univ.
+Manuela Veloso
+Carnegie Mellon Univ.
+The Approach
+The ability for an autonomous robot to track and identify
+multiple humans and understand their intentions is crucial
+for socialized human-robot interactions in dynamic envi-
+ronments (Michalowski and Simmons 2006). Take CoBot
+(Rosenthal, Biswas, and Veloso 2010) trying to enter an ele-
+vator as an example. When the elevator door opens, suppose
+there are multiple humans occupied, CoBot needs to track
+each human’s state and intention in terms of whether he/she
+is going to exit the elevator or not. For the purposes of safely
+nd friendly interacting with humans, CoBot can only make
+the decision to enter the elevator when any human who in-"
+d7731565ec4cb1b910290ccb580405cb55224286,Robust Face Recognition via Adaptive Sparse Representation,"Robust Face Recognition via Adaptive Sparse
+Representation
+Jing Wang, Canyi Lu, Meng Wang, Member, IEEE, Peipei Li,
+Shuicheng Yan, Senior Member, IEEE, Xuegang Hu"
+d7eae9f76dcfa978b99eef430feb9420eac702eb,A Multi-Layer K-means Approach for Multi-Sensor Data Pattern Recognition in Multi-Target Localization,"A Multi-Layer K-means Approach for Multi-Sensor Data Pattern
+Recognition in Multi-Target Localization
+Samuel Silva, Rengan Suresh, Feng Tao, Johnathan Votion, Yongcan Cao"
+d7fe2a52d0ad915b78330340a8111e0b5a66513a,Photo-to-Caricature Translation on Faces in the Wild,"Unpaired Photo-to-Caricature Translation on Faces in
+the Wild
+Ziqiang Zhenga, Chao Wanga, Zhibin Yua, Nan Wanga, Haiyong Zhenga,∗,
+Bing Zhenga
+No. 238 Songling Road, Department of Electronic Engineering, Ocean University of
+China, Qingdao, China"
+d7f19812ee77e508b314d0ac6ab49d05ac81e0d1,Active Visual-Based Detection and Tracking of Moving Objects from Clustering and Classification Methods,"Active Visual-based Detection and Tracking of Moving
+Objects from Clustering and Classification methods
+David Márquez-Gámez Michel Devy
+CNRS; LAAS; Université de Toulouse
+7 avenue du Colonel Roche, F-31077 Toulouse Cedex, France"
+d7c659ce0442bf1047e7d2e942837b18105f6f47,Depth-Adaptive Deep Neural Network for Semantic Segmentation,"Depth Adaptive Deep Neural Network
+for Semantic Segmentation
+Byeongkeun Kang, Yeejin Lee, and Truong Q. Nguyen, Fellow, IEEE"
+d76f68c2d0a45ab224065d57836bf3da360c82f2,Learning to Segment Human by Watching YouTube,"Learning to Segment Human by Watching
+YouTube
+Xiaodan Liang, Yunchao Wei, Liang Lin, Yunpeng Chen, Xiaohui Shen, Jianchao Yang,
+Shuicheng Yan"
+d7a0f9ab321e728b981e12775b4906f55d3aab15,3D Object Reconstruction using Computer Vision: Reconstruction and Characterization Applications for External Human Anatomical Structures,"D Object Reconstruction using
+Computer Vision: Reconstruction
+nd Characterization Applications for
+External Human Anatomical Structures
+Teresa Cristina de Sousa Azevedo
+BSc in Electrical and Computer Engineering by
+Faculdade de Engenharia da Universidade do Porto (2002)
+MSc in Biomedical Engineering by
+Faculdade de Engenharia da Universidade do Porto (2007)
+Thesis submitted for the fulfilment of the requirements for the
+PhD degree in Informatics Engineering by
+Faculdade de Engenharia da Universidade do Porto
+Supervisor:
+João Manuel R. S. Tavares
+Associate Professor of the Department of Mechanical Engineering
+Faculdade de Engenharia da Universidade do Porto
+Co-supervisor:
+Mário A. P. Vaz
+Associate Professor of the Department of Mechanical Engineering
+Faculdade de Engenharia da Universidade do Porto"
+d708ce7103a992634b1b4e87612815f03ba3ab24,FCVID: Fudan-Columbia Video Dataset,"FCVID: Fudan-Columbia Video Dataset
+Yu-Gang Jiang, Zuxuan Wu, Jun Wang, Xiangyang Xue, Shih-Fu Chang
+Available at: http://bigvid.fudan.edu.cn/FCVID/
+OVERVIEW
+Recognizing visual contents in unconstrained videos
+has become a very important problem for many ap-
+plications, such as Web video search and recommen-
+dation, smart content-aware advertising, robotics, etc.
+Existing datasets for video content recognition are
+either small or do not have reliable manual labels.
+In this work, we construct and release a new Inter-
+net video dataset called Fudan-Columbia Video Dataset
+(FCVID), containing 91,223 Web videos (total duration
+,232 hours) annotated manually according to 239
+ategories. We believe that the release of FCVID can
+stimulate innovative research on this challenging and
+important problem.
+COLLECTION AND ANNOTATION
+The categories in FCVID cover a wide range of topics
+like social events (e.g., “tailgate party”), procedural"
+d7da0f595d135474cc2193d382b22458b313cdbf,Multi-View Constraint Propagation with Consensus Prior Knowledge,Multi-View Constraint Propagation with Consensus Prior Knowledge
+d78b190f98f9630cab261eabc399733af052f05c,Unsupervised Deep Domain Adaptation for Pedestrian Detection,
+d73221adda13a99e8dd8dab101abcfeae6b7b706,The ApolloScape Dataset for Autonomous Driving,"The ApolloScape Dataset for Autonomous Driving
+Xinyu Huang, Xinjing Cheng, Qichuan Geng, Binbin Cao,
+Dingfu Zhou, Peng Wang, Yuanqing Lin, and Ruigang Yang
+Baidu Research, Beijing, China
+National Engineering Laboratory of Deep Learning Technology and Application, China"
+d7612e01c10f351a3e2ff1a57465c3d17ddbb193,Rain Streaks Removal in an Image by using Image Decomposition,"International Journal of Science and Research (IJSR)
+ISSN (Online): 2319-7064
+Index Copernicus Value (2013): 6.14 | Impact Factor (2015): 6.391
+Rain Streaks Removal in an Image by using Image
+Decomposition
+Priyanka A. Chougule1, J. A. Shaikh2
+Research Student, Electronics Dept., PVPIT, Budhgaon
+Associate Professor, Electronics Dept. PVPIT, Budhgaon"
+d7b6bbb94ac20f5e75893f140ef7e207db7cd483,griffith . edu . au Face Recognition across Pose : A Review,"Griffith Research Online
+https://research-repository.griffith.edu.au
+Face Recognition across Pose: A
+Review
+Author
+Zhang, Paul, Gao, Yongsheng
+Published
+Journal Title
+Pattern Recognition
+https://doi.org/10.1016/j.patcog.2009.04.017
+Copyright Statement
+Copyright 2009 Elsevier. This is the author-manuscript version of this paper. Reproduced in accordance
+with the copyright policy of the publisher. Please refer to the journal's website for access to the
+definitive, published version.
+Downloaded from
+http://hdl.handle.net/10072/30193"
+d7144bc7d91841963b037f210f9356d28f76e70e,A comparison of features for regression-based driver head pose estimation under varying illumination conditions,"A COMPARISON OF FEATURES FOR REGRESSION-BASED DRIVER HEAD POSE
+ESTIMATION UNDER VARYING ILLUMINATION CONDITIONS
+Dimitri J. Walger1, Toby P. Breckon2, Anna Gaszczak3, Thomas Popham3
+Cranfield University, Bedfordshire, UK 2Durham University, Durham, UK
+Jaguar Land Rover, Warwickshire, UK"
+d7d6200e41d574e2f3ddd9ded299613683519c7c,Accurate Iris Recognition at a Distance Using Stabilized Iris Encoding and Zernike Moments Phase Features,"IEEE Trans. Image Processing, 2014
+Accurate Iris Recognition at a Distance Using
+Stabilized Iris Encoding and Zernike Moments Phase Features
+Chun-Wei Tan, Ajay Kumar"
+d75d074c11a62780b836376249391da39660cad6,Task Scheduling Frameworks for Heterogeneous Computing Toward Exascale,"(IJACSA) International Journal of Advanced Computer Science and Applications,
+Vol. 9, No. 10, 2018
+Task Scheduling Frameworks for Heterogeneous
+Computing Toward Exascale
+Suhelah Sandokji1, Fathy Eassa2
+Faculty of Computing and Information Technology, KAU
+Jeddah ,Saudi Arabia
+studies consider partitioning"
+d7e8672caecc7e4b17e8d9d3cbd673d402c7e7af,Robust Stereo-Based Person Detection and Tracking for a Person Following Robot,"Robust Stereo-Based Person Detection and Tracking
+for a Person Following Robot
+Junji Satake and Jun Miura
+Department of Information and Computer Sciences
+Toyohashi University of Technology"
+d7d9fa9a5a57f9f3da7ab2c87ca58127665774cc,Improving Shadow Suppression for Illumination Robust Face Recognition,"Improving Shadow Suppression for Illumination
+Robust Face Recognition
+Wuming Zhang, Xi Zhao, Jean-Marie Morvan and Liming Chen, Senior Member, IEEE"
+d7d166aee5369b79ea2d71a6edd73b7599597aaa,Fast Subspace Clustering Based on the Kronecker Product,"Fast Subspace Clustering Based on the
+Kronecker Product
+Lei Zhou1, Xiao Bai1, Xianglong Liu1, Jun Zhou2, and Hancock Edwin3
+Beihang University 2Grif‌f‌ith University 3University of York, UK"
+d7e8c6da1a95f41d8097b7b713890ccde13ef1d8,Development of an Efficient Face Recognition System based on Linear and Nonlinear Algorithms,"Development of an Efficient Face Recognition System based on Linear and Nonlinear Algorithms
+{tag} {/tag}
+International Journal of Computer Applications
+Foundation of Computer Science (FCS), NY, USA
+Volume 134
+Number 7
+Year of Publication: 2016
+Authors:
+Filani Araoluwa S., Adetunmbi Adebayo O.
+10.5120/ijca2016907932
+{bibtex}2016907932.bib{/bibtex}"
+d79f9ada35e4410cd255db39d7cc557017f8111a,Evaluation of accurate eye corner detection methods for gaze estimation,"Journal of Eye Movement Research
+7(3):3, 1-8
+Evaluation of accurate eye corner detection methods for gaze
+estimation
+Jose Javier Bengoechea
+Public University of Navarra, Spain
+Juan J. Cerrolaza
+Childrens National Medical Center, USA
+Arantxa Villanueva
+Public University of Navarra, Spain
+Rafael Cabeza
+Public University of Navarra, Spain
+Accurate detection of iris center and eye corners appears to be a promising
+pproach for low cost gaze estimation.
+In this paper we propose novel eye
+inner corner detection methods. Appearance and feature based segmentation
+pproaches are suggested. All these methods are exhaustively tested on a realistic
+dataset containing images of subjects gazing at different points on a screen.
+We have demonstrated that a method based on a neural network presents the
+est performance even in light changing scenarios."
+d7f3836f2d28adf15fc809bd4f90afb1f61ba8e0,Segment-before-Detect: Vehicle Detection and Classification through Semantic Segmentation of Aerial Images,"Article
+Segment-before-Detect: Vehicle Detection and
+Classification through Semantic Segmentation of
+Aerial Images
+Nicolas Audebert 1,2,*, Bertrand Le Saux 1 and Sébastien Lefèvre 2
+ONERA, The French Aerospace Lab, F-91761 Palaiseau, France;
+Institut de Recherche en Informatique et Systèmes Aléatoires (IRISA), University Bretagne Sud, UMR 6074,
+F-56000 Vannes, France;
+* Correspondence:
+Academic Editors: Norman Kerle, Markus Gerke and Prasad S. Thenkabail
+Received: 28 December 2016; Accepted: 7 April 2017; Published: 13 April 2017"
+d03265ea9200a993af857b473c6bf12a095ca178,Multiple deep convolutional neural networks averaging for face alignment,"Multiple deep convolutional neural
+networks averaging for face
+lignment
+Shaohua Zhang
+Hua Yang
+Zhouping Yin
+Downloaded From: http://electronicimaging.spiedigitallibrary.org/ on 05/28/2015 Terms of Use: http://spiedl.org/terms"
+d0462aa7754ffdf39962e2003344937258a0e42e,You Can’t Gamble on Others: Dissociable Systems for Strategic Uncertainty and Risk in the Brain,"You Can’t Gamble on Others: Dissociable Systems for
+Strategic Uncertainty and Risk in the Brain
+W. Gavin Ekins1, Ricardo Caceda, C. Monica Capra1, and Gregory S. Berns1*
+1Center for Neuropolicy and Economics Department, Emory University, Atlanta, GA 30322 USA
+*Correspondance:"
+d096bdd5743cbb33f0cd0ae984d188b2c302f054,Extractive and Abstractive Caption Generation Model for News Images,"ISSN:2321-1156
+International Journal of Innovative Research in Technology & Science(IJIRTS)"
+d00f6ec074bbe777ba2e419b39729283a28101c5,Hashtag Recommendation for Multimodal Microblog Using Co-Attention Network,Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+d0d186779ae4a4e53101a26dc741254e822e07ab,Multi Camera for Surveillance System Ground Detection and 3D Reconstruction,"Multi Camera for Surveillance System Ground Detection and
+International Journal of Smart Home
+Vol. 9, No. 1 (2015), pp. 103-110
+http://dx.doi.org/10.14257/ijsh.2015.9.1.11
+D Reconstruction
+Xu Yongzhe1 and Byungsoo Lee1
+Department of Computer Engineering, University of Incheon, Korea"
+d0ad7324fab174609f26c617869fa328960617e2,Person Identification From Text Independent Lip Movement Using the Longest Matching Segment Method,"Person Identification From Text Independent Lip Movement
+Using the Longest Matching Segment Method
+Paul C. Brown, Ji Ming, Daryl Stewart
+Institute of ECIT, Electronics and Computer Engineering Cluster, Queen(cid:48)s University Belfast,
+Belfast BT7 1NN, UK"
+d0a6a700779ac8cb70d7bb95f9a5afdda60152d9,Pyramid Mean Representation of Image Sequences for Fast Face Retrieval in Unconstrained Video Data,"Pyramid Mean Representation of Image Sequences for
+Fast Face Retrieval in Unconstrained Video Data
+Christian Herrmann1,2 and J¨urgen Beyerer1,2
+Vision and Fusion Lab, Karlsruhe Institute of Technology KIT, Karlsruhe, Germany
+Fraunhofer Institute of Optronics, System Technologies and Image Exploitation IOSB,
+Karlsruhe, Germany"
+d04631e40b237ae29cb8d2bd187b04033580e63b,Multi-cue Based Multi-target Tracking with Boosted MHT,"Multi-cue Based Multi-target Tracking
+with Boosted MHT
+Long Ying1,2, Tianzhu Zhang1,2, Shengsheng Qian1,2, and Changsheng Xu1,2
+Institute of Automation, Chinese Academy of Science, Beijing, China
+China-Singapore Institute of Digital Media, Singapore"
+d07e9b04c1480d65e37e44bec3be95fc3206c17b,Combining classifiers for face recognition,- 130-7803-7965-9/03/$17.00 ©2003 IEEEICME 2003(cid:224)
+d0f709ab39e280467d854064132570c1d5316de5,Multi-Object Tracking and Identification over Sets,"Multi-Object Tracking and Identification over Sets
+Aijun Bai
+UC Berkeley"
+d04d53038d4267cf25badc5d6acccd2fc910a8a7,Online Multi-Object Tracking with Structural Invariance Constraint,"ZHOU, JIANG, WEI, DONG, WANG: ONLINE MULTI-OBJECT TRACKING WITH SIC
+Online Multi-Object Tracking
+with Structural Invariance Constraint
+Xiao Zhou
+Peilin Jiang
+Zhao Wei
+Hang Dong
+Fei Wang
+National Engineering
+Laboratory for Visual Information
+Processing and Application,
+XJTU, 99 Yanxiang Road,
+Xi’an, Shaanxi 710054, China
+School of Software Engineering,
+XJTU, 28 West Xianning Road,
+Xi’an, Shaanxi 710049, China"
+d0de92865a53576af3dd118f4d1fa73be12aee9b,PCANet-II: When PCANet Meets the Second Order Pooling,"PCANet-II:WhenPCANetMeetstheSecondOrderPoolingLeiTian,XiaopengHong"
+d014011b24c62d5b689c782c09b89c52970f46e7,"SRDA: Generating Instance Segmentation Annotation via Scanning, Reasoning and Domain Adaptation","SRDA: Generating Instance Segmentation
+Annotation Via Scanning, Reasoning And
+Domain Adaptation
+Wenqiang Xu(cid:63), Yonglu Li(cid:63), Cewu Lu
+Department of Computer Science and Engineering,
+Shanghai Jiaotong University
+{vinjohn,yonglu"
+d05825a394f11a391c8815f6b0d394cdb4cfaa95,I2T2I: Learning text to image synthesis with textual data augmentation,
+d0e1ad4f3f608124cd3efc2d5bd01b421ffc3274,Running head: SUPPRESSING BEHAVIOUR DOES NOT INFLUENCE WORKING MEMORY CAPACITY DEPARTMENT OF PSYCHOLOGY Suppressing behaviour related to discomfort induced with a cold pressure task does not influence working memory capacity in a 2-back task,"Running
+head:
+SUPPRESSING
+BEHAVIOUR
+INFLUENCE
+WORKING
+MEMORY
+CAPACITY
+DEPARTMENT OF PSYCHOLOGY
+Suppressing behaviour related to discomfort
+induced with a cold pressure task does not
+influence working memory capacity in a 2-back
+task.
+Erik Danielski
+Master thesis spring 2013
+Supervisors: Martin Wolgast & Emelie Stiernströmer"
+d00c335fbb542bc628642c1db36791eae24e02b7,Deep Learning-Based Gaze Detection System for Automobile Drivers Using a NIR Camera Sensor,"Article
+Deep Learning-Based Gaze Detection System for
+Automobile Drivers Using a NIR Camera Sensor
+Rizwan Ali Naqvi, Muhammad Arsalan, Ganbayar Batchuluun, Hyo Sik Yoon and
+Kang Ryoung Park *
+Division of Electronics and Electrical Engineering, Dongguk University, 30 Pildong-ro, 1-gil, Jung-gu,
+Seoul 100-715, Korea; (R.A.N.); (M.A.);
+(G.B.); (H.S.Y.)
+* Correspondence: Tel.: +82-10-3111-7022; Fax: +82-2-2277-8735
+Received: 5 January 2018; Accepted: 1 February 2018; Published: 3 February 2018"
+d03f1257066ce5dd843c6977858a1daef0671f3d,Stories for Images-in-Sequence by using Visual and Narrative Components,"Stories for Images-in-Sequence by using Visual
+nd Narrative Components (cid:63)
+Marko Smilevski1,2, Ilija Lalkovski2, and Gjorgji Madjarov1,3
+Ss. Cyril and Methodius University, Skopje, Macedonia
+Pendulibrium, Skopje, Macedonia
+Elevate Global, Skopje, Macedonia"
+d0631ba22add59684fff926d80d2e6948dfb7d7e,MUTT: Metric Unit TesTing for Language Generation Tasks,"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics, pages 1935–1943,
+Berlin, Germany, August 7-12, 2016. c(cid:13)2016 Association for Computational Linguistics"
+d01e65591745fc46a3f69a6c9387be17caf55c16,State-Driven Particle Filter for Multi-person Tracking,"State-Driven Particle Filter
+for Multi-Person Tracking
+David Gerónimo1, Frédéric Lerasle2,3, and Antonio M. López1
+Computer Vision Center and Department of Computer Science
+Edifici O, 08193 Campus Universitat Autònoma de Barcelona, Bellaterra, Spain.
+CNRS-LAAS, 7 avenue du Colonel Roche, F-31077 Toulouse, France
+Université de Toulouse (UPS), F-31077 Toulouse, France"
+d0a9bbd3bd9dcb62f9874fc1378a7f1a17f44563,Prototype Generation Using Self-Organizing Maps for Informativeness-Based Classifier,"Hindawi
+Computational Intelligence and Neuroscience
+Volume 2017, Article ID 4263064, 15 pages
+https://doi.org/10.1155/2017/4263064
+Research Article
+Prototype Generation Using Self-Organizing Maps for
+Informativeness-Based Classifier
+Leandro Juvêncio Moreira1 and Leandro A. Silva2
+Graduate Program in Electrical Engineering and Computing, Mackenzie Presbyterian University, Sao Paulo, SP, Brazil
+Computing and Informatics Faculty & Graduate Program in Electrical Engineering and Computing,
+Mackenzie Presbyterian University, Sao Paulo, SP, Brazil
+Correspondence should be addressed to Leandro A. Silva;
+Received 31 January 2017; Revised 13 June 2017; Accepted 15 June 2017; Published 25 July 2017
+Academic Editor: Toshihisa Tanaka
+Copyright © 2017 Leandro Juvˆencio Moreira and Leandro A. Silva. This is an open access article distributed under the Creative
+Commons Attribution License, which permits unrestricted use, distribution, and reproduction in any medium, provided the
+original work is properly cited.
+The 𝑘 nearest neighbor is one of the most important and simple procedures for data classification task. The 𝑘NN, as it is called,
+requires only two parameters: the number of𝑘 and a similarity measure. However, the algorithm has some weaknesses that make it
+nalysis and all training dataset is necessary. Another weakness is the optimal choice of 𝑘 parameter when the object analyzed"
+d0144d76b8b926d22411d388e7a26506519372eb,Improving Regression Performance with Distributional Losses,"Improving Regression Performance with Distributional Losses
+Ehsan Imani 1 Martha White 1"
+d0e20aa3d61b77d17f005a1d24d7cf47600836ef,Rethinking Atrous Convolution for Semantic Image Segmentation,"Rethinking Atrous Convolution for Semantic Image Segmentation
+Liang-Chieh Chen George Papandreou Florian Schroff Hartwig Adam
+{lcchen, gpapan, fschroff,
+Google Inc."
+d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae,A 3-D Audio-Visual Corpus of Affective Communication,"A 3-D Audio-Visual Corpus
+of Affective Communication
+Gabriele Fanelli, Juergen Gall, Harald Romsdorfer, Member, IEEE, Thibaut Weise, and
+Luc Van Gool, Member, IEEE"
+d0a21f94de312a0ff31657fd103d6b29db823caa,Facial Expression Analysis,"Facial Expression Analysis
+Fernando De la Torre and Jeffrey F. Cohn"
+d03e4e938bcbc25aa0feb83d8a0830f9cd3eb3ea,Face Recognition with Patterns of Oriented Edge Magnitudes,"Face Recognition with Patterns of Oriented
+Edge Magnitudes
+Ngoc-Son Vu1,2 and Alice Caplier2
+Vesalis Sarl, Clermont Ferrand, France
+Gipsa-lab, Grenoble INP, France"
+d02c54192dbd0798b43231efe1159d6b4375ad36,3 D Reconstruction and Face Recognition Using Kernel-Based ICA and Neural Networks,"D Reconstruction and Face Recognition Using Kernel-Based
+ICA and Neural Networks
+Cheng-Jian Lin Ya-Tzu Huang
+Chi-Yung Lee
+Dept. of Electrical Dept. of CSIE Dept. of CSIE
+Engineering Chaoyang University Nankai Institute of
+National University of Technology Technology
+of Kaohsiung"
+d00787e215bd74d32d80a6c115c4789214da5edb,Faster and Lighter Online Sparse Dictionary Learning,"Faster and Lighter Online
+Sparse Dictionary Learning
+Project report
+By: Shay Ben-Assayag, Omer Dahary
+Supervisor: Jeremias Sulam"
+be8c517406528edc47c4ec0222e2a603950c2762,Measuring Facial Action,"Harrigan / The new handbook of methods in nonverbal behaviour research 02-harrigan-chap02 Page Proof page 7
+7.6.2005
+5:45pm
+B A S I C R E S E A RC H
+M E T H O D S A N D
+P RO C E D U R E S"
+beb3fd2da7f8f3b0c3ebceaa2150a0e65736d1a2,Adaptive Histogram Equalization and Logarithm Transform with Rescaled Low Frequency DCT Coefficients for Illumination Normalization,"RESEARCH PAPER
+International Journal of Recent Trends in Engineering Vol 1, No. 1, May 2009,
+Adaptive Histogram Equalization and Logarithm
+Transform with Rescaled Low Frequency DCT
+Coefficients for Illumination Normalization
+Virendra P. Vishwakarma, Sujata Pandey and M. N. Gupta
+Department of Computer Science and Engineering
+Amity School of Engineering Technology, 580, Bijwasan, New Delhi-110061, India
+(Affiliated to Guru Gobind Singh Indraprastha University, Delhi, India)
+Email:
+illumination normalization. The
+lighting conditions. Most of the"
+bee609ea6e71aba9b449731242efdb136d556222,Multi-Target Tracking in Multiple Non-Overlapping Cameras using Constrained Dominant Sets,"Multi-Target Tracking in Multiple
+Non-Overlapping Cameras using Constrained
+Dominant Sets
+Yonatan Tariku Tesfaye*, Student Member, IEEE, Eyasu Zemene*, Student Member, IEEE,
+Andrea Prati, Senior member, IEEE, Marcello Pelillo, Fellow, IEEE, and Mubarak Shah, Fellow, IEEE"
+be48b5dcd10ab834cd68d5b2a24187180e2b408f,Constrained Low-Rank Learning Using Least Squares-Based Regularization,"FOR PERSONAL USE ONLY
+Constrained Low-rank Learning Using Least
+Squares Based Regularization
+Ping Li, Member, IEEE, Jun Yu, Member, IEEE, Meng Wang, Member, IEEE,
+Luming Zhang, Member, IEEE, Deng Cai, Member, IEEE, and Xuelong Li, Fellow, IEEE,"
+be9dde86ebd10ecb05808e034e3cadd210fe0bfb,SLAMIT: A Sub-map based SLAM system On-line creation of multi-leveled map,"Master of Science Thesis in Electrical Engineering
+Department of Electrical Engineering, Linköping University, 2016
+SLAMIT: A Sub-map based
+SLAM system
+On-line creation of multi-leveled map
+Karl Holmquist"
+be48780eb72d9624a16dd211d6309227c79efd43,Interactive Visual and Semantic Image Retrieval,"Interactive Visual and Semantic Image Retrieval
+Joost van de Weijer, Fahad Khan and Marc Masana Castrillo
+Introduction
+One direct consequence of recent advances in digital visual data generation and
+the direct availability of this information through the World-Wide Web, is a urgent
+demand for efficient image retrieval systems. The disclosure of the content of these
+millions of photos available on the internet is of great importance. The objective
+of image retrieval is to allow users to efficiently browse through this abundance
+of images. Due to the non-expert nature of the majority of the internet users, such
+systems should be user friendly, and therefore avoid complex user interfaces.
+Traditionally, two sources of information are exploited in the description of im-
+ges on the web. The first approach, called text-based image retrieval, describes
+images by a set of labels or keywords [1]. These labels can be automatically ex-
+tracted from for example the image name (e.g. ’car.jpg’ would provide information
+bout the presence of a car in the image), or alternatively from the webpage text
+surrounding the image. Another, more expensive way would be to manually label
+images with a set of keywords. Shortcomings of the text-based approach to image
+retrieval are obvious: many objects in the scene will not be labeled, words suffer
+from the confusions in case of synonyms or homonyms, and words often fall short
+in describing the esthetics, composition and color scheme of a scene. However, un-"
+bea2c35ef78eb40df52e27cf4098f28a79bcbad5,TabletGaze: A Dataset and Baseline Algorithms for Unconstrained Appearance-based Gaze Estimation in Mobile Tablets,"TabletGaze: Unconstrained Appearance-based Gaze
+Estimation in Mobile Tablets
+Qiong Huang, Student Member, IEEE,, Ashok Veeraraghavan, Member, IEEE,,
+nd Ashutosh Sabharwal, Fellow, IEEE"
+be437b53a376085b01ebd0f4c7c6c9e40a4b1a75,Face Recognition and Retrieval Using Cross Age Reference Coding,"ISSN (Online) 2321 – 2004
+ISSN (Print) 2321 – 5526
+INTERNATIONAL JOURNAL OF INNOVATIVE RESEARCH IN ELECTRICAL, ELECTRONICS, INSTRUMENTATION AND CONTROL ENGINEERING
+Vol. 4, Issue 5, May 2016
+IJIREEICE
+Face Recognition and Retrieval Using Cross
+Age Reference Coding
+Sricharan H S1, Srinidhi K S1, Rajath D N1, Tejas J N1, Chandrakala B M2
+BE, DSCE, Bangalore1
+Assistant Professor, DSCE, Bangalore2"
+bea5780d621e669e8069f05d0f2fc0db9df4b50f,Convolutional Deep Belief Networks on CIFAR-10,"Convolutional Deep Belief Networks on CIFAR-10
+Alex Krizhevsky
+Introduction
+We describe how to train a two-layer convolutional Deep Belief Network (DBN) on the 1.6 million tiny images
+dataset.
+When training a convolutional DBN, one must decide what to do with the edge pixels of teh images. As
+the pixels near the edge of an image contribute to the fewest convolutional lter outputs, the model may
+see it t to tailor its few convolutional lters to better model the edge pixels. This is undesirable becaue it
+usually comes at the expense of a good model for the interior parts of the image. We investigate several ways
+of dealing with the edge pixels when training a convolutional DBN. Using a combination of locally-connected
+onvolutional units and globally-connected units, as well as a few tricks to reduce the eects of overtting,
+we achieve state-of-the-art performance in the classication task of the CIFAR-10 subset of the tiny images
+dataset.
+The dataset
+Throughout this paper we employ two subsets of the 80 million tiny images dataset [2]. The 80 million
+tiny images dataset is a collection of 32 × 32 color images obtained by searching various online image search"
+be07f2950771d318a78d2b64de340394f7d6b717,3D HMM-based Facial Expression Recognition using Histogram of Oriented Optical Flow,"See discussions, stats, and author profiles for this publication at: https://www.researchgate.net/publication/290192867
+D HMM-based Facial Expression Recognition
+using Histogram of Oriented Optical Flow
+ARTICLE in SYNTHESIS LECTURES ON ARTIFICIAL INTELLIGENCE AND MACHINE LEARNING · DECEMBER 2015
+DOI: 10.14738/tmlai.36.1661
+READS
+AUTHORS, INCLUDING:
+Sheng Kung
+Oakland University
+Djamel Bouchaffra
+Institute of Electrical and Electronics Engineers
+PUBLICATION 0 CITATIONS
+57 PUBLICATIONS 402 CITATIONS
+SEE PROFILE
+SEE PROFILE
+All in-text references underlined in blue are linked to publications on ResearchGate,
+letting you access and read them immediately.
+Available from: Djamel Bouchaffra
+Retrieved on: 11 February 2016"
+be313072e9706df300d86bfac54079acfb9c1ef0,Descripteurs à divers niveaux de concepts pour la classification d ’ images multi-objets,"Descripteurs à divers niveaux de concepts pour la classification
+d’images multi-objets
+Y. Tamaazousti1 3
+H. Le Borgne1
+C. Hudelot2 3
+CentraleSupélec, Laboratoire de Mathématiques et Informatique pour la Complexité et les Systèmes
+CEA LIST, Laboratoire Vision et Ingénierie des Contenus
+Université Paris-Saclay, Laboratoire MICS
+{Youssef.tamaazousti,
+Résumé
+La classification d’images au moyen de descripteurs sé-
+mantiques repose sur des caractéristiques formées par
+les sorties de classifieurs binaires, chacun détectant un
+oncept visuel dans l’image. Les approches existantes
+onsidèrent souvent
+les concepts visuels indépendam-
+ment les uns des autres, alors qu’ils sont souvent liés.
+Ces relations sont parfois prises en compte, au moyen
+d’un schéma ascendant dépendant fortement de descrip-
+teurs bas-niveaux, induisant des relations non-pertinentes"
+bea185a15d5df7bbfce83bc684c316412703efbb,Pixelnn: Example-based Image Synthesis,"Under review as a conference paper at ICLR 2018
+PIXELNN: EXAMPLE-BASED IMAGE SYNTHESIS
+Anonymous authors
+Paper under double-blind review"
+be24e5fd1ec27d444c66183e89b5033db9155de9,"A Continuous, Full-scope, Spatio-temporal Tracking Metric based on KL-divergence","A Continuous, Full-scope, Spatio-temporal Tracking
+Metric based on KL-divergence
+Terry Adams
+U.S. Government
+Suite 6587
+Ft. Meade, MD 20755
+Email:"
+be21529c47b79b688b420c5e296086698ba11350,CNN-Based Multimodal Human Recognition in Surveillance Environments,"Article
+CNN-Based Multimodal Human Recognition in
+Surveillance Environments
+Ja Hyung Koo, Se Woon Cho, Na Rae Baek, Min Cheol Kim and Kang Ryoung Park *
+Division of Electronics and Electrical Engineering, Dongguk University, 30 Pil-dong-ro, 1-gil, Jung-gu,
+Seoul 100-715, Korea; (J.H.K.); (S.W.C.);
+(N.R.B.); (M.C.K.)
+* Correspondence: Tel.: +82-10-3111-7022; Fax: +82-2-2277-8735
+Received: 7 August 2018; Accepted: 8 September 2018; Published: 11 September 2018"
+be6f29e129a99529f7ed854384d1f4da04c4ca1f,Spatially Consistent Nearest Neighbor Representations for Fine-Grained Classification. (Représentations d'images basées sur un principe de voisins partagés pour la classification fine),"Spatially Consistent Nearest Neighbor Representations
+for Fine-Grained Classification
+Valentin Leveau
+To cite this version:
+Valentin Leveau. Spatially Consistent Nearest Neighbor Representations for Fine-Grained Classifica-
+tion. Computer Science [cs]. Université Montpellier, 2016. English. <tel-01410137>
+HAL Id: tel-01410137
+https://hal.archives-ouvertes.fr/tel-01410137
+Submitted on 6 Dec 2016
+HAL is a multi-disciplinary open access
+archive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+abroad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de
+recherche français ou étrangers, des laboratoires"
+beb4546ae95f79235c5f3c0e9cc301b5d6fc9374,A Modular Approach to Facial Expression Recognition,"A Modular Approach to Facial Expression Recognition
+Michal Sindlar
+Cognitive Artificial Intelligence, Utrecht University, Heidelberglaan 6, 3584 CD, Utrecht
+Marco Wiering
+Intelligent Systems Group, Utrecht University, Padualaan 14, 3508 TB, Utrecht"
+befd21f74248ca5f22f608043d64cdea67829737,Decoupled Access-Execute on ARM big.LITTLE,"Decoupled Access-Execute on ARM big.LITTLE
+Anton Weber
+Uppsala University
+anton.weber.0295
+Kim-Anh Tran
+Uppsala University
+kim-anh.tran
+Stefanos Kaxiras
+Uppsala University
+stefanos.kaxiras
+Alexandra Jimborean
+alexandra.jimborean
+Uppsala University"
+be0bd420b78be8dfc0aad65dddae10ff1ec30a94,People Orientation Recognition by Mixtures of Wrapped Distributions on Random Trees,"People Orientation Recognition by Mixtures
+of Wrapped Distributions on Random Trees
+Davide Baltieri, Roberto Vezzani, and Rita Cucchiara
+DIEF - University of Modena and Reggio Emilia
+Via Vignolese 905, 41125 - Modena, Italy
+http://imagelab.ing.unimore.it"
+be707bf7c7096df0fcf5bb07ef0fa53494d6a781,Effective Classifiers for Detecting Objects,"Effective Classifiers for Detecting Objects
+Michael Mayo
+Dept. of Computer Science
+University of Waikato
+Private Bag 3105, Hamilton, New Zealand
+in the
+literature:
+Introduction
+image. Many image databases such as Caltech-101 [1]
+consist of images with the objects of interest in a
+dominant foreground position, occupying most of the
+image."
+bebea83479a8e1988a7da32584e37bfc463d32d4,Discovery of Latent 3D Keypoints via End-to-end Geometric Reasoning,"Discovery of Latent 3D Keypoints via
+End-to-end Geometric Reasoning
+Supasorn Suwajanakorn∗ Noah Snavely
+Jonathan Tompson Mohammad Norouzi
+{supasorn, snavely, tompson,
+Google AI"
+beeeade98988e55afe81faaedf06dc00848ec751,ARBEE: Towards Automated Recognition of Bodily Expression of Emotion In the Wild,"Int J Comput Vis manuscript No.
+(will be inserted by the editor)
+ARBEE: Towards Automated Recognition of Bodily
+Expression of Emotion In the Wild
+Yu Luo · Jianbo Ye · Reginald B. Adams, Jr. · Jia Li ·
+Michelle G. Newman · James Z. Wang
+Received: date / Accepted: date"
+beb7a0329c3042c2ce63b5789e2581bb8e2dbbea,Generating Visual Representations for Zero-Shot Classification,"Generating Visual Representations for Zero-Shot Classification
+Maxime Bucher, St´ephane Herbin
+ONERA - The French Aerospace Lab
+Palaiseau, France
+Normandie Univ, UNICAEN, ENSICAEN, CNRS
+Fr´ed´eric Jurie
+Caen, France"
+bed7834ae7d371171977a590872f60d137c2f951,GuessWhat?! Visual Object Discovery through Multi-modal Dialogue,"GuessWhat?! Visual object discovery through multi-modal dialogue
+Harm de Vries
+University of Montreal
+Florian Strub
+Univ. Lille, CNRS, Centrale Lille,
+Inria, UMR 9189 CRIStAL
+Sarath Chandar
+University of Montreal
+Olivier Pietquin
+DeepMind
+Hugo Larochelle
+Twitter
+Aaron Courville
+University of Montreal"
+bed06e7ff0b510b4a1762283640b4233de4c18e0,Face Interpretation Problems on Low Quality Images,"Bachelor Project
+Czech
+Technical
+University
+in Prague
+Faculty of Electrical Engineering
+Department of Cybernetics
+Face Interpretation Problems on Low
+Quality Images
+Adéla Šubrtová
+Supervisor: Ing. Jan Čech, Ph.D
+May 2018"
+beec0138d21271379bdfa89317a0a1d648733bad,Model-Free Multiple Object Tracking with Shared Proposals,"Model-Free Multiple Object Tracking with
+Shared Proposals
+Gao Zhu1, Fatih Porikli1,2,3, Hongdong Li1,3
+Australian National University1, Data61/CSIRO2,
+ARC Centre of Excellence for Robotic Vision3"
+befa14324bb71e5d0f30808e54abc970d52f758c,A Convex Approach for Image Hallucination,"OAGM/AAPR Workshop 2013 (arXiv:1304.1876)
+A Convex Approach for Image Hallucination
+Institute for Computer Graphics and Vision, University of Technology Graz
+Peter Innerhofer, Thomas Pock"
+be25d7bff3b5928adf6c0a7f5495d47113f80997,Learning to Drive: Perception for Autonomous Cars a Dissertation Submitted to the Department of Computer Science and the Committee on Graduate Studies of Stanford University in Partial Fulfillment of the Requirements for the Degree of Doctor of Philosophy,"LEARNING TO DRIVE:
+PERCEPTION FOR AUTONOMOUS CARS
+A DISSERTATION
+SUBMITTED TO THE DEPARTMENT OF COMPUTER SCIENCE
+AND THE COMMITTEE ON GRADUATE STUDIES
+OF STANFORD UNIVERSITY
+IN PARTIAL FULFILLMENT OF THE REQUIREMENTS
+FOR THE DEGREE OF
+DOCTOR OF PHILOSOPHY
+David Michael Stavens
+May 2011"
+be4c2b6fdde83179dd601541f57ee5d14fe1e98a,Graphical Generative Adversarial Networks,"Graphical Generative Adversarial Networks
+Chongxuan Li 1 Max Welling 2 Jun Zhu 1 Bo Zhang 1"
+becb704450c6b2f7f57f03955036a5b66380b816,A Software Architecture for RGB-D People Tracking Based on ROS Framework for a Mobile Robot,"A software architecture for RGB-D
+people tracking based on ROS
+framework for a mobile robot
+Matteo Munaro, Filippo Basso, Stefano Michieletto, Enrico Pagello, and
+Emanuele Menegatti"
+be993d793e393127e3fb34d27fda255894edaedc,UnFlow: Unsupervised Learning of Optical Flow With a Bidirectional Census Loss,"UnFlow: Unsupervised Learning of Optical Flow
+with a Bidirectional Census Loss
+Simon Meister, Junhwa Hur, Stefan Roth
+Department of Computer Science
+TU Darmstadt, Germany"
+be72b20247fb4dc4072d962ced77ed89aa40372f,"Efficient Facial Representations for Age, Gender and Identity Recognition in Organizing Photo Albums using Multi-output CNN","Ef‌f‌icient Facial Representations for Age, Gender
+and Identity Recognition in Organizing Photo
+Albums using Multi-output CNN
+Andrey V. Savchenko
+Samsung-PDMI Joint AI Center, St. Petersburg Department of Steklov Institute of
+Mathematics
+National Research University Higher School of Economics
+Nizhny Novgorod, Russia"
+be75a0ff3999754f20e63fde90f4c68b4af22d60,R4-A.1: Dynamics-Based Video Analytics,"R4-A.1: Dynamics-Based Video Analytics
+PARTICIPANTS
+Octavia Camps
+Mario Sznaier
+Title
+Co-PI
+Co-PI
+Faculty/Staff
+Institution
+Graduate, Undergraduate and REU Students
+Oliver Lehmann
+Mengran Gou
+Yongfang Cheng
+Yin Wang
+Sadjad Asghari-Esfeden
+Angels Rates
+Degree Pursued
+Institution
+Email
+Month/Year of Graduation"
+be5b455abd379240460d022a0e246615b0b86c14,"The MR2: A multi-racial, mega-resolution database of facial stimuli.","Behav Res
+DOI 10.3758/s13428-015-0641-9
+The MR2: A multi-racial, mega-resolution database of facial
+stimuli
+Nina Strohminger1,6 · Kurt Gray2 · Vladimir Chituc3 · Joseph Heffner4 ·
+Chelsea Schein2 · Titus Brooks Heagins5
+© Psychonomic Society, Inc. 2015"
+be62019734554152c4feef62ba3092894b402efb,ARISTA - image search to annotation on billions of web photos,"The Twenty-Third IEEE Conference on Computer Vision and Pattern Recognition
+Poster Spotlights
+Session: Thursday Poster Session, Thurs 17 June 2010, 10:30 - 12:10 am
+ARISTA - Image Search to Annotation
+on Billions of Web Photos
+Xin-Jing Wang, Lei Zhang, Ming Liu, Yi Li,
+Wei-Ying Ma"
+beab10d1bdb0c95b2f880a81a747f6dd17caa9c2,DeepDeblur: Fast one-step blurry face images restoration,"DeepDeblur: Fast one-step blurry face images restoration
+Lingxiao Wang, Yali Li, Shengjin Wang
+Tsinghua Unversity"
+b331ca23aed90394c05f06701f90afd550131fe3,Double regularized matrix factorization for image classification and clustering,"Zhou et al. EURASIP Journal on Image and Video Processing (2018) 2018:49
+https://doi.org/10.1186/s13640-018-0287-5
+EURASIP Journal on Image
+and Video Processing
+R ES EAR CH
+Double regularized matrix factorization for
+image classification and clustering
+Wei Zhou1*
+, Chengdong Wu2, Jianzhong Wang3,4, Xiaosheng Yu2 and Yugen Yi5
+Open Access"
+b37f57edab685dba5c23de00e4fa032a3a6e8841,Towards social interaction detection in egocentric photo-streams,"Towards Social Interaction Detection in Egocentric Photo-streams
+Maedeh Aghaei, Mariella Dimiccoli, Petia Radeva
+University of Barcelona and Computer Vision Centre, Barcelona, Spain
+Recent advances in wearable camera technology have
+led to novel applications in the field of Preventive Medicine.
+For some of them, such as cognitive training of elderly peo-
+ple by digital memories and detection of unhealthy social
+trends associated to neuropsychological disorders, social in-
+teraction are of special interest. Our purpose is to address
+this problem in the domain of egocentric photo-streams cap-
+tured by a low temporal resolution wearable camera (2fpm).
+These cameras are suited for collecting visual information
+for long period of time, as required by the aforementioned
+applications. The major difficulties to be handled in this
+context are the sparsity of observations as well as the unpre-
+dictability of camera motion and attention orientation due
+to the fact that the camera is worn as part of clothing (see
+Fig. 1). Inspired by the theory of F-formation which is a
+pattern that people tend to follow when interacting [5], our
+proposed approach consists of three steps: multi-faces as-"
+b33b88a5fa5d4f20c24dd0e5f3b3529b7545c9e6,Object Detection in Real Images,"SCHOOL OF COMPUTER ENGINEERING
+PhD Confirmation Report
+Object Detection in Real Images
+Submitted by: Dilip Kumar Prasad
+Research Student (PhD)
+School of Computer Engineering
+E-mail:
+Supervisor: Dr. Maylor K. H. Leung
+Associate Professor,
+School of Computer Engineering
+E-mail:
+August 2010"
+b3d8705d46a1d63b40a76bbcf8822b2e90b3b9ad,Efficient Labelling of Pedestrian Supervisions,"Electronic Letters on Computer Vision and Image Analysis 15(1):77-99, 2016
+Efficient Labelling of Pedestrian Supervisions
+Kyaw Kyaw Htike
+School of Information Technology, UCSI University, Kuala Lumpur, Malaysia
+Received 7th Mar 2016; accepted 26th Jun 2016"
+b30bdbad88c72938c476f1ea6827d8b10c300da4,Supervised Mixed Norm Autoencoder for Kinship Verification in Unconstrained Videos,"Supervised Mixed Norm Autoencoder for Kinship
+Verification in Unconstrained Videos
+Naman Kohli, Student Member, IEEE, Daksha Yadav, Student Member, IEEE, Mayank Vatsa,
+Senior Member, IEEE, Richa Singh, Senior Member, IEEE, and Afzel Noore, Senior Member, IEEE."
+b3adc7617dff08d7427142837a326b95d2e83969,A Panoramic View of Performance,"Comp. by: BVijayalakshmi Stage: Galleys ChapterID: 0000883562 Date:27/1/09 Time:17:57:10
+Evaluation of Gait Recognition
+, ZONGYI LIU
+SUDEEP SARKAR
+Computer Science and Engineering, University of
+South Florida, Tampa, FL, USA
+Amazon.com, Seattle, WA, USA
+Synonyms
+Gait recognition; Progress in gait recognition
+Definition
+Gait recognition refers to automated vision methods
+that use video of human gait to recognize or to identify
+a person. Evaluation of gait recognition refers to the
+benchmarking of progress in the design of gait recog-
+nition algorithms on standard, common, datasets.
+Introduction
+Design of biometric algorithms and evaluation of per-
+formance goes hand in hand. It is important to con-
+stantly evaluate and analyze progress being at various
+levels of biometrics design. This evaluation can be of"
+b3cb91a08be4117d6efe57251061b62417867de9,Label propagation approach for predicting missing biographic labels in face-based biometric records,"T. Swearingen and A. Ross. ""A label propagation approach for predicting missing biographic labels in
+A Label Propagation Approach for
+Predicting Missing Biographic Labels
+in Face-Based Biometric Records
+Thomas Swearingen and Arun Ross"
+b336f946d34cb427452517f503ada4bbe0181d3c,Diagnosing Error in Temporal Action Detectors,"Diagnosing Error in Temporal Action Detectors
+Humam Alwassel, Fabian Caba Heilbron, Victor Escorcia, and Bernard
+Ghanem
+King Abdullah University of Science and Technology (KAUST), Saudi Arabia
+http://www.humamalwassel.com/publication/detad/
+{humam.alwassel, fabian.caba, victor.escorcia,"
+b340f275518aa5dd2c3663eed951045a5b8b0ab1,Visual inference of human emotion and behaviour,"Visual Inference of Human Emotion and Behaviour
+Shaogang Gong
+Caifeng Shan
+Tao Xiang
+Dept of Computer Science
+Queen Mary College, London
+Dept of Computer Science
+Queen Mary College, London
+Dept of Computer Science
+Queen Mary College, London
+England, UK
+England, UK
+England, UK"
+b38e5da11281be44c82d184079d762c9d526ba2e,Understanding Grounded Language Learning Agents,"Under review as a conference paper at ICLR 2018
+UNDERSTANDING GROUNDED LANGUAGE LEARNING
+AGENTS
+Anonymous authors
+Paper under double-blind review"
+b34487edb8d47c0101d514b8cb63148d80deee54,Utility of Satellite and Aerial Images for Quantification of Canopy Cover and Infilling Rates of the Invasive Woody Species Honey Mesquite (Prosopis Glandulosa) on Rangeland,"Remote Sens. 2012, 4, 1947-1962; doi:10.3390/rs4071947
+OPEN ACCESS
+ISSN 2072-4292
+www.mdpi.com/journal/remotesensing
+Article
+Utility of Satellite and Aerial Images for Quantification of
+Canopy Cover and Infilling Rates of the Invasive Woody Species
+Honey Mesquite (Prosopis Glandulosa) on Rangeland
+Mustafa Mirik * and R. James Ansley
+Texas AgriLife Research, P.O. Box 1658, 11708 Hwy 70 South, Vernon, TX 76385, USA;
+E-Mail:
+* Author to whom correspondence should be addressed; E-Mail:
+Tel.: +1-940-552-9941; Fax: +1-940-552-2317.
+Received: 9 May 2012; in revised form: 5 June 2012 / Accepted: 25 June 2012 /
+Published: 29 June 2012"
+b3655bcc6f491ae995c652c7f51e1b9b3a36d39c,User authentication based on foot motion,"Noname manuscript No.
+(will be inserted by the editor)
+User Authentication Based on Foot Motion
+Davrondzhon Gafurov, Patrick Bours and Einar Snekkenes
+Received: date / Accepted: date"
+b3d936c0d82f9b2032949af685a10708c6856d2c,Deep Learning from Noisy Image Labels with Quality Embedding,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+Deep Learning from Noisy Image Labels with
+Quality Embedding
+Jiangchao Yao, Jiajie Wang,
+Ivor Tsang, Ya Zhang, Jun Sun, Chengqi Zhang, Rui Zhang"
+b3f0a87043f7843b79744ec19dc0b93324d055d5,Improvements to Tracking Pedestrians in Video Streams Using a Pre-trained Convolutional Neural Network,"Western University
+Electronic Thesis and Dissertation Repository
+August 2016
+Improvements to Tracking Pedestrians in Video
+Streams Using a Pre-trained Convolutional Neural
+Network
+Marjan Ramin
+The University of Western Ontario
+Supervisor
+Dr. Jagath Samarabandu
+The University of Western Ontario
+Graduate Program in Electrical and Computer Engineering
+A thesis submitted in partial fulfillment of the requirements for the degree in Master of Engineering Science
+© Marjan Ramin 2016
+Follow this and additional works at: https://ir.lib.uwo.ca/etd
+Part of the Computer Engineering Commons
+Recommended Citation
+Ramin, Marjan, ""Improvements to Tracking Pedestrians in Video Streams Using a Pre-trained Convolutional Neural Network"" (2016).
+Electronic Thesis and Dissertation Repository. 3886.
+https://ir.lib.uwo.ca/etd/3886"
+b375db63742f8a67c2a7d663f23774aedccc84e5,Brain-Inspired Classroom Occupancy Monitoring on a Low-Power Mobile Platform,"Brain-inspired Classroom Occupancy
+Monitoring on a Low-Power Mobile Platform
+Department of Electrical, Electronic and Information Engineering, University of Bologna, Italy
+Francesco Conti∗, Antonio Pullini† and Luca Benini∗†
+Integrated Systems Laboratory, ETH Zurich, Switzerland"
+b3e2bd3f89e49833d45c30af7d5c923489b4d5fc,Fast Approximate kNN Graph Construction for High Dimensional Data via Recursive Lanczos Bisection,"Fast Approximate kNN Graph Construction for High
+Dimensional Data via Recursive Lanczos Bisection∗
+Jie Chen†
+Haw-ren Fang†
+Yousef Saad†
+October 2, 2008"
+b3ca58539e1407e0fb6b308194234279f78eb1d7,Structure Aligning Discriminative Latent Embedding for Zero-Shot Learning,"GUNE ET AL: STRUCTURE ALIGNING DISCRIMINATIVE LATENT EMBEDDING FOR ZSL 1
+Structure Aligning Discriminative Latent
+Embedding for Zero-Shot Learning
+Omkar Gune
+Biplab Banerjee
+Subhasis Chaudhuri
+Indian Institute of Technology Bombay,
+Mumbai, India
+Indian Institute of Technology Bombay,
+Mumbai, India
+Indian Institute of Technology Bombay,
+Mumbai, India"
+b3c60b642a1c64699ed069e3740a0edeabf1922c,Max-Margin Object Detection,"Max-Margin Object Detection
+Davis E. King"
+b362b812ececef21100d7a702447fcf5ab6d4715,Understanding and Improving Interpolation in Autoencoders via an Adversarial Regularizer,"Understanding and Improving Interpolation in
+Autoencoders via an Adversarial Regularizer
+David Berthelot∗
+Google Brain
+Colin Raffel∗
+Google Brain
+Aurko Roy
+Google Brain
+Ian Goodfellow
+Google Brain"
+b3f7c772acc8bc42291e09f7a2b081024a172564,"A novel approach for performance parameter estimation of face recognition based on clustering , shape and corner detection","www.ijmer.com Vol. 3, Issue. 5, Sep - Oct. 2013 pp-3225-3230 ISSN: 2249-6645
+International Journal of Modern Engineering Research (IJMER)
+A novel approach for performance parameter estimation of face
+recognition based on clustering, shape and corner detection
+.Smt.Minj Salen Kujur , 2.Prof. Prashant Jain,
+Department of Electronics & Communication Engineering college Jabalpur"
+b3c398da38d529b907b0bac7ec586c81b851708f,Face recognition under varying lighting conditions using self quotient image,"Face Recognition under Varying Lighting Conditions Using Self Quotient
+Haitao Wang, 2Stan Z Li, 1Yangsheng Wang
+Image
+Institute of Automation, Chinese Academy of
+Sciences, Beijing, 100080, China,
+Email:"
+b32cf547a764a4efa475e9c99a72a5db36eeced6,Mimicry of ingroup and outgroup emotional expressions,"UvA-DARE (Digital Academic Repository)
+Mimicry of ingroup and outgroup emotional expressions
+Sachisthal, M.S.M.; Sauter, D.A.; Fischer, A.H.
+Published in:
+Comprehensive Results in Social Psychology
+10.1080/23743603.2017.1298355
+Link to publication
+Citation for published version (APA):
+Sachisthal, M. S. M., Sauter, D. A., & Fischer, A. H. (2016). Mimicry of ingroup and outgroup emotional
+expressions. Comprehensive Results in Social Psychology, 1(1-3), 86-105. DOI:
+10.1080/23743603.2017.1298355
+General rights
+It is not permitted to download or to forward/distribute the text or part of it without the consent of the author(s) and/or copyright holder(s),
+other than for strictly personal, individual use, unless the work is under an open content license (like Creative Commons).
+Disclaimer/Complaints regulations
+If you believe that digital publication of certain material infringes any of your rights or (privacy) interests, please let the Library know, stating
+your reasons. In case of a legitimate complaint, the Library will make the material inaccessible and/or remove it from the website. Please Ask
+the Library: http://uba.uva.nl/en/contact, or a letter to: Library of the University of Amsterdam, Secretariat, Singel 425, 1012 WP Amsterdam,
+The Netherlands. You will be contacted as soon as possible.
+Download date: 08 Aug 2018"
+b34e7a2218abd5894525a60ed4f106cb9c3dc1e8,Understanding Grounded Language Learning Agents,"Under review as a conference paper at ICLR 2018
+UNDERSTANDING GROUNDED LANGUAGE LEARNING
+AGENTS
+Anonymous authors
+Paper under double-blind review"
+b32631f456397462b3530757f3a73a2ccc362342,Discriminant Tensor Dictionary Learning with Neighbor Uncorrelation for Image Set Based Classification,Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+b348d5c7ac93d1148265284d71234e200c9c5f02,GibbsNet: Iterative Adversarial Inference for Deep Graphical Models,"GibbsNet: Iterative Adversarial Inference for Deep
+Graphical Models
+Alex Lamb
+MILA, Universite de Montreal
+Yaroslav Ganin
+MILA, Universite de Montreal
+R Devon Hjelm
+MILA, Universite de Montreal
+Joseph Paul Cohen
+MILA, Universite de Montreal
+Institute for Reproducible Research
+Aaron Courville
+MILA, Universite de Montreal
+CIFAR
+Yoshua Bengio
+MILA, Universite de Montreal
+CIFAR"
+dfd18b71f5c53ec2a95fcbe327cf7710da3b4851,Robust Submodular Maximization: A Non-Uniform Partitioning Approach,"Robust Submodular Maximization:
+A Non-Uniform Partitioning Approach
+Ilija Bogunovic 1 Slobodan Mitrovi´c 2 Jonathan Scarlett 1 Volkan Cevher 1"
+df90850f1c153bfab691b985bfe536a5544e438b,"Face Tracking Algorithm Robust to Pose , Illumination and Face Expression Changes : a 3 D Parametric Model Approach","FACE TRACKING ALGORITHM ROBUST TO POSE,
+ILLUMINATION AND FACE EXPRESSION CHANGES: A 3D
+PARAMETRIC MODEL APPROACH
+Marco Anisetti, Valerio Bellandi
+University of Milan - Department of Information Technology
+via Bramante 65 - 26013, Crema (CR), Italy
+Luigi Arnone, Fabrizio Beverina
+STMicroelectronics - Advanced System Technology Group
+via Olivetti 5 - 20041, Agrate Brianza, Italy
+Keywords:
+Face tracking, expression changes, FACS, illumination changes."
+df8da144a695269e159fb0120bf5355a558f4b02,Face Recognition using PCA and Eigen Face Approach,"International Journal of Computer Applications (0975 – 8887)
+International Conference on Recent Trends in engineering & Technology - 2013(ICRTET'2013)
+Face Recognition using PCA and Eigen Face
+Approach
+Anagha A. Shinde
+ME EXTC [VLSI & Embedded System]
+Sinhgad Academy of Engineering
+EXTC Department
+Pune, India"
+df577a89830be69c1bfb196e925df3055cafc0ed,"Shift: A Zero FLOP, Zero Parameter Alternative to Spatial Convolutions","Shift: A Zero FLOP, Zero Parameter Alternative to Spatial Convolutions
+Bichen Wu, Alvin Wan∗, Xiangyu Yue∗, Peter Jin, Sicheng Zhao,
+Noah Golmant, Amir Gholaminejad, Joseph Gonzalez, Kurt Keutzer
+UC Berkeley"
+df353e3a46cca8c1ef274994f5a6dcb580231726,Data-driven fundamental models for pedestrian movements,"POUR L'OBTENTION DU GRADE DE DOCTEUR ÈS SCIENCESacceptée sur proposition du jury:Prof. P. Frossard, président du juryProf. M. Bierlaire, directeur de thèseProf. H. Mahmassani, rapporteurProf. S. Hoogendoorn, rapporteurProf. N. Geroliminis, rapporteurData-driven fundamental models for pedestrian movementsTHÈSE NO 7613 (2017)ÉCOLE POLYTECHNIQUE FÉDÉRALE DE LAUSANNEPRÉSENTÉE LE 5 MAI 2017À LA FACULTÉ DE L'ENVIRONNEMENT NATUREL, ARCHITECTURAL ET CONSTRUITLABORATOIRE TRANSPORT ET MOBILITÉPROGRAMME DOCTORAL EN GÉNIE ÉLECTRIQUE Suisse2017PARMarija NIKOLIĆ"
+df50e6e2ad60825167c6b3e641eb5cda0f3dc505,Theoretical vs. empirical discriminability: the application of ROC methods to eyewitness identification,"Wixted and Mickes Cognitive Research: Principles and Implications (2018) 3:9
+https://doi.org/10.1186/s41235-018-0093-8
+Cognitive Research: Principles
+and Implications
+TU T O R I A L R E V I EW
+Theoretical vs. empirical discriminability:
+the application of ROC methods to
+eyewitness identification
+John T. Wixted1* and Laura Mickes2
+Open Access"
+dfb342327c5e883d21a1f91cd283b36dbc2a3661,Game of Sketches: Deep Recurrent Models of Pictionary-style Word Guessing,"Deep Recurrent Models of Pictionary-style Word
+Guessing
+Ravi Kiran Sarvadevabhatla, Member, IEEE, Shiv Surya, Trisha Mittal and R. Venkatesh Babu Senior
+Member, IEEE"
+dff612c198dc50a7bef5a9cd48da5da1f893fa72,A fast stereo-based multi-person tracking using an approximated likelihood map for overlapping silhouette templates,"A Fast Stereo-Based Multi-Person Tracking
+using an Approximated Likelihood Map
+for Overlapping Silhouette Templates
+Junji Satake
+Jun Miura
+Department of Computer Science and Engineering
+Toyohashi University of Technology
+Email: {satake,
+Toyohashi, Japan"
+df51dfe55912d30fc2f792561e9e0c2b43179089,Face Hallucination Using Linear Models of Coupled Sparse Support,"Face Hallucination using Linear Models of Coupled
+Sparse Support
+Reuben A. Farrugia, Member, IEEE, and Christine Guillemot, Fellow, IEEE
+grid and fuse them to suppress the aliasing caused by under-
+sampling [5], [6]. On the other hand, learning based meth-
+ods use coupled dictionaries to learn the mapping relations
+between low- and high- resolution image pairs to synthesize
+high-resolution images from low-resolution images [4], [7].
+The research community has lately focused on the latter
+category of super-resolution methods, since they can provide
+higher quality images and larger magnification factors."
+df054fa8ee6bb7d2a50909939d90ef417c73604c,Image Quality-aware Deep Networks Ensemble for Efficient Gender Recognition in the Wild,"Image Quality-Aware Deep Networks Ensemble for Efficient
+Gender Recognition in the Wild
+Mohamed Selim1, Suraj Sundararajan1, Alain Pagani2 and Didier Stricker1,2
+Augmented Vision Lab, Technical University Kaiserslautern, Kaiserslautern, Germany
+German Research Center for Artificial Intelligence (DFKI), Kaiserslautern, Germany
+{mohamed.selim, alain.pagani, s
+Keywords:
+Gender, Face, Deep Neural Networks, Quality, In the Wild"
+df4525d7d99f7237c864adbcb2dab30d8f7447e0,Kernel Cross-View Collaborative Representation based Classification for Person Re-Identification,"Kernel Cross-View Collaborative Representation based Classification for Person
+Re-Identification
+Raphael Prates and William Robson Schwartz
+Universidade Federal de Minas Gerais, Brazil
+6627, Av. Pres. Antˆonio Carlos - Pampulha, Belo Horizonte - MG, 31270-901"
+df80fed59ffdf751a20af317f265848fe6bfb9c9,Learning Deep Sharable and Structural Detectors for Face Alignment,"Learning Deep Sharable and Structural
+Detectors for Face Alignment
+Hao Liu, Jiwen Lu, Senior Member, IEEE, Jianjiang Feng, Member, IEEE, and Jie Zhou, Senior Member, IEEE"
+df3b560a5d6c8cc5fa1477d3a89245a0d3b60715,Human tracking with multiple parallel metrics,"Human tracking with multiple parallel metrics
+P. M. Birch*, W. Hassan, R. C. D. Young, C.R. Chatwin
+Dept. of Engineering and Design, University of Sussex, Falmer, UK, BN1 9QT
+Keywords: HOG, Correlation, Tracking"
+dfe2d36ca249876e5ab5500f155e3a5094dbc170,Application of common sense computing for the development of a novel knowledge-based opinion mining engine,"Application of Common Sense Computing for
+the Development of a Novel Knowledge-Based
+Opinion Mining Engine
+A thesis submitted in accordance with the requirements of
+the University of Stirling for the degree of Doctor of Philosophy
+Erik Cambria
+Principal Supervisor: Amir Hussain (University of Stirling, UK)
+Additional Supervisor: Catherine Havasi (MIT Media Laboratory, USA)
+Industrial Supervisor: Chris Eckl (Sitekit Solutions Ltd, UK)
+Department of Computing Science & Mathematics
+University of Stirling, Scotland, UK
+December 2011"
+df310591dfba9672252d693bc87da73c246749c9,Fusion of Holistic and Part Based Features for Gender Classification in the Wild,"Fusion of Holistic and Part Based Features
+for Gender Classification in the Wild
+Modesto Castrill´on-Santana(B), Javier Lorenzo-Navarro,
+and Enrique Ram´on-Balmaseda
+Universidad de Las Palmas de Gran Canaria, Las Palmas de Gran Canaria, Spain
+http://berlioz.dis.ulpgc.es/roc-siani"
+dfcb4773543ee6fbc7d5319b646e0d6168ffa116,Adversarial Variational Bayes: Unifying Variational Autoencoders and Generative Adversarial Networks,"Unifying Variational Autoencoders and Generative Adversarial Networks
+Adversarial Variational Bayes:
+Lars Mescheder 1
+Sebastian Nowozin 2
+Andreas Geiger 1 3"
+dfbf49ed66a9e48671964872c84f75d7f916c131,Supplementary Material for Sparsity Invariant CNNs,"Supplementary Material for
+Sparsity Invariant CNNs
+Jonas Uhrig(cid:63),1,2 Nick Schneider(cid:63),1,3
+Lukas Schneider1,4
+Uwe Franke1
+Thomas Brox2 Andreas Geiger4,5
+Daimler R&D Sindelfingen
+University of Freiburg
+KIT Karlsruhe
+ETH Z¨urich
+5MPI T¨ubingen
+. Convergence Analysis
+We find that Sparse Convolutions converge much faster than standard convolutions for most input-output-combinations,
+especially for those on Synthia with irregularly sparse depth input, as considered in Section 5.1 of the main paper. In Figure
+, we show the mean average error in meters on our validation subset of Synthia over the process of training with identical
+solver settings (Adam with momentum terms of β1 = 0.9, β2 = 0.999 and delta 1e−8). We chose for each variant the
+maximal learning rate which still causes the network to converge (which turned out to be 1e−3 for all three variants). We
+find that Sparse Convolutions indeed train much faster and much smoother compared to both ConvNet variants, most likely
+caused by the explicit ignoring of invalid regions in the update step. Interestingly, the ConvNet variant with concatenated
+visibility mask in the input converges smoother than the variant with only sparse depth in the input, however, additionally"
+dfbc3a6a629433f24f4e06fdfe8389f83afa7094,Learning OpenCV,"Learning OpenCV
+Gary Bradski and Adrian Kaehler
+Beijing · Cambridge · Farnham · Köln · Sebastopol · Taipei · Tokyo"
+df999184b1bb5691cd260b2b77df7ef00c0fe7b1,On Latent Distributions Without Finite Mean in Generative Models,"On Latent Distributions Without Finite Mean in
+Generative Models
+Damian Le´sniak∗
+Igor Sieradzki∗
+Jagiellonian University
+Igor Podolak"
+df28cd627afe6d20eb198b8406ff25ece340653d,The Acquisition of Sign Language by Deaf Children with Autism Spectrum Disorder,"The Acquisition of Sign
+Language by Deaf Children
+with Autism Spectrum
+Disorder
+Aaron Shield and Richard P. Meier
+Introduction
+Autism spectrum disorder (ASD) consists of a set of neurobiological
+developmental disorders characterized by communicative and social deficits
+as well as repetitive, stereotyped behaviors.1 In this chapter, we use the
+terms ‘ASD’ and ‘autism’ interchangeably; although ‘autism’ is not a clinical
+term, it is the term popularly used to refer to the range of disorders found
+in ASD.
+The language deficits of hearing children with autism are well docu-
+mented, and can range from the very mild in highly fluent speakers to the
+very severe in children with a total absence of productive spoken language.
+For those children who do acquire speech, the most common characteristics
+of autistic language include echolalia (echoing the utterances of others),
+pronoun reversal, idiosyncratic language use and neologisms (the creation
+of new words), difficulty with pragmatics (problems interpreting the use
+of language in context and the non-literal use of language), and abnormal"
+dfaa547451aae219cd2ca7a761e6c16c1e1d0add,Representation Learning by Rotating Your Faces,"Representation Learning by Rotating Your Faces
+Luan Tran, Xi Yin, and Xiaoming Liu, Member, IEEE"
+dfa80e52b0489bc2585339ad3351626dee1a8395,Human Action Forecasting by Learning Task Grammars,"Human Action Forecasting by Learning Task Grammars
+Tengda Han
+Jue Wang
+Anoop Cherian
+Stephen Gould"
+dfe7700ed053d4788ecea4a18431806581e03291,Grammatical facial expression recognition using customized deep neural network architecture,"Grammatical facial expression recognition using customized
+deep neural network architecture
+Devesh Walawalkar"
+dffb64ac066bbcfe6aea6b11408b5ea62a40e9fb,"A New Face Recognition Scheme for Faces with Expressions , Glasses and Rotation","International Journal of Computer Engineering and Technology (IJCET), ISSN 0976-6367(Print),
+INTERNATIONAL JOURNAL OF COMPUTER ENGINEERING &
+ISSN 0976 - 6375(Online), Volume 5, Issue 4, April (2014), pp. 11-23 © IAEME
+TECHNOLOGY (IJCET)
+ISSN 0976 – 6367(Print)
+ISSN 0976 – 6375(Online)
+Volume 5, Issue 4, April (2014), pp. 11-23
+© IAEME: www.iaeme.com/ijcet.asp
+Journal Impact Factor (2014): 8.5328 (Calculated by GISI)
+www.jifactor.com
+IJCET
+© I A E M E
+A NEW FACE RECOGNITION SCHEME FOR FACES WITH EXPRESSIONS,
+GLASSES AND ROTATION
+Walaa M Abdel-Hafiez1, Mohamed Heshmat2, Moheb Girgis3, Seham Elaw4
+, 2, 4Faculty of Science, Mathematical and Computer Science Department,
+Sohag University, 82524, Sohag, Egypt
+3Faculty of Science, Department of Computer Science,
+Minia University, El-Minia, Egypt"
+dfecaedeaf618041a5498cd3f0942c15302e75c3,A recursive framework for expression recognition: from web images to deep models to game dataset,"Noname manuscript No.
+(will be inserted by the editor)
+A Recursive Framework for Expression Recognition: From
+Web Images to Deep Models to Game Dataset
+Wei Li · Christina Tsangouri · Farnaz Abtahi · Zhigang Zhu
+Received: date / Accepted: date"
+df5fe0c195eea34ddc8d80efedb25f1b9034d07d,Robust modified Active Shape Model for automatic facial landmark annotation of frontal faces,"Robust Modified Active Shape Model for Automatic Facial Landmark
+Annotation of Frontal Faces
+Keshav Seshadri and Marios Savvides"
+dfc784c860795f4f9aa704b7655f6d1321018980,Unsupervised Co-Activity Detection from Multiple Videos Using Absorbing Markov Chain,"Unsupervised Co-activity Detection from
+Multiple Videos using Absorbing Markov Chain
+Donghun Yeo, Bohyung Han, Joon Hee Han
+Department of Computer Science and Engineering, POSTECH, Korea"
+df2494da8efa44d70c27abf23f73387318cf1ca8,Supervised Filter Learning for Representation Based Face Recognition,"RESEARCH ARTICLE
+Supervised Filter Learning for Representation
+Based Face Recognition
+Chao Bi1, Lei Zhang2, Miao Qi1, Caixia Zheng1, Yugen Yi3, Jianzhong Wang1*,
+Baoxue Zhang4*
+College of Computer Science and Information Technology, Northeast Normal University, Changchun,
+China, 2 Changchun Institute of Optics, Fine Mechanics and Physics, CAS, Changchun, China, 3 School of
+Software, Jiangxi Normal University, Nanchang, China, 4 School of Statistics, Capital University of
+Economics and Business, Beijing, China
+11111
+* (JW); (BZ)"
+df674dc0fc813c2a6d539e892bfc74f9a761fbc8,An Image Mining System for Gender Classification & Age Prediction Based on Facial Features,"IOSR Journal of Computer Engineering (IOSR-JCE)
+e-ISSN: 2278-0661, p- ISSN: 2278-8727Volume 10, Issue 6 (May. - Jun. 2013), PP 21-29
+www.iosrjournals.org
+An Image Mining System for Gender Classification & Age
+Prediction Based on Facial Features
+1.Ms.Dhanashri Shirkey , 2Prof.Dr.S.R.Gupta,
+M.E(Scholar),Department Computer Science & Engineering, PRMIT & R, Badnera
+Asstt.Prof. Department Computer Science & Engineering, PRMIT & R, Badnera"
+da7ffe21508ad8d6dd9de7da378e184cb43a56c8,3D Landmark Localisation,"D Landmark Localisation
+Luke Gahan, Supervised by Prof. Paul F. Whelan"
+dab6921a578c9ded6904a5a18bdd054aee62d2ad,Learning to Recognize Faces by Successive Meetings,"Learning to recognize faces
+y successive meetings
+M. Castrill´on-Santana, O. D´eniz-Su´arez,
+J. Lorenzo-Navarro and M. Hern´andez-Tejera
+IUSIANI
+Edif. Ctral. del Parque Cient´ıfico Tecnol´ogico
+Universidad de Las Palmas de Gran Canaria
+Las Palmas de Gran Canaria, 35017
+Spain"
+dac07680925b6c56b7ddf184dbdaf143a5d4816d,Object Ordering with Bidirectional Matchings for Visual Reasoning,"Object Ordering with Bidirectional Matchings for Visual Reasoning
+Hao Tan and Mohit Bansal
+UNC Chapel Hill
+{haotan,"
+dad7b8be074d7ea6c3f970bd18884d496cbb0f91,Super-Sparse Regression for Fast Age Estimation from Faces at Test Time,"Super-Sparse Regression for Fast Age
+Estimation From Faces at Test Time
+Ambra Demontis, Battista Biggio, Giorgio Fumera, and Fabio Roli
+Dept. of Electrical and Electronic Engineering, University of Cagliari
+Piazza d’Armi, 09123 Cagliari, Italy
+WWW home page: http://prag.diee.unica.it"
+da523ee3b7e8077713ebb7d903c3dc3bcb78921a,Multi-person Tracking-by-Detection Based on Calibrated Multi-camera Systems,"Multi-Person Tracking-by-Detection based on
+Calibrated Multi-Camera Systems
+Xiaoyan Jiang, Erik Rodner, and Joachim Denzler
+Computer Vision Group Jena
+Friedrich Schiller University of Jena
+http://www.inf-cv.uni-jena.de"
+da288fca6b3bcaee87a034529da5621bb90123d1,Aesthetics and Emotions in Images,"[ Dhiraj Joshi,
+Ritendra Datta,
+Elena Fedorovskaya,
+Quang-Tuan Luong,
+James Z. Wang,
+Jia Li, and Jiebo Luo]
+PUBLICDOMAINPICTURES.NET &
+© BRAND X PICTURES
+[ A computational perspective]
+In this tutorial, we define and discuss key aspects of the problem of computational inference of aesthetics
+nd emotion from images. We begin with a background discussion on philosophy, photography, paintings,
+visual arts, and psychology. This is followed by introduction of a set of key computational problems that the
+research community has been striving to solve and the computational framework required for solving
+them. We also describe data sets available for performing assessment and outline several real-world applica-
+tions where research in this domain can be employed. A significant number of papers that have attempted to
+solve problems in aesthetics and emotion inference are surveyed in this tutorial. We also discuss future direc-
+tions that researchers can pursue and make a strong case for seriously attempting to solve problems in this
+research domain.
+Digital Object Identifier 10.1109/MSP.2011.941851
+Date of publication: 22 August 2011"
+dadb7ddfde3478238d23a8bacf5eddecc59e84c9,Vocabulary Image Captioning with Constrained Beam Search,"Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 947–956
+Copenhagen, Denmark, September 7–11, 2017. c(cid:13)2017 Association for Computational Linguistics
+image containing previously unseen object (‘suitcase’)CNN-RNNCaptioning ModelA catsitting insideofa suitcase.cat, suitcase, insideConstrainedBeamSearchBeamSearchA cat sitting on top ofa refrigerator.Image TagsFigure1:Wesuccessfullycaptionimagescontain-ingpreviouslyunseenobjectsbyincorporatingse-manticattributes(i.e.,imagetags)duringRNNde-coding.ActualexamplefromSection4.2.prisingly,modelstrainedonthesedatasetsdonotgeneralizewelltoout-of-domainimagescontain-ingnovelscenesorobjects(Tranetal.,2016).Thislimitationseverelyhinderstheuseofthesemodelsinrealworldapplicationsdealingwithim-agesinthewild.Althoughavailableimage-captiontrainingdataislimited,manyimagecollectionsareaugmentedwithground-truthtextfragmentssuchassemanticattributes(i.e.,imagetags)orobjectannotations.Eveniftheseannotationsdonotexist,theycanbegeneratedusing(potentiallytaskspecific)imagetaggers(Chenetal.,2013;Zhangetal.,2016)orobjectdetectors(Renetal.,2015;Krauseetal.,2016),whichareeasiertoscaletonewconcepts.Inthispaperourgoalistoincorporatetextfrag-mentssuchastheseduringcaptiongeneration,toimprovethequalityofresultingcaptions.Thisgoalposestwokeychallenges.First,RNNsaregenerallyopaque,anddifficulttoinfluenceattesttime.Second,textfragmentsmayincludewords"
+da55917aa3a8a95179bae92c5b01e4c8f2f61b75,What makes a place? Building bespoke place dependent object detectors for robotics,"What Makes a Place? Building Bespoke Place Dependent Object Detectors
+for Robotics
+Jeffrey Hawke, Alex Bewley, Ingmar Posner"
+da4170c862d8ae39861aa193667bfdbdf0ecb363,Multi-Task CNN Model for Attribute Prediction,"Multi-task CNN Model for Attribute Prediction
+Abrar H. Abdulnabi, Student Member, IEEE, Gang Wang, Member, IEEE, , Jiwen Lu, Member, IEEE
+nd Kui Jia, Member, IEEE"
+da013b84a93cc89d78f2d9a346fc275e3c159565,Affordable Self Driving Cars and Robots with Semantic Segmentation,"Affordable Self Driving Cars and Robots with Semantic Segmentation
+Gaurav Bansal
+Jeff Chen
+Evan Darke"
+dabf269f516adc6bf87a7ceb455cceda4466917a,Investigation of Facial Artifacts on Face Biometrics using Eigenface based Single and Multiple Neural Networks,"Investigation of Facial Artifacts on Face Biometrics
+using Eigenface based Single and Multiple Neural Networks
+K. Sundaraj
+University Malaysia Perlis (UniMAP)
+School of Mechatronics Engineering
+02600 Jejawi - Perlis
+MALAYSIA"
+da9080d5b433f73444078ac79c3a8a4515ad958e,IIS at ImageCLEF 2015: Multi-label Classification Task,"IIS at ImageCLEF 2015:
+Multi-label classification task
+Antonio J Rodr´ıguez-S´anchez1, Sabrina Fontanella1,2,
+Justus Piater1, and Sandor Szedmak1
+Intelligent and Interactive Systems, Department of Computer Science,
+University of Innsbruck, Austria
+Department of Computer Science, University of Salerno, Italy
+https://iis.uibk.ac.at/"
+da995212c9c8a933307cd893d862f5bf7d99f3ec,Synthesizing Samples for Zero-shot Learning,"Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+EmbeddingSample EmbeddingElephantLionPandaMonkeyDolphinDog0.140.490.660.721.060.59Figure1:FrameworkofembeddingbasedZSLapproaches.occurfrequentlyenough,andthenewconceptsemergeev-erydayespeciallyintheWeb,whichmakesitdifficultandex-pensivetocollectandlabelasufficientlylargetrainingsetformodellearning[Changpinyoetal.,2016].Howtotraineffec-tiveclassificationmodelsfortheuncommonclasseswithoutusingthelabeledsamplesbecomesanimportantandpracti-calproblemandhasgatheredconsiderableresearchinterestsfromthemachinelearningandcomputervisioncommunities.Itisestimatedthathumanscanrecognizeapproximate30;000basicobjectcategoriesandmanymoresubordinateonesandtheyareabletoidentifynewclassesgivenanat-tributedescription[Lampertetal.,2014].Basedonthisob-servation,manyzero-shotlearning(ZSL)approacheshavebeenproposed[Akataetal.,2015;Romera-ParedesandTorr,2015;ZhangandSaligrama,2016a;Guoetal.,2017a].ThegoalofZSListobuildclassifiersfortargetunseenclassesgivennolabeledsamples,withclassattributesassidein-formationandfullylabeledsourceseenclassesasknowl-edgesource.Differentfrommanysupervisedlearningap-proacheswhichtreateachclassindependently,ZSLasso-ciatesclasseswithanintermediaryattributeorsemantics-paceandthentransfersknowledgefromthesourceseenclassestothetargetunseenclassesbasedontheassocia-tion.Inthisway,onlytheattributevectorofatarget(un-seen)classisrequiredandtheclassificationmodelcanbebuiltevenwithoutanylabeledsamplesforthisclass.Inparticular,anembeddingfunctionislearnedusingthela-beledsamplesofsourceseenclassesthatmapstheimagesandclassesintoacommonembeddingspacewherethedis-tanceorsimilaritybetweenthemcanbemeasured.Becausetheattributesaresharedbybothsourceandtargetclass-es,theembeddingfunctionlearnedbysourceclassescanbedirectlyappliedtotargetclasses[Farhadietal.,2009;Socheretal.,2013].Finally,givenatestimage,wemapit"
+da1ba46027b7236c937d276fb54e99906036c4ef,Using 3D Representations of the Nasal Region for Improved Landmarking and Expression Robust Recognition,"Using 3D Representations of the Nasal
+Region for Improved Landmarking and
+Expression Robust Recognition
+Jiangning Gao1
+Adrian N Evans1
+Department of Electronic and
+Electrical Engineering, University
+of Bath, Bath, UK, BA2 7AY."
+dac2103843adc40191e48ee7f35b6d86a02ef019,Unsupervised Celebrity Face Naming in Web Videos,"Unsupervised Celebrity Face Naming in Web Videos
+Lei Pang and Chong-Wah Ngo"
+dae420b776957e6b8cf5fbbacd7bc0ec226b3e2e,Recognizing Emotions in Spontaneous Facial Expressions,"RECOGNIZING EMOTIONS IN SPONTANEOUS FACIAL EXPRESSIONS
+Michael Grimm, Dhrubabrata Ghosh Dastidar, and Kristian Kroschel
+Institut f¨ur Nachrichtentechnik
+Universit¨at Karlsruhe (TH), Germany"
+da833d8ec9c91d55256effccd370b2e62a896ccb,Front-view Gait Recognition,"Front-view Gait Recognition
+Michela Goffredo, John N. Carter and Mark S. Nixon"
+daa02cf195818cbf651ef81941a233727f71591f,Face recognition system on Raspberry Pi,"Face recognition system on Raspberry Pi
+Olegs Nikisins, Rihards Fuksis, Arturs Kadikis, Modris Greitans
+Institute of Electronics and Computer Science,
+4 Dzerbenes Street, Riga, LV 1006, Latvia"
+da8d0855e7760e86fbec47a3cfcf5acd8c700ca8,F 2 ConText : How to Extract Holistic Contexts of Persons of Interest for Enhancing Exploratory Analysis,"Accepted on 15 Sep 2018. To appear in Knowledge and Information Systems.
+Under consideration for publication in Knowledge and Information Sys-
+F2ConText: How to Extract Holistic
+Contexts of Persons of Interest for
+Enhancing Exploratory Analysis
+Md Abdul Kader1, Arnold P. Boedihardjo2 and M. Shahriar Hossain3
+IBM Innovation Center, Austin, TX 78758
+Radiant Solutions, Herndon, VA 20171
+The University of Texas at El Paso, El Paso, TX 79968"
+da1e0b9e445493d3e6dc0e3c23be194228c5d796,Video Segmentation using Teacher-Student Adaptation in a Human Robot Interaction (HRI) Setting,"Video Segmentation using Teacher-Student Adaptation
+in a Human Robot Interaction (HRI) Setting
+Mennatullah Siam1, Chen Jiang1, Steven Lu1, Laura Petrich1,
+Mahmoud Gamal2, Mohamed Elhoseiny3, Martin Jagersand1"
+daefac0610fdeff415c2a3f49b47968d84692e87,Multimodal Frame Identification with Multilingual Evaluation,"New Orleans, Louisiana, June 1 - 6, 2018. c(cid:13)2018 Association for Computational Linguistics
+Proceedings of NAACL-HLT 2018, pages 1481–1491"
+daef6fa60c7d79930ad0a341aab69f1f4fa80442,Supplement for BIER,"Supplement for BIER
+. Introduction
+In this document we provide further insights into Boost-
+ing Independent Embeddings Robustly (BIER). First, in
+Section 2 we describe our method for loss functions op-
+erating on triplets. Next, in Section 3 we show how our
+method behaves when we vary the embedding size and the
+number of groups. In Section 4 we summarize the effect of
+our boosting based training approach and our initialization
+pproach. We provide an experiment evaluating the impact
+of end-to-end training in Section 5. Further, in Section 6 we
+demonstrate that our method is applicable to generic im-
+ge classification problems. Finally, we show a qualitative
+omparison of the different embeddings in our ensemble in
+Section 7 and some qualitative results in Section 8.
+. BIER for Triplets
+For loss functions operating on triplets of samples, we
+illustrate our training method in Algorithm 1. In contrast
+to our tuple based algorithm, we sample triplets x(1), x(2)
+nd x(3) which satisfy the constraint that the first pair (x(1),"
+da24f3e196c5345ce08dfcc835574035da197f48,A Global Alignment Kernel based Approach for Group-level Happiness Intensity Estimation,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2017
+A Global Alignment Kernel based Approach for
+Group-level Happiness Intensity Estimation
+Xiaohua Huang, Abhinav Dhall, Roland Goecke, Member, IEEE, Matti Pietik¨ainen, Fellow, IEEE, and
+Guoying Zhao, Senior Member, IEEE"
+b49affdff167f5d170da18de3efa6fd6a50262a2,Linking Names and Faces : Seeing the Problem in Different Ways,"Author manuscript, published in ""Workshop on Faces in 'Real-Life' Images: Detection, Alignment, and Recognition, Marseille : France
+(2008)"""
+b4d117e109b3a6762d1b675defd9f2b228613ac1,Financialized methods for market-based multi-sensor fusion,"Congress Center Hamburg
+Sept 28 - Oct 2, 2015. Hamburg, Germany
+978-1-4799-9993-4/15/$31.00 ©2015 IEEE"
+b498640d8f0ac5a628563ff84dbef8d35d12a7ec,Overcoming catastrophic forgetting with hard attention to the task,"Overcoming Catastrophic Forgetting with Hard Attention to the Task
+Joan Serr`a 1 D´ıdac Sur´ıs 1 2 Marius Miron 1 3 Alexandros Karatzoglou 1"
+b4b6a0129bf6a716fca80a4cfc322687a72fa927,Automatic Generation of Planar Marionettes from Frontal Images,"Automatic Generation of Planar Marionettes from Frontal Images
+Elad Richardson and Gil Ben-Shachar
+Supervised by Anastasia Dubrovina and Aaron Weltzer"
+b4a3f480e2004bdc8106de2f772283101bb290d0,Multi-stage ranking approach for fast person re-identification,"IET Research Journals
+A Multi-Stage Ranking Approach for Fast Person Re-Identification
+A Multi-Stage Ranking Approach for Fast
+Person Re-Identification
+Bahram Lavi, Giorgio Fumera , Fabio Roli
+Department of Electrical and Electronic Engineering, University of Cagliari
+Piazza d’Armi, 09123, Cagliari, Italy
+E-mail:
+ISSN 1751-8644
+doi: 0000000000
+www.ietdl.org"
+b40881a905cf6c4963658df4f64b860f9b1755fe,Unrestricted Facial Geometry Reconstruction Using Image-to-Image Translation,"Unrestricted Facial Geometry Reconstruction Using Image-to-Image Translation
+Matan Sela
+Elad Richardson
+Ron Kimmel
+Department of Computer Science, Technion - Israel Institute of Technology
+Figure 1: Results of the proposed method. Reconstructed geometries are shown next to the corresponding input images."
+b4270de7380d305b4417f662686093c40d842da4,Graphical Models for Wide-Area Activity Analysis in Continuous Videos,"UNIVERSITY OF CALIFORNIA
+RIVERSIDE
+Graphical Models for Wide-Area Activity Analysis in Continuous Videos
+A Dissertation submitted in partial satisfaction
+of the requirements for the degree of
+Doctor of Philosophy
+Computer Science
+Nandita M. Nayak
+May 2014
+Dissertation Committee:
+Professor Amit K. Roy-Chowdhury, Chairperson
+Professor Christian Shelton
+Professor Eamonn Keogh
+Professor Victor Zordan"
+b49aa569ff63d045b7c0ce66d77e1345d4f9745c,Convolutional Neural Networks for Crop Yield Prediction using Satellite Images,"Convolutional Neural Networks for Crop Yield Prediction using Satellite Images
+H. Russello"
+b41374f4f31906cf1a73c7adda6c50a78b4eb498,Iterative Gaussianization: From ICA to Random Rotations,"This article has been accepted for inclusion in a future issue of this journal. Content is final as presented, with the exception of pagination.
+Iterative Gaussianization: From ICA to
+Random Rotations
+Valero Laparra, Gustavo Camps-Valls, Senior Member, IEEE, and Jesús Malo"
+b408b939c0f3be9cce0f84871a78a71d1684cd77,Identifying spatial relations in images using convolutional neural networks,"Identifying Spatial Relations in Images using
+Convolutional Neural Networks
+Mandar Haldekar, Ashwinkumar Ganesan
+Dept. Of Computer Science & Engineering,
+Tim Oates
+Dept. Of Computer Science & Engineering,
+UMBC,
+Baltimore, MD
+mandarh1,
+UMBC,
+Baltimore, MD"
+b44d8ecac21867c540d9122a150c8d8c0875cbe6,Mixture Density Generative Adversarial Networks,"Mixture Density Generative Adversarial Networks
+Hamid Eghbal-zadeh1 ∗
+Werner Zellinger2
+Gerhard Widmer1
+LIT AI Lab & Institute of Computational Perception
+Department of Knowledge-Based Mathematical Systems
+{hamid.eghbal-zadeh, werner.zellinger,
+Johannes Kepler University of Linz, Austria"
+b4b1b39f8902208bbd37febfb68e08809098036d,TRECVid Semantic Indexing of Video : A 6-year Retrospective,"UvA-DARE (Digital Academic Repository)
+TRECVid Semantic Indexing of Video: A 6-year Retrospective
+Awad, G.; Snoek, C.G.M.; Smeaton, A.F.; Quénot, G.
+Published in:
+ITE Transactions on Media Technology and Applications
+0.3169/mta.4.187
+Link to publication
+Citation for published version (APA):
+Awad, G., Snoek, C. G. M., Smeaton, A. F., & Quénot, G. (2016). TRECVid Semantic Indexing of Video: A 6-
+year Retrospective. ITE Transactions on Media Technology and Applications, 4(3), 187-208. DOI:
+0.3169/mta.4.187
+General rights
+It is not permitted to download or to forward/distribute the text or part of it without the consent of the author(s) and/or copyright holder(s),
+other than for strictly personal, individual use, unless the work is under an open content license (like Creative Commons).
+Disclaimer/Complaints regulations
+If you believe that digital publication of certain material infringes any of your rights or (privacy) interests, please let the Library know, stating
+your reasons. In case of a legitimate complaint, the Library will make the material inaccessible and/or remove it from the website. Please Ask
+the Library: http://uba.uva.nl/en/contact, or a letter to: Library of the University of Amsterdam, Secretariat, Singel 425, 1012 WP Amsterdam,
+The Netherlands. You will be contacted as soon as possible.
+Download date: 02 Nov 2018"
+b4223cc72543656c28b55af1ffdabb1e47a0f2dd,Stacking with Auxiliary Features for Visual Question Answering,"New Orleans, Louisiana, June 1 - 6, 2018. c(cid:13)2018 Association for Computational Linguistics
+Proceedings of NAACL-HLT 2018, pages 2217–2226"
+b4fe9594e1de682e7270645ba95ab64727b6632e,Generative Adversarial Positive-Unlabelled Learning,"Generative Adversarial Positive-Unlabelled Learning
+Ming Hou1, Brahim Chaib-draa2, Chao Li1, Qibin Zhao1,
+Center for Advanced Intelligence Project, RIKEN, Tokyo, Japan
+Department of Computer Science and Software Engineering, Laval University, Quebec, Canada"
+b4c02e071432a9a986501b7317b524f216e87ec8,Visual Saliency Prediction using Deep learning Techniques A Degree Thesis,"Visual Saliency Prediction
+using Deep learning Techniques
+A Degree Thesis
+Submitted to the Faculty of the
+Escola Tècnica d'Enginyeria de Telecomunicació de
+Barcelona
+Universitat Politècnica de Catalunya
+Junting Pan
+In partial fulfilment
+of the requirements for the degree in
+TELECOMUNICATION ENGINEERING
+Advisor: Xavier Giró i Nieto
+Barcelona, July 2015"
+b49425f78907fcc447d181eb713abffc74dd85e4,Sampling Matters in Deep Embedding Learning,"Sampling Matters in Deep Embedding Learning
+Chao-Yuan Wu∗
+UT Austin
+R. Manmatha
+A9/Amazon
+Alexander J. Smola
+Amazon
+Philipp Kr¨ahenb¨uhl
+UT Austin"
+b4ee64022cc3ccd14c7f9d4935c59b16456067d3,Unsupervised Cross-Domain Image Generation,"Unsupervised Cross-Domain Image Generation
+Xinru Hua, Davis Rempe, and Haotian Zhang"
+b45a9f95980c434582c920bf15a8099ec267c1f7,Robust Kronecker Component Analysis,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+Robust Kronecker Component Analysis
+Mehdi Bahri, Student Member, IEEE, Yannis Panagakis, and Stefanos Zafeiriou, Member, IEEE"
+b4f6962068c27d10df9016090a0ca14f65f26b70,A Statisitical Shape Model for Deformable Surface Registration,"A STATISITICAL SHAPE MODEL FOR DEFORMABLE
+SURFACE REGISTRATION
+Wei Quan, Bogdan J. Matuszewski and Lik-Kwan Shark
+Applied Digital Signal and Image Processing (ADSIP) Research Centre
+University of Central Lancashire, Preston PR1 2HE, United Kingdom
+{wquan, bmatuszewski1,
+Keywords:
+Deformable Registration, Surface Matching, Shape Modelling and Face Articulation."
+b40290a694075868e0daef77303f2c4ca1c43269,Combining Local and Global Information for Hair Shape Modeling,"第 40 卷 第 4 期
+014 年 4 月
+自 动 化 学 报
+ACTA AUTOMATICA SINICA
+Vol. 40, No. 4
+April, 2014
+融合局部与全局信息的头发形状模型
+王 楠 1 艾海舟 1
+摘 要 头发在人体表观中具有重要作用, 然而, 因为缺少有效的形状模型, 头发分割仍然是一个非常具有挑战性的问题. 本
+文提出了一种基于部件的模型, 它对头发形状以及环境变化更加鲁棒. 该模型将局部与全局信息相结合以描述头发的形状. 局
+部模型通过一系列算法构建, 包括全局形状词表生成, 词表分类器学习以及参数优化; 而全局模型刻画不同的发型, 采用支持
+向量机 (Support vector machine, SVM) 来学习, 它为所有潜在的发型配置部件并确定势函数. 在消费者图片上的实验证明
+了本文算法在头发形状多变和复杂环境等条件下的准确性与有效性.
+关键词 头发形状建模, 部件模型, 部件配置算法, 支持向量机
+引用格式 王楠, 艾海舟. 融合局部与全局信息的头发形状模型. 自动化学报, 2014, 40(4): 615−623
+DOI 10.3724/SP.J.1004.2014.00615
+Combining Local and Global Information for Hair Shape Modeling
+WANG Nan1
+AI Hai-Zhou1"
+b4ee2a6b5fdf66f57e94a998cff2acef4af7d256,Monocular Visual Scene Understanding: Understanding Multi-Object Traffic Scenes,"Monocular Visual Scene Understanding:
+Understanding Multi-Object Traffic Scenes
+Christian Wojek, Stefan Walk, Stefan Roth, Konrad Schindler, Bernt Schiele"
+b419e0e1192d307d536421d811d10657f65eb72b,Face Recognition using DCT based Energy Discriminant Mask,"International Journal of Computer Applications (0975 – 8887)
+Volume 170 – No.5, July 2017
+Face Recognition using DCT based Energy
+Discriminant Mask
+Vikas Maheshkar
+Division of Information technology
+New Delhi, India"
+b47386e10125462d60d66f8d6d239a69c5966853,Robust Multi Gradient Entropy Method for Face Recognition System for Low Contrast Noisy Images,"International Journal of Emerging Trends & Technology in Computer Science (IJETTCS)
+Web Site: www.ijettcs.org Email:
+ISSN 2278-6856
+Volume 2, Issue 3, May – June 2013
+ROBUST MULTI GRADIENT ENTROPY
+METHOD FOR FACE RECOGNITION
+SYSTEM FOR LOW CONTRAST NOISY
+IMAGES
+C. Naga Raju1, P.Prathap Naidu2, R. Pradeep Kumar Reddy3, G. Sravana Kumari4
+Associate Professor, CSE Dept, YSR Engg College of YVU
+Asst. Professor, CSE Dept, RGM Engg College
+Asst. Professor, CSE Dept, YSR Engg College.
+M.Tech In CSE RGM Engg College
+the most
+recognition under difficult"
+b47ea4d5b0040d85181925bda74da4ab5303768f,LIFEisGAME:A Facial Character Animation System to Help Recognize Facial Expressions,"LIFEisGAME:A Facial Character Animation System to
+Help Recognize Facial Expressions
+Tiago Fernandes1,5, Samanta Alves2, José Miranda3,5, Cristina Queirós2,
+Verónica Orvalho1,4
+Instituto de Telecomunicações, Lisboa, Portugal,
+Faculdade de Psicologia da Universidade do Porto, Porto, Portugal,
+Instituto Politécnico da Guarda, Porto, Portugal,
+Faculdade de Ciências da Universidade do Porto, Porto, Portugal,
+5 Faculdade de Engenharia da Universidade do Porto, Porto, Portugal,"
+b4b0bf0cbe1a2c114adde9fac64900b2f8f6fee4,Autonomous Learning Framework Based on Online Hybrid Classifier for Multi-view Object Detection in Video,"Autonomous Learning Framework Based on Online Hybrid
+Classifier for Multi-view Object Detection in Video
+Dapeng Luoa*Zhipeng Zenga Longsheng Weib Yongwen Liua Chen Luoc Jun Chenb Nong Sangd
+School of Electronic Information and Mechanics, China University of Geosciences, Wuhan, Hubei 430074, China
+School of Automation, China University of Geosciences, Wuhan, Hubei 430074, China
+Huizhou School Affiliated to Beijing Normal University, Huizhou 516002, China
+dNational Key Laboratory of Science and Technology on Multispectral Information Processing, School of Automation, Huazhong
+University of Science and Technology, Wuhan, 430074, China"
+b411850a3614fbb06bc77e6f776b2f23af563a90,Size Does Matter: Improving Object Recognition and 3D Reconstruction with Cross-Media Analysis of Image Clusters,"Size does matter: improving object recognition
+nd 3D reconstruction with cross-media analysis
+of image clusters
+Stephan Gammeter1, Till Quack1, David Tingdahl2, and Luc van Gool1,2
+BIWI, ETH Z¨urich1 http://www.vision.ee.ethz.ch
+VISICS, K.U. Leuven2 http://www.esat.kuleuven.be/psi/visics"
+a285b6edd47f9b8966935878ad4539d270b406d1,Facial Expression Recognition Based on Local Binary Patterns and Kernel Discriminant Isomap,"Sensors 2011, 11, 9573-9588; doi:10.3390/s111009573
+OPEN ACCESS
+sensors
+ISSN 1424-8220
+www.mdpi.com/journal/sensors
+Article
+Facial Expression Recognition Based on Local Binary Patterns
+nd Kernel Discriminant Isomap
+Xiaoming Zhao 1,* and Shiqing Zhang 2
+Department of Computer Science, Taizhou University, Taizhou 317000, China
+School of Physics and Electronic Engineering, Taizhou University, Taizhou 318000, China;
+E-Mail:
+* Author to whom correspondence should be addressed; E-Mail:
+Tel.: +86-576-8513-7178; Fax: ++86-576-8513-7178.
+Received: 31 August 2011; in revised form: 27 September 2011 / Accepted: 9 October 2011 /
+Published: 11 October 2011"
+a2ad9ae7c5adbbce9ded16ac3ebdfa96505c0f46,Déjà Image-Captions: A Corpus of Expressive Descriptions in Repetition,"Human Language Technologies: The 2015 Annual Conference of the North American Chapter of the ACL, pages 504–514,
+Denver, Colorado, May 31 – June 5, 2015. c(cid:13)2015 Association for Computational Linguistics"
+a2359c0f81a7eb032cff1fe45e3b80007facaa2a,Towards Structured Analysis of Broadcast Badminton Videos,"Towards Structured Analysis of Broadcast Badminton Videos
+Anurag Ghosh
+Suriya Singh
+C.V.Jawahar
+{anurag.ghosh,
+CVIT, KCIS, IIIT Hyderabad"
+a28f831b4014fa75a69f3c56e39d9c40fc0af48f,AAD: Adaptive Anomaly Detection through traffic surveillance videos,"AAD: Adaptive Anomaly Detection through traffic
+surveillance videos
+Mohammad Farhadi Bajestani
+Seyed Soroush Heidari Rahmat Abadi
+Seyed Mostafa Derakhshandeh Fard
+Roozbeh Khodadadeh"
+a271f83cb1f72e0f9ca077499f51adb086fb449d,Unsupervised and Semi-supervised Methods for Human Action Analysis,"Unsupervised and
+Semi-supervised Methods
+for Human Action Analysis
+Simon Jones
+September 22, 2014
+A thesis submitted in partial fulfillment of the
+requirements for the degree of
+Doctor of Philosophy
+Department of Electronic and Electrical Engineering
+The University of Shef‌f‌ield"
+a290019f7125f6ebdc0dcec3b03b771de6905dd0,Heterogeneous AdaBoost with Real-time Constraints - Application to the Detection of Pedestrians by Stereovision,"HETEROGENEOUS ADABOOST WITH REAL-TIME
+Application to the Detection of Pedestrians by stereovision
+CONSTRAINTS
+Lo¨ıc Jourdheuil1, Nicolas Allezard1, Thierry Chateau2 and Thierry Chesnais1
+CEA, LIST, Laboratoire Vision et Ing´enierie des Contenus, Gif-sur-Yvette, France
+LASMEA, UMR UBP-CNRS 6602, 24 Avenue des Landais, AUBIERE, France
+{loic.jourdheuil, nicolas.allezard,
+Keywords:
+Adaboost. stereovision. real time."
+a24f84b156bbb1edeb1d0761f5940de318b7ed9d,Copula Eigenfaces - Semiparametric Principal Component Analysis for Facial Appearance Modeling,
+a2db611b6179f3bc4cfe0e891df7b9d4ab58d642,On the usability of deep networks for object-based image analysis,"ON THE USABILITY OF DEEP NETWORKS FOR OBJECT-BASED IMAGE ANALYSIS
+Nicolas Audeberta, b, Bertrand Le Sauxa, Sébastien Lefèvreb
+ONERA, The French Aerospace Lab, F-91761 Palaiseau, France
+Univ. Bretagne-Sud, UMR 6074, IRISA, F-56000 Vannes, France -
+KEY WORDS: deep learning, vehicle detection, semantic segmentation, object classification"
+a212be7ec1ff75ecfee52c7c49c73d7244a87eb7,Video Scene-Aware Dialog Track in DSTC 7,"Video Scene-Aware Dialog Track in DSTC7
+Chiori Hori∗, Tim K. Marks∗, Devi Parikh∗∗, and Dhruv Batra∗∗
+Mitsubishi Electric Research Laboratories
+Cambridge, MA, USA
+{chori,
+School of Interactive Computing
+Georgia Tech
+{parikh,"
+a2a42aa37641490213b2de9eb8e83f3dab75f5ed,Multilinear Supervised Neighborhood Preserving Embedding Analysis of Local Descriptor Tensor,"We are IntechOpen,
+the world’s leading publisher of
+Open Access books
+Built by scientists, for scientists
+,800
+16,000
+Open access books available
+International authors and editors
+Downloads
+Our authors are among the
+Countries delivered to
+TOP 1%
+2.2%
+most cited scientists
+Contributors from top 500 universities
+Selection of our books indexed in the Book Citation Index
+in Web of Science™ Core Collection (BKCI)
+Interested in publishing with us?
+Contact
+Numbers displayed above are based on latest data collected."
+a2505774d5654685c6d899760759520b339e6c1e,Ranking Eigenfaces Through Adaboost and Perceptron Ensembles,"Ranking Eigenfaces Through Adaboost and
+Perceptron Ensembles
+Tiene A. Filisbino, Gilson A. Giraldi
+Laborat´orio Nacional de Computac¸˜ao Cient´ıfica - LNCC
+Petr´opolis, Brasil
+Email:
+Carlos Eduardo Thomaz
+Departamento de Engenharia El´etrica
+Centro Universit´ario da FEI
+S˜ao Bernardo do Campo - Brasil
+Email:"
+a2bfab80a4b48717aa647cb38069632c5962c6a6,Countering Bias in Tracking Evaluations,
+a27735e4cbb108db4a52ef9033e3a19f4dc0e5fa,Intention from Motion,"Intention from Motion
+Andrea Zunino, Jacopo Cavazza, Atesh Koul, Andrea Cavallo, Cristina Becchio and Vittorio Murino"
+a2aa272b32c356ec9933b32ca5809c09f2d21b9f,Clockwork Convnets for Video Semantic Segmentation,"Clockwork Convnets for Video Semantic Segmentation
+Evan Shelhamer(cid:63)
+Kate Rakelly(cid:63)
+Judy Hoffman(cid:63)
+Trevor Darrell
+UC Berkeley"
+a2f2996145d3d670608af1cbbda59c1ac28d4f7c,Real-Time Hand Posture Recognition for Human-Robot Interaction Tasks,"Article
+Real-Time Hand Posture Recognition for
+Human-Robot Interaction Tasks
+Uriel Haile Hernandez-Belmonte and Victor Ayala-Ramirez *
+Received: 30 October 2015; Accepted: 18 December 2015; Published: 4 January 2016
+Academic Editor: Lianqing Liu
+Universidad de Guanajuato DICIS, Carr. Salamanca-Valle Km. 3.5 + 1.8, Palo Blanco, Salamanca, C.P. 36885,
+Mexico;
+* Correspondence: Tel.: +52-464-647-9940 (ext. 2413); Fax: +52-464-647-9940 (ext. 2311)"
+a27740f8a3834d6bc605a6b383c4d802ced373c9,"Exploiting feature representations through similarity learning, post-ranking and ranking aggregation for person re-identification","Exploiting feature representations through similarity learning, post-ranking and
+ranking aggregation for person re-identification
+Julio C. S. Jacques Juniora,b,∗, Xavier Bar´oa,b, Sergio Escalerac,b
+Faculty of Computer Science, Multimedia and Telecommunication - Universitat Oberta de Catalunya, Spain
+Computer Vision Center - Universitat Aut`onoma de Barcelona, Spain
+Department of Mathematics and Informatics - University of Barcelona, Spain"
+a27c7afac5a34141ec5415defed6d4d85325230a,Utrecht Multi-Person Motion (UMPM) benchmark,"Utrecht Multi-Person Motion (UMPM)
+Benchmark
+N.P. van der Aa, X. Luo, G.-J. Giezeman
+R.T. Tan, R.C. Veltkamp
+Technical Report UU-CS-2011-027
+September 2011
+Department of Information and Computing Sciences
+Utrecht University, Utrecht, The Netherlands
+www.cs.uu.nl"
+a2afaa782be91f5baf9e9f1794d57dd29143cbf4,IGCV$2$: Interleaved Structured Sparse Convolutional Neural Networks,"IGCV2: Interleaved Structured Sparse Convolutional Neural Networks
+Guotian Xie1,2,∗ Jingdong Wang3 Ting Zhang3
+Jianhuang Lai1,2 Richang Hong4 Guo-Jun Qi5
+Sun Yat-Sen University 2Guangdong Key Laboratory of Information Security Technology
+Microsoft Research 4Hefei University of Technology 5University of Central Florida"
+a2fce1c551a3c3b1cac16a96f86a59cd7fbd4c80,Attachment and Children’s Biased Attentional Processing: Evidence for the Exclusion of Attachment-Related Information,"Attachment and Children’s Biased Attentional
+Processing: Evidence for the Exclusion of Attachment-
+Related Information
+Eva Vandevivere1*, Caroline Braet1, Guy Bosmans2, Sven C. Mueller3, Rudi De Raedt3
+Department of Developmental, Personality and Social Psychology, Ghent University, Gent, Belgium, 2 Parenting and Special Education Research Unit, Leuven, Belgium,
+Department of Experimental Clinical and Health Psychology, Ghent University, Ghent, Belgium"
+a237e3d89c460e1b2e3f12c5d4275bd0c6eb47a8,Domain Adaptation on Graphs by Learning Aligned Graph Bases,"Domain Adaptation on Graphs by Learning
+Aligned Graph Bases
+Mehmet Pilancı and Elif Vural"
+a2b9c998264ab1920ea8f2e07c3590ebb3dc6f35,Shopper Analytics: A Customer Activity Recognition System Using a Distributed RGB-D Camera Network,"Shopper Analytics: a customer activity
+recognition system using a distributed RGB-D
+camera network
+Daniele Liciotti, Marco Contigiani, Emanuele Frontoni, Adriano Mancini,
+Primo Zingaretti1, and Valerio Placidi2
+Dipartimento di Ingegneria dell’Informazione, Universit`a Politecnica delle Marche,
+{d.liciotti, m.contigiani,e.frontoni, a.mancini,
+Via Brecce Bianche, 60131 Ancona, Italy,
+Grottini Lab srl,
+Via S.Maria in Potenza, 62017, Porto Recanati, Italy,"
+a2fbaa0b849ecc74f34ebb36d1442d63212b29d2,An Efficient Approach to Face Recognition of Surgically Altered Images,"Volume 5, Issue 6, June 2015 ISSN: 2277 128X
+International Journal of Advanced Research in
+Computer Science and Software Engineering
+Research Paper
+Available online at: www.ijarcsse.com
+An Efficient Approach to Face Recognition of Surgically
+Altered Images
+Er. Supriya, Er. Sukhpreet Kaur
+Department of computer science and engineering
+SUS college of Engineering and Technology,
+Tangori, District, Mohali, Punjab, India"
+a21b8aadb27cd10d8a228fe1aad27c0c88d67f15,Design and Implementation of PC Operated Flying Robot for Rescue Operation in Coalmines,"ISSN: 2278 – 7798
+International Journal of Science, Engineering and Technology Research (IJSETR)
+Volume 2, Issue 1, January 2013
+Design and Implementation of PC Operated
+Flying Robot for Rescue Operation in
+Coalmines
+Aditya Kumar T , Pravin A, M S Madhan mohan, T V Janardhanarao"
+a23e7e71fb92a56c2e7717f6356e8b69fc2f4bfc,"Multimodal fusion of audio, scene, and face features for first impression estimation","Multimodal Fusion of Audio, Scene, and Face
+Features for First Impression Estimation
+Furkan G¨urpınar
+Program of Computational
+Science and Engineering
+Bo˘gazic¸i University
+Bebek, Istanbul, Turkey
+Email:
+Heysem Kaya
+Albert Ali Salah
+Department of Computer Engineering
+Department of Computer Engineering
+Namık Kemal University
+C¸ orlu, Tekirda˘g, Turkey
+Email:
+Bo˘gazic¸i University
+Bebek, Istanbul, Turkey
+Email:"
+a2dd13729206a7434ef1f0cd016275c0d6f3bb6d,SFV: Reinforcement Learning of Physical Skills from Videos,"SFV: Reinforcement Learning of Physical Skills from Videos
+XUE BIN PENG, University of California, Berkeley
+ANGJOO KANAZAWA, University of California, Berkeley
+JITENDRA MALIK, University of California, Berkeley
+PIETER ABBEEL, University of California, Berkeley
+SERGEY LEVINE, University of California, Berkeley
+Fig. 1. Simulated characters performing highly dynamic skills learned by imitating video clips of human demonstrations. Left: Humanoid performing
+cartwheel B on irregular terrain. Right: Backflip A retargeted to a simulated Atlas robot.
+Data-driven character animation based on motion capture can produce
+highly naturalistic behaviors and, when combined with physics simula-
+tion, can provide for natural procedural responses to physical perturbations,
+environmental changes, and morphological discrepancies. Motion capture
+remains the most popular source of motion data, but collecting mocap data
+typically requires heavily instrumented environments and actors. In this
+paper, we propose a method that enables physically simulated characters
+to learn skills from videos (SFV). Our approach, based on deep pose esti-
+mation and deep reinforcement learning, allows data-driven animation to
+leverage the abundance of publicly available video clips from the web, such
+as those from YouTube. This has the potential to enable fast and easy de-
+sign of character controllers simply by querying for video recordings of the"
+a50b4d404576695be7cd4194a064f0602806f3c4,Efficiently Estimating Facial Expression and Illumination in Appearance-based Tracking,"In Proceedings of BMVC, Edimburgh, UK, September 2006
+Efficiently estimating facial expression and
+illumination in appearance-based tracking
+Jos´e M. Buenaposada†, Enrique Mu˜noz‡, Luis Baumela‡
+ESCET, U. Rey Juan Carlos
+C/ Tulip´an, s/n
+28933 M´ostoles, Spain
+Facultad Inform´atica, UPM
+Campus de Montegancedo s/n
+28660 Boadilla del Monte, Spain
+http://www.dia.fi.upm.es/~pcr"
+a511463a423f842bdb524009f6ce6c6b0ffa0f77,Kernel diff-hash,"Kernel diff-hash
+Michael M. Bronstein
+Institute of Computational Science
+Faculty of Informatics,
+Universit`a della Svizzera Italiana
+Via G. Buf‌f‌i 13, Lugano 6900, Switzerland
+November 3, 2011"
+a5e5094a1e052fa44f539b0d62b54ef03c78bf6a,Detection without Recognition for Redaction,"Detection without Recognition for Redaction
+Shagan Sah1, Ram Longman1, Ameya Shringi1, Robert Loce2, Majid Rabbani1, and Raymond Ptucha1
+Rochester Institute of Technology - 83 Lomb Memorial Drive, Rochester, NY USA, 14623
+Conduent, Conduent Labs - US, 800 Phillips Rd, MS128, Webster, NY USA, 14580
+Email:"
+a55dea7981ea0f90d1110005b5f5ca68a3175910,"Are 1, 000 Features Worth A Picture? Combining Crowdsourcing and Face Recognition to Identify Civil War Soldiers","Combining Crowdsourcing and Face Recognition to Identify Civil War Soldiers
+Are 1,000 Features Worth A Picture?
+Vikram Mohanty, David Thames, Kurt Luther
+Department of Computer Science and Center for Human-Computer Interaction
+Virginia Tech, Arlington, VA, USA"
+a5c63f38e2e6ca7fff48fc5cd1dbdb8f6362c99f,A Neural Approach to Blind Motion Deblurring,"A Neural Approach to Blind Motion Deblurring
+Ayan Chakrabarti
+Toyota Technological Institute at Chicago"
+a55ec6bade29f23f8cb1337edf417b2da2f48695,Deep Asymmetric Networks with a Set of Node-wise Variant Activation Functions,"Deep Asymmetric Networks with a Set of
+Node-wise Variant Activation Functions
+Jinhyeok Jang, Hyunjoong Cho, Jaehong Kim, Jaeyeon Lee, and Seungjoon Yang"
+a5be204b71d1daaf6897270f2373d1a5e37c3010,Improving Spatiotemporal Self-supervision by Deep Reinforcement Learning,"Improving Spatiotemporal Self-Supervision
+by Deep Reinforcement Learning
+Uta B¨uchler(cid:63), Biagio Brattoli(cid:63), and Bj¨orn Ommer
+Heidelberg University, HCI / IWR, Germany"
+a56c1331750bf3ac33ee07004e083310a1e63ddc,Efficient Point-to-Subspace Query in ℓ1 with Application to Robust Object Instance Recognition,"Vol. xx, pp. x
+(cid:13) xxxx Society for Industrial and Applied Mathematics
+Ef‌f‌icient Point-to-Subspace Query in (cid:96)1 with Application to Robust Object
+Instance Recognition
+Ju Sun∗, Yuqian Zhang†, and John Wright‡"
+a5006c29b0609296b5c1368ff1113eeb12b119ad,In-flight launch of unmanned aerial vehicles,"In-flight launch of unmanned aerial vehicles
+Niels Nauwynck, Haris Balta, Geert De Cubber, and Hichem Sahli"
+a59e338fec32adee012e31cdb0513ec20d6c8232,Phase Retrieval Under a Generative Prior,"Phase Retrieval Under a Generative Prior
+Paul Hand∗, Oscar Leong∗, and Vladislav Voroninski†
+July 12, 2018"
+a565990d6b176bf9c82eec9354b0936fb141e631,Scheduling on Heterogeneous Multi-core Processors Using Stable Matching Algorithm,"(IJACSA) International Journal of Advanced Computer Science and Applications,
+Vol. 7, No. 6, 2016
+Scheduling on Heterogeneous Multi-core Processors
+Using Stable Matching Algorithm
+Muhammad Rehman Zafar
+Department of Computer Science
+Bahria University
+Islamabad, Pakistan
+Muhammad Asfand-e-Yar
+Department of Computer Science
+Bahria University
+Islamabad, Pakistan"
+a54e0f2983e0b5af6eaafd4d3467b655a3de52f4,Face Recognition Using Convolution Filters and Neural Networks,"Face Recognition Using Convolution Filters and
+Neural Networks
+V. Rihani
+Head, Dept. of E&E,PEC
+Sec-12, Chandigarh – 160012
+Amit Bhandari
+Department of CSE & IT, PEC
+Sec-12, Chandigarh – 160012
+C.P. Singh
+Physics Department, CFSL,
+Sec-36, Chandigarh - 160036
+to: (a)
+potential method"
+a52d6daf72281521ee99dabd82cd80093e8d6f4a,Person re-identification across different datasets with multi-task learning,"Person re-identification across different datasets
+with multi-task learning
+Matthieu Ospici, Antoine Cecchi
+Atos BDS R&D"
+a5625cfe16d72bd00e987857d68eb4d8fc3ce4fb,VFSC: A Very Fast Sparse Clustering to Cluster Faces from Videos,"VFSC: A Very Fast Sparse Clustering to Cluster Faces
+from Videos
+Dinh-Luan Nguyen, Minh-Triet Tran
+University of Science, VNU-HCMC, Ho Chi Minh city, Vietnam"
+a5da6a6d4243a89e974a6467cb5c6df6d914a946,Static and Dynamic Approaches for Pain Intensity Estimation using Facial Expressions,
+a546fd229f99d7fe3cf634234e04bae920a2ec33,Fast Fight Detection,"RESEARCH ARTICLE
+Fast Fight Detection
+Ismael Serrano Gracia1*, Oscar Deniz Suarez1*, Gloria Bueno Garcia1*, Tae-Kyun Kim2
+Department of Systems Engineering and Automation, E.T.S.I. Industriales, Ciudad Real, Castilla-La
+Mancha, Spain, 2 Department of Electrical and Electronic Engineering, Imperial College, London, UK
+* (ISG); (ODS); (GBG)"
+a5531b5626c1ee3b6f9aed281a98338439d06d12,Multichannel Attention Network for Analyzing Visual Behavior in Public Speaking,"Multichannel Attention Network for Analyzing
+Visual Behavior in Public Speaking
+Rahul Sharma, Tanaya Guha and Gaurav Sharma
+IIT Kanpur
+{rahus, tanaya,"
+a5ae7fe2bb268adf0c1cd8e3377f478fca5e4529,Exemplar Hidden Markov Models for classification of facial expressions in videos,"Exemplar Hidden Markov Models for Classification of Facial Expressions in
+Videos
+Univ. of California San Diego
+Univ. of Canberra, Australian
+Univ. of California San Diego
+Abhinav Dhall
+Marian Bartlett
+Karan Sikka
+California, USA
+National University
+Australia
+California, USA"
+a577eefb31ba63baa087f321537b0be2784ec013,Security Event Recognition for Visual Surveillance,"Security Event Recognition for Visual Surveillance
+Michael Ying Yang∗, Senior Member, IEEE, Wentong Liao, Chun Yang, Yanpeng Cao, Member, IEEE and Bodo
+Rosenhahn Member, IEEE"
+a55efc4a6f273c5895b5e4c5009eabf8e5ed0d6a,"Continuous Head Movement Estimator for Driver Assistance: Issues, Algorithms, and On-Road Evaluations","Continuous Head Movement Estimator for
+Driver Assistance: Issues, Algorithms,
+and On-Road Evaluations
+Ashish Tawari, Student Member, IEEE, Sujitha Martin, Student Member, IEEE, and
+Mohan Manubhai Trivedi, Fellow, IEEE"
+a51d5c2f8db48a42446cc4f1718c75ac9303cb7a,Cross-validating Image Description Datasets and Evaluation Metrics,"Cross-validating Image Description Datasets and Evaluation Metrics
+Josiah Wang and Robert Gaizauskas
+Department of Computer Science
+University of Sheffield, UK
+{j.k.wang,"
+a52d9e9daf2cb26b31bf2902f78774bd31c0dd88,Understanding and Designing Convolutional Networks for Local Recognition Problems,"Understanding and Designing Convolutional Networks
+for Local Recognition Problems
+Jonathan Long
+Electrical Engineering and Computer Sciences
+University of California at Berkeley
+Technical Report No. UCB/EECS-2016-97
+http://www.eecs.berkeley.edu/Pubs/TechRpts/2016/EECS-2016-97.html
+May 13, 2016"
+a52d6c456122007f10c90989a1e81dc8e1c599da,Query-Adaptive Image Search With Hash Codes,"Query-Adaptive Image Search with Hash Codes
+Yu-Gang Jiang, Jun Wang, Member, IEEE, Xiangyang Xue, Member, IEEE, Shih-Fu Chang, Fellow, IEEE"
+a5a44a32a91474f00a3cda671a802e87c899fbb4,Moments in Time Dataset: one million videos for event understanding,"Moments in Time Dataset: one million
+videos for event understanding
+Mathew Monfort, Bolei Zhou, Sarah Adel Bargal,
+Alex Andonian, Tom Yan, Kandan Ramakrishnan, Lisa Brown,
+Quanfu Fan, Dan Gutfruend, Carl Vondrick, Aude Oliva"
+bd0e100a91ff179ee5c1d3383c75c85eddc81723,Okutama-Action: An Aerial View Video Dataset for Concurrent Human Action Detection,"Okutama-Action: An Aerial View Video Dataset for Concurrent Human Action
+Detection∗
+Mohammadamin Barekatain1, Miquel Mart´ı2,3, Hsueh-Fu Shih4, Samuel Murray2, Kotaro Nakayama5,
+Yutaka Matsuo5, Helmut Prendinger6
+Technical University of Munich, Munich, 2KTH Royal Institute of Technology, Stockholm,
+Polytechnic University of Catalonia, Barcelona, 4National Taiwan University, Taipei, 5University of
+Tokyo, Tokyo, 6National Institute of Informatics, Tokyo"
+bd07d1f68486052b7e4429dccecdb8deab1924db,Face representation under different illumination conditions,
+bd96c3af9c433b4eaf95c8a28f072e1b0fc2de1a,A Study on Facial Expression Recognition Model using an Adaptive Learning Capability,"We are IntechOpen,
+the world’s leading publisher of
+Open Access books
+Built by scientists, for scientists
+,800
+16,000
+Open access books available
+International authors and editors
+Downloads
+Our authors are among the
+Countries delivered to
+TOP 1%
+2.2%
+most cited scientists
+Contributors from top 500 universities
+Selection of our books indexed in the Book Citation Index
+in Web of Science™ Core Collection (BKCI)
+Interested in publishing with us?
+Contact
+Numbers displayed above are based on latest data collected."
+bdb74f1b633b2c48d5e9d101e09bad2db8d68be6,Chapter 1 . Medical Image Annotation (,"Chapter 1
+Medical image annotation 1
+Thanks to the rapid development of modern medical devices and the use of
+digital systems, more and more medical images are being generated. This
+has lead to an increase in the demand for automatic methods to index, com-
+pare, analyze and annotate them. Until 2005, automatic categorization of
+medical images was often restricted to a small number of classes. The Image-
+CLEF medical image annotation challenge was born in this scenario, propos-
+ing a task reflecting real life constraints of content based image classification
+in medical applications. In this chapter we report about our experience first
+as participants, then as co-organizers. This research activity started in 2007,
+supported by a 1-year IM2 fellowship. By leveraging over the initial IM2
+support, in 2008 a 4-year project started (EMMA, Enhanced Multimodal
+Medical data Access), sponsored by the Halser foundation. Since 2009, B.
+Caputo has been an ImageCLEF task organizers, respectively for the medi-
+cal annotation and robot vision tasks. Since 2013, she is main organizer of
+ImageCLEF.
+Introduction
+This chapter presents the algorithms and results of the Idiap team partici-
+pation to the ImageCLEFmed annotation task in 2007, 2008 and 2009. The"
+bdbf414a2059d542f501ad9b1d21eacc9831082b,Two-Layer Mixture Network Ensemble for Apparel Attributes Classification,"Two-Layer Mixture Network Ensemble for Apparel
+Attributes Classification
+Tianqi Han, Zhihui Fu, and Hongyu Li*
+AI Lab, ZhongAn Information Technology Service Co., Ltd.
+Shanghai, China"
+bdf64dd341925ea7b9b3abbb49cab3cf978f8e21,Probable Etiopathogenesis (samprapti) of Autism in Frame of Ayurveda in Relation to Intense World Theory,"Global J Res. Med. Plants & Indigen. Med. | Volume 2, Issue 6 | June 2013 | 448–459
+ISSN 2277-4289 | www.gjrmi.com | International, Peer reviewed, Open access, Monthly Online Journal
+Review article
+PROBABLE ETIOPATHOGENESIS (SAMPRAPTI) OF AUTISM IN FRAME
+OF AYURVEDA IN RELATION TO INTENSE WORLD THEORY
+Yadav Deepmala1*, Behera Banshidhar2, Kumar Abhimanyu3
+Asst.Professor, Dept. of Kaumarbhritya, M.S.M. Institute of Ayurveda, Khanpur kalan, Haryana-131305,
+India
+Lecturer, Dept. of Dravyaguna, Gaur Brahman Ayurvedic College, Rohtak, Haryana – 124001, India
+Director, All India Institute of Ayurveda, Gautampuri, Mathura road, Sarita Vihar, New Delhi-110076,
+India
+*Corresponding Author: E-mail: Mob +919414893921, +919414458895
+Received: 10/05/2013; Revised: 26/05/2013; Accepted: 30/05/2013"
+bda61e9bcf02d02f61882790dbbdad8e4fed0986,Face Recognition through Combined SVD and LBP Features,"Face Recognition through Combined SVD and LBP
+International Journal of Computer Applications (0975 – 8887)
+Volume 88 – No.9, February 2014
+Features
+Rahul Kumar Mittal
+M.Tech. Scholar
+BGIET, Sangrur
+Punjab (India)
+Anupam Garg
+Assistant Professor
+BGIET, Sangrur
+Punjab (India)"
+bd13f50b8997d0733169ceba39b6eb1bda3eb1aa,Occlusion Coherence: Detecting and Localizing Occluded Faces,"Occlusion Coherence: Detecting and Localizing Occluded Faces
+Golnaz Ghiasi, Charless C. Fowlkes
+University of California at Irvine, Irvine, CA 92697"
+bd78a853df61d03b7133aea58e45cd27d464c3cf,A Sparse Representation Approach to Facial Expression Recognition Based on LBP plus LFDA,"A Sparse Representation Approach to Facial
+Expression Recognition Based on LBP plus LFDA
+Ritesh Bora, V.A.Chakkarvar
+Computer science and Engineering Department,
+Government College of Engineering, Aurangabad [Autonomous]
+Station Road, Aurangabad, Maharashtra, India."
+bd17d6ba5525dec8762dbaacf6cc3e0cc3f5ff90,Necst: Neural Joint Source-channel Coding,"Under review as a conference paper at ICLR 2019
+NECST: NEURAL JOINT SOURCE-CHANNEL CODING
+Anonymous authors
+Paper under double-blind review"
+bd88bb2e4f351352d88ee7375af834360e223498,A Multi - camera video data set for research on High - Definition surveillance,"HDA dataset - DRAFT
+A Multi-camera video data set for research on
+High-Definition surveillance
+Athira Nambiar, Matteo Taiana, Dario Figueira,
+Jacinto Nascimento and Alexandre Bernardino
+Computer and Robot Vision Lab, Institute for Systems and Robotics
+Instituto Superior Técnico
+Lisbon, Portugal"
+bd2d7c7f0145028e85c102fe52655c2b6c26aeb5,Attribute-based People Search: Lessons Learnt from a Practical Surveillance System,"Attribute-based People Search: Lessons Learnt from a
+Practical Surveillance System
+Rogerio Feris
+IBM Watson
+http://rogerioferis.com
+Russel Bobbitt
+IBM Watson
+Lisa Brown
+IBM Watson
+Sharath Pankanti
+IBM Watson"
+bd0a6bea1985ece3388b1dae47fa76aab3562d6d,One Deep Music Representation to Rule Them All? : A comparative analysis of different representation learning strategies,"Noname manuscript No.
+(will be inserted by the editor)
+One Deep Music Representation to Rule Them All?
+A comparative analysis of different representation learning strategies
+Jaehun Kim · Juli´an Urbano ·
+Cynthia C. S. Liem · Alan Hanjalic
+Received: date / Accepted: date"
+bd2752acf6821282655933d1946f43bb4ac5e901,Flexible Network Binarization with Layer-wise Priority,"Flexible Network Binarization with Layer-wise Priority
+Lixue Zhuang*, Yi Xu*, Bingbing Ni*, Hongteng Xu†
+Shanghai Jiao Tong University*, Duke University†
+{qingliang, xuyi,"
+bdbba95e5abc543981fb557f21e3e6551a563b45,Speeding up the Hyperparameter Optimization of Deep Convolutional Neural Networks,"Vol. 17, No. 2 (2018) 1850008 (15 pages)
+#.c The Author(s)
+DOI: 10.1142/S1469026818500086
+Speeding up the Hyperparameter Optimization of Deep
+Convolutional Neural Networks
+Tobias Hinz*, Nicolas Navarro-Guerrero†, Sven Magg‡
+and Stefan Wermter§
+Knowledge Technology, Department of Informatics
+Universit€at Hamburg
+Vogt-K€olln-Str. 30, Hamburg 22527, Germany
+Received 15 August 2017
+Accepted 23 March 2018
+Published 18 June 2018
+Most learning algorithms require the practitioner to manually set the values of many hyper-
+parameters before the learning process can begin. However, with modern algorithms, the
+evaluation of a given hyperparameter setting can take a considerable amount of time and the
+search space is often very high-dimensional. We suggest using a lower-dimensional represen-
+tation of the original data to quickly identify promising areas in the hyperparameter space. This
+information can then be used to initialize the optimization algorithm for the original, higher-
+dimensional data. We compare this approach with the standard procedure of optimizing the"
+d1dfdc107fa5f2c4820570e369cda10ab1661b87,Super SloMo: High Quality Estimation of Multiple Intermediate Frames for Video Interpolation,"Super SloMo: High Quality Estimation of Multiple Intermediate Frames
+for Video Interpolation
+Huaizu Jiang1
+Deqing Sun2
+Varun Jampani2
+Ming-Hsuan Yang3,2
+Erik Learned-Miller1
+Jan Kautz2
+UMass Amherst
+NVIDIA 3UC Merced"
+d19df82c5ea644937bf182fabdc0e36e78ea6867,Emotional Facial Expression Recognition from Two Different Feature Domains,"EMOTIONAL FACIAL EXPRESSION RECOGNITION FROM TWO
+DIFFERENT FEATURE DOMAINS
+Jonghwa Kim and Frank Jung
+Institute of Computer Science, University of Augsburg, Germany
+Keywords:"
+d168c2bd29fcad2083586430dd76f54da69bc8a6,Person Re-Identification by Iterative Re-Weighted Sparse Ranking,"Person Re-Identification by Iterative
+Re-Weighted Sparse Ranking
+Giuseppe Lisanti, Iacopo Masi, Andrew D. Bagdanov, Member, IEEE, and
+Alberto Del Bimbo, Member, IEEE"
+d1dae2993bdbb2667d1439ff538ac928c0a593dc,Gamma Correction Technique Based Feature Extraction for Face Recognition System,"International Journal of Computational Intelligence and Informatics, Vol. 3: No. 1, April - June 2013
+Gamma Correction Technique Based Feature Extraction
+for Face Recognition System
+B Vinothkumar
+P Kumar
+Electronics and Communication Engineering
+K S Rangasamy College of Technology
+Electronics and Communication Engineering
+K S Rangasamy College of Technology
+Tamilnadu, India
+Tamilnadu, India"
+d1dd0c714950cbd89f76ec6b039201eadf74cade,Person Re-identification Using Robust Brightness Transfer Functions Based on Multiple Detections,"Person Re-identification Using Robust
+Brightness Transfer Functions Based
+on Multiple Detections
+Amran Bhuiyan(B), Behzad Mirmahboub, Alessandro Perina,
+and Vittorio Murino
+Pattern Analysis and Computer Vision (PAVIS),
+Istituto Italiano di Tecnologia, Genova, Italy"
+d1503151b39038a87acbd9ecce073ddc211a597d,Efficient Semantic Segmentation using Gradual Grouping,"Efficient Semantic Segmentation using Gradual Grouping
+Nikitha Vallurupalli1, Sriharsha Annamaneni1, Girish Varma1,
+C V Jawahar1, Manu Mathew2, Soyeb Nagori2
+Center for Visual Information Technology, Kohli Center on Intelligent Systems, IIIT-Hyderabad, India
+Texas Instruments, Bangalore, India"
+d1a0425f764ce8847d20d278e4a4267c8258c4dc,3D Human Pose Estimation with Siamese Equivariant Embedding,"3D Human Pose Estimation with Siamese Equivariant
+Embedding
+M´arton V´egesa,∗, Viktor Vargaa, Andr´as L˝orincza
+E¨otv¨os Lor´and University, Budapest, Hungary"
+d1295a93346411bb833305acc0e092c9e3b2eff1,The eMPaThy iMBalance hyPoThesis oF aUTisM : a TheoReTical aPPRoach To cogniTiVe and eMoTional eMPaThy in aUTisTic deVeloPMenT,"the Psychological record, 2009, 59, 489-510
+The eMPaThy iMBalance hyPoThesis oF aUTisM:
+TheoReTical aPPRoach To cogniTiVe and
+eMoTional eMPaThy in aUTisTic deVeloPMenT
+Adam Smith
+Dundee, Scotland
+There has been a widely held belief that people with autism spectrum disorders
+lack empathy. This article examines the empathy imbalance hypothesis (EIH) of
+autism. According to this account, people with autism have a deficit of cognitive
+empathy but a surfeit of emotional empathy. The behavioral characteristics of
+autism might be generated by this imbalance and a susceptibility to empathic
+overarousal. The EIH builds on the theory of mind account and provides an
+alternative to the extreme-male-brain theory of autism. Empathy surfeit is a re-
+current theme in autistic narratives, and empirical evidence for the EIH is grow-
+ing. A modification of the pictorial emotional Stroop paradigm could facilitate
+an experimental test of the EIH.
+Autism is a pervasive developmental disorder that continues to fascinate
+researchers, challenge clinicians, and distress affected families. empathy
+is a set of processes and outcomes at the heart of human social behavior.
+Fascination with autism is often interwoven with the study of empathy because"
+d1e66107eb084ea0ef5a97f3363f8787b8df91ed,Max-Margin Regularization for Reducing Accidentalness in Chamfer Matching,"Max-margin Regularization for Reducing
+Accidentalness in Chamfer Matching
+Angela Eigenstetter*, Pradeep Yarlagadda* and Bj¨orn Ommer
+Interdisciplinary Center for Scientific Computing, University of Heidelberg, Germany"
+d12c343e60f9cc1a0c6c94c138f38e6bffe22001,Diverse Sampling for Self-Supervised Learning of Semantic Segmentation,"Diverse Sampling for Self-Supervised Learning of Semantic Segmentation
+Mohammadreza Mostajabi ∗
+Nicholas Kolkin ∗
+Toyota Technological Institute at Chicago
+{mostajabi, nick.kolkin,
+Gregory Shakhnarovich"
+d1c103c63d930d3ae7397618f486117a48e35f16,Does gaze direction modulate facial expression processing in children with autism spectrum disorder?,"BIROn - Birkbeck Institutional Research Online
+Enabling open access to Birkbeck’s published research output
+Does gaze direction modulate facial expression
+processing in children with autism spectrum disorder?
+Journal Article
+http://eprints.bbk.ac.uk/2561
+Version: Accepted (Refereed)
+Citation:
+© 2009 Wiley Blackwell
+Publisher version
+______________________________________________________________
+All articles available through Birkbeck ePrints are protected by intellectual property law, including
+copyright law. Any use made of the contents should comply with the relevant law.
+______________________________________________________________
+Akechi, H.; Senju, A.; Kikuchi, Y.; Tojo, Y.; Osanai, H.; Hasegawa, T.
+(2009)
+Does gaze direction modulate facial expression processing in children
+with autism spectrum disorder?
+Deposit Guide
+Contact:"
+d1f58798db460996501f224fff6cceada08f59f9,Transferrable Representations for Visual Recognition,"Transferrable Representations for Visual Recognition
+Jeffrey Donahue
+Electrical Engineering and Computer Sciences
+University of California at Berkeley
+Technical Report No. UCB/EECS-2017-106
+http://www2.eecs.berkeley.edu/Pubs/TechRpts/2017/EECS-2017-106.html
+May 14, 2017"
+d16c8ac2d194a6e862be0d1c4edf1ca2cdf5dc18,Robust Subspace Approaches to Visual Learning and Recognition DOCTORAL,"Univerza v Ljubljani
+Fakulteta za raˇcunalniˇstvo in informatiko
+Danijel Skoˇcaj
+Robustni pristopi k vizualnemu uˇcenju
+in razpoznavanju na osnovi podprostorov
+DOKTORSKA DISERTACIJA
+Ljubljana, 2003
+Mentor: prof. dr. Aleˇs Leonardis"
+d1c091bf9402f1caf13892a3fae39326507401be,Speeding up Semantic Segmentation for Autonomous Driving,"Speeding up Semantic Segmentation for Autonomous
+Driving
+Michael Treml ∗1, José Arjona-Medina∗1, Thomas Unterthiner∗1,
+Rupesh Durgesh2, Felix Friedmann2, Peter Schuberth2,
+Andreas Mayr1, Martin Heusel1, Markus Hofmarcher1, Michael Widrich1,
+Bernhard Nessler1, Sepp Hochreiter1
+Institute of Bioinformatics, Johannes Kepler University Linz, Austria
+Audi Electronics Venture GmbH, Germany
+{treml, arjona, unterthiner, nessler,
+{rupesh.durgesh, felix.friedmann,"
+d102f18d319d9545588075010f5d10b1ff77f967,Effects of Degradations on Deep Neural Network Architectures,"Effects of Degradations on Deep Neural Network
+Architectures
+Prasun Roy∗, Subhankar Ghosh∗, Saumik Bhattacharya∗ and Umapada Pal
+Indian Statistical Institute Kolkata, India - 700108"
+d170adb2c508edaedb731ada8cb995172a839a1f,Cascade of Boolean detector combinations,"Mahkonen et al. EURASIP Journal on Image and Video
+Processing (2018) 2018:61
+https://doi.org/10.1186/s13640-018-0303-9
+EURASIP Journal on Image
+and Video Processing
+RESEARCH
+Open Access
+Cascade of Boolean detector
+combinations
+Katariina Mahkonen*
+, Tuomas Virtanen and Joni Kämäräinen"
+d1d4c49e764a200bc90113b0ba9c34664d0f9462,"Memo No . 082 May 10 , 2018 Scene Graph Parsing as Dependency Parsing","CBMM Memo No. 082
+May 10, 2018
+Scene Graph Parsing as Dependency Parsing
+Yu-Siang Wang1, Chenxi Liu2, Xiaohui Zeng3, Alan Yuille2
+: National Taiwan University
+: Johns Hopkins University
+: Hong Kong University of Science and Technology"
+d1a43737ca8be02d65684cf64ab2331f66947207,IJB – S : IARPA Janus Surveillance Video Benchmark ∗,"IJB–S: IARPA Janus Surveillance Video Benchmark (cid:3)
+Nathan D. Kalka y
+Stephen Elliott z
+Brianna Maze y
+Kaleb Hebert y
+James A. Duncan y
+Julia Bryan z
+Kevin O’Connor z
+Anil K. Jain x"
+d122d66c51606a8157a461b9d7eb8b6af3d819b0,Automated Recognition of Facial Expressions,"Vol-3 Issue-4 2017
+IJARIIE-ISSN(O)-2395-4396
+AUTOMATED RECOGNITION OF FACIAL
+EXPRESSIONS
+Pavan S. Ahire, PG Student, Dept. of Computer Engineering, METs Institute of Engineering,
+Prof. R. P. Dahake, Dept. of Computer Engineering, METs Institute of Engineering,
+Adgoan,Nashik,Maharashtra.
+Adgoan, Nashik, Maharashtra."
+d142e74c6a7457e77237cf2a3ded4e20f8894e1a,Human Emotion Estimation from Eeg and Face Using Statistical Features and Svm,"HUMAN EMOTION ESTIMATION FROM
+EEG AND FACE USING STATISTICAL
+FEATURES AND SVM
+Strahil Sokolov1, Yuliyan Velchev2, Svetla Radeva3 and Dimitar Radev4
+1,3Department of Information Technologies,
+University of telecommunications and post, Sofia, Bulgaria
+2,4Department of Telecommunications,
+University of telecommunications and post, Sofia, Bulgaria"
+d1082eff91e8009bf2ce933ac87649c686205195,Pruning of Error Correcting Output Codes by optimization of accuracy–diversity trade off,"(will be inserted by the editor)
+Pruning of Error Correcting Output Codes by
+Optimization of Accuracy-Diversity Trade off
+S¨ureyya ¨Oz¨o˘g¨ur Aky¨uz · Terry
+Windeatt · Raymond Smith
+Received: date / Accepted: date"
+d1bfb6a9182e5712d8aef46b2fe93ef4ad4fe705,Local Color Contrastive Descriptor for Image Classification,"Local Color Contrastive Descriptor for Image
+Classification
+Sheng Guo, Student Member, IEEE, Weilin Huang, Member, IEEE, and Yu Qiao, Senior Member, IEEE"
+d1c0592f4f9f0ff2e14e0591d87539e5141b7361,Mobile Emotion Recognition Engine,"Mobile Emotion Recognition Engine
+Alberto Scicali1"
+d138270d3c06e85fa2c3da6f953818da4b72313a,An Analytical Framework for Estimating Scale-Out and Scale-Up Power Efficiency of Heterogeneous Manycores,"An Analytical Framework for Estimating
+Scale-Out and Scale-Up Power Efficiency
+of Heterogeneous Manycores
+Jun Ma, Guihai Yan, Member, IEEE, Yinhe Han, Member, IEEE, and Xiaowei Li, Senior Member, IEEE"
+d1d6f1d64a04af9c2e1bdd74e72bd3ffac329576,Neural Face Editing with Intrinsic Image Disentangling,"Neural Face Editing with Intrinsic Image Disentangling
+Zhixin Shu1 Ersin Yumer2 Sunil Hadap2 Kalyan Sunkavalli2 Eli Shechtman 2 Dimitris Samaras1,3
+Stony Brook University 2Adobe Research 3 CentraleSup´elec, Universit´e Paris-Saclay"
+d1dc5a8b4d13d2c51eec7bcb29d08f471d3b65dc,Adversarially Occluded Samples for Person Re-identification ( Supplementary Material ) 1 . Improvement of Ranking Results,"Adversarially Occluded Samples for Person Re-identification
+Houjing Huang 1
+Dangwei Li 1
+Zhang Zhang 1
+Xiaotang Chen 1
+Kaiqi Huang 1
+CRIPAC & NLPR, CASIA 2 University of Chinese Academy of Sciences
+CAS Center for Excellence in Brain Science and Intelligence Technology
+{houjing.huang, dangwei.li, zzhang, xtchen,"
+d198b5bc5eae22f7a788729c0ea15b6b60b62f36,Transfer Learning for Estimating Causal Effects using Neural Networks,"Transfer Learning for Estimating Causal Effects
+using Neural Networks
+Sören R. Künzel∗
+UC Berkeley
+Varsha Ramakrishnan
+UC Berkeley
+Bradly C. Stadie∗
+UC Berkeley
+Nikita Vemuri
+UC Berkeley
+Jasjeet S. Sekhon
+UC Berkeley
+Pieter Abbeel
+UC Berkeley"
+d6dab84451254d7fbb5b9e1d40a7d2a92dec13b3,Enhanced Local Binary Patterns for Automatic Face Recognition,"ENHANCED LOCAL BINARY PATTERNS FOR AUTOMATIC FACE RECOGNITION
+Pavel Kr´al1
+, Anton´ın Vrba1
+Dept. of Computer Science & Engineering 2New Technologies for the Information Society
+Faculty of Applied Sciences
+University of West Bohemia
+Plzeˇn, Czech Republic
+Faculty of Applied Sciences
+University of West Bohemia
+Plzeˇn, Czech Republic"
+d6255a0db6f8f157c5c901d758c7a5f36416ab51,Face Recognition Using Gabor Wavelet Transform,"FACE RECOGNITION USING GABOR WAVELET TRANSFORM
+A THESIS SUBMITTED TO
+THE GRADUATE SCHOOL OF NATURAL SCIENCES
+THE MIDDLE EAST TECHNICAL UNIVERSITY
+BURCU KEPENEKCI
+IN PARTIAL FULLFILMENT OF THE REQUIREMENTS FOR THE DEGREE
+MASTER OF SCIENCE
+THE DEPARTMENT OF ELECTRICAL AND ELECTRONICS ENGINEERING
+SEPTEMBER 2001"
+d69df51cff3d6b9b0625acdcbea27cd2bbf4b9c0,Robust Remote Heart Rate Determination for E-Rehabilitation - A Method that Overcomes Motion and Intensity Artefacts,
+d64b24e9b01f4681d92fc29f36e46d94db7b8bb0,Avoiding Extraverts: Pathogen Concern Downregulates Preferences for Extraverted Faces,"See discussions, stats, and author profiles for this publication at: https://www.researchgate.net/publication/305793723
+Avoiding Extraverts: Pathogen Concern
+Downregulates Preferences for Extraverted
+Faces
+Article · August 2016
+DOI: 10.1007/s40806-016-0064-6
+CITATIONS
+authors, including:
+Mitch Brown
+University of Southern Mississippi
+6 PUBLICATIONS 5 CITATIONS
+SEE PROFILE
+READS
+Some of the authors of this publication are also working on these related projects:
+Limbal Rings View project
+Morality and Mate Preferences View project
+All content following this page was uploaded by Mitch Brown on 06 December 2016.
+The user has requested enhancement of the downloaded file. All in-text references underlined in blue are added to the original document
+nd are linked to publications on ResearchGate, letting you access and read them immediately."
+d660abfbe5f84c1c49f1e7174eb166b8b23e53c4,"AMIGOS: A dataset for Mood, personality and affect research on Individuals and GrOupS","AMIGOS: A dataset for Mood, personality and
+ffect research on Individuals and GrOupS
+Nicu Sebe, Senior Member, IEEE, and Ioannis Patras, Senior Member, IEEE"
+d689cdb4e535be040316722229e6362de6617f9e,Geometric Deep Particle Filter for Motorcycle Tracking: Development of Intelligent Traffic System in Jakarta,"INTERNATIONAL JOURNAL ON SMART SENSING AND INTELLIGENT SYSTEMS VOL. 8, NO. 1, MARCH 2015
+GEOMETRIC DEEP PARTICLE FILTER FOR MOTORCYCLE
+TRACKING: DEVELOPMENT OF INTELLIGENT TRAFFIC
+SYSTEM IN JAKARTA
+Alexander A S Gunawan1, Wisnu Jatmiko2
+Bina Nusantara University, Mathematics Department,
+School of Computer Science, Jakarta, Indonesia
+Faculty of Computer Science,Universitas Indonesia, Depok, Indonesia
+Submitted: Oct. 4, 2014 Accepted: Jan. 20, 2015 Published: Mar. 1, 2015"
+d61578468d267c2d50672077918c1cda9b91429b,Face Image Retrieval Using Pose Specific Set Sparse Feature Representation,"Abdul Afeef N et al, International Journal of Computer Science and Mobile Computing, Vol.3 Issue.9, September- 2014, pg. 314-323
+Available Online at www.ijcsmc.com
+International Journal of Computer Science and Mobile Computing
+A Monthly Journal of Computer Science and Information Technology
+ISSN 2320–088X
+IJCSMC, Vol. 3, Issue. 9, September 2014, pg.314 – 323
+RESEARCH ARTICLE
+Face Image Retrieval Using Pose Specific
+Set Sparse Feature Representation
+Department of Computer Science, Viswajyothi College of Engineering and Technology Kerala, India
+Assistant Professor of Computer Science, Viswajyothi College of Engineering and Technology Kerala, India
+Abdul Afeef N1, Sebastian George2"
+d6eda0c16d226976506396653d14044c185eaf3e,Toward Multimodal Image-to-Image Translation,"Toward Multimodal Image-to-Image Translation
+Jun-Yan Zhu
+UC Berkeley
+Richard Zhang
+UC Berkeley
+Deepak Pathak
+UC Berkeley
+Trevor Darrell
+UC Berkeley
+Alexei A. Efros
+UC Berkeley
+Oliver Wang
+Adobe Research
+Eli Shechtman
+Adobe Research"
+d687fa99586a9ad229284229f20a157ba2d41aea,Face Recognition Based on Wavelet Packet Coefficients and Radial Basis Function Neural Networks,"Journal of Intelligent Learning Systems and Applications, 2013, 5, 115-122
+http://dx.doi.org/10.4236/jilsa.2013.52013 Published Online May 2013 (http://www.scirp.org/journal/jilsa)
+Face Recognition Based on Wavelet Packet Coefficients
+nd Radial Basis Function Neural Networks
+Thangairulappan Kathirvalavakumar1*, Jeyasingh Jebakumari Beulah Vasanthi2
+Department of Computer Science, Virudhunagar Hindu Nadars’ Senthikumara Nadar College, Virudhunagar, India; 2Department of
+Computer Applications, Ayya Nadar Janaki Ammal College, Sivakasi, India.
+Email:
+Received December 12th, 2012; revised April 19th, 2013; accepted April 26th, 2013
+Copyright © 2013 Thangairulappan Kathirvalavakumar, Jeyasingh Jebakumari Beulah Vasanthi. This is an open access article dis-
+tributed under the Creative Commons Attribution License, which permits unrestricted use, distribution, and reproduction in any me-
+dium, provided the original work is properly cited."
+d69e644016042d1032995bc9f51e2d72a1c1cd93,Beyond Trees: Adopting MITI to Learn Rules and Ensemble Classifiers for Multi-Instance Data,"Beyond Trees: Adopting MITI to Learn Rules
+nd Ensemble Classifiers for Multi-instance Data
+Luke Bjerring and Eibe Frank
+Department of Computer Science, University of Waikato"
+d6efd1b7b39d91b067488e0c4bf800ce3e3704d8,Visual Analysis of Pedestrian Motion,"Visual Analysis of Pedestrian Motion
+PRS Transfer Report
+Supervised by Dr Ian Reid
+David Ellis
+St John’s College
+Robotics Research Group
+Department of Engineering Science
+Michaelmas 2009"
+d6a9ea9b40a7377c91c705f4c7f206a669a9eea2,Visual Representations for Fine-grained Categorization,"Visual Representations for Fine-grained
+Categorization
+Ning Zhang
+Electrical Engineering and Computer Sciences
+University of California at Berkeley
+Technical Report No. UCB/EECS-2015-244
+http://www.eecs.berkeley.edu/Pubs/TechRpts/2015/EECS-2015-244.html
+December 17, 2015"
+d671a210990f67eba9b2d3dda8c2cb91575b4a7a,Social Environment Description from Data Collected with a Wearable Device,"Journal of Machine Learning Research ()
+Submitted ; Published
+Social Environment Description from Data Collected with a
+Wearable Device
+Pierluigi Casale
+Computer Vision Center
+Autonomous University of Barcelona
+Barcelona, Spain
+Editor: Radeva Petia, Pujol Oriol"
+d665213b59f2460faf171d3b03ecd9c96d606883,A Multimodal Nonverbal Human-robot Communication System,"VI International Conference on Computational Bioengineering
+ICCB 2015
+M. Cerrolaza and S.Oller (Eds)
+A MULTIMODAL NONVERBAL HUMAN-ROBOT COMMUNICATION
+SYSTEM
+S. SALEH†*, M. SAHU†, Z. ZAFAR† AND K. BERNS†
+Robotics Research Lab. - Dept. of Computer Science
+University of Kaiserslautern
+Kaiserslautern, Germany
+web page: http://agrosy.cs.uni-kl.de
+e-mail: {saleh, sahu, zafar,
+* Dept. of Computer Science, University of Basrah
+Basrah, Iraq
+Key words: HRI, Facial Expression Recognition, Nonverbal Communication"
+d6683c74c17d4fcc48ce3d9df9df6aea38fd4923,Learning Instance Weights in Multi-Instance Learning,"Learning Instance Weights in
+Multi-Instance Learning
+James Foulds
+This thesis is submitted in partial fulfillment of
+the requirements for the degree of
+Master of Science
+t the
+University of Waikato.
+Department of Computer Science
+Hamilton, New Zealand
+February 2007 - February 2008
+(cid:13) 2008 James Foulds"
+d65b82b862cf1dbba3dee6541358f69849004f30,2.5D Elastic graph matching,"Contents lists available at ScienceDirect
+j o u r n a l h o m e p a g e : w w w . e l s e v i e r . c o m / l o c a t e / c v i u
+.5D Elastic graph matching
+Stefanos Zafeiriou
+, Maria Petrou
+Imperial College, Department of Electrical and Electronic Engineering, London, UK
+r t i c l e
+i n f o
+b s t r a c t
+Article history:
+Received 29 November 2009
+Accepted 1 December 2010
+Available online 17 March 2011
+Keywords:
+Elastic graph matching
+D face recognition
+Multiscale mathematical morphology
+Geodesic distances
+In this paper, we propose novel elastic graph matching (EGM) algorithms for face recognition assisted by
+the availability of 3D facial geometry. More specifically, we conceptually extend the EGM algorithm in"
+d6102a7ddb19a185019fd2112d2f29d9258f6dec,Fashion Style Generator,"Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+GeneratorPatch……Global+…lstyle(2)lstyle(1)lcontent(1)lcontent(2)φθϕsϕcDiscriminatorDGXX(1)X(2)(a) Framework of the training stage(b) Examples of fashion style generationFigure1:Fashionstylegeneratorframeworkoverview.TheinputXconsistsofasetofclothingpatchesX(1)andfullclothingimagesX(2).Thesystemconsistsoftwocomponents:animagetransfor-mationnetworkGservedasfashionstylegenerator,andadiscrimi-natornetworkDcalculatesbothglobalandpatchbasedcontentandstylelosses.Gisaconvolutionalencoderdecodernetworkparam-eterizedbyweights(cid:18).Sixgeneratedshirtswithdifferentstylesbyourmethodareshownasexamples.(Wehighlyrecommendtozoominallthefigureswithcolorversionformoredetails.)recentneuralstyletransferworks[Gatysetal.,2015].Tak-ingVanGogh’s“StarryNight”astheexamplestyleimage,styleisbetweenthelow-levelcolor/texture(e.g.,blueandyellowcolor,roughorsmoothertexture)andthehigh-levelobjects(e.g.,houseandmountain).“Style”isarelativelyab-stractconcept.Fashionstylegenerationhasatleasttwoprac-ticalusages.Designerscouldquicklyseehowtheclothinglookslikeinagivenstyletofacilitatethedesignprocessing.Shopperscouldsynthesizetheclothingimagewiththeidealstyleandapplyclothingretrievaltools[Jiangetal.,2016b]tosearchthesimilaritems.Fashionstylegenerationisrelatedtoexistingneuralstyletransferworks[Gatysetal.,2015;LiandWand,2016a;EfrosandFreeman,2001],buthasitsownchallenges.Infashionstylegeneration,thesyntheticclothingimageshould"
+d6bfa9026a563ca109d088bdb0252ccf33b76bc6,Unsupervised Temporal Segmentation of Facial Behaviour,"Unsupervised Temporal Segmentation of Facial Behaviour
+Abhishek Kar
+Advisors: Dr. Amitabha Mukerjee & Dr. Prithwijit Guha
+Department of Computer Science and Engineering, IIT Kanpur"
+d6adb54f5d25dda71d157b5d574c70c732fdd722,Feature Map Filtering: Improving Visual Place Recognition with Convolutional Calibration,"Pre-print of article that will appear in Proceedings of the Australasian Conference on Robotics and Automation
+018.
+Please cite this paper as:
+Stephen Hausler, Adam Jacobson, and Michael Milford. Feature Map Filtering: Improving Visual Place Recognition
+with Convolutional Calibration. Proceedings of Australasian Conference on Robotics and Automation, 2018.
+ibtex:
+uthor = {Hausler, Stephen and Jacobson, Adam and Milford, Michael},
+title = {Feature Map Filtering: Improving Visual Place Recognition with Convolutional Calibration},
+ooktitle = {Proceedings of Australasian Conference on Robotics and Automation (ACRA)},
+year = {2018},"
+d6dfe23018172d29c36746d24f73bf86e1aaa0a6,Searching Scenes by Abstracting Things,
+d65bcbcddec932480c434f0ffa778e429cdd4ee7,Periocular biometrics: When iris recognition fails,"Periocular Biometrics: When Iris Recognition Fails
+Samarth Bharadwaj, Himanshu S. Bhatt, Mayank Vatsa and Richa Singh"
+d6c7092111a8619ed7a6b01b00c5f75949f137bf,A Novel Feature Extraction Technique for Facial Expression Recognition,"A Novel Feature Extraction Technique for Facial Expression
+Recognition
+*Mohammad Shahidul Islam1, Surapong Auwatanamongkol2
+1 Department of Computer Science, School of Applied Statistics,
+National Institute of Development Administration,
+Bangkok, 10240, Thailand
+Department of Computer Science, School of Applied Statistics,
+National Institute of Development Administration,
+Bangkok, 10240, Thailand"
+d6ceebb0cde7fb0fbe916472d7b613a2d7d2e1e6,Do faces capture the attention of individuals with Williams syndrome or autism? Evidence from tracking eye movements.,"Do faces capture the attention of individuals with Williams syndrome
+or Autism? Evidence from tracking eye movements
+Deborah M Riby & Peter J B Hancock
+http://dx.doi.org/10.1007/s10803-008-0641-z"
+d65f11b44180d9997ad5ba6e6970fe4874891f4f,Unobtrusive emotion sensing and interpretation in smart environment,"Journal of Ambient Intelligence and Smart Environments 7 (2015) 59–83
+DOI 10.3233/AIS-140298
+IOS Press
+Unobtrusive emotion sensing and
+interpretation in smart environment
+Oleg Starostenko *, Ximena Cortés, J. Afredo Sánchez and Vicente Alarcon-Aquino
+Department of Computing, Electronics and Mechatronics, Universidad de las Americas Puebla, Cholula,
+Pue. 72810, Mexico"
+d6b514a68abff3ab14af9fc0152cd5b28bd0192c,Instance Segmentation by Deep Coloring,"JULY 2018
+Instance Segmentation by Deep Coloring
+Victor Kulikov, Victor Yurchenko, and Victor Lempitsky"
+d64c362b631f0c94b22952e2d0860054f0854358,Offline Handwritten Devanagari Numeral Recognition Using Artificial Neural Network,"International Journals of Advanced Research in
+Computer Science and Software Engineering
+ISSN: 2277-128X (Volume-7, Issue-8)
+Research Article
+August
+Offline Handwritten Devanagari Numeral Recognition
+Using Artificial Neural Network
+P E Ajmire
+Associate Professor & Head, Department of Computer Science & Application, G. S. Science, Arts & Commerce
+College, Khamgaon, Maharashtra, India
+DOI: 10.23956/ijarcsse/V7I7/0157"
+d623428f02e80a689eb58d022237daeae2ae7b9c,Guided depth upsampling for precise mapping of urban environments,"Guided Depth Upsampling for Precise Mapping of Urban Environments
+Sascha Wirges1, Bj¨orn Roxin2 , Eike Rehder2, Tilman K¨uhner1 and Martin Lauer2"
+d680cfe583fe61e49656cc7b9dbd480c6159cf0b,Pedestrian Detection in Far-Infrared Daytime Images Using a Hierarchical Codebook of SURF,"Sensors 2015, 15, 8570-8594; doi:10.3390/s150408570
+OPEN ACCESS
+sensors
+ISSN 1424-8220
+www.mdpi.com/journal/sensors
+Article
+Pedestrian Detection in Far-Infrared Daytime Images Using a
+Hierarchical Codebook of SURF
+Bassem Besbes 1, Alexandrina Rogozan 2,*, Adela-Maria Rus 2,3,*, Abdelaziz Bensrhair 2
+nd Alberto Broggi 4
+Diotasoft, 15 Boulevard Emile Baudot, Massy 91300, France; E-Mail:
+LITIS Laboratory, National Institute of Applied Sciences, 76801 Saint-Etienne-du-Rouvray Cedex,
+France; E-Mail:
+Faculty of Computer Science, Babes-Bolyai University, Kogalniceanu no.1,
+Cluj-Napoca RO-400084, Romania
+Dipartimento di Ingegneria dell’ Informazione, Universita di Parma, Parco Area delle Scienze,
+Parma 181/a 43124, Italy; E-Mail:
+* Authors to whom correspondence should be addressed; E-Mails: (A.R.);
+(A.-M.R.); Tel.: +33-2-3295-6670 (A.R.); +40-2-6440-5300 (A.-M.R.).
+Academic Editor: Felipe Jimenez"
+d69b542b3714b5e90c384d39b5ab0c4bf9dd5375,Geometry and Probability for Motion and Action,"IN PARTNERSHIP WITH:
+Institut polytechnique de
+Grenoble
+Université Pierre Mendes-France
+(Grenoble)
+Université Joseph Fourier
+(Grenoble)
+Activity Report 2012
+Project-Team E-MOTION
+Geometry and Probability for Motion and
+Action
+IN COLLABORATION WITH: Laboratoire d’Informatique de Grenoble (LIG)
+RESEARCH CENTER
+Grenoble - Rhône-Alpes
+THEME
+Robotics"
+d69ef8b5658fabd0ac092fb2bfd0c9c109574dcc,Neural Class-Specific Regression for face verification,"Neural Class-Specific Regression for face
+verification
+Guanqun Cao, Alexandros Iosifidis, Moncef Gabbouj"
+bcee40c25e8819955263b89a433c735f82755a03,Biologically Inspired Vision for Human-Robot Interaction,"Biologically inspired vision for human-robot
+interaction
+M. Saleiro, M. Farrajota, K. Terzi´c, S. Krishna, J.M.F. Rodrigues, and J.M.H.
+du Buf
+Vision Laboratory, LARSyS, University of the Algarve, 8005-139 Faro, Portugal,
+{masaleiro, mafarrajota, kterzic, jrodrig,"
+bcf7fb98ab0137d8a8b8a952819f5e13ec4648aa,Face Recognition with Single Sample per Class Using Cs-lbp and Gabor Filter,"Journal of Theoretical and Applied Information Technology
+31st October 2014. Vol. 68 No.3
+© 2005 - 2014 JATIT & LLS. All rights reserved.
+ISSN: 1992-8645 www.jatit.org E-ISSN: 1817-3195
+FACE RECOGNITION WITH SINGLE SAMPLE PER
+CLASS USING CS-LBP AND GABOR FILTER
+A.USHA RUBY,
+DR.J.GEORGE CHELLIN CHANDRAN
+Research Scholar, Department of CSE, Bharath University
+Principal, CSI College of Engineering, Ketti
+E-mail: ,"
+bc995457cf5f4b2b5ef62106856571588d7d70f2,Comparison of Maximum Likelihood and GAN-based training of Real NVPs,"Comparison of Maximum Likelihood and GAN-based training of Real NVPs
+Ivo Danihelka 1 2 Balaji Lakshminarayanan 1 Benigno Uria 1 Daan Wierstra 1 Peter Dayan 3"
+bc6de183cd8b2baeebafeefcf40be88468b04b74,Age Group Recognition using Human Facial Images,"Age Group Recognition using Human Facial Images
+International Journal of Computer Applications (0975 – 8887)
+Volume 126 – No.13, September 2015
+Shailesh S. Kulkarni
+Dept. of Electronics and Telecommunication
+Government College of Engineering,
+Aurangabad, Maharashtra, India"
+bcf73131c2be397fa2105ac45df3ce1a55c07c2f,Automated markerless extraction of walking people using deformable contour models,"This is a preprint of an article published in Computer Animation and Virtual
+Worlds, 15(3-4):399-406, 2004.
+This journal may be found at:
+http://www.interscience.wiley.com"
+bcf19b964e7d1134d00332cf1acf1ee6184aff00,Trajectory-Set Feature for Action Recognition,"IEICE TRANS. INF. & SYST., VOL.E100–D, NO.8 AUGUST 2017
+LETTER
+Trajectory-Set Feature for Action Recognition
+Kenji MATSUI†, Nonmember, Toru TAMAKI†a), Member, Bisser RAYTCHEV†, Nonmember,
+nd Kazufumi KANEDA†, Member
+SUMMARY We propose a feature for action recognition called
+Trajectory-Set (TS), on top of the improved Dense Trajectory (iDT).
+The TS feature encodes only trajectories around densely sampled inter-
+est points, without any appearance features. Experimental results on the
+UCF50 action dataset demonstrates that TS is comparable to state-of-the-
+rts, and outperforms iDT; the accuracy of 95.0%, compared to 91.7% by
+key words: action recognition, trajectory, improved Dense Trajectory
+the two-stream CNN [2] that uses a single frame and a opti-
+al flow stack. In their paper stacking trajectories was also
+reported but did not perform well, probably the sparseness
+of trajectories does not fit to CNN architectures. In contrast,
+we take a hand-crafted approach that can be fused later with
+CNN outputs.
+Introduction
+Action recognition has been well studied in the computer"
+bc1fa3efa43dfb79f6f8243d29327c8ee06e8a97,Learning object classes with generic knowledge,"ETH Zurich, D-ITET, BIWI
+Technical Report No 275
+Learning object classes with generic knowledge
+Thomas Deselaers, Bogdan Alexe, and Vittorio Ferrari"
+bc843c35530e38396e8ba55b8891dbe8324054a8,Group Visual Sentiment Analysis,"Group Visual Sentiment Analysis
+Zeshan Hussain, Tariq Patanam and Hardie Cate
+June 6, 2016"
+bca09d92a25e5cc96df5c8d2eb87e2854cdc02b1,Pose Invariant 3 D Face Authentication based on Gaussian Fields Approach,"To the Graduate Council:
+I am submitting herewith a thesis written by Venkat Rao Ayyagari entitled “Pose
+Invariant 3D Face Authentication based on Gaussian Fields Approach”. I have examined
+the final electronic copy of this thesis for form and content and recommend that it be
+ccepted in partial fulfillment of the requirements for the degree of Master of Science,
+with a major in Electrical Engineering.
+Mongi A. Abidi
+Major Professor
+We have read this thesis and
+recommend its acceptance:
+Andreas Koschan
+Seong G. Kong
+Accepted for the Council:
+Anne Mayhew
+Vice Chancellor and Dean of
+Graduate Studies
+(Original signatures are on file with official student records.)"
+bcc172a1051be261afacdd5313619881cbe0f676,A fast face clustering method for indexing applications on mobile phones,"978-1-5090-4117-6/17/$31.00 ©2017 IEEE
+ICASSP 2017"
+bc7f431c4c5cecfc7bf95b2f0704d81469f23580,An Intelligent Apparel Recommendation System for Online Shopping Using Style Classification,"I J A B E R, Vol. 13, No. 2, (2015): 671-686
+AN INTELLIGENT APPAREL RECOMMENDATION
+SYSTEM FOR ONLINE SHOPPING USING STYLE
+CLASSIFICATION
+C. Perkinian* and P. Vikkraman**"
+bc749f0e81eafe9e32d56336750782f45d82609d,Combination of Texture and Geometric Features for Age Estimation in Face Images,
+bc15e0ebe7ff84e090aa2d74d753d87906d497f7,The Impact of Preprocessing on Deep Representations for Iris Recognition on Unconstrained Environments,"The Impact of Preprocessing on Deep
+Representations for Iris Recognition on
+Unconstrained Environments
+Luiz A. Zanlorensi∗, Eduardo Luz†, Rayson Laroca∗, Alceu S. Britto Jr.‡, Luiz S. Oliveira∗, David Menotti∗
+Department of Informatics, Federal University of Paran´a (UFPR), Curitiba, PR, Brazil
+Computing Department, Federal University of Ouro Preto (UFOP), Ouro Preto, MG, Brazil
+Postgraduate Program in Informatics, Pontifical Catholic University of Paran´a (PUCPR), Curitiba, PR, Brazil"
+bc4e86b6d2d386805466b822a04ea0c015debfff,Robust 3D Face Recognition from Expression Categorisation,"Cook, Jamie A and Cox, Mark and Chandran, Vinod and Sridharan,
+Sridha (2007) Robust 3D Face Recognition from Expression
+Categorisation. In Proceedings International Conference on Biometrics
+642, pages pp. 271-280, Seoul, Korea.
+This is the author-manuscript version of this work - accessed from
+http://eprints.qut.edu.au
+Copyright 2007 Springer"
+bca52740ba679b67a508894e68a0e52f6bf62079,Development of an Efficient Face Recognition System based on Linear and Nonlinear Algorithms,"Development of an Efficient Face Recognition System based on Linear and Nonlinear Algorithms
+{tag} {/tag}
+International Journal of Computer Applications
+Foundation of Computer Science (FCS), NY, USA
+Volume 134
+Number 7
+Year of Publication: 2016
+Authors:
+Filani Araoluwa S., Adetunmbi Adebayo O.
+10.5120/ijca2016907932
+{bibtex}2016907932.bib{/bibtex}"
+bc4537bc5834b41a631d9a807500d199b438fb27,Perceptual Integration Deficits in Autism Spectrum Disorders Are Associated with Reduced Interhemispheric Gamma-Band Coherence.,"6352 • The Journal of Neuroscience, December 16, 2015 • 35(50):16352–16361
+Neurobiology of Disease
+Perceptual Integration Deficits in Autism Spectrum
+Disorders Are Associated with Reduced Interhemispheric
+Gamma-Band Coherence
+Ina Peiker,1* Nicole David,1* X Till R. Schneider,1 Guido Nolte,1 Daniel Scho¨ttle,2 and XAndreas K. Engel1
+Departments of 1Neurophysiology and Pathophysiology and 2Psychiatry and Psychotherapy, University Medical Center Hamburg-Eppendorf, 20246
+Hamburg, Germany
+The integration of visual details into a holistic percept is essential for object recognition. This integration has been reported as a key deficit
+in patients with autism spectrum disorders (ASDs). The weak central coherence account posits an altered disposition to integrate features
+into a coherent whole in ASD. Here, we test the hypothesis that such weak perceptual coherence may be reflected in weak neural coherence
+cross different cortical sites. We recorded magnetoencephalography from 20 adult human participants with ASD and 20 matched
+ontrols, who performed a slit-viewing paradigm, in which objects gradually passed behind a vertical or horizontal slit so that only
+fragments of the object were visible at any given moment. Object recognition thus required perceptual integration over time and, in case
+of the horizontal slit, also across visual hemifields. ASD participants were selectively impaired in the horizontal slit condition, indicating
+specific difficulties in long-range synchronization between the hemispheres. Specifically, the ASD group failed to show condition-related
+enhancement of imaginary coherence between the posterior superior temporal sulci in both hemispheres during horizontal slit-viewing
+in contrast to controls. Moreover, local synchronization reflected in occipitocerebellar beta-band power was selectively reduced for
+horizontal compared with vertical slit-viewing in ASD. Furthermore, we found disturbed connectivity between right posterior superior
+temporal sulcus and left cerebellum. Together, our results suggest that perceptual integration deficits co-occur with specific patterns of"
+bc8e1c2284008319ee325ff7ea19916726235f55,Autonomic responses to social and nonsocial pictures in adolescents with autism spectrum disorder.,"RESEARCH ARTICLE
+Autonomic Responses to Social and Nonsocial Pictures in
+Adolescents With Autism Spectrum Disorder
+Anneke Louwerse, Joke H. M. Tulen, Jos N. van der Geest, Jan van der Ende, Frank C. Verhulst, and
+Kirstin Greaves-Lord
+It remains unclear why individuals with autism spectrum disorder (ASD) tend to respond in an atypical manner in social
+situations. Investigating autonomic and subjective responses to social vs. nonsocial stimuli may help to reveal underlying
+mechanisms of these atypical responses. This study examined autonomic responses (skin conductance level and heart
+rate) and subjective responses to social vs. nonsocial pictures in 37 adolescents with an ASD and 36 typically developing
+(TD) adolescents. Thirty-six pictures from the International Affective Picture System were presented, divided into six
+ategories based on social content (social vs. nonsocial) and pleasantness (pleasant, neutral, and unpleasant). Both in
+dolescents with ASD as well as TD adolescents, pictures with a social content resulted in higher skin conductance
+responses (SCRs) for pleasant and unpleasant pictures than for neutral pictures. No differences in SCRs were found for
+the three nonsocial picture categories. Unpleasant pictures, both with and without a social content, showed more heart
+rate deceleration than neutral pictures. Self-reported arousal ratings were influenced by the social and affective content
+of a picture. No differences were found between individuals with ASD and TD individuals in their autonomic and
+subjective responses to the picture categories. These results suggest that adolescents with ASD do not show atypical
+utonomic or subjective responses to pictures with and without a social content. These findings make it less likely that
+impairments in social information processing in individuals with ASD can be explained by atypical autonomic responses
+to social stimuli. Autism Res 2013, (cid:129)(cid:129): (cid:129)(cid:129)–(cid:129)(cid:129). © 2013 International Society for Autism Research, Wiley Periodicals, Inc."
+bc811a66855aae130ca78cd0016fd820db1603ec,Towards three-dimensional face recognition in the real Huibin,"Towards three-dimensional face recognition in the real
+Huibin Li
+To cite this version:
+Huibin Li. Towards three-dimensional face recognition in the real. Other. Ecole Centrale de
+Lyon, 2013. English. <NNT : 2013ECDL0037>. <tel-00998798>
+HAL Id: tel-00998798
+https://tel.archives-ouvertes.fr/tel-00998798
+Submitted on 2 Jun 2014
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destin´ee au d´epˆot et `a la diffusion de documents
+scientifiques de niveau recherche, publi´es ou non,
+´emanant des ´etablissements d’enseignement et de
+recherche fran¸cais ou ´etrangers, des laboratoires
+publics ou priv´es."
+bc98027b331c090448492eb9e0b9721e812fac84,"Face Representation Using Combined Method of Gabor Filters, Wavelet Transformation and DCV and Recognition Using RBF","Journal of Intelligent Learning Systems and Applications, 2012, 4, 266-273
+http://dx.doi.org/10.4236/jilsa.2012.44027 Published Online November 2012 (http://www.SciRP.org/journal/jilsa)
+Face Representation Using Combined Method of Gabor
+Filters, Wavelet Transformation and DCV and Recognition
+Using RBF
+Kathirvalavakumar Thangairulappan1*, Jebakumari Beulah Vasanthi Jeyasingh2
+Department of Computer Science, VHNSN College, Virudhunagar, India; 2Department of Computer Applications, ANJA College,
+Sivakasi, India.
+Email:
+Received April 27th, 2012; revised July 19th, 2012; accepted July 26th, 2012"
+bcaa5fab589d95890d539a3119657fa253176f0d,"Evaluating the Efficiency of a Night-Time, Middle-Range Infrared Sensor for Applications in Human Detection and Recognition","THE PROBLEM: MID-RANGE FR AT NIGHT
+No Active Illumination
+NIR Led Illuminator
+Night Time 120 meters
+eters
+Infrared Imaging Systems: Design, Analysis, Modeling, and Testing XXIII, edited by Gerald C. Holst, Keith A. Krapels,
+Proc. of SPIE Vol. 8355, 83551B · © 2012 SPIE · CCC code: 0277-786X/12/$18 · doi: 10.1117/12.917831
+Proc. of SPIE Vol. 8355 83551B-1
+From: http://proceedings.spiedigitallibrary.org/ on 04/30/2013 Terms of Use: http://spiedl.org/terms"
+bc9af4c2c22a82d2c84ef7c7fcc69073c19b30ab,MoCoGAN: Decomposing Motion and Content for Video Generation,"MoCoGAN: Decomposing Motion and Content for Video Generation
+Sergey Tulyakov,
+Snap Research
+Ming-Yu Liu, Xiaodong Yang,
+NVIDIA
+Jan Kautz"
+bcac3a870501c5510df80c2a5631f371f2f6f74a,Structured Face Hallucination,"#1387
+CVPR 2013 Submission #1387. CONFIDENTIAL REVIEW COPY. DO NOT DISTRIBUTE.
+#1387
+Structured Face Hallucination
+Anonymous CVPR submission
+Paper ID 1387"
+bc4627e1bc3bbe21c46c4011ec4f9bd377ec83a4,Towards recognition of degraded words by probabilistic parsing,"Towards Recognition of Degraded Words by Probabilistic
+Parsing
+Karthika Mohan
+IIIT, Hyderabad
+AP, India 500 032
+K. J. Jinesh
+IIIT, Hyderabad
+AP, India 500 032
+C. V. Jawahar
+IIIT, Hyderabad
+AP, India 500 032"
+ae419d28ab936cbbc420dcfd1decb16a45afc8a9,Real-time face verification using multiple feature combination and a support vector machine supervisor,
+ae8d5be3caea59a21221f02ef04d49a86cb80191,Skip RNN: Learning to Skip State Updates in Recurrent Neural Networks,"Published as a conference paper at ICLR 2018
+SKIP RNN: LEARNING TO SKIP STATE UPDATES IN
+RECURRENT NEURAL NETWORKS
+V´ıctor Campos∗†, Brendan Jou‡, Xavier Gir´o-i-Nieto§, Jordi Torres†, Shih-Fu ChangΓ
+Barcelona Supercomputing Center, ‡Google Inc,
+§Universitat Polit`ecnica de Catalunya, ΓColumbia University
+{victor.campos,"
+ae2b2493f35cecf1673eb3913fdce37e037b53a2,Optimal Transport Maps for Distribution Pre- Serving Operations on Latent Spaces of Gener-,"OPTIMAL TRANSPORT MAPS FOR DISTRIBUTION PRE-
+SERVING OPERATIONS ON LATENT SPACES OF GENER-
+ATIVE MODELS
+Eirikur Agustsson
+D-ITET, ETH Zurich
+Switzerland
+Alexander Sage
+D-ITET, ETH Zurich
+Switzerland
+Radu Timofte
+D-ITET, ETH Zurich
+Merantix GmbH
+Luc Van Gool
+D-ITET, ETH Zurich
+ESAT, KU Leuven"
+aeee98c90799cd44dde4046754cff27c8ed28d44,Deep convolutional neural networks for brain image analysis on magnetic resonance imaging: a review,"Deep convolutional neural networks for brain image analysis on magnetic
+resonance imaging: a review
+Jose Bernal∗, Kaisar Kushibar, Daniel S. Asfaw, Sergi Valverde, Arnau Oliver, Robert Mart´ı, Xavier Llad´o
+Computer Vision and Robotics Institute
+Dept. of Computer Architecture and Technology
+University of Girona
+Ed. P-IV, Av. Lluis Santal´o s/n, 17003 Girona (Spain)"
+aeee02b8c8bb749a1203fa634407319dd6874667,VIDEO-SURVEILLANCE IN CLOUD Platform and software aaS for people detection and soft-biometry,"VIDEO-SURVEILLANCE IN CLOUD
+Platform and software aaS for people detection and soft-
+iometry
+R. Cucchiara°,*, A. Prati°,+, R. Vezzani°,*, S. Calderara°,*, C. Grana°,*
+°SOFTECH-ICT, *Università di Modena e Reggio Emilia, +Università IUAV di Venezia"
+aed5b3b976077ecdcf3f88ffc511f63d9f9e8697,"A Qualitative Comparison of CoQA, SQuAD 2.0 and QuAC","A Qualitative Comparison of CoQA, SQuAD 2.0 and QuAC
+Mark Yatskar
+Allen Institute for Artificial Intelligence"
+aeabcbdff7ab810b961a9f7e4399b6c0421d00cd,TrafficPredict: Trajectory Prediction for Heterogeneous Traffic-Agents,"TrafficPredict: Trajectory Prediction for Heterogeneous Traffic-Agents
+Yuexin Ma1,2, Xinge Zhu3, Sibo Zhang1, Ruigang Yang1, Wenping Wang2, Dinesh Manocha4
+Baidu Research, Baidu Inc.1, The University of Hong Kong2,
+The Chinese University of Hong Kong3, University of Maryland at College Park4"
+ae0514be12d200bd9fecf0d834bdcb30288c7a1e,Automatic Opinion Question Generation,"Automatic Opinion Question Generation
+Yllias Chali
+University of Lethbridge
+401 University Drive
+Lethbridge, Alberta, T1K 3M4
+Tina Baghaee
+University of Lethbridge
+401 University Drive
+Lethbridge, Alberta, T1K 3M4"
+ae2cf545565c157813798910401e1da5dc8a6199,Cascade of Boolean detector combinations,"Mahkonen et al. EURASIP Journal on Image and Video
+Processing (2018) 2018:61
+https://doi.org/10.1186/s13640-018-0303-9
+EURASIP Journal on Image
+nd Video Processing
+RESEARCH
+Open Access
+Cascade of Boolean detector
+ombinations
+Katariina Mahkonen*
+, Tuomas Virtanen and Joni Kämäräinen"
+ae818858a88299090748446b8662e68628612c65,Analysis of Expressiveness of Portuguese Sign Language Speakers,"FACULDADE DE ENGENHARIA DA UNIVERSIDADE DO PORTO
+Analysis of Expressiveness of
+Portuguese Sign Language Speakers
+Maria Inês Coutinho Vigário Rodrigues
+MASTER THESIS
+Integrated Master in Bioengineering
+Supervisor: Luis Filipe Pinto de Almeida Teixeira (PhD)
+Co-supervisor: Eduardo José Marques Pereira (Eng.)
+June 2014"
+aebb9649bc38e878baef082b518fa68f5cda23a5,A Multi - scale TVQI - based Illumination Normalization Model,
+ae299fad29ba650fbf1e14c7c95ba8ae32e095f0,Person Re-Identification by Robust Canonical Correlation Analysis,"Person Re-Identification by Robust
+Canonical Correlation Analysis
+Le An, Songfan Yang, Member, IEEE, and Bir Bhanu, Fellow, IEEE"
+ae9ab89c51d264fb7b6b57d37399a7c629836e35,Obtaining Better Image Representations by Combining Complementary Activation Features of Multiple ConvNet Layers for Transfer Learning,"Obtaining Better Image Representations by
+Combining Complementary Activation Features of
+Multiple ConvNet Layers for Transfer Learning
+Jumabek Alikhanov
+School of Computer and
+Information Engineering
+Seunghyun Ko
+School of Computer and
+Information Engineering
+Jo Geun Sik
+School of Computer and
+Information Engineering
+Inha University Incheon, South Korea
+Inha University Incheon, South Korea
+Inha University Incheon, South Korea
+Email:
+Email:
+Email:"
+ae5195c44ef7bff090bb5a17a9fe5f86a8c3b316,Web Scale Image Annotation: Learning to Rank with Joint Word-Image Embeddings,"Web Scale Image Annotation: Learning to Rank with Joint
+Word-Image Embeddings"
+aeeea6eec2f063c006c13be865cec0c350244e5b,"Induced Disgust, Happiness and Surprise: an Addition to the MMI Facial Expression Database","Induced Disgust, Happiness and Surprise: an Addition to the MMI Facial
+Expression Database
+Michel F. Valstar, Maja Pantic
+Imperial College London / Twente University
+Department of Computing / EEMCS
+80 Queen’s Gate / Drienerlolaan 5
+London / Twente"
+ae13485e75f5e7fc9a9659ce960c8b299c7b889b,Sparse Modeling for High - Dimensional Multi - Manifold Data Analysis,"SPARSE MODELING FOR HIGH-DIMENSIONAL
+MULTI-MANIFOLD DATA ANALYSIS
+Ehsan Elhamifar
+A dissertation submitted to The Johns Hopkins University in conformity with the
+requirements for the degree of Doctor of Philosophy.
+Baltimore, Maryland
+October, 2012
+(cid:13) Ehsan Elhamifar 2012
+All rights reserved"
+ae8ed3b0b8043c5af76390751938edfd100fa9cd,An Overview of MultiTask Learning in Deep Neural Networks,"of 21
+9 May 2017
+An Overview of Multi-Task Learning in Deep
+Neural Networks 
+Table of contents:
+Introduction
+Motivation
+Two MTL methods for Deep Learning
+Hard parameter sharing
+Soft parameter sharing
+Why does MTL work?
+Implicit data augmentation
+Attention focusing
+Eavesdropping
+Representation bias
+Regularization
+MTL in non-neural models
+Block-sparse regularization
+http://sebastianruder.com/multi-task/index.html
+5/31/17, 9:38 AM"
+ae9257f3be9f815db8d72819332372ac59c1316b,Deciphering the enigmatic face: the importance of facial dynamics in interpreting subtle facial expressions.,"P SY CH O L O GIC AL SC I E NC E
+Research Article
+Deciphering the Enigmatic Face
+The Importance of Facial Dynamics in Interpreting Subtle
+Facial Expressions
+Zara Ambadar,1 Jonathan W. Schooler,2 and Jeffrey F. Cohn1
+University of Pittsburgh and 2University of British Columbia, Vancouver, British Columbia, Canada"
+ae33dc04adcb83a486517c48078cdd4af7dcc7c7,The adaptative local Hausdorff-distance map as a new dissimilarity measure,"The adaptative local Hausdorff-distance map
+s a new dissimilarity measure
+´Etienne Baudrier∗, Gilles Millon, Fr´ed´eric Nicolier, Su Ruan
+Centre de Recherche en STIC (CReSTIC)
+IUT de Troyes, 9, rue de Qu´ebec, 10026 TROYES CEDEX, FRANCE
+{e.baudrier, g.millon, f.nicolier,"
+ae89b7748d25878c4dc17bdaa39dd63e9d442a0d,On evaluating face tracks in movies,"On evaluating face tracks in movies
+Alexey Ozerov, Jean-Ronan Vigouroux, Louis Chevallier, Patrick Pérez
+To cite this version:
+Alexey Ozerov, Jean-Ronan Vigouroux, Louis Chevallier, Patrick Pérez. On evaluating face tracks
+in movies. IEEE International Conference on Image Processing (ICIP 2013), Sep 2013, Melbourne,
+Australia. 2013. <hal-00870059>
+HAL Id: hal-00870059
+https://hal.inria.fr/hal-00870059
+Submitted on 4 Oct 2013
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de
+recherche français ou étrangers, des laboratoires"
+aeff403079022683b233decda556a6aee3225065,DeepFace: Face Generation using Deep Learning,"DeepFace: Face Generation using Deep Learning
+Hardie Cate
+Fahim Dalvi
+Zeshan Hussain"
+ae753fd46a744725424690d22d0d00fb05e53350,Describing Clothing by Semantic Attributes,"Describing Clothing by Semantic Attributes
+Anonymous ECCV submission
+Paper ID 727"
+ae0a0ee1c6e2adcddffebf9b0e429a25b7d9c0e1,"A Review and Analysis of Eye-Gaze Estimation Systems, Algorithms and Performance Evaluation Methods in Consumer Platforms","A Review and Analysis of Eye-Gaze Estimation
+Systems, Algorithms and Performance
+Evaluation Methods in Consumer Platforms
+Anuradha Kar, Student Member, IEEE, Peter Corcoran Fellow, IEEE"
+aeec61ef41d55b5c1becfdc00c2e4dbca0e379c0,Automatic Recognition by Gait,"I N V I T E D
+P A P E R
+Automatic Recognition by Gait
+Recognizing people by the way they walk promises to be useful for identifying
+individuals from a distance; improved techniques are under development.
+By Mark S. Nixon, Member IEEE, and John N. Carter, Member IEEE"
+ae8cc8db9e05c79adad03da64a4a9ba0b00f4eb5,Large Scale Local Online Similarity/Distance Learning Framework based on Passive/Aggressive,"International Journal of Machine Learning and Cybernetics
+DOI –x
+ORI GI NAL ARTI CLE
+Large Scale Local Online Similarity/Distance Learning Framework based on
+Passive/Aggressive
+Baida Hamdan1, Davood Zabihzadeh*1, Monsefi Reza1
+Computer Department, Engineering Faculty, Ferdowsi University of Mashhad (FUM), Mashhad, IRAN
+* Corresponding Author"
+ae85c822c6aec8b0f67762c625a73a5d08f5060d,Retrieving Similar Styles to Parse Clothing,"This is the author's version of an article that has been published in this journal. Changes were made to this version by the publisher prior to publication.
+The final version of record is available at http://dx.doi.org/10.1109/TPAMI.2014.2353624
+IEEE TRANSACTION ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. M, NO. N, MONTH YEAR
+Retrieving Similar Styles to Parse Clothing
+Kota Yamaguchi, Member, IEEE, M. Hadi Kiapour, Student Member, IEEE,
+Luis E. Ortiz, and Tamara L. Berg, Member, IEEE"
+aed5aecd3f0a07036e570c84c06cd37ab8904acc,The Resiliency of Memorability: A Predictor of Memory Separate from Attention and Priming,"The Resiliency of Memorability: A Predictor of Memory
+Separate from Attention and Priming
+Wilma A. Bainbridge
+Department of Brain and Cognitive Sciences, Massachusetts Institute of Technology. Cambridge, MA. USA.
+Keywords: Memorability, top-down attention, bottom-up attention, priming, visual search,
+spatial cueing, directed forgetting, depth of encoding"
+ae87896c38f1871457d811a0588487db0155a833,Attentional allocation of ASD individuals : Searching for a Face - in - the - Crowd,"Attentional allocation of ASD individuals: Searching for a Face-in-the-Crowd
+David J. Moore, John Reidy and Lisa Heavey
+Department of Psychology, Sociology and Politics,
+Sheffield Hallam University
+Running Header: Attentional allocation of ASD individuals"
+aef3ecc926ed79478f9d1f38c0fec2a29bae9c3b,Counting in High Density Crowd Videos,"Counting in High Density Crowd Videos
+Edgar Lopez
+University of Texas at El Paso"
+aee90db1f66b77113b0a62701deb01ca96b6d9e6,"Discriminant Saliency, the Detection of Suspicious Coincidences, and Applications to Visual Recognition","JUNE 2009
+Discriminant Saliency, the Detection
+of Suspicious Coincidences,
+nd Applications to Visual Recognition
+Dashan Gao, Member, IEEE, Sunhyoung Han, Student Member, IEEE, and
+Nuno Vasconcelos, Senior Member, IEEE"
+d88e3d5ca820cb240de4b662f0a6fd1172a678c7,Image Quality-based Adaptive Illumination Normalisation for Face Recognition,"Harin Sellahewa and Sabah A. Jassim, ""Image quality-based adaptive illumination normalisation for face recognition"",
+Proc. SPIE 7306, Optics and Photonics in Global Homeland Security V and Biometric Technology for Human
+Identification VI, 73061V (May 05, 2009); doi:10.1117/12.819087; http://dx.doi.org/10.1117/12.819087
+Copyright 2009 Society of Photo Optical Instrumentation Engineers. One print or electronic copy may be made for
+personal use only. Systematic reproduction and distribution, duplication of any material in this paper for a fee or for
+ommercial purposes, or modification of the content of the paper are prohibited.” (http://spie.org/x1125.xml)"
+d84263e22c7535cb1a2a72c88780d5a407bd9673,Stability of Scattering Decoder For Nonlinear Diffractive Imaging,"Stability of Scattering Decoder for Nonlinear Diffractive Imaging
+Yu Sun1 and Ulugbek S. Kamilov1,2
+Department of Computer Science & Engineering, Washington University in St Louis.
+Department of Electrical & Systems Engineering, Washington University in St. Louis"
+d80564cea654d11b52c0008891a0fd2988112049,Semi-supervised Conditional GANs,"Semi-supervised Conditional GANs
+Kumar Sricharan∗1, Raja Bala1, Matthew Shreve1,
+Hui Ding1, Kumar Saketh2, and Jin Sun1
+Interactive and Analytics Lab, Palo Alto Research Center, Palo Alto, CA
+Verizon Labs, Palo Alto, CA
+August 22, 2017"
+d827c72d6c9e35066b40bd205bbd71ce487a1c39,Ensemble of Face/eye Detectors for Accurate Automatic Face Detection,"International Journal of Latest Research in Science
+Volume 4, Issue 3: Page No.8-18, May-June 2015
+http://www.mnkjournals.com/ijlrst.htm
+nd Technology ISSN (Online):2278-5299
+ENSEMBLE OF FACE/EYE DETECTORS FOR
+ACCURATE AUTOMATIC FACE DETECTION
+Loris Nanni, 2Alessandra Lumini, 3Sheryl Brahnam
+Department of Information Engineering at the University of Padua, Padua, Italy
+DISI, University of Bologna, Cesena, Italy
+Computer Information Systems, Missouri State University, USA"
+d861c658db2fd03558f44c265c328b53e492383a,Automated face extraction and normalization of 3D Mesh Data,"Automated Face Extraction and Normalization of 3D Mesh Data
+Jia Wu1, Raymond Tse2, Linda G. Shapiro1"
+d833c48334e906537f21757b6f9fa44da66f6c76,MEMC-Net: Motion Estimation and Motion Compensation Driven Neural Network for Video Interpolation and Enhancement,"MEMC-Net: Motion Estimation and Motion
+Compensation Driven Neural Network for
+Video Interpolation and Enhancement
+Wenbo Bao, Wei-Sheng Lai, Xiaoyun Zhang, Zhiyong Gao, and Ming-Hsuan Yang"
+d8d1fb804d1f4760393c6fd70c9072fa1b39f02c,An Efficient Approach to Onboard Stereo Vision System Pose Estimation,"An Efficient Approach to Onboard Stereo
+Vision System Pose Estimation
+Angel Domingo Sappa, Member, IEEE, Fadi Dornaika, Daniel Ponsa, David Gerónimo, and Antonio López"
+d8abf01fce0d44665949e7a73716fff7731fa6da,Places: An Image Database for Deep Scene Understanding,"Places: An Image Database for Deep Scene
+Understanding
+Bolei Zhou, Aditya Khosla, Agata Lapedriza, Antonio Torralba and Aude Oliva"
+d8b58c5b403dc28437af8244ec812efdfbc6b2e0,MVOR: A Multi-view RGB-D Operating Room Dataset for 2D and 3D Human Pose Estimation,"MVOR: A Multi-view RGB-D Operating Room
+Dataset for 2D and 3D Human Pose Estimation
+Vinkle Srivastav1, Thibaut Issenhuth1, Abdolrahim Kadkhodamohammadi1,
+Michel de Mathelin1, Afshin Gangi1,2, and
+Nicolas Padoy1
+ICube, University of Strasbourg, CNRS, IHU Strasbourg, France
+Radiology Department, University Hospital of Strasbourg, France"
+d813ec3a3442f2885b76ac0133c4c5d76f9f8065,Panoptic Studio: A Massively Multiview System for Social Interaction Capture,"Panoptic Studio: A Massively Multiview System
+for Social Interaction Capture
+Hanbyul Joo, Tomas Simon, Xulong Li, Hao Liu, Lei Tan, Lin Gui, Sean Banerjee, Timothy Godisart,
+Bart Nabbe, Iain Matthews, Takeo Kanade, Shohei Nobuhara, and Yaser Sheikh"
+d8f0bda19a345fac81a1d560d7db73f2b4868836,Online Activity Understanding and Labeling in Natural Videos,"UNIVERSITY OF CALIFORNIA
+RIVERSIDE
+Online Activity Understanding and Labeling in Natural Videos
+A Dissertation submitted in partial satisfaction
+of the requirements for the degree of
+Doctor of Philosophy
+Computer Science
+Md Mahmudul Hasan
+August 2016
+Dissertation Committee:
+Dr. Amit K. Roy-Chowdhury, Chairperson
+Dr. Eamonn Keogh
+Dr. Evangelos Christidis
+Dr. Christian Shelton"
+d809c0ab068861c139a544e5d8eeaa73cc8a3f6b,Monocular Semantic Occupancy Grid Mapping with Convolutional Variational Encoder-Decoder Networks,"Monocular Semantic Occupancy Grid Mapping
+with Convolutional Variational Encoder-Decoder Networks
+Chenyang Lu1, Ren´e van de Molengraft2, and Gijs Dubbelman1"
+d888895cd56d336aa1367fac8072da782bdbc0fb,AttnGAN: Fine-Grained Text to Image Generation with Attentional Generative Adversarial Networks,"AttnGAN: Fine-Grained Text to Image Generation
+with Attentional Generative Adversarial Networks
+Tao Xu∗1, Pengchuan Zhang2, Qiuyuan Huang2,
+Han Zhang3, Zhe Gan4, Xiaolei Huang1, Xiaodong He2
+Lehigh University 2Microsoft Research 3Rutgers University 4Duke University
+{tax313, {penzhan, qihua,"
+d82b93f848d5442f82154a6011d26df8a9cd00e7,Neural Network Based Age Classification Using Linear Wavelet Transforms,"NEURAL NETWORK BASED AGE CLASSIFICATION USING
+LINEAR WAVELET TRANSFORMS
+NITHYASHRI JAYARAMAN1 & G.KULANTHAIVEL2
+Department of Computer Science & Engineering,
+Sathyabama University Old Mamallapuram Road, Chennai, India
+Electronics Engineering, National Institute of Technical Teachers
+Training & Research, Taramani, Chennai, India
+E-mail :"
+d881a59d00971c754e02bfaaf4c48ec6dfbc1343,Neighborhood Sensitive Mapping for Zero-Shot Classification using Independently Learned Semantic Embeddings,"Neighborhood Sensitive Mapping for Zero-Shot
+Classification using Independently Learned
+Semantic Embeddings
+Gaurav Singh1, Fabrizio Silvestri2, and John Shawe-Taylor1
+UCL, UK
+Yahoo, UK"
+d87ccfc42cf6a72821d357aab0990e946918350b,Exploiting the Potential of Standard Convolutional Autoencoders for Image Restoration by Evolutionary Search,"Exploiting the Potential of Standard Convolutional Autoencoders
+for Image Restoration by Evolutionary Search
+Masanori Suganuma 1 2 Mete Ozay 1 Takayuki Okatani 1 2"
+d84568d42a02b6d365889451f208f423edb1f0f3,Age Synthesis and Estimation From Face Image Ms,"www.ijecs.in
+International Journal Of Engineering And Computer Science ISSN:2319-7242
+Volume 3 Issue 4 April, 2014 Page No. 5462-5466
+Age Synthesis and Estimation From Face Image
+Ms. Deepali R. gadbail1, Prof. S.S. Dhande2, Prof.Kanchan M. Pimple3
+M s. Deepali R Gadbail,
+Computer Science and Engineering Department,
+Sipna COET,Amravati.
+Prof. S. S. Dhande,
+Computer Science and Engineering Department,
+Sipna COET,Amravati.
+Prof.Kanchan M . Pimple,
+IBSS College of engg. & tech.,Amravati"
+d83d2fb5403c823287f5889b44c1971f049a1c93,Introducing the sick face,"Motiv Emot
+DOI 10.1007/s11031-013-9353-6
+O R I G I N A L P A P E R
+Introducing the sick face
+Sherri C. Widen • Joseph T. Pochedly •
+Kerrie Pieloch • James A. Russell
+Ó Springer Science+Business Media New York 2013"
+d8671247f6188620c6e382ffcd15d3e909647c63,Multicamera human detection and tracking supporting natural interaction with large-scale displays,"DOI 10.1007/s00138-012-0408-6
+ORIGINAL PAPER
+Multicamera human detection and tracking supporting natural
+interaction with large-scale displays
+Xenophon Zabulis · Dimitris Grammenos ·
+Thomas Sarmis · Konstantinos Tzevanidis ·
+Pashalis Padeleris · Panagiotis Koutlemanis ·
+Antonis A. Argyros
+Received: 8 March 2011 / Revised: 9 January 2012 / Accepted: 17 January 2012
+© Springer-Verlag 2012"
+d8db46f1775641051d8596dad3d37d1d731558f7,Survey on Deep Learning Techniques for Person Re-Identification Task,
+d8b568392970b68794a55c090c4dd2d7f90909d2,PDA Face Recognition System Using Advanced Correlation Filters,"PDA Face Recognition System
+Using Advanced Correlation
+Filters
+Chee Kiat Ng
+Advisor: Prof. Khosla/Reviere"
+d83ae5926b05894fcda0bc89bdc621e4f21272da,Frugal Forests: Learning a Dynamic and Cost Sensitive Feature Extraction Policy for Anytime Activity Classification,"The Thesis committee for Joshua Allen Kelle certifies that this is the approved
+version of the following thesis:
+Frugal Forests: Learning a Dynamic and Cost Sensitive
+Feature Extraction Policy for Anytime Activity Classification
+APPROVED BY
+SUPERVISING COMMITTEE:
+Kristen Grauman, Supervisor
+Peter Stone"
+d8029237cde893218d21ba551fd127d045ae3422,Eye-Strip based Person Identification based on Non-Subsampled Contourlet Transform,"International Journal of Computer Applications (0975 – 8887)
+Volume 121 – No.12, July 2015
+Eye-Strip based Person Identification based on
+Non-Subsampled Contourlet Transform
+Hemprasad Y. Patil
+Dept. of ECE
+Visvesvaraya National Institute
+of Technology, Nagpur, India
+Ashwin G. Kothari
+Dept. of ECE
+Visvesvaraya National Institute
+of Technology, Nagpur, India
+Kishor M. Bhurchandi
+Dept. of ECE
+Visvesvaraya National Institute
+of Technology, Nagpur, India
+transform
+sub-band"
+d8af6a45eaea68adda8597ae65f91ece152f7b21,Sparse and Dense Data with CNNs: Depth Completion and Semantic Segmentation,"Sparse and Dense Data with CNNs:
+Depth Completion and Semantic Segmentation
+Maximilian Jaritz1, 2, Raoul de Charette1, Emilie Wirbel2, Xavier Perrotton2, Fawzi Nashashibi1
+{maximilian.jaritz, raoul.de-charette,
+Inria RITS Team
+{emilie.wirbel,
+Valeo"
+d806790866ab9bad77f60436fe77232db8e0c1ba,Deep Directional Network for Object Tracking,"Article
+Deep Directional Network for Object Tracking
+Zhaohua Hu 1,2,* and Xiaoyi Shi 1
+School of Electronic & Information Engineering, Nanjing University of Information Science & Technology,
+Nanjing 210044, China;
+Jiangsu Collaborative Innovation Center on Atmospheric Environment and Equipment Technology,
+Nanjing University of Information Science & Technology, Nanjing 210044, China
+* Correspondence: Tel.: +86-025-58731196
+Received: 10 October 2018; Accepted: 1 November 2018; Published: 5 November 2018"
+d82681348489f4f04690e65b9ffe21b68c89b5ff,Cross-Subject EEG Feature Selection for Emotion Recognition Using Transfer Recursive Feature Elimination,"ORIGINAL RESEARCH
+published: 10 April 2017
+doi: 10.3389/fnbot.2017.00019
+Cross-Subject EEG Feature Selection
+for Emotion Recognition Using
+Transfer Recursive Feature
+Elimination
+Zhong Yin 1*, Yongxiong Wang 1*, Li Liu 1, Wei Zhang 1 and Jianhua Zhang 2
+Shanghai Key Lab of Modern Optical System, Engineering Research Center of Optical Instrument and System, Ministry of
+Education, University of Shanghai for Science and Technology, Shanghai, China, 2 Department of Automation, East China
+University of Science and Technology, Shanghai, China
+Using machine-learning methodologies to analyze EEG signals becomes increasingly
+ttractive for recognizing human emotions because of the objectivity of physiological
+data and the capability of the learning principles on modeling emotion classifiers from
+heterogeneous features. However, the conventional subject-specific classifiers may
+induce additional burdens to each subject for preparing multiple-session EEG data
+s training sets. To this end, we developed a new EEG feature selection approach,
+transfer recursive feature elimination (T-RFE), to determine a set of the most robust EEG
+indicators with stable geometrical distribution across a group of training subjects and
+specific testing subject. A validating set is introduced to independently determine"
+d86fabd4498c8feaed80ec342d254fb877fb92f5,Region-Object Relevance-Guided Visual Relationship Detection,"Y. GOUTSU: REGION-OBJECT RELEVANCE-GUIDED VRD
+Region-Object Relevance-Guided
+Visual Relationship Detection
+Yusuke Goutsu
+National Institute of Informatics
+Tokyo, Japan"
+d8e061960423a17748dedbcfe4b6a6918f79c262,Fast Prototyping and Computationally Intensive Experiments,"Armadillo: An Open Source C++ Linear Algebra Library for
+Fast Prototyping and Computationally Intensive Experiments
+Conrad Sanderson
+http://conradsanderson.id.au
+Technical Report, NICTA, Australia
+http://nicta.com.au
+September 2010
+(revised December 2011)"
+d865c5e85191cfc0da714290d8583a2fb1179fd4,"Learning Hierarchical Space Tiling for Scene Modeling, Parsing and Attribute Tagging","Learning Hierarchical Space Tiling for Scene
+Modeling, Parsing and Attribute Tagging
+Shuo Wang, Yizhou Wang, and Song-Chun Zhu"
+d8f7b26d25a026fe43487b6f77993e11b8b333e0,Photo Indexing and Retrieval based on Content and Context,"PhD Dissertation
+International Doctorate School in Information and
+Communication Technologies
+DISI - University of Trento
+Photo Indexing and Retrieval
+ased on Content and Context
+Mattia Broilo
+Advisor:
+Prof. Francesco G. B. De Natale
+Universit`a degli Studi di Trento
+February 2011"
+d850aff9d10a01ad5f1d8a1b489fbb3998d0d80e,Recognizing and Segmenting Objects in the Presence of Occlusion and Clutter,"UNIVERSITY OF CALIFORNIA,
+IRVINE
+Recognizing and Segmenting Objects in the Presence of Occlusion and Clutter
+DISSERTATION
+submitted in partial satisfaction of the requirements
+for the degree of
+DOCTOR OF PHILOSOPHY
+in Computer Science
+Golnaz Ghiasi
+Dissertation Committee:
+Professor Charless Fowlkes, Chair
+Professor Deva Ramanan
+Professor Alexander Ihler"
+d88eb94d7054d2668b1a8dfa311721f37ae1f059,Straight to the Facts: Learning Knowledge Base Retrieval for Factual Visual Question Answering,"Straight to the Facts: Learning Knowledge Base
+Retrieval for Factual Visual Question Answering
+Medhini Narasimhan, Alexander G. Schwing
+University of Illinois Urbana-Champaign"
+d81dbc2960e527e91c066102aabdaf9eb8b15f85,Deep Directed Generative Models with Energy-Based Probability Estimation,"Deep Directed Generative Models
+with Energy-Based Probability Estimation
+Taesup Kim, Yoshua Bengio∗
+Department of Computer Science and Operations Research
+Université de Montréal
+Montréal, QC, Canada"
+d8c04365ed0627a5043996cdd26c1a56b5a630b8,Learning Monocular Depth Estimation with Unsupervised Trinocular Assumptions,"Learning monocular depth estimation with unsupervised trinocular assumptions
+Matteo Poggi, Fabio Tosi, Stefano Mattoccia
+University of Bologna, Department of Computer Science and Engineering
+Viale del Risorgimento 2, Bologna, Italy
+{m.poggi, fabio.tosi5,"
+d89cfed36ce8ffdb2097c2ba2dac3e2b2501100d,Robust Face Recognition via Multimodal Deep Face Representation,"Robust Face Recognition via Multimodal Deep
+Face Representation
+Changxing Ding, Student Member, IEEE, Dacheng Tao, Fellow, IEEE"
+ab87ab1cf522995510561cd9f494223704f1de91,Human Centric Facial Expression Recognition,"Human Centric Facial Expression Recognition
+K. Clawson 1*, L. S. Delicato, 2** and C. Bowerman, 1***
+Faculty of Computer Science, University of Sunderland, Sunderland, SR1 3SD, UK
+. Faculty of Health, Sciences and Wellbeing, University of Sunderland, SR1 3QR, UK
+Facial expression recognition (FER) is an area of active research, both in computer science and in
+ehavioural science. Across these domains there is evidence to suggest that humans and machines
+find it easier to recognise certain emotions, for example happiness, in comparison to others. Recent
+ehavioural studies have explored human perceptions of emotion further, by evaluating the relative
+ontribution of features in the face when evaluating human sensitivity to emotion. It has been
+identified that certain facial regions have more salient features for certain expressions of emotion,
+especially when emotions are subtle in nature. For example, it is easier to detect fearful expressions
+when the eyes are expressive. Using this observation as a starting point for analysis, we similarly
+examine the effectiveness with which knowledge of facial feature saliency may be integrated into
+urrent approaches to automated FER. Specifically, we compare and evaluate the accuracy of ‘full-
+face’ versus upper and lower facial area convolutional neural network (CNN) modelling for emotion
+recognition in static images, and propose a human centric CNN hierarchy which uses regional image
+inputs to leverage current understanding of how humans recognise emotions across the face.
+Evaluations using the CK+ dataset demonstrate that our hierarchy can enhance classification
+ccuracy
+individual CNN architectures, achieving overall true positive"
+ab8f9a6bd8f582501c6b41c0e7179546e21c5e91,Nonparametric Face Verification Using a Novel Face Representation,"Nonparametric Face Verification Using a Novel
+Face Representation
+Hae Jong Seo, Student Member, IEEE, Peyman Milanfar, Fellow, IEEE,"
+ab58a7db32683aea9281c188c756ddf969b4cdbd,Efficient Solvers for Sparse Subspace Clustering,"Efficient Solvers for Sparse Subspace Clustering
+Farhad Pourkamali-Anaraki and Stephen Becker"
+aba770a7c45e82b2f9de6ea2a12738722566a149,Face Recognition in the Scrambled Domain via Salience-Aware Ensembles of Many Kernels,"Face Recognition in the Scrambled Domain via Salience-Aware
+Ensembles of Many Kernels
+Jiang, R., Al-Maadeed, S., Bouridane, A., Crookes, D., & Celebi, M. E. (2016). Face Recognition in the
+Scrambled Domain via Salience-Aware Ensembles of Many Kernels. IEEE Transactions on Information
+Forensics and Security, 11(8), 1807-1817. DOI: 10.1109/TIFS.2016.2555792
+Published in:
+Document Version:
+Peer reviewed version
+Queen's University Belfast - Research Portal:
+Link to publication record in Queen's University Belfast Research Portal
+Publisher rights
+(c) 2016 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other users, including reprinting/
+republishing this material for advertising or promotional purposes, creating new collective works for resale or redistribution to servers or lists,
+or reuse of any copyrighted components of this work in other works.
+General rights
+Copyright for the publications made accessible via the Queen's University Belfast Research Portal is retained by the author(s) and / or other
+opyright owners and it is a condition of accessing these publications that users recognise and abide by the legal requirements associated
+with these rights.
+Take down policy
+The Research Portal is Queen's institutional repository that provides access to Queen's research output. Every effort has been made to"
+ab8af4cb5243544e38852bb670aafe5a2fd9b3ec,Real-Time Human Detection Using Relational Depth Similarity Features,"Real-Time Human Detection using Relational
+Depth Similarity Features
+Sho Ikemura, Hironobu Fujiyoshi
+Dept. of Computer Science, Chubu University.
+Matsumoto 1200, Kasugai, Aichi, 487-8501 Japan.
+http://www.vision.cs.chubu.ac.jp"
+ab302d79e419348499acbda4a627b67dec89936f,Robust Correlated and Individual Component Analysis,"JOURNAL OF LATEX CLASS FILES, VOL. 6, NO. 1, JANUARY 2014
+Robust Correlated and Individual Component
+Analysis
+Yannis Panagakis, Member, IEEE, Mihalis A. Nicolaou, Member, IEEE,
+Stefanos Zafeiriou, Member, IEEE, and Maja Pantic, Fellow, IEEE"
+abfcafaa765433b8f5b8be7eae392a8daec54b8e,Facial EMG Responses to Emotional Expressions Are Related to Emotion Perception Ability,"Facial EMG Responses to Emotional Expressions Are
+Related to Emotion Perception Ability
+Janina Ku¨ necke1*, Andrea Hildebrandt1, Guillermo Recio1,2, Werner Sommer1, Oliver Wilhelm2
+Department of Psychology, Humboldt Universita¨t zu Berlin, Berlin, Germany, 2 Department of Psychology, University Ulm, Ulm, Germany"
+ab0f9bc35b777eaefff735cb0dd0663f0c34ad31,Semi-supervised Learning of Geospatial Objects through Multi-modal Data Integration,"Semi-Supervised Learning of Geospatial Objects
+Through Multi-Modal Data Integration
+Yi Yang and Shawn Newsam
+Electrical Engineering and Computer Science
+University of California, Merced, CA, 95343
+Email:"
+abc4d51d510cd8222484f7f4f11a739e8bce42ff,On Fast Non-metric Similarity Search by Metric Access Methods,"On Fast Non-metric Similarity Search
+y Metric Access Methods
+Tom´aˇs Skopal
+Charles University in Prague, FMP, Department of Software Engineering,
+Malostransk´e n´am. 25, 118 00 Prague 1, Czech Republic"
+ab98abfbdfd700c27bee31ca1f8850db72120c5d,Video Event Detection by Exploiting Word Dependencies from Image Captions,"Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers,
+pages 3318–3327, Osaka, Japan, December 11-17 2016."
+ab8778793b0f2f06d9e97b6277f3b1125f31432c,Stochastic Models for Face Image Analysis,"Stochastic Models for Face Image Analysis
+St(cid:19)ephane Marchand-Maillet and Bernard M(cid:19)erialdo
+Department of Multimedia Communications
+Institut EURECOM { B.P.  
+
+ab989225a55a2ddcd3b60a99672e78e4373c0df1,"Sample, computation vs storage tradeoffs for classification using tensor subspace models","Sample, Computation vs Storage Tradeoffs for
+Classification Using Tensor Subspace Models
+Mohammadhossein Chaghazardi and Shuchin Aeron, Senior Member, IEEE"
+abddbb57258d85b1f3d9789128fd284d30a91e23,A research and education initiative at the MIT Sloan School of Management Network Structure & Information Advantage Paper 235,"A research and education initiative at the MIT
+Sloan School of Management
+Network Structure & Information Advantage
+Paper 235
+Sinan Aral
+Marshall Van Alstyne
+July 2007
+For more information,
+please visit our website at http://digital.mit.edu
+or contact the Center directly at
+or 617-253-7054"
+abba22ed4713a5ee5fa91fcf7b8dde58a9b621db,Acquisition of a 3D Audio-Visual Corpus of Affective Speech,"BIWI Technical Report n. 270
+Acquisition of a 3D Audio-Visual Corpus of
+Affective Speech
+Gabriele Fanelli, Juergen Gall, Harald Romsdorfer, Thibaut Weise,
+nd Luc Van Gool"
+ab69f49fedb6936ce04b2e9d1f161772b2f24b7d,Architecture-aware optimization of an HEVC decoder on asymmetric multicore processors,"(will be inserted by the editor)
+Architecture-Aware Optimization of an HEVC decoder on
+Asymmetric Multicore Processors
+Rafael Rodr´ıguez-S´anchez · Enrique S. Quintana-Ort´ı
+Received: date / Revised: date"
+ab6776f500ed1ab23b7789599f3a6153cdac84f7,A Survey on Various Facial Expression Techniques,"International Journal of Scientific & Engineering Research, Volume 6, Issue 4, April-2015 1212
+ISSN 2229-5518
+A Survey on Various Facial Expression
+Techniques
+Md. Sarfaraz Jalil, Joy Bhattacharya"
+ab036048cf90296171ad2bb7265c5a5b7f3252f7,Multimodal Recurrent Neural Networks With Information Transfer Layers for Indoor Scene Labeling,"Multimodal Recurrent Neural Networks with
+Information Transfer Layers for Indoor Scene
+Labeling
+Abrar H. Abdulnabi, Student Member, IEEE, Bing Shuai, Student Member, IEEE,
+Zhen Zuo, Student Member, IEEE, Lap-Pui Chau, Fellow, IEEE, and Gang Wang, Senior Member, IEEE"
+ab1719f573a6c121d7d7da5053fe5f12de0182e7,Combining visual recognition and computational linguistics : linguistic knowledge for visual recognition and natural language descriptions of visual content,"Combining Visual Recognition
+nd Computational Linguistics
+Linguistic Knowledge for Visual Recognition
+nd Natural Language Descriptions
+of Visual Content
+Thesis for obtaining the title of
+Doctor of Engineering Science
+(Dr.-Ing.)
+of the Faculty of Natural Science and Technology I
+of Saarland University
+Marcus Rohrbach, M.Sc.
+Saarbrücken
+March 2014"
+ab559473a01836e72b9fb9393d6e07c5745528f3,cGANs with Projection Discriminator,"Published as a conference paper at ICLR 2018
+CGANS WITH PROJECTION DISCRIMINATOR
+Takeru Miyato1, Masanori Koyama2
+Preferred Networks, Inc. 2Ritsumeikan University"
+abe9f3b91fd26fa1b50cd685c0d20debfb372f73,The Pascal Visual Object Classes Challenge: A Retrospective,"(will be inserted by the editor)
+The Pascal Visual Object Classes Challenge – a Retrospective
+Mark Everingham, S. M. Ali Eslami, Luc Van Gool,
+Christopher K. I. Williams, John Winn, Andrew Zisserman
+Received: date / Accepted: date"
+ab969cfae95f62d68c61830128b35786eb6c84a9,Contents 1 Introduction 2,"Contents1Introduction22Tracking:FundamentalNotions22.1Trackingbydetection........................................22.2TrackingusingFlow........................................22.3Flowmodelsfromkinematicmodels................................22.4TrackingwithProbability......................................23Tracking:Relationsbetween3Dand2D23.1KinematicInferencewithMultipleViews.............................23.2Liftingto3D............................................33.3MultipleModes,RandomizedSearchandHumanTracking....................34Tracking:DataAssociationforHumanTracking54.1DetectingHumans.........................................54.2TrackingbyMatchingRevisited..................................64.3Evaluation..............................................75MotionSynthesisandAnimation95.1Motioncapture...........................................95.2Footskate..............................................95.3ResolvingKinematicAmbiguitieswithExamples.........................95.4MotionSignalProcessing......................................95.5MotionGraphs...........................................95.6MotionPrimitives..........................................105.7EnrichingaMotionCollection...................................105.8MotionfromPhysicalConsiderations...............................105.8.1SimplifiedCharacters....................................105.8.2ModifiedPhysics......................................115.8.3ReducedDimensions....................................115.8.4ModifyingExistingMotions................................116DescribingActivities126.1WhatshouldanActivityRepresentationdo?............................126.1.1NecessaryPropertiesofanActivityRepresentation....................136.1.2WhatDataisAvailable?..................................136.2MiscellaneousMethods.......................................146.2.1ActivityRepresentationMethodsbasedaroundTemporalLogics.............146.2.2ActivityReprese
ntationMethodsbasedonTemplates...................146.3ActivityRepresentationusingHiddenMarkovModelsandFiniteStateRepresentations.....146.4TheSpeechAnalogy........................................146.4.1FiniteStateTransducers..................................156.4.2WhyshouldweCare?...................................156.5ActivityRecognitionMethodsbasedaroundHMM’s.......................166.6SignLanguageRecognition.....................................176.7Morerecentmaterial........................................171"
+ab2b09b65fdc91a711e424524e666fc75aae7a51,Multi-modal Biomarkers to Discriminate Cognitive State *,"Multi-modal Biomarkers to Discriminate Cognitive State*
+Thomas F. Quatieri 1, James R. Williamson1, Christopher J. Smalt1,
+Joey Perricone, Tejash Patel, Laura Brattain, Brian S. Helfer, Daryush D. Mehta, Jeffrey Palmer
+Kristin Heaton2, Marianna Eddy3, Joseph Moran3
+MIT Lincoln Laboratory, Lexington, Massachusetts, USA
+USARIEM, 3NSRDEC
+. Introduction
+Multimodal biomarkers based on behavorial, neurophysiolgical, and cognitive measurements have
+recently obtained increasing popularity in the detection of cognitive stress- and neurological-based
+disorders. Such conditions are significantly and adversely affecting human performance and quality
+of life for a large fraction of the world’s population. Example modalities used in detection of these
+onditions include voice, facial expression, physiology, eye tracking, gait, and EEG analysis.
+Toward the goal of finding simple, noninvasive means to detect, predict and monitor cognitive
+stress and neurological conditions, MIT Lincoln Laboratory is developing biomarkers that satisfy
+three criteria. First, we seek biomarkers that reflect core components of cognitive status such as
+working memory capacity, processing speed, attention, and arousal. Second, and as importantly, we
+seek biomarkers that reflect timing and coordination relations both within components of each
+modality and across different modalities. This is based on the hypothesis that neural coordination
+cross different parts of the brain is essential in cognition (Figure 1). An example of timing and
+oordination within a modality is the set of finely timed and synchronized physiological"
+abb1289cfdc4c23d72d0680c3ec100eae74d4fdb,PatchMatch: A Fast Randomized Matching Algorithm with Application to Image and Video,"PatchMatch: A Fast Randomized Matching
+Algorithm with Application to Image and Video
+Connelly Barnes
+A Dissertation
+Presented to the Faculty
+of Princeton University
+in Candidacy for the Degree
+of Doctor of Philosophy
+Recommended for Acceptance
+y the Department of
+Computer Science
+Adviser: Adam Finkelstein
+May 2011"
+ab43c43d5eb2c5bee6de1b25c8bcb8068ab8bcd2,Deep Class-Wise Hashing: Semantics-Preserving Hashing via Class-wise Loss,"Deep Class-Wise Hashing:
+Semantics-Preserving Hashing via Class-wise Loss
+Xuefei Zhe, Shifeng Chen, Member, IEEE, and Hong Yan, Fellow, IEEE"
+ab87dfccb1818bdf0b41d732da1f9335b43b74ae,Structured Dictionary Learning for Classification,"SUBMITTED TO IEEE TRANSACTIONS ON SIGNAL PROCESSING
+Structured Dictionary Learning for Classification
+Yuanming Suo, Student Member, IEEE, Minh Dao, Student Member, IEEE, Umamahesh Srinivas, Student
+Member, IEEE, Vishal Monga, Senior Member, IEEE, and Trac D. Tran, Fellow, IEEE"
+abc1ef570bb2d7ea92cbe69e101eefa9a53e1d72,Raisonnement abductif en logique de description exploitant les domaines concrets spatiaux pour l'interprétation d'images,"Raisonnement abductif en logique de
+description exploitant les domaines concrets
+spatiaux pour l’interprétation d’images
+Yifan Yang 1, Jamal Atif 2, Isabelle Bloch 1
+. LTCI, Télécom ParisTech, Université Paris-Saclay, Paris, France
+. Université Paris-Dauphine, PSL Research University, CNRS, UMR 7243,
+LAMSADE, 75016 Paris, France
+RÉSUMÉ. L’interprétation d’images a pour objectif non seulement de détecter et reconnaître des
+objets dans une scène mais aussi de fournir une description sémantique tenant compte des in-
+formations contextuelles dans toute la scène. Le problème de l’interprétation d’images peut être
+formalisé comme un problème de raisonnement abductif, c’est-à-dire comme la recherche de la
+meilleure explication en utilisant une base de connaissances. Dans ce travail, nous présentons
+une nouvelle approche utilisant une méthode par tableau pour la génération et la sélection
+d’explications possibles d’une image donnée lorsque les connaissances, exprimées dans une
+logique de description, comportent des concepts décrivant les objets mais aussi les relations
+spatiales entre ces objets. La meilleure explication est sélectionnée en exploitant les domaines
+oncrets pour évaluer le degré de satisfaction des relations spatiales entre les objets."
+abb3df5b61dc7550db96fc112f98fb99a9db8c93,End-to-End Learning of Deep Visual Representations for Image Retrieval,"Noname manuscript No.
+(will be inserted by the editor)
+End-to-end Learning of Deep Visual Representations
+for Image Retrieval
+Albert Gordo · Jon Almaz´an · Jerome Revaud · Diane Larlus
+Received: date / Accepted: date"
+ab450a7968555532d9ea79f81189c0d52f9c5f11,RGB-D Face Recognition in Surveillance Videos,"RGB-D Face Recognition in Surveillance Videos
+Anurag Chowdhury
+IIIT-D-MTech-CS-GEN-14-002
+June 23, 2016
+Indraprastha Institute of Information Technology Delhi
+New Delhi
+Thesis Advisors
+Dr. Richa Singh
+Dr. Mayank Vatsa
+Submitted in partial fulfillment of the requirements
+for the Degree of M.Tech. in Computer Science
+(cid:13) Chowdhury, 2016
+Keywords : RGB-D, Kinect, Face Detection, Face Recognition, Deep Learning"
+abeda55a7be0bbe25a25139fb9a3d823215d7536,Understanding Human-Centric Images: From Geometry to Fashion,"UNIVERSITATPOLITÈCNICADECATALUNYAProgramadeDoctorat:AUTOMÀTICA,ROBÒTICAIVISIÓTesiDoctoralUnderstandingHuman-CentricImages:FromGeometrytoFashionEdgarSimoSerraDirectors:FrancescMorenoNoguerCarmeTorrasMay2015"
+ab1f98b59fa98216f052ae19adce6fd94ebb800d,"Explaining First Impressions: Modeling, Recognizing, and Explaining Apparent Personality from Videos","Preprint submitted to International Journal of Computer Vision manuscript No.
+(will be inserted by the editor)
+Explaining First Impressions: Modeling,
+Recognizing, and Explaining Apparent Personality
+from Videos
+Hugo Jair Escalante∗ · Heysem Kaya∗ ·
+Albert Ali Salah∗ · Sergio Escalera ·
+Ya˘gmur G¨u¸cl¨ut¨urk · Umut G¨u¸cl¨u ·
+Xavier Bar´o · Isabelle Guyon · Julio
+Jacques Junior · Meysam Madadi ·
+Stephane Ayache · Evelyne Viegas ·
+Furkan G¨urpınar · Achmadnoer Sukma
+Wicaksana · Cynthia C. S. Liem ·
+Marcel A. J. van Gerven · Rob van Lier
+Received: date / Accepted: date
+Means equal contribution by the authors.
+Hugo Jair Escalante
+INAOE, Mexico and ChaLearn, USA E-mail:
+Heysem Kaya
+Namık Kemal University, Department of Computer Engineering, Turkey"
+abf659847660763c94b44c0baaf9198046a11845,Video Image Object Tracking Algorithm based on Improved Principal Component Analysis,"Video Image Object Tracking Algorithm based
+on Improved Principal Component Analysis
+. Engineering Technology Research Center of Optoelectronic Technology Appliance, AnHui Tongling Anhui 244000,
+. Hefei University of Technology, Hefei Anhui 230009, China
+China
+Wang Liping 1, 2
+dopts
+DPCA
+lgorithm
+to reduce dimension of object"
+ab41364a58b34844b281046c3d8678f7d537a97e,Learning Deep Hierarchical Visual Feature Coding,"Learning Deep Hierarchical Visual Feature Coding
+Hanlin Goh, Nicolas Thome, Member, IEEE, Matthieu Cord, Member, IEEE, and Joo-Hwee Lim, Member, IEEE"
+ab8fb278db4405f7db08fa59404d9dd22d38bc83,Implicit and Automated Emotional Tagging of Videos,"UNIVERSITÉ DE GENÈVE
+Département d'Informatique
+FACULTÉ DES SCIENCES
+Professeur Thierry Pun
+Implicit and Automated Emotional
+Tagging of Videos
+THÈSE
+présenté à la Faculté des sciences de l'Université de Genève
+pour obtenir le grade de Docteur ès sciences, mention informatique
+Mohammad SOLEYMANI
+Téhéran (IRAN)
+Thèse No 4368
+GENÈVE
+Repro-Mail - Université de Genève"
+ab03a1656d9e45c80379512161f6c90dfbb0b6b3,Active Learning for Regression Tasks with Expected Model Output Changes,"KÄDING ET AL.: ACTIVE LEARNING FOR REGRESSION TASKS WITH EMOC
+Active Learning for Regression Tasks
+with Expected Model Output Changes
+Computer Vision Group
+Friedrich Schiller University Jena
+Jena, Germany
+Carl Zeiss AG
+Jena, Germany
+Christoph Käding1
+Erik Rodner2
+Alexander Freytag2
+Oliver Mothes1
+Björn Barz1
+Joachim Denzler1"
+e5bcbfd346121769b674a7ad35e594758de5553f,A Dataset for Lane Instance Segmentation in Urban Environments,"A Dataset for Lane Instance Segmentation in
+Urban Environments
+Brook Roberts, Sebastian Kaltwang, Sina Samangooei,
+Mark Pender-Bare, Konstantinos Tertikas, and John Redford
+FiveAI Ltd., Cambridge CB2 1NS, U.K."
+e592f6dc3bf1d53044cd59ce4a75fdacd0ecc80d,Hand Vein Infrared Image Segmentation for Biometric Recognition,"Hand Vein Infrared Image Segmentation for Biometric
+Recognition
+Ignacio Irving Morales-Montiel1, J. Arturo Olvera-López1, Manuel Martín-Ortíz1, and
+Eber E. Orozco-Guillén2
+Facultad de Ciencias de la Computación
+Benemérita Universidad Autónoma de Puebla
+Av. San Claudio y 14 sur. Ciudad Universitaria.
+Puebla, Pue., Mexico
+Mazatlán, Sin., Mexico
+Programa de Ingeniería en Informática
+Universidad Politécnica de Sinaloa
+Carretera Municipal Libre Mazatlán Higueras Km. 3."
+e5c4b75cb79aa5155ffd9498b3fcc790eb794e72,Object Recognition using Discriminative Robust Local Binary Pattern,"WWW.IJITECH.ORG
+ISSN 2321-8665
+Vol.03,Issue.05,
+July-2015,
+Pages:0700-0706
+Object Recognition using Discriminative Robust Local Binary Pattern
+T. LAVANYA
+, A. SUJATHA
+PG Scholar, Dept of DE & CS, Dr.K.V.Subba Reddy Engineering College for Women, AP, India,
+Associate Professor, Dept of DE & CS, Dr.K.V.Subba Reddy Engineering College for Women, AP, India,
+E-mail:
+E-mail:"
+e5320955580401d5a5b2ae8b507e8f0b47e08118,Deep Supervision with Intermediate Concepts,"Deep Supervision with Intermediate Concepts
+Chi Li, M. Zeeshan Zia, Quoc-Huy Tran, Xiang Yu, Gregory D. Hager, and Manmohan Chandraker"
+e5563a0d6a2312c614834dc784b5cc7594362bff,Real-Time Demographic Profiling from Face Imagery with Fisher Vectors,"Noname manuscript No.
+(will be inserted by the editor)
+Real-Time Demographic Profiling from Face Imagery with
+Fisher Vectors
+Lorenzo Seidenari · Alessandro Rozza · Alberto Del Bimbo
+Received: ... / Accepted: ..."
+e524f222a117890126bd9597934d0504adce85ec,Error Correction for Dense Semantic Image Labeling,"Yu-Hui Huang1∗ Xu Jia2∗ Stamatios Georgoulis1
+Tinne Tuytelaars2
+Luc Van Gool1,3
+KU-Leuven/ESAT-PSI, Toyota Motor Europe (TRACE)
+ETH/DITET-CVL
+KU-Leuven/ESAT-PSI, IMEC"
+e5823a9d3e5e33e119576a34cb8aed497af20eea,DocFace+: ID Document to Selfie Matching,"DocFace+: ID Document to Selfie* Matching
+Yichun Shi, Student Member, IEEE, and Anil K. Jain, Life Fellow, IEEE"
+e596a4aedb5cda6f0df35d38549564a0dd5546a7,Public Document Document Evolution Executive Summary,"Project N° IST-2002-507634 - BioSecure
+D 9.1.3 – Revision: b2
+09 June 2006
+Contract Number :
+Project Acronym :
+Project Title :
+Instrument :
+Start Date of Project :
+Duration :
+Deliverable Number :
+Title of Deliverable :
+Contractual Due Date :
+Actual Date of Completion :
+IST-2002-507634
+BioSecure
+Biometrics for Secure Authentication
+Network of Excellence
+01 June, 2004
+6 months
+D 9.1.3"
+e564268a03b21fa092390db0c11ba1c33d2323f9,Multi-view Stereo with Single-View Semantic Mesh Refinement,"Multi-View Stereo with Single-View Semantic Mesh Refinement
+Andrea Romanoni Marco Ciccone
+Francesco Visin Matteo Matteucci
+{andrea.romanoni, marco.ciccone, francesco.visin,
+Politecnico di Milano, Italy"
+e5dcec59afdab7c15e3a874e9b602b8fc42b9019,Nonparametric Video Retrieval and Frame Classification using Tiny Videos,"International Conference on Recent Trends in Computational Methods, Communication and Controls (ICON3C 2012)
+Proceedings published in International Journal of Computer Applications® (IJCA)
+Nonparametric Video Retrieval and Frame Classification
+using Tiny Videos
+A.K. M. Shanawas Fathima,
+PG Student,
+Department of CSE
+GCE, Tirunelveli.
+R. Kanthavel,
+Department of CSE,
+Government College of Engineering,
+Tirunelveli."
+e59a68c328c69c294991f87b741a5d4e952defba,NISTIR 7972 Performance Metrics for Evaluating Object and Human Detection and Tracking Systems,"This publication is available free of charge from http://dx.doi.org/10.6028/NIST.IR.7972
+NISTIR 7972
+Performance Metrics for Evaluating
+Object and Human Detection and
+Tracking Systems
+Afzal Godil
+Roger Bostelman
+Will Shackleford
+Tsai Hong
+Michael Shneier
+http://dx.doi.org/10.6028/NIST.IR.7972"
+e510f2412999399149d8635a83eca89c338a99a1,Face Recognition using Block-Based DCT Feature Extraction,"Journal of Advanced Computer Science and Technology, 1 (4) (2012) 266-283
+(cid:13)Science Publishing Corporation
+www.sciencepubco.com/index.php/JACST
+Face Recognition using Block-Based
+DCT Feature Extraction
+K Manikantan1, Vaishnavi Govindarajan1,
+V V S Sasi Kiran1, S Ramachandran2
+Department of Electronics and Communication Engineering,
+M S Ramaiah Institute of Technology, Bangalore, Karnataka, India 560054
+E-mail:
+E-mail:
+E-mail:
+Department of Electronics and Communication Engineering,
+S J B Institute of Technology, Bangalore, Karnataka, India 560060
+E-mail:"
+e56c4c41bfa5ec2d86c7c9dd631a9a69cdc05e69,Human Activity Recognition Based on Wearable Sensor Data: A Standardization of the State-of-the-Art,"Human Activity Recognition Based on Wearable
+Sensor Data: A Standardization of the
+State-of-the-Art
+Artur Jord˜ao, Antonio C. Nazare Jr., Jessica Sena and William Robson Schwartz
+Smart Surveillance Interest Group, Computer Science Department
+Universidade Federal de Minas Gerais, Brazil
+Email: {arturjordao, antonio.nazare, jessicasena,"
+e5604c3f61eb7e8b80bf423f7828d8c1fa0f1d32,Towards Image Understanding from Deep Compression without Decoding,"Published as a conference paper at ICLR 2018
+TOWARDS IMAGE UNDERSTANDING FROM
+DEEP COMPRESSION WITHOUT DECODING
+Robert Torfason
+ETH Zurich, Merantix
+Fabian Mentzer
+ETH Zurich
+Eirikur Agustsson
+ETH Zurich
+Michael Tschannen
+ETH Zurich
+Radu Timofte
+ETH Zurich, Merantix
+Luc Van Gool
+ETH Zurich, KU Leuven"
+e5342233141a1d3858ed99ccd8ca0fead519f58b,Finger print and Palm print based Multibiometric Authentication System with GUI Interface,"ISSN: 2277 – 9043
+International Journal of Advanced Research in Computer Science and Electronics Engineering (IJARCSEE)
+Volume 2, Issue 2, February 2013
+Finger print and Palm print based Multibiometric
+Authentication System with GUI Interface
+KALAIGNANASELVI.A#1, NARASIMMALOU.T*2
+#PG Scholar, Dept. of CSE, Dr.Pauls Engineering College, Villupuram District, Tamilnadu, India.
+*Assistant Professor, Dept. of CSE, Dr.Pauls Engineering College, Villupuram District, Tamilnadu, India."
+e52be9a083e621d9ed29c8e9914451a6a327ff59,UvA - DARE ( Digital Academic Repository ) Communication and Automatic Interpretation of Affect from Facial Expressions,"UvA-DARE (Digital Academic Repository)
+Communication and Automatic Interpretation of Affect from Facial Expressions
+Salah, A.A.; Sebe, N.; Gevers, T.
+Published in:
+Affective computing and interaction: psychological, cognitive, and neuroscientific perspectives
+Link to publication
+Citation for published version (APA):
+Salah, A. A., Sebe, N., & Gevers, T. (2010). Communication and Automatic Interpretation of Affect from Facial
+Expressions. In D. Gökçay, & G. Yildirim (Eds.), Affective computing and interaction: psychological, cognitive,
+nd neuroscientific perspectives (pp. 157-183). Hershey, PA: Information Science Reference.
+General rights
+It is not permitted to download or to forward/distribute the text or part of it without the consent of the author(s) and/or copyright holder(s),
+other than for strictly personal, individual use, unless the work is under an open content license (like Creative Commons).
+Disclaimer/Complaints regulations
+If you believe that digital publication of certain material infringes any of your rights or (privacy) interests, please let the Library know, stating
+your reasons. In case of a legitimate complaint, the Library will make the material inaccessible and/or remove it from the website. Please Ask
+the Library: http://uba.uva.nl/en/contact, or a letter to: Library of the University of Amsterdam, Secretariat, Singel 425, 1012 WP Amsterdam,
+The Netherlands. You will be contacted as soon as possible.
+Download date: 12 Sep 2017
+UvA-DARE is a service provided by the library of the University of Amsterdam (http://dare.uva.nl)"
+e5d13afe956d8581a69e9dc2d1f43a43f1e2f311,Automatic Facial Feature Extraction for Face Recognition,"We are IntechOpen,
+the world’s leading publisher of
+Open Access books
+Built by scientists, for scientists
+,700
+08,500
+.7 M
+Open access books available
+International authors and editors
+Downloads
+Our authors are among the
+Countries delivered to
+TOP 1%
+2.2%
+most cited scientists
+Contributors from top 500 universities
+Selection of our books indexed in the Book Citation Index
+in Web of Science™ Core Collection (BKCI)
+Interested in publishing with us?
+Contact"
+e58434a01c45505995b000f5e631843a2f2ea582,Scale coding bag of deep features for human attribute and action recognition,"Noname manuscript No.
+(will be inserted by the editor)
+Scale Coding Bag of Deep Features for Human Attribute
+nd Action Recognition
+Fahad Shahbaz Khan, Joost van de Weijer, Rao Muhammad Anwer,
+Andrew D. Bagdanov, Michael Felsberg, Jorma Laaksonen
+Received:"
+e58f08ad6e0edd567f217ef08de1701a8c29fcc8,Pseudo-task Augmentation: From Deep Multitask Learning to Intratask Sharing - and Back,"Pseudo-task Augmentation: From Deep Multitask
+Learning to Intratask Sharing—and Back
+Elliot Meyerson 1 2 Risto Miikkulainen 1 2"
+e577847c36251dc31282ad57ea969ea8297369be,Face scanning and spontaneous emotion preference in Cornelia de Lange syndrome and Rubinstein-Taybi syndrome,"Crawford et al. Journal of Neurodevelopmental Disorders (2015) 7:22
+DOI 10.1186/s11689-015-9119-4
+R ES EAR CH
+Face scanning and spontaneous emotion
+preference in Cornelia de Lange syndrome
+nd Rubinstein-Taybi syndrome
+Hayley Crawford1,2*, Joanna Moss2,3, Joseph P. McCleery4, Giles M. Anderson5 and Chris Oliver2
+Open Access"
+e5799fd239531644ad9270f49a3961d7540ce358,Kinship classification by modeling facial feature heredity,"KINSHIP CLASSIFICATION BY MODELING FACIAL FEATURE HEREDITY
+Ruogu Fang1, Andrew C. Gallagher1, Tsuhan Chen1, Alexander Loui2
+Dept. of Elec. and Computer Eng., Cornell University 2Eastman Kodak Company"
+e5eb7fa8c9a812d402facfe8e4672670541ed108,Performance of PCA Based Semi-supervised Learning in Face Recognition Using MPEG-7 Edge Histogram Descriptor,"Performance of PCA Based Semi-supervised
+Learning in Face Recognition Using MPEG-7
+Edge Histogram Descriptor
+Shafin Rahman, Sheikh Motahar Naim, Abdullah Al Farooq and Md. Monirul Islam
+Department of Computer Science and Engineering
+Bangladesh University of Engineering and Technology(BUET)
+Dhaka-1000, Bangladesh
+Email: {shafin buet, naim sbh2007,"
+e2b8ba13586bb9a96e4813472d1f763d37ead47d,Media Content Access: Image-Based Filtering,"(IJACSA) International Journal of Advanced Computer Science and Applications,
+Vol. 9, No. 3, 2018
+Media Content Access: Image-Based Filtering
+Rehan Ullah Khan1, Ali Alkhalifah2
+Information Technology Department
+Qassim University, Al-Qassim, KSA"
+e2059946b69e0854f21919c1cf13c3f618f48d12,Deep Architectures and Ensembles for Semantic Video Classification,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2018
+Deep Architectures and Ensembles for Semantic
+Video Classification
+Eng-Jon Ong, Sameed Husain, Mikel Bober-Irizar, Miroslaw Bober∗"
+e267c813d8804019fbd8e018171dd05255b10fee,Performance Analysis of Pca Based Techniques for Face Authentication,"Canadian Journal of Pure and Applied Sciences
+Vol. 9, No. 1, pp. 3299-3306, February 2015
+Online ISSN: 1920-3853; Print ISSN: 1715-9997
+Available online at www.cjpas.net
+PERFORMANCE ANALYSIS OF PCA BASED TECHNIQUES
+FOR FACE AUTHENTICATION
+*Krishna Dharavath, Fazal Ahmed Talukdar, Rabul Hussain Laskar
+Speech and Image Processing Research Lab.,
+Department of Electronics and Communication Engineering
+National Institute of Technology Silchar, India"
+e2edc7e7a2832e2f6014945afce4f76643cab02c,Universität Augsburg An annotated data set for pose estimation of swimmers,"Universit¨at Augsburg
+An annotated data set for pose
+estimation of swimmers
+Thomas Greif and Rainer Lienhart
+Report 2009-18
+Januar 2010
+Institut f¨ur Informatik
+D-86135 Augsburg"
+e260847323b48a79bd88dd95a1499cd3053d3645,Reconstructing perceived faces from brain activations with deep adversarial neural decoding,"PDF hosted at the Radboud Repository of the Radboud University
+Nijmegen
+The following full text is a publisher's version.
+For additional information about this publication click this link.
+http://hdl.handle.net/2066/179505
+Please be advised that this information was generated on 2018-07-04 and may be subject to
+hange."
+e27ef52c641c2b5100a1b34fd0b819e84a31b4df,SARC3D: A New 3D Body Model for People Tracking and Re-identification,"SARC3D: a new 3D body model for People
+Tracking and Re-identification
+Davide Baltieri, Roberto Vezzani, and Rita Cucchiara
+Dipartimento di Ingegneria dell’Informazione - University of Modena and Reggio
+Emilia, Via Vignolese, 905 - 41125 Modena - Italy"
+e23a75430f777e982b0715b6f8a048d4bbfea438,Maximum Margin Metric Learning over Discriminative Nullspace for Person Re-identification,"Maximum Margin Metric Learning Over Discriminative
+Nullspace for Person Re-identification
+T M Feroz Ali1 and Subhasis Chaudhuri1
+Indian Institute of Technology Bombay, Mumbai, India"
+e2baf990bc60ef0d24b7556d238e40566ad23d2f,Modified Gabor Filter based Vehicle Verification,"International Journal of Computer Applications® (IJCA) (0975 – 8887)
+National Conference cum Workshop on Bioinformatics and Computational Biology, NCWBCB- 2014
+Modified Gabor Filter based Vehicle Verification
+Amrutha Ramachandran
+Mtech,AE&C,
+Dept. of EC,
+NCERC,Kerala.
+towards
+ollision
+voidance
+ccess,potential"
+e21cdb56c23e2a834a611d51abce545d2e8d01a2,Gender and Identity Classification for a Naive and Evolving System,"Gender and Identity Classification for a Naive and Evolving System
+M. Castrill´on-Santana, O. D´eniz-Su´arez, J. Lorenzo-Navarro and M. Hern´andez-Tejera
+IUSIANI - Edif. Ctral. del Parque Cient´ıfico Tecnol´ogico
+Universidad de Las Palmas de Gran Canaria, Spain"
+e295f31df11ec700851c2413b9bba644a91b0629,3D face reconstruction in a binocular passive stereoscopic system using face properties,"D FACE RECONSTRUCTION IN A BINOCULAR PASSIVE STEREOSCOPIC SYSTEM
+USING FACE PROPERTIES
+Amel AISSAOUI, Jean MARTINET and Chaabane DJERABA
+LIFL UMR Lille1-CNRS n 8022, IRCICA, 50 avenue Halley, 59658 Villeneuve d’Ascq, France"
+e27acf161f569aa876e46ffae2058bb275f12a60,Interactive learning of heterogeneous visual concepts with local features,"Interactive Learning of Heterogeneous Visual Concepts
+with Local Features
+Wajih Ouertani
+INRIA − IMEDIA project
+nd INRA, France
+Michel Crucianu
+INRIA − IMEDIA project
+nd CEDRIC − CNAM, France
+Nozha Boujemaa
+INRIA − IMEDIA project
+78153 Le Chesnay, France"
+e2e8db754b1ab4cd8aa07f5c5940f6921a1b7187,Interpretable visual models for human perception-based object retrieval,"Interpretable Visual Models for Human
+Perception-Based Object Retrieval
+Ahmed Rebai, Alexis Joly, Nozha Boujemaa
+To cite this version:
+Ahmed Rebai, Alexis Joly, Nozha Boujemaa.
+Based Object Retrieval.
+trieval, Apr 2011, Trento,
+<10.1145/1991996.1992017>. <hal-00642232>
+Italy.
+Interpretable Visual Models for Human Perception-
+ICMR’11 - First ACM International Conference on Multimedia Re-
+ACM, pp.21:1–21:8, 2011, <http://www.icmr2011.org/>.
+HAL Id: hal-00642232
+https://hal.inria.fr/hal-00642232
+Submitted on 17 Nov 2011
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or"
+e2fc290a245d9f5c545e2e92ee8fcaff4908b97f,Picture-to-Identity linking of social network accounts based on Sensor Pattern Noise,"Picture-to-Identity linking of social network accounts based on
+Sensor Pattern Noise
+Riccardo Satta∗ and Pasquale Stirparo∗+
+Institute for the Protection and Security of the Citizen,
+Joint Research Centre (JRC), European Commission, Ispra (VA), Italy
++Royal Institute of Technology (KTH), Stockholm, Sweden
+{riccardo.satta,
+Keywords:
+linking, digital image forensics
+social network, Sensor Pattern Noise, identity,"
+e2d265f606cd25f1fd72e5ee8b8f4c5127b764df,Real-Time End-to-End Action Detection with Two-Stream Networks,"Real-Time End-to-End Action Detection
+with Two-Stream Networks
+Alaaeldin El-Nouby∗†, Graham W. Taylor∗†‡
+School of Engineering, University of Guelph
+Vector Institute for Artificial Intelligence
+Canadian Institute for Advanced Research"
+e282bf5a679ca4e8b7d9a2ed56d3b40dc440ab53,Referenceless Quality Estimation for Natural Language Generation,"Referenceless Quality Estimation for Natural Language Generation
+Ondˇrej Duˇsek 1 Jekaterina Novikova 1 Verena Rieser 1"
+e24294adfcdb0334c310823c591f15e8829dc224,Deep Neural Networks and Regression Models for Object Detection and Pose Estimation,
+e2279676b01e477b5e7333bab276678f4ad34753,Searching Image with Hash Code Generations,"International Research Journal of Engineering and Technology (IRJET) e-ISSN: 2395 -0056
+Volume: 02 Issue: 05 | Aug-2015 www.irjet.net p-ISSN: 2395-0072
+SEARCHING IMAGE WITH HASH CODE GENERATIONS
+R.Lawanya,*2Mrs.G.Sangeetha Lakshmi, 3Ms.A.Sivasankari
+,*2,3Department of Computer Science,DKM College for Women, Vellore,
+Tamil Nadu, India.
+----------------------------------------------------------------------------------------------------------------------"
+e2af85dc41269bc7c50fcf2fb35bfeb75e3d6ee4,xytocin Improves “ Mind-Reading ” in Humans,"PRIORITY COMMUNICATION
+Oxytocin Improves “Mind-Reading” in Humans
+Gregor Domes, Markus Heinrichs, Andre Michel, Christoph Berger, and Sabine C. Herpertz
+Background: The ability to “read the mind” of other individuals, that is, to infer their mental state by interpreting subtle social cues, is
+indispensable in human social interaction. The neuropeptide oxytocin plays a central role in social approach behavior in nonhuman
+mammals.
+Methods: In a double-blind, placebo-controlled, within-subject design, 30 healthy male volunteers were tested for their ability to infer
+the affective mental state of others using the Reading the Mind in the Eyes Test (RMET) after intranasal administration of 24 IU oxytocin.
+Results: Oxytocin improved performance on the RMET compared with placebo. This effect was pronounced for difficult compared with
+easy items.
+Conclusions: Our data suggest that oxytocin improves the ability to infer the mental state of others from social cues of the eye region.
+Oxytocin might play a role in the pathogenesis of autism spectrum disorder, which is characterized by severe social impairment.
+Key Words: Emotion, oxytocin, peptide, social cognition, theory of
+T he ability to infer the internal state of another person to
+dapt one’s own behavior is a cornerstone of all human
+social interactions. Humans have to infer internal states
+from external cues such as facial expressions in order to make
+sense of or predict another person’s behavior, an ability that is
+referred to as “mind-reading” (Siegal and Varley 2002; Stone et al
+998). In particular, individuals with autism have distinct diffi-"
+e2afea1a84a5bdbcb64d5ceadaa2249195e1fd82,DOOM Level Generation Using Generative Adversarial Networks,"DOOM Level Generation using Generative
+Adversarial Networks
+Edoardo Giacomello
+Dipartimento di Elettronica,
+Informazione e Bioinformatica
+Politecnico di Milano
+Pier Luca Lanzi
+Dipartimento di Elettronica,
+Informazione e Bioinformatica
+Politecnico di Milano
+Daniele Loiacono
+Dipartimento di Elettronica,
+Informazione e Bioinformatica
+Politecnico di Milano"
+e23ed8642a719ff1ab08799257d9566ed3bba403,Unsupervised Visual Attribute Transfer with Reconfigurable Generative Adversarial Networks,"Unsupervised Visual Attribute Transfer with
+Reconfigurable Generative Adversarial Networks
+Taeksoo Kim, Byoungjip Kim, Moonsu Cha, Jiwon Kim
+SK T-Brain"
+e21c45b14d75545d40ed07896f26ec6f766f6a4b,Fisher GAN,"Fisher GAN
+Youssef Mroueh∗, Tom Sercu∗
+Equal Contribution
+AI Foundations, IBM Research AI
+IBM T.J Watson Research Center"
+e22cf1ca10c11991c2a43007e37ca652d8f0d814,A Biologically Inspired Visual Working Memory,"Under review as a conference paper at ICLR 2019
+A BIOLOGICALLY INSPIRED VISUAL WORKING
+MEMORY FOR DEEP NETWORKS
+Anonymous authors
+Paper under double-blind review"
+e21b1c10bee6a984971dcba414c22078dcfd21c2,Recent progress in semantic image segmentation,"Artificial Intelligence Review
+https://doi.org/10.1007/s10462-018-9641-3
+Recent progress in semantic image segmentation
+Xiaolong Liu1 · Zhidong Deng1 · Yuhan Yang2
+© The Author(s) 2018"
+e2a9b3e9001d57483acbb63dc2cfb91a90d3c12d,"Image worth Evaluation for False Biometric Detection: Submission to Iris, Fingerprint and Face Recognition","Volume 5, Issue 2, February 2015 ISSN: 2277 128X
+International Journal of Advanced Research in
+Computer Science and Software Engineering
+Research Paper
+Available online at: www.ijarcsse.com
+Image worth Evaluation for False Biometric Detection: Submission to
+Iris, Fingerprint and Face Recognition
+Boggarapu Srinivasulu, 2 Dr. M. Ekambaram Naidu, 3Dr. E. Sreenivasa Reddy
+Assistant Professor, Dept of CSE, Mother Theresa Institute of Engineering & Technology
+Palamaner, Chittoor Dist, AP, India
+Principal & Professor (CSE), TRR Engineering College, Hyderabad, India
+Dean& Professor (CSE), Acharya Nagarjuna University, Nagarjunanagar, Guntur, India"
+f496235629c02c98ad83b37d3d054ccfd0de0131,Learning Cross-Modal Deep Embeddings for Multi-Object Image Retrieval using Text and Sketch,"Learning Cross-Modal Deep Embeddings for
+Multi-Object Image Retrieval using Text and Sketch
+Sounak Dey, Anjan Dutta, Suman K. Ghosh, Ernest Valveny, Josep Llad´os
+Computer Vision Center, Computer Science Department
+Autonomous University of Barcelona
+Email: {sdey, adutta, sghosh, ernest,
+Barcelona, Spain
+Umapada Pal
+CVPR Unit
+Indian Statistical Institute
+Kolkata, India
+Email:"
+f412d9d7bc7534e7daafa43f8f5eab811e7e4148,Running Head : Anxiety and Emotional Faces in WS 2,"Durham Research Online
+Deposited in DRO:
+6 December 2014
+Version of attached le:
+Accepted Version
+Peer-review status of attached le:
+Peer-reviewed
+Citation for published item:
+Kirk, H. E. and Hocking, D. R. and Riby, D. M. and Cornish, K. M. (2013) 'Linking social behaviour and
+anxiety to attention to emotional faces in Williams syndrome.', Research in developmental disabilities., 34
+(12). pp. 4608-4616.
+Further information on publisher's website:
+http://dx.doi.org/10.1016/j.ridd.2013.09.042
+Publisher's copyright statement:
+NOTICE: this is the author's version of a work that was accepted for publication in Research in Developmental
+Disabilities. Changes resulting from the publishing process, such as peer review, editing, corrections, structural
+formatting, and other quality control mechanisms may not be reected in this document. Changes may have been made
+to this work since it was submitted for publication. A denitive version was subsequently published in Research in
+Developmental Disabilities, 34, 12, December 2013, 10.1016/j.ridd.2013.09.042.
+Additional information:"
+f442a2f2749f921849e22f37e0480ac04a3c3fec,Critical Features for Face Recognition in Humans and Machines,"Critical Features for Face Recognition in Humans and Machines Naphtali Abudarham1, Lior Shkiller1, Galit Yovel1,2 1School of Psychological Sciences, 2Sagol School of Neuroscience Tel Aviv University, Tel Aviv, Israel Correspondence regarding this manuscript should be addressed to: Galit Yovel School of Psychological Sciences & Sagol School of Neuroscience Tel Aviv University Tel Aviv, 69978, Israel Email:"
+f4b40b3dc27897fdc40f419a42d64fd1ff80cc9d,A Dual-Source Approach for 3D Human Pose Estimation from a Single Image,"SUBMITTED TO COMPUTER VISION AND IMAGE UNDERSTANDING.
+A Dual-Source Approach for 3D Human Pose
+Estimation from a Single Image
+Umar Iqbal*, Andreas Doering*, Hashim Yasin, Björn Krüger, Andreas Weber, and Juergen Gall"
+f44af3b10a67fe62fd26eb82dd228a3cdeb980e1,"Understand, Compose and Respond - Answering Visual Questions by a Composition of Abstract Procedures","Understand, Compose and Respond
+Understand, Compose and Respond - Answering Visual"
+f4f6fc473effb063b7a29aa221c65f64a791d7f4,Facial expression recognition in the wild based on multimodal texture features,"Downloaded From: https://www.spiedigitallibrary.org/journals/Journal-of-Electronic-Imaging on 4/20/2018 Terms of Use: https://www.spiedigitallibrary.org/terms-of-use
+FacialexpressionrecognitioninthewildbasedonmultimodaltexturefeaturesBoSunLiandongLiGuoyanZhouJunHeBoSun,LiandongLi,GuoyanZhou,JunHe,“Facialexpressionrecognitioninthewildbasedonmultimodaltexturefeatures,”J.Electron.Imaging25(6),061407(2016),doi:10.1117/1.JEI.25.6.061407."
+f4ce7c36586c27783a1b0e737c2834f39f9d029d,Advanced non linear dimensionality reduction methods for multidimensional time series : applications to human motion analysis,"Advanced Nonlinear
+Dimensionality Reduction
+Methods for Multidimensional
+Time Series: Application to
+Human Motion Analysis
+Michał Lewandowski
+Submitted in partial fulfilment of the requirements of
+Kingston University for the degree of
+Doctor of Philosophy
+June, 2011"
+f4373f5631329f77d85182ec2df6730cbd4686a9,Recognizing Gender from Human Facial Regions using Genetic Algorithm,"Soft Computing manuscript No.
+(will be inserted by the editor)
+Recognizing Gender from Human Facial Regions using
+Genetic Algorithm
+Avirup Bhattacharyya · Rajkumar Saini ·
+Partha Pratim Roy · Debi Prosad Dogra ·
+Samarjit Kar
+Received: date / Accepted: date"
+f423e2072441925a16d95e7092005abf602b7145,Survey on 2D and 3D Human Pose Recovery,"Survey on 2D and 3D Human Pose
+Recovery
+Xavier Perez-Sala, Email: a;c,
+Sergio Escalera, Email: b;c and
+Cecilio Angulo, Email: a
+CETpD-UPC Technical Research Center for Dependency Care and
+Autonomous Living, Universitat Polit(cid:18)ecnica de Catalunya, Ne(cid:18)apolis, Rambla de
+l’Exposici(cid:19)o, 59-69, 08800 Vilanova i la Geltru, Spain
+Dept. Mathematics, Universitat de Barcelona, Gran Via de les Corts Catalanes
+Computer Vision Center, Campus UAB, Edi(cid:12)ci 0, 08193, Bellaterra, Spain
+585, 08007, Barcelona, Spain"
+f43327075c17e71ee713ad727aa473230a432a90,Geometry meets semantics for semi-supervised monocular depth estimation,"Geometry meets semantics for semi-supervised
+monocular depth estimation
+Pierluigi Zama Ramirez, Matteo Poggi, Fabio Tosi,
+Stefano Mattoccia, and Luigi Di Stefano
+University of Bologna,
+Viale del Risorgimento 2, Bologna, Italy"
+f439f9a0bd535eab00cbb93c1fa7083615a08d1a,Procedural Modeling and Physically Based Rendering for Synthetic Data Generation in Automotive Applications,"Procedural Modeling and Physically Based Rendering for Synthetic Data
+Generation in Automotive Applications
+Apostolia Tsirikoglou1,∗ Joel Kronander1 Magnus Wrenninge2,† Jonas Unger1,‡
+Link¨oping University, Sweden
+7D Labs
+Figure 1: Example images produced using our method for synthetic data generation."
+f47404424270f6a20ba1ba8c2211adfba032f405,Identification of Face Age range Group using Neural Network,"International Journal of Emerging Technology and Advanced Engineering
+Website: www.ijetae.com (ISSN 2250-2459, Volume 2, Issue 5, May 2012)
+Identification of Face Age range Group using Neural
+Network
+Sneha Thakur1, Ligendra Verma2
+1M.Tech scholar, CSE, RITEE Raipur
+2 Reader, MCA dept, RITEE Raipur"
+f4b729d218139f1e93cc9d4df05fbf699d2e9d07,Introduction to the Special Issue on Recent Advances in Biometric Systems [Guest Editorial],"IEEE TRANSACTIONS ON SYSTEMS, MAN, AND CYBERNETICS—PART B: CYBERNETICS, VOL. 37, NO. 5, OCTOBER 2007
+Guest Editorial
+Introduction to the Special Issue on Recent
+Advances in Biometric Systems
+W E ARE pleased to present 14 papers in this special
+issue devoted to recent advances in biometric systems.
+A total of 78 papers were submitted for consideration for the
+special issue. Those that appear in this special issue result from
+a careful review process and consideration of timing for the
+special issue. Other papers, which were originally submitted for
+consideration for the special issue, may be undergoing major
+revisions and resubmission and appear at a later time in a
+regular issue of this journal or possibly in some other journal.
+In particular, several submissions in the area of iris biometrics
+could not be considered for this special issue due to their
+experimental results being based primarily on the CASIA 1
+iris image dataset [1].
+Papers on a broad variety of topics were submitted to the
+special issue. The large active areas of biometrics such as face,
+fingerprint, voice, signature, and iris were naturally well repre-"
+f43b60a33c585827bfa354d3d49fb148a1c26c3f,Identifying Well-formed Natural Language Questions,"Identifying Well-formed Natural Language Questions
+Manaal Faruqui Dipanjan Das
+Google AI Language"
+f4ebbeb77249d1136c355f5bae30f02961b9a359,Human Computation for Attribute and Attribute Value Acquisition,"Human Computation for Attribute and Attribute Value Acquisition
+Edith Law, Burr Settles, Aaron Snook, Harshit Surana, Luis von Ahn, Tom Mitchell
+School of Computer Science
+Carnegie Melon University"
+f445493badf53febbaeab340a4fca98d9e4ab7f7,Do CIFAR-10 Classifiers Generalize to CIFAR-10?,"Do CIFAR-10 Classifiers Generalize to CIFAR-10?
+Benjamin Recht
+UC Berkeley
+Rebecca Roelofs
+UC Berkeley
+Ludwig Schmidt
+Vaishaal Shankar
+UC Berkeley
+June 4, 2018"
+f4808e78bc648f9e1829c83a68a3e8ed4e7cf325,Development of an Efficient Face Recognition System based on Linear and Nonlinear Algorithms,"Development of an Efficient Face Recognition System based on Linear and Nonlinear Algorithms
+{tag} {/tag}
+International Journal of Computer Applications
+Foundation of Computer Science (FCS), NY, USA
+Volume 134
+Number 7
+Year of Publication: 2016
+Authors:
+Filani Araoluwa S., Adetunmbi Adebayo O.
+10.5120/ijca2016907932
+{bibtex}2016907932.bib{/bibtex}"
+f42dca4a4426e5873a981712102aa961be34539a,Next-Flow: Hybrid Multi-Tasking with Next-Frame Prediction to Boost Optical-Flow Estimation in the Wild,"Next-Flow: Hybrid Multi-Tasking with Next-Frame Prediction to Boost
+Optical-Flow Estimation in the Wild
+Nima Sedaghat
+University of Freiburg
+Germany"
+f49f1028052baa1588376a78a9dc64812748555e,Feature Fusion using Extended Jaccard Graph and Stochastic Gradient Descent for Robot,"JOURNAL OF LATEX CLASS FILES
+Feature Fusion using Extended Jaccard Graph and
+Stochastic Gradient Descent for Robot
+Shenglan Liu, Muxin Sun, Wei Wang, Feilong Wang"
+f31c9328b5b4678388c19a39064a8056313f7cf4,Two-Stream Multi-Rate Recurrent Neural Network for Video-Based Pedestrian Re-Identification,"IEEE TRANSACTIONS ON INDUSTRIAL INFORMATICS, VOL. XX, NO. XX, AUGUST 201X
+Two-Stream Multi-Rate Recurrent Neural
+Network for Video-Based Pedestrian
+Re-Identification
+Zhiqiang Zeng, Zhihui Li*, De Cheng, Huaxiang Zhang, Kun Zhan and Yi Yang"
+f3a34525fa7021322f132c80c9517f240cf1e742,Pose and Pathosformel in Aby Warburg's Bilderatlas,"Pose and Pathosformel in Aby Warburg’s
+Bilderatlas
+Leonardo Impett, Sabine S¨usstrunk
+School of Computer and Communication Sciences,
+´Ecole F´ed´erale Polytechnique de Lausanne, Switzerland"
+f34c85c24661ba9990146737fd557f7508677263,A New Pedestrian Detection Descriptor Based on the Use of Spatial Recurrences,"A New Pedestrian Detection Descriptor
+Based on the Use of Spatial Recurrences
+Carlos Serra-Toro and V. Javier Traver
+Departamento de Lenguajes y Sistemas Inform´aticos &
+Institute of New Imaging Technologies,
+Universitat Jaume I, 12071 Castell´on, Spain"
+f375bc91a5f7b1f2d36e41841ccc22f202be2dcf,Unsupervised Learning of Depth and Ego-Motion from Video,"Unsupervised Learning of Depth and Ego-Motion from Video
+Tinghui Zhou∗
+UC Berkeley
+Matthew Brown
+Google
+Noah Snavely
+Google
+David G. Lowe
+Google"
+f3b3d2c0d1d84a7f7bbaaaecb58457c15a947544,Understanding Grounded Language Learning Agents,"UNDERSTANDING GROUNDED LANGUAGE LEARNING
+AGENTS
+Felix Hill, Karl Moritz Hermann, Phil Blunsom & Stephen Clark
+Deepmind
+London
+{felixhill, kmh, pblunsom,"
+f36647e63a11486ef9cf7a5a1c86a40fda5d408a,CS 229 Final Report: Artistic Style Transfer for Face Portraits,"CS 229 Final Report: Artistic Style Transfer for Face Portraits
+Daniel Hsu, Marcus Pan, Chen Zhu
+{dwhsu, mpanj,
+Dec 16, 2016
+Introduction
+The goal of our project is to learn the content and style
+representations of face portraits, and then to combine
+them to produce new pictures. The content features of
+face are the features that identify a face, such as the
+outline shape. The stylistic features are the artistic char-
+acteristics of a certain portrait or painting, such as brush
+strokes, or background color. We forward-pass a content
+image, and several style images through a CNN to ex-
+tract the desired content and style features. Then we
+initialize a white noise image, and perform gradient de-
+scent on its pixels until it matches the desired style and
+content features.
+vNet. We hope our project can be a supplement to ex-
+isting implementations.
+Gradient Descent Loss Functions"
+f36c3ddd43ea7c2e803694aad89e5fd903715c81,"Biometric quality: a review of fingerprint, iris, and face","Bharadwaj et al. EURASIP Journal on Image and Video Processing 2014, 2014:34
+http://jivp.eurasipjournals.com/content/2014/1/34
+REVIEW
+Open Access
+Biometric quality: a review of fingerprint, iris,
+and face
+Samarth Bharadwaj, Mayank Vatsa* and Richa Singh"
+f3d9e347eadcf0d21cb0e92710bc906b22f2b3e7,"NosePose: a competitive, landmark-free methodology for head pose estimation in the wild","NosePose: a competitive, landmark-free
+methodology for head pose estimation in the wild
+Fl´avio H. B. Zavan, Antonio C. P. Nascimento, Olga R. P. Bellon and Luciano Silva
+IMAGO Research Group - Universidade Federal do Paran´a"
+f34a6c1bc9a7872c8dc4c35b678f87bb966ab0ab,"PHOG-Derived Aesthetic Measures Applied to Color Photographs of Artworks, Natural Scenes and Objects","PHOG-Derived Aesthetic Measures Applied
+to Color Photographs of Artworks,
+Natural Scenes and Objects
+Christoph Redies2, Seyed Ali Amirshahi1,2,
+Michael Koch1,2, and Joachim Denzler1
+Computer Vision Group, Friedrich Schiller University Jena, Germany
+http://www.inf-cv.uni-jena.de
+Institute of Anatomy I, Friedrich Schiller University,
+Jena University Hospital, Germany
+http://www.anatomie1.uniklinikum-jena.de"
+f33c427dc152c20537d2857bee1dda2287e85860,Feature Squeezing: Detecting Adversarial Examples in Deep Neural Networks,
+f39b88ac61264e9a33dcdf47722f0d048a8e490f,Interactive Data Integration and Entity Resolution for Exploratory Visual Data Analytics,"(cid:13)Copyright 2015
+Kristi Morton"
+f3ea181507db292b762aa798da30bc307be95344,Covariance Pooling For Facial Expression Recognition,"Covariance Pooling for Facial Expression Recognition
+Computer Vision Lab, ETH Zurich, Switzerland
+VISICS, KU Leuven, Belgium
+Dinesh Acharya†, Zhiwu Huang†, Danda Pani Paudel†, Luc Van Gool†‡
+{acharyad, zhiwu.huang, paudel,"
+f3062992cb10107b9d1e3699c8a61d5281886c4b,Foreground Consistent Human Pose Estimation Using Branch and Bound,"Foreground Consistent Human Pose Estimation
+Using Branch and Bound(cid:2)
+Jens Puwein1, Luca Ballan1, Remo Ziegler2, and Marc Pollefeys1
+Department of Computer Science, ETH Zurich, Switzerland
+Vizrt"
+f3b56b873c48929361c1cada7b18177e3f4d2727,"Development of a N-type GM-PHD Filter for Multiple Target, Multiple Type Visual Tracking","Development of a N-type GM-PHD Filter for
+Multiple Target, Multiple Type Visual Tracking
+Nathanael L. Baisa , Student Member, IEEE, and Andrew Wallace, Fellow, IET
+faced challenges not only in the uncertainty caused by data
+association but also in algorithmic complexity that increases
+exponentially with the number of targets and measurements.
+For instance, the MHT has an exponential complexity with
+time and cubic with the number of targets.
+To address the problems of increasing complexity, a unified
+framework which directly extends single to multiple target
+tracking by representing multi-target states and observations
+as random finite sets (RFS) was developed by Mahler [7].
+This estimates the states and cardinality of an unknown and
+time varying number of targets in the scene, and allows for
+target birth, death, handling clutter (false alarms), and missing
+detections. Mahler [7] proposed to propagate the first-order
+moment of the multi-target posterior, called the Probability
+Hypothesis Density (PHD), rather than the full multi-target
+posterior."
+f3dc67bb4cd3601ae9bdb7df4ed5036f525ff21d,Multimodal 2 DCNN action recognition from RGB-D Data with Video Summarization,"Master’s Thesis
+Multimodal 2DCNN action recognition from
+RGB-D Data with Video Summarization
+Vicent Roig Ripoll
+Master
+Artificial Intelligence
+Advisor: Sergio Escalera Guerrero
+Co-advisor: Maryam Asadi-Aghbolaghi
+October, 2017"
+f3ca251ac3b05397ea6d72f2a9a6f0cf619a2a32,Leveraging Weakly Annotated Data for Fashion Image Retrieval and Label Prediction,"Leveraging Weakly Annotated Data for Fashion Image Retrieval and Label
+Prediction
+Charles Corbi`ere1, Hedi Ben-Younes1,2, Alexandre Ram´e1, and Charles Ollion1
+Heuritech, Paris, France
+UPMC-LIP6, Paris, France"
+f3cf10c84c4665a0b28734f5233d423a65ef1f23,Title Temporal Exemplar-based Bayesian Networks for facialexpression recognition,"Title
+Temporal Exemplar-based Bayesian Networks for facial
+expression recognition
+Author(s)
+Shang, L; Chan, KP
+Citation
+Proceedings - 7Th International Conference On Machine
+Learning And Applications, Icmla 2008, 2008, p. 16-22
+Issued Date
+http://hdl.handle.net/10722/61208
+Rights
+This work is licensed under a Creative Commons Attribution-
+NonCommercial-NoDerivatives 4.0 International License.;
+International Conference on Machine Learning and Applications
+Proceedings. Copyright © IEEE.; ©2008 IEEE. Personal use of
+this material is permitted. However, permission to
+reprint/republish this material for advertising or promotional
+purposes or for creating new collective works for resale or
+redistribution to servers or lists, or to reuse any copyrighted
+component of this work in other works must be obtained from"
+f32db58cbb8319eb8f2cfa2720c810f8410eb569,A software suite for large-scale video- and image-based analytics,"The 8th International Conference on Bioinspired Information and Communications Technologies (BICT2014), pp. 384-385, Boston, December 1-3, 2014
+A software suite for large-scale video- and image-based
+analytics
+Jasmin Léveillé
+Isao Hayashi
+Kansai University"
+f3f65a8113d6a2dcbc690fd47dfee2dff0f41097,Generating 3D Faces Using Convolutional Mesh Autoencoders,"Generating 3D faces using Convolutional Mesh
+Autoencoders
+Anurag Ranjan, Timo Bolkart, Soubhik Sanyal, and Michael J. Black
+Max Planck Institute for Intelligent Systems
+{aranjan, tbolkart, ssanyal,
+T¨ubingen, Germany"
+f3b7938de5f178e25a3cf477107c76286c0ad691,Object Detection with Deep Learning: A Review,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, MARCH 2017
+Object Detection with Deep Learning: A Review
+Zhong-Qiu Zhao, Member, IEEE, Peng Zheng,
+Shou-tao Xu, and Xindong Wu, Fellow, IEEE"
+ebd36259defde84deb0d4c09695b54befe538ac8,Robust Generalized Low Rank Approximations of Matrices,"RESEARCH ARTICLE
+Robust Generalized Low Rank
+Approximations of Matrices
+Jiarong Shi*, Wei Yang, Xiuyun Zheng
+School of Science, Xi'an University of Architecture and Technology, Xi'an, China"
+eb526174fa071345ff7b1fad1fad240cd943a6d7,Deeply vulnerable: a study of the robustness of face recognition to presentation attacks,"Deeply Vulnerable – A Study of the Robustness of Face Recognition to
+Presentation Attacks
+Amir Mohammadi, Sushil Bhattacharjee, and S´ebastien Marcel ∗†"
+eb6243b1c9506f9450dab2a09db9c17fc2c2d364,3D Face Recognition system Based on Texture Gabor Features using PCA and Support Vector Machine as a Classifier,"ISSN(Online): 2319-8753
+ISSN (Print): 2347-6710
+International Journal of Innovative Research in Science,
+Engineering and Technology
+(An ISO 3297: 2007 Certified Organization)
+Vol. 5, Issue 8, August 2016
+3D Face Recognition system Based on Texture
+Gabor Features using PCA and Support
+Vector Machine as a Classifier
+Rajesh Yadav 1, Dr. Chandra kumarJha 2
+Assistant Professor, Department of Computer Science, Gurgaon Institute of Technology &Management, Gurgaon,
+Haryana, India1
+Associate Professor, Department of Computer Science &Engineering, AIM & ACT, Banasthali University, Jaipur,
+Rajasthan, India2"
+eb566490cd1aa9338831de8161c6659984e923fd,From Lifestyle Vlogs to Everyday Interactions,"From Lifestyle Vlogs to Everyday Interactions
+David F. Fouhey, Wei-cheng Kuo, Alexei A. Efros, Jitendra Malik
+EECS Department, UC Berkeley"
+eba31ad9871c6dd5c2e7c62a121bbb417dcb1223,Adaptive Ensemble Selection for Face Re-identification under Class Imbalance,"Adaptive Ensemble Selection for Face
+Re-Identification Under Class Imbalance(cid:63)
+Paulo Radtke1, Eric Granger1, Robert Sabourin1 and Dmitry Gorodnichy2
+. Laboratoire d’imagerie, de vision et d’intelligence artificielle
+´Ecole de technologie sup´erieure, Universit´e du Qu´ebec, Montreal, Canada
+{eric.granger,
+. Science and Engineering Directorate, Canada Border Services Agency
+Ottawa, Canada,"
+eb9312458f84a366e98bd0a2265747aaed40b1a6,Facial Expression Sequence Synthesis Based on Shape and Texture Fusion Model,"-4244-1437-7/07/$20.00 ©2007 IEEE
+IV - 473
+ICIP 2007"
+eb716dd3dbd0f04e6d89f1703b9975cad62ffb09, Visual Object Category Discovery in Images and Videos,"Copyright
+Yong Jae Lee"
+ebc2643567b1c614727cd7ecf1d0604972572568,Robust Subspace Estimation Using Low-rank,"ROBUST SUBSPACE ESTIMATION USING LOW-RANK OPTIMIZATION.
+THEORY AND APPLICATIONS IN SCENE RECONSTRUCTION, VIDEO
+DENOISING, AND ACTIVITY RECOGNITION.
+OMAR OREIFEJ
+B.S. University of Jordan, 2006
+M.S. University of Central Florida, 2009
+A dissertation submitted in partial fulfillment of the requirements
+for the degree of Doctor of Philosophy
+in the Department of Electrical Engineering and Computer Science
+in the College of Engineering and Computer Science
+at the University of Central Florida
+Orlando, Florida
+Spring Term
+Major Professor: Mubarak Shah"
+eb4d2ec77fae67141f6cf74b3ed773997c2c0cf6,A new soft biometric approach for keystroke dynamics based on gender recognition,"Int. J. Information Technology and Management, Vol. 11, Nos. 1/2, 2012
+A new soft biometric approach for keystroke
+dynamics based on gender recognition
+Romain Giot* and Christophe Rosenberger
+GREYC Research Lab,
+ENSICAEN – Université de Caen Basse Normandie – CNRS,
+4000 Caen, France
+Fax: +33-231538110
+E-mail:
+E-mail:
+*Corresponding author"
+eb4edbec8cb122de07951e3cf54c33fc30dd1c19,Examining the Effects of Supervision for Transfer from Synthetic to Real Driving Domains,"Examining the Effects of Supervision for Transfer from Synthetic to Real
+Driving Domains
+Vashisht Madhavan"
+ebb7cc67df6d90f1c88817b20e7a3baad5dc29b9,Fast algorithms for Higher-order Singular Value Decomposition from incomplete data,"Journal of Computational Mathematics
+Vol.xx, No.x, 200x, 1–25.
+http://www.global-sci.org/jcm
+doi:??
+Fast algorithms for Higher-order Singular Value Decomposition
+from incomplete data*
+Department of Mathematics, University of Alabama, Tuscaloosa, AL
+Yangyang Xu
+Email:"
+ebabf19e66ef1253fda8d39a0569787c65e60a9e,Multi-person Tracking with Sparse Detection and Continuous Segmentation,"Multi-Person Tracking with Sparse Detection and
+Continuous Segmentation
+Dennis Mitzel1, Esther Horbert1, Andreas Ess2, Bastian Leibe1
+UMIC Research Centre RWTH Aachen University, Germany
+Computer Vision Laboratory, ETH Zurich, Switzerland"
+ebabd1f7bc0274fec88a3dabaf115d3e226f198f,Driver Drowsiness Detection System Based on Feature Representation Learning Using Various Deep Networks,"Driver drowsiness detection system based on feature
+representation learning using various deep networks
+Sanghyuk Park, Fei Pan, Sunghun Kang and Chang D. Yoo
+School of Electrical Engineering, KAIST,
+Guseong-dong, Yuseong-gu, Dajeon, Rep. of Korea
+{shine0624, feipan, sunghun.kang, cd"
+eb48a58b873295d719827e746d51b110f5716d6c,Face Alignment Using K-Cluster Regression Forests With Weighted Splitting,"Face Alignment Using K-cluster Regression Forests
+With Weighted Splitting
+Marek Kowalski and Jacek Naruniec"
+ebd5df2b4105ba04cef4ca334fcb9bfd6ea0430c,Fast Localization of Facial Landmark Points,"Fast Localization of Facial Landmark Points
+Nenad Markuˇs*, Miroslav Frljak*, Igor S. Pandˇzi´c*, J¨orgen Ahlberg†, and Robert Forchheimer†
+* University of Zagreb, Faculty of Electrical Engineering and Computing, Unska 3, 10000 Zagreb, Croatia
+Link¨oping University, Department of Electrical Engineering, SE-581 83 Link¨oping, Sweden
+March 28, 2014"
+eb33adf3f8eb5c07b58a1433734ab1fee5d77c93,"Singleton, C. J., Ashwin, C. and Brosnan, M. (2014) Physiological Responses to Social and Nonsocial Stimuli in Neurotypical Adults With High and Low Levels of Autistic Traits:Implications for Understanding Nonsocial Drive in Autism Spectrum","Singleton, C. J., Ashwin, C. and Brosnan, M. (2014) Physiological
+Responses to Social and Nonsocial Stimuli in Neurotypical
+Adults With High and Low Levels of Autistic Traits:Implications
+for Understanding Nonsocial Drive in Autism Spectrum
+Disorders. Autism Research, 7 (6). pp. 695-703. ISSN 1939-3792
+Link to official URL (if available): http://dx.doi.org/10.1002/aur.1422
+Opus: University of Bath Online Publication Store
+http://opus.bath.ac.uk/
+This version is made available in accordance with publisher policies.
+Please cite only the published version using the reference above.
+See http://opus.bath.ac.uk/ for usage policies.
+Please scroll down to view the document."
+eb0e0a40372db32d30ceaefad046b213fac977f4,Scene Understanding Using Back Propagation by Neural Network,"Scene Understanding Using Back Propagation by Neural Network
+SCENE UNDERSTANDING USING BACK PROPAGATION BY
+NEURAL NETWORK
+ARTI TIWARI1 & JAGVIR VERMA2
+1,2Department of Elex & Telecomm. Engg.Chouksey Engg. College,Bilaspur
+intelligent human-computer"
+eb0e5db282f88d47b65f98df70c2e7c78b8647a6,Image Provenance Analysis at Scale,"Image Provenance Analysis at Scale
+Daniel Moreira, Aparna Bharati, Student Member, IEEE, Joel Brogan, Student Member, IEEE,
+Allan Pinto, Student Member, IEEE, Michael Parowski, Kevin W. Bowyer, Fellow, IEEE,
+Patrick J. Flynn, Fellow, IEEE, Anderson Rocha, Senior Member, IEEE,
+nd Walter J. Scheirer, Senior Member, IEEE"
+eb044760b6502431da6b6f3d5ad11aaab851a1ff,Video Storytelling,"A SUBMISSION TO IEEE TRANSACTIONS ON MULTIMEDIA
+Video Storytelling
+Junnan Li, Yongkang Wong, Member, IEEE, Qi Zhao, Member, IEEE, Mohan S. Kankanhalli, Fellow, IEEE"
+ebf204e0a3e137b6c24e271b0d55fa49a6c52b41,Visual Tracking Using Deep Motion Features,"Master of Science Thesis in Electrical Engineering
+Department of Electrical Engineering, Linköping University, 2016
+Visual Tracking Using
+Deep Motion Features
+Susanna Gladh"
+c7774fd600630684cc1d6be8313e2935bb198880,Adapting Hausdorff Metrics to Face Detection Systems: A Scale-Normalized Hausdorff Distance Approach,"Adapting Hausdorff metrics to face detection
+systems: a scale-normalized Hausdorff distance
+approach
+Pablo Suau
+Departamento de Ciencia de la Computaci´on e Inteligencia Artificial
+Universidad de Alicante, Ap. de correos 99, 03080, Alicante (Spain)"
+c74a42afeae520ff6ab280d17bccf0d082ba8de5,The Concept of Comprehensive Data Analysis from Ultra-Wideband Subsystem for Smart City Positioning Purposes,"Preprints (www.preprints.org) | NOT PEER-REVIEWED | Posted: 25 October 2018 doi:10.20944/preprints201810.0609.v1
+Article
+The Concept of Comprehensive Data Analysis from
+Ultra-Wideband Subsystem for Smart City
+Positioning Purposes
+Damian Grzechca *, Krzysztof Hanzel and Krzysztof Paszek
+Faculty of Automatic Control, Electronics and Computer Science,
+Silesian University of Technology Gliwice, Poland;
+* Correspondence: Tel.: +48-32-237-2717"
+c7fff0d0a6312965b269c6180b2112babd40564c,Unsupervised Person Re-identification: Clustering and Fine-tuning,"Unsupervised Person Re-identification:
+Clustering and Fine-tuning
+Hehe Fan, Liang Zheng and Yi Yang"
+c726ea46544968335f1e51be633f15d0cc0f0311,Generalized feature learning and indexing for object localization and recognition,"Generalized Feature Learning and Indexing for Object Localization and
+Recognition
+Ning Zhou∗
+UNC, Charlotte
+Anelia Angelova∗
+Google Inc
+Jianping Fan
+UNC, Charlotte"
+c7ea9611446817f7b668882061ab11c7e998296c,Towards a Crowd Analytic Framework For Crowd Management in Majid-al-Haram,"Towards a Crowd Analytic Framework For Crowd
+Management in Majid-al-Haram
+Sultan Daud Khan1,*, Muhammad Tayyab1, Muhammad Khurram Amin1, Akram Nour1,
+Anas Basalamah1, Saleh Basalamah1, and Sohaib Ahmad Khan1,2,*
+Technology Innovation Center, Wadi Makkah, Makkah Al Mukarramah, Saudi Arabia
+Science and Technology Unit, Umm Al Qura University, Makkah Al Mukarramah, Saudi Arabia"
+c7e4c7be0d37013de07b6d829a3bf73e1b95ad4e,Dynemo: a Video Database of Natural Facial Expressions of Emotions,"The International Journal of Multimedia & Its Applications (IJMA) Vol.5, No.5, October 2013
+DYNEMO: A VIDEO DATABASE OF NATURAL FACIAL
+EXPRESSIONS OF EMOTIONS
+Anna Tcherkassof1, Damien Dupré1, Brigitte Meillon2, Nadine Mandran2,
+Michel Dubois1 and Jean-Michel Adam2
+LIP, Univ. Grenoble Alpes, BP 47 - 38040 Grenoble Cedex 9, France
+LIG, Univ. Grenoble Alpes, BP 53 - 38041 Grenoble Cedex 9, France"
+c757f6ee46208c1c26572265803068f8d837c384,Thermal imaging systems for real-time applications in smart cities,"Aalborg Universitet
+Thermal Imaging Systems for Real-Time Applications in Smart Cities
+Gade, Rikke; Moeslund, Thomas B.; Nielsen, Søren Zebitz; Skov-Petersen, Hans; Andersen,
+Hans Jørgen; Basselbjerg, Kent; Dam, Hans Thorhauge; Jensen, Ole B.; Jørgensen, Anders;
+Lahrmann, Harry Spaabæk; Madsen, Tanja Kidholm Osmann; Skouboe, Esben Bala; Povey,
+Bo Ø.
+Published in:
+International Journal of Computer Applications in Technology
+DOI (link to publication from Publisher):
+Publication date:
+Document Version
+Accepted author manuscript, peer reviewed version
+Link to publication from Aalborg University
+Citation for published version (APA):
+Gade, R., Moeslund, T. B., Nielsen, S. Z., Skov-Petersen, H., Andersen, H. J., Basselbjerg, K., ... Povey, B. Ø.
+(2016). Thermal Imaging Systems for Real-Time Applications in Smart Cities. International Journal of Computer
+General rights
+Copyright and moral rights for the publications made accessible in the public portal are retained by the authors and/or other copyright owners
+and it is a condition of accessing publications that users recognise and abide by the legal requirements associated with these rights.
+? Users may download and print one copy of any publication from the public portal for the purpose of private study or research."
+c76d143b3fa0d25e21580c583d39ab07fc937e71,Institutionen för systemteknik Department of Electrical Engineering Examensarbete 3 D Position Estimation of a Person of Interest in Multiple Video Sequences : People Detection,"Institutionen för systemteknik
+Department of Electrical Engineering
+Examensarbete
+3D Position Estimation of a Person of Interest in
+Multiple Video Sequences: People Detection
+Examensarbete utfört i Datorseende
+vid Tekniska högskolan vid Linköpings universitet
+Johannes Markström
+LiTH-ISY-EX--13/4721--SE
+Linköping 2013
+Department of Electrical Engineering
+Linköpings universitet
+SE-581 83 Linköping, Sweden
+Linköpings tekniska högskola
+Linköpings universitet
+581 83 Linköping"
+c7eb127e9cd67d645b9a7f59c03bc73183faefeb,Human Detection in Indoor Environments Using Multiple Visual Cues and a Mobile Robot,"Human Detection in Indoor Environments Using
+Multiple Visual Cues and a Mobile Robot
+Stefan Pszcz´o(cid:2)lkowski and Alvaro Soto
+Pontificia Universidad Catolica de Chile
+Santiago 22, Chile"
+c70ad19c90491e2de8de686b6a49f9bbe44692c0,Seeing with Humans: Gaze-Assisted Neural Image Captioning,"Seeing with Humans: Gaze-Assisted
+Neural Image Captioning
+Yusuke Sugano and Andreas Bulling"
+c7c405b6fc95ff2ccf2cb5b59942db4343558fc4,Pseudo 2D Hidden Markov Model Based Face Recognition System Using Singular Values Decomposition Coefficients,"Pseudo 2D Hidden Markov Model Based Face Recognition System Using Singular
+Values Decomposition Coefficients
+Mukundhan Srinivasan
+Department of Electronics & Communication Engineering
+Alpha College of Engineering
+Chennai, TN India
+Sabarigirish Vijayakumar
+Retail Domain
+Tata Consultancy Services (TCS)
+Chennai, TN India"
+c7de0c85432ad17a284b5b97c4f36c23f506d9d1,RANSAC-Based Training Data Selection for Speaker State Recognition,"INTERSPEECH 2011
+RANSAC-based Training Data Selection for Speaker State Recognition
+Elif Bozkurt1, Engin Erzin1, C¸ i˘gdem Ero˘glu Erdem2, A.Tanju Erdem3
+Multimedia, Vision and Graphics Laboratory, Koc¸ University, Istanbul, Turkey
+Department of Electrical and Electronics Engineering, Bahc¸es¸ehir University, Istanbul, Turkey
+Department of Electrical and Computer Engineering, ¨Ozye˘gin University, Istanbul, Turkey
+ebozkurt,"
+c7f63fc2ff20513c6dc233ec3419417b43b39209,Human Detection from Aerial Imagery for Automatic Counting of Shellfish Gatherers,"Human Detection from Aerial Imagery for Automatic Counting of
+Shellfish gatherers
+Mathieu Laroze, Luc Courtrai and Sébastien Lefèvre
+Univ. Bretagne-Sud, UMR 6074 IRISA
+{mathieu.laroze, luc.courtrai,
+F-56000, Vannes, France
+Keywords:
+Human Detection, Image Stitching, Aerial Imagery, Image Mosaicing, Patch Classification, Object Detection"
+c7f752eea91bf5495a4f6e6a67f14800ec246d08,Exploring the Transfer Learning Aspect of Deep Neural Networks in Facial Information Processing,"EXPLORING THE TRANSFER
+LEARNING ASPECT OF DEEP
+NEURAL NETWORKS IN FACIAL
+INFORMATION PROCESSING
+A DISSERTATION SUBMITTED TO THE UNIVERSITY OF MANCHESTER
+FOR THE DEGREE OF MASTER OF SCIENCE
+IN THE FACULTY OF ENGINEERING AND PHYSICAL SCIENCES
+Crefeda Faviola Rodrigues
+School of Computer Science"
+c7391b43bd0216daf697fb77906b76c71f5c50e2,Where Should You Attend While Driving?,"Where Should You Attend While Driving?
+Simone Calderara
+Stefano Alletto
+Andrea Palazzi∗
+Francesco Solera∗
+Rita Cucchiara
+University of Modena and Reggio Emilia"
+c7d7cf88d2e9f3194aec2121eb19dbfed170dba8,Unconstrained Gaze Estimation Using Random Forest Regression Voting,"Unconstrained Gaze Estimation Using Random Forest
+Regression Voting
+Amine Kacete, Renaud Séguier, Michel Collobert, Jérôme Royan
+To cite this version:
+Amine Kacete, Renaud Séguier, Michel Collobert, Jérôme Royan. Unconstrained Gaze Estimation
+Using Random Forest Regression Voting. Springer. ACCV 13th Asian Conference on Computer
+Vision, Nov 2016, Taipei, Taiwan. <http://www.accv2016.org/>. <hal-01393591>
+HAL Id: hal-01393591
+https://hal.archives-ouvertes.fr/hal-01393591
+Submitted on 7 Nov 2016
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de"
+c758b9c82b603904ba8806e6193c5fefa57e9613,Heterogeneous Face Recognition with CNNs,"Heterogeneous Face Recognition with CNNs
+Shreyas Saxena
+Jakob Verbeek
+INRIA Grenoble, Laboratoire Jean Kuntzmann"
+c7ecb2ca791fe23c182a06e7700c4e41f5ffa79d,A Review of Sentiment Analysis in Spanish Una Revisión Sobre el Análisis de Sentimientos en Español,"DOI: http://dx.doi.org/10.18180/tecciencia.2017.22.5
+A Review of Sentiment Analysis in Spanish
+Una Revisión Sobre el Análisis de Sentimientos en Español
+Carlos Henríquez Miranda1*, Jaime Guzmán2
+Universidad Autónoma, Barranquilla, Colombia
+Universitario Nacional de Colombia, Bogotá, Colombia
+Received: 11 Dec 2015
+Accepted: 6 Sep 2016
+Available Online: 7 Dec 2016"
+c7c03324833ba262eeaada0349afa1b5990c1ea7,A Wearable Face Recognition System on Google Glass for Assisting Social Interactions,"A Wearable Face Recognition System on Google
+Glass for Assisting Social Interactions
+Bappaditya Mandal∗, Chia Shue Ching, Liyuan Li, Vijay Ramaseshan
+Chandrasekhar, Cheston Tan Yin Chet and Lim Joo Hwee
+Visual Computing Department, Institute for Infocomm Research, Singapore
+Email address: (∗Contact author: Bappaditya Mandal);
+{scchia, lyli, vijay, cheston-tan,"
+c72914e2e999c99753d1d0058c459af69af6662a,CEREALS - Cost-Effective REgion-based Active Learning for Semantic Segmentation,"MACKOWIAK ET AL.: CEREALS
+CEREALS – Cost-Effective REgion-based
+Active Learning for Semantic Segmentation
+Robert Bosch GmbH
+Corporate Research - Computer Vision
+Robert-Bosch-Straße 200
+1139 Hildesheim, DE
+Heidelberg Collaboratory for Image
+Processing (HCI)
+Berliner Straße 43,
+69120 Heidelberg, DE
+Radek Mackowiak1
+Philip Lenz1
+Omair Ghori1
+Ferran Diego1
+Oliver Lange1
+Carsten Rother2"
+c719a718073128a985c957cdfa3f298706a180e6,Comparative Evaluations of Selected Tracking-by-Detection Approaches,"Comparative Evaluations of Selected
+Tracking-by-Detection Approaches
+Alhayat Ali Mekonnen, Frédéric Lerasle
+To cite this version:
+Alhayat Ali Mekonnen, Frédéric Lerasle. Comparative Evaluations of Selected Tracking-by-Detection
+Approaches. IEEE Transactions on Circuits and Systems for Video Technology, Institute of Electrical
+nd Electronics Engineers, 2018, <10.1109/TCSVT.2018.2817609>. <hal-01815850>
+HAL Id: hal-01815850
+https://hal.laas.fr/hal-01815850
+Submitted on 14 Jun 2018
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de"
+c737e65d7e8696f5a2878ac623c61aeff434f92d,The influences of face inversion and facial expression on sensitivity to eye contact in high-functioning adults with autism spectrum disorders.,"J Autism Dev Disord (2013) 43:2536–2548
+DOI 10.1007/s10803-013-1802-2
+O R I G I N A L P A P E R
+The Influences of Face Inversion and Facial Expression
+on Sensitivity to Eye Contact in High-Functioning Adults
+with Autism Spectrum Disorders
+Mark D. Vida • Daphne Maurer • Andrew J. Calder •
+Gillian Rhodes • Jennifer A. Walsh •
+Matthew V. Pachai • M. D. Rutherford
+Published online: 8 March 2013
+Ó Springer Science+Business Media New York 2013"
+c7c8d150ece08b12e3abdb6224000c07a6ce7d47,DeMeshNet: Blind Face Inpainting for Deep MeshFace Verification,"DeMeshNet: Blind Face Inpainting for Deep MeshFace Verification
+National Laboratory of Pattern Recognition, CASIA
+Center for Research on Intelligent Perception and Computing, CASIA
+Shu Zhang Ran He Tieniu Tan"
+c78fdd080df01fff400a32fb4cc932621926021f,Robust Automatic Facial Expression Detection Method,"Robust Automatic Facial Expression Detection
+Method
+Institute for Pattern Recognition and Artificial Intelligence/ Huazhong University of Science and Technology, Wuhan,
+Institute for Pattern Recognition and Artificial Intelligence/ Huazhong University of Science and Technology, Wuhan,
+Yan Ouyang
+China
+Nong Sang
+China
+Email:"
+c7742e63579cfea8655606ec6bd9047140efe96a,D and Pseudo-2d Hidden Markov Models for Image Analysis. Theoretical Introduction 1d and Pseudo-2d Hidden Markov Models for Image Analysis. Theoretical Introduction,"D and Pseudo-D Hidden Markov Models
+for Image Analysis.
+Theoretical Introduction
+ephane Marchand-Maillet - Multimedia Communications
+Email:
+Phone: + 
+Date: November 
+Technical Report RR- - Part A
+Con
+ecom’s research is partially supported by its industrial members:
+Ascom, Cegetel, France Telecom, Hitachi, IBM France, Motorola,
+Swisscom, Texas Instruments, and Thomson CSF.
+Multimedia Communications
+Institut EURECOM  BP  .
+
+T.R. RR- - Part A  November 
+c0e5a471179d2d8c7025febe77a90c3a99c7c9fa,Learning With ℓ1-Graph for Image Analysis,"IEEE TRANSACTIONS ON IMAGE PROCESSING, VOL. 19, NO. 4, APRIL 2010
+Learning With `1-Graph for Image Analysis
+Bin Cheng, Jianchao Yang, Student Member, IEEE, Shuicheng Yan, Senior Member, IEEE, Yun Fu, Member, IEEE,
+nd Thomas S. Huang, Life Fellow, IEEE"
+c0014e048a5d15ddfeffa075a1b819bcb93dd351,Simple and Efficient Visual Gaze Estimation,"Simple and Efficient Visual Gaze Estimation
+Roberto Valenti
+Nicu Sebe
+Intelligent Systems Lab
+Amsterdam
+Kruislaan 403, 1018SJ
+Amsterdam, The Netherlands
+Theo Gevers"
+c03f48e211ac81c3867c0e787bea3192fcfe323e,Mahalanobis Metric Scoring Learned from Weighted Pairwise Constraints in I-Vector Speaker Recognition System,"INTERSPEECH 2016
+September 8–12, 2016, San Francisco, USA
+Mahalanobis Metric Scoring Learned from Weighted Pairwise Constraints in
+I-vector Speaker Recognition System
+Zhenchun Lei1, Yanhong Wan1, Jian Luo1, Yingen Yang1
+School of Computer Information Engineering, Jiangxi Normal University, Nanchang, China"
+c038beaa228aeec174e5bd52460f0de75e9cccbe,Temporal Segment Networks for Action Recognition in Videos,"Temporal Segment Networks for Action
+Recognition in Videos
+Limin Wang, Yuanjun Xiong, Zhe Wang, Yu Qiao, Dahua Lin, Xiaoou Tang, and Luc Van Gool"
+c043f8924717a3023a869777d4c9bee33e607fb5,Emotion Separation Is Completed Early and It Depends on Visual Field Presentation,"Emotion Separation Is Completed Early and It Depends
+on Visual Field Presentation
+Lichan Liu1,2*, Andreas A. Ioannides1,2
+Lab for Human Brain Dynamics, RIKEN Brain Science Institute, Wakoshi, Saitama, Japan, 2 Lab for Human Brain Dynamics, AAI Scientific Cultural Services Ltd., Nicosia,
+Cyprus"
+c05a7c72e679745deab9c9d7d481f7b5b9b36bdd,"Naval Postgraduate School Monterey, California Approved for Public Release; Distribution Is Unlimited Biometric Challenges for Future Deployments: a Study of the Impact of Geography, Climate, Culture, and Social Conditions on the Effective Collection of Biometrics","NPS-CS-11-005
+NAVAL
+POSTGRADUATE
+SCHOOL
+MONTEREY, CALIFORNIA
+BIOMETRIC CHALLENGES FOR FUTURE DEPLOYMENTS:
+A STUDY OF THE IMPACT OF GEOGRAPHY, CLIMATE, CULTURE,
+AND SOCIAL CONDITIONS ON THE EFFECTIVE
+COLLECTION OF BIOMETRICS
+Paul C. Clark, Heather S. Gregg, with preface by Cynthia E. Irvine
+April 2011
+Approved for public release; distribution is unlimited"
+c0f17f99c44807762f2a386ac6579c364330e082,A Review on Deep Learning Techniques Applied to Semantic Segmentation,"A Review on Deep Learning Techniques
+Applied to Semantic Segmentation
+A. Garcia-Garcia, S. Orts-Escolano, S.O. Oprea, V. Villena-Martinez, and J. Garcia-Rodriguez"
+c0a0adb7f02d5509969e6107c914f7cc6e9ec881,Semantic Instance Segmentation via Deep Metric Learning,"Semantic Instance Segmentation via Deep Metric Learning
+Alireza Fathi∗
+Zbigniew Wojna∗
+Vivek Rathod∗
+Peng Wang†
+Sergio Guadarrama∗
+Kevin P. Murphy∗
+Hyun Oh Song∗"
+c08420b1bfa093e89e35e3b8d3a9e3e881f4f563,A Classification Framework for Large-Scale Face Recognition Systems,"Kent Academic Repository
+Full text document (pdf)
+Citation for published version
+Zhou, Ziheng and Deravi, Farzin (2009) A Classification Framework for Large-Scale Face Recognition
+Systems. In: 3rd IAPR/IEEE International Conference on Biometrics, 2-5 June, University of
+Sassari, Italy.
+https://doi.org/10.1007/978-3-642-01793-3_35
+Link to record in KAR
+http://kar.kent.ac.uk/23302/
+Document Version
+Author's Accepted Manuscript
+Copyright & reuse
+Content in the Kent Academic Repository is made available for research purposes. Unless otherwise stated all
+ontent is protected by copyright and in the absence of an open licence (eg Creative Commons), permissions
+for further reuse of content should be sought from the publisher, author or other copyright holder.
+Versions of research
+The version in the Kent Academic Repository may differ from the final published version.
+Users are advised to check http://kar.kent.ac.uk for the status of the paper. Users should always cite the
+published version of record.
+Enquiries"
+c03c16668426d8b069e75cb440686e12a9adbcd7,Deep Unsupervised Similarity Learning Using Partially Ordered Sets,"Deep Unsupervised Similarity Learning using Partially Ordered Sets
+Miguel A. Bautista∗ , Artsiom Sanakoyeu∗ , Bj¨orn Ommer
+Heidelberg Collaboratory for Image Processing
+IWR, Heidelberg University, Germany"
+c0de99c5f15898e2d28f9946436fec2b831d4eae,ClothCap: seamless 4D clothing capture and retargeting,"ClothCap: Seamless 4D Clothing Capture and Retargeting
+GERARD PONS-MOLL∗, Max Planck Institute for Intelligent Systems, Tübingen, Germany
+SERGI PUJADES∗, Max Planck Institute for Intelligent Systems, Tübingen, Germany
+SONNY HU, Body Labs, New York, NY, USA
+MICHAEL J. BLACK, Max Planck Institute for Intelligent Systems, Tübingen, Germany
+Fig. 1. ClothCap. From left to right: (1) An example 3D textured scan that is part of a 4D sequence. (2) Our multi-part aligned mesh model, layered over the
+ody. (3) The estimated minimally clothed shape (MCS) under the clothing. (4) The body made fatter and dressed in the same clothing. Note that the clothing
+dapts in a natural way to the new body shape. (5) This new body shape posed in a new, never seen, pose. This illustrates how ClothCap supports a range of
+pplications related to clothing capture, modeling, retargeting, reposing, and try-on.
+Dressing virtual avatars and animating them with high quality, visu-
+lly plausible, results is a challenging task. Highly realistic physical
+simulation of clothing on human bodies in motion is complex: cloth-
+ing models are laborious to construct, patterns must be graded so
+that they can be sized to different characters, and the physical param-
+eters of the cloth must be known. Instead, we propose a data-driven
+lothing capture (ClothCap) approach; we capture dynamic clothing
+on humans from 4D scans and transform it to more easily dress
+virtual avatars.
+INTRODUCTION
+Designing and simulating realistic clothing is challenging. Previous methods"
+c0afa514524a4cf4b1772c1738ceb6989bff1b71,Impact of Tone-mapping Algorithms on Subjective and Objective Face Recognition in HDR Images,"Impact of Tone-mapping Algorithms on Subjective and
+Objective Face Recognition in HDR Images
+Pavel Korshunov
+MMSPG, EPFL
+Marco V. Bernardo
+Optics Center, UBI
+Touradj Ebrahimi
+MMSPG, EPFL
+António M. G. Pinheiro
+Optics Center, UBI"
+c08ef9ebf46e5a88c4ee1aa64dac104ddc07bee2,Classification of vehicles for urban traffic scenes,"Classification of Vehicles
+for Urban Traffic Scenes
+Norbert Erich Buch
+Submitted in partial fulfilment of the requirements of
+Kingston University for the degree of
+Doctor of Philosophy
+June, 2010
+Collaborating partner:
+Traffic Directorate at Transport for London"
+c0ff7dc0d575658bf402719c12b676a34271dfcd,A New Incremental Optimal Feature Extraction Method for On-Line Applications,"A New Incremental Optimal Feature Extraction
+Method for On-line Applications
+Youness Aliyari Ghassabeh, Hamid Abrishami Moghaddam
+Electrical Engineering Department, K. N. Toosi University of
+Technology, Tehran, Iran"
+c02847a04a99a5a6e784ab580907278ee3c12653,Fine Grained Video Classification for Endangered Bird Species Protection,"Fine Grained Video Classification for
+Endangered Bird Species Protection
+Non-Thesis MS Final Report
+Chenyu Wang
+. Introduction
+.1 Background
+This project is about detecting eagles in videos. Eagles are endangered species at the brim of
+extinction since 1980s. With the bans of harmful pesticides, the number of eagles keep increasing.
+However, recent studies on golden eagles’ activities in the vicinity of wind turbines have shown
+significant number of turbine blade collisions with eagles as the major cause of eagles’ mortality. [1]
+This project is a part of a larger research project to build an eagle detection and deterrent system
+on wind turbine toward reducing eagles’ mortality. [2] The critical component of this study is a
+omputer vision system for eagle detection in videos. The key requirement are that the system should
+work in real time and detect eagles at a far distance from the camera (i.e. in low resolution).
+There are three different bird species in my dataset - falcon, eagle and seagull. The reason for
+involving only these three species is based on the real world situation. Wind turbines are always
+installed near coast and mountain hill where falcons and seagulls will be the majority. So my model
+will classify the minority eagles out of other bird species during the immigration season and protecting
+them by using the deterrent system.
+.2 Brief Approach"
+c03ef6e94808185c1080ac9b155ac3b159b4f1ec,Learning to Avoid Errors in GANs by Manipulating Input Spaces,"Learning to Avoid Errors in GANs by Manipulating
+Input Spaces
+Alexander B. Jung
+TU Dortmund"
+c038186138b76a625500ff84c9dadb18aae29f1c,Learning Implicit Transfer for Person Re-identification,"Learning Implicit Transfer
+for Person Re-identi(cid:12)cation
+Tamar Avraham, Ilya Gurvich, Michael Lindenbaum, and Shaul Markovitch
+Computer science department, Technion - I.I.T., Haifa 32000, Israel."
+c02dbf756b9e9e2bed37cb7d295529397cad616a,Semantic Segmentation of RGBD Videos with Recurrent Fully Convolutional Neural Networks,"Semantic Segmentation of RGBD Videos with Recurrent Fully Convolutional
+Neural Networks
+Ekrem Emre Yurdakul, Y¨ucel Yemez
+Computer Engineering Department, Koc¸ University
+Istanbul, Turkey"
+c082afd5928165ccaf6d419aff5d0456d8ef78f3,Face recognition by fusing binary edge feature and second-order mutual information,"Face Recognition by Fusing Binary Edge Feature and
+Second-order Mutual Information
+Jiatao Song, Beijing Chen, Wei Wang, Xiaobo Ren
+School of Electronic and Information Engineering,
+Ningbo University of Technology
+Ningbo, China"
+c0be23ae7f327f9415e583aee1936b9932c9b58b,Copycat CNN: Stealing Knowledge by Persuading Confession with Random Non-Labeled Data,"NetworkCNNimageslabelsFakeDatasetimages24132labelsTarget NetworkCNNimageslabelsOriginalDatasetFakeDatasetFig.1:Ontheleft,thetargetnetworkistrainedwithanoriginal(confidential)datasetandisservedpubliclyasanAPI,receivingimagesasinputandprovidingclasslabelsasoutput.Ontheright,itispresentedtheprocesstogetstolenlabelsandtocreateafakedataset:randomnaturalimagesaresenttotheAPIandthelabelsareobtained.Afterthat,thecopycatnetworkistrainedusingthisfakedataset.cloud-basedservicestocustomersallowingthemtooffertheirownmodelsasanAPI.Becauseoftheresourcesandmoneyinvestedincreatingthesemodels,itisinthebestinterestofthesecompaniestoprotectthem,i.e.,toavoidthatsomeoneelsecopythem.Someworkshavealreadyinvestigatedthepossibilityofcopyingmodelsbyqueryingthemasablack-box.In[1],forexample,theauthorsshowedhowtoperformmodelextractionattackstocopyanequivalentornear-equivalentmachinelearningmodel(decisiontree,logisticregression,SVM,andmultilayerperceptron),i.e.,onethatachievescloseto100%agreementonaninputspaceofinterest.In[2],theauthorsevaluatedtheprocessofcopyingaNaiveBayesandSVMclassifierinthecontextoftextclassification.Bothworksfocusedongeneralclassifiersandnotondeepneuralnetworksthatrequirelargeamountsofdatatobetrainedleavingthequestionofwhetherdeepmodelscanbeeasilycopied.Althoughthesecondusesdeeplearningtostealtheclassifiers,itdoesnottrytouseDNNstostealfromdeepmodels.Additionally,theseworksfocusoncopyingbyqueryingwithproblemdomaindata.Inrecentyears,researchershavebeenexploringsomeintriguingpropertiesofdeepneuralnetworks[3],[4].More©2018IEEE.Personaluseofthismaterialispermitted.PermissionfromIEEEmustbeobtainedforallotheruses,inanycurrentorfuturemedia,includingreprinting/republishingthismaterialforadvertisingorpromotionalpurposes,creatingnewcollectiveworks,forresaleorredistributiontoserversorlists,orreuseofanycopyrightedcomponentofthisworkinotherworks."
+c0c8d720658374cc1ffd6116554a615e846c74b5,Modeling Multimodal Clues in a Hybrid Deep Learning Framework for Video Classification,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+Modeling Multimodal Clues in a Hybrid Deep
+Learning Framework for Video Classification
+Yu-Gang Jiang, Zuxuan Wu, Jinhui Tang, Zechao Li, Xiangyang Xue, Shih-Fu Chang"
+c06447df3e50ec451240205cefa0708caee8ab8c,Picture it in your mind: generating high level visual representations from textual descriptions,"Picture It In Your Mind: Generating High Level Visual
+Representations From Textual Descriptions
+Fabio Carrara
+ISTI-CNR
+via G. Moruzzi, 1
+56124 Pisa, Italy
+Andrea Esuli
+ISTI-CNR
+via G. Moruzzi, 1
+56124 Pisa, Italy
+Tiziano Fagni
+ISTI-CNR
+via G. Moruzzi, 1
+56124 Pisa, Italy
+Fabrizio Falchi
+ISTI-CNR
+via G. Moruzzi, 1
+56124 Pisa, Italy
+Alejandro Moreo
+Fernández"
+c0e9d06383442d89426808d723ca04586db91747,Cascaded SR-GAN for Scale-Adaptive Low Resolution Person Re-identification,Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intelligence (IJCAI-18)
+c04fec95a448f9b01dd4399b3a5a365f67448bdf,From Image Sequence to Frontal Image: Reconstruction of the Unknown Face A Forensic Case,"From Image Sequence to Frontal Image:
+Reconstruction of the Unknown Face
+A Forensic Case
+Christiaan van Dam"
+c0d21722d83c126af4175add38ffc893a33ee01e,Near-Online Multi-target Tracking with Aggregated Local Flow Descriptor,"Near-Online Multi-target Tracking with Aggregated Local Flow Descriptor
+Wongun Choi
+NEC Laboratories America
+0080 N. Wolfe Rd, Cupertino, CA, USA"
+eee8a37a12506ff5df72c402ccc3d59216321346,Volume C,"Uredniki:
+dr. Tomaž Erjavec
+Odsek za tehnologije znanja
+Institut »Jožef Stefan«, Ljubljana
+dr. Jerneja Žganec Gros
+Alpineon d.o.o, Ljubljana
+Založnik: Institut »Jožef Stefan«, Ljubljana
+Tisk: Birografika BORI d.o.o.
+Priprava zbornika: Mitja Lasič
+Oblikovanje naslovnice: dr. Damjan Demšar
+Tiskano iz predloga avtorjev
+Naklada: 50
+Ljubljana, oktober 2008
+Konferenco IS 2008 sofinancirata
+Ministrstvo za visoko šolstvo, znanost in tehnologijo
+Institut »Jožef Stefan«
+Informacijska družba
+ISSN 1581-9973
+CIP - Kataložni zapis o publikaciji
+Narodna in univerzitetna knjižnica, Ljubljana"
+eea77e2a891e49e65d4bed54c1b24411f33203a3,Exploring Guide Context in City Scenario Using Color and Gradient Features,"The Open Construction and Building Technology Journal, 2015, 9, 177-181
+Open Access
+Exploring Guide Context in City Scenario Using Color and Gradient
+Features
+Send Orders for Reprints to
+Zhuo Bian*
+Art Academy of Northeast Agriculture University, Harbin 150001, China"
+ee4fd1a1df6a01e7dabe82090b1024e2eb6d78a1,Effective Emotional Classification Combining Facial Classifiers and User Assessment,"Effective Emotional Classification Combining Facial
+Classifiers and User Assessment
+Isabelle Hupont1, Sandra Baldassarri2, Rafael Del Hoyo1, and Eva Cerezo2
+Instituto Tecnológico de Aragón, Zaragoza (Spain)
+Departamento de Informática e Ingeniería de Sistemas,
+Instituto de Investigación en Ingeniería de Aragón, Universidad de Zaragoza (Spain)"
+ee87aa52d9642607d86f011c0d7326c4bdc63121,Automatic Detection of Facial Midline as a Guide for Facial Feature Extraction,"Automatic Detection of Facial Midline
+s a Guide for Facial Feature Extraction
+Nozomi Nakao, Wataru Ohyama, Tetsushi Wakabayashi and Fumitaka Kimura
+Graduate School of Engineering, Mie University
+577 Kurimamachiya-cho, Tsu-shi, Mie, 5148507, Japan"
+eed25d9b5b5b28e8454a359d54c9de5a05cc4682,Context-aware home monitoring system for Parkinson ' s disease patients : ambient and wearable sensing for freezing of gait detection,"Context-aware Home Monitoring System
+for Parkinson’s Disease Patients
+Ambient and Wearable Sensing for Freezing of Gait Detection
+B(cid:2456)(cid:2459)(cid:2450)(cid:2460) T(cid:2442)(cid:2452)(cid:2442)(cid:20)(cid:2444)"
+eeec69e910430bebe3808773f5a6a155d77059a0,Multi-shot Pedestrian Re-identification via Sequential Decision Making,"Multi-shot Pedestrian Re-identification via Sequential Decision Making
+Jianfu Zhang1, Naiyan Wang2 and Liqing Zhang1
+Shanghai Jiao Tong University∗, 2TuSimple"
+ee18e29a2b998eddb7f6663bb07891bfc7262248,Local Linear Discriminant Analysis Framework Using Sample Neighbors,"Local Linear Discriminant Analysis Framework
+Using Sample Neighbors
+Zizhu Fan, Yong Xu, Member, IEEE, and David Zhang, Fellow, IEEE"
+ee3a905ec8cd2e62dc642fad33d6f5f8516968a8,It depends: Approach and avoidance reactions to emotional expressions are influenced by the contrast emotions presented in the task.,"tapraid5/zfn-xhp/zfn-xhp/zfn00515/zfn3313d15z
+xppws S⫽1
+8/4/15
+5:44 Art: 2014-0213
+APA NLM
+Journal of Experimental Psychology:
+Human Perception and Performance
+015, Vol. 41, No. 5, 000
+0096-1523/15/$12.00
+© 2015 American Psychological Association
+http://dx.doi.org/10.1037/xhp0000130
+It Depends: Approach and Avoidance Reactions to Emotional Expressions
+re Influenced by the Contrast Emotions Presented in the Task
+AQ: au
+Andrea Paulus and Dirk Wentura
+Saarland University
+Studies examining approach and avoidance reactions to emotional expressions have yielded conflicting
+results. For example, expressions of anger have been reported to elicit approach reactions in some studies
+ut avoidance reactions in others. Nonetheless, the results were often explained by the same general
+underlying process, namely the influence that the social message signaled by the expression has on"
+eefb8768f60c17d76fe156b55b8a00555eb40f4d,Subspace Scores for Feature Selection in Computer Vision,"Subspace Scores for Feature Selection in Computer Vision
+Cameron Musco
+Christopher Musco"
+ee463f1f72a7e007bae274d2d42cd2e5d817e751,Automatically Extracting Qualia Relations for the Rich Event Ontology,"Automatically Extracting Qualia Relations for the Rich Event Ontology
+Ghazaleh Kazeminejad1, Claire Bonial2, Susan Windisch Brown1 and Martha Palmer1
+{ghazaleh.kazeminejad, susan.brown,
+University of Colorado Boulder, 2U.S. Army Research Lab"
+eed1dd2a5959647896e73d129272cb7c3a2e145c,The Elements of Fashion Style,"INPUTSTYLE DOCUMENTTOP ITEMS“ ”I need an outfit for a beach wedding that I'm going to early this summer. I'm so excited -- it's going to be warm and exotic and tropical... I want my outfit to look effortless, breezy, flowy, like I’m floating over the sand! Oh, and obviously no white! For a tropical spot, I think my outfit should be bright and"
+ee92d36d72075048a7c8b2af5cc1720c7bace6dd,Face recognition using mixtures of principal components,"FACE RECOGNITION USING MIXTURES OF PRINCIPAL COMPONENTS
+Deepak S. Turaga and Tsuhan Chen
+Video and Display Processing
+Philips Research USA
+Briarcliff Manor, NY 10510"
+ee335fb785c332b1ac43565b007461002616f1e0,Processing Large Amounts of Images on Hadoop with OpenCV,"Processing Large Amounts of Images
+on Hadoop with OpenCV
+Timofei Epanchintsev1,2 and Andrey Sozykin1,2
+IMM UB RAS, Yekaterinburg, Russia,
+Ural Federal University, Yekaterinburg, Russia"
+eebe66c4d1a41b3c7830846306044c8f3fe0d350,Domain adaptation networks for noisy image classification,"Faculty of Electrical Engineering, Mathematics and Computer Science
+Department of Intelligent Systems
+Domain adaptation
+networks for noisy image
+lassification
+Master Thesis
+Chengqiu Zhang
+Committee:
+Supervisors:
+Dr. Jan van Gemert
+Prof. Martha Larson
+Dr. Silvia-Laura Pintea Dr. Jan van Gemert
+Dr. Ildiko Suveg
+Dr. Marco Loog
+Dr. Silvia-Laura Pintea
+Dr. Adriana Gonzalez
+Eindhoven, Aug 2017"
+ee9385efb66ee0b1bee31c1632141729bb7fb6f5,Numerical simplification for bloat control and analysis of building blocks in genetic programming,"Noname manuscript No.
+(will be inserted by the editor)
+Numerical Simplification for Bloat Control and Analysis of
+Building Blocks in Genetic Programming
+David Kinzett · Mark Johnston · Mengjie Zhang
+the date of receipt and acceptance should be inserted later"
+eedfb384a5e42511013b33104f4cd3149432bd9e,Multimodal probabilistic person tracking and identification in smart spaces,"Multimodal Probabilistic Person
+Tracking and Identification
+in Smart Spaces
+zur Erlangung des akademischen Grades eines
+Doktors der Ingenieurwissenschaften
+der Fakultät für Informatik
+der Universität Fridericiana zu Karlsruhe (TH)
+genehmigte
+Dissertation
+Keni Bernardin
+us Karlsruhe
+Tag der mündlichen Prüfung: 20.11.2009
+Erster Gutachter:
+Zweiter Gutachter:
+Prof. Dr. A. Waibel
+Prof. Dr. R. Stiefelhagen"
+c9f3a5fe33782dd486cb32d9667fba0514711f04,Face and Expression Recognition Using Local Directional Number Pattern,"International Journal of Science and Research (IJSR)
+ISSN (Online): 2319-7064
+Impact Factor (2012): 3.358
+Face and Expression Recognition Using Local
+Directional Number Pattern
+Gopu Prasoona1, Dasu Vaman Ravi Prasad2
+Computer Science, CVSR College of Engineering, Venkatapur, RR dist, India
+Computer Science and Engineering, CVSR College of Engineering, Venkatapur, RR dist, India
+refers
+to digital"
+c91103e6612fa7e664ccbc3ed1b0b5deac865b02,Automatic Facial Expression Recognition Using Statistical-Like Moments,"Automatic facial expression recognition using
+statistical-like moments
+Roberto D’Ambrosio, Giulio Iannello, and Paolo Soda
+{r.dambrosio, g.iannello,
+Integrated Research Center, Universit`a Campus Bio-Medico di Roma,
+Via Alvaro del Portillo, 00128 Roma, Italy"
+c92e701c908908bda407f12edf6984b283e8c258,Where Should You Attend While Driving?,"Where Should You Attend While Driving?
+Simone Calderara
+Stefano Alletto
+Andrea Palazzi∗
+Francesco Solera∗
+Rita Cucchiara
+University of Modena and Reggio Emilia"
+c90b109301244e59771fec431a8d50a78e395956,Alternative face models for 3D face registration,"Alternative face models for 3D face registration
+Albert Ali Salah, Ne¸se Aly¨uz, Lale Akarun
+Bo˘gazi¸ci University, 34342 Bebek, ˙Istanbul, Turkey"
+c9876861cc0e33fffe8c3ce7484ae27d3b2eeb75,A Corpus for Analyzing Linguistic and Paralinguistic Features in Multi-Speaker Spontaneous Conversations – EVA Corpus,"A Corpus for Analyzing Linguistic and Paralinguistic Features in
+Multi-Speaker Spontaneous Conversations – EVA Corpus
+IZIDOR MLAKAR, ZDRAVKO KAČIČ, MATEJ ROJC
+Faculty of Electrical Engineering and Computer Science, University of Maribor
+SLOVENIA"
+c9c3ba7bebee553490a9ddbc6840292ed5aed90b,SCHOOL OF COMPUTER ENGINEERING PhD Confirmation Report on Object Detection in Real Images,"SCHOOL OF COMPUTER ENGINEERING
+PhD Confirmation Report
+Object Detection in Real Images
+Submitted by: Dilip Kumar Prasad
+Research Student (PhD)
+School of Computer Engineering
+E-mail:
+Supervisor: Dr. Maylor K. H. Leung
+Associate Professor,
+School of Computer Engineering
+E-mail:
+August 2010"
+c933c4bef57be3585abb13bacb74aca29588a6ac,People Detection in Color and Infrared Video Using HOG and Linear SVM,"People Detection in Color and Infrared Video
+using HOG and Linear SVM
+Pablo Tribaldos1, Juan Serrano-Cuerda1, Mar´ıa T. L´opez1;2,
+Antonio Fern´andez-Caballero1;2, and Roberto J. L´opez-Sastre3
+Instituto de Investigaci(cid:19)on en Inform(cid:19)atica de Albacete (I3A), 02071-Albacete, Spain
+Universidad de Castilla-La Mancha, Departamento de Sistemas Inform(cid:19)aticos,
+02071-Albacete, Spain
+Universidad de Alcal(cid:19)a, Dpto. de Teor(cid:19)(cid:16)a de la se~nal y Comunicaciones,
+8805-Alcal(cid:19)a de Henares (Madrid), Spain"
+c9b90cf9cdd901bd3072d6dfd8ddc523c55944b1,Adversarial Generator-Encoder Networks,"Adversarial Generator-Encoder Networks
+Dmitry Ulyanov 1 2 Andrea Vedaldi 3 Victor Lempitsky 1"
+c94c2cf52fef0503c09268c7d1faee60465ee08e,BenchIP: Benchmarking Intelligence Processors,"BENCHIP: Benchmarking Intelligence
+Processors
+Jinhua Tao1, Zidong Du1,2, Qi Guo1,2, Huiying Lan1, Lei Zhang1
+Shengyuan Zhou1, Lingjie Xu3, Cong Liu4, Haifeng Liu5, Shan Tang6
+Allen Rush7,Willian Chen7, Shaoli Liu1,2, Yunji Chen1, Tianshi Chen1,2
+ICT CAS,2Cambricon,3Alibaba Infrastructure Service, Alibaba Group
+IFLYTEK,5JD,6RDA Microelectronics,7AMD"
+c9d7219d54eccb9e49b72044d805e103fe17ba80,Towards Information-Seeking Agents,"Under review as a conference paper at ICLR 2017
+TOWARDS INFORMATION-SEEKING AGENTS
+Philip Bachman∗
+phil.bachman
+Alessandro Sordoni∗
+lessandro.sordoni
+Adam Trischler
+dam.trischler
+Maluuba Research
+Montréal, QC, Canada"
+c95c30fb990576704f2ccb3dc3335aaf43208856,CS231A Project report,"CS231A Project report
+Cecile Foret
+March 19, 2014."
+c95d8b9bddd76b8c83c8745747e8a33feedf3941,Image Ordinal Classification and Understanding: Grid Dropout with Masking Label,"label:(1, 0, 1, 0, 1, 1, 1, 1, 1)Masking label:(0, 1, 1, 1, 0, 1, 1, 1, 1)Entire imageInput imageNeuron dropout’s gradCAMGrid dropout’s gradCAMFig.1.Above:imageordinalclassificationwithrandomlyblackoutpatches.Itiseasyforhumantorecognizetheageregardlessofthemissingpatches.Themaskinglabelisalsousefultoimageclassification.Bottom:griddropout’sgrad-CAMisbetterthanthatofneurondropout.Thatistosay,griddropoutcanhelplearningfeaturerepresentation.problem[1].Withtheproliferationofconvolutionalneuralnetwork(CNN),workshavebeencarriedoutonordinalclas-sificationwithCNN[1][2][3].Thoughgoodperformanceshavebeenloggedwithmoderndeeplearningapproaches,therearetwoproblemsinimageordinalclassification.Ononehand,theamountofordinaltrainingdataisverylim-itedwhichprohibitstrainingcomplexmodelsproperly,andtomakemattersworse,collectinglargetrainingdatasetwithordinallabelisdifficult,evenharderthanlabellinggenericdataset.Therefore,insufficienttrainingdataincreasestheriskofoverfitting.Ontheotherhand,lessstudiesareconductedtounderstandwhatdeepmodelshavelearnedonordinaldata978-1-5386-1737-3/18/$31.00c(cid:13)2018IEEE"
+c924137ca87e8b4e1557465405744f8b639b16fc,Seeding Deep Learning using Wireless Localization,"ADDRESSING TRAINING BIAS VIA AUTOMATED IMAGE ANNOTATION
+Zhujun Xiao 1 Yanzi Zhu 2 Yuxin Chen 1 Ben Y. Zhao 1 Junchen Jiang 1 Haitao Zheng 1"
+c936b9a958a67cdd5665b923569d9d786c934029,Software Specification Document For,"Software Specification
+Document
+Crowd_Count++
+Version 1.0
+November 2015
+Juan Mejia      Michael Safdieh      Rosario Antunez
+Prepared by:"
+c9bbf31afbec278ca735e91cf5e9c70dd3aa41a4,Enhancing 3D Face Recognition By Mimics Segmentation,"Enhancing 3D Face Recognition By Mimics Segmentation
+Boulbaba Ben Amor, Mohsen Ardabilian, and Liming Chen
+MI Department, LIRIS Laboratory, CNRS 5205
+Ecole Centrale de Lyon, 36 av. Guy de Collongue, 69134 Lyon , France
+{Boulbaba.Ben-Amor, Mohsen.Ardabilian,"
+c94ae3d1c029a70cabdab906fe1460d84fd42acd,"Comparison of wavelet, Gabor and curvelet transform for face recognition","Optica Applicata, Vol. XLI, No. 1, 2011
+Comparison of wavelet, Gabor and curvelet
+transform for face recognition
+JIULONG ZHANG, YINGHUI WANG, ZHIYU ZHANG, CHUNLI XIA
+Computer Science and Engineering School, Xian University of Technology,
+Xi'an, 710048, P.R. China
+There has been much research about using Gabor wavelet for face recognition. Other multiscale
+geometrical tools, such as curvelet and contourlet, have also been used for face recognition, thus
+it is interesting to know which method performs best, especially under illumination and expression
+hanges. In this paper, we make a systematic comparison of wavelet, Gabor and curvelet for
+recognition, and find the best subband irrelevant to expression and illumination changes. We
+ombine the multiscale analysis with subspace decomposition as our algorithm. Experiments show
+that for expression changes, the properties of the coarse layer of curvelet and wavelet are very
+good. Whilst for illumination changes, the low frequency parts of the two methods are similarly
+influenced, but the detail coefficients of curvelet and the high frequency of wavelet work fine with
+PCA, with the former outperforming the latter. When these two factors change simultaneously,
+the detail layer of curvelet is better relative to the others.
+Keywords: wavelet transform, Gabor wavelet, curvelet transform, face recognition, multiscale analysis.
+. Introduction
+Among the so many popular methods for face recognition, the wavelet transform is"
+c9311a0c5045d86a617bd05a5cc269f44e81508d,Accurate Eye Centre Localisation by Means of Gradients,"ACCURATE EYE CENTRE LOCALISATION BY MEANS OF
+GRADIENTS
+Institute for Neuro- and Bioinformatics, University of L¨ubeck, Ratzeburger Allee 160, D-23538 L¨ubeck, Germany
+Pattern Recognition Company GmbH, Innovations Campus L¨ubeck, Maria-Goeppert-Strasse 1, D-23562 L¨ubeck, Germany
+{timm,
+Fabian Timm and Erhardt Barth
+Keywords:"
+c99a23a5bb5d5b10098395f59e9f8f79c79a75bd,Prediction Using Audience Chat Reactions,"Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 972–978
+Copenhagen, Denmark, September 7–11, 2017. c(cid:13)2017 Association for Computational Linguistics"
+c93996cb126589b30c04bf1256c97a4431c0e8b6,Robustness Analysis of Pedestrian Detectors for Surveillance,"Robustness Analysis of Pedestrian Detectors
+for Surveillance
+Yuming Fang, Senior Memmber, IEEE, Guanqun Ding, Yuan Yuan, Weisi Lin, Fellow, IEEE,
+nd Haiwen Liu, Senior Memmber, IEEE"
+c9b139b78e5337580047138d7fc2dff3b8fcf31f,Offline Face Recognition System Based on Gabor- Fisher Descriptors and Hidden Markov Models,"Offline Face Recognition System Based on Gabor-
+Fisher Descriptors and Hidden Markov Models
+Zineb Elgarrai1, Othmane Elmeslouhi2, Mustapha Kardouchi3, Hakim Allali1, Sid-Ahmed Selouani4
+FST of Hassan 1st University Settat /LAVETTE Laboratory,
+FPO of Ibnou Zohr University /LabSIE Laboratory
+Université de Moncton /Département d’Informatique,
+Université de Moncton/Département de Gestion de l’Information"
+c97774191be232678a45d343a25fcc0c96c065e7,Co-Training of Audio and Video Representations from Self-Supervised Temporal Synchronization,"Co-Training of Audio and Video Representations from
+Self-Supervised Temporal Synchronization
+Undergraduate Thesis
+written by
+Bruno Korbar
+under the supervision of Professor Lorenzo Torresani and Du Tran, and
+submitted to the Committee as a culminating experience for the degree of
+Bachelor of Arts in Computer Science
+t Dartmouth College.
+Date of the public presentation: Members of the Thesis Committee:
+May 29, 2018
+Prof Lorenzo Torresani
+Prof Saeed Hassanpour
+Prof Venkatramanan Siva Subrahmanian
+Dartmouth Computer Science Technical Report TR2018-849"
+fc04a50379e08ddde501816eb1f9560c36d01a39,Image Pre-processing Using OpenCV Library on MORPH-II Face Database,"Image Pre-processing Using OpenCV Library on MORPH-II Face Database
+B. Yip, R. Towner, T. Kling, C. Chen, and Y. Wang"
+fc1e37fb16006b62848def92a51434fc74a2431a,A Comprehensive Analysis of Deep Regression,"DRAFT
+A Comprehensive Analysis of Deep Regression
+St´ephane Lathuili`ere, Pablo Mesejo, Xavier Alameda-Pineda, Member IEEE, and Radu Horaud"
+fc7627e57269e7035e4d56105358211076fe4f04,The Association of Quantitative Facial Color Features with Cold Pattern in Traditional East Asian Medicine,"Hindawi
+Evidence-Based Complementary and Alternative Medicine
+Volume 2017, Article ID 9284856, 9 pages
+https://doi.org/10.1155/2017/9284856
+Research Article
+The Association of Quantitative Facial Color Features with
+Cold Pattern in Traditional East Asian Medicine
+Sujeong Mun, Ilkoo Ahn, and Siwoo Lee
+Mibyeong Research Center, Korea Institute of Oriental Medicine, 1672 Yuseong-daero, Yuseong-gu, Daejeon 305-811, Republic of Korea
+Correspondence should be addressed to Siwoo Lee;
+Received 30 June 2017; Accepted 13 September 2017; Published 17 October 2017
+Academic Editor: Kenji Watanabe
+Copyright © 2017 Sujeong Mun et al. This is an open access article distributed under the Creative Commons Attribution License,
+which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
+Introduction. Facial diagnosis is a major component of the diagnostic method in traditional East Asian medicine. We investigated
+the association of quantitative facial color features with cold pattern using a fully automated facial color parameterization system.
+Methods. The facial color parameters of 64 participants were obtained from digital photographs using an automatic color correction
+nd color parameter calculation system. Cold pattern severity was evaluated using a questionnaire. Results. The 𝑎∗ values of the
+whole face, lower cheek, and chin were negatively associated with cold pattern score (CPS) (whole face: 𝐵 = −1.048, 𝑃 = 0.021;
+lower cheek: 𝐵 = −0.494, 𝑃 = 0.007; chin: 𝐵 = −0.640, 𝑃 = 0.031), while 𝑏∗ value of the lower cheek was positively associated"
+fc50c9392fd23b6c88915177c6ae904a498aacea,Scaling Egocentric Vision: The EPIC-KITCHENS Dataset,"Scaling Egocentric Vision:
+The EPIC-KITCHENS Dataset
+Dima Damen1, Hazel Doughty1, Giovanni Maria Farinella2, Sanja Fidler3,
+Antonino Furnari2, Evangelos Kazakos1, Davide Moltisanti1,
+Jonathan Munro1, Toby Perrett1, Will Price1, and Michael Wray1
+Uni. of Bristol, UK 2Uni. of Catania, Italy,
+Uni. of Toronto, Canada"
+fc30d7dbf4c3cdd377d8cd4e7eeabd5d73814b8f,Multiple Object Tracking by Efficient Graph Partitioning,"Multiple Object Tracking
+y Ef‌f‌icient Graph Partitioning
+Ratnesh Kumar, Guillaume Charpiat, Monique Thonnat
+STARS Team, INRIA, Sophia Antipolis, France"
+fcd3d69b418d56ae6800a421c8b89ef363418665,Effects of Aging over Facial Feature Analysis and Face Recognition,"Effects of Aging over Facial Feature Analysis and Face
+Recognition
+Bilgin Esme & Bulent Sankur
+Bogaziçi Un. Electronics Eng. Dept. March 2010"
+fcd77f3ca6b40aad6edbd1dab9681d201f85f365,Machine Learning Based Attacks and Defenses in Computer Security: Towards Privacy and Utility Balance in Sensor Environments,"(cid:13)Copyright 2014
+Miro Enev"
+fc3e097ea7dd5daa7d314ecebe7faad9af5e62fb,Variational Inference and Model Selection with Generalized Evidence Bounds,"Variational Inference and Model Selection
+with Generalized Evidence Bounds
+Chenyang Tao * Liqun Chen * Ruiyi Zhang Ricardo Henao Lawrence Carin"
+fc068f7f8a3b2921ec4f3246e9b6c6015165df9a,Beyond Part Models: Person Retrieval with Refined Part Pooling (and A Strong Convolutional Baseline),"Beyond Part Models: Person Retrieval with Refined Part Pooling
+(and A Strong Convolutional Baseline)
+Yifan Sun†, Liang Zheng‡, Yi Yang‡, Qi Tian§, Shengjin Wang†∗
+Tsinghua University ‡University of Technology Sydney §University of Texas at San Antonio
+{liangzheng06,"
+fcc6fd9b243474cd96d5a7f4a974f0ef85e7ddf7,InclusiveFaceNet: Improving Face Attribute Detection with Race and Gender Diversity,"Improving Face Attribute Detection with Race and Gender Diversity
+InclusiveFaceNet:
+Hee Jung Ryu 1 Hartwig Adam * 1 Margaret Mitchell * 1"
+fc64f43cdcf4898b15ddce8b441d2ab9daa324f0,Gabor Filter-based Face Recognition Technique,"THE PUBLISHING HOUSE
+OF THE ROMANIAN ACADEMY
+PROCEEDINGS OF THE ROMANIAN ACADEMY, Series A,
+Volume 11, Number 3/2010, pp. 277–283
+GABOR FILTER-BASED FACE RECOGNITION TECHNIQUE
+Tudor BARBU
+Institute of Computer Science, Romanian Academy, Iaşi, Romania
+E-mail:
+We propose a novel human face recognition approach in this paper, based on two-dimensional Gabor
+filtering and supervised classification. The feature extraction technique proposed in this article uses
+D Gabor filter banks and produces robust 3D face feature vectors. A supervised classifier, using
+minimum average distances, is developed for these vectors. The recognition process is completed by a
+threshold-based face verification method, also provided. A high facial recognition rate is obtained
+using our technique. Some experiments, whose satisfactory results prove the effectiveness of this
+recognition approach, are also described in the paper.
+Key words: Face recognition; Face identification; Feature vector; 2D Gabor filter; Supervised classification;
+Face verification.
+. INTRODUCTION
+This article approaches an important biometric domain, which is human face recognition. Face
+represents a physiological biometric identifier that is widely used in person recognition. During the past"
+fc74e14a3195fdf91157d5ea86d35c576fcf01d6,Detection and Handling of Occlusion in an Object Detection System,"Detection and Handling of Occlusion in an
+Object Detection System
+R.M.G. Op het Velda, R.G.J. Wijnhovenb, Y. Bondarauc and Peter H.N. de Withd
+,bViNotion B.V., Horsten 1, 5612 AX, Eindhoven, The Netherlands;
+,c,dEindhoven University of Technology, Den Dolech 2, 5612 AZ, Eindhoven, The Netherlands"
+fc27c2c8a2486f5918451fbef198f46b5bf45d2c,Robust Real-Time Multi-View Eye Tracking,"IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY, VOL. XX, NO. XX, 2018
+Robust Real-Time Multi-View Eye Tracking
+Nuri Murat Arar, Student Member, IEEE, and Jean-Philippe Thiran, Senior Member, IEEE"
+fc73090889036a0e42ea40827ac835cd5e135b16,Deep Learning based Large Scale Visual Recommendation and Search for E-Commerce,"Deep Learning based Large Scale Visual Recommendation and
+Search for E-Commerce
+Devashish Shankar, Sujay Narumanchi, Ananya H A,
+Pramod Kompalli, Krishnendu Chaudhury
+Flipkart Internet Pvt. Ltd.,
+Bengaluru, India."
+fcb64ef4421cebb80eb33f62c7726f339eb2bb62,Deep View-Aware Metric Learning for Person Re-Identification,Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intelligence (IJCAI-18)
+fcf8bb1bf2b7e3f71fb337ca3fcf3d9cf18daa46,Feature Selection via Sparse Approximation for Face Recognition,"MANUSCRIPT SUBMITTED TO IEEE TRANS. PATTERN ANAL. MACH. INTELL., JULY 2010
+Feature Selection via Sparse Approximation for
+Face Recognition
+Yixiong Liang, Lei Wang, Yao Xiang, and Beiji Zou"
+fcd9221f8ef306155f59817a3b0bdae05e9e0ae2,GEFeWS: A Hybrid Genetic-Based Feature Weighting and Selection Algorithm for Multi-Biometric Recognition,"GEFeWS: A Hybrid Genetic-Based Feature Weighting and
+Selection Algorithm for Multi-Biometric Recognition
+Aniesha Alford+, Khary Popplewell#, Gerry Dozier#, Kelvin Bryant#, John Kelly+,
+Josh Adams#, Tamirat Abegaz^, and Joseph Shelton#
+Center for Advanced Studies in Identity Sciences
++Electrical and Computer Engineering Department,
+#Computer Science Department
+^Computational Science and Engineering Department
+North Carolina A & T State University
+601 E Market St., Greensboro, NC 27411"
+fcabf1c0f4a26431d4df95ddeec2b1dff9b3e928,Semantic Segmentation using Adversarial Networks,
+fcbf808bdf140442cddf0710defb2766c2d25c30,Unsupervised Semantic Action Discovery from Video Collections,"IJCV manuscript No.
+(will be inserted by the editor)
+Unsupervised Semantic Action Discovery from Video
+Collections
+Ozan Sener · Amir Roshan Zamir · Chenxia Wu · Silvio Savarese ·
+Ashutosh Saxena
+Received: date / Accepted: date"
+fd51665efe2520a55aa58b2f1863a3bd9870529f,Understanding Compressive Adversarial Privacy,"Understanding Compressive Adversarial Privacy
+Xiao Chen, Peter Kairouz, Ram Rajagopal"
+fd4ac1da699885f71970588f84316589b7d8317b,Supervised Descent Method for Solving Nonlinear Least Squares Problems in Computer Vision,"JOURNAL OF LATEX CLASS FILES, VOL. 6, NO. 1, JANUARY 2007
+Supervised Descent Method
+for Solving Nonlinear Least Squares
+Problems in Computer Vision
+Xuehan Xiong, and Fernando De la Torre"
+fde3f34a1accadb73269e4beef487611f682b781,"Before A Computer Can Draw, It Must First Learn To See","Before A Computer Can Draw, It Must First Learn To See
+Derrall Heath and Dan Ventura
+Computer Science Department
+Brigham Young University
+Provo, UT 84602 USA"
+fdf533eeb1306ba418b09210387833bdf27bb756,Exploiting Unrelated Tasks in Multi-Task Learning,
+fdb956c7705b7f57f56f944a0f3f4ede1d6f77fa,Does Fast Fashion Increase the Demand for Premium Brands ?,"Does Fast Fashion Increase the Demand for Premium Brands?
+A Structural Analysis
+Zijun (June) Shi1, Param Vir Singh, Dokyun Lee, Kannan Srinivasan
+(Preliminary draft. Please do not cite without the authors’ permission.)"
+fdda5852f2cffc871fd40b0cb1aa14cea54cd7e3,Im2Flow: Motion Hallucination from Static Images for Action Recognition,"Im2Flow: Motion Hallucination from Static Images for Action Recognition
+Ruohan Gao
+UT Austin
+Bo Xiong
+UT Austin
+Kristen Grauman
+UT Austin"
+fd1b917476b114919de0ae1b6a4b96a52a410c20,A Memory Based Face Recognition Method,"A Memory Based Face Recognition Method
+Alex Pappachen James
+B. Tech. (Hons), M. Tech.
+Grif‌f‌ith School of Engineering
+Science, Environment, Engineering and Technology
+Grif‌f‌ith University
+Submitted in fulfilment of the requirements of the degree of
+Doctor of Philosophy
+November 2008"
+fdfaf46910012c7cdf72bba12e802a318b5bef5a,Computerized Face Recognition in Renaissance Portrait Art,"Computerized Face Recognition in Renaissance
+Portrait Art
+Ramya Srinivasan, Conrad Rudolph and Amit Roy-Chowdhury"
+fd4c46bfd3bb00ed93b0bb5b28ef0336f59f0c15,Expressing Emotions through Vibration for Perception and Control,"Expressing Emotions through Vibration
+for Perception and Control
+Shafiq ur Réhman
+Doctoral Thesis, April 2010
+Department of Applied Physics and Electronics
+Umeå University, Sweden
+UNIVERSITETSSERVICEProfil & CopyshopÖppettider:Måndag - fredag 10-16Tel. 786 52 00 alt 070-640 52 01Universumhuset"
+fd6d2e4f939b8d804a6b5908bded8f1ad2563e38,Stabilizing GAN Training with Multiple Random Projections,"Stabilizing GAN Training with
+Multiple Random Projections
+Behnam Neyshabur Srinadh Bhojanapalli Ayan Chakrabarti
+Toyota Technological Institute at Chicago
+6045 S. Kenwood Ave., Chicago, IL 60637"
+fdbe7c520568d9a32048270d2c87113c635dc7e6,Live Stream Oriented Age and Gender Estimation using Boosted LBP Histograms Comparisons,"Live Stream Oriented Age and Gender Estimation using Boosted LBP
+Histograms Comparisons
+LAMIA, University of the French West Indies and Guiana, Campus de Fouillole, BP 250, 97157 Pointe `a Pitre, France
+Lionel Prevost1, Philippe Phothisane2 and Erwan Bigorgne2
+Eikeo, 11 rue L´eon Jouhaux, 75010 Paris, France
+Keywords:
+Face Analysis, Boosting, Gender Estimation, Age Estimation."
+fd0a1a2ecf69a6c1a6efcb18b8f23e4d5402f601,"ExtremeWeather: A large-scale climate dataset for semi-supervised detection, localization, and understanding of extreme weather events","ExtremeWeather: A large-scale climate dataset for
+semi-supervised detection, localization, and
+understanding of extreme weather events
+Evan Racah1,2, Christopher Beckham1,3, Tegan Maharaj1,3,
+Samira Ebrahimi Kahou4, Prabhat2, Christopher Pal1,3
+MILA, Université de Montréal,
+Lawrence Berkeley National Lab, Berkeley, CA,
+École Polytechnique de Montréal,
+Microsoft Maluuba,"
+fd67b9812fa4aef6c5dfb633df4406105cdb4e8f,Zero-Shot Learning with Generative Latent Prototype Model,"Zero-Shot Learning with Generative Latent
+Prototype Model
+Yanan Li, Student Member, IEEE, Donghui Wang, Member, IEEE"
+fdca08416bdadda91ae977db7d503e8610dd744f,ICT - 2009 . 7 . 1 KSERA Project 2010 - 248085,"ICT-2009.7.1
+KSERA Project
+010-248085
+Deliverable D3.1
+Deliverable D3.1
+Human Robot Interaction
+Human Robot Interaction
+8 October 2010
+Public Document
+The KSERA project (http://www.ksera
+KSERA project (http://www.ksera-project.eu) has received funding from the European Commission
+project.eu) has received funding from the European Commission
+under the 7th Framework Programme (FP7) for Research and Technological Development under grant
+under the 7th Framework Programme (FP7) for Research and Technological Development under grant
+under the 7th Framework Programme (FP7) for Research and Technological Development under grant
+greement n°2010-248085."
+fd4537b92ab9fa7c653e9e5b9c4f815914a498c0,One-Sided Unsupervised Domain Mapping,
+fdf31db5aa8cf8a7f9ac84fcc7b0949e8e000a41,MODELING FASHION Anonymous ICME submission,"MODELING FASHION
+Anonymous ICME submission"
+fd8bb112b197e23183feeb6d1f4506d180caa4fc,Fashion Clothes Matching Scheme Learned from Fashionista ’ S Suggestions in Microblog,"FASHION CLOTHES MATCHING SCHEME LEARNED FROM FASHIONISTA’S
+SUGGESTIONS IN MICROBLOG
+Guangyu Gao1, Yihang Zhang1, Songyang Du2
+School of Software, Beijing Institute of Technology. Beijing 100081, China
+Beijing Special Vehicle Research Institute. Beijing 100072, China"
+fd96432675911a702b8a4ce857b7c8619498bf9f,Improved Face Detection and Alignment using Cascade Deep Convolutional Network,"Improved Face Detection and Alignment using Cascade
+Deep Convolutional Network
+Weilin Cong†, Sanyuan Zhao†, Hui Tian‡, and Jianbing Shen†
+Beijing Key Laboratory of Intelligent Information Technology, School of
+Computer Science,Beijing Institute of Technology, Beijing 100081, P.R.China
+China Mobile Research Institute, Xuanwu Men West Street, Beijing"
+fdd94d77377df6e55d14e41a28141dc241d8b5d6,Current Status and Future Prospects of Clinical Psychology: Toward a Scientifically Principled Approach to Mental and Behavioral Health Care.,"Current Status and Future Prospects of Clinical Psychology: Toward a Scientifically
+Principled Approach to Mental and Behavioral Health Care
+Author(s): Timothy B. Baker, Richard M. McFall and Varda Shoham
+Source: Psychological Science in the Public Interest, Vol. 9, No. 2 (November 2008), pp. 67-
+Published by: Sage Publications, Inc. on behalf of the Association for Psychological Science
+Stable URL: http://www.jstor.org/stable/20697320
+Accessed: 07-02-2017 15:41 UTC
+REFERENCES
+Linked references are available on JSTOR for this article:
+http://www.jstor.org/stable/20697320?seq=1&cid=pdf-reference#references_tab_contents
+You may need to log in to JSTOR to access the linked references.
+JSTOR is a not-for-profit service that helps scholars, researchers, and students discover, use, and build upon a wide range of content in a trusted
+digital archive. We use information technology and tools to increase productivity and facilitate new forms of scholarship. For more information about
+JSTOR, please contact
+Your use of the JSTOR archive indicates your acceptance of the Terms & Conditions of Use, available at
+http://about.jstor.org/terms
+Sage Publications, Inc., Association for Psychological Science are collaborating with JSTOR to
+digitize, preserve and extend access to Psychological Science in the Public Interest
+This content downloaded from 129.133.179.122 on Tue, 07 Feb 2017 15:41:42 UTC
+All use subject to http://about.jstor.org/terms"
+fd0e1fecf7e72318a4c53463fd5650720df40281,End-to-End Comparative Attention Networks for Person Re-Identification,"End-to-End Comparative Attention Networks for
+Person Re-identification
+Hao Liu, Jiashi Feng, Meibin Qi, Jianguo Jiang and Shuicheng Yan, Fellow, IEEE"
+fd4f9955ec28b63443039cb9d4e15bae796defe4,Predictably Angry - Facial Cues Provide a Credible Signal of Destructive Behavior,"Predictably Angry
+Facial cues provide a credible signal of destructive behavior
+Boris van Leeuwen1, Charles N. Noussair2, Theo Offerman3,
+Sigrid Suetens4, Matthijs van Veelen5, and Jeroen van de Ven6
+November 2016"
+fdb33141005ca1b208a725796732ab10a9c37d75,A connectionist computational method for face recognition,"Int.J.Appl. Math. Comput.Sci.,2016,Vol. 26,No. 2,451–465
+DOI: 10.1515/amcs-2016-0032
+A CONNECTIONIST COMPUTATIONAL METHOD FOR FACE RECOGNITION
+FRANCISCO A. PUJOL a, HIGINIO MORA a,∗
+, JOS ´E A. GIRONA-SELVA a
+Department of Computer Technology
+University of Alicante, 03690, San Vicente del Raspeig, Alicante, Spain
+e-mail:
+In this work, a modified version of the elastic bunch graph matching (EBGM) algorithm for face recognition is introduced.
+First, faces are detected by using a fuzzy skin detector based on the RGB color space. Then, the fiducial points for the facial
+graph are extracted automatically by adjusting a grid of points to the result of an edge detector. After that, the position of
+the nodes, their relation with their neighbors and their Gabor jets are calculated in order to obtain the feature vector defining
+each face. A self-organizing map (SOM) framework is shown afterwards. Thus, the calculation of the winning neuron and
+the recognition process are performed by using a similarity function that takes into account both the geometric and texture
+information of the facial graph. The set of experiments carried out for our SOM-EBGM method shows the accuracy of our
+proposal when compared with other state-of the-art methods.
+Keywords: pattern recognition, face recognition, neural networks, self-organizing maps.
+Introduction
+libraries,
+In recent years, there has been intensive research carried"
+fd23502287ae4ca8db63e4e5080c359610398be5,Real-Time Pedestrian Detection with Deep Network Cascades,"ANGELOVA ET AL.: REAL-TIME PEDESTRIAN DETECTION WITH DEEP CASCADES
+Real-Time Pedestrian Detection With Deep
+Network Cascades
+Anelia Angelova1
+Alex Krizhevsky1
+Vincent Vanhoucke1
+Abhijit Ogale2
+Dave Ferguson2
+Google Research
+600 Amphitheatre Parkway
+Mountain View, CA, USA
+Google X
+600 Amphitheatre Parkway
+Mountain View, CA, USA"
+fd9286f0e465deffad59123f46fa4f66cb15c3e4,Learning Answer Embeddings for Visual Question Answering,"Learning Answer Embeddings for Visual Question Answering
+Hexiang Hu∗
+U. of Southern California
+Los Angeles, CA
+Wei-Lun Chao∗
+Los Angeles, CA
+U. of Southern California
+U. of Southern California
+Fei Sha
+Los Angeles, CA"
+fd8b1715ad34858bf8650ac549c4249d86edbb7c,Paper Title (use style: paper title),"International Association of Scientific Innovation and Research (IASIR)
+(An Association Unifying the Sciences, Engineering, and Applied Research)
+ISSN (Print): 2279-0063
+ISSN (Online): 2279-0071
+International Journal of Software and Web Sciences (IJSWS)
+www.iasir.net
+A survey of techniques for human segmentation from static images
+Ms.Ashwini T. Magar, Prof.J.V.Shinde
+Late G.N.Sapkal College of Engineering,
+Computer Engineering Department, Nashik,
+University of Pune, India.
+__________________________________________________________________________________________"
+fde0180735699ea31f6c001c71eae507848b190f,Face Detection and Sex Identification from Color Images using AdaBoost with SVM based Component Classifier,"International Journal of Computer Applications (0975 – 8887)
+Volume 76– No.3, August 2013
+Face Detection and Sex Identification from Color Images
+using AdaBoost with SVM based Component Classifier
+Tonmoy Das
+Lecturer, Department of EEE
+University of Information
+Technology and Sciences
+(UITS)
+Dhaka, Bangladesh
+Manamatha Sarnaker
+B.Sc. in EEE
+International University of
+Business Agriculture and
+Technology (IUBAT)
+Dhaka-1230, Bangladesh
+Md. Hafizur Rahman
+Lecturer, Department of EEE
+International University of
+Business Agriculture and"
+fd615118fb290a8e3883e1f75390de8a6c68bfde,Joint Face Alignment with Non-parametric Shape Models,"Joint Face Alignment with Non-Parametric
+Shape Models
+Brandon M. Smith and Li Zhang
+University of Wisconsin – Madison
+http://www.cs.wisc.edu/~lizhang/projects/joint-align/"
+fdc60fe4654b5efe0752acabef0ec6258062be0f,Multi-Sensor Fusion Adopted 2-D Laser Rangefinder and Camera for Pedestrian Detection,"2nd ITS World Congress, Bordeaux, France, 5–9 October 2015
+Paper number ITS-1576
+Multi-Sensor Fusion Adopted 2-D Laser Rangefinder and Camera
+for Pedestrian Detection
+Kuo-Ching Chang*, Chi-Kuo Chen, Pao-Kai Tseng
+Automotive Research & Testing Center, Taiwan
++886-4-7811222 Ext. 2323,"
+fd069af1ede370625703f7984e52f282fcd6342e,Guided Feature Transformation (GFT): A Neural Language Grounding Module for Embodied Agents,"Guided Feature Transformation (GFT): A Neural
+Language Grounding Module for Embodied Agents
+Haonan Yu†, Xiaochen Lian†, Haichao Zhang†, and Wei Xu‡
+Baidu Research, Sunnyvale CA USA
+Horizon Robotics, Cupertino CA USA"
+fdee0cf79e9a2695857afeee6526352918c9f315,Quantization for Rapid Deployment of Deep Neural Networks,"Quantization for Rapid Deployment of Deep Neural Networks
+Jun Haeng Lee∗, Sangwon Ha∗, Saerom Choi, Won-Jo Lee, Seungwon Lee
+Samsung Advanced Institute of Technology
+Samsung-ro 130, Suwon-si, Republic of Korea
+{junhaeng2.lee,"
+fdaf65b314faee97220162980e76dbc8f32db9d6,Face recognition using both visible light image and near-infrared image and a deep network,"Accepted Manuscript
+Face recognition using both visible light image and near-infrared image and a deep
+network
+Kai Guo, Shuai Wu, Yong Xu
+Reference:
+S2468-2322(17)30014-8
+0.1016/j.trit.2017.03.001
+TRIT 41
+To appear in:
+CAAI Transactions on Intelligence Technology
+Received Date: 30 January 2017
+Accepted Date: 28 March 2017
+Please cite this article as: K. Guo, S. Wu, Y. Xu, Face recognition using both visible light image and
+near-infrared image and a deep network, CAAI Transactions on Intelligence Technology (2017), doi:
+0.1016/j.trit.2017.03.001.
+This is a PDF file of an unedited manuscript that has been accepted for publication. As a service to
+our customers we are providing this early version of the manuscript. The manuscript will undergo
+opyediting, typesetting, and review of the resulting proof before it is published in its final form. Please
+note that during the production process errors may be discovered which could affect the content, and all
+legal disclaimers that apply to the journal pertain."
+f218df397afb1f070ee093bb9a19616f61b562c4,A Neural Network Model of Face Detection for Active Vision Implementation,"International Journal of Modern Engineering Research (IJMER)
+www.ijmer.com Vol. 2, Issue. 5, Sept.-Oct. 2012 pp-2969-2974 ISSN: 2249-6645
+A Neural Network Model of Face Detection for Active Vision
+Implementation
+Yasuomi D. Sato*, ** Yasutaka Kuriya*
+* Department of Brain Science and Engineering, Graduate School for Life Science and Systems Engineering, Kyushu
+** Frankfurt Institute for Advanced Studies (FIAS), Goethe University Frankfurt, Germany
+Institute of Technology, Japan
+impaired"
+f22058a3003cee6b17c6c25c8a635a653e78614c,Multimodal Attention in Recurrent Neural Networks for Visual Question Answering,"Global Journal of Computer Science and Technology: D
+Neural & Artificial Intelligence
+Volume 17 Issue 1 Version 1.0 Year 2017
+Type: Double Blind Peer Reviewed International Research Journal
+Publisher: Global Journals Inc. (USA)
+Online ISSN: 0975-4172 & Print ISSN: 0975-4350
+Multimodal Attention in Recurrent Neural Networks for Visual
+Question Answering
+By Lorena Kodra & Elinda Kajo Meçe
+Polytechnic University of Tirana"
+f26d34d8a8d082ce2c81937f61c28f3769c38372,Probability of Seeing Increases Saccadic Readiness,"Probability of Seeing Increases Saccadic Readiness
+The´ re` se Collins*
+Laboratoire Psychologie de la Perception, Universite´ Paris Descartes & CNRS, Paris, France"
+f2efc85f9e20840c591b4590fd9ed202f727546a,Distributed signature fusion for person re-identification,"Distributed Signature Fusion for
+Person Re-Identification
+Niki Martinel
+University of Udine
+Udine, Italy
+Christian Micheloni
+University of Udine
+Udine, Italy
+Claudio Piciarelli
+University of Udine
+Udine, Italy"
+f2889f3ab8e330e1ba6b23d493f8d727f49a9bc8,Recent Advances in Neural Program Synthesis,"Recent Advances in Neural Program Synthesis
+Neel Kant
+Machine Learning at Berkeley
+UC Berkeley"
+f26a8dcfbaf9f46c021c41a3545fcfa845660c47,Human Pose Regression by Combining Indirect Part Detection and Contextual Information,"Human Pose Regression by Combining Indirect Part Detection and Contextual
+Information
+Diogo C. Luvizon
+Hedi Tabia
+ETIS Lab., UMR 8051, Universit´e Paris Seine,
+Universit´e Cergy-Pontoise, ENSEA, CNRS.
+{diogo.luvizon, hedi.tabia,
+David Picard"
+f2bccfb12c1546bdf73b11904ac44b1cfa130072,RoarNet: A Robust 3D Object Detection based on RegiOn Approximation Refinement,"RoarNet: A Robust 3D Object Detection based on
+RegiOn Approximation Refinement
+Kiwoo Shin∗†, Youngwook Paul Kwon∗‡ and Masayoshi Tomizuka†"
+f2b2d50d6ca72666bab34e0f101ae1b18b434925,High-Fidelity Monocular Face Reconstruction based on an Unsupervised Model-based Face Autoencoder.,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+High-Fidelity Monocular Face Reconstruction based on an
+Unsupervised Model-based Face Autoencoder
+Ayush Tewari, Michael Zollh¨ofer, Florian Bernard, Pablo Garrido,
+Hyeongwoo Kim, Patrick P´erez, and Christian Theobalt
+(Invited Paper)"
+f29aae30c2cb4c73a3c814408ee5692e22176329,Pairwise Relational Networks using Local Appearance Features for Face Recognition,"Pairwise Relational Networks using Local
+Appearance Features for Face Recognition
+Bong-Nam Kang
+Yonghyun Kim, Daijin Kim
+Department of Creative IT Engineering
+Department of Computer Science and Engineering
+POSTECH, Korea
+POSTECH, Korea"
+f2b95f135b95c3df4f6ebe6015098a2e1667711d,Weakly Supervised Object Localization Using Things and Stuff Transfer,"Weakly Supervised Object Localization Using Things and Stuff Transfer
+Miaojing Shi1,2
+Holger Caesar1
+University of Edinburgh 2Tencent Youtu Lab
+Vittorio Ferrari1"
+f2e9494d0dca9fb6b274107032781d435a508de6,Title of Dissertation : UNCONSTRAINED FACE RECOGNITION,
+f2877cdbffb0c9a4de1f562099d2f0597bcfec0b,"COGNIMUSE: a multimodal video database annotated with saliency, events, semantics and emotion with application to summarization","Zlatintsi et al. EURASIP Journal on Image and Video Processing (2017) 2017:54
+DOI 10.1186/s13640-017-0194-1
+EURASIP Journal on Image
+and Video Processing
+RESEARCH
+Open Access
+COGNIMUSE: a multimodal video
+database annotated with saliency, events,
+semantics and emotion with application to
+summarization
+Athanasia Zlatintsi1*
+Niki Efthymiou1, Katerina Pastra4, Alexandros Potamianos1 and Petros Maragos1
+, Petros Koutras1, Georgios Evangelopoulos2, Nikolaos Malandrakis3,"
+f20f93a5b2291283c0e40bd0418927efb06acb6a,A Tale of Two Encodings : Comparing Bag-of-Words and Word 2 vec for VQA,"A Tale of Two Encodings: Comparing Bag-of-Words and Word2vec for VQA
+Berthy Feng
+Princeton University ’19
+Divya Thuremella
+Princeton University ’18"
+f2a7f9bd040aa8ea87672d38606a84c31163e171,Human Action Recognition without Human,"Human Action Recognition without Human
+Yun He, Soma Shirakabe, Yutaka Satoh, Hirokatsu Kataoka
+National Institute of Advanced Industrial Science and Technology (AIST)
+Tsukuba, Ibaraki, Japan
+{yun.he, shirakabe-s, yu.satou,"
+f2d95a5b29986a6a28746b30adfa43497b27ff02,Global Self-Similarity and Saliency Measures Based on Sparse Representations for Classification of Objects and Spatio-temporal Sequences,"Global Self-Similarity and Saliency Measures Based on
+Sparse Representations for Classification of Objects and
+Spatio-temporal Sequences.
+A DISSERTATION
+SUBMITTED TO THE FACULTY OF THE GRADUATE SCHOOL
+OF THE UNIVERSITY OF MINNESOTA
+Guruprasad Somasundaram
+IN PARTIAL FULFILLMENT OF THE REQUIREMENTS
+FOR THE DEGREE OF
+Doctor of Philosophy
+Nikolaos Papanikolopoulos
+November, 2012"
+f2b79ae191fc03a93ed50eea773279f67c8351e1,Annotating Images with Suggestions - User Study of a Tagging System,"Annotating images with suggestions — user
+study of a tagging system
+Michal Hradiˇs, Martin Kol´aˇr, Aleˇs L´an´ık, Jiˇr´ı Kr´al, Pavel Zemˇc´ık and Pavel
+Smrˇz
+Faculty of Information Technology
+VUT — Brno University of Technology
+Brno Czech Republic"
+f23d4ed760a35fbfaeab47efde3d876c1818d3d1,Dynamicity and Durability in Scalable Visual Instance Search,"Dynamicity and Durability in Scalable Visual Instance Search
+Herwig Lejsek∗
+Videntifier Technologies, Iceland
+Björn Þór Jónsson†
+Reykjavík University, Iceland
+ITU Copenhagen, Denmark
+Laurent Amsaleg
+IRISA–CNRS, France
+Friðrik Heiðar Ásmundsson∗
+Videntifier Technologies, Iceland"
+f20e0eefd007bc310d2a753ba526d33a8aba812c,Accurate and robust face recognition from RGB-D images with a deep learning approach,"Lee et al.: RGB-D FACE RECOGNITION WITH A DEEP LEARNING APPROACH
+Accurate and robust face recognition from
+RGB-D images with a deep learning
+approach
+Yuancheng Lee
+http://cv.cs.nthu.edu.tw/php/people/profile.php?uid=150
+Jiancong Chen
+http://cv.cs.nthu.edu.tw/php/people/profile.php?uid=153
+Ching-Wei Tseng
+http://cv.cs.nthu.edu.tw/php/people/profile.php?uid=156
+Computer Vision Lab,
+Department of
+Computer Science,
+National Tsing Hua
+University,
+Hsinchu, Taiwan
+Shang-Hong Lai
+http://www.cs.nthu.edu.tw/~lai/"
+f231046d5f5d87e2ca5fae88f41e8d74964e8f4f,Perceived Age Estimation from Face Images,"We are IntechOpen,
+the first native scientific
+publisher of Open Access books
+,350
+08,000
+.7 M
+Open access books available
+International authors and editors
+Downloads
+Our authors are among the
+Countries delivered to
+TOP 1%
+2.2%
+most cited scientists
+Contributors from top 500 universities
+Selection of our books indexed in the Book Citation Index
+in Web of Science™ Core Collection (BKCI)
+Interested in publishing with us?
+Contact
+Numbers displayed above are based on latest data collected."
+f22a7a7a8cdd323270d1f8173c0289d61981dc73,Face Recognition System Using Wavelet Normalization,"ISSN(Online): 2319-8753
+ISSN (Print): 2347-6710
+International Journal of Innovative Research in Science,
+Engineering and Technology
+(An ISO 3297: 2007 Certified Organization)
+Vol. 4, Issue 12, December 2015
+Face Recognition System Using
+Wavelet Normalization
+R.Anitha 1, S.Ramila 2
+Assistant Professor, Dept. of CSE, Sri Krishna College of Technology, Coimbatore, India 1
+Assistant Professor, Dept. of CSE, Sri Krishna College of Technology, Coimbatore, India 2"
+f202c78e58d33a65c19183414ad0ee91be440d61,Investigating the Influence of Biological Sex on the Behavioral and Neural Basis of Face Recognition,"New Research
+Sensory and Motor Systems
+Investigating the Influence of Biological Sex on
+the Behavioral and Neural Basis of Face
+Recognition
+K. Suzanne Scherf,1,2 Daniel B. Elbich,1 and Natalie V. Motta-Mena1
+DOI:http://dx.doi.org/10.1523/ENEURO.0104-17.2017
+Department of Psychology, Pennsylvania State University, University Park, PA 16802, and 2Department of
+Neuroscience, Pennsylvania State University, University Park, PA 16802"
+f2b547b0bbda1478cbecbd5c184c3c42c3db7e3c,Semi-parametric Image Synthesis,
+f565ac8e175e4659fadd3b5b6507ebac2d90a2b7,Interpretable Visual Question Answering by Reasoning on Dependency Trees,"IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. X, NO. X, XXX
+Interpretable Visual Question Answering by
+Reasoning on Dependency Trees
+Qingxing Cao, Xiaodan Liang, Bailin Li and Liang Lin"
+f59ac278349083a50871822ea08172258030265a,Large-Scale Fiber Tracking Through Sparsely Sampled Image Sequences of Composite Materials,"Large-Scale Fiber Tracking Through Sparsely
+Sampled Image Sequences of Composite Materials
+Youjie Zhou, Student Member, IEEE, Hongkai Yu, Student Member, IEEE, Jeff Simmons, Member, IEEE,
+Craig P. Przybyla, and Song Wang, Senior Member, IEEE
+and accurate"
+f5c99652c4c89e56156faf2bed361a15de6162d5,Towards Large-Scale Multimedia Retrieval Enriched by Knowledge about Human Interpretation Retrospective Survey,"Noname manuscript No.
+(will be inserted by the editor)
+Towards Large-Scale Multimedia Retrieval Enriched
+by Knowledge about Human Interpretation
+Retrospective Survey
+Kimiaki Shirahama · Marcin Grzegorzek
+Received: date / Accepted: date"
+f56edb6f2bf4f5bc9d54284289212b8d4a437c1b,Detection and Localization of Texture-less Objects with Deep Neural Networks,"Bachelor Thesis
+Czech
+Technical
+University
+in Prague
+Faculty of Electrical Engineering
+Department of Cybernetics
+Detection and Localization of Texture-less
+Objects with Deep Neural Networks
+Pavel Haluza
+Supervisor: Ing. Tomáš Hodaň
+May 2017"
+f5050ffebf973d4d848049dcf661891acd950b82,"Face and object discrimination in autism, and relationship to IQ and age.","J Autism Dev Disord
+DOI 10.1007/s10803-013-1955-z
+O R I G I N A L P A P E R
+Face and Object Discrimination in Autism, and Relationship to IQ
+and Age
+Pamela M. Pallett • Shereen J. Cohen •
+Karen R. Dobkins
+Ó Springer Science+Business Media New York 2013
+faces, yet"
+f553f8022b1417bc7420523220924b04e3f27b8e,Finding your Lookalike: Measuring Face Similarity Rather than Face Identity,"Finding your Lookalike:
+Measuring Face Similarity Rather than Face Identity
+Amir Sadovnik, Wassim Gharbi, Thanh Vu
+Lafayette College
+Easton, PA
+Andrew Gallagher
+Google Research
+Mountain View, CA"
+f580b0e1020ad67bdbb11e8d99a59c21a8df1e7d,Compressed Sensing using Generative Models,"Compressed Sensing using Generative Models
+Ashish Bora∗
+Ajil Jalal†
+Eric Price‡
+Alexandros G. Dimakis§"
+f5770dd225501ff3764f9023f19a76fad28127d4,Real Time Online Facial Expression Transfer with Single Video Camera,"Real Time Online Facial Expression Transfer
+with Single Video Camera"
+f51771c6cd9061acc9c468e7b44d5d3b6c552b32,Discriminative Dictionaries and Projections for Visual Classification,
+f5c83679b73ab59c2ada2b72610acdd63669b226,2d-3d Pose Invariant Face Recognition System for Multimedia Applications,"2D-3D POSE INVARIANT FACE RECOGNITION
+SYSTEM FOR MULTIMEDIA APPLICATIONS
+Authors:
+Antonio Rama1, Francesc Tarrés1
+Jürgen Rurainsky2
+{tonirama,
+Department of Signal Theory and Communications
+Universitat Politècnica de Catalunya (UPC)
+Image Processing Department
+Fraunhofer Institute for Telecommunications
+Heinrich-Hertz-Institut (HHI)
+Automatic Face recognition of people is a challenging problem which has re-
+ceived much attention during the recent years due to its potential multimedia ap-
+plications in different fields such as 3D videoconference, security applications or
+video indexing. However, there is no technique that provides a robust solution to
+all situations and different applications, yet. Face recognition includes a set of
+challenges like expression variations, occlusions of facial parts, similar identities,
+resolution of the acquired images, aging of the subjects and many others. Among
+all these challenges, most of the face recognition techniques have evolved in order
+to overcome two main problems: illumination and pose variation. Either of these"
+f5a52b69dde106cb69cb7c35dd8ca23071966876,Nonparametric Scene Parsing via Label Transfer,"Nonparametric Scene Parsing
+via Label Transfer
+Ce Liu, Member, IEEE, Jenny Yuen, Student Member, IEEE, and
+Antonio Torralba, Member, IEEE"
+f558a3812106764fb1af854a02da080cc42c197f,Amygdala volume and nonverbal social impairment in adolescent and adult males with autism.,"ORIGINAL ARTICLE
+Amygdala Volume and Nonverbal Social Impairment
+in Adolescent and Adult Males With Autism
+Brendon M. Nacewicz, BS; Kim M. Dalton, PhD; Tom Johnstone, PhD; Micah T. Long, BS; Emelia M. McAuliff, BS;
+Terrence R. Oakes, PhD; Andrew L. Alexander, PhD; Richard J. Davidson, PhD
+Background: Autism is a syndrome of unknown cause,
+marked by abnormal development of social behavior. At-
+tempts to link pathological features of the amygdala, which
+plays a key role in emotional processing, to autism have
+shown little consensus.
+Objective: To evaluate amygdala volume in individu-
+als with autism spectrum disorders and its relationship
+to laboratory measures of social behavior to examine
+whether variations in amygdala structure relate to symp-
+tom severity.
+Design: We conducted 2 cross-sectional studies of amyg-
+dala volume, measured blind to diagnosis on high-
+resolution, anatomical magnetic resonance images. Par-
+ticipants were 54 males aged 8 to 25 years, including 23
+with autism and 5 with Asperger syndrome or pervasive"
+f558af209dd4c48e4b2f551b01065a6435c3ef33,An Enhanced Attribute Reranking Design for Web Image Search,"International Journal of Emerging Technology in Computer Science & Electronics (IJETCSE)
+ISSN: 0976-1353 Volume 23 Issue 1 –JUNE 2016.
+AN ENHANCED ATTRIBUTE
+RERANKING DESIGN FOR WEB IMAGE
+SEARCH
+Sai Tejaswi Dasari#1 and G K Kishore Babu*2
+#Student,Cse, CIET, Lam,Guntur, India
+* Assistant Professort,Cse, CIET, Lam,Guntur , India"
+f5083b4e28e42a2da7bafd2a742ab8e21c12559f,Deep Learning for Automated Image Classification of Seismic Damage to Built Infrastructure,"Eleventh U.S. National Conference on Earthquake Engineering
+Integrating Science, Engineering & Policy
+June 25-29, 2018
+Los Angeles, California
+DEEP LEARNING FOR AUTOMATED
+IMAGE CLASSIFICATION OF SEISMIC
+DAMAGE TO BUILT INFRASTRUCTURE
+B. Patterson1 , G. Leone1, M. Pantoja1, and A. Behrouzi2"
+f5adb841e30eb635b91e95c03575f3b8767c9ed5,Learning Optimal Parameters For Multi-target Tracking,"WANG, FOWLKES: LEARNING MULTI-TARGET TRACKING
+Learning Optimal Parameters
+For Multi-target Tracking
+Shaofei Wang
+Charless Fowlkes
+Dept of Computer Science
+University of California
+Irvine, CA, USA"
+e378ce25579f3676ca50c8f6454e92a886b9e4d7,Robust Video Super-Resolution with Learned Temporal Dynamics,"Robust Video Super-Resolution with Learned Temporal Dynamics
+Ding Liu1 Zhaowen Wang2 Yuchen Fan1 Xianming Liu3
+Zhangyang Wang4 Shiyu Chang5 Thomas Huang1
+University of Illinois at Urbana-Champaign 2Adobe Research
+Facebook 4Texas A&M University 5IBM Research"
+e393a038d520a073b9835df7a3ff104ad610c552,Automatic temporal segment detection via bilateral long short-term memory recurrent neural networks,"Automatic temporal segment
+detection via bilateral long short-
+term memory recurrent neural
+networks
+Bo Sun
+Siming Cao
+Jun He
+Lejun Yu
+Liandong Li
+Bo Sun, Siming Cao, Jun He, Lejun Yu, Liandong Li, “Automatic temporal segment
+detection via bilateral long short-term memory recurrent neural networks,” J.
+Electron. Imaging 26(2), 020501 (2017), doi: 10.1117/1.JEI.26.2.020501.
+Downloaded From: http://electronicimaging.spiedigitallibrary.org/ on 03/03/2017 Terms of Use: http://spiedigitallibrary.org/ss/termsofuse.aspx"
+e312e7657cb98cf03d3b2bf8b21b0ff75fbd4613,No 272 2 D Articulated Human Pose Estimation and Retrieval in ( Almost ) Unconstrained Still Images,"ETH Zurich, D-ITET, BIWI
+Technical Report No 272
+2D Articulated Human Pose Estimation and Retrieval in (Almost)
+Unconstrained Still Images
+M. Eichner, M. Marin-Jimenez, A. Zisserman, V. Ferrari"
+e3f2e337d4470545398cc6753a54c21debf9c37b,Potential Contrast – A New Image Quality Measure,"Potential Contrast – A New Image Quality Measure
+Arie Shaus, Shira Faigenbaum-Golovin, Barak Sober, Eli Turkel, Eli Piasetzky; Tel Aviv University; Tel Aviv, Israel"
+e3b0caa1ff9067665e349a2480b057e2afdbc41f,Interactive Effects of Obvious and Ambiguous Social Categories on Perceptions of Leadership: When Double-Minority Status May Be Beneficial.,"702373 PSPXXX10.1177/0146167217702373Personality and Social Psychology BulletinWilson et al.
+research-article2017
+Article
+Interactive Effects of Obvious and
+Ambiguous Social Categories on
+Perceptions of Leadership: When
+Double-Minority Status May
+Be Beneficial
+Personality and Social
+Psychology Bulletin
+2017, Vol. 43(6) 888 –900
+© 2017 by the Society for Personality
+and Social Psychology, Inc
+Reprints and permissions:
+sagepub.com/journalsPermissions.nav
+DOI: 10.1177/0146167217702373
+https://doi.org/10.1177/0146167217702373
+journals.sagepub.com/home/pspb
+John Paul Wilson1, Jessica D. Remedios2, and Nicholas O. Rule3"
+e315959d6e806c8fbfc91f072c322fb26ce0862b,An Efficient Face Recognition System Based on Sub-Window Extraction Algorithm,"An Efficient Face Recognition System Based on Sub-Window
+International Journal of Soft Computing and Engineering (IJSCE)
+ISSN: 2231-2307, Volume-1, Issue-6, January 2012
+Extraction Algorithm
+Manish Gupta, Govind sharma"
+e39d1345a5aef8a5ee32c0a774de877b903de50c,Unsupervised Learning of Semantics of Object Detections for Scene Categorization,"Unsupervised Learning of Semantics of Object
+Detections for Scene Categorization
+Grégoire Mesnil, Salah Rifai, Antoine Bordes, Xavier Glorot, Yoshua Bengio
+and Pascal Vincent"
+e38c93bb8f7ee103eba4b78443d94f55a63bdf08,Extracting Pathlets From Weak Tracking Data ∗,"Extracting Pathlets From Weak Tracking Data∗
+Kevin Streib
+James W. Davis
+Dept. of Computer Science and Engineering
+Ohio State University, Columbus, OH 43210"
+e33b1833b2d0cd7b0450b22b96a567a59c9e4685,Attribute Discovery via Predictable Discriminative Binary Codes,"Attribute Discovery via
+Predictable Discriminative Binary Codes
+Mohammad Rastegari†
+Ali Farhadi‡
+David Forsyth†
+University of Illinois at Urbana Champaign
+Carnegie Mellon University
+http://vision.ri.cmu.edu/projects/dbc/dbc.html"
+e3f63d12be07c743e7590957f4ed38b06cd98aba,A Novel Approach to Face Detection Algorithm,"A Novel Approach to Face Detection Algorithm
+{tag} {/tag}
+International Journal of Computer Applications
+© 2011 by IJCA Journal
+Number 2 - Article 4
+Year of Publication: 2011
+Authors:
+Pritam Singh
+A.S. Thoke
+Kesari Verma
+10.5120/3537-4836"
+e3c420b29b8590442decd330ef70494c2209f149,Learning a Part-Based Pedestrian Detector in a Virtual World,"Learning a Part-based Pedestrian Detector in Virtual
+World
+Jiaolong Xu, David V´azquez, Antonio M. L´opez Member, IEEE, Javier Mar´ın and Daniel Ponsa"
+e39a0834122e08ba28e7b411db896d0fdbbad9ba,Maximum Likelihood Estimation of Depth Maps Using Photometric Stereo,"Maximum Likelihood Estimation of Depth Maps
+Using Photometric Stereo
+Adam P. Harrison, Student Member, IEEE, and Dileepan Joseph, Member, IEEE"
+e30dc2abac4ecc48aa51863858f6f60c7afdf82a,Facial Signs and Psycho-physical Status Estimation for Well-being Assessment,"Facial Signs and Psycho-physical Status Estimation for Well-being
+Assessment
+F. Chiarugi, G. Iatraki, E. Christinaki, D. Manousos, G. Giannakakis, M. Pediaditis,
+A. Pampouchidou, K. Marias and M. Tsiknakis
+Computational Medicine Laboratory, Institute of Computer Science, Foundation for Research and Technology - Hellas,
+{chiarugi, giatraki, echrist, mandim, ggian, mped, pampouch, kmarias,
+70013 Vasilika Vouton, Heraklion, Crete, Greece
+Keywords:
+Facial Expression, Stress, Anxiety, Feature Selection, Well-being Evaluation, FACS, FAPS, Classification."
+e3b40ffd57a676aef377ef463849fd6b9a3d3b5d,Morphable hundred-core heterogeneous architecture for energy-aware computation,"Received on 16th April 2014
+Revised on 23rd June 2014
+Accepted on 7th August 2014
+doi: 10.1049/iet-cdt.2014.0078
+www.ietdl.org
+ISSN 1751-8601
+Morphable hundred-core heterogeneous architecture
+for energy-aware computation
+Nuno Neves, Henrique Mendes, Ricardo Jorge Chaves, Pedro Tomás, Nuno Roma
+INESC-ID, Instituto Superior Técnico, Universidade de Lisboa, Rua Alves Redol, 9, 1000-029 Lisboa, Portugal
+E-mail:"
+e3e44385a71a52fd483c58eb3cdf8d03960c0b70,A Hierarchical Graphical Model for Recognizing Human Actions and Interactions in Video,"Copyright
+Sangho Park"
+e3582dffe5f3466cc5bc9d736934306c551ab33c,AttGAN: Facial Attribute Editing by Only Changing What You Want,"SUBMITTED MANUSCRIPT TO IEEE TRANSACTIONS ON IMAGE PROCESSING
+AttGAN: Facial Attribute Editing by
+Only Changing What You Want
+Zhenliang He, Wangmeng Zuo, Senior Member, IEEE, Meina Kan, Member, IEEE,
+Shiguang Shan, Senior Member, IEEE, and Xilin Chen, Fellow, IEEE
+i.e.,"
+e3b92cc14f2c33bfdc07b794292a30384f8d0ad1,Local Segmentation for Pedestrian Tracking in Dense Crowds,"Local Segmentation for Pedestrian Tracking in
+Dense Crowds
+Clement Creusot
+Toshiba RDC, Kawasaki, Japan,
+http://clementcreusot.com/pedestrian"
+e3bbdd6efc906f6ae17e5b1d62497420991b977d,Visual Explanation by High-Level Abduction: On Answer-Set Programming Driven Reasoning about Moving Objects,"Visual Explanation by High-Level Abduction
+On Answer-Set Programming Driven Reasoning about Moving Objects
+Jakob Suchan1, Mehul Bhatt1,2, Przemysław Wał˛ega3, and Carl Schultz4
+Cognitive Vision – www.cognitive-vision.org
+EASE CRC – http://ease-crc.org
+HCC Lab., University of Bremen, Germany, 2MPI Lab., Örebro University, Sweden
+University of Warsaw, Poland, and 4Aarhus University, Denmark"
+e3f0c5a51d6c5085fbcb64d872d7db438da27474,Ubiquitously Supervised Subspace Learning,"Ubiquitously Supervised Subspace Learning
+Jianchao Yang, Student Member, IEEE, Shuicheng Yan, Member, IEEE, and Thomas S. Huang, Life Fellow, IEEE"
+e39f9565903a9701657ce3ade94c37d8a12f702e,Audio-Visual Scene Analysis with Self-Supervised Multisensory Features,"Audio-Visual Scene Analysis with
+Self-Supervised Multisensory Features
+Andrew Owens Alexei A. Efros
+UC Berkeley"
+e39af9fb267c9deb81f9c73bbd71f5674b4358c0,Conceptualizing and Measuring Well-Being Using Statistical Semantics and Numerical Rating Scales,"Conceptualizing and Measuring Well-Being Using Statistical Semantics and Numerical
+Rating Scales
+Kjell, Oscar
+Published: 2018-03-01
+Document Version
+Publisher's PDF, also known as Version of record
+Link to publication
+Citation for published version (APA):
+Kjell, O. (2018). Conceptualizing and Measuring Well-Being Using Statistical Semantics and Numerical Rating
+Scales Lund
+General rights
+Copyright and moral rights for the publications made accessible in the public portal are retained by the authors
+and/or other copyright owners and it is a condition of accessing publications that users recognise and abide by the
+legal requirements associated with these rights.
+• Users may download and print one copy of any publication from the public portal for the purpose of private
+study or research.
+• You may not further distribute the material or use it for any profit-making activity or commercial gain
+• You may freely distribute the URL identifying the publication in the public portal
+LUND UNIVERSITYPO Box 117221 00 Lund+46 46-222 00 00"
+e31f24b92a19aeb9a7611a9ca09223c8f5238ae1,Expression Empowered ResiDen Network for Facial Action Unit Detection,"RESIDEN: RESIDUE FLOW IN DENSENET
+Expression Empowered ResiDen Network
+for Facial Action Unit Detection
+Shreyank Jyoti
+Abhinav Dhall
+Learning Affect and Semantic Image
+AnalysIs (LASII) Group,
+Indian Institute of Technology Ropar
+Punjab, India"
+e3917d6935586b90baae18d938295e5b089b5c62,Face localization and authentication using color and depth images,"Face Localization and Authentication
+Using Color and Depth Images
+Filareti Tsalakanidou, Sotiris Malassiotis, and Michael G. Strintzis, Fellow, IEEE"
+e3144f39f473e238374dd4005c8b83e19764ae9e,Next-Flow: Hybrid Multi-Tasking with Next-Frame Prediction to Boost Optical-Flow Estimation in the Wild,"Next-Flow: Hybrid Multi-Tasking with Next-Frame Prediction to Boost
+Optical-Flow Estimation in the Wild
+Nima Sedaghat
+University of Freiburg
+Germany"
+e38709a2ec162a6f2a2fa3b4b6463e752267b154,Super-resolution for Face Recognition Based on Correlated Features and Nonlinear Mappings,"978-1-4244-4296-6/10/$25.00 ©2010 IEEE
+ICASSP 2010"
+e309632d479b8f59e615d0f3c4bc69938361d187,Deep Learning for Imbalance Data Classification using Class Expert Generative Adversarial Network,"Deep Learning for Imbalance Data Classification using Class Expert
+Generative Adversarial Network
+Fannya, Tjeng Wawan Cenggoroa,b
+Computer Science Department, School of Computer Science, Bina Nusantara University, Jakarta, Indonesia 11480
+Bioinformatics and Data Science Research Center, Bina Nusantara University, Jakarta, Indonesia 11480"
+e3c5c5623af4b1a1f719cac24850dcaa6a304bd5,Training Effective Node Classifiers for Cascade Classification,"ppearing in Int. J. Comput. Vis.; content may change prior to final publication.
+Training Effective Node Classifiers for Cascade
+Classification
+Chunhua Shen · Peng Wang · Sakrapee Paisitkriangkrai ·
+Anton van den Hengel
+December 2012"
+e3660a13fcd75cf876a6ce355c2c1a578cfb57cb,2DHMM-Based Face Recognition Method,"2DHMM-BASED FACE RECOGNITION
+METHOD
+Janusz Bobulski1
+Czestochowa University of Technology
+Institute of Computer and Information Science
+Dabrowskiego Street 73, 42-200 Czestochowa, Poland
+Summary. So far many methods of recognizing the face arose, each has the merits
+and demerits. Among these methods are methods based on Hidden Markov models,
+and their advantage is the high efficiency. However, the traditional HMM uses one-
+dimensional data, which is not a good solution for image processing, because the
+images are two-dimensional. Transforming the image in a one-dimensional feature
+vector, we remove some of the information that can be used for identification. The
+article presents the full ergodic 2D-HMM and applied for face identification.
+Introduction
+Face recognition has great potentials in many applications dealing with unco-
+operative subjects, in which the full power of face recognition being a passive
+biometric technique can be implemented and utilised. Face recognition has
+been an active area of research in image processing and computer vision due
+to its extensive range of prospective applications relating to biometrics, infor-
+mation security, video surveillance, law enforcement, identity authentication,
+cf77d2e7411814b30aca203376709b12a0eb3e08,Obtaining Better Image Representations by Combining Complementary Activation Features of Multiple ConvNet Layers for Transfer Learning,"Obtaining Better Image Representations by
+Combining Complementary Activation Features of
+Multiple ConvNet Layers for Transfer Learning
+Jumabek Alikhanov
+School of Computer and
+Information Engineering
+Seunghyun Ko
+School of Computer and
+Information Engineering
+Jo Geun Sik
+School of Computer and
+Information Engineering
+Inha University Incheon, South Korea
+Inha University Incheon, South Korea
+Inha University Incheon, South Korea
+Email:
+Email:
+Email:"
+cf98c333c8d7d5870c1ce5538bb0c3de3de16657,Panoptic Segmentation,"Panoptic Segmentation
+Alexander Kirillov1,2 Kaiming He1 Ross Girshick1 Carsten Rother2
+Piotr Doll´ar1
+Facebook AI Research (FAIR)
+HCI/IWR, Heidelberg University, Germany"
+cf40951840bfa9b8721d722e9422c73e3a6fbf59,Real-time Appearance-based Person Re-identification Over Multiple KinectTM Cameras,"Real-time appearance-based person re-identification
+over multiple KinectTMcameras
+Riccardo Satta, Federico Pala, Giorgio Fumera and Fabio Roli
+Department of Electrical and Electronic Engineering, University of Cagliari, Italy
+{riccardo.satta, fumera,
+Keywords:
+Video surveillance, Person Re-identification, Kinect"
+cf280435c471ee099148c4eb9eb2e106ccb2b218,HoME: a Household Multimodal Environment,"HoME: a Household Multimodal Environment
+Simon Brodeur1, Ethan Perez2,3∗, Ankesh Anand2∗, Florian Golemo2,4∗,
+Luca Celotti1, Florian Strub2,5, Jean Rouat1, Hugo Larochelle6,7, Aaron Courville2,7
+Université de Sherbrooke, 2MILA, Université de Montréal, 3Rice University, 4INRIA Bordeaux,
+5Univ. Lille, Inria, UMR 9189 - CRIStAL, 6Google Brain, 7CIFAR Fellow
+{simon.brodeur, luca.celotti,
+{florian.golemo,
+{ankesh.anand,"
+cfc22c35ad191cf9d70f4a3655840748b0e1322c,Real-Time Dense Mapping for Self-driving Vehicles using Fisheye Cameras,"Real-Time Dense Mapping
+for Self-Driving Vehicles using Fisheye Cameras
+Zhaopeng Cui1, Lionel Heng2, Ye Chuan Yeo2, Andreas Geiger3, Marc Pollefeys1,4, and Torsten Sattler1"
+cfcf66e4b22dc7671a5941e94e9d4afae75ba2f8,The Cramer Distance as a Solution to Biased Wasserstein Gradients,"The Cramer Distance as a Solution to Biased
+Wasserstein Gradients
+Marc G. Bellemare1, Ivo Danihelka1,3, Will Dabney1, Shakir Mohamed1
+Balaji Lakshminarayanan1, Stephan Hoyer2, Rémi Munos1
+Google DeepMind, London UK, 2Google
+CoMPLEX, Computer Science, UCL"
+cfffae38fe34e29d47e6deccfd259788176dc213,Training bookcowgrass flower ? ? water sky doggrass water boat water chair road ? cow grass chair grass dog building ?,"TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. X, NO. X, DECEMBER 2012
+Matrix Completion for Weakly-supervised
+Multi-label Image Classification
+Ricardo Cabral, Fernando De la Torre, João P. Costeira, Alexandre Bernardino"
+cfd4004054399f3a5f536df71f9b9987f060f434,Person Recognition in Social Media Photos,"Person Recognition in Personal Photo Collections
+Seong Joon Oh,Rodrigo Benenson, Mario Fritz, and Bernt Schiele, Fellow, IEEE"
+cf216fcd4cf537e53b9ed4f46e59c445e845cfc5,Nonnegative Restricted Boltzmann Machines for Parts-based Representations Discovery and Predictive Model Stabilization,"Noname manuscript No.
+(will be inserted by the editor)
+Nonnegative Restricted Boltzmann Machines for
+Parts-based Representations Discovery and
+Predictive Model Stabilization
+Tu Dinh Nguyen, Truyen Tran, Dinh
+Phung, Svetha Venkatesh
+the date of receipt and acceptance should be inserted later"
+cf8f5cad6aa87a6364f6b5dd985116b902050acf,Slack and Margin Rescaling as Convex Extensions of Supermodular Functions,"Slack and Margin Rescaling as Convex Extensions of
+Supermodular Functions
+Matthew B. Blaschko
+Center for Processing Speech & Images
+Departement Elektrotechniek, KU Leuven
+Kasteelpark Arenberg 10
+3001 Leuven, Belgium
+cfd933f71f4a69625390819b7645598867900eab,Person Authentication Using Face And Palm Vein: A Survey Of Recognition And Fusion Techniques,"INTERNATIONAL JOURNAL OF TECHNOLOGY ENHANCEMENTS AND EMERGING ENGINEERING RESEARCH, VOL 3, ISSUE 03 55
+ISSN 2347-4289
+Person Authentication Using Face And Palm Vein:
+A Survey Of Recognition And Fusion Techniques
+Preethi M, Dhanashree Vaidya, Dr. S. Kar, Dr. A. M. Sapkal, Dr. Madhuri A. Joshi
+Dept. of Electronics and Telecommunication, College of Engineering, Pune, India,
+Image Processing & Machine Vision Section, Electronics & Instrumentation Services Division, BARC
+Email:"
+cf2a313b039b8adfee2a14ca5e81f2f5da52b0f2,Learning Fashion Traits with Label Uncertainty,"Learning Fashion Traits with Label Uncertainty
+Gal Levi
+Eli Alshan
+Assaf Neuberger
+Amazon Lab 126
+Herzliya, Israel 4672560
+Amazon Lab 126
+Herzliya, Israel 4672560
+Amazon Lab 126
+Herzliya, Israel 4672560
+Sharon Alpert
+Amazon Lab 126
+Herzliya, Israel 4672560
+Eduard Oks
+Amazon Lab 126
+Herzliya, Israel 4672560"
+cf65c5cfa2a2b0370407810479f179f5fbe88fb1,Multi-Modal Biometrics: An Overview,"Multi-Modal Biometrics: An Overview
+Kevin W. Bowyer,1 K. I. Chang,1 P. Yan,1 P. J. Flynn,1 E. Hansley,2 S. Sarkar2
+1. Computer Science and Engineering / University of Notre Dame / Notre Dame, IN 46556 USA
+2. Computer Science and Engineering / University of South Florida / Tampa, FL 33620 USA
+cf875336d5a196ce0981e2e2ae9602580f3f6243,"7 What 1 S It Mean for a Computer to ""have"" Emotions?","7 What 1
+Rosalind W. Picard
+It Mean for a Computer to ""Have"" Emotions?
+There is a lot of talk about giving machines emotions, some of
+it fluff. Recently at a large technical meeting, a researcher stood up
+and talked of how a Barney stuffed animal [the purple dinosaur for
+kids] ""has emotions."" He did not define what he meant by this, but
+after repeating it several times, it became apparent that children
+attributed emotions to Barney, and that Barney had deliberately
+expressive behaviors that would encourage the kids to think. Bar-
+ney had emotions. But kids have attributed emotions to dolls and
+stuffed animals for as long as we know; and most of my technical
+colleagues would agree that such toys have never had and still do
+not have emotions. What is different now that prompts a researcher
+to make such a claim? Is the computational plush an example of a
+computer that really does have emotions?
+If not Barney, then what would be an example of a computa-
+tional system that has emotions? I am not a philosopher, and this
+paper will not be a discussion of the meaning of this question in
+any philosophical sense. However, as an engineer I am interested
+cfd8c66e71e98410f564babeb1c5fd6f77182c55,Comparative Study of Coarse Head Pose Estimation,"Comparative Study of Coarse Head Pose Estimation
+Lisa M. Brown and Ying-Li Tian
+IBM T.J. Watson Research Center
+Hawthorne, NY 10532"
+cfbb2d32586b58f5681e459afd236380acd86e28,Improving alignment of faces for recognition,"Improving Alignment of Faces for Recognition
+Md. Kamrul Hasan
+Christopher J. Pal
+D´epartement de g´enie informatique et g´enie logiciel
+´Ecole Polytechnique de Montr´eal,
+D´epartement de g´enie informatique et g´enie logiciel
+´Ecole Polytechnique de Montr´eal,
+Qu´ebec, Canada
+Qu´ebec, Canada"
+cfa92e17809e8d20ebc73b4e531a1b106d02b38c,Parametric classification with soft labels using the evidential EM algorithm: linear discriminant analysis versus logistic regression,"Advances in Data Analysis and Classification manuscript No.
+(will be inserted by the editor)
+Parametric Classification with Soft Labels using the
+Evidential EM Algorithm
+Linear Discriminant Analysis vs. Logistic Regression
+Benjamin Quost · Thierry Denœux ·
+Shoumei Li
+Received: date / Accepted: date"
+cf5a0115d3f4dcf95bea4d549ec2b6bdd7c69150,Detection of emotions from video in non-controlled environment. (Détection des émotions à partir de vidéos dans un environnement non contrôlé),"Detection of emotions from video in non-controlled
+environment
+Rizwan Ahmed Khan
+To cite this version:
+Rizwan Ahmed Khan. Detection of emotions from video in non-controlled environment. Image
+Processing. Universit´e Claude Bernard - Lyon I, 2013. English. <NNT : 2013LYO10227>.
+<tel-01166539v2>
+HAL Id: tel-01166539
+https://tel.archives-ouvertes.fr/tel-01166539v2
+Submitted on 23 Jun 2015
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destin´ee au d´epˆot et `a la diffusion de documents
+scientifiques de niveau recherche, publi´es ou non,
+´emanant des ´etablissements d’enseignement et de"
+cffc94574c8796cbd8234422a979e57e67eca7b5,Multiracial Children's and Adults' Categorizations of Multiracial Individuals.,"Journal of Cognition and Development
+ISSN: 1524-8372 (Print) 1532-7647 (Online) Journal homepage: http://www.tandfonline.com/loi/hjcd20
+Multiracial Children’s and Adults’ Categorizations
+of Multiracial Individuals
+Steven O. Roberts & Susan A. Gelman
+To cite this article: Steven O. Roberts & Susan A. Gelman (2017) Multiracial Children’s and
+Adults’ Categorizations of Multiracial Individuals, Journal of Cognition and Development, 18:1,
+-15, DOI: 10.1080/15248372.2015.1086772
+To link to this article: http://dx.doi.org/10.1080/15248372.2015.1086772
+Accepted author version posted online: 23
+Feb 2016.
+Published online: 23 Feb 2016.
+Submit your article to this journal
+Article views: 75
+View related articles
+View Crossmark data
+Citing articles: 2 View citing articles
+Full Terms & Conditions of access and use can be found at
+http://www.tandfonline.com/action/journalInformation?journalCode=hjcd20
+Download by: [University of Michigan]"
+cf009a6b02fbef514a4bac9695a928080ceac764,COLUMBUS: Feature Selection on Data Analytics Systems,"COLUMBUS: Feature Selection on Data Analytics Systems
+Arun Kumar
+Pradap Konda
+Christopher R´e
+February 28, 2013"
+cf7e6d057e6ef01904770be3dfc9da29f9c1e197,An Adaptive Detection Method of Multiple Faces,"TELKOMNIKA Indonesian Journal of Electrical Engineering
+Vol.12, No.4, April 2014, pp. 2743 ~ 2752
+DOI: http://dx.doi.org/10.11591/telkomnika.v12i4.4368
+An Adaptive Detection Method of Multiple Faces
+ 2743
+China West Normal University, No. 1 Shida Road, Computer School, Nanchong, China
+*Corresponding author, e-mail:
+Wei Li"
+cf7b4fa0a8b58473b94496f353f3c8d0f9531b71,Recognition of 3 D Frontal Face Images Using Local Ternary Patterns and MLDA Algorithm,"International Journal of Science and Research (IJSR)
+ISSN (Online): 2319-7064
+Impact Factor (2012): 3.358
+Recognition of 3D Frontal Face Images Using Local
+Ternary Patterns and MLDA Algorithm
+Dr. T. Karthikeyan1, T. K. Sumathi2
+Associate Professor, PSG College of Arts & Science, Coimbatore
+Research Scholar, Karpagam University, Coimbatore
+identification"
+cfc9056155bf32648448b588a752f694b4e8249c,Combining Contrast Information and Local Binary Patterns for Gender Classification,"Combining Contrast Information and Local
+Binary Patterns for Gender Classification
+Juha Ylioinas, Abdenour Hadid, and Matti Pietik¨ainen
+Machine Vision Group, PO Box 4500,
+FI-90014 University of Oulu, Finland"
+cfdc632adcb799dba14af6a8339ca761725abf0a,Probabilistic Formulations of Regression with Mixed Guidance,"Probabilistic Formulations of Regression with Mixed
+Guidance
+Aubrey Gress, Ian Davidson University of California, Davis"
+cfbfcf538c1c9bbf170a524995098fe4aacde374,Symmetric generalized low rank approximations of matrices,"978-1-4673-0046-9/12/$26.00 ©2012 IEEE
+ICASSP 2012"
+cfc30ce53bfc204b8764ebb764a029a8d0ad01f4,Regularizing Deep Neural Networks by Noise: Its Interpretation and Optimization,"Regularizing Deep Neural Networks by Noise:
+Its Interpretation and Optimization
+Hyeonwoo Noh
+Tackgeun You
+Dept. of Computer Science and Engineering, POSTECH, Korea
+Jonghwan Mun
+Bohyung Han"
+cf6527d8d42a9958eea7d8d1f90ea4c86d591408,Convolutional Neural Network-Based Classification of Driver’s Emotion during Aggressive and Smooth Driving Using Multi-Modal Camera Sensors,"Article
+Convolutional Neural Network-Based Classification
+of Driver’s Emotion during Aggressive and Smooth
+Driving Using Multi-Modal Camera Sensors
+Kwan Woo Lee, Hyo Sik Yoon, Jong Min Song and Kang Ryoung Park *
+Division of Electronics and Electrical Engineering, Dongguk University, 30 Pildong-ro 1-gil, Jung-gu,
+Seoul 100-715, Korea; (K.W.L.); (H.S.Y.);
+(J.M.S.)
+* Correspondence: Tel.: +82-10-3111-7022; Fax: +82-2-2277-8735
+Received: 20 February 2018; Accepted: 21 March 2018; Published: 23 March 2018"
+cf74dceae075bde213d2aafad115d2afc893c21b,Master's Thesis : Deep Learning for Visual Recognition,"Master’s Thesis
+Deep Learning for Visual Recognition
+Supervised by Nicolas Thome and Matthieu Cord
+Remi Cadene
+Wednesday 7th September, 2016"
+cf805d478aeb53520c0ab4fcdc9307d093c21e52,Finding Tiny Faces in the Wild with Generative Adversarial Network,"Finding Tiny Faces in the Wild with Generative Adversarial Network
+Yancheng Bai1
+Yongqiang Zhang1
+Mingli Ding2
+Bernard Ghanem1
+Visual Computing Center, King Abdullah University of Science and Technology (KAUST)
+School of Electrical Engineering and Automation, Harbin Institute of Technology (HIT)
+Institute of Software, Chinese Academy of Sciences (CAS)
+{zhangyongqiang,
+Figure1. The detection results of tiny faces in the wild. (a) is the original low-resolution blurry face, (b) is the result of
+re-sizing directly by a bi-linear kernel, (c) is the generated image by the super-resolution method, and our result (d) is learned
+y the super-resolution (×4 upscaling) and refinement network simultaneously. Best viewed in color and zoomed in."
+cf103f2fe5595a55f918ecbd9119800f4747fc8e,Human recognition based on ear shape images using PCA-Wavelets and different classification methods,"Human recognition based on ear shape images using
+PCA-Wavelets and different classification methods
+Ali Mahmoud Mayya1* and Mariam Mohammad Saii
+PhD student, Computer Engineering, Tishreen University, Syria"
+cf86616b5a35d5ee777585196736dfafbb9853b5,Learning Multiscale Active Facial Patches for Expression Analysis,"This article has been accepted for inclusion in a future issue of this journal. Content is final as presented, with the exception of pagination.
+Learning Multiscale Active Facial Patches for
+Expression Analysis
+Lin Zhong, Qingshan Liu, Peng Yang, Junzhou Huang, and Dimitris N. Metaxas, Senior Member, IEEE"
+cabe652bb3b150f35db9db1434cec69f081c4a60,Towards Scene Understanding: Deep and Layered Recognition and Heuristic Parsing of Objects,"Towards Scene Understanding: Deep and Layered Recognition
+nd Heuristic Parsing of Objects
+Dissertation Submitted to
+Xi’an Jiaotong University
+In partial fulfillment of the requirement
+for the degree of
+Doctor of Engineering Science
+Yang Wu
+(Control Science and Engineering)
+Supervisor: Prof. Nanning Zheng
+May 2010"
+cacd51221c592012bf2d9e4894178c1c1fa307ca,Face and Expression Recognition Techniques: A Review,"ISSN: 2277-3754
+ISO 9001:2008 Certified
+International Journal of Engineering and Innovative Technology (IJEIT)
+Volume 4, Issue 11, May 2015
+Face and Expression Recognition Techniques: A
+Review
+Advanced Communication & Signal Processing Laboratory, Department of Electronics & Communication
+engineering, Government College of Engineering Kannur, Kerala, India.
+Rishin C. K, Aswani Pookkudi, A. Ranjith Ram"
+ca0363d29e790f80f924cedaf93cb42308365b3d,Facial Expression Recognition in Image Sequences Using Geometric Deformation Features and Support Vector Machines,"Facial Expression Recognition in Image Sequences
+using Geometric Deformation Features and Support
+Vector Machines
+Irene Kotsiay and Ioannis Pitasy,Senior Member IEEE
+yAristotle University of Thessaloniki
+Department of Informatics
+Box 451
+54124 Thessaloniki, Greece
+email:"
+cae87d5a724507e06f6d8178cfbec043db854fe3,Bayesian Nonparametric Latent Feature Models,"Bayesian Nonparametric Latent Feature Models
+Kurt Miller
+Electrical Engineering and Computer Sciences
+University of California at Berkeley
+Technical Report No. UCB/EECS-2011-78
+http://www.eecs.berkeley.edu/Pubs/TechRpts/2011/EECS-2011-78.html
+June 28, 2011"
+cac3bf3ceba79e6a6c8e51eb44c6862b81661f85,Learning Data-Driven Representations for Robust Monocular Computer Vision Applications,"Learning Data-Driven Representations for
+Robust Monocular Computer Vision
+Applications
+Dissertation
+der Mathematisch-Naturwissenschaftlichen Fakultät
+der Eberhard Karls Universität Tübingen
+zur Erlangung des Grades eines
+Doktors der Naturwissenschaften
+(Dr. rer.-nat.)
+Dipl.-math. Christian Joachim Herdtweck
+vorgelegt von
+us Stuttgart
+Tübingen"
+cad52d74c1a21043f851ae14c924ac689e197d1f,From Ego to Nos-Vision: Detecting Social Relationships in First-Person Views,"From Ego to Nos-vision:
+Detecting Social Relationships in First-Person Views
+Stefano Alletto, Giuseppe Serra, Simone Calderara, Francesco Solera and Rita Cucchiara
+Universit`a degli Studi di Modena e Reggio Emilia
+Via Vignolese 905, 41125 Modena - Italy"
+ca6b2b75db9ff8444744df9149601a4ef2beefd4,MirBot: A Multimodal Interactive Image Retrieval System,"MirBot: A multimodal interactive
+image retrieval system
+Antonio Pertusa, Antonio-Javier Gallego, and Marisa Bernabeu
+DLSI, University of Alicante
+http://www.dlsi.ua.es"
+cad24ba99c7b6834faf6f5be820dd65f1a755b29,"Understanding hand-object manipulation by modeling the contextual relationship between actions, grasp types and object attributes","Understanding hand-object
+manipulation by modeling the
+ontextual relationship between actions,
+grasp types and object attributes
+Minjie Cai1, Kris M. Kitani2 and Yoichi Sato1
+Journal Title
+XX(X):1–14
+(cid:13)The Author(s) 2016
+Reprints and permission:
+sagepub.co.uk/journalsPermissions.nav
+DOI: 10.1177/ToBeAssigned
+www.sagepub.com/"
+cadba72aa3e95d6dcf0acac828401ddda7ed8924,Algorithms and VLSI Architectures for Low-Power Mobile Face Verification,"THÈSE PRÉSENTÉE À LA FACULTÉ DES SCIENCES
+POUR L’OBTENTION DU GRADE DE DOCTEUR ÈS SCIENCES
+Algorithms and VLSI Architectures
+for Low-Power Mobile Face Verification
+Jean-Luc Nagel
+Acceptée sur proposition du jury:
+Prof. F. Pellandini, directeur de thèse
+PD Dr. M. Ansorge, co-directeur de thèse
+Prof. P.-A. Farine, rapporteur
+Dr. C. Piguet, rapporteur
+Soutenue le 2 juin 2005
+INSTITUT DE MICROTECHNIQUE
+UNIVERSITÉ DE NEUCHÂTEL"
+ca1db9dc493a045e3fadf8d8209eaa4311bbdc70,Effective Image Retrieval via Multilinear Multi-index Fusion,"JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, JUNE 2017
+Effective Image Retrieval via Multilinear
+Multi-index Fusion
+Zhizhong Zhang, Yuan Xie, Member, IEEE, Wensheng Zhang, Qi Tian, Fellow, IEEE,"
+cab372bc3824780cce20d9dd1c22d4df39ed081a,"DeepLab: Semantic Image Segmentation with Deep Convolutional Nets, Atrous Convolution, and Fully Connected CRFs","DeepLab: Semantic Image Segmentation with
+Deep Convolutional Nets, Atrous Convolution,
+nd Fully Connected CRFs
+Liang-Chieh Chen, George Papandreou, Senior Member, IEEE, Iasonas Kokkinos, Member, IEEE,
+Kevin Murphy, and Alan L. Yuille, Fellow, IEEE"
+ca37eda56b9ee53610c66951ee7ca66a35d0a846,Semantic Concept Discovery for Large-Scale Zero-Shot Event Detection,"Semantic Concept Discovery for Large-Scale Zero-Shot Event Detection
+Xiaojun Chang1,2, Yi Yang1, Alexander G. Hauptmann2, Eric P. Xing3 and Yao-Liang Yu3∗
+Centre for Quantum Computation and Intelligent Systems, University of Technology Sydney.
+Language Technologies Institute, Carnegie Mellon University.
+Machine Learning Department, Carnegie Mellon University.
+{cxj273, {alex, epxing,"
+ca400e0c7a739ce5555b2e3eccccbcea65e71b11,Neural Mechanisms of Emotion Regulation in Autism Spectrum Disorder.,"J Autism Dev Disord
+DOI 10.1007/s10803-015-2359-z
+S I : E M O T I O N R E G U L A T I O N A N D P S Y C H I A T R I C C O M O R B I D I T Y I N A S D
+Neural Mechanisms of Emotion Regulation in Autism Spectrum
+Disorder
+J. Anthony Richey • Cara R. Damiano • Antoinette Sabatino • Alison Rittenberg •
+Chris Petty • Josh Bizzell • James Voyvodic • Aaron S. Heller • Marika C. Coffman •
+Moria Smoski • Richard J. Davidson • Gabriel S. Dichter
+Ó Springer Science+Business Media New York 2015
+ccount of"
+ca8b529e389381c8b51ddf83788b7a3eafb8f859,Efficient CNN Implementation for Eye-Gaze Estimation on Low-Power/Low-Quality Consumer Imaging Systems,"Efficient CNN Implementation for Eye-Gaze
+Estimation on Low-Power/Low-Quality Consumer
+Imaging Systems
+Joseph Lemley, Student Member, IEEE, Anuradha Kar, Student Member, IEEE, Alexandru
+Drimbarean, Member, IEEE, and Peter Corcoran, Fellow, IEEE"
+ca754b826476b3e4083a0a6fbac3ac39b494fd43,Supporting data-driven I/O on GPUs using GPUfs,"Supporting data-driven I/O on GPUs using GPUfs
+Sagi Shahar
+Mark Silberstein
+Technion - Israel Institute of Technology
+Technion - Israel Institute of Technology
+Computations on large data sets necessarily involve file
+ccesses, but current GPUs cannot access a host file system
+directly because they lack file system access support. There-
+fore, an application developer needs to coordinate GPU ac-
+esses to secondary storage via explicit application-level
+management code running on a CPU. This code performs
+file accesses on GPU’s behalf and manages low level data
+transfers to/from GPU memory. Furthermore, all the data
+that a GPU may need must be resident in the GPU mem-
+ory prior to computations, and it is the responsibility of a
+GPU developer to ensure that this is the case. As a result, all
+the potential GPU accesses to data must be known before the
+GPU execution starts. This requirement impedes the use of
+GPUs to run data processing algorithms with irregular data
+ccess pattern on large datasets."
+ca581cd5bd0cecf346f2bc47f4b67bfee31b9da1,"Providing Fairness in Heterogeneous Multicores with a Predictive, Adaptive Scheduler","Providing Fairness in Heterogeneous Multicores with a Predictive, Adaptive
+Scheduler
+Saeid Barati
+University of Chicago
+Henry Hoffmann
+University of Chicago"
+ca606186715e84d270fc9052af8500fe23befbda,"Using subclass discriminant analysis, fuzzy integral and symlet decomposition for face recognition","Using Subclass Discriminant Analysis, Fuzzy Integral and Symlet Decomposition for
+Face Recognition
+Seyed Mohammad Seyedzade
+Department of Electrical Engineering,
+Iran Univ. of Science and Technology,
+Narmak, Tehran, Iran
+Email:
+Sattar Mirzakuchaki
+Amir Tahmasbi
+Department of Electrical Engineering,
+Iran Univ. of Science and Technology,
+Department of Electrical Engineering,
+Iran Univ. of Science and Technology,
+Narmak, Tehran, Iran
+Email:
+Narmak, Tehran, Iran
+Email:"
+ca494a2f20c267210a677ed9c509c4570f420fdf,Learning to Globally Edit Images with Textual Description,"Learning to Globally Edit Images
+with Textual Description
+Hai Wang † Jason D. Williams ‡ Sing Bing Kang §"
+cad7845e9668884caf4842b14983ec0e45bbbc75,Urban Tracker: Multiple object tracking in urban mixed traffic,"Urban Tracker: Multiple Object Tracking in Urban Mixed Traffic
+Jean-Philippe Jodoin, Guillaume-Alexandre Bilodeau
+LITIV lab., Dept. of computer & software eng.
+´Ecole Polytechnique de Montr´eal
+Montr´eal, QC, Canada
+Nicolas Saunier
+Dept. of civil, geo. and mining eng.
+´Ecole Polytechnique de Montr´eal
+Montr´eal, QC, Canada"
+e4896772d51a66b743e0d072d53cf26f6b61fc75,Automated Identification of Trampoline Skills Using Computer Vision Extracted Pose Estimation,"Automated Identification of Trampoline Skills
+Using Computer Vision Extracted Pose Estimation
+Paul W. Connolly, Guenole C. Silvestre and Chris J. Bleakley
+School of Computer Science, University College Dublin, Belfield, Dublin 4, Ireland."
+e4bf70e818e507b54f7d94856fecc42cc9e0f73d,Face Recognition under Varying Blur in an Unconstrained Environment,"IJRET: International Journal of Research in Engineering and Technology eISSN: 2319-1163 | pISSN: 2321-7308
+FACE RECOGNITION UNDER VARYING BLUR IN AN
+UNCONSTRAINED ENVIRONMENT
+Anubha Pearline.S1, Hemalatha.M2
+M.Tech, Information Technology,Madras Institute of Technology, TamilNadu,India,
+Assistant Professor, Information Technology,Madras Institute of Technology, TamilNadu,India, email:,"
+e4485930357db8248543eb78ce3bc9f32050694e,Drawn to danger: trait anger predicts automatic approach behaviour to angry faces.,"Cognition and Emotion
+ISSN: 0269-9931 (Print) 1464-0600 (Online) Journal homepage: http://www.tandfonline.com/loi/pcem20
+Drawn to danger: trait anger predicts automatic
+pproach behaviour to angry faces
+Lotte Veenstra, Iris K. Schneider, Brad J. Bushman & Sander L. Koole
+To cite this article: Lotte Veenstra, Iris K. Schneider, Brad J. Bushman & Sander L. Koole (2016):
+Drawn to danger: trait anger predicts automatic approach behaviour to angry faces, Cognition
+nd Emotion, DOI: 10.1080/02699931.2016.1150256
+To link to this article: http://dx.doi.org/10.1080/02699931.2016.1150256
+Published online: 19 Feb 2016.
+Submit your article to this journal
+Article views: 39
+View related articles
+View Crossmark data
+Full Terms & Conditions of access and use can be found at
+http://www.tandfonline.com/action/journalInformation?journalCode=pcem20
+Download by: [Vrije Universiteit Amsterdam]
+Date: 04 April 2016, At: 13:19"
+e4d2cc8fe567e8e1f2e0c5eb751ff9e9361346c0,ALTERED BRAIN ACTIVITY IN AUTISTIC CHILDREN VERSUS HEALTHY CONTROLS WHILE PERFORMING SIMPLE TASKS USING fMRI,"Copyright Warning & Restrictions
+The copyright law of the United States (Title 17, United
+States Code) governs the making of photocopies or other
+reproductions of copyrighted material.
+Under certain conditions specified in the law, libraries and
+rchives are authorized to furnish a photocopy or other
+reproduction. One of these specified conditions is that the
+photocopy or reproduction is not to be “used for any
+purpose other than private study, scholarship, or research.”
+If a, user makes a request for, or later uses, a photocopy or
+reproduction for purposes in excess of “fair use” that user
+may be liable for copyright infringement,
+This institution reserves the right to refuse to accept a
+opying order if, in its judgment, fulfillment of the order
+would involve violation of copyright law.
+Please Note: The author retains the copyright while the
+New Jersey Institute of Technology reserves the right to
+distribute this thesis or dissertation
+Printing note: If you do not wish to print this page, then select
+“Pages from: first page # to: last page #” on the print dialog screen"
+e4d33362b4f99ab77fd6ceaafa183c087c79faea,Design and implementation of a high performance pedestrian detection,"June 23-26, 2013, Gold Coast, Australia
+978-1-4673-2754-1/13/$31.00 ©2013 Crown"
+e4a05b1a478a2aeb6c0b1a4a42f8bdb4f97122f6,Quality Fusion Rule for Face Recognition in Video,"Quality Fusion Rule for Face Recognition in Video
+Chao Wang, Yongping Li, and Xinyu Ao
+The center for Advanced Detection and Instrumentation, Shanghai Institute of Applied Physics,
+Chinese Academy of Science, 201800 Shanghai, China"
+e4501da190012623d5048d57b7e650de27643b8d,Learning Actionlet Ensemble for 3D Human Action Recognition,"Chapter 2
+Learning Actionlet Ensemble for 3D Human
+Action Recognition"
+e4a1b46b5c639d433d21b34b788df8d81b518729,Side Information for Face Completion: a Robust PCA Approach,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+Side Information for Face Completion: a Robust
+PCA Approach
+Niannan Xue, Student Member, IEEE, Jiankang Deng, Student Member,IEEE,
+Shiyang Cheng, Student Member,IEEE, Yannis Panagakis, Member,IEEE,
+nd Stefanos Zafeiriou, Member, IEEE"
+e4c81c56966a763e021938be392718686ba9135e,Bio-Inspired Architecture for Clustering into Natural and Non-Natural Facial Expressions,",100+OPEN ACCESS BOOKS103,000+INTERNATIONALAUTHORS AND EDITORS106+ MILLIONDOWNLOADSBOOKSDELIVERED TO151 COUNTRIESAUTHORS AMONGTOP 1%MOST CITED SCIENTIST12.2%AUTHORS AND EDITORSFROM TOP 500 UNIVERSITIESSelection of our books indexed in theBook Citation Index in Web of Science™Core Collection (BKCI)Chapter from the book Visual Cortex - Current Status and PerspectivesDownloaded from: http://www.intechopen.com/books/visual-cortex-current-status-and-perspectivesPUBLISHED BYWorld's largest Science,Technology & Medicine Open Access book publisherInterested in publishing with InTechOpen?Contact us at"
+e4d08ef1b4350c7e03bdfb716200370c2ea87a6a,A novel approach for face recognition using fused GMDH-based networks,"The International Arab Journal of Information Technology, Vol. 15, No. 3, May 2018 369
+A Novel Approach for Face Recognition Using
+Fused GMDH-Based Networks
+El-Sayed El-Alfy1, Zubair Baig2, and Radwan Abdel-Aal1
+College of Computer Sciences and Engineering, King Fahd University of Petroleum and Minerals, KSA
+School of Science and Security Research Institute, Edith Cowan University, Australia"
+e4e95b8bca585a15f13ef1ab4f48a884cd6ecfcc,Face Recognition with Independent Component Based Super-resolution,"Face Recognition with Independent Component Based
+Super-resolution
+Osman Gokhan Sezer†,a, Yucel Altunbasakb, Aytul Ercila
+Faculty of Engineering and Natural Sciences, Sabanci Univ., Istanbul, Turkiye, 34956
+School of Elec. and Comp. Eng. , Georgia Inst. of Tech., Atlanta, GA, USA, 30332-0250"
+e4cbe39daed8700a1d6f4a25a3a98645c4f231d0,A nonconvex formulation for low rank subspace clustering: algorithms and convergence analysis,"Comput Optim Appl (2018) 70:395–418
+https://doi.org/10.1007/s10589-018-0002-6
+A nonconvex formulation for low rank subspace
+lustering: algorithms and convergence analysis
+Hao Jiang1 · Daniel P. Robinson1
+René Vidal1 · Chong You1
+Received: 14 July 2017 / Published online: 27 March 2018
+© Springer Science+Business Media, LLC, part of Springer Nature 2018"
+e46732f0c818b059420f68162363c9d1a9dc5395,Geometric and Physical Constraints for Head Plane Crowd Density Estimation in Videos,"Geometric and Physical Constraints for
+Head Plane Crowd Density Estimation in Videos
+Weizhe Liu(cid:63) Krzysztof Lis Mathieu Salzmann
+Pascal Fua
+Computer Vision Laboratory, ´Ecole Polytechnique F´ed´erale de Lausanne
+{weizhe.liu, krzysztof.lis, mathieu.salzmann,
+(EPFL)"
+e42e7735f94a8f498ef0bf790ab43a668f904848,Low-Latency Detec on and Tracking of Aircra in Very High-Resolu on Video Feeds,"Linköping University | Department of Computer and Information Science
+Master thesis, 30 ECTS | Datateknik
+018 | LIU-IDA/LITH-EX-A--18/022--SE
+Low-Latency Detec(cid:415)on and
+Tracking of Aircra(cid:332) in Very
+High-Resolu(cid:415)on Video Feeds
+Låglatent detek(cid:415)on och spårning av flygplan i högupplösta
+videokällor
+Jarle Mathiesen
+Supervisor : Magnus Bång
+Examiner : Erik Berglund
+Linköpings universitet
+SE–581 83 Linköping
++46 13 28 10 00 , www.liu.se"
+e43ea078749d1f9b8254e0c3df4c51ba2f4eebd5,Facial Expression Recognition Based on Constrained Local Models and Support Vector Machines,"Facial Expression Recognition Based on Constrained
+Local Models and Support Vector Machines
+Nikolay Neshov1, Ivo Draganov2, Agata Manolova3"
+e45bcda905b897513f4cff9e5c0a5bf475674a02,"Domain Stylization: A Strong, Simple Baseline for Synthetic to Real Image Domain Adaptation","Domain Stylization: A Strong, Simple Baseline for
+Synthetic to Real Image Domain Adaptation
+Aysegul Dundar, Ming-Yu Liu, Ting-Chun Wang, John Zedlewski, Jan Kautz
+NVIDIA"
+e48fa574960b23ba65b7ff1a732cc521213b5120,Mining Automatically Estimated Poses from Video Recordings of Top Athletes,"Mining Automatically Estimated Poses from Video Recordings
+of Top Athletes
+Rainer Lienhart∗
+University of Augsburg
+uni-augsburg.de
+Moritz Einfalt
+University of Augsburg
+uni-augsburg.de
+Dan Zecha
+University of Augsburg"
+e4c2f8e4aace8cb851cb74478a63d9111ca550ae,Distributed One-class Learning,"DISTRIBUTED ONE-CLASS LEARNING
+Ali Shahin Shamsabadi(cid:63), Hamed Haddadi†, Andrea Cavallaro(cid:63)
+(cid:63)Queen Mary University of London,†Imperial College London"
+e41e1e4d9e578c29bf648e7098c466935b50f1a9,A Generative Model for Simultaneous Estimation of Human Body Shape and Pixel-Level Segmentation,"A Generative Model for Simultaneous
+Estimation of Human Body Shape and
+Pixel-level Segmentation
+Ingmar Rauschert and Robert T. Collins
+Pennsylvania State University,
+University Park, 16802 PA, USA"
+e443cb55dcc54de848e9f0c11a6194568a875011,From passive to interactive object learning and recognition through self-identification on a humanoid robot,"From passive to interactive object learning and
+recognition through self-identification on a humanoid
+robot
+Natalia Lyubova, Serena Ivaldi, David Filliat
+To cite this version:
+Natalia Lyubova, Serena Ivaldi, David Filliat. From passive to interactive object learning and
+recognition through self-identification on a humanoid robot. Autonomous Robots, Springer
+Verlag, 2015, pp.23. .
+HAL Id: hal-01166110
+https://hal.archives-ouvertes.fr/hal-01166110
+Submitted on 22 Jun 2015
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destin´ee au d´epˆot et `a la diffusion de documents
+scientifiques de niveau recherche, publi´es ou non,"
+e44d8409bb5233bd1822555bf85095a80e27fd49,Spatio-temporal interaction model for crowd video analysis,"Spatio-temporal interaction model for crowd video analysis
+Indian Institute of Technology Bombay
+Indian Institute of Technology Bombay
+Neha Bhargava
+India
+Subhasis Chaudhuri
+India"
+e40007540c4813c81bc8b54dda4dd6f6c21deaa8,3d Face Recognition Using Patch Geodesic Derivative Pattern,"International Journal of Smart Electrical Engineering, Vol.2, No.3, Summer 2013 ISSN: 2251-9246
+pp.127:132
+D Face Recognition using Patch Geodesic Derivative Pattern"
+e475e857b2f5574eb626e7e01be47b416deff268,Facial Emotion Recognition Using Nonparametric Weighted Feature Extraction and Fuzzy Classifier,"Facial Emotion Recognition Using Nonparametric
+Weighted Feature Extraction and Fuzzy Classifier
+Maryam Imani and Gholam Ali Montazer"
+e4391993f5270bdbc621b8d01702f626fba36fc2,Head Pose Estimation Using Multi-scale Gaussian Derivatives,"Author manuscript, published in ""18th Scandinavian Conference on Image Analysis (2013)""
+DOI : 10.1007/978-3-642-38886-6_31"
+e4d8ba577cabcb67b4e9e1260573aea708574886,Um Sistema De Recomendaç˜ao Inteligente Baseado Em V ´ Idio Aulas Para Educaç˜ao a Distˆancia an Intelligent Recommendation System Based on Video Lectures for Distance Education (revelation),"UM SISTEMA DE RECOMENDAC¸ ˜AO INTELIGENTE BASEADO EM V´IDIO
+AULAS PARA EDUCAC¸ ˜AO A DIST ˆANCIA
+Gaspare Giuliano Elias Bruno
+Tese de Doutorado apresentada ao Programa
+de P´os-gradua¸c˜ao em Engenharia de Sistemas e
+Computa¸c˜ao, COPPE, da Universidade Federal
+do Rio de Janeiro, como parte dos requisitos
+necess´arios `a obten¸c˜ao do t´ıtulo de Doutor em
+Engenharia de Sistemas e Computa¸c˜ao.
+Orientadores: Edmundo Albuquerque de
+Souza e Silva
+Rosa Maria Meri Le˜ao
+Rio de Janeiro
+Janeiro de 2016"
+e467f7e2434ca74bdd4b19808a6b3d78b8c5ba1a,Feature Construction Using Evolution-COnstructed Features for General Object Recognition,"Feature Construction Using Evolution-COnstructed Features
+for General Object Recognition
+Kirt Dwayne Lillywhite
+A dissertation submitted to the faculty of
+Brigham Young University
+in partial fulfillment of the requirements for the degree of
+Doctor of Philosophy
+Dah-Jye Lee, Chair
+James K Archibald
+Bryan S. Morse
+Dan A. Ventura
+Brent E. Nelson
+Department of Electrical and Computer Engineering
+Brigham Young University
+April 2012
+Copyright c(cid:13) 2012 Kirt Dwayne Lillywhite
+All Rights Reserved"
+e4d90019c312ed87a236a11374caeea9cc4e6940,Comparison Comparison PCA Train GMM Feature Reduction Classify GMM Threshold,"COVER SHEET
+Cook, Jamie and Chandran, Vinod and Sridharan, Sridha and Fookes, Clinton (2004) Face
+Recognition from 3D Data using Iterative Closest Point Algorithm and Gaussian Mixture Models.
+In Proceedings 3D Data Processing, Visualisation and Transmission, Thessaloniki, Greece.
+Accessed from http://eprints.qut.edu.au
+Copyright 2004 the authors."
+e4abc40f79f86dbc06f5af1df314c67681dedc51,Head Detection with Depth Images in the Wild,"Head Detection with Depth Images in the Wild
+Diego Ballotta, Guido Borghi, Roberto Vezzani and Rita Cucchiara
+Department of Engineering ”Enzo Ferrari”
+University of Modena and Reggio Emilia, Italy
+Keywords:
+Head Detection, Head Localization, Depth Maps, Convolutional Neural Network"
+e4d0e87d0bd6ead4ccd39fc5b6c62287560bac5b,Implicit video multi-emotion tagging by exploiting multi-expression relations,"Implicit Video Multi-Emotion Tagging by Exploiting Multi-Expression
+Relations
+Zhilei Liu, Shangfei Wang*, Zhaoyu Wang and Qiang Ji"
+e48432872be1e0449f50c6807b274d57c87a641f,Human Body Extraction from Single Images Using Images Processing Techniques,"Human Body Extraction from Single Images Using Images
+Processing Techniques
+T.Ravichandra Babu
+Associate Professor & HOD,
+Department of ECE,
+Katravath Rajendhar
+PG Scholar-SSP,
+Department of ECE,
+Krishnamurthy Institute of Technology and
+Krishnamurthy Institute of Technology and
+Engineering.
+Engineering.
+that can
+images
+to cope with"
+e48e94959c4ce799fc61f3f4aa8a209c00be8d7f,Design of an Efficient Real-Time Algorithm Using Reduced Feature Dimension for Recognition of Speed Limit Signs,"Hindawi Publishing Corporation
+The Scientific World Journal
+Volume 2013, Article ID 135614, 6 pages
+http://dx.doi.org/10.1155/2013/135614
+Research Article
+Design of an Efficient Real-Time Algorithm Using Reduced
+Feature Dimension for Recognition of Speed Limit Signs
+Hanmin Cho,1 Seungwha Han,2 and Sun-Young Hwang1
+Department of Electronic Engineering, Sogang University, Seoul 121-742, Republic of Korea
+Samsung Techwin R&D Center, Security Solution Division, 701 Sampyeong-dong, Bundang-gu, Seongnam-si,
+Gyeonggi 463-400, Republic of Korea
+Correspondence should be addressed to Sun-Young Hwang;
+Received 28 August 2013; Accepted 1 October 2013
+Academic Editors: P. Daponte, M. Nappi, and N. Nishchal
+Copyright © 2013 Hanmin Cho et al. This is an open access article distributed under the Creative Commons Attribution License,
+which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
+We propose a real-time algorithm for recognition of speed limit signs from a moving vehicle. Linear Discriminant Analysis (LDA)
+required for classification is performed by using Discrete Cosine Transform (DCT) coefficients. To reduce feature dimension in
+LDA, DCT coefficients are selected by a devised discriminant function derived from information obtained by training. Binarization
+nd thinning are performed on a Region of Interest (ROI) obtained by preprocessing a detected ROI prior to DCT for further"
+e496d6be415038de1636bbe8202cac9c1cea9dbe,Facial Expression Recognition in Older Adults using Deep Machine Learning,"Facial Expression Recognition in Older Adults using
+Deep Machine Learning
+Andrea Caroppo, Alessandro Leone and Pietro Siciliano
+National Research Council of Italy, Institute for Microelectronics and Microsystems, Lecce,
+Italy"
+e43cc682453cf3874785584fca813665878adaa7,Face Recognition using Local Derivative Pattern Face Descriptor,"www.ijecs.in
+International Journal Of Engineering And Computer Science ISSN:2319-7242
+Volume 3 Issue 10 October, 2014 Page No.8830-8834
+Face Recognition using Local Derivative Pattern Face
+Descriptor
+Pranita R. Chavan1, Dr. Dnyandeo J. Pete2
+Department of Electronics and Telecommunication
+Datta Meghe College of Engineering
+Airoli, Navi Mumbai, India 1,2
+Mob: 99206746061
+Mob: 99870353142"
+fec6648b4154fc7e0892c74f98898f0b51036dfe,"A Generic Face Processing Framework: Technologies, Analyses and Applications","A Generic Face Processing
+Framework: Technologies,
+Analyses and Applications
+JANG Kim-fung
+A Thesis Submitted in Partial Ful(cid:12)lment
+of the Requirements for the Degree of
+Master of Philosophy
+Computer Science and Engineering
+Supervised by
+Prof. Michael R. Lyu
+(cid:13)The Chinese University of Hong Kong
+July 2003
+The Chinese University of Hong Kong holds the copyright of this thesis. Any
+person(s) intending to use a part or whole of the materials in the thesis in
+proposed publication must seek copyright release from the Dean of the
+Graduate School."
+fea0a5ed1bc83dd1b545a5d75db2e37a69489ac9,Enhancing Recommender Systems for TV by Face Recognition,"Enhancing Recommender Systems for TV by Face Recognition
+Toon De Pessemier, Damien Verlee and Luc Martens
+iMinds - Ghent University, Technologiepark 15, B-9052 Ghent, Belgium
+{toon.depessemier,
+Keywords:
+Recommender System, Face Recognition, Face Detection, TV, Emotion Detection."
+fecce467b42856eadb8dd0c08674d9381f52efab,The Role of Shape in Visual Recognition,"The Role of Shape in Visual Recognition
+Bj¨orn Ommer"
+fe4986bbb10f3417372a02fed1218acb5162ddec,Classification model of arousal and valence mental states by EEG signals analysis and Brodmann correlations,"(IJACSA) International Journal of Advanced Computer Science and Applications,
+Vol. 6, No. 6, 2015
+Classification model of arousal and valence mental
+states by EEG signals analysis and Brodmann
+orrelations
+Adrian Rodriguez Aguin˜aga and Miguel Angel Lo´pez Ram´ırez
+Instituto Tecnolo´gico de Tijuana
+Calzada del Tecnolo´gico S/N, Toma´s Aquino, 22414
+Tijuana, B.C. Me´xico
+Mar´ıa del Rosario Baltazar Flores
+Instituto Tecnolo´gico de Leo´n
+Av. Tecnolo´gico S/N
+Industrial Julia´n de Obrego´n, 37290
+Leo´n, Gto. Me´xico"
+fe9c460d5ca625402aa4d6dd308d15a40e1010fa,Neural Architecture for Temporal Emotion Classification,"Neural Architecture for Temporal Emotion
+Classification
+Roland Schweiger, Pierre Bayerl, and Heiko Neumann
+Universit¨at Ulm, Neuroinformatik, Germany"
+fec9fb202906e6f136ae92c3a3540b2a84257c4e,Automatic Facial Feature Detection for Facial Expression Recognition,"AUTOMATIC FACIAL FEATURE DETECTION FOR FACIAL
+EXPRESSION RECOGNITION
+Taner Danisman, Marius Bilasco, Nacim Ihaddadene and Chabane Djeraba
+LIFL - UMR CNRS 8022, University of Science and Technology of Lille, Villeneuve d'Ascq, France
+Keywords:
+Facial Feature Detection, Emotion Recognition, Eye Detection, Mouth Corner Detection."
+fe01e1099dc2ce02158de607be993f9fc8aade57,Aerial LaneNet: Lane Marking Semantic Segmentation in Aerial Imagery using Wavelet-Enhanced Cost-sensitive Symmetric Fully Convolutional Neural Networks,"Aerial LaneNet: Lane Marking Semantic
+Segmentation in Aerial Imagery using
+Wavelet-Enhanced Cost-sensitive Symmetric Fully
+Convolutional Neural Networks
+Seyed Majid Azimi, Peter Fischer, Marco Körner, and Peter Reinartz"
+fec5c0100c72d7c1c823a91dc146ecd5e98e77ff,Coherence criterion for region labelling and description,"Coherence criterion for region labelling and
+description
+Hichem Houissa
+INRIA Rocquencourt
+Domaine de Voluceau
+Nozha Boujemaa
+INRIA Rocquencourt
+Domaine de Voluceau
+Email:
+Email:"
+fe7f5c7da203c48aa1a9a2468aae55c6e0053df9,Interactive Text2Pickup Network for Natural Language based Human-Robot Collaboration,"Interactive Text2Pickup Network for Natural Language based
+Human-Robot Collaboration
+Hyemin Ahn, Sungjoon Choi, Nuri Kim, Geonho Cha, and Songhwai Oh"
+fe7e3cc1f3412bbbf37d277eeb3b17b8b21d71d5,Performance Evaluation of Gabor Wavelet Features for Face Representation and Recognition,"IOSR Journal of VLSI and Signal Processing (IOSR-JVSP)
+Volume 6, Issue 2, Ver. I (Mar. -Apr. 2016), PP 47-53
+e-ISSN: 2319 – 4200, p-ISSN No. : 2319 – 4197
+www.iosrjournals.org
+Performance Evaluation of Gabor Wavelet Features for Face
+Representation and Recognition
+M. E. Ashalatha1, Mallikarjun S. Holi2
+Dept. of Biomedical Engineering, Bapuji Institute of Engineering & Technology Davanagere, Karnataka,India
+Dept. of Electronics and Instrumentation Engineering, University B.D.T.College of Engineering, Visvesvaraya
+Technological University, Davanagere, Karnataka, India"
+fea0895326b663bf72be89151a751362db8ae881,Homocentric Hypersphere Feature Embedding for Person Re-identification,"Homocentric Hypersphere Feature Embedding for
+Person Re-identification
+Wangmeng Xiang, Jianqiang Huang, Xianbiao Qi, Xiansheng Hua, Fellow, IEEE and Lei Zhang, Fellow, IEEE"
+feb4bcd20de6ce4f9503ef01c87390e662538c15,Monocular Depth Estimation with Augmented Ordinal Depth Relationships,"Monocular Depth Estimation with Augmented
+Ordinal Depth Relationships
+Yuanzhouhan Cao, Tianqi Zhao, Ke Xian, Chunhua Shen, Zhiguo Cao"
+fef89593599b78db7d133fc6893519b3ee8ff8d2,3D Face recognition by ICP-based shape matching,"D Face recognition by ICP-based shape matching
+Boulbaba Ben Amor1, Karima Ouji1, Mohsen Ardabilian1, Liming Chen1
+LIRIS Lab, Lyon Research Center for Images and Intelligent Information Systems, UMR 5205 CNRS
+Centrale Lyon, France"
+fe466e84fa2e838adc3c37ee327cd68004ae08fe,MUTAN: Multimodal Tucker Fusion for Visual Question Answering,"MUTAN: Multimodal Tucker Fusion for Visual Question Answering
+Hedi Ben-younes 1,2 *
+R´emi Cadene 1*
+Matthieu Cord 1
+Nicolas Thome 3
+Sorbonne Universit´es, UPMC Univ Paris 06, CNRS, LIP6 UMR 7606, 4 place Jussieu, 75005 Paris
+Heuritech, 248 rue du Faubourg Saint-Antoine, 75012 Paris
+Conservatoire National des Arts et M´etiers"
+fe41550ed350df4cd731a5df3dca5b0ea13511db,Compact Generalized Non-local Network,"Compact Generalized Non-local Network
+Kaiyu Yue1,3 Ming Sun1 Yuchen Yuan1 Feng Zhou2 Errui Ding1 Fuxin Xu3
+Baidu VIS 2Baidu Research
+Central South University
+{yuekaiyu, sunming05, yuanyuchen02, zhoufeng09,"
+feaedb6766f42e867aab7f1a33ba4d7ddacfc7aa,UvA-DARE ( Digital Academic Repository ) Tag-based Video Retrieval by Embedding Semantic Content in a Continuous Word,"UvA-DARE (Digital Academic Repository)
+Tag-based Video Retrieval by Embedding Semantic Content in a Continuous Word
+Space
+Agharwal, A.; Kovvuri, R.; Nevatia, R.; Snoek, C.G.M.
+Published in:
+016 IEEE Winter Conference on Applications of Computer Vision: WACV 2016: Lake Placid, New York, USA,
+7-10 March 2016
+0.1109/WACV.2016.7477706
+Link to publication
+Citation for published version (APA):
+Agharwal, A., Kovvuri, R., Nevatia, R., & Snoek, C. G. M. (2016). Tag-based Video Retrieval by Embedding
+Semantic Content in a Continuous Word Space. In 2016 IEEE Winter Conference on Applications of Computer
+Vision: WACV 2016: Lake Placid, New York, USA, 7-10 March 2016 (pp. 1354-1361). Piscataway, NJ: Institute
+of Electrical and Electronic Engineers. DOI: 10.1109/WACV.2016.7477706
+General rights
+It is not permitted to download or to forward/distribute the text or part of it without the consent of the author(s) and/or copyright holder(s),
+other than for strictly personal, individual use, unless the work is under an open content license (like Creative Commons).
+Disclaimer/Complaints regulations
+If you believe that digital publication of certain material infringes any of your rights or (privacy) interests, please let the Library know, stating
+your reasons. In case of a legitimate complaint, the Library will make the material inaccessible and/or remove it from the website. Please Ask"
+fe030b87e3c985c9dedab130949e2868e3e5e7d5,Explaining Neural Networks Semantically,"Under review as a conference paper at ICLR 2019
+EXPLAINING NEURAL NETWORKS SEMANTICALLY
+AND QUANTITATIVELY
+Anonymous authors
+Paper under double-blind review"
+fea83550a21f4b41057b031ac338170bacda8805,Learning a Metric Embedding for Face Recognition using the Multibatch Method,"Learning a Metric Embedding
+for Face Recognition
+using the Multibatch Method
+Oren Tadmor
+Yonatan Wexler
+Tal Rosenwein
+Shai Shalev-Shwartz
+Amnon Shashua
+Orcam Ltd., Jerusalem, Israel"
+fe005c5036ad646051cc779aafb63534bda14f06,The Hand Vein Pattern Used as a Biometric Feature,"The Hand Vein Pattern Used as a Biometric Feature
+Master Literature Thesis
+Annemarie Nadort
+Amsterdam - May 2007"
+fe35639349a87808481e64f9cbea065339063154,Understanding deep learning via backtracking and deconvolution,"Fang J Big Data (2017) 4:40
+DOI 10.1186/s40537-017-0101-8
+METHODOLOGY
+Understanding deep learning
+via backtracking and deconvolution
+Open Access
+Xing Fang*
+*Correspondence:
+School of Information
+Technology, Illinois State
+University, Normal, IL, USA"
+febb6454a3bfbc76f4c7934854d377ac15666215,Improving the Accuracy of Face Annotation in Social Network,"International Journal of Computer Applications (0975 – 8887)
+Volume 182 – No. 14, September 2018
+Improving the Accuracy of Face Annotation in Social
+Network
+C. Jayaramulu
+Research Scholar
+individual
+Dayananda Sagar University, Bangalore
+photographs."
+fed9e971e042b40cc659aca6e338d79dc1d4b59c,Grouping-by-id: Guarding against Adversar-,"Under review as a conference paper at ICLR 2018
+GROUPING-BY-ID: GUARDING AGAINST ADVERSAR-
+IAL DOMAIN SHIFTS
+Anonymous authors
+Paper under double-blind review"
+fe8b2b2a2ace6d6af28dc0f1d63400554c8c675d,Random walk distances in data clustering and applications,"Adv Data Anal Classif (2013) 7:83–108
+DOI 10.1007/s11634-013-0125-7
+REGULAR ARTICLE
+Random walk distances in data clustering
+nd applications
+Sijia Liu · Anastasios Matzavinos ·
+Sunder Sethuraman
+Received: 28 September 2011 / Revised: 24 May 2012 / Accepted: 30 September 2012 /
+Published online: 6 March 2013
+© Springer-Verlag Berlin Heidelberg 2013"
+fe0c51fd41cb2d5afa1bc1900bbbadb38a0de139,Bayesian face recognition using 2D Gaussian-Hermite moments,"Rahman et al. EURASIP Journal on Image and Video Processing (2015) 2015:35
+DOI 10.1186/s13640-015-0090-5
+RESEARCH
+Open Access
+Bayesian face recognition using 2D
+Gaussian-Hermite moments
+S. M. Mahbubur Rahman1*, Shahana Parvin Lata2 and Tamanna Howlader2"
+c8db8764f9d8f5d44e739bbcb663fbfc0a40fb3d,Modeling for part-based visual object detection based on local features,"Modeling for part-based visual object
+detection based on local features
+Von der Fakult¨at f¨ur Elektrotechnik und Informationstechnik
+der Rheinisch-Westf¨alischen Technischen Hochschule Aachen
+zur Erlangung des akademischen Grades eines Doktors
+der Ingenieurwissenschaften genehmigte Dissertation
+vorgelegt von
+Diplom-Ingenieur
+Mark Asbach
+us Neuss
+Berichter:
+Univ.-Prof. Dr.-Ing. Jens-Rainer Ohm
+Univ.-Prof. Dr.-Ing. Til Aach
+Tag der m¨undlichen Pr¨ufung: 28. September 2011
+Diese Dissertation ist auf den Internetseiten der
+Hochschulbibliothek online verf¨ugbar."
+c85aa12331bdeaba06d4c3e44b969e6060c3310c,Ensemble of Part Detectors for Simultaneous Classification and Localization,"Ensemble of Part Detectors for Simultaneous
+Classification and Localization
+Xiaopeng Zhang, Hongkai Xiong, Senior Member, IEEE, Weiyao Lin, Qi Tian, Fellow, IEEE"
+c86e6ed734d3aa967deae00df003557b6e937d3d,Generative Adversarial Networks with Decoder-Encoder Output Noise,"Generative Adversarial Networks with
+Decoder-Encoder Output Noise
+Guoqiang Zhong, Member, IEEE, Wei Gao, Yongbin Liu, Youzhao Yang
+onditional distribution of their neighbors. In [32], Portilla and
+Simoncelli proposed a parametric texture model based on joint
+statistics, which uses a decomposition method that is called
+steerable pyramid decomposition to decompose the texture
+of images. An example-based super-resolution algorithm [11]
+was proposed in 2002, which uses a Markov network to model
+the spatial relationship between the pixels of an image. A
+scene completion algorithm [16] was proposed in 2007, which
+pplied a semantic scene match technique. These traditional
+lgorithms can be applied to particular image generation tasks,
+such as texture synthesis and super-resolution. Their common
+haracteristic is that they predict the images pixel by pixel
+rather than generate an image as a whole, and the basic idea
+of them is to make an interpolation according to the existing
+part of the images. Here, the problem is, given a set of images,
+an we generate totally new images with the same distribution
+of the given ones?"
+c8a4b4fe5ff2ace9ab9171a9a24064b5a91207a3,Locating facial landmarks with binary map cross-correlations,"LOCATING FACIAL LANDMARKS WITH BINARY MAP CROSS-CORRELATIONS
+J´er´emie Nicolle
+K´evin Bailly
+Vincent Rapp
+Mohamed Chetouani
+Univ. Pierre & Marie Curie, ISIR - CNRS UMR 7222, F-75005, Paris - France
+{nicolle, bailly, rapp,"
+c84ca95638893700d8f806e844984a5b2c50b5e3,Automatic Facial Expression Recognition Using 3D Faces,"Paper 071, ENG 101
+Automatic Facial Expression Recognition Using 3D Faces
+Chao Li, Antonio Soares
+Florida A&M University
+hao.li,"
+c8f035510b72b84c21430a887ed03c8836eeddc2,Optical-inertial Synchronization of MoCap Suit with Single Camera Setup for Reliable Position Tracking,
+c8f216dbd43dda14783677f44bb336c92211cd46,Synthesis from 3 D Mesh Sequences Driven by Combined Speech Features,"VISUAL SPEECH SYNTHESIS FROM 3D MESH SEQUENCES DRIVEN BY COMBINED
+SPEECH FEATURES
+Felix Kuhnke and J¨orn Ostermann
+Institut f¨ur Informationsverarbeitung, Leibniz Universit¨at Hannover, Germany"
+c866a2afc871910e3282fd9498dce4ab20f6a332,Surveillance Face Recognition Challenge,"Noname manuscript No.
+(will be inserted by the editor)
+Surveillance Face Recognition Challenge
+Zhiyi Cheng · Xiatian Zhu · Shaogang Gong
+Received: date / Accepted: date"
+c8dcb7b3c5ed43e61b90b50fedc76568d8e30675,Guarding against Adversarial Domain Shifts,"Under review as a conference paper at ICLR 2018
+GUARDING AGAINST ADVERSARIAL DOMAIN SHIFTS
+WITH COUNTERFACTUAL REGULARIZATION
+Anonymous authors
+Paper under double-blind review"
+c84233f854bbed17c22ba0df6048cbb1dd4d3248,Exploring Locally Rigid Discriminative Patches for Learning Relative Attributes,"Y. VERMA, C. V. JAWAHAR: EXPLORING PATCHES FOR RELATIVE ATTRIBUTES
+Exploring Locally Rigid Discriminative
+Patches for Learning Relative Attributes
+Yashaswi Verma
+http://researchweb.iiit.ac.in/~yashaswi.verma/
+C. V. Jawahar
+http://www.iiit.ac.in/~jawahar/
+IIIT-Hyderabad, India
+http://cvit.iiit.ac.in"
+c840d85f6dce0fb69fb6113923f17e1e314c6134,Disparity Sliding Window: Object Proposals From Disparity Images,"Disparity Sliding Window: Object Proposals From Disparity Images
+Julian M¨uller1, Andreas Fregin2 and Klaus Dietmayer1"
+c8fc65c83473c633e2bf1c13031ccd10617cc8a2,Every Object Tells a Story,"Every Object Tells a Story
+James Pustejovsky
+Computer Science Department
+Brandeis University
+Waltham, MA 02453
+Nikhil Krishnaswamy
+Computer Science Department
+Brandeis University
+Waltham, MA 02453"
+c896946612069f162864edfbecf5c1a8a077ed79,The Image Multi Feature Retrieval based on SVM Semantic Classification,"International Journal of Hybrid Information Technology
+Vol.9, No.3 (2016), pp. 291-300
+http://dx.doi.org/10.14257/ijhit.2016.9.3.27
+The Image Multi Feature Retrieval based on SVM Semantic
+Classification
+Che Chang1,2*, Yu Xiaoyang1 and Bai Yamei3
+. Measuring and Control Technology and Instrumentations,Harbin University of
+Science and Technology, Harbin, China
+. School of Engineering,Harbin University, Harbin, China
+. School of Electronic and Information Engineering,Harbin Huade University
+Harbin, China
+E-mail:"
+c8ebe4c7d884c468d572a1ccf8583ac912215088,Emotion Dysregulation and Anxiety in Adults with ASD: Does Social Motivation Play a Role?,"J Autism Dev Disord
+DOI 10.1007/s10803-015-2567-6
+S . I . : A S D I N A D U L T H O O D : C O M O R B I D I T Y A N D I N T E R V E N T I O N
+Emotion Dysregulation and Anxiety in Adults with ASD: Does
+Social Motivation Play a Role?
+Deanna Swain1
+• Angela Scarpa1
+• Susan White1
+• Elizabeth Laugeson2
+Ó Springer Science+Business Media New York 2015"
+c8855bebdaa985dfc4c1a07e5f74a0e29787e47e,Multi-label Object Attribute Classification using a Convolutional Neural Network,"Multi-label Object Attribute Classification using
+Convolutional Neural Network
+Soubarna Banik, Mikko Lauri, Simone Frintrop
+Department of Informatics, Universit¨at Hamburg"
+c81ee278d27423fd16c1a114dcae486687ee27ff,Search Based Face Annotation Using Weakly Labeled Facial Images,"Search Based Face Annotation Using Weakly
+Labeled Facial Images
+Shital Shinde1, Archana Chaugule2
+Computer Department, Savitribai Phule Pune University
+D.Y.Patil Institute of Engineering and Technology, Pimpri, Pune-18
+Mahatma Phulenagar, 120/2 Mahaganpati soc, Chinchwad, Pune-19, MH, India
+D.Y.Patil Institute of Engineering and Technology, Pimpri, Pune-18, Savitribai Phule Pune University
+DYPIET, Pimpri, Pune-18, MH, India"
+c867caf3f29abb2f3fd5c4c7e98e5f551a70be25,DeLS-3D: Deep Localization and Segmentation with a 3D Semantic Map,"DeLS-3D: Deep Localization and Segmentation with a 3D Semantic Map
+Peng Wang, Ruigang Yang, Binbin Cao, Wei Xu, Yuanqing Lin
+Baidu Research
+National Engineering Laboratory for Deep Learning Technology and Applications
+{wangpeng54, yangruigang, caobinbin, wei.xu,"
+c81326a1ecb7e71ae38a665779b8d959d3938d1a,A Novel Neural Network Model Specified for Representing Logical Relations,"A Novel Neural Network Model Specified for Representing Logical
+Relations
+Gang Wang
+With computers to handle more and more complicated things in variable environments, it becomes an urgent requirement that
+the artificial intelligence has the ability of automatic judging and deciding according to numerous specific conditions so as to deal
+with the complicated and variable cases. ANNs inspired by brain is a good candidate. However, most of current numeric ANNs are
+not good at representing logical relations because these models still try to represent logical relations in the form of ratio based on
+functional approximation. On the other hand, researchers have been trying to design novel neural network models to make neural
+network model represent logical relations. In this work, a novel neural network model specified for representing logical relations is
+proposed and applied. New neurons and multiple kinds of links are defined. Inhibitory links are introduced besides exciting links.
+Different from current numeric ANNs, one end of an inhibitory link connects an exciting link rather than a neuron. Inhibitory
+model can simulate the operations of Boolean logic gates, and construct complex logical relations with the advantages of simpler
+neural network structures than recent works in this area. This work provides some ideas to make neural networks represent logical
+relations more directly and efficiently, and the model could be used as the complement to current numeric ANN to deal with logical
+issues and expand the application areas of ANN.
+Index Terms—Brain-inspired computing, logical representation, neural network structure, inhibitory link.
+I. INTRODUCTION
+With computers to handle more and more complicated
+things in variable environments like driverless car and ad-
+vanced medical diagnosis expert system, higher artificial intel-"
+c8ee4812c32b0ad4e26d53b99e1514514bbcaf14,A NEaT Design for Reliable and Scalable Network Stacks,"A NEaT Design for Reliable and Scalable
+Network Stacks
+Tomas Hruby
+Cristiano Giuffrida
+Lionel Sambuc
+Herbert Bos
+Andrew S. Tanenbaum
+Vrije Universiteit Amsterdam"
+c8bcd8e0b2ab6cc00a565efbcf904235c33ac2dc,Unsupervised Person Image Synthesis in Arbitrary Poses,"Unsupervised Person Image Synthesis in Arbitrary Poses
+Albert Pumarola
+Antonio Agudo
+Alberto Sanfeliu
+Francesc Moreno-Noguer
+Institut de Rob`otica i Inform`atica Industrial (CSIC-UPC)
+08028, Barcelona, Spain
+Figure 1: Given an original image of a person (left) and a desired body pose defined by a 2D skeleton (bottom-row), our
+model generates new photo-realistic images of the person under that pose (top-row). The main contribution of our work is to
+train this generative model with unlabeled data."
+c83a05de1b4b20f7cd7cd872863ba2e66ada4d3f,A Deep Learning Perspective on the Origin of Facial Expressions,"BREUER, KIMMEL: A DEEP LEARNING PERSPECTIVE ON FACIAL EXPRESSIONS
+A Deep Learning Perspective on the Origin
+of Facial Expressions
+Ran Breuer
+Ron Kimmel
+Department of Computer Science
+Technion - Israel Institute of Technology
+Technion City, Haifa, Israel
+Figure 1: Demonstration of the filter visualization process."
+c8e32484bbbc63908080284790edafc4b66008d2,Suivi par ré-identification dans un réseau de caméras à champs disjoints,"Suivi par r´e-identification dans un r´eseau de cam´eras `a
+hamps disjoints
+Boris Meden, Patrick Sayd, Fr´ed´eric Lerasle
+To cite this version:
+Boris Meden, Patrick Sayd, Fr´ed´eric Lerasle. Suivi par r´e-identification dans un r´eseau de
+am´eras `a champs disjoints. RFIA 2012 (Reconnaissance des Formes et Intelligence Artificielle),
+Jan 2012, Lyon, France. pp.978-2-9539515-2-3, 2012.
+HAL Id: hal-00656507
+https://hal.archives-ouvertes.fr/hal-00656507
+Submitted on 17 Jan 2012
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destin´ee au d´epˆot et `a la diffusion de documents
+scientifiques de niveau recherche, publi´es ou non,
+´emanant des ´etablissements d’enseignement et de"
+c813413fc84be33d7c4ccdd4a1f025ccc73a77bd,Discriminative Bayesian Active Shape Models,"Discriminative Bayesian Active Shape Models
+Pedro Martins, Rui Caseiro, Jo˜ao F. Henriques, Jorge Batista
+Institute of Systems and Robotics - University of Coimbra, Portugal"
+c81b303005459285a5864ea4de71f77025cd5be5,Norm-Induced Entropies for Decision Forests,"Norm-induced entropies for decision forests
+Christoph Lassner
+Rainer Lienhart
+Multimedia Computing and Computer Vision Lab, University of Augsburg"
+c8adbe00b5661ab9b3726d01c6842c0d72c8d997,Deep Architectures for Face Attributes,"Deep Architectures for Face Attributes
+Tobi Baumgartner, Jack Culpepper
+Computer Vision and Machine Learning Group, Flickr, Yahoo,
+{tobi,"
+fb04a8cb4b573d6b565a5b0c369d775e6bfb04f1,Title of dissertation : LOOKING AT PEOPLE USING PARTIAL LEAST SQUARES,
+fb4c3b2f893baa1fbf8d16da2e09aa9868c61a7a,Decoupled Weight Decay Regularization,"Under review as a conference paper at ICLR 2019
+DECOUPLED WEIGHT DECAY REGULARIZATION
+Anonymous authors
+Paper under double-blind review"
+fb4545782d9df65d484009558e1824538030bbb1,"Learning Visual Patterns: Imposing Order on Objects, Trajectories and Networks",
+fbbccf0454c84bea1fd5c5a1dcd9fd7bba301a44,Face Detection Using Gradient Vector Flow,"Proceedings of the Second International Conference on Machine Learning and Cybernetics, Wan, 2-5 November 2003
+FACE DETECTION USING GRADIENT VECTOR FLOW
+MAYANK VATSA, RICHA SINCH, P. GUPTA
+Department of Computer Science & Engineering Indian Institute of Technology Kanpur
+Kanpur INDIA, 208016
+E-MAIL: (mayankv, richas, pg} cse.iitk.ac.in"
+fbf196d83a41d57dfe577b3a54b1b7fa06666e3b,Extreme Learning Machine for Large-Scale Action Recognition,"Extreme Learning Machine for Large-Scale
+Action Recognition
+G¨ul Varol and Albert Ali Salah
+Department of Computer Engineering, Bo˘gazi¸ci University, Turkey"
+fbd7d591e6eecb9a947e377d5b1a865a9f86a11f,Consensual and Privacy-Preserving Sharing of Multi-Subject and Interdependent Data,"Consensual and Privacy-Preserving Sharing of
+Multi-Subject and Interdependent Data
+Alexandra-Mihaela Olteanu
+EPFL, UNIL–HEC Lausanne
+K´evin Huguenin
+UNIL–HEC Lausanne
+Italo Dacosta
+Jean-Pierre Hubaux"
+fb3af250a2ff85145519fea9ece7187452d02a50,The WILDTRACK Multi-Camera Person Dataset,"The WILDTRACK Multi-Camera Person
+Dataset
+Tatjana Chavdarova1, Pierre Baqu´e2, St´ephane Bouquet2,
+Andrii Maksai2, Cijo Jose1, Louis Lettry3,
+Pascal Fua2, Luc Van Gool3 and Fran¸cois Fleuret1
+Machine Learning group, Idiap Research Institute & ´Ecole
+Polytechnique F´ed´erale de Lausanne
+CVLab, ´Ecole Polytechnique F´ed´erale de Lausanne
+Computer Vision Lab, ETH Zurich"
+fbd781143a3f4c9d03c227cfbd1f528d658195ce,A Gender Recognition Experiment on the CASIA Gait Database Dealing with Its Imbalanced Nature,"A GENDER RECOGNITION EXPERIMENT ON THE CASIA GAIT
+DATABASE DEALING WITH ITS IMBALANCED NATURE
+Ra´ul Mart´ın-F´elez, Ram´on A. Mollineda and J. Salvador S´anchez
+Institute of New Imaging Technologies (INIT) and Dept. Llenguatges i Sistemes Inform`atics
+Universitat Jaume I. Av. Sos Baynat s/n, 12071, Castell´o de la Plana, Spain
+{martinr, mollined,
+Keywords:
+Gender recognition, Gait analysis, Class imbalance problem, Human silhouette, Appearance-based method."
+fbd047862ea869973ecf8fc35ae090ca00ff06d8,Literature review of fingerprint quality assessment and its evaluation,"A Literature Review of Fingerprint Quality Assessment
+nd Its Evaluation
+Zhigang Yao, Jean-Marie Le Bars, Christophe Charrier, Christophe
+Rosenberger
+To cite this version:
+Zhigang Yao, Jean-Marie Le Bars, Christophe Charrier, Christophe Rosenberger. A Literature Review
+of Fingerprint Quality Assessment and Its Evaluation.
+IET journal on Biometrics, 2016. <hal-
+01269240>
+HAL Id: hal-01269240
+https://hal.archives-ouvertes.fr/hal-01269240
+Submitted on 5 Feb 2016
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents"
+fbb6e707c8a5f189d8ad416597e23671b884448b,Altered gaze following during live interaction in infants at risk for autism: an eye tracking study,"Thorup et al. Molecular Autism (2016) 7:12
+DOI 10.1186/s13229-016-0069-9
+R ES EAR CH
+Altered gaze following during live
+interaction in infants at risk for autism:
+n eye tracking study
+Emilia Thorup1*, Pär Nyström1, Gustaf Gredebäck1, Sven Bölte3,2, Terje Falck-Ytter3,1 and The EASE Team
+Open Access"
+fb95fb1e0bf99347a69f76c9fd65e039024e73b7,Photograph Based Pair-matching Recognition of Human Faces,"World Academy of Science, Engineering and Technology
+International Journal of Computer and Information Engineering
+Vol:5, No:12, 2011
+Photograph Base
+sed Pair-matching Recogn
+gnition of
+Human Faces
+Min Y
+n Yao, Kota Aoki, and Hiroshi Nagahashi
+(cid:1)"
+fbc93b13b8a6a5e4ed11310ce4da3be0b7541da8,Real-time Pedestrian Detection in a Truck's Blind Spot Camera,"Real-time pedestrian detection in a truck’s blind spot camera
+Kristof Van Beeck1,2 and Toon Goedem´e1,2
+EAVISE, Campus De Nayer - KU Leuven, J. De Nayerlaan 5, 2860 Sint-Katelijne-Waver, Belgium
+ESAT-PSI, KU Leuven, Kasteel Arenbergpark 10, 3100 Heverlee, Belgium
+{kristof.vanbeeck,
+Keywords:
+Pedestrian detection, Tracking, Real-time, Computer vision, Active safety systems"
+fbf20dc3367864462d7630aad81c436e50d1cd60,Iterative Bayesian Learning for Crowdsourced Regression,"Iterative Bayesian Learning for Crowdsourced Regression
+Jungseul Ok∗, Sewoong Oh∗, Yunhun Jang †, Jinwoo Shin†, and Yung Yi†
+October 9, 2018"
+fbb304770d33f44006d134906481208ad087ce63,Visual Self-Localization with Tiny Images,"Visual Self-Localization with Tiny Images
+Marius Hofmeister, Sara Erhard and Andreas Zell
+University of T¨ubingen, Department of Computer Science, Sand 1, 72076 T¨ubingen"
+fbd17af24e86fe487e28f99ba3e402dd6cfcd16a,Towards Detailed Recognition of Visual Categories,"Research Statement: Towards Detailed Recognition of Visual Categories
+Subhransu Maji
+As humans, we have a remarkable ability to perceive the world around us in minute detail purely
+from the light that is reflected off it – we can estimate material and metric properties of objects, localize
+people in images, describe what they are doing, and even identify them. Automatic methods for such
+detailed recognition of images are essential for most human-centric applications and large scale analysis
+of the content of media collections for market research, advertisement, and social studies. For example,
+in order to shop for shoes in an on-line catalogue, a system should be able to understand the style of a
+shoe, the length of its heels, or the shininess of its material. In order to support visual demographics
+nalysis for advertisement, a system should be able to not only identify the people in a scene, but also
+to understand what kind (style and brand) of clothes they are wearing, whether they are wearing any
+ccessories, and so on.
+Despite several successes, such detailed recognition is beyond the current computer vision systems.
+This is a challenging task, and to make progress we have to make advances on several fronts. We need
+etter representations of visual categories that can enable fine-grained reasoning about their properties,
+s well as machine learning methods that can leverage ‘big-data’ to learn such representations. In order
+to enable benchmarks for evaluating recognition tasks and to guide learning and inference in models
+that solve challenging problems, we need to develop better ways of human-computer interaction. My
+research touches upon several such themes in the intersection of computer vision, machine learning, and
+human-computer interaction including:"
+fba464cb8e3eff455fe80e8fb6d3547768efba2f,Survey Paper on Emotion Recognition,"International Journal of Engineering and Applied Sciences (IJEAS)
+ISSN: 2394-3661, Volume-3, Issue-2, February 2016
+Survey Paper on Emotion Recognition
+Prachi Shukla, Sandeep Patil"
+fb66546a16751810754430286fe4c636e4411ca4,Complementary feature sets for optimal face recognition,"Singh et al. EURASIP Journal on Image and Video Processing 2014, 2014:35
+http://jivp.eurasipjournals.com/content/2014/1/35
+R ES EAR CH
+Complementary feature sets for optimal face
+recognition
+Chandan Singh1, Neerja Mittal2* and Ekta Walia3
+Open Access"
+fb2379346def4846ac24bc41349e7cac7c1e7243,ClusterNet: 3D Instance Segmentation in RGB-D Images,"ClusterNet: 3D Instance Segmentation in RGB-D Images
+Lin Shao, Ye Tian, and Jeannette Bohg"
+fbb9cdd699baf86e9d616b259ada02449c2322ca,Active Testing: An Efficient and Robust Framework for Estimating Accuracy,"Active Testing: An Efficient and Robust Framework for Estimating Accuracy.
+Phuc Nguyen 1 Deva Ramanan 2 Charless Fowlkes 1"
+fb748a6953e72ad6d508109f8d809c25570ff07b,"The ""Eye Avoidance"" Hypothesis of Autism Face Processing.","NIH Public Access
+Author Manuscript
+J Autism Dev Disord. Author manuscript; available in PMC 2015 April 23.
+The “eye avoidance” hypothesis of autism face processing
+James W. Tanaka1 and Andrew Sung2
+Department of Psychology, University of Victoria, British Columbia
+Department of Special Education and Leadership Studies, University of Victoria, British
+Columbia"
+fb1732a1476798c42a0123aaf127036bf8daef09,LightDenseYOLO: A Fast and Accurate Marker Tracker for Autonomous UAV Landing by Visible Light Camera Sensor on Drone,"Article
+LightDenseYOLO: A Fast and Accurate Marker
+Tracker for Autonomous UAV Landing by Visible
+Light Camera Sensor on Drone
+Phong Ha Nguyen, Muhammad Arsalan, Ja Hyung Koo, Rizwan Ali Naqvi, Noi Quang Truong
+nd Kang Ryoung Park *
+Division of Electronics and Electrical Engineering, Dongguk University, 30 Pildong-ro 1-gil, Jung-gu, Seoul
+00-715, Korea; (P.H.N.); (M.A.); (J.H.K.);
+(R.A.N.); (N.Q.T.)
+* Correspondence: Tel.: +82-10-3111-7022; Fax: +82-2-2277-8735
+Received: 3 May 2018; Accepted: 22 May 2018; Published: 24 May 2018"
+fbb2f81fc00ee0f257d4aa79bbef8cad5000ac59,Reading Hidden Emotions: Spontaneous Micro-expression Spotting and Recognition,"Reading Hidden Emotions: Spontaneous
+Micro-expression Spotting and Recognition
+Xiaobai Li, Student Member, IEEE, Xiaopeng Hong, Member, IEEE, Antti Moilanen, Xiaohua Huang, Student
+Member, IEEE, Tomas Pfister, Guoying Zhao, Senior Member, IEEE, and Matti Pietik¨ainen, Fellow, IEEE"
+fb82681ac5d3487bd8e52dbb3d1fa220eeac855e,1 Network Notebook,"CONNECTIONS
+VOLUME IV, NUMBER 2
+Summer 1981
+CONTENTS
+NETWORK NOTEBOOK
+MEETING CALENDAR
+RESEARCH REPORTS
+Social Networks :
+A Beginner's Bookshelf
+Linton C . Freeman (California-Irvine)
+Summary of Research on Informant Accuracy in Network Data,
+nd on the Reverse Small World Problem
+H . Russell Bernard (Florida), Peter D . Killworth (Cambridge)
+& Lee Sailer (Pittsburgh)
+Russell's Paradox (Part II)
+Linton C . Freeman (California-Irvine)
+Goedel's Spoof :
+A Reply to Freeman
+Peter D . Killworth (Cambridge) & H . Russell Bernard (Florida)
+The Norwegian Connection :"
+fb76adeff0309ff4c8de4d0b413a8e3a637774d0,client2vec: Towards Systematic Baselines for Banking Applications,"lient2vec: Towards Systematic Baselines for Banking
+Applications
+Leonardo Baldassini
+BBVA Data & Analytics
+Jose Antonio Rodr´ıguez Serrano
+BBVA Data & Analytics"
+fb9ad920809669c1b1455cc26dbd900d8e719e61,3 D Gaze Estimation from Remote RGB-D Sensors THÈSE,"D Gaze Estimation from Remote RGB-D Sensors
+THÈSE NO 6680 (2015)
+PRÉSENTÉE LE 9 OCTOBRE 2015
+À LA FACULTÉ DES SCIENCES ET TECHNIQUES DE L'INGÉNIEUR
+LABORATOIRE DE L'IDIAP
+PROGRAMME DOCTORAL EN GÉNIE ÉLECTRIQUE
+ÉCOLE POLYTECHNIQUE FÉDÉRALE DE LAUSANNE
+POUR L'OBTENTION DU GRADE DE DOCTEUR ÈS SCIENCES
+Kenneth Alberto FUNES MORA
+cceptée sur proposition du jury:
+Prof. K. Aminian, président du jury
+Dr J.-M. Odobez, directeur de thèse
+Prof. L.-Ph. Morency, rapporteur
+Prof. D. Witzner Hansen, rapporteur
+Dr R. Boulic, rapporteur
+Suisse"
+ed28e8367fcb7df7e51963add9e2d85b46e2d5d6,A Novel Approach of Face Recognition Using Convolutional Neural Networks with Auto Encoder,"International J. of Engg. Research & Indu. Appls. (IJERIA).
+ISSN 0974-1518, Vol.9, No. III (December 2016), pp.23-42
+A NOVEL APPROACH OF FACE RECOGNITION USING
+CONVOLUTIONAL NEURAL NETWORKS WITH AUTO
+ENCODER
+T. SYED AKHEEL1 AND DR. S. A. K JILANI2
+Research Scholar, Dept. of Electronics & Communication Engineering,
+Rayalaseema University Kurnool, Andhra Pradesh.
+2 Research Supervisor, Professor, Dept. of Electronics & Communication Engineering,
+Madanapalle Institute of Technology & Science, Madanapalle, Andhra Pradesh."
+ed732b3a1f8fe733686a35688b090f426d018f9b,Dual-Process Theories in Social Cognitive Neuroscience,"This article was originally published in Brain Mapping: An Encyclopedic
+Reference, published by Elsevier, and the attached copy is provided by
+Elsevier for the author's benefit and for the benefit of the author's institution,
+for non-commercial research and educational use including without limitation
+use in instruction at your institution, sending it to specific colleagues who you
+know, and providing a copy to your institution’s administrator.
+All other uses, reproduction and distribution, including without limitation
+ommercial reprints, selling or licensing copies or access, or posting on open
+internet sites, your personal or institution’s website or repository, are
+prohibited. For exceptions, permission may be sought for such use through
+Elsevier's permissions site at:
+http://www.elsevier.com/locate/permissionusematerial
+Spunt R.P. (2015) Dual-Process Theories in Social Cognitive Neuroscience. In:
+Arthur W. Toga, editor. Brain Mapping: An Encyclopedic Reference, vol. 3, pp.
+11-215. Academic Press: Elsevier."
+ed6003db58b67f1dfac654868b437efcef6e2ccb,Restricted Isometry Property of Gaussian Random Projection for Finite Set of Subspaces,"Restricted Isometry Property of Gaussian Random Projection
+for Finite Set of Subspaces
+Gen Li and Yuantao Gu∗
+submitted April 7, 2017, revised August 11, 2017, accepted November 8, 2017"
+ed9967868fcca2ec38402d2bb3e6946b8e554472,Efficient Eye Location for Biomedical Imaging using Two-level Classifier Scheme,"International Journal of Control, Automation, and Systems, vol. 6, no. 6, pp. 828-835, December 2008
+Efficient Eye Location for Biomedical Imaging using Two-level Classifier
+Scheme
+Mi Young Nam, Xi Wang, and Phill Kyu Rhee*"
+edc5c359ed0fc24a3e85628f57fde59cd9b26dd4,Search Space Optimization and False Alarm Rejection Face Detection Framework,"Journal of Theoretical and Applied Information Technology
+30th September 2015. Vol.79. No.3
+© 2005 - 2015 JATIT & LLS. All rights reserved.
+ISSN: 1992-8645 www.jatit.org E-ISSN: 1817-3195
+SEARCH SPACE OPTIMIZATION AND FALSE ALARM
+REJECTION FACE DETECTION FRAMEWORK
+ALI SHARIFARA, 2MOHD SHAFRY MOHD RAHIM, 3 HAMED SAYYADI,
+FARHAD NAVABIFAR
+,2, Department of Computer Graphics and Multimedia, Faculty of Computing University Technology
+Malaysia (UTM).81310 Skudai Johor, Malaysia.
+Department of Computer Systems and Communications, Faculty of Computing University Technology
+Malaysia (UTM), 81310 Skudai Johor, Malaysia.
+Department of Computer Engineering Mobarakeh Branch-Islamic Azad University, Mobarakeh, Esfahan,
+E-mail:
+Iran."
+ed07fa6df6a8fc27015d25717c9f730dc9eede84,of the 19 th Workshop on the Semantics and Pragmatics of Dialogue,"SEMDIAL 2015
+goDIAL
+Proceedings of the 19th Workshop on
+the Semantics and Pragmatics of Dialogue
+Christine Howes and Staffan Larsson (eds.)
+Gothenburg, 24–26 August 2015"
+ed08ac6da6f8ead590b390b1d14e8a9b97370794,An Efficient Approach for 3D Face Recognition Using ANN Based Classifiers,"ISSN(Online): 2320-9801
+ISSN (Print): 2320-9798
+International Journal of Innovative Research in Computer
+nd Communication Engineering
+(An ISO 3297: 2007 Certified Organization)
+Vol. 3, Issue 9, September 2015
+An Efficient Approach for 3D Face
+Recognition Using ANN Based Classifiers
+Vaibhav M. Pathak1, Suhas S.Satonkar2, Dr.Prakash B.Khanale3
+Assistant Professor, Dept. of C.S., Shri Shivaji College, Parbhani, M.S, India1
+Assistant Professor, Dept. of C.S., Arts, Commerce and Science College, Gangakhed, M.S, India2
+Associate Professor, Dept. of C.S., Dnyanopasak College Parbhani, M.S, India3"
+ed3c4d2d28faaccbaef876a7daaecc3cccadb48f,3D Human Pose Estimation from a Single Image via Distance Matrix Regression,"D Human Pose Estimation from a Single Image via Distance Matrix Regression
+Institut de Rob`otica i Inform`atica Industrial (CSIC-UPC), 08028, Barcelona, Spain
+Francesc Moreno-Noguer"
+edf074a5eb3a1f71cc710ccc42849dceb27e3531,Towards real-time unsupervised monocular depth estimation on CPU,"Towards real-time unsupervised monocular depth estimation on CPU
+Matteo Poggi1, Filippo Aleotti2, Fabio Tosi1, Stefano Mattoccia1"
+ed6801362ab442097e7f753f163b9e9c0584b257,Learning Based 2D to 3D Conversion with Input Image Denoising,"International Journal of Scientific Research Engineering & Technology (IJSRET), ISSN 2278 – 0882
+Volume 4, Issue 5, May 2015
+Learning Based 2D to 3D Conversion with Input Image Denoising
+Divya K.P.1, Sneha K.2, Nafla C.N.3
+(Department of CSE, RCET, Akkikkvu, Thrissur)
+(Asst. Professor, Department of CSE, RCET, Akkikkvu, Thrissur)
+(Department of CSE, RCET, Akkikkvu, Thrissur)"
+edef98d2b021464576d8d28690d29f5431fd5828,Pixel-Level Alignment of Facial Images for High Accuracy Recognition Using Ensemble of Patches,"Pixel-Level Alignment of Facial Images
+for High Accuracy Recognition
+Using Ensemble of Patches
+Hoda Mohammadzade, Amirhossein Sayyafan, Benyamin Ghojogh"
+ed38d22cd5558d1abb40b477027d52ff7b6d09db,Title of thesis : SIMULTANEOUS MULTI - VIEW FACE TRACKING AND RECOGNITION IN VIDEO USING PARTICLE FILTERING,
+edceeaa885f3eb29761580095059f8a34be8408b,SitNet: Discrete Similarity Transfer Network for Zero-shot Hashing,"Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+ModelSimilar?Figure1:Zero-shothashing.Thehashingmodeltrainedwithseenconceptsshouldgeneralizewellontheunseenconcepts.supervisedhashinglikeSupervisedDiscreteHashing[Shenetal.,2015a].Withthesupervisedinformationlikesemanticsimilaritymatrixorclasslabels,thesupervisedapproachesachievesuperiorretrievalperformancebecausetheintrinsicsemanticpropertyinthedataisbetterexplored.Recentlythedeepconvolutionalneuralnetwork(CN-N)hasachievedgreatsuccessinmanycomputervisiontasks,likeimageclassification[Heetal.,2016]andfacerecognition[Wenetal.,2016].InspiredbyCNN’spower-fulfeatureextractionability,someworkshaveattemptedtobuildhashingmodelsbasedonCNN[Laietal.,2015;Liuetal.,2016;Xiaetal.,2014]haveappeared.Theyre-quirethehashcodesproducedbythelastfullyconnectedlay-ertopreservethesimilaritygivenbythesupervisedinfor-mation.ItisdemonstratedthattheimageretrievalaccuracyissignificantlyimprovedbyCNN-basedhashingapproachescomparedwiththenon-CNNones[Liuetal.,2016].Itshouldbenoticedthattheexistinghashingapproachesmainlyfocusontheclose-setretrieval,i.e.,theconceptsofpossibletestingsamples(bothdatabasesamplesandquerysamples)arewithinthetrainingset.However,theexplosivegrowthofWebimagesviolatesthissettingbecausethenewconceptsabouttheimagesmayemergerapidly.Itisexpen-sivetoannotatesufficienttrainingdataforthenewconcept-stimely,andalso,impracticaltoretrainthehashingmodelwhereastheretrievalsystemmeetsanewconcept.Asillus-tratedinFigure1,theexistingapproachesperformwellontheseenconceptsbecausetheyaregivencorrectguidance,buttheymayeasilyfailontheunseenconceptsthattheynev-ermeetbeforesuchasthe“dicycle”whichisakindofvehicle"
+edcf668846a3aaf55120aef0c806854936208b3d,Human Recognition in RGBD Combining Object Detectors and Conditional Random Fields,
+ed90a9d379f6412a1580e7eda5cb91640000dc42,Highly Efficient 8-bit Low Precision Inference of Convolutional Neural Networks with IntelCaffe,"Highly Efficient 8-bit Low Precision Inference of
+Convolutional Neural Networks with IntelCaffe
+Jiong Gong, Haihao Shen, Guoming Zhang, Xiaoli Liu, Shane Li, Ge Jin, Niharika Maheshwari,
+Evarist Fomenko, Eden Segal
+{jiong.gong, haihao.shen, guoming.zhang, xiaoli.liu, li.shane, ge.jin, niharika.maheshwari, evarist.m.fomenko,
+Intel Corporation"
+ed5519a03f52e47047079da2e0c480eb8c4a9805,An Evaluation of Trajectory Prediction Approaches and Notes on the TrajNet Benchmark,"An Evaluation of Trajectory Prediction Approaches and
+Notes on the TrajNet Benchmark.
+Stefan Becker ∗, Ronny Hug ∗, Wolfgang H¨ubner and Michael Arens
+Fraunhofer Institute for Optronics, System Technologies, and Image Exploitation IOSB
+Gutleuthausstr. 1, 76275 Ettlingen, Germany"
+eda20a2f33d0f6db44a2e7d060efad3caa6621e0,"Classification with Global, Local and Shared Features","Classification with Global, Local and Shared
+Features
+Hakan Bilen1, Vinay P. Namboodiri2, Luc J. Van Gool1,3
+ESAT-PSI/IBBT,VISICS/KU Leuven, Belgium
+Alcatel-Lucent Bell Labs, Antwerp, Belgium
+Computer Vision Laboratory, BIWI/ETH Z¨urich, Switzerland"
+ed04e161c953d345bcf5b910991d7566f7c486f7,Mirror my emotions! Combining facial expression analysis and synthesis on a robot,"Combining facial expression analysis and synthesis on a
+Mirror my emotions!
+robot
+Stefan Sosnowski1 and Christoph Mayer2 and Kolja K¨uhnlenz3 and Bernd Radig4"
+edbfbcebb14234b438d90d6dcd9b667e9071952d,Learning Fashion Compatibility with Bidirectional LSTMs,"A.B.C.D.?Task 1: Fill in the blankTask 2: Outfit generation given texts or imagesWhat to dress for a biz meeting?(a)(b)Task 3: Compatibility predictionScore: 0.7Figure1:Wefocusonthreetasksoffashionrecommenda-tion.Task1:recommendingafashionitemthatmatchesthestyleofanexistingset.Task2:generatinganoutfitbasedonusers’text/imageinputs.Task3:predictingthecompatibil-ityofanoutfit.conductedonautomaticfashionanalysisinthemultimediacom-munity.However,mostofthemfocusonclothingparsing[9,26],clothingrecognition[12],orclothingretrieval[10].Although,thereareafewworksthatinvestigatedfashionrecommendation[6,8,10],theyeitherfailtoconsiderthecompositionofitemstoformanout-fit[10]oronlysupportoneofthetworecommendationcategoriesdiscussedabove[6,8].Inaddition,itisdesirablethatrecommenda-tionscantakemultimodalinputsfromusers.Forexample,ausercanprovidekeywordslike“business”,oranimageofabusinessshirt,oracombinationofimagesandtext,togenerateacollec-tionoffashionitemsforabusinessoccasion.However,nopriorapproachsupportsmultimodalinputsforrecommendation.Keytofashionrecommendationismodelingthecompatibilityoffashionitems.Wecontendthatacompatibleoutfit(asshowninFigure3)shouldhavetwokeyproperties:(1)itemsintheout-fitshouldbevisuallycompatibleandsharesimilarstyle;(2)these"
+ed2420d0fc7087d61633bd9a5b2907d1c2de1810,Facial symmetry evaluation from high – density scanned data,
+eddb1a126eafecad2cead01c6c3bb4b88120d78a,Applications of a Graph Theoretic Based Clustering Framework in Computer Vision and Pattern Recognition,"DEPARTMENT DESIGN AND PLANNING IN COMPLEX ENVIRONMENTS
+DOTTORATO DI RICERCA IN NUOVE TECNOLOGIE, INFORMAZIONE TERRITORIO E
+UNIVERSIT‘A IUAV DI VENEZIA
+AMBIENTE, XXX CICLO
+APPLICATIONS OF A GRAPH THEORETIC BASED
+CLUSTERING FRAMEWORK IN COMPUTER VISION AND
+PATTERN RECOGNITION
+Doctoral Dissertation of:
+Yonatan Tariku Tesfaye
+Supervisor:
+Prof. Andrea Prati
+The Chair of the Doctoral Program:
+Prof. Fabio Peron"
+ed6a47f0e2e621d8420082ba1d0078189d76352f,3d Facial Expression Intensity Measurement Analysis,"Proceedings of the 6th International Conference on Computing and Informatics, ICOCI 2017
+5-27April, 2017 Kuala Lumpur. Universiti Utara Malaysia (http://www.uum.edu.my )
+Paper No.
+How to cite this paper:
+Alicia Cheong Chiek Ying, Hamimah Ujir, & Irwandi Hipiny. (2017). 3D facial expression intensity measurement
+nalysis in Zulikha, J. & N. H. Zakaria (Eds.), Proceedings of the 6th International Conference of Computing &
+Informatics (pp 43-48). Sintok: School of Computing.
+D FACIAL EXPRESSION INTENSITY MEASUREMENT
+ANALYSIS
+Alicia Cheong Chiek Ying1, Hamimah Ujir2and Irwandi Hipiny3
+Sarawak Information Systems Sdn. Bhd. (SAINS),
+Universiti Malaysia Sarawak,
+Universiti Malaysia Sarawak,"
+ed02b45d05e58803596891d660837c21be70a0af,Entity type modeling for multi-document summarization : generating descriptive summaries of geo-located entities,"Entity Type Modeling for Multi-Document
+Summarization: Generating Descriptive Summaries of
+Geo-Located Entities
+Ahmet Aker
+A thesis submitted in fulfilment of requirements for the degree of
+Doctor of Philosophy
+Department of Computer Science
+University of Sheffield
+November 2013"
+c1d2d12ade031d57f8d6a0333cbe8a772d752e01,Convex optimization techniques for the efficient recovery of a sparsely corrupted low-rank matrix,"Journal of Math-for-Industry, Vol.2(2010B-5), pp.147–156
+Convex optimization techniques for the ef‌f‌icient recovery of a sparsely
+orrupted low-rank matrix
+Silvia Gandy and Isao Yamada
+Received on August 10, 2010 / Revised on August 31, 2010"
+c1c34a3ab7815af1b9bcaf2822e4b9da8505f915,Image transmorphing with JPEG,"IMAGE TRANSMORPHING WITH JPEG
+Lin Yuan and Touradj Ebrahimi
+Multimedia Signal Processing Group, EPFL, Lausanne, Switzerland"
+c158009b33989c6677f1daa3f5926887c9471c5e,Controlling Complex Systems and Developing Dynamic Technology,"Electronic Thesis and Dissertations
+Peer Reviewed
+Title:
+Controlling Complex Systems and Developing Dynamic Technology
+Author:
+Avizienis, Audrius Victor
+Acceptance Date:
+Series:
+UCLA Electronic Theses and Dissertations
+Degree:
+Ph.D., Chemistry 0153UCLA
+Advisor(s):
+Gimzewski, James K
+Committee:
+Kodambaka, Suneel, Baugh, Delroy A
+Permalink:
+https://escholarship.org/uc/item/35c10822"
+c18d80d00f2a7107bfe780eeec21b51a634ea925,Computational perspectives on the other-race effect,"This article was downloaded by: [The University of Texas at Dallas], [Alice
+O'Toole]
+On: 25 July 2013, At: 12:46
+Publisher: Routledge
+Informa Ltd Registered in England and Wales Registered Number: 1072954
+Registered office: Mortimer House, 37-41 Mortimer Street, London W1T 3JH,
+Visual Cognition
+Publication details, including instructions for authors
+nd subscription information:
+http://www.tandfonline.com/loi/pvis20
+Computational perspectives on
+the other-race effect
+Alice J. O'Toole a & Vaidehi Natu a
+School of Behavioural and Brain Sciences , University
+of Texas at Dallas , Richardson , TX , USA
+Published online: 14 Jun 2013.
+To cite this article: Visual Cognition (2013): Computational perspectives on the other-
+race effect, Visual Cognition, DOI: 10.1080/13506285.2013.803505
+To link to this article: http://dx.doi.org/10.1080/13506285.2013.803505
+PLEASE SCROLL DOWN FOR ARTICLE"
+c19ed5102ecd953d5c78d5a0b87eaa51658e07d8,Recovering Accurate 3D Human Pose in the Wild Using IMUs and a Moving Camera,"Recovering Accurate 3D Human Pose in The
+Wild Using IMUs and a Moving Camera
+Timo von Marcard1, Roberto Henschel1, Michael J. Black2, Bodo Rosenhahn1,
+nd Gerard Pons-Moll3
+Leibniz Universit¨at Hannover, Germany
+MPI for Intelligent Systems, T¨ubingen, Germany
+MPI for Informatics, Saarland Informatics Campus, Germany"
+c1b2668186fcd01b3c0e93a9a0a68e3eb88a09ab,Eliminating the Blind Spot: Adapting 3D Object Detection and Monocular Depth Estimation to 360 ^\circ ∘ Panoramic Imagery,"Eliminating the Blind Spot: Adapting 3D Object
+Detection and Monocular Depth Estimation to
+60◦ Panoramic Imagery
+Gr´egoire Payen de La Garanderie, Amir Atapour Abarghouei,
+nd Toby P. Breckon
+Department of Computer Science
+Durham University"
+c1c8ea4b2118095bea55cf6b51c36dbf95cc7f2c,Learning 3D Segment Descriptors for Place Recognition,"Learning 3D Segment Descriptors for Place Recognition
+Andrei Cramariuc
+Renaud Dubé
+Hannes Sommer
+Roland Siegwart
+Igor Gilitschenski∗"
+c160bcbc8f0517a97e46042c84343bf3f0477478,A Dynamic Approach and a New Dataset for Hand-detection in First Person Vision,"A Dynamic Approach and a New Dataset for
+Hand-Detection in First Person Vision.
+Alejandro Betancourt1,2, Pietro Morerio1, Emilia I. Barakova2, Lucio Marcenaro1,
+Matthias Rauterberg2, Carlo S. Regazzoni1
+Department of Naval, Electric, Electronic and Telecommunications Engineering - University
+Designed Intelligence Group, Department of Industrial Design - Eindhoven University of
+Technology, The Netherlands.
+of Genoa, Italy."
+c165003060eeb01e05800a5ee4cd327f1e0bf5e3,SDC-Net: Video Prediction Using Spatially-Displaced Convolution,"SDC-Net: Video prediction using
+spatially-displaced convolution
+Fitsum A. Reda, Guilin Liu, Kevin J. Shih, Robert Kirby, Jon Barker,
+David Tarjan, Andrew Tao, and Bryan Catanzaro
+Nvidia Corporation, Santa Clara CA 95051, USA
+Fig. 1. Frame prediction on a YouTube video frame featuring a panning camera. Left
+to right: Ground-truth, MCNet [34] result, and our SDC-Net result. The SDC-Net
+predicted frame is sharper and preserves fine image details, while color distortion and
+lurriness is seen in the tree and text in MCNet’s predicted frame."
+c19845c84abc9e3afe17003fdcd545ed020d0624,A face biometric benchmarking review and characterisation,"A Face Biometric
+Benchmarking Review and
+Characterisation
+Sandra Mau
+Senior Research Engineer
+NICTA Advanced Surveillance
+BeFIT workshop – ICCV 2011"
+c10a15e52c85654db9c9343ae1dd892a2ac4a279,Learning the Relative Importance of Objects from Tagged Images for Retrieval and Cross-Modal Search,"Int J Comput Vis (2012) 100:134–153
+DOI 10.1007/s11263-011-0494-3
+Learning the Relative Importance of Objects from Tagged Images
+for Retrieval and Cross-Modal Search
+Sung Ju Hwang · Kristen Grauman
+Received: 16 December 2010 / Accepted: 23 August 2011 / Published online: 18 October 2011
+© Springer Science+Business Media, LLC 2011"
+c1059a702f53c44bb26d3313964e811adf01d9b4,Low and mid-level features for target detection in satellite images,"ISSN: 2278 – 1323
+International Journal of Advanced Research in Computer Engineering & Technology (IJARCET)
+Volume 2, Issue 2, February 2013
+Low and mid-level features for target detection in satellite images
+Rajani.D.C"
+c1bbcdf3b5901e3378a89808b07e53a502c295f0,Allostasis and the human brain: Integrating models of stress from the social and life sciences.,"Psychol Rev. Author manuscript; available in PMC 2011 January 1.
+Published in final edited form as:
+Psychol Rev. 2010 January; 117(1): 134–174.
+doi: 10.1037/a0017773
+Allostasis and the human brain: Integrating models of stress from the social and life sciences
+Barbara L. Ganzel, Pamela A. Morris, and Elaine Wethington
+Author information ► Copyright and License information ►
+The publisher's final edited version of this article is available at Psychol Rev
+See other articles in PMC that cite the published article."
+c1dfabe36a4db26bf378417985a6aacb0f769735,Describing Visual Scene through EigenMaps,"Journal of Computer Vision and Image Processing, NWPJ-201109-50
+Describing Visual Scene through EigenMaps
+Shizhi Chen, Student Member, IEEE, and YingLi Tian, Senior Member, IEEE"
+c175381a6b84ebd0a920ff44ccdccabd98bdfb94,Paper on Retrieval Magnets for Facial Duplication by Search Based Face Annotation,"International Journal of Science and Research (IJSR)
+ISSN (Online): 2319-7064
+Impact Factor (2012): 3.358
+A Review Paper on Retrieval Magnets for Facial
+Duplication by Search Based Face Annotation
+Deepika B. Patil1, Ayesha Butalia 2
+P.G. Student, Department of Computer Engineering, GMRCEM, Wagholi, Pune, India,
+Professor, Department of Computer Engineering, GMRCEM, Wagholi, Pune, India,"
+c1ff88493721af1940df0d00bcfeefaa14f1711f,Subspace Regression: Predicting a Subspace from one Sample,"#1369
+CVPR 2010 Submission #1369. CONFIDENTIAL REVIEW COPY. DO NOT DISTRIBUTE.
+#1369
+Subspace Regression: Predicting a Subspace from one Sample
+Anonymous CVPR submission
+Paper ID 1369"
+c1100efda7c00d3181a6a065ab1474c2f864e267,Video visual analytics,"Video Visual Analytics
+Von der Fakultät Informatik, Elektrotechnik und
+Informationstechnik der Universität Stuttgart
+genehmigte Abhandlung
+zur Erlangung der Würde eines
+Doktors der Naturwissenschaften (Dr. rer. nat.)
+Vorgelegt von
+Markus Johannes Höferlin
+us Herrenberg
+Hauptberichter: Prof. Dr. Daniel Weiskopf
+Mitberichter:
+Prof. Dr. Gunther Heidemann
+Prof. Min Chen, BSc, PhD, FBCS, FEG, FLSW
+Tag der mündlichen Prüfung: 27. Mai 2013
+Visualisierungsinstitut
+der Universität Stuttgart"
+c132a6e869cd171e403784c172961471733dce31,In-vehicle Pedestrian Detection Using Stereo Vision Technology,"IN-VEHICLE PEDESTRIAN DETECTION USING STEREO VISION
+TECHNOLOGY
+Wei Zhang, Ph.D., P.E.
+Highway Research Engineer, Office of Safety Research & Development, HRDS-10
+Federal Highway Administration
+6300 Georgetown Pike, McLean, VA 22101, USA, e-mail:
+Submitted to the 3rd International Conference on Road Safety and Simulation, September 14-16,
+011, Indianapolis, USA"
+c16bae6b2e578df2cba8e436e02bdeda281c2743,Tensor Discriminant Color Space for Face Recognition,"Tensor Discriminant Color Space for Face
+Recognition
+Su-Jing Wang, Jian Yang, Member, IEEE, Na Zhang, and Chun-Guang Zhou*"
+c11eb653746afa8148dc9153780a4584ea529d28,Global and Local Consistent Wavelet-domain Age Synthesis,"Global and Local Consistent Wavelet-domain Age
+Synthesis
+Peipei Li†, Yibo Hu†, Ran He Member, IEEE and Zhenan Sun Member, IEEE"
+c1b971cd7263e788e114cf8c4aa076a2e170990f,Establishing the fundamentals for an elephant early warning and monitoring system,"Establishing the fundamentals for an elephant
+early warning and monitoring system
+Zeppelzauer and Stoeger
+Zeppelzauer and Stoeger BMC Res Notes (2015) 8:409
+DOI 10.1186/s13104-015-1370-y"
+c1ebbdb47cb6a0ed49c4d1cf39d7565060e6a7ee,Robust Facial Landmark Localization Based on Texture and Pose Correlated Initialization,"Robust Facial Landmark Localization Based on
+Yiyun Pan, Junwei Zhou, Member, IEEE, Yongsheng Gao, Senior Member, IEEE, Shengwu Xiong"
+c175f1666f3444e407660c5935a05b2a53f346f0,Modifying the Memorability of Face,"Modifying the Memorability of Face Photographs
+The MIT Faculty has made this article openly available. Please share
+how this access benefits you. Your story matters.
+Citation
+As Published
+Publisher
+Version
+Accessed
+Citable Link
+Terms of Use
+Detailed Terms
+Khosla, Aditya, Wilma A. Bainbridge, Antonio Torralba, and Aude
+Oliva. “Modifying the Memorability of Face Photographs.” 2013
+IEEE International Conference on Computer Vision (December
+013).
+http://dx.doi.org/10.1109/ICCV.2013.397
+Institute of Electrical and Electronics Engineers (IEEE)
+Author's final manuscript
+Mon Nov 05 02:44:57 EST 2018
+http://hdl.handle.net/1721.1/90986"
+c1c3e32ecf6da8e1372fab7d504cb8cd2c86fd93,Face recognition based on artificial immune networks and principal component analysis with single training image per person,"Face recognition based on artificial immune networks and principal
+omponent analysis with single training image per person
+, Department of Mechanical Engineering, Tatung University, Taiwan, ROC,
+Guan-Chun Luh"
+c1087c588960dd7c00a2b5feed57fbdb70d066f1,Quantifying cortical surface asymmetry via logistic discriminant analysis,"Quantifying Cortical Surface Asymmetry
+via Logistic Discriminant Analysis
+Moo K. Chung1,2, Daniel J. Kelley2, Kim M. Dalton2, Richard J. Davidon2,3
+Department of Biostatistics and Medical Informatics
+Waisman Laboratory for Brain Imaging and Behavior
+Department of Psychology and Psychiatry
+University of Wisconsin, Madison, WI 53706, USA"
+c1130d5c7bb1311e04cffbaf2bf6cbe734adc2ac,DFNet: Semantic Segmentation on Panoramic Images with Dynamic Loss Weights and Residual Fusion Block,"DFNet: Semantic Segmentation on Panoramic Images with Dynamic Loss
+Weights and Residual Fusion Block
+Wei Jiang, Yan Wu∗
+technique, moreover,"
+c1bd99083098cf8dbfed8d25514755bc5356bc06,Fly Page (This sheet is left blank and not counted) GENERALIZED DISCRIMINANT ANALYSIS IN CONTENT-BASED IMAGE RETRIEVAL APPROVED BY SUPERVISING,"Fly Page
+(This sheet is left blank and not counted)"
+c1dd69df9dfbd7b526cc89a5749f7f7fabc1e290,Unconstrained face identification with multi-scale block-based correlation,"Unconstrained face identification with multi-scale block-based
+orrelation
+Gaston, J., MIng, J., & Crookes, D. (2016). Unconstrained face identification with multi-scale block-based
+orrelation. In Proceedings of the 2017 IEEE International Conference on Acoustics, Speech and Signal
+Processing (pp. 1477-1481). [978-1-5090-4117-6/17] Institute of Electrical and Electronics Engineers (IEEE).
+Published in:
+Proceedings of the 2017 IEEE International Conference on Acoustics, Speech and Signal Processing
+Document Version:
+Peer reviewed version
+Queen's University Belfast - Research Portal:
+Link to publication record in Queen's University Belfast Research Portal
+Publisher rights
+© 2017 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future
+media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or
+redistribution to servers or lists, or reuse of any copyrighted component of this work in other works.
+General rights
+Copyright for the publications made accessible via the Queen's University Belfast Research Portal is retained by the author(s) and / or other
+opyright owners and it is a condition of accessing these publications that users recognise and abide by the legal requirements associated
+with these rights.
+Take down policy"
+c68ec931585847b37cde9f910f40b2091a662e83,A Comparative Evaluation of Dotted Raster-Stereography and Feature-Based Techniques for Automated Face Recognition,"(IJACSA) International Journal of Advanced Computer Science and Applications,
+Vol. 9, No. 6, 2018
+A Comparative Evaluation of Dotted Raster-
+Stereography and Feature-Based Techniques for
+Automated Face Recognition
+Muhammad Wasim
+S. Talha Ahsan
+Department of Computer Science
+Department of Electrical Engineering
+Usman Institute of Technology
+Usman Institute of Technology
+Karachi, Pakistan
+Karachi, Pakistan
+Lubaid Ahmed, Syed Faisal Ali,
+Fauzan Saeed
+Department of Computer Science
+Usman Institute of Technology
+Karachi, Pakistan
+feature-based
+system. The"
+c696c9bbe27434cb6279223a79b17535cd6e88c8,Facial Expression Recognition with Pyramid Gabor Features and Complete Kernel Fisher Linear Discriminant Analysis,"International Journal of Information Technology Vol.11 No.9 2005
+Discriminant Analysis
+Facial Expression Recognition with Pyramid Gabor
+Features and Complete Kernel Fisher Linear
+Duan-Duan Yang1, Lian-Wen Jin1, Jun-Xun Yin1, Li-Xin Zhen2, Jian-Cheng Huang2
+School of Electronic and Information Engineering, South China
+University of Technology, Guangzhou, 510640, P.R.China
+{ddyang,
+Motorola China Research Center, Shanghai, 210000, P.R.China
+{Li-Xin.Zhen,"
+c6d6193c8f611331c8178c3857f9ef92607a4507,A Study on Using Mid-Wave Infrared Images for Face Recognition,"Sensing Technologies for Global Health, Military Medicine, Disaster Response, and Environmental Monitoring II; and
+Biometric Technology for Human Identification IX, edited by Sárka O. Southern, et al., Proc. of SPIE Vol. 8371, 83711K
+© 2012 SPIE · CCC code: 0277-786X/12/$18 · doi: 10.1117/12.918899
+Proc. of SPIE Vol. 8371 83711K-1
+From: http://spiedigitallibrary.org/ on 04/30/2013 Terms of Use: http://spiedl.org/terms"
+c610888cadcf2aa45e7367f43e42eaa7a586652e,Fast Convergence for Object Detection by Learning how to Combine Error Functions,"(cid:13) 2018 IEEE.
+Personal use of this material is permitted. Permission from
+IEEE must be obtained for all other uses, in any current or
+future media, including reprinting/republishing this material
+for advertising or promotional purposes, creating new
+ollective works, for resale or redistribution to servers or
+lists, or reuse of any copyrighted component of this work in
+other works.
+Accepted version."
+c614450c9b1d89d5fda23a54dbf6a27a4b821ac0,Face Image Retrieval of Efficient Sparse Code words and Multiple Attribute in Binning Image,"Vol.60: e17160480, January-December 2017
+http://dx.doi.org/10.1590/1678-4324-2017160480
+ISSN 1678-4324 Online Edition
+Engineering,Technology and Techniques
+BRAZILIAN ARCHIVES OF
+BIOLOGY AND TECHNOLOGY
+A N I N T E R N A T I O N A L J O U R N A L
+Face Image Retrieval of Efficient Sparse Code words and
+Multiple Attribute in Binning Image
+Suchitra S1*.
+Srm Easwari Engineering College, Ramapuram, Bharathi Salai, Chennai, Tamil Nadu, India."
+c6c3cee8adacff8a63ab84dc847141315e874400,Disentangling by Factorising,"Disentangling by Factorising
+Hyunjik Kim 1 2 Andriy Mnih 1"
+c6f3399edb73cfba1248aec964630c8d54a9c534,A comparison of CNN-based face and head detectors for real-time video surveillance applications,"A Comparison of CNN-based Face and Head Detectors for
+Real-Time Video Surveillance Applications
+Le Thanh Nguyen-Meidine1, Eric Granger 1, Madhu Kiran1 and Louis-Antoine Blais-Morin2
+´Ecole de technologie sup´erieure, Universit´e du Qu´ebec, Montreal, Canada
+Genetec Inc., Montreal, Canada"
+c62c4e5d8243da6bc1fde64097b2ab8971e6e51f,"A Unified Approach for Conventional Zero-Shot, Generalized Zero-Shot, and Few-Shot Learning","JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2017
+A Unified approach for Conventional Zero-shot,
+Generalized Zero-shot and Few-shot Learning
+Shafin Rahman, Salman H. Khan and Fatih Porikli"
+c636cd6eba286357fe807c0ca4b02c3b9b7b5619,Training Deep Networks with Synthetic Data: Bridging the Reality Gap by Domain Randomization,"Training Deep Networks with Synthetic Data:
+Bridging the Reality Gap by Domain Randomization
+Jonathan Tremblay∗
+Aayush Prakash∗
+David Acuna∗†
+Mark Brophy∗
+Varun Jampani
+Cem Anil†
+Thang To
+Eric Cameracci
+Shaad Boochoon
+Stan Birchfield
+NVIDIA
+also University of Toronto
+{jtremblay,aayushp,dacunamarrer,markb,vjampani,"
+c600e985ae3af9143b41271abd040a1c1e89177e,Nonparametric Video Retrieval and Frame Classification using Tiny Videos,"Nonparametric Video Retrieval and Frame Classification using Tiny Videos
+{tag} {/tag}
+IJCA Proceedings on International Conference in
+Recent trends in Computational Methods, Communication and Controls (ICON3C 2012)
+© 2012 by IJCA Journal
+ICON3C - Number 3
+Year of Publication: 2012
+Authors:
+A. K. M. Shanawas Fathima
+R. Kanthavel
+{bibtex}icon3c1024.bib{/bibtex}"
+c694b397a3a0950cd20699a687fe6c8a3173b107,Explaining autism spectrum disorders: central coherence vs. predictive coding theories.,"J Neurophysiol 112: 2669 –2671, 2014.
+First published May 28, 2014; doi:10.1152/jn.00242.2014.
+Neuro Forum
+Explaining autism spectrum disorders: central coherence vs. predictive coding
+theories
+Jason S. Chan and Marcus J. Naumer
+Institute of Medical Psychology, Goethe-University, Frankfurt, Germany
+Submitted 27 March 2014; accepted in final form 23 May 2014
+Chan JS, Naumer MJ. Explaining autism spectrum disorders: central
+oherence vs. predictive coding theories. J Neurophysiol 112: 2669–2671,
+014. First published May 28, 2014; doi:10.1152/jn.00242.2014.—In this
+rticle, we review a recent paper by Stevenson et al. (J Neurosci 34:
+691–697, 2014). This paper illustrates the need to present different forms of
+stimuli in order to characterize the perceptual abilities of people with autism
+spectrum disorder (ASD). Furthermore, we will discuss their behavioral
+results and offer an opposing viewpoint to the suggested neuronal drivers of
+utism spectrum disorder; multisensory integration; temporal binding
+window
+THE DIFFERENCE in propagation time between an auditory and a
+visual stimulus can be substantial, depending on the distance"
+c6d5d47513d6a7a1b0b92b33efda3f2a866d34ad,Characterizing International Travel Behavior from Geotagged Photos: A Case Study of Flickr,"RESEARCH ARTICLE
+Characterizing International Travel Behavior
+from Geotagged Photos: A Case Study of
+Flickr
+Yihong Yuan*, Monica Medel
+Department of Geography, Texas State University, San Marcos, Texas, 78666, United States of America"
+c679fd4e29597c64e5921fad796183ae30db8396,LG ] 5 M ar 2 01 6 A Latent-Variable Grid Model,"A Latent-Variable Grid Model
+Rajasekaran Masatran
+Computer Science and Engineering, Indian Institute of Technology Madras
+FREESHELL · ORG"
+c6638c7c1ec7b8fd5cdba039536fb44d12cff5c2,Towards a Development of Augmented Reality for Jewellery App,"Revati Mukesh Raspayle et al, International Journal of Computer Science and Mobile Computing, Vol.5 Issue.6, June- 2016, pg. 129-137
+Available Online at www.ijcsmc.com
+International Journal of Computer Science and Mobile Computing
+A Monthly Journal of Computer Science and Information Technology
+ISSN 2320–088X
+IMPACT FACTOR: 5.258
+IJCSMC, Vol. 5, Issue. 6, June 2016, pg.129 – 137
+Towards a Development of Augmented
+Reality for Jewellery App
+Er. Revati Mukesh Raspayle1, Prof. Kavita Kelkar2
+¹Student (M.Tech) CSE, Mumbai University, Computer Engineering, K.J SOMAIYA COE Vidyavihar,
+²Assistant Professor, Mumbai University, Computer Engineering, K.J SOMAIYA COE Vidyavihar,
+Mumbai 400077, India
+Mumbai 400077, India"
+c693c578d783323d130d642bd04d391aac7e8f81,Semantic Pyramids for Gender and Action Recognition,"Semantic Pyramids for Gender and Action
+Recognition
+Fahad Shahbaz Khan, Joost van de Weijer, Rao Muhammad Anwer, Michael Felsberg, Carlo Gatta"
+c6badb2cc1191f9dd5e5bea7df75a76349176d01,Densely tracking sequences of 3D face scans,"Densely tracking sequences of 3D face scans
+Huaxiong DING
+Ecole Centrale de LYON
+Liming Chen
+Ecole Centrale de LYON"
+c6c086748474dcda06d773891848aa1472de3560,Activity Recognition Based on a Magnitude-Orientation Stream Network,"Activity Recognition based on a
+Magnitude-Orientation Stream Network
+Carlos Caetano, Victor H. C. de Melo, Jefersson A. dos Santos, William Robson Schwartz
+Smart Surveillance Interest Group, Department of Computer Science
+Universidade Federal de Minas Gerais, Belo Horizonte, Brazil"
+c6eb026d3a0081f4cb5cde16d3170f8ecf8ce706,Face Recognition: From Traditional to Deep Learning Methods,"Face Recognition: From Traditional to Deep
+Learning Methods
+Daniel S´aez Trigueros, Li Meng
+School of Engineering and Technology
+University of Hertfordshire
+Hatfield AL10 9AB, UK
+Margaret Hartnett
+GBG plc
+London E14 9QD, UK"
+c6ffa09c4a6cacbbd3c41c8ae7a728b0de6e10b6,Feature extraction using constrained maximum variance mapping,"This article appeared in a journal published by Elsevier. The attached
+opy is furnished to the author for internal non-commercial research
+nd education use, including for instruction at the authors institution
+nd sharing with colleagues.
+Other uses, including reproduction and distribution, or selling or
+licensing copies, or posting to personal, institutional or third party
+websites are prohibited.
+In most cases authors are permitted to post their version of the
+rticle (e.g. in Word or Tex form) to their personal website or
+institutional repository. Authors requiring further information
+regarding Elsevier’s archiving and manuscript policies are
+encouraged to visit:
+http://www.elsevier.com/copyright"
+c6fdbdbbbc7642daae22df0b7812e78d0647afb3,Unsupervised feature learning with C-SVDDNet,"Unsupervised Feature Learning with C-SVDDNet
+Dong Wang and Xiaoyang Tan"
+c6dab0aba7045f078313a4186cd507ff8eb8ce32,Atypical disengagement from faces and its modulation by the control of eye fixation in children with autism spectrum disorder.,"BIROn - Birkbeck Institutional Research Online
+Enabling open access to Birkbeck’s published research output
+Atypical disengagement from faces and its modulation
+y the control of eye fixation in children with Autism
+Spectrum Disorder
+Journal Article
+http://eprints.bbk.ac.uk/4677
+Version: Accepted (Refereed)
+Citation:
+© 2011 Springer
+Publisher version
+______________________________________________________________
+All articles available through Birkbeck ePrints are protected by intellectual property law, including
+opyright law. Any use made of the contents should comply with the relevant law.
+______________________________________________________________
+Kikuchi, Y.; Senju, A.; Akechi, H.; Tojo, Y.; Osanai, H.; Hasegawa, T.
+(2011)
+Atypical disengagement from faces and its modulation by the control of
+eye fixation in children with Autism Spectrum Disorder
+Deposit Guide"
+c6260f83e86dd4d1ece92e528422ecc6e36c13ef,Siamese networks for generating adversarial examples,"Siamese networks for generating adversarial examples
+Mandar Kulkarni
+Data Scientist
+Schlumberger"
+c62c07de196e95eaaf614fb150a4fa4ce49588b4,SSR-Net: A Compact Soft Stagewise Regression Network for Age Estimation,Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intelligence (IJCAI-18)
+c607572fd2594ca83f732c9790fd590da9e69eb1,Comparative Evaluation of Deep Architectures for Face Recognition in Unconstrained Environment ( FRUE ),"Comparative Evaluation of Deep Architectures for Face
+Recognition in Unconstrained Environment (FRUE)
+Deeksha Gupta
+Department of Computer Science and Applications,
+MCM DAV College for Women, Chandigarh, (India)"
+ec90d333588421764dff55658a73bbd3ea3016d2,Protocol for Systematic Literature Review of Face Recognition in Uncontrolled Environment,"Research Article
+Protocol for Systematic Literature Review of Face
+Recognition in Uncontrolled Environment
+Faizan Ullah, Sabir Shah, Dilawar Shah, Abdusalam, Shujaat Ali
+Department of Computer Science, Bacha Khan University, Charsadda, KPK, Pakistan"
+ec1e03ec72186224b93b2611ff873656ed4d2f74,D Reconstruction of “ Inthe-Wild ” Faces in Images and Videos,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+D Reconstruction of “In-the-Wild” Faces in
+Images and Videos
+James Booth, Anastasios Roussos, Evangelos Ververas, Epameinondas Anton-
+kos, Stylianos Ploumpis, Yannis Panagakis, and Stefanos Zafeiriou"
+ec89c5f2f5acce23b0d05736cd9f32d4ca6dc382,Body Actions Change the Appearance of Facial Expressions,"Body Actions Change the Appearance of Facial
+Expressions
+Carlo Fantoni1,2*, Walter Gerbino1
+Department of Life Sciences, Psychology Unit ‘‘Gaetano Kanizsa’’, University of Trieste, Trieste, Italy, 2 Center for Neuroscience and Cognitive Istituto
+Italiano di Tecnologia, Rovereto, Italy"
+ec9e8d69b67bcb2814b538091fa288b6bdbb990f,GURLS: a Toolbox for Regularized Least Squares Learning,"Computer Science and ArtificialIntelligence LaboratoryTechnical Reportmassachusetts institute of technology, cambridge, ma 02139 usa — www.csail.mit.eduMIT-CSAIL-TR-2012-003CBCL-306January 31, 2012GURLS: a Toolbox for Regularized Least Squares LearningAndrea Tacchetti, Pavan S. Mallapragada, Matteo Santoro, and Lorenzo Rosasco"
+ece31d41b4da5457d570c04d22f19fcd026776b6,Learning Deep Disentangled Embeddings with the F-Statistic Loss,"Learning Deep Disentangled Embeddings
+With the F-Statistic Loss
+Karl Ridgeway
+University of Colorado
+Boulder, Colorado
+Department of Computer Science
+Department of Computer Science
+Michael C. Mozer
+University of Colorado
+Boulder, Colorado"
+ec2027c2dd93e4ee8316cc0b3069e8abfdcc2ecf,Latent Variable PixelCNNs for Natural Image Modeling,"Latent Variable PixelCNNs for Natural Image Modeling
+Alexander Kolesnikov 1 Christoph H. Lampert 1"
+ec7a545ba99542b2b74340d2e863590e4f450bb7,Sparse Subspace Clustering by Orthogonal Matching Pursuit,"Sparse Subspace Clustering by Orthogonal Matching Pursuit
+Center for Imaging Science, Johns Hopkins University, Baltimore, MD, 21218, USA
+Chong You
+nd Ren´e Vidal"
+ec443db55db1a6721387b2054b94f6df020994ae,Weakly Supervised Visual Dictionary Learning by Harnessing Image Attributes,"Weakly Supervised Visual Dictionary Learning
+y Harnessing Image Attributes
+Yue Gao, Senior Member, IEEE, Rongrong Ji, Senior Member, IEEE, Wei Liu, Member, IEEE,
+Qionghai Dai, Senior Member, IEEE, and Gang Hua, Senior Member, IEEE"
+ec25f39fa6b4ef4529981a1ae051086e93642d27,Deformable Part Models are Convolutional Neural Networks Tech report,"Deformable Part Models are Convolutional Neural Networks
+Tech report
+Ross Girshick Forrest Iandola Trevor Darrell
+Jitendra Malik
+UC Berkeley"
+ec12f805a48004a90e0057c7b844d8119cb21b4a,Distance-Based Descriptors and Their Application in the Task of Object Detection,"Distance-Based Descriptors and Their
+Application in the Task of Object Detection
+Radovan Fusek(B) and Eduard Sojka
+Department of Computer Science, Technical University of Ostrava, FEECS,
+7. Listopadu 15, 708 33 Ostrava-Poruba, Czech Republic"
+eca9b9dd665556423278b85f79e1d589009a7ea7,Person Re-Identi fi cation by Robust Canonical Correlation Analysis,"IEEE SIGNAL PROCESSING LETTERS, VOL. 22, NO. 8, AUGUST 2015
+Person Re-Identification by Robust
+Canonical Correlation Analysis
+Le An, Songfan Yang, Member, IEEE, and Bir Bhanu, Fellow, IEEE"
+ecf2ba5ea183a6be63b57543a19dd41e8017daaf,Cooperative Learning of Energy-Based Model and Latent Variable Model via MCMC Teaching,"Cooperative Learning of Energy-Based Model and Latent Variable Model via
+MCMC Teaching
+Jianwen Xie 1,2, Yang Lu 1,3, Ruiqi Gao 1, Ying Nian Wu 1
+Department of Statistics, University of California, Los Angeles, USA
+Hikvision Research America
+Amazon RSML (Retail System Machine Learning) Group"
+ec1223c8fc16751dd577d3418f61d44a139c7dc3,Group Influences on Engaging Self-Control: Children Delay Gratification and Value It More When Their In-Group Delays and Their Out-Group Doesn't.,"RUNNING HEAD: GROUP INFLUENCES ON SELF-CONTROL
+Group Influences on Engaging Self-control: Children Delay Gratification and Value It More
+When Their In-Group Delays and Their Out-Group Doesn’t
+Sabine Doebel* and Yuko Munakata
+Department of Psychology and Neuroscience, University of Colorado Boulder
+*Corresponding author"
+ecd0a2e55f456b69243d1278fee15d8dbfc98c28,Heterogeneous Multicores: When Slower is Faster,"Heterogeneous Multicores: When Slower is Faster
+Tomas Hruby
+Herbert Bos
+The Network Institute, VU University Amsterdam
+Andrew S. Tanenbaum"
+ecc09ab9c61dc3a3a15f55332f63bccbf443f291,Cross-Domain Deep Face Matching for Real Banking Security Systems,"Cross-Domain Deep Face Matching for Real
+Banking Security Systems
+Johnatan S. Oliveira1,∗, Gustavo B. Souza2,∗, Anderson R. Rocha3, Fl´avio E. Deus1 and Aparecido N. Marana4
+Department of Electrical Engineering, University of Bras´ılia (UnB), Bras´ılia, Brazil.
+Department of Computing, Federal University of S˜ao Carlos (UFSCar), S˜ao Carlos, Brazil.
+Institute of Computing, University of Campinas (Unicamp), Campinas, Brazil.
+Department of Computing, S˜ao Paulo State University (Unesp), Bauru, Brazil.
+E-mails: {jow,
+Equal contributors."
+ec54000c6c0e660dd99051bdbd7aed2988e27ab8,Two in One: Joint Pose Estimation and Face Recognition with Pca,"TWO IN ONE: JOINT POSE ESTIMATION AND FACE RECOGNITION WITH P2CA1
+Francesc Tarres*, Antonio Rama*
+{tarres,
+Davide Onofrio+, Stefano Tubaro+
+{d.onofrio,
+*Dept. Teoria del Senyal i Comunicacions - Universitat Politècnica de Catalunya, Barcelona, Spain
++Dipartimento di Elettronica e Informazione - Politecnico di Milano, Meiland, Italy"
+ecf4690ddd3ad26f9cd1749d16ef1aa06d391f92,Does Exposure to Hostile Environments Predict Enhanced Emotion Detection?,"PDF hosted at the Radboud Repository of the Radboud University
+Nijmegen
+The following full text is a publisher's version.
+For additional information about this publication click this link.
+http://hdl.handle.net/2066/191999
+Please be advised that this information was generated on 2018-06-28 and may be subject to
+hange."
+ecdf8e5393eead0b63c5bc4fbe426db5a70574eb,Linear Subspace Learning for Facial Expression Analysis,"Linear Subspace Learning for
+Facial Expression Analysis
+Caifeng Shan
+Philips Research
+The Netherlands
+. Introduction
+Facial expression, resulting from movements of the facial muscles, is one of the most
+powerful, natural, and immediate means for human beings to communicate their emotions
+nd intentions. Some examples of facial expressions are shown in Fig. 1. Darwin (1872) was
+the first to describe in detail the specific facial expressions associated with emotions in
+nimals and humans; he argued that all mammals show emotions reliably in their faces.
+Psychological studies (Mehrabian, 1968; Ambady & Rosenthal, 1992) indicate that facial
+expressions, with other non-verbal cues, play a major and fundamental role in face-to-face
+ommunication.
+Fig. 1. Facial expressions of George W. Bush.
+Machine analysis of facial expressions, enabling computers to analyze and interpret facial
+expressions as humans do, has many important applications including intelligent human-
+omputer interaction, computer animation, surveillance and security, medical diagnosis,
+law enforcement, and awareness system (Shan, 2007). Driven by its potential applications
+nd theoretical interests of cognitive and psychological scientists, automatic facial"
+ec6855acd0871d3e000872a5dd89db97c1554e18,Contrasting emotion processing and executive functioning in attention-deficit/hyperactivity disorder and bipolar disorder.,"016, Vol. 130, No. 5, 531–543
+0735-7044/16/$12.00
+© 2016 American Psychological Association
+http://dx.doi.org/10.1037/bne0000158
+Contrasting Emotion Processing and Executive Functioning in
+Attention-Deficit/Hyperactivity Disorder and Bipolar Disorder
+Stephen Soncin, Donald C. Brien, and Brian C. Coe
+Queen’s University
+Queen’s University and Hotel Dieu Hospital, Kingston,
+Alina Marin
+Ontario, Canada
+Douglas P. Munoz
+Queen’s University
+Attention-deficit/hyperactivity disorder (ADHD) and bipolar disorder (BD) are highly comorbid and
+share executive function and emotion processing deficits, complicating diagnoses despite distinct clinical
+features. We compared performance on an oculomotor task that assessed these processes to capture subtle
+differences between ADHD and BD. The interaction between emotion processing and executive func-
+tioning may be informative because, although these processes overlap anatomically, certain regions that
+re compromised in each network are different in ADHD and BD. Adults, aged 18 – 62, with ADHD (n ⫽
+2), BD (n ⫽ 20), and healthy controls (n ⫽ 21) performed an interleaved pro- and antisaccade task"
+ec4af4a6e89d61c05dcdf89f7f5d0a404bed4027,Bodily action penetrates affective perception.,"Bodily action penetrates affective
+perception
+Carlo Fantoni, Sara Rigutti and Walter Gerbino
+Department of Life Sciences, Psychology Unit “Gaetano Kanizsa,” University of Trieste, Trieste, Italy"
+ec0104286c96707f57df26b4f0a4f49b774c486b,An Ensemble CNN2ELM for Age Estimation,"An Ensemble CNN2ELM for Age Estimation
+Mingxing Duan , Kenli Li, Senior Member, IEEE, and Keqin Li, Fellow, IEEE"
+ecbaa92c289f4f5ff9a57b19a2725036a92311f5,Focused Evaluation for Image Description with Binary Forced-Choice Tasks,"Proceedings of the 5th Workshop on Vision and Language, pages 19–28,
+Berlin, Germany, August 12 2016. c(cid:13)2016 Association for Computational Linguistics"
+ec91c6d6235f31c751b03489d7b1d472dfc9da26,Face Database Retrieval Using Pseudo 2D Hidden Markov Models,"Face Database Retrieval Using Pseudo 2D Hidden Markov Models
+Fraunhofer Institute for Media Communication IMK
+Stefan Eickeler
+Schloss Birlinghoven
+53754 Sankt Augustin, Germany"
+ec3621e900cc50afd067584bb1246a8b4e338fa8,Structured Triplet Learning with POS-Tag Guided Attention for Visual Question Answering,"Structured Triplet Learning with POS-tag Guided Attention
+for Visual Question Answering
+Zhe Wang1 Xiaoyi Liu2 Liangjian Chen1 Limin Wang3 Yu Qiao4 Xiaohui Xie1 Charless Fowlkes1
+Dept. of CS, UC Irvine
+Microsoft
+CVL, ETH Zurich
+SIAT, CAS"
+ec7d418ddf95d231b2afc70ed8c94d0764abec61,Knowledge Transfer Using Latent Variable Models,"Copyright
+Ayan Acharya"
+4edc7f27d4512b69be54abfc6b9876e5b00725ab,Facial Expression Recognition using Convolutional Neural Networks: State of the Art,"Facial Expression Recognition using
+Convolutional Neural Networks: State of the Art
+Christopher Pramerdorfer, Martin Kampel
+Computer Vision Lab, TU Wien
+Vienna, Austria
+Email:"
+4e1d7bad6cde28e65b12c5824b1016859e1ae704,Enhanced Face Recognition Using Discrete Cosine Transform,"Enhanced Face Recognition Using Discrete
+Cosine Transform
+Zahraddeen Sufyanu, Member, IAENG, Fatma S. Mohamad, Abdulganiyu A. Yusuf, and Mustafa B.
+Mamat"
+4efb08fcd652c60764b6fd278cee132b71c612a1,Pixel Deconvolutional Networks,"PIXEL DECONVOLUTIONAL NETWORKS
+Hongyang Gao
+Washington State University
+Hao Yuan
+Washington State University
+Zhengyang Wang
+Washington State University
+Shuiwang Ji
+Washington State University"
+4e32fbb58154e878dd2fd4b06398f85636fd0cf4,A Hierarchical Matcher using Local Classifier Chains,"A Hierarchical Matcher using Local Classifier Chains
+L. Zhang and I.A. Kakadiaris
+Computational Biomedicine Lab, 4849 Calhoun Rd, Rm 373, Houston, TX 77204"
+4eca3e3c4876fc7ec81224d4ec2f159c9e7c72c3,Facial recognition using new LBP representations,
+4ea6954b47baec061fa3f3e1228833eba7be07f9,Multi-pseudo Regularized Label for Generated Data in Person Re-Identification.,"Multi-pseudo Regularized Label for Generated Data
+in Person Re-Identification
+Yan Huang, Jingsong Xu, Qiang Wu, Member, IEEE Zhedong Zheng, Zhaoxiang Zhang, Senior Member, IEEE
+nd Jian Zhang, Senior Member, IEEE"
+4ea53e76246afae94758c1528002808374b75cfa,A Review of Scholastic Examination and Models for Face Recognition and Retrieval in Video,"Lasbela, U. J.Sci. Techl., vol.IV , pp. 57-70, 2015
+Review ARTICLE
+A Review of Scholastic Examination and Models for Face Recognition
+ISSN 2306-8256
+nd Retrieval in Video
+Varsha Sachdeva1, Junaid Baber2, Maheen Bakhtyar2, Muzamil Bokhari3, Imran Ali4
+Department of Computer Science, SBK Women’s University, Quetta, Balochistan
+Department of CS and IT, University of Balochistan, Quetta
+Department of Physics, University of Balochistan, Quetta
+Institute of Biochemistry, University of Balochistan, Quetta"
+4e97b53926d997f451139f74ec1601bbef125599,Discriminative Regularization for Generative Models,"Discriminative Regularization for Generative Models
+Alex Lamb, Vincent Dumoulin and Aaron Courville
+Montreal Institute for Learning Algorithms, Universit´e de Montr´eal"
+4e5698894946680e4d6e766346355b2dc1959819,Cross-pose Facial Expression Recognition,Cross-pose Facial Expression Recognition
+4ec3c7fa51d823a43b3808c7c6baa2e153104bdf,Neuron Pruning for Compressing Deep Networks using Maxout Architectures,"Neuron Pruning for Compressing Deep
+Networks using Maxout Architectures
+Fernando Moya Rueda, Rene Grzeszick, Gernot A. Fink
+TU Dortmund University
+Department of Computer Science"
+4e27fec1703408d524d6b7ed805cdb6cba6ca132,SSD-Sface: Single shot multibox detector for small faces,"SSD-Sface: Single shot multibox detector for small faces
+C. Thuis"
+4e6c9be0b646d60390fe3f72ce5aeb0136222a10,Long-Term Temporal Convolutions for Action Recognition,"Long-term Temporal Convolutions
+for Action Recognition
+G¨ul Varol, Ivan Laptev, and Cordelia Schmid, Fellow, IEEE"
+4ec4392246a7760d189cd6ea48a81664cd2fe4bf,GPU Accelerated ACF Detector,
+4ebf84c6389e842e90c39850f0152671ba7fa0dc,Adversarial Attribute-Image Person Re-identification,"Adversarial Attribute-Image Person Re-identification
+Zhou Yin, Wei-Shi Zheng, Ancong Wu, Hong-Xing Yu, Hai Wan, Xiaowei Guo, Feiyue
+Huang, Jianhuang Lai
+For reference of this work, please cite:
+Adversarial Attribute-Image Person Re-identification
+Zhou Yin, Wei-Shi Zheng, Ancong Wu, Hong-Xing Yu, Hai Wan, Xiaowei Guo, Feiyue Huang, Jianhuang
+Lai, IJCAI, 2018
+title={Adversarial Attribute-Image Person Re-identification},
+uthor={Zhou Yin, Wei-Shi Zheng, Ancong Wu, Hong-Xing Yu, Hai Wan, Xiaowei Guo, Feiyue Huang,
+Jianhuang Lai},
+journal={ International Joint Conference on Artificial Intelligence},
+year={2018}"
+4e82908e6482d973c280deb79c254631a60f1631,Improving Efficiency and Scalability in Visual Surveillance Applications,
+4eaaefc53fd61d27b9ce310c188fe76003a341bd,Assessing Generative Models via Precision and Recall,"Assessing Generative Models via Precision and Recall
+Mehdi S. M. Sajjadi∗
+MPI for Intelligent Systems,
+Max Planck ETH Center
+for Learning Systems
+Olivier Bachem
+Google Brain
+Mario Lucic
+Google Brain
+Olivier Bousquet
+Google Brain
+Sylvain Gelly
+Google Brain"
+4eb0b82b294f601510cd965adcf0e8c386cbaf22,Face Detection for Augmented Reality Application Using Boosting-based Techniques,"Face Detection for Augmented Reality Application
+Using Boosting-based Techniques
+Youssef Hbali1, Lahoucine Ballihi2, Mohammed Sadgal1, El Fazziki Abdelaziz1
+Cadi Ayyad University. B.P. 2390, Avenue Prince My Abdellah, Marrakech, Morocco
+LRIT-CNRST URAC 29, Mohammed V University In Rabat, Faculty of Sciences Rabat, Morocco"
+4ed0be0b5d67cff63461ba79f2a7928d652cf310,Threat of Adversarial Attacks on Deep Learning in Computer Vision: A Survey,"JOURNAL OF LATEX CLASS FILES, VOL. PP, AUGUST 2017
+Threat of Adversarial Attacks on Deep Learning
+in Computer Vision: A Survey
+ACKNOWLEDGEMENTS: The authors thank Nicholas Carlini (UC Berkeley) and Dimitris Tsipras (MIT) for feedback to improve the survey
+quality. We also acknowledge X. Huang (Uni. Liverpool), K. R. Reddy (IISC), E. Valle (UNICAMP), Y. Yoo (CLAIR) and others for providing pointers
+to make the survey more comprehensive. This research was supported by ARC grant DP160101458.
+Naveed Akhtar and Ajmal Mian"
+4e25cd4e40494aa5073fcfbef7506336b84152f4,"Independent Component Analysis, Principal Component Analysis and Rough Sets in Face Recognition","Independent Component Analysis, Principal
+Component Analysis and Rough Sets in Face
+Recognition
+Roman W. ´Swiniarski1 and Andrzej Skowron2
+Department of Mathematical and Computer Sciences
+San Diego State University
+5500 Campanile Drive San Diego, CA 92182, USA
+Institute of Computer Science, Polish Academy of Sciences
+Ordona 21, 01-237 Warsaw, Poland
+Institute of Mathematics, Warsaw University
+Banacha 2, 02-097 Warsaw, Poland"
+4e608c77043f56b0abfb2760fb2fd2516b5412b0,Spectral Face Recognition Using Orthogonal Subspace Bases,
+4ef0a6817a7736c5641dc52cbc62737e2e063420,Study of Face Recognition Techniques,"International Journal of Advanced Computer Research (ISSN (Print): 2249-7277 ISSN (Online): 2277-7970)
+Volume-4 Number-4 Issue-17 December-2014
+Study of Face Recognition Techniques
+Sangeeta Kaushik1*, R. B. Dubey2 and Abhimanyu Madan3
+Received: 10-November-2014; Revised: 18-December-2014; Accepted: 23-December-2014
+©2014 ACCENTS"
+4e71e03d4122aad182ad51ab187d4b55b41fc957,Clustering-Based Discriminant Analysis for Eye Detection,"Clustering-Based Discriminant Analysis
+for Eye Detection
+Shuo Chen and Chengjun Liu
+paper
+three
+proposes"
+4ee380e444063f9b948a2fd82e5c11b97a570ad1,Operating system support to an online hardware-software co-design scheduler for heterogeneous multicore architectures,"Universidade de São Paulo
+Biblioteca Digital da Produção Intelectual - BDPI
+Departamento de Sistemas de Computação - ICMC/SSC
+Comunicações em Eventos - ICMC/SSC
+014-08-20
+Operating system support to an online
+hardware-software co-design scheduler for
+heterogeneous multicore architectures
+IEEE International Conference on Embedded and Real-Time Computing Systems and Applications,
+0th, 2014, Chongqing.
+http://www.producao.usp.br/handle/BDPI/48567
+Downloaded from: Biblioteca Digital da Produção Intelectual - BDPI, Universidade de São Paulo"
+4e33798e364826af1241d28d57977bec9a579709,Active learning with version spaces for object detection,"Active learning with version spaces for object detection 1
+Soumya Roy 2
+Vinay P. Namboodiri 2
+Arijit Biswas 3"
+4eb22856671b9340e5ae532a021be62b9d31c9bc,The Minority Glass Ceiling Hypothesis: Exploring Reasons and Remedies for the Underrepresentation of Racial-ethnic Minorities in Leadership Positions,"THE MINORITY GLASS CEILING HYPOTHESIS:
+EXPLORING REASONS AND REMEDIES FOR THE
+UNDERREPRESENTATION OF RACIAL-ETHNIC MINORITIES IN
+LEADERSHIP POSITIONS
+Seval Gündemir"
+4e3c07283334a9b90dac011033fa2403bcf3c473,A novel feature selection method and its application,"J Intell Inf Syst (2013) 41:235–268
+DOI 10.1007/s10844-013-0243-x
+A novel feature selection method and its application
+Bing Li· Tommy W. S. Chow· Di Huang
+Received: 11 April 2012 / Revised: 8 March 2013 / Accepted: 11 March 2013 /
+Published online: 4 April 2013
+© Springer Science+Business Media New York 2013"
+4e613c9342d6e90f7af5fd3f246c6d82a33fe98d,Estimating Human Pose in Images,"Estimating Human Pose in Images
+Navraj Singh
+December 11, 2009
+Introduction
+This project attempts to improve the performance of an existing method of estimating the pose of humans in still images.
+Tasks such as object detection and classification have received much attention already in the literature. However, sometimes we are
+interested in more detailed aspects of objects like pose. This is a challenging task due to the large variety of poses an object can
+take in a variety of settings. For human pose estimation, aspects such as clothing, occlusion of body parts, etc. make the task even
+harder.
+The approaches taken up in the literature to solve this problem focus on either a top-down approach, bottom-up approach,
+or a hybrid of the two. The top-down approach involves comparing test images with stored examples of humans in various poses
+using some similarity measure. This approach might require a very large set of examples of human poses. The bottom-up approach,
+on the other hand, uses low level human body part detectors and in some manner assembles the information to predict the entire
+ody pose. This project attempts to build upon a mostly bottom-up approach, called LOOPS (Localizing Object Outlines using
+Probabilistic Shape), that was developed in [1] by G. Heitz, et al. in Prof. Daphne Koller's group. Specifically, we investigate the
+onstruction and incorporation of a skin detector into the LOOPS pipeline, and a couple of pairwise features in the appearance
+model. The overall improvement in the localization is negligible, with some improvement in head localization. Since the
+improvements considered are within the framework of LOOPS, a brief overview of the LOOPS method is discussed next.
+Brief Overview of the LOOPS method as applied to humans
+The main random variables defined in the LOOPS method, described in detail in [1], are the locations of a set of key"
+4ecd459aa4b4590bdc552e07b6d0bbe132fb1fcf,Learning of Graph Compressed Dictionaries for Sparse Representation Classification,"Learning of Graph Compressed Dictionaries for Sparse
+Representation Classification
+Farshad Nourbakhsh and Eric Granger
+Laboratoire d’imagerie de vision et d’intelligence artificielle
+´Ecole de technologie sup´erieure, Universit´e du Qu´ebec, Montr´eal, Canada
+Keywords:
+Matrix Factorization, Graph Compression, Dictionary Learning, Sparse Representation Classification,
+Clustering, Face Recognition, Video Surveillance"
+4ee87ed965e78adb1035a5322350afac9ca901f5,Multi-target tracking of time-varying spatial patterns,"Multi-Target Tracking of Time-varying Spatial Patterns
+Jingchen Liu1
+Yanxi Liu1,2
+Department of Computer Science and Engineering
+Department of Electrical Engineering
+The Pennsylvania State University
+University Park, PA 16802, USA
+{jingchen,"
+4e4a47e2d285e55f3d0b6d449d6b9893615db5cd,Use of l2/3-norm Sparse Representation for Facial Expression Recognition,"International Journal of Science and Research (IJSR)
+ISSN (Online): 2319-7064
+Impact Factor (2012): 3.358
+Use of ℓ2/3-norm Sparse Representation for Facial
+Expression Recognition
+Sandeep Rangari1, Sandeep Gonnade2
+MATS University, MATS School of Engineering and Technology, Arang, Raipur, India
+MATS University, MATS School of Engineering and Technology, Arang, Raipur, India
+three
+to discriminate
+represents emotion,"
+4e0e49c280acbff8ae394b2443fcff1afb9bdce6,Automatic Learning of Gait Signatures for People Identification,"Automatic learning of gait signatures for people identification
+F.M. Castro
+Univ. of Malaga
+fcastro<at>uma.es
+M.J. Mar´ın-Jim´enez
+Univ. of Cordoba
+mjmarin<at>uco.es
+N. Guil
+Univ. of Malaga
+nguil<at>uma.es
+N. P´erez de la Blanca
+Univ. of Granada
+nicolas<at>ugr.es"
+4e61f3dc6aa7994613a3708e823aadd478c73f5f,Generating Discriminative Object Proposals via Submodular Ranking,"Generating Discriminative Object Proposals via Submodular Ranking
+Yangmuzi Zhang∗, Zhuolin Jiang†, Xi Chen∗, and Larry S. Davis∗
+University of Maryland at College Park, MD
+Raytheon BBN Technologies, USA
+Email:"
+4eb600aa4071b9a73da49e5374d6e22ca46eaba6,Understanding bag-of-words model: a statistical framework,"Noname manuscript No.
+(will be inserted by the editor)
+Understanding Bag-of-Words Model: A Statistical Framework
+Yin Zhang ⋅ Rong Jin ⋅ Zhi-Hua Zhou
+Received: date / Accepted: date"
+4e8206dd2e163c6a139bfd0ec3adf410e7b78c4a,A Multi-scale Boosted Detector for Efficient and Robust Gesture Recognition,"A Multi-scale Boosted Detector for Ef‌f‌icient and
+Robust Gesture Recognition
+Camille Monnier, Stan German, Andrey Ost
+Charles River Analytics
+Cambridge, MA, USA"
+4e12080616da4b540c8f79db2dd1b654cd8345ce,Pose-Driven Deep Models for Person Re-Identification,"Pose-Driven Deep Models for Person
+Re-Identification
+Masters thesis of
+Andreas Eberle
+At the faculty of Computer Science
+Institute for Anthropomatics and Robotics
+Reviewer:
+Second reviewer:
+Advisors:
+Prof. Dr.-Ing. Rainer Stiefelhagen
+Prof. Dr.-Ing. Jürgen Beyerer
+Dr.-Ing. Saquib Sarfraz
+Dipl.-Inform. Arne Schumann
+Duration: 31. August 2017 –
+8. February 2018
+KIT – University of the State of Baden-Wuerttemberg and National Laboratory of the Helmholtz Association
+www.kit.edu"
+20a432a065a06f088d96965f43d0055675f0a6c1,The Effects of Regularization on Learning Facial Expressions with Convolutional Neural Networks,"In: Proc. of the 25th Int. Conference on Artificial Neural Networks (ICANN)
+Part II, LNCS 9887, pp. 80-87, Barcelona, Spain, September 2016
+The final publication is available at Springer via
+http://dx.doi.org//10.1007/978-3-319-44781-0_10
+The Effects of Regularization on Learning Facial
+Expressions with Convolutional Neural Networks
+Tobias Hinz, Pablo Barros, and Stefan Wermter
+University of Hamburg Department of Computer Science,
+Vogt-Koelln-Strasse 30, 22527 Hamburg, Germany
+http://www.informatik.uni-hamburg.de/WTM"
+20a3ce81e7ddc1a121f4b13e439c4cbfb01adfba,Sparse-MVRVMs Tree for Fast and Accurate Head Pose Estimation in the Wild,"Sparse-MVRVMs Tree for Fast and Accurate
+Head Pose Estimation in the Wild
+Mohamed Selim, Alain Pagani, and Didier Stricker
+Augmented Vision Research Group,
+German Research Center for Artificial Intelligence (DFKI),
+Tripstaddterstr. 122, 67663 Kaiserslautern, Germany
+Technical University of Kaiserslautern
+http://www.av.dfki.de"
+2057837e059a1dde8c6c4c0587e652b79c04780a,Learning to Recognize Novel Objects in One Shot through Human-Robot Interactions in Natural Language Dialogues,"Learning to Recognize Novel Objects in One Shot through Human-Robot
+Interactions in Natural Language Dialogues
+Thomas Williams
+Matthias Scheutz
+Evan Krause
+HRI Laboratory
+Tufts University
+00 Boston Ave
+Medford, MA 02155, USA
+Michael Zillich
+Inst. for Automation and Control
+Technical University Vienna
+Gusshausstr 27-29/E376
+040 Vienna, Austria
+HRI Laboratory
+Tufts University
+00 Boston Ave
+HRI Laboratory
+Tufts University
+00 Boston Ave"
+2004afb2276a169cdb1f33b2610c5218a1e47332,Deep Convolutional Neural Network Used in Single Sample per Person Face Recognition,"Hindawi
+Computational Intelligence and Neuroscience
+Volume 2018, Article ID 3803627, 11 pages
+https://doi.org/10.1155/2018/3803627
+Research Article
+Deep Convolutional Neural Network Used in Single Sample per
+Person Face Recognition
+Junying Zeng , Xiaoxiao Zhao , Junying Gan , Chaoyun Mai
+and Fan Wang
+, Yikui Zhai,
+School of Information Engineering, Wuyi University, Jiangmen 529020, China
+Correspondence should be addressed to Xiaoxiao Zhao;
+Received 27 November 2017; Revised 23 May 2018; Accepted 26 July 2018; Published 23 August 2018
+Academic Editor: Jos´e Alfredo Hern´andez-P´erez
+Copyright © 2018 Junying Zeng et al. This is an open access article distributed under the Creative Commons Attribution License,
+which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
+Face recognition (FR) with single sample per person (SSPP) is a challenge in computer vision. Since there is only one sample to be
+trained, it makes facial variation such as pose, illumination, and disguise dif‌f‌icult to be predicted. To overcome this problem, this paper
+proposes a scheme combined traditional and deep learning (TDL) method to process the task. First, it proposes an expanding sample
+method based on traditional approach. Compared with other expanding sample methods, the method can be used easily and"
+2084e54505cfe4fd81005167b1b11d10b5f837d1,Person Re-Identification by Discriminative Selection in Video,"Person Re-Identification by Discriminative Selection in Video Ranking
+Wang, T; Gong, S; Zhu, X; Wang, S
+•(cid:9)“The final publication is available at http://link.springer.com/chapter/10.1007%2F978-3-319-
+0593-2_45”
+For additional information about this publication click this link.
+http://qmro.qmul.ac.uk/xmlui/handle/123456789/11432
+Information about this research object was correct at the time of download; we occasionally
+make corrections to records, please therefore check the published record when citing. For
+more information contact"
+20e504782951e0c2979d9aec88c76334f7505393,Robust LSTM-Autoencoders for Face De-Occlusion in the Wild,"Robust LSTM-Autoencoders for Face De-Occlusion
+in the Wild
+Fang Zhao, Jiashi Feng, Jian Zhao, Wenhan Yang, Shuicheng Yan"
+209324c152fa8fab9f3553ccb62b693b5b10fb4d,Visual Genome Crowdsourced Visual Knowledge Representations a Thesis Submitted to the Department of Computer Science and the Committee on Graduate Studies of Stanford University in Partial Fulfillment of the Requirements for the Degree of Masters of Science,"CROWDSOURCED VISUAL KNOWLEDGE REPRESENTATIONS
+VISUAL GENOME
+A THESIS
+SUBMITTED TO THE DEPARTMENT OF COMPUTER SCIENCE
+AND THE COMMITTEE ON GRADUATE STUDIES
+OF STANFORD UNIVERSITY
+IN PARTIAL FULFILLMENT OF THE REQUIREMENTS
+FOR THE DEGREE OF
+MASTERS OF SCIENCE
+Ranjay Krishna
+March 2016"
+20a052963f2c46aff817f34a09c396c44b3e46da,Visually Grounded Meaning Representations,"Visually Grounded Meaning Representations
+Carina Silberer, Member, IEEE, Vittorio Ferrari, Member, IEEE, Mirella Lapata, Member, IEEE"
+20e783a2df0486cd1c8b6b59fc76220f5718b304,Stereo-based Pedestrian Detection Using Two-stage Classifiers,"4-26
+MVA2011 IAPR Conference on Machine Vision Applications, June 13-15, 2011, Nara, JAPAN
+Stereo-based Pedestrian Detection Using Two-stage Classifiers
+Manabu Nishiyama, Akihito Seki, Tomoki Watanabe
+Corporate Research and Development Center, Toshiba Corporation
+, Komukai-Toshiba-cho, Saiwai-ku, Kawasaki, 212-8582, Japan"
+202cbc83c22a9c7b3d878cc1bed1c5cf152eb6fb,Learning Embeddings for Product Visual Search with Triplet Loss and Online Sampling,"Learning Embeddings for Product Visual Search with
+Triplet Loss and Online Sampling
+Eric Dodds, Huy Nguyen, Simao Herdade, Jack Culpepper, Andrew Kae, Pierre Garrigues
+{eric.mcvoy.dodds, huyng, sherdade, jackcul, andrewkae,
+Yahoo Research"
+208e903211ddc62b997afb5a1bd3c2c43e0e69ee,Real-Time Action Detection in Video Surveillance using Sub-Action Descriptor with Multi-CNN,"Real-Time Action Detection in Video Surveillance using Sub-Action
+Descriptor with Multi-CNN
+Cheng-Bin Jin*, Shengzhe Li†, and Hakil Kim*
+*Inha University, Incheon, Korea
+Visionin Inc., Incheon, Korea"
+20ade100a320cc761c23971d2734388bfe79f7c5,Subspace Clustering via Good Neighbors,"IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE
+Subspace Clustering via Good Neighbors
+Jufeng Yang, Jie Liang, Kai Wang, Ming-Hsuan Yang"
+202d8d93b7b747cdbd6e24e5a919640f8d16298a,Face Classification via Sparse Approximation,"Face Classification via Sparse Approximation
+Elena Battini S˝onmez1, Bulent Sankur2 and Songul Albayrak3
+Computer Science Department, Bilgi University, Dolapdere, Istanbul, TR
+Electric and Electronic Engineering Department, Bo¯gazici University, Istanbul, TR
+Computer Engineering Department, Yıldız Teknik University, Istanbul, TR"
+205b34b6035aa7b23d89f1aed2850b1d3780de35,Log-domain polynomial filters for illumination-robust face recognition,"2014 IEEE International Conference on Acoustic, Speech and Signal Processing (ICASSP)
+978-1-4799-2893-4/14/$31.00 ©2014 IEEE
+Shenzhen Key Lab. of Information Sci&Tech,
+♯Nagaoka University of Technology, Japan
+RECOGNITION
+. INTRODUCTION"
+200f68f899f0bf72dd2c49ba2b4a5027e0291531,Efficient Activity Detection in Untrimmed Video with Max-Subgraph Search,"Efficient Activity Detection in Untrimmed Video
+with Max-Subgraph Search
+Chao Yeh Chen and Kristen Grauman"
+20e64f44ce2977a4dc5099fce6f73842613f0865,"Ridge Regression, Hubness, and Zero-Shot Learning","Ridge Regression, Hubness, and Zero-Shot Learning(cid:63)
+Yutaro Shigeto1, Ikumi Suzuki2, Kazuo Hara3, Masashi Shimbo1, and
+Yuji Matsumoto1
+Nara Institute of Science and Technology, Ikoma, Nara, Japan
+The Institute of Statistical Mathematics, Tachikawa, Tokyo, Japan
+National Institute of Genetics, Mishima, Shizuoka, Japan"
+2049ca79ce94ddfe0cc3d39bf770f580a740f3ac,Activity analysis : finding explanations for sets of events,ActivityAnalysis:FindingExplanationsforSetsofEventsbyDimaJamalAlDamenSubmittedinaccordancewiththerequirementsforthedegreeofDoctorofPhilosophy.TheUniversityofLeedsSchoolofComputingSeptember2009Thecandidateconfirmsthattheworksubmittedisherownandthattheappropriatecredithasbeengivenwherereferencehasbeenmadetotheworkofothers.Thiscopyhasbeensuppliedontheunderstandingthatitiscopyrightmaterialandthatnoquotationfromthethesismaybepublishedwithoutproperacknowledgement.
+20a6de85d7d5f445dfaba90ab2e33879142023fc,Autonomous Vehicles that Interact with Pedestrians: A Survey of Theory and Practice,"THIS WORK HAS BEEN SUBMITTED TO THE IEEE TRANSACTIONS ON INTELLIGENT TRANSPORTATION SYSTEMS.
+Autonomous Vehicles that Interact with Pedestrians:
+A Survey of Theory and Practice
+Amir Rasouli and John K. Tsotsos"
+20f9a09defe5b02b98c464ca6df36b3b6358f60b,The State-of-the-Art in Visual Object Tracking,Volume 36 Number 3 September 2012
+20c59a55795eaa4f2629cc83fb556dc8c5bcfc1f,Modeling and visual recognition of human actions and interactions,"Modeling and visual recognition of human actions and
+interactions
+Ivan Laptev
+To cite this version:
+Ivan Laptev. Modeling and visual recognition of human actions and interactions. Computer Vision and
+Pattern Recognition [cs.CV]. Ecole Normale Supérieure de Paris - ENS Paris, 2013. <tel-01064540>
+HAL Id: tel-01064540
+https://tel.archives-ouvertes.fr/tel-01064540
+Submitted on 16 Sep 2014
+HAL is a multi-disciplinary open access
+archive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+abroad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de
+recherche français ou étrangers, des laboratoires"
+20e210bb6b1d3e637e2b2674aeead3fad8c2c70e,Paying More Attention to Attention: Improving the Performance of Convolutional Neural Networks via Attention Transfer,"Published as a conference paper at ICLR 2017
+PAYING MORE ATTENTION TO ATTENTION:
+IMPROVING THE PERFORMANCE OF CONVOLUTIONAL
+NEURAL NETWORKS VIA ATTENTION TRANSFER
+Sergey Zagoruyko, Nikos Komodakis
+Universit´e Paris-Est, ´Ecole des Ponts ParisTech
+Paris, France"
+20e903faf8e2e656a89d983541b15f2e0d614eeb,Image to Image Translation for Domain Adaptation,"Image to Image Translation for Domain Adaptation
+Zak Murez1,2
+Soheil Kolouri2 David Kriegman1 Ravi Ramamoorthi1 Kyungnam Kim2
+University of California, San Diego; 2 HRL Laboratories, LLC;"
+200f1a55c5974c4cac243bed3131ac5a9338840d,Human Computation for Object Detection,"May 09, 2013
+TR Number: UCSC-SOE-15-03
+Human Computation for Object Detection
+Rajan Vaish1, Sascha T. Ishikawa1, Sheng Lundquist2, Reid Porter2, James Davis1
+University of California at Santa Cruz1, Los Alamos National Laboratory2
+{rvaish, stishika, {slundquist,"
+204db062f4952ce446cbb28fbc40d4a7f4424b03,Systematic evaluation of super-resolution using classification,"SYSTEMATIC EVALUATION OF
+SUPER-RESOLUTION USING CLASSIFICATION
+Vinay P. Namboodiri1, Vincent De Smet1 and Luc Van Gool1,2
+ESAT-PSI/IBBT, K.U.Leuven, Belgium
+Computer Vision Laboratory, BIWI/ETH Z¨urich, Switzerland"
+203fcd66c043e44fefd783b8f54105f0a577fc25,Analyzing Content and Customer Engagement in Social Media with Deep Learning,"Analyzing Content and Customer Engagement in
+Social Media with Deep Learning
+(The bulk of this work was done by a student.)"
+20f272f4bdf562aa8b4dae84b67cfafa34a00738,Periocular biometrics: An emerging technology for unconstrained scenarios,"Periocular Biometrics:
+An Emerging Technology for Unconstrained
+Scenarios
+Gil Santos and Hugo Proenc¸a
+IT - Instituto de Telecomunicac¸ ˜oes
+Universidade da Beira Interior
+Covilh˜a, Portugal
+Email:"
+20100323ec5c32ae91add8e866d891a78f1a2bbe,Unsupervised Object Discovery and Tracking in Video Collections,"Unsupervised Object Discovery and Tracking in Video Collections
+Suha Kwak1,∗
+Minsu Cho1,∗
+Ivan Laptev1,∗
+Jean Ponce2,∗
+Cordelia Schmid1,†
+Inria
+´Ecole Normale Sup´erieure / PSL Research University"
+20717f1cb12ab208458c0f2505b237d8f061f97a,Learning Classifiers from Synthetic Data Using a Multichannel Autoencoder,"Learning Classifiers from Synthetic Data Using a
+Multichannel Autoencoder
+Xi Zhang, Yanwei Fu, Andi Zang, Leonid Sigal, Gady Agam"
+2067ab35379381f05acaa7406a30d0ee02c0b8cc,Directional Statistics-based Deep Metric Learning for Image Classification and Retrieval,"Directional Statistics-based Deep Metric Learning
+for Image Classification and Retrieval
+Xuefei Zhe, Shifeng Chen, and Hong Yan, Fellow, IEEE"
+2059d2fecfa61ddc648be61c0cbc9bc1ad8a9f5b,Co-Localization of Audio Sources in Images Using Binaural Features and Locally-Linear Regression,"TRANSACTIONS ON AUDIO, SPEECH, AND LANGUAGE PROCESSING, VOL. 23, NO. 4, APRIL 2015
+Co-Localization of Audio Sources in Images Using
+Binaural Features and Locally-Linear Regression
+Antoine Deleforge∗ Radu Horaud∗ Yoav Y. Schechner‡ Laurent Girin∗†
+INRIA Grenoble Rhˆone-Alpes, Montbonnot Saint-Martin, France
+Univ. Grenoble Alpes, GIPSA-Lab, France
+Dept. Electrical Eng., Technion-Israel Inst. of Technology, Haifa, Israel"
+20b8a76e988e796f0f225876a69842f6839e4c98,Real-time Gender Recognition for Uncontrolled Environment of Real-life Images,"REAL-TIME GENDER RECOGNITION FOR UNCONTROLLED
+ENVIRONMENT OF REAL-LIFE IMAGES
+Duan-Yu Chen and Kuan-Yi Lin
+Department of Electrical Engineering, Yuan-Ze University, Taiwan
+Keywords:
+Gender recognition, Uncontrolled environment, Real-life images."
+202a923504ea81e94c06a81581539b893b461ee5,YELP: masking sound-based opportunistic attacks in zero-effort deauthentication,"YELP: Masking Sound-based Opportunistic A(cid:130)acks in
+Zero-E(cid:128)ort Deauthentication
+University of Alabama at Birmingham
+University of Alabama at Birmingham
+University of Alabama at Birmingham
+Prakash Shrestha
+S Abhishek Anand
+Nitesh Saxena"
+20111924fbf616a13d37823cd8712a9c6b458cd6,Linear Regression Line based Partial Face Recognition,"International Journal of Computer Applications (0975 – 8887)
+Volume 130 – No.11, November2015
+Linear Regression Line based Partial Face Recognition
+Naveena M.
+Department of Studies in
+Computer Science,
+Manasagagothri,
+Mysore.
+G. Hemantha Kumar
+Department of Studies in
+Computer Science,
+Manasagagothri,
+Mysore.
+P. Nagabhushan
+Department of Studies in
+Computer Science,
+Manasagagothri,
+Mysore.
+images. In"
+2056ba48e687d619c0ce69d0be323d48c5b90701,Similarity Mapping with Enhanced Siamese Network for Multi-Object Tracking,"Similarity Mapping with Enhanced Siamese Network
+for Multi-Object Tracking
+Minyoung Kim
+Cupertino, CA
+Stefano Alletto
+Modena, MO
+Panasonic Silicon Valley Laboratory
+University of Modena and Reggio Emilia
+Panasonic Silicon Valley Laboratory
+Luca Rigazio
+Cupertino, CA"
+20eaa3ebe2b6e1aff7c4585733c9fb0cfc941919,Image similarity using Deep CNN and Curriculum Learning,"Image similarity using Deep CNN and Curriculum Learning
+Srikar Appalaraju
+Vineet Chaoji
+Amazon Development Centre (India) Pvt. Ltd.
+Image similarity involves fetching similar looking images given a reference image. Our solution called SimNet, is a deep
+Siamese network which is trained on pairs of positive and negative images using a novel online pair mining strategy inspired
+by Curriculum learning. We also created a multi-scale CNN, where the final image embedding is a joint representation of
+top as well as lower layer embedding’s. We go on to show that this multi-scale Siamese network is better at capturing fine
+grained image similarities than traditional CNN’s.
+Keywords — Multi-scale CNN, Siamese network, Curriculum learning, Transfer learning.
+I. INTRODUCTION
+The ability to find a similar set of images for a given
+image has multiple uses-cases from visual search to
+duplicate product detection to domain specific image
+clustering. Our approach called SimNet, tries to identify
+similar images for a new image using multi-scale Siamese
+network. Fig. 1 shows examples of image samples from
+CIFAR10 [39] on which SimNet is trained on.
+Fig. 1 examples of CIFAR 10 images. Task is - given a new image
+but belonging to one of the 10 categories, find similar set of images."
+20532b1f80b509f2332b6cfc0126c0f80f438f10,A Deep Matrix Factorization Method for Learning Attribute Representations,"A deep matrix factorization method for learning
+attribute representations
+George Trigeorgis, Konstantinos Bousmalis, Student Member, IEEE, Stefanos Zafeiriou, Member, IEEE
+Bj¨orn W. Schuller, Senior member, IEEE"
+205af28b4fcd6b569d0241bb6b255edb325965a4,Facial expression recognition and tracking for intelligent human-robot interaction,"Intel Serv Robotics (2008) 1:143–157
+DOI 10.1007/s11370-007-0014-z
+SPECIAL ISSUE
+Facial expression recognition and tracking for intelligent human-robot
+interaction
+Y. Yang · S. S. Ge · T. H. Lee · C. Wang
+Received: 27 June 2007 / Accepted: 6 December 2007 / Published online: 23 January 2008
+© Springer-Verlag 2008"
+20928315086a49e0cdea0ec66f2e78e9c564f794,Person Detection for Indoor Videosurveillance Using Spatio-temporal Integral Features,"Person Detection for Indoor Videosurveillance
+using Spatio-Temporal Integral Features
+Adrien Descamps1, Cyril Carincotte2, and Bernard Gosselin1
+TCTS Lab, University of Mons, Mons, Belgium
+Multitel ASBL, 2 Rue Pierre et Marie Curie, Mons, Belgium"
+203abfcc3df8de6606cf34fa32cf225627f52d00,Learning Robot Vision for Assisted Living,"Robotic Vision:
+Technologies for Machine
+Learning and Vision Applications
+José García-Rodríguez
+University of Alicante, Spain
+Miguel Cazorla
+University of Alicante, Spain"
+20260d36506911e04ad1efed1e60b06bfc178d52,Deep 3D face identification,"Deep 3D Face Identification
+Donghyun Kim
+Matthias Hernandez
+Jongmoo Choi
+G´erard Medioni
+USC Institute for Robotics and Intelligent Systems (IRIS)
+Unversity of Southern California
+{kim207, mthernan, jongmooc,"
+20a0b23741824a17c577376fdd0cf40101af5880,Learning to Track for Spatio-Temporal Action Localization,"Learning to track for spatio-temporal action localization
+Philippe Weinzaepfela
+Zaid Harchaouia,b
+NYU
+Inria∗
+Cordelia Schmida"
+18bca470bf51f5cc42148cd7e34fa58280be8eb2,Face Expressional Recognition using Geometry and Behavioral Traits,"IJCSNS International Journal of Computer Science and Network Security, VOL.9 No.8, August 2009
+Face Expressional Recognition using Geometry and Behavioral
+Traits
+J. K. Kani Mozhi, Sr. Lect / Dept. of MCA, K. S. Rangasamy College of Technology, Tiruchengode. India.
+J. K. Kani Mozhi 1 and Dr. R. S. D. Wahida Banu 2
+Dr. R. S. D. Wahida Banu, Prof. & Head / Dept. of ECE, Govt. College of Engg., Salem, India.
+recognition"
+18c72175ddbb7d5956d180b65a96005c100f6014,From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose,"IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. 23, NO. 6,
+JUNE 2001
+From Few to Many: Illumination Cone
+Models for Face Recognition under
+Variable Lighting and Pose
+Athinodoros S. Georghiades, Student Member, IEEE, Peter N. Belhumeur, Member, IEEE, and
+David J. Kriegman, Senior Member, IEEE"
+18636347b8741d321980e8f91a44ee054b051574,Facial marks: Soft biometric for face recognition,"978-1-4244-5654-3/09/$26.00 ©2009 IEEE
+ICIP 2009"
+18ab703c9959fbea7ad253a4062eb705b245552c,Efficient trajectory extraction and parameter learning for data-driven crowd simulation,"Efficient Trajectory Extraction and Parameter Learning for Data-Driven
+Crowd Simulation
+Aniket Bera∗
+Sujeong Kim†
+Dinesh Manocha‡
+The University of North Carolina at Chapel Hill"
+181045164df86c72923906aed93d7f2f987bce6c,Rheinisch-westfälische Technische Hochschule Aachen,"RHEINISCH-WESTFÄLISCHE TECHNISCHE
+HOCHSCHULE AACHEN
+KNOWLEDGE-BASED SYSTEMS GROUP
+PROF. GERHARD LAKEMEYER, PH. D.
+Detection and Recognition of Human
+Faces using Random Forests for a
+Mobile Robot
+MASTER OF SCIENCE THESIS
+VAISHAK BELLE
+MATRICULATION NUMBER: 26 86 51
+SUPERVISOR:
+SECOND SUPERVISOR:
+PROF. GERHARD LAKEMEYER, PH. D.
+PROF. ENRICO BLANZIERI, PH. D.
+ADVISERS:
+STEFAN SCHIFFER, THOMAS DESELAERS"
+18d5b0d421332c9321920b07e0e8ac4a240e5f1f,Collaborative Representation Classification Ensemble for Face Recognition,"Collaborative Representation Classification
+Ensemble for Face Recognition
+Xiao Chao Qu, Suah Kim, Run Cui and Hyoung Joong Kim"
+18269fcaba9feba85552b039a9052cd67e6d9c8b,Emotional facial sensing and multimodal fusion in a continuous 2D affective space,"J Ambient Intell Human Comput (2012) 3:31–46
+DOI 10.1007/s12652-011-0087-6
+O R I G I N A L R E S E A R C H
+Emotional facial sensing and multimodal fusion in a continuous
+2D affective space
+Eva Cerezo • Isabelle Hupont • Sandra Baldassarri •
+Sergio Ballano
+Received: 3 February 2011 / Accepted: 24 September 2011 / Published online: 30 October 2011
+Ó Springer-Verlag 2011"
+18ccd8bd64b50c1b6a83a71792fd808da7076bc9,Object detection and segmentation from joint embedding of parts and pixels,"Object Detection and Segmentation
+from Joint Embedding of Parts and Pixels
+Michael Maire1, Stella X. Yu2, Pietro Perona1
+California Institute of Technology - Pasadena, CA 91125
+Boston College - Chestnut Hill, MA 02467"
+18d51a366ce2b2068e061721f43cb798177b4bb7,Looking into your eyes: observed pupil size influences approach-avoidance responses.,"Cognition and Emotion
+ISSN: 0269-9931 (Print) 1464-0600 (Online) Journal homepage: http://www.tandfonline.com/loi/pcem20
+Looking into your eyes: observed pupil size
+influences approach-avoidance responses
+Marco Brambilla, Marco Biella & Mariska E. Kret
+To cite this article: Marco Brambilla, Marco Biella & Mariska E. Kret (2018): Looking into your
+eyes: observed pupil size influences approach-avoidance responses, Cognition and Emotion, DOI:
+10.1080/02699931.2018.1472554
+To link to this article: https://doi.org/10.1080/02699931.2018.1472554
+View supplementary material
+Published online: 11 May 2018.
+Submit your article to this journal
+View related articles
+View Crossmark data
+Full Terms & Conditions of access and use can be found at
+http://www.tandfonline.com/action/journalInformation?journalCode=pcem20"
+18c4a0e82fdddda2530b7281ad567abc0373a89f,Automatic Subspace Learning via Principal Coefficients Embedding,"Automatic Subspace Learning via Principal
+Coefficients Embedding
+Xi Peng, Jiwen Lu, Senior Member, IEEE, Zhang Yi, Fellow, IEEE and Rui Yan, Member, IEEE,"
+18cc17c06e34baaa3e196db07e20facdbb17026d,Describing Videos by Exploiting Temporal Structure,"Describing Videos by Exploiting Temporal Structure
+Li Yao
+Universit´e de Montr´eal
+Atousa Torabi
+Universit´e de Montr´eal
+Kyunghyun Cho
+Universit´e de Montr´eal
+Nicolas Ballas
+Universit´e de Montr´eal
+Christopher Pal
+´Ecole Polytechnique de Montr´eal
+Hugo Larochelle
+Universit´e de Sherbrooke
+Aaron Courville
+Universit´e de Montr´eal"
+1885acea0d24e7b953485f78ec57b2f04e946eaf,Combining Local and Global Features for 3D Face Tracking,"Combining Local and Global Features for 3D Face Tracking
+Pengfei Xiong, Guoqing Li, Yuhang Sun
+Megvii (face++) Research
+{xiongpengfei, liguoqing,"
+1868aeb7f13e64ebc78869b371ef321572d6167f,Weakly Supervised Automatic Annotation of Pedestrian Bounding Boxes,"Weakly Supervised Automatic Annotation of Pedestrian Bounding Boxes
+David V´azquez1, Jiaolong Xu1, Sebastian Ramos1, Antonio M. L´opez1,2 and Daniel Ponsa1,2
+Computer Vision Center
+Dept. of Computer Science
+Autonomous University of Barcelona
+08193 Bellaterra, Barcelona, Spain
+{dvazquez, jiaolong, sramosp, antonio,"
+18d4210a5bb56e92045ef0637208685abaaca6a5,GIANT: geo-informative attributes for location recognition and exploration,"GIANT: Geo-Informative Attributes for
+locatioN recogniTion and exploration
+National Lab of Pattern Recognition, Institute of Automation, CAS, Beijing 100190, China
+China-Singapore Institute of Digital Media, Singapore, 139951, Singapore
+Quan Fang1,2, Jitao Sang1,2, Changsheng Xu1,2
+{qfang, jtsang,"
+18de899c853120a1a2cd502ebc3e970b92e1882f,Age Regression from Soft Aligned Face Images Using Low Computational Resources,"Age regression from soft aligned face images
+using low computational resources
+Juan Bekios-Calfa1, Jos´e M. Buenaposada2, and Luis Baumela3
+Dept. de Ingenier´ıa de Sistemas y Computaci´on, Universidad Cat´olica del Norte
+Av. Angamos 0610, Antofagasta, Chile
+Dept. de Ciencias de la Computaci´on, Universidad Rey Juan Carlos
+Calle Tulip´an s/n, 28933, M´ostoles, Spain
+Dept. de Inteligencia Artificial, Universidad Polit´ecnica de Madrid
+Campus Montegancedo s/n, 28660 Boadilla del Monte, Spain"
+18a849b1f336e3c3b7c0ee311c9ccde582d7214f,"Efficiently Scaling up Crowdsourced Video Annotation A Set of Best Practices for High Quality, Economical Video Labeling","Int J Comput Vis
+DOI 10.1007/s11263-012-0564-1
+Efficiently Scaling up Crowdsourced Video Annotation
+A Set of Best Practices for High Quality, Economical Video Labeling
+Carl Vondrick · Donald Patterson · Deva Ramanan
+Received: 31 October 2011 / Accepted: 20 August 2012
+© Springer Science+Business Media, LLC 2012"
+18cd79f3c93b74d856bff6da92bfc87be1109f80,A N a Pplication to H Uman F Ace P Hoto - S Ketch S Ynthesis and R Ecognition,"International Journal of Advances in Engineering & Technology, May 2012.
+©IJAET ISSN: 2231-1963
+AN APPLICATION TO HUMAN FACE PHOTO-SKETCH
+SYNTHESIS AND RECOGNITION
+Amit R. Sharma and 2Prakash. R. Devale
+Student and 2Professor & Head,
+Department of Information Tech., Bharti Vidyapeeth Deemed University, Pune, India"
+189355bff03076cc5bddaa11239626051931144d,Learning Representations for Automatic Colorization,"Learning Representations for Automatic Colorization
+Gustav Larsson1, Michael Maire2, and Gregory Shakhnarovich2
+University of Chicago
+Toyota Technological Institute at Chicago"
+18fe745e0840b7b086fb7d14850a95ebbd5ae57b,Evaluation and Acceleration of High-Throughput Fixed-Point Object Detection on FPGAs,"This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI
+Evaluation and Acceleration of High-Throughput
+Fixed-Point Object Detection on FPGAs
+Xiaoyin Ma, Student Member, IEEE, Walid A. Najjar, Fellow, IEEE, Amit K. Roy-Chowdhury, Sr. Member, IEEE"
+1856e71437886af2366b620bcfe4caf891425f7b,Analyzing the Distribution of a Large-Scale Character Pattern Set Using Relative Neighborhood Graph,"Analyzing the Distribution of
+Large-scale Character Pattern Set
+Using Relative Neighborhood Graph
+Masanori Goto(cid:3), Ryosuke Ishiday, Yaokai Fengy and Seiichi Uchiday
+(cid:3)GLORY LTD., Hyogo, Japan
+Email:
+yKyushu University, Fukuoka, Japan
+Email:"
+1883387726897d94b663cc4de4df88e5c31df285,Measures of Effective Video Tracking,"Measures of effective video tracking
+Tahir Nawaz, Fabio Poiesi, Andrea Cavallaro"
+18a7edd0bfe5a3d6ceb4d2053081e479cfa1e920,Transductive Kernel Map Learning and its Application to Image Annotation,"TRANSDUCTIVE LEARNING, KERNEL MAP, IMAGE ANNOTATION: BMVC SUBMISSION 1
+Transductive Kernel Map Learning
+nd its Application to Image Annotation
+Dinh-Phong Vo
+Hichem Sahbi
+LTCI CNRS Telecom ParisTech
+6 rue Barrault, 75013, Paris, France"
+1886b6d9c303135c5fbdc33e5f401e7fc4da6da4,Knowledge Guided Disambiguation for Large-Scale Scene Classification With Multi-Resolution CNNs,"Knowledge Guided Disambiguation for Large-Scale
+Scene Classification with Multi-Resolution CNNs
+Limin Wang, Sheng Guo, Weilin Huang, Member, IEEE, Yuanjun Xiong, and Yu Qiao, Senior Member, IEEE"
+1888bf50fd140767352158c0ad5748b501563833,A Guided Tour of Face Processing,"PA R T 1
+THE BASICS"
+18babfe4c7230522527a068654eeea10b1a827fd,Discriminative Label Propagation for Multi-object Tracking with Sporadic Appearance Features,"Discriminative Label Propagation for Multi-Object Tracking with Sporadic
+Appearance Features
+Amit Kumar K.C. and Christophe De Vleeschouwer
+ISPGroup, ELEN Department, ICTEAM Institute
+Universit´e catholique de Louvain
+Louvain-la-Neuve, B-1348, Belgium
+{amit.kc,"
+1819d9a9099dafc987dd236c2174945e7922be13,Eigenfeature Regularization and Extraction in Face Recognition,"Eigenfeature Regularization and Extraction
+in Face Recognition
+Xudong Jiang, Senior Member, IEEE, Bappaditya Mandal, and Alex Kot, Fellow, IEEE"
+183ad3409a53914247affc599b33af38d94937be,A Latent-Variable Lattice Model,"An Inertial Latent-Variable Sequence Model
+Rajasekaran Masatran
+Indian Institute of Technology Madras, Chennai, TN, India
+MASATRAN AT FREESHELL.ORG"
+18f348d56a2ff1c0904685ce8b6818b84867b7a4,ML-o-scope: a diagnostic visualization system for deep machine learning pipelines,"ML-o-scope: a diagnostic visualization system for
+deep machine learning pipelines
+Daniel Bruckner
+Electrical Engineering and Computer Sciences
+University of California at Berkeley
+Technical Report No. UCB/EECS-2014-99
+http://www.eecs.berkeley.edu/Pubs/TechRpts/2014/EECS-2014-99.html
+May 16, 2014"
+18d7684c6b96caf51adb519738720eceb1b13050,Hidden Relationships: Bayesian Estimation With Partial Knowledge,"Hidden Relationships:
+Bayesian Estimation with Partial Knowledge
+Tomer Michaeli and Yonina C. Eldar, Senior Member, IEEE
+the joint probability function of"
+18a4399b8afb460cbd4de2225f39ed23a95336d6,HMS-Net: Hierarchical Multi-scale Sparsity-invariant Network for Sparse Depth Completion,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+HMS-Net: Hierarchical Multi-scale
+Sparsity-invariant Network for Sparse Depth
+Completion
+Zixuan Huang, Junming Fan, Shuai Yi, Xiaogang Wang, Senior Member, IEEE,
+Hongsheng Li, Member, IEEE"
+18727c3f4ada0cec9e5914340cc672d0554d7784,"3-D Face Detection, Landmark Localization, and Registration Using a Point Distribution Model","3-D face detection, landmark localization and
+registration using a Point Distribution Model
+Prathap Nair*, Student Member, IEEE, and Andrea Cavallaro, Member, IEEE"
+18001ed8ce46cf9df5574b1e360550ed9401cd76,Sentic blending: Scalable multimodal fusion for the continuous interpretation of semantics and sentics,"Sentic Blending:
+Scalable Multimodal Fusion for the Continuous
+Interpretation of Semantics and Sentics
+Erik Cambria, Member, IEEE, Newton Howard, Member, IEEE,
+Jane Hsu, Member, IEEE, and Amir Hussain, Senior Member, IEEE"
+18aae0f20fdc6aab093c72c81005247d2cbc8512,Bayesian CP Factorization of Incomplete Tensors with Automatic Rank Determination,"Bayesian CP Factorization of Incomplete
+Tensors with Automatic Rank Determination
+Qibin Zhao, Member, IEEE, Liqing Zhang, Member, IEEE, and Andrzej Cichocki Fellow, IEEE"
+18233c55982050292ba7f6a5462c0e7576c3398d,Face Recognition using Eye Distance and PCA Approaches,"Face Recognition using Eye Distance and PCA
+Approaches
+Ripal Patel , Nidhi Rathod , Ami Shah , Mayur Sevak
+Electronics & Telecommunication Department,
+BVM Engineering College.
+Vallabh Vidyanagar-388120, Gujarat, India"
+180cf5ab4e021e64b9bf08f2ffc4a4712acd9a30,Multi-view anchor graph hashing,"MULTI-VIEW ANCHOR GRAPH HASHING
+Saehoon Kim1 and Seungjin Choi1,2
+Department of Computer Science and Engineering, POSTECH, Korea
+Division of IT Convergence Engineering, POSTECH, Korea
+{kshkawa,"
+185360fe1d024a3313042805ee201a75eac50131,Person De-Identification in Videos,"Person De-Identification in Videos
+Prachi Agrawal and P. J. Narayanan"
+1824b1ccace464ba275ccc86619feaa89018c0ad,One millisecond face alignment with an ensemble of regression trees,"One Millisecond Face Alignment with an Ensemble of Regression Trees
+Vahid Kazemi and Josephine Sullivan
+KTH, Royal Institute of Technology
+Computer Vision and Active Perception Lab
+Teknikringen 14, Stockholm, Sweden"
+18858cc936947fc96b5c06bbe3c6c2faa5614540,Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification,"Proceedings of Machine Learning Research 81:1–15, 2018
+Conference on Fairness, Accountability, and Transparency
+Gender Shades: Intersectional Accuracy Disparities in
+Commercial Gender Classification∗
+Joy Buolamwini
+MIT Media Lab 75 Amherst St. Cambridge, MA 02139
+Timnit Gebru
+Microsoft Research 641 Avenue of the Americas, New York, NY 10011
+Editors: Sorelle A. Friedler and Christo Wilson"
+2783efc96a0d59473e4236ccf1db6ed7e958839e,An Overview of Multi-Task Learning in Deep Neural Networks,"An Overview of Multi-Task Learning
+in Deep Neural Networks∗
+Sebastian Ruder
+Insight Centre for Data Analytics, NUI Galway
+Aylien Ltd., Dublin"
+27e97b67a8401def58eb41b4b00d3dfb0e4ad1a8,Knowledge Based Face Detection Using Fusion Features,"International Journal of Computer Engineering and Applications, ICCSTAR-2016, Special Issue,
+May.16
+Knowledge Based Face Detection Using Fusion Features.
+Savitri Kulkarni
+Assistant Professor,Department of CSE
+City Engineering College,
+2Annapurna N S
+UG Student (B.E) Department of CSE
+City Engineering College,"
+2704959c75a2e6741867ae18f11fa822fa544c74,Hierarchical Convex NMF for Clustering Massive Data,"JMLR: Workshop and Conference Proceedings 13: 253-268
+nd Asian Conference on Machine Learning (ACML2010), Tokyo, Japan, Nov. 8–10, 2010.
+Hierarchical Convex NMF for Clustering Massive Data
+Kristian Kersting
+Mirwaes Wahabzada
+Knowledge Discovery Department
+Fraunhofer IAIS, Schloss Birlinghoven
+53754 Sankt Augustin, Germany
+Christian Thurau
+Christian Bauckhage
+Vision and Social Media Group
+Fraunhofer IAIS, Schloss Birlinghoven
+53754 Sankt Augustin, Germany
+Editor: Masashi Sugiyama and Qiang Yang"
+275ad26b7e4d7847f7ad4eedda65f327007a9452,Query-by-Example Image Retrieval using Visual Dependency Representations,"Proceedings of COLING 2014, the 25th International Conference on Computational Linguistics: Technical Papers,
+pages 109–120, Dublin, Ireland, August 23-29 2014."
+27fda2c61f3fe1f74e18bd11555df7751d178bca,Real-time 3D head pose and facial landmark estimation from depth images using triangular surface patch features,"Real-time 3D Head Pose and Facial Landmark Estimation from Depth Images
+Using Triangular Surface Patch Features
+Chavdar Papazov
+Tim K. Marks
+Michael Jones
+Mitsubishi Electric Research Laboratories (MERL)
+01 Broadway, Cambridge, MA 02139"
+27a0a7837f9114143717fc63294a6500565294c2,Face Recognition in Unconstrained Environments: A Comparative Study,"Face Recognition in Unconstrained Environments: A
+Comparative Study
+Rodrigo Verschae, Javier Ruiz-Del-Solar, Mauricio Correa
+To cite this version:
+Rodrigo Verschae, Javier Ruiz-Del-Solar, Mauricio Correa. Face Recognition in Unconstrained
+Environments: A Comparative Study: . Workshop on Faces in ’Real-Life’ Images: Detection,
+Alignment, and Recognition, Oct 2008, Marseille, France. 2008. <inria-00326730>
+HAL Id: inria-00326730
+https://hal.inria.fr/inria-00326730
+Submitted on 5 Oct 2008
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destin´ee au d´epˆot et `a la diffusion de documents
+scientifiques de niveau recherche, publi´es ou non,
+´emanant des ´etablissements d’enseignement et de"
+27421586a04584d38dd961b37d0ca85408acfe59,Large brains in autism: the challenge of pervasive abnormality.,"Large Brains in Autism:
+The Challenge of Pervasive Abnormality
+MARTHA R. HERBERT
+Pediatric Neurology, Center for Morphometric Analysis
+Massachusetts General Hospital
+REVIEW I
+The most replicated finding in autism neuroanatomy—a tendency to unusually large brains—has seemed
+paradoxical in relation to the specificity of the abnormalities in three behavioral domains that define autism.
+We now know a range of things about this phenomenon, including that brains in autism have a growth spurt
+shortly after birth and then slow in growth a few short years afterward, that only younger but not older
+rains are larger in autism than in controls, that white matter contributes disproportionately to this volume
+increase and in a nonuniform pattern suggesting postnatal pathology, that functional connectivity among
+regions of autistic brains is diminished, and that neuroinflammation (including microgliosis and astrogliosis)
+ppears to be present in autistic brain tissue from childhood through adulthood. Alongside these pervasive
+rain tissue and functional abnormalities, there have arisen theories of pervasive or widespread neural
+information processing or signal coordination abnormalities (such as weak central coherence, impaired
+omplex processing, and underconnectivity), which are argued to underlie the specific observable behav-
+ioral features of autism. This convergence of findings and models suggests that a systems- and chronic
+disease–based reformulation of function and pathophysiology in autism needs to be considered, and
+it opens the possibility for new treatment targets. NEUROSCIENTIST 11(5):417–440; 2005. DOI:"
+2792e5d569b94406ca28f86c9999f569a3d60c6d,Illumination Multiplexing within Fundamental Limits,"Illumination Multiplexing within Fundamental Limits
+Netanel Ratner
+Yoav Y. Schechner
+Department of Electrical Engineering
+Technion - Israel Institute of Technology
+Haifa 32000, ISRAEL"
+276dbb667a66c23545534caa80be483222db7769,An Introduction to Image-based 3D Surface Reconstruction and a Survey of Photometric Stereo Methods,"D Res. 2, 03(2011)4
+0.1007/3DRes.03(2011)4
+DR REVIEW w
+An Introduction to Image-based 3D Surface Reconstruction and a
+Survey of Photometric Stereo Methods
+Steffen Herbort • Christian Wöhler
+introduction
+image-based 3D
+techniques. Then we describe
+Received: 21Feburary 2011 / Revised: 20 March 2011 / Accepted: 11 May 2011
+© 3D Research Center, Kwangwoon University and Springer 2011"
+270733d986a1eb72efda847b4b55bc6ba9686df4,Recognizing Facial Expressions Using Model-Based Image Interpretation,"We are IntechOpen,
+the first native scientific
+publisher of Open Access books
+,350
+08,000
+.7 M
+Open access books available
+International authors and editors
+Downloads
+Our authors are among the
+Countries delivered to
+TOP 1%
+2.2%
+most cited scientists
+Contributors from top 500 universities
+Selection of our books indexed in the Book Citation Index
+in Web of Science™ Core Collection (BKCI)
+Interested in publishing with us?
+Contact
+Numbers displayed above are based on latest data collected."
+27a4bbd7bc90ad118f15c61bb30079d6e6bff78e,3D Deformable Super-Resolution for Multi-Camera 3D Face Scanning,"J Math Imaging Vis
+DOI 10.1007/s10851-012-0399-y
+D Deformable Super-Resolution for Multi-Camera 3D Face
+Scanning
+Karima Ouji · Mohsen Ardabilian · Liming Chen ·
+Faouzi Ghorbel
+© Springer Science+Business Media New York 2012"
+277096c5e536784da9856ac083a972715ce9f9c3,Gender Recognition from Human-Body Images Using Visible-Light and Thermal Camera Videos Based on a Convolutional Neural Network for Image Feature Extraction,"Article
+Gender Recognition from Human-Body Images
+Using Visible-Light and Thermal Camera Videos
+Based on a Convolutional Neural Network for
+Image Feature Extraction
+Dat Tien Nguyen, Ki Wan Kim, Hyung Gil Hong, Ja Hyung Koo, Min Cheol Kim and
+Kang Ryoung Park *
+Division of Electronics and Electrical Engineering, Dongguk University, 30 Pildong-ro 1-gil, Jung-gu,
+Seoul 100-715, Korea; (D.T.N.); (K.W.K.);
+(H.G.H.); (J.H.K.); (M.C.K.)
+* Correspondence: Tel.: +82-10-3111-7022; Fax: +82-2-2277-8735
+Academic Editor: Joonki Paik
+Received: 31 January 2017; Accepted: 18 March 2017; Published: 20 March 2017"
+27169761aeab311a428a9dd964c7e34950a62a6b,Face Recognition Using 3D Head Scan Data Based on Procrustes Distance,"International Journal of the Physical Sciences Vol. 5(13), pp. 2020 -2029, 18 October, 2010
+Available online at http://www.academicjournals.org/IJPS
+ISSN 1992 - 1950 ©2010 Academic Journals
+Full Length Research Paper
+Face recognition using 3D head scan data based on
+Ahmed Mostayed1, Sikyung Kim1, Mohammad Mynuddin Gani Mazumder1* and Se Jin Park2
+Procrustes distance
+Department of Electrical Engineering, Kongju National University, South Korea.
+Korean Research Institute of Standards and Science (KRISS), Korea.
+Accepted 6 July, 2010
+Recently, face recognition has attracted significant attention from the researchers and scientists in
+various fields of research, such as biomedical informatics, pattern recognition, vision, etc due its
+pplications in commercially available systems, defense and security purpose. In this paper a practical
+method for face reorganization utilizing head cross section data based on Procrustes analysis is
+proposed. This proposed method relies on shape signatures of the contours extracted from face data.
+The shape signatures are created by calculating the centroid distance of the boundary points, which is
+translation and rotation invariant signature. The shape signatures for a selected region of interest
+(ROI) are used as feature vectors and authentication is done using them. After extracting feature
+vectors a comparison analysis is performed utilizing Procrustes distance to differentiate their face
+pattern from each other. The proposed scheme attains an equal error rate (EER) of 4.563% for the 400"
+272ac22c670fd0c7c3f1b4ca02e925ff22dd4b27,Articulated part-based model for joint object detection and pose estimation,"Articulated Part-based Model for Joint Object Detection and Pose Estimation
+Dept. of Electrical and Computer Engineering, University of Michigan at Ann Arbor, USA
+Min Sun
+Silvio Savarese
+COARSE
+LEVEL"
+27ae7c8c650ffef74c465640f423d9008014e1ca,Dimensionality Reduction with Adaptive Approximation,"TobepublishedintheProceedingsofIEEEICME2007,Beijing,China
+DIMENSIONALITY REDUCTION WITH ADAPTIVE APPROXIMATION
+Effrosyni Kokiopoulou and Pascal Frossard
+Ecole Polytechnique F´ed´erale de Lausanne (EPFL)
+Signal Processing Institute - ITS
+CH- 1015 Lausanne, Switzerland"
+27b87bdee46964757b83b5afb4184e438cad6b1b,Sequence searching with deep-learnt depth for condition- and viewpoint-invariant route-based place recognition,"Sequence Searching with Deep-learnt Depth for Condition- and Viewpoint-
+invariant Route-based Place Recognition
+Michael Milford, Stephanie Lowry, Niko
+Sunderhauf, Sareh Shirazi, Edward Pepperell,
+Ben Upcroft
+Queensland University of Technology Australia
+Australian Centre for Robotic Vision"
+27173d0b9bb5ce3a75d05e4dbd8f063375f24bb5,Effect of Different Occlusion on Facial Expressions Recognition,"Ankita Vyas Int. Journal of Engineering Research and Applications www.ijera.com
+ISSN : 2248-9622, Vol. 4, Issue 10( Part - 3), October 2014, pp.40-44
+RESEARCH ARTICLE
+OPEN ACCESS
+Effect of Different Occlusion on Facial Expressions Recognition
+Ankita Vyas*, Ramchand Hablani**
+*(Department of Computer Science, RGPV University, Indore)
+** (Department of Computer Science, RGPV University, Indore)"
+27f1fd71538ba420c63aa4c74704718a0633b22a,Multimodal News Article Analysis,Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+2785c5769489825671a6138fdf0537fcd444038a,A Deep Cascade Network for Unaligned Face Attribute Classification,"A Deep Cascade Network for Unaligned Face Attribute Classification
+Hui Ding,1 Hao Zhou,2 Shaohua Kevin Zhou,3 Rama Chellappa4
+,2,4University of Maryland, College Park
+Siemens Healthineers, New Jersey"
+27187d4c36f71d08898a53dfda0e81df11b25f21,Worst Case Linear Discriminant Analysis as Scalable Semidefinite Feasibility Problems,"MANUSCRIPT
+Worst-Case Linear Discriminant Analysis as
+Scalable Semidefinite Feasibility Problems
+Hui Li, Chunhua Shen, Anton van den Hengel, Qinfeng Shi"
+2725a68be6bc677bd435c19664569ecd45c52d7a,DeepProposal: Hunting Objects by Cascading Deep Convolutional Layers,"DeepProposal: Hunting Objects by Cascading Deep Convolutional Layers
+Amir Ghodrati1∗, Ali Diba1∗, Marco Pedersoli2†‡, Tinne Tuytelaars1, Luc Van Gool1,3
+KU Leuven, ESAT-PSI, iMinds
+Inria
+CVL, ETH Zurich"
+273b973092a4491974d173cc5258c74aede692cc,Monocular Long-Term Target Following on UAVs,"Monocular Long-term Target Following on UAVs
+Rui Li ∗
+Minjian Pang†
+Cong Zhao ‡
+Guyue Zhou ‡
+Lu Fang †§"
+2770b095613d4395045942dc60e6c560e882f887,GridFace: Face Rectification via Learning Local Homography Transformations,"GridFace: Face Rectification via Learning Local
+Homography Transformations
+Erjin Zhou, Zhimin Cao, and Jian Sun
+Face++, Megvii Inc."
+27cccf992f54966feb2ab4831fab628334c742d8,"Facial Expression Recognition by Statistical, Spatial Features and using Decision Tree","International Journal of Computer Applications (0975 – 8887)
+Volume 64– No.18, February 2013
+Facial Expression Recognition by Statistical, Spatial
+Features and using Decision Tree
+Nazil Perveen
+Assistant Professor
+CSIT Department
+GGV BIlaspur, Chhattisgarh
+India
+Darshan Kumar
+Assistant Professor
+Electronics (ECE) Department
+JECRC Jaipur, Rajasthan India
+IshanBhardwaj
+Student of Ph.D.
+Electrical Department
+NIT Raipur, Chhattisgarh India"
+27f9b43737e234cefb3c5cd72324a36cbe61ee3c,Sparse Manifold Clustering and Embedding,"Sparse Manifold Clustering and Embedding
+Ehsan Elhamifar
+Center for Imaging Science
+Johns Hopkins University
+Ren´e Vidal
+Center for Imaging Science
+Johns Hopkins University"
+27f8b01e628f20ebfcb58d14ea40573d351bbaad,Events based Multimedia Indexing and Retrieval,"DEPARTMENT OF INFORMATION ENGINEERING AND COMPUTER SCIENCE
+ICT International Doctoral School
+Events based Multimedia Indexing
+nd Retrieval
+Kashif Ahmad
+SUBMITTED TO THE DEPARTMENT OF
+INFORMATION ENGINEERING AND COMPUTER SCIENCE (DISI)
+IN THE PARTIAL FULFILMENT OF THE REQUIREMENTS FOR THE DEGREE
+DOCTOR OF PHILOSOPHY
+Advisor:
+Examiners: Prof. Marco Carli, Universit`a degli Studi di Roma Tre, Italy
+Prof. Nicola Conci, Universit`a degli Studi di Trento, Italy
+Prof. Pietro Zanuttigh, Universit`a degli Studi di Padova, Italy
+Prof. Giulia Boato, Universit`a degli Studi di Trento, Italy
+December 2017"
+27c978bdb9de3a5135349976fdbc514ff547dcab,Multi-Objective Stochastic Optimization by Co-Direct Sequential Simulation for History Matching of Oil Reservoirs,"Multi-Objective Stochastic Optimization by Co-Direct Sequential
+Simulation for History Matching of Oil Reservoirs
+Jo˜ao Daniel Trigo Pereira Carneiro∗
+under the supervision of Am´ılcar de Oliveira Soares†
+Dep. Mines, IST, Lisbon, Portugal
+December 2010"
+2799d53ca80d67f104bef207a667fa12b4c59d62,Multiple-Person Tracking for a Mobile Robot Using Stereo,"MVA2009 IAPR Conference on Machine Vision Applications, May 20-22, 2009, Yokohama, JAPAN
+Multiple-Person Tracking for a Mobile Robot using Stereo
+Junji Satake
+Jun Miura
+Toyohashi University of Technology
+-1 Hibarigaoka, Tempaku-cho, Toyohashi, Aichi 441-8580, Japan
+{satake,"
+27ae95d9ad6492511296360ba0618f5d0565cf9e,Person re-Identification over distributed spaces and time,"Person re-Identification over distributed spaces and time
+Prosser, Bryan James
+For additional information about this publication click this link.
+http://qmro.qmul.ac.uk/jspui/handle/123456789/2513
+Information about this research object was correct at the time of download; we occasionally
+make corrections to records, please therefore check the published record when citing. For
+more information contact"
+276d35fef150f61adf53270eb6e50625022d4e7f,The ACRV picking benchmark: A robotic shelf picking benchmark to foster reproducible research,"A Robotic Shelf Picking Benchmark to Foster Reproducible Research
+The ACRV Picking Benchmark:
+J¨urgen Leitner1,2, Adam W. Tow1,2, Niko S¨underhauf1,2, Jake E. Dean2, Joseph W. Durham3, Matthew
+Cooper2, Markus Eich1,2, Christopher Lehnert2, Ruben Mangels2, Christopher McCool2, Peter T. Kujala1,2,
+Lachlan Nicholson2, Trung Pham1,4, James Sergeant1,2, Fangyi Zhang1,2, Ben Upcroft1,2, and Peter Corke1,2."
+27183d23f50884a0e06b978acf9ad77dbcbfb112,Autonomous indoor helicopter flight using a single onboard camera,"The 2009 IEEE/RSJ International Conference on
+Intelligent Robots and Systems
+October 11-15, 2009 St. Louis, USA
+978-1-4244-3804-4/09/$25.00 ©2009 IEEE"
+2757ff9bba677e7bceaa4802d85cc6f872618583,From basis components to complex structural patterns,"FROM BASIS COMPONENTS TO COMPLEX STRUCTURAL PATTERNS
+Anh Huy Phan‡, Andrzej Cichocki‡∗, Petr Tichavsk´y•†, Rafal Zdunek§ and Sidney Lehky‡⋆
+Brain Science Institute, RIKEN, Wakoshi, Japan
+•Institute of Information Theory and Automation, Prague, Czech Republic
+§Wroclaw University of Technology, Poland
+⋆Computational Neurobiology Lab, The Salk Institute, USA"
+27448716366bed56515c1b32579daf224165861e,Deep Multi-camera People Detection,"Deep Multi-Camera People Detection
+Tatjana Chavdarova and Franc¸ois Fleuret
+Idiap Research Institute and
+´Ecole Polytechnique F´ed´erale de Lausanne
+Email:"
+277cadfadc4550fc781be7df8cb4ec89e54b793e,Autonomous Real-time Vehicle Detection from a Medium-Level UAV,"Autonomous Real-time Vehicle Detection from a
+Medium-Level UAV
+Toby P. Breckon, Stuart E. Barnes, Marcin L. Eichner and Ken Wahren"
+27b1670e1b91ab983b7b1ecfe9eb5e6ba951e0ba,Comparison between k-nn and svm method for speech emotion recognition,"Comparison between k-nn and svm method
+for speech emotion recognition
+Muzaffar Khan, Tirupati Goskula, Mohmmed Nasiruddin ,Ruhina Quazi
+Anjuman College of Engineering & Technology ,Sadar, Nagpur, India"
+27ee8482c376ef282d5eb2e673ab042f5ded99d7,Scale Normalization for the Distance Maps AAM,"Scale Normalization for the Distance Maps AAM.
+Denis GIRI, Maxime ROSENWALD, Benjamin VILLENEUVE, Sylvain LE GALLOU and Renaud S ´EGUIER
+Email: {denis.giri, maxime.rosenwald, benjamin.villeneuve, sylvain.legallou,
+Avenue de la boulaie, BP 81127,
+5 511 Cesson-S´evign´e, France
+Sup´elec, IETR-SCEE Team"
+2734b3a6345396499b2b7c6cc1b43fc7e9b375ee,Full-System Simulation of big.LITTLE Multicore Architecture for Performance and Energy Exploration,"Full-System Simulation of big.LITTLE Multicore
+Architecture for Performance and Energy
+Exploration
+Anastasiia Butko, Florent Bruguier, Abdoulaye Gamati´e,
+Gilles Sassatelli, David Novo, Lionel Torres and Michel Robert
+LIRMM (CNRS and University of Montpellier)
+Montpellier, France
+Email:"
+4b4106614c1d553365bad75d7866bff0de6056ed,Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions,"Unconstrained Facial Images: Database for Face
+Recognition under Real-world Conditions⋆
+Ladislav Lenc1,2 and Pavel Kr´al1,2
+Dept. of Computer Science & Engineering
+University of West Bohemia
+Plzeˇn, Czech Republic
+NTIS - New Technologies for the Information Society
+University of West Bohemia
+Plzeˇn, Czech Republic"
+4b90f2e4f421dd9198d4c52cd3371643acddf1f9,Detecting planar surface using a light-field camera with application to distinguishing real scenes from printed photos,"014 IEEE International Conference on Acoustic, Speech and Signal Processing (ICASSP)
+978-1-4799-2893-4/14/$31.00 ©2014 IEEE
+´Ecole Polytechnique F´ed´erale de Lausanne
+School of Computer and Communication Sciences
+AudioVisual Communications Laboratory
+. INTRODUCTION
+Alireza Ghasemi
+Martin Vetterli"
+4b57456642e1d21f2bda05aea586b7f419d309ce,Disposable Ties and the Urban Poor,"Disposable Ties and the Urban Poor
+Author(s): Matthew Desmond
+Reviewed work(s):
+Source: American Journal of Sociology, Vol. 117, No. 5 (March 2012), pp. 1295-1335
+Published by: The University of Chicago Press
+Stable URL: http://www.jstor.org/stable/10.1086/663574 .
+Accessed: 17/08/2012 17:34
+Your use of the JSTOR archive indicates your acceptance of the Terms & Conditions of Use, available at .
+http://www.jstor.org/page/info/about/policies/terms.jsp
+JSTOR is a not-for-profit service that helps scholars, researchers, and students discover, use, and build upon a wide range of
+ontent in a trusted digital archive. We use information technology and tools to increase productivity and facilitate new forms
+of scholarship. For more information about JSTOR, please contact
+The University of Chicago Press is collaborating with JSTOR to digitize, preserve and extend access to
+American Journal of Sociology.
+http://www.jstor.org"
+4bb83b00e7b8eb27ad04d4bb80499e91fc471a07,Emotion related structures in large image databases,"Emotion Related Structures in Large Image Databases
+Martin Solli
+ITN, Linköping University
+SE-60174 Norrköping, Sweden
+Reiner Lenz
+ITN, Linköping University
+SE-60174 Norrköping, Sweden"
+4b37efd3987c1e625b063a6998bd6b282c844915,End-to-end Convolutional Network for Saliency Prediction,"End-to-end Convolutional Network for Saliency Prediction
+Junting Pan and Xavier Gir´o-i-Nieto
+Universitat Politecnica de Catalunya (UPC)
+Barcelona, Catalonia/Spain"
+4b89cf7197922ee9418ae93896586c990e0d2867,Unsupervised Discovery of Action Classes,"LATEX Author Guidelines for CVPR Proceedings
+First Author
+Institution1
+Institution1 address"
+4b69bbb6dc2959ea3d2e911ed45c6298dc531490,Deep Mixture of Experts via Shallow Embedding,"TAFE-Net: Task-Aware Feature Embeddings for
+Efficient Learning and Inference
+Xin Wang Fisher Yu Ruth Wang Trevor Darrell
+EECS Department, UC Berkeley
+Joseph E. Gonzalez"
+4b042eb64ddb8991c0e63fff02b1c51c378a8f58,Leveraging Massive User Contributions for Knowledge Extraction,"Chapter 16
+Leveraging Massive User Contributions for
+Knowledge Extraction
+Spiros Nikolopoulos, Elisavet Chatzilari, Eirini Giannakidou,
+Symeon Papadopoulos, Ioannis Kompatsiaris, and Athena Vakali"
+4b5dd0a1b866f928734bc36afd597adca20a7ec1,Detector ensembles for face recognition in video surveillance,"Detector Ensembles for Face Recognition in Video Surveillance
+Christophe Pagano, Eric Granger, Robert Sabourin and Dmitry O. Gorodnichy"
+4b6eb9117c1b7833c8c6b95ecad427f8f994f023,Robust Depth-Based Person Re-Identification,"Robust Depth-based Person Re-identification
+Ancong Wu, Wei-Shi Zheng, Jian-Huang Lai
+Code is available at the project page:
+http://isee.sysu.edu.cn/∼wuancong/ProjectDepthReID.htm
+For reference of this work, please cite:
+Ancong Wu, Wei-Shi Zheng,
+Person Re-identification.
+(DOI:10.1109/TIP.2017.2675201)
+Jian-Huang Lai. Robust Depth-based
+title={Robust Depth-based Person Re-identification},
+uthor={Wu, Ancong and Zheng, Wei-Shi and Lai, Jianhuang},
+(DOI:10.1109/TIP.2017.2675201)},
+year={2017}"
+4b8762d7637868b6ba0c97c95b2d4949d103ecdc,The OU-ISIR Gait Database Comprising the Large Population Dataset and Performance Evaluation of Gait Recognition,"The OU-ISIR Gait Database Comprising the Large
+Population Dataset and Performance Evaluation of
+Gait Recognition
+Haruyuki Iwama, Mayu Okumura, Yasushi Makihara, and Yasushi Yagi, Member, IEEE
+the world’s"
+4ba1cf65eb86aba729192d2f0fe2cd064ac346cf,One-Shot Person Re-identification with a Consumer Depth Camera,"One-Shot Person Re-Identification with a
+Consumer Depth Camera
+Matteo Munaro, Andrea Fossati, Alberto Basso, Emanuele Menegatti and Luc Van"
+4b1fc77a54e9daece9f11ec881a2ec40919337b7,Fusion of LBP and HOG using multiple kernel learning for infrared face recognition,"Fusion of LBP and HOG Using Multiple Kernel
+Learning for Infrared Face Recognition
+Zhihua Xie, Peng Jiang, Shuai Zhang
+Key Lab of Optic-Electronic and Communication
+Jiangxi Sciences and Technology Normal University
+Nanchang, Jiangxi Province, China
+limitation
+(LBP) has"
+4b6ea82fa73d2137c884ad43f7865d88b24ff01d,How deep should be the depth of convolutional neural networks: a backyard dog case study,"How deep should be the depth of convolutional neural
+networks: a backyard dog case study
+Alexander N. Gorban, Evgeny M. Mirkes, Ivan Y. Tukin
+University of Leicester, Leicester LE1 7RH, UK"
+4b7dc1e99b0b34022aec2bde1a13481f28f62030,Person Re-Identification Based on Weighted Indexing Structures,"Person Re-Identification based on Weighted
+Indexing Structures
+Cristianne R. S. Dutra, Matheus Castro Rocha, and William Robson Schwartz
+Department of Computer Science, Universidade Federal de Minas Gerais
+Belo Horizonte, Minas Gerais, Brazil, 31270-901
+rocha"
+4b9b39bbdac95e24773789f1bb543149116cdc37,Region-Of-Interest Retrieval in Brain MR Images,"Technical Note PR-TN 2008/00905
+Issued: 12/2008
+Region-Of-Interest Retrieval in Brain MR
+Images
+D. Unay; A. Ekin
+Philips Research Europe
+Unclassified
+ Koninklijke Philips Electronics N.V. 2008"
+4b04247c7f22410681b6aab053d9655cf7f3f888,Robust Face Recognition by Constrained Part-based Alignment,"Robust Face Recognition by Constrained Part-based
+Alignment
+Yuting Zhang, Kui Jia, Yueming Wang, Gang Pan, Tsung-Han Chan, Yi Ma"
+4b60e45b6803e2e155f25a2270a28be9f8bec130,Attribute based object identification,"Attribute Based Object Identification
+Yuyin Sun, Liefeng Bo and Dieter Fox"
+4b48e912a17c79ac95d6a60afed8238c9ab9e553,Minimum Margin Loss for Deep Face Recognition,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+Minimum Margin Loss for Deep Face Recognition
+Xin Wei, Student Member, IEEE, Hui Wang, Member, IEEE, Bryan Scotney, and Huan Wan"
+4b0893bf71e4e13529cefb286c78b166a9491552,Estimating orientation in tracking individuals of flying swarms,"978-1-4799-9988-0/16/$31.00 ©2016 IEEE
+ICASSP 2016"
+4b5eeea5dd8bd69331bd4bd4c66098b125888dea,Human Activity Recognition Using Conditional Random Fields and Privileged Information,"Human Activity Recognition Using Conditional
+Random Fields and Privileged Information
+DOCTORAL THESIS
+submitted to
+the designated by the General Assembly Composition of the
+Department of Computer Science & Engineering Inquiry
+Committee
+Michalis Vrigkas
+in partial fulfillment of the Requirements for the Degree of
+DOCTOR OF PHILOSOPHY
+February 2016"
+4bde15a51413fafa04193e72c15e132e7716d8a6,Performance Study of Fusion in Multimodal Biometric Verification using Ear and Iris Features,"International Conference on Research Trends in Computer Technologies (ICRTCT - 2013)
+Proceedings published in International Journal of Computer Applications® (IJCA) (0975 – 8887)
+Performance Study of Fusion in Multimodal Biometric
+Verification using Ear and Iris Features
+Poornima.S
+Department of IT, SSN College of Engineering
+Chennai, India."
+4b4763303a15a4c6313bfb386756437f394a0129,Explicit Inductive Bias for Transfer Learning with Convolutional Networks,"Explicit Inductive Bias for Transfer Learning with Convolutional Networks
+Xuhong LI 1 Yves GRANDVALET 1 Franck DAVOINE 1"
+4b8ce1bfedb285d8d609d1059dd0183420d63671,Transductive Multi-View Zero-Shot Learning,"Transductive Multi-view Zero-Shot Learning
+Yanwei Fu, Timothy M. Hospedales, Tao Xiang and Shaogang Gong"
+4be03fd3a76b07125cd39777a6875ee59d9889bd,Content-based analysis for accessing audiovisual archives: Alternatives for concept-based indexing and search,"CONTENT-BASED ANALYSIS FOR ACCESSING AUDIOVISUAL ARCHIVES:
+ALTERNATIVES FOR CONCEPT-BASED INDEXING AND SEARCH
+Tinne Tuytelaars
+ESAT/PSI - IBBT
+KU Leuven, Belgium"
+4baf3b165489122a1f8b574240c2a7fa9b6a7a14,Composite Statistical Inference for Semantic Segmentation,"Composite Statistical Inference for Semantic Segmentation
+Fuxin Li(1), Joao Carreira(2), Guy Lebanon(1), Cristian Sminchisescu(3)
+(1) Georgia Institute of Technology. (2) ISR - University of Coimbra. (3) Lund University"
+4bc67489bbe634271f8fde73a851d7a59946ed36,Wide area motion capture using an array of consumer grade structured light depthsensors,"Mälardalen University
+School of Innovation, Design and Engineering
+Bachelor thesis in Computer science
+Wide area motion capture using an array of
+onsumer grade structured light depth
+sensors
+Author:
+Karl Arvidsson
+Supervisor:
+Afshin Ameri
+Examiner:
+Baran Çürüklü
+October 20, 2015"
+4be63e7891180e28085d03bb992abbc5104ac446,Adapting a Pedestrian Detector by Boosting LDA Exemplar Classifiers,"Adapting a Pedestrian Detector by Boosting LDA Exemplar Classifiers
+Jiaolong Xu1, David V´azquez1, Sebastian Ramos1, Antonio M. L´opez1,2 and Daniel Ponsa1,2
+Computer Vision Center
+Dept. of Computer Science
+Autonomous University of Barcelona
+08193 Bellaterra, Barcelona, Spain
+{jiaolong, dvazquez, sramosp, antonio,"
+4b7d5b17c0daa35f682417c32e80022c6645dc7f,Fine-Grained Object Recognition and Zero-Shot Learning in Remote Sensing Imagery,"Fine-Grained Object Recognition and Zero-Shot
+Learning in Remote Sensing Imagery
+Gencer Sumbul, Ramazan Gokberk Cinbis, and Selim Aksoy, Senior Member, IEEE
+learning (ZSL)"
+4bfdbe2ffc6311c8a297355422d914cb666b358a,"On Boosting, Tug of War, and Lexicographic Programming","On Boosting, Tug of War, and Lexicographic
+Programming
+Shounak Datta, Sayak Nag, and Swagatam Das, Senior Member, IEEE"
+4bfe7037b2d92215aeb5e116988ade7e6733a6b9,Frontal contributions to face processing differences in autism: evidence from fMRI of inverted face processing.,"Journal of the International Neuropsychological Society (2008), 14, 922–932.
+Copyright © 2008 INS. Published by Cambridge University Press. Printed in the USA.
+doi:10.10170S135561770808140X
+SYMPOSIUM
+Frontal contributions to face processing differences
+in autism: Evidence from fMRI of inverted
+face processing
+SUSAN Y. BOOKHEIMER,1,2 A. TING WANG,3 ASHLEY SCOTT,1 MARIAN SIGMAN,1,2
+nd MIRELLA DAPRETTO 1
+Department of Psychiatry and Biobehavioral Sciences, David Geffen School of Medicine, University of California Los Angeles,
+Los Angeles, California
+Department of Psychology, University of California Los Angeles, Los Angeles, California
+Department of Psychiatry, Mount Sinai School of Medicine, New York, New York
+(Received January 8, 2008; Final Revision August 9, 2008; Accepted August 11, 2008)"
+4b0111182ace7443f060a64754ca23b2fc7e1d77,Face Recognition by Super-Resolved 3D Models From Consumer Depth Cameras,"Face Recognition by Super-Resolved 3D Models
+From Consumer Depth Cameras
+Stefano Berretti, Pietro Pala, Senior Member, IEEE, and Alberto del Bimbo, Member, IEEE
+the impact of"
+11943efec248fcac57ff6913424e230d0a02e977,Auxiliary Tasks in Multi-task Learning,"Auxiliary Tasks in Multi-task Learning
+Lukas Liebel
+Marco Körner
+Computer Vision Research Group, Chair of Remote Sensing Technology
+Technical University of Munich, Germany
+{lukas.liebel,
+Multi-task convolutional neural networks (CNNs) have shown impressive results for certain combinations of tasks, such
+s single-image depth estimation (SIDE) and semantic segmentation. This is achieved by pushing the network towards
+learning a robust representation that generalizes well to different atomic tasks. We extend this concept by adding
+uxiliary tasks, which are of minor relevance for the application, to the set of learned tasks. As a kind of additional
+regularization, they are expected to boost the performance of the ultimately desired main tasks. To study the proposed
+pproach, we picked vision-based road scene understanding (RSU) as an exemplary application. Since multi-task
+learning requires specialized datasets, particularly when using extensive sets of tasks, we provide a multi-modal dataset
+for multi-task RSU, called synMT. More than 2.5 · 105 synthetic images, annotated with 21 different labels, were
+cquired from the video game Grand Theft Auto V (GTA V). Our proposed deep multi-task CNN architecture was
+trained on various combination of tasks using synMT. The experiments confirmed that auxiliary tasks can indeed boost
+network performance, both in terms of final results and training time.
+Introduction
+Various applications require solving several atomic tasks from
+the computer vision domain using a single image as input. Such"
+1178beb48d666d7fc41b2d476f6a92450c0726c0,Challenges in Multi-modal Gesture Recognition,"Journal of Machine Learning Research 17 (2016) 1-54
+Submitted 11/14; Revised 1/16; Published 4/16
+Challenges in multimodal gesture recognition
+Sergio Escalera
+Computer Vision Center UAB and University of Barcelona
+Vassilis Athitsos
+University of Texas
+Isabelle Guyon
+ChaLearn, Berkeley, California
+Editors: Zhuowen Tu"
+1152b88194214d4ea0f85b727f4b120915ad8056,Exploiting feature dynamics for active object recognition,"Exploiting Feature Dynamics for Active
+Object Recognition
+Philipp Robbel and Deb Roy
+MIT Media Laboratory
+Cambridge, MA 02139, USA"
+11f7f939b6fcce51bdd8f3e5ecbcf5b59a0108f5,Rolling Riemannian Manifolds to Solve the Multi-class Classification Problem,"Rolling Riemannian Manifolds to Solve the Multi-class Classification Problem
+Rui Caseiro1, Pedro Martins1, João F. Henriques1, Fátima Silva Leite1,2, and Jorge Batista1
+Institute of Systems and Robotics - University of Coimbra, Portugal
+Department of Mathematics - University of Coimbra, Portugal ,
+{ruicaseiro, pedromartins, henriques,"
+111ff5420111751454a2f4f55b7bb75d837ed5f4,Automatic Annotation of Structured Facts in Images,"Proceedings of the 5th Workshop on Vision and Language, pages 1–9,
+Berlin, Germany, August 12 2016. c(cid:13)2016 Association for Computational Linguistics"
+11b00a4be68e9622d7b4698aca84da85aca3e288,Modeling Social Interactions in Real Work Environments,"Modeling Social Interactions in Real Work Environments
+Salvatore Vanini
+SUPSI-DTI
+via Cantonale
+6928 Manno, Switzerland
+Silvia Giordano
+SUPSI-DTI
+via Cantonale
+6928 Manno, Switzerland
+Dario Gallucci
+SUPSI-DTI
+via Cantonale
+6928 Manno, Switzerland
+Kamini Garg
+SUPSI-DTI
+via Cantonale
+6928 Manno, Switzerland
+Victoria Mirata
+FFHS-IFeL
+Überlandstrasse 12"
+115724ce1ce9422dad095b301c7d096498ad50d3,The E2E Dataset: New Challenges For End-to-End Generation,"Saarbr¨ucken, Germany, 15-17 August 2017. c(cid:13)2017 Association for Computational Linguistics
+Proceedings of the SIGDIAL 2017 Conference, pages 201–206,"
+11f73583ba373487967225ae4797d723ff367c1c,"End-to-end, sequence-to-sequence probabilistic visual odometry through deep neural networks","Article
+End-to-end, sequence-to-sequence
+probabilistic visual odometry through
+deep neural networks
+The International Journal of
+Robotics Research
+© The Author(s) 2017
+Reprints and permissions:
+sagepub.co.uk/journalsPermissions.nav
+DOI: 10.1177/0278364917734298
+journals.sagepub.com/home/ijr
+Sen Wang1,2, Ronald Clark3, Hongkai Wen4 and Niki Trigoni2"
+11691f1e7c9dbcbd6dfd256ba7ac710581552baa,SoccerNet: A Scalable Dataset for Action Spotting in Soccer Videos,"SoccerNet: A Scalable Dataset for Action Spotting in Soccer Videos
+Silvio Giancola, Mohieddine Amine, Tarek Dghaily, Bernard Ghanem
+King Abdullah University of Science and Technology (KAUST), Saudi Arabia"
+11bfc54a64ca69786323551bbf88b85b216ae486,Exploring the Facial Expression Perception-Production Link Using Real-Time Automated Facial Expression Recognition,"Exploring the Facial Expression
+Perception-Production Link Using Real-Time
+Automated Facial Expression Recognition
+David M. Deriso1, Josh Susskind1, Jim Tanaka2, Piotr Winkielman3,
+John Herrington4, Robert Schultz4, and Marian Bartlett1
+Machine Perception Laboratory, University of California, San Diego
+Department of Psychology, University of Victoria
+Department of Psychology, University of California, San Diego
+Center for Autism Research, Children’s Hospital of Philadelphia"
+11155ee686bfb675816a2acdf5a8ddf06e67b65f,EmoDetect – Smart Emotion Detection from Facial Expressions,"EmoDetect – Smart Emotion Detection from Facial Expressions
+Rishabh Animesh
+Skand Hurkat
+Abhinandan Majumdar
+Aayush Saxena
+ra523
+sh953
+m2352
+s2825"
+1149c6ac37ae2310fe6be1feb6e7e18336552d95,"Classification of Face Images for Gender, Age, Facial Expression, and Identity","Proc. Int. Conf. on Artificial Neural Networks (ICANN’05), Warsaw, LNCS 3696, vol. I, pp. 569-574, Springer Verlag 2005
+Classification of Face Images for Gender, Age,
+Facial Expression, and Identity1
+Torsten Wilhelm, Hans-Joachim B¨ohme, and Horst-Michael Gross
+Department of Neuroinformatics and Cognitive Robotics
+Ilmenau Technical University, P.O.Box 100565, 98684 Ilmenau, Germany"
+11f17191bf74c80ad0b16b9f404df6d03f7c8814,Recognition of Visually Perceived Compositional Human Actions by Multiple Spatio-Temporal Scales Recurrent Neural Networks,"Recognition of Visually Perceived Compositional
+Human Actions by Multiple Spatio-Temporal Scales
+Recurrent Neural Networks
+Haanvid Lee, Minju Jung, and Jun Tani"
+11467733103a3e58ae88cb238f620cf6cafd4420,Learning of Graphical Models and Efficient Inference for Object Class Recognition,"Learning of Graphical Models and Ef‌f‌icient
+Inference for Object Class Recognition
+Martin Bergtholdt, J¨org Kappes, and Christoph Schn¨orr
+Computer Vision, Graphics, and Pattern Recognition Group
+Department of Mathematics and Computer Science
+University of Mannheim, 68131 Mannheim, Germany"
+11a34bda2daecad5f7c1caa309897cc9cc334480,Person re-identification using view-dependent score-level fusion of gait and color features,"1st International Conference on Pattern Recognition (ICPR 2012)
+November 11-15, 2012. Tsukuba, Japan
+978-4-9906441-1-6 ©2012 IAPR"
+1172ce24f6e9242b9c26c84c6aa89a72ed8203d0,Find your own way: Weakly-supervised segmentation of path proposals for urban autonomy,"Find Your Own Way: Weakly-Supervised Segmentation of Path
+Proposals for Urban Autonomy
+Dan Barnes, Will Maddern and Ingmar Posner"
+11be33019f591214c8f79dbcb24a50d8f7fa5c95,Salgan 360 : Visual Saliency Prediction on 360 Degree Images with Generative Adversarial Networks,"SALGAN360: VISUAL SALIENCY PREDICTION ON 360 DEGREE IMAGES WITH
+GENERATIVE ADVERSARIAL NETWORKS
+Fang-Yi Chao, Lu Zhang, Wassim Hamidouche, Olivier Deforges
+Univ Rennes, INSA Rennes, CNRS, IETR - UMR 6164, F-35000 Rennes, France
+{fang-yi.chao, lu.ge, wassim.hamidouche,"
+1169f3386a49daccbe199cccb518238a0130a537,"Analyzing Complex Events and Human Actions in ""in-the-wild"" Videos",
+1151a81118368e7596843b8db2508e4974fd7435,A Testbed for Cross-Dataset Analysis,"A Testbed for Cross-Dataset Analysis
+Tatiana Tommasi and Tinne Tuytelaars
+ESAT-PSI/VISICS - iMinds, KU Leuven, Belgium"
+1119b4b038fd7d1d337d4aee232dea6c56f20cf1,A Sparse Embedding and Least Variance Encoding Approach to Hashing,"A Sparse Embedding and Least Variance Encoding
+Approach to Hashing
+Xiaofeng Zhu, Lei Zhang, Member, IEEE, Zi Huang"
+116261c74ad54646f7d1d6be38cb9930f1bf44f6,3D Twins and Expression Challenge,"3D Twins and
+Expression Challenge
+Vipin Vijayan, Kevin W. Bowyer, and Patrick J. Flynn."
+1198572784788a6d2c44c149886d4e42858d49e4,Learning Discriminative Features using Encoder-Decoder type Deep Neural Nets,"Learning Discriminative Features using Encoder/Decoder type Deep
+Neural Nets
+Vishwajeet Singh1, Killamsetti Ravi Kumar2, K Eswaran3
+ALPES, Bolarum, Hyderabad 500010,
+ALPES, Bolarum, Hyderabad 500010,
+SNIST, Ghatkesar, Hyderabad 501301,"
+11ed823555aabf7e32df5b09a04111a686f8ebb6,Learning visual dictionaries and decision lists for object recognition,"CONFIDENTIAL. Limited circulation. For review only.
+Preprint submitted to 19th International Conference on Pattern Recognition.
+Received April 10, 2008."
+1183db5f409e8498d1a0f542703f908275a6dc34,Robust Visual Tracking and Vehicle Classification via Sparse Representation,"Robust Visual Tracking and Vehicle
+Classification via Sparse Representation
+Xue Mei and Haibin Ling, Member, IEEE"
+111f2f1255fa9e5a82753bf5b3f2f0974e87f86d,Development of an Efficient Face Recognition System based on Linear and Nonlinear Algorithms,"Development of an Efficient Face Recognition System based on Linear and Nonlinear Algorithms
+{tag} {/tag}
+International Journal of Computer Applications
+Foundation of Computer Science (FCS), NY, USA
+Volume 134
+Number 7
+Year of Publication: 2016
+Authors:
+Filani Araoluwa S., Adetunmbi Adebayo O.
+10.5120/ijca2016907932
+{bibtex}2016907932.bib{/bibtex}"
+11fe6d45aa2b33c2ec10d9786a71c15ec4d3dca8,Tied Factor Analysis for Face Recognition across Large Pose Differences,"JUNE 2008
+Tied Factor Analysis for Face Recognition
+across Large Pose Differences
+Simon J.D. Prince, Member, IEEE, James H. Elder, Member, IEEE,
+Jonathan Warrell, Member, IEEE, and Fatima M. Felisberti"
+1134a6be0f469ff2c8caab266bbdacf482f32179,Facial Expression Identification Using Four-bit Co- Occurrence Matrixfeatures and K-nn Classifier,"IJRET: International Journal of Research in Engineering and Technology eISSN: 2319-1163 | pISSN: 2321-7308
+FACIAL EXPRESSION IDENTIFICATION USING FOUR-BIT CO-
+OCCURRENCE MATRIXFEATURES AND K-NN CLASSIFIER
+Bonagiri C S K Sunil Kumar1, V Bala Shankar2, Pullela S V V S R Kumar3
+,2,3 Department of Computer Science & Engineering, Aditya College of Engineering, Surampalem, East Godavari
+District, Andhra Pradesh, India"
+11d9bee72759e23f19117fc8cbb60b487e8ac79e,Benchmark Visual Question Answer Models by using Focus Map,"Benchmark Visual Question Answer Models by using Focus Map
+Wenda Qiu
+Yueyang Xianzang
+Zhekai Zhang
+Shanghai Jiaotong University"
+1131088237aacddcc078547b4455e8572c61766b,Object Referring in Videos with Language and Human Gaze,"Object Referring in Videos with Language and Human Gaze
+Arun Balajee Vasudevan1, Dengxin Dai1, Luc Van Gool1,2
+ETH Zurich1
+KU Leuven 2"
+111a9645ad0108ad472b2f3b243ed3d942e7ff16,Facial Expression Classification Using Combined Neural Networks,"Facial Expression Classification Using
+Combined Neural Networks
+Rafael V. Santos, Marley M.B.R. Vellasco, Raul Q. Feitosa, Ricardo Tanscheit
+DEE/PUC-Rio, Marquês de São Vicente 225, Rio de Janeiro – RJ - Brazil"
+11d04269aa147450f37215beb3ae44207daf3511,Using Visual Context and Region Semantics for High-Level Concept Detection,"Using Visual Context and Region Semantics for
+High-Level Concept Detection
+Phivos Mylonas, Member, IEEE, Evaggelos Spyrou, Student Member, IEEE, Yannis Avrithis, Member, IEEE, and
+Stefanos Kollias, Member, IEEE"
+11a7c4aadb47753c8d30cbda4ab347c361e4c66a,How to collect high quality segmentations: use human or computer drawn object boundaries?,"Boston University Computer Science Technical Report No. BUCS-TR-2013-20
+How to Collect High Quality Segmentations: Use Human or Computer Drawn
+Object Boundaries?
+Danna Gurari, Zheng Wu, Brett Isenberg, Chentian Zhang, Alberto Purwada, Joyce Y. Wong, Margrit Betke"
+11f732fe8f127c393cc8404ee8db2b3e85dd3d59,Disentangling Latent Factors with Whitening,"DISENTANGLING LATENT FACTORS WITH WHITENING
+Sangchul Hahn, Heeyoul Choi
+School of Information Technology
+{schahn21,
+Handong Global University
+Pohang, South Korea"
+111d0b588f3abbbea85d50a28c0506f74161e091,Facial Expression Recognition from Visual Information using Curvelet Transform,"International Journal of Computer Applications (0975 – 8887)
+Volume 134 – No.10, January 2016
+Facial Expression Recognition from Visual Information
+using Curvelet Transform
+Pratiksha Singh
+Surabhi Group of Institution Bhopal
+systems. Further applications"
+1120e88663a38ed05120af378f57ecf557660160,Generic Object Crowd Tracking by Multi-Task Learning,"LUOETAL.:GENERICOBJECTCROWDTRACKINGBYMULTI-TASKLEARNING
+Generic Object Crowd Tracking by
+Multi-Task Learning
+Wenhan Luo
+http://www.iis.ee.ic.ac.uk/~whluo
+Tae-Kyun Kim
+http://www.iis.ee.ic.ac.uk/~tkkim
+Department of Electrical and Electronic
+Engineering, Imperial College,
+London, UK"
+11feb48d2c4c8f8a5ed9054d49e7a13b0f75f2af,Feature Representation and Extraction for Image Search and Video Retrieval,"Chapter 1
+Feature Representation and Extraction for
+Image Search and Video Retrieval
+Qingfeng Liu, Yukhe Lavinia, Abhishek Verma, Joyoung Lee, Lazar Spasovic, and
+Chengjun Liu"
+7d92d82eae23fe872e8d29116ae22cbd0b15abce,Joint Image Clustering and Labeling by Matrix Factorization,"Joint Image Clustering and Labeling
+by Matrix Factorization
+Seunghoon Hong, Jonghyun Choi, Jan Feyereisl, Bohyung Han, Larry S. Davis"
+7d98dcd15e28bcc57c9c59b7401fa4a5fdaa632b,Face Appearance Factorization for Expression Analysis and Synthesis,"FACE APPEARANCE FACTORIZATION FOR EXPRESSION ANALYSIS AND SYNTHESIS
+Bouchra Abboud, Franck Davoine
+Heudiasyc Laboratory, CNRS, University of Technology of Compi`egne.
+BP 20529, 60205 COMPIEGNE Cedex, FRANCE.
+E-mail:"
+7dce05b7765541b3fb49a144fb39db331c14fdd1,Modélisation et suivi des déformations faciales : applications à la description des expressions du visage dans le contexte de la langue des signes,"Modélisation et suivi des déformations faciales :
+applications à la description des expressions du visage
+dans le contexte de la langue des signes
+Hugo Mercier
+To cite this version:
+Hugo Mercier. Modélisation et suivi des déformations faciales : applications à la description des
+expressions du visage dans le contexte de la langue des signes.
+Interface homme-machine [cs.HC].
+Université Paul Sabatier - Toulouse III, 2007. Français. <tel-00185084>
+HAL Id: tel-00185084
+https://tel.archives-ouvertes.fr/tel-00185084
+Submitted on 5 Nov 2007
+HAL is a multi-disciplinary open access
+archive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+abroad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents"
+7da961cb039b1a01cad9b78d93bdfe2a69ed3ccf,Hierarchical Gaussian Descriptors with Application to Person Re-Identification,"Hierarchical Gaussian Descriptors with
+Application to Person Re-Identification
+Tetsu Matsukawa, Member, IEEE, Takahiro Okabe, Member, IEEE,
+Einoshin Suzuki, Non Member, IEEE and Yoichi Sato, Member, IEEE"
+7d7cfc8dc71967f93c2b5ec611747e63c06e1aa1,Crowd Counting and Profiling: Methodology and Evaluation,"Crowd Counting and Profiling: Methodology
+and Evaluation
+Chen Change Loy, Ke Chen, Shaogang Gong, and Tao Xiang"
+7d6539d637f919fa20a9261e03aedcf59f92598e,Improving Cross-Resolution Face Matching Using Ensemble-Based Co-Transfer Learning,"Improving Cross-resolution Face Matching using
+Ensemble based Co-Transfer Learning
+Himanshu S. Bhatt, Student Member, IEEE, Richa Singh, Senior Member, IEEE, Mayank Vatsa, Senior
+Member, IEEE, and Nalini K. Ratha, Fellow, IEEE"
+7dfedb083fadb6822c07be82233588c31f37317c,FPGA-based IP cores implementation for face recognition using dynamic partial reconfiguration,"J Real-Time Image Proc (2013) 8:327–340
+DOI 10.1007/s11554-011-0221-x
+S P E C I A L I S S U E
+FPGA-based IP cores implementation for face
+recognition using dynamic partial reconfiguration
+Afandi Ahmad • Abbes Amira • Paul Nicholl •
+Benjamin Krill
+Received: 8 October 2010 / Accepted: 22 August 2011 / Published online: 14 September 2011
+Ó Springer-Verlag 2011"
+7dba0e39bb059103e10fb81bce2fe831f520fb38,Articulated human pose estimation in natural images,"Articulated Human Pose Estimation
+in Natural Images
+Samuel Alan Johnson
+Submitted in accordance with the requirements
+for the degree of Doctor of Philosophy.
+The University of Leeds
+School of Computing
+October 2012"
+7db00be42ded44f87f23661c49913f9d64107983,2d Face Recognition: an Experimental and Reproducible Research Survey,"2D FACE RECOGNITION: AN
+EXPERIMENTAL AND REPRODUCIBLE
+RESEARCH SURVEY
+Manuel Günther Laurent El Shafey
+Sébastien Marcel
+Idiap-RR-13-2017
+APRIL 2017
+Centre du Parc, Rue Marconi 19, P.O. Box 592, CH - 1920 Martigny
+T +41 27 721 77 11 F +41 27 721 77 12 www.idiap.ch"
+7d5a83495c4eff62c98c3fd27d0992850611b2bd,Enhanced Performance of Consensus Fault-tolerant Schemes for Decentralized 363 Unmanned Autonomous Vehicle System —,"Proceedings of the Pakistan Academy of Sciences:
+A. Physical and Computational Sciences 53 (4): 363–372 (2016)
+Copyright © Pakistan Academy of Sciences
+ISSN: 2518-4245 (print), 2518-4253 (online)
+Pakistan Academy of Sciences
+Research Article
+Enhanced Performance of Consensus Fault-tolerant Schemes for
+Decentralized Unmanned Autonomous Vehicle System
+Naeem Khan*, Aitzaz Ali, and Wasi Ullah
+Campus, Pakistan
+*Electrical Engineering Department, University of Engineering and Technology Peshawar, Bannu"
+7d7f60e41dd9cb84ac5754d59e5a8b418fc7a685,Image Caption Generator Based On Deep Neural Networks,"Image Caption Generator Based On Deep Neural Networks
+Jianhui Chen
+CPSC 503
+CS Department
+Wenqiang Dong
+CPSC 503
+CS Department
+Minchen Li
+CPSC 540
+CS Department"
+7dab6fbf42f82f0f5730fc902f72c3fb628ef2f0,An Unsupervised Approach to Solving Inverse Problems using Generative Adversarial Networks,"An Unsupervised Approach to Solving Inverse
+Problems using Generative Adversarial Networks
+Rushil Anirudh
+Center for Applied Scientific Computing
+Lawrence Livermore National Laboratory
+Jayaraman J. Thiagarajan
+Center for Applied Scientific Computing
+Lawrence Livermore National Laboratory
+Bhavya Kailkhura
+Timo Bremer
+Center for Applied Scientific Computing
+Lawrence Livermore National Laboratory
+Center for Applied Scientific Computing
+Lawrence Livermore National Laboratory"
+7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22,Labeled Faces in the Wild: A Survey,"Labeled Faces in the Wild: A Survey
+Erik Learned-Miller, Gary Huang, Aruni RoyChowdhury, Haoxiang Li, Gang Hua"
+7d73adcee255469aadc5e926066f71c93f51a1a5,Face alignment by deep convolutional network with adaptive learning rate,"978-1-4799-9988-0/16/$31.00 ©2016 IEEE
+ICASSP 2016"
+7de028e5c878b56057559bfbd57f1ce6482ec282,An Architecture for Agile Machine Learning in Real-Time Applications,"An Architecture for Agile Machine Learning
+in Real-Time Applications
+Johann Schleier-Smith
+San Francisco, CA 94111
+if(we) Inc.
+848 Battery St."
+7d6132a884d2b154059c461e107c7a8c41603ef7,Exploring Multi-Branch and High-Level Semantic Networks for Improving Pedestrian Detection,"Exploring Multi-Branch and High-Level Semantic
+Networks for Improving Pedestrian Detection
+Jiale Cao, Yanwei Pang, Senior Member, IEEE, and Xuelong Li, Fellow, IEEE"
+7d9fe410f24142d2057695ee1d6015fb1d347d4a,Facial Expression Feature Extraction Based on FastLBP,"Facial Expression Feature Extraction Based on
+FastLBP
+Computer and Information Engineering Department of Beijing Technology and Business University, Beijing, China
+Ya Zheng
+Email:
+Computer and Information Engineering Department of Beijing Technology and Business University, Beijing, China
+Email:
+Xiuxin Chen, Chongchong Yu and Cheng Gao
+facial expression"
+7d9dbef9bacf1257e942121f82c3f411f2a78fff,Machine Learning Performance on Face Expression Recognition using Filtered Backprojection in DCT-PCA Domain,"Machine Learning Performance on Face Expression Recognition
+using Filtered Backprojection in DCT-PCA Domain.
+Ongalo Pheobe1, Huang DongJun2 and Richard Rimiru3
+1 School of Information Science and Engineering, Central South University
+Changsha, Hunan, 410083, PR China
+School of Information Science and Engineering, Central South University
+Changsha, Hunan, 410083, PR China
+School of Information Science and Engineering, Central South University
+Changsha, Hunan, 410083, PR China"
+7d841607ce29ff4a75734ffbf569431425d8342f,Bimodal 2D-3D face recognition using a two-stage fusion strategy,"Bimodal 2D-3D face recognition using a two-stage fusion
+strategy
+Amel AISSAOUI1 and Jean MARTINET2
+University of Science and Technologies
+Houari Boumediene
+Algiers, Algeria
+Email:
+CRIStAL
+Lille 1 University
+Villeneuve d’Ascq, France
+Email:"
+7dffe7498c67e9451db2d04bb8408f376ae86992,LEAR-INRIA submission for the THUMOS workshop,"LEAR-INRIA submission for the THUMOS workshop
+Heng Wang and Cordelia Schmid
+LEAR, INRIA, France"
+7d057676c9ba7b313adf0b191f64eb26ac2f9dd6,Variability in postnatal sex hormones due to the use of oral contraception and the phase of menstrual cycle influenced brain,"SEX DIFFERENCES AND THE ROLE OF SEX
+HORMONES IN FACE DEVELOPMENT AND FACE
+PROCESSING
+Klára Marečková, MSc.
+Thesis submitted to the University of Nottingham for the degree of
+Doctor of Philosophy
+JULY 2013"
+7dd654ac5e775fa1fa585e257565455ae8832caf,Deep Pictorial Gaze Estimation,"Deep Pictorial Gaze Estimation
+Seonwook Park, Adrian Spurr, and Otmar Hilliges
+AIT Lab, Department of Computer Science, ETH Zurich"
+7d3dd33950f4a1be56eb88c0791263b3e3a6deee,Object Counts! Bringing Explicit Detections Back into Image Captioning,"Object Counts! Bringing Explicit Detections Back into Image Captioning
+Josiah Wang, Pranava Madhyastha and Lucia Specia
+{j.k.wang, p.madhyastha,
+Department of Computer Science
+University of Sheffield, UK"
+7d3f6dd220bec883a44596ddec9b1f0ed4f6aca2,Linear Regression for Face Recognition,"Linear Regression for Face Recognition
+Imran Naseem,
+Roberto Togneri, Senior Member, IEEE, and
+Mohammed Bennamoun"
+7d30939e2d6f8b980910f4eeca5338d072f5ecb6,A Pylon Model for Semantic Segmentation,"A Pylon Model for Semantic Segmentation
+Victor Lempitsky
+Andrea Vedaldi
+Visual Geometry Group, University of Oxford∗
+Andrew Zisserman"
+7df103807902f45824329ab9b2a558b8baf950b2,Precise Localization in High-Definition Road Maps for Urban Regions,"Precise Localization in High-Definition Road Maps for Urban Regions
+Fabian Poggenhans1, Niels Ole Salscheider1 and Christoph Stiller2"
+294163a4126b3a886bf62ab896865ce3fc1147a8,Group Sparse Non-negative Matrix Factorization for Multi-Manifold Learning,BMVC 2011 http://dx.doi.org/10.5244/C.25.56
+29ce6b54a87432dc8371f3761a9568eb3c5593b0,Age Sensitivity of Face Recognition Algorithms,"Kent Academic Repository
+Full text document (pdf)
+Citation for published version
+Yassin, DK H. PHM and Hoque, Sanaul and Deravi, Farzin (2013) Age Sensitivity of Face Recognition
+pp. 12-15.
+https://doi.org/10.1109/EST.2013.8
+Link to record in KAR
+http://kar.kent.ac.uk/43222/
+Document Version
+Author's Accepted Manuscript
+Copyright & reuse
+Content in the Kent Academic Repository is made available for research purposes. Unless otherwise stated all
+content is protected by copyright and in the absence of an open licence (eg Creative Commons), permissions
+for further reuse of content should be sought from the publisher, author or other copyright holder.
+Versions of research
+The version in the Kent Academic Repository may differ from the final published version.
+Users are advised to check http://kar.kent.ac.uk for the status of the paper. Users should always cite the
+published version of record.
+Enquiries
+For any further enquiries regarding the licence status of this document, please contact:"
+295266d09fde8f85e6e577b5181cbc73a1594b6b,Parallel effects of processing fluency and positive affect on familiarity-based recognition decisions for faces,"ORIGINAL RESEARCH ARTICLE
+published: 22 April 2014
+doi: 10.3389/fpsyg.2014.00328
+Parallel effects of processing fluency and positive affect on
+familiarity-based recognition decisions for faces
+Devin Duke*, Chris M. Fiacconi and Stefan Köhler*
+Department of Psychology, Brain and Mind Institute, Western University, London, ON, Canada
+Edited by:
+Kevin Bradley Clark, Veterans Affairs
+Greater Los Angeles Healthcare
+System, USA
+Reviewed by:
+Bernhard Hommel, Leiden
+University, Netherlands
+Sascha Topolinski, Universität
+Würzburg, Germany
+*Correspondence:
+Devin Duke and Stefan Köhler,
+Department of Psychology, Brain
+and Mind Institute, Western
+299ca90452aa8a7dd517de3ff3c9bf224d5100c7,Dynamic Scene Classification Using Redundant Spatial Scenelets,"Dynamic Scene Classification Using Redundant
+Spatial Scenelets
+Liang Du and Haibin Ling, Member, IEEE"
+29a6cbf089a8d916b563e02480a1844909754bcf,"The rules of implicit evaluation by race, religion, and age.","The Rules of Implicit Evaluation by Race, Religion, and Age
+Axt JR, Ebersole CR, Nosek BA.
+2014; 25(9):1804-1815
+ARTICLE IDENTIFIERS
+DOI: 10.1177/0956797614543801
+PMID: 25079218
+PMCID: not available
+JOURNAL IDENTIFIERS
+LCCN: not available
+pISSN: 0956-7976
+eISSN: 1467-9280
+OCLC ID: not available
+CONS ID: not available
+US National Library of Medicine ID: not available
+This article was identified from a query of the SafetyLit database.
+Powered by TCPDF (www.tcpdf.org)"
+295d978cf47c873936ad774169cac651ea5f3c96,Monocular Depth Prediction using Generative Adversarial Networks,"018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops
+Monocular Depth Prediction using Generative Adversarial Networks
+Arun CS Kumar
+Suchendra M. Bhandarkar
+The University of Georgia
+Mukta Prasad
+Trinity College Dublin"
+2933da06df9e47da8e855266f5ff50e03c0ccd27,Combination of RGB-D Features for Head and Upper Body Orientation Classification,"Combination of RGB-D Features for Head and Upper
+Body Orientation Classification
+Laurent Fitte-Duval, Alhayat Ali Mekonnen, Frédéric Lerasle
+To cite this version:
+Laurent Fitte-Duval, Alhayat Ali Mekonnen, Frédéric Lerasle. Combination of RGB-D Features for
+Head and Upper Body Orientation Classification. Advanced Concepts for Intelligent Vision Systems
+, Oct 2016, Lecce, Italy. 2016. <hal-01763125>
+HAL Id: hal-01763125
+https://hal.laas.fr/hal-01763125
+Submitted on 10 Apr 2018
+HAL is a multi-disciplinary open access
+archive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+abroad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de"
+292c4bd6fa516393e9c8c5f1dae5afe0bb0ece35,Interacting Multiview Tracker,"Interacting Multiview Tracker
+Ju Hong Yoon, Ming-Hsuan Yang, Senior Member, IEEE, and Kuk-Jin Yoon"
+292eba47ef77495d2613373642b8372d03f7062b,Deep Secure Encoding: An Application to Face Recognition,"Deep Secure Encoding: An Application to Face Recognition
+Rohit Pandey
+Yingbo Zhou
+Venu Govindaraju"
+296afa5f7e99fc16df47f961c9539347732f7b13,GradNorm: Gradient Normalization for Adaptive Loss Balancing in Deep Multitask Networks,"GradNorm: Gradient Normalization for Adaptive
+Loss Balancing in Deep Multitask Networks
+Zhao Chen 1 Vijay Badrinarayanan 1 Chen-Yu Lee 1 Andrew Rabinovich 1"
+29e96ec163cb12cd5bd33bdf3d32181c136abaf9,Regularized Locality Preserving Projections with Two-Dimensional Discretized Laplacian Smoothing,"Report No. UIUCDCS-R-2006-2748
+UILU-ENG-2006-1788
+Regularized Locality Preserving Projections with Two-Dimensional
+Discretized Laplacian Smoothing
+Deng Cai, Xiaofei He, and Jiawei Han
+July 2006"
+29933de38d72a0941d763b7ac5a480e733ef74a2,Open Set Logo Detection and Retrieval,"Open Set Logo Detection and Retrieval
+Andras T¨uzk¨o1, Christian Herrmann1,2, Daniel Manger1, J¨urgen Beyerer1,2
+Fraunhofer IOSB, Karlsruhe, Germany
+Karlsruhe Institute of Technology KIT, Vision and Fusion Lab, Karlsruhe, Germany
+Keywords:
+Logo Detection, Logo Retrieval, Logo Dataset, Trademark Retrieval, Open Set Retrieval, Deep Learning."
+290c8196341bbac80efc8c89af5fc60e1b8c80e6,Learning deep representations by mutual information estimation and maximization,"Learning deep representations by mutual information
+estimation and maximization
+R Devon Hjelm
+MSR Montreal, MILA, UdeM, IVADO
+Alex Fedorov
+MRN, UNM
+Samuel Lavoie-Marchildon
+MILA, UdeM
+Karan Grewal
+U Toronto
+Phil Bachman
+MSR Montreal
+Adam Trischler
+MSR Montreal
+Yoshua Bengio
+MILA, UdeM, IVADO, CIFAR"
+29e793271370c1f9f5ac03d7b1e70d1efa10577c,Face Recognition Based on Multi-classifierWeighted Optimization and Sparse Representation,"International Journal of Signal Processing, Image Processing and Pattern Recognition
+Vol.6, No.5 (2013), pp.423-436
+http://dx.doi.org/10.14257/ijsip.2013.6.5.37
+Face Recognition Based on Multi-classifierWeighted Optimization
+and Sparse Representation
+Deng Nan1, Zhengguang Xu2 and ShengQin Bian3
+,2,3Institute of control science and engineering,
+University of Science and Technology Beijing
+,2,330 Xueyuan Road, Haidian District, Beijing 100083 P. R.China"
+294eef6848403520016bb2c93bfb71b3c75c73fa,Extension of Robust Principal Component Analysis for Incremental Face Recognition,"Extension of Robust Principal Component Analysis for Incremental Face
+Recognition
+Ha¨ıfa Nakouri and Mohamed Limam
+Institut Sup´erieur de Gestion, LARODEC Laboratory
+University of Tunis, Tunis, Tunisia
+Keywords:
+Image alignment, Robust Principal Component Analysis, Incremental RPCA."
+29c23c7d5d70aef54168ba20dccdd14f570901a3,Duplicate Discovery on 2 Billion Internet Images,"Duplicate Discovery on 2 Billion Internet Images
+Xin-Jing Wang, Lei Zhang
+Microsoft Research Asia
+5 Danling Street, Beijing, China
+fxjwang,"
+29c7dfbbba7a74e9aafb6a6919629b0a7f576530,Automatic Facial Expression Analysis and Emotional Classification,"Automatic Facial Expression Analysis and Emotional
+Classification
+Robert Fischer
+Submitted to the Department of Math and Natural Sciences
+in partial fulfillment of the requirements for the degree of a
+Diplomingenieur der Optotechnik und Bildverarbeitung (FH)
+(Diplom Engineer of Photonics and Image Processing)
+at the
+UNIVERSITY OF APPLIED SCIENCE DARMSTADT (FHD)
+Accomplished and written at the
+MASSACHUSETTS INSTITUTE OF TECHNOLOGY (MIT)
+October 2004
+Author . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+Department of Math and Natural Sciences
+October 30, 2004
+Certified by . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+Dr. Harald Scharfenberg
+Professor at FHD
+Thesis Supervisor
+Accepted by . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . ."
+292c6b743ff50757b8230395c4a001f210283a34,Fast violence detection in video,"Fast Violence Detection in Video
+O. Deniz1, I. Serrano1, G. Bueno1 and T-K. Kim2
+VISILAB group, University of Castilla-La Mancha, E.T.S.I.Industriales, Avda. Camilo Jose Cela s.n, 13071 Spain
+Department of Electrical and Electronic Engineering, Imperial College, South Kensington Campus, London SW7 2AZ, UK.
+{oscar.deniz, ismael.serrano,
+Keywords:
+action recognition, violence detection, fight detection
+293ca770a66313c9427dc71cf86bef7e1b94f2d9,Steerable part models,"Steerable Part Models
+Hamed Pirsiavash Deva Ramanan
+Department of Computer Science, University of California, Irvine"
+29a46aed79df53a1984ee755bed4c8ba2ae94040,Multiple Object Tracking Using K-Shortest Paths Optimization,"Multiple Object Tracking using
+K-Shortest Paths Optimization
+J´erˆome Berclaz, Franc¸ois Fleuret, Engin T¨uretken, and Pascal Fua, Senior Member, IEEE"
+29cf7937a1c1848c24b294569d50a2f7122de51b,MarioQA: Answering Questions by Watching Gameplay Videos,"MarioQA: Answering Questions by Watching Gameplay Videos
+Jonghwan Mun*
+Bohyung Han
+Paul Hongsuck Seo*
+Ilchae Jung
+Department of Computer Science and Engineering, POSTECH, Korea
+{choco1916, hsseo, chey0313,"
+29b1a44d1e1ffa05c2bf7f4be931c5045f427718,Review on Generic Object Recognition Techniques : Challenges and Opportunities,"International Journal of Advanced Research in Engineering and Technology
+(IJARET)
+Volume 6, Issue 12, Dec 2015, pp. 104-133, Article ID: IJARET_06_12_010
+Available online at
+http://www.iaeme.com/IJARET/issues.asp?JType=IJARET&VType=6&IType=12
+ISSN Print: 0976-6480 and ISSN Online: 0976-6499
+© IAEME Publication
+REVIEW ON GENERIC OBJECT
+RECOGNITION TECHNIQUES:
+CHALLENGES AND OPPORTUNITIES
+Prof. Deepika Shukla
+Comp. Science and Engineering Department,
+Institute of Technology, Nirma University, Ahmedabad, India
+Apurva Desai
+Department of Computer Science and Information Technology,
+VNSGU, Surat India"
+294d1fa4e1315e1cf7cc50be2370d24cc6363a41,A modular non-negative matrix factorization for parts-based object recognition using subspace representation,"008 SPIE Digital Library -- Subscriber Archive Copy
+Processing: Machine Vision Applications, edited by Kurt S. Niel, David Fofi, Proc. of SPIE-IS&T Electronic Imaging, SPIE Vol. 6813, 68130C, © 2008 SPIE-IS&T · 0277-786X/08/$18SPIE-IS&T/ Vol. 6813 68130C-1"
+29d414bfde0dfb1478b2bdf67617597dd2d57fc6,Perfect histogram matching PCA for face recognition,"Multidim Syst Sign Process (2010) 21:213–229
+DOI 10.1007/s11045-009-0099-y
+Perfect histogram matching PCA for face recognition
+Ana-Maria Sevcenco · Wu-Sheng Lu
+Received: 10 August 2009 / Revised: 21 November 2009 / Accepted: 29 December 2009 /
+Published online: 14 January 2010
+© Springer Science+Business Media, LLC 2010"
+29c5a44e01d1126505471b2ab46163d598c871c7,Improving Landmark Localization with Semi-Supervised Learning,"Improving Landmark Localization with Semi-Supervised Learning
+Sina Honari1∗, Pavlo Molchanov2, Stephen Tyree2, Pascal Vincent1,4,5, Christopher Pal1,3, Jan Kautz2
+MILA-University of Montreal, 2NVIDIA, 3Ecole Polytechnique of Montreal, 4CIFAR, 5Facebook AI Research.
+{honaris,
+{pmolchanov, styree,"
+29230bbb447b39b7fc3de7cb34b313cc3afe0504,Face Detection and Recognition Using Maximum Likelihood Classifiers on Gabor Graphs,"SPI-J068 00721
+International Journal of Pattern Recognition
+nd Artificial Intelligence
+Vol. 23, No. 3 (2009) 433–461
+(cid:1) World Scientific Publishing Company
+FACE DETECTION AND RECOGNITION USING MAXIMUM
+LIKELIHOOD CLASSIFIERS ON GABOR GRAPHS
+MANUEL G ¨UNTHER and ROLF P. W ¨URTZ
+Institut f¨ur Neuroinformatik
+Ruhr-Universit¨at Bochum
+D–44780 Bochum, Germany
+We present an integrated face recognition system that combines a Maximum Likelihood
+(ML) estimator with Gabor graphs for face detection under varying scale and in-plane
+rotation and matching as well as a Bayesian intrapersonal/extrapersonal classifier (BIC)
+on graph similarities for face recognition. We have tested a variety of similarity functions
+nd achieved verification rates (at FAR 0.1%) of 90.5% on expression-variation and 95.8%
+on size-varying frontal images within the CAS-PEAL database. Performing Experiment 1
+of FRGC ver2.0, the method achieved a verification rate of 72%.
+Keywords: Face recognition; Maximum Likelihood estimators; Gabor graphs.
+. Introduction"
+2939169aed69aa2626c5774d9b20e62c905e479b,Fast Exact HyperGraph Matching with Dynamic Programming for Spatio-Temporal Data,"Fast Exact Hyper-Graph Matching with Dynamic
+Programming for Spatio-Temporal Data
+Oya Celiktutan, Christian Wolf, Bülent Sankur, Eric Lombardi
+To cite this version:
+Oya Celiktutan, Christian Wolf, Bülent Sankur, Eric Lombardi. Fast Exact Hyper-Graph Matching
+with Dynamic Programming for Spatio-Temporal Data. Journal of Mathematical Imaging and Vision,
+Springer Verlag, 2015, 51, pp.1-21. <hal-01151755>
+HAL Id: hal-01151755
+https://hal.archives-ouvertes.fr/hal-01151755
+Submitted on 13 May 2015
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de"
+29107badb19e7c5c89f57f81f50df08422e53304,Automatic localisation and segmentation of the Left Ventricle in Cardiac Ultrasound Images,"MASTER THESIS
+Automatic localisation and
+segmentation of the Left Ventricle in
+Cardiac Ultrasound Images
+Presented by:
+Esther PUYOL
+IG 3A F4B and MR 2A SISEA
+013/2014
+Supervisor:
+Paolo PIRO
+Academic supervisor:
+Guy CAZUGUEL
+MEDISYS - PHILIPS RESEARCH PARIS
+Company:
+University:
+TELECOM BRETAGNE
+7th March - 12th September 2014"
+29113ed00421953e0ddc4fa6784eaba60f05e801,Automatic Track Creation and Deletion Framework for Face Tracking,"IJCSNS International Journal of Computer Science and Network Security, VOL.15 No.2, February 2015
+Automatic Track Creation and Deletion Framework for Face
+Tracking
+Dept. of Information and Communication, St.Xavier’s Catholic College of Engineering, Nagercoil, Tamilnadu, India.
+Renimol T G, Anto Kumar R.P"
+290136947fd44879d914085ee51d8a4f433765fa,On a taxonomy of facial features,"On a Taxonomy of Facial Features
+Brendan Klare and Anil K. Jain"
+2957715e96a18dbb5ed5c36b92050ec375214aa6,InclusiveFaceNet: Improving Face Attribute Detection with Race and Gender Diversity,"Improving Face Attribute Detection with Race and Gender Diversity
+InclusiveFaceNet:
+Hee Jung Ryu 1 Hartwig Adam * 1 Margaret Mitchell * 1"
+29dbb9492292b574f7bfd8629d6801d3136887b7,Towards Autonomous Situation Awareness,"Towards Autonomous Situation Awareness
+Nikhil Naikal
+Electrical Engineering and Computer Sciences
+University of California at Berkeley
+Technical Report No. UCB/EECS-2014-124
+http://www.eecs.berkeley.edu/Pubs/TechRpts/2014/EECS-2014-124.html
+May 21, 2014"
+29b3f9f0fb821883a3c3bccbf0337c242c3b8a64,Transfer Learning for Video Recognition with Scarce Training Data,"Transfer Learning for Video Recognition
+with Scarce Training Data
+for Deep Convolutional Neural Network
+Yu-Chuan Su, Tzu-Hsuan Chiu, Chun-Yen Yeh, Hsin-Fu Huang, Winston H. Hsu"
+29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d,SCface – surveillance cameras face database,"Multimed Tools Appl (2011) 51:863–879
+DOI 10.1007/s11042-009-0417-2
+SCface – surveillance cameras face database
+Mislav Grgic & Kresimir Delac & Sonja Grgic
+Published online: 30 October 2009
+# Springer Science + Business Media, LLC 2009"
+299af7d4fe6da8ac0b390e3ce45c48f7a8b5bb37,"Attribute And-Or Grammar for Joint Parsing of Human Attributes, Part and Pose","Attribute And-Or Grammar for Joint Parsing of
+Human Attributes, Part and Pose
+Seyoung Park, Bruce Xiaohan Nie and Song-Chun Zhu"
+29633712a36c3efc77ce3a9844a2e9a029daf310,AdaBoost for Parking Lot Occupation Detection,"AdaBoost for Parking Lot Occupation
+Detection
+Radovan Fusek1, Karel Mozdˇreˇn1, Milan ˇSurkala1 and Eduard Sojka1"
+29619496c688f8400a90fef79b4fa756967ed0f7,Head Gesture Recognition: A Literature Review,"International Conference on Innovative Research in Engineering, Science, Management and Humanities (ICIRESMH-2017)
+t (IETE) Institution of Electronics and Telecommunication Engineers, Lodhi Road, Delhi, India
+on 19th February 2017
+ISBN: 978-81-932712-5-4
+Head Gesture Recognition: A Literature Review
+Er. Rushikesh T. Bankar
+Ph. D Scholar,
+Department of Electronics Engineering,
+G. H. Raisoni College of Engineering,
+Nagpur, India.
+Dr. Suresh S. Salankar
+Dean SAC & Professor,
+Department of E&TC Engineering,
+G. H. Raisoni College of Engineering,
+Nagpur, India."
+2965d092ed72822432c547830fa557794ae7e27b,Improving Representation and Classification of Image and Video Data for Surveillance Applications,"Improving Representation and Classification of Image and
+Video Data for Surveillance Applications
+Andres Sanin
+BSc(Biol), MSc(Biol), MSc(CompSc)
+A thesis submitted for the degree of Doctor of Philosophy at
+The University of Queensland in 2012
+School of Information Technology and Electrical Engineering"
+29bd7de310438c2b9d8b6e7eb7df662079934747,Semantic Scene Mapping with Spatio-temporal Deep Neural Network for Robotic Applications,"Cogn Comput
+https://doi.org/10.1007/s12559-017-9526-9
+Semantic Scene Mapping with Spatio-temporal Deep Neural
+Network for Robotic Applications
+Ruihao Li1
+· Dongbing Gu1 · Qiang Liu1 · Zhiqiang Long2 · Huosheng Hu1
+Received: 25 September 2017 / Accepted: 31 October 2017
+© Springer Science+Business Media, LLC, part of Springer Nature 2017"
+29c6b06ac98dbdaf25e4cc9a05b4ab314923cccd,Assessment of the communicative and coordination skills of children with Autism Spectrum Disorders and typically developing children using social signal processing,"Research in Autism Spectrum Disorders 7 (2013) 741–756
+Contents lists available at SciVerse ScienceDirect
+Research in Autism Spectrum Disorders
+J o u r n a l h o m e p a g e : h t t p : / / e e s . e l s e v i e r . c o m / R A S D / d e f a u l t . a s p
+Assessment of the communicative and coordination skills of
+hildren with Autism Spectrum Disorders and typically
+developing children using social signal processing
+Emilie Delaherche a, Mohamed Chetouani a, Fabienne Bigouret b,c, Jean Xavier c,
+Monique Plaza a, David Cohen a,c,*
+Institute of Intelligent Systems and Robotics, University Pierre and Marie Curie, 75005 Paris, France
+University of Paris 8, 93526 Saint-Denis, France
+Department of Child and Adolescent Psychiatry, Hoˆpital de la Pitie´-Salpeˆtrie`re, University Pierre and Marie Curie, 75013 Paris, France
+A R T I C L E
+I N F O
+A B S T R A C T
+Article history:
+Received 27 November 2012
+Received in revised form 5 February 2013
+Accepted 8 February 2013
+Keywords:"
+29ca8ddf79d4cd1dc20cc8160a6d3326933e943f,Pragmatic descriptions of perceptual stimuli,"Proceedings of the Student Research Workshop at the 15th Conference of the European Chapter of the Association for Computational Linguistics,
+pages 1–10, Valencia, Spain, April 3-7 2017. c(cid:13)2017 Association for Computational Linguistics"
+2921719b57544cfe5d0a1614d5ae81710ba804fa,Face Recognition Enhancement Based on Image File Formats and Wavelet De - noising,"Face Recognition Enhancement Based on Image
+File Formats and Wavelet De-noising
+Isra’a Abdul-Ameer Abdul-Jabbar, Jieqing Tan, and Zhengfeng Hou"
+2914a20df10f3bb55c5d4764ece85101c1a3e5a8,User interest profiling using tracking-free coarse gaze estimation,"User Interest Profiling Using
+Tracking-free Coarse Gaze Estimation
+Federico Bartoli, Giuseppe Lisanti, Lorenzo Seidenari, Alberto Del Bimbo
+Media Integration and Communication Center
+Universit`a degli Studi di Firenze
+Firenze, Italy"
+291be6e3027575287c24f4363e4bf7a8b415d4c1,MSER-Based Real-Time Text Detection and Tracking,"To appear in the proceedings of the 2014 International Conference on Pattern Recognition.
+MSER-based Real-Time Text Detection and Tracking
+Llu´ıs G´omez and Dimosthenis Karatzas
+Computer Vision Center
+Universitat Aut`onoma de Barcelona
+Email:"
+29a013b2faace976f2c532533bd6ab4178ccd348,Hierarchical Manifold Learning With Applications to Supervised Classification for High-Resolution Remotely Sensed Images,"This article has been accepted for inclusion in a future issue of this journal. Content is final as presented, with the exception of pagination.
+Hierarchical Manifold Learning With Applications
+to Supervised Classification for High-Resolution
+Remotely Sensed Images
+Hong-Bing Huang, Hong Huo, and Tao Fang"
+29756b6b16d7b06ea211f21cdaeacad94533e8b4,Thresholding Approach based on GPU for Facial Expression Recognition,"Thresholding Approach based on GPU for Facial
+Expression Recognition
+Jesús García-Ramírez1, J. Arturo Olvera-López1, Ivan Olmos-Pineda1, Georgina
+Flores-Becerra2, Adolfo Aguilar-Rico2
+Benemérita Universidad Autónoma de Puebla, Faculty of Computer Science, Puebla, México
+Instituto Tecnológico de Puebla, Puebla, México"
+293193d24d5c4d2975e836034bbb2329b71c4fe7,Building a Corpus of Facial Expressions for Learning-Centered Emotions,"Building a Corpus of Facial Expressions
+for Learning-Centered Emotions
+María Lucía Barrón-Estrada, Ramón Zatarain-Cabada,
+Bianca Giovanna Aispuro-Medina, Elvia Minerva Valencia-Rodríguez,
+Ana Cecilia Lara-Barrera
+Instituto Tecnológico de Culiacán, Culiacán, Sinaloa,
+Mexico
+{lbarron, rzatarain, m06170904, m95170906, m15171452}"
+294bd7eb5dc24052237669cdd7b4675144e22306,Automatic Face Annotation,"International Journal of Science and Research (IJSR)
+ISSN (Online): 2319-7064
+Index Copernicus Value (2013): 6.14 | Impact Factor (2013): 4.438
+Automatic Face Annotation
+Ashna Shajahan
+M.Tech Student, Dept. of Computer Science & Engineering, Mount Zion College of Engineering, Pathanamthitta, Kerala, India"
+296502c6370cabd2b7e38e71cfc757d2e5fa2199,Detection of Deep Network Generated Images Using Disparities in Color Components,"Detection of Deep Network Generated Images
+Using Disparities in Color Components
+Haodong Li, Bin Li, Shunquan Tan, Jiwu Huang"
+2988f24908e912259d7a34c84b0edaf7ea50e2b3,A Model of Brightness Variations Due to Illumination Changes and Non-rigid Motion Using Spherical Harmonics,"A Model of Brightness Variations Due to
+Illumination Changes and Non-rigid Motion
+Using Spherical Harmonics
+Jos´e M. Buenaposada
+Alessio Del Bue
+Dep. Ciencias de la Computaci´on,
+U. Rey Juan Carlos, Spain
+http://www.dia.fi.upm.es/~pcr
+Inst. for Systems and Robotics
+Inst. Superior T´ecnico, Portugal
+http://www.isr.ist.utl.pt/~adb
+Enrique Mu˜noz
+Facultad de Inform´atica,
+U. Complutense de Madrid, Spain
+Luis Baumela
+Dep. de Inteligencia Artificial,
+U. Polit´ecnica de Madrid, Spain
+http://www.dia.fi.upm.es/~pcr
+http://www.dia.fi.upm.es/~pcr"
+29d591806cdc6ef0d580e4a21f32e5ad9d09d148,Large scale image annotation: learning to rank with joint word-image embeddings,"Large Scale Image Annotation:
+Learning to Rank with Joint Word-Image
+Embeddings
+Jason Weston1, Samy Bengio1, and Nicolas Usunier2
+Google, USA
+Universit´e Paris 6, LIP6, France"
+29f46586c95af2fa6326724c867aa88b55b5400e,Failure Prediction for Autonomous Driving,"Failure Prediction for Autonomous Driving
+Simon Hecker1, Dengxin Dai1, and Luc Van Gool1,2"
+7c9d8593cdf2f8ba9f27906b2b5827b145631a0b,MsCGAN: Multi-scale Conditional Generative Adversarial Networks for Person Image Generation,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, OCTOBER 2018
+MsCGAN: Multi-scale Conditional Generative
+Adversarial Networks for Person Image
+Generation
+Wei Tang∗, Teng Li
+† Anhui University, HeFei, China
+Hefei University, HeFei, China
+§ Hefei University of Technology, HeFei, China"
+7c4864065f4e107cb5be49a8dba8cf7d94b8340f,Multi-target Tracking by Lagrangian Relaxation to Min-cost Network Flow,"Multi-target Tracking by Lagrangian Relaxation to Min-Cost Network Flow
+Asad A. Butt and Robert T. Collins
+The Pennsylvania State University, University Park, PA. 16802, USA"
+7c1db13ae2c62d1f860fd2664885c9c93a28cab8,Multistage Particle Windows for Fast and Accurate Object Detection,"This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication.
+Multi-Stage Particle Windows for Fast and
+Accurate Object Detection
+Giovanni Gualdi, Andrea Prati, Member, IEEE, and Rita Cucchiara, Member, IEEE"
+7cee802e083c5e1731ee50e731f23c9b12da7d36,2^B3^C: 2 Box 3 Crop of Facial Image for Gender Classification with Convolutional Networks,"B3C: 2 Box 3 Crop of Facial Image for Gender Classification with Convolutional
+Networks
+Vandit Gajjar
+Department of Electronics and Communication Engineering and
+Computer Vision Group, L. D. College of Engineering, Ahmedabad, India"
+7c47da191f935811f269f9ba3c59556c48282e80,Robust eye centers localization with zero-crossing encoded image projections,"Robust Eye Centers Localization
+with Zero–Crossing Encoded Image Projections
+Laura Florea
+Image Processing and Analysis Laboratory
+University ”Politehnica” of Bucharest, Romania, Address Splaiul Independent¸ei 313
+Corneliu Florea
+Image Processing and Analysis Laboratory
+University ”Politehnica” of Bucharest, Romania, Address Splaiul Independent¸ei 313
+Constantin Vertan
+Image Processing and Analysis Laboratory
+University ”Politehnica” of Bucharest, Romania, Address Splaiul Independent¸ei 313"
+7cee2a2bee27657e6599b13f9ed6536d5f46fd0a,A Semantic Labeling Approach for Accurate Weed Mapping of High Resolution UAV Imagery,"Article
+A Semantic Labeling Approach for Accurate Weed
+Mapping of High Resolution UAV Imagery
+Huasheng Huang 1,2,†, Yubin Lan 1,2,†, Jizhong Deng 1,2,*, Aqing Yang 3, Xiaoling Deng 2,3,
+Lei Zhang 2,4 and Sheng Wen 2,5
+College of Engineering, South China Agricultural University, Wushan Road, Guangzhou 510642, China;
+(H.H.); (Y.L.)
+National Center for International Collaboration Research on Precision Agricultural Aviation Pesticide
+Spraying Technology, Wushan Road, Guangzhou 510642, China; (X.D.);
+(L.Z.); (S.W.)
+College of Electronic Engineering, South China Agricultural University, Wushan Road, Guangzhou 516042,
+China;
+College of Agriculture, South China Agricultural University, Wushan Road, Guangzhou 516042, China
+Engineering Fundamental Teaching and Training Center, South China Agricultural University,
+Wushan Road, Guangzhou 510642, China
+* Correspondence: Tel.: +86-20-8528-8201
+These authors contributed equally to this work and should be considered as co-first authors.
+Received: 13 May 2018; Accepted: 27 June 2018; Published: 1 July 2018"
+7c25a4b2eaa7bf0bc4e0bd239f05d6c0d4cb3431,Fast Appearance-based Person Re-identification and Retrieval Using Dissimilarity Representations,"Fast Appearance-based Person Re-identification
+nd Retrieval Using Dissimilarity
+Representations
+Riccardo Satta, Giorgio Fumera, and Fabio Roli
+Dept. of Electrical and Electronic Engineering, University of Cagliari
+Piazza d’Armi, 09123 Cagliari, Italy
+e-mail: {satta, fumera,
+WWW: http://prag.diee.unica.it"
+7c45b5824645ba6d96beec17ca8ecfb22dfcdd7f,News Image Annotation on a Large Parallel Text-image Corpus,"News image annotation on a large parallel text-image corpus
+Pierre Tirilly, Vincent Claveau, Patrick Gros
+Universit´e de Rennes 1/IRISA, CNRS/IRISA, INRIA Rennes-Bretagne Atlantique
+Campus de Beaulieu
+5042 Rennes Cedex, France"
+7c18965f5573020f32b151a08178ee4906b5bf4c,Recursive Coarse-to-Fine Localization for Fast Object Detection,"Recursive Coarse-to-Fine Localization
+for fast Object Detection
+Marco Pedersoli, Jordi Gonz`alez, Andrew D. Bagdanov, and Juan J. Villanueva
+Dept. Ci`encies de la Computaci´o & Centre de Visi´o per Computador,
+Edifici O, Campus UAB 08193 Bellaterra (Cerdanyola) Barcelona, Spain"
+7c0a6824b556696ad7bdc6623d742687655852db,MPCA+MDA: A novel approach for face recognition based on tensor objects,"8th Telecommunications forum TELFOR 2010
+Serbia, Belgrade, November 23-25, 2010.
+MPCA+DATER: A Novel Approach for Face
+Recognition Based on Tensor Objects
+Ali. A. Shams Baboli, Member, IEEE, G. Rezai-rad, Member, IEEE, Aref. Shams Baboli"
+7c95449a5712aac7e8c9a66d131f83a038bb7caa,This is an author produced version of Facial first impressions from another angle: How social judgements are influenced by changeable and invariant facial properties. White Rose Research Online URL for this paper: http://eprints.whiterose.ac.uk/102935/,"This is an author produced version of Facial first impressions from another angle: How
+social judgements are influenced by changeable and invariant facial properties.
+White Rose Research Online URL for this paper:
+http://eprints.whiterose.ac.uk/102935/
+Article:
+Sutherland, Clare, Young, Andrew William orcid.org/0000-0002-1202-6297 and Gillian,
+Rhodes (2017) Facial first impressions from another angle: How social judgements are
+influenced by changeable and invariant facial properties. British journal of psychology. pp.
+97-415. ISSN 0007-1269
+https://doi.org/10.1111/bjop.12206
+promoting access to
+White Rose research papers
+http://eprints.whiterose.ac.uk/"
+7caca02d3c61271d22c43580677acb6d52b23503,What Makes Good Synthetic Training Data for Learning Disparity and Optical Flow Estimation?,"IJCV VISI manuscript No.
+(will be inserted by the editor)
+What Makes Good Synthetic Training Data for Learning
+Disparity and Optical Flow Estimation?
+Nikolaus Mayer · Eddy Ilg · Philipp Fischer · Caner Hazirbas · Daniel
+Cremers · Alexey Dosovitskiy · Thomas Brox
+Received: date / Accepted: date"
+7c3e09e0bd992d3f4670ffacb4ec3a911141c51f,Transferring Object-Scene Convolutional Neural Networks for Event Recognition in Still Images,"Noname manuscript No.
+(will be inserted by the editor)
+Transferring Object-Scene Convolutional Neural Networks for
+Event Recognition in Still Images
+Limin Wang · Zhe Wang · Yu Qiao · Luc Van Gool
+Received: date / Accepted: date"
+7c98c27f4be40a7675ba9c85179ce72d12593a7a,Training Bit Fully Convolutional Network for Fast Semantic Segmentation,"Training Bit Fully Convolutional Network for Fast Semantic Segmentation
+He Wen and Shuchang Zhou and Zhe Liang and Yuxiang Zhang and Dieqiao Feng and Xinyu Zhou and Cong Yao
+{wenhe, zsc, liangzhe, zyx, fdq, zxy,
+Megvii Inc."
+7c7b0550ec41e97fcfc635feffe2e53624471c59,"Head, Eye, and Hand Patterns for Driver Activity Recognition","051-4651/14 $31.00 © 2014 IEEE
+DOI 10.1109/ICPR.2014.124"
+7c8d57ca9cbefd1c2b3f4d45ab6791adba2d6bb4,Two-Stage Hashing for Fast Document Retrieval,"Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics (Short Papers), pages 495–500,
+Baltimore, Maryland, USA, June 23-25 2014. c(cid:13)2014 Association for Computational Linguistics"
+7c119e6bdada2882baca232da76c35ae9b5277f8,Facial expression recognition using embedded Hidden Markov Model,"Facial Expression Recognition Using Embedded
+Hidden Markov Model
+Languang He, Xuan Wang, Member, IEEE, Chenglong Yu, Member, IEEE, Kun Wu
+Intelligence Computing Research Center
+HIT Shenzhen Graduate School
+Shenzhen, China
+{telent, wangxuan, ycl, wukun}"
+7cd5d849212c294c452be009ff465ca7d3d923c8,A Brief Survey of Face Recognition Techniques,"(cid:1)(cid:1)(cid:1)(cid:1)(cid:1)(cid:1)(cid:1)(cid:1)(cid:1)(cid:1)(cid:1)(cid:1)(cid:1)(cid:1)(cid:1)(cid:1)(cid:1)(cid:1)(cid:1)(cid:1)(cid:2)(cid:2)(cid:3)(cid:3)(cid:4)(cid:4)(cid:5)(cid:5)(cid:6)(cid:6)(cid:7)(cid:7)(cid:1)(cid:1)(cid:8)(cid:8)(cid:1)(cid:1)(cid:9)(cid:9)(cid:1)(cid:1)(cid:10)(cid:10)(cid:5)(cid:5)(cid:6)(cid:6)(cid:11)(cid:11)(cid:7)(cid:7)(cid:12)(cid:12)(cid:1)(cid:1)(cid:13)(cid:13)(cid:1)(cid:1)(cid:14)(cid:14)(cid:15)(cid:15)(cid:12)(cid:12)(cid:16)(cid:16)(cid:17)(cid:17)(cid:1)(cid:1)(cid:13)(cid:13)(cid:18)(cid:18)(cid:19)(cid:19)(cid:20)(cid:20)(cid:1)(cid:1)(cid:21)(cid:21)(cid:1)(cid:1)(cid:22)(cid:22)(cid:7)(cid:7)(cid:23)(cid:23)(cid:24)(cid:24)(cid:1)(cid:1)(cid:13)(cid:13)(cid:18)(cid:18)(cid:19)(cid:19)(cid:20)(cid:20)(cid:1)(cid:1)(cid:23)(cid:23)(cid:23)(cid:23)(cid:25)(cid:25)(cid:1)(cid:1)(cid:13)(cid:13)(cid:18)(cid:18)(cid:21)(cid:21)(cid:26)(cid:26)(cid:27)(cid:27)(cid:1)(cid:1)(cid:1)(cid:1)(cid:1)(cid:1)(cid:1)(cid:1)(cid:1)(cid:1)(cid:1)(cid:1)(cid:1)(cid:1)
+(cid:15)(cid:15)(cid:28)(cid:28)(cid:15)(cid:15)(cid:29)(cid:29)(cid:4)(cid:4)(cid:15)(cid:15)(cid:11)(cid:11)(cid:4)(cid:4)(cid:7)(cid:7)(cid:1)(cid:1)(cid:3)(cid:3)(cid:30)(cid:30)(cid:4)(cid:4)(cid:29)(cid:29)(cid:30)(cid:30)(cid:7)(cid:7)(cid:1)(cid:1)(cid:15)(cid:15)(cid:24)(cid:24)(cid:1)(cid:1)(cid:1)(cid:1)(cid:1)(cid:1)(cid:31)(cid:31)(cid:31)(cid:31)(cid:31)(cid:31)(cid:25)(cid:25)(cid:16)(cid:16) !!(cid:3)(cid:3)(cid:5)(cid:5)(cid:12)(cid:12)(cid:30)(cid:30)(cid:15)(cid:15)(cid:4)(cid:4) (cid:25)(cid:25)(cid:16)(cid:16)(cid:3)(cid:3)(cid:6)(cid:6)(cid:1)
+(cid:1)
+(cid:1)
+AA BBrriieeff SSuurrvveeyy ooff FFaaccee RReeccooggnniittiioonn TTeecchhnniiqquueess
+Nilam B. Goswami, Pinal Patel, Chirag I. Patel, Parth Parekh
+Post Graduation, CE and IT department, Government Engineering College, Gandhinagar, India"
+7c8adb2fa156b119a1f576652c39fb06e4e19675,Ordinal Regression using Noisy Pairwise Comparisons for Body Mass Index Range Estimation,"Ordinal Regression using Noisy Pairwise Comparisons for Body Mass Index
+Range Estimation
+Luisa F. Polan´ıa
+Dongning Wang
+Glenn M. Fung
+American Family Insurance, Strategic Data & Analytics, Madison, WI
+{lpolania, dwang1,"
+7c25ed788da1f5f61d8d1da23dd319dfb4e5ac2d,Human-In-The-Loop Person Re-Identification,"Human-In-The-Loop Person Re-Identification
+Hanxiao Wang, Shaogang Gong, Xiatian Zhu, and Tao Xiang"
+7c26559e7269679ef52a85d02c6ff7000c2387d2,Towards a Development of a Learners’ Ratified Acceptance of Multi-biometrics Intentions Model (RAMIM): Initial Empirical Results,"Yair Levy, Michelle M. Ramim
+Towards a Development of a Learners’ Ratified
+Acceptance of Multi-biometrics Intentions Model
+(RAMIM): Initial Empirical Results
+Graduate School of Computer and Information
+H. Wayne Huizenga School of Business and
+Nova Southeastern University, USA
+Nova Southeastern University, USA
+Yair Levy
+Sciences
+Michelle M. Ramim
+Entrepreneurship
+implemented as"
+7c9a65f18f7feb473e993077d087d4806578214e,SpringerLink - Zeitschriftenbeitrag,"SpringerLink - Zeitschriftenbeitrag
+http://www.springerlink.com/content/93hr862660nl1164/?p=abe5352...
+Deutsch
+Deutsch
+Vorherige Beitrag Nächste Beitrag
+Beitrag markieren
+In den Warenkorb legen
+Zu gespeicherten Artikeln
+hinzufügen
+Permissions & Reprints
+Diesen Artikel empfehlen
+Ergebnisse
+finden
+Erweiterte Suche
+im gesamten Inhalt
+in dieser Zeitschrift
+in diesem Heft
+Diesen Beitrag exportieren
+Diesen Beitrag exportieren als RIS
+| Text"
+7c0f7d47da05a41e8671b059ade70dd2df7070db,Face Recognition and Feature Detection Using Artificial Neural Networks and ANFIS,"International Journal of Emerging Technology and Advanced Engineering
+Website: www.ijetae.com (ISSN 2250-2459, ISO 9001:2008 Certified Journal, Volume 5, Issue 7, July 2015)
+Face Recognition and Feature Detection Using Artificial
+Neural Networks and ANFIS
+Sanjay Kumar Dekate1, Dr. Anupam Shukla2
+Research Scholar, Dr. C. V. Raman University, Bilaspur, India
+Professor, ABV-IIITM, Gwalior, India"
+7c0ffae3acb0fd0a14ff66b6d474229aa16c53ab,Covariance Descriptor Multiple Object Tracking and Re-identification with Colorspace Evaluation,"Covariance Descriptor Multiple Object Tracking
+nd Re-Identification with Colorspace
+Evaluation
+Andr´es Romero, Mich`ele Gouiff´es and Lionel Lacassagne
+Institut d’´El´ectronique Fondamentale, UMR 8622, Universit´e Paris-Sud XI, Bˆatiment
+660, rue Noetzlin, Plateau du Moulon, 91400 Orsay"
+7c1e1c767f7911a390d49bed4f73952df8445936,Non-Rigid Object Detection with LocalInterleaved Sequential Alignment (LISA),"NON-RIGID OBJECT DETECTION WITH LOCAL INTERLEAVED SEQUENTIAL ALIGNMENT (LISA)
+Non-Rigid Object Detection with Local
+Interleaved Sequential Alignment (LISA)
+Karel Zimmermann, Member, IEEE,, David Hurych, Member, IEEE,
+nd Tom´aˇs Svoboda, Member, IEEE"
+7cf579088e0456d04b531da385002825ca6314e2,Emotion Detection on TV Show Transcripts with Sequence-based Convolutional Neural Networks,"Emotion Detection on TV Show Transcripts with
+Sequence-based Convolutional Neural Networks
+Sayyed M. Zahiri
+Jinho D. Choi
+Mathematics and Computer Science
+Mathematics and Computer Science
+Emory University
+Atlanta, GA 30322, USA
+Emory University
+Atlanta, GA 30322, USA"
+7c349932a3d083466da58ab1674129600b12b81c,Leveraging Multiple Features for Image Retrieval and Matching,
+16e2e9e4741795c004d15e95532b07943d3a3242,CPS: 3D Compositional Part Segmentation through Grasping,"CPS: 3D Compositional Part Segmentation through Grasping
+Safoura Rezapour Lakani
+University of Innsbruck
+Innsbruck, Austria
+Mirela Popa
+University of Innsbruck
+Innsbruck, Austria
+Antonio J. Rodr´ıguez-S´anchez
+University of Innsbruck
+Innsbruck, Austria
+Justus Piater
+University of Innsbruck
+Innsbruck, Austria"
+162403e189d1b8463952fa4f18a291241275c354,Action Recognition with Spatio-Temporal Visual Attention on Skeleton Image Sequences,"Action Recognition with Spatio-Temporal
+Visual Attention on Skeleton Image Sequences
+Zhengyuan Yang, Student Member, IEEE, Yuncheng Li, Jianchao Yang, Member, IEEE,
+nd Jiebo Luo, Fellow, IEEE
+With a strong ability of modeling sequential data, Recur-
+rent Neural Networks (RNN) with Long Short-Term Memory
+(LSTM) neurons outperform the previous hand-crafted feature
+ased methods [9], [10]. Each skeleton frame is converted into
+feature vector and the whole sequence is fed into the RNN.
+Despite the strong ability in modeling temporal sequences,
+RNN structures lack the ability to efficiently learn the spatial
+relations between the joints. To better use spatial information,
+hierarchical structure is proposed in [11], [12] that feeds
+the joints into the network as several pre-defined body part
+groups. However,
+limit
+the effectiveness of representing spatial relations. A spatio-
+temporal 2D LSTM (ST-LSTM) network [13] is proposed
+to learn the spatial and temporal relations simultaneously.
+Furthermore, a two-stream RNN structure [14] is proposed to"
+160259f98a6ec4ec3e3557de5e6ac5fa7f2e7f2b,Discriminant multi-label manifold embedding for facial Action Unit detection,"Discriminant Multi-Label Manifold Embedding for Facial Action Unit
+Detection
+Signal Procesing Laboratory (LTS5), ´Ecole Polytechnique F´ed´erale de Lausanne, Switzerland
+Anıl Y¨uce, Hua Gao and Jean-Philippe Thiran"
+16fdc3829dc8322a26eac46e93703000005f3d6d,An occlusion reasoning scheme for monocular pedestrian tracking in dynamic scenes,"An Occlusion Reasoning Scheme for Monocular
+Pedestrian Tracking in Dynamic Scenes
+Sourav Garg and Swagat Kumar
+Innovation Lab
+Tata Consultancy Services
+New Delhi, India 201301
+Email:
+Rajesh Ratnakaram and Prithwijit Guha
+Department of Electronics and Electrical Engineering
+Indian Institute of Technology Guwahati
+Guwahati, Assam, India 781039
+Email:"
+16671b2dc89367ce4ed2a9c241246a0cec9ec10e,Detecting the Number of Clusters in n-Way Probabilistic Clustering,"Detecting the Number of Clusters
+in n-Way Probabilistic Clustering
+Zhaoshui He, Andrzej Cichocki, Senior Member, IEEE,
+Shengli Xie, Senior Member, IEEE, and Kyuwan Choi"
+16bd796687ca17ac7ca28d28d856b324186628ba,Face Recognition and Verification Using Photometric Stereo: The Photoface Database and a Comprehensive Evaluation,"Face Recognition and Verification Using
+Photometric Stereo: The Photoface Database
+nd a Comprehensive Evaluation
+Stefanos Zafeiriou, Member, IEEE, Gary A. Atkinson, Mark F. Hansen, William A. P. Smith, Member, IEEE,
+Vasileios Argyriou, Member, IEEE, Maria Petrou, Senior Member, IEEE, Melvyn L. Smith, and Lyndon N. Smith"
+16395b40e19cbc6d5b82543039ffff2a06363845,Action Recognition in Video Using Sparse Coding and Relative Features,"Action Recognition in Video Using Sparse Coding and Relative Features
+Anal´ı Alfaro
+Domingo Mery
+Alvaro Soto
+P. Universidad Catolica de Chile
+P. Universidad Catolica de Chile
+P. Universidad Catolica de Chile
+Santiago, Chile
+Santiago, Chile
+Santiago, Chile"
+16e577820999e584c787ec611f55746cf9147518,Cross-Domain Person Reidentification Using Domain Adaptation Ranking SVMs,"Cross-Domain Person Re-Identification Using
+Domain Adaptation Ranking SVMs
+Andy J Ma, Jiawei Li, Pong C Yuen, Senior Member, IEEE, and Ping Li
+label"
+1696f6861c208b6a7cac95fbeba524867ad3e8d6,Using deep learning to quantify the beauty of outdoor places,"Downloaded from
+http://rsos.royalsocietypublishing.org/
+on September 4, 2017
+rsos.royalsocietypublishing.org
+Research
+Cite this article: Seresinhe CI, Preis T, Moat
+HS. 2017 Using deep learning to quantify the
+eauty of outdoor places. R. Soc. open sci.
+: 170170.
+http://dx.doi.org/10.1098/rsos.170170
+Received: 23 February 2017
+Accepted: 19 June 2017
+Subject Category:
+Computer science
+Subject Areas:
+environmental science/computer modelling
+nd simulation
+Keywords:
+environmental aesthetics, well-being,
+onvolutional neural networks, deep learning,"
+16d1e29b588fd26f5f0ac8038110f7b8500a1ec9,$L_0$ Regularized Stationary-Time Estimation for Crowd Analysis,"L0 Regularized Stationary-Time Estimation
+for Crowd Analysis
+Shuai Yi, Xiaogang Wang, Member, IEEE, Cewu Lu, Member, IEEE,
+Jiaya Jia, Senior Member, IEEE, and Hongsheng Li"
+16da7c95c218e9e97eea7734d6c243e8b825196d,A stable and accurate multi-reference representation for surfaces of R<sup>3</sup>: Application to 3D faces description,"A stable and accurate multi-reference representation for surfaces of
+R3: Application to 3D faces description
+Wieme Gadacha1, Faouzi Ghorbel1
+CRISTAL laboratory, GRIFT research group
+National School of Computer Sciences (NSCS), La Manouba 2010, Tunisia"
+1685ac0f9fedd83a178a2f64f25155fb37998d8f,Human tracking using wearable sensors in the pocket,"Human Tracking using Wearable Sensors in the
+Pocket
+Wenchao Jiang
+Department of Computer Science
+Zhaozheng Yin
+Department of Computer Science
+Missouri University of Science and Technology
+Missouri University of Science and Technology"
+166f42f66c5e6dd959548acfb97dc77a36013639,Bilevel Model-Based Discriminative Dictionary Learning for Recognition,"Bilevel Model-Based Discriminative Dictionary
+Learning for Recognition
+Pan Zhou, Chao Zhang, Member, IEEE, and Zhouchen Lin, Senior Member, IEEE"
+16c884be18016cc07aec0ef7e914622a1a9fb59d,Exploiting Multimodal Data for Image Understanding,"UNIVERSITÉ DE GRENOBLE
+No attribué par la bibliothèque
+THÈSE
+pour obtenir le grade de
+DOCTEUR DE L’UNIVERSITÉ DE GRENOBLE
+Spécialité : Mathématiques et Informatique
+préparée au Laboratoire Jean Kuntzmann
+dans le cadre de l’École Doctorale Mathématiques,
+Sciences et Technologies de l’Information, Informatique
+présentée et soutenue publiquement
+Matthieu Guillaumin
+le 27 septembre 2010
+Exploiting Multimodal Data for Image Understanding
+Données multimodales pour l’analyse d’image
+Directeurs de thèse : Cordelia Schmid et Jakob Verbeek
+M. Éric Gaussier
+M. Antonio Torralba
+Mme Tinne Tuytelaars Katholieke Universiteit Leuven
+M. Mark Everingham University of Leeds
+Mme Cordelia Schmid"
+16aec3ee9a97162b85b1d51c3c5ce73a472e74b8,Application of Selective Search to Pose estimation,"Application of Selective Search to Pose estimation
+Ujwal Krothapalli
+Department of Electrical and
+Computer Engineering
+Virginia Tech
+Blacksburg, Virginia 24061"
+16c855aea9789e2b7a77f35dc4181efc93dec69c,Exploiting Sum of Submodular Structure for Inference in Very High Order MRF-MAP Problems,"SUBMITTED TO IEEE TPAMI
+Exploiting Sum of Submodular Structure for
+Inference in Very High Order MRF-MAP
+Problems
+Ishant Shanu Surbhi Goel Chetan Arora Parag Singla"
+163738c0f74ec82ab670a868a051edb732543b6e,Image alignment with rotation manifolds built on sparse geometric expansions,"Image alignment with rotation manifolds built
+on sparse geometric expansions
+Effrosyni Kokiopoulou and Pascal Frossard
+Ecole Polytechnique F´ed´erale de Lausanne (EPFL)
+Signal Processing Institute - ITS
+CH- 1015 Lausanne, Switzerland"
+1630e839bc23811e340bdadad3c55b6723db361d,Exploiting relationship between attributes for improved face verification,"SONG, TAN, CHEN: EXPLOITING RELATIONSHIP BETWEEN ATTRIBUTES
+Exploiting Relationship between Attributes for
+Improved Face Verification
+Fengyi Song
+Xiaoyang Tan
+Songcan Chen
+Department of Computer Science and
+Technology, Nanjing University of Aero-
+nautics and Astronautics, Nanjing 210016,
+P.R. China"
+160ab0e879f4451fa4df88cd567508150894ba9d,Cross Dataset Person Re-identification,"Cross Dataset Person Re-identification
+Yang Hu, Dong Yi, Shengcai Liao, Zhen Lei, Stan Z. Li(cid:63)
+Center for Biometrics and Security Research
+National Laboratory of Pattern Recognition
+Institute of Automation, Chinese Academy of Sciences (CASIA)
+95 Zhongguancun East Road, 100190, Beijing, China
+{yhu, dong.yi, scliao, zlei,"
+16597862a1df1a983c439e82e0462424f538bb48,Personalized Saliency and its Prediction,
+166b5bdea1f4f850af5b045a953d6de74bc18d1e,Best of both worlds: Human-machine collaboration for object annotation,"Best of both worlds: human-machine collaboration for object annotation
+Olga Russakovsky1, Li-Jia Li2, Li Fei-Fei1
+Stanford University. 2Snapchat (this work was done while at Yahoo! Labs).
+The long-standing goal of localizing every object in an image remains
+elusive. Manually annotating objects is quite expensive despite crowd en-
+gineering innovations. Current automatic object detectors can accurately
+detect at most a few objects per image. This paper brings together the latest
+dvancements in object detection and in crowd engineering into a principled
+framework for accurately and efficiently localizing objects in images.
+The input to the system is an image to annotate and a set of annotation
+onstraints: (1) desired utility of labeling, which is a generalization of the
+number of labeled objects, (2) desired precision of the labeling and/or (3)
+the budget, which is the human cost of the labeling. Our system automati-
+ally solicits feedback from human workers (“users”) to annotate the image
+subject to these constraints, as illustrated in Figure 1. The output is a set of
+object annotations, informed by humans and computer vision.
+One important decision is which questions to pose to the human label-
+ers. In computer vision with human-in-the-loop approaches, human inter-
+vention has ranged from binary question-and-answer [1] to attribute-based
+feedback [4] to free-form object annotation [6]. Binary questions are not"
+161c9ef7114bda7c5a60a29ee4a3161b0a76e676,Low Rank Approximation and Decomposition of Large Matrices Using Error Correcting Codes,"Low rank approximation and decomposition of
+large matrices using error correcting codes
+Shashanka Ubaru, Arya Mazumdar Senior Member, IEEE, and Yousef Saad"
+16286fb0f14f6a7a1acc10fcd28b3ac43f12f3eb,"All Smiles are Not Created Equal: Morphology and Timing of Smiles Perceived as Amused, Polite, and Embarrassed/Nervous.","J Nonverbal Behav
+DOI 10.1007/s10919-008-0059-5
+O R I G I N A L P A P E R
+All Smiles are Not Created Equal: Morphology
+nd Timing of Smiles Perceived as Amused, Polite,
+nd Embarrassed/Nervous
+Zara Ambadar Æ Jeffrey F. Cohn Æ Lawrence Ian Reed
+Ó Springer Science+Business Media, LLC 2008"
+165d966940dcccf9c9976ebffcabe72d66996b05,Semi-Supervised Nonlinear Hashing Using Bootstrap Sequential Projection Learning,"This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication.
+Semi-supervised Nonlinear Hashing Using
+Bootstrap Sequential Projection Learning
+Chenxia Wu, Jianke Zhu, Deng Cai, Chun Chen, and Jiajun Bu"
+1697a4188b9f75ff5324eb9957b8317f459bbf59,Dual-tree fast exact max-kernel search,"Dual-Tree Fast Exact Max-Kernel Search
+Ryan R. Curtin and Parikshit Ram
+December 11, 2013"
+16e8d439fbcf8311efea7b0baeb1a5340272b396,Stereo and LIDAR Fusion based Detection of Humans and Other Obstacles in Farming Scenarios,
+166186e551b75c9b5adcc9218f0727b73f5de899,Automatic Age and Gender Recognition in Human Face Image Dataset using Convolutional Neural Network System,"Volume 4, Issue 2, February 2016
+International Journal of Advance Research in
+Computer Science and Management Studies
+Research Article / Survey Paper / Case Study
+Available online at: www.ijarcsms.com
+ISSN: 2321-7782 (Online)
+Automatic Age and Gender Recognition in Human Face Image
+Dataset using Convolutional Neural Network System
+Subhani Shaik1
+Assoc. Prof & Head of the Department
+Department of CSE,
+Anto A. Micheal2
+Associate Professor
+Department of CSE,
+St.Mary’s Group of Institutions Guntur
+St.Mary’s Group of Institutions Guntur
+Chebrolu(V&M),Guntur(Dt),
+Andhra Pradesh - India
+Chebrolu(V&M),Guntur(Dt),
+Andhra Pradesh - India"
+16d9b983796ffcd151bdb8e75fc7eb2e31230809,GazeDirector: Fully Articulated Eye Gaze Redirection in Video,"EUROGRAPHICS 2018 / D. Gutierrez and A. Sheffer
+(Guest Editors)
+Volume 37 (2018), Number 2
+GazeDirector: Fully Articulated Eye Gaze Redirection in Video
+ID: paper1004"
+165abb6fdbadae997135feec447fc825edb31c6c,Dimensionality Reduction with Simultaneous Sparse Approximations,"SCHOOL OF ENGINEERING - STI
+SIGNAL PROCESSING INSTITUTE
+EffrosyniKokiopoulouandPascalFrossard
+CH-1015 LAUSANNE
+Telephone: +41216932601
+Telefax: +41216937600
+e-mail:
+ÉCOLE POLYTECHNIQUE(cid:13)
+FÉDÉRALE DE LAUSANNE
+DIMENSIONALITY REDUCTION WITH
+SIMULTANEOUS SPARSE APPROXIMATIONS
+Effrosyni Kokiopoulou and Pascal Frossard
+Swiss Federal Institute of Technology Lausanne (EPFL)
+Signal Processing Institute Technical Report
+TR-ITS-2006.010
+October 21st, 2006
+Part of this work has been submitted to IEEE TMM.
+This work has been supported by the Swiss NSF, under grants PP-002-68737, and NCCR IM2."
+162c33a2ec8ece0dc96e42d5a86dc3fedcf8cd5e,Large-Scale Classification by an Approximate Least Squares One-Class Support Vector Machine Ensemble,"Mygdalis, V., Iosifidis, A., Tefas, A., & Pitas, I. (2016). Large-Scale
+Classification by an Approximate Least Squares One-Class Support Vector
+of a meeting held 20-22 August 2015, Helsinki, Finland (Vol. 2, pp. 6-10).
+Institute of Electrical and Electronics Engineers (IEEE). DOI:
+0.1109/Trustcom.2015.555
+Peer reviewed version
+Link to published version (if available):
+0.1109/Trustcom.2015.555
+Link to publication record in Explore Bristol Research
+PDF-document
+University of Bristol - Explore Bristol Research
+General rights
+This document is made available in accordance with publisher policies. Please cite only the published
+version using the reference above. Full terms of use are available:
+http://www.bristol.ac.uk/pure/about/ebr-terms"
+16e8b0a1e8451d5f697b94c0c2b32a00abee1d52,UMB-DB: A database of partially occluded 3D faces,"UMB-DB
+A Database of Partially Occluded 3D Faces
+Alessandro Colombo
+Claudio Cusano
+Raimondo Schettini
+Universit`a degli Studi di Milano-Bicocca
+3 November 2011"
+16bd481fb66259df9c4c22b54797d8e8adc910fc,Robustifying Descriptor Instability Using Fisher Vectors,"Robustifying Descriptor Instability
+using Fisher Vectors
+Ivo Everts, Jan C. van Gemert, Thomas Mensink, Theo Gevers, Member, IEEE"
+1654fadee3e70d744a4eb231932b87c41c1e3ae5,Survey on Emotional Body Gesture Recognition,"JOURNAL OF IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. X, XXX 201X
+Survey on Emotional Body Gesture Recognition
+Fatemeh Noroozi, Ciprian Adrian Corneanu, Dorota Kami´nska, Tomasz Sapi´nski, Sergio Escalera,
+nd Gholamreza Anbarjafari,"
+161eb9ecc119952c137959e87a796da0f3c62cd1,Eye tracking in early autism research,"Falck-Ytter et al. Journal of Neurodevelopmental Disorders 2013, 5:28
+http://www.jneurodevdisorders.com/content/5/1/28
+R EV I E W
+Eye tracking in early autism research
+Terje Falck-Ytter1,2*, Sven Bölte1,3 and Gustaf Gredebäck2
+Open Access"
+16647dc1bc87ba1e7b8bcd7e1ea8ccebcfe20fa5,Psychometric properties of reaction time based experimental paradigms measuring anxiety-related information-processing biases in children,"PDF hosted at the Radboud Repository of the Radboud University
+Nijmegen
+The following full text is a publisher's version.
+For additional information about this publication click this link.
+http://repository.ubn.ru.nl/handle/2066/126858
+Please be advised that this information was generated on 2018-10-16 and may be subject to
+hange."
+1670729d1edc9bc6103ee823f1137d302be41397,Patch-based Object Recognition Using Discriminatively Trained Gaussian Mixtures,"Patch-based Object Recognition Using
+Discriminatively Trained Gaussian Mixtures
+Andre Hegerath, Thomas Deselaers, and Hermann Ney
+Human Language Technology and Pattern Recognition Group,
+RWTH Aachen University – D-52056 Aachen, Germany
+{hegerath, deselaers,"
+16bfd904f5a76bb52d5cd8a25721277047a02e89,Blindfold Baselines for Embodied QA,"Blindfold Baselines for Embodied QA
+Ankesh Anand1 Eugene Belilovsky1 Kyle Kastner1 Hugo Larochelle2,1 Aaron Courville1,3
+Mila
+Google Brain 3CIFAR Fellow"
+161eb88031f382e6a1d630cd9a1b9c4bc6b47652,Automatic facial expression recognition using features of salient facial patches,"Automatic Facial Expression Recognition
+Using Features of Salient Facial Patches
+S L Happy and Aurobinda Routray"
+16f48e8b7f1f6c03c888e3f4664ce3fa1261296b,Steganographic Generative Adversarial Networks,"Steganographic Generative Adversarial Networks
+Denis Volkhonskiy1,2,3, Ivan Nazarov1,2, Boris Borisenko3 and Evgeny Burnaev1,2,3
+Skolkovo Institute of Science and Technology
+The Institute for Information Transmission Problems RAS (Kharkevich Institute)
+National Research University Higher School of Economics (HSE)"
+4209783b0cab1f22341f0600eed4512155b1dee6,Accurate and Efficient Similarity Search for Large Scale Face Recognition,"Accurate and Efficient Similarity Search for Large Scale Face Recognition
+Ce Qi
+Zhizhong Liu
+Fei Su"
+42e3dac0df30d754c7c7dab9e1bb94990034a90d,PANDA: Pose Aligned Networks for Deep Attribute Modeling,"PANDA: Pose Aligned Networks for Deep Attribute Modeling
+Ning Zhang1,2, Manohar Paluri1, Marc’Aurelio Ranzato1, Trevor Darrell2, Lubomir Bourdev1
+EECS, UC Berkeley
+{mano, ranzato,
+Facebook AI Research
+{nzhang,"
+422fc05b3ef72e96c87b9aa4190efa7c7fb8c170,Preprocessing Technique for Face Recognition Applications under Varying Illumination Conditions,"Global Journal of Computer Science and Technology
+Graphics & Vision
+Volume 12 Issue 11 Version 1.0 Year 2012
+Type: Double Blind Peer Reviewed International Research Journal
+Publisher: Global Journals Inc. (USA)
+Online ISSN: 0975-4172 & Print ISSN: 0975-4350
+Preprocessing Technique for Face Recognition Applications
+under Varying Illumination Conditions
+By S.Anila & Dr.N.Devarajan
+Sri Ramakrishna Institute of Technology, Coimbatore-10, Tamil Nadu, India"
+42c645df49106b68a71abe757ac13245db4be394,A New Method of Illumination Normalization for Robust Face Recognition,"A New Method of Illumination Normalization
+for Robust Face Recognition
+Young Kyung Park, Bu Cheon Min, and Joong Kyu Kim
+School of Information and Communication Engineering, SungKyunKwan University.
+00, Chun-Chun-Dong, Chang-An-Ku, Suwon, Korea 440-746
+{multipym,"
+4244d3340304b114e5c00e7b5797d2338a5c2b82,Face Recognition Using Local Texture Feature,"International Journal of Computer Engineering and Applications,
+Volume XII, Issue I, Jan. 18, www.ijcea.com ISSN 2321-3469
+FACE RECOGNITION USING LOCAL TEXTURE FEATURE
+Pavan.M 1, Sayed Aftab Ahamed 2
+Dept. of Information Science & engineering, J.N.N.C.E
+Shimoga, Karnataka, India"
+429b8d5bb05e1a580fad0222b9e9496985465e40,"See No Evil, Say No Evil: Description Generation from Densely Labeled Images","Proceedings of the Third Joint Conference on Lexical and Computational Semantics (*SEM 2014), pages 110–120,
+Dublin, Ireland, August 23-24 2014.
+(Count:3) Isa: ride, vehicle,… Doing: parking,… Has: steering wheel,… Attrib: black, shiny,… children (Count:2) Isa: kids, children … Doing: biking, riding … Has: pants, bike … Attrib: young, small … bike (Count:1) Isa: bike, bicycle,… Doing: playing,… Has: chain, pedal,… Attrib: silver, white,… women(Count:3) Isa: girls, models,… Doing: smiling,... Has: shorts, bags,… Attrib: young, tan,… purses(Count:3) Isa: accessory,… Doing: containing,… Has: body, straps,… Attrib: black, soft,… sidewalk(Count:1) Isa: sidewalk, street,… Doing: laying,… Has: stone, cracks,… Attrib: flat, wide,… woman(Count:1) Isa: person, female,… Doing: pointing,… Has: nose, legs,… Attrib: tall, skinny,… tree(Count:1) Isa: plant,… Doing: growing,… Has: branches,… Attrib: tall, green,… kids(Count:5) Isa: group, teens,… Doing: walking,… Has: shoes, bags,… Attrib: young,… Fiveyoungpeopleonthestreet,twosharingabicycle.Severalyoungpeoplearewalkingnearparkedvehicles.Threegirlswithlargehandbagswalkingdownthesidewalk.Threewomenwalkdownacitystreet,asseenfromabove.Threeyoungwomanwalkingdownasidewalklookingup.Figure1:Anannotatedimagewithhumangeneratedsen-tencedescriptions.Eachboundingpolygonencompassesoneormoreobjectsandisassociatedwithacountandtextla-bels.Thisimagehas9highlevelobjectsannotatedwithover250textuallabels.tomuchofthevisualcontentneededtogeneratecomplete,human-likesentences.Inthispaper,weinsteadstudygenerationwithmorecompletevisualsupport,asprovidedbyhu-manannotations,allowingustodevelopmorecomprehensivemodelsthanpreviouslyconsid-ered.Suchmodelshavethedualbenefitof(1)providingnewinsightsintohowtoconstructmorehuman-likesentencesand(2)allowingustoper-formexperimentsthatsystematicallystudythecontributionofdifferentvisualcuesingeneration,suggestingwhichautomaticdetectorswouldbemostbeneficialforgeneration.Inanefforttoapproximaterelativelycompletevisualrecognition,wecollectedmanuallylabeledrepresentationsofobjects,parts,attributesandac-tivitiesforabenchmarkcaptiongenerationdatasetthatincludesimagesp
airedwithhumanauthored"
+421387011b5cdd2cb4a1fdf04728d350741a0ac1,Incidental memory for faces in children with different genetic subtypes of Prader-Willi syndrome,"Social Cognitive and Affective Neuroscience, 2017, 918–927
+doi: 10.1093/scan/nsx013
+Advance Access Publication Date: 17 February 2017
+Original article
+Incidental memory for faces in children with different
+genetic subtypes of Prader-Willi syndrome
+Alexandra P. Key,1,2 and Elisabeth M. Dykens1,3
+Vanderbilt Kennedy Center for Research on Human Development, 2Department of Hearing and Speech
+Sciences, Vanderbilt University Medical Center, and 3Department of Psychology and Human Development,
+Vanderbilt University, Nashville, TN 37203, USA
+Correspondence should be addressed to Alexandra P. Key, Vanderbilt Kennedy Center, Peabody Box 74, Vanderbilt University, Nashville, TN 37203, USA.
+E-mail:"
+42f4653f0693f16e087e4b913407d9b0278154c9,3D Human Action Recognition with Siamese-LSTM Based Deep Metric Learning,"D Human Action Recognition with Siamese-
+LSTM Based Deep Metric Learning
+VisLab, Department of Computer Engineering, Gebze Technical University, Kocaeli, Turkey
+Seyma Yucer and Yusuf Sinan Akgul
+Email: {syucer,"
+42afe5fd3f7b1d286a20e9306c6bc8624265f658,Face Detection Using the 3×3 Block Rank Patterns of Gradient Magnitude Images,"Signal & Image Processing : An International Journal (SIPIJ) Vol.4, No.5, October 2013
+FACE DETECTION USING THE 3×3 BLOCK RANK
+PATTERNS OF GRADIENT MAGNITUDE IMAGES
+Kang-Seo Park, Young-Gon Kim, and Rae-Hong Park
+Department of Electronic Engineering, School of Engineering, Sogang University,
+5 Baekbeom-ro (Sinsu-dong), Mapo-gu, Seoul 121-742, Korea"
+4213502d0f226b9845b00c2882851ba4c57742ab,Does Rabbit Antithymocyte Globulin (Thymoglobuline®) Have a Role in Avoiding Delayed Graft Function in the Modern Era of Kidney Transplantation?,"Hindawi
+Journal of Transplantation
+Volume 2018, Article ID 4524837, 11 pages
+https://doi.org/10.1155/2018/4524837
+Review Article
+Does Rabbit Antithymocyte Globulin (ThymoglobulineD)
+Have a Role in Avoiding Delayed Graft Function in the Modern
+Era of Kidney Transplantation?
+Lluís Guirado
+Department of Renal Transplantation, Fundaci´o Puigvert, Barcelona, Spain
+Correspondence should be addressed to Llu´ıs Guirado;
+Received 12 April 2018; Accepted 20 June 2018; Published 12 July 2018
+Academic Editor: Andreas Zuckermann
+Copyright © 2018 Llu´ıs Guirado. This is an open access article distributed under the Creative Commons Attribution License, which
+permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
+Delayed graft function (DGF) increases the risk of graft loss by up to 40%, and recent developments in kidney donation have
+increased the risk of its occurrence. Lowering the risk of DGF, however, is challenging due to a complicated etiology in which
+ischemia-reperfusion injury (IRI) leads to acute tubular necrosis. Among various strategies explored, the choice of induction
+therapy is one consideration. Rabbit antithymocyte globulin (rATG [Thymoglobuline]) has complex immunomodulatory effects
+that are relevant to DGF. In addition to a rapid and profound T-cell depletion, rATG inhibits leukocyte migration and adhesion."
+4265269bc894caa97efbfcfe5b83da7413f86a30,Asymmetric Tri-training for Unsupervised Domain Adaptation,"Asymmetric Tri-training for Unsupervised Domain Adaptation
+Kuniaki Saito 1 Yoshitaka Ushiku 1 Tatsuya Harada 1"
+42f8ef9d5ebf969a7e2b4d1eef4b332db562e5d4,Which Training Methods for GANs do actually Converge?,"Which Training Methods for GANs do actually Converge?
+Lars Mescheder 1 Andreas Geiger 1 2 Sebastian Nowozin 3"
+42cc8637a5e7b8203722ba0dca995814f6dfd525,PETS 2016: Dataset and Challenge,"PETS 2016: Dataset and Challenge
+Luis Patino*, Tom Cane**, Alain Vallee*** and James Ferryman*
+*University of Reading, Computational Vision Group, Reading RG6 6AY, United Kingdom,
+{j.l.patinovilchis,
+**BMT Group Ltd., Teddington TW11 8LZ. United Kingdom,
+***SAGEM, 92659 Boulogne-Billancourt, France,"
+4212a93f011aa47c6344c0cdc3e991740d8c7c04,Zero-Shot Kernel Learning,"Zero-Shot Kernel Learning
+Hongguang Zhang∗,2,1
+Piotr Koniusz∗,1,2
+Data61/CSIRO, 2Australian National University
+nu.edu.au2}"
+426b47af132293e9ffe6071a3ede59cfdc1aa3fb,Promoting social behavior with oxytocin in high-functioning autism spectrum disorders.,"Promoting social behavior with oxytocin in high-
+functioning autism spectrum disorders
+Elissar Andaria, Jean-René Duhamela, Tiziana Zallab, Evelyn Herbrechtb, Marion Leboyerb, and Angela Sirigua,1
+Centre de Neuroscience Cognitive, Unité Mixte de Recherche 5229, Centre National de la Recherche Scientifique, 69675 Bron, France; and bInstitut National
+de la Santé et de la Recherche Médicale U 841, Department of Psychiatry, Hôpital Chenevier-Mondor, 94000 Créteil, France
+Edited by Leslie G. Ungerleider, National Institute of Mental Health, Bethesda, MD, and approved January 7, 2010 (received for review September 8, 2009)
+Social adaptation requires specific cognitive and emotional compe-
+tences. Individuals with high-functioning autism or with Asperger
+syndrome cannot understand or engage in social situations despite
+preserved intellectual abilities. Recently, it has been suggested that
+oxytocin, a hormone known to promote mother-infant bonds, may
+e implicated in the social deficit of autism. We investigated the
+ehavioral effects of oxytocin in 13 subjects with autism.
+simulated ball game where participants interacted with fictitious
+partners, we found that after oxytocin inhalation, patients
+exhibited stronger interactions with the most socially cooperative
+partner and reported enhanced feelings of trust and preference.
+Also, during free viewing of pictures of faces, oxytocin selectively
+increased patients’ gazing time on the socially informative region of
+the face, namely the eyes. Thus, under oxytocin, patients respond"
+423e8cc1a7501066b7e0e5bb1beb5b9592337023,Accurate eye center localization using Snakuscule,"Accurate Eye Center Localization using Snakuscule
+Abhinav Tripathi
+Microsoft Research India
+Edward Cutrell
+Microsoft Research India
+Sanyam Garg
+Microsoft Research India"
+42cc9ea3da1277b1f19dff3d8007c6cbc0bb9830,Coordinated Local Metric Learning,"Coordinated Local Metric Learning
+Shreyas Saxena
+Jakob Verbeek
+Inria∗"
+42350e28d11e33641775bef4c7b41a2c3437e4fd,Multilinear Discriminant Analysis for Face Recognition,"Multilinear Discriminant Analysis
+for Face Recognition
+Shuicheng Yan, Member, IEEE, Dong Xu, Qiang Yang, Senior Member, IEEE, Lei Zhang, Member, IEEE,
+Xiaoou Tang, Senior Member, IEEE, and Hong-Jiang Zhang, Fellow, IEEE"
+42e793b1dd6669b74ad106071c432aa5015b8631,How do people think about interdependence? A multidimensional model of subjective outcome interdependence.,"tapraid5/z2g-perpsy/z2g-perpsy/z2g99917/z2g4623d17z
+xppws S⫽1
+8/10/17
+:53 Art: 2016-0710
+APA NLM
+017, Vol. 0, No. 999, 000
+0022-3514/17/$12.00
+© 2017 American Psychological Association
+http://dx.doi.org/10.1037/pspp0000166
+How Do People Think About Interdependence? A Multidimensional Model
+of Subjective Outcome Interdependence
+Fabiola H. Gerpott, Daniel Balliet,
+Simon Columbus, and Catherine Molho
+Vrije Universiteit Amsterdam
+Reinout E. de Vries
+Vrije Universiteit Amsterdam and University of Twente
+Interdependence is a fundamental characteristic of social interactions. Interdependence Theory states that
+6 dimensions describe differences between social situations. Here we examine if these 6 dimensions
+describe how people think about their interdependence with others in a situation. We find that people (in
+situ and ex situ) can reliably differentiate situations according to 5, but not 6, dimensions of interde-"
+42e155ea109eae773dadf74d713485be83fca105,Sparse reconstruction of facial expressions with localized gabor moments,
+423aacfe7467961e32f012bc6de10d636ebc0236,Breaking the interactive bottleneck in multi-class classification with active selection and binary feedback,"MITSUBISHI ELECTRIC RESEARCH LABORATORIES
+http://www.merl.com
+Breaking the Interactive Bottleneck in
+Multi-Class Classification with Active
+Selection and Binary Feedback
+Ajay Joshi, Fatih Porikli, Nikolaos Papanikolopoulos
+TR2010-037
+July 2010"
+42b56c77e4b154364763d4024baa8129da75151f,Deep Detection of People and their Mobility Aids for a Hospital Robot,"Deep Detection of People and their Mobility Aids for a Hospital Robot
+Andres Vasquez
+Marina Kollmitz
+Andreas Eitel
+Wolfram Burgard"
+4297deda7ea77fb90de2509c763738584b2353de,Beyond one billion time series: indexing and mining very large time series collections with $$i$$ SAX2+,"Knowl Inf Syst
+DOI 10.1007/s10115-012-0606-6
+REGULAR PAPER
+Beyond one billion time series: indexing and mining very
+large time series collections with iSAX2+
+Alessandro Camerra · Jin Shieh · Themis Palpanas ·
+Thanawin Rakthanmanon · Eamonn Keogh
+Received: 23 March 2012 / Revised: 23 September 2012 / Accepted: 28 December 2012
+© Springer-Verlag London 2013"
+423e0f595365640b653c1195749e01394cbcd937,Web-Scale Responsive Visual Search at Bing,"Web-Scale Responsive Visual Search at Bing
+Houdong Hu, Yan Wang, Linjun Yang, Pavel Komlev, Li Huang,
+Xi (Stephen) Chen, Jiapei Huang, Ye Wu, Meenaz Merchant, Arun Sacheti
+Microsoft
+Redmond, Washington"
+424e918134ed7c70fa73450bd6af1bd982071a27,Final Report : Localized object detection with Convolutional Neural Networks,"Final Report: Localized object detection with Convolutional
+Computer Vision
+Neural Networks
+Bardia Doosti
+Vijay Hareesh Avula
+May 5, 2016"
+428e42f8d5cbffc068e2e5fe8f697c9c9ee113a9,Deep Multimodal Subspace Clustering Networks,"IEEE JOURNAL OF SELECTED TOPICS IN SIGNAL PROCESSING, VOL. X, NO. X, SEPTEMBER 21, 2018
+Deep Multimodal Subspace Clustering Networks
+Mahdi Abavisani, Student Member, IEEE and Vishal M. Patel, Senior Member, IEEE"
+42d8a6b1ef5acaaf4640a8974c6f99d60b56090c,Markerless Motion Capture of Multiple Characters Using Multiview Image Segmentation,"SUBMIT TO IEEE TRANS. PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. XX, NO. XX, AUGUST 2012
+Markerless Motion Capture of Multiple Characters
+Using Multi-view Image Segmentation
+Yebin Liu, Juergen Gall Member, IEEE, Carsten Stoll, Qionghai Dai Senior Member, IEEE,
+Hans-Peter Seidel, and Christian Theobalt"
+4270460b8bc5299bd6eaf821d5685c6442ea179a,"Partial Similarity of Objects, or How to Compare a Centaur to a Horse","Int J Comput Vis (2009) 84: 163–183
+DOI 10.1007/s11263-008-0147-3
+Partial Similarity of Objects, or How to Compare a Centaur
+to a Horse
+Alexander M. Bronstein · Michael M. Bronstein · Alfred
+M. Bruckstein · Ron Kimmel
+Received: 30 September 2007 / Accepted: 3 June 2008 / Published online: 26 July 2008
+© Springer Science+Business Media, LLC 2008"
+426840ccf74bbd8b087cf357efdb80ecc85ea2ab,Reduced Analytic Dependency Modeling: Robust Fusion for Visual Recognition,"Noname manuscript No.
+(will be inserted by the editor)
+Reduced Analytic Dependency Modeling: Robust Fusion for Visual
+Recognition
+Andy J Ma · Pong C Yuen
+Received: date / Accepted: date"
+422d352a7d26fef692a3cd24466bfb5b4526efea,Pedestrian interaction in tracking: the social force model and global optimization methods,"Pedestrian interaction in tracking: the social
+force model and global optimization methods
+Laura Leal-Taix´e and Bodo Rosenhahn"
+429d4848d03d2243cc6a1b03695406a6de1a7abd,"Face Recognition based on Logarithmic Fusion of SVD and KT Ramachandra A C , Raja K B , Venugopal K R , L M Patnaik","Face Recognition based on Logarithmic Fusion
+International Journal of Soft Computing and Engineering (IJSCE)
+ISSN: 2231-2307, Volume-2, Issue-3, July 2012
+of SVD and KT
+Ramachandra A C, Raja K B, Venugopal K R, L M Patnaik"
+42ab6c438bf5a6e0e74cc2dd9192a12f2406ca33,Nonlinear Dimensionality Reduction by Manifold Unfolding,"Nonlinear Dimensionality Reduction
+y Manifold Unfolding
+Pooyan Khajehpour Tadavani
+A thesis
+presented to the University of Waterloo
+in fulfillment of the
+thesis requirement for the degree of
+Doctor of Philosophy
+Computer Science
+Waterloo, Ontario, Canada, 2013
+(cid:13) Pooyan Khajehpour Tadavani 2013"
+4273a9d1605a69ac66440352b92ebeb230fd34f6,Simple Test Procedure for Image-Based Biometric Veri cation Systems,"SimpleTestProcedureforImage-BasedBiometric
+Veri(cid:12)cationSystems
+C.L.Wilson,R.M.McCabe
+InformationTechnologyLaboratory
+NationalInstituteofStandardsandTechnology
+Gaithersburg,MD
+42dc36550912bc40f7faa195c60ff6ffc04e7cd6,Visible and Infrared Face Identification via Sparse Representation,"Hindawi Publishing Corporation
+ISRN Machine Vision
+Volume 2013, Article ID 579126, 10 pages
+http://dx.doi.org/10.1155/2013/579126
+Research Article
+Visible and Infrared Face Identification via
+Sparse Representation
+Pierre Buyssens1 and Marinette Revenu2
+LITIS EA 4108-QuantIF Team, University of Rouen, 22 Boulevard Gambetta, 76183 Rouen Cedex, France
+GREYC UMR CNRS 6072 ENSICAEN-Image Team, University of Caen Basse-Normandie, 6 Boulevard Mar´echal Juin,
+4050 Caen, France
+Correspondence should be addressed to Pierre Buyssens;
+Received 4 April 2013; Accepted 27 April 2013
+Academic Editors: O. Ghita, D. Hernandez, Z. Hou, M. La Cascia, and J. M. Tavares
+Copyright © 2013 P. Buyssens and M. Revenu. This is an open access article distributed under the Creative Commons Attribution
+License, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly
+ited.
+We present a facial recognition technique based on facial sparse representation. A dictionary is learned from data, and patches
+extracted from a face are decomposed in a sparse manner onto this dictionary. We particularly focus on the design of dictionaries
+that play a crucial role in the final identification rates. Applied to various databases and modalities, we show that this approach"
+42e0d7fe2039b075ac2372d883fa994eb0a68b48,Learning human actions in video,"Learning human actions in video
+Alexander Klaser
+To cite this version:
+Alexander Klaser. Learning human actions in video. Modeling and Simulation. Institut Na-
+tional Polytechnique de Grenoble - INPG, 2010. English. <tel-00514814>
+HAL Id: tel-00514814
+https://tel.archives-ouvertes.fr/tel-00514814
+Submitted on 3 Sep 2010
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destin´ee au d´epˆot et `a la diffusion de documents
+scientifiques de niveau recherche, publi´es ou non,
+´emanant des ´etablissements d’enseignement et de
+recherche fran¸cais ou ´etrangers, des laboratoires
+publics ou priv´es."
+424e52158b43e40f356af7eafb35c91a9e13db30,"Impact Factor : 3 . 449 ( ISRA ) , Impact Factor : 2 .","[Randive, 4(1): January, 2015]
+ISSN: 2277-9655
+Scientific Journal Impact Factor: 3.449
+(ISRA), Impact Factor: 2.114
+IJESRT
+INTERNATIONAL JOURNAL OF ENGINEERING SCIENCES & RESEARCH
+TECHNOLOGY
+AN INNOVATIVE APPROACH FOR PLASTIC SURGERY FACE RECOGNITION-A
+Mahendra P. Randive *, Prof. Umesh W. Hore
+REVIEW
+*Student of M.E. Department of Electronics & Telecommunication Engineering, P. R. Patil College of
+Engineering, Amravati Maharashtra – India."
+42ecfc3221c2e1377e6ff849afb705ecd056b6ff,Pose Invariant Face Recognition Under Arbitrary Unknown Lighting Using Spherical Harmonics,"Pose Invariant Face Recognition under Arbitrary
+Unknown Lighting using Spherical Harmonics
+Lei Zhang and Dimitris Samaras
+Department of Computer Science,
+SUNY at Stony Brook, NY, 11790
+{lzhang,"
+421955c6d2f7a5ffafaf154a329a525e21bbd6d3,Evolutionary Pursuit and Its Application to Face Recognition,"IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. 22, NO. 6,
+JUNE 2000
+Evolutionary Pursuit and Its
+Application to Face Recognition
+Chengjun Liu, Member, IEEE, and Harry Wechsler, Fellow, IEEE"
+42832bcb36ee3f69327c38d0d17e6e2a73aaa2a6,SUN Database: Exploring a Large Collection of Scene Categories,"Int J Comput Vis
+DOI 10.1007/s11263-014-0748-y
+SUN Database: Exploring a Large Collection of Scene Categories
+Jianxiong Xiao · Krista A. Ehinger · James Hays ·
+Antonio Torralba · Aude Oliva
+Received: 9 June 2013 / Accepted: 2 July 2014
+© Springer Science+Business Media New York 2014"
+423cfa55a14cd92ada32245b416b587ef9c29308,Visually-Grounded Bayesian Word Learning,"Visually-Grounded Bayesian Word Learning
+Yangqing Jia
+Joshua Abbott
+Joseph Austerweil
+Thomas Griffiths
+Trevor Darrell
+Electrical Engineering and Computer Sciences
+University of California at Berkeley
+Technical Report No. UCB/EECS-2012-202
+http://www.eecs.berkeley.edu/Pubs/TechRpts/2012/EECS-2012-202.html
+October 17, 2012"
+4263630a35c5ee34ccf9dbd81c0541d92d0c7d5b,Shape Variation-Based Frieze Pattern for Robust Gait Recognition,"Shape Variation-Based Frieze Pattern for Robust Gait Recognition
+Seungkyu Lee* Yanxi Liu* Robert Collins
+Dept. of Computer Science and Eng. *Dept. of Electrical Eng.
+The Penn State University"
+42df75080e14d32332b39ee5d91e83da8a914e34,Illumination Compensation Using Oriented Local Histogram Equalization and its Application to Face Recognition,"Illumination Compensation Using Oriented
+Local Histogram Equalization and
+Its Application to Face Recognition
+Ping-Han Lee, Szu-Wei Wu, and Yi-Ping Hung"
+421b3a33ec70af2d733310f6c83ad713a314951d,Using nasal curves matching for expression robust 3D nose recognition,"Emambakhsh, M., Evans, A. and Smith, M. (2013) Using nasal curves
+matching for expression robust 3D nose recognition. In: IEEE Con-
+ference on Biometrics: Theory, Applications and Systems (BTAS2013),
+Washington DC, USA, September 29th - October 2, 2013. Available
+from: http://eprints.uwe.ac.uk/20812
+We recommend you cite the published version.
+The publisher’s URL is:
+http://eprints.uwe.ac.uk/20812/
+Refereed: Yes
+(no note)
+Disclaimer
+UWE has obtained warranties from all depositors as to their title in the material
+deposited and as to their right to deposit such material.
+UWE makes no representation or warranties of commercial utility, title, or fit-
+ness for a particular purpose or any other warranty, express or implied in respect
+of any material deposited.
+UWE makes no representation that the use of the materials will not infringe
+any patent, copyright, trademark or other property or proprietary rights.
+UWE accepts no liability for any infringement of intellectual property rights
+in any material deposited but will remove such material from public view pend-"
+896e2776174dcb86d311789ab83a266151d0595b,A Novel Performance Evaluation Methodology for Single-Target Trackers,"A Novel Performance Evaluation Methodology
+for Single-Target Trackers
+Matej Kristan, Member, IEEE, Jiri Matas, Aleˇs Leonardis, Member, IEEE, Tom´aˇs Voj´ıˇr,
+Roman Pflugfelder, Gustavo Fern´andez, Georg Nebehay, Fatih Porikli and
+Luka ˇCehovin Member, IEEE,"
+89945b7cd614310ebae05b8deed0533a9998d212,Divide-and-Conquer Method for L1 Norm Matrix Factorization in the Presence of Outliers and Missing Data,"Divide-and-Conquer Method for L1 Norm Matrix
+Factorization in the Presence of Outliers and
+Missing Data
+Deyu Meng and Zongben Xu"
+89c84628b6f63554eec13830851a5d03d740261a,Image Enhancement and Automated Target Recognition Techniques for Underwater Electro-Optic Imagery,"Image Enhancement and Automated Target Recognition
+Techniques for Underwater Electro-Optic Imagery
+Thomas Giddings (PI), Cetin Savkli and Joseph Shirron
+Metron, Inc.
+1911 Freedom Dr., Suite 800
+Reston, VA 20190
+phone: (703) 437-2428 fax: (703) 787-3518 email:
+Contract Number N00014-07-C-0351
+http://www.metsci.com
+LONG TERM GOALS
+The long-term goal of this project is to provide a flexible, accurate and extensible automated target
+recognition (ATR) system for use with a variety of imaging and non-imaging sensors. Such an ATR
+system, once it achieves a high level of performance, can relieve human operators from the tedious
+business of pouring over vast quantities of mostly mundane data, calling the operator in only when the
+computer assessment involves an unacceptable level of ambiguity. The ATR system will provide most
+leading edge algorithms for detection, segmentation, and classification while incorporating many novel
+algorithms that we are developing at Metron. To address one of the most critical challenges in ATR
+technology, the system will also provide powerful feature extraction routines designed for specific
+applications of current interest.
+OBJECTIVES"
+89c51f73ec5ebd1c2a9000123deaf628acf3cdd8,Face Recognition Based on Nonlinear Feature Approach Eimad,"American Journal of Applied Sciences 5 (5): 574-580, 2008
+ISSN 1546-9239
+© 2008 Science Publications
+Face Recognition Based on Nonlinear Feature Approach
+Eimad E.A. Abusham, 1Andrew T.B. Jin, 1Wong E. Kiong and 2G. Debashis
+Faculty of Information Science and Technology,
+Faculty of Engineering and Technology, Multimedia University (Melaka Campus),
+Jalan Ayer Keroh Lama, 75450 Bukit Beruang, Melaka, Malaysia"
+89c73b1e7c9b5e126a26ed5b7caccd7cd30ab199,Application of an Improved Mean Shift Algorithm in Real-time Facial Expression Recognition,"Application of an Improved Mean Shift Algorithm
+in Real-time Facial Expression Recognition
+School of Computer and Communication, Hunan University of Technology, Hunan, Zhuzhou, 412008,china
+School of Electrical and Information Engineering, Hunan University of Technology, Hunan, Zhuzhou, 412008,china
+School of Computer and Communication, Hunan University of Technology, Hunan, Zhuzhou, 412008,china
+Zhao-yi PENG
+Yu ZHOU
+Yan-hui ZHU
+Email:
+Zhi-qiang WEN
+Email:
+School of Computer and Communication, Hunan University of Technology, Hunan, Zhuzhou, 412008,china
+facial
+real-time
+expression"
+893239f17dc2d17183410d8a98b0440d98fa2679,UvA-DARE ( Digital Academic Repository ) Expression-Invariant Age Estimation,"UvA-DARE (Digital Academic Repository)
+Expression-Invariant Age Estimation
+Alnajar, F.; Lou, Z.; Alvarez Lopez, J.M.; Gevers, T.
+Published in:
+Proceedings of the British Machine Vision Conference 2014
+10.5244/C.28.14
+Link to publication
+Citation for published version (APA):
+Alnajar, F., Lou, Z., Alvarez, J., & Gevers, T. (2014). Expression-Invariant Age Estimation. In M. Valstar, A.
+French, & T. Pridmore (Eds.), Proceedings of the British Machine Vision Conference 2014 (pp. 14.1-14.11).
+BMVA Press. DOI: 10.5244/C.28.14
+General rights
+It is not permitted to download or to forward/distribute the text or part of it without the consent of the author(s) and/or copyright holder(s),
+other than for strictly personal, individual use, unless the work is under an open content license (like Creative Commons).
+Disclaimer/Complaints regulations
+If you believe that digital publication of certain material infringes any of your rights or (privacy) interests, please let the Library know, stating
+your reasons. In case of a legitimate complaint, the Library will make the material inaccessible and/or remove it from the website. Please Ask
+the Library: http://uba.uva.nl/en/contact, or a letter to: Library of the University of Amsterdam, Secretariat, Singel 425, 1012 WP Amsterdam,
+The Netherlands. You will be contacted as soon as possible.
+Download date: 04 Aug 2017"
+89f9225a7223133fa687e1c44bb758c3567f4f26,F3-F: A System Theoretic Approach to Robust Detection Of Potential Threats from Video,"F3-F: A System Theoretic Approach to Robust
+Detection Of Potential Threats from Video"
+8966af6a8049192556e9c9356886a135595c19b8,Temporally Coherent CRP: A Bayesian Non-Parametric Approach for Clustering Tracklets with applications to Person Discovery in Videos,"Temporally Coherent CRP: A Bayesian Non-Parametric Approach for
+Clustering Tracklets with applications to Person Discovery in Videos
+Adway Mitra∗
+Soma Biswas†
+Chiranjib Bhattacharyya‡"
+8949563597276246f9f480d4b38b3b7851fd5495,Toward Efficient and Robust Large-scale Structure-from-motion Systems,"TOWARD EFFICIENT AND ROBUST LARGE-SCALE
+STRUCTURE-FROM-MOTION SYSTEMS
+Jared S. Heinly
+A dissertation submitted to the faculty of the University of North Carolina at Chapel Hill in partial
+fulfillment of the requirements for the degree of Doctor of Philosophy in the Department of
+Computer Science.
+Chapel Hill
+Approved by:
+Jan-Michael Frahm
+Enrique Dunn
+Alexander C. Berg
+Marc Niethammer
+Sameer Agarwal"
+8913a5b7ed91c5f6dec95349fbc6919deee4fc75,BigBIRD: A large-scale 3D database of object instances,"BigBIRD: A Large-Scale 3D Database of Object Instances
+Arjun Singh, James Sha, Karthik S. Narayan, Tudor Achim, Pieter Abbeel"
+89d3a57f663976a9ac5e9cdad01267c1fc1a7e06,Neural Class-Specific Regression for face verification,"Neural Class-Specific Regression for face
+verification
+Guanqun Cao, Alexandros Iosifidis, Moncef Gabbouj"
+89a245eae1e7eda7aa8e360c0cdb4bf6a72da225,A Survey of Pedestrian Detection in Video,"(IJACSA) International Journal of Advanced Computer Science and Applications,
+Vol. 5, No. 10, 2014
+A Survey of Pedestrian Detection in Video
+Achmad Solichin
+Department of Informatics
+Budi Luhur University
+Jakarta, Indonesia
+Agus Harjoko
+Agfianto Eko Putra
+Dept. of Computer Science and
+Dept. of Computer Science and
+Electronics Gadjah Mada University
+Electronics Gadjah Mada University
+Yogyakarta, Indonesia
+Yogyakarta, Indonesia"
+8948e9dce2dfaeb1d93ce146fab5364b6cd342c9,Dual Attention Network for Scene Segmentation,"Dual Attention Network for Scene Segmentation
+Jun Fu, Jing Liu, Haijie Tian, Zhiwei Fang, Hanqing Lu
+{jun.fu, jliu, zhiwei.fang,
+CASIA IVA"
+89bc311df99ad0127383a9149d1684dfd8a5aa34,Towards ontology driven learning of visual concept detectors,"Towards ontology driven learning of
+visual concept detectors
+Sanchit ARORA, Chuck CHO, Paul FITZPATRICK, Franc¸ois SCHARFFE 1
+Dextro Robotics, Inc. 101 Avenue of the Americas, New York, USA"
+8935ffe454758e2e5def0b5190de6e28c350b3b8,Learning to Reconstruct Face Geometries Research,"Learning to Reconstruct Face
+Geometries
+Elad Richardson
+Technion - Computer Science Department - M.Sc. Thesis MSC-2017-11 - 2017"
+8961677300a9ee30ca51e1a3cf9815b4a162265b,Deep Representation Learning with Part Loss for Person Re-Identification,"Deep Representation Learning with Part Loss for Person Re-Identification
+Hantao Yao, Shiliang Zhang, Yongdong Zhang, Jintao Li, Qi Tian"
+89f44f756c230e104cdf2ec0152d5f015586399c,Wide-area Based Traffic Situation Detection at an Ungated Level Crossing,"M. Junghans, et al., Int. J. of Safety and Security Eng., Vol. 6, No. 2 (2016) 383–393
+WIDE-AREA BASED TRAFFIC SITUATION DETECTION
+AT AN UNGATED LEVEL CROSSING
+M. JUNGHANS, A. LEICH, K. KOZEMPEL, H. SAUL & S. KNAKE-LANGHORST
+Institute of Transportation Systems, German Aerospace Center (DLR), Berlin, Germany."
+89e324b9c64a800e57ad82eddecc03f2cc0b7cc5,Long-Term Identity-Aware Multi-Person Tracking for Surveillance Video Summarization,"Long-Term Identity-Aware Multi-Person Tracking
+for Surveillance Video Summarization
+Shoou-I Yu, Yi Yang, Xuanchong Li, and Alexander G. Hauptmann"
+89174737423d87258d3b9d5a660236a0bb66a470,On the usage of Sensor Pattern Noise for Picture-to-Identity linking through social network accounts,"On the usage of Sensor Pattern Noise for Picture-to-Identity linking
+through social network accounts
+Riccardo Satta1 and Pasquale Stirparo1,2
+Institute for the Protection and Security of the Citizen
+Joint Research Centre (JRC), European Commission, Ispra (VA), Italy
+Royal Institute of Technology (KTH), Stockholm, Sweden
+{riccardo.satta,
+Keywords:
+social network, account, Sensor Pattern Noise, identity, linking, digital image forensics, multimedia forensics"
+8929e704b6af7f09ad027714b75972cb9df57483,Image Inpainting for Irregular Holes Using Partial Convolutions,
+894f1e924dfb8dfb843c42835fa79e386ac07383,Dimensional emotion recognition using visual and textual cues,"Dimensional emotion recognition using visual and textual cues
+Pedro M. Ferreira1, Diogo Pernes2, Kelwin Fernandes1, Ana Rebelo3 and Jaime S. Cardoso1"
+898a66979c7e8b53a10fd58ac51fbfdb6e6e6e7c,Dynamic vs. Static Recognition of Facial Expressions,"Dynamic vs. Static Recognition of Facial
+Expressions
+No Author Given
+No Institute Given"
+89d590d7013433304aae1c97debd257b8dd801fa,Outdoor Human Motion Capture by Simultaneous Optimization of Pose and Camera Parameters,"Volume xx (200y), Number z, pp. 1–13
+Outdoor Human Motion Capture by Simultaneous
+Optimization of Pose and Camera Parameters
+A. Elhayek C. Stoll K. I. Kim and C. Theobalt
+Max-Planck-Institute for Informatics, Saarbrücken, Germany
+Figure 1: Examples of multi-person tracking with moving cameras. (Left two images) two actors, and two moving and 3 static
+cameras (Soccer1). (Right two images) One actor, and three moving and two static cameras (Walk2)."
+89d7cc9bbcd2fdc4f4434d153ecb83764242227b,Face-Name Graph Matching For The Personalities In Movie Screen,"Einstein.J, DivyaBaskaran / International Journal of Engineering Research and Applications
+(IJERA) ISSN: 2248-9622 www.ijera.com
+Vol. 3, Issue 2, March -April 2013, pp.351-355
+Face-Name Graph Matching For The Personalities In Movie
+Screen
+*(Asst. Professor, Dept. of IT, VelTech HighTech Dr. Rangarajan Dr.Sakunthala Engineering College,
+Einstein.J*, DivyaBaskaran**
+** (Final Year Student, M.Tech IT, Vel Tech Dr. RR &Dr. SR Technical University, Chennai.)
+Chennai.)"
+890103cb8d3d869298421da817d0a181487ec79a,Learning the Hierarchical Parts of Objects by Deep Non-Smooth Nonnegative Matrix Factorization,"Learning the Hierarchical Parts of Objects by Deep
+Non-Smooth Nonnegative Matrix Factorization
+Jinshi Yu, Guoxu Zhou, Andrzej Cichocki
+IEEE Fellow, and Shengli Xie IEEE Senior Member"
+89358e65aec4d6665098c7dbbe3975296cc7a2fc,Discriminative Feature Based Algorithm for Detecting And Classifying Frames In Image Sequences,"M. A. A Victoria et al. Int. Journal of Engineering Research and Applications www.ijera.com
+Vol. 3, Issue 5, Sep-Oct 2013, pp.446-450
+RESEARCH ARTICLE OPEN ACCESS
+Discriminative Feature Based Algorithm for Detecting And
+Classifying Frames In Image Sequences
+M. Antony Arockia Victoria, R. Sahaya Jeya Sutha
+B.E,M.E. Assistant Professor, Department of MCA, Dr.Sivanthi Aditanar College of Engineering,
+MCA,M.Phil. Assistant Professor, Department of MCA, Dr. Sivanthi Aditanar College of Engineering"
+8954d46e1d7a11b20b2c688e5fb8bce4901650d6,Looking at movies and cartoons: eye-tracking evidence from Williams syndrome and autism.,"Looking at Movies and Cartoons: Eye-tracking evidence from Williams syndrome
+and Autism
+Deborah M Riby and Peter J B Hancock
+Journal of Intellectual Disability Research
+http://dx.doi.org/10.1111/j.1365-2788.2008.01142.x"
+89d02ceae9e972eca633ae6ff9da9ee8a85fb171,Using Explanations to Improve Ensembling of Visual Question Answering Systems,"In Proceedings of the IJCAI 2017 Workshop on Explainable Artificial
+Intelligence (XAI), pp. 43-47, Melbourne, Australia, August 2017."
+89742f28108330f97df94df98f73b459b02ca33d,Query Specific Semantic Signature for Improved Web Image Re - Ranking,"International Journal of Engineering and Technical Research (IJETR)
+ISSN: 2321-0869, Volume-3, Issue-3, March 2015
+Query Specific Semantic Signature for Improved
+Web Image Re-Ranking
+Joshith.K, S.Krishnamoorthi"
+89475b4d09e541e09becb9aa134c8de117725205,Automatic Analysis of Facial Expressions Based on Deep Covariance Trajectories,"Automatic Analysis of Facial Expressions Based on
+Deep Covariance Trajectories
+Naima Otberdout, Member, IEEE, Anis Kacem, Member, IEEE, Mohamed Daoudi, Senior, IEEE,
+Lahoucine Ballihi, Member, IEEE, and Stefano Berretti, Senior, IEEE"
+891b10c4b3b92ca30c9b93170ec9abd71f6099c4,2 New Statement for Structured Output Regression Problems,"Facial landmark detection using structured output deep
+neural networks
+Soufiane Belharbi ∗1, Cl´ement Chatelain∗1, Romain H´erault∗1, and S´ebastien
+Adam∗2
+LITIS EA 4108, INSA de Rouen, Saint ´Etienne du Rouvray 76800, France
+LITIS EA 4108, UFR des Sciences, Universit´e de Rouen, France.
+September 24, 2015"
+455943924a98593655ae7197ee3835b9f6a3b778,Visual SLAM for Automated Driving: Exploring the Applications of Deep Learning,"Visual SLAM for Automated Driving:
+Exploring the Applications of Deep Learning
+Stefan Milz, Georg Arbeiter, Christian Witt
+Valeo Schalter und Sensoren GmbH
+Bassam Abdallah
+Valeo Vision, Bobigny
+stefan.milz, georg.arbeiter,
+Senthil Yogamani
+Valeo Vision Systems, Ireland"
+45379046c6c1311dfa6d8e1941b3e2c7971ca2bc,An alternating direction and projection algorithm for structure-enforced matrix factorization,"Noname manuscript No.
+(will be inserted by the editor)
+An Alternating Direction and Projection Algorithm
+for Structure-enforced Matrix Factorization
+Lijun Xu · Bo Yu · Yin Zhang
+Received: date / Accepted: date"
+4572725e98f3e1b6f258c03643d74b69982aa39a,Semantic Cluster Unary Loss for Efficient Deep Hashing,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+Semantic Cluster Unary Loss for Efficient Deep
+Hashing
+Shifeng Zhang, Jianmin Li, and Bo Zhang
+hashing [15], [22], [27], [32], [38], [54] and semi-supervised
+hashing [43]. Experiments convey that hashcodes learned by
+(semi-)supervised hashing methods contain more semantic
+information than those learned by the unsupervised ones."
+45ede580b1e402aae6832256586211a47c53afe3,Biometric Application: Texture and Shape Based 3d Face Recognition,"BIOMETRIC APPLICATION: TEXTURE AND SHAPE BASED 3D FACE
+RECOGNITION
+P.Manju Bala1
+Senior Assistant professor,
+A.Kalaiselvi2
+Assistant Professor,
+Department of Computer Science and Engineering,
+Department of Computer Science and Engineering,
+IFET College of Engineering,
+Villupuram."
+451bf4124ec8a55b9112cf9cc167d304fa004924,Modelling State of Interaction from Head Poses for Social Human-Robot Interaction,"Modelling State of Interaction from Head Poses
+for Social Human-Robot Interaction
+Andre Gaschler
+fortiss GmbH
+Guerickstr. 25
+80805 München, Germany
+Ingmar Kessler
+fortiss GmbH
+Guerickstr. 25
+80805 München, Germany
+Kerstin Huth
+Universität Bielefeld
+Universitätsstr. 25
+3615 Bielefeld, Germany
+Jan de Ruiter
+Universität Bielefeld
+Universitätsstr. 25
+3615 Bielefeld, Germany
+ielefeld.de
+Manuel Giuliani"
+45aefa11101129862e323958b62505700bc281ae,Unsupervised learning in generative models of occlusion,"Unsupervised Learning in Generative
+Models of Occlusion
+Dissertation
+zur Erlangung des Doktorgrades
+der Naturwissenschaften
+vorgelegt beim Fachbereich Physik
+der Johann Wolfgang Goethe-Universität
+in Frankfurt am Main
+Marc Henniges
+aus Frankfurt am Main
+Frankfurt (2012)
+(D 30)"
+45c340c8e79077a5340387cfff8ed7615efa20fd,Assessment of the Emotional States of Students during e-Learning,
+457abee61182a320b301d73ecceff00d055f596e,Face Recognition Using Line Edge Map,"IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. 24, NO. 6,
+JUNE 2002
+Face Recognition Using Line Edge Map
+Yongsheng Gao, Member, IEEE, and Maylor K.H. Leung, Member, IEEE"
+450e9f80a273df2cdaafd9ae3a9ff149950cc834,Human Pose Estimation using Histograms of Edge Directions,"Human Pose Estimation
+using Histograms of Edge Directions
+Andrès Koetsier
+University of Twente HMI Department"
+45e7ddd5248977ba8ec61be111db912a4387d62f,Adversarial Learning of Structure-Aware Fully Convolutional Networks for Landmark Localization,"CHEN ET AL.: ADVERSARIAL POSENET
+Adversarial Learning of Structure-Aware Fully
+Convolutional Networks for Landmark
+Localization
+Yu Chen1, Chunhua Shen2, Hao Chen2, Xiu-Shen Wei3, Lingqiao Liu2 and Jian Yang1"
+45f884c4c3bcdabdca46ee0e3794ce1631b9c558,Vision-based assessment of parkinsonism and levodopa-induced dyskinesia with pose estimation,"Vision-Based Assessment of Parkinsonism and
+Levodopa-Induced Dyskinesia with Deep
+Learning Pose Estimation
+Michael H. Li, Tiago A. Mestre, Susan H. Fox, Babak Taati*"
+4526992d4de4da2c5fae7a5ceaad6b65441adf9d,System for Medical Mask Detection in the Operating Room Through Facial Attributes,"System for Medical Mask Detection
+in the Operating Room Through
+Facial Attributes
+A. Nieto-Rodr´ıguez, M. Mucientes(B), and V.M. Brea
+Center for Research in Information Technologies (CiTIUS),
+University of Santiago de Compostela, Santiago de Compostela, Spain"
+45efd6c2dd4ca19eed38ceeb7c2c5568231451e1,Comparative Analysis of Statistical Approach for Face Recognition,"Comparative Analysis of Statistical Approach
+for Face Recognition
+S.Pradnya1, M.Riyajoddin2, M.Janga Reddy3
+CMR Institute of Technology, Hyderabad, (India)"
+4560491820e0ee49736aea9b81d57c3939a69e12,Investigating the Impact of Data Volume and Domain Similarity on Transfer Learning Applications,"Investigating the Impact of Data Volume and
+Domain Similarity on Transfer Learning
+Applications
+Michael Bernico, Yuntao Li, and Dingchao Zhang
+State Farm Insurance, Bloomington IL 61710, USA,"
+4571626d4d71c0d11928eb99a3c8b10955a74afe,Geometry Guided Adversarial Facial Expression Synthesis,"Geometry Guided Adversarial Facial Expression Synthesis
+Lingxiao Song1,2
+Zhihe Lu1,3 Ran He1,2,3
+Zhenan Sun1,2
+Tieniu Tan1,2,3
+National Laboratory of Pattern Recognition, CASIA
+Center for Research on Intelligent Perception and Computing, CASIA
+Center for Excellence in Brain Science and Intelligence Technology, CAS"
+451d777ee33833a3b5eb6ba5292fae162c6d265f,Exploiting Feature Correlations by Brownian Statistics for People Detection and Recognition,"TRANSACTIONS ON CYBERNETICS
+Exploiting Feature Correlations by Brownian
+Statistics for People Detection and Recognition
+Sławomir B ˛ak1, Marco San Biagio2, Ratnesh Kumar1, Vittorio Murino2 and François Brémond1
+STARS Lab, INRIA Sophia Antipolis Méditerranée, Sophia Antipolis, 06902 Valbonne, France
+Pattern Analysis and Computer Vision (PAVIS), IIT IStituto Italiano di Tecnologia, 16163 Genova, Italy
+Characterizing an image region by its feature inter-correlations is a modern trend in computer vision. In this paper, we introduce
+a new image descriptor that can be seen as a natural extension of a covariance descriptor with the advantage of capturing nonlinear
+and non-monotone dependencies. Inspired from the recent advances in mathematical statistics of Brownian motion, we can express
+highly complex structural information in a compact and computationally efficient manner. We show that our Brownian covariance
+descriptor can capture richer image characteristics than the covariance descriptor. Additionally, a detailed analysis of the Brownian
+manifold reveals that in opposite to the classical covariance descriptor, the proposed descriptor lies in a relatively flat manifold,
+which can be treated as a Euclidean. This brings significant boost in the efficiency of the descriptor. The effectiveness and the
+generality of our approach is validated on two challenging vision tasks, pedestrian classification and person re-identification. The
+experiments are carried out on multiple datasets achieving promising results.
+Index Terms—brownian descriptor, covariance descriptor, pedestrian detection, re-identification.
+I. INTRODUCTION
+D ESIGNING proper image descriptors is a crucial step
+in computer vision applications, including scene detec-
+tion, target tracking and object recognition. A good descrip-"
+45e81d04d01ef1db78a04ef7a9472fd4cd6de84c,Variational learning of finite Beta-Liouville mixture models using component splitting,"Variational Learning of Finite Beta-Liouville Mixture Models Using
+Component Splitting
+Wentao Fan and Nizar Bouguila"
+4583d7d1d76dfe18e86e91f7438ce1a03cdcf68f,"""3D Face"": Biometric Template Protection for 3D Face Recognition","\3D Face"": Biometric Template Protection for
+3D Face Recognition
+E.J.C. Kelkboom, B. G(cid:127)okberk, T.A.M. Kevenaar, A.H.M. Akkermans, and M.
+van der Veen
+Philips Research, High-Tech Campus 34, 5656AE, Eindhoven
+femile.kelkboom, berk.gokberk, tom.kevenaar, ton.h.akkermans,"
+454ec30d0a491800458a52a5aa655eb76a28f4f5,3-D Object Recognition Using 2-D Views,"-D Object Recognition Using 2-D Views
+Wenjing Li, Member, IEEE, George Bebis, Member, IEEE, and Nikolaos G. Bourbakis, Fellow, IEEE"
+45bedfcb562e48a64436ea3131bc91098eb93dab,Incremental update of biometric models in face-based video surveillance,"Incremental Update of Biometric Models in
+Face-Based Video Surveillance
+Miguel De-la-Torre∗†, Eric Granger∗, Paulo V. W. Radtke∗, Robert Sabourin∗, Dmitry O. Gorodnichy‡
+´Ecole de technologie sup´erieure, Montr´eal, Canada
+Centro Universitario de Los Valles, Universidad de Guadalajara, Ameca, M´exico
+Science and Engineering Directorate, Canada Border Services Agency, Ottawa, Canada"
+4534d78f8beb8aad409f7bfcd857ec7f19247715,Transformation-Based Models of Video Sequences,"Under review as a conference paper at ICLR 2017
+TRANSFORMATION-BASED MODELS OF VIDEO
+SEQUENCES
+Joost van Amersfoort ∗, Anitha Kannan, Marc’Aurelio Ranzato,
+Arthur Szlam, Du Tran & Soumith Chintala
+Facebook AI Research
+{akannan, ranzato, aszlam, trandu,"
+453e311c6de1285cd5ea6d93fd78a636eac0ba82,Multi patches 3D facial representation for person authentication using AdaBoost,"Multi patches 3D facial representation for Person
+Authentication using AdaBoost
+Lahoucine Ballihi, Boulbaba Ben Amor, Mohamed Daoudi, Anuj Srivastava
+To cite this version:
+Lahoucine Ballihi, Boulbaba Ben Amor, Mohamed Daoudi, Anuj Srivastava. Multi patches 3D facial
+representation for Person Authentication using AdaBoost. I/V Communications and Mobile Network
+(ISVC), 2010 5th International Symposium on, Sep 2010, Rabat, Morocco. pp.1-4, 2010. <hal-
+00665904>
+HAL Id: hal-00665904
+https://hal.archives-ouvertes.fr/hal-00665904
+Submitted on 3 Feb 2012
+HAL is a multi-disciplinary open access
+archive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+abroad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,"
+459e840ec58ef5ffcee60f49a94424eb503e8982,One-shot Face Recognition by Promoting Underrepresented Classes,"One-shot Face Recognition by Promoting Underrepresented Classes
+Yandong Guo, Lei Zhang
+Microsoft
+One Microsoft Way, Redmond, Washington, United States
+{yandong.guo,"
+45954ed44b99edc5f0d1100a1ea33d856602d78a,Retinal Vessel Segmentation under Extreme Low Annotation: A Generative Adversarial Network Approach,"Retinal Vessel Segmentation under Extreme Low
+Annotation: A Generative Adversarial Network
+Approach
+Avisek Lahiri*, Vineet Jain*, Arnab Mondal*, and Prabir Kumar Biswas, Senior Member, IEEE"
+451c42da244edcb1088e3c09d0f14c064ed9077e,Using subclasses in discriminant non-negative subspace learning for facial expression recognition,"© EURASIP, 2011 - ISSN 2076-1465
+9th European Signal Processing Conference (EUSIPCO 2011)
+INTRODUCTION"
+456ccc8bbb538037ff00fabf25afb2aceb39149e,Computational Aspects of the Hausdorff Distance in Unbounded Dimension,"Journal of Computational Geometry
+COMPUTATIONAL ASPECTS OF THE HAUSDORFF DISTANCE
+IN UNBOUNDED DIMENSION
+Stefan K¨onig∗"
+4568063b7efb66801e67856b3f572069e774ad33,Correspondence driven adaptation for human profile recognition,"Correspondence Driven Adaptation for Human Profile Recognition
+Ming Yang1, Shenghuo Zhu1, Fengjun Lv2, Kai Yu1
+NEC Laboratories America, Inc.
+Huawei Technologies (USA)
+Cupertino, CA 95014
+Santa Clara, CA 95050"
+45c4514ca2b7903b4c8f43e396bce73f014b72be,Parallel Feature Extraction through Preserving Global and Discriminative Property for Kernel-Based Image Classification,"Journal of Information Hiding and Multimedia Signal Processing
+Ubiquitous International
+(cid:13)2015 ISSN 2073-4212
+Volume 6, Number 5, September 2015
+Parallel Feature Extraction through Preserving
+Global and Discriminative Property for Kernel-Based
+Image Classification
+Xun-Fei Liu, and Xiang-Xian Zhu
+Department of Electrical Engineering
+Suzhou Institute of Industrial Technology
+Suzhou, 215104, China
+Received May, 2015; revised June, 2015"
+4563cbfbdba1779fc598081071ae40be021cb81d,Adversarial Attacks on Variational Autoencoders,"Adversarial Attacks on Variational Autoencoders
+George Gondim-Ribeiro, Pedro Tabacof, and Eduardo Valle
+RECOD Lab. — DCA / School of Electrical and Computer Engineering (FEEC)
+University of Campinas (Unicamp)
+Campinas, SP, Brazil
+{gribeiro, tabacof,"
+4541f3ee510b593243ff9a66d3586ef9125c2931,Development of an Efficient Face Recognition System based on Linear and Nonlinear Algorithms,"Development of an Efficient Face Recognition System based on Linear and Nonlinear Algorithms
+{tag} {/tag}
+International Journal of Computer Applications
+Foundation of Computer Science (FCS), NY, USA
+Volume 134
+Number 7
+Year of Publication: 2016
+Authors:
+Filani Araoluwa S., Adetunmbi Adebayo O.
+10.5120/ijca2016907932
+{bibtex}2016907932.bib{/bibtex}"
+458e44d20f7a85a0ce378b48a41febb16383c075,Tracking Interacting Objects in Image Sequences,"Tracking Interacting Objects in Image Sequences
+THÈSE NO 6632 (2015)
+PRÉSENTÉE LE 3 JUILLET 2015
+À LA FACULTÉ INFORMATIQUE ET COMMUNICATIONS
+LABORATOIRE DE VISION PAR ORDINATEUR
+PROGRAMME DOCTORAL EN INFORMATIQUE ET COMMUNICATIONS
+ÉCOLE POLYTECHNIQUE FÉDÉRALE DE LAUSANNE
+POUR L'OBTENTION DU GRADE DE DOCTEUR ÈS SCIENCES
+Xinchao WANG
+acceptée sur proposition du jury:
+Prof. W. Gerstner, président du jury
+Prof. P. Fua, directeur de thèse
+Prof. J. Sullivan, rapporteuse
+Prof. P. Dillenbourg, rapporteur
+Prof. S. Roth, rapporteur
+Suisse"
+456f00e213e03058a056069fa75c34929cf7d4e9,Detecting ground control points via convolutional neural network for stereo matching,"Noname manuscript No.
+(will be inserted by the editor)
+Detecting Ground Control Points via Convolutional Neural Network for
+Stereo Matching
+Zhun Zhong · Songzhi Su · Donglin Cao · Shaozi Li
+Received: date / Accepted: date"
+4599b9d9a379385a3d31681696d2523beeb0e9c1,LG ] 8 F eb 2 01 6 A Latent-Variable Grid Model,"A Latent-Variable Grid Model
+Rajasekaran Masatran
+Computer Science and Engineering, Indian Institute of Technology Madras
+FREESHELL · ORG"
+45e459462a80af03e1bb51a178648c10c4250925,LCrowdV: Generating Labeled Videos for Simulation-based Crowd Behavior Learning,"LCrowdV: Generating Labeled Videos for
+Simulation-based Crowd Behavior Learning
+Ernest Cheung1, Tsan Kwong Wong1, Aniket Bera1, Xiaogang Wang2, and
+Dinesh Manocha1
+The University of North Carolina at Chapel Hill"
+458677de7910a5455283a2be99f776a834449f61,Face Image Retrieval Using Facial Attributes By K-Means,"Face Image Retrieval Using Facial Attributes By
+K-Means
+[1]I.Sudha, [2]V.Saradha, [3]M.Tamilselvi, [4]D.Vennila
+[1]AP, Department of CSE ,[2][3][4] B.Tech(CSE)
+Achariya college of Engineering Technology-
+Puducherry"
+45a6333fc701d14aab19f9e2efd59fe7b0e89fec,Dataset Creation for Gesture Recognition,"HAND POSTURE DATASET CREATION FOR GESTURE
+RECOGNITION
+Luis Anton-Canalis
+Instituto de Sistemas Inteligentes y Aplicaciones Numericas en Ingenieria
+Campus Universitario de Tafira, 35017 Gran Canaria, Spain
+Elena Sanchez-Nielsen
+Departamento de E.I.O. y Computacion
+8271 Universidad de La Laguna, Spain
+Keywords:
+Image understanding, Gesture recognition, Hand dataset."
+4562272025a5bcdb321408116c699798a7997847,Leveraging RGB-D Data: Adaptive fusion and domain adaptation for object detection,"Leveraging RGB-D Data: Adaptive Fusion and
+Domain Adaptation for Object Detection
+Luciano Spinello and Kai O. Arras
+Social Robotics Lab, University of Freiburg, Germany
+{spinello,"
+457d3ca924afc21719d19175caf285aa575d1c90,Analyzing Structured Scenarios by Tracking People and Their Limbs,
+45e2aa7706fcedcbb2d93304a9824fe762b8b3b0,DAC-SDC Low Power Object Detection Challenge for UAV Applications,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2018
+DAC-SDC Low Power Object Detection
+Challenge for UAV Applications
+Xiaowei Xu, Member, IEEE, Xinyi Zhang, Student Member, IEEE, Bei Yu, Senior Member, IEEE, Xiaobo
+Sharon Hu, Fellow, IEEE, Christopher Rowen, Fellow, IEEE, Jingtong Hu, Member, IEEE, and Yiyu
+Shi, Senior Member, IEEE"
+456983805a8781d6429bed1ed66dc9f3902767af,Seeing with Humans: Gaze-Assisted Neural Image Captioning,"Seeing with Humans: Gaze-Assisted
+Neural Image Captioning
+Yusuke Sugano and Andreas Bulling"
+45ca696076e9c073e6cf699766f808899589bc88,Aalborg Universitet Thermal Tracking of Sports Players,"Aalborg Universitet
+Thermal Tracking of Sports Players
+Gade, Rikke; Moeslund, Thomas B.
+Published in:
+Sensors
+DOI (link to publication from Publisher):
+10.3390/s140813679
+Publication date:
+Document Version
+Publisher's PDF, also known as Version of record
+Link to publication from Aalborg University
+Citation for published version (APA):
+Gade, R., & Moeslund, T. B. (2014). Thermal Tracking of Sports Players. Sensors, 14(8), 13679-13691. DOI:
+10.3390/s140813679
+General rights
+Copyright and moral rights for the publications made accessible in the public portal are retained by the authors and/or other copyright owners
+and it is a condition of accessing publications that users recognise and abide by the legal requirements associated with these rights.
+? Users may download and print one copy of any publication from the public portal for the purpose of private study or research.
+? You may not further distribute the material or use it for any profit-making activity or commercial gain
+? You may freely distribute the URL identifying the publication in the public portal ?"
+458713d5c1dd8ff95865302e51f0f8df22204d91,A Review on Face Recognition Using Different Pre- Processing Methods in Images Captured under Various Illumination and Posing Conditions,
+1f98daf89f9a3dba655f0a4eb4164118ea6226ef,"Parallel k-Means Image Segmentation Using Sort, Scan and Connected Components on a GPU","The original publication is available at: www.springerlink.com
+Parallel k-Means Image Segmentation Using
+Sort, Scan & Connected Components on a GPU
+Michael Backer, Jan T¨unnermann, and B¨arbel Mertsching
+GET Lab, University of Paderborn, Pohlweg 47-49, 33098 Paderborn, Germany
+{backer, tuennermann,
+http://getwww.upb.de"
+1ffe20eb32dbc4fa85ac7844178937bba97f4bf0,Face Clustering: Representation and Pairwise Constraints,"Face Clustering: Representation and Pairwise
+Constraints
+Yichun Shi, Student Member, IEEE, Charles Otto, Member, IEEE, and Anil K. Jain, Fellow, IEEE"
+1ff616ae8b61f8167f2d626b7c1a36e018b23e94,Learning with Parsimony for Large Scale Object Detection and Discovery,"Learning with Parsimony for Large Scale Object
+Detection and Discovery
+Hyun Oh Song
+Electrical Engineering and Computer Sciences
+University of California at Berkeley
+Technical Report No. UCB/EECS-2014-148
+http://www.eecs.berkeley.edu/Pubs/TechRpts/2014/EECS-2014-148.html
+August 12, 2014"
+1f7cd3343f4b6b0f936c94e3a45c477c014e2b5c,3D Human Pose Estimation on a Configurable Bed from a Pressure Image,"3D Human Pose Estimation on a Configurable Bed from a Pressure Image
+Henry M. Clever*, Ariel Kapusta, Daehyung Park, Zackory Erickson, Yash Chitalia, Charles C. Kemp"
+1f8304f4b51033d2671147b33bb4e51b9a1e16fe,Beyond Trees: MAP Inference in MRFs via Outer-Planar Decomposition,"Noname manuscript No.
+(will be inserted by the editor)
+Beyond Trees:
+MAP Inference in MRFs via Outer-Planar Decomposition
+Dhruv Batra · Andrew C. Gallagher · Devi Parikh · Tsuhan Chen
+Received: date / Accepted: date"
+1f9ae272bb4151817866511bd970bffb22981a49,An Iterative Regression Approach for Face Pose Estimation from RGB Images,"An Iterative Regression Approach for Face Pose Estima-
+tion from RGB Images
+Wenye He
+This paper presents a iterative optimization method, explicit shape regression, for face pose
+detection and localization. The regression function is learnt to find out the entire facial shape
+and minimize the alignment errors. A cascaded learning framework is employed to enhance
+shape constraint during detection. A combination of a two-level boosted regression, shape
+performance. In this paper, we have explain the advantage of ESR for deformable object like
+face pose estimation and reveal its generic applications of the method. In the experiment,
+we compare the results with different work and demonstrate the accuracy and robustness in
+different scenarios.
+Introduction
+Pose estimation is an important problem in computer vision, and has enabled many practical ap-
+plication from face expression 1 to activity tracking 2. Researchers design a new algorithm called
+explicit shape regression (ESR) to find out face alignment from a picture 3. Figure 1 shows how
+the system uses ESR to learn a shape of a human face image. A simple way to identify a face is to
+find out facial landmarks like eyes, nose, mouth and chin. The researchers define a face shape S
+and S is composed of Nf p facial landmarks. Therefore, they get S = [x1, y1, ..., xNf p, yNf p]T . The
+objective of the researchers is to estimate a shape S of a face image. The way to know the accuracy"
+1f2f712253a68cd9f8172de19297e35cec7919dd,Vision System of Facial Robot SHFR- III for Human-robot Interaction,
+1f8eefd6dd2f20fd78a67dfdfe33022c6f9981d6,Unsupervised Features for Facial Expression Intensity Estimation over Time,
+1fef45786e707e6b9b8517b0403e596ecbdea6a5,Sketch-based manga retrieval using manga109 dataset,"JOURNAL OF LATEX CLASS FILES, VOL. 11, NO. 4, DECEMBER 2012
+Sketch-based Manga Retrieval
+using Manga109 Dataset
+Yusuke Matsui, Member, IEEE, Kota Ito, Yuji Aramaki, Toshihiko Yamasaki, Member, IEEE,
+and Kiyoharu Aizawa, Senior Member, IEEE,"
+1fc249ec69b3e23856b42a4e591c59ac60d77118,Evaluation of a 3D-aided pose invariant 2D face recognition system,"Evaluation of a 3D-aided Pose Invariant 2D Face Recognition System
+Xiang Xu, Ha A. Le, Pengfei Dou, Yuhang Wu, Ioannis A. Kakadiaris
+{xxu18, hale4, pdou, ywu35,
+Computational Biomedicine Lab
+800 Calhoun Rd. Houston, TX, USA"
+1f4aa1d14bb99e152dd1c7ac3cfd5afa8f6a012f,Learning Discriminative Part Detectors for Image Classification and Cosegmentation,"Learning Discriminative Part Detectors for Image Classification and
+Cosegmentation
+Jian Sun
+Jean Ponce
+Xi’an Jiaotong University, INRIA, ∗
+´Ecole Normale Sup´erieure, *
+This is a preliminary version accepted for publication to ICCV 2013"
+1fbb66a9407470e1da332c4ef69cdc34e169a3d7,A Baseline for General Music Object Detection with Deep Learning,"Article
+A Baseline for General Music Object Detection with
+Deep Learning
+Alexander Pacha 1,*
+, Jan Hajiˇc, Jr. 2 and Jorge Calvo-Zaragoza 3
+Institute for Visual Computing and Human-Centered Technology, TU Wien, 1040 Wien, Austria
+Institute of Formal and Applied Linguistics, Charles University, 116 36 Staré Mˇesto, Czech Republic;
+PRHLT Research Center, Universitat Politècnica de València, 46022 València, Spain;
+* Correspondence:
+Received: 31 July 2018; Accepted: 26 August 2018; Published: 29 August 2018"
+1fbde67e87890e5d45864e66edb86136fbdbe20e,The Action Similarity Labeling Challenge,"The Action Similarity Labeling Challenge
+Orit Kliper-Gross, Tal Hassner, and
+Lior Wolf, Member, IEEE"
+1ff057f2fb8258bd5359cded950a3627bd8ee1f4,Low-rank embedding for semisupervised face classification,"Low-Rank Embedding for Semisupervised Face Classification
+Gaurav Srivastava, Ming Shao and Yun Fu∗"
+1f41a96589c5b5cee4a55fc7c2ce33e1854b09d6,Demographic Estimation from Face Images: Human vs. Machine Performance,"Demographic Estimation from Face Images:
+Human vs. Machine Performance
+Hu Han, Member, IEEE, Charles Otto, Student Member, IEEE, Xiaoming Liu, Member, IEEE
+and Anil K. Jain, Fellow, IEEE"
+1f35f0400d6d112e3b27231d0d9241258efd782d,Learning to Rank Using High-Order Information,"Learning to Rank Using High-Order Information
+Puneet Kumar Dokania1, Aseem Behl2, C.V. Jawahar2, and M. Pawan Kumar1
+Ecole Centrale de Paris
+INRIA Saclay, France
+IIIT Hyderabad, India"
+1fcd7978c6956fd9a0d752ecc9f5ac1a1b2896e9,Impact of Face Registration Errors on Recognition,"Impact of Face Registration Errors on Recognition
+E. Rentzeperis, A. Stergiou, A. Pnevmatikakis and L. Polymenakos
+Athens Information Technology, Autonomic and Grid Computing,
+Markopoulou Ave., 19002 Peania, Greece
+{eren, aste, apne,
+http://www.ait.edu.gr/research/RG1/overview.asp"
+1f5e47ad5490a63c7bea79000999b711055fbf2a,Aggregated Channels Network for Real-Time Pedestrian Detection,"Aggregated Channels Network for Real-Time Pedestrian Detection
+Farzin Ghorban1,2, Javier Marín3, Yu Su2, Alessandro Colombo2, Anton Kummert1
+Universität Wuppertal, 2Delphi Deutschland, 3Massachusetts Institute of Technology"
+1f5c409e9b6aec60003b5d4534373f9b07ff8443,Saliency Weighted Features for Person Re-identification,"Saliency Weighted Features for Person
+Re-Identification
+Niki Martinel, Christian Micheloni and Gian Luca Foresti
+Department of Mathematics and Computer Science
+University of Udine - 33100, Udine, Italy"
+1fc952fef09d63c61b9b8828f872b7a018eefac1,QUEST: Quadriletral Senary bit Pattern for Facial Expression Recognition,"ACCEPTED IN SMC IEEE CONFERENCE 2018 (PAPER ID: 13628)
+QUEST:Quadriletral Senary bit Pattern for Facial
+Expression Recognition
+Monu Verma1
+Prafulla Saxena2
+S. K. Vipparthi3
+Gridhari Singh4
+Dept. of Computer Science and Engineering, Malaviya national Institute of Technology, Jaipur, India
+improves"
+1f65cbc7894323a85f2964d05ae937070e70e43b,Eliminating Background-bias for Robust Person Re-identification,"Eliminating Background-bias for Robust Person Re-identification
+Maoqing Tian1, Shuai Yi1, Hongsheng Li2, Shihua Li3,
+Xuesen Zhang1, Jianping Shi1, Junjie Yan1, Xiaogang Wang2
+SenseTime Research, 2 Chinese University of Hong Kong, 3 Shenzhen Municipal Public Security Bureau"
+1f4fff64adef5ec6ae21e8647d5a042bf71d64d9,Human detection in surveillance videos and its applications - a review,"Paul et al. EURASIP Journal on Advances in Signal Processing 2013, 2013:176
+http://asp.eurasipjournals.com/content/2013/1/176
+R EV I E W
+Human detection in surveillance videos and its
+applications - a review
+Manoranjan Paul*, Shah M E Haque and Subrata Chakraborty
+Open Access"
+1f18708439ba1dadd81568e102216731d44340d5,Sparse Quantization for Patch Description,"Sparse Quantization for Patch Description
+Xavier Boix
+Michael Gygli
+Gemma Roig
+Luc Van Gool
+Computer Vision Lab, ETH Zurich, Switzerland"
+1f8e44593eb335c2253d0f22f7f9dc1025af8c0d,Fine-Tuning Regression Forests Votes for Object Alignment in the Wild,"Fine-tuning regression forests votes for object alignment in the wild.
+Yang, H; Patras, I
+© 2017 IEEE. Personal use of this material is permitted. Permission from IEEE must be
+obtained for all other uses, in any current or future media, including reprinting/republishing
+this material for advertising or promotional purposes, creating new collective works, for resale
+or redistribution to servers or lists, or reuse of any copyrighted component of this work in
+other works.
+For additional information about this publication click this link.
+http://qmro.qmul.ac.uk/xmlui/handle/123456789/22607
+Information about this research object was correct at the time of download; we occasionally
+make corrections to records, please therefore check the published record when citing. For
+more information contact"
+1f6dd0ff2e8493b81e3699b520193198d4eed4e6,Shaogang Gong Part I Features and Representations 1 Discriminative Image Descriptors for Person Re-identification . . . . . 25 7 One-shot Person Re-identification with a Consumer Depth Camera . 163 List of Contributors the Re-identification Challenge,"Shaogang Gong
+Marco Cristani
+Shuicheng Yan
+Chen Change Loy (Eds.)
+PERSON RE-IDENTIFICATION
+October 10, 2013
+Springer"
+1fa9c5af78b3ca04476f4ee6910684dc19008f5e,Supplementary Material : Cross-Dataset Adaptation for Visual Question Answering,"Supplementary Material:
+Cross-Dataset Adaptation for Visual Question Answering
+Wei-Lun Chao∗
+U. of Southern California
+Los Angeles, CA
+Hexiang Hu∗
+Los Angeles, CA
+U. of Southern California
+U. of Southern California
+Fei Sha
+Los Angeles, CA
+We provide contents omitted in the main text.
+• Section 1: details on Name that dataset! (Sect. 3.2 of
+the main text).
+• Section 2: details on the proposed domain adaptation
+algorithm (Sect. 4.2 and 4.3 of the main text).
+• Section 3: details on the experimental setup (Sect. 5.2
+of the main text).
+• Section 4: additional experimental results (Sect. 5.3
+and 5.4 of the main text)."
+1fed6a571d9f688e18960e560d9441f5c5e3e2bd,Scalable Active Learning for Multiclass Image Classification,"MITSUBISHI ELECTRIC RESEARCH LABORATORIES
+http://www.merl.com
+Scalable Active Learning for Multi-Class
+Image Classification
+Joshi, A.J.; Porikli, F.; Papanikolopoulos, N.
+TR2012-026
+January 2012"
+1f436aa4e68274037fff44e6cfbcd0a1ee3f60df,Tell and Predict: Kernel Classifier Prediction for Unseen Visual Classes from Unstructured Text Descriptions,"Tell and Predict: Kernel Classifier Prediction for Unseen Visual Classes
+from Unstructured Text Descriptions
+Mohamed Elhoseiny, Ahmed Elgammal, Babak Saleh"
+1fd8c71a8859da611a8fde1cbb2bba1c7cf00b4c,EYEDIAP: a database for the development and evaluation of gaze estimation algorithms from RGB and RGB-D cameras,"This paper was presented at the 2014 Symposium on Eye Tracking Research & Applications 2014
+EYEDIAP: A Database for the Development and Evaluation of Gaze Estimation
+Algorithms from RGB and RGB-D Cameras
+Kenneth Alberto Funes Mora1,2, Florent Monay1 and Jean-Marc Odobez1,2
+Idiap Research Institute 2 ´Ecole Polytechnique F´ed´erale de Lausanne (EPFL), Switzerland
+{kfunes, monay,"
+1fe74d637bc5e7d95abcd18b6967e51461fd8cdd,On the Dynamic Selection of Biometric Fusion Algorithms,"On the Dynamic Selection of Biometric Fusion
+Algorithms
+Mayank Vatsa, Member, IEEE, Richa Singh, Member, IEEE, Afzel Noore, Member, IEEE, and
+Arun Ross, Member, IEEE"
+1fb2082d3f772933b586cca65af2099512b9c68b,Comparison of Spectral-Only and Spectral/Spatial Face Recognition for Personal Identity Verification,"Hindawi Publishing Corporation
+EURASIP Journal on Advances in Signal Processing
+Volume 2009, Article ID 943602, 6 pages
+doi:10.1155/2009/943602
+Research Article
+Comparison of Spectral-Only and Spectral/Spatial Face
+Recognition for Personal Identity Verification
+Zhihong Pan,1 Glenn Healey,2 and Bruce Tromberg3
+Galileo Group Inc., 100 Rialto Place Suite 737, Melbourne, FL 32901, USA
+Department of Electrical Engineering and Computer Science, University of California, Irvine, CA 92697, USA
+Beckman Laser Institute, 1002 East Health Sciences Road, Irvine, CA 92612, USA
+Correspondence should be addressed to Zhihong Pan,
+Received 29 September 2008; Revised 22 February 2009; Accepted 8 April 2009
+Recommended by Kevin Bowyer
+Face recognition based on spatial features has been widely used for personal identity verification for security-related applications.
+Recently, near-infrared spectral reflectance properties of local facial regions have been shown to be suf‌f‌icient discriminants for
+accurate face recognition. In this paper, we compare the performance of the spectral method with face recognition using the
+eigenface method on single-band images extracted from the same hyperspectral image set. We also consider methods that use
+multiple original and PCA-transformed bands. Lastly, an innovative spectral eigenface method which uses both spatial and spectral
+features is proposed to improve the quality of the spectral features and to reduce the expense of the computation. The algorithms"
+1f614a97e16671c091b1bcd1a33e1280822b53db,Tracking People's Hands and Feet Using Mixed Network AND/OR Search,"DRAFT FOR TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE
+Tracking people’s hands and feet using mixed
+network AND/OR search
+Vlad I. Morariu, Member, IEEE, David Harwood, Member, IEEE,
+and Larry S. Davis, Fellow, IEEE"
+1f94734847c15fa1da68d4222973950d6b683c9e,Embedding Label Structures for Fine-Grained Feature Representation,"Embedding Label Structures for Fine-Grained Feature Representation
+Xiaofan Zhang
+UNC Charlotte
+Charlotte, NC 28223
+Feng Zhou
+NEC Lab America
+Cupertino, CA 95014
+Yuanqing Lin
+NEC Lab America
+Cupertino, CA 95014
+Shaoting Zhang
+UNC Charlotte
+Charlotte, NC 28223"
+1ff89bd94d8a21b7ca4bf844e2d366f854822918,Robust Online Multi-object Tracking by Maximum a Posteriori Estimation with Sequential Trajectory Prior,"Robust Online Multi-object Tracking
+by Maximum a Posteriori Estimation
+with Sequential Trajectory Prior
+Min Yang(B), Mingtao Pei, Jiajun Shen, and Yunde Jia
+Beijing Laboratory of Intelligent Information Technology, School of Computer
+Science, Beijing Institute of Technology, Beijing 100081, People’s Republic of China"
+1fff309330f85146134e49e0022ac61ac60506a9,Data-Driven Sparse Sensor Placement for Reconstruction,"Data-Driven Sparse Sensor Placement for Reconstruction
+Krithika Manohar∗, Bingni W. Brunton, J. Nathan Kutz, and Steven L. Brunton
+Corresponding author:"
+1f69fa423b076e19dc2ccf6bc9013f09ae39133c,Multimodal Dialogs (MMD): A large-scale dataset for studying multimodal domain-aware conversations,"Towards Building Large Scale Multimodal Domain-Aware Conversation Systems
+Amrita Saha1,2
+Mitesh M. Khapra2
+Karthik Sankaranarayanan1
+IBM Research AI
+I.I.T. Madras, India"
+1f8f0abfe4689aa93f2f6cc7ec4fd4c6adc2c2d6,Semantic Instance Segmentation with a Discriminative Loss Function,"Semantic Instance Segmentation with a Discriminative Loss Function
+Bert De Brabandere∗
+Davy Neven∗
+ESAT-PSI, KU Leuven
+Luc Van Gool"
+1fd54172f7388cd83ed78ff9165519296de5cf20,Changing the Image Memorability: From Basic Photo Editing to GANs,"Changing the Image Memorability: From Basic Photo Editing to GANs
+Oleksii Sidorov
+The Norwegian Colour and Visual Computing Laboratory, NTNU
+Gjovik, Norway
+Figure 1: Modification of memorability using the proposed algorithm. All the results were generated without any human intervention.
+“What” and “how” to change were learned by the model from experimental data."
+1f82eebadc3ffa41820ad1a0f53770247fc96dcd,Using Trajectories derived by Dense Optical Flows as a Spatial Component in Background Subtraction,"Using Trajectories derived by Dense Optical Flows as a
+Spatial Component in Background Subtraction
+Martin Radolko
+University of Rostock
+nd Fraunhofer IGD
+Joachim-Jungius 11
+Rostock 18059
+r.fraunhofer.de
+Fahimeh Farhadifard
+University of Rostock
+nd Fraunhofer IGD
+Joachim-Jungius 11
+Rostock 18059
+r.fraunhofer.de"
+1f3370e2e6381408efe11e69ab12586bd6f74dc8,Feature Selection Library (MATLAB Toolbox),"Feature Selection Techniques for Classification:
+A widely applicable code library
+Giorgio Roffo
+University of Verona,
+Department of Computer Science"
+1f2c99bf032868ce520b9c5586a0c20051367b60,A Study of The Illumination Cones Method for Face Recognition Under Variable Illumination T.J. Chin and D. Suter A Study of The Illumination Cones Method for Face Recognition Under Variable Illumination,"Department of Electrical
+Computer Systems Engineering
+Technical Report
+MECSE-7-2004
+A Study of The Illumination Cones Method for Face
+Recognition Under Variable Illumination
+T.J. Chin and D. Suter"
+1f53ca209f982500069fed73efe2345358eff79e,Pedestrian Detection with Deep Convolutional Neural Network,"Pedestrian Detection with Deep Convolutional
+Neural Network
+Xiaogang Chen, Pengxu Wei, Wei Ke, Qixiang Ye, Jianbin Jiao
+School of Electronic,Electrical and Communication Engineering, University of
+Chinese Academy of Science, Beijing, China"
+1f8d539885f78e1a9d1314e952f3099e71676a5b,Audio-Visual Speaker Diarization Based on Spatiotemporal Bayesian Fusion,"Audio-Visual Speaker Diarization Based on
+Spatiotemporal Bayesian Fusion
+Israel D. Gebru, Sil`eye Ba, Xiaofei Li and Radu Horaud"
+1f7cf2df2fa7719c9db3fe57a0f01d65f08a9a8f,How social exclusion modulates social information processing: A behavioural dissociation between facial expressions and gaze direction,"RESEARCH ARTICLE
+How social exclusion modulates social
+information processing: A behavioural
+dissociation between facial expressions and
+gaze direction
+Francesco Bossi1,2*, Marcello Gallucci1,2, Paola Ricciardelli1,2
+Department of Psychology, University of Milan – Bicocca, Milan, Italy, 2 NeuroMI: Milan Center for
+Neuroscience, Milan, Italy"
+73a7ccf0facccd8943f7e54d19478f2bef9b7dab,Number 16,"Number 16
+{tag} {/tag}
+International Journal of Computer Applications
+Foundation of Computer Science (FCS), NY, USA
+Volume 132
+Number 16
+Year of Publication: 2015
+Authors:
+Pronaya Prosun Das, Taskeed Jabid, S.M. Shariar Mahamud
+10.5120/ijca2015907690
+{bibtex}2015907690.bib{/bibtex}"
+73f467b4358ac1cafb57f58e902c1cab5b15c590,Combination of Dimensionality Reduction Techniques for Face Image Retrieval: A Review,"ISSN 0976 3724 47
+Combination of Dimensionality Reduction Techniques for Face
+Image Retrieval: A Review
+Fousiya K.K 1, Jahfar Ali P 2
+M.Tech Scholar, MES College of Engineering, Kuttippuram,
+Kerala
+Asst. Professor, MES College of Engineering, Kuttippuram,
+Kerala"
+7323b594d3a8508f809e276aa2d224c4e7ec5a80,An Experimental Evaluation of Covariates Effects on Unconstrained Face Verification,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+An Experimental Evaluation of Covariates
+Effects on Unconstrained Face Verification
+Boyu Lu, Student Member, IEEE, Jun-Cheng Chen, Member, IEEE, Carlos D Castillo, Member, IEEE
+and Rama Chellappa, Fellow, IEEE"
+732e8d8f5717f8802426e1b9debc18a8361c1782,Unimodal Probability Distributions for Deep Ordinal Classification,"Unimodal Probability Distributions for Deep Ordinal Classification
+Christopher Beckham 1 Christopher Pal 1"
+73351b313df89572afe1332625044f7e5dd0ce06,High-level Feature Learning by Ensemble Projection for Image Classification with Limited Annotations I,"High-level Feature Learning by Ensemble Projection for Image
+Classification with Limited Annotations $
+Dengxin Dai∗, Luc Van Gool
+Computer Vision Lab, ETH Z¨urich, CH-8092, Switzerland"
+73c72161969a070b3caa40d4f075ba501a1b994b,Expression-Invariant 3D Face Recognition Using Patched Geodesic Texture Transform,"Expression-Invariant 3D Face Recognition using Patched
+Geodesic Texture Transform
+Author
+Hajati, Farshid, Raie, Abolghasem, Gao, Yongsheng
+Published
+Conference Title
+Proceedings 2010 Digital Image Computing: Techniques and Applications DICTA 2010
+https://doi.org/10.1109/DICTA.2010.52
+Copyright Statement
+© 2010 IEEE. Personal use of this material is permitted. However, permission to reprint/
+republish this material for advertising or promotional purposes or for creating new collective
+works for resale or redistribution to servers or lists, or to reuse any copyrighted component of
+this work in other works must be obtained from the IEEE.
+Downloaded from
+http://hdl.handle.net/10072/37733
+Link to published version
+http://dicta2010.conference.nicta.com.au/
+Griffith Research Online
+https://research-repository.griffith.edu.au"
+73764fa9bed84ad2c932dc8089ace7fa8fa7c1d3,"Disparity Statistics for Pedestrian Detection: Combining Appearance, Motion and Stereo","Disparity Statistics for Pedestrian Detection:
+Combining Appearance, Motion and Stereo
+Stefan Walk1, Konrad Schindler1,2, and Bernt Schiele1,3
+Computer Science Department, TU Darmstadt
+Photogrammetry and Remote Sensing Group, ETH Z¨urich
+MPI Informatics, Saarbr¨ucken"
+73ed64803d6f2c49f01cffef8e6be8fc9b5273b8,Cooking in the kitchen: Recognizing and Segmenting Human Activities in Videos,"Noname manuscript No.
+(will be inserted by the editor)
+Cooking in the kitchen: Recognizing and Segmenting Human
+Activities in Videos
+Hilde Kuehne · Juergen Gall · Thomas Serre
+Received: date / Accepted: date"
+73bbbfac7b144f835840fe7f7b5139283bf4f3f1,Do we spontaneously form stable trustworthiness impressions from facial appearance?,"ATTITUDES AND SOCIAL COGNITION
+Do We Spontaneously Form Stable Trustworthiness Impressions From
+Facial Appearance?
+André Klapper
+Radboud University
+Ron Dotsch
+Utrecht University and Radboud University
+Iris van Rooij and Daniël H. J. Wigboldus
+Radboud University
+It is widely assumed among psychologists that people spontaneously form trustworthiness impressions of
+newly encountered people from their facial appearance. However, most existing studies directly or
+indirectly induced an impression formation goal, which means that the existing empirical support for
+spontaneous facial trustworthiness impressions remains insufficient. In particular, it remains an open
+question whether trustworthiness from facial appearance is encoded in memory. Using the ‘who said
+what’ paradigm, we indirectly measured to what extent people encoded the trustworthiness of observed
+faces. The results of 4 studies demonstrated a reliable tendency toward trustworthiness encoding. This
+was shown under conditions of varying context-relevance, and salience of trustworthiness. Moreover,
+evidence for this tendency was obtained using both (experimentally controlled) artificial and (naturalistic
+varying) real faces. Taken together, these results suggest that there is a spontaneous tendency to form
+relatively stable trustworthiness impressions from facial appearance, which is relatively independent of"
+73713880d4d1ec4c8f4608a94f67ea9e9f9a97a5,Visual query attributes suggestion,"Visual Query Attributes Suggestion
+Jingwen Bian
+National University of
+Singapore, Singapore
+Zheng-Jun Zha
+National University of
+Singapore, Singapore
+Hanwang Zhang
+National University of
+Singapore, Singapore
+Qi Tian
+University of Texas at San
+Antonio, USA"
+73fa81d2b01c81c6ede71d046f9101440884e604,Fuzzy Based Texton Binary Shape Matrix (FTBSM) for Texture Classification,"Global Journal of Computer Science and Technology
+Graphics & Vision
+Volume 12 Issue 15 Version 1.0 Year 2012
+Type: Double Blind Peer Reviewed International Research Journal
+Publisher: Global Journals Inc. (USA)
+Online ISSN: 0975-4172 & Print ISSN: 0975-4350
+Fuzzy Based Texton Binary Shape Matrix (FTBSM) for Texture
+Classification
+By P.Chandra Sekhar Reddy & B.Eswara Reddy
+Jntua College of Engineering, Anantapur, A.P, India"
+73c13ba142588f45aaa92805fe75ca2691ac981b,A Comparative Study of Social Scene Parsing Strategies between Children with and without Autism Spectrum Disorder,"96 Jul 2016 Vol 9 No.3 North American Journal of Medicine and Science
+Original Research
+A Comparative Study of Social Scene Parsing
+Strategies between Children with and
+without Autism Spectrum Disorder
+Chen Song;1 Aosen Wang;1 Kathy Ralabate Doody, PhD;2* Michelle Hartley-
+McAndrew, MD;3 Jana Mertz, MBA;4 Feng Lin, PhD;1 Wenyao Xu, PhD1
+Computer Science and Engineering, SUNY, University at Buffalo, Buffalo NY
+Exceptional Education, SUNY, Buffalo State, Buffalo, NY
+Jacobs School of Medicine and Biomedical Sciences, SUNY, University at Buffalo Women and Children's Hospital of Buffalo, Buffalo, NY
+Children’s Guild Foundation Autism Spectrum Disorder Center, Women and Children’s Hospital of Buffalo, Buffalo, NY
+Autism spectrum disorder (ASD) is a complex developmental disability characterized by deficits in social
+interaction. Gaze behavior is of great interest because it reveals the parsing strategy the participant uses to
+achieve social content. The legacy features in gaze fixation, such as time and area-of-interest, however, cannot
+comprehensively reveal the way the participant may cognize the social scene. In this work, we investigate the
+dynamic components within the gaze behavior of children with ASD upon the carefully-selected social scene.
+A cohort of child participants (n = 51) were recruited between 2 and 10 years. The results suggest significant
+differences in the social scene parsing strategies of children with ASD, giving added insight into the way they
+may decode and interpret the social scenarios.
+[N A J Med Sci. 2016;9(3):96-103. DOI: 10.7156/najms.2016.0903096]"
+73d8fafee6be9d4fa789ece2192f259199f00e60,3D Face Recognition Using Radon Transform and Factorial Discriminant Analysis (FDA),"Volume 3, Issue 7, July 2013 ISSN: 2277 128X
+International Journal of Advanced Research in
+Computer Science and Software Engineering
+Research Paper
+Available online at: www.ijarcsse.com
+3D Face Recognition Using Radon Transform and Factorial
+Discriminant Analysis (FDA)
+P. S. Hiremath , Manjunatha Hiremath
+Department of Computer Science
+Gulbarga University, Gulbarga-585106
+Karnataka, India."
+735c38361d77e707ac48f0d040493c65ca559d3c,Machine Learning for Simplifying the Use of Cardiac Image Databases. (Apprentissage automatique pour simplifier l'utilisation de banques d'images cardiaques),"N°: 2009 ENAM XXXX
+École doctorale n° 84 :
+Sciences et technologies de l’information et de la communication
+Doctorat ParisTech
+T H È S E
+pour obtenir le grade de docteur délivré par
+l’École nationale supérieure des mines de Paris
+Spécialité “ Contrôle, optimisation et prospective ”
+présentée et soutenue publiquement par
+Ján MARGETA
+le 14 Décembre 2015
+Apprentissage automatique pour simplifier
+l’utilisation de banques d’images cardiaques
+Machine Learning for Simplifying
+the Use of Cardiac Image Databases
+Directeurs de thèse : Nicholas AYACHE et Antonio CRIMINISI
+M. Patrick CLARYSSE, DR, Creatis, CNRS, INSA Lyon
+M. Bjoern MENZE, Professeur, ImageBioComp Group, TU München
+M. Hervé DELINGETTE, DR, Asclepios Research Project, Inria Sophia Antipolis
+M. Antonio CRIMINISI, Chercheur principal, MLP Group, Microsoft Research Cambridge"
+7306d42ca158d40436cc5167e651d7ebfa6b89c1,Transductive Zero-Shot Action Recognition by Word-Vector Embedding,"Noname manuscript No.
+(will be inserted by the editor)
+Transductive Zero-Shot Action Recognition by
+Word-Vector Embedding
+Xun Xu · Timothy Hospedales · Shaogang Gong
+Received: date / Accepted: date"
+73200504c7381c48c900894455995b9188676cd5,Weakly-Supervised Image Annotation and Segmentation with Objects and Attributes,"Weakly-Supervised Image Annotation and
+Segmentation with Objects and Attributes
+Zhiyuan Shi, Yongxin Yang, Timothy M. Hospedales, Tao Xiang"
+734cdda4a4de2a635404e4c6b61f1b2edb3f501d,Automatic landmark point detection and tracking for human facial expressions,"Tie and Guan EURASIP Journal on Image and Video Processing 2013, 2013:8
+http://jivp.eurasipjournals.com/content/2013/1/8
+R ES EAR CH
+Open Access
+Automatic landmark point detection and tracking
+for human facial expressions
+Yun Tie* and Ling Guan"
+7373c4a23684e2613f441f2236ed02e3f9942dd4,Feature extraction through Binary Pattern of Phase Congruency for facial expression recognition,"This document is downloaded from DR-NTU, Nanyang Technological
+University Library, Singapore.
+Title
+Feature extraction through binary pattern of phase
+congruency for facial expression recognition
+Author(s)
+Shojaeilangari, Seyedehsamaneh; Yau, Wei-Yun; Li, Jun;
+Teoh, Eam Khwang
+Citation
+Shojaeilangari, S., Yau, W. Y., Li, J., & Teoh, E. K.
+(2012). Feature extraction through binary pattern of
+phase congruency for facial expression recognition. 12th
+International Conference on Control Automation Robotics
+& Vision (ICARCV), 166-170.
+http://hdl.handle.net/10220/18012
+Rights
+© 2012 IEEE. Personal use of this material is permitted.
+Permission from IEEE must be obtained for all other
+uses, in any current or future media, including
+reprinting/republishing this material for advertising or"
+732686d799d760ccca8ad47b49a8308b1ab381fb,Teachers’ differing classroom behaviors: The role of emotional sensitivity and cultural tolerance,"Running head: TEACHERS’ DIFFERING BEHAVIORS
+Graduate School of Psychology
+RESEARCH MASTER’S PSYCHOLOGY THESıS REPORT
+Teachers’ differing classroom behaviors:
+The role of emotional sensitivity and cultural tolerance
+Ceren Su Abacıoğlu
+Supervisor: prof. dr. Agneta Fischer
+Second supervisor: dr. Disa Sauter
+External Supervisor: prof. dr. Monique Volman
+Research Master’s, Social Psychology
+Ethics Committee Reference Code: 2016-SP-7084"
+73599349402bf8f0d97f51862d11d128cdba44ef,Affective analysis of videos: detecting emotional content in real-life scenarios,"Affective Analysis of Videos:
+Detecting Emotional Content in Real-Life Scenarios
+vorgelegt von
+Master of Science
+Esra Acar Celik
+geb. in Afyonkarahisar
+Von der Fakultät IV – Elektrotechnik und Informatik –
+der Technischen Universität Berlin
+zur Erlangung des akademischen Grades
+Doktor der Ingenieurwissenschaften
+– Dr.-Ing. –
+genehmigte Dissertation
+Promotionsausschuss:
+Vorsitzender:
+Berichter:
+Berichter:
+Berichter:
+Prof. Dr. Thomas Wiegand
+Prof. Dr. Dr. h.c. Sahin Albayrak
+Prof. Dr. Adnan Yazıcı"
+73a4fe5072a30c132e8a0a18384caae4c112f198,What is typical is good: the influence of face typicality on perceived trustworthiness.,"554955 PSSXXX10.1177/0956797614554955Sofer et al.What Is Typical Is Good
+research-article2014
+Research Article
+What Is Typical Is Good: The Influence
+of Face Typicality on Perceived
+Trustworthiness
+015, Vol. 26(1) 39 –47
+© The Author(s) 2014
+Reprints and permissions:
+sagepub.com/journalsPermissions.nav
+DOI: 10.1177/0956797614554955
+pss.sagepub.com
+Carmel Sofer1,2, Ron Dotsch2,3, Daniel H. J. Wigboldus2, and
+Alexander Todorov1,2
+Department of Psychology, Princeton University; 2Behavioural Science Institute, Radboud University
+Nijmegen; and 3Department of Psychology, Utrecht University"
+73704242a548e8725926762faf7333e5598d0228,Surveillance of Super-Extended Objects : Bimodal Approach,"World Academy of Science, Engineering and Technology
+International Journal of Mechanical and Mechatronics Engineering
+Vol:8, No:9, 2014
+Surveillance of Super-Extended Objects: Bimodal
+Approach
+Andrey V. Timofeev, Dmitry Egorov"
+73866bdb723841da93b6ad93afe3d72817e2b377,Dense and Low-Rank Gaussian CRFs Using Deep Embeddings,"Dense and Low-Rank Gaussian CRFs Using Deep Embeddings
+Siddhartha Chandra1
+Nicolas Usunier2
+Iasonas Kokkinos2
+INRIA GALEN, CentraleSup´elec
+Facebook AI Research, Paris"
+73fbdd57270b9f91f2e24989178e264f2d2eb7ae,Kernel linear regression for low resolution face recognition under variable illumination,"978-1-4673-0046-9/12/$26.00 ©2012 IEEE
+ICASSP 2012"
+738d5a6491ae0fef5d2debc17f951534061cf6f8,Advances in Learning Visual Saliency: From Image Primitives to Semantic Contents,"Chapter 14
+Advances in Learning Visual Saliency:
+From Image Primitives to Semantic Contents
+Qi Zhao and Christof Koch"
+73d57e2c855c39b4ff06f2d7394ab4ea35f597d4,First Order Generative Adversarial Networks,"First Order Generative Adversarial Networks
+Calvin Seward 1 2 Thomas Unterthiner 2 Urs Bergmann 1 Nikolay Jetchev 1 Sepp Hochreiter 2"
+73052a2bf7b41b7be2447fadc13c29be1d994708,Pedestrian tracking using probability fields and a movement feature space 1,"Pedestrian tracking using probability fields and a movement feature space 1
+Pablo Negri a & Damián Garayalde b
+Universidad Argentina de la Empresa (UADE). CONICET. Buenos Aires, Argentina.
+Instituto Tecnológico de Buenos Aires (ITBA), Buenos Aires, Argentina.
+Received: April 18th, 2016. Received in revised form: November 1rd, 2016. Accepted: December 2nd, 2016."
+73ec2d5a6b4bee0f268b793ff646330507497e38,Is an Image Worth More than a Thousand Words? On the Fine-Grain Semantic Differences between Visual and Linguistic Representations,"Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers,
+pages 2807–2817, Osaka, Japan, December 11-17 2016."
+73be334ecc48751269443b0db2629086125e69f5,Robust Face Recognition under Difficult Lighting Conditions,"International Journal of Technological Exploration and Learning (IJTEL)
+Volume 1 Issue 1 (August 2012)
+Robust Face Recognition under Difficult Lighting
+Conditions
+S.S. Ghatge1,V.V. Dixit2
+Department of Electronics &Telecomunication1, 2
+Sinhgad College of Engineering1, 2
+University of Pune, India1, 2"
+731840289e35c61c6e21ae18f2da2751bd8e2f20,Event-related potential (ERP) correlates of face processing in verbal children with autism spectrum disorders (ASD) and their first-degree relatives: a family study,"Sysoeva et al. Molecular Autism (2018) 9:41
+https://doi.org/10.1186/s13229-018-0220-x
+Open Access
+R ES EAR CH
+Event-related potential (ERP) correlates of
+face processing in verbal children with
+utism spectrum disorders (ASD) and their
+first-degree relatives: a family study
+Olga V. Sysoeva1,2, John N. Constantino1*
+nd Andrey P. Anokhin1"
+73c9cbbf3f9cea1bc7dce98fce429bf0616a1a8c,Unsupervised Learning of Object Landmarks by Factorized Spatial Embeddings,"imagesViewpoint factorizationLearned landmarksFigure1.Wepresentanovelmethodthatcanlearnviewpointin-variantlandmarkswithoutanysupervision.Themethodusesaprocessofviewpointfactorizationwhichlearnsadeeplandmarkdetectorcompatiblewithimagedeformations.Itcanbeappliedtorigidanddeformableobjectsandobjectcategories.terns.Achievingadeeperunderstandingofobjectsrequiresmodelingtheirintrinsicviewpoint-independentstructure.Oftenthisstructureisdefinedmanuallybyspecifyingen-titiessuchaslandmarks,parts,andskeletons.Givensuffi-cientmanualannotations,itispossibletoteachdeepneuralnetworksandothermodelstorecognizesuchstructuresinimages.However,theproblemoflearningsuchstructureswithoutmanualsupervisionremainslargelyopen.Inthispaper,wecontributeanewapproachtolearnviewpoint-independentrepresentationsofobjectsfromim-ageswithoutmanualsupervision(fig.1).Weformulatethistaskasafactorizationproblem,wheretheeffectsofimagedeformations,forexamplearisingfromaviewpointchange,areexplainedbythemotionofareferenceframeattachedtotheobjectandindependentoftheviewpoint.Afterdescribingthegeneralprinciple(sec.3.1),wein-1"
+87cab840df202609bfcfb5a9ee3293e61c7c85db,Vision based victim detection from unmanned aerial vehicles,"Vision Based Victim Detection from Unmanned Aerial Vehicles
+Mykhaylo Andriluka1, Paul Schnitzspan1, Johannes Meyer2, Stefan Kohlbrecher1,
+Karen Petersen1, Oskar von Stryk1, Stefan Roth1, and Bernt Schiele1,3
+Department of Computer Science, TU Darmstadt
+Department of Mechanical Engineering, TU Darmstadt
+MPI Informatics, Saarbr¨ucken"
+874082164d9ab9fced08b9890c009b91a2e846f1,Understanding Convolution for Semantic Segmentation,"Understanding Convolution for Semantic Segmentation
+Panqu Wang1, Pengfei Chen1, Ye Yuan2, Ding Liu3, Zehua Huang1, Xiaodi Hou1, Garrison Cottrell4
+TuSimple, 2Carnegie Mellon University, 3University of Illinois Urbana-Champaign, 4UC San Diego"
+87c2806f1fd20287f00b43dab07822ab13035169,Verfahren zur Analyse von Ähnlichkeit im Ortsbereich,"Matthias Fiedler
+Verfahren zur Analyse von Ähnlichkeit im Ortsbereich"
+87ad56e06d48fa9b30e2915473c488c1b4b7e6ae,Learn from experience: probabilistic prediction of perception performance to avoid failure,"Article
+Learn from experience: probabilistic
+prediction of perception performance to
+void failure
+The International Journal of
+Robotics Research
+© The Author(s) 2017
+Reprints and permissions:
+sagepub.co.uk/journalsPermissions.nav
+DOI: 10.1177/0278364917730603
+journals.sagepub.com/home/ijr
+Corina Gur˘au1, Dushyant Rao1, Chi Hay Tong2, and Ingmar Posner1"
+8765f22fbcdcf610a08b01db01edc4b8cc67d082,Probability Models for Open Set Recognition,"for all other uses,
+© 2014 IEEE. Personal use of this material is permitted. Permission from IEEE must be
+obtained
+including
+reprinting/republishing this material for advertising or promotional purposes, creating
+new collective works, for resale or redistribution to servers or lists, or reuse of any
+opyrighted component of this work in other works.
+in any current or
+future media,
+Pre-print of article that will appear in T-PAMI."
+8796f2d54afb0e5c924101f54d469a1d54d5775d,Illumination Invariant Face Recognition Using Fuzzy LDA and FFNN,"Journal of Signal and Information Processing, 2012, 3, 45-50
+http://dx.doi.org/10.4236/jsip.2012.31007 Published Online February 2012 (http://www.SciRP.org/journal/jsip)
+Illumination Invariant Face Recognition Using Fuzzy LDA
+nd FFNN
+Behzad Bozorgtabar, Hamed Azami, Farzad Noorian
+School of Electrical Engineering, Iran University of Science and Technology, Tehran, Iran.
+Email:
+Received October 20th, 2011; revised November 24th, 2011; accepted December 10th, 2011"
+87f285782d755eb85d8922840e67ed9602cfd6b9,Incorporating Boltzmann Machine Priors for Semantic Labeling in Images and Videos,"INCORPORATING BOLTZMANN MACHINE PRIORS
+FOR SEMANTIC LABELING IN IMAGES AND VIDEOS
+A Dissertation Presented
+ANDREW KAE
+Submitted to the Graduate School of the
+University of Massachusetts Amherst in partial fulfillment
+of the requirements for the degree of
+DOCTOR OF PHILOSOPHY
+May 2014
+Computer Science"
+871f5f1114949e3ddb1bca0982086cc806ce84a8,Discriminative learning of apparel features,"Discriminative Learning of Apparel Features
+Rasmus Rothe1, Marko Ristin1, Matthias Dantone1, and Luc Van Gool1,2
+Computer Vision Laboratory, D-ITET, ETH Z¨urich, Switzerland
+ESAT - PSI / IBBT, K.U. Leuven, Belgium"
+8722ab37a03336f832e4098224cb63cd02cdfe0a,Face recognition with 3 D face asymmetry,"Face recognition with 3D face asymmetry
+Janusz Bobulski
+Czestochowa University of Technology
+Institute of Computer and Information Sciences
+Dabrowskiego 73, 42-200, Czestochowa, Poland
+Summary. Using of 3D images for the identification was in a field of the interest
+of many researchers which developed a few methods offering good results. However,
+there are few techniques exploiting the 3D asymmetry amongst these methods. We
+propose fast algorithm for rough extraction face asymmetry that is used to 3D
+face recognition with hidden Markov models. This paper presents conception of fast
+method for determine 3D face asymmetry. The research results indicate that face
+recognition with 3D face asymmetry may be used in biometrics systems.
+Introduction
+Biometrics systems use individual and unique biological features of person
+for user identification. The most popular features are: fingerprint, iris, voice,
+palm print, face image et al. Most of them are not accepted by users, because
+they feel under surveillance or as criminals. Others, in turn, are characterized
+y problems with the acquisition of biometric pattern and require closeness
+to the reader. Among the biometric methods popular technique is to identify
+people on the basis of the face image, the advantage is the ease of obtaining"
+87bee0e68dfc86b714f0107860d600fffdaf7996,Automated 3D Face Reconstruction from Multiple Images Using Quality Measures,"Automated 3D Face Reconstruction from Multiple Images
+using Quality Measures
+Marcel Piotraschke and Volker Blanz
+Institute for Vision and Graphics, University of Siegen, Germany"
+878f70f6abb83f5158ca0bacfc2bacd49b1886b1,Aligning Artificial Neural Networks to the Brain Yields Shallow Recurrent Architec- Tures,"Under review as a conference paper at ICLR 2019
+ALIGNING ARTIFICIAL NEURAL NETWORKS TO THE
+BRAIN YIELDS SHALLOW RECURRENT ARCHITEC-
+TURES
+Anonymous authors
+Paper under double-blind review"
+87da8bd9eb2fff2d77809c8bee3bed8c93cb5b4b,A Generative Model For Zero Shot Learning Using Conditional Variational Autoencoders,"A Generative Model For Zero Shot Learning
+Using Conditional Variational Autoencoders
+Ashish Mishra1 , Shiva Krishna Reddy1, Anurag Mittal, and Hema A Murthy
+Indian Institute of Technology Madras"
+878169be6e2c87df2d8a1266e9e37de63b524ae7,Image interpretation above and below the object level.,"CBMM Memo No. 089
+May 10, 2018
+Image interpretation above and below the object level
+Guy Ben-Yosef, Shimon Ullman"
+87363751b8e3d51a002dea6d32df553ee5315cb7,Fine-grained sketch-based image retrieval: The role of part-aware attributes,"Fine-Grained Sketch-Based Image Retrieval: The Role of Part-Aware Attributes
+Ke Li1&2
+Kaiyue Pang1&2
+Yi-Zhe Song2
+Timothy Hospedales2
+Honggang Zhang1
+School of Electronic Engineering and Computer Science Queen Mary University of London.
+Beijing University of Posts and Telecommunications.
+Yichuan Hu1"
+877d083b2a3a75cc1bb25f770a9c5684bf5f6f44,Learning to Hash with Binary Reconstructive Embeddings,"Learning to Hash with Binary Reconstructive
+Embeddings
+Brian Kulis and Trevor Darrell
+UC Berkeley EECS and ICSI
+Berkeley, CA"
+87bba3f4292727091027b7888b5d8f364425344d,End-to-End Learning of Driving Models with Surround-View Cameras and Route Planners,"End-to-End Learning of Driving Models with
+Surround-View Cameras and Route Planners
+Simon Hecker1, Dengxin Dai1, and Luc Van Gool1,2
+ETH Zurich, Zurich, Switzerland
+KU Leuven, Leuven, Belgium"
+877aff9bd05de7e9d82587b0e6f1cda28fd33171,Long-Term Visual Localization Using Semantically Segmented Images,"Long-term Visual Localization using Semantically Segmented Images
+Erik Stenborg1,2 Carl Toft1 and Lars Hammarstrand1"
+878301453e3d5cb1a1f7828002ea00f59cbeab06,Faceness-Net: Face Detection through Deep Facial Part Responses,"Faceness-Net: Face Detection through
+Deep Facial Part Responses
+Shuo Yang, Ping Luo, Chen Change Loy, Senior Member, IEEE and Xiaoou Tang, Fellow, IEEE"
+87e592ee1a7e2d34e6b115da08700a1ae02e9355,Deep Pictorial Gaze Estimation,"Deep Pictorial Gaze Estimation
+Seonwook Park, Adrian Spurr, and Otmar Hilliges
+AIT Lab, Department of Computer Science, ETH Zurich"
+87bdafbcf3569c06eef4a397beffc451f5101f94,Facial expression: An under-utilised tool for the assessment of welfare in mammals.,"published February 8, 2017
+Review article
+Facial expression: An under-utilised tool for
+the assessment of welfare in mammals1
+Kris A. Descovich1,2,3, Jennifer Wathan4, Matthew C. Leach5, Hannah M. Buchanan-Smith1,
+Paul Flecknell6, David Farningham7 and Sarah-Jane Vick1
+Psychology, Faculty of Natural Sciences, University of Stirling; 2Environmental and Animal Sciences, Unitec Institute of
+Technology; 3Centre for Animal Welfare and Ethics, University of Queensland; 4School of Psychology, University of Sussex,
+United Kingdom; 5School of Agriculture, Food & Rural Development, University of Newcastle; 6Comparative Biology
+Centre, University of Newcastle; 7Centre for Macaques, Medical Research Council
+Summary
+Animal welfare is a key issue for industries that use or impact upon animals. The accurate identification of welfare
+states is particularly relevant to the field of bioscience, where the 3Rs framework encourages refinement of
+experimental procedures involving animal models. The assessment and improvement of welfare states in animals
+is reliant on reliable and valid measurement tools. Behavioural measures (activity, attention, posture and
+vocalisation) are frequently used because they are immediate and non-invasive, however no single indicator can
+yield a complete picture of the internal state of an animal. Facial expressions are extensively studied in humans
+s a measure of psychological and emotional experiences but are infrequently used in animal studies, with the
+exception of emerging research on pain behaviour. In this review, we discuss current evidence for facial
+representations of underlying affective states, and how communicative or functional expressions can be useful"
+8765f312e35bba0650aa769b59da7e8fac9e98aa,A Cognitively-Motivated Framework for Partial Face Recognition in Unconstrained Scenarios,"Sensors 2015, 15, 1903-1924; doi:10.3390/s150101903
+OPEN ACCESS
+sensors
+ISSN 1424-8220
+www.mdpi.com/journal/sensors
+Article
+A Cognitively-Motivated Framework for Partial Face
+Recognition in Unconstrained Scenarios
+João C. Monteiro * and Jaime S. Cardoso
+INESC TEC and Faculdade de Engenharia, Universidade do Porto, Campus da FEUP,
+Rua Dr. Roberto Frias, n 378, 4200-465 Porto, Portugal; E-Mail:
+* Author to whom correspondence should be addressed; E-Mail:
+Tel.: +351-22-209-4299.
+Academic Editor: Vittorio M.N. Passaro
+Received: 24 November 2014 / Accepted: 7 January 2015 / Published: 16 January 2015"
+87dd3fd36bccbe1d5f1484ac05f1848b51c6eab5,Spatio-temporal Maximum Average Correlation Height Templates in Action Recognition and Video Summarization,"SPATIO-TEMPORAL MAXIMUM AVERAGE CORRELATION
+HEIGHT TEMPLATES IN ACTION RECOGNITION AND VIDEO
+SUMMARIZATION
+MIKEL RODRIGUEZ
+B.A. Earlham College, Richmond Indiana
+M.S. University of Central Florida
+A dissertation submitted in partial fulfillment of the requirements
+for the degree of Doctor of Philosophy
+in the School of Electrical Engineering and Computer Science
+in the College of Engineering and Computer Science
+t the University of Central Florida
+Orlando, Florida
+Summer Term
+Major Professor: Mubarak Shah"
+87c6ba55b0f817de4504e39dbb201842ae102c9f,Three Dimensional Face Recognition Using Iso-Geodesic and Iso-Depth Curves,"Three Dimensional Face Recognition Using Iso-Geodesic and Iso-Depth
+Curves
+Sina Jahanbin, Hyohoon Choi, Yang Liu, Alan C. Bovik"
+87bb183d8be0c2b4cfceb9ee158fee4bbf3e19fd,Craniofacial Image Analysis,"Craniofacial Image Analysis
+Ezgi Mercan, Indriyati Atmosukarto, Jia Wu, Shu Liang and Linda G. Shapiro"
+87f0a779ce4e060e3e076df3cc651e0f3f01b2ae,Bimodal Biometric Person Identification System Under Perturbations,"Bimodal Biometric Person Identification System
+Under Perturbations
+Miguel Carrasco1, Luis Pizarro2, and Domingo Mery1
+Pontificia Universidad Cat´olica de Chile
+Av. Vicu˜na Mackenna 4860(143), Santiago, Chile
+Mathematical Image Analysis Group
+Faculty of Mathematics and Computer Science
+Saarland University, Bldg. E11, 66041 Saarbr¨ucken, Germany"
+8064d7a28c763ec37a840450d729f23428ad8f8b,Development of an Efficient Face Recognition System based on Linear and Nonlinear Algorithms,"Development of an Efficient Face Recognition System based on Linear and Nonlinear Algorithms
+{tag} {/tag}
+International Journal of Computer Applications
+Foundation of Computer Science (FCS), NY, USA
+Volume 134
+Number 7
+Year of Publication: 2016
+Authors:
+Filani Araoluwa S., Adetunmbi Adebayo O.
+10.5120/ijca2016907932
+{bibtex}2016907932.bib{/bibtex}"
+80265d7c9fe6a948dd8c975bd4d696fb7ba099c9,Face Recognition Based on Human Visual Perception Theories and Unsupervised ANN,"Face Recognition Based on
+Human Visual Perception Theories and
+Unsupervised ANN
+Mario I. Chacon M. and Pablo Rivas P.
+Chihuahua Institute of Technology
+Mexico
+. Introduction
+The face recognition problem has been faced for more than 30 years. Although a lot of
+research has been done, much more research is and will be required in order to end up with
+robust face recognition system with a potential close to human performance. Currently
+face recognition systems, FRS, report high performance levels, however achievement of
+00% of correct recognition is still a challenge. Even more, if the FRS must work on non-
+ooperative environment its performance may decrease dramatically. Non-cooperative
+environments are characterized by changes on; pose, illumination, facial expression.
+Therefore FRS for non-cooperative environment represents an attractive challenge to
+researchers working on the face recognition area.
+Most of the work presented in the literature dealing with the face recognition problem
+follows an engineering approach that in some cases do not incorporate information from a
+psychological or neuroscience perspective. It is our interest in this material, to show how
+information from the psychological and neuroscience areas may contribute in the solution of"
+809e25da311366bfd684228e16184737d948eef6,Supplementary material for : Learning Finer-class Networks for Universal Representations,"GIRARD ET AL.: SUPPLEMENTARY FOR FINER-CLASS NETWORKS
+Supplementary material for: Learning
+Finer-class Networks for Universal
+Representations
+Julien Girard12
+Youssef Tamaazousti123
+Hervé Le Borgne2
+Céline Hudelot3
+Both authors contributed equally.
+CEA LIST
+Vision Laboratory,
+Gif-sur-Yvette, France.
+CentraleSupélec,
+MICS Laboratory,
+Châtenay-Malabry, France."
+8006219efb6ab76754616b0e8b7778dcfb46603d,Contributions to large-scale learning for image classification. (Contributions à l'apprentissage grande échelle pour la classification d'images),"CONTRIBUTIONSTOLARGE-SCALELEARNINGFORIMAGECLASSIFICATIONZeynepAkataPhDThesisl’´EcoleDoctoraleMath´ematiques,SciencesetTechnologiesdel’Information,InformatiquedeGrenoble"
+8010636454316faf1a09202542af040ffd04fefa,"Performance Parameter Analysis of Face Recognition Based On Fuzzy C-Means Clustering , Shape and Corner Detection","Minj Salen Kujur et al Int. Journal of Engineering Research and Applications www.ijera.com
+ISSN : 2248-9622, Vol. 3, Issue 6, Nov-Dec 2013, pp.515-520
+RESEARCH ARTICLE OPEN ACCESS
+Performance Parameter Analysis of Face Recognition Based On
+Fuzzy C-Means Clustering, Shape and Corner Detection
+Minj Salen Kujur1, Prof. Prashant Jain2
+Department of Electronics & Communication Engineering college Jabalpur"
+804b4c1b553d9d7bae70d55bf8767c603c1a09e3,Subspace clustering with a learned dimensionality reduction projection,"978-1-4799-9988-0/16/$31.00 ©2016 IEEE
+ICASSP 2016"
+800cbbe16be0f7cb921842d54967c9a94eaa2a65,Multimodal Recognition of Emotions Multimodal Recognition of Emotions,"MULTIMODAL RECOGNITION OF
+EMOTIONS"
+80135ed7e34ac1dcc7f858f880edc699a920bf53,Efficient Action and Event Recognition in Videos Using Extreme Learning Machines,"EFFICIENT ACTION AND EVENT RECOGNITION IN VIDEOS USING
+EXTREME LEARNING MACHINES
+G¨ul Varol
+B.S., Computer Engineering, Bo˘gazi¸ci University, 2013
+Submitted to the Institute for Graduate Studies in
+Science and Engineering in partial fulfillment of
+the requirements for the degree of
+Master of Science
+Graduate Program in Computer Engineering
+Bo˘gazi¸ci University"
+8032a89ba67e2b35e2789983426842f688c49a93,Matching-Constrained Active Contours,"Matching-Constrained Active Contours
+Junyan Wang*, Member, IEEE, Kap Luk Chan, Member, IEEE"
+801a80f7a18fccb2e8068996a73aee2cf04ae460,Optimal transport maps for distribution preserving operations on latent spaces of Generative Models,"OPTIMAL TRANSPORT MAPS FOR DISTRIBUTION PRE-
+SERVING OPERATIONS ON LATENT SPACES OF GENER-
+ATIVE MODELS
+Eirikur Agustsson
+D-ITET, ETH Zurich
+Switzerland
+Alexander Sage
+D-ITET, ETH Zurich
+Switzerland
+Radu Timofte
+D-ITET, ETH Zurich
+Merantix GmbH
+Luc Van Gool
+D-ITET, ETH Zurich
+ESAT, KU Leuven"
+807913b776bc5039cd3f195841419e55979ec7c7,Recreation of spontaneous non-verbal behavior on a synthetic agent EVA,"Roboti c.s. d.o.o, 2Faculty of Electrical Engineering and Computer Science, University of Maribor
+IZIDOR MLAKAR, 2MATEJ ROJC
+Recreation of spontaneous non-verbal behavior on a synthetic agent
+Tržaška cesta 23, 2Smetanova ulica 17
+SLOVENIA
+systematic
+sequencing"
+8031dd2c6583d8681fdd85bdae4371c7c745713f,Generative adversarial models for people attribute recognition in surveillance,"Generative Adversarial Models for People Attribute Recognition in Surveillance
+Matteo Fabbri
+Simone Calderara
+Rita Cucchiara
+University of Modena and Reggio Emilia
+via Vivarelli 10 Modena 41125 Italy"
+803c92a3f0815dbf97e30c4ee9450fd005586e1a,Max-Mahalanobis Linear Discriminant Analysis Networks,"Max-Mahalanobis Linear Discriminant Analysis Networks
+Tianyu Pang 1 Chao Du 1 Jun Zhu 1"
+802ecaabffbece0dc2c31d44b693967c683fc5ff,Faster RER-CNN: application to the detection of vehicles in aerial images,"Faster RER-CNN: application to the detection of
+vehicles in aerial images
+Jean Ogier du Terrail(1,2), Fr´ed´eric Jurie(1)
+(1)Normandie Univ, UNICAEN, ENSICAEN, CNRS
+(2)Safran Electronics and Defense
+September 21, 2018"
+801b0ae343a11a15fd7abc5720831afea6f0a61d,Similarity Learning with Listwise Ranking for Person Re-Identification,"SIMILARITY LEARNING WITH LISTWISE
+RANKING FOR PERSON RE-IDENTIFICATION
+Yiqiang Chen, Stefan Duffner, Andrei Stoian, Jean-Yves Dufour, Atilla
+Baskurt
+To cite this version:
+Yiqiang Chen, Stefan Duffner, Andrei Stoian, Jean-Yves Dufour, Atilla Baskurt. SIMILARITY
+LEARNING WITH LISTWISE RANKING FOR PERSON RE-IDENTIFICATION. International
+onference on image processing, Oct 2018, Athenes, Greece. <hal-01895355>
+HAL Id: hal-01895355
+https://hal.archives-ouvertes.fr/hal-01895355
+Submitted on 15 Oct 2018
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,"
+805c77bd351fc98d6acbee68b73af915c5cb6776,Overview of the ImageCLEF 2012 Scalable Web Image Annotation Task,"Overview of the ImageCLEF 2012 Scalable Web
+Image Annotation Task
+Mauricio Villegas and Roberto Paredes
+Institut Tecnol`ogic d’Inform`atica
+Universitat Polit`ecnica de Val`encia
+Cam´ı de Vera s/n, 46022 Val`encia, Spain"
+80c8d143e7f61761f39baec5b6dfb8faeb814be9,Local Directional Pattern based Fuzzy Co- occurrence Matrix Features for Face recognition,"Local Directional Pattern based Fuzzy Co-
+occurrence Matrix Features for Face recognition
+Dr. P Chandra Sekhar Reddy
+Professor, CSE Dept.
+Gokaraju Rangaraju Institute of Engineering and Technology, Hyd."
+80345fbb6bb6bcc5ab1a7adcc7979a0262b8a923,Soft Biometrics for a Socially Assistive Robotic Platform,"Research Article
+Pierluigi Carcagnì*, Dario Cazzato, Marco Del Coco, Pier Luigi Mazzeo, Marco Leo, and
+Cosimo Distante
+Soft Biometrics for a Socially Assistive Robotic
+Platform
+Open Access"
+80a6bb337b8fdc17bffb8038f3b1467d01204375,Subspace LDA Methods for Solving the Small Sample Size Problem in Face Recognition,"Proceedings of the International Conference on Computer and Information Science and Technology
+Ottawa, Ontario, Canada, May 11 – 12, 2015
+Paper No. 126
+Subspace LDA Methods for Solving the Small Sample Size
+Problem in Face Recognition
+Ching-Ting Huang, Chaur-Chin Chen
+Department of Computer Science/National Tsing Hua University
+01 KwanFu Rd., Sec. 2, Hsinchu, Taiwan"
+80510c47d7fad872b18d865f3957568dc512780c,Occlusion Invariant 3D Face Recognition with UMB – DB and BOSPHORUS Databases,"International Journal of Computer Applications (0975 – 8887)
+National Conference on Advances in Computing (NCAC 2015)
+Occlusion Invariant 3D Face Recognition with UMB – DB
+nd BOSPHORUS Databases
+G.E.S. R.H. Sapat College of Engineering, Nashik
+G.E.S. R.H. Sapat College of Engineering, Nashik
+H. Y. Patil, PhD
+Assistant Professor (Dept. of E&TC),
+Maharashtra
+Charushila R. Singh
+M.E. student (Dept. of E&TC),
+Maharashtra"
+80c8f02c945c1dbbec31983164c1e4e0b742c44a,Cohort of LSTM and lexicon verification for handwriting recognition with gigantic lexicon,"Cohort of LSTM and lexicon verification for
+handwriting recognition with gigantic lexicon
+Bruno STUNERa,∗, Cl´ement CHATELAINa, Thierry PAQUETa
+Normandie Univ, UNIROUEN, UNIHAVRE, INSA Rouen, LITIS, 76000 Rouen, France"
+80097a879fceff2a9a955bf7613b0d3bfa68dc23,Active Self-Paced Learning for Cost-Effective and Progressive Face Identification,"Active Self-Paced Learning for Cost-Effective and
+Progressive Face Identification
+Liang Lin, Keze Wang, Deyu Meng, Wangmeng Zuo, and Lei Zhang"
+748260579dc2fb789335a88ae3f63c114795d047,Action and Interaction Recognition in First-Person Videos,"Action and Interaction Recognition in First-person videos
+Sanath Narayan
+Dept. of Electrical Engg.,
+IISc, Bangalore
+Mohan S. Kankanhalli
+School of Computing,
+NUS, Singapore
+Kalpathi R. Ramakrishnan
+Dept. of Electrical Engg.,
+IISc, Bangalore"
+7484911e00afec5c08e7b83f3a1259d60035d77f,In Your Face: Startle to Emotional Facial Expressions Depends on Face Direction,"Article
+In Your Face: Startle to
+Emotional Facial Expressions
+Depends on Face Direction
+i-Perception
+January-February 2017, 1–13
+! The Author(s) 2017
+DOI: 10.1177/2041669517694396
+journals.sagepub.com/home/ipe
+Ole A˚ sli, Henriette Michalsen and Morten Øvervoll
+Department of Psychology, University of Tromsø—The Arctic University
+of Norway, Tromsø, Norway"
+747e9b36c5a1b0b8a9572da0ab416ddd1e1d2d33,Data Augmentation for Visual Question Answering,"Proceedings of The 10th International Natural Language Generation conference, pages 198–202,
+Santiago de Compostela, Spain, September 4-7 2017. c(cid:13)2017 Association for Computational Linguistics"
+74408cfd748ad5553cba8ab64e5f83da14875ae8,Facial Expressions Tracking and Recognition: Database Protocols for Systems Validation and Evaluation,"Facial Expressions Tracking and Recognition: Database Protocols for Systems Validation
+nd Evaluation"
+74a1e28dd2c03076124282482074e10bb02bc643,Coulomb Gans: Provably Optimal Nash Equi-,"Under review as a conference paper at ICLR 2018
+COULOMB GANS: PROVABLY OPTIMAL NASH EQUI-
+LIBRIA VIA POTENTIAL FIELDS
+Anonymous authors
+Paper under double-blind review"
+74671fd8dd510db4abdcb93864fb5d5f77c878a0,Real-Time Viola-Jones Face Detection in a Web Browser,"Real-Time Viola-Jones
+Face Detection in a
+Web Browser
+Theo Ephraim - Tristan Himmelman - Kaleem Siddiqi
+McGill University - School of Computer Science
+Centre For Intelligent Machines (CIM)
+http://flashfacedetection.com"
+74dbe6e0486e417a108923295c80551b6d759dbe,An HMM based Model for Prediction of Emotional Composition of a Facial Expression using both Significant and Insignificant Action Units and Associated Gender Differences,"International Journal of Computer Applications (0975 – 8887)
+Volume 45– No.11, May 2012
+An HMM based Model for Prediction of Emotional
+Composition of a Facial Expression using both
+Significant and Insignificant Action Units and
+Associated Gender Differences
+Suvashis Das
+Koichi Yamada
+Department of Management and Information
+Department of Management and Information
+Systems Science
+603-1 Kamitomioka, Nagaoka
+Niigata, Japan
+Systems Science
+603-1 Kamitomioka, Nagaoka
+Niigata, Japan"
+74032e526edb45bc6c79cb5576e69486e72a316d,Animated 3D Human Models for Use in Person Recognition Experiments,"Animated 3D Human Models for Use in Person Recognition Experiments
+Jean M. Vettel1,2,3, Justin Kantner1,2, Matthew Jaswa4, Michael Miller2
+U.S. Army Research Laboratory, 2University of California, Santa Barbara, 3University of
+Pennsylvania, 4DCS Corporation
+Jean M Vettel
+U.S. Army Research Laboratory
+59 Mulberry Point Road
+Aberdeen Proving Ground, MD 21005
+10.278.7431"
+747c25bff37b96def96dc039cc13f8a7f42dbbc7,EmoNets: Multimodal deep learning approaches for emotion recognition in video,"EmoNets: Multimodal deep learning approaches for emotion
+recognition in video
+Samira Ebrahimi Kahou · Xavier Bouthillier · Pascal Lamblin · Caglar Gulcehre ·
+Vincent Michalski · Kishore Konda · S´ebastien Jean · Pierre Froumenty · Yann
+Dauphin · Nicolas Boulanger-Lewandowski · Raul Chandias Ferrari · Mehdi Mirza ·
+David Warde-Farley · Aaron Courville · Pascal Vincent · Roland Memisevic ·
+Christopher Pal · Yoshua Bengio"
+74e6110466306f41f703d84bb3d136ba414b1998,Face Recognition System under Varying Lighting Conditions,"IOSR Journal of Computer Engineering (IOSR-JCE)
+e-ISSN: 2278-0661, p- ISSN: 2278-8727Volume 14, Issue 3 (Sep. - Oct. 2013), PP 79-88
+www.iosrjournals.org
+Face Recognition System under Varying Lighting Conditions
+P.Kalaiselvi1, S.Nithya2
+(Asst. Professor, Department of ECE, NSN College of Engineering and Technology, Karur, Tamilnadu, India)
+(Asst. Professor, Department of ECE, NSN College of Engineering and Technology, Karur, Tamilnadu, India)"
+744fa8062d0ae1a11b79592f0cd3fef133807a03,Deep Pain: Exploiting Long Short-Term Memory Networks for Facial Expression Classification.,"Aalborg Universitet
+Deep Pain
+Rodriguez, Pau; Cucurull, Guillem; Gonzàlez, Jordi; M. Gonfaus, Josep ; Nasrollahi, Kamal;
+Moeslund, Thomas B.; Xavier Roca, F.
+Published in:
+I E E E Transactions on Cybernetics
+DOI (link to publication from Publisher):
+0.1109/TCYB.2017.2662199
+Publication date:
+Document Version
+Accepted author manuscript, peer reviewed version
+Link to publication from Aalborg University
+Citation for published version (APA):
+Rodriguez, P., Cucurull, G., Gonzàlez, J., M. Gonfaus, J., Nasrollahi, K., Moeslund, T. B., & Xavier Roca, F.
+(2017). Deep Pain: Exploiting Long Short-Term Memory Networks for Facial Expression Classification. I E E E
+Transactions on Cybernetics, 1-11. DOI: 10.1109/TCYB.2017.2662199
+General rights
+Copyright and moral rights for the publications made accessible in the public portal are retained by the authors and/or other copyright owners
+nd it is a condition of accessing publications that users recognise and abide by the legal requirements associated with these rights.
+? Users may download and print one copy of any publication from the public portal for the purpose of private study or research."
+74f21f2edfa985280be63f8a01aa00541f3a5625,People Groping by Spatio-Temporal Features of Trajectories,"4-13
+MVA2013 IAPR International Conference on Machine Vision Applications, May 20-23, 2013, Kyoto, JAPAN
+People Groping by Spatio-Temporal Features of Trajectories
+Asami Okada†, Yusuke Moriguchi†, Norimichi Ukita†,
+nd Norihiro Hagita†‡
+Nara Institute od Science and Technology
+Advanced Telecommunications Research Institute International
+e-mail"
+747b15ecd9a9e28bbd733527c59e5dd0aa5de7a1,Learning Visual Features from Large Weakly Supervised Data,"Learning Visual Features from Large Weakly Supervised Data
+Armand Joulin∗
+Laurens van der Maaten∗
+Allan Jabri
+Nicolas Vasilache
+Facebook AI Research
+770 Broadway, New York NY 10003"
+743e582c3e70c6ec07094887ce8dae7248b970ad,Face Recognition based on Deep Neural Network,"International Journal of Signal Processing, Image Processing and Pattern Recognition
+Vol.8, No.10 (2015), pp.29-38
+http://dx.doi.org/10.14257/ijsip.2015.8.10.04
+Face Recognition based on Deep Neural Network
+Li Xinhua,Yu Qian
+Shandong Women’s University"
+74b0095944c6e29837c208307a67116ebe1231c8,Manifold learning using Euclidean k-nearest neighbor graphs [image processing examples]," beindependentandidenticallydis-tributed(i.i.d.)randomvectorswithvaluesinacompactsubsetof.The(-)nearestneighborof inisgivenby!""$%&(*,.%135 7 5where5 7 5istheusualEuclidean(<=)distanceinbe-tweenvector and .Forgeneralinteger?,the-nearestneighborofapointisdefinedinasimilarway.The-NNgraphputsanedgebetweeneachpointinandits-nearestneighbors.LetBCDBCDFHbethesetof-nearestneighborsof in.Thetotaledgelengthofthe-NNgraphisdefinedas:<JDCFHMN
+M%&QRS15 7 5J(1)whereVWXisapowerweightingconstant.2.1.ConvergencetoExtrinsicZ-EntropyThe-NNedgelengthliesinthelargeclassoffunctionalscalledcontinuousquasi-additiveEuclideanfunctionals[7].Othergraphsinthisclassincludetheminimalspanningtree,theminimalmatch-inggraphorthetravelingsalesmantouramongothers.Thesefunc-tionalshaveremarkableasymptoticbehaviorasincreases:Theorem1([7,Theorem8.3])Let
+ bei.i.d.randomvectorswithvaluesinacompactsubsetofandLebesgueden-sity\.Let]?_,aVb]anddefineZF]7VHf].Then,withprobability(w.p.)gh""jk<JDCFHmoDJDCp\mFrHtr(2)whereoDJDCisaconstantindependentof\.Furthermore,themeanlengthuv<JDCFHwfmconvergestothesamelimit.Thequantitythatdeterminesthelimit(2)inTheorem1istheex-trinsicR´enyiZ-entropyofthemultivariateLebesguedensity\:yz{mF\H7Zg!pz{\mFrHtr(3)III - 9880-7803-8484-9/04/$20.00 ©2004 IEEEICASSP 2004(cid:224)"
+74156a11c2997517061df5629be78428e1f09cbd,"Preparatory coordination of head, eyes and hands: Experimental study at intersections","Cancún Center, Cancún, México, December 4-8, 2016
+978-1-5090-4846-5/16/$31.00 ©2016 IEEE"
+74cbb3acfc401a397c9a4e151ff8e3ecf5ea76d0,Egocentric Video Description based on Temporally-Linked Sequences,"Egocentric Video Description based on Temporally-Linked Sequences
+Marc Bola˜nosa,b, ´Alvaro Perisc, Francisco Casacubertac, Sergi Solera, Petia Radevaa,b
+Universitat de Barcelona, Barcelona, Spain
+Computer Vision Center, Bellaterra, Spain
+PRHLT Research Center, Universitat Polit`ecnica de Val`encia, Val`encia, Spain"
+74410df341f44f5c915d97725ce396a862d44a7b,Shadow extraction and application in pedestrian detection,"Wang and Yagi EURASIP Journal on Image and Video Processing 2014, 2014:12
+http://jivp.eurasipjournals.com/content/2014/1/12
+RESEARCH
+Open Access
+Shadow extraction and application in
+pedestrian detection
+Junqiu Wang1* and Yasushi Yagi2"
+749d605dd12a4af58de1fae6f5ef5e65eb06540e,Multi-Task Video Captioning with Video and Entailment Generation,"Multi-Task Video Captioning with Video and Entailment Generation
+Ramakanth Pasunuru and Mohit Bansal
+UNC Chapel Hill
+{ram,"
+749382d19bfe9fb8d0c5e94d0c9b0a63ab531cb7,A Modular Framework to Detect and Analyze Faces for Audience Measurement Systems,"A Modular Framework to Detect and Analyze Faces for
+Audience Measurement Systems
+Andreas Ernst, Tobias Ruf, Christian Kueblbeck
+Fraunhofer Institute for Integrated Circuits IIS
+Department Electronic Imaging
+Am Wolfsmantel 33, 91058 Erlangen, Germany
+{andreas.ernst, tobias.ruf,"
+74c19438c78a136677a7cb9004c53684a4ae56ff,RESOUND: Towards Action Recognition Without Representation Bias,"RESOUND: Towards Action Recognition
+without Representation Bias
+Yingwei Li, Yi Li, and Nuno Vasconcelos
+UC San Diego"
+74618fb4ce8ce0209db85cc6069fe64b1f268ff4,Rendering and animating expressive caricatures,"Rendering and Animating Expressive
+Caricatures
+Mohammad Obaid* t, Ramakrishnan
+Mukundan
+*HITLab New Zealand,
+University
+of Canterbury,
+t, and Mark Billinghurst*
+Christchurch,
+New Zealand
+tComputer
+Science
+nd Software Engineering
+Email: {mohammad.obaid,
+Dept., University
+of Canterbury,
+New Zealand
+stylized
+nd control
+on the generated caricature."
+745ec003b7fbeb52aecd00c41ac889fcd4d88bcd,Guiding Intelligent Surveillance System by learning-by-synthesis gaze estimation,"Pattern Recognition Letters
+journal homepage: www.elsevier.com
+Guiding Intelligent Surveillance System by learning-by-synthesis gaze estimation
+Tongtong Zhaoa, Yuxiao Yana, Jinjia Penga, Zetian Mia, Xianping Fua,∗∗
+Information Science and Technology College, Dalian Maritime University, Dalian, China."
+74eae724ef197f2822fb7f3029c63014625ce1ca,Feature Extraction based on Local Directional Pattern with SVM Decision-level Fusion for Facial Expression Recognition,"International Journal of Bio-Science and Bio-Technology
+Vol. 5, No. 2, April, 2013
+Feature Extraction based on Local Directional Pattern with SVM
+Decision-level Fusion for Facial Expression Recognition
+Juxiang Zhou1, Tianwei Xu1,2 and Jianhou Gan1
+Key Laboratory of Education Informalization for Nationalities, Ministry of
+Education, Yunnan Normal University, Kunming, China
+College of Information, Yunnan Normal University, Kunming, China"
+744fe47157477235032f7bb3777800f9f2f45e52,"Progressive Growing of GANs for Improved Quality, Stability, and Variation","Published as a conference paper at ICLR 2018
+PROGRESSIVE GROWING OF GANS FOR IMPROVED
+QUALITY, STABILITY, AND VARIATION
+Tero Karras
+NVIDIA
+Samuli Laine
+NVIDIA
+Timo Aila
+NVIDIA
+Jaakko Lehtinen
+NVIDIA and Aalto University"
+74d4224989b5937ee6c97eec1955e64ab0699f57,Facial Emotional Classifier For Natural Interaction,"Electronic Letters on Computer Vision and Image Analysis 7(4):1-12, 2008
+Facial Emotional Classifier For Natural Interaction
+Isabelle Hupont, Eva Cerezo, Sandra Baldassarri
+Departamento de Informática e Ingeniería de Sistemas,
+Instituto de Investigación en Ingeniería de Aragón, Universidad de Zaragoza (Spain)
+Received 29th November 2007, Revised 26th February 2008, Accepted 3rd June 2008
+{478953, ecerezo,"
+7480d8739eb7ab97c12c14e75658e5444b852e9f,MLBoost Revisited: A Faster Metric Learning Algorithm for Identity-Based Face Retrieval,"NEGREL ET AL.: REVISITED MLBOOST FOR FACE RETRIEVAL
+MLBoost Revisited: A Faster Metric
+Learning Algorithm for Identity-Based Face
+Retrieval
+Romain Negrel
+Alexis Lechervy
+Frederic Jurie
+Normandie Univ, UNICAEN,
+ENSICAEN, CNRS
+France"
+747ca08cbf258da8d2b89ba31f24bdb17d7132bb,Tall and skinny QR factorizations in MapReduce architectures,"Tall and Skinny QR factorizations
+in MapReduce architectures
+Paul G. Constantine
+Sandia National Laboratories∗
+Albuquerque, NM
+David F. Gleich
+Sandia National Laboratories∗
+Livermore, CA"
+7411761e789ccb1da80984472f5df5cb084e8ba3,Towards Scene Understanding with Detailed 3D Object Representations,"Towards Scene Understanding with Detailed 3D Object Representations
+M. Zeeshan Zia1, Michael Stark2, and Konrad Schindler1
+Photogrammetry and Remote Sensing, ETH Z¨urich, Switzerland
+Stanford University and Max Planck Institute for Informatics"
+74ba4ab407b90592ffdf884a20e10006d2223015,Partial Face Detection in the Mobile Domain,"Partial Face Detection in the Mobile Domain
+Upal Mahbub, Student Member, IEEE, Sayantan Sarkar, Student Member, IEEE,
+nd Rama Chellappa, Fellow, IEEE"
+7405ed035d1a4b9787b78e5566340a98fe4b63a0,Self-Expressive Decompositions for Matrix Approximation and Clustering,"Self-Expressive Decompositions for
+Matrix Approximation and Clustering
+Eva L. Dyer, Member, IEEE, Tom A. Goldstein, Member, IEEE, Raajen Patel, Student Member, IEEE,
+Konrad P. K¨ording, and Richard G. Baraniuk, Fellow, IEEE"
+744db9bd550bf5e109d44c2edabffec28c867b91,FX e-Makeup for Muscle Based Interaction,"FX e-Makeup for Muscle Based Interaction
+Katia Canepa Vega1, Abel Arrieta2, Felipe Esteves3, and Hugo Fuks1
+Department of Informatics, PUC-Rio, Rio de Janeiro, Brazil
+Department of Mechanical Engineering, PUC-Rio, Rio de Janeiro, Brazil
+Department of Administration, PUC-Rio, Rio de Janeiro, Brazil"
+74325f3d9aea3a810fe4eab8863d1a48c099de11,Regression-Based Image Alignment for General Object Categories,"Regression-Based Image Alignment
+for General Object Categories
+Hilton Bristow1 and Simon Lucey2
+Queensland University of Technology (QUT)
+Brisbane QLD 4000, Australia
+Carnegie Mellon University (CMU)
+Pittsburgh PA 15289, USA"
+7478c2351c75183527f258aecce6931be9c9d624,Periodic Variance Maximization using Generalized Eigenvalue Decomposition applied to Remote Photoplethysmography estimation,"Periodic Variance Maximization using Generalized Eigenvalue
+Decomposition applied to Remote Photoplethysmography estimation
+Richard Macwan, Serge Bobbia, Yannick Benezeth, Julien Dubois, Alamin Mansouri
+LE2I EA7508, Arts et M´etiers
+Univ. Bourgogne Franche-Comt´e
+{richard.macwan, serge.bobbia, yannick.benezeth, julien.dubois,"
+744d23991a2c48d146781405e299e9b3cc14b731,Aging Face Recognition: A Hierarchical Learning Model Based on Local Patterns Selection,"This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TIP.2016.2535284, IEEE
+Transactions on Image Processing
+Aging Face Recognition: A Hierarchical Learning
+Model Based on Local Patterns Selection
+Zhifeng Li, Senior Member, IEEE, Dihong Gong, Xuelong Li, Fellow, IEEE, and Dacheng Tao, Fellow, IEEE"
+1a45ddaf43bcd49d261abb4a27977a952b5fff12,LDOP: Local Directional Order Pattern for Robust Face Retrieval,"LDOP: Local Directional Order Pattern for Robust
+Face Retrieval
+Shiv Ram Dubey, Member, IEEE, and Snehasis Mukherjee, Member, IEEE"
+1a849b694f2d68c3536ed849ed78c82e979d64d5,This is a repository copy of Symmetric Shape Morphing for 3 D Face and Head Modelling,"This is a repository copy of Symmetric Shape Morphing for 3D Face and Head Modelling.
+White Rose Research Online URL for this paper:
+http://eprints.whiterose.ac.uk/131760/
+Version: Accepted Version
+Proceedings Paper:
+Dai, Hang, Pears, Nicholas Edwin orcid.org/0000-0001-9513-5634, Smith, William Alfred
+Peter orcid.org/0000-0002-6047-0413 et al. (1 more author) (2018) Symmetric Shape
+Morphing for 3D Face and Head Modelling. In: The 13th IEEE Conference on Automatic
+Face and Gesture Recognition. IEEE .
+Reuse
+Items deposited in White Rose Research Online are protected by copyright, with all rights reserved unless
+indicated otherwise. They may be downloaded and/or printed for private study, or other acts as permitted by
+national copyright laws. The publisher or other rights holders may allow further reproduction and re-use of
+the full text version. This is indicated by the licence information on the White Rose Research Online record
+for the item.
+Takedown
+If you consider content in White Rose Research Online to be in breach of UK law, please notify us by
+emailing including the URL of the record and the reason for the withdrawal request.
+https://eprints.whiterose.ac.uk/"
+1ab7d8da096c418c0bf93de14d128eb008a92db4,Towards three-dimensional face recognition in the real Huibin,"Towards three-dimensional face recognition in the real
+Huibin Li
+To cite this version:
+Huibin Li. Towards three-dimensional face recognition in the real. Other. Ecole Centrale de
+Lyon, 2013. English. <NNT : 2013ECDL0037>. <tel-00998798>
+HAL Id: tel-00998798
+https://tel.archives-ouvertes.fr/tel-00998798
+Submitted on 2 Jun 2014
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destin´ee au d´epˆot et `a la diffusion de documents
+scientifiques de niveau recherche, publi´es ou non,
+´emanant des ´etablissements d’enseignement et de
+recherche fran¸cais ou ´etrangers, des laboratoires
+publics ou priv´es."
+1a7243913d9b8c6855b1eb3bb6566f2f1041d50a,Articulated clinician detection using 3D pictorial structures on RGB-D data,"Articulated Clinician Detection Using 3D Pictorial
+Structures on RGB-D Data
+Abdolrahim Kadkhodamohammadi, Afshin Gangi, Michel de Mathelin and Nicolas Padoy"
+1a878e4667fe55170252e3f41d38ddf85c87fcaf,Discriminative Machine Learning with Structure,"Discriminative Machine Learning with Structure
+Simon Lacoste-Julien
+Electrical Engineering and Computer Sciences
+University of California at Berkeley
+Technical Report No. UCB/EECS-2010-4
+http://www.eecs.berkeley.edu/Pubs/TechRpts/2010/EECS-2010-4.html
+January 12, 2010"
+1a03716411e72722f853b904a83d9c15a0d737a3,Using color texture sparsity for facial expression recognition,"Using Color Texture Sparsity for Facial Expression
+Recognition
+Seung Ho Lee, Hyungil Kim,
+Korea Advanced
+Department
+Institute
+of Electrical
+of Science
+of Korea
+Republic
+Daejeon,
+nd Y ong Man Ro
+Engineering
+nd Technology
+Department
+Engineering
+Konstantinos
+of Electrical
+University
+N. Plataniotis"
+1ae3a26a985fe525b23f080a9e1041ecff0509ad,A Comparative Study of Statistical Conversion of Face to Voice Based on Their Subjective Impressions,"Interspeech 2018
+-6 September 2018, Hyderabad
+0.21437/Interspeech.2018-2005"
+1a41831a3d7b0e0df688fb6d4f861176cef97136,A Biological Model of Object Recognition with Feature Learning,"massachusetts institute of technology — artificial intelligence laboratory
+A Biological Model of Object
+Recognition with Feature Learning
+Jennifer Louie
+AI Technical Report 2003-009
+CBCL Memo 227
+June 2003
+© 2 0 0 3 m a s s a c h u s e t t s i n s t i t u t e o f
+t e c h n o l o g y, c a m b r i d g e , m a 0 2 1 3 9 u s a — w w w. a i . m i t . e d u"
+1a9e0bf9f7a9495bcdf1aeb214ccc9df9f2a9030,Challenges and Opportunities The Main Memory System : Challenges and Opportunities,"특집원고Ⅰ
+The Main Memory System: Challenges and Opportunities
+Carnegie Mellon University Onur Mutlu・Justin Meza・Lavanya Subramanian
+The memory system is a fundamental performance and
+energy bottleneck in almost all computing systems. Recent
+system design, application, and technology trends that
+require more capacity, bandwidth, efficiency, and predictability
+out of the memory system make it an even more important
+system bottleneck. At the same time, DRAM technology
+is experiencing difficult technology scaling challenges
+that make the maintenance and enhancement of its capacity,
+energy-efficiency, and reliability significantly more costly
+with conventional techniques.
+In this article, after describing the demands and challenges
+faced by the memory system, we examine some promising
+research and design directions to overcome challenges posed
+y memory scaling. Specifically, we describe three major
+new research challenges and solution directions: 1) enabling
+new DRAM architectures, functions, interfaces, and better
+integration of the DRAM and the rest of the system (an"
+1a6b2972506d7d85100552bee99ce2b267e30d41,Learning Optimal Embedded Cascades,"Learning Optimal Embedded Cascades
+Mohammad Javad Saberian and Nuno Vasconcelos, Senior Member, IEEE"
+1a3f7b9fc451b54110aaebae56c65413c620f6e2,Multilevel Linear Dimensionality Reduction for Data Analysis using Nearest-Neighbor Graphs,"Multilevel Linear Dimensionality Reduction for Data
+Analysis using Nearest-Neighbor Graphs∗
+Sophia Sakellaridi
+Department of Computer
+Science and Engineering
+University of Minnesota;
+Minneapolis, MN 55455
+Haw-ren Fang
+Department of Computer
+Science and Engineering
+University of Minnesota;
+Minneapolis, MN 55455
+Yousef Saad
+Department of Computer
+Science and Engineering
+University of Minnesota;
+Minneapolis, MN 55455"
+1ae19084d2cd53c70d7e44d419df32560e417fb9,The Canadian experience using the expanded criteria donor classification for allocating deceased donor kidneys for transplantation,"Young et al. Canadian Journal of Kidney Health and Disease (2016) 3:15
+DOI 10.1186/s40697-016-0106-9
+Open Access
+O R I G I N AL R ES EA R C H AR TI C L E
+The Canadian experience using the
+expanded criteria donor classification for
+llocating deceased donor kidneys for
+transplantation
+Ann Young1, Stephanie N. Dixon2, Greg A. Knoll2,3, Amit X. Garg2,4, Charmaine E. Lok1,2,6, Ngan N. Lam5
+nd S. Joseph Kim1,2,6*"
+1a5151b4205ab27b1c76f98964debbfc11b124d5,Self Paced Deep Learning for Weakly Supervised Object Detection,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+Self Paced Deep Learning for Weakly
+Supervised Object Detection
+Enver Sangineto†, Moin Nabi†, Dubravko Culibrk and Nicu Sebe,"
+1a515f0b852c2e93272677dbf6ecb05c7be0ea2e,Reduced serotonin receptor subtypes in a limbic and a neocortical region in autism.,"RESEARCH ARTICLE
+Reduced Serotonin Receptor Subtypes in a Limbic and a Neocortical
+Region in Autism
+Adrian Oblak, Terrell T. Gibbs, and Gene J. Blatt
+Autism is a behaviorally defined, neurological disorder with symptom onset before the age of 3. Abnormalities in
+social-emotional behaviors are a core deficit in autism, and are characterized by impaired reciprocal–social interaction,
+lack of facial expressions, and the inability to recognize familiar faces. The posterior cingulate cortex (PCC) and fusiform
+gyrus (FG) are two regions within an extensive limbic-cortical network that contribute to social-emotional behaviors.
+Evidence indicates that changes in brains of individuals with autism begin prenatally. Serotonin (5-HT) is one of the
+earliest expressed neurotransmitters, and plays an important role in synaptogenesis, neurite outgrowth, and neuronal
+migration. Abnormalities in 5-HT systems have been implicated in several psychiatric disorders, including autism, as
+evidenced by immunology, imaging, genetics, pharmacotherapy, and neuropathology. Although information is known
+regarding peripheral 5-HT in autism, there is emerging evidence that 5-HT systems in the central nervous system,
+including various 5-HT receptor subtypes and transporters, are affected in autism. The present study demonstrated
+significant reductions in 5-HT1A receptor-binding density in superficial and deep layers of the PCC and FG, and in the
+density of 5-HT2A receptors in superficial layers of the PCC and FG. A significant reduction in the density of serotonin
+transporters (5-HTT) was also found in the deep layers of the FG, but normal levels were demonstrated in both layers of
+the PCC and superficial layers of the FG. This study provides potential substrates for decreased 5-HT modulation/
+innervation in the autism brain, and implicate two 5-HT receptor subtypes as potential neuromarkers for novel or
+existing pharmacotherapies. Autism Res 2013, 6: 571–583. © 2013 International Society for Autism Research, Wiley"
+1a6c3c37c2e62b21ebc0f3533686dde4d0103b3f,Implementation of Partial Face Recognition using Directional Binary Code,"International Journal of Linguistics and Computational Applications (IJLCA) ISSN 2394-6385 (Print)
+Volume 4, Issue 1, January – March 2017 ISSN 2394-6393 (Online)
+Implementation of Partial Face Recognition
+using Directional Binary Code
+N.Pavithra #1, A.Sivapriya*2, K.Hemalatha*3 , D.Lakshmi*4
+,2,3Final Year, Department of Computer Science and Engineering, PanimalarInstitute of Technology,
+Assistant Professor, Department of Computer Science and Engineering, PanimalarInstitute of Technology, Tamilnadu, India,
+faith
+is proposed. It
+face alignment and"
+1a3eee980a2252bb092666cf15dd1301fa84860e,PCA Gaussianization for image processing,"PCA GAUSSIANIZATION FOR IMAGE PROCESSING
+Valero Laparra, Gustavo Camps-Valls and Jes´us Malo
+Image Processing Laboratory (IPL), Universitat de Val`encia
+Catedr´atico A. Escardino - 46980 Paterna, Val`encia, Spain"
+1a382d4e436e3e4f3d735f6e34ba2bc61e30838e,Fusion of Multispectral Data Through Illumination-aware Deep Neural Networks for Pedestrian Detection,
+1a8a2539cffba25ed9a7f2b869ebb737276ccee1,Pros and Cons of GAN Evaluation Measures,"Pros and Cons of GAN Evaluation Measures
+Ali Borji"
+1ad823bf77c691f1d2b572799f8a8c572d941118,Précis of “Towards The Deep Model : Understanding Visual Recognition Through Computational Models”,"implement
+the system.
+Précis of “Towards ​The Deep Model
+: Understanding Visual
+Recognition Through Computational Models”
+Panqu Wang
+Introduction
+Vision, due to its significance in surviving and socializing, is one of the most important and
+extensively studied sensory functions in the human brain. In order to fully understand visual
+information processing, or more specifically, visual recognition, David Marr proposed the
+Tri-level Hypothesis [29], in that three levels of the system should be studied: the computational
+goal of the system, the internal representation or the algorithm the system uses to achieve the
+goal, and the neural substrates that
+is well-known that visual
+recognition in the human brain is implemented by the ventral visual pathway [32], which
+receives visual information from the retina and goes through a layered structure including V1
+(also known as the primary visual cortex), V2, V4, before reaching the inferior temporal cortex
+(IT). The topographic mapping between the retina and the human visual cortex follows a
+log-polar transformation, in which the Cartesian coordinates of the retina are transformed to
+polar coordinates (polar angle and eccentricity) in the human visual cortex. From V1 to V4, each"
+1abf6491d1b0f6e8af137869a01843931996a562,ParseNet: Looking Wider to See Better,"ParseNet: Looking Wider to See Better
+Wei Liu
+UNC Chapel Hill
+Andrew Rabinovich
+MagicLeap Inc.
+Alexander C. Berg
+UNC Chapel Hill"
+1a031378cf1d2b9088a200d9715d87db8a1bf041,D Eep D Ictionary L Earning : S Ynergizing R E - Construction and C Lassification,"Workshop track - ICLR 2018
+DEEP DICTIONARY LEARNING: SYNERGIZING RE-
+CONSTRUCTION AND CLASSIFICATION
+Shahin Mahdizadehaghdam, Ashkan Panahi, Hamid Krim & Liyi Dai"
+1afd481036d57320bf52d784a22dcb07b1ca95e2,Automated Content Metadata Extraction Services Based on MPEG Standards,"The Computer Journal Advance Access published December 6, 2012
+© The Author 2012. Published by Oxford University Press on behalf of The British Computer Society. All rights reserved.
+For Permissions, please email:
+doi:10.1093/comjnl/bxs146
+Automated Content Metadata Extraction
+Services Based on MPEG Standards
+D.C. Gibbon∗, Z. Liu, A. Basso and B. Shahraray
+AT&T Labs Research, Middletown, NJ, USA
+Corresponding author:
+This paper is concerned with the generation, acquisition, standardized representation and transport
+of video metadata. The use of MPEG standards in the design and development of interoperable
+media architectures and web services is discussed. A high-level discussion of several algorithms
+for metadata extraction is presented. Some architectural and algorithmic issues encountered when
+designing services for real-time processing of video streams, as opposed to traditional offline media
+processing, are addressed. A prototype real-time video analysis system for generating MPEG-7
+Audiovisual Description Profile from MPEG-2 transport stream encapsulated video is presented.
+Such a capability can enable a range of new services such as content-based personalization of live
+roadcasts given that the MPEG-7 based data models fit in well with specifications for advanced
+television services such as TV-Anytime andAlliance for Telecommunications Industry Solutions IPTV
+Interoperability Forum."
+1a7e385d2aa041ca8931784fb7664e9905194565,Sentiment Analysis Using Social Multimedia,"Chapter 2
+Sentiment Analysis Using Social
+Multimedia
+Jianbo Yuan, Quanzeng You and Jiebo Luo"
+1ad88221f308bf9f36775650f880f32d91ce929a,Learning a Recurrent Residual Fusion Network for Multimodal Matching,"Learning a Recurrent Residual Fusion Network for Multimodal Matching
+Yu Liu
+Yanming Guo
+Erwin M. Bakker
+Michael S. Lew
+LIACS Media Lab, Leiden University, Leiden, The Netherlands
+{y.liu, y.guo, e.m.bakker,"
+1a0912bb76777469295bb2c059faee907e7f3258,Mask R-CNN,"Mask R-CNN
+Kaiming He Georgia Gkioxari
+Piotr Doll´ar Ross Girshick
+Facebook AI Research (FAIR)"
+1afe9919ddb2b245e21b610fa96037724bcdf648,SceneNet: A Perceptual Ontology for Scene Understanding,"SceneNet: A Perceptual Ontology for Scene
+Understanding
+Ilan Kadar and Ohad Ben-Shahar
+Ben-Gurion University of the Negev"
+1a9a192b700c080c7887e5862c1ec578012f9ed1,Discriminant Subspace Analysis for Face Recognition with Small Number of Training Samples,"IEEE TRANSACTIONS ON SYSTEM, MAN AND CYBERNETICS, PART B
+Discriminant Subspace Analysis for Face
+Recognition with Small Number of Training
+Samples
+Hui Kong, Xuchun Li, Matthew Turk, and Chandra Kambhamettu"
+1abdf07ce2fca11a26222dedd581b68b141af3f2,Face Recognition Aiding Historical Photographs Indexing Using a Two-Stage Training Scheme and an Enhanced Distance Measure,"Face Recognition Aiding Historical Photographs Indexing
+Using a Two-stage Training Scheme and an Enhanced Distance Measure
+Ana Paula Brand˜ao Lopes1,2, Camillo Jorge Santos Oliveira1,3, Arnaldo de Albuquerque Ara´ujo1
+Computer Science Department – Federal University of Minas Gerais
+Av. Antˆonio Carlos, 6627, Pampulha, CEP 31270–901, Belo Horizonte, MG, Brazil
+Exact and Technological Sciences Department – State University of Santa Cruz
+Rodovia Ilh´eus-Itabuna, km 16 – Pavilh˜ao Jorge Amado, CEP 45600-000, Ilh´eus, BA, Brazil
+Informatics Department – Pontifical Catholic University of Minas Gerais
+Rua Rio Comprido, 4.580 - CEP 32.010-025, Contagem, MG, Brazil,
+{paula, camillo,"
+1a2431e3b35a4a4794dc38ef16e9eec2996114a1,Automated Face Recognition: Challenges and Solutions,"We are IntechOpen,
+the world’s leading publisher of
+Open Access books
+Built by scientists, for scientists
+,800
+16,000
+Open access books available
+International authors and editors
+Downloads
+Our authors are among the
+Countries delivered to
+TOP 1%
+2.2%
+most cited scientists
+Contributors from top 500 universities
+Selection of our books indexed in the Book Citation Index
+in Web of Science™ Core Collection (BKCI)
+Interested in publishing with us?
+Contact
+Numbers displayed above are based on latest data collected."
+1a8ccc23ed73db64748e31c61c69fe23c48a2bb1,Extensive Facial Landmark Localization with Coarse-to-Fine Convolutional Network Cascade,"Extensive Facial Landmark Localization
+with Coarse-to-fine Convolutional Network Cascade
+Erjin Zhou Haoqiang Fan Zhimin Cao Yuning Jiang Qi Yin
+Megvii Inc."
+1afe5d933b58b4dd982a559cc6ec1d17959239de,Enhanced canonical correlation analysis with local density for cross-domain visual classification,"978-1-5090-4117-6/17/$31.00 ©2017 IEEE
+ICASSP 2017"
+1a86620ea59816564db30fe0ae94cc422c5266e3,Can 3D Pose be Learned from 2D Projections Alone?,"Can 3D Pose be Learned from
+D Projections Alone?
+Dylan Drover, Rohith MV, Ching-Hang Chen,
+Amit Agrawal, Ambrish Tyagi, and Cong Phuoc Huynh
+Amazon Lab126 Inc., Sunnyvale, CA, USA
+{droverd, kurohith, chinghc, aaagrawa,
+mbrisht,"
+1ad97cce5fa8e9c2e001f53f6f3202bddcefba22,Grassmann Averages for Scalable Robust PCA,"Grassmann Averages for Scalable Robust PCA
+Aasa Feragen
+DIKU and MPIs T¨ubingen∗
+Denmark and Germany
+Søren Hauberg
+DTU Compute∗
+Lyngby, Denmark"
+1a219e7bcd8f30f886a1f24a8c05bc26bef83ff9,Crowd Counting with Density Adaption Networks,"Crowd Counting with Density Adaption Networks
+Li Wang, Weiyuan Shao, Yao Lu, Hao Ye, Jian Pu, Yingbin Zheng"
+1a1ed320882c00c94d9f738b7b14eadd941376ed,Extracting Human Face Similarity Judgments: Pairs or Triplets?,"Extracting Human Face Similarity Judgments: Pairs or Triplets?
+Linjie Li1, Vicente Malave2, Amanda Song2, and Angela J. Yu2
+Department of Electrical and Computer Engineering, University of California, San Diego, La Jolla, CA, USA
+Department of Cognitive Science, University of California, San Diego, La Jolla, CA, USA"
+1a1955920ee36d58265fe17100ca451d899e8372,A Local Feature based on Lagrangian Measures for Violent Video Classification,"Best Paper Award, IET 6th International Conference on Imaging for Crime Prevention and Detection, 2015
+A Local Feature based on Lagrangian Measures for Violent Video
+Classification
+Tobias Senst, Volker Eiselein, Thomas Sikora
+Communication Systems Group, Technische Universität Berlin, Germany
+Keywords: violent video detection,
+recognition, lagrangian measures, lagrangian framework
+local feature, action"
+1a9997d8421d577a728f6ac119d4b14a3f46402c,Using Tectogrammatical Annotation for Studying Actors and Actions in Sallust ’ s Bellum Catilinae,"The Prague Bulletin of Mathematical Linguistics
+NUMBER 111 OCTOBER 2018 5–28
+Using Tectogrammatical Annotation for Studying
+Actors and Actions in Sallust’s Bellum Catilinae
+Berta González Saavedra,a Marco Passarottib
+Dep. de Filología Clásica, Universidad Autónoma de Madrid, Spain
+CIRCSE Research Centre. Università Cattolica del Sacro Cuore, Milan, Italy"
+1a6d748365dbf3b17f2db371a30469478ee7b142,DeepID-Net: Object Detection with Deformable Part Based Convolutional Neural Networks,"This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TPAMI.2016.2587642, IEEE
+Transactions on Pattern Analysis and Machine Intelligence
+IEEE TRANSACTIONS PATTERN ANALYSIS AND MACHINE INTELLIGENCE
+DeepID-Net: Object Detection with Deformable
+Part Based Convolutional Neural Networks
+Wanli Ouyang*, Member, IEEE, Xingyu Zeng*, Student Member, IEEE,
+Xiaogang Wang, Member, IEEE,Shi Qiu Member, IEEE, Ping Luo, Member, IEEE,
+Yonglong Tian Student Member, IEEE, Hongsheng Li, Member, IEEE, Shuo Yang Student Member, IEEE,
+Zhe Wang, Student Member, IEEE, Hongyang Li, Kun Wang, Junjie Yan,
+Chen-Change Loy, Member, IEEE, Xiaoou Tang, Fellow, IEEE"
+1a54a8b0c7b3fc5a21c6d33656690585c46ca08b,Fast Feature Pyramids for Object Detection,"Fast Feature Pyramids for Object Detection
+Piotr Doll´ar, Ron Appel, Serge Belongie, and Pietro Perona"
+1a51bc5f9f12f6794297a426739350ae57c87731,Image classification with CNN-based Fisher vector coding,"Kent Academic Repository
+Full text document (pdf)
+Citation for published version
+Song, Yan and Hong, Xinhai and McLoughlin, Ian Vince and Dai, Li-Rong (2017) Image Classification
+with CNN-based Fisher Vector Coding. In: IEEE International Conference on Visual Communications
+nd Image Processing 2016, 27-30 Nov 2016, Chengdu, Sichuan, China.
+https://doi.org/10.1109/VCIP.2016.7805494
+Link to record in KAR
+http://kar.kent.ac.uk/57115/
+Document Version
+Author's Accepted Manuscript
+Copyright & reuse
+Content in the Kent Academic Repository is made available for research purposes. Unless otherwise stated all
+ontent is protected by copyright and in the absence of an open licence (eg Creative Commons), permissions
+for further reuse of content should be sought from the publisher, author or other copyright holder.
+Versions of research
+The version in the Kent Academic Repository may differ from the final published version.
+Users are advised to check http://kar.kent.ac.uk for the status of the paper. Users should always cite the
+published version of record.
+Enquiries"
+1aa52a25c2967b8bc228268c9ab5a96a32d2189b,Visual Fashion-Product Search at SK Planet,"Visual Fashion-Product Search at SK Planet
+Taewan Kim, Seyeoung Kim, Sangil Na, Hayoon Kim, Moonki Kim, Byoung-Ki Jeon
+Machine Intelligence Lab.
+SK Planet, SeongNam City, South Korea"
+1a0b09e7e9182a68fc457bb888536b9023f6c9fd,Multi-affinity spectral clustering,"MULTI-AFFINITY SPECTRAL CLUSTERING
+Hsin-Chien Huang(cid:63)†
+Yung-Yu Chuang(cid:63)
+Chu-Song Chen†
+(cid:63)National Taiwan University
+Academia Sinica"
+1a7a2221fed183b6431e29a014539e45d95f0804,Person Identification Using Text and Image Data,"Person Identification Using Text and Image Data
+David S. Bolme, J. Ross Beveridge and Adele E. Howe
+Computer Science Department
+Colorado State Univeristy
+Fort Collins, Colorado 80523"
+1a5b39a4b29afc5d2a3cd49087ae23c6838eca2b,Competitive Game Designs for Improving the Cost Effectiveness of Crowdsourcing,"Competitive Game Designs for Improving the Cost
+Effectiveness of Crowdsourcing
+Markus Rokicki, Sergiu Chelaru, Sergej Zerr, Stefan Siersdorfer
+L3S Research Center, Hannover, Germany"
+28bd795c580ca24f40dc82cd01d9d277749d2661,Site-adaptation methods for face recognition,"Site-adaptation methods for face recognition
+Jilin Tu and Xiaoming Liu and Peter Tu"
+28209a6ef1de7c10ec13717eba8bad7c2f4feba7,Deep Representation of Facial Geometric and Photometric Attributes for Automatic 3D Facial Expression Recognition,"Deep Representation of Facial Geometric and
+Photometric Attributes for Automatic 3D Facial
+Expression Recognition
+Huibin Li, Jian Sun∗, Dong Wang, Zongben Xu, and Liming Chen"
+28e9ae07540e3709e7a3a6242f636f893ba557e6,Learning to Select Pre-Trained Deep Representations with Bayesian Evidence Framework,"Learning to Select Pre-trained Deep Representations with
+Bayesian Evidence Framework
+Yong-Deok Kim∗1
+Taewoong Jang∗2 Bohyung Han3
+Seungjin Choi3
+Software R&D Center, Device Solutions, Samsung Electronics, Korea
+Department of Computer Science and Engineering, POSTECH, Korea
+Stradvision Inc., Korea"
+286eb053f55e45ad5d0490c1c18f6d80381dfb4b,Block-Sparse Recovery via Convex Optimization,"Block-Sparse Recovery via Convex Optimization
+Ehsan Elhamifar, Student Member, IEEE, and Ren´e Vidal, Senior Member, IEEE"
+287795991fad3c61d6058352879c7d7ae1fdd2b6,Biometrics Security: Facial Marks Detection from the Low Quality Images,"International Journal of Computer Applications (0975 – 8887)
+Volume 66– No.8, March 2013
+Biometrics Security: Facial Marks Detection from the
+Low Quality Images
+nd facial marks are detected using LoG with morphological
+operator. This method though was not enough to detect the
+facial marks from the low quality images [7]. But, facial
+marks have been used to speed up the retrieval process in
+order to differentiate the human faces [15].
+Ziaul Haque Choudhury K.M.Mehata
+B.S.Abdur Rahman University B.S.Abdur Rahman University
+Dept. Of Information Technology Dept. Of Computer Science & Engineering
+Chennai, India Chennai, India"
+282578039c767f3d393529565cae6be56fda6242,Augmented Reality Meets Computer Vision: Efficient Data Generation for Urban Driving Scenes,"Augmented Reality Meets Computer Vision : Efficient Data Generation for
+Urban Driving Scenes
+Hassan Abu Alhaija1
+Siva Karthik Mustikovela1
+Lars Mescheder2 Andreas Geiger2,3 Carsten Rother1
+Computer Vision Lab, TU Dresden
+Autonomous Vision Group, MPI for Intelligent Systems T¨ubingen
+Computer Vision and Geometry Group, ETH Z¨urich"
+285faa4cc54ef9b1834128705e0f96ad17b61e0b,SIFT Flow: Dense Correspondence across Scenes and Its Applications,"SIFT Flow: Dense Correspondence across
+Scenes and its Applications
+Ce Liu, Member, IEEE, Jenny Yuen, Student Member, IEEE, and Antonio Torralba, Member, IEEE"
+28d7029cfb73bcb4ad1997f3779c183972a406b4,Discriminative Nonlinear Analysis Operator Learning: When Cosparse Model Meets Image Classification,"Discriminative Nonlinear Analysis Operator
+Learning: When Cosparse Model Meets Image
+Classification
+Zaidao Wen, Biao Hou, Member, IEEE, and Licheng Jiao, Senior Member, IEEE"
+280d59fa99ead5929ebcde85407bba34b1fcfb59,Online Nonnegative Matrix Factorization With Outliers,"978-1-4799-9988-0/16/$31.00 ©2016 IEEE
+ICASSP 2016"
+28126d165f73c2a18600a9b0440f5e80191d52d9,Clock-Modeled Ternary Spatial Relations for Visual Scene Analysis,"Clock-Modeled Ternary Spatial Relations
+for Visual Scene Analysis
+Joanna Isabelle Olszewska
+School of Computing and Engineering, University of Huddersfield
+Queensgate, Huddersfield, HD1 3DH, United Kingdom"
+287c5be2610e1c61798851feb32b88c424acfbf9,Hierarchical Co-Attention for Visual Question Answering,"Hierarchical Co-Attention for Visual Question Answering
+Jiasen Lu, Jianwei Yang, Dhruv Batra, Devi Parikh
+Virginia Tech
+{jiasenlu, jw2yang, dbatra,"
+28f9cf85ebbff86207e1f6067880bb23daff0878,Prime Object Proposals with Randomized Prim's Algorithm,"Prime Object Proposals with Randomized Prim’s Algorithm
+Santiago Manen1
+Matthieu Guillaumin1
+Luc Van Gool1,2
+Computer Vision Laboratory
+ESAT - PSI / IBBT
+{smanenfr, guillaumin,
+ETH Zurich
+K.U. Leuven"
+286ea63b1b5df1b8b67718f25b47357ec3168e97,Human parsing using stochastic and-or grammars and rich appearances,"Human Parsing using Stochastic And-Or
+Grammars and Rich Appearances
+Brandon Rothrock and Song-Chun Zhu
+UCLA Dept. of Computer Science
+Thursday, November 17, 11"
+284be8be0c6bedc36dfe43229bc84345ab0aedc2,Faster Training of Mask R-CNN by Focusing on Instance Boundaries,"Faster Training of Mask R-CNN by Focusing on Instance Boundaries$
+Roland S. Zimmermanna,b,1, Julien N. Siemsa,c,2
+BMW Car IT GmbH, Lise-Meitner-Straße 14, 89081 Ulm, Germany
+Georg-August University of G¨ottingen, Friedrich-Hund-Platz 1, 37077 G¨ottingen, Germany
+Albert Ludwig University of Freiburg, Fahnenbergplatz, 79085 Freiburg im Breisgau, Germany"
+28f53ec7732299fa946ed3fc27bf691a6ab5c60c,Spatial as Deep: Spatial CNN for Traffic Scene Understanding,"Spatial As Deep: Spatial CNN for Traffic Scene Understanding
+Xingang Pan1, Jianping Shi2, Ping Luo1, Xiaogang Wang1, and Xiaoou Tang1
+{px117, pluo,
+The Chinese University of Hong Kong 2SenseTime Group Limited"
+283550fce0fdc0876db5df533625dffdfcd8d099,Fairness-aware scheduling on single-ISA heterogeneous multi-cores,"Fairness-Aware Scheduling on
+Single-ISA Heterogeneous Multi-Cores
+Kenzo Van Craeynest†◦
+Ghent University, Belgium
+Shoaib Akram†
+Wim Heirman†◦
+◦ExaScience Lab, Belgium
+Aamer Jaleel‡
+Lieven Eeckhout†
+VSSAD, Intel Corporation
+(e.g.,"
+28cd46a078e8fad370b1aba34762a874374513a5,"cvpaper.challenge in 2016: Futuristic Computer Vision through 1, 600 Papers Survey","CVPAPER.CHALLENGE IN 2016, JULY 2017
+vpaper.challenge in 2016: Futuristic Computer
+Vision through 1,600 Papers Survey
+Hirokatsu Kataoka, Soma Shirak-
+be, Yun He, Shunya Ueta, Teppei Suzuki, Kaori Abe, Asako Kanezaki, Shin’ichiro
+Morita, Toshiyuki Yabe, Yoshihiro Kanehara, Hiroya Yatsuyanagi, Shinya Maruyama, Ryosuke Taka-
+sawa, Masataka Fuchida, Yudai Miyashita, Kazushige Okayasu, Yuta Matsuzaki"
+28daa489dace2d2f040dcdbbd2d4ab919b046254,2D/3D Pose Estimation and Action Recognition using Multitask Deep Learning,"D/3D Pose Estimation and Action Recognition using Multitask Deep Learning
+ETIS UMR 8051, Paris Seine University, ENSEA, CNRS, F-95000, Cergy, France
+Sorbonne Universit´e, CNRS, Laboratoire d’Informatique de Paris 6, LIP6, F-75005 Paris, France
+Diogo C. Luvizon1, David Picard1,2, Hedi Tabia1
+{diogo.luvizon, picard,"
+2805daf3795e4e153d79dbecfe88b830ddc068d3,Articulated human motion tracking with foreground learning,"ARTICULATED HUMAN MOTION TRACKING WITH FOREGROUND LEARNING
+Aichun Zhu1, Hichem Snoussi1, Abel Cherouat2
+ICD - LM2S - Universit´e de Technologie de Troyes (UTT) - UMR STMR CNRS
+ICD - GAMMA3 - Universit´e de Technologie de Troyes (UTT) - UMR STMR CNRS
+2 rue Marie Curie - CS 42060 - 10004 Troyes cedex - France
+E-mail :{aichun.zhu, hichem.snoussi,"
+280d45fb813e75622b7c584ee7fba70066245871,Visual Tracking with Online Incremental Deep Learning and Particle Filter,"International Journal of Signal Processing, Image Processing and Pattern Recognition
+Vol.8, No.12 (2015), pp.107-120
+http://dx.doi.org/10.14257/ijsip.2015.8.12.12
+Visual Tracking with Online Incremental Deep Learning and
+Particle Filter
+Shuai Cheng 1, Yonggang Cao3,1, Junxi Sun2 and Guangwen Liu1*
+School of Electronic Information Engineering, Changchun University of Science
+School of Computer Science and information Technology, Northeast Normal
+nd Technology, Changchun, China
+Changchun Institute of Optics, Fine Mechanics and Physics, Chinese Academy of
+University, Changchun, China
+Sciences, Changchun, China"
+2803a7e8e6057d4e9462b37b258e670df61a742d,The Conference on Empirical Methods in Natural Language Processing Proceedings of the 2nd Workshop on Structured Prediction for Natural Language Processing,"EMNLP2017TheConferenceonEmpiricalMethodsinNaturalLanguageProcessingProceedingsofthe2ndWorkshoponStructuredPredictionforNaturalLanguageProcessingSeptember9-11,2017Copenhagen,Denmark"
+28795f32b324eb3601e9a8c1ce93335691e120f3,CliqueCNN: Deep Unsupervised Exemplar Learning,"CliqueCNN: Deep Unsupervised Exemplar Learning
+Miguel A. Bautista∗, Artsiom Sanakoyeu∗, Ekaterina Sutter, Björn Ommer
+Heidelberg Collaboratory for Image Processing
+IWR, Heidelberg University, Germany"
+28103f6c09fd64c90a738076b0681400d4d31c9f,Color Invariants for Person Reidentification,"Color Invariants for Person
+Re-Identification
+Igor Kviatkovsky
+Technion - Computer Science Department - M.Sc. Thesis MSC-2012-03 - 2012"
+2891ceceaf586e4ae013d932978074ff0a06801f,Joint statistical analysis of images and keywords with applications in semantic image enhancement,"Joint Statistical Analysis of Images and Keywords with
+Applications in Semantic Image Enhancement
+Albrecht Lindner
+School of Computer and
+Communication Sciences
+EPFL, Switzerland
+Nicolas Bonnier
+Océ Print Logic Technologies
+Créteil, France
+Appu Shaji
+School of Computer and
+Communication Sciences
+EPFL, Switzerland
+Sabine Süsstrunk
+School of Computer and
+Communication Sciences
+EPFL, Switzerland"
+28d65e4d72638983fbc723b102d78b10587c06aa,Low Resolution Sparse Binary Face Patterns,
+28b6adbc5ef790413431cdb2f512432862778b3b,Security and Surveillance,"Security and Surveillance
+Shaogang Gong and Chen Change Loy and Tao Xiang"
+286c1e0b34ee6d40706ca6a02604420a192204e7,An overview of NuDetective Forensic Tool and its usage to combat child pornography in Brazil,"An overview of NuDetective Forensic Tool and its usage
+to combat child pornography in Brazil
+Pedro Monteiro da Silva Eleuterio and Mateus de Castro Polastro
+Brazilian Federal Police"
+28b5b5f20ad584e560cd9fb4d81b0a22279b2e7b,A New Fuzzy Stacked Generalization Technique and Analysis of its Performance,"A New Fuzzy Stacked Generalization Technique
+nd Analysis of its Performance
+Mete Ozay, Student Member, IEEE, Fatos T. Yarman Vural, Member, IEEE"
+28c24f16e20c83c747f2aca8232f2cb6614905f5,The Role of Face Parts in Gender Recognition,"The Role of Face Parts in Gender Recognition
+Yasmina Andreu and Ram´on A. Mollineda
+Dept. Llenguatges i Sistemes Inform`atics
+Universitat Jaume I. Castell´o de la Plana, Spain"
+283181a2173b485726664edc6fe73f0465387629,Random Temporal Skipping for Multirate Video Analysis,"Random Temporal Skipping for Multirate Video
+Analysis
+Yi Zhu1 and Shawn Newsam1
+University of California at Merced, Merced CA 95343, USA"
+28bc378a6b76142df8762cd3f80f737ca2b79208,Understanding Objects in Detail with Fine-Grained Attributes,"Understanding Objects in Detail with Fine-grained Attributes
+Andrea Vedaldi1
+Siddharth Mahendran2
+Stavros Tsogkas3
+Subhransu Maji4
+Ross Girshick5
+Juho Kannala6
+Esa Rahtu6
+Matthew B. Blaschko3
+David Weiss7
+Ben Taskar8
+Naomi Saphra2
+Sammy Mohamed9
+Iasonas Kokkinos3
+Karen Simonyan1"
+2814d558b4d7425b5dae6b3dbbf5f4a08650fcb1,A joint estimation of head and body orientation cues in surveillance video,"A Joint Estimation of Head and Body Orientation Cues in Surveillance Video
+Cheng Chen
+Alexandre Heili
+Jean-Marc Odobez
+Idiap Research Institute – CH-1920, Martigny, Switzerland∗"
+28e77337bcb88e37d36f5660709a53e71377a2a8,5 Discriminative Cluster Analysis,",250+OPEN ACCESS BOOKS106,000+INTERNATIONALAUTHORS AND EDITORS112+ MILLIONDOWNLOADSBOOKSDELIVERED TO151 COUNTRIESAUTHORS AMONGTOP 1%MOST CITED SCIENTIST12.2%AUTHORS AND EDITORSFROM TOP 500 UNIVERSITIESSelection of our books indexed in theBook Citation Index in Web of Science™Core Collection (BKCI)Chapter from the book Theory and Novel Applications of Machine LearningDownloaded from:http://www.intechopen.com/books/theory_and_novel_applications_of_machine_learningPUBLISHED BYWorld's largest Science,Technology & Medicine Open Access book publisherInterested in publishing with InTechOpen?Contact us at"
+2842cebee2793c9b4f503895a32b328b7781b60e,BWIBots: A platform for bridging the gap between AI and human-robot interaction research,"Article
+BWIBots: A platform for bridging the
+gap between AI and human–robot
+interaction research
+The International Journal of
+Robotics Research
+© The Author(s) 2017
+Reprints and permissions:
+sagepub.co.uk/journalsPermissions.nav
+DOI: 10.1177/0278364916688949
+journals.sagepub.com/home/ijr
+Piyush Khandelwal1, Shiqi Zhang1,2, Jivko Sinapov1, Matteo Leonetti1,3, Jesse Thomason1,
+Fangkai Yang4, Ilaria Gori5, Maxwell Svetlik1, Priyanka Khante1, Vladimir Lifschitz1,
+J. K. Aggarwal5, Raymond Mooney1 and Peter Stone1"
+28af8e1a3cb3a158f8a642c8493fcfb207743d0a,Better Image Segmentation by Exploiting Dense Semantic Predictions,"Better Image Segmentation by Exploiting Dense
+Semantic Predictions
+Qiyang Zhao, Lewis D Griffin
+Beihang University & UCL"
+2864c8df356b1b915e16bb285bda64bfd7396f74,3D Face Reconstruction from Stereo: A Model Based Approach,"-4244-1437-7/07/$20.00 ©2007 IEEE
+III - 65
+ICIP 2007"
+2848cde23fe32c30980183f33b6a2c2ce7526726,Three-Dimensional Model-Based Human Detection in Crowded Scenes,"Title
+Three-dimensional model-based human detection in crowded
+scenes
+Author(s)
+Wang, L; Yung, NHC
+Citation
+v. 13 n. 2, p. 691-703
+Issued Date
+http://hdl.handle.net/10722/155766
+Rights
+Copyright © IEEE.; ©20xx IEEE. Personal use of this material is
+permitted. However, permission to reprint/republish this material
+for advertising or promotional purposes or for creating new
+ollective works for resale or redistribution to servers or lists, or
+to reuse any copyrighted component of this work in other works
+must be obtained from the IEEE.; This work is licensed under a
+Creative Commons Attribution-NonCommercial-NoDerivatives
+.0 International License."
+287afb29b5aef6255a5882418b87e6b41cc9b29d,Nude Detection in Video Using Bag-of-Visual-Features,"Nude Detection in Video using Bag-of-Visual-Features
+Ana Paula B. Lopes∗†, Sandra E. F. de Avila∗, Anderson N. A. Peixoto∗,
+Rodrigo S. Oliveira∗, Marcelo de M. Coelho∗‡ and Arnaldo de A. Ara´ujo∗
+Computer Science Department, Federal University of Minas Gerais – UFMG
+Exact and Technological Sciences Department, State University of Santa Cruz – UESC
+1270–010, Belo Horizonte, MG, Brazil
+5662–000, Ilh´eus, BA, Brazil
+Preparatory School of Air Cadets – EPCAR
+6205–900, Barbacena, MG, Brazil
+{paula, sandra, andenap, rsilva, mcoelho,"
+28bcf31f794dc27f73eb248e5a1b2c3294b3ec9d,Improved Combination of LBP plus LFDA for Facial Expression Recognition using SRC,"International Journal of Computer Applications (0975 – 8887)
+Volume 96– No.13, June 2014
+Improved Combination of LBP plus LFDA for Facial
+Expression Recognition using SRC
+Ritesh Bora
+Research Scholar, CSE Department,
+Government College of Engineering, Aurangabad
+human
+facial
+expression
+recognition"
+288bddfabe739b32721df62d821632e3dafed06a,Robust multi-image based blind face hallucination,"Robust Multi-Image Based Blind Face Hallucination
+Yonggang Jin, 2Christos-Savvas Bouganis
+University of Bristol. 2Imperial College London.
+1.56 0.73
+3.15 0.80
+3.61 0.82
+3.32 0.80
+3.98 0.83
+3.63 0.82
+PCA-Init
+PCA-Est
+PCA-GT
+MPPCA-Est MPPCA-GT
+Methods
+Blurring
+Trans.
+9.67
+Initial
+9.52
+[1, 5]"
+2830fb5282de23d7784b4b4bc37065d27839a412,Poselets: Body part detectors trained using 3D human pose annotations,"Poselets: Body Part Detectors Trained Using 3D Human Pose Annotations ∗
+Lubomir Bourdev1,2 and Jitendra Malik1
+EECS, U.C. Berkeley, Berkeley, CA 94720
+Adobe Systems, Inc., 345 Park Ave, San Jose, CA 95110"
+28fe6e785b32afdcd2c366c9240a661091b850cf,Facial Expression Recognition using Patch based Gabor Features,"International Journal of Applied Information Systems (IJAIS) – ISSN : 2249-0868
+Foundation of Computer Science FCS, New York, USA
+Volume 10 – No.7, March 2016 – www.ijais.org
+Facial Expression Recognition using Patch based Gabor
+Features
+Electronics & Telecommunication Engg
+Electronics & Telecommunication Engg
+St. Francis Institute of Technology
+St. Francis Institute of Technology
+Vaqar Ansari
+Department
+Mumbai, India
+Anju Chandran
+Department
+Mumbai, India"
+28e1c113b1b57e0731c189d28e404cea3bddf260,Template based Mole Detection for Face,"is used
+recognition"
+28c9198d30447ffe9c96176805c1cd81615d98c8,No evidence that a range of artificial monitoring cues influence online donations to charity in an MTurk sample,"rsos.royalsocietypublishing.org
+Research
+Cite this article: Saunders TJ, Taylor AH,
+Atkinson QD. 2016 No evidence that a range of
+rtificial monitoring cues influence online
+donations to charity in an MTurk sample.
+R. Soc. open sci. 3: 150710.
+http://dx.doi.org/10.1098/rsos.150710
+Received: 22 December 2015
+Accepted: 13 September 2016
+Subject Category:
+Psychology and cognitive neuroscience
+Subject Areas:
+ehaviour/psychology/evolution
+Keywords:
+prosociality, eye images, charity donation,
+reputation, online behaviour
+Author for correspondence:
+Quentin D. Atkinson
+e-mail:"
+284b5dafe6d8d7552794ccd2efb4eabb12dc3512,Efficient and accurate inversion of multiple scattering with deep learning,"Efficient and accurate inversion of multiple scattering with deep learning
+Yu Sun1, Zhihao Xia1, and Ulugbek S. Kamilov1,2,∗
+Department of Computer Science and Engineering, Washington University in St. Louis, MO 63130, USA.
+Department of Electrical and Systems Engineering, Washington University in St. Louis, MO 63130, USA.
+email:"
+28446fa9d9ac0468cc715594a6dcc0ac5d9288a5,Semantic Instance Segmentation for Autonomous Driving Bert,"Semantic Instance Segmentation for Autonomous Driving
+Bert De Brabandere
+Davy Neven
+Luc Van Gool
+ESAT-PSI, KU Leuven"
+2866cbeb25551257683cf28f33d829932be651fe,A Two-Step Learning Method For Detecting Landmarks on Faces From Different Domains,"In Proceedings of the 2018 IEEE International Conference on Image Processing (ICIP)
+The final publication is available at: http://dx.doi.org/10.1109/ICIP.2018.8451026
+A TWO-STEP LEARNING METHOD FOR DETECTING LANDMARKS
+ON FACES FROM DIFFERENT DOMAINS
+Bruna Vieira Frade
+Erickson R. Nascimento
+Universidade Federal de Minas Gerais (UFMG), Brazil
+{brunafrade,"
+28589357a7631581e55ec6db3cde2e24e4789482,Involuntary processing of social dominance cues from bimodal face-voice displays.,"Cognition and Emotion
+ISSN: 0269-9931 (Print) 1464-0600 (Online) Journal homepage: http://www.tandfonline.com/loi/pcem20
+Involuntary processing of social dominance cues
+from bimodal face-voice displays
+Virginie Peschard, Pierre Philippot & Eva Gilboa-Schechtman
+To cite this article: Virginie Peschard, Pierre Philippot & Eva Gilboa-Schechtman (2016):
+Involuntary processing of social dominance cues from bimodal face-voice displays, Cognition and
+Emotion, DOI: 10.1080/02699931.2016.1266304
+To link to this article: http://dx.doi.org/10.1080/02699931.2016.1266304
+Published online: 21 Dec 2016.
+Submit your article to this journal
+Article views: 33
+View related articles
+View Crossmark data
+Full Terms & Conditions of access and use can be found at
+http://www.tandfonline.com/action/journalInformation?journalCode=pcem20
+Download by: [UCL Service Central des Bibliothèques]
+Date: 25 April 2017, At: 23:38"
+281be1be2f0ecce173e3678a7e87419f0815e016,Studies of Plain-to-Rolled Fingerprint Matching Using the NIST Algorithmic Test Bed (ATB),"Studies of Plain-to-Rolled Fingerprint
+Matching Using the NIST
+Algorithmic Test Bed (ATB)
+NISTIR 7112
+Stephen S. Wood
+Charles L. Wilson
+April 2004"
+28eceb438da0b841bbd3d02684dbfa263838ed60,Photographic Image Synthesis with Cascaded Refinement Networks,"Photographic Image Synthesis with Cascaded Refinement Networks
+Qifeng Chen† ‡
+Vladlen Koltun†
+(a) Input semantic layouts
+(b) Synthesized images
+Figure 1. Given a pixelwise semantic layout, the presented model synthesizes an image that conforms to this layout. (a) Semantic layouts
+from the Cityscapes dataset of urban scenes; semantic classes are coded by color. (b) Images synthesized by our model for these layouts.
+The layouts shown here and throughout the paper are from the validation set and depict scenes from new cities that were never seen during
+training. Best viewed on the screen."
+28f5f8dc2f2f9f2a4e49024fe6aa7e9a63b23ab0,Vision-based bicycle detection and tracking using a deformable part model and an EKF algorithm,"Vision-based Bicycle Detection and Tracking using a Deformable Part
+Model and an EKF Algorithm
+Hyunggi Cho, Paul E. Rybski and Wende Zhang"
+28aa89b2c827e5dd65969a5930a0520fdd4a3dc7,Characterization and Classification of Faces across Age Progression,
+283b3160f02db64759259b4eb39dd54c4969d6f8,ActivityNet: A large-scale video benchmark for human activity understanding,"ActivityNet: A Large-Scale Video Benchmark for Human Activity
+Understanding
+Fabian Caba Heilbron1,2, Victor Escorcia1,2, Bernard Ghanem2 and Juan Carlos Niebles1
+King Abdullah University of Science and Technology (KAUST), Saudi Arabia
+Universidad del Norte, Colombia"
+28b061b5c7f88f48ca5839bc8f1c1bdb1e6adc68,Predicting User Annoyance Using Visual Attributes,"Predicting User Annoyance Using Visual Attributes
+Gordon Christie
+Virginia Tech
+Amar Parkash
+Goibibo
+Ujwal Krothapalli
+Virginia Tech
+Devi Parikh
+Virginia Tech"
+28f1f6cbe07b117387e2b07c11e7ac9c4ef8cf95,A Machine Learning Approach to Pedestrian Detection for Autonomous Vehicles Using High-Definition 3D Range Data,"Article
+A Machine Learning Approach to Pedestrian
+Detection for Autonomous Vehicles Using
+High-Definition 3D Range Data
+Pedro J. Navarro *,†, Carlos Fernández †, Raúl Borraz † and Diego Alonso †
+División de Sistemas en Ingeniería Electrónica (DSIE), Universidad Politécnica de Cartagena,
+Campus Muralla del Mar, s/n, Cartagena 30202, Spain; (C.F.);
+(R.B.); (D.A.)
+* Correspondence: Tel.: +34-968-32-6546
+These authors contributed equally to this work.
+Academic Editor: Felipe Jimenez
+Received: 31 October 2016; Accepted: 15 December 2016; Published: 23 December 2016"
+1701ee9e9518a055e82e79f6425645c4797c19de,Supervised Hashing Using Graph Cuts and Boosted Decision Trees,"APPEARING IN IEEE TRANS. PATTERN ANALYSIS AND MACHINE INTELLIGENCE, FEB. 2015
+Supervised Hashing Using Graph Cuts and
+Boosted Decision Trees
+Guosheng Lin, Chunhua Shen, Anton van den Hengel"
+17a85799c59c13f07d4b4d7cf9d7c7986475d01c,Extending Procrustes Analysis: Building Multi-view 2-D Models from 3-D Human Shape Samples,"ADVERTIMENT. La consulta d’aquesta tesi queda condicionada a l’acceptació de les següents
+ondicions d'ús: La difusió d’aquesta tesi per mitjà del servei TDX (www.tesisenxarxa.net) ha
+estat autoritzada pels titulars dels drets de propietat intel·lectual únicament per a usos privats
+emmarcats en activitats d’investigació i docència. No s’autoritza la seva reproducció amb finalitats
+de lucre ni la seva difusió i posada a disposició des d’un lloc aliè al servei TDX. No s’autoritza la
+presentació del seu contingut en una finestra o marc aliè a TDX (framing). Aquesta reserva de
+drets afecta tant al resum de presentació de la tesi com als seus continguts. En la utilització o cita
+de parts de la tesi és obligat indicar el nom de la persona autora.
+ADVERTENCIA. La consulta de esta tesis queda condicionada a la aceptación de las siguientes
+ondiciones de uso: La difusión de esta tesis por medio del servicio TDR (www.tesisenred.net) ha
+sido autorizada por los titulares de los derechos de propiedad intelectual únicamente para usos
+privados enmarcados en actividades de investigación y docencia. No se autoriza su reproducción
+on finalidades de lucro ni su difusión y puesta a disposición desde un sitio ajeno al servicio TDR.
+No se autoriza la presentación de su contenido en una ventana o marco ajeno a TDR (framing).
+Esta reserva de derechos afecta tanto al resumen de presentación de la tesis como a sus
+ontenidos. En la utilización o cita de partes de la tesis es obligado indicar el nombre de la
+persona autora.
+WARNING. On having consulted this thesis you’re accepting the following use conditions:
+Spreading this thesis by the TDX (www.tesisenxarxa.net) service has been authorized by the
+titular of the intellectual property rights only for private uses placed in investigation and teaching"
+174ddb6379b91a0e799e9988d0e522a5af18f91d,ChatPainter: Improving Text to Image Generation using Dialogue,"ChatPainter: Improving Text to Image Generation using Dialogue
+Shikhar Sharma 1 Dendi Suhubdy 2 3 Vincent Michalski 2 3 1 Samira Ebrahimi Kahou 1 Yoshua Bengio 2 3"
+17c62bff70eb0919864f111df4930062aded729a,Encoding Spatial Context in Local Image Descriptors,"Universit¨at des Saarlandes
+Max-Planck-Institut f¨ur Informatik
+Encoding Spatial Context in
+Local Image Descriptors
+Masterarbeit im Fach Informatik
+Master’s Thesis in Computer Science
+von / by
+Dushyant Mehta
+ngefertigt unter der Leitung von / supervised by
+Dr. Roland Angst
+etreut von / advised by
+Dr. Roland Angst
+egutachtet von / reviewers
+Dr. Roland Angst
+Prof. Dr. Joachim Weickert
+Saarbr¨ucken, February 28, 2016"
+17dea513763c57dcd0e62085045fb5be6770c600,"Dynamic thread mapping for high-performance, power-efficient heterogeneous many-core systems","Summary: Dynamic Thread Mapping for High-Performance, Power-Efficient
+Heterogeneous Many-core Systems
+Guangshuo Liu, Jinpyo Park, Diana Marculescu
+I. OVERVIEW
+throughput
+for maximizing
+This paper investigates about the problem of dynamic thread
+mapping in heterogeneous many-core systems via an efficient
+lgorithm that maximizes performance under power constraints.
+The approach is to formulate the mapping problem as a 0-1
+integer linear program (ILP), given any numbers of threads,
+ores and type of cores. An iterative O(n2/m) heuristic-based
+lgorithm for solving the 0-1 ILP thread mapping is proposed,
+thereby providing, a novel scalable approach for effective thread
+mapping
+on many-core
+heterogeneous systems.
+The paper considers multi-threaded workloads and assumes that
+each core runs at most one thread at a time thereby supporting
+single threaded execution, without simultaneous multithreading"
+1748867e04ba16673ec5231f6a2ca0ae03835658,Fast Exact Search in Hamming Space With Multi-Index Hashing,"Fast Exact Search in Hamming Space
+with Multi-Index Hashing
+Mohammad Norouzi, Ali Punjani, David J. Fleet,
+{norouzi, alipunjani,"
+17cf838720f7892dbe567129dcf3f7a982e0b56e,Global-Local Face Upsampling Network,"Global-Local Face Upsampling Network
+Oncel Tuzel
+Yuichi Taguchi
+John R. Hershey
+Mitsubishi Electric Research Labs (MERL), Cambridge, MA, USA"
+17257fc03b611315ae49bd53d229188b889002e6,Hard Negative Mining for Metric Learning Based Zero-Shot Classification,"Hard Negative Mining for
+Metric Learning Based Zero-Shot Classification
+Maxime Bucher1,2, St´ephane Herbin1, Fr´ed´eric Jurie2
+ONERA - The French Aerospace Lab, Palaiseau, France
+Normandie Univ, UNICAEN, ENSICAEN, CNRS, Caen, France"
+178a82e3a0541fa75c6a11350be5bded133a59fd,BioHDD: a dataset for studying biometric identification on heavily degraded data,"Techset Composition Ltd, Salisbury
+{IEE}BMT/Articles/Pagination/BMT20140045.3d
+www.ietdl.org
+Received on 15th July 2014
+Revised on 17th September 2014
+Accepted on 23rd September 2014
+doi: 10.1049/iet-bmt.2014.0045
+ISSN 2047-4938
+BioHDD: a dataset for studying biometric
+identification on heavily degraded data
+Gil Santos1, Paulo T. Fiadeiro2, Hugo Proença1
+Department of Computer Science, IT – Instituto de Telecomunicações, University of Beira Interior, Covilhã, Portugal
+Department of Physics, Remote Sensing Unit – Optics, Optometry and Vision Sciences Group, University of Beira Interior,
+Covilhã, Portugal
+E-mail:"
+171d7762137725839fe5292901fe90d91b74811d,SLAM Algorithm by using Global Appearance of Omnidirectional Images,
+174cd8e98f17b3f5bda1c8e16cb39e3dec800f74,Multi-scale Context Intertwining for Semantic Segmentation,"Multi-Scale Context Intertwining
+for Semantic Segmentation
+Di Lin1, Yuanfeng Ji1, Dani Lischinski2, Daniel Cohen-Or1,3, and Hui Huang1(cid:63)
+Shenzhen University 2The Hebrew University of Jerusalem 3Tel Aviv University"
+17c0094c68d6efd19b80287c51d228fa50750f46,An efficient partial face detection method using AlexNet CNN,"SSRG International Journal of Electronics and Communication Engineering - (ICRTECITA-2017) - Special Issue - March 2017
+An efficient partial face detection method using
+AlexNet CNN
+Prof Mr.Sivalingam.T, S.Kabilan ,
+Dhanabal.M ,Arun.R ,Chandrabhagavan.K
+V.S.B Engineering College,Karur"
+177c48590469c62d430cf74fee7b5bd28bfbbc1d,Articulated Motion Learning via Visual and Lingual Signals,"Learning Articulated Motion Models from Visual and Lingual Signals
+Zhengyang Wu
+Georgia Tech
+Atlanta, GA 30332
+Mohit Bansal
+TTI-Chicago
+Chicago, IL 60637
+Matthew R. Walter
+TTI-Chicago
+Chicago, IL 60637"
+1740a0732e8e308f5dd395313313cc3289666f13,Preference-Aware View Recommendation System for Cameras Based on Bag of Aesthetics-Preserving Features,"Transactions on Multimedia
+Page 22 of 32
+Preference-Aware View Recommendation System
+for Cameras Based on Bag of
+Aesthetics-Preserving Features
+Hsiao-Hang Su, Tse-Wei Chen, Member, IEEE, Chieh-Chi Kao, Winston H. Hsu, Member, IEEE,
+nd Shao-Yi Chien*, Member, IEEE"
+17ff59bb388b155f613f7566ba7cd71ec780cdec,Asymmetric Sparse Kernel Approximations for Large-Scale Visual Search,"Asymmetric sparse kernel approximations
+for large-scale visual search
+Damek Davis
+University of California
+Los Angeles, CA 90095
+Jonathan Balzer
+University of California
+Los Angeles, CA 90095
+Stefano Soatto
+University of California
+Los Angeles, CA 90095"
+17dd242e6d7afb5d7fafcf9f8e8b201573ce4b89,An Extensive Review on Spectral Imaging in Biometric Systems: Challenges and Advancements,"An Extensive Review on Spectral Imaging in Biometric Systems: Challenges &
+Advancements
+Rumaisah Munira,∗, Rizwan Ahmed Khana,b,∗∗
+Faculty of IT, Barrett Hodgson University, Karachi, Pakistan.
+LIRIS, Universite Claude Bernard Lyon1, France."
+17635e22a73da3ff60a72715b7dd8837de6fee89,The ABBA study – approach bias modification in bulimia nervosa and binge eating disorder: study protocol for a randomised controlled trial,"Brockmeyer et al. Trials (2016) 17:466
+DOI 10.1186/s13063-016-1596-6
+ST UD Y P R O T O C O L
+Open Access
+The ABBA study – approach bias
+modification in bulimia nervosa and binge
+eating disorder: study protocol for a
+randomised controlled trial
+Timo Brockmeyer1,2*, Ulrike Schmidt2 and Hans-Christoph Friederich1,3"
+17daa9ddaf524de914e7440157fc0314db171884,Data driven analysis of faces from images,"Data Driven Analysis
+of Faces from Images
+Dissertation zur Erlangung des Grades „Doktor der Ingenieurwissenschaften (Dr.-Ing.)”
+der Naturwissenschaftlich-Technischen Fakultäten der Universität des Saarlandes
+Kristina Scherbaum
+8.05.2013
+Universität des Saarlandes | Max-Planck-Institut für Informatik
+Saarbrücken – Germany"
+17a995680482183f3463d2e01dd4c113ebb31608,Structured Label Inference for Visual Understanding,"IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. X, NO. Y, MONTH Z
+Structured Label Inference for
+Visual Understanding
+Nelson Nauata, Hexiang Hu, Guang-Tong Zhou, Zhiwei Deng,
+Zicheng Liao and Greg Mori"
+17a9db524ddbeb5577a94924c2a7cca048dd19f9,Object Recognition with Multi-Scale Pyramidal Pooling Networks,"Object Recognition with Multi-Scale Pyramidal
+Pooling Networks
+Jonathan Masci1, Ueli Meier1, Gabriel Fricout2, and J¨urgen Schmidhuber1
+IDSIA – USI – SUPSI, Manno – Lugano, Switzerland,
+http://idsia.ch/~masci/
+ArcelorMittal, Maizi`eres Research, Measurement and Control Dept., France"
+17db741725b9f8406f69b27a117e99bee1a9a323,Person Re-identification with a Body Orientation-Specific Convolutional Neural Network,"Person Re-identification with a Body
+Orientation-Specific Convolutional Neural Network
+Yiqiang Chen, Stefan Duffner, Andrei Stoian, Jean-Yves Dufour, Atilla
+Baskurt
+To cite this version:
+Yiqiang Chen, Stefan Duffner, Andrei Stoian, Jean-Yves Dufour, Atilla Baskurt. Person Re-
+identification with a Body Orientation-Specific Convolutional Neural Network. Advanced Concepts
+for Intelligent Vision systems, Sep 2018, Poitiers, France. <hal-01895374>
+HAL Id: hal-01895374
+https://hal.archives-ouvertes.fr/hal-01895374
+Submitted on 15 Oct 2018
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,"
+1742ffea0e1051b37f22773613f10f69d2e4ed2c,Interactive Mirror for Smart Home,
+174b6d661b96840e27cd9435c2dbb8e538b2c8a6,Progressive Representation Adaptation for Weakly Supervised Object Localization,"Progressive Representation Adaptation for
+Weakly Supervised Object Localization
+Dong Li, Jia-Bin Huang, Yali Li, Shengjin Wang(cid:63) and Ming-Hsuan Yang"
+17d84ca10607442a405f3c4c8b4572bdd79801c2,Expression robust 3D face recognition via mesh-based histograms of multiple order surface differential quantities,"EXPRESSION ROBUST 3D FACE RECOGNITION VIA MESH-BASED HISTOGRAMS OF
+MULTIPLE ORDER SURFACE DIFFERENTIAL QUANTITIES
+Huibin Li1,2, Di Huang1,2, Pierre Lemaire1,2, Jean-Marie Morvan1,3,4, Liming Chen1,2
+Universit´e de Lyon, CNRS
+Ecole Centrale de Lyon, LIRIS UMR5205, F-69134, Lyon, France
+Universit´e Lyon 1, Institut Camille Jordan,
+3 blvd du 11 Novembre 1918, F-69622 Villeurbanne - Cedex, France
+King Abdullah University of Science and Technology, GMSV Research Center,
+Bldg 1, Thuwal 23955-6900, Saudi Arabia"
+17ad76ef00d4cb584389682ca6b138a8bdc9a2da,Continuous Multimodal Emotion Recognition Approach for AVEC 2017,"Continuous Multimodal Emotion Recognition
+Approach for AVEC 2017
+Narotam Singh*, Nittin Singh†, Abhinav Dhall‡
+Department of Computer Science and Engineering, Indian Institute of Technology Ropar
+Email:
+India"
+174930cac7174257515a189cd3ecfdd80ee7dd54,Multi-view Face Detection Using Deep Convolutional Neural Networks,"Multi-view Face Detection Using Deep Convolutional
+Neural Networks
+Sachin Sudhakar Farfade
+Yahoo
+Mohammad Saberian
+inc.com
+Yahoo
+Li-Jia Li
+Yahoo"
+1750db78b7394b8fb6f6f949d68f7c24d28d934f,Detecting Facial Retouching Using Supervised Deep Learning,"Detecting Facial Retouching Using Supervised
+Deep Learning
+Aparna Bharati, Richa Singh, Senior Member, IEEE, Mayank Vatsa, Senior Member, IEEE, Kevin W.
+Bowyer, Fellow, IEEE"
+17e769ef3d86e74c21f2616c7f7a6f20a4e2fbaa,Bag of Machine Learning Concepts for Visual Concept Recognition in Images,"Bag of Machine Learning Concepts for
+Visual Concept Recognition in Images
+vorgelegt vom
+Diplom-Mathematiker
+Alexander Binder
+us Berlin
+von der Fakult¨at IV – Elektrotechnik und Informatik
+der Technischen Universit¨at Berlin
+zur Erlangung des akademischen Grades
+Doktor der Naturwissenschaften
+– Dr. rer. nat. –
+genehmigte Dissertation
+Promotionsausschuss:
+Vorsitzender:
+. Gutachter:
+. Gutachter:
+. Gutachter:
+Prof. Dr. Olaf Hellwich
+Prof. Dr. Klaus-Robert M¨uller
+Prof. Dr. Volker Tresp"
+173657da03e3249f4e47457d360ab83b3cefbe63,HKU-Face : A Large Scale Dataset for Deep Face Recognition Final Report,"HKU-Face: A Large Scale Dataset for
+Deep Face Recognition
+Final Report
+Haicheng Wang
+035140108
+COMP4801 Final Year Project
+Project Code: 17007"
+177cbeb83c3a0868b9a5c75cd74edf4b972cba80,Exact Primitives for Time Series Data Mining,"UNIVERSITY OF CALIFORNIA
+RIVERSIDE
+Exact Primitives for Time Series Data Mining
+A Dissertation submitted in partial satisfaction
+of the requirements for the degree of
+Doctor of Philosophy
+Computer Science
+Abdullah Al Mueen
+March 2012
+Dissertation Committee:
+Dr. Eamonn Keogh, Chairperson
+Dr. Vassilis Tsotras
+Dr. Stefano Lonardi"
+7b0e81249159686337ca2cfe81662123906b6b26,An Automatic Eye Detection Method for Gray Intensity Facial Images,"IJCSI International Journal of Computer Science Issues, Vol. 8, Issue 4, No 2, July 2011
+ISSN (Online): 1694-0814
+www.IJCSI.org
+An Automatic Eye Detection Method for Gray Intensity Facial
+Images
+M. Hassaballah1,2 , Kenji Murakami1, Shun Ido1
+Department of Computer Science, Ehime University, 790-8577, Japan
+Department of Mathematics, Faculty of Science, South Valley University, Qena, 83523, Egypt"
+7be6fe8c58ca12974c563689b7230b933dfca432,Design of Radial Basis Function Network as Classifier in Face Recognition Using Eigenfaces,"SBRN’98 – Simpósio Brasileiro de Redes Neurais, Belo Horizonte, Minas Gerais, dezembro de 1998.
+Design of Radial Basis Function Network as Classifier in Face Recognition Using
+Eigenfaces
+Carlos Eduardo Thomaz
+Raul Queiroz Feitosa
+Álvaro Veiga
+PUC RJ- Pontifícia Universidade Católica do Rio de Janeiro
+Departamento de Engenharia Elétrica
+Rua Marquês de São Vicente, 225, 22453-900 Rio de Janeiro, RJ, Brasil"
+7bd6d0bca27ff68621acd10d6d1709f084f97602,Learning to Detect and Track Visible and Occluded Body Joints in a Virtual World,"Learning to Detect and Track Visible and
+Occluded Body Joints in a Virtual World
+Matteo Fabbri(cid:63), Fabio Lanzi(cid:63), Simone Calderara(cid:63), Andrea Palazzi, Roberto
+Vezzani, and Rita Cucchiara
+Department of Engineering “Enzo Ferrari”
+University of Modena and Reggio Emilia, Italy"
+7bbaa09c9e318da4370a83b126bcdb214e7f8428,"FaaSter, Better, Cheaper: The Prospect of Serverless Scientific Computing and HPC","FaaSter, Better, Cheaper: The Prospect of
+Serverless Scientific Computing and HPC
+Josef Spillner1, Cristian Mateos2, and David A. Monge3
+Zurich University of Applied Sciences, School of Engineering
+Service Prototyping Lab (blog.zhaw.ch/icclab/), 8401 Winterthur, Switzerland
+ISISTAN Research Institute - CONICET - UNICEN
+Campus Universitario, Paraje Arroyo Seco, Tandil (7000), Buenos Aires, Argentina
+ITIC Research Institute, National University of Cuyo
+Padre Jorge Contreras 1300, M5502JMA Mendoza, Argentina"
+7b8aa3ebeae17e5266dac23e87f603a5d5f7b1e3,Open Set Logo Detection and Retrieval,"Open Set Logo Detection and Retrieval
+Andras T¨uzk¨o1, Christian Herrmann1,2, Daniel Manger1, J¨urgen Beyerer1,2
+Fraunhofer IOSB, Karlsruhe, Germany
+Karlsruhe Institute of Technology KIT, Vision and Fusion Lab, Karlsruhe, Germany
+Keywords:
+Logo Detection, Logo Retrieval, Logo Dataset, Trademark Retrieval, Open Set Retrieval, Deep Learning."
+7b1af8cc9c2c43fa9d528bcfb05142d714df3700,"Modeling Shape, Appearance and Motion for Human Movement Analysis",
+7b6f0c4b22aee0cb4987cba9df121d4076fac5a5,On Learning 3D Face Morphable Model from In-the-wild Images,"On Learning 3D Face Morphable Model
+from In-the-wild Images
+Luan Tran, and Xiaoming Liu, Member, IEEE"
+7b9a5d9d7386d47c51cb473f6338988bd6e9f2b1,An Individual-Specific Strategy for Management of Reference Data in Adaptive Ensembles for Person Re-Identification,"An Individual-Specific Strategy for Management of Reference Data
+in Adaptive Ensembles for Person Re-Identification
+Miguel De-la-Torre*†, Eric Granger*, Robert Sabourin*, Dmitry O. Gorodnichy‡
+* ´Ecole de technologie sup´erieure, Universit´e du Qu´ebec, Montr´eal, Canada,
+Centro Universitario de Los Valles, Universidad de Guadalajara, Ameca, M´exico
+Science and Engineering Directorate, Canada Border Services Agency, Ottawa, Canada,
+Keywords: Multi-Classifier Systems; Adaptive Biometrics; Face
+Recognition; Video Surveillance; Person Re-Identification"
+7ba6ac1b769ad7098037c07a5b7399fe9d97fcc8,Moving Object Detection in Heterogeneous Conditions in Embedded Systems,"Article
+Moving Object Detection in Heterogeneous
+Conditions in Embedded Systems
+Alessandro Garbo and Stefano Quer *
+Dipartimento di Automatica ed Informatica, Politecnico di Torino, 10129 Torino, Italy;
+* Correspondence: Tel.: +39-011-090-7076
+Received: 25 May 2017; Accepted: 27 June 2017; Published: 1 July 2017"
+7b9961094d3e664fc76b12211f06e12c47a7e77d,Bridging biometrics and forensics,"Bridging Biometrics and Forensics
+Yanjun Yan and Lisa Ann Osadciw
+EECS, Syracuse University, Syracuse, NY, USA
+{yayan,"
+7b67c38a6f49e02c03e1cea98146a506f607b0d7,Using Facial Symmetry to Handle Pose Variations in Real-World 3D Face Recognition,"Using Facial Symmetry to Handle Pose
+Variations in Real-World 3D Face Recognition
+Georgios Passalis1,2, Panagiotis Perakis1,2, Theoharis Theoharis1,2
+nd Ioannis A. Kakadiaris2, Senior Member, IEEE"
+7b9b3794f79f87ca8a048d86954e0a72a5f97758,Passing an Enhanced Turing Test - Interacting with Lifelike Computer Representations of Specific Individuals,"DOI 10.1515/jisys-2013-0016      Journal of Intelligent Systems 2013; 22(4): 365–415
+Avelino J. Gonzalez*, Jason Leigh, Ronald F. DeMara, Andrew
+Johnson, Steven Jones, Sangyoon Lee, Victor Hung, Luc
+Renambot, Carlos Leon-Barth, Maxine Brown, Miguel Elvir,
+James Hollister and Steven Kobosko
+Passing an Enhanced Turing Test –
+Interacting with Lifelike Computer
+Representations of Specific Individuals"
+7bce4f4e85a3bfcd6bfb3b173b2769b064fce0ed,A Psychologically-Inspired Match-Score Fusion Model for Video-Based Facial Expression Recognition,"A Psychologically-Inspired Match-Score Fusion Model
+for Video-Based Facial Expression Recognition
+Albert Cruz, Bir Bhanu, Songfan Yang,
+VISLab, EBUII-216, University of California Riverside,
+Riverside, California, USA, 92521-0425
+{acruz, bhanu,"
+7b8e9c50f74ce6ca66a8ab61fb18ca31d26cf13f,Nonlinear Channels Aggregation Networks for Deep Action Recognition,"Under review as a conference paper at ICLR 2019
+Nonlinear Channels Aggregation Networks
+for Deep Action Recognition
+Anonymous authors
+Paper under double-blind review"
+7b0f1fc93fb24630eb598330e13f7b839fb46cce,Learning to Find Eye Region Landmarks for Remote Gaze Estimation in Unconstrained Settings,"Learning to Find Eye Region Landmarks for Remote Gaze
+Estimation in Unconstrained Settings
+Seonwook Park
+ETH Zurich
+Xucong Zhang
+MPI for Informatics
+Andreas Bulling
+MPI for Informatics
+Otmar Hilliges
+ETH Zurich"
+7bdcd85efd1e3ce14b7934ff642b76f017419751,Learning Discriminant Face Descriptor,"Learning Discriminant Face Descriptor
+Zhen Lei, Member, IEEE, Matti Pietika¨ inen, Fellow, IEEE, and Stan Z. Li, Fellow, IEEE"
+7b47ca13af16bdc1f4b88e9b68dd3ea52d959199,Online nonparametric discriminant analysis for incremental subspace learning and recognition,"Pattern Anal Applic (2008) 11:259–268
+DOI 10.1007/s10044-008-0131-0
+T H E O R E T I C A L A D V A N C E S
+Online nonparametric discriminant analysis for incremental
+subspace learning and recognition
+B. Raducanu Æ J. Vitria`
+Received: 15 December 2006 / Accepted: 20 January 2008 / Published online: 24 July 2008
+Ó Springer-Verlag London Limited 2008"
+7bcd98ee2df3d14eae7bbed713208cb7da7b5db0,Unsupervised data association for metric learning in the context of multi-shot person re-identification,"Unsupervised data association for Metric Learning in the context of Multi-shot
+Person Re-identification
+Furqan M. Khan, Francois Bremond
+INRIA Sophia Antipolis-Mediterrannee
+004 Route des Lucioles, Sophia Antipolis Cedex, France
+{furqan.khan |"
+7b66dababebd800e95d23a1fde299d44a52e98ed,Dual Recurrent Attention Units for Visual Question Answering,"Under review for Computer Vision and Image Understanding
+DRAU: Dual Recurrent Attention Units for Visual Question Answering
+Ahmed Osmana,, Wojciech Sameka,
+Fraunhofer Heinrich Hertz Institute, Einsteinufer 37, Berlin 10587, Germany"
+7b331c80a91acf3616afd88e78801ac55c874f43,Multiple Player Tracking in Sports Video: A Dual-Mode Two-Way Bayesian Inference Approach With Progressive Observation Modeling,"Multiple Player Tracking in Sports Video: A
+Dual-Mode Two-Way Bayesian Inference Approach
+With Progressive Observation Modeling
+Junliang Xing, Student Member, IEEE, Haizhou Ai, Senior Member, IEEE, Liwei Liu, and
+Shihong Lao, Member, IEEE"
+7b9ebcc8b9c05ef661182fe73438b7725584817d,Restoring effects of oxytocin on the attentional preference for faces in autism,"Citation: Transl Psychiatry (2017) 7, e1097; doi:10.1038/tp.2017.67
+www.nature.com/tp
+ORIGINAL ARTICLE
+Restoring effects of oxytocin on the attentional preference
+for faces in autism
+M Kanat1,2, I Spenthof1,3, A Riedel4, LT van Elst2,4, M Heinrichs1,2 and G Domes1,2,3
+Reduced attentional preference for faces and symptoms of social anxiety are common in autism spectrum disorders (ASDs). The
+neuropeptide oxytocin triggers anxiolytic functions and enhances eye gaze, facial emotion recognition and neural correlates of face
+processing in ASD. Here we investigated whether a single dose of oxytocin increases attention to faces in ASD. As a secondary
+question, we explored the influence of social anxiety on these effects. We tested for oxytocin’s effects on attention to neutral faces
+s compared to houses in a sample of 29 autistic individuals and 30 control participants using a dot-probe paradigm with two
+different presentation times (100 or 500 ms). A single dose of 24 IU oxytocin was administered in a randomized, double-blind
+placebo-controlled, cross-over design. Under placebo, ASD individuals paid less attention to faces presented for 500 ms than did
+ontrols. Oxytocin administration increased the allocation of attention toward faces in ASD to a level observed in controls.
+Secondary analyses revealed that these oxytocin effects primarily occurred in ASD individuals with high levels of social anxiety who
+were characterized by attentional avoidance of faces under placebo. Our results confirm a positive influence of intranasal oxytocin
+on social attention processes in ASD. Further, they suggest that oxytocin may in particular restore the attentional preference for
+facial information in ASD individuals with high social anxiety. We conclude that oxytocin’s anxiolytic properties may partially
+ccount for its positive effects on socio-cognitive functioning in ASD, such as enhanced eye gaze and facial emotion recognition.
+Translational Psychiatry (2017) 7, e1097; doi:10.1038/tp.2017.67; published online 18 April 2017"
+7b3b7769c3ccbdf7c7e2c73db13a4d32bf93d21f,"On the design and evaluation of robust head pose for visual user interfaces: algorithms, databases, and comparisons","On the Design and Evaluation of Robust Head Pose for
+Visual User Interfaces: Algorithms, Databases, and
+Comparisons
+Sujitha Martin
+Laboratory of Intelligent and
+Safe Automobiles
+UCSD - La Jolla, CA, USA
+Ashish Tawari
+Laboratory of Intelligent and
+Safe Automobiles
+UCSD - La Jolla, CA, USA
+Erik Murphy-Chutorian
+Laboratory of Intelligent and
+Safe Automobiles
+UCSD - La Jolla, CA, USA
+Shinko Y. Cheng
+Laboratory of Intelligent and
+Safe Automobiles
+UCSD - La Jolla, CA, USA
+Mohan Trivedi"
+7b358ed87f39a12d737070dc22b4c547ce378648,Color Features for Boosted Pedestrian Detection,"Institutionen för systemteknik
+Department of Electrical Engineering
+Examensarbete
+Color Features for Boosted Pedestrian Detection
+Examensarbete utfört i Datorseende
+vid Tekniska högskolan vid Linköpings universitet
+Niklas Hansson
+LiTH-ISY-EX--15/4899--SE
+Linköping 2015
+Department of Electrical Engineering
+Linköpings universitet
+SE-581 83 Linköping, Sweden
+Linköpings tekniska högskola
+Linköpings universitet
+581 83 Linköping"
+7b2e0c87aece7ff1404ef2034d4c5674770301b2,Discriminative Feature Learning with Foreground Attention for Person Re-Identification,"Discriminative Feature Learning with Foreground
+Attention for Person Re-Identification
+Sanping Zhou, Jinjun Wang, Deyu Meng, Yudong Liang, Yihong Gong, Nanning Zheng"
+7b522c5d6d2d0699c4183a543b8e65b1a66d9e74,Understanding Critical Factors in Appearance-Based Gender Categorization,"Understanding Critical Factors in
+Appearance-based Gender Categorization
+Enrico Grosso, Andrea Lagorio, Luca Pulina, and Massimo Tistarelli
+POLCOMING – University of Sassari
+Viale Mancini, 5 – 07100 Sassari, Italy"
+7b07a87ff71b85f3493d1944034a960917b8482f,Alternating BackPropagation for Generator Network,"Alternating Back-Propagation for Generator Network
+Tian Han†, Yang Lu†, Song-Chun Zhu, and Ying Nian Wu
+Department of Statistics, University of California, Los Angeles, USA"
+7b95bd44db15f7cf20bfc051c353841f3fcea383,Low-Complexity Face Recognition using a Multilevel DWT and Two States of Continuous HMM to recognize Noisy Images,"Low-Complexity Face Recognition using a
+Multilevel DWT and Two States of
+Continuous HMM to recognize Noisy
+Images
+Hameed R. Farhan1, Mahmuod H. Al-Muifraje2, Thamir R. Saeed2
+Department of Electrical and Electronic Engineering, University of Kerbala, Kerbala, Iraq
+Department of Electrical Engineering, University of Technology, Baghdad, Iraq"
+7b83867b7f79cbfbfc71996bcf07fe7ee7a7600c,Object detection through search with a foveated visual system,"Object Detection Through Exploration With A
+Foveated Visual Field
+Emre Akbas, Miguel P. Eckstein"
+8f9fa03690428cde478f1a27d4773f78d857b88f,Visual Recognition using Embedded Feature Selection for Curvature Self-Similarity,"Visual Recognition using Embedded Feature
+Selection for Curvature Self-Similarity
+Angela Eigenstetter
+HCI & IWR, University of Heidelberg
+Bj¨orn Ommer
+HCI & IWR, University of Heidelberg"
+8f6d05b8f9860c33c7b1a5d704694ed628db66c7,Non-linear dimensionality reduction and sparse representation models for facial analysis. (Réduction de la dimension non-linéaire et modèles de la représentations parcimonieuse pour l'analyse du visage),"Non-linear dimensionality reduction and sparse
+representation models for facial analysis
+Yuyao Zhang
+To cite this version:
+Yuyao Zhang. Non-linear dimensionality reduction and sparse representation models for facial analysis.
+Medical Imaging. INSA de Lyon, 2014. English. <NNT : 2014ISAL0019>. <tel-01127217>
+HAL Id: tel-01127217
+https://tel.archives-ouvertes.fr/tel-01127217
+Submitted on 7 Mar 2015
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de
+recherche français ou étrangers, des laboratoires"
+8f05c4c1b3c1ad31ec95ccb87bca24a884b5ad4c,Overhead Detection: Beyond 8-bits and RGB,"Overhead Detection: Beyond 8-bits and RGB
+Eliza Mace1
+Keith Manville1
+Monica Barbu-McInnis1
+Michael Laielli2
+Matthew Klaric2
+Samuel Dooley2
+MITRE,
+NGA,"
+8f772d9ce324b2ef5857d6e0b2a420bc93961196,Facial Landmark Point Localization using Coarse-to-Fine Deep Recurrent Neural Network,"MAHPOD et al.: CFDRNN
+Facial Landmark Point Localization using
+Coarse-to-Fine Deep Recurrent Neural Network
+Shahar Mahpod, Rig Das, Emanuele Maiorana, Yosi Keller, and Patrizio Campisi,"
+8fdfd4c5039cf7d70470a2a3ac52bfd229bcd4e2,Pushing the Limits of Radiology with Joint Modeling of Visual and Textual Information,"Pushing the Limits of Radiology with Joint Modeling of Visual and
+Textual Information
+Department of Computing, Macquarie University1
+Sonit Singh1,2
+DATA61, CSIRO2
+Sydney, Australia"
+8fda2f6b85c7e34d3e23927e501a4b4f7fc15b2a,Feature Selection with Annealing for Big Data Learning,"Feature Selection with Annealing for Big Data
+Learning
+Adrian Barbu, Yiyuan She, Liangjing Ding, Gary Gramajo"
+8fbe68810cbc53521395829620060cf9558231cc,Learning Discriminant Person-Specific Facial Models Using Expandable Graphs,"Learning Discriminant Person-Specific
+Facial Models Using Expandable Graphs
+Stefanos Zafeiriou, Anastasios Tefas, Member, IEEE, and Ioannis Pitas, Fellow, IEEE"
+8fa3478aaf8e1f94e849d7ffbd12146946badaba,Attributes for Classifier Feedback,"Attributes for Classifier Feedback
+Amar Parkash1 and Devi Parikh2
+Indraprastha Institute of Information Technology (Delhi, India)
+Toyota Technological Institute (Chicago, US)"
+8ff3c7b46ab36f1d01e96681baf512859cc80a4d,Dynamics of alpha oscillations elucidate facial affect recognition in schizophrenia.,"Dynamics of alpha oscillations elucidate facial affect
+recognition in schizophrenia
+Tzvetan G. Popov & Brigitte S. Rockstroh & Petia Popova &
+Almut M. Carolus & Gregory A. Miller"
+8f9c37f351a91ed416baa8b6cdb4022b231b9085,Generative Adversarial Style Transfer Networks for Face Aging,"Generative Adversarial Style Transfer Networks for Face Aging
+Sveinn Palsson
+D-ITET, ETH Zurich
+Eirikur Agustsson
+D-ITET, ETH Zurich"
+8f8c0243816f16a21dea1c20b5c81bc223088594,Local Directional Number Based Classification and Recognition of Expressions Using Subspace Methods,
+8f98e1e041e7d3e27397c268e85e815065329d2d,Hierarchical feed forward models for robust object recognition,"Hierarchical Feed-Forward Models for
+Robust Object Recognition
+Ingo Bax
+Der Technischen Fakult¨at der Universit¨at Bielefeld vorgelegt zur Erlangung
+des akademischen Grades Doktor der Ingenieurwissenschaften"
+8fc21217ee89c505930b540b716b11bab89d3bcd,Memory Efficient Nonuniform Quantization for Deep Convolutional Neural Network,"Memory Efficient Nonuniform Quantization for
+Deep Convolutional Neural Network
+Fangxuan Sun and Jun Lin"
+8f5566fa00f8c79f4720e14084489e784688ab0b,The role of the amygdala in atypical gaze on emotional faces in autism spectrum disorders.,"The Journal of Neuroscience, July 11, 2012 • 32(28):9469 –9476 • 9469
+Behavioral/Systems/Cognitive
+The Role of the Amygdala in Atypical Gaze on Emotional
+Faces in Autism Spectrum Disorders
+Dorit Kliemann,1,2,3,4 Isabel Dziobek,2,3 Alexander Hatri,1,2,3 Ju¨rgen Baudewig,2,3 and Hauke R. Heekeren1,2,3,4
+Department of Education and Psychology, 2Cluster of Excellence “Languages of Emotion,” and 3Dahlem Institute for Neuroimaging of Emotion (D.I.N.E),
+Freie Universita¨t Berlin, 14195 Berlin, Germany, and 4Max Planck Institute for Human Development, 14195 Berlin, Germany
+Reduced focus toward the eyes is a characteristic of atypical gaze on emotional faces in autism spectrum disorders (ASD). Along with the
+typical gaze, aberrant amygdala activity during face processing compared with neurotypically developed (NT) participants has been
+repeatedly reported in ASD. It remains unclear whether the previously reported dysfunctional amygdalar response patterns in ASD
+support an active avoidance of direct eye contact or rather a lack of social attention. Using a recently introduced emotion classification
+task, we investigated eye movements and changes in blood oxygen level-dependent (BOLD) signal in the amygdala with a 3T MRI scanner
+in 16 autistic and 17 control adult human participants. By modulating the initial fixation position on faces, we investigated changes
+triggered by the eyes compared with the mouth. Between-group interaction effects revealed different patterns of gaze and amygdalar
+BOLD changes in ASD and NT: Individuals with ASD gazed more often away from than toward the eyes, compared with the NT group,
+which showed the reversed tendency. An interaction contrast of group and initial fixation position further yielded a significant cluster of
+mygdala activity. Extracted parameter estimates showed greater response to eyes fixation in ASD, whereas the NT group showed an
+increase for mouth fixation.
+The differing patterns of amygdala activity in combination with differing patterns of gaze behavior between groups triggered by direct
+eye contact and mouth fixation, suggest a dysfunctional profile of the amygdala in ASD involving an interplay of both eye-avoidance"
+8fb849fe51fbf4b56393cfef26397caef2a22fb0,Public Document Agreed Plans for Open Source Reference Software Document Evolution Executive Summary,"Project N° IST-2002-507634 - BioSecure
+D2.2.1 – Revision: b3
+2 March 2005
+Contract Number :
+Project Acronym :
+Project Title :
+Instrument :
+Start Date of Project :
+Duration :
+Deliverable Number :
+Title of Deliverable :
+Contractual Due Date :
+Actual Date of Completion :
+IST-2002-507634
+BioSecure
+Biometrics for Secure Authentication
+Network of Excellence
+01 June, 2004
+6 months
+D2.2.1"
+8f2e83f6d70b9e161ad714fee79ed6d23ae2a93f,Image Intelligent Detection Based on the Gabor Wavelet and the Neural Network,"Article
+Image Intelligent Detection Based on the Gabor
+Wavelet and the Neural Network
+Yajun Xu 1, Fengmei Liang 1,*, Gang Zhang 1 and Huifang Xu 2
+College of Information Engineering, Taiyuan University of Technology, Taiyuan 030024, China;
+(Y.X.); (G.Z.)
+Daqin Railway Co. Ltd., Taiyuan Railway Administration, Taiyuan 030013, China;
+* Correspondence: Tel.: +86-186-0341-0966
+Academic Editor: Angel Garrido
+Received: 21 September 2016; Accepted: 11 November 2016; Published: 15 November 2016"
+8f3e3f0f97844d3bfd9e9ec566ac7a54f6931b09,"A Survey on Human Emotion Recognition Approaches, Databases and Applications","Electronic Letters on Computer Vision and Image Analysis 14(2):24-44; 2015
+A Survey on Human Emotion Recognition Approaches,
+Databases and Applications
+C.Vinola*, K.Vimaladevi†
+* Department of Computer Science and Engineering, Francis Xavier Engineering College, Tirunelveli,Tamilnadu,India
+Department of Computer Science and Engineering, P.S.R Engineering College, Sivakasi, Tamilnadu,India
+Received 7th Aug 2015; accepted 30th Nov 2015"
+8fc730d22f33d08be927e5449f359dc15b5c3503,Measuring and modeling the perception of natural and unconstrained gaze in humans and machines,"CBMM Memo No. 059
+November 28, 2016
+Measuring and modeling the perception of natural
+nd unconstrained gaze in humans and machines
+Daniel Harari*, Tao Gao*, Nancy Kanwisher, Joshua Tenenbaum, Shimon
+Ullman"
+8f89aed13cb3555b56fccd715753f9ea72f27f05,Attended End-to-end Architecture for Age Estimation from Facial Expression Videos,"Attended End-to-end Architecture for Age
+Estimation from Facial Expression Videos
+Wenjie Pei, Hamdi Dibeklio˘glu, Member, IEEE, Tadas Baltruˇsaitis and David M.J. Tax"
+8fcdeda0c2f4e265e2180eb5ed39f6548ae3ba99,A Generic Middle Layer for Image Understanding,"UNIVERSIT ¨AT HAMBURG
+A Generic Middle Layer for Image
+Understanding
+Kasim Terzi´c
+Doktorarbeit
+Fakult¨at f¨ur Mathematik, Informatik und Naturwissenschaften
+Fachbereich Informatik"
+8fe7354a92b4c74c22dc0a253dfe7320487d22ab,Literature Survey on Sparse Representation for Neural Network Based Face Detection and Recognition,"Circuits and Systems: An International Journal (CSIJ), Vol. 1, No.2, April 2014
+LITERATURE SURVEY ON SPARSE
+REPRESENTATION FOR NEURAL
+NETWORK BASED FACE DETECTION AND
+RECOGNITION
+Raviraj Mane,Poorva Agrawal,
+Nisha Auti CS Department SIT, Pune"
+8fe43144c0ff36ffefca869eec0a63e71ca02049,1D correlation filter based class-dependence feature analysis for face recognition,"This article appeared in a journal published by Elsevier. The attached
+opy is furnished to the author for internal non-commercial research
+nd education use, including for instruction at the authors institution
+nd sharing with colleagues.
+Other uses, including reproduction and distribution, or selling or
+licensing copies, or posting to personal, institutional or third party
+websites are prohibited.
+In most cases authors are permitted to post their version of the
+rticle (e.g. in Word or Tex form) to their personal website or
+institutional repository. Authors requiring further information
+regarding Elsevier’s archiving and manuscript policies are
+encouraged to visit:
+http://www.elsevier.com/copyright"
+8f4c8a80e94a883356ee4c4425324dac5457661a,Noise Robust Face Image Super-Resolution Through Smooth Sparse Representation,"This article has been accepted for inclusion in a future issue of this journal. Content is final as presented, with the exception of pagination.
+Noise Robust Face Image Super-Resolution
+Through Smooth Sparse Representation
+Junjun Jiang, Member, IEEE, Jiayi Ma, Member, IEEE, Chen Chen, Xinwei Jiang, and Zheng Wang"
+8fd9c22b00bd8c0bcdbd182e17694046f245335f,Recognizing Facial Expressions in Videos,"Recognizing Facial Expressions in Videos
+Lin Su, Matthew Balazsi"
+8f2e594f55ca1b1675d8bfef25922c97109cb599,An evil face? Verbal evaluative multi-CS conditioning enhances face-evoked mid-latency magnetoencephalographic responses,"Social Cognitive and Affective Neuroscience, 2017, 695–705
+doi: 10.1093/scan/nsw179
+Advance Access Publication Date: 22 December 2016
+Original article
+An evil face? Verbal evaluative multi-CS conditioning
+enhances face-evoked mid-latency magnetoencephalo-
+graphic responses
+Markus Jungho¨ fer,1,2 Maimu Alissa Rehbein,1,2 Julius Maitzen,1
+Sebastian Schindler,3,4 and Johanna Kissler3,4
+Institute for Biomagnetism and Biosignalanalysis, University Hospital Mu¨ nster, Mu¨ nster D-48149, Germany,
+Otto Creutzfeldt Center for Cognitive and Behavioral Neuroscience, University of Mu¨ nster, Mu¨ nster D-48151,
+Germany, 3Department of Psychology, Affective Neuropsychology Unit and 4Center of Excellence Cognitive
+Interaction Technology (CITEC), University of Bielefeld, Bielefeld D-33501, Germany
+Correspondence should be addressed to Johanna Kissler, Department of Psychology, Affective Neuropsychology Unit, University of Bielefeld, Bielefeld
+D-33501, Germany. E-mail:"
+8f0c11a3332c434af11c01ee11ff7c492c7968da,Domain Adaptive Faster R-CNN for Object Detection in the Wild,"Domain Adaptive Faster R-CNN for Object Detection in the Wild
+Yuhua Chen1 Wen Li1 Christos Sakaridis1 Dengxin Dai1
+Luc Van Gool1,2
+Computer Vision Lab, ETH Zurich
+VISICS, ESAT/PSI, KU Leuven"
+8a12ee3c98b76d99531d5965f15bb77a10ec2569,Holistic Face Recognition through Multivariate Analysis and Genetic Algorithms,"Holistic Face Recognition through Multivariate Analysis and Genetic
+Algorithms"
+8a4119c2898f611a6ffa0b4b72acf322d1b455b1,A Diagram is Worth a Dozen Images,"A Diagram Is Worth A Dozen Images
+Aniruddha Kembhavi†, Mike Salvato†(cid:63), Eric Kolve†(cid:63), Minjoon Seo§,
+Hannaneh Hajishirzi§, Ali Farhadi†§
+Allen Institute for Artificial Intelligence, §University of Washington"
+8a91cb96dd520ba3e1f883aa6d57d4d716c5d1c8,Low Cost Eye Tracking: The Current Panorama,"Hindawi Publishing Corporation
+Computational Intelligence and Neuroscience
+Volume 2016, Article ID 8680541, 14 pages
+http://dx.doi.org/10.1155/2016/8680541
+Review Article
+Low Cost Eye Tracking: The Current Panorama
+Onur Ferhat1,2 and Fernando Vilariño1,2
+Computer Vision Center, Edifici O, Campus UAB, 08193 Bellaterra, Spain
+Computer Science Department, Universitat Aut`onoma de Barcelona, Edifici Q, Campus UAB, 08193 Bellaterra, Spain
+Correspondence should be addressed to Onur Ferhat;
+Received 27 November 2015; Accepted 18 February 2016
+Academic Editor: Ying Wei
+Copyright © 2016 O. Ferhat and F. Vilari˜no. This is an open access article distributed under the Creative Commons Attribution
+License, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly
+cited.
+Despite the availability of accurate, commercial gaze tracker devices working with infrared (IR) technology, visible light gaze
+tracking constitutes an interesting alternative by allowing scalability and removing hardware requirements. Over the last years, this
+field has seen examples of research showing performance comparable to the IR alternatives. In this work, we survey the previous
+work on remote, visible light gaze trackers and analyze the explored techniques from various perspectives such as calibration
+strategies, head pose invariance, and gaze estimation techniques. We also provide information on related aspects of research such"
+8a29378973987bdb040f35349d1c5a86a538c0fc,Hierarchical Temporal Memory Using Memristor Networks: A Survey,"Hierarchical Temporal Memory using Memristor
+Networks: A Survey
+Olga Krestinskaya, Graduate Student Member, IEEE, Irina Dolzhikova, Graduate Student Member, IEEE, and
+Alex Pappachen James, Senior Member, IEEE"
+8a14dfe0e11e03505db9c0d84bce96f165223cae,Learning from Demonstration in the Wild,"Learning from Demonstration in the Wild
+Feryal Behbahani1, Kyriacos Shiarlis1, Xi Chen1, Vitaly Kurin1,2, Sudhanshu Kasewa1,2, Ciprian Stirbu1,2,
+Jo˜ao Gomes1, Supratik Paul1,2, Frans A. Oliehoek1,3, Jo˜ao Messias1, Shimon Whiteson1,2"
+8a382f000f98cdab7f7b79e543c75c6b8f93b6f9,Learning Semantic Image Representations at a Large Scale,"Learning Semantic Image Representations at a Large
+Scale
+Yangqing Jia
+Electrical Engineering and Computer Sciences
+University of California at Berkeley
+Technical Report No. UCB/EECS-2014-93
+http://www.eecs.berkeley.edu/Pubs/TechRpts/2014/EECS-2014-93.html
+May 16, 2014"
+8ab183883acba0501c3315a914aee755b5e517d8,Synthesis-based Robust Low Resolution Face Recognition,"IEEE TRANSACTIONS ON IMAGE PROCESSING, VOL. X, NO. X, MONTH 20XX
+Synthesis-based Robust Low Resolution Face
+Recognition
+Sumit Shekhar, Student Member, IEEE, Vishal M. Patel, Member, IEEE, and Rama Chellappa, Fellow, IEEE"
+8ad407142de84b66144029845587c77ae94fd240,Multi-class speed-density relationship for pedestrian traffic,"Multi-class speed-density relationship for
+pedestrian traffic
+Marija Nikoli´c ∗
+Matthieu de Lapparent ∗
+Michel Bierlaire ∗
+Riccardo Scarinci ∗
+January 15, 2017
+Report TRANSP-OR 170115
+Transport and Mobility Laboratory
+School of Architecture, Civil and Environmental Engineering
+Ecole Polytechnique Fédérale de Lausanne
+transp-or.epfl.ch
+Transport and Mobility Laboratory, School of Architecture, Civil and Environmental Engi-
+neering, École Polytechnique Fédérale de Lausanne, Switzerland,
+{marija.nikolic, michel.bierlaire, matthieu.delapparent,"
+8aac66d15e0903257ec3abe6f126bf6316779011,Constructive Autoassociative Neural Network for Facial Recognition,"RESEARCH ARTICLE
+Constructive Autoassociative Neural
+Network for Facial Recognition
+Bruno J. T. Fernandes1*, George D. C. Cavalcanti2, Tsang I. Ren2
+. Escola Polite´ cnica, Universidade de Pernambuco, Recife-PE, Brazil, 2. Centro de Informa´ tica,
+Universidade Federal de Pernambuco, Recife-PE, Brazil"
+8acdc4be8274e5d189fb67b841c25debf5223840,Improving clustering performance using independent component analysis and unsupervised feature learning,"Gultepe and Makrehchi
+Hum. Cent. Comput. Inf. Sci. (2018) 8:25
+https://doi.org/10.1186/s13673-018-0148-3
+RESEARCH
+Improving clustering performance
+using independent component analysis
+and unsupervised feature learning
+Open Access
+Eren Gultepe* and Masoud Makrehchi
+*Correspondence:
+Department of Electrical
+and Computer Engineering,
+University of Ontario Institute
+of Technology, 2000 Simcoe
+St N, Oshawa, ON L1H 7K4,
+Canada"
+8ad4742e656c409e5a813c1a6d5f21fd2e3a9225,A Novel Algorithm for Face Recognition From Very Low Resolution Images,"J Electr Eng Technol Vol. 10, No. ?: 742-?, 2015
+http://dx.doi.org/10.5370/JEET.2015.10.1.742
+ISSN(Print) 1975-0102
+ISSN(Online) 2093-7423
+A Novel Algorithm for Face Recognition From Very Low Resolution
+Images
+C. Senthilsingh† and M. Manikandan*"
+8ac074829b55bb6b4c67f062ca9ec62bb79f865f,Person re-identification based on deep multi-instance learning,"Person Re-identification based on Deep
+Multi-instance Learning
+Domonkos Varga∗†, Tam´as Szir´anyi∗‡
+MTA SZTAKI, Institute for Computer Science and Control
+{varga.domonkos,
+Budapest University of Technology and Economics, Department of Networked Systems and Services
+Budapest University of Technology and Economics, Department of Material Handling and Logistics Systems"
+8a7726e58c2e24b0a738b48ae35185aaaacb8fe9,PILOT ASSESSMENT OF NONVERBAL PRAGMATIC ABILITY IN PEOPLE WITH ASPERGER SYNDROME Introduction,"Psychology of Language and Communication 2013, Vol. 17, No. 3
+DOI: 10.2478/plc-2013-0018
+FRANCISCO J. RODRÍGUEZ MUÑOZ
+University of Almería
+PILOT ASSESSMENT OF NONVERBAL PRAGMATIC ABILITY
+IN PEOPLE WITH ASPERGER SYNDROME
+The purpose of this study is to present a diagnostic tool to assess the nonverbal pragmatic
+behaviors of people with Asperger syndrome, with the intent to give an account of the
+severity of symptoms in the area of nonverbal interaction, as well as providing a profile
+of nonverbal behaviors that may be targeted for intervention. Through this communica-
+tion profile, overall nonverbal ability is calculated in a group of 20 subjects with Asperger
+syndrome. The proposed scale also includes the measurement of the following nonverbal
+dimensions: (1) eye gaze, (2) facial expression, (3) body language and posture, (4) proxemics,
+(5) gestures, and (6) paralanguage. The results of this assessment suggest low nonverbal
+pragmatic ability in these subjects, show specific deficits in nonverbal communication, and
+capture variability in nonverbal behavior in individuals with AS.
+Key words: Asperger syndrome, autism spectrum disorders, communication profile, non-
+verbal communication, pragmatic assessment, speech-language pathology
+Introduction
+Nobody can deny that nonverbal behavior, understood as a communication"
+8a54f8fcaeeede72641d4b3701bab1fe3c2f730a,What do you think of my picture? Investigating factors of influence in profile images context perception,"What do you think of my picture? Investigating factors
+of influence in profile images context perception
+Filippo Mazza, Matthieu Perreira da Silva, Patrick Le Callet, Ingrid
+Heynderickx
+To cite this version:
+Filippo Mazza, Matthieu Perreira da Silva, Patrick Le Callet, Ingrid Heynderickx. What do you
+think of my picture? Investigating factors of influence in profile images context perception. Human
+Vision and Electronic Imaging XX, Mar 2015, San Francisco, United States. Proc. SPIE 9394, Hu-
+man Vision and Electronic Imaging XX, 9394, <http://spie.org/EI/conferencedetails/human-vision-
+electronic-imaging>. <10.1117/12.2082817>. <hal-01149535>
+HAL Id: hal-01149535
+https://hal.archives-ouvertes.fr/hal-01149535
+Submitted on 7 May 2015
+HAL is a multi-disciplinary open access
+archive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+abroad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est"
+8aae23847e1beb4a6d51881750ce36822ca7ed0b,Comparison Between Geometry-Based and Gabor-Wavelets-Based Facial Expression Recognition Using Multi-Layer Perceptron,"Comparison Between Geometry-Based and Gabor-Wavelets-Based
+Facial Expression Recognition Using Multi-Layer Perceptron
+Zhengyou Zhang
+Shigeru Akamatsu
+ Michael Lyons
+
+ ATR Interpreting Telecommunications Research Laboratories
+-2 Hikaridai, Seika-cho, Soraku-gun, Kyoto 619-02, Japan
+INRIA, 2004 route des Lucioles, BP 93, F-06902 Sophia-Antipolis Cedex, France
+e-mail:"
+8aa6c3601924c99ca420c7c37ffcffe00db1eb78,3D facial expression recognition via multiple kernel learning of Multi-Scale Local Normal Patterns,"1st International Conference on Pattern Recognition (ICPR 2012)
+November 11-15, 2012. Tsukuba, Japan
+978-4-9906441-0-9 ©2012 ICPR"
+8a866bc0d925dfd8bb10769b8b87d7d0ff01774d,WikiArt Emotions: An Annotated Dataset of Emotions Evoked by Art,"WikiArt Emotions: An Annotated Dataset of Emotions Evoked by Art
+Saif M. Mohammad and Svetlana Kiritchenko
+National Research Council Canada"
+8ab16c26678245ef009cbbf87d750cfd18e21572,A Wearable Ultrasonic Obstacle Sensor for Aiding Visually Impaired and Blind Individuals,"A Wearable Ultrasonic Obstacle Sensor for Aiding Visually Impaired and Blind Individuals
+{tag} {/tag}
+IJCA Proceedings on National Conference on
+Growth of Technologies in Electronics, Telecom and Computers - India Perception
+© 2014 by IJCA Journal
+GTETC-IP
+Year of Publication: 2014
+Authors:
+V. Diana Earshia
+S. M. Kalaivanan
+Angel Dayana
+{bibtex}gtetc1314.bib{/bibtex}"
+8af0854c652c90d4004e1868bc5fafec3e4ce724,Labelling the Behaviour of Local Descriptors for Selective Video Content Retrieval,"INSTITUT NATIONAL DE RECHERCHE EN INFORMATIQUE ET EN AUTOMATIQUE
+Labelling the Behaviour of Local Descriptors for
+Selective Video Content Retrieval
+Julien Law-To — Valerie Gouet-Brunet — Olivier Buisson — Nozha Boujemaa
+N° 5821
+January 2006
+Thème COG
+p p o r t (cid:13)
+(cid:13) d e r e c h e r c h e (cid:13)"
+8aaa97c686c60f611fe5a979d9afbc29dde3d33f,Mastering the Dungeon: Grounded Language Learning by Mechanical Turker Descent,"Published as a conference paper at ICLR 2018
+MASTERING THE DUNGEON: GROUNDED LANGUAGE
+LEARNING BY MECHANICAL TURKER DESCENT
+Zhilin Yang, Saizheng Zhang, Jack Urbanek, Will Feng, Alexander H. Miller
+Arthur Szlam, Douwe Kiela & Jason Weston
+Facebook AI Research"
+8a77025bde5479a1366bb93c6f2366b5a6293720,Sharp Attention Network via Adaptive Sampling for Person Re-identification,"IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY, VOL. XX, NO. XX, XX 2018
+Sharp Attention Network via Adaptive Sampling
+for Person Re-identification
+Chen Shen, Guo-Jun Qi, Member, IEEE, Rongxin Jiang, Zhongming Jin, Hongwei Yong, Yaowu Chen,
+and Xian-Sheng Hua, Fellow, IEEE"
+8a2ed61448d9e41295753f5bd0a662ac28373e6f,Domain-Specific Face Synthesis for Video Face Recognition From a Single Sample Per Person,"Domain-Specific Face Synthesis for Video Face
+Recognition From a Single Sample Per Person
+Fania Mokhayeri
+, Student Member, IEEE, Eric Granger
+, Member, IEEE,
+and Guillaume-Alexandre Bilodeau , Member, IEEE"
+8ac2736683dac9a467602ee19f5a290096259148,HyperNet: Towards Accurate Region Proposal Generation and Joint Object Detection,"HyperNet: Towards Accurate Region Proposal Generation
+and Joint Object Detection
+Tao Kong1
+Anbang Yao2 Yurong Chen2 Fuchun Sun1
+State Key Lab. of Intelligent Technology and Systems
+Tsinghua National Laboratory for Information Science and Technology (TNList)
+Department of Computer Science and Technology, Tsinghua University 2Intel Labs China
+{anbang.yao,"
+8aea75940c90fac8c1e5d7ece7d04a61555c3bf6,Divide and Grow: Capturing Huge Diversity in Crowd Images with Incrementally Growing CNN,
+8adb2fcab20dab5232099becbd640e9c4b6a905a,Beyond Euclidean Eigenspaces: Bayesian Matching for Visual Recognition,"Beyond Euclidean Eigenspaces:
+Bayesian Matching for Visual Recognition
+Baback Moghaddam
+Alex Pentland
+Mitsubishi Electric Research Laboratory
+MIT Media Laboratory
+
+
+Cambridge, MA
+Cambridge, MA
+8a0538eb80b5d41c0e5991aceeef47db01603033,Proposal Flow: Semantic Correspondences from Object Proposals,"Proposal Flow: Semantic Correspondences from
+Object Proposals
+Bumsub Ham, Member, IEEE, Minsu Cho, Cordelia Schmid, Fellow, IEEE and Jean Ponce, Fellow, IEEE"
+8aa5f1b2639da73c2579ea9037a4ebf4579fdc4f,A Steerable multitouch Display for Surface Computing and its Evaluation,"December
+S0218213013600166
+013 14:51 WSPC/INSTRUCTION
+st Reading
+International Journal on Artificial Intelligence Tools
+Vol. 22, No. 6 (2013) 1360016 (29 pages)
+(cid:13) World Scientific Publishing Company
+DOI: 10.1142/S0218213013600166
+A STEERABLE MULTITOUCH DISPLAY FOR SURFACE
+COMPUTING AND ITS EVALUATION
+PANAGIOTIS KOUTLEMANIS, ANTONIOS NTELIDAKIS, XENOPHON ZABULIS,
+DIMITRIS GRAMMENOS and ILIA ADAMI
+Foundation for Research and Technology – Hellas (FORTH )
+Institute of Computer Science, N. Plastira 100
+Vassilika Vouton, GR-700 13 Heraklion, Crete, Greece
+{koutle, ntelidak, zabulis, grammenos,
+Received 28 January 2013
+Accepted 19 March 2013
+Published 20 December 2013
+In this paper, a steerable, interactive projection display that has the shape of a disk is"
+8abfda3c1e1599bed454661f15ee0bbe7f6b8c12,Who is Mistaken?,"Who is Mistaken?
+Benjamin Eysenbach
+Carl Vondrick
+Antonio Torralba"
+8ae02cef563120be51f8655e199a54af856059b7,Three-Dimensional Anthropometric Database of Attractive Caucasian Women: Standards and Comparisons,"SCIENTIFIC FOUNDATION
+Three-Dimensional Anthropometric Database of
+Attractive Caucasian Women: Standards
+and Comparisons
+Luigi Maria Galantucci, PhD, MSE,
+Alberto Laino, PhD, DS,
+Eliana Di Gioia, DS, MD,§jj Raoul D’Alessio, DS, MD,ô Fulvio Lavecchia, PhD, MSE,#
+Roberto Deli, PhD, DS,
+Gianluca Percoco, PhD, MSE,# and Carmela Savastano, DS, MD"
+8afe84f915d3dbc45c57011e62f5dbf9003dfb4c,Adaptive Binary Quantization for Fast Nearest Neighbor Search,"Adaptive Binary Quantization for Fast Nearest Neighbor
+Search
+Zhujin Li1 and Xianglong Liu∗2 and Junjie Wu3 and Hao Su4"
+8a91ad8c46ca8f4310a442d99b98c80fb8f7625f,2D Segmentation Using a Robust Active Shape Model With the EM Algorithm,"2D Segmentation Using a Robust Active
+Shape Model With the EM Algorithm
+Carlos Santiago, Jacinto C. Nascimento, Member, IEEE, and Jorge S. Marques"
+8a2bd5dbcf0ab0130dfb97e2a035e5722aa9319e,NLP EAC Recognition by Component Separation in the Eye Region,"NLP EAC Recognition by Component
+Separation in the Eye Region
+Ruxandra Vrˆanceanu, Corneliu Florea, Laura Florea and Constantin Vertan
+The Image Processing and Analysis Laboratory (LAPI), Politehnica University of
+Bucharest, Romania"
+8aed6ec62cfccb4dba0c19ee000e6334ec585d70,Localizing and Visualizing Relative Attributes,"Localizing and Visualizing Relative Attributes
+Fanyi Xiao and Yong Jae Lee"
+8a336e9a4c42384d4c505c53fb8628a040f2468e,Detecting Visually Observable Disease Symptoms from Faces,"Wang and Luo EURASIP Journal on Bioinformatics
+and Systems Biology (2016) 2016:13
+DOI 10.1186/s13637-016-0048-7
+R ES EAR CH
+Detecting Visually Observable Disease
+Symptoms from Faces
+Kuan Wang* and Jiebo Luo
+Open Access"
+8a56adc9605a894c513537f1a2c8d9459573c0a8,Running head: EFFECT OF IDENTITY ON TRUST LEARNING 1 Incidental learning of trust from eye-gaze: Effects of race and facial trustworthiness,"This is an author produced version of Incidental learning of trust from eye-gaze: Effects of
+race and facial trustworthiness.
+White Rose Research Online URL for this paper:
+http://eprints.whiterose.ac.uk/119885/
+Article:
+Strachan, James, Kirkham, Alexander James orcid.org/0000-0001-9286-9448, Manssuer,
+Luis et al. (2 more authors) (2017) Incidental learning of trust from eye-gaze: Effects of
+race and facial trustworthiness. VISUAL COGNITION. pp. 1-13. ISSN 1350-6285
+https://doi.org/10.1080/13506285.2017.1338321
+promoting access to
+White Rose research papers
+http://eprints.whiterose.ac.uk/"
+7e8edc45fa80cb0f7bc2c20e8eb893dcadde2c8c,Combining Speeded-up Robust Features with Principal Component Analysis in Face Recognition System,"International Journal of Innovative
+Computing, Information and Control
+Volume 8, Number 12, December 2012
+ICIC International c(cid:13)2012 ISSN 1349-4198
+pp. 8545{8556
+COMBINING SPEEDED-UP ROBUST FEATURES WITH PRINCIPAL
+COMPONENT ANALYSIS IN FACE RECOGNITION SYSTEM
+Shinfeng D. Lin(cid:3), Bo-Feng Liu and Jia-Hong Lin
+Department of Computer Science and Information Engineering
+National Dong Hwa University
+No. 1, Sec. 2, Da Hsueh Rd., Shoufeng, Hualien 97401, Taiwan
+Corresponding author:
+(cid:3)
+Received October 2011; revised March 2012"
+7ed9913de03dd2990b68751842306c2636852647,VQABQ: Visual Question Answering by Basic Questions,"VQABQ: Visual Question Answering by Basic Questions
+Jia-Hong Huang
+King Abdullah University of Science and Technology
+{jiahong.huang, modar.alfadly,
+Modar Alfadly
+Bernard Ghanem"
+7e53ab07d0ce28484830329036a1fc018b9644dd,Online multiple people tracking-by-detection in crowded scenes,"Journal of Advances in Computer Engineering and Technology, 1(2) 2015
+Online multiple people tracking-by-detection in
+crowded scenes
+Sahar Rahmatian1, Reza Safabakhsh2
+Received (2015-01-23)
+Accepted (2015-03-19)"
+7e3367b9b97f291835cfd0385f45c75ff84f4dc5,Improved local binary pattern based action unit detection using morphological and bilateral filters,"Improved Local Binary Pattern Based Action Unit Detection Using
+Morphological and Bilateral Filters
+Anıl Y¨uce1, Matteo Sorci2 and Jean-Philippe Thiran1
+Signal Processing Laboratory (LTS5)
+´Ecole Polytechnique F´ed´erale de Lausanne,
+Switzerland
+nViso SA
+Lausanne, Switzerland"
+7ef0cc4f3f7566f96f168123bac1e07053a939b2,Triangular Similarity Metric Learning: a Siamese Architecture Approach. ( L'apprentissage de similarité triangulaire en utilisant des réseaux siamois),"Triangular Similarity Metric Learning: a Siamese
+Architecture Approach
+Lilei Zheng
+To cite this version:
+Lilei Zheng. Triangular Similarity Metric Learning: a Siamese Architecture Approach. Com-
+puter Science [cs]. UNIVERSITE DE LYON, 2016. English. <NNT : 2016LYSEI045>. <tel-
+01314392>
+HAL Id: tel-01314392
+https://hal.archives-ouvertes.fr/tel-01314392
+Submitted on 11 May 2016
+HAL is a multi-disciplinary open access
+archive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+abroad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destin´ee au d´epˆot et `a la diffusion de documents
+scientifiques de niveau recherche, publi´es ou non,
+´emanant des ´etablissements d’enseignement et de"
+7e5414277148c8fdf9903068b001887225b69868,Perceptive Parallel Processes Coordinating Geometry and Texture,"Perceptive Parallel Processes Coordinating Geometry and Texture
+Marco A. Gutierrez1, Rafael E. Banchs2 and Luis F. D'Haro2"
+7e7e4af2a79288fd2e391020edff8552ea1ece9a,Trimming Prototypes of Handwritten Digit Images with Subset Infinite Relational Model,"Trimming Prototypes of Handwritten Digit
+Images with Subset Infinite Relational Model
+Tomonari Masada1 and Atsuhiro Takasu2
+Nagasaki University, 1-14 Bunkyo-machi, Nagasaki-shi, Nagasaki, 852-8521 Japan,
+National Institute of Informatics, 2-1-2 Hitotsubashi, Chiyoda-ku, Tokyo, 101-8430
+Japan,"
+7e79c3a92f60c55a6970f89acfa152bcf74823e0,Face Recognition using FSS-DSOP for Small Sample Size Problem with Illumination Variations,"Int. J. Advance. Soft Comput. Appl., Vol. 1, No. 2, November 2009
+ISSN 2074-8523; Copyright © ICSRS Publication, 2009
+www.i-csrs.org
+Face Recognition using FSS-DSOP for Small
+Sample Size Problem with Illumination
+Variations
+Ganesh Bhat, K.K. Achary
+Canara Engineering College,Department of Electronics, India"
+7ebc96b4b7886b263808c2cd62b21158ebf6297c,"Crowd Motion Analysis: Segmentation, Anomaly Detection, and Behavior Classification","CROWD MOTION ANALYSIS:
+SEGMENTATION, ANOMALY
+DETECTION, AND BEHAVIOR
+CLASSIFICATION
+Habib Ullah
+Advisor: Nicola Conci, PhD
+February 2015"
+7e7b4b4a84c2aa0ee69b5cea3a4da7f62a0a37d5,GraSp: Combining Spatially-aware Mobile Devices and a Display Wall for Graph Visualization and Interaction,"Eurographics Conference on Visualization (EuroVis) 2017
+J. Heer, T. Ropinski and J. van Wijk
+(Guest Editors)
+Volume 36 (2017), Number 3
+GRASP: Combining Spatially-aware Mobile Devices
+and a Display Wall for Graph Visualization and Interaction
+U. Kister1, K. Klamka1, C. Tominski2 and R. Dachselt1
+Interactive Media Lab Dresden, Technische Universität Dresden, Germany
+Institute for Computer Science, University of Rostock, Germany
+Figure 1: Mobile devices support graph visualization and interaction on wall-sized displays close to the display wall and further away (A).
+The GRASP system provides a mobile toolbox with selections, alternative representations, lenses, and filtering close to the user (B)."
+7ee53d931668fbed1021839db4210a06e4f33190,What If We Do Not have Multiple Videos of the Same Action? &#x2014; Video Action Localization Using Web Images,"What if we do not have multiple videos of the same action? —
+Video Action Localization Using Web Images
+Center for Research in Computer Vision (CRCV), University of Central Florida (UCF)
+Waqas Sultani, Mubarak Shah"
+7e9df45ece7843fe050033c81014cc30b3a8903a,Audio-visual intent-to-speak detection for human-computer interaction,"AUDIO-VISUAL INTENT-TO-SPEAK DETECTION FOR HUMAN-COMPUTER
+INTERACTION
+Philippe de Cuetos
+Institut Eurecom
+ , route des Cr^etes, BP  
+
+Chalapathy Neti, Andrew W. Senior
+IBM T.J. Watson Research Center
+Yorktown Heights, NY 
+cneti,aws"
+7ebd323ddfe3b6de8368c4682db6d0db7b70df62,Location-based Face Recognition Using Smart Mobile Device Sensors,"Proceedings of the International Conference on Computer and Information Science and Technology
+Ottawa, Ontario, Canada, May 11 – 12, 2015
+Paper No. 111
+Location-based Face Recognition Using Smart Mobile Device
+Sensors
+Nina Taherimakhsousi, Hausi A. Müller
+Department of Computer Science
+University of Victoria, Victoria, Canada"
+7e3693fffef8d83ac109309a77f2545d32c10fc3,The effect of Ramadan fasting on spatial attention through emotional stimuli,"Psychology Research and Behavior Management
+Open access Full Text article
+Dovepress
+open access to scientific and medical research
+O Ri g i n a l R e s e aRc h
+The effect of Ramadan fasting on spatial attention
+through emotional stimuli
+Maziyar Molavi
+Jasmy Yunus
+nugraha P Utama
+Department of clinical sciences,
+Faculty of Biosciences and Medical
+engineering (FBMe), Universiti
+Teknologi Malaysia (UTM), Johor
+Bahru, Johor, Malaysia
+Correspondence: Nugraha P Utama
+Department of clinical sciences, Faculty
+of Biosciences and Medical engineering,
+Universiti Teknologi Malaysia (UTM),
+81310 Johor Bahru, Johor, Malaysia"
+7e59d2d3416537dd958ff71b7a0bff87e639dad9,Feature-Based Pose Estimation,"Feature-based Pose Estimation
+Cristian Sminchisescu1,2, Liefeng Bo3, Catalin Ionescu4, Atul Kanaujia5"
+7ea7c073d13e80ec5015f41f1d57f0674502cc5e,An Implementation of Face Emotion Identification System using Active Contour Model and PCA,"IJSRD - International Journal for Scientific Research & Development| Vol. 3, Issue 04, 2015 | ISSN (online): 2321-0613
+An Implementation of Face Emotion Identification System using Active
+Contour Model and PCA
+Namita Rathore1 Mr.Rohit Miri2
+P.G. Student 2Assistant Professor
+,2Department of Computer Science and Engineering
+,2DR C V Raman Institute of Science and Technology Kota, bilaspur
+systems,
+surveillance"
+7e463877264e70d53c844cf4b1bf3b15baec8cfb,ReNet: A Recurrent Neural Network Based Alternative to Convolutional Networks,"ReNet: A Recurrent Neural Network Based
+Alternative to Convolutional Networks
+Francesco Visin(cid:63)
+Politecnico di Milano
+Kyle Kastner(cid:63)
+University of Montreal
+Kyunghyun Cho(cid:63)
+University of Montreal
+Matteo Matteucci
+Politecnico di Milano
+Aaron Courville
+University of Montreal
+Yoshua Bengio
+University of Montreal
+CIFAR Senior Fellow"
+7ed6ff077422f156932fde320e6b3bd66f8ffbcb,State of 3D Face Biometrics for Homeland Security Applications,"State of 3D Face Biometrics for Homeland Security Applications
+Anshuman Razdan1, Gerald Farin2, Myung Soo-Bae3 and Mahesh
+Chaudhari4"
+7e3b5d30b83a20c7cffdacf53b3ffbaf81002b54,People Transitioning Across Places: A Multimethod Investigation of How People Go to Football Games,"12589 EABXXX10.1177/0013916511412589
+© The Author(s) 2011
+Reprints and permission: http://www.
+sagepub.com/journalsPermissions.nav
+Environment and Behavior
+XX(X) 1 –28
+© 2011 SAGE Publications
+Reprints and permission: http://www.
+sagepub.com/journalsPermissions.nav
+DOI: 10.1177/0013916511412589
+http://eab.sagepub.com
+People Transitioning
+Across Places: A
+Multimethod
+Investigation of
+How People Go to
+Football Games
+R. Barry Ruback1, Robert T. Collins1,
+Sarah Koon-Magnin1, Weina Ge2,
+Luke Bonkiewicz1, and Clifford E. Lutz1"
+7e654380bd0d1f4c00e85da71a3081d3ada432ef,Mgan: Training Generative Adversarial Nets,"Under review as a conference paper at ICLR 2018
+MGAN: TRAINING GENERATIVE ADVERSARIAL NETS WITH
+MULTIPLE GENERATORS
+Anonymous authors
+Paper under double-blind review"
+7ed5dca8725d59714d61ef8e1a14cc4b71c56d3f,Face Sketch to Photo Matching Using LFDA and Pre-Processing,"International Journal of Science and Research (IJSR)
+ISSN (Online): 2319-7064
+Impact Factor (2012): 3.358
+Face Sketch to Photo Matching Using LFDA and
+Pre-Processing
+Pushpa Gopal Ambhore1, Lokesh Bijole2
+Research Scholar, 2Assistant professor, Computer Engineering Department,
+Padm. Dr. V. B. Kolte College of Engineering, Malkapur, Maharashtra, India"
+7e25544be9ba701c8cf02c841e0bbadb36fa0e29,Zero-Shot Visual Recognition using Semantics-Preserving Adversarial Embedding Network,"Zero-Shot Visual Recognition using Semantics-Preserving
+Adversarial Embedding Networks
+Long Chen1 Hanwang Zhang2
+Jun Xiao1∗ Wei Liu3
+Shih-Fu Chang4
+Zhejiang University 2Nanyang Technological University 3Tencent AI Lab 4Columbia University
+{longc, {wliu,
+Figure 1: (a) Attribute variance heat maps of the 312 attributes in CUB birds [60] and the 102 attributes in SUN scenes [47]
+(lighter color indicates lower variance, i.e., lower discriminability) and the t-SNE [35] visualizations of the test images
+represented by all attributes (left) and only the high-variance ones (right). Some of the low-variance attributes (the lighter
+part to the left of the cut-off line) discarded at training are still needed in discriminating unseen test classes. (b) Comparison
+of reconstructed images using SAE [25] and our proposed SP-AEN method, which is shown to retain sufficient semantics for
+photo-realistic reconstruction."
+7e507370124a2ac66fb7a228d75be032ddd083cc,Dynamic Pose-Robust Facial Expression Recognition by Multi-View Pairwise Conditional Random Forests,"This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TAFFC.2017.2708106, IEEE
+Transactions on Affective Computing
+Dynamic Pose-Robust Facial Expression
+Recognition by Multi-View Pairwise Conditional
+Random Forests
+Arnaud Dapogny1 and Kevin Bailly1 and S´everine Dubuisson1
+Sorbonne Universit´es, UPMC Univ Paris 06
+CNRS, UMR 7222, F-75005, Paris, France"
+7ea07b7b27d59300840df17e5881dbe3a4769872,Detection driven adaptive multi-cue integration for multiple human tracking,"Detection Driven Adaptive Multi-cue Integration for Multiple Human Tracking
+Ming Yang, Fengjun Lv, Wei Xu, Yihong Gong
+NEC Laboratories America, Inc.
+0080 North Wolfe Road, SW-350, Cupertino, CA 95014"
+10fb32ef34f815e9056ba71bc4b67a9951b4475b,End-to-End Audio Visual Scene-Aware Dialog using Multimodal Attention-Based Video Features,"End-to-End Audio Visual Scene-Aware Dialog using
+Multimodal Attention-Based Video Features
+Chiori Hori†, Huda Alamri∗†, Jue Wang†, Gordon Wichern†,
+Vincent Cartillier∗, Raphael Gontijo Lopes∗, Abhishek Das∗,
+Takaaki Hori†, Anoop Cherian†, Tim K. Marks†,
+Irfan Essa∗, Dhruv Batra∗ Devi Parikh∗,
+Mitsubishi Electric Research Laboratories (MERL), Cambridge, MA, USA
+School of Interactive Computing, Georgia Tech"
+1042683cf5733244238198ff486d3a65e70c9621,End-to-End Instance Segmentation with Recurrent Attention,"End-to-End Instance Segmentation with Recurrent Attention
+Mengye Ren1, Richard S. Zemel1,2
+University of Toronto1, Canadian Institute for Advanced Research2"
+1059729bcca57731c81d8a9c866ceb8ed3547d8d,Coupled Object Detection and Tracking from Static Cameras and Moving Vehicles,"Coupled Object Detection and Tracking from
+Static Cameras and Moving Vehicles
+Bastian Leibe, Konrad Schindler, Nico Cornelis, and Luc Van Gool"
+100f57d2eb737d6cb467bfac6e4bbfa9b39e774f,Mixing Body-Part Sequences for Human Pose Estimation,"Mixing Body-Part Sequences for Human Pose Estimation
+Anoop Cherian∗
+Julien Mairal∗ Karteek Alahari∗ Cordelia Schmid∗
+Inria"
+10cdb31a23c3233527ad2f8beebe7803b7a51a8c,Altered Neocortical Microcircuitry in the Valproic Acid Rat Model of Autism,"Altered Neocortical Microcircuitry in the
+Valproic Acid Rat Model of Autism
+THÈSE N° 3701 (2006)
+PRÉSENTÉE LE 20 NOVEMBRE
+À LA FACULTÉ DES SCIENCES DE LA VIE
+LABORATOIRE DE NEUROSCIENCE DES MICROCIRCUITS
+PROGRAMME DOCTORAL EN NEUROSCIENCES
+ÉCOLE POLYTECHNIQUE FÉDÉRALE DE LAUSANNE
+POUR L’OBTENTION DU GRADE DE DOCTEUR ÈS SCIENCES
+Tania Rinaldi
+ingénieur chimiste diplômée EPF
+de nationalité suisse et originaire de Vouvry (VS)
+acceptée sur proposition du jury:
+Prof. R. Schneggenburger, président du jury
+Prof. H. Markram, directeur de thèse
+Prof. B. Gähwiler, rapporteur
+Prof. A. Lüthi, rapporteur
+Prof. C. Petersen, rapporteur
+Suisse
+(2006) année d’impression"
+10e7dd3bbbfbc25661213155e0de1a9f043461a2,Cross Euclidean-to-Riemannian Metric Learning with Application to Face Recognition from Video,"Cross Euclidean-to-Riemannian Metric Learning
+with Application to Face Recognition from Video
+Zhiwu Huang, Member, IEEE, Ruiping Wang, Member, IEEE, Shiguang Shan, Senior Member, IEEE,
+Luc Van Gool, Member, IEEE and Xilin Chen, Fellow, IEEE"
+106b54ed74f0fffaf6408a9b847d4ac0aa0ffef9,Block-Diagonal Sparse Representation by Learning a Linear Combination Dictionary for Recognition,"JOURNAL OF LATEX CLASS FILES, VOL. 11, NO. 4, DECEMBER 2015
+Block-Diagonal Sparse Representation by Learning
+a Linear Combination Dictionary for Recognition
+Xinglin Piao, Yongli Hu, Member, IEEE, Yanfeng Sun, Member, IEEE, Junbin Gao, Baocai Yin, Member, IEEE"
+10c4b2489d7e1ee43a1d19724d3c1e9c33ca3f29,A Question-Answering framework for plots using Deep learning,"A Question-Answering framework for plots using Deep learning
+Revanth Reddy1, Rahul Ramesh1, Ameet Deshpande1 and Mitesh M. Khapra1
+Indian Institute of Technology Madras"
+10d39dedfaf34d862e3ca7216521c6290044ff87,Synthesized Classifiers for Zero-Shot Learning,"Synthesized Classifiers for Zero-Shot Learning
+Soravit Changpinyo∗, Wei-Lun Chao∗
+U. of Southern California
+Los Angeles, CA
+Boqing Gong
+U. of Central Florida
+Orlando, FL
+schangpi,
+Fei Sha
+U. of California
+Los Angeles, CA"
+10c077bf2dd1bed928926feb37837862ab786808,"Multiple Target Tracking and Identity Linking under Split, Merge and Occlusion of Targets and Observations","Multiple target tracking and identity linking under split, merge and
+occlusion of targets and observations
+Anonymous submission
+Keywords:
+Tracking, graphical models, MAP inference, particle tracking, live cell tracking, intelligent headlights."
+101c5b39f4fc4dda1f39bf0c00e196f0a4720af2,Viewpoint Invariant Human Re-Identification in Camera Networks Using Pose Priors and Subject-Discriminative Features,"Viewpoint Invariant Human Re-identification in
+Camera Networks Using Pose Priors and
+Subject-Discriminative Features
+Ziyan Wu, Student Member, IEEE, Yang Li, Student Member, IEEE, and Richard J. Radke, Senior
+Member, IEEE"
+10d8a48deae967b627839cc95c98b6c080ba9966,Overview of the ImageCLEF 2013 Scalable Concept Image Annotation Subtask,"Overview of the ImageCLEF 2013 Scalable
+Concept Image Annotation Subtask
+Mauricio Villegas,† Roberto Paredes† and Bart Thomee‡
+ITI/DSIC, Universitat Polit`ecnica de Val`encia
+Cam´ı de Vera s/n, 46022 Val`encia, Spain
+Yahoo! Research
+Avinguda Diagonal 177, 08018 Barcelona, Spain"
+10ca3d8802ab0cc6ce000682a42fd9f6575a2006,Embedding Semantic Information into the Content of Natural Scenes Images,"http://dx.doi.org/10.5755/j01.eee.18.9.2808
+ELEKTRONIKA IR ELEKTROTECHNIKA, ISSN 1392-1215, VOL. 18, NO. 9, 2012
+Embedding Semantic Information into the
+Content of Natural Scenes Images
+G. Kazakeviciute-Januskeviciene1, E. Januskevicius2
+Department of Graphical systems, Vilnius Gediminas Technical University,
+Saulėtekio av.11, Vilnius, Lithuania, phone: +370 5 2744848
+Department of Building Structures, Vilnius Gediminas Technical University,
+Pylimo St. 26/1, Vilnius, Lithuania; phone: +370 5 2745205"
+10b3afc6a10149cd88bc6f4007b41895d661d5fe,SAN: Learning Relationship Between Convolutional Features for Multi-scale Object Detection,"SAN: Learning Relationship between
+Convolutional Features
+for Multi-Scale Object Detection
+Yonghyun Kim1[0000−0003−0038−7850], Bong-Nam Kang2[0000−0002−6818−7532],
+nd Daijin Kim1[0000−0002−8046−8521]
+Department of Computer Science and Engineering, POSTECH, Korea
+Department of Creative IT Engineering, POSTECH, Korea"
+1099d475ee0807fc0e4aec55b636db4abc01dcb6,Perceptual Principles for Video Classification With Slow Feature Analysis,"Perceptual principles for video classification with
+Slow Feature Analysis
+Christian Th´eriault(1), Nicolas Thome(1), Matthieu Cord(1), Patrick P´erez(2)
+(1)UPMC-Sorbonne Universities, Paris, France (2)Technicolor, France"
+10be82098017fc2d60b0572cea8032afabad5d1a,A Dataset for Multimodal Question Answering in the Cultural Heritage Domain,"Proceedings of the Workshop on Language Technology Resources and Tools for Digital Humanities (LT4DH),
+pages 10–17, Osaka, Japan, December 11-17 2016."
+10ce3a4724557d47df8f768670bfdd5cd5738f95,Fisher Light-Fields for Face Recognition across Pose and Illumination,"Fihe igh Fie
+Ac e ad 
+Ra
+The Rbic i e Caegie e
+5000 Fbe Ave e ib gh A 15213
+Abac.  ay face ecgii ak he e ad i
+dii f he be ad ga
+
+di(cid:11)ee e ad de a di(cid:11)ee i
+ecgii a
+ bjec ca ed a abiay e ad de abiay i
+d ay  be f be iage agai ca ed a abiay e ad
+de abiay i
+Fihe
+iage. achig bewee he be ad ga
+he Fihe
+d ci
+ ay face ecgii ceai he e f he be ad ga
+di(cid:11)ee. The ga
+The a
+102e374347698fe5404e1d83f441630b1abf62d9,Facial Image Analysis for Fully Automatic Prediction of Difficult Endotracheal Intubation,"Facial Image Analysis for Fully-Automatic
+Prediction of Difficult Endotracheal Intubation
+Gabriel L. Cuendet, Student Member, IEEE, Patrick Schoettker, Anıl Y¨uce Student Member, IEEE, Matteo Sorci,
+Hua Gao, Christophe Perruchoud, Jean-Philippe Thiran, Senior Member, IEEE"
+101c7bfc56091b627886636afcf1103c1cecccf6,Rapid Clothing Retrieval via Deep Learning of Binary Codes and Hierarchical Search,"Rapid Clothing Retrieval via Deep Learning of Binary
+Codes and Hierarchical Search
+Kevin Lin
+Academia Sinica, Taiwan
+Huei-Fang Yang
+Academia Sinica, Taiwan
+Kuan-Hsien Liu
+Academia Sinica, Taiwan
+Jen-Hao Hsiao
+Yahoo! Taiwan
+Chu-Song Chen
+Academia Sinica, Taiwan"
+10114df7ddbb221337cc1e99e1de0eab8e47c95d,Evaluating Feature Importance for Re-identification,"Chapter 9
+Evaluating Feature Importance for
+Re-Identification
+Chunxiao Liu, Shaogang Gong, Chen Change Loy, and Xinggang Lin"
+1068f6eca07c35426ca67961f00c3cac4866f155,Bilinear Models for 3-D Face and Facial Expression Recognition,"Bilinear Models for 3D Face and Facial
+Expression Recognition
+Iordanis Mpiperis, Sotiris Malassiotis and Michael G. Strintzis, Fellow,"
+102a2096ba2e2947dc252445f764e7583b557680,Precomputed Real-Time Texture Synthesis with Markovian Generative Adversarial Networks,"Precomputed Real-Time Texture Synthesis with
+Markovian Generative Adversarial Networks
+Chuan Li and Michael Wand
+Institut for Informatik, University of Mainz, Germany"
+10261848b16292a5c8c700de6c6c9f692867c9c8,Cleaning Training-Datasets with Noise-Aware Algorithms,"Cleaning Training-Datasets with Noise-Aware Algorithms
+Instituto Nacional de Astrof´ısica ´Optica y Electr´onica,
+H. Jair Escalante
+Computer Science Department
+Tonantzintla, Puebla, 72840, M´exico"
+100641ed8a5472536dde53c1f50fa2dd2d4e9be9,Visual attributes for enhanced human-machine communication,"Visual Attributes for Enhanced Human-Machine Communication*
+Devi Parikh1"
+10678172baa93d8318dd1945d09f38721a0c1ffa,A Comparison of Adaptive Appearance Methods for Tracking Faces in Video Surveillance,"A Comparison of Adaptive Appearance Methods for Tracking
+Faces in Video Surveillance
+M. Ali Akber Dewan*, E. Granger*, F. Roli†, R. Sabourin*, and G. L. Marcialis†
+*Laboratoire d’imagerie, de vision et d’intelligence artificielle, École de technologie supérieure,
+Université du Québec, Montréal, Canada
+Department of Electrical and Electronic Engineering, University of Cagliari, Piazza d'Armi, Cagliari, Italy
+Keywords: Biometrics, Face Tracking, Spatiotemporal Face
+Recognition, Video Surveillance, On-Line and Incremental
+Learning, Adaptive Appearance Methods."
+10916d4eeacbf63a178c229868160189c6ce8850,Extraction of Illumination Invariant Features using Fuzzy Threshold based Approach,"International Conference on Intelligent Systems and Data Processing (ICISD) 2011
+Special Issue published by International Journal of Computer Applications® (IJCA)
+Extraction of Illumination Invariant Features using
+Fuzzy Threshold based Approach
+R. M. Makwana
+V. K. Thakar
+N.C. Chauhan
+Dept. of Computer Engineering
+A. D. Patel Inst. of Technology,
+S.P. University, New V.V. Nagar
+Dept. of Electronics and Commu.
+A. D. Patel Inst. of Technology
+S.P. University, New V.V. Nagar
+Dept. of Information Technology
+A. D. Patel Inst. of Technology
+S.P. University, New V.V. Nagar
+in unconstrained environment"
+105fdf31d14ec55fda91c05059ec83162ba7ce3a,Automatic feature generation and selection in predictive analytics solutions,AutomaticfeaturegenerationandselectioninpredictiveanalyticssolutionsSuzannevandenBosch
+10f641aabdd8bc1eb87fae74c63b814d8ef274a5,Automatic Single-Image People Segmentation and Removal for Cultural Heritage Imaging,"Automatic Single-Image People Segmentation
+nd Removal for Cultural Heritage Imaging
+Marco Manfredi, Costantino Grana, and Rita Cucchiara
+Universit`a degli Studi di Modena e Reggio Emilia, Modena MO 41125, Italy"
+101569eeef2cecc576578bd6500f1c2dcc0274e2,Multiaccuracy: Black-Box Post-Processing for Fairness in Classification,"Multiaccuracy: Black-Box Post-Processing for Fairness in
+Michael P. Kim∗†
+Classification
+Amirata Ghorbani∗
+James Zou"
+106732a010b1baf13c61d0994552aee8336f8c85,Expanded Parts Model for Semantic Description of Humans in Still Images,"Expanded Parts Model for Semantic Description
+of Humans in Still Images
+Gaurav Sharma, Member, IEEE, Fr´ed´eric Jurie, and Cordelia Schmid, Fellow, IEEE"
+102b27922e9bd56667303f986404f0e1243b68ab,Multiscale recurrent regression networks for face alignment,"Wang et al. Appl Inform (2017) 4:13
+DOI 10.1186/s40535-017-0042-5
+RESEARCH
+Multiscale recurrent regression networks
+for face alignment
+Open Access
+Caixun Wang1,2,3, Haomiao Sun1,2,3, Jiwen Lu1,2,3*, Jianjiang Feng1,2,3 and Jie Zhou1,2,3
+*Correspondence:
+State Key Lab of Intelligent
+Technologies and Systems,
+Beijing 100084, People’s
+Republic of China
+Full list of author information
+is available at the end of the
+rticle"
+107010b7f2abe3c0c9df62bcef35eb77f6fc76df,Domain-Adversarial Training of Neural Networks,"Journal of Machine Learning Research 17 (2016) 1-35
+Submitted 5/15; Published 4/16
+Domain-Adversarial Training of Neural Networks
+Yaroslav Ganin
+Evgeniya Ustinova
+Skolkovo Institute of Science and Technology (Skoltech)
+Skolkovo, Moscow Region, Russia
+Hana Ajakan
+Pascal Germain
+D´epartement d’informatique et de g´enie logiciel, Universit´e Laval
+Qu´ebec, Canada, G1V 0A6
+Hugo Larochelle
+D´epartement d’informatique, Universit´e de Sherbrooke
+Qu´ebec, Canada, J1K 2R1
+Fran¸cois Laviolette
+Mario Marchand
+D´epartement d’informatique et de g´enie logiciel, Universit´e Laval
+Qu´ebec, Canada, G1V 0A6
+Victor Lempitsky
+Skolkovo Institute of Science and Technology (Skoltech)"
+10fcbf30723033a5046db791fec2d3d286e34daa,On-Line Cursive Handwriting Recognition: A Survey of Methods and Performances,"On-Line Cursive Handwriting Recognition: A Survey of Methods
+nd Performances
+Dzulkifli Mohamad* , 2Muhammad Faisal Zafar*, and 3Razib M. Othman*
+*Faculty of Computer Science & Information Systems, Universiti Teknologi Malaysia (UTM) , 81310
+Skudai, Johor, Malaysia."
+108b2581e07c6b7ca235717c749d45a1fa15bb24,Using Stereo Matching with General Epipolar Geometry for 2D Face Recognition across Pose,"Using Stereo Matching with General Epipolar
+Geometry for 2D Face Recognition
+cross Pose
+Carlos D. Castillo, Student Member, IEEE, and
+David W. Jacobs, Member, IEEE"
+103590b36d026928a90eae7ade9d7da318202168,Indoor Scene Recognition Using Local Semantic Concepts,"Indoor Scene Recognition Using Local Semantic
+Concepts
+Elham Seifossadat1, Niloofar Gheissari2 and Ali Fanian3
+Electrical and Computer Department,Isfahan University of Technology
+Isfahan, Iran
+Electrical and Computer Department,Isfahan University of Technology
+Isfahan, Iran
+3 Electrical and Computer Department,Isfahan University of Technology
+Isfahan, Iran"
+10773e5c1bc8a9a901a8baf4d0b891397975ea9d,Group encoding of local features in image classification,"1st International Conference on Pattern Recognition (ICPR 2012)
+November 11-15, 2012. Tsukuba, Japan
+978-4-9906441-1-6 ©2012 IAPR"
+10d334a98c1e2a9e96c6c3713aadd42a557abb8b,Scene Text Recognition Using Part-Based Tree-Structured Character Detection,"Scene Text Recognition using Part-based Tree-structured Character Detection
+Cunzhao Shi, Chunheng Wang, Baihua Xiao, Yang Zhang, Song Gao and Zhong Zhang
+State Key Laboratory of Management and Control for Complex Systems, CASIA, Beijing, China"
+1038aa6c1f63c1de9045f10e47ed573810cb4a52,A Video-Based Method for Objectively Rating Ataxia,"A Video-Based Method for Objectively Rating Ataxia
+Ronnachai Jaroensri∗1, Amy Zhao∗1, Guha Balakrishnan1, Derek Lo2, Jeremy Schmahmann3,
+John Guttag1, and Fr´edo Durand1
+MIT CSAIL 2Yale University 3Massachusetts General Hospital"
+1040a32d5bd5e6f4c8bc1932345ef93671e2c019,Real-time RGB-D based template matching pedestrian detection,"Real-Time RGB-D based Template Matching Pedestrian Detection
+Omid Hosseini jafari and Michael Ying Yang"
+109df0e8e5969ddf01e073143e83599228a1163f,Scheduling heterogeneous multi-cores through performance impact estimation (PIE),"Scheduling Heterogeneous Multi-Cores through
+Performance Impact Estimation (PIE)
+Kenzo Van Craeynest•∗ Aamer Jaleel†
+Lieven Eeckhout•
+Paolo Narvaez†
+Joel Emer†‡
+Ghent University•
+Ghent, Belgium
+{kenzo.vancraeynest,
+Intel Corporation, VSSAD†
+{aamer.jaleel,paolo.narvaez,
+Hudson, MA
+Cambridge, MA"
+1048c753e9488daa2441c50577fe5fdba5aa5d7c,Recognising faces in unseen modes: A tensor based approach,"Recognising faces in unseen modes: a tensor based approach
+Santu Rana, Wanquan Liu, Mihai Lazarescu and Svetha Venkatesh
+{santu.rana, wanquan, m.lazarescu,
+Dept. of Computing, Curtin University of Technology
+GPO Box U1987, Perth, WA 6845, Australia."
+191753aa338f24bb41f7bacb4326e0c0a1b90459,"Visual People Detection – Different Models, Comparison and Discussion","Visual People Detection – Different Models, Comparison and Discussion
+Bernt Schiele, Mykhaylo Andriluka, Nikodem Majer, Stefan Roth and Christian Wojek
+Department of Computer Science, TU Darmstadt"
+199fdc3c0b73d9469d2e732c97e889bfc8bf8bff,"Multi-Class Constrained Normalized Cut With Hard, Soft, Unary and Pairwise Priors and its Applications to Object Segmentation","Multi-Class Constrained Normalized Cut With
+Hard, Soft, Unary and Pairwise Priors and Its
+Applications to Object Segmentation
+Han Hu, Jianjiang Feng, Member, IEEE, Chuan Yu, and Jie Zhou, Senior Member, IEEE"
+199aabb19ea78576a74d573739a7f35cf04fac6e,Fast globally optimal 2D human detection with loopy graph models,"Fast Globally Optimal 2D Human
+Detection with Loopy Graph Models
+Paper by
+T.-P. Tian and S. Sclaroff
+Slides by A. Vedaldi"
+19fd089807f8925b9384bae6e66cbfe7e6d318aa,Acume: A new visualization tool for understanding facial expression and gesture data,"Acume: A New Visualization Tool for
+Understanding Facial Expression and Gesture
+Daniel McDuff - MIT Media Lab
+March 24, 2011"
+19841b721bfe31899e238982a22257287b9be66a,Recurrent Neural Networks,"Published as a conference paper at ICLR 2018
+SKIP RNN: LEARNING TO SKIP STATE UPDATES IN
+RECURRENT NEURAL NETWORKS
+V´ıctor Campos∗†, Brendan Jou‡, Xavier Gir´o-i-Nieto§, Jordi Torres†, Shih-Fu ChangΓ
+Barcelona Supercomputing Center, ‡Google Inc,
+§Universitat Polit`ecnica de Catalunya, ΓColumbia University
+{victor.campos,"
+19cfe13e8196872b81d6f31d2849dc540d146f7c,A Bayesian Framework for Sparse Representation-Based 3-D Human Pose Estimation,"A Bayesian Framework for Sparse
+Representation-Based 3D Human Pose Estimation
+Behnam Babagholami-Mohamadabadi, Amin Jourabloo, Ali Zarghami, and Shohreh Kasaei Senior Member, IEEE"
+19dc5a1156819230e6ae425e9c9d56e898d6bcb9,Comparing human and machine face recognition,"Comparing human and machine face recognition1
+Face Recognition Algorithms
+Surpass Humans Matching Faces Over
+Changes in Illumination
+Alice J. O’TOOLE, P. Jonathon PHILLIPS, Fang JIANG, Janet AYYAD, Nils PENARD,
+nd Hervé ABDI*"
+19fcb95815e4c225b250f7deed9be3e90963933d,Evaluación de la calidad de las imágenes de rostros utilizadas para la identificación de las personas,"ISSN: 1405-5546
+Instituto Politécnico Nacional
+México
+Méndez-Vázquez, Heydi; Chang, Leonardo; Rizo-Rodríguez, Dayron; Morales-González, Annette
+Evaluación de la calidad de las imágenes de rostros utilizadas para la identificación de las personas
+Instituto Politécnico Nacional
+Distrito Federal, México
+Disponible en: http://www.redalyc.org/articulo.oa?id=61523309003
+Cómo citar el artículo
+Número completo
+Más información del artículo
+Página de la revista en redalyc.org
+Sistema de Información Científica
+Red de Revistas Científicas de América Latina, el Caribe, España y Portugal
+Proyecto académico sin fines de lucro, desarrollado bajo la iniciativa de acceso abierto"
+19441b8be551e8134dd9eb33238309bc2de0a42f,Playing for Benchmarks,"Playing for Benchmarks
+Stephan R. Richter
+TU Darmstadt
+Zeeshan Hayder
+Vladlen Koltun
+Intel Labs
+Figure 1. Data for several tasks in our benchmark suite. Clockwise from top left: input video frame, semantic segmentation, semantic
+instance segmentation, 3D scene layout, visual odometry, optical flow. Each task is presented on a different image."
+192723085945c1d44bdd47e516c716169c06b7c0,Vision and Attention Theory Based Sampling for Continuous Facial Emotion Recognition,"This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation
+Vision and Attention Theory Based Sampling
+for Continuous Facial Emotion Recognition
+Albert C. Cruz, Student Member, IEEE, Bir Bhanu, Fellow, IEEE, and
+Ninad S. Thakoor, Member, IEEE"
+197a3c1863c780507798c9550dd6faadeb65caaa,Processing and Recognising Faces in 3D Images,",300+OPEN ACCESS BOOKS107,000+INTERNATIONALAUTHORS AND EDITORS113+ MILLIONDOWNLOADSBOOKSDELIVERED TO151 COUNTRIESAUTHORS AMONGTOP 1%MOST CITED SCIENTIST12.2%AUTHORS AND EDITORSFROM TOP 500 UNIVERSITIESSelection of our books indexed in theBook Citation Index in Web of Science™Core Collection (BKCI)Chapter from the book New Approaches to Characterization and Recognition of FacesDownloaded from: http://www.intechopen.com/books/new-approaches-to-characterization-and-recognition-of-facesPUBLISHED BYWorld's largest Science,Technology & Medicine Open Access book publisherInterested in publishing with InTechOpen?Contact us at"
+19b9e5127155730c618c0e1b41e1c723f143651d,Face Verification for Mobile Personal Devices,"Face Verification for Mobile Personal Devices
+Qian Tao"
+19fb5e5207b4a964e5ab50d421e2549ce472baa8,Online emotional facial expression dictionary,"International Conference on Computer Systems and Technologies - CompSysTech’14
+Online Emotional Facial Expression Dictionary
+Léon Rothkrantz"
+1962e4c9f60864b96c49d85eb897141486e9f6d1,Locality preserving embedding for face and handwriting digital recognition,"Neural Comput & Applic (2011) 20:565–573
+DOI 10.1007/s00521-011-0577-7
+O R I G I N A L A R T I C L E
+Locality preserving embedding for face and handwriting digital
+recognition
+Zhihui Lai • MingHua Wan • Zhong Jin
+Received: 3 December 2008 / Accepted: 11 March 2011 / Published online: 1 April 2011
+Ó Springer-Verlag London Limited 2011
+supervised manifold
+the local sub-manifolds."
+19bc52323383732c3c7d73e11726f6232515d2f9,KAIST Multi-Spectral Day/Night Data Set for Autonomous and Assisted Driving,"This article has been accepted for inclusion in a future issue of this journal. Content is final as presented, with the exception of pagination.
+KAIST Multi-Spectral Day/Night Data Set for
+Autonomous and Assisted Driving
+Yukyung Choi
+, Namil Kim, Soonmin Hwang, Kibaek Park, Jae Shin Yoon,
+Kyounghwan An, Member, IEEE, and In So Kweon, Member, IEEE
+i.e., a thermal"
+191674c64f89c1b5cba19732869aa48c38698c84,Face Image Retrieval Using Attribute - Enhanced Sparse Codewords,"International Journal of Advanced Technology in Engineering and Science www.ijates.com
+Volume No.03, Issue No. 03, March 2015 ISSN (online): 2348 – 7550
+FACE IMAGE RETRIEVAL USING ATTRIBUTE -
+ENHANCED SPARSE CODEWORDS
+E.Sakthivel1 , M.Ashok kumar2
+PG scholar, Communication Systems, Adhiyamaan College of Engineeing,Hosur,(India)
+Asst. Prof., Electronics And Communication Engg., Adhiyamaan College of Engg.,Hosur,(India)"
+190d8bd39c50b37b27b17ac1213e6dde105b21b8,Mining Weakly Labeled Web Facial Images for Search-Based Face Annotation,"This document is downloaded from DR-NTU, Nanyang Technological
+University Library, Singapore.
+Title
+Mining weakly labeled web facial images for search-
+ased face annotation
+Author(s) Wang, Dayong; Hoi, Steven C. H.; He, Ying; Zhu, Jianke
+Citation
+Wang, D., Hoi, S. C. H., He, Y., & Zhu, J. (2014). Mining
+weakly labeled web facial images for search-based face
+nnotation. IEEE Transactions on Knowledge and Data
+Engineering, 26(1), 166-179.
+http://hdl.handle.net/10220/18955
+Rights
+© 2014 IEEE. Personal use of this material is permitted.
+Permission from IEEE must be obtained for all other
+uses, in any current or future media, including
+reprinting/republishing this material for advertising or
+promotional purposes, creating new collective works, for
+resale or redistribution to servers or lists, or reuse of any
+opyrighted component of this work in other works."
+19af008599fb17bbd9b12288c44f310881df951c,Discriminative Local Sparse Representations for Robust Face Recognition,"Discriminative Local Sparse Representations for
+Robust Face Recognition
+Yi Chen, Umamahesh Srinivas, Thong T. Do, Vishal Monga, and Trac D. Tran"
+19296e129c70b332a8c0a67af8990f2f4d4f44d1,Is that you? Metric learning approaches for face identification,"Metric Learning Approaches for Face Identification
+Is that you?
+M. Guillaumin, J. Verbeek and C. Schmid
+LEAR team, INRIA Rhˆone-Alpes, France
+Supplementary Material"
+19666b9eefcbf764df7c1f5b6938031bcf777191,Group Component Analysis for Multiblock Data: Common and Individual Feature Extraction,"Group Component Analysis for Multi-block Data:
+Common and Individual Feature Extraction
+Guoxu Zhou, Andrzej Cichocki Fellow, IEEE, Yu Zhang, and Danilo Mandic Fellow, IEEE"
+198b6beb53e0e61357825d57938719f614685f75,Vaulted Verification: A Scheme for Revocable Face Recognition,"Vaulted Verification: A Scheme for Revocable Face
+Recognition
+Michael Wilber
+University of Colorado, Colorado Springs"
+197eafb6abb6b7d2813eec0891b143e27fc57386,Smile! Studying expressivity of happiness as a synergic factor in collaborative information seeking,"Smile! Studying expressivity of happiness as a synergic factor in collaborative
+information seeking.
+Rutgers University has made this article freely available. Please share how this access benefits you.
+Your story matters. [https://rucore.libraries.rutgers.edu/rutgers-lib/47408/story/]
+This work is the AUTHOR'S ORIGINAL (AO)
+This is the author's original version of a work, which may or may not have been subsequently published. The author accepts full
+responsibility for the article. Content and layout is as set out by the author.
+Citation to this Version: Shah, Chirag, González-Ibáñez, Roberto & Córdova-Rubio, Natalia. (2011). Smile! Studying
+expressivity of happiness as a synergic factor in collaborative information seeking.. New Orleans
+(La.). Retrieved from doi:10.7282/T3NK3GWF.
+Terms of Use: Copyright for scholarly resources published in RUcore is retained by the copyright holder. By virtue of its appearance in this open
+ccess medium, you are free to use this resource, with proper attribution, in educational and other non-commercial settings. Other uses, such as
+reproduction or republication, may require the permission of the copyright holder.
+Article begins on next page
+SOAR is a service of RUcore, the Rutgers University Community Repository
+RUcore is developed and maintained by Rutgers University Libraries"
+19911c7e66b05d5aa28673608fdfc50ef00591dd,Recognizing Human Faces: Physical Modeling and Pattern Classification,
+195d331c958f2da3431f37a344559f9bce09c0f7,Parsing occluded people by flexible compositions,"Parsing Occluded People by Flexible Compositions
+Xianjie Chen, Alan Yuille
+University of California, Los Angeles.
+Figure 1: An illustration of the flexible compositions. Each connected sub-
+tree of the full graph (include the full graph itself) is a flexible composition.
+The flexible compositions that do not have certain parts are suitable for the
+people with those parts occluded.
+Figure 2: The absence of body parts evidence can help to predict occlusion.
+However, absence of evidence is not evidence of absence.
+It can fail in
+some challenging scenes. The local image measurements near the occlusion
+oundary (i.e., around the right elbow and left shoulder) can reliably provide
+evidence of occlusion.
+This paper presents an approach to parsing humans when there is signifi-
+ant occlusion. We model humans using a graphical model which has a tree
+structure building on recent work [1, 6] and exploit the connectivity prior
+that, even in presence of occlusion, the visible nodes form a connected sub-
+tree of the graphical model. We call each connected subtree a flexible com-
+position of object parts. This involves a novel method for learning occlusion
+ues. During inference we need to search over a mixture of different flexible"
+19a30ad283f2ab2d84f1c666d17492da14056d75,Visuomotor Coordination in Reach-To-Grasp Tasks: From Humans to Humanoids and Vice Versa,"Visuomotor Coordination in Reach-To-Grasp Tasks:
+From Humans to Humanoids and Vice Versa
+THÈSE NO 6695 (2015)
+PRÉSENTÉE LE 4 JUIN 2015
+À L’ÉCOLE POLYTECHNIQUE FÉDÉRALE DE LAUSANNE
+À LA FACULTÉ DES SCIENCES ET TECHNIQUES DE L'INGÉNIEUR
+LABORATOIRE D'ALGORITHMES ET SYSTÈMES D'APPRENTISSAGE
+À L’INSTITUTO SUPERIOR TÉCNICO (IST) DA UNIVERSIDADE DE LISBOA
+INSTITUTO DE SISTEMA E ROBOTICA
+PROGRAMME DOCTORAL EN GÉNIE ÉLECTRIQUE
+DOUTORAMENTO EM ENGENHARIA ELECTROTÉCNICA E DE COMPUTADORES
+POUR L’OBTENTION DU GRADE DE DOCTEUR ÈS SCIENCES (PhD)
+Luka LUKIC
+Prof. A. Billard, Prof. J. Santos-Victor, directeurs de thèse
+cceptée sur proposition du jury:
+Prof. J. Faria, président du jury
+Prof. D. Vernon, rapporteur
+Prof. E. Bicho, rapporteuse
+Prof. A. Bernardino, rapporteur
+Prof. G. Sandini, rapporteur"
+19a3374ac2f917b408b4bcdca33fc9e9fd7ff260,Visual Fixation Patterns during Reciprocal Social Interaction Distinguish a Subgroup of 6-Month-Old Infants At-Risk for Autism from Comparison Infants.,"J Autism Dev Disord (2007) 37:108–121
+DOI 10.1007/s10803-006-0342-4
+O R I G I N A L P A P E R
+Visual Fixation Patterns during Reciprocal Social Interaction
+Distinguish a Subgroup of 6-Month-Old Infants At-Risk
+for Autism from Comparison Infants
+Noah Merin Æ Gregory S. Young Æ Sally Ozonoff Æ
+Sally J. Rogers
+Published online: 27 December 2006
+Ó Springer Science+Business Media, LLC 2006"
+19c53302bda8a82ec40d314a85b1713f43058a1a,Deep learning models of biological visual information processing,"Turcsány, Diána (2016) Deep learning models of
+iological visual information processing. PhD thesis,
+University of Nottingham.
+Access from the University of Nottingham repository:
+http://eprints.nottingham.ac.uk/35561/1/thesis_DianaTurcsany.pdf
+Copyright and reuse:
+The Nottingham ePrints service makes this work by researchers of the University of
+Nottingham available open access under the following conditions.
+This article is made available under the University of Nottingham End User licence and may
+e reused according to the conditions of the licence. For more details see:
+http://eprints.nottingham.ac.uk/end_user_agreement.pdf
+For more information, please contact"
+197f945b66995e4d006497808586f828f8a88a86,Part Discovery from Partial Correspondence,"Part Discovery from Partial Correspondence
+Subhransu Maji
+Gregory Shakhnarovich
+Toyota Technological Institute at Chicago, IL, USA"
+19c0c7835dba1a319b59359adaa738f0410263e8,Natural Image Statistics and Low-Complexity Feature Selection,"Natural Image Statistics and
+Low-Complexity Feature Selection
+Manuela Vasconcelos and Nuno Vasconcelos, Senior Member, IEEE"
+193c9bd069e9457ac8650a8dfd4319bb3f4afd56,Improving Person Tracking Using an Inexpensive Thermal Infrared Sensor,"Improving Person Tracking Using an Inexpensive Thermal Infrared Sensor
+Suren Kumar
+Univ. of SUNY-Buffalo
+Tim K. Marks
+Mitsubishi Electric Research Labs
+Michael Jones
+Mitsubishi Electric Research Labs"
+19cfec264e863793dd96a5f308a3b603c6b9912e,Attention-Based Ensemble for Deep Metric Learning,"Attention-based Ensemble for
+Deep Metric Learning
+Wonsik Kim, Bhavya Goyal, Kunal Chawla, Jungmin Lee, Keunjoo Kwon
+Samsung Research,
+Samsung Electronics
+{wonsik16.kim, bhavya.goyal, kunal.chawla, jm411.lee,"
+19d583bf8c5533d1261ccdc068fdc3ef53b9ffb9,FaceNet: A unified embedding for face recognition and clustering,"FaceNet: A Unified Embedding for Face Recognition and Clustering
+Florian Schroff
+Dmitry Kalenichenko
+James Philbin
+Google Inc.
+Google Inc.
+Google Inc."
+1910f5f7ac81d4fcc30284e88dee3537887acdf3,Semantic Based Hypergraph Reranking Model for Web Image Search,"Volume 6, Issue 5, May 2016 ISSN: 2277 128X
+International Journal of Advanced Research in
+Computer Science and Software Engineering
+Research Paper
+Available online at: www.ijarcsse.com
+Semantic Based Hypergraph Reranking Model for Web
+Image Search
+Amol Darkunde, 2Manoj Jalan, 3Yelmar Mahesh, 4Shivadatta Shinde, 5Dnyanda Patil
+, 2, 3, 4 B. E. Dept of CSE, 5 Asst. Prof. Dept of CSE
+, 2, 3, 4, 5 Dr.D.Y.Patil College of Engineering, Pune, Maharashtra, India"
+1936a73920c5a7eb97e8b73cb9a6096aa509e402,Robust Multi-Person Tracking from Moving Platforms,"Robust Multi-Person Tracking from Moving Platforms
+Andreas Ess1, Konrad Schindler1, Bastian Leibe1,2 and Luc van Gool1,3
+ETH Z¨urich
+KU Leuven, IBBT
+RWTH Aachen"
+19f7654f22416e6fdf430c1c873ad3e8c15e64f8,Zero-crossing based image projections encoding for eye localization,"0th European Signal Processing Conference (EUSIPCO 2012)
+© EURASIP, 2012 - ISSN 2076-1465
+. INTRODUCTION"
+197c64c36e8a9d624a05ee98b740d87f94b4040c,Regularized Greedy Column Subset Selection,"Regularized Greedy Column Subset Selection
+Bruno Ordozgoiti*a, Alberto Mozoa, Jes´us Garc´ıa L´opez de Lacalleb
+Department of Computer Systems, Universidad Polit´ecnica de Madrid
+Department of Applied Mathematics, Universidad Polit´ecnica de Madrid"
+19158dfe2815e7f9eebc5822687e83d0a89ae147,Semantic Regularisation for Recurrent Image Annotation,[cs.CV] 16 Nov 2016
+1957956856dc04ebee5815bd62874687e2af7260,Joint Optical Flow and Temporally Consistent Semantic Segmentation,"Joint Optical Flow and Temporally Consistent
+Semantic Segmentation
+Junhwa Hur and Stefan Roth
+Department of Computer Science, TU Darmstadt"
+19d4855f064f0d53cb851e9342025bd8503922e2,Learning SURF Cascade for Fast and Accurate Object Detection,"Learning SURF Cascade for Fast and Accurate Object Detection
+Jianguo Li, Yimin Zhang
+Intel Labs China"
+193ec7bb21321fcf43bbe42233aed06dbdecbc5c,Automatic 3D Facial Expression Analysis in Videos,"UC Santa Barbara
+UC Santa Barbara Previously Published Works
+Title
+Automatic 3D facial expression analysis in videos
+Permalink
+https://escholarship.org/uc/item/3g44f7k8
+Authors
+Chang, Y
+Vieira, M
+Turk, M
+et al.
+Publication Date
+005-01-01
+Peer reviewed
+eScholarship.org
+Powered by the California Digital Library
+University of California"
+19359fb238888c0eb012a4ab5c6f0fa0e9be493b,Enhanced Facial Expression Recognition using 2DPCA Principal component Analysis and Gabor Wavelets,"Enhanced Facial Expression Recognition
+using 2DPCA Principal component Analysis
+nd Gabor Wavelets.
+(1)Laboratory of Automatic and Signals Annaba (LASA) , Department of electronics, Faculty of Engineering,
+Zermi.Narima(1), Saaidia.Mohammed(2),
+Badji-Mokhtar University, P.O.Box 12, Annaba-23000, Algeria.
+E-Mail :
+(2) Département de Génie-électrique, Université M.C.M. Souk-Ahras, Algeria"
+19766585a701749fc297a5ca6b8cdc0c62d4ba1b,A Bottom-Up Approach for Pancreas Segmentation Using Cascaded Superpixels and (Deep) Image Patch Labeling,"A Bottom-up Approach for Pancreas Segmentation using
+Cascaded Superpixels and (Deep) Image Patch Labeling
+Amal Faraga, Le Lua, Holger R. Rotha, Jiamin Liua, Evrim Turkbeya, Ronald M. Summersa,∗
+Imaging Biomarkers and Computer-Aided Diagnosis Laboratory, Radiology and Imaging Sciences, National Institutes
+of Health Clinical Center, Building 10 Room 1C224D, MSC 1182, Bethesda, MD 20892-1182, United States"
+4c6d6bb5bafba9e04d8f2ce128be71fba1d1e0e8,Human parsing with a cascade of hierarchical poselet based pruners,"HUMAN PARSING WITH A CASCADE OF HIERARCHICAL POSELET BASED PRUNERS
+Duan Tran†
+Yang Wang‡
+University of Illinois at Urbana Champaign†
+David Forsyth†
+University of Manitoba‡"
+4c0ce0ed9cc92115874be4397f6240769d3ed84f,The effect of familiarity on face adaptation.,"doi:10.1068/p6774
+The effect of familiarity on face adaptation
+Sarah Laurence, Graham Hole
+School of Psychology, University of Sussex, Falmer, Brighton BN1 9QH, Sussex, UK;
+e-mail:
+Received 14 July 2010, in revised form 30 March 2011"
+4c6e1840451e1f86af3ef1cb551259cb259493ba,Hand Posture Dataset Creation for Gesture Recognition,"HAND POSTURE DATASET CREATION FOR GESTURE
+RECOGNITION
+Instituto de Sistemas Inteligentes y Aplicaciones Numericas en Ingenieria
+Luis Anton-Canalis
+Campus Universitario de Tafira, 35017 Gran Canaria, Spain
+Elena Sanchez-Nielsen
+Departamento de E.I.O. y Computacion
+8271 Universidad de La Laguna, Spain
+Keywords:
+Image understanding, Gesture recognition, Hand dataset."
+4c69da79843016d5d934464d3777030741978180,Neuromorphic Atomic Switch Networks,"Neuromorphic Atomic Switch Networks
+Audrius V. Avizienis1.
+Adam Z. Stieg2,3*, James K. Gimzewski1,2,3
+, Henry O. Sillin1.
+, Cristina Martin-Olmos1, Hsien Hang Shieh2, Masakazu Aono3,
+Department of Chemistry and Biochemistry, University of California Los Angeles, Los Angeles, California, United States of America, 2 California NanoSystems Institute,
+University of California Los Angeles, Los Angeles, California, United States of America, 3 World Premier International Center for Materials Nanoarchitectonics, National
+Institute for Materials Science, Tsukuba, Ibaraki, Japan"
+4cc5fb6cf48b2c58b283460b19f3beeb7e5b6a22,Clickage: towards bridging semantic and intent gaps via mining click logs of search engines,"Clickage: Towards Bridging Semantic and Intent Gaps
+via Mining Click Logs of Search Engines
+Xian-Sheng Hua, Linjun Yang, Jingdong Wang, Jing Wang
+Ming Ye, Kuansan Wang, Yong Rui, Jin Li
+Microsoft Corporation, One Microsoft Way, Redmond WA 98052, USA
+{xshua; linjuny; jingdw; v-wangji; mingye; kuansanw; yongrui;"
+4cf74211e635c73ca5816199ef33d10c3462beae,Review of Facial Expression Recognition System and Used Datasets,"IJRET: International Journal of Research in Engineering and Technology eISSN: 2319-1163 | pISSN: 2321-7308
+REVIEW OF FACIAL EXPRESSION RECOGNITION SYSTEM AND
+USED DATASETS
+Shyna Dutta1, V.B. Baru2,
+ME Student, Department of Electronics and Telecommunication, Sinhgad College of Engineering Vadgaon, Pune,
+Associate Professor, Department of Electronics and Telecommunication, Sinhgad College of Engineering Vadgaon,"
+4c41b774a6bdf43d980f640880cc49b82ae19b34,3D Facial Landmark Detection under Large Yaw and Expression Variations,"D Facial Landmark Detection under
+Large Yaw and Expression Variations
+Panagiotis Perakis, Member, IEEE Computer Society, Georgios Passalis,
+Theoharis Theoharis, and Ioannis A. Kakadiaris, Senior Member, IEEE"
+4cff5b5099b0227730efa9e9fd724a63dc0c0c2f,Learning Efficient Binary Codes From High-Level Feature Representations for Multilabel Image Retrieval,"Learning Efficient Binary Codes From
+High-Level Feature Representations
+for Multilabel Image Retrieval
+Lei Ma
+, Hongliang Li, Senior Member, IEEE, Fanman Meng, Member, IEEE, Qingbo Wu, Member, IEEE,
+nd King Ngi Ngan, Fellow, IEEE"
+4cdfef0fec0918dcf5c40b9b53c9e3f48be0462b,Unsupervised robotic sorting: Towards autonomous decision making robots,"Unsupervised robotic sorting:
+Towards autonomous decision making
+robots
+Joris Gu´erin, St´ephane Thiery, Eric Nyiri and Olivier Gibaru
+Arts et M´etiers ParisTech, Lille, FRANCE"
+4c4454aa7a2a244c678f507a982fe8827ba419bb,Adversarial Examples for Semantic Image Segmentation,"Workshop track - ICLR 2017
+ADVERSARIAL EXAMPLES FOR
+SEMANTIC IMAGE SEGMENTATION
+Volker Fischer1, Mummadi Chaithanya Kumar2, Jan Hendrik Metzen1 & Thomas Brox2
+Bosch Center for Artificial Intelligence, Robert Bosch GmbH
+University of Freiburg
+{volker.fischer,"
+4c797506d610525591288f813621b271ce879452,The automaticity of face perception is influenced by familiarity,"Atten Percept Psychophys (2017) 79:2202–2211
+DOI 10.3758/s13414-017-1362-1
+The automaticity of face perception is influenced by familiarity
+Xiaoqian Yan 1 & Andrew W. Young 1 & Timothy J. Andrews 1
+Published online: 5 July 2017
+# The Author(s) 2017. This article is an open access publication"
+4c5041f8b93fd71a851445e84bfca0d7d0c3bb9b,Enhancing Memory-Based Particle Filter with Detection-Based Memory Acquisition for Robustness under Severe Occlusion,"ENHANCING MEMORY-BASED PARTICLE FILTER WITH
+DETECTION-BASED MEMORY ACQUISITION FOR ROBUSTNESS
+UNDER SEVERE OCCLUSION
+Dan Mikami, Kazuhiro Otsuka, Shiro Kumano and Junji Yamato
+NTT Communication Science Laboratories, NTT, 3-1 Morinosato-Wakamiya, Atsugi, Kanagawa, 243-0198, Japan
+Keywords:
+Pose Tracking, Face Pose, Memory-based Prediction, Memory Acquisition."
+4c815f367213cc0fb8c61773cd04a5ca8be2c959,Facial expression recognition using curvelet based local binary patterns,"978-1-4244-4296-6/10/$25.00 ©2010 IEEE
+ICASSP 2010"
+4ca8ff09f24f0838022f1d0b94af4331f6e538cd,Semantic Parsing to Probabilistic Programs for Situated Question Answering,"Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 160–170,
+Austin, Texas, November 1-5, 2016. c(cid:13)2016 Association for Computational Linguistics"
+4cf17bca0e19070fbe9bb25644787f65fa6ebe1a,Human Pose Estimation,"Human pose estimation
+Leonid Sigal, Disney Research, Pittsburgh
+Synonyms
+– Articulated pose estimation
+– Body configuration recovery
+Related Concepts
+– Human pose tracking
+– People tracking
+– Articulated pose tracking
+– Body parsing
+– People parsing
+Definition
+Human pose estimation is the process of estimating the configuration of the
+ody (pose) from a single, typically monocular, image.
+Background
+Human pose estimation is one of the key problems in computer vision that
+has been studied for well over 15 years. The reason for its importance is the
+bundance of applications that can benefit from such a technology. For example,
+human pose estimation allows for higher level reasoning in the context of human-
+omputer interaction and activity recognition; it is also one of the basic building"
+4ce18536eec7917da848be6b5f783d3ee3d49677,Fast Face Detection in One Line of Code,"Fast Face Detection in One Line of Code
+Michael Zucchi, B.E. (Comp. Sys. Eng.)
+Unaliated, unfunded, personal research."
+4c1ef2a628627798939dccc072d33f9e12b48640,Advanced Hybrid Color Space Normalization for Human Face Extraction and Detection,"IJSRD - International Journal for Scientific Research & Development| Vol. 1, Issue 4, 2013 | ISSN (online): 2321-0613
+Advanced Hybrid Color Space Normalization for Human Face
+Extraction and Detection
+Jayakrishna.V1 Akhila G.P.2 Shafeena Basheer3
+, 2Faculty 3PG Student
+, 3Amal Jyothi College of Engineering, Kanjirappally
+UKF College of Engineering &Technology,Parippally
+S.P.B.Patel Engineering College, Mehsana, Gujarat
+(CSN)
+technique
+enhancing
+is contained
+in Y component, and"
+4c4e49033737467e28aa2bb32f6c21000deda2ef,Improving Landmark Localization with Semi-Supervised Learning,"Improving Landmark Localization with Semi-Supervised Learning
+Sina Honari1∗, Pavlo Molchanov2, Stephen Tyree2, Pascal Vincent1,4,5, Christopher Pal1,3, Jan Kautz2
+MILA-University of Montreal, 2NVIDIA, 3Ecole Polytechnique of Montreal, 4CIFAR, 5Facebook AI Research.
+{honaris,
+{pmolchanov, styree,"
+4c39000bbd6761dd9e5609fe310af51facb835a9,Kinects and human kinetics: A new approach for studying pedestrian behavior,"This paper might be a pre-copy-editing or a post-print author-produced .pdf of an article accepted for publication. For the
+definitive publisher-authenticated version, please refer directly to publishing house’s archive system."
+4c822705edd305d04f2c02ac9b1b73421e857961,Towards fully automated person re-identification,"Towards Fully Automated Person Re-Identification
+Matteo Taiana, Dario Figueira, Athira Nambiar, Jacinto Nascimento and Alexandre Bernardino
+Institute for Systems and Robotics, IST, Lisboa, Portugal
+Re-Identification, Pedestrian Detection, Camera Networks, Video Surveillance
+Keywords:"
+4c477ba5513ec9c629ca3442c1fee15612259905,Complex Relations in a Deep Structured Prediction Model for Fine Image Segmentation,"Complex Relations in a Deep Structured Prediction
+Model for Fine Image Segmentation
+Cristina Mata, Guy Ben-Yosef, Boris Katz
+Computer Science and Artificial Intelligence Laboratory
+{cfmata, gby,
+Center for Brains, Minds and Machines"
+4c55ea9c04d46d60ec5789f4e4c3224c41360768,Dimensionality Reduction Using Similarity-Induced Embeddings,"IEEE Copyright Notice
+Copyright c(cid:13)2017 IEEE
+Personal use of this material is permitted. Permission from
+IEEE must be obtained for all other uses, in any current or fu-
+ture media, including reprinting/republishing this material for
+dvertising or promotional purposes, creating new collective
+works, for resale or redistribution to servers or lists, or reuse
+of any copyrighted component of this work in other works.
+Published in: IEEE Transactions on Neural Networks and
+Learning Systems
+URL: http://ieeexplore.ieee.org/document/8004500
+DOI: 10.1109/TNNLS.2017.2728818
+DOI 10.1109/TNNLS.2017.2728818 c(cid:13)2017 IEEE"
+4cc675422395ed7dc7e4772280f7c57cac6fbaee,Efficient person re-identification by hybrid spatiogram and covariance descriptor,"Efficient Person Re-identification by Hybrid Spatiogram and Covariance
+Descriptor
+Mingyong Zeng, Zemin Wu, Chang Tian, Lei Zhang, and Lei Hu
+College of Communications Engineering, PLA University
+of Science and Technology, Nanjing 210007, China"
+4c1e47ba68b81d210718f837b197253164decaf0,Evaluation of Quality Factors for the Captured Facial Image,"International Journal of Computer Applications (0975 – 8887)
+Volume 142 – No.10, May 2016
+Evaluation of Quality Factors for the Captured Facial
+Image
+Abhay Goyal
+M.Tech. Student
+Department of ECE
+SBSSTC, Ferozepur, Pujnab"
+4ce68170f85560942ee51465e593b16560f9c580,Practical Matrix Completion and Corruption Recovery Using Proximal Alternating Robust Subspace Minimization,"(will be inserted by the editor)
+Practical Matrix Completion and Corruption Recovery using
+Proximal Alternating Robust Subspace Minimization
+Yu-Xiang Wang · Choon Meng Lee · Loong-Fah Cheong · Kim-Chuan Toh
+Introduction
+Completing a low-rank matrix from partially observed
+entries, also known as matrix completion, is a central
+task in many real-life applications. The same abstrac-
+tion of this problem has appeared in diverse fields such
+s signal processing, communications, information re-
+trieval, machine learning and computer vision. For in-
+stance, the missing data to be filled in may correspond
+to plausible movie recommendations (Koren et al 2009;
+Funk 2006), occluded feature trajectories for rigid or
+non-rigid structure from motion, namely SfM (Hart-
+ley and Schaffalitzky 2003; Buchanan and Fitzgibbon
+005) and NRSfM (Paladini et al 2009), relative dis-
+tances of wireless sensors (Oh et al 2010), pieces of un-
+ollected measurements in DNA micro-array (Friedland
+et al 2006), just to name a few."
+4c81c76f799c48c33bb63b9369d013f51eaf5ada,Multi-modal Score Fusion and Decision Trees for Explainable Automatic Job Candidate Screening from Video CVs,"Multi-modal Score Fusion and Decision Trees for Explainable Automatic Job
+Candidate Screening from Video CVs
+Heysem Kaya1, Furkan G¨urpınar2, and Albert Ali Salah2
+Department of Computer Engineering, Namık Kemal University, Tekirda˘g, Turkey
+Department of Computer Engineering, Bo˘gazic¸i University, Istanbul, Turkey"
+4c1ce6bced30f5114f135cacf1a37b69bb709ea1,Gaze direction estimation by component separation for recognition of Eye Accessing Cues,"Gaze Direction Estimation by Component Separation for
+Recognition of Eye Accessing Cues
+Ruxandra Vrˆanceanu
+Image Processing and Analysis Laboratory
+University ”Politehnica” of Bucharest, Romania, Address Splaiul Independent¸ei 313
+Corneliu Florea
+Image Processing and Analysis Laboratory
+University ”Politehnica” of Bucharest, Romania, Address Splaiul Independent¸ei 313
+Laura Florea
+Image Processing and Analysis Laboratory
+University ”Politehnica” of Bucharest, Romania, Address Splaiul Independent¸ei 313
+Constantin Vertan
+Image Processing and Analysis Laboratory
+University ”Politehnica” of Bucharest, Romania, Address Splaiul Independent¸ei 313"
+4cfa2fe87c250534fd2f285c2300e7ca2cd9e325,"Visual, Auditory, and Cross Modal Sensory Processing in Adults with Autism: An EEG Power and BOLD fMRI Investigation","ORIGINAL RESEARCH
+published: 19 April 2016
+doi: 10.3389/fnhum.2016.00167
+Visual, Auditory, and Cross Modal
+Sensory Processing in Adults with
+Autism: An EEG Power and BOLD
+fMRI Investigation
+Elizabeth’ C. Hames1, Brandi Murphy2, Ravi Rajmohan3, Ronald C. Anderson1,
+Mary Baker1*, Stephen Zupancic2, Michael O’Boyle4 and David Richman5
+Department of Electrical and Computer Engineering, Texas Tech University, Lubbock, TX, USA, 2 Department of Audiology,
+Texas Tech University Health Sciences Center, Lubbock, TX, USA, 3 Department of Pharmacology and Neuroscience, Texas
+Tech University Health Sciences Center, Lubbock, TX, USA, 4 College of Human Sciences, Texas Tech University, Lubbock,
+TX, USA, 5 Burkhart Center for Autism Education and Research, Texas Tech University, Lubbock, TX, USA
+Electroencephalography (EEG) and blood oxygen level dependent functional magnetic
+resonance imagining (BOLD fMRI) assessed the neurocorrelates of sensory processing
+of visual and auditory stimuli
+in 11 adults with autism (ASD) and 10 neurotypical (NT)
+ontrols between the ages of 20–28. We hypothesized that ASD performance on
+ombined audiovisual trials would be less accurate with observable decreased EEG
+power across frontal, temporal, and occipital channels and decreased BOLD fMRI"
+4c88e41424022c7c5f111d34d931fae15f52a551,"CUR Decompositions, Similarity Matrices, and Subspace Clustering","CUR Decompositions, Similarity Matrices, and
+Subspace Clustering
+Akram Aldroubi, Keaton Hamm, Ahmet Bugra Koku, and Ali Sekmen"
+4cfae149d6acd8cffc12c06ed796f1f84dce0e73,Face Recognition Based on Image Latent Semantic Analysis Model and SVM,"International Journal of Signal Processing, Image Processing and Pattern Recognition
+Vol. 6, No. 3, June, 2013
+Face Recognition Based on Image Latent Semantic Analysis Model
+nd SVM
+Jucheng Yang 1, 2, Min Luo3 and Yanbin Jiao4
+Ahead Software Company Limited, Nanchang, 330041, China
+College of Computer Science and Information Engineering, Tianjin University of
+Science and Technology, Tianjin, China
+Jiangxi Institute of Computing Technology, Nanchang, China
+School of Information Technology, Jiangxi University of Finance and Economics,
+Nanchang, China"
+4cfdd0c8313ac4f92845dcd658115beb115b97ce,Multi-Task Learning as Multi-Objective Optimization,"Multi-Task Learning as Multi-Objective Optimization
+Ozan Sener
+Intel Labs
+Vladlen Koltun
+Intel Labs"
+4c863a15c4da0d0ccd20c5897a4e33fb771fe3eb,The effect of forced choice on facial emotion recognition: a comparison to open verbal classification of emotion labels,"OPEN ACCESS
+Research Article
+The effect of forced choice on facial emotion recognition:
+comparison to open verbal classification of emotion
+labels
+Der Effekt eines geschlossenen Antwortformats auf die mimische
+Emotionserkennung: ein Vergleich mit der freien verbale Zuordnung von
+Emotionswörtern
+Kerstin
+Limbrecht-Ecklundt1
+Andreas Scheck1
+Lucia Jerg-Bretzke1
+Steffen Walter1
+Holger Hoffmann1
+Harald C. Traue1
+University of Ulm, University
+Clinic of Psychosomatic
+Medicine and Psychotherapy,
+Medical Psychology, Ulm,
+Germany"
+4c05dc45b82b79e87f7b337ccf9f48d537c0e6e2,Exploring Heterogeneity within a Core for Improved Power Efficiency,"Exploring Heterogeneity within a Core for
+Improved Power Efficiency
+Sudarshan Srinivasan, Nithesh Kurella, Israel Koren, Fellow, IEEE, and Sandip Kundu, Fellow, IEEE"
+2608a2499819053468f4e6f77a715c2dbfefdfb0,Object Classification using Hybrid Holistic Descriptors: Application to Building Detection in Aerial Orthophotos,"Object Classification using Hybrid Holistic
+Descriptors: Application to Building Detection
+in Aerial Orthophotos
+Fadi Dornaika, Abdelmalik Moujahid, Alireza Bosaghzadeh, Youssef El Merabet, and Yassine Ruichek"
+26172460c2c47886f8b0e141c15de29c9766bfbe,An Iterative Co-Saliency Framework for RGBD Images,"IEEE TRANSACTIONS ON CYBERNETICS, VOL. XX, NO. XX, XXXX 2017
+An Iterative Co-Saliency Framework for RGBD
+Images
+Runmin Cong, Jianjun Lei, Senior Member, IEEE, Huazhu Fu, Weisi Lin, Fellow, IEEE,
+Qingming Huang, Senior Member, IEEE, Xiaochun Cao, Senior Member, IEEE, and Chunping Hou"
+2603efdc673e9c7cfa0c1e1dfda512b6ef54ea2c,On the Use of Simple Geometric Descriptors Provided by RGB-D Sensors for Re-Identification,"Sensors 2013, 13, 8222-8238; doi:10.3390/s130708222
+OPEN ACCESS
+sensors
+ISSN 1424-8220
+www.mdpi.com/journal/sensors
+Article
+On the Use of Simple Geometric Descriptors Provided by
+RGB-D Sensors for Re-Identification
+Javier Lorenzo-Navarro *, Modesto Castrill´on-Santana and Daniel Hern´andez-Sosa
+SIANI, Universidad de Las Palmas de Gran Canaria, Campus de Tafira,
+Las Palmas de Gran Canaria 35017, Spain; E-Mails: (M.C.-S.);
+(D.H.-S.)
+* Author to whom correspondence should be addressed; E-Mail:
+Tel.: +34-928-458-747.
+Received: 25 March 2013; in revised form: 7 June 2013 / Accepted: 20 June 2013 /
+Published: 27 June 2013"
+2661f38aaa0ceb424c70a6258f7695c28b97238a,Multilayer Architectures for Facial Action Unit Recognition,"IEEE TRANSACTIONS ON SYSTEMS, MAN, AND CYBERNETICS—PART B: CYBERNETICS, VOL. 42, NO. 4, AUGUST 2012
+Multilayer Architectures for Facial
+Action Unit Recognition
+Tingfan Wu, Nicholas J. Butko, Paul Ruvolo, Jacob Whitehill, Marian S. Bartlett, and Javier R. Movellan"
+2603d8578a6c95a9b9d4cb8a73bc66f18d523f37,Deep Parts Similarity Learning for Person Re-Identification,
+264a84f4d27cd4bca94270620907cffcb889075c,Deep motion features for visual tracking,"Deep Motion Features for Visual Tracking
+Susanna Gladh, Martin Danelljan, Fahad Shahbaz Khan, Michael Felsberg
+Computer Vision Laboratory, Department of Electrical Engineering, Link¨oping University, Sweden"
+2677a79b6381f3e7787c5dca884fa53d0b28dfe2,Supplementary Document : Single-Shot Multi-Person 3 D Pose Estimation From Monocular RGB 1,"Supplementary Document:
+Single-Shot Multi-Person 3D Pose
+Estimation From Monocular RGB
+. Read-out Process
+An algorithmic description of the read-out process
+is provided in Alg. 1.
+Algorithm 1 3D Pose Inference
+: Given: P 2D, C2D, M
+: for all i ∈ (1..m) do
+if C2D
+[k] > thresh, k ∈ {pelvis, neck} then
+Person i is detected
+for all joints j ∈ (1..n) do
+rloc = P2D
+Pi[:, j] = ReadLocMap(j, rloc)
+limbs
+{arml, armr, legl, legr, head} do
+{pelvis, neck}; j = parent(j) do
+j = getExtremity(l); j
+if isValidReadoutLoc(i, j) then"
+266b5b038750e1ab1311e38554e4c2c8ba6564fd,SLIC Superpixels Compared to State-of-the-Art Superpixel Methods,"JOURNAL OF LATEX CLASS FILES, VOL. 6, NO. 1, DECEMBER 2011
+SLIC Superpixels Compared to State-of-the-art
+Superpixel Methods
+Radhakrishna Achanta, Appu Shaji, Kevin Smith,
+Aurelien Lucchi, Pascal Fua, and Sabine S¨usstrunk"
+26a6b2051fe7970f94584e9efbfcf7bdcfd1d6d6,Diffeomorphic image registration with applications to deformation modelling between multiple data sets,"Diffeomorphic image registration
+with applications to deformation
+modelling between multiple data sets
+Bartłomiej Władysław Papież
+A thesis submitted in partial fulfilment
+for the requirements of the degree
+of Doctor of Philosophy
+The research presented in this thesis was carried out at the
+Applied Digital Signal and Image Processing Research Centre,
+School of Computing, Engineering and Physical Sciences,
+University of Central Lancashire,
+October 2012"
+26a32691321574ac1c90c58f47ec73fdfbc8507a,SATURN (Situational awareness tool for urban responder networks),"SATURN
+(Situational Awareness Tool for Urban Responder Networks)
+Heather Zwahlen
+Aaron Yahr
+Danielle Berven
+Michael T. Chan
+Maximilian Merfeld
+Christine Russ
+Jason Thornton
+MIT Lincoln Laboratory
+Lexington, MA
+{heatherz | ayahr | danielle.berven | mchan | max.merfeld
+| christine russ |"
+265644f1b6740ca34bfbe9762b90b33021adde62,Deep Learning in Medical Imaging: General Overview.,"Review Article | Experiment, Engineering, and Physics
+https://doi.org/10.3348/kjr.2017.18.4.570
+pISSN 1229-6929 · eISSN 2005-8330
+Korean J Radiol 2017;18(4):570-584
+Deep Learning in Medical Imaging: General Overview
+June-Goo Lee, PhD1, Sanghoon Jun, PhD2, 3, Young-Won Cho, MS2, 3, Hyunna Lee, PhD2, 3,
+Guk Bae Kim, PhD2, 3, Joon Beom Seo, MD, PhD2*, Namkug Kim, PhD2, 3*
+Biomedical Engineering Research Center, University of Ulsan College of Medicine, Asan Medical Center, Seoul 05505, Korea; 2Department of
+Radiology, Research Institute of Radiology, University of Ulsan College of Medicine, Asan Medical Center, Seoul 05505, Korea; 3Department of
+Convergence Medicine, Biomedical Engineering Research Center, University of Ulsan College of Medicine, Asan Medical Center, Seoul 05505, Korea
+The artificial neural network (ANN)–a machine learning technique inspired by the human neuronal synapse system–was
+introduced in the 1950s. However, the ANN was previously limited in its ability to solve actual problems, due to the vanishing
+gradient and overfitting problems with training of deep architecture, lack of computing power, and primarily the absence of
+sufficient data to train the computer system. Interest in this concept has lately resurfaced, due to the availability of big data,
+enhanced computing power with the current graphics processing units, and novel algorithms to train the deep neural network.
+Recent studies on this technology suggest its potentially to perform better than humans in some visual and auditory recognition
+tasks, which may portend its applications in medicine and healthcare, especially in medical imaging, in the foreseeable future.
+This review article offers perspectives on the history, development, and applications of deep learning technology, particularly
+regarding its applications in medical imaging.
+Keywords: Artificial intelligence; Machine learning; Convolutional neural network; Recurrent Neural Network; Computer-aided;"
+267bb08aa4eeefa1ef653716ca0ab572748a3a4e,Vision-Based Real-Time Aerial Object Localization and Tracking for UAV Sensing System,"Vision-based Real-Time Aerial Object Localization
+nd Tracking for UAV Sensing System
+Yuanwei Wu, Student Member,
+IEEE, Yao Sui, Member, IEEE, and Guanghui Wang, Member, IEEE"
+26a72e9dd444d2861298d9df9df9f7d147186bcd,Collecting and annotating the large continuous action dataset,"DOI 10.1007/s00138-016-0768-4
+ORIGINAL PAPER
+Collecting and annotating the large continuous action dataset
+Daniel Paul Barrett1 · Ran Xu2 · Haonan Yu1 · Jeffrey Mark Siskind1
+Received: 18 June 2015 / Revised: 18 April 2016 / Accepted: 22 April 2016 / Published online: 21 May 2016
+© The Author(s) 2016. This article is published with open access at Springerlink.com"
+269c1f9df4a36b361d32bfdc81457b0a32b60966,Dimensionality Reduction of Visual Features for Efficient Retrieval and Classification,"SIP (2016), vol. 5, e14, page 1 of 14 © The Authors, 2016.
+This is an Open Access article, distributed under the terms of the Creative Commons Attribution licence (http://creativecommons.org/licenses/by/4.0/), which permits unre-
+stricted re-use, distribution, and reproduction in any medium, provided the original work is properly cited.
+doi:10.1017/ATSIP.2016.14
+industrial technology advances
+Dimensionality reduction of visual features
+for efficient retrieval and classification
+petros t. boufounos1, hassan mansour1, shantanu rane2 and anthony vetro1
+Visual retrieval and classification are of growing importance for a number of applications, including surveillance, automotive,
+s well as web and mobile search. To facilitate these processes, features are often computed from images to extract discriminative
+spects of the scene, such as structure, texture or color information. Ideally, these features would be robust to changes in per-
+spective, illumination, and other transformations. This paper examines two approaches that employ dimensionality reduction
+for fast and accurate matching of visual features while also being bandwidth-efficient, scalable, and parallelizable. We focus on
+two classes of techniques to illustrate the benefits of dimensionality reduction in the context of various industrial applications.
+The first method is referred to as quantized embeddings, which generates a distance-preserving feature vector with low rate. The
+second method is a low-rank matrix factorization applied to a sequence of visual features, which exploits the temporal redun-
+dancy among feature vectors associated with each frame in a video. Both methods discussed in this paper are also universal in
+that they do not require prior assumptions about the statistical properties of the signals in the database or the query. Further-
+more, they enable the system designer to navigate a rate versus performance trade-off similar to the rate-distortion trade-off in
+onventional compression."
+26861e41e5b44774a2801e1cd76fd56126bbe257,Personalized Tour Recommendation Based on User Interests and Points of Interest Visit Durations,"Personalized Tour Recommendation based on User Interests and Points of Interest
+Visit Durations
+Kwan Hui Lim*†, Jeffrey Chan*, Christopher Leckie*† and Shanika Karunasekera*
+*Department of Computing and Information Systems, The University of Melbourne, Australia
+Victoria Research Laboratory, National ICT Australia, Australia"
+266766818dbc5a4ca1161ae2bc14c9e269ddc490,Boosting a Low-Cost Smart Home Environment with Usage and Access Control Rules,"Article
+Boosting a Low-Cost Smart Home Environment with
+Usage and Access Control Rules
+Paolo Barsocchi * ID , Antonello Calabrò, Erina Ferro, Claudio Gennaro ID and Eda Marchetti and
+Claudio Vairo
+Institute of Information Science and Technologies of CNR (CNR-ISTI)-Italy, 56124 Pisa, Italy;
+(A.C.); (E.F.); (C.G.);
+(E.M.); (C.V.)
+* Correspondence: Tel.: +39-050-315-2965
+Received: 27 April 2018; Accepted: 31 May 2018; Published: 8 June 2018"
+2606e6a5759c030e259ebf3f4261b9c04a36a609,Generating Semantically Precise Scene Graphs from Textual Descriptions for Improved Image Retrieval,"Proceedings of the 2015 Workshop on Vision and Language (VL’15), pages 70–80,
+Lisbon, Portugal, 18 September 2015. c(cid:13)2015 Association for Computational Linguistics."
+265af79627a3d7ccf64e9fe51c10e5268fee2aae,A Mixture of Transformed Hidden Markov Models for Elastic Motion Estimation,"A Mixture of Transformed Hidden Markov
+Models for Elastic Motion Estimation
+Huijun Di, Linmi Tao, and Guangyou Xu, Senior Member, IEEE"
+267595dd40cd109c93e67874a1cf49ce79871f3a,A Compromise Principle in Deep Monocular Depth Estimation,"A Compromise Principle in Deep Monocular Depth
+Estimation
+Huan Fu, Mingming Gong, Chaohui Wang, and Dacheng Tao, Fellow, IEEE"
+26c89f890da91119ffa16d5a23fba963257ef3fc,Tattoo Image Search at Scale: Joint Detection and Compact Representation Learning,"Tattoo Image Search at Scale: Joint Detection
+nd Compact Representation Learning
+Hu Han, Member, IEEE, Jie Li, Anil K. Jain, Fellow, IEEE,
+Shiguang Shan, Senior Member, IEEE and Xilin Chen, Fellow, IEEE"
+26af867977f90342c9648ccf7e30f94470d40a73,Joint Gender and Face Recognition System for RGB-D Images with Texture and DCT Features,"IJIRST –International Journal for Innovative Research in Science & Technology| Volume 3 | Issue 04 | September 2016
+ISSN (online): 2349-6010
+Joint Gender and Face Recognition System for
+RGB-D Images with Texture and DCT Features
+Jesny Antony
+PG Student
+Department of Computer Science & Information Systems
+Federal Institute of Science and Technology, Mookkannoor
+PO, Angamaly, Ernakulam, Kerala 683577, India
+Prasad J. C.
+Associate Professor
+Department of Computer Science & Engineering
+Federal Institute of Science and Technology, Mookkannoor
+PO, Angamaly, Ernakulam, Kerala 683577, India"
+2663fa2f1777dc779a73d678c7919cce37b5fb61,Relevance - Weighted ( 2 D ) 2 LDA Image Projection Technique for Face Recognition,"Relevance-Weighted (2D)2LDA
+Image Projection Technique for Face Recognition
+In this paper, a novel image projection technique for
+face recognition application is proposed which is based on
+linear discriminant analysis (LDA) combined with the
+relevance-weighted (RW) method. The projection is
+performed through 2-directional and 2-dimensional LDA,
+or (2D)2LDA, which simultaneously works in row and
+olumn directions to solve the small sample size problem.
+Moreover, a weighted discriminant hyperplane is used in
+the between-class scatter matrix, and an RW method is
+used in the within-class scatter matrix to weigh the
+information to resolve confusable data in these classes.
+This technique is called the relevance-weighted (2D)2LDA,
+or RW(2D)2LDA, which is used for a more accurate
+discriminant decision than that produced by the
+onventional LDA or 2DLDA. The proposed technique
+has been successfully tested on four face databases.
+Experimental results
+the proposed"
+26c884829897b3035702800937d4d15fef7010e4,Facial Expression Recognition by Supervised Independent Component Analysis Using MAP Estimation,"IEICE TRANS. INF. & SYST., VOL.Exx–??, NO.xx XXXX 200x
+PAPER
+Facial Expression Recognition by Supervised Independent
+Component Analysis using MAP Estimation
+Fan CHEN
+, Nonmember and Kazunori KOTANI
+, Member
+SUMMARY Permutation ambiguity of the classical Inde-
+pendent Component Analysis (ICA) may cause problems in fea-
+ture extraction for pattern classification. Especially when only a
+small subset of components is derived from data, these compo-
+nents may not be most distinctive for classification, because ICA
+is an unsupervised method. We include a selective prior for de-
+mixing coef‌f‌icients into the classical ICA to alleviate the problem.
+Since the prior is constructed upon the classification information
+from the training data, we refer to the proposed ICA model with
+selective prior as a supervised ICA (sICA). We formulated the
+learning rule for sICA by taking a Maximum a Posteriori (MAP)
+scheme and further derived a fixed point algorithm for learning
+the de-mixing matrix. We investigate the performance of sICA"
+26cdb9b6d94c1d6c6a01792fee3c176585f594ac,Hybrid Person Detection and Tracking in H.264/AVC Video Streams,"Hybrid Person Detection and Tracking in H.264/AVC Video Streams
+Philipp Wojaczek1, Marcus Laumer1,2, Peter Amon2, Andreas Hutter2 and André Kaup1
+Multimedia Communications and Signal Processing,
+Friedrich-Alexander-Universität Erlangen-Nürnberg (FAU), Erlangen, Germany
+Imaging and Computer Vision, Siemens Corporate Technology, Munich, Germany
+Keywords:
+Object Detection, Person Detection, Tracking, Compressed Domain, Pixel Domain, H.264/AVC, Mac-
+roblocks, Compression, Color Histogram, Hue, HSV, Segmentation."
+26ad6ceb07a1dc265d405e47a36570cb69b2ace6,Neural Correlates of Cross-Cultural Adaptation,"RESEARCH AND EXPLOR ATORY
+DEVELOPMENT DEPARTMENT
+REDD-2015-384
+Neural Correlates of Cross-Cultural
+How to Improve the Training and Selection for
+Military Personnel Involved in Cross-Cultural
+Operating Under Grant #N00014-12-1-0629/113056
+Adaptation
+September, 2015
+Interactions
+Jonathon Kopecky
+Jason Spitaletta
+Mike Wolmetz
+Alice Jackson
+Prepared for:
+Office of Naval Research"
+26ad124271c118e207113ae42f0fd3d30f204ea1,State of the Art Report on Video-Based Graphics and Video Visualization,"General Copyright Notice
+The documents distributed by this server have been provided by the contributing authors as a means to ensure timely
+dissemination of scholarly and technical work on a noncommercial basis. Copyright and all rights therein are maintained by the
+uthors or by other copyright holders, notwithstanding that they have offered their works here electronically. It is understood that
+ll persons copying this information will adhere to the terms and constraints invoked by each author's copyright. These works
+may not be reposted without the explicit permission of the copyright holder.
+R. Borgo, M. Chen, B. Daubney, E. Grundy, G. Heidemann, B. Höferlin, M. Höferlin, H. Leitte, D.
+Weiskopf, X. Xie:
+State of the Art Report on Video-Based Graphics and Video Visualization,
+Computer Graphics Forum, Vol. 31, No. 8, 2450-2477, 2012.
+DOI: 10.1111/j.1467-8659.2012.03158.x
+This is the author’s personal copy of the final, accepted version of the paper, which slightly differs from
+the version published in Computer Graphics Form.
+Copyright © 2012 The Eurographics Association and Blackwell Publishing Ltd.
+Preprint"
+260081528f19f6f7e8e5ae16a776b62ad8c2ed0d,An Agent Based WCET Analysis for Top-View Person Re-Identification,"An agent-based WCET analysis for Top-View
+Person Re-Identification
+Marina Paolanti, Valerio Placidi,
+Michele Bernardini, Andrea Felicetti, Rocco Pietrini, and
+Emanuele Frontoni
+Department of Information Engineering, Universit`a Politecnica delle Marche,
+Via Brecce Bianche 12, 60131, Ancona, Italy"
+26f5b8a79fac681ffb132c4863c51a55bc2b20e2,Visual speech synthesis from 3D mesh sequences driven by combined speech features,"VISUAL SPEECH SYNTHESIS FROM 3D MESH SEQUENCES DRIVEN BY COMBINED
+SPEECH FEATURES
+Felix Kuhnke and J¨orn Ostermann
+Institut f¨ur Informationsverarbeitung, Leibniz Universit¨at Hannover, Germany"
+26437fb289cd7caeb3834361f0cc933a02267766,Innovative Assessment Technologies: Comparing ‘Face-to-Face’ and Game-Based Development of Thinking Skills in Classroom Settings,"2012 International Conference on Management and Education Innovation
+IPEDR vol.37 (2012) © (2012) IACSIT Press, Singapore
+Innovative Assessment Technologies: Comparing ‘Face-to-Face’ and
+Game-Based Development of Thinking Skills in Classroom Settings
+Gyöngyvér Molnár 1 + and András Lőrincz 2
+University of Szeged, 2 Eötvös Loránd University"
+2690264001ccd4b682b7b4c0334c80af6f5e9c9c,Sensor Transfer: Learning Optimal Sensor Effect Image Augmentation for Sim-to-Real Domain Adaptation,"Sensor Transfer: Learning Optimal Sensor Effect Image Augmentation
+for Sim-to-Real Domain Adaptation
+Alexandra Carlson1, Katherine A. Skinner1, Ram Vasudevan2 and Matthew Johnson-Roberson3"
+26e570049aaedcfa420fc8c7b761bc70a195657c,Hybrid Facial Regions Extraction for Micro-expression Recognition System,"J Sign Process Syst
+DOI 10.1007/s11265-017-1276-0
+Hybrid Facial Regions Extraction for Micro-expression
+Recognition System
+Sze-Teng Liong1,2,3 · John See4 · Raphael C.-W. Phan2 · KokSheik Wong5 ·
+Su-Wei Tan2
+Received: 2 February 2016 / Revised: 20 October 2016 / Accepted: 10 August 2017
+© Springer Science+Business Media, LLC 2017"
+264dcfb5be3f89dc0950472a2a274ef7b641b1af,Dynamic Objects Segmentation for Visual Localization in Urban Environments,"Dynamic Objects Segmentation for Visual
+Localization in Urban Environments
+G. Zhou1, B. Bescos2, M. Dymczyk1, M. Pfeiffer1, J. Neira2, R. Siegwart1"
+21b0b2f5df87318912d58d3b843da363a4fb91c3,"Distributed and Higher-Order Graphical Models: towards Segmentation, Tracking, Matching and 3D Model Inference Defended by","ECOLECENTRALEPARISPHDTHESIStoobtainthetitleofDoctorofEcoleCentraleParisSpecialty:APPLIEDMATHEMATICSDistributedandHigher-OrderGraphicalModels:towardsSegmentation,Tracking,Matchingand3DModelInferenceDefendedbyChaohuiWANGpreparedatEcoleCentraleParis,MASlaboratorydefendedonSeptember29,2011JURYChairman:Prof.HenriMAITRE-TélécomParisTechReviewers:Prof.MichaelJ.BLACK-MaxPlanckInstituteforIntelligentSystemsProf.PhilipH.S.TORR-OxfordBrookesUniversityAdvisor:Prof.NikosPARAGIOS-EcoleCentraleParisExaminers:Prof.PatrickBOUTHEMY-INRIA-RennesProf.VladimirKOLMOGOROV-InstituteofScienceandTechnologyAustriaProf.DimitrisSAMARAS-StonyBrookUniversity"
+21ef129c063bad970b309a24a6a18cbcdfb3aff5,Individual and Inter-related Action Unit Detection in Videos for Affect Recognition,"POUR L'OBTENTION DU GRADE DE DOCTEUR ÈS SCIENCESacceptée sur proposition du jury:Dr J.-M. Vesin, président du juryProf. J.-Ph. Thiran, Prof. D. Sander, directeurs de thèseProf. M. F. Valstar, rapporteurProf. H. K. Ekenel, rapporteurDr S. Marcel, rapporteurIndividual and Inter-related Action Unit Detection in Videos for Affect RecognitionTHÈSE NO 6837 (2016)ÉCOLE POLYTECHNIQUE FÉDÉRALE DE LAUSANNEPRÉSENTÉE LE 19 FÉVRIER 2016À LA FACULTÉ DES SCIENCES ET TECHNIQUES DE L'INGÉNIEURLABORATOIRE DE TRAITEMENT DES SIGNAUX 5PROGRAMME DOCTORAL EN GÉNIE ÉLECTRIQUE Suisse2016PARAnıl YÜCE"
+218b2c5c9d011eb4432be4728b54e39f366354c1,Enhancing Training Collections for Image Annotation: An Instance-Weighted Mixture Modeling Approach,"Enhancing Training Collections for Image
+Annotation: An Instance-Weighted Mixture
+Modeling Approach
+Neela Sawant, Student Member, IEEE, James Z. Wang, Senior Member, IEEE, Jia Li, Senior Member, IEEE."
+21967faefa55857c6a09f9fe52a10a394757d59c,Emotion Recognition Ability Test Using JACFEE Photos: A Validity/Reliability Study of a War Veterans' Sample and Their Offspring,"RESEARCH ARTICLE
+Emotion Recognition Ability Test Using
+JACFEE Photos: A Validity/Reliability Study of
+a War Veterans' Sample and Their Offspring
+Ivone Castro-Vale1,5*, Milton Severo2,3, Davide Carvalho4,5, Rui Mota-Cardoso1
+Medical Psychology Unit, Department of Clinical Neurosciences and Mental Health, Faculty of Medicine,
+University of Porto, Porto, Portugal, 2 Department of Clinical Epidemiology, Predictive Medicine and Public
+Health, Faculty of Medicine, University of Porto, Porto, Portugal, 3 Department of Medical Education and
+Simulation, Faculty of Medicine, University of Porto, Porto, Portugal, 4 Department of Endocrinology,
+Diabetes and Metabolism, Centro Hospitalar Sāo Joāo, Faculty of Medicine, University of Porto, Porto,
+Portugal, 5 Instituto de Investigação e Inovação em Saúde, Universidade do Porto, Porto, Portugal
+11111"
+21262e01039e5994114b4c102fc80e9afa3f1bde,Pedestrian Detection and Tracking in Thermal Images from Aerial MPEG Videos,
+21679eb7e953bd132803703c27dcd56484d497e6,"Autism, oxytocin and interoception","Neuroscience and Biobehavioral Reviews 47 (2014) 410–430
+Contents lists available at ScienceDirect
+Neuroscience
+Biobehavioral
+Reviews
+j o u r n a l h o m e p a g e : w w w . e l s e v i e r . c o m / l o c a t e / n e u b i o r e v
+Review
+Autism, oxytocin and interoception
+E. Quattrocki∗, Karl Friston 1
+The Wellcome Trust Centre for Neuroimaging, UCL, 12 Queen Square, London WC1N 3BG, UK
+Article history:
+Received 5 February 2014
+Received in revised form 23 July 2014
+Accepted 20 September 2014
+Available online 30 September 2014
+Keywords:
+Autism
+Oxytocin
+Interoception
+Bayesian predictive coding"
+2162654cb02bcd10794ae7e7d610c011ce0fb51b,Joint gaze-correction and beautification of DIBR-synthesized human face via dual sparse coding,"978-1-4799-5751-4/14/$31.00 ©2014 IEEE
+http://www.skype.com/
+http://www.google.com/hangouts/
+tification, sparse coding"
+21f3c5b173503185c1e02a3eb4e76e13d7e9c5bc,Rotation Invariant Real-time Face Detection and Recognition System,"m a s s a c h u s e t t s i n s t i t u t e o f
+t e c h n o l o g y — a r t i f i c i a l i n t e l l i g e n c e l a b o r a t o r y
+Rotation Invariant Real-time
+Face Detection and
+Recognition System
+Purdy Ho
+AI Memo 2001-010
+CBCL Memo 197
+May 31, 2001
+© 2 0 0 1 m a s s a c h u s e t t s i n s t i t u t e o f
+t e c h n o l o g y, c a m b r i d g e , m a 0 2 1 3 9 u s a — w w w. a i . m i t . e d u"
+214db8a5872f7be48cdb8876e0233efecdcb6061,Semantic-Aware Co-Indexing for Image Retrieval,"Semantic-aware Co-indexing for Image Retrieval
+Shiliang Zhang2, Ming Yang1, Xiaoyu Wang1, Yuanqing Lin1, Qi Tian2
+NEC Laboratories America, Inc.
+Dept. of CS, Univ. of Texas at San Antonio
+Cupertino, CA 95014
+San Antonio, TX 78249"
+219b7b157f2a559ecdffe21c2a0edf5285931298,Deep hashing for compact binary codes learning,"Deep Hashing for Compact Binary Codes Learning
+Venice Erin Liong1, Jiwen Lu1, Gang Wang1,2, Pierre Moulin1,3, and Jie Zhou4
+ADSC, Singapore, 2NTU, Singapore, 3UIUC, USA, 4Tsinghua University, China
+Large scale visual search has attracted great attention in computer vision
+due to its wide potential applications [1]. Hashing is a powerful technique
+for large-scale visual search and a variety of hashing-based methods have
+been proposed in the literature [3, 4, 7]. The basic idea of hashing-based
+approach is to construct a series of hash functions to map each visual object
+into a binary feature vector so that visually similar samples are mapped into
+similar binary codes.
+In this paper, we propose a new deep hashing (DH) method to learn
+compact binary codes for large scale visual search. Figure 1 illustrates the
+basic idea of the proposed approach. Different from most existing binary
+codes learning methods which usually seek a single linear projection to map
+each sample into a binary vector [2, 5, 6], we develop a deep neural network
+to seek multiple hierarchical non-linear transformations to learn these bina-
+ry codes. For a given sample xn, we obtain a binary vector bn by passing
+it to a network which contains multiple stacked layers of nonlinear trans-
+formations. Assume we have M + 1 layers, the output for the mth layer is:
+n = s(Wmhm−1"
+2129304075990cd2f3317ea67a2acf52b7d7a3e2,Face Recognition and Detection through Similarity Measurements,"International Journal of Computer Applications (0975 – 8887)
+Volume 174 – No.3, September 2017
+Face Recognition and Detection through Similarity
+Measurements
+Irfan Bashir
+M.Tech( CSE) Schoral
+SMVDU, Kakryal Katra, Jummu"
+21e82350472bf6a12af0f761b8dea91cb16bf42f,Cost-Sensitive Convolution based Neural Networks for Imbalanced Time-Series Classification,"Cost-Sensitive Convolution based Neural
+Networks for Imbalanced Time-Series
+Classification
+Yue Geng* and Xinyu Luo
+Mechanical and Electrical Engineering Institute of CUMTB, Beijing, 100083, China
+E-mail:"
+214ac8196d8061981bef271b37a279526aab5024,Face Recognition Using Smoothed High-Dimensional Representation,"Face Recognition Using Smoothed High-Dimensional
+Representation
+Juha Ylioinas, Juho Kannala, Abdenour Hadid, and Matti Pietik¨ainen
+Center for Machine Vision Research, PO Box 4500,
+FI-90014 University of Oulu, Finland"
+218595e1979007ccd6b1bc5a30a3484841c0eafa,Discovering Beautiful Attributes for Aesthetic Image Analysis,"Noname manuscript No.
+(will be inserted by the editor)
+Discovering beautiful attributes for aesthetic image analysis
+Luca Marchesotti · Naila Murray · Florent Perronnin
+Received: date / Accepted: date"
+21913787b7ed62773926a287b60308d1960e6966,LR-CNN for fine-grained classification with varying resolution,"LR-CNN FOR FINE-GRAINED CLASSIFICATION WITH VARYING RESOLUTION
+M. Chevalier(1,2), N. Thome(1), M. Cord(1), J. Fournier(2), G. Henaff(2), E. Dusch(2)
+(1) Sorbonne Universit´es, UPMC Univ Paris 06, LIP6, 4 place Jussieu 75005 Paris, France
+(2) Thales Optronique S.A.S., 2 avenue Gay-Lussac, 78990 Elancourt, France"
+218603147709344d4ff66625d83603deee2854bf,Learning Deep Embeddings with Histogram Loss,"Learning Deep Embeddings with Histogram Loss
+Evgeniya Ustinova and Victor Lempitsky
+Skolkovo Institute of Science and Technology (Skoltech)
+Moscow, Russia"
+213a579af9e4f57f071b884aa872651372b661fd,Automatic and Efficient Human Pose Estimation for Sign Language Videos,"Int J Comput Vis
+DOI 10.1007/s11263-013-0672-6
+Automatic and Efficient Human Pose Estimation for Sign
+Language Videos
+James Charles · Tomas Pfister · Mark Everingham ·
+Andrew Zisserman
+Received: 4 February 2013 / Accepted: 29 October 2013
+© Springer Science+Business Media New York 2013"
+2155739f578e33449546f45a0b4cf64dbd614025,what is facereader ?,"FaceReader
+Methodology Note
+what is facereader?
+FaceReader™ is a program for facial analysis. It can detect
+facial expressions. FaceReader has been trained to classify
+expressions in one of the following categories: happy,
+sad, angry, surprised, scared, disgusted, and neutral. These
+emotional categories have been described by Ekman [1]
+as the basic or universal emotions. In addition to these
+basic emotions, contempt can be classified as expression,
+just like the other emotions [2]. Obviously, facial expres-
+sions vary in intensity and are often a mixture of emo-
+tions. In addition, there is quite a lot of interpersonal
+variation.
+Figure 1. Analyzing facial expressions with FaceReader.
+FaceReader has been trained to classify the expressions
+mentioned above. It is not possible to add expressions to
+the software yourself. Please contact Noldus Information
+Technology if you are interested in the classification of
+other expressions."
+21626caa46cbf2ae9e43dbc0c8e789b3dbb420f1,Transductive VIS-NIR face matching,"978-1-4673-2533-2/12/$26.00 ©2012 IEEE
+ICIP 2012"
+2118b1ce0c2551e75d30fb6ba24482e50b319a90,Ensemble Projection for Semi-supervised Image Classification,"Ensemble Projection for Semi-supervised Image Classification
+Dengxin Dai
+Computer Vision Lab, ETH Zurich
+Luc Van Gool
+Computer Vision Lab, ETH Zurich"
+216c61796c6ead27b1042046e1d95a2038624d26,Vehicle Re-identification Using Quadruple Directional Deep Learning Features,"Vehicle Re-identification Using Quadruple
+Directional Deep Learning Features
+Jianqing Zhu, Huanqiang Zeng, Jingchang Huang, Shengcai Liao, Zhen Lei, Canhui Cai and LiXin Zheng"
+21241d07840e3cc30feda59642571a9b459c817b,Biometrics via Oculomotor Plant Characteristics: Impact of Parameters in Oculomotor Plant Model,"This is a pre-print. Final version of the paper will be available at ACM digital library.
+Biometrics via Oculomotor Plant Characteristics:
+Impact of Parameters in Oculomotor Plant Model
+OLEG KOMOGORTSEV, COREY HOLLAND, ALEX KARPOV, AND LARRY R. PRICE Texas State University
+This paper proposes and evaluates a novel biometric approach utilizing the internal, non-visible, anatomical structure of the human eye. The
+proposed method estimates the anatomical properties of the human oculomotor plant from the measurable properties of human eye movements,
+utilizing a two-dimensional linear homeomorphic model of the oculomotor plant. The derived properties are evaluated within a biometric
+framework to determine their efficacy in both verification and identification scenarios. The results suggest that the physical properties derived from
+the oculomotor plant model are capable of achieving 20.3% equal error rate and 65.7% rank-1 identification rate on high-resolution equipment
+involving 32 subjects, with biometric samples taken over four recording sessions; or 22.2% equal error rate and 12.6% rank-1 identification rate on
+low-resolution equipment involving 172 subjects, with biometric samples taken over two recording sessions.
+Categories and Subject Descriptors: I.2.10 [Artificial Intelligence]: Vision and Scene Understanding—Modeling and recovery of physical
+attributes; I.5.1 [Pattern Recognition]: Models—Structural; I.6.4 [Simulation and Modeling]: Model Validation and Analysis
+General Terms: Biometrics
+Additional Key Words and Phrases: Human oculomotor system, biological system modeling, mathematical model, security and protection.
+ACM Reference Format:
+Komogortsev, O., Holland, C., Karpov, A., and Price, L. R. 2014. Oculomotor Plant Characteristics: Biometric Performance Evaluation. ACM
+Trans. Appl. Percept. 2, 3, Article 1 (May 2014), 13 pages.
+DOI:http://dx.doi.org/10.1145/0000000.0000000
+INTRODUCTION"
+21b16df93f0fab4864816f35ccb3207778a51952,Recognition of Static Gestures Applied to Brazilian Sign Language (Libras),"Recognition of Static Gestures applied to Brazilian Sign Language (Libras)
+Igor L. O. Bastos
+Math Institute
+Michele F. Angelo, Angelo C. Loula
+Department of Technology, Department of Exact Sciences
+Federal University of Bahia (UFBA),
+State University of Feira de Santana (UEFS)
+Salvador, Brazil
+Feira de Santana, Brazil"
+2170636d5d31eb461618b5da10f4473c67e74e73,Person Re-identification by Multi-Channel Parts-Based CNN with Improved Triplet Loss Function,"Person Re-Identification by Multi-Channel Parts-Based CNN with Improved
+Triplet Loss Function
+De Cheng, Yihong Gong, Sanping Zhou, Jinjun Wang, Nanning Zheng
+Institute of Artificial Intelligence and Robotics
+Xi’an Jiaotong University,Xi’an, Shaanxi, P.R. China"
+21ff1d20dd7b3e6b1ea02036c0176d200ec5626d,Loss Max-Pooling for Semantic Image Segmentation,"Loss Max-Pooling for Semantic Image Segmentation
+Samuel Rota Bul`o(cid:63),†
+Gerhard Neuhold†
+Peter Kontschieder†
+Mapillary - Graz, Austria -
+(cid:63)FBK - Trento, Italy -"
+2168ec12eff5c3d1ff09d0f3c13d6df5b5061164,Face recognition with salient local gradient orientation binary patterns,"978-1-4244-5654-3/09/$26.00 ©2009 IEEE
+ICIP 2009"
+21ac5d1c34675bf6056d2670f9fa3dde530b1716,ALB at SemEval-2018 Task 10: A System for Capturing Discriminative Attributes,"Proceedings of the 12th International Workshop on Semantic Evaluation (SemEval-2018), pages 963–967
+New Orleans, Louisiana, June 5–6, 2018. ©2018 Association for Computational Linguistics"
+21a1654b856cf0c64e60e58258669b374cb05539,"You Only Look Once: Unified, Real-Time Object Detection","You Only Look Once:
+Unified, Real-Time Object Detection
+Joseph Redmon∗, Santosh Divvala∗†, Ross Girshick¶, Ali Farhadi∗†
+University of Washington∗, Allen Institute for AI†, Facebook AI Research¶
+http://pjreddie.com/yolo/"
+4dd2744a37bd1e666346a41dcd2a271945c74e2f,Human-Robot Teaming : Approaches from Joint Action and Dynamical Systems,"Human Robot Teaming: Approaches from Joint
+Action and Dynamical Systems
+Tariq Iqbal and Laurel D. Riek"
+4d510bca00b625f86606cb0096299b993090534a,Small Sample Learning in Big Data Era,"Small Sample Learning in Big Data Era
+Jun Shu
+Zongben Xu
+Deyu Meng
+School of Mathematics and Statistics
+Ministry of Education Key Lab of Intelligent Networks and Network Security
+Xi’an Jiaotong University, Xian, China"
+4dade6faf6d5d6db53d5bcb2e107311da1ad48ac,Facial Expression Biometrics Using Statistical Shape Models,"Hindawi Publishing Corporation
+EURASIP Journal on Advances in Signal Processing
+Volume 2009, Article ID 261542, 17 pages
+doi:10.1155/2009/261542
+Research Article
+Facial Expression Biometrics Using Statistical Shape Models
+Wei Quan, Bogdan J. Matuszewski (EURASIP Member), Lik-Kwan Shark,
+and Djamel Ait-Boudaoud
+Applied Digital Signal and Image Processing Research Centre, University of Central Lancashire, Preston PR1 2HE, UK
+Correspondence should be addressed to Bogdan J. Matuszewski,
+Received 30 September 2008; Revised 2 April 2009; Accepted 18 August 2009
+Recommended by Jonathon Phillips
+This paper describes a novel method for representing different facial expressions based on the shape space vector (SSV) of the
+statistical shape model (SSM) built from 3D facial data. The method relies only on the 3D shape, with texture information not
+being used in any part of the algorithm, that makes it inherently invariant to changes in the background, illumination, and to
+some extent viewing angle variations. To evaluate the proposed method, two comprehensive 3D facial data sets have been used
+for the testing. The experimental results show that the SSV not only controls the shape variations but also captures the expressive
+characteristic of the faces and can be used as a significant feature for facial expression recognition. Finally the paper suggests
+improvements of the SSV discriminatory characteristics by using 3D facial sequences rather than 3D stills.
+Copyright © 2009 Wei Quan et al. This is an open access article distributed under the Creative Commons Attribution License,"
+4d49c6cff198cccb21f4fa35fd75cbe99cfcbf27,Topological principal component analysis for face encoding and recognition,"Topological Principal Component Analysis for
+face encoding and recognition
+Albert Pujol , Jordi Vitri(cid:18)a, Felipe Lumbreras,
+Juan J. Villanueva
+Computer Vision Center and Departament d’Inform(cid:18)atica, Edi(cid:12)ci O, Universitat
+Aut(cid:18)onoma de Barcelona
+4da735d2ed0deeb0cae4a9d4394449275e316df2,"The rhythms of head, eyes and hands at intersections","Gothenburg, Sweden, June 19-22, 2016
+978-1-5090-1820-8/16/$31.00 ©2016 IEEE"
+4db64fbc3dd2486a74dba3350d44c51e561f515f,An Ecological Visual Exploration Tool to Support the Analysis of Visual Processing Pathways in Children with Autism Spectrum Disorders,"Article
+An Ecological Visual Exploration Tool to Support the
+Analysis of Visual Processing Pathways in Children
+with Autism Spectrum Disorders
+Dario Cazzato 1, Marco Leo 2,*, Cosimo Distante 2, Giulia Crifaci 3,
+Giuseppe Massimo Bernava 4, Liliana Ruta 4, Giovanni Pioggia 4 and Silvia M. Castro 5
+Interdisciplinary Centre for Security Reliability and Trust (SnT), University of Luxembourg, 29,
+Avenue JF Kennedy, L-1855 Luxembourg, Luxembourg;
+Institute of Applied Sciences and Intelligence Systems—CNR, 73100 Lecce, Italy;
+Department of Clinical Physiology, CNR Pisa, 56124 Pisa, Italy;
+Institute of Applied Sciences and Intelligence Systems—CNR, 98164 Messina, Italy;
+(G.M.B.); (L.R.); (G.P.)
+5 Universidad Nacional del Sur, 8000 Bahía Blanca, Argentina;
+* Correspondence:
+Received: 6 November 2017; Accepted: 19 December 2017; Published: 29 December 2017"
+4dc6659b5022ecc2c4e1459e9dff16ddece4147e,Transfer Learning for Illustration Classification,"CEIG - Spanish Computer Graphics Conference (2017)
+F. J. Melero and N. Pelechano (Editors)
+Transfer Learning for Illustration Classification
+Manuel Lagunas1 Elena Garces2
+Universidad de Zaragoza, I3A
+Technicolor
+Figure 1: Comparison of the probabilities of the images that belong to the class pelican using our method and the network VGG19 [SZ14].
+image (a) is a photograph and image (b) is an illustration which has similar colours, gradients and edges than the natural image. On the"
+4d1fc3245b05731a313e61165c1109f42f5b4a0c,Facial expression recognition using local binary patterns and discriminant kernel locally linear embedding,"Zhao and Zhang EURASIP Journal on Advances in Signal Processing 2012, 2012:20
+http://asp.eurasipjournals.com/content/2012/1/20
+RESEARCH
+Facial expression recognition using local binary
+patterns and discriminant kernel locally linear
+embedding
+Xiaoming Zhao1 and Shiqing Zhang2*
+Open Access"
+4d4b1aa87af8bfd65ac7bc250bba5951aed40986,A Survey on Model Based Approaches for 2D and 3D Visual Human Pose Recovery,"Sensors 2014, 14, 4189-4210; doi:10.3390/s140304189
+OPEN ACCESS
+sensors
+ISSN 1424-8220
+www.mdpi.com/journal/sensors
+Review
+A Survey on Model Based Approaches for 2D and 3D Visual
+Human Pose Recovery
+Xavier Perez-Sala 1
+,*, Sergio Escalera 2, Cecilio Angulo 3 and Jordi Gonz`alez 4
+Fundaci´o Privada Sant Antoni Abat, Vilanova i la Geltr´u, Universitat Polit`ecnica de Catalunya,
+Vilanova i la Geltr´u 08800, Catalonia, Spain
+Department Mathematics (MAIA), Universitat de Barcelona and Computer Vision Center (CVC),
+Barcelona 08007, Catalonia, Spain; E-Mail:
+Automatic Control Department (ESAII), Universitat Polit`ecnica de Catalunya,
+Vilanova i la Geltr´u 08800, Catalonia, Spain; E-Mail:
+Department Computer Science, Universitat Aut`onoma de Barcelona and Computer Vision Center
+(CVC), Bellaterra 08193, Catalonia, Spain; E-Mail:
+* Author to whom correspondence should be addressed; E-Mail:
+Received: 29 November 2013; in revised form: 30 January 2014 / Accepted: 9 February 2014 /"
+4de83b6025526ef7a340ffca30626dac53d7f8cb,SIFT/LBP 3D face recognition,"SIFT/LBP 3D face recognition
+Narimen SAAD1 NourEddine DJEDI
+Department of Computer Science
+LESIA Laboratory
+University of Biskra, Algeria"
+4d530a4629671939d9ded1f294b0183b56a513ef,Facial Expression Classification Method Based on Pseudo Zernike Moment and Radial Basis Function Network,"International Journal of Machine Learning and Computing, Vol. 2, No. 4, August 2012
+Facial Expression Classification Method Based on Pseudo
+Zernike Moment and Radial Basis Function Network
+Tran Binh Long, Le Hoang Thai, and Tran Hanh"
+4d87784afdb704d9eca14010212afd5cd74c60ec,Cosine Similarity Search with Multi Index Hashing,"Cosine Similarity Search
+with Multi-Index Hashing
+Sepehr Eghbali and Ladan Tahvildari"
+4dd72cdafead8a98dbc77a1a74bd66ffb90d3e01,Virtual and Real World Adaptation for Pedestrian Detection,"Virtual and Real World Adaptation for
+Pedestrian Detection
+David V ´azquez, Antonio M. L ´opez, Member, IEEE, Javier Mar´ın, Daniel Ponsa, David Ger ´onimo"
+4d8347a69e77cc02c1e1aba3a8b6646eac1a0b3d,Re-ID done right: towards good practices for person re-identification,"Re-ID done right: towards good practices for person re-identification
+Jon Almaz´an1 Bojana Gaji´c2∗ Naila Murray1 Diane Larlus1
+Computer Vision Group
+NAVER LABS Europe
+Computer Vision Center
+Dept. de Ci`encies de la Computaci´o, UAB"
+4d2022e3db712237b95fe381a75dbeb827551924,Running Head : GENDER CATEGORIZATION IN INFANTS AND CHILDREN 1 Gender Categorization in Infants and Children,"Running Head: GENDER CATEGORIZATION IN INFANTS AND CHILDREN
+Gender Categorization in Infants and Children
+Hong N. T. Bui
+Senior Thesis in Psychology
+Advisor: Karen Wynn
+April 27, 2018"
+4d2975445007405f8cdcd74b7fd1dd547066f9b8,Image and Video Processing for Affective Applications,"Image and Video Processing
+for Affective Applications
+Maja Pantic and George Caridakis"
+4d45612c41d3e27a30a5ec64e0d8e2362dcb6b73,Brand > Logo: Visual Analysis of Fashion Brands,"Brand > Logo: Visual Analysis of Fashion
+Brands
+M. Hadi Kiapour and Robinson Piramuthu
+eBay, San Francisco CA 94105, USA"
+4ddd55a9f103001da8dc24d123d9223dbb67f884,Combining Face and Facial Feature Detectors for Face Detection Performance Improvement,"Combining face and facial feature detectors for
+face detection performance improvement
+M. Castrill´on-Santana, D. Hern´andez-Sosa, and J. Lorenzo-Navarro(cid:63)
+SIANI
+Universidad de Las Palmas de Gran Canaria, Spain"
+4dba7e19e2958d8ab75261219747aebc675c6f8a,Finding the Topic of a Set of Images,"Finding the Topic of a Set of Images
+Gonzalo Vaca-Castano
+Univeristy of Central Florida"
+4df54d4758b1a883902c036b2a10ef6d0f2d4af9,An Automatic Face Recognition System Based On Adaptive Wavelet Transforms,"International Journal of Scientific Research and Engineering Studies (IJSRES)
+Volume 2 Issue 4, April 2015
+ISSN: 2349-8862
+An Automatic Face Recognition System Based On Adaptive
+Wavelet Transforms
+Prof. Khaladkar
+Nilam Chavan
+Apurva Kadam"
+4db9e5f19366fe5d6a98ca43c1d113dac823a14d,"Are 1, 000 Features Worth A Picture? Combining Crowdsourcing and Face Recognition to Identify Civil War Soldiers","Combining Crowdsourcing and Face Recognition to Identify Civil War Soldiers
+Are 1,000 Features Worth A Picture?
+Vikram Mohanty, David Thames, Kurt Luther
+Department of Computer Science and Center for Human-Computer Interaction
+Virginia Tech, Arlington, VA, USA"
+4de757faa69c1632066391158648f8611889d862,Review of Face Recognition Technology Using Feature Fusion Vector,"International Journal of Advanced Engineering Research and Science (IJAERS) Vol-3, Issue-3 , March- 2016]
+ISSN: 2349-6495
+Review of Face Recognition Technology Using
+Feature Fusion Vector
+Shrutika Shukla, Prof. Anuj Bhargav, Prof. Prashant Badal
+Department of Electronics and Communication, S.R.C.E.M, Banmore, RGPV, University, Bhopal, Madhya Pradesh, India"
+4d20fbd6dcdb4408dd6268951d86b92e8d96f332,Robust Face Recognition of Variations in Blur and Illumination by Using LDA,"International Journal of Engineering Research and Applications (IJERA) ISSN: 2248-9622
+International Conference on Humming Bird ( 01st March 2014)
+RESEARCH ARTICLE
+OPEN ACCESS
+Robust Face Recognition of Variations in Blur and Illumination
+by Using LDA
+Ms. K. Hema
+PG Student
+Department of AE
+University College of Engineering
+Nagercoil-629004.
+Mr. J. Arun Prem Santh M. E.,
+Teaching Fellow
+Department of ECE
+University College of Engineering
+Nagercoil-629004 ."
+4d334cfafd11a93394917adcffef6c1d27aa178b,Refined Clustering technique based on boosting and outlier detection,"International Journal of Scientific & Engineering Research, Volume 6, Issue 11, November-2015 472
+ISSN 2229-5518
+Refined Clustering technique based on boosting
+and outlier detection
+Ms. Reshma Y. Nagpure, Prof. P. P. Rokade"
+4d6043a25bf48c6fd6aff6a46597fe1902a9c6a7,Long-term tracking of multiple interacting pedestrians using a single camera,"Long-term tracking of multiple interacting
+pedestrians using a single camera
+Mogomotsi Keaikitse∗, Willie Brink† and Natasha Govender∗
+Modelling and Digital Sciences
+Council for Scientific and Industrial Research
+Pretoria, South Africa
+Department of Mathematical Sciences
+Stellenbosch University
+Stellenbosch, South Africa"
+4d6e7d73f5226142ffc42b4e8380882d5071e187,Discretion Within Constraint: Homophily and Structure in a Formal Organization,"This article was downloaded by: [128.32.74.70] On: 03 July 2014, At: 15:15
+Publisher: Institute for Operations Research and the Management Sciences (INFORMS)
+INFORMS is located in Maryland, USA
+Publication details, including instructions for authors and subscription information:
+http://pubsonline.informs.org
+Discretion Within Constraint: Homophily and Structure in
+Formal Organization
+Adam M. Kleinbaum, Toby E. Stuart, Michael L. Tushman
+To cite this article:
+Adam M. Kleinbaum, Toby E. Stuart, Michael L. Tushman (2013) Discretion Within Constraint: Homophily and Structure in a
+Full terms and conditions of use: http://pubsonline.informs.org/page/terms-and-conditions
+This article may be used only for the purposes of research, teaching, and/or private study. Commercial use
+or systematic downloading (by robots or other automatic processes) is prohibited without explicit Publisher
+approval, unless otherwise noted. For more information, contact
+The Publisher does not warrant or guarantee the article’s accuracy, completeness, merchantability, fitness
+for a particular purpose, or non-infringement. Descriptions of, or references to, products or publications, or
+inclusion of an advertisement in this article, neither constitutes nor implies a guarantee, endorsement, or
+support of claims made of that product, publication, or service.
+Copyright © 2013, INFORMS
+Please scroll down for article—it is on subsequent pages"
+4d442ea40635a10fd3e642a7161dfc8f2b15a71e,An Image reranking model based on attributes and visual features eliminating duplication,"© 2016 IJEDR | Volume 4, Issue 2 | ISSN: 2321-9939
+An Image reranking model based on attributes and
+visual features eliminating duplication
+Ms.Madhuri Mhaske,2Prof.Sachin Patil
+PG Scholar at G. H. Raisoni College of Engineering and Management, Chas, Ahmednagar
+, 2Professor at G. H. Raisoni College of Engineering and Management, Vagholi, Pune
+________________________________________________________________________________________________________"
+4d7e1eb5d1afecb4e238ba05d4f7f487dff96c11,Largest center-specific margin for dimension reduction,"978-1-5090-4117-6/17/$31.00 ©2017 IEEE
+ICASSP 2017"
+4d5c34fb36cf8c74880a62814750760bce0aef16,Boosting descriptors condensed from video sequences for place recognition,"Boosting Descriptors Condensed from Video Sequences for Place Recognition
+Tat-Jun Chin, Hanlin Goh and Joo-Hwee Lim
+Institute for Infocomm Research
+1 Heng Mui Keng Terrace, Singapore 119613.
+{tjchin, hlgoh,"
+4df34e0194faa27078832cb5078a2af6c9d0ea9b,Saliency Prediction in the Deep Learning Era: An Empirical Investigation,"IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE
+Saliency Prediction in the Deep Learning Era:
+An Empirical Investigation
+Ali Borji, Member, IEEE"
+4d6ad0c7b3cf74adb0507dc886993e603c863e8c,Human Activity Recognition Based on Wearable Sensor Data : A Standardization of the State-ofthe-Art,"Human Activity Recognition Based on Wearable
+Sensor Data: A Standardization of the
+State-of-the-Art
+Artur Jord˜ao, Antonio C. Nazare Jr., Jessica Sena and William Robson Schwartz
+Smart Surveillance Interest Group, Computer Science Department
+Universidade Federal de Minas Gerais, Brazil
+Email: {arturjordao, antonio.nazare, jessicasena,"
+4d7bbaa2c7e89d5ba6940ee5804cf10a6b24d6ec,Multi-target Unsupervised Domain Adaptation without Exactly Shared Categories,"Multi-target Unsupervised Domain Adaptation
+without Exactly Shared Categories
+Huanhuan Yu, Menglei Hu and Songcan Chen"
+4dca3d6341e1d991c902492952e726dc2a443d1c,Learning towards Minimum Hyperspherical Energy,"Learning towards Minimum Hyperspherical Energy
+Weiyang Liu1,*, Rongmei Lin2,*, Zhen Liu1,*, Lixin Liu3,*, Zhiding Yu4, Bo Dai1,5, Le Song1,6
+Georgia Institute of Technology 2Emory University
+South China University of Technology 4NVIDIA 5Google Brain 6Ant Financial"
+4d0ef449de476631a8d107c8ec225628a67c87f9,Face system evaluation toolkit: Recognition is harder than it seems,"© 2010 IEEE. Personal use of this material is permitted. Permission from IEEE
+must be obtained for all other uses, in any current or future media, including
+reprinting/republishing this material for advertising or promotional purposes,
+creating new collective works, for resale or redistribution to servers or lists, or
+reuse of any copyrighted component of this work in other works.
+Pre-print of article that appeared at BTAS 2010.
+The published article can be accessed from:
+http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=5634517"
+4d231311cdfe3aba13766bd0b358d4db0a9af3d3,Processing and Recognising Faces in 3D Images,"We are IntechOpen,
+the world’s leading publisher of
+Open Access books
+Built by scientists, for scientists
+,800
+16,000
+Open access books available
+International authors and editors
+Downloads
+Our authors are among the
+Countries delivered to
+TOP 1%
+2.2%
+most cited scientists
+Contributors from top 500 universities
+Selection of our books indexed in the Book Citation Index
+in Web of Science™ Core Collection (BKCI)
+Interested in publishing with us?
+Contact
+Numbers displayed above are based on latest data collected."
+4dea287ad9271d4ac73c58c03b8e6e714dd2db6c,Pyramid Center - symmetric Local 1 Binary / Trinary Patterns for Pedestrian 2 Detection,"Pyramid Center-symmetric Local
+Binary/Trinary Patterns for Pedestrian
+Detection
+Yongbin Zheng, Chunhua Shen, Richard Hartley and Xinsheng Huang
+Australian National University and NICTA, Canberra"
+4d47261b2f52c361c09f7ab96fcb3f5c22cafb9f,Deep multi-frame face super-resolution,"Deep multi-frame face super-resolution
+Evgeniya Ustinova, Victor Lempitsky
+October 17, 2017"
+4dc8b1c193c421f8f570c0a7eac2fc73da06cb51,MODS: Fast and Robust Method for Two-View Matching,"MODS: Fast and Robust Method for Two-View
+Matching
+Dmytro Mishkin, Jiri Matas, Michal Perdoch
+Center for Machine Perception, Faculty of Electrical Engineering,
+Czech Technical University in Prague. Karlovo namesti, 13. Prague 2, 12135"
+4d9d25e67ebabbfc0acd63798f1a260cb2c8a9bd,Playing for Data: Ground Truth from Computer Games,"Playing for Data: Ground Truth from Computer Games
+Stephan R. Richter∗1 Vibhav Vineet∗2
+Stefan Roth1 Vladlen Koltun2
+TU Darmstadt
+Intel Labs"
+4d3a6c2cee0cf06ff6471fad3d65a5835d0552f8,3-D Face Recognition Using Geodesic-Map Representation and Statistical Shape Modelling,"Article
+3-D Face Recognition Using Geodesic-Map
+Representation and Statistical Shape Modelling
+Quan, Wei, Matuszewski, Bogdan and Shark, Lik
+Available at http://clok.uclan.ac.uk/13240/
+Quan, Wei, Matuszewski, Bogdan and Shark, Lik (2016) 3-D Face Recognition Using Geodesic-
+Map Representation and Statistical Shape Modelling. Lecture Notes in Computer Science, 9493 .
+pp. 199–212. ISSN 0302-9743
+It is advisable to refer to the publisher’s version if you intend to cite from the work.
+http://dx.doi.org/10.1007/978-3-319-27677-9_13
+For more information about UCLan’s research in this area go to
+http://www.uclan.ac.uk/researchgroups/ and search for <name of research Group>.
+For information about Research generally at UCLan please go to
+http://www.uclan.ac.uk/research/
+All outputs in CLoK are protected by Intellectual Property Rights law, including
+Copyright law. Copyright, IPR and Moral Rights for the works on this site are retained
+by the individual authors and/or other copyright owners. Terms and conditions for use
+of this material are defined in the http://clok.uclan.ac.uk/policies/
+Central Lancashire online Knowledge
+www.clok.uclan.ac.uk"
+4df3143922bcdf7db78eb91e6b5359d6ada004d2,The Chicago face database: A free stimulus set of faces and norming data.,"Behav Res (2015) 47:1122–1135
+DOI 10.3758/s13428-014-0532-5
+The Chicago face database: A free stimulus set of faces
+and norming data
+Debbie S. Ma & Joshua Correll & Bernd Wittenbrink
+Published online: 13 January 2015
+# Psychonomic Society, Inc. 2015"
+75827a2021ac2ad2256144b2a2fe301948d39b51,AI Benchmark: Running Deep Neural Networks on Android Smartphones,"AI Benchmark: Running Deep Neural Networks
+on Android Smartphones
+Andrey Ignatov
+ETH Zurich
+Radu Timofte
+ETH Zurich
+William Chou
+Qualcomm, Inc.
+Ke Wang
+Huawei, Inc.
+Max Wu
+MediaTek, Inc.
+Tim Hartley
+Arm, Inc.
+Luc Van Gool ∗
+ETH Zurich"
+75cb21fa931e957941c0237a1030aa36209bae36,Gaussian Process for Activity Modeling and Anomaly Detection,"GAUSSIAN PROCESS FOR ACTIVITY MODELING AND ANOMALY DETECTION
+Wentong Liaoa, Bodo Rosenhahna, Michael Ying Yangb
+Institute for Information Processing, Leibniz University Hannover, Germany
+Computer Vision Lab, TU Dresden, Germany
+KEY WORDS: Gaussian Process regression, activity modeling, anomaly detection
+Commission WG III/3"
+75879ab7a77318bbe506cb9df309d99205862f6c,Analysis of emotion recognition from facial expressions using spatial and transform domain methods,"Analysis Of Emotion Recognition From Facial
+Expressions Using Spatial And Transform Domain
+Methods
+Ms. P. Suja* and Dr. Shikha Tripathi"
+75d571d53eb250e222d66461fa2400956b40eaa9,What Makes a Photograph Memorable?,"This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication.
+What makes a photograph memorable?
+Phillip Isola, Jianxiong Xiao, Member, IEEE, Devi Parikh, Member, IEEE, Antonio Torralba, Member, IEEE,
+and Aude Oliva
+75d59ae0ed3ce51e37b383985cfff310251f591a,Cost-Sensitive Robustness against Adversarial Examples,"Cost-Sensitive Robustness against Adversarial Examples
+Xiao Zhang∗
+and David Evans†
+75a9d9ea6c1a5ee55fc0ccb347b263785b15ac0a,An Image Search Reranking Model based on attribute assisted hypergraph Miss,"International Research Journal of Engineering and Technology (IRJET) e-ISSN: 2395 -0056
+Volume: 03 Issue: 05 | May-2016 www.irjet.net p-ISSN: 2395-0072
+An Image Search Reranking Model based on
+attribute assisted hypergraph
+Miss. Madhuri J.Mhaske1, Prof. Sachin P.Patil2
+PG Scholar Computer Engineering , G. H. Raisoni College of Engineering and Management,
+Savitribai Phule Pune University , Chas, Ahmednagar.414001,Maharashtra, India.
+Assistant professor, computer engineering, G.H. Raisoni College of engineering and Management,
+Savitribai Phule Pune University, Wagholi, Pune 411015, Maharashtra, India.
+---------------------------------------------------------------------***---------------------------------------------------------------------
+user wants to search for a red image, the images cannot be"
+758572c5779a47e898caff7232af76eda253163b,Csr: Medium: Collaborative Research: Architecture and System Support for Power-agile Computing,"CSR: MEDIUM: COLLABORATIVE RESEARCH: ARCHITECTURE AND
+SYSTEM SUPPORT FOR POWER-AGILE COMPUTING
+Co-PI: Geoffrey Challen (University at Buffalo), Co-PI: Mark Hempstead (Drexel University)
+NSF PROPOSAL
+5 OCT 2013
+As energy management on energy-constrained devices continues to challenge researchers and frustrate
+users, device designs are addressing the problem by integrating more hardware components that can trade
+off energy and performance. Dynamic voltage-and-frequency scaling (DVFS) allows CPUs and memory
+to trade off speed and energy, buffering and polling rates allow radios to trade off latency and energy,
+and screen refresh rates allow displays to trade off quality and energy. And as the Dark Silicon utilization
+wall forces systems to choose what parts of the CPU to operate, the already-large configuration space will
+explode. This proposal refers to the emerging class of devices integrating multiple energy-proportional
+components as power-agile, reflecting their potential ability to adaptively reallocate energy usage between
+components to improve performance and save energy. But as energy-management features proliferate,
+new interfaces enabling coordination between applications, the operating system (OS), and hardware are
+urgently needed to realize the potential energy and performance benefits.
+INTELLECTUAL MERIT: Our proposal describes a new architecture for power-agile systems with both
+novel interfaces that cleanly separate energy management responsibilities and a new approach to energy
+allocation driven by differences in hardware energy efficiency. Applications use resource requests to allo-
+cate energy between hardware components, making their resource needs explicit. The OS manages energy"
+75a92d92ee59555c847973a7422d7356514cde2d,Exploiting Multiple Detections for Person Re-Identification,"Article
+Exploiting Multiple Detections for
+Person Re-Identification
+Amran Bhuiyan *, Alessandro Perina and Vittorio Murino
+Pattern Analysis and Computer Vision (PAVIS), Istituto Italiano di Tecnologia, Via Morego 30,
+16163 Genova, Italy; (A.P.); (V.M.)
+* Correspondence: Tel.: +39-331-803-7176
+Received: 18 November 2017; Accepted: 11 January 2018; Published: 23 January 2018"
+7557e81c1189f0ef9643519e0664d60baed51721,Robust and Efficient Graph Correspondence Transfer for Person Re-identification,"DRAFT
+Transfer for Person Re-identification
+Qin Zhou, Heng Fan, Hua Yang, Member, IEEE, Hang Su, Member, IEEE, Shibao Zheng, Member, IEEE,
+Shuang Wu, and Haibin Ling, Member, IEEE"
+751e11880b54536a89bfcc4fd904b0989345a601,Hierarchical Adversarially Learned Inference,"HIERARCHICAL ADVERSARIALLY LEARNED
+INFERENCE
+Mohamed Ishmael Belghazi1, Sai Rajeswar1, Olivier Mastropietro1,
+Negar Rostamzadeh2, Jovana Mitrovic2 and Aaron Courville1†
+MILA, Université de Montréal,
+Element AI,
+DeepMind,
+CIFAR Fellow."
+75503aff70a61ff4810e85838a214be484a674ba,Improved facial expression recognition via uni-hyperplane classification,"Improved Facial Expression Recognition via Uni-Hyperplane Classification
+S.W. Chew∗, S. Lucey†, P. Lucey‡, S. Sridharan∗, and J.F. Cohn‡"
+754fa133a250d824c50b4c3b9c73975059954f41,Siamese Learning Visual Tracking: A Survey,"Siamese Learning Visual Tracking: A Survey
+Roman Pflugfelder, Member, IEEE
+(Draft Article)"
+75308067ddd3c53721430d7984295838c81d4106,Rapid Facial Reactions in Response to Facial Expressions of Emotion Displayed by Real Versus Virtual Faces,"Article
+Rapid Facial Reactions
+in Response to Facial
+Expressions of Emotion
+Displayed by Real Versus
+Virtual Faces
+i-Perception
+2018 Vol. 9(4), 1–18
+! The Author(s) 2018
+DOI: 10.1177/2041669518786527
+journals.sagepub.com/home/ipe
+Leonor Philip, Jean-Claude Martin and Ce´ line Clavel
+LIMSI, CNRS, University of Paris-Sud, Orsay, France"
+750e567370fd8c37bab657207195517405727a71,Time Aware Task Delegation in Agent Interactions for Video-Surveillance,"Time aware task delegation in agent interactions
+for video-surveillance
+Paolo Sernani1, Matteo Biagiola2,3, Nicola Falcionelli1,
+Dagmawi Neway Mekuria1, Stefano Cremonini4, Aldo Franco Dragoni1
+Dipartimento di Ingegneria dell’Informazione,
+Universit`a Politecnica delle Marche,
+Ancona, Italy
+{p.sernani,
+{n.falcionelli,
+Fondazione Bruno Kessler,
+Trento, Italy
+Universit`a degli Studi di Genova,
+Genova, Italy
+Site Spa, Bologna, Italy"
+75d8f2da0e9d80eef141c765254d7752445afb53,Violent video detection based on MoSIFT feature and sparse coding,"2014 IEEE International Conference on Acoustic, Speech and Signal Processing (ICASSP)
+978-1-4799-2893-4/14/$31.00 ©2014 IEEE
+Long Xu1, Chen Gong1, Jie Yang1(cid:3), Qiang Wu2, Lixiu Yao1
+1. INTRODUCTION"
+75e4efae6de6d1ac787a6ca381fb49381fcb062b,Hierarchical Representation Learning for Kinship Verification,"IEEE TRANSACTIONS ON IMAGE PROCESSING
+Hierarchical Representation Learning for Kinship
+Verification
+Naman Kohli, Student Member, IEEE, Mayank Vatsa, Senior Member, IEEE, Richa Singh, Senior Member, IEEE,
+Afzel Noore, Senior Member, IEEE, and Angshul Majumdar, Senior Member, IEEE"
+75d5e67e31cefa09ae46044fa1f9f7696e058c99,MRI based Techniques for Detection of Alzheimer: A Survey,"MRI based Techniques for Detection of Alzheimer: A Survey
+{tag} {/tag}
+International Journal of Computer Applications
+Foundation of Computer Science (FCS), NY, USA
+Volume 159
+Number 5
+Year of Publication: 2017
+Authors:
+Ruaa Adeeb Abdulmunem Al-falluji
+10.5120/ijca2017912929
+{bibtex}2017912929.bib{/bibtex}"
+759a3b3821d9f0e08e0b0a62c8b693230afc3f8d,Attribute and simile classifiers for face verification,"Attribute and Simile Classifiers for Face Verification
+Neeraj Kumar
+Alexander C. Berg
+Peter N. Belhumeur
+Columbia University∗
+Shree K. Nayar"
+75e9401e70c05c4d080e2d17f83ed2b61b44b3af,A distributed algorithm for partitioned robust submodular maximization,"A Distributed Algorithm for Partitioned
+Robust Submodular Maximization
+Ilija Bogunovic, Slobodan Mitrovi´c, Jonathan Scarlett, and Volkan Cevher
+École Polytechnique Fédérale de Lausanne (EPFL)
+{ilija.bogunovic, slobodan.mitrovic, jonathan.scarlett,"
+7538ad235caf4dbc64a8b94a6146e1212d4de1ff,Amygdala dysfunction in men with the fragile X premutation.,"doi:10.1093/brain/awl338
+Brain (2007), 130, 404–416
+Amygdala dysfunction in men with the fragile
+X premutation
+David Hessl,1,2 Susan Rivera,1,5 Kami Koldewyn,1,6 Lisa Cordeiro,1 John Adams,1 Flora Tassone,1,4
+Paul J. Hagerman1,4 and Randi J. Hagerman1,3
+Medical Investigation of Neurodevelopmental Disorders (MIND) Institute and Departments of 2Psychiatry and Behavioral
+Sciences, 3Pediatrics, University of California-Davis, Medical Center, Sacramento, 4Department of Biochemistry and
+Molecular Medicine, University of California-Davis, School of Medicine, 5Department of Psychology and 6Center for
+Neuroscience, University of California-Davis, Davis, CA, USA
+Correspondence to: David Hessl, PhD, Assistant Clinical Professor, MIND Institute, University of California, Davis Medical
+Center, 2825 50th Street, Sacramento, CA 95817, USA.
+E-mail:
+Premutation alleles (55–200 CGG repeats) of the fragile X mental retardation 1 (FMR1) gene are associated
+with autism spectrum disorder in childhood, premature ovarian failure, and the neurodegenerative disorder,
+fragile X-associated tremor/ataxia syndrome (FXTAS). FXTAS, and perhaps the other clinical presentations
+mong carriers, are thought to be due to toxic gain-of-function of elevated levels of the expanded-repeat
+FMR1 mRNA. Previous structural MRI studies have implicated the amygdala as a potential site of dysfunction
+underlying social deficits and/or risk for FXTAS. As a preliminary investigation of this possible association, adult
+males with the premutation, and male controls matched for IQ, age and education, completed three protocols"
+75859ac30f5444f0d9acfeff618444ae280d661d,Multibiometric Cryptosystems Based on Feature-Level Fusion,"Multibiometric Cryptosystems based on Feature
+Level Fusion
+Abhishek Nagar, Student Member, IEEE, Karthik Nandakumar, Member, IEEE, and Anil K. Jain, Fellow, IEEE"
+758d7e1be64cc668c59ef33ba8882c8597406e53,"AffectNet: A Database for Facial Expression, Valence, and Arousal Computing in the Wild","IEEE TRANSACTIONS ON AFFECTIVE COMPUTING
+AffectNet: A Database for Facial Expression,
+Valence, and Arousal Computing in the Wild
+Ali Mollahosseini, Student Member, IEEE, Behzad Hasani, Student Member, IEEE,
+nd Mohammad H. Mahoor, Senior Member, IEEE"
+75522dfc1610c8765185c4344d97db33e1af5047,"RASKIN, RUDZSKY, RIVLIN: BODY-PART TRACKING AND ACTION CLASSIFICATION 1 3D Human Body-Part Tracking and Action Classification Using a Hierarchical Body Model","RASKIN, RUDZSKY, RIVLIN: BODY-PART TRACKING AND ACTION CLASSIFICATION
+3D Human Body-Part Tracking and Action
+Classification Using a Hierarchical Body
+Model
+Leonid Raskin
+Michael Rudzsky
+Ehud Rivlin
+Computer Science Department
+Technion -Israel Institute of Technology
+Haifa, Israel, 3200"
+7553fba5c7f73098524fbb58ca534a65f08e91e7,A Practical Approach for Determination of Human Gender & Age,"Harpreet Kaur Bhatia et al, International Journal of Computer Science and Mobile Computing, Vol.3 Issue.6, June- 2014, pg. 816-824
+Available Online at www.ijcsmc.com
+International Journal of Computer Science and Mobile Computing
+A Monthly Journal of Computer Science and Information Technology
+ISSN 2320–088X
+IJCSMC, Vol. 3, Issue. 6, June 2014, pg.816 – 824
+RESEARCH ARTICLE
+A Practical Approach for Determination
+of Human Gender & Age
+Harpreet Kaur Bhatia1, Ahsan Hussain2
+CSE Dept. & CSVTU University, India
+CSE Dept. & CSVTU University, India"
+75cf72819b8741777a961157f43d994238219f5e,Crowd Behavior Detection for Abnormal Conditions,"International Journal of Computer Systems (ISSN: 2394-1065), Volume 03– Issue 06, June, 2016
+Available at http://www.ijcsonline.com/
+Crowd Behavior Detection for Abnormal Conditions
+Aniket A. Patil, Prof. S. A. Shinde
+Department of Computer Engineering,
+Savitribai Phule Pune University, Pune, India"
+75b987f86af2bc7f68edc45be240dd30e1ef2699,Sampling Algorithms to Handle Nuisances in Large-Scale Recognition,"UNIVERSITY OF CALIFORNIA
+Los Angeles
+Sampling Algorithms to Handle Nuisances in Large-Scale Recognition
+A dissertation submitted in partial satisfaction
+of the requirements for the degree
+Doctor of Philosophy in Computer Science
+Nikolaos Karianakis"
+75073faadb967823db48794e9cd54b681bb0729b,Thermal-Aware Task Allocation and Scheduling for Heterogeneous Multi-core Cyber-Physical Systems,"Thermal-Aware Task Allocation and Scheduling for
+Heterogeneous Multi-core Cyber-Physical Systems
+Department of Electrical and Computer Engineering University of Massachusetts Amherst, Amherst, MA, 01003
+Shikang Xu, Israel Koren and C. M. Krishna"
+75c3ba0c7e5b0d4a11e9d2e073ccd02ee688c0c9,"A Multimodal LDA Model integrating Textual, Cognitive and Visual Modalities","Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing, pages 1146–1157,
+Seattle, Washington, USA, 18-21 October 2013. c(cid:13)2013 Association for Computational Linguistics"
+75650bfc20036d99314f7ddae8f2baecde3d57e2,Concave Losses for Robust Dictionary Learning,"CONCAVE LOSSES FOR ROBUST DICTIONARY LEARNING
+Rafael Will M. de Araujo, R. Hirata Jr ∗
+Alain Rakotomamonjy †
+University of S˜ao Paulo
+Institute of Mathematics and Statistics
+Rua do Mat˜ao, 1010 – 05508-090 – S˜ao Paulo-SP, Brazil
+Universit´e de Rouen Normandie
+LITIS EA 4108
+76800 Saint- ´Etienne-du-Rouvray, France"
+75249ebb85b74e8932496272f38af274fbcfd696,Face Identification in Large Galleries,"Face Identification in Large Galleries
+Rafael H. Vareto, Filipe Costa, William Robson Schwartz
+Smart Surveillance Interest Group, Department of Computer Science
+Universidade Federal de Minas Gerais, Belo Horizonte, Brazil"
+816c8c8d0f02200f988625d4989a1b4b34d779c6,An Efficient Hybrid Face Recognition Algorithm Using PCA and GABOR Wavelets,
+81eb804756f27d08f2d193d1074e58e1c5d263ca,Monocular 3D Human Pose Estimation Using Transfer Learning and Improved CNN Supervision,"Monocular 3D Human Pose Estimation Using Transfer Learning and Improved
+CNN Supervision
+Dushyant Mehta*, Helge Rhodin*, Dan Casass, Oleksandr Sotnychenko*, Weipeng Xu*, and Christian
+Theobalt*
+*Max Planck Institute For Informatics, Saarland Informatics Campus, Germany
+sUniversidad Rey Juan Carlos, Spain"
+81a142c751bf0b23315fb6717bc467aa4fdfbc92,Pairwise Trajectory Representation for Action Recognition,"978-1-5090-4117-6/17/$31.00 ©2017 IEEE
+ICASSP 2017"
+81bfe562e42f2eab3ae117c46c2e07b3d142dade,A Hajj And Umrah Location Classification System For Video Crowded Scenes,"A Hajj And Umrah Location Classification System For Video
+Crowded Scenes
+Hossam M. Zawbaa†
+Salah A. Aly†‡
+Adnan A. Gutub†
+Center of Research Excellence in Hajj and Umrah, Umm Al-Qura University, Makkah, KSA
+College of Computers and Information Systems, Umm Al-Qura University, Makkah, KSA"
+81695fbbbea2972d7ab1bfb1f3a6a0dbd3475c0f,Comparison of Face Recognition Neural Networks,"UNIVERSITY OF TARTU
+FACULTY OF SCIENCE AND TECHNOLOGY
+Institute of Computer Science
+Computer Science
+Zepp Uibo
+Comparison of Face Recognition
+Neural Networks
+Bachelor's thesis (6 ECTS)
+Supervisor: Tambet Matiisen
+Tartu 2016"
+8147ee02ec5ff3a585dddcd000974896cb2edc53,Angular Embedding: A Robust Quadratic Criterion,"Angular Embedding:
+A Robust Quadratic Criterion
+Stella X. Yu, Member,"
+8199803f476c12c7f6c0124d55d156b5d91314b6,The iNaturalist Species Classification and Detection Dataset,"The iNaturalist Species Classification and Detection Dataset
+Grant Van Horn1 Oisin Mac Aodha1 Yang Song2 Yin Cui3 Chen Sun2
+Alex Shepard4 Hartwig Adam2
+Pietro Perona1
+Serge Belongie3
+Caltech
+Google
+Cornell Tech
+iNaturalist"
+816617fa6801fb2abd3d4475c459bf6e3221954d,3D human detection and tracking on a mobile platform for situation awareness,"3D Human Detection and
+Tracking on a Mobile Platform
+for Situation Awareness
+Niklas Beuter"
+81e628a23e434762b1208045919af48dceb6c4d2,Attend and Rectify: A Gated Attention Mechanism for Fine-Grained Recovery,"Attend and Rectify: a Gated Attention
+Mechanism for Fine-Grained Recovery
+Pau Rodr´ıguez†, Josep M. Gonfaus‡, Guillem Cucurull†,
+F. Xavier Roca†, Jordi Gonz`alez†
+Computer Vision Center and Universitat Aut`onoma de Barcelona (UAB),
+Campus UAB, 08193 Bellaterra, Catalonia Spain
+Visual Tagging Services, Parc de Recerca, Campus UAB"
+811dff89b6d4657e5a0b8534e208baefd2204cee,Pseudo-Feature Generation for Imbalanced Data Analysis in Deep Learning,"Pseudo-Feature Generation for Imbalanced Data
+Analysis in Deep Learning
+Tomohiko Konno∗ and Michiaki Iwazume
+AI Science Research and Development Promotion Center
+National Institute of Information and Communications Technology, Tokyo Japan
+Figure 1: The sketch of proposed method. Left: train deep neural networks. Center: extract features
+from a layer, and then obtain multivariate probability distributions of the features, and then generate
+pseudo-features of minority classes from the probability distributions, and then re-train the layers
+below the layer. Right: Put the retrained layers back to the original one. (It is the last classifier that is
+re-trained and put back in the experiment.)"
+812725dc3968aaff6429ec7c3f44ba1ca2116013,Acoplamiento de micro multitudes para el desarrollo de videojuegos controlados por movimiento,"Acoplamiento de micro multitudes
+para el desarrollo de videojuegos
+controlados por movimiento
+Iv´an Rivalcoba1, Krely Rodr´ıguez2, Oriam Degives1, Isaac Rudom´ın3
+Tecnol´ogico de Monterrey, Campus Estado de M´exico,
+M´exico
+Tecnol´ogico de Minatitl´an,
+Minatitl´an, Veracruz, M´exico
+Barcelona Supercomputing Center
+Barcelona, Espa˜na
+Resumen. La simulaci´on de multitudes en tiempo real y los juegos controlados
+por movimiento se han vuelto muy populares en los ´ultimos a˜nos. En conjunto
+estas dos tecnolog´ıas proporcionan una mejor experiencia de juego en entornos
+virtuales logrando escenas m´as realistas y vibrantes. Sin embargo, hasta ahora no
+se ha explotado la interacci´on de m´ultiples jugadores con una gran multitud bajo
+un entorno virtual. En este trabajo presentamos un sistema no intrusivo capaz
+de simular multitudes virtuales acopladas en tiempo real con varios usuarios,
+sentando con ello las bases para la creaci´on de juegos donde interact´uen muchos
+jugadores con muchas personajes, para ello se realiza una detecci´on de personas
+en una secuencia de v´ıdeo, nuestra contribuci´on consiste en utilizar patrones"
+812a6ced985317b3b9429ef0455645a9744af6d1,No need for a social cue! A masked magician can also trick the audience in the vanishing ball illusion.,"Atten Percept Psychophys
+DOI 10.3758/s13414-015-1036-9
+No need for a social cue! A masked magician can also trick
+the audience in the vanishing ball illusion
+Cyril Thomas 1 & André Didierjean 1
+# The Psychonomic Society, Inc. 2015"
+81706277ed180a92d2eeb94ac0560f7dc591ee13,Emotion based Contextual Semantic Relevance Feedback in Multimedia Information Retrieval,"International Journal of Computer Applications (0975 – 8887)
+Volume 55– No.15, October 2012
+Emotion based Contextual Semantic Relevance
+Feedback in Multimedia Information Retrieval
+Karm Veer Singh
+Department of Computer Engineering, Indian
+Institute of Technology, Banaras Hindu
+University,Varanasi, 221005, India
+Anil K. Tripathi
+Department of Computer Engineering, Indian
+Institute of Technology, Banaras Hindu
+University,Varanasi, 221005, India
+find some
+issued by a user"
+81c03eda1d175fbe351980ac4cffe42c5dec47b0,User observation & dataset collection for robot training,"User Observation & Dataset Collection for Robot Training
+Caroline Pantofaru
+Willow Garage, Inc.
+Menlo Park, CA 94025
+Categories and Subject Descriptors:
+I.5.2 [Comput-
+ing Methodologies]: Pattern Recognition - Design Method-
+ology, H.1.2 [Information Systems]: Models and Principles -
+User/Machine Systems
+General Terms: Measurement
+INTRODUCTION
+Personal robots operate in human environments such as
+homes and of‌f‌ices, co-habiting with people. To effectively
+train robot algorithms for such scenarios, a large amount of
+training data containing both people and the environment is
+required. Collecting such data involves taking a robot into
+new environments, observing and interacting with people.
+So far, best practices for robot data collection have been
+undefined. Fortunately, the human-robot interaction com-
+munity has conducted field studies whose methodology can"
+81a51cd6ecd467abb1ef38c8e35bdf1885f96fe3,Deep Spatio-Temporal Random Fields for Efficient Video Segmentation,"Deep Spatio-Temporal Random Fields for Efficient Video Segmentation
+Siddhartha Chandra1
+Camille Couprie2
+INRIA GALEN, Ecole CentraleSup´elec Paris
+Iasonas Kokkinos2
+Facebook AI Research, Paris"
+81f30bc57b84a6e5b71983b50bdea32f32bee285,"The more fine-grained, the better for transfer learning","The more fine-grained, the better for transfer learning
+Anonymous Author(s)
+Affiliation
+Address
+email"
+81b2a541d6c42679e946a5281b4b9dc603bc171c,Semi-supervised learning with committees: exploiting unlabeled data using ensemble learning algorithms,"Universit¨at Ulm | 89069 Ulm | Deutschland
+Fakult¨at f¨ur Ingenieurwissenschaften und Informatik
+Institut f¨ur Neuroinformatik
+Direktor: Prof. Dr. G¨unther Palm
+Semi-Supervised Learning with Committees:
+Exploiting Unlabeled Data Using Ensemble
+Learning Algorithms
+Dissertation zur Erlangung des Doktorgrades
+Doktor der Naturwissenschaften (Dr. rer. nat.)
+der Fakult¨at f¨ur Ingenieurwissenschaften und Informatik
+der Universit¨at Ulm
+vorgelegt von
+Mohamed Farouk Abdel Hady
+aus Kairo, ¨Agypten
+Ulm, Deutschland"
+81ff6d7f934f7134d93b2039d788b72f8593693c,Accelerating Convolutional Neural Network Systems,"Accelerating Convolutional
+Neural Network Systems
+Henry G.R. Gouk
+This report is submitted in partial fulfillment of the requirements for the degree of
+Bachelor of Computing and Mathematical Sciences with Honours (BCMS(Hons))
+at The University of Waikato.
+COMP520-14C (HAM)
+© 2014 Henry G.R. Gouk"
+813e9f76fb9e3f007f0bc819eab66b0b5fbd8204,Towards Building Large Scale Multimodal Domain-Aware Conversation Systems,"Towards Building Large Scale Multimodal Domain-Aware Conversation Systems
+Amrita Saha1,2
+Mitesh M. Khapra2
+Karthik Sankaranarayanan1
+IBM Research AI
+I.I.T. Madras, India"
+81eecb00eeadb5fe36cd840b687439bfdca7ff30,Kernelized Saliency-Based Person Re-Identification Through Multiple Metric Learning,"JOURNAL OF LATEX CLASS FILES, VOL. 11, NO. 4, DECEMBER 2012
+Kernelized Saliency-based Person Re-Identification
+through Multiple Metric Learning
+Niki Martinel* Student Member, IEEE, Christian Micheloni, Member, IEEE, and Gian Luca Foresti, Senior
+Member, IEEE"
+81d327ec41c67728b15438bca86d10b72de1d88f,Visual Affordance and Function Understanding: A Survey,"JOURNAL OF LATEX CLASS FILES, VOL. 6, NO. 1, JULY 2018
+Visual Affordance and Function Understanding:
+A Survey
+Mohammed Hassanin, Salman Khan, Murat Tahtali"
+81d5c4b49fe17aaa3af837745cafdedb066a067d,Automatic Adaptive Center of Pupil Detection Using Face Detection and CDF Analysis,"Automatic Adaptive Center of Pupil Detection
+Using Face Detection and CDF Analysis
+Mansour Asadifard, Jamshid Shanbezadeh"
+819a321975c736e006870e76446d581e195cad2e,Deep Canonical Time Warping for Simultaneous Alignment and Representation Learning of Sequences,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+Deep Canonical Time Warping
+for simultaneous alignment and representation
+learning of sequences
+George Trigeorgis, Mihalis A. Nicolaou, Member, IEEE, Bj¨orn W. Schuller, Senior member, IEEE
+Stefanos Zafeiriou, Member, IEEE"
+81006fe4c4947d225b9fa17e6b98b8acb36a7692,A Dataset for Grasping and Manipulation using ROS,"A Dataset for Grasping and Manipulation using ROS
+Matei Ciocarlie†, Gary Bradski†, Kaijen Hsiao† and Peter Brook†∗"
+810eafc9e854ea9b1d7a9e9f755f8102310d5db6,Dynamic Multimodal Instance Segmentation Guided by Natural Language Queries,"Dynamic Multimodal Instance Segmentation
+Guided by Natural Language Queries
+Edgar Margffoy-Tuay, Juan C. P´erez, Emilio Botero, and Pablo Arbel´aez
+{ea.margffoy10, jc.perez13, e.botero10,
+Universidad de los Andes, Colombia"
+816c1925de9e8557fa70ec67d0ff71a5059eb931,Person Re-identification by Articulated Appearance Matching,"Person Re-identification by Articulated
+Appearance Matching
+Dong Seon Cheng and Marco Cristani"
+8160b3b5f07deaa104769a2abb7017e9c031f1c1,Exploiting discriminant information in nonnegative matrix factorization with application to frontal face verification,"Exploiting Discriminant Information in Nonnegative
+Matrix Factorization With Application
+to Frontal Face Verification
+Stefanos Zafeiriou, Anastasios Tefas, Member, IEEE, Ioan Buciu, and Ioannis Pitas, Senior Member, IEEE"
+81fc46dd71121cfafbb11455745ae62f6eca0b25,Joint Camera Pose Estimation and 3D Human Pose Estimation in a Multi-camera Setup,"Joint Camera Pose Estimation and 3D Human
+Pose Estimation in a Multi-Camera Setup
+Jens Puwein1, Luca Ballan1, Remo Ziegler2 and Marc Pollefeys1
+Department of Computer Science, ETH Zurich, Switzerland
+Vizrt"
+814d091c973ff6033a83d4e44ab3b6a88cc1cb66,The EU-Emotion Stimulus Set: A validation study.,"Behav Res (2016) 48:567–576
+DOI 10.3758/s13428-015-0601-4
+The EU-Emotion Stimulus Set: A validation study
+Helen O’Reilly 1,2 & Delia Pigat 1 & Shimrit Fridenson 5 & Steve Berggren 3,4 & Shahar Tal 5 &
+Ofer Golan 5 & Sven Bölte 3,4 & Simon Baron-Cohen 1,6 & Daniel Lundqvist 3
+Published online: 30 September 2015
+# Psychonomic Society, Inc. 2015"
+816eff5e92a6326a8ab50c4c50450a6d02047b5e,fLRR: Fast Low-Rank Representation Using Frobenius Norm,"fLRR: Fast Low-Rank Representation Using
+Frobenius Norm
+Haixian Zhang, Zhang Yi, and Xi Peng
+Low Rank Representation (LRR) intends to find the representation
+with lowest-rank of a given data set, which can be formulated as a
+rank minimization problem. Since the rank operator is non-convex and
+discontinuous, most of the recent works use the nuclear norm as a convex
+relaxation. This letter theoretically shows that under some conditions,
+Frobenius-norm-based optimization problem has an unique solution that
+is also a solution of the original LRR optimization problem. In other
+words, it is feasible to apply Frobenius-norm as a surrogate of the
+nonconvex matrix rank function. This replacement will largely reduce the
+time-costs for obtaining the lowest-rank solution. Experimental results
+show that our method (i.e., fast Low Rank Representation, fLRR),
+performs well in terms of accuracy and computation speed in image
+clustering and motion segmentation compared with nuclear-norm-based
+LRR algorithm.
+Introduction: Given a data set X ∈ Rm×n(m < n) composed of column
+vectors, let A be a data set composed of vectors with the same dimension
+as those in X. Both X and A can be considered as matrices. A linear"
+81ed28ea6cfe71bfc4cfc35c6695fa07dd7cc42e,"Deep Episodic Memory: Encoding, Recalling, and Predicting Episodic Experiences for Robot Action Execution","Deep Episodic Memory: Encoding, Recalling, and Predicting
+Episodic Experiences for Robot Action Execution
+Jonas Rothfuss∗†, Fabio Ferreira∗†, Eren Erdal Aksoy ‡, You Zhou† and Tamim Asfour†"
+81ede08b36f3abd423424804da8ff240606b3a5d,Top-Down Deep Appearance Attention for Action Recognition,"Top-Down Deep Appearance Attention for
+Action Recognition
+Rao Muhammad Anwer1, Fahad Shahbaz Khan2, Joost van de Weijer3, Jorma
+Laaksonen1
+Department of Computer Science, Aalto University School of Science, Finland
+Computer Vision Laboratory, Linköping University, Sweden
+Computer Vision Center, CS Dept. Universitat Autonoma de Barcelona, Spain"
+810d60ff5c0106de53a48fa2731eacf5ca2377b6,MultiQ: single sensor-based multi-quality multi-modal large-scale biometric score database and its performance evaluation,"Uddin et al. IPSJ Transactions on Computer Vision and
+Applications (2017) 9:18
+DOI 10.1186/s41074-017-0029-0
+IPSJ Transactions on Computer
+Vision and Applications
+TECHNICAL NOTE
+Open Access
+MultiQ: single sensor-based multi-quality
+multi-modal large-scale biometric score
+database and its performance evaluation
+Md. Zasim Uddin*, Daigo Muramatsu, Takuhiro Kimura, Yasushi Makihara and Yasushi Yagi"
+8149c30a86e1a7db4b11965fe209fe0b75446a8c,Semi-supervised multiple instance learning based domain adaptation for object detection,"Semi-Supervised Multiple Instance Learning based
+Domain Adaptation for Object Detection
+Siemens Corporate Research
+Siemens Corporate Research
+Siemens Corporate Research
+Amit Kale
+Bangalore
+Chhaya Methani
+Bangalore
+{chhaya.methani,
+Rahul Thota
+Bangalore
+rahul.thota,"
+815069f591122aa7b388615f944c17c7fa1eff14,Constrained Overcomplete Analysis Operator Learning for Cosparse Signal Modelling,"Constrained Overcomplete Analysis Operator
+Learning for Cosparse Signal Modelling
+Mehrdad Yaghoobi, Sangnam Nam, R´emi Gribonval and Mike E. Davies"
+81b6de17391f44c07b2efe75a529aa200604ee48,Machine à Vecteurs Supports Multi-Noyau pour la détection de points caractéristiques du visage,"Machine à Vecteurs Supports Multi-Noyau pour la détection de points
+caractéristiques du visage
+Vincent Rapp1, Thibaud Senechal1, Kevin Bailly1, Lionel Prevost2
+ISIR - CNRS UMR 7222
+Université Pierre et Marie Curie, Paris
+LAMIA - EA 4540
+Université des Antilles et de la Guyanne
+{rapp, senechal,
+Résumé
+Dans cet article, nous présentons une méthode robuste
+et précise pour détecter 17 points caractéristiques du vi-
+sage sur des images expressives. Une nouvelle architecture
+multi-résolution basée sur les récents algorithmes multi-
+noyau est introduite. Les patches de faibles résolutions
+codent les informations globales du visage donnant lieu à
+une détection grossière mais robuste du point désiré. Les
+patches de grandes résolutions quant à eux utilisent les dé-
+tails locaux afin d’affiner cette localisation. En combinant
+une détection indépendante de points et des informations
+a priori sur les distributions de points, nous proposons"
+819d1dcea397e6e671acf74adccdef5750550873,Representations for Visually Guided Actions,"Representations for Visually Guided Actions
+Saurabh Gupta
+Electrical Engineering and Computer Sciences
+University of California at Berkeley
+Technical Report No. UCB/EECS-2018-104
+http://www2.eecs.berkeley.edu/Pubs/TechRpts/2018/EECS-2018-104.html
+August 8, 2018"
+8121824f4598d600e4cdb745cd2715e4655c9e88,A Taxonomy of Emerging Multilinear Discriminant Analysis Solutions for Biometric Signal Recognition,"Contents
+A Taxonomy of Emerging Multilinear Discriminant Analysis Solutions
+for Biometric Signal Recognition
+Haiping Lu, K. N. Plataniotis and A. N. Venetsanopoulos
+Introduction
+.2 Multilinear basics
+.3 Multilinear discriminant analysis
+.5 Conclusions
+Empirical Comparison of MLDA variants on Face Recognition
+Appendix: Multilinear decompositions
+References"
+81c3d1be0c69e9d3e13054969e4b67ee69a4e6f0,Dynamical Models for Neonatal Intensive Care Monitoring,"This thesis has been submitted in fulfilment of the requirements for a postgraduate degree
+(e.g. PhD, MPhil, DClinPsychol) at the University of Edinburgh. Please note the following
+terms and conditions of use:
+This work is protected by copyright and other intellectual property rights, which are
+retained by the thesis author, unless otherwise stated.
+A copy can be downloaded for personal non-commercial research or study, without
+prior permission or charge.
+This thesis cannot be reproduced or quoted extensively from without first obtaining
+permission in writing from the author.
+The content must not be changed in any way or sold commercially in any format or
+medium without the formal permission of the author.
+When referring to this work, full bibliographic details including the author, title,
+awarding institution and date of the thesis must be given."
+81eb9fca9093f58eabb8850512f8f46fe2bb07a2,Sem-GAN: Semantically-Consistent Image-to-Image Translation,"Sem-GAN: Semantically-Consistent Image-to-Image Translation
+Anoop Cherian
+Alan Sullivan
+Mitsubishi Electric Research Labs (MERL), Cambridge, MA
+{cherian,"
+818dcb3bac6342c02eebd896cd0a46bcf2192b64,Unified Structured Learning for Simultaneous Human Pose Estimation and Garment Attribute Classification,"Unified Structured Learning for Simultaneous
+Human Pose Estimation and Garment Attribute
+Classification
+Jie Shen, Guangcan Liu, Member, IEEE, Jia Chen, Yuqiang Fang, Jianbin Xie, Member, IEEE, Yong Yu,
+and Shuicheng Yan, Senior Member, IEEE"
+8134b052a9aedd573dd16649a611f68b48e30cb2,InverseFaceNet: Deep Single-Shot Inverse Face Rendering From A Single Image,"InverseFaceNet: Deep Single-Shot Inverse Face Rendering From A Single Image
+Hyeongwoo Kim1
+Justus Thies2
+Max-Planck-Institute for Informatics
+Michael Zollhöfer1
+Christian Richardt3
+University of Erlangen-Nuremberg 3 University of Bath
+Christian Theobalt1
+Ayush Tewari1
+Figure 1. Our single-shot deep inverse face renderer InverseFaceNet obtains a high-quality geometry, reflectance and illumination estimate
+from just a single input image. We jointly recover the face pose, shape, expression, reflectance and incident scene illumination. From left to
+right: input photo, our estimated face model, its geometry, and the pointwise Euclidean error compared to Garrido et al. [14]."
+862f19f8317971fabc46cf0f994f4a8616f17b78,Human Re-identification through Distance Metric Learning based on Jensen-Shannon Kernel,"HUMAN RE-IDENTIFICATION THROUGH DISTANCE METRIC
+LEARNING BASED ON JENSEN-SHANNON KERNEL
+Yoshihisa Ijiri1, Shihong Lao2, Tony X. Han3 and Hiroshi Murase4
+Corporate R&D, OMRON Corp., Kizugawa, Kyoto, Japan
+OMRON Social Solutions Co. Ltd., Kizugawa, Kyoto, Japan
+Electrical & Computer Engineering Dept., Univ. of Missouri, Columbia, MO, U.S.A.
+Graduate School of Information Science, Nagoya Univ., Chigusaku, Nagoya, Japan
+Keywords:
+Human Re-identification, Distance Metric Learning, Jensen-Shannon Kernel."
+86614c2d2f6ebcb9c600d4aef85fd6bf6eab6663,Benchmarks for Cloud Robotics,"Benchmarks for Cloud Robotics
+Arjun Singh
+Electrical Engineering and Computer Sciences
+University of California at Berkeley
+Technical Report No. UCB/EECS-2016-142
+http://www.eecs.berkeley.edu/Pubs/TechRpts/2016/EECS-2016-142.html
+August 12, 2016"
+86b69b3718b9350c9d2008880ce88cd035828432,Improving Face Image Extraction by Using Deep Learning Technique,"Improving Face Image Extraction by Using Deep Learning Technique
+Zhiyun Xue, Sameer Antani, L. Rodney Long, Dina Demner-Fushman, George R. Thoma
+National Library of Medicine, NIH, Bethesda, MD"
+86904aee566716d9bef508aa9f0255dc18be3960,Learning Anonymized Representations with Adversarial Neural Networks,"Learning Anonymized Representations with
+Adversarial Neural Networks
+Cl´ement Feutry, Pablo Piantanida, Yoshua Bengio, and Pierre Duhamel"
+8602b2ef26a0f851f1f6f2f2ae0ce142eb64300a,Is it a face ? How to find and validate a face on 3D scans,"Is it a face ? How to find and validate a face on 3D scans
+Przemyslaw Szeptycki,
+Mohsen Ardabilian,
+Liming Chen
+Ecole Centrale de Lyon, 36 av. Guy de Collongue, 69134 Lyon, France
+{przemyslaw.szeptycki, mohsen.ardabilian,
+Introduction"
+867e709a298024a3c9777145e037e239385c0129,Analytical Representation of Undersampled Face Recognition Approach Based on Dictionary Learning and Sparse Representation,"INTERNATIONAL JOURNAL
+OF PROFESSIONAL ENGINEERING STUDIES Volume VIII /Issue 2 / FEB 2017
+ANALYTICAL REPRESENTATION OF UNDERSAMPLED FACE
+RECOGNITION APPROACH BASED ON DICTIONARY LEARNING
+AND SPARSE REPRESENTATION
+Murala Sandeep1 A.Mallikarjuna Reddy2 P.Rajashaker Reddy3 Dr. G. Vishnu murthy4
+(M.Tech)1, Assistant Professor2, Assistant Professor3, HOD of CSE Department4
+Anurag group of institutions Ghatkesar, Ranga Reddy, Hyderabad, India"
+869a2fbe42d3fdf40ed8b768edbf54137be7ac71,Relative Attributes for Enhanced Human-Machine Communication,"Relative Attributes for Enhanced Human-Machine Communication
+Devi Parikh1, Adriana Kovashka3, Amar Parkash2, and Kristen Grauman3
+Toyota Technological Institute, Chicago
+Indraprastha Institute of Information Technology, Delhi
+University of Texas, Austin"
+86b1751b265b289b09de79956e77a01d82e12086,Face recognition in multi-camera surveillance videos,"1st International Conference on Pattern Recognition (ICPR 2012)
+November 11-15, 2012. Tsukuba, Japan
+978-4-9906441-1-6 ©2012 IAPR"
+8645fe95f3f503f854b08096c2874a3f7ea6b79b,BoxCars: 3D Boxes as CNN Input for Improved Fine-Grained Vehicle Recognition,"BoxCars: 3D Boxes as CNN Input
+for Improved Fine-Grained Vehicle Recognition
+Jakub Sochor∗, Adam Herout, Jiří Havel
+Brno University of Technology
+Brno, Czech Republic"
+86e5f81bde496549e9df2b1abdef0879a3135adb,The Visual QA Devil in the Details: The Impact of Early Fusion and Batch Norm on CLEVR,"The Visual QA Devil in the Details: The Impact
+of Early Fusion and Batch Norm on CLEVR
+Mateusz Malinowski and Carl Doersch
+DeepMind, London, United Kingdom
+Introduction
+Visual QA is a pivotal challenge for higher-level reasoning [1,2,3,4], requiring
+understanding language, vision, and relationships between many objects in a
+scene. Although datasets like CLEVR [5] are designed to be unsolvable with-
+out such complex relational reasoning, some surprisingly simple feed-forward,
+“holistic” models have recently shown strong performance on this dataset [6,7].
+These models lack any kind of explicit iterative, symbolic reasoning procedure,
+which are hypothesized to be necessary for counting objects, narrowing down
+the set of relevant objects based on several attributes, etc. The reason for this
+strong performance is poorly understood. Hence, our work analyzes such mod-
+els, and finds that minor architectural elements are crucial to performance. In
+particular, we find that early fusion of language and vision provides large per-
+formance improvements. This contrasts with the late fusion approaches popular
+at the dawn of Visual QA [5,8,9,10]. We propose a simple module we call Mul-
+timodal Core (MC), which we hypothesize performs the fundamental operations
+for multimodal tasks. We believe that understanding why these elements are so"
+86cdc6ae46f53ac86b9e0ace2763c5fe15633055,Experimental Force-Torque Dataset for Robot Learning of Multi-Shape Insertion,"Experimental Force-Torque Dataset for Robot Learning of Multi-Shape Insertion
+Giovanni De Magistris1, Asim Munawar1, Tu-Hoa Pham1, Tadanobu Inoue1,
+Phongtharin Vinayavekhin1, Ryuki Tachibana1
+IBM Research - Tokyo, Japan
+The accurate modeling of real-world systems and
+physical interactions is a common challenge towards the
+resolution of robotics tasks. Machine learning approaches
+have demonstrated significant results in the modeling of
+complex systems (e.g., articulated robot structures, ca-
+ble stretch, fluid dynamics), or to learn robotics tasks
+(e.g., grasping, reaching) from raw sensor measurements
+without explicit programming, using reinforcement learn-
+ing. However, a common bottleneck in machine learn-
+ing techniques resides in the availability of suitable data.
+While many vision-based datasets have been released in
+the recent years, ones involving physical interactions, of
+particular interest for the robotic community, have been
+scarcer. In this paper, we present a public dataset on peg-
+in-hole insertion tasks containing force-torque and pose
+information for multiple variations of convex-shaped pegs."
+86c053c162c08bc3fe093cc10398b9e64367a100,Cascade of forests for face alignment,"Cascade of Forests for Face Alignment
+Heng Yang, Changqing Zou, Ioannis Patras"
+861802ac19653a7831b314cd751fd8e89494ab12,"Time-of-Flight and Depth Imaging. Sensors, Algorithms, and Applications","Marcin Grzegorzek, Christian Theobalt, Reinhard Koch,
+Andreas Kolb
+Time-of-Flight and Depth Imaging. Sensors, Algorithms
+and Applications: Dagstuhl Seminar 2012 and GCPR
+Workshop on Imaging New Modalities (Lecture ... Vision,
+Pattern Recognition, and Graphics)
+Publisher: Springer; 2013 edition
+(November 8, 2013)
+Language: English
+Pages: 320
+ISBN: 978-3642449635
+Size: 20.46 MB
+Format: PDF / ePub / Kindle
+Cameras for 3D depth imaging, using
+either time-of-flight (ToF) or
+structured light sensors, have received
+a lot of attention recently and have
+been improved considerably over the
+last few years. The present
+techniques..."
+8646f22a46b65c2018bc39ad3cbdb939e788a1fc,Learning a Confidence Measure for Optical Flow,"Learning a Confidence Measure
+for Optical Flow
+Oisin Mac Aodha, Ahmad Humayun, Marc Pollefeys and Gabriel J. Brostow"
+8641593c67d87d81e528448a527e45fc9a5aa145,Complex Urban LiDAR Data Set,"Complex Urban LiDAR Data Set
+Jinyong Jeong1, Younggun Cho1, Young-Sik Shin1, Hyunchul Roh1 and Ayoung Kim1
+Fig. 1: This paper provides the complex urban data set including metropolitan area, apartment building complex and
+underground parking lot. Sample scenes from the data set can be found in https://youtu.be/IguZjmLf5V0."
+861b12f405c464b3ffa2af7408bff0698c6c9bf0,An Effective Technique for Removal of Facial Dupilcation by SBFA,"International Journal on Recent and Innovation Trends in Computing and Communication ISSN: 2321-8169
+Volume: 3 Issue: 5
+3337 - 3342
+_______________________________________________________________________________________________
+An Effective Technique for Removal of Facial Dupilcation by SBFA
+Miss. Deepika B. Patil
+Computer Department,
+GHRCEM,
+Pune, India
+Dr. Ayesha Butalia
+Computer Department,
+GHRCEM,
+Pune, India"
+869df5e8221129850e81e77d4dc36e6c0f854fe6,A metric for sets of trajectories that is practical and mathematically consistent,"A metric for sets of trajectories that is
+practical and mathematically consistent
+Jos´e Bento
+Jia Jie Zhu"
+86c1bf121851aa901e3e7eb11a3b8cc5a08a921b,"Motion, Blur, Illumination based Face Recognition","ISSN: 2455-5797 International Journal of Innovative Works in Engineering and Technology (IJIWET)
+Motion, Blur, Illumination based Face Recognition
+Anand M.S
+PG Student
+Department of ECE
+Satyam College of Engineering
+E-mail :"
+86e1bdbfd13b9ed137e4c4b8b459a3980eb257f6,The Kinetics Human Action Video Dataset,"The Kinetics Human Action Video Dataset
+Will Kay
+Jo˜ao Carreira
+Karen Simonyan
+Brian Zhang
+Chloe Hillier
+Sudheendra Vijayanarasimhan
+Fabio Viola
+Tim Green
+Trevor Back
+Paul Natsev
+Mustafa Suleyman
+Andrew Zisserman"
+86b6de59f17187f6c238853810e01596d37f63cd,Competitive Representation Based Classification Using Facial Noise Detection,"(IJACSA) International Journal of Advanced Computer Science and Applications,
+Vol. 7, No. 3, 2016
+Competitive Representation Based Classification
+Using Facial Noise Detection
+Tao Liu
+Ying Liu
+Chongqing Key Laboratory of Computational Intelligence
+College of Computer Science and Technology, Chongqing
+Chongqing Key Laboratory of Computational Intelligence
+College of Computer Science and Technology, Chongqing
+University of Posts and Telecommunications
+University of Posts and Telecommunications
+Chongqing, China
+Chongqing, China
+Cong Li
+Chao Li
+Chongqing Key Laboratory of Computational Intelligence
+College of Computer Science and Technology, Chongqing
+Chongqing Key Laboratory of Computational Intelligence
+College of Computer Science and Technology, Chongqing"
+86e87d276b5b01a6b4b09b5487781fab740aca2e,Deep Ranking Model by Large Adaptive Margin Learning for Person Re-identification,"Deep Ranking Model by Large Adaptive Margin Learning for
+Person Re-identification
+Jiayun Wanga, Sanping Zhoua, Jinjun Wanga,∗, Qiqi Houa
+The institute of artificial intelligence and robotic, Xi’an Jiaotong University, Xianning West Road
+No.28, Shaanxi, 710049, P.R. China"
+860196a306c9303ddaf323d702dacba68db658d2,Open-Ended Content-Style Recombination Via Leakage Filtering,"OPEN-ENDED CONTENT-STYLE RECOMBINATION
+VIA LEAKAGE FILTERING
+Karl Ridgeway+∗ & Michael C. Mozer+†
++ Department of Computer Science, University of Colorado, Boulder
+Sensory, Inc.
+presently at Google Brain, Mountain View"
+86b105c3619a433b6f9632adcf9b253ff98aee87,A Mutual Information based Face Clustering Algorithm for Movies,"­4244­0367­7/06/$20.00 ©2006 IEEE
+ICME 2006"
+8616ff1d0fd7bcfc5fd81d1e8a9b189c21f3b93d,Visual Reference Resolution using Attention Memory for Visual Dialog,"Visual Reference Resolution using Attention Memory
+for Visual Dialog
+Paul Hongsuck Seo†
+POSTECH
+Andreas Lehrmann§
+{hsseo, {andreas.lehrmann,
+Bohyung Han†
+§Disney Research
+Leonid Sigal§"
+8609035f1b9fa5bddfbbffd287a98ba47a1ecba0,Making Bertha See,"Making Bertha See
+Uwe Franke, David Pfeiffer, Clemens Rabe, Carsten Knoeppel,
+Markus Enzweiler, Fridtjof Stein, and Ralf G. Herrtwich
+Daimler AG - Research & Development, 71059 Sindelfingen, Germany"
+86be567bab1293ed847979d2c56a662fcbcbc1d5,Exploiting View-Specific Appearance Similarities Across Classes for Zero-Shot Pose Prediction: A Metric Learning Approach,"Exploiting View-Specific Appearance Similarities Across Classes for
+Zero-shot Pose Prediction: A Metric Learning Approach
+Alina Kuznetsova
+Leibniz University Hannover
+Appelstr 9A, 30169
+Hannover, Germany
+Sung Ju Hwang
+UNIST
+50 UNIST-gil, 689798
+Ulsan, Korea
+Bodo Rosenhahn
+Leibniz University Hannover
+Appelstr 9A, 30169
+Hannover, Germany
+Leonid Sigal
+Disney Research
+720 Forbes Avenue, 15213
+Pittsburgh, PA, US"
+8627248c6e3c3e316e3964d12e0a44e23aa969f3,Automated Annotations,"Automated Annotations
+Richard Brath and Martin Matusiak*
+Uncharted Software Inc."
+72ef0ac03d3043bf664ca7c21abafc4191b24557,Towards Safe Autonomous Driving: Capture Uncertainty in the Deep Neural Network For Lidar 3D Vehicle Detection,"Towards Safe Autonomous Driving: Capture Uncertainty in the Deep
+Neural Network For Lidar 3D Vehicle Detection
+Di Feng1, Lars Rosenbaum1, Klaus Dietmayer2"
+7214d9356398aa39923c69650bcf761d4ab6307f,Improving Spatial Saliency Using Affinity Model and Temporal Motion,"Int'l Conf. IP, Comp. Vision, and Pattern Recognition | IPCV'15 |
+Improving Spatial Saliency Using
+Affinity Model and Temporal Motion
+Dept. of Computer and Communications Engineering, Kangwon National University
+Manbae Kim
+Chunchon, Gangwondo, Republic of Korea
+E-mail:"
+721fbc63a647239158bf817311d1c084455398e9,Shape-based automatic detection of a large number of 3D facial landmarks,"Shape-based Automatic Detection of a Large Number of 3D Facial Landmarks
+Syed Zulqarnain Gilani, Faisal Shafait, Ajmal Mian
+School of Computer Science and Software Engineering,The University of Western Australia.
+Figure 3: Histogram of mean localization error for 18 landmarks on 4,007
+scans of FRGCv2 dataset (18× 4007 Landmarks).
+Mean Localization Error(mm)
+Neutral
+Non−Neutral
+Neutral
+Level−1
+Level−2
+Level−3
+Level−4
+Figure 1: Our algorithm automatically detects an arbitrarily large number of
+facial landmarks by establishing dense correspondences between 3D faces.
+The figure shows 85 landmarks detected (red) on neutral and extreme anger
+expression of a subject from BU3DFE database [3]. The ground truth is
+represented by blue dots.
+2202
+Mean Localization Error(mm)"
+72a87f509817b3369f2accd7024b2e4b30a1f588,Fault diagnosis of a railway device using semi-supervised independent factor analysis with mixing constraints,"Fault diagnosis of a railway device using semi-supervised
+independent factor analysis with mixing constraints
+Etienne Côme, Latifa Oukhellou, Thierry Denoeux, Patrice Aknin
+To cite this version:
+Etienne Côme, Latifa Oukhellou, Thierry Denoeux, Patrice Aknin. Fault diagnosis of a railway device
+using semi-supervised independent factor analysis with mixing constraints. Pattern Analysis and
+Applications, Springer Verlag, 2012, 15 (3), pp.313-326. <hal-00750589>
+HAL Id: hal-00750589
+https://hal.archives-ouvertes.fr/hal-00750589
+Submitted on 11 Nov 2012
+HAL is a multi-disciplinary open access
+archive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+abroad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de"
+72a00953f3f60a792de019a948174bf680cd6c9f,Understanding the role of facial asymmetry in human face identification,"Stat Comput (2007) 17:57–70
+DOI 10.1007/s11222-006-9004-9
+Understanding the role of facial asymmetry in human face
+identification
+Sinjini Mitra · Nicole A. Lazar · Yanxi Liu
+Received: May 2005 / Accepted: September 2006 / Published online: 30 January 2007
+C(cid:1) Springer Science + Business Media, LLC 2007"
+725597072c76dad5caa92b7baa6e1c761addc300,Deep adversarial neural decoding,"Deep adversarial neural decoding
+Yağmur Güçlütürk*, Umut Güçlü*,
+Katja Seeliger, Sander Bosch,
+Rob van Lier, Marcel van Gerven,
+Radboud University, Donders Institute for Brain, Cognition and Behaviour
+Nijmegen, the Netherlands
+*Equal contribution"
+727ecf8c839c9b5f7b6c7afffe219e8b270e7e15,Leveraging Geo-referenced Digital Photographs a Dissertation Submitted to the Department of Computer Science and the Committee on Graduate Studies of Stanford University in Partial Fulfillment of the Requirements for the Degree of Doctor of Philosophy,"LEVERAGING GEO-REFERENCED DIGITAL PHOTOGRAPHS
+A DISSERTATION
+SUBMITTED TO THE DEPARTMENT OF COMPUTER SCIENCE
+AND THE COMMITTEE ON GRADUATE STUDIES
+OF STANFORD UNIVERSITY
+IN PARTIAL FULFILLMENT OF THE REQUIREMENTS
+FOR THE DEGREE OF
+DOCTOR OF PHILOSOPHY
+Mor Naaman
+July 2005"
+7278f4c361f960b2e54275c5efd98535f9ccaded,Image Based Recognition of Dynamic Traffic Situations by Evaluating the Exterior Surrounding and Interior Space of Vehicles,"IMAGE BASED RECOGNITION OF DYNAMIC TRAFFIC SITUATIONS BY
+EVALUATING THE EXTERIOR SURROUNDING AND INTERIOR SPACE OF VEHICLES
+Photogrammetry & Remote Sensing, Technische Universitaet Muenchen, Germany - (alexander.hanel, ludwig.hoegner,
+BMW Research & Technology, Muenchen, Germany -
+A. Hanela, H. Klödenb, L. Hoegnera, U. Stillaa
+KEY WORDS: vehicle camera system, crowd sourced data, image analysis, machine learning, object detection, illumination recogni-
+tion, traffic situation recognition"
+722221f6c696b4a7cc094748aaad8158990ec41e,3D facial expression recognition: A perspective on promises and challenges,"3D Facial Expression Recognition:
+A Perspective on Promises and Challenges
+T. Fang, X. Zhao, O. Ocegueda, S.K. Shah and I.A. Kakadiaris*"
+72ecaff8b57023f9fbf8b5b2588f3c7019010ca7,Facial Keypoints Detection,"Facial Keypoints Detection
+Shenghao Shi"
+72edc24c67c34b5f2c98086a689bf0f3591e393d,An Introduction to Image Synthesis with Generative Adversarial Nets,"An Introduction to Image Synthesis with
+Generative Adversarial Nets
+He Huang, Phillip S. Yu and Changhu Wang"
+72591a75469321074b072daff80477d8911c3af3,Group Component Analysis for Multiblock Data: Common and Individual Feature Extraction,"Group Component Analysis for Multi-block Data:
+Common and Individual Feature Extraction
+Guoxu Zhou, Andrzej Cichocki Fellow, IEEE, Yu Zhang, and Danilo Mandic Fellow, IEEE"
+72a1ecfcd5f0b022fef49cab72bb476e41dea40e,Bag-of-features representations using spatial visual vocabularies for object classification,"BAG-OF-FEATURES REPRESENTATIONS USING SPATIAL VISUAL VOCABULARIES FOR
+OBJECT CLASSIFICATION
+Rene Grzeszick, Leonard Rothacker, Gernot A. Fink
+TU Dortmund
+Email: {rene.grzeszick, leonard.rothacker,
+Department of Computer Science"
+729a9d35bc291cc7117b924219bef89a864ce62c,Recognizing Material Properties from Images,"Recognizing Material Properties from Images
+Gabriel Schwartz and Ko Nishino, Senior Member, IEEE"
+7249b263d0a84d2d9d03f2f7b378778d129f9af9,Research Statement Research Focus,"RESEARCH STATEMENT
+Ryan Farrell
+In recent years, the topic of object detection/recognition has rapidly gained in popularity and is now
+perhaps the most actively researched topic in computer vision. Object detection algorithms are becoming
+prevalent in consumer devices such as digital cameras (real-time face detection) and automobiles (pedestrian
+detection systems for collision avoidance are already available and will be a standard feature on new cars
+within a few years). Object recognition technology is quickly becoming widespread in smartphone apps;
+examples include Google Goggles, Amazon Flow and Leafsnap. I believe we are at a ‘tipping point’ towards
+the impending ubiquity of computer vision, specifically object recognition, in our everyday lives.
+RESEARCH FOCUS
+My research in object recognition focuses specifically on Fine-grained Visual Categorization (sometimes
+abbreviated FGVC). For many years, computer vision has focused on classifying an object in several basic-
+level categories such as person, car, frog, or piano. At the opposing end of the categorization spectrum
+(see Figure ) is biometric identification - recognizing individuals within a population (e.g. face recognition or
+recognizing individual whales by unique fluke patterns). Between these two extremes lie what are called entry-
+and subordinate-level categories. Entry-level categories include penguin, owl, etc.; people generally use these
+more specific labels instead of simply saying “bird” (the basic-level category). Subordinate-level categories
+are highly specific. Continuing with the example of birds, categorizing at the subordinate-level would require
+differentiating two quite similar species (such as the Red-breasted and White-breasted Nuthatches). Fine-
+grained recognition addresses this situation where categories are distinguised by very subtle differences."
+721d9c387ed382988fce6fa864446fed5fb23173,Assessing Facial Expressions in Virtual Reality Environments,
+72c0c8deb9ea6f59fde4f5043bff67366b86bd66,Age progression in Human Faces : A Survey,"Age progression in Human Faces : A Survey
+Narayanan Ramanathan, Rama Chellappa and Soma Biswas"
+727d03100d4a8e12620acd7b1d1972bbee54f0e6,von Mises-Fisher Mixture Model-based Deep learning: Application to Face Verification,"von Mises-Fisher Mixture Model-based Deep
+learning: Application to Face Verification
+Md. Abul Hasnat, Julien Bohn´e, Jonathan Milgram, St´ephane Gentric and Liming Chen"
+728a8c4ed6b5565a250bd1e0587293a6a97f515b,Arguing Machines: Human Supervision of Black Box AI Systems That Make Life-Critical Decisions,"Arguing Machines: Human Supervision of Black Box
+AI Systems That Make Life-Critical Decisions
+Lex Fridman*
+Li Ding
+Massachusetts Institute of Technology (MIT)
+Benedikt Jenik
+Bryan Reimer
+Figure 1: “Arguing machines” framework that adds a secondary system to a primary “black box” AI system that makes life-
+critical decisions and uses disagreement between the two as a signal to seek human supervision. We demonstrate that this can
+be a powerful way to reduce overall system error."
+72a6044a0108e0f8f1e68cd70ada46c81a416324,Improved Training of Generative Adversarial Networks Using Representative Features,"Improved Training of Generative Adversarial Networks
+using Representative Features
+Duhyeon Bang 1 Hyunjung Shim 1"
+72ef87fb1a49f0e386f123a6b4f5566f51a3a47d,Minimizing Latency for Secure Coded Computing Using Secret Sharing via Staircase Codes,"Minimizing Latency for Secure Coded Computing
+Using Secret Sharing via Staircase Codes
+Rawad Bitar, Parimal Parag, and Salim El Rouayheb"
+7276a3ffa0941524083ac0fa9f0129746bca65d7,Multi-scale Deep Learning Architectures for Person Re-identification,"Multi-scale Deep Learning Architectures for Person Re-identification
+Xuelin Qian1 Yanwei Fu2,5,* Yu-Gang Jiang1,3 Tao Xiang4 Xiangyang Xue1,2
+Shanghai Key Lab of Intelligent Info. Processing, School of Computer Science, Fudan University;
+School of Data Science, Fudan University; 3Tencent AI Lab;
+Queen Mary University of London; 5University of Technology Sydney;"
+72f4aaf7e2e3f215cd8762ce283988220f182a5b,Active illumination and appearance model for face alignment,"Turk J Elec Eng & Comp Sci, Vol.18, No.4, 2010, c(cid:2) T ¨UB˙ITAK
+doi:10.3906/elk-0906-48
+Active illumination and appearance model for face
+alignment
+Fatih KAHRAMAN1, Muhittin G ¨OKMEN 2, Sune DARKNER3, Rasmus LARSEN3
+Institute of Informatics, ˙Istanbul Technical University, ˙Istanbul, 34469, TURKEY
+Department of Computer Engineering, ˙Istanbul Technical University, ˙Istanbul, 34469, TURKEY
+DTU Informatics, Technical University of Denmark, DK-2800 Kgs. Lyngby, DENMARK
+e-mail:
+e-mail:
+e-mail: {sda,"
+72944b4266523effe97708bff89e1d57d6aebf50,"A Multi-Sensory, Automated and Accelerated Sensory Integration Program","A Multi-Sensory, Automated and Accelerated
+Sensory Integration Program
+The Research
+Below are several published research reports that document the efficacy of a
+singular program such as auditory therapy or visual therapy alone as well as the
+use of multi-sensory programs using one or more sensory programs together.
+This is only a sample of the volumes of research that has been done.
+Multisensory integration of cross-modal stimulus combinations yielded responses
+that were significantly greater than those evoked by the best component
+stimulus. J Neurophysiol 97: 3193–3205, 2007. doi:10.1152/jn.00018.2007.
+Multisensory Versus Unisensory Integration: Contrasting Modes in the Superior
+Colliculus, Juan Carlos Alvarado, J. William Vaughan, Terrence R. Stanford, and
+Barry E. Stein
+Department of Neurobiology and Anatomy, Wake Forest University School of
+Medicine, Winston-Salem, North Carolina
+When sound and touch were activated simultaneously, the activation of the
+auditory cortex was strongest. Auditory information in conjunction with tactile
+input assists with making tactile decisions. Tactile and auditory stimulation
+simultaneously and individually may positively impact neuroplastic changes in
+individuals with neurological deficits or impairments. Used singularly, sound"
+72d067a6e1fd447ef512262248ad5f73823a3842,Probabilistic Models for 3D Urban Scene Understanding from Movable Platforms,"Probabilistic Models for
+3D Urban Scene Understanding
+from Movable Platforms
+Dissertation
+Dipl.-Inform. Andreas Geiger"
+72f4c415b5f3ecf63380b6985c95c5af2ba72632,Activity Recognition on a Large Scale in Short Videos - Moments in Time Dataset,"ACTIVITY RECOGNITION ON A LARGE SCALE IN
+SHORT VIDEOS - MOMENTS IN TIME DATASET
+Ankit Parag Shah* ∗
+Harini Kesavamoorthy*
+Poorva Rane*
+Pramati Kalwad*
+Alexander Hauptmann
+Florian Metze"
+72a55554b816b66a865a1ec1b4a5b17b5d3ba784,Real-Time Face Identification via CNN and Boosted Hashing Forest,"Real-Time Face Identification
+via CNN
+and Boosted Hashing Forest
+Yury Vizilter, Vladimir Gorbatsevich, Andrey Vorotnikov and Nikita Kostromov
+State Research Institute of Aviation Systems (GosNIIAS), Moscow, Russia
+IEEE Computer Society Workshop on Biometrics
+In conjunction with CVPR 2016, June 26, 2016"
+72c248c8d3bd76e2a31963aad7286b8d06ab7f8e,Looking outside of the Box: Object Detection and Localization with Multi-scale Patterns,"Looking outside of the Box:
+Object Detection and Localization with
+Multi-scale Patterns
+Eshed Ohn-Bar, Student Member, IEEE, and Mohan Manubhai Trivedi, Fellow, IEEE"
+72a79f351d4ae03ff940ff920898e41ce960f58e,Author's Personal Copy Backtracking: Retrospective Multi-target Tracking,"(This is a sample cover image for this issue. The actual cover is not yet available at this time.)
+This article appeared in a journal published by Elsevier. The attached
+copy is furnished to the author for internal non-commercial research
+and education use, including for instruction at the authors institution
+and sharing with colleagues.
+Other uses, including reproduction and distribution, or selling or
+licensing copies, or posting to personal, institutional or third party
+websites are prohibited.
+In most cases authors are permitted to post their version of the
+article (e.g. in Word or Tex form) to their personal website or
+institutional repository. Authors requiring further information
+regarding Elsevier’s archiving and manuscript policies are
+encouraged to visit:
+http://www.elsevier.com/copyright"
+72bf9c5787d7ff56a1697a3389f11d14654b4fcf,Robust Face Recognition Using Symmetric Shape-from-Shading,"RobustFaceRecognitionUsing
+SymmetricShape-from-Shading
+W.Zhao
+RamaChellappa
+CenterforAutomationResearchand
+ElectricalandComputerEngineeringDepartment
+UniversityofMaryland
+CollegePark,MD
+ThesupportoftheO(cid:14)ceofNavalResearchunderGrantN
+727c8c696c6acc04e57b6c3541613702c22c6f0f,Optimal discrete wavelet transform (DWT) features for face recognition,"2010 Asia Pacific Conference on Circuits and Systems (APCCAS 2010)
+6 - 9 December 2010, Kuala Lumpur, Malaysia
+Optimal Discrete Wavelet Transform (DWT)
+Features for Face Recognition
+Paul Nicholl
+School of Electronics, Electrical
+Engineering & Computer Science
+Queen’s Univ., Northern Ireland
+Email:
+Afandi Ahmad
+Abbes Amira
+JEC, Faculty of. Elec. and Electronic Eng.
+Univ. Tun Hussein Onn Malaysia
+NIBEC, Faculty of Comp. and Eng.
+Univ. of Ulster, Jordanstown Campus
+Johor, Malaysia
+Email:
+Northern Ireland
+Email:"
+725a45ad75caf0112d649253f8a69793b1f00e80,LIFEisGAME : An approach to the utilization of serious games for therapy for children with ASD,"LIFEisGAME: An approach to the utilization of serious
+games for therapy for children with ASD
+Tiago Fernandes1,5, Samanta Alves2, José Miranda3,5, Cristina Queirós2, Verónica
+Instituto de Telecomunicações, Lisboa, Portugal,
+Faculdade de Psicologia da Universidade do Porto, Porto, Portugal,
+Instituto Politécnico da Guarda, Porto, Portugal,
+Faculdade de Ciências da Universidade do Porto, Porto, Portugal,
+5 Faculdade de Engenharia da Universidade do Porto, Porto, Portugal,
+Orvalho1,4"
+72cebd7d046080899703ed3cd96e3019a9f60f13,Towards Transparent AI Systems: Interpreting Visual Question Answering Models,"Towards Transparent AI Systems:
+Interpreting Visual Question Answering Models
+Yash Goyal, Akrit Mohapatra, Devi Parikh, Dhruv Batra
+{ygoyal, akrit, parikh,
+Virginia Tech"
+724a493411b7c5a904445406d3037df4a22b6c89,Training of Convolutional Networks on Multiple Heterogeneous Datasets for Street Scene Semantic Segmentation,"Training of Convolutional Networks on Multiple Heterogeneous
+Datasets for Street Scene Semantic Segmentation
+Panagiotis Meletis and Gijs Dubbelman"
+4414a328466db1e8ab9651bf4e0f9f1fe1a163e4,Weighted voting of sparse representation classifiers for facial expression recognition,"© EURASIP, 2010 ISSN 2076-1465
+18th European Signal Processing Conference (EUSIPCO-2010)
+INTRODUCTION"
+44736c0c7cfced2c0f06c5ae8dd0111d9ea0dc20,On the Robustness of Speech Emotion Recognition for Human-Robot Interaction with Deep Neural Networks,"On the Robustness of Speech Emotion Recognition for Human-Robot
+Interaction with Deep Neural Networks
+Egor Lakomkin1, Mohammad Ali Zamani1, Cornelius Weber1, Sven Magg1 and Stefan Wermter1"
+44f4b1b90f8d5515f2486e07e4cb4b9589c27518,Deep Learning and Its Applications to Machine Health Monitoring: A Survey,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+Deep Learning and Its Applications to Machine
+Health Monitoring: A Survey
+Rui Zhao, Ruqiang Yan, Zhenghua Chen, Kezhi Mao, Peng Wang, and Robert X. Gao"
+44b30a1048465cd56904cdcbec8e79dffab693bd,Semantic based Query Approach For Web Image Search Through reranking algorithm,"Scientific Journal of Impact Factor (SJIF): 3.134
+E-ISSN (O): 2348-4470
+P-ISSN (P): 2348-6406
+International Journal of Advance Engineering and Research
+Development
+Volume 2,Issue 12,December -2015
+Semantic based Query Approach For Web Image Search
+Through reranking algorithm
+Pushpak Waghmare1, Shubham Katkamwan2, Abhijeet Markand3, Abuj Pratiksha4, Prof. Navale Girish Jaysingh5
+-5Department Of Computer,All India shri Shivaji Memorial Society’s"
+44442a26062c20dab7db4a9862349b598efca119,Modelling errors in a biometric re-identification system,"Modeling Errors in a Biometric Re-Identification System
+B. DeCann and A. Ross
+We consider the problem of “re-identification” where a biometric system answers the question “Has this person been encountered before?” without actually
+deducing the person’s identity. Such a system is vital in biometric surveillance applications and applicable to biometric de-duplication. In such a system, identifiers
+are created dynamically as and when the system encounters an input probe. Consequently, multiple probes of the same identity may be mistakenly assigned different
+identifiers, while probes from different identities may be mistakenly assigned the same identifier. In this work, we describe a re-identification system and develop
+terminology as well as mathematical expressions for prediction of matching errors. Further, we demonstrate that the sequential order in which the probes are
+encountered by the system has a great impact on its matching performance. Experimental analysis based on unimodal and multimodal face and fingerprint scores
+confirms the validity of the designed error prediction model, as well as demonstrates that traditional metrics for biometric recognition fail to accurately characterize
+the error dynamics of a re-identification system.
+Introduction: In a classical biometric system [1], the input probe (query) biometric data is compared against the reference samples (templates) residing
+in the reference database (gallery). Each sample in the reference database is assigned a label, which acts as an identifier (e.g., user-id, name, etc.) that
+relates the reference sample to a specific individual and therefore, the comparison process enables the system to either determine the individual associated
+with the input data (referred to as identification or 1:N matching) or verify whether the input biometric data corresponds to a specific person (referred
+to as verification or 1:1 matching). Labels are assigned to a reference sample during an enrollment phase, when the biometric data of an individual is
+acquired and stored in the reference database. The identifier may be further associated with additional biographic data (e.g., legal name, ID number) to
+link the identifier to an identity.1 Thus, the identification and verification problems address the question: “Who is this person?” or “Is this person who
+they claim to be?”, respectively.
+In this work, we examine a variant of the classical biometric identification system, wherein probe data is input into the system from sensors at
+multiple locations. The objective of the system is to deduce: “Has this person been encountered before?”. A biometric system performing such duties"
+4425df6cc10917644c44a7f4177a5d7cc1c8b7bc,Object Localization based on Structural SVM using Privileged Information,"Object Localization based on Structural SVM
+using Privileged Information
+Jan Feyereisl, Suha Kwak∗, Jeany Son, Bohyung Han
+Dept. of Computer Science and Engineering, POSTECH, Pohang, Korea"
+4439746eeb7c7328beba3f3ef47dc67fbb52bcb3,YASAMAN HEYDARZADEH at al: AN EFFICIENT FACE DETECTION METHOD USING ADABOOST,"YASAMAN HEYDARZADEH at al: AN EFFICIENT FACE DETECTION METHOD USING ADABOOST . . .
+An Efficient Face Detection Method Using Adaboost and Facial Parts
+Yasaman Heydarzadeh, Abolfazl Toroghi Haghighat
+Computer, IT and Electronic department
+Azad University of Qazvin
+Tehran, Iran
+qiau.ac.ir ,"
+446a99fdedd5bb32d4970842b3ce0fc4f5e5fa03,A Pose-Adaptive Constrained Local Model for Accurate Head Pose Tracking,"A Pose-Adaptive Constrained Local Model For
+Accurate Head Pose Tracking
+Lucas Zamuner
+Eikeo
+1 rue Leon Jouhaux,
+F-75010, Paris, France
+Kevin Bailly
+Sorbonne Universit´es
+UPMC Univ Paris 06
+CNRS UMR 7222, ISIR
+F-75005, Paris, France
+Erwan Bigorgne
+Eikeo
+1 rue Leon Jouhaux,
+F-75010, Paris, France"
+44b1399e8569a29eed0d22d88767b1891dbcf987,Learning Multi-modal Latent Attributes,"This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication.
+IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE
+Learning Multi-modal Latent Attributes
+Yanwei Fu, Timothy M. Hospedales, Tao Xiang and Shaogang Gong"
+448efcae3b97aa7c01b15c6bc913d4fbb275f644,Style Finder: Fine-Grained Clothing Style Recognition and Retrieval,"Style Finder: Fine-Grained Clothing Style Recognition and Retrieval
+Wei Di2, Catherine Wah1, Anurag Bhardwaj2, Robinson Piramuthu2, and Neel Sundaresan2
+Department of Computer Science and Engineering, University of California, San Diego
+eBay Research Labs, 2145 Hamilton Ave. San Jose, CA"
+4443ee5eaa56e41acddb62cacbc2f6d8c84ccd59,Multiple Objects Fusion Tracker Using a Matching Network for Adaptively Represented Instance Pairs,"Article
+Multiple Objects Fusion Tracker Using a Matching
+Network for Adaptively Represented Instance Pairs
+Sang-Il Oh and Hang-Bong Kang *
+Department of Media Engineering, Catholic University of Korea, 43-1, Yeoggok 2-dong, Wonmmi-gu,
+Bucheon-si, Gyeonggi-do 14662, Korea;
+* Correspondence: Tel.: +82-2-2164-4598
+Academic Editor: Simon X. Yang
+Received: 27 February 2017; Accepted: 14 April 2017; Published: 18 April 2017"
+446dc1413e1cfaee0030dc74a3cee49a47386355,Recent Advances in Zero-shot Recognition,"Recent Advances in Zero-shot Recognition
+Yanwei Fu, Tao Xiang, Yu-Gang Jiang, Xiangyang Xue, Leonid Sigal, and Shaogang Gong"
+44a3ec27f92c344a15deb8e5dc3a5b3797505c06,A Taxonomy of Part and Attribute Discovery Techniques,"A Taxonomy of Part and Attribute Discovery
+Techniques
+Subhransu Maji"
+44880df54e6caa3e7263db7a4d5cb77838f4698f,Learning Optimal Parameters for Multi-target Tracking with Contextual Interactions,"Learning Optimal Parameters for Multi-target Tracking with Contextual
+Interactions
+Shaofei Wang · Charless C. Fowlkes"
+44bb6ccb3526bb38364550263bc608116910da32,Model-Driven Simulations for Computer Vision,"2017 IEEE Winter Conference on Applications of Computer Vision
+Model-driven Simulations for Computer Vision
+VSR Veeravasarapu1, Constantin Rothkopf2, Ramesh Visvanathan1
+Center for Cognition and Computation, Dept. of Computer Science, Goethe University, Frankfurt
+Center for Cognitive Science & Dept. of Psychology, Technical University Darmstadt.
+(a) Lambertian
+(Direct-lighting based rendering)
+(b) Ray tracing
+(appearance-driven rendering)
+(c) Monte-Carlo rendering
+(physics-driven rendering)
+(d) Semantic labels
+(e) Day light
+(f) Night
+Figure 1: Rendering fidelity and Virtual scene diversity. This work aims to quantify the impact of photorealism and physics
+fidelity on transfer learning from virtual reality. (a)-(c): Images of same scene state rendered with different rendering engines.
+(e)-(g): Same scene under different lighting. (d) and (h) semantic labels. Color coding scheme for labels is same as [5].
+(g) Rain
+(h) Semantic labels"
+44993de87bbbce71f14d7917944d055700217696,A late fusion approach to combine multiple pedestrian detectors,"A Late Fusion Approach to Combine Multiple
+Pedestrian Detectors
+Artur Jord˜ao, Jessica Sena de Souza, William Robson Schwartz
+Smart Surveillance Interest Group, Computer Science Department
+Universidade Federal de Minas Gerais, Minas Gerais, Brazil"
+44241248f16c172a1c2fb90e48fd728ba26220fc,Expression-invariant Non-rigid 3D Face Recognition: A Robust Approach to Expression-aware Morphing,"Expression-invariant Non-rigid 3D Face Recognition: A Robust Approach to
+Expression-aware Morphing
+F. R. Al-Osaimi
+M. Bennamoun
+A. Mian"
+44dd150b9020b2253107b4a4af3644f0a51718a3,An Analysis of the Sensitivity of Active Shape Models to Initialization When Applied to Automatic Facial Landmarking,"An Analysis of the Sensitivity of Active Shape
+Models to Initialization when Applied to Automatic
+Facial Landmarking
+Keshav Seshadri, Student Member, IEEE and Marios Savvides, Member, IEEE"
+447d8893a4bdc29fa1214e53499ffe67b28a6db5,Electronic Transport in Quantum Confined Systems,"THÈSEPour obtenir le titre deDOCTEUR DE L’UNIVERSITÉSpécialitéSCIENCES DES MATÉRIAUXParMaxime BERTHEElectronic transport in quantum confined systemsSoutenue le 11 décembre 2007 devant la commission d’examen composée de:B. DJAFARI-ROUHANIS. ROUSSETD. RODITCHEVF. CHARRAD. STIÉVENARDH. SHIGEKAWAB. GRANDIDIERPrésidentRapporteurRapporteurExaminateurDirecteur de thèseCo-directeur de thèseCo-directeur de thèsel’Université des Sciences et Technologies de LilleEcole Doctorale Sciences de la Matière, du Rayonnement et de l’EnvironnementPrésentée à"
+44f65e3304bdde4be04823fd7ca770c1c05c2cef,On the use of phase of the Fourier transform for face recognition under variations in illumination,"SIViP
+DOI 10.1007/s11760-009-0125-4
+ORIGINAL PAPER
+On the use of phase of the Fourier transform for face recognition
+under variations in illumination
+Anil Kumar Sao · B. Yegnanarayana
+Received: 17 November 2008 / Revised: 20 February 2009 / Accepted: 7 July 2009
+© Springer-Verlag London Limited 2009"
+44703dea094eb9558965db9439a07b9a74fd36b5,"Multiculturalism, Colorblindness, and Prejudice: Examining How Diversity Ideologies Impact Intergroup Attitudes","University of Arkansas, Fayetteville
+Theses and Dissertations
+8-2018
+Multiculturalism, Colorblindness, and Prejudice:
+Examining How Diversity Ideologies Impact
+Intergroup Attitudes
+David Sparkman
+University of Arkansas, Fayetteville
+Follow this and additional works at: https://scholarworks.uark.edu/etd
+Part of the Social Psychology Commons
+Recommended Citation
+Sparkman, David, ""Multiculturalism, Colorblindness, and Prejudice: Examining How Diversity Ideologies Impact Intergroup
+Attitudes"" (2018). Theses and Dissertations. 2923.
+https://scholarworks.uark.edu/etd/2923
+This Dissertation is brought to you for free and open access by It has been accepted for inclusion in Theses and Dissertations by
+n authorized administrator of For more information, please contact"
+4461a1b70e461ec298d7066ba103deda48d4ba22,Classification via Minimum Incremental Coding Length,"Vol. 2, No. 2, pp. 367–395
+(cid:2) 2009 Society for Industrial and Applied Mathematics
+Classification via Minimum Incremental Coding Length
+John Wright
+, Yi Ma
+, Yangyu Tao
+, Zhouchen Lin
+, and Heung-Yeung Shum"
+442cc39db208a66acf3acc22589b13981bb303fd,Design of Non-Linear Discriminative Dictionaries for Image Classification,"CONFIDENTIAL REVIEW COPY. DO NOT DISTRIBUTE.
+Design of Non-Linear Discriminative
+Dictionaries for Image Classi(cid:12)cation
+Anonymous ACCV 2012 submission
+Paper ID 662"
+447a5e1caf847952d2bb526ab2fb75898466d1bc,Learning Non-linear Transform with Discrim- Inative and Minimum Information Loss Priors,"Under review as a conference paper at ICLR 2018
+LEARNING NON-LINEAR TRANSFORM WITH DISCRIM-
+INATIVE AND MINIMUM INFORMATION LOSS PRIORS
+Anonymous authors
+Paper under double-blind review"
+4452c36dc4c5e9f11d041489c8ff2e7006d33c80,"A Computational Analysis of Recent Multi-Object Tracking Methods Based on Particle Filter, HMM and Appearance Information of Objects","International Journal of Emerging Technology and Advanced Engineering
+Website: www.ijetae.com (ISSN 2250-2459, ISO 9001:2008 Certified Journal, Volume 3, Issue 02, February 2013)
+A Computational Analysis of Recent Multi-Object Tracking
+Methods Based on Particle Filter, HMM and Appearance
+Information of Objects
+Raksha Shrivastava1, Professor Rajesh Nema 2
+,2Department of Electronics and Communication, NRI Institute of Information Science and Technology, Bhopal (M.P)"
+2a7bca56e2539c8cf1ae4e9da521879b7951872d,Exploiting Unrelated Tasks in Multi-Task Learning,"Exploiting Unrelated Tasks in Multi-Task Learning
+Anonymous Author 1
+Unknown Institution 1
+Anonymous Author 2
+Unknown Institution 2
+Anonymous Author 3
+Unknown Institution 3"
+2af2aa21538783e46911fb857a23dbb88ed90c2b,A Study on Deep Learning Based Sauvegrain Method for Measurement of Puberty Bone Age,"A Study on Deep Learning Based
+Sauvegrain Method for Measurement
+of Puberty Bone Age
+Keum Gang Cha∗
+Seung Bin Baik∗
+Plani Inc.
+Plani Inc.
+September 20, 2018"
+2aa08ab3d6c227e3b071dc470a2f36dc5d4a2403,Ensembling Visual Explanations for VQA,"To Appear In Proceedings of the NIPS 2017 workshop on Visually-Grounded
+Interaction and Language (ViGIL), December 2017."
+2a2b99fc9583419931681acfd83ac953a3df3270,Estimating the quality of face localization for face verification,"ESTIMATING THE QUALITY OF FACE LOCALIZATION FOR FACE VERIFICATION
+Yann Rodriguez
+Fabien Cardinaux
+Samy Bengio
+Johnny Mari´ethoz
+IDIAP
+CP 592, rue du Simplon 4
+1920 Martigny, Switzerland"
+2a93ce4284c7f8605e1d9bc0a8b86036073ebf61,"Tracking, Learning and Detection of Multiple Objects in Video Sequences","Master Thesis
+Czech
+Technical
+University
+in Prague
+Faculty of Electrical Engineering
+Department of Cybernetics
+Tracking, Learning and Detection of
+Multiple Objects in Video Sequences
+Filip Naiser
+Supervisor: prof. Ing. Jiří Matas, Ph.D.
+January 2017"
+2a218c17944d72bfdc7f078f0337cab67536e501,Detection bank: an object detection based video representation for multimedia event recognition,"Detection Bank: An Object Detection Based Video
+Representation for Multimedia Event Recognition
+Tim Althoff, Hyun Oh Song, Trevor Darrell
+UC Berkeley EECS/ICSI
+Multimedia Event Detection
+Birthday Party vs Wedding Ceremony
+● ObjectBank omits the following steps that are
+standard in a detection pipeline:
+● Thresholding of score maps
+● Non-maximum suppression
+● Pooling across all scales
+● We compute different detection count statistics to
+capture e.g. max number of detections, sum of
+detection scores, probability of detection based on
+the detection images from a large number of
+windowed object detectors.
+Detection Count Statistics
+Look for: Balloon, Candle, Birthday Cake vs.
+Bride, Groom, Wedding Gown, Wedding Cake
+Illustration"
+2a152dae1ba70d0cc605b0f7418392ed1a294a4a,Head Pose Detection Using Fast Robust PCA for Side Active Appearance Models Under Occlusion,"Head Pose Detection Using Fast Robust PCA
+for Side Active Appearance Models Under Occlusion
+Anıl Yüce1, Matteo Sorci2, and Jean-Philippe Thiran1
+Signal Processing Laboratory (LTS5)
+École Polytechnique Fédérale de Lausanne (EPFL), Lausanne, Switzerland
+nViso Sàrl, Lausanne, Switzerland"
+2a12c72b0328a23b0d7ea63db1f93abf3054beec,Extended Feature Descriptor and Vehicle Motion Model with Tracking-by-Detection for Pedestrian Active Safety,"IEICE TRANS. ??, VOL.Exx–??, NO.xx XXXX 200x
+PAPER
+Extended Feature Descriptor and Vehicle Motion Model with
+Tracking-by-detection for Pedestrian Active Safety
+Hirokatsu KATAOKAy;yya), Kimimasa TAMURAy, Nonmembers, Kenji IWATAyyy, Yutaka SATOHyyy, Members,
+Yasuhiro MATSUIyyyy, Nonmember, and Yoshimitsu AOKIy, Member
+SUMMARY
+The percentage of pedestrian deaths in traf‌f‌ic accidents is
+on the rise in Japan. In recent years, there have been calls for measures
+to be introduced to protect vulnerable road users such as pedestrians and
+cyclists. In this study, a method to detect and track pedestrians using an
+in-vehicle camera is presented. We improve the technology of detecting
+pedestrians by using the highly accurate images obtained with a monocular
+camera. In the detection step, we employ ECoHOG as the feature descrip-
+tor; it accumulates the integrated gradient intensities. In the tracking step,
+we apply an effective motion model using optical flow and the proposed
+feature descriptor ECoHOG in a tracking-by-detection framework. These
+techniques were verified using images captured on real roads.
+key words: Pedestrian Active Safety, Tracking-by-detection, ECoHOG,
+Particle Filter, Vehicle Motion Model"
+2a067874fc1ec318b6d23f34bdb13ea4e95d5ca6,An Evaluation of Image-Based Verb Prediction Models against Human Eye-Tracking Data,"New Orleans, Louisiana, June 1 - 6, 2018. c(cid:13)2018 Association for Computational Linguistics
+Proceedings of NAACL-HLT 2018, pages 758–763"
+2ad2af8e3bdeb0302de07defc3fec9b387414a27,Don't Look Back: Post-hoc Category Detection via Sparse Reconstruction,"Don't Look Back: Post-hoc Category Detection via
+Sparse Reconstruction
+Hyun Oh Song
+Mario Fritz
+Tim Althoff
+Trevor Darrell
+Electrical Engineering and Computer Sciences
+University of California at Berkeley
+Technical Report No. UCB/EECS-2012-16
+http://www.eecs.berkeley.edu/Pubs/TechRpts/2012/EECS-2012-16.html
+January 24, 2012"
+2a86bcdfb1d817ddb76ba202319f8267a36c0f62,PCL: Proposal Cluster Learning for Weakly Supervised Object Detection,"JOURNAL OF LATEX CLASS FILES
+PCL: Proposal Cluster Learning for Weakly
+Supervised Object Detection
+Peng Tang, Xinggang Wang, Member, IEEE, Song Bai, Wei Shen, Xiang Bai, Senior Member, IEEE,
+Wenyu Liu, Senior Member, IEEE, and Alan Yuille, Fellow, IEEE"
+2a259fd1b4442a71cd127afac417a650ffc379d9,Human upper body posture recognition and upper limbs motion parameters estimation,"Human Upper Body Posture Recognition and Upper
+Limbs Motion Parameters Estimation
+Jun-Yang Huang1 Shih-Chung Hsu1and Chung-Lin Huang1,2
+. Department Of Electrical Engineering, National Tsing-Hua University, Hsin-Chu, Taiwan
+. Department of Applied Informatics and Multimedia, Asia Univeristy, Tai-Chung, Taiwan.
+Email:"
+2a0efb1c17fbe78470acf01e4601a75735a805cc,Illumination-Insensitive Face Recognition Using Symmetric Shape-from-Shading,"Illumination-InsensitiveFaceRecognitionUsing
+SymmetricShape-from-Shading
+WenYiZhao
+RamaChellappa
+CenterforAutomationResearch
+UniversityofMaryland,CollegePark,MD
+2a7e2cda27807d24b845f5b5080fb1296c302bfe,Personal Authentication Using Signature Recognition,"Personal Authentication Using Signature Recognition
+Diana Kalenova
+Department of Information Technology, Laboratory of Information Processing,
+Lappeenranta University of Technology"
+2a08147bf88041c6e0354e26762b4e4d65d5163f,Trimmed Event Recognition ( Moments in Time ) : Submission to ActivityNet Challenge 2018,"Trimmed Event Recognition (Moments in Time):
+Submission to ActivityNet Challenge 2018
+Dongyang Cai"
+2a3227f54286d8a36736663781f194167f2b6582,Nonlinear Dimensionality Reduction for Discriminative Analytics of Multiple Datasets,"Nonlinear Dimensionality Reduction for
+Discriminative Analytics of Multiple Datasets
+Jia Chen, Gang Wang, Member, IEEE, and Georgios B. Giannakis, Fellow, IEEE"
+2ade545f25f5ba66295aeab3a89583e7cf6101b3,A Dataset for Airborne Maritime Surveillance Environments,"This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TCSVT.2017.2775524, IEEE
+Transactions on Circuits and Systems for Video Technology
+A Dataset for Airborne Maritime Surveillance
+Environments
+Ricardo Ribeiro, Member, IEEE, Gonc¸alo Cruz, Jorge Matos, Student, IST,
+and Alexandre Bernardino, Member, IEEE,"
+2aec012bb6dcaacd9d7a1e45bc5204fac7b63b3c,Robust Registration and Geometry Estimation from Unstructured Facial Scans,"Robust Registration and Geometry Estimation from Unstructured
+Facial Scans
+Maxim Bazik1 and Daniel Crispell2"
+2ac31bc7a4dd0256166208dcc8d5dfa99347117e,A Window-Based Classifier for Automatic Video-Based Reidentification,"A Window-Based Classifier for Automatic
+Video-Based Reidentification
+Dario Figueira, Matteo Taiana, Jacinto C. Nascimento, Member, IEEE, and Alexandre Bernardino, Member, IEEE"
+2ae139b247057c02cda352f6661f46f7feb38e45,Combining modality specific deep neural networks for emotion recognition in video,"Combining Modality Specific Deep Neural Networks for
+Emotion Recognition in Video
+Samira Ebrahimi Kahou1, Christopher Pal1, Xavier Bouthillier2, Pierre Froumenty1,
+Ça˘glar Gülçehre2,∗ , Roland Memisevic2, Pascal Vincent2, Aaron Courville2, & Yoshua Bengio2
+École Polytechique de Montréal, Université de Montréal, Montréal, Canada
+Laboratoire d’Informatique des Systèmes Adaptatifs, Université de Montréal, Montréal, Canada
+{samira.ebrahimi-kahou, christopher.pal,
+{bouthilx, gulcehrc, memisevr, vincentp, courvila,"
+2a86bc520586f611771c2052b50ac52239414dd2,CrowdHuman: A Benchmark for Detecting Human in a Crowd,"CrowdHuman: A Benchmark for Detecting Human in a Crowd
+Shuai Shao∗ Zijian Zhao∗ Boxun Li
+Tete Xiao Gang Yu Xiangyu Zhang
+Jian Sun
+{shaoshuai, zhaozijian, liboxun, xtt, yugang, zhangxiangyu,
+Megvii Inc. (Face++)"
+2a1deffc67ccb5f8ca5897ac3f31dac09af70f05,Robust Subspace Clustering via Tighter Rank Approximation,"Robust Subspace Clustering via Tighter Rank
+Approximation
+Zhao Kang
+Computer Science Dept.
+Southern Illinois University
+Carbondale, IL, USA
+Chong Peng
+Computer Science Dept.
+Southern Illinois University
+Carbondale, IL, USA
+Qiang Cheng
+Computer Science Dept.
+Southern Illinois University
+Carbondale, IL, USA"
+2a83a51c9596ed796da52bdac49ca30e4eb04345,Eclectic Genetic Algorithm for Holistic Face Recognition in L ∞ Space,"Eclectic Genetic Algorithm for Holistic Face
+Recognition in L∞ Space
+C. Villegas, J. Climent, C.R. Murillo, A. Otero, C.R. Villegas"
+2a87f95e36938ca823b33c72a633d8d902d5cb86,Oxytocin Improves “Mind-Reading” in Humans,"PRIORITY COMMUNICATION
+Oxytocin Improves “Mind-Reading” in Humans
+Gregor Domes, Markus Heinrichs, Andre Michel, Christoph Berger, and Sabine C. Herpertz
+Background: The ability to “read the mind” of other individuals, that is, to infer their mental state by interpreting subtle social cues, is
+indispensable in human social interaction. The neuropeptide oxytocin plays a central role in social approach behavior in nonhuman
+mammals.
+Methods: In a double-blind, placebo-controlled, within-subject design, 30 healthy male volunteers were tested for their ability to infer
+the affective mental state of others using the Reading the Mind in the Eyes Test (RMET) after intranasal administration of 24 IU oxytocin.
+Results: Oxytocin improved performance on the RMET compared with placebo. This effect was pronounced for difficult compared with
+easy items.
+Conclusions: Our data suggest that oxytocin improves the ability to infer the mental state of others from social cues of the eye region.
+Oxytocin might play a role in the pathogenesis of autism spectrum disorder, which is characterized by severe social impairment.
+Key Words: Emotion, oxytocin, peptide, social cognition, theory of
+T he ability to infer the internal state of another person to
+adapt one’s own behavior is a cornerstone of all human
+social interactions. Humans have to infer internal states
+from external cues such as facial expressions in order to make
+sense of or predict another person’s behavior, an ability that is
+referred to as “mind-reading” (Siegal and Varley 2002; Stone et al
+1998). In particular, individuals with autism have distinct diffi-
+2a6c7d5aa087233ff8a09bdaa34d5f76f3330a4f,A Survey of Efficient Regression of General-Activity Human Poses from Depth Images,"A Survey of Efficient Regression of General-Activity Hu-
+man Poses from Depth Images
+Wenye He
+This paper presents a comprehensive review on regression-based method for human pose es-
+timation. The problem of human pose estimation has been intensively studied and enabled
+many application from entertainment to training. Traditional methods often rely on color im-
+age only which cannot completely ambiguity of joint’s 3D position, especially in the complex
+context. With the popularity of depth sensors, the precision of 3D estimation has significant
+improvement. In this paper, we give a detailed analysis of state-of-the-art on human pose
+estimation, including depth image based and RGB-D based approaches. The experimental
+results demonstrate their advantages and limitation for different scenarios.
+Introduction
+Human pose estimation from images has been studied for decades in computer vision. As recent
+development in cameras and sensors, depth images receive a wide spread of notice from researchers
+from body pose estimation 1 to 3D reconstruction 2. Girshick et al.1 present an approach to find the
+joints position in human body from depth images. They address the problem of general-activity
+pose estimation. Their regression-based approach sucessfully computes the joint positions even
+with occlusion. Their method can be view as a new combination of two existing works, implicit
+shape models3 and Hough forest4. The following sections cover related works, explanation on the
+method from testing to training, and result and comparison."
+2a2232f2972191a0606d588aa4f13c9f27d1972d,InstanceCut: From Edges to Instances with MultiCut,"InstanceCut: from Edges to Instances with MultiCut
+Alexander Kirillov1 Evgeny Levinkov2 Bjoern Andres2 Bogdan Savchynskyy1 Carsten Rother1
+TU Dresden, Dresden, Germany
+MPI for Informatics, Saarbr¨ucken, Germany"
+2a06341b40b3fd27483b2a8d8cbf86fddf45e423,Automatic generation of ground truth for the evaluation of obstacle detection and tracking techniques,"Automatic generation of ground truth for the evaluation of obstacle detection
+and tracking techniques
+Hatem Hajri∗, Emmanuel Doucet∗†, Marc Revilloud∗, Lynda Halit∗, Benoit Lusetti∗,
+Mohamed-Cherif Rahal∗
+Automated Driving Research Team, Institut VEDECOM, Versailles, France
+InnoCoRe Team, Valeo, Bobigny, France"
+2acf319c5eac89cc9e0ed24633e4408dbd4a8a5b,The Effect of Distance Measures on the Recognition Rates of PCA and LDA Based Facial Recognition,"The Effect of Distance Measures on the Recognition Rates of PCA
+and LDA Based Facial Recognition
+Philip Miller, Jamie Lyle
+Digitial Image Processing
+Clemson Universtiy
+{pemille,"
+2a40917ef436000b22bc7c6f35400440ef673d36,Learning clustered sub-spaces for sketch-based image retrieval,"Learning Clustered Sub-spaces for Sketch-based Image Retrieval
+Koustav Ghosal Ameya Prabhu
+Riddhiman Dasgupta
+koustav.ghosal∗
+ameya.prabhu∗
+riddhiman.dasgupta∗
+Anoop M Namboodiri
+anoop†
+Centre for Visual Information Technology, IIIT-Hyderabad, India"
+2a56a51490f6ccfaf6fcbdf546a5515bef5203a1,"Attention, please!: Comparing Features for Measuring Audience Attention Towards Pervasive Displays","Attention, please! Comparing Features for Measuring
+Audience Attention Towards Pervasive Displays
+Florian Alta, Andreas Bullingb, Lukas Meckea, Daniel Buscheka
+LMU Munich
+Munich, Germany"
+2aa362740ac9a2b304a74122da820e3829689842,"Past, Present, and Future of Simultaneous Localization and Mapping: Toward the Robust-Perception Age","Past, Present, and Future of Simultaneous
+Localization And Mapping: Towards the
+Robust-Perception Age
+Cesar Cadena, Luca Carlone, Henry Carrillo, Yasir Latif,
+Davide Scaramuzza, Jos´e Neira, Ian Reid, John J. Leonard"
+2ad0ee93d029e790ebb50574f403a09854b65b7e,Acquiring linear subspaces for face recognition under variable lighting,"Acquiring Linear Subspaces for Face
+Recognition under Variable Lighting
+Kuang-Chih Lee, Student Member, IEEE, Jeffrey Ho, Member, IEEE, and
+David Kriegman, Senior Member, IEEE"
+2a8aedea2031128868f1c6dd44329c5bb7afc419,A Convex Duality Framework for GANs,"A Convex Duality Framework for GANs
+Farzan Farnia∗
+David Tse∗"
+2acf7e58f0a526b957be2099c10aab693f795973,Bosphorus Database for 3D Face Analysis,"Bosphorus Database for 3D Face Analysis
+Arman Savran1, Neşe Alyüz2, Hamdi Dibeklioğlu2, Oya Çeliktutan1, Berk Gökberk3,
+Bülent Sankur1, and Lale Akarun2
+Boğaziçi University, Electrical and Electronics Engineering Department
+Boğaziçi University, Computer Engineering Department
+Philips Research, Eindhoven, The Netherlands"
+2ab9c36e19090ed9ac5295b3704708bdce80462d,Zero-Shot Learning via Category-Specific Visual-Semantic Mapping and Label Refinement,"Zero-Shot Learning via Category-Specific
+Visual-Semantic Mapping
+Li Niu, Jianfei Cai, and Ashok Veeraraghavan"
+2ac986ec18c3572ee4f922ba9a90ae374563491c,A New Approach of Human Segmentation from Photo Images,"International Journal of Scientific and Research Publications, Volume 5, Issue 1, January 2015
+ISSN 2250-3153
+A New Approach of Human Segmentation from Photo
+Images
+Ashwini Magar*, Prof.J.V.Shinde**
+* Computer Department, Late G .N. Sapkal College Of Engineering, Savitribai Phule Pune University
+** Computer Department, Late G .N .Sapkal College Of Engineering, Savitribai Phule Pune University"
+2a6327a8bdbd31e2c08863b96c4f09245db8cab7,Targets ' facial width-to-height ratio biases pain judgments ☆,"Journal of Experimental Social Psychology 74 (2018) 56–64
+Contents lists available at ScienceDirect
+Journal of Experimental Social Psychology
+journal homepage: www.elsevier.com/locate/jesp
+Targets' facial width-to-height ratio biases pain judgments☆
+Jason C. Deska⁎, Kurt Hugenberg
+Miami University, 501 East High Street, Oxford, OH 45056, United States
+A R T I C L E I N F O
+A B S T R A C T
+Keywords:
+Facial width-to-height ratio
+Pain judgments
+Pain perception
+The accurate perception of others' pain is important for both perceivers and targets. Yet, like other person
+perception judgments, pain judgments are prone to biases. Although past work has begun detailing character-
+istics of targets that can bias pain judgments (e.g., race, gender), the current work examines a novel source of
+bias inherent to all targets: structural characteristics of the human face. Specifically, we present four studies
+demonstrating that facial width-to-height ratio, a stable feature of all faces, biases pain judgments. Compared to
+those with low facial width-to-height ratio, individuals with high facial width-to-height ratio are perceived as
+experiencing less pain in otherwise identical situations (Studies 1, 2, & 3), and as needing less pain medication to"
+2ff9618ea521df3c916abc88e7c85220d9f0ff06,Facial Tic Detection Using Computer Vision,"Facial Tic Detection Using Computer Vision
+Christopher D. Leveille
+Advisor: Prof. Aaron Cass
+March 20, 2014"
+2f587ab6694fdcfe6bd2977120ebeb758e28d77f,Coupled Generative Adversarial Nets,"MITSUBISHI ELECTRIC RESEARCH LABORATORIES
+http://www.merl.com
+Coupled Generative Adversarial Nets
+Liu, M.-Y.; Tuzel, O.
+TR2016-070
+June 2016"
+2f0c30d6970da9ee9cf957350d9fa1025a1becb4,Deformable Convolutional Networks,"Deformable Convolutional Networks
+Jifeng Dai∗ Haozhi Qi∗,† Yuwen Xiong∗,† Yi Li∗,† Guodong Zhang∗,† Han Hu Yichen Wei
+Microsoft Research Asia"
+2fda461869f84a9298a0e93ef280f79b9fb76f94,OpenFace: An open source facial behavior analysis toolkit,"OpenFace: an open source facial behavior analysis toolkit
+Tadas Baltruˇsaitis
+Peter Robinson
+Louis-Philippe Morency"
+2f0d5cd2d25ea2f3add0139cf4b61f358435bab8,A New Effective System for Filtering Pornography Videos,"Tarek Abd El Hafeez / (IJCSE) International Journal on Computer Science and Engineering
+Vol. 02, No. 09, 2010, 2847-2852
+A New Effective System for Filtering
+Pornography Videos
+Tarek Abd El-Hafeez
+Department of Computer Science,
+Faculty of Science, Minia University
+El-Minia, Egypt"
+2ffcd35d9b8867a42be23978079f5f24be8d3e35,Satellite based Image Processing using Data mining,"ISSN XXXX XXXX © 2018 IJESC
+Research Article Volume 8 Issue No.6
+Satellite based Image Processing using Data mining
+E.Malleshwari1, S.Nirmal Kumar2, J.Dhinesh3
+Professor1, Assistant Professor2, PG Scholar3
+Department of Information Technology1, 2, Master of Computer Applications3
+Vel Tech High Tech Dr Rangarajan Dr Sakunthala Engineering College, Avadi, Chennai, India"
+2fa16dc0ee50550c1bf58c410912d48cddbc3554,Search Tracker: Human-Derived Object Tracking in the Wild Through Large-Scale Search and Retrieval,"This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TCSVT.2016.2555718, IEEE
+Transactions on Circuits and Systems for Video Technology
+Search Tracker: Human-derived object tracking
+in-the-wild through large-scale search and retrieval
+Archith John Bency, Student Member, IEEE S. Karthikeyan,, Carter De Leo, Santhoshkumar Sunderrajan,
+Member, IEEE and B. S. Manjunath, Fellow, IEEE"
+2f7e9b45255c9029d2ae97bbb004d6072e70fa79,cvpaper.challenge in 2015 - A review of CVPR2015 and DeepSurvey,"Noname manuscript No.
+(will be inserted by the editor)
+cvpaper.challenge in 2015
+A review of CVPR2015 and DeepSurvey
+Hirokatsu Kataoka · Yudai Miyashita · Tomoaki Yamabe · Soma
+Shirakabe · Shin’ichi Sato · Hironori Hoshino · Ryo Kato · Kaori Abe ·
+Takaaki Imanari · Naomichi Kobayashi · Shinichiro Morita · Akio
+Nakamura
+Received: date / Accepted: date"
+2f04c7aaac3a884088be550d1be51b4a0b585a2e,"Robust, Real-Time 3D Tracking of Multiple Objects with Similar Appearances","Robust, Real-Time 3D Tracking of Multiple Objects with Similar Appearances
+Taiki Sekii
+Panasonic System Networks R&D Lab. Co., Ltd."
+2f489bd9bfb61a7d7165a2f05c03377a00072477,Structured Semi-supervised Forest for Facial Landmarks Localization with Face Mask Reasoning,"JIA, YANG: STRUCTURED SEMI-SUPERVISED FOREST
+Structured Semi-supervised Forest for
+Facial Landmarks Localization with Face
+Mask Reasoning
+Department of Computer Science
+The Univ. of Hong Kong, HK
+School of EECS
+Queen Mary Univ. of London, UK
+Xuhui Jia1
+Heng Yang2
+Angran Lin1
+Kwok-Ping Chan1
+Ioannis Patras2"
+2f33884d0612fcc3f7eed66e1a4acc229860d6b5,Survey on Spatio-Temporal View Invariant Human Pose Recovery,"Survey on Spatio-Temporal View
+Invariant Human Pose Recovery
+Xavier Perez-Sala, Email: a;c,
+Sergio Escalera, Email: b;c and
+Cecilio Angulo, Email: a
+CETpD-UPC Technical Research Center for Dependency Care and Autonomous
+Living, Universitat Polit`ecnica de Catalunya, Ne`apolis, Rambla de l’Exposici´o, 59-69,
+Dept. Mathematics, Universitat de Barcelona, Gran Via de les Corts Catalanes 585,
+08800 Vilanova i la Geltru, Spain
+Computer Vision Center, Campus UAB, Edifici 0, 08193, Bellaterra, Spain
+08007, Barcelona, Spain"
+2f7452476910a7dbf6231b6b27aed67d9ed455d3,Seam carving for content-aware image resizing,"Seam Carving for Content-Aware Image Resizing
+Shai Avidan
+Mitsubishi Electric Research Labs
+Ariel Shamir
+The Interdisciplinary Center & MERL
+Figure 1: A seam is a connected path of low energy pixels in an image. On the left is the original image with one horizontal and one vertical
+seam. In the middle the energy function used in this example is shown (the magnitude of the gradient), along with the vertical and horizontal
+path maps used to calculate the seams. By automatically carving out seams to reduce image size, and inserting seams to extend it, we achieve
+content-aware resizing. The example on the top right shows our result of extending in one dimension and reducing in the other, compared to
+standard scaling on the bottom right."
+2f29b13fcf7a92a3cc438014068f11f9e45d62be,"AMIGOS: A Dataset for Affect, Personality and Mood Research on Individuals and Groups","AMIGOS: A dataset for Mood, personality and
+Affect research on Individuals and GrOupS
+Juan Abdon Miranda-Correa, Student Member, IEEE, Mojtaba Khomami Abadi, Student Member, IEEE,
+Nicu Sebe, Senior Member, IEEE, and Ioannis Patras, Senior Member, IEEE"
+2fe0555f2b92a81992247519cb8fdc047069e2b0,A Semantic World Model for Urban Search and Rescue Based on Heterogeneous Sensors,"This is a preprint of a paper which appeared in the Proceedings of
+RoboCup 2010: Robot Soccer World Cup XIV
+A Semantic World Model for Urban Search and
+Rescue Based on Heterogeneous Sensors
+Johannes Meyer2, Paul Schnitzspan1, Stefan Kohlbrecher1, Karen Petersen1,
+Mykhaylo Andriluka1, Oliver Schwahn1, Uwe Klingauf2, Stefan Roth1,
+Bernt Schiele1,3, and Oskar von Stryk1
+Department of Computer Science, TU Darmstadt, Germany
+Department of Mechanical Engineering, TU Darmstadt, Germany
+MPI Informatics, Saarbr¨ucken, Germany"
+2f23f7d08c7b8670289cfedd1e571f44a3bace8b,Contextual Information and Covariance Descriptors for People Surveillance: An Application for Safety of Construction Workers,"Hindawi Publishing Corporation
+EURASIP Journal on Image and Video Processing
+Volume 2011, Article ID 684819, 16 pages
+doi:10.1155/2011/684819
+Research Article
+Contextual Information and Covariance Descriptors for People
+Surveillance: An Application for Safety of Construction Workers
+Giovanni Gualdi,1 Andrea Prati,2 and Rita Cucchiara1
+DII, University of Modena and Reggio Emilia, 41122 Modena, Italy
+DISMI, University of Modena and Reggio Emilia, 42122 Reggio Emilia, Italy
+Correspondence should be addressed to Andrea Prati,
+Received 30 April 2010; Revised 7 October 2010; Accepted 10 December 2010
+Academic Editor: Luigi Di Stefano
+Copyright © 2011 Giovanni Gualdi et al. This is an open access article distributed under the Creative Commons Attribution
+License, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly
+cited.
+In computer science, contextual information can be used both to reduce computations and to increase accuracy. This paper
+discusses how it can be exploited for people surveillance in very cluttered environments in terms of perspective (i.e., weak scene
+calibration) and appearance of the objects of interest (i.e., relevance feedback on the training of a classifier). These techniques are
+applied to a pedestrian detector that uses a LogitBoost classifier, appropriately modified to work with covariance descriptors which"
+2f59f28a1ca3130d413e8e8b59fb30d50ac020e2,Children Gender Recognition Under Unconstrained Conditions Based on Contextual Information,"Children Gender Recognition Under Unconstrained
+Conditions Based on Contextual Information
+Riccardo Satta, Javier Galbally and Laurent Beslay
+Joint Research Centre, European Commission, Ispra, Italy
+Email:"
+2f43bfedb8cffc9e44de9f95db80b26395a29cc8,Generalized Hadamard-Product Fusion Operators for Visual Question Answering,"Generalized Hadamard-Product Fusion Operators
+for Visual Question Answering
+Brendan Duke∗†, Graham W. Taylor∗†‡
+School of Engineering, University of Guelph
+Vector Institute for Artificial Intelligence
+Canadian Institute for Advanced Research"
+2f78e471d2ec66057b7b718fab8bfd8e5183d8f4,An Investigation of a New Social Networks Contact Suggestion Based on Face Recognition Algorithm,"SOFTWARE ENGINEERING
+VOLUME: 14 | NUMBER: 5 | 2016 | DECEMBER
+An Investigation of a New Social Networks
+Contact Suggestion Based on Face Recognition
+Algorithm
+Ivan ZELINKA1,2, Petr SALOUN 2, Jakub STONAWSKI 2, Adam ONDREJKA2
+Modeling Evolutionary Algorithms Simulation and Artificial Intelligence, Faculty of Electrical & Electronics
+Engineering, Ton Duc Thang University, 19 Nguyen Huu Tho Street, Ho Chi Minh City, Vietman
+Department of Computer Science, Faculty of Electrical Engineering and Computer Science,
+VSB–Technical University of Ostrava, 17. listopadu 15, 708 33 Ostrava, Czech Republic
+DOI: 10.15598/aeee.v14i5.1116"
+2f88d3189723669f957d83ad542ac5c2341c37a5,Attribute-correlated local regions for deep relative attributes learning,"Downloaded From: https://www.spiedigitallibrary.org/journals/Journal-of-Electronic-Imaging on 9/13/2018
+Terms of Use: https://www.spiedigitallibrary.org/terms-of-use
+Attribute-correlatedlocalregionsfordeeprelativeattributeslearningFenZhangXiangweiKongZeJiaFenZhang,XiangweiKong,ZeJia,“Attribute-correlatedlocalregionsfordeeprelativeattributeslearning,”J.Electron.Imaging27(4),043021(2018),doi:10.1117/1.JEI.27.4.043021."
+2fda164863a06a92d3a910b96eef927269aeb730,Names and faces in the news,"Names and Faces in the News
+Tamara L. Berg, Alexander C. Berg, Jaety Edwards, Michael Maire,
+Ryan White, Yee-Whye Teh, Erik Learned-Miller and D.A. Forsyth
+Computer Science Division
+U.C. Berkeley
+Berkeley, CA 94720"
+2f8ef26bfecaaa102a55b752860dbb92f1a11dc6,A Graph Based Approach to Speaker Retrieval in Talk Show Videos with Transcript-Based Supervision,"A Graph Based Approach to Speaker Retrieval in Talk
+Show Videos with Transcript-Based Supervision
+Yina Han 1, Guizhong Liu, Hichem Sahbi, Gérard Chollet"
+2fd9ecb40df6c7cd4f27c047223a1e45aae1bb95,Feature-based affine-invariant localization of faces,"Feature-based affine-invariant localization of
+faces
+M. Hamouz, J. Kittler, J.-K. Kamarainen, P. Paalanen, H. K¨alvi¨ainen, J. Matas"
+2fdb3576715829aa9bbaf74825236bbb71d06f1a,Where-and-When to Look: Deep Siamese Attention Networks for Video-based Person Re-identification,"Where-and-When to Look: Deep Siamese Attention
+Networks for Video-based Person Re-identification
+Lin Wu, Yang Wang, Junbin Gao, Xue Li"
+2f3f4e0c8a9c63e714a10a6711c67f5e84e4c7c1,IoT Based Embedded Smart Lock Control System,"ISSN XXXX XXXX © 2016 IJESC
+Research Article Volume 6 Issue No. 11
+IoT Based Embedded Smart Lock Control System
+Rohith R1, J. Nageswara Reddy2, K. Ravi Kiran3
+M.Tech, Embedded Systems, CM RCET, Hyderabad, India 1
+Assistant Professor, Depart ment of ECE, CM RCET, Hyderabad, India2
+Assistant Professor, Depart ment, of ECE, CM RCET, Hyderabad, India3
+INTRODUCTION
+Abstrac t:
+Smart ho me security and re mote monitoring have become vita l and indispensable in recent times, and with the advent of new con cepts
+like Internet of Things and development of advanced authentication and security technologies, the need for smarter security s ystems
+has only been growing. The design and development of an intelligent web -based door lock control system using face recognition
+technology, for authentication, re mote monitoring of visitors and re mote control of s mart door loc k has been reported in th is paper.
+This system uses Haar-like features for face detection and Local Binary Pattern Histogram (LBPH) fo r face recognition. The system
+also includes a web-based remote monitoring, an authentication module, and a bare-bones embedded IoT server, which transmits the
+live pictures of the visitors via email a long with an SMS notification, and the owner can then remotely control the lock by responding
+to the email with predefined security codes to unlock the door. This system finds a wide application in sma rt homes where the
+physical presence of the owner at all times is not possible, and where a remote authentication and control is desired. The system has
+been imple mented and tested using the Raspberry Pi 2 board, Python along with OpenCV are used to program the various face
+recognition and control modules."
+2f000034f040f6a23c756671477f5f573514af8a,Learning Transferable Distance Functions for Human Action Recognition and Detection,"-)41/ 64)5.-4)*- ,156)+- .7+615
+.4 07) )+61 4-+/161 ),
+,-6-+61
+9AEC ;=C
+*-C 5KJDA=IJ 7ELAHIEJO +DE= %
+= 6DAIEI E F=HJE= BKBEAJ
+B JDA HAGKEHAAJI BH JDA B
+=IJAH B 5?EA?A
+E JDA 5?D
++FKJEC 5?EA?A
+? 9AEC ;=C
+51 .4)5-4 718-4516;
+5FHEC
+) HECDJI 0MALAH E MEJD JDA
++FOHECDJ )?J B JDEI MH =O >A MEJDKJ
+=KJDHE=JE JDA BH .=EH ,A=EC 6DAHABHA
+B JDEI MH BH JDA FKHFIAI B FHEL=JA
+HAIA=H?D ?HEJE?EI HALEAM AMI HAFHJEC EI EAO J
+>A E MEJD JDA =M F=HJE?K=HO EB =FFHFHE=JAO"
+2fdc469096f72533726964260c80b4c14ae62fab,A Kernel Maximum uncertainty Discriminant Analysis and its Application to Face Recognition,"A KERNEL MAXIMUM UNCERTAINTY DISCRIMINANT
+ANALYSIS AND ITS APPLICATION TO FACE RECOGNITION
+Department of Electrical Engineering, Centro Universitario da FEI, FEI, Sao Paulo, Brazil
+Carlos Eduardo Thomaz
+Gilson Antonio Giraldi
+Department of Computer Science, National Laboratory for Scientific Computing, LNCC, Rio de Janeiro, Brazil
+Keywords:"
+2fce767ad830e0203d62ce30bbe75213b959d19c,Histogram of Log-Gabor Magnitude Patterns for face recognition,"014 IEEE International Conference on Acoustic, Speech and Signal Processing (ICASSP)
+978-1-4799-2893-4/14/$31.00 ©2014 IEEE
+School of Information and Communication Engineering,
+{yijun,
+Jun Yi†, Fei Su†‡
+. INTRODUCTION"
+2f17f6c460e02bd105dcbf14c9b73f34c5fb59bd,Robust Face Recognition Using the Deep C2D-CNN Model Based on Decision-Level Fusion,"Article
+Robust Face Recognition Using the Deep C2D-CNN
+Model Based on Decision-Level Fusion
+Jing Li 1,2,†, Tao Qiu 3,†, Chang Wen 3,*, Kai Xie 1,2 and Fang-Qing Wen 1,2
+School of Electronic and Information, Yangtze University, Jingzhou 434023, China;
+(J.L.); (K.X.); (F-Q.W.)
+National Demonstration Center for Experimental Electrical and Electronic Education, Yangtze University,
+Jingzhou 434023, China
+School of Computer Science, Yangtze University, Jingzhou 434023, China;
+* Correspondence: Tel.: +86-136-9731-5482
+These authors contributed equally to this work.
+Received: 20 May 2018; Accepted: 25 June 2018; Published: 28 June 2018"
+2f184c6e2c31d23ef083c881de36b9b9b6997ce9,Polichotomies on Imbalanced Domains by One-per-Class Compensated Reconstruction Rule,"Polichotomies on Imbalanced Domains
+by One-per-Class Compensated Reconstruction Rule
+Roberto D’Ambrosio and Paolo Soda
+Integrated Research Centre, Universit´a Campus Bio-Medico of Rome, Rome, Italy"
+2fa1629d75a03b950c56bf9b3430b2983abd7881,Learning geometrical transforms between multi camera views using Canonical Correlation Analysis,"CONRAD, MESTER: LEARNING GEOMETRICAL TRANSFORMS USING CCA
+Learning geometrical transforms between
+multi camera views using Canonical
+Correlation Analysis
+Christian Conrad
+Rudolf Mester
+Visual Sensorics and Information
+Processing Lab, Goethe University
+Frankfurt am Main, Germany
+Computer Vision Laboratory
+Electr. Eng. Dept. (ISY)
+Linköping University, Sweden"
+2f529605ed776d4fbeac2d73054247b495504ac7,Person Re-identification for Real-world Surveillance Systems,"Person Re-identification for Real-world
+Surveillance Systems
+Furqan M. Khan and Fran¸cois Br´emond
+INRIA Sophia Antipolis - M´editerran´ee
+004 Route des Lucioles, Sophia Antipolis
+{furqan.khan |"
+2f3125bf303bca19d9cdc9ffe1de2aacf7a23023,In-Bed Pose Estimation: Deep Learning with Shallow Dataset,"JOURNAL OF , VOL. , NO. , MONTH YEAR
+In-Bed Pose Estimation:
+Deep Learning with Shallow Dataset
+Shuangjun Liu, Yu Yin, and Sarah Ostadabbas"
+2f48f1cb1cfef964fa70d7868b87d81455e7be2e,A new image centrality descriptor for wrinkle frame detection in WCE videos,"MVA2013 IAPR International Conference on Machine Vision Applications, May 20-23, 2013, Kyoto, JAPAN
+A new image centrality descriptor for wrinkle frame detection in
+WCE videos.
+Santi Segu´ı1,2, Ekaterina Zaytseva1,2, Michal Drozdzal1,2, Carolina Malagelada3,
+Fernando Azpiroz3, Petia Radeva1,2 and Jordi Vitri`a1,2
+Computer Vision Center (CVC), Universitat Aut`onoma de Barcelona, Barcelona, Spain
+Dept. Matem`atica Aplicada i An`alisis, Universitat de Barcelona, Barcelona, Spain
+Digestive System Research Unit, Hospital Vall dHebron, Barcelona, Spain"
+2fc15f80080b4317cad60ad645300b49afddb19e,Low cognitive load strengthens distractor interference while high load attenuates when cognitive load and distractor possess similar visual characteristics.,"Atten Percept Psychophys
+DOI 10.3758/s13414-015-0866-9
+Low cognitive load strengthens distractor interference while high
+load attenuates when cognitive load and distractor possess similar
+visual characteristics
+Takehiro Minamoto & Zach Shipstead & Naoyuki Osaka &
+Randall W. Engle
+# The Psychonomic Society, Inc. 2015"
+2fc2250d843326f3eefab1941e5a6e54eef239b3,Appearance Based Facial Recognition System Using Dhmm with Linear Discriminant Analysis,"Daffodil International University
+Institutional Repository
+DIU Journal of Science and Technology
+Volume 10, Issue 1-2, July 2015
+2016-06-18
+Appearance Based Facial Recognition
+System Using Dhmm with Linear
+Discriminant Analysis
+Islam, Md. Rabiul
+http://hdl.handle.net/20.500.11948/1487
+Downloaded from http://dspace.library.daffodilvarsity.edu.bd, Copyright Daffodil International University Library"
+2f13dd8c82f8efb25057de1517746373e05b04c4,Evaluation of state-of-the-art algorithms for remote face recognition,"EVALUATION OF STATE-OF-THE-ART ALGORITHMS FOR REMOTE FACE
+RECOGNITION
+Jie Ni and Rama Chellappa
+Department of Electrical and Computer Engineering and Center for Automation Research, University
+of Maryland, College Park, MD 20742, USA"
+2fa241edb56734539c3b3487eda159e0b3e0f31c,Kinematic Pose Rectification for Performance Analysis and Retrieval in Sports,"Kinematic Pose Rectification for Performance Analysis and Retrieval in Sports
+Dan Zecha, Moritz Einfalt, Christian Eggert and Rainer Lienhart
+Multimedia Computing and Computer Vision Lab
+University of Augsburg"
+2f77c0908716b0febfda19ff6a0e2970c23af440,A face recognition system dealing with expression variant faces,"A face recognition system dealing with expression variant faces
+Stefano Arca∗, Paola Campadelli, Raffaella Lanzarotti, Giuseppe Lipori
+Dipartimento di Scienze dell’Informazione
+Universit`a degli Studi di Milano
+Via Comelico, 39/41 20135 Milano, Italy"
+2f02328dc09396e37e159141c5e21bef3e6ff06e,Combining face detection and people tracking in video sequences,"Author manuscript, published in ""The 3rd International Conference on Imaging for Crime Detection and Prevention - ICDP09,
+Kingston Upon Thames (London) : Royaume-Uni (2009)"""
+2f3a67394deb32f265bcff9daf2c829d4be36336,Improving Visual Relationship Detection Using Semantic Modeling of Scene Descriptions,"Improving Visual Relationship Detection using
+Semantic Modeling of Scene Descriptions
+Stephan Baier1, Yunpu Ma1,2, and Volker Tresp1,2
+Ludwig Maximilian University, 80538 Munich, Germany
+Siemens AG, Corporate Technology, Munich, Germany"
+2fa1fc116731b2b5bb97f06d2ac494cb2b2fe475,A novel approach to personal photo album representation and management,"A novel approach to personal photo album representation
+and management
+Edoardo Ardizzone, Marco La Cascia, and Filippo Vella
+Universit`a di Palermo - Dipartimento di Ingegneria Informatica
+Viale delle Scienze, 90128, Palermo, Italy"
+2f882ceaaf110046e63123b495212d7d4e99f33d,High Frequency Component Compensation based Super-Resolution Algorithm for Face Video Enhancement,"High Frequency Component Compensation based Super-resolution
+Algorithm for Face Video Enhancement
+Junwen Wu, Mohan Trivedi, Bhaskar Rao
+CVRR Lab, UC San Diego, La Jolla, CA 92093, USA"
+2f349ec19443523bc6c1e4b15fb677b1c188e253,Finding Time Series Motifs in Disk-Resident Data,"Finding Time Series Motifs in Disk-Resident Data
+Abdullah Mueen, Eamonn Keogh
+Nima Bigdely-Shamlo
+Department of Computer Science and Engineering
+University of California, Riverside, USA
+{mueen,"
+2f95340b01cfa48b867f336185e89acfedfa4d92,Face expression recognition with a 2-channel Convolutional Neural Network,"Face Expression Recognition with a 2-Channel
+Convolutional Neural Network
+Dennis Hamester, Pablo Barros, Stefan Wermter
+University of Hamburg — Department of Informatics
+Vogt-K¨olln-Straße 30, 22527 Hamburg, Germany
+http://www.informatik.uni-hamburg.de/WTM/"
+2fa3ad0329386bf9f55eb2c011e031ca71a11299,Weakly-supervised Semantic Parsing with Abstract Examples,
+2fa4f66a7c3846a189ea1f962592d7c20d9683b1,Object Detection with YOLO on Artwork Dataset,"Object Detection with YOLO on Artwork Dataset
+Yihui He∗
+Computer Science Department, Xi’an Jiaotong University"
+2faa09413162b0a7629db93fbb27eda5aeac54ca,Quantifying how lighting and focus affect face recognition performance,"NISTIR 7674
+Quantifying How Lighting and Focus
+Affect Face Recognition Performance
+Phillips, P. J.
+Beveridge, J. R.
+Draper, B.
+Bolme, D.
+Givens, G. H.
+Lui, Y. M."
+433bb1eaa3751519c2e5f17f47f8532322abbe6d,Face Recognition,
+434ad689f9f8bc034fa8489f80f851686b8b449e,Regularized Multi-Concept MIL for weakly-supervised facial behavior categorization,"A.RUIZ, X.BINEFA, J.VAN DE WEIJER: RMC-MIL FACIAL BEHAVIOR CATEGORIZATION 1
+Regularized Multi-Concept MIL for
+weakly-supervised facial behavior
+categorization
+Adria Ruiz1
+Joost Van de Weijer2
+Xavier Binefa1
+Universitat Pompeu Fabra (DTIC)
+Barcelona, Spain
+Centre de Visió per Computador
+Barcelona, Spain"
+43bf6489abd63992b82f2008b4417a1638955f0c,Principal Angles Separate Subject Illumination Spaces in YDB and CMU-PIE,"Short Papers___________________________________________________________________________________________________
+Principal Angles Separate Subject
+Illumination Spaces in YDB and CMU-PIE
+J. Ross Beveridge, Member, IEEE,
+Bruce A. Draper, Member, IEEE,
+Jen-Mei Chang, Michael Kirby,
+Holger Kley, and
+Chris Peterson"
+43bb20ccfda7b111850743a80a5929792cb031f0,Discrimination of Computer Generated versus Natural Human Faces,"PhD Dissertation
+International Doctorate School in Information and
+Communication Technologies
+DISI - University of Trento
+Discrimination of Computer Generated
+versus Natural Human Faces
+Duc-Tien Dang-Nguyen
+Advisor:
+Prof. Giulia Boato
+Universit`a degli Studi di Trento
+Co-Advisor:
+Prof. Francesco G. B. De Natale
+Universit`a degli Studi di Trento
+February 2014"
+439ac8edfa1e7cbc65474cab544a5b8c4c65d5db,Face authentication with undercontrolled pose and illumination,"SIViP (2011) 5:401–413
+DOI 10.1007/s11760-011-0244-6
+ORIGINAL PAPER
+Face authentication with undercontrolled pose and illumination
+Maria De Marsico · Michele Nappi · Daniel Riccio
+Received: 15 September 2010 / Revised: 14 December 2010 / Accepted: 17 February 2011 / Published online: 7 August 2011
+© Springer-Verlag London Limited 2011"
+432be99dde7d93001044048501c72c70e4ea2927,People and Mobile Robot Classification Through Spatio-Temporal Analysis of Optical Flow,"June 3, 2015
+3:29 WSPC/INSTRUCTION FILE
+People and mobile robot classification through spatio-temporal analysis
+of optical flow
+Plinio Moreno and Dario Figueira and Alexandre Bernardino and Jos´e Santos-Victor
+Institute for Systems and Robotics (ISR/IST)
+LARSyS, Instituto Superior T´ecnico
+Universidade de Lisboa
+{plinio, dfigueira, alex,
+Lisboa, Portugal
+The goal of this work is to distinguish between humans and robots in a mixed human-
+robot environment. We analyze the spatio-temporal patterns of optical flow-based fea-
+tures along several frames. We consider the Histogram of Optical Flow (HOF) and the
+Motion Boundary Histogram (MBH) features, which have shown good results on people
+detection. The spatio-temporal patterns are composed by groups of feature components
+that have similar values on previous frames. The groups of features are fed into the
+FuzzyBoost algorithm, which at each round selects the spatio-temporal pattern (i.e.
+feature set) having the lowest classification error. The search for patterns is guided by
+grouping feature dimensions, considering three algorithms: (a) similarity of weights from
+dimensionality reduction matrices, (b) Boost Feature Subset Selection (BFSS) and (c)"
+43f6953804964037ff91a4f45d5b5d2f8edfe4d5,Multi-feature fusion in advanced robotics applications,"Multi-Feature Fusion in Advanced Robotics Applications
+Zahid Riaz, Christoph Mayer, Michael Beetz,
+Bernd Radig
+Institut für Informatik
+Technische Universität München
+D-85748 Garching, Germany"
+430482d92007a3eec7009a2603aa5c1f2e63f661,Synaesthesia: mechanisms and broader traits,"Synaesthesia: mechanisms and broader traits.
+Agnieszka Barbara Janik
+Department of Psychology
+Goldsmiths University of London
+PhD in Psychology
+I, Agnieszka Barbara Janik, confirm that the work presented in this thesis is my own.
+Where information has been derived from other sources, I confirm that this has been
+indicated in the thesis."
+43a2c871450ba4d8888e8692aa98cb10e861ea71,Learning Generative ConvNet with Continuous Latent Factors by Alternating Back-Propagation,"Alternating Back-Propagation for Generator Network
+Tian Han †, Yang Lu †, Song-Chun Zhu, Ying Nian Wu
+Department of Statistics, University of California, Los Angeles, USA"
+439ec47725ae4a3660e509d32828599a495559bf,Facial Expressions Tracking and Recognition: Database Protocols for Systems Validation and Evaluation,"Facial Expressions Tracking and Recognition: Database Protocols for Systems Validation
+and Evaluation"
+43de246e9cc197623e27ab41a69530a8d121c77e,Developmental disruption of amygdala transcriptome and socioemotional behavior in rats exposed to valproic acid prenatally,"Barrett et al. Molecular Autism (2017) 8:42
+DOI 10.1186/s13229-017-0160-x
+R ES EAR CH
+Developmental disruption of amygdala
+transcriptome and socioemotional behavior
+in rats exposed to valproic acid prenatally
+Catherine E. Barrett 1,2*, Thomas M. Hennessey1,2, Katelyn M. Gordon1,2, Steve J. Ryan1,2, Morgan L. McNair1,2,
+Kerry J. Ressler3 and Donald G. Rainnie1,2
+Open Access"
+43c76cf17767a43a345cd1a8d7c08d18578b53ec,Boosting Color Feature Selection for Color Face Recognition,"Accepted Manuscript for Publication in IEEE Transaction on Image Processing
+Boosting Color Feature Selection for Color Face Recognition
+Jae Young Choi, Student Member, IEEE, Yong Man Ro, Senior Member, IEEE, and
+Konstantinos N. Plataniotis, Senior Member, IEEE"
+43fbe350681185ec9a18991dbcb19d694ce4f245,The Perspective Face Shape Ambiguity,"The Perspective Face Shape Ambiguity
+William A. P. Smith"
+432326edbc598774315a0def91d1fc224d732922,Classification of Diseased Arecanut based on Texture Features,"International Journal of Computer Applications (0975 – 8887)
+Recent Advances in Information Technology, 2014
+Classification of Diseased Arecanut based on Texture
+Suresha M
+Department of Computer
+Science
+Kuvempu University
+Karnataka, India
+Features
+Ajit Danti
+Department of MCA
+JNN College of Engineering
+Karnataka, India
+S. K Narasimhamurthy
+Department of Mathematics
+Kuvempu University
+Karnataka, India"
+434bf475addfb580707208618f99c8be0c55cf95,DeXpression: Deep Convolutional Neural Network for Expression Recognition,"UNDER CONSIDERATION FOR PUBLICATION IN PATTERN RECOGNITION LETTERS
+DeXpression: Deep Convolutional Neural
+Network for Expression Recognition
+Peter Burkert∗‡, Felix Trier∗‡, Muhammad Zeshan Afzal†‡,
+Andreas Dengel†‡ and Marcus Liwicki‡
+German Research Center for Artificial Intelligence (DFKI), Kaiserslautern, Germany
+University of Kaiserslautern, Gottlieb-Daimler-Str., Kaiserslautern 67663, Germany"
+43836d69f00275ba2f3d135f0ca9cf88d1209a87,Effective hyperparameter optimization using Nelder-Mead method in deep learning,"Ozaki et al. IPSJ Transactions on Computer Vision and
+Applications (2017) 9:20
+DOI 10.1186/s41074-017-0030-7
+IPSJ Transactions on Computer
+Vision and Applications
+RESEARCH PAPER
+Open Access
+Effective hyperparameter optimization
+using Nelder-Mead method in deep learning
+Yoshihiko Ozaki1,2, Masaki Yano1,2 and Masaki Onishi1,2*"
+43e11904ca961006be79f650025b5d8fbac9913f,Unsupervised Deep Video Hashing with Balanced Rotation,"Unsupervised Deep Video Hashing with Balanced Rotation
+IJCAI Anonymous Submission 2367"
+4362368dae29cc66a47114d5ffeaf0534bf0159c,"Performance Analysis of FDA Based Face Recognition Using Correlation, ANN and SVM","UACEE International Journal of Artificial Intelligence and Neural Networks ISSN:- 2250-3749 (online)
+Performance Analysis of FDA Based Face
+Recognition Using Correlation, ANN and SVM
+Mahesh Goyani
+Akash Dhorajiya
+Ronak Paun
+Department of Computer Engineering
+Department of Computer Engineering
+Department of Computer Engineering
+GCET, Sardar Patel University
+GCET, Sardar Patel University
+GCET, Sardar Patel University
+Anand, INDIA
+Anand, INDIA
+Anand, INDIA
+e- mail :
+e- mail :
+e- mail :"
+4350bb360797a4ade4faf616ed2ac8e27315968e,Edge Suppression by Gradient Field Transformation Using Cross-Projection Tensors,"MITSUBISHI ELECTRIC RESEARCH LABORATORIES
+http://www.merl.com
+Edge Suppression by Gradient Field
+Transformation using Cross-Projection
+Tensors
+Amit Agrawal, Ramesh Raskar, Rama Chellappa
+TR2006-058
+June 2006"
+43c1bf9bd7b18c9603324c328f0f2696278c5327,Tracking Multiple Players using a Single Camera,"Noname manuscript No.
+(will be inserted by the editor)
+Tracking Multiple Players using a Single Camera
+Horesh BenShitrit · Mirko Raca · Fran¸cois
+Fleuret · Pascal Fua
+Received: date / Accepted: date"
+439da29cf857151f386e6af488b2d60c098c4fd8,Person Authentication Using Color Face Recognition,"Kiran Davakhar et al. Int. Journal of Engineering Research and Applications www.ijera.com
+Vol. 3, Issue 5, Sep-Oct 2013, pp.178-182
+RESEARCH ARTICLE OPEN ACCESS
+Person Authentication Using Color Face Recognition
+Kiran Davakhar1, S. B. Mule2, Achala Deshmukh3
+(Department of E&TC, Sinhgad COE, Vadgaon, Pune, Pune University, India)
+(Department of E&TC, Sinhgad COE, Vadgaon, Pune, Pune University, India)
+(Department of E&TC, Sinhgad COE, Vadgaon, Pune, Pune University, India)"
+43476cbf2a109f8381b398e7a1ddd794b29a9a16,A Practical Transfer Learning Algorithm for Face Verification,"A Practical Transfer Learning Algorithm for Face Verification
+Xudong Cao
+David Wipf
+Fang Wen
+Genquan Duan
+Jian Sun"
+4353d0dcaf450743e9eddd2aeedee4d01a1be78b,Learning Discriminative LBP-Histogram Bins for Facial Expression Recognition,"Learning Discriminative LBP-Histogram Bins
+for Facial Expression Recognition
+Caifeng Shan and Tommaso Gritti
+Philips Research, High Tech Campus 36, Eindhoven 5656 AE, The Netherlands
+{caifeng.shan,"
+4335d53e763b2caf20f06928cd420ae09e5041ad,Discrete-continuous optimization for multi-target tracking,"Discrete-Continuous Optimization for Multi-Target Tracking
+Anton Andriyenko1
+Konrad Schindler2
+Stefan Roth1
+Department of Computer Science, TU Darmstadt
+Photogrammetry and Remote Sensing Group, ETH Z¨urich"
+43d073d3fdc22f0d74793fdac47ff56b66c95990,Online Localization and Prediction of Actions and Interactions,"IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE
+Online Localization and Prediction of
+Actions and Interactions
+Khurram Soomro, Member, IEEE, Haroon Idrees, Member, IEEE, and Mubarak Shah, Fellow, IEEE"
+43d4927f5113c5e376ab05d41e33063a6d06d727,Pedestrian Detection: Exploring Virtual Worlds,"Pedestrian Detection: Exploring Virtual Worlds
+Javier Mar´ın
+Computer Vision Center,
+Universitat Aut`onoma de Barcelona, Spain
+David Ger´onimo, David V´azquez, Antonio M. L´opez
+Computer Vision Center and Computer Science Department,
+Universitat Aut`onoma de Barcelona, Spain
+Introduction
+The objective of advanced driver assistance systems (ADAS) is to improve traffic safety by assisting the driver
+through warnings and by even automatically taking active countermeasures. Two examples of successfully com-
+mercialised ADAS are lane departure warnings and adaptive cruise control, which make use of either active
+(e.g., radar) or passive (e.g., cameras) sensors to keep the vehicle on the lane and maintain a safe distance from
+the preceding vehicle, respectively. One of the most complex safety systems are pedestrian protection systems
+(PPSs) (Bishop, 2005; Gandhi & Trivedi, 2007; Enzweiler & Gavrila, 2009; Ger´onimo et al., 2010), which are
+specialised in avoiding vehicle-to-pedestrian collisions. In fact, this kind of accidents results in approximately
+50000 injuries and 7000 killed pedestrians every year just in the European Union (UN-ECE, 2007). Similar
+statistics apply to the United States, while underdeveloped countries are increasing theirs year after year. In the
+ase of PPSs, the most promising approaches make use of images as main source of information, as can be seen
+in the large amount of proposals exploiting them (Ger´onimo et al., 2010). Hence, the core of a PPS is a forward
+facing camera that acquires images and processes them using Computer Vision techniques. In fact, the Computer"
+434627a03d4433b0df03058724524c3ac1c07478,Online Multi-Target Tracking With Unified Handling of Complex Scenarios,"IEEE TRANSANCTIONS ON IMAGE PROCESSING, VOL. XX, NO. XX, NOVEMBER 2014
+Online Multi-Target Tracking
+with Unified Handling of Complex Scenarios
+Huaizu Jiang, Jinjun Wang, Yihong Gong, Senior Member, IEEE
+Na Rong, Zhenhua Chai, and Nanning Zheng, Fellow, IEEE"
+431fc5903ab4853820eac6614073c5b7aec0ac31,Semantic-visual concept relatedness and co-occurrences for image retrieval,"978-1-4673-2533-2/12/$26.00 ©2012 IEEE
+ICIP 2012"
+434fe2cca3321c08ef30a0076864298cf608e0d5,Multiple Human Tracking in High-Density Crowds,"Multiple Human Tracking in High-Density Crowds
+Irshad Ali1, Matthew N. Dailey 2
+Computer Science and Information Management Program, Asian Institute of Technology
+(AIT), Pathumthani, Thailand"
+43cb50f669a0d492256d11c6cc4128ba0ce79a3e,Per-Pixel Feedback for improving Semantic Segmentation,"Indian Institute of Technology Roorkee
+Department of Mathematics
+Per-Pixel Feedback for improving Semantic
+Segmentation
+Aditya Ganeshan
+Submitted in part fulfilment of the requirements for the degree of
+Integrated Masters of Science in Applied Mathematics, May 2017"
+434a0aebf3522638d75614b0de1f0c2dcc1b19f1,Visual Analytics in Deep Learning: An Interrogative Survey for the Next Frontiers,"Visual Analytics in Deep Learning:
+An Interrogative Survey for the Next Frontiers
+Fred Hohman, Member, IEEE, Minsuk Kahng, Member, IEEE, Robert Pienta, Member, IEEE,
+nd Duen Horng Chau, Member, IEEE"
+43b8b5eeb4869372ef896ca2d1e6010552cdc4d4,Large-scale Supervised Hierarchical Feature Learning for Face Recognition,"Large-scale Supervised Hierarchical Feature Learning for Face Recognition
+Jianguo Li, Yurong Chen
+Intel Labs China"
+43ae4867d058453e9abce760ff0f9427789bab3a,Graph Embedded Nonparametric Mutual Information for Supervised Dimensionality Reduction,"Graph Embedded Nonparametric Mutual
+Information For Supervised
+Dimensionality Reduction
+Dimitrios Bouzas, Nikolaos Arvanitopoulos, Student Member, IEEE, and Anastasios Tefas, Member, IEEE"
+438b88fe40a6f9b5dcf08e64e27b2719940995e0,Building a classification cascade for visual identification from one example,"Building a Classi(cid:2)cation Cascade for Visual Identi(cid:2)cation from One Example
+Andras Ferencz
+Erik G. Learned-Miller
+Computer Science, U.C. Berkeley
+Computer Science, UMass Amherst
+Jitendra Malik
+Computer Science, U.C. Berkeley"
+43e3cd896d4dada4114a8961b98ae9f6d6ff9401,Image2speech: Automatically generating audio descriptions of images,"Image2speech: Automatically generating audio descriptions of images
+Mark Hasegawa-Johnson1, Alan Black2, Lucas Ondel3, Odette Scharenborg4, Francesco Ciannella2
+. University of Illinois, Urbana, IL USA 2. Carnegie-Mellon University, Pittsburgh, PA USA
+. Brno University of Technology, Brno, Czech Republic
+. Centre for Language Studies, Radboud University, Nijmegen, Netherlands"
+43fb9efa79178cb6f481387b7c6e9b0ca3761da8,Mixture of parts revisited: Expressive part interactions for Pose Estimation,"Mixture of Parts Revisited: Expressive Part Interactions for Pose Estimation
+Anoop R Katti
+IIT Madras
+Chennai, India
+Anurag Mittal
+IIT Madras
+Chennai, India"
+4332314ac4ab56153f68a9e55e92b3659e93a5b4,Learning Collective Crowd Behaviors with Dynamic Pedestrian-Agents,"Int J Comput Vis
+DOI 10.1007/s11263-014-0735-3
+Learning Collective Crowd Behaviors with Dynamic
+Pedestrian-Agents
+Bolei Zhou · Xiaoou Tang · Xiaogang Wang
+Received: 9 September 2013 / Accepted: 24 May 2014
+© Springer Science+Business Media New York 2014"
+43ed518e466ff13118385f4e5d039ae4d1c000fb,Classification of Occluded Objects Using Fast Recurrent Processing,"Classification of Occluded Objects using Fast Recurrent
+Processing
+Ozgur Yilmaza,∗
+Turgut Ozal University, Department of Computer Engineering, Ankara Turkey"
+43d7d0d0d0e2d6cf5355e60c4fe5b715f0a1101a,Playlist Generation using Facial Expression Analysis and Task Extraction,"Pobrane z czasopisma Annales AI- Informatica http://ai.annales.umcs.pl
+Data: 04/05/2018 16:53:32
+U M CS"
+88e3aefe454e72388bbbe7dfa0b74fcfc52032f0,Weighted Gradient Feature Extraction Based on Multiscale Sub-Blocks for 3D Facial Recognition in Bimodal Images,"Article
+Weighted Gradient Feature Extraction Based on
+Multiscale Sub-Blocks for 3D Facial Recognition in
+Bimodal Images
+Yingchun Guo *, Ruoyu Wei and Yi Liu *
+School of Computer Science and Engineering, Hebei University of Technology, Tianjin 300400, China;
+* Correspondence: (Y.G.); (Y.L.)
+Received: 6 January 2018; Accepted: 19 February 2018; Published: 28 February 2018"
+88c6d4b73bd36e7b5a72f3c61536c8c93f8d2320,Image patch modeling in a light field,"Image patch modeling in a light field
+Zeyu Li
+Electrical Engineering and Computer Sciences
+University of California at Berkeley
+Technical Report No. UCB/EECS-2014-81
+http://www.eecs.berkeley.edu/Pubs/TechRpts/2014/EECS-2014-81.html
+May 15, 2014"
+889bc64c7da8e2a85ae6af320ae10e05c4cd6ce7,Using Support Vector Machines to Enhance the Performance of Bayesian Face Recognition,"Using Support Vector Machines to Enhance the
+Performance of Bayesian Face Recognition
+Zhifeng Li, Member, IEEE, and Xiaoou Tang, Senior Member, IEEE"
+88dc2b2f6d033b290ed56b844c98c3ee6efde80b,Experimental manipulation of face-evoked activity in the fusiform gyrus of individuals with autism.,"!""#$%&’(#)*+%,&$%-.,/*.&-+-%012%34&*+%5/#6+’$#(17
+8/2%9:%;+<(+=0+’%9>?>
+FB0*#$""+’%F$1)"".*.G1%F’+$$
+H/I.’=&%J(-%K+G#$(+’+-%#/%L/G*&/-%&/-%M&*+$%K+G#$(+’+-%NB=0+’2%?>D9COP%K+G#$(+’+-%.II#)+2%Q.’(#=+’%R.B$+S%EDT
+P?%Q.’(#=+’%;(’++(S%J./-./%M?!%EURS%5V
+;.)#&*%N+B’.$)#+/)+
+FB0*#)&(#./%-+(&#*$S%#/)*B-#/G%#/$(’B)(#./$%I.’%&B("".’$%&/-%$B0$)’#<(#./%#/I.’=&(#./2
+""((<2WW,,,X#/I.’=&,.’*-X).=W$=<<W(#(*+Y)./(+/(Z(DP?DD??PE
+L[<+’#=+/(&*%=&/#<B*&(#./%.I%I&)+T+6.\+-%&)(#6#(1%#/%(""+%IB$#I.’=%G1’B$%.I
+#/-#6#-B&*$%,#(""%&B(#$=
+‘#’$(%<B0*#$""+-%./2%>P%Q&1%9>?>
+N+B’.$)#+/)+SS%‘#’$(%<B0*#$""+-%./2%>P%Q&1%9>?>%a#‘#’$(b
+5KJ2%""((<2WW-[X-.#X.’GW?>X?>d>W?DPD>C??>>E:dE?dO
+PLEASE SCROLL DOWN FOR ARTICLE
+Full terms and conditions of use: http://www.informaworld.com/terms-and-conditions-of-access.pdf
+This article may be used for research, teaching and private study purposes. Any substantial or
+systematic reproduction, re-distribution, re-selling, loan or sub-licensing, systematic supply or
+distribution in any form to anyone is expressly forbidden.
+The publisher does not give any warranty express or implied or make any representation that the contents
+will be complete or accurate or up to date. The accuracy of any instructions, formulae and drug doses"
+88bbedf7f6f0dcc830640c521acece28e67be356,Robust sparse coding for face recognition,"Robust Sparse Coding for
+Face Recognition
+Meng Yang, Lei Zhang, Jian Yang, David Zhang
+Hong Kong Polytechnic Univ.
+Presenter : 江振國"
+88a898592b4c1dfd707f04f09ca58ec769a257de,MobileFace: 3D Face Reconstruction with Efficient CNN Regression,"MobileFace: 3D Face Reconstruction
+with Ef‌f‌icient CNN Regression
+Nikolai Chinaev1, Alexander Chigorin1, and Ivan Laptev1,2
+VisionLabs, Amsterdam, The Netherlands
+{n.chinaev,
+Inria, WILLOW, Departement d’Informatique de l’Ecole Normale Superieure, PSL
+Research University, ENS/INRIA/CNRS UMR 8548, Paris, France"
+881066ec43bcf7476479a4146568414e419da804,From Traditional to Modern: Domain Adaptation for Action Classification in Short Social Video Clips,"From Traditional to Modern : Domain Adaptation for
+Action Classification in Short Social Video Clips
+Aditya Singh, Saurabh Saini, Rajvi Shah, and P J Narayanan
+Center for Visual Information Technology, IIIT Hyderabad, India"
+8813368c6c14552539137aba2b6f8c55f561b75f,Trunk-Branch Ensemble Convolutional Neural Networks for Video-Based Face Recognition,"Trunk-Branch Ensemble Convolutional Neural
+Networks for Video-based Face Recognition
+Changxing Ding, Student Member, IEEE, Dacheng Tao, Fellow, IEEE"
+886dfe069bd0f6bbb0a885e0bf2788007bfb737c,3-D Facial Expression Representation using B-spline Statistical Shape Model,"-D Facial Expression Representation using
+B-spline Statistical Shape Model
+Wei Quan, Bogdan J. Matuszewski, Lik-Kwan Shark, Djamel Ait-Boudaoud
+Applied Digital Signal and Image Processing Research Centre
+University of Central Lancashire
+Preston PR1 2HE, UK"
+883006c0f76cf348a5f8339bfcb649a3e46e2690,Weakly supervised pain localization using multiple instance learning,"Weakly Supervised Pain Localization using Multiple Instance Learning
+Karan Sikka, Abhinav Dhall and Marian Bartlett"
+88f5f9d92c4fa696457a824c3eec204da05ba6a4,XGAN: Unsupervised Image-to-Image Translation for many-to-many Mappings,"XGAN: Unsupervised Image-to-Image
+Translation for Many-to-Many Mappings
+Am´elie Royer1[0000−0002−8407−0705], Konstantinos Bousmalis2,6, Stephan
+Gouws2, Fred Bertsch3, Inbar Mosseri4, Forrester Cole4, and Kevin Murphy5
+IST Austria, 3400 Klosterneuburg, Austria
+Work done while at Google Brain London, UK
+Google Brain, London, UK
+{konstantinos,
+Google Brain, Mountain View, USA
+Google Research, Cambridge, USA
+5 Google Research, Mountain View, USA
+6 Currently at Deepmind, London, UK"
+88850b73449973a34fefe491f8836293fc208580,XBeats-An Emotion Based Music Player,"www.ijaret.org Vol. 2, Issue I, Jan. 2014
+ISSN 2320-6802
+INTERNATIONAL JOURNAL FOR ADVANCE RESEARCH IN
+ENGINEERING AND TECHNOLOGY
+WINGS TO YOUR THOUGHTS…..
+XBeats-An Emotion Based Music Player
+Sayali Chavan1, Ekta Malkan2, Dipali Bhatt3, Prakash H. Paranjape4
+U.G. Student, Dept. of Computer Engineering,
+D.J. Sanghvi College of Engineering,
+Vile Parle (W), Mumbai-400056.
+U.G. Student, Dept. of Computer Engineering,
+D.J. Sanghvi College of Engineering,
+Vile Parle (W), Mumbai-400056.
+U.G. Student, Dept. of Computer Engineering,
+D.J. Sanghvi College of Engineering,
+Vile Parle (W), Mumbai-400056.
+Assistant Professor, Dept. of Computer Engineering,
+D.J. Sanghvi College of Engineering,
+Vile Parle (W), Mumbai-400056."
+88f2952535df5859c8f60026f08b71976f8e19ec,A neural network framework for face recognition by elastic bunch graph matching,"A neural network framework for face
+recognition by elastic bunch graph matching
+Francisco A. Pujol López, Higinio Mora Mora*, José A. Girona Selva"
+88c5baffa5522ea62ff5d5c41036b92e30d7e3c9,Who is who at different cameras. People re-identification using Depth Cameras,"Document downloaded from:
+This paper must be cited as:
+The final publication is available at
+Copyright
+Additional Information
+http://dx.doi.org/10.1049/iet-cvi.2011.0140http://hdl.handle.net/10251/56627Institution of Engineering and Technology (IET)Albiol Colomer, AJ.; Albiol Colomer, A.; Oliver Moll, J.; Mossi García, JM. (2012). Who iswho at different cameras: people re-identification using depth cameras. IET ComputerVision. 6(5):378-387. doi:10.1049/iet-cvi.2011.0140."
+887cd2271ca5a58501786d49afa53139f48c66f3,"Visual orienting in children with autism: Hyper‐responsiveness to human eyes presented after a brief alerting audio‐signal, but hyporesponsiveness to eyes presented without sound","SHORT REPORT
+Visual Orienting in Children With Autism: Hyper-Responsiveness
+to Human Eyes Presented After a Brief Alerting Audio-Signal,
+ut Hyporesponsiveness to Eyes Presented Without Sound
+Johan Lundin Kleberg, Emilia Thorup, and Terje Falck-Ytter
+Autism Spectrum Disorder (ASD) has been associated with reduced orienting to social stimuli such as eyes, but the
+results are inconsistent. It is not known whether atypicalities in phasic alerting could play a role in putative altered
+social orienting in ASD. Here, we show that in unisensory (visual) trials, children with ASD are slower to orient to
+eyes (among distractors) than controls matched for age, sex, and nonverbal IQ. However, in another condition where
+brief spatially nonpredictive sound was presented just before the visual targets, this group effect was reversed. Our
+results indicate that orienting to social versus nonsocial stimuli is differently modulated by phasic alerting mecha-
+nisms in young children with ASD. Autism Res 2017, 10: 246–250. VC 2016 The Authors Autism Research published
+y Wiley Periodicals, Inc. on behalf of International Society for Autism Research.
+Keywords: Autism; social orienting; eye tracking; phasic alerting; arousal; face perception
+According to social orienting theories of Autism Spec-
+trum Disorder (ASD), people with this condition orient
+less or slower to socially salient stimuli than people
+with typical development (TD; Dawson et al., 2004).
+Further, it is assumed that reduced orienting early in
+life may have cascading effects on both brain develop-"
+887b7676a4efde616d13f38fcbfe322a791d1413,Deep Temporal Appearance-Geometry Network for Facial Expression Recognition,"Deep Temporal Appearance-Geometry Network
+for Facial Expression Recognition
+Injae Lee‡ Chunghyun Ahn‡
+Junmo Kim†
+Heechul Jung† Sihaeng Lee† Sunjeong Park†
+Korea Advanced Institute of Science and Technology†
+Electronics and Telecommunications Research Institute‡
+{heechul, haeng, sunny0414, {ninja,"
+88909ec19d2c6750f836e8b9c15ee3e1236b37e7,Local Learning with Deep and Handcrafted Features for Facial Expression Recognition,"Local Learning with Deep and Handcrafted Features
+for Facial Expression Recognition
+Mariana-Iuliana Georgescu1,2
+Radu Tudor Ionescu1,3
+Marius Popescu1,3
+University of Bucharest, 14 Academiei, Bucharest, Romania
+Novustech Services, 12B Aleea Ilioara, Bucharest, Romania
+SecurifAI, 21D Mircea Vod˘a, Bucharest, Romania
+georgescu"
+887b7d34ebac80bbe3fb3792ed579dd82ff7e373,Query-driven iterated neighborhood graph search for scalable visual indexing,"Query-driven iterated neighborhood graph search for scalable
+visual indexing∗
+Jingdong Wang† Xian-Sheng Hua‡ Shipeng Li†
+Microsoft Corporation
+Microsoft Research Asia
+August 10, 2012"
+8878871ec2763f912102eeaff4b5a2febfc22fbe,Human Action Recognition in Unconstrained Videos by Explicit Motion Modeling,"Human Action Recognition in Unconstrained
+Videos by Explicit Motion Modeling
+Yu-Gang Jiang, Qi Dai, Wei Liu, Xiangyang Xue, and Chong-Wah Ngo"
+8855d6161d7e5b35f6c59e15b94db9fa5bbf2912,COGNITION IN PREGNANCY AND THE POSTPARTUM PERIOD COGNITIVE REORGANIZATION AND PROTECTIVE MECHANISMS IN PREGNANCY AND THE POSTPARTUM PERIOD By,COGNITION IN PREGNANCY AND THE POSTPARTUM PERIOD
+88132a786442ab8a5038d81164384c1c1f7231c8,Limited attentional bias for faces in toddlers with autism spectrum disorders.,"ORIGINAL ARTICLE
+Limited Attentional Bias for Faces in Toddlers
+With Autism Spectrum Disorders
+Katarzyna Chawarska, PhD; Fred Volkmar, MD; Ami Klin, PhD
+Context: Toddlers with autism spectrum disorders (ASD)
+exhibit poor face recognition and atypical scanning pat-
+terns in response to faces. It is not clear if face-processing
+deficits are also expressed on an attentional level. Typical
+individuals require more effort to shift their attention from
+faces compared with other objects. This increased disen-
+gagement cost is thought to reflect deeper processing of these
+socially relevant stimuli.
+Objective: To examine if attention disengagement from
+faces is atypical in the early stages of ASD.
+Design: Attention disengagement was tested in a varia-
+tion of the cued attention task in which participants were
+required to move their visual attention from face or non-
+face central fixation stimuli and make a reactive saccade
+to a peripheral target. The design involved diagnosis as
+between-group factor and central fixation stimuli type"
+88590857138505ee524f3adf6da9c57352d917f2,Random Subspace Two-Dimensional PCA for Face Recognition,"Random Subspace Two-Dimensional PCA for
+Face Recognition
+Nam Nguyen, Wanquan Liu and Svetha Venkatesh
+Department of Computing, Curtin University of Technology, WA 6845, Australia"
+8855755a72c148dfde84bb08ae65d58c260e70d4,Robust image classification: analysis and applications,"POUR L'OBTENTION DU GRADE DE DOCTEUR ÈS SCIENCESacceptée sur proposition du jury:Prof. P. Vandergheynst, président du juryProf. P. Frossard, directeur de thèseProf. J. Bruna, rapporteurProf. N. Paragios, rapporteurDr F. Fleuret, rapporteurRobust image classification: analysis and applicationsTHÈSE NO 7258 (2016)ÉCOLE POLYTECHNIQUE FÉDÉRALE DE LAUSANNEPRÉSENTÉE LE 16 DÉCEMBRE 2016 À LA FACULTÉ DES SCIENCES ET TECHNIQUES DE L'INGÉNIEURLABORATOIRE DE TRAITEMENT DES SIGNAUX 4PROGRAMME DOCTORAL EN GÉNIE ÉLECTRIQUE Suisse2016PARAlhussein FAWZI"
+88bee9733e96958444dc9e6bef191baba4fa6efa,Extending Face Identification to Open-Set Face Recognition,"Extending Face Identification to
+Open-Set Face Recognition
+Cassio E. dos Santos Jr., William Robson Schwartz
+Department of Computer Science
+Universidade Federal de Minas Gerais
+Belo Horizonte, Brazil"
+8818dafda0cf230731ac2f962d8591c89a9fac09,xGEMs: Generating Examplars to Explain Black-Box Models,"xGEMs: Generating Examplars to Explain Black-Box
+Models
+Shalmali Joshi
+UT Austin
+Oluwasanmi Koyejo
+Been Kim
+Google Brain
+Joydeep Ghosh
+UT Austin"
+88fd4d1d0f4014f2b2e343c83d8c7e46d198cc79,Joint action recognition and summarization by sub-modular inference,"978-1-4799-9988-0/16/$31.00 ©2016 IEEE
+ICASSP 2016"
+9f22e0749405dfc3e3211474b933aa7514722e4b,Theory of mind - not emotion recognition - mediates the relationship between executive functions and social functioning in patients with schizophrenia.,"© Medicinska naklada - Zagreb, Croatia
+Original paper
+THEORY OF MIND - NOT EMOTION RECOGNITION -
+MEDIATES THE RELATIONSHIP BETWEEN EXECUTIVE
+FUNCTIONS AND SOCIAL FUNCTIONING IN PATIENTS
+WITH SCHIZOPHRENIA
+Michal Hajdúk1,2, Dana Kraj(cid:254)ovi(cid:254)ová2, Miroslava Zimányiová2, Viera Ko(cid:284)ínková2,
+Anton Heretik1 & Ján Pe(cid:254)e(cid:278)ák2
+Department of Psychology, Faculty of Arts, Comenius University, Bratislava, Slovak Republic
+Clinic of Psychiatry, Faculty of Medicine, Comenius University, Bratislava, Slovak Republic
+received: 9.8.2017;
+revised: 15.3.2018;
+ccepted: 17.7.2018
+SUMMARY
+Background: Dysfunction of social-cognitive abilities is one of the hallmark features of schizophrenia and is associated with
+neurocognition and social functioning. The Green and Nuechterlein model proposed that social cognition mediates the relationship
+etween neurocognition and functional outcome. We tested this hypothesis in schizophrenia patients in the everyday clinical setting.
+Subjects and methods: Social cognition, executive function and social functioning were assessed in a group of 43 patients with
+schizophrenia or schizoaffective disorder using a range of measures.
+Results: Theory of mind was associated with executive functions and social functioning. Results of our mediation analysis"
+9f889c81bdb1d791e22c5f455baf32829b5b788b,The GRODE metrics: Exploring the performance of group detection approaches,"Exploring the Performance of Group Detection Approaches
+The GRODE Metrics:
+Francesco Setti
+ISTC - CNR
+via alla Cascata 56/C, I-38121 Trento"
+9fd5ecc538a9344814dc00b92beb45c54d5dff3e,NIC: A Robust Background Extraction Algorithm for Foreground Detection in Dynamic Scenes,"NIC: A Robust Background Extraction Algorithm
+for Foreground Detection in Dynamic Scenes
+Thien Huynh-The, Student Member, IEEE, Oresti Banos, Member, IEEE, Sungyoung Lee, Member, IEEE,
+Byeong Ho Kang, Eun-Soo Kim, and Thuong Le-Tien, Member, IEEE"
+9f1319162974cb4d6125e8c6c52878ebc48eb8a7,Loss factors for learning Boosting ensembles from imbalanced data,"Loss Factors for Learning Boosting Ensembles
+from Imbalanced Data
+Roghayeh Soleymani∗, Eric Granger∗, Giorgio Fumera†
+Laboratoire d’imagerie, de vision et d’intelligence artificielle, École de technologie supérieure,
+Université du Québec, Montreal, Canada,
+Dept. of Electrical and Electronic Engineering, University of Cagliari, Cagliari, Italy,
+Email:
+Email:"
+9fede7e3fac47a4206a643c4647834e5680f2a8f,Results from a Real-time Stereo-based Pedestrian Detection System on a Moving Vehicle,"Results from a Real-time Stereo-based Pedestrian Detection System on
+Moving Vehicle
+Max Bajracharya, Baback Moghaddam, Andrew Howard, Shane Brennan, Larry H. Matthies"
+9f91fd3e9621b88769ecc330f362a591876f948f,Bicycle Detection Based On Multi-feature and Multi-frame Fusion in low-resolution traffic videos,"Bicycle Detection Based On Multi-feature and
+Multi-frame Fusion in low-resolution traffic videos
+Yicheng Zhang, Student Member, IEEE, and Qiang Ling, Senior Member, IEEE
+Some other methods like using MSC-HOG method for
+detection [12] or detecting tires of bicycles in videos [13]
+lso can get good results, but they are either time consuming
+or high quality videos required. Some new methods, such the
+method based on HOG features with ROI in [14], try to use
+more advanced hardware device like GPU to finish the great
+mount of computation.
+In summary, there are three major defects in the available
+icycle detection methods based on image processing. First,
+they require fine features for detection, which are hard to
+extract, particularly for traffic videos with low-resolution.
+Second, the processing time under these methods is usually
+long and may not meet
+the requirement of the real-time
+detection. Last, they make the bicycle detection decision by the
+information in a single frame, which may lead to misjudgment,
+especially in the case of strong noise or light changing."
+9fc37eccb3d12329f208cb7d3a509024e182a100,Mel-cepstral feature extraction methods for image representation,Downloaded From: https://www.spiedigitallibrary.org/journals/Optical-Engineering on 9/28/2017 Terms of Use: https://spiedigitallibrary.spie.org/ss/TermsOfUse.aspx
+9fb1bd7d98a2fa79e1b9cb21b865ec7af0c1283f,Not All Distraction Is Bad: Working Memory Vulnerability to Implicit Socioemotional Distraction Correlates with Negative Symptoms and Functional Impairment in Psychosis,"Hindawi Publishing Corporation
+Schizophrenia Research and Treatment
+Volume 2014, Article ID 320948, 6 pages
+http://dx.doi.org/10.1155/2014/320948
+Clinical Study
+Not All Distraction Is Bad: Working Memory Vulnerability
+to Implicit Socioemotional Distraction Correlates with Negative
+Symptoms and Functional Impairment in Psychosis
+Quintino R. Mano,1,2,3 Gregory G. Brown,1,2,3 Heline Mirzakhanian,1,2,3
+Khalima Bolden,1,2,3 Kristen S. Cadenhead,1,2,3 and Gregory A. Light1,2,3
+San Diego Veterans Affairs Healthcare System, San Diego, CA 92161, USA
+VISN-22 Mental Illness, Research, Education and Clinical Center (MIRECC), VA San Diego Healthcare System,
+San Diego, CA 92161, USA
+Department of Psychiatry, University of California, San Diego, School of Medicine, San Diego, CA, USA
+Correspondence should be addressed to Gregory G. Brown;
+Received 31 July 2013; Revised 26 November 2013; Accepted 15 December 2013; Published 27 February 2014
+Academic Editor: Steven J. Siegel
+Copyright © 2014 Quintino R. Mano et al. This is an open access article distributed under the Creative Commons Attribution
+License, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly
+ited."
+9f483933bcc872771707dcf0acb1382411ffee94,Which Facial Expressions Can Reveal Your Gender? A Study With 3D Faces,"IN SUBMISSION TO IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY
+Which Facial Expressions Can Reveal Your
+Gender? A Study With 3D Faces
+Baiqiang XIA"
+9fa1be81d31fba07a1bde0275b9d35c528f4d0b8,Identifying Persons by Pictorial and Contextual Cues,"Identifying Persons by Pictorial and
+Contextual Cues
+Nicholas Leonard Pi¨el
+Thesis submitted for the degree of Master of Science
+Supervisor:
+Prof. dr. Theo Gevers
+April 2009"
+9f7c1b794805be34bc2091e02c382c5461e0bcb4,On-board real-time tracking of pedestrians on a UAV,"On-board real-time tracking of pedestrians on a UAV
+Floris De Smedt, Dries Hulens, and Toon Goedem´e
+ESAT-PSI-VISICS, KU Leuven, Belgium"
+9f094341bea610a10346f072bf865cb550a1f1c1,Recognition and volume estimation of food intake using a mobile device,"Recognition and Volume Estimation of Food Intake using a Mobile Device
+Manika Puri Zhiwei Zhu Qian Yu Ajay Divakaran Harpreet Sawhney
+Sarnoff Corporation
+01 Washington Rd,
+Princeton, NJ, 08540
+{mpuri, zzhu, qyu, adivakaran,"
+9fbe2611b1e2a49199fdee96c2083da625ba57df,Leveraging Multi-Modal Sensing for Mobile Health: A Case Review in Chronic Pain,"J-STSP-PCSPHT-00370-2015.R1
+Leveraging Multi-Modal Sensing for Mobile
+Health: a Case Review in Chronic Pain
+Min S. H. Aung, Faisal Alquaddoomi, Andy Hsieh, Mashfiqui Rabbi, Longqi Yang, J.P. Pollak,
+Tanzeem Choudhury, and Deborah Estrin
+(cid:3)"
+9fb1d7cbf1baf5f347d159410d22912fcee1fdb1,Face Detection Using Ferns,"FACE DETECTION USING FERNS
+Venkatesh Bala Subburaman Sébastien Marcel
+Idiap-Com-01-2011
+DECEMBER 2011
+Centre du Parc, Rue Marconi 19, P.O. Box 592, CH - 1920 Martigny
+T +41 27 721 77 11 F +41 27 721 77 12 www.idiap.ch"
+6b7f27cff688d5305c65fbd90ae18f3c6190f762,Generative networks as inverse problems with Scattering transforms,"Published as a conference paper at ICLR 2018
+GENERATIVE NETWORKS AS INVERSE PROBLEMS
+WITH SCATTERING TRANSFORMS
+Tom´as Angles & St´ephane Mallat
+´Ecole normale sup´erieure, Coll`ege de France, PSL Research University
+75005 Paris, France"
+6bd6460ec06adc1bd69d9517d116fd1545c04ac7,Small sample scene categorization from perceptual relations,"In the Proceedings of the IEEE conference on Computer Vision and Pattern Recognition (CVPR), 2012
+Small Sample Scene Categorization from Perceptual Relations
+Ilan Kadar and Ohad Ben-Shahar
+Dept. of Computer Science, Ben-Gurion University
+Beer-Sheva, Israel"
+6bcfcc4a0af2bf2729b5bc38f500cfaab2e653f0,Facial Expression Recognition in the Wild Using Improved Dense Trajectories and Fisher Vector Encoding,"Facial expression recognition in the wild using improved dense trajectories and
+Fisher vector encoding
+Sadaf Afshar1
+Albert Ali Salah2
+Computational Science and Engineering Program, Bo˘gazic¸i University, Istanbul, Turkey
+Department of Computer Engineering, Bo˘gazic¸i University, Istanbul, Turkey
+{sadaf.afshar,"
+6bee77418af305d632b21eb03872a0d268eeebac,Understanding the Intrinsic Memorability of Images,"Understanding the Intrinsic Memorability of Images
+Phillip Isola
+Devi Parikh
+TTI-Chicago
+Antonio Torralba
+Aude Oliva"
+6bbcec054017a6fd64af8bf325cb6e3e7244ba55,On the Benefits and the Limits of `p-norm Multiple Kernel Learning In Image Classification,"On the Benefits and the Limits of (cid:96)p-norm Multiple Kernel Learning In Image
+Classification
+Alexander Binder
+Technical University of Berlin
+Franklinstr. 28/29, 10587 Berlin, Germany
+Shinichi Nakajima
+NIKON Corporation
+Optical Research Laboratory, Tokyo, Japan
+Marius Kloft
+Technical University of Berlin
+Christina M¨uller
+Technical University of Berlin
+Wojciech Samek
+Technical University of Berlin
+Ulf Brefeld
+Yahoo! Research
+Barcelona, Spain
+Klaus-Robert M¨uller
+Technical University of Berlin
+Motoaki Kawanabe"
+6b4da897dce4d6636670a83b64612f16b7487637,Learning from Simulated and Unsupervised Images through Adversarial Training,"This paper has been submitted for publication on November 15, 2016.
+Learning from Simulated and Unsupervised Images through Adversarial
+Training
+Ashish Shrivastava, Tomas Pfister, Oncel Tuzel, Josh Susskind, Wenda Wang, Russ Webb
+Apple Inc"
+6b089627a4ea24bff193611e68390d1a4c3b3644,Cross-Pollination of Normalization Techniques From Speaker to Face Authentication Using Gaussian Mixture Models,"CROSS-POLLINATION OF NORMALISATION
+TECHNIQUES FROM SPEAKER TO FACE
+AUTHENTICATION USING GAUSSIAN
+MIXTURE MODELS
+Roy Wallace Mitchell McLaren Chris McCool
+Sébastien Marcel
+Idiap-RR-03-2012
+JANUARY 2012
+Centre du Parc, Rue Marconi 19, P.O. Box 592, CH - 1920 Martigny
+T +41 27 721 77 11 F +41 27 721 77 12 www.idiap.ch"
+6b5850c5a288fd26480ebcbbfc43172597e0d442,PHARMACOLOGICAL EFFECTS ON SOCIAL INTERACTION 1 Effects of Pharmacological Manipulations on Natural Social Interaction in Rhesus Macaques: A Pilot Investigation,"PHARMACOLOGICAL EFFECTS ON SOCIAL INTERACTION
+Effects of Pharmacological Manipulations on
+Natural Social Interaction in Rhesus Macaques: A Pilot Investigation
+Angelica Fuentes
+Spring, 2017
+Cognitive Science
+Advisor: Steve W. Chang"
+6be0ab66c31023762e26d309a4a9d0096f72a7f0,Enhance Visual Recognition under Adverse Conditions via Deep Networks,"Enhance Visual Recognition under Adverse
+Conditions via Deep Networks
+Ding Liu, Student Member, IEEE, Bowen Cheng, Zhangyang Wang, Member, IEEE,
+Haichao Zhang, Member, IEEE, and Thomas S. Huang, Life Fellow, IEEE"
+6b3c9c0e4d47bd960c0adc4d13ae524a5d9b94d1,Visual Multiple-Object Tracking for Unknown Clutter Rate,"THIS PAPER IS A PREPRINT OF A PAPER SUBMITTED TO IET COMPUTER VISION. IF ACCEPTED, THE COPY OF RECORD WILL BE AVAILABLE AT THE IET DIGITAL LIBRARY1
+Visual Multiple-Object Tracking for Unknown
+Clutter Rate
+Du Yong Kim"
+6bf58047438f54720e03252d50984d1a340a116a,Discriminative Autoencoders for Small Targets Detection,"Discriminative Autoencoders
+for Small Targets Detection.
+Sebastien Razakarivony
+SAGEM D.S. – SAFRAN Group
+CNRS UMR 6072 – University of Caen – ENSICAEN
+Email:
+Fr´ed´eric Jurie
+CNRS UMR 6072 – University of Caen – ENSICAEN
+Email:"
+6b0b10836197d7934f53080a39787b7d8d2b81f2,Detecting Granger-causal relationships in global spatio-temporal climate data via multitask learning,"Detecting Granger-causal relationships in global
+spatio-temporal climate data via multi-task learning
+Matthias Demuzere
+Christina Papagiannopoulou
+Diego G. Miralles
+Ghent University
+Ghent University
+Ghent University
+Niko E. C. Verhoest
+Ghent University
+Willem Waegeman
+Ghent University"
+6b78f2ece211c2d1eb6699e1e057b7beb3e0b4a7,GM-PHD-Based Multi-Target Visual Tracking Using Entropy Distribution and Game Theory,"GM-PHD-Based Multi-Target Visual Tracking
+Using Entropy Distribution and Game Theory
+Xiaolong Zhou, Youfu Li, Senior Member, IEEE, Bingwei He, and Tianxiang Bai"
+6b2db002cbc5312e4796de4d4b14573df2c01648,Learning Hierarchical Features from Deep Generative Models,"Learning Hierarchical Features from Deep Generative Models
+Shengjia Zhao 1 Jiaming Song 1 Stefano Ermon 1"
+6b18628cc8829c3bf851ea3ee3bcff8543391819,Face recognition based on subset selection via metric learning on manifold,"Hong Shao, Shuang Chen, Jie-yi Zhao, Wen-cheng Cui, Tian-shu Yu, 2015.
+Face recognition based on subset selection via metric learning on manifold.
+058. [doi:10.1631/FITEE.1500085]
+Face recognition based on subset
+selection via metric learning on manifold
+Key words: Face recognition, Sparse representation, Manifold structure,
+Metric learning, Subset selection
+Contact: Shuang Chen
+E-mail:
+ORCID: http://orcid.org/0000-0001-7441-4749
+Front Inform Technol & Electron Eng"
+6b02d73f097d745e58bb99a880e559b78c4594a1,Cross-Domain Face Verification: Matching ID Document and Self-Portrait Photographs,"Cross-Domain Face Verification:
+Matching ID Document and Self-Portrait Photographs
+Guilherme Folego 1,2 ∗ Marcus A. Angeloni 1,2
+Jos´e Augusto Stuchi 2,3 Alan Godoy 1,2 Anderson Rocha 2
+CPqD Foundation, Brazil
+University of Campinas (Unicamp), Brazil
+Phelcom Technologies, Brazil"
+6bf57ae6c63873253d1b95782f8c6b7bbc91b9ac,Semantic face segmentation from video streams in the wild,"UNIVERSITAT POLITÈCNICA DE CATALUNYA
+Universitat de Barcelona
+Universitat Rovira i Virgili
+MASTER THESIS
+Semantic face segmentation from video
+streams in the wild
+Author:
+Deividas SKIPARIS
+Academic Supervisor:
+Dr. Sergio ESCALERA
+Industry Supervisor:
+Dr. Pascal LANDRY
+A thesis submitted in fulfillment of the requirements
+for the degree of Master of Artificial Intelligence
+in the
+Facultat d’Informàtica de Barcelona (FIB)
+Facultat de Matemàtiques (UB)
+Escola Tècnica Superior d’Enginyeria (URV)
+June 16, 2017"
+6b6946ce943da5ba4bf6471609d3355cadec172e,Improvement of Facial Emotion Recognition Using Skin Color and Face Components,"International journal of Computer Science & Network Solutions April.2014-Volume 2.No4
+http://www.ijcsns.com
+ISSN 2345-3397
+Improvement of Facial Emotion Recognition
+Using Skin Color and Face Components
+Department of Computer Engineering, khouzestan Science and Research Branch, Islamic Azad
+kowsar azadmanesh, Reza javidan, S. Enayatolah Alavi
+Computer Engineering and IT Department Shiraz University of Technology, Shiraz, Iran,
+Department of computer Engineering, shahid chamran university, Ahvaz, Iran,
+University, Ahvaz, Iran,"
+6b5438161cfe55d1bd44829db81f396819e9e6b9,Wasserstein Dictionary Learning: Optimal Transport-based unsupervised non-linear dictionary learning,"Wasserstein Dictionary Learning:
+Optimal Transport-Based Unsupervised Nonlinear Dictionary Learning
+Morgan A. Schmitz∗ , Matthieu Heitz† , Nicolas Bonneel† , Fred Ngol`e‡ , David Coeurjolly† ,
+Marco Cuturi§ , Gabriel Peyr´e¶, and Jean-Luc Starck∗"
+6b6791c0a3f06c356035747f7e5f87d54bc5a657,A Neuro Fuzzy approach for Facial Expression Recognition using LBP Histograms,"International Journal of Computer Theory and Engineering, Vol. 2, No. 2 April, 2010
+793-8201
+A Neuro Fuzzy approach for Facial Expression
+Recognition using LBP Histograms
+V. Gomathi, Dr. K. Ramar, and A. Santhiyaku Jeevakumar"
+6b59716a193d3f91f88277e4c8a0f4cd0b6873c4,Detection of Deception in the Mafia Party Game,"Detection of Deception in the Mafia Party Game
+Sergey Demyanov
+James Bailey
+Kotagiri
+Ramamohanarao
+Christopher Leckie
+Department of Computing and Information Systems
+The University of Melbourne, Melbourne, VIC, Australia"
+6b55153f8d87bfd0dfb2f24eb2aa61d40e314cae,"Track, Then Decide: Category-Agnostic Vision-Based Multi-Object Tracking","Track, then Decide: Category-Agnostic Vision-based
+Multi-Object Tracking
+Aljoˇsa Oˇsep, Wolfgang Mehner, Paul Voigtlaender, and Bastian Leibe"
+6bca057c25b48fa7d1607e5701c46392ec906822,An ordered topological representation of 3D triangular mesh facial surface: Concept and applications,"Werghi et al. EURASIP Journal on Advances in Signal Processing 2012, 2012:144
+http://asp.eurasipjournals.com/content/2012/1/144
+RESEARCH
+Open Access
+An ordered topological representation of 3D
+triangular mesh facial surface: concept and
+pplications
+Naoufel Werghi1*, Mohamed Rahayem2 and Johan Kjellander2"
+6b6943a138938c31b285c1bb11213b87404feddf,Multiple Instance Learning-Based Birdsong Classification Using Unsupervised Recording Segmentation,"Multiple Instance Learning-Based Birdsong Classification
+Using Unsupervised Recording Segmentation
+J. F. Ruiz-Mu˜noz, Mauricio Orozco-Alzate, G. Castellanos-Dominguez
+Universidad Nacional de Colombia - Sede Manizales
+{jfruizmu, morozcoa,"
+6b8a5a2d018356b396301b27156fd69dd18b1d82,A Study on the Impact of Wavelet Decomposition on Face Recognition Methods,"International Journal of Computer Applications (0975 – 8887)
+Volume 87 – No.3, February 2014
+A Study on the Impact of Wavelet Decomposition on
+Face Recognition Methods
+M. M. Mohie El-Din1, Neveen. I. Ghali2, Ahmed. A. A. G1 and H. A. El Shenbary 1
+Department of Mathematics and Computer Science, Faculty of Science, Al-Azhar University, Cairo, Egypt
+Assoc. Prof Computer Science, Faculty of Science, Al-Azhar University, Cairo. Egypt"
+6b6493551017819a3d1f12bbf922a8a8c8cc2a03,Pose Normalization for Local Appearance-Based Face Recognition,"Pose Normalization for Local Appearance-Based
+Face Recognition
+Hua Gao, Hazım Kemal Ekenel, and Rainer Stiefelhagen
+Computer Science Department, Universit¨at Karlsruhe (TH)
+Am Fasanengarten 5, Karlsruhe 76131, Germany
+http://isl.ira.uka.de/cvhci"
+6b6e2c2ff6fcc5837523940c69cf2e9e94bc0503,Unsupervised Deep Video Hashing with Balanced Rotation,Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+6b95a3dbec92071c8552576930e69455c70e529c,BEGAN: Boundary Equilibrium Generative Adversarial Networks,"BEGAN: Boundary Equilibrium Generative
+Adversarial Networks
+David Berthelot, Thomas Schumm, Luke Metz
+Google"
+6b6ff9d55e1df06f8b3e6f257e23557a73b2df96,Survey of Threats to the Biometric Authentication Systems and Solutions,"International Journal of Computer Applications (0975 – 8887)
+Volume 61– No.17, January 2013
+Survey of Threats to the Biometric Authentication
+Systems and Solutions
+Sarika Khandelwal
+Research Scholor,Mewar
+University,Chitorgarh. (INDIA)
+P.C.Gupta
+Kota University,Kota(INDIA)
+Khushboo Mantri
+M.tech.student, Arya College of
+engineering ,Jaipur(INDIA)"
+6bb55ed3761eb1556acbd1a0d15c2c9099bab0b7,Temporally Coherent Bayesian Models for Entity Discovery in Videos by Tracklet Clustering,"Temporally Coherent Chinese Restaurant Process
+for Discovery of Persons and Corresponding
+Tracklets from User-generated Videos"
+0728f788107122d76dfafa4fb0c45c20dcf523ca,The Best of BothWorlds: Combining Data-Independent and Data-Driven Approaches for Action Recognition,"The Best of Both Worlds: Combining Data-independent and Data-driven
+Approaches for Action Recognition
+Zhenzhong Lan, Dezhong Yao, Ming Lin, Shoou-I Yu, Alexander Hauptmann
+{lanzhzh, minglin, iyu,"
+07d49098ada2d8e1ca0608c70e559dd517ca3432,Modélisation de contextes pour l'annotation sémantique de vidéos. (Context based modeling for video semantic annotation),"Modélisation de contextes pour l’annotation sémantique
+de vidéos
+Nicolas Ballas
+To cite this version:
+Nicolas Ballas. Modélisation de contextes pour l’annotation sémantique de vidéos. Autre [cs.OH].
+Ecole Nationale Supérieure des Mines de Paris, 2013. Français. <NNT : 2013ENMP0051>. <pastel-
+00958135>
+HAL Id: pastel-00958135
+https://pastel.archives-ouvertes.fr/pastel-00958135
+Submitted on 11 Mar 2014
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de"
+07ea3dd22d1ecc013b6649c9846d67f2bf697008,Human-centric Video Understanding with Weak Supervision a Dissertation Submitted to the Department of Computer Science and the Committee on Graduate Studies of Stanford University in Partial Fulfillment of the Requirements for the Degree of Doctor of Philosophy,"HUMAN-CENTRIC VIDEO UNDERSTANDING WITH WEAK
+SUPERVISION
+A DISSERTATION
+SUBMITTED TO THE DEPARTMENT OF COMPUTER SCIENCE
+AND THE COMMITTEE ON GRADUATE STUDIES
+OF STANFORD UNIVERSITY
+IN PARTIAL FULFILLMENT OF THE REQUIREMENTS
+FOR THE DEGREE OF
+DOCTOR OF PHILOSOPHY
+Vignesh Ramanathan
+June 2016"
+071099a4c3eed464388c8d1bff7b0538c7322422,Facial expression recognition in the wild using rich deep features,"FACIAL EXPRESSION RECOGNITION IN THE WILD USING RICH DEEP FEATURES
+Abubakrelsedik Karali, Ahmad Bassiouny and Motaz El-Saban
+Microsoft Advanced Technology labs, Microsoft Technology and Research, Cairo, Egypt"
+079b6800e3130ca2ef1815a35632ab6998848ef3,Fine-grained Apparel Classification and Retrieval without rich annotations,"Fine-grained Apparel Classification and Retrieval
+without rich annotations
+Aniket Bhatnagar · Sanchit Aggarwal"
+0760b9375db1505e9b9c182e98bb9579dd9197af,Robust Subspace Discovery through Supervised Low-Rank Constraints,"Robust Subspace Discovery through Supervised Low-Rank Constraints
+Sheng Li∗
+Yun Fu∗"
+070ab604c3ced2c23cce2259043446c5ee342fd6,An Active Illumination and Appearance (AIA) Model for Face Alignment,"AnActiveIlluminationandAppearance(AIA)ModelforFaceAlignment
+FatihKahraman,MuhittinGokmen
+IstanbulTechnicalUniversity,
+ComputerScienceDept.,Turkey
+{fkahraman,
+InformaticsandMathematicalModelling,Denmark
+SuneDarkner,RasmusLarsen
+TechnicalUniversityofDenmark"
+07a8a4b8f207b2db2a19e519027f70cd1c276294,Pixel Recursive Super Resolution,"Pixel Recursive Super Resolution
+Ryan Dahl ∗
+Jonathon Shlens
+Mohammad Norouzi
+Google Brain"
+071135dfb342bff884ddb9a4d8af0e70055c22a1,Temporal 3D ConvNets: New Architecture and Transfer Learning for Video Classification,"New Architecture and Transfer Learning for Video Classification
+Temporal 3D ConvNets:
+Ali Diba1,4,(cid:63), Mohsen Fayyaz2,(cid:63), Vivek Sharma3, Amir Hossein Karami4, Mohammad Mahdi Arzani4,
+Rahman Yousefzadeh4, Luc Van Gool1,4
+ESAT-PSI, KU Leuven, 2University of Bonn, 3CV:HCI, KIT, Karlsruhe, 4Sensifai"
+0754e769eb613fd3968b6e267a301728f52358be,Towards a Watson that sees: Language-guided action recognition for robots,"Towards a Watson That Sees: Language-Guided Action Recognition for
+Robots
+Ching L. Teo, Yezhou Yang, Hal Daum´e III, Cornelia Ferm¨uller and Yiannis Aloimonos"
+0725b950792ddbe4edf812a7ee8cef14447236ed,Efficient Large-Scale Multi-Modal Classification,"Efficient Large-Scale Multi-Modal Classification
+Douwe Kiela, Edouard Grave, Armand Joulin and Tomas Mikolov
+Facebook AI Research"
+07c83f544d0604e6bab5d741b0bf9a3621d133da,Learning Spatio-Temporal Features with 3D Residual Networks for Action Recognition,"Learning Spatio-Temporal Features with 3D Residual Networks
+for Action Recognition
+Kensho Hara, Hirokatsu Kataoka, Yutaka Satoh
+National Institute of Advanced Industrial Science and Technology (AIST)
+Tsukuba, Ibaraki, Japan
+{kensho.hara, hirokatsu.kataoka,"
+07adc7429fb22352946b675023df7db11c905701,Active Multitask Learning Using Both Latent and Supervised Shared Topics,"Active Multitask Learning Using Both Latent and Supervised Shared Topics
+Ayan Acharya∗
+Raymond J. Mooney∗
+Joydeep Ghosh∗"
+073c9ec4ff069218f358b9dd8451a040cf1a4a82,Object Classification and Detection in High Dimensional Feature Space,"Object Classification and Detection
+in High Dimensional Feature Space
+THIS IS A TEMPORARY TITLE PAGE
+It will be replaced for the final print by a version
+provided by the service académique.
+Thèse n. 6043
+présentée le 17 Décembre 2013
+à la Faculté Sciences et Techniques de l’Ingénieur
+Laboratoire de l’Idiap
+Programme doctoral en Informatique, Communications et Infor-
+mation
+École Polytechnique Fédérale de Lausanne
+pour l’obtention du grade de Docteur ès Sciences
+Charles Dubout
+cceptée sur proposition du jury:
+Prof Mark Pauly, président du jury
+Dr François Fleuret, directeur de thèse
+Prof Pascal Fua, rapporteur
+Prof Gilles Blanchard, rapporteur
+Prof Frédéric Jurie, rapporteur"
+0726152a1c1a5723ac34d54abec0dc8d4659598e,Realtime Image Matching for Vision Based Car Navigation with Built-in Sensory Data,"ISPRS Annals of the Photogrammetry, Remote Sensing and Spatial Information Sciences, Volume II-3/W2, 2013
+ISA13 - The ISPRS Workshop on Image Sequence Analysis 2013, 11 November 2013, Antalya, Turkey"
+070199a5087590f96c4422b82e4803911bb0652e,What Are We Tracking: A Unified Approach of Tracking and Recognition,"What Are We Tracking: A Unified Approach of
+Tracking and Recognition
+Jialue Fan, Xiaohui Shen, Student Member, IEEE, and Ying Wu, Senior Member, IEEE"
+07ca211bde38009697c964702a29d0fe3260bf97,Resource Aware Person Re-identification across Multiple Resolutions,"Resource Aware Person Re-identification across Multiple Resolutions
+Yan Wang∗ †, Lequn Wang∗ †, Yurong You∗ ‡, Xu Zou§, Vincent Chen†
+Serena Li†, Gao Huang†, Bharath Hariharan†, Kilian Q. Weinberger†"
+07dbf04089b015db773fe95e664fa73aef874b36,Fishy Faces: Crafting Adversarial Images to Poison Face Authentication,"Fishy Faces: Crafting Adversarial Images to Poison Face Authentication
+Giuseppe Garofalo
+Vera Rimmer
+Tim Van hamme
+imec-DistriNet, KU Leuven
+imec-DistriNet, KU Leuven
+imec-DistriNet, KU Leuven
+Davy Preuveneers
+Wouter Joosen
+imec-DistriNet, KU Leuven
+imec-DistriNet, KU Leuven"
+07d6238d8f8edbfe0fd2887fa0a7939735f21e13,Learning Human Optical Flow,"RANJAN, ROMERO, BLACK: LEARNING HUMAN OPTICAL FLOW
+Learning Human Optical Flow
+MPI for Intelligent Systems
+Tübingen, Germany
+Amazon Inc.
+Anurag Ranjan1
+Javier Romero∗,2
+Michael J. Black1"
+07ad6bb9b21c065cd92ab2f24a22c1d4a8f205a7,Realtime facial animation with on-the-fly correctives,"Realtime Facial Animation with On-the-fly Correctives
+Hao Li⇤
+Jihun Yu†
+Yuting Ye‡
+Chris Bregler§
+Industrial Light & Magic
+input depth map & 2D features
+data-driven tracking
+our tracking
+data-driven retargeting
+our retargeting
+Figure 1: Our adaptive tracking model conforms to the input expressions on-the-fly, producing a better fit to the user than state-of-the-art
+data driven techniques [Weise et al. 2011] which are confined to learned motion priors and generate plausible but not accurate tracking.
+Links:
+Introduction
+The essence of high quality performance-driven facial animation is
+to capture every trait and characteristic of an actor’s facial and ver-
+al expression and to reproduce those on a digital double or crea-
+ture. Even with the latest 3D scanning and motion capture tech-
+nology, the creation of realistic digital faces in film and game pro-"
+072fd0b8d471f183da0ca9880379b3bb29031b6a,Image-to-Image Translation with Conditional Adversarial Networks,"Image-to-Image Translation with Conditional Adversarial Networks
+Phillip Isola
+Jun-Yan Zhu
+Tinghui Zhou
+Alexei A. Efros
+Berkeley AI Research (BAIR) Laboratory, UC Berkeley
+Figure 1: Many problems in image processing, graphics, and vision involve translating an input image into a corresponding output image.
+These problems are often treated with application-specific algorithms, even though the setting is always the same: map pixels to pixels.
+Conditional adversarial nets are a general-purpose solution that appears to work well on a wide variety of these problems. Here we show
+results of the method on several. In each case we use the same architecture and objective, and simply train on different data."
+0717b47ab84b848de37dbefd81cf8bf512b544ac,Robust Face Recognition and Tagging in Visual Surveillance System,"International Journal of Engineering Research and Applications (IJERA) ISSN: 2248-9622
+International Conference on Humming Bird ( 01st March 2014)
+RESEARCH ARTICLE
+OPEN ACCESS
+Robust Face Recognition and Tagging in Visual Surveillance
+Kavitha MS 1, Siva Pradeepa S2
+System
+Kavitha MS Author is currently pursuing M.E(CSE)in VINS Christian college of Engineering,Nagercoil.
+Siva pradeepa,Assistant Lecturer in VINS Christian college of Engineering"
+07eaf19eecf4ccdd5f8e3367c1675d9f4addd2df,Learning pullback manifolds of dynamical models,"IEEE TRANSACTIONS ON PAMI, VOL. XX, NO. Y, MONTH 2010
+SubmittedtoIEEETrans.onPatternAnalysisandMachineIntelligence;October27,2010
+Learning pullback manifolds of dynamical
+models
+Fabio Cuzzolin"
+0779875eff440365184dd8bf44e9f85f78267c5f,An Intelligent Extraversion Analysis Scheme from Crowd Trajectories for Surveillance,"JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. YY, JULY 2017
+An Intelligent Extraversion Analysis Scheme from
+Crowd Trajectories for Surveillance
+Wenxi Liu, Yuanlong Yu, Chun-Yang Zhang, Genggeng Liu, Naixue Xiong"
+074a12f9187beafe40386f19aa2544df30fa5703,Product Characterisation towards Personalisation: Learning Attributes from Unstructured Data to Recommend Fashion Products,"Product Characterisation towards Personalisation
+Learning Attributes from Unstructured Data to Recommend Fashion Products
+Ângelo Cardoso∗
+ISR, IST, Universidade de Lisboa
+Lisbon, Portugal
+Fabio Daolio
+ASOS.com
+London, UK
+Saúl Vargas
+ASOS.com
+London, UK"
+0750a816858b601c0dbf4cfb68066ae7e788f05d,CosFace: Large Margin Cosine Loss for Deep Face Recognition,"CosFace: Large Margin Cosine Loss for Deep Face Recognition
+Hao Wang, Yitong Wang, Zheng Zhou, Xing Ji, Dihong Gong, Jingchao Zhou,
+Zhifeng Li∗, and Wei Liu∗
+Tencent AI Lab"
+078d507703fc0ac4bf8ca758be101e75ea286c80,Large - Scale Content Based Face Image Retrieval using Attribute Enhanced,"ISSN: 2321-8169
+International Journal on Recent and Innovation Trends in Computing and Communication
+Volume: 3 Issue: 8
+5287 - 5296
+________________________________________________________________________________________________________________________________
+Large- Scale Content Based Face Image Retrieval using Attribute Enhanced
+Sparse Codewords.
+Chaitra R,
+Mtech Digital Coomunication Engineering
+Acharya Institute Of Technology
+Bangalore"
+0716e1ad868f5f446b1c367721418ffadfcf0519,Interactively Guiding Semi-Supervised Clustering via Attribute-Based Explanations,"Interactively Guiding Semi-Supervised
+Clustering via Attribute-Based Explanations
+Shrenik Lad and Devi Parikh
+Virginia Tech, Blacksburg, VA, USA"
+07c6744e25ed01967e448a397f5d7e9d540345c3,Effective Multi-Query Expansions: Collaborative Deep Networks for Robust Landmark Retrieval,"Effective Multi-Query Expansions: Collaborative Deep Networks for Robust
+Landmark Retrieval
+Yang Wang, Xuemin Lin, Lin Wu, Wenjie Zhang"
+0726a45eb129eed88915aa5a86df2af16a09bcc1,Introspective perception: Learning to predict failures in vision systems,"Introspective Perception: Learning to Predict Failures in Vision Systems
+Shreyansh Daftry, Sam Zeng, J. Andrew Bagnell and Martial Hebert"
+07625af8d73142e239b5cdccb1dd226648e4b0d4,Learning Scene-Independent Group Descriptors for Crowd Understanding,"Learning Scene-Independent Group Descriptors for
+Crowd Understanding
+Jing Shao, Chen Change Loy, Member, IEEE, and Xiaogang Wang, Member, IEEE"
+0742d051caebf8a5d452c03c5d55dfb02f84baab,Real-time geometric motion blur for a deforming polygonal mesh,"Real-Time Geometric Motion Blur for a Deforming Polygonal Mesh
+Nathan Jones
+Formerly: Texas A&M University
+Currently: The Software Group"
+079a0a3bf5200994e1f972b1b9197bf2f90e87d4,Component-Based Face Recognition with 3D Morphable Models,"Component-based Face Recognition with 3D
+Morphable Models
+Jennifer Huang1, Bernd Heisele1;2, and Volker Blanz3
+Center for Biological and Computational Learning, M.I.T., Cambridge, MA, USA
+Honda Research Institute US, Boston, MA, USA
+Computer Graphics Group, Max-Planck-Institut, Saarbr˜ucken, Germany"
+07faa38d4d0e9d14d72bd049362efa83fae78ee3,Quick Identification of Child Pornography in Digital Videos,"IJoFCS (2012) 2, 21-32
+DOI: 10.5769/J201202002 or http://dx.doi.org/10.5769/J201202002
+Quick Identification of Child Pornography
+in Digital Videos
+Mateus de Castro Polastro and Pedro Monteiro da Silva Eleuterio
+Brazilian Federal Police
+Campo Grande/MS
+E-mails:"
+073bcb3b1aed5cdf7bff4e9fe46a21175f42c877,"Zero-Shot Learning - A Comprehensive Evaluation of the Good, the Bad and the Ugly","Zero-Shot Learning - A Comprehensive
+Evaluation of the Good, the Bad and the Ugly
+Yongqin Xian, Student Member, IEEE, Christoph H. Lampert,
+Bernt Schiele, Fellow, IEEE, and Zeynep Akata, Member, IEEE"
+0770f0f8f168c284a63e46b394150a8c429549da,Project-Team Pulsar Perception Understanding Learning Systems for Activity Recognition,"INSTITUT NATIONAL DE RECHERCHE EN INFORMATIQUE ET EN AUTOMATIQUE
+Project-Team Pulsar
+Perception Understanding Learning
+Systems for Activity Recognition
+Sophia Antipolis - Méditerranée
+THEME COG
+tivitytepor2008"
+389b2390fd310c9070e72563181547cf23dceea3,Β-vae: Learning Basic Visual Concepts with a Constrained Variational Framework,"Published as a conference paper at ICLR 2017
+β-VAE: LEARNING BASIC VISUAL CONCEPTS WITH A
+CONSTRAINED VARIATIONAL FRAMEWORK
+Irina Higgins, Loic Matthey, Arka Pal, Christopher Burgess, Xavier Glorot,
+Matthew Botvinick, Shakir Mohamed, Alexander Lerchner
+Google DeepMind
+{irinah,lmatthey,arkap,cpburgess,glorotx,"
+38d56ddcea01ce99902dd75ad162213cbe4eaab7,Sense Beauty by Label Distribution Learning,Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+389334e9a0d84bc54bcd5b94b4ce4c5d9d6a2f26,Facial parameter extraction system based on active contours,"FACIAL PARAMETER EXTRACTION SYSTEM BASED ON ACTIVE CONTOURS
+Montse Pardàs, Marcos Losada
+Universitat Politècnica de Catalunya, Barcelona, Spain"
+38f7f3c72e582e116f6f079ec9ae738894785b96,A New Technique for Face Matching after Plastic Surgery in Forensics,"IJARCCE
+ISSN (Online) 2278-1021
+ISSN (Print) 2319 5940
+International Journal of Advanced Research in Computer and Communication Engineering
+Vol. 4, Issue 11, November 2015
+A New Technique for Face Matching after
+Plastic Surgery in Forensics
+Anju Joseph1, Nilu Tressa Thomas2, Neethu C. Sekhar3
+Student, Dept. of CSE, Amal Jyothi College of Engineering, Kanjirappally, India 1,2
+Asst. Professor, Dept. of CSE, Amal Jyothi College of Engineering, Kanjirappally, India 3
+I. INTRODUCTION
+Facial recognition is one of the most important task that
+forensic examiners execute
+their
+investigation. This work focuses on analysing the effect of
+plastic surgery in face recognition algorithms. It is
+imperative for the subsequent facial recognition systems to
+e capable of addressing this significant issue and
+ccordingly there is a need for more research in this
+important area."
+38679355d4cfea3a791005f211aa16e76b2eaa8d,Evolutionary Cross-Domain Discriminative Hessian Eigenmaps,"Title
+Evolutionary cross-domain discriminative Hessian Eigenmaps
+Author(s)
+Si, S; Tao, D; Chan, KP
+Citation
+Issued Date
+http://hdl.handle.net/10722/127357
+Rights
+This work is licensed under a Creative Commons Attribution-
+NonCommercial-NoDerivatives 4.0 International License.; ©2010
+IEEE. Personal use of this material is permitted. However,
+permission to reprint/republish this material for advertising or
+promotional purposes or for creating new collective works for
+resale or redistribution to servers or lists, or to reuse any
+opyrighted component of this work in other works must be
+obtained from the IEEE."
+38998d58a0c1048ad4c08d0022066e22ba6d1201,Re-identification through a Video Camera Network,"UNIVERSIT´EDENICE-SOPHIAANTIPOLIS´ECOLEDOCTORALESTICSCIENCESETTECHNOLOGIESDEL’INFORMATIONETDELACOMMUNICATIONTH`ESEpourl’obtentiondugradedeDocteurenSciencesdel’Universit´edeNice-SophiaAntipolisMention:AUTOMATIQUETRAITEMENTDUSIGNALETDESIMAGESpr´esent´eeetsoutenueparMalikSOUDEDPEOPLEDETECTION,TRACKINGANDRE-IDENTIFICATIONTHROUGHAVIDEOCAMERANETWORKTh`esedirig´eeparFranc¸oisBR´EMONDSoutenancepr´evuele20/12/2013Jury:MoniqueTHONNATDirectrice,INRIASophia-Antipolis,FrancePr´esidenteJamesFERRYMANProfesseur,UniversityofReading,UKRapporteurCarloREGAZZONIProfesseur,UniversityofGenova,ItalyRapporteurPatrickBOUTHEMYDirecteur,INRIARennes,FranceExaminateurFranc¸oisBREMONDDirecteur,INRIASophia-Antipolis,FranceDirecteurdeth`eseMarie-ClaudeFRASSONDirectrice,DigitalBarriers,Sophia-Antipolis,FranceInvit´ee"
+380b8df0f340e5bbc3a953c62f9bc573ce073b92,Joint Image-Text News Topic Detection and Tracking by Multimodal Topic And-Or Graph,"Joint Image-Text News Topic Detection and
+Tracking by Multimodal Topic And-Or Graph
+Weixin Li, Jungseock Joo, Hang Qi, and Song-Chun Zhu"
+382f1ebe6009e580949d5513bc298cb253a1eeda,Interpreting Complex Regression Models,"Interpreting Complex Regression Models
+Noa Avigdor-Elgrabli∗, Alex Libov†, Michael Viderman∗, Ran Wolff∗
+Yahoo Research, Haifa, Israel,
+Amazon Research, Haifa, Israel,"
+38682c7b19831e5d4f58e9bce9716f9c2c29c4e7,Movie Character Identification Using Graph Matching Algorithm,"International Journal of Computer Trends and Technology (IJCTT) – Volume 18 Number 5 – Dec 2014
+Movie Character Identification Using Graph Matching
+Algorithm
+Shaik. Kartheek.*1, A.Srinivasa Reddy*2
+M.Tech Scholar, Dept of CSE, QISCET, ONGOLE, Dist: Prakasam, AP, India.
+Associate Professor, Department of CSE, QISCET, ONGOLE, Dist: Prakasam, AP, India"
+383d64b27fb3cdf2beff43f3beb8caac8c21a886,Detecting activities of daily living in first-person camera views,"Detecting Activities of Daily Living in First-person Camera Views
+Hamed Pirsiavash Deva Ramanan
+Department of Computer Science, University of California, Irvine"
+3851ed2e3c00083f68c2811694736ebdaa9ed8b5,DeepStory: Video Story QA by Deep Embedded Memory Networks,Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+38b3cae6ba1b98d6bc6f88d903916dac888cb951,Improving Semantic Embedding Consistency by Metric Learning for Zero-Shot Classiffication,"Improving Semantic Embedding Consistency by
+Metric Learning for Zero-Shot Classification
+Maxime Bucher1,2, St´ephane Herbin1, Fr´ed´eric Jurie2
+ONERA - The French Aerospace Lab, Palaiseau, France
+Normandie Univ, UNICAEN, ENSICAEN, CNRS"
+3810b6299140bf2c7d6d0cced765c0777d603923,Do deep features generalize from everyday objects to remote sensing and aerial scenes domains?,"Do Deep Features Generalize from Everyday Objects
+to Remote Sensing and Aerial Scenes Domains?
+Ot´avio A. B. Penatti
+Advanced Technologies Group
+SAMSUNG Research Institute
+Campinas, SP, 13097-160, Brazil
+Keiller Nogueira, Jefersson A. dos Santos
+Department of Computer Science
+Universidade Federal de Minas Gerais
+Belo Horizonte, MG, 31270-010, Brazil"
+38eea307445a39ee7902c1ecf8cea7e3dcb7c0e7,Multi-distance Support Matrix Machines,"Noname manuscript No.
+(will be inserted by the editor)
+Multi-distance Support Matrix Machine
+Yunfei Ye1
+· Dong Han1
+Received: date / Accepted: date"
+3885cfd634c025c6e27c4db8211d72f54f864f90,Implications of holistic face processing in autism and schizophrenia,"Implications of holistic face processing in autism and
+schizophrenia
+Tamara L. Watson*
+School of Social Science and Psychology, University of Western Sydney, Sydney, NSW, Australia
+REVIEW ARTICLE
+published: 05 July 2013
+doi: 10.3389/fpsyg.2013.00414
+People with autism and schizophrenia have been shown to have a local bias in sensory
+processing and face recognition difficulties. A global or holistic processing strategy is
+known to be important when recognizing faces. Studies investigating face recognition in
+these populations are reviewed and show that holistic processing is employed despite
+lower overall performance in the tasks used. This implies that holistic processing is
+necessary but not sufficient for optimal face recognition and new avenues for research
+into face recognition based on network models of autism and schizophrenia are proposed.
+Keywords: vision, face recognition, autism, schizophrenia, holistic coding, configurational coding
+Edited by:
+Rachel A. Robbins, Univeristy of
+Western Sydney, Australia
+Reviewed by:
+Olivia Carter, University of"
+3837f81524286ed5f9142d245743733766aa4017,Houdini: Fooling Deep Structured Visual and Speech Recognition Models with Adversarial Examples,"Houdini: Fooling Deep Structured Visual and Speech
+Recognition Models with Adversarial Examples
+Moustapha Cisse
+Facebook AI Research
+Natalia Neverova*
+Facebook AI Research"
+38192f06ac19172299ab543483d2e0eca2f889c0,Mining Mid-level Features for Image Classification,"(will be inserted by the editor)
+Mining Mid-level Features for Image Classification
+Basura Fernando · Elisa Fromont · Tinne Tuytelaars
+Received: date / Accepted: date"
+3832a6d6b1f78cdadee6968d51c1c7c2922ab3cd,ISIA at the ImageCLEF 2017 Image Caption Task,"ISIA at the ImageCLEF 2017 Image Caption Task
+Sisi Liang, Xiangyang Li, Yongqing Zhu, Xue Li, and Shuqiang Jiang
+Key Laboratory of Intelligent Information Processing,
+Institute of Computing Technology Chinese Academy of Sciences,
+No.6 Kexueyuan South Road Zhongguancun, Haidian District, 100190 Beijing, China
+{sisi.liang, xiangyang.li, yongqing.zhu, xue.li,"
+384908bfad5b9e81d605344abcb9e99d8b0f4027,Improving Deep Models of Person Re-identification for Cross-Dataset Usage,"Improving Deep Models of Person Re-identification for
+Cross-Dataset Usage
+Sergey Rodionov1,2, Alexey Potapov1,3, Hugo Latapie4, Enzo Fenoglio4,
+Maxim Peterson2,3
+SingularityNET LLC
+Novamente LLC, USA
+ITMO University, Kronverkskiy pr. 49, 197101 St. Petersburg, Russia
+Chief Technology & Architecture Office, Cisco
+{pas.aicv, astroseger, {hlatapie,"
+38a169b6e67ef7768f91fa208c9b5544f6f57f16,Object Bank: An Object-Level Image Representation for High-Level Visual Recognition,"Int J Comput Vis
+DOI 10.1007/s11263-013-0660-x
+Object Bank: An Object-Level Image Representation
+for High-Level Visual Recognition
+Li-Jia Li · Hao Su · Yongwhan Lim · Li Fei-Fei
+Received: 2 January 2012 / Accepted: 11 September 2013
+© Springer Science+Business Media New York 2013"
+38b18585e4bdb78347d44caa561e69a0045ade8d,Differential Attention for Visual Question Answering,"Differential Attention for Visual Question Answering
+Badri Patro, Vinay P. Namboodiri
+IIT Kanpur
+{ badri,vinaypn"
+3805d47da61527137b6f44b92af3017a2dfe7bd5,Greedy column subset selection for large-scale data sets,"(will be inserted by the editor)
+Greedy Column Subset Selection for Large-scale
+Data Sets
+Ahmed K. Farahat · Ahmed Elgohary ·
+Ali Ghodsi · Mohamed S. Kamel
+Received: date / Accepted: date"
+386a5c06d334d20227e8b2daf5433a2bef385648,Cross and Learn: Cross-Modal Self-Supervision,"Cross and Learn: Cross-Modal Self-Supervision
+Nawid Sayed1, Biagio Brattoli2, and Bj¨orn Ommer2
+Heidelberg University, HCI / IWR, Germany"
+384f972c81c52fe36849600728865ea50a0c4670,"Multi-Fold Gabor, PCA and ICA Filter Convolution Descriptor for Face Recognition","Multi-Fold Gabor, PCA and ICA Filter
+Convolution Descriptor for Face Recognition
+Cheng Yaw Low, Andrew Beng Jin Teoh, Senior Member, IEEE, Cong Jie Ng"
+38f1fac3ed0fd054e009515e7bbc72cdd4cf801a,Finding Person Relations in Image Data of the Internet Archive,"Finding Person Relations in Image Data of the
+Internet Archive
+Eric M¨uller-Budack1,2[0000−0002−6802−1241],
+Kader Pustu-Iren1[0000−0003−2891−9783], Sebastian Diering1, and
+Ralph Ewerth1,2[0000−0003−0918−6297]
+Leibniz Information Centre for Science and Technology (TIB), Hannover, Germany
+L3S Research Center, Leibniz Universit¨at Hannover, Germany"
+380d5138cadccc9b5b91c707ba0a9220b0f39271,Deep Imbalanced Learning for Face Recognition and Attribute Prediction,"Deep Imbalanced Learning for Face Recognition
+nd Attribute Prediction
+Chen Huang, Yining Li, Chen Change Loy, Senior Member, IEEE and Xiaoou Tang, Fellow, IEEE"
+383a58de852715c8544abe60fa64d29fb7ea5688,Inductive Hashing on Manifolds,"Inductive Hashing on Manifolds
+Fumin Shen‡(cid:5)∗ Chunhua Shen(cid:5)† Qinfeng Shi(cid:5) Anton van den Hengel(cid:5) Zhenmin Tang‡
+(cid:5) The University of Adelaide, Australia ‡ Nanjing University of Science and Technology, China"
+38215c283ce4bf2c8edd597ab21410f99dc9b094,The SEMAINE Database: Annotated Multimodal Records of Emotionally Colored Conversations between a Person and a Limited Agent,"The SEMAINE Database: Annotated Multimodal Records of
+Emotionally Colored Conversations between a Person and a Limited
+Agent
+McKeown, G., Valstar, M., Cowie, R., Pantic, M., & Schröder, M. (2012). The SEMAINE Database: Annotated
+Multimodal Records of Emotionally Colored Conversations between a Person and a Limited Agent. IEEE
+Transactions on Affective Computing, 3(1), 5-17. DOI: 10.1109/T-AFFC.2011.20
+Published in:
+Document Version:
+Peer reviewed version
+Queen's University Belfast - Research Portal:
+Link to publication record in Queen's University Belfast Research Portal
+General rights
+Copyright for the publications made accessible via the Queen's University Belfast Research Portal is retained by the author(s) and / or other
+opyright owners and it is a condition of accessing these publications that users recognise and abide by the legal requirements associated
+with these rights.
+Take down policy
+The Research Portal is Queen's institutional repository that provides access to Queen's research output. Every effort has been made to
+ensure that content in the Research Portal does not infringe any person's rights, or applicable UK laws. If you discover content in the
+Research Portal that you believe breaches copyright or violates any law, please contact
+Download date:05. Nov. 2018"
+38b0a67727dea3fe563e8662517bd0fda2fd5e06,Perceiving and expressing feelings through actions in relation to individual differences in empathic traits: the Action and Feelings Questionnaire (AFQ),"Cogn Affect Behav Neurosci (2016) 16:248–260
+DOI 10.3758/s13415-015-0386-z
+Perceiving and expressing feelings through actions in relation
+to individual differences in empathic traits: the Action
+nd Feelings Questionnaire (AFQ)
+Justin H. G. Williams 1,4 & Isobel M. Cameron 1 & Emma Ross 2 & Lieke Braadbaart 3 &
+Gordon D Waiter 3
+Published online: 20 October 2015
+# The Author(s) 2015. This article is published with open access at Springerlink.com"
+38a3611138388490c2cd60dfbf795932d5e55a79,2D pose estimation in the Restaurant of the Future,"D pose estimation in the Restaurant
+of the Future
+Frederik (Frank) Evers
+supervision by
+dr. ir. Nico P. van der Aa
+Noldus IT B.V.
+Wageningen, NL
+dr. Robby T. Tan
+University of Utrecht
+Utrecht, NL
+March 29, 2012"
+383f874ba7975c83b55c694ec0a70f51dc3a0ee5,Towards Automatic Image Understanding and Mining via Social Curation,"Towards Automatic Image Understanding and Mining via Social Curation
+Katsuhiko Ishiguro, Akisato Kimura, and Koh Takeuchi
+NTT Communication Science Laboratories
+NTT Corporation, Kyoto, Japan"
+389363432ee9fcf0e0cfe67b7b4f62618e1f4b59,Performing content-based retrieval of humans using gait biometrics,"Performing Content-Based Retrieval of Humans
+Using Gait Biometrics
+Sina Samangooei and Mark S. Nixon
+School of Electronics and Computer Science, Southampton University, Southampton,
+SO17 1BJ, United Kingdom"
+3837f3faa722c91aa21d6f17ea1ac1cb5187bda1,Human Action Attribute Learning From Video Data Using Low-Rank Representations,"Human Action Attribute Learning From Video
+Data Using Low-Rank Representations
+Tong Wu, Student Member, IEEE, Prudhvi Gurram, Senior Member, IEEE,
+Raghuveer M. Rao, Fellow, IEEE, and Waheed U. Bajwa, Senior Member, IEEE"
+3898a9dcb22f87413f08bb44c656f4129e1c42df,On binary representations for biometric template protection,"ON BINARY REPRESENTATIONS FOR
+BIOMETRIC TEMPLATE PROTECTION
+Chun Chen"
+38cc2896058131e4656443aedfb1b9dae61b99cd,Functional Connectivity Imaging Analysis: Interhemispheric Integration in Autism,"Functional Connectivity Imaging Analysis:
+Interhemispheric Integration in Autism
+Daniel J. Kelley"
+3802da31c6d33d71b839e260f4022ec4fbd88e2d,Deep Attributes for One-Shot Face Recognition,"Deep Attributes for One-Shot Face Recognition
+Aishwarya Jadhav1,3, Vinay P. Namboodiri2, and K. S. Venkatesh 3
+Xerox Research Center India, 2Department of Computer Science,
+Department of Electrical Engineering, IIT Kanpur"
+38e509fc0d94e954a512128760f7a1f0d6fbc384,A Framework for Application-Guided Task Management on Heterogeneous Embedded Systems,"A Framework for Application Guided Task Management on
+Heterogeneous Embedded Systems
+FRANCISCO GASPAR, INESC-ID, Instituto Superior T´ecnico, Universidade de Lisboa
+LUIS TANIC¸ A, INESC-ID, Instituto Superior T´ecnico, Universidade de Lisboa
+PEDRO TOM ´AS, INESC-ID, Instituto Superior T´ecnico, Universidade de Lisboa
+ALEKSANDAR ILIC, INESC-ID, Instituto Superior T´ecnico, Universidade de Lisboa
+LEONEL SOUSA, INESC-ID, Instituto Superior T´ecnico, Universidade de Lisboa
+In this paper, we propose a general framework for fine-grain application-aware task management in hetero-
+geneous embedded platforms, which allows integration of different mechanisms for an efficient resource uti-
+lization, frequency scaling and task migration. The proposed framework incorporates several components for
+ccurate run-time monitoring by relying on the OS facilities and performance self-reporting for parallel and
+iterative applications. The framework efficiency is experimentally evaluated on a real hardware platform,
+where significant power and energy savings are attained for SPEC CPU2006 and PARSEC benchmarks, by
+guiding frequency scaling and inter-cluster migrations according to the run-time application behavior and
+predefined performance targets.
+CCS Concepts:rComputer systems organization → Multicore architectures; Heterogeneous (hybrid)
+systems;rSoftware and its engineering → Process management;
+Additional Key Words and Phrases: Heterogeneous multi processor; scheduling; embedded systems; quality
+of service; big.LITTLE; task migration; dynamic voltage and frequency control
+ACM Reference Format:"
+000a83a533f9c945addce83e466e308df1ae79c5,Efficient max-margin multi-label classification with applications to zero-shot learning,"Mach Learn manuscript No.
+(will be inserted by the editor)
+Efficient Max-Margin Multi-Label Classification with
+Applications to Zero-Shot Learning
+Bharath Hariharan · S. V. N. Vishwanathan ·
+Manik Varma
+Received: 30 September 2010 / Accepted: date"
+004dc8de3a6832c8d4764144570dc122b5265ec5,Hyper-dimensional computing for a visual question-answering system that is trainable end-to-end,"Hyper-dimensional computing for a visual
+question-answering system that is trainable
+end-to-end
+Guglielmo Montone
+J.Kevin O’Regan
+Laboratoire Psychologie de la Perception
+Laboratoire Psychologie de la Perception
+Université Paris Descartes
+75006 Paris, France
+Université Paris Descartes
+75006 Paris, France
+Alexander V. Terekhov
+Laboratoire Psychologie de la Perception
+Université Paris Descartes
+75006 Paris, France"
+00fb2836068042c19b5197d0999e8e93b920eb9c,Genetic Algorithm for Weight Optimization in Descriptor based Face Recognition Methods,
+005c996a9059af96454c3d6f83338068d3608585,On Detection of Multiple Object Instances Using Hough Transforms,"On Detection of Multiple Object Instances using Hough Transforms
+Olga Barinova
+Moscow State University∗
+Victor Lempitsky
+University of Oxford∗
+Pushmeet Kohli
+Microsoft Research Cambridge"
+0033e0ce8720f913761f9edb9a6c378eed8366a8,Interactive Object Retrieval using Interpretable Visual Models,"UNIVERSIT´EPARIS-SUD11Facult´edessciencesd’OrsayN◦Ordre:2011PA112054PHDTHESISInteractiveObjectRetrievalusingInterpretableVisualModelsSubmittedforthedegreeof“docteurensciences”oftheUniversityParis-Sud11Speciality:ComputerScienceByAhmedRebaiMay2011INRIAParis-Rocquencourt,ImediaTeamThesiscommittee:Reviewers:FredStentiford-Prof.atUniversityCollegeLondon(UK)SylviePhilipp-Foliguet-Prof.atUniversit´eCergy/Pontoise(FR)Director:NozhaBoujemaa-DirectoroftheINRIA-SaclayCenter(FR)Advisor:AlexisJoly-ResearcheratINRIA-Rocquencourt(FR)Examinator:MichelCrucianu-Prof.atCNAM(FR)President:Fran¸coisYvon-Prof.atUniversit´eParis-Sud11(FR)Copyrightc(cid:13)2011AhmedRebaiAllrightsreserved."
+003afe78ec7989371f648fd8957a6ce79083cf11,SeaCLEF 2016: Object Proposal Classification for Fish Detection in Underwater Videos,"SeaCLEF 2016: Object proposal classification for
+fish detection in underwater videos
+Jonas J¨ager1,2, Erik Rodner2, Joachim Denzler2, Viviane Wolff1, and Klaus
+Fricke-Neuderth1
+Department of Electrical Engineering and Information Technology,
+Fulda University of Applied Sciences, Germany
+Computer Vision Group, Friedrich Schiller University Jena, Germany"
+00dfd58bbaff871603e4a8aa81e67915b0675aeb,Human Sensing Using Computer Vision for Personalized Smart Spaces,"013 IEEE 10th International Conference on Ubiquitous Intelligence & Computing and 2013 IEEE 10th International Conference
+on Autonomic & Trusted Computing
+Human Sensing using Computer Vision for
+Personalized Smart Spaces
+Dipak Surie, Saeed Partonia, Helena Lindgren
+User Interaction and Knowledge Modeling Group
+Dept. of Computing Science
+Umeå University, Sweden
+{dipak, mcs10spa,
+spaces
+everyday"
+008dafebbb27eb64a1af8ded8bfe2e7a04c1d703,CANDLE/Supervisor: A Workflow Framework for Machine Learning Applied to Cancer Research,"CANDLE/Supervisor: A Workflow Framework for
+Machine Learning Applied to Cancer Research
+Justin M. Wozniak, Rajeev Jain,
+Prasanna Balaprakash
+Mathematics & Computer Science
+Argonne National Laboratory
+Argonne, IL, USA
+Jamaludin Mohd-Yusof,
+Cristina Garcia Cardona
+Computer, Computational &
+Statistical Sciences
+Los Alamos National Laboratory
+Los Alamos, NM, USA
+Jonathan Ozik,
+Nicholson Collier
+Global Security Sciences
+Argonne National Laboratory
+Argonne, IL, USA
+Brian Van Essen
+Lawrence Livermore National"
+0077cd8f97cafd2b389783858a6e4ab7887b0b6b,Face Image Reconstruction from Deep Templates,"MAI et al.: ON THE RECONSTRUCTION OF DEEP FACE TEMPLATES
+On the Reconstruction of Deep Face Templates
+Guangcan Mai, Kai Cao, Pong C. Yuen, Senior Member, IEEE, and Anil K. Jain, Life Fellow, IEEE"
+00b03ee4a7e31a999715d7a0c31d283d646106fa,Multi-level Semantic Feature Augmentation for One-shot Learning,"Multi-level Semantic Feature Augmentation for
+One-shot Learning
+Zitian Chen, Yanwei Fu*, Yinda Zhang, Leonid Sigal"
+00d8f67ac0ea0bb2c9827b60e1f47c300346cd7a,Face recognition using color local binary pattern from mutually independent color channels,"Anbarjafari EURASIP Journal on Image and Video Processing 2013, 2013:6
+http://jivp.eurasipjournals.com/content/2013/1/6
+R ES EAR CH
+Open Access
+Face recognition using color local binary pattern
+from mutually independent color channels
+Gholamreza Anbarjafari"
+00214fe1319113e6649435cae386019235474789,Face Recognition using Distortion Models,"Bachelorarbeit im Fach Informatik
+Face Recognition using
+Distortion Models
+Mathematik, Informatik und Naturwissenschaften der
+RHEINISCH-WESTFÄLISCHEN TECHNISCHEN HOCHSCHULE AACHEN
+Der Fakultät für
+Lehrstuhl für Informatik VI
+Prof. Dr.-Ing. H. Ney
+vorgelegt von:
+Harald Hanselmann
+Matrikelnummer 252400
+Gutachter:
+Prof. Dr.-Ing. H. Ney
+Prof. Dr. B. Leibe
+Betreuer:
+Dipl.-Inform. Philippe Dreuw
+September 2009"
+0063b44da282eec78045ab59d2debbf61959a4a4,Improving person re-identification by viewpoint cues,"Improving Person Re-identification by Viewpoint Cues
+Sławomir B ˛ak
+Sofia Zaidenberg Bernard Boulay
+Francois Brémond
+INRIA Sophia Antipolis, STARS/Neosensys
+004, route des Lucioles, BP93
+06902 Sophia Antipolis Cedex - France"
+003b141fb02078a4b5d02f4f803001ce22d73ba7,Real-time 3d Multiple Human Tracking with Robustness Enhancement through Machine Learning,"REAL-TIME 3D MULTIPLE HUMAN TRACKING WITH
+ROBUSTNESS ENHANCEMENT THROUGH MACHINE LEARNING
+Keywords:
+Visual Tracking"
+004e3292885463f97a70e1f511dc476289451ed5,Quadruplet-Wise Image Similarity Learning,"Quadruplet-wise Image Similarity Learning
+Marc T. Law
+Nicolas Thome
+Matthieu Cord
+LIP6, UPMC - Sorbonne University, Paris, France
+{Marc.Law, Nicolas.Thome,"
+00d14af37bc75b6477b4846f6ab561cdc89c96a2,"UvA-DARE ( Digital Academic Repository ) Infants ’ Temperament and Mothers ’ , and Fathers ’ Depression Predict Infants ’ Attention to Objects Paired with Emotional","UvA-DARE (Digital Academic Repository)
+Infants’ Temperament and Mothers’, and Fathers’ Depression Predict Infants’ Attention
+to Objects Paired with Emotional Faces
+Aktar, E.; Mandell, D.J.; de Vente, W.; Majdandzic, M.; Raijmakers, M.E.J.; Bögels, S.M.
+Published in:
+Journal of Abnormal Child Psychology
+0.1007/s10802-015-0085-9
+Link to publication
+Citation for published version (APA):
+Aktar, E., Mandell, D. J., de Vente, W., Majdandži, M., Raijmakers, M. E. J., & Bögels, S. M. (2016). Infants’
+Temperament and Mothers’, and Fathers’ Depression Predict Infants’ Attention to Objects Paired with Emotional
+Faces. Journal of Abnormal Child Psychology, 44(5), 975-990. DOI: 10.1007/s10802-015-0085-9
+General rights
+It is not permitted to download or to forward/distribute the text or part of it without the consent of the author(s) and/or copyright holder(s),
+other than for strictly personal, individual use, unless the work is under an open content license (like Creative Commons).
+Disclaimer/Complaints regulations
+If you believe that digital publication of certain material infringes any of your rights or (privacy) interests, please let the Library know, stating
+your reasons. In case of a legitimate complaint, the Library will make the material inaccessible and/or remove it from the website. Please Ask
+the Library: http://uba.uva.nl/en/contact, or a letter to: Library of the University of Amsterdam, Secretariat, Singel 425, 1012 WP Amsterdam,
+The Netherlands. You will be contacted as soon as possible."
+00433d2ad90b40bc5ad22a591aac0da68037003e,K-means Based Automatic Pests Detection and Classification for Pesticides Spraying,"(IJACSA) International Journal of Advanced Computer Science and Applications,
+Vol. 8 No. 11, 2017
+K-means Based Automatic Pests Detection and
+Classification for Pesticides Spraying
+Muhammad Hafeez Javed
+Foundation University Islamabad
+M Humair Noor
+Babar Yaqoob Khan
+Foundation University Islamabad
+Foundation University Islamabad
+Nazish Noor
+Foundation University Islamabad
+Tayyaba Arshad
+Foundation University Islamabad"
+00cb08dcef72bfaa1aab0664d34168615ac6a5cc,Amygdala Surface Modeling with Weighted Spherical Harmonics,"Amygdala Surface Modeling with
+Weighted Spherical Harmonics
+Moo K. Chung1,2, Brendon M. Nacewicz2, Shubing Wang1,
+Kim M. Dalton2, Seth Pollak3, and Richard J. Davidson2,3
+Department of Statistics, Biostatistics and Medical Informatics
+Waisman Laboratory for Brain Imaging and Behavior
+Department of Psychology and Psychiatry
+University of Wisconsin, Madison, WI 53706, USA"
+0079d56c8e183ef36f876b84327b97ee9454825b,Scene Parsing by Weakly Supervised Learning with Image Descriptions,"Hierarchical Scene Parsing by Weakly
+Supervised Learning with Image Descriptions
+Ruimao Zhang, Liang Lin, Guangrun Wang, Meng Wang, and Wangmeng Zuo"
+003846e4559fa32699f08ecd09de13ed5a4e92d2,Analysis of Brain Waves in Violent Images - Are Differences in Gender?,
+00f0ed04defec19b4843b5b16557d8d0ccc5bb42,Modeling Spatial and Temporal Cues for Multi-label Facial Action Unit Detection,
+005503ccf270890ea2582370feed4506f3785004,Characterizing the temporal dynamics of object recognition by deep neural networks: role of depth,"ioRxiv preprint first posted online Sep. 10, 2017;
+peer-reviewed) is the author/funder. All rights reserved. No reuse allowed without permission.
+http://dx.doi.org/10.1101/178541
+The copyright holder for this preprint (which was not
+Characterizing the temporal dynamics of object
+recognition by deep neural networks : role of depth
+Kandan Ramakrishnan1, Iris I.A. Groen2, Arnold W.M. Smeulders1,
+H. Steven Scholte*3, Sennay Ghebreab*1
+Institute of Informatics, University of Amsterdam.
+Laboratory of Brain and Cognition, National Institute of Health.
+Department of Psychology, University of Amsterdam.
+Keywords: deep neural network, ERP, architecture, number of layers"
+00d63b30e7e8383ea3dd2993499df70a51295d13,Exploiting structure in man-made environments,"Exploiting structure in man-made environments
+ALPER AYDEMIR
+Doctoral Thesis
+Stockholm, Sweden, 2012"
+0037bff7be6d463785d4e5b2671da664cd7ef746,Multiple Instance Metric Learning from Automatically Labeled Bags of Faces,"Author manuscript, published in ""European Conference on Computer Vision (ECCV '10) 6311 (2010) 634--647""
+DOI : 10.1007/978-3-642-15549-9_46"
+0014a057ebdeca672b1cdee8104cca4dc928ef3e,Training Deformable Part Models with Decorrelated Features,"Training deformable part models with decorrelated features
+Ross Girshick and Jitendra Malik
+UC Berkeley
+{rbg,"
+00b370765678c44acd5313f3946b2431890721a9,Dynamic Scene Classification: Learning Motion Descriptors with Slow Features Analysis,"Dynamic Scene Classification: Learning Motion Descriptors with Slow Features
+Analysis
+Christian Th´eriault, Nicolas Thome, Matthieu Cord
+UPMC-Sorbonne Universities, Paris, France"
+00e39fad9846084eb435b6cddd675ee11f2dfb90,Person Re-identification Using Haar-based and DCD-based Signature,"Person Re-identification Using Haar-based and
+DCD-based Signature
+Slawomir Bak, Etienne Corvee, François Bremond, Monique Thonnat
+To cite this version:
+Slawomir Bak, Etienne Corvee, François Bremond, Monique Thonnat. Person Re-identification Us-
+ing Haar-based and DCD-based Signature. 2nd Workshop on Activity Monitoring by Multi-Camera
+Surveillance Systems, AMMCSS 2010, in conjunction with 7th IEEE International Conference on Ad-
+vanced Video and Signal-Based Surveillance, AVSS - 2010, Aug 2010, Boston, United States. 2010.
+<inria-00496051>
+HAL Id: inria-00496051
+https://hal.inria.fr/inria-00496051
+Submitted on 29 Jun 2010
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents"
+006a9f68bcf6edca62d8750af55168971cf0890c,Dynamic Programming Bipartite Belief Propagation For Hyper Graph Matching,Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+001dc49f7f3348841b4086f966bfe4e9dfadf03e,Automatic image captioning using multitask learning,"Automatic image captioning using multi-task learning
+Anna Fariha"
+0029418d56d8fe71d1d45bdaad88e5cc75dc58e7,Pushing the “Speed Limit”: High-Accuracy US Traffic Sign Recognition With Convolutional Neural Networks,"Pushing the “Speed Limit”: High-Accuracy U.S.
+Traffic Sign Recognition with Convolutional Neural
+Networks
+Yuan Li, Andreas Møgelmose, and Mohan M. Trivedi"
+00d9d88bb1bdca35663946a76d807fff3dc1c15f,Subjects and Their Objects: Localizing Interactees for a Person-Centric View of Importance,"Subjects and Their Objects: Localizing Interactees for a
+Person-Centric View of Importance
+Chao-Yeh Chen · Kristen Grauman"
+00091891790ee77816ebd785d25900254e6986bd,Discriminative Robust Local Binary Pattern based Edge Texture Features for Object Recognition,"International Journal of Scientific Engineering and Research (IJSER)
+ISSN (Online): 2347-3878, Impact Factor (2014): 3.05
+www.ijser.in
+Discriminative Robust Local Binary Pattern based
+Edge Texture Features for Object Recognition
+Rasika Raikar1, Shivani Pandita2
+Dhole Patil College of Engineering, Wagholi, Pune, India
+Professor, Dhole Patil College of Engineering, Wagholi, Pune, India
+round
+each point. Various"
+00edd45d8f4fd75fc329d6a6fcc7d87108baa3a9,Distance Measures for Gabor Jets-Based Face Authentication: A Comparative Evaluation,"Distance Measures for Gabor Jets-based Face
+Authentication: A Comparative Evaluation
+Daniel Gonz´alez-Jim´enez1, Manuele Bicego2, J.W.H. Tangelder3, B.A.M
+Schouten3, Onkar Ambekar3, Jos´e Luis Alba-Castro1, Enrico Grosso2, Massimo
+Tistarelli4
+TSC Department, University of Vigo, Vigo (Spain)
+DEIR - University of Sassari, Sassari (Italy)
+CWI, Amsterdam (The Netherlands)
+DAP - University of Sassari, Alghero (Italy)"
+00a3cfe3ce35a7ffb8214f6db15366f4e79761e3,Using Kinect for real-time emotion recognition via facial expressions,"Qi-rong Mao, Xin-yu Pan, Yong-zhao Zhan, Xiang-jun Shen, 2015. Using
+Kinect for real-time emotion recognition via facial expressions. Frontiers of
+Information Technology & Electronic Engineering, 16(4):272-282.
+[doi:10.1631/FITEE.1400209]
+Using Kinect for real-time emotion
+recognition via facial expressions
+Key words: Kinect, Emotion recognition, Facial expression, Real-time
+lassification, Fusion algorithm, Support vector machine (SVM)
+Contact: Qi-rong Mao
+E-mail:
+ORCID: http://orcid.org/0000-0002-5021-9057
+Front Inform Technol & Electron Eng"
+004a1bb1a2c93b4f379468cca6b6cfc6d8746cc4,Balanced k-Means and Min-Cut Clustering,"Balanced k-Means and Min-Cut Clustering
+Xiaojun Chang, Feiping Nie, Zhigang Ma, and Yi Yang"
+0089a590154694e0de340f357a022f6a38d60946,Speeding-up Object Detection Training for Robotics with FALKON,"Speeding-up Object Detection Training for Robotics with FALKON
+Elisa Maiettini1,2,3, Giulia Pasquale1,2, Lorenzo Rosasco2,3 and Lorenzo Natale1"
+00d94b35ffd6cabfb70b9a1d220b6823ae9154ee,Discriminative Bayesian Dictionary Learning for Classification,"Discriminative Bayesian Dictionary Learning
+for Classification
+Naveed Akhtar, Faisal Shafait, and Ajmal Mian"
+002d1619748a99aa683b5c30b7eafebdfe6adfc4,Nearest feature line embedding for face hallucination,"Nearest feature line embedding for face
+hallucination
+Junjun Jiang, Ruimin Hu, Zhen Han and Tao Lu
+A new manifold learning method, called nearest feature line (NFL)
+embedding, for face hallucination is proposed. While many manifold
+learning based face hallucination algorithms have been proposed in
+recent years, most of them apply the conventional nearest neighbour
+metric to derive the subspace and may not effectively characterise
+the geometrical
+information of the samples, especially when the
+number of training samples is limited. This reported work proposes
+using the NFL metric to define the neighbourhood relations between
+face samples to improve the expressing power of the given training
+samples for reconstruction. The algorithm preserves the linear relation-
+ship in a smaller local space than traditional manifold learning based
+methods, which better reflects the nature of manifold learning theory.
+Experimental results demonstrate that
+the method is effective at
+preserving detailed visual information.
+Introduction: Face super-resolution (SR), or face hallucination, refers to"
+00f17fca3cf3ab4262edde3626e6230a89ff1a1f,Human Pose Estimation with Iterative Error Feedback,"Human Pose Estimation with Iterative Error
+Feedback
+Jo˜ao Carreira
+UC Berkeley
+Pulkit Agrawal
+UC Berkeley
+Katerina Fragkiadaki
+UC Berkeley
+Jitendra Malik
+UC Berkeley"
+006f283a50d325840433f4cf6d15876d475bba77,Preserving Structure in Model-Free Tracking,"Preserving Structure in Model-Free Tracking
+Lu Zhang and Laurens van der Maaten"
+00d931eccab929be33caea207547989ae7c1ef39,The Natural Input Memory Model,"The Natural Input Memory Model
+Joyca P.W. Lacroix
+Department of Computer Science, IKAT, Universiteit Maastricht, St. Jacobsstraat 6, 6211 LB Maastricht, The Netherlands
+Department of Psychology, Universiteit van Amsterdam, Roeterstraat 15, 1018 WB Amsterdam, The Netherlands
+Jaap M.J. Murre
+Department of Computer Science, IKAT, Universiteit Maastricht, St. Jacobsstraat 6, 6211 LB Maastricht, The Netherlands
+Eric O. Postma
+H. Jaap van den Herik"
+00796052277d41e2bb3a1284d445c1747aed295f,Performance and Energy Consumption Characterization and Modeling of Video Decoding on Multi-core Heterogenous SoC and their Applications,"Performance and Energy Consumption Characterization
+nd Modeling of Video Decoding on Multi-core
+Heterogenous SoC and their Applications
+Yahia Benmoussa
+To cite this version:
+Yahia Benmoussa. Performance and Energy Consumption Characterization and Modeling of
+Video Decoding on Multi-core Heterogenous SoC and their Applications. Multimedia [cs.MM].
+Universit´e de Bretagne Occidentale, 2015. English. <tel-01313326>
+HAL Id: tel-01313326
+https://hal.archives-ouvertes.fr/tel-01313326
+Submitted on 9 May 2016
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destin´ee au d´epˆot et `a la diffusion de documents
+scientifiques de niveau recherche, publi´es ou non,"
+0052de4885916cf6949a6904d02336e59d98544c,Generalized Low Rank Approximations of Matrices,"005 Springer Science + Business Media, Inc. Manufactured in The Netherlands.
+DOI: 10.1007/s10994-005-3561-6
+Generalized Low Rank Approximations of Matrices
+JIEPING YE
+Department of Computer Science & Engineering,University of Minnesota-Twin Cities, Minneapolis,
+MN 55455, USA
+Editor:
+Peter Flach
+Published online: 12 August 2005"
+00319cd17cebae5e1095a248260bd7be15781362,A Dataset for Improved RGBD-Based Object Detection and Pose Estimation for Warehouse Pick-and-Place,"A Dataset for Improved RGBD-based Object
+Detection and Pose Estimation for Warehouse
+Pick-and-Place
+Colin Rennie1, Rahul Shome1, Kostas E. Bekris1, and Alberto F. De Souza2"
+0041afaf2b17f1a33bd514db27b17ce34670fdb8,Deep Reinforcement Learning-Based Image Captioning with Embedding Reward,"Deep Reinforcement Learning-based Image Captioning with Embedding Reward
+Zhou Ren1
+Xiaoyu Wang1
+Ning Zhang1
+Xutao Lv1
+Li-Jia Li2∗
+{zhou.ren, xiaoyu.wang, ning.zhang,
+Snap Inc.
+Google Inc."
+006350ae14784bb929b6a749d4e5c265a10168b7,Abstract Eye Detection Using Discriminatory Features and an Efficient Support Vector Machine Eye Detection Using Discriminatory Features and an Efficient Support Vector Machine Eye Detection Using Discriminatory Features and an Efficient Support Vector Machine,"Copyright Warning & Restrictions
+The copyright law of the United States (Title 17, United
+States Code) governs the making of photocopies or other
+reproductions of copyrighted material.
+Under certain conditions specified in the law, libraries and
+rchives are authorized to furnish a photocopy or other
+reproduction. One of these specified conditions is that the
+photocopy or reproduction is not to be “used for any
+purpose other than private study, scholarship, or research.”
+If a, user makes a request for, or later uses, a photocopy or
+reproduction for purposes in excess of “fair use” that user
+may be liable for copyright infringement,
+This institution reserves the right to refuse to accept a
+opying order if, in its judgment, fulfillment of the order
+would involve violation of copyright law.
+Please Note: The author retains the copyright while the
+New Jersey Institute of Technology reserves the right to
+distribute this thesis or dissertation
+Printing note: If you do not wish to print this page, then select
+“Pages from: first page # to: last page #” on the print dialog screen"
+6ef0b43cf897f527540c29cae0618aabb7329072,Parallel Algorithms for Nearest Neighbor Search Problems in High Dimensions,"PARALLEL ALGORITHMS FOR NEAREST NEIGHBOR SEARCH
+PROBLEMS IN HIGH DIMENSIONS.
+BO XIAO∗ AND GEORGE BIROS†"
+6e396401b3950eccdaf8265aeae8a4f0da8965a0,Obstacle Detection Quality as a Problem-Oriented Approach to Stereo Vision Algorithms Estimation in Road Situation Analysis,"Obstacle Detection Quality as a Problem-Oriented
+Approach to Stereo Vision Algorithms Estimation
+in Road Situation Analysis
+A.A. Smagina, D.A. Shepelev, E.I. Ershov, A.S. Grigoryev
+Institute for Information Transmission Problems (Kharkevich Institute) –IITP RAS,
+Bolshoy Karetny per. 19, build.1, Moscow, Russia, 127051
+E-mail:"
+6e99832e265999194aa88958d892db62afbd7ac9,Is Combinational Strategy Better For Image Memorability Prediction,"Is Combinational Strategy Better For Image
+Memorability Prediction
+Wenting Zhu"
+6e198f6cc4199e1c4173944e3df6f39a302cf787,MORPH-II: Inconsistencies and Cleaning Whitepaper,"MORPH-II: Inconsistencies and Cleaning Whitepaper
+Participants: G. Bingham, B. Yip, M. Ferguson, and C. Nansalo
+Mentors: C. Chen, Y. Wang, and T. Kling
+NSF-REU Site at UNC Wilmington, Summer 2017"
+6e0288b874320b1b6461016fde8b215c3ba46b90,Recognising activities by jointly modelling actions and their effects,"This thesis has been submitted in fulfilment of the requirements for a postgraduate degree
+(e.g. PhD, MPhil, DClinPsychol) at the University of Edinburgh. Please note the following
+terms and conditions of use:
+This work is protected by copyright and other intellectual property rights, which are
+retained by the thesis author, unless otherwise stated.
+A copy can be downloaded for personal non-commercial research or study, without
+prior permission or charge.
+This thesis cannot be reproduced or quoted extensively from without first obtaining
+permission in writing from the author.
+The content must not be changed in any way or sold commercially in any format or
+medium without the formal permission of the author.
+When referring to this work, full bibliographic details including the author, title,
+warding institution and date of the thesis must be given."
+6e82ce9897093ce4f5fa795887273992489c380d,Face recognition using Eigensurface on Kinect depth-maps,"Int'l Conf. IP, Comp. Vision, and Pattern Recognition | IPCV'16 |
+Face recognition using Eigensurface on Kinect depth-maps
+Marcelo Romero1, Cesar Flores1, Vianney Muñoz1 and Luis Carlos Altamirano2
+Universidad Autónoma del Estado de México1 and Benemérita Universidad Autónoma de Puebla2"
+6e297f10a02580dfc74595ff8d7db34020002ec4,Correlation Net : spatio temporal multimodal deep learning,"learning
+Novanto Yudistira, Takio Kurita, Member, IEEE,"
+6e35585eb37ee8a1de60a10a56a3183af480e214,"The YLI-MED Corpus: Characteristics, Procedures, and Plans",
+6e7cfcefe82471a6aca78b59be0285467ce37b8b,Déjà Vu: an empirical evaluation of the memorization properties of ConvNets,"D´ej`a Vu: an empirical evaluation of the
+memorization properties of ConvNets
+Alexandre Sablayrolles†,(cid:63), Matthijs Douze†, Cordelia Schmid(cid:63),
+nd Herv´e J´egou†
+Facebook AI Research
+(cid:63)Inria
+September 19, 2018"
+6eba25166fe461dc388805cc2452d49f5d1cdadd,"ALBANIE, VEDALDI: LEARNING GRIMACES BY WATCHING TV 1 Learning Grimaces by Watching TV","Pages 122.1-122.12
+DOI: https://dx.doi.org/10.5244/C.30.122"
+6e8a81d452a91f5231443ac83e4c0a0db4579974,Illumination robust face representation based on intrinsic geometrical information,"Illumination robust face representation based on intrinsic geometrical
+information
+Soyel, H; Ozmen, B; McOwan, PW
+This is a pre-copyedited, author-produced PDF of an article accepted for publication in IET
+Conference on Image Processing (IPR 2012). The version of record is available
+http://ieeexplore.ieee.org/document/6290632/?arnumber=6290632&tag=1
+For additional information about this publication click this link.
+http://qmro.qmul.ac.uk/xmlui/handle/123456789/16147
+Information about this research object was correct at the time of download; we occasionally
+make corrections to records, please therefore check the published record when citing. For
+more information contact"
+6ed738ff03fd9042965abdfaa3ed8322de15c116,K-MEAP: Generating Specified K Clusters with Multiple Exemplars by Efficient Affinity Propagation,"This document is downloaded from DR-NTU, Nanyang Technological
+University Library, Singapore.
+Title
+K-MEAP: Generating Specified K Clusters with Multiple
+Exemplars by Efficient Affinity Propagation
+Author(s) Wang, Yangtao; Chen, Lihui
+Citation
+Wang, Y & Chen, L. (2014). K-MEAP: Generating
+Specified K Clusters with Multiple Exemplars by Efficient
+Affinity Propagation. 2014 IEEE International Conference
+on Data Mining (ICDM), 1091-1096.
+http://hdl.handle.net/10220/39690
+Rights
+© 2014 IEEE. Personal use of this material is permitted.
+Permission from IEEE must be obtained for all other
+uses, in any current or future media, including
+reprinting/republishing this material for advertising or
+promotional purposes, creating new collective works, for
+resale or redistribution to servers or lists, or reuse of any
+opyrighted component of this work in other works. The"
+6ee1f57cbf7daa37576efca7e7d24040a5c94ee2,Multimodal Neural Network for Overhead Person Re-Identification,"Aalborg Universitet
+Multimodal Neural Network for Overhead Person Re-identification
+Lejbølle, Aske Rasch; Nasrollahi, Kamal; Krogh, Benjamin; Moeslund, Thomas B.
+Published in:
+6th International Conference of the Biometrics Special Interest Group
+DOI (link to publication from Publisher):
+0.23919/BIOSIG.2017.8053514
+Publication date:
+Document Version
+Accepted author manuscript, peer reviewed version
+Link to publication from Aalborg University
+Citation for published version (APA):
+Lejbølle, A. R., Nasrollahi, K., Krogh, B., & Moeslund, T. B. (2017). Multimodal Neural Network for Overhead
+Person Re-identification. In 16th International Conference of the Biometrics Special Interest Group IEEE.
+https://doi.org/10.23919/BIOSIG.2017.8053514
+General rights
+Copyright and moral rights for the publications made accessible in the public portal are retained by the authors and/or other copyright owners
+nd it is a condition of accessing publications that users recognise and abide by the legal requirements associated with these rights.
+? Users may download and print one copy of any publication from the public portal for the purpose of private study or research.
+? You may not further distribute the material or use it for any profit-making activity or commercial gain"
+6ecd4025b7b5f4894c990614a9a65e3a1ac347b2,Automatic Naming of Character using Video Streaming for Face Recognition with Graph Matching,"International Journal on Recent and Innovation Trends in Computing and Communication
+ISSN: 2321-8169
+Volume: 2 Issue: 5
+1275– 1281
+_______________________________________________________________________________________________
+Automatic Naming of Character using Video Streaming for Face
+Recognition with Graph Matching
+Nivedita.R.Pandey
+Ranjan.P.Dahake
+PG Student at MET’s IOE Bhujbal Knowledge City,
+PG Student at MET’s IOE Bhujbal Knowledge City,
+Nasik, Maharashtra, India,
+Nasik, Maharashtra, India,"
+6e7b2afb4daf1fe50a62faf75018ff81c24ee526,Submitted to CVPR ' 99 Discriminant Analysis based Feature ExtractionW,"SubmittedtoCVPR' DiscriminantAnalysisbasedFeatureExtraction
+W.Zhao
+CenterforAutomationResearch
+UniversityofMaryland
+CollegePark,MD
+nantAnalysishaveachievedquiteasuccessinprac-"
+6e3a181bf388dd503c83dc324561701b19d37df1,Finding a low-rank basis in a matrix subspace,"Finding a low-rank basis in a matrix subspace
+Yuji Nakatsukasa · Tasuku Soma ·
+Andr´e Uschmajew"
+6e1b85aabb132ed741381fdf00909475d16cd3ba,"Motor, emotional and cognitive empathic abilities in children with autism and conduct disorder","Motor, Emotional and Cognitive Empathic Abilities
+in Children with Autism and Conduct Disorder
+Danielle M.A. Bons1,2
++31 (0)488 – 469 611
+Nanda N.J. Rommelse1,2
++31 (0)24 351 2222
+Floor E. Scheepers1
+Jan K. Buitelaar1,2
+Karakter child- and adolescent psychiatry
+University Centre Nijmegen, Zetten-Tiel
+Department of Psychiatry UMC St. Radboud
+P.O. Box 9101, 6500HB Nijmegen, The
+P.O. Box 104, 6670AC Zetten, The Netherlands
+the studies"
+6ef1996563835b4dfb7fda1d14abe01c8bd24a05,Nonparametric Part Transfer for Fine-Grained Recognition,"Nonparametric Part Transfer for Fine-grained Recognition
+Christoph G¨oring, Erik Rodner, Alexander Freytag, and Joachim Denzler∗
+Computer Vision Group, Friedrich Schiller University Jena
+www.inf-cv.uni-jena.de"
+6e75fcf384b31ea2108a81d868fbb886f39cd188,Sparse Coding on Symmetric Positive Definite Manifolds Using Bregman Divergences,"Sparse Coding on Symmetric Positive Definite Manifolds
+using Bregman Divergences
+Mehrtash Harandi, Richard Hartley, Brian Lovell, Conrad Sanderson"
+6e80caed3f2ac86db775bd5e7d64925b00f1a0ca,Social interaction contexts bias the perceived expressions of interactants.,"City Research Online
+City, University of London Institutional Repository
+Citation: Gray, K., Barber, L., Murphy, J. & Cook, R. (2017). Social interaction contexts
+0.1037/emo0000257
+This is the accepted version of the paper.
+This version of the publication may differ from the final published
+version.
+Permanent repository link: http://openaccess.city.ac.uk/16315/
+Link to published version: http://dx.doi.org/10.1037/emo0000257
+Copyright and reuse: City Research Online aims to make research
+outputs of City, University of London available to a wider audience.
+Copyright and Moral Rights remain with the author(s) and/or copyright
+holders. URLs from City Research Online may be freely distributed and
+linked to.
+City Research Online: http://openaccess.city.ac.uk/"
+6e32c368a6157fb911c9363dc3e967a7fb2ad9f7,Hybrid Stochastic / Deterministic Optimization for Tracking Sports Players and Pedestrians,"Hybrid Stochastic / Deterministic Optimization
+for Tracking Sports Players and Pedestrians(cid:2)
+Robert T. Collins1 and Peter Carr2
+The Pennsylvania State University, USA
+Disney Research Pittsburgh, USA"
+6e44ddb54edbb80d5bb8f2ca3b36e40c486b9daf,Evolutionary 3D Mapping,"Evolutionary 3D Mapping Using the GPU
+Calculating the psi similarity function for 2D images
+Diana Cristina Albu
+May 7, 2007
+Submitted to the School of Engineering and Sciences
+in partial fulfillment of the requirements for the degree of
+Bachelor of Science in Electrical Engineering and Computer Science
+Jacobs University Bremen
+Supervisor: Andreas Birk"
+6e8c3b7d25e6530a631ea01fbbb93ac1e8b69d2f,"Deep Episodic Memory: Encoding, Recalling, and Predicting Episodic Experiences for Robot Action Execution","Deep Episodic Memory: Encoding, Recalling, and Predicting
+Episodic Experiences for Robot Action Execution
+Jonas Rothfuss∗†, Fabio Ferreira∗†, Eren Erdal Aksoy ‡, You Zhou† and Tamim Asfour†"
+6e7d799497b94954dc4232d840628c3a00263e42,Deep Multimodal Pain Recognition: A Database and Comparision of Spatio-Temporal Visual Modalities,"Aalborg Universitet
+Deep Multimodal Pain Recognition: A Database and Comparison of Spatio-Temporal
+Visual Modalities
+Haque, Mohammad Ahsanul; Nasrollahi, Kamal; Moeslund, Thomas B.; B. Bautista, Ruben;
+Laursen, Christian B.; Escalera, Sergio; Irani, Ramin; Andersen, Ole Kæseler; Spaich, Erika
+Geraldina; Kulkarni, Kaustubh; Bellantonio, Marco; Anbarjafari, Gholamreza; Noroozi,
+Fatemeh
+Published in:
+Proc. of the 13th IEEE Conf. on Automatic Face and Gesture Recognition
+Publication date:
+Link to publication from Aalborg University
+Citation for published version (APA):
+Haque, M. A., Nasrollahi, K., Moeslund, T. B., B. Bautista, R., Laursen, C. B., Escalera, S., ... Noroozi, F. (2018).
+Deep Multimodal Pain Recognition: A Database and Comparison of Spatio-Temporal Visual Modalities. In Proc.
+of the 13th IEEE Conf. on Automatic Face and Gesture Recognition (pp. 1). IEEE.
+General rights
+Copyright and moral rights for the publications made accessible in the public portal are retained by the authors and/or other copyright owners
+nd it is a condition of accessing publications that users recognise and abide by the legal requirements associated with these rights.
+? Users may download and print one copy of any publication from the public portal for the purpose of private study or research.
+? You may not further distribute the material or use it for any profit-making activity or commercial gain"
+6e911227e893d0eecb363015754824bf4366bdb7,Wasserstein Divergence for GANs,"Wasserstein Divergence for GANs
+Jiqing Wu1, Zhiwu Huang1, Janine Thoma1, Dinesh Acharya1, and
+Luc Van Gool1,2
+Computer Vision Lab, ETH Zurich, Switzerland
+VISICS, KU Leuven, Belgium"
+6e885d831568520aa95f523f625623e46578efd0,Camera Selection for Adaptive Human-Computer Interface,"JOURNAL OF LATEX CLASS FILES, VOL. 11, NO. 4, DECEMBER 2012
+Camera Selection for Adaptive
+Human-Computer Interface
+Niki Martinel Student Member, IEEE, Christian Micheloni, Member, IEEE,
+Claudio Piciarelli, Member, IEEE and Gian Luca Foresti, Senior Member, IEEE"
+6eb7ae81554ad4db92ee6b578f47be659c8b9cbd,Audio phrases for audio event recognition,"AUDIO PHRASES FOR AUDIO EVENT RECOGNITION
+Huy Phan(cid:63)†, Lars Hertel(cid:63), Marco Maass(cid:63), Radoslaw Mazur(cid:63), and Alfred Mertins(cid:63)
+Graduate School for Computing in Medicine and Life Sciences, University of L¨ubeck, Germany
+(cid:63)Institute for Signal Processing, University of L¨ubeck, Germany
+Email: {phan, hertel, maass, mazur,"
+6ee8a94ccba10062172e5b31ee097c846821a822,How to solve classification and regression problems on high-dimensional data with a supervised extension of slow feature analysis,"Submitted 3/13; Revised 10/13; Published 12/13
+How to Solve Classification and Regression Problems on
+High-Dimensional Data with a Supervised
+Extension of Slow Feature Analysis
+Alberto N. Escalante-B.
+Laurenz Wiskott
+Institut f¨ur Neuroinformatik
+Ruhr-Universit¨at Bochum
+Bochum D-44801, Germany
+Editor: David Dunson"
+6ee64c19efa89f955011531cde03822c2d1787b8,Table S1: Review of Existing Facial Expression Databases That Are Often Used in Social Psycholgy,"Table S1: Review of existing facial expression databases that are often used in social
+psycholgy.
+Author
+database
+Expressions1
+Format
+Short summary
+GEMEP Corpus
+Mind Reading: the
+interactive
+guide
+to emotions
+udio
+video
+record-
+Videos
+nger,
+muse-
+dmiration,
+ment,"
+6ed559a0d04e7d4185eeea43f77e372483982e4b,A Review Paper on Player Tracking and Automated Analysis in Sports Videos,"International Journal of Emerging Technology and Advanced Engineering
+Website: www.ijetae.com (ISSN 2250-2459, ISO 9001:2008 Certified Journal, Volume 5, Issue 6, June 2015)
+A Review Paper on Player Tracking and Automated Analysis in
+Sports Videos
+Nikhil M.1, Sreejith S.2
+,2Department of ECE, Government College of Engineering Kannur, kerala, India"
+6ee3fbc4768f578601d42b1596aaf2b0cfa1d40a,Human Detection and Identification by Robots Using Thermal and Visual Information in Domestic Environments,"J Intell Robot Syst (2012) 66:223–243
+DOI 10.1007/s10846-011-9612-2
+Human Detection and Identification by Robots
+Using Thermal and Visual Information
+in Domestic Environments
+Mauricio Correa · Gabriel Hermosilla ·
+Rodrigo Verschae · Javier Ruiz-del-Solar
+Received: 11 December 2010 / Accepted: 30 May 2011 / Published online: 12 July 2011
+© Springer Science+Business Media B.V. 2011"
+6e379f2d34e14efd85ae51875a4fa7d7ae63a662,A New Multi-modal Biometric System Based on Fingerprint and Finger Vein Recognition,"A NEW MULTI-MODAL BIOMETRIC SYSTEM
+BASED ON FINGERPRINT AND FINGER
+VEIN RECOGNITION
+Naveed AHMED
+Master's Thesis
+Department of Software Engineering
+Advisor: Prof. Dr. Asaf VAROL
+JULY-2014"
+6e74a055a70c69c287a34d86ce8b159456cf4420,Pose Recognition for Tracker Initialization Using 3 D Models,"Institutionen för systemteknik
+Department of Electrical Engineering
+Examensarbete
+Pose Recognition for Tracker Initialization Using
+D Models
+Examensarbete utfört i Bildbehandling
+vid Tekniska högskolan i Linköping
+Martin Berg
+LiTH-ISY-EX--07/4076--SE
+Linköping 2008
+Department of Electrical Engineering
+Linköpings universitet
+SE-581 83 Linköping, Sweden
+Linköpings tekniska högskola
+Linköpings universitet
+581 83 Linköping"
+6e0a05d87b3cc7e16b4b2870ca24cf5e806c0a94,Random Graphs for Structure Discovery in High-dimensional Data,"RANDOM GRAPHS FOR STRUCTURE
+DISCOVERY IN HIGH-DIMENSIONAL DATA
+Jos¶e Ant¶onio O. Costa
+A dissertation submitted in partial fulflllment
+of the requirements for the degree of
+Doctor of Philosophy
+(Electrical Engineering: Systems)
+in The University of Michigan
+Doctoral Committee:
+Professor Alfred O. Hero III, Chair
+Professor Jefirey A. Fessler
+Professor Susan A. Murphy
+Professor David L. Neuhofi"
+6e1802874ead801a7e1072aa870681aa2f555f35,Exploring Feature Descritors for Face Recognition,"­4244­0728­1/07/$20.00 ©2007 IEEE
+I ­ 629
+ICASSP 2007
+*22+),)164,7+616DAIK??AIIB=B=?AHA?CEJE=CHEJDCHA=JOHAEAI.EIDAHB=?A -*/ 4A?AJO?=*E=HO2=JJAH*22+),)"
+6ed22b934e382c6f72402747d51aa50994cfd97b,Customized expression recognition for performance-driven cutout character animation,"Customized Expression Recognition for Performance-Driven
+Cutout Character Animation
+Xiang Yu†
+NEC Laboratories America
+Jianchao Yang‡ Wilmot Li§
+Snapchat"
+6e261b9e539ecd03d76063f893d59c6eafb6ed43,On the Use of External Face Features for Identity Verification,"On the Use of External Face Features for
+Identity Verification
+`Agata Lapedriza1, David Masip2 and Jordi Vitri`a1
+Computer Vision Center (CVC), Computer Science Dept.
+Universitat Aut`onoma de Barcelona
+Bellaterra, Spain, 08193.
+{agata,
+Department of Applied Mathematics and Analysis (MAiA)
+University of Barcelona (UB)
+Edifici Hist`oric Gran Via de les Corts Catalanes 585, Barcelona 08007, Spain."
+6ee5205408fc6db03460c05765ae0f21a6eb9552,A literature review on recent multi-object tracking methods based on HMM and particle filter,"IOSR Journal of Computer Engineering (IOSR-JCE)
+e-ISSN: 2278-0661, p- ISSN: 2278-8727Volume 16, Issue 2, Ver. VII (Mar-Apr. 2014), PP 05-07
+www.iosrjournals.org
+A literature review on recent multi-object tracking methods
+ased on HMM and particle filter
+Kalyani Ahire1, Prof.P.S Mohod2
+Department of Computer Science & Engineering,, G.H.R.I.E.T.W.,RashtrasantTukdojiMaharaj Nagpur
+University Nagpur, India"
+6e93fd7400585f5df57b5343699cb7cda20cfcc2,Comparing a novel model based on the transferable belief model with humans during the recognition of partially occluded facial expressions.,"http://journalofvision.org/9/2/22/
+Comparing a novel model based on the transferable
+elief model with humans during the recognition of
+partially occluded facial expressions
+Zakia Hammal
+Martin Arguin
+Frédéric Gosselin
+Département de Psychologie, Université de Montréal,
+Canada
+Département de Psychologie, Université de Montréal,
+Canada
+Département de Psychologie, Université de Montréal,
+Canada
+Humans recognize basic facial expressions effortlessly. Yet, despite a considerable amount of research, this task remains
+elusive for computer vision systems. Here, we compared the behavior of one of the best computer models of facial
+expression recognition (Z. Hammal, L. Couvreur, A. Caplier, & M. Rombaut, 2007) with the behavior of human observers
+during the M. Smith, G. Cottrell, F. Gosselin, and P. G. Schyns (2005) facial expression recognition task performed on
+stimuli randomly sampled using Gaussian apertures. The modelVwhich we had to significantly modify in order to give the
+bility to deal with partially occluded stimuliVclassifies the six basic facial expressions (Happiness, Fear, Sadness,
+Surprise, Anger, and Disgust) plus Neutral from static images based on the permanent facial feature deformations and the"
+6e604946a0a51911db0e887378ba1ae103dcfb9e,Detection and Classification of a Moving Object in a Video Stream,"Proc. of the Intl. Conf. on Advances in Computing and Information Technology-- ACIT 2014
+Copyright © Institute of Research Engineers and Doctors. All rights reserved.
+ISBN: 978-981-07-8859-9 doi: 10.3850/ 978-981-07-8859-9_23
+Detection and Classification of a Moving Object
+in a Video Stream
+Asim R. Aldhaheri and Eran A. Edirisinghe"
+6edb41364802b0fdd1e3e98d644fe78b1ecbbe45,Understanding Image and Text Simultaneously: a Dual Vision-Language Machine Comprehension Task,"Understanding Image and Text Simultaneously: a Dual Vision-Language
+Machine Comprehension Task
+Nan Ding
+Google
+Sebastian Goodman
+Google
+Fei Sha
+Google
+Radu Soricut
+Google"
+9ab463d117219ed51f602ff0ddbd3414217e3166,Weighted Transmedia Relevance Feedback for Image Retrieval and Auto-annotation,"Weighted Transmedia
+Relevance Feedback for
+Image Retrieval and
+Auto-annotation
+Thomas Mensink, Jakob Verbeek, Gabriela Csurka
+TECHNICAL
+REPORT
+N° 0415
+December 2011
+Project-Teams LEAR - INRIA
+nd TVPA - XRCE"
+9af9fa7727df11b86301a252db8a916c3a516a8d,VIBIKNet: Visual Bidirectional Kernelized Network for Visual Question Answering,"VIBIKNet: Visual Bidirectional Kernelized
+Network for Visual Question Answering
+Marc Bola˜nos1,2, ´Alvaro Peris3, Francisco Casacuberta3, Petia Radeva1,2
+Universitat de Barcelona, Barcelona, Spain,
+Computer Vision Center, Bellaterra, Spain,
+PRHLT Research Center, Universitat Polit`ecnica de Val`encia, Val`encia, Spain,"
+9ac82909d76b4c902e5dde5838130de6ce838c16,Recognizing Facial Expressions Automatically from Video,"Recognizing Facial Expressions Automatically
+from Video
+Caifeng Shan and Ralph Braspenning
+Introduction
+Facial expressions, resulting from movements of the facial muscles, are the face
+hanges in response to a person’s internal emotional states, intentions, or social
+ommunications. There is a considerable history associated with the study on fa-
+ial expressions. Darwin (1872) was the first to describe in details the specific fa-
+ial expressions associated with emotions in animals and humans, who argued that
+ll mammals show emotions reliably in their faces. Since that, facial expression
+nalysis has been a area of great research interest for behavioral scientists (Ekman,
+Friesen, and Hager, 2002). Psychological studies (Mehrabian, 1968; Ambady and
+Rosenthal, 1992) suggest that facial expressions, as the main mode for non-verbal
+ommunication, play a vital role in human face-to-face communication. For illus-
+tration, we show some examples of facial expressions in Fig. 1.
+Computer recognition of facial expressions has many important applications in
+intelligent human-computer interaction, computer animation, surveillance and se-
+urity, medical diagnosis, law enforcement, and awareness systems (Shan, 2007).
+Therefore, it has been an active research topic in multiple disciplines such as psy-
+hology, cognitive science, human-computer interaction, and pattern recognition."
+9a6b80f8ea7e5f24e3da05a5151ba8b42494962f,Leveraging multiple tasks to regularize fine-grained classification,"Cancún Center, Cancún, México, December 4-8, 2016
+978-1-5090-4847-2/16/$31.00 ©2016 IEEE
+KingfisherRingedKingfisherWhite Breasted KingfisherMegaceryleCeryleChloroceryleHalcyonAlcedinidaeHalcyonidaeFig.1.Leveragingthetaxonomicontologyofbirdsforfinegrainedrecogni-tion.Fromtoptobottom,wehavefamily,orderandspeciesforfiveclassesofkingfishersintheCUB-200-2011dataset[6].Observehowidentifyingthefamilyorordercanhelpidentifyingtheclass,e.g.incaseofringedkingfisherandgreenkingfisher.Bestviewedenlarged,incolor.differencesandstrikinginter-classsimilarities.Mostmodernmethodsforfinegrainedrecognitionrelyonacombinationoflocalizingdiscriminativeregionsandlearningcorrespondingdiscriminativefeatures.Thisinturnrequiresstrongsuper-visionsuchaskeypointorattributeannotations,whichareexpensiveanddifficulttoobtainatscale.Ontheotherhand,sincefinegrainedrecognitiondealswithsubordinate-levelclassification,thereexistsanimpliedrelationshipsamonglabels.Theserelationshipsmaybetaxonomical(suchassuperclasses)orsemantic(suchasattributes)innature.Theontol-ogyobtainedinthismannercontainsrichlatentknowledgeaboutfinerdifferencesbetweenclassesthatcanbeexploitedforvisualclassification.Themodelweproposeconsistsofasingledeepconvolutionalneuralnetwork,witheachleveloftheontologygivingrisetoanadditionalsetoflabelsfortheinputimages.Theseadditionallabelsareusedasauxiliarytasksforamulti-tasknetwork,whichcanbetrainedend-to-endusingasimpleweightedobjectivefunction.Wealsoproposeanovelmethodtodynamicallyupdatethelearningrates(hereforthreferredtoasthetaskcoefficients)foreachtaskinthemulti-tasknetwork,basedonitsrelatednesstotheprimarytask.Inthiswork,weanalyzetheutilityofjointlylearningmultiplerelated/auxiliarytasksthatcouldregularizeeachothertopreventover-fitting,whileensuringthatthenetworkretainsitsdiscriminativecapability.Muchlikedropoutisbaggingtakentotheextreme,multi-tasklearningisanalogoustoboosting,ifeachtaskisconsideredaweaklearner.Wenotethatourmodelcanbepluggedintoorusedinconjunctionwithmorecomplexmulti-stagepipelinemethodssuchas[7]–[10]"
+9ac15845defcd0d6b611ecd609c740d41f0c341d,Robust Color-based Vision for Mobile Robots,"Copyright
+Juhyun Lee"
+9af1cf562377b307580ca214ecd2c556e20df000,International Journal of Advanced Studies in Computer Science and Engineering,"Feb. 28
+International Journal of Advanced Studies in Computer Science and Engineering
+IJASCSE, Volume 4, Issue 2, 2015
+Video-Based Facial Expression Recognition
+Using Local Directional Binary Pattern
+Sahar Hooshmand, Ali Jamali Avilaq, Amir Hossein Rezaie
+Electrical Engineering Dept., AmirKabir Univarsity of Technology
+Tehran, Iran"
+9a9af8a5b6939a1da9936608fbf071f852eca7e1,Deep Part Features Learning by a Normalised Double-Margin-Based Contrastive Loss Function for Person Re-Identification,
+9a23a0402ae68cc6ea2fe0092b6ec2d40f667adb,High-Resolution Image Synthesis and Semantic Manipulation with Conditional GANs,"High-Resolution Image Synthesis and Semantic Manipulation with Conditional GANs
+Ting-Chun Wang1 Ming-Yu Liu1
+Jun-Yan Zhu2 Andrew Tao1
+Jan Kautz1 Bryan Catanzaro1
+NVIDIA Corporation
+UC Berkeley
+Figure 1: We propose a generative adversarial framework for synthesizing 2048 × 1024 images from semantic label maps
+(lower left corner in (a)). Compared to previous work [5], our results express more natural textures and details. (b) We can
+hange labels in the original label map to create new scenes, like replacing trees with buildings. (c) Our framework also
+llows a user to edit the appearance of individual objects in the scene, e.g. changing the color of a car or the texture of a road.
+Please visit our website for more side-by-side comparisons as well as interactive editing demos."
+9ad27106b8e0cf14e8e2814dc318142138d5527b,Camera Style Adaptation for Person Re-identification,"Camera 6Style Transfer(a) Example images under two cameras from Market-1501(b) Examples of camera-aware style transfer between two camerasrealtransferredrealtransferredFigure1.(a)ExampleimagesfromMarket-1501[42].(b)Exam-plesofcamera-awarestyletransferbetweentwocamerasusingourmethod.Imagesinthesamecolumnrepresentthesameperson.ancepropertyunderdifferentcameras.Examplesintradi-tionalapproachesincludeKISSME[16],XQDA[20],DNS[39],etc.Examplesindeeprepresentationlearningmeth-odsincludeIDE[43],SVDNet[29],TripletNet[11],etc.Comparingtopreviousmethods,thispaperresortstoanexplicitstrategyfromtheviewofcamerastyleadapta-tion.Wearemostlymotivatedbytheneedforlargedatavolumeindeeplearningbasedpersonre-ID.Tolearnrichfeatureswhicharerobusttocameravariations,annotatinglarge-scaledatasetsisusefulbutprohibitivelyexpensive.Nevertheless,ifwecanaddmoresamplestothetrainingsetthatareawareofthestyledifferencesbetweencameras,weareableto1)addressthedatascarcityprobleminpersonre-IDand2)learninvariantfeaturesacrossdifferentcameras.Preferably,thisprocessshouldnotcostanymorehumanla-beling,sothatthebudgetiskeptlow.Basedontheabovediscussions,weproposeacam-erastyle(CamStyle)adaptationmethodtoregularizeCNNtrainingforpersonre-ID.Initsvanillaversion,welearnimage-imagetranslationmodelsforeachcamerapairwithCycleGAN[51].WiththelearnedCycleGANmodel,foratrainingimagecapturedbyacertaincamera,wecangener-"
+9a7784eea6bfa62bf2834ee0b87a3cdda46006f2,Digital Comics Image Indexing Based on Deep Learning,"Article
+Digital Comics Image Indexing Based on
+Deep Learning
+Nhu-Van Nguyen * ID , Christophe Rigaud ID and Jean-Christophe Burie ID
+Lab L3I, University of La Rochelle, 17000 La Rochelle, France; (C.R.);
+(J.-C.B.)
+* Correspondence:
+Received: 30 April 2018; Accepted: 27 June 2018; Published: 2 July 2018"
+9a9a888bcce37e582b8a5b5f12f662e487443e5c,Cascaded Pyramid Network for Multi-Person Pose Estimation,"Cascaded Pyramid Network for Multi-Person Pose Estimation
+Yilun Chen∗ Zhicheng Wang∗ Yuxiang Peng1
+Zhiqiang Zhang2 Gang Yu
+Jian Sun
+Megvii Inc. (Face++), {chenyilun, wangzhicheng, pyx, zhangzhiqiang, yugang,
+Tsinghua University 2HuaZhong University of Science and Technology"
+9a7858eda9b40b16002c6003b6db19828f94a6c6,Mooney face classification and prediction by learning across tone,"MOONEY FACE CLASSIFICATION AND PREDICTION BY LEARNING ACROSS TONE
+Tsung-Wei Ke(cid:63)†
+Stella X. Yu(cid:63)†
+David Whitney(cid:63)
+(cid:63) UC Berkeley / †ICSI"
+9a2ed8abaa17834cb8f227a9353c8cfed3a367cd,A Method of Detecting Abnormal Crowd Behavior Events Applied in Air Patrol Robot,"A Method of Detecting Abnormal Crowd Behavior Events Applied in Air Patrol Robot
+School of Electrical and Electronic Engineering ,Shanghai Institute of Technology, Shanghai, China
+Huailin Zhao
+School of Electrical and Electronic Engineering ,Shanghai Institute of Technology, Shanghai, China
+Shunzhou Wang
+School of Electrical and Electronic Engineering ,Shanghai Institute of Technology, Shanghai, China
+Shifang Xu
+School of Computer Science and Information Engineering ,Shanghai Institute of Technology, Shanghai, China
+Yani Zhang
+Masanori Sugisaka
+Alife Robotics Corporation LTD, Oita, Japan"
+9abc9e3cadbec9139b39dfddb0de6c08b7aaf2d0,Pain Intensity Evaluation through Facial Action Units,"Pain Intensity Evaluation Through Facial Action
+Units
+Zuhair Zafar
+Dept. of Electrical Engineering, SBASSE,
+Lahore University of Management Sciences,
+Lahore, Pakistan
+Nadeem Ahmad Khan
+Dept. of Electrical Engineering, SBASSE,
+Lahore University of Management Sciences,
+Lahore, Pakistan"
+9a88d23234ee41965ac17fc5774348563448a94d,3021977 GI P_212 Cover.indd,"Gesellschaft für Informatik e.V. (GI)
+publishes this series in order to make available to a broad public
+recent findings in informatics (i.e. computer science and informa-
+tion systems), to document conferences that are organized in co-
+operation with GI and to publish the annual GI Award dissertation.
+Broken down into
+• seminars
+• proceedings
+• dissertations
+• thematics
+urrent topics are dealt with from the vantage point of research and
+development, teaching and further training in theory and practice.
+The Editorial Committee uses an intensive review process in order
+to ensure high quality contributions.
+The volumes are published in German or English.
+Information: http://www.gi.de/service/publikationen/lni/
+ISSN 1617-5468
+ISBN 978-3-88579-606-0
+The proceedings of the BIOSIG 2013 include scientific contributions of the annual
+onference of the Biometrics Special Interest Group (BIOSIG) of the Gesellschaft"
+9a276c72acdb83660557489114a494b86a39f6ff,Emotion Classification through Lower Facial Expressions using Adaptive Support Vector Machines,"Emotion Classification through Lower Facial Expressions using Adaptive
+Support Vector Machines
+Porawat Visutsak
+Department of Information Technology, Faculty of Industrial Technology and Management,
+King Mongkut’s University of Technology North Bangkok,"
+9ad65c5c5a2b22ef0343831fe0dabc2055d72497,Eyediap Database: Data Description and Gaze Tracking Evaluation Benchmarks,"EYEDIAP DATABASE: DATA DESCRIPTION
+AND GAZE TRACKING EVALUATION
+BENCHMARKS
+Kenneth Alberto Funes Mora Florent Monay
+Jean-Marc Odobez
+Idiap-RR-08-2014
+Version of SEPTEMBER 18, 2014
+Centre du Parc, Rue Marconi 19, P.O. Box 592, CH - 1920 Martigny
+T +41 27 721 77 11 F +41 27 721 77 12 www.idiap.ch"
+9a1a9dd3c471bba17e5ce80a53e52fcaaad4373e,Automatic Recognition of Spontaneous Facial Actions,"Automatic Recognition of Spontaneous Facial
+Actions
+Marian Stewart Bartlett1, Gwen C. Littlewort1, Mark G. Frank2, Claudia Lainscsek1,
+Ian R. Fasel1, Javier R. Movellan1
+Institute for Neural Computation, University of California, San Diego.
+Department of Communication, University at Buffalo, State University of New York."
+9a08459b0cb133f0f4352c58225446f9dc95ecc4,Metadata of the chapter that will be visualized in SpringerLink,"Metadata of the chapter that will be visualized in
+SpringerLink
+Book Title
+Series Title
+Chapter Title
+Copyright Year
+Copyright HolderName
+Author
+Corresponding Author
+Author
+Author
+Instituto de Investigación en Informática de Albacete
+Universidad de Castilla-La Mancha
+02071, Albacete, Spain
+Ambient Assisted Living. ICT-based Solutions in Real Life Situations
+Sokolova
+Marina V.
+Fernández-Caballero
+Experimentation on Emotion Regulation with Single-Colored Images
+Springer International Publishing Switzerland"
+9a42c519f0aaa68debbe9df00b090ca446d25bc4,Face Recognition via Centralized Coordinate Learning,"Face Recognition via Centralized Coordinate
+Learning
+Xianbiao Qi, Lei Zhang"
+9a03b7b71a82fc2c86b3b4cbec802dfc16978486,One-Shot Observation Learning,"One-Shot Observation Learning
+Leo Pauly, Wisdom C. Agboh, Mohamed Abdellatif, David C. Hogg, Raul Fuentes"
+9aad8e52aff12bd822f0011e6ef85dfc22fe8466,Temporal-Spatial Mapping for Action Recognition,"Temporal-Spatial Mapping for Action Recognition
+Xiaolin Song, Cuiling Lan, Wenjun Zeng, Junliang Xing, Jingyu Yang, and Xiaoyan Sun"
+9a9019972dece591f502a2f794e81648b9e064fe,Combination of facial landmarks for robust eye localization using the Discriminative Generalized Hough Transform,"Combination of Facial Landmarks
+for Robust Eye Localization
+Using the Discriminative Generalized Hough Transform
+Ferdinand Hahmann, Gordon B¨oer, Hauke Schramm
+Institute of Applied Computer Science
+University of Applied Sciences Kiel
+Grenzstraße 3, 24149 Kiel"
+363ca0a3f908859b1b55c2ff77cc900957653748,Local Binary Patterns and Linear Programming using Facial Expression,"International Journal of Computer Trends and Technology (IJCTT) – volume 1 Issue 3 Number 4 – Aug 2011
+Local Binary Patterns and Linear Programming using
+Facial Expression
+Ms.P.Jennifer
+#MCA Department, Bharath Institute of Science and Technology
++B.Tech (C.S.E), Bharath University , Chennai – 73.
+Dr. A. Muthu kumaravel
+#MCA Department, Bharath Institute of Science and Technology
++B.Tech (C.S.E), Bharath University , Chennai – 73."
+36d8cc038db71a473d0c94c21f2b68a840dff21c,Unsupervised Detector Adaptation by Joint Dataset Feature Learning," 
+ 
+ 
+     
+ 
+   
+ 
+ 
+    
+    
+
+
+!∀∀
+##!∃%&∋()  
+  
+∗+,
+    
+ #−./!0!∀
+ !!2!342
+,"
+36cbcd70af6f2fd3e700e0a710acd5f1f6abebcf,Matching People across Camera Views using Kernel Canonical Correlation Analysis,"Matching People across Camera Views using
+Kernel Canonical Correlation Analysis
+Giuseppe Lisanti , Iacopo Masi , Alberto Del Bimbo
+Media Integration and Communication Center (MICC), Università degli Studi di Firenze
+Viale Morgagni 65 - 50134 Firenze, Italy"
+36358eff7c34de64c0ce8aa42cf7c4da24bf8e93,Deep Metric Learning for Person Re-identification,"Deep Metric Learning for Person Re-Identification
+(Invited Paper)
+Dong Yi, Zhen Lei, Shengcai Liao and Stan Z. Li
+Center for Biometrics and Security Research & National Laboratory of Pattern Recognition
+Institute of Automation, Chinese Academy of Sciences (CASIA)"
+367231b80e8201fc9c461fbb42047b20e89ea961,Impatient DNNs - Deep Neural Networks with Dynamic Time Budgets,"MANUEL AMTHOR, ERIK RODNER, AND JOACHIM DENZLER: IMPATIENT DNNS
+Impatient DNNs – Deep Neural Networks
+with Dynamic Time Budgets
+Manuel Amthor
+Erik Rodner
+Joachim Denzler
+Computer Vision Group
+Friedrich Schiller University Jena
+Germany
+www.inf-cv.uni-jena.de"
+36fa002f36e14ab7d24ebcdd99b6589ed726b383,Detecting conversational gaze aversion using unsupervised learning,"Detecting Conversational Gaze Aversion Using
+Unsupervised Learning
+Matthew Roddy, Naomi Harte
+ADAPT Centre, School of Engineering
+Trinity College Dublin, Ireland"
+362cfe79a6822f9e317555c5e3469dd038b9053f,Damped Gauss-Newton algorithm for nonnegative Tucker decomposition,"978-1-4577-0568-7/11/$26.00 ©2011 IEEE
+DY, An , G (cid:2) (cid:12)Y  G  A (cid:12)2
+DECOMPOSITION
+. INTRODUCTION"
+364584f8313e7601b1f5134d371e98aeb61110e8,An invariant bipolar representation for 3D surfaces,"An invariant bipolar representation for 3D surfaces
+M. JRIBI and F. GHORBEL
+CRSITAL Laboratory / GRIFT research group,
+Ecole Nationale des Sciences de l’Informatique (ENSI),
+La Manouba University, 2010 La Manouba, Tunisia"
+36939e6a365e9db904d81325212177c9e9e76c54,"Assessing the Accuracy of Four Popular Face Recognition Tools for Inferring Gender, Age, and Race","Assessing the Accuracy of Four Popular Face Recognition Tools for
+Inferring Gender, Age, and Race
+Soon-Gyo Jung, Jisun An, Haewoon Kwak, Joni Salminen, Bernard J. Jansen
+Qatar Computing Research Institute, HBKU
+HBKU Research Complex, Doha, P.O. Box 34110, Qatar"
+366c14f477bf2ed16b1498d1c56a7e1f2af08e69,Comparative Analysis of Statistical Shape Spaces,"Comparative Analysis of Statistical Shape Spaces
+Alan Brunton∗
+Augusto Salazar†
+Timo Bolkart†
+Stefanie Wuhrer†"
+3646b42511a6a0df5470408bc9a7a69bb3c5d742,Detection of Facial Parts based on ABLATA,"International Journal of Computer Applications (0975 – 8887)
+Applications of Computers and Electronics for the Welfare of Rural Masses (ACEWRM) 2015
+Detection of Facial Parts based on ABLATA
+Siddhartha Choubey
+Shri Shankaracharya
+Technical Campus, Bhilai
+Vikas Singh
+Shri Shankaracharya
+Technical Campus, Bhilai
+Abha Choubey
+Shri Shankaracharya
+Technical Campus, Bhilai"
+36cd55cdb1b032c8f29e011ed0637923afc46d3f,Strategies to Improve Activity Recognition Based on Skeletal Tracking: Applying Restrictions Regarding Body Parts and Similarity Boundaries †,"Article
+Strategies to Improve Activity Recognition Based on
+Skeletal Tracking: Applying Restrictions Regarding
+Body Parts and Similarity Boundaries †
+Carlos Gutiérrez-López-Franca *, Ramón Hervás and Esperanza Johnson
+MAmI Research Lab, University of Castilla-La Mancha, Paseo de la Universidad 4, 13071 Ciudad Real, Spain;
+(R.H.); (E.J.)
+* Correspondence:
+This paper is an extended version of our paper published in Gutiérrez López de la Franca, C.; Hervás, R.;
+Johnson, E.; Bravo, J. Findings about Selecting Body Parts to Analyze Human Activities through Skeletal
+Tracking Joint Oriented Devices. In Proceedings of the 10th International Conference on Ubiquitous
+Computing and Ambient Intelligence (UCAMI 2016), Gran Canaria, Spain, 29 November–2 December 2016.
+Received: 4 April 2018; Accepted: 17 May 2018; Published: 22 May 2018"
+36fe39ed69a5c7ff9650fd5f4fe950b5880760b0,Tracking von Gesichtsmimik mit Hilfe von Gitterstrukturen zur Klassifikation von schmerzrelevanten Action Units,"Tracking von Gesichtsmimik
+mit Hilfe von Gitterstrukturen
+zur Klassifikation von schmerzrelevanten Action
+Units
+Christine Barthold1, Anton Papst1, Thomas Wittenberg1
+Christian K¨ublbeck1, Stefan Lautenbacher2, Ute Schmid2, Sven Friedl1,3
+Fraunhofer-Institut f¨ur Integrierte Schaltungen IIS, Erlangen,
+Otto-Friedrich-Universit¨at Bamberg, 3Universit¨atsklinkum Erlangen
+Kurzfassung. In der Schmerzforschung werden schmerzrelevante Mi-
+mikbewegungen von Probanden mittels des Facial Action Coding System
+klassifiziert. Die manuelle Klassifikation hierbei ist aufw¨andig und eine
+utomatische (Vor-)klassifikation k¨onnte den diagnostischen Wert dieser
+Analysen erh¨ohen sowie den klinischen Workflow unterst¨utzen. Der hier
+vorgestellte regelbasierte Ansatz erm¨oglicht eine automatische Klassifika-
+tion ohne große Trainingsmengen vorklassifizierter Daten. Das Verfahren
+erkennt und verfolgt Mimikbewegungen, unterst¨utzt durch ein Gitter,
+und ordnet diese Bewegungen bestimmten Gesichtsarealen zu. Mit die-
+sem Wissen kann aus den Bewegungen auf die zugeh¨origen Action Units
+geschlossen werden.
+Einleitung"
+363e5a0e4cd857e98de72a726ad6f80cea9c50ab,Fast Landmark Localization With 3D Component Reconstruction and CNN for Cross-Pose Recognition,"Fast Landmark Localization
+with 3D Component Reconstruction and CNN for
+Cross-Pose Recognition
+Gee-Sern (Jison) Hsu, Hung-Cheng Shie, Cheng-Hua Hsieh"
+36b2aa7248152fdad7bc7f670d0b577c9728d466,Data-dependent Initializations of Convolutional Neural Networks,"Under review as a conference paper at ICLR 2016
+DATA-DEPENDENT INITIALIZATIONS OF
+CONVOLUTIONAL NEURAL NETWORKS
+Philipp Kr¨ahenb¨uhl1, Carl Doersch1,2, Jeff Donahue1, Trevor Darrell1
+Department of Electrical Engineering and Computer Science, UC Berkeley
+Machine Learning Department, Carnegie Mellon"
+36fc4120fc0638b97c23f97b53e2184107c52233,Introducing Celebrities in an Images using HAAR Cascade algorithm,"National Conference on Innovative Paradigms in Engineering & Technology (NCIPET-2013)
+Proceedings published by International Journal of Computer Applications® (IJCA)
+Introducing Celebrities in an Images using HAAR
+Cascade algorithm
+Jaya M. Jadhav
+Deipali V. Gore
+Asst. Professor
+Rashmi R. Tundalwar
+PES Modern College of Engg.
+PES Modern College of Engg.
+PES Modern College of Engg.
+Shivaji Nagar, Pune
+Shivaji Nagar, Pune
+Shivaji Nagar, Pune"
+361367838ee5d9d5c9a77c69c1c56b1c309ab236,Salient Object Detection: A Survey,"Salient Object Detection: A Survey
+Ali Borji, Ming–Ming Cheng, Huaizu Jiang and Jia Li"
+36ca720185b62e92a7f3cce75418356a5a125d24,Template aging in 3D and 2D face recognition,"Template Aging in 3D and 2D Face Recognition
+Ishan Manjani∗
+Hakki Sumerkan†
+Patrick J. Flynn†
+Kevin W. Bowyer†"
+36ce0b68a01b4c96af6ad8c26e55e5a30446f360,Facial expression recognition based on a mlp neural network using constructive training algorithm,"Multimed Tools Appl
+DOI 10.1007/s11042-014-2322-6
+Facial expression recognition based on a mlp neural
+network using constructive training algorithm
+Hayet Boughrara · Mohamed Chtourou ·
+Chokri Ben Amar · Liming Chen
+Received: 5 February 2014 / Revised: 22 August 2014 / Accepted: 13 October 2014
+© Springer Science+Business Media New York 2014"
+3674f3597bbca3ce05e4423611d871d09882043b,Facial Expression Spacial Charts for Describing Dynamic Diversity of Facial Expressions,"ISSN 1796-2048
+Volume 7, Number 4, August 2012
+Contents
+Special Issue: Multimedia Contents Security in Social Networks Applications
+Guest Editors: Zhiyong Zhang and Muthucumaru Maheswaran
+Guest Editorial
+Zhiyong Zhang and Muthucumaru Maheswaran
+SPECIAL ISSUE PAPERS
+DRTEMBB: Dynamic Recommendation Trust Evaluation Model Based on Bidding
+Gang Wang and Xiao-lin Gui
+Block-Based Parallel Intra Prediction Scheme for HEVC
+Jie Jiang, Baolong, Wei Mo, and Kefeng Fan
+Optimized LSB Matching Steganography Based on Fisher Information
+Yi-feng Sun, Dan-mei Niu, Guang-ming Tang, and Zhan-zhan Gao
+A Novel Robust Zero-Watermarking Scheme Based on Discrete Wavelet Transform
+Yu Yang, Min Lei, Huaqun Liu, Yajian Zhou, and Qun Luo
+Stego Key Estimation in LSB Steganography
+Jing Liu and Guangming Tang
+REGULAR PAPERS
+Facial Expression Spacial Charts for Describing Dynamic Diversity of Facial Expressions"
+362bfeb28adac5f45b6ef46c07c59744b4ed6a52,Incorporating Scalability in Unsupervised Spatio- Temporal Feature Learning,"INCORPORATING SCALABILITY IN UNSUPERVISED SPATIO-TEMPORAL FEATURE
+LEARNING
+Sujoy Paul, Sourya Roy and Amit K. Roy-Chowdhury
+Dept. of Electrical and Computer Engineering, University of California, Riverside, CA 92521"
+36918b2ef6b20ffb8cffe458c0067742500c6149,"""Look, some Green Circles!"": Learning to Quantify from Images","Proceedings of the 5th Workshop on Vision and Language, pages 75–79,
+Berlin, Germany, August 12 2016. c(cid:13)2016 Association for Computational Linguistics"
+365866dc937529c3079a962408bffaa9b87c1f06,Facial Feature Expression Based Approach for Human Face Recognition: A Review,"IJISET - International Journal of Innovative Science, Engineering & Technology, Vol. 1 Issue 3, May 2014.
+www.ijiset.com
+ISSN 2348 – 7968
+Facial Feature Expression Based Approach for Human Face
+Recognition: A Review
+Jageshvar K. Keche1, Mahendra P. Dhore2
+Department of Computer Science, SSESA, Science College, Congress Nagar, Nagpur, (MS)-India,
+Department of Electronics & Computer Science, RTM Nagpur University, Campus Nagpur, (MS)-India.
+required
+extraction of"
+362a70b6e7d55a777feb7b9fc8bc4d40a57cde8c,A partial least squares based ranker for fast and accurate age estimation,"978-1-4799-9988-0/16/$31.00 ©2016 IEEE
+ICASSP 2016"
+360a590703542f2ba345b432416398b6dad9e3fb,Multimodal Person Reidentification Using RGB-D Cameras,"Multi-modal Person Re-Identification
+Using RGB-D Cameras
+Federico Pala, Member, IEEE, Riccardo Satta, Giorgio Fumera, Member, IEEE, and Fabio Roli, Fellow, IEEE"
+36c91b1342c1357877e89b4c43f8eadb39755c0b,Recognizing Human-Object Interactions in Still Images by Modeling the Mutual Context of Objects and Human Poses,"Recognizing Human-Object Interactions in
+Still Images by Modeling the Mutual Context
+of Objects and Human Poses
+Bangpeng Yao, Member, IEEE, and Li Fei-Fei, Member, IEEE"
+36c9731f24e5daa42c1e2c6c68258567dfa78a0a,Movement tracking in terrain conditions accelerated with CUDA,"Proceedings of the 2014 Federated Conference on
+Computer Science and Information Systems pp. 709–717
+DOI: 10.15439/2014F282
+ACSIS, Vol. 2
+978-83-60810-58-3/$25.00 c(cid:13) 2014, IEEE"
+3678dac7e9998567b92f526046a16e2910ced55d,Talking Robots: grounding a shared lexicon in an unconstrained environment,"Berthouze, L., Prince, C. G., Littman, M., Kozima, H., and Balkenius, C. (2007).
+Proceedings of the Seventh International Conference on Epigenetic Robotics: Modeling
+Cognitive Development in Robotic Systems. Lund University Cognitive Studies, 135.
+Talking Robots: grounding a shared lexicon in an
+unconstrained environment
+Matthieu Nottale
+Jean-Christophe Baillie
+ENSTA-UEI cognitive robotics lab."
+3630324c2af04fd90f8668f9ee9709604fe980fd,Image Classification With Tailored Fine-Grained Dictionaries,"This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TCSVT.2016.2607345, IEEE
+Transactions on Circuits and Systems for Video Technology
+Image Classification with Tailored Fine-Grained
+Dictionaries
+Xiangbo Shu, Jinhui Tang, Guo-Jun Qi, Zechao Li, Yu-Gang Jiang and Shuicheng Yan"
+36513f869e5ba2928369014244dff998ab93728c,Discriminative cluster analysis,"Chapter 1
+Discriminative Cluster Analysis
+Fernando De la Torre and Takeo Kanade"
+36973330ae638571484e1f68aaf455e3e6f18ae9,Scale-Aware Fast R-CNN for Pedestrian Detection,"Scale-aware Fast R-CNN for Pedestrian Detection
+Jianan Li, Xiaodan Liang, ShengMei Shen, Tingfa Xu, and Shuicheng Yan"
+36b322095bd0953d6076096111e4a020f427793b,Large Displacement Optical Flow: Descriptor Matching in Variational Motion Estimation,"This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication.
+Large Displacement Optical Flow:
+Descriptor Matching in Variational
+Motion Estimation
+Thomas Brox, Jitendra Malik, Fellow, IEEE"
+36cf96fe11a2c1ea4d999a7f86ffef6eea7b5958,RGB-D Face Recognition With Texture and Attribute Features,"RGB-D Face Recognition with Texture and
+Attribute Features
+Gaurav Goswami, Student Member, IEEE, Mayank Vatsa, Senior Member, IEEE, and Richa Singh, Senior
+Member, IEEE"
+36018404263b9bb44d1fddaddd9ee9af9d46e560,Occluded Face Recognition by Using Gabor Features,"OCCLUDED FACE RECOGNITION BY USING GABOR
+FEATURES
+Burcu Kepenekci 1,2, F. Boray Tek 1,2, Gozde Bozdagi Akar 1
+Department of Electrical And Electronics Engineering, METU, Ankara, Turkey
+7h%ł7$.(cid:3)%ł/7(1(cid:15)(cid:3)$QNDUD(cid:15)(cid:3)7XUNH\"
+36f039e39efde3558531b99d85cd9e3ab7d396b3,Efficiency of Recognition Methods for Single Sample per Person Based Face Recognition,"Efficiency of Recognition Methods for Single
+Sample per Person Based Face Recognition
+Miloš Oravec, Jarmila Pavlovičová, Ján Mazanec,
+Ľuboš Omelina, Matej Féder and Jozef Ban
+Faculty of Electrical Engineering and Information Technology
+Slovak University of Technology in Bratislava
+Slovakia
+. Introduction
+Even for the present-day computer technology, the biometric recognition of human face is
+difficult task and continually evolving concept in the area of biometric recognition. The
+rea of face recognition is well-described today in many papers and books, e.g. (Delac et al.,
+008), (Li & Jain, 2005), (Oravec et al., 2010). The idea that two-dimensional still-image face
+recognition in controlled environment is already a solved task is generally accepted and
+several benchmarks evaluating recognition results were done in this area (e.g. Face
+Recognition Vendor Tests, FRVT 2000, 2002, 2006, http://www.frvt.org/). Nevertheless,
+many tasks have to be solved, such as recognition in unconstrained environment,
+recognition of non-frontal images, single sample per person problem, etc.
+This chapter deals with single sample per person face recognition (also called one sample
+per person problem). This topic is related to small sample size problem in pattern
+recognition. Although there are also advantages of single sample – fast and easy creation of"
+367b5b814aa991329c2ae7f8793909ad8c0a56f1,Performance evaluation of random set based pedestrian tracking algorithms,"Performance Evaluation of Random Set Based
+Pedestrian Tracking Algorithms
+Branko Ristic
+ISR Division
+Australia
+Jamie Sherrah
+ISR Division
+Australia
+´Angel F. Garc´ıa-Fern´andez
+Department of Signals and Systems
+Chalmers University of Technology
+Sweden"
+36688a79cc8926f489ccb6e6dadba15afbb4b6a4,Linear discriminant analysis for the small sample size problem: an overview,"Int. J. Mach. Learn. & Cyber.
+DOI 10.1007/s13042-013-0226-9
+O R I G I N A L A R T I C L E
+Linear discriminant analysis for the small sample size problem:
+n overview
+Alok Sharma • Kuldip K. Paliwal
+Received: 19 March 2013 / Accepted: 26 December 2013
+Ó Springer-Verlag Berlin Heidelberg 2014"
+368132f8dfcbd6e857dfc1b7dce2ab91bd9648ad,"Simultaneous Localization And Mapping: Present, Future, and the Robust-Perception Age","Simultaneous Localization And Mapping:
+Present, Future, and the Robust-Perception Age
+Cesar Cadena, Luca Carlone, Henry Carrillo, Yasir Latif,
+Davide Scaramuzza, Jos´e Neira, Ian D. Reid, John J. Leonard"
+367008b91eb57c5ea64ef7520dfcabc0c5c85532,"Person Re-identification: Past, Present and Future","JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+Person Re-identification:
+Past, Present and Future
+Liang Zheng, Yi Yang, and Alexander G. Hauptmann"
+365b72a225a18a930b96e7c0b215b9fede8a0968,Storyline Reconstruction for Unordered Images,"Storyline Reconstruction for Unordered Images
+Final Paper
+Sameedha Bairagi, Arpit Khandelwal, Venkatesh Raizaday
+Introduction:
+Storyline reconstruction is a relatively new topic and has not been researched extensively. The
+main objective is to take a stream of images as input and re-shuffle them in chronological order.
+The recent growth of online multimedia data has generated lots and lots of unstructured data on
+the web. Image streams are generated daily on websites like Flicker, Instagram etc. and almost
+00 hours of video is uploaded on YouTube on a daily basis.
+In this paper, we try and implement an algorithm which uses the property of videos of being
+temporally adept to sort a stream of unordered images. The basic process is as follows:
+- Generate key frames/video summary of a video from multiple instances of the same
+ategory.
+- Cluster these key frames on the basis of the action being performed in them.
+- Create a graph from these clusters using temporal data from the videos.
+- Take an input stream of images and assign each image to its most probable cluster.
+- Use the graph to assign ordering to the images.
+In the following sections, we will try and go deep into each of the step mentioned above and
+discuss multiple approaches we implemented to do the same.
+Background and Related work:"
+362250566948f17693b737122fc1434173982da8,Automatic Image Annotation using Weakly Labelled Web Data,"Automatic Image Annotation using
+Weakly Labelled Web Data
+Pravin Kakar, Xiangyu Wang and Alex Yong-Sang Chia
+Social Media and Internet Vision Analytics Lab,
+Institute for Infocomm Research,
+#21-01, 1 Fusionopolis Way,
+{kakarpv, wangx,
+Singapore 138632."
+36ab143da8b6f6d49811afaaa7bcbf81c22a210e,Modeling Multimodal Clues in a Hybrid Deep Learning Framework for Video Classification,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+Modeling Multimodal Clues in a Hybrid Deep
+Learning Framework for Video Classification
+Yu-Gang Jiang, Zuxuan Wu, Jinhui Tang, Zechao Li, Xiangyang Xue, Shih-Fu Chang"
+366595171c9f4696ec5eef7c3686114fd3f116ad,Algorithms and Representations for Visual Recognition,"Algorithms and Representations for Visual
+Recognition
+Subhransu Maji
+Electrical Engineering and Computer Sciences
+University of California at Berkeley
+Technical Report No. UCB/EECS-2012-53
+http://www.eecs.berkeley.edu/Pubs/TechRpts/2012/EECS-2012-53.html
+May 1, 2012"
+3607afdb204de9a5a9300ae98aa4635d9effcda2,Face Description with Local Binary Patterns: Application to Face Recognition,"Face Description with Local Binary Patterns:
+Application to Face Recognition
+Timo Ahonen, Student Member, IEEE, Abdenour Hadid,
+nd Matti Pietik¨ainen, Senior Member, IEEE"
+367c571480ac46d48be050dee4e6103a0ebb5db5,Multimedia Content Based Image Retrieval Iii: Local Tetra Pattern,"Manas M N et al Int. Journal of Engineering Research and Applications www.ijera.com
+ISSN : 2248-9622, Vol. 4, Issue 6( Version 3), June 2014, pp.104-107
+RESEARCH ARTICLE
+OPEN ACCESS
+Multimedia Content Based Image Retrieval Iii: Local Tetra
+Pattern
+Nagaraja G S1, Rajashekara Murthy S2, Manas M N3, Sridhar N H4
+(Department of CSE, RVCE, Visvesvaraya Technological University, Bangalore-59, Karnataka, India)
+(Department of ISE, RVCE, Visvesvaraya Technological University, Bangalore-59, Karnataka, India)
+(M. Tech, Department of CSE, RVCE, Visvesvaraya Technological University, Bangalore-59, Karnataka,
+India)
+(Research Scholar, Department of CSE, RVCE, Visvesvaraya Technological University, Bangalore-59,
+Karnataka, India)"
+36119c10f75094e0568cae8256400c94546d973b,The CASIA NIR-VIS 2.0 Face Database,"The CASIA NIR-VIS 2.0 Face Database
+Stan Z. Li, Dong Yi, Zhen Lei and Shengcai Liao
+Center for Biometrics and Security Research & National Laboratory of Pattern Recognition
+Institute of Automation, Chinese Academy of Sciences (CASIA)
+szli, dyi, zlei,"
+36b9faf0d6c4c6296193b8d5d7833624a181624c,Real-Time Multiple Human Perception With Color-Depth Cameras on a Mobile Robot,"Real-Time Multiple Human Perception
+with Color-Depth Cameras on a Mobile Robot
+Hao Zhang, Student Member, IEEE, Christopher Reardon, Student Member, IEEE, and Lynne E. Parker, Fellow, IEEE"
+5c6ccca19179fd217a74ccb954a4c4370e4203e2,Correspondences of Persistent Feature Points on Near-Isometric Surfaces,"Correspondences of Persistent Feature Points
+on Near-Isometric Surfaces
+Ying Yang1,2, David G¨unther1,3, Stefanie Wuhrer3,1, Alan Brunton3,4
+Ioannis Ivrissimtzis2, Hans-Peter Seidel1, Tino Weinkauf1 (cid:63)
+MPI Informatik 2Durham University 3Saarland University 4University of Ottawa"
+5cb343e447c7fd933ff8f57fc9c99c5673cad97d,MoCap-guided Data Augmentation for 3D Pose Estimation in the Wild,"MoCap-guided Data Augmentation
+for 3D Pose Estimation in the Wild
+Grégory Rogez
+Cordelia Schmid
+Inria Grenoble Rhône-Alpes, Laboratoire Jean Kuntzmann, France"
+5ca2e14f91dffb4784c443fe5cfe7838c3f3713c,Convolutional Recurrent Predictor: Implicit Representation for Multi-target Filtering and Tracking,"Convolutional Recurrent Predictor:
+Implicit Representation for Multi-target Filtering and Tracking
+Mehryar Emambakhsh, Alessandro Bay and Eduard Vazquez
+{mehryar.emambakhsh, alessandro.bay,
+Cortexica Vision Systems
+London, UK"
+5c6de2d9f93b90034f07860ae485a2accf529285,Compensating for pose and illumination in unconstrained periocular biometrics,"Int. J. Biometrics, Vol. X, No. Y, xxxx
+Compensating for pose and illumination in
+unconstrained periocular biometrics
+Chandrashekhar N. Padole and
+Hugo Proença*
+Department of Computer Science,
+IT – Instituto de Telecomunicações,
+University of Beira Interior,
+6200-Covilhã, Portugal
+Fax: +351-275-319899
+E-mail:
+E-mail:
+*Corresponding author"
+5c5dbca68946434afb201f0df90011104c85e4c4,Robust 3D Patch-Based Face Hallucination,"Robust 3D Patch-Based Face Hallucination
+Chengchao Qu1,2 Christian Herrmann1,2 Eduardo Monari2 Tobias Schuchert2
+J¨urgen Beyerer2,1
+Vision and Fusion Laboratory (IES), Karlsruhe Institute of Technology (KIT)
+Fraunhofer Institute of Optronics, System Technologies and Image Exploitation (Fraunhofer IOSB)"
+5cc9fdd3a588f6e62e46d7884c1dbeef92a782f2,Spontaneous attention to faces in Asperger syndrome using ecologically valid static stimuli.,"Durham Research Online
+Deposited in DRO:
+6 December 2014
+Version of attached le:
+Accepted Version
+Peer-review status of attached le:
+Peer-reviewed
+Citation for published item:
+Hanley, M. and McPhillips, M. and Mulhern, G. and Riby, D. M. (2013) 'Spontaneous attention to faces in
+Asperger Syndrome using ecologically valid static stimuli.', Autism., 17 (6). pp. 754-761.
+Further information on publisher's website:
+http://dx.doi.org/10.1177/1362361312456746
+Publisher's copyright statement:
+Use policy
+The full-text may be used and/or reproduced, and given to third parties in any format or medium, without prior permission or charge, for
+personal research or study, educational, or not-for-prot purposes provided that:
+• a full bibliographic reference is made to the original source
+• a link is made to the metadata record in DRO
+• the full-text is not changed in any way
+The full-text must not be sold in any format or medium without the formal permission of the copyright holders."
+5c7db2907c586f4f2d6ae5937b0dc0f4d1bc834a,Deliverable D2.1 Audio-visual Algorithms for Person Tracking and Characterization (baseline),"MULTIMODAL MALL ENTERTAINMENT ROBOT
+mummer-project.eu
+Grant No. 688147. Project started 2016-03-01. Duration 48 months.
+DELIVERABLE D2.1
+AUDIO-VISUAL ALGORITHMS FOR PERSON
+TRACKING AND CHARACTERIZATION (BASELINE)
+Jean-Marc Odobez (Idiap), Natalia Lyubova (SBRE),
+Olivier Can´evet (Idiap), Kenneth Funes Mora (Idiap),
+Weipeng He (Idiap), Angel Martinez Gonzalez (Idiap),
+Jean-Marc Montanier (SBRE), Marc Moreaux (SBRE)
+Beneficiaries:
+Workpackage:
+Idiap Research Institute (lead), SoftBank Robotics Europe
+Active Multimodal Sensing and Perception
+Version:
+Nature:
+Dissemination level:
+Pages:
+017-3-3
+Draft"
+5c0dc4dff1dfb5e27b19bef0713bccd9f85ce3b2,Joint probabilistic pedestrian head and body orientation estimation,"014 IEEE Intelligent Vehicles Symposium (IV)
+June 8-11, 2014. Dearborn, Michigan, USA
+978-1-4799-3637-3/14/$31.00 ©2014 IEEE"
+5c8ad080ccb3f5e3c999c2948029f0bd005d5635,Engaging Image Captioning,"ENGAGING IMAGE CAPTIONING VIA PERSONALITY
+Kurt Shuster, Samuel Humeau, Hexiang Hu, Antoine Bordes, Jason Weston
+Facebook AI Research"
+5c81048593a6729b2d0b948a1129a97bdbf82f11,Moving Object Localization Using Optical Flow for Pedestrian Detection from a Moving Vehicle,"Hindawi Publishing Corporation
+e Scientific World Journal
+Volume 2014, Article ID 196415, 8 pages
+http://dx.doi.org/10.1155/2014/196415
+Research Article
+Moving Object Localization Using Optical Flow for Pedestrian
+Detection from a Moving Vehicle
+Joko Hariyono, Van-Dung Hoang, and Kang-Hyun Jo
+Graduate School of Electrical Engineering, University of Ulsan, Ulsan 680-749, Republic of Korea
+Correspondence should be addressed to Kang-Hyun Jo;
+Received 9 April 2014; Revised 7 June 2014; Accepted 8 June 2014; Published 10 July 2014
+Academic Editor: Yu-Bo Yuan
+Copyright © 2014 Joko Hariyono et al. This is an open access article distributed under the Creative Commons Attribution License,
+which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
+This paper presents a pedestrian detection method from a moving vehicle using optical flows and histogram of oriented gradients
+(HOG). A moving object is extracted from the relative motion by segmenting the region representing the same optical flows after
+ompensating the egomotion of the camera. To obtain the optical flow, two consecutive images are divided into grid cells 14 × 14
+pixels; then each cell is tracked in the current frame to find corresponding cell in the next frame. Using at least three corresponding
+ells, affine transformation is performed according to each corresponding cell in the consecutive images, so that conformed optical
+flows are extracted. The regions of moving object are detected as transformed objects, which are different from the previously"
+5c271b5f96cfce1b4fdacc728ae8f8ebcbc738f9,A framework for implicit human-centered image tagging inspired by attributed affect,"Vis Comput (2013)
+O R I G I NA L A RT I C L E
+A framework for implicit human centered image tagging
+inspired by attributed affect
+Konstantinos C. Apostolakis · Petros Daras
+Published online:
+© Springer-Verlag Berlin Heidelberg 2013"
+5cfa8d0384bcdf5dfd7501561c748e69f3a2a747,Lip AUs Detection by Boost-SVM and Gabor,"Lip AUs Detection by Boost-SVM and Gabor
+Xianmei Wang, Yuyu Liang, Xiujie Zhao and Zhiliang Wang
+School of Computer and Communication Engineering, University of Science and Technology, Beijing, China
+Email:"
+5c2e264d6ac253693469bd190f323622c457ca05,Improving large-scale face image retrieval using multi-level features,"978-1-4799-2341-0/13/$31.00 ©2013 IEEE
+ICIP 2013"
+5c48f97a8a8217025abafeababaef6288fd7ded6,Model syndromes for investigating social cognitive and affective neuroscience: a comparison of Autism and Williams syndrome.,"doi:10.1093/scan/nsl035
+SCAN (2006) 1of 8
+Model syndromes for investigating social cognitive
+nd affective neuroscience: a comparison of
+utism and Williams syndrome
+Helen Tager-Flusberg, Daniela Plesa Skwerer, and Robert M. Joseph
+Boston University School of Medicine, Boston, MA, USA
+Autism and Williams syndrome are genetically based neurodevelopmental disorders that present strikingly different social
+phenotypes. Autism involves fundamental impairments in social reciprocity and communication, whereas people with Williams
+syndrome are highly sociable and engaging. This article reviews the behavioral and neuroimaging literature that has explored the
+neurocognitive mechanisms that underlie these contrasting social phenotypes, focusing on studies of face processing. The article
+oncludes with a discussion of how the social phenotypes of both syndromes may be characterized by impaired connectivity
+etween the amygdala and other critical regions in the ’social brain’.
+Keywords: autism; Williams syndrome; face processing; emotion processing; amygdala
+INTRODUCTION
+For the past two decades autism, (ASD)1 and Williams
+syndrome (WMS) have captured the interest and imagina-
+tion of cognitive neuroscientists. These neurodevelopmental
+disorders present striking phenotypes that hold out the
+promise of advancing our understanding of the biological"
+5cdc02ed9f456219369fe3115321564c9955b9ae,Real-time Analysis and Visualization of the YFCC100m Dataset,"Real-time Analysis and Visualization
+of the YFCC100m Dataset
+Firstname Lastname
+Institute
+City, Country"
+5ce40105e002f9cb428a029e8dec6efe8fad380e,Co-design of architectures and algorithms for mobile robot localization and model-based detection of obstacles. (Co-conception d'architectures et d'algorithmes pour la localisation de robots mobiles et la détection d'obstacles basée sur des modèles),"Co-design of architectures and algorithms for mobile
+robot localization and model-based detection of obstacles
+Daniel Törtei
+To cite this version:
+Daniel Törtei. Co-design of architectures and algorithms for mobile robot localization and model-based
+detection of obstacles. Embedded Systems. Université Paul Sabatier - Toulouse III, 2016. English.
+<NNT : 2016TOU30294>. <tel-01477662v2>
+HAL Id: tel-01477662
+https://tel.archives-ouvertes.fr/tel-01477662v2
+Submitted on 16 Feb 2018
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de"
+5c5e1f367e8768a9fb0f1b2f9dbfa060a22e75c0,Reference Face Graph for Face Recognition,"Reference Face Graph for Face Recognition
+Mehran Kafai, Member, IEEE, Le An, Student Member, IEEE, and Bir Bhanu, Fellow, IEEE"
+5c35ac04260e281141b3aaa7bbb147032c887f0c,Face Detection and Tracking Control with Omni Car,"Face Detection and Tracking Control with Omni Car
+Jheng-Hao Chen, Tung-Yu Wu
+CS 231A Final Report
+June 31, 2016"
+5c435c4bc9c9667f968f891e207d241c3e45757a,"""How old are you?"" : Age Estimation with Tensors of Binary Gaussian Receptive Maps","RUIZ-HERNANDEZ, CROWLEY, LUX: HOW OLD ARE YOU?
+""How old are you?"" : Age Estimation with
+Tensors of Binary Gaussian Receptive Maps
+John A. Ruiz-Hernandez
+James L. Crowley
+Augustin Lux
+INRIA Grenoble Rhones-Alpes
+Research Center and Laboratoire
+d’Informatique de Grenoble (LIG)
+655 avenue de l’Europe
+8 334 Saint Ismier Cedex, France"
+5c315aae464602115674716a7f976c4992fcb98e,Teachers’ Perception in the Classroom,"Teachers’ Perception in the Classroom
+¨Omer S¨umer1
+Patricia Goldberg1
+Kathleen St¨urmer1
+Tina Seidel3
+Peter Gerjets2 Ulrich Trautwein1
+Enkelejda Kasneci1
+University of T¨ubingen, Germany
+Leibniz-Institut f¨ur Wissensmedien, Germany
+Technical University of Munich, Germany"
+5c77901df1e0f52a9774b39e730c31afbc1214a7,Learning Social Tag Relevance by Neighbor Voting,"Learning Social Tag Relevance by Neighbor Voting
+Xirong Li, Cees G. M. Snoek, Member, IEEE, Marcel Worring, Member, IEEE"
+5cb1277bc7257e7b4cfc1699199c6d8e13ff0b1a,Refining Synthetic Images with Semantic Layouts by Adversarial Training,"Proceedings of Machine Learning Research 95:863-878, 2018
+ACML 2018
+Refining Synthetic Images with Semantic Layouts by
+Adversarial Training
+Tongtong Zhao
+Dalian Maritime University
+Dalian 116026, China
+Yuxiao Yan
+Dalian Maritime University
+Dalian 116026, China
+JinJia Peng
+Dalian Maritime University
+Dalian 116026, China
+HaoHui Wei
+Dalian Maritime University
+Dalian 116026, China
+Xianping Fu
+Dalian Maritime University
+Dalian 116026, China
+Editors: Jun Zhu and Ichiro Takeuchi"
+5c9c153f705a02e157adcf49dccf4f1eeb70cf93,Learning Appearance Transfer for Person Re-identification,"Learning Appearance Transfer for Person
+Re-identification
+Tamar Avraham and Michael Lindenbaum"
+5c1e0e94d6cb74448c7b3c1e0db42121be4e9bd6,Saliency Detection using regression trees on hierarchical image segments,"SALIENCY DETECTION USING REGRESSION TREES ON
+HIERARCHICAL IMAGE SEGMENTS
+G¨okhan Yildirim, Appu Shaji, Sabine S¨usstrunk
+School of Computer and Communication Sciences
+´Ecole Polytechnique F´ed´erale de Lausanne"
+5c3fd194ba96c5eea41c0772ad0b2292dedcd197,Understanding the Energy Saving Potential of Smart Scale Selection in the Viola and Jones Facial Detection Algorithm,
+5cff58d081a4732b11e6da498196ed6fbb54d15b,Adversarial Examples for Semantic Segmentation and Object Detection,"Adversarial Examples for Semantic Segmentation and Object Detection
+Cihang Xie1*, Jianyu Wang2*, Zhishuai Zhang1∗, Yuyin Zhou1, Lingxi Xie1, Alan Yuille1
+Department of Computer Science, The Johns Hopkins University, Baltimore, MD 21218 USA
+{cihangxie306, wjyouch, zhshuai.zhang, zhouyuyiner, 198808xc,
+Baidu Research USA, Sunnyvale, CA 94089 USA"
+5cd11d6b6cb7a2b8c00fcb535879edbd6b008a01,Stereo DSO: Large-Scale Direct Sparse Visual Odometry with Stereo Cameras,"Large-Scale Direct Sparse Visual Odometry with Stereo Cameras
+Stereo DSO:
+Rui Wang∗, Martin Schw¨orer∗, Daniel Cremers
+Technical University of Munich
+{wangr, schwoere,"
+5c09d905f6d4f861624821bf9dfe2aae29137e9c,Women Also Snowboard: Overcoming Bias in Captioning Models,"Women also Snowboard:
+Overcoming Bias in Captioning Models
+Lisa Anne Hendricks * 1 Kaylee Burns * 1 Kate Saenko 2 Trevor Darrell 1 Anna Rohrbach 1"
+5cead7ba087ebe7314f96d875f3d3dbb8dbed1c7,Automatic Food Intake Assessment Using Camera Phones,"Michigan Technological University
+Digital Commons Michigan
+Dissertations, Master's Theses and Master's Reports
+- Open
+Dissertations, Master's Theses and Master's Reports
+Automatic Food Intake Assessment Using Camera
+Phones
+Fanyu Kong
+Michigan Technological University
+Copyright 2012 Fanyu Kong
+Recommended Citation
+Kong, Fanyu, ""Automatic Food Intake Assessment Using Camera Phones"", Dissertation, Michigan Technological University, 2012.
+http://digitalcommons.mtu.edu/etds/494
+Follow this and additional works at: http://digitalcommons.mtu.edu/etds
+Part of the Computer Engineering Commons"
+5cebc83001ea0737cc46360850fd294327c82013,MEMORY-BASED GAIT RECOGNITION 1 Memory-based Gait Recognition,"DANLIUet al.:MEMORY-BASEDGAITRECOGNITION
+Memory-based Gait Recognition
+Dan Liu
+Mao Ye∗
+Xudong Li
+Feng Zhang
+Lan Lin
+School of Computer Science and
+Engineering,
+Center for Robotics,
+Key Laboratory for NeuroInformation of
+Ministry of Education,
+University of Electronic Science and
+Technology of China,
+Chengdu 611731, P.R. China"
+5cd34abb1e96e0c11f427364e40b1e87d6fc62c2,Greedy Part-Wise Learning of Sum-Product Networks,"Greedy Part-Wise Learning of Sum-Product
+Networks
+Robert Peharz, Bernhard C. Geiger and Franz Pernkopf
+{robert.peharz, geiger,
+Signal Processing and Speech Communication Laboratory
+Graz, University of Technology"
+5c02bd53c0a6eb361972e8a4df60cdb30c6e3930,Multimedia stimuli databases usage patterns: a survey report,"Multimedia stimuli databases usage patterns: a
+survey report
+M. Horvat1, S. Popović1 and K. Ćosić1
+University of Zagreb, Faculty of Electrical Engineering and Computing
+Department of Electric Machines, Drives and Automation
+Zagreb, Croatia"
+5c5304b79ebc2afd28ade6bb88daa80144ae3587,Review of Human-Robot Interactive Modelling and Application for Elders,"COMPUTER MODELLING & NEW TECHNOLOGIES 2014 18(12C) 408-413
+Han Jing, Xie, Lun Xu Shangmou, Wang Zhiliang
+Review of Human-Robot Interactive Modelling and
+Application for Elders
+Jing Han, Lun Xie*, Shangmou Xu, Zhiliang Wang
+School of Computer and Communication Engineering, University of Science and Technology Beijing, No.30 Xueyuan road, Beijing, China
+Received 23 November 2014, www.cmnt.lv"
+5c717afc5a9a8ccb1767d87b79851de8d3016294,A novel eye region based privacy protection scheme,"978-1-4673-0046-9/12/$26.00 ©2012 IEEE
+ICASSP 2012"
+5c879f9e2e79d6c6af8d4c821575e73876240a83,DeepFaceLIFT: Interpretable Personalized Models for Automatic Estimation of Self-Reported Pain,"Journal of Machine Learning Research 66 (2017) 1-16
+Submitted 5/17; Published 08/17
+DeepFaceLIFT: Interpretable Personalized Models
+for Automatic Estimation of Self-Reported Pain
+Dianbo Liu*2,3
+Fengjiao Peng*1
+Andrew Shea*3
+Ognjen (Oggi) Rudovic1
+Rosalind Picard1
+Media Lab, MIT, Cambridge, MA, USA
+Computer Science and Artificial Intelligence Laboratory, MIT, Cambridge, MA, USA
+Department of Electrical Engineering and Computer Science, MIT, Cambridge, MA, USA"
+0971a5e835f365b6008177a867cfe4bae76841a5,Supervised Dictionary Learning by a Variational Bayesian Group Sparse Nonnegative Matrix Factorization,"Supervised Dictionary Learning by a
+Variational Bayesian Group Sparse
+Nonnegative Matrix Factorization
+Ivan Ivek"
+09f4e1064afffd8464e9fd558fc8ef7be5e33170,Spatial and Temporal Organization of the Individual Human Cerebellum,"Article
+Spatial and Temporal Organization of the Individual
+Human Cerebellum"
+098388c08ef7d23ab583819b793b0057c0396dc8,Low Rank Approximation using Error Correcting Coding Matrices,"Low Rank Approximation using Error Correcting Coding Matrices
+Shashanka Ubaru
+Arya Mazumdar
+Yousef Saad
+University of Minnesota-Twin Cities, MN USA"
+092f955f701b31f3e58adb57c57e39a4dcab9fcd,Weighted Additive Criterion for Linear Dimension Reduction,"Seventh IEEE International Conference on Data Mining
+Seventh IEEE International Conference on Data Mining
+Seventh IEEE International Conference on Data Mining
+Seventh IEEE International Conference on Data Mining
+Seventh IEEE International Conference on Data Mining
+Weighted Additive Criterion for Linear Dimension Reduction
+Jing Peng & Stefan Robila
+Computer Science Department, Montclair State University
+Montclair, NJ 07043"
+09e5f2f819a21162d833f356670a140cd555a740,Adaptive Algorithm and Platform Selection for Visual Detection and Tracking,"Adaptive Algorithm and Platform Selection for
+Visual Detection and Tracking
+Shu Zhang, Qi Zhu, and Amit K. Roy-Chowdhury"
+096e68f8d632f4363056d54a7de9c59d66b806d8,Impaired visuocortical discrimination learning of socially conditioned stimuli in social anxiety.,"Impaired Visuocortical Discrimination Learning of Socially
+Conditioned Stimuli in Social Anxiety
+Lea M. Ahrens1, Andreas Mühlberger2, Paul Pauli1, & Matthias J. Wieser1
+Department of Psychology I, University of Würzburg, Germany
+Department of Clinical Psychology and Psychotherapy, University of Regensburg, Germany
+Address for correspondence:
+Lea M. Ahrens, University of Würzburg, Department of Psychology, Biological Psychology, Clinical
+Psychology, and Psychotherapy, Marcusstr. 9-11, D-97070 Würzburg, Phone.: +49 931 31-81929,
+Fax: +49 931 31-82733,
+Running title:
+Social Conditioning in Social Anxiety
+Words: 4995 (+ 8 place marker)
+© The Author (2014). Published by Oxford University Press. For Permissions, please email:"
+0969aa7d4557699b7460e4159658828efafed8bd,Con-Text: Text Detection for Fine-Grained Object Classification,"Con-Text: Text Detection for Fine-grained Object
+Classification
+Sezer Karaoglu, Ran Tao, Jan C. van Gemert and Theo Gevers, Member, IEEE,"
+096eb8b4b977aaf274c271058feff14c99d46af3,Multi-observation visual recognition via joint dynamic sparse representation,"REPORT DOCUMENTATION PAGE
+Form Approved OMB NO. 0704-0188
+including
+for reviewing
+information,
+this collection of
+information
+is estimated
+to average 1 hour per response,
+the data needed, and completing and reviewing
+this collection of
+instructions,
+The public reporting burden
+Send comments
+searching existing data sources, gathering and maintaining
+to Washington
+regarding
+this burden estimate or any other aspect of
+Information Operations and Reports, 1215 Jefferson Davis Highway, Suite 1204, Arlington VA, 22202-4302.
+Headquarters Services, Directorate"
+09d9d9d153119558e83643f0097ffb87e1037649,Face Recognition and Verification Using Artificial Neural Network,"©2010 International Journal of Computer Applications (0975 – 8887)
+Volume 1 – No. 14
+Face Recognition and Verification
+Using Artificial Neural Network
+Ms. S. S.Ranawade
+Maharashtra Institute Technology, Pune 05
+/ nonface
+images. We solve"
+09137e3c267a3414314d1e7e4b0e3a4cae801f45,Two Birds with One Stone: Transforming and Generating Facial Images with Iterative GAN,"Noname manuscript No.
+(will be inserted by the editor)
+Two Birds with One Stone: Transforming and Generating
+Facial Images with Iterative GAN
+Dan Ma · Bin Liu · Zhao Kang · Jiayu Zhou · Jianke Zhu · Zenglin Xu
+Received: date / Accepted: date"
+092d5bc60a21933abf98aa85ace8a9c85df16958,Implementing Randomized Matrix Algorithms in Parallel and Distributed Environments,"Implementing Randomized Matrix Algorithms in Parallel and
+Distributed Environments
+Jiyan Yang ∗
+Xiangrui Meng †
+Michael W. Mahoney ‡"
+09d78009687bec46e70efcf39d4612822e61cb8c,Consistent Re-identification in a Camera Network,"Consistent Re-identification in a Camera
+Network
+Abir Das(cid:2), Anirban Chakraborty(cid:2), and Amit K. Roy-Chowdhury(cid:2)(cid:2)
+Dept. of Electrical Engineering, University of California, Riverside, CA 92521, USA"
+09926ed62511c340f4540b5bc53cf2480e8063f8,Tubelet Detector for Spatio-Temporal Action Localization,"Action Tubelet Detector for Spatio-Temporal Action Localization
+Vicky Kalogeiton1,2
+Philippe Weinzaepfel3
+Vittorio Ferrari2
+Cordelia Schmid1"
+0917de8a3be50f2a813e7b77fc53b81125a58acb,Video based head detection and tracking surveillance system,978-1-4673-0024-7/10/$26.00 ©2012 IEEE 2832
+09fbfb566a8f2af9df4d3a1bf5df00d0693a22eb,Conformal Prediction for Automatic Face Recognition,"Proceedings of Machine Learning Research 60:1–20, 2017 Conformal and Probabilistic Prediction and Applications
+Conformal Prediction for Automatic Face Recognition
+Charalambos Eliades
+Harris Papadopoulos
+Computer Science and Engineering Department, Frederick University,
+7 Y. Frederickou St., Palouriotisa, Nicosia 1036, Cyprus
+Editor: Alex Gammerman, Vladimir Vovk, Zhiyuan Luo, and Harris Papadopoulos"
+0965a62c9c354d2c7175e313ade9e38120f1bd4e,Efficient Face Detection Method using Modified Hausdorff Distance Method with C 4 . 5 Classifier and Canny Edge Detection,"International Journal of Computer Applications (0975 – 8887)
+Volume 123 – No.10, August 2015
+Efficient Face Detection Method using Modified
+Hausdorff Distance Method with C4.5 Classifier and
+Canny Edge Detection
+Neelima Singh
+Research Scholar
+Computer Science and
+Engineering Department
+Samrat Ashok Technological
+Institute, Vidisha, M. P.
+Satish Pawar
+Assistant Professor
+Computer Science and
+Engineering Department
+Samrat Ashok Technological
+Institute, Vidisha, M. P.
+Yogendra Kumar Jain
+Head of Department
+Computer Science and"
+09eaa332ddcd036b0f0950bbdb3624072f105a3b,When appearance does not match accent: neural correlates of ethnicity-related expectancy violations.,"doi: 10.1093/scan/nsw148
+Advance Access Publication Date: 19 October 2016
+Original article
+When appearance does not match accent: neural
+correlates of ethnicity-related expectancy violations
+Karolina Hansen,1 Melanie C. Steffens,2 Tamara Rakic,3 and Holger Wiese4
+University of Warsaw, Warsaw, Poland, 2University of Koblenz-Landau, Landau, Germany, 3Lancaster
+University, Lancaster, UK, and 4Durham University, Durham, UK
+Correspondence should be addressed to Karolina Hansen, Faculty of Psychology, University of Warsaw, Stawki 5/7, 00-183 Warszawa, Poland.
+E-mail:"
+09c4732280c3b2586e390d818ef0056a8de73e2c,A New Method of Histogram Computation for Efficient Implementation of the HOG Algorithm,"Article
+A New Method of Histogram Computation for
+Efficient Implementation of the HOG Algorithm †
+Mariana-Eugenia Ilas 1,* ID and Constantin Ilas 2
+Department of Electronics, Telecommunications and IT, University Politehnica Bucharest,
+Bucharest 060042, Romania
+Department of Automatics and Computer Science, University Politehnica Bucharest,
+Bucharest 060042, Romania;
+* Correspondence: Tel.: +40-21-402-4618
+This paper is an extended version of our paper published in the 9th Computer Science & Electronic
+Engineering Conference (CEEC), Colchester, UK, 27–29 September 2017.
+Received: 5 January 2018; Accepted: 27 February 2018; Published: 1 March 2018"
+09a6261c3334471bb0bc1a173aff672afe963ae3,Key-Pose Prediction in Cyclic Human Motion,"Key-Pose Prediction in Cyclic Human Motion
+Multimedia Computing and Computer Vision Lab, University of Augsburg
+Dan Zecha
+Rainer Lienhart"
+09c019141b209401b76a35184c86bab6cd1fe6b9,3D Deformable Shape Reconstruction with Diffusion Maps,"TAO, MATUSZEWSKI: 3D RECONSTRUCTION WITH DIFFUSION MAPS
+3D Deformable Shape Reconstruction with
+Diffusion Maps
+Lili Tao
+Bogdan J. Matuszewski
+Applied Digital Signal and Image
+Processing Research Centre
+University of Central Lancashire, UK"
+09718bf335b926907ded5cb4c94784fd20e5ccd8,"Recognizing partially occluded, expression variant faces from single training image per person with SOM and soft k-NN ensemble","Recognizing Partially Occluded, Expression Variant
+Faces From Single Training Image per Person
+With SOM and Soft k-NN Ensemble
+Xiaoyang Tan, Songcan Chen, Zhi-Hua Zhou, Member, IEEE, and Fuyan Zhang"
+09251a324dc4865732e2ead50334bfb906f8ffb4,Beyond Text based sentiment analysis: Towards multi-modal systems,"Springer Cognitive Computation manuscript No.
+(will be inserted by the editor)
+Beyond Text based sentiment analysis: Towards multi-modal
+systems
+Soujanya Poria · Amir Hussain · Erik Cambria
+the date of receipt and acceptance should be inserted later"
+09ac8added26307b358b83884b55af29de8b5bf9,Learning to grasp objects with multiple contact points,"Learning to grasp objects with multiple contact points
+Quoc V. Le, David Kamm, Arda F. Kara, Andrew Y. Ng"
+0949f46d5db3169813ae23acafa345c6b8a37f08,When Slower Is Faster: On Heterogeneous Multicores for Reliable Systems,"When Slower is Faster: On Heterogeneous Multicores for Reliable Systems
+Tomas Hruby
+The Network Institute, VU University Amsterdam
+Herbert Bos
+Andrew S. Tanenbaum"
+09222c50d8ffcc74bbb7462400bd021772850bba,Incorporating Network Built-in Priors in Weakly-Supervised Semantic Segmentation,"Incorporating Network Built-in Priors in
+Weakly-supervised Semantic Segmentation
+Fatemeh Sadat Saleh, Mohammad Sadegh Aliakbarian, Mathieu Salzmann, Lars Petersson,
+Jose M. Alvarez, and Stephen Gould"
+0994916f67fd15687dd5d7e414becb1cd77129ac,Multi Class Different Problem Solving Using Intelligent Algorithm,"SIVAKUMAR R, Dr.M.SRIDHAR / International Journal of Engineering Research and
+Applications (IJERA) ISSN: 2248-9622 www.ijera.com
+Vol. 2, Issue4, July-August 2012, pp.1782-1785
+Multi Class Different Problem Solving Using Intelligent
+Algorithm
+SIVAKUMAR R, 2Dr.M.SRIDHAR
+Research Scholar Dept of ECE BHARATH UNIVERSITY India
+Dept of ECE BHARATH UNIVERSITY India"
+0903bb001c263e3c9a40f430116d1e629eaa616f,An Empirical Study of Context in Object Detection,"CVPR 2009 Submission #987. CONFIDENTIAL REVIEW COPY. DO NOT DISTRIBUTE.
+An Empirical Study of Context in Object Detection
+Anonymous CVPR submission
+Paper ID 987"
+092597b8e0f31be1671025cea1b9fd28a48e04bc,Supervised Person Re-ID based on Deep Hand-crafted and CNN Features,
+091b4ad74ac5bec206604673506b19838d6a0c52,Person Re-Identification By Saliency Learning,"|| Volume 2 ||Issue 10 ||MAY 2017||ISSN (Online) 2456-0774
+INTERNATIONAL JOURNAL OF ADVANCE SCIENTIFIC RESEARCH
+AND ENGINEERING TRENDS
+Person Re-Identification By Saliency Learning
+Shaihenila
+P.G. Student, Computer Science & Engineering, Everest Educational Society's Group of Institutions, Aurangabad, India."
+092b64ce89a7ec652da935758f5c6d59499cde6e,Human3.6M: Large Scale Datasets and Predictive Methods for 3D Human Sensing in Natural Environments,"Human3.6M:
+Large Scale Datasets and Predictive Methods
+for 3D Human Sensing in Natural Environments
+Catalin Ionescu∗†‡, Dragos Papava∗‡, Vlad Olaru∗, Cristian Sminchisescu§∗"
+09df62fd17d3d833ea6b5a52a232fc052d4da3f5,Mejora de Contraste y Compensación en Cambios de la Iluminación,"ISSN: 1405-5546
+Instituto Politécnico Nacional
+México
+Rivas Araiza, Edgar A.; Mendiola Santibañez, Jorge D.; Herrera Ruiz, Gilberto; González Gutiérrez,
+Carlos A.; Trejo Perea, Mario; Ríos Moreno, G. J.
+Mejora de Contraste y Compensación en Cambios de la Iluminación
+Instituto Politécnico Nacional
+Distrito Federal, México
+Disponible en: http://www.redalyc.org/articulo.oa?id=61509703
+Cómo citar el artículo
+Número completo
+Más información del artículo
+Página de la revista en redalyc.org
+Sistema de Información Científica
+Red de Revistas Científicas de América Latina, el Caribe, España y Portugal
+Proyecto académico sin fines de lucro, desarrollado bajo la iniciativa de acceso abierto"
+093b6af0e5f00f9578088a49822d8d500283cab0,Human visual behaviour for collaborative human-machine interaction,"Human Visual Behaviour for
+Collaborative Human-Machine
+Interaction
+Andreas Bulling
+Perceptual User Interfaces
+Group
+Max Planck Institute for
+Informatics
+Saarbr¨ucken, Germany
+Permission to make digital or hard copies of all or part of this work for
+personal or classroom use is granted without fee provided that copies are not
+made or distributed for profit or commercial advantage and that copies bear
+this notice and the full citation on the first page. Copyrights for components"
+09e3967a34cca8dc0f00c9ee7a476a96812a55e0,1 Machine Learning Methods for Social Signal Processing,"Machine Learning Methods for
+Social Signal Processing
+Ognjen Rudovic, Mihalis A. Nicolaou and Vladimir Pavlovic
+Introduction
+In this chapter we focus on systematization, analysis, and discussion of recent
+trends in machine learning methods for Social signal processing (SSP)(Pentland
+007). Because social signaling is often of central importance to subconscious de-
+cision making that affects everyday tasks (e.g., decisions about risks and rewards,
+resource utilization, or interpersonal relationships) the need for automated un-
+derstanding of social signals by computers is a task of paramount importance.
+Machine learning has played a prominent role in the advancement of SSP over
+the past decade. This is, in part, due to the exponential increase of data avail-
+ability that served as a catalyst for the adoption of a new data-driven direction in
+affective computing. With the dif‌f‌iculty of exact modeling of latent and complex
+physical processes that underpin social signals, the data has long emerged as the
+means to circumvent or supplement expert- or physics-based models, such as the
+deformable musculo-sceletal models of the human body, face or hands and its
+movement, neuro-dynamical models of cognitive perception, or the models of the
+human vocal production. This trend parallels the role and success of machine
+learning in related areas, such as computer vision, c.f., (Poppe 2010, Wright"
+094f5e36dae2602e179f2c1d95a616df3dbe967f,Bilinear classifiers for visual recognition,"Bilinear classifiers for visual recognition
+Hamed Pirsiavash
+Deva Ramanan
+Charless Fowlkes
+Department of Computer Science
+University of California at Irvine"
+0910a4c470a410fac446f4026f7c8ef512ae7427,Hierarchical Question-Image Co-Attention for Visual Question Answering,"Hierarchical Question-Image Co-Attention
+for Visual Question Answering
+Jiasen Lu∗, Jianwei Yang∗, Dhruv Batra∗† , Devi Parikh∗†
+Virginia Tech, † Georgia Institute of Technology
+{jiasenlu, jw2yang, dbatra,"
+09d08e543a9b2fc350cb37e47eb087935c12be16,"A Multimodal, Full-Surround Vehicular Testbed for Naturalistic Studies and Benchmarking: Design, Calibration and Deployment","A Multimodal, Full-Surround Vehicular Testbed for Naturalistic Studies
+and Benchmarking: Design, Calibration and Deployment
+Akshay Rangesh1, Kevan Yuen1, Ravi Kumar Satzoda1, Rakesh Nattoji Rajaram1,
+Pujitha Gunaratne2, and Mohan M. Trivedi1
+Laboratory for Intelligent and Safe Automobiles (LISA), UC San Diego
+Toyota Collaborative Safety Research Center (CSRC)
+in autonomous"
+09f853ce12f7361c4b50c494df7ce3b9fad1d221,Random Forests for Real Time 3D Face Analysis,"myjournal manuscript No.
+(will be inserted by the editor)
+Random forests for real time 3D face analysis
+Gabriele Fanelli · Matthias Dantone · Juergen Gall · Andrea Fossati ·
+Luc Van Gool
+Received: date / Accepted: date"
+09ba6b87736fa29aae88c5b4cf30f25188e4c6ef,Gaze Estimation in the 3D Space Using RGB-D Sensors,"The final publication is available at Springer via http://dx.doi.org/10.1007/s11263-015-0863-4
+Gaze Estimation in the 3D Space Using RGB-D sensors
+Towards Head-Pose And User Invariance
+Kenneth A. Funes-Mora · Jean-Marc Odobez
+Received: 19 November 2014 / Accepted: 23 September 2015"
+09edf114f8764c82713f8dd35b1b32ad83ecaa17,Large-Margin Learning of Compact Binary Image Encodings,"MANUSCRIPT
+Large-margin Learning of Compact Binary Image
+Encodings
+Sakrapee Paisitkriangkrai, Chunhua Shen, Anton van den Hengel"
+09b0040ad09d61f3403c57c437c03271f8614add,HUMAN ACTIVITY RECOGNITION AND GYMNASTICS ANALYSIS THROUGH DEPTH IMAGERY by,"HUMAN ACTIVITY RECOGNITION AND
+GYMNASTICS ANALYSIS THROUGH
+DEPTH IMAGERY
+Brian J. Reily"
+09750c9bbb074bbc4eb66586b20822d1812cdb20,Estimation of the neutral face shape using Gaussian Mixture Models,"978-1-4673-0046-9/12/$26.00 ©2012 IEEE
+ICASSP 2012"
+09e15bb266da86d0a9525d2a94ac0b38f0b53b88,Detect What You Can: Detecting and Representing Objects Using Holistic Models and Body Parts,"Detect What You Can: Detecting and Representing Objects using Holistic
+Models and Body Parts
+Xianjie Chen1, Roozbeh Mottaghi2, Xiaobai Liu1, Sanja Fidler3, Raquel Urtasun3, Alan Yuille1
+University of California, Los Angeles 2Stanford University 3University of Toronto"
+0956a3c628959afcf870f5d7ec581160a4aa5221,LIFEisGAME Prototype: A Serious Game about Emotions for Children with Autism Spectrum Disorders,"Volume 11, Number 3, 191 – 211
+LIFEisGAME Prototype: A Serious Game about Emotions
+for Children with Autism Spectrum Disorders
+Samanta Alves1, António Marques2, Cristina Queirós∗1 and Verónica Orvalho3
+Psychosocial
+Rehabilitation
+Laboratory, Faculty of
+Psychology and
+Educational Sciences,
+Porto University
+(Portugal)
+Psychosocial
+Rehabilitation
+Laboratory, School of
+Allied Health Sciences,
+Porto Polytechnic
+Institute
+(Portugal)
+Porto
+Interactive"
+09749e7b0ae6bd9ab37671fcc4f0e7a7bcf9ff2e,Perceptual enhancement of emotional mocap head motion: An experimental study,"Perceptual Enhancement of Emotional Mocap Head Motion: An Experimental
+Study
+Yu Ding
+Univeristy of Houston
+Houston, TX, USA
+Lei Shi
+Univeristy of Houston
+Houston, TX, USA
+Zhigang Deng
+Univeristy of Houston
+Houston, TX, USA"
+097f674aa9e91135151c480734dda54af5bc4240,Face Recognition Based on Multiple Region Features,"Proc. VIIth Digital Image Computing: Techniques and Applications, Sun C., Talbot H., Ourselin S. and Adriaansen T. (Eds.), 10-12 Dec. 2003, Sydney
+Face Recognition Based on Multiple Region Features
+Jiaming Li, Geoff Poulton, Ying Guo, Rong-Yu Qiao
+CSIRO Telecommunications & Industrial Physics
+Australia
+Tel: 612 9372 4104, Fax: 612 9372 4411, Email:"
+5d0e11844f1a210f16025e990de938f6732672ab,Distance to Center of Mass Encoding for Instance Segmentation,"Distance to Center of Mass Encoding for Instance Segmentation
+Thomio Watanabe
+University of Sao Paulo
+Denis Wolf
+University of Sao Paulo"
+5da740682f080a70a30dc46b0fc66616884463ec,Real-Time Head Pose Estimation Using Multi-variate RVM on Faces in the Wild,"Real-Time Head Pose Estimation Using
+Multi-Variate RVM on Faces in the Wild
+Mohamed Selim, Alain Pagani, Didier Stricker
+Augmented Vision Research Group,
+German Research Center for Artificial Intelligence (DFKI),
+Tripstaddterstr. 122, 67663 Kaiserslautern, Germany
+Technical University of Kaiserslautern
+http://www.av.dfki.de"
+5dc003a75a302761778cb1c15d796e3d90dd9322,Bayesian Fisher's Discriminant for Functional Data,"Bayesian Fisher’s Discriminant for Functional Data
+Yao-Hsiang Yang ∗, Lu-Hung Chen†, Chieh-Chih Wang‡, and Chu-Song Chen §
+December 10, 2014"
+5d1608e03ab9c529d0b05631f9d2a3afcbf1c3e3,Sparsity and Robustness in Face Recognition,"Sparsity and Robustness in Face Recognition
+John Wright, Arvind Ganesh, Allen Yang, Zihan Zhou, and Yi Ma
+Background. This note concerns the use of techniques for sparse signal representation and sparse
+from the paper [WYG+09], which showed how, under certain technical conditions, one could cast
+the face recognition problem as one of seeking a sparse representation of a given input face image
+in terms of a “dictionary” of training images and images of individual pixels. To be more precise,
+the method of [WYG+09] assumes access to a suf‌f‌icient number of well-aligned training images of
+each of the k subjects. These images are stacked as the columns of matrices A1, . . . , Ak. Given a
+new test image y, also well aligned, but possibly subject to illumination variation or occlusion, the
+method of [WYG+09] seeks to represent y as a sparse linear combination of the database as whole.
+Writing A = [A1 | ··· | Ak], this approach solves
+(cid:107)x(cid:107)1 + (cid:107)e(cid:107)1
+subj. to Ax + e = y.
+minimize
+the identity of the test image y the index whose sparse coef‌f‌icients minimize the residual:
+ˆi = arg min
+(cid:107)y − Aixi − e(cid:107)2.
+This approach demonstrated successful results in laboratory settings (fixed pose, varying illumi-
+nation, moderate occlusion) in [WYG+09], and was extended to more realistic settings (involving
+moderate pose and misalignemnt) in [WWG+11]. For the sake of clarity, we repeat the above"
+5d80149e005894ab57f47e667f3e060e247d8e43,Lip reading using CNN and LSTM,"Lip reading using CNN and LSTM
+Amit Garg
+Jonathan Noyola
+Sameep Bagadia"
+5df11c59e3b47189486445f5833675bf08359bfe,Influence of Image Classification Accuracy on Saliency Map Estimation,"IET Research Journals
+Brief Paper
+Influence of Image Classification Accuracy
+on Saliency Map Estimation
+Taiki Oyama1 Takao Yamanaka1
+Department of Information & Communication Sciences, Sophia University, 7-1 Kioi-cho, Chiyoda-ku, Tokyo, 102-0094, Japan
+* E-mail:
+ISSN 1751-8644
+doi: 0000000000
+www.ietdl.org"
+5da139fc43216c86d779938d1c219b950dd82a4c,A Generalized Multiple Instance Learning Algorithm for Iterative Distillation and Cross-Granular Propagation of Video Annotations,"-4244-1437-7/07/$20.00 ©2007 IEEE
+II - 205
+ICIP 2007"
+5d04bd7104f08f7fb91967613ffc519c27641e99,Bound to Lose: Physical Incapacitation Increases the Conceptualized Size of an Antagonist in Men,"Bound to Lose: Physical Incapacitation Increases the
+Conceptualized Size of an Antagonist in Men
+Daniel M. T. Fessler*, Colin Holbrook
+Department of Anthropology and Center for Behavior, Evolution, and Culture, University of California Los Angeles, Los Angeles, California, United States of America"
+5d14cc415a93e6f3a625ed7794e1fdcf99ea5713,Predicting Face Recognition Performance Using Image Quality,"Predicting Face Recognition Performance Using
+Image Quality
+Abhishek Dutta, Raymond Veldhuis, Senior Member, IEEE and Luuk Spreeuwers,"
+5da53a17165fcc64e8fb6e9ca532bfb6d95ff622,RSCM: Region Selection and Concurrency Model for Multi-Class Weather Recognition,"RSCM: Region Selection and Concurrency Model
+for Multi-Class Weather Recognition
+Di Lin, Cewu Lu, Member, IEEE, Hui Huang, Member, IEEE, and Jiaya Jia, Senior Member, IEEE
+ondition"
+5d185d82832acd430981ffed3de055db34e3c653,A Fuzzy Reasoning Model for Recognition of Facial Expressions,"A Fuzzy Reasoning Model for Recognition
+of Facial Expressions
+Oleg Starostenko1, Renan Contreras1, Vicente Alarcón Aquino1, Leticia Flores Pulido1,
+Jorge Rodríguez Asomoza1, Oleg Sergiyenko2, and Vira Tyrsa3
+Research Center CENTIA, Department of Computing, Electronics and Mechatronics,
+Universidad de las Américas, 72820, Puebla, Mexico
+{oleg.starostenko; renan.contrerasgz; vicente.alarcon; leticia.florespo;
+Engineering Institute, Autonomous University of Baja California, Blvd. Benito Juárez,
+Insurgentes Este, 21280, Mexicali, Baja California, Mexico
+Universidad Politécnica de Baja California, Mexicali, Baja California, Mexico"
+5d90f06bb70a0a3dced62413346235c02b1aa086,Learning Multiple Layers of Features from Tiny Images,"Learning Multiple Layers of Features from Tiny Images
+Alex Krizhevsky
+April 8, 2009"
+5d233e6f23b1c306cf62af49ce66faac2078f967,Optimal Geometrical Set for Automated Marker Placement to Virtualized Real-Time Facial Emotions,"RESEARCH ARTICLE
+Optimal Geometrical Set for Automated
+Marker Placement to Virtualized Real-Time
+Facial Emotions
+Vasanthan Maruthapillai, Murugappan Murugappan*
+School of Mechatronic Engineering, Universiti Malaysia Perlis, 02600, Ulu Pauh, Arau, Perlis, West Malaysia"
+5da0224590d91defe8c75db0ab5e12d50b6ab6f3,NMTPY: A Flexible Toolkit for Advanced Neural Machine Translation Systems,"NMTPY: A FLEXIBLE TOOLKIT FOR ADVANCED
+NEURAL MACHINE TRANSLATION SYSTEMS
+Ozan Caglayan, Mercedes García-Martínez, Adrien Bardet, Walid Aransa,
+Fethi Bougares, Loïc Barrault
+Laboratoire d’Informatique de l’Université du Maine (LIUM)
+Language and Speech Technology (LST) Team
+Le Mans, France"
+5da43ff9c246ae37d9006bba3406009cb4fb1dcf,Lifelong Machine Learning Lifelong Machine Learning,"Lifelong Machine Learning
+November, 2016
+Zhiyuan Chen and Bing Liu
+Draft : This is an early draft of the book.
+Zhiyuan Chen and Bing Liu. Lifelong Machine Learning.
+Morgan & Claypool Publishers, Nov 2016.
+LifelongMachineLearningZhiyuan ChenBing Liu"
+5dcfb84ab3f5d5f1dd02f59e45154c9710de97b2,On the Latent Variable Interpretation in Sum-Product Networks,"On the Latent Variable Interpretation in
+Sum-Product Networks
+Robert Peharz, Robert Gens, Franz Pernkopf, Senior Member, IEEE, and Pedro Domingos"
+5db46dda9f0f08220d49a5db1204f149bd4f6a4a,Engaging Image Captioning Via Personality,"ENGAGING IMAGE CAPTIONING VIA PERSONALITY
+Kurt Shuster, Samuel Humeau, Hexiang Hu, Antoine Bordes, Jason Weston
+Facebook AI Research"
+5db075a308350c083c3fa6722af4c9765c4b8fef,The Novel Method of Moving Target Tracking Eyes Location based on SIFT Feature Matching and Gabor Wavelet Algorithm,"The Novel Method of Moving Target Tracking Eyes
+Location based on SIFT Feature Matching and Gabor
+Wavelet Algorithm
+* Jing Zhang, Caixia Yang, Kecheng Liu
+College of Computer and Information Engineering, Nanyang Institute of Technology,
+Henan Nanyang, 473004, China
+* Tel.: 0086+13838972861
+* E-mail:
+Sensors & Transducers, Vol. 154, Issue 7, July 2013, pp. 129-137
+SSSeeennnsssooorrrsss &&& TTTrrraaannnsssddduuuccceeerrrsss
+© 2013 by IFSA
+http://www.sensorsportal.com
+Received: 28 April 2013 /Accepted: 19 July 2013 /Published: 31 July 2013"
+5d7de2eb2ee99798bfb2e50ed5169e3b8a35469a,Design of a Three-dimensional Face Recognition System,"The Open Automation and Control Systems Journal, 2015, 7, 587-590
+Design of a Three-Dimensional Face Recognition System
+Send Orders for Reprints to
+Open Access
+Wang Xuechun* and Wang Zhaoping
+School of Information Engineering, Huanghe Science and Technology College, Zhengzhou, Henan, 450006, P.R. China"
+5d7f8eb73b6a84eb1d27d1138965eb7aef7ba5cf,Robust Registration of Dynamic Facial Sequences,"Robust Registration of Dynamic Facial Sequences
+Evangelos Sariyanidi, Hatice Gunes, and Andrea Cavallaro"
+5db4fe0ce9e9227042144758cf6c4c2de2042435,Recognition of Facial Expression Using Haar Wavelet Transform,"INTERNATIONAL JOURNAL OF ELECTRICAL AND ELECTRONIC SYSTEMS RESEARCH, VOL.3, JUNE 2010
+Recognition of Facial Expression Using Haar
+Wavelet Transform
+M. Satiyan, M.Hariharan, R.Nagarajan
+paper
+features
+investigates"
+5d165ff5b0b389e32809c17838a2afc218a91d62,Object Detectors Emerge in Deep Scene CNNs,"Published as a conference paper at ICLR 2015
+OBJECT DETECTORS EMERGE IN DEEP SCENE CNNS
+Bolei Zhou, Aditya Khosla, Agata Lapedriza, Aude Oliva, Antonio Torralba
+Computer Science and Artificial Intelligence Laboratory, MIT"
+5d7f9e1463b596eb5d77865a8b1a0e149215303b,A Hidden Markov Model-based Approach for Face Detection and Recognition a Hidden Markov Model-based Approach for Face Detection and Recognition,"AHiddenMarkovModel-BasedApproach
+forFaceDetectionandRecognition
+ATHESIS
+Presentedto
+TheAcademicFaculty
+AraNe(cid:12)an
+InPartialFul(cid:12)llment
+oftheRequirementsfortheDegreeof
+DoctorofPhilosophyinElectricalEngineering
+GeorgiaInstituteofTechnology
+August,"
+5d5cd6fa5c41eb9d3d2bab3359b3e5eb60ae194e,Face Recognition Algorithms,"Face Recognition Algorithms
+Proyecto Fin de Carrera
+June 16, 2010
+Ion Marqu´es
+Supervisor:
+Manuel Gra˜na"
+5d09d5257139b563bd3149cfd5e6f9eae3c34776,Pattern recognition with composite correlation filters designed with multi-objective combinatorial optimization,"Optics Communications 338 (2015) 77–89
+Contents lists available at ScienceDirect
+Optics Communications
+journal homepage: www.elsevier.com/locate/optcom
+Pattern recognition with composite correlation filters designed with
+multi-objective combinatorial optimization
+Victor H. Diaz-Ramirez a,n, Andres Cuevas a, Vitaly Kober b, Leonardo Trujillo c,
+Abdul Awwal d
+Instituto Politécnico Nacional – CITEDI, Ave. del Parque 1310, Mesade Otay, Tijuana B.C. 22510, México
+Department of Computer Science, CICESE, Carretera Ensenada-Tijuana 3918, Ensenada B.C. 22860, México
+Instituto Tecnológico de Tijuana, Blvd. Industrial y Ave. ITR TijuanaS/N, Mesa de Otay, Tijuana B.C. 22500, México
+d National Ignition Facility, Lawrence Livermore National Laboratory, Livermore, CA 94551, USA
+r t i c l e i n f o
+b s t r a c t
+Article history:
+Received 12 July 2014
+Accepted 16 November 2014
+Available online 23 October 2014
+Keywords:
+Object recognition"
+5d7395085f2636dd2b6262bc7f3fef14058f4765,Regularizing Deep Networks by Modeling and Predicting Label Structure,"Regularizing Deep Networks by Modeling and Predicting Label Structure
+Mohammadreza Mostajabi
+Michael Maire
+Gregory Shakhnarovich
+Toyota Technological Institute at Chicago"
+5d197c8cd34473eb6cde6b65ced1be82a3a1ed14,A Face Image Database for Evaluating Out-of-Focus Blur,"0AFaceImageDatabaseforEvaluatingOut-of-FocusBlurQiHan,QiongLiandXiamuNiuHarbinInstituteofTechnologyChina1.IntroductionFacerecognitionisoneofthemostpopularresearchfieldsofcomputervisionandmachinelearning(Tores(2004);Zhaoetal.(2003)).Alongwithinvestigationoffacerecognitionalgorithmsandsystems,manyfaceimagedatabaseshavebeencollected(Gross(2005)).Facedatabasesareimportantfortheadvancementoftheresearchfield.Becauseofthenonrigidityandcomplex3Dstructureofface,manyfactorsinfluencetheperformanceoffacedetectionandrecognitionalgorithmssuchaspose,expression,age,brightness,contrast,noise,blurandetc.Someearlyfacedatabasesgatheredunderstrictlycontrolledenvironment(Belhumeuretal.(1997);Samaria&Harter(1994);Turk&Pentland(1991))onlyallowslightexpressionvariation.Toinvestigatetherelationshipsbetweenalgorithms’performanceandtheabovefactors,morefacedatabaseswithlargerscaleandvariouscharacterswerebuiltinthepastyears(Bailly-Bailliereetal.(2003);Flynnetal.(2003);Gaoetal.(2008);Georghiadesetal.(2001);Hallinan(1995);Phillipsetal.(2000);Simetal.(2003)).Forinstance,The""CAS-PEAL"",""FERET"",""CMUPIE"",and""YaleB""databasesincludevariousposes(Gaoetal.(2008);Georghiadesetal.(2001);Phillipsetal.(2000);Simetal.(2003));The""HarvardRL"",""CMUPIE""and""YaleB""databasesinvolvemorethan40differentconditionsinillumination(Georghiadesetal.(2001);Hallinan(1995);Simetal.(2003));Andthe""BANCA"",and""NDHID""databasescontainover10timesgathering(Bailly-Bailliereetal.(2003);Flynnetal.(2003)).Thesedatabaseshelpresearcherstoevaluateandimprovetheiralgorithmsaboutfacedetection,recognition,andotherpurposes.Blurisnotthemostimportantbutstillanotablefactoraffectingtheperformanceofabiometricsystem(Fronthaleretal.(2006);Zamanietal.(2007)).Themainreasonsleadingblurconsistinout-of-focusofcameraandmotionofobject,andtheout-of-focusblurismoresignificantintheapplicationenvironmentoffacerecognition(Eskicioglu&Fisher(1995);Kimetal.(1
998);Tanakaetal.(2007);Yitzhaky&Kopeika(1996)).Toinvestigatetheinfluenceofbluronafacerecognitionsystem,afaceimagedatabasewithdifferentconditionsofclarityandefficientblurevaluatingalgorithmsareneeded.Thischapterintroducesanewfacedatabasebuiltforthepurposeofblurevaluation.Theapplicationenvironmentsoffacerecognitionareanalyzedfirstly,thenaimagegatheringschemeisdesigned.Twotypicalgatheringfacilitiesareusedandthefocusstatusaredividedinto11steps.Further,theblurassessmentalgorithmsaresummarizedandthecomparisonbetweenthemisraisedonthevarious-claritydatabase.The7www.intechopen.com"
+5da2ae30e5ee22d00f87ebba8cd44a6d55c6855e,"When facial expressions do and do not signal minds: The role of face inversion, expression dynamism, and emotion type.","This is an Open Access document downloaded from ORCA, Cardiff University's institutional
+repository: http://orca.cf.ac.uk/111659/
+This is the author’s version of a work that was submitted to / accepted for publication.
+Citation for final published version:
+Krumhuber, Eva G, Lai, Yukun, Rosin, Paul and Hugenberg, Kurt 2018. When facial expressions
+Publishers page:
+Please note:
+Changes made as a result of publishing processes such as copy-editing, formatting and page
+numbers may not be reflected in this version. For the definitive version of this publication, please
+refer to the published source. You are advised to consult the publisher’s version if you wish to cite
+this paper.
+This version is being made available in accordance with publisher policies. See
+http://orca.cf.ac.uk/policies.html for usage policies. Copyright and moral rights for publications
+made available in ORCA are retained by the copyright holders."
+31625522950e82ad4dffef7ed0df00fdd2401436,Motion Representation with Acceleration Images,"Motion Representation with Acceleration Images
+Hirokatsu Kataoka, Yun He, Soma Shirakabe, Yutaka Satoh
+National Institute of Advanced Industrial Science and Technology (AIST)
+Tsukuba, Ibaraki, Japan
+{hirokatsu.kataoka, yun.he, shirakabe-s,"
+3107486fe666a3004b720125bd2b05ff9382fdb8,Generalized two-dimensional linear discriminant analysis with regularization,"JOURNAL OF LATEX CLASS FILES, VOL.
+, NO.
+Generalized two-dimensional linear discriminant
+nalysis with regularization
+Chun-Na Li, Yuan-Hai Shao,Wei-Jie Chen, Zhen Wang and Nai-Yang Deng"
+318e7e6daa0a799c83a9fdf7dd6bc0b3e89ab24a,Sparsity in Dynamics of Spontaneous Subtle Emotions: Analysis and Application,"Sparsity in Dynamics of Spontaneous
+Subtle Emotions: Analysis & Application
+Anh Cat Le Ngo, Member, IEEE, John See, Member, IEEE, Raphael C.-W. Phan, Member, IEEE"
+3137eede6bbada4442e0193dc5918788b7e88aa1,Hyper-class augmented and regularized deep learning for fine-grained image classification,"Hyper-class Augmented and Regularized Deep Learning for Fine-grained Image Classification
+Saining Xie1, Tianbao Yang2 Xiaoyu Wang3, Yuanqing Lin4
+University of California, San Diego. 2University of Iowa. 3Snapchat Research. 4NEC Labs America, Inc.
+Fine-grained image classification (FGIC) is challenging because (i) fine-
+grained labeled data is much more expensive to acquire (usually requir-
+ing domain expertise); (ii) there exists large intra-class and small inter-
+lass variance. In this paper, we propose a systematic framework of learn-
+ing a deep CNN that addresses the challenges from two new perspectives:
+(i) identifying easily annotated hyper-classes inherent in the fine-grained
+data and acquiring a large number of hyper-class-labeled images from read-
+ily available external sources, and formulating the problem into multi-task
+learning, to address the data scarcity issue. We use two common types of
+hyper-classes to augment our data, with one being the super-type hyper-
+lasses that subsume a set of fine-grained classes, and another being named
+factor-type hyper-classes (e.g., different view-points of a car) that explain
+the large intra-class variance. (ii) a novel learning model by exploiting a reg-
+ularization between the fine-grained recognition model and the hyper-class
+recognition model to mitigate the issue of large intra-class variance and im-
+prove the generalization performance. The proposed approach also closely
+relates to attribute-based learning, since one can consider that factor-type"
+31c0968fb5f587918f1c49bf7fa51453b3e89cf7,Deep Transfer Learning for Person Re-identification,"Deep Transfer Learning for Person Re-identification
+Mengyue Geng
+Yaowei Wang
+Tao Xiang
+Yonghong Tian"
+318d7a4bc9c7b1e3a01056815479564ed8ad78a4,University of Oklahoma Graduate College Reinforcement Learning Scheduler for Heterogeneous Multi-core Processors Reinforcement Learning Scheduler for Heterogeneous Multi-core Processors a Thesis Approved for the School of Computer Science,"UNIVERSITY OF OKLAHOMA
+GRADUATE COLLEGE
+REINFORCEMENT LEARNING SCHEDULER FOR HETEROGENEOUS
+MULTI-CORE PROCESSORS
+A THESIS
+SUBMITTED TO THE GRADUATE FACULTY
+in partial fulfillment of the requirements for the
+Degree of
+MASTER OF SCIENCE
+XIAOLEI YAN
+Norman, Oklahoma"
+318eb316c0117059dd47978854cfa92baeaac1d2,Deterministic CUR for Improved Large-Scale Data Analysis: An Empirical Study,"Deterministic CUR for Improved Large-Scale Data Analysis:
+An Empirical Study
+Christian Thurau, Kristian Kersting, and Christian Bauckhage
+Fraunhofer IAIS, Germany"
+3174fceef3cf09ac35e8d1eb4e1b8b73a3b2c713,Unsupervised learning from videos using temporal coherency deep networks,"Computer Vision and Image Understanding
+journal homepage: www.elsevier.com
+Unsupervised learning from videos using temporal coherency deep networks
+Carolina Redondo-Cabreraa,∗∗, Roberto Lopez-Sastrea
+GRAM, University of Alcal´a, Alcal´a de Henares, 28805, Spain"
+31f1c4cf34ce0bb35382c35b2f468cf72bffae0b,Are spatial and global constraints really necessary for segmentation?,"Are Spatial and Global Constraints Really Necessary for Segmentation?
+Aur´elien Lucchi1
+Yunpeng Li1
+Computer Vision Laboratory, EPFL, Lausanne
+Xavier Boix2
+Kevin Smith1
+Pascal Fua1
+BIWI, ETH Zurich"
+3123e97a6b86913d994e44f8d9d5c639e0e2dc96,A Method of Initialization for Nonnegative Matrix Factorization,"A METHOD OF INITIALIZATION FOR NONNEGATIVE MATRIX FACTORIZATION
+Yong-Deok Kim and Seungjin Choi
+Department of Computer Science, POSTECH, Korea
+{karma13,"
+31ea778b6f5c9c2653eb2bed307ac7b02bcc6894,Dense Error Correction via `-Minimization,"IEEE TRANS. ON INFORMATION THEORY, 2009.
+Dense Error Correction via (cid:96)1-Minimization
+John Wright, Member, and Yi Ma, Senior Member."
+316e67550fbf0ba54f103b5924e6537712f06bee,Multimodal semi-supervised learning for image classification,"Multimodal semi-supervised learning
+for image classification
+Matthieu Guillaumin, Jakob Verbeek, Cordelia Schmid
+LEAR team, INRIA Grenoble, France"
+31786e6d5187d7bc41678cbd2d1bf8edf1ddfed9,Capture de mouvements humains par capteurs RGB-D. (Capture human motions by RGB-D sensor ),"Capture de mouvements humains par capteurs RGB-D
+Jean-Thomas Masse
+To cite this version:
+Jean-Thomas Masse. Capture de mouvements humains par capteurs RGB-D. Robotique
+[cs.RO]. Universit´e Paul Sabatier - Toulouse III, 2015. Fran¸cais.
+¡ NNT : 2015TOU30361
+HAL Id: tel-01280163
+https://tel.archives-ouvertes.fr/tel-01280163v2
+Submitted on 26 Apr 2017
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destin´ee au d´epˆot et `a la diffusion de documents
+scientifiques de niveau recherche, publi´es ou non,
+´emanant des ´etablissements d’enseignement et de
+recherche fran¸cais ou ´etrangers, des laboratoires"
+31ef5419e026ef57ff20de537d82fe3cfa9ee741,Facial Expression Analysis Based on High Dimensional Binary Features,"Facial Expression Analysis Based on
+High Dimensional Binary Features
+Samira Ebrahimi Kahou, Pierre Froumenty, and Christopher Pal
+´Ecole Polytechique de Montr´eal, Universit´e de Montr´eal, Montr´eal, Canada
+{samira.ebrahimi-kahou, pierre.froumenty,"
+3176ee88d1bb137d0b561ee63edf10876f805cf0,Recombinator Networks: Learning Coarse-to-Fine Feature Aggregation,"Recombinator Networks: Learning Coarse-to-Fine Feature Aggregation
+Sina Honari1, Jason Yosinski2, Pascal Vincent1,4, Christopher Pal3
+University of Montreal, 2Cornell University, 3Ecole Polytechnique of Montreal, 4CIFAR
+{honaris,"
+310a88a60ffa2d8a0fa7ef9fc77fa842d16eed57,View Invariant Gait Recognition,"View Invariant Gait Recognition
+Richard D. Seely, Michela Goffredo, John N. Carter and Mark S. Nixon"
+3151b110ecdcf2105def494bfb0775f21259d7e8,Asymmetric Cuts: Joint Image Labeling and Partitioning,"Asymmetric Cuts : Joint Image Labeling and
+Partitioning
+Thorben Kroeger1, J¨org H. Kappes2, Thorsten Beier1, Ullrich Koethe1 and
+Fred A. Hamprecht1,2
+Multidimensional Image Processing Group, Heidelberg University
+Heidelberg Collaboratory for Image Processing, Heidelberg University"
+31ace8c9d0e4550a233b904a0e2aabefcc90b0e3,Learning Deep Face Representation,"Learning Deep Face Representation
+Haoqiang Fan
+Megvii Inc.
+Zhimin Cao
+Megvii Inc.
+Yuning Jiang
+Megvii Inc.
+Qi Yin
+Megvii Inc.
+Chinchilla Doudou
+Megvii Inc."
+312b807a24b8c30876c1750530b08e4d9627e231,Increasing Trustworthiness of Face Authentication in Mobile Devices by Modeling Gesture Behavior and Location Using Neural Networks,"Article
+Increasing Trustworthiness of Face Authentication in
+Mobile Devices by Modeling Gesture Behavior and
+Location Using Neural Networks
+Blerim Rexha 1 ID , Gresa Shala 2,* and Valon Xhafa 3
+Faculty of Electrical and Computer Engineering, University of Prishtina, Kodra e Diellit p.n.,
+0000 Prishtina, Kosovo;
+Department of Computer Science, Freiburg University, Georges-Köhler Alley 101,
+79110 Freiburg im Breisgau, Germany
+Department of Informatics, Technical University of Munich, Boltzmannstraße 3,
+85748 Garching bei München, Germany;
+* Correspondence:
+Received: 18 January 2018; Accepted: 2 February 2018; Published: 5 February 2018"
+31afdb6fa95ded37e5871587df38976fdb8c0d67,Quantized fuzzy LBP for face recognition,"QUANTIZED FUZZY LBP FOR FACE RECOGNITION
+Jianfeng
+Xudong Jiang,
+Junsong
+BeingThere
+Centre
+Institute
+of Media Innovation
+Nanyang
+50 Nanyang
+Technological
+Singapore
+Drive,
+637553.
+University
+School of Electrical
+& Electronics
+Engineering
+Nanyang
+50 Nanyang"
+318c4c25d86511690cc5df7b041a6392e8cc4ea8,Fashion-Gen: The Generative Fashion Dataset and Challenge,"Fashion-Gen: The Generative Fashion Dataset and Challenge
+Negar Rostamzadeh 1 Seyedarian Hosseini 1 2 Thomas Boquet 1 Wojciech Stokowiec 1 Ying Zhang 1
+Christian Jauvin 1 Chris Pal 3 1"
+316bed02e22aa6742dffcd50c29a7365c5a5a437,Representation Learning for Visual-Relational Knowledge Graphs,"Representation Learning for Visual-Relational
+Knowledge Graphs
+Daniel Oñoro-Rubio, Mathias Niepert, Alberto García-Durán, Roberto
+González-Sánchez and Roberto J. López-Sastre*
+NEC Labs Europe, Alcalá de Henares*
+{daniel.onoro, mathias.niepert, alberto.duran,
+https://github.com/nle-ml/mmkb.git"
+317f5a56519df95884cce81cfba180ee3adaf5a5,Operator-In-The-Loop Deep Sequential Multi-camera Feature Fusion for Person Re-identification,"FusionCam C1Cam C2Classical re-id schemeProposed re-idschemeQueryQueryRanked List: Cam 𝐶1Ranked List: Cam 𝐶2Ranked List: Cam 𝐶1OperatorFeedbackRanked List: Cam 𝐶2Fig.1:(Top)Classicalre-idschemewherequeryimage’sfeaturerepresentationisusedtosearcheachcamerainthenetworkinde-pendently.Theretrievedlistsarereturnedtothehumanoperator.(Bottom)Ourproposedsequentialre-idschemewhereoperatorfeedbackregardingtargetsightingisutilizedtowardsbetterre-idperformanceinanonlinefashion.Inthefigure,cameraC1isqueriedfirstandrankedlistofmatchesisobtained.Thecorrectmatch(pinkbox)inretrievedrankedlistisidentifiedbyoperator.Thecorrectmatchisfusedwithqueryimageatfeaturelevel(orangeblock).ThisfusedrepresentationisusedtoquerycameraC2.NoticethatrankingofquerytargetinC2’slistimprovesinourapproachunliketheclassicalversionwhichcannotexploitoperatorinputstoimprovesubsequentqueries.arXiv:1807.07295v3 [cs.CV] 6 Nov 2018"
+3130eb9bfab5e5a095ab989ba3cc6a2ec62c156d,Generating Facial Ground Truth with Synthetic Faces,"Generating Facial Ground Truth with Synthetic Faces
+Rossana Queiroz, Marcelo Cohen, Juliano L. Moreira, Adriana Braun, J´ulio C. Jacques J´unior, Soraia Raupp Musse
+Pontif´ıcia Universidade Cat´olica do Rio Grande do Sul - PUCRS
+Graduate Programme in Computer Science
+Virtual Human Laboratory -www.inf.pucrs.br/∼vhlab
+Porto Alegre, Brazil
+Figure 1. A sample of 3D faces generated by our prototype."
+31b9251dedce1e10467a0a33f56ac4eb05ed0451,Viewpoint-dependent 3D human body posing for sports legacy recovery from images and video,"VIEWPOINT-DEPENDENT 3D HUMAN BODY POSING FOR SPORTS LEGACY
+RECOVERY FROM IMAGES AND VIDEO
+Luis Unzueta, Jon Goenetxea, Mikel Rodriguez and Maria Teresa Linaza
+Vicomtech-IK4, Paseo Mikeletegi, 57, Parque Tecnológico, 20009, Donostia, Spain"
+31ca0d6488a27a140263291c51ec924b8a49967b,"Show, Ask, Attend, and Answer: A Strong Baseline For Visual Question Answering","Show, Ask, Attend, and Answer:
+A Strong Baseline For Visual Question Answering
+Vahid Kazemi
+Ali Elqursh
+Google Research
+600 Amphitheater Parkway
+{vahid,"
+31ea3186aa7072a9e25218efe229f5ee3cca3316,A ug 2 01 7 Reinforced Video Captioning with Entailment Rewards,"Reinforced Video Captioning with Entailment Rewards
+Ramakanth Pasunuru and Mohit Bansal
+UNC Chapel Hill
+{ram,"
+318f7b59fc22d6326f77b24939860b0137bf8e77,Multiple Classifier Boosting and Tree-Structured Classifiers,"Multiple Classifier Boosting and
+Tree-Structured Classifiers
+Tae-Kyun Kim and Roberto Cipolla"
+31470cf8fda53c4460de4373e5ac4544236c44af,Biased information processing as an endophenotype for depression,"PDF hosted at the Radboud Repository of the Radboud University
+Nijmegen
+The following full text is a publisher's version.
+For additional information about this publication click this link.
+http://repository.ubn.ru.nl/handle/2066/127113
+Please be advised that this information was generated on 2017-04-19 and may be subject to
+hange."
+318ee553c61888f2418280cb1d342c698d3444c9,Towards face unlock: on the difficulty of reliably detecting faces on mobile phones,"Towards Face Unlock: On the Difficulty of Reliably
+Detecting Faces on Mobile Phones
+Rainhard D. Findling
+Softwarepark 11
+Hagenberg, Austria
+Rene Mayrhofer
+Softwarepark 11
+Hagenberg, Austria
+Department for Mobile Computing
+Upper Austria University of Applied Sciences
+Department for Mobile Computing
+Upper Austria University of Applied Sciences"
+318985dc2b8d5a1882b709eedeaac4a2e7de1d81,Accelerating Message Passing for MAP with Benders Decomposition,"Accelerating Message Passing for MAP with
+Benders Decomposition
+Julian Yarkony
+Experian Data Lab.
+Shaofei Wang
+Baidu Inc.
+May 15, 2018"
+31fc3b044ec908f7f61386422727ef23784178c0,Enhancing Face Recognition using Average per Region,"International Journal of Computer Applications (0975 – 8887)
+Volume 65– No.3, March 2013
+Enhancing Face Recognition using Average per Region
+Basheer M. Nasef
+Teaching Assistant
+Dept of Computer and Systems Engineering,
+Zagazig University, Sharkia, Egypt
+Ibrahim E. Ziedan
+Dept of Computer and Systems Engineering,
+Professor
+Zagazig University, Sharkia, Egypt"
+31c174f2190889d5792358713e078336926d7ee4,Image Categorization Using Codebooks Built from Scored and Selected Local Features,"Image Categorization using Codebooks Built from
+Scored and Selected Local Features
+Department of Computer Science, Northern Illinois University DeKalb IL USA 60115
+Bala S. Divakaruni and Jie Zhou
+follows
+(M&C) process"
+31d30089d00d89715167ca4a130a5d262e1d79d3,"Fawzi, Frossard: Measuring the Effect of Nuisance Variables","FAWZI, FROSSARD: MEASURING THE EFFECT OF NUISANCE VARIABLES
+Measuring the effect of nuisance variables
+on classifiers
+Alhussein Fawzi
+Pascal Frossard
+Signal Processing Laboratory (LTS4)
+Ecole Polytechnique Fédérale de
+Lausanne (EPFL)
+Lausanne, Switzerland"
+3137870bf1314e25c2246d4a9d77d941aadd5398,Influence of Positive Instances on Multiple Instance Support Vector Machines,"Influence of Positive Instances on
+Multiple Instance Support Vector Machines
+Nuno Barroso Monteiro1,2, Jo˜ao Pedro Barreto2, and Jos´e Gaspar1
+Institute for Systems and Robotics (ISR/IST), LARSyS, Univ. of Lisbon, Portugal
+Institute for Systems and Robotics, Univ. of Coimbra, Portugal"
+9175b123837ecf55a9aae6c40ba245ddacbc37d5,Various Fusion Schemes to Recognize Simulated and Spontaneous Emotions,"Various Fusion Schemes to Recognize Simulated and Spontaneous
+Emotions
+Sonia Gharsalli1, H´el`ene Laurent2, Bruno Emile1 and Xavier Desquesnes1
+Univ. Orl´eans, INSA CVL,
+PRISME EA 4229, Bourges, France
+on secondment from INSA CVL, Univ. Orl´eans,
+PRISME EA 4229, Bourges, France
+to the Rector of the Academy of Strasbourg, Strasbourg, France
+Keywords:
+Facial Emotion Recognition, Posed Expression, Spontaneous Expression, Early Fusion, Late Fusion, SVM,
+FEEDTUM Database, CK+ Database."
+91811203c2511e919b047ebc86edad87d985a4fa,Expression Subspace Projection for Face Recognition from Single Sample per Person,"Expression Subspace Projection for Face
+Recognition from Single Sample per Person
+Hoda Mohammadzade, Student Member, IEEE, and Dimitrios Hatzinakos, Senior Member, IEEE"
+912f1f57a010194047b6438cc1ea6bec95c6c2b8,ContextVP: Fully Context-Aware Video Prediction,"ContextVP: Fully Context-Aware Video
+Prediction
+Wonmin Byeon1,2,3,4, Qin Wang2,
+Rupesh Kumar Srivastava4, and Petros Koumoutsakos2
+NVIDIA, Santa Clara, CA, USA
+ETH Zurich, Zurich, Switzerland
+The Swiss AI Lab IDSIA, Manno, Switzerland
+NNAISENSE, Lugano, Switzerland"
+91f67f69597a52b905c748a15db427c61f352073,Scale-Aware Pixelwise Object Proposal Networks,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+Scale-aware Pixel-wise Object Proposal Networks
+Zequn Jie, Xiaodan Liang, Jiashi Feng, Wen Feng Lu, Eng Hock Francis Tay, Shuicheng Yan
+essential
+proposal"
+91edca64a666c46b0cbca18c3e4938e557eeb21a,Guiding InfoGAN with Semi-supervision,"Guiding InfoGAN with Semi-Supervision
+Adrian Spurr, Emre Aksan, and Otmar Hilliges
+Advanced Interactive Technologies, ETH Zurich
+{adrian.spurr, emre.aksan,"
+912f6a6ac8703e095d21e2049da4871cc6d4d23b,Partitioning Networks with Node Attributes by Compressing Information Flow,"Partitioning Networks with Node Attributes by
+Compressing Information Flow
+Laura M. Smith
+Department of Mathematics
+California State University
+Fullerton, CA
+Kristina Lerman
+Information Sciences Institute
+U. of Southern California
+Marina del Rey, CA 90292
+Linhong Zhu
+Information Sciences Institute
+U. of Southern California
+Marina del Rey, CA 90292
+Allon G. Percus
+Claremont Graduate U.
+Claremont, CA 91711"
+91dda4183c6118de8195e07a623962dbd22cc34e,Representing local binary descriptors with BossaNova for visual recognition,"Representing Local Binary Descriptors with
+BossaNova for Visual Recognition
+Carlos Caetano†, Sandra Avila†, Silvio Guimarães‡, Arnaldo de A. Araújo†
+Federal University of Minas Gerais, NPDI Lab — DCC/UFMG, Minas Gerais, Brazil
+Pontifical Catholic University of Minas Gerais, VIPLAB — ICEI/PUC Minas, Minas Gerais, Brazil
+{carlos.caetano,"
+9117fd5695582961a456bd72b157d4386ca6a174,Recognition Using Dee Networks,"Facial Expression
+n Recognition Using Dee
+ep Neural
+Networks
+Junnan Li and Edmund Y. Lam
+Departm
+ment of Electrical and Electronic Engineering
+he University of Hong Kong, Pokfulam,
+Hong Kong"
+91067f298e1ece33c47df65236853704f6700a0b,Local Binary Pattern and Local Linear Regression for Pose Invariant Face Recognition,"IJSTE - International Journal of Science Technology & Engineering | Volume 2 | Issue 11 | May 2016
+ISSN (online): 2349-784X
+Local Binary Pattern and Local Linear
+Regression for Pose Invariant Face Recognition
+Raju Dadasab Patil
+M. Tech Student
+Shreekumar T
+Associate Professor
+Department of Computer Science & Engineering
+Department of Computer Science & Engineering
+Mangalore Institute of Engineering & Technology, Badaga
+Mangalore Institute of Engineering & Technology, Badaga
+Mijar, Moodbidri, Mangalore
+Mijar, Moodbidri, Mangalore
+Karunakara K
+Professor & Head of Dept.
+Department of Information Science & Engineering
+Sri SidarthaInstitute of Technology, Tumkur"
+91b0081a348d182d616f74a0c9fb80d56acf4198,Exploiting photographic style for category-level image classification by generalizing the spatial pyramid,"Exploiting Photographic Style for Category-Level Image
+Classification by Generalizing the Spatial Pyramid
+Jan C. van Gemert
+Puzzual
+Oudeschans 18
+011LA, Amsterdam, The Netherlands"
+91a7816609f991c1ac45b791c1cd3c6117194bb0,I Know How You Feel: Emotion Recognition with Facial Landmarks,"I Know How You Feel: Emotion Recognition with Facial Landmarks
+Tooploox 2Polish-Japanese Academy of Information Technology 3Warsaw University of Technology
+Ivona Tautkute1,2, Tomasz Trzcinski1,3 and Adam Bielski1"
+919d3067bce76009ce07b070a13728f549ebba49,Time Based Re-ranking for Web Image Search,"International Journal of Scientific and Research Publications, Volume 4, Issue 6, June 2014
+ISSN 2250-3153
+Time Based Re-ranking for Web Image Search
+Ms. A.Udhayabharadhi *, Mr. R.Ramachandran **
+* MCA Student, Sri Manakula Vinayagar Engineering College, Pondicherry-605106
+** Assistant Professor dept of MCA, Sri Manakula Vinayagar Engineering College, Pondicherry-605106"
+91f820e2cb6fb5a8adc83e6065cbdf071aca84bd,What makes Federer look so elegant?,"What makes Federer look so elegant?
+Kuldeep Kulkarni and Vinay Venkataraman"
+91e57667b6fad7a996b24367119f4b22b6892eca,Probabilistic Corner Detection for Facial Feature,"Probabilistic Corner Detection for Facial Feature
+Extraction
+Article
+Accepted version
+E. Ardizzone, M. La Cascia, M. Morana
+In Lecture Notes in Computer Science Volume 5716, 2009
+It is advisable to refer to the publisher's version if you intend to cite
+from the work.
+Publisher: Springer
+http://link.springer.com/content/pdf/10.1007%2F978-3-
+642-04146-4_50.pdf"
+917611cfc0fee3e834d1a6cc13ad5bc18ae428f3,Geometric models with co-occurrence groups,"Geometric Models with Co-occurrence Groups
+Joan Bruna1
+and St´ephane Mallat2
+8/16 rue Paul Vaillant Couturier, 92240, Malakoff - France
+- Zoran France
+- Ecole Polytechnique - CMAP
+Route de Saclay, 91128 Palaiseau - France"
+917bea27af1846b649e2bced624e8df1d9b79d6f,Ultra Power-Efficient CNN Domain Specific Accelerator with 9.3TOPS/Watt for Mobile and Embedded Applications,"Ultra Power-Efficient CNN Domain Specific Accelerator with 9.3TOPS/Watt for
+Mobile and Embedded Applications
+Baohua Sun,
+Lin Yang,
+Patrick Dong, Wenhan Zhang,
+Gyrfalcon Technology Inc.
+Jason Dong, Charles Young
+900 McCarthy Blvd. Milpitas, CA 95035"
+91b1a59b9e0e7f4db0828bf36654b84ba53b0557,Simultaneous Hallucination and Recognition of Low-Resolution Faces Based on Singular Value Decomposition,"This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI
+> REPLACE THIS LINE WITH YOUR PAPER IDENTIFICATION NUMBER (DOUBLE-CLICK HERE TO EDIT) <
+Simultaneous Hallucination and Recognition of
+Low-Resolution Faces Based on Singular Value
+Decomposition
+Muwei Jian, Kin-Man Lam*, Senior Member, IEEE
+(SVD)
+for performing both"
+911bef7465665d8b194b6b0370b2b2389dfda1a1,Learning Human Optical Flow,"RANJAN, ROMERO, BLACK: LEARNING HUMAN OPTICAL FLOW
+Learning Human Optical Flow
+MPI for Intelligent Systems
+Tübingen, Germany
+Amazon Inc.
+Anurag Ranjan1
+Javier Romero∗,2
+Michael J. Black1"
+91ead35d1d2ff2ea7cf35d15b14996471404f68d,Combining and Steganography of 3D Face Textures,"Combining and Steganography of 3D Face Textures
+Mohsen Moradi and Mohammad-Reza Rafsanjani-Sadeghi"
+91c014ff243ea747ea3a84a9efd4a3e38a7217ee,Reinforced Temporal Attention and Split-Rate Transfer for Depth-Based Person Re-identification,"Reinforced Temporal Attention and Split-Rate
+Transfer for Depth-Based Person
+Re-Identification
+Nikolaos Karianakis1, Zicheng Liu1, Yinpeng Chen1, and Stefano Soatto2
+Microsoft, Redmond, USA
+University of California, Los Angeles, USA"
+919e827c449ca77bcff4ce5f2ccbccdab8399ac6,Generative Entity Networks: Disentangling Enti-,"Under review as a conference paper at ICLR 2018
+GENERATIVE ENTITY NETWORKS: DISENTANGLING ENTI-
+TIES AND ATTRIBUTES IN VISUAL SCENES USING PARTIAL
+NATURAL LANGUAGE DESCRIPTIONS
+Anonymous authors
+Paper under double-blind review"
+914fd65d29094e434346806bdddeb17d9468610d,Scene Text Recognition in Mobile Applications by Character Descriptor and Structure Configuration,"IJRET: International Journal of Research in Engineering and Technology eISSN: 2319-1163 | pISSN: 2321-7308
+SCENE TEXT RECOGNITION IN MOBILE APPLICATIONS BY
+CHARACTER DESCRIPTOR AND STRUCTURE CONFIGURATION
+Sathish Kumar Penchala1, Pallavi S.Umap2
+Assistant Professor, Dept. of Computer Engineering, Dr. D.Y.Patil SOET., Lohegaon, Pune-47, Maharashtra, India
+ME 2nd year, Dept. of Computer Engineering, Dr.D.Y.Patil SOET., Lohegaon, Pune-47, Maharashtra India"
+91d513af1f667f64c9afc55ea1f45b0be7ba08d4,Automatic Face Image Quality Prediction,"Automatic Face Image Quality Prediction
+Lacey Best-Rowden, Student Member, IEEE, and Anil K. Jain, Life Fellow, IEEE"
+91e58c39608c6eb97b314b0c581ddaf7daac075e,Pixel-wise Ear Detection with Convolutional Encoder-Decoder Networks,"Pixel-wise Ear Detection with Convolutional
+Encoder-Decoder Networks
+ˇZiga Emerˇsiˇc 1, Luka Lan Gabriel 2, Vitomir ˇStruc 3 and Peter Peer 1"
+910da5e0afef96c8acca3c6a4314a9ab5121b1e4,Détection d'obstacles multi-capteurs supervisée par stéréovision. (Multi-sensor road obstacle deetection controled by stereovision),"Détection d’obstacles multi-capteurs supervisée par
+stéréovision
+Mathias Perrollaz
+To cite this version:
+Mathias Perrollaz. Détection d’obstacles multi-capteurs supervisée par stéréovision. Vision par ordi-
+nateur et reconnaissance de formes [cs.CV]. Université Pierre et Marie Curie - Paris VI, 2008. Français.
+<tel-00656864>
+HAL Id: tel-00656864
+https://tel.archives-ouvertes.fr/tel-00656864
+Submitted on 5 Jan 2012
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de"
+91ee88754cc7a193d51656a3b53e16389bf4aadb,Fast and accurate algorithm for eye localisation for gaze tracking in low-resolution images,"Fast and Accurate Algorithm for Eye Localization
+for Gaze Tracking in Low Resolution Images
+Anjith George, Member, IEEE, and Aurobinda Routray, Member, IEEE"
+91bdc706ad1d7b246e457870a7eb8caff87ec05a,Face Recognition Using Holistic Based Approach,"International Journal of Emerging Technology and Advanced Engineering
+Website: www.ijetae.com (ISSN 2250-2459, ISO 9001:2008 Certified Journal, Volume 4, Issue 7, July 2014)
+Face Recognition Using Holistic Based Approach
+1Research Scholar, 2Professor, Department of Information Science and Engineering, SDM CET, Dharwad
+Vandana S. Bhat1, Dr. Jagadeesh D. Pujari2"
+9168b36568b8abffab5b9de029be5941f673dca2,Improving 3D Facial Action Unit Detection with Intrinsic Normalization,"YUDIN, ET AL.: IMPROVING 3D AU DETECTION WITH INTRINSIC NORMALIZATION
+Improving 3D Facial Action Unit Detection
+with Intrinsic Normalization
+Geometric Image Processing Lab
+Technion - Israel Institute of Technology
+Technion City, Haifa, Israel
+Eric Yudin
+Aaron Wetzler
+Matan Sela
+Ron Kimmel"
+916ca7000c022fbd97ea15cc0094f0e53c408b56,Spontaneous and Non-Spontaneous 3D Facial Expression Recognition Using a Statistical Model with Global and Local Constraints,"SPONTANEOUS AND NON-SPONTANEOUS 3D FACIAL EXPRESSION RECOGNITION
+USING A STATISTICAL MODEL WITH GLOBAL AND LOCAL CONSTRAINTS"
+91eae81dbba3013261292296bb929a18d73b447f,Utilization of Interest Point Detectors in Content Based Image Retrieval,"Ročník 2011
+Číslo II
+Utilization of Interest Point Detectors in Content Based Image Retrieval
+M. Zukal 1, P. Číka1
+Department of Telecommunications, Faculty of Electrical Engineering, BUT, Brno,
+E-mail :
+Purkyňova 118, Brno"
+91ddac7d1d63c52cbe30fe27674b9c1e54bc584c,Development of Eye-Blink and Face Corpora for Research in Human Computer Interaction,"(IJACSA) International Journal of Advanced Computer Science and Applications,
+Vol. 6, No. 5, 2015
+Development of Eye-Blink and Face Corpora for
+Research in Human Computer Interaction
+Emmanuel Jadesola Adejoke.
+Dept. of Computer science
+Bingham University
+Nassarawa, Nigeria
+Ibiyemi Tunji Samuel
+Dept. of Electrical Engineering
+University of Ilorin
+Ilorin, Nigeria
+oded
+voluntary
+eye-blink based
+language communication depends"
+9131c990fad219726eb38384976868b968ee9d9c,Deep Facial Expression Recognition: A Survey,"Deep Facial Expression Recognition: A Survey
+Shan Li and Weihong Deng∗, Member, IEEE"
+911505a4242da555c6828509d1b47ba7854abb7a,Improved Active Shape Model for Facial Feature Localization,"IMPROVED ACTIVE SHAPE MODEL FOR FACIAL FEATURE LOCALIZATION
+Hui-Yu Huang and Shih-Hang Hsu
+National Formosa University, Taiwan
+Email:"
+915d4a0fb523249ecbc88eb62cb150a60cf60fa0,Comparison of Feature Extraction Techniques in Automatic Face Recognition Systems for Security Applications,"Comparison of Feature Extraction Techniques in Automatic
+Face Recognition Systems for Security Applications
+S . Cruz-Llanas, J. Ortega-Garcia, E. Martinez-Torrico, J. Gonzalez-Rodriguez
+Dpto. Ingenieria Audiovisual y Comunicaciones, EUIT Telecomunicacion, Univ. PolitCcnica de Madrid, Spain
+{cruzll, jortega, etorrico,
+http://www.atvs.diac.upm.es"
+65126e0b1161fc8212643b8ff39c1d71d262fbc1,Occlusion Coherence: Localizing Occluded Faces with a Hierarchical Deformable Part Model,"Occlusion Coherence: Localizing Occluded Faces with a
+Hierarchical Deformable Part Model
+Golnaz Ghiasi Charless C. Fowlkes
+Dept. of Computer Science, University of California, Irvine"
+657ae9ecb59cb2a27e57784577a9efb60de81126,The Task Matters: Comparing Image Captioning and Task-Based Dialogical Image Description,"The Task Matters: Comparing Image
+Captioning and Task-Based Dialogical Image Description
+Nikolai Ilinykh, Sina Zarrieß, David Schlangen
+Dialogue Systems Group
+University of Bielefeld
+Germany"
+6582f4ec2815d2106957215ca2fa298396dde274,Discriminative Learning and Recognition of Image Set Classes Using Canonical Correlations,"JUNE 2007
+Discriminative Learning and Recognition
+of Image Set Classes Using
+Canonical Correlations
+Tae-Kyun Kim, Josef Kittler, Member, IEEE, and Roberto Cipolla, Member, IEEE"
+656e7c7739e3f334d4f275c71499485501aabc44,A Two-Step Methodology for Human Pose Estimation Increasing the Accuracy and Reducing the Amount of Learning Samples Dramatically,"A two-step methodology for human pose
+estimation increasing the accuracy and reducing
+the amount of learning samples dramatically
+Samir Azrour, Sébastien Piérard, Pierre Geurts, and Marc Van Droogenbroeck
+INTELSIG Laboratory, Department of Electrical Engineering and Computer Science,
+University of Liège, Belgium"
+65eff143b099e53dcf39692c2fb542b0ee1fdfb6,Real-time Scale-invariant Object Recognition from Light Field Imaging,
+65639b79576f22b705a601f062bb6905f0a396af,A Preliminary Investigation into the Impact of Training for Example-Based Facial Blendshape Creation,"EUROGRAPHICS 2018/ O. Diamanti and A. Vaxman
+Short Paper
+A Preliminary Investigation into the Impact of Training for
+Example-Based Facial Blendshape Creation
+Emma Carrigan1, Ludovic Hoyet2, Rachel McDonnell1 and Quentin Avril3
+Graphics Vision and Visualisation Group, Trinity College Dublin, Ireland
+Inria Rennes, France 3 Technicolor"
+65539436abf0eedabeb915a52f787b962722c99a,Satellite Image Classification via Two-Layer Sparse Coding With Biased Image Representation,"Satellite Image Classification via Two-Layer Sparse
+Coding With Biased Image Representation
+Dengxin Dai and Wen Yang, Member, IEEE"
+658c802890c7133e2ade778b5d88b68bcd0dca9c,Learning to Segment via Cut-and-Paste,"Learning to Segment via Cut-and-Paste
+Tal Remez, Jonathan Huang, Matthew Brown
+Google"
+65b1209d38c259fe9ca17b537f3fb4d1857580ae,Information Constraints on Auto-Encoding Variational Bayes,"Information Constraints on Auto-Encoding Variational Bayes
+Romain Lopez1, Jeffrey Regier1, Michael I. Jordan1,2, and Nir Yosef1,3,4
+{romain_lopez, regier,
+Department of Electrical Engineering and Computer Sciences, University of California, Berkeley
+Department of Statistics, University of California, Berkeley
+Ragon Institute of MGH, MIT and Harvard
+Chan-Zuckerberg Biohub"
+651125ca22947e95e5be6206c3056988b850266a,Swifter: improved online video scrubbing,"Swifter: Improved Online Video Scrubbing
+Justin Matejka, Tovi Grossman, and George Fitzmaurice
+Autodesk Research, Toronto, Ontario, Canada
+Figure 1. Scrubbing behavior of a traditional streaming video player, the Swift interface [16], and our new Swifter
+interface, which shows multiple frames around the active timeline location and allows for direct selection of each frame.
+Traditional
+Swift
+Swifter"
+655d9ba828eeff47c600240e0327c3102b9aba7c,Kernel pooled local subspaces for classification,"IEEE TRANSACTIONS ON SYSTEMS, MAN, AND CYBERNETICS—PART B: CYBERNETICS, VOL. 35, NO. 3, JUNE 2005
+Kernel Pooled Local Subspaces for Classification
+Peng Zhang, Student Member, IEEE, Jing Peng, Member, IEEE, and Carlotta Domeniconi"
+656a59954de3c9fcf82ffcef926af6ade2f3fdb5,Convolutional Network Representation for Visual Recognition,"Convolutional Network Representation
+for Visual Recognition
+ALI SHARIF RAZAVIAN
+Doctoral Thesis
+Stockholm, Sweden, 2017"
+650f4ccbe7d4aa49ae80e246df394ca6c60894ec,Department of Informatics,"DEPARTMENT OF INFORMATICS
+TECHNISCHE UNIVERSITÄT MÜNCHEN
+Bachelor’s Thesis in Informatics
+Pedestrian detection in urban environments
+ased on vision and depth data
+Andreas Kreutz"
+652d3f33fd0a99808dd646aed228b45eacdaf34f,A Framework for Binding and Retrieving Class-Specific Information to and from Image Patterns Using Correlation Filters,"A Framework for Binding and Retrieving
+Class-Specific Information to and from Image
+Patterns using Correlation Filters
+Vishnu Naresh Boddeti, Student Member, IEEE, and B.V.K Vijaya Kumar, Fellow, IEEE"
+65edab091e437d3b9d093dcb8be7c5dc4ce0fe0f,DeepOrgan: Multi-level Deep Convolutional Networks for Automated Pancreas Segmentation,"DeepOrgan: Multi-level Deep Convolutional
+Networks for Automated Pancreas Segmentation
+Holger R. Roth, Le Lu, Amal Farag, Hoo-Chang Shin, Jiamin Liu,
+Evrim B. Turkbey, and Ronald M. Summers
+Imaging Biomarkers and Computer-Aided Diagnosis Laboratory, Radiology and
+Imaging Sciences, National Institutes of Health Clinical Center, Bethesda, MD
+0892-1182, USA"
+65237b5e96c7492a0e5d01ddea5b1d381da408cd,A human-machine collaborative approach to tracking human movement in multi-camera video,"A Human-Machine Collaborative Approach to Tracking
+Human Movement in Multi-Camera Video
+Philip DeCamp
+MIT Media Lab
+0 Ames Street, E15-441
+Cambridge, Massachusetts 02139
+Deb Roy
+MIT Media Lab
+0 Ames Street, E15-488
+Cambridge, Massachusetts 02139"
+656aeb92e4f0e280576cbac57d4abbfe6f9439ea,Use of Image Enhancement Techniques for Improving Real Time Face Recognition Efficiency on Wearable Gadgets,"Journal of Engineering Science and Technology
+Vol. 12, No. 1 (2017) 155 - 167
+© School of Engineering, Taylor’s University
+USE OF IMAGE ENHANCEMENT TECHNIQUES
+FOR IMPROVING REAL TIME FACE RECOGNITION EFFICIENCY
+ON WEARABLE GADGETS
+MUHAMMAD EHSAN RANA1,*, AHMAD AFZAL ZADEH2,
+AHMAD MOHAMMAD MAHMOOD ALQURNEH3
+, 3Asia Pacific University of Technology & Innovation, Kuala Lumpur 57000, Malaysia
+Staffordshire University, Beaconside Stafford ST18 0AB, United Kingdom
+*Corresponding Author:"
+65a858ca95dcfa032e812a7f1fc7ee5bdac88f5b,Using Pre-Trained Models for Fine-Grained Image Classification in Fashion Field,"Using Pre-Trained Models for Fine-Grained Image
+Classification in Fashion Field
+Anna Iliukovich-Strakovskaia
+Moscow Institute of Physics and
+Technology
+Moscow Institute of Physics and
+Alexey Dral
+Technology
+“А” Kerchenskaya st., Moscow,
+17303, Russian Federation
+“А” Kerchenskaya st., Moscow,
+17303, Russian Federation
++7 495 408 45 54
++7 495 408 45 54
+Emeli Dral
+Moscow Institute of Physics and
+Technology & Yandex Data Factory
+“А” Kerchenskaya st., Moscow,
+17303, Russian Federation
++7 495 408 45 54"
+6527cf0b9dbddbd0c6429a35a3cbded3ca336583,MCMC Supervision for People Re-identification in Nonoverlapping Cameras,"MEDEN, LERASLE, SAYD: MCMC TRACKING-BY-REIDENTICATION
+MCMC Supervision for People
+Reidentification in Nonoverlapping Cameras
+Boris Meden1
+Frédéric Lerasle2
+lerasle.laas.fr
+Patrick Sayd1
+CEA, LIST,
+Laboratoire Vision et Ingénierie des
+Contenus,
+BP 94, F-91191 Gif-sur-Yvette, France
+CNRS ; LAAS ;
+Université de Toulouse ; UPS, LAAS ;
+F-31077 Toulouse Cedex 4, France"
+656f05741c402ba43bb1b9a58bcc5f7ce2403d9a,Supervised Learning Approaches for Automatic Structuring of Videos. (Méthodes d'apprentissage supervisé pour la structuration automatique de vidéos),"THÈSEPour obtenir le grade deDOCTEUR DE L’UNIVERSITÉ GRENOBLE ALPESSpécialité : Mathématiques et InformatiqueArrêté ministériel : 7 août 2006Présentée parDanila POTAPOVThèse dirigée par Cordelia SCHMID et codirigée par Zaid HARCHAOUIpréparée au sein de Inria Grenoble Rhône-Alpesdans l'École Doctorale Mathématiques, Sciences et technologies de l'information, InformatiqueSupervised Learning Approaches for Automatic Structuring of VideosThèse soutenue publiquement le « 22 Juillet 2015 »,devant le jury composé de : Prof. Cordelia SCHMID Inria Grenoble Rhône-Alpes, France, Directeur de thèseDr. Zaid HARCHAOUIInria Grenoble Rhône-Alpes, France, Co-encadrant de thèse Prof. Patrick PEREZTechnicolor Rennes, France, RapporteurProf. Ivan LAPTEVInria Paris Rocquencourt, France, Rapporteur, PrésidentDr. Florent PERRONNINFacebook AI Research, Paris, France, ExaminateurDr. Matthijs DOUZEInria Grenoble Rhône-Alpes, France, Examinateur"
+659fc2a483a97dafb8fb110d08369652bbb759f9,Improving the Fisher Kernel for Large-Scale Image Classification,"Improving the Fisher Kernel
+for Large-Scale Image Classification
+Florent Perronnin, Jorge S´anchez, and Thomas Mensink
+Xerox Research Centre Europe (XRCE)"
+656a5d4d84c450792402b3c69eecbdbca4cad4cb,2.1. Imagenet and Related Datasets,"Figure 4: Percent of clean images at different tree depth levels in
+ImageNet. A total of 80 synsets are randomly sampled at every
+tree depth of the mammal and vehicle subtrees. An independent
+group of subjects verified the correctness of each of the images.
+An average of 99.7% precision is achieved for each synset.
+ImageNet
+TinyImage
+LabelMe
+LHill
+LabelDisam
+Clean
+DenseHie
+FullRes
+PublicAvail
+Segmented
+Table 1: Comparison of some of the properties of ImageNet ver-
+sus other existing datasets.
+ImageNet offers disambiguated la-
+els (LabelDisam), clean annotations (Clean), a dense hierarchy
+(DenseHie), full resolution images (FullRes) and is publicly avail-"
+65874dd7220664762b5b25f47460b623a7eb0175,Tree Crown Mapping in Managed Woodlands (Parklands) of Semi-Arid West Africa Using WorldView-2 Imagery and Geographic Object Based Image Analysis,"Sensors 2014, 14, 22643-22669; doi:10.3390/s141222643
+OPEN ACCESS
+sensors
+ISSN 1424-8220
+www.mdpi.com/journal/sensors
+Article
+Tree Crown Mapping in Managed Woodlands (Parklands) of
+Semi-Arid West Africa Using WorldView-2 Imagery and
+Geographic Object Based Image Analysis
+Martin Karlson 1,*, Heather Reese 2,† and Madelene Ostwald 1,3,†
+Centre for Climate Science and Policy Research, Department of Thematic Studies/Environmental
+Change, Linköping University, Linköping 58183, Sweden; E-Mail:
+Section of Forest Remote Sensing, Department of Forest Resource Management,
+Swedish University of Agricultural Sciences, Umeå 901 83, Sweden; E-Mail:
+Centre for Environment and Sustainability, GMV, University of Gothenburg and
+Chalmers University of Technology, Göteborg 405 30, Sweden
+These authors contributed equally to this work.
+* Author to whom correspondence should be addressed; E-Mail:
+Tel.: +46-1328-2977; Fax: +46-1313-3630.
+External Editor: Assefa M. Melesse"
+6574eaab393aa8d674cd785fab16cae06a53151a,A study on polymorphing superscalar processor dynamically to improve power efficiency,"A Study on Polymorphing Superscalar Processor Dynamically
+to Improve Power Efficiency
+Sudarshan Srinivasan, Rance Rodrigues, Arunachalam Annamalai, Israel Koren and Sandip Kundu
+Department of Electrical and Computer Engineering
+University of Massachusetts at Amherst, MA, USA
+Email: {ssrinivasan, rodrigues, annamalai, koren,"
+65ec52a3e0a0f6a46fd140ff83bb82d7d02a2d45,Learning Hierarchical Features from Generative Models,"Learning Hierarchical Features from Generative Models
+Shengjia Zhao 1 Jiaming Song 1 Stefano Ermon 1"
+656b6133fd671f129fce0091a8dab39c97e604f2,Multiview Discriminative Geometry Preserving Projection for Image Classification,"Hindawi Publishing Corporation
+e Scientific World Journal
+Volume 2014, Article ID 924090, 11 pages
+http://dx.doi.org/10.1155/2014/924090
+Research Article
+Multiview Discriminative Geometry Preserving
+Projection for Image Classification
+Ziqiang Wang, Xia Sun, Lijun Sun, and Yuchun Huang
+School of Information Science and Engineering, Henan University of Technology, Zhengzhou 450001, China
+Correspondence should be addressed to Ziqiang Wang;
+Received 19 December 2013; Accepted 22 January 2014; Published 9 March 2014
+Academic Editors: X. Meng, Z. Zhou, and X. Zhu
+Copyright © 2014 Ziqiang Wang et al. This is an open access article distributed under the Creative Commons Attribution License,
+which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
+In many image classification applications, it is common to extract multiple visual features from different views to describe an image.
+Since different visual features have their own specific statistical properties and discriminative powers for image classification, the
+onventional solution for multiple view data is to concatenate these feature vectors as a new feature vector. However, this simple
+oncatenation strategy not only ignores the complementary nature of different views, but also ends up with “curse of dimensionality.”
+To address this problem, we propose a novel multiview subspace learning algorithm in this paper, named multiview discriminative
+geometry preserving projection (MDGPP) for feature extraction and classification. MDGPP can not only preserve the intraclass"
+65817963194702f059bae07eadbf6486f18f4a0a,WhittleSearch: Interactive Image Search with Relative Attribute Feedback,"http://dx.doi.org/10.1007/s11263-015-0814-0
+WhittleSearch: Interactive Image Search with Relative Attribute
+Feedback
+Adriana Kovashka · Devi Parikh · Kristen Grauman
+Received: date / Accepted: date"
+6581c5b17db7006f4cc3575d04bfc6546854a785,Contextual Person Identification in Multimedia Data,"Contextual Person Identification
+in Multimedia Data
+zur Erlangung des akademischen Grades eines
+Doktors der Ingenieurwissenschaften
+der Fakultät für Informatik
+des Karlsruher Instituts für Technologie (KIT)
+genehmigte
+Dissertation
+Dipl.-Inform. Martin Bäuml
+us Erlangen
+Tag der mündlichen Prüfung:
+8. November 2014
+Hauptreferent:
+Korreferent:
+Prof. Dr. Rainer Stiefelhagen
+Karlsruher Institut für Technologie
+Prof. Dr. Gerhard Rigoll
+Technische Universität München
+KIT – Universität des Landes Baden-Württemberg und nationales Forschungszentrum in der Helmholtz-Gemeinschaft
+www.kit.edu"
+659fc18b1ec79a7437e6e7b1dce145d423e82199,Real time person detection and tracking by mobile robots using RGB-D images,"Real Time Person Detection and Tracking by Mobile Robots using
+RGB-D Images
+Duc My Vo, Lixing Jiang and Andreas Zell"
+65d588e2ff7b4f2903efbeded978885f7da5d0e0,UMPM benchmark: A multi-person dataset with synchronized video and motion capture data for evaluation of articulated human motion and interaction,"UMPM benchmark: a multi-person dataset with synchronized video and motion
+apture data for evaluation of articulated human motion and interaction
+N.P. van der Aa1,2, X. Luo1, G.J. Giezeman1, R.T. Tan1, R.C. Veltkamp1
+{x.luo, g.j.giezeman, r.t.tan,
+Utrecht University
+Noldus Information Technology"
+653d19e64bd75648cdb149f755d59e583b8367e3,"Decoupling ""when to update"" from ""how to update""","Decoupling “when to update” from “how to
+update”
+Eran Malach and Shai Shalev-Shwartz
+School of Computer Science, The Hebrew University, Israel"
+65babb10e727382b31ca5479b452ee725917c739,Label Distribution Learning,"Label Distribution Learning
+Xin Geng*, Member, IEEE"
+62dccab9ab715f33761a5315746ed02e48eed2a0,A Short Note about Kinetics-600,"A Short Note about Kinetics-600
+Jo˜ao Carreira
+Eric Noland
+Andras Banki-Horvath
+Chloe Hillier
+Andrew Zisserman"
+62d1a31b8acd2141d3a994f2d2ec7a3baf0e6dc4,Noise-resistant network: a deep-learning method for face recognition under noise,"Ding et al. EURASIP Journal on Image and Video Processing (2017) 2017:43
+DOI 10.1186/s13640-017-0188-z
+EURASIP Journal on Image
+nd Video Processing
+R ES EAR CH
+Noise-resistant network: a deep-learning
+method for face recognition under noise
+Yuanyuan Ding1,2, Yongbo Cheng1,2, Xiaoliu Cheng1, Baoqing Li1*, Xing You1 and Xiaobing Yuan1
+Open Access"
+62aeecbe5db3e4ed6b783f4b580157f4f1c8ba45,"Haar like and LBP based features for face, head and people detection in video sequences","Author manuscript, published in ""International Workshop on Behaviour Analysis and Video Understanding (ICVS 2011) (2011)"
+6275aa21331a2712222b7ab2116e9589e21ae82c,Prediction of Manipulation Actions,"Noname manuscript No.
+(will be inserted by the editor)
+Prediction of Manipulation Actions
+Cornelia Ferm¨uller · Fang Wang · Yezhou Yang · Konstantinos Zampogiannis · Yi
+Zhang · Francisco Barranco · Michael Pfeiffer
+the date of receipt and acceptance should be inserted later"
+62694828c716af44c300f9ec0c3236e98770d7cf,Identification of Action Units Related to Affective States in a Tutoring System for Mathematics,"Padrón-Rivera, G., Rebolledo-Mendez, G., Parra, P. P., & Huerta-Pacheco, N. S. (2016). Identification of Action Units Related to
+Identification of Action Units Related to Affective States in a Tutoring System
+Gustavo Padrón-Rivera1, Genaro Rebolledo-Mendez1*, Pilar Pozos Parra2 and N. Sofia
+Facultad de Estadística e Informática, Universidad Veracruzana, Mexico // 2Universidad Juárez Autónoma de
+Tabasco, Mexico // // // //
+for Mathematics
+Huerta-Pacheco1
+*Corresponding author"
+6225e9c2a9ee47b4d3d58313a839f6e170b48525,Shape Aware Matching of Implicit Surfaces based on Thin Shell Energies,"SHAPE AWARE MATCHING OF IMPLICIT SURFACES BASED ON THIN SHELL
+ENERGIES
+JOS ´E A. IGLESIAS, MARTIN RUMPF, AND OTMAR SCHERZER"
+62e8010e2ac1523d3a3e7e1c13cb34e63e85ce04,Transfer Learning for Action Unit Recognition,"Transfer Learning for Action Unit Recognition
+Yen Khye Lim1, Zukang Liao1, Stavros Petridis1 and Maja Pantic1,2"
+62d5c16760018b08e301a940434c3fc2e862c385,Approach For Palm Vein Blood Vessel Detection Based On Fuzzy Logic,"International Journal of Electronics Engineering Research.
+ISSN 0975-6450 Volume 9, Number 4 (2017) pp. 613-619
+© Research India Publications
+http://www.ripublication.com
+Approach For Palm Vein Blood Vessel Detection
+Based On Fuzzy Logic
+Praveen Kaundal
+Department of E.C.E, PEC, University of Technology
+Chandigarh-160012, India
+Dr. Sukhwinder Singh
+Department of E.C.E, PEC, University of Technology
+Chandigarh-160012, India"
+622949b1aacd316c60a7034c44121c698a3fb6a4,Highway Driving Dataset for Semantic Video Segmentation,"KIM, YIM, AND KIM: HIGHWAY DRIVING DATASET
+Highway Driving Dataset
+for Semantic Video Segmentation
+Byungju Kim
+Junho Yim
+Junmo Kim*
+School of Electrical Engineering
+Korea Advanced Institute of Science
+nd Technology (KAIST),
+South Korea"
+6211ba456908d605e85d102d63b106f1acb52186,Visual Interpretability forDeepLearning,"Zhang et al. / Front Inform Technol Electron Eng
+in press
+Frontiers of Information Technology & Electronic Engineering
+www.jzus.zju.edu.cn; engineering.cae.cn; www.springerlink.com
+ISSN 2095-9184 (print); ISSN 2095-9230 (online)
+E-mail:
+Visual Interpretability for Deep Learning∗
+Quanshi Zhang and Song-Chun Zhu
+(University of California, Los Angeles)
+E-mail:"
+62f0d8446adee6a5e8102053a63a61af07ac4098,Facial point detection using convolutional neural network transferred from a heterogeneous task,"FACIAL POINT DETECTION USING CONVOLUTIONAL NEURAL NETWORK
+TRANSFERRED FROM A HETEROGENEOUS TASK
+Takayoshi Yamashita* Taro Watasue** Yuji Yamauchi* Hironobu Fujiyoshi*
+**Tome R&D
+*Chubu University,
+200, Matsumoto-cho, Kasugai, AICHI"
+62cf8c07ca6c4c7817f6a5682eb2d7cde76198ae,Boosted Metric Learning for Efficient Identity-Based Face Retrieval,"NEGREL ET AL.: BOOSTED METRIC LEARNING FOR FACE RETRIEVAL
+Boosted Metric Learning for Efficient
+Identity-Based Face Retrieval
+Romain Negrel
+Alexis Lechervy
+Frederic Jurie
+GREYC, CNRS UMR 6072, ENSICAEN
+Université de Caen Basse-Normandie
+France"
+6268ad4bc516a41a30db566e2207079fc483212e,LBP-Based Edge-Texture Features for Object Recognition,"LBP-Based Edge-Texture Features for
+Object Recognition
+Amit Satpathy, Member, IEEE, Xudong Jiang, Senior Member, IEEE, and How-Lung Eng, Member, IEEE"
+62374b9e0e814e672db75c2c00f0023f58ef442c,Frontal face authentication using discriminating,"Frontalfaceauthenticationusingdiscriminatinggridswith
+morphologicalfeaturevectors
+A.Tefas
+C.Kotropoulos
+I.Pitas
+DepartmentofInformatics,AristotleUniversityofThessaloniki
+Box,Thessaloniki
+EDICSnumbers:-KNOWContentRecognitionandUnderstanding
+-MODAMultimodalandMultimediaEnvironments
+Anovelelasticgraphmatchingprocedurebasedonmultiscalemorphologicaloperations,thesocalled
+morphologicaldynamiclinkarchitecture,isdevelopedforfrontalfaceauthentication.Fastalgorithms
+forimplementingmathematicalmorphologyoperationsarepresented.Featureselectionbyemploying
+linearprojectionalgorithmsisproposed.Discriminatorypowercoe(cid:14)cientsthatweighthematching
+errorateachgridnodearederived.Theperformanceofmorphologicaldynamiclinkarchitecturein
+frontalfaceauthenticationisevaluatedintermsofthereceiveroperatingcharacteristicontheMVTS
+faceimagedatabase.Preliminaryresultsforfacerecognitionusingtheproposedtechniquearealso
+presented.
+Correspondingauthor:I.Pitas
+DRAFT
+September
+6215c5713adeacbb33b9d1c4c739f2b0b50dd17f,Part-based 3d Face Recognition under Pose and Expression Variations,"PART-BASED 3D FACE RECOGNITION UNDER POSE AND EXPRESSION
+VARIATIONS
+Hamdi Dibeklio˘glu
+B.S, in Computer Engineering, Yeditepe University, 2006
+Submitted to the Institute for Graduate Studies in
+Science and Engineering in partial fulfillment of
+the requirements for the degree of
+Master of Science
+Graduate Program in Computer Engineering
+Bo˘gazi¸ci University"
+6273b3491e94ea4dd1ce42b791d77bdc96ee73a8,"Evaluating Appearance Models for Recognition, Reacquisition, and Tracking","Evaluating Appearance Models for Recognition, Reacquisition, and Tracking
+Doug Gray
+Shane Brennan
+Hai Tao
+University of California, Santa Cruz
+156 High St., Santa Cruz, CA 95064
+{dgray, shanerb,"
+62d1b32d67e4a4b58a66cba91629aae5f7968962,Recurrent Neural Networks for Semantic Instance Segmentation,"Recurrent Neural Networks
+for Semantic Instance Segmentation
+Amaia Salvador1, M´ıriam Bellver2, V´ıctor Campos2, Manel Baradad1
+Ferran Marques1 Jordi Torres2 and Xavier Giro-i-Nieto1
+Universitat Polit`ecnica de Catalunya 2Barcelona Supercomputing Center"
+626859fe8cafd25da13b19d44d8d9eb6f0918647,Activity Recognition Based on a Magnitude-Orientation Stream Network,"Activity Recognition based on a
+Magnitude-Orientation Stream Network
+Carlos Caetano, Victor H. C. de Melo, Jefersson A. dos Santos, William Robson Schwartz
+Smart Surveillance Interest Group, Department of Computer Science
+Universidade Federal de Minas Gerais, Belo Horizonte, Brazil"
+6289d2c4c47d7101861153bfe78c92d16cf4581b,A Cross-Core Performance Model for Heterogeneous Many-Core Architectures,"A Cross-Core Performance Model for
+Heterogeneous Many-Core Architectures
+Rui Pinheiro, Nuno Roma, and Pedro Tom´as (cid:63)
+INESC-ID, Instituto Superior T´ecnico, Universidade de Lisboa"
+623da0faea1f98f238936e34f361518829edfdf4,Digital geometry image analysis for medical diagnosis,"Digital Geometry Image Analysis for Medical Diagnosis
+Jiandong Fang Shiaofen Fang Jeffrey Huang Mihran Tuceryan
+Department of Computer and Information Science
+Indiana University Purdue University Indianapolis
+723 W. Michigan St., SL 280
+Indianapolis, IN 46202, USA
+-317-274-9731"
+624077c8c8c9306c12671870cacc0fb13ff20324,"Smart, Sparse Contours to Represent and Edit Images","Sparse, Smart Contours to Represent and Edit Images
+Tali Dekel 1
+Chuang Gan 2
+Dilip Krishnan 1
+Ce Liu 1 William T. Freeman 1,3
+Google Research 2 MIT-Watson AI Lab 3 MIT-CSAIL
+Figure 1. Our method produces high quality reconstructions of images from information along a small number of contours: a source
+(512×512) image in (a) is reconstructed in (c) from gradient information stored at the set of colored contours in (b)2, which are less than
+5% of the pixels. The model synthesizes hair texture, facial lines and shading even in regions where no input information is provided.
+Our model allows for semantically intuitive editing in the contour domain. Top-right: a caricature-like result (e) is created by moving and
+scaling some contours in (d). Bottom-right: hairs are synthesized by pasting a set of hair contours copied from a reference image. Edited
+ontours are marked in green while the original contours in red."
+6236962ce0d627fc23774f0680e77069b9667803,Fitting a Morphable Model to Pose and Shape of a Point Cloud,"Fitting a Morphable Model to Pose and Shape of a Point Cloud
+David C. Schneider, Peter Eisert
+Fraunhofer Heinrich Hertz Institute, Einsteinufer 37, 10587 Berlin, Germany"
+627107c02c2df1366965f11678dd3c4fb14ac9b3,Connecting Images and Natural Language a Dissertation Submitted to the Department of Computer Science and the Committee on Graduate Studies of Stanford University in Partial Fulfillment of the Requirements for the Degree of Doctor of Philosophy,"CONNECTING IMAGES AND NATURAL LANGUAGE
+A DISSERTATION
+SUBMITTED TO THE DEPARTMENT OF COMPUTER SCIENCE
+AND THE COMMITTEE ON GRADUATE STUDIES
+OF STANFORD UNIVERSITY
+IN PARTIAL FULFILLMENT OF THE REQUIREMENTS
+FOR THE DEGREE OF
+DOCTOR OF PHILOSOPHY
+Andrej Karpathy
+August 2016"
+629722342f719ee413e9bb07072a2fc2b4f09a26,Gender Classification by Information Fusion of Hair and Face,"Gender Classification by Information Fusion
+of Hair and Face
+Zheng Ji, Xiao-Chen Lian and Bao-Liang Lu
+Department of Computer Science and Engineering, Shanghai
+Jiao Tong University 800 Dong Chuan Road,
+Shanghai 200240, China
+. Introduction
+Various gender classification methods have been reported in the literature. These existing
+methods fall into two categories. The first kind of method is the appearance-based approach.
+Golomb et al. [1] used a two-layer neural network with 30 × 30 inputs and directly fed the
+scaled image pixels to the network without dimensionality reduction. Their database
+ontains only 90 images with half male and half female facial images. Gutta et al. [2] used the
+mixture of experts combining the ensembles of radial basis functions (RBF) networks and a
+decision tree. Xu et al. [3] applied Adaboost to gender classification problem with the feature
+pools composed of a set of linear projections utilizing statistical moments up to second
+order. Wu et al. [4] also adopted Adaboost. Instead of using threshold weak classifiers, they
+used looking-up table weak classifiers, which are more general and better than simple
+threshold ones due to stronger ability to model complex distribution of training samples.
+Moghaddam and Yang [5] demonstrated that support vector machines (SVMs) work better
+than other classifiers such as ensemble of radial basis function (RBF) networks, classical RBF"
+62e2c431d375bbafd988d53c4d39f240c8b7977b,A Game-Theoretic Probabilistic Approach for Detecting Conversational Groups,"A Game-Theoretic Probabilistic Approach
+for Detecting Conversational Groups
+Sebastiano Vascon1, Eyasu Zemene Mequanint2, Marco Cristani1,3, Hayley Hung4 (cid:63),
+Marcello Pelillo2, and Vittorio Murino1,3
+Dept. of Pattern Analysis & Computer Vision (PAVIS), Istituto Italiano di Tecnologia, Genova, Italy
+Dept. of Environmental Sciences, Informatics and Statistics, University Ca’ Foscari of Venice, Italy
+Dept. of Computer Science, University of Verona, Italy
+Faculty of Electrical Engineering, Mathematics and Computer Science, Technical University of Delft, Netherlands"
+627412bf4cf2706f6dc9530313ecf06bbc532cca,Dissertation Gerard Pons Moll,"Human Pose Estimation from Video and Inertial
+Sensors
+Von der Fakultät für Elektrotechnik und Informatik
+der Gottfried Wilhelm Leibniz Universität Hannover
+zur Erlangung des akademischen Grades
+Doktor-Ingenieur
+(abgekürzt: Dr.-Ing.)
+genehmigte
+Dissertation
+Gerard Pons Moll
+geboren am 25. Oktober 1984 in Barcelona."
+62dd66f9f4995cfdaafb479de50363ce0255b1bd,2 Feature Extraction Based on Wavelet Moments and Moment Invariants in Machine Vision Systems,"We are IntechOpen,
+the world’s leading publisher of
+Open Access books
+Built by scientists, for scientists
+,800
+16,000
+Open access books available
+International authors and editors
+Downloads
+Our authors are among the
+Countries delivered to
+TOP 1%
+2.2%
+most cited scientists
+Contributors from top 500 universities
+Selection of our books indexed in the Book Citation Index
+in Web of Science™ Core Collection (BKCI)
+Interested in publishing with us?
+Contact
+Numbers displayed above are based on latest data collected."
+621ea1f1e364262348135c803557e7b3454a804e,Generative spatiotemporal modeling of neutrophil behavior,"Accepted to 2018 IEEE International Symposium on Biomedical Imaging
+Copyright ©2018 IEEE
+Generative Spatiotemporal Modeling Of Neutrophil Behavior
+Narita Pandhe(cid:63)
+Balazs Rada†
+Shannon Quinn(cid:63)
+(cid:63) Department of Computer Science
+Department of Infectious Diseases
+University of Georgia"
+62007c30f148334fb4d8975f80afe76e5aef8c7f,Eye In-Painting with Exemplar Generative Adversarial Networks,"Eye In-Painting with Exemplar Generative Adversarial Networks
+Brian Dolhansky, Cristian Canton Ferrer
+Facebook Inc.
+Hacker Way, Menlo Park (CA), USA
+{bdol,"
+62a30f1b149843860938de6dd6d1874954de24b7,Fast Algorithm for Updating the Discriminant Vectors of Dual-Space LDA,"Fast Algorithm for Updating the Discriminant Vectors
+of Dual-Space LDA
+Wenming Zheng, Member, IEEE, and Xiaoou Tang, Fellow, IEEE"
+626c12d6ccb1405c97beca496a3456edbf351643,Conditional Variance Penalties and Domain Shift Robustness,"Conditional Variance Penalties and Domain Shift Robustness
+Christina Heinze-Deml & Nicolai Meinshausen
+Seminar for Statistics
+ETH Zurich
+Zurich, Switzerland"
+62e0380a86e92709fe2c64e6a71ed94d152c6643,Facial emotion recognition with expression energy,"Facial Emotion Recognition With Expression Energy
+Albert Cruz
+Center for Research in
+Intelligent Systems
+16 Winston Chung Hall
+Bir Bhanu
+Center for Research in
+Intelligent Systems
+16 Winston Chung Hall
+Ninad Thakoor
+Center for Research in
+Intelligent Systems
+16 Winston Chung Hall
+Riverside, CA, 92521-0425,
+Riverside, CA, 92521-0425,
+Riverside, CA, 92521-0425,"
+62b90583723174220b26c92bd67f6c422ad75570,Dna-gan: Learning Disentangled Represen-,"Under review as a conference paper at ICLR 2018
+DNA-GAN: LEARNING DISENTANGLED REPRESEN-
+TATIONS FROM MULTI-ATTRIBUTE IMAGES
+Anonymous authors
+Paper under double-blind review"
+62070fbd22b2a4bba830668c2e9720ec4bff4171,Fast human detection using template matching for gradient images and aSC descriptors based on subtraction stereo,"978-1-4799-2341-0/13/$31.00 ©2013 IEEE
+ICIP 2013"
+968c62bb2927ca300ef953644e652ba7d2c2e5e6,Learning person-object interactions for action recognition in still images,"Learning person-object interactions for
+ction recognition in still images
+Vincent Delaitre∗
+´Ecole Normale Sup´erieure
+Josef Sivic*
+INRIA Paris - Rocquencourt
+Ivan Laptev*
+INRIA Paris - Rocquencourt"
+96e7142ab905c54c033696ac3692e85692c43bf3,Sparse Illumination Learning and Transfer for Single-Sample Face Recognition with Image Corruption and Misalignment,"Noname manuscript No.
+(will be inserted by the editor)
+Sparse Illumination Learning and Transfer for
+Single-Sample Face Recognition with Image Corruption and
+Misalignment
+Liansheng Zhuang · Tsung-Han Chan ·
+Allen Y. Yang · S. Shankar Sastry · Yi Ma
+Received: date / Accepted: date"
+9626bcb3fc7c7df2c5a423ae8d0a046b2f69180c,A deep learning approach for action classification in American football video sequences,"UPTEC STS 17033
+Examensarbete 30 hp
+November 2017
+A deep learning approach for
+ction classification in American
+football video sequences
+Jacob Westerberg"
+96e9bc6b54d1c79406cf37ae45fd35ef04d647c6,A Fully Automated System for Sizing Nasal PAP Masks Using Facial Photographs,"A Fully Automated System for Sizing Nasal PAP Masks Using Facial
+Photographs
+Benjamin Johnston Student Member, IEEE and Philip de Chazal Senior Member, IEEE"
+9603b3a4649fd217752972909d627bde8e0a5023,Spectral Hashing With Semantically Consistent Graph for Image Indexing,"Spectral Hashing With Semantically
+Consistent Graph for Image Indexing
+Peng Li, Meng Wang, Member, IEEE, Jian Cheng, Member, IEEE, Changsheng Xu, Senior Member, IEEE, and
+Hanqing Lu, Senior Member, IEEE"
+96d6e0bf752c42ede0170e9b332ca390ac75cd1f,Temporal Hierarchical Dictionary with HMM for Fast Gesture Recognition,"018 24th International Conference on Pattern Recognition (ICPR)
+Beijing, China, August 20-24, 2018
+978-1-5386-3787-6/18/$31.00 ©2018 European Union"
+9696b172d66e402a2e9d0a8d2b3f204ad8b98cc4,Region-Based Facial Expression Recognition in Still Images,"J Inf Process Syst, Vol.9, No.1, March 2013
+pISSN 1976-913X
+eISSN 2092-805X
+Region-Based Facial Expression Recognition in
+Still Images
+Gawed M. Nagi*, Rahmita Rahmat*, Fatimah Khalid* and Muhamad Taufik*"
+9679d15c6699b521740408b2e899c03af89390ac,Dimensionality Reduction for 3d Articulated Body Tracking and Human Action Analysis,"DIMENSIONALITY REDUCTION FOR 3D
+ARTICULATED BODY TRACKING AND HUMAN
+ACTION ANALYSIS
+Leonid Raskin
+Research Supervisors:
+Prof. Ehud Rivlin, Dr. Michael Rudzsky
+Prof. Michael Lindenbaum
+Submitted in Partial Fulfillment of the Requirements for the
+Degree of Doctor of Philosophy
+Technion IIT - Israel Institute of Technology
+Haifa, Israel
+March 2010
+(cid:176) Copyright by Leonid Raskin, 2010
+Technion - Computer Science Department - Ph.D. Thesis PHD-2010-11 - 2010"
+96390f95a73a6bd495728b6cd2a97554ef187f76,Pan Olympus : Sensor Privacy through Utility Aware,"Proceedings on Privacy Enhancing Technologies ..; .. (..):1–21
+Nisarg Raval, Ashwin Machanavajjhala, and Jerry Pan
+Olympus: Sensor Privacy through Utility Aware
+Obfuscation"
+9630109529870d142fde01341da05967484e906c,Techniques of Facial Synthesis: A Comprehensive Literature Review,"International Journal of Computer Applications (0975 – 8887)
+Volume 61– No.10, January 2013
+Techniques of Facial Synthesis: A
+Comprehensive Literature Review
+Deepti Chandra
+Shri Shankaracharya College
+of Engg. & Technology, Bhilai,
+Chhattisgarh, India
+Sanjeev Karmakar
+Bhilai Institute of Technology (BIT)
+Chhattisgarh, Durg 491001, India
+Rajendra Hegadi
+Pragati College of Engg. &
+Management
+Raipur,Chhattisgarh, India
+realism
+-the synthesized"
+96f4a1dd1146064d1586ebe86293d02e8480d181,Comparative Analysis of Reranking Techniques for Web Image Search,"COMPARATIVE ANALYSIS OF RERANKING
+TECHNIQUES FOR WEB IMAGE SEARCH
+Suvarna V. Jadhav1, A.M.Bagade2
+,2Department of Information Technology, Pune Institute of Computer Technology, Pune,( India)"
+96723b42451c42ec396381596490143aac8f85cd,A Computer Vision Approach for the Eye Accessing Cue Model Used in Neuro-linguistic Programming,"U.P.B. Sci. Bull., Series C, Vol. 75, Iss. 4, 2013 ISSN 2286 – 3540
+A COMPUTER VISION APPROACH FOR THE EYE
+ACCESSING CUE MODEL USED IN NEURO-LINGUISTIC
+PROGRAMMING
+Ruxandra VRÂNCEANU1, Laura FLOREA2, Corneliu FLOREA3
+This paper investigates the Eye Accessing Cue (EAC) model used in Neuro-
+Linguistic Programming (NLP) and shows how image processing techniques can be
+used to improve the interpretation of this model. An experiment was carried out to
+validate the model by inducing certain eye cues using a set of questions. A simple
+nd efficient method is proposed for automatically locating the eyes and the
+orrespondent EAC. The relative position between the iris and the sclera is
+determined using a fast mechanism, based on the analysis of integral projections
+inside the bounding box of the eye.
+Keywords: Neuro-Linguistic Programming, Eye Detection, Eye Gaze
+. Introduction
+The progress made in image processing and the increase of computational
+apabilities of machines over the past decades has led to new opportunities for
+human-computer interactions and to the development of systems capable of
+utomatically interpreting the facial attributes of a person. Such algorithms are
+used in the field of people identification and description, in applications that"
+96a7f2faf4baa09184deb458a03146805d62beed,Passive Three Dimensional Face Recognition Using Iso-Geodesic Contours and Procrustes Analysis,"Int J Comput Vis (2013) 105:87–108
+DOI 10.1007/s11263-013-0631-2
+Passive Three Dimensional Face Recognition Using Iso-Geodesic
+Contours and Procrustes Analysis
+Sina Jahanbin · Rana Jahanbin · Alan C. Bovik
+Received: 11 November 2011 / Accepted: 11 May 2013 / Published online: 19 June 2013
+© Springer Science+Business Media New York 2013"
+9606b1c88b891d433927b1f841dce44b8d3af066,Principal Component Analysis with Tensor Train Subspace,"Principal Component Analysis with Tensor Train
+Subspace
+Wenqi Wang, Vaneet Aggarwal, and Shuchin Aeron"
+96fdc0131dc80ffa6d7b9c526e07f080414c54ec,1 Paying More A ention to Saliency : Image Captioning with Saliency and Context A ention,"Paying More A(cid:130)ention to Saliency: Image Captioning with
+Saliency and Context A(cid:130)ention
+MARCELLA CORNIA, University of Modena and Reggio Emilia
+LORENZO BARALDI, University of Modena and Reggio Emilia
+GIUSEPPE SERRA, University of Udine
+RITA CUCCHIARA, University of Modena and Reggio Emilia
+Image captioning has been recently gaining a lot of a(cid:138)ention thanks to the impressive achievements
+shown by deep captioning architectures, which combine Convolutional Neural Networks to extract image
+representations, and Recurrent Neural Networks to generate the corresponding captions. At the same time,
+signi(cid:128)cant research e(cid:130)ort has been dedicated to the development of saliency prediction models, which
+an predict human eye (cid:128)xations. Even though saliency information could be useful to condition an image
+aptioning architecture, by providing an indication of what is salient and what is not, research is still struggling
+to incorporate these two techniques. In this work, we propose an image captioning approach in which a
+generative recurrent neural network can focus on di(cid:130)erent parts of the input image during the generation of
+the caption, by exploiting the conditioning given by a saliency prediction model on which parts of the image
+re salient and which are contextual. We show, through extensive quantitative and qualitative experiments on
+large scale datasets, that our model achieves superior performances with respect to captioning baselines with
+nd without saliency, and to di(cid:130)erent state of the art approaches combining saliency and captioning.
+CCS Concepts: •Computing methodologies →Scene understanding; Natural language generation;
+Additional Key Words and Phrases: saliency, visual saliency prediction, image captioning, deep learning."
+9691055b1fcbe626b5bce9d8d43903094a5c0339,Generating an item pool for translational social cognition research: methodology and initial validation.,"Behav Res (2015) 47:228–234
+DOI 10.3758/s13428-014-0464-0
+Generating an item pool for translational social cognition
+research: Methodology and initial validation
+Michael K. Keutmann & Samantha L. Moore &
+Adam Savitt & Ruben C. Gur
+Published online: 10 April 2014
+# Psychonomic Society, Inc. 2014"
+96b1000031c53cd4c1c154013bb722ffd87fa7da,ContextVP: Fully Context-Aware Video Prediction,"ContextVP: Fully Context-Aware Video
+Prediction
+Wonmin Byeon1,2,3,4, Qin Wang2,
+Rupesh Kumar Srivastava4, and Petros Koumoutsakos2
+NVIDIA, Santa Clara, CA, USA
+ETH Zurich, Zurich, Switzerland
+The Swiss AI Lab IDSIA, Manno, Switzerland
+NNAISENSE, Lugano, Switzerland"
+96f0da034d090a3ecadd0fb92333bb681f23ab14,Temporal-Spatial Mapping for Action Recognition,"Temporal-Spatial Mapping for Action Recognition
+Xiaolin Song, Cuiling Lan, Wenjun Zeng, Junliang Xing, Jingyu Yang, and Xiaoyan Sun"
+964e43f4983a42ef3790c265bdce42c1fce56d79,A Virtual Environment Tool for Benchmarking Face Analysis Systems,"A Virtual Environment Tool for Benchmarking Face
+Analysis Systems
+Mauricio Correa+,*, Javier Ruiz-del-Solar+,*, Rodrigo Verschae*
++Department of Electrical Engineering, Universidad de Chile
+*Advanced Mining Technology Center, Universidad de Chile
+{macorrea,"
+968f472477a8afbadb5d92ff1b9c7fdc89f0c009,Firefly-based Facial Expression Recognition,Firefly-based Facial Expression Recognition
+96fc93175169b788acd98f0a676dffab00651cbc,On Matching Faces with Alterations due to Plastic Surgery and Disguise,"On Matching Faces with Alterations due to Plastic Surgery and Disguise
+Saksham Suri1, Anush Sankaran2, Mayank Vatsa1, Richa Singh1
+IIIT - Delhi, India 2IBM Research, Bengaluru, India
+{saksham15082, mayank,"
+9686dcf40e6fdc4152f38bd12b929bcd4f3bbbcc,Emotion Based Music Player,"International Journal of Engineering Research and General Science Volume 3, Issue 1, January-February, 2015
+ISSN 2091-2730
+Emotion Based Music Player
+Hafeez Kabani1, Sharik Khan2, Omar Khan3, Shabana Tadvi4
+Department of Computer Science and Engineering
+Department of Computer Science and Engineering
+Department of Computer Science and Engineering
+Asst. Professor, Department of Computer Science and Engineering
+M.H Saboo Siddik College of Engineering, University of Mumbai, India"
+96788880589a514c3ae9de29695c0127d6e76b8f,Attention-Based Multimodal Fusion for Video Description,"Attention-Based Multimodal Fusion for Video Description
+Chiori Hori
+Takaaki Hori
+Teng-Yok Lee
+Kazuhiro Sumi∗
+John R. Hershey
+Tim K. Marks
+Mitsubishi Electric Research Laboratories (MERL)
+{chori, thori, tlee, sumi, hershey,"
+3abfab8740ffc66c0c191ce32ce1240062620bea,Continuous Facial Affect Recognition from Videos,"N. Garay, J. Abascal (Eds.): Actas del XII Congreso Internacional Interacción 2011, Lisboa
+Continuous Facial Affect Recognition from Videos
+Sergio Ballano1, Isabelle Hupont1, Eva Cerezo2 and Sandra Baldassarri2
+Aragon Institute of Technology, Department of R&D and Technology Services,
+Zaragoza. 5018, María de Luna 7-8, Spain
+University of Zaragoza, Computer Science and Systems Engineering Department,
+Zaragoza. 50018, María de Luna 3, Spain
+{sballano, {ecerezo,"
+3a7f9b4badc7407273325650763e887ad7b5cc9e,Anthropometric Comparison of Cross-Sectional External Ear between Monozygotic Twin,"Annals of Forensic Research and Analysis
+*Corresponding author
+Rumiza Abd Rashid, Institute of Forensic Sciences,
+Universiti Teknologi MARA, 40450 Shah Alam, Selangor,
+Malaysia; Tel: +60196943080; Fax: +603-55444562 ;
+Email:
+Submitted: 19 November 2014
+Accepted: 20 November 2014
+Published: 22 November 2014
+Copyright
+© 2014 Rashid et al.
+OPEN ACCESS
+Keywords
+• External ear
+• Monozygotic twin
+• Anthropometric measurement
+• Forensic anthropology
+• Identification
+Research Article
+Anthropometric Comparison"
+3aad63c3c049eedb1c6da4871faa90e797b933e8,Highway Networks for Visual Question Answering,"Highway Networks for Visual Question Answering
+Aaditya Prakash and James Storer
+Brandeis University"
+3a2fc58222870d8bed62442c00341e8c0a39ec87,Probabilistic Local Variation Segmentation,"Probabilistic Local Variation
+Segmentation
+Michael Baltaxe
+Technion - Computer Science Department - M.Sc. Thesis MSC-2014-02 - 2014"
+3a4ecdf7d73b0fb392763048aa834a537a495537,Contour-based object detection,"SCHLECHT, OMMER: CONTOUR-BASED OBJECT DETECTION
+Contour-based Object Detection
+Joseph Schlecht
+Björn Ommer
+Interdisciplinary Center for
+Scientific Computing
+University of Heidelberg
+Germany"
+3aef744dad3982a7ae1ad97b4f126b6772fc3d07,Scene-Centric Joint Parsing of Cross-View Videos,"Scene-centric Joint Parsing of Cross-view Videos
+Hang Qi1∗, Yuanlu Xu1∗, Tao Yuan1∗, Tianfu Wu2, Song-Chun Zhu1
+Dept. Computer Science and Statistics, University of California, Los Angeles (UCLA)
+{hangqi, tianfu
+Dept. Electrical and Computer Engineering, NC State University"
+3a8f16d8f7adae8bd0cdc5cc5114dac0b388a9f6,Interpreting Deep Neural Network: Fast Object Localization via Sensitivity Analysis,"Under review as a conference paper at ICLR 2019
+INTERPRETING DEEP NEURAL NETWORK:
+FAST OBJECT LOCALIZATION VIA SENSITIVITY
+ANALYSIS
+Anonymous authors
+Paper under double-blind review"
+3a8023d206613c930cee8e9166fcbbfd743e6634,Enhancing Person Re-identification in a Self-trained Subspace,"Enhancing Person Re-identification in a Self-trained
+Subspace
+Xun Yang, Meng Wang, Richang Hong, Qi Tian, Yong Rui"
+3acfbc2aee9b2ed246a640930ebc2e350621f990,Progressive Boosting for Class Imbalance,"Progressive Boosting for Class Imbalance
+Roghayeh Soleymania,∗, Eric Grangera, Giorgio Fumerab
+Laboratoire d’imagerie, de vision et d’intelligence artificielle, ´Ecole de technologie sup´erieure
+Pattern Recognition and Applications Group, Dept. of Electrical and Electronic Engineering
+Universit´e du Qu´ebec, Montreal, Canada
+University of Cagliari, Cagliari, Italy"
+3a804cbf004f6d4e0b041873290ac8e07082b61f,A Corpus-Guided Framework for Robotic Visual Perception,"Language-Action Tools for Cognitive Artificial Agents: Papers from the 2011 AAAI Workshop (WS-11-14)
+A Corpus-Guided Framework for Robotic Visual Perception
+Ching L. Teo, Yezhou Yang, Hal Daum´e III, Cornelia Ferm¨uller, Yiannis Aloimonos
+University of Maryland Institute for Advanced Computer Studies, College Park, MD 20742-3275
+{cteo, yzyang, hal, fer,"
+3abc833f4d689f37cc8a28f47fb42e32deaa4b17,Large Scale Retrieval and Generation of Image Descriptions,"Noname manuscript No.
+(will be inserted by the editor)
+Large Scale Retrieval and Generation of Image Descriptions
+Vicente Ordonez · Xufeng Han · Polina Kuznetsova · Girish Kulkarni ·
+Margaret Mitchell · Kota Yamaguchi · Karl Stratos · Amit Goyal ·
+Jesse Dodge · Alyssa Mensch · Hal Daum´e III · Alexander C. Berg ·
+Yejin Choi · Tamara L. Berg
+Received: date / Accepted: date"
+3a165f7e22f0667b401cba1b2615048193781b4c,Patch-based Object Recognition,"Diplomarbeit im Fach Informatik
+Rheinisch-Westf¨alische Technische Hochschule Aachen
+Lehrstuhl f¨ur Informatik 6
+Prof. Dr.-Ing. H. Ney
+Patch-Based Object Recognition
+vorgelegt von:
+Andre Hegerath
+Matrikelnummer 228760
+Gutachter:
+Prof. Dr.-Ing. H. Ney
+Prof. Dr. T. Seidl
+Betreuer:
+Dipl.-Inform. T. Deselaers"
+3abb51739b90c8bfd665e045b0eeadc87e065b63,Intrinsic 3D Dynamic Surface Tracking based on Dynamic Ricci Flow and Teichm&#xfc;ller Map,"Intrinsic 3D Dynamic Surface Tracking based on Dynamic Ricci Flow and
+Teichm ¨uller Map
+Xiaokang Yu
+Dept of Comp Sci
+Qingdao Univ
+Na Lei
+Dept of Soft and Tech
+Dalian Univ of Tech
+Qingdao, PR China
+Dalian,PR China
+Yalin Wang
+Comp.Sci.& Engin
+Arizona State Univ
+Arizona, USA
+Xianfeng Gu
+Dept of Comp Sci
+Stony Brook Univ
+Stony Brook, USA"
+3ab13f3ee6d66186c33766ac115d57f8b381468f,Stream Clustering with Dynamic Estimation of Emerging Local Densities,"Stream Clustering with Dynamic Estimation of
+Emerging Local Densities
+Ziyin Wang
+Gavriil Tsechpenakis
+Department of Computer and Information Science
+Indiana University-Purdue University Indianapolis
+Department of Computer and Information Science
+Indiana University-Purdue University Indianapolis
+Indianapolis, IN 46202, USA
+Email:
+Indianapolis, IN 46202, USA
+Email:"
+3acb6b3e3f09f528c88d5dd765fee6131de931ea,Novel representation for driver emotion recognition in motor vehicle videos,"(cid:49)(cid:50)(cid:57)(cid:40)(cid:47)(cid:3)(cid:53)(cid:40)(cid:51)(cid:53)(cid:40)(cid:54)(cid:40)(cid:49)(cid:55)(cid:36)(cid:55)(cid:44)(cid:50)(cid:49)(cid:3)(cid:41)(cid:50)(cid:53)(cid:3)(cid:39)(cid:53)(cid:44)(cid:57)(cid:40)(cid:53)(cid:3)(cid:40)(cid:48)(cid:50)(cid:55)(cid:44)(cid:50)(cid:49)(cid:3)(cid:53)(cid:40)(cid:38)(cid:50)(cid:42)(cid:49)(cid:44)(cid:55)(cid:44)(cid:50)(cid:49)(cid:3)(cid:3)
+(cid:44)(cid:49)(cid:3)(cid:48)(cid:50)(cid:55)(cid:50)(cid:53)(cid:3)(cid:57)(cid:40)(cid:43)(cid:44)(cid:38)(cid:47)(cid:40)(cid:3)(cid:57)(cid:44)(cid:39)(cid:40)(cid:50)(cid:54)(cid:3)
+(cid:53)(cid:68)(cid:77)(cid:78)(cid:88)(cid:80)(cid:68)(cid:85)(cid:3)(cid:55)(cid:75)(cid:72)(cid:68)(cid:74)(cid:68)(cid:85)(cid:68)(cid:77)(cid:68)(cid:81)(cid:13)(cid:15)(cid:3)(cid:37)(cid:76)(cid:85)(cid:3)(cid:37)(cid:75)(cid:68)(cid:81)(cid:88)(cid:13)(cid:15)(cid:3)(cid:36)(cid:79)(cid:69)(cid:72)(cid:85)(cid:87)(cid:3)(cid:38)(cid:85)(cid:88)(cid:93)(cid:130)(cid:15)(cid:3)(cid:37)(cid:72)(cid:79)(cid:76)(cid:81)(cid:71)(cid:68)(cid:3)(cid:47)(cid:72)(cid:13)(cid:15)(cid:3)(cid:36)(cid:86)(cid:82)(cid:81)(cid:74)(cid:88)(cid:3)(cid:55)(cid:68)(cid:80)(cid:69)(cid:82)(cid:13)(cid:3)
+(cid:3)
+*Center for Research in Intelligent Systems, University of California, Riverside, CA 92521, USA
+(cid:130) Computer Perception Lab, California State University, Bakersfield, CA 93311, USA
+(cid:36)(cid:37)(cid:54)(cid:55)(cid:53)(cid:36)(cid:38)(cid:55)(cid:3)
+the background
+(cid:3)
+A novel feature representation of human facial expressions
+for emotion recognition is developed. The representation
+leveraged
+texture removal ability of
+Anisotropic Inhibited Gabor Filtering (AIGF) with the
+ompact representation of spatiotemporal
+local binary
+patterns. The emotion recognition system incorporated face
+detection and registration followed by the proposed feature
+representation: Local Anisotropic Inhibited Binary Patterns
+in Three Orthogonal"
+3a13c964cc7adc5f010164ccb91d150457685a78,LIMO: Lidar-Monocular Visual Odometry,"LIMO: Lidar-Monocular Visual Odometry
+Johannes Graeter1, Alexander Wilczynski1 and Martin Lauer1"
+3aee6a6285869e6db48ad269eb110b542ad23c93,One-Click Annotation with Guided Hierarchical Object Detection,"One - Click Annotation with Guided Hierarchical Object Detection
+Adithya Subramanian, Anbumani Subramanian
+Intel
+Bangalore, India"
+3ac09c2589178dac0b6a2ea2edf04b7629672d81,Wasserstein CNN: Learning Invariant Features for NIR-VIS Face Recognition,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2017
+Wasserstein CNN: Learning Invariant Features
+for NIR-VIS Face Recognition
+Ran He, Senior Member, IEEE, Xiang Wu, Zhenan Sun∗, Member, IEEE, and Tieniu Tan, Fellow, IEEE"
+3a772ed83fdc90e10def9d38f59153aee49cd47b,A Camera Network Tracking (CamNeT) Dataset and Performance Baseline,"A Camera Network Tracking (CamNeT) Dataset and Performance Baseline
+Shu Zhang1, Elliot Staudt1, Tim Faltemier2, and Amit K. Roy-Chowdhury1
+Department of Electrical and Computer Engineering, University of California, Riverside
+Progeny Systems Corporation"
+3a35154f765dcba4e3789a38346bf54bce69e336,Object Hallucination in Image Captioning,"Object Hallucination in Image Captioning
+Anna Rohrbach∗1, Lisa Anne Hendricks∗1,
+Kaylee Burns1 , Trevor Darrell1, Kate Saenko2
+UC Berkeley, 2 Boston University"
+3a60678ad2b862fa7c27b11f04c93c010cc6c430,A Multimodal Database for Affect Recognition and Implicit Tagging,"JANUARY-MARCH 2012
+A Multimodal Database for
+Affect Recognition and Implicit Tagging
+Mohammad Soleymani, Member, IEEE, Jeroen Lichtenauer,
+Thierry Pun, Member, IEEE, and Maja Pantic, Fellow, IEEE"
+3a37f57a9b94fff82ffea4e77803ebe5ebf6401b,ER7ST-algorithm for extracting facial expressions,"068 The International Arab Journal of Information Technology Vol. 13, No. 6B, 2016
+ER7ST-Algorithm for Extracting Facial Expressions
+Ahmad Tayyar1, Shadi Al-Shehabi2, and Majida AlBakoor3
+Department of Computer Science, Jerash University, Jordan
+Department of C omputer Engineeringm, Türk Hava Kurumu Üniversitesi, Turkey
+Department of Mathematics, Aleppo University, Syria"
+3acdccd33e518f22dcfe36ee29c332a644afdb25,Automatic Detection of Facial Midline And Its Contributions To Facial Feature Extraction,"Electronic Letters on Computer Vision and Image Analysis 6(3):55-66, 2008
+Automatic Detection of Facial Midline
+And Its Contributions To Facial Feature Extraction
+Nozomi NAKAO, Wataru OHYAMA, Tetsushi WAKABAYASHI and Fumitaka KIMURA
+Graduate School of Engineering, Mie University, 1577 Kurimamachiya–cho, Tsu–shi, Mie 514–8507, Japan
+Received 17 April 2007; revised 17 June 2007; accepted 17 September 2007"
+3a92a00b41dc6217f7685148c8a378524fa1a542,Human Pose Estimation Using Exemplars and Part Based Refinement,"Human Pose Estimation
+Using Exemplars and Part Based Refinement
+Yanchao Su1, Haizhou Ai1, Takayoshi Yamashita2, and Shihong Lao2
+Computer Science and Technology Department, Tsinghua, Beijing 100084, China
+Core Technology Center, Omron Corporation, Kyoto 619-0283, Japan"
+3a591a9b5c6d4c62963d7374d58c1ae79e3a4039,Driver Cell Phone Usage Detection from HOV/HOT NIR Images,"Driver Cell Phone Usage Detection From HOV/HOT NIR Images
+Yusuf Artan, Orhan Bulan, Robert P. Loce, and Peter Paul
+Xerox Research Center Webster
+800 Phillips Rd. Webster NY 14580"
+3aa9c8c65ce63eb41580ba27d47babb1100df8a3,Differentiating Duchenne from non-Duchenne smiles using active appearance models,"Annals of the
+University of North Carolina Wilmington
+Master of Science in
+Computer Science and Information Systems"
+3a0a839012575ba455f2b84c2d043a35133285f9,Corpus-Guided Sentence Generation of Natural Images,"Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing, pages 444–454,
+Edinburgh, Scotland, UK, July 27–31, 2011. c(cid:13)2011 Association for Computational Linguistics"
+3a192e0391c357124cd2ec2287b1706f523ecdfd,An Introduction to the 3rd Workshop on Egocentric (First-Person) Vision,"An Introduction to the 3rd Workshop on Egocentric (First-person) Vision
+Steve Mann, Kris M. Kitani, Yong Jae Lee, M. S. Ryoo, Alireza Fathi"
+3aa98c08043558fec09bbf731cd7a8f09cf4eacf,Projective Nonnegative Matrix Factorization with α-Divergence,"Projective Nonnegative Matrix Factorization
+with α-Divergence
+Zhirong Yang and Erkki Oja
+Department of Information and Computer Science(cid:2)
+P.O. Box 5400, FI-02015, TKK, Espoo, Finland
+Helsinki University of Technology"
+3a9681e2e07be7b40b59c32a49a6ff4c40c962a2,"Comparing treatment means : overlapping standard errors , overlapping confidence intervals , and tests of hypothesis","Biometrics & Biostatistics International Journal
+Comparing treatment means: overlapping standard
+errors, overlapping confidence intervals, and tests of
+hypothesis"
+3a846704ef4792dd329a5c7a2cb8b330ab6b8b4e,FACE-GRAB: Face recognition with General Region Assigned to Binary operator,"in any current or
+future media,
+for all other uses,
+© 2010 IEEE. Personal use of this material is permitted. Permission from IEEE must be
+obtained
+including
+reprinting/republishing this material for advertising or promotional purposes, creating
+new collective works, for resale or redistribution to servers or lists, or reuse of any
+opyrighted component of this work in other works.
+Pre-print of article that appeared at the IEEE Computer Society Workshop on Biometrics
+010.
+The published article can be accessed from:
+http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=5544597"
+3af0a26ef9a4084703b310eb997ca630d0bae237,Automatic conversion of monoscopic image / video to stereo for 3 D visualization,"________________________________________________________________________________________________
+International Journal of Recent Advances in Engineering & Technology (IJRAET)
+Automatic conversion of monoscopic image/ video to stereo for 3D
+visualization
+R.C.Gokul Nanda Kumar, 2Vijaykumar T
+4th sem, M.Tech (Digital Electronics), SJBIT, Bangalore
+Assoc Prof, Dept. of ECE, SJBIT, Bangalore
+Email:
+into a"
+3aa66f2829ef440842c71a52cdaff30398a90ccb,Pointly-Supervised Action Localization,"International Journal of Computer Vision manuscript No.
+(will be inserted by the editor)
+Pointly-Supervised Action Localization
+Pascal Mettes · Cees G. M. Snoek
+Received: date / Accepted: date"
+3a0673199699cd51abe0f104ebe080f63d1b6d37,Sparse shape registration for occluded facial feature localization,"Sparse Shape Registration for Occluded Facial Feature Localization
+Fei Yang, Junzhou Huang and Dimitris Metaxas"
+3a95eea0543cf05670e9ae28092a114e3dc3ab5c,Constructing the L2-Graph for Robust Subspace Learning and Subspace Clustering,"Constructing the L2-Graph for Robust Subspace
+Learning and Subspace Clustering
+Xi Peng, Zhiding Yu, Huajin Tang, Member, IEEE, and Zhang Yi, Senior Member, IEEE"
+3af0400c011700f3958062edfdfed001e592391c,The Intense World Theory – A Unifying Theory of the Neurobiology of Autism,"HUMAN NEUROSCIENCE
+The Intense World Theory – a unifying theory of the
+neurobiology of autism
+Review ARticle
+published: 21 December 2010
+doi: 10.3389/fnhum.2010.00224
+Kamila Markram
+* and
+Henry Markram
+Laboratory of Neural Microcircuits, Brain Mind Institute, Ecole Polytechnique Fédérale de Lausanne, Lausanne, Switzerland
+Edited by:
+Silvia A. Bunge, University of California
+Berkeley, USA
+Reviewed by:
+Matthew K. Belmonte, Cornell
+University, USA; University of
+Cambridge, UK
+Egidio D’Angelo, University of Pavia,
+Italy
+*Correspondence:"
+3a24c276368fa63473078723ce4bc99c9ea36019,Stability comparison of dimensionality reduction techniques attending to data and parameter variations,"Eurographics Conference on Visualization (EuroVis) (2013)
+M. Hlawitschka and T. Weinkauf (Editors)
+Short Papers
+Stability comparison of dimensionality reduction techniques
+ttending to data and parameter variations
+Francisco J. García-Fernández1,2, Michel Verleysen2, John A. Lee2 and Ignacio Díaz1
+University of Oviedo, Spain
+Université Catholique de Louvain, Belgium"
+3a0cceb1a10697e3e17738579d27708c9c3303a8,Data-Intensive Multimedia Semantic Concept Modeling using Robust Subspace Bagging and MapReduce,"Data-Intensive Multimedia Semantic Concept Modeling
+using Robust Subspace Bagging and MapReduce"
+3af28e9e9e883c235b6418a68bda519b08f9ae26,Implications of Adult Facial Aging on Biometrics,"We are IntechOpen,
+the world’s leading publisher of
+Open Access books
+Built by scientists, for scientists
+,800
+16,000
+Open access books available
+International authors and editors
+Downloads
+Our authors are among the
+Countries delivered to
+TOP 1%
+2.2%
+most cited scientists
+Contributors from top 500 universities
+Selection of our books indexed in the Book Citation Index
+in Web of Science™ Core Collection (BKCI)
+Interested in publishing with us?
+Contact
+Numbers displayed above are based on latest data collected."
+3a28fe49e7a856ddd60d134696a891ed7bca5962,Small-scale Pedestrian Detection Based on Somatic Topology Localization and Temporal Feature Aggregation,"Small-scale Pedestrian Detection Based on
+Somatic Topology Localization and Temporal
+Feature Aggregation
+Tao Song, Leiyu Sun, Di Xie, Haiming Sun, Shiliang Pu
+Hikvision Research Institute"
+3affe6f9c2244f4b32c1c0f7d7f1d24770d40efe,Evaluating the Resilience of Face Recognition Systems against Malicious Attacks,"OMAR L., IVRISSIMTZIS I.: RESILIENCE OF FACE RECOGNITION SYSTEMS
+Evaluating the Resilience of Face
+Recognition Systems against Malicious
+Attacks
+Luma Omar1
+Ioannis Ivrissimtzis1
+School of Engineering and
+Computing Sciences
+Durham University
+Durham, UK"
+3ab7f06cf8e7e7ca34427f81b766b823647ac117,Explaining Eye Movements During Learning as an Active Sampling Process,"Proceedings of the 2004 International
+Conference on Development and Learning
+Editors: Jochen Triesch and Tony Jebara
+Publisher: UCSD Institute for Neural Computation
+Location: The Salk Institute for Biological Studies
+La Jolla California, USA
+ISBN: 0-615-12704-5"
+3a4f522fa9d2c37aeaed232b39fcbe1b64495134,Face Recognition and Retrieval Using Cross-Age Reference Coding With Cross-Age Celebrity Dataset,"ISSN (Online) 2321 – 2004
+ISSN (Print) 2321 – 5526
+INTERNATIONAL JOURNAL OF INNOVATIVE RESEARCH IN ELECTRICAL, ELECTRONICS, INSTRUMENTATION AND CONTROL ENGINEERING
+Vol. 4, Issue 5, May 2016
+IJIREEICE
+Face Recognition and Retrieval Using Cross
+Age Reference Coding
+Sricharan H S1, Srinidhi K S1, Rajath D N1, Tejas J N1, Chandrakala B M2
+BE, DSCE, Bangalore1
+Assistant Professor, DSCE, Bangalore2"
+54509dbe70cd3015007bbd5fa1fd8793b388319e,Fast Pedestrian Detection by Cascaded Random Forest with Dominant Orientation Templates,"TANG ET AL.: FAST PEDESTRIAN DETECTION BY RANDOM FORESTS WITH DOT
+Fast Pedestrian Detection by Cascaded
+Random Forest with Dominant Orientation
+Templates
+Danhang Tang
+http://www.iis.ee.ic.ac.uk/~dtang
+Yang Liu
+http://www.iis.ee.ic.ac.uk/~yliu
+Tae-Kyun Kim
+http://www.iis.ee.ic.ac.uk/~tkkim
+Department of Electrical Engineering,
+Imperial College,
+London, UK"
+548f94f82bf28efa299a64c2527aad36d76b81af,Adaptive Kernels for Texture Based Analysis of Object Based Classification of Forest Stands,"Adaptive Kernels for Texture Based
+Analysis of Object Based Classification
+of Forest Stands
+Ziab Khan
+A thesis submitted in partial fulfilment for the
+degree of Master of Philosophy
+in the
+Department of Geography
+University of Leicester
+August 26, 2014"
+540b39ba1b8ef06293ed793f130e0483e777e278,Biologically Inspired Emotional Expressions for Artificial Agents,"ORIGINAL RESEARCH
+published: 13 July 2018
+doi: 10.3389/fpsyg.2018.01191
+Biologically Inspired Emotional
+Expressions for Artificial Agents
+Beáta Korcsok 1*, Veronika Konok 2, György Persa 3, Tamás Faragó 2, Mihoko Niitsuma 4,
+Ádám Miklósi 2,5, Péter Korondi 1, Péter Baranyi 6 and Márta Gácsi 2,5
+Department of Mechatronics, Optics and Engineering Informatics, Budapest University of Technology and Economics,
+Budapest, Hungary, 2 Department of Ethology, Eötvös Loránd University, Budapest, Hungary, 3 Institute for Computer Science
+nd Control, Hungarian Academy of Sciences, Budapest, Hungary, 4 Department of Precision Mechanics, Chuo University,
+Tokyo, Japan, 5 MTA-ELTE Comparative Ethology Research Group, Budapest, Hungary, 6 Department of Telecommunications
+nd Media Informatics, Budapest University of Technology and Economics, Budapest, Hungary
+A special area of human-machine interaction,
+the expression of emotions gains
+importance with the continuous development of artificial agents such as social robots or"
+54ed052738ca0f4570c74931857b3275fca9993b,Knowledge-Guided Deep Fractal Neural Networks for Human Pose Estimation,"Knowledge-Guided Deep Fractal Neural Networks
+for Human Pose Estimation
+Guanghan Ning, Student Member, IEEE, Zhi Zhang, Student Member, IEEE, and Zhihai He, Fellow, IEEE"
+54bb3a17d536c7b88e56d294464f3d54de2ea9b3,Video surveillance online repository (ViSOR): www.openvisor.org,"Video Surveillance Online Repository (ViSOR)
+www.openvisor.org
+Roberto Vezzani, Rita Cucchiara
+Dipartimento di Ingegneria “Enzo Ferrari”
+University of Modena and Reggio Emilia, Italy"
+544829d3b2e878c8f28fae5aa0c226e65ba6242a,Human Body Segmentation with Multi-limb Error-Correcting Output Codes Detection and Graph Cuts Optimization,"Human Pose Recovery and Behavior Analysis Group
+Human Body Segmentation with
+Multi-limb Error-Correcting Output
+Codes Detection and Graph Cuts
+Optimization
+Daniel Sánchez, Juan Carlos Ortega,
+Miguel Ángel Bautista & Sergio Escalera
+All rights reserved HuBPA©"
+54d78ad2ed30557474fabd1d3a9e5db1c76fbeaa,Deep Person Re-identification for Probabilistic Data Association in Multiple Pedestrian Tracking,"Deep Person Re-identification for Probabilistic Data Association in
+Multiple Pedestrian Tracking
+Brian H. Wang1, Yan Wang2, Kilian Q. Weinberger2, and Mark Campbell1"
+54983972aafc8e149259d913524581357b0f91c3,ReSEED: social event dEtection dataset,"ReSEED: Social Event dEtection Dataset
+Timo Reuter
+Universität Bielefeld, CITEC
+Bielefeld, Germany
+ielefeld.de
+Symeon Papadopoulos
+CERTH-ITI
+Thermi, Greece
+Vasilios Mezaris
+CERTH-ITI
+Thermi, Greece
+Philipp Cimiano
+Universität Bielefeld, CITEC
+Bielefeld, Germany
+ielefeld.de"
+541c68e2c65f6dce6179801c9f92dc7803dc71b5,Unsupervised and Transfer Learning under Uncertainty - From Object Detections to Scene Categorization,"Unsupervised and Transfer Learning under Uncertainty:
+from Object Detections to Scene Categorization
+Gr´egoire Mesnil1,2, Salah Rifai1, Antoine Bordes3,
+Xavier Glorot1, Yoshua Bengio1 and Pascal Vincent1
+LISA, Universit´e de Montr´eal, Qu´ebec, Canada
+LITIS, Universit´e de Rouen, France
+CNRS - Heudiasyc UMR 7253, Universit´e de Technologie de Compi`egne, France
+Keywords:
+Unsupervised Learning, Transfer Learning, Deep Learning, Scene Categorization, Object Detection"
+543f21d81bbea89f901dfcc01f4e332a9af6682d,Unsupervised and Semi-supervised Learning with Categorical Generative Adversarial Networks,"Published as a conference paper at ICLR 2016
+UNSUPERVISED AND SEMI-SUPERVISED LEARNING
+WITH CATEGORICAL GENERATIVE ADVERSARIAL
+NETWORKS
+Jost Tobias Springenberg
+University of Freiburg
+79110 Freiburg, Germany"
+54c5e9cded7da1f9dc695f5397d9d1a5ac5350af,Person Re-identification Based on Color Histogram and Spatial Configuration of Dominant Color Regions,"Person Re-identification Based on Color Histogram and Spatial
+Configuration of Dominant Color Regions
+Kwangchol Jang, Sokmin Han, Insong Kim
+College of Computer Science, KIM IL SUNG University, Pyongyang, D.P.R of Korea
+illumination, pose and viewpoint, camera parameters. Being related"
+54969bcd728b0f2d3285866c86ef0b4797c2a74d,Learning for Video Compression,"IEEE TRANSACTION SUBMISSION
+Learning for Video Compression
+Zhibo Chen, Senior Member, IEEE, Tianyu He, Xin Jin, Feng Wu, Fellow, IEEE"
+5456166e3bfe78a353df988897ec0bd66cee937f,Improved Boosting Performance by Exclusion of Ambiguous Positive Examples,"Improved Boosting Performance by Exclusion
+of Ambiguous Positive Examples
+Miroslav Kobetski, Josephine Sullivan
+Computer Vision and Active Perception, KTH, Stockholm 10800, Sweden
+{kobetski,
+Keywords:
+Boosting, Image Classification, Algorithm Evaluation, Dataset Pruning, VOC2007."
+543c601f8ebc0995040f4b8de4a339fd4c860cbb,Eye localization: a survey,"Eye localization: a survey
+Paola CAMPADELLI, Raffaella LANZAROTTI, Giuseppe LIPORI 1
+Dipartimento di Scienze dell’Informazione.
+Università degli Studi di Milano.
+Via Comelico, 39/41 - 20135 Milano (Italy)"
+5432392d916e730c53962be202c115133e6d7777,Face processing in a case of high functioning autism with developmental prosopagnosia.,"RESEARCH PAPER
+Acta Neurobiol Exp 2018, 78: 114–131
+DOI: 10.21307/ane‑2018‑011
+Face processing in a case of high functioning autism
+with developmental prosopagnosia
+Hanna B. Cygan1,3*, Hanna Okuniewska2, Katarzyna Jednoróg3, Artur Marchewka4,
+Marek Wypych4 and Anna Nowicka3
+Laboratory of Social Psychology, Department of Ergonomics, Central Institute for Labour Protection, National Research Institute,
+Warsaw, Poland, 2 Faculty of Psychology, University of Warsaw, Warsaw, Poland, 3 Laboratory of Psychophysiology, Department of
+Neurophysiology, Nencki Institute of Experimental Biology, Polish Academy of Science, Warsaw, Poland, 4 Laboratory of Brain Imaging,
+Neurobiology Center, Nencki Institute of Experimental Biology, Polish Academy of Science, Warsaw, Poland,
+* Email:
+The ability to “read” the information about facial identity, expressed emotions, and intentions is crucial for non‑verbal social interaction.
+Neuroimaging and clinical studies consequently link face perception with fusiform gyrus (FG) and occipital face area (OFA) activity. Here
+we investigated face processing in an adult, patient PK, diagnosed with both high functioning autism spectrum disorder (ASD) and
+developmental prosopagnosia (DP). Both disorders have a significant impact on face perception and recognition, thus creating a unique
+neurodevelopmental condition. We used eye‑tracking and functional magnetic resonance imaging (fMRI) method. Eye‑tracking and fMRI
+results of PK were compared to results of control subjects. Patient PK showed atypical gaze‑fixation strategy during face perception and
+typical patterns of brain activations in the FG and OFA. However, a significant difference between PK and control subjects was found in
+the left anterior superior temporal sulcus/middle temporal gyrus (aSTS/MTG). In PK the left aSTS/MTG was hypo‑activated in comparison"
+5479da1038a530beb760a38dbb5b08947dfaefbd,Fusing continuous spectral images for face recognition under indoor and outdoor illuminants,"DOI 10.1007/s00138-008-0151-1
+ORIGINAL PAPER
+Fusing continuous spectral images for face recognition
+under indoor and outdoor illuminants
+H. Chang · A. Koschan · B. Abidi · M. Abidi
+Received: 4 December 2007 / Accepted: 14 May 2008 / Published online: 17 June 2008
+© Springer-Verlag 2008
+image fusion approaches,"
+54aacc196ffe49b3450059fccdf7cd3bb6f6f3c3,A joint learning framework for attribute models and object descriptions,"A Joint Learning Framework for Attribute Models and Object Descriptions
+Dhruv Mahajan
+Yahoo! Labs, Bangalore, India
+Sundararajan Sellamanickam
+Vinod Nair"
+5478a70badcf4d6da383d86163f0acc2c28b6bd3,Enhancing pedestrian detection using optical flow for surveillance,"Int. J. Computational Vision and Robotics, Vol. 7, Nos. 1/2, 2017
+Enhancing pedestrian detection using optical flow for
+surveillance
+Redwan A.K. Noaman*,
+Mohd Alauddin Mohd Ali and
+Nasharuddin Zainal
+Department of Electrical, Electronic and Systems Engineering,
+Faculty of Engineering and Built Environment,
+Universiti Kebangsaan Malaysia,
+3600 Bandar Baru Bangi, Selangor, Malaysia
+Email:
+Email:
+Email:
+*Corresponding author"
+5454c5900b6b6a0cf36df65d667129fcbd5262dc,Benchmarking asymmetric 3D-2D face recognition systems,"Benchmarking Asymmetric 3D-2D Face Recognition Systems
+Xi Zhao, Wuming Zhang, Georgios Evangelopoulos, Di Huang, Shishir K. Shah, Yunhong Wang,
+Ioannis A. Kakadiaris and Liming Chen"
+541bccf19086755f8b5f57fd15177dc49e77d675,A few days of a robot's life in the human's world: toward incremental individual recognition,"Computer Science and ArtificialIntelligence LaboratoryTechnical Reportmassachusetts institute of technology, cambridge, ma 02139 usa — www.csail.mit.eduMIT-CSAIL-TR-2007-022April 3, 2007A Few Days of A Robot’s Life in the Human’s World: Toward Incremental Individual RecognitionLijin Aryananda"
+54f0fa07dee7bd270d3bd8da9011ca90df78af59,Comparison of Laser-Based Person Tracking at Feet and Upper-Body Height,"Comparison of Laser-based Person Tracking at
+Feet and Upper-Body Height
+Konrad Schenk, Markus Eisenbach,
+Alexander Kolarow, and Horst-Michael Gross (cid:63)
+Neuroinformatics and Cognitive Robotics
+Ilmenau University of Technologies"
+542289d1acfebb9d79ea7a10c8e1516924e09973,Video Highlight Prediction Using Audience Chat Reactions,"Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 972–978
+Copenhagen, Denmark, September 7–11, 2017. c(cid:13)2017 Association for Computational Linguistics"
+54dae5187de3898d8034719bcaa3e0100ae72d76,Probabilistic Attributed Hashing,"Probabilistic Attributed Hashing
+Mingdong Ou1, Peng Cui1, Jun Wang2, Fei Wang3, Wenwu Zhu1
+Tsinghua National Laboratory for Information Science and Technology
+Department of Computer Science and Technology, Tsinghua University. Beijing, China
+Department of Computer Science and Engineering, University of Connecticut. Storrs, CT. USA.
+Data Science, Alibaba Group, Seattle, WA, USA."
+5458ccf22bdea7197e28b433ef06d5225fb030a7,Video Description Using Bidirectional Recurrent Neural Networks,"Video Description using Bidirectional Recurrent
+Neural Networks
+´Alvaro Peris1, Marc Bola˜nos2,3, Petia Radeva2,3, and Francisco Casacuberta1
+PRHLT Research Center, Universitat Polit`ecnica de Val`encia, Valencia (Spain)
+Universitat de Barcelona, Barcelona (Spain)
+Computer Vision Center, Bellaterra (Spain)"
+546cef6f86fb5a9fd59d40d9df63301c8a9d7d15,PathTrack: Fast Trajectory Annotation with Path Supervision,"PathTrack: Fast Trajectory Annotation with Path Supervision
+Santiago Manen1
+Michael Gygli1
+Dengxin Dai1
+Luc Van Gool1,2
+Computer Vision Laboratory
+ESAT - PSI / IBBT
+{smanenfr, gygli, daid,
+ETH Zurich
+K.U. Leuven"
+54b309443f53ed960f588f64d6aefe53f87504b6,TVD: A Reproducible and Multiply Aligned TV Series Dataset,"TVD: a reproducible and multiply aligned TV series dataset
+Anindya Roy1, Camille Guinaudeau1,2, Herv´e Bredin1, Claude Barras1,2
+Spoken Language Processing Group, CNRS-LIMSI, B.P. 133, Orsay, France.
+Universit´e Paris Sud, Orsay, France.
+{roy, guinaudeau, bredin,"
+541b13515480c0371bb8bb79cf17120645edccc7,Development of an Efficient Face Recognition System based on Linear and Nonlinear Algorithms,"Development of an Efficient Face Recognition System based on Linear and Nonlinear Algorithms
+{tag} {/tag}
+International Journal of Computer Applications
+Foundation of Computer Science (FCS), NY, USA
+Volume 134
+Number 7
+Year of Publication: 2016
+Authors:
+Filani Araoluwa S., Adetunmbi Adebayo O.
+10.5120/ijca2016907932
+{bibtex}2016907932.bib{/bibtex}"
+54756f824befa3f0c2af404db0122f5b5bbf16e0,Computer Vision — Visual Recognition,"Research Statement
+Computer Vision — Visual Recognition
+Alexander C. Berg
+Computational visual recognition concerns identifying what is in an image, video, or other visual data, enabling
+pplications such as measuring location, pose, size, activity, and identity as well as indexing for search by content.
+Recent progress in making economical sensors and improvements in network, storage, and computational power
+make visual recognition practical and relevant in almost all experimental sciences and commercial applications
+such as image search. My work in visual recognition brings together machine learning, insights from psychology
+nd physiology, computer graphics, algorithms, and a great deal of computation.
+While I am best known for my work on general object category detection – creating techniques and building
+systems for some of the best performing approaches to categorizing and localizing objects in images, recognizing
+ction in video, and searching large collections of video and images – my research extends widely across visual
+recognition including:
+• Creating low-level image descriptors – procedures for converting pixel values to features that can be used
+to model appearance for recognition. These include widely used descriptors for category recognition in
+images [4, 2], object detection in images and video [11, 10, 2], and optical flow based descriptors for action
+recognition in video [8].
+• Developing models for recognition – ranging from what is becoming seminal work in recognizing human
+ctions in video [8], to formulating object localization as approximate subgraph isomorphism [2], to models
+for parsing architectural images [3], to a novel approach for face recognition based on high level describable"
+549c719c4429812dff4d02753d2db11dd490b2ae,YouTube-BoundingBoxes: A Large High-Precision Human-Annotated Data Set for Object Detection in Video,"YouTube-BoundingBoxes: A Large High-Precision
+Human-Annotated Data Set for Object Detection in Video
+Esteban Real
+Google Brain
+Jonathon Shlens
+Google Brain
+Stefano Mazzocchi
+Google Research
+Xin Pan
+Google Brain
+Vincent Vanhoucke
+Google Brain"
+548bc4203770450c21133bfb72c58f5fae0fbdf2,Visual-Inertial-Semantic Scene Representation for 3D Object Detection,"Visual-Inertial-Semantic Scene Representation for 3D Object Detection
+Jingming Dong∗
+Xiaohan Fei∗
+Stefano Soatto
+UCLA Vision Lab, University of California, Los Angeles, CA 90095
+{dong, feixh,"
+987dd3dd6079e5fa8a10a1c53b2580fd71e27ede,Concept-Based Video Retrieval By Cees,"Foundations and Trends R(cid:1) in
+Information Retrieval
+Vol. 2, No. 4 (2008) 215–322
+(cid:1) 2009 C. G. M. Snoek and M. Worring
+DOI: 10.1561/1500000014
+Concept-Based Video Retrieval
+By Cees G. M. Snoek and Marcel Worring
+Contents
+Introduction
+How to Retrieve Video Content?
+Human-Driven Labeling
+.3 Machine-Driven Labeling
+Aims, Scope, and Organization
+Detecting Semantic Concepts in Video
+Introduction
+Basic Concept Detection
+Feature Fusion
+Classifier Fusion
+.5 Modeling Relations
+Best of Selection"
+9853136dbd7d5f6a9c57dc66060cab44a86cd662,"Improving the Neural Network Training for Face Recognition using Adaptive Learning Rate , Resilient Back Propagation and Conjugate Gradient Algorithm","International Journal of Computer Applications (0975 – 8887)
+Volume 34– No.2, November 2011
+Improving the Neural Network Training for Face
+Recognition using Adaptive Learning Rate, Resilient
+Back Propagation and Conjugate Gradient Algorithm
+Hamed Azami
+M.Sc. Student
+Department of Electrical
+Engineering, Iran University
+of Science and Technology,
+Tehran, Iran
+Saeid Sanei
+Associate Professor
+Department of Computing,
+Faculty of Engineering and
+Physical Sciences, University
+of Surrey, UK
+Karim Mohammadi
+Professor
+Department of Electrical"
+98f1613889657963b102460e4e970fe421c6ed3c,Accurate and Robust Neural Networks for Security Related Applications Exampled by Face Morphing Attacks,"Accurate and Robust Neural Networks for
+Security Related Applications Exampled by Face
+Morphing Attacks
+Clemens Seibold1, Wojciech Samek1, Anna Hilsmann1 and Peter Eisert1,2
+Fraunhofer HHI, Einsteinufer 37, 10587 Berlin, Germany
+Humboldt University Berlin, Unter den Linden 6, 10099 Berlin, Germany"
+98a6f2145a358cb2e54eddc99dd29911764bce0e,Learning Single-view 3D Reconstruction of Objects and Scenes,"Learning Single-view 3D Reconstruction of Objects and
+Scenes
+Shubham Tulsiani
+Electrical Engineering and Computer Sciences
+University of California at Berkeley
+Technical Report No. UCB/EECS-2018-93
+http://www2.eecs.berkeley.edu/Pubs/TechRpts/2018/EECS-2018-93.html
+July 26, 2018"
+986224bad9684c359db7fac2192b7134b855fbe3,Shopping for emotion,"Shopping for emotion
+Evaluating the usefulness of emotion recognition data from a retail perspective
+Anton Forsberg
+Anton Forsberg
+VT 2017
+Examensarbete f¨or civilingenj¨orer, 30hp
+Supervisor: Lars-Erik Janlert
+Examiner: Anders Broberg
+Civilingenj¨orsprogammet i Interaktion & Design"
+987c9a137d638f3d561c52b6dd0f987734ad5460,Efficient Dense Modules of Asymmetric Convolution for Real-Time Semantic Segmentation,"Efficient Dense Modules of Asymmetric Convolution for
+Real-Time Semantic Segmentation
+Shao-Yuan Lo1 Hsueh-Ming Hang1 Sheng-Wei Chan2 Jing-Jhih Lin2
+National Chiao Tung University 2 Industrial Technology Research Institute
+{ShengWeiChan,"
+988d1295ec32ce41d06e7cf928f14a3ee079a11e,Semantic Deep Learning,"Semantic Deep Learning
+Hao Wang
+September 29, 2015"
+98c7a6210ca7bc81d2f7092ab28451f47039e920,UC Merced Proceedings of the Annual Meeting of the Cognitive Science Society Title What is the Ground ?,"UC Merced
+Proceedings of the Annual Meeting of the Cognitive Science
+Society
+Title
+What is the Ground? Continuous Maps for Symbol Grounding
+Permalink
+https://escholarship.org/uc/item/9p5236j4
+Journal
+Proceedings of the Annual Meeting of the Cognitive Science Society, 36(36)
+Authors
+Perera, Ian
+Allen, James
+Publication Date
+014-01-01
+Peer reviewed
+eScholarship.org
+Powered by the California Digital Library
+University of California"
+98c548a4be0d3b62971e75259d7514feab14f884,Deep generative-contrastive networks for facial expression recognition,"Deep generative-contrastive networks for facial expression recognition
+Youngsung Kim†, ByungIn Yoo‡,†, Youngjun Kwak†, Changkyu Choi†, and Junmo Kim‡
+Samsung Advanced Institute of Technology (SAIT), ‡KAIST
+hangkyu"
+98b98a8413f21a48ee6effd52da8c31ece6a910d,Detecting handwritten signatures in scanned documents,"9th Computer Vision Winter Workshop
+Zuzana Kúkelová and Jan Heller (eds.)
+Křtiny, Czech Republic, February 3–5, 2014
+Detecting handwritten signatures in scanned documents
+İlkhan Cüceloğlu1,2, Hasan Oğul1
+Department of Computer Engineering, Başkent University, Ankara, Turkey
+DAS Document Archiving and Management Systems CO., Ankara, Turkey"
+98142e84a3cee08661b31371a2c610183df82c8f,Tight Bounds for the Expected Risk of Linear Classifiers and PAC-Bayes Finite-Sample Guarantees,"Tight Bounds for the Expected Risk of Linear Classifiers and
+PAC-Bayes Finite-Sample Guarantees
+Jean Honorio
+CSAIL, MIT
+Cambridge, MA 02139, USA"
+981449cdd5b820268c0876477419cba50d5d1316,Learning Deep Features for One-Class Classification,"Learning Deep Features for One-Class
+Classification
+Pramuditha Perera, Student Member, IEEE, and Vishal M. Patel, Senior Member , IEEE"
+98960be5ae51d30118f091f7091299a49f2f34bb,Global and Feature Based Gender Classification of Faces: a Comparison of Human Performance and Computational Models,"GLOBAL AND FEATURE BASED GENDER CLASSIFICATION
+OF FACES: A COMPARISON OF HUMAN PERFORMANCE
+AND COMPUTATIONAL MODELS
+SAMARASENA BUCHALAA TIM M.GALEA,B NEIL DAVEYA RAY J.FRANKA
+KERRY FOLEYB
+A Department of Computer Science, University of Hertfordshire, College Lane, Hatfield,
+{S.Buchala, N.Davey, T.Gale,
+AL10 9AB, UK
+B Department of Psychiatry, QEII Hospital, Welwyn Garden City, AL7 4HQ, UK
+Most computational models for gender classification use global information (the full face
+image) giving equal weight to the whole face area irrespective of the importance of the
+internal features. Here, we use a global and feature based representation of face images
+that includes both global and featural information. We use dimensionality reduction
+techniques and a support vector machine classifier and show that this method performs
+etter than either global or feature based representations alone.
+. Introduction
+Most computational models of gender classification use whole face images,
+giving equal weight to all areas of the face, irrespective of the importance of
+internal facial features. In this paper we evaluate the importance of global and
+local information in a series of gender recognition experiments. Global"
+98c5b88db35d7ab2d3cc0a63c7ff1414160d2aa6,Convolutional Neural Network-Based Finger-Vein Recognition Using NIR Image Sensors,"Article
+Convolutional Neural Network-Based Finger-Vein
+Recognition Using NIR Image Sensors
+Hyung Gil Hong, Min Beom Lee and Kang Ryoung Park *
+Division of Electronics and Electrical Engineering, Dongguk University, 30 Pildong-ro 1-gil, Jung-gu,
+Seoul 100-715, Korea; (H.G.H); (M.B.L.)
+* Correspondence: Tel.: +82-10-3111-7022; Fax: +82-2-2277-8735
+Academic Editor: Vittorio M. N. Passaro
+Received: 11 May 2017; Accepted: 1 June 2017; Published: 6 June 2017"
+98424c79970a80f30db837db84880a4c02e76f1a,Deepagent: An Algorithm Integration Approach for Person Re-Identification,"DEEPAGENT: AN ALGORITHM INTEGRATION APPROACH FOR PERSON
+RE-IDENTIFICATION
+Fulong Jiao, Bir Bhanu
+Center for Research in Intelligent Systems
+University of California, Riverside, Riverside, CA 92521, USA"
+98f13ab2845cfe8513a0c05427a8b90d9c0c1b69,Pedestrian Attribute Recognition with Part-based CNN and Combined Feature Representations,
+98220d35ae6a3ba745f7dea1434f000ca60c62c0,Multi-object Tracking using Particle Swarm Optimization on Target Interactions,"Multi-object Tracking using Particle Swarm
+Optimization on Target Interactions
+Bogdan Kwolek"
+984d5ed1fa80124117fdd0aa9a5be69f269da268,[insert Cover Letter Here],[Insert cover letter here]
+988d5ad8d114f5f21a73b2ae464dca4277f5725f,Persian Viseme Classification Using Interlaced Derivative Patterns and Support Vector Machine,"Journal of Information Assurance and Security.
+ISSN 1554-1010 Volume 9 (2014) pp. 148-156
+© MIR Labs, www.mirlabs.net/jias/index.html
+Persian Viseme Classification Using Interlaced
+Derivative Patterns and Support Vector Machine
+Mohammad Mahdi Dehshibi1, Jamshid Shanbehzadeh2
+Digital Signal Processing Lab., Pattern Research Center,
+Karaj, Iran
+Department of Computer Engineering, Kharazmi University,
+Tehran, Iran
+is a"
+986be05b286d99d840583578c102af31c56428fd,An Efficient Algorithm for Implementing Traffic Sign Detection on Low Cost Embedded System,"International Journal of Innovative
+Computing, Information and Control
+Volume 14, Number 1, February 2018
+ICIC International c(cid:13)2018 ISSN 1349-4198
+pp. 1–14
+AN EFFICIENT ALGORITHM FOR IMPLEMENTING TRAFFIC SIGN
+DETECTION ON LOW COST EMBEDDED SYSTEM
+Aryuanto Soetedjo and I Komang Somawirata
+Department of Electrical Engineering
+National Institute of Technology
+Jalan Raya Karanglo KM 2 Malang 65153, Indonesia
+Received May 2017; revised September 2017"
+9854145f2f64d52aac23c0301f4bb6657e32e562,An Improved Face Verification Approach Based on Speedup Robust Features and Pairwise Matching,"An Improved Face Verification Approach based on
+Speedup Robust Features and Pairwise Matching
+Eduardo Santiago Moura, Herman Martins Gomes and Jo˜ao Marques de Carvalho
+Center for Electrical Engineering and Informatics (CEEI)
+Federal University of Campina Grande (UFCG)
+Campina Grande, Para´ıba, Brazil
+Email:"
+983534325c649e391fefe87025337187021b9830,Towards Automatic Generation of Question Answer Pairs from Images,"Towards Automatic Generation of Question Answer Pairs from Images
+Issey Masuda Mora, Santiago Pascual de la Puente, Xavier Giro-i-Nieto
+Universitat Politecnica de Catalunya (UPC)
+Barcelona, Catalonia/Spain"
+98127346920bdce9773aba6a2ffc8590b9558a4a,Efficient human action recognition using histograms of motion gradients and VLAD with descriptor shape information,"Noname manuscript No.
+(will be inserted by the editor)
+Ef‌f‌icient Human Action Recognition using
+Histograms of Motion Gradients and
+VLAD with Descriptor Shape Information
+Ionut C. Duta · Jasper R.R. Uijlings ·
+Bogdan Ionescu · Kiyoharu Aizawa ·
+Alexander G. Hauptmann · Nicu Sebe
+Received: date / Accepted: date"
+98582edd6029c94844f5a40d246eaa86f74d8512,Learning Visual Scene Attributes,"Learning Visual Scene Attributes
+Vazheh Moussavi
+A Glance at Attribute-Centric Scene Representations
+Take a look around you. How would you describe your surroundings to best give an idea of what
+everything looks like to someone not there? Maybe you will give a category to the scene, say,
+‘bedroom’. You might try to list some of the objects around you, like ‘bed’, ‘lamp’, and ‘desk’. Or
+perhaps you’ll describe it with adjectives like ‘indoors’, ‘cozy’, and ‘cluttered’. In computer vision,
+(or more specifically, in scene understanding), the most effective way to describe a visual scene is
+lso a major question.
+Of the these three ways of describing a scene, (commonly referred to as categorization, scene pars-
+ing, and attribute-based representation respectively), categories have historically been the method of
+hoice. In categorization, an image (scene) is allowed to fall into exactly one of an arbitrary number
+of buckets. Attribute representations, however, are typically composed of several sets of buckets
+each of which will have a value associated with that scene. For instance, a simple category-based
+model would place an image in one of urban/rural/room, whereas a binary attribute-based model
+would have as attributes indoors and warm, each of which are marked as either present or not. In
+larger models, this leads to high dimensionality for attribute-based models, which has been a large
+disincentive for its use. In addition, classifying a scene’s entire attribute set non-trivially falls un-
+der multi-label learning, for which there exist very few learning algorithms in popular use. Lastly,
+there is scene parsing[5], which involves using object detectors, possibly in conjunction, to build"
+9889596a98824bdf7e7c59b62e732c0b2d356c69,Soft Correspondences in Multimodal Scene Parsing,"Sarah Taghavi Namin, Mohammad Najafi, Mathieu Salzmann, and Lars Petersson"
+98a660c15c821ea6d49a61c5061cd88e26c18c65,Face Databases for 2D and 3D Facial Recognition: A Survey,"IOSR Journal of Engineering (IOSRJEN)
+e-ISSN: 2250-3021, p-ISSN: 2278-8719
+Vol. 3, Issue 4 (April. 2013), ||V1 || PP 43-48
+Face Databases for 2D and 3D Facial Recognition: A Survey
+R.Senthilkumar1, Dr.R.K.Gnanamurthy2
+Assistant Professor, Department of Electronics and Communication Engineering, Institute of Road and
+Professor and Dean , Department of Electronics and Communication Engineering, Odaiyappa College of
+Transport Technology,Erode-638 316.
+Engineering and Technology,Theni-625 531."
+9817e0d11701e9ce0e31a32338ff3ff0969621ed,Dppnet: Approximating Determinantal Point Processes with Deep Networks,"Under review as a conference paper at ICLR 2019
+DPPNET: APPROXIMATING DETERMINANTAL POINT
+PROCESSES WITH DEEP NETWORKS
+Anonymous authors
+Paper under double-blind review"
+98126d18be648640fc3cfeb7ffc640a2ec1d5f6f,Supplemental Material: Discovering Groups of People in Images,"Supplemental Material: Discovering Groups of People in
+Images
+Wongun Choi1, Yu-Wei Chao2, Caroline Pantofaru3 and Silvio Savarese4
+. NEC Laboratories 2. University of Michigan, Ann Arbor
+. Google, Inc
+. Stanford University
+Qualitative Examples
+In Fig. 1 and 2, we show additional qualitative examples obtained using our model
+with poselet [1] and ground truth (GT) detections, respectively. We show the image
+onfiguration of groups on the left and corresponding 3D configuration on the right.
+Different colors and different line types (solid or dashed) represent different groups,
+the type of each structured group is overlayed on the bottom-left of one participant. In
+D visualization, squares represent standing people, circles represent people sitting on
+n object, and triangles represent people sitting on the ground. The view point of each
+individual is shown with a line. The gray triangle is the camera position. The poses are
+obtained by using the individual pose classification output for visualization purposes.
+The figures show that our algorithm is capable of correctly associating individu-
+ls into multiple different groups while estimating the type of each group. Notice that
+our algorithm can successfully segment different instances of the same group type that
+ppear in proximity. A distance-based clustering method would not be able to differ-"
+98a60b218ff8addaf213e97e2f4b54d39e45f5b9,Benchmarking Real World Object Recognition,"Bonn-Aachen International Center for Information Technology
+Master of Science in Autonomous Systems
+Bonn-Rhein-Sieg University of Applied Sciences
+Date: March 4, 2005
+Student: Adolf, Florian-Michael
+Matriculation-No: 9005989
+eMail:
+Supervisor: Prassler, Erwin
+Institution: UAS Bonn-Rhein-Sieg
+eMail:
+Benchmarking Real World Object Recognition
+Summer Term 2005
+Master Thesis Proposal
+Context
+Service robotics basically comprise everything that is not industrial robotics, and reflects
+the distinction between the manufacturing and service sectors of the economy. Hence
+service robots are supposed to operate in our human world as autonomously as possible.
+The perception of objects in video images suitable for everyday use (”real-world”) is one
+of the key disciplines in developing this key technology.
+Recent service robotic projects [16, 13, 19, 20, 2] demand research in machine vision and"
+984ecfbda7249e67eca8d9b1697e81f80e2e483d,Visual object categorization with new keypoint-based adaBoost features,"Visual object categorization with new keypoint-based
+daBoost features
+Taoufik Bdiri, Fabien Moutarde, Bruno Steux
+To cite this version:
+Taoufik Bdiri, Fabien Moutarde, Bruno Steux. Visual object categorization with new keypoint-based
+daBoost features. IEEE Symposium on Intelligent Vehicles (IV’2009), Jun 2009, XiAn, China. 2009.
+<hal-00422580>
+HAL Id: hal-00422580
+https://hal.archives-ouvertes.fr/hal-00422580
+Submitted on 7 Oct 2009
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de"
+98519f3f615e7900578bc064a8fb4e5f429f3689,Dictionary-Based Domain Adaptation Methods for the Re-identification of Faces,"Dictionary-based Domain Adaptation Methods
+for the Re-identification of Faces
+Qiang Qiu, Jie Ni, and Rama Chellappa"
+9825aa96f204c335ec23c2b872855ce0c98f9046,Face and Facial Expression Recognition in 3-d Using Masked Projection under Occlusion,"International Journal of Ethics in Engineering & Management Education
+Website: www.ijeee.in (ISSN: 2348-4748, Volume 1, Issue 5, May2014)
+FACE AND FACIAL EXPRESSION
+RECOGNITION IN 3-D USING MASKED
+PROJECTION UNDER OCCLUSION
+Jyoti patil *
+M.Tech (CSE)
+GNDEC Bidar-585401
+BIDAR, INDIA
+Gouri Patil
+M.Tech (CSE)
+GNDEC Bidar- 585401
+BIDAR, INDIA
+Snehalata Patil
+M.Tech (CSE)
+VKIT, Bangalore- 560040
+BANGALORE, INDIA"
+981847c0a3d667aae385276221834edbb8ebd11c,A generalizable approach for multi-view 3D human pose regression,"A generalizable approach for multi-view 3D human pose regression
+Abdolrahim Kadkhodamohammadia,∗, Nicolas Padoya
+ICube, University of Strasbourg, CNRS, IHU Strasbourg, France"
+982db27f0a092d5c8db88e959a77fae5b4f9cdf6,"A cross-cultural, multimodal, affective corpus for gesture expressivity analysis","J Multimodal User Interfaces
+DOI 10.1007/s12193-012-0112-x
+ORIGINAL PAPER
+A cross-cultural, multimodal, affective corpus for gesture
+expressivity analysis
+G. Caridakis · J. Wagner · A. Raouzaiou ·
+F. Lingenfelser · K. Karpouzis · E. Andre
+Received: 5 March 2012 / Accepted: 15 September 2012
+© OpenInterface Association 2012"
+53819049f41998a5a1587dfccccc2db8612b45af,Deep Semantic Lane Segmentation for Mapless Driving,"018 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)
+Madrid, Spain, October 1-5, 2018
+978-1-5386-8093-3/18/$31.00 ©2018 IEEE"
+53079196041fedeb5f1e236b1c76c7108fd8346e,"Multiple Object Detection, Tracking and Long-Term Dynamics Learning in Large 3D Maps","Multiple Object Detection, Tracking
+and Long-Term Dynamics Learning
+in Large 3D Maps
+Local
+D Maps
+Object
+Posteriors
+Learn
+Dynamics
+Location l1
+Location l3
+Object jump
+probability:
+pjump = 0.036
+Object spatial
+process variance:
+q = 0.137
+Measurement
+covariance:
+ 0.14 −0.03 0.02"
+5357bdaf7c54619016bdb7ebfa991a65a6cc8353,"Infants’ Temperament and Mothers’, and Fathers’ Depression Predict Infants’ Attention to Objects Paired with Emotional Faces","J Abnorm Child Psychol (2016) 44:975–990
+DOI 10.1007/s10802-015-0085-9
+Infants’ Temperament and Mothers’, and Fathers’ Depression
+Predict Infants’ Attention to Objects Paired with Emotional Faces
+Evin Aktar 1,2 & Dorothy J. Mandell 1 & Wieke de Vente 2 & Mirjana Majdandžić 2 &
+Maartje E. J. Raijmakers 1,3 & Susan M. Bögels 2
+Published online: 8 October 2015
+# The Author(s) 2015. This article is published with open access at Springerlink.com"
+53c5f995e76ead002f1b0a78bfd50de3b1faf593,Enhancing the Symmetry and Proportion of 3D Face Geometry,"Enhancing the symmetry and proportion of 3D
+face geometry
+Qiqi Liao, Xiaogang Jin, Wenting Zeng"
+531b211d4cbe766e0b86c4bb6f24e924494360c5,"SuperDepth: Self-Supervised, Super-Resolved Monocular Depth Estimation","SuperDepth: Self-Supervised, Super-Resolved Monocular Depth Estimation
+Sudeep Pillai, Rares, Ambrus,, Adrien Gaidon
+Toyota Research Institute (TRI)"
+53bb52eb910c3a0ac5dc7f379b1f3f7c29af529d,Pain recognition using spatiotemporal oriented energy of facial muscles,"Pain Recognition using Spatiotemporal Oriented Energy of Facial Muscles
+Ramin Irani, Kamal Nasrollahi, and Thomas B. Moeslund
+Visual Analysis of People (VAP) Laboratory
+Rendsburggade 14, 9000 Aalborg, Denmark
+{ri, kn,"
+53e081f5af505374c3b8491e9c4470fe77fe7934,Unconstrained realtime facial performance capture,"Unconstrained Realtime Facial Performance Capture
+Pei-Lun Hsieh⇤
+⇤ University of Southern California
+Chongyang Ma⇤
+Jihun Yu†
+Hao Li⇤
+Industrial Light & Magic
+Figure 1: Calibration-free realtime facial performance capture on highly occluded subjects using an RGB-D sensor."
+53f8f1ddd83a9e0e0821aaa883fbf7c1f7f5426e,Face Recognition using Principal Component Analysis and Log-Gabor Filters,"Face Recognition using Principal Component
+Analysis and Log-Gabor Filters
+Vytautas Perlibakas
+Image Processing and Analysis Laboratory, Computational Technologies Centre,
+Kaunas University of Technology, Studentu st. 56-305, LT-51424 Kaunas,
+Lithuania"
+53ac22fff7ae3ed08565439ac30656846cac2465,Learning 3D Human Pose from Structure and Motion,"Learning 3D Human Pose from Structure and Motion
+Rishabh Dabral1, Anurag Mundhada1, Uday Kusupati1, Safeer Afaque1, Abhishek
+Sharma2, Arjun Jain1
+{rdabral, safeer, {anuragmundhada,
+Indian Institute of Technology Bombay, 2Gobasco AI Labs
+kusupatiuday,"
+53b35519e09772fb7ec470fdec51c6edb43c4f13,Word Channel Based Multiscale Pedestrian Detection without Image Resizing and Using Only One Classifier,"Word Channel Based Multiscale Pedestrian Detection
+Without Image Resizing and Using Only One Classifier
+Arthur Daniel Costea and Sergiu Nedevschi
+Image Processing and Pattern Recognition Group (http://cv.utcluj.ro)
+Computer Science Department, Technical University of Cluj-Napoca, Romania
+{arthur.costea,
+pedestrian or non-pedestrian based on image features. The
+image features should capture the required information for
+classification, while allowing fast computation.
+Previous object detection approaches use a fixed size
+sliding window and resize the image [8] or use a fixed size
+image and resize the sliding window [29]. When using
+multiple sliding window scales, individual classifiers are
+trained for different scales. In this paper we propose a
+solution to pedestrian detection that does not require image
+resizing and uses only one classifier for all sliding window
+scales. The proposed approach introduces the use of word
+channels, inspired from codebook based semantic image
+annotation techniques for extracting classification features.
+2. Related work"
+5357e6e5d5fe06934bfe693d18b9f44bbd98f73b,Landmark Detection for Unconstrained Face Recognition,"Landmark Detection for
+Unconstrained Face Recognition
+Panagiotis B. Perakis (cid:63)
+National and Kapodistrian University of Athens
+Department of Informatics and Telecommunications"
+53f981cb6f1cf19b08255c571d62cc1073fd792b,Deconvolutional networks for point-cloud vehicle detection and tracking in driving scenarios,"Deconvolutional Networks for Point-Cloud Vehicle Detection
+and Tracking in Driving Scenarios
+V´ıctor Vaquero∗, Ivan del Pino∗, Francesc Moreno-Noguer, Joan Sol`a, Alberto Sanfeliu and Juan Andrade-Cetto"
+538a9230ddc14b8a5d3f5f195aac4ec43e37d16f,Joint Holistic and Partial CNN for Pedestrian Detection,"YUN ZHAO et al.: JOINT HOLISTIC AND PARTIAL CNN FOR PEDESTRIAN DETECTION 1
+Joint Holistic and Partial CNN for Pedestrian
+Detection
+Yun Zhao1
+Zejian Yuan*1
+Hui Zhang2
+Institute of Artificial Intelligence and
+Robotics
+Xi’an Jiaotong University
+Xi’an, China
+Shenzhen Forward Innovation
+Digital Technology Co. Ltd. China"
+53881bb35cb98c788f75fbc8c76198ccbc50edbf,Selective experience replay in reinforcement learning for reidentification,"SELECTIVE EXPERIENCE REPLAY IN REINFORCEMENT LEARNING FOR
+REIDENTIFICATION
+Ninad Thakoor , Bir Bhanu
+Center for Research in Intelligent Systems,
+University of California, Riverside, Riverside, CA 92521, USA"
+53993c7fabf631cbd8a44ab3e42c6bdf784db456,Understanding and Predicting Image Memorability at a Large Scale,"Understanding and Predicting Image Memorability at a Large Scale
+Aditya Khosla
+Akhil S. Raju
+Antonio Torralba
+Aude Oliva"
+537a00082b413b40fbdd02b5584791614f5071d2,Face Recognition Using Principal Component Analysis for Security Based System,"International Journal of Science and Research (IJSR)
+ISSN (Online): 2319-7064
+Index Copernicus Value (2013): 6.14 | Impact Factor (2013): 4.438
+Face Recognition Using Principal Component
+Analysis for Security Based System
+Madhuri M. Ghodake1, Parul S. Arora2
+Savitribai Phule Pune University, G.H.Raisoni College of Engg & Management, Domkhel Road, Wagholi, Pune
+Assistant Professor, G.H.Raisoni College of Engg & Management, Domkhel Road, Wagholi, Pune, Savitribai Phule University, Pune"
+53c36186bf0ffbe2f39165a1824c965c6394fe0d,I Know How You Feel: Emotion Recognition with Facial Landmarks,"I Know How You Feel: Emotion Recognition with Facial Landmarks
+Tooploox 2Polish-Japanese Academy of Information Technology 3Warsaw University of Technology
+Ivona Tautkute1,2, Tomasz Trzcinski1,3 and Adam Bielski1"
+5366573e96a1dadfcd4fd592f83017e378a0e185,"Server, server in the cloud. Who is the fairest in the crowd?","Böhlen, Chandola and Salunkhe
+Server, server in the cloud.
+Who is the fairest in the crowd?"
+53a41c711b40e7fe3dc2b12e0790933d9c99a6e0,Recurrent Memory Addressing for Describing Videos,"Recurrent Memory Addressing for describing videos
+Arnav Kumar Jain∗ Abhinav Agarwalla∗
+Kumar Krishna Agrawal∗
+Pabitra Mitra
+{arnavkj95, abhinavagarawalla, kumarkrishna,
+Indian Institute of Technology Kharagpur"
+53822d61e829ef02a95a6c89fea082114fd3e16b,A General Framework for Tracking Multiple People from a Moving Camera,"This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication.
+IEEE TRANSACTION ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE
+A General Framework for Tracking Multiple
+People from a Moving Camera
+Wongun Choi, Caroline Pantofaru, Silvio Savarese"
+53c8f841cbf2c8f09c6ece9d7f164504fe39409b,Deep Clustering for Unsupervised Learning of Visual Features,"Deep Clustering for Unsupervised Learning
+of Visual Features
+Mathilde Caron, Piotr Bojanowski, Armand Joulin, and Matthijs Douze
+Facebook AI Research"
+533bfb82c54f261e6a2b7ed7d31a2fd679c56d18,Unconstrained Face Recognition: Identifying a Person of Interest From a Media Collection,"Technical Report MSU-CSE-14-1
+Unconstrained Face Recognition: Identifying a
+Person of Interest from a Media Collection
+Lacey Best-Rowden, Hu Han, Member, IEEE, Charles Otto, Brendan Klare, Member, IEEE, and
+Anil K. Jain, Fellow, IEEE"
+5383473d1a669beb0089f72a9a5075e943f0270f,Higher-order Occurrence Pooling on Mid- and Low-level Features: Visual Concept Detection,
+5367610430dc0380dfbe8344e08537267875968c,Tracking 3D Surfaces Using Multiple Cameras: A Probabilistic Approach,"Tracking 3D Surfaces Using
+Multiple Cameras: A
+Probabilistic Approach
+Thomas Popham
+Thesis
+Submitted to the University of Warwick
+for the degree of
+Doctor of Philosophy
+Department of Computer Science
+August 2010"
+53facd4da5f1d1f98f876211421957f5fbe8a29a,The Mesh-LBP: A Framework for Extracting Local Binary Patterns From Discrete Manifolds,"The Mesh-LBP: A Framework for Extracting Local
+Binary Patterns From Discrete Manifolds
+Naoufel Werghi, Member, IEEE, Stefano Berretti, Member, IEEE, and Alberto del Bimbo, Member, IEEE"
+537061f3601965b5aab9f402763d9dcf451e1cef,A Deep Neural Model Of Emotion Appraisal,"Noname manuscript No.
+(will be inserted by the editor)
+A Deep Neural Model Of Emotion Appraisal
+Pablo Barros · Emilia Barakova · Stefan Wermter
+Received: date / Accepted: date"
+53492cb14b33a26b10c91102daa2d5a2a3ed069d,Improving Online Multiple Object tracking with Deep Metric Learning,"Improving Online Multiple Object tracking with Deep Metric Learning
+Michael Thoreau, Navinda Kottege"
+53bed2d3d75c4320ad5af4a85e31bf92e3c704ef,Reinforced Video Captioning with Entailment Rewards,"Reinforced Video Captioning with Entailment Rewards
+Ramakanth Pasunuru and Mohit Bansal
+UNC Chapel Hill
+{ram,"
+536d1f74c6543afcf2bc711befd82ac7886d1c33,Fusing Shearlets and LBP Feature Sets for Face Recognition,"ISSN 1746-7659, England, UK
+Journal of Information and Computing Science
+Vol. 10, No. 1, 2015, pp. 029-039
+Fusing Shearlets and LBP Feature Sets for Face Recognition
+Zhiyong Zeng 1
+Faculty of Software, Fujian Normal University, Fuzhou, 350108, China
+(Received October 07, 2014, accepted December 24, 2014)"
+538f735450463f40c78f60797899fcee47df72bc,Discriminative Dictionary Learning With Motion Weber Local Descriptor for Violence Detection,"© 2016 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for
+all other uses, in any current or future media, including reprinting/republishing this material for
+advertising or promotional purposes, creating new collective works, for resale or redistribution to
+servers or lists, or reuse of any copyrighted component of this work in other works."
+3f55d26dd638c849745b95e912c28d88445ba5e1,Supervised Learning of Universal Sentence Representations from Natural Language Inference Data,"Supervised Learning of Universal Sentence Representations from
+Natural Language Inference Data
+Alexis Conneau
+Facebook AI Research
+Douwe Kiela
+Facebook AI Research
+Holger Schwenk
+Facebook AI Research
+Lo¨ıc Barrault
+LIUM, Universit´e Le Mans
+Antoine Bordes
+Facebook AI Research"
+3fbd68d1268922ee50c92b28bd23ca6669ff87e5,A shape- and texture-based enhanced Fisher classifier for face recognition,"IEEE TRANSACTIONS ON IMAGE PROCESSING, VOL. 10, NO. 4, APRIL 2001
+A Shape- and Texture-Based Enhanced Fisher
+Classifier for Face Recognition
+Chengjun Liu, Member, IEEE, and Harry Wechsler, Fellow, IEEE"
+3f22a4383c55ceaafe7d3cfed1b9ef910559d639,Robust Kronecker Component Analysis,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+Robust Kronecker Component Analysis
+Mehdi Bahri, Student Member, IEEE, Yannis Panagakis, and Stefanos Zafeiriou, Member, IEEE"
+3f06d445371c252d5a6ba977181987094148d6de,Fast Single Shot Detection and Pose Estimation,"Fast Single Shot Detection and Pose Estimation
+Patrick Poirson1, Phil Ammirato1, Cheng-Yang Fu1, Wei Liu1, Jana Koˇseck´a2, Alexander C. Berg1
+UNC Chapel Hill 2George Mason University
+201 S. Columbia St., Chapel Hill, NC 27599 24400 University Dr, Fairfax, VA 22030"
+3fdcc1e2ebcf236e8bb4a6ce7baf2db817f30001,A Top-Down Approach for a Synthetic Autobiographical Memory System,"A top-down approach for a synthetic
+autobiographical memory system
+Andreas Damianou1,2, Carl Henrik Ek3, Luke Boorman1, Neil D. Lawrence2,
+and Tony J. Prescott1
+Shef‌f‌ield Centre for Robotics (SCentRo), Univ. of Shef‌f‌ield, Shef‌f‌ield, S10 2TN, UK
+Dept. of Computer Science, Univ. of Shef‌f‌ield, Shef‌f‌ield, S1 4DP, UK
+CVAP Lab, KTH, Stockholm, Sweden"
+3f44352b857f2fc18c18c5ebb2cbf994ee22f44c,Humanist computing for knowledge discovery from ordered datasets,"HumanistComputingforKnowledgeDiscovery
+fromOrderedDatasets
+JonathanMichaelRossiter
+DepartmentofEngineeringMathematics
+UniversityofBristol
+AdissertationsubmittedtotheUniversityofBristol
+inaccordancewiththerequirementsofthedegreeof
+DoctorofPhilosophyintheFacultyofEngineering
+January
+3f9c09e2fbefc9aeba6505f49317f9a2fc03a615,Understanding fundamental design choices in single-ISA heterogeneous multicore architectures,"Understanding Fundamental Design Choices in Single-ISA
+Heterogeneous Multicore Architectures
+KENZO VAN CRAEYNEST and LIEVEN EECKHOUT, Ghent University
+Single-ISA heterogeneous multicore processors have gained substantial interest over the past few years
+because of their power efficiency, as they offer the potential for high overall chip throughput within a
+given power budget. Prior work in heterogeneous architectures has mainly focused on how heterogeneity
+can improve overall system throughput. To what extent heterogeneity affects per-program performance
+has remained largely unanswered. In this article, we aim at understanding how heterogeneity affects both
+chip throughput and per-program performance; how heterogeneous architectures compare to homogeneous
+architectures under both performance metrics; and how fundamental design choices, such as core type, cache
+size, and off-chip bandwidth, affect performance.
+We use analytical modeling to explore a large space of single-ISA heterogeneous architectures. The ana-
+lytical model has linear-time complexity in the number of core types and programs of interest, and offers a
+unique opportunity for exploring the large space of both homogeneous and heterogeneous multicore proces-
+sors in limited time. Our analysis provides several interesting insights: While it is true that heterogeneity
+can improve system throughput, it fundamentally trades per-program performance for chip throughput;
+although some heterogeneous configurations yield better throughput and per-program performance than
+homogeneous designs, some homogeneous configurations are optimal for particular throughput versus per-
+program performance trade-offs. Two core types provide most of the benefits from heterogeneity and a larger
+number of core types does not contribute much; job-to-core mapping is both important and challenging for"
+3f5b20c35f55417823f0201862d85af1f31e9348,Salience Biased Loss for Object Detection in Aerial Images,"Salience Biased Loss for Object Detection
+in Aerial Images
+Peng Sun
+Guerdan Luke
+Guang Chen
+University of Missouri-Columbia
+Yi Shang
+over regular and dense sampling of object scales, locations,
+and aspect ratios, such as YOLO [8], SSD [11], and RetinaNet
+[18]. Each of these demonstrates promising results with faster
+speed, a simpler network, and similar accuracy of two-stage
+object detectors. RetinaNet [18] even outperforms one of the
+best two-stage detectors, Faster R-CNN [5], with a relative 4.0
+mAP improvement in COCO data [17]."
+3faebe9d5c47fc90998811c4ac768706283d605c,Semi-Supervised Detection of Extreme Weather Events in Large Climate Datasets,"Under review as a conference paper at ICLR 2017
+SEMI-SUPERVISED DETECTION OF EXTREME WEATHER
+EVENTS IN LARGE CLIMATE DATASETS
+Evan Racah1, Christopher Beckham2, Tegan Maharaj2
+Prabhat1, Christopher Pal2
+Lawrence Berkeley National Lab, Berkeley, CA,
+´Ecole Polytechnique de Montr´eal,"
+3f0f3c2bc151ef91959b06442b9ad80d405387a5,Evidential combination of pedestrian detectors,"XU ET AL.: EVIDENTIAL COMBINATION OF PEDESTRIAN DETECTORS
+Evidential combination of pedestrian
+detectors
+Philippe Xu1
+https://www.hds.utc.fr/~xuphilip
+Franck Davoine12
+Thierry Denœux1
+https://www.hds.utc.fr/~tdenoeux
+UMR CNRS 7253, Heudiasyc,
+Université de Technologie de
+Compiègne, France
+CNRS, LIAMA,
+Beijing, P. R. China"
+3f848d6424f3d666a1b6dd405a48a35a797dd147,Is 2D Information Enough For Viewpoint Estimation?,"GHODRATI et al.: IS 2D INFORMATION ENOUGH FOR VIEWPOINT ESTIMATION?
+Is 2D Information Enough For Viewpoint
+Estimation?
+Amir Ghodrati
+Marco Pedersoli
+Tinne Tuytelaars
+KU Leuven, ESAT - PSI, iMinds
+Leuven, Belgium"
+3f6a6050609ba205ec94b8af186a9dca60a8f65e,Harmonizing Maximum Likelihood with Gans,"Under review as a conference paper at ICLR 2019
+HARMONIZING MAXIMUM LIKELIHOOD WITH GANS
+FOR MULTIMODAL CONDITIONAL GENERATION
+Anonymous authors
+Paper under double-blind review"
+3f10b9d98a276fb9e21e5742ce88bc7f48629715,Imparare a Quantificare Guardando (Learning to Quantify by Watching),"Imparare a quantificare guardando
+Sandro Pezzelle
+CIMeC
+Ionut Sorodoc
+Aurelie Herbelot
+CIMeC
+EM LCT
+Universit`a degli Studi di Trento
+Raffaella Bernardi
+CIMeC, DISI"
+3fa738ab3c79eacdbfafa4c9950ef74f115a3d84,DaMN - Discriminative and Mutually Nearest: Exploiting Pairwise Category Proximity for Video Action Recognition,"DaMN – Discriminative and Mutually Nearest:
+Exploiting Pairwise Category Proximity
+for Video Action Recognition
+Rui Hou1, Amir Roshan Zamir1, Rahul Sukthankar2, and Mubarak Shah1
+Center for Research in Computer Vision at UCF, Orlando, USA
+Google Research, Mountain View, USA
+http://crcv.ucf.edu/projects/DaMN/"
+3f8e481ea845aa20704d8c93f6a3a72025219f64,Data mapping by probabilistic modular networks and information-theoretic criteria,"IEEE TRANSACTIONS ON SIGNAL PROCESSING, VOL. 46, NO. 12, DECEMBER 1998
+Data Mapping by Probabilistic Modular
+Networks and Information-Theoretic Criteria
+Yue Wang, Shang-Hung Lin, Huai Li, and Sun-Yuan Kung, Fellow, IEEE"
+3f2270762ff68d6771d93d800683ae6bc76855e7,3D Human Motion Tracking and Pose Estimation using Probabilistic Activity Models,"MANCHESTER METROPOLITAN UNIVERSITY
+3D Human Motion Tracking and
+Pose Estimation using
+Probabilistic Activity Models
+John Darby
+A thesis submitted in partial fulfillment for the
+degree of Doctor of Philosophy
+Faculty of Science and Engineering
+The Department of Computing and Mathematics
+October 2010"
+3fb98e76ffd8ba79e1c22eda4d640da0c037e98a,Convolutional Neural Networks for Crop Yield Prediction using Satellite Images,"Convolutional Neural Networks for Crop Yield Prediction using Satellite Images
+H. Russello"
+3fa9bf4649ff5e0d63ee20a546e8814f3a93ca4d,Digital Image Technique using Gabor Filter and SVM in Heterogeneous Face Recognition,"Research Inventy: International Journal of Engineering And Science
+Vol.4, Issue 4 (April 2014), PP 45-52
+Issn (e): 2278-4721, Issn (p):2319-6483, www.researchinventy.com
+Digital Image Technique using Gabor Filter and SVM in
+Heterogeneous Face Recognition
+M.Janani#1, K.Nandhini*2, K.Senthilvadivel*3,S.Jothilakshmi*4,
+PG Student#1,*2*3, Assistant Professor*4,, Dept of CSE#1,*2,*3,*4
+S.V.S College of Engineering#1,*4,, PPG Institute of Technology*2,*3,
+Coimbatore, Tamilnadu"
+3f600008dd9745e8357f5b7b3c1a69b8be6b7767,Atypical reflexive gaze patterns on emotional faces in autism spectrum disorders.,"The Journal of Neuroscience, September 15, 2010 • 30(37):12281–12287 • 12281
+Behavioral/Systems/Cognitive
+Atypical Reflexive Gaze Patterns on Emotional Faces in
+Autism Spectrum Disorders
+Dorit Kliemann,1,2,3 Isabel Dziobek,2 Alexander Hatri,1,2 Rosa Steimke,2,4 and Hauke R. Heekeren1,2,3
+Department of Educational Science and Psychology, and 2Cluster of Excellence, “Languages of Emotion,” Freie Universita¨t Berlin, 14195 Berlin, Germany,
+and 3Max Planck Institute for Human Development, 14195 Berlin, Germany, and 4Department of Psychiatry and Psychotherapy, Charité University
+Medicine, 10117 Berlin, Germany
+Atypical scan paths on emotional faces and reduced eye contact represent a prominent feature of autism symptomatology, yet the reason
+for these abnormalities remains a puzzle. Do individuals with autism spectrum disorders (ASDs) fail to orient toward the eyes or do they
+actively avoid direct eye contact? Here, we used a new task to investigate reflexive eye movements on fearful, happy, and neutral faces.
+Participants (ASDs: 12; controls: 11) initially fixated either on the eyes or on the mouth. By analyzing the frequency of participants’ eye
+movements away from the eyes and toward the eyes, respectively, we explored both avoidance and orientation reactions. The ASD group
+showed a reduced preference for the eyes relative to the control group, primarily characterized by more frequent eye movements away
+from the eyes. Eye-tracking data revealed a pronounced influence of active avoidance of direct eye contact on atypical gaze in ASDs. The
+combination of avoidance and reduced orientation into an individual index predicted emotional recognition performance. Crucially, this
+result provides evidence for a direct link between individual gaze patterns and associated social symptomatology. These findings thereby
+give important insights into the social pathology of ASD, with implications for future research and interventions.
+Introduction
+Recent reports from the social-cognitive neurosciences have em-"
+3f60b1f800178841f4e0ecb79b64fe60b48ed03b,Video Scene Parsing with Predictive Feature Learning,"Video Scene Parsing with Predictive Feature Learning
+Xiaojie Jin1 Xin Li2 Huaxin Xiao2 Xiaohui Shen3 Zhe Lin3 Jimei Yang3
+Yunpeng Chen2 Jian Dong4 Luoqi Liu4 Zequn Jie2 Jiashi Feng2 Shuicheng Yan4,2
+NUS Graduate School for Integrative Science and Engineering, NUS
+360 AI Institute
+Department of ECE, NUS
+Adobe Research"
+3f9210830e31f42103c6550f75cb37fde18e5af1,HeadFusion: 360° Head Pose Tracking Combining 3D Morphable Model and 3D Reconstruction,"PAMI SPECIAL ISSUE
+HeadFusion: 360◦Head Pose tracking combining
+3D Morphable Model and 3D Reconstruction
+Yu Yu, Kenneth Alberto Funes Mora, Jean-Marc Odobez"
+3f14b504c2b37a0e8119fbda0eff52efb2eb2461,Joint Facial Action Unit Detection and Feature Fusion: A Multi-Conditional Learning Approach,"Joint Facial Action Unit Detection and Feature
+Fusion: A Multi-Conditional Learning Approach
+Stefanos Eleftheriadis, Ognjen Rudovic, Member, IEEE, and Maja Pantic, Fellow, IEEE"
+3fac7c60136a67b320fc1c132fde45205cd2ac66,Remarks on Computational Facial Expression Recognition from HOG Features Using Quaternion Multi-layer Neural Network,"Remarks on Computational Facial Expression
+Recognition from HOG Features Using
+Quaternion Multi-layer Neural Network
+Kazuhiko Takahashi1, Sae Takahashi1, Yunduan Cui2,
+and Masafumi Hashimoto3
+Information Systems Design, Doshisha University, Kyoto, Japan
+Graduate School of Doshisha University, Kyoto, Japan
+Intelligent Information Engineering and Science, Doshisha University, Kyoto, Japan"
+3f0e00188d751829c4548f9aacb939b982425ebd,Template Protection For 3D Face Recognition,"Template Protection For 3D Face Recognition
+Template Protection For 3D Face Recognition
+Xuebing Zhou, Arjan Kuijper and Christoph Busch
+Fraunhofer Institute for Computer Graphics Research IGD
+Germany"
+3f9a7d690db82cf5c3940fbb06b827ced59ec01e,VIP: Finding important people in images,"VIP: Finding Important People in Images
+Clint Solomon Mathialagan
+Virginia Tech
+Andrew C. Gallagher
+Google Inc.
+Dhruv Batra
+Virginia Tech
+Project: https://computing.ece.vt.edu/~mclint/vip/
+Demo: http://cloudcv.org/vip/"
+3fd90098551bf88c7509521adf1c0ba9b5dfeb57,Attribute-Based Classification for Zero-Shot Visual Object Categorization,"Page 1 of 21
+*****For Peer Review Only*****
+Attribute-Based Classification for Zero-Shot
+Visual Object Categorization
+Christoph H. Lampert, Hannes Nickisch and Stefan Harmeling"
+3f5158ea65bb483c6797462faffa16fea9f0b004,"Lie-X: Depth Image Based Articulated Object Pose Estimation, Tracking, and Action Recognition on Lie Groups","Lie-X : Depth Image Based Articulated Object Pose Estimation,
+Tracking, and Action Recognition on Lie Groups
+Chi Xu1, Lakshmi Narasimhan Govindarajan1, Yu Zhang1, and Li Cheng∗1
+Bioinformatics Institute, A*STAR, Singapore"
+3faff93758fe7fc58b3832055cb15c6ca3f306a7,Evaluation of multi feature fusion at score-level for appearance-based person re-identification,"Evaluation of Multi Feature Fusion at Score-Level
+for Appearance-based Person Re-Identification
+Markus Eisenbach
+Ilmenau University of Technology
+98684 Ilmenau, Germany
+Alexander Kolarow
+Alexander Vorndran
+Julia Niebling
+Horst-Michael Gross
+Ilmenau University of Technology
+Ilmenau University of Technology
+98684 Ilmenau, Germany
+98684 Ilmenau, Germany"
+3f7723ab51417b85aa909e739fc4c43c64bf3e84,Improved Performance in Facial Expression Recognition Using 32 Geometric Features,"Improved Performance in Facial Expression
+Recognition Using 32 Geometric Features
+Giuseppe Palestra1(B), Adriana Pettinicchio2, Marco Del Coco2,
+Pierluigi Carcagn`ı2, Marco Leo2, and Cosimo Distante2
+Department of Computer Science, University of Bari, Bari, Italy
+National Institute of Optics, National Research Council, Arnesano, LE, Italy"
+3fb689c0f1db224d53d9fdaee578d3ff8522f807,"Integrating Motion, Illumination, and Structure in Video Sequences with Applications in Illumination-Invariant Tracking","Integrating Motion, Illumination, and Structure
+in Video Sequences with Applications in
+Illumination-Invariant Tracking
+Yilei Xu, Student Member, IEEE, and Amit K. Roy-Chowdhury, Member, IEEE"
+3f63f9aaec8ba1fa801d131e3680900680f14139,Facial Expression recognition using Local Binary Patterns and Kullback Leibler divergence,"Facial Expression Recognition using Local Binary
+Patterns and Kullback Leibler Divergence
+AnushaVupputuri, SukadevMeher
+divergence."
+3f0e0739677eb53a9d16feafc2d9a881b9677b63,Efficient Two-Stream Motion and Appearance 3D CNNs for Video Classification,"Efficient Two-Stream Motion and Appearance 3D CNNs for
+Video Classification
+Ali Diba
+ESAT-KU Leuven
+Ali Pazandeh
+Sharif UTech
+Luc Van Gool
+ESAT-KU Leuven, ETH Zurich"
+302fee58f8c9498e8a5e543312e7c11baf7e0827,Robust voting algorithm based on labels of behavior for video copy detection,"Robust Voting Algorithm Based on Labels of Behavior
+for Video Copy Detection
+Julien Law-To, Olivier Buisson
+Valerie Gouet-Brunet, Nozha Boujemaa
+INRIA Institut National
+de la Recherche et de l’Informatique
+Rocquencourt, France
+Institut National de l’Audiovisuel
+Bry Sur Marne, France
+(jlawto,obuisson)"
+30b15cdb72760f20f80e04157b57be9029d8a1ab,Face Aging with Identity-Preserved Conditional Generative Adversarial Networks,"Face Aging with Identity-Preserved
+Conditional Generative Adversarial Networks
+Zongwei Wang
+Shanghaitech University
+Xu Tang
+Baidu
+Weixin Luo, Shenghua Gao∗
+Shanghaitech University
+{luowx,"
+30c8a2b6a505645b9f93dcc4d365eee6f46c4c37,Using Curvilinear Features in Focus for Registering a Single Image to a 3D Object,"Using Curvilinear Features in Focus for Registering
+a Single Image to a 3D Object
+Hatem A. Rashwan, Sylvie Chambon, Pierre Gurdjos, G´eraldine Morin and Vincent Charvillat"
+30870ef75aa57e41f54310283c0057451c8c822b,Overcoming catastrophic forgetting with hard attention to the task,"Overcoming Catastrophic Forgetting with Hard Attention to the Task
+Joan Serr`a 1 D´ıdac Sur´ıs 1 2 Marius Miron 1 3 Alexandros Karatzoglou 1"
+305346d01298edeb5c6dc8b55679e8f60ba97efb,Fine-Grained Face Annotation Using Deep Multi-Task CNN,"Article
+Fine-Grained Face Annotation Using Deep
+Multi-Task CNN
+Luigi Celona *
+, Simone Bianco
+and Raimondo Schettini
+Department of Informatics, Systems and Communication, University of Milano-Bicocca,
+viale Sarca, 336 Milano, Italy; (S.B.); (R.S.)
+* Correspondence:
+Received: 3 July 2018; Accepted: 13 August 2018; Published: 14 August 2018"
+306ae56a4fc8f090e58a237749950e1607382ed7,Spatio-Temporal Matching for Human Pose Estimation in Video,"Spatio-temporal Matching for
+Human Pose Estimation in Video
+Feng Zhou and Fernando De la Torre"
+30ccfd2b4b6d5b30581356ccefcf96fd77c1766a,Overview of the ImageCLEF 2014 Scalable Concept Image Annotation Task,"Overview of the ImageCLEF 2016 Scalable
+Concept Image Annotation Task
+Andrew Gilbert, Luca Piras, Josiah Wang, Fei Yan, Arnau Ramisa, Emmanuel
+Dellandrea, Robert Gaizauskas, Mauricio Villegas and Krystian Mikolajczyk"
+30aac3becead355545b5ab7f0c3158040360021e,ACD: Action Concept Discovery from Image-Sentence Corpora,"ACD: Action Concept Discovery from
+Image-Sentence Corpora
+Jiyang Gao
+Univ. of Southern California
+Chen Sun
+Univ. of Southern California
+Ram Nevatia
+Univ. of Southern California"
+30962cf6f47396df88bf1c8827ebda8f0a6ff516,A Convolutional Neural Network Approach for Assisting Avalanche Search and Rescue Operations with UAV Imagery,"Article
+A Convolutional Neural Network Approach for
+Assisting Avalanche Search and Rescue Operations
+with UAV Imagery
+Mesay Belete Bejiga 1, Abdallah Zeggada 1, Abdelhamid Nouffidj 2 and Farid Melgani 1,*
+Department of Information Engineering and Computer Science University of Trento, 38123 Trento, Italy;
+(M.B.B.); (A.Z.)
+Département des Télécommunications, Faculté d’Electronique et d’Informatique, USTHB BP 32, El-Alia,
+Bab-Ezzouar, 16111 Algiers, Algeria;
+* Correspondence: Tel.: +39-046-128-1573
+Academic Editors: Francesco Nex, Xiaofeng Li and Prasad S. Thenkabail
+Received: 11 November 2016; Accepted: 14 January 2017; Published: 24 January 2017"
+309e17e6223e13b1f76b5b0eaa123b96ef22f51b,Face recognition based on a 3D morphable model,"Face Recognition based on a 3D Morphable Model
+Volker Blanz
+University of Siegen
+H¤olderlinstr. 3
+57068 Siegen, Germany"
+30256c10cb7ec139b4245855850998c39b297975,Functional magnetic resonance imaging of autism spectrum disorders,"C l i n i c a l r e s e a r c h
+Functional magnetic resonance imaging of
+autism spectrum disorders
+Gabriel S. Dichter, PhD
+Introduction
+Autism was first described by Leo Kanner1 and
+Hans Asperger2 in a series of clinical case studies. Both
+clinicians suggested that the conditions now referred to
+as autism spectrum disorders (ASDs) may have a neu-
+robiological basis. With the relatively recent advent of
+modern brain imaging techniques, translational psychi-
+atric research has embraced the systematic study of
+This review presents an overview of functional magnetic resonance imaging findings in autism spectrum disorders
+(ASDs). Although there is considerable heterogeneity with respect to results across studies, common themes have
+emerged, including: (i) hypoactivation in nodes of the “social brain” during social processing tasks, including regions
+within the prefrontal cortex, the posterior superior temporal sulcus, the amygdala, and the fusiform gyrus; (ii) aber-
+rant frontostriatal activation during cognitive control tasks relevant to restricted and repetitive behaviors and inter-
+ests, including regions within the dorsal prefrontal cortex and the basal ganglia; (iii) differential lateralization and
+activation of language processing and production regions during communication tasks; (iv) anomalous mesolimbic
+responses to social and nonsocial rewards; (v) task-based long-range functional hypoconnectivity and short-range"
+3046baea53360a8c5653f09f0a31581da384202e,Deformable Face Alignment via Local Measurements and Global Constraints,"Deformable Face Alignment via Local
+Measurements and Global Constraints
+Jason M. Saragih"
+30aff559ad25dd3490712749793547bc89b0f103,Image Latent Semantic Analysis for Face Recognition,"Image Latent Semantic Analysis for Face Recognition
+Jucheng Yang 1,2,3 , Yanbin Jiao2, Jinfeng Yang4,Zhijun Fang2 , Congcong Xiong1,
+Lei Shu2
+College of Computer Science and Information Engineering, Tianjin University of Science
+nd Technology, Tianjin, China.
+School of Information Technology, Jiangxi University of Finance and Economics,
+Nanchang, China. {ybjiao, zjfang, lshu
+Ahead Software Company Limited, Nanchang, 330041, China
+Tianjin Key Lab for Advanced Signal Processing, Civil Aviation University of China,
+Tianjin, China"
+3028690d00bd95f20842d4aec84dc96de1db6e59,Leveraging Union of Subspace Structure to Improve Constrained Clustering,"Leveraging Union of Subspace Structure to Improve Constrained Clustering
+John Lipor 1 Laura Balzano 1"
+308647f22e3f1c80b7416b3c53fd56f9abfa904f,Robust Real-Time Tracking with Diverse Ensembles and Random Projections,"Robust Real-Time Tracking with Diverse Ensembles and Random Projections
+Center for Informatics Science,
+Center for Informatics Science,
+Sara Maher
+Nile University
+Giza, Egypt
+Mohamed El Helw
+Center for Informatics Science,
+Nile University
+Giza, Egypt
+Ahmed Salaheldin
+Nile University
+Giza, Egypt"
+30f7609d111bb3bc006e3dd38678291528aa14d3,A new approach for extracting and summarizing abnormal activities in surveillance videos,"014 IEEE International
+Conference on Multimedia and
+Expo Workshops
+(ICMEW 2014)
+Chengdu, China
+4-18 July 2014
+Pages 516-1030
+IEEE Catalog Number:
+ISBN:
+CFP14IEW-POD
+978-1-4799-4716-4"
+30c96cc041bafa4f480b7b1eb5c45999701fe066,Discrete Cosine Transform Locality-Sensitive Hashes for Face Retrieval,"Discrete Cosine Transform Locality-Sensitive
+Hashes for Face Retrieval
+Mehran Kafai, Member, IEEE, Kave Eshghi, and Bir Bhanu, Fellow, IEEE"
+300fb25626bebfc84cf2f6458784b5cdf5c3ffc2,Cross-Dataset Adaptation for Visual Question Answering,"Cross-Dataset Adaptation for Visual Question Answering
+Wei-Lun Chao∗
+Hexiang Hu∗
+Fei Sha
+U. of Southern California
+U. of Southern California
+U. of Southern California
+Los Angeles, CA
+Los Angeles, CA
+Los Angeles, CA"
+30eed14dfdee78279536e680871bed4f128d5f46,A Study of Calorie Estimation in Pictures of Food,
+306957285fea4ce11a14641c3497d01b46095989,Face Recognition Under Varying Lighting Based on Derivates of Log Image,"FACE RECOGNITION UNDER VARYING LIGHTING BASED ON
+DERIVATES OF LOG IMAGE
+Laiyun Qing1,2, Shiguang Shan2, Wen Gao1,2
+ICT-ISVISION Joint R&D Laboratory for Face Recognition, CAS, Beijing 100080, China
+Graduate School, CAS, Beijing, 100039, China"
+309e5ae1554d2afc3b94eaea66b8f31ba85c434a,"Bian, Xiao. Sparse and Low-rank Modeling on High Dimensional Data: a Geometric Perspective. (under the Direction of Dr. Hamid Krim.) Sparse and Low-rank Modeling on High Dimensional Data: a Geometric Perspective",
+30f113d985d876a3974838b2ead49a069b474e57,Guided Upsampling Network for Real-Time Semantic Segmentation,"MAZZINI: GUN FOR REAL-TIME SEMANTIC SEGMENTATION
+Guided Upsampling Network for Real-Time
+Semantic Segmentation
+Davide Mazzini
+Department of Informatics, Systems
+nd Communication
+University of Milano-Bicocca
+viale Sarca 336 Milano, Italy"
+3005a4afddab849d9070788ac0e4e95e0fff2216,"Transfer Metric Learning: Algorithms, Applications and Outlooks","JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, XXXX XXXX
+Transfer Metric Learning: Algorithms,
+Applications and Outlooks
+Yong Luo, Yonggang Wen, Senior Member, IEEE, Ling-Yu Duan, Member, IEEE,
+nd Dacheng Tao, Fellow, IEEE"
+307a810d1bf6f747b1bd697a8a642afbd649613d,An affordable contactless security system access for restricted area,"An affordable contactless security system access
+for restricted area
+Pierre Bonazza1, Johel Mitéran1, Barthélémy Heyrman1, Dominique Ginhac1,
+Vincent Thivent2, Julien Dubois1
+Laboratory Le2i
+University Bourgogne Franche-Comté, France
+Odalid compagny, France
+Contact
+Keywords – Smart Camera, Real-time Image Processing, Biometrics, Face Detection, Face Verifica-
+tion, EigenFaces, Support Vector Machine,
+We present in this paper a security system based on
+identity verification process and a low-cost smart cam-
+era, intended to avoid unauthorized access to restricted
+rea. The Le2i laboratory has a longstanding experi-
+ence in smart cameras implementation and design [1],
+for example in the case of real-time classical face de-
+tection [2] or human fall detection [3].
+The principle of the system, fully thought and designed
+in our laboratory, is as follows: the allowed user pre-
+sents a RFID card to the reader based on Odalid system"
+301474a50a39b24917ad79bd2493f1168c4c1227,Eigen-disfigurement model for simulating plausible facial disfigurement after reconstructive surgery,"Lee et al. BMC Medical Imaging (2015) 15:12
+DOI 10.1186/s12880-015-0050-7
+R ES EAR CH A R T I C LE
+Open Access
+Eigen-disfigurement model for simulating plausible
+facial disfigurement after reconstructive surgery
+Juhun Lee1,2, Michelle C Fingeret2,3, Alan C Bovik1, Gregory P Reece2, Roman J Skoracki2,
+Matthew M Hanasono2 and Mia K Markey4,5*"
+30b32f4a6341b5809428df1271bdb707f2418362,A Sequential Neural Encoder With Latent Structured Description for Modeling Sentences,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+A Sequential Neural Encoder with Latent Structured
+Description for Modeling Sentences
+Yu-Ping Ruan, Qian Chen, and Zhen-Hua Ling, Member, IEEE"
+30a059872d0fff3442504c24880c93738036e6aa,Calcul Neuronal Distribué Pour La Perception Visuelle Du Mouvement Th`ese,"UFRmath´ematiquesetinformatique´EcoledoctoraleIAEMLorraineD´epartementdeformationdoctoraleeninformatiqueCalculneuronaldistribu´epourlaperceptionvisuelledumouvementTH`ESEpr´esent´eeetsoutenuepubliquementle14Octobre2011pourl’obtentionduDoctoratdel’universit´eNancy2(sp´ecialit´einformatique)parMauricioDavidCerdaVillablancaCompositiondujuryPr´esident:Lepr´esidentRapporteurs:MathiasQUOYProfesseur,Universit´edeCergy-Pontoise,FranceAdrianPALACIOSProfesseur,UniversidaddeValparaiso,ChiliExaminateurs:HeikoNEUMANNProfesseur,UniversityofUlm,AllemagneAnneBOYERProfesseur,Universit´eNancy2,FranceRachidDERICHEDirecteurdeRecherche,INRIA,Sophia-Antipolis,FranceBernardGIRAU(directeur)Professeur,Universit´eHenriPoincar´e,Nancy1LaboratoireLorraindeRechercheenInformatiqueetsesApplications—UMR7503"
+300eb15b819ecc9668be26735e5038efc4e05281,Object-based Place Recognition for Mobile Robots Using Panoramas,"Object-based Place Recognition for
+Mobile Robots Using Panoramas
+Arturo RIBES a,1, Arnau RAMISA a and Ramon LOPEZ DE MANTARAS a and
+Ricardo TOLEDO b
+Artificial Intelligence Research Institute (IIIA-CSIC), Campus UAB, 08193 Bellaterra,
+Computer Vision Center (CVC), Campus UAB, 08193 Bellaterra, Spain
+Spain"
+30bb582c2c09abc7eb9dda7d9f80804eeb89f9d7,Research Problems and Opportunities in Memory Systems,"ResearchProblemsandOpportunitiesinMemorySystemsOnurMutlu1,LavanyaSubramanian1c(cid:13)TheAuthors2014.ThispaperispublishedwithopenaccessatSuperFri.orgThememorysystemisafundamentalperformanceandenergybottleneckinalmostallcom-putingsystems.Recentsystemdesign,application,andtechnologytrendsthatrequiremoreca-pacity,bandwidth,ef‌f‌iciency,andpredictabilityoutofthememorysystemmakeitanevenmoreimportantsystembottleneck.Atthesametime,DRAMtechnologyisexperiencingdif‌f‌iculttech-nologyscalingchallengesthatmakethemaintenanceandenhancementofitscapacity,energy-ef‌f‌iciency,andreliabilitysignificantlymorecostlywithconventionaltechniques.Inthisarticle,afterdescribingthedemandsandchallengesfacedbythememorysystem,weexaminesomepromisingresearchanddesigndirectionstoovercomechallengesposedbymemoryscaling.Specifically,wedescribethreemajornewresearchchallengesandsolutiondirections:1)enablingnewDRAMarchitectures,functions,interfaces,andbetterintegrationoftheDRAMandtherestofthesystem(anapproachwecallsystem-DRAMco-design),2)designingamemorysystemthatemploysemergingnon-volatilememorytechnologiesandtakesadvantageofmultipledifferenttechnologies(i.e.,hybridmemorysystems),3)providingpredictableperformanceandQoStoapplicationssharingthememorysystem(i.e.,QoS-awarememorysystems).WealsobrieflydescribeourongoingrelatedworkincombatingscalingchallengesofNANDflashmemory.Keywords:memorysystems,scaling,DRAM,flash,non-volatilememory,QoS,reliability.IntroductionMainmemoryisacriticalcomponentofallcomputingsystems,employedinserver,em-bedded,desktop,mobileandsensorenvironments.Memorycapacity,energy,cost,performance,andmanagementalgorithmsmustscaleaswescalethesizeofthecomputingsysteminordertomaintainperformancegrowthandenablenewapplications.Unfortunately,suchscalinghasbe-comedif‌f‌icultbecauserecenttrendsinsystems,applications,andtechnologygreatlyexacerbatethememorysystembottleneck.1.MemorySystemTrendsInparticular,onth
esystems/architecturefront,energyandpowerconsumptionhavebecomekeydesignlimitersasthememorysystemcontinuestoberesponsibleforasignificantfractionofoverallsystemenergy/power[112].Moreandincreasinglyheterogeneousprocessingcoresandagents/clientsaresharingthememorysystem[11,36,39,60,78,79,178,181],leadingtoincreasingdemandformemorycapacityandbandwidthalongwitharelativelynewdemandforpredictableperformanceandqualityofservice(QoS)fromthememorysystem[129,137,176].Ontheapplicationsfront,importantapplicationsareusuallyverydataintensiveandarebecomingincreasinglyso[17],requiringbothreal-timeandof‌f‌linemanipulationofgreatamountsofdata.Forexample,next-generationgenomesequencingtechnologiesproducemassiveamountsofsequencedatathatoverwhelmsmemorystorageandbandwidthrequirementsoftoday’shigh-enddesktopandlaptopsystems[9,111,186,196,197]yetresearchershavethegoalofenablinglow-costpersonalizedmedicine,whichrequiresevenlargeramountsofdataandtheireffectiveanalyses.Creationofnewkillerapplicationsandusagemodelsforcomputerslikelydependsonhowwellthememorysystemcansupporttheef‌f‌icientstorageandmanipulationofdatainsuch1CarnegieMellonUniversityDOI:10.14529/jsfi1403022014,Vol.1,No.319"
+302c2293e36e0704ccfe9af759a8505df588eb07,Face recognition with Multilevel B-Splines and Support Vector Machines,"Face Recognition with Multilevel B-Splines and Support
+Vector Machines
+Manuele Bicego
+Dipartimento di Informatica
+University of Verona
+Strada Le Grazie 15
+7134 Verona - Italia
+Gianluca Iacono
+Dipartimento di Informatica
+University of Verona
+Strada Le Grazie 15
+7134 Verona - Italia
+Vittorio Murino
+Dipartimento di Informatica
+University of Verona
+Strada Le Grazie 15
+7134 Verona - Italia"
+30f84c48bdf2f6152075dd9651a761a84b2f2166,"No fear, no panic: probing negation as a means for emotion regulation.","doi:10.1093/scan/nss043
+SCAN (2013) 8, 654 ^661
+No fear, no panic: probing negation as a means for
+emotion regulation
+Cornelia Herbert,1 Roland Deutsch,2 Petra Platte,1 and Paul Pauli1
+Department of Psychology, Biological Psychology, Clinical Psychology and Psychotherapy, University of Wu¨rzburg, 97070 Wu¨rzburg and
+Department of Psychology, Technische Universita¨t Dresden, Dresden, Germany
+This electroencephalographic study investigated if negating one’s emotion results in paradoxical effects or leads to effective emotional downregulation.
+Healthy participants were asked to downregulate their emotions to happy and fearful faces by using negated emotional cue words (e.g. no fun, no fear).
+Cue words were congruent with the emotion depicted in the face and presented prior to each face. Stimuli were presented in blocks of happy and fearful
+faces. Blocks of passive stimulus viewing served as control condition. Active regulation reduced amplitudes of early event-related brain potentials (early
+posterior negativity, but not N170) and the late positive potential for fearful faces. A fronto-central negativity peaking at about 250 ms after target face
+onset showed larger amplitude modulations during downregulation of fearful and happy faces. Behaviorally, negating was more associated with
+reappraisal than with suppression. Our results suggest that in an emotional context, negation processing could be quite effective for emotional
+downregulation but that its effects depend on the type of the negated emotion (pleasant vs unpleasant). Results are discussed in the context of
+dual process models of cognition and emotion regulation.
+Keywords: emotion regulation; event-related brain potentials; negation; reappraisal; suppression
+INTRODUCTION
+Emotion regulation is an important aspect of everyday life (Gross and
+John, 2003; Nezlek and Kuppens, 2008). Imagine the following situ-"
+300b8caf79783a7eba5608b5819b6fed14273d2d,Unsupervised Joint Mining of Deep Features and Image Labels for Large-Scale Radiology Image Categorization and Scene Recognition,"Unsupervised Joint Mining of Deep Features and Image Labels
+for Large-scale Radiology Image Categorization and Scene Recognition
+Xiaosong Wang, Le Lu, Hoo-chang Shin, Lauren Kim, Mohammadhadi Bagheri,
+Isabella Nogues, Jianhua Yao, Ronald M. Summers
+Department of Radiology and Imaging Sciences, National Institutes of Health Clinical Center,
+0 Center Drive, Bethesda, MD 20892"
+300b819bbbe857f5fe89d0895f907073fc288719,"Towards a Robust People Tracking Framework for Service Robots in Crowded, Dynamic Environments","Towards a Robust People Tracking Framework
+for Service Robots in Crowded, Dynamic Environments
+Timm Linder
+Fabian Girrbach
+Kai O. Arras"
+305dccd4004560572af2e849a36faf5626990517,Comparative Analysis of Face Recognition Approaches : A Survey,"Comparative Analysis of Face Recognition Approaches:
+International Journal of Computer Applications (0975 – 8887)
+Volume 57– No.17, November 2012
+A Survey
+Ripal Patel, Nidhi Rathod, Ami Shah
+Electronics & Telecommunication Department,
+BVM Engineering College,
+Vallabh Vidyanagar-388120, Gujarat, India."
+30fd7b1f8502b1c1d7a855946d99d2d5323ec973,Big Data Analysis for 2 Media Production,"I N V I T E D
+P A P E R
+Big Data Analysis for
+Media Production
+By Josep Blat, Alun Evans, Hansung Kim, Evren Imre, Luka`sˇ Polok,
+Viorela Ila, Nikos Nikolaidis, Senior Member IEEE, Pavel Zemcˇı´k, Anastasios Tefas,
+Pavel Smrzˇ, Adrian Hilton, Member IEEE, and Ioannis Pitas, Fellow IEEE"
+302c9c105d49c1348b8f1d8cc47bead70e2acf08,Unconstrained Face Recognition Using A Set-to-Set Distance Measure,"This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TCSVT.2017.2710120, IEEE
+Transactions on Circuits and Systems for Video Technology
+IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY
+Unconstrained Face Recognition Using A Set-to-Set
+Distance Measure
+Jiaojiao Zhao, Jungong Han, and Ling Shao, Senior Member IEEE"
+30861d747c87e2e838c1c30eed334b17cc93cdb6,Bootstrapping Face Detection with Hard Negative Examples,"Bootstrapping Face Detection with Hard
+Negative Examples
+Shaohua Wan
+Zhijun Chen Tao Zhang Bo Zhang Kong-kat Wong
+{wanshaohua, chenzhijun, tao.zhang, zhangbo,
+Xiaomi Inc.
+August 9, 2016"
+301b0da87027d6472b98361729faecf6e1d5e5f6,Head Pose Estimation in Face Recognition Across Pose Scenarios,"HEAD POSE ESTIMATION IN FACE RECOGNITION ACROSS
+POSE SCENARIOS
+M. Saquib Sarfraz and Olaf Hellwich
+Computer vision and Remote Sensing, Berlin university of Technology
+Sekr. FR-3-1, Franklinstr. 28/29, D-10587, Berlin, Germany.
+Keywords:
+Pose estimation, facial pose, face recognition, local energy models, shape description, local features, head
+pose classification."
+30b103d59f8460d80bb9eac0aa09aaa56c98494f,Enhancing Human Action Recognition with Region Proposals,"Enhancing Human Action Recognition with Region Proposals
+Fahimeh Rezazadegan, Sareh Shirazi, Niko Sünderhauf, Michael Milford, Ben Upcroft
+Australian Centre for Robotic Vision(ACRV), School of Electrical Engineering and Computer Science
+Queensland University of Technology(QUT)"
+5e6f546a50ed97658be9310d5e0a67891fe8a102,Can Spatiotemporal 3D CNNs Retrace the History of 2D CNNs and ImageNet?,"Can Spatiotemporal 3D CNNs Retrace the History of 2D CNNs and ImageNet?
+Kensho Hara, Hirokatsu Kataoka, Yutaka Satoh
+National Institute of Advanced Industrial Science and Technology (AIST)
+Tsukuba, Ibaraki, Japan
+{kensho.hara, hirokatsu.kataoka,"
+5e5e11e143140cc376db466d5b096a54b900c2ba,Face Recognition in Uncontrolled Environment,"(IJACSA) International Journal of Advanced Computer Science and Applications,
+Vol. 7, No. 8, 2016
+Face Recognition in Uncontrolled Environment
+Radhey Shyam and Yogendra Narain Singh
+Department of Computer Science & Engineering
+Institute of Engineering and Technology
+Lucknow - 226 021, India"
+5eee9c417157916ee66689718af65965c423b2b7,Autism and Asperger’s Syndrome: A Cognitive Neuroscience Perspective,"In Press: Carol Armstrong, Ed., Handbook of Medical Neuropsychology. New York:
+Springer Science.
+Autism and Asperger’s Syndrome: A Cognitive Neuroscience Perspective
+Jeanne Townsend, Ph.D., Marissa Westerfield, Ph.D.
+Department of Neurosciences, University of California, San Diego
+Table of Contents
+History and Background
+Biological Underpinnings
+Postmortem Studies
+MRI Studies
+White Matter Connectivity
+Neuroanatomy
+EEG Abnormalities
+Seizures
+Diagnosis
+Neurocognitive Mechanisms
+Screening Guidelines
+Clinical & Research Criteria
+Increased Prevalence of Autism
+It’s not the vaccine"
+5e0eb34aeb2b58000726540336771053ecd335fc,Low-Quality Video Face Recognition with Deep Networks and Polygonal Chain Distance,"Low-Quality Video Face Recognition with Deep
+Networks and Polygonal Chain Distance
+Christian Herrmann∗†, Dieter Willersinn†, J¨urgen Beyerer†∗
+Vision and Fusion Lab, Karlsruhe Institute of Technology KIT, Karlsruhe, Germany
+Fraunhofer IOSB, Karlsruhe, Germany"
+5e0832848fab012b7e59580264257e0a3d05c596,The University of Southampton Multi-Biometric Tunnel and introducing a novel 3D gait dataset,"The University of Southampton Multi-Biometric Tunnel and
+introducing a novel 3D gait dataset
+Richard D. Seely, Sina Samangooei, Lee Middleton, John N. Carter and Mark S. Nixon"
+5eae1a3e0dfd0834be6a003b979bf5b3dc923453,"Far-Field, Multi-Camera, Video-to-Video Face Recognition","Far-Field, Multi-Camera, Video-to-Video Face
+Recognition
+Aristodemos Pnevmatikakis and Lazaros Polymenakos
+Athens Information Technology
+Greece
+. Introduction
+Face recognition on still images has been extensively studied. Given sufficient training data
+(many gallery stills of each person) and/or high resolution images, the 90% recognition
+arrier can be exceeded, even for hundreds of different people to be recognized (Phillips et
+l., 2006). Face recognition on video streams has only recently begun to receive attention
+(Weng et al., 2000; Li et al., 2001; Gorodnichy, 2003; Lee et al., 2003; Liu and Chen, 2003;
+Raytchev and Murase, 2003; Aggarval et al., 2004; Xie et al., 2004; Stergiou et al., 2006).
+Video-to-video face recognition refers to the problem of training and testing face recognition
+systems using video streams. Usually these video streams are near-field, where the person
+to be recognized occupies most of the frame. They are also constrained in the sense that the
+person looks mainly at the camera. Typical such video streams originate from video-calls
+nd news narration, where a person’s head and upper torso is visible.
+A much more interesting application domain is that of the far-field unconstrained video
+streams. In such streams the people are far from the camera, which is typically mounted on a
+room corner near the ceiling. VGA-resolution cameras in such a setup can easily lead to quite"
+5ecf564bc9eab26c96c17304744ff1029215a109,Single-Sample Face Recognition Based on Intra-Class Differences in a Variation Model,"Sensors 2015, 15, 1071-1087; doi:10.3390/s150101071
+OPEN ACCESS
+sensors
+ISSN 1424-8220
+www.mdpi.com/journal/sensors
+Article
+Single-Sample Face Recognition Based on Intra-Class
+Differences in a Variation Model
+Jun Cai, Jing Chen * and Xing Liang
+School of Optoelectronics, Beijing Institute of Technology, Beijing 100081, China;
+E-Mails: (J.C.); (X.L.)
+* Author to whom correspondence should be addressed; E-Mail:
+Tel.: +86-136-8151-5195.
+External Editor: Valentina Gatteschi
+Received: 17 September 2014 / Accepted: 10 December 2014 / Published: 8 January 2015"
+5e0df06d92176f362d52962de866e2d825185afb,Improving Multi-frame Data Association with Sparse Representations for Robust Near-online Multi-object Tracking,"Improving Multi-Frame Data Association with
+Sparse Representations for Robust Near-Online
+Multi-Object Tracking
+Lo¨ıc Fagot-Bouquet1, Romaric Audigier1, Yoann Dhome1, Fr´ed´eric Lerasle2,3
+CEA, LIST, Vision and Content Engineering Laboratory,
+Point Courrier 173, F-91191 Gif-sur-Yvette, France
+CNRS, LAAS, 7, Avenue du Colonel Roche, F-31400 Toulouse, France
+Universit´e de Toulouse, UPS, LAAS, F-31400 Toulouse, France"
+5e16cc5dc7ef8b4fc1320abbfeb838b4fe041905,A Proposal for Common Dataset in Neural-Symbolic Reasoning Studies,"A Proposal for Common Dataset in
+Neural-Symbolic Reasoning Studies
+Ozgur Yilmaz, Artur d’Avila Garcez, and Daniel Silver
+Turgut Ozal University, Computer Science Department, Ankara Turkey
+City University London, Department of Computer Science, London UK
+Acadia University, Jodrey School of Computer Science, Nova Scotia Canada,"
+5e28673a930131b1ee50d11f69573c17db8fff3e,Descriptor Based Methods in the Wild,"Author manuscript, published in ""Workshop on Faces in 'Real-Life' Images: Detection, Alignment, and Recognition, Marseille : France
+(2008)"""
+5ef49174ca2b54c1bb54df828acc52075cf1634b,DAPs: Deep Action Proposals for Action Understanding,"DAPs: Deep Action Proposals for Action
+Understanding
+Victor Escorcia1, Fabian Caba Heilbron1,
+Juan Carlos Niebles2,3, Bernard Ghanem1
+King Abdullah University of Science and Technology (KAUST), Saudi Arabia.
+Stanford University. 3 Universidad del Norte, Colombia.
+{victor.escorcia, fabian.caba,"
+5ea9063b44b56d9c1942b8484572790dff82731e,Multiclass Support Vector Machines and Metric Multidimensional Scaling for Facial Expression Recognition,"MULTICLASS SUPPORT VECTOR MACHINES AND METRIC MULTIDIMENSIONAL
+SCALING FOR FACIAL EXPRESSION RECOGNITION
+Irene Kotsiay, Stefanos Zafeiriouy, Nikolaos Nikolaidisy and Ioannis Pitasy
+yAristotle University of Thessaloniki, Department of Informatics
+Thessaloniki, Greece
+email: fekotsia, dralbert, nikolaid,"
+5e9a6357fd7de7271dac77756c3992dce260eb49,On the Convergence of Affective and Persuasive Technologies in Computer-mediated Health-care Systems,"Rebeca I. García-Betances
+Life Supporting Technologies (LifeSTech)
+Superior Technical School of
+Telecommunications Engineers
+Polytechnic University of Madrid
+Superior Technical School of
+Telecommunications Engineers
+Polytechnic University of Madrid
+Spain
+Dario Salvi
+Spain
+Giuseppe Fico
+Life Supporting Technologies (LifeSTech)
+Superior Technical School of
+Telecommunications Engineers
+Polytechnic University of Madrid
+Spain
+Manuel Ottaviano
+Superior Technical School of
+Telecommunications Engineers"
+5e8a7a2eef68f568c023f37e41576fa811e5c628,Deep Reinforcement Learning For Sequence to Sequence Models,"Deep Reinforcement Learning for
+Sequence-to-Sequence Models
+Yaser Keneshloo, Tian Shi, Naren Ramakrishnan, Chandan K. Reddy, Senior Member, IEEE"
+5e1b42d07eb84cddc1ebae607f3041aa2ef8fce8,RAM: Role Representation and Identification from combined Appearance and Activity Maps,"RAM: Role Representation and Identification
+from combined Appearance and Activity Maps
+Carlos Torres† Archith J. Bency† Je(cid:130)rey C. Fried‡ B. S. Manjunath†
+University of California Santa Barbara ‡Santa Barbara Co(cid:138)age Hospital
+{carlostorres, archith,"
+5e053cd164b02433c4efc0fc675f6273a8a1c46a,Scalable Bayesian Learning of Recurrent Neural Networks for Language Modeling,"Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics, pages 321–331
+Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics, pages 321–331
+Vancouver, Canada, July 30 - August 4, 2017. c(cid:13)2017 Association for Computational Linguistics
+Vancouver, Canada, July 30 - August 4, 2017. c(cid:13)2017 Association for Computational Linguistics
+https://doi.org/10.18653/v1/P17-1030
+https://doi.org/10.18653/v1/P17-1030"
+5e4ad1f19e88b6dc87000f64b984d8f09abe7baf,Invariant Spectral Hashing of Image Saliency Graph,"Invariant Spectral Hashing of Image Saliency Graph
+Maxime Taquet, Laurent Jacques, Christophe De Vleeschouwer and Benoˆıt Macq
+Information and Communication Technologies, Electronics and Applied Mathematics
+Universit´e catholique de Louvain, Belgium.
+September 17, 2010"
+5e832ea5328cdcc9b4346458672ad8288a56c0a7,Illumination-robust face recognition with Block-based Local Contrast Patterns,"978-1-5090-4117-6/17/$31.00 ©2017 IEEE
+ICASSP 2017"
+5e6ba16cddd1797853d8898de52c1f1f44a73279,Face Identification with Second-Order Pooling,"Face Identification with Second-Order Pooling
+Fumin Shen, Chunhua Shen and Heng Tao Shen"
+5e2b918f2dee17cb79d692e10aa2103ca9129e2c,Rotating your face using multi-task deep neural network,"Rotating Your Face Using Multi-task Deep Neural Network
+Junho Yim1 Heechul Jung1 ByungIn Yoo1;2 Changkyu Choi2 Dusik Park2
+Junmo Kim1
+School of Electrical Engineering, KAIST, South Korea
+Samsung Advanced Institute of Technology
+fjunho.yim, heechul,
+fbyungin.yoo, changkyu choi,"
+5e8e3d2a79537a6cd0c138545bce63ddafaa853c,Intent-aware long-term prediction of pedestrian motion,"Intent-Aware Long-Term Prediction of Pedestrian Motion
+Vasiliy Karasev
+Alper Ayvaci
+Bernd Heisele
+Stefano Soatto"
+5e2266d4ca1377bdf38ad2c07d0d9e0200813522,Recognizing and Mask Removal in 3D Faces Even In Presence of Occlusions,"ISSN(Online): 2320-9801
+ISSN (Print): 2320-9798
+International Journal of Innovative Research in Computer and Communication Engineering
+(An ISO 3297: 2007 Certified Organization)
+Vol.2, Special Issue 1, March 2014
+Proceedings of International Conference On Global Innovations In Computing Technology (ICGICT’14)
+Organized by
+Department of CSE, JayShriram Group of Institutions, Tirupur, Tamilnadu, India on 6th & 7th March 2014
+Recognizing and Mask Removal in 3D Faces Even
+In Presence of Occlusions
+M.Dhivya1, P.Purushothaman2
+Dept. of Computer Science and Engineering, Muthayammal Engineering College, Rasipuram, Tamilnadu, India1. 2"
+5eefe98aafffe665b19de515e3ba90c9c0b7219c,Trimmed Event Recognition Submission to ActivityNet Challenge 2018,"Trimmed Event Recognition​ Submission to ActivityNet Challenge 2018
+Jiaqing Lin, Akikazu Takeuchi
+STAIR Lab, Chiba Institute of Technology, Japan
+{lin,
+. Overview
+This paper describes STAIR Lab submission to
+ActivityNet 2018 Challenge for guest
+task C:
+Trimmed Event Recognition (Moments in Time) [1].
+Our approach is to utilize three networks, Audio
+Net, Spatial-temporal Net, and DenseNet to make
+individual predictions, then use MLP to fuses the
+results to make an overall prediction. The flow chart
+of our approach is shown in figure 1.
+. Implementation
+.1 Audio network
+Our audio dataset training is different from other
+methods. Usually, auditory raw waveforms are used
+s input and are fed into a model like SoundNet [2].
+In our case, firstly, we converted auditory raw"
+5e6944abfed38fd30d8be45ee0c24dc1c0525ba1,An Algorithm for Face Recognition based on Isolated Image Points with Neural Network,"International Journal of Computer Applications (0975 – 8887)
+Volume 150 – No.2, September 2016
+An Algorithm for Face Recognition based on Isolated
+Image Points with Neural Network
+Hassan Jaleel Hassan, PhD
+Computer Engineering Department,
+University of Technology
+techniques
+Pixel-based"
+5e9e3afeea446a2ae19e3a8e0678f08b73b0b36b,Commonsense knowledge acquisition and applications,"Commonsense Knowledge
+Acquisition and Applications
+Niket Tandon
+Max-Planck-Institut f¨ur Informatik
+Dissertation
+zur Erlangung des Grades
+des Doktors der Ingenieurwissenschaften (Dr.-Ing.)
+der Naturwissenschaftlich-Technischen Fakult¨aten
+der Universit¨at des Saarlandes
+Saarbr¨ucken
+August, 2016"
+5ece99e52efbd43ac7fed8a7d0d604218cba0337,Towards Deep Representation Learning with Genetic Programming,"Towards Deep Representation Learning with Genetic
+Programming(cid:63)
+Lino Rodriguez-Coayahuitl, Alicia Morales-Reyes, and Hugo Jair Escalante
+Instituto Nacional de Astrofisica, Optica y Electronica,
+Luis Enrique Erro No.1, Tonantzintla, 72840, Puebla, Mexico,"
+5ec94adc9e0f282597f943ea9f4502a2a34ecfc2,Leveraging the Power of Gabor Phase for Face Identification: A Block Matching Approach,"Leveraging the Power of Gabor Phase for Face
+Identification: A Block Matching Approach
+Yang Zhong, Haibo Li
+KTH, Royal Institute of Technology"
+5ebd9457a3a09889fad8cc86a91b274da5986636,oASIS: Adaptive Column Sampling for Kernel Matrix Approximation,"PATEL et al.: OASIS: ADAPTIVE COLUMN SAMPLING FOR KERNEL MATRIX APPROXIMATION
+oASIS: Adaptive Column Sampling
+for Kernel Matrix Approximation
+Raajen Patel*, Student Member, IEEE, Thomas A. Goldstein, Member, IEEE, Eva L. Dyer, Member, IEEE,
+Azalia Mirhoseini, Student Member, IEEE, and Richard G. Baraniuk, Fellow, IEEE"
+5e286a45a4780a142e1420728ab99cb92993ab50,Data-driven image captioning with meta-class based retrieval,"META-SINIF TABANLI GETİRME İLE VERİYE DAYALI İMGE ALTYAZILAMA
+DATA-DRIVEN IMAGE CAPTIONING WITH META-CLASS BASED RETRIEVAL
+Mert Kılıçkaya1, Erkut Erdem1, Aykut Erdem1, Nazlı İkizler Cinbiş1, Ruket Çakıcı2
+Bilgisayar Mühendisliği Bölümü
+Hacettepe Üniversitesi
+ÖZETÇE
+Otomatik imge altyazılama, bir imgenin açıklamasını yaratma
+işlemi, bilgisayarlı görü ve doğal dil işleme topluluklarının
+ilgisini daha yeni çeken çok zorlu bir problemdir. Bu
+çalışmada, verilen bir imge için; imge-altyazı ikilileri içeren
+geniş bir veri kümesinden ona görsel olarak en benzer imgeyi
+ulan ve onun altyazısını girdi imgesinin açıklaması olarak
+ktaran veriye dayalı özgün bir imge altyazılama stratejisi
+önerilmiştir. Özgünlüğümüz, getirme
+için girdi
+görüntüsünün anlamsal içeriğini daha iyi yakalamak için
+meta-sınıg gösterimi olarak adlandırılan yeni önerilmiş yüksek
+düzey bir global imge gösterimi kullanılmasında yatmaktadır.
+Deneylerimiz meta-sınıf güdümlü yaklaşımımızın dayanak
+Im2Text modeline kıyasla daha doğru açıklamalar ürettiğini"
+5ef2be1aadd2f666756b2ab66bc05d146ba0681b,Normalization in Training Deep Convolutional Neural Networks for 2D Bio-medical Semantic Segmentation,"Normalization in Training Deep Convolutional Neural Networks for 2D
+Bio-medical Semantic Segmentation
+Xiao-Yun Zhou1 and Guang-Zhong Yang1"
+5e39deb4bff7b887c8f3a44dfe1352fbcde8a0bd,Supervised COSMOS Autoencoder: Learning Beyond the Euclidean Loss!,"Supervised COSMOS Autoencoder: Learning Beyond the
+Euclidean Loss!
+Maneet Singh, Student Member, IEEE, Shruti Nagpal, Student Member, IEEE, Mayank Vatsa, Senior Member, IEEE,
+Richa Singh, Senior Member, IEEE, and Afzel Noore, Senior Member, IEEE"
+5ee220b6fb70a3d4d99be9d81d2c0e5de06ab3b9,LoST? Appearance-Invariant Place Recognition for Opposite Viewpoints using Visual Semantics,"Pre-print of article that will appear in Proceedings of Robotics: Science and Systems XIV, 2018.Please cite this paper as:Sourav Garg, Niko Sunderhauf, and Michael Milford. LoST? Appearance-Invariant Place Recognition for Opposite Viewpoints using Visual Semantics. Proceedings of Robotics: Science and Systems XIV, title={LoST? Appearance-Invariant Place Recognition for Opposite Viewpoints using Visual Semantics}, author={Garg, Sourav and Suenderhauf, Niko and Milford, Michael}, journal={Proceedings of Robotics: Science and Systems XIV}, year={2018}}"
+5e4a451faf2e47486a5dbeca8a5109b53e22d95a,Statement Arun Kumar,"Research Statement
+Arun Kumar
+Large-scale data analytics using machine learning (ML), popularly known as advanced analytics or “Big
+Data” analytics, is transforming almost every data-powered application in the enterprise, Web, science,
+government, and other domains. However, there are still many barriers to broad and successful adoption
+of advanced analytics. Designing new ML algorithms and faster ML implementations are important issues
+that have been studied by researchers for a long time, but for most data-powered applications, the real
+showstopper is a different issue that is often glossed over in research: the end-to-end process of building
+ML models given raw data is often too painful even for professional analysts, while developers skilled in
+oth general-purpose programming and the latest ML are rare. The goal of my research is to improve the
+productivity of the users and developers of advanced analytics systems to enable data-powered applications
+to realize the full potential of advanced analytics. To this end, my work focuses on fundamental research
+questions at the intersection of data management and ML that address usability, developability, perfor-
+mance, and scalability issues. My approach to solving a problem involves the whole spectrum of algorithm
+design, theoretical analysis, empirical analysis, building prototype systems, and deploying them in practice.
+Research Summary. My dissertation opens up a new problem that I call “learning over joins”, which
+illustrates my goal of improving the productivity of analysts. My observation is simple: most ML toolkits
+ssume the input data is a single table, but many real-world datasets are multi-table. Thus, analysts join
+ll tables to create a single table that might be much larger, which means that managing and maintaining
+it is a usability headache. Creating a single table also causes storage and performance issues. To mitigate"
+5be74c6fa7f890ea530e427685dadf0d0a371fc1,Deep Co-attention based Comparators For Relative Representation Learning in Person Re-identification,"Deep Co-attention based Comparators For Relative
+Representation Learning in Person Re-identification
+Lin Wu, Yang Wang, Junbin Gao, Dacheng Tao, Fellow, IEEE"
+5b25b9053ceafe1cf8258d8daa818a2da80c800f,Assigning affinity-preserving binary hash codes to images,"Assigning af‌f‌inity-preserving
+inary hash codes to images
+Jason Filippou
+Varun Manjunatha
+June 10, 2014"
+5bfc32d9457f43d2488583167af4f3175fdcdc03,Local Gray Code Pattern (LGCP): A Robust Feature Descriptor for Facial Expression Recognition,"International Journal of Science and Research (IJSR), India Online ISSN: 2319-7064
+Local Gray Code Pattern (LGCP): A Robust
+Feature Descriptor for Facial Expression
+Recognition
+Mohammad Shahidul Islam
+Atish Dipankar University of Science & Technology, School, Department of Computer Science and Engineering, Dhaka, Bangladesh."
+5bc5cfc2622f6b0a0003d7b115726d075205a2cc,Auto Landing Process for Autonomous Flying Robot by Using Image Processing Based on Edge Detection,"AUTO LANDING PROCESS FOR
+AUTONOMOUS FLYING ROBOT BY USING
+IMAGE PROCESSING BASED ON EDGE
+DETECTION
+Bahram Lavi Sefidgari1 and Sahand Pourhassan Shamchi2
+Department of Computer Engineering, EMU, Famagusta, Cyprus
+Department of Mechanical Engineering, EMU, Famagusta, Cyprus"
+5ba7882700718e996d576b58528f1838e5559225,Predicting Personalized Image Emotion Perceptions in Social Networks,"This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TAFFC.2016.2628787, IEEE
+Transactions on Affective Computing
+IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. X, NO. X, OCTOBER 2016
+Predicting Personalized Image Emotion
+Perceptions in Social Networks
+Sicheng Zhao, Hongxun Yao, Yue Gao, Senior Member, IEEE, Guiguang Ding and Tat-Seng Chua"
+5b6f0a508c1f4097dd8dced751df46230450b01a,Finding lost children,"Finding Lost Children
+Ashley Michelle Eden
+Electrical Engineering and Computer Sciences
+University of California at Berkeley
+Technical Report No. UCB/EECS-2010-174
+http://www.eecs.berkeley.edu/Pubs/TechRpts/2010/EECS-2010-174.html
+December 20, 2010"
+5b10fa6b4c0921af7b36a58f4fd2d8fca6e3c9b1,Low-Rank Multi-View Learning in Matrix Completion for Multi-Label Image Classification,"Proceedings of the Twenty-Ninth AAAI Conference on Artificial Intelligence
+Low-Rank Multi-View Learning
+in Matrix Completion for Multi-Label Image Classification
+Meng Liu†, Yong Luo†§, Dacheng Tao‡, Chao Xu†, and Yonggang Wen§
+Key Laboratory of Machine Perception (MOE), School of EECS, PKU, Beijing 100871, China
+Center for Quantum Computation and Intelligent Systems, UTS, Sydney, NSW 2007, Australia
+§Division of Networks and Distributed Systems School of Computer Engineering, NTU, 639798, Singapore
+{lemolemac,"
+5bb684dfe64171b77df06ba68997fd1e8daffbe1,One-Sided Unsupervised Domain Mapping,
+5bf9493564d1ed173aee4dc701d4e62d5f926fe3,Bonnet: An Open-Source Training and Deployment Framework for Semantic Segmentation in Robotics using CNNs,"Bonnet: An Open-Source Training and Deployment Framework
+for Semantic Segmentation in Robotics using CNNs
+Andres Milioto
+Cyrill Stachniss"
+5b0552a8e0ffdf1b6e7f2573640f888815391dec,Part-level fully convolutional networks for pedestrian detection,"978-1-5090-4117-6/17/$31.00 ©2017 IEEE
+ICASSP 2017"
+5b14abbea83270282ef94fcf3f3a73e7d8fee023,Experiments about the Generalization Ability of Common Vector based Methods for Face Recognition,"Experiments about the Generalization Ability of
+Common Vector based methods for Face
+Recognition ?
+Marcelo Armengot, Francesc J. Ferri, and Wladimiro D´ıaz
+Dept. d’Inform`atica, Universitat de Val`encia
+Dr Moliner, 50 46100 Burjassot, Spain"
+5b9c849c2acbdea6e3cfc730def4f083f169521c,A Method for Face Detection based on Wavelet Transform and optimised feature selection using Ant Colony Optimisation in Support Vector Machine,"ISSN (Print) : 2320 – 9798
+ISSN (Online) : 2320 – 9801
+International Journal of Innovative Research in Computer and Communication Engineering
+Vol. 1, Issue 2, April 2013
+A Method for Face Detection based on Wavelet
+Transform and optimised feature selection using Ant
+Colony Optimisation in Support Vector Machine
+Sanjay Kumar Pal1, Uday Chourasia 2 and Manish Ahirwar3
+Department of CSE, University Institute of Technology, RGPV, Bhopal, India1,2,3"
+5bf4f97b631937b2176db9c80dee965e2e2286be,From Classical to Generalized Zero-Shot Learning: a Simple Adaptation Process,"From Classical to Generalized Zero-Shot
+Learning: a Simple Adaptation Process
+Yannick Le Cacheux
+Herv´e Le Borgne
+CEA LIST
+CEA LIST
+Michel Crucianu
+CEDRIC Lab – CNAM
+September 27, 2018"
+5be6340c55d4a45e96e811bdeac3972328ca9247,People Identification and Tracking Through Fusion of Facial and Gait Features,"Original citation:
+Guan, Yu (Researcher in Computer Science), Wei, Xingjie, Li, Chang-Tsun and Keller,
+Y. (2014) People identification and tracking through fusion of facial and gait features. In:
+Cantoni, Virginio and Dimov, Dimo and Tistarell, Massimo, (eds.) Biometric
+Authentication : First International Workshop, BIOMET 2014, Sofia, Bulgaria, June 23-
+4, 2014. Revised Selected Papers. Lecture Notes in Computer Science . Springer
+International Publishing, pp. 209-221. ISBN 9783319133850
+Permanent WRAP url:
+http://wrap.warwick.ac.uk/65110
+Copyright and reuse:
+The Warwick Research Archive Portal (WRAP) makes this work by researchers of the
+University of Warwick available open access under the following conditions. Copyright ©
+nd all moral rights to the version of the paper presented here belong to the individual
+uthor(s) and/or other copyright owners. To the extent reasonable and practicable the
+material made available in WRAP has been checked for eligibility before being made
+vailable.
+Copies of full items can be used for personal research or study, educational, or not-for
+profit purposes without prior permission or charge. Provided that the authors, title and
+full bibliographic details are credited, a hyperlink and/or URL is given for the original
+metadata page and the content is not changed in any way."
+5bae9822d703c585a61575dced83fa2f4dea1c6d,MOTChallenge 2015: Towards a Benchmark for Multi-Target Tracking,"MOTChallenge 2015:
+Towards a Benchmark for Multi-Target Tracking
+Laura Leal-Taix´e∗, Anton Milan∗, Ian Reid, Stefan Roth, and Konrad Schindler"
+5bcff482bd9652420f8f6b0e6e58ab59a562046e,Bit-Scalable Deep Hashing With Regularized Similarity Learning for Image Retrieval and Person Re-Identification,"Bit-Scalable Deep Hashing with Regularized
+Similarity Learning for Image Retrieval and Person
+Re-identification
+Ruimao Zhang, Liang Lin, Rui Zhang, Wangmeng Zuo, and Lei Zhang"
+5babbad3daac5c26503088782fd5b62067b94fa5,Are You Sure You Want To Do That? Classification with Verification,"Are You Sure You Want To Do That?
+Classification with Verification
+Harris Chan∗
+Atef Chaudhury∗
+Kevin Shen∗"
+5bb87c7462c6c1ec5d60bde169c3a785ba5ea48f,Targeting Ultimate Accuracy: Face Recognition via Deep Embedding,"Targeting Ultimate Accuracy: Face Recognition via Deep Embedding
+Jingtuo Liu Yafeng Deng Tao Bai Zhengping Wei Chang Huang
+Baidu Research – Institute of Deep Learning"
+5b7870359b8b9934453f8e772ab7c3f9df3a5035,LF Indoor Location and Identification System,"LF Indoor Location and Identification System
+Antti Ropponen, Matti Linnavuo, Raimo Sepponen
+Helsinki University of Technology
+Department of Electronics
+PL 3340, 02015 TKK Finland
+Emails:"
+5b6c603fba0a66fb3c037632079bdca82ec3bf91,Alternating Co-Quantization for Cross-Modal Hashing,"Alternating Co-Quantization for Cross-modal Hashing
+Go Irie
+Hiroyuki Arai
+Yukinobu Taniguchi
+NTT Corporation
+{irie.go, arai.hiroyuki,"
+5b9d9f5a59c48bc8dd409a1bd5abf1d642463d65,An evolving spatio-temporal approach for gender and age group classification with Spiking Neural Networks,"Evolving Systems. manuscript No.
+(will be inserted by the editor)
+An evolving spatio-temporal approach for gender and age
+group classification with Spiking Neural Networks
+Fahad Bashir Alvi, Russel Pears, Nikola Kasabov
+Received: date / Accepted: date"
+5b01d4338734aefb16ee82c4c59763d3abc008e6,A Robust Face Recognition Algorithm Based on Kernel Regularized Relevance-Weighted Discriminant Analysis,"DI WU: A ROBUST FACE RECOGNITION ALGORITHM BASED ON KERNEL REGULARIZED RELEVANCE …
+A Robust Face Recognition Algorithm Based on Kernel Regularized
+Relevance-Weighted Discriminant Analysis
+Di WU 1, 2
+2 Hunan Provincial Key Laboratory of Wind Generator and Its Control, Hunan Institute of Engineering, Xiangtan, China.
+College of Electrical and Information Engineering,
+[e-mail:
+I. INTRODUCTION
+interface and security
+recognition
+their
+this paper, we propose an effective"
+5b721f86f4a394f05350641e639a9d6cb2046c45,Detection under Privileged Information,"A short version of this paper is accepted to ACM Asia Conference on Computer and Communications Security (ASIACCS) 2018
+Detection under Privileged Information (Full Paper)∗
+Z. Berkay Celik
+Pennsylvania State University
+Patrick McDaniel
+Pennsylvania State University
+Rauf Izmailov
+Vencore Labs
+Nicolas Papernot,
+Ryan Sheatsley, Raquel Alvarez
+Pennsylvania State University
+Ananthram Swami
+Army Research Laboratory"
+5b6bdf478860b1e3f797858e71abd14f98684b61,Distributed neural computation for the visual perception of motion. (Calcul neuronal distribué pour la perception visuelle du mouvement),"Distributed neural computation for the visual
+perception of motion
+Mauricio Cerda
+To cite this version:
+Mauricio Cerda. Distributed neural computation for the visual perception of motion. Computer
+science. Universit´e Nancy II, 2011. English. <tel-00642818>
+HAL Id: tel-00642818
+https://tel.archives-ouvertes.fr/tel-00642818
+Submitted on 18 Nov 2011
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destin´ee au d´epˆot et `a la diffusion de documents
+scientifiques de niveau recherche, publi´es ou non,
+´emanant des ´etablissements d’enseignement et de
+recherche fran¸cais ou ´etrangers, des laboratoires"
+5b3725c8b5e058ec3a383b621aa9316b90738b2e,Gaussian Conditional Random Field Network for Semantic Segmentation,"MITSUBISHI ELECTRIC RESEARCH LABORATORIES
+http://www.merl.com
+Gaussian Conditional Random Field Network for Semantic
+Segmentation
+Vemulapalli, R.; Tuzel, C.O.; Liu, M.-Y.; Chellappa, R.
+TR2016-078
+June 2016"
+5b1b90a0a6d491b26f427824985d69d5d0693220,Human gender classification: a review,"IEEE SENSORS JOURNAL, VOL. X, NO. X, XXXXXXX 2015
+Human Gender Classification: A Review
+Yingxiao Wu, Member, IEEE, Yan Zhuang, Student Member, IEEE, Xi Long, Member, IEEE,
+Feng Lin, Member, IEEE, and Wenyao Xu, Member, IEEE"
+5bb24d1250df62a56cab1445f1d8c5c61269b785,Measuring the Temporal Behavior of Real-World Person Re-Identification,"Measuring the Temporal Behavior of Real-World
+Person Re-Identification
+Meng Zheng, Student Member, IEEE, Srikrishna Karanam, Member, IEEE,
+nd Richard J. Radke, Senior Member, IEEE"
+5bb14bba7510c590164007d7e3aa1bf88cb3faec,Learning to Match Appearances by Correlations in a Covariance Metric Space,"Learning to Match Appearances by Correlations
+in a Covariance Metric Space
+Sªawomir B¡k, Guillaume Charpiat, Etienne Corvée, François Brémond,
+Monique Thonnat
+INRIA Sophia Antipolis, STARS group
+004, route des Lucioles, BP93
+06902 Sophia Antipolis Cedex - France"
+5ba1db56bccc090ce5eceb13f46f2cd15ba3aa55,Interpretable Counting in Visual Question Answering,"Under review as a conference paper at ICLR 2018
+INTERPRETABLE COUNTING IN VISUAL QUESTION
+ANSWERING
+Anonymous authors
+Paper under double-blind review"
+5b818c73ce5681e523d6fe9ed8603c7afc0a9089,Improving Shape Retrieval by Spectral Matching and Meta Similarity,"Improving Shape retrieval by Spectral
+Matching and Meta Similarity
+Amir Egozi (BGU),
+Yosi Keller (BIU)
+nd Hugo Guterman (BGU)
+Department of Electrical and Computer Engineering,
+Ben-Gurion University of the Negev
+/ 21"
+5b1d78b160560db5f581e65289ce5e2f99eb9b1f,Twitter100k: A Real-World Dataset for Weakly Supervised Cross-Media Retrieval,"Twitter100k: A Real-world Dataset for Weakly
+Supervised Cross-Media Retrieval
+Yuting Hu, Liang Zheng, Yi Yang, and Yongfeng Huang"
+5b94093939ac42aba54ab41eb1725aeba1bd5c34,RGB-D Segmentation of Poultry Entrails,"Aalborg Universitet
+RGB-D Segmentation of Poultry Entrails
+Philipsen, Mark Philip; Jørgensen, Anders; Guerrero, Sergio Escalera; Moeslund, Thomas B.
+Published in:
+IX International Conference on Articulated Motion and Deformable Objects
+DOI (link to publication from Publisher):
+0.1007/978-3-319-41778-3_17
+Publication date:
+Document Version
+Accepted author manuscript, peer reviewed version
+Link to publication from Aalborg University
+Citation for published version (APA):
+Philipsen, M. P., Jørgensen, A., Guerrero, S. E., & Moeslund, T. B. (2016). RGB-D Segmentation of Poultry
+Entrails. In IX International Conference on Articulated Motion and Deformable Objects (pp. 168-174). Springer.
+(Lecture Notes in Computer Science, Vol. 9756). DOI: 10.1007/978-3-319-41778-3_17
+General rights
+Copyright and moral rights for the publications made accessible in the public portal are retained by the authors and/or other copyright owners
+nd it is a condition of accessing publications that users recognise and abide by the legal requirements associated with these rights.
+? Users may download and print one copy of any publication from the public portal for the purpose of private study or research.
+? You may not further distribute the material or use it for any profit-making activity or commercial gain"
+5be3cc1650c918da1c38690812f74573e66b1d32,Relative Parts: Distinctive Parts for Learning Relative Attributes,"Relative Parts: Distinctive Parts for Learning Relative Attributes
+Ramachandruni N. Sandeep
+Yashaswi Verma
+C. V. Jawahar
+Center for Visual Information Technology, IIIT Hyderabad, India - 500032"
+5b6bed112e722c0629bcce778770d1b28e42fc96,Can Your Eyes Tell Me How You Think? A Gaze Directed Estimation of the Mental Activity,"FLOREA ET AL.:CANYOUREYESTELLMEHOWYOUTHINK?
+Can Your Eyes Tell Me How You Think? A
+Gaze Directed Estimation of the Mental
+Activity
+Laura Florea
+http://alpha.imag.pub.ro/common/staff/lflorea
+Corneliu Florea
+http://alpha.imag.pub.ro/common/staff/cflorea
+Ruxandra Vrânceanu
+Constantin Vertan
+http://alpha.imag.pub.ro/common/staff/vertan
+Image Processing and Analysis
+Laboratory, LAPI
+University “Politehnica” of Bucharest
+Bucharest, Romania"
+374c7a2898180723f3f3980cbcb31c8e8eb5d7af,Facial Expression Recognition in Videos using a Novel Multi-Class Support Vector Machines Variant,"FACIAL EXPRESSION RECOGNITION IN VIDEOS USING A NOVEL MULTI-CLASS
+SUPPORT VECTOR MACHINES VARIANT
+Irene Kotsiay, Nikolaos Nikolaidisy and Ioannis Pitasy
+yAristotle University of Thessaloniki
+Department of Informatics
+Box 451, 54124 Thessaloniki, Greece"
+37b207d2c4a82a57f80e96353f79ecd71320a854,Person Search with Natural Language Description,"Person Search with Natural Language Description
+Shuang Li1 Tong Xiao1 Hongsheng Li1∗ Bolei Zhou2 Dayu Yue3 Xiaogang Wang1 ∗
+The Chinese University of Hong Kong 2Massachuate Institute of Technology 3SenseTime Group Limited"
+37c42f0a0e2e97a74113e1a1e1a79b04e0c64244,Covariance Pooling For Facial Expression Recognition,"Covariance Pooling for Facial Expression Recognition
+Computer Vision Lab, ETH Zurich, Switzerland
+VISICS, KU Leuven, Belgium
+Dinesh Acharya†, Zhiwu Huang†, Danda Pani Paudel†, Luc Van Gool†‡
+{acharyad, zhiwu.huang, paudel,"
+372fb32569ced35eaf3740a29890bec2be1869fa,Mu rhythm suppression is associated with the classification of emotion in faces.,"Running head: MU RHYTHM MODULATION BY CLASSIFICATION OF EMOTION 1
+Mu rhythm suppression is associated with the classification of emotion in faces
+Matthew R. Moore1, Elizabeth A. Franz1
+Department of Psychology, University of Otago, Dunedin, New Zealand
+Corresponding authors:
+Matthew Moore & Liz Franz
+Phone: +64 (3) 479 5269; Fax: +64 (3) 479 8335
+Department of Psychology
+University of Otago
+PO Box 56
+Dunedin, New Zealand"
+376ea595a6ff5b876367654833de1e1778bacd1e,Bilingualism and ambiguous emotional cues 1,"Bilingualism and ambiguous emotional cues 1
+Examensarbete på avancerad nivå
+Independent degree project  second cycle
+Psychology
+Major subject
+Title
+Bilingualism and Children's Attention to Facial Expressions that Conflict with Lexical
+Content
+Amani Asad"
+37f2e03c7cbec9ffc35eac51578e7e8fdfee3d4e,Co-operative Pedestrians Group Tracking in Crowded Scenes Using an MST Approach,"WACV 2015 Submission #394. CONFIDENTIAL REVIEW COPY. DO NOT DISTRIBUTE.
+Co-operative Pedestrians Group Tracking in Crowded Scenes using an MST
+Approach
+Anonymous WACV submission
+Paper ID 394"
+3748a828dabc6b5292b53cec6080cef33d78d3e3,On Clustering and Embedding Manifolds using a Low Rank Neighborhood Approach,"On Clustering and Embedding Manifolds using a
+Low Rank Neighborhood Approach
+Arun M. Saranathan, Student Member, IEEE, and Mario Parente, Member, IEEE"
+3795974e24296185d9b64454cde6f796ca235387,Finding your Lookalike: Measuring Face Similarity Rather than Face Identity,"Finding your Lookalike:
+Measuring Face Similarity Rather than Face Identity
+Amir Sadovnik, Wassim Gharbi, Thanh Vu
+Lafayette College
+Easton, PA
+Andrew Gallagher
+Google Research
+Mountain View, CA"
+37d6cde8be756b70d22262f1acc3442a0c6aa7ea,Kernel learning approaches for image classification,"Kernel Learning Approaches for
+Image Classification
+Dissertation
+zur Erlangung des akademischen Grades
+Doctor rerum naturalium (Dr.rer.nat)
+n der Naturwissenschaftlich-Technischen Fakult¨at I
+der Universit¨at des Saarlandes, Saarbr¨ucken
+vorgelegt von Dipl.-Inform.
+Peter Vincent Gehler
+0. Juni 2009"
+37a4eb74f9c9d6333864dbe1e0803d30c2e4db7c,An Evaluation of Deep CNN Baselines for Scene-Independent Person Re-Identification,"An Evaluation of Deep CNN Baselines for
+Scene-Independent Person Re-Identification
+Paul Marchwica, Michael Jamieson, Parthipan Siva
+Senstar Corporation
+Waterloo, Canada
+{Paul.Marchwica, Mike.Jamieson,
+the art"
+37a95a78bee34bb26a64c7ec30f7bd0496e072f1,The Focus-Aspect-Polarity Model for Predicting Subjective Noun Attributes in Images,"The Focus-Aspect-Polarity Model
+for Predicting Subjective Noun Attributes in Images
+Tushar Karayil1
+DFKI, Germany
+Philipp Blandfort1
+DFKI and TUK, Germany
+J¨orn Hees
+DFKI, Germany
+Andreas Dengel
+DFKI, Germany"
+37278ffce3a0fe2c2bbf6232e805dd3f5267eba3,Can we still avoid automatic face detection?,"Can we still avoid automatic face detection?
+Michael J. Wilber1,2
+Vitaly Shmatikov1,2
+Serge Belongie1,2
+Department of Computer Science, Cornell University 2 Cornell Tech"
+377a1be5113f38297716c4bb951ebef7a93f949a,Facial emotion recognition with anisotropic inhibited Gabor energy histograms,"Dear Faculty, IGERT Fellows, IGERT Associates and Students,
+You are cordially invited to attend a Seminar presented by Albert Cruz. Please
+plan to attend.
+Albert Cruz
+IGERT Fellow
+Electrical Engineering
+Date: Friday, October 11, 2013
+Location: Bourns A265
+Time: 11:00am
+Facial emotion recognition with anisotropic
+inhibited gabor energy histograms"
+37992120053b50b2f92eaa1949273bf828a54b50,Face Recognition Techniques - An evaluation Study,"Int. J. Advanced Networking and Applications
+Volume: 6 Issue: 4 Pages: 2393-2397 (2015) ISSN: 0975-0290
+Face Recognition Techniques - An evaluation
+Department of Management Information System, Applied Science University, 166-11391, Jordan
+Study
+Dr.Asmahan M Altaher
+Email:
+Keywords"
+3765df816dc5a061bc261e190acc8bdd9d47bec0,Presentation and validation of the Radboud Faces Database,"This article was downloaded by: [Radboud University Nijmegen]
+On: 24 November 2010
+Access details: Access Details: [subscription number 907172236]
+Publisher Psychology Press
+Informa Ltd Registered in England and Wales Registered Number: 1072954 Registered office: Mortimer House, 37-
+1 Mortimer Street, London W1T 3JH, UK
+Cognition & Emotion
+Publication details, including instructions for authors and subscription information:
+http://www.informaworld.com/smpp/title~content=t713682755
+Presentation and validation of the Radboud Faces Database
+Oliver Langnera; Ron Dotscha; Gijsbert Bijlstraa; Daniel H. J. Wigboldusa; Skyler T. Hawkb; Ad van
+Knippenberga
+Radboud University Nijmegen, Nijmegen, The Netherlands b University of Amsterdam, Amsterdam,
+The Netherlands
+Online publication date: 22 November 2010
+To cite this Article Langner, Oliver , Dotsch, Ron , Bijlstra, Gijsbert , Wigboldus, Daniel H. J. , Hawk, Skyler T. and van
+Knippenberg, Ad(2010) 'Presentation and validation of the Radboud Faces Database', Cognition & Emotion, 24: 8, 1377 —
+To link to this Article: DOI: 10.1080/02699930903485076
+URL: http://dx.doi.org/10.1080/02699930903485076
+PLEASE SCROLL DOWN FOR ARTICLE"
+370e0d9b89518a6b317a9f54f18d5398895a7046,Cross-pollination of normalisation techniques from speaker to face authentication using Gaussian mixture models,"IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY, VOL. X, NO. X, XXXXXXX 20XX
+Cross-pollination of normalisation techniques
+from speaker to face authentication
+using Gaussian mixture models
+Roy Wallace, Member, IEEE, Mitchell McLaren, Member, IEEE, Christopher McCool, Member, IEEE,
+nd S´ebastien Marcel, Member, IEEE"
+372bc106c61e7eb004835e85bbfee997409f176a,Coupled Generative Adversarial Networks,"Coupled Generative Adversarial Networks
+Mitsubishi Electric Research Labs (MERL),
+Mitsubishi Electric Research Labs (MERL),
+Ming-Yu Liu
+Oncel Tuzel"
+37838a832838ff3211b358bc51ba5105b9d82e89,The Complete Gabor-Fisher Classifier for Robust Face Recognition,"EURASIP JOURNAL ON ADVANCES IS SIGNAL PROCESSING
+The Complete Gabor-Fisher Classifier for Robust
+Face Recognition
+Vitomir ˇStruc and Nikola Paveˇsi´c"
+37381718559f767fc496cc34ceb98ff18bc7d3e1,Harnessing Synthesized Abstraction Images to Improve Facial Attribute Recognition,Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intelligence (IJCAI-18)
+372bf2716c53e353be6c3f027493f1a40edb6640,MINE: Mutual Information Neural Estimation,"Mutual Information Neural Estimation
+Mohamed Ishmael Belghazi 1 Aristide Baratin 1 2 Sai Rajeswar 1 Sherjil Ozair 1 Yoshua Bengio 1 3 4
+Aaron Courville 1 3 R Devon Hjelm 1 4"
+3773e5d195f796b0b7df1fca6e0d1466ad84b5e7,UNIVERSITY OF CALIFORNIA RIVERSIDE Learning from Time Series in the Presence of Noise: Unsupervised and Semi-Supervised Approaches,"UNIVERSITY OF CALIFORNIA
+RIVERSIDE
+Learning from Time Series in the Presence of Noise: Unsupervised and Semi-Supervised
+Approaches
+A Dissertation submitted in partial satisfaction
+of the requirements for the degree of
+Doctor of Philosophy
+Computer Science
+Dragomir Dimitrov Yankov
+March 2008
+Dissertation Committee:
+Dr. Eamonn Keogh, Chairperson
+Dr. Stefano Lonardi
+Dr. Vassilis Tsotras"
+37eb666b7eb225ffdafc6f318639bea7f0ba9a24,"Age, Gender and Race Estimation from Unconstrained Face Images","MSU Technical Report (2014): MSU-CSE-14-5
+Age, Gender and Race Estimation from
+Unconstrained Face Images
+Hu Han, Member, IEEE and Anil K. Jain, Fellow, IEEE"
+375993fd5f94c7b02169ff0d71a74d1b84262dfc,Parallel Application Library for Object Recognition,"Parallel Application Library for Object Recognition
+Bor-Yiing Su
+Electrical Engineering and Computer Sciences
+University of California at Berkeley
+Technical Report No. UCB/EECS-2012-199
+http://www.eecs.berkeley.edu/Pubs/TechRpts/2012/EECS-2012-199.html
+September 27, 2012"
+375435fb0da220a65ac9e82275a880e1b9f0a557,From Pixels to Response Maps: Discriminative Image Filtering for Face Alignment in the Wild,"This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI
+From Pixels to Response Maps: Discriminative Image
+Filtering for Face Alignment in the Wild
+Akshay Asthana, Stefanos Zafeiriou, Georgios Tzimiropou-
+los, Shiyang Cheng and Maja Pantic"
+37a23e76674e606ce779131d2c93496e8a53bb2f,The discrete cosine transform (DCT) plus local normalization: a novel two-stage method for de-illumination in face recognition,"Optica Applicata, Vol. XLI, No. 4, 2011
+The discrete cosine transform (DCT)
+plus local normalization:
+novel two-stage method
+for de-illumination in face recognition
+MINGHUA ZHAO*, YINGHUI WANG, ZHENGHAO SHI, JIULONG ZHANG
+School of Computer Science and Engineering, Xi’an University of Technology, Xi’an710048, China
+*Corresponding author:
+To deal with illumination variations in face recognition, a novel two-stage illumination
+normalization method is proposed in this paper. Firstly, a discrete cosine transform (DCT) is used
+on the original images in logarithm domain. DC coefficient is set based on the average pixel value
+of all the within-class training samples and some low frequency AC coefficients are set to zero to
+eliminate illumination variations in large areas. Secondly, local normalization method, which can
+minimize illumination variations in small areas, is used on the inverse DCT images. This makes
+the pixel values on the processed images be close to or equal to that of the normal illumination
+ondition. Experimental results, both on Yale B database and Extended Yale B database, show
+that the proposed method can eliminate effect of illumination variations effectively and improve
+performance of face recognition methods significantly. The present method does not demand
+modeling step and can eliminate the effect of illumination variations before face recognition. In
+this way, it can be used as a preprocessing step for any existing face recognition method."
+3726b82007512a15a530fd1adad57af58a9abb62,Teaching Compositionality to CNNs,"Teaching Compositionality to CNNs∗
+Austin Stone
+Yi Liu
+Huayan Wang
+D. Scott Phoenix
+Michael Stark
+Dileep George
+Vicarious FPC, San Francisco, CA, USA
+{austin, huayan, michael, yi, scott,"
+37b6d6577541ed991435eaf899a2f82fdd72c790,Vision-based Human Gender Recognition: A Survey,"Vision-based Human Gender Recognition: A Survey
+Choon Boon Ng, Yong Haur Tay, Bok Min Goi
+Universiti Tunku Abdul Rahman, Kuala Lumpur, Malaysia."
+37b0357d2db89bc4560d4201c3c2478988c87640,Face Recognition Based on Curvelet Transform and LS-SVM,"ISBN 978-952-5726-02-2 (Print), 978-952-5726-03-9 (CD-ROM)
+Proceedings of the 2009 International Symposium on Information Processing (ISIP’09)
+Huangshan, P. R. China, August 21-23, 2009, pp. 140-143
+Face Recognition Based on Curvelet Transform
+nd LS-SVM
+School of Electronics, Jiangxi University of Finance and Economics, Nanchang, China
+Jianhong Xie
+long
+reduce
+singularities
+urves. To"
+37347e4c1b35196761fc1620e451738f880f0392,Exemplar-based human action pose correction and tagging,"Exemplar-Based Human Action Pose Correction and Tagging
+Wei Shen
+Ke Deng
+Xiang Bai
+Huazhong Univ. of Sci.&Tech.
+Microsoft Corporation
+Huazhong Univ. of Sci.&Tech.
+Tommer Leyvand
+Microsoft Corporation
+Baining Guo
+Zhuowen Tu
+Microsoft Research Asia
+Microsoft Research Asia & UCLA"
+3752dc15fada54abc0af866273d03a28f4dc8975,A Variational Framework for Pedestrian Segmentation in Cluttered Scenes Using Bag of Optical Flows and Shape Priors,"A VARIATIONAL FRAMEWORK FOR PEDESTRIAN
+SEGMENTATION IN CLUTTERED SCENES USING
+BAG OF OPTICAL FLOWS AND SHAPE PRIORS
+Gagan Bansal
+A thesis submitted to The Johns Hopkins University in conformity with the requirements
+for the degree of Master of Science.
+Baltimore, Maryland
+January, 2009
+(cid:176) Gagan Bansal 2009
+All rights reserved"
+375e478acf62eede1cc69693c54d81aa718df9e7,DFT domain Feature Extraction using Edge-based Scale Normalization for Enhanced Face Recognition,"Journal of Advanced Computer Science and Technology, 1 (3) (2012) 134-166
+(cid:13)Science Publishing Corporation
+www.sciencepubco.com/index.php/JACST
+DFT domain Feature Extraction using
+Edge-based Scale Normalization for
+Enhanced Face Recognition
+K Manikantan1,∗, S Ramachandran2,†
+Department of Electronics and Communication Engineering,
+M S Ramaiah Institute of Technology, Bangalore, Karnataka, India 560054
+Department of Electronics and Communication Engineering,
+S J B Institute of Technology, Bangalore, Karnataka, India 560060"
+372a8bf0ef757c08551d41e40cb7a485527b6cd7,Unsupervised Video Hashing by Exploiting Spatio-Temporal Feature,"Unsupervised Video Hashing by Exploiting
+Spatio-Temporal Feature
+Chao Ma, Yun Gu, Wei Liu, and Jie Yang(cid:63)
+Institute of Image Processing and Pattern Recognition, Shanghai Jiao Tong
+University, Shanghai, China."
+37c4541037b67e8f4c538b285efe80aa251a49b9,Tracking as a Whole: Multi-Target Tracking by Modeling Group Behavior With Sequential Detection,"Tracking as a Whole: Multi-Target Tracking
+y Modeling Group Behavior With
+Sequential Detection
+Yuan Yuan, Senior Member, IEEE, Yuwei Lu, and Qi Wang, Senior Member, IEEE"
+376b73334bd9aebed1fbb69c4ed3848ec0826b6c,Online non-rigid structure-from-motion based on a keyframe representation of history,"Online Non-rigid Structure-from-motion based on a
+keyframe representation of history
+Simon Donn´e, Ljubomir Jovanov, Bart Goossens, Wilfried Philips, Aleksandra Piˇzurica
+Department of Telecommunications and Information Processing (TELIN)
+{Simon.Donne, Ljubomir.Jovanov, Bart.Goossens, Wilfried.Philips,
+Ghent University
+Ghent, Belgium"
+370ed90971eca7ad84c67d8804f97e02ff6fd5b4,"The Socio-Moral Image Database (SMID): A novel stimulus set for the study of social, moral and affective processes","RESEARCH ARTICLE
+The Socio-Moral Image Database (SMID): A
+novel stimulus set for the study of social,
+moral and affective processes
+Damien L. Crone1*, Stefan Bode1, Carsten Murawski2, Simon M. Laham1
+Melbourne School of Psychological Sciences, University of Melbourne, Melbourne, Australia,
+Department of Finance, University of Melbourne, Melbourne, Australia"
+370277791a0708b7c93deb21da172e025b558643,"Fusing LIDAR, camera and semantic information: A context-based approach for pedestrian detection","Fusing LIDAR, camera and semantic information:
+context-based approach for pedestrian detection
+Cristiano Premebida and Urbano Nunes
+The final version is available at: http://ijr.sagepub.com/content/32/3.toc
+This is a pre-print version."
+370b5757a5379b15e30d619e4d3fb9e8e13f3256,Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments,"Labeled Faces in the Wild: A Database for Studying
+Face Recognition in Unconstrained Environments
+Gary B. Huang, Manu Ramesh, Tamara Berg, and Erik Learned-Miller"
+08aedeb74dda306a14c699ffcef4f434a60f34e8,3 D Spatial Layout and Geometric Constraints for Scene Understanding by Varsha Chandrashekhar,(cid:13) 2011 Varsha Chandrashekhar Hedau
+08d2f655361335bdd6c1c901642981e650dff5ec,Automatic Cast Listing in Feature-Length Films with Anisotropic Manifold Space,"This is the published version:
+Arandjelovic, Ognjen and Cipolla, R. 2006, Automatic cast listing in feature‐length films with
+Anisotropic Manifold Space, in CVPR 2006 : Proceedings of the Computer Vision and Pattern
+Recognition Conference 2006, IEEE, Piscataway, New Jersey, pp. 1513‐1520.
+http://hdl.handle.net/10536/DRO/DU:30058435
+Reproduced with the kind permission of the copyright owner.
+Copyright : 2006, IEEE
+Available from Deakin Research Online:"
+085fce160b0fa279597bf23b518c56c735d9e7ff,Joint detection and recognition of human actions in wireless surveillance camera networks,"Joint Detection and Recognition of Human Actions in Wireless
+Surveillance Camera Networks
+Nikhil Naikal1, Pedram Lajevardi2 and Shankar. S. Sastry1"
+08fbe3187f31b828a38811cc8dc7ca17933b91e9,Statistical Computations on Grassmann and Stiefel Manifolds for Image and Video-Based Recognition,"MITSUBISHI ELECTRIC RESEARCH LABORATORIES
+http://www.merl.com
+Statistical Computations on Grassmann and
+Stiefel Manifolds for Image and Video-Based
+Recognition
+Turaga, P.; Veeraraghavan, A.; Srivastava, A.; Chellappa, R.
+TR2011-084 April 2011"
+08ae100805d7406bf56226e9c3c218d3f9774d19,Predicting the Sixteen Personality Factors (16PF) of an individual by analyzing facial features,"Gavrilescu and Vizireanu EURASIP Journal on Image and Video Processing (2017) 2017:59
+DOI 10.1186/s13640-017-0211-4
+EURASIP Journal on Image
+nd Video Processing
+R ES EAR CH
+Predicting the Sixteen Personality Factors
+(16PF) of an individual by analyzing facial
+features
+Mihai Gavrilescu* and Nicolae Vizireanu
+Open Access"
+08c6943a17f267ef27316cff9248b3036a7059f3,We are not contortionists: Coupled adaptive learning for head and body orientation estimation in surveillance video,"We are not Contortionists: Coupled Adaptive Learning
+for Head and Body Orientation Estimation in Surveillance Video
+Cheng Chen
+Jean-Marc Odobez
+Idiap Research Institute – CH-1920, Martigny, Switzerland
+(cid:3)"
+08c18b2f57c8e6a3bfe462e599a6e1ce03005876,A Least-Squares Framework for Component Analysis,"A Least-Squares Framework
+for Component Analysis
+Fernando De la Torre Member, IEEE,"
+08ff81f3f00f8f68b8abd910248b25a126a4dfa4,Symmetric Subspace Learning for Image Analysis,"Papachristou, K., Tefas, A., & Pitas, I. (2014). Symmetric Subspace Learning
+5697. DOI: 10.1109/TIP.2014.2367321
+Peer reviewed version
+Link to published version (if available):
+0.1109/TIP.2014.2367321
+Link to publication record in Explore Bristol Research
+PDF-document
+This is the author accepted manuscript (AAM). The final published version (version of record) is available online
+via Institute of Electrical and Electronic Engineers at http://dx.doi.org/10.1109/TIP.2014.2367321. Please refer to
+ny applicable terms of use of the publisher.
+University of Bristol - Explore Bristol Research
+General rights
+This document is made available in accordance with publisher policies. Please cite only the published
+version using the reference above. Full terms of use are available:
+http://www.bristol.ac.uk/pure/about/ebr-terms"
+08d625158727bd97ba6fc58992158ee55a53011c,HCLAE: High Capacity Locally Aggregating Encodings for Approximate Nearest Neighbor Search,"HCLAE: High Capacity Locally Aggregating Encodings for Approximate Nearest
+Neighbor Search
+{artheru, yz sjr, Shanghai Jiaotong University
+Liu Shicong, Shao Junru, Lu Hongtao"
+08f46d6a91e513edd57a0ef15d5367b5d0545c1b,"How do targets, nontargets, and scene context influence real-world object detection?","Atten Percept Psychophys
+DOI 10.3758/s13414-017-1359-9
+How do targets, nontargets, and scene context influence
+real-world object detection?
+Harish Katti 1
+& Marius V. Peelen 2 & S. P. Arun 1
+# The Psychonomic Society, Inc. 2017"
+0888b6904ef12bc7a3c59fa59c4051d5002de80f,Learning with Shared Information for Image and Video Analysis,"DEPARTMENT OF INFORMATION ENGINEERING AND COMPUTER SCIENCE
+ICT International Doctoral School
+LEARNING WITH SHARED INFORMATION FOR IMAGE
+AND VIDEO ANALYSIS
+Gaowen Liu
+Advisor
+Prof. Nicu Sebe
+Universit`a degli Studi di Trento"
+0834dff6e1d37ecb36137e019f8e2c933d5e74f6,Building Part-Based Object Detectors via 3D Geometry,"BUILDING PART-BASED OBJECT DETECTORS VIA 3D GEOMETRY
+Experimental Results
+Qualitative Results
+Input Image
+DPM Detection
+Test Set: NYU v2 RGB Images
+gDPM Detection Predicted Geometry
+Bed gDPM Model 3
+Sofa gDPM Model 3
+Table gDPM Model 3
+Discriminative Part-based Models
+Supervised Parts
+Unsupervised Parts
+Key-point/part annotation, e.g.,
+Heuristic initialization, e.g., gradient
+natomical.
+magnitudes.
+. Overview
+. Overview
+As input to the system, at training, we use RGB images"
+0816cbac9ea8f4425d9b57fd46174cb35cd5d7cc,People tracking in RGB-D data with on-line boosted target models,"People Tracking in RGB-D Data
+With On-line Boosted Target Models
+Matthias Luber
+Luciano Spinello
+Kai O. Arras"
+0856622ce2fcc4e39fd396427abae90cddf78fd0,Abnormal activation of the social brain during face perception in autism.,"Abnormal Activation of the Social Brain During
+Face Perception in Autism
+Nouchine Hadjikhani,1,2* Robert M. Joseph,3 Josh Snyder,1
+nd Helen Tager-Flusberg3
+Athinoula A. Martinos Center for Biomedical Imaging, Massachusetts General Hospital,
+Division of Health Sciences and Technology, Harvard-Massachusetts Institute of Technology,
+Harvard Medical School, Charlestown, Massachusetts
+Department of Anatomy and Neurobiology, Boston University School of Medicine, Boston,
+Cambridge, Massachusetts
+Massachusetts"
+083a2bc86e0984968b06593ba06654277b252f00,Neural evidence for the contribution of holistic processing but not attention allocation to the other-race effect on face memory.,"Cognitive, Affective, & Behavioral Neuroscience (2018) 18:1015–1033
+https://doi.org/10.3758/s13415-018-0619-z
+Neural evidence for the contribution of holistic processing but not
+ttention allocation to the other-race effect on face memory
+Grit Herzmann 1 & Greta Minor 1 & Tim Curran 2
+Published online: 25 June 2018
+# Psychonomic Society, Inc. 2018"
+085ca7f8935808986ae1c6afbbb62f6804049f26,Monocular 3D human pose estimation by classification,"Universit¨at Augsburg
+Monocular 3D Human Pose Estimation
+y Classification
+T. Greif, D. Sengupta, R. Lienhart
+Report 2011-09
+M¨arz 2011
+Institut f¨ur Informatik
+D-86135 Augsburg"
+0875af310ab8c850b3232b3f6b84535ffff84e5d,A Novel Technique to Detect Faces in a Group Photo,"International Journal of Computer Applications (0975 – 8887)
+Volume 54– No.1, September 2012
+A Novel Technique to Detect Faces in a Group Photo
+Saravanan Chandran
+Assistant Professor, National Institute of Technology, Durgapur, West Bengal, India."
+081093b0b3195e3f6bfa283b49fee26b606d4f67,Object Co-detection,"Object Co-detection
+Sid Yingze Bao, Yu Xiang, Silvio Savarese
+University of Michigan at Ann Arbor, USA
+{yingze, yuxiang,"
+08bbb59036c4b85a2418f9702ccd37929c5dd154,Understanding and Predicting the Memorability of Natural Scene Images,"Understanding and Predicting the Memorability of
+Natural Scene Images
+Jiaxin Lu, Mai Xu, Senior Member, IEEE, Ren Yang and Zulin Wang"
+08bdb84d5c66265b3b6d33e8f95c4cc27caf33ad,Detecting Visual Relationships Using Box Attention,"Detecting Visual Relationships Using Box Attention
+Alexander Kolesnikov∗
+Google AI
+Christoph H. Lampert
+IST Austria
+Vittorio Ferrari
+Google AI"
+084bd219dd239dc4c9a02621a5333d3bc1446566,DeepTrack: Learning Discriminative Feature Representations Online for Robust Visual Tracking,"DeepTrack: Learning Discriminative Feature
+Representations Online for Robust Visual Tracking
+Hanxi Li, Yi Li, Fatih Porikli"
+0861f86fb65aa915fbfbe918b28aabf31ffba364,An Efficient Facial Annotation with Machine Learning Approach,"International Journal of Computer Trends and Technology (IJCTT) – volume 22 Number 3–April 2015
+An Efficient Facial Annotation with Machine Learning Approach
+A.Anusha,2R.Srinivas
+Final M.Tech Student, 2Associate Professor
+,2Dept of CSE ,Aditya Institute of Technology And Management, Tekkali, Srikakulam , Andhra Pradesh"
+082a8642455b9a5cfb27c07cf9969106f8a7bf3c,Face recognition is similarly affected by viewpoint in school-aged children and adults,"Face recognition is similarly affected by
+viewpoint in school-aged children and
+dults
+Marisa Nordt and Sarah Weigelt
+Department of Developmental Neuropsychology, Institute of Psychology, Ruhr-Universität Bochum,
+Bochum, Germany"
+08b76e6923eea74ab0ed149811b3144fa21c7c73,Scalable Laplacian K-modes,"Scalable Laplacian K-modes
+Imtiaz Masud Ziko ∗
+ÉTS Montreal
+Eric Granger
+ÉTS Montreal
+Ismail Ben Ayed
+ÉTS Montreal"
+08809165154c9c557d368cddfa3ae66ccaceaed9,Taming VAEs,"Taming VAEs
+Danilo J. Rezende ∗
+Fabio Viola ∗
+{danilor,
+DeepMind, London, UK"
+080c204edff49bf85b335d3d416c5e734a861151,CLAD: A Complex and Long Activities Dataset with Rich Crowdsourced Annotations,"CLAD: A Complex and Long Activities
+Dataset with Rich Crowdsourced
+Annotations
+Jawad Tayyub1, Majd Hawasly2∗, David C. Hogg1 and Anthony G. Cohn1
+Journal Title
+XX(X):1–6
+(cid:13)The Author(s) 2016
+Reprints and permission:
+sagepub.co.uk/journalsPermissions.nav
+DOI: 10.1177/ToBeAssigned
+www.sagepub.com/"
+08f4832507259ded9700de81f5fd462caf0d5be8,Geometric Approach for Human Emotion Recognition using Facial Expression,"International Journal of Computer Applications (0975 – 8887)
+Volume 118 – No.14, May 2015
+Geometric Approach for Human Emotion
+Recognition using Facial Expression
+S. S. Bavkar
+Assistant Professor
+VPCOE Baramati
+J. S. Rangole
+Assistant Professor
+VPCOE Baramati
+V. U. Deshmukh
+Assistant Professor
+VPCOE Baramati"
+081d6ac51bbb7df142e3db6649fb5d663e90d569,Generalized zero-shot learning for action recognition with web-scale video data,"Noname manuscript No.
+(will be inserted by the editor)
+Generalized Zero-Shot Learning for Action
+Recognition with Web-Scale Video Data
+Kun Liu · Wu Liu · Huadong Ma ·
+Wenbing Huang · Xiongxiong Dong
+Received: date / Accepted: date"
+082d339e29b1b1a9a800a1d72b401f69b6a157c5,Webly Supervised Joint Embedding for Cross-Modal Image-Text Retrieval,"Webly Supervised Joint Embedding for Cross-Modal
+Image-Text Retrieval
+Niluthpol Chowdhury Mithun
+University of California, Riverside, CA
+Evangelos E. Papalexakis
+University of California, Riverside, CA
+Rameswar Panda
+University of California, Riverside, CA
+Amit K. Roy-Chowdhury
+University of California, Riverside, CA"
+08d40ee6e1c0060d3b706b6b627e03d4b123377a,Towards Weakly-Supervised Action Localization,"Human Action Localization
+with Sparse Spatial Supervision
+Philippe Weinzaepfel, Xavier Martin, and Cordelia Schmid, Fellow, IEEE"
+08030f9d34cc96384f672d9f9f296914d594335b,Multiple Object Tracking: A Literature Review,"Multiple Object Tracking: A Literature Review
+Wenhan Luo, Junliang Xing, Anton Milan, Xiaoqin Zhang, Wei Liu, Xiaowei Zhao and Tae-Kyun Kim"
+085ba9f82e15603f1fe2a29dfa0182d46465a591,Face Recognition In Presence Of Occlusion Using Machine Learning Classifier,"International Journal of Science, Engineering and Technology Research (IJSETR), Volume 3, Issue 4, April 2014
+Face Recognition In Presence Of Occlusion
+Using Machine Learning Classifier
+Vandana P, Manjunath C N
+chieve"
+088aabe3da627432fdccf5077969e3f6402f0a80,Classifier-to-generator Attack: Estimation,"Under review as a conference paper at ICLR 2018
+CLASSIFIER-TO-GENERATOR ATTACK: ESTIMATION
+OF TRAINING DATA DISTRIBUTION FROM CLASSIFIER
+Anonymous authors
+Paper under double-blind review"
+084f1a6c62a3464b1a9b745fee40af2895920301,Capitalize on dimensionality increasing techniques for improving face recognition grand challenge performance,"Capitalize on Dimensionality Increasing
+Techniques for Improving Face Recognition
+Grand Challenge Performance
+Chengjun Liu"
+08903bf161a1e8dec29250a752ce9e2a508a711c,Joint Dimensionality Reduction and Metric Learning: A Geometric Take,"Joint Dimensionality Reduction and Metric Learning: A Geometric Take
+Mehrtash Harandi 1 2 Mathieu Salzmann 3 Richard Hartley 2 1"
+08847df8ea5b22c6a2d6d75352ef6270f53611de,Using k-Poselets for Detecting People and Localizing Their Keypoints,"Using k-poselets for detecting people and localizing their keypoints
+Georgia Gkioxari∗, Bharath Hariharan∗, Ross Girshick and Jitendra Malik
+University of California, Berkeley - Berkeley, CA 94720"
+08e24f9df3d55364290d626b23f3d42b4772efb6,Enhancing facial expression classification by information fusion,"ENHANCING FACIAL EXPRESSION CLASSIFICATION BY INFORMATION
+FUSION
+I. Buciu1, Z. Hammal 2, A. Caplier2, N. Nikolaidis 1, and I. Pitas 1
+AUTH/Department of Informatics/ Aristotle University of Thessaloniki
+phone: + 30(2310)99.6361, fax: + 30(2310)99.8453, email:
+GR-54124, Thessaloniki, Box 451, Greece
+Laboratoire des Images et des Signaux / Institut National Polytechnique de Grenoble
+phone: + 33(0476)574363, fax: + 33(0476)57 47 90, email:
+web: http://www.aiia.csd.auth.gr
+8031 Grenoble, France
+web: http://www.lis.inpg.fr"
+08ff22f76a567fcbc1afec6bfbf957a560cfadc7,Exploring Person Context and Local Scene Context for Object Detection,"Exploring Person Context and Local Scene Context for Object Detection
+Saurabh Gupta∗
+UC Berkeley
+Bharath Hariharan∗
+Facebook AI Research
+Jitendra Malik
+UC Berkeley"
+08b0664fd37cd434201a1b37c20c0919833a6ff1,Online Multi-Object Tracking with Historical Appearance Matching and Scene Adaptive Detection Filtering,"Online Multi-Object Tracking with Historical Appearance Matching and
+Scene Adaptive Detection Filtering
+Young-chul Yoon Abhijeet Boragule Young-min Song Kwangjin Yoon Moongu Jeon
+Gwangju Institute of Science and Technology
+23 Cheomdangwagi-ro, Buk-gu, Gwangju, 61005, South Korea
+{zerometal9268, abhijeet, sym, yoon28,
+(cid:11)(cid:36)(cid:57)(cid:54)(cid:54)(cid:3)(cid:21)(cid:19)(cid:20)(cid:27)(cid:12)
+(cid:20)(cid:17)(cid:3)(cid:44)(cid:81)(cid:87)(cid:85)(cid:82)(cid:71)(cid:88)(cid:70)(cid:87)(cid:76)(cid:82)(cid:81)(cid:3)(cid:11)(cid:87)(cid:72)(cid:80)(cid:83)(cid:82)(cid:85)(cid:68)(cid:79)(cid:3)(cid:72)(cid:85)(cid:85)(cid:82)(cid:85)(cid:86)(cid:3)(cid:71)(cid:88)(cid:85)(cid:76)(cid:81)(cid:74)(cid:3)(cid:87)(cid:85)(cid:68)(cid:70)(cid:78)(cid:76)(cid:81)(cid:74)(cid:12)"
+08ca2a2a543ee74e2bd6585e0a059b30aae65d30,Semantic Video Segmentation with Using Ensemble of Particular Classifiers and a Deep Neural Network for Systems of Detecting Abnormal Situations,"IT in Industry, vol. 6, 2018 Published online 09-Feb-2018
+Semantic Video Segmentation with Using Ensemble
+of Particular Classifiers and a Deep Neural Network
+for Systems of Detecting Abnormal Situations
+O. Amosov, Y. Ivanov, S. Zhiganov
+Department of Industrial Electronics
+Komsomolsk-on-Amur State Technical University
+Komsomolsk-on-Amur, Russia"
+0874a262c2ec7082658cbfc55892ec6e5ca6a374,CaTDet: Cascaded Tracked Detector for Efficient Object Detection from Video,"CATDET: CASCADED TRACKED DETECTOR FOR EFFICIENT OBJECT
+DETECTION FROM VIDEO
+Huizi Mao 1 Taeyoung Kong 1 William J. Dally 1 2"
+0857281a3b6a5faba1405e2c11f4e17191d3824d,Face recognition via edge-based Gabor feature representation for plastic surgery-altered images,"Chude-Olisah et al. EURASIP Journal on Advances in Signal Processing 2014, 2014:102
+http://asp.eurasipjournals.com/content/2014/1/102
+R ES EAR CH
+Face recognition via edge-based Gabor feature
+representation for plastic surgery-altered images
+Chollette C Chude-Olisah1*, Ghazali Sulong1, Uche A K Chude-Okonkwo2 and Siti Z M Hashim1
+Open Access"
+08b70ab782141a2d7003226a0f438a6aea0a0d46,Parametrizing Fully Convolutional Nets,"Under review as a conference paper at ICLR 2019
+PARAMETRIZING FULLY CONVOLUTIONAL NETS
+WITH A SINGLE HIGH-ORDER TENSOR
+Anonymous authors
+Paper under double-blind review"
+081456e22734a2cdef442345f80182e84d1c6124,Approaches for Multi-Class Discriminant Analysis for Ranking Principal Components,"Approaches for Multi-Class Discriminant Analysis
+for Ranking Principal Components
+Tiene Andre Filisbino
+Laborat´orio Nacional
+Gilson Antonio Giraldi
+Laborat´orio Nacional
+Carlos Eduardo Thomaz
+Departamento de Engenharia El´etrica
+de Computac¸˜ao Cient´ıfica - LNCC
+de Computac¸˜ao Cient´ıfica - LNCC
+Centro Universit´ario da FEI
+Petr´opolis, RJ 25651-075
+Email:
+Petr´opolis, RJ 25651-075
+Email:
+S˜ao Bernardo do Campo, SP 09850-901
+Email:"
+08f00e5adaba03628144dbc97daefa8ceb6e5322,Machine Vision based Fruit Classification and Grading-A Review,"International Journal of Computer Applications (0975 – 8887)
+Volume 170 – No.9, July 2017
+Machine Vision based Fruit Classification and
+Grading - A Review
+Sapan Naik
+Babu Madhav Institute of Information Technology
+Uka Tarsadia University,
+Bardoli, Surat, Gujarat, India."
+08ff3e9f5ad47e59592ad993348b817003b9c0e4,A Sequential Classifier for Hand Detection in the Framework of Egocentric Vision,"A Sequential Classifier for Hand Detection in the Framework of Egocentric Vision
+Alejandro Betancourt1,2
+Miriam M. L´opez1
+Carlo S. Regazzoni1
+Matthias Rauterberg2
+Department of Naval, Electric, Electronic and Telecommunications Engineering - University of Genoa, Italy
+Designed Intelligence Group, Department of Industrial Design - Eindhoven University of Technology, The Netherlands"
+08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7,Understanding Kin Relationships in a Photo,"Understanding Kin Relationships in a Photo
+Siyu Xia, Ming Shao, Student Member, IEEE, Jiebo Luo, Fellow, IEEE, and Yun Fu, Senior Member, IEEE"
+6d88fb85fe5c61bd65e0a373cd39fac81a19596a,DC-Image for Real Time Compressed Video Matching,"DC-Image for Real Time Compressed
+Video Matching
+Saddam Bekhet, Amr Ahmed and Andrew Hunter"
+6d96bf377c96e1dd9b43e9f12e0ee2a66543edbe,Viewpoint invariant 3D landmark model inference from monocular 2D images using higher-order priors,"011 IEEE International Conference on Computer Vision
+978-1-4577-1102-2/11/$26.00 c(cid:13)2011 IEEE"
+6dd052df6b0e89d394192f7f2af4a3e3b8f89875,A literature survey on Facial Expression Recognition using Global Features,"International Journal of Engineering and Advanced Technology (IJEAT)
+ISSN: 2249 – 8958, Volume-2, Issue-4, April 2013
+A literature survey on Facial Expression
+Recognition using Global Features
+Vaibhavkumar J. Mistry, Mahesh M. Goyani"
+6d84d92d9ed6c226f0cc6401bc425a23432c9f96,Autism spectrum disorders: clinical and research frontiers.,"Downloaded from
+dc.bmj.com
+on 22 May 2008
+Autism spectrum disorders: clinical and research
+frontiers
+E B Caronna, J M Milunsky and H Tager-Flusberg
+Arch. Dis. Child.
+doi:10.1136/adc.2006.115337
+2008;93;518-523; originally published online 27 Feb 2008;
+Updated information and services can be found at:
+http://adc.bmj.com/cgi/content/full/93/6/518
+These include:
+References
+This article cites 70 articles, 25 of which can be accessed free at:
+http://adc.bmj.com/cgi/content/full/93/6/518#BIBL
+Rapid responses
+You can respond to this article at:
+http://adc.bmj.com/cgi/eletter-submit/93/6/518
+Email alerting
+service"
+6dd5dbb6735846b214be72983e323726ef77c7a9,A Survey on Newer Prospective Biometric Authentication Modalities,"Josai Mathematical Monographs
+vol. 7 (2014), pp. 25-40
+A Survey on Newer Prospective
+Biometric Authentication Modalities
+Narishige Abe, Takashi Shinzaki"
+6d10beb027fd7213dd4bccf2427e223662e20b7d,User Adaptive and Context-Aware Smart Home Using Pervasive and Semantic Technologies,"Publishing CorporationJournal of Electrical and Computer EngineeringVolume 2016, Article ID 4789803, 20 pageshttp://dx.doi.org/10.1155/2016/4789803"
+6d500b0c342c1cf23efff049ef121bcf5e606ea1,Real-Time Category-Based and General Obstacle Detection for Autonomous Driving,"Real-time category-based and general obstacle detection for autonomous driving
+Noa Garnett
+Uri Verner
+Ariel Ayash
+Shai Silberstein
+Vlad Goldner
+Shaul Oron
+Rafi Cohen
+Ethan Fetaya
+Kobi Horn
+Dan Levi
+Advanced Technical Center Israel, General Motors R&D
+Hamada 7, Herzlyia, Israel"
+6dd007b6e518a3aa96111028c4664f2647e5e81a,3D Face Synthesis Driven by Personality Impression,"D Face Synthesis Driven by Personality Impression
+Yining Lang1 Wei Liang1 Yujia Wang1 Lap-Fai Yu2
+Beijing Institute of Technology
+University of Massachusetts Boston"
+6d6bb981bc8470de23e30890bd96a76ffd2b7ced,The Eyes Are the Windows to the Mind: Direct Eye Gaze Triggers the Ascription of Others' Minds.,"669124 PSPXXX10.1177/0146167216669124Personality and Social Psychology BulletinKhalid et al.
+research-article2016
+Article
+The Eyes Are the Windows to
+the Mind: Direct Eye Gaze Triggers
+the Ascription of Others’ Minds
+Saara Khalid1, Jason C. Deska1, and Kurt Hugenberg1
+Personality and Social
+Psychology Bulletin
+016, Vol. 42(12) 1666 –1677
+© 2016 by the Society for Personality
+nd Social Psychology, Inc
+Reprints and permissions:
+sagepub.com/journalsPermissions.nav
+DOI: 10.1177/0146167216669124
+pspb.sagepub.com"
+6d432962055a8c521e6b388d5a0a2140a0019a5e,Sensor network reconfiguration and big multimedia data fusion for situational awareness in smart environments,"Sensor network reconfiguration and big multimedia data fusion for situational
+wareness in smart environments
+Z. Akhtar, C. Drioli, M. Farinosi, G. Ferrin, G.L. Foresti, N. Martinel, C. Micheloni, C. Piciarelli, D.
+Salvati, L. Snidaro and M. Vernier
+AVIRES Lab - Department of Mathematics and Computer Science, Università degli Studi di Udine
+Via delle Scienze, 206, 33100 Udine - Italy
+last years, an
+INTRODUCTION
+increasing number of
+environments have been enhanced with smart
+sensors and have become more and more smart and
+self-organizing [1]. Situational awareness (SA) in
+these wide areas covers a huge range of topics and
+hallenges [2]. As matter of fact, understanding
+ctivities
+for situation assessment cannot be
+chieved locally but it requires to widen as much as
+possible the monitored area. Several different and
+new problems must be investigated from the use of
+single sensors able to adapt internal or external"
+6dddf1440617bf7acda40d4d75c7fb4bf9517dbb,"Beyond Counting: Comparisons of Density Maps for Crowd Analysis Tasks - Counting, Detection, and Tracking","JOURNAL OF LATEX CLASS FILES, VOL. XX, NO. X, MM YY
+Beyond Counting: Comparisons of Density Maps for Crowd
+Analysis Tasks - Counting, Detection, and Tracking
+Di Kang, Zheng Ma, Member, IEEE, Antoni B. Chan Senior Member, IEEE,"
+6d902439b736a7546dd8872b307fb760087ca629,SIFT Meets CNN: A Decade Survey of Instance Retrieval,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+SIFT Meets CNN:
+A Decade Survey of Instance Retrieval
+Liang Zheng, Yi Yang, and Qi Tian, Fellow, IEEE"
+6d5b0f6e5258d370f9af8a2cebf035fe61905db1,Gazefinder as a clinical supplementary tool for discriminating between autism spectrum disorder and typical development in male adolescents and adults,"Fujioka et al. Molecular Autism (2016) 7:19
+DOI 10.1186/s13229-016-0083-y
+Open Access
+R ES EAR CH
+Gazefinder as a clinical supplementary tool
+for discriminating between autism
+spectrum disorder and typical development
+in male adolescents and adults
+Toru Fujioka1,2,3, Keisuke Inohara1,4, Yuko Okamoto2,3, Yasuhiro Masuya1, Makoto Ishitobi1,5, Daisuke N. Saito2,3,6,
+Minyoung Jung2,3, Sumiyoshi Arai2,3, Yukiko Matsumura1, Takashi X. Fujisawa2,3, Kosuke Narita7, Katsuaki Suzuki3,8,9,
+Kenji J. Tsuchiya3,8,9, Norio Mori3,8,9, Taiichi Katayama3, Makoto Sato2,3,10,11, Toshio Munesue3,12,
+Hidehiko Okazawa2,3,6, Akemi Tomoda2,3, Yuji Wada1,2,3 and Hirotaka Kosaka1,2,3*"
+6d973fb5f682c491be94aa40a184a1707a8dc24a,Combining Multiple Image Segmentations by Maximizing Expert Agreement,"Combining Multiple Image Segmentations by
+Maximizing Expert Agreement
+Joni-Kristian Kamarainen, Lasse Lensu, and Tomi Kauppi
+Machine Vision and Pattern Recognition Laboratory
+Department of Information Technology
+Lappeenranta University of Technology
+P.O. Box 20, FI-53851 Lappeenranta, Finland
+http://www2.it.lut.fi/mvpr/"
+6d79999f8dc0cb9f86a87eaa2eb313a4eaeb2e5a,Instructions for use Title Bregman pooling : feature-space local pooling for imageclassification,"Title
+Bregman pooling : feature-space local pooling for image
+lassification
+Author(s)
+Najjar, Alameen; Ogawa, Takahiro; Haseyama, Miki
+Citation
+International Journal of Multimedia Information Retrieval
+Issue Date
+015-09-04
+Doc URL
+http://hdl.handle.net/2115/62753
+Right
+The final publication is available at link.springer.com
+rticle (author version)
+Additional
+Information
+Information BP.pdf
+Instructions for use
+Hokkaido University Collection of Scholarly and Academic Papers : HUSCAP"
+6da06fc70f32454f7841b153c582e65aed7047e9,Deep pipelined one-chip FPGA implementation of a real-time image-based human detection algorithm,"NAOSITE: Nagasaki University's Academic Output SITE
+Title
+Deep pipelined one-chip FPGA implementation of a real-time image-based
+human detection algorithm
+Author(s)
+Negi, Kazuhiro; Dohi, Keisuke; Shibata, Yuichiro; Oguri, Kiyoshi
+Citation
+011, Article number6132679; 2011
+Issue Date
+011-12
+Right
+http://hdl.handle.net/10069/29887
+© 2011 IEEE. Personal use of this material is permitted. Permission from
+IEEE must be obtained for all other uses, in any current or future media,
+including reprinting/republishing this material for advertising or
+promotional purposes, creating new collective works, for resale or
+redistribution to servers or lists, or reuse of any copyrighted component of
+this work in other works.
+This document is downloaded at: 2018-12-08T05:46:10Z
+http://naosite.lb.nagasaki-u.ac.jp"
+6dc17e91c0b02ff3b9e5c9283924279c28641db7,A Methodology for Extracting Standing Human Bodies from Single Images,"Invention Journal of Research Technology in Engineering & Management (IJRTEM) ISSN: 2455-3689
+www.ijrtem.com ǁ Volume 1 ǁ Issue 8 ǁ
+A Methodology for Extracting Standing Human Bodies from Single Images
+Dr. Y. Raghavender Rao1, N. Devadas Naik2
+Head ECE JNTUHCEJ Jagtityal
+Asst professor Sri Chaitanya engineering college"
+6d4b5444c45880517213a2fdcdb6f17064b3fa91,Harvesting Image Databases from The Web,"Journal of Information Engineering and Applications
+ISSN 2224-5782 (print) ISSN 2225-0506 (online)
+Vol 2, No.3, 2012
+www.iiste.org
+Harvesting Image Databases from The Web
+Snehal M. Gaikwad
+G.H.Raisoni College of Engg. & Mgmt.,Pune,India
+Snehal S. Pathare
+G.H.Raisoni College of Engg. & Mgmt.,Pune,India
+Trupti A. Jachak
+G.H.Raisoni College of Engg. & Mgmt.,Pune,India"
+6d8c9a1759e7204eacb4eeb06567ad0ef4229f93,"Face Alignment Robust to Pose, Expressions and Occlusions","Face Alignment Robust to Pose, Expressions and
+Occlusions
+Vishnu Naresh Boddeti†, Myung-Cheol Roh†, Jongju Shin, Takaharu Oguri, Takeo Kanade"
+6dd0597f8513dc100cd0bc1b493768cde45098a9,Learning to parse images of articulated bodies,"Learning to parse images of articulated bodies
+Deva Ramanan
+Toyota Technological Institute at Chicago
+Chicago, IL 60637"
+6db59b031406546682a773baed2caed529aaf37c,Inferring the semantics of direction signs in public places,"Inferring the Semantics of Direction Signs in Public Places
+J´erˆome Maye∗, Luciano Spinello∗†, Rudolph Triebel∗, and Roland Siegwart∗
+Autonomous Systems Lab, ETH Zurich, Switzerland
+email: {jerome.maye, rudolph.triebel,
+Social Robotics Lab, Department of Computer Science, University of Freiburg, Germany
+email:"
+6d2b633743178bd5aac1073b60d81ceb41933a4a,Carried Object Detection Based on an Ensemble of Contour Exemplars,"Carried Object Detection based on an Ensemble
+of Contour Exemplars
+Farnoosh Ghadiri1, Robert Bergevin1, Guillaume-Alexandre Bilodeau2
+LVSN-REPARTI, Universit(cid:19)e Laval
+LITIV lab., Polytechnique Montr(cid:19)eal"
+6dfa82f00ec6faee1db319c1e306ae779cfc1c36,"The Role of Methodology and Spatiotemporal Scale in Understanding Environmental Change in Peri-Urban Ouagadougou, Burkina Faso","Remote Sens. 2013, 5, 1465-1483; doi:10.3390/rs5031465
+OPEN ACCESS
+ISSN 2072-4292
+www.mdpi.com/journal/remotesensing
+Article
+The Role of Methodology and Spatiotemporal Scale in
+Understanding Environmental Change in Peri-Urban
+Ouagadougou, Burkina Faso
+Yonatan Kelder 1,*, Thomas Theis Nielsen 1 and Rasmus Fensholt 2
+Roskilde University, Universitetsvej 1, ENSPAC House 0.2, Roskilde 4000, Denmark;
+E-Mail:
+Copenhagen University, Institute for Geography and Geology, Øster Voldgade 10,
+Copenhagen K 1350, Denmark; E-Mail:
+* Author to whom correspondence should be addressed; E-Mail:
+Tel.: +45-30-49-14-92.
+Received: 18 January 2013; in revised form: 24 February 2013 / Accepted: 15 March 2013 /
+Published: 19 March 2013"
+6d7ba173121edd5defadfde04f7c1e7bc72859c2,The study of autism as a distributed disorder.,"MENTAL RETARDATION AND DEVELOPMENTAL DISABILITIES
+RESEARCH REVIEWS 13: 85 – 95 (2007)
+THE STUDY OF AUTISM AS A
+DISTRIBUTED DISORDER
+Brain Development Imaging Laboratory, Department of Psychology, San Diego State University, San Diego, California
+Department of Cognitive Science, University of California, San Diego, California
+Ralph-Axel Mu¨ ller1,2*
+Past autism research has often been dedicated to tracing the
+auses of the disorder to a localized neurological abnormality, a single
+functional network, or a single cognitive-behavioral domain.
+In this
+review, I argue that autism is a ‘‘distributed disorder’’ on various levels of
+study (genetic, neuroanatomical, neurofunctional, behavioral). ‘‘Localizing’’
+models are therefore not promising. The large array of potential genetic
+risk factors suggests that multiple (or all) emerging functional brain net-
+works are affected during early development. This is supported by wide-
+spread growth abnormalities throughout the brain. Interactions during
+development between affected functional networks and atypical experi-
+ential effects (associated with atypical behavior) in children with autism
+further complicate the neurological bases of the disorder, resulting in"
+6dc3b8a5fdceaea4b32df8552cbb5a22ef83c197,Speech-Based Visual Question Answering,"Speech-Based Visual Question Answering
+Ted Zhang
+KU Leuven
+Dengxin Dai
+ETH Zurich
+Tinne Tuytelaars
+KU Leuven
+Marie-Francine Moens
+KU Leuven"
+6d6a106caef228b3eee1f5765740938a534db828,Density-based clustering: A ‘landscape view’ of multi-channel neural data for inference and dynamic complexity analysis,"RESEARCH ARTICLE
+Density-based clustering: A ‘landscape view’ of
+multi-channel neural data for inference and
+dynamic complexity analysis
+Gabriel Baglietto1,2*, Guido Gigante3,4, Paolo Del Giudice1,3
+INFN-Roma1, Italian National Institute for Nuclear Research (INFN), Rome, Italy, 2 IFLYSIB Instituto de
+Fı´sica de Lı´quidos y Sistemas Biolo´gicos (UNLP-CONICET), La Plata, Argentina, 3 Italian Institute of Health
+(ISS), Rome, Italy, 4 Mperience srl, Rome, Italy"
+6d618657fa5a584d805b562302fe1090957194ba,Human Facial Expression Recognition based on Principal Component Analysis and Artificial Neural Network,"Full Paper
+NNGT Int. J. of Artificial Intelligence , Vol. 1, July 2014
+Human Facial Expression Recognition based
+on Principal Component Analysis and
+Artificial Neural Network
+Laboratory of Automatic and Signals Annaba (LASA) , Department of electronics, Faculty of Engineering,
+Zermi.Narima, Ramdani.M, Saaidia.M
+Badji-Mokhtar University, P.O.Box 12, Annaba-23000, Algeria.
+E-Mail :"
+6d7dabc58f53c0233d6d593a8fee76d1c7f44033,Robust Observation Detection for Single Object Tracking: Deterministic and Probabilistic Patch-Based Approaches,"Sensors 2012, 12, 15638-15670; doi:10.3390/s121115638
+OPEN ACCESS
+sensors
+ISSN 1424-8220
+www.mdpi.com/journal/sensors
+Article
+Robust Observation Detection for Single Object Tracking:
+Deterministic and Probabilistic Patch-Based Approaches
+Mohd Asyraf Zulkifley 1,*, David Rawlinson 2 and Bill Moran 2
+Department of Electrical, Electronic and Systems Engineering, Faculty of Engineering and Built
+Environment, Universiti Kebangsaan Malaysia, 43600 Bangi, Malaysia
+Department of Electrical and Electronic Engineering, The University of Melbourne, VIC 3010,
+Australia; E-Mails: (D.R.); (B.M.)
+* Author to whom correspondence should be addressed; E-Mail:
+Tel.: +603-8921-6335.
+Received: 18 September 2012; in revised form: 5 November 2012 / Accepted: 5 November 2012 /
+Published: 12 November 2012
+the problems of blurring, moderate deformation,"
+6d66c98009018ac1512047e6bdfb525c35683b16,Face Recognition Based on Fitting a 3D Morphable Model,"IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. 25, NO. 9, SEPTEMBER 2003
+Face Recognition Based on
+Fitting a 3D Morphable Model
+Volker Blanz and Thomas Vetter, Member, IEEE"
+0172867f4c712b33168d9da79c6d3859b198ed4c,Expression and illumination invariant preprocessing technique for Face Recognition,"Technique for Face Recognition
+Computer and System Engineering Department, Faculty of Engineering, Ain Shams University, Cairo, Egypt
+A. Abbas, M. I. Khalil, S. Abdel-Hay, H. M. Fahmy
+Expression and Illumination Invariant Preprocessing"
+013e9e0f712d8caa89dd0881ab8dcf90d687ba50,Face Recognition using LBP and LVQ Classifier,"Face Recognition using LBP and LVQ Classifier
+Abdul Quyoom
+Department of Computer Science Engineering
+Central University of Rajasthan
+Ajmer, Rajasthan, India
+each human"
+01b5d63b60bcc35aa8bead42ea52a517f879bfc9,Solving Uncalibrated Photometric Stereo Using Total Variation,"Noname manuscript No.
+(will be inserted by the editor)
+Solving Uncalibrated Photometric Stereo using Total
+Variation
+Yvain Qu´eau · Fran¸cois Lauze · Jean-Denis Durou
+the date of receipt and acceptance should be inserted later"
+0145dc4505041bf39efa70ea6d95cf392cfe7f19,Human action segmentation with hierarchical supervoxel consistency,"Human Action Segmentation with Hierarchical Supervoxel Consistency
+Jiasen Lu1, Ran Xu1 Jason J. Corso2
+Department of Computer Science and Engineering, SUNY at Buffalo. 2Department of EECS, University of Michigan.
+Detailed analysis of human action, such as classification, detection and lo-
+alization has received increasing attention from the community; datasets
+like J-HMDB [1] have made it plausible to conduct studies analyzing the
+impact that such deeper information has on the greater action understanding
+problem. However, detailed automatic segmentation of human action has
+omparatively been unexplored. In this paper, we introduce a hierarchical
+MRF model to automatically segment human action boundaries in videos
+“in-the-wild” (see Fig. 1).
+We first propose a human motion saliency representation which incor-
+porates two parts: foreground motion and human appearance information.
+For foreground motion estimation, we propose a new motion saliency fea-
+ture by using long-term trajectories to build a camera motion model, and
+then measure the motion saliency via the deviation from the camera model.
+For human appearance information, we use a DPM person detector trained
+on PASCAL VOC 2007 and construct a saliency map by averaging the nor-
+malized detection score of all the scale and all components.
+Then, to segment the human action, we start by applying hierarchical"
+01bef320b83ac4405b3fc5b1cff788c124109fb9,Translating Head Motion into Attention - Towards Processing of Student's Body-Language,"de Lausanne
+RLC D1 740, CH-1015
+Lausanne
+de Lausanne
+RLC D1 740, CH-1015
+Lausanne
+de Lausanne
+RLC D1 740, CH-1015
+Lausanne
+Translating Head Motion into Attention - Towards
+Processing of Student’s Body-Language
+Mirko Raca
+CHILI Laboratory
+Łukasz Kidzi´nski
+CHILI Laboratory
+Pierre Dillenbourg
+CHILI Laboratory
+École polytechnique fédérale
+École polytechnique fédérale
+École polytechnique fédérale"
+014844a9e6ae39a101fb79f103aa047699f88246,Interpretable Counting for Visual Question Answering,"Under review as a conference paper at ICLR 2018
+INTERPRETABLE COUNTING FOR VISUAL QUESTION
+ANSWERING
+Anonymous authors
+Paper under double-blind review"
+017229c2df23c542b30c59f4a5eeb747e3d34729,Efficient Object Recognition using Convolution Neural Networks Theorem,"International Journal of Computer Applications (0975 – 8887)
+Volume 161 – No 2, March 2017
+Efficient Object Recognition using Convolution Neural
+Networks Theorem
+Aarushi Thakral
+VIT University
+Vellore
+Tamil Nadu
+Shaurya Shekhar
+VIT University
+Vellore
+Tamil Nadu
+to overcome"
+0183eff3a60f44bc6e4bcade37518f6470af3437,Human Identification Using Temporal Information Preserving Gait Template,"Human Identification Using Temporal
+Information Preserving Gait Template
+Chen Wang, Junping Zhang, IEEE Member, Liang Wang, IEEE Senior Member,
+Jian Pu, and Xiaoru Yuan, IEEE Member"
+01c9f0be6a300f385274b72a5463a650e51e300a,Support Vector Data Description based on PCA features for face detection,"SUPPORT VECTOR DATA DESCRIPTION BASED ON PCA FEATURES FOR FACE
+DETECTION
+Ver´onica Vilaplana and Ferran Marqu´es
+phone: + (34)934011066, fax: + (34)934016447, email:
+Jordi Girona, 1-3, 08034 Barcelona, SPAIN
+Image Processing Group, Universitat Polit`ecnica de Catalunya
+web: gps-tsc.upc.es/imatge"
+01c8d7a3460422412fba04e7ee14c4f6cdff9ad7,Rule Based System for Recognizing Emotions Using Multimodal Approach,"(IJACSA) International Journal of Advanced Computer Science and Applications,
+Vol. 4, No. 7, 2013
+Rule Based System for Recognizing Emotions Using
+Multimodal Approach
+Preeti Khanna
+Information System
+SBM, SVKM’s NMIMS
+Mumbai, India"
+01d785bb989850019001a418a16202fd7502ac14,Hierarchical object detection and tracking with an Implicit Shape Model,"Hierarchical object detection and tracking with an Implicit Shape
+Model
+K. Jüngling1, S. Becker1, and M. Arens1
+Object Recognition, Fraunhofer IOSB, Ettlingen, Germany"
+01f5689a4010ae14ca444c36bec81f12ce528912,"Extended Fast Search Clustering Algorithm: Widely Density Clusters, No Density Peaks","EXTENDED FAST SEARCH CLUSTERING
+ALGORITHM: WIDELY DENSITY
+CLUSTERS, NO DENSITY PEAKS
+Zhang WenKai1 and Li Jing2
+,2School of Computer Science and Technology, University of Science and
+Technology of China, Hefei, 230026, China"
+0163d847307fae508d8f40ad193ee542c1e051b4,Classemes and Other Classifier-Based Features for Efficient Object Categorization,"JOURNAL OF LATEX CLASS FILES, VOL. 6, NO. 1, JANUARY 2007
+Classemes and Other Classifier-based
+Features for Efficient Object Categorization
+- Supplementary material -
+Alessandro Bergamo, and Lorenzo Torresani, Member, IEEE
+LOW-LEVEL FEATURES
+We extract the SIFT [1] features for our descriptor
+ccording to the following pipeline. We first convert
+each image to gray-scale, then we normalize the con-
+trast by forcing the 0.01% of lightest and darkest pixels
+to be mapped to white and black respectively, and
+linearly rescaling the values in between. All images
+exceeding 786,432 pixels of resolution are downsized
+to this maximum value while keeping the aspect ratio.
+The 128-dimensional SIFT descriptors are computed
+from the interest points returned by a DoG detec-
+tor [2]. We finally compute a Bag-Of-Word histogram
+of these descriptors, using a K-means vocabulary of
+500 words.
+CLASSEMES"
+01c4cf9c7c08f0ad3f386d88725da564f3c54679,Interpretability Beyond Feature Attribution: Quantitative Testing with Concept Activation Vectors (TCAV),"Interpretability Beyond Feature Attribution:
+Quantitative Testing with Concept Activation Vectors (TCAV)
+Been Kim Martin Wattenberg Justin Gilmer Carrie Cai James Wexler
+Fernanda Viegas Rory Sayres"
+017ce398e1eb9f2eed82d0b22fb1c21d3bcf9637,Face Recognition with Harmonic De-lighting,"FACE RECOGNITION WITH HARMONIC DE-LIGHTING
+Laiyun Qing1,2, Shiguang Shan2, Wen Gao1,2
+ICT-ISVISION Joint R&D Laboratory for Face Recognition, CAS, Beijing, China, 100080
+Graduate School, CAS, Beijing, China, 100080
+Emails: {lyqing, sgshan, wgao}jdl.ac.cn"
+014e1186209e4f942f3b5ba29b6b039c8e99ad88,Social interactions: A first-person perspective,"Social Interactions: A First-Person
+Perspective
+Alireza Fathi, Jessica K. Hodgins, James M. Rehg
+CVPR 2012
+Bora Çelikkale"
+0135747b4d3c9a2d983f7d0d9f4c39e094825149,Embedded wavelet-based face recognition under variable position,"Embedded wavelet-based face recognition under variable
+position
+Pascal Cotreta, Stéphane Chevobbea and Mehdi Darouicha
+CEA, LIST, Laboratoire Adéquation Algorithme Architecture, Gif-sur-Yvette, F-91191 France"
+014e3d0fa5248e6f4634dc237e2398160294edce,What does 2D geometric information really tell us about 3D face shape?,"Int J Comput Vis manuscript No.
+(will be inserted by the editor)
+What does 2D geometric information really tell us about
+D face shape?
+Anil Bas1 · William A. P. Smith1
+Received: date / Accepted: date"
+01ababc0985143ad57320b0599fb2f581d79d3c2,Unobtrusive Low Cost Pupil Size Measurements using Web cameras,"Unobtrusive Low Cost Pupil Size Measurements using Web cameras
+Sergios Petridis, Theodoros Giannakopoulos and Costantine D. Spyropoulos
+National Center for Scientific Research ""Demokritos""
+Unobtrusive every day health monitoring can be of important use for the elderly population. In
+particular, pupil size may be a valuable source of information, since, apart from pathological
+ases, it can reveal the emotional state, the fatigue and the ageing. To allow for unobtrusive
+monitoring to gain acceptance, one should seek for ef‌f‌icient methods of monitoring using com-
+mon low-cost hardware. This paper describes a method for monitoring pupil sizes using a
+ommon web camera in real time. Our method works by first detecting the face and the eyes
+rea. Subsequently, optimal iris and sclera location and radius, modelled as ellipses, are found
+using ef‌f‌icient filtering. Finally, the pupil center and radius is estimated by optimal filtering
+within the area of the iris. Experimental result show both the ef‌f‌iciency and the effectiveness
+of our approach.
+Keywords: video analysis, eye tracking, pupil size estimation, physiological measurements
+Motivation
+Unobtrusive every day health monitoring can be of im-
+portant use for the elderly population.
+In particular, pupil
+size may be a valuable source of information, since, apart
+from pathological cases, it can reveal the emotional state, the"
+016473c5b809ff55304a2923c36eaf58f02f02e4,DensePose: Dense Human Pose Estimation In The Wild,"DensePose: Dense Human Pose Estimation In The Wild
+Rıza Alp G¨uler∗
+Natalia Neverova
+Iasonas Kokkinos
+INRIA-CentraleSup´elec
+Facebook AI Research
+Facebook AI Research
+Figure 1: Dense pose estimation aims at mapping all human pixels of an RGB image to the 3D surface of the human body.
+spondences for 50K images, and train DensePose-RCNN to densely regress UV coordinates at multiple frames per second.
+Right: Partitioning and UV parametrization of the body surface."
+013ae78fc6bd26a13799fe2e07a6ad363aca9ba7,Inspiring Computer Vision System Solutions,"Inspiring Computer Vision System Solutions
+Julian Zilly 1 Amit Boyarski 2 Micael Carvalho 3 Amir Atapour Abarghouei 4 Konstantinos Amplianitis 5
+Aleksandr Krasnov 6 Massimiliano Mancini 7 Hernán Gonzalez 8 Riccardo Spezialetti 9
+Carlos Sampedro Pérez 10 Hao Li 11"
+0155c2921f060a95c0eca8c64bf62a1eaac591e4,Spatiotemporal CNNs for Pornography Detection in Videos,"Spatiotemporal CNNs for Pornography
+Detection in Videos
+Murilo Varges da Silva1,2 and Aparecido Nilceu Marana3
+UFSCar - Federal University of Sao Carlos, Sao Carlos, SP, Brazil
+IFSP - Federal Institute of Education of Sao Paulo, Birigui, SP, Brazil
+UNESP - Sao Paulo State University, Bauru, SP, Brazil"
+011e6146995d5d63c852bd776f782cc6f6e11b7b,Fast Training of Triplet-Based Deep Binary Embedding Networks,"Fast Training of Triplet-based Deep Binary Embedding Networks
+Bohan Zhuang, Guosheng Lin, Chunhua Shen∗, Ian Reid
+The University of Adelaide; and Australian Centre for Robotic Vision"
+01350214f850f43d72268df4f98b05901fbbe06c,1 Deep convolutional neural networks for detection of 2 polar mesocyclones from satellite mosaics 3,"Preprints (www.preprints.org) | NOT PEER-REVIEWED | Posted: 19 September 2018 doi:10.20944/preprints201809.0361.v1
+Article
+Deep convolutional neural networks for detection of
+polar mesocyclones from satellite mosaics
+Mikhail Krinitskiy 1,*, Polina Verezemskaya 1,2, Kirill Grashchenkov1,3, Natalia Tilinina1,
+Sergey Gulev1 and Matthew Lazzara 4
+Shirshov Institute of Oceanology, Russian Academy of Sciences, Moscow, Russia;
+Research Computing Center of Lomonosov Moscow State University, Moscow, Russia
+Moscow Institute of Physics and Technology, Moscow, Russia
+University of Wisconsin-Madison and Madison Area Technical College, Madison, Wisconsin, USA
+* Correspondence: Tel.: +7-926-141-6200"
+01f42436042ddaa48998c87109cbe46cad6e7e52,Schedtask: a hardware-assisted task scheduler,"SchedTask: A Hardware-Assisted Task Scheduler
+Prathmesh Kallurkar∗
+Microarchitecture Research Lab
+Intel Corporation
+Smruti R. Sarangi
+Department of Computer Science
+Indian Institute of Technology Delhi"
+014b4335d055679bc680a6ceb6f1a264d8ce8a4a,Are You Sure You Want To Do That? Classification with Verification,"Are You Sure You Want To Do That?
+Classification with Verification
+Harris Chan∗
+Atef Chaudhury∗
+Kevin Shen∗"
+01959ef569f74c286956024866c1d107099199f7,VQA: Visual Question Answering,"VQA: Visual Question Answering
+www.visualqa.org
+Stanislaw Antol∗1, Aishwarya Agrawal∗1, Jiasen Lu, Margaret Mitchell,
+Dhruv Batra, C. Lawrence Zitnick, Devi Parikh"
+016860404c0926dda53b9bf4745f3eb9708fa1d2,Iterative hypothesis testing for multi-object tracking in presence of features with variable reliability,"Iterative hypothesis testing for multi-object tracking in presence of
+features with variable reliability
+Amit Kumar K.C.1, Damien Delannay2 and Christophe De Vleeschouwer1
+ISPGroup, ELEN Department, Universit´e catholique de Louvain, Belgium
+{amit.kc,
+Keemotion, Belgium"
+011c5bb510c9a4c24e2fc07e7464fa8493237058,Accelerating Nearest Neighbor Search on Manycore Systems,"Accelerating Nearest Neighbor Search on Manycore
+Systems
+Lawrence Cayton
+Max Planck Institute
+Tübingen, Germany"
+01a152e7ca6accce4fa52e29b27feb76418583fb,Tracking Multiple High-Density Homogeneous Targets,"IEEE TRANSACTION ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY, VOL. X, NO. X, XXXX
+Tracking multiple high-density homogeneous targets
+Fabio Poiesi and Andrea Cavallaro"
+0144b29bde2579e0a1b8ab3a38306c5621a5c30b,Top-Down Visual Saliency via Joint CRF and Dictionary Learning,"Top-Down Visual Saliency via Joint CRF and Dictionary Learning
+Jimei Yang and Ming-Hsuan Yang
+University of California at Merced"
+01915181692c821cc5a0a703047bd5b07c1f9af5,Cross-Caption Coreference Resolution for Automatic Image Understanding,"Proceedings of the Fourteenth Conference on Computational Natural Language Learning, pages 162–171,
+Uppsala, Sweden, 15-16 July 2010. c(cid:13)2010 Association for Computational Linguistics"
+0181fec8e42d82bfb03dc8b82381bb329de00631,Discriminative Subspace Clustering,"Discriminative Subspace Clustering
+Vasileios Zografos∗1, Liam Ellis†1, and Rudolf Mester‡1 2
+CVL, Dept. of Electrical Engineering, Link¨oping University, Link¨oping, Sweden
+VSI Lab, Computer Science Department, Goethe University, Frankfurt, Germany"
+01ece1dd9a0a2a7289d791625c6c7446d38584e7,A Comparative Analysis of Classification Algorithms Applied to M5AIE-Extracted Human Poses,"A Comparative Analysis of Classification Algorithms
+Applied to M5AIE-Extracted Human Poses
+Andr´e Brand˜ao, Leandro A. F. Fernandes, and Esteban Clua
+MediaLab-UFF, Instituto de Computac¸˜ao, Universidade Federal Fluminense
+Email:
+CEP 24210-240 Niter´oi, RJ, Brazil"
+01e812ad00b7743e9b24aa070a24023f05710b8b,A Distributed Representation Based Query Expansion Approach for Image Captioning,"Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics
+nd the 7th International Joint Conference on Natural Language Processing (Short Papers), pages 106–111,
+Beijing, China, July 26-31, 2015. c(cid:13)2015 Association for Computational Linguistics"
+019a95631c49011330773e953194a0c73c61f3f0,Impairments in monkey and human face recognition in 2-year-old toddlers with Autism Spectrum Disorder and Developmental Delay.,"DOI: 10.1111/j.1467-7687.2006.00543.x
+Blackwell Publishing Ltd
+Face recognition in ASD
+PAPER
+Impairments in monkey and human face recognition in
+-year-old toddlers with Autism Spectrum Disorder and
+Developmental Delay
+Katarzyna Chawarska and Fred Volkmar
+Child Study Center, Yale University School of Medicine, New Haven, CT, USA"
+013e0fe2d203eaa33a4b42d057688815116cc6bb,Recognizing Car Fluents from Video,"Recognizing Car Fluents from Video
+Bo Li1,∗, Tianfu Wu2, Caiming Xiong3,∗ and Song-Chun Zhu2
+Beijing Lab of Intelligent Information Technology, Beijing Institute of Technology
+Department of Statistics, University of California, Los Angeles
+Metamind Inc.
+{tfwu,"
+0113b302a49de15a1d41ca4750191979ad756d2f,Matching Faces with Textual Cues in Soccer Videos,"­4244­0367­7/06/$20.00 ©2006 IEEE
+ICME 2006"
+014b8df0180f33b9fea98f34ae611c6447d761d2,Facial feature tracking and expression recognition for sign language,"Facial Feature Tracking and Expression Recognition
+for Sign Language
+˙Ismail Arı
+Computer Engineering
+Bo˜gazic.i University
+˙Istanbul, Turkey
+Email:
+Asli Uyar
+Computer Engineering
+Bo˜gazic.i University
+˙Istanbul, Turkey
+Email:
+Lale Akarun
+Computer Engineering
+Bo˜gazic.i University
+˙Istanbul, Turkey
+Email:"
+01e5eb25e262afa4289d39b964c837a22a32f5a2,Cricket activity detection,"Cricket Activity Detection
+Ashok Kumar(11164)
+Javesh Garg(11334)
+March 1, 2014"
+0136bf1d3747770a7fb4fcdeaf0b4b195815ed67,Weighted Fourier Series Representation and Its Application to Quantifying the Amount of Gray Matter,"Weighted Fourier Series Representation and
+Its Application to Quantifying the Amount
+of Gray Matter
+Moo K. Chung*, Kim M. Dalton, Li Shen, Alan C. Evans, and Richard J. Davidson"
+069f2092c5d22e6d4c1e27c30e18dc63848fa3c3,A comparison of low-level features for visual attribute recognition,"Görsel Nitelik Ö˘grenmede Alt-Düzey Özniteliklerin
+Kar¸sıla¸stırılması
+A Comparison of Low-level Features for Visual
+Attribute Recognition
+Emine Gül DANACI
+Bilgisayar Mühendisli˘gi Bölümü
+Hacettepe Üniversitesi
+Ankara, Türkiye
+Nazlı ˙IK˙IZLER C˙INB˙I¸S
+Bilgisayar Mühendisli˘gi Bölümü
+Hacettepe Üniversitesi
+Ankara, Türkiye
+Özetçe —Görsel nitelik ö˘grenme ve kullanımı, son yıllarda
+ilgisayarlı görü alanında sıklıkla ara¸stırılmaya ba¸slanmı¸s bir
+konudur. Bu çalı¸smamızda, görsel nitelik ö˘grenmeye, hangi alt
+düzey özniteliklerin daha anlamlı ve verimli sonuçlar verdi˘gini
+ra¸stırmayı amaçlamaktayız. Bu kapsamda, renk ve ¸sekil bil-
+gisini farklı detaylarda ele alan alt düzey özniteliklerin, nitelik
+sınıflandırmaya katkısı ara¸stırılmı¸s, ve deneysel olarak de˘ger-
+lendirilmi¸stir. Elde edilen sonuçlar, özellikle renk ve yerel ¸sekil"
+0601416ade6707c689b44a5bb67dab58d5c27814,Feature Selection in Face Recognition: A Sparse Representation Perspective,"Feature Selection in Face Recognition: A Sparse
+Representation Perspective
+Allan Y. Yang
+John Wright
+Yi Ma
+S. Shankar Sastry
+Electrical Engineering and Computer Sciences
+University of California at Berkeley
+Technical Report No. UCB/EECS-2007-99
+http://www.eecs.berkeley.edu/Pubs/TechRpts/2007/EECS-2007-99.html
+August 14, 2007"
+06e15d0d6f92a11bb5b46b5a3e0250cccc452c92,Diagnostic Features of Emotional Expressions Are Processed Preferentially,"Diagnostic Features of Emotional Expressions Are
+Processed Preferentially
+Elisa Scheller1, Christian Bu¨ chel2, Matthias Gamer2*
+Department of Psychiatry and Psychotherapy, University Medical Center Freiburg, Freiburg, Germany, 2 Department of Systems Neuroscience, University Medical Center
+Hamburg-Eppendorf, Hamburg, Germany"
+064b797aa1da2000640e437cacb97256444dee82,Coarse-to-fine Face Alignment with Multi-Scale Local Patch Regression,"Coarse-to-fine Face Alignment with Multi-Scale Local Patch Regression
+Zhiao Huang
+Megvii Inc.
+Erjin Zhou
+Megvii Inc.
+Zhimin Cao
+Megvii Inc."
+06f146dfcde10915d6284981b6b84b85da75acd4,Scalable Face Image Retrieval Using Attribute-Enhanced Sparse Codewords,"Scalable Face Image Retrieval using
+Attribute-Enhanced Sparse Codewords
+Bor-Chun Chen, Yan-Ying Chen, Yin-Hsi Kuo, Winston H. Hsu"
+0697bd81844d54064d992d3229162fe8afcd82cb,User-driven mobile robot storyboarding: Learning image interest and saliency from pairwise image comparisons,"User-driven mobile robot storyboarding: Learning image interest and
+saliency from pairwise image comparisons
+Michael Burke1"
+06cfc431b70ec6a6783284953a668984600e77e2,A Framework for Human Pose Estimation in Videos,"A Framework for Human Pose Estimation in
+Videos
+Dong Zhang and Mubarak Shah"
+06262d6beeccf2784e4e36a995d5ee2ff73c8d11,Recognize Actions by Disentangling Components of Dynamics,"Recognize Actions by Disentangling Components of Dynamics
+Yue Zhao1, Yuanjun Xiong1,2, and Dahua Lin1
+CUHK - SenseTime Joint Lab, The Chinese University of Hong Kong 2Amazon Rekognition"
+0690ba31424310a90028533218d0afd25a829c8d,Semantic Image Segmentation with Deep Convolutional Nets and Fully Connected CRFs,"Published as a conference paper at ICLR 2015
+SEMANTIC IMAGE SEGMENTATION WITH DEEP CON-
+VOLUTIONAL NETS AND FULLY CONNECTED CRFS
+Liang-Chieh Chen
+Univ. of California, Los Angeles
+George Papandreou ∗
+Google Inc.
+Iasonas Kokkinos
+CentraleSup´elec and INRIA
+Kevin Murphy
+Google Inc.
+Alan L. Yuille
+Univ. of California, Los Angeles"
+063f0e6afe13df9913617dbc2230ad4263a595bc,Loneliness and Hypervigilance to Social Cues in Females: An Eye-Tracking Study,"RESEARCH ARTICLE
+Loneliness and Hypervigilance to Social Cues
+in Females: An Eye-Tracking Study
+Gerine M. A. Lodder1*, Ron H. J. Scholte1¤a, Ivar A. H. Clemens2, Rutger C. M. E. Engels1¤b,
+Luc Goossens3, Maaike Verhagen1
+Behavioural Science Institute, Radboud University, Nijmegen, The Netherlands, 2 Donders Institute for
+Brain, Cognition, and Behaviour, Radboud University, Nijmegen, The Netherlands, 3 Research Group
+School Psychology and Child and Adolescent Development, KU Leuven, Leuven, Belgium
+¤a Current address: Praktikon, Nijmegen, The Netherlands
+¤b Current address: The Trimbos Institute, Netherlands Institute of Mental Health and Addiction, Utrecht,
+The Netherlands"
+06a23ffbd9752ce204197df59812b2ebd1a097ff,Feedforward semantic segmentation with zoom-out features,"Feedforward semantic segmentation with zoom-out features
+Mohammadreza Mostajabi, Payman Yadollahpour and Gregory Shakhnarovich
+Toyota Technological Institute at Chicago"
+06de3eab314437cc3ed08c3db5171a79c1f684c6,Boosting patch-based scene text script identification with ensembles of conjoined networks,"Boosting patch-based scene text script identification with
+ensembles of conjoined networks
+Lluis Gomez, Anguelos Nicolaou, Dimosthenis Karatzas
+Computer Vision Center, Universitat Autonoma de Barcelona. Edifici O, Campus UAB, 08193 Bellaterra (Cerdanyola)
+Barcelona, Spain. E-mail:"
+06774cc8b0ab364866beaf3efda1b2d012a7bcf9,MobileNetV2: Inverted Residuals and Linear Bottlenecks,"MobileNetV2: Inverted Residuals and Linear Bottlenecks
+Mark Sandler Andrew Howard Menglong Zhu Andrey Zhmoginov Liang-Chieh Chen
+{sandler, howarda, menglong, azhmogin,
+Google Inc."
+06d93a40365da90f30a624f15bf22a90d9cfe6bb,Learning from Candidate Labeling Sets,"Learning from Candidate Labeling Sets
+Idiap Research Institute and EPF Lausanne
+Luo Jie
+Francesco Orabona
+DSI, Universit`a degli Studi di Milano"
+06ef2ba33ec911aa0102fb938b53bd3cc36a475f,Introducing FoxFaces: A 3-in-1 Head Dataset,
+06992ca951456bb88523f702f904dfd23eb27c53,Using Mobile Platform to Detect and Alerts Driver Fatigue,"International Journal of Computer Applications (0975 – 8887)
+Volume 123 – No.8, August 2015
+Using Mobile Platform to Detect and Alerts
+Maysoon F. Abulkhair
+Department of Information
+Technology, Faculty of
+Computing and Information
+Technology, King Abdulaziz
+University
+B.P. 42808 Zip Code 21551-
+Girl Section, Jeddah, Saudi
+Arabia
+Driver Fatigue
+Hesham A. Salman
+Department of Information
+Systems
+Faculty of Computing and
+Information Technology King
+Abdulaziz University
+Lamiaa F. Ibrahim"
+06e7e99c1fdb1da60bc3ec0e2a5563d05b63fe32,WhittleSearch: Image search with relative attribute feedback,"WhittleSearch: Image Search with Relative Attribute Feedback
+Adriana Kovashka, Devi Parikh and Kristen Grauman
+(Supplementary Material)
+Comparative Qualitative Search Results
+We present three qualitative search results for human-generated feedback, in addition to those
+shown in the paper. Each example shows one search iteration, where the 20 reference images are
+randomly selected (rather than ones that match a keyword search, as the image examples in the
+main paper illustrate). For each result, the first figure shows our method and the second figure
+shows the binary feedback result for the corresponding target image. Note that for our method,
+“more/less X” (where X is an attribute) means that the target image is more/less X than the
+reference image which is shown.
+Figures 1 and 2 show results for human-generated relative attribute and binary feedback, re-
+spectively, when both methods are used to target the same “mental image” of a shoe shown in the
+top left bubble. The top right grid of 20 images are the reference images displayed to the user, and
+those outlined and annotated with constraints are the ones chosen by the user to give feedback.
+The bottom row of images in either figure shows the top-ranked images after integrating the user’s
+feedback into the scoring function, revealing the two methods’ respective performance. We see that
+while both methods retrieve high-heeled shoes, only our method retrieves images that are as “open”
+s the target image. This is because using the proposed approach, the user was able to comment
+explicitly on the desired openness property."
+064aaad2a9ac5044b333714e61955631faee87fd,Face Recognition using Radial Curves and Back Propagation Neural Network for Frontal Faces under Various Challenges,"International Journal of Computer Applications (0975 – 8887)
+International Conference on Advances in Science and Technology 2015 (ICAST 2015)
+Face Recognition using Radial Curves and Back
+Propagation Neural Network for frontal faces under
+various challenges
+Latasha Keshwani
+Electronics and Telecommunication Department
+Datta Meghe College of Engineering, Airoli, Mumbai
+University, (MS), India"
+06e768d74f076b251d53b0c86fc9910d7243bdc6,Effective and efficient visual description based on local binary patterns and gradient distribution for object recognition,"Effective and ef‌f‌icient visual description based on local
+inary patterns and gradient distribution for object
+recognition
+Chao Zhu
+To cite this version:
+Chao Zhu. Effective and ef‌f‌icient visual description based on local binary patterns and gradient
+distribution for object recognition. Other. Ecole Centrale de Lyon, 2012. English. <NNT :
+012ECDL0005>. <tel-00755644>
+HAL Id: tel-00755644
+https://tel.archives-ouvertes.fr/tel-00755644
+Submitted on 21 Nov 2012
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destin´ee au d´epˆot et `a la diffusion de documents
+scientifiques de niveau recherche, publi´es ou non,"
+066d71fcd997033dce4ca58df924397dfe0b5fd1,Iranian Face Database and Evaluation with a New Detection Algorithm,"(cid:1)(cid:2)(cid:3)(cid:4)(cid:5)(cid:3)(cid:4)(cid:6)(cid:7)(cid:3)(cid:8)(cid:9)(cid:6)(cid:10)(cid:3)(cid:11)(cid:3)(cid:12)(cid:3)(cid:13)(cid:9)
+(cid:3)(cid:4)(cid:14)(cid:6)(cid:15)(cid:16)(cid:3)(cid:17)(cid:18)(cid:3)(cid:11)(cid:5)(cid:19)(cid:4) (cid:20)(cid:5)(cid:11)(cid:21)(cid:6)(cid:3)(cid:6)(cid:22)(cid:9)(cid:20)(cid:6)(cid:10)(cid:9)(cid:11)(cid:9)(cid:8)(cid:11)(cid:5)(cid:19)(cid:4)(cid:6)(cid:23)(cid:17)(cid:24)(cid:19)(cid:2)(cid:5)(cid:11)(cid:21)(cid:25)
+(cid:26)(cid:11)(cid:5)(cid:8)(cid:17)(cid:6)(cid:27)(cid:1)(cid:9)(cid:22)(cid:8)(cid:18)(cid:1)(cid:28)(cid:12)(cid:6)(cid:29)(cid:4)(cid:20)(cid:11)(cid:6)(cid:24)(cid:30)(cid:1)(cid:15)(cid:25)(cid:1)(cid:31)(cid:8)(cid:20)(cid:8) (cid:14)(cid:1)!(cid:8) (cid:8)(cid:6)(cid:4)(cid:1)""(cid:16)(cid:8)(cid:16)(cid:20)(cid:14)(cid:1)(cid:3)(cid:15)(cid:8)(cid:22)(cid:4)(cid:12)(cid:1)(cid:23)(cid:5)(cid:29)(cid:18)(cid:14)(cid:1)(cid:31)(cid:8)(cid:20)(cid:8) (cid:14)(cid:1)(cid:26)!(cid:9)(cid:13)(cid:14)(cid:1)#(cid:17)(cid:8)(cid:6)(cid:5)$(cid:1)(cid:17)(cid:4)(cid:5)%(cid:8)(cid:10)(cid:8)(cid:11)(cid:6)(cid:8)(cid:12)&(cid:30)(cid:8)(cid:16)(cid:15)(cid:15)(cid:21)(cid:27)(cid:15)(cid:17)
+(cid:3)(cid:4)(cid:5)(cid:6)(cid:7)(cid:8)(cid:1)(cid:9)(cid:10)(cid:10)(cid:8)(cid:11)(cid:6)(cid:8)(cid:12)(cid:1)(cid:13)(cid:6)(cid:7)(cid:14) (cid:3)(cid:15)(cid:16)(cid:8)(cid:17)(cid:17)(cid:8)(cid:18)(cid:1)(cid:3)(cid:8)(cid:16)(cid:18)(cid:6)(cid:1)(cid:19)(cid:4)(cid:16)(cid:11)(cid:16)(cid:6)(cid:10)(cid:6)(cid:14)(cid:1)(cid:19)(cid:20)(cid:21)(cid:1)(cid:9)(cid:22)(cid:8)(cid:17)(cid:1)(cid:23)(cid:8)(cid:11)(cid:24)(cid:8)(cid:12)(cid:25)(cid:8)(cid:20)(cid:18)
+(cid:23)(cid:12)(cid:13)(cid:11)(cid:2)(cid:3)(cid:8)(cid:11)$(cid:1)’(cid:16)(cid:6)(cid:11) ((cid:8)((cid:4)(cid:20)(cid:1)(cid:6)(cid:12)(cid:24)(cid:20)(cid:15)(cid:18))(cid:27)(cid:4)(cid:11)(cid:1)(cid:8)(cid:1)(cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1)(cid:15)(cid:25)(cid:1)(cid:15)(cid:29)(cid:4)(cid:20)(cid:1)*(cid:14)+,,(cid:1)(cid:27)(cid:15)(cid:5)(cid:15)(cid:20)(cid:1)(cid:6)(cid:17)(cid:8)-(cid:4)(cid:11)(cid:1).(cid:4)(cid:1)(cid:27)(cid:15)(cid:5)(cid:5)(cid:4)(cid:27)(cid:24)(cid:4)(cid:18)(cid:1)(cid:25)(cid:20)(cid:15)(cid:17)(cid:1)+(cid:2)+(cid:1)(cid:18)(cid:6)(cid:25)(cid:25)(cid:4)(cid:20)(cid:4)(cid:12)(cid:24)(cid:1)(cid:16))(cid:17)(cid:8)(cid:12)(cid:1)(cid:25)(cid:8)(cid:27)(cid:4)(cid:11) (cid:6)(cid:12)(cid:1)(cid:8)-(cid:4)(cid:11)(cid:1)(cid:10)(cid:4)(cid:24).(cid:4)(cid:4)(cid:12)(cid:1)/
+(cid:8)(cid:12)(cid:18) 01(cid:21)(cid:1)2(cid:4)(cid:1)(cid:12)(cid:8)(cid:17)(cid:4)(cid:18)(cid:1)(cid:24)(cid:16)(cid:6)(cid:11)(cid:1)(cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1)(cid:24)(cid:16)(cid:4)(cid:1)(cid:26)(cid:20)(cid:8)(cid:12)(cid:6)(cid:8)(cid:12)(cid:1)3(cid:8)(cid:27)(cid:4)(cid:1)(cid:19)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1)4(cid:26)3(cid:19)(cid:23)5(cid:21)(cid:1)’(cid:15)(cid:1)(cid:4)(cid:29)(cid:8)(cid:5))(cid:8)(cid:24)(cid:4)(cid:1)(cid:24)(cid:16)(cid:4)(cid:1)(cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1)(cid:24)(cid:16)(cid:4)(cid:1)(cid:4)6((cid:4)(cid:20)(cid:6)(cid:17)(cid:4)(cid:12)(cid:24)(cid:8)(cid:5)(cid:1)(cid:20)(cid:4)(cid:11))(cid:5)(cid:24)(cid:1)(cid:15)(cid:25)(cid:1)(cid:8)(cid:1)(cid:12)(cid:4).(cid:1)(cid:25)(cid:8)(cid:27)(cid:6)(cid:8)(cid:5)(cid:1)
+(cid:25)(cid:4)(cid:8)(cid:24))(cid:20)(cid:4)(cid:1)(cid:18)(cid:4)(cid:24)(cid:4)(cid:27)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1)(cid:8)(cid:5)-(cid:15)(cid:20)(cid:6)(cid:24)(cid:16)(cid:17)(cid:1)(cid:6)(cid:11)(cid:1)(cid:20)(cid:4)((cid:15)(cid:20)(cid:24)(cid:4)(cid:18)(cid:21)
+(cid:26)(cid:9)(cid:27) (cid:28)(cid:19)(cid:2)(cid:14)(cid:13)$(cid:1)3(cid:8)(cid:27)(cid:4)(cid:1)(cid:26)(cid:17)(cid:8)-(cid:4)(cid:1)(cid:19)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:14)(cid:1)3(cid:8)(cid:27)(cid:6)(cid:8)(cid:5)(cid:1)3(cid:4)(cid:8)(cid:24))(cid:20)(cid:4)(cid:1)(cid:19)(cid:4)(cid:24)(cid:4)(cid:27)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1)(cid:9)(cid:5)-(cid:15)(cid:20)(cid:6)(cid:24)(cid:16)(cid:17)(cid:11)(cid:14)(cid:1)(cid:9)-(cid:4)(cid:1)7(cid:5)(cid:8)(cid:11)(cid:11)(cid:6)(cid:25)(cid:6)(cid:27)(cid:8)(cid:24)(cid:6)(cid:15)(cid:12)(cid:21)
+(cid:29) (cid:1)(cid:4)(cid:11)(cid:2)(cid:19)(cid:14)(cid:18)(cid:8)(cid:11)(cid:5)(cid:19)(cid:4)
+8)(cid:17)(cid:8)(cid:12)(cid:1) (cid:25)(cid:8)(cid:27)(cid:4)(cid:1) (cid:6)(cid:11)(cid:1) (cid:24)(cid:16)(cid:4)(cid:1) (cid:17)(cid:15)(cid:11)(cid:24)(cid:1) (cid:27)(cid:15)(cid:17)(cid:17)(cid:15)(cid:12)(cid:1) (cid:8)(cid:12)(cid:18)(cid:1) )(cid:11)(cid:4)(cid:25))(cid:5)(cid:1) (cid:7)(cid:4)(cid:30)(cid:1) (cid:24)(cid:15)(cid:1) (cid:8)(cid:1)
+((cid:4)(cid:20)(cid:11)(cid:15)(cid:12)9(cid:11)(cid:1) (cid:6)(cid:18)(cid:4)(cid:12)(cid:24)(cid:6)(cid:24)(cid:30)(cid:21)(cid:1) (cid:9)(cid:11)(cid:1) (cid:16))(cid:17)(cid:8)(cid:12)(cid:11)(cid:14)(cid:1) .(cid:4)(cid:1) (cid:8)(cid:20)(cid:4)(cid:1) (cid:8)(cid:10)(cid:5)(cid:4)(cid:1) (cid:24)(cid:15)(cid:1) (cid:27)(cid:8)(cid:24)(cid:4)-(cid:15)(cid:20)(cid:6)(cid:22)(cid:4)(cid:1) (cid:8)(cid:1)
+((cid:4)(cid:20)(cid:11)(cid:15)(cid:12):(cid:11)(cid:1)(cid:8)-(cid:4)(cid:1)-(cid:20)(cid:15))((cid:1)(cid:25)(cid:20)(cid:15)(cid:17)(cid:1)(cid:8)(cid:1)((cid:4)(cid:20)(cid:11)(cid:15)(cid:12):(cid:11)(cid:1)(cid:25)(cid:8)(cid:27)(cid:4)(cid:1)(cid:6)(cid:17)(cid:8)-(cid:4)(cid:1)(cid:8)(cid:12)(cid:18)(cid:1)(cid:8)(cid:20)(cid:4)(cid:1)(cid:15)(cid:25)(cid:24)(cid:4)(cid:12)(cid:1)
+(cid:8)(cid:10)(cid:5)(cid:4)(cid:1)(cid:24)(cid:15)(cid:1)(cid:10)(cid:4)(cid:1);)(cid:6)(cid:24)(cid:4)(cid:1)((cid:20)(cid:4)(cid:27)(cid:6)(cid:11)(cid:4)(cid:1)(cid:6)(cid:12)(cid:1)(cid:24)(cid:16)(cid:6)(cid:11)(cid:1)(cid:4)(cid:11)(cid:24)(cid:6)(cid:17)(cid:8)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1)<(cid:2)=(cid:21)(cid:1)(cid:26)(cid:12)(cid:1)(cid:20)(cid:4)(cid:27)(cid:4)(cid:12)(cid:24)(cid:1)(cid:30)(cid:4)(cid:8)(cid:20)(cid:11)(cid:14)(cid:1)
+(cid:25)(cid:8)(cid:27)(cid:4)(cid:1) (cid:20)(cid:4)(cid:27)(cid:15)-(cid:12)(cid:6)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1) (cid:8)(cid:12)(cid:18)(cid:1) (cid:20)(cid:4)(cid:5)(cid:8)(cid:24)(cid:4)(cid:18)(cid:1) .(cid:15)(cid:20)(cid:7)(cid:11)(cid:1) (cid:16)(cid:8)(cid:29)(cid:4)(cid:1) (cid:20)(cid:4)(cid:27)(cid:4)(cid:6)(cid:29)(cid:4)(cid:18)(cid:1) (cid:11))(cid:10)(cid:11)(cid:24)(cid:8)(cid:12)(cid:24)(cid:6)(cid:8)(cid:5)(cid:1)
+(cid:8)(cid:24)(cid:24)(cid:4)(cid:12)(cid:24)(cid:6)(cid:15)(cid:12)(cid:1) (cid:25)(cid:20)(cid:15)(cid:17)(cid:1) (cid:20)(cid:4)(cid:11)(cid:4)(cid:8)(cid:20)(cid:27)(cid:16)(cid:4)(cid:20)(cid:11)(cid:1) (cid:6)(cid:12)(cid:1) (cid:10)(cid:6)(cid:15)(cid:17)(cid:4)(cid:24)(cid:20)(cid:6)(cid:27)(cid:11)(cid:14)(cid:1) ((cid:8)(cid:24)(cid:24)(cid:4)(cid:20)(cid:12)(cid:1) (cid:20)(cid:4)(cid:27)(cid:15)-(cid:12)(cid:6)(cid:24)(cid:6)(cid:15)(cid:12)(cid:14)(cid:1)
+(cid:8)(cid:12)(cid:18)(cid:1) (cid:27)(cid:15)(cid:17)()(cid:24)(cid:4)(cid:20) (cid:29)(cid:6)(cid:11)(cid:6)(cid:15)(cid:12)(cid:1) (cid:27)(cid:15)(cid:17)(cid:17))(cid:12)(cid:6)(cid:24)(cid:6)(cid:4)(cid:11)(cid:1) </(cid:14)(cid:1) *(cid:14)(cid:1) > (cid:8)(cid:12)(cid:18) 1=(cid:21)(cid:1) ’(cid:16)(cid:4)(cid:11)(cid:4)(cid:1)
+(cid:27)(cid:15)(cid:17)(cid:17)(cid:15)(cid:12)(cid:1)(cid:6)(cid:12)(cid:24)(cid:4)(cid:20)(cid:4)(cid:11)(cid:24)(cid:11)(cid:1)(cid:8)(cid:17)(cid:15)(cid:12)-(cid:1)(cid:20)(cid:4)(cid:11)(cid:4)(cid:8)(cid:20)(cid:27)(cid:16)(cid:4)(cid:20)(cid:11)(cid:1)(cid:17)(cid:15)(cid:24)(cid:6)(cid:29)(cid:8)(cid:24)(cid:4)(cid:18)(cid:1))(cid:11)(cid:1)(cid:24)(cid:15)(cid:1)(cid:27)(cid:15)(cid:5)(cid:5)(cid:4)(cid:27)(cid:24)(cid:1)(cid:8)(cid:1)
+(cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1) (cid:15)(cid:25)(cid:1) (cid:25)(cid:8)(cid:27)(cid:6)(cid:8)(cid:5)(cid:1) (cid:6)(cid:17)(cid:8)-(cid:4)(cid:11)(cid:1) (cid:25)(cid:20)(cid:15)(cid:17)(cid:1) ((cid:4)(cid:15)((cid:5)(cid:4)(cid:1) (cid:6)(cid:12)(cid:1) (cid:18)(cid:6)(cid:25)(cid:25)(cid:4)(cid:20)(cid:4)(cid:12)(cid:24)(cid:1) (cid:8)-(cid:4)(cid:11)(cid:21) ’(cid:16)(cid:4)(cid:1)
+(cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:1)(cid:6)(cid:11)(cid:1)(cid:6)(cid:12)(cid:24)(cid:4)(cid:12)(cid:18)(cid:4)(cid:18)(cid:1)(cid:25)(cid:15)(cid:20)(cid:1)(cid:18)(cid:6)(cid:11)(cid:24)(cid:20)(cid:6)(cid:10))(cid:24)(cid:6)(cid:15)(cid:12)(cid:1)(cid:24)(cid:15)(cid:1)(cid:20)(cid:4)(cid:11)(cid:4)(cid:8)(cid:20)(cid:27)(cid:16)(cid:4)(cid:20)(cid:11)(cid:21)
+’(cid:16)(cid:4)(cid:20)(cid:4)(cid:1) (cid:8)(cid:20)(cid:4)(cid:1) (cid:17)(cid:8)(cid:12)(cid:30)(cid:1) ()(cid:10)(cid:5)(cid:6)(cid:27)(cid:8)(cid:5)(cid:5)(cid:30)(cid:1) (cid:8)(cid:29)(cid:8)(cid:6)(cid:5)(cid:8)(cid:10)(cid:5)(cid:4)(cid:1) (cid:18)(cid:8)(cid:24)(cid:8)(cid:10)(cid:8)(cid:11)(cid:4)(cid:11)(cid:1) (cid:25)(cid:15)(cid:20)(cid:1) (cid:25)(cid:8)(cid:27)(cid:4)(cid:1)"
+06560d5721ecc487a4d70905a485e22c9542a522,Deep Facial Attribute Detection in the Wild: From General to Specific,"SUN, YU: DEEP FACIAL ATTRIBUTE DETECTION IN THE WILD
+Deep Facial Attribute Detection in the Wild:
+From General to Specific
+Yuechuan Sun
+Jun Yu
+Department of Automation
+University of Science and Technology
+of China
+Hefei, China"
+066000d44d6691d27202896691f08b27117918b9,Vision-Based Analysis of Small Groups in Pedestrian Crowds,"Vision-based Analysis of Small Groups in
+Pedestrian Crowds
+Weina Ge, Robert T. Collins, Senior Member, IEEE, and R. Barry Ruback
+E-mail:"
+061fb1b627554f52ff8f3ebb531e326767d845ec,Globally-optimal greedy algorithms for tracking a variable number of objects,"Globally-Optimal Greedy Algorithms for Tracking a Variable Number of
+Objects
+Hamed Pirsiavash Deva Ramanan Charless C. Fowlkes
+Department of Computer Science, University of California, Irvine"
+06599d41a3256245aa0cb2e9e56b29459c2e2c69,VisualWord2Vec (Vis-W2V): Learning Visually Grounded Word Embeddings Using Abstract Scenes,Visual Word2Vec (vis-w2v): Learning Visually Grounded
+06dfc1c6f62bffd5f8b8619d8c51db1ec4d25f3f,Fusing Local Patterns of Gabor Magnitude and Phase for Face Recognition,"Fusing Local Patterns of Gabor Magnitude
+and Phase for Face Recognition
+Shufu Xie, Shiguang Shan, Member, IEEE, Xilin Chen, Senior Member, IEEE, and Jie Chen, Member, IEEE"
+06f7e0aee7fc5807ab862432a4e5ade2cda73c4b,Flowing ConvNets for Human Pose Estimation in Videos,"Flowing ConvNets for Human Pose Estimation in Videos
+Tomas Pfister1, James Charles2 and Andrew Zisserman1
+Objective & Contributions
+Estimate 2D upper body joint positions (wrist, elbow, shoulder, head) with high accuracy in real-time
+- A better ConvNet for general image (x,y) position regression
+- Spatial fusion layers that learn an implicit spatial model between predicted positions
+- Optical flow for propagating position predictions from neighbouring frames
+. Regress a heatmap for each position
+Heatmap
+ConvNet
+(fully convolutional)
+256 x 256 x 3
+64 x 64 x N
+. Represent positions by Gaussians
+k joints
+Idea 1: Implicit ConvNet spatial model
+. Add fusion layers to learn dependencies between predicted positions
+conv1
+5x5x128
+pool 2x2"
+069c40a8ca5305c9a0734c1f6134eb19a678f4ab,LabelMe: A Database and Web-Based Tool for Image Annotation,"Int J Comput Vis (2008) 77: 157–173
+DOI 10.1007/s11263-007-0090-8
+LabelMe: A Database and Web-Based Tool for Image Annotation
+Bryan C. Russell · Antonio Torralba ·
+Kevin P. Murphy · William T. Freeman
+Received: 6 September 2005 / Accepted: 11 September 2007 / Published online: 31 October 2007
+© Springer Science+Business Media, LLC 2007"
+06fe63b34fcc8ff68b72b5835c4245d3f9b8a016,Learning semantic representations of objects and their parts,"Mach Learn
+DOI 10.1007/s10994-013-5336-9
+Learning semantic representations of objects
+and their parts
+Grégoire Mesnil · Antoine Bordes · Jason Weston ·
+Gal Chechik · Yoshua Bengio
+Received: 24 May 2012 / Accepted: 26 February 2013
+© The Author(s) 2013"
+069c9b3c7cf82310d3e06831208aea15f6fdfc32,Power management for mobile games on asymmetric multi-cores,"Power Management for Mobile Games
+on Asymmetric Multi-Cores
+Anuj Pathania, Santiago Pagani, Muhammad Shafique, Jörg Henkel
+Chair for Embedded Systems (CES), Karlsruhe Institute of Technology (KIT), Karlsruhe, Germany
+Corresponding Author:"
+06d89147794d0889b2e031b0c6811423806f5ea0,A 3D Morphable Eye Region Model for Gaze Estimation,"A 3D Morphable Eye Region Model
+for Gaze Estimation
+Anonymous ECCV submission
+Paper ID 93"
+06aab105d55c88bd2baa058dc51fa54580746424,Image Set-Based Collaborative Representation for Face Recognition,"Image Set based Collaborative Representation for
+Face Recognition
+Pengfei Zhu, Student Member, IEEE, Wangmeng Zuo, Member, IEEE, Lei Zhang, Member, IEEE, Simon C.K. Shiu,
+Member, IEEE, David Zhang, Fellow, IEEE"
+06262d14323f9e499b7c6e2a3dec76ad9877ba04,Real-Time Pose Estimation Piggybacked on Object Detection,"Real-Time Pose Estimation Piggybacked on Object Detection
+Roman Juránek, Adam Herout, Markéta Dubská, Pavel Zemčík
+Brno University of Technology
+Brno, Czech Republic"
+06e9149b7ef8bff3a4b5a18fe01da9a522f91891,SRLSP: A Face Image Super-Resolution Algorithm Using Smooth Regression With Local Structure Prior,"This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TMM.2016.2601020, IEEE
+Transactions on Multimedia
+SRLSP: A Face Image Super-Resolution Algorithm
+Using Smooth Regression with Local Structure Prior
+Junjun Jiang, Member, IEEE, Chen Chen, Jiayi Ma, Member, IEEE, Zheng Wang, Zhongyuan
+Wang, Member, IEEE, and Ruimin Hu, Senior Member, IEEE
+traditional"
+062c41dad67bb68fefd9ff0c5c4d296e796004dc,Temporal Generative Adversarial Nets with Singular Value Clipping,"Temporal Generative Adversarial Nets with Singular Value Clipping
+Masaki Saito∗
+Eiichi Matsumoto∗
+Preferred Networks inc., Japan
+{msaito, matsumoto,
+Shunta Saito"
+06cb0939ed5fb2b3398d54a7fcdb865fe53f414a,Bag-of-Words Image Representation: Key Ideas and Further Insight,"Chapter 2
+Bag-of-Words Image Representation:
+Key Ideas and Further Insight
+Marc T. Law, Nicolas Thome and Matthieu Cord"
+06bd34951305d9f36eb29cf4532b25272da0e677,"A Fast and Accurate System for Face Detection, Identification, and Verification","JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+A Fast and Accurate System for Face Detection,
+Identification, and Verification
+Rajeev Ranjan, Ankan Bansal, Jingxiao Zheng, Hongyu Xu, Joshua Gleason, Boyu Lu, Anirudh Nanduri,
+Jun-Cheng Chen, Carlos D. Castillo, Rama Chellappa"
+068a7c7849cb6480def2e124ac5a45564e094b2a,Multi-Scale Learning for Low-Resolution Person Re-Identification,"Multi-scale learning for low-resolution person re-identification
+Li, X; Zheng, WS; Wang, X; Xiang, T; Gong, S
+© 2017 IEEE. Personal use of this material is permitted. Permission from IEEE must be
+obtained for all other uses, in any current or future media, including reprinting/republishing
+this material for advertising or promotional purposes, creating new collective works, for resale
+or redistribution to servers or lists, or reuse of any copyrighted component of this work in
+other works.
+For additional information about this publication click this link.
+http://qmro.qmul.ac.uk/xmlui/handle/123456789/19657
+Information about this research object was correct at the time of download; we occasionally
+make corrections to records, please therefore check the published record when citing. For
+more information contact"
+06687e82ecc94f716d86d3e9f6bfbd30655c6631,CANDECOMP/PARAFAC Decomposition of High-Order Tensors Through Tensor Reshaping,"CANDECOMP/PARAFAC Decomposition of
+High-order Tensors Through Tensor Reshaping
+Anh Huy Phan∗, Petr Tichavsk´y and Andrzej Cichocki"
+0694b05cbc3ef5d1c5069a4bfb932a5a7b4d5ff0,Exploiting Local Class Information in Extreme Learning Machine,"Iosifidis, A., Tefas, A., & Pitas, I. (2014). Exploiting Local Class Information
+in Extreme Learning Machine. Paper presented at International Joint
+Conference on Computational Intelligence (IJCCI), Rome, Italy.
+Peer reviewed version
+Link to publication record in Explore Bristol Research
+PDF-document
+University of Bristol - Explore Bristol Research
+General rights
+This document is made available in accordance with publisher policies. Please cite only the published
+version using the reference above. Full terms of use are available:
+http://www.bristol.ac.uk/pure/about/ebr-terms"
+0612745dbd292fc0a548a16d39cd73e127faedde,Flickr30k Entities: Collecting Region-to-Phrase Correspondences for Richer Image-to-Sentence Models,"Noname manuscript No.
+(will be inserted by the editor)
+Flickr30k Entities: Collecting Region-to-Phrase Correspondences for
+Richer Image-to-Sentence Models
+Bryan A. Plummer · Liwei Wang · Chris M. Cervantes · Juan C. Caicedo · Julia
+Hockenmaier · Svetlana Lazebnik
+Received: date / Accepted: date"
+0683be899f3e04b8b55a501e8ffafc0484b44056,Using Deep Learning and Low-Cost RGB and Thermal Cameras to Detect Pedestrians in Aerial Images Captured by Multirotor UAV,"Article
+Using Deep Learning and Low-Cost RGB and
+Thermal Cameras to Detect Pedestrians in Aerial
+Images Captured by Multirotor UAV
+Diulhio Candido de Oliveira * ID and Marco Aurelio Wehrmeister ID
+Computing Systems Engineering Laboratory (LESC), Federal University of Technology—Parana (UTFPR),
+Curitiba 80230-901, Brazil;
+* Correspondence: Tel.: +55-41-3310-4646
+Received: 27 April 2018; Accepted: 3 July 2018; Published: 12 July 2018"
+06dee5ff4b41eadf5db5c6841d3441d388f08117,3D Cascade of Classifiers for Open and Closed Eye Detection in Driver Distraction Monitoring,"3D Cascade of Classifiers for
+Open and Closed Eye Detection
+in Driver Distraction Monitoring
+Mahdi Rezaei and Reinhard Klette
+The .enpeda.. Project, The University of Auckland
+Tamaki Innovation Campus, Auckland, New Zealand"
+060820f110a72cbf02c14a6d1085bd6e1d994f6a,Fine-grained classification of pedestrians in video: Benchmark and state of the art,"Fine-Grained Classification of Pedestrians in Video: Benchmark and State of the Art
+David Hall and Pietro Perona
+California Institute of Technology.
+The dataset was labelled with bounding boxes, tracks, pose and fine-
+grained labels. To achieve this, crowdsourcing, using workers from Ama-
+zon’s Mechanical Turk (MTURK) was used. A summary of the dataset’s
+statistics can be found in Table 1.
+Number of Frames Sent to MTURK
+Number of Frames with at least 1 Pedestrian
+Number of Bounding Box Labels
+Number of Pose Labels
+Number of Tracks
+8,708
+0,994
+2,457
+7,454
+,222
+Table 1: Dataset Statistics
+A state-of-the-art algorithm for fine-grained classification was tested us-
+ing the dataset. The results are reported as a useful performance baseline."
+063a3be18cc27ba825bdfb821772f9f59038c207,The development of spontaneous facial responses to others’ emotions in infancy: An EMG study,"This is a repository copy of The development of spontaneous facial responses to others’
+emotions in infancy. An EMG study.
+White Rose Research Online URL for this paper:
+http://eprints.whiterose.ac.uk/125231/
+Version: Published Version
+Article:
+Kaiser, Jakob, Crespo-Llado, Maria Magdalena, Turati, Chiara et al. (1 more author)
+(2017) The development of spontaneous facial responses to others’ emotions in infancy.
+An EMG study. Scientific Reports. ISSN 2045-2322
+https://doi.org/10.1038/s41598-017-17556-y
+Reuse
+This article is distributed under the terms of the Creative Commons Attribution (CC BY) licence. This licence
+allows you to distribute, remix, tweak, and build upon the work, even commercially, as long as you credit the
+authors for the original work. More information and the full terms of the licence here:
+https://creativecommons.org/licenses/
+Takedown
+If you consider content in White Rose Research Online to be in breach of UK law, please notify us by
+emailing including the URL of the record and the reason for the withdrawal request.
+https://eprints.whiterose.ac.uk/"
+060797f33c242b568189be251f9735afdc4c9f22,Robust Deep-Learning-Based Road-Prediction for Augmented Reality Navigation Systems at Night,"Robust Deep-Learning-Based Road-Prediction
+for Augmented Reality Navigation Systems
+Matthias Limmer1*, Julian Forster1*, Dennis Baudach1, Florian Schüle2,
+Roland Schweiger1 and Hendrik P.A. Lensch3"
+069ebb57ccca31ab68983e07044e65ce1a04174f,4D facial expression recognition,"2011 IEEE International Conference on Computer Vision Workshops
+978-1-4673-0063-6/11/$26.00 c(cid:13)2011 IEEE"
+06680961e99aadb366968e5f515da58864ecd784,ENabler for Design Specifications FP 6 - IST - 2005 - 27916,"Trends Research ENabler for Design Specifications
+FP6-IST-2005-27916
+Deliverable
+TRENDS META-DELIVERABLE 1 - STATE OF THE ART
+Security Classification : PU
+Leading partner
+SERAM
+Issue Date
+03/09/2007
+Version
+Authors
+Approved by
+Final draft
+Aranzazu BERECIARTUA, Carole BOUCHARD, Marin FERECATU, Guillaume LOGEROT, Loïs RIGOUSTE, Carlotta
+VITALE
+Carole Bouchard
+03/09/2007
+META DELIVERABLE 1 - STATE OF THE ART
+This document presents a State Of the Art related to
+most popular products, tools and methods"
+069cadd9d8e52ad2715a3551012a06e506191626,Person re-identification using semantic color names and RankBoost,"Person Re-identification using Semantic Color Names and RankBoost
+Cheng-Hao Kuo1, Sameh Khamis2∗, and Vinay Shet1
+Imaging and Computer Vision, Siemens Corporation, Corporate Technology1, Princeton, NJ
+University of Maryland2, College Park, MD"
+06f969d3858b6d14425fcbe7ff12b72e213ee240,Recognizing Cardiac Magnetic Resonance Acquisition Planes,"Recognizing cardiac magnetic resonance acquisition
+planes
+Jan Margeta, Antonio Criminisi, Daniel C. Lee, Nicholas Ayache
+To cite this version:
+Jan Margeta, Antonio Criminisi, Daniel C. Lee, Nicholas Ayache. Recognizing cardiac magnetic
+resonance acquisition planes. MIUA - Medical Image Understanding and Analysis Conference
+- 2014, Jul 2014, London, United Kingdom. 2014. <hal-01009952>
+HAL Id: hal-01009952
+https://hal.inria.fr/hal-01009952
+Submitted on 19 Jun 2014
+HAL is a multi-disciplinary open access
+archive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+abroad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destin´ee au d´epˆot et `a la diffusion de documents
+scientifiques de niveau recherche, publi´es ou non,
+´emanant des ´etablissements d’enseignement et de"
+06c333fc146d0a87f591c82a1f22925ccef378b1,Emotional Cues during Simultaneous Face and Voice Processing: Electrophysiological Insights,"Emotional Cues during Simultaneous Face and Voice
+Processing: Electrophysiological Insights
+Taosheng Liu1,2, Ana Pinheiro2,3, Zhongxin Zhao4*, Paul G. Nestor2,5, Robert W. McCarley2, Margaret A.
+Niznikiewicz2*
+Department of Psychology, Second Military Medical University, Shanghai, China, 2 Clinical Neuroscience Division, Laboratory of Neuroscience, Department of Psychiatry,
+Boston VA Healthcare System, Brockton Division and Harvard Medical School, Brockton, Massachusetts, United States of America, 3 Neuropsychophysiology Laboratory,
+CiPsi, School of Psychology, University of Minho, Braga, Portugal, 4 Department of Neurology, Neuroscience Research Center of Changzheng Hospital, Second Military
+Medical University, Shanghai, China, 5 University of Massachusetts, Boston, Massachusetts, United States of America"
+0628ffefb911d1446914098d7c38a094c92c8a70,An opportunistic prediction-based thread scheduling to maximize throughput/watt in AMPs,"An Opportunistic Prediction-based Thread
+Scheduling to Maximize Throughput/Watt in AMPs
+Arunachalam Annamalai, Rance Rodrigues, Israel Koren and Sandip Kundu
+Department of Electrical and Computer Engineering, University of Massachusetts at Amherst
+Email: {annamalai, rodrigues, koren,"
+06ad99f19cf9cb4a40741a789e4acbf4433c19ae,SenTion: A framework for Sensing Facial Expressions,"SenTion: A framework for Sensing Facial
+Expressions
+Rahul Islam∗, Karan Ahuja∗, Sandip Karmakar∗, Ferdous Barbhuiya∗ ∗IIIT Guwahati
+{rahul.islam, karan.ahuja, sandip,"
+06e959c88dcce05847a395dc404725dd0488003d,Articulated clinician detection using 3D pictorial structures on RGB-D data,"3D Pictorial Structures on RGB-D Data for
+Articulated Human Detection in Operating Rooms
+Abdolrahim Kadkhodamohammadi, Afshin Gangi, Michel de Mathelin and Nicolas Padoy"
+06a2a3c6d44ab5572df55ce34d9b1216bc685385,GANVO: Unsupervised Deep Monocular Visual Odometry and Depth Estimation with Generative Adversarial Networks,"GANVO: Unsupervised Deep Monocular Visual Odometry and Depth
+Estimation with Generative Adversarial Networks
+Yasin Almalioglu1, Muhamad Risqi U. Saputra1, Pedro P. B. de Gusmo1, Andrew Markham1, and Niki Trigoni1"
+6c3c845fe484bdb2b3549054644c7a06bd9b87b8,ENCARA: real-time detection of frontal faces,"ENCARA: REAL-TIME DETECTION OF FRONTAL FACES
+M. Castrillón Santana, M. Hernández Tejera, J. Cabrera Gámez
+Instituto Universitario Sistemas Inteligentes y Aplicaciones Numéricas en Ingeniería
+Universidad de Las Palmas de Gran Canaria
+35017 Gran Canaria - Spain
+6c27eccf8c4b22510395baf9f0d0acc3ee547862,Using CMU PIE Human Face Database to a Convolutional Neural Network - Neocognitron,"Using CMU PIE Human Face Database to a
+Convolutional Neural Network - Neocognitron
+José Hiroki Saito1, Tiago Vieira de Carvalho1, Marcelo Hirakuri1, André Saunite1,
+Alessandro Noriaki Ide2 and Sandra Abib1
+- Federal University of São Carlos - Computer Science Department - GAPIS
+Rodovia Washington Luis, Km 235, São Carlos – SP - Brazil
+- University of Genoa - Department of Informatics, Systems and Telematics - Neurolab
+Via Opera Pia, 13 – I-16145 – Genoa - Italy"
+6c0f9acd62ca9f156ca632dad6d666209eae461e,Discriminative vision-based recovery and recognition of human motion,"Discriminative Vision-Based Recovery and
+Recognition of Human Motion
+9-789036-528108
+CTIT Dissertation Series No. 09-136
+Center for Telematics and Information Technology (CTIT)
+P.O. Box 217, 7500 AE Enschede, The Netherlands
+Ronald Poppe"
+6cbb3c47010e406de656d13fe289522bb3071bc0,Improved vehicle detection system based on customized HOG,"Improved vehicle detection system based on
+customized HOG
+Haythem AMEUR1, Abdelhamid HELALI1, Hassen MAAREF1, Anis YOUSSEF2
+Laboratory of Micro-Optoelectronic and Nanostructure, University of Monastir
+Tunisia, Monastir
+2 TELNET Innovation Labs Tunisia, Tunis"
+6ce6da7a6b2d55fac604d986595ba6979580393b,Cross Domain Knowledge Transfer for Person Re-identification,"Cross Domain Knowledge Transfer for Person Re-identification
+Qiqi Xiao
+Kelei Cao
+Haonan Chen
+Fangyue Peng
+Chi Zhang"
+6cd557019b7775d8647ca31260734c786fdb69ec,Visual Classifier Prediction by Distributional Semantic Embedding of Text Descriptions,"Proceedings of the 2015 Workshop on Vision and Language (VL’15), pages 48–50,
+Lisbon, Portugal, 18 September 2015. c(cid:13)2015 Association for Computational Linguistics."
+6cefb70f4668ee6c0bf0c18ea36fd49dd60e8365,Privacy-Preserving Deep Inference for Rich User Data on The Cloud,"Privacy-Preserving Deep Inference for Rich User
+Data on The Cloud
+Seyed Ali Osia ♯, Ali Shahin Shamsabadi ♯, Ali Taheri ♯, Kleomenis Katevas ⋆,
+Hamid R. Rabiee ♯, Nicholas D. Lane †, Hamed Haddadi ⋆
+♯ Sharif University of Technology
+⋆ Queen Mary University of London
+Nokia Bell Labs & University of Oxford"
+6cb68c1f7558e01966ad1e1fa81feeeae3dee666,Photo Filter Recommendation by Category-Aware Aesthetic Learning,"IEEE TRANSACTION ON MULTIMEDIA
+Photo Filter Recommendation
+by Category-Aware Aesthetic Learning
+Wei-Tse Sun, Ting-Hsuan Chao, Yin-Hsi Kuo, Winston H. Hsu"
+6c54261f601c8a569149b77d32efe6c58f2e4a2e,Preliminary evidence that the limbal ring influences facial attractiveness.,"Evolutionary Psychology
+www.epjournal.net – 2011. 9(2): 137-146
+¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯
+Original Article
+Preliminary Evidence that the Limbal Ring Influences Facial Attractiveness
+Darren Peshek, Department of Cognitive Sciences, University of California Irvine, Irvine, CA, USA. Email:
+(Corresponding author).
+Negar Semmaknejad, Department of Cognitive Sciences, University of California Irvine, Irvine, CA, USA.
+Donald Hoffman, Department of Cognitive Sciences, University of California Irvine, Irvine, CA, USA.
+Pete Foley, Innovation Science, Procter & Gamble, Cincinnati, OH, USA."
+6c62330cbd60f2cb6cb80b920104d0df3116cb3f,Robust People Tracking Using A Light Coding Depth Sensor,"Robust People Tracking Using A Light Coding Depth Sensor
+Xun Changqing1, Yang Shuqiang2, and Zhang Chunyuan1
+College of Computer, National University of Defence Technology, ChangSha, China
+College of Electronic Science and Engineering, National University of Defence Technology, ChangSha, China"
+6c52c12644321d4256306feaf784ccae6ebc4fea,Enhanced vote count circuit based on nor flash memory for fast similarity search,"978-1-4799-9988-0/16/$31.00 ©2016 IEEE
+ICASSP 2016"
+6ceacd889559cfcf0009e914d47f915167231846,The impact of visual attributes on online image diffusion,"The Impact of Visual Attributes on Online Image Diffusion
+Luam Totti
+Federal University of
+Minas Gerais (UFMG)
+Belo Horizonte, MG, Brazil
+Felipe Costa
+Federal University of
+Minas Gerais (UFMG)
+Belo Horizonte, MG, Brazil
+Sandra Avila
+RECOD Lab., DCA / FEEC /
+UNICAMP
+Campinas, SP, Brazil
+Eduardo Valle
+RECOD Lab., DCA / FEEC /
+UNICAMP
+Campinas, SP, Brazil
+Wagner Meira Jr.
+Federal University of
+Minas Gerais (UFMG)"
+6cad008ad80081dc42752e813ee6924e3c174dc7,Does Facial Resemblance Enhance Cooperation?,"Does Facial Resemblance Enhance Cooperation?
+Trang Giang*, Raoul Bell*, Axel Buchner
+Department of Experimental Psychology, Heinrich Heine University Du¨ sseldorf, Du¨ sseldorf, Germany"
+6c304f3b9c3a711a0cca5c62ce221fb098dccff0,Attentive Semantic Video Generation Using Captions,"Attentive Semantic Video Generation using Captions
+Tanya Marwah∗
+IIT Hyderabad
+Gaurav Mittal∗
+Vineeth N. Balasubramanian
+IIT Hyderabad"
+6cb7648465ba7757ecc9c222ac1ab6402933d983,Visual Forecasting by Imitating Dynamics in Natural Sequences,"Visual Forecasting by Imitating Dynamics in Natural Sequences
+Kuo-Hao Zeng†‡ William B. Shen† De-An Huang† Min Sun‡ Juan Carlos Niebles†
+{khzeng, bshen88, dahuang,
+Stanford University ‡National Tsing Hua University"
+6c2b392b32b2fd0fe364b20c496fcf869eac0a98,Fully automatic face recognition framework based on local and global features,"DOI 10.1007/s00138-012-0423-7
+ORIGINAL PAPER
+Fully automatic face recognition framework based
+on local and global features
+Cong Geng · Xudong Jiang
+Received: 30 May 2011 / Revised: 21 February 2012 / Accepted: 29 February 2012 / Published online: 22 March 2012
+© Springer-Verlag 2012"
+6c4d5ac0eed17513e3ceacd396526b8ad6c8fc09,Learning to Learn by Exploiting Prior Knowledge,"Learning to Learn by Exploiting
+Prior Knowledge
+Thèse n. 5587
+à présenter le 07 November 2012
+à la Faculté des Sciences et Techniques de L'ingénieur
+laboratoire de L'Idiap
+programme doctoral en Génie Électrique
+École Polytechnique Fédérale de Lausanne
+pour l'obtention du grade de Docteur ès Sciences
+Tatiana Tommasi
+acceptée sur proposition du jury :
+Prof Dario Floreano, président du jury
+Prof Hervé Bourlard, directeur de thèse
+Dr Barbara Caputo, co-directeur de thèse
+Prof Jean-Philippe Thiran, rapporteur
+Prof Jim Little, rapporteur
+Dr Vittorio Ferrari, rapporteur
+Lausanne, EPFL, 2012"
+6c984bb3243f3b8d0afd8d90cd4ce85eb8f1dd3c,3D Ear Recognition System Using Neural Network Based Self Organizing Maps,"International Journal of Engineering Research and Applications (IJERA) ISSN: 2248-9622
+International Conference on Humming Bird ( 01st March 2014)
+RESEARCH ARTICLE
+OPEN ACCESS
+3D Ear Recognition System Using Neural Network Based Self
+Organizing Maps
+M.Sathish Babu1, Assistant Professor
+Email:
+Department of Computer Science and Engineering, Cape Institute of Technology."
+6c38ab65df4a1bf546f1426e8a7f2f5cb5f765d3,Pathological Tremor Detection From Video,"Pathological Tremor Detection From Video
+Xilin Li"
+6c518aabdbba2c073eab6a3bb4120023851e524c,Person Recognition System Based on a Combination of Body Images from Visible Light and Thermal Cameras,"Article
+Person Recognition System Based on a Combination
+of Body Images from Visible Light and
+Thermal Cameras
+Dat Tien Nguyen, Hyung Gil Hong, Ki Wan Kim and Kang Ryoung Park *
+Division of Electronics and Electrical Engineering, Dongguk University, 30 Pildong-ro 1-gil, Jung-gu,
+Seoul 100-715, Korea; (D.T.N.); (H.G.H.);
+(K.W.K.)
+* Correspondence: Tel.: +82-10-3111-7022; Fax: +82-2-2277-8735
+Academic Editor: Vittorio M. N. Passaro
+Received: 5 January 2017; Accepted: 14 March 2017; Published: 16 March 2017"
+6c514a85b840c461cf6959927e6a34414e1e0f5e,Texture descriptors to distinguish radiation necrosis from recurrent brain tumors on multi-parametric MRI,"Medical Imaging 2014: Computer-Aided Diagnosis, edited by Stephen Aylward, Lubomir M. Hadjiiski,
+Proc. of SPIE Vol. 9035, 90352B · © 2014 SPIE · CCC code: 1605-7422/14/$18 · doi: 10.1117/12.2043969
+Proc. of SPIE Vol. 9035 90352B-1
+From: http://proceedings.spiedigitallibrary.org/ on 10/02/2014 Terms of Use: http://spiedl.org/terms"
+6cddc7e24c0581c50adef92d01bb3c73d8b80b41,Face Verification Using the LARK Representation,"Face Verification Using the LARK
+Representation
+Hae Jong Seo, Student Member, IEEE, Peyman Milanfar, Fellow, IEEE,"
+6cfc337069868568148f65732c52cbcef963f79d,Audio-Visual Speaker Localization via Weighted Clustering Israel -,"Audio-Visual Speaker Localization via Weighted
+Clustering
+Israel-Dejene Gebru, Xavier Alameda-Pineda, Radu Horaud, Florence Forbes
+To cite this version:
+Israel-Dejene Gebru, Xavier Alameda-Pineda, Radu Horaud, Florence Forbes. Audio-Visual Speaker
+Localization via Weighted Clustering. IEEE Workshop on Machine Learning for Signal Processing,
+Sep 2014, Reims, France. pp.1-6, 2014, <10.1109/MLSP.2014.6958874>. <hal-01053732>
+HAL Id: hal-01053732
+https://hal.archives-ouvertes.fr/hal-01053732
+Submitted on 11 Aug 2014
+HAL is a multi-disciplinary open access
+archive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+abroad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de"
+6cadbc0122376be3c249ecfec7de8247ffbc4fb3,Bidirectional Label Propagation over Graphs,"Int J Software Informatics, Volume 7, Issue 3 (2013), pp.419–433
+International Journal of Software and Informatics, ISSN 1673-7288
+(cid:176)2013 by ISCAS. All rights reserved.
+Tel: +86-10-62661040
+http://www.ijsi.org
+Email:
+Bidirectional Label Propagation over Graphs
+Wei Liu1 and Tongtao Zhang2
+(IBM T. J. Watson Research Center, Yorktown Heights, NY, USA)
+(Columbia University, New York, NY, USA)"
+6c24fed42d9a1ec283d2aa39a2dd768256a1a066,Swift: reducing the effects of latency in online video scrubbing,"Swift: Reducing the Effects of Latency in Online Video Scrubbing
+Justin Matejka, Tovi Grossman, George Fitzmaurice
+Autodesk Research, Toronto, Ontario, Canada
+Traditional Video Scrubbing
+Swift Video Scrubbing
+Figure 1. An illustration of the scrubbing behavior of a traditional streaming video player and the Swift player. With the
+Swift system a quick-to-download low resolution version of the video is displayed while scrubbing.
+tasks which
+the effects of"
+6c22b549d854845c5d2f17d75417e4469e6d3f83,A robust face recognition algorithm for real-world applications,"A Robust Face Recognition
+Algorithm for
+Real-World Applications
+zur Erlangung des akademischen Grades eines
+Doktors der Ingenieurwissenschaften
+der Fakult¨at f¨ur Informatik
+der Universit¨at Fridericiana zu Karlsruhe (TH)
+genehmigte
+Dissertation
+Hazım Kemal Ekenel
+us Samsun, T¨urkei
+Tag der m¨undlichen Pr¨ufung: 02.02.2009
+Erster Gutachter:
+Prof. Dr. A. Waibel
+Zweiter Gutachter:
+Prof. Dr. J. Kittler"
+6cd96f2b63c6b6f33f15c0ea366e6003f512a951,A New Approach in Solving Illumination and Facial Expression Problems for Face Recognition,"A New Approach in Solving Illumination and Facial Expression Problems
+for Face Recognition
+Yee Wan Wong, Kah Phooi Seng, Li-Minn Ang
+The University of Nottingham Malaysia Campus
+Tel : 03-89248358, Fax : 03-89248017
+E-mail :
+Jalan Broga
+3500 Semenyih, Selangor"
+6c8c7065d1041146a3604cbe15c6207f486021ba,Attention Modeling for Face Recognition via Deep Learning,"Attention Modeling for Face Recognition via Deep Learning
+Sheng-hua Zhong
+Department of Computing, Hung Hom, Kowloon
+Hong Kong, 999077 CHINA
+Yan Liu
+Department of Computing, Hung Hom, Kowloon
+Hong Kong, 99907 CHINA
+Yao Zhang
+Department of Computing, Hung Hom, Kowloon
+Hong Kong, 99907 CHINA
+Fu-lai Chung
+Department of Computing, Hung Hom, Kowloon
+Hong Kong, 99907 CHINA"
+6cd762e7cb1301abd0ddbb265dd9c7661ffc0458,On optimal low rank Tucker approximation for tensors: the case for an adjustable core size,"On Optimal Low Rank Tucker Approximation for Tensors:
+The Case for an Adjustable Core Size
+Bilian CHEN ∗
+Zhening LI †
+Shuzhong ZHANG ‡
+August 7, 2014"
+6cd7a47bbba11a994cd8e68ee5eae2fcb0033054,Driving in the Matrix: Can virtual worlds replace human-generated annotations for real world tasks?,"Driving in the Matrix: Can Virtual Worlds Replace Human-Generated
+Annotations for Real World Tasks?
+Matthew Johnson-Roberson1, Charles Barto2, Rounak Mehta3, Sharath Nittur Sridhar2, and Ram Vasudevan4"
+3965d73c9d7c97cdb391bfd86a15bfd3534cbd32,Deep Learning for Visual Question Answering,"Deep Learning for Visual Question Answering
+Avi Singh"
+39803a9c075d543e19384d79fb4c36b207892179,Regression Techniques versus Discriminative Methods for Face Recognition,"Regression Techniques versus Discriminative Methods for Face
+Recognition
+Vitomir ˇStruc, France Miheliˇc, Rok Gajˇsek and Nikola Paveˇsi´c"
+3917bf2cc075ef075d9c879fc9ec3349ea116735,Discriminant Analysis by Locally Linear Transformations,"Discriminant Analysis by Locally Linear
+Transformations
+Tae-Kyun Kim1,2, Josef Kittler2, Hyun-Chul Kim3, and Seok Cheol Kee1
+: Samsung Advanced Institute of Technology, KOREA
+: Center for Vision, Speech and Signal Processing, University of
+Surrey,U.K.
+: Pohang University of Science and Technology, KOREA"
+390f3d7cdf1ce127ecca65afa2e24c563e9db93b,Learning Deep Representation for Face Alignment with Auxiliary Attributes,"Learning Deep Representation for Face
+Alignment with Auxiliary Attributes
+Zhanpeng Zhang, Ping Luo, Chen Change Loy, Member, IEEE and Xiaoou Tang, Fellow, IEEE"
+39ed31ced75e6151dde41944a47b4bdf324f922b,Pose-Guided Photorealistic Face Rotation,"Pose-Guided Photorealistic Face Rotation
+Yibo Hu1,2, Xiang Wu1, Bing Yu3, Ran He1,2 ∗, Zhenan Sun1,2
+CRIPAC & NLPR & CEBSIT, CASIA 2University of Chinese Academy of Sciences
+Noah’s Ark Laboratory, Huawei Technologies Co., Ltd.
+{yibo.hu, {rhe,"
+3918dcfddf2da218a615dd8f008f6fce436e06f7,Learning Sight from Sound: Ambient Sound Provides Supervision for Visual Learning,"Int J Comput Vis manuscript No.
+(will be inserted by the editor)
+Learning Sight from Sound:
+Ambient Sound Provides Supervision for Visual Learning
+Andrew Owens · Jiajun Wu · Josh H. McDermott · William T. Freeman ·
+Antonio Torralba
+Received: date / Accepted: date"
+3918b425bb9259ddff9eca33e5d47bde46bd40aa,Learning Language from Ambiguous Perceptual Context,"Copyright
+David Lieh-Chiang Chen"
+39675124e4fe1be08f42bdd2e1e237e5a87839ba,"Adversarial Collaboration: Joint Unsupervised Learning of Depth, Camera Motion, Optical Flow and Motion Segmentation","Adversarial Collaboration: Joint Unsupervised
+Learning of Depth, Camera Motion, Optical
+Flow and Motion Segmentation
+Anurag Ranjan1
+Varun Jampani2
+Kihwan Kim 2
+Deqing Sun 2
+Jonas Wulff 1
+Michael J. Black1
+Max Planck Institute for Intelligent Systems
+NVIDIA Research
+{aranjan, jwulff,
+{vjampani, kihwank,"
+39df6ca15f41e5a674ed8cd1654e699dbc8b8c11,Human tracking over camera networks: a review,"Hou et al. EURASIP Journal on Advances in Signal Processing (2017) 2017:43
+DOI 10.1186/s13634-017-0482-z
+EURASIP Journal on Advances
+in Signal Processing
+R EV I E W
+Human tracking over camera networks: a
+review
+Li Hou1,2,3*, Wanggen Wan1,3, Jenq-Neng Hwang4, Rizwan Muhammad1,3, Mingyang Yang1,3 and Kang Han1,3
+Open Access"
+39d900da87fa2f8987567d22a924fb7674f9be67,Generating Notifications for Missing Actions: Don't Forget to Turn the Lights Off!,"Generating Notifications for Missing Actions:
+Don’t forget to turn the lights off!
+Bilge Soran*, Ali Farhadi*†, Linda Shapiro*
+*University of Washington
+Allen Institute for Artificial Intelligence
+{bilge, ali,
+Figure 1: Our purpose is to issue notifications about missing actions given an unsegmented input stream of egocentric video.
+For the latte making sequence above, our system recognizes the actions that happened so far, predicts the ongoing action,
+reasons about missing actions and the associated cost, and generates notifications for the costly missing actions. In this figure,
+the brackets refer to segmented action boundaries, the blue arrows show the prediction points and the graphs below show the
+inter-action dependencies. The most recently completed action is marked in red, the predicted action is marked in blue, and
+the missing action is marked in orange. In this example, the actor is about to miss an important action: steam milk, and a
+reminder for that is given."
+39d406df1823aad167a429f60ae8f1d3dc4250fa,Scaling for Multimodal 3D Object Detection,"Scaling for Multimodal 3D Object Detection
+Andrej Karpathy
+Stanford"
+397400dd7c31e47f8dec20a742695abed297a150,An integrated vision-based architecture for home security system,"An Integrated Vision-based Architecture for Home
+Security System
+John See, Student Member, IEEE, and Sze-Wei Lee, Member, IEEE"
+39b080aea9b342947058884ca25fb5bb1b8f6d66,Fully Automated and Highly Accurate Dense Correspondence for Facial Surfaces,"Fully Automated and Highly Accurate Dense
+Correspondence for Facial Surfaces
+C. Martin Grewe and Stefan Zachow
+Mathematics for Life and Materials Sciences,
+Zuse Institute Berlin, Germany
+Fig. 1: Two facial expressions (a,b) from our database set into dense correspon-
+dence using the proposed framework. High geometric and photometric details are
+ccurately morphed between both expressions via a dense corresponding mesh."
+39d08fa8b028217384daeb3e622848451809a422,Variational Approaches for Auto-Encoding Generative Adversarial Networks,"Variational Approaches for Auto-Encoding
+Generative Adversarial Networks
+Mihaela Rosca∗ Balaji Lakshminarayanan∗ David Warde-Farley
+Shakir Mohamed
+DeepMind"
+3998c5aa6be58cce8cb65a64cb168864093a9a3e,Understanding head and hand activities and coordination in naturalistic driving videos,Intelligent Vehicles Symposium 2014
+39fc0fe46ddf43f13073cbab077d981547889dc1,Using Gradient Features from Scale-invariant Keypoints on Face Recognition,"International Journal of Innovative
+Computing, Information and Control
+Volume 7, Number 4, April 2011
+ICIC International c⃝2011 ISSN 1349-4198
+pp. 1639{1649
+USING GRADIENT FEATURES FROM SCALE-INVARIANT
+KEYPOINTS ON FACE RECOGNITION
+Shinfeng D. Lin, Jia-Hong Lin and Cheng-Chin Chiang
+Department of Computer Science and Information Engineering
+National Dong Hwa University
+No. 1, Sec. 2, Da Hsueh Rd., Shoufeng, Hualien 97401, Taiwan
+f david; bbmac;
+Received November 2009; revised March 2010"
+39dc2ce4cce737e78010642048b6ed1b71e8ac2f,Recognition of six basic facial expressions by feature-points tracking using RBF neural network and fuzzy inference system,"Recognition of Six Basic Facial Expressions by Feature-Points Tracking using
+RBF Neural Network and Fuzzy Inference System
+Hadi Seyedarabi*, Ali Aghagolzadeh **, Sohrab Khanmohammadi **
+*Islamic Azad University of AHAR
+**Elect. Eng. Faculty, Tabriz University, Tabriz, Iran"
+39a76fdc4b2d4b9e8ef8f69a87d17ae930520acc,Occlusion-Aware Human Pose Estimation with Mixtures of Sub-Trees,"Occlusion-Aware Human Pose Estimation with
+Mixtures of Sub-Trees
+Ibrahim Radwan∗, Abhinav Dhall and Roland Goecke"
+397fffa6f785762acb3cd3c96c4c6b65058b816f,Modeling mutual context of object and human pose in human-object interaction activities,"Modeling Mutual Context of Object and
+Human Pose in Human-object Interaction
+Activities
+•  Bangpeng Yao
+•  Li Fei-Fei
+Presented by Sahil Shah"
+3907d83f14ba9e2b8a93c3f02b04ca0b81901c4b,Semantic segmentation - using Convolutional Neural Networks and Sparse Dictionaries,"Master of Science Thesis in Electrical Engineering
+Department of Electrical Engineering, Linköping University, 2017
+Semantic segmentation
+- using Convolutional Neural Networks
+nd Sparse Dictionaries
+Viktor Andersson"
+391e52ac04408d3e6496614ffafd6ac89c1b6c45,Seeing 3D Chairs: Exemplar Part-Based 2D-3D Alignment Using a Large Dataset of CAD Models,"Seeing 3D chairs: exemplar part-based 2D-3D alignment
+using a large dataset of CAD models
+Mathieu Aubry1,∗ Daniel Maturana2 Alexei A. Efros3,∗ Bryan C. Russell4
+Josef Sivic1,∗
+INRIA 2Carnegie Mellon University
+UC Berkeley
+Intel Labs"
+390e212d4a874d8d2256e55fe0dee9193e4c376a,Just in Time: Controlling Temporal Performance in Crowdsourcing Competitions,"Just in Time: Controlling Temporal Performance in
+Crowdsourcing Competitions
+Markus Rokicki
+L3S Research Center,
+Hannover, Germany
+Electronics and Computer
+Science, University of
+Southampton, Southampton,
+Sergej Zerr"
+399ab5652908d99a5be1a664425f6463f67df2aa,Mechanisms of Diminished Attention to Eyes in Autism.,"Mechanisms of diminished attention to eyes in
+utism
+Jennifer M. Moriuchi, Emory University
+Ami Klin, Emory University
+Warren R Jones, Emory University
+Journal Title: American Journal of Psychiatry
+Volume: Volume 174, Number 1
+Publisher: American Psychiatric Publishing | 2017-01-01, Pages 26-35
+Type of Work: Article | Post-print: After Peer Review
+Publisher DOI: 10.1176/appi.ajp.2016.15091222
+Permanent URL: https://pid.emory.edu/ark:/25593/s8mpz
+Final published version: http://dx.doi.org/10.1176/appi.ajp.2016.15091222
+Copyright information:
+018 American Psychiatric Association
+Accessed June 11, 2018 8:03 PM EDT"
+397085122a5cade71ef6c19f657c609f0a4f7473,Using Segmentation to Predict the Absence of Occluded Parts,"GHIASI, FOWLKES: USING SEGMENTATION TO DETECT OCCLUSION
+Using Segmentation to Predict the Absence
+of Occluded Parts
+Golnaz Ghiasi
+Charless C. Fowlkes
+Dept. of Computer Science
+University of California
+Irvine, CA"
+39c8b34c1b678235b60b648d0b11d241a34c8e32,Learning to Deblur Images with Exemplars,"Learning to Deblur Images with Exemplars
+Jinshan Pan∗, Wenqi Ren∗, Zhe Hu∗, and Ming-Hsuan Yang"
+39bce1d5e4b31a555f12f0a44e92abcad73aab4f,"Explorer "" Here ' s looking at you , kid ""","""Here's looking at you, kid""
+Citation for published version:
+Marin-Jimenez, M, Zisserman, A & Ferrari, V 2011, ""Here's looking at you, kid"": Detecting people looking at
+each other in videos. in Proceedings of the British Machine Vision Conference (BMVC): Dundee, September
+011. BMVA Press, pp. 22.1-22.12. DOI: 10.5244/C.25.22
+Digital Object Identifier (DOI):
+0.5244/C.25.22
+Link:
+Link to publication record in Edinburgh Research Explorer
+Document Version:
+Peer reviewed version
+Published In:
+Proceedings of the British Machine Vision Conference (BMVC)
+General rights
+Copyright for the publications made accessible via the Edinburgh Research Explorer is retained by the author(s)
+nd / or other copyright owners and it is a condition of accessing these publications that users recognise and
+bide by the legal requirements associated with these rights.
+Take down policy
+The University of Edinburgh has made every reasonable effort to ensure that Edinburgh Research Explorer
+ontent complies with UK legislation. If you believe that the public display of this file breaches copyright please"
+3986161c20c08fb4b9b791b57198b012519ea58b,An Efficient Method for Face Recognition based on Fusion of Global and Local Feature Extraction,"International Journal of Soft Computing and Engineering (IJSCE)
+ISSN: 2231-2307, Volume-4 Issue-4, September 2014
+An Efficient Method for Face Recognition based on
+Fusion of Global and Local Feature Extraction
+E. Gomathi, K. Baskaran"
+3988ed2b900af26c07432d0f9f3c2679f3c532ac,Vision Meets Drones: A Challenge,"Vision Meets Drones: A Challenge
+Pengfei Zhu, Longyin Wen, Xiao Bian, Haibin Ling and Qinghua Hu"
+398ad0036b899aec04502c243dd129c1f3e4c21e,Object detection using voting spaces trained by few samples,"Downloaded From: https://www.spiedigitallibrary.org/journals/Optical-Engineering on 12/17/2017 Terms of Use: https://www.spiedigitallibrary.org/terms-of-use
+ObjectdetectionusingvotingspacestrainedbyfewsamplesPeiXuMaoYeXueLiLishenPeiPengweiJiao"
+3910b1cc849f999dc8a2c02a0313be32dd5d2b43,A Systematic Comparison of Deep Learning Architectures in an Autonomous Vehicle,"A Systematic Comparison of Deep Learning Architectures in an
+Autonomous Vehicle
+Michael Teti1†, William Edward Hahn1, Shawn Martin2, Christopher Teti3, and Elan Barenholtz1
+such tasks, or an attempt
+largely due to recent developments"
+395978c1dee9fd75bbcb249e74ad6fb4d3c2b9fc,A Reliable Hybrid Technique for Human Face Detection,"Hakim A., Marsland S. and W. Guesgen H. (2010).
+A RELIABLE HYBRID TECHNIQUE FOR HUMAN FACE DETECTION.
+In Proceedings of the International Conference on Computer Vision Theory and Applications, pages 241-244
+Copyright c(cid:13) SciTePress"
+395dadff1eab9c8177f843326ec864567342eba5,Vision-Based People Detection System for Heavy Machine Applications,"Article
+Vision-Based People Detection System for Heavy
+Machine Applications
+Vincent Fremont 1,*, Manh Tuan Bui 1, Djamal Boukerroui 1 and Pierrick Letort 2
+Received: 12 October 2015; Accepted: 13 January 2016; Published: 20 January 2016
+Academic Editor: Vittorio M. N. Passaro
+Sorbonne Universités, Université de Technologie de Compiègne, CNRS, UMR 7253,
+Heudiasyc-CS 60 319, 60 203 Compiègne Cedex, France; (M.T.B.);
+(D.B.)
+Technical Center for the Mechanical Industry (CETIM), 60300 Senlis, France;
+* Correspondence: Tel.: +33-344-237-917; Fax: +33-344-234-477"
+39b0bce87eec467adfe5bebcfe628ff5bd397fc7,"R4-A.2: Rapid Similarity Prediction, Forensic Search & Retrieval in Video","R4-A.2: Rapid Similarity Prediction, Forensic
+Search & Retrieval in Video
+PARTICIPANTS
+Venkatesh Saligrama
+David Castañón
+Ziming Zhang
+Gregory Castañón
+Yuting Chen
+Marc Eder
+Faculty/Staff
+Institution
+Title
+Co-PI
+Co-PI
+Post-Doc
+Graduate, Undergraduate and REU Students
+Degree Pursued
+Institution
+Email
+Month/Year of Graduation"
+399a5f7500648462fd8cf1704dfaeaea9d560e7e,Spoof Detection for Finger-Vein Recognition System Using NIR Camera,"Article
+Spoof Detection for Finger-Vein Recognition System
+Using NIR Camera
+Dat Tien Nguyen, Hyo Sik Yoon, Tuyen Danh Pham and Kang Ryoung Park *
+Division of Electronics and Electrical Engineering, Dongguk University, 30 Pildong-ro 1-gil, Jung-gu,
+Seoul 100-715, Korea; (D.T.N.); (H.S.Y.);
+(T.D.P.)
+* Correspondence: Tel.: +82-10-3111-7022; Fax: +82-2-2277-8735
+Received: 15 August 2017; Accepted: 27 September 2017; Published: 1 October 2017"
+392c3cabe516c0108b478152902a9eee94f4c81e,Tiny images,"Computer Science and Artificial Intelligence Laboratory
+Technical Report
+MIT-CSAIL-TR-2007-024
+April 23, 2007
+Tiny images
+Antonio Torralba, Rob Fergus, and William T. Freeman
+m a s s a c h u s e t t s i n s t i t u t e o f t e c h n o l o g y, c a m b r i d g e , m a 0 213 9 u s a — w w w. c s a i l . m i t . e d u"
+39e7ac344b17d97267ec80681aeded17e3e6d786,Joint Parsing of Cross-view Scenes with Spatio-temporal Semantic Parse Graphs,"Joint Parsing of Cross-view Scenes with Spatio-temporal Semantic Parse Graphs∗
+Hang Qi1∗, Yuanlu Xu1∗, Tao Yuan1∗, Tianfu Wu2, Song-Chun Zhu1
+Dept. Computer Science and Statistics, University of California, Los Angeles (UCLA)
+{hangqi, tianfu
+Dept. Electrical and Computer Engineering, NC State University"
+39db2ff704cc30a7e94989de33ff4290ea4a6df1,Low-Cost Visual Feature Representations For Image Retrieval,"Low-Cost Visual Feature Representations For Image
+Retrieval
+Ramon F. Pessoa, William R. Schwartz, Jefersson A. dos Santos
+Department of Computer Science
+Universidade Federal de Minas Gerais (UFMG)
+Belo Horizonte - Minas Gerais, Brazil, 31270-901
+Email: {ramon.pessoa, william,"
+39a19a687b3182054b30f36f627bc6875b09dbd3,A new boostrapping strategy for the AdaBoost-based face detector T.-J. Chin and D. Suter A new boostrapping strategy for the AdaBoost-based face detector,"Department of Electrical
+Computer Systems Engineering
+Technical Report
+MECSE-13-2005
+A new boostrapping strategy for the AdaBoost-based face
+detector
+T.-J. Chin and D. Suter"
+39340257d9a478b3c3b736ad31df1c0a6a78c851,Parts-based object recognition seeded by frequency-tuned saliency for child detection in active safety,"Parts-based object recognition seeded by frequency-tuned saliency for
+Child Detection in Active Safety
+Shinko Y. Cheng, Jose Molineros, Yuri Owechko
+HRL Laboratories, LLC
+011 Malibu Canyon Road
+Malibu CA 90265"
+3964caa0a1d788eb30365972880f83b71df1ab21,Multi-Modal Obstacle Detection in Unstructured Environments with Conditional Random Fields,"Multi-Modal Obstacle Detection in Unstructured
+Environments with Conditional Random Fields
+Mikkel Kragh1 and James Underwood2"
+39df4f8ad7add3863208a5f7b71e22ed1970ca58,Bayesian Supervised Dictionary learning‎,"Bayesian Supervised Dictionary learning
+B. Babagholami-Mohamadabadi
+A. Jourabloo
+M. Zolfaghari
+M.T. Manzuri-Shalmani
+CE Dept.
+Sharif University
+Tehran, Iran
+CE Dept.
+Sharif University
+Tehran, Iran
+CE Dept.
+Sharif University
+Tehran, Iran
+CE Dept.
+Sharif University
+Tehran, Iran"
+397c395aed9d96aef064b9ceb9f0eae9421eb00a,An Evaluation of the Pedestrian Classification in a Multi-Domain Multi-Modality Setup,"Sensors 2015, 15, 13851-13873; doi:10.3390/s150613851
+OPEN ACCESS
+sensors
+ISSN 1424-8220
+www.mdpi.com/journal/sensors
+Article
+An Evaluation of the Pedestrian Classification in
+Multi-Domain Multi-Modality Setup
+Alina Miron 1,*, Alexandrina Rogozan 2, Samia Ainouz 2, Abdelaziz Bensrhair 2
+nd Alberto Broggi 3
+ISR Laboratory, University of Reading, Reading RG6 6AY, UK
+INSA Rouen/LITIS laboratory - EA4108, Saint-Etienne du Rouvray 76801, France;
+E-Mails: (A.R.); (S.A.);
+(A.B.)
+VisLab, University of Parma, Parco Area delle Scienze 181A, 43100 Parma, Italy;
+E-Mail:
+* Author to whom correspondence should be addressed; E-Mail:
+Tel.: +44-118-378-7631.
+Academic Editor: Vittorio M.N. Passaro
+Received: 2 April 2015 / Accepted: 8 June 2015 / Published: 12 June 2015"
+3933e323653ff27e68c3458d245b47e3e37f52fd,Evaluation of a 3 D-aided Pose Invariant 2 D Face Recognition System,"Evaluation of a 3D-aided Pose Invariant 2D Face Recognition System
+Xiang Xu, Ha A. Le, Pengfei Dou, Yuhang Wu, Ioannis A. Kakadiaris
+{xxu18, hale4, pdou, ywu35,
+Computational Biomedicine Lab
+800 Calhoun Rd. Houston, TX, USA"
+3903cbd56446436a4a3b8443c26c90fc1b69f5e0,Event driven software architecture for multi-camera and distributed surveillance research systems,"Event Driven Software Architecture for Multi-camera and Distributed
+Surveillance Research Systems
+Roberto Vezzani, Rita Cucchiara
+University of Modena and Reggio Emilia - Italy"
+3958db5769c927cfc2a9e4d1ee33ecfba86fe054,Describable Visual Attributes for Face Verification and Image Search,"Describable Visual Attributes for
+Face Verification and Image Search
+Neeraj Kumar, Student Member, IEEE, Alexander C. Berg, Member, IEEE,
+Peter N. Belhumeur, and Shree K. Nayar, Member, IEEE"
+99ced8f36d66dce20d121f3a29f52d8b27a1da6c,Organizing Multimedia Data in Video Surveillance Systems Based on Face Verification with Convolutional Neural Networks,"Organizing Multimedia Data in Video
+Surveillance Systems Based on Face Verification
+with Convolutional Neural Networks
+Anastasiia D. Sokolova, Angelina S. Kharchevnikova, Andrey V. Savchenko
+National Research University Higher School of Economics, Nizhny Novgorod, Russian
+Federation"
+994f7c469219ccce59c89badf93c0661aae34264,Model Based Face Recognition Across Facial Expressions,"Model Based Face Recognition Across Facial
+Expressions
+Zahid Riaz, Christoph Mayer, Matthias Wimmer, and Bernd Radig, Senior Member, IEEE
+screens, embedded into mobiles and installed into everyday
+living and working environments they become valuable tools
+for human system interaction. A particular important aspect of
+this interaction is detection and recognition of faces and
+interpretation of facial expressions. These capabilities are
+deeply rooted in the human visual system and a crucial
+uilding block for social interaction. Consequently, these
+apabilities are an important step towards the acceptance of
+many technical systems.
+trees as a classifier
+lies not only"
+9949ac42f39aeb7534b3478a21a31bc37fe2ffe3,Parametric Stereo for Multi-pose Face Recognition and 3D-Face Modeling,"Parametric Stereo for Multi-Pose Face Recognition and
+D-Face Modeling
+Rik Fransens, Christoph Strecha, Luc Van Gool
+PSI ESAT-KUL
+Leuven, Belgium"
+9900be092f81547ad71e4124cd850048e1969063,3D Face Analysis for Facial Expression Recognition,"Author manuscript, published in ""20th International Conference on Pattern Recognition (ICPR 2010), Istanbul : Turquie (2010)"""
+9958942a0b7832e0774708a832d8b7d1a5d287ae,The Sparse Matrix Transform for Covariance Estimation and Analysis of High Dimensional Signals,"The Sparse Matrix Transform for Covariance
+Estimation and Analysis of High Dimensional
+Signals
+Guangzhi Cao*, Member, IEEE, Leonardo R. Bachega, and Charles A. Bouman, Fellow, IEEE"
+99582ce8439dce17d9d6f74eb54fc5c89dbe06d9,"Hough Forests for Object Detection, Tracking, and Action Recognition","Hough Forests for Object Detection, Tracking,
+nd Action Recognition
+Juergen Gall Member, IEEE, Angela Yao, Nima Razavi, Luc Van Gool Member, IEEE, and
+Victor Lempitsky"
+99726ad232cef837f37914b63de70d8c5101f4e2,Facial Expression Recognition Using PCA & Distance Classifier,"International Journal of Scientific & Engineering Research, Volume 5, Issue 5, May-2014 570
+ISSN 2229-5518
+Facial Expression Recognition Using PCA & Distance Classifier
+AlpeshKumar Dauda*
+Dept. of Electronics & Telecomm. Engg.
+Ph.D Scholar,VSSUT
+BURLA, ODISHA, INDIA
+Nilamani Bhoi
+Reader in Dept. of Electronics & Telecomm. Engg.
+VEER SURENDRA SAI UNIVERSITY OF
+TECHNOLOGY
+BURLA, ODISHA, INDIA"
+998e829cc72080c88a780f322d6bf7ab78dbd743,Towards Real-Time Multiresolution Face/Head Detection,"´AAAAAAAAAAAAAAAAAAAAAAAA
+´AAAAAAAAAAAAAAAAAAAAAAAA
+ART´ICULO
+Towards Real-Time Multiresolution Face/Head
+Detection*
+M. Castrill´on-Santana, H. Kruppa**, C. Guerra-Artal, M. Hern´andez-Tejera
+Universidad de Las Palmas de Gran Canaria
+Instituto Universitario de Sistemas Inteligentes
+y Aplicaciones Num´ericas en Ingenier´ıa
+Edificio Central del Parque Cient´ıfico-Tecnol´ogico
+Campus Universitario de Tafira
+5017 Las Palmas - Espa˜na"
+99a3a4151abbc2e5d33d4beec88dc55a057df299,Topological analysis of discrete scalar data,"TOPOLOGICAL ANALYSIS OF
+DISCRETE SCALAR DATA
+DAVID GÜNTHER
+DISSERTATION ZUR ERLANGUNG DES GRADES
+DES DOKTORS DER INGENIEURWISSENSCHAFTEN
+DER NATURWISSENSCHAFTLICH-TECHNISCHEN FAKULTÄTEN
+DER UNIVERSITÄT DES SAARLANDES
+SAARBRÜCKEN, 2012"
+99e1fd6a378209d48c12a70229e4f6d4d83f4417,Modular Vehicle Control for Transferring Semantic Information Between Weather Conditions Using GANs,"Modular Vehicle Control for Transferring Semantic
+Information Between Weather Conditions Using
+Patrick Wenzel1,2∗
+, Qadeer Khan1,2∗
+, Daniel Cremers1,2, and Laura Leal-Taixé1
+Technical University of Munich
+Artisense"
+99e1ab1fb08af137cad6efbc0454c6e1e68dca51,3D human action recognition and motion analysis using selective representations,"D HUMAN ACTION RECOGNITION
+AND MOTION ANALYSIS USING
+SELECTIVE REPRESENTATIONS
+D LEIGHTLEY
+PhD 2015"
+99f565df31ef710a2d8a1b606e3b7f5f92ab657c,Geometry Score: A Method For Comparing Generative Adversarial Networks,"Geometry Score: A Method For Comparing Generative Adversarial Networks
+Valentin Khrulkov 1 Ivan Oseledets 1 2"
+99b7ff97ad54308b816e47d9bbf6704b787b8f52,Causal Flow,"Causal Flow
+Yuya Yamashita, Tatsuya Harada, Member, IEEE, and Yasuo Kuniyoshi, Member, IEEE"
+99df887213407f612c1f5df502b637709a29cd6b,Ensembles of exemplar-SVMs for video face recognition from a single sample per person,"Ensembles of Exemplar-SVMs for Video Face Recognition from a
+Single Sample Per Person
+Saman Bashbaghi, Eric Granger, Robert Sabourin
+Guillaume-Alexandre Bilodeau
+Laboratoire d’imagerie de vision et d’intelligence artificielle
+LITIV Lab
+École de technologie supérieure, Université du Québec, Montréal, Canada
+Polytechnique Montréal, Montréal, Canada
+{eric.granger,"
+99cb716cd7687db8ef3d0403c85b1ab90869800f,Face Recognition under Pose and Expresivity Variation Using Thermal and Visible Images,"FACE RECOGNITION UNDER POSE AND EXPRESIVITY
+VARIATION USING THERMAL AND VISIBLE IMAGES
+Florin Marius Pop, Mihaela Gordan, Camelia Florea, Aurel Vlaicu
+Centre for Multimedia Technologies and Distance Education
+Technical University of Cluj-Napoca, Romania
+{Mihaela.Gordan, Camelia.Florea,"
+9993f1a7cfb5b0078f339b9a6bfa341da76a3168,"A Simple, Fast and Highly-Accurate Algorithm to Recover 3D Shape from 2D Landmarks on a Single Image","JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+A Simple, Fast and Highly-Accurate Algorithm to
+Recover 3D Shape from 2D Landmarks on a Single
+Image
+Ruiqi Zhao, Yan Wang, Aleix M. Martinez"
+992ebd81eb448d1eef846bfc416fc929beb7d28b,Exemplar-Based Face Parsing Supplementary Material,"Exemplar-Based Face Parsing
+Supplementary Material
+Brandon M. Smith Li Zhang
+Jonathan Brandt Zhe Lin Jianchao Yang
+University of Wisconsin–Madison
+Adobe Research
+http://www.cs.wisc.edu/~lizhang/projects/face-parsing/
+. Additional Selected Results
+Figures 1 and 2 supplement Figure 4 in our paper. In all cases, the input images come from our Helen [1] test set. We note
+that our algorithm generally produces accurate results, as shown in Figures 1. However, our algorithm is not perfect and makes
+mistakes on especially challenging input images, as shown in Figure 2.
+In our view, the mouth is the most challenging region of the face to segment: the shape and appearance of the lips vary
+widely from subject to subject, mouths deform significantly, and the overall appearance of the mouth region changes depending
+on whether the inside of the mouth is visible or not. Unusual mouth expressions, like those shown in Figure 2, are not repre-
+sented well in the exemplar images, which results in poor label transfer from the top exemplars to the test image. Despite these
+hallenges, our algorithm generally performs well on the mouth, with large segmentation errors occurring infrequently.
+. Comparisons with Liu et al. [2]
+The scene parsing approach by Liu et al. [2] shares sevaral similarities with our work. Like our approach, they propose a
+nonparametric system that transfers labels from exemplars in a database to annotate a test image. This begs the question, Why
+not simply apply the approach from Liu et al. to face images?"
+998b7c8608fb9f80177ce54230761d8c3d82b2da,SHEF-Multimodal: Grounding Machine Translation on Images,"Proceedings of the First Conference on Machine Translation, Volume 2: Shared Task Papers, pages 660–665,
+Berlin, Germany, August 11-12, 2016. c(cid:13)2016 Association for Computational Linguistics"
+9941a408ae031d1254bbc0fe7a63fac5f85fe347,Neural Processes,"Neural Processes
+Marta Garnelo 1 Jonathan Schwarz 1 Dan Rosenbaum 1 Fabio Viola 1 Danilo J. Rezende 1 S. M. Ali Eslami 1
+Yee Whye Teh 1"
+9963af1199679e176f0836e6d63572b3a69fa7da,23 Generating Facial Expressions with Deep Belief Nets,"We are IntechOpen,
+the world’s leading publisher of
+Open Access books
+Built by scientists, for scientists
+,500
+08,000
+.7 M
+Open access books available
+International authors and editors
+Downloads
+Our authors are among the
+Countries delivered to
+TOP 1%
+2.2%
+most cited scientists
+Contributors from top 500 universities
+Selection of our books indexed in the Book Citation Index
+in Web of Science™ Core Collection (BKCI)
+Interested in publishing with us?
+Contact"
+998f2cfb4a3bac6b38d8a4a96a3827e06a0eaadb,Geo-Supervised Visual Depth Prediction,"Geo-Supervised Visual Depth Prediction
+Xiaohan Fei
+Alex Wong
+Stefano Soatto"
+99c20eb5433ed27e70881d026d1dbe378a12b342,Semi-Supervised and Unsupervised Data Extraction Targeting Speakers: From Speaker Roles to Fame?,"ISCA Archive
+http://www.isca-speech.org/archive
+First Workshop on Speech, Language
+nd Audio in Multimedia
+Marseille, France
+August 22-23, 2013
+Proceedings of the First Workshop on Speech, Language and Audio in Multimedia (SLAM), Marseille, France, August 22-23, 2013."
+99d3bc6d62675297693e5e57ff0770e7017f9637,Hierarchical Invariant Feature Learning with Marginalization for Person Re-Identification,"Hierarchical Invariant Feature Learning with
+Marginalization for Person Re-Identification
+Rahul Rama Varior, Student Member, IEEE, Gang Wang, Member, IEEE"
+9990e0b05f34b586ffccdc89de2f8b0e5d427067,Auto - Optimized Multimodal Expression Recognition Framework Using 3 D Kinect Data for ASD Therapeutic Aid,"International Journal of Modeling and Optimization, Vol. 3, No. 2, April 2013
+Auto-Optimized Multimodal Expression Recognition
+Framework Using 3D Kinect Data for ASD Therapeutic
+Amira E. Youssef, Sherin F. Aly, Ahmed S. Ibrahim, and A. Lynn Abbott
+regarding
+emotion
+recognize"
+99d7678039ad96ee29ab520ff114bb8021222a91,Political image analysis with deep neural networks,"Political image analysis with deep neural
+networks
+L. Jason Anastasopoulos∗
+Shiry Ginosar§.
+Dhruvil Badani†
+Jake Ryland Williams¶
+Crystal Lee‡
+November 28, 2017"
+9922a2ec8dfb307bb1fcb334098fd912e23b3bab,Particle-based pedestrian path prediction using LSTM-MDL models,"Particle-based Pedestrian Path Prediction using LSTM-MDL Models
+Ronny Hug∗, Stefan Becker∗, Wolfgang H¨ubner∗ and Michael Arens∗"
+99ae92bae7c873432a6a60238b33d494bbae13eb,Recognition of Human Pose from Images Based on Graph Spectra,"RECOGNITION OF HUMAN POSE FROM IMAGES BASED ON GRAPH SPECTRA
+A. A. Zakharov a *, A. E. Barinov a, A. L. Zhiznyakov a
+Murom Institut Vladimir State University, CAD Department, , 602264, Orlovskaya 23, Murom, Russian Federation, aa-
+Commission VI, WG VI/4
+KEY WORDS: Image Recognition, Human Pose, Spectral Graph Matching"
+99227909e5733d76b0d50fc3fab975ab7a43fce3,A Cascaded Inception of Inception Network with Attention Modulated Feature Fusion for Human Pose Estimation,"A Cascaded Inception of Inception Network with Attention Modulated Feature
+Fusion for Human Pose Estimation
+Submission ID: 2065"
+522fab628aab972f39835521e31564b4b6c64fe5,Vehicle Classification on Low-resolution and Occluded images: A low-cost labeled dataset for augmentation,"Vehicle Classification on Low-resolution and
+Occluded images: A low-cost labeled dataset for
+ugmentation
+Anonymous Author(s)
+Affiliation
+Address
+email"
+52012b4ecb78f6b4b9ea496be98bcfe0944353cd,Using Support Vector Machine and Local Binary Pattern for Facial Expression Recognition,"JOURNAL OF COMPUTATION IN BIOSCIENCES AND ENGINEERING
+Journal homepage: http://scienceq.org/Journals/JCLS.php
+Research Article
+Using Support Vector Machine and Local Binary Pattern for Facial Expression
+Recognition
+Open Access
+Ayeni Olaniyi Abiodun 1, Alese Boniface Kayode1, Dada Olabisi Matemilayo2
+1. Department of Computer Science, Federal University Technology Akure, PMB 704, Akure, Nigeria.
+. Department of computer science, Kwara state polytechnic Ilorin, Kwara-State, Nigeria.
+. *Corresponding author: Ayeni Olaniyi Abiodun Mail Id:
+Received: September 22, 2015, Accepted: December 14, 2015, Published: December 14, 2015."
+5293960de53b0118ef3c8b410d27b23b9cec9bf7,Online Multi-Object Tracking with Dual Matching Attention Networks,"Online Multi-Object Tracking with
+Dual Matching Attention Networks
+Ji Zhu1,2, Hua Yang1(cid:63), Nian Liu3, Minyoung Kim4,
+Wenjun Zhang1, and Ming-Hsuan Yang5,6
+Northwestern Polytechnical University 4Massachusetts Institute of Technology
+Shanghai Jiao Tong University 2Visbody Inc
+5University of California, Merced 6Google Inc
+{jizhu1023,"
+527cc8cd2af06a9ac2e5cded806bab5c3faad9cf,Abnormal Event Detection in Videos Using Spatiotemporal Autoencoder,"Abnormal Event Detection in Videos
+using Spatiotemporal Autoencoder
+Yong Shean Chong
+Yong Haur Tay
+Lee Kong Chian Faculty of Engineering Science,
+Universiti Tunku Abdul Rahman, 43000 Kajang, Malaysia.
+January 9, 2017"
+529e2ce6fb362bfce02d6d9a9e5de635bde81191,Normalization of Face Illumination Based on Large-and Small-Scale Features,"This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication.
+> TIP-05732-2009<
+Normalization of Face Illumination Based
+on Large- and Small- Scale Features
+Xiaohua Xie, Wei-Shi Zheng, Member, IEEE, Jianhuang Lai*, Member, IEEE
+Pong C. Yuen, Member, IEEE, Ching Y. Suen, IEEE Fellow"
+52887969107956d59e1218abb84a1f834a314578,Travel Recommendation by Mining People Attributes and Travel Group Types From Community-Contributed Photos,"Travel Recommendation by Mining People
+Attributes and Travel Group Types From
+Community-Contributed Photos
+Yan-Ying Chen, An-Jung Cheng, and Winston H. Hsu, Senior Member, IEEE"
+52f71cc9c312aa845867ad1695c25a6d1d94ba0e,The invariance assumption in process-dissociation models: an evaluation across three domains.,"Journal of Experimental Psychology: General
+015, Vol. 144, No. 1, 198 –221
+0096-3445/15/$12.00
+© 2014 American Psychological Association
+http://dx.doi.org/10.1037/xge0000044
+The Invariance Assumption in Process-Dissociation Models:
+An Evaluation Across Three Domains
+Karl Christoph Klauer, Kerstin Dittrich,
+nd Christine Scholtes
+Albert-Ludwigs-Universität Freiburg
+Andreas Voss
+Universität Heidelberg
+The class of process-dissociation models, a subset of the class of multinomial processing-tree models, is
+one of the best understood classes of models used in experimental psychology. A number of prominent
+debates have addressed fundamental assumptions of process-dissociation models, leading, in many cases,
+to conceptual clarifications and extended models that address identified issues. One issue that has so far
+defied empirical clarification is how to evaluate the invariance assumption for the dominant process.
+Violations of the invariance assumption have, however, the potential to bias conventional process-
+dissociation analyses in different ways, and they can cause misleading theoretical interpretations and
+onclusions. Based on recent advances in multinomial modeling, we propose new approaches to examine"
+52e0c03dd661d032865dfedd91ca49542ccfc2a3,Improving Human Action Recognition Using Score Distribution and Ranking,"Improving Human Action Recognition
+using Score Distribution and Ranking
+Minh Hoai1,2 and Andrew Zisserman1
+Visual Geometry Group, Dept. Engineering Science, University of Oxford.
+Department of Computer Science, Stony Brook University."
+523abe29cc278f9daf03fe74d1e09d9e2711b73e,Facial Recognition System: A Review,"Debolina S. De, International Journal of Computer Science and Mobile Computing, Vol.4 Issue.10, October- 2015, pg. 7-11
+Available Online at www.ijcsmc.com
+International Journal of Computer Science and Mobile Computing
+A Monthly Journal of Computer Science and Information Technology
+ISSN 2320–088X
+IJCSMC, Vol. 4, Issue. 10, October 2015, pg.7 – 11
+REVIEW ARTICLE
+Facial Recognition System: A Review
+Debolina S. De
+Computer Engineering Department, Mukesh Patel School of Technology Management and Engineering, India"
+5251cb5349e37495b3ca29b06e6ed7422f12d126,A Pedestrian Detector Using Histograms of Oriented Gradients and a Support Vector Machine Classifier,"Proceedings of the 2007 IEEE
+Intelligent Transportation Systems Conference
+Seattle, WA, USA, Sept. 30 - Oct. 3, 2007
+MoD2.2
+-4244-1396-6/07/$25.00 ©2007 IEEE."
+524634e1055637b7c22b29e7e36437f4ba80df04,Thermal to Visible Synthesis of Face Images Using Multiple Regions,"Thermal to Visible Synthesis of Face Images using Multiple Regions
+Benjamin S. Riggan1,*
+Nathaniel J. Short1,2
+Shuowen Hu1
+U.S. Army Research Laboratory, 2800 Powder Mill Rd., Adelphi, MD 20783
+Booz Allen Hamilton, 8283 Grennsboro Dr., McLean, VA 22102
+*Corresponding author:"
+52884a0c7913be319c1a2395f009cea47b03f128,Explorer Learning Grounded Meaning Representations with Autoencoders,"Learning Grounded Meaning Representations with Autoencoders
+Citation for published version:
+Silberer, C & Lapata, M 2014, 'Learning Grounded Meaning Representations with Autoencoders'. in
+Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long
+Papers). Association for Computational Linguistics, Baltimore, Maryland, pp. 721-732.
+Link:
+Link to publication record in Edinburgh Research Explorer
+Document Version:
+Publisher final version (usually the publisher pdf)
+Published In:
+Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long
+Papers)
+General rights
+Copyright for the publications made accessible via the Edinburgh Research Explorer is retained by the author(s)
+nd / or other copyright owners and it is a condition of accessing these publications that users recognise and
+bide by the legal requirements associated with these rights.
+Take down policy
+The University of Edinburgh has made every reasonable effort to ensure that Edinburgh Research Explorer
+ontent complies with UK legislation. If you believe that the public display of this file breaches copyright please
+ontact providing details, and we will remove access to the work immediately and"
+52b6df1fe810d36fd615eb7c47aa1fd29376e769,Graph Mining for Object Tracking in Videos,"Graph Mining for Object Tracking in Videos
+Fabien Diot, Elisa Fromont, Baptiste Jeudy, Emmanuel Marilly, Olivier
+Martinot
+To cite this version:
+Fabien Diot, Elisa Fromont, Baptiste Jeudy, Emmanuel Marilly, Olivier Martinot. Graph
+Mining for Object Tracking in Videos. European Conference on Machine Learning and Prin-
+iples and Practice of Knowledge Discovery in Databases, Sep 2012, Bristol, United Kingdom.
+Springer, LNCS (LNAI 6321), pp.394-409, 2012. <hal-00714705v2>
+HAL Id: hal-00714705
+https://hal.archives-ouvertes.fr/hal-00714705v2
+Submitted on 20 Sep 2012
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destin´ee au d´epˆot et `a la diffusion de documents
+scientifiques de niveau recherche, publi´es ou non,"
+521120c3907677e17708c17c5b6bab9087e61c5b,"l2, 1-Norm Regularized Discriminative Feature Selection for Unsupervised Learning","(cid:2)2,1-Norm Regularized Discriminative Feature
+Selection for Unsupervised Learning
+Yi Yang1, Heng Tao Shen1, Zhigang Ma2, Zi Huang1, Xiaofang Zhou1
+School of Information Technology & Electrical Engineering, The University of Queensland.
+Department of Information Engineering & Computer Science, University of Trento.
+yangyi {huang,"
+5223f3485b96bffe7dd4b3aa71e63fd2b049fcf0,Is the Pedestrian going to Cross? Answering by 2D Pose Estimation,"Is the Pedestrian going to Cross? Answering by 2D Pose Estimation
+Zhijie Fang and Antonio M. L´opez"
+52417b0406886154f0b4e2343ad6ac18c0484ec4,Ecological legacies of civil war: 35-year increase in savanna tree cover following wholesale large-mammal declines,"Journal of Ecology 2016, 104, 79–89
+doi: 10.1111/1365-2745.12483
+Ecological legacies of civil war: 35-year increase in
+savanna tree cover following wholesale large-mammal
+declines
+Joshua H. Daskin1*, Marc Stalmans2 and Robert M. Pringle1
+Department of Ecology and Evolutionary Biology, 106A Guyot Hall, Princeton University Princeton, NJ 08540, USA;
+nd 2Department of Scientific Services, Gorongosa National Park, Sofala Province, Mozambique
+Summary
+. Large mammalian herbivores (LMH) exert strong effects on plants in tropical savannas, and
+many wild LMH populations are declining. However, predicting the impacts of these declines on
+vegetation structure remains challenging.
+. Experiments suggest that tree cover can increase rapidly following LMH exclusion. Yet it is
+unclear whether these results scale up to predict ecosystem-level impacts of LMH declines, which
+often alter fire regimes, trigger compensatory responses of other herbivores and accompany anthro-
+pogenic land-use changes. Moreover, theory predicts that grazers and browsers should have oppos-
+ing effects on tree cover, further complicating efforts to forecast the outcomes of community-wide
+declines.
+. We used the near-extirpation of grazing and browsing LMH from Gorongosa National Park dur-
+ing the Mozambican Civil War (1977–1992) as a natural experiment to test whether megafaunal col-"
+52ed30920f2f96970c4f79d6768436ed855dad42,Active image pair selection for continuous person re-identification,"ACTIVE IMAGE PAIR SELECTION FOR CONTINUOUS PERSON RE-IDENTIFICATION
+Abir Das, Rameswar Panda, Amit Roy-Chowdhury
+Electrical and Computer Engineering Department, University of California, Riverside, USA"
+52258ec5ec73ce30ca8bc215539c017d279517cf,Recognizing Faces with Expressions: Within-class Space and Between-class Space,"Recognizing Faces with Expressions: Within-class Space and Between-class Space
+Department of Computer Science and Engineering, Zhejang University, Hangzhou 310027,P.R.China
+Email:
+Yu Bing Chen Ping Jin Lianfu"
+526ce11a6c80716fca69bdc111f32dfbe045e400,A Survey on Dataset Recognition of 3 D Face with Missing Parts,"International Journal of Science and Research (IJSR)
+ISSN (Online): 2319-7064
+Index Copernicus Value (2013): 6.14 | Impact Factor (2014): 5.611
+A Survey on Dataset Recognition of 3D Face with
+Missing Parts
+Madhura Patil
+ME Student, Department of Computer Engineering, Sinhgad Academy of Engg. Pune, Maharashtra, India
+possibly
+recognition.3D
+recognization
+methodology"
+52969cdd2c5eaccb534fe1296a61517b7ec42a54,Human Identification based on Ear Recognition,"Human Identification based on Ear Recognition
+S. Gangaram1, and S. Viriri1,2"
+526ce5c72af5e1f93b8029a26e2eed7d1ac009f5,0 Constructing Kernel Machines in the Empirical Kernel Feature Space,"We are IntechOpen,
+the world’s leading publisher of
+Open Access books
+Built by scientists, for scientists
+,800
+16,000
+Open access books available
+International authors and editors
+Downloads
+Our authors are among the
+Countries delivered to
+TOP 1%
+2.2%
+most cited scientists
+Contributors from top 500 universities
+Selection of our books indexed in the Book Citation Index
+in Web of Science™ Core Collection (BKCI)
+Interested in publishing with us?
+Contact
+Numbers displayed above are based on latest data collected."
+5265be9c7b8b22f4e06a01736bbedf171caee74e,Covariance of Motion and Appearance Featuresfor Spatio Temporal Recognition Tasks,"Covariance of Motion and Appearance Features
+for Human Action and Gesture Recognition
+Subhabrata Bhattacharya, Nasim Souly and Mubarak Shah"
+524890eef6beaeb2e206c7b1bf51b58298eb55ec,Florian et al_ICMCSSE 2012_3,"Efficient and Effective Gabor Feature
+Representation for Face Detection
+Yasuomi D. Sato, Yasutaka Kuriya"
+527ed756eba3bc77eb58d22d4cfe27da04d3bbbb,Adaptive skew-sensitive fusion of ensembles and their application to face re-identification,"Adaptive Skew-Sensitive Fusion of Ensembles and
+their Application to Face Re-Identification
+Miguel De-la-Torre∗†, Eric Granger∗, Robert Sabourin∗
+´Ecole de technologie sup´erieure, Universit´e du Qu´ebec, Montr´eal, Canada
+Centro Universitario de Los Valles, Universidad de Guadalajara, Ameca, M´exico"
+52144c6d20ddea70e59514c2aa9ec7dc801e5c5e,An Investigation of Face Recognition Characteristics Using PCA and ICA,"Yundi Fu et al, International Journal of Computer Science and Mobile Computing, Vol.3 Issue.2, February- 2014, pg. 110-123
+Available Online at www.ijcsmc.com
+International Journal of Computer Science and Mobile Computing
+A Monthly Journal of Computer Science and Information Technology
+ISSN 2320–088X
+IJCSMC, Vol. 3, Issue. 2, February 2014, pg.110 – 123
+RESEARCH ARTICLE
+An Investigation of Face Recognition
+Characteristics Using PCA and ICA
+Yundi Fu1, Yongli Cao1, Arun Kumar Sangaiah2
+Department of Software Engineering, University of Electronic Science and Technology, China
+School of Computing Science and Engineering, VIT University, Vellore, India"
+529341eb910ca5125b4aa6aa83bfc5fc8bf44fe3,V&L Net 2014 The 3rd Annual Meeting Of The EPSRC Network On Vision & Language and The 1st Technical Meeting of the European Network on Integrating Vision and Language,"V&LNet2014The3rdAnnualMeetingOfTheEPSRCNetworkOnVision&LanguageandThe1stTechnicalMeetingoftheEuropeanNetworkonIntegratingVisionandLanguageAWorkshopofthe25thInternationalConferenceonComputationalLinguistics(COLING2014)ProceedingsAugust23,2014Dublin,Ireland"
+529baf1a79cca813f8c9966ceaa9b3e42748c058,Triangle wise Mapping Technique to Transform one Face Image into Another Face Image,"Triangle Wise Mapping Technique to Transform one Face Image into Another Face Image
+{tag} {/tag}
+International Journal of Computer Applications
+© 2014 by IJCA Journal
+Volume 87 - Number 6
+Year of Publication: 2014
+Authors:
+Rustam Ali Ahmed
+Bhogeswar Borah
+10.5120/15209-3714
+{bibtex}pxc3893714.bib{/bibtex}"
+527d596a56aa238dfc450c3ebfdae31e82c6c175,Face Detection Methods,"Face Detection Methods
+ZYAD SHAABAN
+Department of Information Technology
+College of Computers and Information Technology
+University of Tabuk
+Tabuk 71491
+KINGDOM OF SAUDI ARABIA"
+5239001571bc64de3e61be0be8985860f08d7e7e,Deep Appearance Models: A Deep Boltzmann Machine Approach for Face Modeling,"SUBMITTED TO IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, JUNE 2016
+Deep Appearance Models: A Deep Boltzmann
+Machine Approach for Face Modeling
+Chi Nhan Duong, Student, IEEE, Khoa Luu, Member, IEEE,
+Kha Gia Quach, Student, IEEE, Tien D. Bui, Senior Member, IEEE"
+558c587373e2ea44898f70de7858da71aa217b8d,Cross-Lingual Image Caption Generation,"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics, pages 1780–1790,
+Berlin, Germany, August 7-12, 2016. c(cid:13)2016 Association for Computational Linguistics"
+555488f1da920bb1a06b4d19ff687805993eb7fb,Finding Speaker Face Region by Audiovisual Correlation,"Author manuscript, published in ""Workshop on Multi-camera and Multi-modal Sensor Fusion Algorithms and Applications - M2SFA2
+008, Marseille : France (2008)"""
+554b53f6e5e37d0f8c8eade1a962b39ce591f6ae,"COCO-CN for Cross-Lingual Image Tagging, Captioning and Retrieval","COCO-CN for Cross-Lingual Image Tagging, Captioning and
+Retrieval
+Xirong Li, Xiaoxu Wang, Chaoxi Xu, Weiyu Lan, Qijie Wei, Gang Yang, Jieping Xu
+Key Lab of Data Engineering and Knowledge Engineering, Renmin University of China
+Multimedia Computing Lab, Renmin University of China"
+55ea0c775b25d9d04b5886e322db852e86a556cd,DOCK: Detecting Objects by transferring Common-sense Knowledge,"DOCK: Detecting Objects
+y transferring Common-sense Knowledge
+Santosh Divvala2,3[0000−0003−4042−5874], Ali Farhadi2,3[0000−0001−7249−2380], and
+Krishna Kumar Singh1,3[0000−0002−8066−6835],
+Yong Jae Lee1[0000−0001−9863−1270]
+University of California, Davis 2University of Washington 3Allen Institute for AI
+https://dock-project.github.io"
+554b9478fd285f2317214396e0ccd81309963efd,Spatio-Temporal Action Localization For Human Action Recognition in Large Dataset,"Spatio-Temporal Action Localization For Human Action
+Recognition in Large Dataset
+Sameh MEGRHI1, Marwa JMAL 2, Azeddine BEGHDADI1 and Wided Mseddi1,2
+L2TI, Institut Galil´ee, Universit´e Paris 13, France;
+SERCOM, Ecole Polytechnique de Tunisie"
+5582aafd943f2b67805cdb4aba9e2f288dfe0ca8,"Human Object Sketches: Datasets, Descriptors, Computational Recognition and 3d Shape Retrieval","Human Object Sketches:
+Datasets, Descriptors, Computational
+Recognition and 3d Shape Retrieval
+vorgelegt von
+Mathias Eitz, Dipl.-Inf., M.Eng.
+us Friedrichshafen
+von der Fakultät IV - Elektrotechnik und Informatik
+der Technischen Universität Berlin
+zur Erlangung des akademischen Grades
+Doktor der Ingenieurwissenschaften
+– Dr.-Ing. –
+genehmigte Dissertation
+Promotionsausschuss:
+Vorsitzender: Prof. Dr. Oliver Brock
+Gutachter: Prof. Dr. Marc Alexa
+Gutachter: Prof. Tamy Boubekeur, PhD
+Tag der wissenschaftlichen Aussprache: 07.12.2012
+Berlin 2012"
+558613d96d7c125c00eae0c58c56ee6983208fd5,Identification of Unmodeled Objects from Symbolic Descriptions,"Identification of Unmodeled Objects from Symbolic Descriptions*
+Andrea Baisero, Stefan Otte, Peter Englert and Marc Toussaint"
+550edcdc27aff4e7ea8807356a265a0031434a49,Fully Convolutional Attention Localization Networks: Efficient Attention Localization for Fine-Grained Recognition,"Fine-Grained Recognition with Automatic and Efficient Part Attention
+Xiao Liu, Tian Xia, Jiang Wang, Yi Yang, Feng Zhou and Yuanqing Lin
+Baidu Research
+{liuxiao12,xiatian,wangjiang03, yangyi05, zhoufeng09,"
+55c68c1237166679d2cb65f266f496d1ecd4bec6,Learning to score the figure skating sports videos,"Learning to Score Figure Skating Sport Videos
+Chengming Xu, Yanwei Fu, Zitian Chen,Bing Zhang, Yu-Gang Jiang, Xiangyang Xue"
+55c22f9c8f76b40793a8473248873f726abd8ce9,Unpaired Image-to-Image Translation Using Cycle-Consistent Adversarial Networks,"Unpaired Image-to-Image Translation
+using Cycle-Consistent Adversarial Networks
+Jun-Yan Zhu∗
+Taesung Park∗
+Berkeley AI Research (BAIR) laboratory, UC Berkeley
+Phillip Isola
+Alexei A. Efros
+Figure 1: Given any two unordered image collections X and Y , our algorithm learns to automatically “translate” an image
+from one into the other and vice versa: (left) Monet paintings and landscape photos from Flickr; (center) zebras and horses
+from ImageNet; (right) summer and winter Yosemite photos from Flickr. Example application (bottom): using a collection
+of paintings of famous artists, our method learns to render natural photographs into the respective styles."
+558c4917dc9a1d34f62c0ab713b1b9a37ad04853,Action Recognition Using Multilevel Features and Latent Structural SVM,"Action Recognition Using Multilevel Features and
+Latent Structural SVM
+Xinxiao Wu, Dong Xu, Member, IEEE, Lixin Duan, Jiebo Luo, Fellow, IEEE, and Yunde Jia, Member, IEEE"
+55dcaee65936583846e8c4fa36589df066ebadfa,Learning to Relate Literal and Sentimental Descriptions of Visual Properties,"Atlanta, Georgia, 9–14 June 2013. c(cid:13)2013 Association for Computational Linguistics
+Proceedings of NAACL-HLT 2013, pages 416–425,"
+555222f2ad6dae447eef04f96fa40c1b8a397150,CaloriNet: From silhouettes to calorie estimation in private environments,"CaloriNet: From silhouettes to calorie estimation in private
+environments
+Alessandro Masullo∗
+Tilo Burghardt
+Victor Ponce-López
+Dima Damen
+Majid Mirmehdi
+Sion Hannuna
+June 22, 2018"
+5502dfe47ac26e60e0fb25fc0f810cae6f5173c0,Affordance Prediction via Learned Object Attributes,"Affordance Prediction via Learned Object Attributes
+Tucker Hermans
+James M. Rehg
+Aaron Bobick"
+5582bebed97947a41e3ddd9bd1f284b73f1648c2,Grad-CAM: Why did you say that? Visual Explanations from Deep Networks via Gradient-based Localization,"Visual Explanations from Deep Networks via Gradient-based Localization
+Grad-CAM: Why did you say that?
+Ramprasaath R. Selvaraju
+Abhishek Das
+Devi Parikh
+Ramakrishna Vedantam
+Dhruv Batra
+Virginia Tech
+Michael Cogswell
+{ram21, abhshkdz, vrama91, cogswell, parikh,
+(a) Original Image
+(b) Guided Backprop ‘Cat’
+(c) Grad-CAM for ‘Cat’
+(d) Guided Grad-CAM ‘Cat’
+(e) Occlusion Map ‘Cat’
+(f) ResNet Grad-CAM ‘Cat’
+(g) Original Image
+(h) Guided Backprop ‘Dog’
+(i) Grad-CAM for ‘Dog’
+(l) ResNet Grad-CAM ‘Dog’"
+5556234869c36195ffdcd29349e5dcdf695023e9,Minimum Distance between Pattern Transformation Manifolds: Algorithm and Applications,"JULY 2009
+Minimum Distance between
+Pattern Transformation Manifolds:
+Algorithm and Applications
+Effrosyni Kokiopoulou, Student Member, IEEE, and Pascal Frossard, Senior Member, IEEE"
+55ef8c3c28e2afda486d8471205204927127c605,Multiview Alignment Hashing for Efficient Image Search,"Multiview Alignment Hashing for Efficient Image
+Search
+Li Liu, Mengyang Yu, Student Member, IEEE, and Ling Shao, Senior Member, IEEE"
+5531e728850185b80835a78db2e4fd23e288f359,Towards Reading Hidden Emotions: A comparative Study of Spontaneous Micro-expression Spotting and Recognition Methods,"Reading Hidden Emotions: Spontaneous
+Micro-expression Spotting and Recognition
+Xiaobai Li, Student Member, IEEE, Xiaopeng Hong, Member, IEEE, Antti Moilanen, Xiaohua Huang, Student
+Member, IEEE, Tomas Pfister, Guoying Zhao, Senior Member, IEEE, and Matti Pietik¨ainen, Fellow, IEEE"
+5520acfa1f4e678f1abbaab67ec76e903c3d3bdc,SALSA: A Novel Dataset for Multimodal Group Behavior Analysis,"SALSA: A Novel Dataset for Multimodal Group
+Behavior Analysis
+Xavier Alameda-Pineda, Jacopo Staiano, Ramanathan Subramanian, Member, IEEE, Ligia Batrinca,
+Elisa Ricci, Member, IEEE, Bruno Lepri, Oswald Lanz, Member, IEEE, Nicu Sebe, Senior Member, IEEE"
+558719ec858120908ef40b27a5d32904a68f6dd9,Toward an Automatic Evaluation of Retrieval Performance with Large Scale Image Collections,"Towards an Automatic Evaluation of Retrieval Performance
+with Large Scale Image Collections
+Adrian Popescu1, Eleftherios Spyromitros-Xioufis2, Symeon Papadopoulos2, Hervé Le
+Borgne1, Ioannis Kompatsiaris2
+CEA, LIST, 91190 Gif-sur-Yvette, France,
+CERTH-ITI, Thermi-Thessaloniki, Greece,"
+559295770dc2e2e3a1348df31ac5c3f3e66f1764,Generating Multiple Hypotheses for Human 3D Pose Consistent with 2D Joint Detections,"Generating Multiple Hypotheses for Human 3D Pose Consistent with 2D Joint Detections
+Johns Hopkins University
+Johns Hopkins University
+Alan L. Yuille
+Baltimore, USA
+Ehsan Jahangiri
+Baltimore, USA"
+551fedfeaf55e3f7a7cf19d2b21f1a56f8cbe9f6,Egocentric Vision-based Future Vehicle Localization for Intelligent Driving Assistance Systems,"Egocentric Vision-based Future Vehicle Localization
+for Intelligent Driving Assistance Systems
+Yu Yao1∗, Mingze Xu2∗, Chiho Choi3, David J. Crandall2, Ella M. Atkins1, and Behzad Dariush3"
+55a158f4e7c38fe281d06ae45eb456e05516af50,Simile Classifiers for Face Classification,"The 22nd International Conference on Computer Graphics and Vision
+GraphiCon’2012"
+55cad1f4943018459b761f89afd9292d347610f2,Self-supervised Multi-level Face Model Learning for Monocular Reconstruction at over 250 Hz,
+5543224d6f8e22e7eaabfcbc4bed9e8a9451e3f8,Automatische Bildfolgenanalyse mit statistischen Mustererkennungsverfahren,"Automatische Bildfolgenanalyse
+mit statistischen
+Mustererkennungsverfahren
+Vom Fachbereich Elektrotechnik
+der Gerhard-Mercator-Universit¨at Duisburg
+zur Erlangung des akademischen Grades eines
+Doktors der Ingenieurwissenschaften
+genehmigte Dissertation
+Dipl.-Ing. Stefan Eickeler
+us Duisburg
+Referent: Prof. Dr. Gerhard Rigoll
+Korreferent: Prof. Dr. Martin Reiser
+Tag der m¨undlichen Pr¨ufung: 5. November 2001"
+5550a6df1b118a80c00a2459bae216a7e8e3966c,A perusal on Facial Emotion Recognition System ( FERS ),"ISSN: 0974-2115
+www.jchps.com Journal of Chemical and Pharmaceutical Sciences
+A perusal on Facial Emotion Recognition System (FERS)
+School of Information Technology and Engineering, VIT University, Vellore, 632014, India
+Krithika L.B
+*Corresponding author: E-Mail:"
+555b332252522fce0f31b0c0b7630cf4f36ba0a5,Face processing in Williams syndrome and Autism,"Face processing in Williams syndrome and Autism
+Deborah Michelle Riby
+Department of Psychology,
+University of Stirling"
+55ba5e4c07f6ecf827bfee04e96de35a170f7485,This Dissertation entitled MODELING THE HUMAN FACE THROUGH MULTIPLE VIEW THREE-DIMENSIONAL STEREOPSIS: A SURVEY AND COMPARATIVE ANALYSIS OF FACIAL RECOGNITION OVER MULTIPLE MODALITIES,"This Dissertation
+entitled
+MODELING THE HUMAN FACE THROUGH MULTIPLE
+VIEW THREE-DIMENSIONAL STEREOPSIS: A SURVEY AND
+COMPARATIVE ANALYSIS OF FACIAL RECOGNITION
+OVER MULTIPLE MODALITIES
+typeset with nddiss2"" v1.0 (2004/06/15) on July 26, 2006 for
+Xin Chen
+This LATEX 2"" class(cid:12)le conforms to the University of Notre Dame style guide-
+lines established in Spring 2004. However it is still possible to generate a non-
+onformant document if the published instructions are not followed! Be sure to re-
+fer to the published Graduate School guidelines at http://graduateschool.nd.edu
+s well.
+It is YOUR resposnsibility to ensure that the Chapter titles and Table caption
+titles are put in CAPS LETTERS. This class(cid:12)le does NOT do that! This way,
+you have total control over how you want the symbols and sub-/superscripts in
+titles and captions look like.
+This summary page can be disabled by specifying the nosummary option to the class
+invocation. (i.e., ndocumentclass[...,nosummary,...]fnddiss2eg)
+THIS PAGE IS NOT PART OF THE THESIS, BUT"
+5522073ebd53a6502cec9d716a77bb2c18aca593,Multi-view Body Part Recognition with Random Forests,"KAZEMI, BURENIUS, AZIZPOUR, SULLIVAN: MULTI-VIEW BODY PART RECOGNITION 1
+Multi-view Body Part Recognition with
+Random Forests
+CVAP / KTH
+The Royal Institute of Technology
+Stockholm, Sweden
+Vahid Kazemi
+Magnus Burenius
+Hossein Azizpour
+Josephine Sullivan"
+55079a93b7d1eb789193d7fcdcf614e6829fad0f,Efficient and Robust Inverse Lighting of a Single Face Image Using Compressive Sensing,"Efficient and Robust Inverse Lighting of a Single Face Image using Compressive
+Sensing
+Miguel Heredia Conde†, Davoud Shahlaei#, Volker Blanz# and Otmar Loffeld†
+Center for Sensor Systems† (ZESS) and Institute for Vision and Graphics#, University of Siegen
+57076 Siegen, Germany"
+550c369cc3080c03b89d738d82f1ed50145c5aa7,"Information, Technology, and Information Worker Productivity","Information, Technology and Information Worker Productivity
+NYU Stern School of Business & MIT, 44 West 4th Street Room: 8-81, New York, NY 10012
+MIT Sloan School of Management, Room: E53-313, 50 Memorial Drive, Cambridge, MA 02142
+Sinan Aral
+Erik Brynjolfsson
+Marshall Van Alstyne
+Boston University & MIT, 595 Commonwealth Avenue, Boston, MA 02215
+We study the fine-grained relationships among information flows, IT use, and individual information-worker produc-
+tivity, by analyzing work at a midsize executive recruiting firm. We analyze both project-level and individual-level
+performance using: (1) direct observation of over 125,000 e-mail messages over a period of 10 months by individual
+workers (2) detailed accounting data on revenues, compensation, project completion rates, and team membership for
+over 1300 projects spanning 5 years, and (3) survey data on a matched set of the same workers’ IT skills, IT use and in-
+formation sharing. These detailed data permit us to econometrically evaluate a multistage model of production and in-
+teraction activities at the firm, and to analyze the relationships among communications flows, key technologies, work
+practices, and output. We find that (a) the structure and size of workers’ communication networks are highly correlated
+with their performance; (b) IT use is strongly correlated with productivity but mainly by allowing multitasking rather
+than by speeding up work; (c) productivity is greatest for small amounts of multitasking but beyond an optimum, mul-
+titasking is associated with declining project completion rates and revenue generation; and (d) asynchronous informa-
+tion seeking such as email and database use promotes multitasking while synchronous information seeking over the
+phone shows a negative correlation. Overall, these data show statistically significant relationships among social net-"
+551fa37e8d6d03b89d195a5c00c74cc52ff1c67a,GeThR-Net: A Generalized Temporally Hybrid Recurrent Neural Network for Multimodal Information Fusion,"GeThR-Net: A Generalized Temporally Hybrid
+Recurrent Neural Network for Multimodal
+Information Fusion
+Ankit Gandhi1 ∗, Arjun Sharma1 ∗ , Arijit Biswas2, and Om Deshmukh1
+Xerox Research Centre India; 2 Amazon Development Center India
+(*-equal contribution)"
+5592574c82eec9367e9173b7820ff329a27b6c21,Image Enhancement and Automated Target Recognition Techniques for Underwater Electro-Optic Imagery,"Image Enhancement and Automated Target Recognition
+Techniques for Underwater Electro-Optic Imagery
+Thomas Giddings (PI), Cetin Savkli and Joseph Shirron
+Metron, Inc.
+1911 Freedom Dr., Suite 800
+Reston, VA 20190
+phone: (703) 437-2428 fax: (703) 787-3518 email:
+Contract Number N00014-07-C-0351
+http:www.metsci.com
+LONG TERM GOALS
+The long-term goal of this project is to provide a flexible, accurate and extensible automated target
+recognition (ATR) system for use with a variety of imaging and non-imaging sensors. Such an ATR
+system, once it achieves a high level of performance, can relieve human operators from the tedious
+usiness of pouring over vast quantities of mostly mundane data, calling the operator in only when the
+omputer assessment involves an unacceptable level of ambiguity. The ATR system will provide most
+leading edge algorithms for detection, segmentation, and classification while incorporating many novel
+lgorithms that we are developing at Metron. To address one of the most critical challenges in ATR
+technology, the system will also provide powerful feature extraction routines designed for specific
+pplications of current interest.
+OBJECTIVES"
+55c40cbcf49a0225e72d911d762c27bb1c2d14aa,Indian Face Age Database : A Database for Face Recognition with Age Variation,"Indian Face Age Database: A Database for Face Recognition with Age Variation
+{tag} {/tag}
+International Journal of Computer Applications
+Foundation of Computer Science (FCS), NY, USA
+Volume 126
+Number 5
+Year of Publication: 2015
+Authors:
+Reecha Sharma, M.S. Patterh
+10.5120/ijca2015906055
+{bibtex}2015906055.bib{/bibtex}"
+55202f10bb1d7640b0b279a4cdc8e9925cd9ef81,ICM: An Intuitive Model Independent and Accurate Certainty Measure for Machine Learning,
+9717bd66ad50aedabaea0f3af784c7ba9643b686,TransFlow: Unsupervised Motion Flow by Joint Geometric and Pixel-level Estimation,"TransFlow: Unsupervised Motion Flow by Joint
+Geometric and Pixel-level Estimation
+Stefano Alletto*, Davide Abati, Simone Calderara, Rita Cucchiara
+University of Modena and Reggio Emilia
+Via P. Vivarelli 10, Modena, Italy
+Luca Rigazio*
+Panasonic Silicon Valley Laboratory
+0900 North Tantau Avenue, Suite 200, Cupertino, CA, USA"
+97692960a11d4316880fb229cca699293e133945,An efficient multi-resolution SVM network approach for object detection in aerial images,"015 IEEE INTERNATIONAL WORKSHOP ON MACHINE LEARNING FOR SIGNAL PROCESSING, SEPT. 17–20, 2015, BOSTON, USA
+AN EFFICIENT MULTI-RESOLUTION SVM NETWORK APPROACH FOR OBJECT
+DETECTION IN AERIAL IMAGES
+J. Pasquet(cid:63)†
+M. Chaumont∗†
+G. Subsol †
+M. Derras(cid:63)
+LIRMM, Universit´e de Montpellier / CNRS, France
+(cid:63) Berger Levrault, Lab`ege, France
+Universit´e de Nˆımes, France"
+970e571305ed9dde9308e559694044e204d6e2ad,Learning Finer-class Networks for Universal Representations,"GIRARD ET AL.: FINER-CLASS NETWORKS
+Learning Finer-class Networks for Universal
+Representations
+Julien Girard12
+Youssef Tamaazousti123
+Hervé Le Borgne2
+Céline Hudelot3
+Both authors contributed equally.
+CEA LIST
+Vision Laboratory,
+Gif-sur-Yvette, France.
+CentraleSupélec,
+MICS Laboratory,
+Châtenay-Malabry, France."
+973e3d9bc0879210c9fad145a902afca07370b86,From Emotion Recognition to Website Customizations,"(IJACSA) International Journal of Advanced Computer Science and Applications,
+Vol. 7, No. 7, 2016
+From Emotion Recognition to Website
+Customizations
+O.B. Efremides
+School of Web Media
+Bahrain Polytechnic
+Isa Town, Kingdom of Bahrain"
+97104def2b92b430c02f595d7802f9ba23b74cc7,DispSegNet: Leveraging Semantics for End-to-End Learning of Disparity Estimation from Stereo Imagery,"DispSegNet: Leveraging Semantics for End-to-End Learning of
+Disparity Estimation from Stereo Imagery
+Junming Zhang1, Katherine A. Skinner2, Ram Vasudevan3 and Matthew Johnson-Roberson4"
+97b8249914e6b4f8757d22da51e8347995a40637,"Large-Scale Vehicle Detection, Indexing, and Search in Urban Surveillance Videos","Large-Scale Vehicle Detection, Indexing,
+and Search in Urban Surveillance Videos
+Rogerio Schmidt Feris, Associate Member, IEEE, Behjat Siddiquie, James Petterson,
+Yun Zhai, Associate Member, IEEE, Ankur Datta, Lisa M. Brown, Senior Member, IEEE, and
+Sharath Pankanti, Fellow, IEEE"
+9728c3e32f57b54dea94fa9737c8f300de5cc468,Imbalanced Malware Images Classification: a CNN based Approach,"Imbalanced Malware Images Classification: a CNN
+based Approach
+Songqing Yue
+University of Wisconsin"
+97bcf007516cb70d8cb17b7de6452aa06c4b9c76,GABAergic neurotransmission alterations in autism spectrum disorders,"Neurotransmitter 2015; 2: e1052. doi: 10.14800/nt.1052; © 2015 by Carla V Sesarini
+http://www.smartscitech.com/index.php/nt
+REVIEW
+GABAergic neurotransmission alterations in autism spectrum
+disorders
+Carla V Sesarini
+Instituto de Ciencias Básicas y Medicina Experimental (ICBME), Instituto Universitario del Hospital Italiano de Buenos Aires
+(HIBA), Potosi 4240 (C1199ACL), CABA, Argentina
+Correspondence: Carla V Sesarini
+E-mail:
+Received: October 04, 2015
+Published online: November 09, 2015
+Autism spectrum disorders (ASDs) are a group of complex disorders of neurodevelopment characterized by
+difficulties in social interaction, verbal and nonverbal communication, and repetitive behaviors. In ASD, deficits
+in social cognition and related cognitive functions would be the resultant of reduced synchronization between
+brain regions. A possible explanation for ASDs is the disturbance of the delicate balance between excitation and
+inhibition in the developing brain which may have profound impact in neurobehavioral phenotypes. At least
+some forms of autism would be caused by a disproportionately high level of excitation (or weaker inhibition) in
+neural circuits that mediate language and social behavior (local circuits). A more excitable cortex (more weakly
+inhibited) is functionally more poorly differentiated and could lead to broad ranging abnormalities in"
+972ef9ddd9059079bdec17abc8b33039ed25c99c,A Novel on understanding How IRIS Recognition works,"International Journal of Innovations in Engineering and Technology (IJIET)
+A Novel on understanding How IRIS
+Recognition works
+Vijay Shinde
+Dept. of Comp. Science
+M.P.M. College, Bhopal, India
+Prof. Prakash Tanwar
+Asst. Professor CSE
+M.P.M. College, Bhopal, India"
+97032b13f1371c8a813802ade7558e816d25c73f,Total Recall Final Report,"Total Recall Final Report
+Peter Collingbourne, Nakul Durve, Khilan Gudka, Steve Lovegrove, Jiefei Ma, Sadegh Shahrbaf
+Supervisor: Professor Duncan Gillies
+January 11, 2006"
+97b54703c267deef8c86ab6240c24d76a59864e7,Pixel Objectness: Learning to Segment Generic Objects Automatically in Images and Videos,"Pixel Objectness: Learning to Segment Generic
+Objects Automatically in Images and Videos
+Bo Xiong∗, Suyog Dutt Jain∗, and Kristen Grauman, Member, IEEE"
+97a0aba4e9a95db17c3d4367f59aad1f02e04b55,How far did we get in face spoofing detection?,"This manuscript is a preprint version. The final version of this paper is
+available in Engineering Applications of Artificial Intelligence, vol. 72,
+pp. 368-381, 2018. DOI: 10.1016/j.engappai.2018.04.013
+How far did we get in face spoofing detection?
+Luiz Souza, Luciano Oliveira, Mauricio Pamplona
+IVISION Lab, Federal University of Bahia
+Joao Papa
+RECOGNA Lab, S˜ao Paulo State University"
+97f9c3bdb4668f3e140ded2da33fe704fc81f3ea,An Experimental Comparison of Appearance and Geometric Model Based Recognition,"An Experimental Comparison of Appearance
+and Geometric Model Based Recognition
+J. Mundy, A. Liu, N. Pillow, A. Zisserman, S. Abdallah, S. Utcke,
+S. Nayar and C. Rothwell
+General Electric Corporate Research and Development, Schenectady, NY, USA
+Robotics Research Group, University of Oxford, Oxford, UK
+Dept. of Computer Science, Columbia University, NY, USA
+INRIA, Sophia Antipolis, France"
+97d811ae99bcbcf9f63c2f447041ab6d74a20b1e,Face recognition using truncated transform domain feature extraction,"The International Arab Journal of Information Technology, Vol. 12, No. 3, May 2015 211
+Face Recognition using Truncated Transform
+Domain Feature Extraction
+Rangan Kodandaram, Shashank Mallikarjun, Manikantan Krishnamuthan, and Ramachandran Sivan
+Department of Electronics and Communication Engineering, M.S. Ramaiah Institute of Technology, India"
+9729ff547b6882b49898c1f5abb69646edf77e71,Two Kinds of Statistics for Better Face Recognition,"Two Kinds of Statistics for Better Face Recognition
+Manuel Günther, Marco K. Müller and Rolf P. Würtz
+Institut für Neuroinformatik, Ruhr-Universität, 44780 Bochum, Germany"
+97cf04eaf1fc0ac4de0f5ad4a510d57ce12544f5,"Deep Affect Prediction in-the-wild: Aff-Wild Database and Challenge, Deep Architectures, and Beyond","manuscript No.
+(will be inserted by the editor)
+Deep Affect Prediction in-the-wild: Aff-Wild Database and Challenge,
+Deep Architectures, and Beyond
+Dimitrios Kollias (cid:63) · Panagiotis Tzirakis † · Mihalis A. Nicolaou ∗ · Athanasios
+Papaioannou(cid:107) · Guoying Zhao1 · Bj¨orn Schuller2 · Irene Kotsia3 · Stefanos
+Zafeiriou4"
+97d1d561362a8b6beb0fdbee28f3862fb48f1380,Age Synthesis and Estimation via Faces: A Survey,"Age Synthesis and Estimation via Faces:
+A Survey
+Yun Fu, Member, IEEE, Guodong Guo, Senior Member, IEEE, and
+Thomas S. Huang, Fellow, IEEE"
+97e7810f21a145caddc7e5168b59f0ab8894f669,Technical Report: Learning to Rank using High-Order Information,"Technical Report: Learning to Rank using
+High-Order Information
+Puneet K. Dokania1, Aseem Behl2, C. V. Jawahar2, and M. Pawan Kumar1
+Ecole Centrale de Paris1, INRIA Saclay1, IIIT Hyderabad - India2"
+97ee35db6b389a7bcc4b7975d12dbcd165226aad,Structured Learning of Human Interactions in TV Shows,"Structured Learning
+of Human Interactions in TV Shows
+Alonso Patron-Perez, Member, IEEE, Marcin Marszalek,
+Ian Reid, Member, IEEE, and Andrew Zisserman"
+97865d31b5e771cf4162bc9eae7de6991ceb8bbf,Face and Gender Classification in Crowd Video,"Face and Gender Classification in Crowd Video
+Priyanka Verma
+IIIT-D-MTech-CS-GEN-13-100
+July 16, 2015
+Indraprastha Institute of Information Technology
+New Delhi
+Thesis Advisors
+Dr. Richa Singh
+Dr. Mayank Vatsa
+Submitted in partial fulfillment of the requirements
+for the Degree of M.Tech. in Computer Science
+(cid:13) Verma, 2015
+Keywords : Face Recognition, Gender Classification, Crowd database"
+97ede92a6a3579f9fc8ad7c179eaaf37b3966e5a,Bicycle tracking using ellipse extraction,"Bicycle Tracking Using Ellipse Extraction
+Tohid Ardeshiri, Fredrik Larsson, Fredrik Gustafsson, Thomas B. Sch¨on, Michael Felsberg
+Department of Electrical Engineering
+Link¨oping University
+Link¨oping, Sweden
+e-mail: {tohid, larsson, fredrik, schon,"
+978d9a5251028da5a23fd0aed8234ed22b4918c5,Reduced Eigen Space Dimensionality for Fast Face Recognition,"www.ijemr.net
+ISSN (ONLINE): 2250-0758, ISSN (PRINT): 2394-6962
+Volume-5, Issue-2, April-2015
+International Journal of Engineering and Management Research
+Page Number: 33-39
+Reduced Eigen Space Dimensionality for Fast Face Recognition
+Research Scholar, Department of Computer Science and Applications, Panjab University, Chandigarh, INDIA
+Professor, Department of Computer Science and Applications, Panjab University, Chandigarh, INDIA
+Davoud Aflakian1, M. Syamala Devi2"
+979f63114a30d60c5c06d4c9c18c8249c3a63099,Synthetically Trained Neural Networks for Learning Human-Readable Plans from Real-World Demonstrations,"Synthetically Trained Neural Networks for Learning
+Human-Readable Plans from Real-World Demonstrations
+Jonathan Tremblay
+Thang To
+Artem Molchanov†
+Stephen Tyree
+Jan Kautz
+Stan Birchfield"
+9709d362a15414b062efa9cf4a212469af803a7a,Holistic Multi-modal Memory Network for Movie Question Answering,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+Holistic Multi-modal Memory Network
+for Movie Question Answering
+Anran Wang, Anh Tuan Luu, Chuan-Sheng Foo, Hongyuan Zhu, Yi Tay, Vijay Chandrasekhar"
+9727c74a09aad74abd67ff1d2dff083cc73d4a2e,Visual Focus of Attention in Non-calibrated Environments using Gaze Estimation,"Int J Comput Vis
+DOI 10.1007/s11263-013-0691-3
+Visual Focus of Attention in Non-calibrated Environments using
+Gaze Estimation
+Stylianos Asteriadis · Kostas Karpouzis ·
+Stefanos Kollias
+Received: 24 May 2012 / Accepted: 2 December 2013
+© Springer Science+Business Media New York 2013"
+970e723404885e94e77780766b39ee951dd7abb3,Multimodal Learning of Geometry-Preserving Binary Codes for Semantic Image Retrieval,"IEICE TRANS. INF. & SYST., VOL.E100–D, NO.4 APRIL 2017
+INVITED PAPER SpecialSectiononAward-winningPapers
+Multimodal Learning of Geometry-Preserving Binary Codes for
+Semantic Image Retrieval
+Go IRIE†a), Hiroyuki ARAI†, Members, and Yukinobu TANIGUCHI††, Senior Member
+SUMMARY
+This paper presents an unsupervised approach to feature
+binary coding for efficient semantic image retrieval. Although the majority
+of the existing methods aim to preserve neighborhood structures of the fea-
+ture space, semantically similar images are not always in such neighbors
+but are rather distributed in non-linear low-dimensional manifolds. More-
+over, images are rarely alone on the Internet and are often surrounded by
+text data such as tags, attributes, and captions, which tend to carry rich se-
+mantic information about the images. On the basis of these observations,
+the approach presented in this paper aims at learning binary codes for se-
+mantic image retrieval using multimodal information sources while pre-
+serving the essential low-dimensional structures of the data distributions in
+the Hamming space. Specifically, after finding the low-dimensional struc-
+tures of the data by using an unsupervised sparse coding technique, our
+approach learns a set of linear projections for binary coding by solving an
+9715aba0688195b2019d510ae3fd8da2e40f6e20,Evaluation of color spaces for person re-identification,"1st International Conference on Pattern Recognition (ICPR 2012)
+November 11-15, 2012. Tsukuba, Japan
+978-4-9906441-1-6 ©2012 IAPR"
+97d9c57576a573955c1b21b63f5b5ae44438e973,Discriminative on Multi - Manifolds,
+9755554b13103df634f9b1ef50a147dd02eab02f,How Transferable Are CNN-Based Features for Age and Gender Classification?,"How Transferable are CNN-based Features for
+Age and Gender Classification?
+Gökhan Özbulak1, Yusuf Aytar2 and Hazım Kemal Ekenel1"
+9794d69194ac772c3e92ee1f322a36feb3c16239,Hausdorff Artmap for Human Face Recognition,"HAUSDORFF ARTMAP FOR HUMAN FACE RECOGNITION
+ARIT THAMMANO AND CHONGKOLNEE RUNGRUANG
+Faculty of Information Technology
+King Mongkut’s Institute of Technology Ladkrabang,
+Bangkok, 10520 Thailand
+later
+received
+identification has
+encompasses
+ll of"
+63ebe80e020d902bc1fdc865c23a9ad7d1eac17a,Exploring the feasibility of subliminal priming on smartphones,"Exploring the Feasibility of Subliminal Priming on
+Anonymised for blind review
+Smartphones
+Affiliation
+City, Country
+e-mail address"
+63cf5fc2ee05eb9c6613043f585dba48c5561192,Prototype Selection for Classification in Standard and Generalized Dissimilarity Spaces Prototype Selection for Classification in Standard and Generalized Dissimilarity Spaces,"Prototype Selection for
+Classification in Standard
+and Generalized
+Dissimilarity Spaces"
+63db312ec494988e1af0c1db5f9d9ca40ef89237,Vision Based Gesture Recognition : a Comprehensive Study,"REGULAR ISSUE
+ARTICLE
+VISION BASED GESTURE RECOGNITION: A COMPREHENSIVE
+STUDY
+A Balasundaram1*, C Chellappan 2
+Research Scholar, Department of CSE, G.K.M. College of Engineering and Technology, Chennai, INDIA
+Principal, G.K.M. College of Engineering and Technology, Chennai, INDIA"
+63f2c3e312d07c6452bdad0a8adef1b879950500,Multi-stage Sampling with Boosting Cascades for Pedestrian Detection in Images and Videos,"Multi-stage Sampling with Boosting Cascades
+for Pedestrian Detection in Images and Videos
+Giovanni Gualdi, Andrea Prati, and Rita Cucchiara
+University of Modena and Reggio Emilia(cid:2), Italy"
+63cbfc7bfabd1e234c779f8445ea775b74d8fbe8,Adequacy of the Gradient-Descent Method for Classifier Evasion Attacks,"Adequacy of the Gradient-Descent Method for
+Classifier Evasion Attacks
+Yi Han
+School of Computing and Information Systems
+University of Melbourne
+Ben Rubinstein
+School of Computing and Information Systems
+University of Melbourne"
+63dbacac269c29b46b2b0bddbef828db025689dd,Deep Structure Inference Network for Facial Action Unit Recognition,"Deep Structure Inference Network for Facial Action Unit Recognition
+Ciprian A. Corneanu1, Meysam Madadi2,3, Sergio Escalera1,2
+Dept. Mathematics and Informatics, Universitat de Barcelona, Catalonia, Spain
+Computer Vision Center, Edifici O, Campus UAB, 08193 Bellaterra (Barcelona), Catalonia, Spain
+Dept. of Computer Science, Univ. Aut`onoma de Barcelona (UAB), 08193 Bellaterra, Catalonia, Spain"
+6358b95b1c97df4f10f57a90913f672e44d2094b,Opponent Colors for Human Detection,"Opponent Colors for Human Detection
+Rao Muhammad Anwer, David V´azquez, and Antonio M. L´opez
+Computer Vision Center and Computer Science Dpt.,
+Universitat Aut`onoma de Barcelona
+-- www.cvc.uab.es/adas
+Edifici O, 08193 Bellaterra, Barcelona, Spain"
+631d21e51ca9100f1eca3c80dcf42db81cfc7e2b,Interactive Person Following and Gesture Recognition with a Flying Robot,"Interactive Person Following and
+Gesture Recognition with a Flying Robot
+Tayyab Naseer*, J¨urgen Sturm†, Wolfram Burgard*, and Daniel Cremers†
+*Department of Computer Science, University of Freiburg, Germany
+Department of Computer Science, Technical University of Munich, Germany"
+637648198f9e91654ce27eaaa40512f2dc870fc1,Survey of Visual Question Answering: Datasets and Techniques,"Survey of Visual Question Answering: Datasets and Techniques
+Akshay Kumar Gupta
+Indian Institute of Technology Delhi"
+63b89e654124eb2b8edeeb82c6373bdcf228744e,Single-Image 3D Scene Parsing Using Geometric Commonsense,"Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+image I3D reconstructed sceneFigure1:Single-view3DscenereconstructionusingGeometriccommonsense.Top:theworldisfullofcommonsenseovergeo-metricdimensions,e.g.,thatasedanisabout4.5meterslong.Bot-tom:exemplarresultoftheproposedmethod,includingsynthesizedimage(left),planarsegmentation(middle),anddepthmap(right).geometriccommonsensefor3Dsceneparsing.Suchapars-ingtaskaimstosegmentbothlow-levelsceneentities(e.g.,straightedges,semanticregions)andobject-levelsceneenti-ties(e.g.,human,vehicles)in2Dimages,andestimatetheirgeometricdimensionsinthe3Dworld[Hoiemetal.,2005;DelPeroetal.,2013;Liuetal.,2014;Wangetal.,2015a;Mottaghietal.,2016].Mostexisting3Dparsingalgo-rithms[Hoiemetal.,2008]aredesignedforaparticu-lartypeofscenecategories,e.g.,urban[Liuetal.,2014;Guptaetal.,2010],indoor[Wangetal.,2015b].Howev-er,apracticalAIsystem,e.g.,autonomousdriving,usuallyneedstodealwithawidevarietyofscenecategories.Oursolutiontotheabovechallengesismotivatedbythefactthatwehumanbeings,unconsciouslysometimes,uti-lizerichpriorknowledgeofthegeometricdimensionsofsceneentitiestounderstandthescenestructuresinimagesorvideos[Davisetal.,1993].Thisknowledgecanberoughlydividedintotwotypes:i)priordistributionsoverasingledi-mensionofobjects,e.g.,theheightofafemaleadultisabout1.75meters,orthatthelengthofasedanisabout4.5meters;ii)pair-wisecomparisonsbetweenthedimensionsofdifferentsceneentitiesatbothobject-level,e.g.,human,windows,ve-hicles,etc.,andpart-level,e.g.,straightedges,planarregions,etc.AsillustratedinFigure1,forexample,thewindowedgesonthesamefacadeareparalleltoeachotherandareorthog-onaltotheedgesontheground,abuildingishigherthanahuman,orthelengthofallsedansareroughlyequal.Theseu-naryandpair-wiseknowledge,onceacquired,arevalidacross"
+63c109946ffd401ee1195ed28f2fb87c2159e63d,Robust Facial Feature Localization Using Improved Active Shape Model and Gabor Filter,"MVA2011 IAPR Conference on Machine Vision Applications, June 13-15, 2011, Nara, JAPAN
+Robust Facial Feature Localization using Improved Active Shape
+Model and Gabor Filter
+Hui-Yu Huang
+Engineering, National Formosa University,
+Taiwan
+E-mail:"
+63db76fc3ab23beb921be682d70eb021cb6c4f16,How Polarized Have We Become? A Multimodal Classification of Trump Followers and Clinton Followers,
+634f698c05d640ab355e94a9a0cf9191891b3dcb,Video Face Recognition From A Single Still Image Using an Adaptive Appearance Model Tracker,"Video Face Recognition From A Single Still Image
+Using an Adaptive Appearance Model Tracker
+M. Ali Akber Dewan
+E. Granger, R. Sabourin
+G.-L. Marcialis, F. Roli
+School of Computing and Information
+Systems, Athabasca University
+Department of Automated Production
+Engineering, École de technologie supé-
+Department of Electrical and Electronic
+Engineering, University of Cagliari
+Edmonton, Canada
+rieure, Montreal, Canada
+Cagliari, Italy"
+631483c15641c3652377f66c8380ff684f3e365c,Sync-DRAW: Automatic GIF Generation using Deep Recurrent Attentive Architectures,"Sync-DRAW: Automatic Video Generation using Deep Recurrent
+A(cid:130)entive Architectures
+Gaurav Mi(cid:138)al∗
+Tanya Marwah∗
+IIT Hyderabad
+Vineeth N Balasubramanian
+IIT Hyderabad"
+636027f52ab111b2b22332ab2ec5346d03aac305,Unsupervised learning of foreground object detection,"Unsupervised learning of foreground object detection
+Ioana Croitoru · Simion-Vlad Bogolin · Marius Leordeanu"
+63cdf4aa1492c5c8fb109a1bf03af4844982e265,Reconstructing High-Resolution Face Models From Kinect Depth Sequences,"Reconstructing High-Resolution Face Models
+From Kinect Depth Sequences
+Enrico Bondi, Pietro Pala, Senior Member, IEEE, Stefano Berretti, Member, IEEE,
+and Alberto Del Bimbo, Senior Member, IEEE"
+6372262685162f3f11ef7ac1882c327e98564875,A Survey of Approaches for Curve Based Facial Surface Representations For Three-Dimensional Face Recognition,"A Survey of Approaches for Curve Based Facial Surface Representations
+For Three-Dimensional Face Recognition
+Aouragh Salima1,3, Sbaa Salim2, Taleb-Ahmed Abdelmalik3
+Department of Electrical engineering, Kasdi Merbah University, Ouargla, Algeria.
+Department of Electrical engineering, Mohamed Kheider University, Biskra, Algeria.
+LAMIH UMR CNRS 8201 UVHC, University of Valenciennes and Hainaut Cambrésis, France."
+63c65e8584d2c3fb8833af772eb713f438cbdfe0,Exposing seam carving forgery under recompression attacks by hybrid large feature mining,"Cancún Center, Cancún, México, December 4-8, 2016
+978-1-5090-4846-5/16/$31.00 ©2016 IEEE"
+632fa986bed53862d83918c2b71ab953fd70d6cc,What Face and Body Shapes Can Tell About Height,"GÜNEL ET AL.: WHAT FACE AND BODY SHAPES CAN TELL ABOUT HEIGHT
+What Face and Body Shapes Can Tell
+About Height
+Semih Günel
+Helge Rhodin
+Pascal Fua
+CVLab
+EPFL,
+Lausanne, Switzerland"
+63340c00896d76f4b728dbef85674d7ea8d5ab26,Discriminant Subspace Analysis: A Fukunaga-Koontz Approach,"Discriminant Subspace Analysis:
+A Fukunaga-Koontz Approach
+Sheng Zhang, Member, IEEE, and Terence Sim, Member, IEEE"
+635bea02dae6d4402b53eb3b31930b53ef00adc0,Unsupervised Feature Learning for Dense Correspondences Across Scenes,"Unsupervised Feature Learning for Dense Correspondences
+across Scenes
+Chao Zhang, Chunhua Shen, Tingzhi Shen
+v1 July 2014; v2 December 2014; v3 April 2015"
+63c71e317168d5b55dccaf5515ad96c9e87f7d9e,"Part-Based RDF for Direction Classification of Pedestrians, and a Benchmark","Part-based RDF for Direction Classification
+of Pedestrians, and a Benchmark
+Junli Tao and Reinhard Klette
+The .enpeda.. Project, Tamaki Campus
+The University of Auckland, Auckland, New Zealand"
+63344dee49a1ab7e27ac34eefc30fb948a0bf9bb,Geometry and Illumination Modelling for Scene Understanding,"Geometry and Illumination Modelling for Scene Understanding
+Principal Investigators: Jana Koˇseck´a and Dimitris Samaras
+Project Summary The goal this proposal is to develop unified framework for reasoning about
+objects, scenes and lighting from single and multiple views of indoors and outdoors environments.
+We propose computational models for semantic parsing of scenes which incorporate information
+about the lighting and illumination to resolve the ambiguities of purely appearance based methods
+and develop a class of models where partial geometry and semantic information aid the process of
+recovery of illumination. The proposed work can be partitioned into three main research topics:
+1. Supervised approach for semantic parsing of object and non-object categories using photo-
+metric, geometric and shadow cues.
+2. Closing the loop on estimation of Illumination using coarse object models and geometric
+context.
+3. Object recognition, change detection, scene matching and 3D reconstruction with dramatic
+changes in illumination.
+We propose to study the interactions between appearance, geometry and lighting in the context
+of the problems outlined above and develop computational models which jointly consider these
+spects. In some cases different models will serve as preprocessing stage for the follow up prob-
+lems and in others they will interact jointly or in a feedback loop manner. For joint interactions
+final inference for estimation of semantic categories and illumination will be formulated in Markov
+Random field or Conditional Markov Random field using both photometric, geometric and illumi-"
+6388c3f3559b61632942856bbede67b724542c9e,Multi-Target Tracking Using Hierarchical Convolutional Features and Motion Cues,"(IJACSA) International Journal of Advanced Computer Science and Applications,
+Vol. 8, No. 11, 2017
+Multi-Target Tracking Using Hierarchical
+Convolutional Features and Motion Cues
+Heba Mahgoub, Khaled Mostafa, Khaled T. Wassif, Ibrahim Farag
+Faculty of Computers and Information
+Cairo University
+Cairo, Egypt"
+63f38f60022ab78aa5e47bd84070547409ab3cc8,The Use of Semantic Human Description as a Soft Biometric,"The Use of Semantic Human Description as a Soft Biometric
+Sina Samangooei
+Baofeng Guo
+Mark S. Nixon"
+634541661d976c4b82d590ef6d1f3457d2857b19,Advanced Techniques for Face Recognition under Challenging Environments,"Alma Mater Studiorum – Università di Bologna
+in cotutela con Università di Sassari
+DOTTORATO DI RICERCA IN
+INGEGNERIA ELETTRONICA, INFORMATICA E DELLE
+TELECOMUNICAZIONI
+Ciclo XXVI
+Settore Concorsuale di afferenza: 09/H1
+Settore Scientifico disciplinare: ING-INF/05
+ADVANCED TECHNIQUES FOR FACE RECOGNITION
+UNDER CHALLENGING ENVIRONMENTS
+TITOLO TESI
+YUNLIAN SUN
+Presentata da:
+Coordinatore Dottorato
+ALESSANDRO VANELLI-CORALLI
+Relatore
+DAVIDE MALTONI
+Relatore
+MASSIMO TISTARELLI
+Esame finale anno 2014"
+6332a99e1680db72ae1145d65fa0cccb37256828,MASTER IN COMPUTER VISION AND ARTIFICIAL INTELLIGENCE REPORT OF THE RESEARCH PROJECT OPTION: COMPUTER VISION Pose and Face Recovery via Spatio-temporal GrabCut Human Segmentation,"MASTER IN COMPUTER VISION AND ARTIFICIAL INTELLIGENCE
+REPORT OF THE RESEARCH PROJECT
+OPTION: COMPUTER VISION
+Pose and Face Recovery via
+Spatio-temporal GrabCut Human
+Segmentation
+Author: Antonio Hernández Vela
+Date: 13/07/2010
+Advisor: Sergio Escalera Guerrero"
+63488398f397b55552f484409b86d812dacde99a,Learning Universal Multi-view Age Estimator by Video Contexts,"Learning Universal Multi-view Age Estimator by Video Contexts
+Zheng Song1, Bingbing Ni3, Dong Guo4, Terence Sim2, Shuicheng Yan1
+Department of Electrical and Computer Engineering, 2 School of Computing, National University of Singapore;
+{zheng.s,
+Advanced Digital Sciences Center, Singapore; 4 Facebook"
+63c022198cf9f084fe4a94aa6b240687f21d8b41,Consensus Message Passing for Layered Graphical Models,
+63c7c0511e82172b6b60af21e56df68e2c6ab228,Target-based evaluation of face recognition technology for video surveillance applications,"Target-based evaluation of face recognition
+technology for video surveillance applications
+Dmitry Gorodnichy and Eric Granger"
+0f5e10cfca126682e1bad1a07848919489df6a65,Facial emotion processing in patients with social anxiety disorder and Williams-Beuren syndrome: an fMRI study.,"Research Paper
+Facial emotion processing in patients with social
+anxiety disorder and Williams–Beuren syndrome:
+an fMRI study
+Cynthia Binelli, PhD; Armando Muñiz, MD; Susana Subira, MD, PhD;
+Ricard Navines, MD, PhD; Laura Blanco-Hinojo, MSc; Debora Perez-Garcia, BSc;
+Jose Crippa, MD, PhD; Magi Farré, MD, PhD; Luis Pérez-Jurado, MD, PhD;
+Jesus Pujol, MD, PhD; Rocio Martin-Santos, MD, PhD
+Background: Social anxiety disorder (SAD) and Williams–Beuren syndrome (WBS) are 2 conditions with major differences in terms of
+genetics, development and cognitive profiles. Both conditions are associated with compromised abilities in overlapping areas, including so-
+cial approach, processing of social emotional cues and gaze behaviour, and to some extent they are associated with opposite behaviours in
+these domains. We examined common and distinct patterns of brain activation during a facial emotion processing paradigm in patients with
+SAD and WBS. Methods: We examined patients with SAD and WBS and healthy controls matched by age and laterality using functional
+MRI during the processing of happy, fearful and angry faces. Results: We included 20 patients with SAD and 20 with WBS as well as
+20 matched controls in our study. Patients with SAD and WBS did not differ in the pattern of limbic activation. We observed differences in
+early visual areas of the face processing network in patients with WBS and differences in the cortical prefrontal regions involved in the top–
+down regulation of anxiety and in the fusiform gyrus for patients with SAD. Compared with those in the SAD and control groups, participants
+in the WBS group did not activate the right lateral inferior occipital cortex. In addition, compared with controls, patients with WBS hypoacti-
+vated the posterior primary visual cortex and showed significantly less deactivation in the right temporal operculum. Participants in the SAD
+group showed decreased prefrontal activation compared with those in the WBS and control groups. In addition, compared with controls,"
+0f0499989f3331396af94f92c29f2eda9b58d4dc,Object detection methods for robot grasping: Experimental assessment and tuning,"Object detection methods for robot
+grasping: Experimental assessment and
+tuning
+Ferran RIGUAL a,1, Arnau RAMISA a, Guillem ALENYA a and Carme TORRAS a
+Institut de Rob`otica i Inform`atica Industrial, CSIC-UPC, Barcelona"
+0f4b902a2e12378e0ac0cb6fff7dd4c5f81e2c0a,Capturing facial videos with Kinect 2.0: A multithreaded open source tool and database,"Capturing Facial Videos with Kinect 2.0:
+A Multithreaded Open Source Tool and Database
+Daniel Merget
+Tobias Eckl
+Institute for Human-Machine Communication, TUM, Germany
+Philipp Tiefenbacher
+Martin Schwoerer
+Gerhard Rigoll"
+0f5bf2a208d262aa0469bd3185f6e2e56acada81,Pose Estimation and Segmentation of People in 3D Movies,"Pose Estimation and Segmentation of People in 3D
+Movies
+Karteek Alahari, Guillaume Seguin, Josef Sivic, Ivan Laptev
+To cite this version:
+Karteek Alahari, Guillaume Seguin, Josef Sivic, Ivan Laptev. Pose Estimation and Segmentation of
+People in 3D Movies. ICCV - IEEE International Conference on Computer Vision, Dec 2013, Sydney,
+Australia. IEEE, pp.2112-2119, 2013, <10.1109/ICCV.2013.263>. <hal-00874884>
+HAL Id: hal-00874884
+https://hal.inria.fr/hal-00874884
+Submitted on 18 Oct 2013
+HAL is a multi-disciplinary open access
+archive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+abroad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de"
+0f65c91d0ed218eaa7137a0f6ad2f2d731cf8dab,Multi-Directional Multi-Level Dual-Cross Patterns for Robust Face Recognition,"Multi-Directional Multi-Level Dual-Cross
+Patterns for Robust Face Recognition
+Changxing Ding, Jonghyun Choi, Dacheng Tao, Senior Member, IEEE, and Larry S. Davis, Fellow, IEEE"
+0f112e49240f67a2bd5aaf46f74a924129f03912,Age-Invariant Face Recognition,"Age-Invariant Face Recognition
+Unsang Park, Member, IEEE,
+Yiying Tong, Member, IEEE, and
+Anil K. Jain, Fellow, IEEE"
+0f07dcf92588945eb0d70893cdf0fe4a48552763,Detection- and Trajectory-Level Exclusion in Multiple Object Tracking,"Detection- and Trajectory-Level Exclusion in Multiple Object Tracking
+Anton Milan1
+Konrad Schindler2
+Stefan Roth1
+Department of Computer Science, TU Darmstadt
+Photogrammetry and Remote Sensing Group, ETH Z¨urich"
+0fbf59328d32e1a9950dfa08c3ec87eb94398651,Beyond RGB: Very High Resolution Urban Remote Sensing With Multimodal Deep Networks,"Beyond RGB: Very High Resolution Urban Remote
+Sensing With Multimodal Deep Networks
+Nicolas Audeberta,b,, Bertrand Le Sauxa, Sébastien Lefèvreb
+ONERA, The French Aerospace Lab, F-91761 Palaiseau, France
+Univ. Bretagne-Sud, UMR 6074, IRISA, F-56000 Vannes, France"
+0f4cfcaca8d61b1f895aa8c508d34ad89456948e,Local appearance based face recognition using discrete cosine transform,"LOCAL APPEARANCE BASED FACE RECOGNITION USING
+DISCRETE COSINE TRANSFORM (WedPmPO4)
+Author(s) :"
+0fdcfb4197136ced766d538b9f505729a15f0daf,Multiple pattern classification by sparse subspace decomposition,"Multiple Pattern Classification by Sparse Subspace Decomposition
+Institute of Media and Information Technology, Chiba University
+Tomoya Sakai
+-33 Yayoi, Inage, Chiba, Japan"
+0fad544edfc2cd2a127436a2126bab7ad31ec333,Decorrelating Semantic Visual Attributes by Resisting the Urge to Share,"Decorrelating Semantic Visual Attributes by Resisting the Urge to Share
+Dinesh Jayaraman
+UT Austin
+Fei Sha
+Kristen Grauman
+UT Austin"
+0f085f389a52e13586fe50f2dae49e105225303f,Distribution-sensitive learning for imbalanced datasets,"Distribution-Sensitive
+Learning
+for Imbalanced
+Datasets
+Yale Songl, Louis-Philippe
+Morency2, and Randall Davisl
+MIT Computer Science and Artificial
+Intelligence
+Laboratory
+USC Institute
+for Creative Technology"
+0f708ace6f4829e466a8a549bd23f6fcf719ab9d,Multi-shot person re-identification via relational Stein divergence,"This is the author’s version of a work that was submitted/accepted for pub-
+lication in the following source:
+Alavi, Azadeh, Yang, Yan, Harandi, Mehrtash, & Sanderson, Conrad
+(2013)
+Multi-shot person re-identification via relational stein divergence. In
+ICIP 2013 Proceedings : 2013 IEEE International Conference on Image
+Processing, Institute of Electrical and Electronics Engineers, Inc., Mel-
+bourne Convention and Exhibition Centre, Melbourne, pp. 3542-3546.
+This file was downloaded from: https://eprints.qut.edu.au/71704/
+(cid:13) c(cid:13) 2013 by the Institute of Electrical and Electronics
+Engineers, Inc.
+Notice: Changes introduced as a result of publishing processes such as
+copy-editing and formatting may not be reflected in this document. For a
+definitive version of this work, please refer to the published source:
+https://doi.org/10.1109/ICIP.2013.6738731"
+0fe5d8acc77f54d60edc56c012f35517d9c861da,Interactive Stereoscopic Video Conversion,"Interactive Stereoscopic Video Conversion
+Zhebin Zhang, Chen Zhou, Yizhou Wang, and Wen Gao, Fellow, IEEE
+aerial perspective,"
+0fd1715da386d454b3d6571cf6d06477479f54fc,A Survey of Autonomous Human Affect Detection Methods for Social Robots Engaged in Natural HRI,"J Intell Robot Syst (2016) 82:101–133
+DOI 10.1007/s10846-015-0259-2
+A Survey of Autonomous Human Affect Detection Methods
+for Social Robots Engaged in Natural HRI
+Derek McColl · Alexander Hong ·
+Naoaki Hatakeyama · Goldie Nejat ·
+Beno Benhabib
+Received: 10 December 2014 / Accepted: 11 August 2015 / Published online: 23 August 2015
+© Springer Science+Business Media Dordrecht 2015"
+0f08d62e882026ac83ebf26c0bd288c553873814,Multispecies Fruit Flower Detection Using a Refined Semantic Segmentation Network,"Multispecies fruit flower detection using a refined
+semantic segmentation network
+Philipe A. Dias1, Amy Tabb2, and Henry Medeiros1"
+0f94f4934d0a26dfd243852036468ecc9bf8d22c,Low Resolution Lidar-Based Multi-Object Tracking for Driving Applications,"Low resolution lidar-based multi-object tracking
+for driving applications
+Iván del Pino(cid:63), Víctor Vaquero(cid:63), Beatrice Masini,
+Joan Solà, Francesc Moreno-Noguer,
+Alberto Sanfeliu, and Juan Andrade-Cetto
+Institut de Robòtica i Informàtica Industrial, CSIC-UPC
+Llorens Artigas 4-6, 08028 Barcelona, Spain.
+http://www.iri.upc.edu"
+0f1392c1180582a45b42e621e1526f03cc6e9ca6,Learning with Hierarchical-Deep Models,"Learning with Hierarchical-Deep Models
+Ruslan Salakhutdinov, Joshua B. Tenenbaum, and Antonio Torralba"
+0fb75f5cb12d1e1a909b9f698b7617bb9603002f,Design of Weight-Learning Efficient Convolutional Modules in Deep Convolutional Neural Networks and its Application to Large-Scale Visual Recognition Tasks,"Data Analysis Project
+Design of Weight-Learning Ef‌f‌icient Convolutional Modules in Deep
+Convolutional Neural Networks and its Application to
+Large-Scale Visual Recognition Tasks
+Felix Juefei-Xu
+May 3, 2017"
+0f366de3ea595932dad06389f6e61fe0dd8cbe74,DeepAnomaly: Combining Background Subtraction and Deep Learning for Detecting Obstacles and Anomalies in an Agricultural Field,"Article
+DeepAnomaly: Combining Background Subtraction
+and Deep Learning for Detecting Obstacles and
+Anomalies in an Agricultural Field
+Peter Christiansen 1,*, Lars N. Nielsen 2, Kim A. Steen 3, Rasmus N. Jørgensen 1 and
+Henrik Karstoft 1
+Department of Engineering, Aarhus University, Aarhus 8200, Denmark;
+(R.N.J.); (H.K.)
+Danske Commodities, Aarhus 8000, Denmark;
+AgroIntelli, Aarhus 8200, Denmark;
+* Correspondence: Tel.: +45-2759-2953
+Academic Editors: Gabriel Oliver-Codina, Nuno Gracias and Antonio M. López
+Received: 15 September 2016; Accepted: 7 November 2016; Published: 11 November 2016"
+0f92e9121e9c0addc35eedbbd25d0a1faf3ab529,MORPH-II: A Proposed Subsetting Scheme,"MORPH-II: A Proposed Subsetting Scheme
+Participants: K. Kempfert, J. Fabish, K. Park, and R. Towner
+Mentors: Y. Wang, C. Chen, and T. Kling
+NSF-REU Site at UNC Wilmington, Summer 2017"
+0ff23392e1cb62a600d10bb462d7a1f171f579d0,Toward Sparse Coding on Cosine Distance,"Toward Sparse Coding on Cosine
+Distance
+Jonghyun Choi, Hyunjong Cho, Jungsuk Kwak#,
+Larry S. Davis
+UMIACS | University of Maryland, College Park
+#Stanford University"
+0fd2956ef990443f584112fa093f85a90a43c4af,Performance Evaluation of Multi-camera Visual Tracking,"PEOPLE COUNT ESTIMATION IN SMALL CROWDS
+Pietro Morerio, Lucio Marcenaro, Carlo S. Regazzoni
+Department of Biophysical and Electronic Engineering
+University of Genoa, Genoa, Italy"
+0fcda01765c5a0b4cff99b5ed5139a6e1eddb689,Exploiting Long-Term Connectivity and Visual Motion in CRF-Based Multi-Person Tracking,"Exploiting Long-Term Connectivity and Visual
+Motion in CRF-Based Multi-Person Tracking
+Alexandre Heili, Student Member, IEEE, Adolfo López-Méndez, and Jean-Marc Odobez, Member, IEEE"
+0fcca61391e7ee7718f5d2c05adc658f2978a2e8,Spectral Face Recognition Using Orthogonal Subspace Bases,
+0f9bd0d528603654de2687d3ae2472a522607ee3,Semantics-aware visual localization under challenging perceptual conditions,"Semantics-aware Visual Localization
+under Challenging Perceptual Conditions
+Tayyab Naseer
+Gabriel L. Oliveira
+Thomas Brox
+Wolfram Burgard"
+0f395a49ff6cbc7e796656040dbf446a40e300aa,The Change of Expression Configuration Affects Identity-Dependent Expression Aftereffect but Not Identity-Independent Expression Aftereffect,"ORIGINAL RESEARCH
+published: 22 December 2015
+doi: 10.3389/fpsyg.2015.01937
+The Change of Expression
+Configuration Affects
+Identity-Dependent Expression
+Aftereffect but Not
+Identity-Independent Expression
+Aftereffect
+Miao Song 1, 2*, Keizo Shinomori 2, Qian Qian 3, Jun Yin 1 and Weiming Zeng 1
+College of Information Engineering, Shanghai Maritime University, Shanghai, China, 2 School of Information, Kochi University
+of Technology, Kochi, Japan, 3 Yunnan Key Laboratory of Computer Technology Applications, Kunming University of Science
+and Technology, Kunming, China
+The present study examined the influence of expression configuration on cross-identity
+expression aftereffect. The expression configuration refers to the spatial arrangement
+of facial features in a face for conveying an emotion, e.g., an open-mouth smile vs.
+closed-mouth smile. In the first of two experiments, the expression aftereffect is
+measured using a cross-identity/cross-expression configuration factorial design. The
+facial
+identities of test faces were the same or different from the adaptor, while"
+0fb680b5136d80c13e8d15078ef18ca4aac269f6,Optimizing Deep Neural Network Architecture: A Tabu Search Based Approach,"Optimizing Deep Neural Network Architecture: A Tabu
+Search Based Approach
+Tarun Kumar Gupta and Khalid Raza*
+Department of Computer Science, Jamia Millia Islamia, New Delhi-110025"
+0f2a910f98e9955d2fbd4841d31b4943b91ab382,Creating and Annotating Affect Databases from Face and Body Display: A Contemporary Survey,"Creating and Annotating Affect Databases from Face and Body
+Display: A Contemporary Survey
+Hatice Gunes and Massimo Piccardi"
+0f29710e54f714eeea5233628afc68c680d881bb,Tracking Indistinguishable Translucent Objects over Time Using Weakly Supervised Structured Learning,"Tracking indistinguishable translucent objects over time
+using weakly supervised structured learning
+Luca Fiaschi1, Ferran Diego1, Konstantin Gregor1, Martin Schiegg1, Ullrich Koethe1, Marta Zlatic2 and
+Fred A. Hamprecht1
+HCI University of Heidelberg, Germany, http://hci.iwr.uni-heidelberg.de
+HHMI Janelia Farm, USA, http://janelia.org/"
+0ffee18b495830d373dbc65f67a452d94938900b,Registration-based moving object detection from a moving camera,"IROS 2008 2nd Workshop on Planning, Perception and Navigation for Intelligent Vehicles
+Registration-based moving object detection
+from a moving camera
+Angel D. Sappa, Fadi Dornaika, David Ger´onimo and Antonio L´opez"
+0f5275b472344dbfc4a26a9ba73dff23844b7e84,Head movements and postures as pain behavior,"RESEARCH ARTICLE
+Head movements and postures as pain
+behavior
+Philipp Werner1*, Ayoub Al-Hamadi1, Kerstin Limbrecht-Ecklundt2, Steffen Walter3,
+Harald C. Traue3
+Neuro-Information Technology group, Institute for Information Technology and Communications, Otto-von-
+Guericke University Magdeburg, Magdeburg, Germany, 2 Department of Anesthesiology, University Medical
+Center Hamburg-Eppendorf, Hamburg, Germany, 3 Medical Psychology, University Clinic for Psychosomatic
+Medicine and Psychotherapy, Ulm, Germany
+1111111111
+1111111111
+1111111111
+1111111111
+1111111111"
+0f41f1a4bd5141184ee3ed3cf8874eeb396d7862,Deep Forest: Towards An Alternative to Deep Neural Networks,Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+0fd1bffb171699a968c700f206665b2f8837d953,Weakly Supervised Object Localization with Multi-Fold Multiple Instance Learning,"Weakly Supervised Object Localization with
+Multi-fold Multiple Instance Learning
+Ramazan Gokberk Cinbis, Jakob Verbeek, and Cordelia Schmid, Fellow, IEEE"
+0f82a869a80b6114bd16437dbf703bcae84da7b9,Neural Activation Constellations: Unsupervised Part Model Discovery with Convolutional Networks,"Neural Activation Constellations: Unsupervised Part Model Discovery with
+Convolutional Networks
+Marcel Simon and Erik Rodner
+Computer Vision Group, University of Jena, Germany∗
+http://www.inf-cv.uni-jena.de/constellation_model_revisited"
+0fa42d4478b514b0f961e26bccbaf2b75d42e912,Extending UML for Conceptual Modeling of Annotation of Medical Images,"Extending UML for Conceptual Modeling of Annotation
+International Journal of Computer Applications (0975 – 8887)
+Volume 72– No.10, June 2013
+of Medical Images
+Mouhamed Gaith Ayadi
+Riadh Bouslimi
+Jalel Akaichi
+Department of computer
+sciences
+ISG university of Tunis
+Tunisia
+Department of computer
+sciences
+ISG university of Tunis
+Tunisia
+Department of computer
+sciences
+ISG university of Tunis
+Tunisia"
+0f25aa473e808de72c6975fdb1e3e65180a38c05,Bag of Soft Biometrics for Person Identification New trends and challenges,"Noname manuscript No.
+(will be inserted by the editor)
+Bag of Soft Biometrics for Person Identi(cid:12)cation
+New trends and challenges.
+Antitza Dantcheva (cid:1) Carmelo Velardo (cid:1)
+Angela D’Angelo (cid:1) Jean{Luc Dugelay
+Received: 01.08.2010 / Accepted: 11.10.2010"
+0ff14ec76e5fe7f17dce102e781ffce2738c8d4b,Real-time pedestrian detection in urban scenarios,"Real-time Pedestrian Detection in Urban Scenarios
+VARGA Robert, VESA Andreea Valeria, JEONG Pangyu, NEDEVSCHI Sergiu
+{robert.varga, pangyu.jeong,
+Technical University of Cluj Napoca
+Telephone: (800) 555–1212"
+0f556558853268d86cd05bf8ea42da6d7862a024,Shade Face: Multiple image-based 3D face recognition,"UWA Research Publication
+Mian, A. (2009). Shade Face: Multiple Image-based 3D Face Recognition. In R. Cipolla,
+M. Hebert, X. Tang, & N. Yokoya (Eds.), Proceedings of the 2009 IEEE International
+Workshop on 3-D Digital Imaging and Modeling (3DIM2009). (pp. 1833-1839). USA:
+IEEE Computer Society. 10.1109/ICCVW.2009.5457505
+© 2009 IEEE
+This is pre-copy-editing, author-produced version of an article accepted for publication,
+following peer review. The definitive published version is located at
+http://dx.doi.org/10.1109/ICCVW.2009.5457505
+This version was made available in the UWA Research Repository on 4 March 2015, in
+compliance with the publisher’s policies on archiving in institutional repositories.
+Use of the article is subject to copyright law."
+0f2ffd582674bd856247bc5482d85e6db3b49b8f,A neural signature of the creation of social evaluation.,"doi:10.1093/scan/nst051
+SCAN (2014) 9, 731^736
+A neural signature of the creation of social evaluation
+Roman Osinsky,1 Patrick Mussel,1 Linda O¨ hrlein,1 and Johannes Hewig1,2
+Department of Psychology I, Julius-Maximilians-University Wu¨rzburg, 97070 Wu¨rzburg, Germany and 2Department of Psychology,
+Friedrich-Schiller-University Jena, 07743 Jena, Germany
+Previous research has shown that receiving an unfair monetary offer in economic bargaining elicits also-called feedback negativity (FN). This scalp-
+recorded brain potential probably reflects a bad-vs-good evaluation in the medial frontal cortex and has been linked to fundamental processes of
+reinforcement learning. In the present study, we investigated whether the evaluative mechanism indexed by the FN is also involved in learning who is an
+unfair vs fair bargaining partner. An electroencephalogram was recorded while participants completed a computerized version of the Ultimatum Game,
+repeatedly receiving fair or unfair monetary offers from alleged other participants. Some of these proposers were either always fair or always unfair in
+their offers. In each trial, participants first saw a portrait picture of the respective proposer before the monetary offer was presented. Therefore, the faces
+could be used as predictive cues for the fairness of the pending offers. We found that not only unfair offers themselves induced a FN, but also (over the
+task) faces of unfair proposers. Thus, when interaction partners repeatedly behave in an unfair way, their faces acquire a negative valence, which
+manifests in a basal neural mechanism of bad-vs-good evaluation.
+Keywords: social evaluation; feedback negativity; ultimatum game; evaluative conditioning
+INTRODUCTION
+trading
+example,
+family, work,"
+0a811063cfd674275f91006d28cb8620c781e817,Image recognition based on hidden Markov eigen-image models using variational Bayesian method,"IMAGE RECOGNITION BASED ON
+HIDDEN MARKOV EIGEN-IMAGE MODELS
+USING VARIATIONAL BAYESIAN METHOD
+Kei Sawada, Kei Hashimoto,
+Yoshihiko Nankaku, Keiichi Tokuda
+Nagoya Institute of Technology
+APSIPA ASC 10/30/2013"
+0a2aca07c9e15de3d5924e156af9a8e1a67b4cab,Person Reidentification With Reference Descriptor,"Person Reidentification With Reference Descriptor
+Le An, Member, IEEE, Mehran Kafai, Member, IEEE, Songfan Yang, Member, IEEE,
+and Bir Bhanu, Fellow, IEEE
+cross
+identification"
+0a1e3d271fefd506b3a601bd1c812a9842385829,Face Recognition Using 3D Directional Corner Points,"Face Recognition using 3D Directional Corner Points
+Author
+Yu, Xun, Gao, Yongsheng, Zhou, Jun
+Published
+Conference Title
+Pattern Recognition (ICPR), 2014 22nd International Conference on
+https://doi.org/10.1109/ICPR.2014.483
+Copyright Statement
+© 2014 IEEE. Personal use of this material is permitted. Permission from IEEE must be
+obtained for all other uses, in any current or future media, including reprinting/republishing this
+material for advertising or promotional purposes, creating new collective works, for resale or
+redistribution to servers or lists, or reuse of any copyrighted component of this work in other
+works.
+Downloaded from
+http://hdl.handle.net/10072/66408
+Link to published version
+http://www.icpr2014.org/index.htm
+Griffith Research Online
+https://research-repository.griffith.edu.au"
+0a6d344112b5af7d1abbd712f83c0d70105211d0,Constrained Local Neural Fields for Robust Facial Landmark Detection in the Wild,"Constrained Local Neural Fields for robust facial landmark detection in the wild
+Tadas Baltruˇsaitis
+Peter Robinson
+University of Cambridge Computer Laboratory
+USC Institute for Creative Technologies
+5 JJ Thomson Avenue
+Louis-Philippe Morency
+2015 Waterfront Drive"
+0a55e4191c90ec1edb8d872237a2dacd5f6eda90,"Intentional Minds: A Philosophical Analysis of Intention Tested through fMRI Experiments Involving People with Schizophrenia, People with Autism, and Healthy Individuals","HUMAN NEUROSCIENCE
+Intentional minds: a philosophical analysis of intention tested
+through fMRI experiments involving people with
+schizophrenia, people with autism, and healthy individuals
+Review ARticle
+published: 02 February 2011
+doi: 10.3389/fnhum.2011.00007
+Bruno G. Bara1,2*, Angela Ciaramidaro1, Henrik Walter 3 and Mauro Adenzato1,2
+Department of Psychology, Center for Cognitive Science, University of Turin, Turin, Italy
+Neuroscience Institute of Turin, University of Turin, Turin, Italy
+Department of Psychiatry and Psychotherapy, Charité Universitätsmedizin Berlin, Berlin, Germany
+Edited by:
+Ivan Toni, Radboud University,
+Netherlands
+Reviewed by:
+Ivan Toni, Radboud University,
+Netherlands
+Roel M. Willems, University of
+California Berkeley, USA
+*Correspondence:"
+0a391c4d7aafa73324549f212cf28640ed471a81,From Caregivers to Peers: Puberty Shapes Human Face Perception.,"663142 PSSXXX10.1177/0956797616663142Picci, ScherfPuberty Shapes Human Face Perception
+research-article2016
+Research Article
+From Caregivers to Peers: Puberty
+Shapes Human Face Perception
+Giorgia Picci and K. Suzanne Scherf
+Department of Psychology, Pennsylvania State University
+1 –13
+© The Author(s) 2016
+Reprints and permissions:
+sagepub.com/journalsPermissions.nav
+DOI: 10.1177/0956797616663142
+pss.sagepub.com"
+0a66015112da542b9b6687e4b3c9ff73565d0844,A k-NN Approach for Scalable Image Annotation Using General Web Data,"A k-NN Approach for Scalable Image Annotation
+Using General Web Data
+Mauricio Villegas and Roberto Paredes
+Institut Tecnol`ogic d’Inform`atica
+Universitat Polit`ecnica de Val`encia
+Cam´ı de Vera s/n, 46022 Val`encia, Spain"
+0a058caa89d195930224148d3d2897c0c08fc668,Metric Embedding Autoencoders for Unsupervised Cross-Dataset Transfer Learning,"Metric Embedding Autoencoders for
+Unsupervised Cross-Dataset Transfer Learning
+Alexey Potapov1,3, Sergey Rodionov1,2, Hugo Latapie4, and Enzo Fenoglio4
+SingularityNET Foundation
+Novamente LLC
+ITMO University, St. Petersburg, Russia
+Chief Technology & Architecture Of‌f‌ice, Cisco"
+0a3863a0915256082aee613ba6dab6ede962cdcd,Early and Reliable Event Detection Using Proximity Space Representation,"Early and Reliable Event Detection Using Proximity Space Representation
+Maxime Sangnier
+LTCI, CNRS, T´el´ecom ParisTech, Universit´e Paris-Saclay, 75013, Paris, France
+J´erˆome Gauthier
+LADIS, CEA, LIST, 91191, Gif-sur-Yvette, France
+Alain Rakotomamonjy
+Normandie Universit´e, UR, LITIS EA 4108, Avenue de l’universit´e, 76801, Saint-Etienne-du-Rouvray, France"
+0ad4a9fad873e9c4914fd2464404b211f295d7b6,New insights into Laplacian similarity search,"New Insights into Laplacian Similarity Search
+Xiao-Ming Wu1, Zhenguo Li2, Shih-Fu Chang1
+Department of Electrical Engineering, Columbia University. 2Huawei Noah’s Ark Lab, Hong Kong.
+(a) Λ = I, AP = 0.14
+(b) Λ = D, AP = 0.67
+(c) Λ = H, AP = 0.67
+(a) Λ = I, AP = 0.27
+(b) Λ = D, AP = 0.17
+(c) Λ = H, AP = 0.27
+Figure 1: Top 40 retrieved images on extended YaleB, with false images
+highlighted in blue box (query on top left comes from the sparsest cluster).
+Figure 2: Top 40 retrieved images on CIFAR-10, with positive images high-
+lighted in magenta box (query on top left comes from the densest cluster).
+Similarity metrics are important building blocks of many visual applica-
+tions such as image retrieval, image segmentation, and manifold learning.
+Well-known similarity metrics include personalized PageRank, hitting and
+commute times, and the pseudo-inverse of graph Laplacian. Despite their
+popularity, the understanding of their behaviors is far from complete, and
+their use in practice is mostly guided by empirical trials and error analy-
+sis. This paper bridges this gap by investigating the fundamental design of"
+0a6a173a1d1d36285bae97f98f4b901067d40097,Similarity learning on an explicit polynomial kernel feature map for person re-identification,"Similarity Learning on an Explicit Polynomial Kernel Feature Map for Person
+Re-Identification
+Dapeng Chen y, Zejian Yuan y, Gang Huaz, Nanning Zhengy, Jingdong Wang x
+y Xi’an Jiaotong University
+zStevens Institute of Technology
+xMicrosoft Research"
+0a60e76e6983e1647469172a50907023913b0c9f,Longitudinal study of amygdala volume and joint attention in 2- to 4-year-old children with autism.,"ORIGINAL ARTICLE
+Longitudinal Study of Amygdala Volume and Joint
+Attention in 2- to 4-Year-Old Children With Autism
+Matthew W. Mosconi, PhD; Heather Cody-Hazlett, PhD; Michele D. Poe, PhD;
+Guido Gerig, PhD; Rachel Gimpel-Smith, BA; Joseph Piven, MD
+Context: Cerebral cortical volume enlargement has been
+reported in 2- to 4-year-olds with autism. Little is known
+about the volume of subregions during this period of de-
+velopment. The amygdala is hypothesized to be abnormal
+in volume and related to core clinical features in autism.
+Objectives: To examine amygdala volume at 2 years with
+follow-up at 4 years of age in children with autism and
+to explore the relationship between amygdala volume and
+selected behavioral features of autism.
+Design: Longitudinal magnetic resonance imaging study.
+Setting: University medical setting.
+Participants: Fifty autistic and 33 control (11 devel-
+opmentally delayed, 22 typically developing) children be-
+tween 18 and 35 months (2 years) of age followed up at
+2 to 59 months (4 years) of age."
+0a81810af97e8ab5b8c483209b4d0ff7210436f9,Human Joint Angle Estimation and Gesture Recognition for Assistive Robotic Vision,"Human Joint Angle Estimation and Gesture Recognition
+for Assistive Robotic Vision
+Alp Guler1, Nikolaos Kardaris2, Siddhartha Chandra1, Vassilis Pitsikalis2, Christian
+Werner3, Klaus Hauer3, Costas Tzafestas2, Petros Maragos2, Iasonas Kokkinos1
+(1) INRIA GALEN & Centrale Sup´elec Paris,
+(2) National Technical University of Athens, (3) University of Heidelberg"
+0adffd02029363c204a561092e1e0cc05cacfee7,A New Method for Static Video Summarization Using Local Descriptors and Video Temporal Segmentation,"A New Method for Static Video Summarization
+Using Local Descriptors and Video Temporal
+Segmentation
+Edward J. Y. Cayllahua Cahuina
+Computer Research Center
+San Pablo Catholic University
+Arequipa, Peru
+Email:
+Guillermo Camara Chavez
+Department of Computer Science
+Federal university of Ouro Preto
+Ouro Preto, Brazil
+Email:"
+0a60d9d62620e4f9bb3596ab7bb37afef0a90a4f,Chimpanzee Faces in the Wild: Log-Euclidean CNNs for Predicting Identities and Attributes of Primates,"Chimpanzee Faces in the Wild: Log-Euclidean CNNs for Predicting Identities and Attributes of Primates. GCPR 2016
+(cid:13) Copyright by Springer. The final publication will be available at link.springer.com
+A. Freytag, E. Rodner, M. Simon, A. Loos, H. K¨uhl and J. Denzler
+Chimpanzee Faces in the Wild:
+Log-Euclidean CNNs for Predicting Identities
+and Attributes of Primates
+Alexander Freytag1,2, Erik Rodner1,2, Marcel Simon1, Alexander Loos3,
+Hjalmar S. K¨uhl4,5, and Joachim Denzler1,2,5
+Computer Vision Group, Friedrich Schiller University Jena, Germany
+Michael Stifel Center Jena, Germany
+Fraunhofer Institute for Digital Media Technology, Germany
+Max Planck Institute for Evolutionary Anthropology, Germany
+5German Centre for Integrative Biodiversity Research (iDiv), Germany"
+0a773ed20a5920897788dd6f0d63c20defca8ab0,ConceptLearner: Discovering visual concepts from weakly labeled image collections,"ConceptLearner: Discovering Visual Concepts from Weakly Labeled Image
+Collections
+Bolei Zhou†, Vignesh Jagadeesh‡, Robinson Piramuthu‡
+MIT ‡eBay Research Labs"
+0ad0a1293f80c838c843726eeddf5a97f33f0c89,Understanding image virality,"Understanding Image Virality
+Arturo Deza
+UC Santa Barbara
+Devi Parikh
+Virginia Tech"
+0aa9872daf2876db8d8e5d6197c1ce0f8efee4b7,Timing is everything : a spatio-temporal approach to the analysis of facial actions,"Imperial College of Science, Technology and Medicine
+Department of Computing
+Timing is everything
+A spatio-temporal approach to the analysis of facial
+actions
+Michel François Valstar
+Submitted in part fulfilment of the requirements for the degree of
+Doctor of Philosophy in Computing of Imperial College, February 2008"
+0adb5923fb1955f7ca0a85454afe17e5d25425df,Crowd motion monitoring using tracklet-based commotion measure,"CROWD MOTION MONITORING USING TRACKLET-BASED COMMOTION MEASURE
+Hossein Mousavi*
+Moin Nabi* Hamed Kiani
+Alessandro Perina
+Vittorio Murino
+Pattern Analysis and Computer Vision Department (PAVIS)
+Istituto Italiano di Tecnologia
+Genova, Italy"
+0a87d781fe2ae2e700237ddd00314dbc10b1429c,Multi-scale HOG Prescreening Algorithm for Detection of Buried Explosive Hazards in FL-IR and FL-GPR Data,"Distribution Statement A: Approved for public release; distribution unlimited.
+Multi-scale HOG Prescreening Algorithm for Detection of Buried
+Explosive Hazards in FL-IR and FL-GPR Data
+*University of Missouri, Electrical and Computer Engineering Department, Columbia, MO
+K. Stone*, J. M. Keller*, D. Shaw*"
+0ae07f24251946b2086fb992031c298ada2805de,Exemplar-AMMs: Recognizing Crowd Movements From Pedestrian Trajectories,"JOURNAL OF LATEX CLASS FILES, VOL. 13, NO. 9, SEPTEMBER 2014
+Exemplar-AMMs: Recognizing Crowd Movements
+from Pedestrian Trajectories
+Wenxi Liu, Rynson W.H. Lau, Xiaogang Wang, Dinesh Manocha"
+0af65df112db18248ed24a1c0fb5fe8524015336,Contour Segment Analysis for Human Silhouette Pre-segmentation,"Author manuscript, published in ""5th International Conference on Computer Vision Theory and Applications (VISAPP 2010),
+Angers : France (2010)"""
+0ae3182836b1b962902d664ddd524e8554b742cf,Integrating Context and Occlusion for Car Detection by Hierarchical And-Or Model,"Integrating Context and Occlusion for Car
+Detection by Hierarchical And-Or Model
+Bo Li1,2, Tianfu Wu2,(cid:2), and Song-Chun Zhu2
+Beijing Lab of Intelligent Information Technology, Beijing Institute of Technology
+Department of Statistics, University of California, Los Angeles"
+0a7a7b3f05918fb4fc33f04cb7e31232fa197f76,Fitting a Morphable Model to 3D Scans of Faces,"Fitting a Morphable Model to 3D Scans of Faces
+Volker Blanz
+Universität Siegen,
+Siegen, Germany
+Kristina Scherbaum
+MPI Informatik,
+Saarbrücken, Germany
+Hans-Peter Seidel
+MPI Informatik,
+Saarbrücken, Germany"
+0a3051c8dde80975640d42dca21fac17ed60f987,A Hierarchical Switching Linear Dynamical System Applied to the Detection of Sepsis in Neonatal Condition Monitoring,
+0a8ab703839ae585c2f27099616c40974cbeeda2,"Fast, Exact and Multi-scale Inference for Semantic Image Segmentation with Deep Gaussian CRFs","Fast, Exact and Multi-Scale Inference for Semantic
+Image Segmentation with Deep Gaussian CRFs
+Siddhartha Chandra
+Iasonas Kokkinos
+INRIA GALEN & Centrale Sup´elec, Paris, France"
+0a2d2b79ba39e2140c93543b8ce873f106c08e3d,Semi-Supervised Sparse Representation Based Classification for Face Recognition With Insufficient Labeled Samples,"Semi-Supervised Sparse Representation Based
+Classification for Face Recognition with Insufficient
+Labeled Samples
+Yuan Gao, Jiayi Ma, and Alan L. Yuille Fellow, IEEE"
+0af48a45e723f99b712a8ce97d7826002fe4d5a5,Toward Wide-Angle Microvision Sensors,"Toward Wide-Angle Microvision Sensors
+Sanjeev J. Koppal, Member, IEEE, Ioannis Gkioulekas, Student Member, IEEE,
+Travis Young, Member, IEEE, Hyunsung Park, Student Member, IEEE,
+Kenneth B. Crozier, Member, IEEE, Geoffrey L. Barrows, Member, IEEE, and
+Todd Zickler, Member, IEEE"
+0a4ba4d5bd6e07a31fa4586322fd5e07d9f9975e,Online Bayesian Nonparametrics for Group Detection,"ZANOTTO, BAZZANI, CRISTANI, MURINO: ONLINE BNP FOR GROUP DETECTION
+Online Bayesian Nonparametrics for Group
+Detection
+Matteo Zanotto
+Loris Bazzani
+Marco Cristani
+Vittorio Murino
+Pattern Analysis & Computer Vision
+Istituto Italiano di Tecnologia
+Via Morego 30 - 16163
+Genova, Italy"
+0aa8a0203e5f406feb1815f9b3dd49907f5fd05b,Mixture Subclass Discriminant Analysis,"Mixture subclass discriminant analysis
+Nikolaos Gkalelis, Vasileios Mezaris, Ioannis Kompatsiaris"
+0a7309147d777c2f20f780a696efe743520aa2db,Stories for Images-in-Sequence by using Visual and Narrative Components,"Stories for Images-in-Sequence by using Visual
+and Narrative Components (cid:63)
+Marko Smilevski1,2, Ilija Lalkovski2, and Gjorgji Madjarov1,3
+Ss. Cyril and Methodius University, Skopje, Macedonia
+Pendulibrium, Skopje, Macedonia
+Elevate Global, Skopje, Macedonia"
+0a40415bdfe4bc9ef7e019e4f1442a9fb61f58b2,Automatic Discovery and Geotagging of Objects from Street View Imagery,"Automatic Discovery and Geotagging of Objects from Street View Imagery
+Vladimir A. Krylov
+Eamonn Kenny
+Rozenn Dahyot
+ADAPT Centre, School of Computer Science and Statistics, Trinity College Dublin, Dublin, Ireland"
+0ad90ad5d2050ebaba5b5cddeb474c7d889bec3e,A Unified Semantic Embedding: Relating Taxonomies and Attributes,"A Unified Semantic Embedding:
+Relating Taxonomies and Attributes
+Sung Ju Hwang∗
+Disney Research
+Pittsburgh, PA"
+0a8c6b40d6ca75bc1995083825e362137b130624,Nonparametric Method for Data-driven Image Captioning,"Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics (Short Papers), pages 592–598,
+Baltimore, Maryland, USA, June 23-25 2014. c(cid:13)2014 Association for Computational Linguistics"
+0a1138276c52c734b67b30de0bf3f76b0351f097,Discriminant Incoherent Component Analysis,"This is the author's version of an article that has been published in this journal. Changes were made to this version by the publisher prior to publication.
+The final version of record is available at
+http://dx.doi.org/10.1109/TIP.2016.2539502
+Discriminant Incoherent Component Analysis
+Christos Georgakis, Student Member, IEEE, Yannis Panagakis, Member, IEEE, and Maja Pantic, Fellow, IEEE"
+0a572c16e635312f118d1a53f0ff6446402d3c32,Learning with proxy supervision for end-to-end visual learning,"Learning with Proxy Supervision for End-To-End Visual Learning
+Jiˇr´ı ˇCerm´ak1∗ Anelia Angelova2"
+0a6a25ee84fc0bf7284f41eaa6fefaa58b5b329a,Neural Networks Regularization Through Representation Learning,"THÈSEPour obtenir le diplôme de doctorat Spécialité Informatique Préparée au sein de « l'INSA Rouen Normandie » Présentée et soutenue parSoufiane BELHARBIThèse dirigée par Sébastien ADAM, laboratoire LITIS Neural Networks Regularization Through Representation LearningThèse soutenue publiquement le 06 Juillet 2018 devant le jury composé deSébastien ADAMProfesseur à l'Université de Rouen NormandieDirecteur de thèseClément CHATELAINMaître de conférence à l'INSA Rouen NormandieEncadrant de thèseRomain HÉRAULTMaître de conférence à l'INSA Rouen NormandieEncadrant de thèseElisa FROMONTProfesseur à l'Université de Rennes 1Rapporteur de thèseThierry ARTIÈRESProfesseur à l'École Centrale MarseilleRapporteur de thèseJohn LEEProfesseur à l'Université Catholique de LouvainExaminateur de thèseDavid PICARDMaître de conférences à l'École Nationale Supérieure de l'Électronique et de ses ApplicationsExaminateur de thèseFrédéric JURIEProfesseur à l' Université de Caen NormandieInvité"
+0ae9cc6a06cfd03d95eee4eca9ed77b818b59cb7,"Multi-task, multi-label and multi-domain learning with residual convolutional networks for emotion recognition","Noname manuscript No.
+(will be inserted by the editor)
+Multi-task, multi-label and multi-domain learning with
+residual convolutional networks for emotion recognition
+Gerard Pons · David Masip
+Received: date / Accepted: date"
+0acf23485ded5cb9cd249d1e4972119239227ddb,Dual coordinate solvers for large-scale structural SVMs,"Dual coordinate solvers for large-scale structural SVMs
+Deva Ramanan
+UC Irvine
+This manuscript describes a method for training linear SVMs (including binary SVMs, SVM regression,
+and structural SVMs) from large, out-of-core training datasets. Current strategies for large-scale learning fall
+into one of two camps; batch algorithms which solve the learning problem given a finite datasets, and online
+algorithms which can process out-of-core datasets. The former typically requires datasets small enough to fit
+in memory. The latter is often phrased as a stochastic optimization problem [4, 15]; such algorithms enjoy
+strong theoretical properties but often require manual tuned annealing schedules, and may converge slowly
+for problems with large output spaces (e.g., structural SVMs). We discuss an algorithm for an “intermediate”
+regime in which the data is too large to fit in memory, but the active constraints (support vectors) are small
+enough to remain in memory.
+In this case, one can design rather ef‌f‌icient learning algorithms that are
+as stable as batch algorithms, but capable of processing out-of-core datasets. We have developed such a
+MATLAB-based solver and used it to train a series of recognition systems [19, 7, 21, 12] for articulated pose
+estimation, facial analysis, 3D object recognition, and action classification, all with publicly-available code.
+This writeup describes the solver in detail.
+Approach: Our approach is closely based on data-subsampling algorithms for collecting hard exam-
+ples [9, 10, 6], combined with the dual coordinate quadratic programming (QP) solver described in liblinear
+[8]. The latter appears to be current fastest method for learning linear SVMs. We make two extensions (1)"
+0aaa66501298c3df27293eca7906e93d8013b729,Fast HOG based person detection devoted to a mobile robot with a spherical camera,"Fast HOG based Person Detection devoted to a Mobile Robot with a
+Spherical Camera
+A. A. Mekonnen1, C. Briand1, F. Lerasle1, A. Herbulot1"
+0a20e2fbe52efdb794b7566ce5233c41f4c5efc9,Monocular visual scene understanding from mobile platforms,"Monocular Visual Scene
+Understanding
+from Mobile Platforms
+A dissertation for the degree of
+Doktor-Ingenieur (Dr.-Ing.)
+approved by
+TECHNISCHE UNIVERSITÄT DARMSTADT
+Fachbereich Informatik
+presented by
+CHRISTIAN ALEXANDER WOJEK
+Dipl.-Inform.
+born in Schillingsfürst, Germany
+Examiner:
+Prof. Dr. Bernt Schiele
+Co-examiner: Prof. Dr. Luc Van Gool
+Date of Submission: 14th of May, 2010
+0th of June, 2010
+Date of Defense:
+Darmstadt, 2010"
+64a6c30ca95e85427c56acb4c1c20f62c6ec0709,PersonNet: Person Re-identification with Deep Convolutional Neural Networks,"PersonNet: Person Re-identification with Deep
+Convolutional Neural Networks
+Lin Wu, Chunhua Shen, Anton van den Hengel"
+64c9cc92ea496b9053fa5326567487b5f08bb13f,3D Human Face Recognition Using Summation Invariants,"(cid:176)2006 IEEE. Personal use of this material is permitted.
+However, permission to reprint/republish this material for ad-
+vertising or promotional purposes or for creating new collec-
+tive works for resale or redistribution to servers or lists, or to
+reuse any copyrighted component of this work in other works
+must be obtained from the IEEE."
+647c6ac5e0bfee0241d583650f18c6314f28aaee,Segmentation Driven Object Detection with Fisher Vectors,"Segmentation Driven Object Detection with Fisher Vectors
+Ramazan Gokberk Cinbis
+Jakob Verbeek Cordelia Schmid
+LEAR, INRIA Grenoble - Rhˆone-Alpes, France
+Laboratoire Jean Kuntzmann"
+6412d8bbcc01f595a2982d6141e4b93e7e982d0f,"Deep Convolutional Neural Network Using Triplets of Faces, Deep Ensemble, and Score-Level Fusion for Face Recognition","Deep Convolutional Neural Network using Triplets of Faces, Deep Ensemble, and
+Score-level Fusion for Face Recognition
+Bong-Nam Kang, Student Member, IEEE1, Yonghyun Kim, Student Member, IEEE2, and
+Daijin Kim, Member, IEEE2
+Department of Creative IT Engineering, POSTECH, Korea
+Department of Computer Science and Engineering, POSTECH, Korea
+{bnkang, gkyh0805,"
+641f0989b87bf7db67a64900dcc9568767b7b50f,Reconstructing faces from their signatures using RBF regression,"Reconstructing Faces from their Signatures using RBF
+Regression
+Alexis Mignon, Fr´ed´eric Jurie
+To cite this version:
+Alexis Mignon, Fr´ed´eric Jurie. Reconstructing Faces from their Signatures using RBF Regres-
+sion. British Machine Vision Conference 2013, Sep 2013, Bristol, United Kingdom. pp.103.1–
+103.12, 2013, <10.5244/C.27.103>. <hal-00943426>
+HAL Id: hal-00943426
+https://hal.archives-ouvertes.fr/hal-00943426
+Submitted on 13 Feb 2014
+HAL is a multi-disciplinary open access
+archive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+abroad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destin´ee au d´epˆot et `a la diffusion de documents
+scientifiques de niveau recherche, publi´es ou non,
+´emanant des ´etablissements d’enseignement et de"
+64fb6c31033e38eaaa10c0f7c2b7995f8fa84de3,Visualizing Video Sounds through Sound Word Animation,"VISUALIZING VIDEO SOUNDS THROUGH
+SOUND WORD ANIMATION
+擬音語アニメーションによる動画音響の可視化手法
+Fangzhou Wang
+A Master Thesis
+Submitted to
+the Graduate School of the University of Tokyo
+on February 20, 2014
+in Partial Ful(cid:12)llment of the Requirements
+for the Degree of Master of Information Science and
+Technology
+in Computer Science
+Thesis Supervisor: Takeo Igarashi 五十嵐健夫
+Professor of Computer Science"
+6483ebbb9c28024431c8ada03354217453ca1b3b,Statement in Lieu of an Oath,"Universit¨at des Saarlandes
+Max-Planck-Institut f¨ur Informatik
+Learning to Track Humans in Videos
+Master’s Thesis in Computer Science
+Mihai Fieraru
+supervised by
+Prof. Dr. Bernt Schiele
+advised by
+MSc Anna Khoreva
+MSc Eldar Insafutdinov
+reviewers
+Prof. Dr. Bernt Schiele
+Dr. Mario Fritz
+Saarbr¨ucken, December 2017"
+64be271fd50fce1cf8434020145a1b6e16f75c1a,Intrinsic Divergence for Face Recognition,"Centre for Theoretical Neuroscience
+Technical Report
+UW-CTN-TR-20090204-001
+February 4, 2009
+Intrinsic Divergence for Face
+Recognition
+Yichuan Tang and Xuan Choo
+Centre for Theoretical Neuroscience, Waterloo, ON. http://compneuro.uwaterloo.ca/cnrglab"
+646fa86edc22ccc452a44ac7a5953ba62fc0929b,Recognizing jumbled images: The role of local and global information in image classification,"The Role of Local and Global Information in Image Classification
+Recognizing Jumbled Images:
+Toyota Technological Institute, Chicago (TTIC)
+Devi Parikh"
+6475c1e95da0a3bd36786a32d00a893d85460e9e,Combined image- and world-space tracking in traffic scenes,"Combined Image- and World-Space Tracking in Traffic Scenes
+Aljoˇsa Oˇsep, Wolfgang Mehner, Markus Mathias, and Bastian Leibe"
+643abe6001946ebb7e262465edcf78d600c38f4f,The COST292 experimental framework for TRECVID 2007,"The COST292 experimental framework for TRECVID 2007
+Q. Zhang1, K. Chandramouli1, U. Damnjanovic1, T. Piatrik1, E. Izquierdo1,
+M. Corvaglia2, N. Adami2, R. Leonardi2, G. Yakın3, S. Aksoy3, U. Naci4,
+A. Hanjalic4, S. Vrochidis5, A. Moumtzidou5, S. Nikolopoulos5, V. Mezaris5,
+L. Makris5, I. Kompatsiaris5, E. Esen6, A. Alatan6, E. Spyrou7,
+P. Kapsalas7, G. Tolias7, P. Mylonas7, Y. Avrithis7, B. Reljin8, G. Zajic8,
+R. Jarina9, M. Kuba9, N. Aginamo10, J. Goya10, B. Mansencal11,
+J. Benois-Pineau11, A. M. G. Pinheiro12, L. A. Alexandre12, P. Almeida12
+October 22, 2007"
+64153df77fe137b7c6f820a58f0bdb4b3b1a879b,Shape Invariant Recognition of Segmented Human Faces using Eigenfaces,"Shape Invariant Recognition of Segmented Human
+Faces using Eigenfaces
+Zahid Riaz, Michael Beetz, Bernd Radig
+Department of Informatics
+Technical University of Munich, Germany"
+64c78c8bf779a27e819fd9d5dba91247ab5a902b,Tracking with multi-level features,"Tracking with multi-level features
+Roberto Henschel, Laura Leal-Taix´e, Bodo Rosenhahn, Konrad Schindler"
+64e0bd1210f180e0610b2a1faa188051a1de29bf,Combining Detectors for Robust Head Detection,"Combining Detectors for Robust Head Detection
+Henrik Brauer, Christos Grecos and Kai von Luck
+Living Place - HAW Hamburg
+Berliner Tor 11
+20099 Hamburg, Germany
+649eb674fc963ce25e4e8ce53ac7ee20500fb0e3,Toward correlating and solving abstract tasks using convolutional neural networks,
+64f6f1cd23bbac1983ad4115475e4ef26ab86ba4,Person re-identification by unsupervised video matching,"Person Re-Identification by Unsupervised Video Matching
+Xiaolong Ma1,4, Xiatian Zhu2, Shaogang Gong2, Xudong Xie1, Jianming Hu1, Kin-Man Lam3, Yisheng Zhong1"
+6434b95401aea9ece22b2b29950118afc163c2db,Localized anomaly detection via hierarchical integrated activity discovery,"THIS PAPER APPEARED IN IEEE INT. CONF. ON ADVANCED VIDEO AND SIGNAL-BASED PROCESSING (AVSS), KRAKOW, 2013
+Localized Anomaly Detection via Hierarchical Integrated Activity Discovery
+Thiyagarajan Chockalingam1
+R´emi Emonet2
+http://home.heeere.com
+Jean-Marc Odobez2,3
+: Colorado State University – Fort Collins, CO 80523, United States
+: Idiap Research Institute – CH-1920, Martigny, Switzerland
+: ´Ecole Polytechnique F´ed´eral de Lausanne – CH-1015, Lausanne, Switzerland"
+6497eb53fd7d3ff09190566be8099016fb49f801,Biometric Sensor Interoperability: A Case Study in 3D Face Recognition,
+64cac22210861d4e9afb00b781da90cf99f9d19c,Facial Landmark Detection for Manga Images,"Noname manuscript No.
+(will be inserted by the editor)
+Facial Landmark Detection for Manga Images
+Marco Stricker · Olivier Augereau ·
+Koichi Kise · Motoi Iwata
+Received: date / Accepted: date"
+64d1fcc26c2af47c8ed7436fe91546ba5bfc7a1f,Disentangling Multiple Conditional Inputs in GANs,"Disentangling Multiple Conditional Inputs in GANs
+Gökhan Yildirim
+Urs Bergmann
+Zalando Research
+Zalando Research
+Berlin, Germany
+Berlin, Germany
+Calvin Seward∗
+Zalando Research
+Berlin, Germany
+process. Researchers have achieved control of image generation by
+using GANs that are conditioned on a categorical input [12, 13].
+In this paper, we employ conditional GANs to control the visual
+attributes, such as color, texture, and shape, of a generated apparel.
+One of the main challenges of the conditional image generation
+GANs is to isolate the effects of input attributes on the final image.
+For example, we want the color of an article to stay constant, when
+we tune its texture and/or shape. One possibility would be to employ
+Adversarial Autoencoders [11] or DNA-GAN [17] to disentangle
+the inputs. However, this requires an exhaustive dataset, in other"
+641fd2edcf93fa29181952356e93a83a26012aa2,Following are some examples from CIFAR dataset : Goal : To alter the training criteria to obtain ‘ objectness ’ in the synthesis of images,"Published as a conference paper at ICLR 2017
+IMPROVING GENERATIVE ADVERSARIAL NETWORKS
+WITH DENOISING FEATURE MATCHING
+David Warde-Farley & Yoshua Bengio(cid:63)
+Montreal Institute for Learning Algorithms, (cid:63) CIFAR Senior Fellow
+Universit´e de Montr´eal
+Montreal, Quebec, Canada"
+6472df86bed51909f7b8aa0631f910db5a627c84,Minimax and Adaptive Estimation of Covariance Operator for Random Variables Observed on a Lattice Graph,"Minimax and Adaptive Estimation of Covariance Operator for
+Random Variables Observed on a Lattice Graph
+T. Tony Cai∗ and Ming Yuan†
+University of Pennsylvania and Georgia Institute of Technology
+November 3, 2012"
+6403117f9c005ae81f1e8e6d1302f4a045e3d99d,"A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets.","A Systematic Evaluation and Benchmark for
+Person Re-Identification: Features, Metrics, and
+Datasets
+Srikrishna Karanam∗, Student Member, IEEE, Mengran Gou∗, Student Member, IEEE,
+Ziyan Wu, Member, IEEE, Angels Rates-Borras, Octavia Camps, Member, IEEE,
+and Richard J. Radke, Senior Member, IEEE
+641f9c87356c0829e690272b010848242058b8bc,Object Co-detection via Efficient Inference in a Fully-Connected CRF,"Object Co-detection via Ef‌f‌icient Inference
+in a Fully-Connected CRF(cid:2)
+Zeeshan Hayder, Mathieu Salzmann, and Xuming He
+Australian National University (ANU)
+NICTA, Canberra, Australia"
+6446089a2a383ad9e4315aea0199084dc61490f9,Computational analysis of human-robot interactions through first-person vision: Personality and interaction experience,"Proceedings of the 24th IEEE International
+Symposium on Robot and Human Interactive Communication
+Kobe, Japan, Aug 31 - Sept 4, 2015
+978-1-4673-6704-2/15/$31.00 ©2015 IEEE"
+645de797f936cb19c1b8dba3b862543645510544,Deep Temporal Linear Encoding Networks,"Deep Temporal Linear Encoding Networks
+Ali Diba1,(cid:63), Vivek Sharma1,(cid:63), and Luc Van Gool1,2
+ESAT-PSI, KU Leuven, 2CVL, ETH Z¨urich"
+64bd5878170bfab423bc3fc38d693202ef4ba6b6,Monocular 3D Human Pose Estimation in the Wild Using Improved CNN Supervision,"Monocular 3D Human Pose Estimation In The Wild
+Using Improved CNN Supervision
+Dushyant Mehta1, Helge Rhodin2, Dan Casas3, Pascal Fua2,
+Oleksandr Sotnychenko1, Weipeng Xu1, and Christian Theobalt1
+MPI for Informatics, Germany
+EPFL, Switzerland
+Universidad Rey Juan Carlos, Spain"
+90d735cffd84e8f2ae4d0c9493590f3a7d99daf1,Recognition of Faces using Efficient Multiscale Local Binary Pattern and Kernel Discriminant Analysis in Varying Environment,"Original Research Paper
+American Journal of Engineering and Applied Sciences
+Recognition of Faces using Efficient Multiscale Local Binary
+Pattern and Kernel Discriminant Analysis in Varying
+Environment
+Sujata G. Bhele and
+V.H. Mankar
+Department of Electronics Engg, Priyadarshini College of Engg, Nagpur, India
+Department of Electronics Engg, Government Polytechnic, Nagpur, India
+Article history
+Received: 20-06-2017
+Revised: 18-07-2017
+Accepted: 21-08-2017
+Corresponding Author:
+Sujata G. Bhele
+Department of Electronics
+Engg, Priyadarshini College of
+Engg, Nagpur, India
+Email:"
+904c53ea063d7d1e13b99d55257801d69d073775,Combined Object Detection and Segmentation,"International Journal of Machine Learning and Computing, Vol. 3, No. 1, February 2013
+Combined Object Detection and Segmentation
+Jarich Vansteenberge, Masayuki Mukunoki, and Michihiko Minoh"
+9070045c1a9564a5f25b42f3facc7edf4c302483,Everybody needs somebody: Modeling social and grouping behavior on a linear programming multiple people tracker,"Everybody needs somebody: Modeling social and grouping behavior on a linear
+programming multiple people tracker
+Laura Leal-Taix´e, Gerard Pons-Moll and Bodo Rosenhahn
+Institute for Information Processing (TNT)
+Leibniz University Hannover, Germany"
+90d8bf2199e7fd972dab3bd3dc6fb67536fa509b,Performance and Energy Modeling of Heterogeneous Many-core Architectures,"PERFORMANCE AND ENERGY MODELING OF HETEROGENEOUS MANY-CORE ARCHITECTURES
+Performance and Energy Modeling of
+Heterogeneous Many-core Architectures
+Rui Pedro Gaspar Pinheiro"
+904a8241ef400bd85b1ad10267a1177bbde1c048,Image-Text Dataset Generation for Image Annotation and Retrieval,"II Congreso Español de Recuperación de la Información
+CERI 2012
+Image-Text Dataset Generation for Image
+Annotation and Retrieval⋆
+Mauricio Villegas and Roberto Paredes
+Institut Tecnol`ogic d’Inform`atica
+Universitat Polit`ecnica de Val`encia
+Cam´ı de Vera s/n, 46022 Val`encia (Spain)"
+902d1b14b076120cb21029b51ed8e63529fe686d,Performance Analysis for Facial Expression Recognition under Salt and Pepper Noise with Median Filter Approach,"PERFORMANCE ANALYSIS FOR FACIAL EXPRESSION
+RECOGNITION UNDER SALT AND PEPPER NOISE WITH
+MEDIAN FILTER APPROACH
+AZRINI BINTI IDRIS
+A project report submitted in partial
+fulfillment of the requirement for the award of the
+Degree of Master of Electrical Engineering
+Facultyof Electrical and Electronic Engineering
+UniversitiTun Hussein Onn Malaysia
+JULY 2013"
+90915cc93248174c4729be65159fb946d2ad5f72,"Relative Dense Tracklets for Human Action Recognition Piotr Bilinski Etienne Corvee Slawomir Bak Francois Bremond INRIA Sophia Antipolis , STARS team 2004 Route des Lucioles , BP 93 , 06902 Sophia Antipolis , France","Relative Dense Tracklets for Human Action Recognition
+Piotr Bilinski
+Etienne Corvee
+Slawomir Bak
+Francois Bremond
+INRIA Sophia Antipolis, STARS team
+2004 Route des Lucioles, BP93, 06902 Sophia Antipolis, France
+907fbe706ec14101978a63c6252e0d75e657e8dd,The Unreasonable Effectiveness of Texture Transfer for Single Image Super-resolution,"The Unreasonable Effectiveness of Texture Transfer
+for Single Image Super-resolution
+Muhammad Waleed Gondal
+Max Planck Institute for Intelligent Systems.
+Bernhard Schölkopf
+Max Planck Institute for Intelligent Systems.
+Michael Hirsch
+Amazon Research."
+9095f633a153c0e3a5503c0373c9c1dfeeefb0cc,Fast 3D face reconstruction based on uncalibrated photometric stereo,"Multimed Tools Appl
+DOI 10.1007/s11042-013-1791-3
+Fast 3D face reconstruction based on uncalibrated
+photometric stereo
+Yujuan Sun & Junyu Dong & Muwei Jian & Lin Qi
+# Springer Science+Business Media New York 2013"
+90eb9f6a1b7e3dae24e438b201e6b1f671a87eb5,Single-Camera Automatic Landmarking for People Recognition with an Ensemble of Regression Trees,"Single-Camera Automatic Landmarking for People Recognition
+with an Ensemble of Regression Trees
+Karla Trejo, Cecilio Angulo
+Universitat Polit`ecnica de Catalunya, Barcelona,
+Spain
+(AAM)
+Active Appearance Model"
+90dd771829094dad1230e32b8bc4385bfe86c4e5,A Comparison of Word Embeddings for the Biomedical Natural Language Processing,[cs.IR] 18 Jul 2018
+90e994a802a0038f24c8e3735d7619ebb40e6e93,Semantic Foggy Scene Understanding with Synthetic Data,"Noname manuscript No.
+(will be inserted by the editor)
+Semantic Foggy Scene Understanding with Synthetic Data
+Christos Sakaridis · Dengxin Dai · Luc Van Gool
+Received: date / Accepted: date"
+90ce227ec08053ea6acf9f9f9f53d8b7169574f2,An Introduction to Evaluating Biometric Systems,"C O V E R F E A T U R E
+An Introduction to
+Evaluating
+Biometric
+Systems
+O n the basis of media hype alone, you might
+conclude that biometric passwords will soon
+replace their alphanumeric counterparts
+with versions that cannot be stolen, forgot-
+ten, lost, or given to another person. But
+what if the performance estimates of these systems are
+far more impressive than their actual performance?
+P. Jonathon
+Phillips
+Alvin Martin
+C.L. Wilson
+Przybocki
+National
+Institute of
+Standards and"
+90e56a8515c8c2ff16f5c79c69811e283be852c7,Boosting face recognition via neural Super-Resolution,"Boosting face recognition via neural Super-Resolution
+Guillaume Berger, Cl´ement Peyrard and Moez Baccouche
+Orange Labs - 4 rue du Clos Courtel, 35510 Cesson-S´evign´e - France"
+90fb58eeb32f15f795030c112f5a9b1655ba3624,Face and Iris Recognition in a Video Sequence Using Dbpnn and Adaptive Hamming Distance,"INTERNATIONAL JOURNAL OF RESEARCH IN COMPUTER APPLICATIONS AND ROBOTICS
+www.ijrcar.com
+Vol.4 Issue 6, Pg.: 12-27
+June 2016
+INTERNATIONAL JOURNAL OF
+RESEARCH IN COMPUTER
+APPLICATIONS AND ROBOTICS
+ISSN 2320-7345
+FACE AND IRIS RECOGNITION IN A
+VIDEO SEQUENCE USING DBPNN AND
+ADAPTIVE HAMMING DISTANCE
+S. Revathy, 2Mr. L. Ramasethu
+PG Scholar, Hindusthan College of Engineering and Technology, Coimbatore, India.
+Assistant Professor, Hindusthan College of Engineering and Technology, Coimbatore, India.
+Email id:"
+9043df1de4f6e181875011c1379d1a7f68a28d6c,People Detection from Overhead Cameras,"People Detection from Overhead
+Cameras
+A study of impact of occlusion on
+performance
+Lu Liu
+in partial fulfillment of the requirements for the degree of
+Master of Science
+at the Delft University of Technology,
+to be defended publicly on Friday August 31, 2018 at 01:00 PM.
+Student number:
+Thesis committee: Dr. Hayley Hung (supervisor)
+621832
+EEMCS
+Laura Cabrera-Quiros (mentor) EEMCS
+EEMCS
+Prof. Marcel Reinders,
+Dr. Julian Kooij,"
+902114feaf33deac209225c210bbdecbd9ef33b1,Side-Information based Linear Discriminant Analysis for Face Recognition,"KAN et al.: SIDE-INFORMATION BASED LDA FOR FACE RECOGNITION
+Side-Information based Linear
+Discriminant Analysis for Face
+Recognition
+Meina Kan1,2,3
+Shiguang Shan1,2
+Dong Xu3
+Xilin Chen1,2
+Digital Media Research Center,
+Institute of Computing
+Technology, CAS, Beijing, China
+Key Laboratory of Intelligent
+Information Processing, Chinese
+Academy of Sciences, Beijing,
+China
+School of Computer Engineering,
+Nanyang Technological
+University, Singapore"
+90a70b38c5a1b40ac16e18628a7772923cdc5cb5,Exact Subspace Segmentation and Outlier Detection by Low-Rank Representation,"Exact Subspace Segmentation and Outlier Detection by
+Low-Rank Representation
+Anonymous Author 1
+Unknown Institution 1
+Anonymous Author 2
+Unknown Institution 2
+Anonymous Author 3
+Unknown Institution 3"
+900175d24928921600d09985211b6b9bfea44ce0,Person re-identification by pose priors,"Person re-identification by pose priors
+Sławomir Bąk
+Filipe Martins
+Francois Brémond
+INRIA Sophia Antipolis, STARS team, 2004, route des Lucioles, BP93
+06902 Sophia Antipolis Cedex - France"
+909f91c1957ce2bf9d76ee2109a865e87bf17057,GMCP-Tracker: Global Multi-object Tracking Using Generalized Minimum Clique Graphs,"GMCP-Tracker: Global Multi-object Tracking
+Using Generalized Minimum Clique Graphs
+Amir Roshan Zamir, Afshin Dehghan, and Mubarak Shah
+UCF Computer Vision Lab, Orlando, FL 32816, USA"
+903210406f14a12b481524d543b14f16114797e2,Pretest of images for the beauty dimension,"Análise Psicológica (2015), 4 (XXXIII): 453-466
+doi: 10.14417/ap.1052
+Pretest of images for the beauty dimension
+Joana Mello* / Filipe Loureiro*
+* ISPA – Instituto Universitário
+In this work, we present norms concerning the perceived association of two sets of image stimuli with
+the concept of “beauty”: 40 objects (Study 1) and 40 photos of human faces (Study 2)1. Participants
+were presented with a set of words associated with the construct of “beauty” and were subsequently
+asked to judge each image on how much they considered them to be related with this construct on a
+7-point scale (1 – Not at all related; 7 – Very related). The interpretation of means’ confidence intervals
+distinguish between 40 images, evaluated as “ugly” – with low scores on the beauty dimension – (20
+objects and 20 faces), and 28 images evaluated as “beautiful” – with high scores on the beauty
+dimension – (12 objects and 16 faces). Results are summarized and photos made available to support
+future research requiring beauty and/or ugly stimulus.
+Key words: Norms, Beauty, Ugly, People, Objects.
+Introduction
+The objective of this work consists on the presentation of beauty norms of a set of images from
+two categories (people and objects) for further use in different contexts and experimental settings.
+Our main purpose was to present norms of a set of updated to present-days photos of faces and
+objects regarding its level of activation of the “beauty” construct, i.e., of the perceived association"
+9015fd773526e21e352037663de3f586ccf4e907,Fused Deep Neural Networks for Efficient Pedestrian Detection,"Fused Deep Neural Networks for Efficient
+Pedestrian Detection
+Xianzhi Du, Mostafa El-Khamy, Vlad I. Morariu, Jungwon Lee, and Larry Davis"
+90f0646c0801f1dad43d2374d1145be8e005bdbf,Raised Middle-Finger: Electrocortical Correlates of Social Conditioning with Nonverbal Affective Gestures,"Raised Middle-Finger: Electrocortical Correlates of Social
+Conditioning with Nonverbal Affective Gestures
+Matthias J. Wieser1*, Tobias Flaisch2, Paul Pauli1
+Department of Psychology, University of Wu¨ rzburg, Wu¨ rzburg, Germany, 2 Department of Psychology, University of Konstanz, Konstanz, Germany"
+90cb074a19c5e7d92a1c0d328a1ade1295f4f311,Fully Automatic Upper Facial Action Recognition,"MIT. Media Laboratory Affective Computing Technical Report #571
+Appears in IEEE International Workshop on Analysis and Modeling of Faces and Gestures , Oct 2003
+Fully Automatic Upper Facial Action Recognition
+Ashish Kapoor Yuan Qi Rosalind W. Picard
+MIT Media Laboratory
+Cambridge, MA 02139"
+907475a4febf3f1d4089a3e775ea018fbec895fe,Statistical modeling for facial expression analysis and synthesis,"STATISTICAL MODELING FOR FACIAL EXPRESSION ANALYSIS AND SYNTHESIS
+Bouchra Abboud, Franck Davoine, Mˆo Dang
+Heudiasyc Laboratory, CNRS, University of Technology of Compi`egne.
+BP 20529, 60205 COMPIEGNE Cedex, FRANCE.
+E-mail:"
+90d8dbaa799430d7384425061317e0fa55bf5cbb,Representation Models and Machine Learning Techniques for Scene Classificatio,"Representation Models and
+Machine Learning Techniques
+for Scene Classificatio
+Giovanni Maria Farinella and Sebastiano Battiato
+Image Processing Lab, Dipartimento di Matematica e Informatica,
+Universit`a degli Studi di Catania, Viale A. Doria 6, 95125 Catania, Italy;
+E-mail: {gfarinella,"
+9028fbbd1727215010a5e09bc5758492211dec19,Solving the Uncalibrated Photometric Stereo Problem Using Total Variation,"Solving the Uncalibrated Photometric Stereo
+Problem using Total Variation
+Yvain Qu´eau1, Fran¸cois Lauze2, and Jean-Denis Durou1
+IRIT, UMR CNRS 5505, Toulouse, France
+Dept. of Computer Science, Univ. of Copenhagen, Denmark"
+bff77a3b80f40cefe79550bf9e220fb82a74c084,Facial Expression Recognition Based on Local Binary Patterns and Local Fisher Discriminant Analysis,"Facial Expression Recognition Based on Local Binary Patterns and
+Local Fisher Discriminant Analysis
+SHIQING ZHANG 1, XIAOMING ZHAO 2, BICHENG LEI 1
+School of Physics and Electronic Engineering
+Taizhou University
+Taizhou 318000
+CHINA
+2Department of Computer Science
+Taizhou University
+Taizhou 318000
+CHINA"
+bf4ec5068e6ff0b008a09f0c94bfaac290ae7d3b,Co-attention CNNs for Unsupervised Object Co-segmentation,Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intelligence (IJCAI-18)
+bf4fcd80083f3145176b64d15bab78456a7e5e43,Title Fast Randomized Algorithms for Convex Optimization and Statistical Estimation Permalink,"Fast Randomized Algorithms for Convex Optimization and
+Statistical Estimation
+Mert Pilanci
+Electrical Engineering and Computer Sciences
+University of California at Berkeley
+Technical Report No. UCB/EECS-2016-147
+http://www.eecs.berkeley.edu/Pubs/TechRpts/2016/EECS-2016-147.html
+August 14, 2016"
+bfd8bfce7c998a7bf209b7bf2e6c2e1f03c4334e,Discriminative Face Alignment,"Discriminative Face Alignment
+Xiaoming Liu, Member, IEEE"
+bf4f76c3da8a46783dfd2b72651e2300901ced25,Robust aggregation of GWAP tracks for local image annotation,"Robust aggregation of GWAP tracks
+for local image annotation
+C. Bernaschina, P. Fraternali, L. Galli, D. Martinenghi, M. Tagliasacchi
+Dipartimento di Elettronica, Informazione e Bioingegneria
+Politecnico di Milano, Italy"
+bf1e0279a13903e1d43f8562aaf41444afca4fdc,Different Viewpoints of Recognizing Fleeting Facial Expressions with DWT,"International Research Journal of Engineering and Technology (IRJET) e-ISSN: 2395-0056
+Volume: 04 Issue: 10 | Oct -2017 www.irjet.net p-ISSN: 2395-0072
+Different Viewpoints of Recognizing Fleeting Facial Expressions with
+VAIBHAV SHUBHAM1, MR. SANJEEV SHRIVASTAVA2, DR. MOHIT GANGWAR3
+information
+to get desired
+information
+Introduction
+---------------------------------------------------------------------***---------------------------------------------------------------------"
+bf96a0f037e7472e4b6cb1dae192a5fedbbbd88a,Visual Listening In: Extracting Brand Image Portrayed on Social Media,"Visual Listening In: Extracting Brand Image
+Portrayed on Social Media
+Liu Liu
+NYU Stern School of Business,
+Daria Dzyabura
+NYU Stern School of Business,
+University of Washington - Foster School of Business,
+Natalie Mizik
+Marketing academics and practitioners recognize the importance of monitoring consumer online conversations
+about brands. The focus so far has been on user generated content in the form of text. However, images are
+on their way to surpassing text as the medium of choice for social conversations. In these images, consumers
+often tag brands. We propose a “visual listening in” approach to measuring how brands are portrayed on
+social media (Instagram), by mining visual content posted by users. Our approach consists of two stages. We
+first use two supervised machine learning methods, traditional support vector machine classifiers and deep
+convolutional neural networks, to measure brand attributes (glamorous, rugged, healthy, fun) from images.
+We then apply the classifiers to brand-related images posted on social media to measure what consumers
+are visually communicating about brands. We study 56 brands in the apparel and beverages categories, and
+compare their portrayal in consumer-created images with images on the firm’s official Instagram account, as
+well as with consumer brand perceptions measured in a national brand survey. Although the three measures
+exhibit convergent validity, we find key differences between how consumers and firms portray the brands on"
+bfef76d0e287fc6401d69a9f65ff174e4fbf0970,Nonnegative Matrix Factorization with Outliers,"978-1-4799-9988-0/16/$31.00 ©2016 IEEE
+ICASSP 2016"
+bfebba8356c5d20dc6a9b2f72ff66adaf63321b7,End-to-end pedestrian collision warning system based on a convolutional neural network with semantic segmentation,"End-to-End Pedestrian Collision Warning System
+Based on a Convolutional Neural Network
+with Semantic Segmentation
+Heechul Jung
+DGIST
+Daegu, Republic of Korea
+Min-Kook Choi
+DGIST
+Daegu, Republic of Korea
+Kwon Soon
+DGIST
+Daegu, Republic of Korea
+Woo Young Jung
+DGIST
+Daegu, Republic of Korea"
+bf05e710dae791f82cc639a09dbe5ec66fed2008,Generating Video Description using Sequence-to-sequence Model with Temporal Attention,"Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers,
+pages 44–52, Osaka, Japan, December 11-17 2016."
+bf4825474673246ae855979034c8ffdb12c80a98,"UNIVERSITY OF CALIFORNIA RIVERSIDE Active Learning in Multi-Camera Networks, With Applications in Person Re-Identification A Dissertation submitted in partial satisfaction of the requirements for the degree of Doctor of Philosophy in Electrical Engineering","UNIVERSITY OF CALIFORNIA
+RIVERSIDE
+Active Learning in Multi-Camera Networks, With Applications in Person
+Re-Identification
+A Dissertation submitted in partial satisfaction
+of the requirements for the degree of
+Doctor of Philosophy
+Electrical Engineering
+Abir Das
+December 2015
+Dissertation Committee:
+Professor Amit K. Roy-Chowdhury, Chairperson
+Professor Anastasios Mourikis
+Professor Walid Najjar"
+bfdcd4d5cc10c8c64743fc7be7e7ad6709d93b53,Evaluation of PCA and LDA techniques for Face recognition using ORL face database,"Evaluation of PCA and LDA techniques for Face
+recognition using ORL face database
+CSE Dept. Faculty of Engineering, Avinashilingam University, Coimbatore, India
+M.Saraswathi, Dr. S. Sivakumari"
+bf735bb7557e73bc6f68853cba828b55bd163726,Fusion of Zernike Moments and SIFT Features for Improved Face Recognition,"International Conference on Recent Advances and Future Trends in Information Technology (iRAFIT2012)
+Proceedings published in International Journal of Computer Applications® (IJCA)
+Fusion of Zernike Moments and SIFT Features for
+Improved Face Recognition
+Chandan Singh
+Professor
+Department of Computer
+Science, Punjabi University
+Patiala, India
+Ekta Walia
+Asst. Prof., Department of
+Computer Science, South
+Asian University, New Delhi,
+Neerja Mittal
+Asst. Prof., Department of
+CSE&IT, RBIEBT, Kharar,
+Distt. Mohali, India
+India"
+bfffcd2818a1679ac7494af63f864652d87ef8fa,Neural Importance Sampling,"Neural Importance Sampling
+THOMAS MÜLLER, Disney Research & ETH Zürich
+BRIAN MCWILLIAMS, Disney Research
+FABRICE ROUSSELLE, Disney Research
+MARKUS GROSS, Disney Research & ETH Zürich
+JAN NOVÁK, Disney Research
+We propose to use deep neural networks for generating samples in Monte
+Carlo integration. Our work is based on non-linear independent compo-
+nents estimation (NICE), which we extend in numerous ways to improve
+performance and enable its application to integration problems. First, we
+introduce piecewise-polynomial coupling transforms that greatly increase
+the modeling power of individual coupling layers. Second, we propose to
+preprocess the inputs of neural networks using one-blob encoding, which
+stimulates localization of computation and improves inference. Third, we de-
+rive a gradient-descent-based optimization for the KL and the χ 2 divergence
+for the specific application of Monte Carlo integration with unnormalized
+stochastic estimates of the target distribution. Our approach enables fast and
+accurate inference and efficient sample generation independently of the di-
+mensionality of the integration domain. We show its benefits on generating
+natural images and in two applications to light-transport simulation: first,"
+bf15ba4db09fd805763738ec2cb48c09481785dd,Training Deep Neural Network in Limited Precision,"Training Deep Neural Network in Limited Precision
+Hyunsun Park∗, Jun Haeng Lee∗, Youngmin Oh, Sangwon Ha, Seungwon Lee
+Samsung Advanced Institute of Technology
+Samsung-ro 130, Suwon-si, Republic of Korea
+{h-s.park,"
+bf5940d57f97ed20c50278a81e901ae4656f0f2c,Query-Free Clothing Retrieval via Implicit Relevance Feedback,"Query-free Clothing Retrieval via Implicit
+Relevance Feedback
+Zhuoxiang Chen, Zhe Xu, Ya Zhang, Member, IEEE, and Xiao Gu"
+bff354d05823c83215183c8824faefbc093de011,A new efficient SVM and its application to real-time accurate eye localization,"Proceedings of International Joint Conference on Neural Networks, San Jose, California, USA, July 31 – August 5, 2011
+A New Efficient SVM and Its Application to
+Real-time Accurate Eye Localization
+Shuo Chen and Chengjun Liu"
+bfa763e7cec812f855c712895fa48eae89a34a00,Face Retrieval using Frequency Decoded Local Descriptor,"PREPRINT: ACCEPTED IN MULTIMEDIA TOOLS AND APPLICATIONS, SPRINGER
+Face Retrieval using Frequency Decoded Local
+Descriptor
+Shiv Ram Dubey"
+bfb98423941e51e3cd067cb085ebfa3087f3bfbe,Sparseness helps: Sparsity Augmented Collaborative Representation for Classification,"Sparseness helps: Sparsity Augmented
+Collaborative Representation for Classification
+Naveed Akhtar, Faisal Shafait, and Ajmal Mian"
+bf4e6ec60e5603324f6a40d2a060420322dbdd62,Kinects and Human Kinetics: A New Approach for Studying Crowd Behavior,"Kinects and Human Kinetics: A New Approach for
+Studying Crowd Behavior
+Stefan Seera,b,∗, Norbert Br¨andlea, Carlo Rattib
+Austrian Institute of Technology (AIT), Giefinggasse 2, 1210 Vienna, Austria
+MIT Senseable City Lab, Massachusetts Institute of Technology (MIT), 77
+Massachusetts Avenue, 02139 Cambridge, MA, USA"
+bff9d100e99dd6a99ec26ca867694075b1dcac92,Passive Multimodal 2-D+3-D Face Recognition Using Gabor Features and Landmark Distances,"Passive Multimodal 2-D+3-D Face Recognition
+Using Gabor Features and Landmark Distances
+Sina Jahanbin, Member, IEEE, Hyohoon Choi, Member, IEEE, and Alan C. Bovik, Fellow, IEEE"
+bf8bcda2e4d04b6bd6f5e70622e972baf525a1c7,Three decades of Cognition & Emotion: A brief review of past highlights and future prospects.,"COGNITION AND EMOTION, 2018
+VOL. 32, NO. 1, 1–12
+https://doi.org/10.1080/02699931.2018.1418197
+nd future prospects
+Klaus Rothermunda and Sander L. Kooleb
+Institute of Psychology, Friedrich-Schiller-Universität Jena, Jena, Germany; bDepartment of Psychology, VU Amsterdam,
+Amsterdam, the Netherlands"
+d3e9c5a63215a9c46bc61ec04df5285ac355e42c,Integration of visual and depth information for vehicle detection,pport (cid:13)(cid:13)de recherche(cid:13)ISSN0249-6399ISRNINRIA/RR--7703--FR+ENGRoboticsINSTITUTNATIONALDERECHERCHEENINFORMATIQUEETENAUTOMATIQUEIntegrationofvisualanddepthinformationforvehicledetectionAlexandrosMakris—MathiasPerrollaz—IgorParomtchik—ChristianLaugierN°7703July2011
+d3c1612ae08241dadf6abd650663f4f9351abaf9,Early Start Intention Detection of Cyclists Using Motion History Images and a Deep Residual Network,"Early Start Intention Detection of Cyclists Using Motion History
+Images and a Deep Residual Network
+Stefan Zernetsch, Viktor Kress, Bernhard Sick and Konrad Doll"
+d33c9fe66bad7a90e34e8bc1332b73147a30d202,Trace alignment algorithms for offline workload analysis of heterogeneous architectures,"Trace Alignment Algorithms for Offline Workload Analysis
+of Heterogeneous Architectures
+Muhammet Mustafa Ozdal
+Intel Corporation
+Hillsboro, OR 97124
+Aamer Jaleel
+Intel Corporation
+Hudson, MA
+Paolo Narvaez
+Intel Corporation
+Hudson, MA
+Steven Burns
+Intel Corporation
+Hillsboro, OR
+Ganapati Srinivasa
+Intel Corporation
+Hillsboro, OR"
+d3b73e06d19da6b457924269bb208878160059da,Implementation of an Automated Smart Home Control for Detecting Human Emotions via Facial Detection,"Proceedings of the 5th International Conference on Computing and Informatics, ICOCI 2015
+1-13 August, 2015 Istanbul, Turkey. Universiti Utara Malaysia (http://www.uum.edu.my )
+Paper No.
+IMPLEMENTATION OF AN AUTOMATED SMART HOME
+CONTROL FOR DETECTING HUMAN EMOTIONS VIA FACIAL
+DETECTION
+Lim Teck Boon1, Mohd Heikal Husin2, Zarul Fitri Zaaba3 and Mohd Azam
+Osman4
+Universiti Sains Malaysia, Malaysia,
+Universiti Sains Malaysia, Malaysia,
+Universiti Sains Malaysia, Malaysia,
+Universiti Sains Malaysia, Malaysia,"
+d3612bcc772761b611365fe21c42eafb181338ef,Face and Street Detection with Asymmetric Haar Features,"Face and Street Detection with Asymmetric Haar Features
+Geovany A. Ramirez
+University of Texas at El Paso
+500 W University Ave - El Paso TX 79968
+500 W University Ave - El Paso TX 79968
+Olac Fuentes
+University of Texas at El Paso"
+d3d71a110f26872c69cf25df70043f7615edcf92,Learning Compact Feature Descriptor and Adaptive Matching Framework for Face Recognition,"Learning Compact Feature Descriptor and Adaptive
+Matching Framework for Face Recognition
+Zhifeng Li, Senior Member, IEEE, Dihong Gong, Xuelong Li, Fellow, IEEE, and Dacheng Tao, Fellow, IEEE
+improvements"
+d33beb4f1477374fbcffd8e9df74ca2547eb77ee,Feature Selection for Tracker-Less Human Activity Recognition,"Feature Selection for tracker-less human activity
+recognition(cid:63)
+Plinio Moreno, Pedro Ribeiro, and Jos´e Santos-Victor
+Instituto de Sistemas e Rob´otica & Instituto Superior T´ecnico
+Portugal"
+d3b18ba0d9b247bfa2fb95543d172ef888dfff95,Learning and Using the Arrow of Time,"Learning and Using the Arrow of Time
+Donglai Wei1, Joseph Lim2, Andrew Zisserman3 and William T. Freeman4,5
+Harvard University 2University of Southern California
+University of Oxford 4Massachusetts Institute of Technology 5Google Research
+Figure 1: Seeing these ordered frames from videos, can you tell whether each video is playing forward or backward? (answer
+elow1). Depending on the video, solving the task may require (a) low-level understanding (e.g. physics), (b) high-level
+reasoning (e.g. semantics), or (c) familiarity with very subtle effects or with (d) camera conventions. In this work, we learn
+nd exploit several types of knowledge to predict the arrow of time automatically with neural network models trained on
+large-scale video datasets."
+d3e51c0cfd6ae3d3082c2aa27fa1c73fa9662fdf,Isometry-invariant Surface Matching : Numerical Algorithms and Applications,"ISOMETRY-INVARIANT SURFACE
+MATCHING: NUMERICAL ALGORITHMS
+AND APPLICATIONS
+MICHAEL M. BRONSTEIN
+Technion - Computer Science Department - Ph.D. Thesis PHD-2007-04 - 2007"
+d3761354b7df1228eabf46032fd01a4109229d43,Selection of optimal narrowband multispectral images for face recognition. (Sélection des bandes spectrales optimales pour la reconnaissance des visages),"UNIVERSITY OF BURGUANDY
+SPIM doctoral school
+PhD from the University of Burgundy in
+Computer Science
+Presented by:
+Hamdi Bouchech
+Defense Date: January 26, 2015
+Selection of optimal narrowband multispectral images for face
+recognition
+Thesis supervisor:
+Dr. Sebti Foufou
+Jury:
+Frederic Morain-Nicolier, Professeur a I’IUT de Troyes, Rapporteur.
+Pierre BONTON, Professeur à l’ Université Blaise Pascal, retraité , Rapporteur.
+Saida Bouakaz, Professeur à l’ Université Claude Bernard Lyon 1, Examinatrice.
+Pierre Gouton, Professeur à l’ Université de Bourgogne, Examinateur.
+Yassine Ruichek, Professeur à l’ Université de Technologie de Belfort-Montbéliard,
+Examinateur.
+Sebti Foufou, Professeur à l’ Université de Bourgogne, directeur de thèse."
+d348197e47a8e081bd3f12a22bc52b055ecd8302,Unified Framework for Automated Person Re-identification and Camera Network Topology Inference in Camera Networks,"Unified Framework for Automated Person Re-identification and
+Camera Network Topology Inference in Camera Networks
+Yeong-Jun Cho, Jae-Han Park*, Su-A Kim*, Kyuewang Lee and Kuk-Jin Yoon
+Computer Vision Laboratory, GIST, South Korea
+{yjcho, qkrwogks, suakim, kyuewang,"
+d3797366259182070c598e95cef8fff1ddb21f65,Distance-based Camera Network Topology Inference for Person Re-identification,"Distance-based Camera Network Topology Inference for Person Re-identification
+Yeong-Jun Cho and Kuk-Jin Yoon
+Computer Vision Laboratory, GIST, South Korea
+{yjcho,"
+d309e414f0d6e56e7ba45736d28ee58ae2bad478,Efficient Two-Stream Motion and Appearance 3 D CNNs for Video Classification,"Efficient Two-Stream Motion and Appearance 3D CNNs for
+Video Classification
+Ali Diba
+ESAT-KU Leuven
+Ali Pazandeh
+Sharif UTech
+Luc Van Gool
+ESAT-KU Leuven, ETH Zurich"
+d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9,STAIR Actions: A Video Dataset of Everyday Home Actions,
+d3d887aebeeae44cefd5c2bdbb388d9ce109e335,Image Manipulation with Perceptual Discriminators,"Image Manipulation with
+Perceptual Discriminators
+Diana Sungatullina(cid:63), Egor Zakharov(cid:63), Dmitry Ulyanov, and Victor Lempitsky
+Skolkovo Institute of Science and Technology, Moscow, Russia
+{d.sungatullina, egor.zakharov, dmitry.ulyanov,"
+d3c004125c71942846a9b32ae565c5216c068d1e,Recognizing Age-Separated Face Images: Humans and Machines,"RESEARCH ARTICLE
+Recognizing Age-Separated Face Images:
+Humans and Machines
+Daksha Yadav1, Richa Singh2, Mayank Vatsa2*, Afzel Noore1
+. West Virginia University, Morgantown, West Virginia, United States of America, 2. IIIT Delhi, New Delhi,
+Delhi, India"
+d350a9390f0818703f886138da27bf8967fe8f51,Lighting design for portraits with a virtual light stage,"LIGHTING DESIGN FOR PORTRAITS WITH A VIRTUAL LIGHT STAGE
+Davoud Shahlaei, Marcel Piotraschke, Volker Blanz
+Institute for Vision and Graphics, University of Siegen, Germany"
+d33fcdaf2c0bd0100ec94b2c437dccdacec66476,Neurons With Paraboloid Decision Boundaries for Improved Neural Network Classification Performance.,"Neurons with Paraboloid Decision Boundaries for
+Improved Neural Network Classification
+Performance
+Nikolaos Tsapanos, Anastasios Tefas, Member, IEEE, Nikolaos Nikolaidis, Member, IEEE, and
+Ioannis Pitas, Fellow, IEEE"
+d46b790d22cb59df87f9486da28386b0f99339d3,Learning Face Deblurring Fast and Wide,"Learning Face Deblurring Fast and Wide
+Meiguang Jin
+University of Bern
+Switzerland
+Michael Hirsch†
+Amazon Research
+Germany
+Paolo Favaro
+University of Bern
+Switzerland"
+d40bd8d44fe78952769a9bb04fe74ce38ef07534,Locally Adaptive Learning Loss for Semantic Image Segmentation,"Locally Adaptive Learning Loss for Semantic Image Segmentation
+Jinjiang Guo1,2, Pengyuan Ren1, Aiguo Gu1, Jian Xu1, Weixin Wu1
+Beijing NetPosa Technologies Co., Ltd. Beijing, China
+Institut National des Sciences Appliqu´ees de Lyon, Lyon, France
+{renpengyuan, guaiguo, xujian,"
+d41c11ebcb06c82b7055e2964914b9af417abfb2,CDI-Type I: Unsupervised and Weakly-Supervised Discovery of Facial Events,"CDI-Type I: Unsupervised and Weakly-Supervised
+Introduction
+Discovery of Facial Events
+The face is one of the most powerful channels of nonverbal communication. Facial expression has been a
+focus of emotion research for over a hundred years [12]. It is central to several leading theories of emotion
+[18, 31, 54] and has been the focus of at times heated debate about issues in emotion science [19, 24, 50].
+Facial expression figures prominently in research on almost every aspect of emotion, including psychophys-
+iology [40], neural correlates [20], development [11], perception [4], addiction [26], social processes [30],
+depression [49] and other emotion disorders [55], to name a few. In general, facial expression provides cues
+bout emotional response, regulates interpersonal behavior, and communicates aspects of psychopathology.
+Because of its importance to behavioral science and the emerging fields of computational behavior
+science, perceptual computing, and human-robot interaction, significant efforts have been applied toward
+developing algorithms that automatically detect facial expression. With few exceptions, previous work on
+facial expression relies on supervised approaches to learning (i.e. event categories are defined in advance
+in labeled training data). While supervised learning has important advantages, two critical limitations may
+e noted. One, because labeling facial expression is highly labor intensive, progress in automated facial
+expression recognition and analysis is slowed. For the most detailed and comprehensive labeling or coding
+systems, such as Facial Action Coding System (FACS), three to four months is typically required to train
+coder (’coding’ refers to the labeling of video using behavioral descriptors). Once trained, each minute
+of video may require 1 hour or more to code [9]. No wonder relatively few databases are yet available,"
+d497b9e50dc2aacfb1693ca4de6ebf904404d98d,Patch Based Approaches for Visual Object Class Recognition - a Survey,"ALBERT-LUDWIGS-UNIVERSIT ¨AT FREIBURG
+INSTITUT F ¨UR INFORMATIK
+Lehrstuhl f¨ur Mustererkennung und Bildverarbeitung
+Patch Based Approaches for the Recognition of Visual Object
+Classes - A Survey
+Internal Report 2/06
+Alexandra Teynor
+November, 2006"
+d488dad9fa81817c85a284b09ebf198bf6b640f9,FCHD: A fast and accurate head detector,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+FCHD: A fast and accurate head detector
+Aditya Vora, Johnson Controls Inc."
+d444368421f456baf8c3cb089244e017f8d32c41,CNN for IMU assisted odometry estimation using velodyne LiDAR,"CNN for IMU Assisted Odometry Estimation using Velodyne LiDAR
+Martin Velas, Michal Spanel, Michal Hradis, and Adam Herout"
+d48bd355d091e7ae75ade4e878fe346741e7da1a,Can You Spot the Semantic Predicate in this Video ?,"Can You Spot the Semantic Predicate in this Video?
+Christopher Reale, Claire Bonial, Heesung Kwon and Clare R. Voss
+U.S. Army Research Lab, Adelphi, Maryland 20783
+{claire.n.bonial.civ, heesung.kwon.civ,"
+d4ced2086ccd9259ade8fabdba14e0e4d9fc0c40,A Mobile Imaging System for Medical Diagnostics,"A Mobile Imaging System for Medical
+Diagnostics
+Sami Varjo and Jari Hannuksela
+The Center for Machine Vision Research
+Department of Computer Science and Engineering
+P.O. Box 4500, FI-90014 University of Oulu"
+d40c4e370d35264e324e4e3d5df59e51518c9979,A Transfer Learning based Feature-Weak-Relevant Method for Image Clustering,"A Transfer Learning based Feature-Weak-Relevant Method for
+Image Clustering
+Bo Dong, Xinnian Wang
+Dalian Maritime University
+Dalian, China"
+d4885ca24189b4414031ca048a8b7eb2c9ac646c,"Efficient Facial Representations for Age, Gender and Identity Recognition in Organizing Photo Albums using Multi-output CNN","Ef‌f‌icient Facial Representations for Age, Gender
+nd Identity Recognition in Organizing Photo
+Albums using Multi-output CNN
+Andrey V. Savchenko
+Samsung-PDMI Joint AI Center, St. Petersburg Department of Steklov Institute of
+Mathematics
+National Research University Higher School of Economics
+Nizhny Novgorod, Russia"
+d45dc3546702db7fcef8d4863db319ca84cc8d3d,How emotional are you? Neural Architectures for Emotion Intensity Prediction in Microblogs,"How emotional are you? Neural Architectures for Emotion Intensity
+Prediction in Microblogs
+Devang Kulshreshtha∗, Pranav Goel∗, and Anil Kumar Singh
+Indian Institute of Technology (Banaras Hindu University) Varanasi
+{devang.kulshreshtha.cse14, pranav.goel.cse14,
+Varanasi, Uttar Pradesh, India"
+d4001826cc6171c821281e2771af3a36dd01ffc0,Modélisation de contextes pour l'annotation sémantique de vidéos. (Context based modeling for video semantic annotation),"Modélisation de contextes pour l’annotation sémantique
+de vidéos
+Nicolas Ballas
+To cite this version:
+Nicolas Ballas. Modélisation de contextes pour l’annotation sémantique de vidéos. Autre [cs.OH].
+Ecole Nationale Supérieure des Mines de Paris, 2013. Français. <NNT : 2013ENMP0051>. <pastel-
+00958135>
+HAL Id: pastel-00958135
+https://pastel.archives-ouvertes.fr/pastel-00958135
+Submitted on 11 Mar 2014
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de"
+d458c49a5e34263c95b3393386b5d76ba770e497,A Comparative Analysis of Gender Classification Techniques,"Middle-East Journal of Scientific Research 20 (1): 01-13, 2014
+ISSN 1990-9233
+© IDOSI Publications, 2014
+DOI: 10.5829/idosi.mejsr.2014.20.01.11434
+A Comparative Analysis of Gender Classification Techniques
+Sajid Ali Khan, Maqsood Ahmad, Muhammad Nazir and Naveed Riaz
+Shaheed Zulfikar Ali Bhutto Institute of Science and Technology, Islamabad, Pakistan"
+d4e4369babdba158bfdce1b605f92d6b1b665be4,The amygdala and the relevance detection theory of autism: an evolutionary perspective,"REVIEW ARTICLE
+published: 30 December 2013
+doi: 10.3389/fnhum.2013.00894
+The amygdala and the relevance detection theory of autism:
+n evolutionary perspective
+Tiziana Zalla1* and Marco Sperduti 2,3
+Institut Jean Nicod, Centre National de la Recherche Scientifique, Ecole Normale Supérieure, Paris, France
+Laboratoire Mémoire et Cognition, Institut de Psychologie, Université Paris Descartes, Boulogne-Billancourt, France
+Inserm U894, Centre de Psychiatrie et Neurosciences, Université Paris Descartes, Paris, France
+Edited by:
+Corrado Corradi-Dell’Acqua, University
+of Geneva, Switzerland
+Reviewed by:
+Sebastian B. Gaigg, City University
+London, UK
+Bhismadev Chakrabarti, University of
+Reading, UK
+Danilo Bzdok, Research Center Jülich,
+Germany
+*Correspondence:"
+d4f8168242f688af29bcbbe1cc5aec7cd12a601c,Edinburgh Research Explorer Visually Grounded Meaning Representations,"Visually Grounded Meaning Representations
+Citation for published version:
+Silberer, C, Ferrari, V & Lapata, M 2016, 'Visually Grounded Meaning Representations' IEEE Transactions
+on Pattern Analysis and Machine Intelligence. DOI: 10.1109/TPAMI.2016.2635138
+Digital Object Identifier (DOI):
+0.1109/TPAMI.2016.2635138
+Link:
+Link to publication record in Edinburgh Research Explorer
+Document Version:
+Peer reviewed version
+Published In:
+IEEE Transactions on Pattern Analysis and Machine Intelligence
+General rights
+Copyright for the publications made accessible via the Edinburgh Research Explorer is retained by the author(s)
+nd / or other copyright owners and it is a condition of accessing these publications that users recognise and
+bide by the legal requirements associated with these rights.
+Take down policy
+The University of Edinburgh has made every reasonable effort to ensure that Edinburgh Research Explorer
+ontent complies with UK legislation. If you believe that the public display of this file breaches copyright please
+ontact providing details, and we will remove access to the work immediately and"
+d4e669d5d35fa0ca9f8d9a193c82d4153f5ffc4e,A Lightened CNN for Deep Face Representation,"A Lightened CNN for Deep Face Representation
+Xiang Wu
+School of Computer and Communication Engineering
+University of Science and Technology Beijing, Beijing, China
+Ran He, Zhenan Sun
+National Laboratory of Pattern Recognition
+Institute of Automation Chinese Academy of Sciences, Beijing, China
+{rhe,"
+d409d8978034de5e5e8f9ee341d4a00441e3d05f,Annual research review: re-thinking the classification of autism spectrum disorders.,"Journal of Child Psychology and Psychiatry 53:5 (2012), pp 490–509
+doi:10.1111/j.1469-7610.2012.02547.x
+Annual Research Review: Re-thinking the
+lassification of autism spectrum disorders
+Center for Autism and the Developing Brain, Weill-Cornell Medical College and New York Presbyterian Hospital/
+Westchester Division, White Plains, NY, USA
+Catherine Lord and Rebecca M. Jones
+Background: The nosology of autism spectrum disorders (ASD) is at a critical point in history as the
+field seeks to better define dimensions of social-communication deficits and restricted/repetitive
+ehaviors on an individual
+level for both clinical and neurobiological purposes. These different
+dimensions also suggest an increasing need for quantitative measures that accurately map their dif-
+ferences, independent of developmental factors such as age, language level and IQ. Method: Psycho-
+metric measures, clinical observation as well as genetic, neurobiological and physiological research
+from toddlers, children and adults with ASD are reviewed. Results: The question of how to conceptu-
+lize ASDs along dimensions versus categories is discussed within the nosology of autism and the
+proposed changes to the DSM-5 and ICD-11. Differences across development are incorporated into the
+new classification frameworks. Conclusions: It is crucial to balance the needs of clinical practice in
+ASD diagnostic systems, with neurobiologically based theories that address the associations between
+social-communication and restricted/repetitive dimensions in individuals. Clarifying terminology,"
+d45fbd818f032566e9e8f8bdc0f658cdd6873e8f,Full-body High-resolution Anime Generation with Progressive Structure-conditional Generative Adversarial Networks,"Full-body High-resolution Anime Generation
+with Progressive Structure-conditional
+Generative Adversarial Networks
+Koichi Hamada, Kentaro Tachibana, Tianqi Li,
+Hiroto Honda, and Yusuke Uchida
+DeNA Co., Ltd., Tokyo, Japan"
+d4b88be6ce77164f5eea1ed2b16b985c0670463a,A Survey of Different 3D Face Reconstruction Methods,"TECHNICAL REPORT JAN.15.2016
+A Survey of Different 3D Face Reconstruction
+Methods
+Amin Jourabloo
+Department of Computer Science and Engineering"
+d42142285c46207a16bd4294e437d504e419a9b7,Varying image description tasks : spoken versus written descriptions,"Varying image description tasks: spoken versus written descriptions
+Emiel van Miltenburg
+Vrije Universiteit Amsterdam
+Ruud Koolen
+Tilburg University
+Emiel Krahmer
+Tilburg University"
+d4dd4600e8f4ecfd11fa4a4a702b1f08bc9ec6f7,Combining intention and emotional state inference in a dynamic neural field architecture for human-robot joint action,"Special issue on Grounding Emotions in Robots
+Combining intention and emotional
+state inference in a dynamic neural
+field architecture for human-robot
+joint action
+Adaptive Behavior
+016, Vol. 24(5) 350–372
+Ó The Author(s) 2016
+Reprints and permissions:
+sagepub.co.uk/journalsPermissions.nav
+DOI: 10.1177/1059712316665451
+db.sagepub.com
+Rui Silva1, Luı´s Louro1, Tiago Malheiro1, Wolfram Erlhagen2 and
+Estela Bicho1"
+d4712c75a1a51ecbc74e362747926a16a2cd36ed,Automated Human Recognition by Gait using Neural Network,"Image Processing Theory, Tools & Applications
+Automated Human Recognition by Gait using Neural Network
+Jang-Hee Yoo
+Information Security
+Research Division, ETRI
+S. Korea
+Ki-Young Moon
+Information Security
+Research Division, ETRI
+S. Korea"
+d4c657ce3b7e47237201393aa6bba0e19442bfd2,Interpolation Based Tracking for Fast Object Detection in Videos,"Interpolation Based Tracking for Fast Object
+Detection In Videos
+Rahul Jain, Pramod Sankar K.*, C. V. Jawahar
+Center for Visual Information Technology
+pramod
+IIIT-Hyderabad, INDIA"
+d44ca9e7690b88e813021e67b855d871cdb5022f,"Selecting, Optimizing and Fusing 'Salient' Gabor Features for Facial Expression Recognition","QUT Digital Repository:
+http://eprints.qut.edu.au/
+Zhang, Ligang and Tjondronegoro, Dian W. (2009) Selecting, optimizing and
+fusing ‘salient’ Gabor features for facial expression recognition. In: Neural
+Information Processing (Lecture Notes in Computer Science), 1-5 December
+009, Hotel Windsor Suites Bangkok, Bangkok.
+© Copyright 2009 Springer-Verlag GmbH Berlin Heidelberg"
+d4901683e2c2552fc2d62d4eb3b1f5d5fa60a5ff,ScaleNet: Scale Invariant Network for Semantic Segmentation in Urban Driving Scenes,
+ba0d84d97eeec7774534b91da78b10c5d924fdc8,Classification with Repulsion Tensors: A Case Study on Face Recognition,"Classification with Repulsion Tensors: A Case Study on Face
+Recognition
+Hawren Fang∗
+March 16, 2016"
+bad7254ae08f8bf1305e70c7de28374f67f151fd,Ré-identification de personnes à partir des séquences vidéo. (Person re-identification from video sequence),"Ré-identification de personnes à partir des séquences
+vidéo
+Mohamed Ibn Khedher
+To cite this version:
+Mohamed Ibn Khedher. Ré-identification de personnes à partir des séquences vidéo. Réseaux et
+télécommunications [cs.NI]. Institut National des Télécommunications, 2014. Français. <NNT :
+014TELE0018>. <tel-01149691>
+HAL Id: tel-01149691
+https://tel.archives-ouvertes.fr/tel-01149691
+Submitted on 7 May 2015
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de"
+bafb8812817db7445fe0e1362410a372578ec1fc,Image-Quality-Based Adaptive Face Recognition,"Image-Quality-Based Adaptive Face Recognition
+Harin Sellahewa and Sabah A. Jassim"
+bac5906adc227e390f2f70705e990a3e1ec369df,Active Control of Camera Parameters for Object Detection Algorithms,"Active Control of Camera Parameters for Object
+Detection Algorithms
+Yulong Wu, John Tsotsos
+Department of Electrical Engineering and Computer Science
+York Univeristy
+Toronto, ON M3J 1P3
+Email: {yulong,"
+ba8e0bda11af08b6037666b67cf54ae1f780822d,Spatial Pyramid Matching,"Author manuscript, published in ""Object Categorization: Computer and Human Vision Perspectives Cambridge University Press (Ed.)
+(2009) 401--415"""
+ba99c37a9220e08e1186f21cab11956d3f4fccc2,A Fast Factorization-Based Approach to Robust PCA,"A Fast Factorization-based Approach to Robust PCA
+Department of Computer Science, Southern Illinois University,Carbondale, IL 62901 USA
+Chong Peng, Zhao Kang, and Qiang Cheng
+Email:"
+ba816806adad2030e1939450226c8647105e101c,MindLAB at the THUMOS Challenge,"MindLAB at the THUMOS Challenge
+Fabi´an P´aez
+Jorge A. Vanegas
+Fabio A. Gonz´alez
+MindLAB Research Group
+MindLAB Research Group
+MindLAB Research Group
+Bogot´a, Colombia
+Bogot´a, Colombia
+Bogot´a, Colombia"
+ba051292ca6e8c689542831479e436be7035c147,Superpixel Sampling Networks,"Superpixel Sampling Networks
+Varun Jampani1, Deqing Sun1, Ming-Yu Liu1,
+Ming-Hsuan Yang1,2, Jan Kautz1
+NVIDIA
+UC Merced"
+baf0af0ac2f2fbbf0c04141e12886ff850d77413,Feature-based 3d Slam,"KERNEL{BASED CLASSIFIERS WITH
+APPLICATIONS TO FACE DETECTION
+TH(cid:18)ESE No 3141 (2004)
+PR(cid:19)ESENT(cid:19)EE (cid:18)A LA FACULT(cid:19)E SCIENCES ET TECHNIQUES DE L’ING(cid:19)ENIEUR
+INSTITUT DE TRAITEMENT DES SIGNAUX
+SECTION DE G(cid:19)ENIE (cid:19)ELECTRIQUE ET (cid:19)ELECTRONIQUE
+(cid:19)ECOLE POLYTECHNIQUE F(cid:19)ED(cid:19)ERALE DE LAUSANNE
+POUR L’OBTENTION DU GRADE DE DOCTEUR (cid:18)ES SCIENCES
+Vlad POPOVICI
+DEA de sciences des syst(cid:18)emes et des calculateurs, Universit(cid:19)e Technique de Cluj-Napoca, Roumanie
+et de nationalit(cid:19)e roumaine
+ccept(cid:19)ee sur proposition du jury:
+Prof. J.-P. Thiran, directeur de th(cid:18)ese
+Dr. S. Bengio, rapporteur
+Prof. J. Kittler, rapporteur
+Prof. M. Kunt, rapporteur
+Lausanne, EPFL
+D(cid:19)ecembre 2004"
+badcd992266c6813063c153c41b87babc0ba36a3,Recent Advances in Object Detection in the Age of Deep Convolutional Neural Networks,"Recent Advances in Object Detection in the Age
+of Deep Convolutional Neural Networks
+Shivang Agarwal(∗
+,1), Jean Ogier du Terrail(∗
+,1,2), Fr´ed´eric Jurie(1)
+(∗) equal contribution
+(1)Normandie Univ, UNICAEN, ENSICAEN, CNRS
+(2)Safran Electronics and Defense
+September 11, 2018"
+ba8a99d35aee2c4e5e8a40abfdd37813bfdd0906,Uporaba emotivno pogojenega računalništva v priporočilnih sistemih,"ELEKTROTEHNI ˇSKI VESTNIK 78(1-2): 12–17, 2011
+EXISTING SEPARATE ENGLISH EDITION
+Uporaba emotivno pogojenega raˇcunalniˇstva v
+priporoˇcilnih sistemih
+Marko Tkalˇciˇc, Andrej Koˇsir, Jurij Tasiˇc
+Univerza v Ljubljani, Fakulteta za elektrotehniko, Trˇzaˇska 25, 1000 Ljubljana, Slovenija
+Univerza v Ljubljani, Fakulteta za raˇcunalniˇstvo in informatiko, Trˇzaˇska 25, 1000 Ljubljana, Slovenija
+E-poˇsta:
+Povzetek. V ˇclanku predstavljamo rezultate treh raziskav, vezanih na izboljˇsanje delovanja multimedijskih
+priporoˇcilnih sistemov s pomoˇcjo metod emotivno pogojenega raˇcunalniˇstva (ang. affective computing).
+Vsebinski priporoˇcilni sistem smo izboljˇsali s pomoˇcjo metapodatkov, ki opisujejo emotivne odzive uporabnikov.
+Pri skupinskem priporoˇcilnem sistemu smo dosegli znaˇcilno izboljˇsanje v obmoˇcju hladnega zagona z uvedbo
+nove mere podobnosti, ki temelji na osebnostnem modelu velikih pet (ang. five factor model). Razvili smo tudi
+sistem za neinvazivno oznaˇcevanje vsebin z emotivnimi parametri, ki pa ˇse ni zrel za uporabo v priporoˇcilnih
+sistemih.
+Kljuˇcne besede: priporoˇcilni sistemi, emotivno pogojeno raˇcunalniˇstvo, strojno uˇcenje, uporabniˇski profil,
+emocije
+Uporaba emotivnega raˇcunalniˇstva v priporoˇcilnih
+sistemih
+In this paper we present the results of three investigations of"
+baeb207ea6f4b52eea129b9d8597d4b7a0891ad6,"Sparse , Smart Contours to Represent and Edit Images","Sparse, Smart Contours to Represent and Edit Images
+Tali Dekel 1
+Chuang Gan 2
+Dilip Krishnan 1
+Ce Liu 1 William T. Freeman 1,3
+Google Research 2 MIT-Watson AI Lab 3 MIT-CSAIL
+Reconstruction from Sparse Contour Represenation
+Editing in the Contour Domain
+.4% px
+.5% px
+(a) Source
+(b) Contours
+(c) Source Reconstuction
+(d) Edited/blended Contours
+(e) Recon. from Edit
+Reference
+Figure 1. Our method produces high quality reconstructions of images from information along a small number of contours: a source
+(512×512) image in (a) is reconstructed in (c) from gradient information stored at the set of colored contours in (b)2, which are less than
+5% of the pixels. The model synthesizes hair texture, facial lines and shading even in regions where no input information is provided.
+Our model allows for semantically intuitive editing in the contour domain. Top-right: a caricature-like result (e) is created by moving and"
+ba1cf2d0493f25da61bd816f92712291999c0ef6,Simple online and realtime tracking with a deep association metric,"SIMPLE ONLINE AND REALTIME TRACKING WITH A DEEP ASSOCIATION METRIC
+Nicolai Wojke†, Alex Bewley(cid:5), Dietrich Paulus†
+University of Koblenz-Landau†, Queensland University of Technology(cid:5)"
+bade9b38c45afd4f988e246974427685f3ff599f,Pairwise Rotation Hashing for High-dimensional Features,"Pairwise Rotation Hashing for High-dimensional
+Features
+Kohta Ishikawa, Ikuro Sato, and Mitsuru Ambai
+Denso IT Laboratory, Inc."
+badd371a49d2c4126df95120902a34f4bee01b00,Parallel Separable 3D Convolution for Video and Volumetric Data Understanding,"GONDA, WEI, PARAG, PFISTER: PARALLEL SEPARABLE 3D CONVOLUTION
+Parallel Separable 3D Convolution for Video
+nd Volumetric Data Understanding
+Harvard John A. Paulson School of
+Engineering and Applied Sciences
+Camabridge MA, USA
+Felix Gonda
+Donglai Wei
+Toufiq Parag
+Hanspeter Pfister"
+ba87bcf4bf799001641b7afd7d1025600f57c4a1,A Hybrid Architecture for Tracking People in Real-time Using a Video Surveillance Camera: Application for Behavioural Marketing,"Signal & Image Processing : An International Journal (SIPIJ) Vol.6, No.6, December 2015
+A HYBRID ARCHITECTURE FOR TRACKING
+PEOPLE IN REAL-TIME USING A VIDEO
+SURVEILLANCE CAMERA: APPLICATION FOR
+BEHAVIOURAL MARKETING
+Kheireddine AZIZ1, Djamal MERAD2, Jean-Luc DAMOISEAUX3 and
+Pierre DRAP2
+SeaTech Toulon, Toulon University, La Gardes, France
+LSIS Lab, Aix-Marseille University, Marseille, France
+IUT R&T, Aix-Marseille University, Marseille, France"
+bab47c7bf80c9310f947cbdaf71b3c983c497b68,Systematic Parameter Optimization and Application of Automated Tracking in Pedestrian Dominant Situations Date of submission : 2014-0801,"Systematic Parameter Optimization and Application of Automated
+Tracking in Pedestrian Dominant Situations
+Date of submission: 2014-08-01
+Dariush Ettehadieh*
+M.Sc. Student,
+Polytechnique Montréal,
+500, Chemin de Polytechnique, Montreal
+phone : 1-514-266-5544
+Bilal Farooq
+Assistant Professor,
+Polytechnique Montréal
+500, Chemin de Polytechnique, Montreal
+phone : 1-514-340-4711 ext. 4802
+Nicolas Saunier
+Associate Professor,
+Polytechnique Montréal
+500, Chemin de Polytechnique, Montreal
+phone : 1-514-340-4711 ext. 4962
+5029 Words + 4 Figures + 3 Tables = 6779
+Submitted for presentation to the 94th Annual Meeting of the Transportation Research Board and publication in"
+ba7c01e1432bffc2fcde824d0b0ebd25ad7238c3,Face Recognition Techniques : A Review,"International Journal of Engineering Research and Development
+e-ISSN: 2278-067X, p-ISSN: 2278-800X, www.ijerd.com
+Volume 4, Issue 7 (November 2012), PP. 70-78
+Face Recognition Techniques: A Review
+Rajeshwar Dass, 2Ritu Rani, 3Dharmender Kumar
+,2,3 Deen Bandhu Chotu Ram University of Science & Technology Murthal, Haryana, India"
+a079309d28b6f8753ca26a789bd0bc43de9bd9f8,Interpretable Counting for Visual Question Answering,"Published as a conference paper at ICLR 2018
+INTERPRETABLE COUNTING FOR VISUAL QUESTION
+ANSWERING
+Alexander Trott, Caiming Xiong∗, & Richard Socher
+Salesforce Research
+Palo Alto, CA"
+a0f94e9400938cbd05c4b60b06d9ed58c3458303,Value-Directed Human Behavior Analysis from Video Using Partially Observable Markov Decision Processes,"Value-Directed Human Behavior Analysis
+from Video Using Partially Observable
+Markov Decision Processes
+Jesse Hoey and James J. Little, Member, IEEE"
+a022eff5470c3446aca683eae9c18319fd2406d5,Deep learning for semantic description of visual human traits. (Apprentissage profond pour la description sémantique des traits visuels humains),"017-ENST-0071
+EDITE - ED 130
+Doctorat ParisTech
+T H È S E
+pour obtenir le grade de docteur délivré par
+TÉLÉCOM ParisTech
+Spécialité « SIGNAL et IMAGES »
+présentée et soutenue publiquement par
+Grigory ANTIPOV
+le 15 décembre 2017
+Apprentissage Profond pour la Description Sémantique des Traits
+Visuels Humains
+Directeur de thèse : Jean-Luc DUGELAY
+Co-encadrement de la thèse : Moez BACCOUCHE
+Mme Bernadette DORIZZI, PRU, Télécom SudParis
+Mme Jenny BENOIS-PINEAU, PRU, Université de Bordeaux
+M. Christian WOLF, MC/HDR, INSA de Lyon
+M. Patrick PEREZ, Chercheur/HDR, Technicolor Rennes
+M. Moez BACCOUCHE, Chercheur/Docteur, Orange Labs Rennes
+M. Jean-Luc DUGELAY, PRU, Eurecom Sophia Antipolis"
+a06ef8ef4838c048b814563f7cca479c7d4513f2,Multi-module Singular Value Decomposition for Face Recognition,"ORIENTAL JOURNAL OF
+COMPUTER SCIENCE & TECHNOLOGY
+An International Open Free Access, Peer Reviewed Research Journal
+Published By: Oriental Scientific Publishing Co., India.
+www.computerscijournal.org
+ISSN: 0974-6471
+April 2014,
+Vol. 7, No. (1):
+Pgs. 09-14
+Multi-module Singular Value Decomposition
+for Face Recognition
+A. NAMACHIVAYAM and KALIYAPERUMAL KARTHIKEYAN
+Eritrea Institute of Technology, Asmara, Eritrea, North East Africa.
+(Received: March 20, 2014; Accepted: March 30, 2014)"
+a0c37f07710184597befaa7e6cf2f0893ff440e9,Fast Retinomorphic Event Stream for Video Recognition and Reinforcement Learning,
+a010835842ac0e49eade395f056e1e33d45b6ea5,Four Way Local Binary Pattern for Gender Classification Using Periocular Images,"Four Way Local Binary Pattern for
+Gender Classification Using Periocular
+Images
+Md. Siyam Sajeeb Khan
+(2014-1-60-024)
+Rifat Mehreen Amin
+(2014-1-60-003)
+Department of Computer Science and Engineering
+East West University
+Aftabnagar, Dhaka-1212, Bangladesh
+August, 2017"
+a0a950f513b4fd58cee54bccc49b852943ffd02c,Image Inpainting using Block-wise Procedural Training with Annealed Adversarial Counterpart,"Image Inpainting using Block-wise Procedural Training with Annealed
+Adversarial Counterpart
+Chao Yang1, Yuhang Song1, Xiaofeng Liu2, Qingming Tang3, and C.-C. Jay Kuo1
+USC, 2Carnegie Mellon University, 3Toyota Technological Institute at Chicago,"
+a012b41fc54060e11744db20ef6d191b290f1879,Unconstrained Face Recognition From Blurred and Illumination with Pose Variant Face Image Using SVM,"ISSN(Online): 2320-9801
+ISSN (Print): 2320-9798
+International Journal of Innovative Research in Computer and Communication Engineering
+(An ISO 3297: 2007 Certified Organization)
+Vol.2, Special Issue 1, March 2014
+Proceedings of International Conference On Global Innovations In Computing Technology (ICGICT’14)
+Department of CSE, JayShriram Group of Institutions, Tirupur, Tamilnadu, India on 6th & 7th March 2014
+Organized by
+Unconstrained Face Recognition From Blurred and
+Illumination with Pose Variant Face Image Using
+Dept. of CSE, PG Student (SE), Sri Krishna College of Engineering and Technology, Coimbatore, Tamilnadu, India1
+C.Indhumathi1"
+a0e3775fd5d5df951ac7f65d3a9165bf4b96fbd8,Towards Automatic Image Editing: Learning to See another You,"Towards Automatic Image Editing: Learning to See another You
+Amir Ghodrati1∗, Xu Jia1∗, Marco Pedersoli2†, Tinne Tuytelaars1
+KU Leuven, ESAT-PSI, iMinds
+INRIA"
+a0b2df8f72ff672cb0760c5221657a5f48f0ec5d,Searching Image Databases Using Appearance Models,"Searching Image Databases
+Using Appearance Models
+A thesis submitted to the University of Manchester for the degree of
+Doctor of Philosophy in the Faculty of Medicine, Dentistry, Nursing
+and Pharmacy
+Ian M. Scott
+Division of Imaging Science and Biomedical Engineering"
+a01ba008252d2ce32f326f50c208c9ad9d5c78a6,Detecting Sudden Pedestrian Crossings and Avoiding Accidents Using Arm 11,"K. Sri Krishna Aditya et al Int. Journal of Engineering Research and Applications www.ijera.com
+ISSN : 2248-9622, Vol. 3, Issue 5, Sep-Oct 2013, pp.1213-1216
+RESEARCH ARTICLE OPEN ACCESS
+Detecting Sudden Pedestrian Crossings and Avoiding Accidents
+Using Arm 11
+K. Sri Krishna Aditya1, T. Surya Kavita2, U. Yedukondalu3
+Assistant Professor, 2Associate Professor, 3Head of the Department E.C.E.
+Aditya Engineering College, 2Aditya Engineering College, 3Aditya Engineering College"
+a0fd85b3400c7b3e11122f44dc5870ae2de9009a,Learning Deep Representation for Face Alignment with Auxiliary Attributes,"Learning Deep Representation for Face
+Alignment with Auxiliary Attributes
+Zhanpeng Zhang, Ping Luo, Chen Change Loy, Member, IEEE and Xiaoou Tang, Fellow, IEEE"
+a0dfb8aae58bd757b801e2dcb717a094013bc178,Reconocimiento de expresiones faciales con base en la dinámica de puntos de referencia faciales,"Reconocimiento de expresiones faciales con base
+en la din´amica de puntos de referencia faciales
+E. Morales-Vargas, C.A. Reyes-Garcia, Hayde Peregrina-Barreto
+Instituto Nacional de Astrof´ısica ´Optica y Electr´onica,
+Divisi´on de Ciencias Computacionales, Tonantzintla, Puebla,
+M´exico
+Resumen. Las expresiones faciales permiten a las personas comunicar
+emociones, y es pr´acticamente lo primero que observamos al interactuar
+con alguien. En el ´area de computaci´on, el reconocimiento de expresiones
+faciales es importante debido a que su an´alisis tiene aplicaci´on directa en
+´areas como psicolog´ıa, medicina, educaci´on, entre otras. En este articulo
+se presenta el proceso de dise˜no de un sistema para el reconocimiento de
+expresiones faciales utilizando la din´amica de puntos de referencia ubi-
+cados en el rostro, su implementaci´on, experimentos realizados y algunos
+de los resultados obtenidos hasta el momento.
+Palabras clave: Expresiones faciales, clasificaci´on, m´aquinas de soporte
+vectorial,modelos activos de apariencia.
+Facial Expressions Recognition Based on Facial
+Landmarks Dynamics"
+a03cfd5c0059825c87d51f5dbf12f8a76fe9ff60,Simultaneous Learning and Alignment: Multi-Instance and Multi-Pose Learning,"Simultaneous Learning and Alignment:
+Multi-Instance and Multi-Pose Learning?
+Boris Babenko1 Piotr Doll´ar1,2
+Zhuowen Tu3
+Serge Belongie1,2
+Comp. Science & Eng.
+Univ. of CA, San Diego
+Electrical Engineering
+California Inst. of Tech.
+Lab of Neuro Imaging
+Univ. of CA, Los Angeles"
+a090d61bfb2c3f380c01c0774ea17929998e0c96,On the dimensionality of video bricks under varying illumination,"On the Dimensionality of Video Bricks under Varying Illumination
+Beijing Lab of Intelligent Information Technology, School of Computer Science,
+Youdong Zhao, Xi Song, Yunde Jia
+Beijing Institute of Technology, Beijing 100081, PR China
+{zyd458, songxi,"
+a05a770bb2b7778e195a578006482926dfc1af82,Learning to Recognize Pedestrian Attribute,"Learning to Recognize Pedestrian Attribute
+Yubin Deng, Ping Luo, Chen Change Loy, Member, IEEE, and Xiaoou Tang, Fellow, IEEE"
+a016fbe8d09402316c7b38946ccd502d76aa8c74,Using a Single RGB Frame for Real Time 3D Hand Pose Estimation in the Wild,"Using a single RGB frame for real time 3D hand pose estimation in the wild
+Paschalis Panteleris1
+Iason Oikonomidis1
+Institute of Computer Science, FORTH
+Computer Science Department, UOC
+Antonis Argyros1,2"
+a0798a0a422520241cc02282946882dd1ef853cd,Full Quantification of Left Ventricle via Deep Multitask Learning Network Respecting Intra- and Inter-Task Relatedness,"Full Quantification of Left Ventricle via Deep
+Multitask Learning Network Respecting
+Intra- and Inter-Task Relatedness
+Wufeng Xue, Andrea Lum, Ashley Mercado, Mark Landis, James Warrington,
+and Shuo Li*
+Department of Medical Imaging, Western University, ON, Canada
+Digital Imaging Group of London, ON, Canada"
+a0541d4a28d90a17cd3eaa9d1797882eacc8ccf0,Improving Person Re-identification via Pose-Aware Multi-shot Matching,"Improving Person Re-identification via Pose-aware Multi-shot Matching
+Yeong-Jun Cho and Kuk-Jin Yoon
+Computer Vision Laboratory, GIST, South Korea
+{yjcho,"
+a0e5afb1237d47f7a8ac66e7b5ada24cec5222cb,Semantic pooling for image categorization using multiple kernel learning,"SEMANTIC POOLING FOR IMAGE CATEGORIZATION USING MULTIPLE KERNEL
+LEARNING
+Thibaut Durand(1,2), David Picard(1), Nicolas Thome(2), Matthieu Cord(2)
+(1) ETIS, UMR 8051 / ENSEA, Universit´e Cergy-Pontoise, CNRS, F-95000, Cergy,
+(2) Sorbonne Universit´es, UPMC Univ Paris 06, UMR 7606, LIP6, F-75005, Paris, France"
+a06761b3181a003c2297d8e86c7afc20e17fd2c6,Convolutional Neural Network-Based Human Detection in Nighttime Images Using Visible Light Camera Sensors,"Article
+Convolutional Neural Network-Based Human
+Detection in Nighttime Images Using Visible Light
+Camera Sensors
+Jong Hyun Kim, Hyung Gil Hong and Kang Ryoung Park *
+Division of Electronics and Electrical Engineering, Dongguk University, 30 Pildong-ro 1-gil, Jung-gu,
+Seoul 100-715, Korea; (J.H.K.); (H.G.H.)
+* Correspondence: Tel.: +82-10-3111-7022; Fax: +82-2-2277-8735
+Academic Editor: Vittorio M. N. Passaro
+Received: 31 March 2017; Accepted: 4 May 2017; Published: 8 May 2017"
+a000149e83b09d17e18ed9184155be140ae1266e,Action Recognition in Realistic Sports Videos,"Chapter 9
+Action Recognition in Realistic
+Sports Videos
+Khurram Soomro and Amir R. Zamir"
+a01f9461bc8cf8fe40c26d223ab1abea5d8e2812,Facial Age Estimation Through the Fusion of Texture and Local Appearance Descriptors,"Facial Age Estimation Through the Fusion of Texture
+and local appearance Descriptors
+Ivan Huerta1, Carles Fern´andez2, and Andrea Prati1
+DPDCE, University IUAV, Santa Croce 1957, 30135 Venice, Italy
+Herta Security, Pau Claris 165 4-B, 08037 Barcelona, Spain"
+a0e03c5b647438299c79c71458e6b1776082a37b,Areas of Attention for Image Captioning,"transformerFigure1.Weproposeanattentionmechanismthatjointlypredictsthenextcaptionwordandthecorrespondingregionateachtime-stepgiventheRNNstate(top).BesidesimplementingourmodelusingattentionareasdefinedoverCNNactivationgridsorobjectproposals,asusedinpreviouswork,wealsopresentaend-to-endtrainableconvolutionalspatialtransformerapproachtocomputeimagespecificattentionareas(bottom).typeorlocation,objectproperties,andtheirinteractions.Neuralencoder-decoderbasedapproaches,similartothoseusedinmachinetranslation[30],havebeenfoundveryeffectiveforthistask,seee.g.[19,23,32].Thesemethodsuseaconvolutionalneuralnetwork(CNN)toen-codetheinputimageintoacompactrepresentation.Are-currentneuralnetwork(RNN)isusedtodecodethisrepre-sentationword-by-wordintoanaturallanguagedescriptionoftheimage.Whileeffective,thesemodelsarelimitedinthattheimageanalysisis(i)static,i.e.doesnotchangeovertimeasthedescriptionisproduced,and(ii)notspatiallylo-calized,i.e.describesthesceneasawholeinsteadoffo-cousingonlocalaspectsrelevanttopartsofthedescription.Attentionmechanismscanaddresstheselimitationsbydy-namicallyfocusingondifferentpartsoftheinputastheout-putsequenceisgenerated.Suchmechanismsareeffectiveforavarietyofsequentialpredictiontasks,includingma-1"
+a759570e6ef674cd93068020c2e6bd036961f7c6,SPEECH-COCO: 600k Visually Grounded Spoken Captions Aligned to MSCOCO Data Set,"SPEECH-COCO: 600k Visually Grounded Spoken Captions Aligned to
+MSCOCO Data Set
+William N. Havard1, Laurent Besacier1, Olivier Rosec2
+Univ. Grenoble Alpes, CNRS, Grenoble INP, LIG, F-38000 Grenoble, France
+Voxygen, France"
+a702fc36f0644a958c08de169b763b9927c175eb,Facial expression recognition using Hough forest,"FACIAL EXPRESSION RECOGNITION USING HOUGH FOREST
+Chi-Ting Hsu1, Shih-Chung Hsu1, and Chung-Lin Huang1,2
+. Department of Electrical Engineering, National Tsing-Hua University, Hsin-Chu, Taiwan
+Email:
+. Department of Applied Informatics and Multimedia, Asia University, Taichung, Taiwan"
+a7790555c65be0fc5b5de9bcb1dc550f4919ce3f,Literature Survey for Face Detection under Illumination Variation,"International Journal of Scientific Research Engineering & Technology (IJSRET)
+Volume 2 Issue 10 pp 659-664 January 2014
+www.ijsret.org ISSN 2278 – 0882
+Literature Survey for Face Detection under Illumination Variation
+J.SHYNU, P.KANNAN
+PG Scholar Department of ECE, PET Engineering College, India
+Professor Department of ECE, PET Engineering College, India"
+a7267bc781a4e3e79213bb9c4925dd551ea1f5c4,Proceedings of eNTERFACE 2015 Workshop on Intelligent Interfaces,"Proceedings of eNTERFACE’15
+The 11th Summer Workshop
+on Multimodal Interfaces
+August 10th - September 4th, 2015
+Numediart Institute, University of Mons
+Mons, Belgium"
+a7a1d3036c542824f2c681c3bf08f5b85f05d9e9,A Fast and Precise HOG-Adaboost Based Visual Support System Capable to Recognize Pedestrian and Estimate Their Distance,"A fast and precise HOG-Adaboost based visual support
+system capable to recognize Pedestrian and estimate their distance.
+Yokohama City University, Graduate School of Nanobioscience, 22-2 Seto Kanazawa-ku, 236-0027 Yokohama, Japan
+Takahisa Kishino1, Sun Zhe1,Ruggero Micheletto1"
+a784a0d1cea26f18626682ab108ce2c9221d1e53,Anchored Regression Networks Applied to Age Estimation and Super Resolution,"Anchored Regression Networks applied to Age Estimation and Super Resolution
+Eirikur Agustsson
+D-ITET, ETH Zurich
+Switzerland
+Radu Timofte
+D-ITET, ETH Zurich
+Merantix GmbH
+Luc Van Gool
+D-ITET, ETH Zurich
+ESAT, KU Leuven"
+a77e9f0bd205a7733431a6d1028f09f57f9f73b0,Multimodal feature fusion for CNN-based gait recognition: an empirical comparison,"Multimodal feature fusion for CNN-based gait recognition: an
+empirical comparison
+F.M. Castroa,, M.J. Mar´ın-Jim´enezb, N. Guila, N. P´erez de la Blancac
+Department of Computer Architecture, University of Malaga, Spain, 29071
+Department of Computing and Numerical Analysis, University of Cordoba, Spain, 14071
+Department of Computer Science and Artificial Intelligence, University of Granada, Spain, 18071"
+a7d23c699a5ae4ad9b8a5cbb8c38e5c3b5f5fb51,A Summary of literature review : Face Recognition,"Postgraduate Annual Research Seminar 2007 (3-4 July 2007)
+A Summary of literature review : Face Recognition
+Kittikhun Meethongjan & Dzulkifli Mohamad
+Faculty of Computer Science & Information System,
+University Technology of Malaysia, 81310 Skudai, Johor, Malaysia."
+a77e0db38ed7ad95a3bca95fea72048985c54508,DART: Distribution Aware Retinal Transform for Event-based Cameras,"SUBMITTED TO IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE
+DART: Distribution Aware Retinal Transform for
+Event-based Cameras
+Bharath Ramesh*, Hong Yang, Garrick Orchard, Ngoc Anh Le Thi, and Cheng Xiang, Member, IEEE"
+a7fe834a0af614ce6b50dc093132b031dd9a856b,Orientation Driven Bag of Appearances for Person Re-identification,"Orientation Driven Bag of Appearances for Person
+Re-identification
+Liqian Ma, Hong Liu†, Member, IEEE, Liang Hu, Can Wang, Qianru Sun"
+a7664247a37a89c74d0e1a1606a99119cffc41d4,Modal Consistency based Pre-Trained Multi-Model Reuse,Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+a7bfb6426359140a0bc0c84741ad9a3ac83eff04,Object-Level Context Modeling For Scene Classification with Context-CNN,"Object-Level Context Modeling For Scene Classification with Context-CNN
+Syed Ashar Javed1 and Anil Kumar Nelakanti2
+IIIT Hyderabad, 2Amazon"
+a71e3cf566de457336aab9dd6a5f5d6282b4a6af,Visual Abstraction for Zero-Shot Learning,
+a73bc57fb0aa429ba5f7f12b6d02e2c6274cabdd,A Superior Tracking Approach: Building a Strong Tracker through Fusion,"A Superior Tracking Approach:
+Building a Strong Tracker through Fusion
+Christian Bailer1, Alain Pagani1, and Didier Stricker1,2
+German Research Center for Artificial Intelligence, Kaiserslautern, Germany
+University of Kaiserslautern, Germany"
+a7152589980ec27375023d719eec6acc04b7d4fd,Generating Facial Expressions,"Generating Facial Expressions
+Jonathan Suit
+Georgia Tech"
+a7a6eb53bee5e2224f2ecd56a14e3a5a717e55b9,Face Recognition Using Multi-viewpoint Patterns for Robot Vision,"1th International Symposium of Robotics Research (ISRR2003), pp.192-201, 2003
+Face Recognition Using Multi-viewpoint Patterns for
+Robot Vision
+Kazuhiro Fukui and Osamu Yamaguchi
+Corporate Research and Development Center, TOSHIBA Corporation
+, KomukaiToshiba-cho, Saiwai-ku, Kawasaki 212-8582 Japan"
+a7e274db8f1389b95469588995f18c1c42b62534,VideoStory Embeddings Recognize Events when Examples are Scarce,
+a7e78f80e0e37d0c17bc09058c27996e32e4454e,UNAM at SemEval-2018 Task 10: Unsupervised Semantic Discriminative Attribute Identification in Neural Word Embedding Cones,"Proceedings of the 12th International Workshop on Semantic Evaluation (SemEval-2018), pages 977–984
+New Orleans, Louisiana, June 5–6, 2018. ©2018 Association for Computational Linguistics"
+a758b744a6d6962f1ddce6f0d04292a0b5cf8e07,"Study on Human Face Recognition under Invariant Pose, Illumination and Expression using LBP, LoG and SVM","ISSN XXXX XXXX © 2017 IJESC
+Research Article Volume 7 Issue No.4
+Study on Human Face Recognition under Invariant Pose, Illumination
+and Expression using LBP, LoG and SVM
+Amrutha
+Depart ment of Co mputer Science & Engineering
+Mangalore Institute of Technology & Engineering , Moodabidri, Mangalore, India
+INTRODUCTION
+RELATED WORK
+Abstrac t:
+Face recognition system uses human face for the identification of the user. Face recognition is a difficu lt task there is no unique
+method that provide accurate an accurate and effic ient solution in all the situations like the face image with differen t pose ,
+illu mination and exp ression. Local Binary Pattern (LBP) and Laplac ian of Gaussian (Lo G) operators. Support Vector Machine
+classifier is used to recognize the human face. The Lo G algorith m is used to preprocess the image to detect the edges of the face
+image to get the image information. The LBP operator divides the face image into several blocks to generate the features informat ion
+on pixe l level by creating LBP labels for all the blocks of image is obtained by concatenating all the individual local histo grams.
+Support Vector Machine classifier (SVM ) is used to classify t he image. The a lgorith m performances is verified under the constraints
+like illu mination, e xp ression and pose variation
+Ke ywor ds: Face Recognition, Local Binary Pattern, Laplac ian of Gaussian, histogram, illu mination, pose angle, exp ression
+variations, SVM ."
+a73a16203b644353a287a4759bc951450e67d700,BodyNet: Volumetric Inference of 3D Human Body Shapes,"BodyNet: Volumetric Inference of
+3D Human Body Shapes
+G¨ul Varol1,*
+Ersin Yumer2,‡
+Duygu Ceylan2
+Bryan Russell2
+Jimei Yang2
+Ivan Laptev1,*
+Cordelia Schmid1,†
+Inria, France
+Adobe Research, USA"
+a764cba765648c6e36782b02393ea2eed5cd69c7,Contributions to large-scale learning for image classification. (Contributions à l'apprentissage grande échelle pour la classification d'images),"CONTRIBUTIONSTOLARGE-SCALELEARNINGFORIMAGECLASSIFICATIONZeynepAkataPhDThesisl’´EcoleDoctoraleMath´ematiques,SciencesetTechnologiesdel’Information,InformatiquedeGrenoble"
+a7663528eb6c9b79a68b94800e30da952c0b6bb2,IFQ-Net : Integrated Fixed-point Quantization Networks for Embedded Vision,"IFQ-Net: Integrated Fixed-point Quantization Networks for Embedded Vision
+Hongxing Gao, Wei Tao, Dongchao Wen
+Canon Information Technology (Beijing) Co., LTD
+Tse-Wei Chen, Kinya Osa, Masami Kato
+Device Technology Development Headquarters, Canon Inc."
+a7e8ce268c16ea8c10e4c5ccd8d6e53702423faa,The Ciona17 Dataset for Semantic Segmentation of Invasive Species in a Marine Aquaculture Environment,"The Ciona17 Dataset for Semantic Segmentation
+of Invasive Species in a Marine Aquaculture Environment
+Angus Galloway∗, Graham W. Taylor∗, Aaron Ramsay†, Medhat Moussa∗
+School of Engineering
+University of Guelph
+Guelph, ON, Canada
+{gallowaa, gwtaylor,
+Department of Agriculture and Fisheries
+Government of PEI
+Montague, PEI, Canada"
+a75ee7f4c4130ef36d21582d5758f953dba03a01,Human face attributes prediction with Deep Learning,"DD2427 Final Project Report
+Mohamed Abdulaziz Ali Haseeb
+DD2427 Final Project Report
+Human face attributes prediction with Deep
+Learning
+Mohamed Abdulaziz Ali Haseeb"
+a726858df7c9503116504206577a938df1a67815,Unsupervised Vehicle Re-Identification using Triplet Networks,"Unsupervised Vehicle Re-Identification using Triplet Networks
+Pedro Antonio Mar´ın-Reyes
+Andrea Palazzi
+University of Las Palmas de Gran Canaria
+University of Modena and Reggio Emilia
+Luca Bergamini
+Simone Calderara
+University of Modena and Reggio Emilia
+University of Modena and Reggio Emilia
+Javier Lorenzo-Navarro
+Rita Cucchiara
+University of Las Palmas de Gran Canaria
+University of Modena and Reggio Emilia"
+a760ce8baddf2da7946d2ed6f02ac3927f39a9da,Face Recognition Using a Unified 3D Morphable Model,"Face Recognition Using a Unified 3D Morphable Model
+Hu, G., Yan, F., Chan, C-H., Deng, W., Christmas, W., Kittler, J., & Robertson, N. M. (2016). Face Recognition
+Using a Unified 3D Morphable Model. In Computer Vision – ECCV 2016: 14th European Conference,
+Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part VIII (pp. 73-89). (Lecture Notes in
+Computer Science; Vol. 9912). Springer Verlag. DOI: 10.1007/978-3-319-46484-8_5
+Published in:
+Computer Vision – ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14,
+2016, Proceedings, Part VIII
+Document Version:
+Peer reviewed version
+Queen's University Belfast - Research Portal:
+Link to publication record in Queen's University Belfast Research Portal
+Publisher rights
+The final publication is available at Springer via http://dx.doi.org/10.1007/978-3-319-46484-8_5
+General rights
+Copyright for the publications made accessible via the Queen's University Belfast Research Portal is retained by the author(s) and / or other
+copyright owners and it is a condition of accessing these publications that users recognise and abide by the legal requirements associated
+with these rights.
+Take down policy
+The Research Portal is Queen's institutional repository that provides access to Queen's research output. Every effort has been made to"
+a71106ef95103276fac010c10291f6dd6fd9d9f5,Social status level and dimension interactively influence person evaluations indexed by P300s.,"ISSN: 1747-0919 (Print) 1747-0927 (Online) Journal homepage: http://www.tandfonline.com/loi/psns20
+Social status level and dimension interactively
+influence person evaluations indexed by P300s
+Ivo Gyurovski, Jennifer Kubota, Carlos Cardenas-Iniguez & Jasmin Cloutier
+To cite this article: Ivo Gyurovski, Jennifer Kubota, Carlos Cardenas-Iniguez & Jasmin Cloutier
+(2017): Social status level and dimension interactively influence person evaluations indexed by
+To link to this article: http://dx.doi.org/10.1080/17470919.2017.1326400
+Accepted author version posted online: 02
+May 2017.
+Published online: 15 May 2017.
+Submit your article to this journal
+Article views: 11
+View related articles
+View Crossmark data
+Full Terms & Conditions of access and use can be found at
+http://www.tandfonline.com/action/journalInformation?journalCode=psns20
+Download by: [University of Chicago Library]
+Date: 22 May 2017, At: 09:19"
+a775da3e6e6ea64bffab7f9baf665528644c7ed3,Human Face Pose Estimation based on Feature Extraction Points,"International Journal of Computer Applications (0975 – 8887)
+Volume 142 – No.9, May 2016
+Human Face Pose Estimation based on Feature
+Extraction Points
+Guneet Bhullar
+Research scholar,
+Department of ECE
+SBSSTC, Moga Road,
+Ferozepur, Punjab, India"
+a703d51c200724517f099ee10885286ddbd8b587,Fuzzy neural networks(FNN)-based approach for personalized facial expression recognition with novel feature selection method,"Fuzzy Neural Networks(FNN)-based Approach for
+Personalized Facial Expression Recognition with
+Novel Feature Selection Method
+Dae-Jin Kim and Zeungnam Bien
+Div. of EE, Dept. of EECS, KAIST
+73-1 Guseong-dong, Yuseong-gu, Daejeon 305-701, Korea
+Kwang-Hyun Park
+Human-friendly Welfare Robotic System Engineering Research Center, KAIST
+73-1 Guseong-dong, Yuseong-gu, Daejeon 305-701, Korea"
+a70fa8af52e4cc32dae09e6e753f1dd3ec198327,Neural Task Representations as Weak Supervision for Model Agnostic Cross-Lingual Transfer,"Neural Task Representations as Weak Supervision for Model Agnostic
+Cross-Lingual Transfer
+Sujay Kumar Jauhar
+Microsoft Research AI
+Redmond, WA, USA
+Michael Gamon
+Microsoft Research AI
+Redmond, WA, USA
+Patrick Pantel∗
+Facebook Inc.
+Seattle, WA, USA"
+a7eee3222623778294461102d0dc770d4e09a7c5,A novel fusion-based method for expression-invariant gender classification,"978-1-4244-2354-5/09/$25.00 ©2009 IEEE
+ICASSP 2009"
+b878518814fee31ce8cb61040301e7a921892156,A Gaussian Feature Adaptive Integrated PCA-ICA Approach for Facial Recognition,"Vaishali et al, International Journal of Computer Science and Mobile Computing, Vol.4 Issue.5, May- 2015, pg. 401-406
+Available Online at www.ijcsmc.com
+International Journal of Computer Science and Mobile Computing
+A Monthly Journal of Computer Science and Information Technology
+IJCSMC, Vol. 4, Issue. 5, May 2015, pg.401 – 406
+RESEARCH ARTICLE
+ISSN 2320–088X
+A Gaussian Feature Adaptive Integrated PCA-ICA
+Approach for Facial Recognition
+Student, Dept. of ECE, ITM University Gurgaon Haryana
+Vaishali
+Dr. Rekha Vig
+Asstt. Prof, Dept. of ECE, ITM University Gurgaon Haryana"
+b871d1b8495025ff8a6255514ed39f7765415935,Application of Completed Local Binary Pattern for Facial Expression Recognition on Gabor Filtered Facial Images,"Application of Completed Local Binary Pattern for Facial Expression
+Recognition on Gabor Filtered Facial Images
+Tanveer Ahsan, 2Rifat Shahriar, *3Uipil Chong
+Dept. of Electrical and Computer Engineering, University of Ulsan, Ulsan, Republic of Korea"
+b85901174fa83c76ae994603228ba5b4f299a1af,"Sos, Lost in a High Dimensional Space","SOS, LOST IN A HIGH DIMENSIONAL SPACE
+Anne Hendrikse"
+b8dba0504d6b4b557d51a6cf4de5507141db60cf,Comparing Performances of Big Data Stream Processing Platforms with RAM3S,"Comparing Performances of Big Data Stream
+Processing Platforms with RAM3S"
+b8b46df1b013c30d791972ee109425a94e3adc06,"Automaticity, Control, and the Social Brain","C H A P T E R 1 9
+Automaticity, Control,
+and the Social Brain
+Robert P. Spunt and Matthew D. Lieberman
+The social world is good at keeping the
+human brain busy, posing cognitive chal-
+lenges that are complex, frequent, and enor-
+mously important to our well-being. In fact,
+the computational demands of the social
+world may be the principal reason why
+the human brain has evolved to its present
+form and function relative to other primates
+(Dunbar, 1993). Importantly, the human
+brain is often able to make sense of the
+social world without having to do too much
+work. This is because many of its processes
+are automatically initiated by the presence
+of relevant social stimuli and run to comple-
+tion without much, if any, conscious inter-
+vention (Bargh & Chartrand, 1999; Gilbert,"
+b89862f38fff416d2fcda389f5c59daba56241db,A Web Survey for Facial Expressions Evaluation,"A Web Survey for Facial Expressions Evaluation
+Matteo Sorci
+Gianluca Antonini
+Jean-Philippe Thiran
+Ecole Polytechnique Federale de Lausanne
+Signal Processing Institute
+Ecublens, 1015 Lausanne, Switzerland
+Ecole Polytechnique Federale de Lausanne, Operation Research Group
+Michel Bierlaire
+Ecublens, 1015 Lausanne, Switzerland
+June 9, 2008"
+b8612b5c1aa0970b5d99340ad19d7fcede1b0854,"Fusion of Speech, Faces and Text for Person Identification in TV Broadcast","Fusion of speech, faces and text for
+person identification in TV broadcast
+Herv´e Bredin1, Johann Poignant2, Makarand Tapaswi3, Guillaume Fortier4,
+Viet Bac Le5, Thibault Napoleon6, Hua Gao3, Claude Barras1, Sophie Rosset1,
+Laurent Besacier2, Jakob Verbeek4, Georges Qu´enot2, Fr´ed´eric Jurie6, and
+Hazim Kemal Ekenel3
+Univ Paris-Sud / CNRS-LIMSI UPR 3251, BP 133, F-91403 Orsay, France
+UJF-Grenoble 1 / UPMF-Grenoble 2 / Grenoble INP / CNRS-LIG UMR 5217,
+F-38041 Grenoble, France
+Karlsruher Institut fur Technologie, Karlsruhe, Germany
+INRIA Rhone-Alpes, 655 Avenue de lEurope, F-38330 Montbonnot, France
+5 Vocapia Research, 3 rue Jean Rostand, Parc Orsay Universit´e, F-91400 Orsay,
+6 Universit´e de Caen / GREYC UMR 6072, F-14050 Caen Cedex, France
+France"
+b82a4a0457170258aaf622b81e6f739a220398eb,Probe Strongly Similar Neutral Strongly Dissimilar Quasi-similar Quasi-dissimilar Push Pull,"This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TMM.2016.2605058, IEEE
+Transactions on Multimedia
+Person Re-identification via Ranking Aggregation
+of Similarity Pulling and Dissimilarity Pushing
+Mang Ye, Chao Liang(cid:3), Yi Yu, Zheng Wang, Qingming Leng,
+Chunxia Xiao, Member, IEEE, Jun Chen, Ruimin Hu, Senior Member, IEEE"
+b88771387d5c0f09ea9a2ccc743b11471fb257b4,An interactive facial-expression training platform for individuals with autism spectrum disorder,"An Interactive Facial-Expression Training Platform
+for Individuals with Autism Spectrum Disorder
+Christina Tsangouri*, Wei Li+, Zhigang Zhu*
+* Dept. of Comp. Sci.. and +Dept of Electrical Eng..
+City College of New York, New York, USA"
+b8a5839f6b1e051f430f2b89d5a1a7e49a10655a,DCFNet: Deep Neural Network with Decomposed Convolutional Filters,"DCFNet: Deep Neural Network with Decomposed Convolutional Filters
+Qiang Qiu 1 Xiuyuan Cheng 1 Robert Calderbank 1 Guillermo Sapiro 1"
+b8969d6e5658b360111f33d3f85eac63afcd7252,WESPE: Weakly Supervised Photo Enhancer for Digital Cameras,"WESPE: Weakly Supervised Photo Enhancer for Digital Cameras
+Andrey Ignatov, Nikolay Kobyshev, Kenneth Vanhoey, Radu Timofte, Luc Van Gool
+ETH Zurich
+{andrey, nk, vanhoey, timofter,"
+b8053da77bf1a5b4c87fddf6140be0a612cfc164,Multi-Pose Face Recognition Using Hybrid Face Features Descriptor,"MULTI-POSE FACE RECOGNITION USING
+HYBRID FACE FEATURES DESCRIPTOR
+I Gede Pasek Suta WIJAYA[1,2], Keiichi UCHIMURA[2] and Gou KOUTAKI[2]"
+b8b202fa955801da840afc9f523d439d14d87cc1,A Novel Approach for Monocular 3D Object Tracking in Cluttered Environment,"International Journal of Computational Intelligence Research
+ISSN 0973-1873 Volume 13, Number 5 (2017), pp. 851-864
+© Research India Publications
+http://www.ripublication.com
+A Novel Approach for Monocular 3D Object
+Tracking in Cluttered Environment
+Navneet S. Ghedia
+Research scholar, Gujarat Technological University, Gujarat, India.
+Dr. C.H. Vithalani
+Professor and Head of EC Dept., Government Engineering College, Rajkot, India.
+Dr. Ashish Kothari
+Associate Professor and Head of EC Dept., Atmiya Institute of Technology and
+Science, Rajkot, Gujarat, India."
+b8f3f6d8f188f65ca8ea2725b248397c7d1e662d,Selfie Detection by Synergy-Constraint Based Convolutional Neural Network,"Selfie Detection by Synergy-Constriant Based
+Convolutional Neural Network
+Yashas Annadani, Vijaykrishna Naganoor, Akshay Kumar Jagadish and Krishnan Chemmangat
+Electrical and Electronics Engineering, NITK-Surathkal, India."
+b85580ff2d8d8be0a2c40863f04269df4cd766d9,HCMUS team at the Multimodal Person Discovery in Broadcast TV Task of MediaEval 2016,"HCMUS team at the Multimodal Person Discovery in
+Broadcast TV Task of MediaEval 2016
+Vinh-Tiep Nguyen, Manh-Tien H. Nguyen, Quoc-Huu Che, Van-Tu Ninh,
+Tu-Khiem Le, Thanh-An Nguyen, Minh-Triet Tran
+Faculty of Information Technology
+University of Science, Vietnam National University-Ho Chi Minh city
+{nhmtien, cqhuu, nvtu,"
+b8471908880c916ebc70ac900e9446705ed258f4,Transitional and translational studies of risk for anxiety.,"Review
+TRANSITIONAL AND TRANSLATIONAL STUDIES
+OF RISK FOR ANXIETY
+B. J. Casey Ph.D.,
+Erika J. Ruberry B.S., Victoria Libby B.A., Charles E. Glatt M.D., Ph.D., Todd Hare Ph.D.,
+Fatima Soliman M.D., Ph.D., Stephanie Duhoux Ph.D., Helena Frielingsdorf M.D., Ph.D., and Nim Tottenham
+Ph.D.
+Adolescence reflects a period of increased rates of anxiety, depression, and
+suicide. Yet most teens emerge from this period with a healthy, positive outcome.
+In this article, we identify biological factors that may increase risk for some
+individuals during this developmental period by: (1) examining changes in
+neural circuitry underlying core phenotypic features of anxiety as healthy
+individuals transition into and out of adolescence; (2) examining genetic factors
+that may enhance the risk for psychopathology in one individual over another
+using translation from mouse models to human neuroimaging and behavior;
+nd (3) examining the effects of early experiences on core phenotypic features of
+nxiety using human neuroimaging and behavioral approaches. Each of these
+pproaches alone provides only limited information on genetic and environ-
+mental influences on complex human behavior across development. Together,
+they reflect an emerging field of translational developmental neuroscience in"
+b856c493c2e5cbb71791f56763886e5e0d40295c,Unsupervised Domain Adaptive Re-Identification: Theory and Practice,"Unsupervised Domain Adaptive Re-Identification:
+Theory and Practice
+Liangchen Song12∗ Cheng Wang23∗ Lefei Zhang1 Bo Du1
+Qian Zhang2 Chang Huang2 Xinggang Wang3
+Wuhan University 2Horizon Robotics
+Huazhong Univ. of Science and Technology"
+b8e35566129299c3591af0fd4f127e5e0d0b5774,3D Facial Image Comparison using Landmarks,"D Facial Image Comparison using Landmarks
+A study to the discriminating value of the characteristics
+of 3D facial landmarks and their automated detection.
+Alize Scheenstra
+Master thesis: INF/SCR-04-54
+Netherlands Forensic Institute
+Institute of Information and Computing Sciences
+Utrecht University
+February 2005"
+b831a08a7098b64485587541485859c9213e6dc2,Applications of 3D morphable models for faces with expressions,"Applications of 3D morphable models for faces with expressions
+B. Chu1,2, S. Romdhani1 et L. Chen2
+Morpho, SAFRAN Group
+1 boulevard Galliéni 92130 Issy-Les-Moulineaux - France
+{baptiste.chu,
+Université de Lyon, CNRS
+Ecole Centrale de Lyon, LIRIS UMR5205, F-69134
+Lyon, France
+{baptiste.chu,"
+b8a53daa97fb917a89c351c47f0b197573e20023,Recognizing Faces---An Approach Based on Gabor Wavelets,"Recognizing Faces --- An Approach Based on Gabor
+Wavelets
+By LinLin Shen, BSc, MSc
+Thesis submitted to the University of Nottingham
+for the degree of Doctor of Philosophy
+July 2005"
+b8f09ff53e5a1700492100b8cd1b9e9783485376,Clustered Multi-task Feature Learning for Attribute Prediction,"#1105
+CVPR 2016 Submission #1105. CONFIDENTIAL REVIEW COPY. DO NOT DISTRIBUTE.
+#1105
+Clustered Multi-task Feature Learning for Attribute Prediction
+Anonymous CVPR submission
+Paper ID 1105"
+b8aef59bac4035013bcdaa9b56d665fc8b4e187d,Optimal Bayes Classification of High Dimensional Data in Face Recognition,"Optimal Bayes Classification of High Dimensional Data in Face
+Recognition
+GRIFT Research Group, CRISTAL Laboratory, National School of Computer Sciences, University of Manouba,
+Wissal Drira and Faouzi Ghorbel
+Manouba, Tunisia
+Keywords:
+Face Classification, Bayes, Feature Extraction, Reduction Dimension, L2 Probabilistic Dependence
+Measure."
+b8a829b30381106b806066d40dd372045d49178d,A Probabilistic Framework for Joint Pedestrian Head and Body Orientation Estimation,"A Probabilistic Framework for Joint Pedestrian Head
+nd Body Orientation Estimation
+Fabian Flohr, Madalin Dumitru-Guzu, Julian F. P. Kooij, and Dariu M. Gavrila"
+b8a4e7c21c3163b7595dac0cb00cf518e2dd82b5,Coupling Fall Detection and Tracking in Omnidirectional Cameras,"Coupling Fall Detection and Tracking in
+Omnidirectional Cameras
+removed for blind review
+No Institute Given"
+b88e0c3a6a95e5193085a258cd281802852e5a4a,Progression in large Age-gap face verification,"International Research Journal of Engineering and Technology (IRJET) e-ISSN: 2395-0056
+Volume: 04 Issue: 09 | Sep -2017 www.irjet.net p-ISSN: 2395-0072
+Progression in large Age-gap face verification
+Neha Rahman1, Ankit Chaora2
+,2 Dept. of Electronics and Telecommunication Engineering, Rungta College of Engineering and Technology
+M.tech Scholar, Digital Electronics, 2Assistant Professor
+Bhilai, India
+techniques, database, machine
+research projects.
+. The
+increasing need for surveillance related
+pplications due to drug trafficking and terrorist
+ctivities etc.
+. The availability of real time hardware.
+. The re-emergence of neural network classifiers with
+emphasis on real time computation and adaptation.
+---------------------------------------------------------------------***---------------------------------------------------------------------
+. The increase in emphasis on civilian or commercial"
+b1d89015f9b16515735d4140c84b0bacbbef19ac,Too Far to See? Not Really!—Pedestrian Detection With Scale-Aware Localization Policy,"Too Far to See? Not Really!
+— Pedestrian Detection with Scale-aware
+Localization Policy
+Xiaowei Zhang, Li Cheng, Bo Li, and Hai-Miao Hu"
+b12431e61172443c534ea523a4d7407e847b5c5b,Yüz Tanımaya Dayalı Kişi Bazlı Test Otomasyonu,"Y¨uz Tanımaya Dayalı Ki¸si Bazlı Test
+Otomasyonu
+Alphan C¸ amlı1, Damla G¨ulen1, Nihat ¨Uk1, and Anıl G¨undo˘gdu1
+Siemens A.S., Istanbul 34870, Turkey"
+b1e27fade89e973f4087ed9a243981b0e713b22c,Functional neuroanatomy and the rationale for using EEG biofeedback for clients with Asperger's syndrome.,"Appl Psychophysiol Biofeedback (2010) 35:39–61
+DOI 10.1007/s10484-009-9095-0
+Functional Neuroanatomy and the Rationale for Using EEG
+Biofeedback for Clients with Asperger’s Syndrome
+Lynda Thompson Æ Michael Thompson Æ
+Andrea Reid
+Published online: 1 July 2009
+Ó Springer Science+Business Media, LLC 2009
+nd Oberman"
+b18f94c5296a9cebe9e779d50d193fd180f78ed9,Forecasting Interactive Dynamics of Pedestrians with Fictitious Play,"Forecasting Interactive Dynamics of Pedestrians with Fictitious Play
+Wei-Chiu Ma1 De-An Huang2 Namhoon Lee3 Kris M. Kitani4
+Stanford
+Oxford"
+b14b672e09b5b2d984295dfafb05604492bfaec5,Apprentissage de Modèles pour la Classification et la Recherche d ’ Images Learning Image Classification and Retrieval Models,LearningImageClassificationandRetrievalModelsThomasMensink
+b183914d0b16647a41f0bfd4af64bf94a83a2b14,Extensible video surveillance software with simultaneous event detection for low and high density crowd analysis,"Extensible Video Surveillance Software with
+Simultaneous Event Detection for Low and High
+Density Crowd Analysis
+Anuruddha L. Hettiarachchi, Heshani O. Thathsarani, Pamuditha U. Wickramasinghe,
+Dilranjan S. Wickramasuriya and Ranga Rodrigo
+Department of Electronic and Telecommunication Engineering, University of Moratuwa, Sri Lanka
+Email: 090184v, 090518c, 090560v, 090561b,"
+b196f95a4274533b7f931a509eaf5507358945f9,Transformation-Invariant Analysis of Visual Signals with Parametric Models,"POUR L'OBTENTION DU GRADE DE DOCTEUR ÈS SCIENCESacceptée sur proposition du jury:Prof. P. Vandergheynst, président du juryProf. P. Frossard, directeur de thèseProf. D. Kressner, rapporteur Dr G. Peyré, rapporteur Prof. M. B. Wakin, rapporteurTransformation-Invariant Analysis of Visual Signals with Parametric ModelsTHÈSE NO 5844 (2013)ÉCOLE POLYTECHNIQUE FÉDÉRALE DE LAUSANNEPRÉSENTÉE LE 4 OCTOBRE 2013 À LA FACULTÉ DES SCIENCES ET TECHNIQUES DE L'INGÉNIEURLABORATOIRE DE TRAITEMENT DES SIGNAUX 4PROGRAMME DOCTORAL EN GÉNIE ÉLECTRIQUESuisse2013PARElif VURAL"
+b13254c2c9ca90f57e385d34abc7fe78d74e5222,Real-Time Multi-object Tracking with Occlusion and Stationary Objects Handling for Conveying Systems,"Real-time Multi-Object Tracking with Occlusion and
+Stationary Objects Handling for Conveying Systems
+Adel Benamara, Serge Miguet, Mihaela Scuturici
+To cite this version:
+Adel Benamara, Serge Miguet, Mihaela Scuturici. Real-time Multi-Object Tracking with Occlu-
+sion and Stationary Objects Handling for Conveying Systems. 12th International Symposium
+on Visual Computing (ISVC’16), Dec 2016, Las Vegas, NV, United States. .
+HAL Id: hal-01385529
+https://hal.archives-ouvertes.fr/hal-01385529
+Submitted on 26 Oct 2016
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destin´ee au d´epˆot et `a la diffusion de documents
+scientifiques de niveau recherche, publi´es ou non,
+´emanant des ´etablissements d’enseignement et de"
+b11e97d5a12046ded77bc4dc0f762ac3c34e65cb,Blur and Illumination Invariant Robust Face Recognition Using Support Vector Machine (svm),"Vetri--International Journal of Computer Science information and Engg., Technologies ISSN 2277-4408 || 01032014-011
+BLUR AND ILLUMINATION INVARIANT ROBUST
+FACE RECOGNITION USING SUPPORT VECTOR
+MACHINE (SVM)
+A.Vetri Selvi1 , N.Priyalakshmi2, S.Reshmi3
+, G.Nandhini4,
+1, 2, 3 UG Scholars, Department of Information Technology, Sri Ramakrishna Engineering College, Coimbatore, India.
+4 Assistant Professor, Department of Information Technology, Sri Ramakrishna Engineering College, Coimbatore,
+India."
+b1a3b19700b8738b4510eecf78a35ff38406df22,Automatic Analysis of Facial Actions: A Survey,"This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TAFFC.2017.2731763, IEEE
+Transactions on Affective Computing
+JOURNAL OF LATEX CLASS FILES, VOL. 13, NO. 9, SEPTEMBER 2014
+Automatic Analysis of Facial Actions: A Survey
+Brais Martinez, Member, IEEE, Michel F. Valstar, Senior Member, IEEE, Bihan Jiang,
+nd Maja Pantic, Fellow, IEEE"
+b166ce267ddb705e6ed855c6b679ec699d62e9cb,Sample group and misplaced atom dictionary learning for face recognition,"Turk J Elec Eng & Comp Sci
+(2017) 25: 4421 { 4430
+⃝ T (cid:127)UB_ITAK
+doi:10.3906/elk-1702-49
+Sample group and misplaced atom dictionary learning for face recognition
+Meng WANG1;2, Zhengping HU1;(cid:3)
+, Zhe Sun1, Mei ZHU2, Mei SUN2
+Department of Information Science & Engineering, Faculty of Electronics & Communication, Yanshan University,
+Department of Physics & Electronics Engineering, Faculty of Electronics & Communication, Taishan University,
+Qinhuangdao, P.R. China
+Tai’an, P.R. China
+Received: 04.02.2017
+(cid:15)
+Accepted/Published Online: 01.06.2017
+(cid:15)
+Final Version: 05.10.2017"
+b16ff1331f961b2067c9464c491b7cbe90694758,Automatic plankton image classification combining multiple view features via multiple kernel learning,"Zheng et al. BMC Bioinformatics 2017, 18(Suppl 16):570
+DOI 10.1186/s12859-017-1954-8
+RESEARCH
+Open Access
+Automatic plankton image classification
+ombining multiple view features via multiple
+kernel learning
+Haiyong Zheng1, Ruchen Wang1, Zhibin Yu1, Nan Wang1, Zhaorui Gu1 and Bing Zheng2*
+From 16th International Conference on Bioinformatics (InCoB 2017)
+Shenzhen, China. 20-22 September 2017
+including phytoplankton and zooplankton, are the main source of food for organisms in the"
+b15a06d701f0a7f508e3355a09d0016de3d92a6d,Facial contrast is a cue for perceiving health from the face.,"Running head: FACIAL CONTRAST LOOKS HEALTHY
+Facial contrast is a cue for perceiving health from the face
+Richard Russell1, Aurélie Porcheron2,3, Jennifer R. Sweda1, Alex L. Jones1, Emmanuelle
+Mauger2, Frederique Morizot2
+Gettysburg College, Gettysburg, PA, USA
+CHANEL Recherche et Technologie, Chanel PB
+Université Grenoble Alpes
+Author Note
+Richard Russell, Jennifer R. Sweda, and Alex L. Jones, Department of Psychology,
+Gettysburg College. Aurélie Porcheron, Emmanuelle Mauger, and Frederique Morizot,
+CHANEL Recherche et Technologie, Chanel PB. Aurélie Porcheron, Laboratoire de
+Psychologie et NeuroCognition, Université Grenoble Alpes.
+Corresponding author: Richard Russell, Department of Psychology, Box 407, Gettysburg
+College, Gettysburg, PA 17325, USA. Email:
+This is a prepublication copy. This article may not exactly replicate the authoritative document
+published in the APA journal. It is not the copy of record. The authoritative document can be
+found through this DOI: http://psycnet.apa.org/doi/10.1037/xhp0000219"
+b137480d2ccf3b53433de208815ce891d95af912,Visual Sentences for Pose Retrieval Over Low-Resolution Cross-Media Dance Collections,"Visual Sentences for Pose Retrieval over
+Low-resolution Cross-media Dance Collections
+Reede Ren, Member, IEEE, John Collomosse, Member, IEEE"
+b17b20c3a3804482a1af3be897758d4f3be26677,Self-calibrating 3D context for retrieving people with luggage,"Self-Calibrating 3D Context for Retrieving People with Luggage
+Johannes Schels∗ , Joerg Liebelt∗
+EADS Innovation Works
+M¨unchen, Germany
+Rainer Lienhart
+University of Augsburg
+Augsburg, Germany"
+b13499d60e7be1d593ec91fc952b9c32ce62bd57,Gambit: A Robust Chess-Playing Robotic System,"Gambit: A Robust Chess-Playing Robotic System
+Cynthia Matuszek, Brian Mayton, Roberto Aimi, Marc Peter Deisenroth, Liefeng Bo,
+Robert Chu, Mike Kung, Louis LeGrand, Joshua R. Smith, Dieter Fox"
+b1444b3bf15eec84f6d9a2ade7989bb980ea7bd1,Local Directional Relation Pattern for Unconstrained and Robust Face Retrieval,"LOCAL DIRECTIONAL RELATION PATTERN
+Local Directional Relation Pattern for
+Unconstrained and Robust Face Retrieval
+Shiv Ram Dubey, Member, IEEE"
+b1edff56936e5d306e51479142b98cc2414c1a56,Human-Centered Autonomous Vehicle Systems: Principles of Effective Shared Autonomy,"Human-Centered Autonomous Vehicle Systems:
+Principles of E(cid:128)ective Shared Autonomy
+Massachuse(cid:138)s Institute of Technology (MIT)
+Lex Fridman
+Figure 1: Principles of shared autonomy used for the design and development of the Human-Centered Autonomous Vehicle."
+b1451721864e836069fa299a64595d1655793757,Criteria Sliders: Learning Continuous Database Criteria via Interactive Ranking,"Criteria Sliders: Learning Continuous
+Database Criteria via Interactive Ranking
+James Tompkin,1∗ Kwang In Kim,2∗ Hanspeter Pfister,3 and Christian Theobalt4
+Brown University 2University of Bath
+Harvard University 4Max Planck Institute for Informatics"
+b1ec55cbf2e9a6785e1f1f2fc060e4171ec88b4b,Implicit Discrimination of Basic Facial Expressions of Positive/Negative Emotion in Fragile X Syndrome and Autism Spectrum Disorder.,"015, Vol. 120, No. 4, 328–345
+EAAIDD
+DOI: 10.1352/1944-7558-120.4.328
+Implicit Discrimination of Basic Facial Expressions of
+Positive/Negative Emotion in Fragile X Syndrome and
+Autism Spectrum Disorder
+Hayley Crawford, Joanna Moss, Giles M. Anderson, Chris Oliver, and Joseph P. McCleery"
+b1ffa7a926e129f8dccdd6f258fea034cbee9160,Minimizing hallucination in histogram of Oriented Gradients,"Minimizing hallucination in Histogram of Oriented Gradients
+Sławomir B ˛ak Michał Koperski
+INRIA Sophia Antipolis, STARS group
+François Brémond
+004, route des Lucioles, BP93
+06902 Sophia Antipolis Cedex - France
+Javier Ortiz"
+b1bd58bb76ae9e4504622a941e1da21a24b5cfdd,"International conference on Advanced Computing , Communication and Networks ’ 11 1087 Face Recognition Using Incremental Principal Component Analysis","International conference on Advanced Computing, Communication and Networks’11
+Face Recognition Using Incremental Principal Component Analysis
+Satish S. Banait1, Vivek Kshirsagar2, Meghana Nagori3, Archana R. Ugale4
+Dept. of Computer Engg. KK Wagh Institute of Engg. Education & Research Centre, Nashik
+, 3 Dept. of Computer Science & Engineering, Govt. College Of Engineering, Aurangabad, India
+Dept. of Computer Engg. MET’s BKC College of Engg., Nashik
+space
+- IN
+feature"
+b1ffd13e8f68401a603eea9806bc37e396a3c77d,Face Generation with Conditional Generative Adversarial Networks,"Face Generation with Conditional Generative Adversarial Networks
+Xuwen Cao, Subramanya Rao Dulloor, Marcella Cindy Prasetio"
+b19f24ec92388513d1516d71292559417c776006,Causalgan: Learning Causal Implicit Gener-,"Under review as a conference paper at ICLR 2018
+CAUSALGAN: LEARNING CAUSAL IMPLICIT GENER-
+ATIVE MODELS WITH ADVERSARIAL TRAINING
+Anonymous authors
+Paper under double-blind review"
+b19e83eda4a602abc5a8ef57467c5f47f493848d,Heat Kernel Based Local Binary Pattern for Face Representation,"JOURNAL OF LATEX CLASS FILES
+Heat Kernel Based Local Binary Pattern for
+Face Representation
+Xi Li†, Weiming Hu†, Zhongfei Zhang‡, Hanzi Wang§"
+b18efa91e9893ae5fdfcaf880bae5c569fab4d18,Visual Scanning of Dynamic Affective Stimuli in Autism Spectrum Disorders,"Georgia State University
+ScholarWorks Georgia State University
+Psychology Dissertations
+Department of Psychology
+8-1-2012
+Visual Scanning of Dynamic Affective Stimuli in
+Autism Spectrum Disorders
+Susan M. McManus
+Georgia State University
+Follow this and additional works at: http://scholarworks.gsu.edu/psych_diss
+Recommended Citation
+McManus, Susan M., ""Visual Scanning of Dynamic Affective Stimuli in Autism Spectrum Disorders."" Dissertation, Georgia State
+University, 2012.
+http://scholarworks.gsu.edu/psych_diss/105
+This Dissertation is brought to you for free and open access by the Department of Psychology at ScholarWorks Georgia State University. It has been
+ccepted for inclusion in Psychology Dissertations by an authorized administrator of ScholarWorks Georgia State University. For more information,
+please contact"
+ddc8f480898a846c2a6ba0dddd7d733ce35f0e19,Dense Pose Transfer,"Dense Pose Transfer
+Natalia Neverova1, Rıza Alp G¨uler2, and Iasonas Kokkinos1
+Facebook AI Research, Paris, France, {nneverova,
+INRIA-CentraleSup´elec, Paris, France,"
+dde5125baefa1141f1ed50479a3fd67c528a965f,Synthesizing Normalized Faces from Facial Identity Features,"Synthesizing Normalized Faces from Facial Identity Features
+Forrester Cole1 David Belanger1,2 Dilip Krishnan1 Aaron Sarna1 Inbar Mosseri1 William T. Freeman1,3
+Google, Inc. 2University of Massachusetts Amherst 3MIT CSAIL
+{fcole, dbelanger, dilipkay, sarna, inbarm,"
+ddefb92908e6174cf48136ae139efbb4bd198896,Feature-wise Bias Amplification,"Under review as a conference paper at ICLR 2019
+FEATURE-WISE BIAS AMPLIFICATION
+Anonymous authors
+Paper under double-blind review"
+dd8084b2878ca95d8f14bae73e1072922f0cc5da,"Model Distillation with Knowledge Transfer in Face Classification, Alignment and Verification","Model Distillation with Knowledge Transfer from
+Face Classification to Alignment and Verification
+Chong Wang∗, Xipeng Lan and Yangang Zhang
+Beijing Orion Star Technology Co., Ltd. Beijing, China
+{chongwang.nlpr, xipeng.lan,"
+dd7ed20a65d811dcf863f796d6dcbe873f57e7c4,Object Detection Via Structural Feature Selection and Shape Model,"Object Detection via Structural Feature
+Selection and Shape Model
+Huigang Zhang, Xiao Bai, Jun Zhou, Senior Member, IEEE, Jian Cheng and
+Huijie Zhao"
+ddf55fc9cf57dabf4eccbf9daab52108df5b69aa,Methodology and Performance Analysis of 3-D Facial Expression Recognition Using Statistical Shape Representation,"International Journal of Grid and Distributed Computing
+Vol. 4, No. 3, September, 2011
+Methodology and Performance Analysis of 3-D Facial Expression
+Recognition Using Statistical Shape Representation
+Wei Quan, Bogdan J. Matuszewski, Lik-Kwan Shark
+ADSIP Research Centre, University of Central Lancashire
+{WQuan, BMatuszewski1,
+Charlie Frowd
+School of Psychology, University of Central Lancashire"
+dd72ed9a30e4d04703487df29a8762940bd79967,Image Retrieval based on LBP Transitions,"International Journal of Computer Applications (0975 – 8887)
+Volume 101– No.16, September 2014
+Image Retrieval based on LBP Transitions
+A. Srinivasa Rao
+Assoc.Prof in CSE Dept.
+MSSISTCE
+Mylavaram, Vijayawada
+V.Venkata Krishna
+Professor in CSE Dept.
+GIET, Rajahmundry
+Andhra Pradesh, India
+A.Obulesu
+Asst.Prof in CSE Dept.
+AGI (Autonomous), Hyderabad
+Telanganastate, India"
+ddea3c352f5041fb34433b635399711a90fde0e8,Facial Expression Classification using Visual Cues and Language,"Facial Expression Classification using Visual Cues and Language
+Abhishek Kar
+Advisor: Dr. Amitabha Mukerjee
+Department of Computer Science and Engineering, IIT Kanpur"
+dde24967490f58c8d10b2a00f12bf9103bd9b4a6,Evaluation of Shape Features for Efficient Classification Based on Rotational Invariant Using Texton Model,"Dr. P Chandra Sekhar Reddy, International Journal of Computer Science and Mobile Computing, Vol.5 Issue.8, August- 2016, pg. 282-295
+Available Online at www.ijcsmc.com
+International Journal of Computer Science and Mobile Computing
+A Monthly Journal of Computer Science and Information Technology
+ISSN 2320–088X
+IMPACT FACTOR: 5.258
+IJCSMC, Vol. 5, Issue. 8, August 2016, pg.282 – 295
+EVALUATION OF SHAPE FEATURES FOR
+EFFICIENT CLASSIFICATION BASED ON
+ROTATIONAL INVARIANT USING TEXTON MODEL
+Dr. P Chandra Sekhar Reddy
+Professor, CSE Dept.
+Gokaraju Rangaraju Institute of Engineering and Technology, Hyderabad"
+ddbd24a73ba3d74028596f393bb07a6b87a469c0,Multi-region Two-Stream R-CNN for Action Detection,"Multi-region two-stream R-CNN
+for action detection
+Xiaojiang Peng, Cordelia Schmid
+Inria(cid:63)"
+ddf099f0e0631da4a6396a17829160301796151c,Learning Face Image Quality from Human Assessments,"IEEE TRANSACTIONS ON INFORMATION FORENSICS AND SECURITY
+Learning Face Image Quality from
+Human Assessments
+Lacey Best-Rowden, Member, IEEE, and Anil K. Jain, Life Fellow, IEEE"
+dd0a334b767e0065c730873a95312a89ef7d1c03,Eigenexpressions: Emotion Recognition Using Multiple Eigenspaces,"Eigenexpressions: Emotion Recognition using Multiple
+Eigenspaces
+Luis Marco-Gim´enez1, Miguel Arevalillo-Herr´aez1, and Cristina Cuhna-P´erez2
+University of Valencia. Computing Department,
+Burjassot. Valencia 46100, Spain,
+Universidad Cat´olica San Vicente M´artir de Valencia (UCV),
+Burjassot. Valencia. Spain"
+dda7bb490171a1d3364928fb8143bbe021146c5f,Local Shape Spectrum Analysis for 3D Facial Expression Recognition,"Local Shape Spectrum Analysis for 3D Facial Expression Recognition
+Department of Information and Communication Technologies, Pompeu Fabra University, Barcelona, Spain
+Dmytro Derkach and Federico M. Sukno"
+dd8d53e67668067fd290eb500d7dfab5b6f730dd,A Parameter-Free Framework for General Supervised Subspace Learning,"A Parameter-Free Framework for General
+Supervised Subspace Learning
+Shuicheng Yan, Member, IEEE, Jianzhuang Liu, Senior Member, IEEE, Xiaoou Tang, Senior Member, IEEE,
+nd Thomas S. Huang, Life Fellow, IEEE"
+ddcb77d09e4e9e2a948f9ffe7eaa5554dceb8ce3,Revisiting Cross Modal Retrieval,
+ddbfea5302fcb5cbc2ca4c498a592ddb063b9eff,L Ow Supervision Visual Learning through Cooperative Agents,"Low-supervision visual learning through cooperative agents
+Ashish Bora
+Abhishek Sinha"
+ddbb6e0913ac127004be73e2d4097513a8f02d37,Face Detection Using Quantized Skin Color Regions Merging and Wavelet Packet Analysis,"IEEE TRANSACTIONS ON MULTIMEDIA, VOL. 1, NO. 3, SEPTEMBER 1999
+Face Detection Using Quantized Skin Color
+Regions Merging and Wavelet Packet Analysis
+Christophe Garcia and Georgios Tziritas, Member, IEEE"
+ddfde5d6f4e720aeb770a20e4197db3a0c279958,Learning Convolutional Text Representations for Visual Question Answering,"Learning Convolutional Text Representations for Visual Question Answering
+Zhengyang Wang∗
+Shuiwang Ji†"
+dd54255065cf93895661c40073cdd031af7dd7e8,"GeoNet: Unsupervised Learning of Dense Depth, Optical Flow and Camera Pose","GeoNet: Unsupervised Learning of Dense Depth, Optical Flow and Camera Pose
+Zhichao Yin and Jianping Shi
+SenseTime Research
+{yinzhichao,"
+dc53c4bb04e787a0d45dd761ba2101cc51c17b82,Multiple-Person Tracking by Detection,"http://excel.fit.vutbr.cz
+Multiple-Person Tracking by Detection
+Jakub Vojvoda*"
+dc3cd4e110b526cb59bd7527d540120c5fae77ce,Adversarially Tuned Scene Generation,"Adversarially Tuned Scene Generation
+VSR Veeravasarapu1, Constantin Rothkopf2, Ramesh Visvanathan1
+Center for Cognition and Computation, Dept. of Computer Science, Goethe University, Frankfurt
+Center for Cognitive Science & Dept. of Psychology, Technical University Darmstadt."
+dcf17cc3b4f8519a6789c1ea086689bcbc1d6f11,Unsupervised Learning of Deep Feature Representation for Clustering Egocentric Actions,Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+dceaef5e7cbfc4d0150c2d765cc3df4349b8b2bd,Sentiment Analysis Using Social Multimedia,"Chapter 2
+Sentiment Analysis Using Social
+Multimedia
+Jianbo Yuan, Quanzeng You and Jiebo Luo"
+dcace6f0611b77177f4aff4bb650afab0a819575,3D Face Recognition,BMVC 2006 doi:10.5244/C.20.89
+dcd88a249b480d2e25326cdd11c5879fa31865cc,A Cross-Modal Distillation Network for Person Re-identification in RGB-Depth,"A Cross-Modal Distillation Network for Person
+Re-identification in RGB-Depth
+Frank Hafner
+, Amran Bhuiyan,
+, Julian F. P. Kooij
+, Eric Granger
+, Member, IEEE"
+dc550f361ae82ec6e1a0cf67edf6a0138163382e,Emotion Based Music Player,"ISSN XXXX XXXX © 2018 IJESC
+Research Article Volume 8 Issue No.3
+Vijay Chakole1, Aniket Choudhary2, Kalyani Trivedi3, Kshitija Bhoyar4, Ruchita Bodele5, Sayali Karmore6
+Emotion Based Music Player
+Professor1, UG Student2, 3, 4, 5, 6
+Department of Electronics Engineering
+K.D.K. College of Engineering Nagpur, India"
+dc6263270cd23a51d8fffdfd7e408250442b40f3,"SimpleElastix: A User-Friendly, Multi-lingual Library for Medical Image Registration","SimpleElastix: A user-friendly, multi-lingual library for medical image
+registration
+Kasper Marstal1, Floris Berendsen2, Marius Staring2 and Stefan Klein1
+Biomedical Imaging Group Rotterdam (BIGR), Department of Radiology & Medical Informatics,
+Erasmus Medical Center, PO Box 2040, Rotterdam, 3000 CA, the Netherlands,
+Division of Image Processing (LKEB), Department of Radiology, Leiden University Medical Center,
+PO Box 9600, 2300 RC Leiden, the Netherlands,"
+dc6c47d15ffc0fd59e51ed03556c3566afe5710b,Robust Object Recognition Through Symbiotic Deep Learning In Mobile Robots *,"CONFIDENTIAL. Limited circulation. For review only.
+Preprint submitted to 2018 IEEE/RSJ International Conference
+on Intelligent Robots and Systems. Received March 1, 2018."
+dcb44fc19c1949b1eda9abe998935d567498467d,Ordinal Zero-Shot Learning,"Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+labelunseen labelFigure1:Supervisionintensityfordifferentlabels.Greenrepre-sentsseenlabelsandredrepresentsunseenlabels.Thegroundtruthlabelofthisinstanceis“Good”,soithasthestrongestsupervisionintensity.Although“Common”isanunseenlabel,itstillhascertainsupervisioninformationbecauseitiscloselyrelatedto“Good”.classifier;[ZhangandSaligrama,2016]learnsajointlatentspaceusingstructuredlearning.Thedifficultyinobtainingthesideinformationorusingothertechniquestoprocessthesideinformationarethemostseriousissuesformanyexistingzero-shotlearningmethods.Fortheattribute-basedmethods,humanexpertsareneededtolabelattributesandthisisverytime-consumingandnoteasytoobtainthediscriminativecategory-levelattributes.Somemethodsdiscoverattributesinteractively[ParikhandGrau-man,2011][Bransonetal.,2010],butthisalsorequiresla-borioushumanparticipation.Althoughmanyalgorithmscandiscoverattribute-relatedconceptsontheWeb[Rohrbachetal.,2010][Bergetal.,2010],theycanalsobebiasedorlackinformationthatiscriticaltoaparticulartask[ParikhandGrauman,2011].Forthetextcorpora-basedmethods,theyfirstrequirealargelanguagecorpora,suchasWikipedia,andthenneedtolearnwordrepresentation[Socheretal.,2013]orusestandardNaturalLanguageProcessing(NLP)techniquestoproduceclassdescriptions[Elhoseinyetal.,2013].Itishardtoguaranteethecorrectnessofsuchclassdescriptionsforzero-shotlearning.Conclusively,althoughsideinforma-tionishelpfulforzero-shotlearning,ithasmanydisadvan-tages.Generatingthesesideinformationisverytediousandsometimeswecannotknowwhichsideinformationistrulywanted.IfwedependonhumanlabororNLPtechniques,noisysideinformationwillbecomealmostinevitableandin-fluencethefinalperformance.Toavoidtheseproblems,itisimportanttosolvezero-shotlearninginwhateverpossiblecasesthathavesomepropertieswecanutilizetoavoidusingsideinformation."
+dcce157aa2e5db081b36fd16544a038becb408ab,Fast and Accurate Pedestrian Detection in a Truck's Blind Spot Camera,"Fast and Accurate Pedestrian Detection
+in a Truck’s Blind Spot Camera
+Kristof Van Beeck1,2(B) and Toon Goedem´e1,2
+EAVISE, KU Leuven - Campus De Nayer, J. De Nayerlaan 5, 2860
+ESAT/PSI - VISICS, KU Leuven, Kasteelpark Arenberg 10, 3001 Leuven, Belgium
+Sint-katelijne-waver, Belgium"
+dc452f3e531c4057c930f0538d5652ad9034d1aa,Quality metrics for practical face recognition,"1st International Conference on Pattern Recognition (ICPR 2012)
+November 11-15, 2012. Tsukuba, Japan
+978-4-9906441-0-9 ©2012 ICPR"
+dc7a4d5ba20ca07d29c360b26e1e72afae9a77be,The ApolloScape Open Dataset for Autonomous Driving and its Application,"The ApolloScape Open Dataset for Autonomous
+Driving and its Application
+Xinyu Huang*, Peng Wang*, Xinjing Cheng, Dingfu Zhou, Qichuan Geng, Ruigang Yang"
+dc6d518585c18504b2e69223c062cdd691c79bbd,Domain Adaptation Through Synthesis for Unsupervised Person Re-identification,
+dc771cd7780538953811a5b6ae0e901ca68cce3d,Multiple People Tracking Using Hierarchical Deep Tracklet Re-identification,"Multiple People Tracking Using Hierarchical Deep Tracklet Re-identification
+Maryam Babaee∗
+Ali Athar∗
+Gerhard Rigoll
+Institute for Human-Machine Communication, Technical University of Munich
+Arcisstrasse 21, Munich, Germany"
+dcba9cd587be2ed5437370e12e3591bdde86dc3c,Template for Regular Entry,"TEMPLATE FOR REGULAR ENTRY
+(ENCYCLOPEDIA OF DATABASE SYSTEMS)
+TITLE OF ENTRY
+Automatic Image Annotation
+BYLINE
+Nicolas Hervé and Nozha Boujemaa, INRIA Paris-Rocquencourt, IMEDIA project, France.
+http://www-rocq.inria.fr/imedia/
+SYNONYMS
+Multimedia Content Enrichment, Image Classification, Object Detection and Recognition,
+Auto-annotation
+DEFINITION
+The widespread search engines, in the professional as well as the personal context, used to work
+on the basis of textual information associated or extracted from indexed documents. Nowadays,
+most of the exchanged or stored documents have multimedia content. To reduce the technological
+gap so that these engines still can work on multimedia content, it is very convenient developing
+methods capable to generate automatically textual annotations and metadata. These methods will
+then allow to enrich the upcoming new content or to post-annotate the existing content with
+dditional information extracted automatically if ever this existing content is partly or not annotated.
+A broad diversity in the typology of manual annotation is usually found in image databases. Part of
+them is representing contextual information. The author, date, place or technical shooting"
+dc2e805d0038f9d1b3d1bc79192f1d90f6091ecb,Face Recognition and Facial Attribute Analysis from Unconstrained Visual Data,
+dc23beb1e5c7402b1a9d5a7c854e62a253d0815e,Microscopic crowd simulation : evaluation and development of algorithms. (Simulation microscopique de foules : évaluation et développement d'algorithmes),"Microscopic crowd simulation : evaluation and
+development of algorithms
+David Wolinski
+To cite this version:
+David Wolinski. Microscopic crowd simulation : evaluation and development of algorithms. Data
+Structures and Algorithms [cs.DS]. Université Rennes 1, 2016. English. <NNT : 2016REN1S036>.
+<tel-01420105>
+HAL Id: tel-01420105
+https://tel.archives-ouvertes.fr/tel-01420105
+Submitted on 20 Dec 2016
+HAL is a multi-disciplinary open access
+archive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+abroad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de"
+dcc064b8bf7744801ae7dfe4cbfd11b7e5a5b673,Men's physical strength moderates conceptualizations of prospective foes in two disparate societies.,"Hum Nat
+DOI 10.1007/s12110-014-9205-4
+Men’s Physical Strength Moderates Conceptualizations
+of Prospective Foes in Two Disparate Societies
+Daniel M. T. Fessler & Colin Holbrook &
+Matthew M. Gervais
+# Springer Science+Business Media New York 2014"
+dc041f307d467918ba684d3c425fb23016f3b28e,A Survey of 3D Face Recognition Methods,"A Survey of 3D Face Recognition Methods
+Alize Scheenstra1, Arnout Ruifrok2, and Remco C. Veltkamp1
+Utrecht University, Institute of Information and Computing Sciences,
+Padualaan 14, 3584 CH Utrecht, The Netherlands
+Netherlands Forensic Institute,
+Laan van Ypenburg 6, 2497 GB Den Haag, The Netherlands,"
+dc090aea412cef17c7a68ec84c34797806feab24,A mixture of gated experts optimized using simulated annealing for 3D face recognition,"978-1-4577-1302-6/11/$26.00 ©2011 IEEE
+3D FACE RECOGNITION
+I. INTRODUCTION
+dc9f29118e38602c03bb2866f8b12ce478aad52c,Large scale evolution of convolutional neural networks using volunteer computing,"Large Scale Evolution of Convolutional Neural
+Networks Using Volunteer Computing
+Travis Desell∗
+March 17, 2017"
+dc22de0ed56958013234cf7128952390fb47345a,Towards dense object tracking in a 2D honeybee hive,"Towards dense object tracking in a 2D honeybee hive
+Katarzyna Bozek a, Laetitia Hebert a, Alexander S Mikheyev a & Greg J Stephens a,b∗
+Okinawa Institute of Science and Technology, 1919-1 Tancha Onna-son, Kunigami-gun, Okinawa 904-0495, Japan
+Department of Physics and Astronomy, VU University Amsterdam, 1081 HV Amsterdam, The Netherlands
+From human crowds to cells in tissue, the detection and ef‌f‌icient tracking of multiple objects
+in dense configurations is an important and unsolved problem. In the past, limitations of image
+analysis have restricted studies of dense groups to tracking a single or subset of marked individ-
+uals, or to coarse-grained group-level dynamics, all of which yield incomplete information. Here,
+we combine convolutional neural networks (CNNs) with the model environment of a honeybee hive
+to automatically recognize all individuals in a dense group from raw image data. We create new,
+adapted individual labeling and use the segmentation architecture U-Net with a loss function depen-
+dent on both object identity and orientation. We additionally exploit temporal regularities of the
+video recording in a recurrent manner and achieve near human-level performance while reducing
+the network size by 94% compared to the original U-Net architecture. Given our novel applica-
+tion of CNNs, we generate extensive problem-specific image data in which labeled examples are
+produced through a custom interface with Amazon Mechanical Turk. This dataset contains over
+75,000 labeled bee instances across 720 video frames at 2 FPS, representing an extensive resource
+location error of ∼ 7% of a typical body dimension, and orientation error of 12◦, approximating the
+variability of human raters. Our results provide an important step towards ef‌f‌icient image-based
+dense object tracking by allowing for the accurate determination of object location and orientation"
+dca246cd06666a331b0203cb09a6ef51727bfdcc,The micro-foundations of email communication networks,"The Micro-Foundations of Email
+Communication Networks
+Ofer Engel
+London School of Economics and Political Science
+Department of Management
+Information Systems and Innovation Group
+Thesis submitted for the degree of
+PhilosophiæDoctor (PhD)
+2013 June
+dc974c31201b6da32f48ef81ae5a9042512705fe,Am I Done? Predicting Action Progress in Videos,"Am I done? Predicting Action Progress in Video
+Federico Becattini1, Tiberio Uricchio1, Lorenzo Seidenari1,
+Alberto Del Bimbo1, and Lamberto Ballan2
+Media Integration and Communication Center, Univ. of Florence, Italy
+Department of Mathematics “Tullio Levi-Civita”, Univ. of Padova, Italy"
+b66418ecc37ea0c79da5425e9ceac939ca9075ae,Efficient Gait-based Gender Classification through Feature Selection,"EFFICIENT GAIT-BASED GENDER CLASSIFICATION
+THROUGH FEATURE SELECTION∗
+Ra´ul Mart´ın-F´elez, Javier Ortells, Ram´on A. Mollineda and J. Salvador S´anchez
+Institute of New Imaging Technologies and Dept. Llenguatges i Sistemes Inform`atics
+Universitat Jaume I. Av. Sos Baynat s/n, 12071, Castell´o de la Plana, Spain
+{martinr, jortells, mollined,
+Keywords:
+Gender classification, Gait, ANOVA, Feature selection."
+b6ecc8d34ebc8895378abe2b8f35e3a0691f5d26,Annotation Methodologies for Vision and Language Dataset Creation,"Annotation Methodologies for Vision and Language Dataset Creation
+Gitit Kehat
+Computer Science Department
+Brandeis University
+Waltham, MA. 02453 USA
+James Pustejovsky
+Computer Science Department
+Brandeis University
+Waltham, MA. 02453 USA"
+b691463de5e30e7efd18b9d02cbf83c805834fe7,Evaluation of Penalty Functions for Semi-global Matching Cost Aggregation,"EVALUATION OF PENALTY FUNCTIONS FOR SEMI-GLOBAL MATCHING
+COST AGGREGATION
+Christian Banz, Peter Pirsch, and Holger Blume
+Institute of Microelectronic Systems
+Leibniz Universität Hannover, Hannover, Germany
+KEY WORDS: Stereoscopic, Quality, Matching, Vision, Reconstruction, Camera, Disparity Estimation, Semi-Global Matching"
+b6b1b0632eb9d4ab1427278f5e5c46f97753c73d,Generalização cartográfica automatizada para um banco de dados cadastral,"UNIVERSIDADE FEDERAL DE SANTA CATARINA -UFSC
+DEPARTAMENTO DE ENGENHARIA CIVIL
+PROGRAMA DE PÓS-GRADUAÇÃO EM
+ENGENHARIA CIVIL - PPGEC
+AREA DE CONCENTRAÇÃO: CADASTRO TÉCNICO E
+GESTÃO TERRITORIAL
+GENERALIZAÇÃO CARTOGRÁFICA AUTOMATIZADA
+PARA UM BANCO DE DADOS CADASTRAL
+Tese submetida à Universidade Federal de
+Santa Catarina como requisito exigido pelo
+Programa de Pós-Graduação em Engenharia
+Civil - PPGEC, para a obtenção do Título de
+DOUTOR em Engenharia Civil.
+Mariane Alves Dal Santo
+Orientador: Prof. Dr. Carlos Loch
+Florianópolis, dezembro de 2007"
+b63411ed70ba315b87a716e1809faea48e70a982,"A Survey on Object Detect , Track and Identify Using Video Surveillance","IOSR Journal of Engineering (IOSRJEN)
+e-ISSN: 2250-3021, p-ISSN: 2278-8719, www.iosrjen.org
+Volume 2, Issue 10 (October 2012), PP 71-76
+A Survey on Object Detect, Track and Identify Using Video
+Surveillance
+Chandrashekhar D.Badgujar1, Dipali P.Sapkal2
+1,2(Computer Science and Engineering G.H.R.E.M, Jalgoan)"
+b651814360e3899cd9206bfd23621aca6551e69c,Improving Feature Level Likelihoods using Cloud Features,"IMPROVING FEATURE LEVEL LIKELIHOODS USING CLOUD
+FEATURES
+Heydar Maboudi Afkham1, Stefan Carlsson1, Josephine Sullivan1
+Computer Vision and Active Perception Lab., KTH, Stockholm, Sweden
+Keywords:
+Feature inference, Latent models, Clustering"
+b69badabc3fddc9710faa44c530473397303b0b9,Unsupervised Image-to-Image Translation Networks,"Unsupervised Image-to-Image Translation Networks
+Ming-Yu Liu, Thomas Breuel,
+Jan Kautz
+NVIDIA"
+b6fd905efd5da32bd32047896074a821477cb564,An Human Perceptive Model for Person Re-identification,"An Human Perceptive Model for Person Re-identification
+Angelo Cardellicchio1, Tiziana D’Orazio1, Tiziano Politi2 and Vito Ren`o1
+National Research Council, Institute of Intelligent Systems for Automation, Bari, Italia
+Politecnico di Bari, Bari, Italia
+Keywords:
+Color Analysis, Feature Extraction, Histograms."
+b62486261104d5136aea782ee8596425b5f228da,Modelling perceptions of criminality and remorse from faces using a data-driven computational approach.,"Cognition and Emotion
+ISSN: 0269-9931 (Print) 1464-0600 (Online) Journal homepage: http://www.tandfonline.com/loi/pcem20
+Modelling perceptions of criminality and remorse
+from faces using a data-driven computational
+approach
+Friederike Funk, Mirella Walker & Alexander Todorov
+To cite this article: Friederike Funk, Mirella Walker & Alexander Todorov (2017) Modelling
+perceptions of criminality and remorse from faces using a data-driven computational approach,
+Cognition and Emotion, 31:7, 1431-1443, DOI: 10.1080/02699931.2016.1227305
+To link to this article: http://dx.doi.org/10.1080/02699931.2016.1227305
+View supplementary material
+Published online: 07 Sep 2016.
+Submit your article to this journal
+Article views: 235
+View related articles
+View Crossmark data
+Citing articles: 1 View citing articles
+Full Terms & Conditions of access and use can be found at
+http://www.tandfonline.com/action/journalInformation?journalCode=pcem20
+Download by: [Princeton University]"
+b63041d05b78a66724fbcb2803508999bf885d6b,Deep Sets,"Deep Sets
+Manzil Zaheer 1 2 Satwik Kottur 2 Siamak Ravanbhakhsh 2 Barnabas Poczos 2 Ruslan Ssalakhutdinov 2
+Alexander Smola 1 2"
+b61b4eb2e28b9cf35578498e1bbcc35ec0a07651,Backtracking ScSPM Image Classifier for Weakly Supervised Top-Down Saliency,"Backtracking ScSPM Image Classifier for Weakly Supervised Top-down Saliency
+Hisham Cholakkal
+Jubin Johnson
+Deepu Rajan
+Multimedia Lab, School of Computer Science and Engineering
+Nanyang Technological University Singapore
+{hisham002, jubin001,"
+b6aa94b81b2165e492cc2900e05dd997619bfe7a,Automatic temporal segment detection via bilateral long short-term memory recurrent neural networks,"Automatic temporal segment
+detection via bilateral long short-
+term memory recurrent neural
+networks
+Bo Sun
+Siming Cao
+Jun He
+Lejun Yu
+Liandong Li
+Bo Sun, Siming Cao, Jun He, Lejun Yu, Liandong Li, “Automatic temporal segment
+detection via bilateral long short-term memory recurrent neural networks,” J.
+Electron. Imaging 26(2), 020501 (2017), doi: 10.1117/1.JEI.26.2.020501.
+Downloaded From: http://electronicimaging.spiedigitallibrary.org/ on 03/03/2017 Terms of Use: http://spiedigitallibrary.org/ss/termsofuse.aspx"
+b632d47eb7421a3d622b0f1ceb009e4415ccc84d,Deep Perceptual Mapping for Cross-Modal Face Recognition,"(will be inserted by the editor)
+Deep Perceptual Mapping for Cross-Modal Face
+Recognition
+M. Saquib Sarfraz · Rainer Stiefelhagen
+the date of receipt and acceptance should be inserted later"
+b6ef46621d8660eb53836202fa58f04fa20adfd7,Disgust and Anger Relate to Different Aggressive Responses to Moral Violations,"692000 PSSXXX10.1177/0956797617692000Molho et al.Moral Emotions and Aggressive Tactics
+research-article2017
+Research Article
+Disgust and Anger Relate to Different
+Aggressive Responses to Moral Violations
+Catherine Molho1, Joshua M. Tybur1, Ezgi Güler2,
+Daniel Balliet1, and Wilhelm Hofmann3
+Department of Experimental and Applied Psychology, Vrije Universiteit Amsterdam;
+Department of Political and Social Sciences, European University Institute; and 3Social Cognition
+Center Cologne, University of Cologne
+Psychological Science
+2017, Vol. 28(5) 609 –619
+© The Author(s) 2017
+Reprints and permissions:
+sagepub.com/journalsPermissions.nav
+DOI: 10.1177/0956797617692000
+https://doi.org/10.1177/0956797617692000
+www.psychologicalscience.org/PS"
+b69f7660985be23abda72990cb1f367778960275,Object Recognition based on Principal Component Analysis to Image Patches,"International Journal of Scientific & Engineering Research, Volume 4, Issue 6, June-2013 1096
+ISSN 2229-5518
+Object Recognition based on Principal
+Component Analysis to Image Patches
+R.Ahilapriyadharsini
+Mepco Schlenk Engineering
+College,
+Sivakasi, India
+S.Arivazhagan
+M.Gowthami
+Mepco Schlenk Engineering
+Renganayagi Varatharaj College of
+College,
+Sivakasi, India
+Engineering, Salvarpatti,
+Sivakasi, India."
+b613b30a7cbe76700855479a8d25164fa7b6b9f1,Identifying User-Specific Facial Affects from Spontaneous Expressions with Minimal Annotation,"Identifying User-Specific Facial Affects from
+Spontaneous Expressions with Minimal Annotation
+Michael Xuelin Huang, Grace Ngai, Kien A. Hua, Fellow, IEEE, Stephen C.F. Chan, Member, IEEE
+and Hong Va Leong, Member, IEEE Computer Society"
+b640c36acc0e748553f78280fce7a840965c5cec,Text Detection from Natural Image using MSER and BOW,"International Journal of Emerging Engineering Research and Technology
+Volume 3, Issue 11, November 2015, PP 152-156
+ISSN 2349-4395 (Print) & ISSN 2349-4409 (Online)
+Text Detection from Natural Image using MSER and BOW
+K.Sowndarya Lahari, 2M.Haritha, 3P.Prasanna Murali Krishna
+(M.Tech), DECS, DR.Sgit, Markapur, India.
+Associate Professor, Department of ECE, DR.Sgit, Markapur, India.
+.3H.O.D Department of ECE, DR.Sgit, Markapur, India."
+b66a93884f80a243f50da97e33211693a317dc45,Deep Learning for Generic Object Detection: A Survey,"Deep Learning for Generic Object Detection: A Survey
+Li Liu 1,2 · Wanli Ouyang 3 · Xiaogang Wang 4 ·
+Paul Fieguth 5 · Jie Chen 2 · Xinwang Liu 1 · Matti Pietik¨ainen 2
+Received: 12 September 2018"
+b6f682648418422e992e3ef78a6965773550d36b,"CBMM Memo No . 061 February 8 , 2017 Full interpretation of minimal images","February 8, 2017"
+b610e52b0a8fa11af3d01944c0383f015cade9c0,Multimodal 2 D - 3 D Face Recognition,"International Journal of Future Computer and Communication, Vol. 2, No. 6, December 2013
+Multimodal 2D-3D Face Recognition
+Gawed M. Nagi, Rahmita Rahmat, Muhamad Taufik, and Fatimah Khalid
+technology"
+b67e2ccd0f05df5358464b9b38da3bcb9feda1ab,FaceID@home: cycle-sharing for facial recognition,"cycle-sharing for facial recognition
+FaceID-BOINC: adapta¸c˜ao de algoritmos de reconhecimento facial (eigenfaces) para execu¸c˜ao
+em m´aquinas multicore e GPUs integrado num cliente para plataforma BOINC
+Nuno Miguel Abreu Teixeira - 55397
+Instituto Superior T´ecnico"
+b64cc1f0772e9620ecf916019de85b7adb357b7a,Fast Face-Swap Using Convolutional Neural Networks,"Fast Face-swap Using Convolutional Neural Networks
+Iryna Korshunova1,2
+Wenzhe Shi1
+{iryna.korshunova,
+Twitter
+Joni Dambre2
+Lucas Theis1
+IDLab, Ghent University
+{wshi,"
+b6aaaf6290ba0ca13be61d122907617f1ea86315,Embedded Face Recognition Using Cascaded Structures PROEFSCHRIFT,"Embedded Face Recognition
+Using Cascaded Structures
+PROEFSCHRIFT
+ter verkrijging van de graad van doctor aan de
+Technische Universiteit Eindhoven, op gezag van de
+Rector Magnificus, prof.dr.ir. C.J. van Duijn, voor een
+commissie aangewezen door het College voor
+Promoties in het openbaar te verdedigen op
+dinsdag 3 oktober 2006 om 16.00 uur
+Fei Zuo
+geboren te Xi’an, China"
+b6dc1cd3cabdfea7363d41773a315a0d241dc836,Local Context Priors for Object Proposal Generation,"Local Context Priors for Object Proposal
+Generation
+Marko Ristin1, Juergen Gall2, and Luc Van Gool1,3
+ETH Zurich
+MPI for Intelligent Systems
+KU Leuven"
+b648d73edd1a533decd22eec2e7722b96746ceae,weedNet: Dense Semantic Weed Classification Using Multispectral Images and MAV for Smart Farming,"weedNet: Dense Semantic Weed Classification Using Multispectral
+Images and MAV for Smart Farming
+Inkyu Sa1, Zetao Chen2, Marija Popovi´c1, Raghav Khanna1, Frank Liebisch3, Juan Nieto1, Roland Siegwart1"
+b67e0ae9d64ec06b3e1c25c7f7e8b86020612d33,Vocabulary-informed Visual Feature Augmen-,"Under review as a conference paper at ICLR 2018
+VOCABULARY-INFORMED VISUAL FEATURE AUGMEN-
+TATION FOR ONE-SHOT LEARNING"
+a93ecf7b9780989c709714dde0f93f4d81eea640,Unconstrained Face Recognition Using SVM Across Blurred And Illuminated Images With Pose Variation,"International Journal of Innovative Research in Computer and Communication Engineering
+(An ISO 3297: 2007 Certified Organization)
+Vol.2, Special Issue 1, March 2014
+Proceedings of International Conference On Global Innovations In Computing Technology (ICGICT’14)
+Department of CSE, JayShriram Group of Institutions, Tirupur, Tamilnadu, India on 6th & 7th March 2014
+Organized by
+Unconstrained Face Recognition Using SVM
+Across Blurred And Illuminated Images With Pose
+Variation
+Nadeena M1, S.Sangeetha, M.E, 2
+ISSN(Online): 2320-9801
+ISSN (Print): 2320-9798
+II M.E CSE, Dhanalakshmi Srinivasan College of Engineering, Coimbatore, India1
+Assistant Professor, Dhanalakshmi Srinivasan College of Engineering, Coimbatore, India 2"
+a9d3547ab16a9cc936bf5991bf8fb475eadce931,Face Recognition using DWT with HMM,"Eng. & Tech. Journal, Vol.30, No.1, 2012
+Face Recognition using DWT with HMM
+Dr. Eyad I. Abbas
+Department of Electrical Engineering, University of Technology/ Baghdad
+Hameed R. Farhan
+Department of Electrical Engineering, Engineering College, University of Kerbala/ Kerbala
+Received on: 19/6/2011 & Accepted on: 3/11/2011"
+a9e28863c7fb963b40a379c5a4e0da00eb031933,A Corpus of Natural Language for Visual Reasoning,"Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Short Papers), pages 217–223
+Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Short Papers), pages 217–223
+Vancouver, Canada, July 30 - August 4, 2017. c(cid:13)2017 Association for Computational Linguistics
+Vancouver, Canada, July 30 - August 4, 2017. c(cid:13)2017 Association for Computational Linguistics
+https://doi.org/10.18653/v1/P17-2034
+https://doi.org/10.18653/v1/P17-2034"
+a91caf771905ddff8cb271f04e7ede1a8b6d529b,Unsupervised Reverse Domain Adaptation for Synthetic Medical Images via Adversarial Training,"Unsupervised Reverse Domain Adaptation for Synthetic Medical Images via
+Adversarial Training
+Department of Biomedical Engineering
+Department of Computer Science
+Faisal Mahmood1 Richard Chen2 Nicholas J. Durr1
+Johns Hopkins University (JHU)
+{faisalm, rchen40,"
+a9791544baa14520379d47afd02e2e7353df87e5,The Need for Careful Data Collection for Pattern Recognition in Digital Pathology,"Technical Note
+The Need for Careful Data Collection for Pattern Recognition in
+Digital Pathology
+Raphaël Marée1
+Department of Electrical Engineering and Computer Science, Montefiore Institute, University of Liège, 4000 Liège, Belgium
+Received: 08 December 2016
+Accepted: 15 March 2017
+Published: 10 April 2017"
+a9d6d62f4f3f12ed565e5d75f8c4b7a202a3d809,Action and intention recognition of pedestrians in urban traffic,"Action and intention recognition of pedestrians in urban traffic
+Dimitrios Varytimidis1, Fernando Alonso-Fernandez1, Boris Duran2 and Cristofer Englund1,2∗"
+a97f3d2313affd35c889c57f2ebe21e7ba2b5bbb,Real-Time Semantic Mapping for Autonomous Off-Road Navigation,"Real-time Semantic Mapping for Autonomous
+Off-Road Navigation
+Daniel Maturana, Po-Wei Chou, Masashi Uenoyama and Sebastian Scherer"
+a9eb6e436cfcbded5a9f4b82f6b914c7f390adbd,A Model for Facial Emotion Inference Based on Planar Dynamic Emotional Surfaces,"(IJARAI) International Journal of Advanced Research in Artificial Intelligence,
+Vol. 5, No.6, 2016
+A Model for Facial Emotion Inference Based on
+Planar Dynamic Emotional Surfaces
+Ruivo, J. P. P.
+Escola Polit´ecnica
+Negreiros, T.
+Escola Polit´ecnica
+Barretto, M. R. P.
+Escola Polit´ecnica
+Tinen, B.
+Escola Polit´ecnica
+Universidade de S˜ao Paulo
+Universidade de S˜ao Paulo
+Universidade de S˜ao Paulo
+Universidade de S˜ao Paulo
+S˜ao Paulo, Brazil
+S˜ao Paulo, Brazil
+S˜ao Paulo, Brazil
+S˜ao Paulo, Brazil"
+a9ebeca46445b8af728118b05e56d95d4985000c,Restricted Isometry Property of Subspace Projection Matrix Under Random Compression,"Restricted Isometry Property of Subspace Projection
+Matrix Under Random Compression
+Xinyue Shen, Student Member, IEEE, and Yuantao Gu, Member, IEEE"
+a91fd02ed2231ead51078e3e1f055d8be7828d02,The Robust Manifold Defense: Adversarial Training using Generative Models,"The Robust Manifold Defense:
+Adversarial Training using Generative Models
+Andrew Ilyas
+Ajil Jalal
+Eirini Asteri
+MIT EECS
+UT Austin
+UT Austin
+Constantinos Daskalakis
+Alexandros G. Dimakis
+MIT EECS
+UT Austin
+December 27, 2017
+Problems worthy of attack,
+prove their worth by fighting back."
+a9ad8f6c6bf110485921b17f9790241b1548487c,Automatic Skin Tone Extraction for Visagism Applications,
+a955033ca6716bf9957b362b77092592461664b4,Video Based Face Recognition Using Artificial Neural Network,"ISSN(Online): 2320-9801
+ISSN (Print): 2320-9798
+International Journal of Innovative Research in Computer
+and Communication Engineering
+(An ISO 3297: 2007 Certified Organization)
+Video Based Face Recognition Using Artificial
+Vol. 3, Issue 6, June 2015
+Neural Network
+Santhy Mol T, Neethu Susan Jacob
+Pursuing M.Tech, Dept. of CSE, Caarmel Engineering College, MG University, Kerala, India
+Assistant Professor, Dept of CSE, Caarmel Engineering College, MG University, Kerala, India"
+a956ff50ca958a3619b476d16525c6c3d17ca264,A novel bidirectional neural network for face recognition,"A Novel Bidirectional Neural Network for Face Recognition
+JalilMazloum, Ali Jalali and Javad Amiryan
+Electrical and Computer Engineering Department
+ShahidBeheshti University
+Tehran, Iran"
+a90226c41b79f8b06007609f39f82757073641e2,Β-vae: Learning Basic Visual Concepts with a Constrained Variational Framework,"Under review as a conference paper at ICLR 2017
+β-VAE: LEARNING BASIC VISUAL CONCEPTS WITH A
+CONSTRAINED VARIATIONAL FRAMEWORK
+Irina Higgins, Loic Matthey, Arka Pal, Christopher Burgess, Xavier Glorot,
+Matthew Botvinick, Shakir Mohamed, Alexander Lerchner
+Google DeepMind
+{irinah,lmatthey,arkap,cpburgess,glorotx,"
+a98316980b126f90514f33214dde51813693fe0d,Collaborations on YouTube: From Unsupervised Detection to the Impact on Video and Channel Popularity,"Collaborations on YouTube: From Unsupervised Detection to the
+Impact on Video and Channel Popularity
+Christian Koch, Moritz Lode, Denny Stohr, Amr Rizk, Ralf Steinmetz
+Multimedia Communications Lab (KOM), Technische Universität Darmstadt, Germany
+E-Mail: {Christian.Koch | Denny.Stohr | Amr.Rizk |"
+a93781e6db8c03668f277676d901905ef44ae49f,Recent Data Sets on Object Manipulation: A Survey.,"Recent Datasets on Object Manipulation: A Survey
+Yongqiang Huang, Matteo Bianchi, Minas Liarokapis and Yu Sun"
+a969efee78149357ec109c1de2238a0cc670858a,Automatic 2.5-D Facial Landmarking and Emotion Annotation for Social Interaction Assistance,"Automatic 2.5-D Facial Landmarking and Emotion
+Annotation for Social Interaction Assistance
+Xi Zhao, Member, IEEE, Jianhua Zou, Member, IEEE, Huibin Li, Student Member, IEEE,
+Emmanuel Dellandréa, Member, IEEE, Ioannis A. Kakadiaris, Senior Member, IEEE,
+and Liming Chen, Senior Member, IEEE"
+a99cf14afb556187233f772fa9bf561d7cf0c088,A Survey on Sclera Vein Recognition Techniques,"INTERNATIONAL JOURNAL OF ADVANCED RESEARCH IN COMPUTER SCIENCE AND APPLICATIONS
+ISSN 2321-872X ONLINE ISSN 2321-8932 PRINT
+VOLUME 2, ISSUE 12, DECEMBER 2014.
+A SURVEY ON SCLERA VEIN RECOGNITION TECHNIQUES
+Dr.S.BABU 1 S.SUBA 2
+Associate Professor / CSE, IFET College of Engineering, Viluppuram, Tamilnadu, India
+PG Scholar, IFET College of Engineering, Viluppuram, Tamilnadu, India"
+a9d2c96cead937e53e614abb9fd051574a55c77a,Ensembling Visual Explanations for,"In Proceedings of the NIPS 2017 workshop on Visually-Grounded Interaction and
+Language (ViGIL), December 2017."
+a94c3091be2090df6144bd121e41e7dfa96ec0e9,Enhanced visual functioning in autism: an ALE meta-analysis.,"Enhanced Visual Functioning in Autism:
+An ALE Meta-Analysis
+Fabienne Samson,1 Laurent Mottron,1 Isabelle Soulie` res,1,2
+and Thomas A. Zeffiro2
+Centre d’Excellence en Troubles Envahissants du De´veloppement de l’Universite´ de Montre´al
+Neural Systems Group, Massachusetts General Hospital, Boston, Massachusetts
+(CETEDUM), Montre´al, QC, Canada"
+a9adb6dcccab2d45828e11a6f152530ba8066de6,Aydınlanma Alt-uzaylarına dayalı Gürbüz Yüz Tanıma Illumination Subspaces based Robust Face Recognition,"Aydınlanma Alt-uzaylarına dayalı Gürbüz Yüz Tanıma
+Illumination Subspaces based Robust Face Recognition
+D. Kern, H.K. Ekenel, R. Stiefelhagen
+Interactive Systems Labs, Universität Karlsruhe (TH)
+76131 Karlsruhe, Almanya
+web: http://isl.ira.uka.de/face_recognition
+Özetçe
+yönlerine
+aydınlanma
+kaynaklanan
+sonra, yüz uzayı
+Bu çalışmada aydınlanma alt-uzaylarına dayalı bir yüz tanıma
+sistemi sunulmuştur. Bu sistemde,
+ilk olarak, baskın
+aydınlanma yönleri, bir topaklandırma algoritması kullanılarak
+öğrenilmiştir. Topaklandırma algoritması sonucu önden, sağ
+ve sol yanlardan olmak üzere üç baskın aydınlanma yönü
+gözlemlenmiştir. Baskın
+karar
+-yüzün görünümündeki"
+a95dc0c4a9d882a903ce8c70e80399f38d2dcc89,Review and Implementation of High-Dimensional Local Binary Patterns and Its Application to Face Recognition,"TR-IIS-14-003
+Review and Implementation of
+High-Dimensional Local Binary
+Patterns and Its Application to
+Face Recognition
+Bor-Chun Chen, Chu-Song Chen, Winston Hsu
+July. 24, 2014 || Technical Report No. TR-IIS-14-003
+http://www.iis.sinica.edu.tw/page/library/TechReport/tr2014/tr14.html"
+a9286519e12675302b1d7d2fe0ca3cc4dc7d17f6,Learning to Succeed while Teaching to Fail: Privacy in Closed Machine Learning Systems,"Learning to Succeed while Teaching to Fail:
+Privacy in Closed Machine Learning Systems
+Jure Sokoli´c, Qiang Qiu, Miguel R. D. Rodrigues, and Guillermo Sapiro"
+a949b8700ca6ba96ee40f75dfee1410c5bbdb3db,Instance-Weighted Transfer Learning of Active Appearance Models,"Instance-weighted Transfer Learning of Active Appearance Models
+Daniel Haase, Erik Rodner, and Joachim Denzler
+Computer Vision Group, Friedrich Schiller University of Jena, Germany
+Ernst-Abbe-Platz 2-4, 07743 Jena, Germany"
+a94aac3caccebd82413dd05707ef8bf525dc46b9,Evaluation of the UR3D algorithm using the FRGC v2 data set,"Evaluation of the UR3D algorithm using the FRGC v2 data set
+G. Passalis, I.A. Kakadiaris, T. Theoharis, G. Toderici and N. Murtuza
+Visual Computing Lab, Dept. of Computer Science, Univ. of Houston, Houston, TX 77204, USA"
+a92b5234b8b73e06709dd48ec5f0ec357c1aabed,Disjoint Multi-task Learning Between Heterogeneous Human-Centric Tasks,
+a9453721f35f364e176a5aaa7bdb622f72fbcaec,Learning Articulated Motion Models from Visual and Lingual Signals,"Learning Articulated Motion Models from Visual and Lingual Signals
+Zhengyang Wu
+Georgia Tech
+Atlanta, GA 30332
+Mohit Bansal
+TTI-Chicago
+Chicago, IL 60637
+Matthew R. Walter
+TTI-Chicago
+Chicago, IL 60637"
+a94b832facb57ea37b18927b13d2dd4c5fa3a9ea,Domain transfer convolutional attribute embedding,"April 3, 2018
+Journal of Experimental & Theoretical Artificial Intelligence
+To appear in the Journal of Experimental & Theoretical Artificial Intelligence
+Vol. 00, No. 00, Month 20XX, 1–23
+Domain transfer convolutional attribute embedding
+Fang Sua ∗ , Jing-Yan Wangb
+School of Economics and Management, Shaanxi University of Science & Technology, Xi’an,
+New York University Abu Dhabi, Abu Dhabi, United Arab Emirates
+ShaanXi Province, P.R.C, 710021
+(v5.0 released July 2015)
+In this paper, we study the problem of transfer learning with the attribute data. In the trans-
+fer learning problem, we want to leverage the data of the auxiliary and the target domains
+to build an effective model for the classification problem in the target domain. Meanwhile,
+the attributes are naturally stable cross different domains. This strongly motives us to learn
+effective domain transfer attribute representations. To this end, we proposed to embed the
+attributes of the data to a common space by using the powerful convolutional neural net-
+work (CNN) model. The convolutional representations of the data points are mapped to the
+corresponding attributes so that they can be effective embedding of the attributes. We also
+represent the data of different domains by a domain-independent CNN, and a domain-specific
+CNN, and combine their outputs with the attribute embedding to build the classification"
+a9f5acdcf1fbc9563aaad943cbe1c195b796aa62,Learning Fashion By Simulated Human Supervision,"Learning Fashion By Simulated Human Supervision
+Eli Alshan Sharon Alpert Assaf Neuberger Nathaniel Bubis Eduard Oks
+{alshan, alperts, neuberg, bubis,
+Amazon Lab126"
+a91d0ebc1255d6de1c4588767b3b5e1fc630e56f,eTRIMS Scene Interpretation Datasets,"Universit¨at Hamburg
+Technical Report FBI-HH-M-345/10
+eTRIMS Scene Interpretation
+Datasets
+Johannes Hartz
+Patrick Koopmann
+Arne Kreutzmann
+Kasim Terzi´c
+{hartz | koopmann |
+informatik.uni-hamburg.de
+November 15, 2010"
+a9978df0b4df4d7b04bc4e9464c67f9ff7c31d3d,From Traditional to Interactive Playspaces,"FROM TRADITIONAL TO
+FROM TRADITIONAL TO
+INTERACTIVE PLAYSPACES
+INTERACTIVE PLAYSPACES
+Automatic Analysis of Player Behavior in the
+Interactive Tag Playground
+CTIT Ph.D. Thesis Series No. 16-386
+ISSN: 1381-3617
+Alejandro Moreno"
+a9e53a7533c9c743b57b6668c11be0c73525f188,Enhanced Feature Sets for Face Recognition with varying Lighting Conditions and Noise,"Enhanced Feature Sets for Face Recognition with varying Lighting Conditions and Noise ISSN 2278 – 3806
+Enhanced Feature Sets for Face Recognition with
+varying Lighting Conditions and Noise
+Final ME (CSE), 2Head of Department of Computer Science and Engineering
+S.Vishnupriya1 Dr.k.Lakshmi2
+Periyar Maniammai University, Thanjavur, Tamilnadu, India."
+a975f1aea5dbb748955da0e17eef8d2270a49f25,Object Recognition,"OBJECT RECOGNITION
+Object recognition is a subproblem of the more general
+problem of perception, and can be defined as follows. Given
+a scene consisting of one or more objects, can we identify
+and localize those objects that are sufficiently visible to
+the sensory system? It is generally assumed that a de-
+scription of each object to be recognized is available to the
+computer and can be used to facilitate the task of iden-
+tification and localization. These descriptions can either
+be model-based or appearance-based, or a combination of
+both. Model-based object representation is based on geo-
+metric features, whereas appearance-based representation
+uses a large set of images for training but does not require
+any insight into the geometric structure of the objects. Ob-
+ject recognition is a key component of many intelligent vi-
+sion systems, such as those used in hand-eye coordination
+for bin picking, inspection, and mobile robotics.
+Various types of object recognition problems can be
+stated based on the dimensionality of their spatial descrip-
+tion: (1) recognition of a 2-D object from a single 2-D im-"
+a9c120de41679fe336e2779f3e6fe4b04945cb3a,A Robust Multilinear Model Learning Framework for 3D Faces,"A Robust Multilinear Model Learning Framework for 3D Faces∗
+Timo Bolkart
+Stefanie Wuhrer
+Saarland University, Germany
+Inria Grenoble Rhˆone-Alpes, France"
+a9f63dcae167630b0c6ba4131897539151217e2b,Testing a Method for Statistical Image Classification in Image Retrieval,"Testing a Method for Statistical Image
+Classification in Image Retrieval
+Christoph Rasche, Constantin Vertan
+Laboratorul de Analiza si Prelucrarea Imaginilor
+Universitatea Politehnica din Bucuresti
+Bucuresti 061071, RO"
+a9f03e4bb90addab234423994bfd8c25854484ea,Object Based Image Retrieval Using Lbp and Fuzzy Clustering Method,"Volume1, Issue 3, 15 May- 15 August 2015
+International Journal In Applied Studies And
+Production Management
+ISSN 2394-840X
+OBJECT BASED IMAGE RETRIEVAL USING LBP AND FUZZY
+CLUSTERING METHOD
+Jiwanjot kaur Bhinder
+Department of Computer Science & Engg, RIMT(IET) Mandigobindgarh
+Kirti joshi
+Department of Computer Science & Engg, RIMT(IET) Mandigobindgarh"
+d55cce6ecbad2c6ecccbaa1cb0d14ae3a46b1454,Multimodal representation learning with neural networks,"Multimodal representation learning with
+neural networks
+John Edilson Arevalo Ovalle
+National University of Colombia
+Engineering School, Systems and Industrial Engineering Departament
+Bogot´a, Colombia"
+d5813a4a0cca115b05e03d8d8c1ac8bf07176e96,Supplementary Material: Reinforced Video Captioning with Entailment Rewards,"Supplementary Material: Reinforced Video Captioning with Entailment
+Rewards
+Ramakanth Pasunuru and Mohit Bansal
+UNC Chapel Hill
+{ram,
+Attention-based Baseline Model
+(Cross-Entropy)
+Reinforcement Learning (Policy
+Gradient)
+Our attention baseline model is similar to the Bah-
+danau et al. (2015) architecture, where we encode
+input frame level video features to a bi-directional
+LSTM-RNN and then generate the caption using a
+single layer LSTM-RNN, with an attention mech-
+nism. Let {f1, f2, ..., fn} be the frame-level fea-
+tures of a video clip and {w1, w2, ..., wm} be the
+sequence of words forming a caption. The distri-
+ution of words at time step t given the previously
+generated words and input video frame-level fea-
+tures is given as follows:"
+d50c6d22449cc9170ab868b42f8c72f8d31f9b6c,Dynamic Multi-Task Learning with Convolutional Neural Network,Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+d5bef023a7d1032a5c717109a9c1b600ee1e8a71,Autism Spectrum Disorder (ASD) and Fragile X Syndrome (FXS): Two Overlapping Disorders Reviewed through Electroencephalography—What Can be Interpreted from the Available Information?,"Brain Sci. 2015, 5, 92-117; doi:10.3390/brainsci5020092
+OPEN ACCESS
+rain sciences
+ISSN 2076-3425
+www.mdpi.com/journal/brainsci/
+Review
+Autism Spectrum Disorder (ASD) and Fragile X Syndrome
+(FXS): Two Overlapping Disorders Reviewed through
+Electroencephalography—What Can be Interpreted
+from the Available Information?
+Niamh Mc Devitt 1,2,*, Louise Gallagher 1,3,4,5,6 and Richard B. Reilly 1,2,3,7
+School of Medicine, Trinity College, the University of Dublin, Dublin, Ireland;
+E-Mails: (L.G.); (R.B.R.)
+Trinity Centre for Bioengineering, Trinity College Dublin, the University of Dublin, Dublin, Ireland
+Trinity College Institute for Neuroscience, Trinity College Dublin, the University of Dublin,
+Dublin, Ireland
+Department of Psychiatry, Trinity College Dublin, the University of Dublin, Dublin, Ireland
+5 Institute of Molecular Medicine, Trinity Centre for Health Sciences, St James’ Hospital,
+Dublin, Ireland
+6 Linn Dara Child and Adolescent Mental Health Services, Cherry Orchard Hospital Dublin 10,"
+d522c162bd03e935b1417f2e564d1357e98826d2,Weakly supervised object extraction with iterative contour prior for remote sensing images,"He et al. EURASIP Journal on Advances in Signal Processing 2013, 2013:19
+http://asp.eurasipjournals.com/content/2013/1/19
+RESEARCH
+Open Access
+Weakly supervised object extraction with
+iterative contour prior for remote sensing
+images
+Chu He1,2*, Yu Zhang1, Bo Shi1, Xin Su3, Xin Xu1 and Mingsheng Liao2"
+d5d3c1b299e81b4ab96d052f8a37013305b731d9,Performance Evaluation of Human Detection Systems for Robot Safety,"J Intell Robot Syst
+DOI 10.1007/s10846-016-0334-3
+Performance Evaluation of Human Detection Systems
+for Robot Safety
+William Shackleford · Geraldine Cheok ·
+Tsai Hong · Kamel Saidi · Michael Shneier
+Received: 9 April 2015 / Accepted: 11 January 2016
+© Springer Science+Business Media Dordrecht (outside the USA) 2016"
+d59f18fcb07648381aa5232842eabba1db52383e,Robust Facial Expression Recognition Using Spatially Localized Geometric Model,"International Conference on Systemics, Cybernetics and Informatics, February 12–15, 2004
+ROBUST FACIAL EXPRESSION RECOGNITION USING SPATIALLY
+LOCALIZED GEOMETRIC MODEL
+Department of Electrical Engineering
+Dept. of Computer Sc. and Engg.
+Ashutosh Saxena
+IIT Kanpur
+Kanpur 208016, India
+Kanpur 208016, India
+Ankit Anand
+IIT Kanpur
+Prof. Amitabha Mukerjee
+Dept. of Computer Sc. and Engg.
+IIT Kanpur
+Kanpur 208016, India
+While approaches based on 3D deformable facial model have
+chieved expression recognition rates of as high as 98% [2], they
+re computationally inefficient and require considerable apriori
+training based on 3D information, which is often unavailable.
+Recognition from 2D images remains a difficult yet important"
+d5579b2708a1c713e1b2feb8646533ce26085a3a,Effective Use of Dilated Convolutions for Segmenting Small Object Instances in Remote Sensing Imagery,"Effective Use of Dilated Convolutions for Segmenting Small Object Instances in
+Remote Sensing Imagery
+Ryuhei Hamaguchi Aito Fujita Keisuke Nemoto
+Tomoyuki Imaizumi Shuhei Hikosaka
+PASCO CORPORATION, Japan
+{riyhuc2734, aaitti6875, koetio8807, tiommu4352,"
+d588dd4f305cdea37add2e9bb3d769df98efe880,Audio - Visual Authentication System over the Internet Protocol,"Audio-Visual Authentication System over the
+Internet Protocol
+Yee Wan Wong, Kah Phooi Seng, and Li-Minn Ang
+bandoned.
+illumination based
+is developed with the objective to"
+d5de20cca347d6c5e6f662292e4d52e765ff5cee,Learning Tensors in Reproducing Kernel Hilbert Spaces with Multilinear Spectral Penalties,
+d59a9d80e7d8c875d2b73241a8b02078ea6ad0a7,A Deep Learning Perspective on the Origin of Facial Expressions,"BREUER, KIMMEL: A DEEP LEARNING PERSPECTIVE ON FACIAL EXPRESSIONS
+A Deep Learning Perspective on the Origin
+of Facial Expressions
+Ran Breuer
+Ron Kimmel
+Department of Computer Science
+Technion - Israel Institute of Technology
+Technion City, Haifa, Israel
+Figure 1: Demonstration of the filter visualization process."
+d55d6ccefe797317996805ebf58a74587b158950,Distribution-based Label Space Transformation for Multi-label Learning,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+Distribution-based Label Space Transformation for
+Multi-label Learning
+Zongting Lyu, Yan Yan, and Fei Wu"
+d5444f9475253bbcfef85c351ea9dab56793b9ea,BoxCars: Improving Fine-Grained Recognition of Vehicles using 3-D Bounding Boxes in Traffic Surveillance,"IEEE TRANSACTIONS ON INTELLIGENT TRANSPORTATION SYSTEMS
+BoxCars: Improving Fine-Grained Recognition
+of Vehicles using 3D Bounding Boxes
+in Traffic Surveillance
+Jakub Sochor, Jakub ˇSpaˇnhel, Adam Herout
+in contrast"
+d53994f28deb2800120fab8a42852813b3b8c081,Does the Left Hair Part Look Better ( or Worse ) Than the Right ?,"Article
+Does the Left Hair Part Look Better
+(or Worse) Than the Right?
+Social Psychological and
+Personality Science
+ª The Author(s) 2018
+Reprints and permission:
+sagepub.com/journalsPermissions.nav
+DOI: 10.1177/1948550618762500
+journals.sagepub.com/home/spp
+Jeremy A. Frimer1"
+d5ab6aa15dad26a6ace5ab83ce62b7467a18a88e,Optimized Structure for Facial Action Unit Relationship Using Bayesian Network,"World Journal of Computer Application and Technology 2(7): 133-138, 2014
+DOI: 10.13189/wjcat.2014.020701
+http://www.hrpub.org
+Optimized Structure for Facial Action Unit Relationship
+Using Bayesian Network
+Yee Koon Loh*, Shahrel A. Suandi
+Intelligent Biometric Group, School of Electrical and Electronic Engineering, Engineering Campus, Universiti Sains Malaysia, Pulau
+*Corresponding Author:
+Pinang, Malaysia
+Copyright © 2014 Horizon Research Publishing All rights reserved."
+d5fe9c84710b71a754676b2ee67cec63e8cd184b,FPGA Implementation of a HOG-based Pedestrian Recognition System,"Sebastian Bauer, Ulrich Brunsmann, Stefan Schlotterbeck-Macht
+Aschaffenburg University of Applied Sciences, Aschaffenburg, Germany
+Faculty of Engineering
+FPGA Implementation of a HOG-based
+Pedestrian Recognition System
+FPGA Implementation of a HOG-based
+Pedestrian Recognition System
+{sebastian.bauer, ulrich.brunsmann, stefan.schlotterbeck-macht}
+terms of
+With respect to road crash statistics, on-board
+pedestrian detection is a key task for future
+dvanced driver assistance systems.
+In this
+paper, we describe the implementation of a real-
+time pedestrian recognition system that combines
+FPGA-based extraction of image features with a
+CPU-based object localization and classification
+framework.
+features, we have
+implemented"
+d5d6b3959958adb1333fa1a72227378ad3f7c16d,Collaborative Contributions for Better Annotations,
+d56fe69cbfd08525f20679ffc50707b738b88031,Training of multiple classifier systems utilizing partially labeled sequential data sets,"Training of multiple classifier systems utilizing
+partially labelled sequences
+Martin Schels, Patrick Schillinger, and Friedhelm Schwenker
+Ulm University - Department of Neural Information Processing
+89069 Ulm - Germany"
+d5c6c0fb51947a2df1389f1aab7a635bf687ac1d,A Multiview Approach to Learning Articulated Motion Models,"A Multiview Approach to Learning
+Articulated Motion Models
+Andrea F. Daniele, Thomas M. Howard, and Matthew R. Walter"
+d5de42d37ee84c86b8f9a054f90ddb4566990ec0,Asynchronous Temporal Fields for Action Recognition,"Asynchronous Temporal Fields for Action Recognition
+Gunnar A. Sigurdsson1∗ Santosh Divvala2,3 Ali Farhadi2,3 Abhinav Gupta1,3
+Carnegie Mellon University 2University of Washington 3Allen Institute for Artificial Intelligence
+github.com/gsig/temporal-fields/"
+d50a40f2d24363809a9ac57cf7fbb630644af0e5,End-to-end Trained CNN Encode-Decoder Networks for Image Steganography,"END-TO-END TRAINED CNN ENCODER-DECODER NETWORKS FOR IMAGE
+STEGANOGRAPHY
+Atique ur Rehman, Rafia Rahim, Shahroz Nadeem, Sibt ul Hussain
+National University of Computer & Emerging Sciences (NUCES-FAST), Islamabad, Pakistan.
+Reveal.ai (Recognition, Vision & Learning) Lab"
+d5cf6a02f8308e948e3bcd1fd1ca660ea8ea8921,G Enerative Networks as Inverse Problems with Scattering Transforms,"Under review as a conference paper at ICLR 2018
+GENERATIVE NETWORKS AS INVERSE PROBLEMS
+WITH SCATTERING TRANSFORMS
+Anonymous authors
+Paper under double-blind review"
+d5b5c63c5611d7b911bc1f7e161a0863a34d44ea,Extracting Scene-Dependent Discriminant Features for Enhancing Face Recognition under Severe Conditions,"Extracting Scene-dependent Discriminant
+Features for Enhancing Face Recognition
+under Severe Conditions
+Rui Ishiyama and Nobuyuki Yasukawa
+Information and Media Processing Research Laboratories, NEC Corporation
+753, Shimonumabe, Nakahara-Ku, Kawasaki 211-8666 Japan"
+d53c5a974f9fccf18f3c8f7d73522d6ca7162115,X-GAN : Improving Generative Adversarial Networks with ConveX Combinations,"X-GAN: Improving Generative Adversarial
+Networks with ConveX Combinations
+Oliver Blum, Biagio Brattoli, and Bj¨orn Ommer
+Heidelberg University, HCI / IWR, Germany"
+d59404354f84ad98fa809fd1295608bf3d658bdc,Face Synthesis from Visual Attributes via Sketch using Conditional VAEs and GANs,"International Journal of Computer Vision manuscript No.
+(will be inserted by the editor)
+Face Synthesis from Visual Attributes via Sketch using
+Conditional VAEs and GANs
+Xing Di · Vishal M. Patel
+Received: date / Accepted: date"
+d56407072eb9847fa44d49969129b5a4d1ef9ceb,Gaussian Process Prior Variational Autoencoders,"Gaussian Process Prior Variational Autoencoders
+Francesco Paolo Casale†∗, Adrian V Dalca‡§, Luca Saglietti†¶,
+Jennifer Listgarten(cid:93), Nicolo Fusi†
+Microsoft Research New England, Cambridge (MA), USA
+Computer Science and Artificial Intelligence Lab, MIT, Cambridge (MA), USA
+§ Martinos Center for Biomedical Imaging, MGH, HMS, Boston (MA), USA;
+¶ Italian Institute for Genomic Medicine, Torino, Italy
+(cid:93) EECS Department, University of California, Berkeley (CA), USA."
+d5856f47fe117c114e8bcfbf2abc4e80691a512c,Interpreting Complex Scenes using a Hierarchy of Prototypical Scene Models,"Interpreting Complex Scenes using a
+Hierarchy of Prototypical Scene
+Models
+Dissertation
+zur Erlangung des akademischen Grades
+Doktor der Ingenieurwissenschaften (Dr.-Ing.)
+vorgelegt an
+der Technischen Fakult¨at der Universit¨at Bielefeld
+Sarah Bonnin
+4.10.2014"
+d54f508c943b8415bfdd30d9210869ec93ff3f03,A method of illumination compensation for human face image based on quotient image,"Available online at www.sciencedirect.com
+Information Sciences 178 (2008) 2705–2721
+www.elsevier.com/locate/ins
+A method of illumination compensation for human face
+image based on quotient image q
+Wang Ying-hui a,b, Ning Xiao-juan a,*, Yang Chun-xia a, Wang Qiong-fang b
+School of Computer Science Engineering, Xi’an University of Technology, Xi’an 710048, China
+Department of Computer Science, Shaanxi Normal University, Xi’an 710062, China
+Received 23 February 2007; received in revised form 2 December 2007; accepted 14 December 2007"
+d24a30ed78b749f3730e25dcef89472dd5fb439c,Improving Face Recognition Performance Using a Hierarchical Bayesian Model,"Improving Face Recognition
+Performance Using a Hierarchical
+Bayesian Model
+Ashwini Shikaripur Nadig
+Submitted to the graduate degree program in
+Electrical Engineering & Computer Science and the
+Graduate Faculty of the University of Kansas
+School of Engineering in partial fulfillment of the
+requirements for the degree of Master of Science
+Thesis Committee:
+Dr. Brian Potetz: Chairperson
+Dr. Prasad Kulkarni
+Dr. Luke Huan
+Date Defended"
+d2f717d1799b5cec5f1f426511527bd7e6e05d9d,Image-Based Synthesis for Deep 3D Human Pose Estimation,"Noname manuscript No.
+(will be inserted by the editor)
+Image-based Synthesis for Deep 3D Human Pose Estimation
+Grégory Rogez · Cordelia Schmid
+Received: date / Accepted: date"
+d231a81b38fde73bdbf13cfec57d6652f8546c3c,SUPERRESOLUTION TECHNIQUES FOR FACE RECOGNITION FROM VIDEO by Osman,"SUPERRESOLUTION TECHNIQUES
+FOR FACE RECOGNITION FROM VIDEO
+Osman Gökhan Sezer
+B.S., E.E., Boğaziçi University, 2003
+Submitted to the Graduate School of Engineering
+and Natural Sciences in partially fulfillment of
+the requirement for the degree of
+Master of Science
+Graduate Program in Electronics Engineering and Computer Science
+Sabancı University
+Spring 2005"
+d22785eae6b7503cb16402514fd5bd9571511654,Evaluating Facial Expressions with Different Occlusion around Image Sequence,"Evaluating Facial Expressions with Different
+Occlusion around Image Sequence
+Ankita Vyas, Ramchand Hablani
+Department of Computer Science
+Sanghvi Institute of Management & Science
+Indore (MP), India
+local
+INTRODUCTION"
+d28c12e270a06e977b59194cc6564787c87caa7e,Human Action Poselets Estimation via Color G-surf in Still Images,"HUMAN ACTION POSELETS ESTIMATION VIA COLOR G-SURF IN STILL IMAGES
+M. Favorskaya *, D. Novikov, Y. Savitskaya
+Institute of Informatics and Telecommunications, Siberian State Aerospace University, 31 Krasnoyarsky Rabochy av., Krasnoyarsk,
+660014 Russian Federation - (favorskaya,
+Commission WG V/5, WG III/3
+KEY WORDS: Human Action, Poselets, Gauge-SURF, Random Forest, Still Image"
+d2df37ecfbf914d5b81e2e5e342e3907c6f55a14,Can Convolution Neural Network ( CNN ) Triumph in Ear Recognition of Uniform Illumination Invariant ?,"Indonesian Journal of Electrical Engineering and Computer Science
+Vol. 11, No. 2, August 2018, pp. 558~566
+ISSN: 2502-4752, DOI: 10.11591/ijeecs.v11.i2.pp558-566
+ 558
+Can Convolution Neural Network (CNN) Triumph in Ear
+Recognition of Uniform Illumination Invariant?
+Nursuriati Jamil1, Ali Abd Almisreb2, Syed Mohd Zahid Syed Zainal Ariffin3, N. Md Din4,
+Raseeda Hamzah5
+,3,5Faculty of Computer and Mathematical Sciences, Universiti Teknologi MARA,
+40450 Shah Alam, Selangor, Malaysia
+,4College of Graduate Studies, Universiti Tenaga Nasional, Jalan IKRAM-UNITEN, 43000 Kajang, Malaysia
+Article Info
+Article history:
+Received Mar 1, 2018
+Revised Apr 21, 2018
+Accepted May 1, 2018
+Keywords:
+Convolution Neural Network
+Ear Recognition
+Uniform Illumination Invariant"
+d252e10024a22c8274ae67dbf37aa854d75a85f2,Joint Gender Classification and Age Estimation by Nearly Orthogonalizing Their Semantic Spaces,"Joint Gender Classification and Age Estimation
+y Nearly Orthogonalizing Their Semantic
+Spaces
+Qing Tiana, Songcan Chena,∗
+College of Computer Science and Technology, Nanjing University of Aeronautics and
+Astronautics, Nanjing 210016, China"
+d2eb1079552fb736e3ba5e494543e67620832c52,DeSTNet: Densely Fused Spatial Transformer Networks,"ANNUNZIATA, SAGONAS, CALÌ: DENSELY FUSED SPATIAL TRANSFORMER NETWORKS1
+DeSTNet: Densely Fused Spatial
+Transformer Networks1
+Roberto Annunziata
+Christos Sagonas
+Jacques Calì
+Onfido Research
+Finsbury Avenue
+London, UK"
+d24dafe10ec43ac8fb98715b0e0bd8e479985260,"Effects of Social Anxiety on Emotional Mimicry and Contagion: Feeling Negative, but Smiling Politely","J Nonverbal Behav (2018) 42:81–99
+https://doi.org/10.1007/s10919-017-0266-z
+O R I G I N A L P A P E R
+Effects of Social Anxiety on Emotional Mimicry
+nd Contagion: Feeling Negative, but Smiling Politely
+Corine Dijk1
+Charlotte van Eeuwijk4
+• Gerben A. van Kleef2
+• Agneta H. Fischer2
+• Nexhmedin Morina3
+Published online: 25 September 2017
+Ó The Author(s) 2017. This article is an open access publication"
+d2860bb05f747e4628e95e4d84018263831bab0d,Learning to Generate Samples from Noise through Infusion Training,"Published as a conference paper at ICLR 2017
+LEARNING TO GENERATE SAMPLES FROM NOISE
+THROUGH INFUSION TRAINING
+Florian Bordes, Sina Honari, Pascal Vincent∗
+Montreal Institute for Learning Algorithms (MILA)
+D´epartement d’Informatique et de Recherche Op´erationnelle
+Universit´e de Montr´eal
+Montr´eal, Qu´ebec, Canada"
+d2b2b56dd8c1daa61152595caf759a62596a85c9,Revocable and Non-Invertible Multibiometric Template Protection based on Matrix Transformation,"Pertanika J. Sci. & Technol. 26 (1): 133 - 160 (2018)
+Revocable and Non-Invertible Multibiometric Template
+Protection based on Matrix Transformation
+Jegede, A.1,2*, Udzir, N. I.1, Abdullah, A.1 and Mahmod, R.1
+Faculty of Computer Science and Information Technology, Universiti Putra Malaysia, 43400 UPM,
+Serdang, Selangor, Malaysia
+Department of Computer Science, University of Jos, 930001 Nigeria"
+d20e7d7ab8e767dc1c170ca2141d8ba64a4d092b,Mental Concept in Autism,"Psychology, 2014, 5, 1392-1403
+Published Online August 2014 in SciRes. http://www.scirp.org/journal/psych
+http://dx.doi.org/10.4236/psych.2014.511150
+“Please Draw Me a Face…” Atypical Face
+Mental Concept in Autism
+Emilie Meaux1*, David Bakhos2, Frédérique Bonnet-Brilhault1, Patrice Gillet3,
+Emmanuel Lescanne4, Catherine Barthélémy1, Magali Batty1
+UMRS Imagerie et Cerveau, Inserm U930 Equipe 1, CNRS ERL 3106, Université François Rabelais de Tours,
+CHRU de Tours, Tours, France
+Unité Pédiatrique d’ORL et CCF, Centre Hospitalier Régional Universitaire de Tours, Université François
+Rabelais de Tours, CHRU de Tours, Tours, France
+Université François Rabelais de Tours, CHRU de Tours, Tours, France
+Service d’ORL et CCF Pédiatrique, CHU de Tours Gatien-de-Clocheville, Université François Rabelais de Tours,
+Tours, France
+Email:
+Received 16 May 2014; revised 12 June 2014; accepted 5 July 2014
+Copyright © 2014 by authors and Scientific Research Publishing Inc.
+This work is licensed under the Creative Commons Attribution International License (CC BY).
+http://creativecommons.org/licenses/by/4.0/"
+d259d3652f03c7b80e29c986e9540ab00b1f1133,3D Face Detection and Recognition under Occlusion,"Dr.V.Ramaswamy1, Parashuram Baraki2
+Research Guide, Jain University, Bangalore,
+Doctoral Student, Jain University, Bangalore
+& Asst.Professor , CS&E, Dept,
+GM Institute of Technology, Davanagere
+D Face Detection and Recognition under Occlusion
+is very vital. Three-dimensional"
+d2f3ba37ef34d5d39f799f8dd3557f1eb795aedd,Learning Unified Embedding for Apparel Recognition,"Learning Unified Embedding for Apparel Recognition
+Yang Song
+Google
+Yuan Li
+Google
+Xiao Zhang
+Google
+Bo Wu
+Google
+Chao-Yeh Chen
+Google
+Hartwig Adam
+Google"
+d278e020be85a1ccd90aa366b70c43884dd3f798,Learning From Less Data: Diversified Subset Selection and Active Learning in Image Classification Tasks,"Learning From Less Data: Diversified Subset Selection and
+Active Learning in Image Classification Tasks
+Vishal Kaushal
+IIT Bombay
+Mumbai, Maharashtra, India
+Khoshrav Doctor
+AITOE Labs
+Mumbai, Maharashtra, India
+Suyash Shetty
+AITOE Labs
+Mumbai, Maharashtra, India
+Rishabh Iyer
+AITOE Labs
+Seattle, Washington, USA
+Anurag Sahoo
+AITOE Labs
+Seattle, Washington, USA
+Narsimha Raju
+IIT Bombay
+Mumbai, Maharashtra, India"
+d2b86b6dc93631990e21a12278e77f002fb4b116,Aalborg Universitet Attention in Multimodal Neural Networks for Person Re-identification,"Aalborg Universitet
+Attention in Multimodal Neural Networks for Person Re-identification
+Lejbølle, Aske Rasch; Krogh, Benjamin; Nasrollahi, Kamal; Moeslund, Thomas B.
+Published in:
+018 IEEE Computer Vision and Pattern Recognition Workshops: Visual Understanding of Humans in Crowd
+Scene
+Publication date:
+Document Version
+Accepted author manuscript, peer reviewed version
+Link to publication from Aalborg University
+Citation for published version (APA):
+Lejbølle, A. R., Krogh, B., Nasrollahi, K., & Moeslund, T. B. (2018). Attention in Multimodal Neural Networks for
+Person Re-identification. In 2018 IEEE Computer Vision and Pattern Recognition Workshops: Visual
+Understanding of Humans in Crowd Scene (pp. 179-187). IEEE.
+General rights
+Copyright and moral rights for the publications made accessible in the public portal are retained by the authors and/or other copyright owners
+nd it is a condition of accessing publications that users recognise and abide by the legal requirements associated with these rights.
+? Users may download and print one copy of any publication from the public portal for the purpose of private study or research.
+? You may not further distribute the material or use it for any profit-making activity or commercial gain
+? You may freely distribute the URL identifying the publication in the public portal ?"
+d2a5b9b8f02f39f7d9ef48d234ec61f4ddc6c291,Facial surface reconstruction in 3D format,"Journal of Theoretical and Applied Computer Science
+ISSN 2299-2634
+Vol. 6, No. 4, 2012, pp. 37-50
+http://www.jtacs.org
+Facial surface reconstruction in 3D format
+Nadezhda Shchegoleva
+Department of Mathematical Computer Software, Saint Petersburg Electrotechnical University (LETI), Russia"
+d2518b01092160cecec2e986935b0129b0bbff45,Looking around the Backyard Helps to Recognize Handwritten Digits,"#2611
+CVPR 2008 Submission #2611. CONFIDENTIAL REVIEW COPY. DO NOT DISTRIBUTE.
+#2611
+Looking around the Backyard Helps to Recognize Handwritten Digits
+Anonymous CVPR submission
+Paper ID 2611"
+d2cda0dbb8b2e83ce3e70d818f78d2add803c661,Automatic Video Captioning via Multi-channel Sequential Encoding,"Automatic Video Captioning via Multi-channel
+Sequential Encoding
+Chenyang Zhang and Yingli Tian
+Department of Electrical Engineering
+The City College of New York
+New York, NY 10031"
+d2b8459b41172dc332cf00dc18a309c442347a7d,Deep Spatial Feature Reconstruction for Partial Person Re-identification: Alignment-Free Approach,"Deep Spatial Feature Reconstruction for Partial Person Re-identification:
+Alignment-free Approach
+Lingxiao He∗1,2, Jian Liang∗1,2, Haiqing Li1,2, and Zhenan Sun1,2,3
+CRIPAC & NLPR, CASIA 2 University of Chinese Academy of Sciences, Beijing, P.R. China
+Center for Excellence in Brain Science and Intelligence Technology, CAS
+{lingxiao.he, jian.liang, hqli,"
+aa420d32c48a3fd526a91285673cd55ca9fe2447,R 4-A . 1 : Dynamics-Based Video Analytics,"R4-A.1: Dynamics-Based Video Analytics
+PARTICIPANTS
+Octavia Camps
+Mario Sznaier
+Title
+Co-PI
+Co-PI
+Faculty/Staff
+Institution
+Graduate, Undergraduate and REU Students
+Oliver Lehmann
+Mengran Gou
+Yongfang Cheng
+Yin Wang
+Sadjad Ashari-Esfeden
+Tom Hebble
+Rachel Shaff er
+Burak Yilmaz
+Degree Pursued
+MSEE/ PhD"
+aaaeca92457a72ec4e7e538cf6393c4c1dc8e670,Life-long Learning Perception using Cloud Database Technology,"Life-long Learning Perception using Cloud Database Technology
+Tim Niemueller
+Stefan Schiffer
+Gerhard Lakemeyer
+Knowledge-based Systems Group
+Safoura Rezapour Lakani
+Intelligent and Interactive Systems
+RWTH Aachen University (Aachen, Germany)
+University of Innsbruck (Innsbruck, Austria)"
+aaa021feeec2f84c4a5f3c56b4c0fecb5a85a352,A Riemannian Network for SPD Matrix Learning,"A Riemannian Network for SPD Matrix Learning
+Zhiwu Huang and Luc Van Gool
+Computer Vision Lab, ETH Zurich, Switzerland
+{zhiwu.huang,"
+aad03480c30c0a3d917d171d8d6b914026fe5105,Affordances Provide a Fundamental Categorization Principle for Visual Scenes,"Affordances
+Provide
+Fundamental
+Categorization
+Principle
+Visual
+Scenes
+Michelle
+Greene
+Christopher
+Baldassano
+Andre
+Esteva
+Diane
+(1) Stanford
+University,
+Department
+Computer
+Science
+(2) Stanford"
+aaaefba1bd0a9a9ec6c66a822d11fb907a05625c,"On Detection, Data Association and Segmentation for Multi-target Tracking.","This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TPAMI.2018.2849374, IEEE
+Transactions on Pattern Analysis and Machine Intelligence
+On Detection, Data Association and
+Segmentation for Multi-target Tracking
+Yicong Tian, Member, IEEE, Afshin Dehghan, Member, IEEE, and Mubarak Shah, Fellow, IEEE"
+aa5c2ac60a288132efeeb85c5af1fd0b39294eed,Directed Markov Stationary Features for visual classification,"978-1-4244-2354-5/09/$25.00 ©2009 IEEE
+ICASSP 2009"
+aa5efcc4331da6b1902f2c900b79120226fdcf20,A Robust Class-based Reflectance Rendering for Face Images,A ROBUST CLASS-BASED REFLECTANCE RENDERING FOR FACE IMAGES
+aa2ad3df24d8d8c4a4d2fe85f0d4e635d595f0a2,PedCut: an iterative framework for pedestrian segmentation combining shape models and multiple data cues,"F. FLOHR, D. M. GAVRILA: PEDCUT
+PedCut: an iterative framework for
+pedestrian segmentation combining
+shape models and multiple data cues
+Fabian Flohr1,2
+Dariu M. Gavrila1,2
+www.gavrila.net
+Environment Perception Department,
+Daimler R&D, Ulm, Germany
+Intelligent Systems Laboratory,
+Univ. of Amsterdam, The Netherlands"
+aa3e1824af497dc16ae27e6818a0e89c78a18371,Local Gray Code Pattern ( LGCP ) : A Robust Feature Descriptor for Facial Expression Recognition,"International Journal of Science and Research (IJSR), India Online ISSN: 2319-7064
+Local Gray Code Pattern (LGCP): A Robust
+Feature Descriptor for Facial Expression
+Recognition
+Mohammad Shahidul Islam
+Atish Dipankar University of Science & Technology, School, Department of Computer Science and Engineering, Dhaka, Bangladesh."
+aa23d33983b1abd2d8a677040eb875e93c478a7f,Measuring the Objectness of Image Windows,"Measuring the objectness of image windows
+Bogdan Alexe, Thomas Deselaers, and Vittorio Ferrari"
+aad8d2e32f1cc21eedbdd5e8ebff9f367daa6d92,Online Multi-target Tracking by Large Margin Structured Learning,"Online Multi-Target Tracking
+y Large Margin Structured Learning
+Suna Kim, Suha Kwak, Jan Feyereisl, and Bohyung Han
+Department of Computer Science and Engineering
+POSTECH, Korea"
+aaa6fe8045e1a071e1762cffe4f59e0bd508daf9,Single-Pedestrian Detection Aided by Two-Pedestrian Detection,"IEEE TRANSACTIONS PATTERN ANALYSIS AND MACHINE INTELLIGENCE
+Single-Pedestrian Detection Aided by
+-Pedestrian Detection
+Wanli Ouyang, Member, IEEE, Xingyu Zeng and Xiaogang Wang, Member, IEEE,"
+aa5fbe092f8a4dcb43c31ab93af0290900b4f0e2,Visual Question Answering using Natural Language Object Retrieval and Saliency Cues,"Visual Question Answering using Natural Language Object Retrieval and
+CS381V Final Project Report
+Saliency Cues
+Aishwarya Padmakumar
+Akanksha Saran"
+aae742779e8b754da7973949992d258d6ca26216,Robust facial expression classification using shape and appearance features,"Robust Facial Expression Classification Using Shape
+nd Appearance Features
+S L Happy and Aurobinda Routray
+Department of Electrical Engineering,
+Indian Institute of Technology Kharagpur, India"
+aad7f9eeb10d4f655c3e3d18d3542603ad3071b4,Deep Unsupervised Learning of Visual Similarities,"Deep Unsupervised Learning of Visual Similarities
+Artsiom Sanakoyeu∗, Miguel A. Bautista, Björn Ommer
+Heidelberg Collaboratory for Image Processing and Interdisciplinary Center for Scientific Computing, Heidelberg University, Germany"
+aa8cec9cec1f15f95bbe0ef4d7809e199de0f30b,Vitamin D hormone regulates serotonin synthesis. Part 1: relevance for autism.,"The FASEB Journal (cid:129) Review
+Vitamin D hormone regulates serotonin synthesis.
+Part 1: relevance for autism
+Rhonda P. Patrick1 and Bruce N. Ames1
+Nutrition and Metabolism Center, Children’s Hospital Oakland Research Institute, Oakland,
+California, USA
+Serotonin and vitamin D have been pro-"
+aa32f5b0a866b04a89f75cda32e0975a541864ff,Action-Driven Object Detection with Top-Down Visual Attentions,"Action-Driven Object Detection
+with Top-Down Visual Attentions
+Donggeun Yoo, Student Member, IEEE, Sunggyun Park, Student Member, IEEE,
+Kyunghyun Paeng, Student Member, IEEE, Joon-Young Lee, Member, IEEE,
+nd In So Kweon, Member, IEEE"
+aaa82dfc7942ae16c1d7155a109582505ccee4ec,Properties of Datasets Predict the Performance of Classifiers,"AGHAZADEH, CARLSSON: PROPERTIES OF DATASETS PREDICT THE PERFORMANCE ... 1
+Properties of Datasets Predict the
+Performance of Classifiers
+Omid Aghazadeh
+http://www.csc.kth.se/~omida
+Stefan Carlsson
+http://www.csc.kth.se/~stefanc
+Computer Vision Group
+Computer Vision and Active Perception
+Laboratory
+KTH, Sweden"
+aa52910c8f95e91e9fc96a1aefd406ffa66d797d,Face Recognition System Based on 2dfld and Pca,"FACE RECOGNITION SYSTEM BASED
+ON 2DFLD AND PCA
+Dr. Sachin D. Ruikar
+E&TC Department
+Sinhgad Academy of Engineering
+Pune, India
+Mr. Hulle Rohit Rajiv
+ME E&TC [Digital System]
+Sinhgad Academy of Engineering
+Pune, India"
+aa6854612062edff9978b33e0a410f2717bc3027,LPT: Eye Features Localizer in an N-Dimensional Image Space,"LPT: Eye Features Localizer in an N-Dimensional Image
+Space
+Mohammad Mahdi Dehshibi1, Azam Bastanfard2, and Alireza Abdi3
+Young Researchers Club, Islamic Azad University South Tehran Branch, Tehran, Iran
+IT Research Laboratory, Faculty of Engineering, Islamic Azad University Karaj Branch, Karaj, Iran
+Faculty of Electrical, Computer and IT, Islamic Azad University Qazvin Branch, Qazvin, Iran"
+aafb8dc8fda3b13a64ec3f1ca7911df01707c453,Excitation Backprop for RNNs,"Excitation Backprop for RNNs
+Sarah Adel Bargal∗1, Andrea Zunino∗ 2, Donghyun Kim1, Jianming Zhang3,
+Vittorio Murino2,4, Stan Sclaroff1
+Department of Computer Science, Boston University 2Pattern Analysis & Computer Vision (PAVIS),
+Istituto Italiano di Tecnologia 3Adobe Research 4Computer Science Department, Universit`a di Verona
+Figure 1: Our proposed framework spatiotemporally highlights/grounds the evidence that an RNN model used in producing a class label
+or caption for a given input video. In this example, by using our proposed back-propagation method, the evidence for the activity class
+CliffDiving is highlighted in a video that contains CliffDiving and HorseRiding. Our model employs a single backward pass to produce
+saliency maps that highlight the evidence that a given RNN used in generating its outputs."
+aa8c3eb6e821cb44ed5a15a2f09fba332e5561c6,Object Detection in Multi-view X-Ray Images,"Object Detection in Multi-View X-Ray Images
+Thorsten Franzel, Uwe Schmidt, and Stefan Roth
+Department of Computer Science, TU Darmstadt"
+aaba2a04c025f12f839ac71fb248da0dd6985d58,A Combined Face Recognition Approach Based on Lpd and Lvp,"VOL. 10, NO. 6, APRIL 2015 ISSN 1819-6608
+ARPN Journal of Engineering and Applied Sciences
+©2006-2015 Asian Research Publishing Network (ARPN). All rights reserved.
+www.arpnjournals.com
+A COMBINED FACE RECOGNITION APPROACH BASED ON LPD
+AND LVP
+Kabilan R.1, Ravi R.2, Rajakumar G.1, Esther Leethiya Rani S.1 and Mini Minar V. C.1
+Department of ECE, Francis Xavier Engineering College, Tirunelveli, Tamilnadu, India
+Department of IT, Francis Xavier Engineering College, Tirunelveli, Tamilnadu, India
+E-Mail:"
+aadfcaf601630bdc2af11c00eb34220da59b7559,Multi-view Hybrid Embedding: A Divide-and-Conquer Approach,"Multi-view Hybrid Embedding:
+A Divide-and-Conquer Approach
+Jiamiao Xu∗, Shujian Yu∗, Xinge You†, Senior Member, IEEE, Mengjun Leng,
+Xiao-Yuan Jing, and C. L. Philip Chen, Fellow, IEEE"
+aaa4c625f5f9b65c7f3df5c7bfe8a6595d0195a5,Biometrics in ambient intelligence,"Biometrics in Ambient Intelligence
+Massimo Tistarelli§ and Ben Schouten§§"
+aa49556ee4f1ee3fcc9f0f713c755da30b0f505c,Exactly Robust Kernel Principal Component Analysis,"Exactly Robust Kernel Principal Component
+Analysis
+Jicong Fan, Tommy W.S. Chow"
+aa261599d70a9e649501cae5cf46fbc56229fad8,The effect of the Distance in Pedestrian Detection,"Master in Computer Vision and Artificial Intelligence - Universitat Aut`onoma de Barcelona
+September 2009
+The effect of the Distance in Pedestrian Detection
+David V´azquez Berm´udez
+Computer Vision Center
+Edifici O, Universitat Aut`onoma de Barcelona
+08193, Bellaterra (Spain)
+Advisors: Dr. Antonio M. L´opez and David Ger´onimo"
+aa5ed6ee0b2fd53df5cab952aa368f8c4908ffeb,REACH - Realtime crowd tracking using a hybrid motion model,"REACH - Realtime Crowd tracking using a Hybrid motion model
+Aniket Bera1 and Dinesh Manocha1
+http://gamma.cs.unc.edu/REACH"
+aae0e417bbfba701a1183d3d92cc7ad550ee59c3,A Statistical Method for 2-D Facial Landmarking,"A Statistical Method for 2-D Facial Landmarking
+Hamdi Dibeklio˘glu, Student Member, IEEE, Albert Ali Salah, Member, IEEE, and Theo Gevers, Member, IEEE"
+aa782f4af587ee68936f0f5361fc1448ef61bdd9,Human Tracking using Wearable Sensors in the Pocket Double blind submission,"Human Tracking using Wearable Sensors in the Pocket
+Double blind submission
+Address
+e-mail address"
+aa577652ce4dad3ca3dde44f881972ae6e1acce7,Deep Attribute Networks,"Deep Attribute Networks
+Junyoung Chung
+Department of EE, KAIST
+Daejeon, South Korea
+Donghoon Lee
+Department of EE, KAIST
+Daejeon, South Korea
+Youngjoo Seo
+Department of EE, KAIST
+Daejeon, South Korea
+Chang D. Yoo
+Department of EE, KAIST
+Daejeon, South Korea"
+aa2a4f7cf8866d513053873a410879ab5b34b53a,Improving robot manipulation with data-driven object-centric models of everyday forces,"Noname manuscript No.
+(will be inserted by the editor)
+Improving Robot Manipulation with Data-Driven
+Object-Centric Models of Everyday Forces
+Advait Jain · Charles C. Kemp
+Received: date / Accepted: date"
+aa94f214bb3e14842e4056fdef834a51aecef39c,Reconhecimento de padrões faciais: Um estudo,"Reconhecimento de padrões faciais: Um estudo
+Alex Lima Silva, Marcos Evandro Cintra
+Universidade Federal
+Rural do Semi-Árido
+Departamento de Ciências Naturais
+Mossoró, RN - 59625-900
+Email:
+Resumo—O reconhecimento facial tem sido utilizado em di-
+versas áreas para identificação e autenticação de usuários. Um
+dos principais mercados está relacionado a segurança, porém há
+uma grande variedade de aplicações relacionadas ao uso pessoal,
+onveniência, aumento de produtividade, etc. O rosto humano
+possui um conjunto de padrões complexos e mutáveis. Para
+reconhecer esses padrões, são necessárias técnicas avançadas de
+reconhecimento de padrões capazes, não apenas de reconhecer,
+mas de se adaptar às mudanças constantes das faces das pessoas.
+Este documento apresenta um método de reconhecimento facial
+proposto a partir da análise comparativa de trabalhos encontra-
+dos na literatura.
+iométrica é o uso da biometria para reconhecimento, identi-"
+aac101dd321e6d2199d8c0b48c543b541c181b66,Using Context to Enhance the Understanding of Face Images,"USING CONTEXT TO ENHANCE THE
+UNDERSTANDING OF FACE IMAGES
+A Dissertation Presented
+VIDIT JAIN
+Submitted to the Graduate School of the
+University of Massachusetts Amherst in partial fulfillment
+of the requirements for the degree of
+DOCTOR OF PHILOSOPHY
+September 2010
+Department of Computer Science"
+afaa607aa9ad0e9dad0ce2fe5b031eb4e525cbd8,Towards an automatic face indexing system for actor-based video services in an IPTV environment,"J. Y. Choi et al.: Towards an Automatic Face Indexing System for Actor-based Video Services in an IPTV Environment
+Towards an Automatic Face Indexing System for Actor-based
+Video Services in an IPTV Environment
+Jae Young Choi, Wesley De Neve, and Yong Man Ro, Senior Member, IEEE"
+af6e351d58dba0962d6eb1baf4c9a776eb73533f,How to Train Your Deep Neural Network with Dictionary Learning,"How to Train Your Deep Neural Network with
+Dictionary Learning
+Vanika Singhal*, Shikha Singh+ and Angshul Majumdar#
+*IIIT Delhi
+Okhla Phase 3
+Delhi, 110020, India
++IIIT Delhi
+Okhla Phase 3
+#IIIT Delhi
+Okhla Phase 3
+Delhi, 110020, India
+Delhi, 110020, India"
+af24595c0c8f1b317b6fe2f2b49417cc40094b5c,LSH Softmax : Sub-Linear Learning and Inference of the Softmax Layer in Deep Architectures,"LSH Softmax: Sub-Linear Learning and
+Inference of the Softmax Layer in Deep
+Daniel Levy∗
+Architectures
+Danlu Chen†
+January 31, 2018"
+af62621816fbbe7582a7d237ebae1a4d68fcf97d,Active Shape Model Based Recognition Of Facial Expression,"International Journal of Engineering Research and Applications (IJERA) ISSN: 2248-9622
+International Conference on Humming Bird ( 01st March 2014)
+RESEARCH ARTICLE
+OPEN ACCESS
+Active Shape Model Based Recognition Of Facial Expression
+AncyRija V , Gayathri. S2
+AncyRijaV,Author is currently pursuing M.E (Software Engineering) in Vins Christian College of
+Engineering,
+e-mail:
+Gayathri.S, M.E., Asst.Prof.,Department of Information Technology , Vins Christian college of Engineering."
+afa57e50570a6599508ee2d50a7b8ca6be04834a,Motion in action : optical flow estimation and action localization in videos. (Le mouvement en action : estimation du flot optique et localisation d'actions dans les vidéos),"Motion in action : optical flow estimation and action
+localization in videos
+Philippe Weinzaepfel
+To cite this version:
+Philippe Weinzaepfel. Motion in action : optical flow estimation and action localization in videos.
+Computer Vision and Pattern Recognition [cs.CV]. Université Grenoble Alpes, 2016. English. <NNT :
+016GREAM013>. <tel-01407258>
+HAL Id: tel-01407258
+https://tel.archives-ouvertes.fr/tel-01407258
+Submitted on 1 Dec 2016
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de"
+af1fa9d29512fc8f4c07efdf75d3f640567a5262,Sparse Representation for Face Recognition Based on Constraint Sampling and Face Alignment,"TSINGHUA SCIENCE AND TECHNOLOGY
+ISSNll1007-0214ll08/12llpp62-67
+Volume 18, Number 1, February 2013
+Sparse Representation for Face Recognition Based on Constraint
+Sampling and Face Alignment
+Jing Wang, Guangda Su(cid:3), Ying Xiong, Jiansheng Chen, Yan Shang, Jiongxin Liu, and Xiaolong Ren"
+af9d41c598fc5ae57b20948cf664273da4664931,A comparison of crowd commotion measures from generative models,"A Comparison of Crowd Commotion Measures from Generative Models
+Sadegh Mohammadi
+Hamed Kiani
+Alessandro Perina
+Vittorio Murino
+Pattern Analysis and Computer Vision Department (PAVIS)
+Istituto Italiano di Tecnologia
+Genova, Italy"
+afb6d1e72d5b5506867a74beeb1e661599b8fff3,Dynamic Feature Learning for Partial Face Recognition,"Dynamic Feature Learning for Partial Face Recognition
+Lingxiao He1
+, Haiqing Li1
+, Qi Zhang1
+, and Zhenan Sun1
+CRIPAC & NLPR, CASIA 2 University of Chinese Academy of Sciences, Beijing, P.R. China
+Center for Excellence in Brain Science and Intelligence Technology, CAS
+{lingxiao.he, hqli, qi.zhang,"
+af9a830f62478c3638880d9a870f0b10535b3f92,Hausdorff distance-based multiresolution maps applied to image similarity measure,"Hausdorff distance-based multiresolution maps
+pplied to image similarity measure
+E. Baudrier*a, G. Millonb, F. Nicolierb, R. Seulinc and S. Ruanb
+LMA – University of La Rochelle, Avenue Cre´peau, 17000 La Rochelle, France
+CReSTIC – URCA, IUT, 9, rue de Que´bec, 10026 Troyes Cedex, France
+Le2i – CNRS UMR 5158, University of Burgundy – IUT, 12, rue de la fonderie, 71200 Le Creusot,
+France"
+af267b44c3ae6c2a0587310021a6180962e835d6,Shape and Symmetry Induction for 3D Objects,"Shape and Symmetry Induction for 3D Objects
+Shubham Tulsiani1, Abhishek Kar1, Qixing Huang2, Jo˜ao Carreira1 and Jitendra Malik1
+University of California, Berkeley 2Toyota Technological Institute at Chicago
+{shubhtuls, akar, carreira,"
+af8f59ceed0392159c3475c58af5b7ca8e4f6412,Facial Expression Recognition,"We are IntechOpen,
+the world’s leading publisher of
+Open Access books
+Built by scientists, for scientists
+,800
+16,000
+Open access books available
+International authors and editors
+Downloads
+Our authors are among the
+Countries delivered to
+TOP 1%
+2.2%
+most cited scientists
+Contributors from top 500 universities
+Selection of our books indexed in the Book Citation Index
+in Web of Science™ Core Collection (BKCI)
+Interested in publishing with us?
+Contact
+Numbers displayed above are based on latest data collected."
+afe3a0d463e2f099305c745ddbf943844583795d,Learning Visual Question Answering by Bootstrapping Hard Attention,"Learning Visual Question Answering by
+Bootstrapping Hard Attention
+Mateusz Malinowski, Carl Doersch, Adam Santoro, and Peter Battaglia
+DeepMind, London, United Kingdom"
+af97b793a61ba6e2b02d0d29503b73b5bdc2150d,Wavelet-Local binary pattern based face recognition,"I S S N 2 2 7 7 - 3 0 6 1
+V o l u m e 1 6 N u m b e r 1
+I N T E R N A T I O N A L J O U R N A L O F C O M P U T E R S & T E C H N O L O G Y
+Wavelet-Local binary pattern based face recognition
+Azad Abdullah Ameen(1), Hardi M. M-Saleh(2) ,Zrar Kh. Abdul(3)
+(1) Charmo University, College of Basic Education, Computer Department,Chamchamal, Raperin, Iraq
+(2) Charmo University, College of Basic Education, Computer Department, Chamchamal, Raperin, Iraq
+(3)Charmo University, College of Basic Education, Computer Department, Chamchamal, Raperin, Iraq"
+af8cd04bbe4902123d7042985159a6a5da9d9fb9,Représenter pour suivre : Exploitation de représentations parcimonieuses pour le suivi multi-objets. (Representing to follow: Exploitation of parsimonious representations for multi-object tracking),"Représenter pour suivre : exploitation de représentations
+parcimonieuses pour le suivi multi-objets
+Loïc Pierre Fagot-Bouquet
+To cite this version:
+Loïc Pierre Fagot-Bouquet. Représenter pour suivre : exploitation de représentations parcimonieuses
+pour le suivi multi-objets. Automatique. Université Paul Sabatier - Toulouse III, 2017. Français.
+<NNT : 2017TOU30030>. <tel-01516921v2>
+HAL Id: tel-01516921
+https://tel.archives-ouvertes.fr/tel-01516921v2
+Submitted on 4 May 2018
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de"
+afa004a8daaa7fc093a798bf97babdb00273e1a0,Experimental Study on Fast 2d Homography Estimation from a Few Point Correspondences,"Tutkimusraportti 111
+Research Report 111
+EXPERIMENTAL STUDY ON FAST 2D
+HOMOGRAPHY ESTIMATION FROM A FEW
+POINT CORRESPONDENCES
+Joni-Kristian Kämäräinen and Pekka Paalanen
+Lappeenranta University of Technology
+Faculty of Technology Management
+Department of Information Technology
+Box 20
+FIN-53851 Lappeenranta
+ISBN 978-952-214-772-1 (paperback)
+ISBN 978-952-214-773-8 (PDF)
+ISSN 0783-8069
+Lappeenranta 2009"
+af34388e69800a168876f7446a621f68ca2215c0,Low-cost scene modeling using a density function improves segmentation performance,"Low-Cost Scene Modeling using a Density Function Improves Segmentation
+Performance
+Vivek Sharma(cid:5)(cid:63), S¸ule Yildirim-Yayilgan(cid:63), and Luc Van Gool(cid:5)∓"
+af053b8cf39612cec0148e14a9c4b7a789d7db11,Paris-Lille-3D: a large and high-quality ground truth urban point cloud dataset for automatic segmentation and classification,"Paris-Lille-3D: a large and high-quality ground truth urban point cloud
+dataset for automatic segmentation and classification
+Xavier Roynard, Jean-Emmanuel Deschaud and François Goulette
+{xavier.roynard ; jean-emmanuel.deschaud ;
+Mines ParisTech, PSL Research University, Centre for Robotics"
+afb1bc830febdb9893fd938fbdb20856b4ff3922,Defoiling Foiled Image Captions,"Defoiling Foiled Image Captions
+Pranava Madhyastha, Josiah Wang and Lucia Specia
+Department of Computer Science
+University of Sheffield, UK
+{p.madhyastha, j.k.wang,"
+afb51f0e173cd9ab1d41075862945ae6bc593cde,Large databases of real and synthetic images for feature evaluation and prediction,"Large databases of real and synthetic images for
+feature evaluation and prediction
+Biliana K. Kaneva
+B.A., Computer Science and Mathematics, Smith College (2000)
+M.S., Computer Science, University of Washington (2005)
+Submitted to the Department of Electrical Engineering and Computer Science
+in partial fulfillment of the requirements for the degree of
+Doctor of Philosophy
+in Electrical Engineering and Computer Science
+t the Massachusetts Institute of Technology
+February 2012
+(cid:13) 2012 Massachusetts Institute of Technology
+All Rights Reserved.
+Author:
+Certified by:
+Certified by:
+Accepted by:
+Department of Electrical Engineering and Computer Science
+December 22, 2011
+William T. Freeman, Professor of Computer Science"
+af64854f653f2c1724d04c9657adfcdabe0f8440,Structure propagation for zero-shot learning,"Structure propagation for zero-shot learning
+Guangfeng Lina,∗, Yajun Chena, Fan Zhaoa
+Information science department, Xian University of Technology,
+5 South Jinhua Road, Xi’an, Shaanxi Province 710048, PR China"
+af740db182b541eef80bb0a2dfebd1f07bb0e316,Deformable Kernel Networks for Joint Image Filtering,"Deformable Kernel Networks for Joint Image Filtering
+Beomjun Kim, Jean Ponce, Bumsub Ham
+To cite this version:
+Beomjun Kim, Jean Ponce, Bumsub Ham. Deformable Kernel Networks for Joint Image Filtering.
+018. <hal-01857016v2>
+HAL Id: hal-01857016
+https://hal.archives-ouvertes.fr/hal-01857016v2
+Submitted on 10 Oct 2018
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de
+recherche français ou étrangers, des laboratoires
+publics ou privés."
+afc7092987f0d05f5685e9332d83c4b27612f964,Person-independent facial expression detection using Constrained Local Models,"Person-Independent Facial Expression Detection using Constrained
+Local Models
+Sien. W. Chew, Patrick Lucey, Simon Lucey, Jason Saragih, Jeffrey F. Cohn and Sridha Sridharan"
+b7a09eaadcb21bf9ab234d87c954e329518580c5,Learning to Fuse 2D and 3D Image Cues for Monocular Body Pose Estimation,"Learning to Fuse 2D and 3D Image Cues for Monocular Body Pose Estimation
+Bugra Tekin
+Pablo M´arquez-Neila
+Mathieu Salzmann
+Pascal Fua
+EPFL, Switzerland"
+b730908bc1f80b711c031f3ea459e4de09a3d324,Active Orientation Models for Face Alignment In-the-Wild,"Active Orientation Models for Face
+Alignment In-the-Wild
+Georgios Tzimiropoulos, Joan Alabort-i-Medina, Student Member, IEEE,
+Stefanos P. Zafeiriou, Member, IEEE, and Maja Pantic, Fellow, IEEE"
+b778c0e5ec6cebbabc77fc56f9b7438f2974a4ea,Altered activity of the primary visual area during gaze processing in individuals with high-functioning autistic spectrum disorder: a magnetoencephalography study.,"Altered Activity of the Primary Visual Area during Gaze Processing in
+Individuals with High-Functioning Autistic Spectrum Disorder: A
+Magnetoencephalography Study
+Naoya Hasegawaa, Hideaki Kitamuraa, Hiroatsu Murakamib, Shigeki Kameyamab, Mutsuo
+Sasagawac, Jun Egawaa, Ryu Tamuraa, Tarou Endoa, Toshiyuki Someyaa
+Department of Psychiatry, Niigata University Graduate School of Medical and Dental Sciences,
+-757 Asahimachi-dori, Chuo-ku, Niigata 951-8510, Japan
+Department of Neurosurgery, Epilepsy Center, Nishi-Niigata Chuo National Hospital, 1-14-1
+Masago, Nishi-ku, Niigata 950-2085, Japan
+Department of Psychiatry, Epilepsy Center, Nishi-Niigata Chuo National Hospital, 1-14-1
+Masago, Nishi-ku, Niigata 950-2085, Japan
+Short title:
+Altered activity of the primary visual area of autistic spectrum disorder during gaze processing
+Correspondence: Hideaki Kitamura
+Department of Psychiatry, Niigata University Graduate School of Medical and Dental Sciences,
+-757 Asahimachi-dori, Chuo-ku, Niigata 951-8510, Japan
+Tel: +81-25-227-2213; Fax: +81-25-227-0777;
+E-mail:"
+b7cf7bb574b2369f4d7ebc3866b461634147041a,From NLDA to LDA/GSVD: a modified NLDA algorithm,"Neural Comput & Applic (2012) 21:1575–1583
+DOI 10.1007/s00521-011-0728-x
+O R I G I N A L A R T I C L E
+From NLDA to LDA/GSVD: a modified NLDA algorithm
+Jun Yin • Zhong Jin
+Received: 2 August 2010 / Accepted: 3 August 2011 / Published online: 19 August 2011
+Ó Springer-Verlag London Limited 2011"
+b7894c1f805ffd90ab4ab06002c70de68d6982ab,A comprehensive age estimation on face images using hybrid filter based feature extraction,"Biomedical Research 2017; Special Issue: S610-S618
+ISSN 0970-938X
+www.biomedres.info
+A comprehensive age estimation on face images using hybrid filter based
+feature extraction.
+Karthikeyan D1*, Balakrishnan G2
+Department of ECE, Srinivasan Engineering College, Perambalur, India
+Department of Computer Science and Engineering, Indra Ganesan College of Engineering, Trichy, India"
+b7a0e7dab11781c252e1145f3526aee388b4136d,Facing humanness: Facial width-to-height ratio predicts ascriptions of humanity.,"Journal of Personality and Social
+Psychology
+Facing Humanness: Facial Width-to-Height Ratio
+Predicts Ascriptions of Humanity
+Jason C. Deska, E. Paige Lloyd, and Kurt Hugenberg
+Online First Publication, August 28, 2017. http://dx.doi.org/10.1037/pspi0000110
+CITATION
+Deska, J. C., Lloyd, E. P., & Hugenberg, K. (2017, August 28). Facing Humanness: Facial Width-to-
+Advance online publication. http://dx.doi.org/10.1037/pspi0000110"
+b7eead8586ffe069edd190956bd338d82c69f880,A Video Database for Facial Behavior Understanding,"A VIDEO DATABASE FOR FACIAL
+BEHAVIOR UNDERSTANDING
+D. Freire-Obreg´on and M. Castrill´on-Santana.
+SIANI, Universidad de Las Palmas de Gran Canaria, Spain"
+b79f3d9f8de4d1cc6679676146a40d2a8596f32d,Composing Simple Image Descriptions using Web-scale N-grams,"Proceedings of the Fifteenth Conference on Computational Natural Language Learning, pages 220–228,
+Portland, Oregon, USA, 23–24 June 2011. c(cid:13)2011 Association for Computational Linguistics"
+b7ac537d97efcb968ca8e353ff5b0563e26b9dbe,Object-Aware Dense Semantic Correspondence,"Object-aware Dense Semantic Correspondence
+Fan Yang1, Xin Li1 ∗, Hong Cheng2, Jianping Li1, Leiting Chen1
+School of Computer Science & Engineering, UESTC
+Center for Robotics, School of Automation Engineering, UESTC
+fanyang xinli"
+b797f3fa4e732d52092f9eb863350440d5de8bb1,Unsupervised Category Discovery via Looped Deep Pseudo-Task Optimization Using a Large Scale Radiology Image Database,"Unsupervised Category Discovery via Looped Deep Pseudo-Task Optimization
+Using a Large Scale Radiology Image Database
+Xiaosong Wang
+Le Lu
+Hoo-chang Shin
+Lauren Kim Isabella Nogues
+Jianhua Yao
+Ronald Summers
+Imaging Biomarkers and Computer-aided Detection Laboratory
+Department of Radiology and Imaging Sciences
+National Institutes of Health Clinical Center
+0 Center Drive, Bethesda, MD 20892"
+b7c4fe5c89df51ebd1f89a34c66b94cc6019d8e6,Model Cards for Model Reporting,"Model Cards for Model Reporting
+Margaret Mitchell, Simone Wu, Andrew Zaldivar, Parker Barnes, Lucy Vasserman, Ben
+Hutchinson, Elena Spitzer, Inioluwa Deborah Raji, Timnit Gebru"
+b7774c096dc18bb0be2acef07ff5887a22c2a848,Distance metric learning for image and webpage comparison. (Apprentissage de distance pour la comparaison d'images et de pages Web),"Distance metric learning for image and webpage
+omparison
+Marc Teva Law
+To cite this version:
+Marc Teva Law. Distance metric learning for image and webpage comparison. Image Processing. Uni-
+versité Pierre et Marie Curie - Paris VI, 2015. English. <NNT : 2015PA066019>. <tel-01135698v2>
+HAL Id: tel-01135698
+https://tel.archives-ouvertes.fr/tel-01135698v2
+Submitted on 18 Mar 2015
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de
+recherche français ou étrangers, des laboratoires"
+b7f05d0771da64192f73bdb2535925b0e238d233,Robust Active Shape Model using AdaBoosted Histogram Classifiers,"MVA2005 IAPR Conference on Machine VIsion Applications, May 16-18, 2005 Tsukuba Science City, Japan
+Robust Active Shape Model using AdaBoosted Histogram Classifiers
+Yuanzhong Li
+W ataru Ito
+Imaging Software Technology Center
+Imaging Software Technology Center
+FUJI PHOTO FILM CO., LTD.
+fujifilm.co.jp
+FUJI PHOTO FILM CO., LTD.
+fujifilm.co.jp"
+b701f11ecf5d465c7d5c427914db2ad8c97bb8a9,JointGAN: Multi-Domain Joint Distribution Learning with Generative Adversarial Nets,"JointGAN: Multi-Domain Joint Distribution Learning with
+Generative Adversarial Nets
+Yunchen Pu 1 Shuyang Dai 2 Zhe Gan 3 Weiyao Wang 2 Guoyin Wang 2 Yizhe Zhang 3 Ricardo Henao 2
+Lawrence Carin 2"
+b755505bdd5af078e06427d34b6ac2530ba69b12,NFRAD: Near-Infrared Face Recognition at a Distance,"To appear in the International Joint Conf. Biometrics, Washington D.C., October, 2011
+NFRAD: Near-Infrared Face Recognition at a Distance
+Hyunju Maenga, Hyun-Cheol Choia, Unsang Parkb, Seong-Whan Leea and Anil K. Jaina,b
+Dept. of Brain and Cognitive Eng. Korea Univ., Seoul, Korea
+Dept. of Comp. Sci. & Eng. Michigan State Univ., E. Lansing, MI, USA 48824
+{hjmaeng, ,"
+b7b461f82c911f2596b310e2b18dd0da1d5d4491,K-mappings and Regression trees,"014 IEEE International Conference on Acoustic, Speech and Signal Processing (ICASSP)
+978-1-4799-2893-4/14/$31.00 ©2014 IEEE
+K-MAPPINGS AND REGRESSION TREES
+SAMSI and Duke University
+. INTRODUCTION
+rgminM1,...,MK
+P1,...PK
+Arthur Szlam†
+.1. Partitioning Y
+K(cid:2)
+(cid:2)
+(cid:3)
+(cid:4)"
+b732393cd3877f7e6d3cf3ca033a42415bd6db56,Statistical and Geometric Modeling of Spatio-Temporal Patterns for Video Understanding,
+b73fdae232270404f96754329a1a18768974d3f6,Local Relation Map : A Novel Illumination Invariant Face Recognition Approach Regular Paper,
+b76af8fcf9a3ebc421b075b689defb6dc4282670,Face Mask Extraction in Video Sequence,"Face Mask Extraction in Video Sequence
+Yujiang Wang 1 · Bingnan Luo 1 · Jie Shen 1 · Maja Pantic 1"
+b75df22c7c52b8d85dd7f155f7b495907ff3561f,Benchmark data and method for real-time people counting in cluttered scenes using depth sensors,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, APRIL 2018
+Benchmark data and method for real-time people
+ounting in cluttered scenes using depth sensors
+ShiJie Sun, Naveed Akhtar, HuanSheng Song, ChaoYang Zhang, JianXin Li, Ajmal Mian
+Computer Vision techniques are well-suited to the problem
+of automatic people counting for public transportations. How-
+ever, using conventional RGB videos for this purpose is chal-
+lenged by multiple issues resulting from real-world conditions
+such as clutter, occlusions, illumination variations, handling
+shadows etc. In comparison to the conventional video systems,
+RGB-D cameras (e.g. Kinect V1 [4], Prime Sense Camera [5])
+an mitigate these issues by providing ‘depth’ information
+of the scene in addition to its color video. Nevertheless,
+effective people counting in real-world conditions using depth
+information still remains a largely unsolved problem due to
+noise and occlusion [6]."
+b73a6c7083f3dbc8b355f934aaf84438c10a7963,The 54th Annual Meeting of the Association for Computational Linguistics,"The54thAnnualMeetingoftheAssociationforComputationalLinguisticsProceedingsoftheConference,Vol.2(ShortPapers)August7-12,2016Berlin,Germany"
+b774d7c951b9c444572085e15f6a81a063abf123,Diversity Regularized Spatiotemporal Attention for Video-based Person Re-identification,"FeaturesSpatial  AttentionTemporal  Attention1                      2                      3                              N‘face’‘torso’‘bag’Figure1.SpatiotemporalAttention.Inchallengingvideore-identificationscenarios,apersonisrarelyfullyvisibleinallframes.However,framesinwhichonlypartofthepersonisvis-ibleoftencontainusefulinformation.Forexample,thefaceisclearlyvisibleintheframes1and2,thetorsoinframe2,andthehandbaginframes2,3andN.Insteadofaveragingfullframefeaturesacrosstime,weproposeanewspatiotemporalapproachwhichlearnstodetectasetofKdiversesalientimageregionswithineachframe(superimposedheatmaps).Anaggregaterep-resentationofeachbodypartisthenproducedbycombiningtheextractedper-frameregionsacrosstime(weightsshownaswhitetext).Ourspatiotemporalapproachcreatesacompactencodingofthevideothatexploitsusefulpartialinformationineachframebyleveragingmultiplespatialattentionmodels,andcombiningtheiroutputsusingmultipletemporalattentionmodels.personre-identification,whichisageneralizationofthestandardimage-basedre-identificationtask.InsteadofarXiv:1803.09882v1 [cs.CV] 27 Mar 2018"
+b7f0d1d65763fb57ee9a3624116a42a2fe763707,Predicting psychological attributions from face photographs with a deep neural network,"Predicting psychological attributions from face
+photographs with a deep neural network
+Edward Grant1∗, Stephan Sahm1∗, Mariam Zabihi1∗, Marcel van Gerven1
+Radboud University, Nijmegen, the Netherlands
+Denotes equal contribution"
+b7c4b22d44be82b2e1074c5c40b76461db4b0292,Generating Multiple Diverse Hypotheses for Human 3D Pose Consistent with 2D Joint Detections,"Generating Multiple Diverse Hypotheses for Human 3D Pose Consistent with 2D
+Joint Detections
+Ehsan Jahangiri, Alan L. Yuille
+Johns Hopkins University, Baltimore, USA"
+b705ca751a947e3b761e2305b41891051525d9df,Exploring Context with Deep Structured Models for Semantic Segmentation,"Exploring Context with Deep Structured models
+for Semantic Segmentation
+Guosheng Lin, Chunhua Shen, Anton van den Hengel, Ian Reid"
+b7207c142b0b9f4def3ae7cd07ce50ca31d930e8,Human Age Group Prediction from Unknown Facial Image,"Volume 7, Issue 5, May 2017 ISSN: 2277 128X
+International Journal of Advanced Research in
+Computer Science and Software Engineering
+Research Paper
+Available online at: www.ijarcsse.com
+Human Age Group Prediction from Unknown Facial Image
+Arumugam P, 2Muthukumar S, 3Selva Kumar S, 4Gayathri
+Department of Statistics, 2, 4 Department of CSE, 3Research Scholar
+, 3 Manonmaniam Sundaranar University, Tirunelveli, Tamilnadu, India
+, 4 Varuvan Vadivelan Institute of Technology, Dharmapuri, Tamilnadu, India
+DOI: 10.23956/ijarcsse/SV7I5/0103"
+b768cb6fc2616f3dbe9ef4e25dedd7d95781ba66,Distribution Matching in Variational Inference,"Distribution Matching in Variational Inference
+Mihaela Rosca Balaji Lakshminarayanan
+Shakir Mohamed
+DeepMind"
+b7b23814948afc5525975ed44f3dd247100e6722,Relevant Feature Selection for Human Pose Estimation and Localization in Cluttered Images,"Relevant Feature Selection for Human Pose Estimation
+nd Localization in Cluttered Images
+Ryuzo Okada(cid:2) and Stefano Soatto
+Computer Science Department, University of California, Los Angeles"
+b7216846c743d94fcd43e1b543c9d16ae11d3c48,Engaging Image Chat: Modeling Personality in Grounded Dialogue,"Engaging Image Chat: Modeling Personality in Grounded Dialogue
+Kurt Shuster Samuel Humeau Antoine Bordes Jason Weston
+{kshuster, samuelhumeau, abordes, jase}
+Facebook AI Research"
+b7f7a4df251ff26aca83d66d6b479f1dc6cd1085,Handling missing weak classifiers in boosted cascade: application to multiview and occluded face detection,"Bouges et al. EURASIP Journal on Image and Video Processing 2013, 2013:55
+http://jivp.eurasipjournals.com/content/2013/1/55
+RESEARCH
+Open Access
+Handling missing weak classifiers in boosted
+ascade: application to multiview and
+occluded face detection
+Pierre Bouges1*, Thierry Chateau1*, Christophe Blanc1 and Gaëlle Loosli2"
+b7d425ea6b476c4af208a6b6a9e84ab17921dab4,Heuristic-based Automatic Face Detection,"HEURISTIC-BASED AUTOMATIC FACE DETECTION
+Geovany Ramírez1, Vittorio Zanella1,2, Olac Fuentes2
+Universidad Popular Autónoma del Estado de Puebla
+1 sur #1103 Col. Santiago Puebla 72160, México
+Instituto Nacional de Astrofísica Optica y Electrónica
+Luis Enrique Erro #1 Sta. María Tonantzintla Puebla 72840, México
+E-mail:"
+b7a827bb393361c309fbba652967dee11d16857c,Comparative Analysis of various Illumination Normalization Techniques for Face Recognition,"International Journal of Computer Applications (0975 – 8887)
+Volume 28– No.9, August 2011
+Comparative Analysis of various Illumination
+Normalization Techniques for Face Recognition
+Tripti Goel
+GPMCE, Delhi
+Vijay Nehra
+BPSMV, Khanpur
+Virendra P.Vishwakarma
+JIIT, Noida
+explained"
+b704f8360c369e65f0826ca23dac2d4e221d8997,A Knowledge Base for Automatic Feature Recognition from Point Clouds in an Urban Scene,"Article
+A Knowledge Base for Automatic Feature Recognition
+from Point Clouds in an Urban Scene
+Xu-Feng Xing 1,2,* ID , Mir-Abolfazl Mostafavi 1,2 ID and Seyed Hossein Chavoshi 1,2
+Department of Geomatics Sciences, Université Laval, Québec, QC G1V 0A6, Canada;
+(M.-A.M.); (S.H.C.)
+Center for Research in Geomatics, Université Laval, Québec, QC G1V 0A6, Canada
+* Correspondence: Tel.: +1-581-888-9786
+Received: 4 October 2017; Accepted: 11 January 2018; Published: 16 January 2018"
+b7c2173668a4c23b79450111887d8b1e4199f89c,Complex event recognition by latent temporal models of concepts,"COMPLEX EVENT RECOGNITION BY LATENT TEMPORAL MODELS OF CONCEPTS
+Ehsan Zare Borzeshi1, Afshin Dehghan2, Massimo Piccardi1, and Mubarak Shah2
+School of Computing and Communications, University of Technology, Sydney (UTS)1,
+Centre for Research in Computer Vision, University of Central Florida (UCF)2"
+db85195e171f7b75e4e6f99ed3029d31ee557e13,the influence of a verticality metaphor in the processing of happy and sad faces,"RIPS / IRSP, 27 (2), 51-77 © 2014, Presses universitaires de Grenoble
+the influence of a verticality metaphor
+in the processing of happy and sad faces
+L’influence de la métaphore de verticalité sur le traitement
+des émotions faciales de gaieté et de tristesse
+Timothée Mahieu*,**
+Olivier Corneille**
+Vincent Y. Yzerbyt**
+Key-words
+Metaphorical thinking,
+grounded cognition,
+facial emotions, gender
+Mots-clés
+Pensée métaphorique,
+ognition incarnée,
+émotions faciales,
+genre"
+db227f72bb13a5acca549fab0dc76bce1fb3b948,Characteristic Based Image Search Using Re-Ranking Method,"International Refereed Journal of Engineering and Science (IRJES)
+ISSN (Online) 2319-183X, (Print) 2319-1821
+Volume 4, Issue 6 (June 2015), PP.169-169-174
+Characteristic Based Image Search using Re-Ranking method
+Chitti Babu, 2Yasmeen Jaweed, 3G.Vijay Kumar
+,2,3Computer Science Engineering Dept, Sree Dattha Institute of Engineering & Science"
+dbaf89ca98dda2c99157c46abd136ace5bdc33b3,Nonlinear Cross-View Sample Enrichment for Action Recognition,"Nonlinear Cross-View Sample Enrichment for
+Action Recognition
+Ling Wang, Hichem Sahbi
+Institut Mines-T´el´ecom; T´el´ecom ParisTech; CNRS LTCI"
+dbe101c7c4b5ea5986be38e4d6de70bfc4324683,1 Deep convolutional neural networks capabilities for 2 binary classification of polar mesocyclones in 3 satellite mosaics 4,"Preprints (www.preprints.org) | NOT PEER-REVIEWED | Posted: 29 October 2018 doi:10.20944/preprints201809.0361.v3
+Article
+Deep convolutional neural networks capabilities for
+inary classification of polar mesocyclones in
+satellite mosaics
+Mikhail Krinitskiy 1,*, Polina Verezemskaya 1,2, Kirill Grashchenkov1,3, Natalia Tilinina1,
+Sergey Gulev1 and Matthew Lazzara 4
+Shirshov Institute of Oceanology, Russian Academy of Sciences, Moscow, Russia;
+Research Computing Center of Lomonosov Moscow State University, Moscow, Russia
+Moscow Institute of Physics and Technology, Moscow, Russia
+University of Wisconsin-Madison and Madison Area Technical College, Madison, Wisconsin, USA
+* Correspondence: Tel.: +7-926-141-6200"
+dbe255d3d2a5d960daaaba71cb0da292e0af36a7,Evolutionary Cost-Sensitive Extreme Learning Machine,"Evolutionary Cost-sensitive Extreme Learning
+Machine
+Lei Zhang, Member, IEEE, and David Zhang, Fellow, IEEE"
+db480f100004e3ef075f9404041fe4f89fcf4e0c,Human Pose Estimation for RGBD Imagery with Multi-Channel Mixture of Parts and Kinematic Constraints,"Human Pose Estimation for RGBD Imagery with Multi-Channel
+Mixture of Parts and Kinematic Constraints
+ENRIQUE MARTINEZ-BERTI
+Universitat Politecnica de Valencia
+Instituto AI2
+Camino de Vera s/n, Valencia
+SPAIN
+ANTONIO J. SNCHEZ-SALMERN
+Universitat Politecnica de Valencia
+CARLOS RICOLFE-VIALA
+Universitat Politecnica de Valencia
+Instituto AI2
+Camino de Vera s/n, Valencia
+SPAIN
+Instituto AI2
+Camino de Vera s/n, Valencia
+SPAIN
+Center for Research in Computer Vision
+Center for Research in Computer Vision
+OLIVER NINA"
+db6d00f9237cce392c08b422662b48baa2ed1b80,A New Framework for Biometric Face Recognition Using Visual,"Annals of DAAAM for 2012 & Proceedings of the 23rd International DAAAM Symposium, Volume 23, No.1, ISSN 2304-1382
+ISBN 978-3-901509-91-9, CDROM version, Ed. B. Katalinic, Published by DAAAM International, Vienna, Austria, EU, 2012
+Make Harmony between Technology and Nature, and Your Mind will Fly Free as a Bird
+Annals & Proceedings of DAAAM International 2012
+A NEW FRAMEWORK FOR BIOMETRIC FACE RECOGNITION USING VISUAL
+CRYPTOGRAPY
+MIHAILESCU, M[arius] I[ulian] & PIRLOAGA, M[arian] D[orin]"
+dba3ec4420a0bcca3264f75f4c975cabdbb1af74,"""Edutainment 2017"" a visual and semantic representation of 3D face model for reshaping face in images","J Vis (2018) 21:649–660
+https://doi.org/10.1007/s12650-018-0476-4
+R E G UL A R P A P E R
+Jiang Du • Dan Song • Yanlong Tang • Ruofeng Tong • Min Tang
+‘‘Edutainment 2017’’ a visual and semantic
+representation of 3D face model for reshaping face
+in images
+Received: 15 September 2017 / Revised: 20 December 2017 / Accepted: 22 January 2018 / Published online: 16 February 2018
+Ó The Visualization Society of Japan 2018"
+db24a2c27656db88486479b26f99d8754a44f4b8,Age estimation via face images : a survey,"Angulu et al. EURASIP Journal on Image and Video
+Processing (2018) 2018:42
+https://doi.org/10.1186/s13640-018-0278-6
+EURASIP Journal on Image
+nd Video Processing
+REVIEW
+Open Access
+Age estimation via face images: a survey
+Raphael Angulu1*†
+, Jules R. Tapamo2 and Aderemi O. Adewumi1"
+dbb0a527612c828d43bcb9a9c41f1bf7110b1dc8,Machine Learning Techniques for Face Analysis,"Chapter 7
+Machine Learning Techniques
+for Face Analysis
+Roberto Valenti, Nicu Sebe, Theo Gevers, and Ira Cohen"
+dbb065aa2a6e6804e0ab8aee27314a6f68c4cde1,Advanced Hypothesis Testing Techniques and Their Application to Image Classification Advanced Hypothesis Testing Techniques and Their Application to Image Classification Title: Advanced Hypothesis Testing Techniques and Their Application to Image Classification Acknowledgements,"Dipartimento di Informatica e
+Scienze dell’Informazione
+•• ••
+Advanced Hypothesis testing techniques and their
+pplication to image classification
+Emanuele Franceschi
+Theses Series
+DISI-TH-2005-XX
+DISI, Universit`a di Genova
+v. Dodecaneso 35, 16146 Genova, Italy
+http://www.disi.unige.it/"
+db458242dd526d84579aeee563355ca1a7dea5ea,Face Detection in Nighttime Images Using Visible-Light Camera Sensors with Two-Step Faster Region-Based Convolutional Neural Network,"Article
+Face Detection in Nighttime Images Using
+Visible-Light Camera Sensors with Two-Step Faster
+Region-Based Convolutional Neural Network
+Se Woon Cho, Na Rae Baek, Min Cheol Kim, Ja Hyung Koo, Jong Hyun Kim and
+Kang Ryoung Park *
+Division of Electronics and Electrical Engineering, Dongguk University, 30 Pil-dong-ro 1-gil, Jung-gu,
+Seoul 04620, Korea; (S.W.C.); (N.R.B.);
+(M.C.K.); (J.H.K.); (J.H.K.)
+* Correspondence: Tel.: +82-10-3111-7022; Fax: +82-2-2277-8735
+Received: 31 July 2018; Accepted: 4 September 2018; Published: 7 September 2018"
+dbb7f37fb9b41d1aa862aaf2d2e721a470fd2c57,Face image analysis with convolutional neural networks,"Face Image Analysis With
+Convolutional Neural Networks
+Dissertation
+Zur Erlangung des Doktorgrades
+der Fakult¨at f¨ur Angewandte Wissenschaften
+n der Albert-Ludwigs-Universit¨at Freiburg im Breisgau
+Stefan Duffner"
+db625c4c26c7df67c9099e78961d479532628ec7,"All-in Text: Learning Document, Label, and Word Representations Jointly","Proceedings of the Thirtieth AAAI Conference on Artificial Intelligence (AAAI-16)
+All-in Text: Learning Document, Label, and Word Representations Jointly
+Jinseok Nam, Eneldo Loza Menc´ıa, Johannes F¨urnkranz
+Knowledge Discovery in Scientific Literature, TU Darmstadt
+Knowledge Engineering Group, TU Darmstadt
+Research Training Group AIPHES, TU Darmstadt"
+dbb7b563e84903dad4953a8e9f23e3c54c6d7e78,Joint Person Re-identification and Camera Network Topology Inference in Multiple Cameras,"Joint Person Re-identification and Camera Network
+Topology Inference in Multiple Cameras
+Yeong-Jun Cho, Su-A Kim*, Jae-Han Park*, Kyuewang Lee, Student Member, IEEE
+nd Kuk-Jin Yoon, Member, IEEE"
+dbd5e9691cab2c515b50dda3d0832bea6eef79f2,Image - based Face Recognition : Issues and Methods 1,"Image-basedFaceRecognition:IssuesandMethods
+WenYiZhao
+RamaChellappa
+Sarno(cid:11)Corporation
+CenterforAutomationResearch
+
+UniversityofMaryland
+Princeton,NJ
+CollegePark,MD
+db186bd2a276a574b2246e3e4d136f8a07c53ff2,Verisimilar Percept Sequences Tests for Autonomous Driving Intelligent Agent Assessment,"Verisimilar Percept Sequences Tests for
+Autonomous Driving Intelligent Agent Assessment
+Thomio Watanabe
+University of Sao Paulo
+Denis Wolf
+University of Sao Paulo"
+db67edbaeb78e1dd734784cfaaa720ba86ceb6d2,SPECFACE — A dataset of human faces wearing spectacles,"SPECFACE - A Dataset of Human Faces Wearing Spectacles
+Anirban Dasgupta, Shubhobrata Bhattacharya and Aurobinda Routray
+Indian Institute of Technology Kharagpur
+India"
+db0d33590dc15de2d30cf0407b7a26ae79cd51b5,Deep Probabilistic Modeling of Natural Images using a Pyramid Decomposition,"Deep Probabilistic Modeling of Natural Images using a Pyramid Decomposition
+Alexander Kolesnikov
+IST Austria, Am Campus 1, Klosterneuburg, 3400 Austria
+Christoph H. Lampert
+IST Austria, Am Campus 1, Klosterneuburg, 3400 Austria"
+a83fc450c124b7e640adc762e95e3bb6b423b310,Deep Face Feature for Face Alignment and Reconstruction,"Deep Face Feature for Face Alignment
+Boyi Jiang, Juyong Zhang, Bailin Deng, Yudong Guo and Ligang Liu"
+a84032e66db042a57722b4a3bc7301ebe567fb8b,"IJSRD - International Journal for Scientific Research & Development| Vol. 3, Issue 01, 2015 | ISSN (online): 2321-0613","IJSRD - International Journal for Scientific Research & Development| Vol. 3, Issue 01, 2015 | ISSN (online): 2321-0613
+Review of Energy Enhancements of Modified LEACH
+Kirti Sharma1
+Department of Electronics & Communication Engineering
+Maharishi Ved Vyas Engineering College, Jagadhri, India
+using
+minimized"
+a85e9e11db5665c89b057a124547377d3e1c27ef,Dynamics of Driver's Gaze: Explorations in Behavior Modeling and Maneuver Prediction,"Dynamics of Driver’s Gaze: Explorations in
+Behavior Modeling & Maneuver Prediction
+Sujitha Martin, Member, IEEE, Sourabh Vora, Kevan Yuen, and Mohan M. Trivedi, Fellow, IEEE"
+a8ed00afc46064b18a6bcc7aa282e554891eacf2,Underwater image restoration: super-resolution and deblurring via sparse representation and denoising by means of marine snow removal,"Underwater Image Restoration:
+Super-resolution and Deblurring via Sparse Representation and
+Denoising by Means of Marine Snow Removal
+Dissertation
+Erlangung des akademischen Grades
+Doktor-Ingenieur (Dr.-Ing)
+der Fakultät für Informatik und Elektrotechnik
+der Universität Rostock
+vorgelegt von
+Fahimeh Farhadifard
+geb. am 05.11.1985 in Mashhad/Iran
+us Rostock
+Rostock, den 27. Oktober 2017"
+a8420e7fa53b81b8069ced8d9c743c141e2fc432,Real-Time Multiple Object Tracking - A Study on the Importance of Speed,"Real-TimeMultipleObjectTrackingAStudyontheImportanceofSpeedSAMUELMURRAYMaster’sProgramme,MachineLearningDate:September28,2017Supervisor:KevinSmithExaminer:HedvigKjellströmPrincipal:HelmutPrendinger,NationalInstituteofInformatics,TokyoSwedishtitle:IdentifieringavrörligaobjektirealtidSchoolofComputerScienceandCommunication"
+a856449c724f958dbb2f0629228d26a322153ba3,Face Mask Extraction in Video Sequence,"Face Mask Extraction in Video Sequence
+Yujiang Wang 1 · Bingnan Luo 1 · Jie Shen 1 · Maja Pantic 1"
+a8117a4733cce9148c35fb6888962f665ae65b1e,A Good Practice Towards Top Performance of Face Recognition: Transferred Deep Feature Fusion,"IEEE TRANSACTIONS ON XXXX, VOL. XX, NO. XX, XX 201X
+A Good Practice Towards Top Performance of Face
+Recognition: Transferred Deep Feature Fusion
+Lin Xiong1∗†, Jayashree Karlekar1∗, Jian Zhao2∗†, Jiashi Feng2, Member, IEEE, Sugiri Pranata1, and
+Shengmei Shen1"
+a8788ce65d01018a0e1b4cdaf6466f495e68f7e3,A Probabilistic Retrieval Model for Word Spotting based on Direct Attribute Prediction,"A Probabilistic Retrieval Model
+for Word Spotting based on
+Direct Attribute Prediction
+Eugen Rusakov, Leonard Rothacker, Hyunho Mo, and Gernot A. Fink
+Department of Computer Science
+TU Dortmund University
+4221 Dortmund, Germany
+Email:{eugen.rusakov, leonard.rothacker, hyunho.mo,"
+a8d3dc5c68032c60ebbe3b547ac948d7cf8dd1d8,Multi-Label Zero-Shot Learning via Concept Embedding,"Multi-Label Zero-Shot Learning via Concept
+Embedding
+Ubai Sandouk and Ke Chen"
+a87ab836771164adb95d6744027e62e05f47fd96,Understanding human-human interactions: a survey,"Understanding human-human interactions: a survey
+Alexandros Stergiou
+Department of Information and Computing Sciences, Utrecht University,Buys Ballotgebouw, Princetonplein 5, Utrecht, 3584CC, Netherlands
+Department of Information and Computing Sciences, Utrecht University,Buys Ballotgebouw, Princetonplein 5, Utrecht, 3584CC, Netherlands
+Ronald Poppe1"
+a81d396c9210282d461f9f08b7b9794b096ecdfe,FFDNet: Toward a Fast and Flexible Solution for CNN-Based Image Denoising,"FFDNet: Toward a Fast and Flexible Solution for
+CNN based Image Denoising
+Kai Zhang, Wangmeng Zuo, Senior Member, IEEE, and Lei Zhang, Fellow, IEEE"
+a8e5d204549fcf93c5bea88b0f99a2e4da9648e7,Neuropeptidergic regulation of affiliative behavior and social bonding in animals.,"www.elsevier.com/locate/yhbeh
+Neuropeptidergic regulation of affiliative behavior and
+social bonding in animals
+Miranda M. Lim 1, Larry J. Young ⁎
+Center for Behavioral Neuroscience, Department of Psychiatry and Behavioral Sciences, and 954 Gatewood Road Yerkes National Primate Research Center,
+Emory University, Atlanta, GA 30322, USA
+Received 16 May 2006; revised 26 June 2006; accepted 27 June 2006
+Available online 4 August 2006"
+a88640045d13fc0207ac816b0bb532e42bcccf36,Simultaneously Learning Neighborship and Projection Matrix for Supervised Dimensionality Reduction,"ARXIV VERSION
+Simultaneously Learning Neighborship and
+Projection Matrix for Supervised
+Dimensionality Reduction
+Yanwei Pang, Senior Member, IEEE, Bo Zhou, and Feiping Nie, Senior Member, IEEE"
+a8d41c63462da7dbddf4094eddaa0bb6d72d0fdc,A Semantic-based Method for Visualizing Large Image Collections.,"A Semantic-based Method for
+Visualizing Large Image Collections
+Xiao Xie, Xiwen Cai, Junpei Zhou, Nan Cao, Yingcai Wu"
+a8eebadc262594d1ca86d5520f312c1779d00b33,Improved Minimum Squared Error Algorithm with Applications to Face Recognition,"Improved Minimum Squared Error Algorithm with
+Applications to Face Recognition
+Qi Zhu1,2,3, Zhengming Li1,3,4, Jinxing Liu5, Zizhu Fan1,6, Lei Yu7, Yan Chen8*
+Bio-Computing Center, Harbin Institute of Technology Shenzhen Graduate School, Shenzhen, China, 2 School of Optical-Electrical and Computer Engineering, University
+of Shanghai for Science and Technology, Shanghai, China, 3 Key Laboratory of Network Oriented Intelligent Computation, Shenzhen, China, 4 Guangdong Industrial
+Training Center, Guangdong Polytechnic Normal University, Guangzhou, China, 5 College of Information and Communication Technology, Qufu Normal University, Rizhao,
+China, 6 School of Basic Science, East China Jiaotong University, Nanchang, China, 7 School of Urban Planning and Management, Harbin Institute of Technology Shenzhen
+Graduate School, Shenzhen, China, 8 Shenzhen Sunwin Intelligent Co., Ltd., Shenzhen, China"
+a8a30a8c50d9c4bb8e6d2dd84bc5b8b7f2c84dd8,This is a repository copy of Modelling of Orthogonal Craniofacial Profiles,"This is a repository copy of Modelling of Orthogonal Craniofacial Profiles.
+White Rose Research Online URL for this paper:
+http://eprints.whiterose.ac.uk/131767/
+Version: Published Version
+Article:
+Dai, Hang, Pears, Nicholas Edwin orcid.org/0000-0001-9513-5634 and Duncan, Christian
+(2017) Modelling of Orthogonal Craniofacial Profiles. Journal of Imaging. ISSN 2313-433X
+https://doi.org/10.3390/jimaging3040055
+Reuse
+This article is distributed under the terms of the Creative Commons Attribution (CC BY) licence. This licence
+llows you to distribute, remix, tweak, and build upon the work, even commercially, as long as you credit the
+uthors for the original work. More information and the full terms of the licence here:
+https://creativecommons.org/licenses/
+Takedown
+If you consider content in White Rose Research Online to be in breach of UK law, please notify us by
+emailing including the URL of the record and the reason for the withdrawal request.
+https://eprints.whiterose.ac.uk/"
+a8638a07465fe388ae5da0e8a68e62a4ee322d68,How to predict the global instantaneous feeling induced by a facial picture?,"How to predict the global instantaneous feeling induced
+y a facial picture?
+Arnaud Lienhard, Patricia Ladret, Alice Caplier
+To cite this version:
+Arnaud Lienhard, Patricia Ladret, Alice Caplier. How to predict the global instantaneous
+feeling induced by a facial picture?. Signal Processing: Image Communication, Elsevier, 2015,
+pp.1-30. .
+HAL Id: hal-01198718
+https://hal.archives-ouvertes.fr/hal-01198718
+Submitted on 14 Sep 2015
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destin´ee au d´epˆot et `a la diffusion de documents
+scientifiques de niveau recherche, publi´es ou non,
+´emanant des ´etablissements d’enseignement et de"
+a8948941f7a24c09cd7c26f3635d8571c7998570,Face recognition of Pose and Illumination changes using Extended ASM and Robust sparse coding,"IOSR Journal of Dental and Medical Sciences (IOSR-JDMS)
+e-ISSN: 2279-0853, p-ISSN: 2279-0861.Volume 13, Issue 3 Ver. VI. (Mar. 2014), PP 49-54
+www.iosrjournals.org
+Face recognition of Pose and Illumination changes using
+Extended ASM and Robust sparse coding
+Arulmurugan R1, Laxmi Priya M.R2
+(Information Technology, Bannari Amman Institute of Technology, India)
+(Information Technology, Bannari Amman Institute of Technology, India)"
+a8e75978a5335fd3deb04572bb6ca43dbfad4738,Sparse Graphical Representation based Discriminant Analysis for Heterogeneous Face Recognition,"Sparse Graphical Representation based Discriminant
+Analysis for Heterogeneous Face Recognition
+Chunlei Peng, Xinbo Gao, Senior Member, IEEE, Nannan Wang, Member, IEEE, and Jie Li"
+a8f032b300b99dedb9c0f8362557302696d5ee9a,Intelligent Video Object Classification Scheme using Offline Feature Extraction and Machine Learning based Approach,"Intelligent Video Object Classification Scheme using Offline Feature Extraction and
+Machine Learning based Approach
+Chandra Mani Sharma1, Alok Kumar Singh Kushwaha2 ,Rakesh Roshan3 , Rabins Porwal4 and Ashish Khare5
+,3,4Department of Information Technology, Institute of Technology and Science
+Ghaziabad, U.P., India
+Department of Computer Engg. and Application, G.L.A. University,
+Mathura, U.P., India
+5 Department of Electronics and Communication, University of Allahabad,
+U.P., India"
+a8eeace37181dd87d5125c213add6e15fdd9d9f7,Approximate Fisher Kernels of Non-iid Image Models for Image Categorization,"Approximate Fisher Kernels of non-iid Image
+Models for Image Categorization
+Ramazan Gokberk Cinbis, Jakob Verbeek, and Cordelia Schmid, Fellow, IEEE"
+a81769a36c9ed7b6146a408eb253eb8e0d3ad41e,Super-Fine Attributes with Crowd Prototyping.,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+Super-Fine Attributes
+with Crowd Prototyping
+Daniel Martinho-Corbishley, Mark S. Nixon and John N. Carter"
+ded968b97bd59465d5ccda4f1e441f24bac7ede5,Large scale 3 D Morphable Models,"Noname manuscript No.
+(will be inserted by the editor)
+Large scale 3D Morphable Models
+James Booth · Anastasios Roussos · Allan Ponniah · David Dunaway · Stefanos
+Zafeiriou
+Received: date / Accepted: date"
+de0eb358b890d92e8f67592c6e23f0e3b2ba3f66,Inference-Based Similarity Search in Randomized Montgomery Domains for Privacy-Preserving Biometric Identification,"ACCEPTED BY IEEE TRANS. PATTERN ANAL. AND MACH. INTELL.
+Inference-Based Similarity Search in
+Randomized Montgomery Domains for
+Privacy-Preserving Biometric Identification
+Yi Wang, Jianwu Wan, Jun Guo, Yiu-Ming Cheung, and Pong C Yuen"
+de87a5d5fbae0733806ba965b2d70fd04596f6e9,Predictive control for autonomous driving with experimental evaluation on a heavy-duty construction truck,"Predictive control for autonomous driving
+with experimental evaluation on a heavy-duty construction truck
+PEDRO F. LIMA
+Licenciate Thesis
+Stockholm, Sweden 2016"
+de86a9f484addcfee57a6f5a9224aa77bd23345b,Face Recognition Using Elastic Bunch Graph Matching,"International Journal For Technological Research In Engineering
+Volume 2, Issue 11, July-2015
+ISSN (Online): 2347 - 4718
+FACE RECOGNITION USING ELASTIC BUNCH GRAPH
+MATCHING
+Sandeep R1, D Jayakumar2
+Dept. of ECE, Kuppam Engineering College, Chittoor, Andhra Pradesh."
+de6ab8cd9d402c976082b707b1207c3ad49ae204,End-to-end Image Captioning Exploits Distributional Similarity in Multimodal Space,"MADHYASTHA ET AL.: IMAGE CAPTIONING EXPLOITS DISTRIBUTIONAL SIMILARITY 1
+End-to-end Image Captioning Exploits
+Multimodal Distributional Similarity
+Pranava Madhyastha
+Josiah Wang
+Lucia Specia
+Department of Computer Science
+The University of Sheffield
+Sheffield, UK"
+de99971e61613f174c9e5aa41a2c600399f59953,Pixel-wise Attentional Gating for Scene Parsing,"Pixel-wise Attentional Gating for Scene Parsing
+Department of Computer Science, University of California, Irvine, CA 92697, USA
+Shu Kong, Charless Fowlkes
+{skong2,"
+de724211683bb92931a5d80193e5dee31ca2e045,Sampling Design For Face Recognition,"Sampling Design For Face Recognition
+Yanjun Yan and Lisa A. Osadciw
+EECS, Syracuse University, Syracuse, NY, USA
+{yayan,"
+de2faaee4f1b2ecf23149995d0146347a13b9257,Robust Unsupervised Domain Adaptation for Neural Networks via Moment Alignment,"Robust Unsupervised Domain Adaptation for Neural
+Networks via Moment Alignment
+Werner Zellingera,∗, Bernhard A. Moserb, Thomas Grubingerb, Edwin
+Lughofera, Thomas Natschl¨agerb, Susanne Saminger-Platza
+Johannes Kepler University, Linz, Austria
+Software Competence Center Hagenberg GmbH, Hagenberg, Austria"
+de309a1d10f819d69a4ef2c26d968d3b287c3dd5,Preprocessing and Feature Sets for Robust Face Recognition,"Preprocessing and Feature Sets for Robust Face Recognition
+Xiaoyang Tan and Bill Triggs
+LJK-INRIA, 655 avenue de l’Europe, Montbonnot 38330, France"
+dea749f087a8c9a9baa9167b4eaff50bd3eb9d16,Physically Grounded Spatio-temporal Object Affordances,"Physically Grounded Spatio-Temporal
+Object Affordances
+Hema S. Koppula and Ashutosh Saxena
+Department of Computer Science, Cornell University."
+de95fa1dd69a2d0d2b76539357062062f8b1e7b8,Face to Age,"Face to Age
+Project 1
+CS395T - Deep Learning Seminar
+Aishwarya Padmakumar, Ashish Bora, Amir Gholaminejad
+October 9, 2016
+A Century of Portraits is a dataset that contains frontal-facing American high school year-book photos
+with labels to indicate the years those photos were taken [2].
+In this project we train classifiers to
+predict the label, given the image. We used several Deep Neural Network architectures for this task,
+ll of which were finetuned with ImageNet pretraining. With VGGNet architecture, we demonstrate
+significant improvements in classification accuracy reporting test set accuracy of 67.59% and mean L1
+error, as compared to 11.31 % achieved by Ginosar et al. [2]. Further, we show some visualizations of
+the trained model to gain insights into the learned model. The code for this project can be found at
+https://github.com/AshishBora/face2year.
+Introduction
+Deep Neural networks have been central to large improvements in several visual learning tasks. Feature
+representations learned by deep convolutional neural networks for image classification on large datasets
+such as ImageNet [1] have been repeatedly demonstrated to be useful for other tasks [6]. Several down-
+stream applications have also greatly benefited from these representations, either when used directly
+[9, 10] or with appropriate finetuning [3, 5]."
+de0aaf8c6b5dea97327e8ef8060d9a708bf564af,A Benchmark for Iris Location and a Deep Learning Detector Evaluation,"A Benchmark for Iris Location and a Deep
+Learning Detector Evaluation
+Evair Severo∗, Rayson Laroca∗, Cides S. Bezerra∗, Luiz A. Zanlorensi∗,
+Daniel Weingaertner∗, Gladston Moreira† and David Menotti∗
+Postgraduate Program in Informatics, Federal University of Paran´a (UFPR), Curitiba, Paran´a, Brazil
+Computing Department, Federal University of Ouro Preto (UFOP), Ouro Preto, Minas Gerais, Brazil
+Email: {ebsevero, rblsantos, csbezerra, lazjunior, daniel,"
+dee406a7aaa0f4c9d64b7550e633d81bc66ff451,Content-Adaptive Sketch Portrait Generation by Decompositional Representation Learning,"Content-Adaptive Sketch Portrait Generation by
+Decompositional Representation Learning
+Dongyu Zhang, Liang Lin, Tianshui Chen, Xian Wu, Wenwei Tan, and Ebroul Izquierdo"
+dedbbb6e588e77969ab87571917d4f84a3b1722d,Multimodal Human-Human-Robot Interactions (MHHRI) Dataset for Studying Personality and Engagement,"JOURNAL OF IEEE TRANS. ON AFFECTIVE COMPUTING
+Multimodal Human-Human-Robot Interactions
+(MHHRI) Dataset for Studying Personality and
+Engagement
+Oya Celiktutan, Efstratios Skordos and Hatice Gunes"
+defcfed9c43bdf8a4388daade4899ef9d3345458,Sistema de reconocimiento multimodal de emociones relacionadas al aprendizaje en dispositivos móviles,"Sistema de reconocimiento multimodal de emociones
+relacionadas al aprendizaje en dispositivos móviles
+María Lucía Barrón-Estrada, Ramón Zatarain-Cabada,
+Claudia Guadalupe Aispuro-Gallegos
+Instituto Tecnológico de Culiacán, Culiacán, Sinaloa,
+México
+{lbarron, rzatarain,
+Resumen. Gran variedad de sistemas reconocedores de emociones han sido
+implementados, pero pocos han logrado aplicarse en el mundo real debido al
+elevado costo de la tecnología necesaria y al bajo porcentaje de efectividad del
+reconocimiento, cuando no se trabaja con emociones espontáneas. Este artículo
+presenta la implementación de un sistema de reconocimiento multimodal de
+emociones usando dispositivos móviles y la creación de una base de datos
+fectiva por medio de una aplicación móvil. El reconocedor puede ser integrado
+fácilmente a una aplicación educativa móvil para identificar las emociones de un
+usuario mientras éste interactúa con el dispositivo. Las emociones que el sistema
+reconoce son compromiso y aburrimiento. La base de datos afectiva fue creada
+on emociones espontáneas de estudiantes que interactuaron con una aplicación
+móvil educativa llamada Duolingo y una aplicación móvil recolectora de
+información llamada EmoData. El sistema desarrollado tiene un porcentaje de"
+dedabf9afe2ae4a1ace1279150e5f1d495e565da,Robust Face Recognition With Structurally Incoherent Low-Rank Matrix Decomposition,"Robust Face Recognition With Structurally
+Incoherent Low-Rank Matrix Decomposition
+Chia-Po Wei, Chih-Fan Chen, and Yu-Chiang Frank Wang"
+de7daa206f1dc3d5f83c5342fc08e3e92ddfa126,Index Codes for Multibiometric Pattern Retrieval,"Index Codes for Multibiometric Pattern Retrieval
+Aglika Gyaourova, Student Member, IEEE, and Arun Ross, Senior Member, IEEE"
+de7a148970881cbd4e6a12b6a014e3dfeee98cc9,D 4 h : Final report on WP 4,"D4h: Final report on WP4
+Workpackage 4 Deliverable
+Date: 30th January 2008"
+de398bd8b7b57a3362c0c677ba8bf9f1d8ade583,Hierarchical Bayesian Theme Models for Multipose Facial Expression Recognition,"Hierarchical Bayesian Theme Models for
+Multi-pose Facial Expression Recognition
+Qirong Mao, Member, IEEE, Qiyu Rao, Yongbin Yu, and Ming Dong*, Member, IEEE"
+def3b2254caea169c5cbc4b771c44f1773c004fd,Matching Adversarial Networks,"Matching Adversarial Networks
+Gell´ert M´attyus and Raquel Urtasun
+Uber Advanced Technologies Group and University of Toronto"
+defa8774d3c6ad46d4db4959d8510b44751361d8,FEBEI - Face Expression Based Emoticon Identification CS - B657 Computer Vision,"FEBEI - Face Expression Based Emoticon Identification
+CS - B657 Computer Vision
+Nethra Chandrasekaran Sashikar - necsashi
+Prashanth Kumar Murali - prmurali
+Robert J Henderson - rojahend"
+de48bb3a9974f6f1ed2aa36d066150015f9f8647,Ultrasound Image Despeckling using Local Binary Pattern Weighted Linear Filtering,"I.J. Information Technology and Computer Science, 2013, 06, 1-9
+Published Online May 2013 in MECS (http://www.mecs-press.org/)
+DOI: 10.5815/ijitcs.2013.06.01
+Ultrasound Image Despeckling using Local
+Binary Pattern Weighted Linear Filtering
+Digital Image Processing Lab, Dept. of Computer Applications, Cochin University of Science and Technology, Kerala,
+Simily Joseph, Kannan Balakrishnan
+E-mail: {simily.joseph,
+India
+M.R. Balachandran Nair
+Ernakulam Scan Center, Kerala, India
+E-mail:
+Reji Rajan Varghese
+Dept. of Biomedical Engineering, Co operative Medical College, Kerala, India
+E-mail:"
+de26c1560db47f63ef2dc8171d7c2c52369ffede,Mathematically inspired approaches to face recognition in uncontrolled conditions : super resolution and compressive sensing,"MATHEMATICALLY INSPIRED
+APPROACHES TO
+FACE RECOGNITION IN
+UNCONTROLLED CONDITIONS -
+SUPER RESOLUTION AND
+COMPRESSIVE SENSING
+NADIA AL-HASSAN
+Applied Computing Department
+The University of Buckingham / United Kingdom
+A Thesis
+Submitted for the Degree of Doctor of Philosophy in Mathematical
+Science to the school of Science and Medicine in the University of
+Buckingham
+September 2014"
+b0c3bc3e3ca143444f5193735f2aad89d1776276,Training Generative Reversible Networks,"Training Generative Reversible Networks
+Robin Tibor Schirrmeister 1 2 Patryk Chrab ˛aszcz 2 Frank Hutter 2 Tonio Ball 1"
+b08203fca1af7b95fda8aa3d29dcacd182375385,Object and Text-guided Semantics for CNN-based Activity Recognition,"OBJECT AND TEXT-GUIDED SEMANTICS FOR CNN-BASED ACTIVITY RECOGNITION
+(cid:63)Sungmin Eum †§, (cid:63)Christopher Reale †, Heesung Kwon†, Claire Bonial †, Clare Voss†
+U.S. Army Research Laboratory, Adelphi, MD, USA
+§Booz Allen Hamilton Inc., McLean, VA, USA"
+b04d4b1e8b510180726f49a66dbaaf23c9ef64a0,Introspective Generative Modeling: Decide Discriminatively,"Introspective Generative Modeling: Decide Discriminatively
+Justin Lazarow ∗
+Dept. of CSE
+Long Jin∗
+Dept. of CSE
+Zhuowen Tu
+Dept. of CogSci"
+b00796447d670f9413e831ffb4ed548a380816a2,Servoing across object instances: Visual servoing for object category,"Servoing Across Object Instances: Visual Servoing for Object Category
+Harit Pandya1, K Madhava Krishna1 and C. V. Jawahar1"
+b008d973ee93fd3b13d1148fb7533dbdbc8374d6,New Representations for Analyzing Motion and Applications,"New Representations for Analyzing Motion and Applications
+Ce Liu
+Submitted to the Department of Electrical Engineering and Computer Science in partial
+fulfillment of the requirements for the degree of
+Doctor of Philosophy
+Electrical Engineering and Computer Science
+t the Massachusetts Institute of Technology
+June 2009
+(cid:13) 2009 Massachusetts Institute of Technology
+All Rights Reserved.
+Signature of Author:
+Certified by:
+Accepted by:
+Department of Electrical Engineering and Computer Science
+May 1, 2009
+William T. Freeman, Professor of EECS
+Thesis Supervisor
+Terry P. Orlando, Professor of Electrical Engineering
+Chair, Department Committee on Graduate Students"
+b02342a423eef6e19f473eba26b067405b525f16,Co-occurrence matrix analysis-based semi-supervised training for object detection,"CO-OCCURRENCE MATRIX ANALYSIS-BASED SEMI-SUPERVISED TRAINING FOR
+OBJECT DETECTION
+Min-Kook Choi1, Jaehyeong Park1, Jihun Jung1, Heechul Jung2, Jin-Hee Lee1,
+Woong Jae Won1, Woo Young Jung1, Jincheol Kim3, and Soon Kwon1∗
+DGIST, Daegu, Republic of Korea1
+KAIST, Daejeon, Republic of Korea2
+SK Telecom, Seoul, Republic of Korea3"
+b0d607d5e9e79540c9f2673f2224b2d51be3393c,Kernel Truncated Regression Representation for Robust Subspace Clustering,"Kernel Truncated Regression Representation for
+Robust Subspace Clustering
+Liangli Zhen, Dezhong Peng, Xin Yao"
+b09b693708f412823053508578df289b8403100a,Two-Stream SR-CNNs for Action Recognition in Videos,"WANG et al.: TWO-STREAM SR-CNNS FOR ACTION RECOGNITION IN VIDEOS
+Two-Stream SR-CNNs for Action
+Recognition in Videos
+Yifan Wang1
+Jie Song1
+Limin Wang2
+Luc Van Gool2
+Otmar Hilliges1
+Advanced Interactive Technologies Lab
+ETH Zurich
+Zurich, Switzerland
+Computer Vision Lab
+ETH Zurich
+Zurich, Switzerland"
+b0a376888a33defd6fcfe396a11e6ea6d4f99f0e,Soft Measure of Visual Token Occurrences for Object Categorization,"Soft Measure of Visual Token Occurrences for
+Object Categorization
+Yanjie Wang, Xiabi Liu(cid:2), and Yunde Jia
+Beijing Laboratory of Intelligent Information Technology, School of Computer
+Science, Beijing Institute of Technology
+Tel.: +86-10-68913447, Fax: +86-10-86343158"
+b05ac3b2286c30fcab385f682b3519a823857112,UvA-DARE ( Digital Academic Repository ) Spatial frequency information modulates response inhibition and decision-making processes,"UvA-DARE (Digital Academic Repository)
+Spatial frequency information modulates response inhibition and decision-making
+processes
+Jahfari, S.; Ridderinkhof, K.R.; Scholte, H.S.
+Published in:
+PLoS One
+0.1371/journal.pone.0076467
+Link to publication
+Citation for published version (APA):
+Jahfari, S., Ridderinkhof, K. R., & Scholte, H. S. (2013). Spatial frequency information modulates response
+inhibition and decision-making processes. PLoS One, 8(10), e76467. [e76467]. DOI:
+0.1371/journal.pone.0076467
+General rights
+It is not permitted to download or to forward/distribute the text or part of it without the consent of the author(s) and/or copyright holder(s),
+other than for strictly personal, individual use, unless the work is under an open content license (like Creative Commons).
+Disclaimer/Complaints regulations
+If you believe that digital publication of certain material infringes any of your rights or (privacy) interests, please let the Library know, stating
+your reasons. In case of a legitimate complaint, the Library will make the material inaccessible and/or remove it from the website. Please Ask
+the Library: http://uba.uva.nl/en/contact, or a letter to: Library of the University of Amsterdam, Secretariat, Singel 425, 1012 WP Amsterdam,
+The Netherlands. You will be contacted as soon as possible."
+b0fafe26b03243a22e12b021266872afdb96572c,Factors of Transferability for a Generic ConvNet Representation,"Factors of Transferability for a Generic ConvNet Representation
+Hossein Azizpour, Ali Sharif Razavian, Josephine Sullivan, Atsuto Maki, Stefan Carlsson
+{azizpour, razavian, sullivan, atsuto,
+Computer Vision and Active Perception (CVAP), Royal Institute of Technology (KTH), Stockholm, SE-10044 Sweden
+Evidence is mounting that Convolutional Networks (ConvNets) are the most effective representation learning method for visual
+recognition tasks. In the common scenario, a ConvNet is trained on a large labeled dataset (source) and the feed-forward units
+ctivation of the trained network, at a certain layer of the network, is used as a generic representation of an input image for a
+task with relatively smaller training set (target). Recent studies have shown this form of representation transfer to be suitable for a
+wide range of target visual recognition tasks. This paper introduces and investigates several factors affecting the transferability of
+such representations. It includes parameters for training of the source ConvNet such as its architecture, distribution of the training
+data, etc. and also the parameters of feature extraction such as layer of the trained ConvNet, dimensionality reduction, etc. Then,
+y optimizing these factors, we show that significant improvements can be achieved on various (17) visual recognition tasks. We
+further show that these visual recognition tasks can be categorically ordered based on their distance from the source task such that
+correlation between the performance of tasks and their distance from the source task w.r.t. the proposed factors is observed.
+Index Terms—Convolutional Neural Networks, Transfer Learning, Representation Learning, Deep Learning, Visual Recognition
+I. INTRODUCTION
+C ONVOLUTIONAL NETWORKS (ConvNets) trace back
+to the early works on digit and character recognition
+[11], [23]. Prior to 2012, though, in computer vision field,
+neural networks were more renowned for their propensity to"
+b0d6e204c36f029300787f6334cb727325f8983a,Neural networks related to dysfunctional face processing in autism spectrum disorder,"Brain Struct Funct
+DOI 10.1007/s00429-014-0791-z
+O R I G I N A L A R T I C L E
+Neural networks related to dysfunctional face processing
+in autism spectrum disorder
+Thomas Nickl-Jockschat • Claudia Rottschy •
+Johanna Thommes • Frank Schneider •
+Angela R. Laird • Peter T. Fox • Simon B. Eickhoff
+Received: 6 September 2013 / Accepted: 28 April 2014
+Ó Springer-Verlag Berlin Heidelberg 2014"
+b07582d1a59a9c6f029d0d8328414c7bef64dca0,Employing Fusion of Learned and Handcrafted Features for Unconstrained Ear Recognition,"Employing Fusion of Learned and Handcrafted
+Features for Unconstrained Ear Recognition
+Maur´ıcio Pamplona Segundo∗†
+Earnest E. Hansley∗
+Sudeep Sarkar∗‡
+October 24, 2017"
+b0c651f23516055583060e2197756e1390455de5,Multimodal Verification of Identity for a Realistic Access Control Application,"Multimodal Verification of Identity for a
+Realistic Access Control Application
+Thesis submitted in partial fulfilment of the requirements for the degree
+Doctor Ingeneriae
+Mechanical Engineering
+Rand Afrikaans University
+Supervisor: Professor A.L. Nel
+Nele Denys
+t the
+May 2004"
+b0b628bda8a6c4267eeaf91420b8610400ff398f,Intact emotion facilitation for nonsocial stimuli in autism: is amygdala impairment in autism specific for social information?,"Journal of the International Neuropsychological Society (2008), 14, 42–54.
+Copyright © 2008 INS. Published by Cambridge University Press. Printed in the USA.
+DOI: 10.10170S1355617708080107
+Intact emotion facilitation for nonsocial stimuli in autism:
+Is amygdala impairment in autism specific
+for social information?
+MIKLE SOUTH,1,2 SALLY OZONOFF,3 YANA SUCHY,1,4 RAYMOND P. KESNER,1,4
+WILLIAM M. McMAHON,2,4 and JANET E. LAINHART2,4
+Department of Psychology, University of Utah, Salt Lake City, Utah
+Department of Psychiatry, University of Utah School of Medicine and Utah Autism Research Project, Salt Lake City, Utah
+M.I.N.D. Institute, Department of Psychiatry and Behavioral Sciences, University of California–Davis, Sacramento, California
+The Brain Institute at the University of Utah, Salt Lake City, Utah
+(Received April 25, 2007; Final Revision July 11, 2007; Accepted July 18, 2007)"
+b0c379f740292ad2cad2c990a445f69167e18894,Knowledge distillation using unlabeled mismatched images,"Workshop track - ICLR 2017
+KNOWLEDGE DISTILLATION USING UNLABELED MIS-
+MATCHED IMAGES
+Mandar Kulkarni(*), Kalpesh Patil(**), Shirish Karande(*)
+TCS Innovation Labs, Pune, India (*), IIT Bombay, Mumbai, India(**)"
+b0771b7ca52022b37a563464f823af67c0b36c03,Image Retrieval Technique Using Local Binary Pattern (LBP),"International Journal of Science and Research (IJSR)
+ISSN (Online): 2319-7064
+Index Copernicus Value (2013): 6.14 | Impact Factor (2013): 4.438
+Image Retrieval Technique Using Local Binary
+Pattern (LBP)
+Miss. Priyanka Pawar1, P.P.Belagali2
+P.G Student, Department of Electronics Engineering, Shivaji University, Dr.J.J.M.C.O.E Jaysingpur, Kolhapur, India
+Associate Professor, Department of Electronics Engineering, Shivaji University, Dr.J.J.M.C.O.E Jaysingpur, Kolhapur, India"
+b07546f26a99b61c5045e313bc024b0fe7de590a,Bilinear CNNs for Fine-grained Visual Recognition,"Bilinear CNNs for Fine-grained Visual
+Recognition
+Tsung-Yu Lin
+Aruni RoyChowdhury
+Subhransu Maji"
+b0c1615ebcad516b5a26d45be58068673e2ff217,How Image Degradations Affect Deep CNN-Based Face Recognition?,"How Image Degradations Affect Deep CNN-based Face
+Recognition?
+S¸amil Karahan1 Merve Kılınc¸ Yıldırım1 Kadir Kırtac¸1 Ferhat S¸ ¨ukr¨u Rende1
+G¨ultekin B¨ut¨un1Hazım Kemal Ekenel2"
+b0de0892d2092c8c70aa22500fed31aa7eb4dd3f,A Robust and Efficient Video Representation for Action Recognition,"(will be inserted by the editor)
+A robust and efficient video representation for action recognition
+Heng Wang · Dan Oneata · Jakob Verbeek · Cordelia Schmid
+Received: date / Accepted: date"
+b0623c1d8493d273d704ba1d0413db0de579ae77,Attributes-Based Re-identification,"Attributes-based Re-Identification
+Ryan Layne, Timothy M. Hospedales and Shaogang Gong"
+b0158b26f01d5fa18aac51ece055cad9a12f6d87,Memory-based Gait Recognition,"Pages 82.1-82.12
+DOI: https://dx.doi.org/10.5244/C.30.82"
+b0e7c177084be76fb73df3c4bcf1846676a2d615,Joint action recognition and pose estimation from video,"Joint Action Recognition and Pose Estimation From Video
+Bruce Xiaohan Nie, Caiming Xiong and Song-Chun Zhu
+Center for Vision, Cognition, Learning and Art
+University of California, Los Angeles, USA"
+b073313325b6482e22032e259d7311fb9615356c,Robust and accurate cancer classification with gene expression profiling,"Robust and Accurate Cancer Classification with Gene Expression Profiling
+Haifeng Li
+Keshu Zhang
+Tao Jiang
+Dept. of Computer Science
+Human Interaction Research Lab
+Dept. of Computer Science
+University of California
+Riverside, CA 92521
+Motorola, Inc.
+Tempe, AZ 85282
+University of California
+Riverside, CA 92521"
+b03d5ed5b3f253703fa37d6445fab0e7cdf38ba1,Separate-Group Covariance Estimation With Insufficient Data for Object Recognition,"Separate-Group Covariance Estimation With Insufficient Data for
+Object Recognition
+Carlos Eduardo Thomaz1, Raul Queiroz Feitosa2, Álvaro Veiga3
+,2,3Catholic University of Rio de Janeiro
+Department of Electrical Engineering
+Department of Computer Engineering
+University of Rio de Janeiro
+r. Marquês de São Vicente 225,22453-900, Rio de
+r. São Francisco Xavier, 524, 20559-900, Rio de
+Janeiro, Brazil
+Janeiro, Brazil"
+a6e7513371a49cd7b8b30bb444e8fc448c5326cb,Simple online and realtime tracking,"SIMPLE ONLINE AND REALTIME TRACKING
+Alex Bewley†, Zongyuan Ge†, Lionel Ott(cid:5), Fabio Ramos(cid:5), Ben Upcroft†
+Queensland University of Technology†, University of Sydney(cid:5)"
+a66373beaad40fb5a8e2e1b42c5a2213b166a55c,Childhood abuse is related to working memory impairment for positive emotion in female university students.,"Childhood abuse is related to working memory impairment for positive
+emotion in female university students
+Cromheeke S, Herpoel LA, Mueller SC.
+014; 19(1):38-48
+ARTICLE IDENTIFIERS
+DOI: 10.1177/1077559513511522
+PMID: 24271026
+PMCID: not available
+JOURNAL IDENTIFIERS
+LCCN: not available
+pISSN: 1077-5595
+eISSN: 1552-6119
+OCLC ID: 30832620
+CONS ID: sn 94001296
+US National Library of Medicine ID: 9602869
+This article was identified from a query of the SafetyLit database.
+Powered by TCPDF (www.tcpdf.org)"
+a66d89357ada66d98d242c124e1e8d96ac9b37a0,Failure Detection for Facial Landmark Detectors,"Failure Detection for Facial Landmark Detectors
+Andreas Steger, Radu Timofte, and Luc Van Gool
+Computer Vision Lab, D-ITET, ETH Zurich, Switzerland
+{radu.timofte,"
+a62ca056821a3179b116662b28338433ba5b5e7d,How far can we go without convolution: Improving fully-connected networks,"Under review as a conference paper at ICLR 2016
+HOW FAR CAN WE GO WITHOUT CONVOLUTION: IM-
+PROVING FULLY-CONNECTED NETWORKS
+Zhouhan Lin & Roland Memisevic
+Universit´e de Montr´eal
+Canada
+{zhouhan.lin,
+Kishore Konda
+Goethe University Frankfurt
+Germany"
+a649bc66524e5e61e4d34cc00159099b6b58db2f,Large-Scale Image Geolocalization,"Chapter 3
+Large-Scale Image Geolocalization
+James Hays and Alexei A. Efros"
+a65c76169bdb8479353806556f61bf94fdec7e10,Online Object Tracking With Sparse Prototypes,"Online Object Tracking With Sparse Prototypes
+Dong Wang, Huchuan Lu, Member, IEEE, and Ming-Hsuan Yang, Senior Member, IEEE"
+a6f477f3c1cb2ab230fe8d89c31ae6af0b9c2346,Relevance Subject Machine: A Novel Person Re-identification Framework,"Relevance Subject Machine: A Novel Person
+Re-identification Framework
+Igor Fedorov, Student Member, IEEE, Ritwik Giri, Student Member, IEEE, Bhaskar D. Rao, Fellow, IEEE,
+Truong Q. Nguyen, Fellow, IEEE"
+a63638b26d36bab8db10bd95fb287c727bab33ec,Joint Sparse and Low-Rank Representation for Emotion Recognition,"MAY 2014
+Joint Sparse and Low-Rank Representation for
+Emotion Recognition
+Xiang Xiang, Fabian Prada, Hao Jiang"
+a60146c458adfe9207f015d7a77cb7dfb54f744f,Understanding Dynamic Social Grouping Behaviors of Pedestrians,"Understanding Dynamic Social Grouping
+Behaviors of Pedestrians
+Linan Feng, Student Member, IEEE, and Bir Bhanu, Fellow, IEEE"
+a608c5f8fd42af6e9bd332ab516c8c2af7063c61,Age Estimation via Grouping and Decision Fusion,"Age Estimation via Grouping and Decision Fusion
+Kuan-Hsien Liu, Member, IEEE, Shuicheng Yan, Senior Member, IEEE,
+nd C.-C. Jay Kuo, Fellow, IEEE"
+a6eb6ad9142130406fb4ffd4d60e8348c2442c29,"Video Description: A Survey of Methods, Datasets and Evaluation Metrics","Video Description: A Survey of Methods,
+Datasets and Evaluation Metrics
+Nayyer Aafaq, Syed Zulqarnain Gilani, Wei Liu, and Ajmal Mian"
+a65e953df1dbc007862f8eaa8c12ceb225d15837,Robust Head-shoulder Detection using Deformable Part-based Models,"Robust Head-shoulder Detection using Deformable Part-based Models
+Enes Dayangac, Christian Wiede, Julia Richter and Gangolf Hirtz
+Faculty of Electrical Engineering and Information Technology, Technische Universit¨at Chemnitz,
+Chemnitz, Germany
+Keywords:
+Person Detection, Head-shoulder Detection, Ambient Assisted Living, Latent SVM, DPM, ACF-Detector."
+a618cc9c513762d4eb5db2f7f7b686e7e2b758ca,Learning Semi-Riemannian Metrics for Semisupervised Feature Extraction,"Learning Semi-Riemannian Metrics
+for Semisupervised Feature Extraction
+Wei Zhang, Zhouchen Lin, Senior Member, IEEE, and Xiaoou Tang, Fellow, IEEE"
+a67e7ca0c7e1e3020169b5c59dc492e9f62f0022,3d Face Recognition Performance under Adversarial Conditions,
+a6404e91af8d1644aa7eea307ffceefa715dd7ea,Human Motion Capture Using a Drone,"Human Motion Capture Using a Drone
+Xiaowei Zhou, Sikang Liu, Georgios Pavlakos, Vijay Kumar, Kostas Daniilidis"
+a67d54cf585c9491ab8a3e2d58d9c4b223359602,Spatial information and end-to-end learning for visual recognition. (Informations spatiales et apprentissage bout-en-bout pour la reconnaissance visuelle),"Spatial information and end-to-end learning for visual
+recognition
+Mingyuan Jiu
+To cite this version:
+Mingyuan Jiu. Spatial information and end-to-end learning for visual recognition. Computer Science
+[cs]. INSA de Lyon, 2014. English. <NNT : 2014ISAL0038>. <tel-01127462>
+HAL Id: tel-01127462
+https://tel.archives-ouvertes.fr/tel-01127462
+Submitted on 7 Mar 2015
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de
+recherche français ou étrangers, des laboratoires"
+a6a6cfae45e8633c01793debf43592b7d515f65d,From ImageNet to Mining: Adapting Visual Object Detection with Minimal Supervision,"From ImageNet to Mining: Adapting Visual
+Object Detection with Minimal Supervision
+Alex Bewley and Ben Upcroft"
+a6590c49e44aa4975b2b0152ee21ac8af3097d80,3D Interpreter Networks for Viewer-Centered Wireframe Modeling,"https://doi.org/10.1007/s11263-018-1074-6
+D Interpreter Networks for Viewer-Centered Wireframe Modeling
+Jiajun Wu1 · Tianfan Xue2 · Joseph J. Lim3 · Yuandong Tian4 ·
+Joshua B. Tenenbaum1 · Antonio Torralba1 · William T. Freeman1,5
+Received: date / Accepted: date"
+a694180a683f7f4361042c61648aa97d222602db,Face recognition using scattering wavelet under Illicit Drug Abuse variations,"Face Recognition using Scattering Wavelet under Illicit Drug Abuse Variations
+Prateekshit Pandey, Richa Singh, Mayank Vatsa
+fprateekshit12078, rsingh,
+IIIT-Delhi India"
+a6ce2f0795839d9c2543d64a08e043695887e0eb,Driver Gaze Region Estimation Without Using Eye Movement,"Driver Gaze Region Estimation
+Without Using Eye Movement
+Lex Fridman, Philipp Langhans, Joonbum Lee, and Bryan Reimer
+Massachusetts Institute of Technology (MIT)"
+a6161e53d77d7cbd6e69d1b84e6d03d7041cb93e,Dark Model Adaptation: Semantic Image Segmentation from Daytime to Nighttime,"Dark Model Adaptation: Semantic Image Segmentation from Daytime
+to Nighttime
+Dengxin Dai1 and Luc Van Gool1,2"
+a6eb8cb1c35d0f53f8d2c9a404e374c01275544b,NovaSearch on Medical ImageCLEF 2013,"NovaSearch on medical ImageCLEF 2013
+Andr´e Mour˜ao, Fl´avio Martins and Jo˜ao Magalh˜aes
+Universidade Nova de Lisboa, Faculdade de Ciˆencias e Tecnologia,
+Caparica, Portugal,"
+a6ebe013b639f0f79def4c219f585b8a012be04f,Facial Expression Recognition Based on Hybrid Approach,"Facial Expression Recognition Based on Hybrid
+Approach
+Md. Abdul Mannan, Antony Lam, Yoshinori Kobayashi, and Yoshinori Kuno
+Graduate School of Science and Engineering, Saitama University,
+55 Shimo-Okubo, Sakura-ku, Saitama-shi, Saitama 338-8570, Japan
+E-mail"
+a6574d111bfb12d6a9988bdbbf24639d3c4534ec,Image denoising: Can plain neural networks compete with BM3D?,"Image denoising: Can plain Neural Networks compete with BM3D?
+Harold C. Burger, Christian J. Schuler, and Stefan Harmeling
+Max Planck Institute for Intelligent Systems, T¨ubingen, Germany
+http://people.tuebingen.mpg.de/burger/neural_denoising/"
+b98aec5bbe7116fa3ae5f9b4d77cb1f1141eaabd,Appearance-Based 3D Upper-Body Pose Estimation and Person Re-identification on Mobile Robots,"Appearance-Based 3D Upper-Body Pose Estimation
+nd Person Re-Identification on Mobile Robots
+Christoph Weinrich, Michael Volkhardt, Horst-Michael Gross
+Neuroinformatics and Cognitive Robotics Lab
+Ilmenau University of Technology
+Ilmenau, Germany"
+b9bd9cab426f4d4a0b0d0077f6d9dca2ec01ce3c,Propositionalisation of Multi-instance Data Using Random Forests,"Propositionalisation of Multi-instance Data
+using Random Forests
+Eibe Frank and Bernhard Pfahringer
+Department of Computer Science, University of Waikato"
+b9953824b3d4cd2be77ecbc5db3f7dec3dfa031e,Guided Attention for Large Scale Scene Text Verification,"Large Scale Scene Text Verification with Guided
+Attention
+Dafang He1(cid:63), Yeqing Li2∗, Alexander Gorban2, Derrall Heath2, Julian Ibarz2,
+Qian Yu2, Daniel Kifer1, C. Lee Giles1
+The Pennsylvania State University1, Google Inc2."
+b9fb66f09b358a4ce167b54eed8c596772a392d9,Modal Regression based Atomic Representation for Robust Face Recognition,"Modal Regression based Atomic Representation for
+Robust Face Recognition
+Yulong Wang, Yuan Yan Tang, Life Fellow, IEEE, Luoqing Li, and Hong Chen"
+b9696bdba6e16959258bad17ce26e6a643be5faf,Using Photometric Stereo for Face Recognition,"International Journal of Bio-Science and Bio-Technology
+Vol. 3, No. 3, September, 2011
+Using Photometric Stereo for Face Recognition
+Gary A. Atkinson and Melvyn L. Smith
+University of the West of England, Bristol, BS16 1QY, UK"
+b97f694c2a111b5b1724eefd63c8d64c8e19f6c9,Group Affect Prediction Using Multimodal Distributions,"Group Affect Prediction Using Multimodal Distributions
+Saqib Nizam Shamsi
+Aspiring Minds
+Bhanu Pratap Singh
+Univeristy of Massachusetts, Amherst
+Manya Wadhwa
+Johns Hopkins University"
+b94e57ee9278f06c65a96ce1b586cb7a5b2b7fbb,Group Re-identification via Unsupervised Transfer of Sparse Features Encoding,"Group Re-Identification via
+Unsupervised Transfer of Sparse Features Encoding
+Giuseppe Lisanti∗,1, Niki Martinel∗,2, Alberto Del Bimbo1 and Gian Luca Foresti2
+MICC - University of Firenze, Italy
+AViReS Lab - University of Udine, Italy"
+b9305c065b3c95fd0844d16a09fb9cc7c321cf58,Detecting Humans in Dense Crowds Using Locally-Consistent Scale Prior and Global Occlusion Reasoning,"Detecting Humans in Dense Crowds Using
+Locally-Consistent Scale Prior and Global
+Occlusion Reasoning
+Haroon Idrees, Member, IEEE, Khurram Soomro, Member, IEEE, and Mubarak Shah, Fellow, IEEE"
+b9d0774b0321a5cfc75471b62c8c5ef6c15527f5,Fishy Faces: Crafting Adversarial Images to Poison Face Authentication,"Fishy Faces: Crafting Adversarial Images to Poison Face Authentication
+Giuseppe Garofalo
+Vera Rimmer
+Tim Van hamme
+imec-DistriNet, KU Leuven
+imec-DistriNet, KU Leuven
+imec-DistriNet, KU Leuven
+Davy Preuveneers
+Wouter Joosen
+imec-DistriNet, KU Leuven
+imec-DistriNet, KU Leuven"
+b9cad920a00fc0e997fc24396872e03f13c0bb9c,Face liveness detection under bad illumination conditions,"FACE LIVENESS DETECTION UNDER BAD ILLUMINATION CONDITIONS
+Bruno Peixoto, Carolina Michelassi, and Anderson Rocha
+University of Campinas (Unicamp)
+Campinas, SP, Brazil"
+b908edadad58c604a1e4b431f69ac8ded350589a,Deep Face Feature for Face Alignment,"Deep Face Feature for Face Alignment
+Boyi Jiang, Juyong Zhang, Bailin Deng, Yudong Guo and Ligang Liu"
+b9f2a755940353549e55690437eb7e13ea226bbf,Unsupervised Feature Learning from Videos for Discovering and Recognizing Actions,"Unsupervised Feature Learning from Videos for Discovering and Recognizing Actions
+Carolina Redondo-Cabrera
+Roberto J. López-Sastre"
+b9e82ee9bb4cf016b5ed44b7acd2b42e1a5a6be2,Face recognition by applying wavelet subband representation and kernel associative memory,"Face Recognition by Applying Wavelet Subband
+Representation and Kernel Associative Memory
+Bai-Ling Zhang, Haihong Zhang, and Shuzhi Sam Ge, Senior Member, IEEE"
+b941d4a85be783a6883b7d41c1afa7a9db451831,Radiofrequency ablation planning for cardiac arrhythmia treatment using modeling and machine learning approaches,"Radiofrequency ablation planning for cardiac
+rrhythmia treatment using modeling and machine
+learning approaches
+Roc´ıo Cabrera Lozoya
+To cite this version:
+Roc´ıo Cabrera Lozoya. Radiofrequency ablation planning for cardiac arrhythmia treatment
+using modeling and machine learning approaches. Other. Universit´e Nice Sophia Antipolis,
+015. English. <NNT : 2015NICE4059>. <tel-01206478v2>
+HAL Id: tel-01206478
+https://tel.archives-ouvertes.fr/tel-01206478v2
+Submitted on 15 Dec 2015
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destin´ee au d´epˆot et `a la diffusion de documents
+scientifiques de niveau recherche, publi´es ou non,"
+b9b7b37d7edf4482a6f440e282c3418ab1913afa,ThiNet: Pruning CNN Filters for a Thinner Net.,"ACCEPTED BY IEEE TRANS. PAMI
+ThiNet: Pruning CNN Filters for a Thinner Net
+Jian-Hao Luo, Hao Zhang, Hong-Yu Zhou, Chen-Wei Xie, Jianxin Wu, Member, IEEE,
+nd Weiyao Lin, Senior Member, IEEE"
+b92a057606a47eb7de6ecc180e4dbf53c4a8d4b7,Face Recognition Based on 2D and 3D Features,"Face Recognition Based on 2D and 3D Features
+Stefano Arca, Ra(cid:11)aella Lanzarotti, and Giuseppe Lipori
+Dipartimento di Scienze dell’Informazione
+Universit(cid:18)a degli Studi di Milano
+Via Comelico, 39/41 20135 Milano, Italy
+farca, lanzarotti,"
+b9cedd1960d5c025be55ade0a0aa81b75a6efa61,Inexact Krylov Subspace Algorithms for Large Matrix Exponential Eigenproblem from Dimensionality Reduction,"INEXACT KRYLOV SUBSPACE ALGORITHMS FOR LARGE
+MATRIX EXPONENTIAL EIGENPROBLEM FROM
+DIMENSIONALITY REDUCTION
+GANG WU∗, TING-TING FENG† , LI-JIA ZHANG‡ , AND MENG YANG§"
+b95acfe00686cc6f6526fcd1f30b6f38061d3a29,Revisiting Multiple-Instance Learning Via Embedded Instance Selection,"Revisiting Multiple-Instance Learning via
+Embedded Instance Selection
+James Foulds and Eibe Frank
+Department of Computer Science, University of Waikato, New Zealand"
+b971266b29fcecf1d5efe1c4dcdc2355cb188ab0,On the Reconstruction of Face Images from Deep Face Templates.,"MAI et al.: ON THE RECONSTRUCTION OF FACE IMAGES FROM DEEP FACE TEMPLATES
+On the Reconstruction of Face Images from
+Deep Face Templates
+Guangcan Mai, Kai Cao, Pong C. Yuen∗, Senior Member, IEEE, and Anil K. Jain, Life Fellow, IEEE"
+a14260cd8c607afc6a9bd0c4df2ee22162e6d8c0,Discriminative Dictionary Learning With Ranking Metric Embedded for Person Re-Identification,Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+a13a4e4cc8f4744b40668fe7cca660ae0e88537d,Explorer Multi 30 K : Multilingual English-German Image Descriptions,"Multi30K: Multilingual English-German Image Descriptions
+Citation for published version:
+Elliott, D, Frank, S, Sima'an, K & Specia, L 2016, Multi30K: Multilingual English-German Image
+Descriptions. in Proceedings of the 5th Workshop on Vision and Language, hosted by the 54th Annual
+Meeting of the Association for Computational Linguistics, 2016, August 12, Berlin, Germany.
+Association for Computational Linguistics (ACL), pp. 70-74.
+Link:
+Link to publication record in Edinburgh Research Explorer
+Document Version:
+Publisher's PDF, also known as Version of record
+Published In:
+Proceedings of the 5th Workshop on Vision and Language, hosted by the 54th Annual Meeting of the
+Association for Computational Linguistics, 2016, August 12, Berlin, Germany
+General rights
+Copyright for the publications made accessible via the Edinburgh Research Explorer is retained by the author(s)
+nd / or other copyright owners and it is a condition of accessing these publications that users recognise and
+bide by the legal requirements associated with these rights.
+Take down policy
+The University of Edinburgh has made every reasonable effort to ensure that Edinburgh Research Explorer
+ontent complies with UK legislation. If you believe that the public display of this file breaches copyright please"
+a11600deb182677f4fe586fcea59f10d032a6c6f,Active Appearance Models with Rotation Invariant Kernels,"Active Appearance Models with Rotation Invariant Kernels
+Onur C. Hamsici and Aleix M. Martinez
+Department of Electrical and Computer Engineering
+Ohio State University, Columbus, OH 43210"
+a158c1e2993ac90a90326881dd5cb0996c20d4f3,Symmetry as an Intrinsically Dynamic Feature,"OPEN ACCESS
+ISSN 2073-8994
+Article
+Vito Di Gesu 1,2,†, Marco E. Tabacchi 1,3,* and Bertrand Zavidovique 4
+DMA, Università degli Studi di Palermo, via Archirafi 34, 90123 Palermo, Italy
+CITC, Università degli Studi di Palermo, via Archirafi 34, 90123 Palermo, Itlay
+Istituto Nazionale di Ricerche Demopolis, via Col. Romey 7, 91100 Trapani, Italy
+IEF, Université Paris IX–Orsay, Paris, France; E-Mail: (B.Z.)
+Deceased on 15 March 2009.
+* Author to whom correspondence should be addressed; E-Mail:
+Received: 4 March 2010; in revised form: 23 March 2010 / Accepted: 29 March 2010 /
+Published: 1 April 2010"
+a1e198454bd0868b4da9bca7a35218dd235cfdda,3d‐facial Expression Synthesis and Its Application to Face Recognition Systems,"D‐Facial Expression Synthesis and its Application to Face Recognition Systems
+Leonel Ramírez‐Valdez1, Rogelio Hasimoto‐Beltran2
+,2Centro de Investigación en Matemáticas(CIMAT)
+Jalisco s/n, Col. Mineral de Valenciana, Guanajuato, Gto., México 36240"
+a15f4e3adb56dbbdd6f922489efef48fc5efa003,Grounding Semantic Roles in Images,"Grounding Semantic Roles in Images
+Carina Silberer†♣
+Manfred Pinkal†
+Department of Computational Linguistics
+Saarland University, Saarbr¨ucken, Germany
+♣Universitat Pompeu Fabra
+Barcelona, Spain"
+a15d9d2ed035f21e13b688a78412cb7b5a04c469,Object Detection Using Strongly-Supervised Deformable Part Models,"Object Detection Using
+Strongly-Supervised Deformable Part Models
+Hossein Azizpour1 and Ivan Laptev2
+Computer Vision and Active Perception Laboratory (CVAP), KTH, Sweden
+INRIA, WILLOW, Laboratoire d’Informatique de l’Ecole Normale Superieure"
+a1b1442198f29072e907ed8cb02a064493737158,Crowdsourcing Facial Responses to Online Videos,"Crowdsourcing Facial Responses
+to Online Videos
+Daniel McDuff, Student Member, IEEE, Rana El Kaliouby, Member, IEEE, and
+Rosalind W. Picard, Fellow, IEEE"
+a125bc46fee1bd170a0654b8856d3b78d62e9d29,Learning weighted sparse representation of encoded facial normal information for expression-robust 3D face recognition,"Learning Weighted Sparse Representation of Encoded Facial Normal
+Information for Expression-Robust 3D Face Recognition
+Huibin Li1,2, Di Huang1,2, Jean-Marie Morvan1,3,4, Liming Chen1,2
+Universit´e de Lyon, CNRS, 2Ecole Centrale de Lyon, LIRIS UMR5205, F-69134, Lyon, France
+Universit´e Lyon 1, Institut Camille Jordan, 43 blvd. du 11 Nov. 1918, F-69622 Villeurbanne - Cedex, France
+King Abdullah University of Science and Technology, GMSV Research Center, Bldg 1, Thuwal 23955-6900, Saudi Arabia"
+a175f20189f028a1420b76ae42f6dfe99d8d6847,Where and Why Are They Looking ? Jointly Inferring Human Attention and Intentions in Complex Tasks,"Where and Why Are They Looking? Jointly Inferring Human Attention and
+Intentions in Complex Tasks
+Ping Wei1,2, Yang Liu2, Tianmin Shu2, Nanning Zheng1, and Song-Chun Zhu2
+School of Electronic and Information Engineering, Xi’an Jiaotong University, China
+Center for Vision, Cognition, Learning, and Autonomy, University of California, Los Angeles"
+a102edaa9fd458316637ce51a0b7aba2ee651637,Learning Human Poses from Actions,"ADITYA, JAWAHAR, PAWAN: LEARNING HUMAN POSES FROM ACTIONS
+Learning Human Poses from Actions
+IIIT Hyderabad
+University of Oxford &
+The Alan Turing Institute
+Aditya Arun1
+C.V. Jawahar1
+M. Pawan Kumar2"
+a1aac8e95cd262f974b26374ec8fe35c0f000185,Transferrable Feature and Projection Learning with Class Hierarchy for Zero-Shot Learning,"IJCV manuscript No.
+(will be inserted by the editor)
+Transferrable Feature and Projection Learning with Class Hierarchy for
+Zero-Shot Learning
+Aoxue Li · Zhiwu Lu · Jiechao Guan · Tao Xiang · Liwei Wang · Ji-Rong Wen
+Received: date / Accepted: date"
+a15c728d008801f5ffc7898568097bbeac8270a4,ForgetIT Deliverable Template,"www.forgetit-project.eu
+ForgetIT
+Concise Preservation by Combining Managed Forgetting
+nd Contextualized Remembering
+Grant Agreement No. 600826
+Deliverable D4.4
+Work-package
+Deliverable
+Deliverable Leader
+Quality Assessor
+Dissemination level
+Delivery date in Annex I
+Actual delivery date
+Revisions
+Status
+Keywords
+Information Consolidation and Con-
+entration
+D4.4:
+Information analysis, consolidation"
+a1e1bd4dacddc703a236681e987a09601ee1016d,Embedding Visual Hierarchy With Deep Networks for Large-Scale Visual Recognition,"Embedding Visual Hierarchy with Deep Networks
+for Large-Scale Visual Recognition
+Tianyi Zhao, Baopeng Zhang, Wei Zhang, Ning Zhou, Jun Yu, Jianping Fan"
+a19f08d7b1ce8b451df67ec125dd9254b5a05d95,3D Face Recognition Using Multiview Keypoint Matching,"009 Advanced Video and Signal Based Surveillance
+D Face Recognition Using Multiview Keypoint Matching
+Michael Mayo, Edmond Zhang
+Department of Computer Science, University of Waikato, New Zealand
+{mmayo,"
+a1669fa7d3d8f0c0cafe770c79007949cd32b245,Deep Metric Learning with BIER: Boosting Independent Embeddings Robustly,"TPAMI SUBMISSION
+Deep Metric Learning with BIER:
+Boosting Independent Embeddings Robustly
+Michael Opitz, Georg Waltner, Horst Possegger, and Horst Bischof"
+a147cec1434753777b3651101bdbda1489b09fd4,Individual differences in shifting decision criterion: a recognition memory study.,"Mem Cogn (2012) 40:1016–1030
+DOI 10.3758/s13421-012-0204-6
+Individual differences in shifting decision criterion:
+A recognition memory study
+Elissa M. Aminoff & David Clewett & Scott Freeman &
+Amy Frithsen & Christine Tipper & Arianne Johnson &
+Scott T. Grafton & Michael B. Miller
+Published online: 4 May 2012
+# Psychonomic Society, Inc. 2012"
+a157ebc849d57ccff00a52a68b24e4ac8eba9536,The Contextual Loss for Image Transformation with Non-aligned Data,"The Contextual Loss for Image Transformation
+with Non-Aligned Data
+Roey Mechrez(cid:63) , Itamar Talmi(cid:63), Lihi Zelnik-Manor
+Technion - Israel Institute of Technology
+Fig. 1. Our Contextual loss is effective for many image transformation tasks: It can
+make a Trump cartoon imitate Ray Kurzweil, give Obama some of Hillary’s features,
+nd, turn women more masculine or men more feminine. Mutual to these tasks is the
+bsence of ground-truth targets that can be compared pixel-to-pixel to the generated
+images. The Contextual loss provides a simple solution to all of these tasks."
+a1132e2638a8abd08bdf7fc4884804dd6654fa63,Real-Time Video Face Recognition for Embedded Devices,"Real-Time Video Face Recognition
+for Embedded Devices
+Gabriel Costache, Sathish Mangapuram, Alexandru
+Drimbarean, Petronel Bigioi and Peter Corcoran
+Tessera, Galway,
+Ireland
+. Introduction
+This chapter will address the challenges of real-time video face recognition systems
+implemented in embedded devices. Topics to be covered include: the importance and
+hallenges of video face recognition in real life scenarios, describing a general architecture of
+generic video face recognition system and a working solution suitable for recognizing
+faces in real-time using low complexity devices. Each component of the system will be
+described together with the system’s performance on a database of video samples that
+resembles real life conditions.
+. Video face recognition
+Face recognition remains a very active topic in computer vision and receives attention from
+large community of researchers in that discipline. Many reasons feed this interest; the
+main being the wide range of commercial, law enforcement and security applications that
+require authentication. The progress made in recent years on the methods and algorithms
+for data processing as well as the availability of new technologies makes it easier to study"
+a19de85fa1533a1a1929b98b5fc3b1fb618dc668,Towards Improving Abstractive Summarization via Entailment Generation,
+a15663e0c0a2427ac4da5161e4ed75d331a5a2be,Streaming spectral clustering,"Streaming Spectral Clustering
+Shinjae Yoo
+Computational Science Center
+Brookhaven National Laboratory
+Upton, New York 11973-5000
+Email:
+Hao Huang
+Machine Learning Laboratory
+General Electric Global Research
+San Ramon, CA 94583
+Email:
+Shiva Prasad Kasiviswanathan
+Samsung Research America
+Mountain View, CA 94043
+Email:"
+a14ae81609d09fed217aa12a4df9466553db4859,Face Identification Using Large Feature Sets,"REVISED VERSION, JUNE 2011
+Face Identification Using Large Feature Sets
+William Robson Schwartz, Huimin Guo, Jonghyun Choi, and Larry S. Davis, Fellow, IEEE"
+a1f1120653bb1bd8bd4bc9616f85fdc97f8ce892,Latent Embeddings for Zero-Shot Classification,"Latent Embeddings for Zero-shot Classification
+Yongqin Xian1, Zeynep Akata1, Gaurav Sharma1,2,∗, Quynh Nguyen3, Matthias Hein3 and Bernt Schiele1
+MPI for Informatics
+IIT Kanpur
+Saarland University"
+a10f734e30d8dcb8506c9ea5b1074e6c668904e2,Learning Features and Parts for Fine-Grained Recognition,"Learning Features and Parts for Fine-Grained
+Recognition
+(Invited Paper)
+Jonathan Krause∗, Timnit Gebru∗, Jia Deng †, Li-Jia Li ‡, Li Fei-Fei∗
+Stanford University: {jkrause, tgebru,
+University of Michigan:
+Yahoo! Research:"
+a1af05502eac70296ee22e5ab7e066420f5fe447,A Probabilistic Approach for Breast Boundary Extraction in Mammograms,"Hindawi Publishing Corporation
+Computational and Mathematical Methods in Medicine
+Volume 2013, Article ID 408595, 19 pages
+http://dx.doi.org/10.1155/2013/408595
+Research Article
+A Probabilistic Approach for Breast Boundary
+Extraction in Mammograms
+Hamed Habibi Aghdam, Domenec Puig, and Agusti Solanas
+Department of Computer Engineering and Mathematics, Rovira i Virgili University, 43007 Tarragona, Spain
+Correspondence should be addressed to Domenec Puig;
+Received 31 May 2013; Revised 21 August 2013; Accepted 16 September 2013
+Academic Editor: Reinoud Maex
+Copyright © 2013 Hamed Habibi Aghdam et al. This is an open access article distributed under the Creative Commons Attribution
+License, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
+The extraction of the breast boundary is crucial to perform further analysis of mammogram. Methods to extract the breast boundary
+an be classified into two categories: methods based on image processing techniques and those based on models. The former use
+image transformation techniques such as thresholding, morphological operations, and region growing. In the second category, the
+oundary is extracted using more advanced techniques, such as the active contour model. The problem with thresholding methods
+is that it is a hard to automatically find the optimal threshold value by using histogram information. On the other hand, active
+ontour models require defining a starting point close to the actual boundary to be able to successfully extract the boundary. In this"
+a1c6f88330762cc97f26585c124c6b3ac791eb89,Confidence Sets for Fine-Grained Categorization and Plant Species Identification,"Int J Comput Vis
+DOI 10.1007/s11263-014-0743-3
+Confidence Sets for Fine-Grained Categorization and Plant
+Species Identification
+Asma Rejeb Sfar · Nozha Boujemaa · Donald Geman
+Received: 1 January 2014 / Accepted: 20 June 2014
+© Springer Science+Business Media New York 2014"
+a18c8f76f2599d6d61f26cb1d4025ea386919dfe,Video Event Detection: From Subvolume Localization To Spatio-Temporal Path Search.,"This document is downloaded from DR-NTU, Nanyang Technological
+University Library, Singapore.
+Title
+Video event detection : from subvolume localization to
+spatio-temporal path search
+Author(s)
+Tran, Du; Yuan, Junsong; Forsyth, David
+Citation
+Tran, D., Yuan, J., & Forsyth, D. (2014). Video Event
+Detection: From Subvolume Localization to
+Spatiotemporal Path Search. IEEE Transactions on
+Pattern Analysis and Machine Intelligence, 36(2), 404-
+http://hdl.handle.net/10220/19322
+Rights
+© 2014 IEEE. Personal use of this material is permitted.
+Permission from IEEE must be obtained for all other
+uses, in any current or future media, including
+reprinting/republishing this material for advertising or
+promotional purposes, creating new collective works, for
+resale or redistribution to servers or lists, or reuse of any"
+a1b7b23bd8f2b2ef37a9113e6b8499f0069aac85,Performance assessment of face recognition using super-resolution,"Performance Assessment of Face Recognition Using
+Super-Resolution
+Shuowen Hu
+Robert Maschal
+S. Susan Young
+U.S. Army Research Laboratory
+U.S. Army Research Laboratory
+U.S. Army Research Laboratory
+800 Powder Mill Rd.
+Adelphi, MD 20783
+(301)394-2526
+800 Powder Mill Rd.
+Adelphi, MD 20783
+(301)394-0437
+800 Powder Mill Rd.
+Adelphi, MD 20783
+(301)394-0230
+Tsai Hong Hong
+Jonathon P. Phillips
+National Institute of Standards and"
+a120cac99c85548d0749dd83b0450520949e6474,Unsupervised Eye Pupil Localization through Differential Geometry and Local Self-Similarity Matching,"Unsupervised Eye Pupil Localization through Differential
+Geometry and Local Self-Similarity Matching
+Marco Leo1*, Dario Cazzato1,2, Tommaso De Marco1, Cosimo Distante1
+National Research Council of Italy, Institute of Optics, Arnesano, Lecce, Italy, 2 Faculty of Engineering, University of Salento, Lecce, Italy"
+a1030e6e0e6995768dbcafedc712a59db090d2b4,Bayesian Sparsification of Recurrent Neural Networks,"Bayesian Sparsification of Recurrent Neural Networks
+Ekaterina Lobacheva * 1 2 Nadezhda Chirkova * 1 3 Dmitry Vetrov 1 4"
+a11a63e00c0e587adf4efc1425c0651c242263b7,Two More Strategies to Speed Up Connected Components Labeling Algorithms,"Two More Strategies to Speed Up Connected
+Components Labeling Algorithms
+Federico Bolelli, Michele Cancilla, Costantino Grana
+Dipartimento di Ingegneria “Enzo Ferrari”
+Universit`a degli Studi di Modena e Reggio Emilia
+Via Vivarelli 10, Modena MO 41125, Italy"
+a11f5e74b13a6353d14e024d06a902b9afa728b3,Yum-me: Personalized Healthy Meal Recommender System,"Yum-me: Personalized Healthy Meal Recommender System
+Longqi Yang
+Cornell Tech
+Nicola Dell
+Cornell Tech
+Cheng-Kang Hsieh
+Serge Belongie
+Cornell Tech
+Hongjian Yang
+Cornell Tech
+Deborah Estrin
+Cornell Tech"
+a1e97c4043d5cc9896dc60ae7ca135782d89e5fc,"Re-identification of Humans in Crowds using Personal, Social and Environmental Constraints","IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE
+Re-identification of Humans in Crowds using
+Personal, Social and Environmental Constraints
+Shayan Modiri Assari, Member, IEEE, Haroon Idrees, Member, IEEE, and Mubarak Shah, Fellow, IEEE"
+ef61e43a1cce95afdc0696879085e834b981d5de,Real time multi-object tracking using multiple cameras Semester Project,"CVLab: Computer Vision Laboratory
+School of Computer and Communication Sciences
+Ecole Polytechnique Fédérale de Lausanne
+http://cvlab.epfl.ch/
+Real time multi-object tracking
+using multiple cameras
+Semester Project
+Michalis Zervos
+Supervisor Professor Pascal Fua
+Teaching Assistant Horesh Ben Shitrit
+Spring Semester
+June 2012"
+ef940b76e40e18f329c43a3f545dc41080f68748,A Face Recognition and Spoofing Detection Adapted to Visually-Impaired People,"Research Article Volume 7 Issue No.3
+ISSN XXXX XXXX © 2017 IJESC
+A Face Recognition and Spoofing Detection Adapted to Visually-
+Impaired People
+Rutuja R. Dengale1, Bhagyashri S. Deshmukh 2, Anuja R. Mahangade3, Shivani V. Ujja inkar4
+K.K Wagh Institute of Engineering and Education Research, Nashik, India
+Depart ment of Co mputer Engineering
+Abstrac t:
+According to estimates by the world Health organization, about 285 million people suffer fro m so me kind of v isual disabilit ies of
+which 39 million are blind, resulting in 0.7 of the word population. As many v isual impaired peoples in the word they are unable
+to recognize the people who is standing in front of them and some peoples who have problem to re me mbe r na me of the person.
+They can easily recognize the person using this system. A co mputer vision technique and image ana lysis can help v isually
+the home using face identification and spoofing detection system. This system also provide feature to add newly known people
+nd keep records of all peoples visiting their ho me.
+Ke ywor ds: face-recognition, spoofing detection, visually-impaired, system architecture.
+INTRODUCTION
+The facia l ana lysis can be used to e xtract very useful and
+relevant information in order to help people with visual
+impairment in several of its tasks daily providing them with a
+greater degree of autonomy and security. Facia l recognition"
+efd308393b573e5410455960fe551160e1525f49,Tracking Persons-of-Interest via Unsupervised Representation Adaptation,"Tracking Persons-of-Interest via
+Unsupervised Representation Adaptation
+Shun Zhang, Jia-Bin Huang, Jongwoo Lim, Yihong Gong, Jinjun Wang,
+Narendra Ahuja, and Ming-Hsuan Yang"
+ef48f1d8ec88dabbf7253cb1c8a224cb95f604af,Survey on Video Analysis of Human Walking Motion,"International Journal of Signal Processing, Image Processing and Pattern Recognition
+Vol.7, No.3 (2014), pp.99-122
+http://dx.doi.org/10.14257/ijsip.2014.7.3.10
+Survey on Video Analysis of Human Walking Motion
+S. Nissi Paul and Y. Jayanta Singh
+Dept. Computer Science Engineering and information Technology
+Don Boco College of Engineering and Technology, Assam Don Bosco University
+Guwahati, Assam - India"
+efa2b259407b5b9171dd085061d05b72b6309eb0,"Egocentric Activity Recognition Using HOG , HOF , MBH and Combined features","International Journal on Future Revolution in Computer Science & Communication Engineering
+Volume: 3 Issue: 8
+_______________________________________________________________________________________________
+74 – 79
+ISSN: 2454-4248
+Egocentric Activity Recognition Using HOG, HOF, MBH and
+Combined features
+K. P. Sanal Kumar
+Research Scholar
+Dept. of CSE
+Annamalai University
+R. Bhavani
+Professor
+Dept. of CSE
+Annamalai University"
+ef230e3df720abf2983ba6b347c9d46283e4b690,QUIS-CAMPI: an annotated multi-biometrics data feed from surveillance scenarios,"Page 1 of 20
+QUIS-CAMPI: An Annotated Multi-biometrics Data Feed From
+Surveillance Scenarios
+João Neves1,*, Juan Moreno2, Hugo Proença3
+IT - Instituto de Telecomunicações, University of Beira Interior
+Department of Computer Science, University of Beira Interior
+IT - Instituto de Telecomunicações, University of Beira Interior"
+ef4ecb76413a05c96eac4c743d2c2a3886f2ae07,Modeling the importance of faces in natural images,"Modeling the Importance of Faces in Natural Images
+Jin B.a, Yildirim G.a, Lau C.a, Shaji A.a, Ortiz Segovia M.b and S¨usstrunk S.a
+EPFL, Lausanne, Switzerland;
+Oc´e, Paris, France"
+efef00465e1b2f4003e838e50f9c8fa1c8ffaf3e,SceneNet: A Perceptual Ontology for Scene Understanding,"SceneNet: A Perceptual Ontology for Scene
+Understanding
+Ilan Kadar and Ohad Ben-Shahar
+Ben-Gurion University of the Negev"
+ef2084979a3191403c1b8b48f503d06f346afb8f,Une méthode de reconnaissance des expressions du visage basée sur la perception,"Une m´ethode de reconnaissance des expressions du
+visage bas´ee sur la perception
+Rizwan Khan, Alexandre Meyer, Hubert Konik, Saida Bouakaz
+To cite this version:
+Rizwan Khan, Alexandre Meyer, Hubert Konik, Saida Bouakaz. Une m´ethode de reconnais-
+sance des expressions du visage bas´ee sur la perception. RFIA 2012 (Reconnaissance des
+Formes et Intelligence Artificielle), Jan 2012, Lyon, France. pp.978-2-9539515-2-3, 2012. <hal-
+00660976>
+HAL Id: hal-00660976
+https://hal.archives-ouvertes.fr/hal-00660976
+Submitted on 19 Jan 2012
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destin´ee au d´epˆot et `a la diffusion de documents
+scientifiques de niveau recherche, publi´es ou non,"
+ef66ed8d8db41f67048d077fd4b772c8ba748090,Reservoir Computing Hardware with Cellular Automata,"Reservoir Computing Hardware with
+Cellular Automata
+Alejandro Mor´an, Christiam F. Frasser and Josep L. Rossell´o
+Electronic Engineering Group, Physics Department, Universitat de les Illes Balears,
+Spain.
+E-mail:
+June 22, 2018"
+ef75007cd6e5b990d09e7f3c4ba119be6c2546fb,Lecture 20: Object Recognition 20.1 Introduction 20.2.1 Neocognitron,"Chapter 20
+Lecture 20: Object recognition
+0.1 Introduction
+In its simplest form, the problem of recognition is posed as a binary classification task, namely distin-
+guishing between a single object class and background class. Such a classification task can be turned
+into a detector by sliding it across the image (or image pyramid), and classifying each local window.
+Classifier based methods have defined their own family of object models. Driven by advances in
+machine learning, a common practice became to through a bunch of features into the last published
+lgorithm. However, soon became clear that such an approach, in which the research gave up into trying
+to have a well defined physical model of the object, hold a lot of promise. In many cases, the use of a
+specific classifier has driven the choice of the object representation and not the contrary. In classifier-
+ased models, the preferred representations are driven by efficiency constraints and by the characteristics
+of the classifier (e.g., additive models, SVMs, neural networks, etc.).
+0.2 Neural networks
+Although neural networks can be trained in other settings than a purely discriminative framework, some
+of the first classifier based approaches used neural networks to build the classification function. Many
+urrent approaches, despite of having a different inspiration, still follow an architecture motivated by
+neural networks.
+0.2.1 Neocognitron
+The Neocognitron, developed by Fukushima in the 80 [8], consisted on a multilayered network with"
+ef3697668eb643de27995827c630cfd029b10c37,Online self-supervised multi-instance segmentation of dynamic objects,"014 IEEE International Conference on Robotics & Automation (ICRA)
+Hong Kong Convention and Exhibition Center
+May 31 - June 7, 2014. Hong Kong, China
+978-1-4799-3685-4/14/$31.00 ©2014 IEEE"
+ef247c194162f76eb8d44b1f83c25a4002ab69a6,An Effective Profile Based Video Browsing System for e- Learning,"An Effective Profile Based Video Browsing System for e-
+Learning
+S. C. Premaratne, D. D. Karunaratna and K. P. Hewagamage
+University of Colombo School of Computing, Sri Lanka"
+efcedd5750f57f4c7f748783e91918e0f42da61f,Global Haar-Like Features: A New Extension of Classic Haar Features for Efficient Face Detection in Noisy Images,"Global Haar-like Features:
+A New Extension of Classic Haar Features for
+Ef‌f‌icient Face Detection in Noisy Images
+Mahdi Rezaei(cid:63), Hossein Ziaei Nafchi‡, and Sandino Morales†
+(cid:63)The University of Auckland, New Zealand
+Synchromedia Laboratory, ´Ecole de Technologie Sup´erieure, Canada
+The University of Auckland, New Zealand"
+ef032afa4bdb18b328ffcc60e2dc5229cc1939bc,Attribute-enhanced metric learning for face retrieval,"Fang and Yuan EURASIP Journal on Image and Video
+Processing (2018) 2018:44
+https://doi.org/10.1186/s13640-018-0282-x
+EURASIP Journal on Image
+nd Video Processing
+RESEARCH
+Open Access
+Attribute-enhanced metric learning for
+face retrieval
+Yuchun Fang*
+nd Qiulong Yuan"
+ef9081d153f96b96183666a5086c63cecf2f33e6,3D Face Recognition Using Radon Transform and Symbolic PCA,"International Journal of Electronics and Computer Science Engineering 2342
+Available Online at www.ijecse.org ISSN- 2277-1956
+D Face Recognition Using Radon Transform and
+Symbolic PCA
+P. S. Hiremath 1, Manjunath Hiremath 2
+2Departmentof Computer Science
+Gulbarga University, Gulbarga-585106
+Karnataka, India"
+ef5531711a69ed687637c48930261769465457f0,Studio2Shop: from studio photo shoots to fashion articles,"Studio2Shop: from studio photo shoots to fashion articles
+Julia Lasserre1, Katharina Rasch1 and Roland Vollgraf
+Zalando Research, Muehlenstr. 25, 10243 Berlin, Germany
+Keywords:
+omputer vision, deep learning, fashion, item recognition, street-to-shop"
+ef559d5f02e43534168fbec86707915a70cd73a0,DeepInsight: Multi-Task Multi-Scale Deep Learning for Mental Disorder Diagnosis,"DING, HUO, HU, LU: DEEPINSIGHT
+DeepInsight: Multi-Task Multi-Scale Deep
+Learning for Mental Disorder Diagnosis
+Mingyu Ding1
+Yuqi Huo2
+Jun Hu2
+Zhiwu Lu1
+School of Information
+Renmin University of China
+Beijing, 100872, China
+Beijing Key Laboratory
+of Big Data Management
+nd Analysis Methods
+Beijing, 100872, China"
+efa08283656714911acff2d5022f26904e451113,Active Object Localization in Visual Situations,"Active Object Localization in Visual Situations
+Max H. Quinn, Anthony D. Rhodes, and Melanie Mitchell"
+ef52f1e2b52fd84a7e22226ed67132c6ce47b829,Online Eye Status Detection in the Wild with Convolutional Neural Networks,
+efe208a03e2f75ddcebf8bb0f10b1c0bea4824be,A data set for evaluating the performance of multi-class multi-object video tracking,"A data set for evaluating the performance of multi-class multi-object
+video tracking
+Avishek Chakrabortya, Victor Stamatescua, Sebastien C. Wongb, Grant Wigleya, David Kearneya
+Computational Learning Systems Laboratory, School of Information Technology and Mathematical
+Sciences, University of South Australia, Mawson Lakes, SA, Australia; bDefence Science and
+Technology Group, Edinburgh, SA, Australia"
+efa65394d0ec5a16ecd57075951016502c541c0d,The Gap of Semantic Parsing: A Survey on Automatic Math Word Problem Solvers,"The Gap of Semantic Parsing: A Survey on Automatic
+Math Word Problem Solvers
+Dongxiang Zhang, Lei Wang, Nuo Xu, Bing Tian Dai and Heng Tao Shen"
+ef999ab2f7b37f46445a3457bf6c0f5fd7b5689d,Improving face verification in photo albums by combining facial recognition and metadata with cross-matching,"Calhoun: The NPS Institutional Archive
+DSpace Repository
+Theses and Dissertations
+. Thesis and Dissertation Collection, all items
+017-12
+Improving face verification in photo albums by
+ombining facial recognition and metadata
+with cross-matching
+Bouthour, Khoubeib
+Monterey, California: Naval Postgraduate School
+http://hdl.handle.net/10945/56868
+Downloaded from NPS Archive: Calhoun"
+ef473c96dde98e2015b2d135a17a2d734319649a,Playlist Generation using Facial Expression Analysis and Task Extraction,"Pobrane z czasopisma Annales AI- Informatica http://ai.annales.umcs.pl
+Data: 04/05/2018 16:53:32
+U M CS"
+ef4b5bcaad4c36d7baa7bc166bd1712634c7ad71,Towards Spatio-temporal Face Alignment in Unconstrained Conditions,
+efbe52289f71eca9a0aaa8a5362f73334fa6b23c,Face recognition based on LDA in manifold subspace,"EAI Endorsed Transactions
+on Context-aware Systems and Applications
+Research Article
+Face recognition based on LDA in manifold subspace
+Hung Phuoc Truong1, Tue-Minh Dinh Vo1 and Thai Hoang Le1, *
+Faculty of Information Technology, University of Science – Vietnam National University Ho Chi Minh city, 227 Nguyen
+Van Cu street, HCMc, Vietnam"
+c32b5f8d400cdfd4459b0dfdeccf011744df0b4b,Object Tracking Using Local Multiple Features and a Posterior Probability Measure,"Article
+Object Tracking Using Local Multiple Features and a
+Posterior Probability Measure
+Wenhua Guo *, Zuren Feng and Xiaodong Ren
+Systems Engineering Institute, State Key Laboratory for Manufacturing Systems Engineering,
+Xi’an Jiaotong University, Xi’an 710049, China; (Z.F.); (X.R.)
+* Correspondence: Tel.: +86-29-8266-7771
+Academic Editors: Xue-Bo Jin, Shuli Sun, Hong Wei and Feng-Bao Yang
+Received: 20 February 2017; Accepted: 28 March 2017; Published: 31 March 2017"
+c32fb755856c21a238857b77d7548f18e05f482d,Multimodal Emotion Recognition for Human-Computer Interaction: A Survey,"Multimodal Emotion Recognition for Human-
+Computer Interaction: A Survey
+School of Computer and Communication Engineering, University of Science and Technology Beijing, 100083 Beijing, China.
+Michele Mukeshimana, Xiaojuan Ban, Nelson Karani, Ruoyi Liu"
+c33289788ca69a55c7eefe6e672c82a0cac5a299,Semantic Video CNNs Through Representation Warping,"Semantic Video CNNs through Representation Warping
+Raghudeep Gadde1,3, Varun Jampani1,4 and Peter V. Gehler1,2,3
+MPI for Intelligent Systems,
+University of W¨urzburg
+Bernstein Center for Computational Neuroscience,
+NVIDIA"
+c3c73bb626efec988aadbac519c61810710282fe,Saccadic movements using eye-tracking technology in individuals with autism spectrum disorders: pilot study.,"Arq Neuropsiquiatr 2006;64(3-A):559-562
+SACCADIC MOVEMENTS USING EYE-TRACKING
+TECHNOLOGY IN INDIVIDUALS WITH AUTISM
+SPECTRUM DISORDERS
+Pilot study
+Marcos T. Mercadante, Elizeu C. Macedo, Patrícia M. Baptista,
+Cristiane S. Paula, José S. Schwartzman"
+c3beae515f38daf4bd8053a7d72f6d2ed3b05d88,ACL 2014 52nd Annual Meeting of the Association for Computational Linguistics TACL Papers,"ACL201452ndAnnualMeetingoftheAssociationforComputationalLinguisticsTACLPapersJune23-25,2014Baltimore,Maryland,USA"
+c3dc4f414f5233df96a9661609557e341b71670d,Utterance independent bimodal emotion recognition in spontaneous communication,"Tao et al. EURASIP Journal on Advances in Signal Processing 2011, 2011:4
+http://asp.eurasipjournals.com/content/2011/1/4
+RESEARCH
+Utterance independent bimodal emotion
+recognition in spontaneous communication
+Jianhua Tao*, Shifeng Pan, Minghao Yang, Ya Li, Kaihui Mu and Jianfeng Che
+Open Access"
+c3a1a3d13bf1cb2b9c054857b857c3fb9d7176f6,Détection de marqueurs affectifs et attentionnels de personnes âgées en interaction avec un robot. (Audio-visual detection of emotional (laugh and smile) and attentional markers for elderly people in social interaction with a robot),"Détection de marqueurs affectifs et attentionnels de
+personnes âgées en interaction avec un robot
+Fan Yang
+To cite this version:
+Fan Yang. Détection de marqueurs affectifs et attentionnels de personnes âgées en interaction
+vec un robot.
+Intelligence artificielle [cs.AI]. Université Paris-Saclay, 2015. Français. <NNT :
+015SACLS081>. <tel-01280505>
+HAL Id: tel-01280505
+https://tel.archives-ouvertes.fr/tel-01280505
+Submitted on 29 Feb 2016
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,"
+c348118690d2e6544ec1e68f904dbf9e5b6397bd,Video-to-Video Synthesis,"Video-to-Video Synthesis
+Ting-Chun Wang1, Ming-Yu Liu1, Jun-Yan Zhu2, Guilin Liu1,
+Andrew Tao1, Jan Kautz1, Bryan Catanzaro1
+NVIDIA, 2MIT CSAIL"
+c380aa240ebcdb8bf2cad4f30bcef2390fada091,Empty Cities: Image Inpainting for a Dynamic-Object-Invariant Space,"Empty Cities: Image Inpainting for a Dynamic-Object-Invariant Space
+Berta Bescos1, Jos´e Neira1, Roland Siegwart2 and Cesar Cadena2"
+c3dc704790e1a170919087baab0ad10d7df6c24e,Oxytocin in the socioemotional brain: implications for psychiatric disorders,"C l i n i c a l r e s e a r c h
+Oxytocin in the socioemotional brain:
+implications for psychiatric disorders
+Peter Kirsch, PhD
+Introduction
+During recent years, the neuropeptide oxytocin
+(OXT) has attracted enormous interest in neuroscien-
+tific research on social and emotional processes. Given
+the generally increased interest in social cognition in
+the area of psychiatric research, the number of publi-
+ations focusing on OXT in the context of mental dis-
+orders has also increased markedly in recent years. The
+role of OXT in the context of childbirth and lactation
+has long been studied; however, two lines of research
+have motivated investigation into the role of OXT in
+social behavior. First, animal research initiated by In-
+sel and Young1 on the role of OXT in maternal be-
+havior and bonding revealed that OXT in the central
+nervous system modulates social behavior. Second,
+in human research, a startling paper by Kosfeld et al2"
+c3de7c38493cfe67654411d77f47069cfa7b077b,Multiple context mere exposure: Examining the limits of liking.,"ISSN: 1747-0218 (Print) 1747-0226 (Online) Journal homepage: http://www.tandfonline.com/loi/pqje20
+Multiple context mere exposure: Examining the
+limits of liking
+Daniel de Zilva, Ben R. Newell & Chris J. Mitchell
+To cite this article: Daniel de Zilva, Ben R. Newell & Chris J. Mitchell (2015): Multiple context
+mere exposure: Examining the limits of liking, The Quarterly Journal of Experimental
+Psychology, DOI: 10.1080/17470218.2015.1057188
+To link to this article: http://dx.doi.org/10.1080/17470218.2015.1057188
+Accepted online: 29 Jun 2015.Published
+online: 06 Jul 2015.
+Submit your article to this journal
+Article views: 43
+View related articles
+View Crossmark data
+Full Terms & Conditions of access and use can be found at
+http://www.tandfonline.com/action/journalInformation?journalCode=pqje20
+Download by: [UNSW Library]
+Date: 05 October 2015, At: 22:09"
+c34911e9fefd987470edf8f620d9ce8f0030339d,"
+Autism, Emotion Recognition and the Mirror
+Neuron System: The Case of Music
+ ","Copyright © 2009 by MJM
+MJM 2009 12(2): 87-98
+FoCuS rEViEW
+Autism, Emotion Recognition and the Mirror
+Neuron System: The Case of Music
+Istvan Molnar-Szakacs*, Martha J. Wang, Elizabeth A. Laugeson,
+Katie Overy, Wai-Ling Wu, Judith Piggot"
+c3b037fd6fb4542f7ed18c194a03ae328bcca423,Random Binary Mappings for Kernel Learning and Efficient SVM,"Random Decision Stumps for
+Kernel Learning and Efficient SVM
+Gemma Roig *
+Xavier Boix *
+Luc Van Gool
+Computer Vision Lab, ETH Zurich, Switzerland
+* Both first authors contributed equally."
+c3b3636080b9931ac802e2dd28b7b684d6cf4f8b,Face Recognition via Local Directional Pattern,"International Journal of Security and Its Applications
+Vol. 7, No. 2, March, 2013
+Face Recognition via Local Directional Pattern
+Dong-Ju Kim*, Sang-Heon Lee and Myoung-Kyu Sohn
+Division of IT Convergence, Daegu Gyeongbuk Institute of Science & Technology
+50-1, Sang-ri, Hyeonpung-myeon, Dalseong-gun, Daegu, Korea."
+c3341286ece958e6b05df56d788456b61313380b,Estimating Attention of Faces due to its Growing Level of Emotions,"Estimating Attention of Faces due to its Growing Level of Emotions
+Ravi Kant Kumar*, Jogendra Garain, Dakshina Ranjan Kisku and Goutam Sanyal
+Department of Computer Science and Engineering
+National Institute of Technology
+Durgapur, India
+E-mail: {vit.ravikant, jogs.cse, drkisku,
+imperative
+nd feeling [2] of a person at that moment. Facial
+expression plays an
+in non-verbal
+ommunication as well as to predicting the behavior of the
+person. During a group discussion, our attention
+utomatically goes towards those participants who put
+more stressed on his words or talk in a sentimental or
+emphatic voice. Same phenomenon occurs with the non-
+verbal visual communication. The face reflecting the
+higher expression of a particular emotion draws more
+ttention [3, 4] in the discussion. A particular object (It
+lso may be face), which gives us more visualization is
+onsider as a salient object and this phenomenon is called"
+c390fb954a07ecee473e0704ac065875121f6137,Heterogeneous Tensor Decomposition for Clustering via Manifold Optimization,"IEEE TRANSACTIONS ON XXXX, VOL. XX, NO. X, APRIL 2015
+Heterogeneous Tensor Decomposition for
+Clustering via Manifold Optimization
+Yanfeng Sun, Junbin Gao, Xia Hong, Bamdev Mishra and Baocai Yin"
+c398684270543e97e3194674d9cce20acaef3db3,Comparative Face Soft Biometrics for Human Identification,"Chapter 2
+Comparative Face Soft Biometrics for
+Human Identification
+Nawaf Yousef Almudhahka, Mark S. Nixon and Jonathon S. Hare"
+c3285a1d6ec6972156fea9e6dc9a8d88cd001617,Extreme 3D Face Reconstruction: Seeing Through Occlusions,
+c3ea346826467f04779e55679679c7c7e549c8a2,Learning Short-Cut Connections for Object Counting,"OÑORO-RUBIO, NIEPERT, LÓPEZ-SASTRE: LEARNING SHORT-CUT CONNECTIONS. . .
+Learning Short-Cut Connections for Object
+Counting
+Daniel Oñoro-Rubio1
+Mathias Niepert1
+Roberto J. López-Sastre2
+SysML,
+NEC Lab Europe,
+Heidelberg, Germany
+GRAM,
+University of Alcalá,
+Alcalá de Henares, Spain"
+c3b5ec36a29b320a576f6b9e58188b505becb4aa,Practical Gauss-Newton Optimisation for Deep Learning,"Practical Gauss-Newton Optimisation for Deep Learning
+Aleksandar Botev 1 Hippolyt Ritter 1 David Barber 1 2"
+c391029d67e5a0c352f9f328b838cb19528336fe,Responding to Other People’s Direct Gaze: Alterations in Gaze Behavior in Infants at Risk for Autism Occur on Very Short Timescales,"J Autism Dev Disord (2017) 47:3498–3509
+DOI 10.1007/s10803-017-3253-7
+ORIGINAL PAPER
+Responding to Other People’s Direct Gaze: Alterations in Gaze
+Behavior in Infants at Risk for Autism Occur on Very Short
+Timescales
+Pär Nyström1
+· Sven Bölte2,3 · Terje Falck‑Ytter1,2 · The EASE Team
+Published online: 4 September 2017
+© The Author(s) 2017. This article is an open access publication"
+c3bcc4ee9e81ce9c5c0845f34e9992872a8defc0,A New Scheme for Image Recognition Using Higher-Order Local Autocorrelation and Factor Analysis,"MVA2005 IAPR Conference on Machine VIsion Applications, May 16-18, 2005 Tsukuba Science City, Japan
+A New Scheme for Image Recognition Using Higher-Order Local
+Autocorrelation and Factor Analysis
+Naoyuki Nomotoy, Yusuke Shinoharay, Takayoshi Shirakiy, Takumi Kobayashiy, Nobuyuki Otsuy yyy
+yThe University of Tokyo
+Tokyo, Japan
+yyyAIST
+Tukuba, Japan
+f shiraki, takumi, otsug"
+c324986c8599fee2f6da7b59751e89ed9624afa3,Dual Quaternions as Constraints in 4D-DPM Models for Pose Estimation,"Article
+Dual Quaternions as Constraints in 4D-DPM Models
+for Pose Estimation
+Enrique Martinez-Berti *, Antonio-José Sánchez-Salmerón and Carlos Ricolfe-Viala
+Departamento de Ingeniería de Sistemas y Automática, Instituto de Automática e informática Industrial,
+Universitat Politècnica de València, València, 46022, Spain ; (A.-J.S.-S.);
+(C.R.-V.)
+* Correspondence:
+Received: 1 June 2017; Accepted: 13 August 2017; Published: 19 August 2017"
+c32383330df27625592134edd72d69bb6b5cff5c,Intrinsic Illumination Subspace for Lighting Insensitive Face Recognition,"IEEE TRANSACTIONS ON SYSTEMS, MAN, AND CYBERNETICS—PART B: CYBERNETICS, VOL. 42, NO. 2, APRIL 2012
+Intrinsic Illumination Subspace for Lighting
+Insensitive Face Recognition
+Chia-Ping Chen and Chu-Song Chen, Member, IEEE"
+c3955d74f2a084a8ddcbd7e73952c326e81804b2,Mutual Information Neural Estimation,"Mutual Information Neural Estimation
+Mohamed Ishmael Belghazi 1 Aristide Baratin 1 2 Sai Rajeswar 1 Sherjil Ozair 1 Yoshua Bengio 1 3 4
+Aaron Courville 1 3 R Devon Hjelm 1 4"
+c32f04ccde4f11f8717189f056209eb091075254,Analysis and Synthesis of Behavioural Specific Facial Motion,"Analysis and Synthesis of Behavioural Specific
+Facial Motion
+Lisa Nanette Gralewski
+A dissertation submitted to the University of Bristol in accordance with the requirements
+for the degree of Doctor of Philosophy in the Faculty of Engineering, Department of
+Computer Science.
+February 2007
+71657 words"
+c338045f80ab3465bdc381f2b1791744b060fbb3,A Diffusion and Clustering-Based Approach for Finding Coherent Motions and Understanding Crowd Scenes,"A Diffusion and Clustering-based Approach for
+Finding Coherent Motions and Understanding
+Crowd Scenes
+Weiyao Lin, Yang Mi, Weiyue Wang, Jianxin Wu, Jingdong Wang, and Tao Mei"
+c34ec5dd51880acf72336e85e4e45da5fcfc75f4,LEGO: Learning Edge with Geometry all at Once by Watching Videos,"LEGO: Learning Edge with Geometry all at Once by Watching Videos
+Zhenheng Yang1 Peng Wang2 Yang Wang2 Wei Xu3 Ram Nevatia1
+University of Southern California 2Baidu Research
+National Engineering Laboratory for Deep Learning Technology and Applications"
+c3d60c8b1dff411982ccd8875496f1e74d2cefc4,Multi-view X-ray R-CNN,"Multi-view X-ray R-CNN
+Jan-Martin O. Steitz[0000−0002−3549−312X], Faraz
+Saeedan
+Department of Computer Science, TU Darmstadt, Darmstadt, Germany"
+c317181fa1de2260e956f05cd655642607520a4f,Objective Classes for Micro-Facial Expression Recognition,"Research Article
+Research
+Article for submission to journal
+Subject Areas:
+omputer vision, pattern recognition,
+feature descriptor
+Keywords:
+micro-facial expression, expression
+recognition, action unit
+Moi Hoon Yap
+e-mail:
+Objective Classes for
+Micro-Facial Expression
+Recognition
+Adrian K. Davison1, Walied Merghani2 and
+Moi Hoon Yap3
+Centre for Imaging Sciences, University of
+Manchester, Manchester, United Kingdom
+Sudan University of Science and Technology,
+Khartoum, Sudan"
+c36f933a46e1d1c51785295bb97154df9ceada36,"Optimizing Program Performance via Similarity, Using a Feature-Agnostic Approach","Optimizing Program Performance via Similarity,
+Using a Feature-agnostic Approach
+Rosario Cammarota, Laleh Aghababaie Beni
+Alexandru Nicolau, and Alexander V. Veidenbaum
+Department of Computer Science, University of California Irvine, Irvine, USA"
+c33522fc5d2cf92c5a10f32ba9416365944cdb85,Scaling the Scattering Transform: Deep Hybrid Networks,"Scaling the Scattering Transform: Deep Hybrid Networks
+Edouard Oyallon
+D´epartement Informatique
+Ecole Normale Sup´erieure
+Eugene Belilovsky
+University of Paris-Saclay
+INRIA and KU Leuven
+Paris, France
+Sergey Zagoruyko
+Universit´e Paris-Est
+´Ecole des Ponts ParisTech
+Paris, France"
+c3599c91d0e3473178c1578b731b03e4be5d3ff1,Improving Resource Efficiency in Cloud Computing a Dissertation Submitted to the Department of Electrical Engineering and the Committee on Graduate Studies of Stanford University in Partial Fulfillment of the Requirements for the Degree of Doctor of Philosophy,"IMPROVING RESOURCE EFFICIENCY IN CLOUD COMPUTING
+A DISSERTATION
+SUBMITTED TO THE DEPARTMENT OF ELECTRICAL
+ENGINEERING
+AND THE COMMITTEE ON GRADUATE STUDIES
+OF STANFORD UNIVERSITY
+IN PARTIAL FULFILLMENT OF THE REQUIREMENTS
+FOR THE DEGREE OF
+DOCTOR OF PHILOSOPHY
+Christina Delimitrou
+August 2015"
+c30e4e4994b76605dcb2071954eaaea471307d80,Feature Selection for Emotion Recognition based on Random Forest,
+c37a971f7a57f7345fdc479fa329d9b425ee02be,A Novice Guide towards Human Motion Analysis and Understanding,"A Novice Guide towards Human Motion Analysis and Understanding
+Dr. Ahmed Nabil Mohamed"
+c35724d227eb1e3d680333469fb9b94c677e871f,Multi-view Generative Adversarial Networks,"Under review as a conference paper at ICLR 2017
+MULTI-VIEW GENERATIVE ADVERSARIAL NET-
+WORKS
+Mickaël Chen
+Sorbonne Universités, UPMC Univ Paris 06, UMR 7606, LIP6, F-75005, Paris, France
+Ludovic Denoyer
+Sorbonne Universités, UPMC Univ Paris 06, UMR 7606, LIP6, F-75005, Paris, France"
+c3fb2399eb4bcec22723715556e31c44d086e054,Face recognition based on SIGMA sets of image features,"014 IEEE International Conference on Acoustic, Speech and Signal Processing (ICASSP)
+978-1-4799-2893-4/14/$31.00 ©2014 IEEE
+. INTRODUCTION"
+c3293ef751d3fb041bd3016fbc3fa5cc16f962fa,Inferencing Based on Unsupervised Learning of Disentangled Representations,"Accepted as a conference paper at the European Symposium on Artificial Neural
+Networks, Computational Intelligence and Machine Learning (ESANN) 2018
+Inferencing Based on Unsupervised Learning
+of Disentangled Representations
+Tobias Hinz and Stefan Wermter ∗
+Universit¨at Hamburg, Department of Informatics, Knowledge Technology
+Vogt-Koelln-Str. 30, 22527 Hamburg, Germany
+http://www.informatik.uni-hamburg.de/WTM/"
+c37de914c6e9b743d90e2566723d0062bedc9e6a,Joint and Discriminative Dictionary Learning for Facial Expression Recognition,"©2016 Society for Imaging Science and Technology
+DOI: 10.2352/ISSN.2470-1173.2016.11.IMAWM-455
+Joint and Discriminative Dictionary Learning
+Expression Recognition
+for Facial
+Sriram Kumar, Behnaz Ghoraani, Andreas Savakis"
+c4f632a1b6faa43c217e63c58a4764511104c303,Extracting Pathlets FromWeak Tracking Data,"Extracting Pathlets From Weak Tracking Data∗
+Kevin Streib
+James W. Davis
+Dept. of Computer Science and Engineering
+Ohio State University, Columbus, OH 43210"
+c4a024d73902462275879fa6133bff22134fcc7e,When crowds hold privileges: Bayesian unsupervised representation learning with oracle constraints,"When crowds hold privileges: Bayesian unsupervised
+representation learning with oracle constraints
+Theofanis Karaletsos
+Computational Biology Program, Sloan Kettering Institute
+275 York Avenue, New York, USA
+Serge Belongie
+Cornell Tech
+11 Eighth Avenue #302, New York, USA
+Gunnar R¨atsch
+Computational Biology Program, Sloan Kettering Institute
+275 York Avenue, New York, USA"
+c44e2fa02f0b578a2cc92795fe6a4c578f65dc97,A Method for Copyright Protection of Line Drawings,"A Method for Copyright Protection of Line Drawings
+Weihan Sun*, Koichi Kise*
+* Graduate School of Engineering, Osaka Prefecture University, Osaka
+E-mail:"
+c4f1fcd0a5cdaad8b920ee8188a8557b6086c1a4,The Ignorant Led by the Blind: A Hybrid Human–Machine Vision System for Fine-Grained Categorization,"Int J Comput Vis (2014) 108:3–29
+DOI 10.1007/s11263-014-0698-4
+The Ignorant Led by the Blind: A Hybrid Human–Machine Vision
+System for Fine-Grained Categorization
+Steve Branson · Grant Van Horn · Catherine Wah ·
+Pietro Perona · Serge Belongie
+Received: 7 March 2013 / Accepted: 8 January 2014 / Published online: 20 February 2014
+© Springer Science+Business Media New York 2014"
+c46bcb02f92612cf525fd84c6cc79b0638c2eac9,New Fuzzy LBP Features for Face Recognition,"New Fuzzy LBP Features for Face Recognition
+Abdullah Gubbia, Mohammed Fazle Azeemb Zahid Ansaric
+Department of Electronics and Communications, P.A. College of Engineering, Mangalore, India,
+Contact:
+Department of Electrical Engineering, Aligarh Muslim University, Aligarh, India,
+Department of Computer Science, P.A. College of Engineering, Mangalore, India,
+zahid
+Contact:
+Contact:
+There are many Local texture features each very in way they implement and each of the Algorithm trying
+improve the performance. An attempt is made in this paper to represent a theoretically very simple and com-
+putationally effective approach for face recognition. In our implementation the face image is divided into 3x3
+sub-regions from which the features are extracted using the Local Binary Pattern (LBP) over a window, fuzzy
+membership function and at the central pixel. The LBP features possess the texture discriminative property
+nd their computational cost is very low. By utilising the information from LBP, membership function, and
+entral pixel, the limitations of traditional LBP is eliminated. The bench mark database like ORL and Shef‌f‌ield
+Databases are used for the evaluation of proposed features with SVM classifier. For the proposed approach K-fold
+nd ROC curves are obtained and results are compared.
+Keywords : Face Recognition, Fuzzy Logic, Information Set, Local Binary Pattern, SVM.
+. INTRODUCTION"
+c4c4e5ff454584ae6a68d25b36bfc860e9a893a0,"Real-Time Facial Recognition System—Design, Implementation and Validation","Journal of Signal Processing Theory and Applications
+(2013) 1: 1-18
+doi:10.7726/jspta.2013.1001
+Research Article
+Real-Time Facial Recognition System—Design,
+Implementation and Validation
+M. Meenakshi*
+Received 29 August 2012; Published online November 10, 2012
+© The author(s) 2012. Published with open access at uscip.org"
+c43862db5eb7e43e3ef45b5eac4ab30e318f2002,Provable Self-Representation Based Outlier Detection in a Union of Subspaces,"Provable Self-Representation Based Outlier Detection in a Union of Subspaces
+Chong You, Daniel P. Robinson, Ren´e Vidal
+Johns Hopkins University, Baltimore, MD, 21218, USA"
+c4827fe8002ea61a2748b78369afe3a0747d1a0c,Towards Optimal Naive Bayes Nearest Neighbor,"Towards Optimal Naive Bayes Nearest Neighbor
+R´egis Behmo1, Paul Marcombes1,2, Arnak Dalalyan2, and V´eronique Prinet1
+NLPR / LIAMA, Institute of Automation, Chinese Academy of Sciences(cid:2)
+IMAGINE, LIGM, Universit´e Paris-Est"
+c4dcf41506c23aa45c33a0a5e51b5b9f8990e8ad,Understanding Activity: Learning the Language of Action,"Understanding Activity: Learning the Language of Action
+Randal Nelson and Yiannis Aloimonos
+Univ. of Rochester and Maryland
+.1 Overview
+Understanding observed activity is an important
+problem, both from the standpoint of practical applications,
+nd as a central issue in attempting to describe the
+phenomenon of intelligence. On the practical side, there are a
+large number of applications that would benefit from
+improved machine ability to analyze activity. The most
+prominent are various surveillance scenarios. The current
+emphasis on homeland security has brought this issue to the
+forefront, and resulted in considerable work on mostly low-
+level detection schemes. There are also applications in
+medical diagnosis and household assistants that, in the long
+run, may be even more important. In addition, there are
+numerous scientific projects, ranging from monitoring of
+weather conditions to observation of animal behavior that
+would be facilitated by automatic understanding of activity.
+From a scientific standpoint, understanding activity"
+c42a8969cd76e9f54d43f7f4dd8f9b08da566c5f,Towards Unconstrained Face Recognition Using 3D Face Model,"Towards Unconstrained Face Recognition
+Using 3D Face Model
+Zahid Riaz1, M. Saquib Sarfraz2 and Michael Beetz1
+Intelligent Autonomous Systems (IAS), Technical University of Munich, Garching
+Computer Vision Research Group, COMSATS Institute of Information
+Technology, Lahore
+Germany
+Pakistan
+. Introduction
+Over the last couple of decades, many commercial systems are available to identify human
+faces. However, face recognition is still an outstanding challenge against different kinds of
+real world variations especially facial poses, non-uniform lightings and facial expressions.
+Meanwhile the face recognition technology has extended its role from biometrics and security
+pplications to human robot interaction (HRI). Person identity is one of the key tasks while
+interacting with intelligent machines/robots, exploiting the non intrusive system security
+nd authentication of the human interacting with the system. This capability further helps
+machines to learn person dependent traits and interaction behavior to utilize this knowledge
+for tasks manipulation. In such scenarios acquired face images contain large variations which
+demands an unconstrained face recognition system.
+Fig. 1. Biometric analysis of past few years has been shown in figure showing the"
+c48bde5b9ff17b708ab3e4f7c62a31a46c77f2f1,Nested Sparse Quantization for Efficient Feature Coding,"Nested Sparse Quantization
+for Ef‌f‌icient Feature Coding
+Xavier Boix1(cid:63), Gemma Roig1(cid:63), and Luc Van Gool1,2 (cid:63)(cid:63)
+Computer Vision Lab, ETH Zurich, Switzerland,
+KU Leuven, Belgium"
+c4f3375dab1886f37f542d998e61d8c30a927682,Beyond Shared Hierarchies: Deep Multitask Learning through Soft Layer Ordering,"Under review as a conference paper at ICLR 2018
+BEYOND SHARED HIERARCHIES: DEEP MULTITASK
+LEARNING THROUGH SOFT LAYER ORDERING
+Anonymous authors
+Paper under double-blind review"
+c4fed8f23bc9ff1ffc27edb12970963ecf2dead9,Statistical Models and Optimization Algorithms for High-Dimensional Computer Vision Problems,
+c4d3033356066ef8133f03f4060bb8cad842918f,Inference of quantized neural networks on heterogeneous all-programmable devices,"Inference of Quantized Neural Networks
+on Heterogeneous All-Programmable Devices
+Thomas B. Preußer
+Marie Skłodowska-Curie Fellow
+Xilinx Research Labs
+Giulio Gambardella
+Xilinx Research Labs
+Dublin, Ireland
+Nicholas Fraser
+Xilinx Research Labs
+Dublin, Ireland
+Michaela Blott
+Xilinx Research Labs
+Dublin, Ireland
+Dublin, Ireland"
+c4a5932f33e6f4ccbfc7218fac58350a530d0ad6,Face Recognition using Discriminant Face Features Extraction method,"Face Recognition using Discriminant Face Features Extraction method
+Miss. Poonam S. Sharma1, Prof. Nitin R. Chopde2
+Student of Master of Engineering in (CSE), G.H. Raisoni college of Engineering and Technology,
+2Assistant professor Department of (CSE), G.H. Raisoni College of Engineering and Technology,
+Amravati, India
+Amravati, India"
+c48c452f26e54f37faaf025ca3c76b33ce3e40f6,Incremental learning of latent structural SVM for weakly supervised image classification,"INCREMENTAL LEARNING OF LATENT STRUCTURAL SVM FOR WEAKLY SUPERVISED
+IMAGE CLASSIFICATION
+Thibaut Durand (1)
+Nicolas Thome (1)
+Matthieu Cord (1)
+David Picard (2)
+(1) Sorbonne Universit´es, UPMC Univ Paris 06, UMR 7606, LIP6, F-75005, Paris, France
+(2) ETIS/ENSEA, University of Cergy-Pontoise, CNRS, UMR 8051, France"
+c43490eb0a3ce18fb2326ef1d0828664b60e73e2,Is This Car Looking at You? How Anthropomorphism Predicts Fusiform Face Area Activation when Seeing Cars,"RESEARCH ARTICLE
+Is This Car Looking at You? How
+Anthropomorphism Predicts Fusiform Face
+Area Activation when Seeing Cars
+Simone Ku¨ hn1*, Timothy R. Brick1, Barbara C. N. Mu¨ ller2,3, Ju¨ rgen Gallinat4,5
+. Center for Lifespan Psychology, Max Planck Institute for Human Development, Lentzeallee 94, 14195,
+Berlin, Germany, 2. Behavioural Science Institute, Radboud University of Nijmegen, P. O. Box 9104, 6500 HE,
+Nijmegen, Netherlands, 3. Department of Psychology, Ludwig-Maximilian University, Leopoldstrasse 13,
+80802, Mu¨ nchen, Germany, 4. Clinic for Psychiatry and Psychotherapy, Charite´ University Medicine, St.
+Hedwig-Krankenhaus, Große Hamburger Straße 5–11, 10115, Berlin, Germany, 5. Clinic and Policlinic for
+Psychiatry and Psychotherapy, University Clinic Hamburg-Eppendorf, Martinistraße 52, 20246, Hamburg,
+Germany"
+c4b3a1cf8842da8c64f7abf4a352583d5fd9762c,Gait recognition using sub-vector quantisation technique,"Int. J. Machine Intelligence and Sensory Signal Processing, Vol. 1, No. 1, 2013
+Gait recognition using sub-vector quantisation
+technique
+Neel K. Pandey*
+Department of Electrical Engineering and Trades,
+Faculty of Engineering and Trades,
+Manukau Institute of Technology,
+Private Bag 94006, Manukau 2241, Auckland, New Zealand
+E-mail:
+*Corresponding author
+Waleed H. Abdulla and Zoran Salcic
+Department of Electrical and Computer Engineering,
+The University of Auckland,
+Private Bag 92019, Auckland Mail Centre,
+Auckland 1142, New Zealand
+E-mail:
+E-mail:"
+c45183ec95f89aff793a2629a0520006b4153d6a,Entropy-based template analysis in face biometric identification systems,"SIViP (2013) 7:493–505
+DOI 10.1007/s11760-013-0451-4
+ORIGINAL PAPER
+Entropy-based template analysis in face biometric identification
+systems
+Maria De Marsico · Michele Nappi · Daniel Riccio ·
+Genoveffa Tortora
+Received: 19 December 2011 / Revised: 7 June 2012 / Accepted: 10 October 2012 / Published online: 17 March 2013
+© Springer-Verlag London 2013"
+c4baa3d2fe702d3e96c500274f7fd9e63f8b3d6d,Pedestrian Detection Optimization Based on Random Filtering,"Pedestrian Detection Optimization Based on
+Random Filtering
+Victor Hugo Cunha de Melo, Samir Le˜ao, William Robson Schwartz
+Universidade Federal de Minas Gerais
+Department of Computer Science
+Belo Horizonte, Minas Gerais, Brazil
+Email: {victorhcmelo, samirleao,"
+ea9cecb5b619cfa4afef6c70e193c2303696a4f9,Integration of Probabilistic Pose Estimates from Multiple Views,"Integration of Probabilistic Pose Estimates From
+Multiple Views
+¨Ozg¨ur Erkent, Dadhichi Shukla and Justus Piater
+Institute of Computer Science,
+University of Innsbruck"
+ea94d834f912f092618d030f080de8395fe39b3f,Joint autoencoders : a flexible meta-learning framework,"Under review as a conference paper at ICLR 2018
+JOINT AUTOENCODERS: A FLEXIBLE META-LEARNING
+FRAMEWORK
+Anonymous authors
+Paper under double-blind review"
+ea3503e9dc74b30b4c98a89843fe2ea0dc9221ab,Human Action Recognition Using LBP-TOP as Sparse Spatio-Temporal Feature Descriptor,"Human Action Recognition Using LBP-TOP as Sparse
+Spatio-Temporal Feature Descriptor
+Riccardo Mattivi and Ling Shao
+Philips Research, Eindhoven, The Netherlands"
+eabdefeb685dd71a39417bf40247d206af4f9b9e,"Of Kith and Kin: Perceptual Enrichment, Expectancy, and Reciprocity in Face Perception.","657250 PSRXXX10.1177/1088868316657250Personality and Social Psychology ReviewCorrell et al.
+research-article2016
+Article
+Of Kith and Kin: Perceptual Enrichment,
+Expectancy, and Reciprocity in Face
+Perception
+Personality and Social Psychology Review
+1 –25
+© 2016 by the Society for Personality
+nd Social Psychology, Inc.
+Reprints and permissions:
+sagepub.com/journalsPermissions.nav
+DOI: 10.1177/1088868316657250
+pspr.sagepub.com
+Joshua Correll1, Sean M. Hudson1, Steffanie Guillermo1,
+nd Holly A. Earls1"
+eac6aee477446a67d491ef7c95abb21867cf71fc,A Survey of Sparse Representation: Algorithms and Applications,"JOURNAL
+A survey of sparse representation: algorithms and
+pplications
+Zheng Zhang, Student Member, IEEE, Yong Xu, Senior Member, IEEE,
+Jian Yang, Member, IEEE, Xuelong Li, Fellow, IEEE, and David Zhang, Fellow, IEEE"
+ead587db6b2b76726e98b17cb1fbf973a34ddf31,Development of an Efficient Face Recognition System based on Linear and Nonlinear Algorithms,"Development of an Efficient Face Recognition System based on Linear and Nonlinear Algorithms
+{tag} {/tag}
+International Journal of Computer Applications
+Foundation of Computer Science (FCS), NY, USA
+Volume 134
+Number 7
+Year of Publication: 2016
+Authors:
+Filani Araoluwa S., Adetunmbi Adebayo O.
+10.5120/ijca2016907932
+{bibtex}2016907932.bib{/bibtex}"
+ea533fac61db537fe1e1f351c98ae28db7272705,Theoretical Informatics and Applications Eye Localization for Face Recognition *,"Theoretical Informatics and Applications
+Informatique Th´eorique et Applications
+Will be set by the publisher
+EYE LOCALIZATION FOR FACE RECOGNITION ∗
+PAOLA CAMPADELLI, RAFFAELLA LANZAROTTI, GIUSEPPE LIPORI 1"
+ea5dd7125c73756d7d81e49fa9826198f533cff7,Appearance tracking by transduction in surveillance scenarios,"8th IEEE International Conference on Advanced Video and Signal-Based Surveillance, 2011
+978-1-4577-0845-9/11/$26.00 c(cid:13)2011 IEEE"
+eabbf37742b79147c3bcf42d376dbceaae869a01,Recurrent Multimodal Interaction for Referring Image Segmentation,"Recurrent Multimodal Interaction for Referring Image Segmentation
+Chenxi Liu1
+Zhe Lin2 Xiaohui Shen2
+Jimei Yang2 Xin Lu2 Alan Yuille1
+Johns Hopkins University1 Adobe Research2
+{cxliu,
+{zlin, xshen, jimyang,"
+ea079334121a0ba89452036e5d7f8e18f6851519,Unsupervised incremental learning of deep descriptors from video streams,"UNSUPERVISED INCREMENTAL LEARNING OF DEEP DESCRIPTORS
+FROM VIDEO STREAMS
+Federico Pernici and Alberto Del Bimbo
+MICC – University of Florence"
+eac1b644492c10546a50f3e125a1f790ec46365f,"Chained Multi-stream Networks Exploiting Pose, Motion, and Appearance for Action Classification and Detection","Chained Multi-stream Networks Exploiting Pose, Motion, and Appearance for
+Action Classification and Detection
+Mohammadreza Zolfaghari , Gabriel L. Oliveira, Nima Sedaghat, and Thomas Brox
+University of Freiburg
+Freiburg im Breisgau, Germany"
+eadf6cb8f16c507e4a73db33da201cde3d9b2f5a,PAD-Net: Multi-Tasks Guided Prediction-and-Distillation Network for Simultaneous Depth Estimation and Scene Parsing,"PAD-Net: Multi-Tasks Guided Prediction-and-Distillation Network
+for Simultaneous Depth Estimation and Scene Parsing
+Dan Xu1, Wanli Ouyang2, Xiaogang Wang3, Nicu Sebe1
+The University of Trento, 2The University of Sydney, 3The Chinese University of Hong Kong
+{dan.xu,"
+ea8cb4a79b211fb288f747bdd64b3fc36e11c0fc,Automatic Facial Action Unit Recognition by Modeling Their Semantic And Dynamic Relationships,"Chapter 10
+Automatic Facial Action Unit Recognition
+y Modeling Their Semantic And Dynamic
+Relationships
+Yan Tong, Wenhui Liao, and Qiang Ji"
+ea939d72d55c095e57fedaaf2aa49f596002c196,A Part based Modeling Approach for Invoice Parsing,
+ea638559b6dd6b5520f9abe2674b92c07873a157,Semantic Segmentation of Earth Observation Data Using Multimodal and Multi-scale Deep Networks,"Semantic Segmentation of Earth Observation
+Data Using Multimodal and Multi-scale Deep
+Networks
+Nicolas Audebert1,2, Bertrand Le Saux1, S´ebastien Lef`evre2
+ONERA, The French Aerospace Lab, F-91761 Palaiseau, France -
+{nicolas.audebert,bertrand.le
+Univ. Bretagne-Sud, UMR 6074, IRISA, F-56000 Vannes, France -"
+eaaf411826d129c2a31d997dc3f5f708a8186656,SDALF: Modeling Human Appearance with Symmetry-Driven Accumulation of Local Features,"SDALF: Modeling Human Appearance with
+Symmetry-Driven Accumulation of Local
+Features
+Loris Bazzani and Marco Cristani and Vittorio Murino"
+eaaec63bb86ee87d56f5844951143485ce84a4ea,GANtruth – an unpaired image-to-image translation method for driving scenarios,"GANtruth – an unpaired image-to-image translation
+method for driving scenarios
+Anonymous Author(s)
+Affiliation
+Address
+email"
+ea482bf1e2b5b44c520fc77eab288caf8b3f367a,Flexible Orthogonal Neighborhood Preserving Embedding,Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+ea6f5c8e12513dbaca6bbdff495ef2975b8001bd,Applying a Set of Gabor Filter to 2D-Retinal Fundus Image to Detect the Optic Nerve Head (ONH),"Applying a Set of Gabor Filter to 2D-Retinal Fundus Image
+to Detect the Optic Nerve Head (ONH)
+Rached Belgacem1,2*, Hédi Trabelsi2, Ines Malek3, Imed Jabri1
+Higher National School of engineering of Tunis, ENSIT, Laboratory LATICE (Information Technology and Communication and
+Electrical Engineering LR11ESO4), University of Tunis EL Manar. Adress: ENSIT 5, Avenue Taha Hussein, B. P. : 56, Bab
+Menara, 1008 Tunis; 2University of Tunis El-Manar, Tunis with expertise in Mechanic, Optics, Biophysics, Conference Master
+ISTMT, Laboratory of Research in Biophysics and Medical Technologies LRBTM Higher Institute of Medical Technologies of Tunis
+ISTMT, University of Tunis El Manar Address: 9, Rue Docteur Zouheïr Safi – 1006; 3Faculty of Medicine of Tunis; Address: 15
+Rue Djebel Lakhdhar. La Rabta. 1007, Tunis - Tunisia
+Corresponding author:
+Rached Belgacem,
+High Institute of Medical Technologies
+of Tunis, ISTMT, and High National
+School Engineering of Tunis,
+Information Technology and
+Communication Technology and
+Electrical Engineering, University of
+Tunis El-Manar, ENSIT 5, Avenue Taha
+Hussein, B. P.: 56, Bab Menara, 1008
+Tunis, Tunisia,"
+ea8abe31f3cac058cf757f16e1eefa11295322bc,Ensemble of Deep Learned Features for Melanoma Classification,"Ensemble of Deep Learned Features for Melanoma
+Classification
+Loris Nanni1*, Alessandra Lumini2, Stefano Ghidoni1
+Department of Information Engineering, University of Padua, via Gradenigo 6/B, 35131
+Padova, Italy.
+Department of Computer Science and Engineering, University of Bologna, via Sacchi 3,
+7521, Cesena (FC), Italy."
+ead2701e883174028a1b1b25472bc83bedc330aa,"Face Recognition Methods Based on Feedforward Neural Networks, Principal Component Analysis and Self-Organizing Map","RADIOENGINEERING, VOL. 16, NO. 1, APRIL 2007
+Face Recognition Methods Based on Feedforward
+Neural Networks, Principal Component Analysis
+nd Self-Organizing Map
+Miloš ORAVEC, Jarmila PAVLOVIČOVÁ
+Dept. of Telecommunications, Faculty of Electrical Engineering and Information Technology, Slovak University of
+Technology, Ilkovičova 3, 812 19 Bratislava, Slovak Republic"
+eafda8a94e410f1ad53b3e193ec124e80d57d095,Observer-Based Measurement of Facial Expression With the Facial Action Coding System,"Jeffrey F. Cohn
+Zara Ambadar
+Paul Ekman
+Observer-Based Measurement of Facial Expression
+With the Facial Action Coding System
+Facial expression has been a focus of emotion research for over
+hundred years (Darwin, 1872/1998). It is central to several
+leading theories of emotion (Ekman, 1992; Izard, 1977;
+Tomkins, 1962) and has been the focus of at times heated
+debate about issues in emotion science (Ekman, 1973, 1993;
+Fridlund, 1992; Russell, 1994). Facial expression figures
+prominently in research on almost every aspect of emotion,
+including psychophysiology (Levenson, Ekman, & Friesen,
+990), neural bases (Calder et al., 1996; Davidson, Ekman,
+Saron, Senulis, & Friesen, 1990), development (Malatesta,
+Culver, Tesman, & Shephard, 1989; Matias & Cohn, 1993),
+perception (Ambadar, Schooler, & Cohn, 2005), social pro-
+esses (Hatfield, Cacioppo, & Rapson, 1992; Hess & Kirouac,
+000), and emotion disorder (Kaiser, 2002; Sloan, Straussa,
+Quirka, & Sajatovic, 1997), to name a few."
+ea0785c2d4ac8f8d6415cffdb83547bfc4e7adba,Spontaneous Facial Expression Recognition using Sparse Representation,"Spontaneous Facial Expression Recognition using Sparse Representation
+Univ. Grenoble Alpes, GIPSA-Lab, F-38000 Grenoble, France CNRS, GIPSA-Lab, F-38000 Grenoble, France
+Dawood Al Chanti1 and Alice Caplier1
+Keywords:
+Dictionary learning, Random projection, Spontaneous facial expression, Sparse representation."
+ea85378a6549bb9eb9bcc13e31aa6a61b655a9af,Template Protection for PCA - LDA - based 3 D Face Recognition System,"Diplomarbeit
+Template Protection for PCA-LDA-based 3D
+Face Recognition System
+Daniel Hartung
+Technische Universität Darmstadt
+Fachbereich Informatik
+Fachgebiet Graphisch-Interaktive Systeme
+Fraunhoferstraße 5
+64283 Darmstadt
+Betreuer: Dipl.-Ing. Xuebing Zhou
+Prüfer: Prof. Dr. techn. Dieter W. Fellner"
+ea2ee5c53747878f30f6d9c576fd09d388ab0e2b,Viola-Jones Based Detectors: How Much Affects the Training Set?,"Viola-Jones based Detectors: How much affects
+the Training Set?
+Modesto Castrill´on-Santana, Daniel Hern´andez-Sosa, Javier Lorenzo-Navarro
+SIANI
+Edif. Central del Parque Cient´ıfico Tecnol´ogico
+Universidad de Las Palmas de Gran Canaria
+5017 - Spain"
+eace134548f9be17c243b06f133bfac76a797676,ADNet: A Deep Network for Detecting Adverts,"ADNet: A Deep Network for Detecting Adverts
+Murhaf Hossari(cid:63)1, Soumyabrata Dev(cid:63)1, Matthew Nicholson1, Killian McCabe1,
+Atul Nautiyal1, Clare Conran1, Jian Tang3, Wei Xu3, and Fran¸cois Piti´e1,2
+The ADAPT SFI Research Centre, Trinity College Dublin
+Department of Electronic & Electrical Engineering, Trinity College Dublin
+Huawei Ireland Research Center, Dublin"
+ea5eaaadb8bc928fb7543d6fa24f9f4a229ff979,Mirror Neuron Forum.,"Perspectives on Psychological
+Science
+http://pps.sagepub.com/
+Vittorio Gallese, Morton Ann Gernsbacher, Cecilia Heyes, Gregory Hickok and Marco Iacoboni
+Mirror Neuron Forum
+Perspectives on Psychological Science
+DOI: 10.1177/1745691611413392
+2011 6: 369
+The online version of this article can be found at:
+http://pps.sagepub.com/content/6/4/369
+Perspectives on Psychological Science
+can be found at:
+Additional services and information for
+Email Alerts:
+Subscriptions:
+Reprints:
+Permissions:
+http://pps.sagepub.com/cgi/alerts
+http://pps.sagepub.com/subscriptions
+http://www.sagepub.com/journalsReprints.nav"
+ea3353efbe7b856ced106718d04ea7d83e2a2310,A Survey of Video Object Tracking,"International Journal of Control and Automation
+Vol. 8, No. 9 (2015), pp. 303-312
+http://dx.doi.org/10.14257/ijca.2015.8.9.29
+A Survey of Video Object Tracking
+Meng Li, Zemin Cai1, Chuliang Wei and Ye Yuan
+Department of Electronic Engineering, College of Engineering, Shantou
+University, China
+Guangdong Provicial Key Laboratory of Digital Signal and Image Processing
+Techniques, China
+Corresponding author,"
+ea572991a75acfc8a8791955f670d2c48db49023,Arbitrary-Shape Object Localization Using Adaptive Image Grids,"Arbitrary-Shape Object Localization using
+Adaptive Image Grids
+Chunluan Zhou and Junsong Yuan
+School of EEE, Nanyang Technology University, Singapore"
+ea099ee1183145131e29009f2af0e4b13ac583f0,Effects of exposure to facial expression variation in face learning and recognition,"Psychological Research (2015) 79:1042–1053
+DOI 10.1007/s00426-014-0627-8
+O R I G I N A L A R T I C L E
+Effects of exposure to facial expression variation in face learning
+nd recognition
+Chang Hong Liu • Wenfeng Chen • James Ward
+Received: 25 July 2014 / Accepted: 6 November 2014 / Published online: 15 November 2014
+Ó The Author(s) 2014. This article is published with open access at Springerlink.com"
+eae625274767cb695fa2121ccdcb30828ffc9b66,Social Context Modulates Facial Imitation of Children’s Emotional Expressions,"RESEARCH ARTICLE
+Social Context Modulates Facial Imitation of
+Children’s Emotional Expressions
+Peter A. Bos*, Nadine Jap-Tjong, Hannah Spencer, Dennis Hofman
+Department of Experimental Psychology, Utrecht University, Utrecht, The Netherlands"
+ea9857a5e5c72d435054a5a73e50dafb755a2597,Comparative study of histogram distance measures for re-identification,"Comparative study of histogram distance measures for re-identification
+Pedro A. Mar´ın-Reyes, Javier Lorenzo-Navarro, Modesto Castrill´on-Santana
+Instituto Universitario SIANI
+Universidad de Las Palmas de Gran Canaria"
+ea2d43aa2490331cd1406e1432ce706c53139323,Tracked Instance Search,"TRACKED INSTANCE SEARCH
+Andreu Girbau†
+Ryota Hinami(cid:63)
+Shin’ichi Satoh(cid:63)
+Universitat Polit`ecnica de Catalunya, Barcelona
+(cid:63) National Institute of Informatics, Tokyo"
+ea251fc90da36fdbaf7be76f449a9e0dac1d42ef,Brain mechanisms for processing direct and averted gaze in individuals with autism.,"J Autism Dev Disord
+DOI 10.1007/s10803-011-1197-x
+O R I G I N A L P A P E R
+Brain Mechanisms for Processing Direct and Averted Gaze
+in Individuals with Autism
+Naomi B. Pitskel • Danielle Z. Bolling • Caitlin M. Hudac •
+Stephen D. Lantz • Nancy J. Minshew • Brent C. Vander Wyk •
+Kevin A. Pelphrey
+Ó Springer Science+Business Media, LLC 2011"
+e1f794bacd01eecb623bead652bdc9f86e17944e,Affective Environment for Java Programming Using Facial and EEG Recognition,"Affective Environment for Java Programming
+Using Facial and EEG Recognition
+María Lucía Barrón-Estrada, Ramón Zatarain-Cabada, Claudia Guadalupe
+Aispuro-Gallegos, Catalina de la Luz Sosa-Ochoa, Mario Lindor-Valdez
+Instituto Tecnológico de Culiacán, Culiacán, Sinaloa,
+Mexico
+{lbarron, rzatarain, m03171007, m07170739,"
+e1e5d64318ec0a493995fb83ef4f433ddde82e77,Affects the Gaze-cueing Effect,"(cid:5)(cid:36)(cid:57)(cid:50)(cid:44)(cid:39)(cid:44)(cid:49)(cid:42)(cid:3)(cid:50)(cid:53)(cid:3)(cid:36)(cid:51)(cid:51)(cid:53)(cid:50)(cid:36)(cid:38)(cid:43)(cid:44)(cid:49)(cid:42)(cid:3)(cid:40)(cid:60)(cid:40)(cid:54)(cid:5)(cid:34)(cid:3)(cid:44)(cid:49)(cid:55)(cid:53)(cid:50)(cid:57)(cid:40)(cid:53)(cid:54)(cid:44)(cid:50)(cid:49)(cid:18)(cid:40)(cid:59)(cid:55)(cid:53)(cid:36)(cid:57)(cid:40)(cid:53)(cid:54)(cid:44)(cid:50)(cid:49)
+(cid:36)(cid:41)(cid:41)(cid:40)(cid:38)(cid:55)(cid:54)(cid:3)(cid:55)(cid:43)(cid:40)(cid:3)(cid:42)(cid:36)(cid:61)(cid:40)(cid:16)(cid:38)(cid:56)(cid:40)(cid:44)(cid:49)(cid:42)(cid:3)(cid:40)(cid:41)(cid:41)(cid:40)(cid:38)(cid:55)
+(cid:16)(cid:16)(cid:48)(cid:68)(cid:81)(cid:88)(cid:86)(cid:70)(cid:85)(cid:76)(cid:83)(cid:87)(cid:3)(cid:39)(cid:85)(cid:68)(cid:73)(cid:87)(cid:16)(cid:16)
+(cid:38)(cid:82)(cid:74)(cid:81)(cid:76)(cid:87)(cid:76)(cid:89)(cid:72)(cid:3)(cid:51)(cid:85)(cid:82)(cid:70)(cid:72)(cid:86)(cid:86)(cid:76)(cid:81)(cid:74)
+(cid:3)
+(cid:3)
+(cid:48)(cid:68)(cid:81)(cid:88)(cid:86)(cid:70)(cid:85)(cid:76)(cid:83)(cid:87)(cid:3)(cid:49)(cid:88)(cid:80)(cid:69)(cid:72)(cid:85)(cid:29)
+(cid:41)(cid:88)(cid:79)(cid:79)(cid:3)(cid:55)(cid:76)(cid:87)(cid:79)(cid:72)(cid:29)
+(cid:36)(cid:85)(cid:87)(cid:76)(cid:70)(cid:79)(cid:72)(cid:3)(cid:55)(cid:92)(cid:83)(cid:72)(cid:29)
+(cid:46)(cid:72)(cid:92)(cid:90)(cid:82)(cid:85)(cid:71)(cid:86)(cid:29)
+(cid:38)(cid:82)(cid:85)(cid:85)(cid:72)(cid:86)(cid:83)(cid:82)(cid:81)(cid:71)(cid:76)(cid:81)(cid:74)(cid:3)(cid:36)(cid:88)(cid:87)(cid:75)(cid:82)(cid:85)(cid:29)
+(cid:38)(cid:82)(cid:85)(cid:85)(cid:72)(cid:86)(cid:83)(cid:82)(cid:81)(cid:71)(cid:76)(cid:81)(cid:74)(cid:3)(cid:36)(cid:88)(cid:87)(cid:75)(cid:82)(cid:85)(cid:3)(cid:54)(cid:72)(cid:70)(cid:82)(cid:81)(cid:71)(cid:68)(cid:85)(cid:92)
+(cid:44)(cid:81)(cid:73)(cid:82)(cid:85)(cid:80)(cid:68)(cid:87)(cid:76)(cid:82)(cid:81)(cid:29)
+(cid:38)(cid:82)(cid:85)(cid:85)(cid:72)(cid:86)(cid:83)(cid:82)(cid:81)(cid:71)(cid:76)(cid:81)(cid:74)(cid:3)(cid:36)(cid:88)(cid:87)(cid:75)(cid:82)(cid:85)(cid:10)(cid:86)(cid:3)(cid:44)(cid:81)(cid:86)(cid:87)(cid:76)(cid:87)(cid:88)(cid:87)(cid:76)(cid:82)(cid:81)(cid:29)
+(cid:38)(cid:82)(cid:85)(cid:85)(cid:72)(cid:86)(cid:83)(cid:82)(cid:81)(cid:71)(cid:76)(cid:81)(cid:74)(cid:3)(cid:36)(cid:88)(cid:87)(cid:75)(cid:82)(cid:85)(cid:10)(cid:86)(cid:3)(cid:54)(cid:72)(cid:70)(cid:82)(cid:81)(cid:71)(cid:68)(cid:85)(cid:92)
+(cid:44)(cid:81)(cid:86)(cid:87)(cid:76)(cid:87)(cid:88)(cid:87)(cid:76)(cid:82)(cid:81)(cid:29)
+(cid:41)(cid:76)(cid:85)(cid:86)(cid:87)(cid:3)(cid:36)(cid:88)(cid:87)(cid:75)(cid:82)(cid:85)(cid:29)
+(cid:41)(cid:76)(cid:85)(cid:86)(cid:87)(cid:3)(cid:36)(cid:88)(cid:87)(cid:75)(cid:82)(cid:85)(cid:3)(cid:54)(cid:72)(cid:70)(cid:82)(cid:81)(cid:71)(cid:68)(cid:85)(cid:92)(cid:3)(cid:44)(cid:81)(cid:73)(cid:82)(cid:85)(cid:80)(cid:68)(cid:87)(cid:76)(cid:82)(cid:81)(cid:29)
+(cid:50)(cid:85)(cid:71)(cid:72)(cid:85)(cid:3)(cid:82)(cid:73)(cid:3)(cid:36)(cid:88)(cid:87)(cid:75)(cid:82)(cid:85)(cid:86)(cid:29)
+(cid:50)(cid:85)(cid:71)(cid:72)(cid:85)(cid:3)(cid:82)(cid:73)(cid:3)(cid:36)(cid:88)(cid:87)(cid:75)(cid:82)(cid:85)(cid:86)(cid:3)(cid:54)(cid:72)(cid:70)(cid:82)(cid:81)(cid:71)(cid:68)(cid:85)(cid:92)(cid:3)(cid:44)(cid:81)(cid:73)(cid:82)(cid:85)(cid:80)(cid:68)(cid:87)(cid:76)(cid:82)(cid:81)(cid:29)"
+e19b60e5b8083828285a2baa781ceaad27f6353c,The accuracy and value of machine-generated image tags: design and user evaluation of an end-to-end image tagging system,"The Accuracy and Value of Machine-Generated Image Tags
+Design and User Evaluation of an End-to-End Image Tagging System
+Lexing Xie, Apostol Natsev, Matthew Hill, John R. Smith
+IBM Watson Research Center, Hawthorne, NY, USA
+{xlx, natsev, mh,
+Alex Phillips
+IBM Global Business Services, United Kingdom"
+e18cc09c3d3d79df6cd40ea5cf13ad40eacb8a73,Visual Transfer Learning: Informal Introduction and Literature Overview,"Visual Transfer Learning: Informal Introduction
+nd Literature Overview
+Erik Rodner
+University of Jena, Germany
+August 2011"
+e151c99b5e55bfc03047a2c6c2118cd9e4ad829b,Perspectives on Deep Multimodel Robot Learning,"Perspectives on Deep Multimodel
+Robot Learning
+Wolfram Burgard, Abhinav Valada, Noha Radwan, Tayyab Naseer, Jingwei Zhang,
+Johan Vertens, Oier Mees, Andreas Eitel and Gabriel Oliveira"
+e1e60501677ae67c6a682bac2c17e4fc904ee380,Performance Analysis of Local Binary Pattern Variants in Texture Classification,"Performance Analysis of Local Binary Pattern
+International Journal of Advanced Research in Computer Engineering & Technology (IJARCET)
+Volume 06, Issue 05, May 2017, ISSN: 2278 – 1323
+Variants in Texture Classification
+Ch. Sudha Sree1, M. V. P Chandra Sekhara Rao2
+,2Department of CA, Department of CSE, R.V.R &J.C College of Engineering
+Guntur, India"
+e1371af87f6d5e22ef6d8c5f9977f5e924f176f6,Bidirectional Retrieval Made Simple Jônatas Wehrmann,"Bidirectional Retrieval Made Simple
+Jˆonatas Wehrmann
+School of Technology
+Rodrigo C. Barros
+School of Technology
+Pontif´ıcia Universidade Cat´olica
+Pontif´ıcia Universidade Cat´olica
+do Rio Grande do Sul
+do Rio Grande do Sul"
+e1725b71f3f127d6a49d24f14bee05aada1e2f96,Part-Based Deep Hashing for Large-Scale Person Re-Identification,"Part-based Deep Hashing for Large-scale
+Person Re-identification∗
+Fuqing Zhu, Xiangwei Kong, Member, IEEE, Liang Zheng, Member, IEEE, Haiyan Fu, Member, IEEE,
+Qi Tian, Fellow, IEEE,"
+e1660c10ae661cf951602232b36047b19198f599,Web Image Search Using Attribute Assisted Re- Ranking Model,"Vol-2 Issue-1 2016
+IJARIIE-ISSN(O)-2395-4396
+Web Image Search Using Attribute Assisted Re-
+Ranking Model
+Ganesh R Nagare1, Ashok V Markad 2
+Information Technology, Amrutvahini College of Engineering, Maharashtra, India"
+e1f790bbedcba3134277f545e56946bc6ffce48d,Image Retrieval Using Attribute Enhanced Sparse Code Words,"International Journal of Innovative Research in Science,
+Engineering and Technology
+(An ISO 3297: 2007 Certified Organization)
+Vol. 3, Issue 5, May 2014
+Sparse Code Words
+ISSN: 2319-8753
+Image Retrieval Using Attribute Enhanced
+M.Balaganesh1, N.Arthi2
+Associate Professor, Department of Computer Science and Engineering, SRV Engineering College, sembodai, india1
+P.G. Student, Department of Computer Science and Engineering, SRV Engineering College, sembodai, India 2"
+e135f8118145b6a2e2a6a2088c04c26ca6d38642,Dynamic Biometrics Fusion at Feature Level for Video-Based Human Recognition,
+e10662a59b5f8e1f5684409023f11ca727647320,Performance Evaluation of Deep Learning Networks for Semantic Segmentation of Traffic Stereo-Pair Images,"Performance Evaluation of Deep Learning Networks for
+Semantic Segmentation of Traffic Stereo-Pair Images
+Vlad Taran, Nikita Gordienko, Yuriy Kochura, Yuri Gordienko, Alexandr Rokovyi, Oleg
+Alienin, Sergii Stirenko
+National Technical University of Ukraine ""Igor Sikorsky Kyiv Polytechnic Institute"",
+Kyiv, Ukraine
+Semantic image segmentation is one the most demanding task, especially for analysis of traffic conditions
+for self-driving cars. Here the results of application of several deep learning architectures (PSPNet and
+ICNet) for semantic image segmentation of traffic stereo-pair images are presented. The images from
+Cityscapes dataset and custom urban images were analyzed as to the segmentation accuracy and image
+inference time. For the models pre-trained on Cityscapes dataset, the inference time was equal in the limits
+of standard deviation, but the segmentation accuracy was different for various cities and stereo channels
+even. The distributions of accuracy (mean intersection over union — mIoU) values for each city and channel
+re asymmetric, long-tailed, and have many extreme outliers, especially for PSPNet network in comparison
+to ICNet network. Some statistical properties of these distributions (skewness, kurtosis) allow us to
+distinguish these two networks and open the question about relations between architecture of deep learning
+networks and statistical distribution of the predicted results (mIoU here). The results obtained demonstrated
+the different sensitivity of these networks to: (1) the local street view peculiarities in different cities that
+should be taken into account during the targeted fine tuning the models before their practical applications,
+(2) the right and left data channels in stereo-pairs. For both networks, the difference in the predicted results"
+e17783170ecc48253fa16123a041ae298184f4ff,Graph Embedding Algorithms Based on Neighborhood Discriminant Embedding for Face Recognition,"International Journal of Computer Information Systems and Industrial Management Applications.
+ISSN 2150-7988 Volume 4 (2012) pp. 374–382
+(cid:13) MIR Labs, www.mirlabs.net/ijcisim/index.html
+Graph Embedding Algorithms Based on
+Neighborhood Discriminant Embedding for Face
+Recognition
+Dexing Zhong1,2, Jiuqiang Han1, Yongli Liu1 and Shengbin Li2
+Ministry of Education Key Lab for Intelligent Networks and Network Security, Xi’an Jiaotong University,
+8 Xianning West Road, Xian, 710049 P. R. China
+State Key Laboratory of Ministry of Health for Forensic Sciences, Xian Jiaotong University,
+76 Yanta West Road, Xian, 710061 P. R. China"
+e1e2b6a8944a4e6f195b6f7371ee9e6b0684ae6b,Generating Personalized Virtual Agent in Speech Dialogue System for People with Dementia,"Generating Personalized Virtual Agent
+in Speech Dialogue System for People
+with Dementia
+Shota Nakatani1(B), Sachio Saiki1, Masahide Nakamura1, and Kiyoshi Yasuda2
+Graduate School of System Informatics Kobe University,
+-1 Rokkodai, Nada, Kobe, Japan
+Chiba Rosai Hospital, 2-16 Tatsumidai-higashi, Ichihara, Japan"
+e19ebad4739d59f999d192bac7d596b20b887f78,Learning Gating ConvNet for Two-Stream based Methods in Action Recognition,"Learning Gating ConvNet for Two-Stream based Methods in Action
+Recognition
+Jiagang Zhu1,2, Wei Zou1, Zheng Zhu1,2"
+e1cb110c45c4416f7aff490db2674abe1460259e,Hard-Aware Point-to-Set Deep Metric for Person Re-identification,"Hard-AwarePoint-to-SetDeepMetricforPersonRe-identificationRuiYu1,ZhiyongDou1,SongBai1,ZhaoxiangZhang2,YongchaoXu1( ),andXiangBai1("
+e163118b4a5b8016754134215433eee1f2c0065a,3-D Shape Matching for Face Analysis and Recognition,"-D Shape Matching for Face Analysis and Recognition
+Wei Quan, Bogdan J. Matuszewski and Lik-Kwan Shark
+Robotics and Computer Vision Research Laboratory, Applied Digital Signal and Image Processing (ADSIP) Research
+Centre, University of Central Lancashire, Preston PR1 2HE, U.K.
+Keywords:
+Face Recognition, Shape Matching and Modelling, Isometric Embedding Representation, Non-Rigid
+Deformation Registration."
+e1fb8ab53996f06e9a35de6b553333bd6279bcbd,Learning Multilayer Channel Features for Pedestrian Detection,"Learning Multilayer Channel Features for
+Pedestrian Detection
+Jiale Cao, Yanwei Pang, and Xuelong Li"
+e1d726d812554f2b2b92cac3a4d2bec678969368,Human Action Recognition Bases on Local Action Attributes,"J Electr Eng Technol.2015; 10(?): 30-40
+http://dx.doi.org/10.5370/JEET.2015.10.2.030
+ISSN(Print)
+975-0102
+ISSN(Online) 2093-7423
+Human Action Recognition Bases on Local Action Attributes
+Jing Zhang*, Hong Liu*, Weizhi Nie† Lekha Chaisorn**, Yongkang Wong**
+nd Mohan S Kankanhalli**"
+e1140b86c64549cbcd138f868c82ee8aad77d103,Occlusion Handling using Semantic Segmentation and Visibility-Based Rendering for Mixed Reality,"Occlusion Handling using Semantic Segmentation and
+Visibility-Based Rendering for Mixed Reality
+Menandro Roxas
+Tomoki Hori
+Taiki Fukiage
+Tokyo, Japan
+Yasuhide Okamoto
+Takeshi Oishi
+(cid:140)e University of Tokyo"
+e1f815c50a6c0c6d790c60a1348393264f829e60,Pedestrian Detection and Tracking in Surveillance Video,"PEDESTRIAN DETECTION AND TRACKING IN
+SURVEILLANCE VIDEO
+PENNY CHONG
+A project report submitted in partial fulfilment of the
+requirements for the award of Bachelor of Science (Hons.)
+Applied Mathematics with Computing
+Lee Kong Chian Faculty of Engineering and Science
+Universiti Tunku Abdul Rahman
+April 2016"
+e1e6e6792e92f7110e26e27e80e0c30ec36ac9c2,Ranking with Adaptive Neighbors,"TSINGHUA SCIENCE AND TECHNOLOGY
+ISSNll1007-0214
+0?/?? pp???–???
+DOI: 10.26599/TST.2018.9010000
+Volume 1, Number 1, Septembelr 2018
+Ranking with Adaptive Neighbors
+Muge Li, Liangyue Li, and Feiping Nie∗"
+e1e1b3683ac278386cf1569e97f9aced0923f4a0,Hyperdrive: A Systolically Scalable Binary-Weight CNN Inference Engine for mW IoT End-Nodes,"Hyperdrive: A Systolically Scalable Binary-Weight
+CNN Inference Engine for mW IoT End-Nodes
+Renzo Andri∗, Lukas Cavigelli∗, Davide Rossi†, Luca Benini∗†
+Integrated Systems Laboratory, ETH Zurich, Zurich, Switzerland
+DEI, University of Bologna, Bologna, Italy"
+cd01a0018f2b8f1211e8dfe311c28e32773c58dc,Globally-Optimal Inlier Set Maximisation for Simultaneous Camera Pose and Feature Correspondence,"Globally-Optimal Inlier Set Maximisation for
+Simultaneous Camera Pose and Feature Correspondence
+Dylan Campbell1,2, Lars Petersson1,2, Laurent Kneip1 and Hongdong Li1
+Australian National University*
+Data61 – CSIRO"
+cd9666858f6c211e13aa80589d75373fd06f6246,A Novel Time Series Kernel for Sequences Generated by LTI Systems,"A Novel Time Series Kernel for
+Sequences Generated by LTI Systems
+Liliana Lo Presti, Marco La Cascia
+V.le delle Scienze Ed.6, DIID, Universit´a degli studi di Palermo, Italy"
+cd2c1e542ae8c08cfb8baea3dff788d143232de8,Multiview Human Synthesis From a Single View,"Multiview Human Synthesis From a Singleview
+Si Wen (06246679), Tiancong Zhou (06247022), Honghao Qiu (06246258)
+{wensi, longztc,"
+cd36768795c696c990ff5c89be8d8b3b205858bd,CliCR: A Dataset of Clinical Case Reports for Machine Reading Comprehension,"CliCR: A Dataset of Clinical Case Reports for Machine
+Reading Comprehension∗
+Simon ˇSuster and Walter Daelemans
+Computational Linguistics & Psycholinguistics Research Center,
+University of Antwerp, Belgium"
+cd6978bf6b98794552bd52d166b5e04626fb6d6d,A Review on Face Recognition in various Illuminations,"A Review on Face Recognition in various
+Illuminations
+Saurabh D. Parmar , Vaishali j. kalariya
+CE/IT Department-School of Engineering,R.K. University,Rajkot"
+cd0a04c0af9b6c523884415ba54bff370fd02fab,Generalized Sparselet Models for Real-Time Multiclass Object Recognition,"Generalized Sparselet Models for Real-Time
+Multiclass Object Recognition
+Hyun Oh Song, Ross Girshick, Stefan Zickler, Christopher Geyer, Pedro Felzenszwalb, and Trevor Darrell"
+cd444ee7f165032b97ee76b21b9ff58c10750570,Table of Contents.,"UNIVERSITY OF CALIFORNIA,
+IRVINE
+Relational Models for Human-Object Interactions and Object Affordances
+DISSERTATION
+submitted in partial satisfaction of the requirements
+for the degree of
+DOCTOR OF PHILOSOPHY
+in Computer Science
+Chaitanya Desai
+Dissertation Committee:
+Professor Deva Ramanan, Chair
+Professor Charless Fowlkes
+Professor Padhraic Smyth
+Professor Serge Belongie"
+cd0f7b3f545cc4bfa5e2d7185789e8ead7e3cee2,"Children’s and Adults’ Predictions of Black, White, and Multiracial Friendship Patterns","Journal of Cognition and Development
+ISSN: 1524-8372 (Print) 1532-7647 (Online) Journal homepage: http://www.tandfonline.com/loi/hjcd20
+Children’s and Adults’ Predictions of Black, White,
+nd Multiracial Friendship Patterns
+Steven O. Roberts, Amber D. Williams & Susan A. Gelman
+To cite this article: Steven O. Roberts, Amber D. Williams & Susan A. Gelman (2017) Children’s
+nd Adults’ Predictions of Black, White, and Multiracial Friendship Patterns, Journal of Cognition
+nd Development, 18:2, 189-208, DOI: 10.1080/15248372.2016.1262374
+To link to this article: http://dx.doi.org/10.1080/15248372.2016.1262374
+Accepted author version posted online: 22
+Nov 2016.
+Published online: 22 Nov 2016.
+Submit your article to this journal
+Article views: 91
+View related articles
+View Crossmark data
+Citing articles: 1 View citing articles
+Full Terms & Conditions of access and use can be found at
+http://www.tandfonline.com/action/journalInformation?journalCode=hjcd20
+Download by: [University of Michigan]"
+cd596a2682d74bdfa7b7160dd070b598975e89d9,Mood Detection: Implementing a facial expression recognition system,"Mood Detection: Implementing a facial
+expression recognition system
+Neeraj Agrawal, Rob Cosgriff and Ritvik Mudur
+. Introduction
+Facial expressions play a significant role in human dialogue. As a result, there has been
+onsiderable work done on the recognition of emotional expressions and the application of this
+research will be beneficial in improving human-machine dialogue. One can imagine the
+improvements to computer interfaces, automated clinical (psychological) research or even
+interactions between humans and autonomous robots.
+Unfortunately, a lot of the literature does not focus on trying to achieve high recognition rates
+cross multiple databases. In this project we develop our own mood detection system that
+ddresses this challenge. The system involves pre-processing image data by normalizing and
+pplying a simple mask, extracting certain (facial) features using PCA and Gabor filters and then
+using SVMs for classification and recognition of expressions. Eigenfaces for each class are used
+to determine class-specific masks which are then applied to the image data and used to train
+multiple, one against the rest, SVMs. We find that simply using normalized pixel intensities
+works well with such an approach.
+Figure 1 – Overview of our system design
+. Image pre-processing
+We performed pre-processing on the images used to train and test our algorithms as follows:"
+cd490432e35ed5c5b7d80e1525e2780d7467ffb6,Background Estimation of Lost Values Using Kinect’s Sensor in an Inpainting Technique,"International Journal of Scientific Research Engineering & Technology (IJSRET), ISSN 2278 – 0882
+Vo lu me 3, Issue 8, Nove mber 2014
+BACKGROUND ESTIMATION OF LOST VALUES USING
+KINECT’S SENSOR IN AN INPAINTING TECHNIQUE
+* PG Schola r, Dept of EEE [Embedded systems technologies], #Assistant Professsor,
+Dept of EEE, Kongunadu College Of Engineering & Technology,Trichy, Ta mil Nadu, India
+*S.Kavitha, #Ms.S.Hemalatha"
+cd5ef3aeebc231e2c833ef55cf0571aa990c5ff8,Image Quality Assessment Techniques Improve Training,"Under review as a conference paper at ICLR 2018
+IMAGE QUALITY ASSESSMENT TECHNIQUES IMPROVE
+TRAINING AND EVALUATION OF ENERGY-BASED
+GENERATIVE ADVERSARIAL NETWORKS
+Anonymous authors
+Paper under double-blind review"
+cda4fb9df653b5721ad4fe8b4a88468a410e55ec,Gabor wavelet transform and its application,"Gabor wavelet transform and its application
+Wei-lun Chao R98942073"
+cd855c776240150f4dba7a5975c7011a9c6737ac,On Accurate and Reliable Anomaly Detection for Gas Turbine Combustors: A Deep Learning Approach,"On Accurate and Reliable Anomaly Detection for Gas Turbine
+Combustors: A Deep Learning Approach
+Weizhong Yan1 and Lijie Yu2
+General Electric Global Research Center, Niskayuna, New York 12309, USA
+General Electric Power & Water Engineering, Atlanta, Georgia 30339, USA"
+cdba015be9db1e047a51b7e06403528b3551587e,SHOG - Spherical HOG Descriptors for Rotation Invariant 3D Object Detection,"SHOG - Spherical HOG Descriptors for
+Rotation Invariant 3D Object Detection
+Henrik Skibbe1,3, Marco Reisert2 and Hans Burkhardt1,3
+Department of Computer Science, University of Freiburg, Germany
+Dept. of Diagnostic Radiology, Medical Physics, University Medical Center, Freiburg
+Center for Biological Signalling Studies (BIOSS), University of Freiburg"
+cd3005753012409361aba17f3f766e33e3a7320d,Multilinear Biased Discriminant Analysis: A Novel Method for Facial Action Unit Representation,"Multilinear Biased Discriminant Analysis: A Novel Method for Facial
+Action Unit Representation
+Mahmoud Khademi†, Mehran Safayani†and Mohammad T. Manzuri-Shalmani†
+: Sharif University of Tech., DSP Lab,"
+cd687ddbd89a832f51d5510c478942800a3e6854,A game to crowdsource data for affective computing,"A Game to Crowdsource Data for Affective Computing
+Chek Tien Tan
+Hemanta Sapkota
+Daniel Rosser
+Yusuf Pisan
+Games Studio, Faculty of Engineering and IT, University of Technology, Sydney"
+cd4252d1f0a124dcc91af28f527ad1fa7be3a195,Development of an Efficient Face Recognition System based on Linear and Nonlinear Algorithms,"Development of an Efficient Face Recognition System based on Linear and Nonlinear Algorithms
+{tag} {/tag}
+International Journal of Computer Applications
+Foundation of Computer Science (FCS), NY, USA
+Volume 134
+Number 7
+Year of Publication: 2016
+Authors:
+Filani Araoluwa S., Adetunmbi Adebayo O.
+10.5120/ijca2016907932
+{bibtex}2016907932.bib{/bibtex}"
+cd7a7be3804fd217e9f10682e0c0bfd9583a08db,Women also Snowboard: Overcoming Bias in Captioning Models,"Women also Snowboard:
+Overcoming Bias in Captioning Models
+Lisa Anne Hendricks * 1 Kaylee Burns * 1 Kate Saenko 2 Trevor Darrell 1 Anna Rohrbach 1"
+cca228b47a603a9b9e2a1e3a1b278b35612d078d,Randomized Face Recognition on Partially Occluded Images,"Randomized Face Recognition on Partially
+Occluded Images
+Ariel Morelli Andres, Sebastian Padovani, Mariano Tepper, Marta Mejail, and
+Julio Jacobo
+Departamento de Computación, Facultad de Ciencias Exactas y Naturales,
+Universidad de Buenos Aires, Argentina."
+ccfcbf0eda6df876f0170bdb4d7b4ab4e7676f18,A Dynamic Appearance Descriptor Approach to Facial Actions Temporal Modeling,"JOURNAL OF LATEX CLASS FILES, VOL. 6, NO. 1, JUNE 2011
+A Dynamic Appearance Descriptor Approach to
+Facial Actions Temporal Modelling
+Bihan Jiang, Student Member, IEEE, Michel Valstar, Member, IEEE, Brais Martinez, Member, IEEE, and
+Maja Pantic, Fellow, IEEE"
+ccd5bd5ce40640ebc6665b97a86ba3d28e457d11,Contributions to a fast and robust object recognition in images. (Contributions à une reconnaissance d'objet rapide et robuste en images),"Contributions to a fast and robust object recognition in
+images
+J´erˆome Revaud
+To cite this version:
+J´erˆome Revaud. Contributions to a fast and robust object recognition in images. Other [cs.OH].
+INSA de Lyon, 2011. English. <NNT : 2011ISAL0042>. <tel-00694442>
+HAL Id: tel-00694442
+https://tel.archives-ouvertes.fr/tel-00694442
+Submitted on 4 May 2012
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destin´ee au d´epˆot et `a la diffusion de documents
+scientifiques de niveau recherche, publi´es ou non,
+´emanant des ´etablissements d’enseignement et de
+recherche fran¸cais ou ´etrangers, des laboratoires"
+cc5f4d5aa9c3ffa75a335f3305a1caf9cbdeb71f,Learning Hierarchical Representations for Video Analysis Using Deep Learning,"LEARNING HIERARCHICAL REPRESENTATIONS FOR VIDEO ANALYSIS USING DEEP
+LEARNING
+YANG YANG
+B.S. Beijing University of Technology, 2008
+A dissertation submitted in partial fulfilment of the requirements
+for the degree of Doctor of Philosophy
+in the Department of Electrical Engineering and Computer Science
+in the College of Engineering and Computer Science
+t the University of Central Florida
+Orlando, Florida
+Summer Term
+Major Professor: Mubarak Shah"
+cc34b0ab84e82a6d8ebce08eff1b7556026b5352,Face Recognition using Gaussian Hermite Moments,"Special Issue of International Journal of Computer Applications (0975 – 8887)
+on Software Engineering, Databases and Expert Systems – SEDEXS, September 2012
+D Face Recognition using Gaussian Hermite Moments
+Naouar Belghini
+Faculty of Technical Sciences
+B.P. 2202 – Road of Imouzzer
+Fez – Morocco
+Arsalane Zarghili
+Faculty of Technical Sciences
+B.P. 2202 – Road of Imouzzer
+Fez – Morocco
+Jamal Kharroubi
+Faculty of Technical Sciences
+B.P. 2202 – Road of Imouzzer
+Fez – Morocco"
+ccf5852bfb55e1fa6760f76139ab44dab89f2a17,"Recognize Faces across Multi - View Videos and under Varying Illumination , Facial Expressions","Recognize Faces across Multi-View Videos and
+under Varying Illumination, Facial Expressions
+Research Scholar, Dept. Electronics & Communication Engineering,
+Mr. Steven Lawrence Fernandes1
+Karunya University,
+Coimbatore, Tamil Nadu, India
+Professor, Dept. Electronics & Communication Engineering,
+Dr. G. Josemin Bala2
+Karunya University,
+Coimbatore, Tamil Nadu, India"
+cc392ab1cfaee298e05488a4a1d84ece12220880,A new multi-scale fuzzy model for Histogram-Based Descriptors,"A NEW MULTI-SCALE FUZZY MODEL FOR HISTOGRAM-BASED DESCRIPTORS
+Lunshao Chaia, Zhen Qinb, Honggang Zhanga, Jun Guoa, Bir Bhanub
+Beijing University of Posts and Telecomuunictions, Beijing, 100876, China
+University of California at Riverside, Riverside, CA 92521, USA"
+cc3e1a6376928138dff5582b7a56d40cfb3b7367,Cost-Effective Features for Reidentification in Camera Networks,"Cost-effective features for
+re-identification in camera networks
+Syed Fahad Tahir and Andrea Cavallaro"
+cc2df3a03ee731478ed48838c284ad4548563308,Towards a Better Metric for Evaluating Question Generation Systems,"Towards a Better Metric for Evaluating Question Generation Systems
+Preksha Nema†‡ Mitesh M. Khapra†‡
+IIT Madras, India
+Robert Bosch Center for Data Science and Artificial Intelligence, IIT Madras"
+ccd2152c77ae65e4d3d0988990f6e243133a5efc,Learning Human Activities and Poses with Interconnected Data,"Copyright
+Chao-Yeh Chen"
+cc3c273bb213240515147e8be68c50f7ea22777c,Gaining Insight Into Films Via Topic Modeling & Visualization,"Gaining Insight Into Films
+Via Topic Modeling & Visualization
+MISHA RABINOVICH, MFA
+YOGESH GIRDHAR, PHD
+KEYWORDS Collaboration, computer vision, cultural
+nalytics, economy of abundance, interactive data
+visualization
+We moved beyond misuse when the software actually
+ecame useful for film analysis with the addition of audio
+nalysis, subtitle analysis, facial recognition, and topic
+modeling. Using multiple types of visualizations and
+back-and-fourth workflow between people and AI
+we arrived at an approach for cultural analytics that
+an be used to review and develop film criticism. Finally,
+we present ways to apply these techniques to Database
+Cinema and other aspects of film and video creation.
+PROJECT DATE 2014
+URL http://misharabinovich.com/soyummy.html"
+cc8e378fd05152a81c2810f682a78c5057c8a735,Expression Invariant Face Recognition System based on Topographic Independent Component Analysis and Inner Product Classifier,"International Journal of Computer Sciences and Engineering Open Access
+Research Paper Volume-5, Issue-12 E-ISSN: 2347-2693
+Expression Invariant Face Recognition System based on Topographic
+Independent Component Analysis and Inner Product Classifier
+Aruna Bhat
+Department of Electrical Engineering, IIT Delhi, New Delhi, India
+*Corresponding Author:
+Available online at: www.ijcseonline.org
+Received: 07/Nov/2017, Revised: 22/Nov/2017, Accepted: 14/Dec/2017, Published: 31/Dec/2017"
+cc5a62bd7c45a9ca479506acb572566331354fa3,Eye localization through multiscale sparse dictionaries,"Eye Localization through Multiscale Sparse Dictionaries
+Fei Yang, Junzhou Huang, Peng Yang and Dimitris Metaxas"
+ccf43c62e4bf76b6a48ff588ef7ed51e87ddf50b,Nutraceuticals and Cosmeceuticals for Human Beings–An Overview,"American Journal of Food Science and Health
+Vol. 2, No. 2, 2016, pp. 7-17
+http://www.aiscience.org/journal/ajfsh
+ISSN: 2381-7216 (Print); ISSN: 2381-7224 (Online)
+Nutraceuticals and Cosmeceuticals for Human
+Beings–An Overview
+R. Ramasubramania Raja*
+Department of Pharmacognosy, Narayana Pharmacy College, Nellore, India"
+cc622a0ac114821be935ca9c66cc177b93e18876,Anomaly Detection Based on Trajectory Analysis Using Kernel Density Estimation and Information Bottleneck Techniques,"Anomaly Detection Based on Trajectory Analysis
+Using Kernel Density Estimation and Information
+Bottleneck Techniques
+Yuejun Guo, Qing Xu(cid:3), Yu Yang, Sheng Liang, Yu Liu, Mateu Sbert"
+cc09cf5831fcae802ed2905a61ab502956655bbe,Shape-based instance detection under arbitrary viewpoint,"Shape-based instance detection under arbitrary
+viewpoint
+Edward Hsiao and Martial Hebert"
+cc31db984282bb70946f6881bab741aa841d3a7c,Learning Grimaces by Watching TV,"ALBANIE, VEDALDI: LEARNING GRIMACES BY WATCHING TV
+Learning Grimaces by Watching TV
+Samuel Albanie
+http://www.robots.ox.ac.uk/~albanie
+Andrea Vedaldi
+http://www.robots.ox.ac.uk/~vedaldi
+Engineering Science Department
+Univeristy of Oxford
+Oxford, UK"
+cc246025ec8e1d32ecfbeefaba0727fdf73cd9cb,Vehicle Tracking by Simultaneous Detection and Viewpoint Estimation,"Vehicle Tracking by Simultaneous Detection and
+Viewpoint Estimation
+Ricardo Guerrero-G´omez-Olmedo1, Roberto L´opez-Sastre1, Saturnino
+Maldonado-Basc´on1, and Antonio Fern´andez-Caballero2
+GRAM, Department of Signal Theory and Communications, UAH, Alcal´a de Henares, Spain.
+Department of Computing Systems, UCLM, Albacete, Spain."
+cc9f473584c1a7f224b42d4a3a3ea2864173cc28,Hephaestus: Data Reuse for Accelerating Scientific Discovery,"Hephaestus: Data Reuse for
+Accelerating Scientific Discovery
+Jennie Duggan
+Northwestern EECS"
+cc91001f9d299ad70deb6453d55b2c0b967f8c0d,Performance Enhancement of Face Recognition in Smart TV Using Symmetrical Fuzzy-Based Quality Assessment,"OPEN ACCESS
+ISSN 2073-8994
+Article
+Performance Enhancement of Face Recognition in Smart TV
+Using Symmetrical Fuzzy-Based Quality Assessment
+Yeong Gon Kim, Won Oh Lee, Ki Wan Kim, Hyung Gil Hong and Kang Ryoung Park *
+Division of Electronics and Electrical Engineering, Dongguk University, 26 Pil-dong 3-ga, Jung-gu,
+Seoul 100-715, Korea; E-Mails: (Y.G.K.); (W.O.L.);
+(K.W.K.); (H.G.H.)
+* Author to whom correspondence should be addressed; E-Mail:
+Tel.: +82-10-3111-7022; Fax: +82-2-2277-8735.
+Academic Editor: Christopher Tyler
+Received: 31 March 2015 / Accepted: 21 August 2015 / Published: 25 August 2015"
+cc96eab1e55e771e417b758119ce5d7ef1722b43,An Empirical Study of Recent Face Alignment Methods,"An Empirical Study of Recent
+Face Alignment Methods
+Heng Yang, Xuhui Jia, Chen Change Loy and Peter Robinson"
+cc4a2cab31ed06d0d8723df0bdf8cd0ece71bbe9,Analysis of Using Metric Access Methods for Visual Search of Objects in Video Databases,"Analysis of Using Metric Access Methods for Visual Search
+of Objects in Video Databases
+Henrique Batista da Silva 1
+Zenilton Kleber Gonçalves do Patrocínio Júnior 2
+Silvio Jamil Ferzoli Guimarães 2"
+cc2bb4318191a04e3fc82c008c649f5b90151e4d,Beyond Shared Hierarchies: Deep Multitask Learning through Soft Layer Ordering,"Published as a conference paper at ICLR 2018
+BEYOND SHARED HIERARCHIES: DEEP MULTITASK
+LEARNING THROUGH SOFT LAYER ORDERING
+Elliot Meyerson & Risto Miikkulainen
+The University of Texas at Austin and Sentient Technologies, Inc.
+{ekm,"
+cca198ae698e7956992f2fb326c04965b2964a18,Learning Pain from Emotion: Transferred HoT Data Representation for Pain Intensity Estimation,"Learning Pain from Emotion: Transferred HoT
+Data Representation for Pain Intensity
+Estimation
+Corneliu Florea1, Laura Florea1, and Constantin Vertan1
+Image Processing and Applications Laboratory,
+{corneliu.florea; laura.florea; constantin.vertan}
+University “Politehnica” of Bucharest,"
+e6d50d65a87425e7f0b4ec08c53d200f12f75590,The Neural Dynamics of Facial Identity Processing: Insights from EEG-Based Pattern Analysis and Image Reconstruction,"New Research
+Sensory and Motor Systems
+The Neural Dynamics of Facial Identity
+Processing: Insights from EEG-Based Pattern
+Analysis and Image Reconstruction
+Dan Nemrodov,1 Matthias Niemeier,1 Ashutosh Patel,1 and Adrian Nestor1
+DOI:http://dx.doi.org/10.1523/ENEURO.0358-17.2018
+Department of Psychology, University of Toronto Scarborough, 1265 Military Trail, Toronto, Ontario M1C1A4,
+Canada"
+e64b683e32525643a9ddb6b6af8b0472ef5b6a37,Face Recognition and Retrieval in Video,"Face Recognition and Retrieval in Video
+Caifeng Shan"
+e68b1fdc4e515f947c96f65ec7ac2521edbc06b2,ROS Wrapper for Real-Time Multi-Person Pose Estimation with a Single Camera,"Technical Report
+IRI--TR-17-02
+ROS Wrapper
+for Real-Time Multi-Person
+Pose Estimation
+with a Single Camera
+Autor
+Miguel Arduengo
+Sven Jens Jorgensen
+Supervisors
+Kimberly Hambuchen
+Luis Sentis
+Francesc Moreno
+Guillem Alenyà
+July 2017
+Institut de Robòtica i Informàtica Industrial"
+e6d8f332ae26e9983d5b42af4466ff95b55f2341,Pose-Normalized Image Generation for Person Re-identification,"Pose-Normalized Image Generation for Person Re-identification
+Xuelin Qian1, Yanwei Fu1, Tao Xiang2, Wenxuan Wang1
+Jie Qiu3, Yang Wu3, Yu-Gang Jiang1, Xiangyang Xue1
+Fudan University; 2Queen Mary University of London;
+Nara Institute of Science and Technology;"
+e63f4867c73eff9ff7cdf31246585a6915acef57,Digging Into Self-Supervised Monocular Depth Estimation,"Digging Into Self-Supervised
+Monocular Depth Estimation
+Cl´ement Godard
+Oisin Mac Aodha
+Gabriel J. Brostow"
+e6af98d1567dad534262ec0863264bb26157533f,On Multi-scale Differential Features and Their Representations for Image Retrieval and Recognition,"ON MULTI-SCALE DIFFERENTIAL FEATURES AND THEIR
+REPRESENTATIONS FOR IMAGE RETRIEVAL AND RECOGNITION
+A Dissertation Presented
+SRINIVAS S. RAVELA
+Submitted to the Graduate School of the
+University of Massachusetts Amherst in partial fulfillment
+of the requirements for the degree of
+DOCTOR OF PHILOSOPHY
+February 2003
+Department of Computer Science"
+e624c73e3057a1de75e9d6d7e813771154ff1375,Incorporating Scalability in Unsupervised Spatio- Temporal Feature Learning,"INCORPORATING SCALABILITY IN UNSUPERVISED SPATIO-TEMPORAL FEATURE
+LEARNING
+Sujoy Paul, Sourya Roy and Amit K. Roy-Chowdhury
+Dept. of Electrical and Computer Engineering, University of California, Riverside, CA 92521"
+e6b45d5a86092bbfdcd6c3c54cda3d6c3ac6b227,Pairwise Relational Networks for Face Recognition,"Pairwise Relational Networks for Face
+Recognition
+Bong-Nam Kang1[0000−0002−6818−7532], Yonghyun Kim2[0000−0003−0038−7850],
+nd Daijin Kim1,2[0000−0002−8046−8521]
+Department of Creative IT Engineering, POSTECH, Korea
+Department of Computer Science and Engineering, POSTECH, Korea"
+e68083909381a8fbd0e4468aa06204ac00a0e6fc,Visual Identification by Signature Tracking,"IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. 25, NO. 2, FEBRUARY 2003
+Visual Identification by Signature Tracking
+Mario E. Munich, Member, IEEE, and Pietro Perona, Member, IEEE"
+e6865b000cf4d4e84c3fe895b7ddfc65a9c4aaec,"Tobias Siebenlist , Kathrin Knautz Chapter 15 . The critical role of the cold - start problem and incentive systems in emotional Web 2 . 0 services","Tobias Siebenlist, Kathrin Knautz
+Chapter 15. The critical role of the
+old-start problem and incentive systems
+in emotional Web 2.0 services"
+e6d689054e87ad3b8fbbb70714d48712ad84dc1c,Robust Facial Feature Tracking,"Robust Facial Feature Tracking
+Fabrice Bourel, Claude C. Chibelushi, Adrian A. Low
+School of Computing, Staffordshire University
+Stafford ST18 0DG"
+e6868f172df3736e052fec4c00b63780b3d739fe,Effects of a Common Variant in the CD38 Gene on Social Processing in an Oxytocin Challenge Study: Possible Links to Autism,"Effects of a Common Variant in the CD38 Gene on Social
+Processing in an Oxytocin Challenge Study: Possible Links
+to Autism
+Carina Sauer*,1, Christian Montag2, Christiane Wo¨ rner1, Peter Kirsch1,3 and Martin Reuter2,3
+Department of Clinical Psychology, Central Institute of Mental Health, Medical Faculty Mannheim, Heidelberg University, Mannheim, Germany;
+Department of Differential and Biological Psychology, Rheinische Friedrich-Wilhelms-University, Bonn, Germany
+The intranasal application of oxytocin (OT) has been shown to influence behavioral and neural correlates of social processing. These
+effects are probably mediated by genetic variations within the OT system. One potential candidate could be the CD38 gene, which codes
+for a transmembrane protein engaged in OT secretion processes. A common variation in this gene (rs3796863) was recently found to
+e associated with autism spectrum disorders (ASD). Using an imaging genetics approach, we studied differential effects of an intranasal
+OT application on neural processing of social stimuli in 55 healthy young men depending on their CD38 gene variant in a double-blind
+placebo-controlled crossover design. Genotype had a significant influence on both behavioral and neuronal measures of social processing.
+Homozygotic risk allele carriers showed slower reaction times (RT) and higher activation of left fusiform gyrus during visual processing of
+social stimuli. Under OT activation differences between genotypes were more evident (though not statistically significantly increased) and
+RT were accelerated in homozygotic risk allele carriers. According to our data, rs3796863 mainly influences fusiform gyrus activation, an
+rea which has been widely discussed in ASD research. OT seems to modulate this effect by enhancing activation differences between
+llele groups, which suggests an interaction between genetic makeup and OT availability on fusiform gyrus activation. These results
+support recent approaches to apply OT as a pharmacological treatment of ASD symptoms.
+Keywords: oxytocin; CD38; social processing; imaging genetics; autism
+INTRODUCTION"
+e63a0ea338dfc7293ddd68074baf250e99d0c6d5,Nonlinear Supervised Dimensionality Reduction via Smooth Regular Embeddings,"Nonlinear Supervised Dimensionality Reduction via
+Smooth Regular Embeddings
+Department of Electrical and Electronics Engineering, METU, Ankara
+Cem ¨Ornek and Elif Vural"
+e6d48d23308a9e0a215f7b5ba6ae30ee5d2f0ef5,Multi-person Tracking by Online Learned Grouping Model with Non-linear Motion Context,"IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS FOR VIDEO TECHNOLOGY, VOL. XX, NO. XX, MONTH YEAR
+Multi-person Tracking by Online Learned Grouping
+Model with Non-linear Motion Context
+Xiaojing Chen, Zhen Qin, Le An, Member, IEEE, and Bir Bhanu, Fellow, IEEE"
+e6ca412a05002b51d358c2e3061913c3dab6b810,MoFA: Model-Based Deep Convolutional Face Autoencoder for Unsupervised Monocular Reconstruction,
+e6dc1200a31defda100b2e5ddb27fb7ecbbd4acd,Flexible Manifold Embedding: A Framework for Semi-Supervised and Unsupervised Dimension Reduction,"Flexible Manifold Embedding: A Framework
+for Semi-Supervised and Unsupervised
+Dimension Reduction
+Feiping Nie, Dong Xu, Member, IEEE, Ivor Wai-Hung Tsang, and Changshui Zhang, Member, IEEE
+, the linear regression function ("
+e6e5a6090016810fb902b51d5baa2469ae28b8a1,Title Energy-Efficient Deep In-memory Architecture for NAND Flash Memories,"Title
+Energy-Efficient Deep In-memory Architecture for NAND
+Flash Memories
+Archived version
+Accepted manuscript: the content is same as the published
+paper but without the final typesetting by the publisher
+Published version
+Published paper
+Authors (contact)
+0.1109/ISCAS.2018.8351458"
+e688a6535dbdd6ce6928bc4eb2978f39628e5302,Hand Drawn Sketch Classification Using Convolutional Neural Networks,"SUPPLEMENT ISSUE
+ARTICLE
+HAND DRAWN SKETCH CLASSIFICATION USING
+CONVOLUTIONAL NEURAL NETWORKS
+Habibollah Agh Atabay*
+Department of Computer, Gonbad Kavous University, Gonbad Kavous, IRAN"
+e6aadde93aedc06525523415e574507cf5c8cc44,End-to-end optimization of goal-driven and visually grounded dialogue systems,"End-to-end optimization of goal-driven and visually grounded dialogue systems
+Florian Strub
+Univ. Lille, CNRS, Centrale Lille, Inria,
+UMR 9189 - CRIStAL, F-59000 Lille, France
+Harm de Vries
+University of Montreal
+Jeremie Mary
+Univ. Lille, CNRS, Centrale Lille, Inria,
+UMR 9189 - CRIStAL, F-59000 Lille, France
+Bilal Piot
+DeepMind"
+e605242319ba495bc5f47abe9f1c08d508d83627,Importance-Aware Semantic Segmentation for Autonomous Driving System,Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+e6178de1ef15a6a973aad2791ce5fbabc2cb8ae5,Improving Facial Landmark Detection via a Super-Resolution Inception Network,"Improving Facial Landmark Detection via a
+Super-Resolution Inception Network
+Martin Knoche, Daniel Merget, Gerhard Rigoll
+Institute for Human-Machine Communication
+Technical University of Munich, Germany"
+e6beb5d95fa262b8717cc264d79a879285db15d4,Towards Transparent AI Systems: Interpreting Visual Question Answering Models,"Towards Transparent AI Systems:
+Interpreting Visual Question Answering Models
+Yash Goyal, Akrit Mohapatra, Devi Parikh, Dhruv Batra
+{ygoyal, akrit, parikh,
+Virginia Tech"
+e68ef9597613cd2b6cf76e81c13eb061ee468485,Latent Convolutional Models,"Published as a conference paper at ICLR 2019
+LATENT CONVOLUTIONAL MODELS
+ShahRukh Athar
+Skolkovo Institute of Science and Technology (Skoltech), Russia
+Evgeny Burnaev
+Victor Lempitsky∗"
+f9f08511f77c29ff948e146434dfb23608d3deb5,Question Answering Using Match-LSTM and Answer Pointer,"Question Answering Using Match-LSTM and Answer Pointer
+Annie Hu, Cindy Wang, and Brandon Yang
+{anniehu, ciwang,
+CodaLab: anniehu
+March 21, 2016
+Introduction
+Machine comprehension of text is a significant problem in natural language processing today –
+in this project, we tackle machine reading comprehension as applied to question answering. Our
+goal is: given a question and a context paragraph, to extract from the paragraph the answer to
+the question.
+As an oracle, on the dataset we used, humans score over 86.8% accuracy (EM) on the test
+set for this task, while the best models only achieve roughly 75%. Existing approaches to this
+extractive Question Answering problem typically involve an encoding layer that encodes the
+question and paragraph into a sequence, some additional layer that accounts for interaction
+etween the question and paragraph, and a final decoding layer that extracts the answer from
+the paragraph [2][3][4][7]. In this paper, we will follow a similar structure, using LSTMs in our
+encoding and decoding layers, and calculating attention as our interaction layer.
+Dataset
+The dataset used is the recently released Stanford Question Answering Dataset (SQuAD)[1].
+The context paragraphs are extracted from Wikipedia, while questions and answers are human-"
+f984a9bb5c6e7b8a055b810bff468d7f8d80a7ff,Face identification by using fusing Photographic and Thermal Images,"www.jchps.com Journal of Chemical and Pharmaceutical Sciences
+Face identification by using fusing Photographic and Thermal Images
+M. Parisa Beham, 2M.R.H. Prasanna, 2SM.Mansoor Roomi and 1H. Jebina
+ISSN: 0974-2115
+Vickram College of Engineering, Tamilnadu, India.
+Thiagarajar College of Engineering, Tamilnadu, India.
+*Corresponding Author:E-Mail"
+f95616b1593467f5b11689582d934da34e6ad1ee,Interactive Language Acquisition with One-shot Visual Concept Learning through a Conversational Game,"Interactive Language Acquisition with One-shot Visual Concept Learning
+through a Conversational Game
+Haichao Zhang†, Haonan Yu†, and Wei Xu †§
+§ National Engineering Laboratory for Deep Learning Technology and Applications, Beijing China
+Baidu Research - Institue of Deep Learning, Sunnyvale USA"
+f96b3122f66c01cb78643d7e1b412e1bae16f2c4,Affective Robots : Evaluation of Automatic Emotion Recognition Approaches on a Humanoid Robot towards Emotionally Intelligent Machines,"World Academy of Science, Engineering and Technology
+International Journal of Mechanical and Mechatronics Engineering
+Vol:12, No:6, 2018
+Affective Robots: Evaluation of Automatic Emotion
+Recognition Approaches on a Humanoid Robot
+towards Emotionally Intelligent Machines
+Silvia Santano Guill´en, Luigi Lo Iacono, Christian Meder"
+f98cbf32989387733529fa4fc943f0a7e97b5c07,To Know and To Learn - About the Integration of Knowledge Representation and Deep Learning for Fine-Grained Visual Categorization,
+f9129b3858c14b5f6cca1fcbf31c4816d94a5038,A Robust 3D-2D Interactive Tool for Scene Segmentation and Annotation,"A Robust 3D-2D Interactive Tool for Scene
+Segmentation and Annotation
+Duc Thanh Nguyen, Binh-Son Hua∗, Lap-Fai Yu, Member, IEEE, and Sai-Kit Yeung, Member, IEEE"
+f98a975642972ce24e42e6957f63be556c11dd31,Dynamic Obstacle Detection of Road Scenes using Equi-Height Mosaicking Image,"Electronic Letters on Computer Vision and Image Analysis 13(2):13-14, 2014
+Dynamic Obstacle Detection of Road Scenes
+using Equi-Height Mosaicking Image
+Min Woo Park and Soon Ki Jung
+School of Computer Science and Engineering, Kyungpook National University,
+80 Daehak-ro, Bukgu, Daegu, Republic of Korea
+Advisor/s: Soon Ki Jung
+Date and location of PhD thesis defense: 3 December 2013, Kyungpook National University
+Received 30 January 2014; accepted 25 May 2014"
+f95f5e43f34e1bfb425b6491fc09558c44d2973d,Soft Layer-Specific Multi-Task Summarization with Entailment and Question Generation,"Soft Layer-Specific Multi-Task Summarization
+with Entailment and Question Generation
+Han Guo∗
+Ramakanth Pasunuru∗
+UNC Chapel Hill
+{hanguo, ram,
+Mohit Bansal"
+f9bee6e61833c0323c9175402b73442d27ab9eb8,D Human Poses Estimation from a Single 2 D Silhouette,
+f9028b47a4755a7349108b1dc281f13add5c6c12,Atypical gaze patterns in children and adults with autism spectrum disorders dissociated from developmental changes in gaze behaviour,"Downloaded from
+http://rspb.royalsocietypublishing.org/
+on June 9, 2017
+Proc. R. Soc. B
+doi:10.1098/rspb.2010.0587
+Published online
+Atypical gaze patterns in children and
+dults with autism spectrum disorders
+dissociated from developmental changes
+in gaze behaviour
+Tamami Nakano1,2, Kyoko Tanaka3, Yuuki Endo1, Yui Yamane1,
+Takahiro Yamamoto4, Yoshiaki Nakano4, Haruhisa Ohta2,5,
+Nobumasa Kato2,5 and Shigeru Kitazawa1,2,*
+Department of Neurophysiology, and 3Department of Pediatrics, Juntendo University
+School of Medicine, Tokyo, Japan
+CREST, JST, Saitama, Japan
+Japanese Institute for Education and Treatment, Tokyo, Japan
+5Department of Psychiatry, Showa University School of Medicine, Tokyo, Japan
+Eye tracking has been used to investigate gaze behaviours in individuals with autism spectrum disorder
+(ASD). However, traditional analysis has yet to find behavioural characteristics shared by both children"
+f921e6f5085f1ebbd8289081e499240a89bf6c43,Three-Dimensional Face Recognition in the Presence of Facial Expressions: An Annotated Deformable Model Approach,"Three-Dimensional Face Recognition
+in the Presence of Facial Expressions:
+An Annotated Deformable Model Approach
+Ioannis A. Kakadiaris, Member, IEEE, Georgios Passalis, George Toderici,
+Mohammed N. Murtuza, Yunliang Lu, Nikos Karampatziakis, and Theoharis Theoharis"
+f9fdc63934841a0c4d8d29fdea80e1972ffcfe1e,Pedestrian Using Catadioptric Sensor 12,"Journal of Theoretical and Applied Information Technology
+0th April 2018. Vol.96. No 8
+© 2005 – ongoing JATIT & LLS
+ISSN: 1992-8645 www.jatit.org E-ISSN: 1817-3195
+PEDESTRIAN USING CATADIOPTRIC SENSOR
+2BOUI MAROUANE, 2HADJ-ABDELKADER HICHAM, 2ABABSA FAKHR-EDDINE,
+ABOUYAKHF EL HOUSSINE
+LIMIARF University Mohammed V-Rabat
+IBISC, University of Evry, France
+E-mail:"
+f9784db8ff805439f0a6b6e15aeaf892dba47ca0,"Comparing the performance of Emotion-Recognition Implementations in OpenCV, Cognitive Services, and Google Vision APIs","Comparing the performance of Emotion-Recognition Implementations
+in OpenCV, Cognitive Services, and Google Vision APIs
+LUIS ANTONIO BELTRÁN PRIETO, ZUZANA KOMÍNKOVÁ OPLATKOVÁ
+Department of Informatics and Artificial Intelligence
+Tomas Bata University in Zlín
+Nad Stráněmi 4511, 76005, Zlín
+CZECH REPUBLIC"
+f935225e7811858fe9ef6b5fd3fdd59aec9abd1a,Spatiotemporal dynamics and connectivity pattern differences between centrally and peripherally presented faces.,"www.elsevier.com/locate/ynimg
+Spatiotemporal dynamics and connectivity pattern differences
+etween centrally and peripherally presented faces
+Lichan Liu and Andreas A. Ioannides*
+Laboratory for Human Brain Dynamics, RIKEN Brain Science Institute (BSI), 2-1 Hirosawa, Wakoshi, Saitama, 351-0198, Japan
+Received 4 May 2005; revised 26 January 2006; accepted 6 February 2006
+Available online 24 March 2006
+Most neuroimaging studies on face processing used centrally presented
+images with a relatively large visual field. Images presented in this way
+ctivate widespread striate and extrastriate areas and make it difficult
+to study spatiotemporal dynamics and connectivity pattern differences
+from various parts of the visual field. Here we studied magneto-
+encephalographic responses in humans to centrally and peripherally
+presented faces for testing the hypothesis that processing of visual
+stimuli with facial expressions of emotions depends on where the
+stimuli are presented in the visual field. Using our tomographic and
+statistical parametric mapping analyses, we identified occipitotemporal
+reas activated by face stimuli more than by control conditions. V1/V2
+ctivity was significantly stronger for lower than central and upper
+visual field presentation. Fusiform activity, however, was significantly"
+f95ba7673789d1b4118d30e360a5a37fd75d3961,Face Recognition using Modified Generalized Hough Transform and Gradient Distance Descriptor,"Face Recognition using Modified Generalized Hough Transform
+nd Gradient Distance Descriptor
+Marian Moise, Xue Dong Yang and Richard Dosselmann
+Department of Computer Science, University of Regina, 3737 Wascana Parkway, Regina, Saskatchewan, Canada
+Keywords:
+Face Recognition, Generalized Hough Transform, Image Descriptors."
+f93606d362fcbe62550d0bf1b3edeb7be684b000,Nearest Neighbor Classifier Based on Nearest Feature Decisions,"The Computer Journal Advance Access published February 1, 2012
+© The Author 2012. Published by Oxford University Press on behalf of The British Computer Society. All rights reserved.
+For Permissions, please email:
+doi:10.1093/comjnl/bxs001
+Nearest Neighbor Classifier Based
+on Nearest Feature Decisions
+Alex Pappachen James1,∗ and Sima Dimitrijev2
+Machine Intelligence Group, School of Computer Science, Indian Institute of Information Technology and
+Queensland Micro- and Nanotechnology Centre and Griffith School of Engineering, Griffith University,
+Management, Kerala, India
+Nathan, Australia
+Corresponding author:
+High feature dimensionality of realistic datasets adversely affects the recognition accuracy of nearest
+neighbor (NN) classifiers. To address this issue, we introduce a nearest feature classifier that shifts
+the NN concept from the global-decision level to the level of individual features. Performance
+omparisons with 12 instance-based classifiers on 13 benchmark University of California Irvine
+lassification datasets show average improvements of 6 and 3.5% in recognition accuracy and
+rea under curve performance measures, respectively. The statistical significance of the observed
+performance improvements is verified by the Friedman test and by the post hoc Bonferroni–Dunn
+test. In addition, the application of the classifier is demonstrated on face recognition databases, a"
+f94feceb5b725c6b303b758a0e5e90215b0174d3,Learning Non-maximum Suppression,"Learning non-maximum suppression
+Jan Hosang
+Rodrigo Benenson
+Bernt Schiele
+Max Planck Institut für Informatik
+Saarbrücken, Germany"
+f997a71f1e54d044184240b38d9dc680b3bbbbc0,Deep Cross Modal Learning for Caricature Verification and Identification(CaVINet),"Deep Cross Modal Learning for Caricature Verification and
+Identification(CaVINet)
+https://lsaiml.github.io/CaVINet/
+Jatin Garg∗
+Indian Institute of Technology Ropar
+Himanshu Tolani∗
+Indian Institute of Technology Ropar
+Skand Vishwanath Peri∗
+Indian Institute of Technology Ropar
+Narayanan C Krishnan
+Indian Institute of Technology Ropar"
+f96970f75b0f37787a47073bf7d02111f45abe83,3 D Face Recognition Performance under Adversarial Conditions,
+f9d1f12070e5267afc60828002137af949ff1544,Maximum Entropy Binary Encoding for Face Template Protection,"Maximum Entropy Binary Encoding for Face Template Protection
+Rohit Kumar Pandey
+Yingbo Zhou
+Bhargava Urala Kota
+Venu Govindaraju
+University at Buffalo, SUNY
+{rpandey, yingbozh, buralako,"
+f0f876b5bf3d442ef9eb017a6fa873bc5d5830c8,"LOH and Behold: Web-Scale Visual Search, Recommendation and Clustering Using Locally Optimized Hashing","LOH and Behold: Web-scale visual search,
+recommendation and clustering using Locally
+Optimized Hashing
+Yannis Kalantidis:, Lyndon Kennedy;‹, Huy Nguyen:,
+Clayton Mellina: and David A. Shamma§‹
+:Computer Vision and Machine Learning Group, Flickr, Yahoo
+;Futurewei Technologies Inc.
+§CWI: Centrum Wiskunde & Informatica, Amsterdam"
+f00e51ec0e3894bdb2977a01824f37b15bb82c6e,A Gaussian Approximation of Feature Space for Fast Image Similarity,"Computer Science and ArtificialIntelligence LaboratoryTechnical Reportmassachusetts institute of technology, cambridge, ma 02139 usa — www.csail.mit.eduMIT-CSAIL-TR-2012-032October 1, 2012A Gaussian Approximation of Feature Space for Fast Image Similarity Michael Gharbi, Tomasz Malisiewicz, Sylvain Paris, and FrØdo Durand"
+f0ca04fe6de04a46f44dabd8744b4163e8e0b4d3,Low-Resolution and Low-Quality Face Super-Resolution in Monitoring Scene via Support-Driven Sparse Coding,"J Sign Process Syst (2014) 75:245–256
+DOI 10.1007/s11265-013-0804-9
+Low-Resolution and Low-Quality Face Super-Resolution
+in Monitoring Scene via Support-Driven Sparse Coding
+Junjun Jiang & Ruimin Hu & Zhen Han & Zhongyuan Wang
+Received: 25 April 2013 / Revised: 2 June 2013 / Accepted: 4 June 2013 / Published online: 26 June 2013
+# Springer Science+Business Media New York 2013"
+f006161327d3ea3484064c1a86e4c87c729fd7b8,Rough Sets Methods in Feature Reduction and Classification,"Int. J. Appl. Math. Comput. Sci., 2001, Vol.11, No.3, 565{582
+ROUGH SETS METHODS IN FEATURE REDUCTION
+AND CLASSIFICATION
+Roman W. (cid:145)WINIARSKI(cid:3)
+The paper presents an application of rough sets and statistical methods to fea-
+ture reduction and pattern recognition. The presented description of rough sets
+theory emphasizes the role of rough sets reducts in feature selection and data
+reduction in pattern recognition. The overview of methods of feature selection
+emphasizes feature selection criteria, including rough set-based methods. The
+paper also contains a description of the algorithm for feature selection and re-
+duction based on the rough sets method proposed jointly with Principal Compo-
+nent Analysis. Finally, the paper presents numerical results of face recognition
+experiments using the learning vector quantization neural network, with feature
+selection based on the proposed principal components analysis and rough sets
+methods.
+Keywords: rough sets, feature selection, classi(cid:12)cation
+. Introduction
+One of the fundamental steps in classi(cid:12)er design is reduction of pattern dimensional-
+ity through feature extraction and feature selection (Cios et al., 1998; Kittler, 1986;
+Langley and Sage, 1994; Liu and Motoda, 1999). Feature selection is often isolated as"
+f08266cea120e8aa091983da5269ee5e35febe75,Semantic Diversity versus Visual Diversity in Visual Dictionaries,"Semantic Diversity versus Visual Diversity
+in Visual Dictionaries
+Ot´avio A. B. Penatti, Sandra Avila, Member, IEEE, Eduardo Valle, Ricardo da S. Torres, Member, IEEE"
+f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4,The EuroCity Persons Dataset: A Novel Benchmark for Object Detection,"The EuroCity Persons Dataset:
+A Novel Benchmark for Object Detection
+Markus Braun, Sebastian Krebs, Fabian Flohr, and Dariu M. Gavrila"
+f0865d11131a84ef1d91e1c8b5718692f153267d,Explaining Autism Spectrum Disorders,"Articles in PresS. J Neurophysiol (May 28, 2014). doi:10.1152/jn.00242.2014
+EXPLAINING AUTISM SPECTRUM DISORDERS
+Explaining autism spectrum disorders: central coherence versus predictive coding theories.
+Target Article: Stevenson, R. A., Siemann, J. K., Schneider, B. C., Eberly, H. E., Woynaroski, T. G.,
+Camarata, S. M., & Wallace, M. T. (2014). Multisensory Temporal Integration in Autism Spectrum
+Disorders. The Journal of Neuroscience, 34(3), 691-697. doi: 10.1523/jneurosci.3615-13.2014
+Jason S. Chan* & Marcus J. Naumer
+Institute of Medical Psychology
+Goethe-University, Frankfurt
+KEYWORDS: Autism Spectrum Disorder, Multisensory Integration, Temporal Binding Window
+Acknowledgements: This was funded by the Hessian initiative for the development of scientific and
+economic excellence (LOEWE) Neuronal Coordination Research Focus Frankfurt (NeFF).
+*Corresponding author:
+Jason Chan
+Copyright © 2014 by the American Physiological Society."
+f0cee87e9ecedeb927664b8da44b8649050e1c86,Image Ordinal Classification and Understanding: Grid Dropout with Masking Label,"label:(1, 0, 1, 0, 1, 1, 1, 1, 1)Masking label:(0, 1, 1, 1, 0, 1, 1, 1, 1)Entire imageInput imageNeuron dropout’s gradCAMGrid dropout’s gradCAMFig.1.Above:imageordinalclassificationwithrandomlyblackoutpatches.Itiseasyforhumantorecognizetheageregardlessofthemissingpatches.Themaskinglabelisalsousefultoimageclassification.Bottom:griddropout’sgrad-CAMisbetterthanthatofneurondropout.Thatistosay,griddropoutcanhelplearningfeaturerepresentation.problem[1].Withtheproliferationofconvolutionalneuralnetwork(CNN),workshavebeencarriedoutonordinalclas-sificationwithCNN[1][2][3].Thoughgoodperformanceshavebeenloggedwithmoderndeeplearningapproaches,therearetwoproblemsinimageordinalclassification.Ononehand,theamountofordinaltrainingdataisverylim-itedwhichprohibitstrainingcomplexmodelsproperly,andtomakemattersworse,collectinglargetrainingdatasetwithordinallabelisdifficult,evenharderthanlabellinggenericdataset.Therefore,insufficienttrainingdataincreasestheriskofoverfitting.Ontheotherhand,lessstudiesareconductedtounderstandwhatdeepmodelshavelearnedonordinaldata978-1-5386-1737-3/18/$31.00c(cid:13)2018IEEE"
+f0f4f16d5b5f9efe304369120651fa688a03d495,Temporal Generative Adversarial Nets,"Temporal Generative Adversarial Nets
+Masaki Saito∗
+Eiichi Matsumoto∗
+Preferred Networks inc., Japan
+{msaito,"
+f0d29be1a93158d320bef285442f63bb090f6c31,An Online and Flexible Multi-Object Tracking Framework using Long Short-Term Memory,"An Online and Flexible Multi-Object Tracking Framework using Long
+Short-Term Memory
+Xingyu Wan, Jinjun Wang, Sanping Zhou
+Xi’an Jiaotong University
+Institute of Artificial Intelligence and Robotics
+8 West Xianning Road, Xi’an, Shaanxi, China, 710049"
+f0ae807627f81acb63eb5837c75a1e895a92c376,Facial Landmark Detection using Ensemble of Cascaded Regressions,"International Journal of Emerging Engineering Research and Technology
+Volume 3, Issue 12, December 2015, PP 128-133
+ISSN 2349-4395 (Print) & ISSN 2349-4409 (Online)
+Facial Landmark Detection using Ensemble of Cascaded
+Regressions
+Martin Penev1*, Ognian Boumbarov2
+Faculty of Telecommunications, Technical University, Sofia, Bulgaria
+Faculty of Telecommunications, Technical University, Sofia, Bulgaria"
+f06f3e1cef2d04af915a932e83b22e46a45f3b73,Action understanding and social learning in Autism: a developmental perspective,"Life Span and Disability / XIV, 1 (2011), 7-29
+Action understanding and social learning in Autism:
+developmental perspective
+Giacomo Vivanti1 & Sally J. Rogers2"
+f0dd265dfbe9ffe86ca56ba053335626720059a3,CNN Fixations: An unraveling approach to visualize the discriminative image regions,"CNN Fixations: An unraveling approach to
+visualize the discriminative image regions
+Konda Reddy Mopuri*, Utsav Garg*, R. Venkatesh Babu, Senior Member, IEEE"
+f0aac566e3d2c06759b8f4f45a270d5af93b9705,Ear Structure Feature Extraction Based on Multi-scale Hessian Matrix,"International Journal of Signal Processing, Image Processing and Pattern Recognition
+Vol.9, No.5 (2016), pp.159-172
+http://dx.doi.org/10.14257/ijsip.2016.9.5.14
+Ear Structure Feature Extraction Based on Multi-scale Hessian
+Matrix
+,Ban Xiaojuan*1, Wang Guosheng3 and Tian Ying2
+Ma Chi1,2,3
+School of Computer & Communication Engineering, University of Science and
+College of Software, University of Science and Technology LiaoNing, Anshan,
+Technology Beijing, Beijing, China
+Beihai Yinhe Industry Investment Co.,Ltd., Beihai, China
+China"
+f0d18a5d205c23d1309387dfbd4ecfbcf3b1687e,Atypical neural modulation in the right prefrontal cortex during an inhibitory task with eye gaze in autism spectrum disorder as revealed by functional near-infrared spectroscopy.,"Terms of Use: https://journals.spiedigitallibrary.org/terms-of-use
+Atypicalneuralmodulationintherightprefrontalcortexduringaninhibitorytaskwitheyegazeinautismspectrumdisorderasrevealedbyfunctionalnear-infraredspectroscopyTakahiroIkedaMasahiroHiraiTakeshiSakuradaYukifumiMondenTatsuyaTokudaMasakoNagashimaHideoShimoizumiIppeitaDanTakanoriYamagataTakahiroIkeda,MasahiroHirai,TakeshiSakurada,YukifumiMonden,TatsuyaTokuda,MasakoNagashima,HideoShimoizumi,IppeitaDan,TakanoriYamagata,“Atypicalneuralmodulationintherightprefrontalcortexduringaninhibitorytaskwitheyegazeinautismspectrumdisorderasrevealedbyfunctionalnear-infraredspectroscopy,”Neurophoton.5(3),035008(2018),doi:10.1117/1.NPh.5.3.035008."
+f09432b7f470268c28d3d4ebd17a44773b678900,Structured Attentions for Visual Question Answering,"Structured Attentions for Visual Question Answering
+Chen Zhu, Yanpeng Zhao, Shuaiyi Huang, Kewei Tu, Yi Ma
+{zhuchen, zhaoyp1, huangsy, tukw,
+ShanghaiTech University"
+f07956d0031ff046c5c719296f7916d7897fdd21,A Flexible Real-Time Control System for Autonomous Vehicles,"A Flexible Real-Time Control System for Autonomous Vehicles.
+Johannes Meyer, Armin Strobel
+Institute of Flight Systems and Automatic Control, Technische Universität Darmstadt, Germany 1"
+f0b77702c8f2249ee1f48e51ff9b86faffe177c9,Reformulating Level Sets as Deep Recurrent Neural Network Approach to Semantic Segmentation,"Reformulating Level Sets as Deep Recurrent Neural Network Approach
+to Semantic Segmentation
+Ngan Le 1 Kha Gia Quach 1 2 Khoa Luu 1 Marios Savvides 1 Chenchen Zhu 1"
+f040e4fcedca0c07788ecb6e92ad246b9c1697a9,Real-time Multiple Head Tracking Using Texture and Colour Cues,"REAL-TIME MULTIPLE HEAD TRACKING
+USING TEXTURE AND COLOUR CUES
+Vasil Khalidov Jean-Marc Odobez
+Idiap-RR-02-2017
+FEBRUARY 2017
+Centre du Parc, Rue Marconi 19, P.O. Box 592, CH - 1920 Martigny
+T +41 27 721 77 11 F +41 27 721 77 12 www.idiap.ch"
+f0a0f341fa1f91ee58a5020297bea02f8863cb26,Learning Deep Semantic Embeddings for Cross-Modal Retrieval,"Proceedings of Machine Learning Research 77:471–486, 2017
+ACML 2017
+Learning Deep Semantic Embeddings for Cross-Modal
+Retrieval
+Cuicui Kang
+No.89A Minzhuang Road, Beijing, China
+Shengcai Liao∗
+No.95 Zhuangguancun East Road, Beijing, China
+Zhen Li, Zigang Cao, Gang Xiong
+No.89A Minzhuang Road, Beijing, China
+Editors: Yung-Kyun Noh and Min-Ling Zhang"
+f0cc615b14c97482faa9c47eb855303c71ff03a7,Tracklet clustering for robust multiple object tracking using distance dependent Chinese restaurant processes,"SIViP
+DOI 10.1007/s11760-015-0817-x
+ORIGINAL PAPER
+Tracklet clustering for robust multiple object tracking
+using distance dependent Chinese restaurant processes
+Ibrahim Saygin Topkaya1 · Hakan Erdogan1 · Fatih Porikli2,3
+Received: 4 June 2015 / Revised: 19 August 2015 / Accepted: 10 September 2015
+© Springer-Verlag London 2015"
+f0483ebab9da2ba4ae6549b681cf31aef2bb6562,3c-gan: an Condition-context-composite Generative Adversarial Networks for Gen-,"Under review as a conference paper at ICLR 2018
+C-GAN: AN
+CONDITION-CONTEXT-COMPOSITE
+GENERATIVE ADVERSARIAL NETWORKS FOR GEN-
+ERATING IMAGES SEPARATELY
+Anonymous authors
+Paper under double-blind review"
+f04cffcd0cc68e28cf05827ab998cf84b1ab0f3d,Crowdsourced Data Preprocessing with R and Amazon Mechanical Turk,"CONTRIBUTED RESEARCH ARTICLES
+Crowdsourced Data Preprocessing with R
+nd Amazon Mechanical Turk
+y Thomas J. Leeper"
+f0b30a9bb9740c2886d96fc44d6f35b8eacab4f3,Are You Sure You Want To Do That ? Classification with Interpretable Queries,"Are You Sure You Want To Do That?
+Classification with Interpretable Queries
+Anonymous Author(s)
+Affiliation
+Address
+email"
+f736b7cf8388f20bfe9619d63d9c4ce070091863,Automated Crowd Detection in Stadium Arenas,"AUTOMATED CROWD DETECTION IN STADIUM ARENAS
+Loris Nanni, 1 Sheryl Brahnam, 2 Stefano Ghidoni, 1 Emanuele Menegatti1
+DIE, University of Padua, Via Gradenigo, 6 - 35131- Padova – Italy e-mail: {loris.nanni, ghidoni,
+CIS, Missouri State University, 901 S. National, Springfield, MO 65804, USA e-mail:"
+f73b15d33b9dcf329cf605815be7493b162b1fab,SLMotion - An extensible sign language oriented video analysis tool,"SLMotion – An extensible sign language oriented video analysis tool
+Matti Karppa∗, Ville Viitaniemi∗, Marcos Luzardo∗, Jorma Laaksonen∗, Tommi Jantunen†
+Department of Information and Computer Science,
+Aalto University School of Science, Espoo, Finland,
+Sign Language Centre, Department of Languages,
+University of Jyv¨askyl¨a, Finland,
+We present a software toolkit called SLMotion which provides a framework for automatic and semiautomatic analysis, feature extraction
+nd annotation of individual sign language videos, and which can easily be adapted to batch processing of entire sign language corpora.
+The program follows a modular design, and exposes a Numpy-compatible Python application programming interface that makes it easy
+nd convenient to extend its functionality through scripting. The program includes support for exporting the annotations in ELAN
+format. The program is released as free software, and is available for GNU/Linux and MacOS platforms."
+f79267b0f4c0110051c93f9faabe436215e4fc28,Selective Feature Connection Mechanism: Concatenating Multi-layer CNN Features with a Feature Selector,"Selective Feature Connection Mechanism:
+Concatenating Multi-layer CNN Features with a Feature Selector
+Chen Du1,2, Chunheng Wang1, Cunzhao Shi1, Baihua Xiao1
+Institute of Automation, Chinese Academy of Sciences(CASIA)
+University of Chinese Academy of Sciences(UCAS)
+{duchen2016, chunheng.wang, cunzhao.shi,"
+f74dbf3481fc3228ea821da232128b98ad5f7a60,Using low-level motion for high-level vision,"Using Low-Level Motion for
+High-Level Vision
+Ben Daubney
+A dissertation submitted to the University of Bristol in accordance with the
+requirements for the degree of Doctor of Philosophy in the Faculty of Engineering,
+Department of Computer Science.
+July 2009"
+f79c4bf83371627ba139b61eb427463b93cd687b,Learning from Few Examples for Visual Recognition Problems,"Learning from Few Examples for Visual
+Recognition Problems
+Erik Rodner
+Dissertation
+zur Erlangung des akademischen Grades
+doctor rerum naturalium (Dr. rer. nat.)
+vorgelegt dem Rat der Fakultät für Mathematik und Informatik
+der Friedrich-Schiller-Universität Jena"
+f740bac1484f2f2c70777db6d2a11cf4280081d6,Soft Locality Preserving Map (SLPM) for Facial Expression Recognition,"Soft Locality Preserving Map (SLPM) for Facial Expression
+Recognition
+Cigdem Turana,*, Kin-Man Lama, Xiangjian Heb
+Centre for Signal Processing, Department of Electronic and Information Engineering, The Hong
+Kong Polytechnic University, Kowloon, Hong Kong
+Computer Science, School of Electrical and Data Engineering, University of Technology, Sydney,
+Australia
+E-mail addresses: (C. Turan), (K.-M. Lam),
+(X. He)"
+f79c97e7c3f9a98cf6f4a5d2431f149ffacae48f,Title On color texture normalization for active appearance models,"Provided by the author(s) and NUI Galway in accordance with publisher policies. Please cite the published
+version when available.
+Title
+On color texture normalization for active appearance models
+Author(s)
+Ionita, Mircea C.; Corcoran, Peter M.; Buzuloiu, Vasile
+Publication
+009-05-12
+Publication
+Information
+Ionita, M. C., Corcoran, P., & Buzuloiu, V. (2009). On Color
+Texture Normalization for Active Appearance Models. Image
+Processing, IEEE Transactions on, 18(6), 1372-1378.
+Publisher
+Link to
+publisher's
+version
+http://dx.doi.org/10.1109/TIP.2009.2017163
+Item record
+http://hdl.handle.net/10379/1350"
+f7db1a670a99fd68dc3c6478eb9aeadc2838a897,Based Pose Invariant Face Recognition,"FEATURE BASED POSE INVARIANT FACE RECOGNITION
+Berk G¨okberk
+BS. in Computer Engineering, Bo˘gazi¸ci University, 1999
+Submitted to the Institute for Graduate Studies in
+Science and Engineering in partial fulfillment of
+the requirements for the degree of
+Master of Science
+Computer Engineering
+Bo˘gazi¸ci University"
+f7580def2dd84a6a083188aadd9c66c99925860b,Effective Use of Synthetic Data for Urban Scene Semantic Segmentation,"Effective Use of Synthetic Data for
+Urban Scene Semantic Segmentation(cid:63)
+Fatemeh Sadat Saleh1,2[0000−0002−3695−9876], Mohammad Sadegh
+Aliakbarian1,2,3[0000−0003−3948−6418], Mathieu Salzmann4[0000−0002−8347−8637],
+Lars Petersson2[0000−0002−0103−1904], and Jose M. Alvarez5[0000−0002−7535−6322]
+ANU, 2 Data61-CSIRO, 3 ACRV, 4 CVLab, EPFL, 5 NVIDIA"
+f7514435495cd76552a4de01652a08ff8c2863c7,Recognition of Emotions From Facial Expression and Situational Cues in Children with Autism,"Dissertations
+Loyola University Chicago
+Loyola eCommons
+Theses and Dissertations
+Recognition of Emotions From Facial Expression
+nd Situational Cues in Children with Autism
+Dina Tell
+Loyola University Chicago
+Recommended Citation
+Tell, Dina, ""Recognition of Emotions From Facial Expression and Situational Cues in Children with Autism"" (2009). Dissertations.
+Paper 234.
+http://ecommons.luc.edu/luc_diss/234
+This Dissertation is brought to you for free and open access by the Theses and Dissertations at Loyola eCommons. It has been accepted for inclusion in
+Dissertations by an authorized administrator of Loyola eCommons. For more information, please contact
+This work is licensed under a Creative Commons Attribution-Noncommercial-No Derivative Works 3.0 License.
+Copyright © 2009 Dina Tell"
+f755d9b2b7ef66ffdf7504b34167b95d0685c18d,Efficient Online Subspace Learning With an Indefinite Kernel for Visual Tracking and Recognition,"Efficient Online Subspace Learning With
+n Indefinite Kernel for Visual
+Tracking and Recognition
+Stephan Liwicki, Student Member, IEEE, Stefanos Zafeiriou, Member, IEEE,
+Georgios Tzimiropoulos, Member, IEEE, and Maja Pantic, Fellow, IEEE"
+f7dcadc5288653ec6764600c7c1e2b49c305dfaa,Interactive Image Search with Attributes by,"Copyright
+Adriana Ivanova Kovashka"
+f7de943aa75406fe5568fdbb08133ce0f9a765d4,Biometric Identification and Surveillance1,"Project 1.5: Human Identification at a Distance - Hornak, Adjeroh, Cukic, Gautum, & Ross
+Project 1.5
+Biometric Identification and Surveillance1
+Don Adjeroh, Bojan Cukic, Arun Ross – West Virginia University
+Year 5 Deliverable
+Technical Report:
+Research Challenges in Biometrics
+Indexed biography of relevant biometric research literature
+Donald Adjeroh, Bojan Cukic, Arun Ross
+April, 2014
+""This research was supported by the United States Department of Homeland Security through the National Center for Border Security
+nd Immigration (BORDERS) under grant number 2008-ST-061-BS0002. However, any opinions, findings, and conclusions or
+recommendations in this document are those of the authors and do not necessarily reflect views of the United States Department of
+Homeland Security."""
+f75852386e563ca580a48b18420e446be45fcf8d,Illumination Invariant Face Recognition,"ILLUMINATION INVARIANT FACE RECOGNITION
+Raghuraman Gopalan
+ENEE 631: Digital Image and Video Processing
+Instructor: Dr. K. J. Ray Liu
+Term Project - Spring 2006
+INTRODUCTION
+The performance of the Face Recognition algorithms is severely affected by two
+important factors: the change in Pose and Illumination conditions of the subjects. The
+hanges in Illumination conditions of the subjects can be so drastic that, the variation in
+lighting will be of the similar order as that of the variation due to the change in subjects
+[1] and this can result in misclassification.
+For example, in the acquisition of the face of a person from a real time video, the
+mbient conditions will cause different lighting variations on the tracked face. Some
+examples of images with different illumination conditions are shown in Fig. 1. In this
+project, we study some algorithms that are capable of performing Illumination Invariant
+Face Recognition. The performances of these algorithms were compared on the CMU-
+Illumination dataset [13], by using the entire face as the input to the algorithms. Then, a
+model of dividing the face into four regions is proposed and the performance of the
+lgorithms on these new features is analyzed."
+f79ab9baccd466d86460214c5cee9f3be0af4064,Image Segmentation of Medical Images using Automatic Fuzzy C-Mean Clustering,"IJSRD - International Journal for Scientific Research & Development| Vol. 3, Issue 03, 2015 | ISSN (online): 2321-0613
+Image Segmentation of Medical Images using Automatic Fuzzy C-Mean
+Clustering
+Padmini Umorya1 Rajesh Singh2
+Research Scholar 2Assistant Professor
+,2Department of Computer Science and Engineering
+,2NITM College Gwalior, India"
+f7c9bafc66dc8d8002cbb2ea926378bce2b3b251,Emotion Detection Using EEG Signal Analysis,"International Journal of Electronics Communication and Computer Technology (IJECCT)
+Volume 5 Issue 2 (March 2015)
+Emotion Detection Using EEG Signal Analysis
+‘Review’
+K.S. Bhagat
+Assistant Professor,
+Dr. P.M. Mahajan
+Assistant Professo,
+Gunjal P. Waghulade
+M.E. IVth Semester,
+J.T. Mahajan College of ngineering,
+J.T. Mahajan College of ngineering,
+J.T. Mahajan College of ngineering,
+Faizpur, India
+Faizpur, India
+Faizpur, India"
+f7a37cf724aef23d0e714a35d54352243e5b52ee,Entire Reflective Object Surface Structure Understanding,"Q.LU ET AL.: ENTIRE REFLECTIVE OBJECT SURFACE STRUCTURE UNDERSTANDING 1
+Entire Reflective Object Surface Structure
+Understanding
+Qinglin Lu1
+Olivier Laligant1
+Eric Fauvet1
+Anastasia Zakharova2
+University of Burgundy
+Le2i UMR 6306 CNRS
+2,Rue de la Fonderie,71200,France
+INSA Rouen LMI EA3226
+Avenue de l’Université,76800,France"
+f77c9bf5beec7c975584e8087aae8d679664a1eb,Local Deep Neural Networks for Age and Gender Classification,"Local Deep Neural Networks for Age and Gender Classification
+Zukang Liao, Stavros Petridis, Maja Pantic
+March 27, 2017"
+f727b12c905ac585de60811048c9f9dd4188b498,R4-A.2: Rapid Forensic Search & Retrieval in Video Archives,"R4-A.2: Rapid Forensic Search & Retrieval in Video
+Archives"
+f7ba77d23a0eea5a3034a1833b2d2552cb42fb7a,LOTS about attacking deep features,"This is a pre-print of the original paper accepted at the International Joint Conference on Biometrics (IJCB) 2017.
+LOTS about Attacking Deep Features
+Andras Rozsa, Manuel G¨unther, and Terrance E. Boult
+Vision and Security Technology (VAST) Lab
+University of Colorado, Colorado Springs, USA"
+f727837e03a039d9bcec6d02cd87256f5a5854a4,"Deep Convolutional Neural Networks for Computer-Aided Detection: CNN Architectures, Dataset Characteristics and Transfer Learning","Deep Convolutional Neural Networks for
+Computer-Aided Detection: CNN Architectures,
+Dataset Characteristics and Transfer Learning
+Hoo-Chang Shin, Member, IEEE, Holger R. Roth, Mingchen Gao, Le Lu, Senior Member, IEEE, Ziyue Xu,
+Isabella Nogues, Jianhua Yao, Daniel Mollura, Ronald M. Summers*"
+f77b3e6b6eb4bc6d6bfeed290a1bc533bb97968a,Real Time Violence Detection in Video with ViF and Horn-Schunck,"Real Time Violence Detection in Video with ViF and
+Horn-Schunck
+Vicente Machaca Arceda Universidad Nacional de San Agustín Arequipa, Perú
+Karla Fernández Fabián Universidad Nacional de San Agustín Arequipa, Perú
+Juan Carlos Gutíerrez Universidad Nacional de San Agustín Arequipa, Perú"
+f724cbf5035e2df0dbe9a4992a0100465f5c6db5,Scalable Multicore k-NN Search via Subspace Clustering for Filtering,"Parallel Graph Partitioning for Complex Networks
+Henning Meyerhenke, Peter Sanders, and Christian Schulz"
+f77563386ac293620ce2b90b5d7250ab5d8f9f50,Regression-based Hypergraph Learning for Image Clustering and Classification,"IEEE TRANSACTIONS ON
+Regression-based Hypergraph Learning for Image
+Clustering and Classification
+Sheng Huang Student Member, IEEE, Dan Yang, Bo Liu, Xiaohong Zhang"
+f774f80fa4b5a8760084921f093730da519c6681,Development of an Efficient Face Recognition System based on Linear and Nonlinear Algorithms,"Development of an Efficient Face Recognition System based on Linear and Nonlinear Algorithms
+{tag} {/tag}
+International Journal of Computer Applications
+Foundation of Computer Science (FCS), NY, USA
+Volume 134
+Number 7
+Year of Publication: 2016
+Authors:
+Filani Araoluwa S., Adetunmbi Adebayo O.
+10.5120/ijca2016907932
+{bibtex}2016907932.bib{/bibtex}"
+e819d8ec94ff9b07f81bcfcf6eb66301aa271805,Optimised Blurred Object Tracking Using Anfis,"VOL. 11, NO. 13, JULY 2016 ISSN 1819-6608
+ARPN Journal of Engineering and Applied Sciences
+©2006-2016 Asian Research Publishing Network (ARPN). All rights reserved.
+www.arpnjournals.com
+OPTIMISED BLURRED OBJECT TRACKING USING ANFIS
+Department of Electronics and Communication, Sathyabama University, Chennai, India
+S. Rajaprabha and M. Sugadev
+E-Mail:"
+e8686663aec64f4414eba6a0f821ab9eb9f93e38,Improving shape-based face recognition by means of a supervised discriminant Hausdorff distance,"IMPROVING SHAPE-BASED FACE RECOGNITION BY MEANS OF A SUPERVISED
+DISCRIMINANT HAUSDORFF DISTANCE
+J.L. Alba
+, A. Pujol
+, A. L´opez
+nd J.J. Villanueva
+Signal Theory and Communications Department, University of Vigo, Spain
+Centre de Visio per Computador, Universitat Autonoma de Barcelona, Spain
+Digital Pointer MVT"
+e80635b9b48df5ad263c51ecec62d7d4bd7327fd,"Keepon A Playful Robot for Research , Therapy , and Entertainment","Int J Soc Robot (2009) 1: 3–18
+DOI 10.1007/s12369-008-0009-8
+O R I G I N A L PA P E R
+Keepon
+A Playful Robot for Research, Therapy, and Entertainment
+Hideki Kozima · Marek P. Michalowski ·
+Cocoro Nakagawa
+Accepted: 28 October 2008 / Published online: 19 November 2008
+© Springer 2008"
+e8304700fd89461ec9ecf471179ad87f08f3c2f7,Chapter 1 . Learning to Learn New Models of Human Activities in Indoor Settings (,"Chapter 1
+Learning to learn new
+models of human activities in
+indoor settings1
+Introduction
+Biological cognitive systems have the great capability to recognize and in-
+terpret unknown situations. Equally, they can integrate new observations
+easily within their existing knowledge base. Autonomous artificial agents to
+large extent still lack such capacities. In this paper, we work towards this
+direction, as we do not only detect abnormal situations, but are also able to
+learn new concepts during runtime.
+We aim at the interpretation of human behavior in indoor environments.
+Possible applications go from the main IM2 scenario, i.e. analysis and un-
+derstanding of meetings, to monitoring of elderly or handicapped people in
+their homes in order to ensure their well-being. The indoor setting triggers
+interesting issues, such as the adaptation of pre-trained knowledge to a par-
+ticular room scene filmed with a different camera or to an unknown person
+with an individual behavior style, whereas real abnormalities must still be
+detected.
+One main limitation of automated surveillance approaches is their need"
+e8d898a6adcd526874e0a41840b69760506a98a1,Computer Vision Methods as an Aid to Visually Impaired Users Title: Computer Vision Methods as an Aid to Visually Impaired Users,"Dipartimento di Informatica, Bioingegneria,
+Robotica ed Ingegneria dei Sistemi
+Computer Vision methods as an aid to visually impaired users
+Giovanni Fusco
+Theses Series
+DIBRIS-TH-2013-03
+DIBRIS, Universit`a di Genova
+Via Opera Pia, 13 16145 Genova, Italy
+http://www.dibris.unige.it/"
+e8e8d8a619eea66c41a1a2bdc0a921a3b6d74836,"Restoring Degraded Face Images: A Case Study in Matching Faxed, Printed, and Scanned Photos","Restoring Degraded Face Images: A Case Study in
+Matching Faxed, Printed, and Scanned Photos
+Thirimachos Bourlai, Member, IEEE, Arun Ross, Senior Member, IEEE, and Anil K. Jain, Fellow, IEEE"
+e8d1d2a61c5a259440ef9fcd301093b43e87efa1,Periocular Biometrics in the Visible Spectrum,"Periocular Biometrics in the Visible Spectrum
+Unsang Park, Member, IEEE, Raghavender Reddy Jillela, Student Member, IEEE, Arun Ross, Senior Member, IEEE,
+nd Anil K. Jain, Fellow, IEEE"
+e8fdacbd708feb60fd6e7843b048bf3c4387c6db,Deep Learning,"Deep Learning
+Andreas Eilschou
+Hinnerup Net A/S
+www.hinnerup.net
+July 4, 2014
+Introduction
+Deep learning is a topic in the field of artificial intelligence (AI) and is a relatively
+new research area although based on the popular artificial neural networks (supposedly
+mirroring brain function). With the development of the perceptron in the 1950s and
+960s by Frank RosenBlatt, research began on artificial neural networks. To further
+mimic the architectural depth of the brain, researchers wanted to train a deep multi-
+layer neural network – this, however, did not happen until Geoffrey Hinton in 2006
+introduced Deep Belief Networks [1].
+Recently, the topic of deep learning has gained public interest. Large web companies such
+s Google and Facebook have a focused research on AI and an ever increasing amount
+of compute power, which has led to researchers finally being able to produce results
+that are of interest to the general public. In July 2012 Google trained a deep learning
+network on YouTube videos with the remarkable result that the network learned to
+recognize humans as well as cats [6], and in January this year Google successfully used
+deep learning on Street View images to automatically recognize house numbers with"
+e8632e5bf43f7c59f4e1978833db8aa405c76c58,Saliency and Gist Features for Target Detection in Satellite Images,"Saliency and Gist Features for Target
+Detection in Satellite Images
+Zhicheng Li and Laurent Itti"
+e849b9b3e65130712e23afb872ac925e1e9a6b73,"Image denoising with multi-layer perceptrons, part 1: comparison with existing algorithms and with bounds","Journal of Machine Learning Research x (2012) xxx
+Submitted xx/xx; Published xx/xx
+Image denoising with multi-layer perceptrons, part 1:
+omparison with existing algorithms and with bounds
+Harold Christopher Burger
+Christian J. Schuler
+Stefan Harmeling
+Max Planck Institute for Intelligent Systems
+Spemannstr. 38
+72076 T¨ubingen, Germany
+Editor:"
+e810ddd9642db98492bd6a28b08a8655396c1555,Facing facts: neuronal mechanisms of face perception.,"Review
+Acta Neurobiol Exp 2008, 68: 229–252
+Facing facts: Neuronal mechanisms of face perception
+Monika Dekowska1, Michał Kuniecki2, and Piotr Jaśkowski3*
+Kazimierz Wielki University of Bydgoszcz, Poland; 2Department of Psychophysiology, Jagiellonian University,
+Kraków, Poland; 3Department of Cognitive Psychology, University of Finance and Management, Warszawa, Poland,
+*Email:
+The face is one of the most important stimuli carrying social meaning. Thanks to the fast analysis of faces, we are able to
+judge physical attractiveness and features of their owners’ personality, intentions, and mood. From one’s facial expression
+we can gain information about danger present in the environment. It is obvious that the ability to process efficiently one’s
+face is crucial for survival. Therefore, it seems natural that in the human brain there exist structures specialized for face
+processing. In this article, we present recent findings from studies on the neuronal mechanisms of face perception and
+recognition in the light of current theoretical models. Results from brain imaging (fMRI, PET) and electrophysiology (ERP,
+MEG) show that in face perception particular regions (i.e. FFA, STS, IOA, AMTG, prefrontal and orbitofrontal cortex) are
+involved. These results are confirmed by behavioral data and clinical observations as well as by animal studies. The
+developmental findings reviewed in this article lead us to suppose that the ability to analyze face-like stimuli is hard-wired
+nd improves during development. Still, experience with faces is not sufficient for an individual to become an expert in face
+perception. This thesis is supported by the investigation of individuals with developmental disabilities, especially with
+utistic spectrum disorders (ASD).
+Key words: face perception, emotion perception"
+e8b2a98f87b7b2593b4a046464c1ec63bfd13b51,CMS-RCNN: Contextual Multi-Scale Region-based CNN for Unconstrained Face Detection,"CMS-RCNN: Contextual Multi-Scale
+Region-based CNN for Unconstrained Face
+Detection
+Chenchen Zhu*, Student, IEEE, Yutong Zheng*, Student, IEEE,
+Khoa Luu, Member, IEEE, Marios Savvides, Senior Member, IEEE"
+e8ff87c9072d67dcbcd5491b1e5a0cecc2ee309d,A Survey on Gaze Estimation Techniques in Smartphone,"International Research Journal of Engineering and Technology (IRJET) e-ISSN: 2395 -0056
+Volume: 04 Issue: 04 | Apr -2017 www.irjet.net p-ISSN: 2395-0072
+A Survey on Gaze Estimation Techniques in Smartphone
+Akshay A Gawande1, Prof.Gangotri Nathaney2
+M.Tech Scholar, CSE Department, WCOEM, Nagpur, India1
+Assistant Professor, CSE Department, WCOEM, Nagpur, India2
+image dataset
+interest. Many of
+field mobile technology and digital
+The goal of this system to get correct gaze point with
+minimum of error rate and allow handicap people to
+operate mobile easily by eyes .The proposed system
+onsist of collecting some steps as: Collecting people
+different position eye
+,preprocessing,
+feature extraction, regression. This paper is organized as
+follows: Section 2 comprises Previous Work; section 3
+omprises Methodology and Conclusion is in section 4.
+---------------------------------------------------------------------***---------------------------------------------------------------------
+use eye trackers to identify what customer's gaze is"
+e8dda897372e6b4cf903234c7a9c40117711d8d8,What do you think of my picture? Investigating factors of influence in profile images context perception,"What do you think of my picture? Investigating factors
+of influence in profile images context perception
+Filippo Mazza, Matthieu Perreira da Silva, Patrick Le Callet, Ingrid
+Heynderickx
+To cite this version:
+Filippo Mazza, Matthieu Perreira da Silva, Patrick Le Callet, Ingrid Heynderickx. What do you
+think of my picture? Investigating factors of influence in profile images context perception. Human
+Vision and Electronic Imaging XX, Mar 2015, San Francisco, United States. Proc. SPIE 9394, Hu-
+man Vision and Electronic Imaging XX, 9394, <http://spie.org/EI/conferencedetails/human-vision-
+electronic-imaging>. <10.1117/12.2082817>. <hal-01149535>
+HAL Id: hal-01149535
+https://hal.archives-ouvertes.fr/hal-01149535
+Submitted on 7 May 2015
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est"
+e8d2d991dcfb12b287ab06d282a86802e565780c,Inducing Behavior Change in Children with Autism Spectrum Disorders by Monitoring their Attention,"Inducing behavior change in children with autism spectrum disorders by
+monitoring their attention
+Margarida Lucas da Silva12, Hugo Silva3 and Daniel Gonc¸alves12
+Instituto Superior T´ecnico, Av. Rovisco Pais, 1, 1049-001, Lisboa, Portugal
+INESC-ID, R. Alves Redol, 9, 1000-029 Lisboa, Portugal
+Instituto de Telecomunicac¸ ˜oes, Instituto Superior T´ecnico, Av. Rovisco Pais, 1, Torre Norte - Piso 10, 1049-001, Lisboa,
+Portugal
+Keywords:
+Human Behavior Analysis, Autism Spectrum Disorders, Inducing Behavior Change."
+e84e49c9530897fad7927a06ac4a48ddaf0adf0f,Searching for Efficient Multi-Scale Architectures for Dense Image Prediction,"Searching for Efficient Multi-Scale
+Architectures for Dense Image Prediction
+Liang-Chieh Chen Maxwell D. Collins
+Barret Zoph
+Florian Schroff
+Yukun Zhu
+Hartwig Adam
+George Papandreou
+Jonathon Shlens
+Google Inc."
+e8af37ac6e0a5b7f04b6824bb1f74e4f363b99b5,On the replication of CycleGAN,"Bachelor thesis
+Computer Science
+Radboud University
+On the replication of CycleGAN
+Author:
+Robin Elbers
+s4225678
+First supervisor/assessor:
+MSc. Jacopo Acquarelli
+Second assessor:
+Prof. Tom Heskes
+August 10, 2018"
+e8e8f40ceff8b71d5dafa6b680d40690dfae940c,title : Guidelines for studying developmental prosopagnosia in adults and children,"Article type: Focus Article
+Article title: Guidelines for studying developmental prosopagnosia in adults
+nd children
+First author: Full name and affiliation; plus email address if
+orresponding author
+Kirsten A. Dalrymple*
+Institute of Child Development, University of Minnesota, Minneapolis, USA
+Second author: Full name and affiliation; plus email address if
+orresponding author
+Romina Palermo*
+School of Psychology, and ARC Centre of Excellence in Cognition and its Disorders
+University of Western Australia, Crawley, Australia
+Please note that both authors would like to be listed as “corresponding authors”."
+e819a577c57c83a133a0a0e81180d14dc13b82e9,Pyramid Histogram of Oriented Gradients based Human Ear Identification,"Pyramid Histogram of Oriented Gradients based Human Ear Identification
+Pyramid Histogram of Oriented Gradients based Human
+Ear Identification
+Partha Pratim Sarangi1, B.S.P. Mishra1 and Sachidanada Dehuri2
+School of Computer Engineering KIIT University, Bhubaneswar , Emails:
+Department of ICT FM University, Balasore, Email:"
+e8d1b134d48eb0928bc999923a4e092537e106f6,Weighted Multi-region Convolutional Neural Network for Action Recognition with Low-latency Online Prediction,"WEIGHTED MULTI-REGION CONVOLUTIONAL NEURAL NETWORK FOR ACTION
+RECOGNITION WITH LOW-LATENCY ONLINE PREDICTION
+Yunfeng Wang(cid:63), Wengang Zhou(cid:63), Qilin Zhang†, Xiaotian Zhu(cid:63), Houqiang Li(cid:63)
+(cid:63)University of Science and Technology of China, Hefei, Anhui, China
+HERE Technologies, Chicago, Illinois, USA"
+e855856d4b61b6a732005418f543c49195cb1542,Novel Method for Eyeglasses Detection in Frontal Face Images,"Novel Method for Eyeglasses Detection in Frontal
+Face Images
+R. L. Parente, L. V. Batista
+Centro de Inform´atica - CI
+Universidade Federal da Para´ıba - UFPB
+Jo˜ao Pessoa, Brazil
+I. Andreza, E. Borges, R. Marques
+VSoft Research Group
+VSoft Technology
+Jo˜ao Pessoa, Brazil
+{igorlpa90, erickvagnerr,"
+e8039e1531dd86da960be26d59718d2452f9943b,Scene Parsing and Fusion-Based Continuous Traversable Region Formation,"Scene parsing and fusion-based continuous
+traversable region formation
+Xuhong Xiao, Gee Wah Ng, Yuan Sin Tan, Yeo Ye Chuan
+0 Science Park Drive, DSO national Laboratories, Singapore 118230"
+e8c6c3fc9b52dffb15fe115702c6f159d955d308,Linear Subspace Learning for Facial Expression Analysis,"Linear Subspace Learning for
+Facial Expression Analysis
+Caifeng Shan
+Philips Research
+The Netherlands
+. Introduction
+Facial expression, resulting from movements of the facial muscles, is one of the most
+powerful, natural, and immediate means for human beings to communicate their emotions
+nd intentions. Some examples of facial expressions are shown in Fig. 1. Darwin (1872) was
+the first to describe in detail the specific facial expressions associated with emotions in
+nimals and humans; he argued that all mammals show emotions reliably in their faces.
+Psychological studies (Mehrabian, 1968; Ambady & Rosenthal, 1992) indicate that facial
+expressions, with other non-verbal cues, play a major and fundamental role in face-to-face
+ommunication.
+Fig. 1. Facial expressions of George W. Bush.
+Machine analysis of facial expressions, enabling computers to analyze and interpret facial
+expressions as humans do, has many important applications including intelligent human-
+omputer interaction, computer animation, surveillance and security, medical diagnosis,
+law enforcement, and awareness system (Shan, 2007). Driven by its potential applications
+nd theoretical interests of cognitive and psychological scientists, automatic facial"
+e8691980eeb827b10cdfb4cc402b3f43f020bc6a,Segmentation Guided Attention Networks for Visual Question Answering,"Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics- Student Research Workshop, pages 43–48
+Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics- Student Research Workshop, pages 43–48
+Vancouver, Canada, July 30 - August 4, 2017. c(cid:13)2017 Association for Computational Linguistics
+Vancouver, Canada, July 30 - August 4, 2017. c(cid:13)2017 Association for Computational Linguistics
+https://doi.org/10.18653/v1/P17-3008
+https://doi.org/10.18653/v1/P17-3008"
+e8baf6ddd2e651350b843fedfe58f761848d3524,Design And Implementation Of Multiposes Face Recognization System,"Pritika V.Mamankar et al, International Journal of Computer Science and Mobile Computing, Vol.4 Issue.4, April- 2015, pg. 387-394
+Available Online at www.ijcsmc.com
+International Journal of Computer Science and Mobile Computing
+A Monthly Journal of Computer Science and Information Technology
+IJCSMC, Vol. 4, Issue. 4, April 2015, pg.387 – 394
+RESEARCH ARTICLE
+ISSN 2320–088X
+Design And Implementation Of Multiposes Face
+Recognization System
+Ms. Pritika V.Mamankar
+Master of Engineering Scholar, Information Technology Department, Sipna College of Engg. and Technology, Amravati, India
+Assistant Professor of CSE Department, Computer Science and Engineering Department, Sipna College of Engg. and
+Prof. H R. Vyawahare
+Technology, Amravati, India"
+e8867f819f39c1838bba7d446934258035d4101c,Face recognition performance with superresolution.,"Face recognition performance with superresolution
+Shuowen Hu,1,* Robert Maschal,1 S. Susan Young,1 Tsai Hong Hong,2
+nd P. Jonathon Phillips2
+United States Army Research Laboratory, 2800 Powder Mill Road, Adelphi, Maryland 20783, USA
+NIST, 100 Bureau Drive, Gaithersburg, Maryland 20899, USA
+*Corresponding author:
+Received 29 September 2011; revised 19 April 2012; accepted 24 April 2012;
+posted 30 April 2012 (Doc. ID 155384); published 20 June 2012
+With the prevalence of surveillance systems, face recognition is crucial to aiding the law enforcement com-
+munity and homeland security in identifying suspects and suspicious individuals on watch lists. However,
+face recognition performance is severely affected by the low face resolution of individuals in typical sur-
+veillance footage, oftentimes due to the distance of individuals from the cameras as well as the small pixel
+ount of low-cost surveillance systems. Superresolution image reconstruction has the potential to improve
+face recognition performance by using a sequence of low-resolution images of an individual’s face in the
+same pose to reconstruct a more detailed high-resolution facial image. This work conducts an extensive
+performance evaluation of superresolution for a face recognition algorithm using a methodology and ex-
+perimental setup consistent with real world settings at multiple subject-to-camera distances. Results show
+that superresolution image reconstruction improves face recognition performance considerably at the
+examined midrange and close range.
+OCIS codes:"
+e8f753208fc354fa9aeb3fa9c6acb3d45e7eac7b,Definite Description Lexical Choice: taking Speaker's Personality into account,"Definite Description Lexical Choice:
+taking Speaker’s Personality into account
+Alex Gwo Jen Lan, Ivandr´e Paraboni
+University of S˜ao Paulo, School of Arts, Sciences and Humanities
+S˜ao Paulo, Brazil"
+facdb71e8175c33ec54c2248fa6cfc319e27cfa5,Accelerating Machine Learning Research with MI-Prometheus,"Accelerating Machine Learning Research with
+MI-Prometheus
+Tomasz Kornuta Vincent Marois Ryan L. McAvoy Younes Bouhadjar
+Alexis Asseman
+Vincent Albouy
+IBM Research AI, Almaden Research Center, San Jose, USA
+T.S. Jayram Ahmet S. Ozcan
+{tkornut, vmarois, mcavoy, byounes, jayram,
+{alexis.asseman,"
+fab7f1af3d67c7b7cf76ec1d8dfcb265da61a572,Towards Recommender Systems for Police Photo Lineup,"Towards Recommender Systems for Police Photo Lineup
+Ladislav Peska
+Department of Software Engineering
+Hana Trojanova
+Department of Psychology
+Faculty of Mathematics and Physics, Charles University, Prague
+Faculty of Arts, Charles University, Prague
+Czech Republic
+Czech Republic"
+facf25e1880d23eb993d4ad507256ebbc7e0d82d,CURE-OR: Challenging Unreal and Real Environments for Object Recognition,"Citation D. Temel, J. Lee, and G. AlRegib, “CURE-OR: Challenging unreal and real environments
+for object recognition,” 2018 17th IEEE International Conference on Machine Learning
+nd Applications (ICMLA), Orlando, Florida, USA, 2018.
+Dataset
+https://ghassanalregib.com/cure-or/
+ICMLA,
+uthor={D. Temel and J. Lee and G. AlRegib},
+ooktitle={2018 17th IEEE International Conference on Machine Learning and Applications
+(ICMLA)},
+title={CURE-OR: Challenging unreal and real environments for object recognition},
+year=2018,}
+Copyright c(cid:13)2018 IEEE. Personal use of this material is permitted. Permission from IEEE must be
+obtained for all other uses, in any current or future media, including reprinting/republishing
+this material for advertising or promotional purposes, creating new collective works, for
+resale or redistribution to servers or lists, or reuse of any copyrighted component of this
+work in other works.
+Contact
+https://ghassanalregib.com/
+http://cantemel.com/"
+fa1b849697115ceede0a08ac552ea25ce2bf33a1,A N Approach to F Ace R Ecognition of 2 - D Images Using E Igen F Aces and Pca,"Signal & Image Processing : An International Journal (SIPIJ) Vol.3, No.2, April 2012
+AN APPROACH TO FACE RECOGNITION OF 2-D
+IMAGES USING EIGEN FACES AND PCA
+Annapurna Mishra1, Monorama Swain2 and Bodhisattva Dash3
+Department of Electronics & Telecommunication Engineering
+Silicon Institute of Technology, Bhubaneswar, India"
+fa11590fea86049fff1eb412642753422738c584,Depression-related difficulties disengaging from negative faces are associated with sustained attention to negative feedback during social evaluation and predict stress recovery,"RESEARCH ARTICLE
+Depression-related difficulties disengaging
+from negative faces are associated with
+sustained attention to negative feedback
+during social evaluation and predict stress
+recovery
+Alvaro Sanchez*, Nuria Romero, Rudi De Raedt
+Department of Experimental Clinical and Health Psychology, Ghent University, Ghent, Belgium"
+fab83bf8d7cab8fe069796b33d2a6bd70c8cefc6,Draft: Evaluation Guidelines for Gender Classification and Age Estimation,"Draft: Evaluation Guidelines for Gender
+Classification and Age Estimation
+Tobias Gehrig, Matthias Steiner, Hazım Kemal Ekenel
+{tobias.gehrig,
+July 1, 2011
+Introduction
+In previous research on gender classification and age estimation did not use a
+standardised evaluation procedure. This makes comparison the different ap-
+proaches dif‌f‌icult.
+Thus we propose here a benchmarking and evaluation protocol for gender
+lassification as well as age estimation to set a common ground for future re-
+search in these two areas.
+The evaluations are designed such that there is one scenario under controlled
+labratory conditions and one under uncontrolled real life conditions.
+The datasets were selected with the criteria of being publicly available for
+research purposes.
+File lists for the folds corresponding to the individual benchmarking proto-
+ols will be provided over our website at http://face.cs.kit.edu/befit. We
+will provide two kinds of folds for each of the tasks and conditions: one set of
+folds using the whole dataset and one set of folds using a reduced dataset, which"
+fa23122db319440fb5a7253e19709f992b4571b9,Human Age Estimation via Geometric and Textural Features,"HUMAN AGE ESTIMATION VIA GEOMETRIC
+AND TEXTURAL FEATURES
+Merve Kilinc1 and Yusuf Sinan Akgul2
+TUBITAK BILGEM UEKAE, Anibal Street, 41470, Gebze, Kocaeli, Turkey
+GIT Vision Lab∗, Department of Computer Engineering, Gebze Institute of Technology, 41400, Kocaeli, Turkey
+Keywords:
+Age Estimation, Age Classification, Geometric Features, LBP, Gabor, LGBP, Cross Ratio, FGNET, MORPH."
+fa4ff855ca125b986bcb2bc6b71bef2ae8fde1cf,"3d Integral Invariant Signatures and Their Application on Face Recognition Dedication I Am Grateful for the Support and Guidance I Have Received from Dr. Irina A. Kogan, and I Also Express My Gratitude To",
+fa08a4da5f2fa39632d90ce3a2e1688d147ece61,Supplementary material for “ Unsupervised Creation of Parameterized Avatars ” 1 Summary of Notations,"Supplementary material for
+“Unsupervised Creation of Parameterized Avatars”
+Summary of Notations
+Tab. 1 itemizes the symbols used in the submission. Fig. 2,3,4 of the main text illustrate many of these
+symbols.
+DANN results
+Fig. 1 shows side by side samples of the original image and the emoji generated by the method of [1].
+As can be seen, these results do not preserve the identity very well, despite considerable effort invested in
+finding suitable architectures.
+Multiple Images Per Person
+Following [4], we evaluate the visual quality that is obtained per person and not just per image, by testing
+TOS on the Facescrub dataset [3]. For each person p, we considered the set of their images Xp, and selected
+the emoji that was most similar to their source image, i.e., the one for which:
+||f (x) − f (e(c(G(x))))||.
+rgmin
+Fig. 2 depicts the results obtained by this selection method on sample images form the Facescrub dataset
+(it is an extension of Fig. 7 of the main text). The figure also shows, for comparison, the DTN [4] result for
+the same image.
+Detailed Architecture of the Various Networks
+In this section we describe the architectures of the networks used in for the emoji and avatar experiments."
+fa83597bf71dbeb606bca6593bcef8ecd51e8661,Michael Kamaraj and G. Balakrishnan: Multiple Target Tracking Using Cost Minimization Techniques,"MICHAEL KAMARAJ AND G. BALAKRISHNAN: MULTIPLE TARGET TRACKING USING COST MINIMIZATION TECHNIQUES
+MULTIPLE TARGET TRACKING USING COST MINIMIZATION TECHNIQUES
+Department of Computer Applications, Pavendar Bharathidasan College of Engineering and Technology, India
+Department of Computer Science and Engineering, Indra Ganesan College of Engineering, India
+Michael Kamaraj1 and G. Balakrishnan2"
+fa2603efaf717974c77162c93d800defae61a129,Face recognition/detection by probabilistic decision-based neural network,"Face Recognition/Detection by Probabilistic
+Decision-Based Neural Network
+Shang-Hung Lin, Sun-Yuan Kung, Fellow, IEEE, and Long-Ji Lin"
+fac36fa1b809b71756c259f2c5db20add0cb0da0,Transferring GANs: Generating Images from Limited Data,"Transferring GANs: generating images from
+limited data
+Yaxing Wang, Chenshen Wu, Luis Herranz, Joost van de Weijer,
+Abel Gonzalez-Garcia, Bogdan Raducanu
+{yaxing, chenshen, lherranz, joost, agonzgarc,
+Computer Vision Center
+Universitat Aut`onoma de Barcelona, Spain"
+faf40ce28857aedf183e193486f5b4b0a8c478a2,Automated Human Identification Using Ear Imaging,"Imperial Journal of Interdisciplinary Research (IJIR)
+Vol.2, Issue-1 , 2016
+ISSN : 2454-1362 , www.onlinejournal.in
+Automated Human Identification Using Ear Imaging
+Priya Thakare
+SITS.Narhe
+Abhijit Patil
+SITS, Narhe.
+Priya More
+SITS, Narhe.
+Vivek Patil
+SITS, Narhe.
+Akshay Shende
+SITS, Narhe.
+Reliability
+in human authentication
+from airport surveillance
+important aspect for the security requirements in various
+pplications ranging
+electronic banking. Many physical characteristics of"
+fa24bf887d3b3f6f58f8305dcd076f0ccc30272a,Interval Insensitive Loss for Ordinal Classification,"JMLR: Workshop and Conference Proceedings 39:189–204, 2014
+ACML 2014
+Interval Insensitive Loss for Ordinal Classification
+Kostiantyn Antoniuk
+Vojtˇech Franc
+V´aclav Hlav´aˇc
+Center for Machine Perception, Department of Cybernetics, Faculty of Electrical Engineering, Czech
+Technical University in Prague, Technick´a 2, 166 27 Prague 6 Czech Republic
+Editor: Dinh Phung and Hang Li"
+fa8c73899c22b461cc062a10b6df20fccb18800c,A Novel Framework for Face Recognition in Real-Time Environments,"International Journal of Scientific and Research Publications, Volume 3, Issue 8, August 2013
+ISSN 2250-3153
+A Novel Framework for Face Recognition in Real-Time
+Environments
+Tmt.Maithili Easwaran*, Dr.B.Poorna**
+*Department of Computer Applications, S.A.Engineering College, TN, INDIA
+** Department of Computer applications, Shankarlal Sundarbai Shasun Jain College for Women, TN, INDIA
+i.e.,
+(PCA)-based"
+fafe69a00565895c7d57ad09ef44ce9ddd5a6caa,Gaussian Mixture Models for Human Face Recognition under Illumination Variations,"Applied Mathematics, 2012, 3, 2071-2079
+http://dx.doi.org/10.4236/am.2012.312A286 Published Online December 2012 (http://www.SciRP.org/journal/am)
+Gaussian Mixture Models for Human Face Recognition
+under Illumination Variations
+Information Systems and Decision Sciences Department, Mihaylo College of Business and Economics,
+California State University, Fullerton, USA
+Email:
+Sinjini Mitra
+Received August 18, 2012; revised September 18, 2012; accepted September 25, 2012"
+fab6e12a913223b69e1b9f0672df6c89275b1ed0,Initial Development of a Learners’ Ratified Acceptance of Multibiometrics Intentions Model (RAMIM),"Interdisciplinary Journal of E-Learning and Learning Objects
+IJELLO special series of Chais Conference 2009 best papers
+Volume 5, 2009
+Initial Development of a Learners’ Ratified
+Acceptance of Multibiometrics Intentions Model
+(RAMIM)
+Yair Levy
+GSCIS,
+Nova Southeastern University,
+Ft. Lauderdale, FL, USA
+Michelle M. Ramim
+Nova Southeastern University,
+Huizenga School of Business,
+Ft. Lauderdale, FL, USA"
+fab0d19c58815eccb0db7215fe45d6a32066ca1c,Inferring Human Attention by Learning Latent Intentions,"Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+the mug's statuschecking the book's statuslocating the dispenserFigure1:Humanattentionandintentionsina3Dscene.thedispenser,hisattentionsweepsfromthetabletothedis-penser;whilefetchingwaterfromthedispenser,hisintentionistocheckifthemugisfullandhisattentionsteadilyfocusesonthemug.Thedrivingrulesofintentionsactingonattentioncanbeindependentofactivitycategories.Forexample,inFigure1,theattentiondrivenbytheintentioncheckingstatusalwayspresentsassteadilyfocusing,evenindifferentactivities.Thisphenomenonmakesitpossibletoinfertheattentionwiththesamerulesacrossdifferentactivities.However,thesedrivingrulesarehiddenandshouldbelearnedfromdata.Thispaperproposesaprobabilisticmethodtoinfer3Dhu-manattentionbyjointlymodelingattention,intentions,andtheirinteractions.Theattentionandintentionarerepresent-edwithfeaturesextractedfromhumanskeletonsandscenevoxels.Humanintentionsaretakenaslatentvariableswhichguidethemotionsandformsofhumanattention.Conversely,thehumanattentionrevealstheintentionfeatures.Attentioninferenceismodeledasajointoptimizationwithlatenthu-manintentions.WeadoptanEM-based[Bishop,2006]approachtolearnthemodelparametersandminethelatentintentions.Giv-enanRGB-DvideowithhumanskeletonscapturedbytheKinectcamera,ajoint-statedynamicprogrammingalgorithm"
+faa111d749eb228c686643e4667dd1bc21c724f2,Condensed from Video Sequences for Place Recognition,"Boosting Descriptors Condensed from Video Sequences for Place Recognition
+Tat-Jun Chin, Hanlin Goh and Joo-Hwee Lim
+Institute for Infocomm Research
+1 Heng Mui Keng Terrace, Singapore 119613.
+{tjchin, hlgoh,"
+faca1c97ac2df9d972c0766a296efcf101aaf969,Sympathy for the Details: Dense Trajectories and Hybrid Classification Architectures for Action Recognition,"Sympathy for the Details: Dense Trajectories and Hybrid
+Classification Architectures for Action Recognition
+C´esar Roberto de Souza1,2, Adrien Gaidon1, Eleonora Vig3, Antonio Manuel L´opez2
+Computer Vision Group, Xerox Research Center Europe, Meylan, France
+Centre de Visi´o per Computador, Universitat Aut`onoma de Barcelona, Bellaterra, Spain
+German Aerospace Center, Wessling, Germany
+{cesar.desouza,"
+fa60521dabd2b64137392b4885e4d989f4b86430,Physics-Based Generative Adversarial Models for Image Restoration and Beyond,"Physics-Based Generative Adversarial Models
+for Image Restoration and Beyond
+Jinshan Pan, Yang Liu, Jiangxin Dong, Jiawei Zhang,
+Jimmy Ren, Jinhui Tang, Yu-Wing Tai and Ming-Hsuan Yang"
+fabbc7f921d77b5aa9157310df29ad81367fe92d,Title of Dissertation : EFFICIENT IMAGE AND VIDEO REPRESENTATIONS FOR RETRIEVAL,
+fa9f1b236d0a252d4a56e26e8a9a41d496803413,Face Recognition Method with Two-Dimensional HMM,"FACE RECOGNITION METHOD WITH
+TWO-DIMENSIONAL HMM
+Janusz Bobulski1
+Czestochowa University of Technology
+Institute of Computer and Information Science
+Dabrowskiego Street 73, 42-200 Czestochowa, Poland."
+fa24a04f1e8095d47e2d2ce0076bf47bdd6f997a,Wavelet Based Face Recognition for Low Quality Images,"International Journal of Advanced Research in Electrical, Electronics and Instrumentation Engineering
+Vol. 2, Issue 1, January 2013
+Wavelet Based Face Recognition for Low
+ISSN: 2278 – 8875
+Quality Images
+M.Karthika, 2K.Shanmugapriya, 3Dr.S.Valarmathy, 4M.Arunkumar
+PG Scholar, Department of ECE, Bannari Amman Institute of Technology, Sathyamangalam, Tamilnadu,India
+PG Scholar, Department of ECE, Bannari Amman Institute of Technology, Sathyamangalam, Tamilnadu,India
+Professor and Head, Department of ECE, Bannari Amman Institute of Technology, Sathyamangalam, Tamilnadu, India
+Assistant Professor, Department of ECE, Bannari Amman Institute of Technology, Sathyamangalam, Tamilnadu, India"
+fab60b3db164327be8588bce6ce5e45d5b882db6,Maximum A Posteriori Estimation of Distances Between Deep Features in Still-to-Video Face Recognition,"Maximum A Posteriori Estimation of Distances
+Between Deep Features in Still-to-Video Face
+Recognition
+Andrey V. Savchenko
+National Research University Higher School of Economics
+Laboratory of Algorithms and Technologies for Network Analysis,
+6 Rodionova St., Nizhny Novgorod, Russia
+Natalya S. Belova
+National Research University Higher School of Economics
+0 Myasnitskaya St., Moscow, Russia
+September 2, 2018"
+fad895771260048f58d12158a4d4d6d0623f4158,Audio-visual emotion recognition for natural human-robot interaction,"Audio-Visual Emotion
+Recognition For Natural
+Human-Robot Interaction
+Dissertation zur Erlangung des akademischen Grades
+Doktor der Ingenieurwissenschaften (Dr.-Ing.)
+vorgelegt von
+Ahmad Rabie
+n der Technischen Fakultät der Universität Bielefeld
+5. März 2010"
+fac0151ed0494caf10c7d778059f176ba374e29c,Recognising Complex Mental States from Naturalistic Human-Computer Interactions,"Copyright and use of this thesis
+This thesis must be used in accordance with the
+provisions of the Copyright Act 1968.
+Reproduction of material protected by copyright
+may be an infringement of copyright and
+opyright owners may be entitled to take
+legal action against persons who infringe their
+opyright.
+Section 51 (2) of the Copyright Act permits
+n authorized officer of a university library or
+rchives to provide a copy (by communication
+or otherwise) of an unpublished thesis kept in
+the library or archives, to a person who satisfies
+the authorized officer that he or she requires
+the reproduction for the purposes of research
+or study.
+The Copyright Act grants the creator of a work
+number of moral rights, specifically the right of
+ttribution, the right against false attribution and
+the right of integrity."
+fae4185a5fc540b057ea9e0402223e653327d0f9,Structured Edge Detection for Improved Object Localization using the Discriminative Generalized Hough Transform,
+ff8315c1a0587563510195356c9153729b533c5b,Zapping Index:Using Smile to Measure Advertisement Zapping Likelihood,"Zapping Index:Using Smile to Measure
+Advertisement Zapping Likelihood
+Songfan Yang, Member, IEEE, Mehran Kafai, Member, IEEE,
+Le An, Student Member, IEEE, and Bir Bhanu, Fellow, IEEE"
+ff2e25cb67209de8ae922abdfc31f922b130276e,Information Granulation and Pattern Recognition,"Chapter 25
+Information Granulation and Pattern Recognition
+Andrzej Skowron,1 Roman W. Swiniarski2
+Institute of Mathematics, Warsaw University, Banacha 2, 02-097 Warsaw, Poland
+San Diego State University, Department of Mathematical and Computer Sciences, 5500
+Campanile Drive, San Diego, CA 92182, USA
+Summary. We discuss information granulation applications in pattern recognition. The chap-
+ter consists of two parts. In the first part, we present applications of rough set methods for
+feature selection in pattern recognition. We emphasize the role of different forms of reducts
+that are the basic constructs of the rough set approach in feature selection. In the overview
+of methods for feature selection, we discuss feature selection criteria based on the rough set
+pproach and the relationships between them and other existing criteria. Our algorithm for
+feature selection used in the application reported is based on an application of the rough set
+method to the result of principal component analysis used for feature projection and reduc-
+tion. Finally, the first part presents numerical results of face recognition experiments using a
+neural network, with feature selection based on proposed principal component analysis and
+rough set methods. The second part consists of an outline of an approach to pattern recog-
+nition with the application of background knowledge specified in natural language. The ap-
+proach is based on constructing approximations of reasoning schemes. Such approximations
+re called approximate reasoning schemes and rough neural networks."
+ff44d8938c52cfdca48c80f8e1618bbcbf91cb2a,Towards Video Captioning with Naming: A Novel Dataset and a Multi-modal Approach,"Towards Video Captioning with Naming: a
+Novel Dataset and a Multi-Modal Approach
+Stefano Pini, Marcella Cornia, Lorenzo Baraldi, Rita Cucchiara
+Dipartimento di Ingegneria “Enzo Ferrari”
+Universit`a degli Studi di Modena e Reggio Emilia"
+fffefc1fb840da63e17428fd5de6e79feb726894,Fine-Grained Age Estimation in the wild with Attention LSTM Networks,"Fine-Grained Age Estimation in the wild with
+Attention LSTM Networks
+Ke Zhang, Member, IEEE, Na Liu, Xingfang Yuan, Student Member, IEEE, Xinyao Guo, Ce Gao,
+nd Zhenbing Zhao Member, IEEE,"
+ff398e7b6584d9a692e70c2170b4eecaddd78357,Title of dissertation : FACE RECOGNITION AND VERIFICATION IN UNCONSTRAINED ENVIRIONMENTS,
+ff70cfaf3e085a6c32bfa7ebedb98adfb7658210,TABULA RASA Trusted Biometrics under Spoofing Attacks,"TABULA RASA
+Trusted Biometrics under Spoofing Attacks
+http://www.tabularasa-euproject.org/
+Funded under the 7th FP (Seventh Framework Programme)
+[Trustworthy Information and Communication Technologies]
+Theme ICT-2009.1.4
+D3.2: Evaluation of baseline non-ICAO
+iometric systems
+Due date: 30/09/2011
+Project start date: 01/11/2010 Duration: 42 months
+WP Manager: Abdenour Hadid Revision: 0
+Submission date: 30/09/2011
+Author(s): Federico Alegre, Xuran Zhao, Nick Evans (EURECOM);
+John Bustard, Mark Nixon (USOU); Abdenour Hadid (UOULU); William
+Ketchantang, Sylvaine Picard, St´ephane Revelin (MORPHO); Ale-
+jandro Riera, Aureli Soria-Frisch (STARLAB); Gian Luca Marcialis
+(UNICA)
+Project funded by the European Commission
+in the 7th Framework Programme (2008-2010)
+Dissemination Level"
+ffd81d784549ee51a9b0b7b8aaf20d5581031b74,Performance Analysis of Retina and DoG Filtering Applied to Face Images for Training Correlation Filters,"Performance Analysis of Retina and DoG
+Filtering Applied to Face Images for Training
+Correlation Filters
+Everardo Santiago Ram(cid:19)(cid:16)rez1, Jos(cid:19)e (cid:19)Angel Gonz(cid:19)alez Fraga1, Omar (cid:19)Alvarez
+Xochihua1, Everardo Gutierrez L(cid:19)opez1, and Sergio Omar Infante Prieto2
+Facultad de Ciencias, Universidad Aut(cid:19)onoma de Baja California,
+Carretera Transpeninsular Tijuana-Ensenada, N(cid:19)um. 3917, Colonia Playitas,
+Ensenada, Baja California, C.P. 22860
+{everardo.santiagoramirez,angel_fraga,
+Facultad de Ingenier(cid:19)(cid:16)a, Arquitectura y Dise~no, Universidad Aut(cid:19)onoma de Baja
+California, Carretera Transpeninsular Tijuana-Ensenada, N(cid:19)um. 3917, Colonia
+Playitas, Ensenada, Baja California, C.P. 22860"
+fff854b3d8f8e916162dc5451cf6f46caf50002b,Multi-task Learning for Universal Sentence Embeddings: A Thorough Evaluation using Transfer and Auxiliary Tasks,"Multi-task Learning for Universal Sentence Embeddings: A Thorough
+Evaluation using Transfer and Auxiliary Tasks
+Wasi Uddin Ahmad†, Xueying Bai∗, Zhechao Huang§, Chao Jiang∗, Nanyun Peng(cid:63), Kai-Wei Chang†
+§Fudan University, ∗University of Virginia
+(cid:63)University of Southern California, †University of California, Los Angeles"
+ffdaa12d37c720561f74d23fc3b5d47afa268000,Pose Proposal Networks,"Pose Proposal Networks
+Taiki Sekii[0000−0002−1895−3075]
+Konica Minolta, Inc."
+ff4e8a8333e4ef506318160248c068250963806d,Gender recognition from face images using texture descriptors for human computer interaction,"www.jchps.com Journal of Chemical and Pharmaceutical Sciences
+Gender recognition from face images using texture descriptors
+ISSN: 0974-2115
+for human computer interaction
+M.Annalakshmi1*, S.M.M.Roomi2, and S.S.Priya1
+&3Department of Electronics and Communication Engineering, Sethu Institute of Technology, Pulloor, Kariapatti
+Department of Electronics and Communication Engineering, Thiagarajar College of Engineering, Madurai 625
+– 626 115, Virudhunagar – District, Tamilnadu, India.
+*Corresponding author: E-Mail:
+015, Tamilnadu, India"
+ff01bc3f49130d436fca24b987b7e3beedfa404d,Fuzzy System-Based Face Detection Robust to In-Plane Rotation Based on Symmetrical Characteristics of a Face,"Article
+Fuzzy System-Based Face Detection Robust to
+In-Plane Rotation Based on Symmetrical
+Characteristics of a Face
+Hyung Gil Hong, Won Oh Lee, Yeong Gon Kim, Ki Wan Kim, Dat Tien Nguyen and
+Kang Ryoung Park *
+Division of Electronics and Electrical Engineering, Dongguk University, 30 Pildong-ro 1-gil, Jung-gu,
+Seoul 100-715, Korea; (H.G.H.); (W.O.L.); (Y.G.K.);
+(K.W.K.); (D.T.N.)
+* Correspondence: Tel.: +82-10-3111-7022; Fax: +82-2-2277-8735
+Academic Editor: Angel Garrido
+Received: 15 June 2016; Accepted: 29 July 2016; Published: 3 August 2016"
+ffd73d1956163a4160ec2c96b3ab256f79fc92e8,Attributes as Semantic Units between Natural Language and Visual Recognition,"Attributes as Semantic Units between
+Natural Language and Visual Recognition
+Marcus Rohrbach"
+ffc06713436afc4e08bf4afa401ac52db674c5da,Neural Adaptive Content-aware Internet Video Delivery,"Neural Adaptive Content-aware
+Internet Video Delivery
+Hyunho Yeo, Youngmok Jung, Jaehong Kim, Jinwoo Shin, and Dongsu Han, KAIST
+https://www.usenix.org/conference/osdi18/presentation/yeo
+This paper is included in the Proceedings of the 13th USENIX Symposium on Operating Systems Design and Implementation (OSDI ’18).October 8–10, 2018 • Carlsbad, CA, USAISBN 978-1-931971-47-8Open access to the Proceedings of the 13th USENIX Symposium on Operating Systems Design and Implementation is sponsored by USENIX."
+ff269353b4e49274ff85dfb98b531888c98da365,Master : a Mobile Autonomous Scientist for Terretrial and Extra-terrestrial Research,"MASTER: A MOBILE AUTONOMOUS SCIENTIST FOR TERRETRIAL AND EXTRA-
+TERRESTRIAL RESEARCH
+Iain Wallace (1), Mark Woods (2)
+(1) SCISYS, 23 Clothier Road, Bristol, BS4 5SS, UK, Email:
+(2) SCISYS, 23 Clothier Road, Bristol, BS4 5SS, UK, Email:
+paper
+includes
+utonomy. The
+INTRODUCTION"
+ff3fa31882bb9c7573a38c7d0883503a464522a6,Imcube @ MediaEval 2015 Placing Task: Hierarchical Approach for Geo-referencing Large-Scale Datasets,"Imcube MediaEval 2015 Placing Task: A Hierarchical
+Approach for Geo-referencing Large-Scale Datasets
+Pascal Kelm, Sebastian Schmiedeke, and Lutz Goldmann
+{kelm, schmiedeke,
+Imcube Labs GmbH
+Berlin, Germany"
+fff12919cf912347776b70aa76af7635280dc401,Are object detection assessment criteria ready for maritime computer vision?,"Are object detection assessment criteria ready
+for maritime computer vision?
+Dilip K. Prasad1,∗, Deepu Rajan2, and Chai Quek2"
+ffcb92719dcd993dda292ca82d4585950ea22ac9,Handwritten Digit Recognition Using Convolutional Neural Networks,"ISSN(Online): 2320-9801
+ISSN (Print): 2320-9798
+International Journal of Innovative Research in Computer
+nd Communication Engineering
+(An ISO 3297: 2007 Certified Organization)
+Vol. 4, Issue 2, February 2016
+Handwritten Digit Recognition Using
+Convolutional Neural Networks
+Haider A. Alwzwazy1, Hayder M. Albehadili2, Younes S. Alwan3, Naz E. Islam4
+M.E Student, Dept. of Electrical and Computer Eng. University of Missouri-Columbia, MO, USA1,2,3
+Professor, Dept. of Electrical and Computer Eng. University of Missouri-Columbia, MO, USA4"
+ff7de2ea4d21e7d32d7f07e07fd278bebf6b5d66,Comparative survey of visual object classifiers,"Comparative survey of visual object classifiers
+Laboratory Le2i, Universite Bourgogne - Franche-Comte,
+Hiliwi Leake Kidane
+1000 Dijon, France,"
+ffae2fe85d3c93610ac6270db2ddf1f2f6779ea8,Learning pullback HMM distances for action recognition,"#****
+ICCV 2011 Submission #****. CONFIDENTIAL REVIEW COPY. DO NOT DISTRIBUTE.
+Learning pullback HMM distances for action recognition
+Anonymous ICCV submission
+Paper ID ****"
+ffc9d6a5f353e5aec3116a10cf685294979c63d9,Eigenphase-based face recognition: a comparison of phase- information extraction methods,"Eigenphase-based face recognition: a comparison of phase-
+information extraction methods
+Slobodan Ribarić, Marijo Maračić
+Faculty of Electrical Engineering and Computing,
+University of Zagreb, Unska 3, 10 000 Zagreb
+E-mail:"
+ff25c6602305ac46e9c35ffa4e30b14d679a5413,Face Templates Creation for Surveillance Face Recognition System,"Face Templates Creation for Surveillance Face Recognition System
+Department of Radio Electronics, Brno University of Technology, Brno, Czech Republic
+Department of Telecommunications, Brno University of Technology, Brno, Czech Republic
+Tobias Malach1,2 and Jiri Prinosil3
+EBIS, spol. s r.o., Brno, Czech Republic
+Keywords:
+Face Templates, Template Database Creation, Face Recognition System Application, Real-world
+Conditons."
+ff8ef43168b9c8dd467208a0b1b02e223b731254,BreakingNews: Article Annotation by Image and Text Processing,"BreakingNews: Article Annotation by
+Image and Text Processing
+Arnau Ramisa*, Fei Yan*, Francesc Moreno-Noguer,
+nd Krystian Mikolajczyk"
+ff9195f99a1a28ced431362f5363c9a5da47a37b,Serial dependence in the perception of attractiveness,"Journal of Vision (2016) 16(15):28, 1–8
+Serial dependence in the perception of attractiveness
+Ye Xia
+Department of Psychology, University of California,
+Berkeley, CA, USA
+Allison Yamanashi Leib
+Department of Psychology, University of California,
+Berkeley, CA, USA
+David Whitney
+Department of Psychology, University of California,
+Berkeley, CA, USA
+Helen Wills Neuroscience Institute, University of
+California, Berkeley, CA, USA
+Vision Science Group, University of California,
+Berkeley, CA, USA
+The perception of attractiveness is essential for choices
+of food, object, and mate preference. Like perception of
+other visual features, perception of attractiveness is
+stable despite constant changes of image properties due
+to factors like occlusion, visual noise, and eye"
+ff3ec3607b77a1dbb685cf90dd23a273d622dda5,Visual Attribute Extraction Using Human Pose Estimation,"Visual Attribute Extraction using Human Pose
+Estimation
+Angelo Nodari, Marco Vanetti, and Ignazio Gallo
+Universit`a dell’Insubria, Dipartimento di Scienze Teoriche e Applicate
+via Mazzini 5, 21100 Varese, Italy"
+ff4dec12d0ba0bb1d2c6bbc194545819bc9c1e5a,Face Recognition at a Distance: System Issues,"Chapter 6
+Face Recognition at a Distance:
+System Issues
+Meng Ao, Dong Yi, Zhen Lei, and Stan Z. Li"
+ffc8f9fe66a14aa0657e59e219364b5a852ecb8f,On the Utility of Context (or the Lack Thereof) for Object Detection,"On the Utility of Context (or the Lack Thereof) for Object Detection
+Ehud Barnea and Ohad Ben-Shahar
+Dept. of Computer Science, Ben-Gurion University
+Beer-Sheva, Israel
+{barneaeh,"
+ff83aade985b981fbf2233efbbd749600e97454c,Towards Understanding Adversarial Learning for Joint Distribution Matching,"ALICE: Towards Understanding Adversarial
+Learning for Joint Distribution Matching
+Chunyuan Li1, Hao Liu2, Changyou Chen3, Yunchen Pu1, Liqun Chen1,
+Ricardo Henao1 and Lawrence Carin1
+Duke University 2Nanjing University 3University at Buffalo"
+ffcbedb92e76fbab083bb2c57d846a2a96b5ae30,Sparse Dictionary Learning and Domain Adaptation for Face and Action Recognition,
+ff7bc7a6d493e01ec8fa2b889bcaf6349101676e,Facial expression recognition with spatiotemporal local descriptors_v3.rtf,"Facial expression recognition with spatiotemporal local
+descriptors
+Guoying Zhao, Matti Pietikäinen
+Machine Vision Group, Infotech Oulu and Department of Electrical and
+Information Engineering, P. O. Box 4500 FI-90014 University of Oulu, Finland
+{gyzhao,"
+ff46c41e9ea139d499dd349e78d7cc8be19f936c,A Novel Method for Movie Character Identification and its Facial Expression Recognition,"International Journal of Modern Engineering Research (IJMER)
+www.ijmer.com Vol.3, Issue.3, May-June. 2013 pp-1339-1342 ISSN: 2249-6645
+A Novel Method for Movie Character Identification and its
+Facial Expression Recognition
+M. Dharmateja Purna, 1 N. Praveen2
+M.Tech, Sri Sunflower College of Engineering & Technology, Lankapalli
+Asst. Professor, Dept. of ECE, Sri Sunflower College of Engineering & Technology, Lankapalli"
+ffb2d596c22be7b0ed8f809fdfbeaa95bd4db835,"The BDD-Nexar Collective: A Large-Scale, Crowsourced, Dataset of Driving Scenes","The BDD-Nexar Collective: A Large-Scale, Crowsourced,
+Dataset of Driving Scenes
+Vashisht Madhavan
+Trevor Darrell
+Fisher Yu, Ed.
+Electrical Engineering and Computer Sciences
+University of California at Berkeley
+Technical Report No. UCB/EECS-2017-113
+http://www2.eecs.berkeley.edu/Pubs/TechRpts/2017/EECS-2017-113.html
+May 29, 2017"
+ff5dd6f96e108d8233220cc262bc282229c1a582,Robust Facial Marks Detection Method Using AAM And SURF,"Ziaul Haque Choudhury, K.M. Mehata / International Journal of Engineering Research and
+Applications (IJERA) ISSN: 2248-9622 www.ijera.com
+Vol. 2, Issue 6, November- December 2012, pp.708-715
+Robust Facial Marks Detection Method Using AAM And SURF
+Ziaul Haque Choudhury, K.M. Mehata
+Dept. of Information Technology, B.S. Abdur Rahman University, Chennai-48, India
+Dept. of Computer Science & Engineering, B.S. Abdur Rahman University, Chennai-48, India"
+ffe8a4cef9dec30ddd2c956c2f63b128a4568f84,Intensity Video Guided 4D Fusion for Improved Highly Dynamic 3D Reconstruction,"Intensity Video Guided 4D Fusion for
+Improved Highly Dynamic 3D Reconstruction
+Jie Zhang, Christos Maniatis, Luis Horna and Robert B. Fisher"
+c5af99522e324b72c8a563a5d6b7c9a0101efb65,Exploring Human Vision Driven Features for Pedestrian Detection,"(cid:13) 2015 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any
+urrent or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new
+ollective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other
+works."
+c54e8c7a4f9c2ebd8787aecafa4cfdb35bfd49e0,Effective Use of Bidirectional Language Modeling for Medical Named Entity Recognition,"Effective Use of Bidirectional Language Modeling for
+Medical Named Entity Recognition
+Devendra Singh Sachan1,*, Pengtao Xie1, and Eric P Xing1
+Petuum Inc, Pittsburgh, 15222, USA"
+c588c89a72f89eed29d42f34bfa5d4cffa530732,Attributes2Classname: A Discriminative Model for Attribute-Based Unsupervised Zero-Shot Learning,"Attributes2Classname: A discriminative model for attribute-based
+unsupervised zero-shot learning
+Berkan Demirel1,3, Ramazan Gokberk Cinbis2, Nazli Ikizler-Cinbis3
+HAVELSAN Inc., 2Bilkent University, 3Hacettepe University"
+c52aa6b9c7b89782f2316ce8ef2156fa06a3696d,Learning Semantic Part-Based Models from Google Images,"Learning Semantic Part-Based Models
+from Google Images
+Davide Modolo and Vittorio Ferrari"
+c5420ef59d7508d82e53671b0d623027eb58e6ed,Learning to Reweight Examples for Robust Deep Learning,"Learning to Reweight Examples for Robust Deep Learning
+Mengye Ren 1 2 Wenyuan Zeng 1 2 Bin Yang 1 2 Raquel Urtasun 1 2"
+c5318c79bc1b880e8356211b837b684f1ee6e5c4,Acquiring Common Sense Spatial Knowledge Through Implicit Spatial Templates,"Acquiring Common Sense Spatial Knowledge through Implicit Spatial Templates
+Department of Computer Science
+Computer Vision Laboratory
+Guillem Collell
+KU Leuven
+Luc Van Gool
+ETH Zurich
+Marie-Francine Moens
+Department of Computer Science
+KU Leuven"
+c55a6c98887b3079647d0edb4778d81bab6708f6,Self-Similarity Representation of Faces for Kin Relationships,"HCTL Open International Journal of Technology Innovations and Research (IJTIR)
+http://ijtir.hctl.org
+Volume 16, July 2015
+e-ISSN: 2321-1814, ISBN (Print): 978-1-943730-43-8
+Self-Similarity Representation
+of Faces for Kin
+Relationships
+Pratibha Chaskar1, Dr. Manjusha Deshmukh2"
+c5decf0a3906c85b6540e96c9c7003957c6d395b,Optimizing the Trade-off between Single-Stage and Two-Stage Object Detectors using Image Difficulty Prediction,"Optimizing the Trade-off between
+Single-Stage and Two-Stage Deep Object Detectors
+using Image Difficulty Prediction
+Petru Soviany, Radu Tudor Ionescu
+Department of Computer Science
+University of Bucharest, Romania
+E-mails:"
+c574c72b5ef1759b7fd41cf19a9dcd67e5473739,"COGNIMUSE: a multimodal video database annotated with saliency, events, semantics and emotion with application to summarization","Zlatintsi et al. EURASIP Journal on Image and Video Processing (2017) 2017:54
+DOI 10.1186/s13640-017-0194-1
+EURASIP Journal on Image
+nd Video Processing
+RESEARCH
+Open Access
+COGNIMUSE: a multimodal video
+database annotated with saliency, events,
+semantics and emotion with application to
+summarization
+Athanasia Zlatintsi1*
+Niki Efthymiou1, Katerina Pastra4, Alexandros Potamianos1 and Petros Maragos1
+, Petros Koutras1, Georgios Evangelopoulos2, Nikolaos Malandrakis3,"
+c5b05718963f4edff80456c441796e4199ad8d41,Sampling and Ontologically Pooling Web Images for Visual Concept Learning,"Sampling and Ontologically Pooling Web Images for
+Visual Concept Learning
+Shiai Zhu, Chong-Wah Ngo, and Yu-Gang Jiang"
+c5a561c662fc2b195ff80d2655cc5a13a44ffd2d,Using Language to Learn Structured Appearance Models for Image Annotation,"Using Language to Learn Structured Appearance
+Models for Image Annotation
+Michael Jamieson, Student Member, IEEE, Afsaneh Fazly, Suzanne Stevenson, Sven Dickinson, Member, IEEE,
+Sven Wachsmuth, Member, IEEE"
+c5e4467b5830d7dad4e940f0766ae728f22e38fc,Object recognition and localization,"Object recognition and localization
+Badri Narayana Patro
+Dept. of Electrical Engineering
+Ganesh Boddupally
+Dept. of Electrical Engineering"
+c5637543e80f97c9ddab8b54a635cf71941e2786,Self-Calibrating View-Invariant Gait Biometrics,"This article has been accepted for inclusion in a future issue of this journal. Content is final as presented, with the exception of pagination.
+IEEE TRANSACTIONS ON SYSTEMS, MAN, AND CYBERNETICS—PART B: CYBERNETICS
+Self-Calibrating View-Invariant Gait Biometrics
+Michela Goffredo, Member, IEEE, Imed Bouchrika, Member, IEEE, John N. Carter, Member, IEEE, and
+Mark S. Nixon, Associate Member, IEEE"
+c528e6285ed170c9a838446c062c8dfbe31c546e,Real Time 3 D Head Pose Estimation : Recent Achievements and Future Challenges,"REAL TIME 3D HEAD POSE ESTIMATION:
+RECENT ACHIEVEMENTS AND FUTURE CHALLENGES
+Gabriele Fanelli, Juergen Gall, Luc Van Gool
+Computer Vision Laboratory - ETH Zurich"
+c542fa8c4cfaff6a8d8efa9678e42e1b9ead8aa9,griffith . edu . au Face Recognition using Ensemble String Matching,"Griffith Research Online
+https://research-repository.griffith.edu.au
+Face Recognition using Ensemble String
+Matching
+Author
+Chen, Weiping, Gao, Yongsheng
+Published
+Journal Title
+IEEE Transactions on Image Processing
+https://doi.org/10.1109/TIP.2013.2277920
+Copyright Statement
+Copyright 2013 IEEE. Personal use of this material is permitted. Permission from IEEE must be
+obtained for all other uses, in any current or future media, including reprinting/republishing this material
+for advertising or promotional purposes, creating new collective works, for resale or redistribution to
+servers or lists, or reuse of any copyrighted component of this work in other works.
+Downloaded from
+http://hdl.handle.net/10072/54416"
+c53a512b4d7dee0d8d0f3e5bf2c6ace7a00cbbae,"Content-Based Video Indexing and Retrieval using Key frames Texture, Edge and Motion Features","International Journal of Current Engineering and Technology
+©2016 INPRESSCO®, All Rights Reserved
+Research Article
+Content-Based Video Indexing and Retrieval using Key frames Texture,
+Edge and Motion Features
+M.Ravinder†* and T.Venugopal‡
+E-ISSN 2277 – 4106, P-ISSN 2347 – 5161
+Available at http://inpressco.com/category/ijcet
+(R.Hamid et al., 2007; G. Lavee et al., 2009; J. Tang et al.,
+009; X. Chen et al., 2009).
+JNTUK, Kakinada, Andhra Pradesh, India
+Department of CSE, JNTUHCES, Sultanpur, Medak, Telangana, India
+Accepted 25 April 2016, Available online 30 April 2016, Vol.6, No.2 (April 2016)"
+c593c6080c75133191a27381a58cd07c97aa935b,Gender Classification Using a Min-Max Modular Support Vector Machine with Incorporating Prior Knowledge,"SUBMITTED TO IEEE TRANSACTIONS ON NEURAL NETWORKS
+Gender Classification Using a Min-Max Modular
+Support Vector Machine with Incorporating
+Prior Knowledge
+Hui-Cheng Lian and ∗Bao-Liang Lu, Senior Member, IEEE"
+c5d9ac2f52c9fc229890798b9d6e4d899b72c525,Image Enhancement Technique using Adaptive Multiscale Retinex for Face Recognition Systems,"Image Enhancement Technique using Adaptive
+Multiscale Retinex for Face Recognition Systems
+Khairul Anuar Ishak1, Salina Abdul Samad1
+M. A. Hannan1 and Maizura Mohd Sani2
+Dept. of Electrical, Electronics and Systems Engineering
+Faculty of Engineering and Built Environment, University Kebangsaan Malaysia
+3600, UKM Bangi, Selangor, Malaysia
+Institute of Microengineering and Nanoelectronics, University Kebangsaan Malaysia
+3600, UKM Bangi, Selangor, Malaysia"
+c5c379a807e02cab2e57de45699ababe8d13fb6d,Facial Expression Recognition Using Sparse Representation,"Facial Expression Recognition Using Sparse Representation
+SHIQING ZHANG 1, XIAOMING ZHAO 2, BICHENG LEI 1
+School of Physics and Electronic Engineering
+Taizhou University
+Taizhou 318000
+CHINA
+2Department of Computer Science
+Taizhou University
+Taizhou 318000
+CHINA"
+c5ea084531212284ce3f1ca86a6209f0001de9d1,Audio-visual speech processing for multimedia localisation,"Audio-Visual Speech Processing for
+Multimedia Localisation
+Matthew Aaron Benatan
+Submitted in accordance with the requirements
+for the degree of Doctor of Philosophy
+The University of Leeds
+School of Computing
+September 2016"
+c5c0cda46a77a7ea8c1f6d4d762b189ef424ffa4,Semantic 3 D Reconstruction of Heads,"Semantic 3D Reconstruction of Heads
+Fabio Maninchedda1, Christian H¨ane2,(cid:63), Bastien Jacquet3,(cid:63),
+Ama¨el Delaunoy(cid:63), Marc Pollefeys1,4
+ETH Zurich
+UC Berkeley
+Kitware SAS
+Microsoft"
+c52f2a00fdbfb7fb10252796dbede6403e780da6,Input Convex Neural Networks,"Input Convex Neural Networks
+Brandon Amos 1 Lei Xu 2 * J. Zico Kolter 1"
+c50c034d264083757eadeee5d0b94d933fe78544,Query by string word spotting based on character bi-gram indexing,"Query by String word spotting based on character
+i-gram indexing
+Computer Vision Center, Dept. Ci`encies de la Computaci´o
+Universitat Aut`onoma de Barcelona, 08193 Bellaterra (Barcelona), Spain
+Suman K. Ghosh and Ernest Valveny
+Email:"
+c5844de3fdf5e0069d08e235514863c8ef900eb7,A Study on Similarity Computations in Template Matching Technique for Identity Verification,"Lam S K et al. / (IJCSE) International Journal on Computer Science and Engineering
+Vol. 02, No. 08, 2010, 2659-2665
+A Study on Similarity Computations in Template
+Matching Technique for Identity Verification
+Lam, S. K., Yeong, C. Y., Yew, C. T., Chai, W. S., Suandi, S. A.
+Intelligent Biometric Group, School of Electrical and Electronic Engineering
+Engineering Campus, Universiti Sains Malaysia
+4300 Nibong Tebal, Pulau Pinang, MALAYSIA
+Email:"
+c590c6c171392e9f66aab1bce337470c43b48f39,Emotion Recognition by Machine Learning Algorithms using Psychophysiological Signals,"Emotion Recognition by Machine Learning Algorithms using
+Psychophysiological Signals
+Eun-Hye Jang, 2Byoung-Jun Park, 3Sang-Hyeob Kim, 4Jin-Hun Sohn
+, 2, 3 BT Convergence Technology Research Department, Electronics and Telecommunications
+Research Institute, 138 Gajeongno, Yuseong-gu, Daejeon, 305-700, Republic of Korea,
+*4Department of Psychology/Brain Research Institute, Chungnam National University 220,
+Gung-dong, Yuseong-gu, Daejeon, 305-765, Republic of Korea,"
+c591cb28d12b7ee53af4e5c2050b74071527c248,The face of fear and anger: Facial width-to-height ratio biases recognition of angry and fearful expressions.,"The Face of Fear and Anger: Facial Width-to-Height
+Ratio Biases Recognition of Angry and Fearful
+Expressions
+Jason C. Deska, E. Paige Lloyd, and Kurt Hugenberg
+Online First Publication, May 11, 2017. http://dx.doi.org/10.1037/emo0000328
+CITATION
+Deska, J. C., Lloyd, E. P., & Hugenberg, K. (2017, May 11). The Face of Fear and Anger: Facial
+online publication. http://dx.doi.org/10.1037/emo0000328"
+c55dcc587a53ff82cf3f79d84e7df67f4c8f77ed,TabletGaze: A Dataset and Baseline Algorithms for Unconstrained Appearance-based Gaze Estimation in Mobile Tablets,"TabletGaze: A Dataset and Baseline Algorithms
+for Unconstrained Appearance-based Gaze
+Estimation in Mobile Tablets
+Qiong Huang, Student Member, IEEE, Ashok Veeraraghavan, Member, IEEE,
+nd Ashutosh Sabharwal, Fellow, IEEE"
+c50630e485d3c7785ea9e1f3bff35ea00e926a56,Deep Image Retrieval: Learning Global Representations for Image Search,"Deep Image Retrieval:
+Learning global representations for image search
+Albert Gordo, Jon Almaz´an, Jerome Revaud, and Diane Larlus
+Computer Vision Group, Xerox Research Center Europe"
+c5c6ec48ae98d86171360b19e3ec03738c712f53,Infinite Hidden Conditional Random Fields for Human Behavior Analysis,"Infinite Hidden Conditional Random Fields for
+Human Behavior Analysis
+Konstantinos Bousmalis, Student Member, IEEE,
+Stefanos Zafeiriou, Member, IEEE,
+Louis-Philippe Morency, Member, IEEE,
+nd Maja Pantic, Fellow, IEEE"
+c2c3ff1778ed9c33c6e613417832505d33513c55,"Multimodal Biometric Person Authentication Using Fingerprint, Face Features","Multimodal Biometric Person Authentication
+Using Fingerprint, Face Features
+Tran Binh Long1, Le Hoang Thai2, and Tran Hanh1
+Department of Computer Science, University of Lac Hong 10 Huynh Van Nghe,
+DongNai 71000, Viet Nam
+Department of Computer Science, Ho Chi Minh City University of Science
+27 Nguyen Van Cu, HoChiMinh 70000, Viet Nam"
+c21db705a33212768c63be11747d075371c7307f,A Content-Based Late Fusion Approach Applied to Pedestrian Detection,"A Content-Based Late Fusion Approach Applied to
+Pedestrian Detection
+Jessica Sena, Artur Jord˜ao, William Robson Schwartz
+Smart Surveillance Interest Group
+Department of Computer Science, Universidade Federal de Minas Gerais
+Av. Presidente Antˆonio Carlos, 6627 - Pampulha, Belo Horizonte, Brazil"
+c2adfc55e0ab9be6e8f5e4ebeb20770dca307cef,"The effect of diagnosis, age, and symptom severity on cortical surface area in the cingulate cortex and insula in autism spectrum disorders.","http://jcn.sagepub.com/
+The Effect of Diagnosis, Age, and Symptom Severity on Cortical Surface Area in the Cingulate Cortex
+nd Insula in Autism Spectrum Disorders
+Krissy A.R. Doyle-Thomas, Azadeh Kushki, Emma G. Duerden, Margot J. Taylor, Jason P. Lerch, Latha V. Soorya, A.
+Ting Wang, Jin Fan and Evdokia Anagnostou
+J Child Neurol
+2013 28: 729 originally published online 25 July 2012
+DOI: 10.1177/0883073812451496
+The online version of this article can be found at:
+http://jcn.sagepub.com/content/28/6/729
+Published by:
+http://www.sagepublications.com
+Additional services and information for
+can be found at:
+Email Alerts:
+http://jcn.sagepub.com/cgi/alerts
+Subscriptions:
+http://jcn.sagepub.com/subscriptions
+Reprints:
+http://www.sagepub.com/journalsReprints.nav"
+c27f64eaf48e88758f650e38fa4e043c16580d26,Title of the proposed research project: Subspace analysis using Locality Preserving Projection and its applications for image recognition,"Title of the proposed research project: Subspace analysis using Locality Preserving
+Projection and its applications for image recognition
+Research area: Data manifold learning for pattern recognition
+Contact Details:
+Name: Gitam C Shikkenawis
+Email Address:
+University: Dhirubhai Ambani Institute of Information and Communication Technology
+(DA-IICT), Gandhinagar."
+c2d065bc8067384c40b3e8146cadc9a0c4c1d633,SLC25A12 expression is associated with neurite outgrowth and is upregulated in the prefrontal cortex of autistic subjects,"& 2008 Nature Publishing Group All rights reserved 1359-4184/08 $30.00
+www.nature.com/mp
+ORIGINAL ARTICLE
+SLC25A12 expression is associated with neurite
+outgrowth and is upregulated in the prefrontal cortex
+of autistic subjects
+A-M Lepagnol-Bestel1, G Maussion1, B Boda2, A Cardona3, Y Iwayama4, A-L Delezoide5, J-M Moalic1,
+D Muller2, B Dean6, T Yoshikawa4,7, P Gorwood1, JD Buxbaum8,9, N Ramoz1 and M Simonneau1
+INSERM U675, IFR2, Faculte´ de Me´ decine Xavier Bichat, Paris, France; 2Department of Basic Neuroscience, Centre Medical
+Universitaire, Geneva, Switzerland; 3Histotechnology and Pathology Unit, Institut Pasteur, Paris, France; 4Laboratory for
+AP-HP, Paris, France; 6The Rebecca L Cooper Research Laboratories, Mental Health Research Institute of Victoria, Parkville,
+VIC, Australia; 7CREST, Japan Science and Technology Agency, Saitama, Japan; 8Department of Psychiatry, Mount Sinai
+School of Medicine, New York, NY, USA and 9Department of Neuroscience, Mount Sinai School of Medicine, New York,
+NY, USA
+in the BA46 prefrontal cortex but not
+Autism is a neurodevelopmental disorder with a strong genetic component, probably involving
+several genes. Genome screens have provided evidence of linkage to chromosome 2q31–q33,
+which includes the SLC25A12 gene. Association between autism and single-nucleotide
+polymorphisms in SLC25A12 has been reported in various studies. SLC25A12 encodes the
+mitochondrial aspartate/glutamate carrier functionally important"
+c231d8638e8b5292c479d20f7dd387c53e581a1a,Multi-View Data Generation Without View Supervision,"MULTI-VIEW DATA GENERATION WITHOUT VIEW
+SUPERVISION
+Micka¨el Chen, Ludovic Denoyer
+Sorbonne Universit´es, UPMC Univ Paris 06, UMR 7606, LIP6, F-75005, Paris, France
+Thierry Arti`eres
+Ecole Centrale Marseille - Laboratoire d’Informatique Fondamentale (Aix-Marseille Univ.), France."
+c223b2b7d38dc4e0ad418c404b2d3c43c62213bc,Trade-off Between GPGPU based Implementations of Multi Object Tracking Particle Filter,"Trade-off between GPGPU based implementations of
+multi object tracking particle filter
+Petr Jecmen, Frédéric Lerasle, Alhayat Ali Mekonnen
+To cite this version:
+Petr Jecmen, Frédéric Lerasle, Alhayat Ali Mekonnen. Trade-off between GPGPU based implemen-
+tations of multi object tracking particle filter. International Conference on Computer Vision Theory
+nd Applications, Feb 2017, Porto, Portugal. 10p., 2017. <hal-01763095>
+HAL Id: hal-01763095
+https://hal.laas.fr/hal-01763095
+Submitted on 10 Apr 2018
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de"
+c2fb2cb5487ad404b8e66daf74198496c40bef32,Learning to Transfer Privileged Information,"Learning to Transfer Privileged Information
+Viktoriia Sharmanska1∗, Novi Quadrianto2, and Christoph Lampert1,
+Institute of Science and Technology Austria, Austria
+SMiLe CLiNiC, University of Sussex, UK"
+c220f457ad0b28886f8b3ef41f012dd0236cd91a,Crystal Loss and Quality Pooling for Unconstrained Face Verification and Recognition,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+Crystal Loss and Quality Pooling for
+Unconstrained Face Verification and Recognition
+Rajeev Ranjan, Member, IEEE, Ankan Bansal, Hongyu Xu, Member, IEEE,
+Swami Sankaranarayanan, Member, IEEE, Jun-Cheng Chen, Member, IEEE,
+Carlos D Castillo, Member, IEEE, and Rama Chellappa, Fellow, IEEE"
+c28f57d0a22e54fdd3c4a57ecb1785dda49f0e5e,From Scores to Face Templates: A Model-Based Approach,"From Scores to Face Templates:
+A Model-Based Approach
+Pranab Mohanty, Student Member, IEEE, Sudeep Sarkar, Senior Member, IEEE, and
+Rangachar Kasturi, Fellow, IEEE"
+c254b4c0f6d5a5a45680eb3742907ec93c3a222b,A Fusion-based Gender Recognition Method Using Facial Images,"A Fusion-based Gender Recognition Method
+Using Facial Images
+Benyamin Ghojogh, Saeed Bagheri Shouraki, Hoda Mohammadzade*, Ensieh Iranmehr"
+c259693737ce52e2e37972e15334cbe78b653e69,Image Processing Supports HCI in Museum Application,"Image Processing Supports HCI in Museum Application
+Niki Martinel, Marco Vernier, Gian Luca Foresti and Elisabetta Lamedica
+Department of Mathematics and Computer Science, University of Udine, Via Delle Scienze 206, Udine, Italy
+{niki.martinel, marco.vernier,
+Keywords:
+Augmented Reality: Information Visualization: User Interface Design: Mobile HCI."
+c29487c5eb0cdb67d92af1bc0ecbcf825e2abec3,3-D Face Recognition With the Geodesic Polar Representation,"-D Face Recognition With the
+Geodesic Polar Representation
+Iordanis Mpiperis, Sotiris Malassiotis, and Michael G. Strintzis, Fellow, IEEE
+therefore,"
+c2b1007824fa7ce3a7a94209f0be0902a3454bae,Project Description 1 Introduction,"Project Description
+Introduction
+Recognizing human action is a key component in many vision applications, such as video surveil-
+lance, 3D human pose estimation and video indexing. From the human-centered computing (HCC)
+point of view, an automatic action recognition system can provide an interface between artificial
+gents and human users accounting for perception and action in a novel interaction paradigm.
+Although significant progress has been made in action recognition [1], the problem remains inher-
+ently challenging due to significant intra-class variations, viewpoint change, partial occlusion and
+ackground dynamic variations. A key limitation of many action-recognition approaches is that
+their models are learned from single 2D view video features on individual datasets and thus un-
+ble to handle arbitrary view change or scale and background variations. Also, since they are not
+generalizable across different datasets, retraining is necessary for any new dataset.
+Our research is motivated by the requirement of view-invariant action recognition and the fact that
+the existing human motion capture data provides useful knowledge to understand the intrinsic motion
+structure (Fig. 2). In particular, we address the problem of modeling and analyzing human motion
+in the joint-trajectories space. Our view-invariant recognition system has the following functions
+(Fig. 1),
+(1) Given a labeled Mocap sequences with M markers in 3D, which is a 3M -dimensional sequential
+data, the low dimensional manifold structure (i.e., geodesics distance, intrinsic dimensionality, etc)
+is learnt by using Tensor Voting. This is an offline process, as shown in Fig. 1."
+c2f2c89d7615df07b540748d6c53485c4cbfa9c0,An Experience Report on Requirements-Driven Model-Based Synthetic Vision Testing,"An Experience Report on Requirements-Driven
+Model-Based Synthetic Vision Testing
+Markus Murschitz and Oliver Zendel and Martin Humenberger
+nd Christoph Sulzbachner and Gustavo Fern´andez Dom´ınguez 1"
+c2b9d6742e504491800cee44adb05d2d706fc209,Semantic-Based Web Mining For Image Retrieval Using Enhanced Support Vector Machine,"International Journal of Applied Engineering Research ISSN 0973-4562 Volume 11, Number 5 (2016) pp 3276-3281
+© Research India Publications. http://www.ripublication.com
+Semantic-Based Web Mining For Image Retrieval Using Enhanced Support
+Vector Machine
+Ph.D Research Scholar, Research Department of Computer Science,
+NGM College, Pollachi, Coimbatore, Tamil Nadu, India.
+P. Sumathi
+R. Manickachezian
+Associate Professor, Research Department of Computer Science,
+NGM College, Pollachi, Coimbatore, Tamil Nadu, India."
+c2eed73654b544a705b194ade58cd82488c6c5b9,"Scene Understanding by Labeling Pixels Key Insights ˽ Recent Progress on Image Understanding, a Long-standing Challenge of Ai, Is Enabling Numerous New Applications in Robot Perception, Surveillance and Environmental Monitoring, Content- Based Image Search, and Social-media Summarization","ontributed articles
+DOI:10.1145/2629637
+Pixels labeled with a scene’s semantics and
+geometry let computers describe what they see.
+BY STEPHEN GOULD AND XUMING HE
+Scene
+Understanding
+y Labeling
+Pixels
+PROGRAMMING COMPUTERS TO automatically interpret
+the content of an image is a long-standing challenge in
+rtificial intelligence and computer vision. That difficulty
+is echoed in a well-known anecdote from the early years
+of computer-vision research in which an undergraduate
+student at MIT was asked to spend his summer getting a
+omputer to describe what it “saw” in images obtained
+from a video camera.35 Almost 50 years later researchers
+re still grappling with the same problem.
+A scene can be described in many ways and include
+details about objects, regions, geometry, location,"
+c2b8b49526e3dd537b641a6495e49a3d1a0ebbf2,Extended Feature-Fusion Guidelines to Improve Image-Based Multi-Modal Biometrics,"Extended Feature-Fusion Guidelines to Improve
+Image-Based Multi-Modal Biometrics
+Dane Brown
+Council for Scientific and Industrial Research
+Information Security
+Pretoria, South Africa"
+c238f871c029d8c33949f8410f8cf3bf79ffc102,No Blind Spots: Full-Surround Multi-Object Tracking for Autonomous Vehicles using Cameras & LiDARs,"No Blind Spots: Full-Surround Multi-Object
+Tracking for Autonomous Vehicles using
+Cameras & LiDARs
+Akshay Rangesh, Member, IEEE, and Mohan M. Trivedi, Fellow, IEEE"
+c2d35b387518496d8100f70e82597b002eba600e,Online Multi-player Tracking in Monocular Soccer Videos,"Available online at www.sciencedirect.com
+AASRI Procedia 00 (2014) 000–000
+014 AASRI Conference on Sports Engineering and Computer Science (SECS 2014)
+Online Multi-player Tracking in Monocular Soccer Videos
+Michael Herrmanna,*, Martin Hoerniga, Bernd Radiga
+Technische Universität München, Image Understanding and Knowledge-Based Systems, Boltzmannstr. 3, D-85748 Garching, Germany"
+c20b2ec72ebf798e9567a145465e37a755fc34d8,Fully Automatic Multi-person Human Motion Capture for VR Applications,"Fully Automatic Multi-person Human Motion Capture
+for VR Applications
+Ahmed Elhayek1,2, Onorina Kovalenko1, Pramod Murthy1,2, Jameel Malik1,2, and
+Didier Stricker1,2
+German Research Centre for Artificial Intelligence (DFKI), Kaiserslautern, Germany
+University of Kaiserslautern, Germany
+{ahmed.elhayek, onorina.kovalenko, pramod.murthy,
+jameel.malik,"
+c2e9300b0e72dca0b95ccd4181fc2a7a5178dea7,Improving Bilayer Product Quantization for Billion-Scale Approximate Nearest Neighbors in High Dimensions,"Improving Bilayer Product Quantization
+for Billion-Scale Approximate Nearest Neighbors in High
+Dimensions
+Artem Babenko
+Yandex
+Moscow Institute of Physics and Technology
+Victor Lempitsky
+Skolkovo Institute of Science and Technology"
+c2cb38fc68b877a96be99b814e8ee437e585f5b2,Mining on Manifolds: Metric Learning without Labels,"Mining on Manifolds: Metric Learning without Labels
+Ahmet Iscen1 Giorgos Tolias1 Yannis Avrithis2 Ondˇrej Chum1
+VRG, FEE, CTU in Prague
+Inria Rennes"
+c2e6daebb95c9dfc741af67464c98f1039127627,Efficient Measuring of Facial Action Unit Activation Intensities using Active Appearance Models,"MVA2013 IAPR International Conference on Machine Vision Applications, May 20-23, 2013, Kyoto, JAPAN
+Ef‌f‌icient Measuring of Facial Action Unit Activation Intensities
+using Active Appearance Models
+Daniel Haase1, Michael Kemmler1, Orlando Guntinas-Lichius2, Joachim Denzler1
+Computer Vision Group, Friedrich Schiller University of Jena, Germany
+Department of Otolaryngology, University Hospital Jena, Germany"
+f65896855e5df3db5422b57ab360287efa213066,Detection of Uncontrolled Motion Behavior in Human Crowds,"IJRET: International Journal of Research in Engineering and Technology eISSN: 2319-1163 | pISSN: 2321-7308
+DETECTION OF UNCONTROLLED MOTION BEHAVIOR IN HUMAN
+CROWDS
+Vijitha V. A1
+Student of M. Tech., Computer Science & Engineering, Sahyadri College of Engineering & Management, Karnataka,
+India"
+f6ba16aee3c40b69dc88c947ae59811104b1bd49,Skeletal Tracking using Microsoft Kinect,"Skeletal Tracking using Microsoft Kinect
+Abhishek Kar
+Advisors: Dr. Amitabha Mukerjee & Dr. Prithwijit Guha
+Department of Computer Science and Engineering, IIT Kanpur"
+f6f06be05981689b94809130e251f9e4bf932660,An Approach to Illumination and Expression Invariant Multiple Classifier Face Recognition,"An Approach to Illumination and Expression Invariant
+International Journal of Computer Applications (0975 – 8887)
+Volume 91 – No.15, April 2014
+Multiple Classifier Face Recognition
+Dalton Meitei Thounaojam
+National Institute of Technology
+Silchar
+Assam: 788010
+India
+Hidangmayum Saxena Devi
+National Institute of Technology
+Silchar
+Assam: 788010
+India
+Romesh Laishram
+Manipur Institute of Technology
+Imphal West: 795001
+India"
+f6742010372210d06e531e7df7df9c01a185e241,Dimensional Affect and Expression in Natural and Mediated Interaction,"Dimensional Affect and Expression in
+Natural and Mediated Interaction
+Michael J. Lyons
+Ritsumeikan, University
+Kyoto, Japan
+October, 2007"
+f6ca29516cce3fa346673a2aec550d8e671929a6,Algorithm for Face Matching Using Normalized Cross - Correlation,"International Journal of Engineering and Advanced Technology (IJEAT)
+ISSN: 2249 – 8958, Volume-2, Issue-4, April 2013
+Algorithm for Face Matching Using Normalized
+Cross-Correlation
+C. Saravanan, M. Surender"
+f614f9ba33554cfd1a474be03520319b51651a35,Cardiac interoceptive learning is modulated by emotional valence perceived from facial expressions,"Social Cognitive and Affective Neuroscience, 2018, 677–686
+doi: 10.1093/scan/nsy042
+Advance Access Publication Date: 6 April 2018
+Original article
+Cardiac interoceptive learning is modulated by
+emotional valence perceived from facial expressions
+Amanda C. Marshall, Antje Gentsch, Lena Schro¨ der, and
+Simone Schu¨ tz-Bosbach
+General and Experimental Psychology Unit, Department of Psychology, Ludwig-Maximilians University
+Munich, D-80802 Munich, Germany
+Correspondence should be addressed to Amanda C. Marshall, General and Experimental Psychology Unit, Department of Psychology, Ludwig-
+Maximilians-University Munich, Leopoldstr. 13, D-80802 Munich, Germany. E-mail:"
+f6684367e7925cd90fb8974640d41823191c7cff,CNN-based Pore Detection and Description for High-Resolution Fingerprint Recognition,"Automatic Dataset Annotation to Learn CNN Pore
+Description for Fingerprint Recognition
+Gabriel Dahia
+Maur´ıcio Pamplona Segundo
+Department of Computer Science, Federal University of Bahia"
+f67a73c9dd1e05bfc51219e70536dbb49158f7bc,A Gaussian Mixture Model for Classifying the Human Age using DWT and Sammon Map,"Journal of Computer Science 10 (11): 2292-2298, 2014
+ISSN: 1549-3636
+© 2014 Nithyashri and Kulanthaivel, This open access article is distributed under a Creative Commons Attribution
+(CC-BY) 3.0 license
+A GAUSSIAN MIXTURE MODEL FOR CLASSIFYING THE
+HUMAN AGE USING DWT AND SAMMON MAP
+J. Nithyashri and 2G. Kulanthaivel
+Department of Computer Science and Engineering, Sathyabama University, Chennai, India
+Department of Electronics Engineering, NITTTR, Chennai, India
+Received 2014-05-08; Revised 2014-05-23; Accepted 2014-11-28"
+f663ad5467721159263c1cde261231312893f45d,UvA-DARE ( Digital Academic Repository ) Gaze Embeddings for Zero-Shot Image Classification,"UvA-DARE (Digital Academic Repository)
+Gaze Embeddings for Zero-Shot Image Classification
+Karessli, N.; Akata, Z.; Schiele, B.; Bulling, A.
+Published in:
+0th IEEE Conference on Computer Vision and Pattern Recognition
+0.1109/CVPR.2017.679
+Link to publication
+Citation for published version (APA):
+Karessli, N., Akata, Z., Schiele, B., & Bulling, A. (2017). Gaze Embeddings for Zero-Shot Image Classification. In
+0th IEEE Conference on Computer Vision and Pattern Recognition: CVPR 2017 : 21-26 July 2016, Honolulu,
+Hawaii : proceedings (pp. 6412-6421). Piscataway, NJ: IEEE. DOI: 10.1109/CVPR.2017.679
+General rights
+It is not permitted to download or to forward/distribute the text or part of it without the consent of the author(s) and/or copyright holder(s),
+other than for strictly personal, individual use, unless the work is under an open content license (like Creative Commons).
+Disclaimer/Complaints regulations
+If you believe that digital publication of certain material infringes any of your rights or (privacy) interests, please let the Library know, stating
+your reasons. In case of a legitimate complaint, the Library will make the material inaccessible and/or remove it from the website. Please Ask
+the Library: http://uba.uva.nl/en/contact, or a letter to: Library of the University of Amsterdam, Secretariat, Singel 425, 1012 WP Amsterdam,
+The Netherlands. You will be contacted as soon as possible.
+Download date: 18 Nov 2018"
+f672d6352a5864caab5a5a286fbc1ce042b55c16,Stabilizing GAN Training with Multiple Random Projections,"Under review as a conference paper at ICLR 2018
+Stabilizing GAN Training with
+Multiple Random Projections
+Anonymous authors
+Paper under double-blind review"
+f66bc143d85d2b1d9aafec20f598a21d2b90b0c0,Seeing 3 D Objects in a Single 2 D Image,"Accepted for publication in the Proceedings of the 12th International Conference of Computer Vision, 2009
+Seeing 3D Objects in a Single 2D Image
+Diego Rother
+Johns Hopkins University"
+f6785ffe6fe2c30887637a61061a64f4d6725979,BAR: Bayesian Activity Recognition using variational inference,"BAR: Bayesian Activity Recognition using variational
+inference
+Ranganath Krishnan
+Mahesh Subedar
+Omesh Tickoo
+Intel Labs
+Hillsboro, OR (USA)"
+f6c70635241968a6d5fd5e03cde6907022091d64,Measuring Deformations and Illumination Changes in Images with Applications to Face Recognition,
+f636c087091847bd4ccd6d196ada6c0894b52d88,Rate-Accuracy Trade-Off in Video Classification with Deep Convolutional Neural Networks,"Rate-Accuracy Trade-Off In Video Classification
+With Deep Convolutional Neural Networks
+Mohammad Jubran, Alhabib Abbas, Aaron Chadha and Yiannis Andreopoulos, Senior Member, IEEE"
+f6ce34d6e4e445cc2c8a9b8ba624e971dd4144ca,Cross-Label Suppression: A Discriminative and Fast Dictionary Learning With Group Regularization,"Cross-label Suppression: A Discriminative and Fast
+Dictionary Learning with Group Regularization
+Xiudong Wang and Yuantao Gu∗
+April 24, 2017"
+f67afec4226aba674e786698b39b85b124945ddd,Spatial Variational Auto-Encoding via Matrix-Variate Normal Distributions,"Spatial Variational Auto-Encoding via Matrix-Variate
+Normal Distributions
+Zhengyang Wang
+School of Electrical Engineering
+nd Computer Science
+Washington State University
+Pullman, WA 99163
+Hao Yuan
+School of Electrical Engineering
+nd Computer Science
+Washington State University
+Pullman, WA 99163
+Shuiwang Ji
+School of Electrical Engineering
+nd Computer Science
+Washington State University
+Pullman, WA 99163"
+f6fa97fbfa07691bc9ff28caf93d0998a767a5c1,K2-means for Fast and Accurate Large Scale Clustering,"k2-means for fast and accurate large scale clustering
+Eirikur Agustsson
+Computer Vision Lab
+D-ITET
+ETH Zurich
+Radu Timofte
+Computer Vision Lab
+D-ITET
+ETH Zurich
+Luc Van Gool
+ESAT, KU Leuven
+D-ITET, ETH Zurich"
+f6cf2108ec9d0f59124454d88045173aa328bd2e,Robust User Identification Based on Facial Action Units Unaffected by Users' Emotions,"Robust user identification based on facial action units
+unaffected by users’ emotions
+Ricardo Buettner
+Aalen University, Germany"
+f614b449ee2fd45974214014c109d993aab73343,A Mathematical Motivation for Complex-Valued Convolutional Networks,"A Mathematical Motivation for
+Complex-valued Convolutional Networks
+Joan Bruna, Soumith Chintala, Yann LeCun, Serkan Piantino, Arthur Szlam, Mark Tygert
+Facebook Artificial Intelligence Research, 1 Facebook Way, Menlo Park, California 94025
+Keywords: deep learning, neural networks, harmonic analysis"
+f68f20868a6c46c2150ca70f412dc4b53e6a03c2,Differential Evolution to Optimize Hidden Markov Models Training: Application to Facial Expression Recognition,"Differential Evolution to Optimize
+Hidden Markov Models Training:
+Application to Facial Expression
+Recognition
+Khadoudja Ghanem, Amer Draa, Elvis Vyumvuhore and
+Ars`ene Simbabawe
+MISC Laboratory, Constantine 2 University, Constantine, Algeria
+The base system in this paper uses Hidden Markov
+Models (HMMs) to model dynamic relationships among
+facial features in facial behavior interpretation and un-
+derstanding field. The input of HMMs is a new set
+of derived features from geometrical distances obtained
+from detected and automatically tracked facial points.
+Numerical data representation which is in the form of
+multi-time series is transformed to a symbolic repre-
+sentation in order to reduce dimensionality, extract the
+most pertinent information and give a meaningful repre-
+sentation to humans. The main problem of the use of
+HMMs is that the training is generally trapped in local
+minima, so we used the Differential Evolution (DE)"
+f6cf220b8ef17e0a4bef0ff5aadc40eec9653159,Automated System for interpreting Non-verbal Communication in Video Conferencing,"Chetana Gavankar et al / International Journal on Computer Science and Engineering Vol.2(1), 2010, 22-27
+Automated System for interpreting Non-verbal
+Communication in Video Conferencing
+Chetana Gavankar
+Senior Lecturer,
+Department of Information Technology
+Cummins College of Engineering for Women
+Karve Nagar, Pune - 411052
+for more effective"
+e909b9e0bbfc37d0b99acad5014e977daac7e2bd,Adversarial Training of Variational Auto-Encoders for High Fidelity Image Generation,"Adversarial Training of Variational Auto-encoders for
+High Fidelity Image Generation
+Salman H. Khan†, Munawar Hayat ‡, Nick Barnes †
+Data61 - CSIRO and ANU, Australia, ‡University of Canberra, Australia,"
+e9ac109c395ededb23dfc78fe85d76eeb772ee7e,A Multilevel Mixture-of-Experts Framework for Pedestrian Classification,"A Multilevel Mixture-of-Experts Framework for
+Pedestrian Classification
+Markus Enzweiler and Dariu M. Gavrila"
+e9ed17fd8bf1f3d343198e206a4a7e0561ad7e66,Cognitive Learning for Social Robot through Facial Expression from Video Input,"International Journal of Enhanced Research in Science Technology & Engineering, ISSN: 2319-7463
+Vol. 3 Issue 1, January-2014, pp: (362-365), Impact Factor: 1.252, Available online at: www.erpublications.com
+Cognitive Learning for Social Robot through
+Facial Expression from Video Input
+Neeraj Rai1, Deepak Rai2
+Department of Automation & Robotics, 2Department of Computer Science & Engg.
+,2Ajay Kumar Garg Engineering College, Ghaziabad, UP, India"
+e988be047b28ba3b2f1e4cdba3e8c94026139fcf,Multi-Task Convolutional Neural Network for Pose-Invariant Face Recognition,"Multi-Task Convolutional Neural Network for
+Pose-Invariant Face Recognition
+Xi Yin and Xiaoming Liu Member, IEEE,"
+e9d43231a403b4409633594fa6ccc518f035a135,Deformable Part Models with CNN Features,"Deformable Part Models with CNN Features
+Pierre-Andr´e Savalle1, Stavros Tsogkas1,2, George Papandreou3, Iasonas
+Kokkinos1,2
+Ecole Centrale Paris,2 INRIA, 3TTI-Chicago (cid:63)"
+e96a3d4df7f6956ba185107747c3d7c16d1ed845,Unite the People: Closing the Loop Between 3D and 2D Human Representations,"Unite the People: Closing the Loop Between 3D and 2D Human Representations
+Christoph Lassner1,2
+Javier Romero3,*
+Martin Kiefel2
+Federica Bogo4,*
+Michael J. Black2
+Peter V. Gehler5,*
+Bernstein Center for Computational Neuroscience, T¨ubingen, Germany
+MPI for Intelligent Systems, T¨ubingen, Germany
+Body Labs Inc., New York, United States
+Microsoft, Cambridge, UK
+5University of W¨urzburg, Germany"
+e941ee2d584938e6509c0676466023f8b43b9486,Appearance based tracking with background subtraction,"The 8th International
+Computer Science
+April 26-28, 2013. Colombo,
+Sri Lanka
+& Education (ICCSE 2013)
+Conference on
+SuD1.4
+Appearance Based Tracking with Background
+Dileepa Joseph Jayamanne
+Subtraction
+Jayathu Samarawickrama
+Ranga Rodrigo
+Electronic
+Engineering
+Telecommunication
+Electronic
+Engineering
+Telecommunication
+Telecommunication
+Electronic"
+e91c7dbd33a3047c70d550e201ebdf4353cbe929,Re-identification for Online Person Tracking by Modeling Space-Time Continuum,"Re-identification for Online Person Tracking by Modeling Space-Time
+Continuum
+Neeti Narayan, Nishant Sankaran, Srirangaraj Setlur and Venu Govindaraju
+University at Buffalo, SUNY
+{neetinar, n6, setlur,"
+e9dc096762f503cfe0d56066c02d27082665b3cf,Face Sketch to Photo Matching Using LFDA,"International Journal of Science and Research (IJSR)
+ISSN (Online): 2319-7064
+Face Sketch to Photo Matching Using LFDA
+Pushpa Gopal Ambhore1, Lokesh Bijole2
+Research Scholor of Amravati University,
+Computer Engineering Department Padm. Dr. V. B. Kolte Coe Malkapur Maharashtra, India
+Assistant Professor, Computer Engineering Department Padm. Dr.V.B. Kolte coe Malkapur Maharashtra, India"
+e917bb1f7efdfc448b8b63c52e8f643e68630a11,3D information is valuable for the detection of humans in video streams,"3D information is valuable for the detection of humans
+in video streams
+Sébastien Piérard
+Antoine Lejeune
+Marc Van Droogenbroeck
+INTELSIG Laboratory
+Montefiore Institute
+University of Liège, Belgium
+INTELSIG Laboratory
+Montefiore Institute
+University of Liège, Belgium
+INTELSIG Laboratory
+Montefiore Institute
+University of Liège, Belgium
+Email :
+Email :
+Email :"
+e9ae8bbfec913300eedede3ec48acb56c15ebdea,DisguiseNet : A Contrastive Approach for Disguised Face Verification in the Wild,"DisguiseNet : A Contrastive Approach for Disguised Face Verification in the Wild
+Skand Vishwanath Peri
+Abhinav Dhall
+Learning Affect and Semantic Image AnalysIs (LASII) Group,
+Indian Institute of Technology Ropar, India"
+e9fcd15bcb0f65565138dda292e0c71ef25ea8bb,Analysing Facial Regions for Face Recognition Using Forensic Protocols,"Repositorio Institucional de la Universidad Autónoma de Madrid
+https://repositorio.uam.es
+Esta es la versión de autor de la comunicación de congreso publicada en:
+This is an author produced version of a paper published in:
+Highlights on Practical Applications of Agents and Multi-Agent Systems:
+International Workshops of PAAMS. Communications in Computer and
+Information Science, Volumen 365. Springer, 2013. 223-230
+DOI: http://dx.doi.org/10.1007/978-3-642-38061-7_22
+Copyright: © 2013 Springer-Verlag
+El acceso a la versión del editor puede requerir la suscripción del recurso
+Access to the published version may require subscription"
+e939fb6b762de242b22e295940e0d9d7d259e442,Depth Prediction Without the Sensors: Leveraging Structure for Unsupervised Learning from Monocular Videos,"Depth Prediction Without the Sensors: Leveraging Structure for Unsupervised
+Learning from Monocular Videos
+Vincent Casser∗1
+Soeren Pirk
+Reza Mahjourian2
+Anelia Angelova
+Institute for Applied Computational Science, Harvard University; Google Brain
+Google Brain
+University of Texas at Austin; Google Brain
+{pirk, rezama,"
+e94804b7f2515740671a678239eccdb79a050272,Generating a Fusion Image: One's Identity and Another's Shape,"Generating a Fusion Image: One’s Identity and Another’s Shape
+Donggyu Joo∗
+School of Electrical Engineering, KAIST, South Korea
+Doyeon Kim∗
+{jdg105, doyeon kim,
+Junmo Kim"
+e9363f4368b04aeaa6d6617db0a574844fc59338,BenchIP: Benchmarking Intelligence Processors,"BENCHIP: Benchmarking Intelligence
+Processors
+Jinhua Tao1, Zidong Du1,2, Qi Guo1,2, Huiying Lan1, Lei Zhang1
+Shengyuan Zhou1, Lingjie Xu3, Cong Liu4, Haifeng Liu5, Shan Tang6
+Allen Rush7,Willian Chen7, Shaoli Liu1,2, Yunji Chen1, Tianshi Chen1,2
+ICT CAS,2Cambricon,3Alibaba Infrastructure Service, Alibaba Group
+IFLYTEK,5JD,6RDA Microelectronics,7AMD"
+f17d6db4844f26a023f92b8771a1c33cea91b9e4,1 Million Captioned Dutch Newspaper Images,"1 Million Captioned Dutch Newspaper Images
+Desmond Elliott∗† and Martijn Kleppe‡
+ILLC, University of Amsterdam; †CWI; ‡Erasmus University Rotterdam"
+f13552e2e2843716e7a1c7c2492cfcc6e86aa03c,Reinforced Pipeline Optimization: Behaving Optimally,"Under review as a conference paper at ICLR 2019
+REINFORCED PIPELINE OPTIMIZATION: BEHAVING
+OPTIMALLY WITH NON-DIFFERENTIABILITIES
+Anonymous authors
+Paper under double-blind review"
+f1ec3752535e0aa6aafe3930974a22250e652ca1,Gender and emotion recognition with implicit user signals,"Gender and Emotion Recognition with Implicit User Signals
+Maneesh Bilalpur
+International Institute of Information
+Technology
+Hyderabad, India
+Seyed Mostafa Kia
+Donders Institute, Radboud
+University
+Nijmegen, Netherlands
+Manisha Chawla
+Centre for Cognitive Science, Indian
+Institute of Technology
+Gandhinagar, India
+Tat-Seng Chua
+School of Computing, National
+University of Singapore
+Singapore
+Ramanathan Subramanian
+University of Glasgow & Advanced
+Digital Sciences Center"
+f18c34458460b9b62b51213b9165b37c057c5837,Unsupervised Object Discovery and Co-Localization by Deep Descriptor Transforming,"Noname manuscript No.
+(will be inserted by the editor)
+Unsupervised Object Discovery and Co-Localization
+by Deep Descriptor Transforming
+Xiu-Shen Wei · Chen-Lin Zhang · Jianxin Wu · Chunhua Shen ·
+Zhi-Hua Zhou
+Received: date / Accepted: date"
+f16a605abb5857c39a10709bd9f9d14cdaa7918f,Fast greyscale road sign model matching and recognition,"Fast greyscale road sign model matching
+and recognition
+Sergio Escalera and Petia Radeva
+Centre de Visió per Computador
+Edifici O – Campus UAB, 08193 Bellaterra, Barcelona, Catalonia, Spain"
+f1aa120fb720f6cfaab13aea4b8379275e6d40a2,InverseFaceNet: Deep Single-Shot Inverse Face Rendering From A Single Image,"InverseFaceNet: Deep Single-Shot Inverse Face Rendering From A Single Image
+Hyeongwoo Kim1
+Justus Thies2
+Max-Planck-Institute for Informatics
+Michael Zollhöfer1
+Christian Richardt3
+University of Erlangen-Nuremberg 3 University of Bath
+Christian Theobalt1
+Ayush Tewari1
+Figure 1. Our single-shot deep inverse face renderer InverseFaceNet obtains a high-quality geometry, reflectance and illumination estimate
+from just a single input image. We jointly recover the face pose, shape, expression, reflectance and incident scene illumination. From left to
+right: input photo, our estimated face model, its geometry, and the pointwise Euclidean error compared to Garrido et al. [14]."
+f1a05136c8b8f9334a4b3d9de2a4b192d2c762c2,Scene Classification via Hypergraph-Based Semantic Attributes Subnetworks Identification,"Scene Classification via Hypergraph-Based
+Semantic Attributes Subnetworks Identification
+Sun-Wook Choi, Chong Ho Lee, and In Kyu Park
+Department of Information and Communication Engineering
+Inha University, Incheon 402-751, Korea"
+f1ba2fe3491c715ded9677862fea966b32ca81f0,Face Tracking and Recognition in Videos : HMM Vs KNN,"ISSN: 2321-7782 (Online)
+Volume 1, Issue 7, December 2013
+International Journal of Advance Research in
+Computer Science and Management Studies
+Research Paper
+Available online at: www.ijarcsms.com
+Face Tracking and Recognition in Videos:
+HMM Vs KNN
+Madhumita R. Baviskar
+Assistant Professor
+Department of Computer Engineering
+MIT College of Engineering (Pune University)
+Pune - India"
+f1471a408369689e2fc956b417dce24e47557a38,A Novel Face Template Protection Algorithm Based on the Fusion of Chaos Theory and RSA Encryption,"International Journal of Security and Its Applications
+Vol. 10, No. 6 (2016) pp.315-330
+http://dx.doi.org/10.14257/ijsia.2016.10.6.30
+A Novel Face Template Protection Algorithm Based on the Fusion
+of Chaos Theory and RSA Encryption
+Liu Yunan1, Zhao Fudong2, Xu Yanli3 and Cao Yu2*
+.School of Foreign Languages, Harbin University of Science and Technology,
+Harbin, 150080, China
+.School of Automation, Harbin University of Science and Technology, Harbin,
+150080, China
+.School of Foreign Languages, Northeast Forestry University, Harbin, 150040,
+China"
+f1c2ba8c7797c4844fa61068b3ce9d319e6ced3f,Human Head Tracking Based on Inheritance and Evolution Concept,"MVA2009 IAPR Conference on Machine Vision Applications, May 20-22, 2009, Yokohama, JAPAN
+Human Head Tracking Based on Inheritance and Evolution Concept
+Yi Hu, Tetsuya Takamori
+Fujifilm Corporation, Japan
+798, Miyanodai, Kaisei-machi, Ashigarakami-gun, Kanagawa, 258-8538 JAPAN
+{yi_hu,"
+f19527b2ceabf50831e78ac04161107c936efb2b,Discriminative Sparse Neighbor Approximation for Imbalanced Learning,"Discriminative Sparse Neighbor Approximation
+for Imbalanced Learning
+Chen Huang, Chen Change Loy, Member, IEEE, and Xiaoou Tang, Fellow, IEEE"
+f1d090fcea63d9f9e835c49352a3cd576ec899c1,Single-hidden Layer Feedforward Neual network training using class geometric information,"Iosifidis, A., Tefas, A., & Pitas, I. (2015). Single-Hidden Layer Feedforward
+Neual Network Training Using Class Geometric Information. In . J. J.
+Merelo, A. Rosa, J. M. Cadenas, A. Dourado, K. Madani, & J. Filipe (Eds.),
+Computational Intelligence: International Joint Conference, IJCCI 2014
+Rome, Italy, October 22-24, 2014 Revised Selected Papers. (Vol. III, pp.
+351-364). (Studies in Computational Intelligence; Vol. 620). Springer. DOI:
+10.1007/978-3-319-26393-9_21
+Peer reviewed version
+Link to published version (if available):
+10.1007/978-3-319-26393-9_21
+Link to publication record in Explore Bristol Research
+PDF-document
+University of Bristol - Explore Bristol Research
+General rights
+This document is made available in accordance with publisher policies. Please cite only the published
+version using the reference above. Full terms of use are available:
+http://www.bristol.ac.uk/pure/about/ebr-terms.html"
+f157daaffa1754aae5963d9c49247142b07c8d4a,Dct-based Reduced Face for Face Recognition,"International Journal of Information Technology and Knowledge Management
+January-June 2012, Volume 5, No. 1, pp. 97-100
+DCT-BASED REDUCED FACE FOR FACE RECOGNITION
+Vikas Maheshkar1, Sushila Kamble2, Suneeta Agarwal3, and Vinay Kumar Srivastava4"
+f174b24860b4cacbe047d3a5650cf8866d2244d9,Monocular Depth Estimation by Learning from Heterogeneous Datasets,"Monocular Depth Estimation by Learning from Heterogeneous
+Datasets
+Akhil Gurram1,2, Onay Urfalioglu2, Ibrahim Halfaoui2, Fahd Bouzaraa2 and Antonio M. L´opez1"
+f113aed343bcac1021dc3e57ba6cc0647a8f5ce1,A Survey on Mining of Weakly Labeled Web Facial Images and Annotation,"International Journal of Science and Research (IJSR)
+ISSN (Online): 2319-7064
+Index Copernicus Value (2013): 6.14 | Impact Factor (2014): 5.611
+A Survey on Mining of Weakly Labeled Web Facial
+Images and Annotation
+Tarang Boharupi1, Pranjali Joshi2
+Pune Institute of Computer Technology, Pune, India
+Professor, Pune Institute of Computer Technology, Pune, India
+the proposed system which"
+f1052df3e311b7caa563685e741e0a1bb6b288df,A Hierarchical Fusion Strategy based Multimodal Biometric System,"The International Arab Conference on Information Technology (ACIT’2013)
+A Hierarchical Fusion Strategy based Multimodal
+Biometric System
+Youssef Elmir, 2Zakaria Elberrichi and 2Réda Adjoudj
+Faculty of Sciences and Technology, University of Adrar, Algeria
+Faculty of Technology, Djillali Liabès University of Sidi Bel Abbès, Algeria"
+f19777e37321f79e34462fc4c416bd56772031bf,Literature Review of Image Compression Algorithm,"International Journal of Scientific & Engineering Research, Volume 3, Issue 6, June-2012 1
+ISSN 2229-5518
+Literature Review of Image Compression Algorithm
+Dr. B. Chandrasekhar
+Padmaja.V.K
+email: email::
+Jawaharlal Technological University, Anantapur"
+f16921c1c6e8bce89bce7679cbd824d65b494e4d,The face of love: spontaneous accommodation as social emotion regulation.,"Personality and Social Psychology
+Bulletin
+http://psp.sagepub.com/
+The Face of Love : Spontaneous Accommodation as Social Emotion Regulation
+Pers Soc Psychol Bull
+Michael Häfner and Hans IJzerman
+2011 37: 1551 originally published online 21 July 2011
+DOI: 10.1177/0146167211415629
+The online version of this article can be found at:
+http://psp.sagepub.com/content/37/12/1551
+Published by:
+http://www.sagepublications.com
+On behalf of:
+Society for Personality and Social Psychology
+Additional services and information for
+Personality and Social Psychology Bulletin
+can be found at:
+Email Alerts:
+http://psp.sagepub.com/cgi/alerts
+Subscriptions:"
+f11d070cdc9ee12b201757ca4a50a3682967ba0c,Spatial Language Understanding with Multimodal Graphs using Declarative Learning based Programming,"Proceedings of the 2nd Workshop on Structured Prediction for Natural Language Processing, pages 33–43
+Copenhagen, Denmark, September 7–11, 2017. c(cid:13)2017 Association for Computational Linguistics"
+f19ab817dd1ef64ee94e94689b0daae0f686e849,Blickrichtungsunabhängige Erkennung von Personen in Bild- und Tiefendaten,"TECHNISCHE UNIVERSIT¨AT M ¨UNCHEN
+Lehrstuhl f¨ur Mensch-Maschine-Kommunikation
+Blickrichtungsunabh¨angige Erkennung von
+Personen in Bild- und Tiefendaten
+Andre St¨ormer
+Vollst¨andiger Abdruck der von der Fakult¨at f¨ur Elektrotechnik und Informationstechnik
+der Technischen Universit¨at M¨unchen zur Erlangung des akademischen Grades eines
+Doktor-Ingenieurs (Dr.-Ing.)
+genehmigten Dissertation.
+Vorsitzender:
+Univ.-Prof. Dr.-Ing. Thomas Eibert
+Pr¨ufer der Dissertation:
+. Univ.-Prof. Dr.-Ing. habil. Gerhard Rigoll
+. Univ.-Prof. Dr.-Ing. Horst-Michael Groß,
+Technische Universit¨at Ilmenau
+Die Dissertation wurde am 16.06.2009 bei der Technischen Universit¨at M¨unchen einge-
+reicht und durch die Fakult¨at f¨ur Elektrotechnik und Informationstechnik am 30.10.2009
+angenommen."
+f196a79c5e4b570013e4aa031cdd0fc0c98fc07d,Interactively Picking Real-World Objects with Unconstrained Spoken Language Instructions,"Interactively Picking Real-World Objects with
+Unconstrained Spoken Language Instructions
+Jun Hatori∗, Yuta Kikuchi∗, Sosuke Kobayashi∗, Kuniyuki Takahashi∗,
+Yuta Tsuboi∗, Yuya Unno∗, Wilson Ko, Jethro Tan†"
+f1c76d97caa6f882764c1382c622a2dfb6aade43,CoreRank: Redeeming &#x201C;Sick Silicon&#x201D; by Dynamically Quantifying Core-Level Healthy Condition,"CoreRank: Redeeming “Sick Silicon”
+by Dynamically Quantifying Core-Level
+Healthy Condition
+Guihai Yan, Member, IEEE, Faqiang Sun, Huawei Li, Senior Member, IEEE, and
+Xiaowei Li, Senior Member, IEEE"
+f1bb2c95dc270ffa9c2f88e29ae5d2178b4459cb,A Generative Model of People in Clothing,"A Generative Model of People in Clothing
+Christoph Lassner1, 2
+Gerard Pons-Moll2
+Peter V. Gehler3,*
+BCCN, Tübingen
+MPI for Intelligent Systems, Tübingen 3University of Würzburg
+Figure 1: Random examples of people generated with our model. For each row, sampling is conditioned on the silhouette
+displayed on the left. Our proposed framework also supports unconditioned sampling as well as conditioning on local
+appearance cues, such as color."
+f131a654bbf4c8de0679d3c6054c10bba4a919d4,Vision-based Driver Assistance Systems,"Vision-based Driver Assistance Systems
+.enpeda.. (Environment Perception and Driver Assistance) Project
+CITR, Auckland, New Zealand
+Reinhard Klette
+5 February 2015"
+e79847c3bf3ffefe9304e212d8dda7aaa29eaada,From Deterministic to Generative: Multi-Modal Stochastic RNNs for Video Captioning,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+From Deterministic to Generative: Multi-Modal
+Stochastic RNNs for Video Captioning
+Jingkuan Song, Yuyu Guo, Lianli Gao, Xuelong Li, IEEE Fellow Alan Hanjalic, IEEE Fellow Heng Tao Shen"
+e7906370eae8655fb69844ae1a3d986c9f37c902,Face recognition using Deep Learning,"POLYTECHNIC UNIVERSITY OF CATALONIA
+MASTER THESIS
+Face recognition using Deep
+Learning
+Author:
+Xavier SERRA
+Advisor:
+Javier CASTÁN
+Tutor:
+Sergio ESCALERA
+This master thesis has been developed at GoldenSpear LLC
+January 2017"
+e76798bddd0f12ae03de26b7c7743c008d505215,Joint Max Margin and Semantic Features for Continuous Event Detection in Complex Scenes,
+e75cd1379b07d77358e5a2f4a042f624066603b6,Weakly-Supervised Learning of Visual Relations,"Weakly-supervised learning of visual relations
+Julia Peyre1,2
+Ivan Laptev1,2
+Cordelia Schmid2,4
+Josef Sivic1,2,3"
+e778e618862ea1c9a97e89e942228c4de98c9a86,Automated Pruning for Deep Neural Network Compression,"Automated Pruning for Deep Neural Network Compression
+Franco Manessi1†, Alessandro Rozza1†, Simone Bianco2, Paolo Napoletano2, Raimondo Schettini2
+lastminute.com group — Strategic Analytics
+{first name.last
+Universit`a degli Studi di Milano Bicocca — DISCo {first name.last"
+e74bddccc40e65b31081a1599cbe7385d5d3e1c0,Bottom-Up and Top-Down Attention for Image Captioning and Visual Question Answering,"Bottom-Up and Top-Down Attention for Image Captioning
+and Visual Question Answering
+Peter Anderson1∗
+Xiaodong He2
+Chris Buehler3
+Damien Teney4
+Mark Johnson5
+Stephen Gould1
+Lei Zhang3
+Australian National University 2JD AI Research 3Microsoft Research 4University of Adelaide 5Macquarie University"
+e7cac91da51b78eb4a28e194d3f599f95742e2a2,"Positive Feeling, Negative Meaning: Visualizing the Mental Representations of In-Group and Out-Group Smiles","RESEARCH ARTICLE
+Positive Feeling, Negative Meaning:
+Visualizing the Mental Representations of In-
+Group and Out-Group Smiles
+Andrea Paulus1☯*, Michaela Rohr1☯, Ron Dotsch2,3, Dirk Wentura1
+Saarland University, Saarbrücken, Germany, 2 Utrecht University, Utrecht, the Netherlands,
+Behavioural Science Institute, Radboud University, Nijmegen, the Netherlands
+☯ These authors contributed equally to this work."
+e7dc0d5545e6e028b03a82d2f5bb3bccc995a0d7,A New Fast and Efficient HMM-Based Face Recognition System Using a 7-State HMM Along With SVD Coefficients,"Archive of SID
+A New Fast and Efficient HMM-Based Face Recognition
+System Using a 7-State HMM Along With SVD Coefficients
+H. Miar-Naimi* and P. Davari*"
+e7f00f6e5994c5177ec114ee353cc7064d40a78f,Back to Basic: Do Children with Autism Spontaneously Look at Screen Displaying a Face or an Object?,"Hindawi Publishing Corporation
+Autism Research and Treatment
+Volume 2013, Article ID 835247, 7 pages
+http://dx.doi.org/10.1155/2013/835247
+Research Article
+Back to Basic: Do Children with Autism Spontaneously Look at
+Screen Displaying a Face or an Object?
+Marie Guimard-Brunault,1,2,3,4 Nadia Hernandez,3 Laetitia Roché,3 Sylvie Roux,3
+Catherine Barthélémy,1,2,3 Joëlle Martineau,2,3 and Frédérique Bonnet-Brilhault1,2,3
+CHRU de Tours, Centre Universitaire de P´edopsychiatrie, 2 Boulevard Tonnell´e, 37044 Tours Cedex 9, France
+Universit´e Franc¸ois Rabelais de Tours, 60 rue du Plat D’Etain, 37020 Tours Cedex 1, France
+UMR Inserm U 930, ´Equipe 1: Imagerie et Cerveau, Universit´e Franc¸ois Rabelais de Tours, Tours, France
+UMR Inserm U 930, ´Equipe 1: Imagerie et Cerveau, CHRU de Tours-Hˆopital Bretonneau, 2 boulevard Tonnell´e,
+Bˆat B1A, 1er Etage, 37044 Tours Cedex 9, France
+Correspondence should be addressed to Marie Guimard-Brunault;
+Received 29 June 2013; Revised 29 September 2013; Accepted 21 October 2013
+Academic Editor: Elizabeth Aylward
+Copyright © 2013 Marie Guimard-Brunault et al. This is an open access article distributed under the Creative Commons
+Attribution License, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is
+properly cited."
+e72e852dca333d66559dbcfb050140fac5affe4f,Anatomical Landmark Tracking by One-shot Learned Priors for Augmented Active Appearance Models,"DataωLDAFull AAMAUGMENTED AAMSubset AAMLocal TrackingextractionlearnmodelOne-shotDetectortraintrainLower LegContraintsEipolar ConstraintsDistance ConstraintsTorso ConstraintsFigure1:BasedonfewannotatedbiplanarrecordedtrainingimagesanAugmentedAAM(HaaseandDenzler,2013)istrained,consistingofanatomicalknowledge,afullmulti-viewAAMmodel,anAAMmodelofthetorsoland-marksubset,epipolarconstraintsandalocaltracking-by-detectionpriorintroducedinthispaper.In(HaaseandDenzler,2013)ActiveAppearanceModels(AAM)(Cootesetal.,2001)havebeenap-pliedtoseveralbipedalbirdlocomotiondatasets.OnecrucialconclusionofthisworkisthatAAMsneedsubstantialconstraintsfromvarioussources.Withthesupportofadditionalanatomicalknowledge,i.e.re-gionsegmentation,multi-viewacquisition,andlocallandmarktracking,fortheanimalslowerlimbsys-tem,theresultingAugmentedAAM(HaaseandDen-zler,2013)providesrobustresultsforthemajorityoftheprocesseddatasets.However,theappliedonlinetrackingapproach(Amthoretal.,2012)suffersfrom246MothesO.andDenzlerJ.AnatomicalLandmarkTrackingbyOne-shotLearnedPriorsforAugmentedActiveAppearanceModels.DOI:10.5220/0006133302460254InProceedingsofthe12thInternationalJointConferenceonComputerVision,ImagingandComputerGraphicsTheoryandApplications(VISIGRAPP2017),pages246-254ISBN:978-989-758-227-1Copyrightc(cid:13)2017bySCITEPRESS–ScienceandTechnologyPublications,Lda.Allrightsreserved"
+e78394213ae07b682ce40dc600352f674aa4cb05,Expression-invariant three-dimensional face recognition,"Expression-invariant three-dimensional face recognition
+Alexander M. Bronstein
+Email:
+Michael M. Bronstein
+Ron Kimmel
+Computer Science Department,
+Technion – Israel Institute of Technology,
+Haifa 32000, Israel
+One of the hardest problems in face recognition is dealing with facial expressions. Finding an
+expression-invariant representation of the face could be a remedy for this problem. We suggest
+treating faces as deformable surfaces in the context of Riemannian geometry, and propose to ap-
+proximate facial expressions as isometries of the facial surface. This way, we can define geometric
+invariants of a given face under different expressions. One such invariant is constructed by iso-
+metrically embedding the facial surface structure into a low-dimensional flat space. Based on this
+approach, we built an accurate three-dimensional face recognition system that is able to distinguish
+between identical twins under various facial expressions. In this chapter we show how under the
+near-isometric model assumption, the dif‌f‌icult problem of face recognition in the presence of facial
+expressions can be solved in a relatively simple way.
+0.1 Introduction
+It is well-known that some characteristics or behavior patterns of the human body are strictly"
+e79a34f9942172ad97c5fadca3701db3e29d32e2,Fusiform Correlates of Facial Memory in Autism,"NIH Public Access
+Author Manuscript
+Behav Sci (Basel). Author manuscript; available in PMC 2014 April 21.
+Published in final edited form as:
+Behav Sci (Basel). ; 3(3): 348–371. doi:10.3390/bs3030348.
+Fusiform Correlates of Facial Memory in Autism
+Haley G. Trontel1, Tyler C. Duffield2, Erin D. Bigler2,3,4,*, Alyson Froehlich5, Molly B.D.
+Prigge5, Jared A. Nielsen5, Jason R. Cooperrider5, Annahir N. Cariello5, Brittany G.
+Travers6, Jeffrey S. Anderson7, Brandon A. Zielinski8, Andrew Alexander6,11, Nicholas
+Lange9,10, and Janet E. Lainhart11,12
+Department of Psychology, University of Montana, Missoula, MT 59812, USA;
+Department of Psychology, Brigham Young University, Provo, UT 84604,
+USA; (T.C.D.); (E.D.B.) 3Neuroscience Center,
+Brigham Young University, Provo, UT 84604, USA 4The Brain Institute of Utah, University of
+Utah, Salt Lake City, UT 84112, USA 5Department of Psychiatry, University of Utah, Salt Lake
+City, UT 84112, USA; (A.F.);
+(M.B.D.P); (J.A.N.); (J.R.C.);
+(A.N.C.) 6Department of Medical Physics, University of Wisconsin,
+Madison, WI 53706, USA; (B.G.T.); (A.A.)
+7Department of Radiology, University of Utah, Salt Lake City, UT 84112, USA;"
+e7f4951c1106bff0460665ef67d11fb9c2d07c41,Machine Vision-Based Analysis of Gaze and Visual Context: an Application to Visual Behavior of Children with Autism Spectrum Disorders,"Machine Vision-Based Analysis of Gaze and
+Visual Context: an Application to Visual
+Behavior of Children with Autism Spectrum
+Disorders
+Basilio Noris
+MSc/BSc in Computer Science, Université de Lausanne, 2005
+Dissertation
+Submitted to the School of Engineering
+in partial fulfillment of the requirements for the degree of
+Doctor of Philosophy
+Ecole Polytechnique Fédérale de Lausanne (EPFL)
+at the
+(Swiss Federal Insitute of Technology Lausanne)
+Supervisor:
+Prof. Aude Billard
+Examiners:
+Prof. Thierry Pun
+Prof. Jacqueline Nadel
+Prof. Nouchine Hadjikhani
+President of the jury:"
+e719e1ed86bf2214512d5631e31716effe2e23d2,Learning to Estimate 3D Human Pose and Shape from a Single Color Image,"Learning to Estimate 3D Human Pose and Shape from a Single Color Image
+Georgios Pavlakos1, Luyang Zhu2, Xiaowei Zhou3, Kostas Daniilidis1
+University of Pennsylvania 2 Peking University 3 Zhejiang University"
+e7b6887cd06d0c1aa4902335f7893d7640aef823,Modelling of Facial Aging and Kinship: A Survey,"Modelling of Facial Aging and Kinship: A Survey
+Markos Georgopoulos, Yannis Panagakis, and Maja Pantic,"
+e746447afc4898713a0bcf2bb560286eb4d20019,Leveraging Virtual and Real Person for Unsupervised Person Re-identification,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, NOVEMBER 2018
+Leveraging Virtual and Real Person for
+Unsupervised Person Re-identification
+Fengxiang Yang, Zhun Zhong, Zhiming Luo, Sheng Lian, and Shaozi Li"
+e72c5fb54c3d14404ebd1bf993e51d0056f6c429,Tempered Adversarial Networks,
+e72d35ae7c1f477ce4341a5fb3a15bcfe0481a0e,Behavioral Consistency Extraction for Face Verification,"Behavioral Consistency Extraction for Face
+Verification
+Hui Fang and Nicholas Costen
+Manchester Metropolitan University
+Department of Computing and Mathematics,
+Manchester, U.K."
+e7721f40fed05aae4d49d84e9ebc94ced7015aac,Design and Implementation of Resampling Techniques for Face Recognition using Classical LDA Algorithm in MATLAB,"International Journal of Computer Applications (0975 – 8887)
+Volume 152 – No.6, October 2016
+Design and Implementation of Resampling Techniques
+for Face Recognition using Classical LDA Algorithm in
+MATLAB
+S. R Bichwe
+Dept. of Electronics &
+Communication
+Kavikulguru Institute of
+Technology & Science,
+Ramtek, Maharashtra
+Sugandha Satija
+Dept. of Information
+Technology
+Kavikulguru Institute of
+Technology & Science,
+Ramtek, Maharashtra
+Madhavi R. Bichwe
+Dept of Computer Science &
+Technology"
+cb4fc4d49783f2049c48a062169f04eb744443ec,Paying More Attention to Saliency: Image Captioning with Saliency and Context Attention,"Paying More Attention to Saliency: Image Captioning with
+Saliency and Context Attention
+MARCELLA CORNIA, University of Modena and Reggio Emilia
+LORENZO BARALDI, University of Modena and Reggio Emilia
+GIUSEPPE SERRA, University of Udine
+RITA CUCCHIARA, University of Modena and Reggio Emilia
+Image captioning has been recently gaining a lot of attention thanks to the impressive achievements shown by
+deep captioning architectures, which combine Convolutional Neural Networks to extract image representations,
+and Recurrent Neural Networks to generate the corresponding captions. At the same time, a significant research
+effort has been dedicated to the development of saliency prediction models, which can predict human eye
+fixations. Even though saliency information could be useful to condition an image captioning architecture, by
+providing an indication of what is salient and what is not, research is still struggling to incorporate these two
+techniques. In this work, we propose an image captioning approach in which a generative recurrent neural
+network can focus on different parts of the input image during the generation of the caption, by exploiting
+the conditioning given by a saliency prediction model on which parts of the image are salient and which are
+contextual. We show, through extensive quantitative and qualitative experiments on large scale datasets, that
+our model achieves superior performances with respect to captioning baselines with and without saliency,
+and to different state of the art approaches combining saliency and captioning.
+CCS Concepts: • Computing methodologies → Scene understanding; Natural language generation;
+Additional Key Words and Phrases: saliency, visual saliency prediction, image captioning, deep learning."
+cbca355c5467f501d37b919d8b2a17dcb39d3ef9,Super-resolution of Very Low Resolution Faces from Videos,"CANSIZOGLU, JONES: SUPER-RESOLUTION OF VERY LR FACES FROM VIDEOS
+Super-resolution of Very Low-Resolution
+Faces from Videos
+Esra Ataer-Cansizoglu
+Michael Jones
+Mitsubishi Electric Research Labs
+(MERL)
+Cambridge, MA, USA"
+cb3d38cd18c99aca9c2a228aeb4998f394c7b1b3,Impairments in facial affect recognition associated with autism spectrum disorders: a meta-analysis.,"# Cambridge University Press 2014
+doi:10.1017/S0954579414000479
+Impairments in facial affect recognition associated with autism
+spectrum disorders: A meta-analysis
+LEAH M. LOZIER, JOHN W. VANMETER, AND ABIGAIL A. MARSH
+Georgetown University"
+cba90ec61155a233fee33b529401e65d9481213a,Houdini: Fooling Deep Structured Prediction Models,"Houdini: Fooling Deep Structured Prediction Models
+Moustapha Cisse
+Facebook AI Research
+Natalia Neverova*
+Facebook AI Research"
+cb4418b5bddaaceb92caea9e72c8cc528ce4e3cc,Generative Semantic Manipulation with Contrasting GAN,"Generative Semantic Manipulation with Contrasting
+Xiaodan Liang, Hao Zhang, Eric P. Xing
+Carnegie Mellon University and Petuum Inc.
+{xiaodan1, hao,"
+cb658e9e0823dc7afe66b593307b230cc2747790,Nouveau modèle pour la datation automatique de photographies à partir de caractéristiques visuelles,"Nouveau modèle pour la datation
+utomatique de photographies
+à partir de caractéristiques visuelles1
+Paul MARTIN* — Antoine DOUCET** — Frédéric JURIE*
+* Laboratoire GREYC [UMR 6072], Université de Caen Normandie, FRANCE 14032
+{paul.martin ;
+** Laboratoire L3i, Université de La Rochelle, FRANCE 17042
+RÉSUMÉ. Nous présentons, dans cet article, une méthode de datation de photographies par
+l’usage du contenu visuel de celles-ci. Nous nous sommes inspirés de travaux récents de la
+vision par ordinateur. Nous avons amélioré la méthode de classification utilisée dans ces tra-
+vaux en dépassant une limite intrinsèque de leur approche. En effet, ils considèrent la datation
+d’images comme un problème de classification multi-classes, pour lequel une classe repré-
+sente un ensemble d’années, mais ignorant l’ordre relatif sous-jacent à l’information tempo-
+relle. Dans leur approche soit une prédiction est bonne (période valide) soit elle est mauvaise
+(période invalide) mais aucune différence n’est faite entre se tromper d’une décennie ou de
+plusieurs. Nos travaux, s’appuient sur des avancées récentes en classification ordinale. Nous
+onsidérons les dates comme des attributs à la fois ordonnés et relatifs et nous proposons un
+adre spécifique pour les manipuler."
+cb1214e42fa81977bc21f4b3c8e194a9b68278f5,Visually Aligned Word Embeddings for Improving Zero-shot Learning,"Qiao et al.: Visually Aligned Word Embeddings. Appearing in Proc. British Mach. Vis. Conf. 2017
+Visually Aligned Word Embeddings for Improving
+Zero-shot Learning
+School of Computer Science, University of
+Adelaide, Australia
+Ruizhi Qiao
+Lingqiao Liu
+Chunhua Shen
+Anton van den Hengel"
+cb310356d1c5f567b2a8796b708f6e1e10fa1917,Serotonin and the neural processing of facial emotions in adults with autism: an fMRI study using acute tryptophan depletion.,"ORIGINAL ARTICLE
+Serotonin and the Neural Processing
+of Facial Emotions in Adults With Autism
+An fMRI Study Using Acute Tryptophan Depletion
+Eileen M. Daly, BA; Quinton Deeley, PhD; Christine Ecker, MSc, PhD; Michael Craig, PhD; Brian Hallahan, MRCPsych;
+Clodagh Murphy, MRCPsych; Patrick Johnston, PhD; Debbie Spain, MSc; Nicola Gillan, MSc; Michael Brammer, PhD;
+Vincent Giampietro, PhD; Melissa Lamar, PhD; Lisa Page, MRCPsych; Fiona Toal, MRCPsych; Anthony Cleare, PhD;
+Simon Surguladze, MD, PhD; Declan G. M. Murphy, FRCPsych
+Context: People with autism spectrum disorders (ASDs)
+have lifelong deficits in social behavior and differences
+in behavioral as well as neural responses to facial expres-
+sions of emotion. The biological basis to this is incom-
+pletely understood, but it may include differences in the
+role of neurotransmitters such as serotonin, which modu-
+late facial emotion processing in health. While some in-
+dividuals with ASD have significant differences in the sero-
+tonin system, to our knowledge, no one has investigated
+its role during facial emotion processing in adults with
+ASD and control subjects using acute tryptophan deple-
+tion (ATD) and functional magnetic resonance imaging."
+cb8b2db657cd6b6ccac13b56e2ca62b7d88eda68,Log Hyperbolic Cosine Loss Improves Varia-,"Under review as a conference paper at ICLR 2019
+LOG HYPERBOLIC COSINE LOSS IMPROVES VARIA-
+TIONAL AUTO-ENCODER
+Anonymous authors
+Paper under double-blind review"
+cbcf5da9f09b12f53d656446fd43bc6df4b2fa48,Face Recognition using Gray level Co-occurrence Matrix and Snap Shot Method of the Eigen Face,"ISSN: 2277-3754
+ISO 9001:2008 Certified
+International Journal of Engineering and Innovative Technology (IJEIT)
+Volume 2, Issue 6, December 2012
+Face Recognition using Gray level Co-occurrence
+Matrix and Snap Shot Method of the Eigen Face
+Sri Chandrasekharendra Saraswathi Viswa Mahavidyalaya University, Kanchipuram, India
+M. Madhu, R. Amutha
+SSN College of Engineering, Chennai, India"
+cb004e9706f12d1de83b88c209ac948b137caae0,Face Aging Effect Simulation Using Hidden Factor Analysis Joint Sparse Representation,"Face Aging Effect Simulation using Hidden Factor
+Analysis Joint Sparse Representation
+Hongyu Yang, Student Member, IEEE, Di Huang, Member, IEEE, Yunhong Wang, Member, IEEE, Heng Wang,
+nd Yuanyan Tang, Fellow, IEEE"
+cb11a150fc245958799e763069a6ae3080814d40,3d Face Recognition from Range Image,
+cb3ba84146d1324e1cdbde3764ca3b354ee09a2a,"On the Interplay Between Throughput, Fairness and Energy Efficiency on Asymmetric Multicore Processors","On the interplay between throughput,
+fairness and energy ef‌f‌iciency on
+symmetric multicore processors
+J. C. Saez1, A. Pousa2, A. E. de Giusti2, M. Prieto-Matias1
+ArTeCS Group, Facultad de Inform´atica, Complutense University of Madrid
+III-LIDI, Facultad de Inform´atica, National University of La Plata
+Email:
+Asymmetric single-ISA multicore processors (AMPs), which integrate high-
+performance big cores and low-power small cores, were shown to deliver
+higher performance per watt than symmetric multicores. Previous work has
+highlighted that this potential of AMP systems can be realizable by scheduling
+the various applications in a workload on the most appropriate core type. A
+number of scheduling schemes have been proposed to accomplish different goals,
+such as system throughput optimization, enforcing fairness or reducing energy
+onsumption. While the interrelationship between throughput and fairness on
+AMPs has been comprehensively studied, the impact that optimizing energy
+ef‌f‌iciency has on the other two aspects is still unclear. To fill this gap, we carry out
+comprehensive analytical and experimental study that illustrates the interplay
+etween throughput, fairness and energy ef‌f‌iciency on AMPs. Our analytical
+study allowed us to define the energy-ef‌f‌iciency factor (EEF) metric, which aids"
+cb7bbede1c2eae831dd73440f439955c4310837f,Cross-Cultural and Cultural-Specific Production and Perception of Facial Expressions of Emotion in the Wild,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+Cross-Cultural and Cultural-Specific Production
+nd Perception of Facial Expressions of Emotion
+in the Wild
+Ramprakash Srinivasan, Aleix M. Martinez"
+cbd20c2199062724eee841016f1575cb7d5309b4,Dropout training for SVMs with data augmentation,"JOURNAL OF LATEX CLASS FILES, VOL. X, NO. X, MAY 2015
+Dropout Training for SVMs with
+Data Augmentation
+Ning Chen and Jun Zhu, Member, IEEE, Jianfei Chen and Ting Chen"
+cb2e10d1a6792354bc0ce24ee99ecf2142d16f9b,Enhancing Real-Time Human Detection Based on Histograms of Oriented Gradients,"Enhancing Real-time Human Detection based
+on Histograms of Oriented Gradients
+Marco Pedersoli1, Jordi Gonz`alez2, Bhaskar Chakraborty1, and Juan J.
+Villanueva1
+Computer Vision Center and Departament d’Inform`atica. Universitat Aut`onoma
+de Barcelona, 08193 Bellaterra, Spain
+Institut de Rob`otica i Inform`atica Industrial(UPC-CSIC), Edifici U Parc
+Tecnol`ogic de Barcelona. 08028, Spain.
+Summary. In this paper we propose a human detection framework based on an
+enhanced version of Histogram of Oriented Gradients (HOG) features. These feature
+descriptors are computed with the help of a precalculated histogram of square-blocks.
+This novel method outperforms the integral of oriented histograms allowing the
+alculation of a single feature four times faster. Using Adaboost for HOG feature
+selection and Support Vector Machine as weak classifier, we build up a real-time
+human classifier with an excellent detection rate.
+Introduction
+Human detection is the task of finding presence and position of human beings
+in images. Many applications take advantage of it, mainly in the videosurvel-
+liance and human-computer iteration domains. Thus, human detection is the
+first step of the full process of Human Sequence Evaluation [5]."
+cbdca5e0f1fd3fd745430497d372a2a30b7bb0c5,Towards Distributed Coevolutionary GANs,"Towards Distributed Coevolutionary GANs
+Abdullah Al-Dujaili, Tom Schmiedlechner, Erik Hemberg and Una-May O’Reilly
+CSAIL, MIT, USA"
+cb30c1370885033bc833bc7ef90a25ee0900c461,FaceOff: Anonymizing Videos in the Operating Rooms,"FaceOff: Anonymizing Videos in the Operating
+Rooms
+Evangello Flouty1, Odysseas Zisimopoulos1, and Danail Stoyanov1,2
+Wellcome / ESPRC Centre for Interventional and Surgical Sciences, London,
+Digital Surgery, London, United Kingdom
+United Kingdom"
+cb6be69c67b0b15ebbda89a126f4dd62a4d32958,Igure Qa : a N a Nnotated F Igure D Ataset for V Isual R Easoning,"Workshop track - ICLR 2018
+FIGUREQA: AN ANNOTATED FIGURE DATASET FOR
+VISUAL REASONING
+Samira Ebrahimi Kahou1∗, Vincent Michalski2∗†, Adam Atkinson1,
+Ákos Kádár3†, Adam Trischler1, Yoshua Bengio3
+Microsoft Research Montréal
+Université de Montréal, MILA
+Tilburg University"
+cb38b4a5e517b4bcb00efbb361f4bdcbcf1dca2c,Learning towards Minimum Hyperspherical Energy,"Learning towards Minimum Hyperspherical Energy
+Weiyang Liu1,*, Rongmei Lin2,*, Zhen Liu1,*, Lixin Liu3,*, Zhiding Yu4, Bo Dai1,5, Le Song1,6
+Georgia Institute of Technology 2Emory University
+South China University of Technology 4NVIDIA 5Google Brain 6Ant Financial"
+cb53c8a85d58ccb2635be5b7ff978ea6e8b78cde,Face Recognition Based on Wavelet Transform and Regional Directional Weighted Local Binary Pattern,"Face Recognition Based on Wavelet Transform
+nd Regional Directional Weighted Local Binary
+Pattern
+Wu Fengxiang
+North China Career Academy of Water Resources, Henan Zhengzhou, China
+Email:
+independent application technology area"
+cb08f679f2cb29c7aa972d66fe9e9996c8dfae00,Action Understanding with Multiple Classes of Actors,"JOURNAL OF LATEX CLASS FILES, VOL. 13, NO. 9, SEPTEMBER 2014
+Action Understanding
+with Multiple Classes of Actors
+Chenliang Xu, Member, IEEE, Caiming Xiong, and Jason J. Corso, Senior Member, IEEE"
+cbae3eaf926aede9bec7ce2e28c35c1c50b1b43f,Fast RGB-D people tracking for service robots,"Noname manuscript No.
+(will be inserted by the editor)
+Fast RGB-D People Tracking for Service Robots
+Matteo Munaro · Emanuele Menegatti
+Received: date / Accepted: date"
+cb84229e005645e8623a866d3d7956c197f85e11,Disambiguating Visual Verbs,"IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. X, NO. X, MONTH 201X
+Disambiguating Visual Verbs
+Spandana Gella, Frank Keller, and Mirella Lapata"
+cb94ea16f12bde2de91d3cf3fac03a20b02611b1,Element-wise Bilinear Interaction for Sentence Matching,"Proceedings of the 7th Joint Conference on Lexical and Computational Semantics (*SEM), pages 107–112
+New Orleans, June 5-6, 2018. c(cid:13)2018 Association for Computational Linguistics"
+cb96c819f20f05ad0d85bba91f86795162f63445,Noisy Ocular Recognition Based on Three Convolutional Neural Networks,"Article
+Noisy Ocular Recognition Based on Three
+Convolutional Neural Networks
+Min Beom Lee, Hyung Gil Hong and Kang Ryoung Park *
+Division of Electronics and Electrical Engineering, Dongguk University, 30 Pildong-ro 1-gil, Jung-gu,
+Seoul 100-715, Korea; (M.B.L.); (H.G.H.)
+* Correspondence: Tel.: +82-10-3111-7022; Fax: +82-2-2277-8735
+Received: 18 October 2017; Accepted: 14 December 2017; Published: 17 December 2017"
+cbe859d151466315a050a6925d54a8d3dbad591f,Gaze shifts as dynamical random sampling,"GAZE SHIFTS AS DYNAMICAL RANDOM SAMPLING
+Giuseppe Boccignone
+Mario Ferraro
+Dipartimento di Scienze dell’Informazione
+Universit´a di Milano
+Via Comelico 39/41
+0135 Milano, Italy"
+cb8567f074573a0d66d50e75b5a91df283ccd503,Large Margin Learning in Set-to-Set Similarity Comparison for Person Reidentification,"Large Margin Learning in Set to Set Similarity
+Comparison for Person Re-identification
+Sanping Zhou, Jinjun Wang, Rui Shi, Qiqi Hou, Yihong Gong, Nanning Zheng"
+cb4f0656ce177161667759b46e20aec5488550fa, Learning with single view . . . ,"Washington University in St. Louis
+School of Engineering and Applied Science
+Department of Computer Science and Engineering
+Dissertation Examination Committee:
+Kilian Q. Weinberger, Chair
+John Blitzer
+John Cunningham
+Tao Ju
+Robert Pless
+Bill Smart
+Learning with Single View Co-training and Marginalized Dropout
+Minmin Chen
+A dissertation presented to the Graduate School of Arts and Sciences
+of Washington University in partial fulfillment of the
+requirements for the degree of
+Doctor of Philosophy
+May 2013
+Saint Louis, Missouri"
+cb34481714bc7194ac108a1568d34e120f256405,Audio Visual Scene-Aware Dialog (AVSD) Challenge at DSTC7,"Audio Visual Scene-Aware Dialog (AVSD) Challenge at DSTC7
+Huda Alamri∗†, Vincent Cartillier∗, Raphael Gontijo Lopes∗, Abhishek Das∗, Jue Wang†,
+Irfan Essa∗, Dhruv Batra∗, Devi Parikh∗,
+Anoop Cherian†, Tim K. Marks†, Chiori Hori†
+School of Interactive Computing, Georgia Tech
+Mitsubishi Electric Research Laboratories (MERL), Cambridge, MA, USA"
+f881d2a04de838c8950a279e1ed8c0f9886452af,Multi-Stage Variational Auto-Encoders for Coarse-to-Fine Image Generation,"Multi-Stage Variational Auto-Encoders for
+Coarse-to-Fine Image Generation
+Lei Cai
+Hongyang Gao
+Washington State University
+Washington State University
+Pullman, WA 99164
+Pullman, WA 99164
+Shuiwang Ji
+Washington State University
+Pullman, WA 99164"
+f81f5da2a1e4eb80b465b8dffca4c9e583a8a8a6,"Rapid Object Detection Systems , Utilising Deep Learning and Unmanned Aerial Systems ( Uas ) for Civil Engineering Applications","RAPID OBJECT DETECTION SYSTEMS, UTILISING DEEP LEARNING AND
+UNMANNED AERIAL SYSTEMS (UAS) FOR CIVIL ENGINEERING APPLICATIONS
+UCL Department of Civil, Environmental & Geomatic Engineering, Gower Street, London, WC1E 6BT – (david.griffiths.16,
+David Griffiths*, Jan Boehm
+Commission II, WG II/6
+KEY WORDS: Object detection, Deep Learning, Unmanned Aerial Systems, Railway, Rapid"
+f86c65bc2753ae71826a0dafbf46a75d22fb5b5b,Fearful Faces do Not Lead to Faster Attentional Deployment in Individuals with Elevated Psychopathic Traits,"J Psychopathol Behav Assess (2017) 39:596–604
+DOI 10.1007/s10862-017-9614-x
+Fearful Faces do Not Lead to Faster Attentional Deployment
+in Individuals with Elevated Psychopathic Traits
+Sylco S. Hoppenbrouwers 1 & Jaap Munneke 2,3 & Karen A. Kooiman 4 & Bethany Little 4 &
+Craig S. Neumann 5 & Jan Theeuwes 4
+Published online: 30 June 2017
+# The Author(s) 2017. This article is an open access publication"
+f842b13bd494be1bbc1161dc6df244340b28a47f,An Improved Face Recognition Technique Based on Modular Multi-directional Two-dimensional Principle Component Analysis Approach,"An Improved Face Recognition Technique Based
+on Modular Multi-directional Two-dimensional
+Principle Component Analysis Approach
+Department of Physics and Electronic Engineering, Hanshan Normal University, Chaozhou, 521041, China
+Xiaoqing Dong
+Department of Physics and Electronic Engineering, Hanshan Normal University, Chaozhou, 521041, China
+Email:
+Hongcai Chen
+Email:"
+f86d8385a6170b98e434a121fb7d12facb2c8426,Frank-Wolfe Algorithm for Exemplar Selection,"Frank-Wolfe Algorithm for Exemplar Selection
+Gary Cheng
+UC Berkeley
+Armin Askari
+UC Berkeley
+Laurent El Ghaoui
+Kannan Ramchandran
+UC Berkeley
+UC Berkeley"
+f884a67187929e7dda66091c13867ed0a8a36d01,Weighted-Fusion-Based Representation Classifiers for Hyperspectral Imagery,"Remote Sens. 2015, 7, 14806-14826; doi:10.3390/rs71114806
+OPEN ACCESS
+ISSN 2072-4292
+www.mdpi.com/journal/remotesensing
+Article
+Weighted-Fusion-Based Representation Classifiers for
+Hyperspectral Imagery
+Bing Peng 1, Wei Li 1,*, Xiaoming Xie 1,*, Qian Du 2 and Kui Liu 3
+College of Information Science and Technology, Beijing University of Chemical Technology,
+Beijing 100029, China; E-Mail:
+Department of Electrical and Computer Engineering, Mississippi State University, Starkville,
+MS 39762, USA; E-Mail:
+Intelligent Fusion Technology, Germantown, MD 20876, USA; E-Mail:
+* Authors to whom correspondence should be addressed; E-Mails: (W.L.);
+(X.X.); Tel.: +86-010-6443-3717 (W.L.); +86-010-6441-3467 (X.X.).
+Academic Editors: Magaly Koch and Prasad S. Thenkabail
+Received: 17 June 2015 / Accepted: 30 October 2015 / Published: 6 November 2015"
+f8ea0f76f2044168040fcd0a9e81072c88cde4a4,Nonlinear Feature Extraction using Multilayer Perceptron based Alternating Regression for Classification and Multiple-output Regression Problems,
+f8c94afd478821681a1565d463fc305337b02779,Design and Implementation of Robust Face Recognition System for Uncontrolled Pose and Illumination Changes,"www.semargroup.org,
+www.ijsetr.com
+ISSN 2319-8885
+Vol.03,Issue.25
+September-2014,
+Pages:5079-5085
+Design and Implementation of Robust Face Recognition System for
+Uncontrolled Pose and Illumination Changes
+VIJAYA BHASKAR TALARI
+, VENKATESWARLU PRATTI
+PG Scholar, Dept of ECE, LITAM, JNTUK, Andhrapradesh, India, Email:
+Assistant Professor, Dept of ECE, LITAM, JNTUK, Andhrapradesh, India, Email:"
+f8eedcca6263062b6bab11ead255f719452f1c81,Motion in action : optical flow estimation and action localization in videos. (Le mouvement en action : estimation du flot optique et localisation d'actions dans les vidéos),"Motion in action : optical flow estimation and action
+localization in videos
+Philippe Weinzaepfel
+To cite this version:
+Philippe Weinzaepfel. Motion in action : optical flow estimation and action localization in videos.
+Computer Vision and Pattern Recognition [cs.CV]. Université Grenoble Alpes, 2016. English. <NNT :
+016GREAM013>. <tel-01407258>
+HAL Id: tel-01407258
+https://tel.archives-ouvertes.fr/tel-01407258
+Submitted on 1 Dec 2016
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de"
+f8cfabecbe587c611de2696a37f96e3f77ac8555,NEMGAN: Noise Engineered Mode-matching GAN,"NEMGAN: Noise Engineered Mode-matching GAN
+Deepak Mishra∗, Prathosh AP∗, Aravind J, Prashant Pandey
+& Santanu Chaudhury
+Department of Electrical Engineering
+Indian Institute of Technology Delhi
+New Delhi, India"
+f8106b414d81df11ef2e9c26dd83f812711eec35,Inferring Analogous Attributes: Large-Scale Transfer of Category-Specific Attribute Classifiers,"Inferring Analogous Attributes:
+Large-Scale Transfer of Category-Specific Attribute Classifiers
+Chao-Yeh Chen and Kristen Grauman"
+f827b596b4099b0490ab46a9dd2922db2b708963,Pathologies of Neural Models Make Interpretation Difficult,"Pathologies of Neural Models Make Interpretations Difficult
+Shi Feng1 Eric Wallace1 Alvin Grissom II2 Mohit Iyyer3,4
+Pedro Rodriguez1 Jordan Boyd-Graber1
+University of Maryland 2Ursinus College
+UMass Amherst 4Allen Institute for Artificial Intelligence"
+f879556115284946637992191563849e840789d1,Geometry Guided Adversarial Facial Expression Synthesis,"Geometry Guided Adversarial Facial Expression Synthesis
+Lingxiao Song1,2
+Zhihe Lu1,3 Ran He1,2,3
+Zhenan Sun1,2
+Tieniu Tan1,2,3
+National Laboratory of Pattern Recognition, CASIA
+Center for Research on Intelligent Perception and Computing, CASIA
+Center for Excellence in Brain Science and Intelligence Technology, CAS"
+f8ec92f6d009b588ddfbb47a518dd5e73855547d,Extreme Learning Machine Ensemble Using Bagging for Facial Expression Recognition,"J Inf Process Syst, Vol.10, No.3, pp.443~458, September 2014
+ISSN 1976-913X (Print)
+ISSN 2092-805X (Electronic)
+Extreme Learning Machine Ensemble Using
+Bagging for Facial Expression Recognition
+Deepak Ghimire* and Joonwhoan Lee*"
+f8796b8e8246ce41efb2904c053fe0ea2868e373,A Variational U-Net for Conditional Appearance and Shape Generation,"A Variational U-Net for Conditional Appearance and Shape Generation
+Patrick Esser∗, Ekaterina Sutter∗, Bj¨orn Ommer
+Heidelberg Collaboratory for Image Processing
+IWR, Heidelberg University, Germany"
+f8b26b2ec62cf76f58f95938233bc22ae1902144,UvA-DARE ( Digital Academic Repository ) Visual Tracking : An Experimental Survey Smeulders,"UvA-DARE (Digital Academic Repository)
+Visual Tracking: An Experimental Survey
+Smeulders, A.W.M.; Chu, D.M.; Cucchiara, R.; Calderara, S.; Dehghan, A.; Shah, M.
+Published in:
+IEEE Transactions on Pattern Analysis and Machine Intelligence
+0.1109/TPAMI.2013.230
+Link to publication
+Citation for published version (APA):
+Smeulders, A. W. M., Chu, D. M., Cucchiara, R., Calderara, S., Dehghan, A., & Shah, M. (2014). Visual
+Tracking: An Experimental Survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 36(7),
+442-1468. DOI: 10.1109/TPAMI.2013.230
+General rights
+It is not permitted to download or to forward/distribute the text or part of it without the consent of the author(s) and/or copyright holder(s),
+other than for strictly personal, individual use, unless the work is under an open content license (like Creative Commons).
+Disclaimer/Complaints regulations
+If you believe that digital publication of certain material infringes any of your rights or (privacy) interests, please let the Library know, stating
+your reasons. In case of a legitimate complaint, the Library will make the material inaccessible and/or remove it from the website. Please Ask
+the Library: http://uba.uva.nl/en/contact, or a letter to: Library of the University of Amsterdam, Secretariat, Singel 425, 1012 WP Amsterdam,
+The Netherlands. You will be contacted as soon as possible.
+Download date: 26 Apr 2018"
+f89e5a8800b318fa03289b5cc67df54b956875b4,Do GANs actually learn the distribution? An empirical study,"Do GANs actually learn the distribution? An empirical study
+Sanjeev Arora
+Yi Zhang
+July 4, 2017"
+f8ed5f2c71e1a647a82677df24e70cc46d2f12a8,Artificial Neural Network Design and Parameter Optimization for Facial Expressions Recognition,"International Journal of Scientific & Engineering Research, Volume 2, Issue 12, December-2011 1
+ISSN 2229-5518
+Artificial Neural Network Design and Parameter
+Optimization for Facial Expressions Recognition
+Ammar A. Alzaydi"
+f8ec2079838520fcb9394574bdd956ac9d3d5832,Visual Dynamics: Stochastic Future Generation via Layered Cross Convolutional Networks,"Visual Dynamics: Stochastic Future Generation
+via Layered Cross Convolutional Networks
+Tianfan Xue*, Jiajun Wu*, Katherine L. Bouman, and William T. Freeman"
+f809f9e5a03817d238718723a7b4ac04abcd3f12,Highly Efficient 8-bit Low Precision Inference,"Under review as a conference paper at ICLR 2019
+HIGHLY EFFICIENT 8-BIT LOW PRECISION INFERENCE
+OF CONVOLUTIONAL NEURAL NETWORKS
+Anonymous authors
+Paper under double-blind review"
+f8f872044be2918de442ba26a30336d80d200c42,Facial Emotion Recognition Techniques : A Survey,"IJSRD - International Journal for Scientific Research & Development| Vol. 3, Issue 03, 2015 | ISSN (online): 2321-0613
+Facial Emotion Recognition Techniques: A Survey
+Namita Rathore1 Rohit Miri2
+,2Department of Computer Science and Engineering
+,2Dr C V Raman Institute of Science and Technology
+defense
+systems,
+surveillance"
+f8a2a6b821a092ac43acd4e7366fe7c1e9285317,Attribute-controlled face photo synthesis from simple line drawing,"ATTRIBUTE-CONTROLLED FACE PHOTO SYNTHESIS FROM SIMPLE LINE DRAWING
+Qi Guo Ce Zhu Zhiqiang Xia Zhengtao Wang Yipeng Liu
+School of Electronic Engineering / Center for Robotics
+University of Electronic Science and Technology of China (UESTC), Chengdu, China"
+f8a5bc2bd26790d474a1f6cc246b2ba0bcde9464,"KDEF-PT: Valence, Emotional Intensity, Familiarity and Attractiveness Ratings of Angry, Neutral, and Happy Faces","ORIGINAL RESEARCH
+published: 19 December 2017
+doi: 10.3389/fpsyg.2017.02181
+KDEF-PT: Valence, Emotional
+Intensity, Familiarity and
+Attractiveness Ratings of Angry,
+Neutral, and Happy Faces
+Margarida V. Garrido* and Marília Prada
+Instituto Universitário de Lisboa (ISCTE-IUL), CIS – IUL, Lisboa, Portugal
+The Karolinska Directed Emotional Faces (KDEF)
+is one of the most widely used
+human facial expressions database. Almost a decade after the original validation study
+(Goeleven et al., 2008), we present subjective rating norms for a sub-set of 210 pictures
+which depict 70 models (half female) each displaying an angry, happy and neutral facial
+expressions. Our main goals were to provide an additional and updated validation
+to this database, using a sample from a different nationality (N = 155 Portuguese
+students, M = 23.73 years old, SD = 7.24) and to extend the number of subjective
+dimensions used to evaluate each image. Specifically, participants reported emotional
+labeling (forced-choice task) and evaluated the emotional intensity and valence of the
+expression, as well as the attractiveness and familiarity of the model (7-points rating"
+f8ddeb23343cde8e2a9fdd87e877f0ce5461b42b,Illumination and Pose Invariant Face Recognition: A Technical Review,"International Journal of Computer Information Systems and Industrial Management Applications (IJCISIM)
+ISSN: 2150-7988 Vol.2 (2010), pp.029-038
+http://www.mirlabs.org/ijcisim
+Illumination and Pose Invariant Face Recognition: A Technical Review
+Kavita. R. Singh
+Department of Computer
+Technology, YCCE, Nagpur(M.S),
+41 110, India
+Mukesh. A. Zaveri
+Computer Engineering
+Department, S.V.National Institute
+of Technology, Surat(Gujarat),
+29507, India
+Mukesh. M. Raghuwanshi
+NYSS College of Engineering and
+Research, Nagpur(M.S), 441 110,
+India"
+f8d68084931f296abfb5a1c4cd971f0b0294eaa4,Unconditional Generative Models,"Published as a conference paper at ICLR 2018
+LATENT CONSTRAINTS:
+LEARNING TO GENERATE CONDITIONALLY FROM
+UNCONDITIONAL GENERATIVE MODELS
+Jesse Engel
+Google Brain
+San Francisco, CA, USA
+Matthew D. Hoffman
+Google Inc.
+San Francisco, CA, USA
+Adam Roberts
+Google Brain
+San Francisco, CA, USA"
+ce54dd2b0c6c75208ac77420233419066dd0117f,Issn 2348-375x Ear Segmentation Using Differential Box Counting Approach,"Geetha et al. UJEAS 2014, 02 (01): Page 77-78
+ISSN 2348-375X
+Unique Journal of Engineering and Advanced Sciences
+Available online: www.ujconline.net
+Research Article
+EAR SEGMENTATION USING DIFFERENTIAL BOX COUNTING APPROACH
+Geetha Prem P1*, Manikandaprabu N2, Dhivya P3, Deepa A4
+PG Scholar, AVS Engineering College, TN, India
+Lecturer, Senthur Polytechnic College, TN, India
+Asso. Prof/ECE, AVS Engineering College, Salem
+ME (Communication Systems), Sona College of Technology, Salem
+Received: 28-12-2013; Revised: 24-01-2014; Accepted: 20-02-2014
+*Corresponding Author: P. Prem Geetha, PG Scholar, AVS Engineering College, TN, India Email:"
+ceac97de889ed2f65af62f61a007651d03b36b6c,Diagnostic Accuracy of Content Based Dermatoscopic Image Retrieval with Deep Classification Features,"Diagnostic Accuracy of Content Based Dermatoscopic Image Retrieval with
+Deep Classification Features
+Tschandl P, Argenziano G, Razmara M, Yap J
+Final version available at https://doi.org/10.1111/bjd.17189
+Citation:
+tschandl cbir2018,
+Author=”Tschandl, P. and Argenziano, G. and Razmara, M. and Yap, J. ”,
+Title=”Diagnostic Accuracy of Content Based Dermatoscopic Image Retrieval with Deep Classification Features”,
+Journal=”Br J Dermatol”,
+Year=”2018”"
+cefd107b19201cd9f403e2f9332c690e81f770b5,A Survey on Databases for Facial Expression Analysis,
+cef2b5ab841568755233994b12cf046c408f881e,Techniques for Statistical Shape Model Building and Fusion,"TECHNIQUES
+STATISTICAL SHAPE MODEL
+BUILDING AND FUSION
+Constantine Butakoff
+(Kostantyn Butakov)"
+ce57cc478421adf85a9058a0cc8fad8ebfd81c52,Multimodal Attribute Extraction,"Multimodal Attribute Extraction
+Robert L. Logan IV
+University of California
+Irvine, CA
+Samuel Humeau
+Diffbot
+Mountain View, CA
+Sameer Singh
+University of California
+Irvine, CA
+Introduction
+Given the large collections of unstructured and semi-structured data available on the web, there is a
+rucial need to enable quick and efficient access to the knowledge content within them. Traditionally,
+the field of information extraction has focused on extracting such knowledge from unstructured text
+documents, such as job postings, scientific papers, news articles, and emails. However, the content
+on the web increasingly contains more varied types of data, including semi-structured web pages,
+tables that do not adhere to any schema, photographs, videos, and audio. Given a query by a user,
+the appropriate information may appear in any of these different modes, and thus there’s a crucial
+need for methods to construct knowledge bases from different types of data, and more importantly,
+Motivated by this goal, we introduce the task of multimodal attribute extraction. Provided contextual"
+ce391bcdb64f7659ddc5a0c2e5c73854c1e8031c,Zur Erlangung Des Grades Des,"FILTERING AND OPTIMIZATION
+STRATEGIES FOR MARKERLESS
+HUMAN MOTION CAPTURE WITH
+SKELETON-BASED SHAPE MODELS.
+DISSERTATION
+ZUR ERLANGUNG DES GRADES DES
+DOKTORS DER INGENIEURWISSENSCHAFTEN (DR.-ING.)
+DER NATURWISSENSCHAFTLICH-TECHNISCHEN FAKULT ¨ATEN
+DER UNIVERSIT ¨AT DES SAARLANDES
+VORGELEGT VON
+JUERGEN GALL
+SAARBR ¨UCKEN"
+ce316d2366ec1b95ee91a98b4f426e6c00cdcdc4,Hierarchical Energy-transfer Features,"Hierarchical Energy-Transfer Features
+Radovan Fusek, Eduard Sojka, Karel Mozdˇreˇn and Milan ˇSurkala
+Technical University of Ostrava, FEECS, Department of Computer Science
+7. listopadu 15, 708 33 Ostrava-Poruba, Czech Republic
+{radovan.fusek, eduard.sojka, karel.mozdren,
+Keywords:
+Object Detection, Recognition, SVM, Image Descriptors, Feature Selection."
+ceb02a8f874c84ece88fcc7be1530a581b1cd1b0,A Novel Geometry-based Algorithm for Robust Grasping in Extreme Clutter Environment,"A Novel Geometry-based Algorithm for Robust Grasping in Extreme Clutter
+Environment
+Olyvia Kundua, Swagat Kumara,∗
+TATA Consultancy Services, Bangalore, India 560066"
+ce85d953086294d989c09ae5c41af795d098d5b2,Bilinear Analysis for Kernel Selection and Nonlinear Feature Extraction,"This article has been accepted for inclusion in a future issue of this journal. Content is final as presented, with the exception of pagination.
+Bilinear Analysis for Kernel Selection and
+Nonlinear Feature Extraction
+Shu Yang, Shuicheng Yan, Member, IEEE, Chao Zhang, and Xiaoou Tang, Senior Member, IEEE"
+ceb4040acf7f27b4ca55da61651a14e3a1ef26a8,Angry Crowds: Detecting Violent Events in Videos,"Angry Crowds:
+Detecting Violent Events in Videos
+Sadegh Mohammadi1, Alessandro Perina1,2, Hamed Kiani1, Vittorio Murino1,3
+Pattern Analysis and Computer Vision (PAVIS),
+Istituto Italiano di Tecnologia, Genova, Italy
+Microsoft Corp,
+WDG Core Data Science, Redmond
+Dept. of Computer Science,
+University of Verona, Italy
+As supplementary material, we selected a few testing video clips from Vio-
+lence in crowds (VIC) [1] dataset to illustrate the effectiveness of the proposed
+Aggression Force compared to the Interaction Force (SFM) [2] and Optical Flow
+for the task of violent detection in video sequences. The scenarios depicted in
+the attached video are captured under very challenging situations including low
+image quality, cluttered background, densely crowded scenes, camera motion,
+occlusions, large scale/illumination variations.
+The qualitative results in video format can be seen in ”video.avi”, highlight-
+ing two major advantages of Aggression Force compared to Social Force and
+Optical Flowing.
+Firstly, the SFM and Optical Flow are very sensitive to footages captured"
+cee700093d6672df48d169ef194861026fe31e8e,Hashing on Nonlinear Manifolds,"Hashing on Nonlinear Manifolds
+Fumin Shen, Chunhua Shen, Qinfeng Shi, Anton van den Hengel, Zhenmin Tang, Heng Tao Shen
+in the Hamming space. This means that many algorithms
+which are based on such pairwise comparisons can be made
+more efficient, and applied to much larger datasets. Due to the
+flexibility of hash codes, hashing techniques can be applied
+in many ways. one can, for example, efficiently perform
+similarity search by exploring only those data points falling
+into the close-by buckets to the query by the Hamming
+distance, or use the binary representations for other tasks like
+image classification."
+ceedb191328ac4d968853b948a32b5689c2ac2a2,Semisupervised Dimensionality Reduction and Classification Through Virtual Label Regression,"IEEE TRANSACTIONS ON SYSTEMS, MAN, AND CYBERNETICS—PART B: CYBERNETICS, VOL. 41, NO. 3, JUNE 2011
+Semisupervised Dimensionality Reduction and
+Classification Through Virtual Label Regression
+Feiping Nie, Dong Xu, Xuelong Li, Senior Member, IEEE, and Shiming Xiang"
+ce0cc5f078c5224b9599caf518d74ae3023be0a6,Review on computer vision techniques in emergency situations,"(will be inserted by the editor)
+Review on Computer Vision Techniques in Emergency Situations
+Laura Lopez-Fuentes · Joost van de Weijer · Manuel Gonz´alez-Hidalgo · Harald
+Skinnemoen · Andrew D. Bagdanov
+Received: date / Accepted: date"
+ce4853f2214ee1f4c47a97ff45d4e53f6ffd5087,Models and Methods for Bayesian Object Matching,"Helsinki University of Technology Laboratory of Computational Engineering Publications
+Teknillisen korkeakoulun Laskennallisen tekniikan laboratorion julkaisuja
+Espoo 2005
+REPORT B52
+MODELS AND METHODS FOR BAYESIAN OBJECT
+MATCHING
+Toni Tamminen
+AB TEKNILLINEN KORKEAKOULU
+TEKNISKA H(cid:214)GSKOLAN
+HELSINKI UNIVERSITY OF TECHNOLOGY
+TECHNISCHE UNIVERSIT˜T HELSINKI
+UNIVERSITE DE TECHNOLOGIE D’HELSINKI"
+ceaa5eb51f761b5f84bd88b58c8f484fcd2a22d6,UC San Diego UC San Diego Electronic Theses and Dissertations Title Interactive learning and prediction algorithms for computer vision applications,"UC San Diego
+UC San Diego Electronic Theses and Dissertations
+Title
+Inhibitions of ascorbate fatty acid derivatives on three rabbit muscle glycolytic enzymes
+Permalink
+https://escholarship.org/uc/item/8x33n1gj
+Author
+Pham, Duyen-Anh
+Publication Date
+011-01-01
+Peer reviewed|Thesis/dissertation
+eScholarship.org
+Powered by the California Digital Library
+University of California"
+cef092bf9beed65e379ab48ef2b43498d4aaea92,Process Monitoring in the Intensive Care Unit: Assessing Patient Mobility Through Activity Analysis with a Non-Invasive Mobility Sensor,"Process Monitoring in the Intensive Care Unit:
+Assessing Patient Mobility Through Activity
+Analysis with a Non-Invasive Mobility Sensor
+Austin Reiter1(B), Andy Ma1, Nishi Rawat2, Christine Shrock2,
+nd Suchi Saria1
+The Johns Hopkins University, Baltimore, MD, USA
+Johns Hopkins Medical Institutions, Baltimore, MD, USA"
+ce12bbb8ce974df4b64f18e478d7fa99b722de03,A Hybrid Data Association Framework for Robust Online Multi-Object Tracking,"A Hybrid Data Association Framework for Robust
+Online Multi-Object Tracking
+Min Yang, Yuwei Wu∗, and Yunde Jia Member, IEEE,"
+ce9a61bcba6decba72f91497085807bface02daf,Eigen-harmonics faces: face recognition under generic lighting,"Eigen-Harmonics Faces: Face Recognition under Generic Lighting
+Laiyun Qing1,2, Shiguang Shan2, Wen Gao1,2
+Graduate School, CAS, Beijing, China, 100080
+ICT-ISVISION Joint R&D Laboratory for Face Recognition, CAS, Beijing, China, 100080
+Emails: {lyqing, sgshan, wgao}jdl.ac.cn"
+cef6cffd7ad15e7fa5632269ef154d32eaf057af,Emotion Detection Through Facial Feature Recognition,"Emotion Detection Through Facial Feature
+Recognition
+James Pao
+through consistent"
+ce3ee08f4d937a6dcb2d6dd0a1ca100920f312e6,Literature Survey On Contactless Palm Vein Recognition,"International Journal of Computer Science Trends and Technology (IJCST) – Volume 3 Issue 5, Sep-Oct 2015
+RESEARCH ARTICLE
+Literature Survey On Contactless Palm Vein Recognition
+Roshni C Rahul [1], Merin Cherian [2], Manu Mohan C M [3]
+Department of Computer Science [1], Department of Science [2], Department of Electronics [3]
+OPEN ACCESS
+Mahatma Gandhi University
+Kerala - India"
+cebfafea92ed51b74a8d27c730efdacd65572c40,Matching 2.5D face scans to 3D models,"JANUARY 2006
+Matching 2.5D Face Scans to 3D Models
+Xiaoguang Lu, Student Member, IEEE, Anil K. Jain, Fellow, IEEE, and
+Dirk Colbry, Student Member, IEEE"
+ce0dbe6b1abecb54dcc98dbe652aa63d190dbc94,Part-Based Models for Finding People and Estimating Their Pose,"Part-based models for finding people and
+estimating their pose
+Deva Ramanan"
+ced4853617ba6af27f5447f9c4de07c3e05e8c3b,Real-Time Joint Semantic Segmentation and Depth Estimation Using Asymmetric Annotations,"Real-Time Joint Semantic Segmentation and Depth Estimation Using
+Asymmetric Annotations
+Vladimir Nekrasov1, Thanuja Dharmasiri2, Andrew Spek2, Tom Drummond2, Chunhua Shen1 and Ian Reid1"
+cea85314294f9731661a419f627cb99331ad9c50,Race recognition using local descriptors,"978-1-4673-0046-9/12/$26.00 ©2012 IEEE
+ICASSP 2012"
+ce54e891e956d5b502a834ad131616786897dc91,Face Recognition Using LTP Algorithm,"International Journal of Science and Research (IJSR)
+ISSN (Online): 2319-7064
+Index Copernicus Value (2013): 6.14 | Impact Factor (2014): 5.611
+Face Recognition Using LTP Algorithm
+Richa Sharma1, Rohit Arora2
+ECE & KUK
+Assistant Professor (ECE)
+Volume 4 Issue 12, December 2015
+Licensed Under Creative Commons Attribution CC BY
+www.ijsr.net
+ Variation in luminance: Third main challenge that
+ppears in face recognition process is the luminance. Due
+to variation in the luminance the representation get varied
+from the original image. The person with same poses
+expression and seen from same viewpoint can be appear
+very different due to variation in lightening."
+ce6d23894f88349443e7c9fe512ca81291bb2e00,VIENA2: A Driving Anticipation Dataset,"VIENA2: A Driving Anticipation Dataset
+Mohammad Sadegh Aliakbarian1,2,4, Fatemeh Sadat Saleh1,4, Mathieu
+Salzmann3, Basura Fernando2, Lars Petersson1,4, and Lars Andersson4
+ANU, 2ACRV, 3CVLab, EPFL, 4Data61-CSIRO"
+ce06015fc0eb2add064ef93c9b97ad063c03aef4,Person Re-identification in Surveillance Videos using Multi-part Color Descriptor,"International Journal of Computer Applications (0975 – 8887)
+Volume 121 – No.16, July 2015
+Person Re-identification in Surveillance Videos
+using Multi-part Color Descriptor
+P.K. Sathish
+S. Balaji
+Computer Science and Engineering Dept.
+Centre for Emerging Technologies, Jain University
+Christ University
+Bengaluru- 560074"
+ce073cb70eec80d87c9e07a4ec2d4162d91e23a6,Positive Definite Matrices: Data Representation and Applications to Computer Vision,"Positive Definite Matrices: Data Representation
+nd Applications to Computer Vision
+Anoop Cherian and Suvrit Sra"
+ce6f459462ea9419ca5adcc549d1d10e616c0213,A Survey on Face Identification Methodologies in Videos,"A Survey on Face Identification Methodologies in
+Videos
+Student, M.Tech CSE ,Department of Computer Science
+& Engineering ,G.H.Raisoni College of Engineering &
+Technology for Women, Nagpur, Maharashtra, India.
+Deepti Yadav"
+ce933821661a0139a329e6c8243e335bfa1022b1,Temporal Modeling Approaches for Large-scale Youtube-8M Video Understanding,"Temporal Modeling Approaches for Large-scale
+Youtube-8M Video Understanding
+Fu Li, Chuang Gan, Xiao Liu, Yunlong Bian, Xiang Long, Yandong Li, Zhichao Li, Jie Zhou, Shilei Wen
+Baidu IDL & Tsinghua University"
+cea50611ba73b5775cc2fe1e9c27990a0bb20cf8,Gabor Feature Based Sparse Representation for Face Recognition with Gabor Occlusion Dictionary,"Gabor Feature based Sparse Representation for
+Face Recognition with Gabor Occlusion
+Dictionary
+Meng Yang, Lei Zhang ⋆
+Biometric Research Center, Dept. of Computing, The Hong Kong Polytechnic
+University, Hong Kong,"
+e0d2a28bdcb1996f9659ce2d5fcdace3d369cff6,Fusion Scheme for Semantic and Instance-level Segmentation,"Fusion Scheme for Semantic and Instance-level Segmentation
+Arthur Daniel Costea ∗, Andra Petrovai ∗ and Sergiu Nedevschi
+Image Processing and Pattern Recognition Research Center
+Technical University of Cluj-Napoca, Romania
+{arthur.costea, andra.petrovai,"
+e000dd1aec1c7b1e9e781ec7ea66f2bde72faa5e,Ear Recognition: A Complete System,"Ear Recognition: A Complete System
+Ayman Abazaa,b and MaryAnn F. Harrisona
+West Virginia High Tech Foundation, 1000 Technology Drive, Fairmont, USA;
+Cairo University, Cairo, Egypt"
+e0e8c7145c9b389dad2f4e1982f2b9c31b766503,Augmenting Crowd-Sourced 3 D Reconstructions using Semantic Detections,"Augmenting Crowd-Sourced 3D Reconstructions using Semantic Detections
+True Price1
+Department of Computer Science, UNC Chapel Hill
+Johannes L. Sch¨onberger2
+Zhen Wei1 Marc Pollefeys2
+Department of Computer Science, ETH Z¨urich
+Jan-Michael Frahm1
+Microsoft"
+e0dedb6fc4d370f4399bf7d67e234dc44deb4333,Supplementary Material: Multi-Task Video Captioning with Video and Entailment Generation,"Supplementary Material: Multi-Task Video Captioning with Video and
+Entailment Generation
+Ramakanth Pasunuru and Mohit Bansal
+UNC Chapel Hill
+{ram,
+Experimental Setup
+.1 Datasets
+.1.1 Video Captioning Datasets
+YouTube2Text or MSVD The Microsoft Re-
+search Video Description Corpus (MSVD) or
+YouTube2Text (Chen and Dolan, 2011) is used
+for our primary video captioning experiments. It
+has 1970 YouTube videos in the wild with many
+diverse captions in multiple languages for each
+video. Caption annotations to these videos are
+ollected using Amazon Mechanical Turk (AMT).
+All our experiments use only English captions. On
+verage, each video has 40 captions, and the over-
+ll dataset has about 80, 000 unique video-caption
+pairs. The average clip duration is roughly 10 sec-"
+e096b11b3988441c0995c13742ad188a80f2b461,DeepProposals: Hunting Objects and Actions by Cascading Deep Convolutional Layers,"Noname manuscript No.
+(will be inserted by the editor)
+DeepProposals: Hunting Objects and Actions by Cascading
+Deep Convolutional Layers
+Amir Ghodrati · Ali Diba · Marco Pedersoli · Tinne Tuytelaars · Luc
+Van Gool
+Received: date / Accepted: date"
+e0aa9ab8f00b2bf0dd1b6ffd5c00e5a15b6a67e1,Robust Visual Tracking via Hierarchical Convolutional Features,"Robust Visual Tracking
+via Hierarchical Convolutional Features
+Chao Ma, Jia-Bin Huang, Xiaokang Yang, and Ming-Hsuan Yang"
+e0eb1d66f244456063409264ed795d9893565011,Inhibited Softmax for Uncertainty Estimation in Neural Networks,"Electronic Preprint
+INHIBITED SOFTMAX FOR UNCERTAINTY ESTIMATION
+IN NEURAL NETWORKS
+Marcin Mo˙zejko, Mateusz Susik & Rafał Karczewski
+Sigmoidal"
+e043d79f4dc41c9decaf637d8ffdd11f8ed59f2b,Distance metric learning for image and webpage comparison. (Apprentissage de distance pour la comparaison d'images et de pages Web),"Distance metric learning for image and webpage
+omparison
+Marc Teva Law
+To cite this version:
+Marc Teva Law. Distance metric learning for image and webpage comparison. Image Processing. Uni-
+versité Pierre et Marie Curie - Paris VI, 2015. English. <NNT : 2015PA066019>. <tel-01135698v2>
+HAL Id: tel-01135698
+https://tel.archives-ouvertes.fr/tel-01135698v2
+Submitted on 18 Mar 2015
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de
+recherche français ou étrangers, des laboratoires"
+e0cac58f3855cd84b9d28f508b2f7711e0d7e44a,3a: a Person Re-identification System via Attribute Augmentation and Aggregation,"978-1-5090-4117-6/17/$31.00 ©2017 IEEE
+ICASSP 2017"
+e0181f7596b475f7c7d31fd1eccad8e9b7379180,Facial Expression Recognition for Traumatic Brain Injured Patients,
+e00bdb0b046c4d21517ca808a4233a6fd5f3faee,Efficient Retina-like Resampling from Cartesian Images,"VII Workshop de Vis˜ao Computacional – WVC 2011
+Efficient Retina-like Resampling from Cartesian Images
+Hugo Vieira Neto, Diogo Rosa Kuiaski and Gustavo Benvenutti Borba
+Graduate School of Electrical Engineering and Applied Computer Science
+Federal University of Technology - Paran´a, Brazil"
+e09c7bbf1bef602018928acb395f09448a0366b8,Learning beautiful (and ugly) attributes,"MARCHESOTTI, PERRONNIN: LEARNING BEAUTIFUL (AND UGLY) ATTRIBUTES
+Learning beautiful (and ugly) attributes
+Luca Marchesotti
+Florent Perronnin
+Xerox Research Centre Europe
+Meylan, France"
+e05444e51d292bda871388c22b97400ed4cf73a8,An Overview of Recent Approaches in Person Re-Identification,An Overview of Recent Approaches in Person Re-Identification
+e0939b4518a5ad649ba04194f74f3413c793f28e,Mind-reading machines : automated inference of complex mental states Rana,"Technical Report
+UCAM-CL-TR-636
+ISSN 1476-2986
+Number 636
+Computer Laboratory
+Mind-reading machines:
+utomated inference
+of complex mental states
+Rana Ayman el Kaliouby
+July 2005
+5 JJ Thomson Avenue
+Cambridge CB3 0FD
+United Kingdom
+phone +44 1223 763500
+http://www.cl.cam.ac.uk/"
+e01ac06aa1f0b193a620bf70c5dad91128a1bc90,CAPTAIN: Comprehensive Composition Assistance for Photo Taking,"International Journal on Computer Vision manuscript No.
+(will be inserted by the editor)
+CAPTAIN: Comprehensive Composition Assistance for Photo
+Taking
+Farshid Farhat · Mohammad Mahdi Kamani · James Z. Wang
+Received: date / Accepted: date"
+e0e71b59a34c97d15e5ff148fb9a43b892d45bd5,Facial Expression Emotion Detection for Real-Time Embedded Systems,"Article
+Facial Expression Emotion Detection for Real-Time
+Embedded Systems †
+Saeed Turabzadeh 1, Hongying Meng 1,* ID , Rafiq M. Swash 1 ID , Matus Pleva 2 ID and Jozef Juhar 2 ID
+Department of Electronic and Computer Engineering, Brunel University London, Uxbridge UB8 3PH, UK;
+(S.T.); (R.M.S.)
+Department of Electronics and Multimedia Telecommunications, Technical University of Kosice, Letna 9,
+04001 Kosice, Slovakia; (M.P.); (J.J.)
+* Correspondence: Tel.: +44-1895-265496
+This paper is an extended version of our paper in Proceedings of Innovative Computing Technology
+(INTECH 2017), Luton, UK, 16–18 August 2017; with permission from IEEE.
+Received: 15 December 2017; Accepted: 22 January 2018; Published: 26 January 2018"
+e0ed0e2d189ff73701ec72e167d44df4eb6e864d,Recognition of static and dynamic facial expressions: a study review,"Recognition of static and dynamic facial expressions: a study review
+Estudos de Psicologia, 18(1), janeiro-março/2013, 125-130
+Nelson Torro Alves
+Federal University of Paraíba"
+e018c7f468a9b61cd6e7dcbc40b332a8a25808ae,Face Recognition by Face Bunch Graph Method,"Face Recognition by Face Bunch Graph Method
+JIRI STASTNY*, VLADISLAV SKORPIL**
+* Department of Automation and Computer Science,
+** Department of Telekommunications,
+Brno University of Technology,
+Purkynova 118, 612 00 Brno,
+CZECH REPUBLIC,"
+e065a2cb4534492ccf46d0afc81b9ad8b420c5ec,SFace: An Efficient Network for Face Detection in Large Scale Variations,"SFace: An Ef‌f‌icient Network for Face Detection
+in Large Scale Variations
+Jianfeng Wang12∗, Ye Yuan 1†, Boxun Li†, Gang Yu† and Sun Jian†
+College of Software, Beihang University∗
+Megvii Inc. (Face++)†"
+e013c650c7c6b480a1b692bedb663947cd9d260f,Robust Image Analysis With Sparse Representation on Quantized Visual Features,"Robust Image Analysis With Sparse Representation
+on Quantized Visual Features
+Bing-Kun Bao, Guangyu Zhu, Jialie Shen, and Shuicheng Yan, Senior Member, IEEE"
+e01058388d139e027482a7d89a2997606f7ef4fd,Global-residual and Local-boundary Refinement Networks for Rectifying Scene Parsing Predictions,"Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+Input (b) FCN Based Model (c) GRN (d) Input (f) LRN (e) FCN Based Model Figure1:ResultofFCNbasedmodel(b)hasinconsistentlabelsinwall,curtainandbedsidetable,whichcanberefinedbytheproposedGRN(c).ResultofFCNbasedmodel(e)hasimpreciseanddiscon-tinuousobjectboundariesofcabinet,tableandchairs,whichcanberefinedbytheproposedLRN(f).stepinmanypracticalframeworks.Forexample,inobjectdetection,bounding-boxrefinement[GidarisandKomodakis,2015]iswidelyusedin[Heetal.,2016][Belletal.,2016][Shrivastavaetal.,2016],bringingsignificantimprovementofbounding-boxlocalizationandscoring.Inspiredbyitssuccess,wedesigntwonewrefinementnetworksparticularlyforrectifyingtheparsingpredictions,frombothglobalandlocalviewsrespectively.Eachofthetwonetworkscanbeemployedaftertheexistingparsingframeworksindividually.Moreover,cascadingthemtogetherforrefinementcangainmorepreciseparsingresults.Firstly,weconsiderperformingrefinementfromtheglobalview.Inconsistentparsingresultsareverycommoninpre-dictionsofexistingsceneparsingframeworks,asshowninFigure1(b).Toaddressthisproblem,wedesigntheGlobal-residualRefinementNetwork(GRN)throughexploit-ingglobalcontextualinformationandspatiallayoutrelation-shipsduringrefining.ThisnetworktakestheoriginalimagesandtheKconfidencemaps(i.e.,theoutputofthelastlayerbeforeSoftMaxlayer,eachforoneoftheKsemanticclasses)asinput.Thenoutputstheglobalparsingresidual,whichwillbeaddedtotheinputconfidencemapstoobtaintheglobalrectifyingresults.Thisnetworkeffectivelycapturesglobalcontextualinformationbyiterativelyusingadeepneuralnet-workwithlargereceptivefields.AfterglobalrefinementbyGRN,somemislabelingcanbecorrectedandsomeinconsis-"
+e00526ff149bd61f6811ba2f2145ed22d9306319,Personal Space Regulation in Childhood Autism Spectrum Disorders,"Personal Space Regulation in Childhood Autism
+Spectrum Disorders
+Erica Gessaroli1,2, Erica Santelli3, Giuseppe di Pellegrino1,4*, Francesca Frassinetti1,2*
+Department of Psychology, University of Bologna, Bologna, Italy, 2 Fondazione Salvatore Maugeri, Clinica del Lavoro e della Riabilitazione, Istituto di Ricovero
+e Cura a Carattere Scientifico, Mantova, Castel Goffredo, Italy, 3 Centro Autismo, Reggio Emilia, Italy, 4 Center for Studies and Research in Cognitive
+Neuroscience, Cesena, Italy"
+e0739088d578b2abf583e30953ffa000620cca98,Efficient Pedestrian Detection in Urban Traffic Scenes,"Efficient Pedestrian Detection in Urban Traffic Scenes
+Dissertation
+Erlangung des Doktorgrades (Dr. rer. nat.)
+Mathematisch-Naturwissenschaftlichen Fakult¨at
+Rheinischen Friedrich-Wilhelms-Universit¨at Bonn
+vorgelegt von
+Shanshan Zhang
+Jiangxi, V.R. China
+Bonn, 2014"
+e0082ae9e466f7c855fb2c2300215ced08f61432,Generative Temporal Models with Spatial Memory for Partially Observed Environments,"Generative Temporal Models with Spatial Memory
+for Partially Observed Environments
+Marco Fraccaro 1 * Danilo Jimenez Rezende 2 Yori Zwols 2 Alexander Pritzel 2 S. M. Ali Eslami 2 Fabio Viola 2"
+e076f818b090e42036821c69727cfa3b7da49373,Social Groups Detection in Crowd through Shape-Augmented Structured Learning,"Social Groups Detection in Crowd Through
+Shape-Augmented Structured Learning
+Francesco Solera and Simone Calderara
+DIEF University of Modena and Reggio Emilia, Italy"
+e0515dc0157a89de48e1120662afdd7fe606b544,Perception Science in the Age of Deep Neural Networks,"SPECIALTY GRAND CHALLENGE
+published: 02 February 2017
+doi: 10.3389/fpsyg.2017.00142
+Perception Science in the Age of
+Deep Neural Networks
+Rufin VanRullen 1, 2*
+Centre National de la Recherche Scientifique, UMR 5549, Faculté de Médecine Purpan, Toulouse, France, 2 Université de
+Toulouse, Centre de Recherche Cerveau et Cognition, Université Paul Sabatier, Toulouse, France
+Keywords: perception, neuroscience, psychology, neural networks, deep learning, artificial intelligence
+For decades, perception was considered a unique ability of biological systems, little understood in
+its inner workings, and virtually impossible to match in artificial systems. But this status quo was
+upturned in recent years, with dramatic improvements in computer models of perception brought
+bout by “deep learning” approaches. What does all the ruckus about a “new dawn of artificial
+intelligence” imply for the neuroscientific and psychological study of perception? Is it a threat, an
+opportunity, or maybe a little of both?
+WHILE WE WERE SLEEPING...
+My personal journey in the field of perception science started about 20 years ago. For as long as
+I can remember, we perception scientists have exploited in our papers and grant proposals the
+lack of human-level artificial perception systems, both as a justification for scientific inquiry, and
+s a convenient excuse for using a cautious, methodical approach—i.e., “baby steps.” Visual object"
+e0e511a5d58a8d090ad169be4fcfdbeaef097a70,Leveraging Cognitive Computing for Gender and Emotion Detection,"Leveraging Cognitive Computing for Gender and
+Emotion Detection
+Andrea Corriga1, Simone Cusimano1, Francesca M. Malloci1, Lodovica
+Marchesi1 and Diego Reforgiato Recupero1
+Department of Mathematics and Computer Science,
+University of Cagliari, Via Ospedale 72, 09124, Cagliari"
+4640dfc0bfe7923c08d0c762a9c33b52b9029409,Head Movement and Facial Expression Transfer from 2D Video to a 3D Model,"Head Movement and Facial Expression Transfer
+from 2D Video to a 3D Model
+Mairead Grogan
+A dissertation submitted to the University of Dublin, Trinity College,
+in partial fulfilment of the requirements for the degree of
+Master of Science in Computer Science (Interactive Entertainment Technology)
+University of Dublin, Trinity College"
+46a4551a6d53a3cd10474ef3945f546f45ef76ee,Robust and continuous estimation of driver gaze zone by dynamic analysis of multiple face videos,"014 IEEE Intelligent Vehicles Symposium (IV)
+June 8-11, 2014. Dearborn, Michigan, USA
+978-1-4799-3637-3/14/$31.00 ©2014 IEEE"
+4686bdcee01520ed6a769943f112b2471e436208,Fast search based on generalized similarity measure,"Utsumi et al. IPSJ Transactions on Computer Vision and
+Applications (2017) 9:11
+DOI 10.1186/s41074-017-0024-5
+IPSJ Transactions on Computer
+Vision and Applications
+EXPRESS PAPER
+Open Access
+Fast search based on generalized
+similarity measure
+Yuzuko Utsumi*†, Tomoya Mizuno†, Masakazu Iwamura and Koichi Kise"
+4688787d064e59023a304f7c9af950d192ddd33e,Investigating the Discriminative Power of Keystroke Sound,"Investigating the Discriminative Power of Keystroke
+Sound
+Joseph Roth Student Member, IEEE,, Xiaoming Liu, Member, IEEE, Arun Ross, Senior Member, IEEE,
+nd Dimitris Metaxas, Member, IEEE"
+46d0a519da10160a20a3070cc53e5b9401066526,Incremental Learning of Random Forests for Large-Scale Image Classification,"Incremental Learning of Random Forests for
+Large-Scale Image Classification
+Marko Ristin, Matthieu Guillaumin, Juergen Gall, Member, IEEE and Luc Van Gool, Member, IEEE"
+46f2611dc4a9302e0ac00a79456fa162461a8c80,Spatio-Temporal Channel Correlation Networks for Action Classification,"for Action Classification
+Ali Diba1,4,(cid:63), Mohsen Fayyaz3,(cid:63), Vivek Sharma2, M.Mahdi Arzani4, Rahman
+Yousefzadeh4, Juergen Gall3, Luc Van Gool1,4
+ESAT-PSI, KU Leuven, 2CV:HCI, KIT, Karlsruhe, 3University of Bonn, 4Sensifai"
+46c52f92e10fd2f2dddda162ad7995a1658e1245,Finding Socio-Textual Associations Among Locations,"Series ISSN: 2367-2005
+0.5441/002/edbt.2017.12"
+46a01565e6afe7c074affb752e7069ee3bf2e4ef,Local Descriptors Encoded by Fisher Vectors for Person Re-identification,"Local Descriptors Encoded by Fisher Vectors for Person
+Re-identification
+Bingpeng Ma, Yu Su, Fr´ed´eric Jurie
+To cite this version:
+Bingpeng Ma, Yu Su, Fr´ed´eric Jurie. Local Descriptors Encoded by Fisher Vectors for Person
+Re-identification. 12th European Conference on Computer Vision (ECCV) Workshops, 2012,
+Italy. pp.413-422, 2012, <10.1007/978-3-642-33863-2 41>. <hal-00806066>
+HAL Id: hal-00806066
+https://hal.archives-ouvertes.fr/hal-00806066
+Submitted on 29 Mar 2013
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destin´ee au d´epˆot et `a la diffusion de documents
+scientifiques de niveau recherche, publi´es ou non,
+´emanant des ´etablissements d’enseignement et de"
+46994b489f7c673d031f6ef644e84ebe5d843d93,A learning-based visual saliency prediction model for stereoscopic 3D video (LBVS-3D),"A Learning-Based Visual Saliency Prediction
+Model for Stereoscopic 3D Video (LBVS-3D)
+Amin Banitalebi-Dehkordi, Mahsa T. Pourazad, and Panos Nasiopoulos"
+46386d4aa6a2b96106ab1d18658103622b24f9d8,Google Street View images support the development of vision-based driver assistance systems,"Google Street View Images Support the Development of
+Vision-Based Driver Assistance Systems
+Jan Salmen∗, Sebastian Houben∗, and Marc Schlipsing∗"
+462e4d0b35bf571bfc35dcd8e9bd589dca07a464,"Inverted Residuals and Linear Bottlenecks: Mobile Networks for Classification, Detection and Segmentation","Inverted Residuals and Linear Bottlenecks: Mobile Networks for
+Classification, Detection and Segmentation
+Mark Sandler Andrew Howard Menglong Zhu Andrey Zhmoginov Liang-Chieh Chen
+{sandler, howarda, menglong, azhmogin,
+Google Inc."
+46282f10271875647219b641dac2cc01c7dc8ab2,Psychopathic traits are associated with reduced fixations to the eye region of fearful faces.,"018, Vol. 127, No. 1, 43–50
+0021-843X/18/$12.00
+© 2018 American Psychological Association
+http://dx.doi.org/10.1037/abn0000322
+Psychopathic Traits Are Associated With Reduced Fixations to the Eye
+Region of Fearful Faces
+Monika Dargis, Richard C. Wolf, and Michael Koenigs
+University of Wisconsin–Madison
+Impairments in processing fearful faces have been documented in both children and adults with
+psychopathic traits, suggesting a potential mechanism by which psychopathic individuals develop callous
+nd manipulative interpersonal and affective traits. Recently, research has demonstrated that psycho-
+pathic traits are associated with reduced fixations to the eye regions of faces in samples of children and
+ommunity-dwelling adults, however this relationship has not yet been established in an offender sample
+with high levels of psychopathy. In the current study, we employed eye-tracking with paradigms
+involving the identification and passive viewing of facial expressions of emotion, respectively, in a
+sample of adult male criminal offenders (n ⫽ 108) to elucidate the relationship between visual processing
+of fearful facial expressions and interpersonal and affective psychopathic traits. We found that the
+interpersonal-affective traits of psychopathy were significantly related to fewer fixations to the eyes of
+fear faces during the emotion recognition task. This association was driven particularly by the interper-
+sonal psychopathic traits (e.g., egocentricity, deceitfulness), whereas fear recognition accuracy was"
+4669b079c3ca15aba08130c36ead597014f7341a,GrabCut-Based Human Segmentation in Video Sequences,"Sensors 2012, 12, 15376-15393; doi:10.3390/s121115376
+OPEN ACCESS
+sensors
+ISSN 1424-8220
+www.mdpi.com/journal/sensors
+Article
+GrabCut-Based Human Segmentation in Video Sequences
+Antonio Hern´andez-Vela 1,2,⋆, Miguel Reyes 1,2, V´ıctor Ponce 1,2 and Sergio Escalera 1,2
+Departamento MAIA, Universitat de Barcelona, Gran Via 585, 08007 Barcelona, Spain;
+E-Mails: (M.R.); (V.P.); (S.E.)
+Centre de Visi´o per Computador, Campus UAB, Edifici O, 08193 Bellaterra, Barcelona, Spain
+* Author to whom correspondence should be addressed; E-Mail:
+Tel.: +34-93-402-1897; Fax: +34-93-402-1601.
+Received: 4 September 2012; in revised form: 1 November 2012 / Accepted: 6 November 2012 /
+Published: 9 November 2012"
+463bfb0b55c085cda77c2c6e1583abb64baa5d0a,Learning Arbitrary Potentials in CRFs with Gradient Descent,"Learning Arbitrary Potentials in CRFs with Gradient Descent
+M˚ans Larsson1
+Fredrik Kahl1,2
+Chalmers Univ. of Technology 2Lund Univ.
+Shuai Zheng3 Anurag Arnab3
+Oxford Univ.
+Philip Torr3 Richard Hartley4
+Australian National Univ."
+46f5bb35ea99c62320199b1f0924a4e7c0b001d3,Perspective-Aware CNN For Crowd Counting,"Perspective-Aware CNN For Crowd Counting
+Miaojing Shi, Zhaohui Yang, Chao Xu, Member, IEEE, and Qijun Chen, Senior Member, IEEE"
+465b75fa4b84948e19d8bf2ebf4fe4459c3c87ae,A deformation model to reduce the effect of expressions in 3D face recognition,"Vis Comput (2011) 27: 333–345
+DOI 10.1007/s00371-010-0530-2
+O R I G I NA L A RT I C L E
+A deformation model to reduce the effect of expressions in 3D face
+recognition
+Yueming Wang · Gang Pan · Jianzhuang Liu
+Published online: 5 November 2010
+© Springer-Verlag 2010"
+466a5add15bb5f91e0cfd29a55f5fb159a7980e5,Video Repeat Recognition and Mining by Visual Features,"Video Repeat Recognition and Mining by Visual
+Features
+Xianfeng Yang1and Qi Tian"
+46b031a3e368f25dd1e42f70f21165fef7b16de2,"Faces in the mirror, from the neuroscience of mimicry to the emergence of mentalizing.","doi 10.4436/jass.94037
+Vol. 94 (2016), pp. 113-126
+Faces in the mirror, from the neuroscience of mimicry
+to the emergence of mentalizing
+Antonella Tramacere & Pier Francesco Ferrari
+University of Parma, Dep. of Neuroscience, via Volturno 39, 43100, Parma, Italy
+e-mail:
+Summary - In the current opinion paper, we provide a comparative perspective on specific aspects
+of primate empathic abilities, with particular emphasis on the mirror neuron system associated with
+mouth/face actions and expression. Mouth and faces can be very salient communicative classes of stimuli
+that allow an observer access to the emotional and physiological content of other individuals. We thus
+describe patterns of activations of neural populations related to observation and execution of specific
+mouth actions and emotional facial expressions in some species of monkeys and in humans. Particular
+ttention is given to dynamics of face-to-face interactions in the early phases of development and to
+the differences in the anatomy of facial muscles among different species of primates. We hypothesize
+that increased complexity in social environments and patterns of social development have promoted
+specializations of facial musculature, behavioral repertoires related to production and recognition of
+facial emotional expression, and their neural correlates. In several primates, mirror circuits involving
+parietal-frontal regions, insular regions, cingulate cortices, and amygdala seem to support automatic
+forms of embodied empathy, which probably contribute to facial mimicry and behavioural synchrony."
+46f3b113838e4680caa5fc8bda6e9ae0d35a038c,Automated Dermoscopy Image Analysis of Pigmented Skin Lesions,"Cancers 2010, 2, 262-273; doi:10.3390/cancers2020262
+OPEN ACCESS
+ancers
+ISSN 2072-6694
+www.mdpi.com/journal/cancers
+Review
+Automated Dermoscopy Image Analysis of Pigmented Skin
+Lesions
+Alfonso Baldi 1,2,*, Marco Quartulli 3, Raffaele Murace 2, Emanuele Dragonetti 2,
+Mario Manganaro 3, Oscar Guerra 3 and Stefano Bizzi 3
+Department of Biochemistry, Section of Pathology, Second University of Naples, Via L. Armanni
+5, 80138 Naples, Italy
+Futura-onlus, Via Pordenone 2, 00182 Rome, Italy; E-Mail:
+ACS, Advanced Computer Systems, Via della Bufalotta 378, 00139 Rome, Italy
+* Author to whom correspondence should be addressed; E-Mail:
+Fax: +390815569693.
+Received: 23 February 2010; in revised form: 15 March 2010 / Accepted: 25 March 2010 /
+Published: 26 March 2010"
+4602bbec65b0c718d5887fdf2381fb7cee77a64d,Explicit Occlusion Modeling for 3D Object Class Representations,"Explicit Occlusion Modeling for 3D Object Class Representations
+M. Zeeshan Zia1, Michael Stark2, and Konrad Schindler1
+Photogrammetry and Remote Sensing, ETH Z¨urich, Switzerland
+Stanford University and Max Planck Institute for Informatics"
+46471a285b1d13530f1885622d4551b48c19fc67,Generating Artificial Data for Private Deep Learning,"Generating Artificial Data for Private Deep Learning
+Ecole Polytechnique Fédérale de Lausanne
+Ecole Polytechnique Fédérale de Lausanne
+Aleksei Triastcyn
+Artificial Intelligence Laboratory
+Lausanne, Switzerland
+Boi Faltings
+Artificial Intelligence Laboratory
+Lausanne, Switzerland"
+46d728356b5090bc28461b30cb21a08c3a690195,"Deep Multi-patch Aggregation Network for Image Style, Aesthetics, and Quality Estimation","Deep Multi-Patch Aggregation Network
+for Image Style, Aesthetics, and Quality Estimation
+Xin Lu(cid:63)
+James Z. Wang(cid:63)
+(cid:63)The Pennsylvania State University, University Park, Pennsylvania
+Zhe Lin† Xiaohui Shen† Radom´ır Mˇech†
+Adobe Research, San Jose, California
+{xinlu, {zlin, xshen,"
+46a553e670027e838716e5a1a39577d7cd7a4893,Face Recognition using TSF Model and DWT based Multilevel Illumination Normalization,"International Journal of Science and Research (IJSR)
+ISSN (Online): 2319-7064
+Index Copernicus Value (2013): 6.14 | Impact Factor (2014): 5.611
+Face Recognition using TSF Model and DWT based
+Multilevel Illumination Normalization
+Midhun Madhusoodanan1, Jini Cheriyan2
+M.Tech Scholar (Signal Processing), Department of Electronics and Communication, TKM Institute of Technology,
+MusaliarHills, Karuvelil P.O, Ezhukone, Kollam-691505, Kerala, India
+Assistant Professor, Department of Electronics and Communication, TKM Institute of Technology,
+Musaliar Hills, Karuvelil P.O, Ezhukone, Kollam-691505, Kerala, India
+recognition
+is a"
+4684c487758df6b6bf4b69f3fe22e1aad874378a,A Discriminative Voting Scheme for Object Detection using Hough Forests,"VIJAY KUMAR B G, IOANNIS PATRAS:
+A Discriminative Voting Scheme for Object
+Detection using Hough Forests
+Vijay Kumar.B.G
+Dr Ioannis Patras
+Multimedia Vision Research Group
+Queen Mary, UoL
+London, UK"
+46df854f57b6553b4b3238779e46bf2a3a3fffcf,3D Face Recognition using ICP and Geodesic Computation Coupled Approach,"D Face Recognition using ICP and Geodesic
+Computation Coupled Approach
+Karima Ouji‡, Boulbaba Ben Amor§, Mohsen Ardabilian§,
+Faouzi Ghorbel‡, and Liming Chen§
+§LIRIS, Laboratoire d’InfoRmatique en Image et Systmes d’information,
+6, av. Guy de Collongue, 69134 Ecully, France.
+GRIFT, Groupe de Recherche en Images et Formes de Tunisie,
+Ecole Nationale des Sciences de l’Informatique, Tunisie.
+Key words: 3D face recognition, Iterative Closest Point, Geodesics computa-
+tion, biometric evaluation"
+46538b0d841654a0934e4c75ccd659f6c5309b72,A Novel Approach to Generate Face Biometric Template Using Binary Discriminating Analysis,"Signal & Image Processing : An International Journal (SIPIJ) Vol.5, No.1, February 2014
+A NOVEL APPROACH TO GENERATE FACE
+BIOMETRIC TEMPLATE USING BINARY
+DISCRIMINATING ANALYSIS
+Shraddha S. Shinde1 and Prof. Anagha P. Khedkar2
+P.G. Student, Department of Computer Engineering, MCERC, Nashik (M.S.), India.
+Associate Professor, Department of Computer Engineering,
+MCERC, Nashik (M.S.), India"
+469ee1b00f7bbfe17c698ccded6f48be398f2a44,SURVEy: Techniques for Aging Problems in Face Recognition,"MIT International Journal of Computer Science and Information Technology, Vol. 4, No. 2, August 2014, pp. 82-88
+ISSN 2230-7621©MIT Publications
+SURVEy: Techniques for
+Aging Problems in Face Recognition
+Aashmi
+Sakshi Sahni
+Sakshi Saxena
+Scholar, Computer Science Engg. Dept.
+Moradabad Institute of Technology
+Scholar, Computer Science Engg. Dept.
+Moradabad Institute of Technology
+Scholar, Computer Science Engg. Dept.
+Moradabad Institute of Technology
+Moradabad, U.P., INDIA
+Moradabad, U.P., INDIA
+Moradabad, U.P., INDIA
+E-mail:
+E-mail:
+E-mail:"
+468aaa87ccdba65f3115bd0864f7772b6706c00e,A Survey on Heterogeneous Face Matching : NIR Images to VIS Images,"International Journal of Computer Applications (0975 – 8887)
+Emerging Trends In Computing 2016
+Heterogeneous Face Matching: NIR images to VIS
+Images
+Sandhya R.Waddhavane
+M.E Student
+Department of Computer Engineering
+KKWIEER, Nashik, India.
+Savitribai Phule Pune University,Pune
+S.M.Kamalapur, PhD
+Associate Professor
+Department of Computer Engineering
+KKWIEER, Nashik, India.
+Savitribai Phule Pune University,Pune"
+46c3e8c2b2042b193659c0a613adc72100a2f301,Vision for Robotics By Danica Kragic and Markus Vincze,"Foundations and Trends R(cid:1) in
+Robotics
+Vol. 1, No. 1 (2010) 1–78
+(cid:1) 2009 D. Kragic and M. Vincze
+DOI: 10.1561/2300000001
+Vision for Robotics
+By Danica Kragic and Markus Vincze
+Contents
+Introduction
+.1 Scope and Outline
+Historical Perspective
+.1 Early Start and Industrial Applications
+.2 Biological Influences and Affordances
+.3 Vision Systems
+What Works
+.1 Object Tracking and Pose Estimation
+.2 Visual Servoing–Arms and Platforms
+.3 Reconstruction, Localization, Navigation, and
+Visual SLAM
+.4 Object Recognition"
+4679f4a7da1cf45323c1c458b30d95dbed9c8896,Recognizing Facial Expressions Using Model-Based Image Interpretation,"We are IntechOpen,
+the world’s leading publisher of
+Open Access books
+Built by scientists, for scientists
+,800
+16,000
+Open access books available
+International authors and editors
+Downloads
+Our authors are among the
+Countries delivered to
+TOP 1%
+2.2%
+most cited scientists
+Contributors from top 500 universities
+Selection of our books indexed in the Book Citation Index
+in Web of Science™ Core Collection (BKCI)
+Interested in publishing with us?
+Contact
+Numbers displayed above are based on latest data collected."
+4682fee7dc045aea7177d7f3bfe344aabf153bd5,Tabula rasa: Model transfer for object category detection,"Tabula Rasa: Model Transfer for
+Object Category Detection
+Yusuf Aytar & Andrew Zisserman,
+Department of Engineering Science
+Oxford
+(Presented by Elad Liebman)"
+460845e06ca99f292fa2265beb4e535d20ba16f8,Object Detection for Comics using Manga109 Annotations,"Object Detection for Comics using Manga109
+Annotations
+Toru Ogawa · Atsushi Otsubo · Rei
+Narita · Yusuke Matsui · Toshihiko
+Yamasaki · Kiyoharu Aizawa"
+46106d9f9d9b90401b7984794536e2f45fff1dbe,Learning Distance Functions for Automatic Annotation of Images,"Learning Distance Functions for
+Automatic Annotation of Images
+Josip Krapac and Fr´ed´eric Jurie
+INRIA Rhˆone-Alpes, 655, Avenue de l’Europe, 38334 Saint Ismier Cedex, France"
+463a1ca5f819af35e71ae47ea0e57293691507d3,Soft Biometrics Classification Using Denoising Convolutional Autoencoders and Support Vector Machines,"Soft Biometrics Classification Using Denoising
+Convolutional Autoencoders and Support Vector
+Machines
+Nelson Marcelo Romero Aquino1, Matheus Gutoski2
+Leandro Takeshi Hattori3 and Heitor Silv´erio Lopes4
+Federal University of Technology - Paran´a
+Av. Sete de Setembro, 3165 - Rebou¸cas CEP 80230-901"
+4634bf44a0c994e2bed89686225f8cef601a0224,NLM at ImageCLEF 2018 Visual Question Answering in the Medical Domain,"NLM at ImageCLEF 2018 Visual Question
+Answering in the Medical Domain
+Asma Ben Abacha, Soumya Gayen, Jason J Lau, Sivaramakrishnan
+Rajaraman, and Dina Demner-Fushman
+Lister Hill National Center for Biomedical Communications,
+National Library of Medicine, Bethesda, MD, USA."
+469d249a40639d4ffb62abfb2c25f5aab0812fa4,Image Inspired Poetry Generation in XiaoIce,"Image Inspired Poetry Generation in XiaoIce∗
+Wen-Feng Cheng1,2, Chao-Chung Wu2, Ruihua Song1, Jianlong Fu1, Xing Xie1, Jian-Yun Nie3
+{wencheng, rsong, jianf,
+Microsoft, 2National Taiwan University, 3University of Montreal"
+466212a84d5b60f4517e8ab3e4473c3c9e081897,Thermal-Visible Registration of Human Silhouettes: a Similarity Measure Performance Evaluation,"Thermal-Visible Registration of Human Silhouettes: a
+Similarity Measure Performance Evaluation
+Guillaume-Alexandre Bilodeaua,∗, Atousa Torabib, Pierre-Luc St-Charlesa,
+Dorra Riahia
+LITIV Lab., Department of Computer and Software Engineering,
+´EcolePolytechnique de Montr´eal,
+P.O. Box 6079, Station Centre-ville, Montr´eal
+Qu´ebec, Canada, H3C 3A7
+LISA, Dept. IRO,
+Universit´e de Montr´eal,
+Montr´eal, Qu´ebec, Canada, H2C 3J7"
+2c9179fec33f69a5c1a453034dc7d3d3302839d3,Exploiting Hierarchical Dense Structures on Hypergraphs for Multi-Object Tracking,"Exploiting Hierarchical Dense Structures
+on Hypergraphs for Multi-Object Tracking
+Longyin Wen, Zhen Lei, Siwei Lyu, Stan Z. Li, Fellow, IEEE, and Ming-Hsuan Yang"
+2cdc1b728c90d4da31f924879a39d00008d52daa,A Side of Data with My Robot: Three Datasets for Mobile Manipulation in Human Environments,"A Side of Data with My Robot: Three Datasets for Mobile Manipulation in Human Environments
+Matei Ciocarlie, Member, IEEE, Caroline Pantofaru, Member, IEEE, Kaijen Hsiao, Member, IEEE,
+Gary Bradski, Member, IEEE, Peter Brook, and Ethan Dreyfuss"
+2ce2560cf59db59ce313bbeb004e8ce55c5ce928,Anthropometric 3D Face Recognition,"Int J Comput Vis
+DOI 10.1007/s11263-010-0360-8
+Anthropometric 3D Face Recognition
+Shalini Gupta · Mia K. Markey · Alan C. Bovik
+Received: 3 July 2009 / Accepted: 20 May 2010
+© Springer Science+Business Media, LLC 2010"
+2cc0e431d7cc0bcb926b9a19e7be8a3592d670d4,NovaMedSearch: a multimodal search engine for medical case-based retrieval,"NovaMedSearch: A multimodal search engine for medical
+ase-based retrieval
+André Mourão
+Flávio Martins
+Faculdade de Ciências e Tecnologia
+Universidade Nova de Lisboa
+Departamento de Informática
+Caparica, Portugal"
+2c8743089d9c7df04883405a31b5fbe494f175b4,Real-time full-body human gender recognition in (RGB)-D data,"Washington State Convention Center
+Seattle, Washington, May 26-30, 2015
+978-1-4799-6922-7/15/$31.00 ©2015 IEEE"
+2c5ff99e7e9769677df3eeab9f198e3ead016c35,Registration of 3D facial surfaces using covariance matrix pyramids,"Anchorage Convention District
+May 3-8, 2010, Anchorage, Alaska, USA
+978-1-4244-5040-4/10/$26.00 ©2010 IEEE"
+2c93c8da5dfe5c50119949881f90ac5a0a4f39fe,Advanced local motion patterns for macro and micro facial expression recognition,"Advanced local motion patterns for macro and micro facial
+expression recognition
+B. Allaerta,∗, IM. Bilascoa, C. Djerabaa
+Univ. Lille, CNRS, Centrale Lille, UMR 9189 - CRIStAL -
+Centre de Recherche en Informatique Signal et Automatique de Lille, F-59000 Lille, France"
+2c34bf897bad780e124d5539099405c28f3279ac,Robust Face Recognition via Block Sparse Bayesian Learning,"Robust Face Recognition via Block Sparse Bayesian Learning
+Taiyong Li1,2, Zhilin Zhang3,4,∗
+School of Financial Information Engineering, Southwestern University of Finance and Economics, Chengdu 610074,
+China
+Institute of Chinese Payment System, Southwestern University of Finance and Economics, Chengdu 610074, China
+Department of Electrical and Computer Engineering, University of California at San Diego, La Jolla, CA 92093-0407,
+Samsung R&D Institute America - Dallas, 1301 East Lookout Drive, Richardson, TX 75082, USA"
+2cc4ae2e864321cdab13c90144d4810464b24275,Face Recognition Using Optimized 3D Information from Stereo Images,"Face Recognition Using Optimized 3D
+Information from Stereo Images
+Changhan Park1 and Joonki Paik2
+Advanced Technology R&D Center, Samsung Thales Co., Ltd., 2Graduate School of
+Advanced Imaging Science, Multimedia, and Film Chung-Ang University, Seoul
+Korea
+. Introduction
+Human biometric characteristics are unique, so it can not be easily duplicated [1]. Such
+information
+includes; facial, hands, torso, fingerprints, etc. Potential applications,
+economical efficiency, and user convenience make the face detection and recognition
+technique an important commodity compared to other biometric features [2], [3]. It can also
+use a low-cost personal computer (PC) camera instead of expensive equipments, and require
+minimal user interface. Recently, extensive research using 3D face data has been carried out
+in order to overcome the limits of 2D face detection and feature extraction [2], which
+includes PCA [3], neural networks (NN) [4], support vector machines (SVM) [5], hidden
+markov models (HMM) [6], and linear discriminant analysis (LDA) [7]. Among them, PCA
+nd LDA methods with self-learning method are most widely used [3]. The frontal face
+image database provides fairly high recognition rate. However, if the view data of facial
+rotation, illumination and pose change is not acquired, the correct recognition rate"
+2cac8ab4088e2bdd32dcb276b86459427355085c,A Face-to-Face Neural Conversation Model,"A Face-to-Face Neural Conversation Model
+Hang Chu1
+Daiqing Li1 Sanja Fidler1
+University of Toronto 2Vector Institute
+{chuhang1122, daiqing,"
+2c2786ea6386f2d611fc9dbf209362699b104f83,1)local Feature Representations for Facial Expression Recognition Based on Differences of Gray Color Values of Neighboring Pixels,1)LOCAL FEATURE REPRESENTATIONS FOR FACIAL EXPRESSION RECOGNITION BASED ON DIFFERENCES OF GRAY COLOR VALUES OF NEIGHBORING PIXELS Mohammad Shahidul Islam A Dissertation Submitted in Partial Fulfillment of the Requirement for the Degree of Doctor of Philosophy (Computer Science and Information Systems) School of Applied Statistics National Institute of Development Administration 2013
+2c92839418a64728438c351a42f6dc5ad0c6e686,Pose-Aware Face Recognition in the Wild,"Pose-Aware Face Recognition in the Wild
+Iacopo Masi1
+Prem Natarajan2
+USC Institute for Robotics and Intelligent Systems (IRIS), Los Angeles, CA
+G´erard Medioni1
+Stephen Rawls2
+USC Information Sciences Institute (ISI), Marina Del Rey, CA"
+2c848cc514293414d916c0e5931baf1e8583eabc,An automatic facial expression recognition system evaluated by different classifiers,"An automatic facial expression recognition system
+evaluated by different classifiers
+Caroline Silva∗, Andrews Sobral∗ and Raissa Tavares Vieira†
+Programa de P´os-Graduac¸˜ao em Mecatrˆonica
+Universidade Federal da Bahia,
+Email:
+Email:
+Department of Electrical Engineering - EESC/USP
+Email:"
+2cd03c6e78d09bb98872bb34bb70e08c32dc5f7e,Pedestrian Alignment Network for Large-scale Person Re-identification,"Noname manuscript No.
+(will be inserted by the editor)
+Pedestrian Alignment Network for
+Large-scale Person Re-identification
+Zhedong Zheng · Liang Zheng · Yi Yang
+Received: date / Accepted: date"
+2c883977e4292806739041cf8409b2f6df171aee,Are Haar-Like Rectangular Features for Biometric Recognition Reducible?,"Aalborg Universitet
+Are Haar-like Rectangular Features for Biometric Recognition Reducible?
+Nasrollahi, Kamal; Moeslund, Thomas B.
+Published in:
+Progress in Pattern Recognition, Image Analysis, Computer Vision, and Applications
+DOI (link to publication from Publisher):
+0.1007/978-3-642-41827-3_42
+Publication date:
+Document Version
+Early version, also known as pre-print
+Link to publication from Aalborg University
+Citation for published version (APA):
+Nasrollahi, K., & Moeslund, T. B. (2013). Are Haar-like Rectangular Features for Biometric Recognition
+Reducible? In J. Ruiz-Shulcloper, & G. Sanniti di Baja (Eds.), Progress in Pattern Recognition, Image Analysis,
+Computer Vision, and Applications (Vol. 8259, pp. 334-341). Springer Berlin Heidelberg: Springer Publishing
+Company. Lecture Notes in Computer Science, DOI: 10.1007/978-3-642-41827-3_42
+General rights
+Copyright and moral rights for the publications made accessible in the public portal are retained by the authors and/or other copyright owners
+nd it is a condition of accessing publications that users recognise and abide by the legal requirements associated with these rights.
+? Users may download and print one copy of any publication from the public portal for the purpose of private study or research."
+2cdd9e445e7259117b995516025fcfc02fa7eebb,Temporal Exemplar-Based Bayesian Networks for Facial Expression Recognition,"Title
+Temporal Exemplar-based Bayesian Networks for facial
+expression recognition
+Author(s)
+Shang, L; Chan, KP
+Citation
+Proceedings - 7Th International Conference On Machine
+Learning And Applications, Icmla 2008, 2008, p. 16-22
+Issued Date
+http://hdl.handle.net/10722/61208
+Rights
+This work is licensed under a Creative Commons Attribution-
+NonCommercial-NoDerivatives 4.0 International License.;
+International Conference on Machine Learning and Applications
+Proceedings. Copyright © IEEE.; ©2008 IEEE. Personal use of
+this material is permitted. However, permission to
+reprint/republish this material for advertising or promotional
+purposes or for creating new collective works for resale or
+redistribution to servers or lists, or to reuse any copyrighted
+omponent of this work in other works must be obtained from"
+2c98165dd72bac574ed463b00f1dd4c276808cb4,Efficient Object Pixel-Level Categorization Using Bag of Features,"Ef‌f‌icient Object Pixel-Level Categorization using
+Bag of Features
+David Aldavert1, Arnau Ramisa2, Ricardo Toledo1, and Ramon Lopez de
+Mantaras2
+Computer Vision Center (CVC)
+Dept. Ci`encies de la Computaci´o
+Universitat Aut`onoma de Barcelona (UAB), 08193, Bellaterra, Spain
+Artificial Intelligence Research Institute (IIIA-CSIC)
+Campus de la UAB, 08193, Bellaterra, Spain"
+2c07d9a383e0bb7e1c8ba07084ba8bcf71af2aad,Robust Ear Recognition via Nonnegative Sparse Representation of Gabor Orientation Information,"Hindawi Publishing Corporation
+e Scientific World Journal
+Volume 2014, Article ID 131605, 11 pages
+http://dx.doi.org/10.1155/2014/131605
+Research Article
+Robust Ear Recognition via Nonnegative Sparse Representation
+of Gabor Orientation Information
+Baoqing Zhang, Zhichun Mu, Hui Zeng, and Shuang Luo
+School of Automation and Electrical Engineering, University of Science and Technology Beijing, Beijing 100083, China
+Correspondence should be addressed to Zhichun Mu; muzc
+Received 21 December 2013; Accepted 18 January 2014; Published 24 February 2014
+Academic Editors: S. Kobashi and A. Materka
+Copyright © 2014 Baoqing Zhang et al. This is an open access article distributed under the Creative Commons Attribution License,
+which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
+Orientation information is critical to the accuracy of ear recognition systems. In this paper, a new feature extraction approach is
+investigated for ear recognition by using orientation information of Gabor wavelets. The proposed Gabor orientation feature can
+not only avoid too much redundancy in conventional Gabor feature but also tend to extract more precise orientation information of
+the ear shape contours. Then, Gabor orientation feature based nonnegative sparse representation classification (Gabor orientation
++ NSRC) is proposed for ear recognition. Compared with SRC in which the sparse coding coefficients can be negative, the
+nonnegativity of NSRC conforms to the intuitive notion of combining parts to form a whole and therefore is more consistent"
+2c5d1e0719f3ad7f66e1763685ae536806f0c23b,AENet: Learning Deep Audio Features for Video Analysis,"AENet: Learning Deep Audio Features for Video
+Analysis
+Naoya Takahashi, Member, IEEE, Michael Gygli, Member, IEEE, and Luc Van Gool, Member, IEEE"
+2c8f24f859bbbc4193d4d83645ef467bcf25adc2,Classification in the Presence of Label Noise: A Survey,"Classification in the Presence of
+Label Noise: a Survey
+Benoît Frénay and Michel Verleysen, Member, IEEE"
+2c564f5241b0905baafc3677e7ca15c27fd2c6e7,An Integrated Approach to Contextual Face Detection,"AN INTEGRATED APPROACH TO CONTEXTUAL FACE
+DETECTION.
+Santi Segu´ı1, Michal Drozdzal1,2, Petia Radeva1,2 and Jordi Vitri`a1,2
+Computer Vision Center, Universitat Aut`onoma de Barcelona, Bellaterra, Spain
+Dept. Matem`atica Aplicada i An`alisi, Universitat de Barcelona, Barcelona, Spain
+{ssegui, michal, petia,
+Keywords:
+face detection, object detection."
+2c7932c2096669113328a75d1ad1d1bfb8f86ad0,Multi30K: Multilingual English-German Image Descriptions,"Proceedings of the 5th Workshop on Vision and Language, pages 70–74,
+Berlin, Germany, August 12 2016. c(cid:13)2016 Association for Computational Linguistics"
+2c786b32a621a52fc7d00499e4b056f149a4fba7,Face Recognition with Decision Tree-Based Local Binary Patterns,"Face Recognition with Decision Tree-based Local
+Binary Patterns
+Daniel Maturana, Domingo Mery and ´Alvaro Soto
+Department of Computer Science, Pontificia Universidad Cat´olica de Chile"
+2cf7383e238fe37516e2607c4741f79a230834bf,A new Sparse Coding Approach for Human Face and Action Recognition,"A new Sparse Coding Approach for Human Face and Action
+Recognition
+Mohsen Nikpour*
+Department of Electrical and Computer Engineering, Babol Noushirvani University of Technology, Babol, Iran
+Mohammad Reza Karami Molaei
+Department of Electrical and Computer Engineering, Babol Noushirvani University of Technology, Babol, Iran
+Reza Ghaderi
+Department of nuclear Engineering, Shahid Beheshti University of Tehran, Tehran, Iran
+Received: 27/Jul/2016 Revised: 07/Jan/2017 Accepted: 14/Jan/2017"
+2cdd5b50a67e4615cb0892beaac12664ec53b81f,Mirror mirror: crowdsourcing better portraits,"To appear in ACM TOG 33(6).
+Mirror Mirror: Crowdsourcing Better Portraits
+Jun-Yan Zhu1
+Aseem Agarwala2
+Alexei A. Efros1
+Eli Shechtman2
+Jue Wang2
+University of California, Berkeley1 Adobe2
+Figure 1: We collect thousands of portraits by capturing video of a subject while they watch movie clips designed to elicit a range of positive
+emotions. We use crowdsourcing and machine learning to train models that can predict attractiveness scores of different expressions. These
+models can be used to select a subject’s best expressions across a range of emotions, from more serious professional portraits to big smiles."
+2c5b5a5e4b8cd001e535118c2fa90bff95d51648,Combining Facial Dynamics With Appearance for Age Estimation,"Combining Facial Dynamics With Appearance
+for Age Estimation
+Hamdi Dibeklio˘glu, Member, IEEE, Fares Alnajar, Student Member, IEEE,
+Albert Ali Salah, Member, IEEE, and Theo Gevers, Member, IEEE"
+2cdde47c27a8ecd391cbb6b2dea64b73282c7491,Order-aware Convolutional Pooling for Video Based Action Recognition,"ORDER-AWARE CONVOLUTIONAL POOLING FOR VIDEO BASED ACTION RECOGNITION
+Order-aware Convolutional Pooling for Video Based
+Action Recognition
+Peng Wang, Lingqiao Liu, Chunhua Shen, and Heng Tao Shen"
+2cc8371c483f76fff65a5fb1c9cc89e974ce83ea,Ridiculously Fast Shot Boundary Detection with Fully Convolutional Neural Networks,"Ridiculously Fast Shot Boundary Detection with Fully Convolutional Neural
+Networks
+Michael Gygli
+gifs.com
+Zurich, Switzerland"
+2cad358676854505517307314728e8920fe53d77,Mixture of Ridge Regressors for Human Pose Estimation,"#1754
+CVPR 2012 Submission #1754. CONFIDENTIAL REVIEW COPY. DO NOT DISTRIBUTE.
+#1754
+Mixture of Ridge Regressors
+for Human Pose Estimation
+Anonymous CVPR submission
+Paper ID 1754"
+2cf5f2091f9c2d9ab97086756c47cd11522a6ef3,MPIIGaze: Real-World Dataset and Deep Appearance-Based Gaze Estimation,"MPIIGaze: Real-World Dataset and Deep
+Appearance-Based Gaze Estimation
+Xucong Zhang, Yusuke Sugano∗, Mario Fritz, Andreas Bulling"
+2c72096bbecd70000f919b1cec3f31a649c94fd5,Neural Network Interpretation via Fine Grained Textual Summarization,"Neural Network Interpretation via Fine-Grained Textual Summarization
+Pei Guo, Connor Anderson, Kolton Pearson, Ryan Farrell
+Brigham Young University"
+2c2bf22e2f0a1817475aefb37e0c4e0404e8d479,Structured Prediction of 3D Human Pose with Deep Neural Networks,"TEKIN ET AL.: STRUCTURED PREDICTION OF 3D HUMAN POSE
+Structured Prediction of 3D Human Pose
+with Deep Neural Networks
+Bugra Tekin∗1
+Isinsu Katircioglu∗1
+Mathieu Salzmann1
+Vincent Lepetit2
+Pascal Fua1
+CVLab
+EPFL,
+Lausanne, Switzerland
+CVARLab
+TU Graz,
+Graz, Austria"
+2c4b96f6c1a520e75eb37c6ee8b844332bc0435c,Automatic Emotion Recognition in Robot-Children Interaction for ASD Treatment,"Automatic Emotion Recognition in Robot-Children Interaction for ASD
+Treatment
+Marco Leo, Marco Del Coco, Pierluigi Carcagn`ı, Cosimo Distante
+ISASI UOS Lecce
+Campus Universitario via Monteroni sn, 73100 Lecce Italy
+Massimo Bernava, Giovanni Pioggia
+ISASI UOS Messina
+Giuseppe Palestra
+Univerisita’ di Bari
+Marine Institute, via Torre Bianca, 98164 Messina Italy
+Via Orabona 4, 70126 Bari, Italy"
+2cc17e1ccb5f1f67f8ce882e683d8c66475330be,Multitarget tracking with the von Mises-Fisher filter and probabilistic data association,"JOURNAL OF ADVANCES IN INFORMATION FUSION
+Multitarget tracking with the von Mises-Fisher filter
+nd probabilistic data association
+Ivan Markovi´c, Mario Bukal, Josip ´Cesi´c and Ivan Petrovi´c"
+2c0a71b5e111d2c7d99c3f23989d317a0d845adc,N-best maximal decoders for part models,"N-best maximal decoders for part models
+Dennis Park Deva Ramanan
+UC Irvine"
+795cea6b95af22238600aa129b1975e83c531858,Sentence Directed Video Object Codetection,"IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE
+Sentence Directed Video Object Codetection
+Haonan Yu, Student Member, IEEE and Jeffrey Mark Siskind, Senior Member, IEEE"
+7950d67f7104e9bd82d957f0ed80f11982802397,Coupled Action Recognition and Pose Estimation from Multiple Views,"Noname manuscript No.
+(will be inserted by the editor)
+Coupled Action Recognition and Pose Estimation from
+Multiple Views
+Angela Yao (cid:1) Juergen Gall (cid:1) Luc Van Gool
+Received: date / Accepted: date"
+79d3e7321e50be745bef92ba1405b486bd1f133d,Emotion Recognition in Simulated Social Interactions,"This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TAFFC.2018.2799593, IEEE
+> TAFFC-2017-04-0117.R1 <
+Transactions on Affective Computing
+Emotion Recognition in Simulated Social
+Interactions
+C. Mumenthaler, D. Sander, and A. S. R. Manstead"
+790aa543151312aef3f7102d64ea699a1d15cb29,Confidence-Weighted Local Expression Predictions for Occlusion Handling in Expression Recognition and Action Unit Detection,"Confidence-Weighted Local Expression Predictions for
+Occlusion Handling in Expression Recognition and Action
+Unit detection
+Arnaud Dapogny1
+Kevin Bailly1
+Séverine Dubuisson1
+Sorbonne Universités, UPMC Univ Paris 06, CNRS, ISIR UMR 7222
+place Jussieu 75005 Paris"
+791eb376d4db96376eba3ef804657c5f0ba7229a,SAFE: Secure authentication with Face and Eyes,"SAFE: Secure Authentication with Face and Eyes
+Arman Boehm(cid:91), Dongqu Chen§, Mario Frank(cid:91), Ling Huang†,
+Cynthia Kuo(cid:93), Tihomir Lolic(cid:91), Ivan Martinovic(cid:63), Dawn Song(cid:91)
+(cid:91) University of California, Berkeley; † Intel Labs; (cid:93) Nokia Research; (cid:63) Oxford University; § Yale University"
+796d5d1f6052cd600e183471a2354751883d8d5d,Feature Extraction Techniques Implementation Review and Case Study,"ISSN: 2278 – 909X
+International Journal of Advanced Research in Electronics and Communication Engineering (IJARECE)
+Volume 4, Issue 12, December 2015
+Feature Extraction Techniques
+Implementation Review and Case Study
+Uma Bhati
+Department of Computer Science & Engineering
+JSS Academy of Technical Education
+Noida-201301
+Krishna Nand Chaturvedi
+Department of Computer Science & Engineering
+JSS Academy of Technical Education
+Noida-201301
+utilizing
+recognition"
+7954a1bd6e693da8f2ae69ad01233e937d600e9b,The Lov\'asz-Softmax loss: A tractable surrogate for the optimization of the intersection-over-union measure in neural networks,"Accepted as a conference paper at CVPR 2018
+The Lov´asz-Softmax loss: A tractable surrogate for the optimization of the
+intersection-over-union measure in neural networks
+Maxim Berman Amal Rannen Triki Matthew B. Blaschko
+Dept. ESAT, Center for Processing Speech and Images
+KU Leuven, Belgium"
+792e656d2297d3b00da73c7a606eb6f539311c25,Force from Motion: Decoding Control Force of Activity in a First Person Video,"Force from Motion: Decoding Control Force of
+Activity in a First Person Video
+Hyun Soo Park and Jianbo Shi"
+79f6a8f777a11fd626185ab549079236629431ac,Pradeep RavikumarDiscriminative Object Categorization with External Semantic Knowledge,"Copyright
+Sung Ju Hwang"
+7910d3a86e03f4c41fbbe8029fab115547be151b,Taming Adversarial Domain Transfer with Structural Constraints for Image Enhancement,"Taming Adversarial Domain Transfer
+with Structural Constraints for Image Enhancement
+Elias Vansteenkiste and Patrick Kern
+Brighter.AI
+Torstrasse 177, Berlin
+{elias,
+Figure 1: Our domain transfer techniques applied to the night-to-day, removing rain and removing fog applications"
+79fc892abaf44a84a758268efd4d1b9e6b64ecf5,Leveraging Random Label Memorization for Unsupervised Pre-Training,"Leveraging Random Label Memorization for Unsupervised Pre-Training
+Vinaychandran Pondenkandath * 1 Michele Alberti * 1 Sammer Puran 1 Rolf Ingold 1 Marcus Liwicki 1 2"
+79e39f3d0577b9c5a47b93eb6d75bec04d14c07a,Person tracking and following with 2D laser scanners,"Person Tracking and Following with 2D Laser Scanners
+Angus Leigh1, Joelle Pineau1, Nicolas Olmedo2, and Hong Zhang2"
+794cf037dac115755cd15295d8c5fc1c00242548,The City Infant Faces Database: A validated set of infant facial expressions,"Behav Res (2018) 50:151–159
+DOI 10.3758/s13428-017-0859-9
+The City Infant Faces Database: A validated set of infant
+facial expressions
+Rebecca Webb 1 & Susan Ayers 1 & Ansgar Endress 2
+Published online: 15 February 2017
+# The Author(s) 2017. This article is published with open access at Springerlink.com"
+79b50cd468fcdba8f3c841c9d28d84ff66fd97fd,What do Deep Networks Like to See?,"What do Deep Networks Like to See?
+Sebastian Palacio∗
+Federico Raue Damian Borth Andreas Dengel
+Joachim Folz∗
+German Research Center for Artificial Intelligence (DFKI)
+J¨orn Hees
+TU Kaiserslautern"
+79bd7fd2b40aadea84bced07f813ffc28c88bc85,Low Rank Matrix Recovery with Simultaneous Presence of Outliers and Sparse Corruption,"Low Rank Matrix Recovery with Simultaneous
+Mostafa Rahmani, Student Member, IEEE and George K. Atia, Member, IEEE"
+79c959833ff49f860e20b6654dbf4d6acdee0230,Hide-and-Seek: A Data Augmentation Technique for Weakly-Supervised Localization and Beyond,"Hide-and-Seek: A Data Augmentation Technique
+for Weakly-Supervised Localization and Beyond
+Krishna Kumar Singh, Hao Yu, Aron Sarmasi, Gautam Pradeep, and Yong Jae Lee, Member, IEEE"
+79b669abf65c2ca323098cf3f19fa7bdd837ff31,Efficient tensor based face recognition,"Deakin Research Online
+This is the published version:
+Rana, Santu, Liu, Wanquan, Lazarescu, Mihai and Venkatesh, Svetha 2008, Efficient tensor
+ased face recognition, in ICPR 2008 : Proceedings of the 19th International Conference on
+Pattern Recognition, IEEE, Washington, D. C., pp. 1-4.
+Available from Deakin Research Online:
+http://hdl.handle.net/10536/DRO/DU:30044585
+Reproduced with the kind permissions of the copyright owner.
+Personal use of this material is permitted. However, permission to reprint/republish this
+material for advertising or promotional purposes or for creating new collective works for
+resale or redistribution to servers or lists, or to reuse any copyrighted component of this work
+in other works must be obtained from the IEEE.
+Copyright : 2008, IEEE"
+790bce6cbe30ef9bc4431c988d0d747da1c6bb1d,Salient Object Detection Using Window Mask Transferring with Multi-layer Background Contrast,"Salient Object Detection using Window Mask
+Transferring with Multi-layer Background
+Contrast
+Quan Zhou1, Shu Cai1, Shaojun Zhu2, and Baoyu Zheng1
+College of Telecom & Inf Eng, Nanjing Univ of Posts & Telecom, P.R. China
+Dept. of Comput & Inf Sci, University of Pennsylvania Philadelphia, PA, USA"
+79f02a006c77f2d7fece8302bf54d851269a515a,A Study of Deep CNN-Based Classification of Open and Closed Eyes Using a Visible Light Camera Sensor,"Article
+A Study of Deep CNN-Based Classification of Open
+nd Closed Eyes Using a Visible Light Camera Sensor
+Ki Wan Kim, Hyung Gil Hong, Gi Pyo Nam and Kang Ryoung Park *
+Division of Electronics and Electrical Engineering, Dongguk University, 30 Pildong-ro 1-gil, Jung-gu,
+Seoul 100-715, Korea; (K.W.K.); (H.G.H.); (G.P.N.)
+* Correspondence: Tel.: +82-10-3111-7022; Fax: +82-2-2277-8735
+Received: 2 June 2017; Accepted: 28 June 2017; Published: 30 June 2017"
+79fc3c10ce0d0f48b25c8cf460048087c97e2e90,Variational Bi-domain Triplet Autoencoder,"Variational learning across domains with triplet
+information
+Rita Kuznetsova1,2, Oleg Bakhteev1,2 and Alexandr Ogaltsov2,3
+Moscow Institute of Physics and Technology
+National Research University Higher School of Economics
+{rita.kuznetsova,
+Antiplagiat Company"
+79dd787b2877cf9ce08762d702589543bda373be,Face detection using SURF cascade,"Face Detection Using SURF Cascade
+Jianguo Li, Tao Wang, Yimin Zhang
+Intel Labs China"
+7917a7549f00306db8775d2d559460fc93dbde5a,DaP 2018 Proceedings of the Workshop on Dialogue and Perception,"DaP 2018
+Proceedings of the Workshop on
+Dialogue and Perception
+Christine Howes, Simon Dobnik and Ellen Breitholtz (eds.)
+Gothenburg, 14–15 June 2018"
+7985ac55e170273dd0ffa6bd756e588bab301d57,Mind's eye: A recurrent visual representation for image caption generation,"Mind’s Eye: A Recurrent Visual Representation for Image Caption Generation
+Xinlei Chen1, C. Lawrence Zitnick2
+Carnegie Mellon University. 2Microsoft Research Redmond.
+A good image description is often said to “paint a picture in your mind’s
+eye.” The creation of a mental image may play a significant role in sentence
+omprehension in humans [3]. In fact, it is often this mental image that is
+remembered long after the exact sentence is forgotten [5, 7]. As an illus-
+trative example, Figure 1 shows how a mental image may vary and increase
+in richness as a description is read. Could computer vision algorithms that
+omprehend and generate image captions take advantage of similar evolving
+visual representations?
+Recently, several papers have explored learning joint feature spaces for
+images and their descriptions [2, 4, 9]. These approaches project image
+features and sentence features into a common space, which may be used
+for image search or for ranking image captions. Various approaches were
+used to learn the projection, including Kernel Canonical Correlation Anal-
+ysis (KCCA) [2], recursive neural networks [9], or deep neural networks
+[4]. While these approaches project both semantics and visual features to
+common embedding, they are not able to perform the inverse projection.
+That is, they cannot generate novel sentences or visual depictions from the"
+79d13b74952449667c769be76dac9065db1acc22,"Fine-grained Recognition: Data, Recognition, and Application a Dissertation Submitted to the Department of Computer Science and the Committee on Graduate Studies of Stanford University in Partial Fulfillment of the Requirements for the Degree of Doctor of Philosophy","FINE-GRAINED RECOGNITION:
+DATA, RECOGNITION, AND APPLICATION
+A DISSERTATION
+SUBMITTED TO THE DEPARTMENT OF COMPUTER SCIENCE
+AND THE COMMITTEE ON GRADUATE STUDIES
+OF STANFORD UNIVERSITY
+IN PARTIAL FULFILLMENT OF THE REQUIREMENTS
+FOR THE DEGREE OF
+DOCTOR OF PHILOSOPHY
+Jonathan Krause
+October 2016"
+796e333796024acf662fe76c4761607eaaa98a5d,Nested multi-instance image classification,"Nested multi-instance image classification
+Anonymous Authors"
+794fd0fb684f90704e108677edb40d3ff6a85f8c,"EyeLad: Remote Eye Tracking Image Labeling Tool - Supportive Eye, Eyelid and Pupil Labeling Tool for Remote Eye Tracking Videos","EyeLad:Remote Eye Tracking Image Labeling Tool
+Supportive eye, eyelid and pupil labeling tool for remote eye tracking videos.
+Wolfgang Fuhl1, Thiago Santini1, David Geisler1, Thomas K¨ubler1, and Enkelejda Kasneci1
+{wolfgang.fuhl, thiago.santini, david.geisler, thomas.kuebler,
+Perception Engineering, University of Tbingen, Tbingen, Germany
+Keywords:
+data labeling, image processing, feature tracking, object detection, eye tracking data, remote eye tracking"
+793e896c2f66fb66bfc6c834f2678cf349af4e20,Incorporating Computation Time Measures During Heterogeneous Features Selection in a Boosted Cascade People Detector,"Incorporating Computation Time Measures during
+Heterogeneous Features Selection in a Boosted Cascade
+People Detector
+Alhayat Ali Mekonnen, Frédéric Lerasle, Ariane Herbulot, Cyril Briand
+To cite this version:
+Alhayat Ali Mekonnen, Frédéric Lerasle, Ariane Herbulot, Cyril Briand. Incorporating Computation
+Time Measures during Heterogeneous Features Selection in a Boosted Cascade People Detector. Inter-
+national Journal of Pattern Recognition and Artificial Intelligence, World Scientific Publishing, 2016,
+0 (8), pp.1655022. <10.1142/S0218001416550223>. <hal-01300472>
+HAL Id: hal-01300472
+https://hal.archives-ouvertes.fr/hal-01300472
+Submitted on 11 Apr 2016
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents"
+7960336aed2aa701c147ccfe36d153046f1500bc,Occlusion Reasoning for Multiple Object Visual Tracking,"OCCLUSION REASONING
+FOR MULTIPLE OBJECT VISUAL TRACKING
+ZHENG WU
+Dissertation submitted in partial fulfillment
+of the requirements for the degree of
+Doctor of Philosophy
+BOSTON
+UNIVERSITY"
+79f12f28b060221f3b80ea1b7b16779ef9362ca8,Investigations of face expertise in the social developmental disorders.,"Jason J.S. Barton,
+MD, PhD, FRCPC
+Rebecca L. Hefter, BSc
+Mariya V.
+Cherkasova, BSc
+Dara S. Manoach,
+Address correspondence and
+reprint requests to Dr. Jason
+J.S. Barton, Neuro-
+ophthalmology Section D, VGH
+Eye Care Center, 2550 Willow
+Street, Vancouver, BC Canada
+V5Z 3N9
+Investigations of face expertise in the
+social developmental disorders"
+79e7f1e13e8aafee6558729804cf1284134815b3,Deep Representation Learning for Domain Adaptation of Semantic Image Segmentation,"BENBIHI, GEIST, PRADALIER: DEEP REPRESENTATION LEARNING
+Deep Representation Learning for Domain
+Adaptation of Semantic Image Segmentation
+Assia Benbihi1
+Matthieu Geist2
+Cedric Pradalier1
+UMI 2958 GT-CNRS – GeorgiaTech
+Lorraine
+Metz, France
+Université de Lorraine
+CNRS LIEC UNR 7360,
+Metz, France"
+79335495e54446541a3655d145911beba7c29d7d,The face inversion effect in opponent-stimulus rivalry,"ORIGINAL RESEARCH ARTICLE
+published: 15 May 2014
+doi: 10.3389/fnhum.2014.00295
+The face inversion effect in opponent-stimulus rivalry
+Malte Persike*, Bozana Meinhardt-Injac and Günter Meinhardt
+Research Methods and Statistics, Department of Psychology, Institute of Psychology, Johannes Gutenberg University Mainz, Mainz, Germany
+Edited by:
+Davide Rivolta, University of East
+London, UK
+Reviewed by:
+Guillaume A. Rousselet, University
+of Glasgow, UK
+Timo Stein, Charité
+Universitätsmedizin Berlin, Germany
+*Correspondence:
+Malte Persike, Research Methods
+nd Statistics, Department of
+Psychology, Institute of Psychology,
+Johannes Gutenberg University
+Mainz, Mainz, Rheinland-Pfalz,"
+7918698ffa86cdd6123bc2f1f613be1ab38c0d2f,Learning to Recognize Faces in Realistic Conditions,"Learning to Recognize Faces in Realistic Conditions
+Anonymous Author(s)
+Affiliation
+Address
+email"
+79ade61f677dcadfc2b46444d2e0275d25ca1f06,Nonnegative Tucker decomposition with alpha-divergence,"NONNEGATIVE TUCKER DECOMPOSITION WITH ALPHA-DIVERGENCE
+Yong-Deok Kim §, Andrzej Cichocki †, Seungjin Choi §
+§ Department of Computer Science, POSTECH, Korea
+Brain Science Institute, RIKEN, Japan"
+795bd86fc22ec544e7cd9b3d3c2ccabe72de54ec,Max Margin AND / OR Graph Learning for Efficient Articulated Object,"Noname manuscript No.
+(will be inserted by the editor)
+Max Margin AND/OR Graph Learning for Efficient Articulated Object
+Parsing
+Long (Leo) Zhu · Yuanhao Chen · Chenxi Lin · Alan Yuille
+the date of receipt and acceptance should be inserted later"
+79815f31f42708fd59da345f8fa79f635a070730,Autoregressive Quantile Networks for Generative Modeling,"Autoregressive Quantile Networks for Generative Modeling
+Georg Ostrovski * 1 Will Dabney * 1 R´emi Munos 1"
+2d919473cf43e2522b2366271b778ce6ce7dc75c,Appearance-Based Re-identification of Humans in Low-Resolution Videos Using Means of Covariance Descriptors,"Appearance-based Re-Identification of Humans in Low-Resolution Videos
+using Means of Covariance Descriptors
+Fraunhofer Institute of Optronics, System Technologies and Image Exploitation IOSB
+J¨urgen Metzler
+76131 Karlsruhe, Germany"
+2d8ffa4a27b3e3b792b2d2516bbcb1a47c114846,Multi-view Laplacian Eigenmaps Based on Bag-of-Neighbors For RGBD Human Emotion Recognition,"JOURNAL OF LATEX CLASS FILES
+Multi-view Laplacian Eigenmaps
+Based on Bag-of-Neighbors
+For RGBD Human Emotion Recognition
+Shenglan Liu, Member, IEEE, Shuai Guo, Hong Qiao, Senior Member, IEEE, Yang Wang, Bin Wang,
+Wenbo Luo, Mingming Zhang, Keye Zhang, and Bixuan Du"
+2dfc48168c0de9e6c7135293c95b7d794fcfbbbf,Query-Driven Locally Adaptive Fisher Faces and Expert-Model for Face Recognition,"-4244-1437-7/07/$20.00 ©2007 IEEE
+I - 141
+ICIP 2007"
+2d27e2d8188743c4e3ca30fda5c25e70775f03e8,FollowMe: Person following and gesture recognition with a quadrocopter,"FollowMe: Person Following and
+Gesture Recognition with a Quadrocopter
+Tayyab Naseer*, J¨urgen Sturm†, and Daniel Cremers†
+*Department of Computer Science, University of Freiburg, Germany
+Department of Computer Science, Technical University of Munich, Germany"
+2db0d42192618d0c7419321fac06b887d96dea53,Image Set Classification for Low Resolution Surveillance,"Image Set Classification
+for Low Resolution Surveillance
+Uzair Nadeem, Syed Afaq Ali Shah, Mohammed Bennamoun, Roberto Togneri
+nd Ferdous Sohel"
+2d532fd0636fd49dd893c9dff7fe615f974ec826,Causal Inference in Nonverbal Dyadic Communication with Relevant Interval Selection and Granger Causality,"Causal Inference in Nonverbal Dyadic Communication with Relevant
+Interval Selection and Granger Causality
+Lea M¨uller1, Maha Shadaydeh1∗, Martin Th¨ummel1, Thomas Kessler2, Dana Schneider2 and Joachim
+Denzler1,3
+Computer Vision Group, Friedrich Schiller University of Jena, Ernst-Abbe-Platz 2, 07743 Jena, Germany
+Department of Social Psychology, Friedrich Schiller University of Jena, Humboldtstrasse 26, 07743 Jena, Germany
+Michael Stifel Center, Ernst-Abbe-Platz 2, 07743 Jena, Germany
+Keywords:
+Nonverbal emotional communication, Granger causality, maximally coherent intervals"
+2d54dc50bbc1a0a63b6f1000bc255f88d57a7a63,It's All Fun and Games until Someone Annotates: Video Games with a Purpose for Linguistic Annotation,"Transactions of the Association for Computational Linguistics, 2 (2014) 449–463. Action Editor: Mirella Lapata.
+Submitted 10/2013; Revised 03/2014; Revised 08/2014; Published 10/2014. c(cid:13)2014 Association for Computational Linguistics."
+2d294c58b2afb529b26c49d3c92293431f5f98d0,Maximum Margin Projection Subspace Learning for Visual Data Analysis,"Maximum Margin Projection Subspace Learning
+for Visual Data Analysis
+Symeon Nikitidis, Anastasios Tefas, Member, IEEE, and Ioannis Pitas, Fellow, IEEE"
+2df731a01db3caf45105c40ac266f76fe1871470,Affective issues in adaptive educational environments,"Neapolis University
+HEPHAESTUS Repository
+School of Information Sciences
+http://hephaestus.nup.ac.cy
+Book chapters
+Affective Issues in Adaptive Educational Environments
+Leontidis, Makis
+IGI Global
+http://hdl.handle.net/11728/6301
+Downloaded from HEPHAESTUS Repository, Neapolis University institutional repository"
+2d51b52b3eeae8877d1a76ca564a35b8e5051c9d,AU recognition on 3D faces based on an extended statistical facial feature model,"AU Recognition on 3D Faces Based On An Extended Statistical Facial
+Feature Model
+Xi Zhao, Emmanuel Dellandr´ea, Liming Chen and Dimitris Samaras"
+2da845c75bf9ff02bd27b6e2ceb4732e89b05fad,Linear Support Tensor Machine With LSK Channels: Pedestrian Detection in Thermal Infrared Images,"Linear Support Tensor Machine:
+Pedestrian Detection in Thermal Infrared Images
+Sujoy Kumar Biswas, Student Member, IEEE, Peyman Milanfar, Fellow, IEEE"
+2d690c63b00e68782666ebf86ac0756fad100a18,Multiple-view face hallucination by a novel regression analysis in tensor space,"The International Arab Journal of Information Technology, Vol. 13, No. 6, November 2016
+Multiple-View Face Hallucination by a Novel
+Regression Analysis in Tensor Space
+Faculty of Engineering and Technology, Panyapiwat Institute of Management, Thailand
+Parinya Sanguansat"
+2d6130f043e69849fc0443bb489c5d21f933eddd,Convolutional LSTM Networks for Video-based Person Re-identification,"Noname manuscript No.
+(will be inserted by the editor)
+Deep Recurrent Convolutional Networks for Video-based Person
+Re-identification: An End-to-End Approach
+Lin Wu · Chunhua Shen · Anton van den Hengel"
+2d1f710ba593833cdb0b63880f60146504cf1dc5,Linguistically-driven Framework for Computationally Efficient and Scalable Sign Recognition,"Linguistically-driven Framework for Computationally
+Efficient and Scalable Sign Recognition
+Dimitris Metaxas*, Mark Dilsizian*, Carol Neidle**
+*Rutgers University, **Boston University
+*Rutgers University, CBIM, Department of Computer Science, 617 Bowser Road, Piscataway, NJ 08854
+**Boston University Linguistics, 621 Commonwealth Ave., Boston, MA 02215"
+2dc62458979dfc00ec195258ea8809077c5de442,Robust Painting Recognition and Registration for Mobile Augmented Reality,"JOURNAL OF LATEX CLASS FILES, VOL. 6, NO. 1, JANUARY 2007
+Robust Painting Recognition and Registration
+for Mobile Augmented Reality
+Niki Martinel*, Student Member, IEEE, Christian Micheloni, Member, IEEE,
+nd Gian Luca Foresti, Senior Member, IEEE"
+2d120c8c74bc029a14fb0726ef103c873a5090eb,Real-Time Gender Classification by Face,"(IJACSA) International Journal of Advanced Computer Science and Applications,
+Vol. 7, No. 3, 2016
+Real-Time Gender Classification by Face
+Eman Fares Al Mashagba
+Computer Sciences Department
+Zarqa University
+Zarqa, Jordan"
+2d88e7922d9f046ace0234f9f96f570ee848a5b5,Detection under Privileged Information,"Building Better Detection with Privileged Information
+Z. Berkay Celik
+Department of CSE
+The Pennsylvania State
+University
+Patrick McDaniel
+Department of CSE
+The Pennsylvania State
+University
+Rauf Izmailov
+Applied Communication
+Sciences
+Basking Ridge, NJ, US
+Nicolas Papernot
+Department of CSE
+The Pennsylvania State
+University
+Ananthram Swami
+Army Research
+Laboratory"
+2d0dfa8779aefa1a9a89a1b400188fa9114b4c0a,Functional Map of the World,"Functional Map of the World
+Gordon Christie1
+Neil Fendley1
+The Johns Hopkins University Applied Physics Laboratory
+James Wilson2
+Ryan Mukherjee1
+DigitalGlobe"
+2dbb4b45b6a392268ce45d16fb944a652d434bd2,Maximal Cliques that Satisfy Hard Constraints with Application to Deformable Object Model Learning,"Maximal Cliques that Satisfy Hard Constraints with
+Application to Deformable Object Model Learning
+Xinggang Wang1∗ Xiang Bai1 Xingwei Yang2† Wenyu Liu1 Longin Jan Latecki3
+Dept. of Electronics and Information Engineering, Huazhong Univ. of Science and Technology, China
+Image Analytics Lab, GE Research, One Research Circle, Niskayuna, NY 12309, USA
+Dept. of Computer and Information Sciences, Temple Univ., USA"
+2d3d4883350a48708cdc0c260479110e5eed965a,Leveraging Visual Question Answering for Image-Caption Ranking,"Leveraging Visual Question Answering for
+Image-Caption Ranking
+Xiao Lin Devi Parikh
+Virginia Tech"
+2d12efd5aef4c180ecfaf65184eb7b56e5a40329,3D Object Recognition Based on Image Features: A Survey,"D Object Recognition Based on Image Features: A
+International Journal of Computer and Information Technology (ISSN: 2279 – 0764)
+Volume 03 – Issue 03, May 2014
+Survey
+Dept. of Information Systems, Faculty of Computers and
+Khaled Alhamzi
+Information, Mansoura University
+Mansoura, Egypt
+Kalhamzi {at} yahoo.com
+Mohammed Elmogy
+Dept. of Information Technology, Faculty of Computers and
+Information, Mansoura University
+Mansoura, Egypt
+Dept. of Information Systems, Faculty of Computers and
+Sherif Barakat
+Information, Mansoura University
+Mansoura, Egypt"
+2d1b8f60f2724efd6c9344870fb60e8525157d70,Parallel Multiscale Autoregressive Density Estimation,"Parallel Multiscale Autoregressive Density Estimation
+Scott Reed 1 A¨aron van den Oord 1 Nal Kalchbrenner 1 Sergio G´omez Colmenarejo 1 Ziyu Wang 1
+Yutian Chen 1 Dan Belov 1 Nando de Freitas 1"
+2d05e768c64628c034db858b7154c6cbd580b2d5,FACIAL EXPRESSION RECOGNITION : Machine Learning using C #,"Neda Firoz et al, International Journal of Computer Science and Mobile Computing, Vol.4 Issue.8, August- 2015, pg. 431-446
+Available Online at www.ijcsmc.com
+International Journal of Computer Science and Mobile Computing
+A Monthly Journal of Computer Science and Information Technology
+IJCSMC, Vol. 4, Issue. 8, August 2015, pg.431 – 446
+RESEARCH ARTICLE
+ISSN 2320–088X
+FACIAL EXPRESSION RECOGNITION:
+Machine Learning using C#
+Author: Neda Firoz
+Advisor: Dr. Prashant Ankur Jain"
+2d95cf1df9701de410792997205c71208bde98d9,Visual-Inertial based autonomous navigation of an Unmanned Aerial Vehicle in GPS-Denied environments,"FACULDADE DE ENGENHARIA DA UNIVERSIDADE DO PORTO
+Visual-Inertial based autonomous
+navigation of an Unmanned Aerial
+Vehicle in GPS-Denied environments
+Francisco de Babo Martins
+EEC0035 - PREPARAÇÃO DA DISSERTAÇÃO
+Mestrado Integrado em Engenharia Electrotécnica e de Computadores
+Supervisor: Luís Teixeira
+February 18, 2015"
+2d42b5915ca18fdc5fa3542bad48981c65f0452b,Generalization and Equilibrium in Generative Adversarial Nets (GANs),"Generalization and Equilibrium in Generative Adversarial Nets
+(GANs)
+Sanjeev Arora∗
+Rong Ge †
+Yingyu Liang‡
+Tengyu Ma§
+Yi Zhang¶"
+2d072cd43de8d17ce3198fae4469c498f97c6277,Random Cascaded-Regression Copse for Robust Facial Landmark Detection,"Random Cascaded-Regression Copse for Robust
+Facial Landmark Detection
+Zhen-Hua Feng, Student Member, IEEE, Patrik Huber, Josef Kittler, Life Member, IEEE, William Christmas,
+nd Xiao-Jun Wu"
+2d71e0464a55ef2f424017ce91a6bcc6fd83f6c3,A Survey on:Image Process using Two-Stage Crawler,"International Journal of Computer Applications (0975 – 8887)
+National Conference on Advancements in Computer & Information Technology (NCACIT-2016)
+A Survey on: Image Process using Two- Stage Crawler
+Nilesh Wani
+Assistant Professor
+SPPU, Pune
+Department of Computer Engg
+Department of Computer Engg
+Department of Computer Engg
+Dipak Bodade
+BE Student
+SPPU, Pune
+Savita Gunjal
+BE Student
+SPPU, Pune
+Varsha Mahadik
+BE Student
+Department of Computer Engg
+SPPU, Pune
+dditional"
+2d84c0d96332bb4fbd8acced98e726aabbf15591,UNIVERSITY OF CALIFORNIA RIVERSIDE Investigating the Role of Saliency for Face Recognition A Dissertation submitted in partial satisfaction of the requirements for the degree of Doctor of Philosophy in Electrical Engineering,"UNIVERSITY OF CALIFORNIA
+RIVERSIDE
+Investigating the Role of Saliency for Face Recognition
+A Dissertation submitted in partial satisfaction
+of the requirements for the degree of
+Doctor of Philosophy
+Electrical Engineering
+Ramya Malur Srinivasan
+March 2015
+Dissertation Committee:
+Professor Amit K Roy-Chowdhury, Chairperson
+Professor Ertem Tuncel
+Professor Conrad Rudolph
+Professor Tamar Shinar"
+2d8d089d368f2982748fde93a959cf5944873673,Visually Guided Spatial Relation Extraction from Text,"Proceedings of NAACL-HLT 2018, pages 788–794
+New Orleans, Louisiana, June 1 - 6, 2018. c(cid:13)2018 Association for Computational Linguistics"
+2d8eff4b085b57788e2f4485c81eb80910f94da0,The impact of organizational performance on the emergence of Asian American leaders.,"Journal of Applied Psychology
+The Impact of Organizational Performance on the
+Emergence of Asian American Leaders
+Seval Gündemir, Andrew M. Carton, and Astrid C. Homan
+Online First Publication, September 24, 2018. http://dx.doi.org/10.1037/apl0000347
+CITATION
+Gündemir, S., Carton, A. M., & Homan, A. C. (2018, September 24). The Impact of Organizational
+Performance on the Emergence of Asian American Leaders. Journal of Applied Psychology.
+Advance online publication. http://dx.doi.org/10.1037/apl0000347"
+2df4d05119fe3fbf1f8112b3ad901c33728b498a,Multi-task Learning for Structured Output Prediction,"Facial landmark detection using structured output deep
+neural networks
+Soufiane Belharbi ∗1, Cl´ement Chatelain∗1, Romain H´erault∗1, and S´ebastien
+Adam∗2
+LITIS EA 4108, INSA de Rouen, Saint ´Etienne du Rouvray 76800, France
+LITIS EA 4108, UFR des Sciences, Universit´e de Rouen, France.
+September 24, 2015"
+2d7d8c468bdf123b50ea473fe78a178bfc50724c,Evaluating multi-modal deep learning systems with microworlds,"Research proposal: Evaluating multi-modal deep
+learning systems with micro-worlds
+Alexander Kuhnle
+University of Cambridge (United Kingdom)
+6th November 2016"
+2d9a49666bd72e7ba06579d9411ceb2df5205466,3D Face Mesh Modeling from Range Images for 3D Face Recognition,"-4244-1437-7/07/$20.00 ©2007 IEEE
+IV - 509
+ICIP 2007"
+2d22a60e69ebdb3fde056adcf4f6a08ccdb6106f,Robust Facial Expression Recognition,"IJREAT International Journal of Research in Engineering & Advanced Technology, Volume 2, Issue 2, Apr-May, 2014
+ISSN: 2320 – 8791 (Impact Factor: 1.479)
+www.ijreat.org
+Robust Facial Expression Recognition
+Mr. Mukund Kumar1, Ms. D. Udaya2
+, 2Computer Science and Engineering, Dr. Pauls Engineering College,Villupuram"
+2d6d4899c892346a9bc8902481212d7553f1bda4,Neural Face Editing with Intrinsic Image Disentangling SUPPLEMENTARY MATERIAL,"Neural Face Editing with Intrinsic Image Disentangling
+SUPPLEMENTARY MATERIAL
+Zhixin Shu1 Ersin Yumer2 Sunil Hadap2 Kalyan Sunkavalli2 Eli Shechtman 2 Dimitris Samaras1,3
+Stony Brook University 2Adobe Research 3 CentraleSup´elec, Universit´e Paris-Saclay
+. Implementation: more details
+In this section, we provide more details regarding
+the implementation of the rendering layers fshading and
+fimage-formation as described in the paper.
+.1. Shading Layer
+The shading layer is rendered with a spherical harmonics
+illumination representation [6, 2, 7, 1].
+where
+= c3n2
+z − c5
+= 2c1nxnz
+= c1n2
+x − c1n2
+The forward process is described by equations (3),(4),
+nd (5) in the main paper. We now provide the backward
+process, i.e., the partial derivatives ∂Si"
+41308edf82ae645923efea2d6979d076b975ee25,Convolutional Scale Invariance for Semantic Segmentation,"Convolutional Scale Invariance
+for Semantic Segmentation
+Ivan Kre(cid:20)so, Denis (cid:20)Cau(cid:20)sevi(cid:19)c, Josip Krapac and Sini(cid:20)sa (cid:20)Segvi(cid:19)c
+Faculty of Electrical Engineering and Computing
+University of Zagreb, Croatia"
+4188bd3ef976ea0dec24a2512b44d7673fd4ad26,Nonlinear Non-Negative Component Analysis Algorithms,"Nonlinear Non-Negative Component
+Analysis Algorithms
+Stefanos Zafeiriou, Member, IEEE, and Maria Petrou, Senior Member, IEEE"
+41000c3a3344676513ef4bfcd392d14c7a9a7599,A Novel Approach For Generating Face Template Using Bda,"A NOVEL APPROACH FOR GENERATING FACE
+TEMPLATE USING BDA
+Shraddha S. Shinde1 and Prof. Anagha P. Khedkar2
+P.G. Student, Department of Computer Engineering, MCERC, Nashik (M.S.), India.
+Associate Professor, Department of Computer Engineering, MCERC, Nashik (M.S.),
+India"
+418b468b804379e8a600bca0395e01bffb7e08de,Class-specific kernel linear regression classification for face recognition under low-resolution and illumination variation conditions,"Chou et al. EURASIP Journal on Advances in Signal Processing (2016) 2016:28
+DOI 10.1186/s13634-016-0328-0
+Open Access
+R ES EAR CH
+Class-specific kernel linear regression
+lassification for face recognition under
+low-resolution and illumination variation
+onditions
+Yang-Ting Chou, Shih-Ming Huang and Jar-Ferr Yang*"
+416c647cd9f8c1d77db8676195dff7ae5dfc1fd8,Grammatical Facial Expressions Recognition with Machine Learning,"Grammatical Facial Expressions Recognition with Machine Learning
+Fernando de Almeida Freitas
+Incluir Tecnologia
+Itajub´a, MG, Brazil
+Universidade de S˜ao Paulo
+S˜ao Paulo, SP, Brazil
+Clodoaldo Aparecido de Moraes Lima
+Sarajane Marques Peres
+Felipe Venˆancio Barbosa
+Universidade de S˜ao Paulo
+S˜ao Paulo, SP, Brazil"
+414722ddd809b460d5b397eaf454fbb697cfb881,Dimensionality Reduction and Classification through PCA and LDA,"International Journal of Computer Applications (0975 – 8887)
+Volume 122 – No.17, July 2015
+Dimensionality Reduction and Classification
+through PCA and LDA
+Telgaonkar Archana H.
+PG Student
+Department of CS and IT
+Dr. BAMU, Aurangabad"
+41f6368bc4ec5e334c81a9d16185205b3acecee3,Machine Learning Methods from Group to Crowd Behaviour Analysis,"Machine learning methods from group to crowd
+ehaviour analysis
+Luis Felipe Borja-Borja1, Marcelo Saval-Calvo2, and Jorge Azorin-Lopez2
+Universidad Central del Ecuador,
+Ciudadela Universitaria Av. Am´erica, Quito, Ecuador
+Computer Technology Department, University of Alicante,
+Carretera San Vicente s/n, 03690, San Vicente del Raspeig (Spain)"
+41dd2ca8929bfdae49a4bf85de74df4723ef9c3b,Correction by Projection: Denoising Images with Generative Adversarial Networks,"CORRECTION BY PROJECTION: DENOISING IMAGES
+WITH GENERATIVE ADVERSARIAL NETWORKS
+Subarna Tripathi
+Zachary C. Lipton
+Truong Q. Nguyen
+UC San Diego
+UC San Diego
+UC San Diego"
+4129e1075c7856d8bebbf0655ae00a4843109429,A Tale of Two Losses : Discriminative Deep Feature Learning for Person Re-Identification,"A Tale of Two Losses: Discriminative Deep Feature Learning for
+Person Re-Identification
+Borgia, A., Hua, Y., & Robertson, N. (2017). A Tale of Two Losses: Discriminative Deep Feature Learning for
+Person Re-Identification. In Irish Machine Vision and Image Processing Conference 2017: Proceedings
+Published in:
+Irish Machine Vision and Image Processing Conference 2017: Proceedings
+Document Version:
+Peer reviewed version
+Queen's University Belfast - Research Portal:
+Link to publication record in Queen's University Belfast Research Portal
+Publisher rights
+© 2017 National University of Ireland Maynooth.
+This work is made available online in accordance with the publisher’s policies. Please refer to any applicable terms of use of the publisher.
+General rights
+Copyright for the publications made accessible via the Queen's University Belfast Research Portal is retained by the author(s) and / or other
+opyright owners and it is a condition of accessing these publications that users recognise and abide by the legal requirements associated
+with these rights.
+Take down policy
+The Research Portal is Queen's institutional repository that provides access to Queen's research output. Every effort has been made to
+ensure that content in the Research Portal does not infringe any person's rights, or applicable UK laws. If you discover content in the"
+414715421e01e8c8b5743c5330e6d2553a08c16d,PoTion : Pose MoTion Representation for Action Recognition,"PoTion: Pose MoTion Representation for Action Recognition
+Philippe Weinzaepfel2
+Inria∗
+NAVER LABS Europe
+J´erˆome Revaud2 Cordelia Schmid1
+Vasileios Choutas1,2"
+41f7c03519a2b108c064a2126daf627edde14c1e,Generic Object Detection using AdaBoost,"Generic Object Detection using AdaBoost
+Ben Weber
+Department of Computer Science
+University of California, Santa Cruz
+Santa Cruz, CA 95064"
+4196e0b77f88ea01cd868c535befb52c2722454f,3D Facial similarity: Automatic assessment versus perceptual judgments,"D Facial Similarity: Automatic Assessment versus Perceptual
+Judgments
+Anush K. Moorthy, Anish Mittal, Sina Jahanbin, Kristen Grauman and Alan C. Bovik"
+41ab4939db641fa4d327071ae9bb0df4a612dc89,Interpreting Face Images by Fitting a Fast Illumination-Based 3D Active Appearance Model,"Interpreting Face Images by Fitting a Fast
+Illumination-Based 3D Active Appearance
+Model
+Salvador E. Ayala-Raggi, Leopoldo Altamirano-Robles, Janeth Cruz-Enriquez
+Instituto Nacional de Astrof´ısica, ´Optica y Electr´onica,
+Luis Enrique Erro #1, 72840 Sta Ma. Tonantzintla. Pue., M´exico
+Coordinaci´on de Ciencias Computacionales
+{saraggi, robles,"
+41a5e043d499967f405e823b959e2ac4fdf9ff71,Extending Recognition in a Changing Environment,"Extending Recognition in a Changing Environment
+Department of Computer Science and Applied Mathematics, The Weizmann Institue of Science, Rehovot, Israel
+Daniel Harari and Shimon Ullman
+{danny.harari,
+Keywords:
+Object Recognition, Video Analysis, Dynamic Model Update, Unsupervised Learning, Bayesian Model."
+41a6196f88beced105d8bc48dd54d5494cc156fb,Using facial images for the diagnosis of genetic syndromes: A survey,"015 International Conference on
+Communications, Signal
+Processing, and their Applications
+(ICCSPA 2015)
+Sharjah, United Arab Emirates
+7-19 February 2015
+IEEE Catalog Number:
+ISBN:
+CFP1574T-POD
+978-1-4799-6533-5"
+41ddd29d9e56bb87b9f988afc75cd597657b2600,R4-A.3: Human Detection & Re-Identification for Mass Transit Environments,"R4-A.3: Human Detection & Re-Identification for
+Mass Transit Environments
+PARTICIPANTS
+Rich Radke
+Title
+Faculty/Staff
+Institution
+Graduate, Undergraduate and REU Students
+Srikrishna Karanam
+Eric Lam
+Degree Pursued
+Institution
+Email
+Month/Year of Graduation
+5/2017
+5/2017
+PROJECT DESCRIPTION
+Project Overview
+The computer vision research problem of human re-identification or “re-id” is generally posed as follows:
+Given a cropped rectangle of pixels representing a human in one view, a re-id algorithm produces a similarity"
+41decbe12a8aa7996163636e09d1ce1372c271cd,Attentive Fashion Grammar Network for Fashion Landmark Detection and Clothing Category Classification,"Attentive Fashion Grammar Network for
+Fashion Landmark Detection and Clothing Category Classification
+Wenguan Wang∗1,2, Yuanlu Xu∗2, Jianbing Shen†1, and Song-Chun Zhu2
+Beijing Lab of Intelligent Information Technology, School of Computer Science, Beijing Institute of Technology, China
+Department of Computer Science and Statistics, University of California, Los Angeles, USA"
+413160257096b9efcd26d8de0d1fa53133b57a3d,Customer satisfaction measuring based on the most significant facial emotion,"Customer satisfaction measuring based on the most
+significant facial emotion
+Mariem Slim, Rostom Kachouri, Ahmed Atitallah
+To cite this version:
+Mariem Slim, Rostom Kachouri, Ahmed Atitallah. Customer satisfaction measuring based on the
+most significant facial emotion. 15th IEEE International Multi-Conference on Systems, Signals
+Devices (SSD 2018), Mar 2018, Hammamet, Tunisia. <hal-01790317>
+HAL Id: hal-01790317
+https://hal-upec-upem.archives-ouvertes.fr/hal-01790317
+Submitted on 11 May 2018
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de"
+41de109bca9343691f1d5720df864cdbeeecd9d0,Facial Emotion Recognition: A Survey and Real-World User Experiences in Mixed Reality,"Article
+Facial Emotion Recognition: A Survey and
+Real-World User Experiences in Mixed Reality
+Dhwani Mehta, Mohammad Faridul Haque Siddiqui and Ahmad Y. Javaid * ID
+EECS Department, The University of Toledo, Toledo, OH 43606, USA; (D.M.);
+(M.F.H.S.)
+* Correspondence: Tel.: +1-419-530-8260
+Received: 10 December 2017; Accepted: 26 January 2018; Published: 1 Febuary 2018"
+41ed93fd97aa76b4abfda7a09168ad1799f34664,Video Event Detection: From Subvolume Localization to Spatiotemporal Path Search,"This document is downloaded from DR-NTU, Nanyang Technological
+University Library, Singapore.
+Title
+Video event detection : from subvolume localization to
+spatio-temporal path search
+Author(s)
+Tran, Du; Yuan, Junsong; Forsyth, David
+Citation
+Tran, D., Yuan, J., & Forsyth, D. (2014). Video Event
+Detection: From Subvolume Localization to
+Spatiotemporal Path Search. IEEE Transactions on
+Pattern Analysis and Machine Intelligence, 36(2), 404-
+http://hdl.handle.net/10220/19322
+Rights
+© 2014 IEEE. Personal use of this material is permitted.
+Permission from IEEE must be obtained for all other
+uses, in any current or future media, including
+reprinting/republishing this material for advertising or
+promotional purposes, creating new collective works, for
+resale or redistribution to servers or lists, or reuse of any"
+41d9a240b711ff76c5448d4bf4df840cc5dad5fc,Image Similarity Using Sparse Representation and Compression Distance,"JOURNAL DRAFT, VOL. X, NO. X, APR 2013
+Image Similarity Using Sparse Representation
+and Compression Distance
+Tanaya Guha, Student Member, IEEE, and Rabab K Ward, Fellow, IEEE"
+419a6fca4c8d73a1e43003edc3f6b610174c41d2,A component based approach improves classification of discrete facial expressions over a holistic approach,"A Component Based Approach Improves Classification of Discrete
+Facial Expressions Over a Holistic Approach
+Kenny Hong, and Stephan K. Chalup, Senior Member, IEEE and Robert A.R. King"
+41a174c27f0b431d62d0f50051bce7f5b3b4ce64,A System for Object Class Detection,"A system for object class detection
+Daniela Hall
+INRIA Rh^one-Alpes, 655, ave de l’Europe,
+8320 St. Ismier, France"
+4131aa28d640d17e1d63ca82e55cc0b280db0737,Coulomb Gans: Provably Optimal Nash Equi-,"Under review as a conference paper at ICLR 2018
+COULOMB GANS: PROVABLY OPTIMAL NASH EQUI-
+LIBRIA VIA POTENTIAL FIELDS
+Anonymous authors
+Paper under double-blind review"
+4180978dbcd09162d166f7449136cb0b320adf1f,Real-time head pose classification in uncontrolled environments with Spatio-Temporal Active Appearance Models,"Real-time head pose classification in uncontrolled environments
+with Spatio-Temporal Active Appearance Models
+Miguel Reyes∗ and Sergio Escalera+ and Petia Radeva +
+Matematica Aplicada i Analisi ,Universitat de Barcelona, Barcelona, Spain
++ Matematica Aplicada i Analisi, Universitat de Barcelona, Barcelona, Spain
++ Matematica Aplicada i Analisi, Universitat de Barcelona, Barcelona, Spain"
+41ea92251c668a99d2b9a31935fc71e6b6d82b6d,Canonical Correlation Analysis of Datasets With a Common Source Graph,"Canonical Correlation Analysis of Datasets
+with a Common Source Graph
+Jia Chen, Gang Wang, Student Member, IEEE,
+Yanning Shen, Student Member, IEEE, and Georgios B. Giannakis, Fellow, IEEE"
+4106c49eb96b506ea1125c27e2b2f32ad79f8c48,"Markovian Tracking-by-Detection from a Single, Uncalibrated Camera","Markovian Tracking-by-Detection from a Single, Uncalibrated Camera
+Michael D. Breitenstein1 Fabian Reichlin1 Bastian Leibe1,2 Esther Koller-Meier1 Luc Van Gool1,3
+ETH Zurich
+RWTH Aachen
+KU Leuven"
+413a1a00f0eab2fcc3dcc0d821fb2f34e85f5d7a,Pedestrian detection by scene dependent classifiers with generative learning,"June 23-26, 2013, Gold Coast, Australia
+978-1-4673-2754-1/13/$31.00 ©2013 IEEE"
+413c960e57ec3fe713e7b3e070cb6072726874bd,A Search Space Strategy for Pedestrian Detection and Localization in World Coordinates,
+41fafb5392ad5e33e5169d870812ab5edca301a1,Tree-Structured Stick Breaking Processes for Hierarchical Data,"TREE-STRUCTURED STICK BREAKING PROCESSES
+FOR HIERARCHICAL DATA
+By Ryan P. Adams, Zoubin Ghahramani and Michael I. Jordan
+Many data are naturally modeled by an unobserved hierarchical
+structure. In this paper we propose a flexible nonparametric prior over
+processes to allow for trees of unbounded width and depth, where data
+can live at any node and are infinitely exchangeable. One can view
+our model as providing infinite mixtures where the components have a
+dependency structure corresponding to an evolutionary diffusion down
+tree. By using a stick-breaking approach, we can apply Markov chain
+Monte Carlo methods based on slice sampling to perform Bayesian
+inference and simulate from the posterior distribution on trees. We
+apply our method to hierarchical clustering of images and topic
+modeling of text data.
+1. Introduction. Structural aspects of models are often critical to ob-
+taining flexible, expressive model families. In many cases, however, the
+structure is unobserved and must be inferred, either as an end in itself or
+to assist in other estimation and prediction tasks. This paper addresses an
+important instance of the structure learning problem: the case when the
+data arise from a latent hierarchy. We take a direct nonparametric Bayesian"
+4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06,Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching,"Nighttime Face Recognition at Long Distance:
+Cross-distance and Cross-spectral Matching
+Hyunju Maenga, Shengcai Liaob, Dongoh Kanga, Seong-Whan Leea,
+Anil K. Jaina;b
+Dept. of Brain and Cognitive Eng. Korea Univ., Seoul, Korea
+Dept. of Comp. Sci. & Eng. Michigan State Univ., E. Lansing, MI, USA 48824"
+41690be86b39c55a26ea056261513ddd726d6601,Heterogeneous microarchitectures trump voltage scaling for low-power cores,"Heterogeneous Microarchitectures Trump Voltage Scaling
+for Low-Power Cores
+Andrew Lukefahr, Shruti Padmanabha, Reetuparna Das, Ronald Dreslinski Jr.,
+Thomas F. Wenisch, and Scott Mahlke
+Advanced Computer Architecture Laboratory
+Ann Arbor, MI, USA
+{lukefahr, shrupad, reetudas, rdreslin, twenisch,"
+4189aa74550c1761dd5927442d0a98ff3d3d1134,Residual Conv-Deconv Grid Network for Semantic Segmentation,"FOURURE ET AL.: RESIDUAL CONV-DECONV GRIDNET
+Residual Conv-Deconv Grid Network for
+Semantic Segmentation
+Univ Lyon, UJM Saint-Etienne,
+CNRS UMR 5516,
+Hubert Curien Lab, F-42023
+Saint-Etienne, France
+INSA-Lyon,
+LIRIS UMR CNRS 5205,
+F-69621,
+France
+Damien Fourure1
+Rémi Emonet1
+Elisa Fromont1
+Damien Muselet1
+Alain Tremeau1
+Christian Wolf2"
+413a184b584dc2b669fbe731ace1e48b22945443,Human Pose Co-Estimation and Applications,"Human Pose Co-Estimation and Applications
+Marcin Eichner and Vittorio Ferrari"
+410017a1810308564dc54cb986b12f079428f966,A functional pipeline framework for landmark identification on 3D surface extracted from volumetric data,"RESEARCH ARTICLE
+A functional pipeline framework for landmark
+identification on 3D surface extracted from
+volumetric data
+Pan Zheng1,2*, Bahari Belaton2*, Iman Yi Liao3, Zainul Ahmad Rajion4,5
+Faculty of Engineering, Computing and Science, Swinburne University of Technology Sarawak Campus,
+Kuching, Malaysia, 2 School of Computer Sciences, Universiti Sains Malaysia, Penang, Malaysia, 3 School of
+Computer Science, The University of Nottingham Malaysia Campus, Semenyih, Malaysia, 4 School of Dental
+Sciences, Universiti Sains Malaysia, Kubang Kerian, Malaysia, 5 College of Dentistry, King Saud bin
+Abdulaziz University for Health Sciences, Riyadh, Kingdom of Saudi Arabia
+* (PZ); (BB)"
+4183d1b79d54f5638063e6c59a2a873ee2cd1bed,Multi-cue pedestrian classification with partial occlusion handling,"Multi-Cue Pedestrian Classification With Partial Occlusion Handling
+Markus Enzweiler1
+Angela Eigenstetter2
+Bernt Schiele2,3
+Dariu M. Gavrila4,5
+Image & Pattern Analysis Group, Univ. of Heidelberg, Germany
+Computer Science Department, TU Darmstadt, Germany
+MPI Informatics, Saarbr¨ucken, Germany
+Environment Perception, Group Research, Daimler AG, Ulm, Germany
+5 Intelligent Autonomous Systems Group, Univ. of Amsterdam, The Netherlands"
+41c1b8f319e27be0c77c3b33cf877c29b1676501,"3D Face Recognition based on Radon Transform, PCA, LDA using KNN and SVM","I.J. Computer Network and Information Security, 2014, 7, 36-43
+Published Online June 2014 in MECS (http://www.mecs-press.org/)
+DOI: 10.5815/ijcnis.2014.07.05
+3D Face Recognition based on Radon Transform,
+PCA, LDA using KNN and SVM
+P. S. Hiremath and Manjunatha Hiremath
+Department of Computer Science, Gulbarga University, Gulbarga – 585106
+e-mail: and
+Karnataka, India"
+83b7578e2d9fa60d33d9336be334f6f2cc4f218f,The S-HOCK dataset: Analyzing crowds at the stadium,"The S-HOCK Dataset: Analyzing Crowds at the Stadium
+Davide Conigliaro1,3, Paolo Rota2, Francesco Setti3, Chiara Bassetti3, Nicola Conci4, Nicu Sebe4, Marco Cristani1,
+University of Verona. 2Vienna Institute of Technology. 3ISTC–CNR (Trento). 4University of Trento.
+The topic of crowd modeling in computer vision usually assumes a sin-
+gle generic typology of crowd, which is very simplistic. In this paper we
+adopt a taxonomy that is widely accepted in sociology, focusing on a partic-
+ular category, the spectator crowd, which is formed by people “interested in
+watching something specific that they came to see” [1]. This can be found
+at the stadiums, amphitheaters, cinema, etc.
+In particular, we propose a
+novel dataset, the Spectators Hockey (S-HOCK), which deals with 4 hockey
+matches during an international tournament.
+The dataset is unique in the crowd literature, and in general in the
+surveillance realm. The dataset analyzes the crowd at different levels of
+detail. At the highest level, it models the network of social connections
+among the public (who knows whom in the neighborhood), what is the sup-
+ported team and what has been the best action in the match; all of this has
+been obtained by interviews at the stadium. At a medium level, spectators
+are localized, and information regarding the pose of their heads and body is
+given. Finally, at a lowest level, a fine grained specification of all the actions"
+83b20fdd3eafd21a6971dacc73d85c484a093bfc,Interleaved Structured Sparse Convolutional Neural Networks,"Interleaved Structured Sparse Convolutional Neural Networks
+Guotian Xie1,2,∗ Jingdong Wang3† Ting Zhang3
+Jianhuang Lai1,2 Richang Hong4 Guo-Jun Qi5
+Sun Yat-Sen University 2Guangdong Key Laboratory of Information Security Technology
+Microsoft Research 4Hefei University of Technology 5University of Central Florida"
+83ca4cca9b28ae58f461b5a192e08dffdc1c76f3,Detecting emotional stress from facial expressions for driving safety,"DETECTING EMOTIONAL STRESS FROM FACIAL EXPRESSIONS FOR DRIVING SAFETY
+Hua Gao, Anil Y¨uce, Jean-Philippe Thiran
+Signal Processing Laboratory (LTS5),
+´Ecole Polytechnique F´ed´erale de Lausanne, Switzerland"
+83c19722450e8f7dcb89dabb38265f19efafba27,A framework with updateable joint images re-ranking for Person Re-identification,"A framework with updateable joint images re-ranking for Person
+Re-identification
+Yuan Mingyue1,2 Yin Dong1,2* Ding Jingwen1,3* Luo Yuhao1,2 Zhou Zhipeng1,2
+Zhu Chengfeng1,2 Zhang Rui1,2
+School of Information Science Technology, USTC, Hefei, Anhui 230027, China
+Key Laboratory of Electromagnetic Space Information of CAS, Hefei, Anhui 230027, China"
+8380b8f4e36c993eef23af42ccb382ae60aceabf,"URBAN-i: From urban scenes to mapping slums, transport modes, and pedestrians in cities using deep learning and computer vision","URBAN-i: From urban scenes to mapping slums, transport modes, and pedestrians
+in cities using deep learning and computer vision
+Mohamed R. Ibrahim1, James Haworth2 and Tao Cheng3
+Department of Civil, Environmental and Geomatic Engineering, University College London (UCL)"
+831fbef657cc5e1bbf298ce6aad6b62f00a5b5d9,Targeted Backdoor Attacks on Deep Learning Systems Using Data Poisoning,
+830b48f210f3905117b335e305166df4ec092b8b,Pixel-Level Encoding and Depth Layering for Instance-Level Semantic Labeling,"Pixel-level Encoding and Depth Layering for
+Instance-level Semantic Labeling
+Jonas Uhrig1,2, Marius Cordts1,3, Uwe Franke1, Thomas Brox2
+Daimler AG R&D, 2University of Freiburg, 3TU Darmstadt"
+8322ed1a3db7c63af40280a782e39fb01bfe96dd,Class label autoencoder for zero-shot learning,"Class label autoencoder for zero-shot learning
+Guangfeng Lina,∗, Caixia Fana, Wanjun Chena, Yajun Chena, Fan Zhaoa
+Information Science Department, Xian University of Technology,
+5 South Jinhua Road, Xi’an, Shaanxi Province 710048, PR China"
+833a2c168849697aae3589bbeef0cbca22808fe8,"Quantity, Contrast, and Convention in Cross-Situated Language Comprehension","Proceedings of the 19th Conference on Computational Language Learning, pages 226–236,
+Beijing, China, July 30-31, 2015. c(cid:13)2015 Association for Computational Linguistics"
+8306e384e7ca48445843bc025b08236cd181d7c6,Histogram of Oriented Gradients with Cell Average Brightness for Human Detection,"Metrol. Meas. Syst., Vol. XXIII (2016), No. 1, pp. 27–36.
+METROLOGY AND MEASUREMENT SYSTEMS
+Index 330930, ISSN 0860-8229
+www.metrology.pg.gda.pl
+HISTOGRAM OF ORIENTED GRADIENTS WITH CELL AVERAGE
+BRIGHTNESS FOR HUMAN DETECTION
+Marek Wójcikowski
+Gdańsk University of Technology, Faculty of Electronics, Telecommunications and Informatics, G. Narutowicza 11/12, 80-233 Gdańsk, Poland
+((cid:1) +48 58 347 1974)"
+83c00537e0c3e226d999a5abf02464e138867e96,Pedestrians and their phones - detecting phone-based activities of pedestrians for autonomous vehicles,"Windsor Oceanico Hotel, Rio de Janeiro, Brazil, November 1-4, 2016
+978-1-5090-1889-5/16/$31.00 ©2016 IEEE"
+832e1d128059dd5ed5fa5a0b0f021a025903f9d5,Pairwise Conditional Random Forests for Facial Expression Recognition,"Pairwise Conditional Random Forests for Facial Expression Recognition
+Arnaud Dapogny1
+Kevin Bailly1
+S´everine Dubuisson1
+Sorbonne Universit´es, UPMC Univ Paris 06, CNRS, ISIR UMR 7222, 4 place Jussieu 75005 Paris"
+83e093a07efcf795db5e3aa3576531d61557dd0d,Facial Landmark Localization Using Robust Relationship Priors and Approximative Gibbs Sampling,"Facial Landmark Localization using Robust
+Relationship Priors and Approximative Gibbs
+Sampling
+Karsten Vogt, Oliver M¨uller and J¨orn Ostermann
+Institut f¨ur Informationsverarbeitung (tnt)
+Leibniz Universit¨at Hannover, Germany
+{vogt, omueller,"
+8326d3e57796dad294ab1c14a0688221550098b6,ABC-GAN: Adaptive Blur and Control for improved training stability of Generative Adversarial Networks,"Adaptive Blur and Control for improved training stability of
+Generative Adversarial Networks
+ABC-GAN:
+Igor Susmelj 3 Eirikur Agustsson 3 Radu Timofte 3"
+8377ac1b2dffb11cf48f456be2531c95d14aa6e5,Improving the Annotation of DeepFashion Images for Fine-grained Attribute Recognition,"Improving the Annotation of DeepFashion
+Images for Fine-grained Attribute Recognition
+Roshanak Zakizadeh, Michele Sasdelli, Yu Qian and Eduard Vazquez
+Cortexica Vision Systems, London, UK"
+838a4bcfeb36dc7bdb4a38f776fc0a70ce8ae9f0,Face Presentation Attack Detection using Biologically-inspired Features,
+83ef7de2669bb2827208fd3a64ac910e276fbdb4,Fully Convolutional Networks for Dense Semantic Labelling of High-Resolution Aerial Imagery,"Fully Convolutional Networks for Dense Semantic Labelling of
+High-Resolution Aerial Imagery
+Jamie Sherrah
+Defence Science & Technology Group
+Edinburgh, South Australia
+email:
+https://au.linkedin.com/jsherrah
+June 9, 2016"
+8397956c7ad3bd24c6c6c0b38866e165367327c0,Social Relation Trait Discovery from Visual LifeLog Data with Facial Multi-Attribute Framework,
+83b4899d2899dd6a8d956eda3c4b89f27f1cd308,A Robust Approach for Eye Localization Under Variable Illuminations,"-4244-1437-7/07/$20.00 ©2007 IEEE
+I - 377
+ICIP 2007"
+8387c58a5a3fd847f9b03760842dd49fec7cbb0e,Two-year-olds with autism orient to nonsocial contingencies rather than biological motion,"Vol 459 | 14 May 2009 | doi:10.1038/nature07868
+LETTERS
+Two-year-olds with autism orient to non-social
+contingencies rather than biological motion
+Ami Klin1, David J. Lin1{, Phillip Gorrindo1{, Gordon Ramsay1,2 & Warren Jones1,3
+Typically developing human infants preferentially attend to bio-
+logical motion within the first days of life1. This ability is highly
+conserved across species2,3 and is believed to be critical for filial
+attachment and for detection of predators4. The neural under-
+pinnings of biological motion perception are overlapping with
+brain regions involved in perception of basic social signals such
+as facial expression and gaze direction5, and preferential attention
+to biological motion is seen as a precursor to the capacity for
+attributing intentions to others6. However, in a serendipitous
+observation7, we recently found that an infant with autism failed
+to recognize point-light displays of biological motion, but was
+instead highly sensitive to the presence of a non-social, physical
+contingency that occurred within the stimuli by chance. This
+observation raised the possibility that perception of biological
+motion may be altered in children with autism from a very early
+833fbf0e4be3ba82e7a1efdbc16813ee849d9942,Restricted Deformable Convolution based Road Scene Semantic Segmentation Using Surround View Cameras,"SUBMITTED TO IEEE TRANSACTIONS ON INTELLIGENT TRANSPORTATION SYSTEMS
+Restricted Deformable Convolution based
+Road Scene Semantic Segmentation
+Using Surround View Cameras
+Liuyuan Deng, Ming Yang, Hao Li, Tianyi Li, Bing Hu, Chunxiang Wang"
+83d1617092b34804c3825fdf4292120c382fe043,Appearance-Based Multimodal Human Tracking and Identification for Healthcare in the Digital Home,"Sensors 2014, 14, 14253-14277; doi:10.3390/s140814253
+OPEN ACCESS
+sensors
+ISSN 1424-8220
+www.mdpi.com/journal/sensors
+Article
+Appearance-Based Multimodal Human Tracking and
+Identification for Healthcare in the Digital Home
+Mau-Tsuen Yang * and Shen-Yen Huang
+Department of Computer Science & Information Engineering, National Dong-Hwa University, No. 1,
+Sec. 2, Da-Hsueh Rd., Shoufeng, Hualien 974, Taiwan; E-Mail:
+* Author to whom correspondence should be addressed; E-Mail:
+Tel.: +886-3-863-4028; Fax: +886-3-863-4010.
+Received: 2 April 2014; in revised form: 3 July 2014 / Accepted: 8 July 2014 /
+Published: 5 August 2014"
+8323af714efe9a3cadb31b309fcc2c36c8acba8f,Automatic Real-Time Facial Expression Recognition for Signed Language Translation,"Automatic Real-Time
+Facial Expression Recognition
+for Signed Language Translation
+Jacob Richard Whitehill
+A thesis submitted in partial fulfillment of the requirements for the de-
+gree of Magister Scientiae in the Department of Computer Science,
+University of the Western Cape.
+May 2006"
+83fd5c23204147844a0528c21e645b757edd7af9,USDOT number localization and recognition from vehicle side-view NIR images,"USDOT Number Localization and Recognition From Vehicle Side-View NIR
+Images
+Orhan Bulan, Safwan Wshah, Ramesh Palghat, Vladimir Kozitsky and Aaron Burry
+Palo Alto Research Center (PARC)
+800 Phillips Rd. Webster NY 14580"
+83ce2c969ea323784b9098b9b170e015d559a1df,Detecting domestic objects with ensembles of view-tuned support vector machine cascades trained on Web images,"Detecting Domestic Objects with Ensembles of
+View-tuned Support Vector Machine Cascades Trained
+on Web Images
+Marco Kortkamp"
+8395cf3535a6628c3bdc9b8d0171568d551f5ff0,Entropy Non-increasing Games for the Improvement of Dataflow Programming,"Entropy Non-increasing Games for the
+Improvement of Dataflow Programming
+Norbert B´atfai, Ren´at´o Besenczi, Gerg˝o Bogacsovics,
+Fanny Monori∗
+February 16, 2017"
+834f5ab0cb374b13a6e19198d550e7a32901a4b2,Face Translation between Images and Videos using Identity-aware CycleGAN,"Face Translation between Images and Videos using Identity-aware CycleGAN
+Zhiwu Huang†, Bernhard Kratzwald†, Danda Pani Paudel†, Jiqing Wu†, Luc Van Gool†‡
+Computer Vision Lab, ETH Zurich, Switzerland
+VISICS, KU Leuven, Belgium
+{zhiwu.huang, paudel, jwu,"
+83df0ec6071dfda29da831860fdb2a1f19a6b3bc,3D Face Recognition Using Joint Differential Invariants,"D Face Recognition Using Joint Differential
+Invariants
+Marinella Cadoni1, Manuele Bicego1,2, and Enrico Grosso1
+Computer Vision Laboratory, DEIR, University of Sassari, Italy
+Computer Science Dept., University of Verona, Italy"
+832aae00e16c647716f1be38de233c9c15af9a28,Feature fusion for facial landmark detection,"Author's Accepted Manuscript
+Feature fusion for facial landmark detection
+Panagiotis Perakis, Theoharis Theoharis, Ioan-
+nis A. Kakadiaris
+Reference:
+S0031-3203(14)00105-8
+http://dx.doi.org/10.1016/j.patcog.2014.03.007
+PR5053
+www.elsevier.com/locate/pr
+To appear in:
+Received date: 10 March 2013
+Revised date: 18 September 2013
+Accepted date: 8 March 2014
+Cite this article as: Panagiotis Perakis, Theoharis Theoharis,
+http://dx.doi.org/10.1016/j.patcog.2014.03.007
+Ioannis A.
+This is a PDF file of an unedited manuscript that has been accepted for
+publication. As a service to our customers we are providing this early version of
+the manuscript. The manuscript will undergo copyediting, typesetting, and
+review of the resulting galley proof before it is published in its final citable form."
+8320dbdd3e4712cca813451cd94a909527652d63,Ear Biometrics,"EAR BIOMETRICS
+Mark Burge
+nd Wilhelm Burger
+Johannes Kepler University(cid:1) Institute of Systems Science(cid:1) A(cid:2)
+urge(cid:1)cast(cid:2)uni(cid:3)linz(cid:2)ac(cid:2)at"
+83d0b7100ddce32e37af72585f9aa4181e6447e3,Online Social Behavior Modeling for Multi-target Tracking,"Online Social Behavior Modeling for Multi-Target Tracking
+Shu Zhang1 Abir Das1 Chong Ding2 Amit K. Roy-Chowdhury1
+University of California, Riverside, CA 92521 USA"
+833cd4265bd8162d3cfb483ce8f31eaef28e7a2e,Towards Effective Gans,"Under review as a conference paper at ICLR 2018
+TOWARDS EFFECTIVE GANS
+FOR DATA DISTRIBUTIONS WITH DIVERSE MODES
+Anonymous authors
+Paper under double-blind review"
+83968f81f23a34e18e850fe2cf68bab51e22e35c,Attention-Driven Parts-Based Object Detection,"Attention-Driven Parts-Based Object Detection
+Ilkka Autio & J.T. Lindgren
+Department of Computer Science
+University of Helsinki
+Finland"
+83e71455ee2070617ea35c02f03b7451187985d1,Faces Recognition with Image Feature Weights and Least Mean Square Learning Approach,"Faces Recognition with Image Feature Weights and Least Mean Square
+Learning Approach
+Dept. of Electrical Engineering, National Taiwan Uni. of Sci. & Technology, Taipei, Taiwan
+Wei-Li Fang, Ying-Kuei Yang and Jung-Kuei Pan
+Email:"
+833bdee366f1e6250dea59bdebdcad271c7cfddd,Bayesian non-parametrics for multi-modal segmentation,"Bayesian Non-Parametrics for
+Multi-Modal Segmentation
+Thesis for obtaining the title of
+Doctor of Engineering Science
+(Dr.-Ing.)
+of the Faculty of Natural Science and Technology I
+of Saarland University
+Wei-Chen Chiu, M.Sc.
+Saarbrücken
+September 2016"
+837e99301e00c2244023a8a48ff98d7b521c93ac,Local Feature Evaluation for a Constrained Local Model Framework,"Local Feature Evaluation for a Constrained
+Local Model Framework
+Maiya Hori(B), Shogo Kawai, Hiroki Yoshimura, and Yoshio Iwai
+Graduate School of Engineering, Tottori University,
+01 Minami 4-chome, Koyama-cho, Tottori 680-8550, Japan"
+83e7254431486d24715d4170680c6cbc8bdb2328,Image retrieval using visual attention,"IMAGE RETRIEVAL USING VISUAL ATTENTION
+Liam M. Mayron
+A Dissertation Submitted to the Faculty of
+The College of Engineering and Computer Science
+in Partial Fulfillment of the Requirements for the Degree of
+Doctor of Philosophy
+Florida Atlantic University
+Boca Raton, Florida
+May 2008"
+83c332971c4534907afc4865179c2de30f2792c4,Sparse and Dense Hybrid Representation via Dictionary Decomposition for Face Recognition,"Sparse And Dense Hybrid Representation
+via Dictionary Decomposition
+for Face Recognition
+Xudong Jiang, Senior Member, IEEE, and Jian Lai, Student Member, IEEE"
+8326b11dd0b81dcc169ce21fc12e0c9d632db6bd,Tracking and Recognition: A Unified Approach on Tracking and Recognition,"ISSN: 2321-8169
+International Journal on Recent and Innovation Trends in Computing and Communication
+Volume: 2 Issue: 11
+3532 – 3539
+_______________________________________________________________________________________________
+Tracking and Recognition: A Unified Approach on Tracking and Recognition
+Ms. Anuja V. Vaidya
+Dr. Mrs. S.B. Patil
+Dept. of Electronics & Communication
+Dept of Electronics & Communication
+Dr. J.J. Magdum College of Engg. Jaysingpur,
+Dr. J.J. Magdum College of Engg. Jaysingpur,
+Maharashtra, India
+Maharashtra, India"
+834b15762f97b4da11a2d851840123dbeee51d33,Landmark-free smile intensity estimation,"Landmark-free smile intensity estimation
+J´ulio C´esar Batista, Olga R. P. Bellon and Luciano Silva
+IMAGO Research Group - Universidade Federal do Paran´a
+Fig. 1. Overview of our method for smile intensity estimation"
+83e7c51c4d6f04049f5a3dbf4ac9e129ed96caee,Spatio-temporal Pain Recognition in CNN-Based Super-Resolved Facial Images,"Aalborg Universitet
+Spatio-Temporal Pain Recognition in CNN-based Super-Resolved Facial Images
+Bellantonio, Marco; Haque, Mohammad Ahsanul; Rodriguez, Pau; Nasrollahi, Kamal; Telve,
+Taisi; Guerrero, Sergio Escalera; Gonzàlez, Jordi; Moeslund, Thomas B.; Rasti, Pejman;
+Anbarjafari, Gholamreza
+Published in:
+Video Analytics
+DOI (link to publication from Publisher):
+10.1007/978-3-319-56687-0_13
+Publication date:
+Document Version
+Accepted author manuscript, peer reviewed version
+Link to publication from Aalborg University
+Citation for published version (APA):
+Bellantonio, M., Haque, M. A., Rodriguez, P., Nasrollahi, K., Telve, T., Guerrero, S. E., ... Anbarjafari, G. (2017).
+Spatio-Temporal Pain Recognition in CNN-based Super-Resolved Facial Images. In Video Analytics: Face and
+Facial Expression Recognition and Audience Measurement Springer. Lecture Notes in Computer Science, Vol..
+10165 https://doi.org/10.1007/978-3-319-56687-0_13
+General rights
+Copyright and moral rights for the publications made accessible in the public portal are retained by the authors and/or other copyright owners"
+83b700f0777a408eb36eef4b1660beb3f6dc1982,Violent behaviour detection using local trajectory response,"See discussions, stats, and author profiles for this publication at: https://www.researchgate.net/publication/317628106
+Violent behaviour detection using local
+trajectory response
+Conference Paper · January 2016
+DOI: 10.1049/ic.2016.0082
+CITATIONS
+authors, including:
+Paul L. Rosin
+Cardiff University
+READS
+David Marshall
+Cardiff University
+31 PUBLICATIONS 7,739 CITATIONS
+98 PUBLICATIONS 2,855 CITATIONS
+SEE PROFILE
+SEE PROFILE
+Simon Christopher Moore
+University of Wales
+08 PUBLICATIONS 1,069 CITATIONS
+SEE PROFILE"
+83a4b9c9ae3f75bf7e4a3222c46d99be7b7998ab,A random forest approach to segmenting and classifying gestures,"A Random Forest Approach to Segmenting and Classifying Gestures
+Ajjen Joshi1, Camille Monnier2, Margrit Betke1 and Stan Sclaroff1
+Department of Computer Science, Boston Univeristy, Boston, MA 02215 USA
+Charles River Analytics, Cambridge, MA 02138 USA"
+833f6ab858f26b848f0d747de502127406f06417,Learning weighted similarity measurements for unconstrained face recognition,"978-1-4244-5654-3/09/$26.00 ©2009 IEEE
+ICIP 2009"
+832a9584e85af1675d49ee35fd13283b21ce3a3f,Generating Photo-Realistic Training Data to Improve Face Recognition Accuracy,"Generating Photo-Realistic Training Data to Improve
+Face Recognition Accuracy
+Daniel S´aez Trigueros, Li Meng
+School of Engineering and Technology
+University of Hertfordshire
+Hatfield AL10 9AB, UK
+Margaret Hartnett
+GBG plc
+London E14 9QD, UK"
+8399c71abc9a820bacd9c4e21c85c461c0b830b3,"Adaboost with ""Keypoint Presence Features"" for Real-Time Vehicle Visual Detection","Author manuscript, published in ""16th World Congress on Intelligent Transport Systems (ITSwc'2009), Sweden (2009)"""
+83963d1454e66d9cc82e28ff4efc562f5fe6b7d3,"Automated detection of feeding strikes by larval fish using continuous high-speed digital video: a novel method to extract quantitative data from fast, sparse kinematic events.","© 2016. Published by The Company of Biologists Ltd | Journal of Experimental Biology (2016) 219, 1608-1617 doi:10.1242/jeb.133751
+METHODS & TECHNIQUES
+Automated detection of feeding strikes by larval fish using
+continuous high-speed digital video: a novel method to extract
+quantitative data from fast, sparse kinematic events
+Eyal Shamur1,‡, Miri Zilka2,*,‡, Tal Hassner1, Victor China3,4, Alex Liberzon5 and Roi Holzman3,4,§
+the observer and subject"
+8309e8f27f3fb6f2ac1b4343a4ad7db09fb8f0ff,Generic versus Salient Region-Based Partitioning for Local Appearance Face Recognition,"Generic versus Salient Region-based Partitioning
+for Local Appearance Face Recognition
+Hazım Kemal Ekenel and Rainer Stiefelhagen
+Computer Science Depatment, Universit¨at Karlsruhe (TH)
+Am Fasanengarten 5, Karlsruhe 76131, Germany
+http://isl.ira.uka.de/cvhci"
+1b02b9413b730b96b91d16dcd61b2420aef97414,Détection de marqueurs affectifs et attentionnels de personnes âgées en interaction avec un robot. (Audio-visual detection of emotional (laugh and smile) and attentional markers for elderly people in social interaction with a robot),"Détection de marqueurs affectifs et attentionnels de
+personnes âgées en interaction avec un robot
+Fan Yang
+To cite this version:
+Fan Yang. Détection de marqueurs affectifs et attentionnels de personnes âgées en interaction
+avec un robot.
+Intelligence artificielle [cs.AI]. Université Paris-Saclay, 2015. Français. <NNT :
+2015SACLS081>. <tel-01280505>
+HAL Id: tel-01280505
+https://tel.archives-ouvertes.fr/tel-01280505
+Submitted on 29 Feb 2016
+HAL is a multi-disciplinary open access
+archive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+abroad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,"
+1bed38bc216f80a50617afa5c6d9cc4b2db72519,Face recognition using early biologically inspired features,"Face Recognition Using Early Biologically Inspired Features
+Min Li, Shenghua Bao, Weihong Qian, and Zhong Su
+IBM China Research Lab, PRC
+fminliml,baoshhua,qianwh,
+Nalini K. Ratha
+IBM Watson Research Center, USA"
+1b55a0ad1d4738a7d46ed787542991d4a05ae27e,Accurate Object Detection and Semantic Segmentation using Gaussian Mixture Model and CNN,"IJARCCE
+ISSN (Online) 2278-1021
+ISSN (Print) 2319 5940
+International Journal of Advanced Research in Computer and Communication Engineering
+Vol. 4, Issue 11, November 2015
+Accurate Object Detection and Semantic
+Segmentation using Gaussian Mixture Model and
+Sakshi Jain1, Satish Dehriya2, Yogendra Kumar Jain3
+Research Scholar, Computer Science & Engg, Samrat Ashok Technological Institute, Vidisha (M.P.), India1
+Assist. Professor, Computer Science & Engg, Samrat Ashok Technological Institute, Vidisha (M.P.), India 2
+Head of the Department, Computer Science & Engg, Samrat Ashok Technological Institute, Vidisha (M.P.), India3"
+1b2183c2b9608b7f815551c9ba602f22205126b1,Facial Reenactment Project Plan,"Facial Reenactment
+Project Plan
+Student:
+Li Wing Yee
+Supervisor:
+Dr. Dirk Scheiders"
+1b1d9b528c69e082dc5685089090bd2d849d887d,MixedPeds: Pedestrian Detection in Unannotated Videos using Synthetically-Generated Human-agents for Training,"MixedPeds: Pedestrian Detection in Unannotated Videos using Synthetically
+Generated Human-agents for Training
+Ernest Cheung, Anson Wong, Aniket Bera, Dinesh Manocha
+Department of Computer Science
+Project Webpage: http://gamma.cs.unc.edu/MixedPeds
+The University of North Carolina at Chapel Hill
+Email: {ernestc, ahtsans, ab,"
+1bb14ddc0326a8e5b44eafd915738c2b1342f392,Title On color texture normalization for active appearance models,"Provided by the author(s) and NUI Galway in accordance with publisher policies. Please cite the published
+version when available.
+Title
+On color texture normalization for active appearance models
+Author(s)
+Ionita, Mircea C.; Corcoran, Peter M.; Buzuloiu, Vasile
+Publication
+2009-05-12
+Publication
+Information
+Ionita, M. C., Corcoran, P., & Buzuloiu, V. (2009). On Color
+Texture Normalization for Active Appearance Models. Image
+Processing, IEEE Transactions on, 18(6), 1372-1378.
+Publisher
+Link to
+publisher's
+version
+http://dx.doi.org/10.1109/TIP.2009.2017163
+Item record
+http://hdl.handle.net/10379/1350"
+1b7a7d291235e4b6e5f97722124070feb26f3cc1,Learning Two-Branch Neural Networks for Image-Text Matching Tasks,"Learning Two-Branch Neural Networks for
+Image-Text Matching Tasks
+Liwei Wang, Yin Li, Jing Huang, Svetlana Lazebnik"
+1ba55051d3957895d77257cc9a5885068fb2e43a,High-Resolution Face Verification Using Pore-Scale Facial Features,"High-Resolution Face Verification Using
+Pore-Scale Facial Features
+Dong Li, Huiling Zhou, and Kin-Man Lam"
+1b8508c6e341dcc803e52ed02968ae944c744f68,Face detection evaluation: a new approach based on the golden ratio $${\Phi}$$,"SIViP manuscript No.
+(will be inserted by the editor)
+Face Detection Evaluation: A New Approach Based on
+the Golden Ratio (cid:8)
+M. Hassaballah (cid:1) Kenji Murakami (cid:1) Shun Ido
+Received: 1 Jan. 2011 /Revised: 9 March 2011/ Accepted: date"
+1b55c4e804d1298cbbb9c507497177014a923d22,Incremental Class Representation Learning for Face Recognition,"Incremental Class Representation
+Learning for Face Recognition
+Degree’s Thesis
+Audiovisual Systems Engineering
+Author:
+Advisors: Elisa Sayrol, Josep Ramon Morros
+Eric Presas Valga
+Universitat Politècnica de Catalunya (UPC)
+016 - 2017"
+1b6394178dbc31d0867f0b44686d224a19d61cf4,EPML: Expanded Parts Based Metric Learning for Occlusion Robust Face Verification,"EPML: Expanded Parts based Metric Learning for
+Occlusion Robust Face Verification
+Gaurav Sharma, Fr´ed´eric Jurie, Patrick P´erez
+To cite this version:
+Gaurav Sharma, Fr´ed´eric Jurie, Patrick P´erez. EPML: Expanded Parts based Metric Learning
+for Occlusion Robust Face Verification. Asian Conference on Computer Vision, Nov 2014, -,
+Singapore. pp.1-15, 2014. <hal-01070657>
+HAL Id: hal-01070657
+https://hal.archives-ouvertes.fr/hal-01070657
+Submitted on 2 Oct 2014
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destin´ee au d´epˆot et `a la diffusion de documents
+scientifiques de niveau recherche, publi´es ou non,
+´emanant des ´etablissements d’enseignement et de"
+1bdef21f093c41df2682a07f05f3548717c7a3d1,Towards Automated Classification of Emotional Facial Expressions,"Towards Automated Classification of Emotional Facial Expressions
+Lewis J. Baker Vanessa LoBue
+Elizabeth Bonawitz & Patrick Shafto
+Department of Mathematics and Computer Science, 2Department of Psychology
+Rutgers University – Newark, 101 Warren St., Newark, NJ, 07102 USA"
+1b2e50412ec151486912f0bfd01703c8ec46b5a7,A Geometric Approach to Face Detector Combining,"A Geometric Approach to Face Detector
+Combining⋆
+Nikolay Degtyarev and Oleg Seredin
+Tula State University
+http://lda.tsu.tula.ru"
+1b150248d856f95da8316da868532a4286b9d58e,Analyzing 3D Objects in Cluttered Images,"Analyzing 3D Objects in Cluttered Images
+Mohsen Hejrati
+UC Irvine
+Deva Ramanan
+UC Irvine"
+1be498d4bbc30c3bfd0029114c784bc2114d67c0,Age and Gender Estimation of Unfiltered Faces,"Age and Gender Estimation of Unfiltered Faces
+Eran Eidinger, Roee Enbar, Tal Hassner*"
+1b3505018e39a794eab032e7e313784b21be42e9,Saliency based Person Re-Identification in Video using Colour Features,"GRD Journals- Global Research and Development Journal for Engineering | Volume 1 | Issue 10 | September 2016
+ISSN: 2455-5703
+Saliency based Person Re-Identification in Video
+using Colour Features
+Srujy Krishna A U
+PG Student
+Shimy Joseph
+Assistant Professor
+Department of Computer Science and Engineering
+Department of Computer Science and Engineering
+Federal Institute Of Science and Technology
+Federal Institute Of Science and Technology"
+1bbec7190ac3ba34ca91d28f145e356a11418b67,Explorer Action Recognition with Dynamic Image Networks,"Action Recognition with Dynamic Image Networks
+Citation for published version:
+Bilen, H, Fernando, B, Gravves, E & Vedaldi, A 2017, 'Action Recognition with Dynamic Image Networks'
+IEEE Transactions on Pattern Analysis and Machine Intelligence. DOI: 10.1109/TPAMI.2017.2769085
+Digital Object Identifier (DOI):
+0.1109/TPAMI.2017.2769085
+Link:
+Link to publication record in Edinburgh Research Explorer
+Document Version:
+Peer reviewed version
+Published In:
+IEEE Transactions on Pattern Analysis and Machine Intelligence
+General rights
+Copyright for the publications made accessible via the Edinburgh Research Explorer is retained by the author(s)
+nd / or other copyright owners and it is a condition of accessing these publications that users recognise and
+bide by the legal requirements associated with these rights.
+Take down policy
+The University of Edinburgh has made every reasonable effort to ensure that Edinburgh Research Explorer
+ontent complies with UK legislation. If you believe that the public display of this file breaches copyright please
+ontact providing details, and we will remove access to the work immediately and"
+1b3587363d37dd197b6adbcfa79d49b5486f27d8,Multimodal Grounding for Language Processing,"Multimodal Grounding for Language Processing
+Lisa Beinborn◦∗3
+Teresa Botschen∗(cid:52)
+Iryna Gurevych (cid:52)
+Language Technology Lab, University of Duisburg-Essen
+(cid:52) Ubiquitous Knowledge Processing Lab (UKP) and Research Training Group AIPHES
+Department of Computer Science, Technische Universit¨at Darmstadt
+www.ukp.tu-darmstadt.de"
+1ba61a4fedc217f7bd052d1b2904567c9985dc44,Person Re-identification for Improved Multi-person Multi-camera Tracking by Continuous Entity Association,"Person Re-identification for Improved
+Multi-person Multi-camera Tracking by
+Continuous Entity Association
+Neeti Narayan, Nishant Sankaran, Devansh Arpit, Karthik
+Dantu, Srirangaraj Setlur, Venu Govindaraju
+University at Buffalo"
+1b3d5d95e1fcded017f193f5cf9772bf8a1ed108,Using Keystroke Analytics to Improve Pass – Fail Classifiers,"(2017). Using
+nalytics
+http://dx.doi.org/10.18608/jla.2017.42.14
+keystrokes
+improve
+pass-fail
+lassifiers.
+Journal
+Learning Analytics,
+(2),
+89–211.
+Using Keystroke Analytics to Improve Pass–Fail Classifiers
+Kevin Casey
+Maynooth University, Ireland"
+1b74479f6e597a33703a63161527d55cc5d3096f,Self-Supervised Model Adaptation for Multimodal Semantic Segmentation,"Self-Supervised Model Adaptation for Multimodal
+Semantic Segmentation
+Abhinav Valada · Rohit Mohan · Wolfram Burgard"
+1b92973843c3a791bb5ca5a68405c3ecb3473ded,Building Deep Networks on Grassmann Manifolds,"Building Deep Networks on Grassmann Manifolds
+Zhiwu Huang†, Jiqing Wu†, Luc Van Gool†‡
+Computer Vision Lab, ETH Zurich, Switzerland
+VISICS, KU Leuven, Belgium
+{zhiwu.huang, jiqing.wu,"
+1b300a7858ab7870d36622a51b0549b1936572d4,Dynamic Facial Expression Recognition With Atlas Construction and Sparse Representation,"This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TIP.2016.2537215, IEEE
+Transactions on Image Processing
+Dynamic Facial Expression Recognition with Atlas
+Construction and Sparse Representation
+Yimo Guo, Guoying Zhao, Senior Member, IEEE, and Matti Pietik¨ainen, Fellow, IEEE"
+1bea531e8271202462c7907f60a8458fa5aec00d,"Ein generisches System zur automatischen Detektion, Verfolgung und Wiedererkennung von Personen in Videodaten","Ein generisches System zur automatischen
+Detektion, Verfolgung und Wiedererkennung von
+Personen in Videodaten
+Zur Erlangung des akademischen Grades eines
+Doktor-Ingenieurs
+von der Fakult¨at f¨ur
+Bauingenieur-, Geo- und Umweltwissenschaften
+des Karlsruher Instituts f¨ur Technologie (KIT)
+(Institut f¨ur Photogrammetrie und Fernerkundung)
+genehmigte
+Dissertation
+Dipl.-Inform. Kai J¨ungling
+us Adenau
+Tag der m¨undlichen Pr¨ufung: 24.01.2011
+Referent: Prof. Dr.-Ing. Stefan Hinz
+Korreferent: Prof. Dr. rer. nat. Maurus Tacke
+Korreferent: Prof. Dr.-Ing. Christoph Stiller
+Karlsruhe 2011"
+1b6d2f8f9cbbf5e20e445a60cb7840a30975f297,Learning from Noisy Web Data with Category-level Supervision,"Learning from Noisy Web Data with Category-level
+Supervision
+Li Niu, Qingtao Tang, Ashok Veeraraghavan, and Ashu Sabharwal"
+1b90507f02967ff143fce993a5abbfba173b1ed0,Gradient-DCT (G-DCT) descriptors,"Image Processing Theory, Tools and Applications
+Gradient-DCT (G-DCT) Descriptors
+Radovan Fusek, Eduard Sojka
+Technical University of Ostrava, FEECS, Department of Computer Science,
+7. listopadu 15, 708 33 Ostrava-Poruba, Czech Republic
+e-mail:"
+1b7b95ee13d91e9c768de6417a8919f2a3384599,A Probabilistic U-Net for Segmentation of Ambiguous Images,"A Probabilistic U-Net for Segmentation of Ambiguous
+Images
+Simon A. A. Kohl1∗,2,, Bernardino Romera-Paredes1, Clemens Meyer1, Jeffrey De Fauw1,
+Joseph R. Ledsam1, Klaus H. Maier-Hein2, S. M. Ali Eslami1, Danilo Jimenez Rezende1, and
+Olaf Ronneberger1
+Division of Medical Image Computing, German Cancer Research Center, Heidelberg, Germany
+DeepMind, London, UK"
+1bd80812c58de8cb0127aea915a45ebbff42dc3b,Twins 3D face recognition challenge,"Twins 3D Face Recognition Challenge
+Vipin Vijayan 1, Kevin W. Bowyer 1, Patrick J. Flynn 1, Di Huang 2, Liming Chen 2,
+Mark Hansen 3, Omar Ocegueda 4, Shishir K. Shah 4, Ioannis A. Kakadiaris 4"
+1ba20398e3b0154730590217a0988fbbab19e927,Doubly weighted nonnegative matrix factorization for imbalanced face recognition,"978-1-4244-2354-5/09/$25.00 ©2009 IEEE
+ICASSP 2009"
+1b6afc2cdf931a02df46d5052b4409c770ef8660,An Approach to Analyse Facial Expression from Videos using Pyramid Histogram of Orientation Gradients,"International Journal of Engineering Research and Applications (IJERA) ISSN: 2248-9622
+International Conference on Industrial Automation and Computing (ICIAC- 12-13th April 2014)
+RESEARCH ARTICLE
+OPEN ACCESS
+An Approach to Analyse Facial Expression from Videos using
+Pyramid Histogram of Orientation Gradients
+Ashish D. Lonare1, Shweta V. Jain2
+Department of Computer Science and Engineering, Shri Ramdeobaba College of Engineering and
+Management Nagpur, India
+Department of Computer Science and Engineering, Shri Ramdeobaba College of Engineering and
+Management Nagpur, India"
+1b1173a3fb33f9dfaf8d8cc36eb0bf35e364913d,Registration Invariant Representations for Expression Detection,"DICTA
+DICTA 2010 Submission #147. CONFIDENTIAL REVIEW COPY. DO NOT DISTRIBUTE.
+Registration Invariant Representations for Expression Detection
+Anonymous DICTA submission
+Paper ID 147"
+1b0a071450c419138432c033f722027ec88846ea,Looking at faces in a vehicle: A deep CNN based approach and evaluation,"Windsor Oceanico Hotel, Rio de Janeiro, Brazil, November 1-4, 2016
+978-1-5090-1889-5/16/$31.00 ©2016 IEEE"
+1b224ad99c42e696b6d98c05a87f1738e28c6c5e,A Markov Random Field Groupwise Registration Framework for Face Recognition,"A Markov Random Field Groupwise Registration
+Framework for Face Recognition
+Shu Liao, Dinggang Shen, and Albert C.S. Chung"
+1b3b01513f99d13973e631c87ffa43904cd8a821,HMM recognition of expressions in unrestrained video intervals,"HMM RECOGNITION OF EXPRESSIONS IN UNRESTRAINED VIDEO INTERVALS
+José Luis Landabaso, Montse Pardàs, Antonio Bonafonte
+Universitat Politècnica de Catalunya, Barcelona, Spain"
+1b71e4b59358ed7ecf6117e19fc944307e58a7af,3 D Spectral Nonrigid Registration of Facial Expression Scans,"IEEE TRANSACTIONS ON VISUALIZATION AND COMPUTER GRAPHICS
+D Spectral Nonrigid Registration of
+Facial Expression Scans
+Gabriel L. Cuendet, Student member, IEEE, Christophe Ecabert, Marina Zimmermann, Student
+member, IEEE, Hazım K. Ekenel, and Jean-Philippe Thiran, Senior Member, IEEE"
+1b2568de7363a9f46094b9cac82f4fe2ec1a4f56,Detection of Fragmented Rectangular Enclosures in Very High Resolution Remote Sensing Images,"Detection of Fragmented Rectangular Enclosures in
+Very High Resolution Remote Sensing Images
+Igor Zingman, Dietmar Saupe, Otávio A. B. Penatti, and Karsten Lambers"
+1b2297ba37fece76568c8b53369e6fd34d63175a,High-Resolution 3D Layout from a Single View,"High-Resolution 3D Layout from a Single View
+M. Zeeshan Zia1, Michael Stark2, and Konrad Schindler1
+Photogrammetry and Remote Sensing, ETH Z¨urich, Switzerland
+Stanford University and Max Planck Institute for Informatics"
+1be18a701d5af2d8088db3e6aaa5b9b1d54b6fd3,Enhancement of Fast Face Detection Algorithm Based on a Cascade of Decision Trees,"ENHANCEMENT OF FAST FACE DETECTION ALGORITHM BASED ON A CASCADE OF
+DECISION TREES
+V. V. Khryashchev a, *, A. A. Lebedev a, A. L. Priorov a
+YSU, Yaroslavl, Russia - (vhr,
+Commission II, WG II/5
+KEY WORDS: Face Detection, Cascade Algorithm, Decision Trees."
+1bb73d8f1224a846473d0a2ddc4289ae3e21b61c,A joint particle filter to track the position and head orientation of people using audio visual cues,"© EURASIP, 2010 ISSN 2076-1465
+8th European Signal Processing Conference (EUSIPCO-2010)
+INTRODUCTION"
+1b70bbf7cdfc692873ce98dd3c0e191580a1b041,Enhancing Performance of Face Recognition System Using Independent Component Analysis,"International Research Journal of Engineering and Technology (IRJET) e-ISSN: 2395 -0056
+Volume: 03 Issue: 10 | Oct -2016 www.irjet.net p-ISSN: 2395-0072
+Enhancing Performance of Face Recognition
+System Using Independent Component Analysis
+Dipti Rane1, Prof. Uday Bhave2, and Asst Prof. Manimala Mahato3
+Student, Computer Science, Shah and Anchor Kuttchi Engineering College, Mumbai, India 1
+Guide, HOD, Computer Science, Shah and Anchor Kuttchi Engineering College, Mumbai, India 2
+Co-Guide, Assistant Prof., Computer Science, Shah and Anchor Kuttchi Engineering College, Mumbai, India 3
+---------------------------------------------------------------------***---------------------------------------------------------------------
+ards, tokens and keys. Biometric based methods examine"
+1b2dd300a43d0553f1deb578d9aea45d99472136,TABIA et al.: FAST APPROXIMATION OF DISTANCE BETWEEN ELASTIC CURVES USING KERNELS,
+1b4424e06ac29b72535727b92f261f39d065e858,3D Pictorial Structures Revisited: Multiple Human Pose Estimation,"D Pictorial Structures Revisited:
+Multiple Human Pose Estimation
+Vasileios Belagiannis, Sikandar Amin, Mykhaylo Andriluka,
+Bernt Schiele, Nassir Navab, and Slobodan Ilic"
+1bf0b5186af083117af136dfcb08ed28828664d0,"Deep Filter Banks for Texture Recognition, Description, and Segmentation","Int J Comput Vis
+DOI 10.1007/s11263-015-0872-3
+Deep Filter Banks for Texture Recognition, Description,
+nd Segmentation
+Mircea Cimpoi1 · Subhransu Maji2 · Iasonas Kokkinos3 · Andrea Vedaldi1
+Received: 4 June 2015 / Accepted: 20 November 2015
+© The Author(s) 2015. This article is published with open access at Springerlink.com"
+1b71d3f30238cb6621021a95543cce3aab96a21b,Fine-grained Video Classification and Captioning,"Fine-grained Video Classification and Captioning
+Farzaneh Mahdisoltani1,2, Guillaume Berger2, Waseem Gharbieh2
+David Fleet1, Roland Memisevic2
+{farzaneh,
+University of Toronto1, Twenty Billion Neurons2"
+1b807b6abaeef68edfbdc4200e198bf4e9613198,Image Processing Pipeline for Facial Expression Recognition under Variable Lighting,"Image Processing Pipeline for Facial Expression Recognition under Variable
+Lighting
+Ralph Ma, Amr Mohamed"
+1b4f6f73c70353869026e5eec1dd903f9e26d43f,Robust Subjective Visual Property Prediction from Crowdsourced Pairwise Labels,"Robust Subjective Visual Property Prediction
+from Crowdsourced Pairwise Labels
+Yanwei Fu, Timothy M. Hospedales, Tao Xiang, Jiechao Xiong,
+Shaogang Gong, Yizhou Wang, and Yuan Yao"
+1bc23c771688109bed9fd295ce82d7e702726327,Sparse Modeling of High - Dimensional Data for Learning and Vision,(cid:13) 2011 Jianchao Yang
+1b0548e52a1ffc7ebffe5200e2111525c9f7fd4a,Novel Views of Objects from a Single Image,"Novel Views of Objects from a Single Image
+Konstantinos Rematas, Chuong Nguyen, Tobias Ritschel, Mario Fritz, and Tinne Tuytelaars"
+1b4bc7447f500af2601c5233879afc057a5876d8,Facial Action Unit Classification with Hidden Knowledge under Incomplete Annotation,"Facial Action Unit Classification with Hidden Knowledge
+under Incomplete Annotation
+Jun Wang
+University of Science and
+Technology of China
+Hefei, Anhui
+Shangfei Wang
+University of Science and
+Technology of China
+Hefei, Anhui
+Rensselaer Polytechnic
+Qiang Ji
+Institute
+Troy, NY
+P.R.China, 230027
+P.R.China, 230027
+USA, 12180"
+1b7a0fffb5ee96adece2f6079f5e9ab79c3bc50e,Spigan: Privileged Adversarial Learning,"Under review as a conference paper at ICLR 2019
+SPIGAN: PRIVILEGED ADVERSARIAL LEARNING
+FROM SIMULATION
+Anonymous authors
+Paper under double-blind review"
+7711a7404f1f1ac3a0107203936e6332f50ac30c,Action Classification and Highlighting in Videos,"Action Classification and Highlighting in Videos
+Atousa Torabi
+Disney Research Pittsburgh
+Leonid Sigal
+Disney Research Pittsburgh"
+77ad2727065cb3dc5c91975604af01c82ec5c9f6,Convolutional Neural Networks for Disaster Images Retrieval,"Convolutional Neural Networks for Disaster Images Retrieval
+Sheharyar Ahmad1,Kashif Ahmad2, Nasir Ahmad1, Nicola Conci2
+DCSE, UET Peshawar, Pakistan
+DISI-University of Trento, Trento"
+776c5e37eecd26049ae31f56b3249c390e25e4e9,Angry and Beautiful: The Interactive Effect of Facial Expression and Attractiveness on Time Perception,"Psihologijske teme, 25, 2016 (2), 299-315
+Izvorni znanstveni rad – UDK –159.925.072
+59.937.072:115
+Angry and Beautiful: The Interactive Effect of Facial
+Expression and Attractiveness on Time Perception
+Jasmina Tomas
+Department of Psychology, Faculty of Humanities and Social Sciences,
+University of Zagreb, Croatia
+Ana Marija Španić
+Child Protection Center of Zagreb, Zagreb, Croatia"
+770b3855cdd15b49c89e4053b6cedafe53cecd6f,Improved Face Recognition Using Pseudo 2 - DHidden,"ImprovedFaceRecognitionUsingPseudo-D
+HiddenMarkovModels
+StefanEickeler,StefanM(cid:127)uller,GerhardRigoll
+Gerhard-Mercator-UniversityDuisburg
+DepartmentofComputerScience
+FacultyofElectricalEngineering
+
+-ti.uni-duisburg.de"
+778c9f88839eb26129427e1b8633caa4bd4d275e,Pose pooling kernels for sub-category recognition,"Pose Pooling Kernels for Sub-category Recognition
+Ning Zhang
+ICSI & UC Berkeley
+Ryan Farrell
+ICSI & UC Berkeley
+Trever Darrell
+ICSI & UC Berkeley"
+7789a5d87884f8bafec8a82085292e87d4e2866f,A Unified Tensor-based Active Appearance Face Model,"A Unified Tensor-based Active Appearance Face
+Model
+Zhen-Hua Feng, Member, IEEE, Josef Kittler, Life Member, IEEE, William Christmas, and Xiao-Jun Wu,
+Member, IEEE"
+779f67f2fe406828bbe7a19e8736cb5fd309e321,Fine-Grained Recognition in the Wild: A Multi-task Domain Adaptation Approach,"Fine-grained Recognition in the Wild:
+A Multi-Task Domain Adaptation Approach
+Timnit Gebru
+Judy Hoffman
+Li Fei-Fei
+CS Department Stanford University
+{tgebru, jhoffman,"
+771a9e7dc747fa2282815a4863502183f4e887c8,Efficient Bootsrapping and Query Adaptive Ranking for Image Search,"The International Journal Of Science & Technoledge (ISSN 2321 – 919X)
+www.theijst.com
+THE INTERNATIONAL JOURNAL OF
+SCIENCE & TECHNOLEDGE
+Efficient Bootsrapping and Query Adaptive Ranking for Image Search
+A. A. R. Senthilkumar
+Head of the Department, Department of Master of Computer Application
+PGP College of Engineering and Technology, Namakkal
+P. Mayuri
+Department of Computer Science and Engineering
+PGP College of Engineering and Technology, Namakkal"
+774c8945ccf0f5315482abb8cf84ac5d37c60aa0,A Comparative Study of Feature Extraction Methods in Images Classification,"I.J. Image, Graphics and Signal Processing, 2015, 3, 16-23
+Published Online February 2015 in MECS (http://www.mecs-press.org/)
+DOI: 10.5815/ijigsp.2015.03.03
+A Comparative Study of Feature Extraction
+Methods in Images Classification
+University of Sciences and Technology Mohamed Boudiaf USTO-MB, Faculty of Mathematics and Computer Science,
+Seyyid Ahmed Medjahed
+Oran, 31000, Algeria
+Email:"
+778952cc94d5baa5132ffbe2cf342f80032f5f73,Comparative Analysis of Techniques for the Recognition of Stabbed Wound and Accidental Wound Patterns,"International Journal of Computer Applications (0975 – 8887)
+Volume 182 – No. 13, September 2018
+Comparative Analysis of Techniques for the Recognition
+of Stabbed Wound and Accidental Wound Patterns
+Dayanand G. Savakar
+Department of Computer Science
+Rani Channamma University, Belagavi
+INDIA
+schemas of"
+7711330fb88e2522a5779a09c1622b75557f9254,Real-time detection and tracking of pedestrians in CCTV images using a deep convolutional neural network,"Real-time detection and tracking of pedestrians in
+CCTV images using a deep convolutional neural network
+Debaditya Acharya
+Kourosh Khoshelham
+Stephan Winter
+Infrastructure Engineering, The University of Melbourne"
+77882930692d41db107430a5a524ff5e4bb2ee5c,Hyperbolic Attention Networks,"Hyperbolic Attention Networks
+Caglar Gulcehre Misha Denil Mateusz Malinowski Ali Razavi
+Razvan Pascanu Karl Moritz Hermann
+Peter Battaglia Victor Bapst
+David Raposo Adam Santoro Nando de Freitas
+Deepmind"
+77e69753fc7cf007a136b12f102e1e11a93f87f5,Head and Body Orientation Estimation Using Convolutional Random Projection Forests.,"This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TPAMI.2017.2784424, IEEE
+Transactions on Pattern Analysis and Machine Intelligence
+Head and Body Orientation Estimation Using
+Convolutional Random Projection Forests
+Donghoon Lee, Ming-Hsuan Yang, and Songhwai Oh∗"
+7730fd15ff14dd84d71f965bfeab8e4d790d91d8,SpaRTA - Tracking across occlusions via global partitioning of 3D clouds of points,"SpaRTA
+Tracking across occlusions via global
+partitioning of 3D clouds of points
+Andrea Cavagna, Stefania Melillo, Leonardo Parisi, Federico Ricci-Tersenghi"
+778bff335ae1b77fd7ec67404f71a1446624331b,Hough Forest-Based Facial Expression Recognition from Video Sequences,"Hough Forest-based Facial Expression Recognition from
+Video Sequences
+Gabriele Fanelli, Angela Yao, Pierre-Luc Noel, Juergen Gall, and Luc Van Gool
+BIWI, ETH Zurich http://www.vision.ee.ethz.ch
+VISICS, K.U. Leuven http://www.esat.kuleuven.be/psi/visics"
+776b77306bdb852c89a22ba142fb57c8e8bb7bb5,Efficient On-Board Stereo Vision Pose Estimation,"Ef‌f‌icient On-Board Stereo Vision
+Pose Estimation(cid:2)
+Angel D. Sappa1, Fadi Dornaika2, David Ger´onimo1, and Antonio L´opez1
+Computer Vision Center, Edifici O Campus UAB
+08193 Bellaterra, Barcelona, Spain
+{asappa, dgeronimo,
+Institut G´eographique National
+94165 Saint Mand´e, France"
+7726a6ab26a1654d34ec04c0b7b3dd80c5f84e0d,Content-aware compression using saliency-driven image retargeting,"CONTENT-AWARE COMPRESSION USING SALIENCY-DRIVEN IMAGE RETARGETING
+Fabio Z¨und*†, Yael Pritch*, Alexander Sorkine-Hornung*, Stefan Mangold*, Thomas Gross†
+*Disney Research Zurich
+ETH Zurich"
+7754b708d6258fb8279aa5667ce805e9f925dfd0,Facial Action Unit Recognition by Exploiting Their Dynamic and Semantic Relationships,"Facial Action Unit Recognition by Exploiting
+Their Dynamic and Semantic Relationships
+Yan Tong, Student Member, IEEE, Wenhui Liao, Member, IEEE, and Qiang Ji, Senior Member, IEEE"
+77db171a523fc3d08c91cea94c9562f3edce56e1,Gauss-Laguerre wavelet textural feature fusion with geometrical information for facial expression identification,"Poursaberi et al. EURASIP Journal on Image and Video Processing 2012, 2012:17
+http://jivp.eurasipjournals.com/content/2012/1/17
+R ES EAR CH
+Open Access
+Gauss–Laguerre wavelet textural feature fusion
+with geometrical information for facial expression
+identification
+Ahmad Poursaberi1*, Hossein Ahmadi Noubari2, Marina Gavrilova1 and Svetlana N Yanushkevich1"
+77037a22c9b8169930d74d2ce6f50f1a999c1221,Robust Face Recognition With Kernelized Locality-Sensitive Group Sparsity Representation,"Robust Face Recognition With Kernelized
+Locality-Sensitive Group Sparsity Representation
+Shoubiao Tan, Xi Sun, Wentao Chan, Lei Qu, and Ling Shao"
+7714a5aa27ab5ad4d06a81fbb3e973d3b1002ac1,SSD-Sface : Single shot multibox detector for small faces,"SSD-Sface: Single shot multibox detector for small faces
+C. Thuis"
+77cb6ea4feff6f44e9977cc7572185d24e48ce40,On the Complementarity of Face Parts for Gender Recognition,"On the Complementarity of Face Parts for
+Gender Recognition
+Yasmina Andreu and Ram´on A. Mollineda
+Dept. Llenguatges i Sistemes Inform`atics
+Universitat Jaume I. Castell´o de la Plana, Spain"
+775c15a5dfca426d53c634668e58dd5d3314ea89,Image Quality-aware Deep Networks Ensemble for Efficient Gender Recognition in the Wild,
+779ad364cae60ca57af593c83851360c0f52c7bf,Steerable Pyramids Feature Based Classification Using Fisher Linear Discriminant for Face Recognition,"Steerable Pyramids Feature Based Classification Using Fisher
+Linear Discriminant for Face Recognition
+EL AROUSSI MOHAMED1
+EL HASSOUNI MOHAMMED12
+GHOUZALI SANAA1
+RZIZA MOHAMMED1
+ABOUTAJDINE DRISS1
+GSCM-LRIT, Faculty of Sciences, Mohammed V University-Agdal, Rabat, Morocco
+DESTEC, FLSHR Mohammed V University-Agdal, Rabat, Morocco
+PO.Box 1014, Rabat, Morocco"
+77b11260154e13e33c84599feba4cdc4f781bf71,Building User Profiles from Shared Photos,Building User Profiles from Shared Photos
+7793c7431f3ddce74fe2d444df614d8d8fd9af4a,A Review of Neural Network based Semantic Segmentation for Scene Understanding in Context of the self driving Car,"A Review of Neural Network based Semantic Segmentation for
+Scene Understanding in Context of the self driving Car
+J. Niemeijer1, P. Pekezou Fouopi2, S. Knake-Langhorst2, and E. Barth3
+Medizinische Informatik, Universität zu Lübeck,
+German Aerospace Center, Braunschweig,
+Institute of Neuro- and Bioinformatics, Universität zu Lübeck,"
+77dc158a979731d2ed01145b1d3ead34a6c33487,Preference for geometric patterns early in life as a risk factor for autism.,"ORIGINAL ARTICLE
+ONLINE FIRST
+Preference for Geometric Patterns Early in Life
+s a Risk Factor for Autism
+Karen Pierce, PhD; David Conant; Roxana Hazin, BS; Richard Stoner, PhD; Jamie Desmond, MPH
+Context: Early identification efforts are essential for the
+early treatment of the symptoms of autism but can only oc-
+ur if robust risk factors are found. Children with autism
+often engage in repetitive behaviors and anecdotally pre-
+fertovisuallyexaminegeometricrepetition,suchasthemov-
+ing blade of a fan or the spinning of a car wheel. The ex-
+tent to which a preference for looking at geometric repeti-
+tion is an early risk factor for autism has yet to be examined.
+Objectives: To determine if toddlers with an autism spec-
+trum disorder (ASD) aged 14 to 42 months prefer to vi-
+sually examine dynamic geometric images more than so-
+ial images and to determine if visual fixation patterns
+an correctly classify a toddler as having an ASD.
+Design: Toddlers were presented with a 1-minute movie
+depicting moving geometric patterns on 1 side of a video"
+77851ca35105ebe007d99e5d78ceb3473491071c,Spatiotemporal Stacked Sequential Learning for Pedestrian Detection,"Spatiotemporal Stacked Sequential Learning for Pedestrian Detection
+Alejandro Gonz´alez1
+Sebastian Ramos1
+David V´azquez1
+Antonio M. L´opez1
+Jaume Amores1
+Computer Vision Center, Barcelona
+Universitat Aut`onoma de Barcelona
+United Technologies Research Center"
+77351eaeb65e374a4d1e54acc28fea426670e364,Compression Based Face Recognition Using Transform Domain Features Fused at Matching Level,"Signal & Image Processing : An International Journal (SIPIJ) Vol.8, No.4, August 2017
+COMPRESSION BASED FACE RECOGNITION
+USING TRANSFORM DOMAIN FEATURES
+FUSED AT MATCHING LEVEL
+Srinivas Halvia, Nayina Ramapurb , K B Rajac and Shanti Prasadd
+Dayananda Sagar College of Engineering, Bangalore, India.
+Sai-Tektronix Pvt. Ltd., Bangalore, India.
+University Visvesvaraya College of Engineering, Bangalore, India.
+dK.S. Institute of Technology, Bangalore, India."
+77052654a37b88719c014c5afd3db89cb2288aeb,Lung Cancer Prediction Using Neural Network Ensemble with Histogram of Oriented Gradient Genomic Features,"Hindawi Publishing Corporation
+e Scientific World Journal
+Volume 2015, Article ID 786013, 17 pages
+http://dx.doi.org/10.1155/2015/786013
+Research Article
+Lung Cancer Prediction Using Neural Network Ensemble with
+Histogram of Oriented Gradient Genomic Features
+Emmanuel Adetiba and Oludayo O. Olugbara
+ICT and Society Research Group, Durban University of Technology, P.O. Box 1334, Durban 4000, South Africa
+Correspondence should be addressed to Oludayo O. Olugbara;
+Received 12 December 2014; Accepted 29 January 2015
+Academic Editor: Alexander Schonhuth
+Copyright © 2015 E. Adetiba and O. O. Olugbara. This is an open access article distributed under the Creative Commons
+Attribution License, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is
+properly cited.
+This paper reports an experimental comparison of artificial neural network (ANN) and support vector machine (SVM) ensembles
+nd their “nonensemble” variants for lung cancer prediction. These machine learning classifiers were trained to predict lung cancer
+using samples of patient nucleotides with mutations in the epidermal growth factor receptor, Kirsten rat sarcoma viral oncogene,
+nd tumor suppressor p53 genomes collected as biomarkers from the IGDB.NSCLC corpus. The Voss DNA encoding was used to
+map the nucleotide sequences of mutated and normal genomes to obtain the equivalent numerical genomic sequences for training"
+77c81c13a110a341c140995bedb98101b9e84f7f,WILDTRACK : A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection,"WILDTRACK: A Multi-camera HD Dataset for
+Dense Unscripted Pedestrian Detection
+Tatjana Chavdarova1, Pierre Baqu´e2, St´ephane Bouquet2,
+Andrii Maksai2, Cijo Jose1, Timur Bagautdinov2, Louis Lettry3,
+Pascal Fua2, Luc Van Gool3, and Franc¸ois Fleuret1
+Machine Learning group, Idiap Research Institute & ´Ecole Polytechnique F´ed´erale de Lausanne
+CVLab, ´Ecole Polytechnique F´ed´erale de Lausanne
+Computer Vision Lab, ETH Zurich"
+771b7d76df1ed476dea859034a276f14ad1e49f1,Multi-scale elastic graph matching for face detection,"Sato and Kuriya EURASIP Journal on Advances in Signal Processing 2013, 2013:175
+http://asp.eurasipjournals.com/content/2013/1/175
+REVIEW
+Open Access
+Multi-scale elastic graph matching for face
+detection
+Yasuomi D Sato1,2,3* and Yasutaka Kuriya1"
+77d31d2ec25df44781d999d6ff980183093fb3de,The Multiverse Loss for Robust Transfer Learning,"The Multiverse Loss for Robust Transfer Learning
+Supplementary
+. Omitted proofs
+for which the joint loss:
+m(cid:88)
+L(F r, br, D, y)
+J(F 1, b1...F m, bm, D, y) =
+is bounded by:
+mL∗(D, y) ≤ J(F 1, b1...F m, bm, D, y)
+m−1(cid:88)
+≤ mL∗(D, y) +
+Alλd−j+1
+where [A1 . . . Am−1] are bounded parameters.
+We provide proofs that were omitted from the paper for
+lack of space. We follow the same theorem numbering as in
+the paper.
+Lemma 1. The minimizers F ∗, b∗ of L are not unique, and
+it holds that for any vector v ∈ Rc and scalar s, the solu-
+tions F ∗ + v1(cid:62)
+Proof. denoting V = v1(cid:62)"
+77d4843a177031b2b5721824280033e2e601334c,Comparative Evaluation of 3D versus 2D Modality for Automatic Detection of Facial Action Units,"Author’s Accepted Manuscript
+Comparative Evaluation of 3D versus 2D Modality
+for Automatic Detection of Facial Action Units
+Arman Savran, Bülent Sankur, M. Taha Bilge
+Reference:
+S0031-3203(11)00310-4
+doi:10.1016/j.patcog.2011.07.022
+PR 4228
+To appear in:
+Pattern Recognition
+Received date:
+Revised date:
+Accepted date:
+3 November 2010
+5 July 2011
+9 July 2011
+www.elsevier.com/locate/pr
+Cite this article as: Arman Savran, Bülent Sankur and M. Taha Bilge, Comparative Eval-
+uation of 3D versus 2D Modality for Automatic Detection of Facial Action Units, Pattern
+Recognition, doi:10.1016/j.patcog.2011.07.022"
+77fb0266b354d33f3725629c2ddce3d2342b318a,Is Attribute-Based Zero-Shot Learning an Ill-Posed Strategy?,"Is Attribute-Based Zero-Shot Learning
+n Ill-Posed Strategy?
+Ibrahim Alabdulmohsin1, Moustapha Cisse2, and Xiangliang Zhang1(B)
+Computer, Electrical and Mathematical Sciences and Engineering Division,
+King Abdullah University of Science and Technology (KAUST),
+Thuwal 23955-6900, Saudi Arabia
+Facebook Artificial Intelligence Research (FAIR), Menlo Park, USA
+http://mine.kaust.edu.sa"
+77c7f5c5852c189b59c34ebbbbec03e5e4060428,Talking to Robots: Learning to Ground Human Language in Perception and Execution,"(cid:13)Copyright 2014
+Cynthia Matuszek"
+482769e4c4cf832128b52f1bdff873af1eee8ba8,Robust Face Detection using Fusion of Haar and Daubechies Orthogonal Wavelet Template,"International Journal of Computer Applications (0975 – 8887)
+Volume 46– No.6, May 2012
+Robust Face Detection using Fusion of Haar and
+Daubechies Orthogonal Wavelet Template
+Chirag I Patel
+Sanjay Garg
+Research scholar, Institute of Technology,
+Professor, Institute of Technology,
+Nirma University, Ahmedabad, Gujarat, India
+Nirma University, Ahmedabad, Gujarat, India"
+48186494fc7c0cc664edec16ce582b3fcb5249c0,P-CNN: Pose-Based CNN Features for Action Recognition,"P-CNN: Pose-based CNN Features for Action Recognition
+Guilhem Ch´eron∗ †
+Ivan Laptev∗
+INRIA
+Cordelia Schmid†"
+48499deeaa1e31ac22c901d115b8b9867f89f952,Interim Report of Final Year Project HKU-Face : A Large Scale Dataset for Deep Face Recognition,"Interim Report of Final Year Project
+HKU-Face: A Large Scale Dataset for
+Deep Face Recognition
+Haicheng Wang
+035140108
+Haoyu Li
+035141841
+COMP4801 Final Year Project
+Project Code: 17007"
+486a82f50835ea888fbc5c6babf3cf8e8b9807bc,Face Search at Scale: 80 Million Gallery,"MSU TECHNICAL REPORT MSU-CSE-15-11, JULY 24, 2015
+Face Search at Scale: 80 Million Gallery
+Dayong Wang, Member, IEEE, Charles Otto, Student Member, IEEE, Anil K. Jain, Fellow, IEEE"
+48fb35946641351f7480a5b88567aae59e526d82,Generating faces for affect analysis,"Noname manuscript No.
+(will be inserted by the editor)
+Generating faces for affect analysis
+Dimitrios Kollias (cid:63) · Shiyang Cheng † · Evangelos Ververas ∗ · Irene
+Kotsia1 · Stefanos Zafeiriou2
+Received: Sept 30th 2018 / Accepted: date"
+48b38d157272f03f6b44c0df61130534d11d8569,Natural Language Guided Visual Relationship Detection,"oard)(person-behind-kid)(skate board-on-street)(person-sit on-street)...ImageVisual relationshipsFigure1:Visualrelationshipsrepresenttheinteractionsbe-tweenobservedobjects.Eachrelationshiphasthreeele-ments:subject,predicateandobject.HereisanexampleimagefromVisualGenome[17].Ourproposedmethodisabletoeffectivelydetectnumerouskindsofdifferentrela-tionshipsfromsuchimage.objectsinimages.Therelationshipscanberepresentedinatripletformofhsubject-predicate-objecti,e.g.,hkid-on-skateboardi,asshowninFig.1.Anaturalapproachforthistaskistotreatitasaclassificationproblem:eachkindofrelationships/phraseisarelationcategory[32],asshowninFig.2.Totrainsuchreliableandrobustmodel,suffi-cienttrainingsamplesforeachpossiblehsubject-predicate-objecticombinationareessential.ConsidertheVisualRe-lationshipDataset(VRD)[24],withN=100objectcate-goriesandK=70predicates,thenthereareN2K=700kcombinationsintotal.However,itcontainsonly38kre-lationships,whichmeansthateachcombinationhaslessthan1sampleonaverage.Thepreviousclassification-basedworkscanonlydetectthemostcommonrelationships,e.g.,[32]studiedonly13frequentrelationships.Anotherpopularstrategyistodetecttherelationshippredicatesandtheobjectcategoriesindependently.Al-thoughthenumberofcategoriesdecreasesdramatically,thesemanticrelationshipbetweentheobjectsandthepredi-catesareignored.Consequently,thephrasewhichhasthesamepredicatebutdifferentagentsisconsideredasthesametypeofrelationship.Forinstance,the”clock-on-1"
+485e0d178bafa959ac956aa8de6556a2439c6663,Learning from Examples to Generalize over Pose and Illumination,"Learning from Examples to Generalize over Pose
+nd Illumination
+Marco K. M¨uller and Rolf P. W¨urtz
+Institute f¨ur Neural Computation, Ruhr-University, 44780 Bochum, Germany"
+483ca50670c5f7d33f7c722dd71105327a30ea60,Improving object classification using semantic attributes,"SU, ALLAN, JURIE: SEMANTIC ATTRIBUTES
+Improving object classification
+using semantic attributes
+Yu Su
+http://users.info.unicaen.fr/~ysu/
+Moray Allan
+http://users.info.unicaen.fr/~mallan/
+Frédéric Jurie
+http://users.info.unicaen.fr/~jurie/
+GREYC
+Université de Caen
+4032 Caen Cedex
+France"
+4839f861709e6ae6d4d032228473ce1764acbdcc,Finding Egocentric Image Topics through Convolutional Neural Network Based Representations,"Finding Egocentric Image Topics through Convolutional Neural Network Based Representations
+Kai Zhen, David Crandall
+School of Informatics and Computing, Indiana University.
+Life-logging cameras create huge collections of photos, even for a single
+person on a single day [1, 6], which makes it difficult for users to browse
+or organize their photos effectively. Unlike text corpora in which words
+reate intermediate representations that carry semantic meaning for higher-
+level concepts such as topics, images have no such obvious intermediate
+representation to connect raw pixels and semantics. Egocentric photos are
+particularly challenging because they were taken opportunistically, so they
+re often blurry and poorly-composed compared to consumer-style images.
+This paper applies topic modeling on deep features to extract visual
+“concept clusters” from egocentric datasets. We discretize features to form
+better analogy to the word-document model, which we find yields faster
+onvergence during inference. We also find that removing frequent, less in-
+formative features helps to prevent outliers and improve the semantic mean-
+ing of extracted topics, analogous to removing stop words in the text mining
+domain. In a generative process similar to that proposed in LDA [2], we
+model an image as being generated by first choosing topics, and then sam-
+pling features (visual words) from selected topics,"
+4850af6b54391fc33c8028a0b7fafe05855a96ff,Discovering useful parts for pose estimation in sparsely annotated datasets,"Discovering Useful Parts for Pose Estimation in Sparsely Annotated Datasets
+Mikhail Breslav1, Tyson L. Hedrick2, Stan Sclaroff1, and Margrit Betke1
+Department of Computer Science and 2Department of Biology
+Boston University and 2University of North Carolina"
+485eb41be3ce1600e9934167808b0319a6c3ec2f,A Novel Structural-Description Approach for Image Retrieval,"A Novel Structural-Description Approach For
+Image Retrieval
+Christoph Rasche, Constantin Vertan
+Laboratorul de Analiza si Prelucrarea Imaginilor
+Universitatea Politehnica din Bucuresti
+Bucuresti 061071, RO"
+48c0059feb14ca3deedfa7e3b53fbc34bd6d8efb,Facial Expression Retrieval Using 3-Dimensional Mesh Sequences,"Facial Expression Retrieval Using
+-Dimensional Mesh Sequences
+Danelakis E. Antonios*
+National and Kapodistrian University of Athens
+Department of Informatics and Telecommunications"
+48b4f49ec708677fc9f70edc74fd0f92ef986406,CS168: The Modern Algorithmic Toolbox Lecture #6: Stochastic Gradient Descent and Regularization,"CS168: The Modern Algorithmic Toolbox
+Lecture #6: Stochastic Gradient Descent and
+Regularization
+Tim Roughgarden & Gregory Valiant∗
+April 13, 2016
+Context
+Last lecture we covered the basics of gradient descent, with an emphasis on the intuition
+ehind and geometry underlying the method, plus a concrete instantiation of it for the
+problem of linear regression (fitting the best hyperplane to a set of data points). This basic
+method is already interesting and useful in its own right (see Homework #3).
+This lecture we’ll cover two extensions that, while simple, will bring your knowledge a step
+loser to the state-of-the-art in modern machine learning. The two extensions have different
+haracters. The first concerns how to actually solve (computationally) a given unconstrained
+minimization problem, and gives a modification of basic gradient descent — “stochastic
+gradient descent” — that scales to much larger data sets. The second extension concerns
+problem formulation rather than implementation, namely the choice of the unconstrained
+optimization problem to solve (i.e., the objective function f ). Here, we introduce the idea
+of “regularization,” with the goal of avoiding overfitting the function learned to the data set
+t hand, even for very high-dimensional data.
+Recap"
+4871300f1e5a58ce920e6b5be14e89c5da4aa4c4,Manifold Learning for Video-to-Video Face Recognition,"Manifold Learning for Video-to-Video Face
+Recognition"
+48d299fe3303c80f840816fc76971a42b4a8b624,Predicting Important Objects for Egocentric Video Summarization,"http://dx.doi.org/10.1007/s11263-014-0794-5
+Predicting Important Objects for Egocentric Video Summarization
+Yong Jae Lee · Kristen Grauman
+Received: date / Accepted: date"
+488676e61fcf7b79d83c25fb103c8d8a854d8987,Leveraging Convolutional Pose Machines for Fast and Accurate Head Pose Estimation,"Leveraging Convolutional Pose Machines
+for Fast and Accurate Head Pose Estimation
+Yuanzhouhan Cao1, Olivier Can´evet 1 and Jean-Marc Odobez1,2"
+48a5b6ee60475b18411a910c6084b3a32147b8cd,Pedestrian Attribute Recognition with Part-based CNN and Combined Feature Representations,"Pedestrian attribute recognition with part-based CNN
+nd combined feature representations
+Yiqiang Chen, Stefan Duffner, Andrei Stoian, Jean-Yves Dufour, Atilla
+Baskurt
+To cite this version:
+Yiqiang Chen, Stefan Duffner, Andrei Stoian, Jean-Yves Dufour, Atilla Baskurt. Pedestrian attribute
+recognition with part-based CNN and combined feature representations. VISAPP2018, Jan 2018,
+Funchal, Portugal. <hal-01625470>
+HAL Id: hal-01625470
+https://hal.archives-ouvertes.fr/hal-01625470
+Submitted on 21 Jun 2018
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,"
+486e5c2996726ec0f7c37077a2752dc4bd8c1413,Linearized Smooth Additive Classifiers,"Linearized Smooth Additive Classifiers
+Subhransu Maji
+Toyota Technological Institute at Chicago,
+Chicago, IL 60637, USA"
+480810001ed845ec04a20b00461a8a82fcffbb52,Autistic Traits and Brain Activation during Face-to-Face Conversations in Typically Developed Adults,"Autistic Traits and Brain Activation during Face-to-Face
+Conversations in Typically Developed Adults
+Masashi Suda, Yuichi Takei, Yoshiyuki Aoyama, Kosuke Narita, Noriko Sakurai, Masato Fukuda*,
+Masahiko Mikuni
+Department of Psychiatry and Neuroscience, Gunma University Graduate School of Medicine, Gunma, Japan"
+488493dc29c844b36660395266d8d347c7cfa9ce,Towards Flexible Classification: Cost-Aware Online Query of Cascades and Operating Points,"Towards Flexible Classification: Cost-Aware
+Online Query of Cascades and Operating Points
+Brandyn White, Andrew Miller, Tom Yeh, and Larry S. Davis
+University of Maryland: College Park"
+48a42303559ea518ba06f54a8cfce4226bb0e77e,Urban tribes: Analyzing group photos from a social perspective,"Urban Tribes: Analyzing Group Photos from a Social Perspective
+Ana C. Murillo†,
+Iljung S. Kwak‡, Lubomir Bourdev§∗, David Kriegman‡, Serge Belongie‡
+DIIS - Instituto de Ingenier´ıa de Arag´on. Universidad de Zaragoza, Spain
+§Facebook. 1601 Willow Road, Menlo Park, CA 94025, USA
+Computer Science and Engineering Department. University of California, San Diego, USA"
+483f85e1ebef9d10a951b3c01751892aca92a2c2,Adaptive Classification for Person Re-identification Driven by Change Detection,"Adaptive Classification for Person Re-Identification Driven by Change
+Detection
+C. Pagano1, E. Granger1, R. Sabourin1, G. L. Marcialis2 and F. Roli2
+Lab. d’imagerie, de vision et d’intelligence artificielle,
+´Ecole de technologie sup´erieure, Universit´e du Qu´ebec, Montreal, Canada
+Pattern Recognition and Applications Group, Dept. of Electrical and Electronic Engineering,
+{eric.granger,
+University of Cagliari, Cagliari, Italy
+Keywords:
+Multi-Classifier Systems, Incremental Learning, Adaptive Biometrics, Change Detection, Face Recognition,
+Video Surveillance."
+484c2617471fd742c4806f9281e5add45c6831a7,LSTM Self-Supervision for Detailed Behavior Analysis,"LSTM Self-Supervision for Detailed Behavior Analysis
+Biagio Brattoli1∗, Uta B¨uchler1∗, Anna-Sophia Wahl2, Martin E. Schwab2, Bj¨orn Ommer1
+HCI / IWR, Heidelberg University, Germany
+Department of HST, ETH Zurich, Switzerland"
+486f08c875e88b3f1f157e7de1ae2cf5176f5431,Structure-from-motion for Calibration of a Vehicle Camera System with Non-overlapping Fields-of-view in an Urban Environment,"STRUCTURE-FROM-MOTION FOR CALIBRATION OF A VEHICLE CAMERA SYSTEM
+WITH NON-OVERLAPPING FIELDS-OF-VIEW IN AN URBAN ENVIRONMENT
+Photogrammetry & Remote Sensing, Technische Universitaet Muenchen, Germany - (alexander.hanel,
+A. Hanela, U. Stillaa
+Commission I, WG 9
+KEY WORDS: vehicle cameras, camera calibration, structure from motion, bundle adjustment"
+488e475eeb3bb39a145f23ede197cd3620f1d98a,Pedestrian Attribute Classification in Surveillance: Database and Evaluation,"Pedestrian Attribute Classification in Surveillance: Database and Evaluation
+Jianqing Zhu, Shengcai Liao, Zhen Lei, Dong Yi, Stan Z. Li∗
+Center for Biometrics and Security Research & National Laboratory of Pattern Recognition
+Institute of Automation, Chinese Academy of Sciences (CASIA)
+95 Zhongguancun East Road, 100190, Beijing, China
+{jqzhu, scliao, zlei, dyi,"
+48bf7357723abf7770400d68f914d6a7ca5a1a5f,Real-Time Head Pose Tracking with Online Face Template Reconstruction,"Real-Time Head Pose Tracking with Online
+Face Template Reconstruction
+Songnan Li, Member, IEEE,
+King Ngi Ngan, Fellow, IEEE,
+Raveendran Paramesran, Senior Member, IEEE,
+nd Lu Sheng"
+48f45accce6a4a22e4ead41fe292a915f3531f5b,Active Learning for Visual Question Answering: An Empirical Study,"Active Learning for Visual Question Answering:
+An Empirical Study
+Xiao Lin
+Virginia Tech
+Devi Parikh
+Georgia Tech"
+487df616e981557c8e1201829a1d0ec1ecb7d275,Acoustic Echo Cancellation Using a Vector-Space-Based Adaptive Filtering Algorithm,"Acoustic Echo Cancellation Using a Vector-Space-Based
+Adaptive Filtering Algorithm
+Yu Tsao, Member IEEE, Shih-Hau Fang*, Senior Member IEEE, and Yao Shiao"
+486a0044b9c86c6f648f153f3d3f2e534342b754,Trajectories and Maneuvers of Surrounding Vehicles With Panoramic Camera Arrays,"Trajectories and Maneuvers of Surrounding Vehicles
+with Panoramic Camera Arrays
+Jacob V. Dueholm, Miklas S. Kristoffersen, Ravi K. Satzoda, Thomas B. Moeslund, and Mohan M. Trivedi"
+48319e611f0daaa758ed5dcf5a6496b4c6ef45f2,Non Binary Local Gradient Contours for Face Recognition,"Non Binary Local Gradient Contours for Face Recognition
+Abdullah Gubbia, Mohammad Fazle Azeemb, M Sharmila Kumaric
+Department of Electronics and Communication, P.A. College of Engnineering, Mangalore,
+Nadupadavu, Mangalore, India, Contact:
+Senior IEEE Member, Department of Electrical and Electronics Engineering, Aligarh Muslim
+University, India, Contact:
+Department of Computer Science and Engineering, P A College of Engineering, Nadupadavu,
+Mangalore, India. Contact:
+As the features from the traditional Local Binary patterns (LBP) and Local Directional Patterns (LDP) are
+found to be ineffective for face recognition, we have proposed a new approach derived on the basis of Information
+sets whereby the loss of information that occurs during the binarization is eliminated. The information sets
+s a product. Since face is having smooth texture in a limited area, the extracted features must be highly
+discernible. To limit the number of features, we consider only the non overlapping windows. By the application
+of the information set theory we can reduce the number of feature of an image. The derived features are shown
+to work fairly well over eigenface, fisherface and LBP methods.
+Keywords: Local Binary Pattern, Local Directional Pattern, Information Sets, Gradient Contour, Support
+Vector Machine, KNN, Face Recognition.
+. INTRODUCTION
+In face recognition, the major issue to be ad-
+dressed is the extraction of features which are"
+4875bed500321dec353959a556541715da5c9d18,A Domain Agnostic Normalization Layer for Unsupervised Adversarial Domain Adaptation,"A Domain Agnostic Normalization Layer
+for Unsupervised Adversarial Domain Adaptation
+R. Romijnders
+Eindhoven, University of Technology
+P. Meletis
+G. Dubbelman"
+48cfc5789c246c6ad88ff841701204fc9d6577ed,Age Invariant Face Recognition Based on DCT Feature Extraction and Kernel Fisher Analysis,"J Inf Process Syst, Vol.12, No.3, pp.392~409, September 2016
+ISSN 1976-913X (Print)
+ISSN 2092-805X (Electronic)
+Age Invariant Face Recognition Based on DCT
+Feature Extraction and Kernel Fisher Analysis
+Leila Boussaad*, Mohamed Benmohammed**, and Redha Benzid***"
+484c4eec34e985d8ca0c20bf83efc56881180709,Efficient semantic image segmentation with superpixel pooling,"Ef‌f‌icient semantic image segmentation with superpixel pooling
+Mathijs Schuurmans Maxim Berman Matthew B. Blaschko
+Dept. ESAT, Center for Processing Speech and Images
+KU Leuven, Belgium
+{maxim.berman,
+June 8, 2018"
+70f189798c8b9f2b31c8b5566a5cf3107050b349,The challenge of face recognition from digital point-and-shoot cameras,"The Challenge of Face Recognition from Digital Point-and-Shoot Cameras
+J. Ross Beveridge∗
+Geof H. Givens§
+W. Todd Scruggs¶
+P. Jonathon Phillips†
+Yui Man Lui∗
+Kevin W. Bowyer(cid:107)
+David Bolme‡
+Mohammad Nayeem Teli∗
+Patrick J. Flynn(cid:107)
+Bruce A. Draper∗,
+Hao Zhang∗
+Su Cheng†"
+70671018d4597b6d2d0c99b38b1f1a3f1271eaec,Learning Representations Specialized in Spatial Knowledge: Leveraging Language and Vision,"Transactions of the Association for Computational Linguistics, vol. 6, pp. 133–144, 2018. Action Editor: Stefan Riezler.
+Submission batch: 6/2017; Revision batch: 9/2017; Published 2/2018.
+(cid:13)2018 Association for Computational Linguistics. Distributed under a CC-BY 4.0 license."
+70f0636b14b9e3916a780d70a5c712e8fea739da,"ANDRE MOUTON On Artefact Reduction , Segmentation and Classification of 3 D Computed Tomography Imagery in Baggage Security Screening","CRANFIELD UNIVERSITY
+SCHOOL OF ENGINEERING
+PhD THESIS
+Academic Year 2013-2014
+ANDRE MOUTON
+On Artefact Reduction, Segmentation and Classification of
+D Computed Tomography Imagery in Baggage Security
+Screening
+Supervised by: Dr Toby Breckon and Dr Carol Armitage
+March 2014
+This thesis is submitted in partial fulfilment of the requirements for
+the Degree of Doctor of Philosophy
+©Cranfield University, 2014. All rights reserved. No part of this
+publication may be reproduced without the written permission of
+the copyright holder."
+70ec156f7e6de0275c7e4e95e35f1bc1e92e29b3,Deep learning ensembles for melanoma recognition in dermoscopy images,"Deep learning ensembles for melanoma recognition in dermoscopy images1
+N. C. F. Codella, Q. B. Nguyen, S. Pankanti, D. Gutman, B. Helba, A. Halpern, J. R. Smith"
+70109c670471db2e0ede3842cbb58ba6be804561,Zero-Shot Visual Recognition via Bidirectional Latent Embedding,"Noname manuscript No.
+(will be inserted by the editor)
+Zero-Shot Visual Recognition via Bidirectional Latent Embedding
+Qian Wang · Ke Chen
+Received: date / Accepted: date"
+706600aa77ffb165097e4aeccb2b214dabdb8092,Combining Graph-based Dependency Features with Convolutional Neural Network for Answer Triggering,"Combining Graph-based Dependency Features with
+Convolutional Neural Network for Answer Triggering
+Deepak Gupta∗, Sarah Kohail†, Pushpak Bhattacharyya∗
+Indian Institute of Technology Patna, India
+Universit¨at Hamburg, Germany
+{deepak.pcs16,"
+708a55d65568faf8158417ddfb79e728b2b28f86,3D Body Model Construction and Matching for Real Time People Re-Identification,"Eurographics Italian Chapter Conference (2010)
+E. Puppo, A. Brogni, and L. De Floriani (Editors)
+D Body Model Construction and Matching for Real Time
+People Re-Identification
+D. Baltieri, R. Vezzani and R. Cucchiara
+Dipartimento di Ingegneria dell’Informazione
+University of Modena and Reggio Emilia
+Via Vignolese, 905 - 41100 Modena - Italy"
+706236308e1c8d8b8ba7749869c6b9c25fa9f957,Crowdsourced data collection of facial responses,"Crowdsourced Data Collection of Facial Responses
+Daniel McDuff
+MIT Media Lab
+Cambridge
+02139, USA
+Rosalind Picard
+MIT Media Lab
+Cambridge
+02139, USA
+Rana el Kaliouby
+MIT Media Lab
+Cambridge
+02139, USA"
+70f3d3d9a7402a0f62a5646a16583c6c58e3b07a,"An Architecture for Deep, Hierarchical Generative Models","An Architecture for Deep, Hierarchical Generative
+Models
+Philip Bachman
+Maluuba Research"
+706b9767a444de4fe153b2f3bff29df7674c3161,Fast Metric Learning For Deep Neural Networks,"Fast Metric Learning For Deep Neural Networks
+Henry Gouk1, Bernhard Pfahringer1, and Michael Cree2
+Department of Computer Science, University of Waikato, Hamilton, New Zealand
+School of Engineering, University of Waikato, Hamilton, New Zealand"
+70c58700eb89368e66a8f0d3fc54f32f69d423e1,In Unsupervised Spatio-temporal Feature Learning,"INCORPORATING SCALABILITY IN UNSUPERVISED SPATIO-TEMPORAL FEATURE
+LEARNING
+Sujoy Paul, Sourya Roy and Amit K. Roy-Chowdhury
+Dept. of Electrical and Computer Engineering, University of California, Riverside, CA 92521"
+708355d319a88485fdbbea3524104982b8cf37c2,2D/3D Sensor Exploitation and Fusion for Enhanced Object Detection,"D/3D Sensor Exploitation and Fusion for Enhanced Object Detection
+Jiejun Xu
+HRL Laboratories LLC
+Kyungnam Kim
+HRL Laboratories LLC
+Zhiqi Zhang
+HRL Laboratories LLC
+Hai-wen Chen
+HRL Laboratories LLC
+Yuri Owechko
+HRL Laboratories LLC"
+70990e1b13cec2b3e4831a00c6ac901dae76b27a,"Mareckova , Klara ( 2013 ) Sex differences and the role of sex hormones in face development and face processing","Mareckova, Klara (2013) Sex differences and the role of
+sex hormones in face development and face processing.
+PhD thesis, University of Nottingham.
+Access from the University of Nottingham repository:
+http://eprints.nottingham.ac.uk/13333/1/KlaraMareckova_PhDThesis_finalversion1.pdf
+Copyright and reuse:
+The Nottingham ePrints service makes this work by researchers of the University of
+Nottingham available open access under the following conditions.
+· Copyright and all moral rights to the version of the paper presented here belong to
+the individual author(s) and/or other copyright owners.
+To the extent reasonable and practicable the material made available in Nottingham
+ePrints has been checked for eligibility before being made available.
+· Copies of full items can be used for personal research or study, educational, or not-
+for-profit purposes without prior permission or charge provided that the authors, title
+nd full bibliographic details are credited, a hyperlink and/or URL is given for the
+original metadata page and the content is not changed in any way.
+· Quotations or similar reproductions must be sufficiently acknowledged.
+Please see our full end user licence at:
+http://eprints.nottingham.ac.uk/end_user_agreement.pdf
+A note on versions:"
+70eb48e06d9d5edf84246b772673b6d44af4b3c6,Robust Ldp Based Face Descriptor,"International Journal of Advances in Engineering & Technology, Mar. 2013.
+©IJAET ISSN: 2231-1963
+ROBUST LDP BASED FACE DESCRIPTOR
+Mahadeo D. Narlawar and Jaideep G. Rana
+Department of Electronics Engineering, Jawaharlal Nehru College of Engineering,
+Aurangabad-431004, Maharashtra, India"
+70e79d7b64f5540d309465620b0dab19d9520df1,Facial Expression Recognition System Using Extreme Learning Machine,"International Journal of Scientific & Engineering Research, Volume 8, Issue 3, March-2017
+ISSN 2229-5518
+Facial Expression Recognition System
+Using Extreme Learning Machine
+Firoz Mahmud, Dr. Md. Al Mamun"
+70bfe8dfd9c9b05c8854a5d4aca9c3ee3a3b7eff,3D Object Reconstruction using Multiple Views,"!, >A?J 4A?IJHK?JE KIEC KJEFA 8EAMI
+,CD E
+,AF=HJAJ B +FKJAH 5?EA?A 5J=JEIJE?I
+7ELAHIEJO B ,K>E 6HEEJO +ACA
+) JDAIEI J JDA 7ELAHIEJO B ,K>E 6HEEJO +ACA E BKAJ B
+JDA HAGKEHAAJI BH JDA B
+,?JH B 2DEIFDO
+5AFJA>AH"
+7003d903d5e88351d649b90d378f3fc5f211282b,Facial Expression Recognition using Gabor Wavelet,"International Journal of Computer Applications (0975 – 8887)
+Volume 68– No.23, April 2013
+Facial Expression Recognition using Gabor Wavelet
+Mahesh Kumbhar
+ENTC SVERI’S COE (Poly),
+Pandharpur,
+Solapur, India
+Manasi Patil
+ENTC SVERI’S COE,
+Pandharpur,
+Solapur, India
+Ashish Jadhav
+ENTC SVERI’S COE (Poly),
+Pandharpur,
+Solapur, India"
+70e3c02575e4041519434e0dacb291bbb8791380,Generative 2D and 3D Human Pose Estimation with Vote Distributions,"Generative 2D and 3D
+Human Pose Estimation
+with Vote Distributions
+J¨urgen Brauer, Wolfgang H¨ubner, Michael Arens
+Fraunhofer Institute of Optronics, System Technologies and Image Exploitation
+{juergen.brauer, wolfgang.huebner,
+Gutleuthausstr. 1, 76275 Ettlingen, Germany"
+70920447b8300fd65745c0a884523e4d52d000ef,Automated Crowd Detection in Stadium Arenas,"AUTOMATED CROWD DETECTION IN STADIUM ARENAS
+Loris Nanni, 1 Sheryl Brahnam, 2 Stefano Ghidoni, 1 Emanuele Menegatti1
+DIE, University of Padua, Via Gradenigo, 6 - 35131- Padova – Italy e-mail: {loris.nanni, ghidoni,
+CIS, Missouri State University, 901 S. National, Springfield, MO 65804, USA e-mail:"
+70af8e4ff3c029aea788bc28b45c56932b50c056,Robust Facial Landmark Detection Using a Mixture of Synthetic and Real Images with Dynamic Weighting: A Survey,"Om Prakash Gupta et al. 2016, Volume 4 Issue 1
+ISSN (Online): 2348-4098
+ISSN (Print): 2395-4752"
+70ce1a17f257320fc718d61964b21e7aeabd8cd5,Person re-identification with fusion of hand-crafted and deep pose-based body region features,"Person re-identification with fusion of hand-crafted and deep pose-based body
+region features
+Jubin Johnson1
+Shunsuke Yasugi2
+Yoichi Sugino2
+Sugiri Pranata1
+Panasonic R&D Center
+Singapore
+Shengmei Shen1
+Panasonic Corporation
+Core Element Technology Development Center
+Japan
+http://www.prdcsg.panasonic.com.sg/"
+70b0538af40672e3be4b72f97cec486693d5204f,Mixture Component Identification and Learning for Visual Recognition,"Mixture Component Identification and Learning
+for Visual Recognition
+Omid Aghazadeh, Hossein Azizpour, Josephine Sullivan, and Stefan Carlsson
+Computer Vision and Active Perception laboratory (CVAP), KTH, Sweden"
+70e90b9df5b8617ef6636c5492db727f9d48d0ec,People Search with Textual Queries About Clothing Appearance Attributes,"People search with textual queries about
+lothing appearance attributes
+Riccardo Satta, Federico Pala, Giorgio Fumera, and Fabio Roli"
+7056a051e0589ab6aa299c7d2a31588800b8c93e,Facial expression recognition and histograms of oriented gradients: a comprehensive study,"Carcagnì et al. SpringerPlus (2015) 4:645
+DOI 10.1186/s40064-015-1427-3
+RESEARCH
+Facial expression recognition
+nd histograms of oriented gradients: a
+omprehensive study
+Pierluigi Carcagnì*†, Marco Del Coco†, Marco Leo† and Cosimo Distante†
+Open Access
+*Correspondence:
+Pierluigi Carcagnì, Marco Del
+Coco, Marco Leo and Cosimo
+Distante contributed equally
+to this work
+National Research Council
+of Italy, Institute of Applied
+Sciences and Intelligent
+Systems, Via della Libertà, 3,
+73010 Arnesano , LE, Italy"
+70bf1769d2d5737fc82de72c24adbb7882d2effd,Face Detection in Intelligent Ambiences with Colored Illumination,"Face detection in intelligent ambiences with colored illumination
+Christina Katsimerou, Judith A. Redi, Ingrid Heynderickx
+Department of Intelligent Systems
+TU Delft
+Delft, The Netherlands"
+70560383cbf7c0dc5e9be1f2fd9efba905377095,Accelerating Online CP Decompositions for Higher Order Tensors,"Accelerating Online CP Decompositions for
+Higher Order Tensors
+Shuo Zhou1, Nguyen Xuan Vinh1, James Bailey1, Yunzhe Jia1, Ian Davidson2
+Dept. of Computing and Information Systems, The University of Melbourne, Australia
+Dept. of Computer Science, University of California, Davis, USA"
+70480ee0e636a77f6289be98ae39300a584808f6,Iterative Robust Registration Approach based on Feature Descriptors Correspondence - Application to 3D Faces Description,"Iterative Robust Registration Approach based on Feature Descriptors
+Correspondence
+Application to 3D Faces Description
+Cristal lab.Grift research group, National School of Computer Science, La Mannouba, Tunisia
+Wieme Gadacha and Faouzi Ghorbel
+Keywords:
+D Rigid Registration, Hausdorff Distance in Shape Space, 3D Parametrisation, Matching, Face Description,
+Shannon Theorem."
+70bb5c2570673eae86a3f9ced55c7ef00e0be8b5,Combinaison de Descripteurs Hétérogènes pour la Reconnaissance de Micro-Mouvements Faciaux,"Combinaison de Descripteurs Hétérogènes pour la Reconnaissance de
+Micro-Mouvements Faciaux.
+Vincent Rapp1, Thibaud Senechal1, Hanan Salam2, Lionel Prevost3, Renaud Seguier2, Kevin Bailly1
+ISIR - CNRS UMR 7222
+Université Pierre et Marie Curie, Paris
+{rapp, senechal,
+Supelec - ETR (UMR 6164)
+Avenue de la Boulaie, 35511,
+Cesson-Sevigne
+{salam,
+LAMIA - EA 4540
+Université des Antilles et de la Guyanne
+Résumé
+Dans cet article, nous présentons notre réponse au premier
+hallenge international sur la reconnaissance et l’analyse
+d’émotions faciales (Facial Emotion Recognition and Ana-
+lysis Challenge). Nous proposons une combinaison de dif-
+férents types de descripteurs dans le but de détecter de ma-
+nière automatique, les micro-mouvements faciaux d’un vi-
+sage. Ce système utilise une Machine à Vecteurs Supports"
+70b42bbd76e6312d39ea06b8a0c24beb4a93e022,Solving Multiple People Tracking in a Minimum Cost Arborescence,"Solving Multiple People Tracking In A Minimum Cost Arborescence
+Institut f¨ur Informationsverarbeitung
+Institute of Geodesy and Photogrammetry
+Laura Leal-Taix´e
+ETH Z¨urich
+Roberto Henschel
+Universit¨at Hannover
+Bodo Rosenhahn
+Institut f¨ur Informationsverarbeitung
+Universit¨at Hannover
+. Introduction
+For many applications of computer vision, it is neces-
+sary to localize and track humans that appear in a video
+sequence. Multiple people tracking has thus evolved as an
+ongoing research topic in the computer vision domain.
+A commonly used approach to solve the data associa-
+tion problem within the tracking task is to apply a hierarchi-
+al tracklet framework [5]. Although there has been great
+progress in such a model, mainly due to its good bootstrap-
+ping capabilities, so far little attention has been drawn to"
+1e058b3af90d475bf53b3f977bab6f4d9269e6e8,Manifold Relevance Determination,"Manifold Relevance Determination
+Andreas C. Damianou
+Dept. of Computer Science & Sheffield Institute for Translational Neuroscience, University of Sheffield, UK
+Carl Henrik Ek
+KTH – Royal Institute of Technology, CVAP Lab, Stockholm, Sweden
+Michalis K. Titsias
+Wellcome Trust Centre for Human Genetics, Roosevelt Drive, Oxford OX3 7BN, UK
+Neil D. Lawrence
+Dept. of Computer Science & Sheffield Institute for Translational Neuroscience, University of Sheffield, UK"
+1e1e35284591b6a69569c48b3677b6f4409c5edc,Optimal Feature Extraction and Classification of Tensors via Matrix Product State Decomposition,"Matrix Product State for Feature Extraction of
+Higher-Order Tensors
+Johann A. Bengua1, Ho N. Phien1, Hoang D. Tuan1 and Minh N. Do2
+een applied in neuroscience, pattern analysis, image classifi-
+ation and signal processing [7], [8], [9]. The central concept
+of using the TD is to decompose a large multidimensional
+tensor into a set of common factor matrices and a single core
+tensor which is considered as reduced features of the original
+tensor in spite of its lower dimension [7]. In practice, the
+TD is often performed in conjunction with some constraints,
+e.g. nonnegativity, orthogonality, etc., imposed on the common
+factors in order to obtain a better feature core tensor [7].
+However, constraints like orthogonality often leads to an NP-
+hard computational problem [10]. Practical application of the
+TD is normally limited to small-order tensors. This is due
+to the fact
+the TD core tensor preserves the higher-
+order structure of the original tensor, with its dimensionality
+remaining fairly large in order to capture relevant interactions
+etween components of the tensor [2]."
+1e2087908e6ce34032c821c7fb6629f2d0733086,Affective Embodied Conversational Agents for Natural Interaction,"Affective Embodied Conversational Agents for
+Natural Interaction
+Eva Cerezo, Sandra Baldassarri, Isabelle Hupont and Francisco J. Seron
+Advanced Computer Graphics Group (GIGA)
+Computer Science Department, Engineering Research Institute of Aragon(I3A),
+University of Zaragoza,
+Spain
+. Introduction
+Human computer intelligent interaction is an emerging field aimed at providing natural
+ways for humans to use computers as aids. It is argued that for a computer to be able to
+interact with humans it needs to have the communication skills of humans. One of these
+skills is the affective aspect of communication, which is recognized to be a crucial part of
+human intelligence and has been argued to be more fundamental in human behaviour and
+success in social life than intellect (Vesterinen, 2001; Pantic, 2005).
+Embodied conversational agents, ECAs (Casell et al., 2000), are graphical interfaces capable
+of using verbal and non-verbal modes of communication to interact with users in computer-
+ased environments. These agents are sometimes just as an animated talking face, may be
+displaying simple facial expressions and, when using speech synthesis, with some kind of
+lip synchronization, and sometimes they have sophisticated 3D graphical representation,
+with complex body movements and facial expressions."
+1e799047e294267087ec1e2c385fac67074ee5c8,Automatic Classification of Single Facial Images,"IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, VOL. 21, NO. 12, DECEMBER 1999
+Short Papers___________________________________________________________________________________________________
+Automatic Classification of
+Single Facial Images
+Michael J. Lyons, Julien Budynek, and
+Shigeru Akamatsu"
+1eb4ea011a3122dc7ef3447e10c1dad5b69b0642,Contextual Visual Recognition from Images and Videos,"Contextual Visual Recognition from Images and Videos
+Georgia Gkioxari
+Jitendra Malik
+Electrical Engineering and Computer Sciences
+University of California at Berkeley
+Technical Report No. UCB/EECS-2016-132
+http://www.eecs.berkeley.edu/Pubs/TechRpts/2016/EECS-2016-132.html
+July 19, 2016"
+1e7ae86a78a9b4860aa720fb0fd0bdc199b092c3,A Brief Review of Facial Emotion Recognition Based on Visual Information,"Article
+A Brief Review of Facial Emotion Recognition Based
+on Visual Information
+Byoung Chul Ko ID
+Department of Computer Engineering, Keimyung University, Daegu 42601, Korea;
+Tel.: +82-10-3559-4564
+Received: 6 December 2017; Accepted: 25 January 2018; Published: 30 January 2018"
+1e8eee51fd3bf7a9570d6ee6aa9a09454254689d,Face Search at Scale,"This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI 10.1109/TPAMI.2016.2582166, IEEE
+Transactions on Pattern Analysis and Machine Intelligence
+Face Search at Scale
+Dayong Wang, Member, IEEE, Charles Otto, Student Member, IEEE, Anil K. Jain, Fellow, IEEE"
+1e02dfeb93e8fd8753d2e69baf705baf8996cb81,"Online Object Tracking, Learning and Parsing with And-Or Graphs","ARXIV VERSION
+Online Object Tracking, Learning and Parsing
+with And-Or Graphs
+Tianfu Wu, Yang Lu and Song-Chun Zhu"
+1ea2a53a6cb9c08312276a2f0646935d5fab5ed3,Real-time Crowd Tracking using Parameter Optimized Mixture of Motion Models,"Noname manuscript No.
+(will be inserted by the editor)
+Real-time Crowd Tracking using Parameter Optimized
+Mixture of Motion Models
+Aniket Bera · David Wolinski · Julien Pettr´e · Dinesh Manocha
+Received: date / Accepted: date"
+1eec03527703114d15e98ef9e55bee5d6eeba736,Automatic identification of persons in TV series,"UNIVERSITÄT KARLSRUHE (TH)
+FAKULTÄT FÜR INFORMATIK
+INTERACTIVE SYSTEMS LABS
+Prof. Dr. A. Waibel
+DIPLOMA THESIS
+Automatic identification
+of persons in TV series
+SUBMITTED BY
+Mika Fischer
+MAY 2008
+ADVISORS
+M.Sc. Hazım Kemal Ekenel
+Dr.-Ing. Rainer Stiefelhagen"
+1e4c717a8a5eed5c3385b77641ebe3d8c4ceb3ac,An efficient algorithm for maximal margin clustering,"J Glob Optim
+DOI 10.1007/s10898-011-9691-4
+An efficient algorithm for maximal margin clustering
+Jiming Peng · Lopamudra Mukherjee · Vikas Singh ·
+Dale Schuurmans · Linli Xu
+Received: 29 April 2009 / Accepted: 5 February 2011
+© Springer Science+Business Media, LLC. 2011"
+1e2d965df330a72b3426279f9327f77330c2ee64,Simultaneous Detection and Segmentation of Pedestrians using Top-down and Bottom-up Processing,"Simultaneous Detection and Segmentation of Pedestrians
+using Top-down and Bottom-up Processing ∗
+Vinay Sharma
+James W. Davis
+Dept. of Computer Science and Engineering
+Ohio State University
+Columbus OH 43210 USA"
+1ebf201b34d9687fa17e336a608ab43e466ca13f,Detecting Parts for Action Localization,"Nicolas Chesneau
+Grégory Rogez
+Karteek Alahari
+Cordelia Schmid
+CHESNEAU ET AL.: DETECTING PARTS FOR ACTION LOCALIZATION
+Detecting Parts for Action Localization
+Inria∗"
+1ef1f33c48bc159881c5c8536cbbd533d31b0e9a,Identity-based Adversarial Training of Deep CNNs for Facial Action Unit Recognition,"Z. ZHANG ET AL.: ADVERSARIAL TRAINING FOR ACTION UNIT RECOGNITION
+Identity-based Adversarial Training of Deep
+CNNs for Facial Action Unit Recognition
+Zheng Zhang
+Shuangfei Zhai
+Lijun Yin
+Department of Computer Science
+State University of New York at
+Binghamton
+NY, USA."
+1ebcf5dbb37fcd369530b0ee4df5d4a60f756f3e,High-level Feature Learning by Ensemble Projection for Image Classification with Limited Annotations,"High-level Feature Learning by Ensemble Projection for Image
+Classification with Limited Annotations $
+Dengxin Dai∗, Luc Van Gool
+Computer Vision Lab, ETH Z¨urich, CH-8092, Switzerland"
+1e1334f76177ddf3ddc35f7359a1e04b65438dc4,What is the Most EfficientWay to Select Nearest Neighbor Candidates for Fast Approximate Nearest Neighbor Search?,"What Is the Most Efficient Way to Select Nearest Neighbor Candidates for Fast
+Approximate Nearest Neighbor Search?
+Masakazu Iwamura, Tomokazu Sato and Koichi Kise
+Graduate School of Engineering, Osaka Prefecture University
+{masa,"
+1e1a3ee9626c740be78f9c5f75f9c4d7edc45666,Estimating the Natural Illumination Conditions from a Single Outdoor Image,E-mail:
+1e8a265ec741584e851b83b5efc00351048bbe3f,Real Time Human Detection and Localization Using Consumer Grade Camera and Commercial UAV,"Preprints (www.preprints.org) | NOT PEER-REVIEWED | Posted: 7 November 2018 doi:10.20944/preprints201811.0156.v1
+Article
+Real Time Human Detection and Localization Using
+Consumer Grade Camera and Commercial UAV
+Nemi Bhattarai 1,*, Tai Nakamura 1 and Chitrini Mozumder 1,*
+Remote Sensing and Geographic Information Systems, School of Engineering and Technology, Asian
+Institute of Technology, Thailand; (T.N.)
+* Correspondence: (N.B); (C.M); Tel.: +66-099-421-7492"
+1e5c6c9fa9ba089931cfb2bc81e4368a4db5dd2d,Multi- View Fusion for Action Recognition in Child-Robot Interaction,"978-1-4799-7061-2/18/$31.00 ©2018 IEEE
+ICIP 2018
+#2Kinect #1Kinect #3Multi-view action recognition systemSenseActDecisionSpeakRec.ActionActFig.1:Multi-viewactionrecognitionsystemforchild-robotinteraction.presentspontaneousbehaviorandaninformalwayofcommunica-tion.Inaddition,thesameactionscanbeperformedinavarietyofwaysandawidespectrum,furthercomplicatingtherecognitionofactions.Althoughhumanactionrecognitionisapopularproblemwithmanyproposedmethods[8–13],therequirementsofmulti-viewac-tionrecognitiondiffersignificantlyasithastotakeintoaccountbothactionrecognitionthatresultsfromsingleviewsandalsothefusionamongtheresultinginformationfromthedifferentstreams[14,15].Incross-viewactionrecognitionworksitisattemptedtoshareknowledgefortheactionamongthedifferentsetupviews.Forexample,in[16]aspecificviewistreatedasthetargetdomainandtheotherviewsassourcedomainsinordertoformulateacross-viewlearningframework.Inotherapproaches,theknowledgeofactionsistransferredfromthedifferentviewsinasinglecanoni-calview[17].In[18]itisproposedtolearnview-invariantfeaturesrobusttoviewvariationsusingdeepmodels.Inthefieldofmulti-viewactionrecognition,anewglobalrepresentationthatiscalledmulti-viewsupervectorhasalsobeenproposedinordertoenhancerecognitionperformance[19].Finally,anotherinterestingapproachispresentedin[20]whereitisattemptedtotransferthelow-levelfeaturesintoahigh-levelsemanticspaceandamulti-tasklearningapproachforjointactionmodelingisexamined.Inthispaperwedevelopamulti-viewactionrecognitionsystemsuitableforCRI.Themaincontributionsofthispapercanbesum-marizedasfollows:1)Single-viewmethodsareexploredinordertocreaterobustactionrecognitionmodelsforparticularusers,i.e.children,underdifficulttaskswithfewtrainingdata.2)Methodsforthefusionofinformationfromdifferentstreamsinamulti-viewsys-temareproposedtoenhanceactionrecognitionduringCRI.3)Themulti-viewactionrecognitionsystemisintegratedinroboticplat-"
+1ed6a05a226cb0d09afd76ff9b7560c404d8eb49,D4g: Pre-completion report on exemplar,"D4g: Pre-completion report on exemplar
+Workpackage 4 Deliverable
+Date: 31th August 2007"
+1ecf4055831ca23c9f6026ef866dac95c8b8f9de,Eye Gaze Tracking With a Web Camera in a Desktop Environment,"Eye Gaze Tracking With a Web Camera
+in a Desktop Environment
+Yiu-ming Cheung, Senior Member, IEEE, and Qinmu Peng, Member, IEEE"
+1eadafc27372b33a73eca062438a58d4280fd3a1,DeepSkeleton: Learning Multi-Task Scale-Associated Deep Side Outputs for Object Skeleton Extraction in Natural Images,"DeepSkeleton: Learning Multi-task Scale-associated
+Deep Side Outputs for Object Skeleton Extraction
+in Natural Images
+Wei Shen, Kai Zhao, Yuan Jiang, Yan Wang, Xiang Bai and Alan Yuille"
+1e21078efc0aa7a3881d0e87cb5dd5918523f525,Network Consistent Data Association,"Network Consistent Data Association
+Anirban Chakraborty, Member, IEEE, Abir Das, Student Member, IEEE,
+nd Amit K. Roy-Chowdhury, Senior Member, IEEE"
+1e8394cc9fe7c2392aa36fb4878faf7e78bbf2de,Zero-Shot Object Recognition System Based on Topic Model,"TO APPEAR IN IEEE THMS
+Zero-Shot Object Recognition System
+ased on Topic Model
+Wai Lam Hoo and Chee Seng Chan"
+1e2b8778cfe44de4bbe4a099ee7cdff5c2ca5f38,Attention to Scale: Scale-Aware Semantic Image Segmentation,"Attention to Scale: Scale-aware Semantic Image Segmentation
+Liang-Chieh Chen∗
+{yangyi05, wangjiang03,
+Yi Yang, Jiang Wang, Wei Xu
+Alan L. Yuille"
+1e93ec0f5c29069beedbe7d617f5167b82b70730,Filtering SVM frame-by-frame binary classification in a detection framework,"FILTERING SVM FRAME-BY-FRAME BINARY CLASSIFICATION IN A DETECTION
+FRAMEWORK
+Alejandro Betancourt1,2, Pietro Morerio1, Lucio Marcenaro1, Matthias Rauterberg2, Carlo Regazzoni1
+Information and Signal Processing for Cognitive
+University of Genoa, Italy
+Telecommunications Group.
+Department of Naval, Electric, Electronic
+nd Telecommunications Engineering.
+Designed Intelligence Group.
+Department of Industrial Design.
+Eindhoven University of Technology.
+Eindhoven, Netherlands."
+1ecb56e7c06a380b3ce582af3a629f6ef0104457,"A New Way of Discovery of Belief, Desire and Intention in the BDI Agent-Based Software Modeling","List of Contents Vol.8
+Contents of
+Journal of Advanced Computational
+Intelligence and Intelligent Informatics
+Volume 8
+Vol.8 No.1, January 2004
+Editorial:
+o Special Issue on Selected Papers from Humanoid,
+Papers:
+o Dynamic Color Object Recognition Using Fuzzy
+Nano-technology, Information Technology,
+Communication and Control, Environment, and
+Management (HNICEM’03).
+Elmer P. Dadios
+Papers:
+o A New Way of Discovery of Belief, Desire and
+Intention in the BDI Agent-Based Software
+Modeling .
+Chang-Hyun Jo
+o Integration of Distributed Robotic Systems"
+1e64b2d2f0a8a608d0d9d913c4baee6973995952,Dominant and Complementary Multi-Emotional Facial Expression Recognition Using C-Support Vector Classification,"DOMINANT AND
+COMPLEMENTARY MULTI-
+EMOTIONAL FACIAL
+EXPRESSION RECOGNITION
+USING C-SUPPORT VECTOR
+CLASSIFICATION
+Christer Loob, Pejman Rasti, Iiris Lusi, Julio C. S. Jacques
+Junior, Xavier Baro, Sergio Escalera, Tomasz Sapinski,
+Dorota Kaminska and Gholamreza Anbarjafari"
+1e82a8965f08e8d38b16f39412e6e3c456f6f22e,Social force model aided robust particle PHD filter for multiple human tracking,"978-1-4799-9988-0/16/$31.00 ©2016 IEEE
+ICASSP 2016"
+1e21b925b65303ef0299af65e018ec1e1b9b8d60,Unsupervised Cross-Domain Image Generation,"Under review as a conference paper at ICLR 2017
+UNSUPERVISED CROSS-DOMAIN IMAGE GENERATION
+Yaniv Taigman, Adam Polyak & Lior Wolf
+Facebook AI Research
+Tel-Aviv, Israel"
+1ee27c66fabde8ffe90bd2f4ccee5835f8dedbb9,9 Entropy Regularization,"Entropy Regularization
+Yves Grandvalet
+Yoshua Bengio
+The problem of semi-supervised induction consists in learning a decision rule from
+labeled and unlabeled data. This task can be undertaken by discriminative methods,
+provided that learning criteria are adapted consequently. In this chapter, we moti-
+vate the use of entropy regularization as a means to bene(cid:12)t from unlabeled data in
+the framework of maximum a posteriori estimation. The learning criterion is derived
+from clearly stated assumptions and can be applied to any smoothly parametrized
+model of posterior probabilities. The regularization scheme favors low density sep-
+ration, without any modeling of the density of input features. The contribution
+of unlabeled data to the learning criterion induces local optima, but this problem
+an be alleviated by deterministic annealing. For well-behaved models of posterior
+probabilities, deterministic annealing EM provides a decomposition of the learning
+problem in a series of concave subproblems. Other approaches to the semi-supervised
+problem are shown to be close relatives or limiting cases of entropy regularization.
+A series of experiments illustrates the good behavior of the algorithm in terms of
+performance and robustness with respect to the violation of the postulated low den-
+sity separation assumption. The minimum entropy solution bene(cid:12)ts from unlabeled
+data and is able to challenge mixture models and manifold learning in a number of"
+1e0ba1a61ed0c6d4a76697de1e185ed5def60fb4,Learning to Parse Video into Stable Spatiotemporal Volumes1,"Learning to Parse Video into Stable Spatiotemporal Volumes1
+Thomas Dean
+Google Inc.
+We are interested in learning how to exploit continuity, motion and context to account for stable, recov-
+erable, spatiotemporal phenomena embedded in video. While most humans can make sense of still images,
+for the most part, we need continuity and motion to make sense of the world around us. Humans are also
+ided by strong priors that allow us to make confident predictions despite ambiguity, noise and occlusion.
+The idea of combining top-down prior knowledge and bottom-up cues derived from motion and other
+low-level features has been around almost as long as research in computer vision, e.g., [10], and has recently
+seen renewed interest, e.g., [3, 2, 6, 11]. Rather than the traditional tasks of object recognition or image
+ategorization, here we focus on the task of explaining each new frame in a video in terms of a continuously
+evolving representation of spatiotemporal volumes that account for the complete visual field. For the purpose"
+1e1dc91c2ac3ad0ae44941e711aed193231c3335,Universal Adversarial Perturbations Against Semantic Image Segmentation,"Universal Adversarial Perturbations Against Semantic Image Segmentation
+Bosch Center for Artificial Intelligence, Robert Bosch GmbH
+Jan Hendrik Metzen
+Mummadi Chaithanya Kumar
+University of Freiburg
+Thomas Brox
+University of Freiburg
+Bosch Center for Artificial Intelligence, Robert Bosch GmbH
+Volker Fischer"
+1e1a67a78badc619b2f9938e4a03922dcbee0fb6,Food/Non-food Image Classification and Food Categorization using Pre-Trained GoogLeNet Model,"Food/Non-food Image Classification and Food
+Categorization using Pre-Trained GoogLeNet Model
+Ashutosh Singla
+Lin Yuan
+Touradj Ebrahimi
+Multimedia Signal Processing Group
+Ecole Polytechnique Fédérale de Lausanne
+Station 11, 1015 Lausanne, Switzerland"
+1e15c5cba95cbb475ddb67157fdd480f5253502e,Face Recognition under Varying Lighting Conditions: A Combination of Weber-face and Local Directional Pattern for Feature Extraction and Support Vector Machines for Classification,"Journal of Information Hiding and Multimedia Signal Processing
+Ubiquitous International
+©2017 ISSN 2073-4212
+Volume 8, Number 5, September 2017
+Face Recognition under Varying Lighting Conditions:
+A Combination of Weber-face and Local Directional
+Pattern for Feature Extraction and Support Vector
+Machines for Classification
+Chin-Shiuh Shieh1,5, Liyun Chang4,∗, and Tsair-Fwu Lee1,3,5,∗
+Chi-Kien Tran1,2, Chin-Dar Tseng1, Pei-Ju Chao1,3
+Medical Physics and Informatics Laboratory of Electronics Engineering,
+National Kaohsiung University of Applied Sciences, Kaohsiung 80778, Taiwan, ROC
+Center for Information Technology, Hanoi University of Industry, Hanoi, Vietnam
+Department of Radiation Oncology, Kaohsiung Chang Gung Memorial Hospital,
+Department of Medical Imaging and Radiological Sciences, I-Shou University,
+Kaohsiung 83305,Taiwan, ROC
+Kaohsiung 82445,Taiwan, ROC
+5 Graduate Institute of Clinical Medicine, Kaohsiung Medical University,
+Corresponding authors:
+Kaohsiung 807,Taiwan, ROC"
+1e9c3d0d87e09ea359ce1e31114b677d627bf9e7,Correction: Rapid Stress System Drives Chemical Transfer of Fear from Sender to Receiver,"RESEARCH ARTICLE
+Rapid Stress System Drives Chemical Transfer
+of Fear from Sender to Receiver
+Jasper H. B. de Groot1*, Monique A. M. Smeets1, Gün R. Semin1,2,3
+Department of Social and Organizational Psychology, Faculty of Social and Behavioral Sciences, Utrecht
+University, Utrecht, the Netherlands, 2 Department of Psychology, Koç University, Istanbul, Turkey,
+Instituto Superior de Psicologia Aplicada (ISPA), Instituto Universitário, Lisbon, Portugal
+11111"
+1ee3b4ba04e54bfbacba94d54bf8d05fd202931d,Celebrity Face Recognition using Deep Learning,"Indonesian Journal of Electrical Engineering and Computer Science
+Vol. 12, No. 2, November 2018, pp. 476~481
+ISSN: 2502-4752, DOI: 10.11591/ijeecs.v12.i2.pp476-481
+ 476
+Celebrity Face Recognition using Deep Learning
+Nur Ateqah Binti Mat Kasim1, Nur Hidayah Binti Abd Rahman2, Zaidah Ibrahim3,
+Nur Nabilah Abu Mangshor4
+,2,3Faculty of Computer and Mathematical Sciences, UniversitiTeknologi MARA (UiTM),
+Faculty of Computer and Mathematical Sciences, UniversitiTeknologi MARA (UiTM),
+Shah Alam, Selangor, Malaysia
+Campus Jasin, Melaka, Malaysia
+Article Info
+Article history:
+Received May 29, 2018
+Revised Jul 30, 2018
+Accepted Aug 3, 2018
+Keywords:
+AlexNet
+Convolutional neural network
+Deep learning"
+1eda03469d860ac725122bd27faaae6b2cb47d0d,Image Question Answering Using Convolutional Neural Network with Dynamic Parameter Prediction,"Image Question Answering using Convolutional Neural Network
+with Dynamic Parameter Prediction
+Hyeonwoo Noh
+Paul Hongsuck Seo
+Bohyung Han
+{shgusdngogo, hsseo,
+Department of Computer Science and Engineering, POSTECH, Korea"
+1e41a3fdaac9f306c0ef0a978ae050d884d77d2a,Robust Object Recognition with Cortex-Like Mechanisms,"Robust Object Recognition with
+Cortex-Like Mechanisms
+Thomas Serre, Lior Wolf, Stanley Bileschi, Maximilian Riesenhuber, and
+Tomaso Poggio, Member, IEEE"
+1e8711d2fc4b05eac0699c82f4698154c2b057d3,The unreasonable effectiveness of small neural ensembles in high-dimensional brain,"The unreasonable effectiveness of small neural ensembles
+in high-dimensional brain
+A.N. Gorbana,b,∗, V.A. Makarovb,c, I.Y. Tyukina,b,d
+Instituto de Matem´atica Interdisciplinar, Faculty of Mathematics, Universidad Complutense de Madrid, Avda Complutense s/n, 28040 Madrid,
+Department of Mathematics, University of Leicester, Leicester, LE1 7RH, UK
+Lobachevsky University, Nizhni Novgorod, Russia
+dSaint-Petersburg State Electrotechnical University, Saint-Petersburg, Russia
+Spain"
+1e83e2abcb258cd62b160e3f31a490a6bc042e83,Metric Learning in Codebook Generation of Bag-of-Words for Person Re-identification,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+Metric Learning in Codebook Generation of
+Bag-of-Words for Person Re-identification
+Lu Tian, Student Member, IEEE, and Shengjin Wang, Member, IEEE"
+1e1e66783f51a206509b0a427e68b3f6e40a27c8,Semi-supervised Estimation of Perceived Age from Face Images,"SEMI-SUPERVISED ESTIMATION OF PERCEIVED AGE
+FROM FACE IMAGES
+VALWAY Technology Center, NEC Soft, Ltd., Tokyo, Japan
+Kazuya Ueki
+Masashi Sugiyama
+Keywords:"
+1ef46f7bb7463ead4369a796435106da63578733,Shamann: Shared Memory Augmented Neural Networks,"Under review as a conference paper at ICLR 2019
+SHAMANN: SHARED MEMORY AUGMENTED
+NEURAL NETWORKS
+Anonymous authors
+Paper under double-blind review"
+1efaa128378f988965841eb3f49d1319a102dc36,Hierarchical binary CNNs for landmark localization with limited resources,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+Hierarchical binary CNNs for landmark
+localization with limited resources
+Adrian Bulat and Georgios Tzimiropoulos"
+8408f4b1193e8db25fec818a989d9fe3194d5ea6,3D Face Recognition using Radon Transform and Symbolic LDA,"International Journal of Computer Applications (0975 - 8887)
+Volume 67 - No. 4, April 2013
+D Face Recognition using Radon Transform and
+Symbolic LDA
+P. S. Hiremath
+Department of Computer Science
+Gulbarga University, Gulbarga-585106
+Karnataka, India
+Manjunatha Hiremath
+Department of Computer Science
+Gulbarga University, Gulbarga-585106
+Karnataka, India"
+84a69f6357b137028e3aa51376ce2dffad5e0179,Studies of Typically and Atypically Developing Children,"Digital Comprehensive Summaries of Uppsala Dissertations
+from the Faculty of Social Sciences 152
+Visual Attention to Faces, Eyes and
+Objects
+Studies of Typically and Atypically Developing
+Children
+JOHAN L. KLEBERG
+ISSN 1652-9030
+ISBN 978-91-513-0244-7
+urn:nbn:se:uu:diva-342578
+UNIVERSITATIS
+UPSALIENSIS
+UPPSALA"
+84af83ff6412a756df58b6436f0d2e3c049e1f12,Abnormality Detection with Improved Histogram of Oriented Tracklets,"Abnormality Detection with Improved
+Histogram of Oriented Tracklets
+Hossein Mousavi1, Moin Nabi1 , Hamed Kiani Galoogahi1
+Alessandro Perina1 and Vittorio Murino1,2
+Pattern Analysis and Computer Vision Department (PAVIS)
+Istituto Italiano di Tecnologia (IIT) Genova, Italy
+Dipartimento di Informatica,University of Verona, Italy"
+8451bf3dd6bcd946be14b1a75af8bbb65a42d4b2,Consensual and Privacy-Preserving Sharing of Multi-Subject and Interdependent Data,"Consensual and Privacy-Preserving Sharing of
+Multi-Subject and Interdependent Data
+Alexandra-Mihaela Olteanu
+EPFL, UNIL–HEC Lausanne
+K´evin Huguenin
+UNIL–HEC Lausanne
+Italo Dacosta
+Jean-Pierre Hubaux"
+842e42d30dc31de1833047c268f0a5cdff16f2ce,3D Face Compression and Recognition using Spherical Wavelet Parametrization,"(IJACSA) International Journal of Advanced Computer Science and Applications,
+Vol. 3, No.9, 2012
+D Face Compression and Recognition using
+Spherical Wavelet Parametrization
+Rabab M. Ramadan
+College of Computers and Information Technology
+University of Tabuk
+Tabuk, KSA
+into multi-resolution sub"
+845c03910c7cfd02de7df9622a9973e8b085c0d8,Interactive Generation of Realistic Facial Wrinkles from Sketchy Drawings,"EUROGRAPHICS 2015 / O. Sorkine-Hornung and M. Wimmer
+(Guest Editors)
+Volume 34 (2015), Number 2
+Interactive Generation of Realistic Facial Wrinkles from
+Sketchy Drawings
+Hyeon-Joong Kim 1,3, A. Cengiz Öztireli2, Il-Kyu Shin1, Markus Gross2, Soo-Mi Choi†1
+Sejong University, Korea 2 ETH Zurich, Switzerland 3 3D Systems, Korea
+Figure 1: We use statistics extracted from example faces to augment interactively drawn concept sketches for synthesizing
+realistic facial wrinkles."
+84c35fc21db3bcd407a4ffb009912b6ac5a47e3c,Mgan: Training Generative Adversarial Nets,"Under review as a conference paper at ICLR 2018
+MGAN: TRAINING GENERATIVE ADVERSARIAL NETS WITH
+MULTIPLE GENERATORS
+Anonymous authors
+Paper under double-blind review"
+84e4b7469f9c4b6c9e73733fa28788730fd30379,Projective complex matrix factorization for facial expression recognition,"Duong et al. EURASIP Journal on Advances in Signal Processing (2018) 2018:10
+DOI 10.1186/s13634-017-0521-9
+EURASIP Journal on Advances
+in Signal Processing
+R ES EAR CH
+Projective complex matrix factorization for
+facial expression recognition
+Viet-Hang Duong1, Yuan-Shan Lee1, Jian-Jiun Ding2, Bach-Tung Pham1, Manh-Quan Bui1, Pham The Bao2
+nd Jia-Ching Wang1,3*
+Open Access"
+84968d6488e87c99b8560ab33110a5bf85aa5761,Object category learning and retrieval with weak supervision,"Object category learning and retrieval with
+weak supervision
+Steven Hickson, Anelia Angelova, Irfan Essa, Rahul Sukthankar
+Google Brain / Google Research
+(shickson, anelia, irfanessa,"
+84be05dd82a7208a6e7b3d238df27b123cc917ce,Revisiting Visual Question Answering Baselines,"Revisiting Visual Question Answering Baselines
+Allan Jabri, Armand Joulin, and Laurens van der Maaten
+Facebook AI Research"
+84c8b29103480cf6f2b93e2fd4225b0d9d535ed6,Playing hide and seek with a mobile companion robot,"Playing Hide and Seek with a Mobile
+Companion Robot
+Michael Volkhardt, Steffen Mueller, Christof Schroeter, Horst-Michael Gross
+Neuroinformatics and Cognitive Robotics Lab
+Ilmenau University of Technology
+98684 Ilmenau, Germany
+Email:"
+846f3857976ba437e0592a848e47f6a3370880a3,3D Face Recognition Based on Depth and Intensity Gabor Features using Symbolic PCA and AdaBoost,"International Journal of Signal Processing, Image Processing and Pattern Recognition
+Vol.6, No.5 (2013), pp.1-12
+http://dx.doi.org/10.14257/ijsip.2013.6.5.01
+D Face Recognition Based on Depth and Intensity Gabor
+Features using Symbolic PCA and AdaBoost
+P. S. Hiremath and Manjunatha Hiremath
+Department of Computer Science
+Gulbarga University, Gulbarga – 585106
+Karnataka, India,"
+844568d9e49ec34536502bb8c66d5578c962abd6,From Virtual to Real World Visual Perception Using Domain Adaptation - The DPM as Example,"Invited book chapter to appear in Domain Adaptation in Computer Vision Applications, Springer Series: Advances
+in Computer Vision and Pattern Recognition, Edited by Gabriela Csurka. Written during Summer 2016.
+From Virtual to Real World Visual Perception using Domain
+Adaptation – The DPM as Example
+Computer Vision Center (CVC) and Dpt. Ci`encies de la Computaci´o (DCC),
+Antonio M. L´opez
+Universitat Aut`onoma de Barcelona (UAB)
+Jiaolong Xu
+Jos´e L. G´omez
+David V´azquez
+CVC and DCC, UAB
+CVC and DCC, UAB
+CVC and DCC, UAB
+Germ´an Ros
+CVC and DCC, UAB
+December 30, 2016"
+84fa126cb19d569d2f0147bf6f9e26b54c9ad4f1,Improved Boosting Performance by Explicit Handling of Ambiguous Positive Examples,"Improved Boosting Performance by Explicit
+Handling of Ambiguous Positive Examples
+Miroslav Kobetski and Josephine Sullivan"
+84508e846af3ac509f7e1d74b37709107ba48bde,Use of the Septum as a Reference Point in a Neurophysiologic Approach to Facial Expression Recognition,"Use of the Septum as a Reference Point in a Neurophysiologic Approach to
+Facial Expression Recognition
+Igor Stankovic and Montri Karnjanadecha
+Department of Computer Engineering, Faculty of Engineering,
+Prince of Songkla University, Hat Yai, Songkhla, 90112 Thailand
+Telephone: (66)080-7045015, (66)074-287-357
+E-mail:"
+84c8eb2db35f7fd38c906ced741e2c5470ba7544,Deep Control - a simple automatic gain control for memory efficient and high performance training of deep convolutional neural networks,"Deep Control - a simple automatic gain control for memory
+efficient and high performance training of deep
+onvolutional neural networks
+Brendan Ruff
+Submitted to BMVC 2017, 2nd May 2017
+Patent application GB1619779.0, 23rd Nov 2016"
+841a5de1d71a0b51957d9be9d9bebed33fb5d9fa,PCANet: A Simple Deep Learning Baseline for Image Classification?,"PCANet: A Simple Deep Learning Baseline for
+Image Classification?
+Tsung-Han Chan, Member, IEEE, Kui Jia, Shenghua Gao, Jiwen Lu, Senior Member, IEEE,
+Zinan Zeng, and Yi Ma, Fellow, IEEE"
+84fd7c00243dc4f0df8ab1a8c497313ca4f8bd7b,Perceived Age Estimation from Face Images,"We are IntechOpen,
+the world’s leading publisher of
+Open Access books
+Built by scientists, for scientists
+,800
+16,000
+Open access books available
+International authors and editors
+Downloads
+Our authors are among the
+Countries delivered to
+TOP 1%
+2.2%
+most cited scientists
+Contributors from top 500 universities
+Selection of our books indexed in the Book Citation Index
+in Web of Science™ Core Collection (BKCI)
+Interested in publishing with us?
+Contact
+Numbers displayed above are based on latest data collected."
+8411fe1142935a86b819f065cd1f879f16e77401,Facial Recognition using Modified Local Binary Pattern and Random Forest,"International Journal of Artificial Intelligence & Applications (IJAIA), Vol. 4, No. 6, November 2013
+Facial Recognition using Modified Local Binary
+Pattern and Random Forest
+Brian O’Connor and Kaushik Roy
+Department of Computer Science,
+North Carolina A&T State University,
+Greensboro, NC 27411"
+84187adc5e6412123405102bb3c2f0428713593c,Quad-Tree based Image Encoding Methods for Data-Adaptive Visual Feature Learning,"IPSJ SIG Technical Report
+Quad-Tree based Image Encoding Methods for
+Data-Adaptive Visual Feature Learning
+Cuicui Zhang1,a) Xuefeng Liang1,b) Takashi Matsuyama1,c)"
+84a20d0a47c0d826b77f73075530d618ba7573d2,Look at Boundary: A Boundary-Aware Face Alignment Algorithm,"(68 points) COFW (29 points) AFLW (19 points) Figure1:Thefirstcolumnshowsthefaceimagesfromdifferentdatasetswithdifferentnumberoflandmarks.Thesecondcolumnillustratestheuniversallydefinedfacialboundariesestimatedbyourmethods.Withthehelpofboundaryinformation,ourapproachachieveshighaccuracylocalisationresultsacrossmultipledatasetsandannotationprotocols,asshowninthethirdcolumn.Differenttofacedetection[45]andrecognition[75],facealignmentidentifiesgeometrystructureofhumanfacewhichcanbeviewedasmodelinghighlystructuredout-put.Eachfaciallandmarkisstronglyassociatedwithawell-definedfacialboundary,e.g.,eyelidandnosebridge.However,comparedtoboundaries,faciallandmarksarenotsowell-defined.Faciallandmarksotherthancornerscanhardlyremainthesamesemanticallocationswithlargeposevariationandocclusion.Besides,differentannotationschemesofexistingdatasetsleadtoadifferentnumberoflandmarks[28,5,66,30](19/29/68/194points)andanno-tationschemeoffuturefacealignmentdatasetscanhardlybedetermined.Webelievethereasoningofauniquefacial"
+84124eba5ccd5a25d2275c3dd6d2f15e30225ef7,People counting with image retrieval using compressed sensing,"014 IEEE International Conference on Acoustic, Speech and Signal Processing (ICASSP)
+978-1-4799-2893-4/14/$31.00 ©2014 Crown
+Homa Foroughi, Nilanjan Ray, Hong Zhang
+Index Terms— compressed sensing, people counting,
+. INTRODUCTION"
+84f6f20496fadb975922b47528fd94c71e872950,Dissimilarity-based people re-identification and search for intelligent video surveillance,"Ph.D. in Electronic and Computer Engineering
+Dept. of Electrical and Electronic Engineering
+University of Cagliari
+Dissimilarity-based people
+re-identification and search for
+intelligent video surveillance
+Riccardo Satta
+Advisor: Prof. Fabio Roli
+Co-advisor: Prof. Giorgio Fumera
+Curriculum: ING-INF/05 - Sistemi di Elaborazione delle Informazioni
+XXV Cycle
+April 2013"
+4adca62f888226d3a16654ca499bf2a7d3d11b71,Models of Semantic Representation with Visual Attributes,"Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics, pages 572–582,
+Sofia, Bulgaria, August 4-9 2013. c(cid:13)2013 Association for Computational Linguistics"
+4a45b8f8decc178305af06d758ac7428a9070fad,Augmented CycleGAN: Learning Many-to-Many Mappings from Unpaired Data,"Augmented CycleGAN: Learning Many-to-Many Mappings
+from Unpaired Data
+Amjad Almahairi 1 † Sai Rajeswar 1 Alessandro Sordoni 2 Philip Bachman 2 Aaron Courville 1 3"
+4a70c6e14bcd7a44838fdabdcdb33bc026c907b4,Allocentric Pose Estimation,"Allocentric Pose Estimation
+Jos´e Oramas M.
+Luc De Raedt
+Tinne Tuytelaars
+KU Leuven, ESAT-PSI, iMinds
+KU Leuven, CS-DTAI
+KU Leuven, ESAT-PSI, iMinds"
+4a9831e5fec549edee454709048a51997ef60fb7,Did the Model Understand the Question?,"Did the Model Understand the Question?
+Pramod K. Mudrakarta
+University of Chicago
+Ankur Taly
+Google Brain
+Mukund Sundararajan
+Kedar Dhamdhere
+Google
+Google"
+4af25075729aa4d0fa4ecf6c948f59ec15bf9565,ii DOCUMENT EVOLUTION Version Date,"Project N° IST-2002-507634 - BioSecure
+D 9.1.2 - Revision: b3
+4 June 2005
+Contract Number :
+Project Acronym :
+Project Title :
+Instrument :
+Start Date of Project :
+Duration :
+Deliverable Number :
+Title of Deliverable :
+Contractual Due Date :
+Actual Date of Completion :
+IST-2002-507634
+BioSecure
+Biometrics for Secure Authentication
+Network of Excellence
+01 June, 2004
+6 months
+D 9.1.2"
+4af133c49d39c8b7aa9d82c17f1fd2c70e36233f,Recognition of Facial Gestures using Gabor Filter,"Recognition of Facial Gestures using Gabor Filter
+{tag} {/tag}
+International Journal of Computer Applications
+© 2011 by IJCA Journal
+Number 8 - Article 2
+Year of Publication: 2011
+Authors:
+Subhashini Ramalingam
+Dr Ilango Paramasivam
+Mangayarkarasi Ramiah
+10.5120/3153-3990"
+4a2d54ea1da851151d43b38652b7ea30cdb6dfb2,Direct recognition of motion-blurred faces,"Direct Recognition of Motion Blurred Faces
+Kaushik Mitra, Priyanka Vageeswaran and Rama Chellappa"
+4ab84f203b0e752be83f7f213d7495b04b1c4c79,Concave Losses for Robust Dictionary Learning,"CONCAVE LOSSES FOR ROBUST DICTIONARY LEARNING
+Rafael Will M. de Araujo, R. Hirata Jr ∗
+Alain Rakotomamonjy †
+University of S˜ao Paulo
+Institute of Mathematics and Statistics
+Rua do Mat˜ao, 1010 – 05508-090 – S˜ao Paulo-SP, Brazil
+Universit´e de Rouen Normandie
+LITIS EA 4108
+76800 Saint- ´Etienne-du-Rouvray, France"
+4a75d59c9c57da420441190071ba545eb4a75e1e,Deep Mixture of Diverse Experts for Large-Scale Visual Recognition,"Deep Mixture of Diverse Experts for Large-Scale
+Visual Recognition
+Tianyi Zhao, Jun Yu, Zhenzhong Kuang, Wei Zhang, Jianping Fan"
+4a1b67d1f30abeeecb270666605025d9d78971ff,Energy-based adaptive skin segmentation for hand and head detection,"Noname manuscript No.
+(will be inserted by the editor)
+Energy-based adaptive skin segmentation for hand and
+head detection
+Michal Kawulok
+Received: date / Accepted: date"
+4a3758f283b7c484d3f164528d73bc8667eb1591,Attribute Enhanced Face Aging with Wavelet-based Generative Adversarial Networks,"Attribute Enhanced Face Aging with Wavelet-based Generative Adversarial
+Networks
+Yunfan Liu, Qi Li, and Zhenan Sun∗
+Center for Research on Intelligent Perception and Computing, CASIA
+National Laboratory of Pattern Recognition, CASIA
+{qli,"
+4a19f6545473363b16d4a10ed13fef29b38856d3,What is a Salient Object? A Dataset and a Baseline Model for Salient Object Detection,"What is a salient object? A dataset and
+baseline model for salient object detection
+Ali Borji, Member, IEEE"
+4af997701ce14ba689f7f964a72bcae0a2432435,The role of gaze direction in face memory in autism spectrum disorder.,"RESEARCH ARTICLE
+The Role of Gaze Direction in Face Memory in Autism
+Spectrum Disorder
+Safa R. Zaki and Shannon A. Johnson
+We tested the hypothesis that the direction of gaze of target faces may play a role in reported face recognition deficits
+in those with an autism spectrum disorder (ASD). In previous studies, typically developing children and adults better
+remembered faces in which the eyes were gazing directly at them compared with faces in which the eyes were averted.
+In the current study, high-functioning children and adolescents with an ASD and age- and IQ-matched typically
+developing controls were shown a series of pictures of faces in a study phase. These pictures were of individuals whose
+gaze was either directed straight ahead or whose gaze was averted to one side. We tested the memory for these study faces
+in a recognition task in which the faces were shown with their eyes closed. The typically developing group better
+remembered the direct-gaze faces, whereas the ASD participants did not show this effect. These results imply that there
+may be an important link between gaze direction and face recognition abilities in ASD. Autism Res 2013, (cid:129)(cid:129): (cid:129)(cid:129)–(cid:129)(cid:129).
+© 2013 International Society for Autism Research, Wiley Periodicals, Inc.
+Keywords: autism spectrum disorder; face recognition; eye-contact; face-processing; gaze
+Face processing is a pivotal component of human
+ommunication and interaction. There is evidence that
+people with an autism spectrum disorder (ASD), a disor-
+der characterized by impairments in social interaction
+nd communication as well as restricted range of interests"
+4ac4b0a2d06ff5df1cc4941f8ae47843b4593bba,American Sign Language fingerspelling recognition from video: Methods for unrestricted recognition and signer-independence,"American Sign Language fingerspelling recognition
+from video: Methods for unrestricted recognition
+nd signer-independence
+Taehwan Kim
+A thesis submitted
+in partial fulfillment of the requirements for
+the degree of
+Doctor of Philosophy in Computer Science
+t the
+Toyota Technological Institute at Chicago
+Chicago, Illionois
+August 2016
+Thesis Committee:
+Vassilis Athitsos
+Karen Livescu (Thesis Advisor)
+Greg Shakhnarovich
+Yisong Yue"
+4a4da3d1bbf10f15b448577e75112bac4861620a,"Face , Expression , and Iris Recognition","FACE, EXPRESSION, AND IRIS RECOGNITION
+USING LEARNING-BASED APPROACHES
+Guodong Guo
+A dissertation submitted in partial fulfillment of
+the requirements for the degree of
+Doctor of Philosophy
+(Computer Sciences)
+t the
+UNIVERSITY OF WISCONSIN–MADISON"
+4abd49538d04ea5c7e6d31701b57ea17bc349412,Recognizing Fine-Grained and Composite Activities Using Hand-Centric Features and Script Data,"Recognizing Fine-Grained and Composite Activities
+using Hand-Centric Features and Script Data
+Marcus Rohrbach · Anna Rohrbach · Michaela Regneri ·
+Sikandar Amin · Mykhaylo Andriluka · Manfred Pinkal · Bernt Schiele"
+4a0f98d7dbc31497106d4f652968c708f7da6692,Real-time eye gaze direction classification using convolutional neural network,"Real-time Eye Gaze Direction Classification Using
+Convolutional Neural Network
+Anjith George, Member, IEEE, and Aurobinda Routray, Member, IEEE"
+4af36d3ce93f7ed82a7dc321fca926d540691b33,ADVISE: Symbolism and External Knowledge for Decoding Advertisements,[cs.CV] 29 Jul 2018
+4a95dacb1d38a07e73007082b8ed7651a4b5277c,Region labelling using a Point-Based Coherence Criterion,"Region labelling using a Point-Based Coherence Criterion
+Hichem Houissa(cid:2) and Nozha Boujemaa(cid:2)
+(cid:2)INRIA Rocquencourt, BP 105,78153, Le Chesnay Cedex-France"
+4a5592ae1f5e9fa83d9fa17451c8ab49608421e4,Multi-modal social signal analysis for predicting agreement in conversation settings,"Multi-modal Social Signal Analysis for Predicting
+Agreement in Conversation Settings
+Víctor Ponce-López
+IN3, Open University of
+Catalonia, Roc Boronat, 117,
+08018 Barcelona, Spain.
+Dept. MAiA, University of
+Barcelona, Gran Via, 585,
+08007 Barcelona, Spain.
+Computer Vision Center, UAB,
+08193 Barcelona, Spain.
+Sergio Escalera
+Dept. MAiA, University of
+Barcelona, Gran Via, 585,
+08007 Barcelona, Spain.
+Computer Vision Center, UAB,
+08193 Barcelona, Spain.
+Xavier Baró
+EIMT, Open University of
+Catalonia, Rbla. Poblenou,"
+4a56d5e483ddea93f14bfbe350a3063b2b9126cb,Iterative Action and Pose Recognition Using Global-and-Pose Features and Action-Specific Models,"Iterative Action and Pose Recognition
+using Global-and-Pose Features and Action-specific Models
+Norimichi Ukita
+Nara Institute of Science and Technology"
+4a53ac7f99a42da17a7f1ba04f5c6d6831e31151,Beyond Bilinear: Generalized Multi-modal Factorized High-order Pooling for Visual Question Answering,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+Beyond Bilinear: Generalized Multi-modal
+Factorized High-order Pooling
+for Visual Question Answering
+Zhou Yu, Jun Yu Member, IEEE, Chenchao Xiang, Jianping Fan, Dacheng Tao Fellow, IEEE"
+4a1a5316e85528f4ff7a5f76699dfa8c70f6cc5c,Face Recognition using Local Features based on Two-layer Block Model,"MVA2005 IAPR Conference on Machine VIsion Applications, May 16-18, 2005 Tsukuba Science City, Japan
+Face Recognition using Local Features based on Two-layer Block M odel
+W onjun Hwang1 Ji-Yeun Kim Seokcheol Kee
+Computing Lab.,
+Samsung Advanced Institute of Technology
+ombined by Yang and etc [7]. The sparsification of LFA
+helps the reduction of dimension of image in LDA scheme
+nd local topological property is more useful than holistic
+property of PCA in recognition, but there is still structural
+problem because the method to select the features is
+designed for minimization of reconstruction error, not for
+increasing discriminability in face model.
+In this paper, we proposed the novel recognition
+lgorithm to merge LFA and LDA method. We do not use
+the existing sparsification method for selecting features but
+dopt the two-layer block model to make several groups
+with topographic local features in similar position. Each
+local block, flocked local features, can represent its own
+local property and at
+time holistic face"
+4a2062ba576ca9e9a73b6aa6e8aac07f4d9344b9,Fusing Deep Convolutional Networks for Large Scale Visual Concept Classification,"Fusing Deep Convolutional Networks for Large
+Scale Visual Concept Classification
+Hilal Ergun and Mustafa SertB
+Department of Computer Engineering
+Bas¸kent University
+06810 Ankara, TURKEY"
+4a8085987032e85ac8017d9977a4b76b0d8fa4ac,Object Recognition using Template Matching,"Object Recognition using Template Matching
+Nikhil Gupta, Rahul Gupta, Amardeep Singh, Matt Wytock
+December 12, 2008
+Introduction
+Building 3D models
+Object Recognition is inherently a hard problem in
+omputer vision. Current standard object recogni-
+tion techniques require small training data sets of
+images and apply sophisticated algorithms. These
+methods tend to perform poorly because the small
+data set does not reflect the true distribution (selec-
+tion bias).
+Recently, Torralba et al [1] have proposed to de-
+velop a large data set of images (80 million images)
+nd apply simple algorithms for object recognition.
+Their method performs relatively well for some cer-
+tain classes of objects. Nevertheless, their data sets
+require very large storage and are noisy.
+In this project, we develop precise 3D models of
+objects and use these to apply simple learning al-"
+4ac3cd8b6c50f7a26f27eefc64855134932b39be,Robust Facial Landmark Detection via a Fully-Convolutional Local-Global Context Network,"Robust Facial Landmark Detection
+via a Fully-Convolutional Local-Global Context Network
+Daniel Merget
+Matthias Rock
+Gerhard Rigoll
+Technical University of Munich"
+4a0f152a07a9becb986b516a1281a4482b38db81,Video Compression for Object Detection Algorithms,"CONFIDENTIAL. Limited circulation. For review only.
+Preprint submitted to 24th International Conference on Pattern Recognition.
+Received January 22, 2018."
+4ad51a99e489939755f1d4f5d1f5bc509c49e96d,Preferences for facially communicated big five personality traits and their relation to self-reported big five personality,"Personality and Individual Differences 134 (2018) 195–200
+Contents lists available at ScienceDirect
+Personality and Individual Differences
+journal homepage: www.elsevier.com/locate/paid
+Preferences for facially communicated big five personality traits and their
+relation to self-reported big five personality
+Donald F. Sacco⁎, Mitch Brown
+The University of Southern Mississippi, United States of America
+A R T I C L E I N F O
+A B S T R A C T
+Keywords:
+Personality
+Face perception
+Big five
+Similarity
+Complementarity
+A growing body of research has begun to document that core personality traits are associated with specific facial
+structures, and that individuals are sensitive to these facial cues, as indexed by preferences for faces commu-
+nicating higher or lower levels of specific traits. We explored how self-reported Big Five personality traits in-
+fluence preferences for facially-communicated Big Five personality in targets. Participants selected among pairs"
+4a31ca27b987606ae353b300488068b5240633ee,WSABIE: Scaling Up to Large Vocabulary Image Annotation,"WSABIE: Scaling Up To Large Vocabulary Image Annotation
+Jason Weston1 and Samy Bengio1 and Nicolas Usunier2
+Google, USA
+Universit´e Paris 6, LIP6, France"
+4abaebe5137d40c9fcb72711cdefdf13d9fc3e62,Dimension Reduction for Regression with Bottleneck Neural Networks,"Dimension Reduction for Regression
+with Bottleneck Neural Networks
+Elina Parviainen
+BECS, Aalto University School of Science and Technology, Finland"
+4a64b020c72db15a729939a2c041ef4f5830f0f7,Challenges of Ground Truth Evaluation of Multi-target Tracking,"Challenges of Ground Truth Evaluation of Multi-Target Tracking
+Anton Milan1
+Konrad Schindler2
+Stefan Roth1
+Department of Computer Science, TU Darmstadt
+Photogrammetry and Remote Sensing Group, ETH Z¨urich"
+4abaf7d4b9577131cb2f93e913f8bd83f924da4c,Towards learning through robotic interaction alone: the joint guided search task,"Towards learning through robotic interaction alone:
+the joint guided search task
+Nick DePalma and Cynthia Breazeal
+0 Ames Str. Cambridge MA
+Personal Robots Group
+MIT Media Lab"
+4a3a9d02999fcf0895db31d644f40c98254ac4b1,Vision-based 3D bicycle tracking using deformable part model and Interacting Multiple Model filter,"Vision-based 3D Bicycle Tracking using Deformable Part Model
+nd Interacting Multiple Model Filter
+Hyunggi Cho, Paul E. Rybski and Wende Zhang"
+4a4a3effdfffb51a0f82d3b0904c017086996ac6,Conceptual and methodological challenges for neuroimaging studies of autistic spectrum disorders,"Mazzone and Curatolo Behavioral and Brain Functions 2010, 6:17
+http://www.behavioralandbrainfunctions.com/content/6/1/17
+REVIEW
+Conceptual and methodological challenges for
+neuroimaging studies of autistic spectrum
+disorders
+Luigi Mazzone1*, Paolo Curatolo2
+Open Access"
+4a9afcc6ba45c0ff05ea93d306ff73ede32f7ed4,Multiple-shot People Re-identify based on Feature Selection with Sparsity,"International Journal of Hybrid Information Technology
+Vol.8, No.1 (2015), pp.27-34
+http://dx.doi.org/10.14257/ijhit.2015.8.1.03
+Multiple-shot People Re-identify based on Feature Selection with
+Sparsity
+Dongping Zhang, Yanjie Li, Jiao Xu and Ye Shen
+College of Information Engineering, China Jiliang University, Hangzhou 310018,
+China"
+4a88237199595feaa3f0e3289cbdd201a3ce28ff,Multi-Domain Pose Network for Multi-Person Pose Estimation and Tracking,"Multi-Domain Pose Network for Multi-Person
+Pose Estimation and Tracking
+Hengkai Guo1(cid:63), Tang Tang1, Guozhong Luo1, Riwei Chen1, Yongchen Lu1,
+nd Linfu Wen1
+ByteDance AI Lab"
+4a227881f5763d2bda2e545eac346389b2b2017a,Model based image interpretation with application to facial expression recognition,"d d d
+d d d d
+ddd ddd ddd ddd
+Institut für Informatik
+der Technischen Universität München
+Model-based Image Interpretation with
+Application to Facial Expression
+Recognition
+Dissertation
+Matthias Wimmer"
+4a869781d074f6be7a5001c59e41b25145bdd830,DeltaPhish: Detecting Phishing Webpages in Compromised Websites,"DeltaPhish: Detecting Phishing Webpages
+in Compromised Websites∗
+Igino Corona1,2, Battista Biggio1,2, Matteo Contini2, Luca Piras1,2, Roberto Corda2, Mauro
+Mereu2, Guido Mureddu2, Davide Ariu1,2, and Fabio Roli1,2
+Pluribus One, via Bellini 9, 09123 Cagliari, Italy
+DIEE, University of Cagliari, Piazza d’Armi 09123, Cagliari, Italy"
+4a303369828d9334022a0f5e8ad2b1a715d1c0c9,Deep Metric Learning by Online Soft Mining and Class-Aware Attention,"Deep Metric Learning by Online Soft Mining and Class-Aware Attention
+Xinshao Wang1,2, Yang Hua1,2, Elyor Kodirov2, Guosheng Hu1,2, Neil M. Robertson1,2
+School of Electronics, Electrical Engineering and Computer Science, Queen’s University Belfast, UK
+{xwang39, y.hua, {elyor,
+Anyvision Research Team, UK"
+4ae3cdba121dec886a84eff146e438a55513002c,Interactive Hausdorff distance computation for general polygonal models,"Interactive Hausdorff Distance Computation for General Polygonal Models
+Min Tang∗
+Minkyoung Lee†
+Ewha Womans University, Seoul, Korea
+Young J. Kim‡
+http://graphics.ewha.ac.kr/HDIST
+Figure 1: Interactive Hausdorff Distance Computation. Our algorithm can compute Hausdorff distance between complicated models at
+interactive rates (the first three figures). Here, the green line denotes the Hausdorff distance. This algorithm can also be used to find
+penetration depth (PD) for physically-based animation (the last two figures). It takes only a few milli-seconds to run on average."
+4aeb87c11fb3a8ad603311c4650040fd3c088832,Self-paced Mixture of Regressions,"Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+SamplesSelected SamplesOutliersMoRSPMoR (ours)6361242024Figure1:Inter-componentimbalanceandintra-componentoutliersinMixtureofRegression(MoR)approaches.StandardMoRcannotlearnaccurateregressors(denotedbythedashedlines).Byintroduc-inganovelself-pacedscheme,ourSPMoRapproach(denotedbythesolidlines)selectsbalancedandconfidenttrainingsamplesfromeachcomponent,whilepreventlearningfromtheoutliersthroughoutthetrainingprocedure.theywillbeinevitablybiasedbydatadistribution:lowre-gressionerrorindenselysampledspacewhilehigherrorineverywhereelse.Foraddressingtheissuesofthedatadiscontinuityandheterogeneity,thedivide-and-conquerapproacheswerepro-posedlately.Thecoreideaistolearntocombinemultiplelocalregressors.Forinstance,thehierarchical-based[Hanetal.,2015]andtree-basedregression[HaraandChellappa,2014]makehardpartitionsrecursively,andthesubsetsofsam-plesmaynotbehomogeneousforlearninglocalregressors.WhileMixtureofRegressions(MoR)[Jacobsetal.,1991;JordanandXu,1995]distributesregressionerroramonglocalregressorsbymaximizinglikelihoodinthejointinput-outputspace.Theseapproachesreduceoverallerrorbyfittingre-gressionlocallyandreliefsthebiasbydiscontinuousdatadistribution.Unfortunately,theaforementionedapproachesstillcannotachievesatisfactoryperformancewhenapplyinginsomereal-worldapplications.Themainreasonisthattheseapproachestendtobesensitivetotheintra-componentoutliers(i.e.,thenoisytrainingdataresidingincertaincomponents)andtheinter-componentimbalance(i.e.,thedifferentamountsoftrain-"
+4a3d96b2a53114da4be3880f652a6eef3f3cc035,A Dictionary Learning-Based 3D Morphable Shape Model,"A Dictionary Learning-Based
+D Morphable Shape Model
+Claudio Ferrari
+, Giuseppe Lisanti, Stefano Berretti
+, Senior Member, IEEE, and Alberto Del Bimbo"
+4aa18f3a1c85f7a09d3b0d6b28c0339199892d60,The Application of Neural Networks for Facial Landmarking on Mobile Devices,
+4a6fcf714f663618657effc341ae5961784504c7,Scaling Up Class-Specific Kernel Discriminant Analysis for Large-Scale Face Verification,"Scaling up Class-Specific Kernel Discriminant
+Analysis for large-scale Face Verification
+Alexandros Iosifidis, Senior Member, IEEE, and Moncef Gabbouj, Fellow, IEEE"
+4a855d86574c9bd0a8cfc522bc1c77164819c0bc,PixelCNN Models with Auxiliary Variables for Natural Image Modeling,"PixelCNN Models with Auxiliary Variables for Natural Image Modeling
+Alexander Kolesnikov 1 Christoph H. Lampert 1"
+2409557812a3d26258949ba73a05031591f42bdc,Exact Discovery of Time Series Motifs,"Abdullah Mueen
+Exact Discovery of Time Series Motifs
+Eamonn Keogh
+Qiang Zhu
+Sydney Cash1,2 Brandon Westover1,3
+Massachusetts General Hospital, 2Harvard Medical School, 3Brigham and Women's Hospital
+University of California – Riverside
+{mueen, eamonn,"
+24ec4cd704d07865ce31fe539d00cd2597b5dfc9,Face Localization in the Neural Abstraction Pyramid,Face Localization
+24e98b70dc6982af2dd3a5bb4e501cc1b61f7d2b,LCR-Net++: Multi-person 2D and 3D Pose Detection in Natural Images,"SUBMITTED TO IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE, 2018
+LCR-Net++: Multi-person 2D and 3D Pose
+Detection in Natural Images
+Gr´egory Rogez, Philippe Weinzaepfel, and Cordelia Schmid, Fellow, IEEE"
+24c7554823bb8c1c0729c4ece5f3e50965aea74e,Robust Computation of Linear Models by Convex Relaxation,"ROBUST COMPUTATION OF LINEAR MODELS,
+OR HOW TO FIND A NEEDLE IN A HAYSTACK
+GILAD LERMAN∗, MICHAEL MCCOY†, JOEL A. TROPP†, AND TENG ZHANG◦"
+245130ac792531ca9981f9c5907190eac19ebb50,Detecting Objects using Unsupervised Parts-based Attributes∗,"Detecting Objects using Unsupervised Parts-based Attributes∗
+Santosh K. Divvala1, Larry Zitnick2, Ashish Kapoor2 , Simon Baker2
+Carnegie Mellon University.
+Microsoft Research.
+{larryz, ashishk,"
+24115d209e0733e319e39badc5411bbfd82c5133,Long-Term Recurrent Convolutional Networks for Visual Recognition and Description,"Long-term Recurrent Convolutional Networks for
+Visual Recognition and Description
+Jeff Donahue, Lisa Anne Hendricks, Marcus Rohrbach, Subhashini Venugopalan, Sergio Guadarrama,
+Kate Saenko, Trevor Darrell"
+24c442ac3f6802296d71b1a1914b5d44e48b4f29,Pose and Expression-Coherent Face Recovery in the Wild,"Pose and expression-coherent face recovery in the wild
+Xavier P. Burgos-Artizzu
+Joaquin Zepeda
+Technicolor, Cesson-S´evign´e, France
+Franc¸ois Le Clerc
+Patrick P´erez"
+245922e5251c103c2021577cc0f99791d748ac64,Fusion of Intraoperative 3D B-mode and Contrast-Enhanced Ultrasound Data for Automatic Identification of Residual Brain Tumors,"Article
+Fusion of Intraoperative 3D B-mode and
+Contrast-Enhanced Ultrasound Data for Automatic
+Identification of Residual Brain Tumors
+Elisee Ilunga-Mbuyamba 1,3, Dirk Lindner 2, Juan Gabriel Avina-Cervantes 1,∗, Felix Arlt 2,
+Horacio Rostro-Gonzalez 1, Ivan Cruz-Aceves 4 and Claire Chalopin 3
+Telematics (CA), Engineering Division (DICIS), University of Guanajuato, Campus Irapuato-Salamanca,
+Carr. Salamanca-Valle km 3.5 + 1.8, Comunidad de Palo Blanco, Salamanca, Gto. 36885, Mexico;
+(E.I.-M.); (H.R.-G.)
+Department of Neurosurgery, University Hospital Leipzig, Leipzig 04103, Germany;
+(D.L.); (F.A.)
+Innovation Center Computer Assisted Surgery (ICCAS), University of Leipzig, Leipzig 04103, Germany;
+Centro de Investigacion en Matematicas (CIMAT), A.C., Jalisco S/N, Col. Valenciana,
+Guanajuato, Gto. 36000, Mexico;
+* Correspondence: Tel.: +52-46-4647-9940 (ext. 2400)
+Academic Editor: Hideyuki Hasegawa
+Received: 15 February 2017; Accepted: 17 April 2017; Published: 19 April 2017"
+244a6d4f5f745f8c2a58a6a70d7ba2b91300c118,RADON Transform and PCA based 3 D Face Recognition using KNN and SVM,"International Journal of Computer Applications (0975 – 8887)
+Recent Advances in Information Technology, 2014
+RADON Transform and PCA based 3D Face Recognition
+using KNN and SVM
+P. S. Hiremath
+Department of Computer Science
+Gulbarga University
+Gulbarga, KA, India
+Manjunatha Hiremath
+Department of Computer Science
+Gulbarga University
+Gulbarga, KA, India
+integral
+researches
+society.Many"
+247b14570940601f5c7a2da1db532ecf1c302288,Dual Attention Networks for Multimodal Reasoning and Matching,"Dual Attention Networks for Multimodal Reasoning and Matching
+Hyeonseob Nam
+Naver Search Solutions
+Jung-Woo Ha
+Naver Labs
+Jeonghee Kim
+Naver Labs"
+245f8ec4373e0a6c1cae36cd6fed5a2babed1386,Lucas Kanade Optical Flow Computation from Superpixel based Intensity Region for Facial Expression Feature Extraction,"J. Appl. Environ. Biol. Sci., 7(3S)1-10, 2017
+© 2017, TextRoad Publication
+ISSN: 2090-4274
+Journal of Applied Environmental
+nd Biological Sciences
+www.textroad.com
+Lucas Kanade Optical Flow Computation from Superpixel based Intensity
+Region for Facial Expression Feature Extraction
+Halina Hassan1,2, Abduljalil Radman1, Shahrel Azmin Suandi1, Sazali Yaacob2
+Intelligent Biometric Group, School of Electrical and Electronics Engineering, Universiti Sains Malaysia,
+Electrical, Electronics and Automation Section, Universiti Kuala Lumpur Malaysian Spanish Institute, 09000
+Engineering Campus, 14300 Nibong Tebal, Pulau Pinang, Malaysia
+Kulim Hi-Tech Park, Kedah, Malaysia
+Received: February 21, 2017
+Accepted: May 14, 2017"
+2484a34597a40d846c084e827fda299fd0927008,Image Matching Algorithm based on SURF Feature-point and DAISY Descriptor,"Image Matching Algorithm based on
+Feature-point and DAISY Descriptor
+School of Business, Sichuan Agricultural University, Sichuan Dujianyan 611830, China
+Li Li
+is the research"
+24b6d839662e5d56f17fc26eab4d2901f6835ddf,Real Time Lip Motion Analysis for a Person Authentication System using Near Infrared Illumination,"REAL TIME LIP MOTION ANALYSIS FOR A
+PERSON AUTHENTICATION SYSTEM USING NEAR
+INFRARED ILLUMINATION
+Faisal Shafait, Ralph Kricke, Islam Shdaifat, Rolf-Rainer Grigat
+TUHH Vision Systems (4-08/1)
+Harburger Schloßstr. 20, 21079 Hamburg, Germany
+Tel: +49 40 42878-3125, Fax: +49 40 42878-2911
+http://www.ti1.tu-harburg.de
+in: 2006 IEEE International Conference on Image Processing. See also BIBTEX entry below.
+BIBTEX:
+uthor = {Faisal Shafait and Ralph Kricke and Islam Shdaifat and Rolf-Rainer Grigat},
+title = {REAL TIME LIP MOTION ANALYSIS FOR A PERSON AUTHENTICATION SYSTEM
+USING NEAR INFRARED ILLUMINATION},
+ooktitle = {2006 IEEE International Conference on Image Processing},
+year = {2006},
+pages = {1957-1960},
+month = {oct},
+url = {http://www.ti1.tu-harburg.de/Publikationen}
+scheduled for October 8-11, 2006 in Atlanta, Georgia, USA. Personal use of this material is permitted.
+However, permission to reprint/republish this material for advertising or promotional purposes or for cre-"
+246218fd60d47975990908c48274341b47255292,Marker-less motion capture in general scenes with sparse multi-camera setups,"Marker-less Motion Capture in General
+Scenes with Sparse Multi-camera Setups
+Ahmed Elhayek
+Saarbr¨ucken, Germany
+Dissertation
+zur Erlangung des Grades des
+Doktors der Ingenieurswissenschaften (Dr.-Ing.)
+der Naturwissenschaftlich-Technischen Fakult¨aten
+der Universit¨at des Saarlandes
+March 2015"
+2491203e3b268235ea0269f41dbebd113d2a1b0a,"Optimal multiplexed sensing: bounds, conditions and a graph theory link.","Optimal multiplexed sensing: bounds,
+onditions and a graph theory link
+Netanel Ratner,1 Yoav Y. Schechner,1,∗
+nd Felix Goldberg2
+Dept. Electrical Engineering, Technion - Israel Inst. Technology
+Haifa 32000, Israel
+Dept. Mathematics, Technion - Israel Inst. Technology
+Haifa 32000, Israel
+Corresponding author:"
+24e099e77ae7bae3df2bebdc0ee4e00acca71250,Robust Face Alignment Under Occlusion via Regional Predictive Power Estimation,"Robust face alignment under occlusion via regional predictive power
+estimation.
+Heng Yang; Xuming He; Xuhui Jia; Patras, I
+© 2015 IEEE
+For additional information about this publication click this link.
+http://qmro.qmul.ac.uk/xmlui/handle/123456789/22467
+Information about this research object was correct at the time of download; we occasionally
+make corrections to records, please therefore check the published record when citing. For
+more information contact"
+24e79933d8d71dd9e72e289d9d89a061ccbb01c3,Analysis of Principal Component Analysis (PCA) Face Recognition: Effects of Similarity Measure,"Analysis of Principal Component Analysis (PCA)
+Face Recognition: Effects of Similarity Measure
+Arjun V Mane#1, Ramesh R Manza#2, Karbhari V Kale#3
+#Department of Computer Science & Information Technology,
+Dr. Babasaheb Ambedkar Marathwada University, Aurangabad (MS) India"
+2431eeb2df8877d78901fa37a091a23dc207c2b2,Rotation-Invariant HOG Descriptors Using Fourier Analysis in Polar and Spherical Coordinates,"Int J Comput Vis
+DOI 10.1007/s11263-013-0634-z
+Rotation-Invariant HOG Descriptors Using Fourier Analysis
+in Polar and Spherical Coordinates
+Kun Liu · Henrik Skibbe · Thorsten Schmidt ·
+Thomas Blein · Klaus Palme · Thomas Brox ·
+Olaf Ronneberger
+Received: 30 September 2012 / Accepted: 21 May 2013
+© Springer Science+Business Media New York 2013"
+2450c618cca4cbd9b8cdbdb05bb57d67e63069b1,A connexionist approach for robust and precise facial feature detection in complex scenes,"A Connexionist Approach for Robust and Precise Facial Feature Detection in
+Complex Scenes
+Stefan Duffner and Christophe Garcia
+France Telecom Research & Development
+, rue du Clos Courtel
+5512 Cesson-S´evign´e, France
+fstefan.duffner,"
+246fa412f26d5bf5b151a7c3f5287141bd08ae0b,Deep Metric Learning for the Target Cost in Unit-Selection Speech Synthesizer,"Interspeech 2018
+-6 September 2018, Hyderabad
+0.21437/Interspeech.2018-1305"
+24041477d6e412e4afc441992f4b170831f725c7,International Journal of Advance Research in Computer Science and Management Studies,"Volume 3, Issue 10, October 2015
+International Journal of Advance Research in
+Computer Science and Management Studies
+Research Article / Survey Paper / Case Study
+Available online at: www.ijarcsms.com
+ISSN: 2321-7782 (Online)
+Automatic Face Naming by Using Fused Affinity Matrix
+Kadam Vaibhav Bharat1
+B.E. Computer Science
+Deshpande Supriya Ajay2
+B.E. Computer Science
+Alarm College of Engineering, Pune, India
+Alarm College of Engineering, Pune, India
+Malpure Sagar3
+B.E. Computer Science
+Choudhary Jitendra4
+B.E. Computer Science
+Alarm College of Engineering, Pune, India
+Alarm College of Engineering, Pune, India"
+244b57cc4a00076efd5f913cc2833138087e1258,Warped Convolutions: Efficient Invariance to Spatial Transformations,"Warped Convolutions: Efficient Invariance to Spatial Transformations
+Jo˜ao F. Henriques 1 Andrea Vedaldi 1"
+242ae7b1b1c3e1aafcbe9cef3cb23918c6f94f2c,Performance Evaluation of Biometric Template Update,"Performance Evaluation
+of Biometric Template Update
+Romain Giot and Christophe Rosenberger
+Université de Caen, UMR 6072 GREYC
+ENSICAEN, UMR 6072 GREYC
+CNRS, UMR 6072 GREYC
+Email:
+Email:
+Bernadette Dorizzi
+Institut Télécom; Télécom SudParis
+UMR 5157 SAMOVAR
+Email:"
+2475d216fd52994ac69ef922f4daf73e47f9535d,Joint Albedo Estimation and Pose Tracking from Video,"Joint Albedo Estimation and Pose Tracking
+from Video
+Sima Taheri, Student Member, IEEE, Aswin Sankaranarayanan, Member, IEEE,
+nd Rama Chellappa, Fellow, IEEE"
+24869258fef8f47623b5ef43bd978a525f0af60e,Données multimodales pour l ’ analyse d ’ image,"UNIVERSITÉDEGRENOBLENoattribuéparlabibliothèqueTHÈSEpourobtenirlegradedeDOCTEURDEL’UNIVERSITÉDEGRENOBLESpécialité:MathématiquesetInformatiquepréparéeauLaboratoireJeanKuntzmanndanslecadredel’ÉcoleDoctoraleMathématiques,SciencesetTechnologiesdel’Information,InformatiqueprésentéeetsoutenuepubliquementparMatthieuGuillauminle27septembre2010ExploitingMultimodalDataforImageUnderstandingDonnéesmultimodalespourl’analysed’imageDirecteursdethèse:CordeliaSchmidetJakobVerbeekJURYM.ÉricGaussierUniversitéJosephFourierPrésidentM.AntonioTorralbaMassachusettsInstituteofTechnologyRapporteurMmeTinneTuytelaarsKatholiekeUniversiteitLeuvenRapporteurM.MarkEveringhamUniversityofLeedsExaminateurMmeCordeliaSchmidINRIAGrenobleExaminatriceM.JakobVerbeekINRIAGrenobleExaminateur"
+246ec873db261257833231d657ec8995d686cc3e,Facing the implications: Dangerous world beliefs differentially predict men and Women's aversion to facially communicated..,"See discussions, stats, and author profiles for this publication at: https://www.researchgate.net/publication/315831650
+Facing the implications: Dangerous world
+eliefs differentially predict men and Women's
+version to facially communicated...
+Article in Personality and Individual Differences · October 2017
+READS
+DOI: 10.1016/j.paid.2017.04.018
+CITATIONS
+authors, including:
+Mitch Brown
+University of Southern Mississippi
+9 PUBLICATIONS 14 CITATIONS
+SEE PROFILE
+Some of the authors of this publication are also working on these related projects:
+Facially Communicated Extraversion and Social Motives View project
+Grip Strength and Perceptions View project
+All content following this page was uploaded by Mitch Brown on 09 April 2017.
+The user has requested enhancement of the downloaded file. All in-text references underlined in blue are added to the original document
+nd are linked to publications on ResearchGate, letting you access and read them immediately."
+247df1d4fca00bc68e64af338b84baaecc34690b,Evaluation of Gender Classification Methods with Automatically Detected and Aligned Faces,"Review
+Procedure
+009/6/12
+Paper
+ “Evaluation of Gender Classification Methods
+with Automatically Detected and Aligned
+Faces”
+ Erno Makinen & Roope Raisamo
+ 2008
+Decision
+resizing
+lignment
+face detection
+resizing
+lassification
+resizing
+lignment
+lignment
+resizing
+face detection"
+24da9c1eb30ed5ef0052f760d5d847bf5cd1d2ba,A Machine-Learning Approach to Keypoint Detection and Landmarking on 3D Meshes,"Int J Comput Vis
+DOI 10.1007/s11263-012-0605-9
+A Machine-Learning Approach to Keypoint Detection
+nd Landmarking on 3D Meshes
+Clement Creusot · Nick Pears · Jim Austin
+Received: 14 October 2011 / Accepted: 17 December 2012
+© Springer Science+Business Media New York 2013"
+2475ad865b2102cef83a87adfe0d2e71d4791e53,A Supervised Clustering Algorithm for the Initialization of RBF Neural Network Classifiers,"A Supervised Clustering Algorithm for the Initialization
+of RBF Neural Network Classifiers
+Hakan Cevikalp, Diane Larlus, Frédéric Jurie
+To cite this version:
+Hakan Cevikalp, Diane Larlus, Frédéric Jurie. A Supervised Clustering Algorithm for the Ini-
+SIU ’07 - 15th Signal Processing and Com-
+tialization of RBF Neural Network Classifiers.
+munications Applications, Jun 2007, Eskisehir, Turkey.
+IEEE Computer society, pp.1-4, 2007,
+<10.1109/SIU.2007.4298803>. <hal-00203762>
+HAL Id: hal-00203762
+https://hal.archives-ouvertes.fr/hal-00203762
+Submitted on 14 Jan 2008
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est"
+2472d6e4459dd65cd77b5fce99220d3b30854408,Towards 3D object recognition via classification of arbitrary object tracks,"Towards 3D Object Recognition
+via Classification of Arbitrary Object Tracks
+Alex Teichman, Jesse Levinson, Sebastian Thrun
+Stanford Artificial Intelligence Laboratory
+{teichman, jessel,"
+243778aefb3c23d6774309c70217cb83f7204915,"The Mutex Watershed: Efficient, Parameter-Free Image Partitioning","The Mutex Watershed:
+Ef‌f‌icient, Parameter-Free Image Partitioning
+Steffen Wolf1⋆, Constantin Pape1,2⋆, Alberto Bailoni1, Nasim Rahaman1, Anna
+Kreshuk1,2, Ullrich K¨othe1, and Fred A. Hamprecht1
+HCI/IWR, University of Heidelberg, Germany
+EMBL Heidelberg, Germany"
+2465fc22e03faf030e5a319479a95ef1dfc46e14,Influence of different feature selection approaches on the performance of emotion recognition methods based on SVM,"______________________________________________________PROCEEDING OF THE 20TH CONFERENCE OF FRUCT ASSOCIATION
+Influence of Different Feature Selection Approaches
+on the Performance of Emotion Recognition
+Methods Based on SVM
+Daniil Belkov, Konstantin Purtov, Vladimir Kublanov
+Ural Federal University (UrFU)
+Yekaterinburg, Russia
+d.d.belkov,"
+2452dfb2c5a4578ac9497cc4dc3c6d5d03997210,On designing an unconstrained tri-band pupil detection system for human identification,"DOI 10.1007/s00138-015-0700-3
+ORIGINAL PAPER
+On designing an unconstrained tri-band pupil detection system
+for human identification
+Cameron Whitelam1 · Thirimachos Bourlai1
+Received: 30 September 2014 / Revised: 11 February 2015 / Accepted: 15 June 2015
+© Springer-Verlag Berlin Heidelberg 2015
+facial"
+24ff832171cb774087a614152c21f54589bf7523,Beat-Event Detection in Action Movie Franchises,"Beat-Event Detection in Action Movie Franchises
+Danila Potapov
+Matthijs Douze
+Jerome Revaud
+Zaid Harchaoui
+Cordelia Schmid"
+247232ab9eabb4f2480dd70557a1ee89afed4f20,Dominant men are faster in decision-making situations and exhibit a distinct neural signal for promptness,"Cerebral Cortex, October 2018;28: 3740–3751
+doi: 10.1093/cercor/bhy195
+Advance Access Publication Date: 15 August 2018
+Original Article
+O R I G I N A L A R T I C L E
+Dominant men are faster in decision-making situations
+nd exhibit a distinct neural signal for promptness
+Janir da Cruz1,2, João Rodrigues3, John C. Thoresen3, Vitaly Chicherov1,
+Patrícia Figueiredo2, Michael H. Herzog1 and Carmen Sandi
+Laboratory of Psychophysics, Brain Mind Institute, School of Life Sciences, Swiss Federal Institute of
+Technology Lausanne (EPFL), CH-1015 Lausanne, Switzerland, 2Institute for Systems and Robotics – Lisboa,
+Department of Bioengineering, Instituto Superior Técnico, Universidade de Lisboa, 1049-001 Lisbon, Portugal
+nd 3Laboratory of Behavioral Genetics, Brain Mind Institute, School of Life Sciences, Swiss Federal Institute of
+Technology Lausanne (EPFL), CH-1015 Lausanne, Switzerland
+Address correspondence to Carmen Sandi, Laboratory of Behavioral Genetics, Brain Mind Institute, School of Life Sciences, Swiss Federal Institute of
+Technology Lausanne (EPFL), CH-1015 Lausanne, Switzerland. Email:
+orcid.org/0000-0001-7713-8321
+Janir da Cruz, João Rodrigues, and John C. Thoresen contributed equally to this work
+Michael H. Herzog and Carmen Sandi contributed equally to this work"
+244377600b1474e1da3b86a08683e629990d1417,Embedded Vision System for Atmospheric Turbulence Mitigation,"Embedded Vision System for Atmospheric Turbulence Mitigation
+Ajinkya Deshmukh1, Gaurav Bhosale, Swarup Medasani2, Karthik Reddy,
+Hemanthakumar P, Chandrasekhar A, Kirankumar P, Vijayasagar K
+Uurmi Systems Pvt. Ltd., Hyderabad, India"
+247a6b0e97b9447850780fe8dbc4f94252251133,Facial action unit detection: 3D versus 2D modality,"Facial Action Unit Detection: 3D versus 2D Modality
+Arman Savran
+Electrical and Electronics Engineering
+Bo˘gazic¸i University, Istanbul, Turkey
+B¨ulent Sankur
+Electrical and Electronics Engineering
+Bo˘gazic¸i University, Istanbul, Turkey
+M. Taha Bilge
+Department of Psychology
+Bo˘gazic¸i University, Istanbul, Turkey"
+2485c98aa44131d1a2f7d1355b1e372f2bb148ad,The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations,"IEEE TRANSACTIONS ON SYSTEMS, MAN, AND CYBERNETICS—PART A: SYSTEMS AND HUMANS, VOL. 38, NO. 1, JANUARY 2008
+The CAS-PEAL Large-Scale Chinese Face
+Database and Baseline Evaluations
+Wen Gao, Senior Member, IEEE, Bo Cao, Shiguang Shan, Member, IEEE,
+Xilin Chen, Member, IEEE, Delong Zhou, Xiaohua Zhang, and Debin Zhao"
+24d3e695af619e88613aba7dc0e7492c12fa4d0e,Sparsest Matrix based Random Projection for Classification,"Sparse Matrix-based Random Projection for
+Classification
+Weizhi Lu, Weiyu Li, Kidiyo Kpalma and Joseph Ronsin"
+24585f90bdf30583733841f70430d36948f16ae2,An efficient method for human face recognition using nonsubsampled contourlet transform and support vector machine,"Optica Applicata, Vol. XXXIX, No. 3, 2009
+An efficient method for human face recognition
+using nonsubsampled contourlet transform
+nd support vector machine
+XUEBIN XU, DEYUN ZHANG, XINMAN ZHANG*
+School of Electronics and Information Engineering, Xi’an Jiaotong University,
+8 Xianning West Road, Xi’an 710049, P.R. China
+*Corresponding author:
+To improve the recognition rate in different conditions, a multiscale face recognition method
+ased on nonsubsampled contourlet transform and support vector machine is proposed in this
+paper. Firstly, all face images are decomposed by using nonsubsampled contourlet transform.
+The contourlet coefficients of low frequency and high frequency in different scales and various
+ngles will be obtained. Most significant information of faces is contained in coefficients, which
+is important for face recognition. Then, the combinations of coefficients are applied as study
+samples to the support vector machine classifiers. Finally, the decomposed coefficients of testing
+face image are used to test classifiers, then face recognition results are obtained. The experiments
+re performed on the YaleB database and the Cambridge University ORL database. The results
+indicate that the method proposed has performs better than the wavelet-based method. Compared
+with the wavelet-based method, the proposed method can make the best recognition rates increase
+y 2.85% for YaleB database and 1.87% for ORL database, respectively. Our method is also"
+230527d37421c28b7387c54e203deda64564e1b7,Person Re-identification: System Design and Evaluation Overview,"Person Re-identification: System Design and
+Evaluation Overview
+Xiaogang Wang and Rui Zhao"
+236942bb64f1711b4763424b2f795fb518c9d8d4,Optimizing LBP Structure For Visual Recognition Using Binary Quadratic Programming,"Optimizing LBP Structure For Visual Recognition
+Using Binary Quadratic Programming
+Jianfeng Ren, Student Member, IEEE, Xudong Jiang, Senior Member, IEEE, Junsong Yuan, Senior Member, IEEE,
+nd Gang Wang, Member, IEEE"
+2315371408e02cdff6f54359f159f192009d1600,Effective Pedestrian Detection Using Center-symmetric Local Binary/Trinary Patterns,"SEPTEMBER 2010
+Effective Pedestrian Detection Using
+Center-symmetric Local Binary/Trinary Patterns
+Yongbin Zheng, Chunhua Shen, Richard Hartley, Fellow, IEEE, and Xinsheng Huang"
+237316762470d72a02795a7f57de9279e9cda16a,Dimensionality-reduced subspace clustering,"Dimensionality-reduced subspace clustering
+Reinhard Heckel, Michael Tschannen, and Helmut B¨olcskei
+December 15, 2015"
+239c06cd437832faa55a8e7292c50e45229a3d7c,Generating analytic insights on human behavior using image processing,"Generating Analytic Insights on Human behavior
+using Image Processing
+Namit Juneja, Rajesh Kumar M, Senior Member, IEEE
+School of Electronics Engineering
+VIT University
+Vellore, India"
+2396ff03c41c498ff20e3a0e5419afa45e4a9d41,MIT Autonomous Vehicle Technology Study: Large-Scale Deep Learning Based Analysis of Driver Behavior and Interaction with Automation,"MIT Autonomous Vehicle Technology Study:
+Large-Scale Deep Learning Based Analysis of
+Driver Behavior and Interaction with Automation
+Lex Fridman∗, Daniel E. Brown, Michael Glazer, William Angell, Spencer Dodd, Benedikt Jenik,
+Andrew Sipperley, Anthony Pettinato, Bobbie Seppelt, Linda Angell, Bruce Mehler, Bryan Reimer∗
+Jack Terwilliger, Julia Kindelsberger, Li Ding, Sean Seaman, Hillary Abraham, Alea Mehler,"
+23e1746c449e675a4ffa3833b0ac5c5a7b743f7f,Development of an Efficient Face Recognition System based on Linear and Nonlinear Algorithms,"Development of an Efficient Face Recognition System based on Linear and Nonlinear Algorithms
+{tag} {/tag}
+International Journal of Computer Applications
+Foundation of Computer Science (FCS), NY, USA
+Volume 134
+Number 7
+Year of Publication: 2016
+Authors:
+Filani Araoluwa S., Adetunmbi Adebayo O.
+10.5120/ijca2016907932
+{bibtex}2016907932.bib{/bibtex}"
+2349eab05cd0c6f94ba5314c037d198aa12c2f0f,Eigen-profiles of spatio-temporal fragments for adaptive region-based tracking,"978-1-4673-0046-9/12/$26.00 ©2012 IEEE
+ICASSP 2012"
+237734e3fd3abab005b0b97d61416ee16105f902,Consensus Maximization for Semantic Region Correspondences,"Consensus Maximization for Semantic Region Correspondences
+Pablo Speciale1, Danda P. Paudel2, Martin R. Oswald1,
+Hayko Riemenschneider2, Luc V. Gool2,4, and Marc Pollefeys1,3
+Department of Computer Science, ETH Z¨urich.
+Microsoft, Redmond, USA
+{pablo, moswald,
+Computer Vision Laboratory, D-ITET, ETH Z¨urich
+VISICS, ESAT/PSI, KU Leuven, Belgium
+{paudel, hayko,
+Day / Night
+Registration
+Outdoor / Indoor
+Registration
+Scan / CAD
+Registration
+Figure 1: Example registration results. Our approach solves challenging registration problems by maximizing the number of corre-
+sponding semantic regions – such as windows, doors or balconies – for datasets from different modalities, with large amounts of noise and
+outliers, little data overlap, or significantly different data statistics."
+239df42479c69cf95e7194cc0ec3d8cf7d4a98e8,Face Detection and Extraction from Low Resolution Surveillance Video Using Motion Segmentation,"Face Detection and Extraction from Low
+Resolution Surveillance Video Using
+Motion Segmentation
+Vikram Mutneja1
+I.K. Gujral Punjab Technical University, Kapurthala, Punjab (India)
+Ph.D. Research Scholar,
+I.K. Gujral Punjab Technical University Main Campus, Kapurthala, Punjab (India)
+Dr. Satvir Singh2,
+Associate Professor,"
+23fa51635c646aa621bb18ff76f31d5e48ac969b,MFSC: A new shape descriptor with robustness to deformations,"MFSC: A NEW SHAPE DESCRIPTOR WITH ROBUSTNESS TO DEFORMATIONS
+Lunshao Chaia, Zhen Qinb, Honggang Zhanga, Jun Guoa, Bir Bhanub
+Beijing University of Posts and Telecomuunictions, Beijing, 100876, China
+University of California at Riverside, Riverside, CA 92521, USA"
+23172f9a397f13ae1ecb5793efd81b6aba9b4537,Defining Visually Descriptive Language,"Proceedings of the 2015 Workshop on Vision and Language (VL’15), pages 10–17,
+Lisbon, Portugal, 18 September 2015. c(cid:13)2015 Association for Computational Linguistics."
+23fd82c04852b74d655015ff0876e6c5defc6e61,Deep-based Ingredient Recognition for Cooking Recipe Retrieval,"Deep-based Ingredient Recognition for
+Cooking Recipe Retrieval
+Jingjing Chen
+City University of HongKong
+Kowloon, HongKong
+Chong-Wah Ngo
+City University of HongKong
+Kowloon, HongKong"
+236a4f38f79a4dcc2183e99b568f472cf45d27f4,Randomized Clustering Forests for Image Classification,"Randomized Clustering Forests
+for Image Classification
+Frank Moosmann, Student Member, IEEE, Eric Nowak, Student Member, IEEE, and
+Frederic Jurie, Member, IEEE Computer Society"
+230c4a30f439700355b268e5f57d15851bcbf41f,EM Algorithms for Weighted-Data Clustering with Application to Audio-Visual Scene Analysis,"EM Algorithms for Weighted-Data Clustering
+with Application to Audio-Visual Scene Analysis
+Israel D. Gebru, Xavier Alameda-Pineda, Florence Forbes and Radu Horaud"
+237fa91c8e8098a0d44f32ce259ff0487aec02cf,Bidirectional PCA with assembled matrix distance metric for image recognition,"IEEE TRANSACTIONS ON SYSTEMS, MAN, AND CYBERNETICS—PART B: CYBERNETICS, VOL. 36, NO. 4, AUGUST 2006
+Bidirectional PCA With Assembled Matrix
+Distance Metric for Image Recognition
+Wangmeng Zuo, David Zhang, Senior Member, IEEE, and Kuanquan Wang, Member, IEEE"
+237ec7e6d20025c32069e41f8007bb97931a7fc6,Learning real-time object detectors : probabilistic generative approaches,
+2331df8ca9f29320dd3a33ce68a539953fa87ff5,Extended Isomap for Pattern Classification,"Extended Isomap for Pattern Classification
+Ming-Hsuan Yang
+Honda Fundamental Research Labs
+Mountain View, CA 94041"
+2333cf918f50ac2ae201a837166d310adf3a00b0,Optimally Training a Cascade Classifier,"Optimally Training a Cascade Classifier
+Chunhua Shen, Peng Wang, and Anton van den Hengel"
+23ba9e462151a4bf9dfc3be5d8b12dbcfb7fe4c3,Determining Mood from Facial Expressions,"CS 229 Project, Fall 2014
+Matthew Wang
+Spencer Yee
+Determining Mood from Facial Expressions
+Introduction
+Facial expressions play an extremely important role in human communication. As
+society continues to make greater use of human-machine interactions, it is important for
+machines to be able to interpret facial expressions in order to improve their
+uthenticity. If machines can be trained to determine mood to a better extent than
+humans can, especially for more subtle moods, then this could be useful in fields such as
+ounseling. This could also be useful for gauging reactions of large audiences in various
+ontexts, such as political talks.
+The results of this project could also be applied to recognizing other features of facial
+expressions, such as determining when people are purposefully suppressing emotions or
+lying. The ability to recognize different facial expressions could also improve technology
+that recognizes to whom specific faces belong. This could in turn be used to search a
+large number of pictures for a specific photo, which is becoming increasingly difficult, as
+storing photos digitally has been extremely common in the past decade. The possibilities
+re endless.
+II Data and Features"
+2311cdd241c118395a510776ec226aff7725ebc8,Hunting Nessie - Real-time abnormality detection from webcams,"Hunting Nessie – Real-Time Abnormality Detection from Webcams
+Michael D. Breitenstein1 Helmut Grabner1 Luc Van Gool1,2
+Computer Vision Laboratory
+ETH Zurich
+ESAT-PSI / IBBT
+KU Leuven"
+2340a8fa6d90741c53e659cd1e7ca86ff900aa55,Body Parts Dependent Joint Regressors for Human Pose Estimation in Still Images,"Body Parts Dependent Joint Regressors for
+Human Pose Estimation in Still Images
+Matthias Dantone, Juergen Gall, Member, IEEE Christian Leistner, and Luc Van Gool, Member, IEEE"
+238fc68b2e0ef9f5ec043d081451902573992a03,Enhanced Local Gradient Order Features and Discriminant Analysis for Face Recognition,"Enhanced Local Gradient Order Features and
+Discriminant Analysis for Face Recognition
+Chuan-Xian Ren, Zhen Lei, Member, IEEE, Dao-Qing Dai, Member, IEEE, and Stan Z. Li, Fellow, IEEE
+role in robust face recognition [5]. Many algorithms have
+een proposed to deal with the effectiveness of feature design
+nd extraction [6], [7]; however, the performance of many
+existing methods is still highly sensitive to variations of
+imaging conditions, such as outdoor illumination, exaggerated
+expression, and continuous occlusion. These complex varia-
+tions are significantly affecting the recognition accuracy in
+recent years [8]–[10].
+Appearance-based subspace learning is one of the sim-
+plest approach for feature extraction, and many methods
+re usually based on linear correlation of pixel intensities.
+For example, Eigenface [11] uses eigen system of pixel
+intensities to estimate the lower rank linear subspace of
+set of training face images by minimizing the (cid:2)2 dis-
+tance metric. The solution enjoys optimality properties when
+noise is independent
+identically distributed Gaussian only."
+2322ec2f3571e0ddc593c4e2237a6a794c61251d,Four not six: Revealing culturally common facial expressions of emotion.,"Jack, R. E. , Sun, W., Delis, I., Garrod, O. G. B. and Schyns, P. G. (2016)
+Four not six: revealing culturally common facial expressions of
+emotion.Journal of Experimental Psychology: General, 145(6), pp. 708-
+730. (doi:10.1037/xge0000162)
+This is the author’s final accepted version.
+There may be differences between this version and the published version.
+You are advised to consult the publisher’s version if you wish to cite from
+http://eprints.gla.ac.uk/116592/
+Deposited on: 20 April 2016
+Enlighten – Research publications by members of the University of Glasgow
+http://eprints.gla.ac.uk"
+23ea8a34570342855611a78a4ff00ddd902e6123,Gradient-based global features and its application to image retargeting,"Gradient-based Global Features and Its Application
+to Image Retargeting
+Izumi Ito
+Tokyo Institute of Technology Tokyo, 152-8552 Japan
++81-3-5734-2997"
+2312bc2d48a0f68bd5ab1b024d5726786455da3a,Learning Deep Context-Aware Features over Body and Latent Parts for Person Re-identification,"Learning Deep Context-aware Features over Body and Latent Parts
+for Person Re-identification
+Supplementary Materials
+Dangwei Li1,2, Xiaotang Chen1,2, Zhang Zhang1,2, Kaiqi Huang1,2,3
+CRIPAC & NLPR, CASIA 2University of Chinese Academy of Sciences
+CAS Center for Excellence in Brain Science and Intelligence Technology
+{dangwei.li, xtchen, zzhang,
+. Market1501 dataset
+To further understand the results on Market1501 [8], we show mean Average Precision (mAP) and Rank-1 identification
+rate between camera pairs in Figure 1 and Figure 2. Compared to the BOW methods, the proposed method improves mean
+mAP and Rank-1 identification rate between camera pairs by 35.09% and 40.01% respectively. In addition, we show some
+searching results with different query images in Figure 3. The dataset is challenging and the returned images have very similar
+ppearances and some pedestrians have large backgrounds and occlusions. For the query image in first row of Figure 3, even
+though the query person has large occlusions and some groundtruth images have large backgrounds, our proposed method
+an still return the right results. This shows the effectiveness of our proposed method.
+. CUHK03 dataset
+CUHK03 [3] is one of the largest person re-identification datasets. It provides two types of pedestrian bounding boxes,
+including detected and manually annotated. In this paragraph, we show the overall Cumulated Matching Characteristics
+(CMC) on both detected and labeled datasets in Figure 4. For the GateSCNN [5] in Figure 4(a), we use the singe-query
+results to approximate the single-shot results. The DGD [6] is trained using multiple datasets. In this paper, we use the"
+23a2b75c92123b3e7bbaf1d98e434845167fe259,Multimodal Biometrics for Identity Documents,"Forensic Science International 167 (2007) 154–159
+www.elsevier.com/locate/forsciint
+Multimodal biometrics for identity documents (
+Damien Dessimoz a,*, Jonas Richiardi b, Christophe Champod a, Andrzej Drygajlo b
+Institut de Police Scientifique, E´ cole des Sciences Criminelles, Universite´ de Lausanne, Switzerland
+Speech Processing and Biometrics Group, Signal Processing Institute, E´ cole Polytechnique Fe´de´rale de Lausanne, Switzerland
+Received 9 June 2006; accepted 14 June 2006
+Available online 4 August 2006"
+23c9fe37fa0474967be4cc6c7a310dcc87b86b72,Spatial Feature Interdependence Matrix (SFIM): A Robust Descriptor for Face Recognition,"Spatial Feature Interdependence Matrix (SFIM):
+A Robust Descriptor for Face Recognition
+Anbang Yao1 and Shan Yu2
+National Laboratory of Pattern Recognition, Institute of Automation,
+Chinese Academy of Science, Beijing, 100090, China
+National Institute for Research in Computer Science and Control, France"
+23b93f3b237481bd1d36941ca3312bb16f4beb58,Reconnaissance d'événements et d'actions à partir de la profondeur thermique 3D. (Event and action recognition from thermal and 3D depth Sensing),"Reconnaissance d’événements et d’actions à partir de la
+profondeur thermique 3D
+Adnan Al Alwani
+To cite this version:
+Adnan Al Alwani. Reconnaissance d’événements et d’actions à partir de la profondeur thermique
+D. Vision par ordinateur et reconnaissance de formes [cs.CV]. Université de Caen Normandie, 2016.
+Français. <tel-01418369>
+HAL Id: tel-01418369
+https://hal.archives-ouvertes.fr/tel-01418369
+Submitted on 16 Dec 2016
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de"
+235f4fad10a5d9e043759354a7cb94122a8f10fc,"Multi-perspective vehicle detection and tracking: Challenges, dataset, and metrics","Windsor Oceanico Hotel, Rio de Janeiro, Brazil, November 1-4, 2016
+978-1-5090-1889-5/16/$31.00 ©2016 IEEE"
+23120f9b39e59bbac4438bf4a8a7889431ae8adb,Improved RGB-D-T based face recognition,"Aalborg Universitet
+Improved RGB-D-T based Face Recognition
+Oliu Simon, Marc; Corneanu, Ciprian; Nasrollahi, Kamal; Guerrero, Sergio Escalera;
+Nikisins, Olegs; Sun, Yunlian; Li, Haiqing; Sun, Zhenan; Moeslund, Thomas B.; Greitans,
+Modris
+Published in:
+DOI (link to publication from Publisher):
+0.1049/iet-bmt.2015.0057
+Publication date:
+Document Version
+Accepted manuscript, peer reviewed version
+Link to publication from Aalborg University
+Citation for published version (APA):
+Oliu Simon, M., Corneanu, C., Nasrollahi, K., Guerrero, S. E., Nikisins, O., Sun, Y., ... Greitans, M. (2016).
+General rights
+Copyright and moral rights for the publications made accessible in the public portal are retained by the authors and/or other copyright owners
+nd it is a condition of accessing publications that users recognise and abide by the legal requirements associated with these rights.
+? Users may download and print one copy of any publication from the public portal for the purpose of private study or research.
+? You may not further distribute the material or use it for any profit-making activity or commercial gain
+? You may freely distribute the URL identifying the publication in the public portal ?"
+23d55061f7baf2ffa1c847d356d8f76d78ebc8c1,Generic and attribute-specific deep representations for maritime vessels,"Solmaz et al. IPSJ Transactions on Computer Vision and
+Applications (2017) 9:22
+DOI 10.1186/s41074-017-0033-4
+IPSJ Transactions on Computer
+Vision and Applications
+RESEARCH PAPER
+Open Access
+Generic and attribute-specific deep
+representations for maritime vessels
+Berkan Solmaz*†
+, Erhan Gundogdu†, Veysel Yucesoy and Aykut Koc"
+23095c6fc92f41a86f93276d446cfc72c7ce7b23,Stereo-based Pedestrian Detection using Multiple Patterns,"HATTORI et al.: STEREO-BASED PEDESTRIAN DETECTION USING MULTI-PATTERNS
+Stereo-based Pedestrian Detection using
+Multiple Patterns
+Research & Development Center,
+TOSHIBA Corporation, JAPAN
+Hiroshi Hattori
+Akihito Seki
+Manabu Nishiyama
+Tomoki Watanabe"
+23a8d02389805854cf41c9e5fa56c66ee4160ce3,Influence of low resolution of images on reliability of face detection and recognition,"Multimed Tools Appl
+DOI 10.1007/s11042-013-1568-8
+Influence of low resolution of images on reliability
+of face detection and recognition
+Tomasz Marciniak· Agata Chmielewska·
+Radoslaw Weychan· Marianna Parzych·
+Adam Dabrowski
+© The Author(s) 2013. This article is published with open access at SpringerLink.com"
+23e881c9b791fd17e248b1fb4fc980710dd005d7,An Unbiased Temporal Representation for Video-Based Person Re-Identification,"AN UNBIASED TEMPORAL REPRESENTATION FOR VIDEO-BASED PERSON
+RE-IDENTIFICATION
+Xiu Zhang and Bir Bhanu
+Center for Research in Intelligent Systems
+University of California, Riverside, Riverside, CA 92521, USA"
+23b37c2f803a2d4b701e2f39c5f623b2f3e14d8e,Modified Approaches on Face Recognition By using Multisensory Image,"Available Online at www.ijcsmc.com
+International Journal of Computer Science and Mobile Computing
+A Monthly Journal of Computer Science and Information Technology
+ISSN 2320–088X
+IJCSMC, Vol. 2, Issue. 4, April 2013, pg.646 – 649
+RESEARCH ARTICLE
+Modified Approaches on Face Recognition
+By using Multisensory Image
+S. Dhanarajan1, G. Michael2
+Computer Science Department, Bharath University, India
+Computer Science Department, Bharath University, India"
+4f892475be26333ddf1b72c21f0c9c4ca129bd80,Mobile Cloud Computing for Biometric Applications,"Singidunum University
+Belgrade, Serbia
+Mobile Cloud Computing for Biometric Applications
+Milos Stojmenovic
+Department of Informatics and Computation"
+4f00f5fe9d762009f524fb97555088769b96328c,Eye Gaze Tracking System Using . Net,"IJSART - Volume 3 Issue 5 –MAY 2017 ISSN [ONLINE]: 2395-1052
+Eye Gaze Tracking System Using .Net
+Madhu M Nayak1, Usha Rani J2, Anandhi G3
+Department of CSE
+, 2, 3Assistant Professor,GSSIETW, Mysuru"
+4f051022de100241e5a4ba8a7514db9167eabf6e,Face Parsing via a Fully-Convolutional Continuous CRF Neural Network,"Face Parsing via a Fully-Convolutional Continuous
+CRF Neural Network
+Lei Zhou, Zhi Liu, Senior Member, IEEE, Xiangjian He, Senior Member, IEEE"
+4faded442b506ad0f200a608a69c039e92eaff11,İstanbul Technical University Institute of Science and Technology Face Recognition under Varying Illumination,"İSTANBUL TECHNICAL UNIVERSITY  INSTITUTE OF SCIENCE AND TECHNOLOGY
+FACE RECOGNITION UNDER VARYING
+ILLUMINATION
+Master Thesis by
+Erald VUÇINI, B.Sc.
+Department : Computer Engineering
+Programme: Computer Engineering
+Supervisor: Prof. Dr. Muhittin GÖKMEN
+JUNE 2006"
+4f4c067e684252cf5549f60036829a89b2f35fc8,Sentic Avatar: Multimodal Affective Conversational Agent with Common Sense,"Sentic Avatar: Multimodal Affective
+Conversational Agent with Common Sense
+Erik Cambria1, Isabelle Hupont2,
+Amir Hussain1, Eva Cerezo3, and Sandra Baldassarri3
+University of Stirling, Stirling, UK
+Aragon Institute of Technology, Zaragoza, Spain
+University of Zaragoza, Zaragoza, Spain
+http://cs.stir.ac.uk/~eca/sentics"
+4f41f7a2f1f5eb5f26d47aeb168dbeb0f9ed453f,A Graph Transduction Game for Multi-target Tracking,"A Graph Transduction Game for Multi-target
+Tracking
+Tewodros Mulugeta Dagnew∗, Dalia Coppi†, Marcello Pelillo∗, Rita Cucchiara†
+DAIS - Ca´ Foscari University
+Venezia, Italy
+Email:
+DIEF - University of Modena and Reggio Emilia
+Email:
+Modena, Italy"
+4fc936102e2b5247473ea2dd94c514e320375abb,Guess Where? Actor-Supervision for Spatiotemporal Action Localization,"Guess Where? Actor-Supervision for Spatiotemporal Action Localization
+Victor Escorcia1∗
+Cuong D. Dao1
+Mihir Jain3
+KAUST1, University of Amsterdam2, Qualcomm Technologies, Inc.3
+Bernard Ghanem1
+Cees Snoek2∗"
+4f0aedbd0b5cb5939449da41579c93b98048fcdc,Robust classification using structured sparse representation,"Robust Classification using Structured Sparse Representation
+Center for Imaging Science, Johns Hopkins University, Baltimore MD 21218, USA
+Ehsan Elhamifar Ren´e Vidal"
+4f8bd3519a6e8a05db9e35b027c0c65c91d2ff62,Brain Oxytocin is a Main Regulator of Prosocial Behaviour - Link to Psychopathology,"We are IntechOpen,
+the world’s leading publisher of
+Open Access books
+Built by scientists, for scientists
+,800
+16,000
+Open access books available
+International authors and editors
+Downloads
+Our authors are among the
+Countries delivered to
+TOP 1%
+2.2%
+most cited scientists
+Contributors from top 500 universities
+Selection of our books indexed in the Book Citation Index
+in Web of Science™ Core Collection (BKCI)
+Interested in publishing with us?
+Contact
+Numbers displayed above are based on latest data collected."
+4ff7f5928f96ddc877b4b8675cc41cc08f4bd561,Recent Advance in Content-based Image Retrieval: A Literature Survey,"Recent Advance in Content-based Image
+Retrieval: A Literature Survey
+Wengang Zhou, Houqiang Li, and Qi Tian Fellow, IEEE"
+4f6adc53798d9da26369bea5a0d91ed5e1314df2,Online Nonnegative Matrix Factorization with General Divergences,"IEEE TRANSACTIONS ON SIGNAL PROCESSING, VOL. , NO. , 2016
+Online Nonnegative Matrix Factorization with
+General Divergences
+Renbo Zhao, Member, IEEE, Vincent Y. F. Tan, Senior Member, IEEE, Huan Xu"
+4f46dba09e075b2e7dfae1ba2a71e8e21b46e88d,Genetic CNN,"Genetic CNN
+Center for Imaging Science, The Johns Hopkins University, Baltimore, MD, USA
+Lingxi Xie, Alan Yuille"
+4fc609df4e17b5854e3b7f4371e5f4192608eda5,3D Face Recognition Benchmarks on the Bosphorus Database with Focus on Facial Expressions,"D Face Recognition Benchmarks on the
+Bosphorus Database with Focus on Facial
+Expressions
+Nes¸e Aly¨uz1, Berk G¨okberk2, Hamdi Dibeklio˘glu1, Arman Savran3, Albert Ali
+Salah4, Lale Akarun1, B¨ulent Sankur3"
+4f591e243a8f38ee3152300bbf42899ac5aae0a5,Understanding Higher-Order Shape via 3D Shape Attributes,"SUBMITTED TO TPAMI
+Understanding Higher-Order Shape
+via 3D Shape Attributes
+David F. Fouhey, Abhinav Gupta, Andrew Zisserman"
+4fec382efed4e08a36fafa3710b97f0b20de1ebe,Binarized Representation Entropy (bre) Regularization,"Published as a conference paper at ICLR 2018
+IMPROVING GAN TRAINING VIA
+BINARIZED REPRESENTATION ENTROPY (BRE)
+REGULARIZATION
+Yanshuai Cao, Gavin Weiguang Ding, Kry Yik-Chau Lui, Ruitong Huang
+Borealis AI
+Canada"
+4fdeb5d59b218ecba0f72dc3c42f38a086417c0f,InformatIon theoretIc combInatIon of classIfIers wIth applIcatIon to face DetectIon,"InformatIon theoretIc combInatIon
+of classIfIers wIth applIcatIon to face DetectIon
+THÈSE NO 3951 (2007)
+PRÉSENTÉE LE 23 NOvEMBRE 2007
+À LA FACULTÉ DES SCIENCES ET TECHNIQUES DE L'INGÉNIEUR
+LABORATOIRE DE TRAITEMENT DES SIGNAUX 5
+PROGRAMME DOCTORAL EN INFORMATIQUE, COMMUNICATIONS ET INFORMATION
+ÉCOLE POLyTECHNIQUE FÉDÉRALE DE LAUSANNE
+POUR L'OBTENTION DU GRADE DE DOCTEUR ÈS SCIENCES
+Julien MEyNET
+DEA signal, image, parole, télécoms, Institut national polytechnique de Grenoble, France
+et de nationalité française
+cceptée sur proposition du jury:
+Prof. H. Bourlard, président du jury
+Prof. J.-Ph. Thiran, directeur de thèse
+Prof. A. Billard, rapporteur
+Prof. H. Bunke, rapporteur
+Prof. J. Kittler, rapporteur
+Suisse"
+4f15b1e750007465024181dd002dfc6d1baa48c9,Face Recognition and Computer Graphics for Modelling,"Face Recognition and Computer Graphics for
+Modelling Expressive Faces in 3D
+Tufool Al-Nuaimi
+Submitted to the Department of Electrical Engineering and Computer Science
+in Partial Fulfillment of the Requirements for the Degree of
+Master of Engineering in Electrical Engineering and Computer Science
+t the Massachusetts Institute of Technology
+May 26, 2006
+Copyright 2006 Tufool AI-Nuaimi. All rights reserved.
+The author hereby grants to M.I.T. permission to reproduce and
+distribute publicly paper and electronic copies of this thesis
+nd to grant others the right to do so.
+Author
+Certified by_
+Accepted by_ _
+Tufool Al-Nuaimi
+Department of Electrical Engineering and Computer Science
+-Ma 26, 2006
+Judith Barry
+Supervisor"
+4fa6a688f350831503d158f8f618c58d1e06bc5d,"Bootstrap, Review, Decode: Using Out-of-Domain Textual Data to Improve Image Captioning","Bootstrap, Review, Decode: Using Out-of-Domain Textual Data
+to Improve Image Captioning
+Wenhu Chen
+RWTH Aachen
+Aurelien Lucchi
+ETH Zurich
+Thomas Hofmann
+ETH Zurich"
+4fb569af589d89f11d84d4b828459231345cc301,Exploring Linear Relationship in Feature Map Subspace for ConvNets Compression,"Exploring Linear Relationship in Feature Map
+Subspace for ConvNets Compression
+Dong Wang1, Lei Zhou1, Xueni Zhang1, Xiao Bai1, and Jun Zhou2
+Beihang University 2Grif‌f‌ith University"
+4f7e4b1b74955b54c434bdf76c47fb1e96db74e0,Naive Bayes Image Classification: Beyond Nearest Neighbors,"Naive Bayes Image Classification:
+Beyond Nearest Neighbors
+Radu Timofte1, Tinne Tuytelaars1, and Luc Van Gool1,2
+ESAT-VISICS /IBBT, Catholic University of Leuven, Belgium
+D-ITET, ETH Zurich, Switzerland"
+4f10b81f822091ce2142e33f0578940da1e25ad3,"Indoor Mobile Robotics at Grima, PUC","Noname manuscript No.
+(will be inserted by the editor)
+Indoor Mobile Robotics at Grima, PUC
+L. Caro · J. Correa · P. Espinace · D.
+Maturana · R. Mitnik · S. Montabone · S.
+Pszcz´o(cid:32)lkowski · D. Langdon · A. Araneda ·
+D. Mery · M. Torres · A. Soto
+Received: date / Accepted: date"
+4f863543407143a62e1bb053d435a947886ba619,Distributed deep learning on edge-devices: Feasibility via adaptive compression,"Distributed deep learning on edge-devices:
+feasibility via adaptive compression
+Corentin Hardy
+Technicolor, Inria
+Rennes, France
+Erwan Le Merrer
+Technicolor
+Rennes, France
+Bruno Sericola
+Inria
+Rennes, France"
+4f5e5fea12c44a5be7107748320e6d66192b7acb,Automatic approach-avoidance tendencies as a candidate intermediate phenotype for depression: Associations with childhood trauma and the 5-HTTLPR transporter polymorphism,"RESEARCH ARTICLE
+Automatic approach-avoidance tendencies as
+candidate intermediate phenotype for
+depression: Associations with childhood
+trauma and the 5-HTTLPR transporter
+polymorphism
+Pascal Fleurkens1*, Agnes van Minnen1,2, Eni S. Becker1, Iris van Oostrom3,
+Anne Speckens3, Mike Rinck1, Janna N. Vrijsen3,4
+Behavioural Science Institute, Radboud University Nijmegen, Nijmegen, The Netherlands,
+Psychotrauma Expertise Centrum (PSYTREC), Bilthoven, The Netherlands, 3 Department of Psychiatry,
+Radboud University Medical Centre, Nijmegen, The Netherlands, 4 Pro Persona: Institution for Integrated
+Mental Health Care, Nijmegen, The Netherlands"
+4fe0c6c83d998a0660bc5280c8ab6e61df9df887,Face Image Normalization and Expression/pose Validation for the Analysis of Machine Readable Travel Documents,"FACE IMAGE NORMALIZATION AND
+EXPRESSION/POSE VALIDATION FOR THE
+ANALYSIS OF MACHINE READABLE TRAVEL
+DOCUMENTS
+Markus Storer1, Martin Urschler1, Horst Bischof1,
+Josef A. Birchbauer2"
+4f618cbf19917ce5b8703adbc14e15b0bf0d35cc,Multi-View Dynamic Facial Action Unit Detection,"Multi-View Dynamic Facial Action Unit Detection
+Andr´es Romero
+Juan Le´on
+Pablo Arbel´aez
+Universidad de los Andes"
+4fb11a58d5a3ffc0bb6d4ade334a366b4a431b02,The Role of Minimal Complexity Functions in Unsupervised Learning of Semantic Mappings,
+4f606761ce65399ef4ff24cd503ec09cf53562e9,"A System View of the Recognition and Interpretation of Observed Human Shape, Pose and Action","Copyright © 2015 David W. Arathorn
+A System View of the Recognition and Interpretation of
+Observed Human Shape, Pose and Action
+David W. Arathorn
+Dept of Electrical and Computer Engineering
+(formerly of Center for Computational Biology)
+Montana State University-Bozeman
+General Intelligence Corporation
+Bozeman, MT"
+4fdbe95edb967bfc0b44f0fa291cd86b178fca2e,"Competitive Collaboration: Joint Unsupervised Learning of Depth, Camera Motion, Optical Flow and Motion Segmentation","Competitive Collaboration: Joint Unsupervised
+Learning of Depth, Camera Motion, Optical
+Flow and Motion Segmentation
+Anurag Ranjan1
+Varun Jampani2
+Kihwan Kim 2
+Deqing Sun 2
+Jonas Wulff 1
+Michael J. Black1
+Max Planck Institute for Intelligent Systems
+NVIDIA Research
+{aranjan, jwulff,
+{vjampani, kihwank,"
+4f4f920eb43399d8d05b42808e45b56bdd36a929,A Novel Method for 3 D Image Segmentation with Fusion of Two Images using Color K-means Algorithm,"International Journal of Computer Applications (0975 – 8887)
+Volume 123 – No.4, August 2015
+A Novel Method for 3D Image Segmentation with Fusion
+of Two Images using Color K-means Algorithm
+Neelam Kushwah
+Dept. of CSE
+ITM Universe
+Gwalior
+Priusha Narwariya
+Dept. of CSE
+ITM Universe
+Gwalior"
+4f77a37753c03886ca9c9349723ec3bbfe4ee967,"Localizing Facial Keypoints with Global Descriptor Search, Neighbour Alignment and Locally Linear Models","Localizing Facial Keypoints with Global Descriptor Search,
+Neighbour Alignment and Locally Linear Models
+Md. Kamrul Hasan1, Christopher Pal1 and Sharon Moalem2
+´Ecole Polytechnique de Montr´eal, Universit´e de Montr´eal
+University of Toronto and Recognyz Systems Technologies
+lso focused on emotion recognition in the wild [9]."
+4f77c682f133d5010762556ebf512533524da071,Deep Learning of Appearance Models for Online Object Tracking,"Deep Learning of Appearance Models for Online
+Object Tracking
+Mengyao Zhai, Mehrsan Javan Roshtkhari, Greg Mori"
+4fec8a97d6d87713c5c00f369fc1373fba4377e3,Training Sources 3 D Normalized Pose Space 2 D Normalized Pose Space KD-Tree Input Image 2 D Pose Estimation 3 D Pose Reconstruction Retrieved 3 D Nearest Neighbours Motion Capture Dataset Annotated 2,"SUBMITTED TO COMPUTER VISION AND IMAGE UNDERSTANDING.
+A Dual-Source Approach for 3D Human Pose
+Estimation from a Single Image
+Umar Iqbal*, Andreas Doering*, Hashim Yasin, Björn Krüger, Andreas Weber, and Juergen Gall"
+8d40150c7ec59daba7d1a34eba291ff2eac6388c,Overcoming Dataset Bias: An Unsupervised Domain Adaptation Approach,"Overcoming Dataset Bias:
+An Unsupervised Domain Adaptation Approach
+Boqing Gong
+Dept. of Computer Science
+U. of Southern California
+Los Angeles, CA 90089
+Fei Sha
+Dept. of Computer Science
+U. of Southern California
+Los Angeles, CA 90089
+Kristen Grauman
+Dept. of Computer Science
+U. of Texas at Austin
+Austin, TX 78701"
+8de06a584955f04f399c10f09f2eed77722f6b1c,Facial Landmarks Localization Estimation by Cascaded Boosted Regression,"Author manuscript, published in ""International Conference on Computer Vision Theory and Applications (VISAPP 2013) (2013)"""
+8d4f0517eae232913bf27f516101a75da3249d15,Event-based Dynamic Face Detection and Tracking Based on Activity,"ARXIV SUBMISSION, MARCH 2018
+Event-based Dynamic Face Detection and
+Tracking Based on Activity
+Gregor Lenz, Sio-Hoi Ieng and Ryad Benosman"
+8d19cfe643582fae03ce024efaf117d1efef5e58,A Robust Likelihood Function for 3D Human Pose Tracking,"This is the author's version of an article that has been published in this journal. Changes were made to this version by the publisher prior to publication.
+The final version of record is available at http://dx.doi.org/10.1109/TIP.2014.2364113
+A Robust Likelihood Function for 3D Human Pose
+Tracking
+Weichen Zhang, Student Member, IEEE, Lifeng Shang, Member, IEEE, Antoni B. Chan, Member, IEEE,"
+8d97e0102b5d89c62e5c6697eeaaefc82b36c809,Bottom-up attention orienting in young children with autism.,"J Autism Dev Disord (2014) 44:664–673
+DOI 10.1007/s10803-013-1925-5
+O R I G I N A L P A P E R
+Bottom-Up Attention Orienting in Young Children with Autism
+Dima Amso • Sara Haas • Elena Tenenbaum •
+Julie Markant • Stephen J. Sheinkopf
+Published online: 1 September 2013
+Ó Springer Science+Business Media New York 2013"
+8d8afef13a8f6195d3b874231e5e767cf62f3c50,Deep Ranking for Person Re-Identification via Joint Representation Learning,"Deep Ranking for Person Re-identification via
+Joint Representation Learning
+Shi-Zhe Chen, Chun-Chao Guo, Student Member, IEEE, and Jian-Huang Lai, Senior Member, IEEE"
+8de2dbe2b03be8a99628ffa000ac78f8b66a1028,Action Recognition in Videos,"´Ecole Nationale Sup´erieure dInformatique et de Math´ematiques Appliqu´ees de Grenoble
+INP Grenoble – ENSIMAG
+UFR Informatique et Math´ematiques Appliqu´ees de Grenoble
+Rapport de stage de Master 2 et de projet de fin d’´etudes
+Effectu´e au sein de l’´equipe LEAR, I.N.R.I.A., Grenoble
+Action Recognition in Videos
+Gaidon Adrien
+e ann´ee ENSIMAG – Option I.I.I.
+M2R Informatique – sp´ecialit´e I.A.
+04 f´evrier 2008 – 04 juillet 2008
+LEAR,
+I.N.R.I.A., Grenoble
+655 avenue de l’Europe
+8 334 Montbonnot
+France
+Responsable de stage
+Mme. Cordelia Schmid
+Tuteur ´ecole
+M. Augustin Lux
+M. Roger Mohr"
+8db9f32b0de29cfb7fd8e3d225be47b801cc9848,Vision-based deep execution monitoring,"Vision-based deep execution monitoring
+Francesco Puja, Simone Grazioso, Antonio Tammaro, Valsmis Ntouskos, Marta Sanzari, Fiora Pirri"
+8d3fbdb9783716c1832a0b7ab1da6390c2869c14,Discriminant Subspace Analysis for Uncertain Situation in Facial Recognition,"Discriminant Subspace Analysis for Uncertain
+Situation in Facial Recognition
+Pohsiang Tsai, Tich Phuoc Tran, Tom Hintz and Tony Jan
+School of Computing and Communications – University of Technology, Sydney
+Australia
+. Introduction
+Facial analysis and recognition have received substential attention from researchers in
+iometrics, pattern recognition, and computer vision communities. They have a large
+number of applications, such as security, communication, and entertainment. Although a
+great deal of efforts has been devoted to automated face recognition systems, it still remains
+challenging uncertainty problem. This is because human facial appearance has potentially
+of very large intra-subject variations of head pose, illumination, facial expression, occlusion
+due to other objects or accessories, facial hair and aging. These misleading variations may
+ause classifiers to degrade generalization performance.
+It is important for face recognition systems to employ an effective feature extraction scheme
+to enhance separability between pattern classes which should maintain and enhance
+features of the input data that make distinct pattern classes separable (Jan, 2004). In general,
+there exist a number of different feature extraction methods. The most common feature
+extraction methods are subspace analysis methods such as principle component analysis
+(PCA) (Kirby & Sirovich, 1990) (Jolliffe, 1986) (Turk & Pentland, 1991b), kernel principle"
+8d09c8c6b636ef70633a3f1bb8ff6b4d4136b5cf,3D Twins Expression Challenge,"D Twins Expression Challenge
+Vipin Vijayan, Kevin Bowyer, Patrick Flynn
+Department of Computer Science and Engineering,
+University of Notre Dame.
+84 Fitzpatrick Hall,
+Notre Dame, IN 46556, USA.
+{vvijayan, kwb,
+. Introduction
+We describe the 3D Twins Expression Challenge (“3D
+TEC”) problem in the area of 3D face recognition. The
+supporting dataset contains 3D scans of pairs of identical
+twins taken with two different facial expressions, neutral
+nd smiling. The dataset is smaller than the FRGC v2 [1]
+dataset by approximately a factor of ten, but is still more
+hallenging than the FRGC v2 dataset due to it containing
+twins with different expressions. This challenge problem
+will help to push the frontiers of 3D face recognition.
+Three dimensional face recognition is an active research
+topic in biometrics [2, 3]. While 2D pictures can be cap-
+tured quickly, non-intrusively, and easily by widely avail-"
+8d42a24d570ad8f1e869a665da855628fcb1378f,An Empirical Study of Context in Object Detection,"CVPR 2009 Submission #987. CONFIDENTIAL REVIEW COPY. DO NOT DISTRIBUTE.
+An Empirical Study of Context in Object Detection
+Anonymous CVPR submission
+Paper ID 987"
+8d8461ed57b81e05cc46be8e83260cd68a2ebb4d,Age identification of Facial Images using Neural Network,"Age identification of Facial Images using Neural
+Network
+Sneha Thakur, Ligendra Verma
+CSE Department,CSVTU
+RIT, Raipur, Chhattisgarh , INDIA"
+8de7c496c1dac3be5fa55de72867325153b119bd,Robust Face Recognition using Key-point Descriptors,"Robust Face Recognition using Key-point Descriptors
+Soeren Klemm, Yasmina Andreu, Pedro Henriquez and Bogdan J. Matuszewski
+Robotics and Computer Vision Research Laboratory, School of Computing Engineering and Physical Sciences,
+University of Central Lancashire, Preston, U.K.
+Keywords:
+Face Recognition, SIFT, SURF, ORB, Feature Matching, Face Occlusions."
+8d384e8c45a429f5c5f6628e8ba0d73c60a51a89,Temporal Dynamic Graph LSTM for Action-Driven Video Object Detection,"Temporal Dynamic Graph LSTM for Action-driven Video Object Detection
+Yuan Yuan1 Xiaodan Liang2 Xiaolong Wang2 Dit-Yan Yeung1 Abhinav Gupta2
+The Hong Kong University of Science and Technology 2 Carneige Mellon University"
+8d9067da4ba5c57643ee7a84cd5c5d5674384937,Sorting out Lipschitz function approximation,"SORTING OUT LIPSCHITZ FUNCTION APPROXIMATION
+Cem Anil ∗
+James Lucas∗
+Roger Grosse
+University of Toronto; Vector Institute
+{cemanil, jlucas,"
+8d559aeefb291d5b017c263a49f38e8a28439344,Visually-Driven Semantic Augmentation for Zero-Shot Learning,"VDSA: VISUALLY-DRIVEN SEMANTIC AUGMENTATION FOR ZSL
+Visually-Driven Semantic Augmentation for
+Zero-Shot Learning
+Abhinaba Roy1,2
+Jacopo Cavazza1
+Vittorio Murino1,3
+Pattern Analysis and Computer Vision
+Istituto Italiano di Tecnologia
+Genova, Italy
+Department of Naval, Electrical,
+Electronic and Telecommunications
+Engineering
+University of Genova, Italy
+Department of Computer Science
+University of Verona, Italy"
+8d6d0fdf4811bc9572326d12a7edbbba59d2a4cc,SchiNet: Automatic Estimation of Symptoms of Schizophrenia from Facial Behaviour Analysis,"SchiNet: Automatic Estimation of Symptoms of
+Schizophrenia from Facial Behaviour Analysis
+Mina Bishay, Petar Palasek, Stefan Priebe, and Ioannis Patras"
+8d4f2339fcadc2d1ef2126a11dce08ce7cb75bdd,Subspace Clustering via Optimal Direction Search,"Subspace Clustering via Optimal Direction Search
+Mostafa Rahmani, Student Member, IEEE and George K. Atia, Member, IEEE"
+8d3114a3236ec9adabcf0c40613a23f00c272a1c,From 3D Point Clouds to Pose-Normalised Depth Maps,"Int J Comput Vis (2010) 89: 152–176
+DOI 10.1007/s11263-009-0297-y
+From 3D Point Clouds to Pose-Normalised Depth Maps
+Nick Pears · Tom Heseltine · Marcelo Romero
+Received: 30 September 2008 / Accepted: 14 September 2009 / Published online: 25 September 2009
+© Springer Science+Business Media, LLC 2009"
+8d1adf0ac74e901a94f05eca2f684528129a630a,Facial Expression Recognition Using Facial Movement Features,"Facial Expression Recognition Using Facial
+Movement Features"
+8db43d306a70e23e2a0e6eb2fda60f14b73f65d0,Multi-Commodity Network Flow for Tracking Multiple People,"Multi-Commodity Network Flow
+for Tracking Multiple People
+Horesh Ben Shitrit, J´erˆome Berclaz, Franc¸ois Fleuret, and Pascal Fua, Fellow, IEEE"
+8dfdfcc3f34263779871d023fad973f4a1966ec0,Internet of vehicles in big data era,"Internet of Vehicles in Big Data Era
+Wenchao Xu, Haibo Zhou, Member, IEEE, Nan Cheng, Member, IEEE, Feng Lyu, Weisen Shi, Jiayin Chen,
+Xuemin (Sherman) Shen, Fellow, IEEE"
+8def62fd86b5ea0a41fd9f892bd95b01bf072e88,A hybrid approach to content based image retrieval using visual features and textual queries,"Proceedings of the 2013 International Conference on Information, Operations Management and Statistics (ICIOMS2013),
+Kuala Lumpur, Malaysia, September 1-3, 2013
+A Hybrid Approach to Content-based Image Retrieval
+Smarajit Bosea, Amita Pala*, Jhimli Mallickb , Sunil Kumarc
+Applied Statistics Division, Indian Statistical Institute, Kolkata, India
+TechBLA Solutions, Kolkata, India
+ETH, Zurich, Switzerland"
+8d646ac6e5473398d668c1e35e3daa964d9eb0f6,Memory-Efficient Global Refinement of Decision-Tree Ensembles and its Application to Face Alignment,"MEMORY-EFFICIENT GLOBAL REFINEMENT OF DECISION-TREE ENSEMBLES AND
+ITS APPLICATION TO FACE ALIGNMENT
+Nenad Markuˇs†
+Ivan Gogi´c†
+Igor S. Pandˇzi´c†
+J¨orgen Ahlberg‡
+University of Zagreb, Faculty of Electrical Engineering and Computing, Unska 3, 10000 Zagreb, Croatia
+Computer Vision Laboratory, Dept. of Electrical Engineering, Link¨oping University, SE-581 83 Link¨oping, Sweden"
+8dffbb6d75877d7d9b4dcde7665888b5675deee1,Emotion Recognition with Deep-Belief Networks,"Emotion Recognition with Deep-Belief
+Networks
+Tom McLaughlin, Mai Le, Naran Bayanbat
+Introduction
+For our CS229 project, we studied the problem of
+reliable computerized emotion recognition in images of
+human
+faces. First, we performed a preliminary
+exploration using SVM classifiers, and then developed an
+pproach based on Deep Belief Nets. Deep Belief Nets, or
+DBNs, are probabilistic generative models composed of
+multiple layers of stochastic latent variables, where each
+“building block” layer is a Restricted Boltzmann Machine
+(RBM). DBNs have a greedy layer-wise unsupervised
+learning algorithm as well as a discriminative fine-tuning
+procedure for optimizing performance on classification
+tasks. [1].
+We trained our classifier on three databases: the
+Cohn-Kanade Extended Database (CK+) [2], the Japanese
+Female Facial Expression Database (JAFFE) [3], and the"
+8d5998cd984e7cce307da7d46f155f9db99c6590,ChaLearn looking at people: A review of events and resources,"ChaLearn Looking at People:
+A Review of Events and Resources
+Sergio Escalera1,2, Xavier Bar´o2,3, Hugo Jair Escalante4,5, Isabelle Guyon4,6,
+Dept. Mathematics and Computer Science, UB, Spain,
+Computer Vision Center, UAB, Barcelona, Spain,
+EIMT, Open University of Catalonia, Barcelona, Spain,
+ChaLearn, California, USA, 5 INAOE, Puebla, Mexico,
+6 Universit´e Paris-Saclay, Paris, France,
+http://chalearnlap.cvc.uab.es"
+8dce38840e6cf5ab3e0d1b26e401f8143d2a6bff,Towards large scale multimedia indexing: A case study on person discovery in broadcast news,"Towards large scale multimedia indexing:
+A case study on person discovery in broadcast news
+Nam Le1, Hervé Bredin2, Gabriel Sargent3, Miquel India5, Paula Lopez-Otero6,
+Claude Barras2, Camille Guinaudeau2, Guillaume Gravier3, Gabriel Barbosa da Fonseca4,
+Izabela Lyon Freire4, Zenilton Patrocínio Jr4, Silvio Jamil F. Guimarães4, Gerard Martí5,
+Josep Ramon Morros5, Javier Hernando5, Laura Docio-Fernandez6, Carmen Garcia-Mateo6,
+Sylvain Meignier7, Jean-Marc Odobez1
+Idiap Research Institute & EPFL, 2 LIMSI, CNRS, Univ. Paris-Sud, Université Paris-Saclay,
+CNRS, Irisa & Inria Rennes, 4 PUC de Minas Gerais, Belo Horizonte,
+5 Universitat Politècnica de Catalunya, 6 University of Vigo, 7 LIUM, University of Maine"
+8d7a55d184659ac97d02061a660ae4e30604185b,Penalizing Top Performers: Conservative Loss for Semantic Segmentation Adaptation,"Penalizing Top Performers: Conservative Loss
+for Semantic Segmentation Adaptation
+Xinge Zhu1, Hui Zhou2, Ceyuan Yang1, Jianping Shi2, Dahua Lin1
+CUHK-SenseTime Joint Lab, CUHK
+SenseTime Research"
+8df3bef321cd1b259cf6fb1ef264a2e885610044,Interactively Learning Visually Grounded Word Meanings from a Human Tutor,"Proceedings of the 5th Workshop on Vision and Language, pages 48–53,
+Berlin, Germany, August 12 2016. c(cid:13)2016 Association for Computational Linguistics"
+8d156f3b4f1ad5d041ae9f50a0b879e25c80749e,A New Approach for Face Recognition and Age Classification using LDP,"International Journal of Scientific & Engineering Research, Volume 4, Issue 6, June 2013
+ISSN 2229-5518
+A New Approach for Face Recognition and Age
+Classification using LDP
+M Rama Bai
+Professor, Dept of CSE, M.G.I.T, JNTUH, Hyderabad, Andhra Pradesh, INDIA"
+153f5ad54dd101f7f9c2ae17e96c69fe84aa9de4,Overview of algorithms for face detection and tracking,"Overview of algorithms for face detection and
+tracking
+Nenad Markuˇs"
+15b44a1c3602385b6cf3eeb049cb2d6c12bb7d74,Automatic semantic annotation of images based on Web data,"010 Sixth International
+Conference
+on Information
+Assurance
+nd Security
+Automatic
+semantic annotation
+of images based on Web data
+Guiguang Ding
+School of Software
+University
+of Tsinghua
+Beijing,
+China
+edu.cn
+School of Software
+University
+Beijing,
+China
+of Tsinghua"
+155ce5d596c7b525110ca24db11e47d521b487ce,STC: A Simple to Complex Framework for Weakly-Supervised Semantic Segmentation,"STC: A Simple to Complex Framework for
+Weakly-supervised Semantic Segmentation
+Yunchao Wei, Xiaodan Liang, Yunpeng Chen, Xiaohui Shen, Ming-Ming Cheng, Jiashi Feng, Yao Zhao,
+Senior Member, IEEE and Shuicheng Yan Senior Member, IEEE"
+15c8443f8d9f1f6537fa8ff470ac407bf2185b0e,Learning Binary Code Representations for Effective and Efficient Image Retrieval,
+1550c3835822843a02b2144cef8abc534441f5d4,Human Pose Classification within the Context of Near-IR Imagery Tracking,"Human Pose Classification within the Context of Near-IR
+Imagery Tracking
+Jiwan Han, Anna Gaszczak, Ryszard Maciol, Stuart E. Barnes, Toby P. Breckon
+School of Engineering, Cranfield University, Bedfordshire, UK"
+15696370ff33b6e5a81bf5131d80065d6e59804f,Semantically guided location recognition for outdoors scenes,"Semantically Guided Location Recognition for Outdoors Scenes
+Arsalan Mousavian and Jana Koˇseck´a and Jyh-Ming Lien"
+15cf11ddfc046b2ed2766c375e8ad067baaf8347,Active Pedestrian Safety by Automatic Braking and Evasive Steering,"Active Pedestrian Safety
+y Automatic Braking and Evasive Steering
+C. Keller, T. Dang, H. Fritz, A. Joos, C. Rabe and D. M. Gavrila"
+15cd05baa849ab058b99a966c54d2f0bf82e7885,Structured Sparse Subspace Clustering: A unified optimization framework,"Structured Sparse Subspace Clustering: A Unified Optimization Framework
+Chun-Guang Li1, René Vidal2
+SICE, Beijing University of Posts and Telecommunications. 2Center for Imaging Science, Johns Hopkins University.
+In many real-world applications, we need to deal with high-dimensional
+datasets, such as images, videos, text, and more. In practice, such high-
+dimensional datasets can be well approximated by multiple low-dimensional
+subspaces corresponding to multiple classes or categories. For example, the
+feature point trajectories associated with a rigidly moving object in a video
+lie in an affine subspace (of dimension up to 4), and face images of a subject
+under varying illumination lie in a linear subspace (of dimension up to 9).
+Therefore, the task, known in the literature as subspace clustering [6], is
+to segment the data into the corresponding subspaces and finds multiple
+pplications in computer vision.
+State of the art approaches [1, 2, 3, 4, 5, 7] for solving this problem fol-
+low a two-stage approach: a) Construct an affinity matrix between points by
+exploiting the ‘self-expressiveness’ property of the data, which allows any
+data point to be represented as a linear (or affine) combination of the other
+data points; b) Apply spectral clustering on the affinity matrix to recover
+the data segmentation. Dividing the problem in two steps is, on the one
+hand, appealing because the first step can be solved using convex optimiza-"
+15136c2f94fd29fc1cb6bedc8c1831b7002930a6,Deep Learning Architectures for Face Recognition in Video Surveillance,"Deep Learning Architectures for Face
+Recognition in Video Surveillance
+Saman Bashbaghi, Eric Granger, Robert Sabourin and Mostafa Parchami"
+15623fe8875a36cac5283ff2f08cd50998599725,Semantic Instance Segmentation for Autonomous Driving,"Semantic Instance Segmentation for Autonomous Driving
+Bert De Brabandere
+Davy Neven
+ESAT-PSI, KU Leuven
+Luc Van Gool"
+159b52158512481df7684c341401efbdbc5d8f02,Object Detection with Active Sample Harvesting,"Object Detection
+with Active Sample Harvesting
+Thèse no 7312
+présentée le 5 Octobre 2016
+à la Faculté des Sciences et Techniques de l'Ingénieur
+Laboratoire LIDIAP (Idiap Research Institute)
+École Polytechnique Fédérale de Lausanne
+pour l'obtention du grade de Docteur ès Sciences
+Olivier Canévet
+devant le jury composé de :
+Prof. Pascal Frossard, président du jury
+Prof. Gilles Blanchard, rapporteur
+Prof. Raphael Sznitman, rapporteur
+Dr Mathieu Salzmann, rapporteur
+Dr François Fleuret, directeur de thèse
+Lausanne, EPFL, 2016"
+15e024d8f5625ec03c8ac592fbc093687cfb5f02,The Visual Object Tracking VOT2013 Challenge Results,"The Visual Object Tracking VOT2013 challenge results
+Matej Kristan a
+Luka ˇCehovin a
+Roman Pflugfelder b
+Georg Nebehay b
+Aleˇs Leonardis c
+Gustavo Fernandez b
+Jiri Matas d
+Tom´aˇs Voj´ıˇr d
+Fatih Porikli e
+Adam Gatt f
+Ahmad Khajenezhad g
+Alfredo Petrosino i
+Chee Seng Chan m
+Dorothy Monekosso n
+Jin Gao q
+Ahmed Salahledin h
+Anthony Milton j
+CherKeng Heng l
+Jingjing Xiao c"
+15605634feb1a5770182a8f2c3515daf102ed463,Real-time human pose recognition in parts from single depth images,"Real-Time Human Pose Recognition in Parts from Single Depth Images
+Mark Finocchio
+Jamie Shotton
+Andrew Fitzgibbon
+Toby Sharp
+Andrew Blake
+Richard Moore
+Mat Cook
+Alex Kipman
+Microsoft Research Cambridge & Xbox Incubation"
+15f57134b42638cbd57d0d8c4437e8b6b6a8bac4,Learning Visual Reasoning Without Strong Priors,"Learning Visual Reasoning Without Strong Priors
+Ethan Perez12, Harm de Vries1, Florian Strub3,
+Vincent Dumoulin1, Aaron Courville14
+MILA, Universit´e of Montr´eal, Canada; 2Rice University, U.S.A.
+Univ. Lille, CNRS, Centrale Lille, Inria, UMR 9189 CRIStAL France
+CIFAR Fellow, Canada"
+153e5cddb79ac31154737b3e025b4fb639b3c9e7,Active Dictionary Learning in Sparse Representation Based Classification,"PREPRINT SUBMITTED TO IEEE TRANSACTIONS ON NEURAL NETWORKS AND LEARNING SYSTEMS
+Active Dictionary Learning in Sparse
+Representation Based Classification
+Jin Xu, Haibo He, Senior Member, IEEE, and Hong Man, Senior Member, IEEE"
+15e6c983e74dcf70d8a557b75bdc172e36692191,VSO: Visual Semantic Odometry,"VSO: Visual Semantic Odometry
+Konstantinos-Nektarios Lianos 1,⋆,
+Johannes L. Sch¨onberger 2,
+Marc Pollefeys 2,3, Torsten Sattler 2
+Geomagical Labs, Inc., USA 3 Microsoft, Switzerland
+Department of Computer Science, ETH Z¨urich, Switzerland"
+15df73918e084a146cd215b839a3eec1cc813a78,Projection Peak Analysis for Rapid Eye Localization,"PROJECTION PEAK ANALYSIS FOR RAPID EYE LOCALIZATION
+Research Center of Intelligent Robotics, Shanghai Jiaotong University, Shanghai, 200240, China
+Jingwen Dai, Dan Liu and Jianbo Su
+Keywords:
+Eye localization, Threshold, Segmentation, Projection peak."
+1542b8a1805d73a755d4b2eb402c5c861e6acd02,PMCTrack: Delivering Performance Monitoring Counter Support to the OS Scheduler,"PMCTrack: Delivering performance
+monitoring counter support to the OS
+scheduler
+J. C. Saez1, A. Pousa2, R. Rodr´ıguez-Rodr´ıguez1, F. Castro1,
+M. Prieto-Matias1
+ArTeCS Group, Facultad de Inform´atica, Complutense University of Madrid
+III-LIDI, Facultad de Inform´atica, National University of La Plata
+Email:
+Hardware performance monitoring counters (PMCs) have proven effective in
+haracterizing application performance. Because PMCs can only be accessed
+directly at the OS privilege level, kernel-level tools must be developed to enable
+the end user and userspace programs to access PMCs. A large body of work
+has demonstrated that the OS can perform effective runtime optimizations in
+multicore systems by leveraging performance-counter data. Special attention has
+een paid to optimizations in the OS scheduler. While existing performance
+monitoring tools greatly simplify the collection of PMC application data from
+userspace, they do not provide an architecture-agnostic kernel-level mechanism
+that is capable of exposing high-level PMC metrics to OS components, such as
+the scheduler. As a result, the implementation of PMC-based OS scheduling
+schemes is typically tied to specific processor models."
+1548cea1fa9be7a23d4d1e38086336913d501e44,Semantic 3D Reconstruction of Heads Supplementary Material,"Semantic 3D Reconstruction of Heads
+Supplementary Material
+Fabio Maninchedda1, Christian H¨ane2,(cid:63), Bastien Jacquet3,(cid:63),
+Ama¨el Delaunoy(cid:63), Marc Pollefeys1,4
+ETH Zurich
+UC Berkeley
+Kitware SAS
+Microsoft
+Fig. 1: From left to right: Input image; Input labels and depth; Depth map fusion
+(TV-Flux fusion from [9]); Statistical model of [7] fitted into our raw input data;
+Our semantic reconstruction; Our result skin class only; Our model textured.
+(cid:63) Work done while authors were at the Department of Computer Science, ETH Z¨urich"
+15d1326f054f4fadea463f217ce54bad6908705a,Sensor fusion in smart camera networks for Ambient Intelligence - Report on PhD Thesis and Defense,"Sensor fusion in smart camera networks for ambient
+intelligence
+Maatta, T.T.
+0.6100/IR755363
+Published: 01/01/2013
+Document Version
+Publisher’s PDF, also known as Version of Record (includes final page, issue and volume numbers)
+Please check the document version of this publication:
+• A submitted manuscript is the author's version of the article upon submission and before peer-review. There can be important differences
+etween the submitted version and the official published version of record. People interested in the research are advised to contact the
+uthor for the final version of the publication, or visit the DOI to the publisher's website.
+• The final author version and the galley proof are versions of the publication after peer review.
+• The final published version features the final layout of the paper including the volume, issue and page numbers.
+Link to publication
+Citation for published version (APA):
+Maatta, T. T. (2013). Sensor fusion in smart camera networks for ambient intelligence Eindhoven: Technische
+Universiteit Eindhoven DOI: 10.6100/IR755363
+General rights
+Copyright and moral rights for the publications made accessible in the public portal are retained by the authors and/or other copyright owners
+nd it is a condition of accessing publications that users recognise and abide by the legal requirements associated with these rights."
+159b87e6e68b18f4daa3505bfc415be9b21a7db6,Tracking The Invisible Man - Hidden-object Detection for Complex Visual Scene Understanding,
+15ec1faddbd61a9d50925c7b9b0c76642abe94e7,Efficient Techniques for Recovering 2d Human Body Poses from Images Dissertation Efficient Techniques for Recovering 2d Human Body Poses from Images Second Reader,"EFFICIENT TECHNIQUES FOR RECOVERING 2D
+HUMAN BODY POSES FROM IMAGES
+TAI-PENG TIAN
+Dissertation submitted in partial fulfillment
+of the requirements for the degree of
+Doctor of Philosophy
+BOSTON
+UNIVERSITY"
+150326137da214210b46e0b7f22e30f7e6529006,Pedestrian Detection at Warp Speed: Exceeding 500 Detections per Second,"Pedestrian Detection at Warp Speed: Exceeding 500 Detections per Second
+Floris De Smedt∗, Kristof Van Beeck∗, Tinne Tuytelaars and Toon Goedem´e
+EAVISE, ESAT-PSI-VISICS, KU Leuven, Belgium"
+15ebec3796a2e23d31c8c8ddf6d21555be6eadc6,Recent Advances in Object Detection in the Age of Deep Convolutional Neural Networks,"Recent Advances in Object Detection in the Age
+of Deep Convolutional Neural Networks
+Shivang Agarwal(∗
+,1), Jean Ogier du Terrail(∗
+,1,2), Fr´ed´eric Jurie(1)
+(∗) equal contribution
+(1)Normandie Univ, UNICAEN, ENSICAEN, CNRS
+(2)Safran Electronics and Defense
+September 11, 2018"
+156b194d0cee545337524bd993ae640ed227b79e,Radon Transform and Symbolic Linear Discriminant Analysis Based 3 D Face Recognition Using Knn and Svm,"ISSN 2320 - 2602
+Volume 2, No.12, December 2013
+P. S. Hiremath et al., International Journal of Advances in Computer Science and Technology, 2(12), December 2013, 267-274
+International Journal of Advances in Computer Science and Technology
+Available Online at http://warse.org/pdfs/2013/ijacst022122013.pdf
+RADON TRANSFORM AND SYMBOLIC LINEAR DISCRIMINANT
+ANALYSIS BASED 3D FACE RECOGNITION USING KNN AND SVM
+P. S. Hiremath, Manjunatha Hiremath1
+Department of Computer Science,
+Gulbarga University, Gulbarga, Karnataka, India"
+1565bf91f8fdfe5f5168a5050b1418debc662151,One-pass Person Re-identification by Sketch Online Discriminant Analysis,"One-pass Person Re-identification by
+Sketch Online Discriminant Analysis
+Wei-Hong Li, Zhuowei Zhong, and Wei-Shi Zheng∗"
+1546b65e5e95543cf2dc0ead92b758fb31a5f4d6,An inexpensive monocular vision system for tracking humans in industrial environments,"An Inexpensive Monocular Vision System for
+Tracking Humans in Industrial Environments
+Centre for Applied Autonomous Sensor Systems (AASS), ¨Orebro University, Sweden
+Rafael Mosberger and Henrik Andreasson"
+157eb982da8fe1da4c9e07b4d89f2e806ae4ceb6,Connecting the dots in multi-class classification: From nearest subspace to collaborative representation,"MITSUBISHI ELECTRIC RESEARCH LABORATORIES
+http://www.merl.com
+Connecting the Dots in Multi-Class Classification: From
+Nearest Subspace to Collaborative Representation
+Chi, Y.; Porikli, F.
+TR2012-043
+June 2012"
+15f51d51c05c22e1dca3a40fb1af46941d91f598,Modeling Visual Compatibility through Hierarchical Mid-level Elements,"Modeling Visual Compatibility through
+Hierarchical Mid-level Elements
+Jose Oramas M., Tinne Tuytelaars
+KU Leuven, ESAT-PSI, iMinds"
+15e0b9ba3389a7394c6a1d267b6e06f8758ab82b,The OU-ISIR Gait Database comprising the Large Population Dataset with Age and performance evaluation of age estimation,"Xu et al. IPSJ Transactions on Computer Vision and
+Applications (2017) 9:24
+DOI 10.1186/s41074-017-0035-2
+IPSJ Transactions on Computer
+Vision and Applications
+TECHNICAL NOTE
+Open Access
+The OU-ISIR Gait Database comprising the
+Large Population Dataset with Age and
+performance evaluation of age estimation
+Chi Xu1,2, Yasushi Makihara2*, Gakuto Ogi2, Xiang Li1,2, Yasushi Yagi2 and Jianfeng Lu1"
+155033f2f096934042d659d10912ef29aa1cdbd1,Visual classification of coarse vehicle orientation using Histogram of Oriented Gradients features,"Visual Classification of Coarse Vehicle Orientation
+using Histogram of Oriented Gradients Features
+Paul E. Rybski and Daniel Huber and Daniel D. Morris and Regis Hoffman"
+158a8037ce1c577620550da385d2275a31b9ccaa,Combining motion detection and hierarchical particle filter tracking in a multi-player sports environment,"Combining motion detection and hierarchical particle filter tracking
+in a multi-player sports environment
+Robbie Vos, Willie Brink
+Department of Mathematical Sciences
+University of Stellenbosch, South Africa"
+157d2c6dd8c9999b251099ef4211cff8030ae486,Invariance properties of Gabor filter-based features-overview and applications,"Invariance Properties of Gabor Filter Based
+Features – Overview and Applications
+Joni-Kristian Kamarainen∗, Ville Kyrki, Member, IEEE, Heikki K¨alvi¨ainen, Member, IEEE"
+15aa6c457678e25f6bc0e818e5fc39e42dd8e533,Conditional Image Generation for Learning the Structure of Visual Objects,
+15c7fe9c9154113f9824f68ca1870564600b66d6,"EICHNER, FERRARI: BETTER APPEARANCE MODELS FOR PICTORIAL STRUCTURES 1 Better appearance models for pictorial structures","EICHNER, FERRARI: BETTER APPEARANCE MODELS FOR PICTORIAL STRUCTURES
+Better appearance models
+for pictorial structures
+Marcin Eichner
+Vittorio Ferrari
+Computer Vision Laboratory
+Zürich, Switzerland"
+15cf1f17aeba62cd834116b770f173b0aa614bf4,Facial Expression Recognition using Neural Network with Regularized Backpropagation Algorithm,"International Journal of Computer Applications (0975 – 8887)
+Volume 77 – No.5, September 2013
+Facial Expression Recognition using Neural Network with
+Regularized Back-propagation Algorithm
+Ashish Kumar Dogra
+Research Scholar
+Department of ECE,
+Lovely Professional University,
+Phagwara, India
+Nikesh Bajaj
+Assistant Professor
+Department of ECE,
+Lovely Professional University,
+Phagwara, India
+Harish Kumar Dogra
+Research Scholar
+Department of ECE,
+Gyan Ganga Institute of
+Technology & Sciences,
+Jabalpur, India"
+15f3d47b48a7bcbe877f596cb2cfa76e798c6452,Automatic face analysis tools for interactive digital games,"Automatic face analysis tools for interactive digital games
+Anonymised for blind review
+Anonymous
+Anonymous
+Anonymous"
+15728d6fd5c9fc20b40364b733228caf63558c31,Expanding the Breadth and Detail of Object Recognition By,(cid:13) 2013 Ian N. Endres
+15667845de2531b59736d866531728a771500d34,3-D Face Recognition Using Local Appearance-Based Models,"[4] L. Lee and W. E. L. Grimson, “Gait analysis for recognition and classi-
+fication,” in Proc. IEEE Int. Conf. Automatic Face and Gesture Recog-
+nition, Washington, DC, May 2002, pp. 734–742.
+[5] L. Wang, H. Ning, W. Hu, and T. Tan, “Gait recognition based on pro-
+rustes shape analysis,” in Proc. Int. Conf. Image Processing, 2002, pp.
+33–436.
+[6] L. Wang, H. Ning, T. Tan, and W. Hu, “Fusion of static and dynamic
+ody biometrics for gait recognition,” IEEE Trans. Circuits Syst. Video
+Technol., vol. 14, no. 2, pp. 149–158, Feb. 2004.
+[7] D. Cunado, M. S. Nixon, and J. N. Carter, “Automatic extraction and
+description of human gait models for recognition purposes,” in Comput.
+Vis. Image Understand., Apr. 2003, vol. 90, pp. 1–41.
+[8] P. J. Phillips, S. Sarkar, I. R. Vega, P. Grother, and K. W. Bowyer,
+“The gait identification challenge problem: Data sets and baseline al-
+gorithm,” in Proc. Int. Conf. Pattern Recognition, Quebec City, QC,
+Canada, Aug. 2002, vol. 1, pp. 385–388.
+[9] S. Sarkar, P. J. Phillips, Z. Liu, I. R. Vega, P. Grother, and K. W.
+Bowyer, “The human ID gait challenge problem: Data sets, perfor-
+mance, and analysis,” IEEE Trans. Pattern Anal. Mach. Intell., vol. 27,
+no. 2, pp. 162–177, Feb. 2005."
+15e6e1551ce9a4094c57db70985e420e57c6997a,Asymmetric cross-view dictionary learning for person re-identification,"978-1-5090-4117-6/17/$31.00 ©2017 IEEE
+ICASSP 2017"
+155448563c354b01d12610b5864b511644cfeb27,Mapping Images to Sentiment Adjective Noun Pairs with Factorized Neural Nets,"Mapping Images to Sentiment Adjective Noun Pairs with Factorized Neural Nets
+Takuya Narihira
+Sony / ICSI
+Damian Borth
+DFKI / ICSI
+Stella X. Yu
+UC Berkeley / ICSI
+Karl Ni
+In-Q-Tel
+Trevor Darrell
+UC Berkeley / ICSI"
+15292f380f5996f539f4d5e93dba3082d53338fb,Feature Space Optimization for Semantic Video Segmentation,"Feature Space Optimization for Semantic Video Segmentation
+Abhijit Kundu∗
+Georgia Tech
+Vibhav Vineet∗
+Vladlen Koltun
+Intel Labs
+Intel Labs
+Figure 1. Semantic video segmentation on the Cityscapes dataset [6]. Input frame on the left, semantic segmentation computed by our
+pproach on the right."
+157ee7498320119f6f5da2d9c592448986edea7e,Learning Multiple Non-linear Sub-spaces Using K-RBMs,"Learning Multiple Non-Linear Sub-Spaces using K-RBMs
+Siddhartha Chandra1, Shailesh Kumar2 & C. V. Jawahar3
+CVIT, IIIT Hyderabad, 2Google, Hyderabad"
+153c8715f491272b06dc93add038fae62846f498,On Clustering Images of Objects,"(cid:13) Copyright by Jongwoo Lim, 2005"
+12dfc8d4062b83a0b824b1676533482f14e4978c,Cutting Edge: Soft Correspondences in Multimodal Scene Parsing,"Cutting Edge: Soft Correspondences in Multimodal Scene Parsing
+Sarah Taghavi Namin1,2 Mohammad Najafi1,2 Mathieu Salzmann2,3
+Australian National University (ANU)
+Lars Petersson1,2
+CVLab, EPFL, Switzerland
+NICTA∗
+{sarah.namin, mohammad.najafi,"
+12919f98aecdd74c1e0db56cba13d107553e421b,Temporal Model Adaptation for Person Re-Identification: Supplementary Material,"Temporal Model Adaptation for
+Person Re-Identification:
+Supplementary Material
+Niki Martinel1,3, Abir Das2,
+Christian Micheloni1, and Amit K. Roy-Chowdhury3
+University of Udine, 33100 Udine, Italy
+University of Massatchussets Lowell, 01852 Lowell, MA, USA
+University of California Riverside, 92507 Riverside, CA, USA"
+123bc74a006a75fefcdd9995cbdc1c6c64c8bed6,Socially Constrained Structural Learning for Groups Detection in Crowd,"Socially Constrained Structural Learning for
+Groups Detection in Crowd
+Francesco Solera, Simone Calderara, Member, IEEE, and Rita Cucchiara, Fellow, IEEE"
+124476c2815bbfb523c77943c74356f94f79b580,Recognition of Faces in Unconstrained Environments: A Comparative Study,"Hindawi Publishing Corporation
+EURASIP Journal on Advances in Signal Processing
+Volume 2009, Article ID 184617, 19 pages
+doi:10.1155/2009/184617
+Research Article
+Recognition of Faces in Unconstrained Environments:
+A Comparative Study
+Javier Ruiz-del-Solar, Rodrigo Verschae, and Mauricio Correa
+Department of Electrical Engineering, Universidad de Chile, Avenida Tupper 2007, 837-0451 Santiago, Chile
+Correspondence should be addressed to Javier Ruiz-del-Solar,
+Received 10 October 2008; Revised 31 January 2009; Accepted 13 March 2009
+Recommended by Kevin Bowyer
+The aim of this work is to carry out a comparative study of face recognition methods that are suitable to work in unconstrained
+environments. The analyzed methods are selected by considering their performance in former comparative studies, in addition to
+e real-time, to require just one image per person, and to be fully online. In the study two local-matching methods, histograms
+of LBP features and Gabor Jet descriptors, one holistic method, generalized PCA, and two image-matching methods, SIFT-
+ased and ERCF-based, are analyzed. The methods are compared using the FERET, LFW, UCHFaceHRI, and FRGC databases,
+which allows evaluating them in real-world conditions that include variations in scale, pose, lighting, focus, resolution, facial
+expression, accessories, makeup, occlusions, background and photographic quality. Main conclusions of this study are: there is
+large dependence of the methods on the amount of face and background information that is included in the face’s images,"
+12c7ecbfd714c160d2a6bb9cf03fa8b88e8da62b,Impaired Recognition of Basic Emotions from Facial Expressions in Young People with Autism Spectrum Disorder: Assessing the Importance of Expression Intensity.,"Griffiths, S. L., Jarrold, C., Penton-Voak, I., Woods, A., Skinner, A., &
+Munafo, M. (2017). Impaired Recognition of Basic Emotions from Facial
+Expressions in Young People with Autism Spectrum Disorder: Assessing the
+Importance of Expression Intensity. Journal of Autism and Developmental
+Disorders. DOI: 10.1007/s10803-017-3091-7
+Publisher's PDF, also known as Version of record
+License (if available):
+CC BY
+Link to published version (if available):
+0.1007/s10803-017-3091-7
+Link to publication record in Explore Bristol Research
+PDF-document
+This is the final published version of the article (version of record). It first appeared online via Springer at
+http://link.springer.com/article/10.1007%2Fs10803-017-3091-7. Please refer to any applicable terms of use of
+the publisher.
+University of Bristol - Explore Bristol Research
+General rights
+This document is made available in accordance with publisher policies. Please cite only the published
+version using the reference above. Full terms of use are available:
+http://www.bristol.ac.uk/pure/about/ebr-terms.html"
+12d62f1360587fdecee728e6c509acc378f38dc9,Feature Affinity based Pseudo Labeling for Semi-supervised Person Re-identification,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+Feature Affinity based Pseudo Labeling for
+Semi-supervised Person Re-identification
+Guodong Ding, Shanshan Zhang, Salman Khan, Zhenmin Tang, Jian Zhang, Senior Member, IEEE and Fatih
+Porikli, Fellow, IEEE"
+122ee00cc25c0137cab2c510494cee98bd504e9f,The Application of Active Appearance Models to Comprehensive Face Analysis Technical Report,"The Application of
+Active Appearance Models to
+Comprehensive Face Analysis
+Technical Report
+Simon Kriegel
+TU M¨unchen
+April 5, 2007"
+12ebb51d50f704b5d0a8d821e90dd336175ec8aa,TUHOI: Trento Universal Human Object Interaction Dataset,"Proceedings of the 25th International Conference on Computational Linguistics, pages 17–24,
+Dublin, Ireland, August 23-29 2014."
+127759fc41d62b516298fff2706dfcc754ff1ee8,Fabrik: An Online Collaborative Neural Network Editor,"FABRIK: AN ONLINE COLLABORATIVE NEURAL NETWORK EDITOR
+Utsav Garg 1 Viraj Prabhu 2 Deshraj Yadav 2 Ram Ramrakhya 3 Harsh Agrawal 2 Dhruv Batra 2 4
+fabrik.cloudcv.org"
+12417ed7ae81fb4e6c07f501ace9ea463349481b,Pairwise Augmented GANs with Adversarial Reconstruction Loss,"PAIRWISE AUGMENTED GANS WITH
+ADVERSARIAL RECONSTRUCTION LOSS
+Aibek Alanov1,2,3∗, Max Kochurov1,2∗, Daniil Yashkov5, Dmitry Vetrov1,3,4
+Samsung AI Center in Moscow
+Skolkovo Institute of Science and Technology
+National Research University Higher School of Economics
+Joint Samsung-HSE lab
+5Federal Research Center ""Informatics and Management"" of the Russian Academy of Sciences"
+129a6daa54a7334930b6413875b6154acef3922a,Data-Driven Synthesis of Cartoon Faces Using Different Styles,"Data-Driven Synthesis of Cartoon
+Faces Using Different Styles
+Yong Zhang, Weiming Dong, Member, IEEE, Chongyang Ma, Xing Mei, Member, IEEE, Ke Li,
+Feiyue Huang, Bao-Gang Hu, Senior Member, IEEE, and Oliver Deussen"
+124fddbb5cbe4e5e6ea69be1467437aad01eb5d9,A Unified Algorithmic Framework for Multi-Dimensional Scaling,"A Unified Algorithmic Framework for Multi-Dimensional Scaling
+Arvind Agarwal
+Jeff M. Phillips†
+Suresh Venkatasubramanian‡"
+12d0c11d546d91e776a170898ebf3a38c010695c,Semi-Supervised Hashing for Large-Scale Search,"Semi-Supervised Hashing for Large Scale
+Search
+Jun Wang, Member, IEEE, Sanjiv Kumar, Member, IEEE, and Shih-Fu Chang, Fellow, IEEE"
+12727bb8a4a1462553a13a253a97c2569cbcba0a,Study on Different Representation Methods for Subspace Segmentation,"International Journal of Grid Distribution Computing
+Vol.8, No.1 (2015), pp.259-268
+http://dx.doi.org/10.14257/ijgdc.2015.8.1.24
+Study on Different Representation Methods for Subspace
+Segmentation
+Jiangshu Wei, Mantao Wang and Qianqian Wu
+College of Information and Engineering, Sichuan Agricultural University, Ya’an,
+625014, China"
+12149fc431d2b3ec4d1f194e92e74c765e51ee67,Concentration in unbounded metric spaces and algorithmic stability,"Concentration in unbounded metric spaces and algorithmic stability
+Aryeh Kontorovich
+Department of Computer Science, Ben-Gurion University, Beer Sheva 84105, ISRAEL"
+120b22e7a47923e42a123b9b68a93ccac5aaea6d,Paper on Ear Biometric Authentication,"Research Article Volume 6 Issue No.10
+ISSN XXXX XXXX © 2016 IJESC
+Review Paper on Ear Biometric Authentication
+Shubham Mohurle 1, See ma Khutwad 2, Pratiksha Kunjir3, Anjali Bhosle4
+Assistant Professor4
+KJCOEM R, Pune, India
+Abstrac t:
+In this paper we have studied about ear bio metric authentication. Powe rful bio metrics likes fingerprint, face and iris are used while
+omparing the new biometric technology that is human ear recognition. We are studied different methods like 2D ear reco gnition,
+Pattern extract ion method, robust algorithm, Pixe l based feature extraction. Genetic algorith m is the solution to all proble ms faced by
+these methods. Recognition Rate for t ime series modeling is 99% obtained.AR model is used for time series modeling. All methods
+re discussed later.
+Ke ywor ds: Ear, Recognition Rate, 2D image, AR model
+During crime investigation, in the absence of (valid) fingerprints
+nd footprints ear ma rks are used for identification. Just like
+fingerprints, use of ear shapes recommends its use for human
+identification. An ear recognition system is simila r to face
+recognition system and which has five components: image
+cquisition, preprocessing, feature extraction, model training and
+template matching. Du ring image gaining, an image of the ear is"
+12cb3bf6abf63d190f849880b1703ccc183692fe,Guess Who?: A game to crowdsource the labeling of affective facial expressions is comparable to expert ratings,"Guess Who?: A game to crowdsource the labeling of affective facial
+expressions is comparable to expert ratings.
+Barry Borsboom
+Graduation research project, june 2012
+Supervised by: Dr. Joost Broekens
+Leiden University Media Technology Department,"
+1222705b626a33974e85985ddabfcea135e9ddce,k-fold Subsampling based Sequential Backward Feature Elimination,
+127c229a3306bfc8170b84b12316f4a8024cc7ab,"A derived transformation of emotional functions using self-reports, implicit association tests, and frontal alpha asymmetries.","Learn Behav
+DOI 10.3758/s13420-015-0198-6
+A derived transformation of emotional functions
+using self-reports, implicit association tests, and frontal
+lpha asymmetries
+Micah Amd 1 & Bryan Roche 1
+# Psychonomic Society, Inc. 2015"
+12cd96a419b1bd14cc40942b94d9c4dffe5094d2,Leveraging Captions in the Wild to Improve Object Detection,"Proceedings of the 5th Workshop on Vision and Language, pages 29–38,
+Berlin, Germany, August 12 2016. c(cid:13)2016 Association for Computational Linguistics"
+12c548d99fdc59bd702910af2c3daa17ed43e5d7,Performance analysis of different matrix decomposition methods on face recognition,"016 International Conference on Computer Communication and Informatics (ICCCI -2016), Jan. 07 – 09, 2016, Coimbatore, INDIA
+Performance analysis of different Matrix
+decomposition methods on Face Recognition
+Dept. of Electronics and Communication Engineering
+Dept. of Electronics and Communication Engineering
+Suresh Babu K and K B Raja
+UVCE, Bengaluru, India
+the recognition accuracy
+image and known stored images in terms of dimension
+reduced images is made to declare identity of a person. It is
+proved
+improved by
+onverting the images with variation in expression to neutral
+images [5] and using image fusion with light field camera for
+image capturing [6]. Maintaining robustness in recognition
+ccuracy is elusive for key factors such as pose [7], back view
+illumination variation [9] and others. Developing
+illumination invariant image representation with textures is a
+difficult task and pre-processing methods for mitigating the
+illumination effect are discussed in future sections of this"
+1275852f2e78ed9afd189e8b845fdb5393413614,A Transfer Learning based Feature-Weak-Relevant Method for Image Clustering,"A Transfer Learning based Feature-Weak-Relevant Method for
+Image Clustering
+Bo Dong, Xinnian Wang
+Dalian Maritime University
+Dalian, China"
+126b98473cc25e604abd58eb6bcf720354ac7e7a,An experimental illustration of 3D facial shape analysis under facial expressions,"Author manuscript, published in ""Annals of Telecommunications 64, 5-6 (2009) 369-379"""
+12055b8f82d5411f9ad196b60698d76fbd07ac1e,Multiview Facial Landmark Localization in RGB-D Images via Hierarchical Regression With Binary Patterns,"Multiview Facial Landmark Localization in RGB-D
+Images via Hierarchical Regression
+With Binary Patterns
+Zhanpeng Zhang, Student Member, IEEE, Wei Zhang, Member, IEEE, Jianzhuang Liu, Senior Member, IEEE,
+nd Xiaoou Tang, Fellow, IEEE"
+12d813f14166578dea8aa6aacc945102dddfd05d,Fog Computing in 5G Networks: An Application Perspective,"“fog˙5g˙full”
+016/5/4
+page 1
+Chapter 1
+Fog Computing in 5G Networks: An Application
+Perspective
+Harshit Gupta1, Sandip Chakraborty1, Soumya K. Ghosh1,
+nd Rajkumar Buyya2
+Department of Computer Science and Engineering, IIT
+CLOUDS Laboratory, University of Melbourne, Australia
+Kharagpur, India"
+12e80b3a89bc021a6352840fb4552df842a6fe7d,Fast sparse representation with prototypes,"Fast Sparse Representation with Prototypes
+Jia-Bin Huang and Ming-Hsuan Yang
+University of California at Merced"
+1272d526614e40ce859e73de7e39a54baffd28cc,A unified approach to learning task-specific bit vector representations for fast nearest neighbor search,"A Unified Approach to Learning Task-Specific Bit Vector
+Representations for Fast Nearest Neighbor Search
+Vinod Nair
+Yahoo! Labs Bangalore
+Dhruv Mahajan
+Yahoo! Labs Bangalore
+S. Sundararajan
+Yahoo! Labs Bangalore"
+120785f9b4952734818245cc305148676563a99b,Diagnostic automatique de l'état dépressif(Classification of depressive moods),"Diagnostic automatique de l’état dépressif
+S. Cholet
+H. Paugam-Moisy
+Laboratoire de Mathématiques Informatique et Applications (LAMIA - EA 4540)
+Université des Antilles, Campus de Fouillole - Guadeloupe
+Résumé
+Les troubles psychosociaux sont un problème de santé pu-
+lique majeur, pouvant avoir des conséquences graves sur
+le court ou le long terme, tant sur le plan professionnel que
+personnel ou familial. Le diagnostic de ces troubles doit
+être établi par un professionnel. Toutefois, l’IA (l’Intelli-
+gence Artificielle) peut apporter une contribution en four-
+nissant au praticien une aide au diagnostic, et au patient
+un suivi permanent rapide et peu coûteux. Nous proposons
+une approche vers une méthode de diagnostic automatique
+de l’état dépressif à partir d’observations du visage en
+temps réel, au moyen d’une simple webcam. A partir de
+vidéos du challenge AVEC’2014, nous avons entraîné un
+lassifieur neuronal à extraire des prototypes de visages
+selon différentes valeurs du score de dépression de Beck"
+12c5cd899d5ed85741197baed191f3b8b7fac495,Altered intrinsic functional connectivity of anterior and posterior insula regions in high-functioning participants with autism spectrum disorder.,"Altered Intrinsic Functional Connectivity of
+Anterior and Posterior Insula Regions in
+High-Functioning Participants With
+Autism Spectrum Disorder
+Sjoerd J.H. Ebisch,1,2* Vittorio Gallese,3,4 Roel M. Willems,5
+Dante Mantini,1,2,6 Wouter B. Groen,7 Gian Luca Romani,1,2
+Jan K. Buitelaar,7 and Harold Bekkering8
+Department of Clinical Sciences and Bioimaging, G. d’Annunzio University Chieti-Pescara,
+Institute for Advanced Biomedical Technologies (ITAB), G. d’Annunzio Foundation, Chieti, Italy
+Department of Neuroscience, Section of Physiology, Parma University, Parma, Italy
+Chieti, Italy
+Italian Institute of Technology (IIT), Section of Parma, Italy
+5Donders Institute for Brain, Cognition and Behavior, Centre for Cognitive Neuroimaging,
+6Laboratory for Neuro-Psychophysiology, K.U. Leuven Medical School, Leuven, Belgium
+7Department of Psychiatry, Radboud University Medical Centre and Karakter University Centre for
+Radboud University, Nijmegen, The Netherlands
+Child and Adolescent Psychiatry, Nijmegen, The Netherlands
+8Donders Institute for Brain, Cognition and Behavior, Centre for Cognition,
+Radboud University, Nijmegen, The Netherlands"
+122c674f264c53d762af841669209e131b49b3f2,Non-Rigid Structure from Motion for Building 3D Face Model,"Faculty of Informatics
+Institute for Anthropomatics
+Chair Prof. Dr.-Ing. R. Stiefelhagen
+Facial Image Processing and Analysis Group
+Non-Rigid Structure from Motion
+for Building 3D Face Model
+DIPLOMA THESIS OF
+Chengchao Qu
+ADVISORS
+Dipl.-Inform. Hua Gao
+Dr.-Ing. Hazım Kemal Ekenel
+MARCH 2011
+KIT – University of the State of Baden-Württemberg and National Laboratory of the Helmholtz Association
+www.kit.edu"
+127316fbe268c78c519ceb23d41100e86639418a,CNN Features Off-the-Shelf: An Astounding Baseline for Recognition,"CNN Features off-the-shelf: an Astounding Baseline for Recognition
+Ali Sharif Razavian Hossein Azizpour
+Josephine Sullivan Stefan Carlsson
+CVAP, KTH (Royal Institute of Technology)
+Stockholm, Sweden"
+123a9768700433c405bd7266f4c57ca8222e7fe1,Expanded Parts Model for Human Attribute and Action Recognition in Still Images,"Expanded Parts Model for Human Attribute and Action
+Recognition in Still Images
+Gaurav Sharma1,2, Fr´ed´eric Jurie1, Cordelia Schmid2
+GREYC, CNRS UMR 6072, University of Caen Basse-Normandie
+LEAR, INRIA Grenoble Rhˆone-Alpes
+inria}.fr"
+12ebeb2176a5043ad57bc5f3218e48a96254e3e9,Traffic Road Sign Detection and Recognition for Automotive Vehicles,"International Journal of Computer Applications (0975 – 8887)
+Volume 120 – No.24, June 2015
+Traffic Road Sign Detection and Recognition for
+Automotive Vehicles
+Md. Safaet Hossain
+Zakir Hyder
+Department of Electrical Engineering and
+Department of Electrical Engineering and
+Computer Science North South University, Dhaka
+Computer Science North South University, Dhaka
+Bangladesh
+Bangladesh"
+12150d8b51a2158e574e006d4fbdd3f3d01edc93,Deep End2End Voxel2Voxel Prediction,"Deep End2End Voxel2Voxel Prediction
+Du Tran, Lubomir Bourdev, Rob Fergus, Lorenzo
+Torresani, Manohar Paluri
+Presented by: Ahmed Osman
+Ahmed Osman"
+12ba7c6f559a69fbfaacf61bfb2f8431505b09a0,DocFace+: ID Document to Selfie Matching,"DocFace+: ID Document to Selfie* Matching
+Yichun Shi, Student Member, IEEE, and Anil K. Jain, Life Fellow, IEEE"
+12d8730da5aab242795bdff17b30b6e0bac82998,Persistent Evidence of Local Image Properties in Generic ConvNets,"Persistent Evidence of Local Image Properties in Generic ConvNets
+Ali Sharif Razavian, Hossein Azizpour,
+Atsuto Maki, Josephine Sullivan, Carl Henrik Ek, and Stefan Carlsson
+CVAP, KTH (Royal Institute of Technology), Stockholm, SE-10044"
+12831caca9674e0ab3fe2fc02a447ddb5a372994,Deep Aesthetic Quality Assessment With Semantic Information,"Deep Aesthetic Quality Assessment with Semantic
+Information
+Yueying Kao, Ran He, Kaiqi Huang"
+8c13f2900264b5cf65591e65f11e3f4a35408b48,A Generic Face Representation Approach for Local Appearance Based Face Verification,"A GENERIC FACE REPRESENTATION APPROACH FOR
+LOCAL APPEARANCE BASED FACE VERIFICATION
+Hazim Kemal Ekenel, Rainer Stiefelhagen
+Interactive Systems Labs, Universität Karlsruhe (TH)
+76131 Karlsruhe, Germany
+{ekenel,
+web: http://isl.ira.uka.de/face_recognition/"
+8ca29760334b7bdeaa7ad7ae4ff54c3b24420dd2,Analysis of Dynamic Characteristics of Spontaneous Facial Expressions,"Analysis of Dynamic Characteristics of Spontaneous Facial Expressions
+Masashi Komori Yoshitaro Onishi
+Division of Information and Computer Sciences, Osaka Electro-Communication University,
+8-8 Hatsucho, Neyagawa, Osaka, 572-8530, JAPAN"
+8c5fa29c9bcab3d518fdf355e9da62fb0b58905e,Exploiting Semantics in Adversarial Training for Image-Level Domain Adaptation,"Exploiting Semantics in Adversarial Training for
+Image-Level Domain Adaptation
+st Pierluigi Zama Ramirez
+University of Bologna
+nd Alessio Tonioni
+University of Bologna
+rd Luigi Di Stefano
+University of Bologna"
+8c955f3827a27e92b6858497284a9559d2d0623a,Facial Expression Recognition under Noisy Environment Using Gabor Filters,"Buletinul Ştiinţific al Universităţii ""Politehnica"" din Timişoara
+Seria ELECTRONICĂ şi TELECOMUNICAŢII
+TRANSACTIONS on ELECTRONICS and COMMUNICATIONS
+Tom 53(67), Fascicola 1-2, 2008
+Facial Expression Recognition under Noisy Environment
+Using Gabor Filters
+Ioan Buciu1, I. Nafornita2, I. Pitas3"
+8cd61bb3469aa253d4411ef2295b50683a031d17,Random Occlusion-recovery for Person Re-identification,"Random Occlusion-recovery for Person Re-identification
+Institute of Machine Learning and Systems Biology, School of Electronics and Information Engineering, Tongji
+Di Wu1, Kun Zhang1 and De-Shuang Huang1
+University, Caoan Road 4800, Shanghai 201804, China"
+8c30b154811453b6a1017bb27e3becefde44f689,Bibliometric profile of the global scientific research on autism spectrum disorders,"Sweileh et al. SpringerPlus (2016) 5:1480
+DOI 10.1186/s40064-016-3165-6
+RESEARCH
+Bibliometric profile of the global
+scientific research on autism
+spectrum disorders
+Waleed M. Sweileh1*, Samah W. Al‑Jabi2, Ansam F. Sawalha1 and Sa’ed H. Zyoud2
+Open Access"
+8c7f4c11b0c9e8edf62a0f5e6cf0dd9d2da431fa,Dataset Augmentation for Pose and Lighting Invariant Face Recognition,"Dataset Augmentation for Pose and Lighting
+Invariant Face Recognition
+Daniel Crispell∗, Octavian Biris∗, Nate Crosswhite†, Jeffrey Byrne†, Joseph L. Mundy∗
+Vision Systems, Inc.
+Systems and Technology Research"
+8cf679ef0ea28557acb86546e4b1b1a617d1c698,Long Term Multi-Target Tracking based on Detection and Data Association,"International Journal of Electronics and Electrical Engineering Vol. 1, No. 3, September, 2013
+Long Term Multi-Target Tracking based on
+Detection and Data Association
+Ai Min Li
+Shandong Polytechnic University, Jinan, China
+Email:
+Pil Seong Park
+University of Suwon, Suwon, Korea
+Email:"
+8c0f38c7c07c631d0b5414a84dda2992bdc4514f,Development of an Efficient Face Recognition System based on Linear and Nonlinear Algorithms,"Development of an Efficient Face Recognition System based on Linear and Nonlinear Algorithms
+{tag} {/tag}
+International Journal of Computer Applications
+Foundation of Computer Science (FCS), NY, USA
+Volume 134
+Number 7
+Year of Publication: 2016
+Authors:
+Filani Araoluwa S., Adetunmbi Adebayo O.
+10.5120/ijca2016907932
+{bibtex}2016907932.bib{/bibtex}"
+8cc23e554d98522b377d227dc78e9382a0ed35e5,"Bootstrap, Review, Decode: Using Out-of-Domain Textual Data to Improve Image Captioning","Bootstrap, Review, Decode: Using Out-of-Domain Textual Data
+to Improve Image Captioning
+Wenhu Chen
+RWTH Aachen
+Aurelien Lucchi
+ETH Zurich
+Thomas Hofmann
+ETH Zurich"
+8c5852530abaefcdce805d1e339677351c6ec7fe,Lernen situationsunabhängiger Personenerkennung,"{ HAUPTBEITRAG / SITUATIONSUNABHÄNGIGE PERSONENERKENNUNG
+Lernen situationsunabhängiger
+Personenerkennung
+Marco K. Müller · Michael Tremer
+Christian Bodenstein · Rolf P. Würtz
+Einleitung
+In den vergangenen 25 Jahren hat sich automati-
+sche Gesichtserkennung von einem akademischen
+Projekt zu einer reifen Technik entwickelt. Bei der
+Frage, ob es sich auf zwei Fotos um die gleiche Per-
+son handelt, sind kommerzielle Systeme inzwischen
+sogar Menschen überlegen [6]. Dies ist nicht mit der
+Erkennung von bekannten Personen zu verwech-
+seln, die der Mensch in sehr vielen verschiedenen
+Situationen auch nach vielen Jahren wiedererkennen
+kann.
+Es ist eine zentrale Aufgabe des Computersehens,
+ekannte Objekte in Bildern wiederzuerkennen. Dies
+ist schwierig, weil dasselbe Objekt in verschiede-
+nen Situationen sehr verschiedene Bilder erzeugt."
+8c244417db2082f4d5897548e72ef304ae886e52,Tree Based Space Partition of Trajectory Pattern Mining For Frequent Item Sets,"Australian Journal of Basic and Applied Sciences, 10(2) Special 2016, Pages: 250-261
+Australian Journal of Basic and Applied Sciences
+AUSTRALIAN JOURNAL OF BASIC AND
+AUSTRALIAN JOURNAL OF BASIC AND
+APPLIED SCIENCES
+ISSN:1991-8178 EISSN: 2309-8414
+Journal home page: www.ajbasweb.com
+Tree Based Space Partition of Trajectory Pattern Mining For Frequent
+Tree Based Space Partition of Trajectory Pattern Mining For Frequent
+Tree Based Space Partition of Trajectory Pattern Mining For Frequent
+Item Sets
+nd Engineering , Alagappa University, Tamil Nadu, India.
+P.Geetha and 2 E. Ramaraj
+Ph.D scholar, Alagappa University.
+Department of Computer Science and Engineering
+Address For Correspondence:
+P.Geetha, Ph.D scholar, Alagappa University.
+Ph.D scholar, Alagappa University.
+A R T I C L E I N F O
+Article history:"
+8ce9b7b52d05701d5ef4a573095db66ce60a7e1c,Structured Sparse Subspace Clustering: A Joint Affinity Learning and Subspace Clustering Framework,"Structured Sparse Subspace Clustering: A Joint
+Affinity Learning and Subspace Clustering
+Framework
+Chun-Guang Li, Chong You, and Ren´e Vidal"
+8cb6daba2cb1e208e809633133adfee0183b8dd2,Know Before You Do: Anticipating Maneuvers via Learning Temporal Driving Models,"Know Before You Do: Anticipating Maneuvers
+via Learning Temporal Driving Models
+Ashesh Jain, Hema S Koppula, Bharad Raghavan, Shane Soh, Ashutosh Saxena
+Cornell University and Stanford University"
+8cb4349f7d4b04a2e98b727524d3699bad50de1c,SOCIAL GAME EPITOME VERSUS AUTOMATIC VISUAL ANALYSIS Paper ID ***,"SOCIAL GAME EPITOME VERSUS AUTOMATIC VISUAL ANALYSIS
+Paper ID ***"
+8c6427cc1f4e1bbe5d6da34a4511842361f4fbb6,Hypothesis Only Baselines in Natural Language Inference,"Hypothesis Only Baselines in Natural Language Inference
+Adam Poliak1 Jason Naradowsky1 Aparajita Haldar1,2
+Rachel Rudinger1 Benjamin Van Durme1
+Johns Hopkins University 2BITS Pilani, Goa Campus, India"
+8c3c699f568ee825eefc4dc44b71c8b0bc592cca,Binary Multi-View Clustering.,"Binary Multi-View Clustering
+Zheng Zhang†, Li Liu†, Fumin Shen, Heng Tao Shen, Ling Shao*"
+8c6c0783d90e4591a407a239bf6684960b72f34e,SESSION KNOWLEDGE ENGINEERING AND MANAGEMENT + KNOWLEDGE ACQUISITION Chair(s),"SESSION
+KNOWLEDGE ENGINEERING AND
+MANAGEMENT + KNOWLEDGE ACQUISITION
+Chair(s)
+Int'l Conf. Information and Knowledge Engineering | IKE'13 |1"
+8cc07ae9510854ec6e79190cc150f9f1fe98a238,Using Deep Learning to Challenge Safety Standard for Highly Autonomous Machines in Agriculture,"Article
+Using Deep Learning to Challenge Safety Standard
+for Highly Autonomous Machines in Agriculture
+Kim Arild Steen *,†, Peter Christiansen †, Henrik Karstoft and Rasmus Nyholm Jørgensen
+Department of Engineering, Aarhus University, Finlandsgade 22 8200 Aarhus N, Denmark;
+(P.C.); (H.K.); (R.N.J.)
+* Correspondence: Tel.: +45-3116-8628
+These authors contributed equally to this work.
+Academic Editors: Francisco Rovira-Más and Gonzalo Pajares Martinsanz
+Received: 18 December 2015; Accepted: 2 February 2016; Published: 15 February 2016"
+8599560c50a55e75928dba6bbcbb98ef180a0798,Vocabulary Length Experiments for Binary Image Classification Using Bov Approach,"Signal & Image Processing : An International Journal (SIPIJ) Vol.4, No.6, December 2013
+VOCABULARY LENGTH EXPERIMENTS FOR BINARY
+IMAGE CLASSIFICATION USING BOV APPROACH
+S.P.Vimal1, Eshaan Puri2 and P.K.Thiruvikiraman3
+,2Department of Computer Science and Information Systems
+Birla Institute of Technology and Science, Pilani, Rajasthan, India
+Department of Physics, Birla Institute of Technology and Science,
+Hyderabad Campus, Andra Pradesh, India"
+8509abbde2f4b42dc26a45cafddcccb2d370712f,A way to improve precision of face recognition in SIPP without retrain of the deep neural network model,"Improving precision and recall of face recognition in SIPP with combination of
+modified mean search and LSH
+Xihua.Li"
+8529c0b98ab4f6eb21715a54395420988dd69633,Adapting Semantic Segmentation Models for Changes in Illumination and Camera Perspective,"Adapting Semantic Segmentation Models for Changes
+in Illumination and Camera Perspective
+Wei Zhou, Alex Zyner, Stewart Worrall, and Eduardo Nebot"
+858ddff549ae0a3094c747fb1f26aa72821374ec,"Survey on RGB, 3D, Thermal, and Multimodal Approaches for Facial Expression Recognition: History, Trends, and Affect-Related Applications","Survey on RGB, 3D, Thermal, and Multimodal
+Approaches for Facial Expression Recognition:
+History, Trends, and Affect-related Applications
+Ciprian A. Corneanu, Marc Oliu, Jeffrey F. Cohn, and Sergio Escalera"
+851f3dcfde59313dc2c8b87314f5a191d82194f4,Multiview Graphical Models for Tracking Occluded Objects,"Volume 3, Issue 10, October 2013 ISSN: 2277 128X
+International Journal of Advanced Research in
+Computer Science and Software Engineering
+Research Paper
+Available online at: www.ijarcsse.com
+Multiview Graphical Models for Tracking Occluded Objects
+Bharath
+Student,
+Dept.of CSE,
+Jntuk, Kakinada, India
+Smt. D.Neelima
+Asst.Professor,
+Dept.of CSE,
+Jntuk, Kakinada, India"
+85fd2bda5eb3afe68a5a78c30297064aec1361f6,"Are You Smiling, or Have I Seen You Before? Familiarity Makes Faces Look Happier.","702003 PSSXXX10.1177/0956797617702003Carr et al.Are You Smiling, or Have I Seen You Before?
+research-article2017
+Research Article
+Are You Smiling, or Have I Seen You
+Before? Familiarity Makes Faces Look
+Happier
+2017, Vol. 28(8) 1087 –1102
+© The Author(s) 2017
+Reprints and permissions:
+sagepub.com/journalsPermissions.nav
+DOI: 10.1177/0956797617702003
+https://doi.org/10.1177/0956797617702003
+www.psychologicalscience.org/PS
+Evan W. Carr1, Timothy F. Brady2, and Piotr Winkielman2,3,4
+Columbia Business School, Columbia University; 2Psychology Department, University of California, San Diego;
+Behavioural Science Group, Warwick Business School, University of Warwick; and 4Faculty of Psychology,
+SWPS University of Social Sciences and Humanities"
+856b8576999517c0cb7d95aef0159432604a8447,Weighted Heterogeneous Learning for Deep Convolutional Neural Network Based Facial Image Analysis,The 19th Meeting on Image Recognition and Understanding
+85955fe6cdf4f9f35fc9eab6cc4fccbb819e68a1,3D Face Reconstruction by Learning from Synthetic Data,"3D Face Reconstruction by Learning from Synthetic Data
+Elad Richardson*
+Matan Sela*
+Ron Kimmel
+Department of Computer Science, Technion - Israel Institute of Technology"
+8558ea46c8f7e56c57073b27408c6638e81293f0,Morphable crowds,
+858901405086056361f8f1839c2f3d65fc86a748,On Tensor Tucker Decomposition: the Case for an Adjustable Core Size,"ON TENSOR TUCKER DECOMPOSITION: THE CASE FOR AN
+ADJUSTABLE CORE SIZE
+BILIAN CHEN ∗, ZHENING LI † , AND SHUZHONG ZHANG ‡"
+851e78906e1307773b664953bf2830f32b28511f,Lie Algebra-Based Kinematic Prior for 3D Human Pose Tracking,"Lie Algebra-Based Kinematic Prior for 3D Human Pose Tracking
+Edgar Simo-Serra, Carme Torras, and Francesc Moreno-Noguer
+Institut de Rob`otica i Inform`atica Industrial (CSIC-UPC). Barcelona, Spain"
+8562ca7f86e7cc144aa2d34a9cce41431b9e13e9,Master Thesis Report: Face Recognition for Cognitive Robots,"Face Recognition for Cognitive
+Robots
+F. Gaisser
+BioMechanical Enginering"
+85401b669a989da15bb3d2b37d4598c21d9d061b,"The effect of intranasal oxytocin versus placebo treatment on the autonomic responses to human sounds in autism: a single-blind, randomized, placebo-controlled, crossover design study","Lin et al. Molecular Autism 2014, 5:20
+http://www.molecularautism.com/content/5/1/20
+Open Access
+R ES EAR CH
+The effect of intranasal oxytocin versus placebo
+treatment on the autonomic responses to human
+sounds in autism: a single-blind, randomized,
+placebo-controlled, crossover design study
+I-Fan Lin1*, Makio Kashino1,2, Haruhisa Ohta3, Takashi Yamada3, Masayuki Tani3, Hiromi Watanabe3, Chieko Kanai3,
+Taisei Ohno3, Yuko Takayama3, Akira Iwanami3 and Nobumasa Kato3,4"
+8575adafc04a7915bd71c3733e379577da0c4406,Sistema tutor afectivo para la enseñanza de lógica algorítmica y programación,"Sistema tutor afectivo para la enseñanza de lógica
+algorítmica y programación
+Ramón Zatarain-Cabada1, María Lucia Barrón-Estrada1,
+José Mario Ríos-Félix1, Giner Alor-Hernandez2
+Instituto Tecnológico de Culiacán, Culiacán Sinaloa,
+México
+Instituto Tecnológico de Orizaba,
+División de Estudios de Posgrado e Investigación, Orizaba, Veracruz,
+México
+{rzatarain, lbarron,
+Resumen. La creciente demanda de herramientas de software que motiven y
+apoyen a los estudiantes en el aprendizaje de diseño e implementación de
+algoritmos y programas, ha motivado la creación de este tipo de sistemas de
+software. En este artículo presentamos un nuevo e innovador sistema tutor
+afectivo de lógica algorítmica y programación, basado en la técnica de bloques.
+Nuestro enfoque combina la interfaz de Google Blockly con técnicas de
+gamificación y ejercicios que son monitoreados para obtener el estado afectivo
+del estudiante. Dependiendo de la emoción manifestada (aburrido, enganchado,
+frustrado y neutral), el sistema evalúa una serie de variables, para determinar si
+el estudiante requiere asistencia. En base a las pruebas preliminares con varios"
+850c5d1f97eee47a1fdaefc0894b52e51a3145fc,Improved Semantic Stixels via Multimodal Sensor Fusion,"Improved Semantic Stixels via
+Multimodal Sensor Fusion
+Florian Piewak(cid:63)1,2, Peter Pinggera1, Markus Enzweiler1,
+David Pfeiffer(cid:63)(cid:63)1, and Marius Z¨ollner2,3
+Daimler AG, R&D, Stuttgart, Germany
+Karlsruhe Institute of Technology (KIT), Karlsruhe, Germany
+Forschungszentrum Informatik (FZI), Karlsruhe, Germany"
+85188c77f3b2de3a45f7d4f709b6ea79e36bd0d9,"Combined model for detecting, localizing, interpreting and recognizing faces","Author manuscript, published in ""Workshop on Faces in 'Real-Life' Images: Detection, Alignment, and Recognition, Marseille :
+France (2008)"""
+8582d5307793643e5b6a5e4354ee1ba32eff3809,Techniques for Face Detection & Recognition System-,"IOSR Journal of Computer Engineering (IOSR-JCE)
+e-ISSN: 2278-0661, p- ISSN: 2278-8727Volume 15, Issue 5 (Nov. - Dec. 2013), PP 01-12
+www.iosrjournals.org
+Techniques for Face Detection & Recognition System-
+Comprehensive Review
+Vandana S.Bhat1, Dr. J. D. Pujari2
+Department of Information Science & Engineering, SDMCET, Dharwad, INDIA
+Department of Information Science & Engineering, SDMCET, Dharwad, INDIA"
+853d6cfe9c08c971979d1dd138bb21c25ff750bf,Comparison of MultiView Face Recognition using DCT and Hybrid DWT of Score Fusion under Uncontrolled Illumination Variation,"International Journal of Computer Applications (0975 – 8887)
+Volume 96– No.4, June 20143
+Comparison of Multi-View Face Recognition using DCT
+and Hybrid DWT of Score Fusion under Uncontrolled
+Illumination Variation
+Manisha J Kasar
+M.Tech Student (CE)
+Computer Department, MPSTME
+NMIMS, Shirpur, Dist :Dhule, Maharashtra,
+India
+Nitin S.Choubey
+P.hd (Computer)
+Computer Department, MPSTME
+NMIMS, Shirpur, Dist :Dhule, Maharashtra,
+India
+is one of
+for matching. First,"
+853feff8674f4a856e6568c9ddce5eace014de8c,NISTIR 8045 Performance Evaluation Methods for Human Detection and Tracking Systems for Robotic Applications,"NISTIR 8045
+Performance Evaluation Methods for
+Human Detection and Tracking
+Systems for Robotic Applications
+Michael Shneier
+Tsai Hong
+Geraldine Cheok
+Kamel Saidi
+Will Shackleford
+This publication is available free of charge from:
+http://dx.doi.org/10.6028/NIST.IR.8045"
+85489639f395608174f686d634d6e27ef44c9d77,Social ‘wanting’ dysfunction in autism: neurobiological underpinnings and treatment implications,"Kohls et al. Journal of Neurodevelopmental Disorders 2012, 4:10
+http://www.jneurodevdisorders.com/content/4/1/10
+RE VI E W
+Open Access
+Social ‘wanting’ dysfunction in autism:
+neurobiological underpinnings and
+treatment implications
+Gregor Kohls*, Coralie Chevallier, Vanessa Troiani and Robert T Schultz"
+858b51a8a8aa082732e9c7fbbd1ea9df9c76b013,Can Computer Vision Problems Benefit from Structured Hierarchical Classification?,"Can Computer Vision Problems Benefit from
+Structured Hierarchical Classification?
+Thomas Hoyoux1, Antonio J. Rodr´ıguez-S´anchez2, Justus H. Piater2, and
+Sandor Szedmak2
+INTELSIG, Montefiore Institute, University of Li`ege, Belgium
+Intelligent and Interactive Systems, Institute of Computer Science, University of
+Innsbruck, Austria"
+854890f35fc7955d94777395f6a66da433426d98,Human Gaze Following for Human-Robot Interaction,"Human Gaze Following for Human-Robot Interaction
+Akanksha Saran1, Srinjoy Majumdar2, Elaine Schaertl Short2, Andrea Thomaz2 and Scott Niekum1"
+854f9fb21853d1e50302dddcc1fd5c2e933ed8f4,Information Constraints on Auto-Encoding Variational Bayes,"Information Constraints on Auto-Encoding Variational Bayes
+Romain Lopez1, Jeffrey Regier1, Michael I. Jordan1,2, and Nir Yosef1,3,4
+{romain_lopez, regier,
+Department of Electrical Engineering and Computer Sciences, University of California, Berkeley
+Department of Statistics, University of California, Berkeley
+Ragon Institute of MGH, MIT and Harvard
+Chan-Zuckerberg Biohub"
+8566231abd7e5bc71ee0bc0da84b8d76ce07a501,On The Stability of Video Detection and Tracking,"On The Stability of Video Detection and Tracking
+Hong Zhang
+Chinese University of Hong Kong
+Naiyan Wang
+TuSimple LLC"
+8518b501425f2975ea6dcbf1e693d41e73d0b0af,Relative Hidden Markov Models for Evaluating Motion Skill,"Relative Hidden Markov Models for Evaluating Motion Skills
+Qiang Zhang and Baoxin Li
+Computer Science and Engineering
+Arizona State Univerisity, Tempe, AZ 85281"
+85c1926ea23ff4f472774fec8c6a993bb499e4f4,Eigenbands fusion for frontal face recognition,"EIGENBANDS FUSION FOR FRONTAL FACE RECOGNITION
+George D. C. Cavalcanti1s2 and Edson C. B. Cawalho Filho’
+’ UFPE-Universidade Federal de Pemambuco, 50732-970, Recife, PE, Brad
+FIR-Faculdade Integrada do Recife 50720-635 Recife, PE, Brad"
+853bd61bc48a431b9b1c7cab10c603830c488e39,Learning Face Representation from Scratch,"Learning Face Representation from Scratch
+Dong Yi, Zhen Lei, Shengcai Liao and Stan Z. Li
+Center for Biometrics and Security Research & National Laboratory of Pattern Recognition
+Institute of Automation, Chinese Academy of Sciences (CASIA)
+dong.yi, zlei, scliao,"
+857fface5ccd0fd4f30d6b1b3d2cd25a2b471501,Head pose estimation via probabilistic high-dimensional regression,"Head Pose Estimation Via Probabilistic
+High-Dimensional Regression
+Vincent Drouard 1 Sil`eye Ba 1 Georgios Evangelidis 1
+Antoine Deleforge 2 Radu Horaud 1
+Team Perception - Inria Grenoble Rhˆone-Alpes, France
+Friedrich-Alexander-Universit¨at, Erlangen, Germany
+September 28, 2015, Qu´ebec, Canada
+Work supported by EU-FP7 ERC Advanced Grant VHIA (#340113) and STREP project EARS (#609645)"
+854dbb4a0048007a49df84e3f56124d387588d99,Spatial-Temporal Recurrent Neural Network for Emotion Recognition,"JOURNAL OF LATEX CLASS FILES, VOL. 13, NO. 9, SEPTEMBER 2014
+Spatial-Temporal Recurrent Neural Network for
+Emotion Recognition
+Tong Zhang, Wenming Zheng*, Member, IEEE, Zhen Cui*, Yuan Zong and Yang Li"
+8569fc88a3d1ac8b873872becb2ee8bc01dc73bc,Deep-Person: Learning Discriminative Deep Features for Person Re-Identification,"Deep-Person: Learning Discriminative Deep Features
+for Person Re-Identification
+Xiang Bai, Mingkun Yang, Tengteng Huang,
+Zhiyong Dou, Rui Yu, Yongchao Xu∗
+School of Electronic Information and Communications, Huazhong University of Science and
+Technology (HUST), Wuhan, 430074, China"
+85387549277d6131dc8596ffacc7a21aeee0c6d1,Attribute Enhanced Face Aging with Wavelet-based Generative Adversarial Networks,"Attribute Enhanced Face Aging with Wavelet-based Generative Adversarial
+Networks
+Yunfan Liu, Qi Li, and Zhenan Sun∗
+Center for Research on Intelligent Perception and Computing, CASIA
+National Laboratory of Pattern Recognition, CASIA
+{qli,"
+85cad2b23e2ed7098841285bae74aafbff921659,Pa-gan: Improving Gan Training by Progressive Augmentation,"Under review as a conference paper at ICLR 2019
+PA-GAN: IMPROVING GAN TRAINING BY
+PROGRESSIVE AUGMENTATION
+Anonymous authors
+Paper under double-blind review"
+1d7df7000a3e8fafa21679db4efe2ffedcfe0335,And the Committee on Graduate Studies of Stanford University in Partial Fulfillment of the Requirements for the Degree of Doctor of Philosophy,"SEMANTIC IMAGE UNDERSTANDING: FROM THE WEB, IN
+LARGE SCALE, WITH REAL-WORLD CHALLENGING DATA
+A DISSERTATION
+SUBMITTED TO THE DEPARTMENT OF COMPUTER SCIENCE
+AND THE COMMITTEE ON GRADUATE STUDIES
+OF STANFORD UNIVERSITY
+IN PARTIAL FULFILLMENT OF THE REQUIREMENTS
+FOR THE DEGREE OF
+DOCTOR OF PHILOSOPHY
+Jia Li
+November 2011"
+1d4c2dd3996cb3d87da6c35d72572637d3175ea5,Toward Storytelling From Visual Lifelogging: An Overview,"JOURNAL OF TRANSACTIONS ON HUMAN-MACHINE SYSTEMS JULY 2015
+Towards Storytelling from
+Visual Lifelogging: An Overview
+Marc Bola˜nos∗, Mariella Dimiccoli∗, and Petia Radeva"
+1d5901662dc4fa5be2375f35be07b4116fd450ea,The Effects of Prediction on the Perception for Own-Race and Other-Race Faces,"RESEARCH ARTICLE
+The Effects of Prediction on the Perception
+for Own-Race and Other-Race Faces
+Guangming Ran1,2, Qi Zhang3, Xu Chen1,2*, Yangu Pan1,2
+. Faculty of Psychology, Southwest University (SWU), Chongqing, 400715, China, 2. Research Center of
+Mental Health Education, Southwest University (SWU), Chongqing, 400715, China, 3. School of Education
+Science, Guizhou Normal University (GNU), Guizhou, 550001, China"
+1d524c57214384ad6a003c54b1918130744b69d2,Identifying Human-Object Interactions in Motionless Images by Modeling the Mutual Context of Objects and Human Poses,"International Journal of Science and Research (IJSR)
+ISSN (Online): 2319-7064
+Impact Factor (2012): 3.358
+Identifying Human-Object Interactions in
+Motionless Images by Modeling the Mutual Context
+of Objects and Human Poses
+A. N. Bhagat1, N. B. Pokale2
+Department of Computer Engineering, TSSM,s Bhivrabai Sawant College Of Engineering and Research, Narhe, Pune, Maharashtra, India.
+Associate Professor, Department of Computer Engineering, TSSM,s Bhivrabai Sawant College Of Engineering and Research, Narhe, Pune,
+Maharashtra, India."
+1dc45403839d6aefe65c6e7f2179d5ea697dfeac,DCT-based features for categorisation of social media in compressed domain,"DCT-based Features for Categorisation of Social
+Media in Compressed Domain
+Sebastian Schmiedeke, Pascal Kelm, Thomas Sikora
+Communication Systems Group
+Technische Universit¨at Berlin
+Germany"
+1d0a6759de0d55d15439b0367f0aa49c1e248c5c,"Networking in Autism: Leveraging Genetic, Biomarker and Model System Findings in the Search for New Treatments","...............................................................................................................................................................
+REVIEW
+Networking in Autism: Leveraging Genetic, Biomarker
+and Model System Findings in the Search for New
+Treatments
+Jeremy Veenstra-VanderWeele1,2,3,4 and Randy D Blakely*,1,3,4
+Department of Psychiatry, Vanderbilt University School of Medicine, Nashville, TN, USA; 2Department of Pediatrics,
+Vanderbilt University School of Medicine, Nashville, TN, USA; 3Department of Pharmacology, Vanderbilt University School of
+Medicine, Nashville, TN, USA; 4Center for Molecular Neuroscience, Vanderbilt University School of Medicine, Nashville,
+TN, USA
+Autism Spectrum Disorder (ASD) is a common neurodevelopmental disorder affecting approximately 1% of children. ASD is
+defined by core symptoms in two domains: negative symptoms of impairment in social and communication function, and
+positive symptoms of restricted and repetitive behaviors. Available treatments are inadequate for treating both core
+symptoms and associated conditions. Twin studies indicate that ASD susceptibility has a large heritable component. Genetic
+studies have identified promising leads, with converging insights emerging from single-gene disorders that bear ASD
+features, with particular interest in mammalian target of rapamycin (mTOR)-linked synaptic plasticity mechanisms. Mouse
+models of these disorders are revealing not only opportunities to model behavioral perturbations across species, but also
+evidence of postnatal rescue of brain and behavioral phenotypes. An intense search for ASD biomarkers has consistently
+pointed to elevated platelet serotonin (5-HT) levels and a surge in brain growth in the first 2 years of life. Following a review of
+the diversity of ASD phenotypes and its genetic origins and biomarkers, we discuss opportunities for translation of these"
+1d5d68bee741d81771e9224fe53806e85ed469aa,RATM: Recurrent Attentive Tracking Model,"RATM: Recurrent Attentive Tracking Model
+Samira Ebrahimi Kahou, Vincent Michalski, and Roland Memisevic"
+1d03698a46ff12fdfaf4811528b3e7961dfd2fe6,Fast Exact Max-Kernel Search,"Fast Exact Max-kernel Search
+Ryan R. Curtin
+Parikshit Ram
+Alexander G. Gray"
+1d7ecdcb63b20efb68bcc6fd99b1c24aa6508de9,The Hidden Sides of Names&#x2014;Face Modeling with First Name Attributes,"The Hidden Sides of Names—Face Modeling
+with First Name Attributes
+Huizhong Chen, Student Member, IEEE, Andrew C. Gallagher, Senior Member, IEEE, and
+Bernd Girod, Fellow, IEEE"
+1d9497450f60b874eb6ecbf82e3d0808a6fe236c,Nonconvex proximal splitting with computational errors∗,"Nonconvex proximal splitting with computational errors∗
+Suvrit Sra
+Max Planck Institute, T¨ubingen, Germany
+Introduction
+We study in this chapter large-scale nonconvex optimization problems with composite objective functions
+that are composed of a differentiable possibly nonconvex cost and a nonsmooth but convex regularizer.
+More precisely, we consider optimization problems of the form
+minimize Φ(x) := f (x) + r(x),
+where X ⊂ Rn is a compact convex set, f : Rn → R is a differentiable cost function and r : Rn → R is a
+closed convex function. Further, we assume that the gradient ∇ f is Lipschitz continuous on X (denoted
+f ∈ C1
+L(X )), i.e.,
+x ∈ X ,
+∃L > 0 s.t. (cid:107)∇ f (x) − ∇ f (y)(cid:107) ≤ L(cid:107)x − y(cid:107)
+for all
+x, y ∈ X .
+Throughout this chapter, (cid:107)·(cid:107) denotes the standard Euclidean norm.
+Problem (1) generalizes the more thoroughly studied class of composite convex optimization prob-
+lems [30], a class that has witnessed huge interest in machine learning, signal processing, statistics,
+nd other related areas. We refer the interested reader to [2, 3, 21, 37] for several convex examples"
+1df554e992baf60f2d0b7c1b563250ba19b8f8ff,3D Face Recognition Based on 3D Ridge Lines in Range Data,"-4244-1437-7/07/$20.00 ©2007 IEEE
+I - 137
+ICIP 2007"
+1d251acc459931d927f5befdfb5b9cdf643cd8bc,Bayesian Compression for Natural Language Processing,"Bayesian Compression for Natural Language Processing
+Nadezhda Chirkova1∗, Ekaterina Lobacheva1∗, Dmitry Vetrov1,2
+Samsung-HSE Laboratory, National Research University Higher School of Economics
+Samsung AI Center
+Moscow, Russia"
+1dca6a54d201dd56b41a5475aaf498a207083b0e,Ego-surfing first person videos,"IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE
+Ego-Surfing First-Person Videos
+Ryo Yonetani, Member, IEEE, Kris M. Kitani, Member, IEEE, and Yoichi Sato, Member, IEEE"
+1dd3a58ab363cb396bf36223fadc8d2341bfdb83,Picture: A probabilistic programming language for scene perception,"Picture: a probabilistic programming language for scene perception
+Tejas D Kulkarni1, Pushmeet Kohli2, Joshua B Tenenbaum1, Vikash Mansinghka1
+Brain and Cognitive Science, Computer Science and Artificial Intelligence Lab, Massachusetts Institute of Technology. 2Microsoft Research Cambridge.
+Probabilistic scene understanding systems aim to produce high-probability
+descriptions of scenes conditioned on observed images or videos, typically ei-
+ther via discriminatively trained models or generative models in an “analysis
+by synthesis” framework. Discriminative approaches lend themselves to fast,
+bottom-up inference methods and relatively knowledge-free, data-intensive
+training regimes, and have been remarkably successful on many recognition
+problems. Generative approaches hold out the promise of analyzing complex
+scenes more richly and flexibly, but have been less widely embraced for two
+main reasons: Inference typically depends on slower forms of approximate
+inference, and both model-building and inference can involve considerable
+problem-specific engineering to obtain robust and reliable results. These
+factors make it difficult to develop simple variations on state-of-the-art mod-
+els, to thoroughly explore the many possible combinations of modeling,
+representation, and inference strategies, or to richly integrate complemen-
+tary discriminative and generative modeling approaches to the same problem.
+More generally, to handle increasingly realistic scenes, generative approaches
+will have to scale not just with respect to data size but also with respect to"
+1dc94886ca1d4893208d38b18cb7ad1541a74b82,Weakly Supervised Training of Speaker Identification Models,"Weakly Supervised Training of Speaker Identification Models
+Martin Karu, Tanel Alum¨ae
+Department of Software Science
+Tallinn University of Technology, Estonia"
+1d9bd24e65345258259ee24332141e371c6e4868,Learning Image Descriptors with Boosting,"Learning Image Descriptors with Boosting
+Tomasz Trzcinski, Mario Christoudias, and Vincent Lepetit"
+1d1e78bb93590a86ecfd2516f4e5789cc05d76f5,Local Features and Generative Models,"FACE AUTHENTICATION BASED ON
+LOCAL FEATURES AND
+GENERATIVE MODELS
+Fabien Cardinaux (a)
+IDIAP–RR 05-85
+JANUARY 2006
+ESEARCHREPRORTIDIAPRue du Simplon 4IDIAP Research Institute1920 Martigny − Switzerlandwww.idiap.chTel: +41 27 721 77 11Email: Box 592Fax: +41 27 721 77 12"
+1d692f37c2594ddb30518da27bfc0f5044690d09,Learning Depth From Single Images With Deep Neural Network Embedding Focal Length,"Learning Depth from Single Images with Deep
+Neural Network Embedding Focal Length
+Lei He, Guanghui Wang (Senior Member, IEEE) and Zhanyi Hu"
+1d6905e88f64ac826344d89c51ad8daea3b95e0e,Monocular Object Orientation Estimation using Riemannian Regression and Classification Networks,"Noname manuscript No.
+(will be inserted by the editor)
+Monocular Object Orientation Estimation using
+Riemannian Regression and Classification Networks
+Siddharth Mahendran · Ming Yang Lu · Haider Ali · Ren´e Vidal
+the date of receipt and acceptance should be inserted later"
+1d59ffad091a5bffa5fe935b79f5bfc08d2e802d,Intensity Video Guided 4D Fusion for Improved Highly Dynamic 3D Reconstruction,"Intensity Video Guided 4D Fusion for
+Improved Highly Dynamic 3D Reconstruction
+Jie Zhang, Christos Maniatis, Luis Horna and Robert B. Fisher"
+1d53aebe67d0e088e2da587fd6b08c8e8ed7f45c,A Selection Module for Large-Scale Face Recognition Systems,"A Selection module for large-scale face
+recognition systems
+Giuliano Grossi, Raffaella Lanzarotti, and Jianyi Lin
+Dipartimento di Informatica, Universit`a degli Studi di Milano
+Via Comelico 39/41, Milano, Italy"
+1d4e1b4f37caf40dc70d211c6b2745195dfa6c3f,Facial Expression Recognition Using Interpolation Features,"Facial Expression Recognition Using Interpolation
+Features
+Jesús García-Ramírez, Ivan Olmos-Pineda, J. Arturo Olvera-López, and
+Manuel Martín-Ortíz
+Benemérita Universidad Autónoma de Puebla, Faculty of Computer Science, Puebla, México"
+1df314a1e4dce42fd9fab094b79a0f2a10ad0b03,People Detection in Fish-eye Top-views,
+1dca96fdcab180133644442df4ad78eeec1aa00b,Learning from Synthetic Humans,"Learning from Synthetic Humans
+G¨ul Varol∗†
+Javier Romero‡
+Xavier Martin†
+Naureen Mahmood‡
+Michael Black‡
+Ivan Laptev∗
+Cordelia Schmid†"
+1d0dd20b9220d5c2e697888e23a8d9163c7c814b,Boosted Metric Learning for Efficient Identity-Based Face Retrieval,"NEGREL ET AL.: BOOSTED METRIC LEARNING FOR FACE RETRIEVAL
+Boosted Metric Learning for Efficient
+Identity-Based Face Retrieval
+Romain Negrel
+Alexis Lechervy
+Frederic Jurie
+GREYC, CNRS UMR 6072, ENSICAEN
+Université de Caen Basse-Normandie
+France"
+1d4f56a9bb093c52569917537a93c7671db28e6f,Real-time Tracking of Player Identities in Team Sports,"Real-time Tracking of Player
+Identities in Team Sports
+Dissertation
+Nicolai Baron von Hoyningen-Huene"
+1dc4b5e93233fc632b070c8ff282ef0fe9141f64,2-D Structure-Based Gait Recognition in Video Using Incremental GMM-HMM,"-D Structure-Based Gait Recognition in Video
+Using incremental GMM-HMM
+Rui Pu1, Yunhong Wang1
+Laboratory of Intelligence Recognition and Image Processing, Beijing Key
+Laboratory of Digital Media, School of Computer Science and Engineering, Beihang
+University, Beijing 100191, China"
+1d776bfe627f1a051099997114ba04678c45f0f5,Deployment of Customized Deep Learning based Video Analytics On Surveillance Cameras,"Deployment of Customized Deep Learning based
+Video Analytics On Surveillance Cameras
+Pratik Dubal(cid:63), Rohan Mahadev(cid:63), Suraj Kothawade(cid:63),
+Kunal Dargan, and Rishabh Iyer
+AitoeLabs (www.aitoelabs.com)"
+1d9306ea0f0239c88aecbcf0a48a11c964a0fcd4,3D facial expression recognition using maximum relevance minimum redundancy geometrical features,"Rabiu et al. EURASIP Journal on Advances in Signal Processing 2012, 2012:213
+http://asp.eurasipjournals.com/content/2012/1/213
+RESEARCH
+Open Access
+3D facial expression recognition using
+maximum relevance minimum redundancy
+geometrical features
+Habibu Rabiu*, M. Iqbal Saripan, Syamsiah Mashohor and Mohd Hamiruce Marhaban"
+1da57510321fb8b25dc4d21844fb9afa4e40571e,Activity representation with motion hierarchies,"Int J Comput Vis
+DOI 10.1007/s11263-013-0677-1
+Activity representation with motion hierarchies
+Adrien Gaidon · Zaid Harchaoui · Cordelia Schmid
+Received: 17 May 2013 / Accepted: 20 November 2013
+© Springer Science+Business Media New York 2013"
+1dd3faf5488751c9de10977528ab96be24616138,Detecting Anomalous Faces with 'No Peeking' Autoencoders,"Detecting Anomalous Faces with ‘No Peeking’ Autoencoders
+Anand Bhattad 1 Jason Rock 1 David Forsyth 1"
+1d4e0427dffec6ac75b96a564986046ea2b00980,Eye Controlled Robotic Motion Using Video Tracking In Real Time,"ISSN(Online): 2319-8753
+ISSN (Print): 2347-6710
+International Journal of Innovative Research in Science,
+Engineering and Technology
+(An ISO 3297: 2007 Certified Organization)
+Website: www.ijirset.com
+Vol. 6, Issue 7, July 2017
+Eye Controlled Robotic Motion Using Video
+Tracking In Real Time
+Kriti Bhattacharjee 1, Dr. Manoj Soni 2
+P.G. Student, Department of Mechanical and Automation Engineering, IGDTUW, New Delhi, India1
+Associate Professor, Department of Mechanical and Automation Engineering, IGDTUW, New Delhi, India2"
+1d1f83023686d43fd4e8805c8e517dffb02d118c,Compiler Enhanced Scheduling for OpenMP for Heterogeneous Multiprocessors,"Compiler Enhanced Scheduling for OpenMP for
+Heterogeneous Multiprocessors
+Jyothi Krishna V S
+IIT Madras"
+1d81293bc17a135cfd35912146c538cd81830381,Single camera multi-person tracking based on crowd simulation,"1st International Conference on Pattern Recognition (ICPR 2012)
+November 11-15, 2012. Tsukuba, Japan
+978-4-9906441-1-6 ©2012 IAPR"
+1dff919e51c262c22630955972968f38ba385d8a,Toward an Affect-Sensitive Multimodal Human–Computer Interaction,"Toward an Affect-Sensitive Multimodal
+Human–Computer Interaction
+MAJA PANTIC, MEMBER, IEEE, AND LEON J. M. ROTHKRANTZ
+Invited Paper
+The ability to recognize affective states of a person we are com-
+municating with is the core of emotional intelligence. Emotional
+intelligenceisa facet of human intelligence thathas been argued to be
+indispensable and perhaps the most important for successful inter-
+personal social interaction. This paper argues that next-generation
+human–computer interaction (HCI) designs need to include the
+essence of emotional intelligence—the ability to recognize a user’s
+affective states—in order to become more human-like, more effec-
+tive, and more efficient. Affective arousal modulates all nonverbal
+communicative cues (facial expressions, body movements, and vocal
+and physiological reactions). In a face-to-face interaction, humans
+detect and interpret those interactive signals of their communicator
+with little or no effort. Yet design and development of an automated
+system that accomplishes these tasks is rather difficult. This paper
+surveys the past work in solving these problems by a computer
+nd provides a set of recommendations for developing the first"
+1de8f38c35f14a27831130060810cf9471a62b45,A Branch-and-Bound Framework for Unsupervised Common Event Discovery,"Int J Comput Vis
+DOI 10.1007/s11263-017-0989-7
+A Branch-and-Bound Framework for Unsupervised Common
+Event Discovery
+Wen-Sheng Chu1
+Jeffrey F. Cohn1,2 · Daniel S. Messinger3
+· Fernando De la Torre1 ·
+Received: 3 June 2016 / Accepted: 12 January 2017
+© Springer Science+Business Media New York 2017"
+1d2af64416882b2ae8fe4de51b85fdd7d561cfee,Headgear Accessories Classification Using an Overhead Depth Sensor,"Article
+Headgear Accessories Classification Using an
+Overhead Depth Sensor
+Carlos A. Luna, Javier Macias-Guarasa ID , Cristina Losada-Gutierrez * ID , Marta Marron-Romera,
+Manuel Mazo, Sara Luengo-Sanchez and Roberto Macho-Pedroso
+Department of Electronics, University of Alcala, Ctra. Madrid-Barcelona, km.33,600, 28805 Alcalá de Henares,
+Spain; (C.A.L.); (J.M.-G.); (M.M.-R.);
+(M.M.); (S.L.-S.); (R.M.-P.)
+* Correspondence: Tel.: +34-918-856-906; Fax: +34-918-856-591
+Received: 22 June 2017; Accepted: 8 August 2017; Published: 10 August 2017"
+1dc07322715e093c560b30fdf1e168e58e9a9409,DRBF and IRBF Based Face Recognition and Extraction of Facial Expressions from the Blur Image,"Australian Journal of Basic and Applied Sciences, 8(3) March 2014, Pages: 61-68
+AENSI Journals
+Australian Journal of Basic and Applied Sciences
+ISSN:1991-8178
+Journal home page: www.ajbasweb.com
+DRBF and IRBF Based Face Recognition and Extraction of Facial Expressions from the
+Blur Image
+M. Jayashree, 2Dr. D. Deepa, 3M. Rubhashree
+PG Scholar, Department of Information Technology, Bannari Amman Institute of Technology, Sathyamangalam, TamilNadu, India.
+2Associate Professor, Department of Information Technology, Bannari Amman Institute of Technology, Sathyamangalam, TamilNadu,
+India.
+Assistant Professor, Department of Computer Science and Engineering, Bannari Amman Institute of Technology, Sathyamangalam,
+TamilNadu, India.
+A R T I C L E I N F O
+Article history:
+Received 12 January 2014
+Received in revised form 22
+March 2014
+Accepted 27 March 2014
+Available online 2 April 2014"
+1da83903c8d476c64c14d6851c85060411830129,Iterated Support Vector Machines for Distance Metric Learning,"Iterated Support Vector Machines for Distance
+Metric Learning
+Wangmeng Zuo, Member, IEEE, Faqiang Wang, David Zhang, Fellow, IEEE, Liang Lin, Member, IEEE,
+Yuchi Huang, Member, IEEE, Deyu Meng, and Lei Zhang, Senior Member, IEEE"
+1d93a1af770040cb8a64e96215884ee363a8f53a,Improved face recognition at a distance using light field camera & super resolution schemes,"Improved Face Recognition At A Distance Using Light
+Field Camera & Super Resolution Schemes
+R. Raghavendra* Kiran B. Raja*† Bian Yang* Christoph Busch*†
+{raghavendra.ramachandra, kiran.raja, bian.yang,
+*Norwegian Biometrics Laboratory
+Hochschule Darmstadt - CASED
+Gjøvik University College
+802 Gjøvik, Norway
+Haardtring 100,
+64295 Darmstadt, Germany"
+1d5fe82303712a70c1d231ead2ee03f042d8ad70,ImageNet pre-trained models with batch normalization,"ImageNet pre-trained models with batch normalization
+Marcel Simon, Erik Rodner, Joachim Denzler
+Computer Vision Group
+Friedrich-Schiller-Universit¨at Jena, Germany
+{marcel.simon, erik.rodner,"
+1d455f918062f66e86ed53cf258284abd6abd8fc,SMSnet: Semantic motion segmentation using deep convolutional neural networks,"SMSnet: Semantic Motion Segmentation
+using Deep Convolutional Neural Networks
+Johan Vertens∗
+Abhinav Valada∗
+Wolfram Burgard"
+1d99282d00f7cf3e4d912428313848add8de8220,Comparing Attribute Classifiers for Interactive Language Grounding,"Proceedings of the 2015 Workshop on Vision and Language (VL’15), pages 60–69,
+Lisbon, Portugal, 18 September 2015. c(cid:13)2015 Association for Computational Linguistics."
+1d58d83ee4f57351b6f3624ac7e727c944c0eb8d,Enhanced Local Texture Feature Sets for Face Recognition Under Difficult Lighting Conditions,"Enhanced Local Texture
+Feature Sets for Face
+Recognition under Difficult
+Lighting Conditions
+Xiaoyang Tan and Bill Triggs
+INRIA & Laboratoire Jean
+Kuntzmann,
+655 avenue de l'Europe, Montbonnot 38330, France"
+1d679b371c9dfd833cee0925de483562d2bc7d88,Face Recognition using 3D Summation Invariant Features,"­4244­0367­7/06/$20.00 ©2006 IEEE
+ICME 2006"
+1d729693a888a460ee855040f62bdde39ae273af,Photorealistic Face De-Identification by Aggregating Donors' Face Components,"Photorealistic Face de-Identification by Aggregating
+Donors’ Face Components
+Saleh Mosaddegh, Lo¨ıc Simon, Fr´ed´eric Jurie
+To cite this version:
+Saleh Mosaddegh, Lo¨ıc Simon, Fr´ed´eric Jurie. Photorealistic Face de-Identification by Aggre-
+gating Donors’ Face Components. Asian Conference on Computer Vision, Nov 2014, Singapore.
+pp.1-16, 2014. <hal-01070658>
+HAL Id: hal-01070658
+https://hal.archives-ouvertes.fr/hal-01070658
+Submitted on 2 Oct 2014
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destin´ee au d´epˆot et `a la diffusion de documents
+scientifiques de niveau recherche, publi´es ou non,
+´emanant des ´etablissements d’enseignement et de"
+1d4c25f9f8f08f5a756d6f472778ab54a7e6129d,An Innovative Mean Approach for Plastic Surgery Face Recognition,"International Journal of Science and Research (IJSR)
+ISSN (Online): 2319-7064
+Index Copernicus Value (2014): 6.14 | Impact Factor (2014): 4.438
+An Innovative Mean Approach for Plastic Surgery
+Face Recognition
+Mahendra P. Randive1, Umesh W. Hore2
+Student of M.E., Department of Electronics & Telecommunication Engineering,
+P. R. Patil College of Engineering, Amravati Maharashtra – India
+Assistant Professor, Department of Electronics & Telecommunication Engineering,
+P. R. Patil College of Engineering, Amravati Maharashtra – India"
+714794c74941e45798d9c405a4fec1138cff2df3,Iris Segmentation: State of the Art and Innovative Methods,"Iris segmentation: state of the art and innovative
+methods
+Ruggero Donida Labati, Angelo Genovese, Vincenzo Piuri, and Fabio Scotti"
+71ab53b0b3635411d5985f71cc56bb1784023834,RoboCupRescue 2012 - Robot League Team,"RoboCupRescue 2012 - Robot League Team
+Hector Darmstadt (Germany)
+Thorsten Graber2, Stefan Kohlbrecher1, Johannes Meyer2, Karen Petersen1,
+Oskar von Stryk1, Uwe Klingauf2(cid:63)
+Department of Computer Science (1) and Department of Mechanical Engineering (2),
+Technische Universit¨at Darmstadt,
+Karolinenplatz 5, D-64289 Darmstadt, Germany
+E-Mail:
+Web: www.gkmm.tu-darmstadt.de/rescue"
+71b376dbfa43a62d19ae614c87dd0b5f1312c966,The temporal connection between smiles and blinks,"The Temporal Connection Between Smiles and Blinks
+Laura C. Trutoiu, Jessica K. Hodgins, and Jeffrey F. Cohn"
+713345804a00c6c0083e4155b904956bb95949da,Scalable Normalized Cut with Improved Spectral Rotation,Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+71fd29c2ae9cc9e4f959268674b6b563c06d9480,End-to-end 3D shape inverse rendering of different classes of objects from a single input image,"End-to-end 3D shape inverse rendering of different classes
+of objects from a single input image
+Shima Kamyab1 and S. Zohreh Azimifar1
+Computer Science and Engineering and Information Technology, Shiraz
+university, Shiraz, Iran
+November 17, 2017"
+712609494dd049b44ebfd82698b9305ef07f027b,Biometric bits extraction through phase quantization based on feature level fusion,"Telecommun Syst (2011) 47:255–273
+DOI 10.1007/s11235-010-9317-z
+Biometric bits extraction through phase quantization based
+on feature level fusion
+Hyunggu Lee · Andrew Beng Jin Teoh · Jaihie Kim
+Published online: 4 June 2010
+© Springer Science+Business Media, LLC 2010"
+71dcf25a3ea3801f09d6cc446dbf78e22481d609,Face recognition with the continuous n-tuple classifier,"FaceRecognitionwiththecontinuous
+n-tupleclassi(cid:12)er
+S.M.Lucas
+DepartmentofElectronicSystemsEngineering
+UniversityofEssex
+ColchesterCOSQ,UK"
+7174e77f8e26aef3105996512b787b336320d46f,People Counting in High Density Crowds from Still Images,"People Counting in High Density Crowds from Still
+Images
+Ankan Bansal, and K S Venkatesh"
+71f1e72670e676b6902cce0d6fc0b4f63b46ca28,Survey paper: Face Detection and Face Recognition,"Survey paper:
+Face Detection and Face Recognition
+By Hyun Hoi James Kim
+. Introduction
+Face recognition is one of biometric methods identifying individuals by the features of face. Research in this
+rea has been conducted for more than 30 years; as a result, the current status of face recognition technology
+is well advanced. Many commercial applications of face recognition are also available such as criminal
+identification, security system, image and film processing.
+From the sequence of images captured by camera, the goal is to find best match with given image. Using a
+pre-stored image database, the face recognition system should be able to identify or verify one or more
+persons in the scene. Before face recognition is performed, the system should determine whether or not there
+is a face in a given image or given video, a sequence of images. This process is called face detection. Once a
+face is detected, face region should be isolated from the scene for the face recognition. The face detection and
+face extraction are often performed simultaneously. The overall process is depicted in Fig 1.
+Identification
+or Verification
+Feature Extraction
+Face Detection
+Face Recognition
+Input"
+71f969fdc6990b21536c5662c52110d7fdb29028,Driver Gaze Tracking and Eyes Off the Road Detection System Using a Depth Camera,"X Encontro de Alunos e Docentes do DCA/FEEC/UNICAMP (EADCA)
+X DCA/FEEC/University of Campinas (UNICAMP) Workshop (EADCA)
+Campinas, 26 e 27 de outubro de 2017
+Campinas, Brazil, October 26-27, 2017
+Driver Gaze Tracking and Eyes Off the Road Detection System
+Using a Depth Camera
+Ribeiro, Rafael F. , Costa, P. D. P (Orientador)
+Dept. of Computer Engineering and Industrial Automation (DCA)
+School of Electrical and Computer Engineering (FEEC)
+University of Campinas (Unicamp)
+Postal Code 6101, 13083-970 – Campinas, SP, Brazil"
+71c549df77b0fc2ebe0dc20d39d0a629a563bd7a,Texture Classification based on Local Features Using Dual Neighborhood Approach,"I.J. Image, Graphics and Signal Processing, 2017, 9, 59-67
+Published Online September 2017 in MECS (http://www.mecs-press.org/)
+DOI: 10.5815/ijigsp.2017.09.07
+Texture Classification based on Local Features
+Using Dual Neighborhood Approach
+Associate Professor, Dept. of C.S.E, Sri Vasavi Institute of Engineering & Technology, pedana, Andhrapradesh, India
+M. Srinivasa Rao
+Email:
+V.Vijaya Kumar
+Professor, Anurag Group of Institutions (Autonomous), Hyderabad,Telanagana, India
+Email:
+MHM KrishnaPrasad
+Professor of the Department of Computer Science and Engineering, University College of Engineering, Kakinada
+(Autonomous), JNTUK, Andhra Pradesh, India
+Email:
+Received: 11 March 2017; Accepted: 05 July 2017; Published: 08 September 2017"
+71406b7358812400d0626e8d62e7eb38cea99bbe,On Improving Face Detection Performance by Modelling Contextual Information,"ON IMPROVING FACE DETECTION
+PERFORMANCE BY MODELLING
+CONTEXTUAL INFORMATION
+Cosmin Atanasoaei Chris McCool
+Sébastien Marcel
+Idiap-RR-43-2010
+DECEMBER 2010
+Centre du Parc, Rue Marconi 19, P.O. Box 592, CH - 1920 Martigny
+T +41 27 721 77 11 F +41 27 721 77 12 www.idiap.ch"
+71403805e67eeb6ec336e0cb83646fdb7c819757,Visual Strategies for Sparse Spike Coding,"Visual Strategies for Sparse Spike Coding
+Laurent Perrinet
+Manuel Samuelides
+ONERA/DTIM,
+, av. Belin,
+1055 Toulouse, France"
+714d487571ca0d676bad75c8fa622d6f50df953b,eBear: An expressive Bear-Like robot,"eBear: An Expressive Bear-Like Robot
+Xiao Zhang, Ali Mollahosseini, Amir H. Kargar B., Evan Boucher,
+Richard M. Voyles, Rodney Nielsen and Mohammd H. Mahoor"
+710ce8cf25f31df8547b888519b414187e989257,Amygdala activation predicts gaze toward fearful eyes.,"The Journal of Neuroscience, July 15, 2009 • 29(28):9123–9126 • 9123
+Brief Communications
+Amygdala Activation Predicts Gaze toward Fearful Eyes
+Matthias Gamer and Christian Bu¨chel
+Department of Systems Neuroscience, University Medical Center Hamburg-Eppendorf, D-20246 Hamburg, Germany
+The human amygdala can be robustly activated by presenting fearful faces, and it has been speculated that this activation has functional
+relevance for redirecting the gaze toward the eye region. To clarify this relationship between amygdala activation and gaze-orienting behavior,
+functional magnetic resonance imaging data and eye movements were simultaneously acquired in the current study during the evaluation of
+facial expressions. Fearful, angry, happy, and neutral faces were briefly presented to healthy volunteers in an event-related manner. We con-
+trolled for the initial fixation by unpredictably shifting the faces downward or upward on each trial, such that the eyes or the mouth were
+presentedatfixation.Acrossemotionalexpressions,participantsshowedabiastoshifttheirgazetowardtheeyes,butthemagnitudeofthiseffect
+followed the distribution of diagnostically relevant regions in the face. Amygdala activity was specifically enhanced for fearful faces with the
+mouth aligned to fixation, and this differential activation predicted gazing behavior preferentially targeting the eye region. These results reveal
+direct role of the amygdala in reflexive gaze initiation toward fearfully widened eyes. They mirror deficits observed in patients with amygdala
+lesions and open a window for future studies on patients with autism spectrum disorder, in which deficits in emotion recognition, probably
+related to atypical gaze patterns and abnormal amygdala activation, have been observed.
+Introduction
+The human amygdala is known to be robustly activated by the
+presentation of fearful faces (Morris et al., 1996; Hariri et al.,
+002; Gla¨scher et al., 2004; Reinders et al., 2005), which seems to"
+7128f1239cbd1007ef19d8fd8cdab083d33a6984,"Aligned to the Object, not to the Image: A Unified Pose-aligned Representation for Fine-grained Recognition","Aligned to the Object, not to the Image:
+A Unified Pose-aligned Representation for Fine-grained Recognition
+Pei Guo, Ryan Farrell
+Computer Science Department
+Brigham Young University"
+710011644006c18291ad512456b7580095d628a2,Learning Residual Images for Face Attribute Manipulation,"Learning Residual Images for Face Attribute Manipulation
+Wei Shen
+Rujie Liu
+Fujitsu Research & Development Center, Beijing, China.
+{shenwei,"
+71529e3e51f2967e338124652e93a3d34eb6c5e1,Deep triplet-group network by exploiting symmetric and asymmetric information for person reidentification,"Downloaded From: https://www.spiedigitallibrary.org/journals/Journal-of-Electronic-Imaging on 9/6/2018
+Terms of Use: https://www.spiedigitallibrary.org/terms-of-use
+Deeptriplet-groupnetworkbyexploitingsymmetricandasymmetricinformationforpersonreidentificationBenzhiYuNingXuBenzhiYu,NingXu,“Deeptriplet-groupnetworkbyexploitingsymmetricandasymmetricinformationforpersonreidentification,”J.Electron.Imaging27(3),033033(2018),doi:10.1117/1.JEI.27.3.033033."
+714947e4d7f79f753c5c44eac701185e37086276,An Exponential Representation in the API Algorithm for Hidden Markov Models Training,"An Exponential Representation in the API
+Algorithm for Hidden Markov Models Training
+S´ebastien Aupetit1, Nicolas Monmarch´e1, Mohamed Slimane1, and
+Pierre Liardet2
+Universit´e Fran¸cois-Rabelais de Tours, Laboratoire d’Informatique
+Polytech’Tours, 64, Av Jean Portalis, 37200 Tours, France
+Universit´e de Provence, CMI
+Laboratoire ATP, UMR-CNRS 6632
+9 rue F. Joliot-Curie, 13453 Marseille cedex 13, France"
+71f98c3f7a5b02ab193110d5ae9f9d48a1c5ec38,Deep Human Parsing with Active Template Regression,"Deep Human Parsing with Active Template
+Regression
+Xiaodan Liang, Si Liu, Xiaohui Shen, Jianchao Yang, Luoqi Liu, Jian Dong, Liang Lin, Shuicheng
+Yan, Senior Member, IEEE"
+71286a2b3d564daf171cdef54ff8972159152729,Combinatorial Resampling Particle Filter: An Effective and Efficient Method for Articulated Object Tracking,"Noname manuscript No.
+(will be inserted by the editor)
+Combinatorial Resampling Particle Filter: an Effective and Efficient
+Method for Articulated Object Tracking
+Christophe Gonzales · S´everine Dubuisson
+Received: date / Accepted: date"
+71d3ed17c0642234a921bb45fcadd86520794941,Learning by Tracking: Siamese CNN for Robust Target Association,"Learning by tracking: Siamese CNN for robust target association
+Laura Leal-Taix´e
+TU M¨unchen
+Munich, Germany
+Cristian Canton-Ferrer
+Microsoft
+Redmond (WA), USA
+Konrad Schindler
+ETH Zurich
+Zurich, Switzerland"
+71766bf224d5c74a0be6996b38d8885c2eed5a2c,Fooling Vision and Language Models Despite Localization and Attention Mechanism,
+71d8fae870ea78a89e231247afb3259267e09799,Probabilistic multi-class segmentation for the Amazon Picking Challenge,"Probabilistic Multi-Class Segmentation
+for the Amazon Picking Challenge
+Rico Jonschkowski
+Clemens Eppner∗
+Sebastian H¨ofer∗
+Roberto Mart´ın-Mart´ın∗ Oliver Brock"
+71dcbca34d71bda0bc41c33c04d2c1a740274feb,An Innovative Mean Approach for Plastic Surgery Face Recognition,"International Journal of Science and Research (IJSR)
+ISSN (Online): 2319-7064
+Index Copernicus Value (2014): 6.14 | Impact Factor (2014): 4.438
+An Innovative Mean Approach for Plastic Surgery
+Face Recognition
+Mahendra P. Randive1, Umesh W. Hore2
+Student of M.E., Department of Electronics & Telecommunication Engineering,
+P. R. Patil College of Engineering, Amravati Maharashtra – India
+Assistant Professor, Department of Electronics & Telecommunication Engineering,
+P. R. Patil College of Engineering, Amravati Maharashtra – India"
+7189d5584416ef2a39d6ab16929dfecdddc10081,A Review of Face Sketch Recognition Systems,"Journal of Theoretical and Applied Information Technology
+20th November 2015. Vol.81. No.2
+© 2005 - 2015 JATIT & LLS. All rights reserved.
+ISSN: 1992-8645 www.jatit.org E-ISSN: 1817-3195
+A REVIEW OF FACE SKETCH RECOGNITION SYSTEMS
+SALAH EDDINE LAHLALI, 2ABDELALIM SADIQ, 3 SAMIR MBARKI
+23Department of Computing, Faculty of sciences, IbnTofail University, Kenitra, Morocco
+E-mail:"
+711bb5f63139ee7a9b9aef21533f959671a7d80e,Objects extraction and recognition for camera-based interaction : heuristic and statistical approaches,"Helsinki University of Technology Laboratory of Computational Engineering Publications
+Teknillisen korkeakoulun Laskennallisen tekniikan laboratorion julkaisuja
+Espoo 2007
+REPORT B68
+OBJECTS EXTRACTION AND RECOGNITION FOR
+CAMERA-BASED INTERACTION: HEURISTIC AND
+STATISTICAL APPROACHES
+Hao Wang
+TEKNILLINEN KORKEAKOULU
+TEKNILLINEN KORKEAKOULU
+TEKNISKA HÖGSKOLAN
+TEKNISKA HÖGSKOLAN
+HELSINKI UNIVERSITY OF TECHNOLOGY
+HELSINKI UNIVERSITY OF TECHNOLOGY
+TECHNISCHE UNIVERSITÄT HELSINKI
+TECHNISCHE UNIVERSITÄT HELSINKI
+UNIVERSITE DE TECHNOLOGIE D'HELSINKI
+UNIVERSITE DE TECHNOLOGIE D'HELSINKI"
+7173871866fc7e555e9123d1d7133d20577054e8,Simultaneous Adversarial Training - Learn from Others Mistakes,"Simultaneous Adversarial Training - Learn from
+Others’ Mistakes
+Zukang Liao
+Lite-On Singapore Pte. Ltd, 2Imperial College London"
+71edcfe5e3a4e1678698a0659a7e51555291d242,Who's that Actor? Automatic Labelling of Actors in TV Series Starting from IMDB Images,"Who’s that Actor? Automatic Labelling of
+Actors in TV series starting from IMDB Images
+Rahaf Aljundi(cid:63), Punarjay Chakravarty(cid:63) and Tinne Tuytelaars
+KU Leuven, ESAT-PSI, iMinds, Belgium"
+715216a92c338a3c35319026d38ed0da0c57d013,Integrated Pedestrian and Direction Classification Using a Random Decision Forest,"Integrated Pedestrian and Direction Classification
+using a Random Decision Forest
+Junli Tao and Reinhard Klette
+University of Auckland, Auckland, New Zealand"
+711801297f23df9ac8ca1c2d3c9d7dfa2ed12043,Enhancing Energy Efficiency of Multimedia Applications in Heterogeneous Mobile Multi-Core Processors,"Contention-Aware Fair Scheduling for
+Asymmetric Single-ISA Multicore Systems
+Adrian Garcia-Garcia , Juan Carlos Saez , and Manuel Prieto-Matias"
+76ec5c774bb3fd04f9e68864a411286536a544c5,Latent Constraints: Learning to Generate Conditionally from Unconditional Generative Models,"LATENT CONSTRAINTS:
+LEARNING TO GENERATE CONDITIONALLY FROM
+UNCONDITIONAL GENERATIVE MODELS
+Jesse Engel
+Google Brain
+San Francisco, CA, USA
+Matthew D. Hoffman
+Google Inc.
+San Francisco, CA, USA
+Adam Roberts
+Google Brain
+San Francisco, CA, USA"
+7608953ef5c7a882bd2e7e7053a600e543748233,Robust 3D Face Recognition by Local Shape Difference Boosting,"Robust 3D Face Recognition
+y Local Shape Difference Boosting
+Yueming Wang, Jianzhuang Liu, Senior Member, IEEE, and Xiaoou Tang, Fellow, IEEE"
+76ff6a68d7a8dcc12b6ba68e914294f6720a466d,The red one!: On learning to refer to things based on discriminative properties,"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics, pages 213–218,
+Berlin, Germany, August 7-12, 2016. c(cid:13)2016 Association for Computational Linguistics"
+76fd801981fd69ff1b18319c450cb80c4bc78959,Alignment of Eye Movements and Spoken Language for Semantic Image Understanding,"Proceedings of the 11th International Conference on Computational Semantics, pages 76–81,
+London, UK, April 15-17 2015. c(cid:13)2015 Association for Computational Linguistics"
+76dc11b2f141314343d1601635f721fdeef86fdb,Weighted Decoding ECOC for Facial Action Unit Classification,"Weighted Decoding ECOC for Facial
+Action Unit Classification
+Terry Windeatt"
+76673de6d81bedd6b6be68953858c5f1aa467e61,Discovering a Lexicon of Parts and Attributes,"Discovering a Lexicon of Parts and Attributes
+Subhransu Maji
+Toyota Technological Institute at Chicago,
+Chicago, IL 60637, USA"
+76f73c884e4437a22afcba60193bbd7f35e64aaf,Title of dissertation : RESOURCE ALLOCATION IN COMPUTER VISION,
+768cb0e32de3f1b5aebe04448aaec4c25586680c,Boosting Image Captioning with Attributes,"Under review as a conference paper at ICLR 2017
+BOOSTING IMAGE CAPTIONING WITH ATTRIBUTES
+Ting Yao, Yingwei Pan, Yehao Li, Zhaofan Qiu, Tao Mei
+Microsoft Research Asia
+{tiyao, v-yipan, v-yehl, v-zhqiu,"
+76cd5e43df44e389483f23cb578a9015d1483d70,Face Verification from Depth using Privileged Information,"BORGHI ET AL.: FACE VERIFICATION FROM DEPTH
+Face Verification from Depth using
+Privileged Information
+Department of Engineering
+""Enzo Ferrari""
+University of Modena and Reggio
+Emilia
+Modena, Italy
+Guido Borghi
+Stefano Pini
+Filippo Grazioli
+Roberto Vezzani
+Rita Cucchiara"
+76b2732a8684babdfd95c655b2e1a1b79c3aeb9b,Face detection from few training examples,"978-1-4244-1764-3/08/$25.00 ©2008 IEEE
+ICIP 2008
+Authorized licensed use limited to: UNSW Library. Downloaded on June 12, 2009 at 01:20 from IEEE Xplore. Restrictions apply."
+76c018c6dfc81f61c3912c5ed442d9a72f64e467,Graphical Processing Unit Assisted Image Processing for Accelerated Eye Tracking,"Graphical Processing Unit Assisted Image Processing for
+Accelerated Eye Tracking
+Dissertation submitted by
+Jean-Pierre Louis du Plessis
+Student Number: 2006033415
+to the
+Department of Computer Science and Informatics
+Faculty of Natural and Agricultural Sciences
+University of the Free State, South Africa
+Submitted in fulfilment of the requirements of the degree
+Magister Scientiae
+February 2015
+Study Leader: Prof P.J. Blignaut"
+76b11c281ac47fe6d95e124673a408ee9eb568e3,Real-time Multi View Face Detection and Pose Estimation Aishwarya,"International Journal of Latest Engineering and Management Research (IJLEMR)
+ISSN: 2455-4847
+www.ijlemr.com || Volume 02 - Issue 03 || March 2017 || PP. 59-71
+REAL-TIME MULTI VIEW FACE DETECTION AND POSE
+ESTIMATION
+AISHWARYA.S1 , RATHNAPRIYA.K1, SUKANYA SARGUNAR.V2
+U. G STUDENTS, DEPT OF CSE, ALPHA COLLEGE OF ENGINEERING, CHENNAI,
+ASST PROF.DEPARTMENT OF CSE, ALPHA COLLEGE OF ENGINEERING, CHENNAI"
+76bfa74a6311db5d84bad2a7a941f30dd750d01c,Evidence That Emotion Mediates Social Attention in Rhesus Macaques,"Evidence That Emotion Mediates Social Attention in
+Rhesus Macaques
+Emily J. Bethell1*, Amanda Holmes2, Ann MacLarnon1, Stuart Semple1
+Centre for Research in Evolutionary and Environmental Anthropology, University of Roehampton, London, United Kingdom, 2 Department of Psychology, University of
+Roehampton, London, United Kingdom"
+7689d23a22682c92bdf9a1df975fa2cdd24f1b87,MMD with Kernel Learning In practice we use finite samples from distributions to estimate,"MMD GAN: Towards Deeper Understanding of Moment Matching
+Network
+Chun-Liang Li
+Committee: Barnab´as P´oczos and Pradeep Ravikumar
+Tuesday 28th November, 2017"
+76ebe6d24ee69e3f853740fb75085a2118d40d51,ILLUMINANCE FLOW ( met een samenvatting in het Nederlands ) PROEFSCHRIFT ter verkrijging van de graad van doctor,"ILLUMINANCE FLOW
+(met een samenvatting in het Nederlands)
+PROEFSCHRIFT
+ter verkrijging van de graad van doctor aan de Universiteit Utrecht op
+gezag van de rector magnificus, prof.dr. J.C. Stoof, ingevolge het besluit
+van het college voor promoties
+in het openbaar te verdedigen op vrijdag 15 januari 2010
+des middags te 4.15 uur
+(Dan) Stefan Mikael Karlsson
+geboren op 3 september 1978 te Stafsinge, Zweden"
+76f3450e50c20fca00dd6319df38503c5d7ebad0,THÈSE DE DOCTORAT présentée par OLIVIER DUCHENNE pour obtenir le grade de DOCTEUR DE L ’ ÉCOLE NORMALE SUPÉRIEURE,"THÈSEDEDOCTORATprésentéeparOLIVIERDUCHENNEpourobtenirlegradedeDOCTEURDEL’ÉCOLENORMALESUPÉRIEUREDomaine:MATHÉMATIQUESAPPLIQUÉESSujetdelathèse:Alignementélastiqued’imagespourlareconnaissanced’objet—Non-rigidimagealignmentforobjectrecognitionThèseprésentéeetsoutenueàl’ENSUlmle29Novembre2012devantlejurycomposéde:JeanPonceProfesseur,DirecteurduDI,ENSUlmDirecteurdethèsePedroFelzenszwalbProfesseur,BrownUniversityRapporteurMartialHebertProfesseur,CarnegieMellonUniversityRapporteurFrancisBachDirecteurderecherche,ENSUlmÉxaminateurJitendraMalikProfesseur,UniversityofBerkeleyÉxaminateurCordeliaSchmidProfesseur,INPGrenobleÉxaminateurAndrewZissermanProfesseur,UniversityofOxfordÉxaminateurThèsepréparéeauseindel’équipeWILLOWdudépartementd’informatiquedel’ÉcoleNormaleSupérieure,Ulm.(INRIA/ENS/CNRSUMR8548)."
+76d9f5623d3a478677d3f519c6e061813e58e833,Fast Algorithms for the Generalized Foley-Sammon Discriminant Analysis,"FAST ALGORITHMS FOR THE GENERALIZED FOLEY-SAMMON
+DISCRIMINANT ANALYSIS
+LEI-HONG ZHANG∗, LI-ZHI LIAO† , AND MICHAEL K. NG‡"
+76e2d7621019bd45a5851740bd2742afdcf62837,Real-Time Detection and Measurement of Eye Features from Color Images,"Article
+Real-Time Detection and Measurement of Eye
+Features from Color Images
+Diana Borza 1, Adrian Sergiu Darabant 2 and Radu Danescu 1,*
+Computer Science Department, Technical University of Cluj Napoca, 28 Memorandumului Street,
+Cluj Napoca 400114, Romania;
+Computer Science Department, Babes Bolyai University, 58-60 Teodor Mihali, C333, Cluj Napoca 400591,
+Romania;
+* Correspondence: Tel.: +40-740-502-223
+Academic Editors: Changzhi Li, Roberto Gómez-García and José-María Muñoz-Ferreras
+Received: 28 April 2016; Accepted: 14 July 2016; Published: 16 July 2016"
+765b2cb322646c52e20417c3b44b81f89860ff71,PoseShop: Human Image Database Construction and Personalized Content Synthesis,"PoseShop: Human Image Database
+Construction and Personalized
+Content Synthesis
+Tao Chen, Ping Tan, Member, IEEE, Li-Qian Ma, Ming-Ming Cheng, Member, IEEE,
+Ariel Shamir, and Shi-Min Hu, Member, IEEE"
+763158cef9d1e4041f24fce4cf9d6a3b7a7f08ff,Hierarchical Modeling and Applications to Recognition Tasks,"Hierarchical Modeling and
+Applications to Recognition Tasks
+Thesis submitted for the degree of
+”Doctor of Philosophy”
+Alon Zweig
+Submitted to the Senate of the Hebrew University
+August / 2013"
+7606a74de57f67257c77a8bb0295ff4593566040,Content-based Image Retrieval Using Constrained Independent Component Analysis : Facial Image Retrieval Based on Compound Queries,"We are IntechOpen,
+the world’s leading publisher of
+Open Access books
+Built by scientists, for scientists
+,800
+16,000
+Open access books available
+International authors and editors
+Downloads
+Our authors are among the
+Countries delivered to
+TOP 1%
+2.2%
+most cited scientists
+Contributors from top 500 universities
+Selection of our books indexed in the Book Citation Index
+in Web of Science™ Core Collection (BKCI)
+Interested in publishing with us?
+Contact
+Numbers displayed above are based on latest data collected."
+76cb2ecc96f02b1d8a7a0d1681fbb55367a4b765,Learning Object States from Videos,"Learning Object States from Videos
+Liang-Kang Huang
+Katerina Fragkiadaki"
+7671234c3726fda01b2842f85327624f0dda8ead,The data deluge: Challenges and opportunities of unlimited data in statistical signal processing,"978-1-4244-2354-5/09/$25.00 ©2009 IEEE
+ICASSP 2009"
+760ba44792a383acd9ca8bef45765d11c55b48d4,Class-specific classifier: avoiding the curse of dimensionality,"INTRODUCTION AND BACKGROUND
+The purpose of this article is to introduce the
+reader to the basic principles of classification with
+lass-specific features. It is written both for readers
+interested in only the basic concepts as well as those
+interested in getting started in applying the method.
+For in-depth coverage, the reader is referred to a more
+detailed article [l].
+Class-Specific Classifier:
+Avoiding the Curse of
+Dimensionality
+PAUL M. BAGGENSTOSS, Member. lEEE
+US. Naval Undersea Warfare Center
+This article describes a new probabilistic method called the
+“class-specific method” (CSM). CSM has the potential to avoid
+the “curse of dimensionality” which plagues most clmiiiers
+which attempt to determine the decision boundaries in a
+highdimensional featue space. In contrast, in CSM, it is possible
+to build classifiers without a ” n o n feature space. Separate
+Law-dimensional features seta may be de6ned for each class, while"
+76d8b370d0a8fc63ead6ba657dd438d7155d659f,Modular Sensor Fusion for Semantic Segmentation,"(cid:13)2018 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any
+urrent or future media, including reprinting/republishing this material for advertising or promotional purposes, creating
+new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in
+other works.
+Please cite this paper as:
+title
+uthor
+ooktitle = ""2018 {IEEE/RSJ} International Conference on Intelligent Robots
+= ""Modular Sensor Fusion for Semantic Segmentation"",
+= ""Blum, Hermann and Gawel, Abel and Siegwart, Roland and Cadena, Cesar"",
+nd Systems ({IROS})"",
+= 2018;"
+766728bac030b169fcbc2fbafe24c6e22a58ef3c,A survey of deep facial landmark detection,"A survey of deep facial landmark detection
+Yongzhe Yan1,2
+Xavier Naturel2
+Christophe Garcia3
+Thierry Chateau1
+Christophe Blanc1
+Stefan Duffner3
+Université Clermont Auvergne, France
+Wisimage, France
+Université de Lyon, CNRS, INSA Lyon, LIRIS, UMR5205, Lyon, France
+Résumé
+La détection de landmarks joue un rôle crucial dans de
+nombreuses applications d’analyse du visage comme la
+reconnaissance de l’identité, des expressions, l’animation
+d’avatar, la reconstruction 3D du visage, ainsi que pour
+les applications de réalité augmentée comme la pose de
+masque ou de maquillage virtuel. L’avènement de l’ap-
+prentissage profond a permis des progrès très importants
+dans ce domaine, y compris sur les corpus non contraints
+(in-the-wild). Nous présentons ici un état de l’art cen-"
+7697295ee6fc817296bed816ac5cae97644c2d5b,Detecting and Recognizing Human-Object Interactions,"Detecting and Recognizing Human-Object Interactions
+Georgia Gkioxari Ross Girshick
+Piotr Doll´ar Kaiming He
+Facebook AI Research (FAIR)"
+76a0016ce19363ef8f7ba5c3964c4a0c29b608ca,ModaNet: A Large-scale Street Fashion Dataset with Polygon Annotations,"ModaNet: A Large-scale Street Fashion Dataset with Polygon
+Annotations
+Shuai Zheng
+eBay Inc.
+San Jose, California
+M. Hadi Kiapour
+eBay Inc.
+San Francisco, California
+Fan Yang
+eBay Inc.
+San Jose, California
+Robinson Piramuthu
+eBay Inc.
+San Francisco, California"
+7636f94ddce79f3dea375c56fbdaaa0f4d9854aa,Robust Facial Expression Recognition Using a Smartphone Working against Illumination Variation,"Appl. Math. Inf. Sci. 6 No. 2S pp. 403S-408S (2012)
+An International Journal
+© 2012 NSP
+Applied Mathematics & Information Sciences
+Robust Facial Expression Recognition Using
+Smartphone Working against Illumination Variation
+2012 NSP
+Natural Sciences Publishing Cor.
+Kyoung-Sic Cho1, In-Ho Choi1 and Yong-Guk Kim1
+Department of Computer Engineering, Sejong University, 98 Kunja-Dong, Kwangjin-Gu, Seoul, Korea
+Corresponding author: Email:
+Received June 22, 2010; Revised March 21, 2011; Accepted 11 June 2011
+Published online: 1 January 2012"
+7638cb16631fbcdf621aaf392fec5108e6fa9f47,On Nonrigid Shape Similarity and Correspondence,"Alon Shtern and Ron Kimmel
+November 25, 2013
+trinsically symmetric halves of a human face were found by mapping the shape (left) to itself.
+Textures from two faces (middle) were transferred to each half (right)."
+1ca9ab2c1b5e8521cba20f78dcf1895b3e1c36ac,"Explorer "" Here ' s looking at you , kid","""Here's looking at you, kid""
+Citation for published version:
+Marin-Jimenez, M, Zisserman, A & Ferrari, V 2011, ""Here's looking at you, kid"": Detecting people looking at
+each other in videos. in Proceedings of the British Machine Vision Conference (BMVC): Dundee, September
+011. BMVA Press, pp. 22.1-22.12. DOI: 10.5244/C.25.22
+Digital Object Identifier (DOI):
+0.5244/C.25.22
+Link:
+Link to publication record in Edinburgh Research Explorer
+Document Version:
+Peer reviewed version
+Published In:
+Proceedings of the British Machine Vision Conference (BMVC)
+General rights
+Copyright for the publications made accessible via the Edinburgh Research Explorer is retained by the author(s)
+nd / or other copyright owners and it is a condition of accessing these publications that users recognise and
+bide by the legal requirements associated with these rights.
+Take down policy
+The University of Edinburgh has made every reasonable effort to ensure that Edinburgh Research Explorer
+ontent complies with UK legislation. If you believe that the public display of this file breaches copyright please"
+1c80bc91c74d4984e6422e7b0856cf3cf28df1fb,Hierarchical Adaptive Structural SVM for Domain Adaptation,"Noname manuscript No.
+(will be inserted by the editor)
+Hierarchical Adaptive Structural SVM for Domain Adaptation
+Jiaolong Xu · Sebastian Ramos · David V´azquez · Antonio M. L´opez
+Received: date / Accepted: date"
+1ce3a91214c94ed05f15343490981ec7cc810016,Exploring photobios,"Exploring Photobios
+Ira Kemelmacher-Shlizerman1
+Eli Shechtman2
+Rahul Garg1,3
+Steven M. Seitz1,3
+University of Washington∗
+Adobe Systems†
+Google Inc."
+1cd9dba357e05c9be0407dc5d477fd528cfeb79b,Model-driven Simulations for Deep Convolutional Neural Networks,"Model-driven Simulations for Deep Convolutional
+Neural Networks
+V S R Veeravasarapu1, Constantin Rothkopf2, Visvanathan Ramesh1
+Center for Cognition and Computing, Goethe University, Frankfurt.
+Cognitive Science Center, Technical University, Darmstadt."
+1cb68fa98a0d9871a394cd0035488df167b9c2cf,RedNet: Residual Encoder-Decoder Network for indoor RGB-D Semantic Segmentation,"RedNet: Residual Encoder-Decoder Network for
+indoor RGB-D Semantic Segmentation
+Jindong Jiang, Lunan Zheng, Fei Luo, and Zhijun Zhang
+The School of Automation Science and Engineering, South China University of
+Technology, Guangzhou 510640, China"
+1cf6bc0866226c1f8e282463adc8b75d92fba9bb,"Ask, Attend and Answer: Exploring Question-Guided Spatial Attention for Visual Question Answering","Ask, Attend and Answer: Exploring Question-Guided Spatial Attention for
+Visual Question Answering
+Huijuan Xu
+UMass Lowell
+Kate Saenko
+UMass Lowell"
+1c9333bcf523388d75f852e0689b0e7f5a04faa4,Person Part Segmentation based on Weak Supervision,"JIANG, CHI: PERSON PART SEGMENTATION BASED ON WEAK SUPERVISION 1
+Person Part Segmentation based on Weak
+Supervision
+Yalong Jiang1 1Department of Electronic and Information
+Engineering
+Zheru Chi1 The Hong Kong Polytechnic University, HK"
+1c26e415c7eae2f3b0f49e0519f0d985ec661c63,Intersection of Longest Paths in Graph Theory and Predicting Performance in Facial Recognition,"Georgia State University
+ScholarWorks Georgia State University
+Mathematics Dissertations
+Department of Mathematics and Statistics
+-6-2017
+Intersection of Longest Paths in Graph Theory and
+Predicting Performance in Facial Recognition
+Amy Yates
+Follow this and additional works at: http://scholarworks.gsu.edu/math_diss
+Recommended Citation
+Yates, Amy, ""Intersection of Longest Paths in Graph Theory and Predicting Performance in Facial Recognition."" Dissertation, Georgia
+State University, 2017.
+http://scholarworks.gsu.edu/math_diss/34
+This Dissertation is brought to you for free and open access by the Department of Mathematics and Statistics at ScholarWorks Georgia State
+University. It has been accepted for inclusion in Mathematics Dissertations by an authorized administrator of ScholarWorks Georgia State
+University. For more information, please contact"
+1cb95f013ec3e78acdda6ac6cfdb362ae6a5ceac,Nonnegative matrix factorization for segmentation analysis,"Nonnegative matrix factorization for
+segmentation analysis
+Roman Sandler
+Technion - Computer Science Department - Ph.D. Thesis PHD-2010-09 - 2010"
+1cfe3533759bf95be1fce8ce1d1aa2aeb5bfb4cc,Recognition of Facial Gestures Based on Support Vector Machines,"Recognition of Facial Gestures based on Support
+Vector Machines
+Attila Fazekas and Istv(cid:19)an S(cid:19)anta
+Faculty of Informatics, University of Debrecen, Hungary
+H-4010 Debrecen P.O.Box 12."
+1ce4587e27e2cf8ba5947d3be7a37b4d1317fbee,Deep fusion of visual signatures for client-server facial analysis,"Deep fusion of visual signatures
+for client-server facial analysis
+Binod Bhattarai
+Normandie Univ, UNICAEN,
+ENSICAEN, CNRS, GREYC
+Gaurav Sharma
+Computer Sc. & Engg.
+IIT Kanpur, India
+Frederic Jurie
+Normandie Univ, UNICAEN,
+ENSICAEN, CNRS, GREYC
+Facial analysis is a key technology for enabling human-
+machine interaction.
+In this context, we present a client-
+server framework, where a client transmits the signature of
+face to be analyzed to the server, and, in return, the server
+sends back various information describing the face e.g. is the
+person male or female, is she/he bald, does he have a mus-
+tache, etc. We assume that a client can compute one (or a
+ombination) of visual features; from very simple and ef‌f‌i-"
+1cd0bc067e66bc1f66a73b401a4a470e43e4bb9e,Houdini: Fooling Deep Structured Visual and Speech Recognition Models with Adversarial Examples,"Houdini: Fooling Deep Structured Visual and Speech
+Recognition Models with Adversarial Examples
+Moustapha Cisse
+Facebook AI Research
+Natalia Neverova*
+Facebook AI Research"
+1cee733ee31e245dac4655a870fd9226163a52b5,Bidirectional Beam Search: Forward-Backward Inference in Neural Sequence Models for Fill-in-the-Blank Image Captioning,"Bidirectional Beam Search: Forward-Backward Inference in
+Neural Sequence Models for Fill-in-the-Blank Image Captioning
+Qing Sun
+Virginia Tech
+Stefan Lee
+Virginia Tech
+Dhruv Batra
+Georgia Tech"
+1cd584f519d9cd730aeef1b1d87f7e2e82b4de59,A fully automatic face recognition system using a combined audio - visual approach ∗,"A fully automatic face recognition system using a combined
+udio-visual approach ∗
+Alberto Albiol†, Luis Torres†, and Edward J. Delp? †
+Communications Department
+Technical University of Valencia, Valencia, Spain
+Department of Signal Theory & Communications
+Technical University of Catalonia, Barcelona, Spain
+?School of Electrical and Computer Engineering
+Purdue University West Lafayette, IN 47907-1285
+Corresponding Author:
+Dr. Alberto Albiol
+Communications Department
+Technical University of Valencia, Valencia, Spain
+6022 Valencia (Spain)
+Telephone: +34 96 387 97 38
+Fax: +34 96 387 73 09
+Email:"
+1c30bb689a40a895bd089e55e0cad746e343d1e2,Learning Spatiotemporal Features with 3D Convolutional Networks,"Learning Spatiotemporal Features with 3D Convolutional Networks
+Du Tran1
+, Lubomir Bourdev1, Rob Fergus1, Lorenzo Torresani2, Manohar Paluri1
+Facebook AI Research, 2Dartmouth College"
+1c521ac6e68436f6c6aad3c0eb7ffa557fe25b0d,Modeling Image Patches with a Generic Dictionary of Mini-epitomes,"Modeling Image Patches with a Generic Dictionary of Mini-Epitomes
+George Papandreou
+TTI Chicago
+Liang-Chieh Chen
+UC Los Angeles
+Alan L. Yuille
+UC Los Angeles"
+1cc3c5f242d885738e9349a91d4beba82ae106a6,Scalable nonconvex inexact proximal splitting,"Scalable nonconvex inexact proximal splitting
+Suvrit Sra
+Max Planck Institute for Intelligent Systems
+72076 T¨ubigen, Germany"
+1cf01968594ae59d28b12c9a35fc43d944563071,Low-Level Features for Image Retrieval Based on Extraction of Directional Binary Patterns and Its Oriented Gradients Histogram,"Computer Applications: An International Journal (CAIJ), Vol.2, No.1, February 2015
+LOW-LEVEL FEATURES FOR IMAGE RETRIEVAL BASED
+ON EXTRACTION OF DIRECTIONAL BINARY PATTERNS
+AND ITS ORIENTED GRADIENTS HISTOGRAM
+Nagaraja S. and Prabhakar C.J.
+Department of P.G. Studies and Research in Computer Science
+Kuvempu University, India"
+1c3073b57000f9b6dbf1c5681c52d17c55d60fd7,Direction de thèse:,"THÈSEprésentéepourl’obtentiondutitredeDOCTEURDEL’ÉCOLENATIONALEDESPONTSETCHAUSSÉESSpécialité:InformatiqueparCharlotteGHYSAnalyse,Reconstruction3D,&AnimationduVisageAnalysis,3DReconstruction,&AnimationofFacesSoutenancele19mai2010devantlejurycomposéde:Rapporteurs:MajaPANTICDimitrisSAMARASExaminateurs:MichelBARLAUDRenaudKERIVENDirectiondethèse:NikosPARAGIOSBénédicteBASCLE"
+1cbc189a4484cd2b1371798bae2ff50c0442ce60,A Hybrid Loss for Multiclass and Structured Prediction,"IEEE TRANSACTIONS ON PATTERN ANALYSIS & MACHINE INTELLIGENCE, FINAL DRAFT, FEB. 2014
+A Hybrid Loss for Multiclass
+nd Structured Prediction
+Qinfeng Shi, Mark Reid, Tiberio Caetano, Anton van den Hengel and Zhenhua Wang"
+1cf29a0131211079fc73908ecf211ee78f090ad9,Regionlets for Generic Object Detection,"Regionlets for Generic Object Detection
+Xiaoyu Wang Ming Yang
+Shenghuo Zhu
+Yuanqing Lin
+NEC Laboratories America, Inc."
+1c1a24169be56e01b0e36e260f49025260a5c7e7,A Deep Compositional Framework for Human-like Language Acquisition in Virtual Environment,"A Deep Compositional Framework for Human-like
+Language Acquisition in Virtual Environment
+Haonan Yu, Haichao Zhang, and Wei Xu
+Baidu Research - Institue of Deep Learning
+Sunnyvale, CA 94089"
+1c93b48abdd3ef1021599095a1a5ab5e0e020dd5,A Compositional and Dynamic Model for Face Aging,"JOURNAL OF LATEX CLASS FILES, VOL. *, NO. *, JANUARY 2009
+A Compositional and Dynamic Model for Face Aging
+Jinli Suo , Song-Chun Zhu , Shiguang Shan and Xilin Chen"
+1cc0183d8fbef098d29b6b5f621745ff099f6c6c,Joint Discovery of Object States and Manipulation Actions,"Joint Discovery of Object States and Manipulation Actions
+Jean-Baptiste Alayrac∗ †
+Josef Sivic∗ † ‡
+Ivan Laptev∗ †
+Simon Lacoste-Julien§"
+1c90ad1e264c29a8d180de47373257a5f1b5aa57,Generalizing Image Captions for Image-Text Parallel Corpus,"Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics, pages 790–796,
+Sofia, Bulgaria, August 4-9 2013. c(cid:13)2013 Association for Computational Linguistics
+house being pulled by a boat.” “I saw her in the light of her reading lamp and sneaked back to her door with the camera.” “Sections of the bridge sitting in the Dyer Construction yard south of Cabelas Driver.” Circumstantial information that is not visually present Visually relevant, but with overly extraneous details Visually truthful, but for an uncommon situation Figure1:Examplesofcaptionsthatarenotreadilyapplicabletoothervisuallysimilarimages.textfromtheretrievedsamplestothequeryim-age(e.g.Farhadietal.(2010),Ordonezetal.(2011),Kuznetsovaetal.(2012)).Otherwork(e.g.FengandLapata(2010a),FengandLapata(2010b))usescomputervisiontobiassummariza-tionoftextassociatedwithimagestoproducede-scriptions.Alloftheseapproachesrelyonex-istingtextthatdescribesvisualcontent,butmanytimesexistingimagedescriptionscontainsignifi-cantamountsofextraneous,non-visual,orother-wisenon-desirablecontent.Thegoalofthispaperistodeveloptechniquestoautomaticallycleanupvisuallydescriptivetexttomakeitmoredirectlyusableforapplicationsexploitingtheconnectionbetweenimagesandlanguage.Asaconcreteexample,considerthefirstimageinFigure1.Thiscaptionwaswrittenbythephotoownerandthereforecontainsinformationrelatedtothecontextofwhenandwherethephotowastaken.Objectssuchas“lamp”,“door”,“camera”arenotvisuallypresentinthephoto.Thesecondimageshowsasimilarbutsomewhatdifferentis-sue.Itscaptiondescribesvisibleobjectssuchas“bridge”and“yard”,but“CabelasDriver”areoverlyspecificandnotvisuallydetectable.The"
+1c51aeece7a3c30302ebd83bdcaa65df0bfc48fe,Unsupervised Video Indexing based on Audiovisual Characterization of Persons. (Indexation vidéo non-supervisée basée sur la caractérisation des personnes),"Unsupervised Video Indexing based on Audiovisual
+Characterization of Persons
+Elie El Khoury
+To cite this version:
+Elie El Khoury. Unsupervised Video Indexing based on Audiovisual Characterization of Per-
+sons. Human-Computer Interaction [cs.HC]. Universit´e Paul Sabatier - Toulouse III, 2010.
+English. <tel-00515424v3>
+HAL Id: tel-00515424
+https://tel.archives-ouvertes.fr/tel-00515424v3
+Submitted on 7 Sep 2010
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destin´ee au d´epˆot et `a la diffusion de documents
+scientifiques de niveau recherche, publi´es ou non,
+´emanant des ´etablissements d’enseignement et de"
+1cbf3b90065e8a410668ed914e9d03a94a4d94aa,Visual-Inertial Semantic Scene Representation,"Visual-Inertial Semantic Scene Representation
+UCLA TR CSD160005
+Stefano Soatto
+May 20, 2016"
+1c7e1248ce254b3a9a0b6fef9e37d37620fc8aa3,Dynamic Image-to-Class Warping for Occluded Face Recognition,"Dynamic Image-to-Class Warping for Occluded
+Face Recognition
+Xingjie Wei, Chang-Tsun Li, Senior Member, IEEE, Zhen Lei, Member, IEEE,
+Dong Yi, and Stan Z. Li, Fellow, IEEE"
+1cdf8790a675037579bbe2ee4f39f731f7672fae,Pivot Correlational Neural Network for Multimodal Video Categorization,"Pivot Correlational Neural Network for
+Multimodal Video Categorization
+Sunghun Kang1[0000−0003−2632−7522], Junyeong Kim1[0000−0002−7871−9627],
+Hyunsoo Choi2, Sungjin Kim2, and Chang D. Yoo1
+KAIST, Daejeon, South Korea
+{sunghun.kang, junyeong.kim,
+SAMSUNG ELECTRONICS CO.,LTD, Seoul, South Korea
+{hsu.choi,"
+1ca40e1d0ae377296ac6804c81c1e5bcbc5475c8,RVM-Based Human Action Classification in Crowd through Projection and Star Skeletonization,"Hindawi Publishing Corporation
+EURASIP Journal on Image and Video Processing
+Volume 2009, Article ID 164019, 12 pages
+doi:10.1155/2009/164019
+Research Article
+RVM-Based Human Action Classification in Crowd through
+Projection and Star Skeletonization
+B. Yogameena, S. Veeralakshmi, E. Komagal, S. Raju, and V. Abhaikumar
+Department of Electronics and Communication Engineering, Thiagarajar College of Engineering,
+Madurai 625015, Tamil Nadu, India
+Correspondence should be addressed to B. Yogameena,
+Received 1 February 2009; Revised 17 May 2009; Accepted 26 August 2009
+Recommended by Amit Roy-Chowdhury
+Detection of abnormal human actions in the crowd has become a critical problem in video surveillance applications like terrorist
+ttacks. This paper proposes a real-time video surveillance system which is capable of classifying normal and abnormal actions of
+individuals in a crowd. The abnormal actions of human such as running, jumping, waving hand, bending, walking and fighting
+with each other in a crowded environment are considered. In this paper, Relevance Vector Machine (RVM) is used to classify
+the abnormal actions of an individual in the crowd based on the results obtained from projection and skeletonization methods.
+Experimental results on benchmark datasets demonstrate that the proposed system is robust and ef‌f‌icient. A comparative study of
+lassification accuracy between Relevance Vector Machine and Support Vector Machine (SVM) classification is also presented."
+1cdff2cd2e3cf8dbeb8f0a42df0cdc77c953dc81,The Emergence of Visual Crowdsensing: Challenges and Opportunities,"The Emergence of Visual Crowdsensing:
+Challenges and Opportunities
+Bin Guo, Senior Member, IEEE, Qi Han,Member, IEEE , Huihui Chen, Longfei Shangguan, Member, IEEE,
+Zimu Zhou, Member, IEEE, and Zhiwen Yu, Senior Member, IEEE"
+1c1e4415f0acf5d536c9579117d326471f0b678b,Temporal Model Adaptation for Person Re-identification,"Temporal Model Adaptation for
+Person Re-Identification
+Niki Martinel1,3, Abir Das2,
+Christian Micheloni1, and Amit K. Roy-Chowdhury3
+University of Udine, 33100 Udine, Italy
+University of Massatchussets Lowell, 01852 Lowell, MA, USA
+University of California Riverside, 92507 Riverside, CA, USA"
+1ca155a4b65ae19ccb73df48516e4775770a382c,Action Representations in Robotics: A Taxonomy and Systematic Classification,"Action representations in robotics: A
+taxonomy and systematic classification
+Journal Title
+XX(X):1–32
+(cid:13)The Author(s) 2016
+Reprints and permission:
+sagepub.co.uk/journalsPermissions.nav
+DOI: 10.1177/ToBeAssigned
+www.sagepub.com/
+Philipp Zech, Erwan Renaudo, Simon Haller, Xiang Zhang and Justus Piater"
+1c0e8c3fb143eb5eb5af3026eae7257255fcf814,Weakly Supervised Deep Detection Networks,"GOALS
+Goal: Learn object detectors using only image-level labels
+Why weakly supervised learning?
+• annotations are costly
+• CNN training is data-hungry
+Hypothesis: Pre-trained CNNs should contain meaningful
+representations of data such as objects and object parts.
+Thus we can exploit this implicit knowledge to learn localizing
+objects.
+Classification stream
+𝑹𝟏 𝑹𝟐 𝑹𝟑 𝑹𝟒
+0.52 0.47 0.04 0.93
+horse
+person 0.48 0.53 0.96 0.07
+Normalize over classes
+Detection stream
+𝑹𝟏 𝑹𝟐 𝑹𝟑 𝑹𝟒
+horse
+0.04 0.01 0.07 0.88
+person 0.02 0.03 0.91 0.04"
+1c400dcd6c3e54498d9a7bd5aa4c456079a9d236,Sketch and Validate for Big Data Clustering,"Sketch and Validate for Big Data Clustering
+Panagiotis A. Traganitis, Konstantinos Slavakis, Senior Member, IEEE, and Georgios B. Giannakis, Fellow, IEEE"
+1c6e22516ceb5c97c3caf07a9bd5df357988ceda,Copycat CNN: Stealing Knowledge by Persuading Confession with Random Non-Labeled Data,"NetworkCNNimageslabelsFakeDatasetimages24132labelsTarget NetworkCNNimageslabelsOriginalDatasetFakeDatasetFig.1:Ontheleft,thetargetnetworkistrainedwithanoriginal(confidential)datasetandisservedpubliclyasanAPI,receivingimagesasinputandprovidingclasslabelsasoutput.Ontheright,itispresentedtheprocesstogetstolenlabelsandtocreateafakedataset:randomnaturalimagesaresenttotheAPIandthelabelsareobtained.Afterthat,thecopycatnetworkistrainedusingthisfakedataset.cloud-basedservicestocustomersallowingthemtooffertheirownmodelsasanAPI.Becauseoftheresourcesandmoneyinvestedincreatingthesemodels,itisinthebestinterestofthesecompaniestoprotectthem,i.e.,toavoidthatsomeoneelsecopythem.Someworkshavealreadyinvestigatedthepossibilityofcopyingmodelsbyqueryingthemasablack-box.In[1],forexample,theauthorsshowedhowtoperformmodelextractionattackstocopyanequivalentornear-equivalentmachinelearningmodel(decisiontree,logisticregression,SVM,andmultilayerperceptron),i.e.,onethatachievescloseto100%agreementonaninputspaceofinterest.In[2],theauthorsevaluatedtheprocessofcopyingaNaiveBayesandSVMclassifierinthecontextoftextclassification.Bothworksfocusedongeneralclassifiersandnotondeepneuralnetworksthatrequirelargeamountsofdatatobetrainedleavingthequestionofwhetherdeepmodelscanbeeasilycopied.Althoughthesecondusesdeeplearningtostealtheclassifiers,itdoesnottrytouseDNNstostealfromdeepmodels.Additionally,theseworksfocusoncopyingbyqueryingwithproblemdomaindata.Inrecentyears,researchershavebeenexploringsomeintriguingpropertiesofdeepneuralnetworks[3],[4].More©2018IEEE.Personaluseofthismaterialispermitted.PermissionfromIEEEmustbeobtainedforallotheruses,inanycurrentorfuturemedia,includingreprinting/republishingthismaterialforadvertisingorpromotionalpurposes,creatingnewcollectiveworks,forresaleorredistributiontoserversorlists,orreuseofanycopyrightedcomponentofthisworkinotherworks."
+82d5656c74362d6c5c5fd889fc48f7816bbb033a,Contemplating Visual Emotions: Understanding and Overcoming Dataset Bias,"Contemplating Visual Emotions: Understanding
+nd Overcoming Dataset Bias
+Rameswar Panda1, Jianming Zhang2, Haoxiang Li3, Joon-Young Lee2, Xin
+Lu2, and Amit K. Roy-Chowdhury1
+Department of ECE, UC Riverside.
+Adobe Research.
+Aibee."
+825f56ff489cdd3bcc41e76426d0070754eab1a8,Making Convolutional Networks Recurrent for Visual Sequence Learning,"Making Convolutional Networks Recurrent for Visual Sequence Learning
+Xiaodong Yang Pavlo Molchanov Jan Kautz
+NVIDIA"
+82224858677af47b8c836df701eeea8fffaec924,Paper On Person Identification System Using Multi - Model Biometric Based On Face,"International Journal of Science, Engineering and Technology Research (IJSETR)
+Volume 6, Issue 4, April 2017, ISSN: 2278 -7798
+Review Paper On Person Identification System
+Using Multi-Model Biometric Based On Face
+CHETAN JAMDAR1, AMOL BOKE2
+Chetan Jamdar, M. Tech Student, Dept Of ECE, G.H. Raisoni Academy Of Engg. And Technology, Nagpur,
+Maharashtra, India.
+Guide details: Amol Boke, Assistant Professor, Dept Of ECE, G.H. Raisoni Academy Of Engg. And Technology,
+Nagpur, Maharashtra, India"
+82d2af2ffa106160a183371946e466021876870d,A Novel Space-Time Representation on the Positive Semidefinite Con for Facial Expression Recognition,"A Novel Space-Time Representation on the Positive Semidefinite Cone
+for Facial Expression Recognition
+Anis Kacem1, Mohamed Daoudi1, Boulbaba Ben Amor1, and Juan Carlos Alvarez-Paiva2
+IMT Lille Douai, Univ. Lille, CNRS, UMR 9189 – CRIStAL –
+Centre de Recherche en Informatique Signal et Automatique de Lille, F-59000 Lille, France
+Univ. Lille, CNRS, UMR 8524, Laboratoire Paul Painlev´e, F-59000 Lille, France."
+82a2a523c4488c34b486c920046f4ebbf8ea828e,Vision-Based System for Human Detection and Tracking in Indoor Environment,"Author manuscript, published in ""International Journal of Social Robotics 2, 1 (2010) 41-52""
+DOI : 10.1007/s12369-009-0040-4"
+82eff71af91df2ca18aebb7f1153a7aed16ae7cc,MSU-AVIS dataset : Fusing Face and Voice Modalities for Biometric Recognition in Indoor Surveillance Videos,"MSU-AVIS dataset:
+Fusing Face and Voice Modalities for Biometric
+Recognition in Indoor Surveillance Videos
+Anurag Chowdhury*, Yousef Atoum+, Luan Tran*, Xiaoming Liu*, Arun Ross*
+*Michigan State University, USA
++Yarmouk University, Jordan"
+82d3dc1dd35e7d2d13bc43614b575dce61b0aba3,Head Pose Estimation from Passive Stereo Images,"Head Pose Estimation
+from Passive Stereo Images
+M. D. Breitenstein1, J. Jensen2, C. Høilund2, T. B. Moeslund2, L. Van Gool1
+ETH Zurich, Switzerland1 Aalborg University, Denmark2"
+820b1349751d7e932b74c3de94b96557fa2534cf,BAM! The Behance Artistic Media Dataset for Recognition Beyond Photography,"BAM! The Behance Artistic Media Dataset for Recognition Beyond Photography
+Michael J. Wilber1,2
+Chen Fang1
+John Collomosse1
+Adobe Research
+Aaron Hertzmann1
+Hailin Jin1
+Serge Belongie2
+Cornell Tech"
+82ff25b6e7749e0210b2f8d5a0666f3499745154,Adaptive Multiple Kernels with SIR-Particle Filter Based Multi Human Tracking for Occluded Environment,"International Journal of Computational Intelligence and Informatics, Vol. 3: No. 4, January - March 2014
+Adaptive Multiple Kernels with SIR-Particle Filter
+Based Multi Human Tracking for Occluded
+Environment
+T Karpagavalli
+Department of Electronics and Communication
+KLN College of Information Technology
+Sivagangai, Tamilnadu, India
+S Appavu alias Balamurugan
+Department of Information Technology
+KLN College of Information Technology
+Sivagangai, Tamilnadu, India"
+82c303cf4852ad18116a2eea31e2291325bc19c3,Fusion Based FastICA Method: Facial Expression Recognition,"Journal of Image and Graphics, Volume 2, No.1, June, 2014
+Fusion Based FastICA Method: Facial Expression
+Recognition
+Humayra B. Ali and David M W Powers
+Computer Science, Engineering and Mathematics School, Flinders University, Australia
+Email: {ali0041,"
+82fae97673a353271b1d4c001afda1af6ef6dc23,Semantic contours from inverse detectors,"Semantic Contours from Inverse Detectors∗
+Bharath Hariharan1, Pablo Arbel´aez1, Lubomir Bourdev1
+, Subhransu Maji1 and Jitendra Malik1
+EECS, U.C. Berkeley, Berkeley, CA 94720
+Adobe Systems, Inc., 345 Park Ave, San Jose, CA 95110
+{bharath2, arbelaez, lbourdev, smaji,"
+82ec2ff0bef7db7e5ea48c42336200fb0e44dbf9,Reconstruction of 3D Human Facial Images Using Partial Differential Equations,"Reconstruction of 3D Human Facial Images
+Using Partial Differential Equations
+University of Bradford/EIMC Department, Richmond Road, BD7 1DP, Bradford, UK
+Email: {E.Elyan,
+Eyad Elyan, Hassan Ugail
+(PDE). Here"
+8210fd10ef1de44265632589f8fc28bc439a57e6,Single Sample Face Recognition via Learning Deep Supervised Autoencoders,"Single Sample Face Recognition via Learning Deep
+Supervised Auto-Encoders
+Shenghua Gao, Yuting Zhang, Kui Jia, Jiwen Lu, Yingying Zhang"
+82ab819815c86e85128a2a055a0c0fcd1146b696,Sampled Image Tagging and Retrieval Methods on User Generated Content,[cs.CV] 23 Nov 2016
+82f6dad08432a5f1b737ba91dd002ff1f89170f7,c○2013 The Association for Computational Linguistics Order copies of this and other ACL proceedings from:,"ACL201351stAnnualMeetingoftheAssociationforComputationalLinguisticsProceedingsoftheConferenceSystemDemonstrationsAugust4-9,2013Sofia,Bulgaria"
+82a4a35b2bae3e5c51f4d24ea5908c52973bd5be,Real-time emotion recognition for gaming using deep convolutional network features,"Real-time emotion recognition for gaming using
+deep convolutional network features
+S´ebastien Ouellet"
+8239e4a37825979f66ff0419ccd50a08aebfbadf,Tracing the Colors of Clothing in Paintings with Image Analysis,"Tracing the Colors of Clothing in Paintings with
+Image Analysis
+Cihan Sarı1, Albert Ali Salah2, and Alkım Almıla Akda˘g Salah3
+Bo˘gazi¸ci University, Systems and Control Engineering,
+Bo˘gazi¸ci University, Computer Engineering,
+{cihan.sari,
+Istanbul S¸ehir University, College of Communications
+Introduction
+The history of color is full of instances of how and why certain colors become to
+e associated with certain concepts, ideas, politics, status and power. Sometimes
+the connotations occur arbitrarily, like in the instance when pink was assigned
+to baby girls, and blue started to be associated with baby boys at the turn of
+9th Century [Paoletti, 1987]. Sometimes though, color associations have very
+tangible reasons, such as in the case of Marian blue and why over the centuries
+it was reserved only for painting Virgin Mary. The reason is to be found in the
+scarcity of the rock lapis lazuli -even more valuable than gold-, from which the
+lue pigments were extracted. Individual colors have convoluted and contested
+histories, since they have been attached to many symbols at any given time.
+John Gage, an art historian who has devoted 30 years of research on the topic
+of color, explains the conundrum of what he terms as “politics of color” in a"
+82a610a59c210ff77cfdde7fd10c98067bd142da,Human attention and intent analysis using robust visual cues in a Bayesian framework,"UC San Diego
+UC San Diego Electronic Theses and Dissertations
+Title
+Human attention and intent analysis using robust visual cues in a Bayesian framework
+Permalink
+https://escholarship.org/uc/item/1cb8d7vw
+Author
+McCall, Joel Curtis
+Publication Date
+006-01-01
+Peer reviewed|Thesis/dissertation
+eScholarship.org
+Powered by the California Digital Library
+University of California"
+825bfa844e4493f205f66782c6ca68aa69018d9c,In-Place Activated BatchNorm for Memory-Optimized Training of DNNs,"In-Place Activated BatchNorm for Memory-Optimized Training of DNNs
+Samuel Rota Bulò, Lorenzo Porzi, Peter Kontschieder
+Mapillary Research"
+82a922e775ec3a83d2d5637030860f587697ae42,Dense Multiperson Tracking with Robust Hierarchical Linear Assignment,"Dense Multiperson Tracking with Robust Hierarchical Linear
+Assignment
+McLaughlin, N., Martinez-del-Rincon, J., & Miller, P. (2015). Dense Multiperson Tracking with Robust
+https://doi.org/10.1109/TCYB.2014.2348314
+Published in:
+Document Version:
+Peer reviewed version
+Queen's University Belfast - Research Portal:
+Link to publication record in Queen's University Belfast Research Portal
+Publisher rights
+Copyright 2014 IEEE.
+Personal use of this material is permitted. Permission from IEEE must be obtained for all other users, including reprinting/ republishing this
+material for advertising or promotional purposes, creating new collective works for resale or redistribution to servers or lists, or reuse of any
+opyrighted components of this work in other works.
+General rights
+Copyright for the publications made accessible via the Queen's University Belfast Research Portal is retained by the author(s) and / or other
+opyright owners and it is a condition of accessing these publications that users recognise and abide by the legal requirements associated
+with these rights.
+Take down policy
+The Research Portal is Queen's institutional repository that provides access to Queen's research output. Every effort has been made to"
+82485c89a6b48077b03b65a774fd5768ea768d4d,Unsupervised Adaptive Re-identification in Open World Dynamic Camera Networks,"Unsupervised Adaptive Re-identification in Open World Dynamic Camera
+Networks
+Rameswar Panda1,∗ Amran Bhuiyan2,∗,† Vittorio Murino2 Amit K. Roy-Chowdhury1
+Department of ECE
+Pattern Analysis and Computer Vision (PAVIS)
+UC Riverside
+Istituto Italiano di Tecnologia, Italy"
+829f390b3f8ad5856e7ba5ae8568f10cee0c7e6a,A Robust Rotation Invariant Multiview Face Detection in Erratic Illumination Condition,"International Journal of Computer Applications (0975 – 8887)
+Volume 57– No.20, November 2012
+A Robust Rotation Invariant Multiview Face Detection in
+Erratic Illumination Condition
+G.Nirmala Priya
+Associate Professor, Department of ECE
+Sona College of Technology
+Salem"
+82f4e8f053d20be64d9318529af9fadd2e3547ef,Technical Report: Multibiometric Cryptosystems,"Technical Report:
+Multibiometric Cryptosystems
+Abhishek Nagar, Student Member, IEEE, Karthik Nandakumar, Member, IEEE, and Anil K. Jain, Fellow, IEEE"
+82319857563e7b578bcb66ec4df1c85decd6a624,Cooperative Tracking of Cyclists Based on Smart Devices and Infrastructure,"Cooperative Tracking of Cyclists Based on
+Smart Devices and Infrastructure
+G¨unther Reitberger, Maarten Bieshaar, Stefan Zernetsch, Konrad Doll, Bernhard Sick, and Erich Fuchs"
+828b73e8a4d539eeae82601b5f5a4392818c6430,Long-Term Tracking by Decision Making,"UNIVERSITY OF CALIFORNIA,
+IRVINE
+Long-Term Tracking by Decision Making
+DISSERTATION
+submitted in partial satisfaction of the requirements
+for the degree of
+DOCTOR OF PHILOSOPHY
+in Computer Science
+James Supanˇciˇc, III
+Dissertation Committee:
+Deva Ramanan, Chair
+Charless Fowlkes
+Alexander Ihler"
+821ba3eba1e36a29cc482f5378f4a0d0f6893159,Unsupervised Domain Adaptation for Learning Eye Gaze from a Million Synthetic Images: An Adversarial Approach,"Unsupervised Domain Adaptation for Learning Eye Gaze from a
+Million Synthetic Images: An Adversarial Approach
+Avisek Lahiri∗
+Abhinav Agarwalla
+Prabir Kumar Biswas
+Dept. of E&ECE, IIT Kharagpur
+Dept. of E&ECE, IIT Kharagpur
+Dept. of Mathematics, IIT Kharagpur"
+82d781b7b6b7c8c992e0cb13f7ec3989c8eafb3d,Robust Facial Expression Recognition Using a State-based Model of Spatially-localized Facial,"REFERENCES
+Adler A., Youmaran R. and Loyka S., “Towards a Measure of
+Biometric Information”, Canadian Conference on Electrical and
+Computer Engineering, pp. 210-213, 2006.
+Ahmed A.A.E. and Traore I., “Anomaly Intrusion Detection Based on
+Biometrics”, IEEE Workshop on Information Assurance, United States
+Military Academy, West Point, New York, pp. 452-458, 2005.
+Ahmed A.A.E. and Traore I., “Detecting Computer Intrusions using
+Behavioural Biometrics”, Third Annual Conference on Privacy,
+Security and Trust, St. Andrews, New Brunswick, Canada, pp. 1-8,
+005.
+Al-Zubi S., Bromme A. and Tonnies K., “Using an Active Shape
+Structural Model for Biometric Sketch Recognition”, Proceedings of
+DAGM, Magdeburg, Germany, Vol. 2781, pp. 187-195, 2003.
+Angle S., Bhagtani R. and Chheda H., “Biometrics: a Further Echelon
+of Security”, The First UAE International Conference on Biological
+nd Medical Physics, pp. 1-4, 2005.
+Avraam Kasapis., “MLPs and Pose, Expression Classification”,
+Proceedings of UNiS Report, pp. 1-87, 2003.
+Banikazemi M., Poff D. and Abali B., “Storage-based Intrusion"
+82088af865626e2340db12b2e42f3a258053d593,Learning Generative ConvNets via Multi-grid Modeling and Sampling,"Learning Generative ConvNets via Multi-grid Modeling and Sampling
+Ruiqi Gao1∗, Yang Lu2∗, Junpei Zhou3, Song-Chun Zhu1, Ying Nian Wu1
+University of California, Los Angeles, USA, 2 Amazon, 3 Zhejiang University, China
+{sczhu,"
+82417d8ec8ac6406f2d55774a35af2a1b3f4b66e,Some Faces are More Equal than Others: Hierarchical Organization for Accurate and Efficient Large-Scale Identity-Based Face Retrieval,"Some faces are more equal than others:
+Hierarchical organization for accurate and
+ef‌f‌icient large-scale identity-based face retrieval
+Binod Bhattarai1, Gaurav Sharma2, Fr´ed´eric Jurie1, Patrick P´erez2
+GREYC, CNRS UMR 6072, Universit´e de Caen Basse-Normandie, France1
+Technicolor, Rennes, France2"
+82a4562d9ef19aec3aeaf9bd9f0ac4e09bdf5c86,Putting Out a HIT: Crowdsourcing Malware Installs,"Putting Out a HIT: Crowdsourcing Malware Installs
+Chris Kanich
+UC San Diego
+Stephen Checkoway
+UC San Diego
+Keaton Mowery
+UC San Diego"
+82f6cc54ddb4df9fae811467bdf25f25985c7e2f,CNN features are also great at unsupervised classification,"CNN features are also great at unsupervised
+lassification
+Joris Guérin∗
+Arts et Métiers ParisTech
+59000, Lille, France
+Eric Nyiri∗
+Arts et Métiers ParisTech
+59000, Lille, France
+Olivier Gibaru∗
+Arts et Métiers ParisTech
+59000, Lille, France
+Stéphane Thiery∗
+Arts et Métiers ParisTech
+59000, Lille, France"
+82752700f496d4575163b2c59a547d24eb916baf,Similarity Search on Spatio-Textual Point Sets,"Series ISSN: 2367-2005
+0.5441/002/edbt.2016.31
+o1, {shop,jeans}u2, o2, {football,match,stadium}u3, o3, {shop,market}u2, o5, {hurry, tube, time}u1, o4, {tube,ride}u3, o6, {thames,bridge}u3, o7, {bus,ride}spatial thresholdu2, o8, {football,derby}Figure1:STPSJoinqueryscenario.Multipleobjectsarespatiallyortextuallysimilar,butonlyusersu1andu3haveobjectswhicharemutuallysimilar.dayfrom100millionactiveusers.Useractivitiesintheseplatformsgeneratecontentthathastextualcomponent,e.g.,statusupdates,shortmessages,ortags,and,followingthewidespreadadoptionofGPSinmobiledevices,ageospatialcomponent,e.g.,geotaggedtweets,photos,andusercheck-ins.Thus,theactionsofusersaredocumentedbytheirmessagesinsocialnetworksandassuchgenerate“traces”,whichconsistofspatio-textualobjects.Ef‌f‌icientindexingandqueryingofspatio-textualdatahasreceivedalotofattentionoverthepastyears,duetothehighimportanceofsuchcontentinlocation-basedservices,suchasnearbysearchandrecommendations.Inparticu-lar,multipletypesofspatio-textualquerieshavebeenex-tensivelystudied,includingbooleanrangequeries,top-kqueries,k-nearestneighborqueries,andmorerecently,spatio-textualsimilarityjoins[11,7].Nevertheless,inexistingworks,spatio-textualentitiesaretypicallytreatedasisolatedobservations.Atypicalexamplequeryistofindnearbyrestaurantsorhotelsmatchingcertaincriteria.Theworkin[7]dealswithfindingpairsofentitiesthatarebothspatiallycloseandtextuallysimilar.Exampleusecasesarede-duplicatingPoints-of-Interestacrossdatasets,orfindingmatchingphotostakenatroughlythesameloca-tionandhavingsimilartags.Nowconsiderlookingforsimilarusersinsocialnetworks.Here,auserischaracterizedbythemessagestheygenerateand,ifavailable,respectivelocationinformation.Assuch,eachmessagecanbeconsideredaspatio-textualobject,e.g.,ageotaggedphotoortweet.Witheachuserbeingcharacter-"
+8263834bbe6e986a703370810f9b963e2d25a7f7,Towards Head Motion Compensation Using Multi-Scale Convolutional Neural Networks,"Towards Head Motion Compensation Using Multi-Scale
+Convolutional Neural Networks
+O. Rajput1∗, N. Gessert1∗, M. Gromniak1, L. Matth¨aus2, A. Schlaefer1
+Institute of Medical Technology, Hamburg University of Technology, Hamburg, Germany
+eemagine Medical Imaging Solutions GmbH, Berlin, Germany
+Both authors contributed equally.
+Contact:"
+8239a0b4cdb480c9fb913c7476f12825418b0909,People detection in RGB-D data,"People Detection in RGB-D Data
+Luciano Spinello
+Kai O. Arras"
+8291491723d24fd242a3a93248f6475cb084999c,MobileFace: 3D Face Reconstruction with Efficient CNN Regression,"MobileFace: 3D Face Reconstruction
+with Ef‌f‌icient CNN Regression
+Nikolai Chinaev1, Alexander Chigorin1, and Ivan Laptev1,2
+VisionLabs, Amsterdam, The Netherlands
+{n.chinaev,
+Inria, WILLOW, Departement d’Informatique de l’Ecole Normale Superieure, PSL
+Research University, ENS/INRIA/CNRS UMR 8548, Paris, France"
+823f4300ddf64a95324db89035946638ecb02aa0,MX-LSTM: mixing tracklets and vislets to jointly forecast trajectories and head poses,"MX-LSTM: mixing tracklets and vislets to jointly forecast
+trajectories and head poses
+Irtiza Hasan1,2, Francesco Setti1, Theodore Tsesmelis1,2,3, Alessio Del Bue3,
+Fabio Galasso2, and Marco Cristani1
+University of Verona (UNIVR)
+OSRAM GmbH
+Istituto Italiano di Tecnologia (IIT)"
+826c66bd182b54fea3617192a242de1e4f16d020,Action-vectors: Unsupervised movement modeling for action recognition,"978-1-5090-4117-6/17/$31.00 ©2017 IEEE
+ICASSP 2017"
+49b2545b8b9ed81cc547ec974e0b61d01b7bc759,Examplers based image fusion features for face recognition,"Examplers based image fusion features for face
+recognition
+Alex Pappachen James*1 and Sima Dimitrijev2
+*1 Asst. Professor and Group Lead, Machine Intelligence Group, Indian Institute of
+Information Technology and Management-Kerala, India. www.mirgroup.co.cc,
+Professor and Deputy Director,Queensland Micro- and Nanotechnology Center, Griffith
+University, Australia, www.gu.edu.au/qmnc"
+499842b3df387b81dbb2436c764d22b1a3f42cae,Collaborative feature learning from social media,"Collaborative Feature Learning from Social Media
+Chen Fang1, Hailin Jin2, Jianchao Yang3, Zhe Lin2
+Department of Computer Science, Dartmouth College. 2Adobe Research. 3Snapchat.
+Image feature representation plays an essential role in image recognition
+nd related tasks. The current state-of-the-art feature learning paradigm
+is supervised learning from labeled data [3], which surpasses other well-
+known hand-crafted feature based methods [4, 5]. However, this paradigm
+requires large datasets with category labels to train properly, which limits its
+pplicability to new problem domains where labels are hard to obtain.
+In this paper, we ask an interesting research question: Are category-level
+labels the only way for data driven feature learning?
+There is a surge of social media websites in the last ten years. Most
+social media websites such as Pinterest have been collecting content data
+that the users share as well as behavior data of the users. User behavior
+data are the activities of individual users, such as likes, comments, or view
+histories and they carry rich information about corresponding content data.
+For instance, two photos of a similar style on Pinterest tend to be pinned by
+the same user. If we aggregate the user behavior data across many users, we
+may recover interesting properties of the content. For instance, the photos
+liked by a group of users of similar interests tend to have very similar styles."
+4941f92222d660f9b60791ba95796e51a7157077,Conditional CycleGAN for Attribute Guided Face Image Generation,"Conditional CycleGAN for Attribute Guided
+Face Image Generation
+Yongyi Lu
+HKUST
+Yu-Wing Tai
+Tencent
+Chi-Keung Tang
+HKUST"
+49004f22a420e0897f7b811239c1e098b0c655bf,Out of the Box: Reasoning with Graph Convolution Nets for Factual Visual Question Answering,"Out of the Box: Reasoning with Graph Convolution
+Nets for Factual Visual Question Answering
+Medhini Narasimhan, Svetlana Lazebnik, Alexander G. Schwing
+University of Illinois Urbana-Champaign
+{medhini2, slazebni,"
+4919663c62174a9bc0cc7f60da8f96974b397ad2,Human age estimation using enhanced bio-inspired features (EBIF),"HUMAN AGE ESTIMATION USING ENHANCED BIO-INSPIRED FEATURES (EBIF)
+Mohamed Y.El Dib and Motaz El-Saban
+Faculty of Computers and Information, Cairo University, Cairo, Egypt"
+492f3def325296164cd32b80d19a591b72b480cd,Metric Learning,"Computer Vision Group
+Metric Learning
+Technical University of Munich
+Department of Informatics
+Computer Vision Group
+June 9, 2017
+M.Sc. John Chiotellis: Metric Learning
+/ 46"
+4967b0acc50995aa4b28e576c404dc85fefb0601,An Automatic Face Detection and Gender Classification from Color Images using Support Vector Machine,"Vol. 4, No. 1 Jan 2013 ISSN 2079-8407
+Journal of Emerging Trends in Computing and Information Sciences
+©2009-2013 CIS Journal. All rights reserved.
+An Automatic Face Detection and Gender Classification from
+http://www.cisjournal.org
+Color Images using Support Vector Machine
+Md. Hafizur Rahman, 2 Suman Chowdhury, 3 Md. Abul Bashar
+, 2, 3 Department of Electrical & Electronic Engineering, International
+University of Business Agriculture and Technology, Dhaka-1230, Bangladesh"
+4913477a16c8354f032546b1444728c592823586,Web Image Retrieval Search Engine based on Semantically Shared Annotation,"Web Image Retrieval Search Engine based on Semantically
+Shared Annotation
+Alaa Riad1, Hamdy Elminir2 and Sameh Abd-Elghany3
+Vice dean of Students Affair, Faculty of Computers and Information Sciences, Mansoura University
+Mansoura, Egypt
+Mansoura, Egypt
+Mansoura, Egypt
+Head of Electronic and Communication Dept, Misr Higher Institute of Engineering and Technology
+Faculty of Computers and Information Sciences, Mansoura University"
+4914f51bc2f5a35c0d15924e39a51975c53f9753,A 3D Feature Descriptor Recovered from a Single 2D Palmprint Image,"A 3D Feature Descriptor Recovered from a
+Single 2D Palmprint Image
+Qian Zheng1,2, Ajay Kumar1, and Gang Pan2"
+4972aadcce369a8c0029e6dc2f288dfd0241e144,Multi-target Unsupervised Domain Adaptation without Exactly Shared Categories,"Multi-target Unsupervised Domain Adaptation
+without Exactly Shared Categories
+Huanhuan Yu, Menglei Hu and Songcan Chen"
+49d4cb2e1788552a04c7f8fec33fbfabb3882995,Visually-Enabled Active Deep Learning for (Geo) Text and Image Classification: A Review,"Article
+Visually-Enabled Active Deep Learning for
+(Geo) Text and Image Classification: A Review
+Liping Yang 1,*, Alan M. MacEachren 1,* ID , Prasenjit Mitra 2 and Teresa Onorati 3
+Department of Geography and Institute for CyberScience, The Pennsylvania State University,
+University Park, PA 16802, USA
+College of Information Sciences and Technology, The Pennsylvania State University, University Park,
+PA 16802, USA;
+Computer Science Department, Universidad Carlos III de Madrid, 28911-Leganés, Madrid, Spain;
+* Correspondence: (L.Y.); (A.M.M.)
+Received: 29 December 2017; Accepted: 17 February 2018; Published: 20 February 2018"
+494c1630c93e74aca3169ae33734f2f733c95e05,The Iris Challenge Evaluation 2005,"The Iris Challenge Evaluation 2005
+P. Jonathon Phillips, Kevin W. Bowyer, Patrick J. Flynn, Xiaomei Liu, W. Todd Scruggs"
+49f22f29e57f5867b47348555136844ffa6c6603,Beyond Lesion-Based Diabetic Retinopathy: A Direct Approach for Referral,"JOURNAL OF LATEX CLASS FILES, VOL. 11, NO. 4, DECEMBER 2012
+Beyond Lesion-based Diabetic Retinopathy:
+Direct Approach for Referral
+Ramon Pires, Member, IEEE, Sandra Avila, Member, IEEE, Herbert F. Jelinek, Member, IEEE,
+Jacques Wainer, Eduardo Valle, and Anderson Rocha, Senior Member, IEEE"
+49e85869fa2cbb31e2fd761951d0cdfa741d95f3,Adaptive Manifold Learning,"Adaptive Manifold Learning
+Zhenyue Zhang, Jing Wang, and Hongyuan Zha"
+490a217a4e9a30563f3a4442a7d04f0ea34442c8,An SOM-based Automatic Facial Expression Recognition System,"International Journal on Soft Computing, Artificial Intelligence and Applications (IJSCAI), Vol.2, No.4, August 2013
+An SOM-based Automatic Facial Expression
+Recognition System
+Mu-Chun Su1, Chun-Kai Yang1, Shih-Chieh Lin1,De-Yuan Huang1, Yi-Zeng
+Hsieh1, andPa-Chun Wang2
+Department of Computer Science &InformationEngineering,National Central
+University,Taiwan, R.O.C.
+Cathay General Hospital, Taiwan, R.O.C.
+E-mail:"
+4987ac5638e1fdb116cc76626465f166998d7536,Polysemous Codes,"Polysemous codes
+Matthijs Douze, Herv´e J´egou and Florent Perronnin
+Facebook AI Research"
+494e736c05ddf500830e9c51b5fb42be9b9bff1a,Learning Depth from Monocular Videos using Direct Methods,
+49a7949fabcdf01bbae1c2eb38946ee99f491857,A concatenating framework of shortcut convolutional neural networks,"A CONCATENATING FRAMEWORK OF SHORTCUT
+CONVOLUTIONAL NEURAL NETWORKS
+Yujian Li Ting Zhang, Zhaoying Liu, Haihe Hu"
+49b3f6d8712c01f315686b6b8541eda8c5ee428a,Virtual friend or threat? The effects of facial expression and gaze interaction on psychophysiological responses and emotional experience.,"Copyright r 2009 Society for Psychophysiological Research
+DOI: 10.1111/j.1469-8986.2009.00831.x
+Virtual friend or threat? The effects of facial expression
+nd gaze interaction on psychophysiological responses
+nd emotional experience
+FRANZISKA SCHRAMMEL,a SEBASTIAN PANNASCH,a SVEN-THOMAS GRAUPNER,a
+ANDREAS MOJZISCH,b and BORIS M. VELICHKOVSKYa
+Institute for Psychology III, Technische Universitaet Dresden, Germany
+Institute for Psychology, Georg-August-University Goettingen, Germany"
+49957368eceaa751c0b9c49251512ca6a8800cff,Accurate Object Localization with Shape Masks,"Accurate Object Localization with Shape Masks
+Marcin Marsza(cid:7)ek
+Cordelia Schmid
+INRIA, LEAR - LJK
+665 av de l’Europe, 38330 Montbonnot, France"
+499343a2fd9421dca608d206e25e53be84489f44,Face Recognition with Name Using Local Weber‟s Law Descriptor,"Anil Kumar.C, et.al, International Journal of Technology and Engineering Science [IJTES]TM
+Volume 1[9], pp: 1371-1375, December 2013
+Face Recognition with Name Using Local Weber‟s
+Law Descriptor
+C.Anil kumar,2A.Rajani,3I.Suneetha
+M.Tech Student,2Assistant Professor,3Associate Professor
+Department of ECE, Annamacharya Institute of Technology and Sciences, Tirupati, India-517520
+on FERET"
+490fa9ee39614e1ef1d74162e698e4a1f0e5f916,In Good Shape: Robust People Detection based on Appearance and Shape,"PISHCHULIN et al.: PEOPLE DETECTION USING APPEARANCE AND SHAPE
+In Good Shape: Robust People Detection
+ased on Appearance and Shape
+Computer Vision and
+Multimodal Computing
+MPI Informatics
+Saarbrücken, Germany
+Leonid Pishchulin
+Arjun Jain
+Christian Wojek
+Thorsten Thormählen
+Bernt Schiele"
+498fd231d7983433dac37f3c97fb1eafcf065268,Linear Disentangled Representation Learning for Facial Actions,"LINEAR DISENTANGLED REPRESENTATION LEARNING FOR FACIAL ACTIONS
+Xiang Xiang1 and Trac D. Tran2
+Dept. of Computer Science
+Dept. of Electrical & Computer Engineering
+Johns Hopkins University, 3400 N. Charles Street, Baltimore, MD 21218, USA
+Fig. 1. The separability of the neutral face yn and expression
+omponent ye. We find yn is better for identity recognition
+than y and ye is better for expression recognition than y."
+49e1aa3ecda55465641b2c2acc6583b32f3f1fc6,Support Vector Machine for age classification,"International Journal of Emerging Technology and Advanced Engineering
+Website: www.ijetae.com (ISSN 2250-2459, Volume 2, Issue 5, May 2012)
+Support Vector Machine for age classification
+Sangeeta Agrawal1, Rohit Raja2, Sonu Agrawal3
+Assistant Professor, CSE, RSR RCET, Kohka Bhilai
+,3 Sr. Assistant Professor, CSE, SSCET, Junwani Bhilai"
+491cf4d86ed895000a35ba96f46261984c0bdf7c,Facial Expression Recognition for Domestic Service Robots,"Facial Expression Recognition for Domestic
+Service Robots
+Geovanny Giorgana and Paul G. Ploeger
+Bonn-Rhein-Sieg University of Applied Sciences,
+Grantham-Allee 20 53757 Sankt Augustin, Germany"
+490a0b6ff5b982e884622bb9c81250f05c069f32,Template Aging in 3 D and 2 D Face Recognition,"Template Aging in 3D and 2D Face Recognition
+Ishan Manjani∗
+Hakki Sumerkan†
+Patrick J. Flynn†
+Kevin W. Bowyer†"
+4991dcef497ddd7ea115663985a9e0635494a95d,Detecting Group Activities With Multi-Camera Context,"Detecting Group Activities With
+Multi-Camera Context
+Zheng-Jun Zha, Member, IEEE, Hanwang Zhang, Meng Wang, Member, IEEE, Huanbo Luan, and Tat-Seng Chua"
+49d7fd8975413fb2912e111093749733712210dd,Vpliv kakovosti vhodnih slik na zanesljivost samodejnega razpoznavanja obrazov,"Elektrotehniški vestnik 74(3): 145-150, 2007
+Electrotechnical Review: Ljubljana, Slovenija
+Vpliv kakovosti vhodnih slik na zanesljivost samodejnega
+razpoznavanja obrazov
+Vitomir Štruc, Nikola Paveši(cid:29)
+Univerza v Ljubljani, Fakulteta za elektrotehniko, Tržaška 25, 1001 Ljubljana, Slovenija
+E-pošta:
+Povzetek. Zanesljivost samodejnega razpoznavanja obrazov je odvisna od številnih dejavnikov, med katerimi so
+najpomembnejši natan(cid:24)nost dolo(cid:24)itve slikovnega obmo(cid:24)ja obraza in njegova odpornost na slabšo kakovost slik,
+izbira ustreznega postopka izpeljave obraznih zna(cid:24)ilk ter uporaba primernega algoritma za izra(cid:24)un podobnosti in
+sprejetje odlo(cid:24)itve o identiteti osebe. V (cid:24)lanku predstavljamo rezultate vrednotenja napak, ki jih v biometri(cid:24)ni
+sistem vnašajo razli(cid:24)ne degradacije vhodnih slik. Njihov vpliv smo prou(cid:24)ili za tri na podro(cid:24)ju razpoznavanja
+obrazov pogosteje uporabljene postopke izpeljave zna(cid:24)ilk (analizo glavnih komponent – PCA, analizo linearne
+diskriminante – LDA ter analizo neodvisnih komponent – ICA), pri (cid:24)emer smo za dolo(cid:24)itev zanesljivosti
+razpoznavanja (verifikacije) uporabili bazo XM2VTS; za ovrednotenje napak, ki jih v biometri(cid:24)ni sistem vnašajo
+spremembe v kakovosti slik, pa njene degradirane razli(cid:24)ice.
+Klju ne besede: razpoznavanje obrazov, analiza glavnih komponent, analiza linearne diskriminante, analiza
+neodvisnih komponent, zanesljivost razpoznavanja, kakovost vhodnih slik
+Impact of image degradations on the face recognition accuracy"
+49df381ea2a1e7f4059346311f1f9f45dd997164,Client-Specific Anomaly Detection for Face Presentation Attack Detection,"On the Use of Client-Specific Information for Face
+Presentation Attack Detection Based on Anomaly
+Detection
+Shervin Rahimzadeh Arashloo and Josef Kittler,"
+496074fcbeefd88664b7bd945012ca22615d812e,Driver Distraction Using Visual-Based Sensors and Algorithms,"Review
+Driver Distraction Using Visual-Based Sensors
+nd Algorithms
+Alberto Fernández 1,*, Rubén Usamentiaga 2, Juan Luis Carús 1 and Rubén Casado 2
+Grupo TSK, Technological Scientific Park of Gijón, 33203 Gijón, Asturias, Spain;
+Department of Computer Science and Engineering, University of Oviedo, Campus de Viesques, 33204 Gijón,
+Asturias, Spain; (R.U.); (R.C.)
+* Corrospondence: Tel.: +34-984-29-12-12; Fax: +34-984-39-06-12
+Academic Editor: Gonzalo Pajares Martinsanz
+Received: 14 July 2016; Accepted: 24 October 2016; Published: 28 October 2016"
+40205181ed1406a6f101c5e38c5b4b9b583d06bc,Using Context to Recognize People in Consumer Images,"Using Context to Recognize People in Consumer Images
+Andrew C. Gallagher and Tsuhan Chen"
+40dab43abef32deaf875c2652133ea1e2c089223,Facial Communicative Signals: valence recognition in task-oriented human-robot Interaction,"Noname manuscript No.
+(will be inserted by the editor)
+Facial Communicative Signals
+Valence Recognition in Task-Oriented Human-Robot Interaction
+Christian Lang · Sven Wachsmuth · Marc Hanheide · Heiko Wersing
+Received: date / Accepted: date"
+403b3d0594989629c95e5bc5230d4ccb1691f255,Automatic detection of pain from spontaneous facial expressions,"Meawad, F., Yang, S.-Y. and Loy, F. L. (2017) Automatic Detection of
+Pain from Spontaneous Facial Expressions. In: 19th ACM International
+Conference on Multimodal Interaction (ICMI 2017), Glasgow, Scotland,
+3-17 Nov 2017, pp. 397-401. ISBN 9781450355438
+(doi:10.1145/3136755.3136794)
+This is the author’s final accepted version.
+There may be differences between this version and the published version.
+You are advised to consult the publisher’s version if you wish to cite from
+http://eprints.gla.ac.uk/151491/
+Deposited on: 22 December 2017
+Enlighten – Research publications by members of the University of Glasgow
+http://eprints.gla.ac.uk"
+40ce2567ccc2552287f8a1c25e9f6086efa6bf8f,Identification and evaluation of children with autism spectrum disorders.,"CLINICAL REPORT
+Identification and Evaluation of
+Children With Autism Spectrum
+Disorders
+Chris Plauche´ Johnson, MD, MEd, Scott M. Myers, MD, and the Council on Children With Disabilities
+Guidance for the Clinician in Rendering
+Pediatric Care"
+40b0fced8bc45f548ca7f79922e62478d2043220,Do Convnets Learn Correspondence?,"Do Convnets Learn Correspondence?
+Trevor Darrell
+Jonathan Long
+{jonlong, nzhang,
+University of California – Berkeley
+Ning Zhang"
+405b43f4a52f70336ac1db36d5fa654600e9e643,What can we learn about CNNs from a large scale controlled object dataset?,"What can we learn about CNNs from a large scale controlled object dataset?
+Ali Borji
+Saeed Izadi
+Laurent Itti"
+40b86ce698be51e36884edcc8937998979cd02ec,Finding Faces in News Photos Using Both Face and Name Information,"Yüz ve İsim İlişkisi kullanarak Haberlerdeki Kişilerin Bulunması
+Finding Faces in News Photos Using Both Face and Name Information
+Derya Ozkan, Pınar Duygulu
+Bilgisayar Mühendisliği Bölümü, Bilkent Üniversitesi, 06800, Ankara
+Özetçe
+Bu çalışmada, haber fotoğraflarından oluşan geniş veri
+kümelerinde kişilerin sorgulanmasını sağlayan bir yöntem
+sunulmuştur. Yöntem isim ve yüzlerin ilişkilendirilmesine
+dayanmaktadır. Haber başlığında kişinin ismi geçiyor ise
+fotoğrafta da o kişinin yüzünün bulunacağı varsayımıyla, ilk
+olarak sorgulanan isim ile ilişkilendirilmiş, fotoğraflardaki
+tüm yüzler seçilir. Bu yüzler arasında sorgu kişisine ait farklı
+koşul, poz ve zamanlarda çekilmiş pek çok resmin yanında,
+haberde ismi geçen başka kişilere ait yüzler ya da kullanılan
+yüz bulma yönteminin hatasından kaynaklanan yüz olmayan
+resimler de bulunabilir. Yine de, çoğu zaman, sorgu kişisine
+it resimler daha çok olup, bu resimler birbirine diğerlerine
+olduğundan daha çok benzeyeceklerdir. Bu nedenle, yüzler
+rasındaki benzerlikler çizgesel olarak betimlendiğinde ,
+irbirine en çok benzeyen yüzler bu çizgede en yoğun bileşen"
+40a0e080a01094cdb2174e9154540c217d3f9440,Improved Security Aspects on Microsofts Two -layer Captcha,"Vol-2 Issue-5 2017
+IJARIIE-ISSN(O)-2395-4396
+IMPROVED SECURITY ASPECTS ON
+MICROSOFTS
+TWO -LAYER CAPTCHA
+Rachana.B.S, Dhruthi.S, Swarna.R, Chandan.A
+Rachana.B.S, Asst.Prof, ISE, APSCE, B’lore, Karnataka, INDIA
+Dhruthi S, Student, ISE,, APSCE, Karnataka, India
+Swarna R, Student, ISE, APSCE, Karnataka, India
+Chandana A, Student, ISE, APSCE, Karnataka, India"
+404c7839afe2fec48a06f83d2a532c05ad8ba0d3,Vehicle Classification using Transferable Deep Neural Network Features,"Vehicle Classification using Transferable Deep
+Neural Network Features
+Yiren Zhou, Ngai-Man Cheung"
+40041b80cef6dc23946ffa9628b6ac3b8dcc971a,Parallel Separable 3D Convolution for Video and Volumetric Data Understanding,"GONDA, WEI, PARAG, PFISTER: PARALLEL SEPARABLE 3D CONVOLUTION
+Parallel Separable 3D Convolution for Video
+nd Volumetric Data Understanding
+Harvard John A. Paulson School of
+Engineering and Applied Sciences
+Camabridge MA, USA
+Felix Gonda
+Donglai Wei
+Toufiq Parag
+Hanspeter Pfister"
+40f7ea135907d2f4abeae0475d9a88477239d504,Multimodal Explanations: Justifying Decisions and Pointing to the Evidence,"Multimodal Explanations: Justifying Decisions and Pointing to the Evidence
+Dong Huk Park1, Lisa Anne Hendricks1, Zeynep Akata2,3, Anna Rohrbach1,3,
+Bernt Schiele3, Trevor Darrell1, and Marcus Rohrbach4
+EECS, UC Berkeley, 2University of Amsterdam, 3MPI for Informatics, 4Facebook AI Research"
+402f6db00251a15d1d92507887b17e1c50feebca,3D Facial Action Units Recognition for Emotional Expression,"D Facial Action Units Recognition for Emotional
+Expression
+Norhaida Hussain1, Hamimah Ujir, Irwandi Hipiny and Jacey-Lynn Minoi2
+Department of Information Technology and Communication, Politeknik Kuching, Sarawak, Malaysia
+Faculty of Computer Science and Information Technology, Universiti Malaysia Sarawak, Kota Samarahan, Sarawak, Malaysia
+The muscular activities caused the activation of certain AUs for every facial expression at the certain duration of time
+throughout the facial expression. This paper presents the methods to recognise facial Action Unit (AU) using facial distance
+of the facial features which activates the muscles. The seven facial action units involved are AU1, AU4, AU6, AU12, AU15,
+AU17 and AU25 that characterises happy and sad expression. The recognition is performed on each AU according to rules
+defined based on the distance of each facial points. The facial distances chosen are extracted from twelve facial features.
+Then the facial distances are trained using Support Vector Machine (SVM) and Neural Network (NN). Classification result
+using SVM is presented with several different SVM kernels while result using NN is presented for each training, validation
+nd testing phase.
+Keywords: Facial action units recognition, 3D AU recognition, facial expression"
+40932ccdd7cda22e90c1e16b4a4dc4930b122a9c,Learning to Look around Objects for Top-View Representations of Outdoor Scenes,"Learning to Look around Objects for Top-View
+Representations of Outdoor Scenes
+Samuel Schulter1,† Menghua Zhai2,†
+Nathan Jacobs2
+Manmohan Chandraker1,3
+NEC-Labs1, Computer Science University of Kentucky2, UC San Diego3"
+4053e3423fb70ad9140ca89351df49675197196a,Robust Face Detection Using the Hausdorff Distance,"(cid:13) In Proc. Third International Conference on Audio- and Video-based
+Biometric Person Authentication, Springer, Lecture Notes in Computer
+Science, LNCS-2091, pp. 90–95, Halmstad, Sweden, 6–8 June 2001.
+Robust Face Detection
+Using the Hausdorff Distance
+Oliver Jesorsky, Klaus J. Kirchberg, and Robert W. Frischholz
+BioID AG, Berlin, Germany
+WWW home page: http://www.bioid.com"
+409220cf5137d6dc6c85f440d618e44d244f402e,Randomized Algorithms for Large-scale Strongly Over-determined Linear Regression Problems a Dissertation Submitted to the Institute for Computational and Mathematical Engineering and the Committee on Graduate Studies of Stanford University in Partial Fulfillment of the Requirements for the Degree Of,"RANDOMIZED ALGORITHMS FOR LARGE-SCALE STRONGLY
+OVER-DETERMINED LINEAR REGRESSION PROBLEMS
+A DISSERTATION
+SUBMITTED TO THE INSTITUTE FOR
+COMPUTATIONAL AND MATHEMATICAL ENGINEERING
+AND THE COMMITTEE ON GRADUATE STUDIES
+OF STANFORD UNIVERSITY
+IN PARTIAL FULFILLMENT OF THE REQUIREMENTS
+FOR THE DEGREE OF
+DOCTOR OF PHILOSOPHY
+Xiangrui Meng
+June 2014"
+406caefc7f51e8a16833402e4757704d5d84a1f8,Dual-Tree Complex Wavelets Transform Based Facial Expression Recognition using Principal Component Analysis ( PCA ) and Local Binary Pattern ( LBP ),"ISSN XXXX XXXX © 2017 IJESC
+Research Article Volume 7 Issue No.4
+Dual-Tree Complex Wavelets Transform Based Facial Expression
+Recognition using Principal Component Analysis (PCA) and Local
+Binary Pattern(LBP)
+Fahad Abdu Jibrin1, Abubakar Sadiq Muhammad2
+Department of Electrical Engineering1, Department of Computer Engineering2
+School of Technology, Kano State Polytechnic, Nigeria"
+40d4fab85e2e1557e61d03b92429d64c6efba101,Detection-based multi-human tracking using a CRF model,"Detection-Based Multi-Human Tracking Using a CRF Model
+Alexandre Heili1,2
+Jean-Marc Odobez1,2
+Idiap Research Institute – CH-1920 Martigny, Switzerland
+Cheng Chen1
+´Ecole Polytechnique F´ed´erale de Lausanne – CH-1015, Lausanne, Switzerland"
+40000b058cf80b7983a2c0f96562368a40a04580,Predicting human mobility through the assimilation of social media traces into mobility models,"Predicting human mobility through the assimilation of social media
+traces into mobility models
+Mariano G. Beir´o1
+Andr´e Panisson1
+Michele Tizzoni1
+Ciro Cattuto1
+ISI Foundation, Turin, Italy"
+40fb4e8932fb6a8fef0dddfdda57a3e142c3e823,A mixed generative-discriminative framework for pedestrian classification,"A Mixed Generative-Discriminative Framework for Pedestrian Classification
+Markus Enzweiler1
+Dariu M. Gavrila2,3
+Image & Pattern Analysis Group, Dept. of Math. and Comp. Sc., Univ. of Heidelberg, Germany
+Environment Perception, Group Research, Daimler AG, Ulm, Germany
+Intelligent Systems Lab, Faculty of Science, Univ. of Amsterdam, The Netherlands"
+40f5ae73e598114edab3ddaefc38fbdbf5c114b9,Optical Flow Based Face Recognition under Expression Variations,"International Journal of Information Science and Intelligent System, 3(2): 1-12, 2014
+Optical Flow Based Face Recognition under
+Expression Variations
+Vimala K1,∗, Dr.V.Kalaivani2, V.Anusuya Devi3
+1Assistant Professor, Department of CSE(PG),National Engineering College , Kovilpatti, India
+2 Associate Professor(SG) and Head, Department of CSE(PG),National Engineering College India ,
+Assistant Professor, Department of CSE(PG),National Engineering College, Kovilpatti, India"
+409ff083816d8357fe839e3ea0e62d648a5532aa,SEMDIAL 2016 JerSem Proceedings of the 20th Workshop on the Semantics and Pragmatics of Dialogue,"SEMDIAL 2016
+JerSem
+Proceedings of the 20th Workshop on
+the Semantics and Pragmatics of Dialogue
+Julie Hunter, Mandy Simons, and Matthew Stone (eds.)
+New Brunswick, NJ, 16–18 July 2016"
+40dd2b9aace337467c6e1e269d0cb813442313d7,Localizing spatially and temporally objects and actions in videos. (Localiser spatio-temporallement des objets et des actions dans des vidéos),"This thesis has been submitted in fulfilment of the requirements for a postgraduate degree
+(e.g. PhD, MPhil, DClinPsychol) at the University of Edinburgh. Please note the following
+terms and conditions of use:
+This work is protected by copyright and other intellectual property rights, which are
+retained by the thesis author, unless otherwise stated.
+A copy can be downloaded for personal non-commercial research or study, without
+prior permission or charge.
+This thesis cannot be reproduced or quoted extensively from without first obtaining
+permission in writing from the author.
+The content must not be changed in any way or sold commercially in any format or
+medium without the formal permission of the author.
+When referring to this work, full bibliographic details including the author, title,
+warding institution and date of the thesis must be given."
+40536b0cc73fda29a335c6ecf9ce891dcb6d04cd,Face Detection Algorithms: A Comparative Study,"Face Detection Algorithms: A Comparative Study
+Kapil Kumar Gupta1, M. Rizwan Beg 2 , Jitendra Kumar Niranjan3
+1 Department of Computer Science & Engg., Integral University,
+Lucknow, Uttar Pradesh, 226001, India
+Department of Computer Science & Engg., Integral University,
+Lucknow, Uttar Pradesh, 226001, India
+Department of Computer Science & Engg, IMS Engineering College
+Ghaziabad, Uttar Pradesh 201009, India"
+405a70c184e00eefcf797a0e842578ea0b51f6cd,Learning a Family of Detectors via Multiplicative Kernels,"This article has been accepted for inclusion in a future issue of this journal. Content is final as presented, with the exception of pagination.
+Learning a Family of Detectors
+via Multiplicative Kernels
+Quan Yuan, Member, IEEE, Ashwin Thangali, Student Member, IEEE,
+Vitaly Ablavsky, Student Member, IEEE, and Stan Sclaroff, Senior Member, IEEE"
+40c3b350008ada8f3f53a758e69992b6db8a8f95,Discriminative Decorrelation for Clustering and Classification,"Discriminative Decorrelation for Clustering and
+Classification￿
+Bharath Hariharan1, Jitendra Malik1, and Deva Ramanan2
+Univerisity of California at Berkeley, Berkeley, CA, USA
+University of California at Irvine, Irvine, CA, USA"
+40b87d3b1e3dbbc82fb7d786004fe202e131c045,Multi-modal Egocentric Activity Recognition using Audio-Visual Features,"Submitted to IEEE Transactions on Human-Machine Systems
+Multi-modal Egocentric Activity Recognition
+using Audio-Visual Features
+Mehmet Ali Arabacı, Fatih Özkan, Elif Surer, Peter Jančovič, Alptekin Temizel"
+40229a034d2fcddc3df32f906ec4ef6a3b3e017e,A semi-automated system for accurate gaze coding in natural dyadic interactions,"A Semi-Automated System for Accurate Gaze Coding
+in Natural Dyadic Interactions
+Kenneth A. Funes-Mora, Laurent Nguyen, Daniel Gatica-Perez, Jean-Marc Odobez
+Idiap Research Institute and École Polytechnique Fédérale de Lausanne (EPFL), Switzerland"
+40a34d4eea5e32dfbcef420ffe2ce7c1ee0f23cd,Bridging Heterogeneous Domains With Parallel Transport For Vision and Multimedia Applications,"Bridging Heterogeneous Domains With Parallel Transport For Vision and
+Multimedia Applications
+Raghuraman Gopalan
+Dept. of Video and Multimedia Technologies Research
+AT&T Labs-Research
+San Francisco, CA 94108"
+40389b941a6901c190fb74e95dc170166fd7639d,Automatic Facial Expression Recognition,"Automatic Facial Expression Recognition
+Jacob Whitehill, Marian Stewart Bartlett, and Javier R. Movellan
+Emotient
+http://emotient.com
+February 12, 2014
+Imago animi vultus est, indices oculi. (Cicero)
+Introduction
+The face is innervated by two different brain systems that compete for control of its muscles:
+cortical brain system related to voluntary and controllable behavior, and a sub-cortical
+system responsible for involuntary expressions. The interplay between these two systems
+generates a wealth of information that humans constantly use to read the emotions, inten-
+tions, and interests [25] of others.
+Given the critical role that facial expressions play in our daily life, technologies that can
+interpret and respond to facial expressions automatically are likely to find a wide range of
+applications. For example, in pharmacology, the effect of new anti-depression drugs could
+be assessed more accurately based on daily records of the patients’ facial expressions than
+asking the patients to fill out a questionnaire, as it is currently done [7]. Facial expression
+recognition may enable a new generation of teaching systems to adapt to the expression
+of their students in the way good teachers do [61]. Expression recognition could be used
+to assess the fatigue of drivers and air-pilots [58, 59]. Daily-life robots with automatic"
+401f056e1017151018e83d2b13b5eaec573b4dbc,Rapid and accurate face depth estimation in passive stereo systems,"Noname manuscript No.
+(will be inserted by the editor)
+Rapid and accurate face depth estimation in passive
+stereo systems
+Amel AISSAOUI · Jean MARTINET ·
+Chaabane DJERABA
+Received: date / Accepted: date"
+40010e1918e1f342b14c8ec74e570101f07471b2,Flower Categorization using Deep Convolutional Neural Networks,"Flower Categorization using Deep Convolutional Neural Networks
+Ayesha Gurnani
+Viraj Mavani
+Vandit Gajjar
+Yash Khandhediya
+L. D. College of Engineering
+L. D. College of Engineering
+L. D. College of Engineering
+L. D. College of Engineering"
+40a63746a710baf4a694fd5a4dd8b5a3d9fc2846,Invertible Conditional GANs for image editing,"Invertible Conditional GANs for image editing
+Guim Perarnau, Joost van de Weijer, Bogdan Raducanu
+Computer Vision Center
+Barcelona, Spain
+Jose M. Álvarez
+Data61 CSIRO
+Canberra, Australia"
+40377a1bc15a9ec28ea54cc53d5cf0699365634f,Некооперативная Биометрическая Идентификация По 3d- Моделям Лица С Использованием Видеокамер Высокого Разрешения,"НЕКООПЕРАТИВНАЯ БИОМЕТРИЧЕСКАЯ ИДЕНТИФИКАЦИЯ ПО 3D-
+МОДЕЛЯМ ЛИЦА С ИСПОЛЬЗОВАНИЕМ ВИДЕОКАМЕР ВЫСОКОГО
+РАЗРЕШЕНИЯ
+А.И. Манолов, А.Ю. Соколов, О.В. Степаненко, А.C. Тумачек, А.В.Тяхт, А. К. Цискаридзе,
+Д.Н. Заварикин, А.А. Кадейшвили,
+Компания Vocord
+Аннотация
+Получены результаты по распознаванию лиц, основанные
+на 3D реконструкции без использования какой-либо
+структурированной подсветки. 3D реконструкция основана
+на использовании камер высокого разрешения.
+Вероятность распознавания составляет 92-98%.
+Ключевые слова: 3D реконструкция, 3D распознавание
+. ВВЕДЕНИЕ
+Системам распознавания лиц, основанным на двумерных
+изображениях, присущи определенные недостатки. Такие
+системы чувствительны к изменениям яркости. Свет,
+собранный с лица, является функцией геометрии лица,
+отражательной способности лица, свойствами источника
+света и свойствами камеры. С учетом этого, сложно создать"
+40b10e330a5511a6a45f42c8b86da222504c717f,Implementing the Viola-Jones Face Detection Algorithm,"Implementing the Viola-Jones
+Face Detection Algorithm
+Ole Helvig Jensen
+Kongens Lyngby 2008
+IMM-M.Sc.-2008-93"
+400aa5cb2fec558f7827c3638993bae34752ff31,Assessing post-detection filters for a generic pedestrian detector in a tracking-by-detection scheme,"(cid:13)2017 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including
+reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists,
+or reuse of any copyrighted component of this work in other works.
+Assessing Post-Detection Filters for a Generic Pedestrian Detector in a
+Tracking-By-Detection Scheme
+Volker Eiselein, Erik Bochinski and Thomas Sikora
+Communication Systems Group, Technische Universit¨at Berlin"
+40ca925befa1f7e039f0cd40d57dbef6007b4416,Sampling Matters in Deep Embedding Learning,"Sampling Matters in Deep Embedding Learning
+Chao-Yuan Wu∗
+UT Austin
+R. Manmatha
+A9/Amazon
+Alexander J. Smola
+Amazon
+Philipp Kr¨ahenb¨uhl
+UT Austin"
+4026dc62475d2ff2876557fc2b0445be898cd380,An Affective User Interface Based on Facial Expression Recognition and Eye-Gaze Tracking,"An Affective User Interface Based on Facial Expression
+Recognition and Eye-Gaze Tracking
+Soo-Mi Choi and Yong-Guk Kim
+School of Computer Engineering, Sejong University, Seoul, Korea"
+40f2b3af6b55efae7992996bd0c474a9c1574008,Oxytocin Increases Retention of Social Cognition in Autism,"ARTICLE IN PRESS
+Oxytocin Increases Retention of Social Cognition
+in Autism
+Eric Hollander, Jennifer Bartz, William Chaplin, Ann Phillips, Jennifer Sumner, Latha Soorya,
+Evdokia Anagnostou, and Stacey Wasserman
+Background: Oxytocin dysfunction might contribute to the development of social deficits in autism, a core symptom domain and
+potential target for intervention. This study explored the effect of intravenous oxytocin administration on the retention of social
+information in autism.
+Methods: Oxytocin and placebo challenges were administered to 15 adult subjects diagnosed with autism or Asperger’s disorder, and
+comprehension of affective speech (happy, indifferent, angry, and sad) in neutral content sentences was tested.
+Results: All subjects showed improvements in affective speech comprehension from pre- to post-infusion; however, whereas those who
+received placebo first tended to revert to baseline after a delay, those who received oxytocin first retained the ability to accurately assign
+emotional significance to speech intonation on the speech comprehension task.
+Conclusions: These results are consistent with studies linking oxytocin to social recognition in rodents as well as studies linking
+oxytocin to prosocial behavior in humans and suggest that oxytocin might facilitate social information processing in those with autism.
+These findings also provide preliminary support for the use of oxytocin in the treatment of autism.
+Key Words: Autism, oxytocin, neuropeptide, social cognition,
+affective speech
+A utism is a developmental disorder characterized by ab-
+normalities in speech and communication, impaired so-"
+40f127fa4459a69a9a21884ee93d286e99b54c5f,Optimizing Apparent Display Resolution Enhancement for Arbitrary Videos,"Optimizing Apparent Display Resolution
+Enhancement for Arbitrary Videos
+Michael Stengel*, Member, IEEE, Martin Eisemann, Stephan Wenger,
+Benjamin Hell, Marcus Magnor, Member, IEEE"
+401e6b9ada571603b67377b336786801f5b54eee,Active Image Clustering: Seeking Constraints from Humans to Complement Algorithms,"Active Image Clustering: Seeking Constraints from
+Humans to Complement Algorithms
+November 22, 2011"
+40248cd4a742cb33c14e835fe6b847ad3f8d5b96,Learning View-Specific Deep Networks for Person Re-Identification,"Learning View-Specific Deep Networks for Person
+Re-Identification
+Zhanxiang Feng, Jianhuang Lai, and Xiaohua Xie"
+403e7fed4fa1785af8309b1c4c736d98fa75be5b,Social status gates social attention in monkeys,"Magazine
+Social status
+gates social
+attention in
+monkeys
+Stephen V. Shepherd1,
+Robert O. Deaner1 and
+Michael L. Platt1,2,3
+Humans rapidly shift attention in
+the direction other individuals are
+looking, following gaze in a
+manner suggestive of an
+obligatory social reflex [1–4].
+Monkeys’ attention also follows
+gaze, and the similar magnitude
+nd time-course of gaze-
+following in rhesus macaques and
+humans [5] is indicative of shared
+neural mechanisms. Here we
+show that low-status male rhesus"
+40f6c9355dbf01a240b4c26b0fd00b5cfbd5f67d,An eye-tracking method to reveal the link between gazing patterns and pragmatic abilities in high functioning autism spectrum disorders,"ORIGINAL RESEARCH ARTICLE
+published: 14 January 2015
+doi: 10.3389/fnhum.2014.01067
+An eye-tracking method to reveal the link between gazing
+patterns and pragmatic abilities in high functioning autism
+spectrum disorders
+Ouriel Grynszpan 1* and Jacqueline Nadel 2
+Institut des Systèmes Intelligents et de Robotique (ISIR), Université Pierre et Marie Curie, Centre National de la Recherche Scientifique, Paris, France
+Centre Emotion, Hôpital de La Salpêtrière, Paris, France
+Edited by:
+John J. Foxe, Albert Einstein
+College of Medicine, USA
+Reviewed by:
+Hans-Peter Frey, Albert Einstein
+College of Medicine, USA
+Julia Irwin, Haskins Laboratories,
+Karri Gillespie-Smith, University of
+West of Scotland, UK
+*Correspondence:
+Ouriel Grynszpan, Institut des"
+40bd5d4b01c89e84fe2b0f6b1cc22657bf4e8d80,Toward Unconstrained Fingerprint Recognition: A Fully Touchless 3-D System Based on Two Views on the Move,"Toward Unconstrained Fingerprint Recognition:
+Fully Touchless 3-D System
+Based on Two Views on the Move
+Ruggero Donida Labati, Member, IEEE, Angelo Genovese, Member, IEEE,
+Vincenzo Piuri, Fellow, IEEE, and Fabio Scotti, Senior Member, IEEE"
+2eef20a11324686099ee6f9b1a7613444b0d2112,Dual-Path Convolutional Image-Text Embedding with Instance Loss,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+Dual-Path Convolutional Image-Text Embeddings
+with Instance Loss
+Zhedong Zheng, Liang Zheng, Michael Garrett, Yi Yang, Yi-Dong Shen"
+2e53a5dbadfd30b834feea80c365ffff3925eb76,The role of alexithymia in reduced eye-fixation in Autism Spectrum Conditions.,"23Journal of Autism andDevelopmental Disorders ISSN 0162-3257Volume 41Number 11 J Autism Dev Disord (2011)41:1556-1564DOI 10.1007/s10803-011-1183-3The Role of Alexithymia in Reduced Eye-Fixation in Autism Spectrum ConditionsGeoffrey Bird, Clare Press & DanielC. Richardson"
+2e8e6b835e5a8f55f3b0bdd7a1ff765a0b7e1b87,Pointly-Supervised Action Localization,"International Journal of Computer Vision manuscript No.
+(will be inserted by the editor)
+Pointly-Supervised Action Localization
+Pascal Mettes · Cees G. M. Snoek
+Received: date / Accepted: date"
+2e10560579f2bdeae0143141f26bd9f0a195b4b7,Mixed Precision Training,"Published as a conference paper at ICLR 2018
+MIXED PRECISION TRAINING
+Sharan Narang∗, Gregory Diamos, Erich Elsen†
+Baidu Research
+{sharan,
+Paulius Micikevicius∗, Jonah Alben, David Garcia, Boris Ginsburg, Michael Houston,
+Oleksii Kuchaiev, Ganesh Venkatesh, Hao Wu
+NVIDIA
+{pauliusm, alben, dagarcia, bginsburg, mhouston,
+okuchaiev, gavenkatesh,"
+2eb37a3f362cffdcf5882a94a20a1212dfed25d9,Local Feature Based Face Recognition,"Local Feature Based Face Recognition
+Sanjay A. Pardeshi and Sanjay N. Talbar
+R.I.T., Rajaramnagar and S.G.G.S. COE &T, Nanded
+India
+. Introduction
+A reliable automatic face recognition (AFR) system is a need of time because in today's
+networked world, maintaining the security of private information or physical property is
+becoming increasingly important and difficult as well. Most of the time criminals have been
+taking the advantage of fundamental flaws in the conventional access control systems i.e.
+the systems operating on credit card, ATM etc. do not grant access by ""who we are"", but by
+""what we have”. The biometric based access control systems have a potential to overcome
+most of the deficiencies of conventional access control systems and has been gaining the
+importance in recent years. These systems can be designed with biometric traits such as
+fingerprint, face, iris, signature, hand geometry etc. But comparison of different biometric
+traits shows that face is very attractive biometric because of its non-intrusiveness and social
+cceptability. It provides automated methods of verifying or recognizing the identity of a
+living person based on its facial characteristics.
+In last decade, major advances occurred in face recognition, with many systems capable of
+achieving recognition rates greater than 90%. However real-world scenarios remain a
+challenge, because face acquisition process can undergo to a wide range of variations. Hence
+2e0481def73dbd3e6dfb447c1c3c8afdfaf9b7ec,UPC System for the 2015 MediaEval Multimodal Person Discovery in Broadcast TV task,"UPC System for the 2015 MediaEval Multimodal Person
+Discovery in Broadcast TV task
+M. India, D. Varas, V. Vilaplana, J.R. Morros, J. Hernando
+Universitat Politecnica de Catalunya, Spain"
+2e5cfa97f3ecc10ae8f54c1862433285281e6a7c,Generative Adversarial Networks for Improving Face Classification,"Generative Adversarial Networks for Improving Face Classification JONAS NATTEN SUPERVISOR Morten Goodwin, PhD University of Agder, 2017 Faculty of Engineering and Science Department of ICT"
+2e091b311ac48c18aaedbb5117e94213f1dbb529,Collaborative Facial Landmark Localization for Transferring Annotations Across Datasets,"Collaborative Facial Landmark Localization
+for Transferring Annotations Across Datasets
+Brandon M. Smith and Li Zhang
+University of Wisconsin – Madison
+http://www.cs.wisc.edu/~lizhang/projects/collab-face-landmarks/"
+2e1415a814ae9abace5550e4893e13bd988c7ba1,Dictionary Based Face Recognition in Video Using Fuzzy Clustering and Fusion,"International Journal of Engineering Trends and Technology (IJETT) – Volume 21 Number 3 – March 2015
+Dictionary Based Face Recognition in Video Using
+Fuzzy Clustering and Fusion
+Neeraja K.C.#1, RameshMarivendan E.#2,
+#1IInd year M.E. Student, #2Assistant Professor
+#1#2ECE Department, Dhanalakshmi Srinivasan College of Engineering,
+Coimbatore,Tamilnadu,India.
+Anna University."
+2eefaa9c278346b9e0eb51085cff490b0a43688f,TEMPO: Feature-Endowed Teichmüller Extremal Mappings of Point Clouds,"Vol. 9, No. 4, pp. 1922–1962
+(cid:13) 2016 Society for Industrial and Applied Mathematics
+TEMPO: Feature-Endowed Teichm¨uller Extremal Mappings of Point Clouds∗
+Ting Wei Meng† , Gary Pui-Tung Choi‡ , and Lok Ming Lui†"
+2ea8029283e6bbb03c023070d042cb19647f06af,Neurobiological mechanisms associated with facial affect recognition deficits after traumatic brain injury,"Neurobiological mechanisms associated with facial affect recognition deficits after
+traumatic brain injury
+Dawn Neumann, PhD
+Indiana University School of Medicine
+Department of Physical Medicine and Rehabilitation
+Rehabilitation Hospital of Indiana
+141 Shore Drive
+Indianapolis, IN 46254
+Email:
+Phone: 317-329-2188
+Brenna C. McDonald, PsyD, MBA
+Indiana University School of Medicine
+Department of Radiology and Imaging Sciences
+Indiana University Center for Neuroimaging
+55 W. 16th St., GH Suite 4100
+Indianapolis, IN 46202
+Email:
+John West, MS
+Indiana University School of Medicine
+Department of Radiology and Imaging Sciences"
+2e68190ebda2db8fb690e378fa213319ca915cf8,Generating Videos with Scene Dynamics,"Generating Videos with Scene Dynamics
+Carl Vondrick
+Hamed Pirsiavash
+Antonio Torralba"
+2e0d56794379c436b2d1be63e71a215dd67eb2ca,Improving precision and recall of face recognition in SIPP with combination of modified mean search and LSH,"Improving precision and recall of face recognition in SIPP with combination of
+modified mean search and LSH
+Xihua.Li"
+2ed9a69ee6509c0b3fe5a51d1116dccc877653ba,Reconstruction and Analysis of Shapes from 3D Scans,"Reconstruction and Analysis
+of Shapes from 3D Scans"
+2e7874ec37df91db1934d61d9e1181de5e4efb36,COCO-Stuff: Thing and Stuff Classes in Context,"COCO-Stuff: Thing and Stuff Classes in Context
+Holger Caesar1
+Jasper Uijlings2 Vittorio Ferrari1 2
+University of Edinburgh1 Google AI Perception2"
+2e585adbe1f434396ca6a669dd91914d4d4bf42a,Early Prediction for Physical Human Robot Collaboration in the Operating Room,"TO APPEAR IN AUTONOMOUS ROBOTS, SPECIAL ISSUE IN LEARNING FOR HUMAN-ROBOT COLLABORATION
+Early Prediction for Physical Human Robot
+Collaboration in the Operating Room
+Tian Zhou, Student Member, IEEE, and Juan Wachs, Member, IEEE"
+2edf55ebc88e89c4caff0c49c6b8e79f46407d19,Pruning Deep Neural Networks using Partial Least Squares,"Pruning Deep Neural Networks using Partial Least Squares
+Artur Jordao, Ricardo Kloss∗, Fernando Yamada and William Robson Schwartz
+Smart Sense Laboratory, Computer Science Department
+Universidade Federal de Minas Gerais, Brazil
+Email: {arturjordao, rbk, fernandoakio,"
+2e1ff08fb5790e3b5ba7864408628467795a9df4,Human Pose Estimation with Fields of Parts,"Human Pose Estimation
+with Fields of Parts
+Martin Kiefel and Peter Vincent Gehler
+Max Planck Institute for Intelligent Systems, T¨ubingen Germany"
+2e1822bf06d80f5ad07a79a4bfff98c1c18fb573,Knowing who to listen to: Prioritizing experts from a diverse ensemble for attribute personalization,"KNOWING WHO TO LISTEN TO: PRIORITIZING EXPERTS FROM A DIVERSE
+ENSEMBLE FOR ATTRIBUTE PERSONALIZATION
+Shrenik Lad1, Bernardino Romera Paredes2, Julien Valentin2, Philip Torr2, Devi Parikh1
+. Virginia Tech 2. University of Oxford"
+2e475f1d496456831599ce86d8bbbdada8ee57ed,Groupsourcing: Team Competition Designs for Crowdsourcing,"Groupsourcing: Team Competition Designs for
+Crowdsourcing
+Markus Rokicki, Sergej Zerr, Stefan Siersdorfer
+L3S Research Center, Hannover, Germany"
+2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e,3DPeS: 3D people dataset for surveillance and forensics,"DPeS: 3D People Dataset for Surveillance and Forensics
+Davide Baltieri, Roberto Vezzani, Rita Cucchiara
+{davide.baltieri, roberto.vezzani, rita.cucchiara}
+University of Modena and Reggio Emilia, Italy (Dipartimento di Ingegneria dell’Informazione)
+A new Dataset for People
+Tracking and Reidentification
+600 videos, 200 people, 8 cameras
+Calibration and 3D scene reconstruction
+taken
+The dataset contains hundreds of video sequences of
+from a multi-camera distributed
+00 people
+surveillance system over several days, with different light
+onditions; each person is detected multiple times and
+from different points of view.
+The dataset
+The starting point of our dataset is a real
+surveillance setup, composed by 8 different
+surveillance cameras, monitoring a section of the
+ampus of the University of Modena and Reggio"
+2ef51b57c4a3743ac33e47e0dc6a40b0afcdd522,Leveraging Billions of Faces to Overcome Performance Barriers in Unconstrained Face Recognition,"Leveraging Billions of Faces to Overcome
+Performance Barriers in Unconstrained Face
+Recognition
+Yaniv Taigman and Lior Wolf
+face.com
+{yaniv,"
+2efc4eee3953f6b52e23989bbcc2598a91e18ba0,External Cameras and a Mobile Robot for Enhanced Multi-person Tracking,"RFAntennas2D SICKLaserFirewire Cameraon PTU LaptopCamera 1Flea RGB Camera 2Flea RGBHubFirewireFigure1:Perceptualplatform;staticcameras(withroughpositionsandfieldsofview)andthemobilerobotRackham.Thispaperisstructuredasfollows:architectureofthecooperativesystemispresentedinsection2.Sec-tion3describesthedifferentdetectionmodalitiesthatdrivethemulti-persontracker(presentedinsection4).Evaluationsandresultsarepresentedinsection5fol-lowedbyconcludingremarksinsection6.2ARCHITECTUREOurcooperativeframeworkismadeupofamobilerobotandtwofixedviewwall-mountedRGBflea2cameras(figure1).Thecamerashaveamaximumres-olutionof640x480pixelsandareconnectedtoadual-coreIntelCentrinoLaptopviaafire-wirecable.Therobot,calledRackham,isaniRobotB21rmobileplat-form.Ithasvarioussensors,ofwhichitsSICKLaserRangeFinder(LRF)isutilizedinthiswork.Commu-nicationbetweenthemobilerobotandthecomputer"
+2e956e178fd50ab140f30f9255a83d853c8be210,Robust Facial Expression Recognition via Compressive Sensing,"Sensors 2012, 12, 3747-3761; doi:10.3390/s120303747
+OPEN ACCESS
+sensors
+ISSN 1424-8220
+www.mdpi.com/journal/sensors
+Article
+Robust Facial Expression Recognition via Compressive Sensing
+Shiqing Zhang 1, Xiaoming Zhao 2,* and Bicheng Lei 1
+School of Physics and Electronic Engineering, Taizhou University, Taizhou 318000, China;
+E-Mails: (S.Z.); (B.L.)
+Department of Computer Science, Taizhou University, Taizhou 318000, China
+* Author to whom correspondence should be addressed; E-Mail:
+Tel./Fax: +86-576-8513-7178.
+Received: 28 December 2011; in revised form: 19 February 2012 / Accepted: 16 March 2012 /
+Published: 21 March 2012"
+2e082232eb37c98052e62eec76e674a491082544,Virtual Scenarios: Achievements and Current Work,"Virtual Scenarios: Achievements and Current Work
+Javier Mar´ın, David V´azquez and Antonio M. L´opez
+ADAS, Computer Vision Center, Universitat Autonoma de Barcelona, Spain
+e-mail:{ jmarin, dvazquez, antonio"
+2eae02d59a3f455f3714ce674d85d3f073c9d7a2,All in the first glance: first fixation predicts individual differences in valence bias.,"Cognition and Emotion
+ISSN: 0269-9931 (Print) 1464-0600 (Online) Journal homepage: http://www.tandfonline.com/loi/pcem20
+All in the first glance: first fixation predicts
+individual differences in valence bias
+Maital Neta, Tien T. Tong, Monica L. Rosen, Alex Enersen, M. Justin Kim &
+Michael D. Dodd
+To cite this article: Maital Neta, Tien T. Tong, Monica L. Rosen, Alex Enersen, M. Justin Kim &
+Michael D. Dodd (2016): All in the first glance: first fixation predicts individual differences in
+valence bias, Cognition and Emotion, DOI: 10.1080/02699931.2016.1152231
+To link to this article: http://dx.doi.org/10.1080/02699931.2016.1152231
+View supplementary material
+Published online: 10 Mar 2016.
+Submit your article to this journal
+View related articles
+View Crossmark data
+Full Terms & Conditions of access and use can be found at
+http://www.tandfonline.com/action/journalInformation?journalCode=pcem20
+Download by: [University of Nebraska, Lincoln]
+Date: 10 March 2016, At: 09:04"
+2ed4973984b254be5cba3129371506275fe8a8eb,Victoria Ovsyannikova THE EFFECTS OF MOOD ON EMOTION RECOGNITION AND ITS RELATIONSHIP WITH THE GLOBAL VS LOCAL INFORMATION PROCESSING,"Victoria Ovsyannikova
+THE EFFECTS OF MOOD ON
+EMOTION RECOGNITION AND
+ITS RELATIONSHIP WITH THE
+GLOBAL VS LOCAL
+INFORMATION PROCESSING
+STYLES
+BASIC RESEARCH PROGRAM
+WORKING PAPERS
+SERIES: PSYCHOLOGY
+WP BRP 60/PSY/2016
+This Working Paper is an output of a research project implemented at the National Research
+University Higher School of Economics (HSE). Any opinions or claims contained in this
+Working Paper do not necessarily reflect the views of HSE"
+2e9c780ee8145f29bd1a000585dd99b14d1f5894,Simultaneous Adversarial Training - Learn from Others Mistakes,"Simultaneous Adversarial Training - Learn from
+Others’ Mistakes
+Zukang Liao
+Lite-On Singapore Pte. Ltd, 2Imperial College London"
+2ebc35d196cd975e1ccbc8e98694f20d7f52faf3,Towards Wide-angle Micro Vision Sensors,"This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication.
+IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE
+Towards Wide-angle Micro Vision Sensors
+Sanjeev J. Koppal*
+Ioannis Gkioulekas* Travis Young+ Hyunsung Park*
+Kenneth B. Crozier* Geoffrey L. Barrows+ Todd Zickler*"
+2ea46531f7d837c1e4b9e6a8d8fc084c6e526545,Just Look at the Image: Viewpoint-Specific Surface Normal Prediction for Improved Multi-View Reconstruction,"Just look at the image: viewpoint-specific surface normal prediction
+for improved multi-view reconstruction
+Silvano Galliani
+Konrad Schindler
+Photogrammetry and Remote Sensing, ETH Zurich"
+2e927d0a2dc4b69fc03124ad876329b22a61f1b0,Temporal Reasoning in Videos using Convolutional Gated Recurrent Units,"Temporal Reasoning in Videos using Convolutional Gated Recurrent Units
+Debidatta Dwibedi∗
+Pierre Sermanet
+Jonathan Tompson
+Google Brain
+{debidatta, sermanet,"
+2ec393b4fa5739c54ac9f61e583f5e41cfb2687c,Face Recognition using Spherical Wavelets,"Face Recognition using Spherical Wavelets
+Christian Lessig∗"
+2e55fd3f5138e55250aed84a7dc17adfc34970d3,The implications of social neuroscience for social disability.,"J Autism Dev Disord (2012) 42:1256–1262
+DOI 10.1007/s10803-012-1514-z
+O R I G I N A L P A P E R
+The Implications of Social Neuroscience for Social Disability
+James C. McPartland • Kevin A. Pelphrey
+Published online: 29 March 2012
+Ó Springer Science+Business Media, LLC 2012"
+2ea78e128bec30fb1a623c55ad5d55bb99190bd2,Residual vs. Inception vs. Classical Networks for Low-Resolution Face Recognition,"Residual vs. Inception vs. Classical Networks for
+Low-Resolution Face Recognition
+Christian Herrmann1,2, Dieter Willersinn2, and J¨urgen Beyerer1,2
+Vision and Fusion Lab, Karlsruhe Institute of Technology KIT, Karlsruhe, Germany
+Fraunhofer IOSB, Karlsruhe, Germany
+{christian.herrmann,dieter.willersinn,"
+2e0f5e72ad893b049f971bc99b67ebf254e194f7,Apparel Classification with Style,"Apparel Classification with Style
+Lukas Bossard1, Matthias Dantone1, Christian Leistner1,2,
+Christian Wengert1,3, Till Quack3, Luc Van Gool1,4
+ETH Z¨urich, Switzerland 2Microsoft, Austria 3Kooaba AG, Switzerland
+KU Leuven, Belgium"
+2e491c8e3d1d3314ea5e50943c0bdf2aa57b99b7,Weighted joint sparse representation-based classification method for robust alignment-free face recognition,"Downloaded From: https://www.spiedigitallibrary.org/journals/Journal-of-Electronic-Imaging on 12/17/2017 Terms of Use: https://www.spiedigitallibrary.org/terms-of-use
+Weightedjointsparserepresentation-basedclassificationmethodforrobustalignment-freefacerecognitionBoSunFengXuGuoyanZhouJunHeFengxiangGe"
+2ec7d6a04c8c72cc194d7eab7456f73dfa501c8c,A R Eview on T Exture B Ased E Motion R Ecognition from F Acial E Xpression,"International Journal of Scientific Research and Management Studies (IJSRMS)
+ISSN: 2349-3771
+Volume 3 Issue 4, pg: 164-169
+A REVIEW ON TEXTURE BASED EMOTION RECOGNITION
+FROM FACIAL EXPRESSION
+Rishabh Bhardwaj, 2Amit Kumar Chanchal, 3 Shubham Kashyap,
+3 Pankaj Pandey, 3Prashant Kumar
+U.G. Scholars, 2Assistant Professor,
+Dept. of E & C Engg., MIT Moradabad, Ram Ganga Vihar, Phase II, Moradabad, India."
+2eb9f1dbea71bdc57821dedbb587ff04f3a25f07,Face for Ambient Interface,"Face for Ambient Interface
+Maja Pantic
+Imperial College, Computing Department, 180 Queens Gate,
+London SW7 2AZ, U.K."
+2e6c3557cb90f472e6798fcaa8ecc9dff3557f11,Towards Perspective-Free Object Counting with Deep Learning,"Towards perspective-free object counting with
+deep learning
+Daniel O˜noro-Rubio and Roberto J. L´opez-Sastre
+GRAM, University of Alcal´a, Alcal´a de Henares, Spain"
+2e56209ed179be641e6df5efd11be8b3d54a62e9,Combining Deep and Handcrafted Image Features for Presentation Attack Detection in Face Recognition Systems Using Visible-Light Camera Sensors,"Article
+Combining Deep and Handcrafted Image Features for
+Presentation Attack Detection in Face Recognition
+Systems Using Visible-Light Camera Sensors
+Dat Tien Nguyen, Tuyen Danh Pham, Na Rae Baek and Kang Ryoung Park *
+Division of Electronics and Electrical Engineering, Dongguk University, 30 Pildong-ro 1-gil, Jung-gu,
+Seoul 100-715, Korea; (D.T.N.); (T.D.P.);
+(N.R.B.)
+* Correspondence: Tel.: +82-10-3111-7022; Fax: +82-2-2277-8735
+Received: 30 January 2018; Accepted: 24 February 2018; Published: 26 February 2018"
+2efc6f98720b804345c030e22aef6c9f4a53023e,Soft-biometrics evaluation for people re-identification in uncontrolled multi-camera environments,"Moctezuma et al. EURASIP Journal on Image and Video Processing (2015) 2015:28
+DOI 10.1186/s13640-015-0078-1
+RESEARCH
+Open Access
+Soft-biometrics evaluation for people
+re-identification in uncontrolled multi-camera
+environments
+Daniela Moctezuma1*, Cristina Conde2, Isaac Martín De Diego2 and Enrique Cabello2"
+2e708431df3e7a9585a338e1571f078ddbe93a71,Deep Pain: Exploiting Long Short-Term Memory Networks for Facial Expression Classification.,"Aalborg Universitet
+Deep Pain
+Rodriguez, Pau; Cucurull, Guillem; Gonzàlez, Jordi; M. Gonfaus, Josep ; Nasrollahi, Kamal;
+Moeslund, Thomas B.; Xavier Roca, F.
+Published in:
+I E E E Transactions on Cybernetics
+DOI (link to publication from Publisher):
+0.1109/TCYB.2017.2662199
+Publication date:
+Document Version
+Accepted author manuscript, peer reviewed version
+Link to publication from Aalborg University
+Citation for published version (APA):
+Rodriguez, P., Cucurull, G., Gonzàlez, J., M. Gonfaus, J., Nasrollahi, K., Moeslund, T. B., & Xavier Roca, F.
+(2017). Deep Pain: Exploiting Long Short-Term Memory Networks for Facial Expression Classification. I E E E
+Transactions on Cybernetics, 1-11. DOI: 10.1109/TCYB.2017.2662199
+General rights
+Copyright and moral rights for the publications made accessible in the public portal are retained by the authors and/or other copyright owners
+nd it is a condition of accessing publications that users recognise and abide by the legal requirements associated with these rights.
+? Users may download and print one copy of any publication from the public portal for the purpose of private study or research."
+2e832d5657bf9e5678fd45b118fc74db07dac9da,"Recognition of Facial Expressions of Emotion: The Effects of Anxiety, Depression, and Fear of Negative Evaluation","Running head: RECOGNITION OF FACIAL EXPRESSIONS OF EMOTION
+Recognition of Facial Expressions of Emotion: The Effects of Anxiety, Depression, and Fear of Negative
+Evaluation
+Rachel Merchak
+Wittenberg University
+Rachel Merchak, Psychology Department, Wittenberg University.
+Author Note
+This research was conducted in collaboration with Dr. Stephanie Little, Psychology Department,
+Wittenberg University, and Dr. Michael Anes, Psychology Department, Wittenberg University.
+Correspondence concerning this article should be addressed to Rachel Merchak, 10063 Fox
+Chase Drive, Loveland, OH 45140.
+E‐mail:"
+2bb968e8f9df0fa72dd72e5d705ea7b75af8dcd7,Fast Support Vector Classifier for automated content-based search in video surveillance,"Fast Support Vector Classifier for Automated
+Content-based Search in Video Surveillance
+Cătălin A. Mitrea1, Ionuț Mironică1, Bogdan Ionescu1,2, Radu Dogaru1
+LAPI & Natural Computing Labs, University “Politehnica” of Bucharest, 061971, Romania
+LISTIC, University Savoie Mont Blanc, 74940 Annecy-le-Vieux, France
+Email:
+for multiple-instance human retrieval"
+2ba5e4c421b1413139e4bc5d935d6d48cc753757,Vantage Feature Frames for Fine-Grained Categorization,"Vantage Feature Frames For Fine-Grained Categorization
+Asma Rejeb Sfar
+INRIA Saclay
+Palaiseau, France
+Nozha Boujemaa
+INRIA Saclay
+Palaiseau, France
+Donald Geman
+Johns Hopkins University
+Baltimore, MD, USA
+sma.rejeb"
+2baea24cc71793ba40cf738b7ad1914f0e549863,Attribute Augmented Convolutional Neural Network for Face Hallucination,"Attribute Augmented Convolutional Neural Network for Face Hallucination
+Cheng-Han Lee1 Kaipeng Zhang1 Hu-Cheng Lee1 Chia-Wen Cheng2 Winston Hsu1
+National Taiwan University 2The University of Texas at Austin
+{r05922077, r05944047, r05922174,"
+2ba64deeb3e170e4776e2d2704771019cf9c8639,Differences between Old and Young Adults’ Ability to Recognize Human Faces Underlie Processing of Horizontal Information,"AGING NEUROSCIENCE
+ORIGINAL RESEARCH ARTICLE
+published: 23 April 2012
+doi: 10.3389/fnagi.2012.00003
+Differences between old and young adults’ ability to
+recognize human faces underlie processing of
+horizontal information
+Sven Obermeyer *,Thorsten Kolling, Andreas Schaich and Monika Knopf
+Department of Psychology, Institute for Psychology, Goethe-University Frankfurt am Main, Frankfurt am Main, Germany
+Edited by:
+Hari S. Sharma, Uppsala University,
+Sweden
+Reviewed by:
+Luis Francisco Gonzalez-Cuyar,
+University of Washington School of
+Medicine, USA
+Gregory F. Oxenkrug, Tufts University,
+*Correspondence:
+Sven Obermeyer , Department of
+Psychology, Goethe-University"
+2b50f8e4568ecd84e2f9d6357254272d8db4bbd4,Hierarchical Gaussian Descriptor for Person Re-identification,"Hierarchical Gaussian Descriptor for Person Re-Identification
+Tetsu Matsukawa1, Takahiro Okabe2, Einoshin Suzuki1, Yoichi Sato3
+Kyushu University 2 Kyushu Institute of Technology 3 The University of Tokyo
+{matsukawa,"
+2bf41bf420c8d86dd1bffbacd28c70fa8b12b6dd,Counting the uncountable: deep semantic density estimation from Space,"Counting the uncountable: Deep semantic
+density estimation from space
+Andres C. Rodriguez and Jan D. Wegner
+ETH Zurich, Stefano-franscini-platz 5 8093 Zurich, Switzerland
+Accepted at GCPR 2018"
+2b4d092d70efc13790d0c737c916b89952d4d8c7,Robust Facial Expression Recognition using Local Haar Mean Binary Pattern,"JOURNAL OF INFORMATION SCIENCE AND ENGINEERING 32, XXXX-XXXX (2016)
+Robust Facial Expression Recognition using Local Haar
+Mean Binary Pattern
+MAHESH GOYANI1, NARENDRA PATEL2
+,2 Department of Computer Engineering
+Charotar University of Science and Technology, Changa, India
+Gujarat Technological University, V.V.Nagar, India
+E-mail:
+In this paper, we propose a hybrid statistical feature extractor, Local Haar Mean Bina-
+ry Pattern (LHMBP). It extracts level-1 haar approximation coefficients and computes Local
+Mean Binary Pattern (LMBP) of it. LMBP code of pixel is obtained by weighting the
+thresholded neighbor value of 3  3 patch on its mean. LHMBP produces highly discrimina-
+tive code compared to other state of the art methods. To localize appearance features, ap-
+proximation subband is divided into M  N regions. LHMBP feature descriptor is derived
+y concatenating LMBP distribution of each region. We also propose a novel template
+matching strategy called Histogram Normalized Absolute Difference (HNAD) for histogram
+ased feature comparison. Experiments prove the superiority of HNAD over well-known
+template matching techniques such as L2 norm and Chi-Square. We also investigated
+LHMBP for expression recognition in low resolution. The performance of the proposed ap-
+proach is tested on well-known CK, JAFFE, and SFEW facial expression datasets in diverse"
+2b1358efbceda12de2f36398cdbdb3c7bccc70d4,Unified Detection and Tracking of Instruments during Retinal Microsurgery,"This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication.
+JOURNAL OF LATEX CLASS FILES, VOL. 6, NO. 1, JANUARY 2007
+Unified detection and tracking of instruments
+during retinal microsurgery
+Raphael Sznitman, Rogerio Richa, Russell H. Taylor Fellow, IEEE, Bruno Jedynak
+nd Gregory D. Hager, Fellow, IEEE"
+2befea9b289f22547f8911aa56672d6373c1ac64,GAIDON et al.: RECOGNIZING ACTIVITIES WITH CLUSTER-TREES OF TRACKLETS 1 Recognizing activities with cluster-trees of tracklets,"GAIDON et al.: RECOGNIZING ACTIVITIES WITH CLUSTER-TREES OF TRACKLETS
+Recognizing activities with cluster-trees of
+tracklets
+Adrien Gaidon
+http://lear.inrialpes.fr/people/gaidon
+Zaid Harchaoui
+http://lear.inrialpes.fr/people/harchaoui
+Cordelia Schmid
+http://lear.inrialpes.fr/people/schmid
+LEAR - INRIA Grenoble, LJK
+655, avenue de l’Europe
+8330 Montbonnot, France"
+2b4d40ef1610500c207f166e9a5b55dbfe234045,A New Biased Discriminant Analysis Using Composite Vectors for Eye Detection,"IEEE TRANSACTIONS ON SYSTEMS, MAN, AND CYBERNETICS—PART B: CYBERNETICS, VOL. 42, NO. 4, AUGUST 2012
+A New Biased Discriminant Analysis Using
+Composite Vectors for Eye Detection
+Chunghoon Kim, Member, IEEE, Sang-Il Choi, Member, IEEE,
+Matthew Turk, Senior Member, IEEE, and Chong-Ho Choi, Member, IEEE"
+2b0ff4b82bac85c4f980c40b3dc4fde05d3cc23f,An Effective Approach for Facial Expression Recognition with Local Binary Pattern and Support Vector Machine,"An Effective Approach for Facial Expression Recognition with Local Binary
+Pattern and Support Vector Machine
+Cao Thi Nhan, 2Ton That Hoa An, 3Hyung Il Choi
+*1School of Media, Soongsil University,
+School of Media, Soongsil University,
+School of Media, Soongsil University,"
+2bac4161a928eb33e6be700ed8ea4d823494b22c,MergeNet: A Deep Net Architecture for Small Obstacle Discovery,"MergeNet: A Deep Net Architecture for Small Obstacle Discovery
+Krishnam Gupta1, Syed Ashar Javed2, Vineet Gandhi2 and K. Madhava Krishna2
+evidences is more likely to perform the task better. Recent
+efforts [3] on multi modal fusion also suggests likewise."
+2baf54199b4b0047f3610ba691fb0a718dbce97e,Development of an Efficient Face Recognition System based on Linear and Nonlinear Algorithms,"International Journal of Computer Applications (0975 – 8887)
+Volume 134 – No.7, January 2016
+Development of an Efficient Face Recognition System
+ased on Linear and Nonlinear Algorithms
+Filani Araoluwa S.
+Department of Computer Science,
+The Federal University of Technology,
+P.M.B.704, Akure, Ondo State, Nigeria."
+2b4b0795358d0264f846e8b3c19ec3180da301cc,Active MAP Inference in CRFs for Efficient Semantic Segmentation,"Active MAP Inference in CRFs for Efficient Semantic Segmentation
+Roderick de Nijs2
+Gemma Roig1 ∗
+Sebastian Ramos3
+Xavier Boix1 ∗
+Kolja K¨uhnlenz2
+Luc Van Gool1,4
+ETH Z¨urich, Switzerland 2TU Munchen, Germany 3CVC Barcelona, Spain 4KU Leuven, Belgium
+Both first authors contributed equally."
+2ba7c88a7e96d412c116d6bea4ba27be2ed4dd48,CocoNet: A deep neural network for mapping pixel coordinates to color values,"CocoNet: A Deep Neural Network for Mapping
+Pixel Coordinates to Color Values
+Paul Andrei Bricman1 and Radu Tudor Ionescu2
+George Co¸sbuc National College, 29-31 Olari, Bucharest, Romania,
+University of Bucharest, 14 Academiei, Bucharest, Romania"
+2b285e5eaeb7a2aa7e37c5ae6762b838d3742b4e,Video event recognition using concept attributes,"Video Event Recognition Using Concept Attributes
+Jingen Liu, Qian Yu, Omar Javed, Saad Ali, Amir Tamrakar, Ajay Divakaran, Hui Cheng, Harpreet Sawhney
+SRI International Sarnoff
+Princeton, NJ, USA 08540"
+2bd49bdfc61788c8ac5621fe7f08a06dd2152fb9,Pose Invariant Face Recognition Using Neuro - Biologically Inspired Features Pramod,"International Journal of Future Computer and Communication, Vol. 1, No. 3, October 2012
+Pose Invariant Face Recognition Using
+Neuro-Biologically Inspired Features
+Pramod Kumar Pisharady and Martin Saerbeck"
+2b1327a51412646fcf96aa16329f6f74b42aba89,Improving performance of recurrent neural network with relu nonlinearity,"Under review as a conference paper at ICLR 2016
+IMPROVING PERFORMANCE OF RECURRENT NEURAL
+NETWORK WITH RELU NONLINEARITY
+Sachin S. Talathi & Aniket Vartak
+Qualcomm Research
+San Diego, CA 92121, USA"
+2bdc0c79b26fed51bc2af1af16117879ee3f571e,Augmented Multitouch Interaction upon a 2-DOF Rotating Disk,"Augmented Multitouch Interaction
+upon a 2-DOF Rotating Disk
+Xenophon Zabulis, Panagiotis Koutlemanis, and Dimitris Grammenos
+Institute of Computer Science, Foundation for Research and Technology - Hellas,
+Herakleion, Crete, Greece"
+2b8a61184b6423e3d5285803eb1908ff955db1a8,Processing and analysis of 2 . 5 D face models for non-rigid mapping based face recognition using differential geometry tools,"Processing and analysis of 2.5D face models for
+non-rigid mapping based face recognition using
+differential geometry tools
+Przemyslaw Szeptycki
+To cite this version:
+Przemyslaw Szeptycki. Processing and analysis of 2.5D face models for non-rigid mapping
+ased face recognition using differential geometry tools. Other. Ecole Centrale de Lyon, 2011.
+English. <NNT : 2011ECDL0020>. <tel-00675988>
+HAL Id: tel-00675988
+https://tel.archives-ouvertes.fr/tel-00675988
+Submitted on 2 Mar 2012
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destin´ee au d´epˆot et `a la diffusion de documents
+scientifiques de niveau recherche, publi´es ou non,"
+2b632f090c09435d089ff76220fd31fd314838ae,Early Adaptation of Deep Priors in Age Prediction from Face Images,"Early Adaptation of Deep Priors in Age Prediction from Face Images
+Mahdi Hajibabaei
+Computer Vision Lab
+D-ITET, ETH Zurich
+Anna Volokitin
+Computer Vision Lab
+D-ITET, ETH Zurich
+Radu Timofte
+CVL, D-ITET, ETH Zurich
+Merantix GmbH"
+2beb9777bf452d02f9bec5275c100f4a736def10,Near Duplicate Image Discovery on One Billion Images,"Near Duplicate Image Discovery on One Billion Images
+Saehoon Kim ∗
+Department of Computer Science,
+POSTECH, Korea
+Xin-Jing Wang
+Web Search and Mining Group
+Microsoft Research Asia, Beijing
+Lei Zhang
+Web Search and Mining Group
+Microsoft Research Asia, Beijing
+Seungjin Choi
+Department of Computer Science,
+POSTECH, Korea"
+2b507f659b341ed0f23106446de8e4322f4a3f7e,Deep Identity-aware Transfer of Facial Attributes,"Deep Identity-aware Transfer of Facial Attributes
+Mu Li1, Wangmeng Zuo2, David Zhang1
+The Hong Kong Polytechnic University 2Harbin Institute of Technology"
+2bbb772332a90b2aba893f7467daa76b373be240,Extracting 3D Layout From a Single Image Using Global Image Structures,"Extracting 3D Layout From a Single Image
+Using Global Image Structures
+Zhongyu Lou, Theo Gevers, Member, IEEE, and Ninghang Hu"
+2b8dfbd7cae8f412c6c943ab48c795514d53c4a7,Polynomial based texture representation for facial expression recognition,"014 IEEE International Conference on Acoustic, Speech and Signal Processing (ICASSP)
+978-1-4799-2893-4/14/$31.00 ©2014 IEEE
+e-mail:
+e-mail:
+RECOGNITION
+. INTRODUCTION
+(d1,d2)∈[0;d]2
+d1+d2≤d"
+2b3fe9a0356eaf50f1340dda3f3d14f6904905ec,Taking advantage of sensor modality specific properties in Automated Driving Extended Abstract,"Taking advantage of sensor modality specific properties in
+Automated Driving"
+2b9082b6b5266f6f7d7a95892f30cc84138697e5,Video Person Re-identification by Temporal Residual Learning,"SUBMITTED TO IEEE TRANSACTIONS ON IMAGE PROCESSING, VOL. XX, NO. XX, FEB 2018
+Video Person Re-identification by Temporal
+Residual Learning
+Ju Dai∗, Pingping Zhang∗, Huchuan Lu, Senior Member, IEEE, and Hongyu Wang, Member, IEEE"
+2bae810500388dd595f4ebe992c36e1443b048d2,Analysis of Facial Expression Recognition by Event-related Potentials,"International Journal of Bioelectromagnetism
+Vol. 18, No. 1, pp. 13 - 18, 2016
+www.ijbem.org
+Analysis of Facial Expression Recognition
+y Event-related Potentials
+Taichi Hayasaka and Ayumi Miyachi
+Department of Information and Computer Engineering,
+National Institute of Technology, Toyota College, Japan
+Correspondence: Taichi Hayasaka, Department of Information and Computer Engineering, National Institute of Technology,
+Toyota College, 2-1 Eisei, Toyota-shi, Aichi, 471-8525 Japan,
+E-mail: phone +81 565 36 5861, fax +81 565 36 5926"
+2bbbbe1873ad2800954058c749a00f30fe61ab17,Face Verification across Ages Using Self Organizing Map,"ISSN(Online): 2320-9801
+ISSN (Print): 2320-9798
+International Journal of Innovative Research in Computer and Communication Engineering
+(An ISO 3297: 2007 Certified Organization)
+Vol.2, Special Issue 1, March 2014
+Proceedings of International Conference On Global Innovations In Computing Technology (ICGICT’14)
+Organized by
+Department of CSE, JayShriram Group of Institutions, Tirupur, Tamilnadu, India on 6th & 7th March 2014
+Face Verification across Ages Using Self
+Organizing Map
+B.Mahalakshmi1, K.Duraiswamy2, P.Gnanasuganya3, P.Aruldhevi4, R.Sundarapandiyan5
+Associate Professor, Department of CSE, K.S.Rangasamy College of Technology, Namakkal, TamilNadu, India1
+Dean, K.S.Rangasamy College of Technology, Namakkal, TamilNadu, India2
+B.E, Department of CSE, K.S.Rangasamy College of Technology, Namakkal, TamilNadu, India3, 4, 5"
+2b8667df1a0332386d8d799fbac0327496ce02c9,Stranger danger: Parenthood increases the envisioned bodily formidability of menacing men,"Evolution and Human Behavior 35 (2014) 109–117
+Contents lists available at ScienceDirect
+Evolution and Human Behavior
+j o u r n a l h o m e p a g e : w w w . e h b o n l i n e . o r g
+Original Article
+Stranger danger: Parenthood increases the envisioned bodily formidability
+of menacing men☆
+Daniel M.T. Fessler a,b,⁎, Colin Holbrook a,b, Jeremy S. Pollack b, Jennifer Hahn-Holbrook b,c
+Department of Anthropology, University of California, Los Angeles, Los Angeles, CA 90095, USA
+Center for Behavior, Evolution, and Culture, University of California, Los Angeles, Los Angeles, CA 90095, USA
+Department of Psychology, University of California, Los Angeles, Los Angeles, CA 90095, USA
+r t i c l e
+i n f o
+b s t r a c t
+Article history:
+Initial receipt 6 April 2013
+Final revision received 1 November 2013
+Keywords:
+Parenthood
+Relative formidability"
+47fc921add1421ff8adb730df7aa9e7f865bfdeb,Toward Practical Smile Detection,"Towards Practical Smile Detection
+Jacob Whitehill, Gwen Littlewort, Ian Fasel, Marian Bartlett, and Javier Movellan"
+4701112bfe9946a97a60c2bbb2d47dc784942c3f,Understanding classifier errors by examining influential neighbors,"Understanding Classifier Errors by Examining Influential Neighbors
+Mayank Kabra, Alice Robie, Kristin Branson
+Janelia Research Campus of the Howard Hughes Medical Institute
+Ashburn, VA, 20147, USA"
+47be79c0ecb598e1af44e57f386f79adf491f82b,Scenes categorization based on appears objects probability,"016 IEEE 6th International Conference on System Engineering and Technology (ICSET)
+Oktober 3-4, 2016 Bandung – Indonesia
+Scenes Categorization based on Appears Objects
+Probability
+Marzuki1, Egi Muhamad Hidayat2, Rinaldi Munir3, Ary Setijadi P4 ,Carmadi Machbub5
+School of Electrical Engineering and Informatics, Institut Teknologi Bandung
+Bandung, Indonesia
+lskk.ee.itb.ac.id"
+47ce78c9f49248a7d1bd395befb43e45d89555ee,Vision-and-Language Navigation: Interpreting visually-grounded navigation instructions in real environments,"Vision-and-Language Navigation: Interpreting visually-grounded
+navigation instructions in real environments
+Peter Anderson1
+Niko S¨underhauf3
+Qi Wu2
+Damien Teney2
+Jake Bruce3
+Mark Johnson4
+Ian Reid2
+Stephen Gould1
+Anton van den Hengel2
+Australian National University 2University of Adelaide 3Queensland University of Technology 4Macquarie University"
+47096e7103a2fbb6f6ede05e996209497d41db6a,Implementation of Artificial Intelligence Methods for Virtual Reality Solutions: a Review of the Literature,"Implementation of Artificial Intelligence Methods for
+Virtual Reality Solutions: a Review of the Literature
+Rytis Augustauskas
+Department of Automation
+Aurimas Kudarauskas
+Department of Automation
+Kaunas University of Technology,
+Kaunas University of Technology,
+Kaunas, Lithuania
+Kaunas, Lithuania
+Cenker Canbulut
+Department of Multimedia Engineering
+Kaunas University of Technology,
+Kaunas, Lithuania"
+477236563c6a6c6db922045453b74d3f9535bfa1,Attribute Based Image Search Re-Ranking Snehal,"International Journal of Science and Research (IJSR)
+ISSN (Online): 2319-7064
+Index Copernicus Value (2013): 6.14 | Impact Factor (2014): 5.611
+Attribute Based Image Search Re-Ranking
+Snehal S Patil1, Ajay Dani2
+Master of Computer Engg, Savitribai Phule Pune University, G. H. Raisoni Collage of Engg and Technology, Wagholi, Pune
+2Professor, Computer and Science Dept, Savitribai Phule Pune University, G. H .Raisoni Collage of Engg and Technology, Wagholi, Pune
+integrating
+images by"
+47fdd1579f732dd6389f9342027560e385853180,Deep Sparse Subspace Clustering,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015
+Deep Sparse Subspace Clustering
+Xi Peng, Jiashi Feng, Shijie Xiao, Jiwen Lu Senior Member, IEEE, Zhang Yi Fellow, IEEE,
+Shuicheng Yan Fellow, IEEE,"
+47f2088afb616bde5468818e23d79e1ae5a562cd,Multi-view gender classification based on local Gabor binary mapping pattern and support vector machines,"Multi-view Gender Classification based on Local Gabor Binary
+Mapping Pattern and Support Vector Machines
+Bin Xia, He Sun and Bao-Liang Lu∗ Senior Member, IEEE"
+470dbd3238b857f349ebf0efab0d2d6e9779073a,Unsupervised Simultaneous Orthogonal basis Clustering Feature Selection,"Unsupervised Simultaneous Orthogonal Basis Clustering Feature Selection
+Dongyoon Han and Junmo Kim
+School of Electrical Engineering, KAIST, South Korea
+In this paper, we propose a novel unsupervised feature selection method: Si-
+multaneous Orthogonal basis Clustering Feature Selection (SOCFS). To per-
+form feature selection on unlabeled data effectively, a regularized regression-
+ased formulation with a new type of target matrix is designed. The target
+matrix captures latent cluster centers of the projected data points by per-
+forming the orthogonal basis clustering, and then guides the projection ma-
+trix to select discriminative features. Unlike the recent unsupervised feature
+selection methods, SOCFS does not explicitly use the pre-computed local
+structure information for data points represented as additional terms of their
+objective functions, but directly computes latent cluster information by the
+target matrix conducting orthogonal basis clustering in a single unified term
+of the proposed objective function.
+Since the target matrix is put in a single unified term for regression of
+the proposed objective function, feature selection and clustering are simul-
+taneously performed. In this way, the projection matrix for feature selection
+is more properly computed by the estimated latent cluster centers of the
+projected data points. To the best of our knowledge, this is the first valid"
+47541d04ec24662c0be438531527323d983e958e,British Library Cataloguing in Publication Data A catalogue record for this book is available from the British Library Library of Congress Control Number: 2008xxxxxx,Affective Information Processing
+479f44f9b4c401327a721550334b8d491f6b3f16,OR-PCA with MRF for Robust Foreground Detection in Highly Dynamic Backgrounds,"OR-PCA with MRF for Robust Foreground
+Detection in Highly Dynamic Backgrounds
+Sajid Javed1, Seon Ho Oh1, Andrews Sobral2,
+Thierry Bouwmans2 and Soon Ki Jung1
+School of Computer Science and Engineering, Kyungpook National University,
+80 Daehak-ro, Buk-gu,Daegu, 702-701, Republic of Korea
+{sajid,
+Laboratoire MIA (Mathematiques, Image et Applications)- Universit´e de La
+Rochelle, 17000, France, {andrews.sobral,"
+474b461cd12c6d1a2fbd67184362631681defa9e,Multi-resolution fusion of DTCWT and DCT for shift invariant face recognition,"014 IEEE International
+Conference on Systems, Man
+nd Cybernetics
+(SMC 2014)
+San Diego, California, USA
+5-8 October 2014
+Pages 1-789
+IEEE Catalog Number:
+ISBN:
+CFP14SMC-POD
+978-1-4799-3841-4"
+47ca2df3d657d7938d7253bed673505a6a819661,"Fields of Study Major Field: Computer Vision Minor Field: Pattern Recognition, Image Procession, Statistical Learning Ix Abstract Facial Expression Analysis on Manifolds","UNIVERSITY OF CALIFORNIA
+Santa Barbara
+Facial Expression Analysis on Manifolds
+A Dissertation submitted in partial satisfaction of the
+requirements for the degree Doctor of Philosophy
+in Computer Science
+Ya Chang
+Committee in charge:
+Professor Matthew Turk, Chair
+Professor Yuan-Fang Wang
+Professor B.S. Manjunath
+Professor Andy Beall
+September 2006"
+47d4838087a7ac2b995f3c5eba02ecdd2c28ba14,Automatic Recognition of Deceptive Facial Expressions of Emotion,"JOURNAL OF IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. X, XXX 2017
+Automatic Recognition of Facial Displays of
+Unfelt Emotions
+Kaustubh Kulkarni*, Ciprian Adrian Corneanu*, Ikechukwu Ofodile*, Student Member, IEEE, Sergio
+Escalera, Xavier Bar´o, Sylwia Hyniewska, Member, IEEE, J¨uri Allik,
+nd Gholamreza Anbarjafari, Senior Member, IEEE"
+47f8ba44fde1f8a3a621b20cabb7e84515fb8313,Superpixel-based Road Segmentation for Real-time Systems using CNN,
+4753a125469da7649e9f58fb0db781622dff41f8,Multi-view Stereo with Single-View Semantic Mesh Refinement,"Multi-View Stereo with Single-View Semantic Mesh Refinement
+Andrea Romanoni Marco Ciccone
+Francesco Visin Matteo Matteucci
+{andrea.romanoni, marco.ciccone, francesco.visin,
+Politecnico di Milano, Italy"
+47a2727bd60e43f3253247b6d6f63faf2b67c54b,Semi-supervised Vocabulary-Informed Learning,"Semi-supervised Vocabulary-informed Learning
+Yanwei Fu and Leonid Sigal
+Disney Research"
+475de283dad61a8a9ed231dce0d8d62a54f4d062,Person Following by Autonomous Robots: A Categorical Overview,"Islam et al.
+Person Following by Autonomous
+Robots: A Categorical Overview
+Md Jahidul Islam, Jungseok Hong and Junaed Sattar
+Preprint Version I
+XX(X):1–25
+(cid:13)The Author(s) 2018
+Reprints and permission:
+sagepub.co.uk/journalsPermissions.nav
+DOI: 10.1177/ToBeAssigned
+www.sagepub.com/"
+478261574ddc6cf297611000735aa9808f8f0030,ScanNet: Richly-Annotated 3D Reconstructions of Indoor Scenes,
+47022785c35735a242dbacd4f1f1bb73628493ea,Person Retrieval Based on Viewpoint Saliency Prior,"Journal of Computational Information Systems 9: 20 (2013) 8235–8242
+Available at http://www.Jofcis.com
+Person Retrieval Based on Viewpoint Saliency Prior
+Qingming LENG, Ruimin HU∗, Cuina JIAO, Chao LIANG, Zheng WANG
+National Engineering Research Center for Multimedia Software, School of Computer, Wuhan
+University, Wuhan 430079, China"
+47d3b923730746bfaabaab29a35634c5f72c3f04,Real-Time Facial Expression Recognition App Development on Smart Phones,"Humaid Alshamsi.et.al. Int. Journal of Engineering Research and Application www.ijera.com
+ISSN : 2248-9622, Vol. 7, Issue 7, ( Part -3) July 2017, pp.30-38
+RESEARCH ARTICLE
+OPEN ACCESS
+Real-Time Facial Expression Recognition App Development on
+Smart Phones
+Humaid Alshamsi, Veton Kupuska
+Electrical And Computer Engineering Department, Florida Institute Of Technology, Melbourne Fl,"
+47e3029a3d4cf0a9b0e96252c3dc1f646e750b14,Facial expression recognition in still pictures and videos using active appearance models: a comparison approach,"International Conference on Computer Systems and Technologies - CompSysTech’07
+Facial Expression Recognition in still pictures and videos using Active
+Appearance Models. A comparison approach.
+Drago(cid:1) Datcu
+Léon Rothkrantz"
+470b89e2c5248eb58e09129aa9b4d8bc77497e7e,Neurobiology of Disease Cortical Folding Abnormalities in Autism Revealed by Surface-Based Morphometry,"The Journal of Neuroscience, October 24, 2007 • 27(43):11725–11735 • 11725
+Neurobiology of Disease
+Cortical Folding Abnormalities in Autism Revealed by
+Surface-Based Morphometry
+Christine Wu Nordahl,1 Donna Dierker,2 Iman Mostafavi,1 Cynthia M. Schumann,1,3 Susan M. Rivera,4
+David G. Amaral,1 and David C. Van Essen2
+The Medical Investigation of Neurodevelopmental Disorders (M.I.N.D.) Institute and the Department of Psychiatry and Behavioral Sciences, University of
+California, Davis, Sacramento, California 95817, 2Department of Anatomy and Neurobiology, Washington University in St. Louis, St. Louis, Missouri 63110,
+Department of Neurosciences, University of California, San Diego, La Jolla, California 92093, and 4The M.I.N.D. Institute and the Department of
+Psychology, University of California, Davis, Davis, California 95616
+We tested for cortical shape abnormalities using surface-based morphometry across a range of autism spectrum disorders (7.5–18 years
+of age). We generated sulcal depth maps from structural magnetic resonance imaging data and compared typically developing controls
+to three autism spectrum disorder subgroups: low-functioning autism, high-functioning autism, and Asperger’s syndrome. The low-
+functioning autism group had a prominent shape abnormality centered on the pars opercularis of the inferior frontal gyrus that was
+ssociated with a sulcal depth difference in the anterior insula and frontal operculum. The high-functioning autism group had bilateral
+shape abnormalities similar to the low-functioning group, but smaller in size and centered more posteriorly, in and near the parietal
+operculum and ventral postcentral gyrus. Individuals with Asperger’s syndrome had bilateral abnormalities in the intraparietal sulcus
+that correlated with age, intelligence quotient, and Autism Diagnostic Interview-Revised social and repetitive behavior scores. Because of
+evidence suggesting age-related differences in the developmental time course of neural alterations in autism, separate analyses on
+hildren (7.5–12.5 years of age) and adolescents (12.75–18 years of age) were also carried out. All of the cortical shape abnormalities"
+475e16577be1bfc0dd1f74f67bb651abd6d63524,DAiSEE: Towards User Engagement Recognition in the Wild,"DAiSEE: Towards User Engagement Recognition in the Wild
+Abhay Gupta
+Microsoft
+Vineeth N Balasubramanian
+Indian Institution of Technology Hyderabad"
+471befc1b5167fcfbf5280aa7f908eff0489c72b,Class-Specific Kernel-Discriminant Analysis for Face Verification,"Class-Specific Kernel-Discriminant
+Analysis for Face Verification
+Georgios Goudelis, Stefanos Zafeiriou, Anastasios Tefas, Member, IEEE, and Ioannis Pitas, Fellow, IEEE
+lass problems ("
+47bd6c1d7da596d3cf79f06ec0de816d10f11beb,Coupled Discriminant Analysis for Heterogeneous Face Recognition,"Coupled Discriminant Analysis for Heterogeneous
+Face Recognition
+Zhen Leiy, Member, IEEE, Shengcai Liaoz, Anil K. Jainz, Fellow, IEEE, and Stan Z. Liy, Fellow, IEEE"
+47e8db3d9adb79a87c8c02b88f432f911eb45dc5,MAGMA: Multilevel Accelerated Gradient Mirror Descent Algorithm for Large-Scale Convex Composite Minimization,"MAGMA: Multi-level accelerated gradient mirror descent algorithm for
+large-scale convex composite minimization
+Vahan Hovhannisyan
+Panos Parpas
+Stefanos Zafeiriou
+July 15, 2016"
+47c0c7f1a27d467e00a6fa7ea2ca0af2e3328b9e,Predicting Scene Parsing and Motion Dynamics in the Future,"Predicting Scene Parsing and Motion Dynamics
+in the Future
+Xiaojie Jin1, Huaxin Xiao2, Xiaohui Shen3, Jimei Yang3, Zhe Lin3
+Yunpeng Chen2, Zequn Jie4, Jiashi Feng2, Shuicheng Yan5,2
+NUS Graduate School for Integrative Science and Engineering (NGS), NUS
+Department of ECE, NUS
+Adobe Research
+Tencent AI Lab
+5Qihoo 360 AI Institute"
+47f5f740e225281c02c8a2ae809be201458a854f,Simultaneous Unsupervised Learning of Disparate Clusterings,"Simultaneous Unsupervised Learning of Disparate Clusterings
+Prateek Jain*, Raghu Meka and Inderjit S. Dhillon
+Department of Computer Sciences, University of Texas, Austin, TX 78712-1188, USA
+Received 14 April 2008; accepted 05 May 2008
+DOI:10.1002/sam.10007
+Published online 3 November 2008 in Wiley InterScience (www.interscience.wiley.com)."
+47bf7a8779c68009ea56a7c20e455ccdf0e3a8fa,Automatic Face Recognition System using Pattern Recognition Techniques: A Survey,"International Journal of Computer Applications (0975 – 8887)
+Volume 83 – No 5, December 2013
+Automatic Face Recognition System using Pattern
+Recognition Techniques: A Survey
+Ningthoujam Sunita Devi Prof.K.Hemachandran
+Department of Computer Science Department of Computer Science
+Assam University, Silchar-788011 Assam University, Silchar-788011"
+47b508abdaa5661fe14c13e8eb21935b8940126b,An Efficient Method for Feature Extraction of Face Recognition Using PCA,"Volume 4, Issue 12, December 2014 ISSN: 2277 128X
+International Journal of Advanced Research in
+Computer Science and Software Engineering
+Research Paper
+Available online at: www.ijarcsse.com
+An Efficient Method for Feature Extraction of Face
+Recognition Using PCA
+Tara Prasad Singh
+(M.Tech. Student)
+Computer Science & Engineering
+Iftm University,Moradabad-244001 U.P."
+47b34a8ad5100582aa7cbfd85df3ca7659adc392,Is this a wampimuk? Cross-modal mapping between distributional semantics and the visual world,"Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics, pages 1403–1414,
+Baltimore, Maryland, USA, June 23-25 2014. c(cid:13)2014 Association for Computational Linguistics"
+47440f514318b438ebf04d9932f5dafdb488a536,Emotion Recognition from Facial Images Using Binary Face Relevance Maps,"STUDIA INFORMATICA
+Volume 36
+Number 4 (122)
+Tomasz HERUD, Michal KAWULOK
+Silesian University of Technology, Institute of Informatics
+Future Processing, Gliwice, Poland
+Bogdan SMOLKA
+Silesian University of Technology, Institute of Automatic Control
+EMOTION RECOGNITION FROM FACIAL IMAGES USING
+BINARY FACE RELEVANCE MAPS1
+Summary. This paper is focused on automatic emotion recognition from static
+grayscale images. Here, we propose a new approach to this problem, which combines
+few other methods. The facial region is divided into small subregions, which are
+selected for processing based on a face relevance map. From these regions, local
+directional pattern histograms are extracted and concatenated into a single feature
+histogram, which is classified into one of seven defined emotional states using support
+vector machines. In our case, we distinguish: anger, disgust, fear, happiness,
+neutrality, sadness and surprise. In our experimental study we demonstrate that the
+expression recognition accuracy for Japanese Female Facial Expression database is
+one of the best compared with the results reported in the literature."
+782188821963304fb78791e01665590f0cd869e8,Automatic Spatially-Aware Fashion Concept Discovery,"sleevelengthincreasing dress length+ mini =(b) Structured product browsing(c) Attribute-feedback product retrieval(a) Concept discoveryminimidimaxisleevelessshort-sleevelong-sleeveblueblackredyellowFigure1.(a)Weproposeaconceptdiscoveryapproachtoauto-maticallyclusterspatially-awareattributesintomeaningfulcon-cepts.Thediscoveredspatially-awareconceptsarefurtherutilizedfor(b)structuredproductbrowsing(visualizingimagesaccordingtoselectedconcepts)and(c)attribute-feedbackproductretrieval(refiningsearchresultsbyprovidingadesiredattribute).variousfeedback,includingtherelevanceofdisplayedim-ages[20,4],ortuningparameterslikecolorandtexture,andthenresultsareupdatedcorrespondingly.However,rel-evancefeedbackislimitedduetoitsslowconvergencetomeetthecustomerrequirements.Inadditiontocolorandtexture,customersoftenwishtoexploithigher-levelfea-tures,suchasneckline,sleevelength,dresslength,etc.Semanticattributes[13],whichhavebeenappliedef-fectivelytoobjectcategorization[15,27]andfine-grainedrecognition[12]couldpotentiallyaddresssuchchallenges.Theyaremid-levelrepresentationsthatdescribesemanticproperties.Recently,researchershaveannotatedclotheswithsemanticattributes[9,2,8,16,11](e.g.,material,pat-tern)asintermediaterepresentationsorsupervisorysignalstobridgethesemanticgap.However,annotatingsemanticattributesiscostly.Further,attributesconditionedonob-jectpartshaveachievedgoodperformanceinfine-grainedrecognition[3,33],confirmingthatspatialinformationiscriticalforattributes.Thisalsoholdsforclothingimages.Forexample,thenecklineattributeusuallycorrespondstothetoppartinimageswhilethesleeveattributeordinarily1"
+786e57ed6877dc8491b1bb9253f8b82c02732977,Efficient approach to de-identifying faces in videos,"Page 1 of 8
+An Efficient Approach to De-Identifying Faces in Videos
+Li Meng *, Zongji Sun, Odette Tejada Collado
+School of Engineering and Technology, University of Hertfordshire, College Lane, Hatfield, UK"
+788eceb4d1b7556d1c9033224da2348b4402d6ca,An Empirical Evaluation of Visual Question Answering for Novel Objects,"An Empirical Evaluation of Visual Question Answering for Novel Objects
+Santhosh K. Ramakrishnan1,2 Ambar Pal1 Gaurav Sharma1 Anurag Mittal2
+IIT Kanpur∗
+IIT Madras†"
+7854876ab5d87248ace94615731ed3e3e56af769,MixedPeds: Pedestrian Detection in Unannotated Videos Using Synthetically Generated Human-Agents for Training,
+789c76749a15614d97ac8f4ec18b3ce7d80a2d28,Explorer Multiplicative LSTM for sequence modelling,"Multiplicative LSTM for sequence modelling
+Citation for published version:
+Krause, B, Murray, I, Renals, S & LU, L 2017, Multiplicative LSTM for sequence modelling. in International
+Conference on Learning Representations - ICLR 2017 - Workshop Track. pp. 2872-2880.
+Link:
+Link to publication record in Edinburgh Research Explorer
+Document Version:
+Publisher's PDF, also known as Version of record
+Published In:
+International Conference on Learning Representations - ICLR 2017 - Workshop Track
+General rights
+Copyright for the publications made accessible via the Edinburgh Research Explorer is retained by the author(s)
+nd / or other copyright owners and it is a condition of accessing these publications that users recognise and
+bide by the legal requirements associated with these rights.
+Take down policy
+The University of Edinburgh has made every reasonable effort to ensure that Edinburgh Research Explorer
+ontent complies with UK legislation. If you believe that the public display of this file breaches copyright please
+ontact providing details, and we will remove access to the work immediately and
+investigate your claim.
+Download date: 02. Sep. 2017"
+78749b58299ecebf100e2512872029f89878449b,One-class Selective Transfer Machine for Personalized Anomalous Facial Expression Detection,
+78c91d969c55a4a61184f81001c376810cdbd541,A Spike and Slab Restricted Boltzmann Machine,"A Spike and Slab Restricted Boltzmann Machine
+Aaron Courville
+James Bergstra
+Yoshua Bengio
+DIRO, Universit´e de Montr´eal, Montr´eal, Qu´ebec, Canada"
+787303db8e707feee2fa2b93dfc46e3d3cc244cd,Defocus Blur Parameter Estimation Technique,"International Journal of Electronics and Communication Engineering and Technology (IJECET)
+Volume 7, Issue 4, July-August 2016, pp. 85–90, Article ID: IJECET_07_04_010
+Available online at
+http://www.iaeme.com/IJECET/issues.asp?JType=IJECET&VType=7&IType=4
+Journal Impact Factor (2016): 8.2691 (Calculated by GISI) www.jifactor.com
+ISSN Print: 0976-6464 and ISSN Online: 0976-6472
+© IAEME Publication
+DEFOCUS BLUR PARAMETER ESTIMATION
+TECHNIQUE
+Ruchi Gajjar, Aditi Pathak and Tanish Zaveri
+Electronics and Communication Engineering Department
+Institute of Technology, Nirma University, Ahmedabad, Gujarat, India"
+784cc0363d44bf09f3f636abd1a532ddac95ca13,Group-level emotion recognition using transfer learning from face identification,"Group-level Emotion Recognition using Transfer Learning from
+Face Identification
+Alexandr Rassadin
+Alexey Gruzdev
+Andrey Savchenko
+National Research University Higher
+National Research University Higher
+National Research University Higher
+School of Economics
+Laboratory of Algorithms and
+Technologies for Network Analysis,
+School of Economics
+Nizhny Novgorod
+Russia
+School of Economics
+Laboratory of Algorithms and
+Technologies for Network Analysis,
+Nizhny Novgorod
+Russia
+Nizhny Novgorod"
+783f3fccde99931bb900dce91357a6268afecc52,Adapted Active Appearance Models,"Hindawi Publishing Corporation
+EURASIP Journal on Image and Video Processing
+Volume 2009, Article ID 945717, 14 pages
+doi:10.1155/2009/945717
+Research Article
+Adapted Active Appearance Models
+Renaud S´eguier,1 Sylvain Le Gallou,2 Gaspard Breton,2 and Christophe Garcia2
+SUP ´ELEC/IETR, Avenue de la Boulaie, 35511 Cesson-S´evign´e, France
+Orange Labs—TECH/IRIS, 4 rue du clos courtel, 35 512 Cesson S´evign´e, France
+Correspondence should be addressed to Renaud S´eguier,
+Received 5 January 2009; Revised 2 September 2009; Accepted 20 October 2009
+Recommended by Kenneth M. Lam
+Active Appearance Models (AAMs) are able to align ef‌f‌iciently known faces under duress, when face pose and illumination are
+ontrolled. We propose Adapted Active Appearance Models to align unknown faces in unknown poses and illuminations. Our
+proposal is based on the one hand on a specific transformation of the active model texture in an oriented map, which changes the
+AAM normalization process; on the other hand on the research made in a set of different precomputed models related to the most
+dapted AAM for an unknown face. Tests on public and private databases show the interest of our approach. It becomes possible
+to align unknown faces in real-time situations, in which light and pose are not controlled.
+Copyright © 2009 Renaud S´eguier et al. This is an open access article distributed under the Creative Commons Attribution
+License, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly"
+789389dce27ad72adad251c81734bdb6c274c30f,3D Facial Feature Localization for Registration,"D Facial Feature Localization for Registration
+Albert Ali Salah and Lale Akarun
+Bo˘gazi¸ci University
+Computer Engineering Department, Turkey
+Perceptual Intelligence Laboratory
+{salah,"
+78a2a964b61308f683fae6f3a62e3a8aece51bae,Functional Neuroimaging of the Interaction between Social and Executive Neural Circuitry in Individuals with High- Functioning Autism,"FUNCTIONAL NEUROIMAGING OF THE INTERACTION BETWEEN SOCIAL
+AND EXECUTIVE NEURAL CIRCUITRY IN INDIVIDUALS WITH HIGH-
+FUNCTIONING AUTISM
+Kimberly Lynn Hills Carpenter
+A dissertation submitted to the faculty of the University of North Carolina at Chapel
+Hill in partial fulfillment of the requirements for the degree of Doctor of Philosophy in
+the Curriculum in Neurobiology
+Chapel Hill
+Approved By:
+Dr. Aysenil Belger
+Dr. Jim Bodfish
+Dr. Gabriel Dichter
+Dr. Kevin LaBar
+Dr. Joseph Piven
+Dr. Aldo Rustioni"
+781d3550f54f3b4bfbd99ca9957aba6d6dec990e,Regularized Kernel Discriminant Analysis With a Robust Kernel for Face Recognition and Verification,"This article has been accepted for inclusion in a future issue of this journal. Content is final as presented, with the exception of pagination.
+Brief Papers
+Regularized Kernel Discriminant Analysis With a Robust
+Kernel for Face Recognition and Verification
+Stefanos Zafeiriou, Georgios Tzimiropoulos, Maria Petrou,
+nd Tania Stathaki"
+78045e2b93745b16a174137074e430ccd5ff53ff,Hedging Deep Features for Visual Tracking.,"This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI
+Hedging Deep Features for Visual Tracking
+Yuankai Qi, Shengping Zhang, Lei Qin, Qingming Huang, Hongxun Yao, Jongwoo Lim, and Ming-Hsuan Yang"
+78342d17c6c6fff00cf1b20602f3213a3f61ba56,Collaborative Discriminant Locality Preserving Projections With its Application to Face Recognition,"Collaborative Discriminant Locality Preserving Projections With its Application
+to Face Recognition
+Sheng Huanga,c, Dan Yanga,b,∗, Dong Yangc, Ahmed Elgammalc
+College of Computer Science at Chongqing University, Chonqing, 400044, China
+School of Software Engineering at Chongqing University Chonqing, 400044, China
+Department of Computer Science at Rutgers University, Piscataway, NJ, 08854, USA"
+78f438ed17f08bfe71dfb205ac447ce0561250c6,Bridging the Semantic Gap : Image and video Understanding by Exploiting Attributes,
+78f7304ba4c853c568dc4e38fef35aa2c003e3f3,Modeling correlations in spontaneous activity of visual cortex with centered Gaussian-binary deep Boltzmann machines,"visual cortex with centered Gaussian-binary deep
+Boltzmann machines
+Nan Wang
+Institut f¨ur Neuroinformatik
+Ruhr-Universit¨at Bochum
+Bochum, 44780, Germany
+Dirk Jancke
+Institut f¨ur Neuroinformatik
+Ruhr-Universit¨at Bochum
+Bochum, 44780, Germany
+Laurenz Wiskott
+Institut f¨ur Neuroinformatik
+Ruhr-Universit¨at Bochum
+Bochum, 44780, Germany"
+78c9a63be8e07dc6acb90f4fe3f06821719eaa34,Hierarchical online domain adaptation of deformable part-based models,"Hierarchical online domain adaptation of deformable part-based models
+Jiaolong Xu1, David V´azquez2, Krystian Mikolajczyk3 and Antonio M. L´opez1"
+7882c67f555b761e10ecc70216db25382890d9d7,Automated Characterization of Stenosis in Invasive Coronary Angiography Images with Convolutional Neural Networks,"Automated Characterization of Stenosis in Invasive Coronary Angiography Images with Convolutional
+Neural Networks"
+781c2553c4ed2a3147bbf78ad57ef9d0aeb6c7ed,Tubelets: Unsupervised Action Proposals from Spatiotemporal Super-Voxels,"Int J Comput Vis
+DOI 10.1007/s11263-017-1023-9
+Tubelets: Unsupervised Action Proposals from Spatiotemporal
+Super-Voxels
+Mihir Jain1
+Cees G. M. Snoek1
+· Jan van Gemert2 · Hervé Jégou3 · Patrick Bouthemy3 ·
+Received: 25 June 2016 / Accepted: 18 May 2017
+© The Author(s) 2017. This article is an open access publication"
+7803206f024ba6887d93e8aec91dd0097ffc5165,Automatic detection of facial actions from 3D data,"Automatic Detection of Facial Actions from 3D Data
+Arman Savran
+Electrical and Electronics Engineering Department
+Bo˘gazic¸i University, Istanbul, Turkey
+B¨ulent Sankur"
+78598c69201cccfc060d47fc0415f2f9365035fc,A Taught-Obesrve-Ask (TOA) Method for Object Detection with Critical Supervision,"A Taught-Obesrve-Ask (TOA) Method for Object
+Detection with Critical Supervision
+Chi-Hao Wu, Qin Huang, Siyang Li, and C.-C. Jay Kuo, Fellow, IEEE"
+78a144d5dce1a61c92420e77c11116f541a7617f,Box Aggregation for Proposal Decimation: Last Mile of Object Detection,"Box Aggregation for Proposal Decimation: Last Mile of Object Detection
+The Chinese University of Hong Kong ♯Stanford University ‡Shanghai Jiao Tong University
+Shu Liu† Cewu Lu♯,‡
+Jiaya Jia†"
+78df7d3fdd5c32f037fb5cc2a7c104ac1743d74e,Temporal Pyramid Pooling-Based Convolutional Neural Network for Action Recognition,"TEMPORAL PYRAMID POOLING CNN FOR ACTION RECOGNITION
+Temporal Pyramid Pooling Based Convolutional
+Neural Network for Action Recognition
+Peng Wang, Yuanzhouhan Cao, Chunhua Shen, Lingqiao Liu, and Heng Tao Shen"
+78fdf2b98cf6380623b0e20b0005a452e736181e,Dense Wide-Baseline Stereo with Varying Illumination and its Application to Face Recognition,
+7858410077f9ba94ca60d0f6b4d29509e46a4ef9,Predicting Visual Exemplars of Unseen Classes for Zero-Shot Learning,"Predicting Visual Exemplars of Unseen Classes for Zero-Shot Learning
+Soravit Changpinyo
+U. of Southern California
+Los Angeles, CA
+Wei-Lun Chao
+Los Angeles, CA
+U. of Southern California
+U. of Southern California
+Fei Sha
+Los Angeles, CA"
+787c1bb6d1f2341c5909a0d6d7314bced96f4681,"Face Detection and Verification in Unconstrained Videos: Challenges, Detection, and Benchmark Evaluation","Face Detection and Verification in Unconstrained
+Videos: Challenges, Detection, and Benchmark
+Evaluation
+Mahek Shah
+IIIT-D-MTech-CS-GEN-13-106
+July 16, 2015
+Indraprastha Institute of Information Technology, Delhi
+Thesis Advisors
+Dr. Mayank Vatsa
+Dr. Richa Singh
+Submitted in partial fulfillment of the requirements
+for the Degree of M.Tech. in Computer Science
+(cid:13) Shah, 2015
+Keywords: face recognition, face detection, face verification"
+7808937b46acad36e43c30ae4e9f3fd57462853d,Describing people: A poselet-based approach to attribute classification,"Describing People: A Poselet-Based Approach to Attribute Classification ∗
+Lubomir Bourdev1,2, Subhransu Maji1 and Jitendra Malik1
+EECS, U.C. Berkeley, Berkeley, CA 94720
+Adobe Systems, Inc., 345 Park Ave, San Jose, CA 95110"
+7809a42a833b49725f3a4bb8f70f63f4d2cee11c,Detection of Person in A Group of People Using 3-D Based Model,"Detection of Person in A Group of People Using 3-D Based Model
+Dr. P. Srirama Chandra Murty1, Ch. Anuradha2, Dr. Syed Muneer3
+Assistant Professor, Dept. of Computer Science and Engineering, ANUCET, Acharya Nagarjuna
+University, Guntur, India
+Asst. Professor, Dept. of Computer Science and Engineering, PNC & Vijay Institute of Engineering
+Computer Professional, Dept. of Computer Science and Engineering, ANUCET, Acharya Nagarjuna
+nd Technology, Guntur, Andhra Pradesh, India.
+University, Guntur, Andhra Pradesh, India"
+788a3faa14ca191d7f187b812047190a70798428,Interpretable Set Functions,"Interpretable Set Functions
+Andrew Cotter, Maya Gupta, Heinrich Jiang,
+James Muller, Taman Narayan, Serena Wang, Tao Zhu
+600 Amphitheatre Parkway, Mountain View, CA 94043
+Google Research"
+780772a69b1556d5f725630dff8e79ec3ccb46bb,FieldSAFE: Dataset for Obstacle Detection in Agriculture,"FieldSAFE: Dataset for Obstacle Detection in Agriculture
+Mikkel Kragh∗1, Peter Christiansen∗1, Morten S. Laursen1, Morten Larsen2, Kim
+A. Steen3, Ole Green3, Henrik Karstoft1 and Rasmus N. Jørgensen1
+Department of Engineering, Aarhus University, Denmark
+Conpleks Innovation ApS, Struer, Denmark
+AgroIntelli, Aarhus, Denmark"
+8b2c090d9007e147b8c660f9282f357336358061,Emotion Classification based on Expressions and Body Language using Convolutional Neural Networks,"Lake Forest College
+Lake Forest College Publications
+Senior Theses
+-23-2018
+Student Publications
+Emotion Classification based on Expressions and
+Body Language using Convolutional Neural
+Networks
+Aasimah S. Tanveer
+Lake Forest College,
+Follow this and additional works at: https://publications.lakeforest.edu/seniortheses
+Part of the Neuroscience and Neurobiology Commons
+Recommended Citation
+Tanveer, Aasimah S., ""Emotion Classification based on Expressions and Body Language using Convolutional Neural Networks""
+(2018). Senior Theses.
+This Thesis is brought to you for free and open access by the Student Publications at Lake Forest College Publications. It has been accepted for
+inclusion in Senior Theses by an authorized administrator of Lake Forest College Publications. For more information, please contact"
+8b607928c7af70259a9f8af9e08e28e6037411c8,Bayesian teaching of image categories,"Bayesian teaching of image categories
+Wai Keen Vong∗
+Ravi B. Sojitra*
+Newark, NJ, 07102
+Anderson Reyes
+Scott Cheng-Hsin Yang
+Patrick Shafto
+Department of Mathematics and Computer Science, 110 Warren Street,"
+8b9c53e7d65ba7a7be3d588d00481f2ff49b5ef4,Orienting in response to gaze and the social use of gaze among children with autism spectrum disorder.,23Journal of Autism andDevelopmental Disorders ISSN 0162-3257Volume 43Number 7 J Autism Dev Disord (2013)43:1584-1596DOI 10.1007/s10803-012-1704-8Orienting in Response to Gaze and theSocial Use of Gaze among Children withAutism Spectrum DisorderAdrienne Rombough & Grace Iarocci
+8bddd0afd064e2d45ab6cf9510f2631f7438c17b,Outlier Detection using Generative Models with Theoretical Performance Guarantees,"Outlier Detection using Generative Models with
+Theoretical Performance Guarantees∗
+Jirong Yi†
+Anh Duc Le‡
+Tianming Wang§
+Xiaodong Wu¶
+Weiyu Xu(cid:107)
+October 29, 2018"
+8b547b87fd95c8ff6a74f89a2b072b60ec0a3351,Initial perceptions of a casual game to crowdsource facial expressions in the wild,"Initial Perceptions of a Casual Game to Crowdsource
+Facial Expressions in the Wild
+Chek Tien Tan
+Hemanta Sapkota
+Daniel Rosser
+Yusuf Pisan
+Games Studio, Faculty of Engineering and IT, University of Technology, Sydney"
+8b26744e11e5f226f187bf903b88933c5b0fcdc0,Cost-Effective Class-Imbalance Aware CNN for Vehicle Localization and Categorization in High Resolution Aerial Images,"Article
+Cost-Effective Class-Imbalance Aware CNN for
+Vehicle Localization and Categorization in High
+Resolution Aerial Images
+Feimo Li 1,2,*, Shuxiao Li 1,2,*, Chengfei Zhu 1,2, Xiaosong Lan 1,2 and Hongxing Chang 1,2
+Institute of Automation Chinese Academy of Sciences, Beijing 100190, China;
+(C.Z.); (X.L.); (H.C.)
+University of Chinese Academy of Science, Beijing 100049, China
+* Correspondence: (F.L.); (S.L.);
+Tel.: +86-188-0012-4228 (F.L.); +86-138-1077-1030 (S.L.)
+Academic Editors: Qi Wang, Nicolas H. Younan, Carlos López-Martínez, Gonzalo Pajares Martinsanz,
+Xiaofeng Li and Prasad S. Thenkabail
+Received: 26 February 2017; Accepted: 15 May 2017; Published: 18 May 2017"
+8bf57dc0dd45ed969ad9690033d44af24fd18e05,Subject-Independent Emotion Recognition from Facial Expressions using a Gabor Feature RBF Neural Classifier Trained with Virtual Samples Generated by Concurrent Self-Organizing Maps,"Subject-Independent Emotion Recognition from Facial Expressions
+using a Gabor Feature RBF Neural Classifier Trained with Virtual
+Samples Generated by Concurrent Self-Organizing Maps
+VICTOR-EMIL NEAGOE, ADRIAN-DUMITRU CIOTEC
+Depart. Electronics, Telecommunications & Information Technology
+Polytechnic University of Bucharest
+Splaiul Independentei No. 313, Sector 6, Bucharest,
+ROMANIA"
+8bdbb685174d6023e63c55fdf9ad9b2ac78e79bd,Learning Human Poses from Actions-Supplementary Material,"ADITYA, JAWAHAR, PAWAN: LEARNING HUMAN POSES FROM ACTIONS
+Learning Human Poses from Actions -
+Supplementary Material
+Aditya Arun1
+C.V. Jawahar1
+M. Pawan Kumar2
+IIIT Hyderabad
+University of Oxford &
+The Alan Turing Institute
+In this supplementary material, we provide additional details on optimization of our
+learning objective, implementation details, and visualization of the learning process. We
+lso provide additional results of training a different architecture for human pose estimation
+on two data sets.
+Optimization
+In this section, we provide details of optimization presented in section 3.5 of the paper.
+.1 Learning Objective
+We represent the prediction distribution using a DISCO Net, which we denote by Prw, w
+eing the parameter of the network. Similarly, we represent the conditional distribution using
+set of DISCO Nets, which we denote by Prθθθ . The set of parameters for the conditional
+networks is denoted by θθθ. We compute samples from the prediction network as {hw"
+8b9f529700a93a2ff6e227c76a1333883a1f6213,PREMOC: Plataforma de reconocimiento multimodal de emociones,"PREMOC: Plataforma de reconocimiento multimodal
+de emociones
+Ramón Zatarain-Cabada, María Lucia Barrón-Estrada, Gilberto Muñoz-Sandoval
+Instituto Tecnológico de Culiacán, Culiacán, Sinaloa,
+México
+{rzaratain, lbarron,
+Resumen. En años recientes la computación afectiva ha venido a mejorar la
+interacción humano-computadora, pues ayuda a la computadora a conocer el
+estado afectivo del usuario para mejorar la toma de decisiones. Este artículo
+presenta los avances en el proyecto PREMOC, una plataforma que brinda un
+servicio web para el reconocimiento de emociones en texto, imágenes de rostros,
+sonidos de voz y señales EEG de manera mono-modal y multimodal. PREMOC
+yuda a los desarrolladores a integrar el reconocimiento de afecto a sus
+plicaciones o sistemas de software. Cada uno de los reconocedores se
+implementó aplicando diferentes técnicas tanto para extraer características como
+para clasificar emociones; además para el reconocimiento multimodal se
+integraron las emociones mediante un sistema difuso. Esta plataforma ya está
+siendo utilizada por diferentes proyectos en el laboratorio de la Maestría en
+Ciencias de la Computación del Instituto Tecnológico de Culiacán.
+Palabras claves: Computación afectiva, inteligencia artificial, reconocimiento"
+8b8b3375bc51ae357528a1f015c4d094418c9f71,"An Efficient Feature Extraction Method, Global Between Maximum and Local Within Minimum, and Its Applications","Hindawi Publishing Corporation
+Mathematical Problems in Engineering
+Volume 2011, Article ID 176058, 15 pages
+doi:10.1155/2011/176058
+Research Article
+An Efficient Feature Extraction Method,
+Global Between Maximum and Local Within
+Minimum, and Its Applications
+Lei Wang,1, 2 Jiangshe Zhang,1, 2 and Fei Zang1, 2
+School of Science, Xi’an Jiaotong University, Xi’an 710049, China
+State Key Laboratory for Manufacturing Systems Engineering, Xi’an Jiaotong University,
+Xi’an 710049, China
+Correspondence should be addressed to Lei Wang,
+Received 28 March 2011; Revised 16 April 2011; Accepted 18 April 2011
+Academic Editor: Jyh Horng Chou
+Copyright q 2011 Lei Wang et al. This is an open access article distributed under the Creative
+Commons Attribution License, which permits unrestricted use, distribution, and reproduction in
+ny medium, provided the original work is properly cited.
+Feature extraction plays an important role in preprocessing procedure in dealing with small
+sample size problems. Considering the fact that LDA, LPP, and many other existing methods are"
+8b744786137cf6be766778344d9f13abf4ec0683,And Summarization by Sub-modular Inference,"978-1-4799-9988-0/16/$31.00 ©2016 IEEE
+ICASSP 2016"
+8b879863237d315997857a5585afb2bbbf78c622,Social Network Analysis as a Tool for Improving Enterprise Architecture,"Proceedings of the 5th International KES Symposium on Agents and Multi-agent
+Systems, KES-AMSTA 2011. Manchester, UK, June 29 - July 1, 2011
+Lecture Notes in Artificial Intelligence LNAI, Volume 6682, 2011, pp. 651-660
+DOI: 10.1007/978-3-642-22000-5_67
+Social Network Analysis as a Tool
+for Improving Enterprise Architecture
+Przemysław Kazienko, Radosław Michalski, Sebastian Palus
+Institute of Informatics, Wrocław University of Technology
+Wybrzeże Wyspiańskiego 27, 50-370 Wrocław, Poland
+{kazienko, radoslaw.michalski,"
+8bb4d90d5b97e8d08d2aaa99e9c075a506b3108a,Generating Diverse Clusterings,"Generating Diverse Clusterings
+Anonymous Author(s)"
+8b64dbeac77fe8d6bf440311337451f9f61b9ea0,Image-based approaches to hair modeling,"Image-Based Approaches to Hair Modeling
+Dissertation
+Erlangung des Doktorgrades (Dr. rer. nat)
+Mathematisch-Naturwissenschaftlichen Fakult¨at
+Rheinischen Friedrich-Wilhelms-Universit¨at Bonn
+vorgelegt von
+Tom´as Lay Herrera
+Havanna
+Bonn, November 2012"
+8bbafa3efb7b96adb95128ea2a30a363bfe06812,Towards usable authentication on mobile phones: An evaluation of speaker and face recognition on off-the-shelf handsets,"Towards usable authentication on mobile phones: An
+evaluation of speaker and face recognition on off-the-shelf
+handsets
+Rene Mayrhofer
+University of Applied Sciences Upper Austria
+Softwarepark 11, A-4232 Hagenberg, Austria
+University of Applied Sciences Upper Austria
+Softwarepark 11, A-4232 Hagenberg, Austria
+Thomas Kaiser
+hagenberg.at"
+8bf647fed40bdc9e35560021636dfb892a46720e,Learning to hash-tag videos with Tag2Vec,"Learning to Hash-tag Videos with Tag2Vec
+Aditya Singh
+Saurabh Saini
+Rajvi Shah
+CVIT, KCIS, IIIT Hyderabad, India
+P J Narayanan
+http://cvit.iiit.ac.in/research/projects/tag2vec
+Figure 1. Learning a direct mapping from videos to hash-tags : sample frames from short video clips with user-given hash-tags
+(left); a sample frame from a query video and hash-tags suggested by our system for this query (right)."
+8b2f99b0106143fd0193fcbf2b07eba80dc7f8dd,Enhancing Recommender Systems for TV by Face Recognition,
+8b29ee0a47efc11071ab8baec8369fd54970bfbb,Features Extraction for Low-Power Face Verification,"Thèse présentée à la faculté des sciences pour
+l’obtention du grade de docteur ès sciences
+Features Extraction for
+Low-Power Face Verification
+Patrick Stadelmann
+Acceptée sur proposition du jury :
+Prof. Fausto Pellandini, directeur de thèse
+PD Dr. Michael Ansorge, co-directeur de thèse
+Prof. Pierre-André Farine, rapporteur
+Dr. Nicolas Blanc, rapporteur
+Soutenue le 23 mai 2008
+Institut de Microtechnique
+Université de Neuchâtel"
+8b20737b454fa8c2848979b5c76be9915a65a75f,Automated Object Recognition Using Multiple X-ray Views,"Automated Object Recognition
+Using Multiple X-ray Views
+Domingo Mery1 – Vladimir Riffo1, 2
+Department of Computer Science, Pontificia
+Universidad Católica de Chile.
+Department of Computer Engineering and
+Computer Science, Universidad de Atacama.
+Av. Vicuña Mackenna 4860(143) – Santiago de
+Chile.
+Av. Copayapu 485 – Copiapó, Chile.
+http://dmery.ing.puc.cl
+http://www.ing.puc.cl/~vriffo1"
+8bb21b1f8d6952d77cae95b4e0b8964c9e0201b0,Multimodal Interaction on a Social Robotic Platform,"Methoden
+t 11/2013
+(cid:2)(cid:2)(cid:2)
+Multimodale Interaktion
+uf einer sozialen Roboterplattform
+Multimodal Interaction on a Social Robotic Platform
+Jürgen Blume
+Korrespondenzautor:
+, Tobias Rehrl, Gerhard Rigoll, Technische Universität München
+Zusammenfassung Dieser Beitrag beschreibt die multimo-
+dalen Interaktionsmöglichkeiten mit der Forschungsroboter-
+plattform ELIAS. Zunächst wird ein Überblick über die Ro-
+oterplattform sowie die entwickelten Verarbeitungskompo-
+nenten gegeben, die Einteilung dieser Komponenten erfolgt
+nach dem Konzept von wahrnehmenden und agierenden Mo-
+dalitäten. Anschließend wird das Zusammenspiel der Kom-
+ponenten in einem multimodalen Spieleszenario näher be-
+trachtet. (cid:2)(cid:2)(cid:2) Summary
+This paper presents the mul-
+timodal"
+8b1db0894a23c4d6535b5adf28692f795559be90,How Reliable are Your Visual Attributes?,"Biometric and Surveillance Technology for Human and Activity Identification X, edited by Ioannis Kakadiaris,
+Walter J. Scheirer, Laurence G. Hassebrook, Proc. of SPIE Vol. 8712, 87120Q · © 2013 SPIE
+CCC code: 0277-786X/13/$18 · doi: 10.1117/12.2018974
+Proc. of SPIE Vol. 8712 87120Q-1
+From: http://proceedings.spiedigitallibrary.org/ on 06/07/2013 Terms of Use: http://spiedl.org/terms"
+8bff7353fa4f75629ea418ca8db60477a751db93,Invariance of Weight Distributions in Rectified MLPs,"Invariance of Weight Distributions in Rectified MLPs
+Russell Tsuchida 1 Farbod Roosta-Khorasani 2 3 Marcus Gallagher 1"
+8b9db19d0d3e2a7d740be811810a043a04d6226a,An Attention-based Regression Model for Grounding Textual Phrases in Images,Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence (IJCAI-17)
+8bda09b2fb85c317c6361aee1935bcbcf87c1c70,Score Normalization in Multimodal Systems using Generalized Extreme Value Distribution,"Score Normalization in Multimodal
+Systems using Generalized Extreme Value
+Distribution
+Renu Sharma1, 2 1Centre for Development of Advanced Computing,
+Mumbai, India
+Sukhendu Das2 2Indian Institute of Technology, Madras, India
+Padmaja Joshi1"
+8b632db02220806cd62e35fdebb3ede58243dee0,Recognizing Partially Occluded Faces from a Single Sample Per Class Using String-Based Matching,"Recognizing Partially Occluded Faces from a
+Single Sample Per Class Using String-Based
+Matching
+Weiping Chen1 and Yongsheng Gao1,2
+School of Engineering, Grif‌f‌ith University, Australia
+National ICT Australia, Queensland Research Lab"
+8b9e94fb3bb64389e9765ffde365862231b5972c,Fast Eye Tracking and Feature Measurement using a Multi-stage Particle Filter,
+8bba26895022749e2273729f96051571eabc7b99,Natural language acquisition in recurrent neural architectures,"Natural Language Acquisition in
+Recurrent Neural Architectures
+Dissertation
+submitted to the Universität Hamburg,
+Faculty of Mathematics,
+Informatics
+nd Natural Sciences, Department
+fulfilment
+of the requirements for the degree of
+Doctor rerum naturalium (Dr. rer. nat.)
+Informatics,
+in partial
+Dipl.-Inform. Stefan Heinrich
+Hamburg, 2016"
+135fcdab631ab30ae837a743040f1c8751268e41,DeepStyle: Multimodal Search Engine for Fashion and Interior Design,"SUBMITTED TO IEEE TRANSACTIONS ON MULTIMEDIA
+DeepStyle: Multimodal Search Engine
+for Fashion and Interior Design
+Ivona Tautkute1, 3, Tomasz Trzci´nski2, 3, Aleksander Skorupa3, Lukasz Brocki1 and Krzysztof Marasek1"
+139bb2a4034a0498934185e8c6d515d8f9330e2a,One-Shot Segmentation in Clutter,"One-Shot Segmentation in Clutter
+Claudio Michaelis 1 2 Matthias Bethge 1 2 3 4 Alexander S. Ecker 1 2 4"
+13f9922632ff5311046229b849615fcd2f5d0c06,On Multi-scale differential features for face recognition,"On Multi-scale differential features for face recognition
+Center for Intelligent Information Retrieval
+S. Ravela
+Allen R. Hanson
+Vision Laboratory
+Dept. of Computer Science, University of Massachusetts at Amherst, MA, 01002"
+135fe2a0a0e6b726e5d81299edad4b3ce39d6614,Multichannel-Kernel Canonical Correlation Analysis for Cross-View Person Reidentification,"This is a pre-print version, the final version of the manuscript with more experiments can be found at:
+https://doi.org/10.1145/3038916
+Multi Channel-Kernel Canonical Correlation
+Analysis for Cross-View Person Re-Identification
+Giuseppe Lisanti, Svebor Karaman, Iacopo Masi"
+13a82da2bfa24583caf78ab1d14b5cfa4798b3b3,Robust face hallucination using quantization-adaptive dictionaries,"Robust Face Hallucination using
+Quantization-Adaptive Dictionaries
+Reuben Farrugia
+Christine Guillemot
+IEEE Int. Conf. on Image Processing, Arizona, USA
+6th September 2016"
+137457bbf46009b25d7f6d853083b6da02bfd6b9,Following Eye Gaze Activates a Patch in the Posterior Temporal Cortex That Is not Part of the Human “Face Patch” System,"New Research
+Cognition and Behavior
+Following Eye Gaze Activates a Patch in the
+Posterior Temporal Cortex That Is not Part of the
+Human “Face Patch” System
+Kira Marquardt,1,ⴱ Hamidreza Ramezanpour,1,2,3,ⴱ Peter W. Dicke,1 and Peter Thier1,4
+DOI:http://dx.doi.org/10.1523/ENEURO.0317-16.2017
+Department of Cognitive Neurology, Hertie Institute for Clinical Brain Research, 72076 Tübingen, Germany,
+Graduate School of Neural and Behavioural Sciences, University of Tübingen, 72074 Tübingen, Germany,
+International Max Planck Research School for Cognitive and Systems Neuroscience, University of Tübingen, 72074
+Tübingen, Germany, 4Werner Reichardt Centre for Integrative Neuroscience (CIN), University of Tübingen, 72076
+Tübingen, Germany"
+13ab059e6b592ca7bcb14337316ec1ac14aa5c5a,Constrained planar cuts - Object partitioning for point clouds,"Constrained Planar Cuts - Object Partitioning for Point Clouds
+Markus Schoeler, Jeremie Papon and Florentin W¨org¨otter
+Bernstein Center for Computational Neuroscience (BCCN)
+III Physikalisches Institut - Biophysik, Georg-August University of G¨ottingen"
+13b2e01030ae41983003e3ae53b5bb3ed3e764f0,Detection-Tracking for Efficient Person Analysis: The DetTA Pipeline,"Detection-Tracking for Efficient Person Analysis: The DetTA Pipeline
+Stefan Breuers1, Lucas Beyer1, Umer Rafi1, Bastian Leibe1"
+130bf256f4cc3dded4fb701f74f6a34992be639b,A Robust Multiwavelet-Based Watermarking Scheme for Copyright Protection of Digital Images Using Human Visual System,"The International Arab Journal of Information Technology, Vol. 10, No. 6, November 2013 527
+A Robust Multiwavelet-Based Watermarking
+Scheme for Copyright Protection of Digital
+Images using Human Visual System
+Padmanabhareddy Vundela1 and Varadarajan Sourirajan2
+Department of Information Technology, Vardhaman College of Engineering, India
+Department of Electrical and Electronic Engineering, S.V. University College of Engineering, India"
+13f8c13cfbf2a504f02745bd44da4ac40fd8f8df,Feature Sets and Dimensionality Reduction for Visual Object Detection,"Author manuscript, published in ""British Machine Vision Conference, Aberystwyth :
+Royaume-Uni (2010)""
+DOI : 10.5244/C.24.112"
+134db6ca13f808a848321d3998e4fe4cdc52fbc2,Dynamics of facial expression: recognition of facial actions and their temporal segments from face profile image sequences,"IEEE TRANSACTIONS ON SYSTEMS, MAN, AND CYBERNETICS—PART B: CYBERNETICS, VOL. 36, NO. 2, APRIL 2006
+Dynamics of Facial Expression: Recognition of
+Facial Actions and Their Temporal Segments
+From Face Profile Image Sequences
+Maja Pantic, Member, IEEE, and Ioannis Patras, Member, IEEE"
+133dd0f23e52c4e7bf254e8849ac6f8b17fcd22d,Active Clustering with Model-Based Uncertainty Reduction,"This article has been accepted for publication in a future issue of this journal, but has not been fully edited. Content may change prior to final publication. Citation information: DOI
+Active Clustering with Model-Based
+Uncertainty Reduction
+Caiming Xiong, David M. Johnson, and Jason J. Corso Senior Member, IEEE"
+13f03aab62fc29748114a0219426613cf3ba76ae,MORPH-II: Feature Vector Documentation,"MORPH-II: Feature Vector Documentation
+Troy P. Kling
+NSF-REU Site at UNC Wilmington, Summer 2017
+MORPH-II Subsets
+Four different subsets of the MORPH-II database were selected for a wide range of purposes, including age
+estimate, gender and race classification, and facial recognition.
+• The “Full” data set contains all 55,134 mugshots [1].
+• The “Partial” data set contains 1,000 mugshots randomly selected from the full data set.
+• The “Partial (Even)” data set contains 1,000 mugshots selected from the full data set according to very
+strict rules and is intended mainly for age estimation tasks. The subjects range in age from 21 to 45,
+with exactly 40 subjects in each age category (thus the term “even” in the name of the data set). Of
+these 40 subjects in each age group, exactly 30 are male and 10 are female, giving rise to a 3:1 gender
+ratio. Additionally, half of the males in each age group are black, and the same goes for the females,
+so there is a precise 1:1 ratio of black to white individuals. No subject is represented more than once
+in this data set, so it should not be used for face recognition tasks.
+• The “Recognition” data set contains 1,660 mugshots selected from the full data set according to certain
+rules and is intended to be used for facial recognition tasks. There are 166 subjects present in the data
+set – 83 males and 83 females – each of whom has exactly 10 images, usually taken over the span of
+multiple years. No restrictions on age or race were placed on this data set.
+Image Preprocessing"
+134fe1c4f45cea3339c094fee817e7a024d73d88,Inferring door locations from a teammate's trajectory in stealth human-robot team operations,"Inferring door locations from a teammate’s trajectory in stealth
+human-robot team operations
+Jean Oh, Luis Navarro-Serment, Arne Supp´e, Anthony Stentz and Martial Hebert1"
+1369e9f174760ea592a94177dbcab9ed29be1649,Geometrical facial modeling for emotion recognition,"Geometrical Facial Modeling for Emotion Recognition
+Giampaolo L. Libralon and Roseli A. F. Romero"
+133900a0e7450979c9491951a5f1c2a403a180f0,Social Grouping for Multi-Target Tracking and Head Pose Estimation in Video,"JOURNAL OF LATEX CLASS FILES
+Social Grouping for Multi-target Tracking and
+Head Pose Estimation in Video
+Zhen Qin and Christian R. Shelton"
+131059ea24073d08de0bd153f9caddc123911e51,Facial emotional recognition in schizophrenia: preliminary results of the Virtual Reality Program for Facial Emotional Recognition Reconhecimento emocional de faces na esquizofrenia: resultados preliminares do Programa de Realidade Virtual para o Reconhecimento Emocional de Faces,"Facial emotional recognition in schizophrenia: preliminary results of the Virtual
+Reality Program for Facial Emotional Recognition
+Reconhecimento emocional de faces na esquizofrenia: resultados preliminares do Programa de Realidade Virtual
+para o Reconhecimento Emocional de Faces
+Teresa souTo1,2, alexandre BapTisTa1, diana Tavares1,3, CrisTina Queirós1,2, anTónio MarQues1,3
+Psychosocial Rehabilitation Laboratory of Faculty of Psychology and Educational Sciences, Porto University/School of Allied Health Sciences, Porto Polytechnic Institute (FPCEUP/ESTSPIPP), Porto,
+Portugal.
+FPCEUP, Porto, Portugal.
+ESTSPIPP, Porto, Portugal.
+Institution where the study was elaborated: Faculty of Psychology and Educational Sciences, Porto University, Portugal.
+Received: 11/6/2012 – Accepted: 2/14/2013"
+13b8d657f0f9a0178339570bdc153bfd10a81300,Harvesting large-scale weakly-tagged image databases from the web,"Harvesting Large-Scale Weakly-Tagged Image Databases from the Web
+Jianping Fan1, Yi Shen1, Ning Zhou1, Yuli Gao2
+Department of Computer Science, UNC-Charlotte, NC28223, USA
+Multimedia Interaction and Understanding, HP Labs, Palo Alto, CA94304, USA"
+13db9466d2ddf3c30b0fd66db8bfe6289e880802,Transfer Subspace Learning Model for Face Recognition at a Distance,"I.J. Image, Graphics and Signal Processing, 2017, 1, 27-32
+Published Online January 2017 in MECS (http://www.mecs-press.org/)
+DOI: 10.5815/ijigsp.2017.01.04
+Transfer Subspace Learning Model for Face
+Recognition at a Distance
+Alwin Anuse
+MIT, Pune ,India
+Email:
+Nilima Deshmukh
+AISSM’S IOT,India
+Email:
+Vibha Vyas
+College of Engineering Pune,India
+Email:
+learning algorithms work"
+13ec6666b8b722ad9eb68a21a302e3f2f1ab4df7,Biometric Human Identification of Hand Geometry Features Using Discrete Wavelet Transform,"Biometric Human Identification of Hand
+Geometry Features Using Discrete
+Wavelet Transform
+Osslan Osiris Vergara Villegas, Humberto de Jesús Ochoa Domínguez,
+Vianey Guadalupe Cruz Sánchez, Leticia Ortega Maynez
+nd Hiram Madero Orozco
+Universidad Autónoma de Ciudad Juárez
+Instituto de Ingeniería y Tecnología
+Mexico
+. Introduction
+Since the security factor became a basic need for civilization, a lot of systems have been
+developed. Those systems, try to ensure the safety in all the things that driving a certain
+degree of exclusivity. Historically, keys, cards and passwords were used as security systems;
+however, these methods are vulnerable to loss and theft. As a result biometric identification
+methods emerge in order to tackle the disadvantages of the non biometric classical methods.
+Biometrics,
+is an emerging technology that addresses the automated identification of
+individuals, based on their physiological and behavioral traits. The main advantage of
+iometric methods is the ability to recognize, which is made by means of a physical feature or
+unique pattern (Jain et al. (2008)). With these methods and individual can hardly be victim"
+13ae3c8afef5a0d6f4c9e684da9fc1fa96caaeb6,Online Anomaly Detection in Crowd Scenes via Structure Analysis,"Online Anomaly Detection in Crowd Scenes
+via Structure Analysis
+Yuan Yuan, Senior Member, IEEE, Jianwu Fang, and Qi Wang"
+13caf4d2e0a4b6fcfcd4b9e8e2341b8ebd38258d,Joint Learning of Siamese CNNs and Temporally Constrained Metrics for Tracklet Association,"Joint Learning of Siamese CNNs and Temporally
+Constrained Metrics for Tracklet Association
+Bing Wang, Student Member, IEEE, Li Wang, Member, IEEE, Bing Shuai, Student Member, IEEE,
+Zhen Zuo, Student Member, IEEE, Ting Liu, Student Member, IEEE, Kap Luk Chan, Member, IEEE, and
+Gang Wang, Member, IEEE"
+13aac86217231a7d118ecdff444ee07234fcff50,Classification via Incoherent Subspaces,"Classification via Incoherent Subspaces
+Karin Schnass, Pierre Vandergheynst, Senior Member, IEEE"
+13141284f1a7e1fe255f5c2b22c09e32f0a4d465,Object Tracking by Oversampling Local Features,"Object Tracking by
+Oversampling Local Features
+Federico Pernici and Alberto Del Bimbo"
+1394ca71fc52db972366602a6643dc3e65ee8726,EmoReact: a multimodal approach and dataset for recognizing emotional responses in children,"See discussions, stats, and author profiles for this publication at: https://www.researchgate.net/publication/308407783
+EmoReact: A Multimodal Approach and Dataset
+for Recognizing Emotional Responses in Children
+Conference Paper · November 2016
+DOI: 10.1145/2993148.2993168
+CITATIONS
+READS
+authors, including:
+Behnaz Nojavanasghari
+University of Central Florida
+PUBLICATIONS 20 CITATIONS
+Tadas Baltrusaitis
+Carnegie Mellon University
+0 PUBLICATIONS 247 CITATIONS
+SEE PROFILE
+SEE PROFILE
+Charles E. Hughes
+University of Central Florida
+85 PUBLICATIONS 1,248 CITATIONS
+SEE PROFILE"
+135fc59c8adb8d97a0a8dacf615f1b18a2102372,Language-Based Image Editing with Recurrent Attentive Models,"Language-Based Image Editing with Recurrent Attentive Models
+Jianbo Chen∗, Yelong Shen†, Jianfeng Gao†, Jingjing Liu†, Xiaodong Liu†
+University of California, Berkeley∗ and Microsoft Research†
+yeshen, jfgao, jingjl,"
+1373195c26eab581138579f7389cdf8b7a94a4bb,Synscapes: A Photorealistic Synthetic Dataset for Street Scene Parsing,"Synscapes: A Photorealistic Synthetic Dataset for Street Scene Parsing
+Magnus Wrenninge1,∗ Jonas Unger1,2,†
+7D Labs
+Link¨oping University, Sweden
+Figure 1: Example image from Synscapes."
+13631379de6487fd0571e5919f4efb65d16c1633,Accelerated Inference in Markov Random Fields via Smooth Riemannian Optimization,"Accelerated Inference in Markov Random Fields
+via Smooth Riemannian Optimization
+Siyi Hu and Luca Carlone"
+133da0d8c7719a219537f4a11c915bf74c320da7,A Novel Method for 3D Image Segmentation with Fusion of Two Images using Color K-means Algorithm,"International Journal of Computer Applications (0975 – 8887)
+Volume 123 – No.4, August 2015
+A Novel Method for 3D Image Segmentation with Fusion
+of Two Images using Color K-means Algorithm
+Neelam Kushwah
+Dept. of CSE
+ITM Universe
+Gwalior
+Priusha Narwariya
+Dept. of CSE
+ITM Universe
+Gwalior"
+134dd3bb637b51c61fa9d2332f11e39efc0b359a,High-level activity learning and recognition in structured environments,"High-level activity learning and recognition in
+structured environments
+John Patrick Greenall
+Submitted in accordance with the requirements
+for the degree of Doctor of Philosophy.
+The University of Leeds
+School of Computing
+June 2012"
+133f01aec1534604d184d56de866a4bd531dac87,Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics,"Effective Unconstrained Face Recognition by
+Combining Multiple Descriptors and Learned
+Background Statistics
+Lior Wolf, Member, IEEE, Tal Hassner, and Yaniv Taigman"
+13841d54c55bd74964d877b4b517fa94650d9b65,Generalised ambient reflection models for Lambertian and Phong surfaces,"Generalised Ambient Reflection Models for Lambertian and
+Phong Surfaces
+Author
+Zhang, Paul, Gao, Yongsheng
+Published
+Conference Title
+Proceedings of the 2009 IEEE International Conference on Image Processing (ICIP 2009)
+https://doi.org/10.1109/ICIP.2009.5413812
+Copyright Statement
+© 2009 IEEE. Personal use of this material is permitted. However, permission to reprint/
+republish this material for advertising or promotional purposes or for creating new collective
+works for resale or redistribution to servers or lists, or to reuse any copyrighted component of
+this work in other works must be obtained from the IEEE.
+Downloaded from
+http://hdl.handle.net/10072/30001
+Griffith Research Online
+https://research-repository.griffith.edu.au"
+13451899558d7217206b275ca0bb1f48fa4afdd9,Hidden Markov Models Training by a Particle Swarm Optimization Algorithm,"Journal of Mathematical Modelling and Algorithms (2007) 6: 175–193
+DOI: 10.1007/s10852-005-9037-7
+# Springer 2006
+Hidden Markov Models Training by a Particle
+Swarm Optimization Algorithm
+, NICOLAS MONMARCHE´
+SE´ BASTIEN AUPETIT
+nd MOHAMED SLIMANE
+Laboratoire d’Informatique, Polytech’Tours, Universite´ Franc¸ois-Rabelais de Tours,
+64 avenue Jean Portalis, 37200 Tours, France.
+e-mail: {sebastien.aupetit, nicolas.monmarche,
+(Received 16 July 2005; in final form 22 December 2005; published online 28 February 2006)
+In this work we consider the problem of Hidden Markov Models (HMM) training. This"
+132781c1b2495ff0e792b46b94fdf33867394e4a,Autistic Traits and Symptoms of Social Anxiety are Differentially Related to Attention to Others’ Eyes in Social Anxiety Disorder,"J Autism Dev Disord (2017) 47:3814–3821
+DOI 10.1007/s10803-016-2978-z
+S.I. : ANXIETY IN AUTISM SPECTRUM DISORDERS
+Autistic Traits and Symptoms of Social Anxiety are Differentially
+Related to Attention to Others’ Eyes in Social Anxiety Disorder
+Johan Lundin Kleberg1 · Jens Högström2,3 · Martina Nord2,3 · Sven Bölte4,5 ·
+Eva Serlachius2,3 · Terje Falck‑Ytter1,4,5
+Published online: 20 December 2016
+© The Author(s) 2016. This article is published with open access at Springerlink.com"
+132f88626f6760d769c95984212ed0915790b625,Exploring Entity Resolution for Multimedia Person Identification,"UC Irvine
+UC Irvine Electronic Theses and Dissertations
+Title
+Exploring Entity Resolution for Multimedia Person Identification
+Permalink
+https://escholarship.org/uc/item/9t59f756
+Author
+Zhang, Liyan
+Publication Date
+014-01-01
+Peer reviewed|Thesis/dissertation
+eScholarship.org
+Powered by the California Digital Library
+University of California"
+13f6ab2f245b4a871720b95045c41a4204626814,Cortex commands the performance of skilled movement,"RESEARCH ARTICLE
+Cortex commands the performance of
+skilled movement
+Jian-Zhong Guo, Austin R Graves, Wendy W Guo, Jihong Zheng, Allen Lee,
+Juan Rodrı´guez-Gonza´ lez, Nuo Li, John J Macklin, James W Phillips,
+Brett D Mensh, Kristin Branson, Adam W Hantman*
+Janelia Research Campus, Howard Hughes Medical Institute, Ashburn, United
+States"
+138778d75fc4e2fd490897ac064b9ac84b6b9f04,Generation and visualization of emotional states in virtual characters,"COMPUTER ANIMATION AND VIRTUAL WORLDS
+Comp. Anim. Virtual Worlds 2008; 19: 259–270
+Published online 25 July 2008 in Wiley InterScience
+(www.interscience.wiley.com) DOI: 10.1002/cav.234
+...........................................................................................
+Generation and visualization of
+emotional states in virtual characters
+By Diana Arellano*, Javier Varona and Francisco J. Perales
+..........................................................................
+This paper presents an affective model that determines the emotional state of a character
+ccording to the personality traits and the experienced emotions. We consider an emotional
+state as the layer between personality and emotion. The proposed affective model offers a
+mapping between emotions and emotional states. To evidence emotional states of a virtual
+haracter, we can attribute them facial expressions based on their associated emotions.
+Facial expressions for intermediate emotions are generated automatically from expressions
+for universal emotions. The experiments show coherent emotional states produced by a
+simulated story. They also present how the corresponding emotions were represented
+through dynamic and static facial expressions. Finally, the obtained results demonstrate the
+satisfactory recognition by a group of people unfamiliar with the work described. Copyright
+© 2008 John Wiley & Sons, Ltd."
+13afc4f8d08f766479577db2083f9632544c7ea6,Multiple kernel learning for emotion recognition in the wild,"Multiple Kernel Learning for
+Emotion Recognition in the Wild
+Karan Sikka, Karmen Dykstra, Suchitra Sathyanarayana,
+Gwen Littlewort and Marian S. Bartlett
+Machine Perception Laboratory
+EmotiW Challenge, ICMI, 2013"
+13c4a4359e9d7f5b2abe1b9542c0950946b0565a,Learning sparse tag patterns for social image classification,"This document is downloaded from DR-NTU, Nanyang Technological
+University Library, Singapore.
+Title
+Learning sparse tag patterns for social image
+lassification
+Author(s)
+Lin, Jie; Duan, Ling-Yu; Yuan, Junsong; Li, Qingyong;
+Luo, Siwei
+Citation
+http://hdl.handle.net/10220/12960
+Rights"
+13f07d51c073964d11f9af6463fe3ffe5475c393,"Part-Based Pedestrian Detection and Feature-Based Tracking for Driver Assistance: Real-Time, Robust Algorithms, and Evaluation","This article has been accepted for inclusion in a future issue of this journal. Content is final as presented, with the exception of pagination.
+Part-Based Pedestrian Detection and Feature-Based
+Tracking for Driver Assistance: Real-Time,
+Robust Algorithms, and Evaluation
+Antonio Prioletti, Student Member, IEEE, Andreas Møgelmose, Student Member, IEEE, Paolo Grisleri,
+Mohan Manubhai Trivedi, Fellow, IEEE, Alberto Broggi, Senior Member, IEEE, and
+Thomas B. Moeslund, Member, IEEE"
+13188a88bbf83a18dd4964e3f89d0bc0a4d3a0bd,Image Normalization Robust using Histogram Equalization and Logarithm Transform Frequency DCT Coefficients for Illumination in Facial Images,"Dr. V. S. Manjula
+HOD, Department of Computer Science, St. Joseph College of Information Technology, Songea, Tanzania"
+13e348264fe1077caa44e1b59c71e67a8e4b5ad9,Effect of Eyes Detection and Position Estimation Methods on the Accuracy of Comparative Testing of Face Detection Algorithms,"EFFECT OF EYES DETECTION AND POSITION ESTIMATION METHODS
+ON THE ACCURACY OF COMPARATIVE TESTING OF FACE
+DETECTION ALGORITHMS1
+N. Degtyarev, O. Seredin
+Tula State University, 92 Lenin Ave., Tula 300600, Russian Federation;
+Phone: +7(4872)353637; E-mail:
+Many published comparisons of face detection algorithms used different evaluation
+procedures for each algorithm or even contain only a summary of the originally reported
+performance among several face detection algorithms on the pair of small datasets. Deg-
+tyarev et al. have proposed the FD algorithm evaluation procedure containing model of
+face representation conversion unifying the FD algorithms comparison procedures,
+which makes such evaluation more reliable. However, there is no evidence that such
+""conversion"" does not diminish the localization accuracy. The aim of this work is to ex-
+mined the effects of two different face representation conversion techniques - eyes es-
+timation model proposed by Degtyarev et al. and highly scored eyes detection method
+proposed by Bolme et al. and based on ASE filters - via routine testing.
+Introduction
+Face detection (FD) algorithms are getting
+widely used in the modern world: security sys-
+tems, interactive user interfaces, advertisement"
+13d9da779138af990d761ef84556e3e5c1e0eb94,Learning to Locate Informative Features for Visual Identification,"Int J Comput Vis (2008) 77: 3–24
+DOI 10.1007/s11263-007-0093-5
+Learning to Locate Informative Features for Visual Identification
+Andras Ferencz · Erik G. Learned-Miller ·
+Jitendra Malik
+Received: 18 August 2005 / Accepted: 11 September 2007 / Published online: 9 November 2007
+© Springer Science+Business Media, LLC 2007"
+7f511a6a2b38a26f077a5aec4baf5dffc981d881,Low-Latency Human Action Recognition with Weighted Multi-Region Convolutional Neural Network,"LOW-LATENCY HUMAN ACTION RECOGNITION WITH WEIGHTED MULTI-REGION
+CONVOLUTIONAL NEURAL NETWORK
+Yunfeng Wang(cid:63), Wengang Zhou(cid:63), Qilin Zhang†, Xiaotian Zhu(cid:63), Houqiang Li(cid:63)
+(cid:63)University of Science and Technology of China, Hefei, Anhui, China
+HERE Technologies, Chicago, Illinois, USA"
+7ff83f10e49e81ce6f66270e8f3f42dd2c6eb3ed,PIRM Challenge on Perceptual Image Enhancement on Smartphones: Report,"PIRM Challenge on Perceptual Image Enhancement
+on Smartphones: Report
+Andrey Ignatov, Radu Timofte, Thang Van Vu, Tung Minh Luu, Trung X Pham, Cao Van Nguyen,
+Yongwoo Kim, Jae-Seok Choi, Munchurl Kim, Jie Huang, Jiewen Ran, Chen Xing, Xingguang Zhou,
+Pengfei Zhu, Mingrui Geng, Yawei Li, Eirikur Agustsson, Shuhang Gu, Luc Van Gool, Etienne de Stoutz,
+Nikolay Kobyshev, Kehui Nie, Yan Zhao, Gen Li, Tong Tong, Qinquan Gao, Liu Hanwen, Pablo Navarrete
+Michelini, Zhu Dan, Hu Fengshuo, Zheng Hui, Xiumei Wang, Lirui Deng, Rang Meng, Jinghui Qin, Yukai
+Shi, Wushao Wen, Liang Lin, Ruicheng Feng, Shixiang Wu, Chao Dong, Yu Qiao, Subeesh Vasu, Nimisha
+Thekke Madam, Praveen Kandula, A. N. Rajagopalan, Jie Liu, Cheolkon Jung ∗"
+7fa62c091a14830ae256dc00b512f7d4b4cf5b94,Stabilizing GAN Training with Multiple Random Projections,"Under review as a conference paper at ICLR 2018
+Stabilizing GAN Training with
+Multiple Random Projections
+Anonymous authors
+Paper under double-blind review"
+7ff42ee09c9b1a508080837a3dc2ea780a1a839b,Data Fusion for Real-time Multimodal Emotion Recognition through Webcams and Microphones in E-Learning,"Data Fusion for Real-time Multimodal Emotion Recognition through Webcams
+nd Microphones in E-Learning
+Kiavash Bahreini*, Rob Nadolski*, Wim Westera*
+*Welten Institute, Research Centre for Learning, Teaching and Technology, Faculty of
+Psychology and Educational Sciences, Open University of the Netherlands, Valkenburgerweg
+77, 6419 AT Heerlen, The Netherlands
+{kiavash.bahreini, rob.nadolski,"
+7fbff9fa2ba7a7ff57a433e8bb19cfd99d52132d,A probabilistic framework for car detection in images using context and scale,"RiverCentre, Saint Paul, Minnesota, USA
+May 14-18, 2012
+978-1-4673-1405-3/12/$31.00 ©2012 IEEE"
+7fdcb6638a9e01986cd8fb4133b4448700087faf,Expression-Invariant Multispectral Face Recognition: You Can Smile Now!,"Expression-Invariant Multispectral Face Recognition:
+You Can Smile Now!
+Ioannis A. Kakadiarisa, George Passalisa, George Todericia, Yunliang Lua,
+Nikos Karampatziakisa, Najam Murtuzaa, Theoharis Theoharisa
+Computational Biomedicine Lab, Dept. of Computer Science, Univ. of Houston, TX, USA"
+7f533bd8f32525e2934a66a5b57d9143d7a89ee1,Audio-Visual Identity Grounding for Enabling Cross Media Search,"Audio-Visual Identity Grounding for Enabling Cross Media Search
+Kevin Brady, MIT Lincoln Laboratory
+Paper ID 22"
+7f44f8a5fd48b2d70cc2f344b4d1e7095f4f1fe5,Sparse Output Coding for Scalable Visual Recognition,"Int J Comput Vis (2016) 119:60–75
+DOI 10.1007/s11263-015-0839-4
+Sparse Output Coding for Scalable Visual Recognition
+Bin Zhao1 · Eric P. Xing1
+Received: 15 May 2013 / Accepted: 16 June 2015 / Published online: 26 June 2015
+© Springer Science+Business Media New York 2015"
+7f4bc8883c3b9872408cc391bcd294017848d0cf,The Multimodal Focused Attribute Model : A Nonparametric Bayesian Approach to Simultaneous Object Classification and Attribute Discovery,"Computer
+Sciences
+Department
+The Multimodal Focused Attribute Model: A Nonparametric
+Bayesian Approach to Simultaneous Object Classification and
+Attribute Discovery
+Jake Rosin
+Charles R. Dyer
+Xiaojin Zhu
+Technical Report #1697
+January 2012"
+7f6061c83dc36633911e4d726a497cdc1f31e58a,YouTube-8M: A Large-Scale Video Classification Benchmark,"YouTube-8M: A Large-Scale Video Classification
+Benchmark
+Sami Abu-El-Haija
+George Toderici
+Nisarg Kothari
+Joonseok Lee
+Paul Natsev
+Balakrishnan Varadarajan
+Sudheendra Vijayanarasimhan
+Google Research"
+7f65bbc93cf414d4889773b697b1833e85f0a15f,Neural Perspective to Jigsaw Puzzle Solving,"Neural Perspective to Jigsaw Puzzle Solving
+Viveka Kulharia⇤, Arnab Ghosh⇤, Nikhil Patil?, Piyush Rai
+Department of Computer Science, IIT Kanpur
+Kanpur, India"
+7ff0ad5c34f02b9c394ed0d8a3db9c270dc70e44,Learning a temporally invariant representation for visual tracking,"LEARNING A TEMPORALLY INVARIANT REPRESENTATION FOR VISUAL TRACKING
+Chao Ma(cid:63)†, Xiaokang Yang(cid:63), Chongyang Zhang(cid:63), and Ming-Hsuan Yang†
+(cid:63)Shanghai Jiao Tong University, China
+University of California at Merced, USA"
+7f0fadae16cc74b6176ba940aa2f8b5a0a67e09e,An Expert Local Mesh Correlation Histograms for Biomedical Image Indexing and Retrieval,"CHAPTER 1
+An Expert Local Mesh Correlation Histograms for
+Biomedical Image Indexing and Retrieval
+Santosh Kumar Vipparthi, Subrahmanyam Murala, S.K. Nagar and Anil
+Balaji Gonde
+Santosh Kumar Vipparthi
+Department of Computer Science and Engineering
+Malaviya National Institute of Technology
+Jaipur, India
+e-mail:
+Subrahmanyam Murala
+Department of Electrical Engineering
+Indian Institute of Technology Ropar
+India
+e-mail:
+S.K. Nagar
+Department of Electrical Engineering
+Indian Institute of Technology Banaras Hindu University
+India
+e-mail:"
+7f7c3a99923549601c81cd5e9659ca01e8a42f47,Zero-Shot Learning of Language Models for Describing Human Actions Based on Semantic Compositionality of Actions,"PACLIC 28
+Zero-Shot Learning of Language Models for Describing Human Actions
+Based on Semantic Compositionality of Actions
+Hideki ASOH
+National Institute of
+Graduate School of Humanities and Sciences,
+Ichiro KOBAYASHI
+Ochanomizu University
+Bunkyo-ku, Tokyo 112-8610 Japan
+Advanced Industrial Science and Technology
+Tsukuba, Ibaraki 305-8568 Japan"
+7f36dd9ead29649ed389306790faf3b390dc0aa2,Movement Differences between Deliberate and Spontaneous Facial Expressions: Zygomaticus Major Action in Smiling.,"MOVEMENT DIFFERENCES BETWEEN DELIBERATE
+AND SPONTANEOUS FACIAL EXPRESSIONS:
+ZYGOMATICUS MAJOR ACTION IN SMILING
+Karen L. Schmidt, Zara Ambadar, Jeffrey F. Cohn, and L. Ian Reed"
+7f217ff1f3c21c84ed116d32e3b8d1509a306fbd,Direct Optimization through arg max for Discrete Variational Auto-Encoder,"Direct Optimization through arg max for Discrete
+Variational Auto-Encoder
+Guy Lorberbom (Technion), Andreea Gane (MIT),
+Tommi Jaakkola (MIT), Tamir Hazan (Technion)."
+7f6cd03e3b7b63fca7170e317b3bb072ec9889e0,A Face Recognition Signature Combining Patch-based Features with Soft Facial Attributes,"A Face Recognition Signature Combining Patch-based
+Features with Soft Facial Attributes
+L. Zhang, P. Dou, I.A. Kakadiaris
+Computational Biomedicine Lab, 4849 Calhoun Rd, Rm 373, Houston, TX 77204"
+7fa41631cdef8f7fba7e1289dd4c5f3723b172ab,A robust and isotropic curved surface representation for 3D faces description,"A robust and isotropic curved surface representation for 3D faces
+description
+Majdi Jribi and Faouzi Ghorbel"
+7f6a527a3dc2e526aa59a57cadb20ff727124973,A comparison of adaptive matchers for screening of faces in video surveillance,"012 IEEE Symposium on
+Computational Intelligence for
+Security and Defence Applications
+(CISDA 2012)
+Ottawa, Ontario, Canada
+1 – 13 July 2012
+IEEE Catalog Number:
+ISBN:
+CFP12SDA-PRT
+978-1-4673-1416-9"
+7f9cacb5fc126f87dbf53dd547a9fb9f58ded557,RoadNet-v2: A 10 ms Road Segmentation Using Spatial Sequence Layer,"RoadNet-v2: A 10 ms Road Segmentation Using
+Spatial Sequence Layer
+Yecheng Lyu and Xinming Huang
+Department of Electrical and Computer Engineering
+Worcester Polytechnic Institute
+Worcester, MA 01609, USA"
+7f3c6bf191a8633d10fad32e23fa06a3c925ffee,The benefits of simply observing: mindful attention modulates the link between motivation and behavior.,"015, Vol. 108, No. 1, 148 –170
+0022-3514/15/$12.00
+© 2014 American Psychological Association
+http://dx.doi.org/10.1037/a0038032
+The Benefits of Simply Observing: Mindful Attention Modulates the Link
+Between Motivation and Behavior
+Esther K. Papies
+Utrecht University
+Mike Keesman
+Utrecht University
+Tila M. Pronk
+Tilburg University
+Lawrence W. Barsalou
+Emory University
+Mindful attention, a central component of mindfulness meditation, can be conceived as becoming aware
+of one’s thoughts and experiences and being able to observe them as transient mental events. Here, we
+present a series of studies demonstrating the effects of applying this metacognitive perspective to one’s
+spontaneous reward responses when encountering attractive stimuli. Taking a grounded cognition
+perspective, we argue that reward simulations in response to attractive stimuli contribute to appetitive
+ehavior and that motivational states and traits enhance these simulations. Directing mindful attention at"
+7f97a36a5a634c30de5a8e8b2d1c812ca9f971ae,Incremental Classifier Learning with Generative Adversarial Networks,"Incremental Classifier Learning with Generative Adversarial Networks
+Yue Wu1 Yinpeng Chen2 Lijuan Wang2 Yuancheng Ye3
+Zicheng Liu2 Yandong Guo2 Zhengyou Zhang2 Yun Fu1
+Northeastern University 2Microsoft Research 3City University of New York"
+7ff636c82898a35d3239573f8e3a29da89c73ed4,Automatic Detection of the Uterus and Fallopian Tube Junctions in Laparoscopic Images,"Automatic Detection of the Uterus and
+Fallopian Tube Junctions in Laparoscopic Images
+Kristina Prokopetc, Toby Collins, and Adrien Bartoli
+Image Science for Interventional Techniques (ISIT),
+UMR 6284 CNRS, Universit´e d(cid:48)Auvergne, France"
+7fc5ab3743e6e9a2f4fe70152440e13a673e239b,Improved Face Recognition Rate Using HOG Features and SVM Classifier,"IOSR Journal of Electronics and Communication Engineering (IOSR-JECE)
+e-ISSN: 2278-2834,p- ISSN: 2278-8735.Volume 11, Issue 4, Ver. I (Jul.-Aug .2016), PP 34-44
+www.iosrjournals.org
+Improved Face Recognition Rate Using HOG Features and SVM
+Classifier
+Harihara Santosh Dadi, Gopala Krishna Mohan Pillutla"
+7f04b65f2c6f96c7ce000f537fb691a93f61db52,Geometrical and Visual Feature Quantization for 3D Face Recognition,
+7f268f29d2c8f58cea4946536f5e2325777fa8fa,Facial Emotion Recognition in Curvelet Domain,"Facial Emotion Recognition in Curvelet Domain
+Gyanendra K Verma and Bhupesh Kumar Singh
+Indian Institute of Informaiton Technology, Allahabad, India
+Allahabad, India - 211012"
+7ff1c4e0ad0dae92d4f25b93783fadde8f07276d,An efficient example-based approach for image super-resolution,"IEEE Int. Conference Neural Networks & Signal Processing
+Zhenjiang, China, June 8~10, 2008
+AN EFFICIENT EXAMPLE-BASED APPROACH FOR IMAGE
+SUPER-RESOLUTION
+Xiaoguang Li1,2, Kin Man Lam2, Guoping Qiu3, Lansun Shen1 and Suyu Wang1
+. Signal & Information Processing Lab. Beijing University of Technology, Beijing, China, 100124
+. Centre for Signal Processing, Department of Electronic and Information Engineering, The Hong Kong
+Polytechnic University, Hong Kong
+. Department of Computer Science, Nottingham University, UK"
+7f3a73babe733520112c0199ff8d26ddfc7038a0,Robust Face Identification with Small Sample Sizes using Bag of Words and Histogram of Oriented Gradients,
+7fd97bc23c85213b8b2e4d28264f04ce6dc84e74,Optimal Transformation Estimation with Semantic Cues,"Optimal Transformation Estimation with Semantic Cues
+Danda Pani Paudel
+Computer Vision Laboratory
+D-ITET, ETH Zurich
+Adlane Habed
+ICube Laboratory
+CNRS, University of Strasbourg
+Luc Van Gool
+Computer Vision Laboratory
+D-ITET, ETH Zurich"
+7af38f6dcfbe1cd89f2307776bcaa09c54c30a8b,Learning in Computer Vision and Beyond: Development,"eaig i C e Vii ad Beyd:
+Deve
+h . Weg
+Deae f C e Sciece
+ichiga Sae Uiveiy
+Ea aig  48824
+Abac
+Thi chae id ce wha i ca
+aic
+ve
+y h a cgiive deve
+ih i deeied befe he \bih"" f he ye. Afe he \bih"" i eab
+
+ach i  ea
+deve
+way aia
+  ea whi
+de deve
+7a1828e181e3c8bd014c7e5fc1bcc417f122c18c,Face Perception and Test Reliabilities in Congenital Prosopagnosia in Seven Tests,"i-Perception
+January-February 2016: 1–37
+! The Author(s) 2016
+DOI: 10.1177/2041669515625797
+ipe.sagepub.com
+Article
+Face Perception and Test
+Reliabilities in Congenital
+Prosopagnosia in Seven Tests
+Janina Esins
+Department of Human Perception, Cognition and Action, Max Planck
+Institute for Biological Cybernetics, Tu¨bingen, Germany
+Johannes Schultz
+Department of Psychology, Durham University, Durham, UK
+Claudia Stemper
+Institute of Human Genetics, Westfa¨lische Wilhelms-Universita¨t
+Mu¨nster, Mu¨nster, Germany
+Ingo Kennerknecht
+Institute of Human Genetics, Westfa¨lische Wilhelms-Universita¨t
+Mu¨nster, Mu¨nster, Germany"
+7ab41d2fb37079d20db5e25fd6e71755673f82f0,Building Emotional Machines: Recognizing Image Emotions Through Deep Neural Networks,"Building Emotional Machines: Recognizing Image
+Emotions through Deep Neural Networks
+Hye-Rin Kim, Yeong-Seok Kim, Seon Joo Kim, In-Kwon Lee"
+7af6d86139aa86cb5897904563a9f67c016a176d,Performance of Correlation Filters in Facial Recognition,"Performance of Correlation Filters in Facial
+Recognition
+Everardo Santiago-Ramirez, J.A. Gonzalez-Fraga, and J.I. Ascencio-Lopez
+Facultad de Ciencias, Universidad Autónoma de Baja California, Km. 103, Carretera Tijuana-
+Ensenada, Ensenada, Baja California C. P. 22860"
+7a81967598c2c0b3b3771c1af943efb1defd4482,Do We Need More Training Data?,"Do We Need More Training Data?
+Xiangxin Zhu · Carl Vondrick · Charless C. Fowlkes · Deva Ramanan"
+7a7a53b05e22305b2963c05ac89830e099146767,Assessing fish abundance from underwater video using deep neural networks,"Assessing fish abundance from underwater video
+using deep neural networks
+Ranju Mandal∗, Rod M. Connolly†, Thomas A. Schlacher‡ and Bela Stantic∗
+School of ICT, Griffith Sciences, Griffith University, QLD 4222, Australia
+Australian Rivers Institute - Coast & Estuaries and
+School of Environment and Science, Griffith University, QLD 4222, Australia
+School of Science and Engineering, University of the Sunshine Coast, QLD 4558, Australia
+{r.mandal, r.connolly,"
+7ace44190729927e5cb0dd5d363fcae966fe13f7,A bag-of-features approach based on Hue-SIFT descriptor for nude detection,"7th European Signal Processing Conference (EUSIPCO 2009)
+Glasgow, Scotland, August 24-28, 2009
+A BAG-OF-FEATURES APPROACH BASED ON
+HUE-SIFT DESCRIPTOR FOR NUDE DETECTION
+Ana P. B. Lopes1,2, Sandra E. F. de Avila1, Anderson N. A. Peixoto1
+Rodrigo S. Oliveira1 and Arnaldo de A. Ara´ujo1
+Computer Science Department – Federal University of Minas Gerais
+Av. Antˆonio Carlos, 6627, Pampulha, CEP 31270–901, Belo Horizonte, MG, Brazil
+Exact and Technological Sciences Department – State University of Santa Cruz
+Rodovia Ilh´eus-Itabuna, km 16 – Pavilh˜ao Jorge Amado, CEP 45600-000, Ilh´eus, BA, Brazil"
+7ae0212d6bf8a067b468f2a78054c64ea6a577ce,Human Face Processing Techniques With Application To Large Scale Video Indexing,"Human Face Processing Techniques
+With Application To
+Large Scale Video Indexing
+LE DINH DUY
+DOCTOR OF
+PHILOSOPHY
+Department of Informatics,
+School of Multidisciplinary Sciences,
+The Graduate University for Advanced Studies (SOKENDAI)
+006 (School Year)
+September 2006"
+7ab9035ec3871bbeadf1095afbe1ff9d9cb25480,DLBP and SVD Fusion for 3 D Face Recognition Using Range Image,"Computer Science and Information Technology 5(2): 61-65, 2017
+DOI: 10.13189/csit.2017.050203
+http://www.hrpub.org
+DLBP and SVD Fusion for 3D Face Recognition Using
+Range Image
+El Mahdi Barrah, Rachid Ahdid, Said Safi, Abdessamad Malaoui∗
+Interdisciplinary Laboratory of Research in Sciences and Technologies (LIRST), Sultan Moulay Slimane University, Bni Mellal, Morocco
+Copyright c(cid:13)2017 by authors, all rights reserved. Authors agree that this article remains permanently
+open access under the terms of the Creative Commons Attribution License 4.0 International License"
+7a540e0e2049a8f0118be2eab9a2ec5f57e022c9,Deep Learning Methods for Classification with Limited Training Data,"Deep Learning Methods for Classification with
+Limited Training Data
+Seminar Report : Spring 2017
+submitted by
+Aviral Kumar
+(140070031)
+under the guidance of
+Prof. Sunita Sarawagi
+Department of Computer Science and Engineering
+Indian Institute of Technology Bombay
+April, 2017"
+7a0fb972e524cb9115cae655e24f2ae0cfe448e0,Facial Expression Classification Using RBF AND Back-Propagation Neural Networks,"Facial Expression Classification Using RBF AND Back-Propagation Neural Networks
+R.Q.Feitosa1,2,
+M.M.B.Vellasco1,2,
+D.T.Oliveira1,
+D.V.Andrade1,
+S.A.R.S.Maffra1
+– Catholic University of Rio de Janeiro, Brazil
+Department of Electric Engineering
+– State University of Rio de Janeiro, Brazil
+Department of Computer Engineering
+e-mail: [raul, -rio.br, [diogo,"
+7ad77b6e727795a12fdacd1f328f4f904471233f,Supervised Local Descriptor Learning for Human Action Recognition,"Supervised Local Descriptor Learning
+for Human Action Recognition
+Xiantong Zhen, Feng Zheng, Ling Shao, Senior Member, IEEE, Xianbin Cao, Senior Member, IEEE, and Dan Xu"
+7a88d33b3e23a2cdf1e8a2b848c73a12a34ba88c,TUB-IRML at MediaEval 2014 Violent Scenes Detection Task: Violence Modeling through Feature Space Partitioning,"TUB-IRML at MediaEval 2014 Violent Scenes Detection
+Task: Violence Modeling through Feature Space
+Partitioning
+Esra Acar, Sahin Albayrak
+DAI Laboratory, Technische Universität Berlin
+Ernst-Reuter-Platz 7, TEL 14, 10587 Berlin, Germany"
+7a4f3d17672ecd89e4ad0d4f3a9257352a055d9b,A Novel Data-driven Image Annotation Method,"A Novel Data-driven Image Annotation Method
+Guiguang Ding, Jianmin Wang, Na Xu"
+7a97de9460d679efa5a5b4c6f0b0a5ef68b56b3b,Constrained Joint Cascade Regression Framework for Simultaneous Facial Action Unit Recognition and Facial Landmark Detection,"nd Face shape relationship2)AU relationship3)Face shape patternUpdate facial landmark locationsUpdate AU activation probabilitiesAU activation probabilitiesCurrent landmark locationsFigure1.Constrainedjointcascaderegressionframeworkforsi-multaneousfacialactionunitrecognitionandlandmarkdetection.wouldenablethemachineunderstandingofhumanfacialbehavior,intent,emotionetc.Facialactionunitrecognitionandfaciallandmarkdetec-tionarerelatedtasks,buttheyareseldomlyexploitedjointlyintheliteratures.Forexample,thefaceshapedefinedbythelandmarklocationsareconsideredaseffectivefeaturesforAUrecognition.But,thelandmarklocationinforma-tionisusuallyextractedbeforehandwithfaciallandmarkdetectionalgorithms.Ontheotherhand,theActionUnitinformationisrarelyutilizedintheliteraturetohelpfaciallandmarkdetection,eventhoughthefacialmusclemove-mentsandtheactivationofspecificfacialactionunitcancausetheappearanceandshapechangesofthefacewhichsignificantlyaffectfaciallandmarkdetection.Themutualinformationandintertwinedrelationshipamongfacialac-tionunitrecognitionandfaciallandmarkdetectionshouldbeutilizedtoboosttheperformancesofbothtasks.Cascaderegressionframeworkhasbeenshowntobeaneffectivemethodforfacealignmentrecently[19][13].Itstartsfromaninitialfaceshape(e.g.meanface)anditit-erativelyupdatesthefaciallandmarklocationsbasedonthelocalappearancefeaturesuntilconvergence.Severalregres-sionmodelshavebeenappliedtolearnthemappingfromthelocalappearancefeaturestothefaceshapeupdate.Toleveragethesuccessofthecascaderegressionframe-workandtoachievethegoalofjointfacialactionunit13400"
+7a7db5a1325844b62d2ecf8489872c8f515f1c37,Nuclear Norm-Based 2-DPCA for Extracting Features From Images,"Nuclear Norm-Based 2-DPCA for Extracting
+Features From Images
+Fanlong Zhang, Jian Yang, Member, IEEE, Jianjun Qian, and Yong Xu, Member, IEEE"
+7a776f080b270c8759b2b4fe601682276d1b2eb4,Multi-target Tracking with Sparse Group Features and Position Using Discrete-Continuous Optimization,"Multi-Target Tracking with Sparse Group
+Features and Position using Discrete-Continuous
+Optimization
+Billy Peralta (1) and Alvaro Soto (2)
+(1)Universidad Cat´olica de Temuco, (2)Pontificia Universidad Cat´olica de Chile"
+7a3676dcf55e22c7249eac7615174309617c8246,Joint Feature Learning With Robust Local Ternary Pattern for Face Recognition,"International Journal of Application or Innovation in Engineering & Management (IJAIEM)
+Web Site: www.ijaiem.org Email:
+ISSN 2319 - 4847
+Volume 5, Issue 6, June 2016
+Joint Feature Learning With Robust Local
+Ternary Pattern for Face Recognition
+Yuvaraju.M1, Shalini.S2
+Nadu, India
+Assistant Professor, Department of Electrical and Electronics Engineering, Anna University Regional Campus, Coimbatore, Tamil
+Pg Scholar, Department of Electrical and Electronics Engineering, Anna University Regional Campus, Coimbatore, Tamil Nadu,
+India"
+7ac25c5391251611696d16e677bd71040d80d583,Person Re-Identification by Saliency Learning,"MANUSCRIPT DRAFT
+Person Re-identification by saliency Learning
+Rui Zhao, Student Member, IEEE, Wanli Oyang, Member, IEEE, and
+Xiaogang Wang, Member, IEEE"
+7aa4c16a8e1481629f16167dea313fe9256abb42,Multi-task learning for face identification and attribute estimation,"978-1-5090-4117-6/17/$31.00 ©2017 IEEE
+ICASSP 2017"
+7ad204758df6c921010d9967a5b7449dd406ea56,Deep Face Quality Assessment,"Deep Face Quality Assessment
+Vishal Agarwal
+Department of Electronics and Electrical Engineering
+Indian Institute of Technology Guwahati
+India"
+7ad7897740e701eae455457ea74ac10f8b307bed,Random Subspace Two-dimensional LDA for Face Recognition,"Random Subspace Two-dimensional LDA for Face Recognition*
+Garrett Bingham1"
+7acc05ae92823c12b28d6ad73cb2a7707ccb6c7b,Single view-based 3D face reconstruction robust to self-occlusion,"Lee et al. EURASIP Journal on Advances in Signal Processing 2012, 2012:176
+http://asp.eurasipjournals.com/content/2012/1/176
+R ES EAR CH
+Open Access
+Single view-based 3D face reconstruction robust
+to self-occlusion
+Youn Joo Lee1, Sung Joo Lee2, Kang Ryoung Park3, Jaeik Jo1 and Jaihie Kim1*"
+7a7b1352d97913ba7b5d9318d4c3d0d53d6fb697,Attend and Rectify: a Gated Attention Mechanism for Fine-Grained Recovery,"Attend and Rectify: a Gated Attention
+Mechanism for Fine-Grained Recovery
+Pau Rodr´ıguez†, Josep M. Gonfaus‡, Guillem Cucurull†,
+F. Xavier Roca†, Jordi Gonz`alez†
+Computer Vision Center and Universitat Aut`onoma de Barcelona (UAB),
+Campus UAB, 08193 Bellaterra, Catalonia Spain
+Visual Tagging Services, Parc de Recerca, Campus UAB"
+7aa062c6c90dba866273f5edd413075b90077b51,Minimizing Separability : A Comparative Analysis of Illumination Compensation Techniques in Face Recognition,"I.J. Information Technology and Computer Science, 2017, 5, 40-51
+Published Online May 2017 in MECS (http://www.mecs-press.org/)
+DOI: 10.5815/ijitcs.2017.05.06
+Minimizing Separability: A Comparative Analysis
+of Illumination Compensation Techniques in Face
+Recognition
+Chollette C. Olisah
+Department of Computer Science and IT, Baze University, Abuja, Nigeria
+E-mail:"
+7a8ba1a6c90b56ae0a98fe43d015ab0f2a73912e,A Vision-Based Hybrid Method for Eye Detection and Tracking,"A Vision-Based Hybrid Method for Eye Detection and Tracking
+International Journal of Security and Its Applications
+Vol. 7, No. 4, July, 2013
+Kun Mu
+Department of Computer Science and Engineering, Henan Institute of Engineering,
+Zhengzhou 451191, China"
+146879bd04a1ab25dce3484bc587e5f2ff1b1d91,Securing Certificate Revocation through Speaker Verification: the CertiVeR Project,"Securing Certificate Revocation through Speaker Verification:
+the CertiVeR Project
+Javier R. Saeta1, Javier Hernando2, Oscar Manso3, Manel Medina3
+Biometric Technologies, S.L. Barcelona, Spain
+TALP Research Center. Universitat Politècnica de Catalunya, Spain
+SeMarket, S.A. Barcelona, Spain"
+1451e7b11e66c86104f9391b80d9fb422fb11c01,Image privacy protection with secure JPEG transmorphing,"IET Signal Processing
+Research Article
+Image privacy protection with secure JPEG
+transmorphing
+ISSN 1751-9675
+Received on 30th December 2016
+Revised 13th July 2017
+Accepted on 11th August 2017
+doi: 10.1049/iet-spr.2016.0756
+www.ietdl.org
+Lin Yuan1 , Touradj Ebrahimi1
+Multimedia Signal Processing Group, Electrical Engineering Department, EPFL, Station 11, Lausanne, Switzerland
+E-mail:"
+1456f147381bf7c385225d854c2fb48c19eca285,LCAV-31: a dataset for light field object recognition,"Computational Imaging XII, edited by Charles A. Bouman, Ken D. Sauer, Proc. of SPIE-IS&T Electronic Imaging,
+SPIE Vol. 9020, 902014 · © 2014 SPIE-IS&T · CCC code: 0277-786X/14/$18 · doi: 10.1117/12.2041097
+Proc. of SPIE-IS&T/ Vol. 9020 902014-1"
+143e3ec5a5a11547da2d77a17d0ca7b1940280b5,"People detection, tracking and re-identification through a video camera network. (Détection, suivi et ré-identification de personnes à travers un réseau de caméra vidéo)","People detection, tracking and re-identification through
+video camera network
+Malik Souded
+To cite this version:
+Malik Souded. People detection, tracking and re-identification through a video camera network.
+Other [cs.OH]. Université Nice Sophia Antipolis, 2013. English. <NNT : 2013NICE4152>. <tel-
+00913072v2>
+HAL Id: tel-00913072
+https://tel.archives-ouvertes.fr/tel-00913072v2
+Submitted on 29 Jan 2014
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents
+scientifiques de niveau recherche, publiés ou non,
+émanant des établissements d’enseignement et de"
+14aad0d391a9491eb122d5b6af6c325a0e090dc7,Development of an Efficient Face Recognition System based on Linear and Nonlinear Algorithms,"Development of an Efficient Face Recognition System based on Linear and Nonlinear Algorithms
+{tag} {/tag}
+International Journal of Computer Applications
+Foundation of Computer Science (FCS), NY, USA
+Volume 134
+Number 7
+Year of Publication: 2016
+Authors:
+Filani Araoluwa S., Adetunmbi Adebayo O.
+10.5120/ijca2016907932
+{bibtex}2016907932.bib{/bibtex}"
+14761b89152aa1fc280a33ea4d77b723df4e3864,Zero-Shot Learning via Visual Abstraction,
+14fdec563788af3202ce71c021dd8b300ae33051,Social Influence Analysis based on Facial Emotions,"Social Influence Analysis based on Facial Emotions
+Pankaj Mishra, Rafik Hadfi, and Takayuki Ito
+Department of Computer Science and Engineering
+Nagoya Institute of Technology, Gokiso, Showa-ku, Nagoya, 466-8555 Japan
+{pankaj.mishra,"
+14fed18d838bf6b89d98837837ff314e61ab7c60,Deep Learning with Differential Privacy,"A preliminary version of this paper appears in the proceedings of the 23rd ACM Conference on Computer and Communications Security
+(CCS 2016). This is a full version.
+Deep Learning with Differential Privacy
+Martín Abadi∗
+H. Brendan McMahan∗
+October 25, 2016
+Andy Chu∗
+Ilya Mironov∗
+Li Zhang∗
+Ian Goodfellow†
+Kunal Talwar∗"
+1419956b08f9ab398cd2100ddec74271ef5fa72c,Joint detection and online multi-object tracking,"Joint detection and online multi-object tracking
+Hilke Kieritz, Wolfgang H¨ubner, and Michael Arens
+Fraunhofer IOSB, Germany"
+149e5e5eeea5a9015ab5ae755f62c45ef70fa79b,Hierarchical Convolutional Features for Visual Tracking,"Hierarchical Convolutional Features for Visual Tracking
+Chao Ma
+Jia-Bin Huang
+Xiaokang Yang
+Ming-Hsuan Yang
+UC Merced"
+1414d4880e368414cbbbbd215e8b0471f185aa03,Face Detection in Low-Resolution Color Images,"Face Detection in Low-resolution Color Images
+Jun Zheng, Geovany A. Ramirez, and Olac Fuentes,
+Computer Science Department,
+University of Texas at El Paso,
+El Paso, Texas, 79968, U.S.A.
+No Institute Given"
+140dbcb0be3ce7961ed551f129698e9ad4c9aa8c,Interactive Learning and its Role in Pervasive Robotics,"Interactive Learning and its Role in Pervasive Robotics
+Cynthia Matuszek
+Dieter Fox
+Nicholas FitzGerald
+Evan Herbst"
+1459d4d16088379c3748322ab0835f50300d9a38,Cross-Domain Visual Matching via Generalized Similarity Measure and Feature Learning,"Cross-Domain Visual Matching via Generalized
+Similarity Measure and Feature Learning
+Liang Lin, Guangrun Wang, Wangmeng Zuo, Xiangchu Feng, and Lei Zhang"
+14f964d152337e963e4a4fd3619f6030aa75deb1,Person Re-identification by Discriminatively Selecting Parts and Features,"Person re-identification by discriminatively
+selecting parts and features
+Amran Bhuiyan, Alessandro Perina and Vittorio Murino
+Pattern Analysis and Computer Vision (PAVIS)
+Istituto Italiano di Tecnologia
+Genova, Italy"
+1450296fb936d666f2f11454cc8f0108e2306741,Learning to Discover Cross-Domain Relations with Generative Adversarial Networks,"Learning to Discover Cross-Domain Relations
+with Generative Adversarial Networks
+Taeksoo Kim 1 Moonsu Cha 1 Hyunsoo Kim 1 Jung Kwon Lee 1 Jiwon Kim 1"
+14373c9fd08dee8f7195a88430121c69bbebbe1b,Head Pose Estimation Using Covariance of Oriented Gradients,"978-1-4244-4296-6/10/$25.00 ©2010 IEEE
+ICASSP 2010"
+14a01628169a3a060b6af5d5dcdeeb584b648abf,Semi-Supervised Multiresolution Classification Using Adaptive Graph Filtering With Application to Indirect Bridge Structural Health Monitoring,"Semi-Supervised Multiresolution Classification
+Using Adaptive Graph Filtering with Application to
+Indirect Bridge Structural Health Monitoring
+Siheng Chen, Student Member, IEEE, Fernando Cerda, Piervincenzo Rizzo, Jacobo Bielak, James H. Garrett and
+Jelena Kovaˇcevi´c, Fellow, IEEE"
+147b7998526ebbdf64b1662503b378d9f6456ccd,Generative Adversarial Networks for Image Steganography,"Under review as a conference paper at ICLR 2017
+GENERATIVE ADVERSARIAL NETWORKS FOR IMAGE
+STEGANOGRAPHY
+Denis Volkhonskiy2,3, Boris Borisenko3 and Evgeny Burnaev1,2,3
+Skolkovo Institute of Science and Technology
+The Institute for Information Transmission Problems RAS (Kharkevich Institute)
+National Research University Higher School of Economics (HSE)"
+143b54525bdda1f83965002616a4e7b5b9f523a3,A probabilistic patch based image representation using Conditional Random Field model for image classification,"A probabilistic patch based image representation using Conditional Random Field model
+for image classification
+Fariborz Taherkhani
+Department of Electrical Engineering and Computer Science, University of Wisconsin-Milwaukee, USA"
+14a022a3eb8cc9681b1ab075650d462788de1fa0,GANs for Biological Image Synthesis,"GANs for Biological Image Synthesis
+INRIA/ENS∗, France
+Anton Osokin
+HSE†, Russia
+Anatole Chessel
+´Ecole Polytechnique‡,
+France"
+14860877a790d99296a990281b22e6b6a430b64f,Deep Over-sampling Framework for Classifying Imbalanced Data,"Deep Over-sampling Framework for Classifying
+Imbalanced Data
+Shin Ando1 and Chun Yuan Huang2
+School of Management,
+Tokyo University of Science,
+-11-2 Fujimi, Chiyoda-ku, Tokyo, Japan
+School of Management,
+Tokyo University of Science,
+-11-2 Fujimi, Chiyoda-ku, Tokyo, Japan"
+14f0283c703e450e5f17cbe94878896de865ce30,International Journal of Advance Research and Innovation,"Volume 3, Issue 2 (2015) 383-385
+ISSN 2347 - 3258
+International Journal of Advance Research and Innovation
+Robust Visual Tracking for Multiple Targets with Data Association and
+Track Management
+N. Mahalakshmi, S. R. Saranya
+Department of Computer Science Engineering, Dhanalakshmi Srinivasan Engineering College, Perambalur, Tamil
+Nadu, India
+Article Info
+Article history:
+Received 5 April 2015
+Received in revised form
+0 April 2015
+Accepted 20 May 2015
+Available online 15 June 2015
+Keywords
+Online Multi-Object Tracking,
+Tracking-By Detection,
+Data Association,
+Track Management,"
+1442319de86d171ce9595b20866ec865003e66fc,Vision-Based Fall Detection with Convolutional Neural Networks,"Vision-Based Fall Detection with Convolutional
+Neural Networks
+Adri´an Nu˜nez-Marcos1, Gorka Azkune1, Ignacio Arganda-Carreras234
+DeustoTech - University of Deusto
+Avenida de las Universidades, 24 - 48007, Bilbao, Spain
+Dept. of Computer Science and Artificial Intelligence, Basque
+Country University, San Sebastian, Spain
+P. Manuel Lardizabal, 1 - 20018, San Sebastian, Spain
+Ikerbasque, Basque Foundation for Science, Bilbao, Spain
+Maria Diaz de Haro, 3 - 48013 Bilbao, Spain
+Donostia International Physics Center (DIPC), San Sebastian, Spain
+P. Manuel Lardizabal, 4 - 20018, San Sebastian, Spain"
+147c33df99dd52502d65fe390ee45c585349b3b3,Pixel and Feature Level Based Domain Adaption for Object Detection in Autonomous Driving,"Pixel and Feature Level Based Domain Adaption
+for Object Detection in Autonomous Driving
+Yuhu Shan, Wen Feng Lu, Chee Meng Chew"
+146e6504d473b92e56108b7276d96aebaa58ccfc,3 D Model and Part Fusion for Vehicle Retrieval,"International Journal of Research in Advent Technology, Vol.2, No.5, May 2014
+E-ISSN: 2321-9637
+D Model and Part Fusion for Vehicle Retrieval
+M.Nagarasan1, T.N.Chitradevi2, S.Senthilnathan3
+Department of computer science and engineering1,2, 3
+Aditya institute of technology, Coimbatore.1, 3,Sri Ramakrishna Engineering College, Coimbatore2"
+1462bc73834e070201acd6e3eaddd23ce3c1a114,Face Authentication /recognition System for Forensic Application Using Sketch Based on the Sift Features Approach,"International Journal of Science, Engineering and Technology Research (IJSETR), Volume 3, Issue 4, April 2014
+FACE AUTHENTICATION /RECOGNITION
+SYSTEM FOR FORENSIC APPLICATION
+USING SKETCH BASED ON THE SIFT
+FEATURES APPROACH
+Poonam A. Katre
+Department of Electronics Engineering KITS,
+RTMNU Nagpur University, India"
+143ac3b7338e240b106863d35177c4567ef9c1aa,Euclidean & Geodesic Distance between a Facial Feature Points in Two-Dimensional Face Recognition System,"Euclidean & Geodesic Distance between a Facial
+Feature Points in Two-Dimensional Face
+Recognition System
+Rachid AHDID1, Khaddouj TAIFI1, Said SAFI1 and Bouzid MANAUT2"
+1471c0b72e4a88b39e59362bf169bb35915966a9,Extended Coding and Pooling in the HMAX Model,"Extended coding and pooling in the HMAX model
+Christian Th´eriault, Nicolas Thome, Member, IEEE, and Matthieu Cord, Member, IEEE
+Universit´e Pierre et Marie Curie, UPMC-Sorbonne Universities, LIP6, 4 place Jussieu, 75005, Paris, France"
+1436d72a51feefda3278068a164d263f6d845236,Interactive Learning a Person Detector: Fewer Clicks – Less Frustration1,"INTERACTIVE LEARNING A PERSON
+DETECTOR:
+FEWER CLICKS – LESS FRUSTRATION1
+Peter M. Roth2, Helmut Grabner2, Christian Leistner2,
+Martin Winter2, and Horst Bischof2"
+140c95e53c619eac594d70f6369f518adfea12ef,Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A,"Pushing the Frontiers of Unconstrained Face Detection and Recognition: IARPA Janus Benchmark A
+Brendan F. Klare, Emma Taborsky , Austin Blanton , Jordan Cheney , Kristen Allen , Patrick Grother , Alan Mah , Anil K. Jain
+The development of accurate and scalable unconstrained face recogni-
+tion algorithms is a long term goal of the biometrics and computer vision
+ommunities. The term “unconstrained” implies a system can perform suc-
+essful identifications regardless of face image capture presentation (illumi-
+nation, sensor, compression) or subject conditions (facial pose, expression,
+occlusion). While automatic, as well as human, face identification in certain
+scenarios may forever be elusive, such as when a face is heavily occluded or
+aptured at very low resolutions, there still remains a large gap between au-
+tomated systems and human performance on familiar faces. In order to close
+this gap, large annotated sets of imagery are needed that are representative
+of the end goals of unconstrained face recognition. This will help continue
+to push the frontiers of unconstrained face detection and recognition, which
+re the primary goals of the IARPA Janus program.
+The current state of the art in unconstrained face recognition is high
+ccuracy (roughly 99% true accept rate at a false accept rate of 1.0%) on
+faces that can be detected with a commodity face detectors, but unknown
+ccuracy on other faces. Despite the fact that face detection and recognition
+research generally has advanced somewhat independently, the frontal face"
+1467c4ab821c3b340abe05a1b13a19318ebbce98,Multitask and transfer learning for multi-aspect data,"Multitask and Transfer Learning for
+Multi-Aspect Data
+Bernardino Romera Paredes
+A dissertation submitted in partial fulfillment
+of the requirements for the degree of
+Doctor of Philosophy of University College London."
+142dcfc3c62b1f30a13f1f49c608be3e62033042,Adaptive region pooling for object detection,"Adaptive Region Pooling for Object Detection
+Yi-Hsuan Tsai
+UC Merced
+Onur C. Hamsici
+Qualcomm Research, San Diego
+Ming-Hsuan Yang
+UC Merced"
+14e9eaa6ac23996e9a62060c8da90bdb7116ee37,Localization Recall Precision (LRP): A New Performance Metric for Object Detection,[cs.CV] 5 Jul 2018
+14f457bcb5c3e294919512b132bb171bdcaf5ec2,Understanding Human Actions in Still Images a Dissertation Submitted to the Department of Computer Science and the Committee on Graduate Studies of Stanford University in Partial Fulfillment of the Requirements for the Degree of Doctor of Philosophy,"UNDERSTANDING HUMAN ACTIONS
+IN STILL IMAGES
+A DISSERTATION
+SUBMITTED TO THE DEPARTMENT OF COMPUTER SCIENCE
+AND THE COMMITTEE ON GRADUATE STUDIES
+OF STANFORD UNIVERSITY
+IN PARTIAL FULFILLMENT OF THE REQUIREMENTS
+FOR THE DEGREE OF
+DOCTOR OF PHILOSOPHY
+Bangpeng Yao
+August 2013"
+14c988aa9086207b337dcc5611aad08422129b42,Human Relative Position Detection Based on Mutual Occlusion,"Human Relative Position Detection
+Based on Mutual Occlusion
+V´ıctor Borjas, Michal Drozdzal, Petia Radeva, and Jordi Vitri`a
+Facultat de Matem`atiques & Centre de Visi`o per Computador,
+Universitat de Barcelona,
+Campus UAB"
+14e428f2ff3dc5cf96e5742eedb156c1ea12ece1,Facial Expression Recognition Using Neural Network Trained with Zernike Moments,"Facial Expression Recognition Using Neural Network Trained with Zernike
+Moments
+Mohammed Saaidia
+Dept. Génie-Electrique
+Université M.C.M Souk-Ahras
+Souk-Ahras, Algeria"
+148721b162dd355812fae94c8aaf365e5e2c3a79,"Vista: A Visually, Socially, and Temporally-aware Model for Artistic Recommendation","Vista: A Visually, Socially, and Temporally-aware Model
+for Artistic Recommendation
+Ruining He
+UC San Diego
+Chen Fang
+Adobe Research
+Zhaowen Wang
+Adobe Research
+Julian McAuley
+UC San Diego"
+147fe6bfc76f30ccacc3620662511e452bc395f6,A Survey of Face Recognition Techniques,"Invited Paper
+Journal of Information Processing Systems, Vol.5, No.2, June 2009 41
+A Survey of Face Recognition Techniques
+Rabia Jafri* and Hamid R. Arabnia*"
+14a5feadd4209d21fa308e7a942967ea7c13b7b6,Content-based vehicle retrieval using 3D model and part information,"978-1-4673-0046-9/12/$26.00 ©2012 IEEE
+ICASSP 2012"
+14fee990a372bcc4cb6dc024ab7fc4ecf09dba2b,Modeling Spatio-Temporal Human Track Structure for Action Localization,"Modeling Spatio-Temporal Human Track Structure for Action
+Localization
+Guilhem Ch´eron · Anton Osokin · Ivan Laptev · Cordelia Schmid"
+14ee4948be56caeb30aa3b94968ce663e7496ce4,SmileNet: Registration-Free Smiling Face Detection,"SmileNet: Registration-Free Smiling Face Detection In The Wild.
+Jang, Y; Gunes, H; Patras, I
+© Copyright 2018 IEEE
+For additional information about this publication click this link.
+http://qmro.qmul.ac.uk/xmlui/handle/123456789/36405
+Information about this research object was correct at the time of download; we occasionally
+make corrections to records, please therefore check the published record when citing. For
+more information contact"
+8ea56e4697430d1dbc728bad5a6e8ebafcced835,Adaptive Stochastic Gradient Descent on the Grassmannian for Robust Low-Rank Subspace Recovery,"Adaptive Stochastic Gradient Descent on the
+Grassmannian for Robust Low-Rank Subspace
+Recovery
+Jun He, Member, IEEE, Yue Zhang, Student Member, IEEE"
+8ec76d7d4a9abd09f088fb3f7a3351a7fda1fde0,Generative Adversarial Networks to Synthetically Augment Data for Deep Learning based Image Segmentation *,"Proceedings of the OAGM Workshop 2018
+DOI: 10.3217/978-3-85125-603-1-07"
+8e9f973e9d01fdd275af6c1460e5307d2ff3d2bc,OF KITH AND KIN 1 Of kith and kin :,"OF KITH AND KIN
+Of kith and kin:
+Perceptual enrichment, expectancy and reciprocal processing in face perception
+Joshua Correll Sean M. Hudson Steffanie Guillermo Holly A. Earls
+University of Colorado Boulder
+Author Note
+Joshua Correll, Sean M. Hudson, Steffanie Guillermo, Holly A. Earls, Department of
+Psychology & Neuroscience, University of Colorado Boulder.
+We dedicate this paper to the memory of Sean Hudson, a wonderful scientist and a true
+friend. We thank Jasmin Cloutier, Tim Correll, Tim Curran, Tiffany Ito, Sarah Lamer,
+Debbie Ma, Max Weisbuch, and Bernd Wittenbrink for their thoughtful comments on
+previous drafts.
+Correspondence should be addressed to Joshua Correll, Department of Psychology &
+Neuroscience, UCB 345, Boulder, Colorado, 80309-0345;"
+8ea9093542075bd8cc4928a4c671a95f363c61ef,Sliced-Wasserstein Autoencoder : An Embarrassingly Simple Generative Model,"Sliced-Wasserstein Autoencoder: An
+Embarrassingly Simple Generative Model"
+8ee62f7d59aa949b4a943453824e03f4ce19e500,Robust Head-Pose Estimation Based on Partially-Latent Mixture of Linear Regressions,"Robust Head-Pose Estimation Based on
+Partially-Latent Mixture of Linear Regression
+Vincent Drouard∗, Radu Horaud∗, Antoine Deleforge†, Sil`eye Ba∗ and Georgios Evangelidis∗
+INRIA Grenoble Rhˆone-Alpes, Montbonnot Saint-Martin, France
+INRIA Rennes Bretagne Atlantique, Rennes, France"
+8e33183a0ed7141aa4fa9d87ef3be334727c76c0,Robustness of Face Recognition to Image Manipulations,"– COS429 Written Report, Fall 2017 –
+Robustness of Face Recognition to Image Manipulations
+Cathy Chen (cc27), Zachary Liu (zsliu), and Lindy Zeng (lindy)
+. Motivation
+We can often recognize pictures of people we know even if the image has low resolution or obscures
+part of the face, if the camera angle resulted in a distorted image of the subject’s face, or if the
+subject has aged or put on makeup since we last saw them. Although this is a simple recognition task
+for a human, when we think about how we accomplish this task, it seems non-trivial for computer
+lgorithms to recognize faces despite visual changes.
+Computer facial recognition is relied upon for many application where accuracy is important.
+Facial recognition systems have applications ranging from airport security and suspect identification
+to personal device authentication and face tagging [7]. In these real-world applications, the system
+must continue to recognize images of a person who looks slightly different due to the passage of
+time, a change in environment, or a difference in clothing.
+Therefore, we are interested in investigating face recognition algorithms and their robustness to
+image changes resulting from realistically plausible manipulations. Furthermore, we are curious
+bout whether the impact of image manipulations on computer algorithms’ face recognition ability
+mirrors related insights from neuroscience about humans’ face recognition abilities.
+. Goal
+In this project, we implement both face recognition algorithms and image manipulations. We then"
+8e3d0b401dec8818cd0245c540c6bc032f169a1d,McGan: Mean and Covariance Feature Matching GAN,"McGan: Mean and Covariance Feature Matching GAN
+Youssef Mroueh * 1 2 Tom Sercu * 1 2 Vaibhava Goel 2"
+8e7749f635b161558efa3e98a324e88c73e2b18f,[Neuroimaging findings in autism: a brief review].,"Türk Psikiyatri Dergisi 2009;
+Turkish Journal of Psychiatry
+Neuroimaging Findings in Auti sm: A Brief Review
+Halime Tuna ULAY1, Aygün ERTUĞRUL2"
+8edb2219370a86c4277549813d36a6c139503fb4,Facial feature units’ localization using horizontal information of most significant bit planes,"Journal of Engineering and Technology Research Vol. 3(14), pp. 381-387, 22 December, 2011
+Available online at http:// www.academicjournals.org/JETR
+DOI: 10.5897/JETR11.068
+ISSN 2006-9790 ©2011 Academic Journals
+Full Length Research Paper
+Facial feature units’ localization using horizontal
+information of most significant bit planes
+Asif Khan1*, Khalilullah1, Ihtesham-Ul-Islam1 and Mohammad A. U. Khan2
+FAST National University of Computer and Emerging Sciences, Peshawar, Pakistan.
+Effat University, Jeddah, Saudi Arabia.
+Accepted 8 November, 2011
+We present here an approach to find the exact position of some feature units related to human face
+images. We use the horizontal information in most significant bit planes of images to accomplish the
+task. Finding location of facial feature units is of importance as most human face recognition
+pproaches take it as initial point. The prominent feature units in a face are eyes, nostrils and lips which
+re usually oriented in horizontal direction and visually significant in face image. The majority of the
+visually significant data in image can be extracted using higher order bits of that image. Our four step
+method consists of bit planes processing, separating horizontal information using wavelet transform
+(WT), binary thresholding and appropriate combination of Dilation and Erosion. The proposed method
+shows high accuracy in the presence of all real world situations like various gestures, illumination"
+8eeab0aeb3170b1ef6497745d2a9bf78c001331d,Machine Vision Techniques for the Evaluation of Animal Behaviour,"Machine Vision Techniques for the
+Evaluation of Animal Behaviour
+Dr Derek Robert Magee
+Submitted in accordance with the requirements
+for the degree of Doctor of Philosophy
+SI T Y O
+The University of Leeds
+School of Computing
+October 2000
+The candidate confirms that the work submitted is his own and that appropriate credit has been
+given where reference has been made to the work of others."
+8e94ed0d7606408a0833e69c3185d6dcbe22bbbe,For your eyes only,"© 2012 IEEE. Personal use of this material is permitted. Permission from IEEE
+must be obtained for all other uses, in any current or future media, including
+reprinting/republishing this material for advertising or promotional purposes,
+reating new collective works, for resale or redistribution to servers or lists, or
+reuse of any copyrighted component of this work in other works.
+Pre-print of article that will appear at WACV 2012."
+8e6957334ab60111fd7e2ae59b008a745223aabe,An incremental learning face recognition system for single sample per person,"An Incremental Learning Face Recognition System
+for Single Sample Per Person
+Tao Zhu, Furao Shen and Jinxi Zhao
+recognition system. In nowadays, most of the existed in-
+remental
+learning systems are designed to update the
+eigenspace of face data as new images arrive [8]. To our
+knowledge, few of them can automatically decide when to
+learn new information from an input image. In other words,
+they need an external observer to tell them how to prevent
+learning distorted information from a misclassified or non-
+ideal image. Moreover, few of these methods can be applied
+in the scenario of single sample per person.
+In this paper, we mainly focus on the issue of robust incre-
+mental face recognition under the condition of one training
+sample per person. Inspired by the Single Image subspace
+(SIS) approach [9], we propose an incremental learning face
+recognition system. The goals of the proposed system are:
+(1) self-adaptively updating and adjusting training samples
+during learning process; (2) keeping learning new knowledge"
+8e64f7f38db57ddc197cc7a9c51b914920ee99cc,An Optimized Framework for Detection and Tracking of Video Objects in Challenging Backgrounds,"The International Journal of Multimedia & Its Applications (IJMA) Vol.6, No.4, August 2014
+AN OPTIMIZED FRAMEWORK FOR DETECTION
+AND TRACKING OF VIDEO OBJECTS IN
+CHALLENGING BACKGROUNDS
+Sukanyathara J1 and Alphonsa Kuriakose2
+Department of Computer Science & Engineering,
+Viswajyothi College of Engineering & Technology, MG University, Kerala, India"
+8e461978359b056d1b4770508e7a567dbed49776,LOMo: Latent Ordinal Model for Facial Analysis in Videos,"LOMo: Latent Ordinal Model for Facial Analysis in Videos
+Karan Sikka1,∗
+Gaurav Sharma2,3,†
+Marian Bartlett1,∗,‡
+UCSD, USA
+MPI for Informatics, Germany
+IIT Kanpur, India"
+8ea30ade85880b94b74b56a9bac013585cb4c34b,From turbo hidden Markov models to turbo state-space models [face recognition applications],"FROM TURBO HIDDEN MARKOV MODELS TO TURBO STATE-SPACE MODELS
+Florent Perronnin and Jean-Luc Dugelay
+Institut Eur´ecom
+Multimedia Communications Department
+BP 193, 06904 Sophia Antipolis Cedex, France
+fflorent.perronnin,"
+8e723e8a3a5a9ea258591d384232e0251f842a1c,Twin-GAN - Unpaired Cross-Domain Image Translation with Weight-Sharing GANs,"Twin-GAN – Unpaired Cross-Domain Image
+Translation with Weight-Sharing GANs
+Jerry Li
+Google
+600 Amphitheatre Parkway, Mountain View, CA 94040"
+8e8e3f2e66494b9b6782fb9e3f52aeb8e1b0d125,"Detecting and classifying scars, marks, and tattoos found in the wild","in any current or
+future media,
+for all other uses,
+ 2012 IEEE. Personal use of this material is permitted. Permission from IEEE must be
+obtained
+including
+reprinting/republishing this material for advertising or promotional purposes, creating
+new collective works, for resale or redistribution to servers or lists, or reuse of any
+opyrighted component of this work in other works.
+Pre-print of article that will appear at BTAS 2012.!!"
+8e92168860d8c6591a0c088573629e4d167f5947,"Look at the Driver, Look at the Road: No Distraction! No Accident!","Look at the Driver, Look at the Road: No Distraction! No Accident!
+Mahdi Rezaei and Reinhard Klette
+The University of Auckland
+Private Bag 92019, Auckland, New Zealand"
+8e378ef01171b33c59c17ff5798f30293fe30686,A system for automatic face analysis based on statistical shape and texture models,"Lehrstuhl f¨ur Mensch-Maschine-Kommunikation
+der Technischen Universit¨at M¨unchen
+A System for Automatic Face Analysis
+Based on
+Statistical Shape and Texture Models
+Ronald M¨uller
+Vollst¨andiger Abdruck der von der Fakult¨at
+f¨ur Elektrotechnik und Informationstechnik
+der Technischen Universit¨at M¨unchen
+zur Erlangung des akademischen Grades eines
+Doktor-Ingenieurs
+genehmigten Dissertation
+Vorsitzender: Prof. Dr. rer. nat. Bernhard Wolf
+Pr¨ufer der Dissertation:
+. Prof. Dr.-Ing. habil. Gerhard Rigoll
+. Prof. Dr.-Ing. habil. Alexander W. Koch
+Die Dissertation wurde am 28.02.2008 bei der Technischen Universit¨at M¨unchen
+eingereicht und durch die Fakult¨at f¨ur Elektrotechnik und Informationstechnik
+m 18.09.2008 angenommen."
+8e579a8a43f6af1d66e927a48b89e8296eba63f7,Learning to hash faces using large feature vectors,"Learning to Hash Faces Using Large Feature Vectors
+Cassio E. dos Santos Jr.∗, Ewa Kijak†, Guillaume Gravier†, William Robson Schwartz∗
+Department of Computer Science, Universidade Federal de Minas Gerais, Belo Horizonte, Brazil
+IRISA & Inria Rennes (CNRS, Univ. Rennes 1), Campus de Beaulieu, Rennes, France"
+8eb2e7c9017b4a110978a1bb504accbc7b9ba211,Marching into battle: synchronized walking diminishes the conceptualized formidability of an antagonist in men.,"Downloaded from
+http://rsbl.royalsocietypublishing.org/
+on June 9, 2015
+rsbl.royalsocietypublishing.org
+Research
+Cite this article: Fessler DMT, Holbrook C.
+014 Marching into battle: synchronized
+walking diminishes the conceptualized
+formidability of an antagonist in men. Biol.
+Lett. 10: 20140592.
+http://dx.doi.org/10.1098/rsbl.2014.0592
+Received: 25 July 2014
+Accepted: 6 August 2014
+Subject Areas:
+ehaviour
+Keywords:
+synchrony, alliance, fighting capacity
+Author for correspondence:
+Daniel M. T. Fessler
+e-mail:"
+8ec7194952ee9e7cf383b1a1b0aeccaed5b7daaa,Constrained multi-target tracking for team sports activities,"Gade and Moeslund IPSJ Transactions on Computer Vision and
+Applications (2018) 10:2
+DOI 10.1186/s41074-017-0038-z
+IPSJ Transactions on Computer
+Vision and Applications
+SYSTEMS PAPER
+Open Access
+Constrained multi-target tracking for
+team sports activities
+Rikke Gade*
+nd Thomas B. Moeslund"
+8e7493bdabddc2ec99cfa2b9b862343f70c1701a,Pseudo-positive regularization for deep person re-identification,"Noname manuscript No.
+(will be inserted by the editor)
+Pseudo-positive regularization for deep person re-identification
+Fuqing Zhu · Xiangwei Kong · Haiyan Fu · Qi Tian
+Received: date / Accepted: date"
+8e8c511ebc12a093d3f73a4717ec71c32e4dbd49,The use of visual information in the recognition of posed and spontaneous facial expressions.,"The use of visual information in the recognition of posed and
+spontaneous facial expressions
+Camille Saumure
+Marie-Pier Plouffe-Demers
+Amanda Est ´ephan
+Daniel Fiset
+Caroline Blais
+Department of Psychoeducation and Psychology,
+Universit ´e du Qu ´ebec en Outaouais,
+Gatineau, Qu ´ebec, Canada
+Department of Psychoeducation and Psychology,
+Universit ´e du Qu ´ebec en Outaouais,
+Gatineau, Qu ´ebec, Canada
+Department of Psychoeducation and Psychology,
+Universit ´e du Qu ´ebec en Outaouais,
+Gatineau, Qu ´ebec, Canada
+Department of Psychoeducation and Psychology,
+Universit ´e du Qu ´ebec en Outaouais,
+Gatineau, Qu ´ebec, Canada
+Department of Psychoeducation and Psychology,"
+8e6526b46a52a18028336a8d026e9d466aa12edf,Moving Poselets: A Discriminative and Interpretable Skeletal Motion Representation for Action Recognition,"Moving Poselets: A Discriminative and Interpretable Skeletal Motion
+Representation for Action Recognition
+Lingling Tao and Ren´e Vidal
+Center for Imaging Science, Johns Hopkins University
+ltao4,"
+8ed051be31309a71b75e584bc812b71a0344a019,Class-Based Feature Matching Across Unrestricted Transformations,"Class-based feature matching across unrestricted
+transformations
+Evgeniy Bart and Shimon Ullman"
+8e36100cb144685c26e46ad034c524b830b8b2f2,Modeling Facial Geometry using Compositional VAEs,"Modeling Facial Geometry using Compositional VAEs
+Timur Bagautdinov∗1, Chenglei Wu2, Jason Saragih2, Pascal Fua1, Yaser Sheikh2
+´Ecole Polytechnique F´ed´erale de Lausanne
+Facebook Reality Labs, Pittsburgh"
+8e112ad656ff90720ae609841bd0fcb2caa90d65,"""Show me the cup"": Reference with Continuous Representations",[cs.CL] 28 Jun 2016
+8edcd935362c899e630349784e4ff8adb3a69cdc,Person re-identification using deformable patch metric learning,"Person Re-identification using Deformable Patch Metric Learning
+Sławomir B ˛ak
+Peter Carr
+Disney Research
+Pittsburgh, PA, USA, 15213"
+8ee50fd3e19729a487f7196b682ccaa2d17aa0df,Improving head and body pose estimation through semi-supervised manifold alignment,"IMPROVING HEAD AND BODY POSE ESTIMATION
+THROUGH SEMI-SUPERVISED MANIFOLD ALIGNMENT
+Alexandre Heili(cid:63), Jagannadan Varadarajan†, Bernard Ghanem‡, Narendra Ahuja(cid:63)†, Jean-Marc Odobez(cid:63)
+(cid:63) Idiap Research Institute, ´Ecole Polytechnique F´ed´erale de Lausanne, Switzerland
+Advanced Digital Sciences Center, Singapore, (cid:63)† University of Illinois at Urbana-Champaign
+King Abdullah University of Science and Technology, Saudi Arabia"
+8e0becfc5fe3ecdd2ac93fabe34634827b21ef2b,Learning from Longitudinal Face Demonstration - Where Tractable Deep Modeling Meets Inverse Reinforcement Learning,"International Journal of Computer Vision manuscript No.
+(will be inserted by the editor)
+Learning from Longitudinal Face Demonstration -
+Where Tractable Deep Modeling Meets Inverse Reinforcement Learning
+Chi Nhan Duong · Kha Gia Quach · Khoa Luu · T. Hoang Ngan Le · Marios
+Savvides · Tien D. Bui
+Received: date / Accepted: date"
+8e0cc47c194ef7daf15aaef14d61e493879ae137,Deep Network Flow for Multi-object Tracking,"Deep Network Flow for Multi-Object Tracking
+Samuel Schulter
+Paul Vernaza Wongun Choi Manmohan Chandraker
+NEC Laboratories America, Media Analytics Department
+Cupertino, CA, USA"
+22cf367d14e646914cc959bbcd402df0c20cd0dc,Towards Automated Melanoma Screening: Proper Computer Vision & Reliable Results,"Towards Automated Melanoma Screening:
+Proper Computer Vision & Reliable Results
+Michel Fornaciali, Micael Carvalho, Fl´avia Vasques Bittencourt, Sandra Avila, Eduardo Valle"
+2258e01865367018ed6f4262c880df85b94959f8,Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics,"Hindawi Publishing Corporation
+EURASIP Journal on Image and Video Processing
+Volume 2008, Article ID 246309, 10 pages
+doi:10.1155/2008/246309
+Research Article
+Evaluating Multiple Object Tracking Performance:
+The CLEAR MOT Metrics
+Keni Bernardin and Rainer Stiefelhagen
+Interactive Systems Lab, Institut f¨ur Theoretische Informatik, Universit¨at Karlsruhe, 76131 Karlsruhe, Germany
+Correspondence should be addressed to Keni Bernardin,
+Received 2 November 2007; Accepted 23 April 2008
+Recommended by Carlo Regazzoni
+Simultaneous tracking of multiple persons in real-world environments is an active research field and several approaches have
+een proposed, based on a variety of features and algorithms. Recently, there has been a growing interest in organizing systematic
+evaluations to compare the various techniques. Unfortunately, the lack of common metrics for measuring the performance of
+multiple object trackers still makes it hard to compare their results. In this work, we introduce two intuitive and general metrics to
+llow for objective comparison of tracker characteristics, focusing on their precision in estimating object locations, their accuracy
+in recognizing object configurations and their ability to consistently label objects over time. These metrics have been extensively
+used in two large-scale international evaluations, the 2006 and 2007 CLEAR evaluations, to measure and compare the performance
+of multiple object trackers for a wide variety of tracking tasks. Selected performance results are presented and the advantages and"
+229e105fd4d34815e476702dd5ca4362943c475d,WildDash - Creating Hazard-Aware Benchmarks,"WildDash - Creating Hazard-Aware Benchmarks
+Oliver Zendel, Katrin Honauer, Markus Murschitz, Daniel Steininger, and
+Gustavo Fern´andez Dom´ınguez
+AIT, Austrian Institute of Technology, Giefinggasse 4, 1210, Vienna, Austria
+{oliver.zendel, katrin.honauer.fl, markus.murschitz, daniel.steininger,"
+22043cbd2b70cb8195d8d0500460ddc00ddb1a62,Separability-Oriented Subclass Discriminant Analysis,"Separability-Oriented Subclass Discriminant
+Analysis
+Huan Wan, Hui Wang, Gongde Guo, Xin Wei"
+22137ce9c01a8fdebf92ef35407a5a5d18730dde,Recognition of Faces from single and Multi-View Videos,
+2270c94d3f9d9451b3d337aa5ba2d5681cb98497,Evaluation of GIST descriptors for web-scale image search,"Evaluation of GIST descriptors for web-scale image
+search
+Matthijs Douze, Hervé Jégou, Sandhawalia Harsimrat, Laurent Amsaleg,
+Cordelia Schmid
+To cite this version:
+Matthijs Douze, Hervé Jégou, Sandhawalia Harsimrat, Laurent Amsaleg, Cordelia Schmid. Evaluation
+of GIST descriptors for web-scale image search. CIVR 2009 - International Conference on Image and
+Video Retrieval, Jul 2009, Santorini, Greece. ACM, pp.19:1-8, 2009, <10.1145/1646396.1646421>.
+<inria-00394212>
+HAL Id: inria-00394212
+https://hal.inria.fr/inria-00394212
+Submitted on 23 Mar 2012
+HAL is a multi-disciplinary open access
+rchive for the deposit and dissemination of sci-
+entific research documents, whether they are pub-
+lished or not. The documents may come from
+teaching and research institutions in France or
+broad, or from public or private research centers.
+L’archive ouverte pluridisciplinaire HAL, est
+destinée au dépôt et à la diffusion de documents"
+22fb836a593267d9ff09a4d12aa5b4a6fd52c81e,Brief report: Visual processing of faces in individuals with fragile X syndrome: an eye tracking study.,"J Autism Dev Disord (2009) 39:946–952
+DOI 10.1007/s10803-009-0744-1
+B R I E F R E P O R T
+Brief Report: Visual Processing of Faces in Individuals
+with Fragile X Syndrome: An Eye Tracking Study
+Faraz Farzin Æ Susan M. Rivera Æ David Hessl
+Published online: 28 April 2009
+Ó The Author(s) 2009. This article is published with open access at Springerlink.com"
+221debbd7878ed303eaa4666f8df04a48e4c5070,Making Computer Vision Computationally Efficient,"Making computer vision computationally efficient
+Narayanan Sundaram
+Electrical Engineering and Computer Sciences
+University of California at Berkeley
+Technical Report No. UCB/EECS-2012-106
+http://www.eecs.berkeley.edu/Pubs/TechRpts/2012/EECS-2012-106.html
+May 11, 2012"
+22264e60f1dfbc7d0b52549d1de560993dd96e46,UnitBox: An Advanced Object Detection Network,"UnitBox: An Advanced Object Detection Network
+Jiahui Yu1,2
+Yuning Jiang2
+Zhangyang Wang1
+Zhimin Cao2
+Thomas Huang1
+University of Illinois at Urbana−Champaign
+Megvii Inc
+{jyu79, zwang119, {jyn,"
+220f8088f2fc1ddd9df1a0b583d3d01cb929ee8d,ROML: A Robust Feature Correspondence Approach for Matching Objects in A Set of Images,"Noname manuscript No.
+(will be inserted by the editor)
+ROML: A Robust Feature Correspondence Approach for
+Matching Objects in A Set of Images
+Kui Jia · Tsung-Han Chan · Zinan Zeng · Shenghua Gao
+Gang Wang · Tianzhu Zhang · Yi Ma"
+22029de24dbf6867658145264f36b161c40a09d8,A Discriminative Representation of Convolutional Features for Indoor Scene Recognition,"A Discriminative Representation of Convolutional
+Features for Indoor Scene Recognition
+S. H. Khan, M. Hayat, M. Bennamoun, Member, IEEE, R. Togneri, and F. Sohel, Senior Member, IEEE"
+22c01d758a4941c01239fa8facdb3407559132ed,Segmentation and Restoration of Images on Surfaces by Parametric Active Contours with Topology Changes,"Segmentation and Restoration of Images on Surfaces by Parametric
+Active Contours with Topology Changes
+Heike Benninghoff∗ and Harald Garcke†"
+22f8148e43c50341bad686d7fccb425b0682e667,Facial ethnicity classification based on boosted local texture and shape descriptions,"Facial Ethnicity Classification based on Boosted Local Texture and
+Shape Descriptions
+Huaxiong Ding, Di Huang, IEEE Member, Yunhong Wang, IEEE Member, Liming Chen, IEEE Member,"
+224547337e1ace6411a69c2e06ce538bc67923f7,Convolutional Neural Network for Camera Pose Estimation from Object Detections,"CONVOLUTIONAL NEURAL NETWORK FOR CAMERA POSE ESTIMATION FROM
+OBJECT DETECTIONS
+E. V. Shalnova, A. S. Konushina,b
+MSU, Faculty of Computational Mathematics and Cybernetics, Russia, 119991, Moscow, GSP-1, 1-52, Leninskiye Gory, -
+HSE, Faculty of Computer Science, Russia, 125319, Moscow, 3, Kochnovsky Proezd
+KEY WORDS: Camera Pose, CNN, Head Detection, Computer Graphics
+Commission II, WG II/5"
+223ec77652c268b98c298327d42aacea8f3ce23f,Acted Facial Expressions In The Wild Database,"TR-CS-11-02
+Acted Facial Expressions In The Wild
+Database
+Abhinav Dhall, Roland Goecke, Simon
+Lucey, Tom Gedeon
+September 2011
+ANU Computer Science Technical Report Series"
+228558a2a38a6937e3c7b1775144fea290d65d6c,Nonparametric Context Modeling of Local Appearance for Pose- and Expression-Robust Facial Landmark Localization,"Nonparametric Context Modeling of Local Appearance
+for Pose- and Expression-Robust Facial Landmark Localization
+Brandon M. Smith1
+Jonathan Brandt2
+University of Wisconsin–Madison
+Zhe Lin2
+Adobe Research
+Li Zhang1
+http://www.cs.wisc.edu/~lizhang/projects/face-landmark-localization/"
+2230848e506553159e0edfc20472b8cd6084be17,Vision Based Hand Puppet,"ENTERFACE’10, JULY 12TH - AUGUST 6TH, AMSTERDAM, THE NETHERLANDS.
+Vision Based Hand Puppet
+Cem Keskin, ˙Ismail Arı, Tolga Eren, Furkan Kırac¸, Lukas Rybok, Hazım Ekenel, Rainer Stiefelhagen, Lale Akarun"
+22ee43dbd2bdefbc8945d453c6cd453f49ab5eb7,Urban Traffic Surveillance in Smart Cities Using Radar Images,"Urban Traffic Surveillance in Smart Cities
+Using Radar Images
+J. S´anchez-Oro, David Fern´andez-L´opez, R. Cabido,
+Antonio S. Montemayor, and Juan Jos´e Pantrigo
+Dept. Ciencias de la Computaci´on
+Universidad Rey Juan Carlos
+Spain"
+22fdd8d65463f520f054bf4f6d2d216b54fc5677,Efficient Small and Capital Handwritten Character Recognition with Noise Reduction,"International Journal of Emerging Technology and Advanced Engineering
+Website: www.ijetae.com (ISSN 2250-2459, ISO 9001:2008 Certified Journal, Volume 3, Issue 8, August 2013)
+Efficient Small and Capital Handwritten Character
+Recognition with Noise Reduction
+Beerendra Kumar Pal, Prof. Shailendra Tiwari, Prof. Sandeep Kumar
+Department of Computer Science Engg., IES College of Technology, Bhopal"
+2251a88fbccb0228d6d846b60ac3eeabe468e0f1,Matrix-Based Kernel Subspace Methods,"Matrix-Based Kernel Subspace Methods
+S. Kevin Zhou
+Integrated Data Systems Department
+Siemens Corporate Research
+755 College Road East, Princeton, NJ 08540
+Email:"
+225fbfd99465033e993460a1bc838a87fbf42346,Gaussian-Bernoulli deep Boltzmann machine,"Gaussian-Bernoulli Deep Boltzmann Machine
+KyungHyun Cho, Tapani Raiko and Alexander Ilin
+Department of Information and Computer Science,
+Aalto University School of Science
+Email:"
+222d86787abed673600f1054796367f439c2eec1,Etworks via a Ttention T Ransfer,"Published as a conference paper at ICLR 2017
+PAYING MORE ATTENTION TO ATTENTION:
+IMPROVING THE PERFORMANCE OF CONVOLUTIONAL
+NEURAL NETWORKS VIA ATTENTION TRANSFER
+Sergey Zagoruyko, Nikos Komodakis
+Universit´e Paris-Est, ´Ecole des Ponts ParisTech
+Paris, France"
+22532c6e38ded690dc1420f05c18e23f6f24804d,Chapter 5 Genetic & Evolutionary Biometrics,"We are IntechOpen,
+the world’s leading publisher of
+Open Access books
+Built by scientists, for scientists
+,700
+08,500
+.7 M
+Open access books available
+International authors and editors
+Downloads
+Our authors are among the
+Countries delivered to
+TOP 1%
+2.2%
+most cited scientists
+Contributors from top 500 universities
+Selection of our books indexed in the Book Citation Index
+in Web of Science™ Core Collection (BKCI)
+Interested in publishing with us?
+Contact"
+2251a1efad0cef802fd64fc79cc1b7007b64f425,Estimating 3D Pose via Stochastic Search and Expectation Maximization,"-IJE=JEC !, 2IA LE= 5J?D=IJE? 5A=H?D
+-NFA?J=JE =NEE=JE
+*A ,=K>AO :E=CDK= :EA
+,AF=HJAJ B +FKJAH 5?EA?A 5M=IA= 7ELAHIEJO
+5) &22
+*,=K>AO::EA(IM=IA==?K
+)>IJH=?J 1 JDEI F=FAH = =FFH=?D EI J AIJE=JA !, FIA
+KIEC = F=HJ IJ?D=IJE? ) HAFHAIAJ=JE B JDA
+DK= EI LAH EJI JD=J AFOI BK
+A=HJ >AJMAA EJI 6DEI HAFHAIAJ=JE EI
+=C=EIJ = FFK=H =JAH=JELA LAH F=HJI KIEC E>
+J EI IDM JD=J KIEC BK E> HAIKJI E =
+JD=J EI B=H HA HAFHAIAJ=JELA B JDA HECE= JH=EEC .KH
+JDAHHA EJ EI JD=J -NFA?J=JE =NEE=JE EI IKEJ=>A
+BH AIJE=JEC !, FIA >AJJAH ?LAHCA?A EI MDA KIEC BK
+E> 6 JDA A?=?O B JDA EJ
+EI J JDA B !, FIA AIJE=JE KIEC = IECA ?K=H
+E=CA 3K=JEJ=JELA HAIKJI =HA KIEC JDA 0K=-L=
+MDE?D ?H JD=J JDA KJFAHBHI JD=J B JDA ?
+FAJEC F=HJ 1 JDEI MH KIJ = IECA EI A=HJ J"
+227b18fab568472bf14f9665cedfb95ed33e5fce,Compositional Dictionaries for Domain Adaptive Face Recognition,"Compositional Dictionaries for Domain Adaptive
+Face Recognition
+Qiang Qiu, and Rama Chellappa, Fellow, IEEE."
+227b1a09b942eaf130d1d84cdcabf98921780a22,Multi-feature shape regression for face alignment,"Yang et al. EURASIP Journal on Advances in Signal Processing (2018) 2018:51
+https://doi.org/10.1186/s13634-018-0572-6
+EURASIP Journal on Advances
+in Signal Processing
+R ES EAR CH
+Multi-feature shape regression for face
+lignment
+Wei-Jong Yang, Yi-Chen Chen, Pau-Choo Chung and Jar-Ferr Yang*
+Open Access"
+22029beb936c9871757813758c5ae3e5820260c9,Proximity Distribution Kernels for Geometric Context in Category Recognition,"Proximity Distribution Kernels for Geometric Context in Category Recognition
+Haibin Ling∗
+Stefano Soatto
+Integrated Data Systems Department
+Computer Science Department
+Siemens Corporate Research, Princeton, NJ
+University of California, Los Angeles, CA
+haibin.ling siemens.com
+soatto cs.ucla.edu"
+2279cae83716e2a00181593a7b10966020dd11d1,Real-time head pose estimation and facial feature localization using a depth sensor and triangular surface patch features,"MITSUBISHI ELECTRIC RESEARCH LABORATORIES
+http://www.merl.com
+Real-time head pose estimation and facial feature localization
+using a depth sensor and triangular surface patch features
+Papazov, C.; Marks, T.K.; Jones, M.J.
+TR2015-069
+June 2015"
+22086b3c772ba638e7d50b10bcf544abd93c9305,Face Localization based on Skin Color,"International Journal of Computer Applications (0975 – 8887)
+Volume 109 – No. 12, January 2015
+Face Localization based on Skin Color
+M. Mahadevi
+Research Scholar, M.S. University
+S.D.N.B. Vaishnav College for Women
+Chrompet,Chennai-44"
+224ffad672f7e6c7995780eb9bd3c8a141cb25cd,Understanding pedestrian behaviors from stationary crowd groups,"Understanding Pedestrian Behaviors from Stationary Crowd Groups
+Shuai Yi1, Hongsheng Li1,2, Xiaogang Wang1
+Department of Electronic Engineering, The Chinese University of Hong Kong.
+School of Electronic Engineering, University of Electronic Science and Technology of China.
+Pedestrian behavior modeling and analysis is important for crowd scene un-
+derstanding and has various applications in video surveillance. Stationary
+rowd groups are a key factor influencing pedestrian walking patterns but
+was largely ignored in literature. As shown in Figure 1 (d), the walking
+path of a pedestrian (black curve) is affected by a stationary crowd group.
+Without modeling the stationary crowd group, it is difficult to explain why
+the pedestrian detours when approaching the destination (Figure 1 (f)). Sta-
+tionary crowd groups can serve as multiple roles (Figure 1 (e)) for different
+pedestrians, such as source, destination, or obstacle. Moreover, the spatial
+distribution of stationary crowd groups might change over time (Figure 1
+(a)-(d)), which leads to the dynamic variations of traffic patterns. In our
+work, the factor of stationary crowd groups is introduced for the first time
+to model pedestrian behaviors.
+The Proposed Pedestrian Behavior Model
+A general energy map M is proposed to model the traveling difficulty of
+every location of the scene. It can be modeled with three channels calculated"
+227094e85ae30794d03f3cee426f40877ac2b11b,Performance Improvements in Face Classification using Random Forest,"Vatsal Vishwakarma, Abhishek Kumar Srivastava / International Journal of Engineering Research and
+Applications (IJERA) ISSN: 2248-9622 www.ijera.com
+Vol. 2, Issue 3, May-Jun 2012, pp.2384-2388
+Performance Improvements in Face Classification using Random Forest
+Vatsal Vishwakarma*, Abhishek Kumar Srivastava **
+*(Department of Electronics and Communication, Lovely Professional University, Jalandhar , India.)
+** (Department of Electronics and Communication, Lovely Professional University, Jalandhar , India.)"
+2236294e803316c5934fa387f27d128fa7819a03,Iterative Human Pose Estimation based on A New Part Appearance Model,"Appl. Math. Inf. Sci. 8, No. 1L, 311-317 (2014)
+Applied Mathematics & Information Sciences
+An International Journal
+http://dx.doi.org/10.12785/amis/081L39
+Iterative Human Pose Estimation based on A New Part
+Appearance Model
+Wang Hao, Meng Fanhui and Fang Baofu∗
+School of Computer and Information, Hefei Universty of Technology, Hefei, China
+Received: 15 May. 2013, Revised: 9 Sep. 2013, Accepted: 10 Sep. 2013
+Published online: 1 Apr. 2014"
+22dabd4f092e7f3bdaf352edd925ecc59821e168,Exploiting side information in locality preserving projection,"Deakin Research Online
+This is the published version:
+An, Senjian, Liu, Wanquan and Venkatesh, Svetha 2008, Exploiting side information in
+locality preserving projection, in CVPR 2008 : Proceedings of the 26th IEEE Conference on
+Computer Vision and Pattern Recognition, IEEE, Washington, D. C., pp. 1-8.
+Available from Deakin Research Online:
+http://hdl.handle.net/10536/DRO/DU:30044576
+Reproduced with the kind permissions of the copyright owner.
+Personal use of this material is permitted. However, permission to reprint/republish this
+material for advertising or promotional purposes or for creating new collective works for
+resale or redistribution to servers or lists, or to reuse any copyrighted component of this work
+in other works must be obtained from the IEEE.
+Copyright : 2008, IEEE"
+224868cc607dc38b7eca8536018580c577f9fedf,Exploring Temporal Patterns in Classifying Frustrated and Delighted Smiles,"IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, MANUSCRIPT ID
+Exploring Temporal Patterns in Classifying
+Frustrated and Delighted Smiles
+Mohammed E. Hoque, Daniel J. McDuff, and Rosalind W. Picard, Member, IEEE"
+224d4cf75e8baf32a795f38ee8ccfdf82e4c5a70,Identifying Exceptional Descriptions of People Using Topic Modeling and Subgroup Discovery,"Identifying Exceptional Descriptions of People
+using Topic Modeling and Subgroup Discovery
+Andrew T. Hendrickson, Jason Wang, and Martin Atzmueller
+Tilburg University, 5037AB, the Netherlands
+{a.hendrickson, y.w.wang,"
+220d62414053519f7b9a6aecb4aa9f775014c98c,Incremental Feature Transformation for Temporal Space,"Incremental Feature Transformation for Temporal Space
+International Journal of Computer Applications (0975 – 8887)
+Volume 145 – No.8, July 2016
+Preeti Mahadev
+University of Mysore,
+Mysuru, Karnataka,
+India
+P. Nagabhushan
+University of Mysore,
+Mysuru, Karnataka,
+India"
+229bce6384ae16a388881e766bfa5a672b61dc9b,Application of Video Scene Semantic Recognition Technology in Smart Video,"ISSN 1330-3651 (Print), ISSN 1848-6339 (Online) https://doi.org/10.17559/TV-20180620082101
+Original scientific paper
+Application of Video Scene Semantic Recognition Technology in Smart Video
+Lele QIN, Lihua KANG"
+22e189a813529a8f43ad76b318207d9a4b6de71a,What will Happen Next? Forecasting Player Moves in Sports Videos,"What will Happen Next?
+Forecasting Player Moves in Sports Videos
+Panna Felsen
+UC Berkeley, STATS
+Pulkit Agrawal
+UC Berkeley
+Jitendra Malik
+UC Berkeley"
+22e4e64c1172c90ba23f634d850931ee5f9a972f,Robust Bayesian fitting of 3D morphable model,"Robust Bayesian Fitting of 3D Morphable
+Model
+Claudia Arellano and Rozenn Dahyot
+School of Computer Science and Statistics
+Trinity College Dublin, Ireland
+7th November 2013"
+227a312324edd41892eb2c1dbc4bf8d94984a326,Deep Learning Based Vehicle Make-Model Classification,"Deep Learning Based Vehicle Make-Model
+Classification
+Burak Satar1 and Ahmet Emir Dirik2(cid:63)
+Uludag University, Bursa, Turkey
+Department of Electrical-Electronics Engineering
+Uludag University, Bursa, Turkey
+Department of Computer Engineering"
+22c89775cb5309eae5ac1f9ce9d1c2d569439492,Face recognition based on extended separable lattice 2-D HMMS,"978-1-4673-0046-9/12/$26.00 ©2012 IEEE
+ICASSP 2012"
+25ae83767c926898047bbc50971b5b11de34e12a,Detection and Tracking of Occluded People,"Noname manuscript No.
+(will be inserted by the editor)
+Detection and Tracking of Occluded People
+Siyu Tang · Mykhaylo Andriluka · Bernt Schiele
+Received: date / Accepted: date"
+25b9ef5c78dbf17c71e6fd94054dd55d66c39264,Multimedia Semantic Integrity Assessment Using Joint Embedding Of Images And Text,"Multimedia Semantic Integrity Assessment Using Joint
+Embedding Of Images And Text
+Ayush Jaiswal∗
+USC Information Sciences Institute
+Marina del Rey, CA, USA
+Ekraam Sabir∗
+USC Information Sciences Institute
+Marina del Rey, CA, USA
+Wael AbdAlmageed
+USC Information Sciences Institute
+Marina del Rey, CA, USA
+Premkumar Natarajan
+USC Information Sciences Institute
+Marina del Rey, CA, USA"
+25c19d8c85462b3b0926820ee5a92fc55b81c35a,Pose-Invariant Facial Expression Recognition Using Variable-Intensity Templates,"Noname manuscript No.
+(will be inserted by the editor)
+Pose-Invariant Facial Expression Recognition
+Using Variable-Intensity Templates
+Shiro Kumano · Kazuhiro Otsuka · Junji Yamato ·
+Eisaku Maeda · Yoichi Sato
+Received: date / Accepted: date"
+2528022c14428ad5912c323f6a356009457c985b,Automatic 3D facial expression recognition using geometric and textured feature fusion,"Automatic 3D Facial Expression Recognition using Geometric and
+Textured Feature Fusion
+Department of Electronic and Computer Engineering, Brunel University London, UK
+Asim Jan and Hongying Meng"
+25474c21613607f6bb7687a281d5f9d4ffa1f9f3,Recognizing disguised faces,"This article was downloaded by: [Carnegie Mellon University]
+On: 03 May 2012, At: 06:22
+Publisher: Psychology Press
+Informa Ltd Registered in England and Wales Registered Number: 1072954
+Registered office: Mortimer House, 37-41 Mortimer Street, London W1T 3JH,
+Visual Cognition
+Publication details, including instructions for authors
+nd subscription information:
+http://www.tandfonline.com/loi/pvis20
+Recognizing disguised faces
+Giulia Righi a , Jessie J. Peissig b & Michael J. Tarr c
+Children's Hospital Boston, Harvard Medical School,
+Boston, MA, USA
+Department of Psychology, California State University
+Fullerton, Fullerton, CA, USA
+Department of Psychology, Carnegie Mellon
+University, Pittsburgh, PA, USA
+Available online: 13 Feb 2012
+To cite this article: Giulia Righi, Jessie J. Peissig & Michael J. Tarr (2012): Recognizing
+disguised faces, Visual Cognition, 20:2, 143-169"
+25ed9bd6c5febac832f3d68b96123e6ba013df83,Object segmentation by alignment of poselet activations to image contours,"Object Segmentation by Alignment of Poselet Activations to Image Contours
+Thomas Brox1, Lubomir Bourdev2,3, Subhransu Maji2, and Jitendra Malik2∗
+University of California at Berkeley
+University of Freiburg, Germany
+Adobe Systems Inc., San Jose, CA"
+258a8c6710a9b0c2dc3818333ec035730062b1a5,Benelearn 2005 Annual Machine Learning Conference of Belgium and the Netherlands CTIT P ROCEEDINGS OF THE FOURTEENTH,"Benelearn 2005
+Annual Machine Learning Conference of
+Belgium and the Netherlands
+CTIT PROCEEDINGS OF THE FOURTEENTH
+ANNUAL MACHINE LEARNING CONFERENCE
+OF BELGIUM AND THE NETHERLANDS
+Martijn van Otterlo, Mannes Poel and Anton Nijholt (eds.)"
+25695abfe51209798f3b68fb42cfad7a96356f1f,An Investigation into Combining Both Facial Detection and Landmark Localisation into a Unified Procedure Using Gpu Computing,"AN INVESTIGATION INTO COMBINING
+BOTH FACIAL DETECTION AND
+LANDMARK LOCALISATION INTO A
+UNIFIED PROCEDURE USING GPU
+COMPUTING
+J M McDonagh
+MSc by Research"
+25b83cffddff334d78c55db4d67c65b1d8999b2f,Optimization of Person Re-Identification through Visual Descriptors,
+257e61e6b38ae23b7ddce9907c05b0e78be4d79d,The LORACs prior for VAEs: Letting the Trees Speak for the Data,"The LORACs prior for VAEs: Letting the Trees Speak for the Data
+Sharad Vikram
+U.C. San Diego1
+Matthew D. Hoffman
+Matthew J. Johnson
+Google AI
+Google Brain"
+253325f09f07c2f7a05191f76e4977f473f4bac5,Filtering and Optimization Strategies for Markerless Human Motion Capture,"FILTERING AND OPTIMIZATION
+STRATEGIES FOR MARKERLESS
+HUMAN MOTION CAPTURE WITH
+SKELETON-BASED SHAPE MODELS.
+DISSERTATION
+ZUR ERLANGUNG DES GRADES DES
+DOKTORS DER INGENIEURWISSENSCHAFTEN (DR.-ING.)
+DER NATURWISSENSCHAFTLICH-TECHNISCHEN FAKULT ¨ATEN
+DER UNIVERSIT ¨AT DES SAARLANDES
+VORGELEGT VON
+JUERGEN GALL
+SAARBR ¨UCKEN"
+250ebcd1a8da31f0071d07954eea4426bb80644c,DenseBox: Unifying Landmark Localization with End to End Object Detection,"DenseBox: Unifying Landmark Localization with
+End to End Object Detection
+Lichao Huang1
+Yi Yang2
+Yafeng Deng2
+Institute of Deep Learning
+Baidu Research
+Yinan Yu3"
+25a5f7179b794ab2bb7283c8337480fccee51944,Two novel motion-based algorithms for surveillance video analysis on embedded platforms,"Julien A. Vijverberg, Marijn J.H. Loomans, Cornelis J. Koeleman and Peter H.N. de With, ”Two novel
+motion-based algorithms for surveillance video analysis on embedded platforms,” Real-Time Image and Video
+Processing, Nasser Kehtarnavaz and Matthias F. Carlsohn, Editors, Proc. SPIE 7724, 77240I(2010).
+Copyright 2010 Society of Photo-Optical Instrumentation Engineers. One print or electronic copy may be
+made for personal use only. Systematic electronic or print reproduction and distribution, duplication of any
+material in this paper for a fee or for commercial purposes, or modification of the content of the paper are
+prohibited.
+http://dx.doi.org/10.1117/12.851371"
+2504b7bddd1892bc905fc5df6b5afc0b109ef40e,Function Norms and Regularization in Deep Networks,"Function Norms and Regularization in Deep
+Networks
+Amal Rannen Triki∗
+KU Leuven, ESAT-PSI, imec, Belgium
+Maxim Berman
+KU Leuven, ESAT-PSI, imec, Belgium
+Matthew B. Blaschko
+KU Leuven, ESAT-PSI, imec, Belgium"
+25337690fed69033ef1ce6944e5b78c4f06ffb81,Strategic Engagement Regulation: an Integration of Self-enhancement and Engagement,"STRATEGIC ENGAGEMENT REGULATION:
+AN INTEGRATION OF SELF-ENHANCEMENT AND ENGAGEMENT
+Jordan B. Leitner
+A dissertation submitted to the Faculty of the University of Delaware in partial
+fulfillment of the requirements for the degree of Doctor of Philosophy in Psychology
+Spring 2014
+© 2014 Jordan B. Leitner
+All Rights Reserved"
+25bb4212af72d64ec20cac533f58f7af1472e057,Person Re-Identification by Camera Correlation Aware Feature Augmentation,"Person Re-Identification by Camera
+Correlation Aware Feature Augmentation
+Ying-Cong Chen, Xiatian Zhu, Wei-Shi Zheng, Jian-Huang Lai
+Code is available at the project page:
+http://isee.sysu.edu.cn/%7ezhwshi/project/CRAFT.html
+For reference of this work, please cite:
+Ying-Cong Chen, Xiatian Zhu,Wei-Shi Zheng, and Jian-Huang Lai. Per-
+son Re-Identification by Camera Correlation Aware Feature Augmenta-
+0.1109/TPAMI.2017.2666805)
+title={Person Re-Identification by Camera Correlation Aware Feature Aug-
+mentation},
+uthor={Chen, Ying-Cong and Zhu, Xiatian and Zheng, Wei-Shi and Lai,
+Jian-Huang},
+(DOI: 10.1109/TPAMI.2017.2666805)}"
+2547607a98eff30654994902f518e30caf2f8271,Synthesizing manipulation sequences for under-specified tasks using unrolled Markov Random Fields,"Synthesizing Manipulation Sequences for Under-Specified Tasks
+using Unrolled Markov Random Fields
+Jaeyong Sung, Bart Selman and Ashutosh Saxena"
+250449a9827e125d6354f019fc7bc6205c5fd549,Adversarial Reconstruction Loss,"PAIRWISE AUGMENTED GANS WITH
+ADVERSARIAL RECONSTRUCTION LOSS
+Aibek Alanov1,2,3∗, Max Kochurov1,2∗, Daniil Yashkov5, Dmitry Vetrov1,3,4
+Samsung AI Center in Moscow
+Skolkovo Institute of Science and Technology
+National Research University Higher School of Economics
+Joint Samsung-HSE lab
+5Federal Research Center ""Informatics and Management"" of the Russian Academy of Sciences"
+253d2fd2891a97d4caa49d87094dac1ec18c7752,Bio-authentication for Layered Remote Health Monitor Framework,"JOURNAL OF MEDICAL INFORMATICS & TECHNOLOGIES Vol. 23/2014, ISSN 1642-6037
+Remote Health Monitor, Security Issues,
+Multi-Factor Biometric Authentication,
+Keystroke Analysis, Face Recognition
+Tapalina BHATTASALI1, Khalid SAEED2, Nabendu CHAKI1, Rituparna CHAKI3
+BIO-AUTHENTICATION FOR LAYERED REMOTE
+HEALTH MONITOR FRAMEWORK
+Aged people, patients with chronic disease, patients at remote location need continuous monitoring under
+healthcare professionals. Remote health monitor is likely to be an effective approach to provide healthcare service
+in a simple and cost effective way. However, effective implementation of this type of framework needs consid-
+eration of variety of security threats. In this paper, a layer based remote health monitor framework is proposed
+to analyze health condition of patients from remote places. Beside this, a multi-modal biometric authentication
+mechanism is proposed here to reduce misuse of health data and biometrics templates in heterogeneous cloud
+environment. Main focus of the paper is to design semi-continuous authentication mechanism after establishing
+mutual 1:1 trust relationship among the participants in cloud environment. Behavioral biometrics keystroke
+nalysis is fused with physiological biometrics face recognition to enhance accuracy of authentication. Instead of
+onsidering traditional performance evaluation parameters for biometrics, this paper considers a few performance
+metrics for determining efficiency of semi-continuous verification of the proposed framework.
+. INTRODUCTION
+Remote health monitor provides healthcare service for patients from remote locations to support"
+2562d6ec0044eee9d604fe3a351f80d4d10d4a3d,Conditional Image-Text Embedding Networks,"Conditional Image-Text Embedding Networks
+Bryan A. Plummer†, Paige Kordas†, M. Hadi Kiapour‡, Shuai Zheng‡,
+Robinson Piramuthu‡, and Svetlana Lazebnik†
+University of Illinois at Urbana-Champaign†
+Ebay Inc.‡"
+25d3e122fec578a14226dc7c007fb1f05ddf97f7,The first facial expression recognition and analysis challenge,"The First Facial Expression Recognition and Analysis Challenge
+Michel F. Valstar, Bihan Jiang, Marc Mehu, Maja Pantic, and Klaus Scherer"
+2597b0dccdf3d89eaffd32e202570b1fbbedd1d6,Towards Predicting the Likeability of Fashion Images,"Towards predicting the likeability of fashion images
+Jinghua Wang, Abrar Abdul Nabi, Gang Wang, Member, IEEE, Chengde Wan, Tian-Tsong Ng, Member, IEEE,"
+2594bf77a1fef68d86be74a2cb79c55499cb2bec,Learning Invariant Color Features for Person Reidentification,"Learning Invariant Color Features for
+Person Re-Identification
+Rahul Rama Varior, Student Member, IEEE,
+Gang Wang, Member, IEEE Jiwen Lu, Member, IEEE"
+25aa935217a52d83bc1637687a78017984fcb731,The Continuous N-tuple Classiier and Its Application to Face Recognition,"Thecontinuousn-tupleclassi(cid:12)eranditsapplicationto
+facerecognition
+S.M.Lucas
+DepartmentofElectronicSystemsEngineering
+UniversityofEssex
+ColchesterCOSQ,UK"
+25e62096a44e3fe2f641b492379e7c4babce7ee6,Investigating Gaze of Children with ASD in Naturalistic Settings,"Investigating Gaze of Children with ASD in Naturalistic
+Settings
+Basilio Noris1*, Jacqueline Nadel2, Mandy Barker3, Nouchine Hadjikhani4, Aude Billard1
+Learning Algorithms and Systems Laboratory, Ecole Polyte´chnique Fe´de´rale de Lausanne, Lausanne, Switzerland, 2 Emotion Centre, Hoˆ pital de La Salpe´trie`re, Paris,
+France, 3 Lausanne University Department of Child and Adolescent Psychiatry, University Hospital of Canton de Vaud, Lausanne, Switzerland, 4 Brain and Mind Institute,
+Ecole Polyte´chnique Fe´de´rale de Lausanne, Lausanne, Switzerland & Martinos Center for Biomedical Imaging Massachusetts General Hospital/Healthcare Management
+Systems/HST, Boston, Massachusetts, United States of America"
+25982e2bef817ebde7be5bb80b22a9864b979fb0,Facial Feature Tracking Under Varying Facial Expressions and Face Poses Based on Restricted Boltzmann Machines,"(a)26facialfeaturepointsthatwetrack(b)oneexamplesequenceFigure1.Facialfeaturepointtrackingunderexpressionvariationandocclusion.Inrecentyears,thesemodelshavebeenusedexplicitlytohandletheshapevariations[17][5].Thenonlinearityem-beddedinRBManditsvariantsmakesthemmoreeffectiveandefficienttorepresentthenonrigiddeformationsofob-jectscomparedtothelinearmethods.Theirlargenumberofhiddennodesanddeeparchitecturesalsocanimposesuffi-cientconstraintsaswellasenoughdegreesoffreedomsintotherepresentationsofthetargetobjects.Inthispaper,wepresentaworkthatcaneffectivelytrackfacialfeaturepointsusingfaceshapepriormodelsthatareconstructedbasedonRBM.Thefacialfeaturetrackercantrack26facialfeaturepoints(Fig.1(a))eveniffaceshavedifferentfacialexpressions,varyingposes,orocclu-sion(Fig.1(b)).Unlikethepreviousworksthattrackfacialfeaturepointsindependentlyorbuildashapemodeltocap-turethevariationsoffaceshapeorappearanceregardlessofthefacialexpressionsandfaceposes,theproposedmodelcouldcapturethedistinctionsaswellasthevariationsoffaceshapesduetofacialexpressionandposechangeinaunifiedframework.Specifically,wefirstconstructamodel1"
+251da2569036cebc2ea109972f412c5b1a9db20f,Appearance modeling for person re-identification using Weighted Brightness Transfer Functions,"1st International Conference on Pattern Recognition (ICPR 2012)
+November 11-15, 2012. Tsukuba, Japan
+978-4-9906441-1-6 ©2012 IAPR"
+25403c52a7c3092866773b0e765ab55841d3cb67,Joint Prediction of Activity Labels and Starting Times in Untrimmed Videos,"Joint Prediction of Activity Labels and Starting Times in Untrimmed Videos
+Tahmida Mahmud1, Mahmudul Hasan2, Amit K. Roy-Chowdhury1
+University of California, Riverside, CA-92521, USA
+Comcast Labs, Washington, DC-20005, USA"
+25d48ab3b05bf299fe61ed6580674e893f08380b,"Pedestrian Detection: A Survey of Methodologies, Techniques and Current Advancements","International Journal of Scientific Research Engineering & Technology (IJSRET), ISSN 2278 – 0882
+Volume 4, Issue 1, January 2015
+Pedestrian Detection: A Survey of Methodologies, Techniques and Current
+Advancements
+Tanmay Bhadra1, Joydeep Sonar2 , Arup Sarmah3 ,Chandan Jyoti Kumar4
+Dept. of CSE & IT, School of Technology
+Assam Don Bosco University"
+25e05a1ea19d5baf5e642c2a43cca19c5cbb60f8,Label Distribution Learning,"Label Distribution Learning
+Xin Geng*, Member, IEEE"
+2559b15f8d4a57694a0a33bdc4ac95c479a3c79a,Contextual Object Localization With Multiple Kernel Nearest Neighbor,"Contextual Object Localization With Multiple
+Kernel Nearest Neighbor
+Brian McFee, Student Member, IEEE, Carolina Galleguillos, Student Member, IEEE, and
+Gert Lanckriet, Member, IEEE"
+259bd09bc382763f864986498e46ab0178714f58,Lifelong Machine Learning,"Lifelong Machine Learning
+November, 2016
+Zhiyuan Chen and Bing Liu
+Draft : This is mainly an early draft of the book.
+We also updated a few places after the publication, highlighted in yellow.
+Zhiyuan Chen and Bing Liu. Lifelong Machine Learning.
+Morgan & Claypool Publishers, Nov 2016.
+LifelongMachineLearningZhiyuan ChenBing Liu"
+257eb6d5ca49eb4ea90658a8668d1853d9c38af7,A Dissertation submitted in partial satisfaction of the requirements for the degree of Doctor of Philosophy in,"UNIVERSITY OF CALIFORNIA
+RIVERSIDE
+Wide-Area Video Understanding: Tracking, Video Summarization and
+Algorithm-Platform Co-Design
+A Dissertation submitted in partial satisfaction
+of the requirements for the degree of
+Doctor of Philosophy
+Electrical Engineering
+Shu Zhang
+December 2015
+Dissertation Committee:
+Dr. Amit K. Roy-Chowdhury, Chairperson
+Dr. Qi Zhu
+Dr. Ertem Tuncel"
+253cedd3022e25a79bcaffe74e3405db65c6d2ce,Deep Hashing for Scalable Image Search,"Deep Hashing for Scalable Image Search
+Jiwen Lu, Senior Member, IEEE, Venice Erin Liong, and Jie Zhou, Senior Member, IEEE"
+25f1a5121cb7fb67749a6f6dbc27fd48f177d5fb,Context-Aware Hypergraph Modeling for Re-identification and Summarization,"Context-Aware Hypergraph Modeling for
+Re-identification and Summarization
+Santhoshkumar Sunderrajan, Member, IEEE, and B. S. Manjunath, Fellow, IEEE"
+25f1f195c0efd84c221b62d1256a8625cb4b450c,Experiments with Facial Expression Recognition using Spatiotemporal Local Binary Patterns,"-4244-1017-7/07/$25.00 ©2007 IEEE
+ICME 2007"
+25885e9292957feb89dcb4a30e77218ffe7b9868,Analyzing the Affect of a Group of People Using Multi-modal Framework,"JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2016
+Analyzing the Affect of a Group of People Using
+Multi-modal Framework
+Xiaohua Huang, Abhinav Dhall, Xin Liu, Guoying Zhao, Jingang Shi, Roland Goecke and Matti Pietik¨ainen"
+259706f1fd85e2e900e757d2656ca289363e74aa,Improving People Search Using Query Expansions: How Friends Help To Find People,"Improving People Search Using Query Expansions
+How Friends Help To Find People
+Thomas Mensink and Jakob Verbeek
+LEAR - INRIA Rhˆone Alpes - Grenoble, France"
+258a2dad71cb47c71f408fa0611a4864532f5eba,Discriminative Optimization of Local Features for Face Recognition,"Discriminative Optimization
+of Local Features for Face Recognition
+H O S S E I N A Z I Z P O U R
+Master of Science Thesis
+Stockholm, Sweden 2011"
+25127c2d9f14d36f03d200a65de8446f6a0e3bd6,Evaluating the Performance of Deep Supervised Auto Encoder in Single Sample Face Recognition Problem Using Kullback-leibler Divergence Sparsity Regularizer,"Journal of Theoretical and Applied Information Technology
+20th May 2016. Vol.87. No.2
+© 2005 - 2016 JATIT & LLS. All rights reserved.
+ISSN: 1992-8645 www.jatit.org E-ISSN: 1817-3195
+EVALUATING THE PERFORMANCE OF DEEP SUPERVISED
+AUTO ENCODER IN SINGLE SAMPLE FACE RECOGNITION
+PROBLEM USING KULLBACK-LEIBLER DIVERGENCE
+SPARSITY REGULARIZER
+OTNIEL Y. VIKTORISA, 2ITO WASITO, 2ARIDA F. SYAFIANDINI
+Faculty of Computer of Computer Science, Universitas Indonesia, Kampus UI Depok, Indonesia
+E-mail: ,"
diff --git a/scraper/requirements.txt b/scraper/requirements.txt
new file mode 100644
index 00000000..a804dbfe
--- /dev/null
+++ b/scraper/requirements.txt
@@ -0,0 +1,7 @@
+Jinja2
+mistune
+requests
+click
+simplejson
+beautifulsoup4
+
diff --git a/scraper/s2-citation-report.py b/scraper/s2-citation-report.py
new file mode 100644
index 00000000..5c5fae9a
--- /dev/null
+++ b/scraper/s2-citation-report.py
@@ -0,0 +1,291 @@
+import os
+import re
+import glob
+import simplejson as json
+import math
+import operator
+import click
+#import builder
+from util import *
+
@click.command()
def s2_citation_report():
    """Generate the HTML citation-coverage reports for all megapixels papers.

    Writes a per-paper report via process_paper(), then two index pages:
    one sorted by title, one sorted by geocoded-citation count. Finally
    prints overall citation/geocode totals to stdout.
    """
    address_book = AddressBook()
    # citation paperId -> bool, filled in by process_paper() as it runs
    geocode_outcomes = {}

    paper_reports = []
    for query in load_megapixels_queries():
        report = process_paper(query, address_book, geocode_outcomes)
        if report is not None:
            paper_reports.append(report)

    write_papers_report('reports/report_index.html', 'All Papers', paper_reports, 'title')
    write_papers_report('reports/report_coverage.html', 'Coverage', paper_reports, 'citations_geocoded', reverse=True)

    total_citations = len(geocode_outcomes)
    located = sum(1 for geocoded in geocode_outcomes.values() if geocoded)
    print("citations: {}".format(total_citations))
    print("geocoded: {} ({}%)".format(located, percent(located, total_citations)))
+
def write_papers_report(fn, title, papers, key, reverse=False):
    """Write one sortable HTML table of per-paper citation statistics.

    fn      -- output HTML path
    title   -- report heading
    papers  -- list of stats dicts produced by process_paper()
    key     -- dict key to sort the table rows by
    reverse -- sort descending when True
    """
    column_names = [
        'Paper ID',
        'Megapixels Key',
        'Megapixels Name',
        'Report Link',
        'PDF Link',
        'Journal',
        'Type',
        'Address',
        'Lat',
        'Lng',
        'Coverage',
        'Total Citations',
        'Geocoded Citations',
        'Unknown Citations',
        'Empty Citations',
        'With PDF',
        'With DOI',
    ]
    table_rows = [
        [
            p['paperId'],
            p['key'],
            p['name'],
            LinkLine(p['report_link'], p['title']),
            LinkLine(p['pdf_link'], '[pdf]'),
            p['journal'],
            p['address_type'],
            p['address'],
            p['lat'],
            p['lng'],
            str(percent(p['citations_geocoded'], p['citation_count'])) + '%',
            p['citation_count'],
            p['citations_geocoded'],
            p['citations_unknown'],
            p['citations_empty'],
            p['citations_pdf'],
            p['citations_doi'],
        ]
        for p in sorted(papers, key=lambda x: x[key], reverse=reverse)
    ]
    write_report(fn, title=title, keys=column_names, rows=table_rows)
+
def process_paper(row, addresses, success):
    """Build the citation-coverage record and HTML report for one paper.

    row       -- dict from the citation lookup CSV (reads 'paper_id',
                 'key', 'name')
    addresses -- AddressBook used to geocode institution strings
    success   -- dict mutated in place: citation paperId -> True/False,
                 recording whether each citation could be geocoded
    Side effect: writes reports/papers/<paperId>.html.
    Returns the stats dict for the index pages, or None when the paper's
    cached record cannot be loaded.
    """
    # Stats record returned to the caller; filled in below.
    res = {
        'paperId': '',
        'key': '',
        'title': '',
        'journal': '',
        'address': '',
        'address_type': '',
        'lat': '',
        'lng': '',
        'pdf_link': '',
        'report_link': '',
        'citation_count': 0,
        'citations_geocoded': 0,
        'citations_unknown': 0,
        'citations_empty': 0,
        'citations_pdf': 0,
        'citations_doi': 0,
    }

    geocoded_citations = []
    unknown_citations = []
    display_geocoded_citations = []
    empty_citations = []
    pdf_count = 0
    doi_count = 0
    # NOTE(review): address_count is incremented below but never reported.
    address_count = 0

    fn = file_path('papers', row['paper_id'], 'paper.json')

    with open(fn, 'r') as f:
        data = json.load(f)
        print('>> {}'.format(data['paperId']))
        paper = load_paper(data['paperId'])
        if paper is None:
            print("Paper missing! {}".format(data['paperId']))
            return

        res['key'] = row['key']
        res['name'] = row['name']
        res['paperId'] = paper.paper_id
        res['title'] = paper.title
        res['journal'] = paper.journal
        res['report_link'] = 'papers/{}.html'.format(paper.paper_id)
        res['pdf_link'] = paper.pdf_link
        # res['authors'] = ', '.join(paper.authors)
        # res['citations'] = []

        # Geocode the paper itself: first institution (sorted by name) that
        # the address book recognizes wins.
        paper_institutions = load_institutions(paper.paper_id)
        paper_address = None
        for inst in sorted(paper_institutions, key=operator.itemgetter(1)):
            # print(inst[1])
            institution = inst[1]
            if paper_address is None:
                paper_address = addresses.find(institution)

        if paper_address:
            # print(paper_address)
            # paper_address layout (by index use here and below): 0=name,
            # 2=address, 3=lat, 4=lng, 5=type — presumably from AddressBook;
            # TODO confirm against the AddressBook implementation.
            res['address'] = paper_address[0]
            res['lat'] = paper_address[3]
            res['lng'] = paper_address[4]
            res['address_type'] = paper_address[5]

        # Classify every citation: geocoded via scraped institutions, via
        # headings read from the cached PDF text, or unknown/empty.
        for cite in data['citations']:
            citationId = cite['paperId']
            citation = load_paper(citationId)
            has_pdf = os.path.exists(file_path('pdf', citationId, 'paper.txt'))
            has_doi = os.path.exists(file_path('doi', citationId, 'paper.doi'))
            if has_pdf:
                pdf_count += 1
            if has_doi:
                doi_count += 1
            if citation.data is None:
                print("Citation missing! {}".format(cite['paperId']))
                continue
            institutions = load_institutions(citationId)
            geocoded_institutions = []
            unknown_institutions = []
            institution = ''
            address = None
            # Last recognized institution wins as the citation's address.
            for inst in sorted(institutions, key=operator.itemgetter(1)):
                # print(inst[1])
                address_count += 1
                institution = inst[1]
                next_address = addresses.find(institution)
                if next_address:
                    address = next_address
                    geocoded_institutions.append(institution)
                else:
                    unknown_institutions.append(institution)
            if not address:
                # Fallback: scan headings extracted from the PDF text.
                if has_pdf:
                    # NOTE(review): found_abstract, heading_string and
                    # found_addresses are assigned but never used.
                    headings, found_abstract = read_headings(file_path('pdf', citationId, 'paper.txt'), citation)
                    heading_string = '\n'.join(headings[0:20])
                    found_addresses = []
                    if len(headings):
                        for heading in headings:
                            l = heading.lower().strip()
                            if l:
                                next_address = addresses.find(l)
                                if next_address:
                                    address = next_address
                                    geocoded_institutions.append(heading)
                                else:
                                    unknown_institutions.append(heading)
                else:
                    # No institutions and no PDF to mine: nothing to go on.
                    empty_citations.append([
                        citationId,
                        citation.title,
                    ])

            # res['citations'].append({
            #     'title': citation.title,
            #     'journal': citation.journal,
            #     'authors': citation.authors,
            #     'institutions': [inst[1] for inst in institutions],
            #     'geocoded': geocoded_institutions,
            # })
            if address:
                success[citationId] = True
                geocoded_citations.append([
                    citation.title,
                    institution,
                ] + address)
                display_geocoded_citations.append([
                    citationId,
                    LinkLine(citation.pdf_link, '[pdf]'),
                    citation.title,
                ] + address[0:5])
            else:
                success[citationId] = False
                unknown_citations.append([
                    citationId,
                    LinkLine(citation.pdf_link, '[pdf]'),
                    citation.title,
                    '<br>'.join(unknown_institutions),
                ])
        res['citation_count'] = len(data['citations'])
        res['citations_geocoded'] = len(geocoded_citations)
        res['citations_unknown'] = len(unknown_citations)
        res['citations_empty'] = len(empty_citations)
        res['citations_pdf'] = pdf_count
        res['citations_doi'] = doi_count

        # Emit the per-paper HTML report (map + tables). NOTE(review): the
        # inner `f` shadows the outer paper.json handle for the rest of the
        # with-block; harmless here since the outer file is no longer read.
        total_citations = len(geocoded_citations) + len(unknown_citations)
        os.makedirs('reports/papers/', exist_ok=True)
        with open('reports/papers/{}.html'.format(paper.paper_id), 'w') as f:
            f.write("<!doctype html>")
            f.write("<html>")
            f.write("<head>")
            f.write('<meta charset="utf-8">')
            f.write("<title>{}</title>".format(paper.title))
            f.write("<link rel='stylesheet' href='../reports.css'>")
            f.write('<link rel="stylesheet" href="https://unpkg.com/leaflet@1.3.4/dist/leaflet.css" integrity="sha512-puBpdR0798OZvTTbP4A8Ix/l+A4dHDD0DGqYW6RQ+9jxkRFclaxxQb/SJAWZfWAkuyeQUytO7+7N4QKrDh+drA==" crossorigin=""/>')
            f.write("</head>")
            f.write("<body>")
            f.write("<div id='mapid'></div>")
            f.write("<h2>{}</h2>".format(paper.title))
            f.write('<ul>')
            if paper.journal:
                f.write('<li>Journal: {}</li>'.format(paper.journal))
            if paper_address:
                f.write('<li>Research institution: {}</li>'.format(paper_address[0]))
                f.write('<li>Address: {}</li>'.format(paper_address[2]))
                f.write('<li>Lat/Lng: {}, {}</li>'.format(paper_address[3], paper_address[4]))
            f.write('<li>Year: {}</li>'.format(paper.year))
            if total_citations == 0:
                f.write('<li>Coverage: No citations found!</li>')
            else:
                f.write('<li>Coverage: {} / {} citations were located ({} %).</li>'.format(len(geocoded_citations), total_citations, math.floor(len(geocoded_citations) / total_citations * 100)))
            f.write('</ul>')
            f.write('<h3>{}</h3>'.format('Geocoded Citations'))
            write_table(f, keys=None, rows=sorted(display_geocoded_citations, key=operator.itemgetter(0)))
            f.write('<h3>{}</h3>'.format('Other Citations'))
            write_table(f, keys=None, rows=sorted(unknown_citations, key=operator.itemgetter(0)))
            f.write("</body>")
            f.write('<script src="../snap.svg-min.js"></script>')
            f.write('<script src="https://unpkg.com/leaflet@1.3.4/dist/leaflet.js" integrity="sha512-nMMmRyTVoLYqjP9hrbed9S+FzjZHW5gY1TWCHA5ckwXZBadntCNs8kEqAWdrb9O7rxbCaA4lKTIWjDXZxflOcA==" crossorigin=""></script>')
            f.write('<script src="../leaflet.arc.js"></script>')
            f.write('<script src="../leaflet.bezier.js"></script>')
            # Inline JSON payloads consumed by map.js on the client.
            f.write('<script type="text/json" id="address">')
            json.dump(paper_address, f)
            f.write('</script>')
            f.write('<script type="text/json" id="citations">')
            json.dump(geocoded_citations, f)
            f.write('</script>')
            f.write('<script src="../map.js"></script>')
            f.write("</html>")
        # template = env.get_template('paper.html')
        return res
+
def load_megapixels_queries():
    """Read datasets/citation_lookup.csv and return its rows as dicts.

    Each returned dict maps a CSV header name to the corresponding cell
    of one row.
    """
    keys, rows = read_csv('datasets/citation_lookup.csv')
    return [
        {key: row[index] for index, key in enumerate(keys)}
        for row in rows
    ]
+
def load_institutions(paperId):
    """Return the scraped institutions list for a paper.

    Prefers the PDF-derived institutions.json and falls back to the
    DOI-derived one; returns an empty list when neither file exists.
    """
    for source in ('pdf', 'doi'):
        fn = file_path(source, paperId, 'institutions.json')
        if os.path.exists(fn):
            return read_json(fn)['institutions']
    return []
+
def data_path(key, paper_id):
    """Per-paper data directory, sharded by the id's first two characters."""
    return 'datasets/s2/{}/{}/{}'.format(key, paper_id[:2], paper_id)


def file_path(key, paper_id, fn):
    """Full path to file *fn* inside a paper's data directory."""
    return os.path.join(data_path(key, paper_id), fn)


if __name__ == '__main__':
    s2_citation_report()
diff --git a/scraper/s2-doi-report.py b/scraper/s2-doi-report.py
new file mode 100644
index 00000000..b10b5da1
--- /dev/null
+++ b/scraper/s2-doi-report.py
@@ -0,0 +1,249 @@
+import re
+import os
+import gzip
+import glob
+import simplejson as json
+import click
+import operator
+from util import *
+from bs4 import BeautifulSoup
+from importlib import import_module
+from urllib.parse import unquote
+doi = import_module('s2-fetch-doi')
+
# Root of the DOI scrape output; per-paper dirs live under <DOI_DIR>/<id[:2]>/<id>.
DOI_DIR = 'datasets/s2/doi'
+
@click.command()
def doi_report():
    """Summarize DOI-scraped pages: per-publisher counts, institutions, geocoding.

    Walks every *.url record under DOI_DIR, dispatches the matching saved
    page to a publisher-specific affiliation parser, tallies domains and
    institutions, and writes HTML/CSV reports plus stdout totals.
    """
    # NOTE(review): `rows` is never appended to or written out — dead local.
    rows = []
    domains = {}
    institutions = {}
    # geocode_lookup = load_geocode_lookup()
    addresses = AddressBook()

    geocoded_papers = []
    unknown_papers = []
    unattributed_papers = []
    paper_count = 0
    ieee_count = 0
    springer_count = 0
    sciencedirect_count = 0
    acm_count = 0
    computerorg_count = 0
    elsevier_count = 0
    unparsed_count = 0
    for fn in glob.iglob('{}/**/*.url'.format(DOI_DIR), recursive=True):
        paper_count += 1
        url_info = read_json(fn)
        domain = url_info['domain']
        paper_id = url_info['paper_id']
        paper = load_paper(paper_id)
        # The saved page sits next to the .url record with a .doi suffix.
        doi_fn = fn.replace('.url', '.doi')
        address = None
        if domain in domains:
            domains[domain] += 1
        else:
            domains[domain] = 1
        affiliations = None
        paper_affiliation_count = 0

        # Dispatch to the parser for the publisher that served this DOI.
        if 'ieee.org' in domain:
            ieee_count += 1
            affiliations = load_ieee(paper, doi_fn)
        elif 'link.springer.com' in domain:
            springer_count += 1
            affiliations = load_springer(paper, doi_fn)
        elif 'sciencedirect.com' in domain:
            sciencedirect_count += 1
            affiliations = load_sciencedirect(paper, doi_fn)
        elif 'acm.org' in domain:
            acm_count += 1
            affiliations = load_acm(paper, doi_fn)
        elif 'computer.org' in domain:
            computerorg_count += 1
            affiliations = load_computerorg(paper, doi_fn)
        elif 'elsevier.com' in domain:
            elsevier_count += 1
            affiliations = load_elsevier(paper, doi_fn)
        else:
            unparsed_count += 1

        if affiliations:
            for affiliation in affiliations:
                if affiliation:
                    paper_affiliation_count += 1
                    if affiliation in institutions:
                        institutions[affiliation] += 1
                    else:
                        institutions[affiliation] = 1
                    address = addresses.find(affiliation)
                    if not address:
                        unknown_papers.append([paper.paper_id, paper.title, affiliation])
        if paper_affiliation_count == 0:
            unattributed_papers.append([paper.paper_id, paper.title])
        # NOTE(review): `address` holds the geocode of the LAST affiliation
        # only, so a paper is "geocoded" based on its final listed author.
        if address:
            geocoded_papers.append([paper.paper_id, paper.title] + address)

    # NOTE(review): these are one-shot reversed() iterators; institution_list
    # is consumed by the loop below, domain_list by write_report().
    domain_list = reversed(sorted(domains.items(), key=operator.itemgetter(1)))
    # for domain, count in domain_list:
    #     print('{}\t{}'.format(count, domain))
    institution_list = reversed(sorted(institutions.items(), key=operator.itemgetter(1)))
    # for institution, count in institution_list:
    #     print('{}\t{}'.format(count, institution))
    display_institution_list = []
    unknown_institution_list = []
    for inst in institution_list:
        addr = addresses.find(inst[0])
        if addr:
            display_institution_list.append((BoldLine(inst[0]), inst[1],))
        elif len(inst[0]) > 1:
            display_institution_list.append(inst)
            unknown_institution_list.append(inst)
    write_report('reports/doi_domains.html', title='DOI Domains', keys=None, rows=domain_list)
    write_report('reports/doi_institutions.html', title='Institutions from IEEE', keys=None, rows=display_institution_list)
    write_report('reports/doi_institutions_unknown.html', title='Unknown Institutions from DOI', keys=None, rows=unknown_institution_list)
    write_csv('reports/doi_institutions_geocoded.csv', keys=None, rows=geocoded_papers)
    write_csv('reports/doi_institutions_unknown.csv', keys=None, rows=unknown_papers)
    write_csv('reports/doi_institutions_unattributed.csv', keys=None, rows=unattributed_papers)
    print("total papers: {}".format(paper_count))
    print(".. ieee: {}".format(ieee_count))
    print(".. springer: {}".format(springer_count))
    print(".. acm: {}".format(acm_count))
    print(".. computerorg: {}".format(computerorg_count))
    print(".. sciencedirect: {}".format(sciencedirect_count))
    print(".. elsevier: {}".format(elsevier_count))
    print(".. unparsed: {}".format(unparsed_count))
    print("geocoded papers: {}".format(len(geocoded_papers)))
    print("unknown papers: {}".format(len(unknown_papers)))
    print("unattributed papers: {}".format(len(unattributed_papers)))
+
def load_ieee(paper, fn):
    """Extract author affiliations from a saved IEEE Xplore page.

    IEEE embeds document metadata as a JSON blob assigned to
    ``global.document.metadata`` in an inline <script>; slice it out and
    parse it. Writes the raw metadata (ieee.json) and an institutions.json
    next to the paper's data, and returns the list of affiliation strings,
    or None when the page cannot be parsed.
    """
    with open(fn, 'r') as f:
        try:
            data = f.read().split('global.document.metadata=')[1].split('</script>')[0].strip()[:-1]
            data = json.loads(data)
            write_json(fn.replace('paper.doi', 'ieee.json'), data)
            # print(data)
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C / SystemExit still
            # propagate; a truncated download or layout change lands here.
            print('ieee: could not read data')
            return None
    affiliations = [ author['affiliation'] for author in data['authors'] ]
    institutions = [ [ paper.paper_id, author['affiliation'], author['affiliation'] ] for author in data['authors'] ]
    # print(affiliations)
    write_json('{}/{}'.format(paper_path(paper.paper_id), 'institutions.json'), { 'institutions': institutions })
    return affiliations
+
def load_springer(paper, fn):
    """Extract author affiliations from a saved SpringerLink page.

    Parses the page with BeautifulSoup and collects the text of every
    ``affiliation__item`` element. Writes institutions.json next to the
    paper's data and returns the affiliation strings, or None when the
    page cannot be parsed.
    """
    # print('springer: {}'.format(paper.paper_id))
    with open(fn, 'r') as f:
        try:
            soup = BeautifulSoup(f.read(), 'html.parser')
        except Exception:
            # Narrowed from a bare `except:`; keeps Ctrl-C working.
            print('springer: could not read data')
            return None
    items = soup.find_all(class_='affiliation__item')
    affiliations = [ ', '.join(item.strings) for item in items ]
    institutions = [ [ paper.paper_id, affiliation ] for affiliation in affiliations ]
    write_json('{}/{}'.format(paper_path(paper.paper_id), 'institutions.json'), { 'institutions': institutions })
    return affiliations
+
def load_sciencedirect(paper, fn):
    """Extract author affiliations from a saved ScienceDirect page.

    ScienceDirect ships page state as the first application/json <script>;
    affiliations live under data['authors']['affiliations'] (each value's
    ``$$[0]['_']`` is the display string — per the pages scraped here).
    Writes sciencedirect.json and institutions.json, and returns the
    affiliation strings, or None when the page cannot be parsed.
    """
    # print('sciencedirect: {}'.format(paper.paper_id))
    with open(fn, 'r') as f:
        try:
            soup = BeautifulSoup(f.read(), 'html.parser')
        except Exception:
            # Narrowed from a bare `except:`; keeps Ctrl-C working.
            print('sciencedirect: could not read data')
            return None

    items = soup.find_all("script", type='application/json', limit=1)
    if len(items) == 0:
        return None

    try:
        data = json.loads(items[0].string)
        write_json(fn.replace('paper.doi', 'sciencedirect.json'), data)
        # print(data)
    except Exception:
        # Narrowed from a bare `except:`; malformed embedded JSON lands here.
        print('sciencedirect: json error')
        return None

    affiliations = [value['$$'][0]['_'] for value in data['authors']['affiliations'].values()]

    institutions = [ [ paper.paper_id, affiliation ] for affiliation in affiliations ]
    write_json('{}/{}'.format(paper_path(paper.paper_id), 'institutions.json'), { 'institutions': institutions })
    return affiliations
+
def load_acm(paper, fn):
    """Extract author affiliations from a saved ACM Digital Library page.

    Collects the text of every anchor titled 'Institutional Profile Page'.
    Writes institutions.json next to the paper's data and returns the
    affiliation strings, or None when the page cannot be parsed.
    """
    # print('acm: {}'.format(paper.paper_id))
    with open(fn, 'r') as f:
        try:
            soup = BeautifulSoup(f.read(), 'html.parser')
        except Exception:
            # Narrowed from a bare `except:`; keeps Ctrl-C working.
            print('acm: could not read data')
            return None
    items = soup.find_all("a", title='Institutional Profile Page')
    affiliations = [ item.string for item in items ]
    # print(affiliations)
    institutions = [ [ paper.paper_id, affiliation ] for affiliation in affiliations ]
    write_json('{}/{}'.format(paper_path(paper.paper_id), 'institutions.json'), { 'institutions': institutions })
    return affiliations
+
def load_computerorg(paper, fn):
    """Placeholder for computer.org affiliation parsing.

    computer.org pages are not parsed yet: the caller still counts them,
    but no institutions.json is produced. Always returns None, like the
    other loaders do on failure.
    """
    # TODO: parse the saved page (the other load_* functions are the
    # template: BeautifulSoup -> affiliation strings -> institutions.json).
    return None
+
def load_elsevier(paper, fn):
    """Follow the Elsevier linking-hub redirect so the real page can be fetched.

    Elsevier DOI pages are interstitials carrying a 'redirectURL' form field;
    when the original page hasn't been archived yet (doi.old_doi_fn), this
    unquotes that URL and re-fetches via doi.fetch_doi(replace=True).
    NOTE(review): unlike the other load_* parsers this never returns
    affiliations — it always returns None; institutions are extracted on a
    later pass once the redirect target has been fetched (presumably —
    confirm against the fetch pipeline).
    """
    print('elsevier: {}'.format(paper.paper_id))
    if not os.path.exists(doi.old_doi_fn(paper.paper_id)):
        with open(fn, 'r') as f:
            try:
                soup = BeautifulSoup(f.read(), 'html.parser')
            except:
                print('elsevier: could not read data')
                return None
        # IndexError here (no redirectURL input) propagates to the caller.
        item = soup.find_all("input", attrs={"name": 'redirectURL'})[0]
        new_url = unquote(item['value'])
        if new_url:
            print(new_url)
            doi.fetch_doi(paper.paper_id, new_url, replace=True)
        else:
            print("missing redirect url: {}".format(paper.paper_id))
    # print('elsevier: {}'.format(paper.paper_id))
    # with open(fn, 'r') as f:
    #     try:
    #         soup = BeautifulSoup(f.read(), 'html.parser')
    #     except:
    #         print('elsevier: could not read data')
    #         return None
    # items = soup.find_all("a", title='Institutional Profile Page')
    # affiliations = [ item.string for item in items ]
    # # print(affiliations)
    # institutions = [ [ paper.paper_id, affiliation ] for affiliation in affiliations ]
    # write_json('{}/{}'.format(paper_path(paper.paper_id), 'institutions.json'), { 'institutions': institutions })
    # return affiliations
+
def find_authors(authors, line):
    """Return the first author record whose display name occurs in *line*.

    Each record is a sequence whose element at index 2 is the author name
    searched for as a substring; returns None when nothing matches.
    """
    return next((record for record in authors if record[2] in line), None)
+
def paper_path(paper_id):
    """Per-paper DOI data directory, sharded by the id's first two characters."""
    return '{}/{}/{}'.format(DOI_DIR, paper_id[:2], paper_id)


if __name__ == '__main__':
    doi_report()
diff --git a/scraper/s2-dump-db-pdf-urls.py b/scraper/s2-dump-db-pdf-urls.py
new file mode 100644
index 00000000..bc702e09
--- /dev/null
+++ b/scraper/s2-dump-db-pdf-urls.py
@@ -0,0 +1,122 @@
+import os
+import glob
+import simplejson as json
+import click
+from urllib.parse import urlparse
+import operator
+from util import *
+
@click.command()
def s2_dump_pdf_urls():
    """Collect the best available URL for every paper under datasets/s2.

    Walks the db_paper and raw_paper trees, extracts one row per paper
    (id, pdf url, ieee url, doi url, extra url), tallies statistics, and
    writes the combined CSVs consumed by the fetch scripts.
    """
    rows = []
    pdf_count = 0
    ieee_count = 0
    url_count = 0
    doi_count = 0
    empty_count = 0
    domains = {}
    pdf = []
    doi = []
    for fn in glob.iglob('./datasets/s2/*_papers/**/paper.json', recursive=True):
        # BUG FIX: reset row each iteration so a path that matches neither
        # branch cannot reuse (or crash on) the previous iteration's row.
        row = None
        if 'db_paper' in fn:
            row = process_db_paper(fn)
        elif 'raw_paper' in fn:
            row = process_raw_paper(fn)
        if row is not None:
            rows.append(row)
            if row[1] is not None:
                pdf.append([row[0], row[1]])
                pdf_count += 1
            elif row[2] is not None:
                doi.append([row[0], row[2]])
                ieee_count += 1
            elif row[3] is not None:
                doi.append([row[0], row[3]])
                doi_count += 1
            elif row[4] is not None:
                # Only treat non-pdf extra urls as DOI-style links.
                if 'pdf' not in row[4]:
                    doi.append([row[0], row[4]])
                    url_count += 1
                    domain = urlparse(row[4]).netloc
                    domains[domain] = domains.get(domain, 0) + 1
            else:
                empty_count += 1
    print("Wrote {} rows".format(len(rows)))
    print("pdf count: {}".format(pdf_count))
    print("ieee count: {}".format(ieee_count))
    print("doi count: {}".format(doi_count))
    print("url count: {}".format(url_count))
    for domain, count in sorted(domains.items(), key=operator.itemgetter(1)):
        print(" -- {} - {}".format(domain, count))
    print("empty count: {}".format(empty_count))
    write_csv('db_paper_pdf_list.csv', keys=['Paper ID', 'PDF URL', 'IEEE URL', 'DOI URL', 'Extra URL'], rows=rows)
    write_csv('db_paper_pdf.csv', keys=None, rows=pdf)
    write_csv('db_paper_doi.csv', keys=None, rows=doi)
+
def process_db_paper(fn):
    """Extract [paper_id, pdf, ieee, doi, extra] URLs from a db_paper paper.json.

    Returns None when the JSON could not be read.
    """
    paper = read_json(fn)
    if paper is None:
        return None
    paper_id = paper['id']
    pdf_url = None
    ieee_url = None
    doi_url = None
    extra_url = None
    # BUG FIX: use .get so records without these keys don't raise KeyError.
    # Prefer the Semantic Scholar hosted PDF when present.
    if paper.get('s2PdfUrl'):
        pdf_url = paper['s2PdfUrl']
    for url in paper.get('pdfUrls', []):
        if 'ieeexplore.ieee.org' in url:
            ieee_url = url
        elif 'doi.org' in url:
            doi_url = url
        elif pdf_url is None and 'pdf' in url:
            pdf_url = url
        else:
            extra_url = url
    return [paper_id, pdf_url, ieee_url, doi_url, extra_url]
+
def process_raw_paper(fn):
    """Extract [paper_id, pdf, ieee, doi, extra] URLs from a raw_paper paper.json.

    Returns None when the record is unreadable or has no 'paper' payload.
    """
    data = read_json(fn)
    # BUG FIX: read_json may return None; `'paper' not in None` raised TypeError.
    if data is None or 'paper' not in data:
        print(data)
        return None
    paper = data['paper']
    if paper is None:
        return None
    paper_id = paper['id']
    pdf_url = None
    ieee_url = None
    doi_url = None
    extra_url = None
    # The primary link is the best hint for the pdf/doi slot.
    if 'primaryPaperLink' in paper and 'url' in paper['primaryPaperLink']:
        primary_url = paper['primaryPaperLink']['url']
        if 'pdf' in primary_url:
            pdf_url = primary_url
        elif 'doi' in primary_url:
            doi_url = primary_url
    for link in paper.get('links', []):
        url = link['url']
        if 'ieeexplore.ieee.org' in url:
            ieee_url = url
        elif 'doi.org' in url:
            doi_url = url
        elif pdf_url is None and 'pdf' in url:
            pdf_url = url
        else:
            extra_url = url
    return [paper_id, pdf_url, ieee_url, doi_url, extra_url]

if __name__ == '__main__':
    s2_dump_pdf_urls()
diff --git a/s2-dump-ids.py b/scraper/s2-dump-ids.py
index 66ff6d77..bddc8040 100644
--- a/s2-dump-ids.py
+++ b/scraper/s2-dump-ids.py
@@ -1,7 +1,7 @@
import os
import gzip
import glob
-import json
+import simplejson as json
import click
from util import *
@@ -19,12 +19,13 @@ def s2_dump_ids():
def process_paper(fn, ids):
with open(fn, 'r') as f:
data = json.load(f)
+ print(data['paperId'])
ids[data['paperId']] = True
for cite in data['citations']:
ids[cite['paperId']] = True
def paper_path(paper_id):
- return '{}/{}/{}'.format(DATA_DIR, paper_id[0:3], paper_id)
+ return '{}/{}/{}'.format(DATA_DIR, paper_id[0:2], paper_id)
if __name__ == '__main__':
s2_dump_ids()
diff --git a/scraper/s2-dump-missing-paper-ids.py b/scraper/s2-dump-missing-paper-ids.py
new file mode 100644
index 00000000..bf0b7e50
--- /dev/null
+++ b/scraper/s2-dump-missing-paper-ids.py
@@ -0,0 +1,40 @@
+import os
+import gzip
+import glob
+import click
+from util import *
+
+DB_PAPER_DIR = './datasets/s2/db_papers'
+RAW_PAPER_DIR = './datasets/s2/raw_papers'
+
@click.command()
@click.option('--fn', '-f', default='ids.json', help='List of IDs to extract from the big dataset.')
def fetch_missing_entries(fn):
    """Write missing.csv listing every paper id in *fn* not yet fetched locally."""
    missing_ids = load_missing_ids(fn)
    # One id per row; renamed loop variable to avoid shadowing the builtin `id`.
    write_csv('./missing.csv', keys=None, rows=[[paper_id] for paper_id in missing_ids])
+
def load_missing_ids(fn):
    """Return the ids from *fn* present in neither the db_paper nor raw_paper tree."""
    missing_lookup = {}
    ids = read_json(fn)
    found_count = 0
    for paper_id in ids:
        # A paper counts as present if either local copy exists.
        if os.path.exists(make_db_paper_path(paper_id)) or os.path.exists(make_raw_paper_path(paper_id)):
            found_count += 1
        else:
            missing_lookup[paper_id] = True
    # (removed the write-only `lookup` dict and redundant missing_count)
    print("{} papers found, {} must be fetched".format(found_count, len(missing_lookup)))
    return missing_lookup.keys()
+
def make_db_paper_path(paper_id):
    """Sharded db_paper directory (first two id characters form the shard)."""
    return '{}/{}/{}'.format(DB_PAPER_DIR, paper_id[:2], paper_id)

def make_raw_paper_path(paper_id):
    """Sharded raw_paper directory (first two id characters form the shard)."""
    return '{}/{}/{}'.format(RAW_PAPER_DIR, paper_id[:2], paper_id)

if __name__ == '__main__':
    fetch_missing_entries()
diff --git a/s2-extract-papers.py b/scraper/s2-extract-papers.py
index 90323e6e..1969643a 100644
--- a/s2-extract-papers.py
+++ b/scraper/s2-extract-papers.py
@@ -2,40 +2,52 @@ import os
import gzip
import glob
import click
+from util import *
S2_DIR = '/media/blue/undisclosed/semantic-scholar/corpus-2018-05-03'
-DATA_DIR = '/home/lens/undisclosed/megapixels_dev/datasets/s2/db_papers'
+DATA_DIR = '/home/lens/undisclosed/megapixels_dev/scraper/datasets/s2/db_papers'
@click.command()
-@click.option('--input', '-i', default='ids.json', help='List of IDs to extract from the big dataset.')
+@click.option('--fn', '-f', default='ids.json', help='List of IDs to extract from the big dataset.')
def fetch_entries(fn):
ids = load_id_lookup(fn)
- for filename in glob.iglob('{}/*.gz'.format(S2_DIR)):
- search_dataset_shard('{}/{}'.format(S2_DIR, filename), ids)
+ for fn in glob.iglob('{}/*.gz'.format(S2_DIR)):
+ search_dataset_shard(fn, ids)
def search_dataset_shard(fn, ids):
+ print(fn)
+ i = 0
with gzip.open(fn, 'r') as f:
+ i += 1
+ if (i % 1000) == 0:
+ print("{}...".format(i))
for line in f.readlines():
- process_paper(str(line)[2:-3])
+ process_paper(line.decode('UTF-8'), ids)
-def process_paper(line):
+def process_paper(line, ids):
paper_id = line.split('"id":"', 2)[1].split('"', 2)[0]
if paper_id in ids:
- print(paper_id)
+ #print(paper_id)
del ids[paper_id]
write_paper(paper_id, line)
def load_id_lookup(fn):
lookup = {}
ids = read_json(fn)
+ skip_count = 0
+ save_count = 0
for paper_id in ids:
path = paper_path(paper_id)
if not os.path.exists(path):
lookup[paper_id] = True
+ save_count += 1
+ else:
+ skip_count += 1
+ print("finding {} ids ({} already pulled)".format(save_count, skip_count))
return lookup
def paper_path(paper_id):
- return '{}/{}/{}'.format(DATA_DIR, paper_id[0:3], paper_id)
+ return '{}/{}/{}'.format(DATA_DIR, paper_id[0:2], paper_id)
def write_paper(paper_id, data):
dir = paper_path(paper_id)
@@ -43,7 +55,7 @@ def write_paper(paper_id, data):
if os.path.exists(fn):
return
os.makedirs(dir, exist_ok=True)
- with open(fn, 'wb') as f:
+ with open(fn, 'w') as f:
f.write(data)
if __name__ == '__main__':
diff --git a/scraper/s2-fetch-doi.py b/scraper/s2-fetch-doi.py
new file mode 100644
index 00000000..ae80036e
--- /dev/null
+++ b/scraper/s2-fetch-doi.py
@@ -0,0 +1,69 @@
+import os
+import sys
+import csv
+import subprocess
+import time
+import random
+import re
+import simplejson as json
+import click
+from urllib.parse import urlparse
+from s2 import SemanticScholarAPI
+from util import *
+
+s2 = SemanticScholarAPI()
+
@click.command()
@click.option('--fn', '-i', default='db_paper_doi.csv', help='Filename of CSV (id, url,)')
def fetch_doi_list(fn):
    """Fetch the DOI landing page for every (paper_id, url) row in *fn*."""
    lines = read_csv(fn, keys=False)
    # (removed the unused `domains` accumulator)
    for line in lines:
        paper_id, url = line
        if url:
            # fetch_doi returns the landing page's domain, or None on skip/error.
            domain = fetch_doi(paper_id, url)
            print(domain)
    print("{} papers processed".format(len(lines)))
+
def fetch_doi(paper_id, url, replace=False):
    """Download a DOI landing page for *paper_id* and record its domain.

    Returns the page's netloc, or None when the fetch was skipped (already
    present) or failed. With replace=True, existing files are renamed aside
    first so the page is re-fetched.
    """
    os.makedirs(make_doi_path(paper_id), exist_ok=True)
    doi_fn = make_doi_fn(paper_id)
    url_fn = make_url_fn(paper_id)
    txt_fn = make_txt_fn(paper_id)
    if replace and os.path.exists(doi_fn):
        os.rename(doi_fn, old_doi_fn(paper_id))
        # BUG FIX: the .url sidecar may be missing (earlier failed run);
        # renaming unconditionally raised FileNotFoundError.
        if os.path.exists(url_fn):
            os.rename(url_fn, old_url_fn(paper_id))
    if os.path.exists(doi_fn) or os.path.exists(txt_fn):
        # BUG FIX: callers assign a single value; the old `return None, None`
        # made the success path (a single domain) inconsistent with failures.
        return None
    size, final_url = s2.fetch_doi(url, doi_fn)
    if size is None:
        print("{} empty?".format(paper_id))
        time.sleep(random.randint(2, 5))
        return None
    print("{} {} kb".format(paper_id, int(size / 1024)))
    domain = urlparse(final_url).netloc
    write_json(url_fn, {
        'paper_id': paper_id,
        'domain': domain
    })
    # Polite crawl delay between requests.
    time.sleep(random.randint(2, 5))
    return domain
+
def make_doi_path(paper_id):
    """Directory that holds the cached DOI page for *paper_id*."""
    return './datasets/s2/doi/{}/{}'.format(paper_id[:2], paper_id)

def make_doi_fn(paper_id):
    """Cached DOI landing-page file for *paper_id*."""
    return '{}/paper.doi'.format(make_doi_path(paper_id))

def make_url_fn(paper_id):
    """Sidecar file recording the DOI page's final domain."""
    return '{}/paper.url'.format(make_doi_path(paper_id))

def make_txt_fn(paper_id):
    """Extracted-text file for the paper's PDF (lives under the pdf tree)."""
    return './datasets/s2/pdf/{}/{}/paper.txt'.format(paper_id[:2], paper_id)
+
def old_doi_fn(paper_id):
    """Backup name used when a DOI page is re-fetched with replace=True."""
    return './datasets/s2/doi/{}/{}/paper.doi2'.format(paper_id[:2], paper_id)

def old_url_fn(paper_id):
    """Backup name for the .url sidecar when re-fetching."""
    return './datasets/s2/doi/{}/{}/paper.url2'.format(paper_id[:2], paper_id)

if __name__ == '__main__':
    fetch_doi_list()
diff --git a/scraper/s2-fetch-google-sheet.py b/scraper/s2-fetch-google-sheet.py
new file mode 100644
index 00000000..1fc887e4
--- /dev/null
+++ b/scraper/s2-fetch-google-sheet.py
@@ -0,0 +1,4 @@
+from util import *
+
+if __name__ == '__main__':
+ fetch_google_sheet()
diff --git a/scraper/s2-fetch-pdf.py b/scraper/s2-fetch-pdf.py
new file mode 100644
index 00000000..5477cbd5
--- /dev/null
+++ b/scraper/s2-fetch-pdf.py
@@ -0,0 +1,49 @@
+import os
+import sys
+import csv
+import subprocess
+import time
+import random
+import re
+import simplejson as json
+import click
+from s2 import SemanticScholarAPI
+from util import *
+
+s2 = SemanticScholarAPI()
+
@click.command()
@click.option('--fn', '-i', default='db_paper_pdf.csv', help='Filename of CSV (id, url,)')
def fetch_pdfs(fn):
    """Download every (paper_id, url) PDF listed in the CSV *fn*."""
    rows = read_csv(fn, keys=False)
    for row in rows:
        paper_id, url = row
        fetch_pdf(paper_id, url)
    print("{} papers processed".format(len(rows)))
+
def fetch_pdf(paper_id, url):
    """Fetch one PDF into its sharded path, skipping papers already downloaded."""
    os.makedirs(make_pdf_path(paper_id), exist_ok=True)
    pdf_fn = make_pdf_fn(paper_id)
    txt_fn = make_txt_fn(paper_id)
    already_fetched = os.path.exists(pdf_fn) or os.path.exists(txt_fn)
    if already_fetched:
        return
    size = s2.fetch_file(url, pdf_fn)
    if size is None:
        print("{} empty?".format(paper_id))
        time.sleep(random.randint(5, 10))
        return None
    print("{} {} kb".format(paper_id, int(size / 1024)))
    # Polite crawl delay between downloads.
    time.sleep(random.randint(5, 10))
    return
+
def make_pdf_path(paper_id):
    """Directory for a paper's PDF, sharded by the first two id characters."""
    return './datasets/s2/pdf/{}/{}'.format(paper_id[:2], paper_id)

def make_pdf_fn(paper_id):
    """Full path of the downloaded PDF."""
    return '{}/paper.pdf'.format(make_pdf_path(paper_id))

def make_txt_fn(paper_id):
    """Full path of the extracted-text sidecar."""
    return '{}/paper.txt'.format(make_pdf_path(paper_id))

if __name__ == '__main__':
    fetch_pdfs()
diff --git a/scraper/s2-geocode-spreadsheet.py b/scraper/s2-geocode-spreadsheet.py
new file mode 100644
index 00000000..d0fd2050
--- /dev/null
+++ b/scraper/s2-geocode-spreadsheet.py
@@ -0,0 +1,83 @@
+import os
+import csv
+import click
+import time
+from geopy import geocoders
+from dotenv import load_dotenv
+from util import *
+load_dotenv()
+
@click.command()
def s2_geocode_spreadsheet():
    """Fill in missing lat/lng cells of the shared Google sheet via geocoding.

    First pass: remember the canonical row index for each cname. Second pass:
    rows lacking coordinates either copy them from their canonical row, or are
    geocoded from the best available text (address > name > cname).
    """
    geolocator = geocoders.GoogleV3(os.getenv('MAPS_API_KEY'))

    worksheet = fetch_worksheet()
    rows = fetch_google_sheet()
    valid_count = 0
    invalid_count = 0

    print("got {} rows".format(len(rows)))

    cname_lookup = {}
    for i, row in enumerate(rows):
        if len(row) == 6:
            cname, name, address, lat, lng, org_type = row
        elif len(row) == 7:
            cname, name, address, lat, lng, org_type, extra_address = row
        else:
            print("Weirdly formatted row {}".format(i))
            continue
        if cname == name or cname not in cname_lookup:
            cname_lookup[cname] = i

    # Columns: 0 cname, 1 name, 2 address, 3 lat, 4 lng, 5 org_type
    for i, row in enumerate(rows):
        if len(row) == 6:
            cname, name, address, lat, lng, org_type = row
        elif len(row) == 7:
            cname, name, address, lat, lng, org_type, extra_address = row
        else:
            print("Weirdly formatted row {}: {} entries".format(i, len(row)))
            continue
        if lat and lng:
            continue
        # Copy coordinates from the canonical row for this cname when it has them.
        c_row = rows[cname_lookup[cname]]
        if c_row[3] and c_row[4]:
            print("name {}, found cname: {}".format(name, cname))
            worksheet.update_cell(i+2, 3, c_row[2])
            worksheet.update_cell(i+2, 4, c_row[3])
            worksheet.update_cell(i+2, 5, c_row[4])
            continue
        # BUG FIX: initialise so the check below cannot hit an unbound local
        # (NameError) when address, name and cname are all empty.
        address_to_geocode = None
        if address:
            address_to_geocode = address
        elif name:
            address_to_geocode = name
        elif cname:
            address_to_geocode = cname

        if not address_to_geocode:
            continue

        print(address_to_geocode)
        location = geolocator.geocode(address_to_geocode)
        if location:
            print("{} found: {}".format(i+1, name))
            print(location.raw)
            worksheet.update_cell(i+2, 3, location.address)
            worksheet.update_cell(i+2, 4, location.latitude)
            worksheet.update_cell(i+2, 5, location.longitude)
            # Preserve the original address in column 7 if geocoding changed it.
            if address and address != location.address:
                worksheet.update_cell(i+2, 7, address)
            valid_count += 1
            row[2] = location.address
            row[3] = location.latitude
            row[4] = location.longitude
        else:
            print("{} not found: {}".format(i+1, address_to_geocode))
            invalid_count += 1
        # Stay under the geocoding API rate limit.
        time.sleep(2)

    print("geocoded {} addresses, {} found, {} not found".format(len(rows), valid_count, invalid_count))

if __name__ == '__main__':
    s2_geocode_spreadsheet()
diff --git a/scraper/s2-geocode.py b/scraper/s2-geocode.py
new file mode 100644
index 00000000..eee11c4d
--- /dev/null
+++ b/scraper/s2-geocode.py
@@ -0,0 +1,81 @@
+import random
+import re
+import os
+import glob
+import time
+import simplejson as json
+from geopy import geocoders
+import click
+from urllib.parse import urlparse
+from dotenv import load_dotenv
+import operator
+from util import *
+load_dotenv()
+
@click.command()
@click.option('--fn', '-f', default='reports/doi_institutions_unknown.csv', help='List of institution names, to be geocoded :)')
def s2_geocode(fn):
    """Geocode institution names from *fn*, appending hits to the Google sheet.

    NOTE(review): deliberately disabled — the bare `return` below exits right
    after the worksheet handle is obtained; nothing past it currently runs.
    """
    # geolocator = geocoders.Nominatim(user_agent="cool geocoding service")
    geolocator = geocoders.GoogleV3(os.getenv('MAPS_API_KEY'))
    worksheet = fetch_worksheet()

    ## DISABLED!!
    return
    print(fn)

    rows = read_csv(fn, keys=False)
    # valid = read_csv('./reports/doi_institutions_geocoded.csv', keys=False, create=True)
    # invalid = read_csv('./reports/doi_institutions_not_found.csv', keys=False, create=True)
    # valid_names = [row[0] for row in valid]
    # invalid_names = [row[0] for row in invalid]
    # random.shuffle(rows)
    for i, row in enumerate(rows):
        # Column 2 holds the institution name in the unknown-institutions CSV.
        name = row[2]
        name = remove_department_name(name)
        if not name:
            continue
        try:
            location = geolocator.geocode(name)
        except:
            # Geocoder/network errors are treated the same as "not found".
            location = None
        if location:
            print("found: {}".format(name))
            cname = name
            # Prefer the "...University" segment as the canonical name.
            for word in name.split(', '):
                if "university" in word.lower():
                    cname = word
            worksheet.append_row([
                cname, name, location.address, location.latitude, location.longitude, 'edu'
            ])
            # valid.append([
            #     name,
            #     location.latitude,
            #     location.longitude,
            #     location.address,
            # ])
            # valid_names.append(name)
        else:
            print("not found: {}".format(name))
            # invalid.append(row)
            # invalid_names.append(row[0])
        # if i and (i % 20) == 0:
        #     print("{}...".format(i))
        #     write_csv('./reports/doi_institutions_geocoded.csv', keys=None, rows=valid)
        #     write_csv('./reports/doi_institutions_not_found.csv', keys=None, rows=invalid)
        # Stay under the geocoding API rate limit.
        time.sleep(2)
+
def remove_department_name(name):
    """Drop 'School of', 'Department' and 'Dept' segments from a comma-separated name."""
    skip_words = ('school of', 'department', 'dept')
    kept = [
        part for part in name.split(', ')
        if not any(word in part.lower() for word in skip_words)
    ]
    return ', '.join(kept)

if __name__ == '__main__':
    s2_geocode()
diff --git a/scraper/s2-merge-csv.py b/scraper/s2-merge-csv.py
new file mode 100644
index 00000000..301f47ae
--- /dev/null
+++ b/scraper/s2-merge-csv.py
@@ -0,0 +1,28 @@
+import os
+import glob
+import time
+import simplejson as json
+import click
+import operator
+from util import *
+import random
+
@click.command()
@click.option('--path', '-d', default='report/institutions_geocoded', help='Path to CSVs')
def s2_merge_csv(path):
    """Merge every CSV under *path* into one, keeping the first row seen per key."""
    print(path)
    lookup = {}
    for csv_fn in glob.iglob('{}/*.csv'.format(path)):
        for row in read_csv(csv_fn, keys=False):
            # First occurrence of a key wins.
            lookup.setdefault(row[0], row)
    deduped = [lookup[key] for key in sorted(lookup.keys())]
    write_csv('{}.csv'.format(path), keys=None, rows=deduped)

if __name__ == '__main__':
    s2_merge_csv()
diff --git a/s2-search.py b/scraper/s2-papers.py
index b9e8db2d..bf77a734 100644
--- a/s2-search.py
+++ b/scraper/s2-papers.py
@@ -23,24 +23,24 @@ totalPages
totalResults
'''
+s2 = SemanticScholarAPI()
+
@click.command()
-@click.option('--index', '-n', default=0, help='Index of CSV.')
-def fetch_entries(index):
+@click.option('--index', '-n', default=0, help='Index of CSV (query,)')
+@click.option('--depth', '-d', default=1, help='Depth to recurse (not implemented).')
+def fetch_papers(index, depth):
keys, lines = read_citation_list(index)
- s2 = SemanticScholarAPI()
for line in lines:
label = line[0]
title = re.sub(r'[^-0-9a-zA-Z ]+', '', line[1])
entry_fn = './datasets/s2/entries/{}.json'.format(title)
if not os.path.exists(entry_fn):
- results = s2.search(title)
- write_json(dump_fn, results)
- if len(results['results']) == 0:
- print("No results for {}".format(title))
- else:
- print(title)
- write_json(entry_fn, results['results'][0])
- time.sleep(random.randint(10, 20))
+ print('not found: {}'.format(entry_fn))
+ continue
+ result = read_json(entry_fn)
+ paper_id = result['id']
+ paper = fetch_paper(paper_id)
+ # get all of the paper's citations
if __name__ == '__main__':
- fetch_entries()
+ fetch_papers()
diff --git a/scraper/s2-pdf-first-pages.py b/scraper/s2-pdf-first-pages.py
new file mode 100644
index 00000000..0a6b20bd
--- /dev/null
+++ b/scraper/s2-pdf-first-pages.py
@@ -0,0 +1,133 @@
+import re
+import os
+import gzip
+import glob
+import simplejson as json
+import click
+import math
+import string
+from util import *
+
+PDF_DIR = 'datasets/s2/pdf'
+
@click.command()
def report_first_pages():
    """Scan extracted PDF first pages, reporting found/missing institutions."""
    rows = []
    institution_names = []
    institutions = []
    no_institutions = []
    for fn in glob.iglob('{}/**/*.txt'.format(PDF_DIR), recursive=True):
        data = process_paper(fn)
        # BUG FIX: process_paper returns None when no paper record exists;
        # indexing into it raised TypeError.
        if data is None:
            continue
        rows.append(data['first_pages'])
        if data['institutions']:
            for institution in data['institutions']:
                institutions.append(institution)
                institution_names.append(institution[1])
        if data['no_institutions']:
            no_institutions.append(data['no_institutions'])
    deduped_institutions = dedupe(institution_names)

    write_report('reports/first_pages.html', title='First pages', keys=None, rows=rows)
    write_report('reports/institutions.html', title='Institutions', keys=None, rows=sorted(institutions, key=lambda x: x[1]))
    write_report('reports/institutions_missing.html', title='Institutions', keys=None, rows=no_institutions)
    write_csv('reports/institution_names.csv', keys=None, rows=[(name,) for name in deduped_institutions])
    print("{} deduped institutions".format(len(deduped_institutions)))
+
def dedupe(a):
    """Return the unique items of *a*, sorted.

    Replaces the hand-rolled dict-as-set with the stdlib set; behavior is
    identical for hashable, orderable items.
    """
    return sorted(set(a))
+
def process_paper(fn):
    """Parse one extracted first page, harvesting emails, authors and institutions.

    Reads the text file *fn* line by line up to the abstract, matching lines
    against the paper's author list and a set of institution keywords. Writes
    the institutions found to institutions.json under the paper's directory.
    Returns a dict with 'first_pages', 'institutions' and 'no_institutions'
    entries, or None when no paper record exists for the id.
    """
    # Path layout is <PDF_DIR>/<shard>/<paper_id>/paper.txt.
    paper_id = fn.replace(PDF_DIR, '').split('/')[2]
    paper = load_paper(paper_id)
    if paper is None:
        print("{} no paper found!".format(paper_id))
        return None
    with open(fn, 'r') as f:
        lines = []
        emails = []
        institutions = []
        # (id, display name, lowercased name) triples for matching.
        authors = [ (a[0], a[1], a[1].lower(),) for a in paper.authors ]
        journal = paper.journal.lower()
        found_authors = []
        for line in f.readlines():
            l = line.lower()
            # Everything after the abstract is body text — stop scanning.
            if 'abstract' in l:
                break
            if len(line) < 3:
                continue
            # Skip the running journal header.
            if journal and journal in l:
                continue
            if '@' in line:
                # print('email {}'.format(line))
                emails.append(line)
                continue
            # Author lines are comma/'and'-separated name lists.
            names = [s.strip() for s in re.split(',| and ', l)]
            was_found = False
            for name in names:
                found = find_authors(authors, name)
                if found:
                    was_found = True
                    # print("found {}".format(found[1]))
                    if found[0]:
                        found_authors.append(found)
            if was_found:
                # lines.append(NameLine(line))
                continue

            # Heuristic institution detection by keyword.
            if 'university' in l or 'universiteit' in l or 'research center' in l or 'research lab' in l or 'college' in l or ', inc' in l or 'institute' in l:
                # Trim leading/trailing punctuation and digits, collapse whitespace.
                inst = re.sub(r'^[\W\d]+', '', line)
                inst = re.sub(r'[\W\d]+$', '', inst)
                inst = re.sub(r'\s+', ' ', inst)
                # NOTE(review): the unescaped '.' matches any character, so
                # e.g. 'Depts' also becomes 'Department ' — likely wants r'Dept\.'.
                inst = re.sub(r'Dept.', 'Department ', inst)
                if len(inst) < 160:
                    inst = inst.replace('&', 'and')
                    inst_parts = []
                    department = ''
                    # Drop titles/artifacts; split out the department segment.
                    for inst_part in inst.split(','):
                        inst_part = inst_part.strip()
                        inst_low = inst_part.lower()
                        if 'prof' in inst_low:
                            continue
                        if 'article ' in inst_low:
                            continue
                        if 'department' in inst_low:
                            department = inst_part
                        else:
                            inst_parts.append(inst_part)
                    inst = ', '.join(inst_parts)
                    if inst:
                        # Replace non-ASCII characters with spaces.
                        inst = ''.join([i if ord(i) < 128 else ' ' for i in inst]).strip()
                        institutions.append([ paper_id, inst, department ])
                        lines.append(BoldLine(inst))
                        continue
            lines.append(line)
    write_json('{}/{}'.format(paper_path(paper_id), 'institutions.json'), { 'institutions': institutions })
    return {
        'first_pages': [
            paper_id,
            lines,
            found_authors,
            emails,
        ],
        'institutions': None if not len(institutions) else institutions,
        'no_institutions': None if len(institutions) else [
            paper_id,
            lines,
        ],
    }
+
def find_authors(authors, line):
    """Return the first author tuple whose lowercased name (index 2) occurs in *line*, or None."""
    hits = [author for author in authors if author[2] in line]
    return hits[0] if hits else None
+
def paper_path(paper_id):
    """Sharded per-paper directory under the PDF tree."""
    shard = paper_id[:2]
    return '{}/{}/{}'.format(PDF_DIR, shard, paper_id)

if __name__ == '__main__':
    report_first_pages()
diff --git a/scraper/s2-pdf-report.py b/scraper/s2-pdf-report.py
new file mode 100644
index 00000000..cdb340f5
--- /dev/null
+++ b/scraper/s2-pdf-report.py
@@ -0,0 +1,102 @@
+import re
+import os
+import gzip
+import glob
+import simplejson as json
+import click
+import math
+import string
+# import nltk
+from collections import Counter
+from util import *
+
+PDF_DIR = 'datasets/s2/pdf'
+punctuation = re.compile(r'[-.?!,":;()|0-9]')
+
@click.command()
def s2_pdf_report():
    """Summarize address extraction across all extracted PDF text files.

    Buckets papers into empty / no-separator / geocoded / unknown, counts
    word n-grams from unrecognized headings, and writes HTML and CSV reports.
    """
    rows = []
    empty_papers = []
    no_separator_papers = []
    geocoded_papers = []
    unknown_papers = []
    unknown_terms = Counter()
    unknown_bigrams = Counter()
    unknown_trigrams = Counter()
    found_count = 0
    total_count = 0
    addresses = AddressBook()
    for fn in glob.iglob('{}/**/*.txt'.format(PDF_DIR), recursive=True):
        # Path layout is <PDF_DIR>/<shard>/<paper_id>/paper.txt.
        paper_id = fn.replace(PDF_DIR, '').split('/')[2]
        paper = load_paper(paper_id)
        total_count += 1
        # print(paper_id)
        headings, found_abstract = read_headings(fn, paper)
        heading_string = '\n'.join(headings[0:20])
        found_addresses = []
        if not found_abstract:
            if len(headings) == 0:
                empty_papers.append(paper.record())
                continue
            if len(headings) > 20:
                no_separator_papers.append(paper.record())
                # continue
        for heading in headings:
            l = heading.lower().strip()
            address = addresses.find(l)
            if address:
                found_addresses.append(address)
        # NOTE(review): `address` here is the result for the LAST heading only,
        # and is unbound when `headings` is empty — this likely meant
        # `if not found_addresses:`. Confirm before changing.
        if not address:
            for heading in headings:
                l = heading.lower().strip()
                # Keep only letters, collapse runs of whitespace.
                l = re.sub('[^a-zA-Z]+', ' ', l)
                l = re.sub('\s+', ' ', l)
                terms = l.strip().split(' ')
                last_term = None
                penultimate_term = None
                for term in terms:
                    # Skip single characters and the 'cid' extraction artifacts.
                    if len(term) > 1 and term != 'cid':
                        if len(term) > 2:
                            unknown_terms[term] += 1
                        if last_term:
                            unknown_bigrams[last_term + ' ' + term] += 1
                        if penultimate_term:
                            unknown_trigrams[penultimate_term + ' ' + last_term + ' ' + term] += 1
                        penultimate_term = last_term
                        last_term = term

        # MAYBE try checking the entire string against everything?
        # if not len(found_addresses):
        #     l = heading_string.lower().strip()
        #     address = addresses.find(l)
        #     if address:
        #         found_addresses.append(address)

        if len(found_addresses):
            found_count += 1
            for address in found_addresses:
                geocoded_papers.append([paper.paper_id, paper.title] + address)
        else:
            unknown_papers.append([paper.paper_id, paper.title, heading_string])

    write_report('reports/pdf_unknown_terms.html', title='PDF Report: Unknown Terms', keys=None, rows=unknown_terms.most_common(1000))
    write_report('reports/pdf_unknown_bigrams.html', title='PDF Report: Unknown Bigrams', keys=None, rows=unknown_bigrams.most_common(1000))
    write_report('reports/pdf_unknown_trigram.html', title='PDF Report: Unknown Trigrams', keys=None, rows=unknown_trigrams.most_common(1000))
    write_csv('reports/stats/empty_papers.csv', keys=None, rows=empty_papers)
    write_csv('reports/stats/no_separator_papers.csv', keys=None, rows=no_separator_papers)
    write_csv('reports/stats/geocoded_papers.csv', keys=None, rows=geocoded_papers)
    write_csv('reports/stats/unknown_papers.csv', keys=None, rows=unknown_papers)
    print("{} {} ({}%)".format('empty', len(empty_papers), percent(len(empty_papers), total_count)))
    print("{} {} ({}%)".format('no separator', len(no_separator_papers), percent(len(no_separator_papers), total_count)))
    print("{} {} ({}%)".format('found', found_count, percent(found_count, total_count)))
    print("{} {} ({}%)".format('unknown', len(unknown_papers), percent(len(unknown_papers), total_count)))
    print("{} {} entities".format('geocoded', len(geocoded_papers)))
+
def percent(a, b):
    """Integer percentage of a over b; returns 0 when b is 0 (avoids ZeroDivisionError)."""
    if not b:
        return 0
    return round(100 * a / b)
+
def paper_path(paper_id):
    """Sharded per-paper directory under the PDF tree."""
    return '/'.join([PDF_DIR, paper_id[:2], paper_id])

if __name__ == '__main__':
    s2_pdf_report()
diff --git a/scraper/s2-raw-papers.py b/scraper/s2-raw-papers.py
new file mode 100644
index 00000000..089055da
--- /dev/null
+++ b/scraper/s2-raw-papers.py
@@ -0,0 +1,44 @@
+import os
+import sys
+import csv
+import subprocess
+import time
+import random
+import re
+import simplejson as json
+import click
+from s2 import SemanticScholarAPI
+from util import *
+
+s2 = SemanticScholarAPI()
+
@click.command()
@click.option('--fn', '-i', default='missing.csv', help='Filename of CSV (id,)')
def fetch_raw_papers(fn):
    """Fetch the raw S2 record for every paper id listed in CSV *fn*."""
    for row in read_csv(fn, keys=False):
        fetch_raw_paper(row[0])
+
def fetch_raw_paper(paper_id):
    """Fetch and cache one raw S2 paper record; returns the record or None."""
    os.makedirs(make_raw_paper_path(paper_id), exist_ok=True)
    paper_fn = make_raw_paper_fn(paper_id)
    # Already fetched — reuse the cached copy.
    if os.path.exists(paper_fn):
        return read_json(paper_fn)
    print(paper_id)
    paper = s2.raw_paper(paper_id)
    if paper is None:
        print("Got empty paper?? {}".format(paper_id))
        return None
    write_json(paper_fn, paper)
    return paper
+
def make_raw_paper_path(paper_id):
    """Sharded directory for a raw paper record."""
    return './datasets/s2/raw_papers/{}/{}'.format(paper_id[:2], paper_id)

def make_raw_paper_fn(paper_id):
    """Full path of the cached raw paper JSON."""
    return '{}/paper.json'.format(make_raw_paper_path(paper_id))

if __name__ == '__main__':
    fetch_raw_papers()
diff --git a/scraper/s2-search.py b/scraper/s2-search.py
new file mode 100644
index 00000000..e943053a
--- /dev/null
+++ b/scraper/s2-search.py
@@ -0,0 +1,78 @@
+import os
+import sys
+import csv
+import subprocess
+import time
+import random
+import re
+import simplejson as json
+import click
+from s2 import SemanticScholarAPI
+from util import *
+
+'''
+s2 search API format:
+results
+matchedAuthors
+matchedPresentations
+query
+querySuggestions
+results
+stats
+totalPages
+totalResults
+'''
+
@click.command()
@click.option('--index', '-n', default=0, help='Index of CSV (query,)')
def fetch_entries(index):
    """Search S2 for each citation title, cache results, and build a lookup CSV.

    For each row in citation CSV *index*: reuse a cached search entry or query
    the S2 search API (dumping the raw response), then fetch the full paper
    record and append (key, name, title, paper_id) to citation_lookup.csv.
    """
    keys, lines = read_citation_list(index)
    citation_lookup = []
    s2 = SemanticScholarAPI()
    for line in lines:
        key = line[0]
        name = line[1]
        title = line[2].strip()
        # Strip characters the search API chokes on.
        clean_title = re.sub(r'[^-0-9a-zA-Z ]+', '', line[2])
        if len(clean_title) < 2:
            continue
        dump_fn = './datasets/s2/dumps/{}.json'.format(key)
        entry_fn = './datasets/s2/entries/{}.json'.format(key)
        result = None
        if os.path.exists(entry_fn):
            result = read_json(entry_fn)
        else:
            results = s2.search(clean_title)
            # Keep the full response for debugging, the top hit as the entry.
            write_json(dump_fn, results)
            if len(results['results']) == 0:
                print("- {}".format(title))
            else:
                print("+ {}".format(title))
                result = results['results'][0]
                write_json(entry_fn, result)
        if result:
            paper_id = result['id']
            paper = fetch_paper(s2, paper_id)
            citation_lookup.append([key, name, title, paper_id])
    write_csv("datasets/citation_lookup.csv", keys=['key', 'name', 'title', 'paper_id'], rows=citation_lookup)
+
def fetch_paper(s2, paper_id):
    """Fetch one S2 paper record via *s2*, caching it on disk; retries once."""
    cache_dir = './datasets/s2/papers/{}/{}'.format(paper_id[0:2], paper_id)
    os.makedirs(cache_dir, exist_ok=True)
    paper_fn = '{}/paper.json'.format(cache_dir)
    if os.path.exists(paper_fn):
        return read_json(paper_fn)
    print(paper_id)
    paper = s2.paper(paper_id)
    if paper is None:
        print("Got none paper??")
        # One retry before giving up.
        paper = s2.paper(paper_id)
        if paper is None:
            print("Paper not found")
            return None
    write_json(paper_fn, paper)
    return paper

if __name__ == '__main__':
    fetch_entries()
diff --git a/s2.py b/scraper/s2.py
index 5ebe507b..b1b9742c 100644
--- a/s2.py
+++ b/scraper/s2.py
@@ -1,4 +1,6 @@
+import os
import requests
+from util import *
class AuthorStub(object):
@@ -115,17 +117,53 @@ class SemanticScholarAPI(object):
AUTHOR_ENDPOINT = "{}/{}".format(BASE_URL, "author")
PAPER_ENDPOINT = "{}/{}".format(BASE_URL, "paper")
SEARCH_ENDPOINT = "https://www.semanticscholar.org/api/1/search"
+ RAW_PAPER_ENDPOINT = "https://www.semanticscholar.org/api/1/paper"
+ headers = {
+ 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',
+ }
+
    @staticmethod
    def fetch_file(url, fn, **kwargs):
        """Download *url* to file *fn*; return the byte count, or None on any failure.

        NOTE(review): verify=False disables TLS certificate checking —
        confirm this is intentional for the scraped hosts.
        """
        try:
            resp = requests.get(url, params=kwargs, headers=SemanticScholarAPI.headers, verify=False)
            if resp.status_code != 200:
                return None
        except:
            # Any request error (DNS, timeout, TLS) is reported as None.
            return None
        size = 0
        # Stream the body to disk in 1 KiB chunks, tallying the total size.
        with open(fn, 'wb') as f:
            for chunk in resp.iter_content(chunk_size=1024):
                if chunk:
                    size += len(chunk)
                    f.write(chunk)
        return size
+
    @staticmethod
    def fetch_doi(url, fn, **kwargs):
        """Download a DOI landing page to *fn*.

        Returns (size_in_bytes, final_url_after_redirects) on success, or
        (None, None) on any HTTP or request failure.
        NOTE(review): verify=False disables TLS certificate checking.
        """
        try:
            resp = requests.get(url, params=kwargs, headers=SemanticScholarAPI.headers, verify=False)
            if resp.status_code != 200:
                return None, None
        except:
            # Any request error (DNS, timeout, TLS) is reported as (None, None).
            return None, None
        size = 0
        # Stream the body to disk in 1 KiB chunks, tallying the total size.
        with open(fn, 'wb') as f:
            for chunk in resp.iter_content(chunk_size=1024):
                if chunk:
                    size += len(chunk)
                    f.write(chunk)
        # resp.url is the final URL after redirects — used to derive the domain.
        return size, resp.url
@staticmethod
def paper(paper_id, **kwargs):
url = "{}/{}".format(SemanticScholarAPI.PAPER_ENDPOINT, paper_id)
- resp = requests.get(url, params=kwargs)
+ resp = requests.get(url, params=kwargs, headers=SemanticScholarAPI.headers)
return None if resp.status_code != 200 else resp.json() # Paper(**resp.json())
@staticmethod
def author(author_id, **kwargs):
url = "{}/{}".format(SemanticScholarAPI.AUTHOR_ENDPOINT, author_id)
- resp = requests.get(url, params=kwargs)
+ resp = requests.get(url, params=kwargs, headers=SemanticScholarAPI.headers)
return None if resp.status_code != 200 else resp.json() # Author(**resp.json())
@staticmethod
@@ -133,6 +171,12 @@ class SemanticScholarAPI(object):
return "http://pdfs.semanticscholar.org/{}/{}.pdf".format(paper_id[:4], paper_id[4:])
@staticmethod
+ def raw_paper(paper_id, **kwargs):
+ url = "{}/{}".format(SemanticScholarAPI.RAW_PAPER_ENDPOINT, paper_id)
+ resp = requests.get(url, params=kwargs, headers=SemanticScholarAPI.headers)
+ return None if resp.status_code != 200 else resp.json() # Paper(**resp.json())
+
+ @staticmethod
def search(q):
resp = requests.post(SemanticScholarAPI.SEARCH_ENDPOINT, json={
'authors': [],
@@ -146,6 +190,6 @@ class SemanticScholarAPI(object):
'sort': "relevance",
'venues': [],
'yearFilter': None,
- })
+ }, headers=SemanticScholarAPI.headers)
# print(resp.status_code)
return None if resp.status_code != 200 else resp.json()
diff --git a/scraper/samples/s2-orc-paper.json b/scraper/samples/s2-orc-paper.json
new file mode 100644
index 00000000..1fb29126
--- /dev/null
+++ b/scraper/samples/s2-orc-paper.json
@@ -0,0 +1,131 @@
+{
+ "entities": [
+ "Database",
+ "Experiment",
+ "Facial recognition system",
+ "Optimization problem",
+ "Program optimization",
+ "Simultaneous localization and mapping",
+ "Taxicab geometry",
+ "The Matrix",
+ "Video game localization"
+ ],
+ "journalVolume": "",
+ "journalPages": "3871-3879",
+ "pmid": "",
+ "year": 2015,
+ "outCitations": [
+ "bd5accc96a772f2bbb7c64af0aa0b600b0b8594b",
+ "0e3fcfe63b7b6620e3c47e9751fe3456e85cc52f",
+ "023f6fc69fe1f6498e35dbf85932ecb549d36ca4",
+ "54aafe33c4a32fb5875d04fd4a6e6da50920c263",
+ "9f87e3212ab1d89c5d46924c925d6bb1da02f92b",
+ "0f0fcf041559703998abf310e56f8a2f90ee6f21",
+ "53b919c994b3658cca8178e455681ca244a7afad",
+ "084bd02d171e36458f108f07265386f22b34a1ae",
+ "14ce7635ff18318e7094417d0f92acbec6669f1c",
+ "e42998bbebddeeb4b2bedf5da23fa5c4efc976fa",
+ "0cb48f543c4bf329a16c0408c4d2c198679a6057",
+ "0de91641f37b0a81a892e4c914b46d05d33fd36e",
+ "3f204a413d9c8c16f146c306c8d96b91839fed0c",
+ "3393459600368be2c4c9878a3f65a57dcc0c2cfa",
+ "1824b1ccace464ba275ccc86619feaa89018c0ad",
+ "70eeacf9f86ba08fceb3dd703cf015016dac1930",
+ "5040f7f261872a30eec88788f98326395a44db03",
+ "95f12d27c3b4914e0668a268360948bce92f7db3",
+ "2ee56d6c29072abd576ef16bcb26360c42caaf5f",
+ "370b5757a5379b15e30d619e4d3fb9e8e13f3256",
+ "0296fc4d042ca8657a7d9dd02df7eb7c0a0017ad",
+ "129388116bc3229546a84b9bc3d11fdac8b93201",
+ "63f9f3f0e1daede934d6dde1a84fb7994f8929f0",
+ "03f98c175b4230960ac347b1100fbfc10c100d0c",
+ "32d6ec2810b52d1df128be51464bc43b53702232",
+ "1a1a60fd4dc88a14c016b95789385801c6b80574",
+ "800683c891b9c5934246ce2931d1d28e0a364fbf",
+ "0e986f51fe45b00633de9fd0c94d082d2be51406",
+ "0dccc881cb9b474186a01fd60eb3a3e061fa6546",
+ "f731b6745d829241941307c3ebf163e90e200318",
+ "07119bc66e256f88b7436e62a4ac3384365e4e9b",
+ "0a5bf19c1b297713a3267bc670b252f9e51c2b78",
+ "013909077ad843eb6df7a3e8e290cfd5575999d2",
+ "177bc509dd0c7b8d388bb47403f28d6228c14b5c",
+ "b2d9877443ec7da2490027ccc932468f05c7bf85",
+ "b960984881665f692764069015764d25974c8d1b",
+ "4998462014c907c519ae801af39cf58a4c538bc9",
+ "5d1c4e93e32ee686234c5aae7f38025523993c8c",
+ "64d5772f44efe32eb24c9968a3085bc0786bfca7",
+ "1c08824da1383051fba69384a3edf135ba58e7fd",
+ "b5a3cc76ceee9489e66c5929c66aec1cf5210241",
+ "4068574b8678a117d9a434360e9c12fe6232dae0",
+ "830e5b1043227fe189b3f93619ef4c58868758a7",
+ "044d9a8c61383312cdafbcc44b9d00d650b21c70",
+ "3957b51b44f8727fe008162ea8a142a8c7917dea",
+ "55b4b1168c734eeb42882082bd131206dbfedd5b",
+ "140438a77a771a8fb656b39a78ff488066eb6b50",
+ "31a38fd2d9d4f34d2b54318021209fe5565b8f7f"
+ ],
+ "s2Url": "https://semanticscholar.org/paper/788a7b59ea72e23ef4f86dc9abb4450efefeca41",
+ "s2PdfUrl": "",
+ "id": "788a7b59ea72e23ef4f86dc9abb4450efefeca41",
+ "authors": [
+ {
+ "name": "Christos Sagonas",
+ "ids": [
+ "3320415"
+ ]
+ },
+ {
+ "name": "Yannis Panagakis",
+ "ids": [
+ "1780393"
+ ]
+ },
+ {
+ "name": "Stefanos Zafeiriou",
+ "ids": [
+ "1776444"
+ ]
+ },
+ {
+ "name": "Maja Pantic",
+ "ids": [
+ "1694605"
+ ]
+ }
+ ],
+ "journalName": "2015 IEEE International Conference on Computer Vision (ICCV)",
+ "paperAbstract": "Recently, it has been shown that excellent resultscan be achieved in both facial landmark localization and pose-invariant face recognition. These breakthroughs are attributed to the efforts of the community to manually annotate facial images in many different poses and to collect 3D facial data. In this paper, we propose a novel method for joint frontal view reconstruction and landmark localization using a small set of frontal images only. By observing that the frontal facial image is the one having the minimum rank of all different poses, an appropriate model which is able to jointly recover the frontalized version of the face as well as the facial landmarks is devised. To this end, a suitable optimization problem, involving the minimization of the nuclear norm and the matrix l1 norm is solved. The proposed method is assessed in frontal face reconstruction, face landmark localization, pose-invariant face recognition, and face verification in unconstrained conditions. The relevant experiments have been conducted on 8 databases. The experimental results demonstrate the effectiveness of the proposed method in comparison to the state-of-the-art methods for the target problems.",
+ "inCitations": [
+ "0e2365254f7833a1a902a6439f42ece6e5326332",
+ "368d59cf1733af511ed8abbcbeb4fb47afd4da1c",
+ "0db8e6eb861ed9a70305c1839eaef34f2c85bbaf",
+ "fd46586ff7d1cdedc184051268bb07a6630bc72e",
+ "239401dc0dc4c64aa741a75f7324768ef7557889",
+ "ef21738c40f78fd5834cf5910fc58f45f88cf698",
+ "566c104c9a494ae385ab253e3c69864f69d53b52",
+ "5cf7ac83f2b63f2c090757bcf08df4bd2dd63b81",
+ "c62c910264658709e9bf0e769e011e7944c45c90",
+ "b730f01e47151ccfaf568285acf21374efc53f88",
+ "0822cf1f041b2e572a038667474e79acab264ebf",
+ "566a2ede36a6493010ea42a7df49916739e00c9d",
+ "b1a3b19700b8738b4510eecf78a35ff38406df22",
+ "6e9a8a34ab5b7cdc12ea52d94e3462225af2c32c",
+ "a06b6d30e2b31dc600f622ab15afe5e2929581a7",
+ "52dfa36d756a46cf11e6537463add3bdd9bb4011"
+ ],
+ "pdfUrls": [
+ "http://eprints.eemcs.utwente.nl/26840/01/Pantic_Robust_Statistical_Face_Frontalization.pdf",
+ "http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.441",
+ "http://openaccess.thecvf.com/content_iccv_2015/papers/Sagonas_Robust_Statistical_Face_ICCV_2015_paper.pdf",
+ "http://ibug.doc.ic.ac.uk/media/uploads/documents/robust_frontalization.pdf",
+ "http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Sagonas_Robust_Statistical_Face_ICCV_2015_paper.pdf",
+ "https://ibug.doc.ic.ac.uk/media/uploads/documents/robust_frontalization.pdf"
+ ],
+ "title": "Robust Statistical Face Frontalization",
+ "doi": "10.1109/ICCV.2015.441",
+ "sources": [
+ "DBLP"
+ ],
+ "doiUrl": "https://doi.org/10.1109/ICCV.2015.441",
+ "venue": "2015 IEEE International Conference on Computer Vision (ICCV)"
+} \ No newline at end of file
diff --git a/scraper/samples/s2-paper-detail.json b/scraper/samples/s2-paper-detail.json
new file mode 100644
index 00000000..5273fd54
--- /dev/null
+++ b/scraper/samples/s2-paper-detail.json
@@ -0,0 +1,3114 @@
+{
+ "responseType": "PAPER_DETAIL",
+ "paper": {
+ "id": "e4754afaa15b1b53e70743880484b8d0736990ff",
+ "title": {
+ "text": "300 Faces In-The-Wild Challenge: database and results",
+ "fragments": []
+ },
+ "slug": "300-Faces-In-The-Wild-Challenge:-database-and-Sagonas-Antonakos",
+ "paperAbstract": {
+ "text": "Computer Vision has recently witnessed great research advance towards automatic facial points detection. Numerous methodologies have been proposed during the last few years that achieve accurate and efficient performance. However, fair comparison between these methodologies is infeasible mainly due to two issues. (a) Most existing databases, captured under both constrained and unconstrained (in-the-wild) conditions have been annotated using different mark-ups and, in most cases, the accuracy of the annotations is low. (b) Most published works report experimental results using different training/testing sets, different error metrics and, of course, landmark points with semantically different locations. In this paper, we aim to overcome the aforementioned problems by (a) proposing a semi-automatic annotation technique that was employed to re-annotate most existing facial databases under a unified protocol, and (b) presenting the 300 Faces In-The-Wild Challenge (300-W), the first facial landmark localization challenge that was organized twice, in 2013 and 2015. To the best of our knowledge, this is the first effort towards a unified annotation scheme of massive databases and a fair experimental comparison of existing facial landmark localization systems. The images and annotations of the new testing database that was used in the 300-W challenge are available from http://ibug.doc.ic.ac.uk/resources/facial-point-annotations/.",
+ "fragments": []
+ },
+ "authors": [
+ [
+ {
+ "name": "Christos Sagonas",
+ "ids": [
+ "3320415"
+ ],
+ "slug": "Christos-Sagonas"
+ },
+ {
+ "text": "Christos Sagonas",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Epameinondas Antonakos",
+ "ids": [
+ "2788012"
+ ],
+ "slug": "Epameinondas-Antonakos"
+ },
+ {
+ "text": "Epameinondas Antonakos",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Georgios Tzimiropoulos",
+ "ids": [
+ "2610880"
+ ],
+ "slug": "Georgios-Tzimiropoulos"
+ },
+ {
+ "text": "Georgios Tzimiropoulos",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Stefanos P. Zafeiriou",
+ "ids": [
+ "1776444"
+ ],
+ "slug": "Stefanos-P.-Zafeiriou"
+ },
+ {
+ "text": "Stefanos P. Zafeiriou",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Maja Pantic",
+ "ids": [
+ "1694605"
+ ],
+ "slug": "Maja-Pantic"
+ },
+ {
+ "text": "Maja Pantic",
+ "fragments": []
+ }
+ ]
+ ],
+ "structuredAuthors": [
+ {
+ "firstName": "Christos",
+ "middleNames": [],
+ "lastName": "Sagonas"
+ },
+ {
+ "firstName": "Epameinondas",
+ "middleNames": [],
+ "lastName": "Antonakos"
+ },
+ {
+ "firstName": "Georgios",
+ "middleNames": [],
+ "lastName": "Tzimiropoulos"
+ },
+ {
+ "firstName": "Stefanos",
+ "middleNames": [
+ "P."
+ ],
+ "lastName": "Zafeiriou"
+ },
+ {
+ "firstName": "Maja",
+ "middleNames": [],
+ "lastName": "Pantic"
+ }
+ ],
+ "year": {
+ "text": "2016",
+ "fragments": []
+ },
+ "venue": {
+ "text": "Image Vision Comput.",
+ "fragments": []
+ },
+ "citationContexts": [],
+ "citationStats": {
+ "citedByBuckets": [
+ {
+ "startKey": 2015,
+ "endKey": 2015,
+ "count": 2,
+ "estimate": {
+ "min": 2.2728870038174662,
+ "value": 2.7194889455650393,
+ "max": 3.3020132174096997,
+ "confidence": 0.9
+ }
+ },
+ {
+ "startKey": 2016,
+ "endKey": 2016,
+ "count": 17,
+ "estimate": {
+ "min": 19.319539532448463,
+ "value": 23.115656037302834,
+ "max": 28.067112347982448,
+ "confidence": 0.9
+ }
+ },
+ {
+ "startKey": 2017,
+ "endKey": 2017,
+ "count": 45,
+ "estimate": {
+ "min": 51.139957585892994,
+ "value": 61.18850127521338,
+ "max": 74.29529739171824,
+ "confidence": 0.9
+ }
+ },
+ {
+ "startKey": 2018,
+ "endKey": 2018,
+ "count": 32,
+ "estimate": {
+ "min": 43.63943047329535,
+ "value": 52.21418775484875,
+ "max": 63.39865377426623,
+ "confidence": 0.9
+ }
+ }
+ ],
+ "keyCitedByBuckets": [
+ {
+ "startKey": 2016,
+ "endKey": 2016,
+ "count": 2
+ },
+ {
+ "startKey": 2017,
+ "endKey": 2017,
+ "count": 6
+ },
+ {
+ "startKey": 2018,
+ "endKey": 2018,
+ "count": 2
+ }
+ ],
+ "numCitations": 103,
+ "estNumCitations": {
+ "min": 117.05368069659951,
+ "value": 140.0536806965995,
+ "max": 170.0536806965995,
+ "confidence": 0.9
+ },
+ "numReferences": 49,
+ "numKeyCitations": 11,
+ "numKeyReferences": 19,
+ "numViewableReferences": 49,
+ "keyCitationRate": 0.10679611650485436,
+ "estCitationVelocity": {
+ "estimate": {
+ "min": 42.65391605341533,
+ "value": 47.439973828190126,
+ "max": 53.68265788224421,
+ "confidence": 0.9
+ },
+ "estCitationsByRange": [
+ {
+ "value": {
+ "min": 19.319539532448463,
+ "value": 23.115656037302834,
+ "max": 28.067112347982448,
+ "confidence": 0.9
+ },
+ "start": [
+ 2016,
+ 1,
+ 1
+ ],
+ "end": [
+ 2016,
+ 12,
+ 31
+ ]
+ },
+ {
+ "value": {
+ "min": 51.139957585892994,
+ "value": 61.18850127521338,
+ "max": 74.29529739171824,
+ "confidence": 0.9
+ },
+ "start": [
+ 2017,
+ 1,
+ 1
+ ],
+ "end": [
+ 2017,
+ 12,
+ 31
+ ]
+ },
+ {
+ "value": {
+ "min": 48.488256081439275,
+ "value": 58.01576417205417,
+ "max": 70.44294863807359,
+ "confidence": 0.9
+ },
+ "start": [
+ 2018,
+ 1,
+ 1
+ ],
+ "end": [
+ 2018,
+ 12,
+ 31
+ ]
+ }
+ ]
+ },
+ "estCitationAcceleration": {
+ "estimate": {
+ "min": -0.026615969581749135,
+ "value": -0.026615969581749065,
+ "max": -0.026615969581749045,
+ "confidence": 0.9
+ },
+ "estCitationsByRange": [
+ {
+ "value": {
+ "min": 51.139957585892994,
+ "value": 61.18850127521338,
+ "max": 74.29529739171824,
+ "confidence": 0.9
+ },
+ "start": [
+ 2017,
+ 1,
+ 1
+ ],
+ "end": [
+ 2017,
+ 12,
+ 31
+ ]
+ },
+ {
+ "value": {
+ "min": 48.488256081439275,
+ "value": 58.01576417205417,
+ "max": 70.44294863807359,
+ "confidence": 0.9
+ },
+ "start": [
+ 2018,
+ 1,
+ 1
+ ],
+ "end": [
+ 2018,
+ 12,
+ 31
+ ]
+ }
+ ]
+ }
+ },
+ "sources": [
+ "DBLP",
+ "Grobid",
+ "Anansi",
+ "Crawler",
+ "ScienceParse",
+ "SPv2"
+ ],
+ "journal": {
+ "name": "Image Vision Comput.",
+ "volume": "47",
+ "pages": "3-18"
+ },
+ "socialLinks": [],
+ "presentationUrls": [],
+ "doiInfo": {
+ "doi": "10.1016/j.imavis.2016.01.002",
+ "doiUrl": "http://doi.org/10.1016/j.imavis.2016.01.002"
+ },
+ "links": [
+ {
+ "url": "http://doi.org/10.1016/j.imavis.2016.01.002",
+ "linkType": "doi"
+ }
+ ],
+ "primaryPaperLink": {
+ "url": "http://doi.org/10.1016/j.imavis.2016.01.002",
+ "linkType": "doi"
+ },
+ "alternatePaperLinks": [
+ {
+ "url": "http://ibug.doc.ic.ac.uk/media/uploads/documents/1-s2.0-s0262885616000147-main.pdf",
+ "linkType": "crawler"
+ },
+ {
+ "url": "https://doi.org/10.1016/j.imavis.2016.01.002",
+ "linkType": "dblp"
+ },
+ {
+ "url": "https://spiral.imperial.ac.uk:8443/bitstream/10044/1/32322/2/300w.pdf",
+ "linkType": "anansi"
+ }
+ ],
+ "entities": [
+ {
+ "id": "5332",
+ "name": "Computer vision",
+ "slug": "Computer-vision"
+ },
+ {
+ "id": "55238",
+ "name": "Emoticon",
+ "slug": "Emoticon"
+ },
+ {
+ "id": "539730",
+ "name": "Landmark point",
+ "slug": "Landmark-point"
+ },
+ {
+ "id": "76540",
+ "name": "Semiconductor industry",
+ "slug": "Semiconductor-industry"
+ },
+ {
+ "id": "468",
+ "name": "Malignant Fibrous Histiocytoma",
+ "slug": "Malignant-Fibrous-Histiocytoma"
+ }
+ ],
+ "entityRelations": [],
+ "blogs": [],
+ "videos": [],
+ "githubReferences": [],
+ "faqs": [],
+ "scorecardStats": [
+ {
+ "typeKey": "highly_influential",
+ "score": 50,
+ "keyCitationCount": 11
+ },
+ {
+ "typeKey": "cited_by",
+ "citationRankPercent": 99,
+ "citationCount": 140,
+ "score": 10
+ }
+ ],
+ "hasPdf": false
+ },
+ "citingPapers": {
+ "citationType": "citingPapers",
+ "citations": [
+ {
+ "id": "6742c0a26315d7354ab6b1fa62a5fffaea06da14",
+ "title": {
+ "text": "What does 2 D geometric information really tell us about 3 D face shape ?",
+ "fragments": []
+ },
+ "slug": "What-does-2-D-geometric-information-really-tell-us-Bas-Smith",
+ "venue": {
+ "text": "",
+ "fragments": []
+ },
+ "year": 2017,
+ "authors": [
+ [
+ {
+ "name": "Anil Bas",
+ "ids": [
+ "39180407"
+ ],
+ "slug": "Anil-Bas"
+ },
+ {
+ "text": "Anil Bas",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "William A. P. Smith",
+ "ids": [
+ "1687021"
+ ],
+ "slug": "William-A.-P.-Smith"
+ },
+ {
+ "text": "William A. P. Smith",
+ "fragments": []
+ }
+ ]
+ ],
+ "citationContexts": [
+ {
+ "text": "For this reason, there has been sustained interest in building feature detectors capable of accurately labelling face landmarks in uncontrolled images (Sagonas et al. 2016).",
+ "fragments": [
+ {
+ "start": 152,
+ "end": 171
+ }
+ ]
+ },
+ {
+ "text": "Despite the small sample size, we find a pair of faces whose mean landmark error is 2.48% (i.e. they are within the expected accuracy of a landmark detector (Sagonas et al. 2016)) when they are viewed at 61cm and 488cm respectively (second and fourth image in the figure).",
+ "fragments": [
+ {
+ "start": 158,
+ "end": 177
+ }
+ ]
+ },
+ {
+ "text": "Landmark detection on highly uncontrolled face images is now a mature research field with benchmarks (Sagonas et al. 2016) providing an indication of likely accuracy.",
+ "fragments": [
+ {
+ "start": 102,
+ "end": 121
+ }
+ ]
+ },
+ {
+ "text": "they are within the expected accuracy of a landmark detector (Sagonas et al. 2016)) when they are viewed at 61cm and 488cm respectively (second and fourth image in the figure).",
+ "fragments": [
+ {
+ "start": 61,
+ "end": 82
+ }
+ ]
+ },
+ {
+ "text": "Similarly, the 300 faces in the wild challenge (Sagonas et al. 2016) found that even the best methods did not obtain better than 5% accuracy for more than 50% of the landmarks.",
+ "fragments": [
+ {
+ "start": 48,
+ "end": 67
+ }
+ ]
+ },
+ {
+ "text": "For example, state-of-the-art automatic face landmarking provides a mean landmark error under 4.5% of interocular distance for only 50% of images (according to the second conduct of the 300 Faces in the Wild challenge (Sagonas et al. 2016)).",
+ "fragments": [
+ {
+ "start": 219,
+ "end": 238
+ }
+ ]
+ },
+ {
+ "text": "5% of interocular distance for only 50% of images (according to the second conduct of the 300 Faces in the Wild challenge (Sagonas et al. 2016)).",
+ "fragments": [
+ {
+ "start": 122,
+ "end": 143
+ }
+ ]
+ }
+ ],
+ "isKey": true
+ },
+ {
+ "id": "014e3d0fa5248e6f4634dc237e2398160294edce",
+ "title": {
+ "text": "What does 2D geometric information really tell us about 3D face shape?",
+ "fragments": []
+ },
+ "slug": "What-does-2D-geometric-information-really-tell-us-Bas-Smith",
+ "venue": {
+ "text": "ArXiv",
+ "fragments": []
+ },
+ "year": 2017,
+ "authors": [
+ [
+ {
+ "name": "Anil Bas",
+ "ids": [
+ "39180407"
+ ],
+ "slug": "Anil-Bas"
+ },
+ {
+ "text": "Anil Bas",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "William A. P. Smith",
+ "ids": [
+ "1687021"
+ ],
+ "slug": "William-A.-P.-Smith"
+ },
+ {
+ "text": "William A. P. Smith",
+ "fragments": []
+ }
+ ]
+ ],
+ "citationContexts": [
+ {
+ "text": "For this reason, there has been sustained interest in building feature detectors capable of accurately labelling face landmarks in uncontrolled images (Sagonas et al. 2016).",
+ "fragments": [
+ {
+ "start": 152,
+ "end": 171
+ }
+ ]
+ },
+ {
+ "text": "Despite the small sample size, we find a pair of faces whose mean landmark error is 2.48% (i.e. they are within the expected accuracy of a landmark detector (Sagonas et al. 2016)) when they are viewed at 61cm and 488cm respectively (second and fourth image in the figure).",
+ "fragments": [
+ {
+ "start": 158,
+ "end": 177
+ }
+ ]
+ },
+ {
+ "text": "Landmark detection on highly uncontrolled face images is now a mature research field with benchmarks (Sagonas et al. 2016) providing an indication of likely accuracy.",
+ "fragments": [
+ {
+ "start": 102,
+ "end": 121
+ }
+ ]
+ },
+ {
+ "text": "they are within the expected accuracy of a landmark detector (Sagonas et al. 2016)) when they are viewed at 61cm and 488cm respectively (second and fourth image in the figure).",
+ "fragments": [
+ {
+ "start": 61,
+ "end": 82
+ }
+ ]
+ },
+ {
+ "text": "Similarly, the 300 faces in the wild challenge (Sagonas et al. 2016) found that even the best methods did not obtain better than 5% accuracy for more than 50% of the landmarks.",
+ "fragments": [
+ {
+ "start": 48,
+ "end": 67
+ }
+ ]
+ },
+ {
+ "text": "For example, state-of-the-art automatic face landmarking provides a mean landmark error under 4.5% of interocular distance for only 50% of images (according to the second conduct of the 300 Faces in the Wild challenge (Sagonas et al. 2016)).",
+ "fragments": [
+ {
+ "start": 219,
+ "end": 238
+ }
+ ]
+ },
+ {
+ "text": "5% of interocular distance for only 50% of images (according to the second conduct of the 300 Faces in the Wild challenge (Sagonas et al. 2016)).",
+ "fragments": [
+ {
+ "start": 122,
+ "end": 143
+ }
+ ]
+ }
+ ],
+ "isKey": true
+ },
+ {
+ "id": "8f772d9ce324b2ef5857d6e0b2a420bc93961196",
+ "title": {
+ "text": "Facial Landmark Point Localization using Coarse-to-Fine Deep Recurrent Neural Network",
+ "fragments": []
+ },
+ "slug": "Facial-Landmark-Point-Localization-using-Deep-Mahpod-Das",
+ "venue": {
+ "text": "ArXiv",
+ "fragments": []
+ },
+ "year": 2018,
+ "authors": [
+ [
+ {
+ "name": "Shahar Mahpod",
+ "ids": [
+ "2748312"
+ ],
+ "slug": "Shahar-Mahpod"
+ },
+ {
+ "text": "Shahar Mahpod",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Rig Das",
+ "ids": [
+ "3001038"
+ ],
+ "slug": "Rig-Das"
+ },
+ {
+ "text": "Rig Das",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Emanuele Maiorana",
+ "ids": [
+ "1767715"
+ ],
+ "slug": "Emanuele-Maiorana"
+ },
+ {
+ "text": "Emanuele Maiorana",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Yosi Keller",
+ "ids": [
+ "1926432"
+ ],
+ "slug": "Yosi-Keller"
+ },
+ {
+ "text": "Yosi Keller",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Patrizio Campisi",
+ "ids": [
+ "1682433"
+ ],
+ "slug": "Patrizio-Campisi"
+ },
+ {
+ "text": "Patrizio Campisi",
+ "fragments": []
+ }
+ ]
+ ],
+ "citationContexts": [
+ {
+ "text": "In order to evaluate the performance of our proposed CFDRNN framework we perform some exhaustive experiments on the data released for the 300-W competition [23].",
+ "fragments": [
+ {
+ "start": 156,
+ "end": 160
+ }
+ ]
+ },
+ {
+ "text": "300-W, MENPO 300-W private test set [23] 3.",
+ "fragments": [
+ {
+ "start": 36,
+ "end": 40
+ }
+ ]
+ },
+ {
+ "text": "The authors have also tested their proposed framework’s performance against the 300-W private test set [23], which consists of 300 indoor and 300 outdoor images.",
+ "fragments": [
+ {
+ "start": 103,
+ "end": 107
+ }
+ ]
+ },
+ {
+ "text": "This dataset [23] contains 135 complicated facial images with different facial expression, poses, illumination, and multiple faces in a single image.",
+ "fragments": [
+ {
+ "start": 13,
+ "end": 17
+ }
+ ]
+ },
+ {
+ "text": "bug [21], [22], 300-W [23], and Menpo [19], [24] leaves a significant scope for further improvement.",
+ "fragments": [
+ {
+ "start": 22,
+ "end": 26
+ }
+ ]
+ }
+ ],
+ "isKey": true
+ },
+ {
+ "id": "7789a5d87884f8bafec8a82085292e87d4e2866f",
+ "title": {
+ "text": "A Unified Tensor-based Active Appearance Face Model",
+ "fragments": []
+ },
+ "slug": "A-Unified-Tensor-based-Active-Appearance-Face-Model-Feng-Kittler",
+ "venue": {
+ "text": "ArXiv",
+ "fragments": []
+ },
+ "year": 2016,
+ "authors": [
+ [
+ {
+ "name": "Zhen-Hua Feng",
+ "ids": [
+ "2976854"
+ ],
+ "slug": "Zhen-Hua-Feng"
+ },
+ {
+ "text": "Zhen-Hua Feng",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Josef Kittler",
+ "ids": [
+ "1748684"
+ ],
+ "slug": "Josef-Kittler"
+ },
+ {
+ "text": "Josef Kittler",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "William J. Christmas",
+ "ids": [
+ "1942955"
+ ],
+ "slug": "William-J.-Christmas"
+ },
+ {
+ "text": "William J. Christmas",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Xiaojun Wu",
+ "ids": [
+ "50171811"
+ ],
+ "slug": "Xiaojun-Wu"
+ },
+ {
+ "text": "Xiaojun Wu",
+ "fragments": []
+ }
+ ]
+ ],
+ "citationContexts": [
+ {
+ "text": "W face dataset (Sagonas et al., 2016).",
+ "fragments": [
+ {
+ "start": 16,
+ "end": 36
+ }
+ ]
+ },
+ {
+ "text": "The 300-W dataset has been widely used for benchmarking a facial landmark detection algorithm (Sagonas et al., 2016).",
+ "fragments": [
+ {
+ "start": 94,
+ "end": 116
+ }
+ ]
+ },
+ {
+ "text": "Then we demonstrate the capacity of the proposed UT-AAM to synthesise a large number of virtual faces and examine how these synthesised faces can improve the training of a facial landmark detector, using the 300-W face dataset (Sagonas et al., 2016).",
+ "fragments": [
+ {
+ "start": 227,
+ "end": 249
+ }
+ ]
+ },
+ {
+ "text": "W dataset has been widely used for benchmarking a facial landmark detection algorithm (Sagonas et al., 2016).",
+ "fragments": [
+ {
+ "start": 87,
+ "end": 107
+ }
+ ]
+ },
+ {
+ "text": "7 A comparison of the cumulative error distribution curves of SDM and CCR, as well as a set of state-of-the-art methods from Baltrusaitis, Hasan, Jaiswal, Miborrow, Yan and Zhou (Sagonas et al., 2016), on the 300-W face dataset: (a) results on the 300 outdoor face images; (b) results on the 300 indoor faces.",
+ "fragments": [
+ {
+ "start": 178,
+ "end": 200
+ }
+ ]
+ }
+ ],
+ "isKey": true
+ },
+ {
+ "id": "0f21a39fa4c0a19c4a5b4733579e393cb1d04f71",
+ "title": {
+ "text": "Evaluation of optimization components of a 3D to 2D landmark fitting algorithm for head pose estimation",
+ "fragments": []
+ },
+ "slug": "Evaluation-of-optimization-components-of-a-3D-to-2D-Haan",
+ "venue": {
+ "text": "",
+ "fragments": []
+ },
+ "year": 2018,
+ "authors": [
+ [
+ {
+ "name": "Tim de Haan",
+ "ids": [],
+ "slug": "Tim-de-Haan"
+ },
+ {
+ "text": "Tim de Haan",
+ "fragments": []
+ }
+ ]
+ ],
+ "citationContexts": [
+ {
+ "text": "Landmark Localization: Landmark localization has been extensively researched [1], [17], [18], which has led to the development of Dlib [7], a machine learning toolkit that has its usage in many complex applications, such as image processing.",
+ "fragments": [
+ {
+ "start": 77,
+ "end": 80
+ }
+ ]
+ },
+ {
+ "text": "This thesis uses the new 300-W dataset [1, 17, 18], this set consists of 600 photographs that can be used to project the 3DMM onto the landmarks detected by dlib.",
+ "fragments": [
+ {
+ "start": 39,
+ "end": 50
+ }
+ ]
+ },
+ {
+ "text": "histogram of oriented gradients combined with a linear classifier [7], trained on the iBUG 300-W dataset from [1], the landmark detection is an implementation of [19].",
+ "fragments": [
+ {
+ "start": 110,
+ "end": 113
+ }
+ ]
+ },
+ {
+ "text": "Table 8 shows how Dlib compares with other landmarks detection algorithms, which are the winners of the two contests that were organized by [1].",
+ "fragments": [
+ {
+ "start": 140,
+ "end": 143
+ }
+ ]
+ },
+ {
+ "text": "Additionally, the Oracle curve is added, which is the minimum error that can be achieved [1].",
+ "fragments": [
+ {
+ "start": 89,
+ "end": 92
+ }
+ ]
+ }
+ ],
+ "isKey": true
+ },
+ {
+ "id": "1885acea0d24e7b953485f78ec57b2f04e946eaf",
+ "title": {
+ "text": "Combining Local and Global Features for 3D Face Tracking",
+ "fragments": []
+ },
+ "slug": "Combining-Local-and-Global-Features-for-3D-Face-Xiong-Li",
+ "venue": {
+ "text": "2017 IEEE International Conference on Computer Vision Workshops (ICCVW)",
+ "fragments": []
+ },
+ "year": 2017,
+ "authors": [
+ [
+ {
+ "name": "Pengfei Xiong",
+ "ids": [
+ "40448951"
+ ],
+ "slug": "Pengfei-Xiong"
+ },
+ {
+ "text": "Pengfei Xiong",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Guoqing Li",
+ "ids": [
+ "1775836"
+ ],
+ "slug": "Guoqing-Li"
+ },
+ {
+ "text": "Guoqing Li",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Yuhang Sun",
+ "ids": [
+ "48186289"
+ ],
+ "slug": "Yuhang-Sun"
+ },
+ {
+ "text": "Yuhang Sun",
+ "fragments": []
+ }
+ ]
+ ],
+ "citationContexts": [
+ {
+ "text": "In the past few decades, extensive studies [39, 40, 31, 11, 26, 41, 33, 43] have been proposed and singnificant improvements have been achieved, especially since a comprehensice benchmark [32] was made public, and deep convolution neural networks [33, 43] were applied in face shape regression.",
+ "fragments": [
+ {
+ "start": 188,
+ "end": 192
+ }
+ ]
+ },
+ {
+ "text": "Images from AFLW [28], FDDB[22], 300W, 300W-Test [32] are collected and fitted with 3D facial morphable model [4] to generate 84 3D point annotations.",
+ "fragments": [
+ {
+ "start": 49,
+ "end": 53
+ }
+ ]
+ },
+ {
+ "text": "300W-LP: 300W-LP(300W across Large Poses) [45] contains 61225 samples (1786 from IBUG, 5207 from AFW, and 16556 from LFPW) sysnthetical generated from 300-W [32].",
+ "fragments": [
+ {
+ "start": 157,
+ "end": 161
+ }
+ ]
+ },
+ {
+ "text": "[45] sysnthetically generated a series of datasets based on 300W [32], which gradually become the benchmark of 3D shape regression methods.",
+ "fragments": [
+ {
+ "start": 65,
+ "end": 69
+ }
+ ]
+ }
+ ],
+ "isKey": true
+ },
+ {
+ "id": "0a34fe39e9938ae8c813a81ae6d2d3a325600e5c",
+ "title": {
+ "text": "FacePoseNet: Making a Case for Landmark-Free Face Alignment",
+ "fragments": []
+ },
+ "slug": "FacePoseNet:-Making-a-Case-for-Landmark-Free-Face-Chang-Tran",
+ "venue": {
+ "text": "2017 IEEE International Conference on Computer Vision Workshops (ICCVW)",
+ "fragments": []
+ },
+ "year": 2017,
+ "authors": [
+ [
+ {
+ "name": "Feng-Ju Chang",
+ "ids": [
+ "1752756"
+ ],
+ "slug": "Feng-Ju-Chang"
+ },
+ {
+ "text": "Feng-Ju Chang",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Anh Tuan Tran",
+ "ids": [
+ "46634688"
+ ],
+ "slug": "Anh-Tuan-Tran"
+ },
+ {
+ "text": "Anh Tuan Tran",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Tal Hassner",
+ "ids": [
+ "1756099"
+ ],
+ "slug": "Tal-Hassner"
+ },
+ {
+ "text": "Tal Hassner",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Iacopo Masi",
+ "ids": [
+ "11269472"
+ ],
+ "slug": "Iacopo-Masi"
+ },
+ {
+ "text": "Iacopo Masi",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Ramakant Nevatia",
+ "ids": [
+ "1694832"
+ ],
+ "slug": "Ramakant-Nevatia"
+ },
+ {
+ "text": "Ramakant Nevatia",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Gérard G. Medioni",
+ "ids": [
+ "3463966"
+ ],
+ "slug": "Gérard-G.-Medioni"
+ },
+ {
+ "text": "Gérard G. Medioni",
+ "fragments": []
+ }
+ ]
+ ],
+ "citationContexts": [
+ {
+ "text": "Facial landmark detection is big business, as reflected by the numerous citation to relevant papers, the many facial landmark detection benchmarks [5, 23, 26, 40, 53], and popular international events dedicated to this problem.",
+ "fragments": [
+ {
+ "start": 147,
+ "end": 166
+ }
+ ]
+ },
+ {
+ "text": "We evaluate performance on the 300W data set [40], the most challenging benchmark of its kind [45], using 68 landmarks.",
+ "fragments": [
+ {
+ "start": 45,
+ "end": 49
+ }
+ ]
+ },
+ {
+ "text": "Landmarks detected in 300W [40] images by projecting an unmodified 3D face shape,",
+ "fragments": [
+ {
+ "start": 27,
+ "end": 31
+ }
+ ]
+ },
+ {
+ "text": "Finally, (3), we test our FPN extensively and report that better landmark detection accuracy on the widely used 300W benchmark [40] does not imply better alignment and recognition on the highly challenging IJB-A [22] and IJB-B benchmarks [44].",
+ "fragments": [
+ {
+ "start": 127,
+ "end": 131
+ }
+ ]
+ }
+ ],
+ "isKey": true
+ },
+ {
+ "id": "cf736f596bf881ca97ec4b29776baaa493b9d50e",
+ "title": {
+ "text": "Low Dimensional Deep Features for facial landmark alignment",
+ "fragments": []
+ },
+ "slug": "Low-Dimensional-Deep-Features-for-facial-landmark-Jalan-Mynepalli",
+ "venue": {
+ "text": "2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)",
+ "fragments": []
+ },
+ "year": 2017,
+ "authors": [
+ [
+ {
+ "name": "Ankit Jalan",
+ "ids": [
+ "18090725"
+ ],
+ "slug": "Ankit-Jalan"
+ },
+ {
+ "text": "Ankit Jalan",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Siva Chaitanya Mynepalli",
+ "ids": [
+ "18091255"
+ ],
+ "slug": "Siva-Chaitanya-Mynepalli"
+ },
+ {
+ "text": "Siva Chaitanya Mynepalli",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Viswanath Veera",
+ "ids": [
+ "18178676"
+ ],
+ "slug": "Viswanath-Veera"
+ },
+ {
+ "text": "Viswanath Veera",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Shankar M. Venkatesan",
+ "ids": [
+ "3210146"
+ ],
+ "slug": "Shankar-M.-Venkatesan"
+ },
+ {
+ "text": "Shankar M. Venkatesan",
+ "fragments": []
+ }
+ ]
+ ],
+ "citationContexts": [
+ {
+ "text": "300-W dataset [28, 31]: This dataset is created from",
+ "fragments": [
+ {
+ "start": 14,
+ "end": 22
+ }
+ ]
+ },
+ {
+ "text": "Experimental analysis has demonstrated that LDFFA outperforms other state-of-the-art algorithms on Helen, LFPW benchmark datasets while giving comparable performance on subsets of 300-W database [26, 27, 28, 31] with 68 fiducial landmarks.",
+ "fragments": [
+ {
+ "start": 195,
+ "end": 211
+ }
+ ]
+ },
+ {
+ "text": "We follow the method of [31] where the average L2 distance of the estimated landmark position from the ground truth is normalized by the standard definition of inter-ocular distance (douter,) to give the error (Eqn.",
+ "fragments": [
+ {
+ "start": 24,
+ "end": 28
+ }
+ ]
+ },
+ {
+ "text": "The bounding boxes for 300-W images were provided by [28, 31] using their in-house face detector.",
+ "fragments": [
+ {
+ "start": 53,
+ "end": 61
+ }
+ ]
+ },
+ {
+ "text": "3 (b & c) illustrate the Normalized Mean Error using definition from [31].",
+ "fragments": [
+ {
+ "start": 69,
+ "end": 73
+ }
+ ]
+ }
+ ],
+ "isKey": true
+ },
+ {
+ "id": "7fcfd72ba6bc14bbb90b31fe14c2c77a8b220ab2",
+ "title": {
+ "text": "Robust FEC-CNN: A High Accuracy Facial Landmark Detection System",
+ "fragments": []
+ },
+ "slug": "Robust-FEC-CNN:-A-High-Accuracy-Facial-Landmark-He-Zhang",
+ "venue": {
+ "text": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
+ "fragments": []
+ },
+ "year": 2017,
+ "authors": [
+ [
+ {
+ "name": "Zhenliang He",
+ "ids": [
+ "3469114"
+ ],
+ "slug": "Zhenliang-He"
+ },
+ {
+ "text": "Zhenliang He",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Jie Zhang",
+ "ids": [
+ "49050482"
+ ],
+ "slug": "Jie-Zhang"
+ },
+ {
+ "text": "Jie Zhang",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Meina Kan",
+ "ids": [
+ "1693589"
+ ],
+ "slug": "Meina-Kan"
+ },
+ {
+ "text": "Meina Kan",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Shiguang Shan",
+ "ids": [
+ "1685914"
+ ],
+ "slug": "Shiguang-Shan"
+ },
+ {
+ "text": "Shiguang Shan",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Xilin Chen",
+ "ids": [
+ "1710220"
+ ],
+ "slug": "Xilin-Chen"
+ },
+ {
+ "text": "Xilin Chen",
+ "fragments": []
+ }
+ ]
+ ],
+ "citationContexts": [
+ {
+ "text": "Impressive works [5, 4, 6, 3, 23, 14, 20, 27, 21, 22, 9] and benchmarks [11, 29, 2, 16, 17, 10, 26] were proposed to tackle this task in the the past few decades.",
+ "fragments": [
+ {
+ "start": 72,
+ "end": 99
+ }
+ ]
+ },
+ {
+ "text": "LFPW, HELEN, AFW, 300W Competition and Menpo 68 point subset are used as training set while IBUG is used as testing set.",
+ "fragments": [
+ {
+ "start": 23,
+ "end": 34
+ }
+ ]
+ },
+ {
+ "text": "We employ 300W [16, 18], 300W Competition [16, 17] and Menpo dataset [26] for evaluating RFC.",
+ "fragments": [
+ {
+ "start": 15,
+ "end": 23
+ }
+ ]
+ },
+ {
+ "text": "The 300W Competition dataset consists of indoor and outdoor subset.",
+ "fragments": [
+ {
+ "start": 9,
+ "end": 20
+ }
+ ]
+ }
+ ],
+ "isKey": true
+ },
+ {
+ "id": "4ac3cd8b6c50f7a26f27eefc64855134932b39be",
+ "title": {
+ "text": "Robust Facial Landmark Detection via a Fully-Convolutional Local-Global Context Network",
+ "fragments": []
+ },
+ "slug": "Robust-Facial-Landmark-Detection-via-a-Local-Global-Merget-Rock",
+ "venue": {
+ "text": "",
+ "fragments": []
+ },
+ "authors": [
+ [
+ {
+ "name": "Daniel Merget",
+ "ids": [
+ "3044182"
+ ],
+ "slug": "Daniel-Merget"
+ },
+ {
+ "text": "Daniel Merget",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Matthias Rock",
+ "ids": [
+ "28096417"
+ ],
+ "slug": "Matthias-Rock"
+ },
+ {
+ "text": "Matthias Rock",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Gerhard Rigoll",
+ "ids": [
+ "46343645"
+ ],
+ "slug": "Gerhard-Rigoll"
+ },
+ {
+ "text": "Gerhard Rigoll",
+ "fragments": []
+ }
+ ]
+ ],
+ "citationContexts": [
+ {
+ "text": "Some qualitative results on the 300-W challenge data set [28] are presented in Figure 5.",
+ "fragments": [
+ {
+ "start": 57,
+ "end": 61
+ }
+ ]
+ },
+ {
+ "text": "Figure 4 illustrates the results for inter-ocular distance (IOD) normalized mean absolute error (MAE) on the 300-W benchmark [28] and on a cross-data set test with Menpo [41].",
+ "fragments": [
+ {
+ "start": 125,
+ "end": 129
+ }
+ ]
+ },
+ {
+ "text": "The overall prediction performance on 300-W [28] was worse using cross entropy.",
+ "fragments": [
+ {
+ "start": 44,
+ "end": 48
+ }
+ ]
+ },
+ {
+ "text": "Our approach beats the state of the art on 300-W [28] and on a cross-data set test with Menpo [41].",
+ "fragments": [
+ {
+ "start": 49,
+ "end": 53
+ }
+ ]
+ },
+ {
+ "text": "From left to right: 300-W [28], iBUG [29], LFPW [5] + HELEN [20], Menpo frontal train set [41].",
+ "fragments": [
+ {
+ "start": 26,
+ "end": 30
+ }
+ ]
+ },
+ {
+ "text": "• We demonstrate the effectiveness of our approach by improving upon the state of the art on 300-W [28] and a cross-data set test on Menpo [41].",
+ "fragments": [
+ {
+ "start": 99,
+ "end": 103
+ }
+ ]
+ }
+ ],
+ "isKey": true
+ }
+ ],
+ "requestedPageSize": 10,
+ "pageNumber": 1,
+ "totalPages": 11,
+ "sort": "is-influential"
+ },
+ "citedPapers": {
+ "citationType": "citedPapers",
+ "citations": [
+ {
+ "id": "0a6d344112b5af7d1abbd712f83c0d70105211d0",
+ "title": {
+ "text": "Constrained Local Neural Fields for Robust Facial Landmark Detection in the Wild",
+ "fragments": []
+ },
+ "slug": "Constrained-Local-Neural-Fields-for-Robust-Facial-Baltrusaitis-Robinson",
+ "venue": {
+ "text": "2013 IEEE International Conference on Computer Vision Workshops",
+ "fragments": []
+ },
+ "year": 2013,
+ "authors": [
+ [
+ {
+ "name": "Tadas Baltrusaitis",
+ "ids": [
+ "1756344"
+ ],
+ "slug": "Tadas-Baltrusaitis"
+ },
+ {
+ "text": "Tadas Baltrusaitis",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Peter Robinson",
+ "ids": [
+ "39626495"
+ ],
+ "slug": "Peter-Robinson"
+ },
+ {
+ "text": "Peter Robinson",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Louis-Philippe Morency",
+ "ids": [
+ "49933077"
+ ],
+ "slug": "Louis-Philippe-Morency"
+ },
+ {
+ "text": "Louis-Philippe Morency",
+ "fragments": []
+ }
+ ]
+ ],
+ "citationContexts": [
+ {
+ "text": "[40] propose a probabilistic patch expert technique that learns non-linear and spatial relationships between the pixels and the probability of a landmark being aligned.",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 4
+ }
+ ]
+ },
+ {
+ "text": "[40]",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 4
+ }
+ ]
+ },
+ {
+ "text": "[40] T.",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 4
+ }
+ ]
+ },
+ {
+ "text": "[40] 0.",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 4
+ }
+ ]
+ }
+ ],
+ "isKey": true
+ },
+ {
+ "id": "1a8ccc23ed73db64748e31c61c69fe23c48a2bb1",
+ "title": {
+ "text": "Extensive Facial Landmark Localization with Coarse-to-Fine Convolutional Network Cascade",
+ "fragments": []
+ },
+ "slug": "Extensive-Facial-Landmark-Localization-with-Network-Zhou-Fan",
+ "venue": {
+ "text": "2013 IEEE International Conference on Computer Vision Workshops",
+ "fragments": []
+ },
+ "year": 2013,
+ "authors": [
+ [
+ {
+ "name": "Erjin Zhou",
+ "ids": [
+ "1848243"
+ ],
+ "slug": "Erjin-Zhou"
+ },
+ {
+ "text": "Erjin Zhou",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Haoqiang Fan",
+ "ids": [
+ "1934546"
+ ],
+ "slug": "Haoqiang-Fan"
+ },
+ {
+ "text": "Haoqiang Fan",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Zhimin Cao",
+ "ids": [
+ "2695115"
+ ],
+ "slug": "Zhimin-Cao"
+ },
+ {
+ "text": "Zhimin Cao",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Yuning Jiang",
+ "ids": [
+ "1691963"
+ ],
+ "slug": "Yuning-Jiang"
+ },
+ {
+ "text": "Yuning Jiang",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Qi Yin",
+ "ids": [
+ "2274228"
+ ],
+ "slug": "Qi-Yin"
+ },
+ {
+ "text": "Qi Yin",
+ "fragments": []
+ }
+ ]
+ ],
+ "citationContexts": [
+ {
+ "text": "[47] 0% 2.",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 4
+ }
+ ]
+ },
+ {
+ "text": "[47] 0.",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 4
+ }
+ ]
+ },
+ {
+ "text": "[47] propose a four-level convolutional network cascade, where each level is trained to locally refine the outputs of the previous network levels.",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 4
+ }
+ ]
+ },
+ {
+ "text": "[47]",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 4
+ }
+ ]
+ },
+ {
+ "text": "[47] (convolutional network framework), can continually achieve better results with continuous rise in the amount of training data.",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 4
+ }
+ ]
+ },
+ {
+ "text": "[47] E.",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 4
+ }
+ ]
+ },
+ {
+ "text": "[47] from Megvii company.",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 4
+ }
+ ]
+ },
+ {
+ "text": "[47]) and second (J.",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 4
+ }
+ ]
+ }
+ ],
+ "isKey": true
+ },
+ {
+ "id": "1c1a98df3d0d5e2034ea723994bdc85af45934db",
+ "title": {
+ "text": "Guided Unsupervised Learning of Mode Specific Models for Facial Point Detection in the Wild",
+ "fragments": []
+ },
+ "slug": "Guided-Unsupervised-Learning-of-Mode-Specific-for-Jaiswal-Almaev",
+ "venue": {
+ "text": "2013 IEEE International Conference on Computer Vision Workshops",
+ "fragments": []
+ },
+ "year": 2013,
+ "authors": [
+ [
+ {
+ "name": "Shashank Jaiswal",
+ "ids": [
+ "2736086"
+ ],
+ "slug": "Shashank-Jaiswal"
+ },
+ {
+ "text": "Shashank Jaiswal",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Timur R. Almaev",
+ "ids": [
+ "2449665"
+ ],
+ "slug": "Timur-R.-Almaev"
+ },
+ {
+ "text": "Timur R. Almaev",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Michel F. Valstar",
+ "ids": [
+ "1795528"
+ ],
+ "slug": "Michel-F.-Valstar"
+ },
+ {
+ "text": "Michel F. Valstar",
+ "fragments": []
+ }
+ ]
+ ],
+ "citationContexts": [
+ {
+ "text": "[41]",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 4
+ }
+ ]
+ },
+ {
+ "text": "[41] 0.",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 4
+ }
+ ]
+ },
+ {
+ "text": "[41] S.",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 4
+ }
+ ]
+ },
+ {
+ "text": "[41] use Local Evidence Aggregated Regression [42], in which local patches provide evidence of the location of the target facial point using Support Vector Regressors.",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 4
+ }
+ ]
+ }
+ ],
+ "isKey": true
+ },
+ {
+ "id": "321c8ba38db118d8b02c0ba209be709e6792a2c7",
+ "title": {
+ "text": "Learn to Combine Multiple Hypotheses for Accurate Face Alignment",
+ "fragments": []
+ },
+ "slug": "Learn-to-Combine-Multiple-Hypotheses-for-Accurate-Yan-Lei",
+ "venue": {
+ "text": "2013 IEEE International Conference on Computer Vision Workshops",
+ "fragments": []
+ },
+ "year": 2013,
+ "authors": [
+ [
+ {
+ "name": "Junjie Yan",
+ "ids": [
+ "48270105"
+ ],
+ "slug": "Junjie-Yan"
+ },
+ {
+ "text": "Junjie Yan",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Zhen Lei",
+ "ids": [
+ "1718623"
+ ],
+ "slug": "Zhen-Lei"
+ },
+ {
+ "text": "Zhen Lei",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Dong Yi",
+ "ids": [
+ "1716143"
+ ],
+ "slug": "Dong-Yi"
+ },
+ {
+ "text": "Dong Yi",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Stan Z. Li",
+ "ids": [
+ "34679741"
+ ],
+ "slug": "Stan-Z.-Li"
+ },
+ {
+ "text": "Stan Z. Li",
+ "fragments": []
+ }
+ ]
+ ],
+ "citationContexts": [
+ {
+ "text": "[46]",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 4
+ }
+ ]
+ },
+ {
+ "text": "[46], Zhou et al.",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 4
+ }
+ ]
+ },
+ {
+ "text": "[46] J.",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 4
+ }
+ ]
+ },
+ {
+ "text": "[46] from The National Laboratory of Pattern Recognition at the Institute of Automation of the Chinese Academy of Sciences, and (b) Zhou et al.",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 4
+ }
+ ]
+ },
+ {
+ "text": "[46] (cascade regression framework) and",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 4
+ }
+ ]
+ },
+ {
+ "text": "[46] 0.",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 4
+ }
+ ]
+ },
+ {
+ "text": "[46] employ a cascade regression framework, where a series of regressors are utilized to progressively refine the shape initialized by the face detector.",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 4
+ }
+ ]
+ }
+ ],
+ "isKey": true
+ },
+ {
+ "id": "4f77a37753c03886ca9c9349723ec3bbfe4ee967",
+ "title": {
+ "text": "Localizing Facial Keypoints with Global Descriptor Search, Neighbour Alignment and Locally Linear Models",
+ "fragments": []
+ },
+ "slug": "Localizing-Facial-Keypoints-with-Global-Descriptor-Hasan-Pal",
+ "venue": {
+ "text": "2013 IEEE International Conference on Computer Vision Workshops",
+ "fragments": []
+ },
+ "year": 2013,
+ "authors": [
+ [
+ {
+ "name": "Md. Kamrul Hasan",
+ "ids": [
+ "2811524"
+ ],
+ "slug": "Md.-Kamrul-Hasan"
+ },
+ {
+ "text": "Md. Kamrul Hasan",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Christopher Joseph Pal",
+ "ids": [
+ "1972076"
+ ],
+ "slug": "Christopher-Joseph-Pal"
+ },
+ {
+ "text": "Christopher Joseph Pal",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Sharon Moalem",
+ "ids": [
+ "9422894"
+ ],
+ "slug": "Sharon-Moalem"
+ },
+ {
+ "text": "Sharon Moalem",
+ "fragments": []
+ }
+ ]
+ ],
+ "citationContexts": [
+ {
+ "text": "[43] M.",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 4
+ }
+ ]
+ },
+ {
+ "text": "[43]",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 4
+ }
+ ]
+ },
+ {
+ "text": "[43] first apply a nearest neighbour search using global descriptors and, then, aim to align local neighbours by dynamically fitting a locally linear model to the global keypoint configurations of the returned neighbours.",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 4
+ }
+ ]
+ },
+ {
+ "text": "[43] 0.",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 4
+ }
+ ]
+ }
+ ],
+ "isKey": true
+ },
+ {
+ "id": "36e8ef2e5d52a78dddf0002e03918b101dcdb326",
+ "title": {
+ "text": "Multiview Active Shape Models with SIFT Descriptors for the 300-W Face Landmark Challenge",
+ "fragments": []
+ },
+ "slug": "Multiview-Active-Shape-Models-with-SIFT-Descriptors-Milborrow-Bishop",
+ "venue": {
+ "text": "2013 IEEE International Conference on Computer Vision Workshops",
+ "fragments": []
+ },
+ "year": 2013,
+ "authors": [
+ [
+ {
+ "name": "Stephen Milborrow",
+ "ids": [
+ "2822258"
+ ],
+ "slug": "Stephen-Milborrow"
+ },
+ {
+ "text": "Stephen Milborrow",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Tom E. Bishop",
+ "ids": [
+ "1823550"
+ ],
+ "slug": "Tom-E.-Bishop"
+ },
+ {
+ "text": "Tom E. Bishop",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Fred Nicolls",
+ "ids": [
+ "2537623"
+ ],
+ "slug": "Fred-Nicolls"
+ },
+ {
+ "text": "Fred Nicolls",
+ "fragments": []
+ }
+ ]
+ ],
+ "citationContexts": [
+ {
+ "text": "[44] 0.",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 4
+ }
+ ]
+ },
+ {
+ "text": "[44] S.",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 4
+ }
+ ]
+ },
+ {
+ "text": "[44] approach the problem with Active Shape Models (ASMs) that incorporate a modified version of SIFT descriptors [45].",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 4
+ }
+ ]
+ },
+ {
+ "text": "[44]",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 4
+ }
+ ]
+ }
+ ],
+ "isKey": true
+ },
+ {
+ "title": {
+ "text": "Face detection",
+ "fragments": []
+ },
+ "slug": "Face-detection-Zhu-Ramanan",
+ "venue": {
+ "text": "pose estimation, and landmark localization in the wild, in: Proceedings of IEEE International Conference on Computer Vision & Pattern Recognition (CVPR), IEEE",
+ "fragments": []
+ },
+ "year": 2012,
+ "authors": [
+ [
+ {
+ "name": "X. Zhu",
+ "ids": [],
+ "slug": "X.-Zhu"
+ },
+ {
+ "text": "X. Zhu",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "D. Ramanan",
+ "ids": [],
+ "slug": "D.-Ramanan"
+ },
+ {
+ "text": "D. Ramanan",
+ "fragments": []
+ }
+ ]
+ ],
+ "citationContexts": [
+ {
+ "text": "The most notable databases of this category are LFPW [28], HELEN [29], AFW [17], AFLW [30] and IBUG [31] (all used for facial landmark points localization).",
+ "fragments": [
+ {
+ "start": 75,
+ "end": 79
+ }
+ ]
+ },
+ {
+ "text": "Note that we employ DPMs [17] to estimate the initial landmarks locations for the first iteration of the above procedure.",
+ "fragments": [
+ {
+ "start": 25,
+ "end": 29
+ }
+ ]
+ },
+ {
+ "text": "These databases can be separated in two major categories: (a) those captured under controlled conditions, e.g. Multi-PIE [24], XM2VTS [25], FRGCV2 [26], AR [27], and those captured under totally unconstrained conditions (in-the-wild), e.g. LFPW [28], HELEN [29], AFW [17], AFLW [30], IBUG [31].",
+ "fragments": [
+ {
+ "start": 263,
+ "end": 266
+ }
+ ]
+ },
+ {
+ "text": "Furthermore, we computed the bounding boxes of all the aforementioned databases by using our in-house face detector, the one that is also employed in [16], which is a variant of [17].",
+ "fragments": [
+ {
+ "start": 178,
+ "end": 182
+ }
+ ]
+ },
+ {
+ "text": "W), the first facial landmark localization challenge, that was\n1The annotations of XM2VTS, FRGC-V2, LFPW, HELEN, AFW and IBUG are publicly available from http://ibug.doc.ic.ac.uk/resources/facial-point-annotations/.",
+ "fragments": [
+ {
+ "start": 113,
+ "end": 116
+ }
+ ]
+ },
+ {
+ "text": "LFPW, AFW, HELEN, XM2VTS and FRGC-V2 were provided for training, along with the corrected annotations produced with the semi-automatic annotation tool (Sec 3).",
+ "fragments": [
+ {
+ "start": 6,
+ "end": 9
+ }
+ ]
+ },
+ {
+ "text": ", and outperform discriminative methodologies, such as CLMs [15], DPMs [17] and SDM [18].",
+ "fragments": [
+ {
+ "start": 71,
+ "end": 75
+ }
+ ]
+ },
+ {
+ "text": "For each subject, there are available images for 15 different poses, 19 illumination conditions and 6\n5\nAC C\nEP\nTE\nD M\nAN U\nSC R\nIP T\n(a) MultiPIE/IBUG (b) XM2VTS (c) FRGC-V2 (d) AR\n(e) LFPW (f) HELEN (g) AFW (h) AFLW\nFigure 1: Landmarks configurations of existing databases.",
+ "fragments": [
+ {
+ "start": 205,
+ "end": 208
+ }
+ ]
+ },
+ {
+ "text": "Multi-PIE [24], XM2VTS [25], FRGC-V2 [26], AR [27], LFPW [28], HELEN [29] and AFW [17].",
+ "fragments": [
+ {
+ "start": 82,
+ "end": 86
+ }
+ ]
+ },
+ {
+ "text": "We employed the proposed tool to re-annotate all the widely used databases, i.e. Multi-PIE [24], XM2VTS [25], FRGC-V2 [26], AR [27], LFPW [28], HELEN [29] and AFW [17].",
+ "fragments": [
+ {
+ "start": 159,
+ "end": 162
+ }
+ ]
+ },
+ {
+ "text": "The discriminative techniques can be further divided to those that use discriminative response map functions, such as Active Shape Models (ASMs) [13], Constrained Local Models (CLMs) [14, 15, 16] and Deformable Part Models (DPMs) [17], those that learn a cascade of regression functions, such as Supervised Descent Method (SDM) [18] and others [19, 20, 21], and, finally, those that employ random forests [22, 23].",
+ "fragments": [
+ {
+ "start": 230,
+ "end": 234
+ }
+ ]
+ },
+ {
+ "text": "The accuracy of the fitting results was measured by the point-to-point RMS error between each fitted shape and the ground truth annotations, normalized by the face’s interoccular distance, as proposed in [17].",
+ "fragments": [
+ {
+ "start": 204,
+ "end": 208
+ }
+ ]
+ },
+ {
+ "text": "HELEN, AFW, IBUG: The rest of in-the-wild databases were annotated\n11\nAC C\nEP TE\nD M\nAN U\nSC R\nIP T\n(a) Multi-PIE (b) XM2VTS\n(c) FRGC-V2 (d) LFPW\n(e) HELEN (f) AFW\nFigure 3: Examples of the annotated images.",
+ "fragments": [
+ {
+ "start": 7,
+ "end": 10
+ }
+ ]
+ },
+ {
+ "text": "In both conducts, the training set consisted of the XM2VTS, FRGC-V2, LFPW, HELEN, AFW and IBUG databases that were annotated using the proposed semi-automatic procedure.",
+ "fragments": [
+ {
+ "start": 82,
+ "end": 85
+ }
+ ]
+ },
+ {
+ "text": "LFPW [28], HELEN [29], AFW [17], AFLW [30], IBUG [31].",
+ "fragments": [
+ {
+ "start": 27,
+ "end": 31
+ }
+ ]
+ },
+ {
+ "text": "AFW: The Annotated Faces in-the-wild (AFW) [17] database consists of 250 images with 468 faces, that is, more than one faces are annotated in each image.",
+ "fragments": [
+ {
+ "start": 43,
+ "end": 47
+ }
+ ]
+ },
+ {
+ "text": "AR: The AR Face Database [27] contains over 4000 images corresponding to\n6\nAC C\nEP TE\nD M\nAN U\nSC\nR\nIP T\nDatabase conditions # faces # subjects # points pose\nMulti-PIE controlled ∼ 750000 337 68 [−45◦, 45◦] XM2VTS 2360 295 68 0◦ FRGC-V2 4950 466 5 0◦ AR ∼ 4000 126 22 0◦ LFPW\nin-the-wild\n1035\n−\n35\n[−45◦, 45◦] HELEN 2330 194 AFW 468 6 AFLW 25993 21 IBUG 135 68\nTable 1: Overview of the characteristics of existing facial databases.",
+ "fragments": [
+ {
+ "start": 310,
+ "end": 313
+ }
+ ]
+ },
+ {
+ "text": "For example, DPMs [17] tend to return bounding boxes that only include facial texture and not any of the subject’s hair, as usually done by the Viola-Jones detector [48].",
+ "fragments": [
+ {
+ "start": 18,
+ "end": 22
+ }
+ ]
+ },
+ {
+ "text": "The authors were encouraged, but not restricted, to use LFPW, AFW, HELEN, IBUG, FRGC-V2 and XM2VTS databases with the provided annotations.",
+ "fragments": [
+ {
+ "start": 62,
+ "end": 65
+ }
+ ]
+ }
+ ],
+ "isKey": true
+ },
+ {
+ "id": "a74251efa970b92925b89eeef50a5e37d9281ad0",
+ "title": {
+ "text": "Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization",
+ "fragments": []
+ },
+ "slug": "Annotated-Facial-Landmarks-in-the-Wild:-A-database-Köstinger-Wohlhart",
+ "venue": {
+ "text": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
+ "fragments": []
+ },
+ "year": 2011,
+ "authors": [
+ [
+ {
+ "name": "Martin Köstinger",
+ "ids": [
+ "1993853"
+ ],
+ "slug": "Martin-Köstinger"
+ },
+ {
+ "text": "Martin Köstinger",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Paul Wohlhart",
+ "ids": [
+ "3202367"
+ ],
+ "slug": "Paul-Wohlhart"
+ },
+ {
+ "text": "Paul Wohlhart",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Peter M. Roth",
+ "ids": [
+ "1791182"
+ ],
+ "slug": "Peter-M.-Roth"
+ },
+ {
+ "text": "Peter M. Roth",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "Horst Bischof",
+ "ids": [
+ "3628150"
+ ],
+ "slug": "Horst-Bischof"
+ },
+ {
+ "text": "Horst Bischof",
+ "fragments": []
+ }
+ ]
+ ],
+ "citationContexts": [
+ {
+ "text": "The most notable databases of this category are LFPW [28], HELEN [29], AFW [17], AFLW [30] and IBUG [31] (all used for facial landmark points localization).",
+ "fragments": [
+ {
+ "start": 86,
+ "end": 90
+ }
+ ]
+ },
+ {
+ "text": "These databases can be separated in two major categories: (a) those captured under controlled conditions, e.g. Multi-PIE [24], XM2VTS [25], FRGCV2 [26], AR [27], and those captured under totally unconstrained conditions (in-the-wild), e.g. LFPW [28], HELEN [29], AFW [17], AFLW [30], IBUG [31].",
+ "fragments": [
+ {
+ "start": 273,
+ "end": 277
+ }
+ ]
+ },
+ {
+ "text": "AFLW: The Annotated Facial Landmarks in theWild (AFLW) [30] database consists of 25993 images gathered from Flickr, exhibiting a large variety in appearance (e.g., pose, expression, ethnicity, age, gender) as well as general imaging and environmental conditions.",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 4
+ }
+ ]
+ },
+ {
+ "text": "For each subject, there are available images for 15 different poses, 19 illumination conditions and 6\n5\nAC C\nEP\nTE\nD M\nAN U\nSC R\nIP T\n(a) MultiPIE/IBUG (b) XM2VTS (c) FRGC-V2 (d) AR\n(e) LFPW (f) HELEN (g) AFW (h) AFLW\nFigure 1: Landmarks configurations of existing databases.",
+ "fragments": [
+ {
+ "start": 213,
+ "end": 217
+ }
+ ]
+ },
+ {
+ "text": "AFLW: The Annotated Facial Landmarks in theWild (AFLW) [30] database consists of 25993 images gathered from Flickr, exhibiting a large variety in appearance (e.",
+ "fragments": [
+ {
+ "start": 55,
+ "end": 59
+ }
+ ]
+ },
+ {
+ "text": "[30] M.",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 4
+ }
+ ]
+ },
+ {
+ "text": "LFPW [28], HELEN [29], AFW [17], AFLW [30], IBUG [31].",
+ "fragments": [
+ {
+ "start": 38,
+ "end": 42
+ }
+ ]
+ },
+ {
+ "text": "AR: The AR Face Database [27] contains over 4000 images corresponding to\n6\nAC C\nEP TE\nD M\nAN U\nSC\nR\nIP T\nDatabase conditions # faces # subjects # points pose\nMulti-PIE controlled ∼ 750000 337 68 [−45◦, 45◦] XM2VTS 2360 295 68 0◦ FRGC-V2 4950 466 5 0◦ AR ∼ 4000 126 22 0◦ LFPW\nin-the-wild\n1035\n−\n35\n[−45◦, 45◦] HELEN 2330 194 AFW 468 6 AFLW 25993 21 IBUG 135 68\nTable 1: Overview of the characteristics of existing facial databases.",
+ "fragments": [
+ {
+ "start": 320,
+ "end": 324
+ }
+ ]
+ }
+ ],
+ "isKey": true
+ },
+ {
+ "title": {
+ "text": "Fddb: A benchmark for face detection in unconstrained settings",
+ "fragments": []
+ },
+ "slug": "Fddb:-A-benchmark-for-face-detection-in-settings-Jain-Learned-Miller",
+ "venue": {
+ "text": "Tech. Rep. UM-CS-2010-009, University of Massachusetts, Amherst",
+ "fragments": []
+ },
+ "year": 2010,
+ "authors": [
+ [
+ {
+ "name": "V. Jain",
+ "ids": [],
+ "slug": "V.-Jain"
+ },
+ {
+ "text": "V. Jain",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "E. Learned-Miller",
+ "ids": [],
+ "slug": "E.-Learned-Miller"
+ },
+ {
+ "text": "E. Learned-Miller",
+ "fragments": []
+ }
+ ]
+ ],
+ "citationContexts": [
+ {
+ "text": "Consequently, in order to facilitate the participants and make the competition less dependent to a face\n20\ndetector’s performance, we suggested them to use one of the face detection methods that took part in the Face Detection Data Set and Benchmark (FDDB) [49].",
+ "fragments": [
+ {
+ "start": 250,
+ "end": 254
+ }
+ ]
+ },
+ {
+ "text": "[49] V.",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 4
+ }
+ ]
+ },
+ {
+ "text": "detector’s performance, we suggested them to use one of the face detection methods that took part in the Face Detection Data Set and Benchmark (FDDB) [49].",
+ "fragments": [
+ {
+ "start": 150,
+ "end": 154
+ }
+ ]
+ },
+ {
+ "text": "The results presented in the Face Detection Data Set and Benchmark (FDDB) [49] show that current stateof-the-art techniques achieve very good true positive rates.",
+ "fragments": [
+ {
+ "start": 74,
+ "end": 78
+ }
+ ]
+ }
+ ],
+ "isKey": true
+ },
+ {
+ "title": {
+ "text": "Multi-pie",
+ "fragments": []
+ },
+ "slug": "Multi-pie-Gross-Matthews",
+ "venue": {
+ "text": "Image and Vision Computing 28 (5)",
+ "fragments": []
+ },
+ "year": 2010,
+ "authors": [
+ [
+ {
+ "name": "R. Gross",
+ "ids": [],
+ "slug": "R.-Gross"
+ },
+ {
+ "text": "R. Gross",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "I. Matthews",
+ "ids": [],
+ "slug": "I.-Matthews"
+ },
+ {
+ "text": "I. Matthews",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "J. Cohn",
+ "ids": [],
+ "slug": "J.-Cohn"
+ },
+ {
+ "text": "J. Cohn",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "T. Kanade",
+ "ids": [],
+ "slug": "T.-Kanade"
+ },
+ {
+ "text": "T. Kanade",
+ "fragments": []
+ }
+ ],
+ [
+ {
+ "name": "S. Baker",
+ "ids": [],
+ "slug": "S.-Baker"
+ },
+ {
+ "text": "S. Baker",
+ "fragments": []
+ }
+ ]
+ ],
+ "citationContexts": [
+ {
+ "text": "The most popular such databases are Multi-PIE [24] (used for face recognition, expressions recognition, landmark points localization), FRGC-V2 [26] (used for face recognition), XM2VTS [25] and AR [27] (both used for face recognition and landmark points localization).",
+ "fragments": [
+ {
+ "start": 46,
+ "end": 50
+ }
+ ]
+ },
+ {
+ "text": "The provided facial landmark annotations are produced by employing the annotation scheme of Multi-PIE (Fig.",
+ "fragments": [
+ {
+ "start": 92,
+ "end": 101
+ }
+ ]
+ },
+ {
+ "text": "These databases can be separated in two major categories: (a) those captured under controlled conditions, e.g. Multi-PIE [24], XM2VTS [25], FRGCV2 [26], AR [27], and those captured under totally unconstrained conditions (in-the-wild), e.g. LFPW [28], HELEN [29], AFW [17], AFLW [30], IBUG [31].",
+ "fragments": [
+ {
+ "start": 111,
+ "end": 120
+ }
+ ]
+ },
+ {
+ "text": "Note that in the case of Multi-PIE, even though the original and generated annotations have the same configuration, the generated ones are more accurate.\nusing a common procedure.",
+ "fragments": [
+ {
+ "start": 25,
+ "end": 34
+ }
+ ]
+ },
+ {
+ "text": "The advantages of the generated annotations1 are twofold: (1) They all have the same landmarks configuration, i.e. the one employed in Multi-PIE (Fig.",
+ "fragments": [
+ {
+ "start": 135,
+ "end": 144
+ }
+ ]
+ },
+ {
+ "text": "16: Fit the person-specific AOM to the image i. 17: end for 18: end for 19: end if 20: Check and manually correct, if necessary, the generated annotations of Q.\nMulti-PIE: The available Multi-PIE annotations cover only the neutral expression with pose [−45◦, 45◦] and multiple non-neutral expressions with pose 0◦.",
+ "fragments": [
+ {
+ "start": 161,
+ "end": 170
+ }
+ ]
+ },
+ {
+ "text": "Facial databases under controlled conditions Multi-PIE: The CMU Multi Pose Illumination, and Expression (MultiPIE) Database [24] contains around 750000 images of 337 subjects captured under laboratory conditions in four different sessions.",
+ "fragments": [
+ {
+ "start": 124,
+ "end": 128
+ }
+ ]
+ },
+ {
+ "text": "Multi-PIE [24], XM2VTS [25], FRGC-V2 [26], AR [27], LFPW [28], HELEN [29] and AFW [17].",
+ "fragments": [
+ {
+ "start": 10,
+ "end": 14
+ }
+ ]
+ },
+ {
+ "text": "We employed the proposed tool to re-annotate all the widely used databases, i.e. Multi-PIE [24], XM2VTS [25], FRGC-V2 [26], AR [27], LFPW [28], HELEN [29] and AFW [17].",
+ "fragments": [
+ {
+ "start": 81,
+ "end": 90
+ }
+ ]
+ },
+ {
+ "text": "For example, in Multi-PIE, the annotations for subjects with expressions “disgust” at 0◦ and “neutral” at 15◦ are provided and we want to produce the annotations for subjects with expression “disgust” at 15◦.",
+ "fragments": [
+ {
+ "start": 16,
+ "end": 25
+ }
+ ]
+ },
+ {
+ "text": "The images of each such pose cluster were semi-automatically annotated using images from Multi-PIE with the same pose.",
+ "fragments": [
+ {
+ "start": 89,
+ "end": 98
+ }
+ ]
+ },
+ {
+ "text": "However, the accuracy of the annotations in some cases is limited and the locations of the provided points do not correspond to ones of Multi-PIE.",
+ "fragments": [
+ {
+ "start": 136,
+ "end": 145
+ }
+ ]
+ },
+ {
+ "text": "Multi-PIE: The CMU Multi Pose Illumination, and Expression (MultiPIE) Database [24] contains around 750000 images of 337 subjects captured under laboratory conditions in four different sessions.",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 9
+ }
+ ]
+ },
+ {
+ "text": "XM2VTS: The images of XM2VTS’s first session were semi-automatically annotated by setting V to be the subjects of Multi-PIE with neutral expression and [−15◦, 15◦] poses.",
+ "fragments": [
+ {
+ "start": 113,
+ "end": 122
+ }
+ ]
+ },
+ {
+ "text": "HELEN, AFW, IBUG: The rest of in-the-wild databases were annotated\n11\nAC C\nEP TE\nD M\nAN U\nSC R\nIP T\n(a) Multi-PIE (b) XM2VTS\n(c) FRGC-V2 (d) LFPW\n(e) HELEN (f) AFW\nFigure 3: Examples of the annotated images.",
+ "fragments": [
+ {
+ "start": 104,
+ "end": 113
+ }
+ ]
+ },
+ {
+ "text": "This subset was annotated by employing images from Multi-PIE with six expressions and [−15◦, 15◦] poses as V .",
+ "fragments": [
+ {
+ "start": 51,
+ "end": 60
+ }
+ ]
+ },
+ {
+ "text": "Multi-PIE [24], XM2VTS [25], FRGCV2 [26], AR [27], and those captured under totally unconstrained conditions (in-the-wild), e.",
+ "fragments": [
+ {
+ "start": 10,
+ "end": 14
+ }
+ ]
+ },
+ {
+ "text": "In case Q has multiple images per subject (e.g. Multi-PIE, XM2VTS, FRGC-V2, AR), the above method can be extended to further improve the generated annotations.",
+ "fragments": [
+ {
+ "start": 48,
+ "end": 57
+ }
+ ]
+ },
+ {
+ "text": "To this end, we selected such images of N = 80 different subjects with frontal pose from the Multi-PIE database.",
+ "fragments": [
+ {
+ "start": 93,
+ "end": 102
+ }
+ ]
+ },
+ {
+ "text": "AR: The AR Face Database [27] contains over 4000 images corresponding to\n6\nAC C\nEP TE\nD M\nAN U\nSC\nR\nIP T\nDatabase conditions # faces # subjects # points pose\nMulti-PIE controlled ∼ 750000 337 68 [−45◦, 45◦] XM2VTS 2360 295 68 0◦ FRGC-V2 4950 466 5 0◦ AR ∼ 4000 126 22 0◦ LFPW\nin-the-wild\n1035\n−\n35\n[−45◦, 45◦] HELEN 2330 194 AFW 468 6 AFLW 25993 21 IBUG 135 68\nTable 1: Overview of the characteristics of existing facial databases.",
+ "fragments": [
+ {
+ "start": 158,
+ "end": 167
+ }
+ ]
+ }
+ ],
+ "isKey": true
+ }
+ ],
+ "requestedPageSize": 10,
+ "pageNumber": 1,
+ "totalPages": 5,
+ "sort": "is-influential"
+ },
+ "figureExtractions": {
+ "figures": [
+ {
+ "name": "1",
+ "figureType": "figure",
+ "caption": "Figure 1: Landmarks configurations of existing databases. Note they all have different number of landmark points with semantically different locations.",
+ "uri": "https://ai2-s2-public.s3.amazonaws.com/figures/2017-08-08/e4754afaa15b1b53e70743880484b8d0736990ff/7-Figure1-1.png",
+ "width": 449,
+ "height": 449
+ },
+ {
+ "name": "1",
+ "figureType": "table",
+ "caption": "Table 1: Overview of the characteristics of existing facial databases.",
+ "uri": "https://ai2-s2-public.s3.amazonaws.com/figures/2017-08-08/e4754afaa15b1b53e70743880484b8d0736990ff/8-Table1-1.png",
+ "width": 439,
+ "height": 439
+ },
+ {
+ "name": "2",
+ "figureType": "figure",
+ "caption": "Figure 2: Flowchart of the proposed tool. Given a set of landmarked images V with various poses and expressions, we aim to annotate a set of non-annotated images Q (1) with the same subjects and different poses and expressions, or (2) with different subjects but similar pose and expressions.",
+ "uri": "https://ai2-s2-public.s3.amazonaws.com/figures/2017-08-08/e4754afaa15b1b53e70743880484b8d0736990ff/11-Figure2-1.png",
+ "width": 449,
+ "height": 449
+ },
+ {
+ "name": "2",
+ "figureType": "table",
+ "caption": "Table 2: Overview of the characteristics of the 300-W database.",
+ "uri": "https://ai2-s2-public.s3.amazonaws.com/figures/2017-08-08/e4754afaa15b1b53e70743880484b8d0736990ff/16-Table2-1.png",
+ "width": 445,
+ "height": 445
+ },
+ {
+ "name": "3",
+ "figureType": "figure",
+ "caption": "Figure 3: Examples of the annotated images. For each database, the image on the left has the original annotations and the one on the right shows the annotations generated by the proposed tool. Note that in the case of Multi-PIE, even though the original and generated annotations have the same configuration, the generated ones are more accurate.",
+ "uri": "https://ai2-s2-public.s3.amazonaws.com/figures/2017-08-08/e4754afaa15b1b53e70743880484b8d0736990ff/13-Figure3-1.png",
+ "width": 445,
+ "height": 445
+ },
+ {
+ "name": "3",
+ "figureType": "table",
+ "caption": "Table 3: Median absolute deviation of the fitting results of the first conduct of 300-W challenge in 2013, reported for both 68 and 51 points.",
+ "uri": "https://ai2-s2-public.s3.amazonaws.com/figures/2017-08-08/e4754afaa15b1b53e70743880484b8d0736990ff/19-Table3-1.png",
+ "width": 422,
+ "height": 422
+ },
+ {
+ "name": "4",
+ "figureType": "figure",
+ "caption": "Figure 4: The cardinality of W and V per iteration.",
+ "uri": "https://ai2-s2-public.s3.amazonaws.com/figures/2017-08-08/e4754afaa15b1b53e70743880484b8d0736990ff/14-Figure4-1.png",
+ "width": 321,
+ "height": 321
+ },
+ {
+ "name": "4",
+ "figureType": "table",
+ "caption": "Table 4: Overview of the characteristics of the cropped images of the 300-W database.",
+ "uri": "https://ai2-s2-public.s3.amazonaws.com/figures/2017-08-08/e4754afaa15b1b53e70743880484b8d0736990ff/20-Table4-1.png",
+ "width": 448,
+ "height": 448
+ },
+ {
+ "name": "5",
+ "figureType": "figure",
+ "caption": "Figure 5: Each ellipse denotes the variance of each landmark point with regards to three expert human annotators. The colours of the points rank them with respect to their standard deviation normalized by the face size.",
+ "uri": "https://ai2-s2-public.s3.amazonaws.com/figures/2017-08-08/e4754afaa15b1b53e70743880484b8d0736990ff/15-Figure5-1.png",
+ "width": 413,
+ "height": 413
+ },
+ {
+ "name": "5",
+ "figureType": "table",
+ "caption": "Table 5: Second conduct of the 300-W challenge. 2nd column: Number of images for which an estimation of the landmarks was returned. 3rd and 4th columns: The mean absolute deviation of the fitting results for both 68 and 51 points. 5th column: Mean computational cost per method.",
+ "uri": "https://ai2-s2-public.s3.amazonaws.com/figures/2017-08-08/e4754afaa15b1b53e70743880484b8d0736990ff/23-Table5-1.png",
+ "width": 449,
+ "height": 449
+ },
+ {
+ "name": "6",
+ "figureType": "figure",
+ "caption": "Figure 6: The 51-points mark-up is a subset of the 68-points one after removing the 17 points of the face’s boundary. The interoccular distance is defined between the outer points of the eyes.",
+ "uri": "https://ai2-s2-public.s3.amazonaws.com/figures/2017-08-08/e4754afaa15b1b53e70743880484b8d0736990ff/17-Figure6-1.png",
+ "width": 231,
+ "height": 231
+ },
+ {
+ "name": "6",
+ "figureType": "table",
+ "caption": "Table 6: Percentage of images with fitting error less than the specified values for the winners of the first (Yan et al. [46], Zhou et al. [47]) and second (J. Deng, H. Fan) 300-W challenges, and Oracle. The error is based on 68 points using both indoor and oudoor images.",
+ "uri": "https://ai2-s2-public.s3.amazonaws.com/figures/2017-08-08/e4754afaa15b1b53e70743880484b8d0736990ff/27-Table6-1.png",
+ "width": 445,
+ "height": 445
+ },
+ {
+ "name": "7",
+ "figureType": "figure",
+ "caption": "Figure 7: Fitting results of the first conduct of the 300-W challenge in 2013. The plots show the Cumulative Error Distribution (CED) curves with respect to the landmarks (68 and 51 points) and the condtions (indoor, outdoor or both).",
+ "uri": "https://ai2-s2-public.s3.amazonaws.com/figures/2017-08-08/e4754afaa15b1b53e70743880484b8d0736990ff/18-Figure7-1.png",
+ "width": 477,
+ "height": 477
+ },
+ {
+ "name": "8",
+ "figureType": "figure",
+ "caption": "Figure 8: Indicative examples of the way the images were cropped for the second conduct of the 300-W challenge.",
+ "uri": "https://ai2-s2-public.s3.amazonaws.com/figures/2017-08-08/e4754afaa15b1b53e70743880484b8d0736990ff/21-Figure8-1.png",
+ "width": 449,
+ "height": 449
+ },
+ {
+ "name": "9",
+ "figureType": "figure",
+ "caption": "Figure 9: Fitting results of the second conduct of the 300-W challenge in 2015. The plots show the Cumulative Error Distribution (CED) curves with respect to the landmarks (68 and 51 points) and the condtions (indoor, outdoor or both).",
+ "uri": "https://ai2-s2-public.s3.amazonaws.com/figures/2017-08-08/e4754afaa15b1b53e70743880484b8d0736990ff/22-Figure9-1.png",
+ "width": 477,
+ "height": 477
+ },
+ {
+ "name": "10",
+ "figureType": "figure",
+ "caption": "Figure 10: Fitting examples of the first conduct of the 300-W challenge in 2013. Each row shows the fitted landmarks for each participating method.",
+ "uri": "https://ai2-s2-public.s3.amazonaws.com/figures/2017-08-08/e4754afaa15b1b53e70743880484b8d0736990ff/24-Figure10-1.png",
+ "width": 449,
+ "height": 449
+ },
+ {
+ "name": "11",
+ "figureType": "figure",
+ "caption": "Figure 11: Fitting examples of the second conduct of the 300-W challenge in 2015. Each row shows the fitted landmarks for each participating method.",
+ "uri": "https://ai2-s2-public.s3.amazonaws.com/figures/2017-08-08/e4754afaa15b1b53e70743880484b8d0736990ff/26-Figure11-1.png",
+ "width": 455,
+ "height": 455
+ }
+ ]
+ },
+ "presentations": [
+ {
+ "contentType": "PRESENTATION",
+ "id": "df1e22c9a92e6c506c228ef23049ab3b8c908e80",
+ "title": "Face detection, pose estimation, and landmark localization in the wild",
+ "authors": [
+ "Xiangxin Zhu",
+ "Deva Ramanan"
+ ],
+ "published": 1412121600,
+ "url": "https://pdfs.semanticscholar.org/presentation/df1e/22c9a92e6c506c228ef23049ab3b8c908e80.pdf"
+ },
+ {
+ "contentType": "PRESENTATION",
+ "id": "b4d2151e29fb12dbe5d164b430273de65103d39b",
+ "title": "Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization",
+ "authors": [
+ "Martin Köstinger",
+ "Paul Wohlhart",
+ "Peter M. Roth",
+ "Horst Bischof"
+ ],
+ "published": 1412121600,
+ "url": "https://pdfs.semanticscholar.org/presentation/b4d2/151e29fb12dbe5d164b430273de65103d39b.pdf"
+ },
+ {
+ "contentType": "PRESENTATION",
+ "id": "d5c22cb54bd23f17289c31abb84baaf0cd439540",
+ "title": "Face Recognition by Humans: Nineteen Results All Computer Vision Researchers Should Know About",
+ "authors": [
+ "Pawan Sinha",
+ "Benjamin J. Balas",
+ "Yuri Ostrovsky",
+ "Richard Russell"
+ ],
+ "published": 1412121600,
+ "url": "https://pdfs.semanticscholar.org/presentation/d5c2/2cb54bd23f17289c31abb84baaf0cd439540.pdf"
+ },
+ {
+ "contentType": "PRESENTATION",
+ "id": "8e1f99812078e91a0d5722eaf54f20c6fc65df4c",
+ "title": "Challenges of the Open Source Component Marketplace in the Industry",
+ "authors": [
+ "Claudia P. Ayala",
+ "Øyvind Hauge",
+ "Reidar Conradi",
+ "Xavier Franch",
+ "Jingyue Li",
+ "Ketil Sandanger Velle"
+ ],
+ "published": 1412121600,
+ "url": "https://pdfs.semanticscholar.org/presentation/8e1f/99812078e91a0d5722eaf54f20c6fc65df4c.pdf"
+ },
+ {
+ "contentType": "PRESENTATION",
+ "id": "47bae254f82bdf4b66ea493afebf5a12c7291db2",
+ "title": "Pervasive computing: vision and challenges",
+ "authors": [
+ "Mahadev Satyanarayanan"
+ ],
+ "published": 1412121600,
+ "url": "https://pdfs.semanticscholar.org/presentation/47ba/e254f82bdf4b66ea493afebf5a12c7291db2.pdf"
+ },
+ {
+ "contentType": "PRESENTATION",
+ "id": "cf9f048a00f14ec5348a1dfe76a2e23b51e0b26a",
+ "title": "Challenges and opportunities in enterprise-wide optimization in the pharmaceutical industry",
+ "authors": [
+ "José Miguel Laínez",
+ "E. Schaefer",
+ "Gintaras V. Reklaitis"
+ ],
+ "published": 1511903839.603717,
+ "url": "https://pdfs.semanticscholar.org/presentation/cf9f/048a00f14ec5348a1dfe76a2e23b51e0b26a.pdf"
+ },
+ {
+ "contentType": "PRESENTATION",
+ "id": "cf0e634e608cf1d5574266d8c0110b75b3e615fd",
+ "title": "Industry 4.0 - Challenges in Anti-Counterfeiting",
+ "authors": [
+ "Christian Thiel",
+ "Christoph Thiel"
+ ],
+ "published": 1511948094.409062,
+ "url": "https://pdfs.semanticscholar.org/presentation/cf0e/634e608cf1d5574266d8c0110b75b3e615fd.pdf"
+ },
+ {
+ "contentType": "PRESENTATION",
+ "id": "6b9267cc7b4277e4abdefb52157bc69d318412c9",
+ "title": "Energy demand forecasting: industry practices and challenges",
+ "authors": [
+ "Mathieu Sinn"
+ ],
+ "published": 1447968149.539,
+ "url": "https://pdfs.semanticscholar.org/presentation/6b92/67cc7b4277e4abdefb52157bc69d318412c9.pdf"
+ },
+ {
+ "contentType": "PRESENTATION",
+ "id": "09e47a1b9abcce753ce124eab586d1cb03abdde4",
+ "title": "Hardware Acceleration Technologies in Computer Algebra : Challenges and Impact",
+ "authors": [
+ "Sardar Anisul Haque"
+ ],
+ "published": 1427429814,
+ "url": "https://pdfs.semanticscholar.org/presentation/09e4/7a1b9abcce753ce124eab586d1cb03abdde4.pdf"
+ },
+ {
+ "contentType": "PRESENTATION",
+ "id": "6442a7dc149de6763b0959d924ec5a3dbe09ec1f",
+ "title": "New results on the coarseness of bicolored point sets",
+ "authors": [
+ "José Miguel Díaz-Báñez",
+ "Ruy Fabila Monroy",
+ "Pablo Pérez-Lantero",
+ "Inmaculada Ventura"
+ ],
+ "published": 1385693873,
+ "url": "https://pdfs.semanticscholar.org/presentation/6442/a7dc149de6763b0959d924ec5a3dbe09ec1f.pdf"
+ }
+ ],
+ "featuredContent": []
+} \ No newline at end of file
diff --git a/scraper/samples/s2-papers-api.json b/scraper/samples/s2-papers-api.json
new file mode 100644
index 00000000..4db657b8
--- /dev/null
+++ b/scraper/samples/s2-papers-api.json
@@ -0,0 +1,2855 @@
+{
+ "arxivId": "1501.05703",
+ "authors": [
+ {
+ "authorId": "49410785",
+ "name": "Ning Zhang",
+ "url": "https://www.semanticscholar.org/author/49410785"
+ },
+ {
+ "authorId": "2210374",
+ "name": "Manohar Paluri",
+ "url": "https://www.semanticscholar.org/author/2210374"
+ },
+ {
+ "authorId": "2188620",
+ "name": "Yaniv Taigman",
+ "url": "https://www.semanticscholar.org/author/2188620"
+ },
+ {
+ "authorId": "2276554",
+ "name": "Rob Fergus",
+ "url": "https://www.semanticscholar.org/author/2276554"
+ },
+ {
+ "authorId": "1769383",
+ "name": "Lubomir D. Bourdev",
+ "url": "https://www.semanticscholar.org/author/1769383"
+ }
+ ],
+ "citationVelocity": 22,
+ "citations": [
+ {
+ "arxivId": "1612.02155",
+ "authors": [
+ {
+ "authorId": "2963501",
+ "name": "Shayan Modiri Assari",
+ "url": "https://www.semanticscholar.org/author/2963501"
+ },
+ {
+ "authorId": "1803711",
+ "name": "Haroon Idrees",
+ "url": "https://www.semanticscholar.org/author/1803711"
+ },
+ {
+ "authorId": "1745480",
+ "name": "Mubarak Shah",
+ "url": "https://www.semanticscholar.org/author/1745480"
+ }
+ ],
+ "doi": null,
+ "isInfluential": false,
+ "paperId": "a1e97c4043d5cc9896dc60ae7ca135782d89e5fc",
+ "title": "Re-identification of Humans in Crowds using Personal, Social and Environmental Constraints",
+ "url": "https://www.semanticscholar.org/paper/a1e97c4043d5cc9896dc60ae7ca135782d89e5fc",
+ "venue": "ArXiv",
+ "year": 2016
+ },
+ {
+ "arxivId": "1804.04779",
+ "authors": [
+ {
+ "authorId": "32222907",
+ "name": "Qianru Sun",
+ "url": "https://www.semanticscholar.org/author/32222907"
+ },
+ {
+ "authorId": "9102722",
+ "name": "Ayush Tewari",
+ "url": "https://www.semanticscholar.org/author/9102722"
+ },
+ {
+ "authorId": "9765909",
+ "name": "Weipeng Xu",
+ "url": "https://www.semanticscholar.org/author/9765909"
+ },
+ {
+ "authorId": "1739548",
+ "name": "Mario Fritz",
+ "url": "https://www.semanticscholar.org/author/1739548"
+ },
+ {
+ "authorId": "1680185",
+ "name": "Christian Theobalt",
+ "url": "https://www.semanticscholar.org/author/1680185"
+ },
+ {
+ "authorId": "1697100",
+ "name": "Bernt Schiele",
+ "url": "https://www.semanticscholar.org/author/1697100"
+ }
+ ],
+ "doi": null,
+ "isInfluential": false,
+ "paperId": "cd2c54705c455a4379f45eefdf32d8d10087e521",
+ "title": "A Hybrid Model for Identity Obfuscation by Face Replacement",
+ "url": "https://www.semanticscholar.org/paper/cd2c54705c455a4379f45eefdf32d8d10087e521",
+ "venue": "ArXiv",
+ "year": 2018
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "34949584",
+ "name": "Guangcai Zha",
+ "url": "https://www.semanticscholar.org/author/34949584"
+ }
+ ],
+ "doi": null,
+ "isInfluential": false,
+ "paperId": "0da75b0d341c8f945fae1da6c77b6ec345f47f2a",
+ "title": "The Effect of Computer-Generated Descriptions on Photo-Sharing Experiences of People With Visual Impairments",
+ "url": "https://www.semanticscholar.org/paper/0da75b0d341c8f945fae1da6c77b6ec345f47f2a",
+ "venue": "",
+ "year": 2017
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "2851931",
+ "name": "Zongji Sun",
+ "url": "https://www.semanticscholar.org/author/2851931"
+ },
+ {
+ "authorId": "2133352",
+ "name": "Li Meng",
+ "url": "https://www.semanticscholar.org/author/2133352"
+ },
+ {
+ "authorId": "1763301",
+ "name": "Aladdin M. Ariyaeeinia",
+ "url": "https://www.semanticscholar.org/author/1763301"
+ },
+ {
+ "authorId": "2606013",
+ "name": "Xiaodong Duan",
+ "url": "https://www.semanticscholar.org/author/2606013"
+ },
+ {
+ "authorId": "1709835",
+ "name": "Zheng-Hua Tan",
+ "url": "https://www.semanticscholar.org/author/1709835"
+ }
+ ],
+ "doi": "10.1109/MIPRO.2016.7522350",
+ "isInfluential": false,
+ "paperId": "8886b21f97c114a23b24dc7025bbf42885adc3a7",
+ "title": "Privacy protection performance of De-identified face images with and without background",
+ "url": "https://www.semanticscholar.org/paper/8886b21f97c114a23b24dc7025bbf42885adc3a7",
+ "venue": "2016 39th International Convention on Information and Communication Technology, Electronics and Microelectronics (MIPRO)",
+ "year": 2016
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "2963501",
+ "name": "Shayan Modiri Assari",
+ "url": "https://www.semanticscholar.org/author/2963501"
+ },
+ {
+ "authorId": "1803711",
+ "name": "Haroon Idrees",
+ "url": "https://www.semanticscholar.org/author/1803711"
+ },
+ {
+ "authorId": "1745480",
+ "name": "Mubarak Shah",
+ "url": "https://www.semanticscholar.org/author/1745480"
+ }
+ ],
+ "doi": "10.1007/978-3-319-46475-6_8",
+ "isInfluential": false,
+ "paperId": "2b339ece73e3787f445c5b92078e8f82c9b1c522",
+ "title": "Human Re-identification in Crowd Videos Using Personal, Social and Environmental Constraints",
+ "url": "https://www.semanticscholar.org/paper/2b339ece73e3787f445c5b92078e8f82c9b1c522",
+ "venue": "ECCV",
+ "year": 2016
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "1748171",
+ "name": "Paarijaat Aditya",
+ "url": "https://www.semanticscholar.org/author/1748171"
+ },
+ {
+ "authorId": "50410801",
+ "name": "Rijurekha Sen",
+ "url": "https://www.semanticscholar.org/author/50410801"
+ },
+ {
+ "authorId": "1736987",
+ "name": "Peter Druschel",
+ "url": "https://www.semanticscholar.org/author/1736987"
+ },
+ {
+ "authorId": "2390510",
+ "name": "Seong Joon Oh",
+ "url": "https://www.semanticscholar.org/author/2390510"
+ },
+ {
+ "authorId": "1798000",
+ "name": "Rodrigo Benenson",
+ "url": "https://www.semanticscholar.org/author/1798000"
+ },
+ {
+ "authorId": "1739548",
+ "name": "Mario Fritz",
+ "url": "https://www.semanticscholar.org/author/1739548"
+ },
+ {
+ "authorId": "1697100",
+ "name": "Bernt Schiele",
+ "url": "https://www.semanticscholar.org/author/1697100"
+ },
+ {
+ "authorId": "1708336",
+ "name": "Bobby Bhattacharjee",
+ "url": "https://www.semanticscholar.org/author/1708336"
+ },
+ {
+ "authorId": "40084289",
+ "name": "Tong Tong Wu",
+ "url": "https://www.semanticscholar.org/author/40084289"
+ }
+ ],
+ "doi": "10.1145/2938559.2938589",
+ "isInfluential": false,
+ "paperId": "10ab1b48b2a55ec9e2920a5397febd84906a7769",
+ "title": "Demo: I-Pic: A Platform for Privacy-Compliant Image Capture",
+ "url": "https://www.semanticscholar.org/paper/10ab1b48b2a55ec9e2920a5397febd84906a7769",
+ "venue": "MobiSys",
+ "year": 2016
+ },
+ {
+ "arxivId": "1607.08438",
+ "authors": [
+ {
+ "authorId": "2390510",
+ "name": "Seong Joon Oh",
+ "url": "https://www.semanticscholar.org/author/2390510"
+ },
+ {
+ "authorId": "1798000",
+ "name": "Rodrigo Benenson",
+ "url": "https://www.semanticscholar.org/author/1798000"
+ },
+ {
+ "authorId": "1739548",
+ "name": "Mario Fritz",
+ "url": "https://www.semanticscholar.org/author/1739548"
+ },
+ {
+ "authorId": "1697100",
+ "name": "Bernt Schiele",
+ "url": "https://www.semanticscholar.org/author/1697100"
+ }
+ ],
+ "doi": "10.1007/978-3-319-46487-9_2",
+ "isInfluential": true,
+ "paperId": "bc27434e376db89fe0e6ef2d2fabc100d2575ec6",
+ "title": "Faceless Person Recognition; Privacy Implications in Social Media",
+ "url": "https://www.semanticscholar.org/paper/bc27434e376db89fe0e6ef2d2fabc100d2575ec6",
+ "venue": "ECCV",
+ "year": 2016
+ },
+ {
+ "arxivId": "1509.03502",
+ "authors": [
+ {
+ "authorId": "2390510",
+ "name": "Seong Joon Oh",
+ "url": "https://www.semanticscholar.org/author/2390510"
+ },
+ {
+ "authorId": "1798000",
+ "name": "Rodrigo Benenson",
+ "url": "https://www.semanticscholar.org/author/1798000"
+ },
+ {
+ "authorId": "1739548",
+ "name": "Mario Fritz",
+ "url": "https://www.semanticscholar.org/author/1739548"
+ },
+ {
+ "authorId": "1697100",
+ "name": "Bernt Schiele",
+ "url": "https://www.semanticscholar.org/author/1697100"
+ }
+ ],
+ "doi": "10.1109/ICCV.2015.440",
+ "isInfluential": true,
+ "paperId": "0c59071ddd33849bd431165bc2d21bbe165a81e0",
+ "title": "Person Recognition in Personal Photo Collections",
+ "url": "https://www.semanticscholar.org/paper/0c59071ddd33849bd431165bc2d21bbe165a81e0",
+ "venue": "2015 IEEE International Conference on Computer Vision (ICCV)",
+ "year": 2015
+ },
+ {
+ "arxivId": "1705.07206",
+ "authors": [
+ {
+ "authorId": "2757639",
+ "name": "Jianshu Li",
+ "url": "https://www.semanticscholar.org/author/2757639"
+ },
+ {
+ "authorId": "46509407",
+ "name": "Jian Zhao",
+ "url": "https://www.semanticscholar.org/author/46509407"
+ },
+ {
+ "authorId": "49020088",
+ "name": "Yunchao Wei",
+ "url": "https://www.semanticscholar.org/author/49020088"
+ },
+ {
+ "authorId": "3350185",
+ "name": "Congyan Lang",
+ "url": "https://www.semanticscholar.org/author/3350185"
+ },
+ {
+ "authorId": "2263674",
+ "name": "Yidong Li",
+ "url": "https://www.semanticscholar.org/author/2263674"
+ },
+ {
+ "authorId": "1715286",
+ "name": "Terence Sim",
+ "url": "https://www.semanticscholar.org/author/1715286"
+ },
+ {
+ "authorId": "1698982",
+ "name": "Shuicheng Yan",
+ "url": "https://www.semanticscholar.org/author/1698982"
+ },
+ {
+ "authorId": "33221685",
+ "name": "Jiashi Feng",
+ "url": "https://www.semanticscholar.org/author/33221685"
+ }
+ ],
+ "doi": null,
+ "isInfluential": false,
+ "paperId": "b5968e7bb23f5f03213178c22fd2e47af3afa04c",
+ "title": "Multiple-Human Parsing in the Wild",
+ "url": "https://www.semanticscholar.org/paper/b5968e7bb23f5f03213178c22fd2e47af3afa04c",
+ "venue": "",
+ "year": 2017
+ },
+ {
+ "arxivId": "1806.03084",
+ "authors": [
+ {
+ "authorId": "39360892",
+ "name": "Qingqiu Huang",
+ "url": "https://www.semanticscholar.org/author/39360892"
+ },
+ {
+ "authorId": "50446092",
+ "name": "Yu Xiong",
+ "url": "https://www.semanticscholar.org/author/50446092"
+ },
+ {
+ "authorId": "1807606",
+ "name": "Dahua Lin",
+ "url": "https://www.semanticscholar.org/author/1807606"
+ }
+ ],
+ "doi": null,
+ "isInfluential": true,
+ "paperId": "1c9efb6c895917174ac6ccc3bae191152f90c625",
+ "title": "Unifying Identification and Context Learning for Person Recognition",
+ "url": "https://www.semanticscholar.org/paper/1c9efb6c895917174ac6ccc3bae191152f90c625",
+ "venue": "ArXiv",
+ "year": 2018
+ },
+ {
+ "arxivId": "1805.05838",
+ "authors": [
+ {
+ "authorId": "9517443",
+ "name": "Tribhuvanesh Orekondy",
+ "url": "https://www.semanticscholar.org/author/9517443"
+ },
+ {
+ "authorId": "2390510",
+ "name": "Seong Joon Oh",
+ "url": "https://www.semanticscholar.org/author/2390510"
+ },
+ {
+ "authorId": "1697100",
+ "name": "Bernt Schiele",
+ "url": "https://www.semanticscholar.org/author/1697100"
+ },
+ {
+ "authorId": "1739548",
+ "name": "Mario Fritz",
+ "url": "https://www.semanticscholar.org/author/1739548"
+ }
+ ],
+ "doi": null,
+ "isInfluential": false,
+ "paperId": "3e0a1884448bfd7f416c6a45dfcdfc9f2e617268",
+ "title": "Understanding and Controlling User Linkability in Decentralized Learning",
+ "url": "https://www.semanticscholar.org/paper/3e0a1884448bfd7f416c6a45dfcdfc9f2e617268",
+ "venue": "ArXiv",
+ "year": 2018
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "2416019",
+ "name": "Lin Yuan",
+ "url": "https://www.semanticscholar.org/author/2416019"
+ },
+ {
+ "authorId": "1681498",
+ "name": "Touradj Ebrahimi",
+ "url": "https://www.semanticscholar.org/author/1681498"
+ }
+ ],
+ "doi": null,
+ "isInfluential": true,
+ "paperId": "1451e7b11e66c86104f9391b80d9fb422fb11c01",
+ "title": "Image privacy protection with secure JPEG transmorphing",
+ "url": "https://www.semanticscholar.org/paper/1451e7b11e66c86104f9391b80d9fb422fb11c01",
+ "venue": "",
+ "year": 2017
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "3131569",
+ "name": "Haoxiang Li",
+ "url": "https://www.semanticscholar.org/author/3131569"
+ },
+ {
+ "authorId": "1721019",
+ "name": "Jonathan Brandt",
+ "url": "https://www.semanticscholar.org/author/1721019"
+ },
+ {
+ "authorId": "2012810",
+ "name": "Zhe L. Lin",
+ "url": "https://www.semanticscholar.org/author/2012810"
+ },
+ {
+ "authorId": "1720987",
+ "name": "Xiaohui Shen",
+ "url": "https://www.semanticscholar.org/author/1720987"
+ },
+ {
+ "authorId": "1745420",
+ "name": "Gang Hua",
+ "url": "https://www.semanticscholar.org/author/1745420"
+ }
+ ],
+ "doi": "10.1109/CVPR.2016.145",
+ "isInfluential": true,
+ "paperId": "1e1d7cbbef67e9e042a3a0a9a1bcefcc4a9adacf",
+ "title": "A Multi-level Contextual Model for Person Recognition in Photo Albums",
+ "url": "https://www.semanticscholar.org/paper/1e1d7cbbef67e9e042a3a0a9a1bcefcc4a9adacf",
+ "venue": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
+ "year": 2016
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "47103251",
+ "name": "Yuansheng Xu",
+ "url": "https://www.semanticscholar.org/author/47103251"
+ },
+ {
+ "authorId": "7402662",
+ "name": "Fangyue Peng",
+ "url": "https://www.semanticscholar.org/author/7402662"
+ },
+ {
+ "authorId": "49521368",
+ "name": "Yu Yuan",
+ "url": "https://www.semanticscholar.org/author/49521368"
+ },
+ {
+ "authorId": "36637369",
+ "name": "Yizhou Wang",
+ "url": "https://www.semanticscholar.org/author/36637369"
+ }
+ ],
+ "doi": "10.1109/ICASSP.2017.7952713",
+ "isInfluential": false,
+ "paperId": "d94d7ff6f46ad5cab5c20e6ac14c1de333711a0c",
+ "title": "Face Album: Towards automatic photo management based on person identity on mobile phones",
+ "url": "https://www.semanticscholar.org/paper/d94d7ff6f46ad5cab5c20e6ac14c1de333711a0c",
+ "venue": "2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)",
+ "year": 2017
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "9146911",
+ "name": "Shaghayegh Gharghabi",
+ "url": "https://www.semanticscholar.org/author/9146911"
+ },
+ {
+ "authorId": "9368612",
+ "name": "Bita Azari",
+ "url": "https://www.semanticscholar.org/author/9368612"
+ },
+ {
+ "authorId": "2319579",
+ "name": "Faraz Shamshirdar",
+ "url": "https://www.semanticscholar.org/author/2319579"
+ },
+ {
+ "authorId": "1682051",
+ "name": "Reza Safabakhsh",
+ "url": "https://www.semanticscholar.org/author/1682051"
+ }
+ ],
+ "doi": null,
+ "isInfluential": false,
+ "paperId": "c8b9217ee36aebb9735e525b718490dc27c8c1cb",
+ "title": "Improving person recognition by weight adaptation of soft biometrics",
+ "url": "https://www.semanticscholar.org/paper/c8b9217ee36aebb9735e525b718490dc27c8c1cb",
+ "venue": "2016 6th International Conference on Computer and Knowledge Engineering (ICCKE)",
+ "year": 2016
+ },
+ {
+ "arxivId": "1707.06436",
+ "authors": [
+ {
+ "authorId": "1730200",
+ "name": "Hirokatsu Kataoka",
+ "url": "https://www.semanticscholar.org/author/1730200"
+ },
+ {
+ "authorId": "3393640",
+ "name": "Soma Shirakabe",
+ "url": "https://www.semanticscholar.org/author/3393640"
+ },
+ {
+ "authorId": "1713046",
+ "name": "Yun He",
+ "url": "https://www.semanticscholar.org/author/1713046"
+ },
+ {
+ "authorId": "9935341",
+ "name": "Shunya Ueta",
+ "url": "https://www.semanticscholar.org/author/9935341"
+ },
+ {
+ "authorId": "5014206",
+ "name": "Teppei Suzuki",
+ "url": "https://www.semanticscholar.org/author/5014206"
+ },
+ {
+ "authorId": "49897653",
+ "name": "Kaori Abe",
+ "url": "https://www.semanticscholar.org/author/49897653"
+ },
+ {
+ "authorId": "2554424",
+ "name": "Asako Kanezaki",
+ "url": "https://www.semanticscholar.org/author/2554424"
+ },
+ {
+ "authorId": "49133490",
+ "name": "Shinichiro Morita",
+ "url": "https://www.semanticscholar.org/author/49133490"
+ },
+ {
+ "authorId": "22219521",
+ "name": "Toshiyuki Yabe",
+ "url": "https://www.semanticscholar.org/author/22219521"
+ },
+ {
+ "authorId": "50544018",
+ "name": "Yoshihiro Kanehara",
+ "url": "https://www.semanticscholar.org/author/50544018"
+ },
+ {
+ "authorId": "22174281",
+ "name": "Hiroya Yatsuyanagi",
+ "url": "https://www.semanticscholar.org/author/22174281"
+ },
+ {
+ "authorId": "1692565",
+ "name": "Shinya Maruyama",
+ "url": "https://www.semanticscholar.org/author/1692565"
+ },
+ {
+ "authorId": "10756539",
+ "name": "Ryousuke Takasawa",
+ "url": "https://www.semanticscholar.org/author/10756539"
+ },
+ {
+ "authorId": "3217653",
+ "name": "Masataka Fuchida",
+ "url": "https://www.semanticscholar.org/author/3217653"
+ },
+ {
+ "authorId": "2642022",
+ "name": "Yudai Miyashita",
+ "url": "https://www.semanticscholar.org/author/2642022"
+ },
+ {
+ "authorId": "34935749",
+ "name": "Kazushige Okayasu",
+ "url": "https://www.semanticscholar.org/author/34935749"
+ },
+ {
+ "authorId": "20505300",
+ "name": "Yuta Matsuzaki",
+ "url": "https://www.semanticscholar.org/author/20505300"
+ }
+ ],
+ "doi": null,
+ "isInfluential": false,
+ "paperId": "28cd46a078e8fad370b1aba34762a874374513a5",
+ "title": "cvpaper.challenge in 2016: Futuristic Computer Vision through 1, 600 Papers Survey",
+ "url": "https://www.semanticscholar.org/paper/28cd46a078e8fad370b1aba34762a874374513a5",
+ "venue": "ArXiv",
+ "year": 2017
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "2582568",
+ "name": "Yuhang Zhao",
+ "url": "https://www.semanticscholar.org/author/2582568"
+ },
+ {
+ "authorId": "1968133",
+ "name": "Shaomei Wu",
+ "url": "https://www.semanticscholar.org/author/1968133"
+ },
+ {
+ "authorId": "39685591",
+ "name": "Lindsay Reynolds",
+ "url": "https://www.semanticscholar.org/author/39685591"
+ },
+ {
+ "authorId": "3283573",
+ "name": "Shiri Azenkot",
+ "url": "https://www.semanticscholar.org/author/3283573"
+ }
+ ],
+ "doi": "10.1145/3173574.3173789",
+ "isInfluential": false,
+ "paperId": "0aaf785d7f21d2b5ad582b456896495d30b0a4e2",
+ "title": "A Face Recognition Application for People with Visual Impairments: Understanding Use Beyond the Lab",
+ "url": "https://www.semanticscholar.org/paper/0aaf785d7f21d2b5ad582b456896495d30b0a4e2",
+ "venue": "CHI",
+ "year": 2018
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": null,
+ "name": "Bouthour",
+ "url": null
+ },
+ {
+ "authorId": null,
+ "name": "Khoubeib",
+ "url": null
+ },
+ {
+ "authorId": "2488116",
+ "name": "Marcus S. Stefanou",
+ "url": "https://www.semanticscholar.org/author/2488116"
+ }
+ ],
+ "doi": null,
+ "isInfluential": true,
+ "paperId": "ef999ab2f7b37f46445a3457bf6c0f5fd7b5689d",
+ "title": "Improving face verification in photo albums by combining facial recognition and metadata with cross-matching",
+ "url": "https://www.semanticscholar.org/paper/ef999ab2f7b37f46445a3457bf6c0f5fd7b5689d",
+ "venue": "",
+ "year": 2016
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "11269472",
+ "name": "Iacopo Masi",
+ "url": "https://www.semanticscholar.org/author/11269472"
+ },
+ {
+ "authorId": "1752756",
+ "name": "Feng-Ju Chang",
+ "url": "https://www.semanticscholar.org/author/1752756"
+ },
+ {
+ "authorId": "49331459",
+ "name": "Jongmoo Choi",
+ "url": "https://www.semanticscholar.org/author/49331459"
+ },
+ {
+ "authorId": "35840854",
+ "name": "Shai Harel",
+ "url": "https://www.semanticscholar.org/author/35840854"
+ },
+ {
+ "authorId": "5911467",
+ "name": "Jungyeon Kim",
+ "url": "https://www.semanticscholar.org/author/5911467"
+ },
+ {
+ "authorId": "2792633",
+ "name": "KangGeon Kim",
+ "url": "https://www.semanticscholar.org/author/2792633"
+ },
+ {
+ "authorId": "2955822",
+ "name": "Jatuporn Toy Leksut",
+ "url": "https://www.semanticscholar.org/author/2955822"
+ },
+ {
+ "authorId": "38696444",
+ "name": "Stephen Rawls",
+ "url": "https://www.semanticscholar.org/author/38696444"
+ },
+ {
+ "authorId": "46220641",
+ "name": "Yue Wu",
+ "url": "https://www.semanticscholar.org/author/46220641"
+ },
+ {
+ "authorId": "1756099",
+ "name": "Tal Hassner",
+ "url": "https://www.semanticscholar.org/author/1756099"
+ },
+ {
+ "authorId": "17806729",
+ "name": "Wael AbdAlmageed",
+ "url": "https://www.semanticscholar.org/author/17806729"
+ },
+ {
+ "authorId": "3463966",
+ "name": "Gérard G. Medioni",
+ "url": "https://www.semanticscholar.org/author/3463966"
+ },
+ {
+ "authorId": "49933077",
+ "name": "Louis-Philippe Morency",
+ "url": "https://www.semanticscholar.org/author/49933077"
+ },
+ {
+ "authorId": "40155735",
+ "name": "Prem Natarajan",
+ "url": "https://www.semanticscholar.org/author/40155735"
+ },
+ {
+ "authorId": "51061892",
+ "name": "Ramkant Nevatia",
+ "url": "https://www.semanticscholar.org/author/51061892"
+ }
+ ],
+ "doi": "10.1109/TPAMI.2018.2792452",
+ "isInfluential": true,
+ "paperId": "6f22628d34a486d73c6b46eb071200a00e3abae3",
+ "title": "Learning Pose-Aware Models for Pose-Invariant Face Recognition in the Wild.",
+ "url": "https://www.semanticscholar.org/paper/6f22628d34a486d73c6b46eb071200a00e3abae3",
+ "venue": "IEEE transactions on pattern analysis and machine intelligence",
+ "year": 2018
+ },
+ {
+ "arxivId": "1807.10510",
+ "authors": [
+ {
+ "authorId": "39360892",
+ "name": "Qingqiu Huang",
+ "url": "https://www.semanticscholar.org/author/39360892"
+ },
+ {
+ "authorId": "40584026",
+ "name": "Wentao Liu",
+ "url": "https://www.semanticscholar.org/author/40584026"
+ },
+ {
+ "authorId": "1807606",
+ "name": "Dahua Lin",
+ "url": "https://www.semanticscholar.org/author/1807606"
+ }
+ ],
+ "doi": null,
+ "isInfluential": true,
+ "paperId": "c97a5f2241cc6cd99ef0c4527ea507a50841f60b",
+ "title": "Person Search in Videos with One Portrait Through Visual and Temporal Links",
+ "url": "https://www.semanticscholar.org/paper/c97a5f2241cc6cd99ef0c4527ea507a50841f60b",
+ "venue": "ArXiv",
+ "year": 2018
+ },
+ {
+ "arxivId": "1705.10120",
+ "authors": [
+ {
+ "authorId": "37956314",
+ "name": "Vijay Kumar",
+ "url": "https://www.semanticscholar.org/author/37956314"
+ },
+ {
+ "authorId": "3185334",
+ "name": "Anoop M. Namboodiri",
+ "url": "https://www.semanticscholar.org/author/3185334"
+ },
+ {
+ "authorId": "2210374",
+ "name": "Manohar Paluri",
+ "url": "https://www.semanticscholar.org/author/2210374"
+ },
+ {
+ "authorId": "1694502",
+ "name": "C. V. Jawahar",
+ "url": "https://www.semanticscholar.org/author/1694502"
+ }
+ ],
+ "doi": "10.1109/CVPR.2017.719",
+ "isInfluential": false,
+ "paperId": "a3d8b5622c4b9af1f753aade57e4774730787a00",
+ "title": "Pose-Aware Person Recognition",
+ "url": "https://www.semanticscholar.org/paper/a3d8b5622c4b9af1f753aade57e4774730787a00",
+ "venue": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
+ "year": 2017
+ },
+ {
+ "arxivId": "1702.06890",
+ "authors": [
+ {
+ "authorId": "1715752",
+ "name": "Yu Liu",
+ "url": "https://www.semanticscholar.org/author/1715752"
+ },
+ {
+ "authorId": "46382329",
+ "name": "Hongyang Li",
+ "url": "https://www.semanticscholar.org/author/46382329"
+ },
+ {
+ "authorId": "31843833",
+ "name": "Xiaogang Wang",
+ "url": "https://www.semanticscholar.org/author/31843833"
+ }
+ ],
+ "doi": null,
+ "isInfluential": true,
+ "paperId": "6fed504da4e192fe4c2d452754d23d3db4a4e5e3",
+ "title": "Learning Deep Features via Congenerous Cosine Loss for Person Recognition",
+ "url": "https://www.semanticscholar.org/paper/6fed504da4e192fe4c2d452754d23d3db4a4e5e3",
+ "venue": "ArXiv",
+ "year": 2017
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "49410785",
+ "name": "Ning Zhang",
+ "url": "https://www.semanticscholar.org/author/49410785"
+ }
+ ],
+ "doi": null,
+ "isInfluential": false,
+ "paperId": "d6a9ea9b40a7377c91c705f4c7f206a669a9eea2",
+ "title": "Visual Representations for Fine-grained Categorization",
+ "url": "https://www.semanticscholar.org/paper/d6a9ea9b40a7377c91c705f4c7f206a669a9eea2",
+ "venue": "",
+ "year": 2015
+ },
+ {
+ "arxivId": "1604.02531",
+ "authors": [
+ {
+ "authorId": "50853180",
+ "name": "Liang Zheng",
+ "url": "https://www.semanticscholar.org/author/50853180"
+ },
+ {
+ "authorId": "1983351",
+ "name": "Hengheng Zhang",
+ "url": "https://www.semanticscholar.org/author/1983351"
+ },
+ {
+ "authorId": "3141359",
+ "name": "Shaoyan Sun",
+ "url": "https://www.semanticscholar.org/author/3141359"
+ },
+ {
+ "authorId": "2099305",
+ "name": "Manmohan Krishna Chandraker",
+ "url": "https://www.semanticscholar.org/author/2099305"
+ },
+ {
+ "authorId": "50842217",
+ "name": "Qi Tian",
+ "url": "https://www.semanticscholar.org/author/50842217"
+ }
+ ],
+ "doi": "10.1109/CVPR.2017.357",
+ "isInfluential": false,
+ "paperId": "0b84f07af44f964817675ad961def8a51406dd2e",
+ "title": "Person Re-identification in the Wild",
+ "url": "https://www.semanticscholar.org/paper/0b84f07af44f964817675ad961def8a51406dd2e",
+ "venue": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
+ "year": 2017
+ },
+ {
+ "arxivId": "1809.09189",
+ "authors": [
+ {
+ "authorId": "51264689",
+ "name": "Sina Mokhtarzadeh Azar",
+ "url": "https://www.semanticscholar.org/author/51264689"
+ },
+ {
+ "authorId": "51268224",
+ "name": "Sajjad Azami",
+ "url": "https://www.semanticscholar.org/author/51268224"
+ },
+ {
+ "authorId": "51388019",
+ "name": "Mina Ghadimi Atigh",
+ "url": "https://www.semanticscholar.org/author/51388019"
+ },
+ {
+ "authorId": "1826726",
+ "name": "Mohammad Javadi",
+ "url": "https://www.semanticscholar.org/author/1826726"
+ },
+ {
+ "authorId": "1780566",
+ "name": "Ahmad Nickabadi",
+ "url": "https://www.semanticscholar.org/author/1780566"
+ }
+ ],
+ "doi": null,
+ "isInfluential": true,
+ "paperId": "be393cd567b338da6ed60181c8ad429627578a31",
+ "title": "Zoom-RNN: A Novel Method for Person Recognition Using Recurrent Neural Networks",
+ "url": "https://www.semanticscholar.org/paper/be393cd567b338da6ed60181c8ad429627578a31",
+ "venue": "",
+ "year": 2018
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "1956609",
+ "name": "Vassilios Vonikakis",
+ "url": "https://www.semanticscholar.org/author/1956609"
+ },
+ {
+ "authorId": "48236457",
+ "name": "Ramanathan Subramanian",
+ "url": "https://www.semanticscholar.org/author/48236457"
+ },
+ {
+ "authorId": "49530400",
+ "name": "Jonas Toft Arnfred",
+ "url": "https://www.semanticscholar.org/author/49530400"
+ },
+ {
+ "authorId": "1702224",
+ "name": "Stefan Winkler",
+ "url": "https://www.semanticscholar.org/author/1702224"
+ }
+ ],
+ "doi": "10.1109/TMM.2017.2699859",
+ "isInfluential": false,
+ "paperId": "eb8a3948c4be0d23eb7326d27f2271be893b3409",
+ "title": "A Probabilistic Approach to People-Centric Photo Selection and Sequencing",
+ "url": "https://www.semanticscholar.org/paper/eb8a3948c4be0d23eb7326d27f2271be893b3409",
+ "venue": "IEEE Transactions on Multimedia",
+ "year": 2017
+ },
+ {
+ "arxivId": "1801.10442",
+ "authors": [
+ {
+ "authorId": "19263506",
+ "name": "Arsha Nagrani",
+ "url": "https://www.semanticscholar.org/author/19263506"
+ },
+ {
+ "authorId": "1688869",
+ "name": "Andrew Zisserman",
+ "url": "https://www.semanticscholar.org/author/1688869"
+ }
+ ],
+ "doi": null,
+ "isInfluential": false,
+ "paperId": "ff1f45bdad41d8b35435098041e009627e60d208",
+ "title": "From Benedict Cumberbatch to Sherlock Holmes: Character Identification in TV series without a Script",
+ "url": "https://www.semanticscholar.org/paper/ff1f45bdad41d8b35435098041e009627e60d208",
+ "venue": "BMVC",
+ "year": 2017
+ },
+ {
+ "arxivId": "1605.08247",
+ "authors": [
+ {
+ "authorId": "1730200",
+ "name": "Hirokatsu Kataoka",
+ "url": "https://www.semanticscholar.org/author/1730200"
+ },
+ {
+ "authorId": "2642022",
+ "name": "Yudai Miyashita",
+ "url": "https://www.semanticscholar.org/author/2642022"
+ },
+ {
+ "authorId": "3108668",
+ "name": "Tomoaki K. Yamabe",
+ "url": "https://www.semanticscholar.org/author/3108668"
+ },
+ {
+ "authorId": "3393640",
+ "name": "Soma Shirakabe",
+ "url": "https://www.semanticscholar.org/author/3393640"
+ },
+ {
+ "authorId": "46427523",
+ "name": "Shin-ichi Sato",
+ "url": "https://www.semanticscholar.org/author/46427523"
+ },
+ {
+ "authorId": "29998543",
+ "name": "Hironori Hoshino",
+ "url": "https://www.semanticscholar.org/author/29998543"
+ },
+ {
+ "authorId": "2348426",
+ "name": "Ryo Kato",
+ "url": "https://www.semanticscholar.org/author/2348426"
+ },
+ {
+ "authorId": "49897653",
+ "name": "Kaori Abe",
+ "url": "https://www.semanticscholar.org/author/49897653"
+ },
+ {
+ "authorId": "3407486",
+ "name": "Takaaki Imanari",
+ "url": "https://www.semanticscholar.org/author/3407486"
+ },
+ {
+ "authorId": "26851746",
+ "name": "Naomichi Kobayashi",
+ "url": "https://www.semanticscholar.org/author/26851746"
+ },
+ {
+ "authorId": "49133490",
+ "name": "Shinichiro Morita",
+ "url": "https://www.semanticscholar.org/author/49133490"
+ },
+ {
+ "authorId": "2462801",
+ "name": "Akio Nakamura",
+ "url": "https://www.semanticscholar.org/author/2462801"
+ }
+ ],
+ "doi": null,
+ "isInfluential": false,
+ "paperId": "2f7e9b45255c9029d2ae97bbb004d6072e70fa79",
+ "title": "cvpaper.challenge in 2015 - A review of CVPR2015 and DeepSurvey",
+ "url": "https://www.semanticscholar.org/paper/2f7e9b45255c9029d2ae97bbb004d6072e70fa79",
+ "venue": "ArXiv",
+ "year": 2016
+ },
+ {
+ "arxivId": "1807.00504",
+ "authors": [
+ {
+ "authorId": "29988001",
+ "name": "Zhouxia Wang",
+ "url": "https://www.semanticscholar.org/author/29988001"
+ },
+ {
+ "authorId": "1765674",
+ "name": "Tianshui Chen",
+ "url": "https://www.semanticscholar.org/author/1765674"
+ },
+ {
+ "authorId": "1723599",
+ "name": "Jimmy S. J. Ren",
+ "url": "https://www.semanticscholar.org/author/1723599"
+ },
+ {
+ "authorId": "12254824",
+ "name": "Weihao Yu",
+ "url": "https://www.semanticscholar.org/author/12254824"
+ },
+ {
+ "authorId": "47413456",
+ "name": "Hui Cheng",
+ "url": "https://www.semanticscholar.org/author/47413456"
+ },
+ {
+ "authorId": "1737218",
+ "name": "Liang Lin",
+ "url": "https://www.semanticscholar.org/author/1737218"
+ }
+ ],
+ "doi": "10.24963/ijcai.2018/142",
+ "isInfluential": true,
+ "paperId": "725c3605c2d26d113637097358cd4c08c19ff9e1",
+ "title": "Deep Reasoning with Knowledge Graph for Social Relationship Understanding",
+ "url": "https://www.semanticscholar.org/paper/725c3605c2d26d113637097358cd4c08c19ff9e1",
+ "venue": "IJCAI",
+ "year": 2018
+ },
+ {
+ "arxivId": "1805.01515",
+ "authors": [
+ {
+ "authorId": "2582568",
+ "name": "Yuhang Zhao",
+ "url": "https://www.semanticscholar.org/author/2582568"
+ },
+ {
+ "authorId": "1968133",
+ "name": "Shaomei Wu",
+ "url": "https://www.semanticscholar.org/author/1968133"
+ },
+ {
+ "authorId": "39685591",
+ "name": "Lindsay Reynolds",
+ "url": "https://www.semanticscholar.org/author/39685591"
+ },
+ {
+ "authorId": "3283573",
+ "name": "Shiri Azenkot",
+ "url": "https://www.semanticscholar.org/author/3283573"
+ }
+ ],
+ "doi": "10.1145/3134756",
+ "isInfluential": false,
+ "paperId": "2a5903bdb3fdfb4d51f70b77f16852df3b8e5f83",
+ "title": "The Effect of Computer-Generated Descriptions on Photo-Sharing Experiences of People with Visual Impairments",
+ "url": "https://www.semanticscholar.org/paper/2a5903bdb3fdfb4d51f70b77f16852df3b8e5f83",
+ "venue": "PACMHCI",
+ "year": 2017
+ },
+ {
+ "arxivId": "1704.06456",
+ "authors": [
+ {
+ "authorId": "32222907",
+ "name": "Qianru Sun",
+ "url": "https://www.semanticscholar.org/author/32222907"
+ },
+ {
+ "authorId": "1697100",
+ "name": "Bernt Schiele",
+ "url": "https://www.semanticscholar.org/author/1697100"
+ },
+ {
+ "authorId": "1739548",
+ "name": "Mario Fritz",
+ "url": "https://www.semanticscholar.org/author/1739548"
+ }
+ ],
+ "doi": "10.1109/CVPR.2017.54",
+ "isInfluential": true,
+ "paperId": "23429ef60e7a9c0e2f4d81ed1b4e47cc2616522f",
+ "title": "A Domain Based Approach to Social Relation Recognition",
+ "url": "https://www.semanticscholar.org/paper/23429ef60e7a9c0e2f4d81ed1b4e47cc2616522f",
+ "venue": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
+ "year": 2017
+ },
+ {
+ "arxivId": "1711.09001",
+ "authors": [
+ {
+ "authorId": "32222907",
+ "name": "Qianru Sun",
+ "url": "https://www.semanticscholar.org/author/32222907"
+ },
+ {
+ "authorId": "1847145",
+ "name": "Liqian Ma",
+ "url": "https://www.semanticscholar.org/author/1847145"
+ },
+ {
+ "authorId": "2390510",
+ "name": "Seong Joon Oh",
+ "url": "https://www.semanticscholar.org/author/2390510"
+ },
+ {
+ "authorId": "1681236",
+ "name": "Luc Van Gool",
+ "url": "https://www.semanticscholar.org/author/1681236"
+ },
+ {
+ "authorId": "1697100",
+ "name": "Bernt Schiele",
+ "url": "https://www.semanticscholar.org/author/1697100"
+ },
+ {
+ "authorId": "1739548",
+ "name": "Mario Fritz",
+ "url": "https://www.semanticscholar.org/author/1739548"
+ }
+ ],
+ "doi": null,
+ "isInfluential": false,
+ "paperId": "ba7b12c8e2ff3c5e4e0f70b58215b41b18ff8feb",
+ "title": "Natural and Effective Obfuscation by Head Inpainting",
+ "url": "https://www.semanticscholar.org/paper/ba7b12c8e2ff3c5e4e0f70b58215b41b18ff8feb",
+ "venue": "ArXiv",
+ "year": 2017
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "2390510",
+ "name": "Seong Joon Oh",
+ "url": "https://www.semanticscholar.org/author/2390510"
+ },
+ {
+ "authorId": "1739548",
+ "name": "Mario Fritz",
+ "url": "https://www.semanticscholar.org/author/1739548"
+ },
+ {
+ "authorId": "1697100",
+ "name": "Bernt Schiele",
+ "url": "https://www.semanticscholar.org/author/1697100"
+ },
+ {
+ "authorId": "5514293",
+ "name": "J Max",
+ "url": "https://www.semanticscholar.org/author/5514293"
+ }
+ ],
+ "doi": null,
+ "isInfluential": true,
+ "paperId": "808b685d09912cbef4a009e74e10476304b4cccf",
+ "title": "From Understanding to Controlling Privacy against Automatic Person Recognition in Social Media",
+ "url": "https://www.semanticscholar.org/paper/808b685d09912cbef4a009e74e10476304b4cccf",
+ "venue": "",
+ "year": 2017
+ },
+ {
+ "arxivId": "1406.4444",
+ "authors": [
+ {
+ "authorId": "7969330",
+ "name": "Ziming Zhang",
+ "url": "https://www.semanticscholar.org/author/7969330"
+ },
+ {
+ "authorId": "1699322",
+ "name": "Venkatesh Saligrama",
+ "url": "https://www.semanticscholar.org/author/1699322"
+ }
+ ],
+ "doi": "10.1109/TCSVT.2016.2596159",
+ "isInfluential": false,
+ "paperId": "ae936628e78db4edb8e66853f59433b8cc83594f",
+ "title": "PRISM: Person Reidentification via Structured Matching",
+ "url": "https://www.semanticscholar.org/paper/ae936628e78db4edb8e66853f59433b8cc83594f",
+ "venue": "IEEE Transactions on Circuits and Systems for Video Technology",
+ "year": 2017
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "1927977",
+ "name": "Tousif Ahmed",
+ "url": "https://www.semanticscholar.org/author/1927977"
+ },
+ {
+ "authorId": "1996617",
+ "name": "Apu Kapadia",
+ "url": "https://www.semanticscholar.org/author/1996617"
+ },
+ {
+ "authorId": "32858206",
+ "name": "Venkatesh Potluri",
+ "url": "https://www.semanticscholar.org/author/32858206"
+ },
+ {
+ "authorId": "30576065",
+ "name": "Manohar Swaminathan",
+ "url": "https://www.semanticscholar.org/author/30576065"
+ }
+ ],
+ "doi": "10.1145/3264899",
+ "isInfluential": false,
+ "paperId": "ca096e158912080493a898b0b8a4bd2902674fed",
+ "title": "Up to a Limit?: Privacy Concerns of Bystanders and Their Willingness to Share Additional Information with Visually Impaired Users of Assistive Technologies",
+ "url": "https://www.semanticscholar.org/paper/ca096e158912080493a898b0b8a4bd2902674fed",
+ "venue": "IMWUT",
+ "year": 2018
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": null,
+ "name": "Micheal Cogswell",
+ "url": null
+ },
+ {
+ "authorId": "37824829",
+ "name": "Michael Cogswell",
+ "url": "https://www.semanticscholar.org/author/37824829"
+ }
+ ],
+ "doi": null,
+ "isInfluential": false,
+ "paperId": "6d8eef8f8d6cd8436c55018e6ca5c5907b31ac19",
+ "title": "Understanding Representations and Reducing their Redundancy in Deep Networks",
+ "url": "https://www.semanticscholar.org/paper/6d8eef8f8d6cd8436c55018e6ca5c5907b31ac19",
+ "venue": "",
+ "year": 2016
+ },
+ {
+ "arxivId": "1703.09471",
+ "authors": [
+ {
+ "authorId": "2390510",
+ "name": "Seong Joon Oh",
+ "url": "https://www.semanticscholar.org/author/2390510"
+ },
+ {
+ "authorId": "1739548",
+ "name": "Mario Fritz",
+ "url": "https://www.semanticscholar.org/author/1739548"
+ },
+ {
+ "authorId": "1697100",
+ "name": "Bernt Schiele",
+ "url": "https://www.semanticscholar.org/author/1697100"
+ }
+ ],
+ "doi": "10.1109/ICCV.2017.165",
+ "isInfluential": true,
+ "paperId": "b68150bfdec373ed8e025f448b7a3485c16e3201",
+ "title": "Adversarial Image Perturbation for Privacy Protection A Game Theory Perspective",
+ "url": "https://www.semanticscholar.org/paper/b68150bfdec373ed8e025f448b7a3485c16e3201",
+ "venue": "2017 IEEE International Conference on Computer Vision (ICCV)",
+ "year": 2017
+ },
+ {
+ "arxivId": "1609.00408",
+ "authors": [
+ {
+ "authorId": "49702377",
+ "name": "Richard McPherson",
+ "url": "https://www.semanticscholar.org/author/49702377"
+ },
+ {
+ "authorId": "2520493",
+ "name": "Reza Shokri",
+ "url": "https://www.semanticscholar.org/author/2520493"
+ },
+ {
+ "authorId": "1723945",
+ "name": "Vitaly Shmatikov",
+ "url": "https://www.semanticscholar.org/author/1723945"
+ }
+ ],
+ "doi": null,
+ "isInfluential": false,
+ "paperId": "3c57e28a4eb463d532ea2b0b1ba4b426ead8d9a0",
+ "title": "Defeating Image Obfuscation with Deep Learning",
+ "url": "https://www.semanticscholar.org/paper/3c57e28a4eb463d532ea2b0b1ba4b426ead8d9a0",
+ "venue": "ArXiv",
+ "year": 2016
+ },
+ {
+ "arxivId": "1804.03287",
+ "authors": [
+ {
+ "authorId": "46509484",
+ "name": "Jian Zhao",
+ "url": "https://www.semanticscholar.org/author/46509484"
+ },
+ {
+ "authorId": "2757639",
+ "name": "Jianshu Li",
+ "url": "https://www.semanticscholar.org/author/2757639"
+ },
+ {
+ "authorId": "47585344",
+ "name": "Yu Cheng",
+ "url": "https://www.semanticscholar.org/author/47585344"
+ },
+ {
+ "authorId": "48207454",
+ "name": "Li Zhou",
+ "url": "https://www.semanticscholar.org/author/48207454"
+ },
+ {
+ "authorId": "1715286",
+ "name": "Terence Sim",
+ "url": "https://www.semanticscholar.org/author/1715286"
+ },
+ {
+ "authorId": "1698982",
+ "name": "Shuicheng Yan",
+ "url": "https://www.semanticscholar.org/author/1698982"
+ },
+ {
+ "authorId": "33221685",
+ "name": "Jiashi Feng",
+ "url": "https://www.semanticscholar.org/author/33221685"
+ }
+ ],
+ "doi": "10.13140/RG.2.2.23242.67523",
+ "isInfluential": false,
+ "paperId": "5f771fed91c8e4b666489ba2384d0705bcf75030",
+ "title": "Understanding Humans in Crowded Scenes: Deep Nested Adversarial Learning and A New Benchmark for Multi-Human Parsing",
+ "url": "https://www.semanticscholar.org/paper/5f771fed91c8e4b666489ba2384d0705bcf75030",
+ "venue": "ArXiv",
+ "year": 2018
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "37956314",
+ "name": "Vijay Kumar",
+ "url": "https://www.semanticscholar.org/author/37956314"
+ },
+ {
+ "authorId": "3185334",
+ "name": "Anoop M. Namboodiri",
+ "url": "https://www.semanticscholar.org/author/3185334"
+ },
+ {
+ "authorId": "1694502",
+ "name": "C. V. Jawahar",
+ "url": "https://www.semanticscholar.org/author/1694502"
+ }
+ ],
+ "doi": "10.1007/s11760-017-1140-5",
+ "isInfluential": false,
+ "paperId": "01e27c91c7cef926389f913d12410725e7dd35ab",
+ "title": "Semi-supervised annotation of faces in image collection",
+ "url": "https://www.semanticscholar.org/paper/01e27c91c7cef926389f913d12410725e7dd35ab",
+ "venue": "Signal, Image and Video Processing",
+ "year": 2018
+ },
+ {
+ "arxivId": "1710.03224",
+ "authors": [
+ {
+ "authorId": "2390510",
+ "name": "Seong Joon Oh",
+ "url": "https://www.semanticscholar.org/author/2390510"
+ },
+ {
+ "authorId": "1798000",
+ "name": "Rodrigo Benenson",
+ "url": "https://www.semanticscholar.org/author/1798000"
+ },
+ {
+ "authorId": "1739548",
+ "name": "Mario Fritz",
+ "url": "https://www.semanticscholar.org/author/1739548"
+ },
+ {
+ "authorId": "1697100",
+ "name": "Bernt Schiele",
+ "url": "https://www.semanticscholar.org/author/1697100"
+ }
+ ],
+ "doi": null,
+ "isInfluential": true,
+ "paperId": "cfd4004054399f3a5f536df71f9b9987f060f434",
+ "title": "Person Recognition in Social Media Photos",
+ "url": "https://www.semanticscholar.org/paper/cfd4004054399f3a5f536df71f9b9987f060f434",
+ "venue": "ArXiv",
+ "year": 2017
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "11269472",
+ "name": "Iacopo Masi",
+ "url": "https://www.semanticscholar.org/author/11269472"
+ },
+ {
+ "authorId": "38696444",
+ "name": "Stephen Rawls",
+ "url": "https://www.semanticscholar.org/author/38696444"
+ },
+ {
+ "authorId": "3463966",
+ "name": "Gérard G. Medioni",
+ "url": "https://www.semanticscholar.org/author/3463966"
+ },
+ {
+ "authorId": "1776379",
+ "name": "Premkumar Natarajan",
+ "url": "https://www.semanticscholar.org/author/1776379"
+ }
+ ],
+ "doi": "10.1109/CVPR.2016.523",
+ "isInfluential": true,
+ "paperId": "2c92839418a64728438c351a42f6dc5ad0c6e686",
+ "title": "Pose-Aware Face Recognition in the Wild",
+ "url": "https://www.semanticscholar.org/paper/2c92839418a64728438c351a42f6dc5ad0c6e686",
+ "venue": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
+ "year": 2016
+ },
+ {
+ "arxivId": "1710.00870",
+ "authors": [
+ {
+ "authorId": "1715752",
+ "name": "Yu Liu",
+ "url": "https://www.semanticscholar.org/author/1715752"
+ },
+ {
+ "authorId": "46382329",
+ "name": "Hongyang Li",
+ "url": "https://www.semanticscholar.org/author/46382329"
+ },
+ {
+ "authorId": "31843833",
+ "name": "Xiaogang Wang",
+ "url": "https://www.semanticscholar.org/author/31843833"
+ }
+ ],
+ "doi": null,
+ "isInfluential": true,
+ "paperId": "d949fadc9b6c5c8b067fa42265ad30945f9caa99",
+ "title": "Rethinking Feature Discrimination and Polymerization for Large-scale Recognition",
+ "url": "https://www.semanticscholar.org/paper/d949fadc9b6c5c8b067fa42265ad30945f9caa99",
+ "venue": "ArXiv",
+ "year": 2017
+ },
+ {
+ "arxivId": "1611.09967",
+ "authors": [
+ {
+ "authorId": "48513733",
+ "name": "Yao Li",
+ "url": "https://www.semanticscholar.org/author/48513733"
+ },
+ {
+ "authorId": "2604251",
+ "name": "Guosheng Lin",
+ "url": "https://www.semanticscholar.org/author/2604251"
+ },
+ {
+ "authorId": "3194022",
+ "name": "Bohan Zhuang",
+ "url": "https://www.semanticscholar.org/author/3194022"
+ },
+ {
+ "authorId": "2161037",
+ "name": "Lingqiao Liu",
+ "url": "https://www.semanticscholar.org/author/2161037"
+ },
+ {
+ "authorId": "1780381",
+ "name": "Chunhua Shen",
+ "url": "https://www.semanticscholar.org/author/1780381"
+ },
+ {
+ "authorId": "5546141",
+ "name": "Anton van den Hengel",
+ "url": "https://www.semanticscholar.org/author/5546141"
+ }
+ ],
+ "doi": "10.1109/CVPR.2017.600",
+ "isInfluential": true,
+ "paperId": "3d24b386d003bee176a942c26336dbe8f427aadd",
+ "title": "Sequential Person Recognition in Photo Albums with a Recurrent Network",
+ "url": "https://www.semanticscholar.org/paper/3d24b386d003bee176a942c26336dbe8f427aadd",
+ "venue": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
+ "year": 2017
+ },
+ {
+ "arxivId": "1805.04049",
+ "authors": [
+ {
+ "authorId": "2008164",
+ "name": "Luca Melis",
+ "url": "https://www.semanticscholar.org/author/2008164"
+ },
+ {
+ "authorId": "3469125",
+ "name": "Congzheng Song",
+ "url": "https://www.semanticscholar.org/author/3469125"
+ },
+ {
+ "authorId": "1728207",
+ "name": "Emiliano De Cristofaro",
+ "url": "https://www.semanticscholar.org/author/1728207"
+ },
+ {
+ "authorId": "1723945",
+ "name": "Vitaly Shmatikov",
+ "url": "https://www.semanticscholar.org/author/1723945"
+ }
+ ],
+ "doi": null,
+ "isInfluential": true,
+ "paperId": "8bdf6f03bde08c424c214188b35be8b2dec7cdea",
+ "title": "Inference Attacks Against Collaborative Learning",
+ "url": "https://www.semanticscholar.org/paper/8bdf6f03bde08c424c214188b35be8b2dec7cdea",
+ "venue": "ArXiv",
+ "year": 2018
+ }
+ ],
+ "doi": "10.1109/CVPR.2015.7299113",
+ "influentialCitationCount": 19,
+ "paperId": "0a85bdff552615643dd74646ac881862a7c7072d",
+ "references": [
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "2064160",
+ "name": "Alex Krizhevsky",
+ "url": "https://www.semanticscholar.org/author/2064160"
+ },
+ {
+ "authorId": "1701686",
+ "name": "Ilya Sutskever",
+ "url": "https://www.semanticscholar.org/author/1701686"
+ },
+ {
+ "authorId": "1695689",
+ "name": "Geoffrey E. Hinton",
+ "url": "https://www.semanticscholar.org/author/1695689"
+ }
+ ],
+ "doi": "10.1145/3065386",
+ "isInfluential": false,
+ "paperId": "2315fc6c2c0c4abd2443e26a26e7bb86df8e24cc",
+ "title": "ImageNet Classification with Deep Convolutional Neural Networks",
+ "url": "https://www.semanticscholar.org/paper/2315fc6c2c0c4abd2443e26a26e7bb86df8e24cc",
+ "venue": "NIPS",
+ "year": 2012
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "38511654",
+ "name": "Douglas Gray",
+ "url": "https://www.semanticscholar.org/author/38511654"
+ },
+ {
+ "authorId": "2491194",
+ "name": "Hai Tao",
+ "url": "https://www.semanticscholar.org/author/2491194"
+ }
+ ],
+ "doi": "10.1007/978-3-540-88682-2_21",
+ "isInfluential": false,
+ "paperId": "d745cf8c51032996b5fee6b19e1b5321c14797eb",
+ "title": "Viewpoint Invariant Pedestrian Recognition with an Ensemble of Localized Features",
+ "url": "https://www.semanticscholar.org/paper/d745cf8c51032996b5fee6b19e1b5321c14797eb",
+ "venue": "ECCV",
+ "year": 2008
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "48625148",
+ "name": "Wei Li",
+ "url": "https://www.semanticscholar.org/author/48625148"
+ },
+ {
+ "authorId": "49832825",
+ "name": "Rui Zhao",
+ "url": "https://www.semanticscholar.org/author/49832825"
+ },
+ {
+ "authorId": "31843833",
+ "name": "Xiaogang Wang",
+ "url": "https://www.semanticscholar.org/author/31843833"
+ }
+ ],
+ "doi": "10.1007/978-3-642-37331-2_3",
+ "isInfluential": false,
+ "paperId": "44484d2866f222bbb9b6b0870890f9eea1ffb2d0",
+ "title": "Human Reidentification with Transferred Metric Learning",
+ "url": "https://www.semanticscholar.org/paper/44484d2866f222bbb9b6b0870890f9eea1ffb2d0",
+ "venue": "ACCV",
+ "year": 2012
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "1931707",
+ "name": "Martin Bäuml",
+ "url": "https://www.semanticscholar.org/author/1931707"
+ },
+ {
+ "authorId": "2103464",
+ "name": "Makarand Tapaswi",
+ "url": "https://www.semanticscholar.org/author/2103464"
+ },
+ {
+ "authorId": "1742325",
+ "name": "Rainer Stiefelhagen",
+ "url": "https://www.semanticscholar.org/author/1742325"
+ }
+ ],
+ "doi": "10.1109/CVPR.2013.462",
+ "isInfluential": false,
+ "paperId": "2b743e9a1aa638f46f2842187136b0e32e3bc042",
+ "title": "Semi-supervised Learning with Constraints for Person Identification in Multimedia Data",
+ "url": "https://www.semanticscholar.org/paper/2b743e9a1aa638f46f2842187136b0e32e3bc042",
+ "venue": "2013 IEEE Conference on Computer Vision and Pattern Recognition",
+ "year": 2013
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "1687465",
+ "name": "Mor Naaman",
+ "url": "https://www.semanticscholar.org/author/1687465"
+ },
+ {
+ "authorId": "2105696",
+ "name": "Ron B. Yeh",
+ "url": "https://www.semanticscholar.org/author/2105696"
+ },
+ {
+ "authorId": "1695250",
+ "name": "Hector Garcia-Molina",
+ "url": "https://www.semanticscholar.org/author/1695250"
+ },
+ {
+ "authorId": "1750481",
+ "name": "Andreas Paepcke",
+ "url": "https://www.semanticscholar.org/author/1750481"
+ }
+ ],
+ "doi": "10.1145/1065385.1065430",
+ "isInfluential": false,
+ "paperId": "45467c70c1857b68902acebe6086d8d455f69f1d",
+ "title": "Leveraging context to resolve identity in photo albums",
+ "url": "https://www.semanticscholar.org/paper/45467c70c1857b68902acebe6086d8d455f69f1d",
+ "venue": "Proceedings of the 5th ACM/IEEE-CS Joint Conference on Digital Libraries (JCDL '05)",
+ "year": 2005
+ },
+ {
+ "arxivId": "1311.2524",
+ "authors": [
+ {
+ "authorId": "2983898",
+ "name": "Ross B. Girshick",
+ "url": "https://www.semanticscholar.org/author/2983898"
+ },
+ {
+ "authorId": "7408951",
+ "name": "Jeff Donahue",
+ "url": "https://www.semanticscholar.org/author/7408951"
+ },
+ {
+ "authorId": "1753210",
+ "name": "Trevor Darrell",
+ "url": "https://www.semanticscholar.org/author/1753210"
+ },
+ {
+ "authorId": "1689212",
+ "name": "Jitendra Malik",
+ "url": "https://www.semanticscholar.org/author/1689212"
+ }
+ ],
+ "doi": "10.1109/CVPR.2014.81",
+ "isInfluential": false,
+ "paperId": "009fba8df6bbca155d9e070a9bd8d0959bc693c2",
+ "title": "Rich Feature Hierarchies for Accurate Object Detection and Semantic Segmentation",
+ "url": "https://www.semanticscholar.org/paper/009fba8df6bbca155d9e070a9bd8d0959bc693c2",
+ "venue": "2014 IEEE Conference on Computer Vision and Pattern Recognition",
+ "year": 2014
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "1769383",
+ "name": "Lubomir D. Bourdev",
+ "url": "https://www.semanticscholar.org/author/1769383"
+ },
+ {
+ "authorId": "35208858",
+ "name": "Subhransu Maji",
+ "url": "https://www.semanticscholar.org/author/35208858"
+ },
+ {
+ "authorId": "1689212",
+ "name": "Jitendra Malik",
+ "url": "https://www.semanticscholar.org/author/1689212"
+ }
+ ],
+ "doi": "10.1109/ICCV.2011.6126413",
+ "isInfluential": false,
+ "paperId": "7808937b46acad36e43c30ae4e9f3fd57462853d",
+ "title": "Describing people: A poselet-based approach to attribute classification",
+ "url": "https://www.semanticscholar.org/paper/7808937b46acad36e43c30ae4e9f3fd57462853d",
+ "venue": "2011 International Conference on Computer Vision",
+ "year": 2011
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "1807606",
+ "name": "Dahua Lin",
+ "url": "https://www.semanticscholar.org/author/1807606"
+ },
+ {
+ "authorId": "2189118",
+ "name": "Ashish Kapoor",
+ "url": "https://www.semanticscholar.org/author/2189118"
+ },
+ {
+ "authorId": "1745420",
+ "name": "Gang Hua",
+ "url": "https://www.semanticscholar.org/author/1745420"
+ },
+ {
+ "authorId": "40039594",
+ "name": "Simon Baker",
+ "url": "https://www.semanticscholar.org/author/40039594"
+ }
+ ],
+ "doi": "10.1007/978-3-642-15549-9_18",
+ "isInfluential": false,
+ "paperId": "01df75d2020931ad30f827653cdde7fba06dda5f",
+ "title": "Joint People, Event, and Location Recognition in Personal Photo Collections Using Cross-Domain Context",
+ "url": "https://www.semanticscholar.org/paper/01df75d2020931ad30f827653cdde7fba06dda5f",
+ "venue": "ECCV",
+ "year": 2010
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "10338111",
+ "name": "Zhen Cui",
+ "url": "https://www.semanticscholar.org/author/10338111"
+ },
+ {
+ "authorId": "50135099",
+ "name": "Wen Li",
+ "url": "https://www.semanticscholar.org/author/50135099"
+ },
+ {
+ "authorId": "38188040",
+ "name": "Dong Xu",
+ "url": "https://www.semanticscholar.org/author/38188040"
+ },
+ {
+ "authorId": "1685914",
+ "name": "Shiguang Shan",
+ "url": "https://www.semanticscholar.org/author/1685914"
+ },
+ {
+ "authorId": "1710220",
+ "name": "Xilin Chen",
+ "url": "https://www.semanticscholar.org/author/1710220"
+ }
+ ],
+ "doi": "10.1109/CVPR.2013.456",
+ "isInfluential": false,
+ "paperId": "316d51aaa37891d730ffded7b9d42946abea837f",
+ "title": "Fusing Robust Face Region Descriptors via Multiple Metric Learning for Face Recognition in the Wild",
+ "url": "https://www.semanticscholar.org/paper/316d51aaa37891d730ffded7b9d42946abea837f",
+ "venue": "2013 IEEE Conference on Computer Vision and Pattern Recognition",
+ "year": 2013
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "47583369",
+ "name": "John Wright",
+ "url": "https://www.semanticscholar.org/author/47583369"
+ },
+ {
+ "authorId": "2784161",
+ "name": "Allen Y. Yang",
+ "url": "https://www.semanticscholar.org/author/2784161"
+ },
+ {
+ "authorId": "1701028",
+ "name": "Arvind Ganesh",
+ "url": "https://www.semanticscholar.org/author/1701028"
+ },
+ {
+ "authorId": "1717598",
+ "name": "S. Shankar Sastry",
+ "url": "https://www.semanticscholar.org/author/1717598"
+ },
+ {
+ "authorId": "50032052",
+ "name": "Yi Ma",
+ "url": "https://www.semanticscholar.org/author/50032052"
+ }
+ ],
+ "doi": "10.1109/TPAMI.2008.79",
+ "isInfluential": false,
+ "paperId": "1512b9570669a9dfe2a252900ae94b276e3aff9e",
+ "title": "Robust Face Recognition via Sparse Representation",
+ "url": "https://www.semanticscholar.org/paper/1512b9570669a9dfe2a252900ae94b276e3aff9e",
+ "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence",
+ "year": 2009
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "3219900",
+ "name": "Gary B. Huang",
+ "url": "https://www.semanticscholar.org/author/3219900"
+ },
+ {
+ "authorId": "49354909",
+ "name": "Marwan Mattar",
+ "url": "https://www.semanticscholar.org/author/49354909"
+ },
+ {
+ "authorId": "1685538",
+ "name": "Tamara L. Berg",
+ "url": "https://www.semanticscholar.org/author/1685538"
+ },
+ {
+ "authorId": "1714536",
+ "name": "Erik G. Learned-Miller",
+ "url": "https://www.semanticscholar.org/author/1714536"
+ }
+ ],
+ "doi": null,
+ "isInfluential": true,
+ "paperId": "370b5757a5379b15e30d619e4d3fb9e8e13f3256",
+ "title": "Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments",
+ "url": "https://www.semanticscholar.org/paper/370b5757a5379b15e30d619e4d3fb9e8e13f3256",
+ "venue": "",
+ "year": 2007
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "1769383",
+ "name": "Lubomir D. Bourdev",
+ "url": "https://www.semanticscholar.org/author/1769383"
+ },
+ {
+ "authorId": "1689212",
+ "name": "Jitendra Malik",
+ "url": "https://www.semanticscholar.org/author/1689212"
+ }
+ ],
+ "doi": "10.1109/ICCV.2009.5459303",
+ "isInfluential": false,
+ "paperId": "2830fb5282de23d7784b4b4bc37065d27839a412",
+ "title": "Poselets: Body part detectors trained using 3D human pose annotations",
+ "url": "https://www.semanticscholar.org/paper/2830fb5282de23d7784b4b4bc37065d27839a412",
+ "venue": "2009 IEEE 12th International Conference on Computer Vision",
+ "year": 2009
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "3056091",
+ "name": "Mark Everingham",
+ "url": "https://www.semanticscholar.org/author/3056091"
+ },
+ {
+ "authorId": "1782755",
+ "name": "Josef Sivic",
+ "url": "https://www.semanticscholar.org/author/1782755"
+ },
+ {
+ "authorId": "1688869",
+ "name": "Andrew Zisserman",
+ "url": "https://www.semanticscholar.org/author/1688869"
+ }
+ ],
+ "doi": "10.1016/j.imavis.2008.04.018",
+ "isInfluential": false,
+ "paperId": "642a386c451e94d9c44134e03052219a7512b9de",
+ "title": "Taking the bite out of automated naming of characters in TV video",
+ "url": "https://www.semanticscholar.org/paper/642a386c451e94d9c44134e03052219a7512b9de",
+ "venue": "Image Vision Comput.",
+ "year": 2009
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "48625148",
+ "name": "Wei Li",
+ "url": "https://www.semanticscholar.org/author/48625148"
+ },
+ {
+ "authorId": "49832825",
+ "name": "Rui Zhao",
+ "url": "https://www.semanticscholar.org/author/49832825"
+ },
+ {
+ "authorId": "1721881",
+ "name": "Tong Xiao",
+ "url": "https://www.semanticscholar.org/author/1721881"
+ },
+ {
+ "authorId": "31843833",
+ "name": "Xiaogang Wang",
+ "url": "https://www.semanticscholar.org/author/31843833"
+ }
+ ],
+ "doi": "10.1109/CVPR.2014.27",
+ "isInfluential": false,
+ "paperId": "6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3",
+ "title": "DeepReID: Deep Filter Pairing Neural Network for Person Re-identification",
+ "url": "https://www.semanticscholar.org/paper/6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3",
+ "venue": "2014 IEEE Conference on Computer Vision and Pattern Recognition",
+ "year": 2014
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "38707615",
+ "name": "Bryan James Prosser",
+ "url": "https://www.semanticscholar.org/author/38707615"
+ },
+ {
+ "authorId": "3333315",
+ "name": "Wei-Shi Zheng",
+ "url": "https://www.semanticscholar.org/author/3333315"
+ },
+ {
+ "authorId": "2073354",
+ "name": "Shaogang Gong",
+ "url": "https://www.semanticscholar.org/author/2073354"
+ },
+ {
+ "authorId": "1700927",
+ "name": "Tao Xiang",
+ "url": "https://www.semanticscholar.org/author/1700927"
+ }
+ ],
+ "doi": "10.5244/C.24.21",
+ "isInfluential": false,
+ "paperId": "8aef5b3cfc80fafdcefc24c72a4796ca40f4bc8b",
+ "title": "Person Re-Identification by Support Vector Ranking",
+ "url": "https://www.semanticscholar.org/paper/8aef5b3cfc80fafdcefc24c72a4796ca40f4bc8b",
+ "venue": "BMVC",
+ "year": 2010
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "2351962",
+ "name": "Michela Farenzena",
+ "url": "https://www.semanticscholar.org/author/2351962"
+ },
+ {
+ "authorId": "1809420",
+ "name": "Loris Bazzani",
+ "url": "https://www.semanticscholar.org/author/1809420"
+ },
+ {
+ "authorId": "1691336",
+ "name": "Alessandro Perina",
+ "url": "https://www.semanticscholar.org/author/1691336"
+ },
+ {
+ "authorId": "1727204",
+ "name": "Vittorio Murino",
+ "url": "https://www.semanticscholar.org/author/1727204"
+ },
+ {
+ "authorId": "1723008",
+ "name": "Marco Cristani",
+ "url": "https://www.semanticscholar.org/author/1723008"
+ }
+ ],
+ "doi": "10.1109/CVPR.2010.5539926",
+ "isInfluential": false,
+ "paperId": "12fa3c73a7764cb65bb76fed0601fc5d79893bcd",
+ "title": "Person re-identification by symmetry-driven accumulation of local features",
+ "url": "https://www.semanticscholar.org/paper/12fa3c73a7764cb65bb76fed0601fc5d79893bcd",
+ "venue": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
+ "year": 2010
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "2737253",
+ "name": "Matthieu Guillaumin",
+ "url": "https://www.semanticscholar.org/author/2737253"
+ },
+ {
+ "authorId": "1721683",
+ "name": "Jakob J. Verbeek",
+ "url": "https://www.semanticscholar.org/author/1721683"
+ },
+ {
+ "authorId": "2462253",
+ "name": "Cordelia Schmid",
+ "url": "https://www.semanticscholar.org/author/2462253"
+ }
+ ],
+ "doi": "10.1109/ICCV.2009.5459197",
+ "isInfluential": false,
+ "paperId": "19296e129c70b332a8c0a67af8990f2f4d4f44d1",
+ "title": "Is that you? Metric learning approaches for face identification",
+ "url": "https://www.semanticscholar.org/paper/19296e129c70b332a8c0a67af8990f2f4d4f44d1",
+ "venue": "2009 IEEE 12th International Conference on Computer Vision",
+ "year": 2009
+ },
+ {
+ "arxivId": "1311.5591",
+ "authors": [
+ {
+ "authorId": "49410785",
+ "name": "Ning Zhang",
+ "url": "https://www.semanticscholar.org/author/49410785"
+ },
+ {
+ "authorId": "2210374",
+ "name": "Manohar Paluri",
+ "url": "https://www.semanticscholar.org/author/2210374"
+ },
+ {
+ "authorId": "1706809",
+ "name": "Marc'Aurelio Ranzato",
+ "url": "https://www.semanticscholar.org/author/1706809"
+ },
+ {
+ "authorId": "1753210",
+ "name": "Trevor Darrell",
+ "url": "https://www.semanticscholar.org/author/1753210"
+ },
+ {
+ "authorId": "1769383",
+ "name": "Lubomir D. Bourdev",
+ "url": "https://www.semanticscholar.org/author/1769383"
+ }
+ ],
+ "doi": "10.1109/CVPR.2014.212",
+ "isInfluential": false,
+ "paperId": "42e3dac0df30d754c7c7dab9e1bb94990034a90d",
+ "title": "PANDA: Pose Aligned Networks for Deep Attribute Modeling",
+ "url": "https://www.semanticscholar.org/paper/42e3dac0df30d754c7c7dab9e1bb94990034a90d",
+ "venue": "2014 IEEE Conference on Computer Vision and Pattern Recognition",
+ "year": 2014
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "2188620",
+ "name": "Yaniv Taigman",
+ "url": "https://www.semanticscholar.org/author/2188620"
+ },
+ {
+ "authorId": "2909406",
+ "name": "Ming Yang",
+ "url": "https://www.semanticscholar.org/author/2909406"
+ },
+ {
+ "authorId": "1706809",
+ "name": "Marc'Aurelio Ranzato",
+ "url": "https://www.semanticscholar.org/author/1706809"
+ },
+ {
+ "authorId": "1776343",
+ "name": "Lior Wolf",
+ "url": "https://www.semanticscholar.org/author/1776343"
+ }
+ ],
+ "doi": "10.1109/CVPR.2014.220",
+ "isInfluential": false,
+ "paperId": "14ce7635ff18318e7094417d0f92acbec6669f1c",
+ "title": "DeepFace: Closing the Gap to Human-Level Performance in Face Verification",
+ "url": "https://www.semanticscholar.org/paper/14ce7635ff18318e7094417d0f92acbec6669f1c",
+ "venue": "2014 IEEE Conference on Computer Vision and Pattern Recognition",
+ "year": 2014
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "2103464",
+ "name": "Makarand Tapaswi",
+ "url": "https://www.semanticscholar.org/author/2103464"
+ },
+ {
+ "authorId": "1931707",
+ "name": "Martin Bäuml",
+ "url": "https://www.semanticscholar.org/author/1931707"
+ },
+ {
+ "authorId": "1742325",
+ "name": "Rainer Stiefelhagen",
+ "url": "https://www.semanticscholar.org/author/1742325"
+ }
+ ],
+ "doi": "10.1109/CVPR.2012.6247986",
+ "isInfluential": false,
+ "paperId": "441132dd4ec14991644723c9642ac3a63181753e",
+ "title": "“Knock! Knock! Who is it?” probabilistic person identification in TV-series",
+ "url": "https://www.semanticscholar.org/paper/441132dd4ec14991644723c9642ac3a63181753e",
+ "venue": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
+ "year": 2012
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "39460815",
+ "name": "Andrew C. Gallagher",
+ "url": "https://www.semanticscholar.org/author/39460815"
+ },
+ {
+ "authorId": "1746230",
+ "name": "Tsuhan Chen",
+ "url": "https://www.semanticscholar.org/author/1746230"
+ }
+ ],
+ "doi": "10.1109/CVPR.2008.4587481",
+ "isInfluential": true,
+ "paperId": "22ad2c8c0f4d6aa4328b38d894b814ec22579761",
+ "title": "Clothing cosegmentation for recognizing people",
+ "url": "https://www.semanticscholar.org/paper/22ad2c8c0f4d6aa4328b38d894b814ec22579761",
+ "venue": "2008 IEEE Conference on Computer Vision and Pattern Recognition",
+ "year": 2008
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "3056091",
+ "name": "Mark Everingham",
+ "url": "https://www.semanticscholar.org/author/3056091"
+ },
+ {
+ "authorId": "1782755",
+ "name": "Josef Sivic",
+ "url": "https://www.semanticscholar.org/author/1782755"
+ },
+ {
+ "authorId": "1688869",
+ "name": "Andrew Zisserman",
+ "url": "https://www.semanticscholar.org/author/1688869"
+ }
+ ],
+ "doi": "10.5244/C.20.92",
+ "isInfluential": false,
+ "paperId": "75ebe1e0ae9d42732e31948e2e9c03d680235c39",
+ "title": "Hello! My name is... Buffy'' -- Automatic Naming of Characters in TV Video",
+ "url": "https://www.semanticscholar.org/paper/75ebe1e0ae9d42732e31948e2e9c03d680235c39",
+ "venue": "BMVC",
+ "year": 2006
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "22804340",
+ "name": "Gang Wang",
+ "url": "https://www.semanticscholar.org/author/22804340"
+ },
+ {
+ "authorId": "39460815",
+ "name": "Andrew C. Gallagher",
+ "url": "https://www.semanticscholar.org/author/39460815"
+ },
+ {
+ "authorId": "33642939",
+ "name": "Jiebo Luo",
+ "url": "https://www.semanticscholar.org/author/33642939"
+ },
+ {
+ "authorId": "1744452",
+ "name": "David A. Forsyth",
+ "url": "https://www.semanticscholar.org/author/1744452"
+ }
+ ],
+ "doi": "10.1007/978-3-642-15555-0_13",
+ "isInfluential": false,
+ "paperId": "291f527598c589fb0519f890f1beb2749082ddfd",
+ "title": "Seeing People in Social Context: Recognizing People and Social Relationships",
+ "url": "https://www.semanticscholar.org/paper/291f527598c589fb0519f890f1beb2749082ddfd",
+ "venue": "ECCV",
+ "year": 2010
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "2230138",
+ "name": "Matthew A. Turk",
+ "url": "https://www.semanticscholar.org/author/2230138"
+ },
+ {
+ "authorId": "1682773",
+ "name": "Alex Pentland",
+ "url": "https://www.semanticscholar.org/author/1682773"
+ }
+ ],
+ "doi": "10.1162/jocn.1991.3.1.71",
+ "isInfluential": false,
+ "paperId": "a6f1dfcc44277d4cfd8507284d994c9283dc3a2f",
+ "title": "Eigenfaces for Recognition",
+ "url": "https://www.semanticscholar.org/paper/a6f1dfcc44277d4cfd8507284d994c9283dc3a2f",
+ "venue": "Journal of Cognitive Neuroscience",
+ "year": 1991
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "20373504",
+ "name": "Mary C Dugan",
+ "url": "https://www.semanticscholar.org/author/20373504"
+ }
+ ],
+ "doi": null,
+ "isInfluential": false,
+ "paperId": "ef80015cd8de355d2cf197f050acce33d15d5ad5",
+ "title": "Hello, my name is ....",
+ "url": "https://www.semanticscholar.org/paper/ef80015cd8de355d2cf197f050acce33d15d5ad5",
+ "venue": "Contemporary longterm care",
+ "year": 1990
+ },
+ {
+ "arxivId": null,
+ "authors": [],
+ "doi": null,
+ "isInfluential": false,
+ "paperId": "bd433d471af50b571d7284afb5ee435654ace99f",
+ "title": "Going Deeper with Convolutional Neural Network for Intelligent Transportation",
+ "url": "https://www.semanticscholar.org/paper/bd433d471af50b571d7284afb5ee435654ace99f",
+ "venue": "",
+ "year": 2016
+ },
+ {
+ "arxivId": "1407.4979",
+ "authors": [
+ {
+ "authorId": "1716143",
+ "name": "Dong Yi",
+ "url": "https://www.semanticscholar.org/author/1716143"
+ },
+ {
+ "authorId": "1718623",
+ "name": "Zhen Lei",
+ "url": "https://www.semanticscholar.org/author/1718623"
+ },
+ {
+ "authorId": "34679741",
+ "name": "Stan Z. Li",
+ "url": "https://www.semanticscholar.org/author/34679741"
+ }
+ ],
+ "doi": null,
+ "isInfluential": false,
+ "paperId": "afd29ac2de84c8a6d48232477be018ec57d6f564",
+ "title": "Deep Metric Learning for Practical Person Re-Identification",
+ "url": "https://www.semanticscholar.org/paper/afd29ac2de84c8a6d48232477be018ec57d6f564",
+ "venue": "ArXiv",
+ "year": 2014
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "9748713",
+ "name": "Rahul Garg",
+ "url": "https://www.semanticscholar.org/author/9748713"
+ },
+ {
+ "authorId": "1679223",
+ "name": "Steven M. Seitz",
+ "url": "https://www.semanticscholar.org/author/1679223"
+ },
+ {
+ "authorId": "1770537",
+ "name": "Deva Ramanan",
+ "url": "https://www.semanticscholar.org/author/1770537"
+ },
+ {
+ "authorId": "1830653",
+ "name": "Noah Snavely",
+ "url": "https://www.semanticscholar.org/author/1830653"
+ }
+ ],
+ "doi": "10.1109/CVPR.2011.5995546",
+ "isInfluential": false,
+ "paperId": "5b2bc289b607ca1a0634555158464f28fe68a6d3",
+ "title": "Where's Waldo: Matching people in images of crowds",
+ "url": "https://www.semanticscholar.org/paper/5b2bc289b607ca1a0634555158464f28fe68a6d3",
+ "venue": "CVPR 2011",
+ "year": 2011
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "39460815",
+ "name": "Andrew C. Gallagher",
+ "url": "https://www.semanticscholar.org/author/39460815"
+ },
+ {
+ "authorId": "1746230",
+ "name": "Tsuhan Chen",
+ "url": "https://www.semanticscholar.org/author/1746230"
+ }
+ ],
+ "doi": "10.1109/CVPRW.2009.5206828",
+ "isInfluential": false,
+ "paperId": "21d9d0deed16f0ad62a4865e9acf0686f4f15492",
+ "title": "Understanding images of groups of people",
+ "url": "https://www.semanticscholar.org/paper/21d9d0deed16f0ad62a4865e9acf0686f4f15492",
+ "venue": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
+ "year": 2009
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "33612681",
+ "name": "Doug Gray",
+ "url": "https://www.semanticscholar.org/author/33612681"
+ },
+ {
+ "authorId": "7207299",
+ "name": "Shane Brennan",
+ "url": "https://www.semanticscholar.org/author/7207299"
+ },
+ {
+ "authorId": "2491194",
+ "name": "Hai Tao",
+ "url": "https://www.semanticscholar.org/author/2491194"
+ }
+ ],
+ "doi": null,
+ "isInfluential": false,
+ "paperId": "6273b3491e94ea4dd1ce42b791d77bdc96ee73a8",
+ "title": "Evaluating Appearance Models for Recognition , Reacquisition , and Tracking",
+ "url": "https://www.semanticscholar.org/paper/6273b3491e94ea4dd1ce42b791d77bdc96ee73a8",
+ "venue": "",
+ "year": 2007
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "1782755",
+ "name": "Josef Sivic",
+ "url": "https://www.semanticscholar.org/author/1782755"
+ },
+ {
+ "authorId": "3056091",
+ "name": "Mark Everingham",
+ "url": "https://www.semanticscholar.org/author/3056091"
+ },
+ {
+ "authorId": "1688869",
+ "name": "Andrew Zisserman",
+ "url": "https://www.semanticscholar.org/author/1688869"
+ }
+ ],
+ "doi": "10.1109/CVPRW.2009.5206513",
+ "isInfluential": false,
+ "paperId": "03d1d0a665e358863ff4de9ee7d78f64edd7e756",
+ "title": "“Who are you?” - Learning person specific classifiers from video",
+ "url": "https://www.semanticscholar.org/paper/03d1d0a665e358863ff4de9ee7d78f64edd7e756",
+ "venue": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
+ "year": 2009
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "2405613",
+ "name": "Omar Oreifej",
+ "url": "https://www.semanticscholar.org/author/2405613"
+ },
+ {
+ "authorId": "35422941",
+ "name": "Ramin Mehran",
+ "url": "https://www.semanticscholar.org/author/35422941"
+ },
+ {
+ "authorId": "1745480",
+ "name": "Mubarak Shah",
+ "url": "https://www.semanticscholar.org/author/1745480"
+ }
+ ],
+ "doi": "10.1109/CVPR.2010.5540147",
+ "isInfluential": false,
+ "paperId": "34c256893c3e2dfcf77b7da1c5daa77981a79196",
+ "title": "Human identity recognition in aerial images",
+ "url": "https://www.semanticscholar.org/paper/34c256893c3e2dfcf77b7da1c5daa77981a79196",
+ "venue": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
+ "year": 2010
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "1838674",
+ "name": "Dragomir Anguelov",
+ "url": "https://www.semanticscholar.org/author/1838674"
+ },
+ {
+ "authorId": "2457452",
+ "name": "Kuang-chih Lee",
+ "url": "https://www.semanticscholar.org/author/2457452"
+ },
+ {
+ "authorId": "2437408",
+ "name": "Salih Burak Göktürk",
+ "url": "https://www.semanticscholar.org/author/2437408"
+ },
+ {
+ "authorId": "1702811",
+ "name": "Baris Sumengen",
+ "url": "https://www.semanticscholar.org/author/1702811"
+ }
+ ],
+ "doi": "10.1109/CVPR.2007.383057",
+ "isInfluential": false,
+ "paperId": "28f232498c6e14950ef9b6a3ae0781f2348be3b7",
+ "title": "Contextual Identity Recognition in Personal Photo Albums",
+ "url": "https://www.semanticscholar.org/paper/28f232498c6e14950ef9b6a3ae0781f2348be3b7",
+ "venue": "2007 IEEE Conference on Computer Vision and Pattern Recognition",
+ "year": 2007
+ },
+ {
+ "arxivId": "1409.1556",
+ "authors": [
+ {
+ "authorId": "34838386",
+ "name": "Karen Simonyan",
+ "url": "https://www.semanticscholar.org/author/34838386"
+ },
+ {
+ "authorId": "1688869",
+ "name": "Andrew Zisserman",
+ "url": "https://www.semanticscholar.org/author/1688869"
+ }
+ ],
+ "doi": null,
+ "isInfluential": false,
+ "paperId": "061356704ec86334dbbc073985375fe13cd39088",
+ "title": "Very Deep Convolutional Networks for Large-Scale Image Recognition",
+ "url": "https://www.semanticscholar.org/paper/061356704ec86334dbbc073985375fe13cd39088",
+ "venue": "ArXiv",
+ "year": 2014
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "3264124",
+ "name": "Ryan Layne",
+ "url": "https://www.semanticscholar.org/author/3264124"
+ },
+ {
+ "authorId": "1697755",
+ "name": "Timothy M. Hospedales",
+ "url": "https://www.semanticscholar.org/author/1697755"
+ },
+ {
+ "authorId": "2073354",
+ "name": "Shaogang Gong",
+ "url": "https://www.semanticscholar.org/author/2073354"
+ }
+ ],
+ "doi": "10.5244/C.26.24",
+ "isInfluential": false,
+ "paperId": "34aa3dca30dc5cbf86c92d5035e35d264540a829",
+ "title": "Person Re-identification by Attributes",
+ "url": "https://www.semanticscholar.org/paper/34aa3dca30dc5cbf86c92d5035e35d264540a829",
+ "venue": "BMVC",
+ "year": 2012
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "8342699",
+ "name": "Jia Deng",
+ "url": "https://www.semanticscholar.org/author/8342699"
+ },
+ {
+ "authorId": "49191744",
+ "name": "Wei Dong",
+ "url": "https://www.semanticscholar.org/author/49191744"
+ },
+ {
+ "authorId": "2166511",
+ "name": "Richard Socher",
+ "url": "https://www.semanticscholar.org/author/2166511"
+ },
+ {
+ "authorId": "33642044",
+ "name": "Li-Jia Li",
+ "url": "https://www.semanticscholar.org/author/33642044"
+ },
+ {
+ "authorId": "2168945",
+ "name": "Kai Li",
+ "url": "https://www.semanticscholar.org/author/2168945"
+ },
+ {
+ "authorId": "3216322",
+ "name": "Li Fei-Fei",
+ "url": "https://www.semanticscholar.org/author/3216322"
+ }
+ ],
+ "doi": "10.1109/CVPRW.2009.5206848",
+ "isInfluential": false,
+ "paperId": "38211dc39e41273c0007889202c69f841e02248a",
+ "title": "ImageNet: A large-scale hierarchical image database",
+ "url": "https://www.semanticscholar.org/paper/38211dc39e41273c0007889202c69f841e02248a",
+ "venue": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
+ "year": 2009
+ },
+ {
+ "arxivId": "1310.1531",
+ "authors": [
+ {
+ "authorId": "7408951",
+ "name": "Jeff Donahue",
+ "url": "https://www.semanticscholar.org/author/7408951"
+ },
+ {
+ "authorId": "39978391",
+ "name": "Yangqing Jia",
+ "url": "https://www.semanticscholar.org/author/39978391"
+ },
+ {
+ "authorId": "1689108",
+ "name": "Oriol Vinyals",
+ "url": "https://www.semanticscholar.org/author/1689108"
+ },
+ {
+ "authorId": "50196944",
+ "name": "Judy Hoffman",
+ "url": "https://www.semanticscholar.org/author/50196944"
+ },
+ {
+ "authorId": "49410785",
+ "name": "Ning Zhang",
+ "url": "https://www.semanticscholar.org/author/49410785"
+ },
+ {
+ "authorId": "2368132",
+ "name": "Eric Tzeng",
+ "url": "https://www.semanticscholar.org/author/2368132"
+ },
+ {
+ "authorId": "1753210",
+ "name": "Trevor Darrell",
+ "url": "https://www.semanticscholar.org/author/1753210"
+ }
+ ],
+ "doi": null,
+ "isInfluential": false,
+ "paperId": "33da83b54410af11d0cd18fd07c74e1a99f67e84",
+ "title": "DeCAF: A Deep Convolutional Activation Feature for Generic Visual Recognition",
+ "url": "https://www.semanticscholar.org/paper/33da83b54410af11d0cd18fd07c74e1a99f67e84",
+ "venue": "ICML",
+ "year": 2014
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "1688882",
+ "name": "Yann LeCun",
+ "url": "https://www.semanticscholar.org/author/1688882"
+ },
+ {
+ "authorId": "2219581",
+ "name": "Bernhard E. Boser",
+ "url": "https://www.semanticscholar.org/author/2219581"
+ },
+ {
+ "authorId": "1747317",
+ "name": "John S. Denker",
+ "url": "https://www.semanticscholar.org/author/1747317"
+ },
+ {
+ "authorId": "37274089",
+ "name": "Donnie Henderson",
+ "url": "https://www.semanticscholar.org/author/37274089"
+ },
+ {
+ "authorId": "32295804",
+ "name": "Richard E. Howard",
+ "url": "https://www.semanticscholar.org/author/32295804"
+ },
+ {
+ "authorId": "34859193",
+ "name": "Wayne E. Hubbard",
+ "url": "https://www.semanticscholar.org/author/34859193"
+ },
+ {
+ "authorId": "2307573",
+ "name": "Lawrence D. Jackel",
+ "url": "https://www.semanticscholar.org/author/2307573"
+ }
+ ],
+ "doi": "10.1162/neco.1989.1.4.541",
+ "isInfluential": false,
+ "paperId": "a8e8f3c8d4418c8d62e306538c9c1292635e9d27",
+ "title": "Backpropagation Applied to Handwritten Zip Code Recognition",
+ "url": "https://www.semanticscholar.org/paper/a8e8f3c8d4418c8d62e306538c9c1292635e9d27",
+ "venue": "Neural Computation",
+ "year": 1989
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "49832825",
+ "name": "Rui Zhao",
+ "url": "https://www.semanticscholar.org/author/49832825"
+ },
+ {
+ "authorId": "3001348",
+ "name": "Wanli Ouyang",
+ "url": "https://www.semanticscholar.org/author/3001348"
+ },
+ {
+ "authorId": "31843833",
+ "name": "Xiaogang Wang",
+ "url": "https://www.semanticscholar.org/author/31843833"
+ }
+ ],
+ "doi": "10.1109/CVPR.2013.460",
+ "isInfluential": false,
+ "paperId": "46638b810bf69023bca41db664b49bc935bcba3c",
+ "title": "Unsupervised Salience Learning for Person Re-identification",
+ "url": "https://www.semanticscholar.org/paper/46638b810bf69023bca41db664b49bc935bcba3c",
+ "venue": "2013 IEEE Conference on Computer Vision and Pattern Recognition",
+ "year": 2013
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "49832825",
+ "name": "Rui Zhao",
+ "url": "https://www.semanticscholar.org/author/49832825"
+ },
+ {
+ "authorId": "3001348",
+ "name": "Wanli Ouyang",
+ "url": "https://www.semanticscholar.org/author/3001348"
+ },
+ {
+ "authorId": "31843833",
+ "name": "Xiaogang Wang",
+ "url": "https://www.semanticscholar.org/author/31843833"
+ }
+ ],
+ "doi": "10.1109/CVPR.2014.26",
+ "isInfluential": false,
+ "paperId": "3fd7bfd90f0dfc3369bfe718e27aff30cf268c23",
+ "title": "Learning Mid-level Filters for Person Re-identification",
+ "url": "https://www.semanticscholar.org/paper/3fd7bfd90f0dfc3369bfe718e27aff30cf268c23",
+ "venue": "2014 IEEE Conference on Computer Vision and Pattern Recognition",
+ "year": 2014
+ },
+ {
+ "arxivId": null,
+ "authors": [
+ {
+ "authorId": "1782755",
+ "name": "Josef Sivic",
+ "url": "https://www.semanticscholar.org/author/1782755"
+ },
+ {
+ "authorId": "1699161",
+ "name": "C. Lawrence Zitnick",
+ "url": "https://www.semanticscholar.org/author/1699161"
+ },
+ {
+ "authorId": "1717841",
+ "name": "Richard Szeliski",
+ "url": "https://www.semanticscholar.org/author/1717841"
+ }
+ ],
+ "doi": "10.5244/C.20.93",
+ "isInfluential": false,
+ "paperId": "586f106ba7aede43554c4287eb6c5b99d4725f8f",
+ "title": "Finding People in Repeated Shots of the Same Scene",
+ "url": "https://www.semanticscholar.org/paper/586f106ba7aede43554c4287eb6c5b99d4725f8f",
+ "venue": "BMVC",
+ "year": 2006
+ }
+ ],
+ "title": "Beyond frontal faces: Improving Person Recognition using multiple cues",
+ "topics": [
+ {
+ "topic": "Finite-state machine",
+ "topicId": "4280",
+ "url": "https://www.semanticscholar.org/topic/4280"
+ },
+ {
+ "topic": "Flickr",
+ "topicId": "67227",
+ "url": "https://www.semanticscholar.org/topic/67227"
+ },
+ {
+ "topic": "Image resolution",
+ "topicId": "881",
+ "url": "https://www.semanticscholar.org/topic/881"
+ },
+ {
+ "topic": "Convolutional neural network",
+ "topicId": "29860",
+ "url": "https://www.semanticscholar.org/topic/29860"
+ }
+ ],
+ "url": "https://www.semanticscholar.org/paper/0a85bdff552615643dd74646ac881862a7c7072d",
+ "venue": "2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
+ "year": 2015
+} \ No newline at end of file
diff --git a/scraper/samples/s2-search-api.json b/scraper/samples/s2-search-api.json
new file mode 100644
index 00000000..207fb98c
--- /dev/null
+++ b/scraper/samples/s2-search-api.json
@@ -0,0 +1,270 @@
+{
+ "id": "0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e",
+ "title": {
+ "text": "Large age-gap face verification by feature injection in deep networks",
+ "fragments": [
+ {
+ "start": 0,
+ "end": 5
+ },
+ {
+ "start": 6,
+ "end": 9
+ },
+ {
+ "start": 10,
+ "end": 13
+ },
+ {
+ "start": 14,
+ "end": 18
+ },
+ {
+ "start": 19,
+ "end": 31
+ },
+ {
+ "start": 35,
+ "end": 42
+ },
+ {
+ "start": 43,
+ "end": 52
+ },
+ {
+ "start": 56,
+ "end": 60
+ },
+ {
+ "start": 61,
+ "end": 69
+ }
+ ]
+ },
+ "slug": "Large-age-gap-face-verification-by-feature-in-deep-Bianco",
+ "paperAbstract": {
+ "text": "This paper introduces a new method for face verification across large age gaps and also a dataset containing variations of age in the wild, the Large Age-Gap (LAG) dataset, with images ranging from child/young to adult/old. The proposed method exploits a deep convolutional neural network (DCNN) pre-trained for the face recognition task on a large dataset and then fine-tuned for the large age-gap face verification task. Finetuning is performed in a Siamese architecture using a contrastive loss function. A feature injection layer is introduced to boost verification accuracy, showing the ability of the DCNN to learn a similarity metric leveraging external features. Experimental results on the LAG dataset show that our method is able to outperform the face verification solutions in the state of the art considered.",
+ "fragments": [
+ {
+ "start": 39,
+ "end": 43
+ },
+ {
+ "start": 44,
+ "end": 56
+ },
+ {
+ "start": 64,
+ "end": 69
+ },
+ {
+ "start": 70,
+ "end": 73
+ },
+ {
+ "start": 74,
+ "end": 78
+ },
+ {
+ "start": 123,
+ "end": 126
+ },
+ {
+ "start": 144,
+ "end": 149
+ },
+ {
+ "start": 150,
+ "end": 153
+ },
+ {
+ "start": 154,
+ "end": 157
+ },
+ {
+ "start": 255,
+ "end": 259
+ },
+ {
+ "start": 281,
+ "end": 288
+ },
+ {
+ "start": 316,
+ "end": 320
+ },
+ {
+ "start": 343,
+ "end": 348
+ },
+ {
+ "start": 385,
+ "end": 390
+ },
+ {
+ "start": 391,
+ "end": 394
+ },
+ {
+ "start": 395,
+ "end": 398
+ },
+ {
+ "start": 399,
+ "end": 403
+ },
+ {
+ "start": 404,
+ "end": 416
+ },
+ {
+ "start": 510,
+ "end": 517
+ },
+ {
+ "start": 518,
+ "end": 527
+ },
+ {
+ "start": 557,
+ "end": 569
+ },
+ {
+ "start": 758,
+ "end": 762
+ },
+ {
+ "start": 763,
+ "end": 775
+ }
+ ]
+ },
+ "authors": [
+ [
+ {
+ "name": "Simone Bianco",
+ "ids": [
+ "2217051"
+ ],
+ "slug": "Simone-Bianco"
+ },
+ {
+ "text": "Simone Bianco",
+ "fragments": []
+ }
+ ]
+ ],
+ "structuredAuthors": [
+ {
+ "firstName": "Simone",
+ "middleNames": [],
+ "lastName": "Bianco"
+ }
+ ],
+ "year": {
+ "text": "2017",
+ "fragments": []
+ },
+ "venue": {
+ "text": "Pattern Recognition Letters",
+ "fragments": []
+ },
+ "citationContexts": [],
+ "citationStats": {
+ "citedByBuckets": [
+ {
+ "startKey": 2017,
+ "endKey": 2017,
+ "count": 2,
+ "estimate": {
+ "min": 2,
+ "value": 2.543286309486936,
+ "max": 6.143286309486936,
+ "confidence": 0.9
+ }
+ },
+ {
+ "startKey": 2018,
+ "endKey": 2018,
+ "count": 3,
+ "estimate": {
+ "min": 4,
+ "value": 5.086572618973872,
+ "max": 12.286572618973873,
+ "confidence": 0.9
+ }
+ }
+ ],
+ "keyCitedByBuckets": [],
+ "numCitations": 5,
+ "estNumCitations": {
+ "min": 5,
+ "value": 6.3582157737173395,
+ "max": 15.35821577371734,
+ "confidence": 0.9
+ },
+ "numReferences": 40,
+ "numKeyCitations": 0,
+ "numKeyReferences": 8,
+ "numViewableReferences": 40,
+ "keyCitationRate": 0,
+ "estCitationVelocity": {
+ "estimate": {
+ "min": 0,
+ "value": 0,
+ "max": 0,
+ "confidence": 1
+ },
+ "estCitationsByRange": []
+ },
+ "estCitationAcceleration": {
+ "estimate": {
+ "min": 0,
+ "value": 0,
+ "max": 0,
+ "confidence": 1
+ },
+ "estCitationsByRange": []
+ }
+ },
+ "sources": [
+ "DBLP",
+ "Grobid",
+ "ScienceParse",
+ "SPv2",
+ "ArXiv",
+ "Crawler",
+ "DBLP"
+ ],
+ "journal": {
+ "name": "Pattern Recognition Letters",
+ "volume": "90",
+ "pages": "36-42"
+ },
+ "socialLinks": [],
+ "presentationUrls": [],
+ "doiInfo": {
+ "doi": "10.1016/j.patrec.2017.03.006",
+ "doiUrl": "http://doi.org/10.1016/j.patrec.2017.03.006"
+ },
+ "links": [
+ {
+ "url": "https://arxiv.org/pdf/1602.06149.pdf",
+ "linkType": "arxiv"
+ }
+ ],
+ "primaryPaperLink": {
+ "url": "https://arxiv.org/pdf/1602.06149.pdf",
+ "linkType": "arxiv"
+ },
+ "alternatePaperLinks": [],
+ "entities": [],
+ "entityRelations": [],
+ "readerId": "1602.06149",
+ "blogs": [],
+ "videos": [],
+ "githubReferences": [],
+ "faqs": [],
+ "scorecardStats": [],
+ "hasPdf": true
+} \ No newline at end of file
diff --git a/scholar-fetch.py b/scraper/scholar-fetch.py
index e206b058..e206b058 100644
--- a/scholar-fetch.py
+++ b/scraper/scholar-fetch.py
diff --git a/split-csv.py b/scraper/split-csv.py
index 62dc1597..122d2ddc 100644
--- a/split-csv.py
+++ b/scraper/split-csv.py
@@ -4,28 +4,36 @@ import csv
from math import ceil
import subprocess
import random
+from util import *
import click
@click.command()
@click.option('--count', '-c', default=2, help='Number of subdivisions.')
+@click.option('--has_keys/--no_keys', '-k', default=False, help='Whether to split off the keys.')
@click.option('--shuffle/--no_shuffle', default=False, help='Whether to shuffle.')
@click.argument('filename')
-def split_csv(count, shuffle, filename):
+def split_csv(count, has_keys, shuffle, filename):
"""Split a CSV into groups."""
with open(filename, 'r') as f:
reader = csv.reader(f)
- lines = list(reader)
- keys = lines[0]
- lines = lines[1:]
+ lines = list(unfussy_reader(reader))
+ if has_keys:
+ keys = lines[0]
+ lines = lines[1:]
+ else:
+ keys = None
fn, ext = os.path.splitext(filename)
if shuffle:
random.shuffle(lines)
- for index, chunk in enumerate(chunks(lines, count)):
+ n = max(1, ceil(len(lines) / count))
+ for index in range(count):
+ m = index * n
+ chunk = lines[m:m+n]
+ print(chunk[0])
out_fn = fn + '-' + str(index+1) + ext
write_csv(out_fn, keys, chunk)
- # sys.exit(1)
# Write a CSV
def write_csv(fn, keys, chunk):
@@ -37,10 +45,6 @@ def write_csv(fn, keys, chunk):
for row in chunk:
writer.writerow(row)
-# Split an array into chunks
-def chunks(l, n):
- n = max(1, ceil(len(l) / n))
- return (l[i:i+n] for i in range(0, len(l), n))
if __name__ == '__main__':
split_csv()
diff --git a/scraper/util.py b/scraper/util.py
new file mode 100644
index 00000000..a435f91a
--- /dev/null
+++ b/scraper/util.py
@@ -0,0 +1,298 @@
+import re
+import os
+import csv
+import string
+import codecs
+import gspread
+import simplejson as json
+from oauth2client.service_account import ServiceAccountCredentials
+
def read_citation_list(index=0):
    """Read the citation CSV and return (header_row, data_rows).

    When index > 0, reads the split file ./datasets/citations-<index>.csv
    instead of ./datasets/citations.csv.
    """
    filename = './datasets/citations.csv'
    if index > 0:
        base, ext = os.path.splitext(filename)
        filename = '{}-{}{}'.format(base, index, ext)
    with open(filename, 'r') as handle:
        rows = list(csv.reader(handle))
    return rows[0], rows[1:]
+
def unfussy_reader(reader):
    """Yield rows from a csv reader, skipping rows that raise csv.Error.

    Malformed rows (NUL bytes, over-long fields, ...) are logged and
    skipped instead of aborting the whole read.
    """
    while True:
        try:
            yield next(reader)
        except StopIteration:
            return
        except csv.Error as err:
            # Bug fix: the original printed the csv.Error *class* object,
            # not the caught exception, which hid the actual problem.
            print(err)
            continue
+
def read_csv(fn, keys=True, create=False):
    """Read a CSV file, skipping malformed rows via unfussy_reader.

    Args:
        fn: path of the CSV file.
        keys: when truthy, treat the first row as a header and return a
            (header, rows) tuple; otherwise return just the rows.
        create: when True, return an empty list instead of raising when
            the file cannot be read.
    """
    try:
        with open(fn, 'r', newline='', encoding='utf-8') as f:
            # reader = csv.reader( (line.replace('\0','') for line in f) )
            reader = csv.reader(f)
            lines = list(unfussy_reader(reader))
            if keys:
                keys = lines[0]
                lines = lines[1:]
                return keys, lines
            return lines
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate instead of being silently converted to [].
        if create:
            return []
        raise
+
def csv_writer(fn):
    """Return a csv.writer over a newly opened file.

    Bug fix: the original opened the file in a `with` block and returned
    after the block exited, so the writer wrapped an already-closed file
    and every subsequent writerow() raised ValueError. The file object is
    kept alive by the returned writer and is closed (flushing its buffer)
    when the writer is garbage-collected; callers needing deterministic
    flushing should prefer write_csv().
    """
    f = open(fn, 'w', newline='', encoding='utf-8')
    return csv.writer(f)
+
def write_csv(fn, keys, rows):
    """Write *rows* to *fn* as CSV, preceded by the *keys* header row
    when one is given (keys may be None)."""
    with open(fn, 'w', newline='', encoding='utf-8') as out:
        writer = csv.writer(out)
        if keys is not None:
            writer.writerow(keys)
        writer.writerows(rows)
+
def read_text(fn):
    """Return the full contents of the text file at *fn*."""
    with open(fn, 'r') as handle:
        return handle.read()
+
def read_json(fn):
    """Load and return the JSON document stored at *fn*."""
    with open(fn, 'r') as handle:
        return json.load(handle)
+
def write_json(fn, data):
    """Serialize *data* as JSON into the file at *fn*."""
    with open(fn, 'w') as handle:
        json.dump(data, handle)
+
def write_report(fn, title=None, keys=None, rows=None):
    """Write a minimal self-contained HTML report with a single table.

    Args:
        fn: output path.
        title: optional page title, also rendered as an <h2> heading.
        keys: optional table header cells (passed to write_table).
        rows: table rows; defaults to no rows.

    Bug fix: `rows` previously defaulted to a shared mutable list
    (`rows=[]`), which callers could accidentally mutate for everyone.
    """
    if rows is None:
        rows = []
    with open(fn, 'w') as f:
        f.write("<!doctype html>")
        f.write("<html>")
        f.write("<head>")
        f.write("<meta charset='utf-8'>")
        if title is not None:
            f.write("<title>{}</title>".format(title))
        f.write("<link rel='stylesheet' href='reports.css'>")
        f.write("</head>")
        f.write("<body>")
        if title is not None:
            f.write("<h2>{}</h2>".format(title))
        count = write_table(f, keys=keys, rows=rows)
        f.write("</body>")
        f.write("</html>")
        print("{} {}".format(fn, count))
+
def percent(m, n):
    """Return m/n as a rounded whole-number percentage; 100 when n == 0."""
    return 100 if n == 0 else round(m / n * 100)
+
class NameLine(object):
    """Wraps a stripped string for rendering as a <span class="name">."""

    def __init__(self, s):
        self.s = s.strip()

    def __str__(self):
        return '<span class="name">{}</span>'.format(self.s)
+
class BoldLine(object):
    """Wraps a stripped string for rendering in <b> tags."""

    def __init__(self, s):
        self.s = s.strip()

    def __str__(self):
        return '<b>{}</b>'.format(self.s)
+
class LinkLine(object):
    """Renders a label as an <a> when an href is known, otherwise as a
    grayed-out <span>."""

    def __init__(self, href, txt):
        self.href = href
        self.txt = txt.strip()

    def __str__(self):
        if self.href:
            return '<a href="{}">{}</a>'.format(self.href, self.txt)
        # Bug fix: the no-href branch previously closed the <span> with
        # a mismatched </a> tag.
        return '<span class="gray">{}</span>'.format(self.txt)
+
def write_table(f, keys, rows):
    """Write *rows* as an HTML table to the file-like object *f*.

    A None entry in *rows* acts as a terminator: remaining rows are
    skipped. Returns the number of data rows written.

    Bug fixes: hitting a None row used to `return` immediately, which
    returned None instead of the count (write_report then printed
    "None") and left the <table> tag unclosed; header cells are now
    also wrapped in a <tr>, as required for valid HTML.
    """
    count = 0
    f.write("<table border='1' cellpadding='3' cellspacing='3'>")
    if keys is not None:
        f.write("<tr>")
        for key in keys:
            f.write("<th>{}</th>".format(key))
        f.write("</tr>")
    for row in rows:
        if row is None:
            break
        count += 1
        f.write("<tr>")
        for cell in row:
            if isinstance(cell, (list, tuple)):
                # multi-valued cells render one value per line
                f.write("<td>{}</td>".format('<br/>'.join(str(x) for x in cell)))
            else:
                f.write("<td>{}</td>".format(cell))
        f.write("</tr>")
    f.write("</table>")
    return count
+
def paper_path(key='papers', paper_id=''):
    """Return the on-disk path of a cached paper JSON, sharded into
    subdirectories by the first two characters of the paper id."""
    shard = paper_id[:2]
    return '{}/{}/{}/{}/paper.json'.format('./datasets/s2', key, shard, paper_id)
+
class DbPaper(object):
    """A paper record loaded from the local 'db_papers' JSON cache.

    NOTE(review): attribute access assumes the cached record carries the
    keys title/journalName/authors/s2PdfUrl/pdfUrls (and optionally
    year) — confirm against the scraper output.
    """

    def __init__(self, paper_id):
        self.paper_id = paper_id
        self.data = read_json(paper_path('db_papers', paper_id))

    @property
    def title(self):
        return self.data['title']

    @property
    def journal(self):
        return self.data['journalName']

    @property
    def year(self):
        # missing year renders as an empty string
        return self.data.get('year', '')

    @property
    def authors(self):
        # (author_id or '', name) pairs; some authors carry no id
        pairs = []
        for author in self.data['authors']:
            ids = author['ids']
            pairs.append((ids[0] if len(ids) else '', author['name']))
        return pairs

    @property
    def pdf_link(self):
        # prefer the Semantic Scholar hosted PDF, then any listed URL
        if self.data['s2PdfUrl']:
            return self.data['s2PdfUrl']
        if len(self.data['pdfUrls']):
            return self.data['pdfUrls'][0]
        return None

    def record(self):
        """Return the CSV-style summary row for this paper."""
        return [self.paper_id, self.title, self.journal, self.year]
+
class RawPaper(object):
    """A paper record loaded from the 'raw_papers' scrape cache.

    Unlike DbPaper, the raw cache wraps the record in a top-level
    'paper' key, and most fields are {'text': ...} objects.
    NOTE(review): when the 'paper' key is missing, __init__ leaves
    self.data = None and every property will raise TypeError — callers
    should check .data before use.
    """
    def __init__(self, paper_id):
        self.paper_id = paper_id
        data = read_json(paper_path('raw_papers', paper_id))
        # print(data)
        if 'paper' not in data:
            # malformed cache entry: log it and bail out with data=None
            # (the `return None` is a no-op in __init__; the instance is
            # still created)
            print(data)
            self.data = None
            return None
        self.data = data['paper']
    @property
    def title(self):
        return self.data['title']['text']
    @property
    def year(self):
        return self.data['year']['text']
    @property
    def journal(self):
        # raw records may lack journal information entirely
        if 'journal' in self.data and 'name' in self.data['journal']:
            return self.data['journal']['name']
        else:
            return 'Unknown'
    @property
    def authors(self):
        # NOTE(review): each entry appears to be a one-element list
        # wrapping the author dict (hence author[0]) — verify schema.
        return [ (author[0]['ids'][0] if len(author[0]['ids']) else '', author[0]['name']) for author in self.data['authors'] ]
    @property
    def pdf_link(self):
        if 'primaryPaperLink' in self.data:
            return self.data['primaryPaperLink']
        return None
    def record(self):
        """Return the CSV-style summary row for this paper."""
        return [ self.paper_id, self.title, self.journal, self.year ]
+
def load_paper(paper_id):
    """Return a DbPaper or RawPaper for *paper_id*, preferring the DB
    cache; logs and returns None when neither cache has the paper."""
    for key, cls in (('db_papers', DbPaper), ('raw_papers', RawPaper)):
        if os.path.exists(paper_path(key, paper_id)):
            return cls(paper_id)
    print('no paper')
    return None
+
def dedupe(a):
    """Return the unique values of *a*, sorted ascending.

    Replaces a hand-rolled dict-based uniquing pass with the idiomatic
    sorted(set(...)); items must be hashable and mutually orderable,
    exactly as before.
    """
    return sorted(set(a))
+
def read_headings(fn, paper):
    """Scan the extracted text of a PDF at *fn* for front-matter headings.

    Strips e-mail addresses, footnote daggers and leading list markers,
    skips lines matching the paper's journal or author names, and stops
    at the abstract. Returns (headings, found_abstract).

    NOTE(review): found_authors is collected but never returned, and
    was_found is computed but never used — author-name lines still fall
    through to headings.append(); this looks like a missing
    `if was_found: continue`.
    """
    headings = []
    found_abstract = False
    found_authors = []
    journal = paper.journal.lower()
    # (author_id, name, lowercased name) triples for substring matching
    authors = [ (a[0], a[1], a[1].lower(),) for a in paper.authors ]
    with open(fn, 'r') as f:
        for line in f.readlines():
            # drop e-mail addresses
            line = re.sub(r"\S*@\S*\s?", '', line)
            l = line.lower().strip()
            if len(l) < 5:
                continue
            # drop a single leading footnote/list marker character
            if line[0] == 'a' or line[0] == 'b' or line[0] == 'c' or line[0] == '1' or line[0] == '2' or line[0] == '3' or line[0] == '4':
                line = line[1:]
            line = line.strip("∗†‡")
            # NOTE(review): presumably normalizes PDF ligature glyphs
            # (ﬂ/ﬀ/ﬃ/ﬄ) to plain letters; several pairs here look
            # self-identical after encoding mangling — verify against the
            # original source file.
            line = line.replace("fl", "fl").replace('ff', 'ff').replace('ffi', 'f‌f‌i').replace('ffl', 'f‌f‌l')
            line = line.strip()
            if 'abstract' in l:
                found_abstract = True
                break
            if journal and journal in l:
                continue
            names = [s.strip() for s in re.split(',| and ', l)]
            was_found = False
            for name in names:
                found = find_authors(authors, name)
                if found:
                    was_found = True
                    # print("found {}".format(found[1]))
                    if found[0]:
                        found_authors.append(found)
                    continue
            headings.append(line.strip())
    return headings, found_abstract
+
def find_authors(authors, line):
    """Return the first (id, name, lowered_name) triple whose lowered
    name occurs as a substring of *line*, or None when nothing matches."""
    matches = (author for author in authors if author[2] in line)
    return next(matches, None)
+
class AddressBook (object):
    """Lookup table mapping institution-name variants to a canonical
    data row, built from the Google Sheets 'institutions' worksheet.

    NOTE(review): assumes column 0 is the canonical entity name and
    column 1 a name variant/alias — confirm against the sheet layout.
    """
    def __init__(self):
        entities = {}
        lookup = {}
        data = fetch_google_sheet()
        # keys, data = read_csv('reports/pdf_institutions_deduped.csv', keys=True)
        for index, line in enumerate(data):
            # remember the row index for a self-named row, or the first
            # row seen for this entity
            if line[0] == line[1] or line[0] not in entities:
                entities[line[0]] = index
            lookup[line[1].lower().strip()] = line[0]
        self.data = data
        self.lookup = lookup
        self.entities = entities

    def find(self, address):
        """Return the canonical data row for an address string: try the
        whole (digit-stripped, lowercased) address first, then each
        comma-separated part; None when nothing matches."""
        address = address.lower().strip().strip(string.digits)
        if address in self.lookup:
            entity = self.lookup[address]
            index = self.entities[entity]
            return self.data[index]
        for part in address.split(','):
            # NOTE(review): this replace looks like a no-op (space for
            # space); the original likely replaced a non-breaking space
            # or double space — verify against the pre-diff source.
            part = part.strip().replace(' ', ' ')
            if part in self.lookup:
                entity = self.lookup[part]
                index = self.entities[entity]
                return self.data[index]
        return None
+
def fetch_worksheet():
    """Authorize against the Google Sheets API using the local service
    account credentials and return the 'institutions' worksheet of the
    Megapixels spreadsheet (network I/O)."""
    scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']
    credentials = ServiceAccountCredentials.from_json_keyfile_name('./.creds/Megapixels-ef28f91112a9.json', scope)
    docid = "1denb7TjYsN9igHyvYah7fQ0daABW32Z30lwV7QrDJQc"
    client = gspread.authorize(credentials)
    spreadsheet = client.open_by_key(docid)
    return spreadsheet.worksheet("institutions")
+
def fetch_google_sheet():
    """Return the data rows of the institutions worksheet, skipping the
    header row.

    Cleanup: the header row was previously bound to an unused `keys`
    variable; behavior is unchanged.
    """
    rows = fetch_worksheet().get_all_values()
    return rows[1:]
diff --git a/vendor/scholar.py b/scraper/vendor/scholar.py
index 13ccd439..13ccd439 100755
--- a/vendor/scholar.py
+++ b/scraper/vendor/scholar.py
diff --git a/server/app/README.md b/server/app/README.md
new file mode 100644
index 00000000..8bc70132
--- /dev/null
+++ b/server/app/README.md
@@ -0,0 +1,17 @@
+# Startup
+
+Run supervisor
+
+`/usr/bin/supervisord`
+
+Tail log file
+
+`tail -f /var/log/uwsgi/app/app.log`
+
+`/opt/redis/redis-stable/src/redis-server &`
+
+`celery worker -A celery_worker.celery --loglevel=info &`
+
+If using on the production server
+
+`/usr/bin/nohup /usr/bin/supervisord &` \ No newline at end of file
diff --git a/server/app/__init__.py b/server/app/__init__.py
new file mode 100644
index 00000000..bce3f9ee
--- /dev/null
+++ b/server/app/__init__.py
@@ -0,0 +1,39 @@
+import logging
+from logging.handlers import RotatingFileHandler
+
+from flask import Flask
+from flask_bootstrap import Bootstrap
+
+from flask import Flask
+
+from config import config, Config
+
+bootstrap = Bootstrap()
+#celery = Celery(__name__, broker=Config.CELERY_BROKER_URL)
+from .basemodels import celery
+
def create_app(config_name):
    """Application factory: build and configure the Flask app for the
    given config name.

    Wires up Flask-Bootstrap, pushes the Flask config into the shared
    Celery instance (created in basemodels), registers the main
    blueprint, and configures logging to debug.log plus a console echo.
    """
    app = Flask(__name__)
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)

    bootstrap.init_app(app)
    # share the Flask settings with the Celery app from basemodels
    celery.conf.update(app.config)

    # imported here to avoid a circular import at module load time
    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)

    #handler = RotatingFileHandler('debug.log', maxBytes=10000, backupCount=1)
    #handler.setLevel(logging.INFO)
    #app.logger.addHandler(handler)

    # NOTE(review): `format` shadows the builtin, and basicConfig here
    # configures the *root* logger globally — calling create_app twice
    # would attach a second console handler.
    format = "%(asctime)s - [%(levelname)s] %(message)s"
    logging.basicConfig(filename='debug.log',
                        filemode='a',
                        format=format,
                        level=logging.DEBUG)
    console = logging.StreamHandler()
    console.setLevel(logging.DEBUG)
    logging.getLogger(__name__).addHandler(console)

    return app
diff --git a/server/app/basemodels.py b/server/app/basemodels.py
new file mode 100644
index 00000000..475ab0c2
--- /dev/null
+++ b/server/app/basemodels.py
@@ -0,0 +1,5 @@
+from config import config, Config
+from celery import Celery
+
#bootstrap = Bootstrap()
# Single shared Celery instance for the whole app; its configuration is
# updated from the Flask config inside create_app().
celery = Celery(__name__, broker=Config.CELERY_BROKER_URL)
diff --git a/server/app/favicon.ico b/server/app/favicon.ico
new file mode 100644
index 00000000..4d001b21
--- /dev/null
+++ b/server/app/favicon.ico
Binary files differ
diff --git a/server/app/index.html b/server/app/index.html
new file mode 100644
index 00000000..3c1b0dfd
--- /dev/null
+++ b/server/app/index.html
@@ -0,0 +1,161 @@
+<!doctype html>
+<html>
+<head>
+ <meta charset="utf-8">
+ <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
+ <link rel="shortcut icon" href="/static/img/favicon.ico" />
+  <title>DullDream (v2 x ZKM)</title>
+ <link rel="stylesheet" type="text/css" href="static/css/dullbrown-theme.css">
+</head>
+<body>
+
+<header>
+ <h1><a href="/"><img src="/static/img/dulldream_logo_200.png" alt="DullDream"></a></h1>
+ <h2 class="subtitle">Neural network photo effect</h2>
+</header>
+
+<div class="upload_view container">
+ <div class="row">
+ <div id="photo_area" class="dash_border">
+ <input class="hidden_input" id="user_file" type="file" accept="image/*">
+ <canvas class="photo" id="user_photo_canvas" width="512" height="512"></canvas>
+ <div class="center_inner">
+ <label id="take_photo_btn" for="user_file" class="upload_center_btn">
+ <div class='btn-lg btn'>Take Photo</div>
+ </label>
+ <div id="details"></div>
+ <div id="progress"></div>
+ </div>
+
+ <div id="preloader_anim">
+ <img src="/static/img/loader.gif">
+ </div>
+ </div>
+ </div>
+
+ <div id="upload_controls" class="row">
+ <div class="align_center">
+ <div id="restart_btn">
+ <a id="restart_btn" class="btn btn-md btn-default" role="button">Change Image</a>
+ <input type='file' accept="image/*">
+ </div>
+ <div id="dropdown_btn">
+ <select id="dropdown"></select>
+ </div>
+ <div id="upload_btn">
+ <a id="take_photo_btn" class="btn btn-md btn-important" role="button">Upload</a>
+ </div>
+ </div>
+ <div class="align_center consent_box">
+ <label>
+ <input type="checkbox" id="agree" value="1" checked>
+        I consent to have my dulled image displayed at ZKM.
+ </label>
+ </div>
+ </div>
+
+ <div id="about_btn" class="row">
+ <div class="align_center">
+ <a class="btn btn-sm btn-default about_button" role="button">About</a>
+ <a class="btn btn-sm btn-default privacy_button" role="button">Privacy</a>
+ <p class="notice">
+ All images uploaded can be used for exhibition and review purposes.
+ </p>
+ <p class="notice">
+ Currently this work is on view at <a href="http://zkm.de/en/event/2017/10/open-codes">ZKM</a>. View recent DullDreams <a href="/gallery">here</a>.
+ </p>
+ </div>
+ </div>
+</div>
+
+<div class="about_view modal">
+ <div class="inner">
+ <header>
+ <h1><img src="/static/img/dulldream_logo_200.png" alt="DullDream"></h1>
+ </header>
+ <div class='content'>
+ <p>
+ <b><i>DullDream™ by DullTech™</i></b> is a series of experiments appropriating neural network image recognition technology to make visual representation less interesting.
+ </p>
+ <p>
+ Can machine learning help us desensitize? Our impactful lives are clogging up social media feeds with unique filter settings, leaving us nostalgic for a vanilla future. Can machine learning help us achieve this? Take the excitement out of our lives, prepare us for a time where we will all have to be the same, have the same values and culture? Painting a future where the Dull is no longer a dream but a nightmare?
+ </p>
+ <p>
+ DullDream™ was developed for Transmediale 2017 - Ever Elusive by <a href="http://constantdullaart.com">Constant Dullaart</a> in collaboration with <a href="http://ahprojects.com">Adam Harvey</a>. It has generously been made possible by the Creative Industries Fund NL.
+ </p>
+ </div>
+ <center><a class="btn btn-sm btn-default" href="/" role="button">Home</a></center>
+ </div>
+</div>
+
+<div class="privacy_view modal">
+ <div class="inner">
+ <header>
+ <h1><img src="/static/img/dulldream_logo_200.png" alt="DullDream"></h1>
+ </header>
+ <div class='content'>
+ <h3>Privacy Notice</h3>
+ <p>
+ Images uploaded to this site are being used for a public art display at <a href="http://zkm.de/en/event/2017/10/open-codes">ZKM</a>
+ </p>
+ <p>
+ If you would not like to be included, be sure to uncheck the permission box on the upload page.
+ </p>
+
+ </div>
+ <center><a class="btn btn-sm btn-default" href="/" role="button">Home</a></center>
+ </div>
+</div>
+
+
+<div class="result_view">
+ <div class="final_result">
+ </div>
+
+ <div class="row made_with">
+ Made with DullDream.xyz for ZKM OpenCodes 2017
+ </div>
+
+ <div class="row">
+ <button class='btn' id="show_all_results">Detailed Analysis</button>
+ </div>
+
+ <div class="all_results">
+ </div>
+
+ <div id="share_btns" class="row">
+ <a id="permalink" href="#">Permalink</a>
+ </div>
+
+ <div id="about_btn" class="row">
+ <div class="align_center">
+ <a href="/" class="btn btn-sm btn-default home_button" role="button">Home</a>
+ <a class="btn btn-sm btn-default about_button" role="button">About</a>
+ <a class="btn btn-sm btn-default privacy_button" role="button">Privacy</a>
+ </div>
+
+ </div>
+
+</div>
+
+<div id="footer">
+ DullDream™ (beta) by <a href="http://constantdullaart.com">Constant Dullaart</a>.
+ Made in collaboration with <a href="http://ahprojects.com">Adam Harvey</a>
+</div>
+
+</body>
+<script type="text/html" id="result_template">
+ <div class="row">
+ <img src="{img}"><br>
+ <b>{title}</b>
+ </div>
+</script>
+<script type="text/json" id="dropdown_options">[]</script>
+<script type="text/javascript" src="static/js/vendor/jquery-3.3.1.min.js"></script>
+<script type="text/javascript" src="static/js/vendor/ExifReader.js"></script>
+<script type="text/javascript" src="static/js/vendor/canvas-to-blob.js"></script>
+<script type="text/javascript" src="static/js/vendor/prefixfree.js"></script>
+<script type="text/javascript" src="static/js/util.js"></script>
+<script type="text/javascript" src="static/js/upload.js"></script>
+<script type="text/javascript" src="static/js/app.js"></script>
+</html>
diff --git a/server/app/main/__init__.py b/server/app/main/__init__.py
new file mode 100644
index 00000000..a21e2754
--- /dev/null
+++ b/server/app/main/__init__.py
@@ -0,0 +1,5 @@
+from flask import Blueprint
+
+main = Blueprint('main', __name__)
+
+from . import views, errors, tasks, utils \ No newline at end of file
diff --git a/server/app/main/errors.py b/server/app/main/errors.py
new file mode 100644
index 00000000..60b5f227
--- /dev/null
+++ b/server/app/main/errors.py
@@ -0,0 +1,32 @@
+from flask import render_template, request, jsonify
+from . import main
+
+
@main.app_errorhandler(403)
def forbidden(e):
    """403 handler: JSON body for API clients that only accept JSON,
    rendered HTML error page otherwise."""
    accepts = request.accept_mimetypes
    if accepts.accept_json and not accepts.accept_html:
        payload = jsonify({'error': 'forbidden'})
        payload.status_code = 403
        return payload
    return render_template('403.html'), 403
+
+
@main.app_errorhandler(404)
def page_not_found(e):
    """404 handler: JSON body for API clients that only accept JSON,
    rendered HTML error page otherwise."""
    accepts = request.accept_mimetypes
    if accepts.accept_json and not accepts.accept_html:
        payload = jsonify({'error': 'not found'})
        payload.status_code = 404
        return payload
    return render_template('404.html'), 404
+
+
@main.app_errorhandler(500)
def internal_server_error(e):
    """500 handler: JSON body for API clients that only accept JSON,
    rendered HTML error page otherwise."""
    accepts = request.accept_mimetypes
    if accepts.accept_json and not accepts.accept_html:
        payload = jsonify({'error': 'internal server error'})
        payload.status_code = 500
        return payload
    return render_template('500.html'), 500
diff --git a/server/app/main/forms.py b/server/app/main/forms.py
new file mode 100644
index 00000000..bc1399ad
--- /dev/null
+++ b/server/app/main/forms.py
@@ -0,0 +1,60 @@
+from flask.ext.wtf import Form
+from wtforms import StringField, TextAreaField, BooleanField, SelectField,\
+ SubmitField
+from wtforms.validators import Required, Length, Email, Regexp
+from wtforms import ValidationError
+from flask.ext.pagedown.fields import PageDownField
+from ..models import Role, User
+
+
class NameForm(Form):
    """Single-field form asking for the user's name."""
    name = StringField('What is your name?', validators=[Required()])
    submit = SubmitField('Submit')
+
+
class EditProfileForm(Form):
    """Profile editor for regular users (name/location/about-me only)."""
    name = StringField('Real name', validators=[Length(0, 64)])
    location = StringField('Location', validators=[Length(0, 64)])
    about_me = TextAreaField('About me')
    submit = SubmitField('Submit')
+
+
class EditProfileAdminForm(Form):
    """Admin-only profile editor: additionally exposes email, username,
    confirmation state and role.

    NOTE(review): depends on Role/User from ..models, which is not part
    of this diff — this module appears copied from the "flasky" example
    app; confirm it is actually wired up here.
    """
    email = StringField('Email', validators=[Required(), Length(1, 64),
                                             Email()])
    username = StringField('Username', validators=[
        Required(), Length(1, 64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
                                          'Usernames must have only letters, '
                                          'numbers, dots or underscores')])
    confirmed = BooleanField('Confirmed')
    role = SelectField('Role', coerce=int)
    name = StringField('Real name', validators=[Length(0, 64)])
    location = StringField('Location', validators=[Length(0, 64)])
    about_me = TextAreaField('About me')
    submit = SubmitField('Submit')

    def __init__(self, user, *args, **kwargs):
        # populate the role dropdown from the DB and remember the user
        # being edited for the uniqueness checks below
        super(EditProfileAdminForm, self).__init__(*args, **kwargs)
        self.role.choices = [(role.id, role.name)
                             for role in Role.query.order_by(Role.name).all()]
        self.user = user

    def validate_email(self, field):
        # allow the user's current email; reject any other taken address
        if field.data != self.user.email and \
                User.query.filter_by(email=field.data).first():
            raise ValidationError('Email already registered.')

    def validate_username(self, field):
        # allow the user's current username; reject any other taken name
        if field.data != self.user.username and \
                User.query.filter_by(username=field.data).first():
            raise ValidationError('Username already in use.')
+
+
class PostForm(Form):
    """Markdown-capable blog-post composer (PageDown editor field)."""
    body = PageDownField("What's on your mind?", validators=[Required()])
    submit = SubmitField('Submit')
+
+
class CommentForm(Form):
    """Single-line comment entry form."""
    body = StringField('Enter your comment', validators=[Required()])
    submit = SubmitField('Submit')
diff --git a/server/app/main/img_proc_config.py b/server/app/main/img_proc_config.py
new file mode 100644
index 00000000..db124978
--- /dev/null
+++ b/server/app/main/img_proc_config.py
@@ -0,0 +1,20 @@
+# paths for image processors
+import os
+from os.path import join
+
class ImgProcConfig:
    """File-system locations of the DNN models used by the image
    processors (Mask R-CNN segmentation plus two pix2pix GANs)."""

    def __init__(self):
        # shared root for all model files
        dir_models = '/data_store/apps/dulldream/dnn_models'

        # mask rcnn: COCO class metadata and trained weights
        self.mask_rcnn_class_config = '/dulldream/src/config/coco_meta.json'
        self.mask_rcnn_model = join(dir_models,'tf/mask_rcnn_coco.h5')

        # p2p: pix2pix checkpoints trained on COCO people
        self.p2p_ckpts_dir = join(dir_models,'p2p/coco2014_person')
        self.p2p_epoch = 'latest'

        # p2p objects only: fallback network used when no person is found
        self.p2p_bg_ckpts_dir = join(dir_models,'p2p/coco2014_objects')
        self.p2p_bg_epoch = 'latest'
diff --git a/server/app/main/paths.py b/server/app/main/paths.py
new file mode 100644
index 00000000..69c21627
--- /dev/null
+++ b/server/app/main/paths.py
@@ -0,0 +1,19 @@
+from flask import current_app as app
+
+def get_paths(agree):
+ if agree:
+ return (
+ app.config['UPLOADS'],
+ app.config['RENDERS'],
+ app.config['JSON_DIR'],
+ app.config['UPLOADS_URI'],
+ app.config['RENDERS_URI'],
+ )
+ else:
+ return (
+ app.config['UPLOADS_PRIVATE'],
+ app.config['RENDERS_PRIVATE'],
+ app.config['JSON_PRIVATE_DIR'],
+ app.config['UPLOADS_PRIVATE_URI'],
+ app.config['RENDERS_PRIVATE_URI'],
+ )
diff --git a/server/app/main/tasks.py b/server/app/main/tasks.py
new file mode 100644
index 00000000..970e6988
--- /dev/null
+++ b/server/app/main/tasks.py
@@ -0,0 +1,374 @@
+import os
+import sys
+import time
+import datetime
+import json
+from PIL import Image, ImageFilter
+import cv2 as cv
+import numpy as np
+from . import main, utils
+from .. import basemodels
+from flask import current_app as app
+from .paths import get_paths
+celery = basemodels.celery
+from celery.utils.log import get_task_logger
+celery_logger = get_task_logger(__name__)
+import imutils
+
+
+# init image processors
+sys.path.append('/dulldream/src/')
+from .img_proc_config import ImgProcConfig
+from image_processors.mask_rcnn import MaskRCNN
+from image_processors.pix2pix import Pix2Pix
+from utils import imx
+from utils import fiox
+
+
# initialize the heavyweight image processors once at import time so the
# loaded models are shared across celery task invocations
img_proc_config = ImgProcConfig()
p2p = Pix2Pix(img_proc_config.p2p_ckpts_dir,epoch=img_proc_config.p2p_epoch)
# Consistency fix: the objects/background pix2pix previously reused
# p2p_epoch; it now uses its own configured p2p_bg_epoch (both are
# 'latest' today, so runtime behavior is unchanged).
p2p_objects = Pix2Pix(img_proc_config.p2p_bg_ckpts_dir,epoch=img_proc_config.p2p_bg_epoch)

mask_rcnn = MaskRCNN(img_proc_config.mask_rcnn_class_config,
    model_path=img_proc_config.mask_rcnn_model)
+
+
@celery.task(bind=True)
def task_dull(self, uuid_name, agree, mask_rcnn_result):
    """Celery task: generate the 'DullDream' render for an uploaded image.

    Pipeline when a person/object mask was found: region adjacency graph
    of the masked image -> best-of-N pix2pix generation -> feathered
    composite over the original photo. Otherwise: blur + RAG + the
    objects pix2pix over the whole frame. Progress is reported to the
    frontend via update_state; intermediate renders are appended to
    `files` and persisted as <uuid>.json.

    Args:
        uuid_name: basename (no extension) of the uploaded jpg.
        agree: user consent flag; selects public vs private dirs.
        mask_rcnn_result: dict from the segmentation step run in views
            ('valid', 'fp_im_mask', 'fp_seg_mask', ...).
    """
    celery_logger.debug('process_image_task, uuid: {}'.format(uuid_name))

    upload_dir, render_dir, json_dir, upload_uri, render_uri = get_paths(agree)

    files = []
    im = Image.open(os.path.join(upload_dir, uuid_name + '.jpg')).convert('RGB')
    #im_np = cv.cvtColor(imx.ensure_np(im),cv.COLOR_RGB2BGR)
    im_np = imx.ensure_np(im)
    # RGB -> BGR by reversing the channel axis (OpenCV convention)
    im_np = im_np[:,:,::-1]
    im = im.resize((256,256))
    im_np_256 = imutils.resize(im_np,width=256)

    # Add original
    fpath = os.path.join(render_dir, uuid_name + '_orig.jpg')
    im.save(fpath, 'JPEG', quality=95)
    files.append({
        'title': 'Original',
        'fn': render_uri + uuid_name + '_orig.jpg'
    })

    if mask_rcnn_result['valid']:
        # -----------------------------------------------
        # Segment image (processed in views)
        # seems to be an error with async celery processor?
        # -----------------------------------------------

        # parse mrcnn data
        im_mask = cv.imread(mask_rcnn_result['fp_im_mask'])
        seg_mask = cv.imread(mask_rcnn_result['fp_seg_mask'])
        #score = mask_rcnn_result['score']
        #name = mask_rcnn_result['name']
        #color = mask_rcnn_result['color']
        files.append({
            'title': 'Semantic Segmentation',
            'fn': render_uri + uuid_name + '_seg_mask.jpg'
        })
        files.append({
            'title': 'Semantic Segmentation Isolate',
            'fn': render_uri + uuid_name + '_im_mask.jpg'
        })


        # -----------------------------------------------
        # run rag generator
        # -----------------------------------------------

        self.update_state(
            state = 'PROCESSING',
            meta = {
                'percent': 0.50,
                'message': 'Applying Region Adjacency Graph',
                'uuid': uuid_name
            })

        # save the region adjacency graph
        im_rag = imx.create_rag_mean(im_mask,compactness=30,n_segments=128)
        fpath = os.path.join(render_dir, uuid_name + '_rgraph.jpg')
        imx.save_np_as_pil(fpath,im_rag,quality=95)
        files.append({
            'title': 'Region Adjacency Graph',
            'fn': render_uri + uuid_name + '_rgraph.jpg'
        })


        # -----------------------------------------------
        # generate p2p fake
        # -----------------------------------------------

        self.update_state(
            state = 'PROCESSING',
            meta = {
                'percent': 0.75,
                'message': 'Running generative adversarial network...',
                'uuid': uuid_name
            })


        # convert segmentation to mask
        seg_mask_gray = cv.cvtColor(seg_mask,cv.COLOR_BGR2GRAY)
        seg_mask_gray[seg_mask_gray > 1] = 255

        # find best P2P fit: generate `iters` candidates and keep the one
        # whose silhouette overlaps the segmentation mask the most
        ims_p2p = []
        match_amts = []
        iters = 15
        for i in range(0,iters):
            im_p2p = p2p.create_p2p(im_rag)
            ims_p2p.append(im_p2p)
            im_p2p_mask = cv.cvtColor(im_p2p,cv.COLOR_RGB2GRAY)
            im_p2p_mask[im_p2p_mask > 1] = 255
            # find where masks intersect
            matches = np.bitwise_and(im_p2p_mask,seg_mask_gray)
            amt = len(np.where(matches == 255)[0])
            match_amts.append(amt)
            self.update_state(
                state = 'PROCESSING',
                meta = {
                    'percent': 0.75,
                    'message': 'Generating ({}/{})'.format(i,iters),
                    'uuid': uuid_name
                })

        best_idx = np.argmax(match_amts)
        im_p2p = ims_p2p[best_idx]

        fpath = os.path.join(render_dir, uuid_name + '_gan.jpg')
        imx.save_np_as_pil(fpath,im_p2p,quality=95)
        files.append({
            'title': 'Generative Adversarial Network',
            'fn': render_uri + uuid_name + '_gan.jpg'
        })


        # -----------------------------------------------
        # composite GAN output over the original photo
        # -----------------------------------------------

        # announce to user
        self.update_state(
            state = 'PROCESSING',
            meta = {
                'percent': 0.90,
                'message': 'Compositing images...',
                'uuid': uuid_name
            })


        # apply masked cloning
        im_p2p_gray = cv.cvtColor(im_p2p,cv.COLOR_BGR2GRAY)
        im_clone_mask = np.zeros_like(im_p2p_gray,dtype=np.uint8)
        im_clone_mask[im_p2p_gray > 1] = 255


        # apply smoothed copy+paste clone: build a 3-channel float mask
        im_blur_mask = np.zeros(im_np_256.shape[:2],dtype=np.float64)
        im_blur_mask[im_p2p_gray > 1] = 1.0
        im_blur_mask = np.array([im_blur_mask,im_blur_mask,im_blur_mask]).transpose((1,2,0))

        # erode mask to remove black border
        kernel = np.ones((3,3),np.uint8)
        im_blur_mask = cv.erode(im_blur_mask,kernel,iterations = 3)

        # feather mask
        feather_amt = (3,3)
        im_blur_mask = (cv.GaussianBlur(im_blur_mask,feather_amt, 0) > 0) * 1.0 #?
        im_blur_mask = cv.GaussianBlur(im_blur_mask,feather_amt, 0)
        im_blur_mask = np.clip(im_blur_mask,0.0,1.0)

        # mask p2p fg --> photo bg (alpha blend with the feathered mask)
        im_dull = im_np_256.astype(np.float64) * (1.0 - im_blur_mask) + im_p2p.astype(np.float64) * im_blur_mask
        im_dull = im_dull.astype(np.uint8)


    else:
        print('No person. Apply background P2P')
        celery_logger.debug('No person. Apply background P2P, uuid: {}'.format(uuid_name))
        im_bg_blur = cv.GaussianBlur(im_np_256,(31,31),0)
        im_bg_rag = imx.create_rag_mean(im_bg_blur,compactness=30,n_segments=64)

        # apply gan
        im_dull = p2p_objects.create_p2p(im_bg_rag)

    # resize back to full 512px
    im_dull_512 = imutils.resize(im_dull,width=512)

    # save dulldream image
    fpath = os.path.join(render_dir, uuid_name + '_dull.jpg')
    imx.save_np_as_pil(fpath,im_dull_512,quality=95)
    files.append({
        'title': 'Your DullDream',
        'fn': render_uri + uuid_name + '_dull.jpg'
    })


    # -----------------------------------------------
    # Write data to disk
    # -----------------------------------------------

    data = {
        'uuid': uuid_name,
        'date': str(datetime.datetime.now()),
        'files': files
    }

    json_path = os.path.join(json_dir, uuid_name + '.json')
    with open(json_path, 'w') as json_file:
        json.dump(data, json_file)

    # NOTE(review): in-progress updates report percent as a fraction
    # (0.50, 0.75) but completion reports 100 — confirm what the
    # frontend expects.
    return {
        'percent': 100,
        'state': 'complete',
        'uuid': uuid_name
    }
+
+
+
+
@celery.task(bind=True)
def blur_task(self, uuid_name, agree, extra):
    """Celery demo task: save a blurred copy of the upload, with
    artificial sleeps so the frontend progress UI can be exercised.

    Args:
        uuid_name: basename (no extension) of the uploaded jpg.
        agree: user consent flag; selects public vs private dirs.
        extra: unused placeholder to match the common task signature.
    """
    celery_logger.debug('process_image_task, uuid: {}'.format(uuid_name))

    upload_dir, render_dir, json_dir, upload_uri, render_uri = get_paths(agree)

    files = []

    im = Image.open(os.path.join(upload_dir, uuid_name + '.jpg')).convert('RGB')
    im = im.resize((256,256))
    files.append({
        'title': 'Original image',
        'fn': upload_uri + uuid_name + '.jpg'
    })

    self.update_state(
        state = 'PROCESSING',
        meta = {
            'percent': 0.25,
            'message': 'Applying blur',
            'uuid': uuid_name
        })

    # 5x5 box blur on the numpy view, then back to PIL for saving
    im_np = utils.ensure_np(im)
    im_blur = cv.blur(im_np, (5,5), 1.0)
    im_blur_pil = utils.ensure_pil(im_blur)

    fn = uuid_name + '_blur.jpg'
    fpath = os.path.join(render_dir, fn)
    im_blur_pil.save(fpath, 'JPEG', quality=95)

    files.append({
        'title': 'Blurred image',
        'fn': render_uri + uuid_name + '_blur.jpg'
    })

    # deliberate delays: give the polling UI visible progress steps
    time.sleep(3)

    self.update_state(
        state = 'PROCESSING',
        meta = {
            'percent': 0.50,
            'message': 'Sleeping for some reason',
            'uuid': uuid_name
        })
    time.sleep(2)

    self.update_state(
        state = 'PROCESSING',
        meta = {
            'percent': 0.75,
            'message': 'Sleeping some more',
            'uuid': uuid_name
        })
    time.sleep(2)

    # persist the result manifest for the result page
    data = {
        'uuid': uuid_name,
        'date': str(datetime.datetime.now()),
        'files': files
    }

    json_path = os.path.join(json_dir, uuid_name + '.json')
    with open(json_path, 'w') as json_file:
        json.dump(data, json_file)

    celery_logger.debug('ok')

    return {
        'percent': 100,
        'state': 'complete',
        'uuid': uuid_name,
    }
+
@celery.task(bind=True)
def sleep_task(self, uuid_name):
    """Celery demo task: step through canned status messages with delays
    so the frontend progress UI can be tested without real processing.

    Returns the standard completion payload used by the other tasks.
    """
    # Bug fix: the original logged 'sleep_task'.format(uuid_name) — the
    # string has no placeholder, so the uuid was silently dropped.
    celery_logger.debug('sleep_task {}'.format(uuid_name))
    msgs = [
        {'msg':'Uploaded OK','time':.1},
        {'msg':'Segmenting Image...','time':2},
        {'msg':'Found: Person, Horse','time':1},
        {'msg':'Creating Pix2Pix','time':2}
    ]
    for i,m in enumerate(msgs):
        percent = int(float(i)/float(len(msgs))*100.0)
        self.update_state(
            state = 'PROCESSING',
            meta = {
                'percent': percent,
                'message': m['msg'],
                'uuid': uuid_name
            })
        celery_logger.debug(m['msg'])
        time.sleep(m['time'])

    return {
        'percent': 100,
        'state': 'complete',
        'uuid': uuid_name
    }
+
def make_task_json():
    """Serialize the active entries of task_lookup as JSON for the
    frontend task dropdown; the default task is flagged 'selected'."""
    dropdown = {}
    for name, spec in task_lookup.items():
        if spec.get('active', True) is False:
            continue
        dropdown[name] = {
            'name': name,
            'title': spec['title'],
            'selected': spec.get('default') is True,
        }
    return json.dumps(dropdown)
+
# Add all valid tasks to this lookup.
# Set 'active': False to disable a task
# Set 'default': True to define the default task

# Maps a task name (as used by the /status route and the frontend
# dropdown) to its display title, celery task callable and flags;
# consumed by make_task_json() and views.task_status().
task_lookup = {
    'sleep': {
        'title': 'Sleep Test',
        'task': sleep_task,
        'active': False
    },
    'blur': {
        'title': 'Blur',
        'task': blur_task,
        'active': False
    },
    'task_dull': {
        'title': 'DullDream V2',
        'task': task_dull,
        'active': True,
        'default': True
    }
}
+
diff --git a/server/app/main/utils.py b/server/app/main/utils.py
new file mode 100644
index 00000000..510e5c23
--- /dev/null
+++ b/server/app/main/utils.py
@@ -0,0 +1,37 @@
+from flask import current_app as app
+from PIL import Image
+import numpy as np
+import cv2 as cv
+import os
+from os.path import join
+
def ensure_pil(im):
    """Return *im* as a PIL Image: pass PIL images through unchanged,
    otherwise convert a numpy array to an RGB Image.

    NOTE(review): Image.verify() is used here as an "is this a PIL
    image" probe; verify() also invalidates file-backed images for
    further loading — confirm callers only pass in-memory images.
    """
    try:
        im.verify()
        return im
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed by the conversion fallback.
        return Image.fromarray(im.astype('uint8'), 'RGB')
+
def ensure_np(im):
    """Return *im* as a numpy array; ndarrays pass straight through,
    anything else is converted to a uint8 array.

    Improvement: uses isinstance rather than an exact `type(...) ==`
    comparison, so ndarray subclasses are also passed through unchanged.
    """
    if isinstance(im, np.ndarray):
        return im
    return np.asarray(im, np.uint8)
+
def get_recent_uploads(limit=10):
    """Return (upload_urls, render_urls, permalink_urls) for the most
    recently modified uploads that have a finished '_dull' render.

    NOTE(review): render URLs are built without the '.jpg' extension
    ('/img/renders/<id>_dull') while the file on disk is
    '<id>_dull.jpg', and `urls` is built from the *unfiltered*
    basenames so its length can differ from uploads/renders — verify
    both against the consuming route/template.
    """
    d_uploads = app.config['UPLOADS']
    d_renders = app.config['RENDERS']

    # list all files in uploads dir
    filenames = [s for s in os.listdir(d_uploads)
        if os.path.isfile(os.path.join(d_uploads, s))]
    # sort upload files by date (newest first)
    filenames.sort(key=lambda s: os.path.getmtime(os.path.join(d_uploads, s)),reverse=True)
    basenames = [os.path.splitext(os.path.basename(f))[0] for f in filenames]
    basenames = basenames[:limit]
    # keep only uploads whose render has completed
    filenames = [f for f in basenames if os.path.isfile(join(d_renders,'{}_dull.jpg'.format(f)))]

    # create list for uploads and renders
    uploads = [join('/img/uploads',f) for f in filenames]
    renders = [join('/img/renders','{}_dull'.format(f)) for f in filenames]
    urls = [join('/d',f) for f in basenames]
    return uploads, renders, urls
diff --git a/server/app/main/views.py b/server/app/main/views.py
new file mode 100644
index 00000000..11a8ca53
--- /dev/null
+++ b/server/app/main/views.py
@@ -0,0 +1,300 @@
+import os
+import uuid
+import json
+from flask import render_template, redirect, url_for, send_from_directory
+from flask import request, make_response, jsonify
+from . import main, utils
+from .tasks import task_lookup, make_task_json
+from PIL import Image, ImageOps
+import cv2 as cv
+
+from .paths import get_paths
+
+from flask import current_app as app
+from werkzeug.utils import secure_filename
+import imutils
+
+# ------------------------------------------------------------
+# Temp: run mask rcnn outside celery
+# ------------------------------------------------------------
+
+# init image processors
+import sys
+from .img_proc_config import ImgProcConfig
+sys.path.append('/dulldream/src/')
+from image_processors.mask_rcnn import MaskRCNN
+from utils import imx
+from utils import fiox
+
+img_proc_congif = ImgProcConfig()
+mask_rcnn = MaskRCNN(img_proc_congif.mask_rcnn_class_config,
+ model_path=img_proc_congif.mask_rcnn_model)
+
+# ------------------------------------------------------------
+# Tasks
+# ------------------------------------------------------------
+
+@main.route('/status/<task_name>/<task_id>')
+def task_status(task_name, task_id):
+ """Return celery image processing status"""
+ if task_name in task_lookup:
+ task = task_lookup[task_name]['task'].AsyncResult(task_id)
+ else:
+ return jsonify({
+ 'state': 'error',
+ 'percent': 100,
+ 'message': 'Unknown task'
+ })
+
+ app.logger.info('task state: {}'.format(task.state))
+ if task.state == 'PENDING':
+ response = {
+ 'state': task.state,
+ 'percent': 0,
+ 'message': 'Pending...'
+ }
+ elif task.state != 'FAILURE':
+ response = {
+ 'state': task.state,
+ 'percent': task.info.get('percent', 0),
+ 'uuid': task.info.get('uuid', 0),
+ 'message': task.info.get('message', '')
+ }
+ if 'result' in task.info:
+ response['result'] = task.info['result']
+ else:
+ # something went wrong in the background job
+ response = {
+ 'state': task.state,
+ 'percent': 100,
+ 'message': str(task.info), # this is the exception raised
+ }
+ return jsonify(response)
+
+# ------------------------------------------------------------
+# POST Routes
+# ------------------------------------------------------------
+
+@main.route('/upload/sleep', methods=['GET', 'POST'])
+def sleep_test():
+ async_task = task_lookup['sleep']['task'].apply_async(args=['sleep_test'])
+ task_url = url_for('main.task_status', task_name='sleep', task_id=async_task.id)
+ return jsonify({
+ 'result': True,
+ 'task_url': task_url,
+ })
+
+@main.route('/upload', methods=['POST'])
+def upload():
+
+ style = request.form['style']
+ print('style',style)
+ if style in task_lookup:
+ task = task_lookup[style]['task']
+ print('task',task)
+ else:
+ return jsonify({
+ 'result': False,
+ 'error': 'Unknown task',
+ })
+
+ file = request.files['user_image']
+ agree = bool(request.form['agree'])
+ ext = request.form['ext']
+ if ext is None:
+ ext = request.files['ext']
+
+ uuid_name = str(uuid.uuid4())
+
+ app.logger.info('[+] style: {}'.format(style))
+ app.logger.info('[+] ext: {}'.format(ext))
+ app.logger.info('[+] uuid_name: {}'.format(uuid_name))
+ app.logger.info('[+] agreed: {}'.format(agree))
+
+ # convert PNG to JPG
+ print('[+] Resizing image')
+
+ # LOL MaskRCNN needs to be run outside of the Celery Task
+ im = Image.open(file.stream).convert('RGB')
+ im = ImageOps.fit(im,(512,512))
+ if agree:
+ upload_folder = app.config['UPLOADS']
+ else:
+ upload_folder = app.config['UPLOADS_PRIVATE']
+
+ fpath = os.path.join(upload_folder, uuid_name + '.jpg')
+
+ # Save image to disk
+ print('[+] Save image to {}'.format(fpath))
+ im.save(fpath, 'JPEG', quality=100)
+ im_pil_256 = im.resize((256,256))
+
+ print('[+] ensure_np...')
+ im_np = imx.ensure_np(im_pil_256)
+ #print('[+] resize np...')
+ #im_np = imutils.resize(im_np,width=256)
+
+ upload_dir, render_dir, json_dir, upload_uri, render_uri = get_paths(agree)
+
+ print('[+] Run mrcnn...')
+ try:
+ result = mask_rcnn.create_segmentations(im_np,concat=True)
+ except:
+ print('[-] Error. Could not run mask_rcnn')
+ result = []
+
+ if len(result) > 0:
+ result = result[0]
+
+ # save data, then pass to celery task
+ print('[+] Save masks')
+ seg_mask = result['seg_mask']
+ fpath_seg_mask = os.path.join(render_dir, uuid_name + '_seg_mask.jpg')
+ #cv.imwrite(fpath_seg_mask,cv.cvtColor(seg_mask,cv.COLOR_BGR2RGB))
+ #seg_mask = seg_mask[:,:,::-1]
+ seg_mask_pil = imx.ensure_pil(seg_mask)
+ seg_mask_pil.save(fpath_seg_mask, 'JPEG', quality=100)
+
+ im_mask = result['im_mask']
+ fpath_im_mask = os.path.join(render_dir, uuid_name + '_im_mask.jpg')
+ #im_mask = im_mask[:,:,::-1]
+ im_mask_pil = imx.ensure_pil(im_mask)
+ im_mask_pil.save(fpath_im_mask, 'JPEG',quality=100)
+ #cv.imwrite(fpath_im_mask,cv.cvtColor(im_mask,cv.COLOR_BGR2RGB))
+
+ celery_result = {
+ 'score':str(result['score']),
+ 'name':str(result['name']),
+ 'class_index':str(result['class_index']),
+ 'color':str(result['color']),
+ 'fp_im_mask':fpath_im_mask,
+ 'fp_seg_mask':fpath_seg_mask,
+ 'valid':True
+ }
+ else:
+ print('[-] no reults. process background only')
+ celery_result = {
+ 'score':None,
+ 'name':None,
+ 'class_index':None,
+ 'color':None,
+ 'fp_im_mask':None,
+ 'fp_seg_mask':None,
+ 'valid':False
+ }
+
+ print('[+] Start celery')
+ async_task = task.apply_async(args=[uuid_name, agree, celery_result])
+ task_url = url_for('main.task_status', task_name=style, task_id=async_task.id)
+
+ return jsonify({
+ 'result': True,
+ 'task_url': task_url,
+ 'uuid': uuid_name
+ })
+
+
+
+# ----------------------------------------------------
+# Fileserver, temp solution
+# ----------------------------------------------------
+
+@main.route('/img/<string:imtype>/<string:uuid_name>')
+def get_image(imtype,uuid_name):
+ """Return image files from render or uploads"""
+ if imtype == 'uploads':
+ d = app.config['UPLOADS']
+ suffix = ''
+ elif imtype == 'renders':
+ d = app.config['RENDERS']
+ suffix = ''
+ elif imtype == 'fcn':
+ d = app.config['RENDERS']
+ suffix = '_fcn8'
+
+ fname = uuid_name + suffix + '.jpg'
+ fpath = os.path.join(d, fname)
+
+ if os.path.isfile(fpath):
+ return send_from_directory(d,fname)
+ else:
+ return send_from_directory('static', 'img/404.jpg')
+
+# ----------------------------------------------------
+# Deleting images
+# ----------------------------------------------------
+
+def destroy_data(uuid_name, is_public):
+ uri_base = app.config['URI_BASE']
+ upload_dir, render_dir, json_dir, upload_uri, render_uri = get_paths(is_public)
+
+ json_path = os.path.join(json_dir, uuid_name + '.json')
+ with open(json_path) as json_file:
+ data = json.load(json_file)
+ for f in data['files']:
+ path = os.path.join(uri_base, f['fn'][1:])
+ if os.path.exists(path):
+ os.remove(path)
+ os.remove(json_path)
+
+@main.route('/d/<uuid_name>/destroy', strict_slashes=False) # public
+def route_public_destroy(uuid_name):
+ destroy_data(uuid_name, True)
+ return redirect("/", code=302)
+
+@main.route('/p/<uuid_name>/destroy', strict_slashes=False) # private
+def route_private_destroy(uuid_name):
+ destroy_data(uuid_name, False)
+ return redirect("/", code=302)
+
+# ----------------------------------------------------
+# Static routes
+# ----------------------------------------------------
+
+# Most of the pages are served with the single page app in index.html:
+
+task_json = make_task_json()
+
+@main.route('/', strict_slashes=False)
+def index():
+ return render_template('index.html', task_json=task_json)
+
+@main.route('/about', strict_slashes=False)
+def about():
+ return render_template('index.html', task_json=task_json)
+
+@main.route('/d/<uuid_name>', strict_slashes=False) # public
+def route_public(uuid_name):
+ return render_template('index.html', task_json=task_json)
+
+@main.route('/p/<uuid_name>', strict_slashes=False) # private
+def route_private(uuid_name):
+ return render_template('index.html', task_json=task_json)
+
+@main.route('/privacy', strict_slashes=False)
+def privacy():
+ return render_template('index.html', task_json=task_json)
+
+# Some of the pages have their own static file:
+
+@main.route('/gallery', strict_slashes = False)
+def gallery():
+ app.logger.info('access gallery')
+ uploads, renders, urls = utils.get_recent_uploads(limit=50)
+ uuids = [os.path.splitext(os.path.basename(f))[0] for f in uploads]
+ images = [{'upload':u,'render':r, 'url':url} for u,r,url in zip(uploads,renders,urls)]
+ return render_template('gallery.html',images=images)
+
+@main.route('/zkm', strict_slashes=False)
+def zkm():
+ app.logger.info('access ZkM')
+ return render_template('zkm.html')
+
+@main.route('/celery', strict_slashes=False)
+def celery_route():
+ return render_template('celery.html')
+
+@main.route('/projector', strict_slashes=False)
+def projector():
+ uploads, renders,urls = utils.get_recent_uploads()
+ return render_template('projector.html', uploads=uploads, renders=renders)
diff --git a/server/app/static/css/bootstrap.min.css b/server/app/static/css/bootstrap.min.css
new file mode 100644
index 00000000..ed3905e0
--- /dev/null
+++ b/server/app/static/css/bootstrap.min.css
@@ -0,0 +1,6 @@
+/*!
+ * Bootstrap v3.3.7 (http://getbootstrap.com)
+ * Copyright 2011-2016 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ *//*! normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */html{font-family:sans-serif;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}article,aside,details,figcaption,figure,footer,header,hgroup,main,menu,nav,section,summary{display:block}audio,canvas,progress,video{display:inline-block;vertical-align:baseline}audio:not([controls]){display:none;height:0}[hidden],template{display:none}a{background-color:transparent}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:700}dfn{font-style:italic}h1{margin:.67em 0;font-size:2em}mark{color:#000;background:#ff0}small{font-size:80%}sub,sup{position:relative;font-size:75%;line-height:0;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}img{border:0}svg:not(:root){overflow:hidden}figure{margin:1em 40px}hr{height:0;-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box}pre{overflow:auto}code,kbd,pre,samp{font-family:monospace,monospace;font-size:1em}button,input,optgroup,select,textarea{margin:0;font:inherit;color:inherit}button{overflow:visible}button,select{text-transform:none}button,html input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer}button[disabled],html input[disabled]{cursor:default}button::-moz-focus-inner,input::-moz-focus-inner{padding:0;border:0}input{line-height:normal}input[type=checkbox],input[type=radio]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;padding:0}input[type=number]::-webkit-inner-spin-button,input[type=number]::-webkit-outer-spin-button{height:auto}input[type=search]{-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;-webkit-appearance:textfield}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}fieldset{padding:.35em .625em .75em;margin:0 2px;border:1px solid 
silver}legend{padding:0;border:0}textarea{overflow:auto}optgroup{font-weight:700}table{border-spacing:0;border-collapse:collapse}td,th{padding:0}/*! Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */@media print{*,:after,:before{color:#000!important;text-shadow:none!important;background:0 0!important;-webkit-box-shadow:none!important;box-shadow:none!important}a,a:visited{text-decoration:underline}a[href]:after{content:" (" attr(href) ")"}abbr[title]:after{content:" (" attr(title) ")"}a[href^="javascript:"]:after,a[href^="#"]:after{content:""}blockquote,pre{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}img,tr{page-break-inside:avoid}img{max-width:100%!important}h2,h3,p{orphans:3;widows:3}h2,h3{page-break-after:avoid}.navbar{display:none}.btn>.caret,.dropup>.btn>.caret{border-top-color:#000!important}.label{border:1px solid #000}.table{border-collapse:collapse!important}.table td,.table th{background-color:#fff!important}.table-bordered td,.table-bordered th{border:1px solid #ddd!important}}@font-face{font-family:'Glyphicons Halflings';src:url(../fonts/glyphicons-halflings-regular.eot);src:url(../fonts/glyphicons-halflings-regular.eot?#iefix) format('embedded-opentype'),url(../fonts/glyphicons-halflings-regular.woff2) format('woff2'),url(../fonts/glyphicons-halflings-regular.woff) format('woff'),url(../fonts/glyphicons-halflings-regular.ttf) format('truetype'),url(../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular) format('svg')}.glyphicon{position:relative;top:1px;display:inline-block;font-family:'Glyphicons 
Halflings';font-style:normal;font-weight:400;line-height:1;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.glyphicon-asterisk:before{content:"\002a"}.glyphicon-plus:before{content:"\002b"}.glyphicon-eur:before,.glyphicon-euro:before{content:"\20ac"}.glyphicon-minus:before{content:"\2212"}.glyphicon-cloud:before{content:"\2601"}.glyphicon-envelope:before{content:"\2709"}.glyphicon-pencil:before{content:"\270f"}.glyphicon-glass:before{content:"\e001"}.glyphicon-music:before{content:"\e002"}.glyphicon-search:before{content:"\e003"}.glyphicon-heart:before{content:"\e005"}.glyphicon-star:before{content:"\e006"}.glyphicon-star-empty:before{content:"\e007"}.glyphicon-user:before{content:"\e008"}.glyphicon-film:before{content:"\e009"}.glyphicon-th-large:before{content:"\e010"}.glyphicon-th:before{content:"\e011"}.glyphicon-th-list:before{content:"\e012"}.glyphicon-ok:before{content:"\e013"}.glyphicon-remove:before{content:"\e014"}.glyphicon-zoom-in:before{content:"\e015"}.glyphicon-zoom-out:before{content:"\e016"}.glyphicon-off:before{content:"\e017"}.glyphicon-signal:before{content:"\e018"}.glyphicon-cog:before{content:"\e019"}.glyphicon-trash:before{content:"\e020"}.glyphicon-home:before{content:"\e021"}.glyphicon-file:before{content:"\e022"}.glyphicon-time:before{content:"\e023"}.glyphicon-road:before{content:"\e024"}.glyphicon-download-alt:before{content:"\e025"}.glyphicon-download:before{content:"\e026"}.glyphicon-upload:before{content:"\e027"}.glyphicon-inbox:before{content:"\e028"}.glyphicon-play-circle:before{content:"\e029"}.glyphicon-repeat:before{content:"\e030"}.glyphicon-refresh:before{content:"\e031"}.glyphicon-list-alt:before{content:"\e032"}.glyphicon-lock:before{content:"\e033"}.glyphicon-flag:before{content:"\e034"}.glyphicon-headphones:before{content:"\e035"}.glyphicon-volume-off:before{content:"\e036"}.glyphicon-volume-down:before{content:"\e037"}.glyphicon-volume-up:before{content:"\e038"}.glyphicon-qrcode:before{content:"\e039"}.g
lyphicon-barcode:before{content:"\e040"}.glyphicon-tag:before{content:"\e041"}.glyphicon-tags:before{content:"\e042"}.glyphicon-book:before{content:"\e043"}.glyphicon-bookmark:before{content:"\e044"}.glyphicon-print:before{content:"\e045"}.glyphicon-camera:before{content:"\e046"}.glyphicon-font:before{content:"\e047"}.glyphicon-bold:before{content:"\e048"}.glyphicon-italic:before{content:"\e049"}.glyphicon-text-height:before{content:"\e050"}.glyphicon-text-width:before{content:"\e051"}.glyphicon-align-left:before{content:"\e052"}.glyphicon-align-center:before{content:"\e053"}.glyphicon-align-right:before{content:"\e054"}.glyphicon-align-justify:before{content:"\e055"}.glyphicon-list:before{content:"\e056"}.glyphicon-indent-left:before{content:"\e057"}.glyphicon-indent-right:before{content:"\e058"}.glyphicon-facetime-video:before{content:"\e059"}.glyphicon-picture:before{content:"\e060"}.glyphicon-map-marker:before{content:"\e062"}.glyphicon-adjust:before{content:"\e063"}.glyphicon-tint:before{content:"\e064"}.glyphicon-edit:before{content:"\e065"}.glyphicon-share:before{content:"\e066"}.glyphicon-check:before{content:"\e067"}.glyphicon-move:before{content:"\e068"}.glyphicon-step-backward:before{content:"\e069"}.glyphicon-fast-backward:before{content:"\e070"}.glyphicon-backward:before{content:"\e071"}.glyphicon-play:before{content:"\e072"}.glyphicon-pause:before{content:"\e073"}.glyphicon-stop:before{content:"\e074"}.glyphicon-forward:before{content:"\e075"}.glyphicon-fast-forward:before{content:"\e076"}.glyphicon-step-forward:before{content:"\e077"}.glyphicon-eject:before{content:"\e078"}.glyphicon-chevron-left:before{content:"\e079"}.glyphicon-chevron-right:before{content:"\e080"}.glyphicon-plus-sign:before{content:"\e081"}.glyphicon-minus-sign:before{content:"\e082"}.glyphicon-remove-sign:before{content:"\e083"}.glyphicon-ok-sign:before{content:"\e084"}.glyphicon-question-sign:before{content:"\e085"}.glyphicon-info-sign:before{content:"\e086"}.glyphicon-screenshot
:before{content:"\e087"}.glyphicon-remove-circle:before{content:"\e088"}.glyphicon-ok-circle:before{content:"\e089"}.glyphicon-ban-circle:before{content:"\e090"}.glyphicon-arrow-left:before{content:"\e091"}.glyphicon-arrow-right:before{content:"\e092"}.glyphicon-arrow-up:before{content:"\e093"}.glyphicon-arrow-down:before{content:"\e094"}.glyphicon-share-alt:before{content:"\e095"}.glyphicon-resize-full:before{content:"\e096"}.glyphicon-resize-small:before{content:"\e097"}.glyphicon-exclamation-sign:before{content:"\e101"}.glyphicon-gift:before{content:"\e102"}.glyphicon-leaf:before{content:"\e103"}.glyphicon-fire:before{content:"\e104"}.glyphicon-eye-open:before{content:"\e105"}.glyphicon-eye-close:before{content:"\e106"}.glyphicon-warning-sign:before{content:"\e107"}.glyphicon-plane:before{content:"\e108"}.glyphicon-calendar:before{content:"\e109"}.glyphicon-random:before{content:"\e110"}.glyphicon-comment:before{content:"\e111"}.glyphicon-magnet:before{content:"\e112"}.glyphicon-chevron-up:before{content:"\e113"}.glyphicon-chevron-down:before{content:"\e114"}.glyphicon-retweet:before{content:"\e115"}.glyphicon-shopping-cart:before{content:"\e116"}.glyphicon-folder-close:before{content:"\e117"}.glyphicon-folder-open:before{content:"\e118"}.glyphicon-resize-vertical:before{content:"\e119"}.glyphicon-resize-horizontal:before{content:"\e120"}.glyphicon-hdd:before{content:"\e121"}.glyphicon-bullhorn:before{content:"\e122"}.glyphicon-bell:before{content:"\e123"}.glyphicon-certificate:before{content:"\e124"}.glyphicon-thumbs-up:before{content:"\e125"}.glyphicon-thumbs-down:before{content:"\e126"}.glyphicon-hand-right:before{content:"\e127"}.glyphicon-hand-left:before{content:"\e128"}.glyphicon-hand-up:before{content:"\e129"}.glyphicon-hand-down:before{content:"\e130"}.glyphicon-circle-arrow-right:before{content:"\e131"}.glyphicon-circle-arrow-left:before{content:"\e132"}.glyphicon-circle-arrow-up:before{content:"\e133"}.glyphicon-circle-arrow-down:before{content:"\e134"
}.glyphicon-globe:before{content:"\e135"}.glyphicon-wrench:before{content:"\e136"}.glyphicon-tasks:before{content:"\e137"}.glyphicon-filter:before{content:"\e138"}.glyphicon-briefcase:before{content:"\e139"}.glyphicon-fullscreen:before{content:"\e140"}.glyphicon-dashboard:before{content:"\e141"}.glyphicon-paperclip:before{content:"\e142"}.glyphicon-heart-empty:before{content:"\e143"}.glyphicon-link:before{content:"\e144"}.glyphicon-phone:before{content:"\e145"}.glyphicon-pushpin:before{content:"\e146"}.glyphicon-usd:before{content:"\e148"}.glyphicon-gbp:before{content:"\e149"}.glyphicon-sort:before{content:"\e150"}.glyphicon-sort-by-alphabet:before{content:"\e151"}.glyphicon-sort-by-alphabet-alt:before{content:"\e152"}.glyphicon-sort-by-order:before{content:"\e153"}.glyphicon-sort-by-order-alt:before{content:"\e154"}.glyphicon-sort-by-attributes:before{content:"\e155"}.glyphicon-sort-by-attributes-alt:before{content:"\e156"}.glyphicon-unchecked:before{content:"\e157"}.glyphicon-expand:before{content:"\e158"}.glyphicon-collapse-down:before{content:"\e159"}.glyphicon-collapse-up:before{content:"\e160"}.glyphicon-log-in:before{content:"\e161"}.glyphicon-flash:before{content:"\e162"}.glyphicon-log-out:before{content:"\e163"}.glyphicon-new-window:before{content:"\e164"}.glyphicon-record:before{content:"\e165"}.glyphicon-save:before{content:"\e166"}.glyphicon-open:before{content:"\e167"}.glyphicon-saved:before{content:"\e168"}.glyphicon-import:before{content:"\e169"}.glyphicon-export:before{content:"\e170"}.glyphicon-send:before{content:"\e171"}.glyphicon-floppy-disk:before{content:"\e172"}.glyphicon-floppy-saved:before{content:"\e173"}.glyphicon-floppy-remove:before{content:"\e174"}.glyphicon-floppy-save:before{content:"\e175"}.glyphicon-floppy-open:before{content:"\e176"}.glyphicon-credit-card:before{content:"\e177"}.glyphicon-transfer:before{content:"\e178"}.glyphicon-cutlery:before{content:"\e179"}.glyphicon-header:before{content:"\e180"}.glyphicon-compressed:before{c
ontent:"\e181"}.glyphicon-earphone:before{content:"\e182"}.glyphicon-phone-alt:before{content:"\e183"}.glyphicon-tower:before{content:"\e184"}.glyphicon-stats:before{content:"\e185"}.glyphicon-sd-video:before{content:"\e186"}.glyphicon-hd-video:before{content:"\e187"}.glyphicon-subtitles:before{content:"\e188"}.glyphicon-sound-stereo:before{content:"\e189"}.glyphicon-sound-dolby:before{content:"\e190"}.glyphicon-sound-5-1:before{content:"\e191"}.glyphicon-sound-6-1:before{content:"\e192"}.glyphicon-sound-7-1:before{content:"\e193"}.glyphicon-copyright-mark:before{content:"\e194"}.glyphicon-registration-mark:before{content:"\e195"}.glyphicon-cloud-download:before{content:"\e197"}.glyphicon-cloud-upload:before{content:"\e198"}.glyphicon-tree-conifer:before{content:"\e199"}.glyphicon-tree-deciduous:before{content:"\e200"}.glyphicon-cd:before{content:"\e201"}.glyphicon-save-file:before{content:"\e202"}.glyphicon-open-file:before{content:"\e203"}.glyphicon-level-up:before{content:"\e204"}.glyphicon-copy:before{content:"\e205"}.glyphicon-paste:before{content:"\e206"}.glyphicon-alert:before{content:"\e209"}.glyphicon-equalizer:before{content:"\e210"}.glyphicon-king:before{content:"\e211"}.glyphicon-queen:before{content:"\e212"}.glyphicon-pawn:before{content:"\e213"}.glyphicon-bishop:before{content:"\e214"}.glyphicon-knight:before{content:"\e215"}.glyphicon-baby-formula:before{content:"\e216"}.glyphicon-tent:before{content:"\26fa"}.glyphicon-blackboard:before{content:"\e218"}.glyphicon-bed:before{content:"\e219"}.glyphicon-apple:before{content:"\f8ff"}.glyphicon-erase:before{content:"\e221"}.glyphicon-hourglass:before{content:"\231b"}.glyphicon-lamp:before{content:"\e223"}.glyphicon-duplicate:before{content:"\e224"}.glyphicon-piggy-bank:before{content:"\e225"}.glyphicon-scissors:before{content:"\e226"}.glyphicon-bitcoin:before{content:"\e227"}.glyphicon-btc:before{content:"\e227"}.glyphicon-xbt:before{content:"\e227"}.glyphicon-yen:before{content:"\00a5"}.glyphicon-jpy:befo
re{content:"\00a5"}.glyphicon-ruble:before{content:"\20bd"}.glyphicon-rub:before{content:"\20bd"}.glyphicon-scale:before{content:"\e230"}.glyphicon-ice-lolly:before{content:"\e231"}.glyphicon-ice-lolly-tasted:before{content:"\e232"}.glyphicon-education:before{content:"\e233"}.glyphicon-option-horizontal:before{content:"\e234"}.glyphicon-option-vertical:before{content:"\e235"}.glyphicon-menu-hamburger:before{content:"\e236"}.glyphicon-modal-window:before{content:"\e237"}.glyphicon-oil:before{content:"\e238"}.glyphicon-grain:before{content:"\e239"}.glyphicon-sunglasses:before{content:"\e240"}.glyphicon-text-size:before{content:"\e241"}.glyphicon-text-color:before{content:"\e242"}.glyphicon-text-background:before{content:"\e243"}.glyphicon-object-align-top:before{content:"\e244"}.glyphicon-object-align-bottom:before{content:"\e245"}.glyphicon-object-align-horizontal:before{content:"\e246"}.glyphicon-object-align-left:before{content:"\e247"}.glyphicon-object-align-vertical:before{content:"\e248"}.glyphicon-object-align-right:before{content:"\e249"}.glyphicon-triangle-right:before{content:"\e250"}.glyphicon-triangle-left:before{content:"\e251"}.glyphicon-triangle-bottom:before{content:"\e252"}.glyphicon-triangle-top:before{content:"\e253"}.glyphicon-console:before{content:"\e254"}.glyphicon-superscript:before{content:"\e255"}.glyphicon-subscript:before{content:"\e256"}.glyphicon-menu-left:before{content:"\e257"}.glyphicon-menu-right:before{content:"\e258"}.glyphicon-menu-down:before{content:"\e259"}.glyphicon-menu-up:before{content:"\e260"}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}:after,:before{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:10px;-webkit-tap-highlight-color:rgba(0,0,0,0)}body{font-family:"Helvetica 
Neue",Helvetica,Arial,sans-serif;font-size:14px;line-height:1.42857143;color:#333;background-color:#fff}button,input,select,textarea{font-family:inherit;font-size:inherit;line-height:inherit}a{color:#337ab7;text-decoration:none}a:focus,a:hover{color:#23527c;text-decoration:underline}a:focus{outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}figure{margin:0}img{vertical-align:middle}.carousel-inner>.item>a>img,.carousel-inner>.item>img,.img-responsive,.thumbnail a>img,.thumbnail>img{display:block;max-width:100%;height:auto}.img-rounded{border-radius:6px}.img-thumbnail{display:inline-block;max-width:100%;height:auto;padding:4px;line-height:1.42857143;background-color:#fff;border:1px solid #ddd;border-radius:4px;-webkit-transition:all .2s ease-in-out;-o-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.img-circle{border-radius:50%}hr{margin-top:20px;margin-bottom:20px;border:0;border-top:1px solid #eee}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}[role=button]{cursor:pointer}.h1,.h2,.h3,.h4,.h5,.h6,h1,h2,h3,h4,h5,h6{font-family:inherit;font-weight:500;line-height:1.1;color:inherit}.h1 .small,.h1 small,.h2 .small,.h2 small,.h3 .small,.h3 small,.h4 .small,.h4 small,.h5 .small,.h5 small,.h6 .small,.h6 small,h1 .small,h1 small,h2 .small,h2 small,h3 .small,h3 small,h4 .small,h4 small,h5 .small,h5 small,h6 .small,h6 small{font-weight:400;line-height:1;color:#777}.h1,.h2,.h3,h1,h2,h3{margin-top:20px;margin-bottom:10px}.h1 .small,.h1 small,.h2 .small,.h2 small,.h3 .small,.h3 small,h1 .small,h1 small,h2 .small,h2 small,h3 .small,h3 small{font-size:65%}.h4,.h5,.h6,h4,h5,h6{margin-top:10px;margin-bottom:10px}.h4 .small,.h4 small,.h5 .small,.h5 small,.h6 .small,.h6 small,h4 .small,h4 small,h5 .small,h5 small,h6 .small,h6 
small{font-size:75%}.h1,h1{font-size:36px}.h2,h2{font-size:30px}.h3,h3{font-size:24px}.h4,h4{font-size:18px}.h5,h5{font-size:14px}.h6,h6{font-size:12px}p{margin:0 0 10px}.lead{margin-bottom:20px;font-size:16px;font-weight:300;line-height:1.4}@media (min-width:768px){.lead{font-size:21px}}.small,small{font-size:85%}.mark,mark{padding:.2em;background-color:#fcf8e3}.text-left{text-align:left}.text-right{text-align:right}.text-center{text-align:center}.text-justify{text-align:justify}.text-nowrap{white-space:nowrap}.text-lowercase{text-transform:lowercase}.text-uppercase{text-transform:uppercase}.text-capitalize{text-transform:capitalize}.text-muted{color:#777}.text-primary{color:#337ab7}a.text-primary:focus,a.text-primary:hover{color:#286090}.text-success{color:#3c763d}a.text-success:focus,a.text-success:hover{color:#2b542c}.text-info{color:#31708f}a.text-info:focus,a.text-info:hover{color:#245269}.text-warning{color:#8a6d3b}a.text-warning:focus,a.text-warning:hover{color:#66512c}.text-danger{color:#a94442}a.text-danger:focus,a.text-danger:hover{color:#843534}.bg-primary{color:#fff;background-color:#337ab7}a.bg-primary:focus,a.bg-primary:hover{background-color:#286090}.bg-success{background-color:#dff0d8}a.bg-success:focus,a.bg-success:hover{background-color:#c1e2b3}.bg-info{background-color:#d9edf7}a.bg-info:focus,a.bg-info:hover{background-color:#afd9ee}.bg-warning{background-color:#fcf8e3}a.bg-warning:focus,a.bg-warning:hover{background-color:#f7ecb5}.bg-danger{background-color:#f2dede}a.bg-danger:focus,a.bg-danger:hover{background-color:#e4b9b9}.page-header{padding-bottom:9px;margin:40px 0 20px;border-bottom:1px solid #eee}ol,ul{margin-top:0;margin-bottom:10px}ol ol,ol ul,ul ol,ul 
ul{margin-bottom:0}.list-unstyled{padding-left:0;list-style:none}.list-inline{padding-left:0;margin-left:-5px;list-style:none}.list-inline>li{display:inline-block;padding-right:5px;padding-left:5px}dl{margin-top:0;margin-bottom:20px}dd,dt{line-height:1.42857143}dt{font-weight:700}dd{margin-left:0}@media (min-width:768px){.dl-horizontal dt{float:left;width:160px;overflow:hidden;clear:left;text-align:right;text-overflow:ellipsis;white-space:nowrap}.dl-horizontal dd{margin-left:180px}}abbr[data-original-title],abbr[title]{cursor:help;border-bottom:1px dotted #777}.initialism{font-size:90%;text-transform:uppercase}blockquote{padding:10px 20px;margin:0 0 20px;font-size:17.5px;border-left:5px solid #eee}blockquote ol:last-child,blockquote p:last-child,blockquote ul:last-child{margin-bottom:0}blockquote .small,blockquote footer,blockquote small{display:block;font-size:80%;line-height:1.42857143;color:#777}blockquote .small:before,blockquote footer:before,blockquote small:before{content:'\2014 \00A0'}.blockquote-reverse,blockquote.pull-right{padding-right:15px;padding-left:0;text-align:right;border-right:5px solid #eee;border-left:0}.blockquote-reverse .small:before,.blockquote-reverse footer:before,.blockquote-reverse small:before,blockquote.pull-right .small:before,blockquote.pull-right footer:before,blockquote.pull-right small:before{content:''}.blockquote-reverse .small:after,.blockquote-reverse footer:after,.blockquote-reverse small:after,blockquote.pull-right .small:after,blockquote.pull-right footer:after,blockquote.pull-right small:after{content:'\00A0 \2014'}address{margin-bottom:20px;font-style:normal;line-height:1.42857143}code,kbd,pre,samp{font-family:Menlo,Monaco,Consolas,"Courier New",monospace}code{padding:2px 4px;font-size:90%;color:#c7254e;background-color:#f9f2f4;border-radius:4px}kbd{padding:2px 4px;font-size:90%;color:#fff;background-color:#333;border-radius:3px;-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,.25);box-shadow:inset 0 -1px 0 
rgba(0,0,0,.25)}kbd kbd{padding:0;font-size:100%;font-weight:700;-webkit-box-shadow:none;box-shadow:none}pre{display:block;padding:9.5px;margin:0 0 10px;font-size:13px;line-height:1.42857143;color:#333;word-break:break-all;word-wrap:break-word;background-color:#f5f5f5;border:1px solid #ccc;border-radius:4px}pre code{padding:0;font-size:inherit;color:inherit;white-space:pre-wrap;background-color:transparent;border-radius:0}.pre-scrollable{max-height:340px;overflow-y:scroll}.container{padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}@media (min-width:768px){.container{width:750px}}@media (min-width:992px){.container{width:970px}}@media (min-width:1200px){.container{width:1170px}}.container-fluid{padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}.row{margin-right:-15px;margin-left:-15px}.col-lg-1,.col-lg-10,.col-lg-11,.col-lg-12,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-md-1,.col-md-10,.col-md-11,.col-md-12,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-sm-1,.col-sm-10,.col-sm-11,.col-sm-12,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-xs-1,.col-xs-10,.col-xs-11,.col-xs-12,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9{position:relative;min-height:1px;padding-right:15px;padding-left:15px}.col-xs-1,.col-xs-10,.col-xs-11,.col-xs-12,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9{float:left}.col-xs-12{width:100%}.col-xs-11{width:91.66666667%}.col-xs-10{width:83.33333333%}.col-xs-9{width:75%}.col-xs-8{width:66.66666667%}.col-xs-7{width:58.33333333%}.col-xs-6{width:50%}.col-xs-5{width:41.66666667%}.col-xs-4{width:33.33333333%}.col-xs-3{width:25%}.col-xs-2{width:16.66666667%}.col-xs-1{width:8.33333333%}.col-xs-pull-12{right:100%}.col-xs-pull-11{right:91.66666667%}.col-xs-pull-10{right:83.33333333%}.col-xs-pull-9{right:75%}.col-xs-pull-8{right:66.666
66667%}.col-xs-pull-7{right:58.33333333%}.col-xs-pull-6{right:50%}.col-xs-pull-5{right:41.66666667%}.col-xs-pull-4{right:33.33333333%}.col-xs-pull-3{right:25%}.col-xs-pull-2{right:16.66666667%}.col-xs-pull-1{right:8.33333333%}.col-xs-pull-0{right:auto}.col-xs-push-12{left:100%}.col-xs-push-11{left:91.66666667%}.col-xs-push-10{left:83.33333333%}.col-xs-push-9{left:75%}.col-xs-push-8{left:66.66666667%}.col-xs-push-7{left:58.33333333%}.col-xs-push-6{left:50%}.col-xs-push-5{left:41.66666667%}.col-xs-push-4{left:33.33333333%}.col-xs-push-3{left:25%}.col-xs-push-2{left:16.66666667%}.col-xs-push-1{left:8.33333333%}.col-xs-push-0{left:auto}.col-xs-offset-12{margin-left:100%}.col-xs-offset-11{margin-left:91.66666667%}.col-xs-offset-10{margin-left:83.33333333%}.col-xs-offset-9{margin-left:75%}.col-xs-offset-8{margin-left:66.66666667%}.col-xs-offset-7{margin-left:58.33333333%}.col-xs-offset-6{margin-left:50%}.col-xs-offset-5{margin-left:41.66666667%}.col-xs-offset-4{margin-left:33.33333333%}.col-xs-offset-3{margin-left:25%}.col-xs-offset-2{margin-left:16.66666667%}.col-xs-offset-1{margin-left:8.33333333%}.col-xs-offset-0{margin-left:0}@media 
(min-width:768px){.col-sm-1,.col-sm-10,.col-sm-11,.col-sm-12,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9{float:left}.col-sm-12{width:100%}.col-sm-11{width:91.66666667%}.col-sm-10{width:83.33333333%}.col-sm-9{width:75%}.col-sm-8{width:66.66666667%}.col-sm-7{width:58.33333333%}.col-sm-6{width:50%}.col-sm-5{width:41.66666667%}.col-sm-4{width:33.33333333%}.col-sm-3{width:25%}.col-sm-2{width:16.66666667%}.col-sm-1{width:8.33333333%}.col-sm-pull-12{right:100%}.col-sm-pull-11{right:91.66666667%}.col-sm-pull-10{right:83.33333333%}.col-sm-pull-9{right:75%}.col-sm-pull-8{right:66.66666667%}.col-sm-pull-7{right:58.33333333%}.col-sm-pull-6{right:50%}.col-sm-pull-5{right:41.66666667%}.col-sm-pull-4{right:33.33333333%}.col-sm-pull-3{right:25%}.col-sm-pull-2{right:16.66666667%}.col-sm-pull-1{right:8.33333333%}.col-sm-pull-0{right:auto}.col-sm-push-12{left:100%}.col-sm-push-11{left:91.66666667%}.col-sm-push-10{left:83.33333333%}.col-sm-push-9{left:75%}.col-sm-push-8{left:66.66666667%}.col-sm-push-7{left:58.33333333%}.col-sm-push-6{left:50%}.col-sm-push-5{left:41.66666667%}.col-sm-push-4{left:33.33333333%}.col-sm-push-3{left:25%}.col-sm-push-2{left:16.66666667%}.col-sm-push-1{left:8.33333333%}.col-sm-push-0{left:auto}.col-sm-offset-12{margin-left:100%}.col-sm-offset-11{margin-left:91.66666667%}.col-sm-offset-10{margin-left:83.33333333%}.col-sm-offset-9{margin-left:75%}.col-sm-offset-8{margin-left:66.66666667%}.col-sm-offset-7{margin-left:58.33333333%}.col-sm-offset-6{margin-left:50%}.col-sm-offset-5{margin-left:41.66666667%}.col-sm-offset-4{margin-left:33.33333333%}.col-sm-offset-3{margin-left:25%}.col-sm-offset-2{margin-left:16.66666667%}.col-sm-offset-1{margin-left:8.33333333%}.col-sm-offset-0{margin-left:0}}@media 
(min-width:992px){.col-md-1,.col-md-10,.col-md-11,.col-md-12,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9{float:left}.col-md-12{width:100%}.col-md-11{width:91.66666667%}.col-md-10{width:83.33333333%}.col-md-9{width:75%}.col-md-8{width:66.66666667%}.col-md-7{width:58.33333333%}.col-md-6{width:50%}.col-md-5{width:41.66666667%}.col-md-4{width:33.33333333%}.col-md-3{width:25%}.col-md-2{width:16.66666667%}.col-md-1{width:8.33333333%}.col-md-pull-12{right:100%}.col-md-pull-11{right:91.66666667%}.col-md-pull-10{right:83.33333333%}.col-md-pull-9{right:75%}.col-md-pull-8{right:66.66666667%}.col-md-pull-7{right:58.33333333%}.col-md-pull-6{right:50%}.col-md-pull-5{right:41.66666667%}.col-md-pull-4{right:33.33333333%}.col-md-pull-3{right:25%}.col-md-pull-2{right:16.66666667%}.col-md-pull-1{right:8.33333333%}.col-md-pull-0{right:auto}.col-md-push-12{left:100%}.col-md-push-11{left:91.66666667%}.col-md-push-10{left:83.33333333%}.col-md-push-9{left:75%}.col-md-push-8{left:66.66666667%}.col-md-push-7{left:58.33333333%}.col-md-push-6{left:50%}.col-md-push-5{left:41.66666667%}.col-md-push-4{left:33.33333333%}.col-md-push-3{left:25%}.col-md-push-2{left:16.66666667%}.col-md-push-1{left:8.33333333%}.col-md-push-0{left:auto}.col-md-offset-12{margin-left:100%}.col-md-offset-11{margin-left:91.66666667%}.col-md-offset-10{margin-left:83.33333333%}.col-md-offset-9{margin-left:75%}.col-md-offset-8{margin-left:66.66666667%}.col-md-offset-7{margin-left:58.33333333%}.col-md-offset-6{margin-left:50%}.col-md-offset-5{margin-left:41.66666667%}.col-md-offset-4{margin-left:33.33333333%}.col-md-offset-3{margin-left:25%}.col-md-offset-2{margin-left:16.66666667%}.col-md-offset-1{margin-left:8.33333333%}.col-md-offset-0{margin-left:0}}@media 
(min-width:1200px){.col-lg-1,.col-lg-10,.col-lg-11,.col-lg-12,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9{float:left}.col-lg-12{width:100%}.col-lg-11{width:91.66666667%}.col-lg-10{width:83.33333333%}.col-lg-9{width:75%}.col-lg-8{width:66.66666667%}.col-lg-7{width:58.33333333%}.col-lg-6{width:50%}.col-lg-5{width:41.66666667%}.col-lg-4{width:33.33333333%}.col-lg-3{width:25%}.col-lg-2{width:16.66666667%}.col-lg-1{width:8.33333333%}.col-lg-pull-12{right:100%}.col-lg-pull-11{right:91.66666667%}.col-lg-pull-10{right:83.33333333%}.col-lg-pull-9{right:75%}.col-lg-pull-8{right:66.66666667%}.col-lg-pull-7{right:58.33333333%}.col-lg-pull-6{right:50%}.col-lg-pull-5{right:41.66666667%}.col-lg-pull-4{right:33.33333333%}.col-lg-pull-3{right:25%}.col-lg-pull-2{right:16.66666667%}.col-lg-pull-1{right:8.33333333%}.col-lg-pull-0{right:auto}.col-lg-push-12{left:100%}.col-lg-push-11{left:91.66666667%}.col-lg-push-10{left:83.33333333%}.col-lg-push-9{left:75%}.col-lg-push-8{left:66.66666667%}.col-lg-push-7{left:58.33333333%}.col-lg-push-6{left:50%}.col-lg-push-5{left:41.66666667%}.col-lg-push-4{left:33.33333333%}.col-lg-push-3{left:25%}.col-lg-push-2{left:16.66666667%}.col-lg-push-1{left:8.33333333%}.col-lg-push-0{left:auto}.col-lg-offset-12{margin-left:100%}.col-lg-offset-11{margin-left:91.66666667%}.col-lg-offset-10{margin-left:83.33333333%}.col-lg-offset-9{margin-left:75%}.col-lg-offset-8{margin-left:66.66666667%}.col-lg-offset-7{margin-left:58.33333333%}.col-lg-offset-6{margin-left:50%}.col-lg-offset-5{margin-left:41.66666667%}.col-lg-offset-4{margin-left:33.33333333%}.col-lg-offset-3{margin-left:25%}.col-lg-offset-2{margin-left:16.66666667%}.col-lg-offset-1{margin-left:8.33333333%}.col-lg-offset-0{margin-left:0}}table{background-color:transparent}caption{padding-top:8px;padding-bottom:8px;color:#777;text-align:left}th{text-align:left}.table{width:100%;max-width:100%;margin-bottom:20px}.table>tbody>tr>td,.table>tbody>tr>th,.table>tfoot>tr>td,.table>
tfoot>tr>th,.table>thead>tr>td,.table>thead>tr>th{padding:8px;line-height:1.42857143;vertical-align:top;border-top:1px solid #ddd}.table>thead>tr>th{vertical-align:bottom;border-bottom:2px solid #ddd}.table>caption+thead>tr:first-child>td,.table>caption+thead>tr:first-child>th,.table>colgroup+thead>tr:first-child>td,.table>colgroup+thead>tr:first-child>th,.table>thead:first-child>tr:first-child>td,.table>thead:first-child>tr:first-child>th{border-top:0}.table>tbody+tbody{border-top:2px solid #ddd}.table .table{background-color:#fff}.table-condensed>tbody>tr>td,.table-condensed>tbody>tr>th,.table-condensed>tfoot>tr>td,.table-condensed>tfoot>tr>th,.table-condensed>thead>tr>td,.table-condensed>thead>tr>th{padding:5px}.table-bordered{border:1px solid #ddd}.table-bordered>tbody>tr>td,.table-bordered>tbody>tr>th,.table-bordered>tfoot>tr>td,.table-bordered>tfoot>tr>th,.table-bordered>thead>tr>td,.table-bordered>thead>tr>th{border:1px solid #ddd}.table-bordered>thead>tr>td,.table-bordered>thead>tr>th{border-bottom-width:2px}.table-striped>tbody>tr:nth-of-type(odd){background-color:#f9f9f9}.table-hover>tbody>tr:hover{background-color:#f5f5f5}table col[class*=col-]{position:static;display:table-column;float:none}table td[class*=col-],table 
th[class*=col-]{position:static;display:table-cell;float:none}.table>tbody>tr.active>td,.table>tbody>tr.active>th,.table>tbody>tr>td.active,.table>tbody>tr>th.active,.table>tfoot>tr.active>td,.table>tfoot>tr.active>th,.table>tfoot>tr>td.active,.table>tfoot>tr>th.active,.table>thead>tr.active>td,.table>thead>tr.active>th,.table>thead>tr>td.active,.table>thead>tr>th.active{background-color:#f5f5f5}.table-hover>tbody>tr.active:hover>td,.table-hover>tbody>tr.active:hover>th,.table-hover>tbody>tr:hover>.active,.table-hover>tbody>tr>td.active:hover,.table-hover>tbody>tr>th.active:hover{background-color:#e8e8e8}.table>tbody>tr.success>td,.table>tbody>tr.success>th,.table>tbody>tr>td.success,.table>tbody>tr>th.success,.table>tfoot>tr.success>td,.table>tfoot>tr.success>th,.table>tfoot>tr>td.success,.table>tfoot>tr>th.success,.table>thead>tr.success>td,.table>thead>tr.success>th,.table>thead>tr>td.success,.table>thead>tr>th.success{background-color:#dff0d8}.table-hover>tbody>tr.success:hover>td,.table-hover>tbody>tr.success:hover>th,.table-hover>tbody>tr:hover>.success,.table-hover>tbody>tr>td.success:hover,.table-hover>tbody>tr>th.success:hover{background-color:#d0e9c6}.table>tbody>tr.info>td,.table>tbody>tr.info>th,.table>tbody>tr>td.info,.table>tbody>tr>th.info,.table>tfoot>tr.info>td,.table>tfoot>tr.info>th,.table>tfoot>tr>td.info,.table>tfoot>tr>th.info,.table>thead>tr.info>td,.table>thead>tr.info>th,.table>thead>tr>td.info,.table>thead>tr>th.info{background-color:#d9edf7}.table-hover>tbody>tr.info:hover>td,.table-hover>tbody>tr.info:hover>th,.table-hover>tbody>tr:hover>.info,.table-hover>tbody>tr>td.info:hover,.table-hover>tbody>tr>th.info:hover{background-color:#c4e3f3}.table>tbody>tr.warning>td,.table>tbody>tr.warning>th,.table>tbody>tr>td.warning,.table>tbody>tr>th.warning,.table>tfoot>tr.warning>td,.table>tfoot>tr.warning>th,.table>tfoot>tr>td.warning,.table>tfoot>tr>th.warning,.table>thead>tr.warning>td,.table>thead>tr.warning>th,.table>thead>tr>td.warning,.table>t
head>tr>th.warning{background-color:#fcf8e3}.table-hover>tbody>tr.warning:hover>td,.table-hover>tbody>tr.warning:hover>th,.table-hover>tbody>tr:hover>.warning,.table-hover>tbody>tr>td.warning:hover,.table-hover>tbody>tr>th.warning:hover{background-color:#faf2cc}.table>tbody>tr.danger>td,.table>tbody>tr.danger>th,.table>tbody>tr>td.danger,.table>tbody>tr>th.danger,.table>tfoot>tr.danger>td,.table>tfoot>tr.danger>th,.table>tfoot>tr>td.danger,.table>tfoot>tr>th.danger,.table>thead>tr.danger>td,.table>thead>tr.danger>th,.table>thead>tr>td.danger,.table>thead>tr>th.danger{background-color:#f2dede}.table-hover>tbody>tr.danger:hover>td,.table-hover>tbody>tr.danger:hover>th,.table-hover>tbody>tr:hover>.danger,.table-hover>tbody>tr>td.danger:hover,.table-hover>tbody>tr>th.danger:hover{background-color:#ebcccc}.table-responsive{min-height:.01%;overflow-x:auto}@media screen and (max-width:767px){.table-responsive{width:100%;margin-bottom:15px;overflow-y:hidden;-ms-overflow-style:-ms-autohiding-scrollbar;border:1px solid 
#ddd}.table-responsive>.table{margin-bottom:0}.table-responsive>.table>tbody>tr>td,.table-responsive>.table>tbody>tr>th,.table-responsive>.table>tfoot>tr>td,.table-responsive>.table>tfoot>tr>th,.table-responsive>.table>thead>tr>td,.table-responsive>.table>thead>tr>th{white-space:nowrap}.table-responsive>.table-bordered{border:0}.table-responsive>.table-bordered>tbody>tr>td:first-child,.table-responsive>.table-bordered>tbody>tr>th:first-child,.table-responsive>.table-bordered>tfoot>tr>td:first-child,.table-responsive>.table-bordered>tfoot>tr>th:first-child,.table-responsive>.table-bordered>thead>tr>td:first-child,.table-responsive>.table-bordered>thead>tr>th:first-child{border-left:0}.table-responsive>.table-bordered>tbody>tr>td:last-child,.table-responsive>.table-bordered>tbody>tr>th:last-child,.table-responsive>.table-bordered>tfoot>tr>td:last-child,.table-responsive>.table-bordered>tfoot>tr>th:last-child,.table-responsive>.table-bordered>thead>tr>td:last-child,.table-responsive>.table-bordered>thead>tr>th:last-child{border-right:0}.table-responsive>.table-bordered>tbody>tr:last-child>td,.table-responsive>.table-bordered>tbody>tr:last-child>th,.table-responsive>.table-bordered>tfoot>tr:last-child>td,.table-responsive>.table-bordered>tfoot>tr:last-child>th{border-bottom:0}}fieldset{min-width:0;padding:0;margin:0;border:0}legend{display:block;width:100%;padding:0;margin-bottom:20px;font-size:21px;line-height:inherit;color:#333;border:0;border-bottom:1px solid #e5e5e5}label{display:inline-block;max-width:100%;margin-bottom:5px;font-weight:700}input[type=search]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type=checkbox],input[type=radio]{margin:4px 0 0;margin-top:1px\9;line-height:normal}input[type=file]{display:block}input[type=range]{display:block;width:100%}select[multiple],select[size]{height:auto}input[type=file]:focus,input[type=checkbox]:focus,input[type=radio]:focus{outline:5px auto 
-webkit-focus-ring-color;outline-offset:-2px}output{display:block;padding-top:7px;font-size:14px;line-height:1.42857143;color:#555}.form-control{display:block;width:100%;height:34px;padding:6px 12px;font-size:14px;line-height:1.42857143;color:#555;background-color:#fff;background-image:none;border:1px solid #ccc;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075);-webkit-transition:border-color ease-in-out .15s,-webkit-box-shadow ease-in-out .15s;-o-transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s;transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s}.form-control:focus{border-color:#66afe9;outline:0;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 8px rgba(102,175,233,.6);box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 8px rgba(102,175,233,.6)}.form-control::-moz-placeholder{color:#999;opacity:1}.form-control:-ms-input-placeholder{color:#999}.form-control::-webkit-input-placeholder{color:#999}.form-control::-ms-expand{background-color:transparent;border:0}.form-control[disabled],.form-control[readonly],fieldset[disabled] .form-control{background-color:#eee;opacity:1}.form-control[disabled],fieldset[disabled] .form-control{cursor:not-allowed}textarea.form-control{height:auto}input[type=search]{-webkit-appearance:none}@media screen and (-webkit-min-device-pixel-ratio:0){input[type=date].form-control,input[type=time].form-control,input[type=datetime-local].form-control,input[type=month].form-control{line-height:34px}.input-group-sm input[type=date],.input-group-sm input[type=time],.input-group-sm input[type=datetime-local],.input-group-sm input[type=month],input[type=date].input-sm,input[type=time].input-sm,input[type=datetime-local].input-sm,input[type=month].input-sm{line-height:30px}.input-group-lg input[type=date],.input-group-lg input[type=time],.input-group-lg input[type=datetime-local],.input-group-lg 
input[type=month],input[type=date].input-lg,input[type=time].input-lg,input[type=datetime-local].input-lg,input[type=month].input-lg{line-height:46px}}.form-group{margin-bottom:15px}.checkbox,.radio{position:relative;display:block;margin-top:10px;margin-bottom:10px}.checkbox label,.radio label{min-height:20px;padding-left:20px;margin-bottom:0;font-weight:400;cursor:pointer}.checkbox input[type=checkbox],.checkbox-inline input[type=checkbox],.radio input[type=radio],.radio-inline input[type=radio]{position:absolute;margin-top:4px\9;margin-left:-20px}.checkbox+.checkbox,.radio+.radio{margin-top:-5px}.checkbox-inline,.radio-inline{position:relative;display:inline-block;padding-left:20px;margin-bottom:0;font-weight:400;vertical-align:middle;cursor:pointer}.checkbox-inline+.checkbox-inline,.radio-inline+.radio-inline{margin-top:0;margin-left:10px}fieldset[disabled] input[type=checkbox],fieldset[disabled] input[type=radio],input[type=checkbox].disabled,input[type=checkbox][disabled],input[type=radio].disabled,input[type=radio][disabled]{cursor:not-allowed}.checkbox-inline.disabled,.radio-inline.disabled,fieldset[disabled] .checkbox-inline,fieldset[disabled] .radio-inline{cursor:not-allowed}.checkbox.disabled label,.radio.disabled label,fieldset[disabled] .checkbox label,fieldset[disabled] .radio label{cursor:not-allowed}.form-control-static{min-height:34px;padding-top:7px;padding-bottom:7px;margin-bottom:0}.form-control-static.input-lg,.form-control-static.input-sm{padding-right:0;padding-left:0}.input-sm{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-sm{height:30px;line-height:30px}select[multiple].input-sm,textarea.input-sm{height:auto}.form-group-sm .form-control{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}.form-group-sm select.form-control{height:30px;line-height:30px}.form-group-sm select[multiple].form-control,.form-group-sm textarea.form-control{height:auto}.form-group-sm 
.form-control-static{height:30px;min-height:32px;padding:6px 10px;font-size:12px;line-height:1.5}.input-lg{height:46px;padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}select.input-lg{height:46px;line-height:46px}select[multiple].input-lg,textarea.input-lg{height:auto}.form-group-lg .form-control{height:46px;padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}.form-group-lg select.form-control{height:46px;line-height:46px}.form-group-lg select[multiple].form-control,.form-group-lg textarea.form-control{height:auto}.form-group-lg .form-control-static{height:46px;min-height:38px;padding:11px 16px;font-size:18px;line-height:1.3333333}.has-feedback{position:relative}.has-feedback .form-control{padding-right:42.5px}.form-control-feedback{position:absolute;top:0;right:0;z-index:2;display:block;width:34px;height:34px;line-height:34px;text-align:center;pointer-events:none}.form-group-lg .form-control+.form-control-feedback,.input-group-lg+.form-control-feedback,.input-lg+.form-control-feedback{width:46px;height:46px;line-height:46px}.form-group-sm .form-control+.form-control-feedback,.input-group-sm+.form-control-feedback,.input-sm+.form-control-feedback{width:30px;height:30px;line-height:30px}.has-success .checkbox,.has-success .checkbox-inline,.has-success .control-label,.has-success .help-block,.has-success .radio,.has-success .radio-inline,.has-success.checkbox label,.has-success.checkbox-inline label,.has-success.radio label,.has-success.radio-inline label{color:#3c763d}.has-success .form-control{border-color:#3c763d;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-success .form-control:focus{border-color:#2b542c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #67b168;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #67b168}.has-success .input-group-addon{color:#3c763d;background-color:#dff0d8;border-color:#3c763d}.has-success 
.form-control-feedback{color:#3c763d}.has-warning .checkbox,.has-warning .checkbox-inline,.has-warning .control-label,.has-warning .help-block,.has-warning .radio,.has-warning .radio-inline,.has-warning.checkbox label,.has-warning.checkbox-inline label,.has-warning.radio label,.has-warning.radio-inline label{color:#8a6d3b}.has-warning .form-control{border-color:#8a6d3b;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-warning .form-control:focus{border-color:#66512c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #c0a16b;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #c0a16b}.has-warning .input-group-addon{color:#8a6d3b;background-color:#fcf8e3;border-color:#8a6d3b}.has-warning .form-control-feedback{color:#8a6d3b}.has-error .checkbox,.has-error .checkbox-inline,.has-error .control-label,.has-error .help-block,.has-error .radio,.has-error .radio-inline,.has-error.checkbox label,.has-error.checkbox-inline label,.has-error.radio label,.has-error.radio-inline label{color:#a94442}.has-error .form-control{border-color:#a94442;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-error .form-control:focus{border-color:#843534;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #ce8483;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #ce8483}.has-error .input-group-addon{color:#a94442;background-color:#f2dede;border-color:#a94442}.has-error .form-control-feedback{color:#a94442}.has-feedback label~.form-control-feedback{top:25px}.has-feedback label.sr-only~.form-control-feedback{top:0}.help-block{display:block;margin-top:5px;margin-bottom:10px;color:#737373}@media (min-width:768px){.form-inline .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.form-inline .form-control{display:inline-block;width:auto;vertical-align:middle}.form-inline .form-control-static{display:inline-block}.form-inline 
.input-group{display:inline-table;vertical-align:middle}.form-inline .input-group .form-control,.form-inline .input-group .input-group-addon,.form-inline .input-group .input-group-btn{width:auto}.form-inline .input-group>.form-control{width:100%}.form-inline .control-label{margin-bottom:0;vertical-align:middle}.form-inline .checkbox,.form-inline .radio{display:inline-block;margin-top:0;margin-bottom:0;vertical-align:middle}.form-inline .checkbox label,.form-inline .radio label{padding-left:0}.form-inline .checkbox input[type=checkbox],.form-inline .radio input[type=radio]{position:relative;margin-left:0}.form-inline .has-feedback .form-control-feedback{top:0}}.form-horizontal .checkbox,.form-horizontal .checkbox-inline,.form-horizontal .radio,.form-horizontal .radio-inline{padding-top:7px;margin-top:0;margin-bottom:0}.form-horizontal .checkbox,.form-horizontal .radio{min-height:27px}.form-horizontal .form-group{margin-right:-15px;margin-left:-15px}@media (min-width:768px){.form-horizontal .control-label{padding-top:7px;margin-bottom:0;text-align:right}}.form-horizontal .has-feedback .form-control-feedback{right:15px}@media (min-width:768px){.form-horizontal .form-group-lg .control-label{padding-top:11px;font-size:18px}}@media (min-width:768px){.form-horizontal .form-group-sm .control-label{padding-top:6px;font-size:12px}}.btn{display:inline-block;padding:6px 12px;margin-bottom:0;font-size:14px;font-weight:400;line-height:1.42857143;text-align:center;white-space:nowrap;vertical-align:middle;-ms-touch-action:manipulation;touch-action:manipulation;cursor:pointer;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;background-image:none;border:1px solid transparent;border-radius:4px}.btn.active.focus,.btn.active:focus,.btn.focus,.btn:active.focus,.btn:active:focus,.btn:focus{outline:5px auto 
-webkit-focus-ring-color;outline-offset:-2px}.btn.focus,.btn:focus,.btn:hover{color:#333;text-decoration:none}.btn.active,.btn:active{background-image:none;outline:0;-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn.disabled,.btn[disabled],fieldset[disabled] .btn{cursor:not-allowed;filter:alpha(opacity=65);-webkit-box-shadow:none;box-shadow:none;opacity:.65}a.btn.disabled,fieldset[disabled] a.btn{pointer-events:none}.btn-default{color:#333;background-color:#fff;border-color:#ccc}.btn-default.focus,.btn-default:focus{color:#333;background-color:#e6e6e6;border-color:#8c8c8c}.btn-default:hover{color:#333;background-color:#e6e6e6;border-color:#adadad}.btn-default.active,.btn-default:active,.open>.dropdown-toggle.btn-default{color:#333;background-color:#e6e6e6;border-color:#adadad}.btn-default.active.focus,.btn-default.active:focus,.btn-default.active:hover,.btn-default:active.focus,.btn-default:active:focus,.btn-default:active:hover,.open>.dropdown-toggle.btn-default.focus,.open>.dropdown-toggle.btn-default:focus,.open>.dropdown-toggle.btn-default:hover{color:#333;background-color:#d4d4d4;border-color:#8c8c8c}.btn-default.active,.btn-default:active,.open>.dropdown-toggle.btn-default{background-image:none}.btn-default.disabled.focus,.btn-default.disabled:focus,.btn-default.disabled:hover,.btn-default[disabled].focus,.btn-default[disabled]:focus,.btn-default[disabled]:hover,fieldset[disabled] .btn-default.focus,fieldset[disabled] .btn-default:focus,fieldset[disabled] .btn-default:hover{background-color:#fff;border-color:#ccc}.btn-default 
.badge{color:#fff;background-color:#333}.btn-primary{color:#fff;background-color:#337ab7;border-color:#2e6da4}.btn-primary.focus,.btn-primary:focus{color:#fff;background-color:#286090;border-color:#122b40}.btn-primary:hover{color:#fff;background-color:#286090;border-color:#204d74}.btn-primary.active,.btn-primary:active,.open>.dropdown-toggle.btn-primary{color:#fff;background-color:#286090;border-color:#204d74}.btn-primary.active.focus,.btn-primary.active:focus,.btn-primary.active:hover,.btn-primary:active.focus,.btn-primary:active:focus,.btn-primary:active:hover,.open>.dropdown-toggle.btn-primary.focus,.open>.dropdown-toggle.btn-primary:focus,.open>.dropdown-toggle.btn-primary:hover{color:#fff;background-color:#204d74;border-color:#122b40}.btn-primary.active,.btn-primary:active,.open>.dropdown-toggle.btn-primary{background-image:none}.btn-primary.disabled.focus,.btn-primary.disabled:focus,.btn-primary.disabled:hover,.btn-primary[disabled].focus,.btn-primary[disabled]:focus,.btn-primary[disabled]:hover,fieldset[disabled] .btn-primary.focus,fieldset[disabled] .btn-primary:focus,fieldset[disabled] .btn-primary:hover{background-color:#337ab7;border-color:#2e6da4}.btn-primary 
.badge{color:#337ab7;background-color:#fff}.btn-success{color:#fff;background-color:#5cb85c;border-color:#4cae4c}.btn-success.focus,.btn-success:focus{color:#fff;background-color:#449d44;border-color:#255625}.btn-success:hover{color:#fff;background-color:#449d44;border-color:#398439}.btn-success.active,.btn-success:active,.open>.dropdown-toggle.btn-success{color:#fff;background-color:#449d44;border-color:#398439}.btn-success.active.focus,.btn-success.active:focus,.btn-success.active:hover,.btn-success:active.focus,.btn-success:active:focus,.btn-success:active:hover,.open>.dropdown-toggle.btn-success.focus,.open>.dropdown-toggle.btn-success:focus,.open>.dropdown-toggle.btn-success:hover{color:#fff;background-color:#398439;border-color:#255625}.btn-success.active,.btn-success:active,.open>.dropdown-toggle.btn-success{background-image:none}.btn-success.disabled.focus,.btn-success.disabled:focus,.btn-success.disabled:hover,.btn-success[disabled].focus,.btn-success[disabled]:focus,.btn-success[disabled]:hover,fieldset[disabled] .btn-success.focus,fieldset[disabled] .btn-success:focus,fieldset[disabled] .btn-success:hover{background-color:#5cb85c;border-color:#4cae4c}.btn-success 
.badge{color:#5cb85c;background-color:#fff}.btn-info{color:#fff;background-color:#5bc0de;border-color:#46b8da}.btn-info.focus,.btn-info:focus{color:#fff;background-color:#31b0d5;border-color:#1b6d85}.btn-info:hover{color:#fff;background-color:#31b0d5;border-color:#269abc}.btn-info.active,.btn-info:active,.open>.dropdown-toggle.btn-info{color:#fff;background-color:#31b0d5;border-color:#269abc}.btn-info.active.focus,.btn-info.active:focus,.btn-info.active:hover,.btn-info:active.focus,.btn-info:active:focus,.btn-info:active:hover,.open>.dropdown-toggle.btn-info.focus,.open>.dropdown-toggle.btn-info:focus,.open>.dropdown-toggle.btn-info:hover{color:#fff;background-color:#269abc;border-color:#1b6d85}.btn-info.active,.btn-info:active,.open>.dropdown-toggle.btn-info{background-image:none}.btn-info.disabled.focus,.btn-info.disabled:focus,.btn-info.disabled:hover,.btn-info[disabled].focus,.btn-info[disabled]:focus,.btn-info[disabled]:hover,fieldset[disabled] .btn-info.focus,fieldset[disabled] .btn-info:focus,fieldset[disabled] .btn-info:hover{background-color:#5bc0de;border-color:#46b8da}.btn-info 
.badge{color:#5bc0de;background-color:#fff}.btn-warning{color:#fff;background-color:#f0ad4e;border-color:#eea236}.btn-warning.focus,.btn-warning:focus{color:#fff;background-color:#ec971f;border-color:#985f0d}.btn-warning:hover{color:#fff;background-color:#ec971f;border-color:#d58512}.btn-warning.active,.btn-warning:active,.open>.dropdown-toggle.btn-warning{color:#fff;background-color:#ec971f;border-color:#d58512}.btn-warning.active.focus,.btn-warning.active:focus,.btn-warning.active:hover,.btn-warning:active.focus,.btn-warning:active:focus,.btn-warning:active:hover,.open>.dropdown-toggle.btn-warning.focus,.open>.dropdown-toggle.btn-warning:focus,.open>.dropdown-toggle.btn-warning:hover{color:#fff;background-color:#d58512;border-color:#985f0d}.btn-warning.active,.btn-warning:active,.open>.dropdown-toggle.btn-warning{background-image:none}.btn-warning.disabled.focus,.btn-warning.disabled:focus,.btn-warning.disabled:hover,.btn-warning[disabled].focus,.btn-warning[disabled]:focus,.btn-warning[disabled]:hover,fieldset[disabled] .btn-warning.focus,fieldset[disabled] .btn-warning:focus,fieldset[disabled] .btn-warning:hover{background-color:#f0ad4e;border-color:#eea236}.btn-warning 
.badge{color:#f0ad4e;background-color:#fff}.btn-danger{color:#fff;background-color:#d9534f;border-color:#d43f3a}.btn-danger.focus,.btn-danger:focus{color:#fff;background-color:#c9302c;border-color:#761c19}.btn-danger:hover{color:#fff;background-color:#c9302c;border-color:#ac2925}.btn-danger.active,.btn-danger:active,.open>.dropdown-toggle.btn-danger{color:#fff;background-color:#c9302c;border-color:#ac2925}.btn-danger.active.focus,.btn-danger.active:focus,.btn-danger.active:hover,.btn-danger:active.focus,.btn-danger:active:focus,.btn-danger:active:hover,.open>.dropdown-toggle.btn-danger.focus,.open>.dropdown-toggle.btn-danger:focus,.open>.dropdown-toggle.btn-danger:hover{color:#fff;background-color:#ac2925;border-color:#761c19}.btn-danger.active,.btn-danger:active,.open>.dropdown-toggle.btn-danger{background-image:none}.btn-danger.disabled.focus,.btn-danger.disabled:focus,.btn-danger.disabled:hover,.btn-danger[disabled].focus,.btn-danger[disabled]:focus,.btn-danger[disabled]:hover,fieldset[disabled] .btn-danger.focus,fieldset[disabled] .btn-danger:focus,fieldset[disabled] .btn-danger:hover{background-color:#d9534f;border-color:#d43f3a}.btn-danger .badge{color:#d9534f;background-color:#fff}.btn-link{font-weight:400;color:#337ab7;border-radius:0}.btn-link,.btn-link.active,.btn-link:active,.btn-link[disabled],fieldset[disabled] .btn-link{background-color:transparent;-webkit-box-shadow:none;box-shadow:none}.btn-link,.btn-link:active,.btn-link:focus,.btn-link:hover{border-color:transparent}.btn-link:focus,.btn-link:hover{color:#23527c;text-decoration:underline;background-color:transparent}.btn-link[disabled]:focus,.btn-link[disabled]:hover,fieldset[disabled] .btn-link:focus,fieldset[disabled] .btn-link:hover{color:#777;text-decoration:none}.btn-group-lg>.btn,.btn-lg{padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}.btn-group-sm>.btn,.btn-sm{padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}.btn-group-xs>.btn,.btn-xs{padding:1px 
5px;font-size:12px;line-height:1.5;border-radius:3px}.btn-block{display:block;width:100%}.btn-block+.btn-block{margin-top:5px}input[type=button].btn-block,input[type=reset].btn-block,input[type=submit].btn-block{width:100%}.fade{opacity:0;-webkit-transition:opacity .15s linear;-o-transition:opacity .15s linear;transition:opacity .15s linear}.fade.in{opacity:1}.collapse{display:none}.collapse.in{display:block}tr.collapse.in{display:table-row}tbody.collapse.in{display:table-row-group}.collapsing{position:relative;height:0;overflow:hidden;-webkit-transition-timing-function:ease;-o-transition-timing-function:ease;transition-timing-function:ease;-webkit-transition-duration:.35s;-o-transition-duration:.35s;transition-duration:.35s;-webkit-transition-property:height,visibility;-o-transition-property:height,visibility;transition-property:height,visibility}.caret{display:inline-block;width:0;height:0;margin-left:2px;vertical-align:middle;border-top:4px dashed;border-top:4px solid\9;border-right:4px solid transparent;border-left:4px solid transparent}.dropdown,.dropup{position:relative}.dropdown-toggle:focus{outline:0}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:160px;padding:5px 0;margin:2px 0 0;font-size:14px;text-align:left;list-style:none;background-color:#fff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #ccc;border:1px solid rgba(0,0,0,.15);border-radius:4px;-webkit-box-shadow:0 6px 12px rgba(0,0,0,.175);box-shadow:0 6px 12px rgba(0,0,0,.175)}.dropdown-menu.pull-right{right:0;left:auto}.dropdown-menu .divider{height:1px;margin:9px 0;overflow:hidden;background-color:#e5e5e5}.dropdown-menu>li>a{display:block;padding:3px 
20px;clear:both;font-weight:400;line-height:1.42857143;color:#333;white-space:nowrap}.dropdown-menu>li>a:focus,.dropdown-menu>li>a:hover{color:#262626;text-decoration:none;background-color:#f5f5f5}.dropdown-menu>.active>a,.dropdown-menu>.active>a:focus,.dropdown-menu>.active>a:hover{color:#fff;text-decoration:none;background-color:#337ab7;outline:0}.dropdown-menu>.disabled>a,.dropdown-menu>.disabled>a:focus,.dropdown-menu>.disabled>a:hover{color:#777}.dropdown-menu>.disabled>a:focus,.dropdown-menu>.disabled>a:hover{text-decoration:none;cursor:not-allowed;background-color:transparent;background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled=false)}.open>.dropdown-menu{display:block}.open>a{outline:0}.dropdown-menu-right{right:0;left:auto}.dropdown-menu-left{right:auto;left:0}.dropdown-header{display:block;padding:3px 20px;font-size:12px;line-height:1.42857143;color:#777;white-space:nowrap}.dropdown-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:990}.pull-right>.dropdown-menu{right:0;left:auto}.dropup .caret,.navbar-fixed-bottom .dropdown .caret{content:"";border-top:0;border-bottom:4px dashed;border-bottom:4px solid\9}.dropup .dropdown-menu,.navbar-fixed-bottom .dropdown .dropdown-menu{top:auto;bottom:100%;margin-bottom:2px}@media (min-width:768px){.navbar-right .dropdown-menu{right:0;left:auto}.navbar-right .dropdown-menu-left{right:auto;left:0}}.btn-group,.btn-group-vertical{position:relative;display:inline-block;vertical-align:middle}.btn-group-vertical>.btn,.btn-group>.btn{position:relative;float:left}.btn-group-vertical>.btn.active,.btn-group-vertical>.btn:active,.btn-group-vertical>.btn:focus,.btn-group-vertical>.btn:hover,.btn-group>.btn.active,.btn-group>.btn:active,.btn-group>.btn:focus,.btn-group>.btn:hover{z-index:2}.btn-group .btn+.btn,.btn-group .btn+.btn-group,.btn-group .btn-group+.btn,.btn-group .btn-group+.btn-group{margin-left:-1px}.btn-toolbar{margin-left:-5px}.btn-toolbar .btn,.btn-toolbar 
.btn-group,.btn-toolbar .input-group{float:left}.btn-toolbar>.btn,.btn-toolbar>.btn-group,.btn-toolbar>.input-group{margin-left:5px}.btn-group>.btn:not(:first-child):not(:last-child):not(.dropdown-toggle){border-radius:0}.btn-group>.btn:first-child{margin-left:0}.btn-group>.btn:first-child:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn:last-child:not(:first-child),.btn-group>.dropdown-toggle:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.btn-group>.btn-group{float:left}.btn-group>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group>.btn-group:first-child:not(:last-child)>.btn:last-child,.btn-group>.btn-group:first-child:not(:last-child)>.dropdown-toggle{border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn-group:last-child:not(:first-child)>.btn:first-child{border-top-left-radius:0;border-bottom-left-radius:0}.btn-group .dropdown-toggle:active,.btn-group.open .dropdown-toggle{outline:0}.btn-group>.btn+.dropdown-toggle{padding-right:8px;padding-left:8px}.btn-group>.btn-lg+.dropdown-toggle{padding-right:12px;padding-left:12px}.btn-group.open .dropdown-toggle{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn-group.open .dropdown-toggle.btn-link{-webkit-box-shadow:none;box-shadow:none}.btn .caret{margin-left:0}.btn-lg .caret{border-width:5px 5px 0;border-bottom-width:0}.dropup .btn-lg .caret{border-width:0 5px 
5px}.btn-group-vertical>.btn,.btn-group-vertical>.btn-group,.btn-group-vertical>.btn-group>.btn{display:block;float:none;width:100%;max-width:100%}.btn-group-vertical>.btn-group>.btn{float:none}.btn-group-vertical>.btn+.btn,.btn-group-vertical>.btn+.btn-group,.btn-group-vertical>.btn-group+.btn,.btn-group-vertical>.btn-group+.btn-group{margin-top:-1px;margin-left:0}.btn-group-vertical>.btn:not(:first-child):not(:last-child){border-radius:0}.btn-group-vertical>.btn:first-child:not(:last-child){border-top-left-radius:4px;border-top-right-radius:4px;border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn:last-child:not(:first-child){border-top-left-radius:0;border-top-right-radius:0;border-bottom-right-radius:4px;border-bottom-left-radius:4px}.btn-group-vertical>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group-vertical>.btn-group:first-child:not(:last-child)>.btn:last-child,.btn-group-vertical>.btn-group:first-child:not(:last-child)>.dropdown-toggle{border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn-group:last-child:not(:first-child)>.btn:first-child{border-top-left-radius:0;border-top-right-radius:0}.btn-group-justified{display:table;width:100%;table-layout:fixed;border-collapse:separate}.btn-group-justified>.btn,.btn-group-justified>.btn-group{display:table-cell;float:none;width:1%}.btn-group-justified>.btn-group .btn{width:100%}.btn-group-justified>.btn-group .dropdown-menu{left:auto}[data-toggle=buttons]>.btn input[type=checkbox],[data-toggle=buttons]>.btn input[type=radio],[data-toggle=buttons]>.btn-group>.btn input[type=checkbox],[data-toggle=buttons]>.btn-group>.btn input[type=radio]{position:absolute;clip:rect(0,0,0,0);pointer-events:none}.input-group{position:relative;display:table;border-collapse:separate}.input-group[class*=col-]{float:none;padding-right:0;padding-left:0}.input-group 
.form-control{position:relative;z-index:2;float:left;width:100%;margin-bottom:0}.input-group .form-control:focus{z-index:3}.input-group-lg>.form-control,.input-group-lg>.input-group-addon,.input-group-lg>.input-group-btn>.btn{height:46px;padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}select.input-group-lg>.form-control,select.input-group-lg>.input-group-addon,select.input-group-lg>.input-group-btn>.btn{height:46px;line-height:46px}select[multiple].input-group-lg>.form-control,select[multiple].input-group-lg>.input-group-addon,select[multiple].input-group-lg>.input-group-btn>.btn,textarea.input-group-lg>.form-control,textarea.input-group-lg>.input-group-addon,textarea.input-group-lg>.input-group-btn>.btn{height:auto}.input-group-sm>.form-control,.input-group-sm>.input-group-addon,.input-group-sm>.input-group-btn>.btn{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-group-sm>.form-control,select.input-group-sm>.input-group-addon,select.input-group-sm>.input-group-btn>.btn{height:30px;line-height:30px}select[multiple].input-group-sm>.form-control,select[multiple].input-group-sm>.input-group-addon,select[multiple].input-group-sm>.input-group-btn>.btn,textarea.input-group-sm>.form-control,textarea.input-group-sm>.input-group-addon,textarea.input-group-sm>.input-group-btn>.btn{height:auto}.input-group .form-control,.input-group-addon,.input-group-btn{display:table-cell}.input-group .form-control:not(:first-child):not(:last-child),.input-group-addon:not(:first-child):not(:last-child),.input-group-btn:not(:first-child):not(:last-child){border-radius:0}.input-group-addon,.input-group-btn{width:1%;white-space:nowrap;vertical-align:middle}.input-group-addon{padding:6px 12px;font-size:14px;font-weight:400;line-height:1;color:#555;text-align:center;background-color:#eee;border:1px solid #ccc;border-radius:4px}.input-group-addon.input-sm{padding:5px 
10px;font-size:12px;border-radius:3px}.input-group-addon.input-lg{padding:10px 16px;font-size:18px;border-radius:6px}.input-group-addon input[type=checkbox],.input-group-addon input[type=radio]{margin-top:0}.input-group .form-control:first-child,.input-group-addon:first-child,.input-group-btn:first-child>.btn,.input-group-btn:first-child>.btn-group>.btn,.input-group-btn:first-child>.dropdown-toggle,.input-group-btn:last-child>.btn-group:not(:last-child)>.btn,.input-group-btn:last-child>.btn:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.input-group-addon:first-child{border-right:0}.input-group .form-control:last-child,.input-group-addon:last-child,.input-group-btn:first-child>.btn-group:not(:first-child)>.btn,.input-group-btn:first-child>.btn:not(:first-child),.input-group-btn:last-child>.btn,.input-group-btn:last-child>.btn-group>.btn,.input-group-btn:last-child>.dropdown-toggle{border-top-left-radius:0;border-bottom-left-radius:0}.input-group-addon:last-child{border-left:0}.input-group-btn{position:relative;font-size:0;white-space:nowrap}.input-group-btn>.btn{position:relative}.input-group-btn>.btn+.btn{margin-left:-1px}.input-group-btn>.btn:active,.input-group-btn>.btn:focus,.input-group-btn>.btn:hover{z-index:2}.input-group-btn:first-child>.btn,.input-group-btn:first-child>.btn-group{margin-right:-1px}.input-group-btn:last-child>.btn,.input-group-btn:last-child>.btn-group{z-index:2;margin-left:-1px}.nav{padding-left:0;margin-bottom:0;list-style:none}.nav>li{position:relative;display:block}.nav>li>a{position:relative;display:block;padding:10px 15px}.nav>li>a:focus,.nav>li>a:hover{text-decoration:none;background-color:#eee}.nav>li.disabled>a{color:#777}.nav>li.disabled>a:focus,.nav>li.disabled>a:hover{color:#777;text-decoration:none;cursor:not-allowed;background-color:transparent}.nav .open>a,.nav .open>a:focus,.nav .open>a:hover{background-color:#eee;border-color:#337ab7}.nav .nav-divider{height:1px;margin:9px 
0;overflow:hidden;background-color:#e5e5e5}.nav>li>a>img{max-width:none}.nav-tabs{border-bottom:1px solid #ddd}.nav-tabs>li{float:left;margin-bottom:-1px}.nav-tabs>li>a{margin-right:2px;line-height:1.42857143;border:1px solid transparent;border-radius:4px 4px 0 0}.nav-tabs>li>a:hover{border-color:#eee #eee #ddd}.nav-tabs>li.active>a,.nav-tabs>li.active>a:focus,.nav-tabs>li.active>a:hover{color:#555;cursor:default;background-color:#fff;border:1px solid #ddd;border-bottom-color:transparent}.nav-tabs.nav-justified{width:100%;border-bottom:0}.nav-tabs.nav-justified>li{float:none}.nav-tabs.nav-justified>li>a{margin-bottom:5px;text-align:center}.nav-tabs.nav-justified>.dropdown .dropdown-menu{top:auto;left:auto}@media (min-width:768px){.nav-tabs.nav-justified>li{display:table-cell;width:1%}.nav-tabs.nav-justified>li>a{margin-bottom:0}}.nav-tabs.nav-justified>li>a{margin-right:0;border-radius:4px}.nav-tabs.nav-justified>.active>a,.nav-tabs.nav-justified>.active>a:focus,.nav-tabs.nav-justified>.active>a:hover{border:1px solid #ddd}@media (min-width:768px){.nav-tabs.nav-justified>li>a{border-bottom:1px solid #ddd;border-radius:4px 4px 0 0}.nav-tabs.nav-justified>.active>a,.nav-tabs.nav-justified>.active>a:focus,.nav-tabs.nav-justified>.active>a:hover{border-bottom-color:#fff}}.nav-pills>li{float:left}.nav-pills>li>a{border-radius:4px}.nav-pills>li+li{margin-left:2px}.nav-pills>li.active>a,.nav-pills>li.active>a:focus,.nav-pills>li.active>a:hover{color:#fff;background-color:#337ab7}.nav-stacked>li{float:none}.nav-stacked>li+li{margin-top:2px;margin-left:0}.nav-justified{width:100%}.nav-justified>li{float:none}.nav-justified>li>a{margin-bottom:5px;text-align:center}.nav-justified>.dropdown .dropdown-menu{top:auto;left:auto}@media 
(min-width:768px){.nav-justified>li{display:table-cell;width:1%}.nav-justified>li>a{margin-bottom:0}}.nav-tabs-justified{border-bottom:0}.nav-tabs-justified>li>a{margin-right:0;border-radius:4px}.nav-tabs-justified>.active>a,.nav-tabs-justified>.active>a:focus,.nav-tabs-justified>.active>a:hover{border:1px solid #ddd}@media (min-width:768px){.nav-tabs-justified>li>a{border-bottom:1px solid #ddd;border-radius:4px 4px 0 0}.nav-tabs-justified>.active>a,.nav-tabs-justified>.active>a:focus,.nav-tabs-justified>.active>a:hover{border-bottom-color:#fff}}.tab-content>.tab-pane{display:none}.tab-content>.active{display:block}.nav-tabs .dropdown-menu{margin-top:-1px;border-top-left-radius:0;border-top-right-radius:0}.navbar{position:relative;min-height:50px;margin-bottom:20px;border:1px solid transparent}@media (min-width:768px){.navbar{border-radius:4px}}@media (min-width:768px){.navbar-header{float:left}}.navbar-collapse{padding-right:15px;padding-left:15px;overflow-x:visible;-webkit-overflow-scrolling:touch;border-top:1px solid transparent;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 0 rgba(255,255,255,.1)}.navbar-collapse.in{overflow-y:auto}@media (min-width:768px){.navbar-collapse{width:auto;border-top:0;-webkit-box-shadow:none;box-shadow:none}.navbar-collapse.collapse{display:block!important;height:auto!important;padding-bottom:0;overflow:visible!important}.navbar-collapse.in{overflow-y:visible}.navbar-fixed-bottom .navbar-collapse,.navbar-fixed-top .navbar-collapse,.navbar-static-top .navbar-collapse{padding-right:0;padding-left:0}}.navbar-fixed-bottom .navbar-collapse,.navbar-fixed-top .navbar-collapse{max-height:340px}@media (max-device-width:480px) and (orientation:landscape){.navbar-fixed-bottom .navbar-collapse,.navbar-fixed-top .navbar-collapse{max-height:200px}}.container-fluid>.navbar-collapse,.container-fluid>.navbar-header,.container>.navbar-collapse,.container>.navbar-header{margin-right:-15px;margin-left:-15px}@media 
(min-width:768px){.container-fluid>.navbar-collapse,.container-fluid>.navbar-header,.container>.navbar-collapse,.container>.navbar-header{margin-right:0;margin-left:0}}.navbar-static-top{z-index:1000;border-width:0 0 1px}@media (min-width:768px){.navbar-static-top{border-radius:0}}.navbar-fixed-bottom,.navbar-fixed-top{position:fixed;right:0;left:0;z-index:1030}@media (min-width:768px){.navbar-fixed-bottom,.navbar-fixed-top{border-radius:0}}.navbar-fixed-top{top:0;border-width:0 0 1px}.navbar-fixed-bottom{bottom:0;margin-bottom:0;border-width:1px 0 0}.navbar-brand{float:left;height:50px;padding:15px 15px;font-size:18px;line-height:20px}.navbar-brand:focus,.navbar-brand:hover{text-decoration:none}.navbar-brand>img{display:block}@media (min-width:768px){.navbar>.container .navbar-brand,.navbar>.container-fluid .navbar-brand{margin-left:-15px}}.navbar-toggle{position:relative;float:right;padding:9px 10px;margin-top:8px;margin-right:15px;margin-bottom:8px;background-color:transparent;background-image:none;border:1px solid transparent;border-radius:4px}.navbar-toggle:focus{outline:0}.navbar-toggle .icon-bar{display:block;width:22px;height:2px;border-radius:1px}.navbar-toggle .icon-bar+.icon-bar{margin-top:4px}@media (min-width:768px){.navbar-toggle{display:none}}.navbar-nav{margin:7.5px -15px}.navbar-nav>li>a{padding-top:10px;padding-bottom:10px;line-height:20px}@media (max-width:767px){.navbar-nav .open .dropdown-menu{position:static;float:none;width:auto;margin-top:0;background-color:transparent;border:0;-webkit-box-shadow:none;box-shadow:none}.navbar-nav .open .dropdown-menu .dropdown-header,.navbar-nav .open .dropdown-menu>li>a{padding:5px 15px 5px 25px}.navbar-nav .open .dropdown-menu>li>a{line-height:20px}.navbar-nav .open .dropdown-menu>li>a:focus,.navbar-nav .open .dropdown-menu>li>a:hover{background-image:none}}@media 
(min-width:768px){.navbar-nav{float:left;margin:0}.navbar-nav>li{float:left}.navbar-nav>li>a{padding-top:15px;padding-bottom:15px}}.navbar-form{padding:10px 15px;margin-top:8px;margin-right:-15px;margin-bottom:8px;margin-left:-15px;border-top:1px solid transparent;border-bottom:1px solid transparent;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.1),0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 0 rgba(255,255,255,.1),0 1px 0 rgba(255,255,255,.1)}@media (min-width:768px){.navbar-form .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.navbar-form .form-control{display:inline-block;width:auto;vertical-align:middle}.navbar-form .form-control-static{display:inline-block}.navbar-form .input-group{display:inline-table;vertical-align:middle}.navbar-form .input-group .form-control,.navbar-form .input-group .input-group-addon,.navbar-form .input-group .input-group-btn{width:auto}.navbar-form .input-group>.form-control{width:100%}.navbar-form .control-label{margin-bottom:0;vertical-align:middle}.navbar-form .checkbox,.navbar-form .radio{display:inline-block;margin-top:0;margin-bottom:0;vertical-align:middle}.navbar-form .checkbox label,.navbar-form .radio label{padding-left:0}.navbar-form .checkbox input[type=checkbox],.navbar-form .radio input[type=radio]{position:relative;margin-left:0}.navbar-form .has-feedback .form-control-feedback{top:0}}@media (max-width:767px){.navbar-form .form-group{margin-bottom:5px}.navbar-form .form-group:last-child{margin-bottom:0}}@media (min-width:768px){.navbar-form{width:auto;padding-top:0;padding-bottom:0;margin-right:0;margin-left:0;border:0;-webkit-box-shadow:none;box-shadow:none}}.navbar-nav>li>.dropdown-menu{margin-top:0;border-top-left-radius:0;border-top-right-radius:0}.navbar-fixed-bottom 
.navbar-nav>li>.dropdown-menu{margin-bottom:0;border-top-left-radius:4px;border-top-right-radius:4px;border-bottom-right-radius:0;border-bottom-left-radius:0}.navbar-btn{margin-top:8px;margin-bottom:8px}.navbar-btn.btn-sm{margin-top:10px;margin-bottom:10px}.navbar-btn.btn-xs{margin-top:14px;margin-bottom:14px}.navbar-text{margin-top:15px;margin-bottom:15px}@media (min-width:768px){.navbar-text{float:left;margin-right:15px;margin-left:15px}}@media (min-width:768px){.navbar-left{float:left!important}.navbar-right{float:right!important;margin-right:-15px}.navbar-right~.navbar-right{margin-right:0}}.navbar-default{background-color:#f8f8f8;border-color:#e7e7e7}.navbar-default .navbar-brand{color:#777}.navbar-default .navbar-brand:focus,.navbar-default .navbar-brand:hover{color:#5e5e5e;background-color:transparent}.navbar-default .navbar-text{color:#777}.navbar-default .navbar-nav>li>a{color:#777}.navbar-default .navbar-nav>li>a:focus,.navbar-default .navbar-nav>li>a:hover{color:#333;background-color:transparent}.navbar-default .navbar-nav>.active>a,.navbar-default .navbar-nav>.active>a:focus,.navbar-default .navbar-nav>.active>a:hover{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav>.disabled>a,.navbar-default .navbar-nav>.disabled>a:focus,.navbar-default .navbar-nav>.disabled>a:hover{color:#ccc;background-color:transparent}.navbar-default .navbar-toggle{border-color:#ddd}.navbar-default .navbar-toggle:focus,.navbar-default .navbar-toggle:hover{background-color:#ddd}.navbar-default .navbar-toggle .icon-bar{background-color:#888}.navbar-default .navbar-collapse,.navbar-default .navbar-form{border-color:#e7e7e7}.navbar-default .navbar-nav>.open>a,.navbar-default .navbar-nav>.open>a:focus,.navbar-default .navbar-nav>.open>a:hover{color:#555;background-color:#e7e7e7}@media (max-width:767px){.navbar-default .navbar-nav .open .dropdown-menu>li>a{color:#777}.navbar-default .navbar-nav .open .dropdown-menu>li>a:focus,.navbar-default .navbar-nav .open 
.dropdown-menu>li>a:hover{color:#333;background-color:transparent}.navbar-default .navbar-nav .open .dropdown-menu>.active>a,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:focus,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:hover{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:focus,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:hover{color:#ccc;background-color:transparent}}.navbar-default .navbar-link{color:#777}.navbar-default .navbar-link:hover{color:#333}.navbar-default .btn-link{color:#777}.navbar-default .btn-link:focus,.navbar-default .btn-link:hover{color:#333}.navbar-default .btn-link[disabled]:focus,.navbar-default .btn-link[disabled]:hover,fieldset[disabled] .navbar-default .btn-link:focus,fieldset[disabled] .navbar-default .btn-link:hover{color:#ccc}.navbar-inverse{background-color:#222;border-color:#080808}.navbar-inverse .navbar-brand{color:#9d9d9d}.navbar-inverse .navbar-brand:focus,.navbar-inverse .navbar-brand:hover{color:#fff;background-color:transparent}.navbar-inverse .navbar-text{color:#9d9d9d}.navbar-inverse .navbar-nav>li>a{color:#9d9d9d}.navbar-inverse .navbar-nav>li>a:focus,.navbar-inverse .navbar-nav>li>a:hover{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav>.active>a,.navbar-inverse .navbar-nav>.active>a:focus,.navbar-inverse .navbar-nav>.active>a:hover{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav>.disabled>a,.navbar-inverse .navbar-nav>.disabled>a:focus,.navbar-inverse .navbar-nav>.disabled>a:hover{color:#444;background-color:transparent}.navbar-inverse .navbar-toggle{border-color:#333}.navbar-inverse .navbar-toggle:focus,.navbar-inverse .navbar-toggle:hover{background-color:#333}.navbar-inverse .navbar-toggle .icon-bar{background-color:#fff}.navbar-inverse .navbar-collapse,.navbar-inverse .navbar-form{border-color:#101010}.navbar-inverse 
.navbar-nav>.open>a,.navbar-inverse .navbar-nav>.open>a:focus,.navbar-inverse .navbar-nav>.open>a:hover{color:#fff;background-color:#080808}@media (max-width:767px){.navbar-inverse .navbar-nav .open .dropdown-menu>.dropdown-header{border-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu .divider{background-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a{color:#9d9d9d}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:focus,.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:hover{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:focus,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:hover{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:focus,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:hover{color:#444;background-color:transparent}}.navbar-inverse .navbar-link{color:#9d9d9d}.navbar-inverse .navbar-link:hover{color:#fff}.navbar-inverse .btn-link{color:#9d9d9d}.navbar-inverse .btn-link:focus,.navbar-inverse .btn-link:hover{color:#fff}.navbar-inverse .btn-link[disabled]:focus,.navbar-inverse .btn-link[disabled]:hover,fieldset[disabled] .navbar-inverse .btn-link:focus,fieldset[disabled] .navbar-inverse .btn-link:hover{color:#444}.breadcrumb{padding:8px 15px;margin-bottom:20px;list-style:none;background-color:#f5f5f5;border-radius:4px}.breadcrumb>li{display:inline-block}.breadcrumb>li+li:before{padding:0 5px;color:#ccc;content:"/\00a0"}.breadcrumb>.active{color:#777}.pagination{display:inline-block;padding-left:0;margin:20px 0;border-radius:4px}.pagination>li{display:inline}.pagination>li>a,.pagination>li>span{position:relative;float:left;padding:6px 12px;margin-left:-1px;line-height:1.42857143;color:#337ab7;text-decoration:none;background-color:#fff;border:1px solid 
#ddd}.pagination>li:first-child>a,.pagination>li:first-child>span{margin-left:0;border-top-left-radius:4px;border-bottom-left-radius:4px}.pagination>li:last-child>a,.pagination>li:last-child>span{border-top-right-radius:4px;border-bottom-right-radius:4px}.pagination>li>a:focus,.pagination>li>a:hover,.pagination>li>span:focus,.pagination>li>span:hover{z-index:2;color:#23527c;background-color:#eee;border-color:#ddd}.pagination>.active>a,.pagination>.active>a:focus,.pagination>.active>a:hover,.pagination>.active>span,.pagination>.active>span:focus,.pagination>.active>span:hover{z-index:3;color:#fff;cursor:default;background-color:#337ab7;border-color:#337ab7}.pagination>.disabled>a,.pagination>.disabled>a:focus,.pagination>.disabled>a:hover,.pagination>.disabled>span,.pagination>.disabled>span:focus,.pagination>.disabled>span:hover{color:#777;cursor:not-allowed;background-color:#fff;border-color:#ddd}.pagination-lg>li>a,.pagination-lg>li>span{padding:10px 16px;font-size:18px;line-height:1.3333333}.pagination-lg>li:first-child>a,.pagination-lg>li:first-child>span{border-top-left-radius:6px;border-bottom-left-radius:6px}.pagination-lg>li:last-child>a,.pagination-lg>li:last-child>span{border-top-right-radius:6px;border-bottom-right-radius:6px}.pagination-sm>li>a,.pagination-sm>li>span{padding:5px 10px;font-size:12px;line-height:1.5}.pagination-sm>li:first-child>a,.pagination-sm>li:first-child>span{border-top-left-radius:3px;border-bottom-left-radius:3px}.pagination-sm>li:last-child>a,.pagination-sm>li:last-child>span{border-top-right-radius:3px;border-bottom-right-radius:3px}.pager{padding-left:0;margin:20px 0;text-align:center;list-style:none}.pager li{display:inline}.pager li>a,.pager li>span{display:inline-block;padding:5px 14px;background-color:#fff;border:1px solid #ddd;border-radius:15px}.pager li>a:focus,.pager li>a:hover{text-decoration:none;background-color:#eee}.pager .next>a,.pager .next>span{float:right}.pager .previous>a,.pager 
.previous>span{float:left}.pager .disabled>a,.pager .disabled>a:focus,.pager .disabled>a:hover,.pager .disabled>span{color:#777;cursor:not-allowed;background-color:#fff}.label{display:inline;padding:.2em .6em .3em;font-size:75%;font-weight:700;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25em}a.label:focus,a.label:hover{color:#fff;text-decoration:none;cursor:pointer}.label:empty{display:none}.btn .label{position:relative;top:-1px}.label-default{background-color:#777}.label-default[href]:focus,.label-default[href]:hover{background-color:#5e5e5e}.label-primary{background-color:#337ab7}.label-primary[href]:focus,.label-primary[href]:hover{background-color:#286090}.label-success{background-color:#5cb85c}.label-success[href]:focus,.label-success[href]:hover{background-color:#449d44}.label-info{background-color:#5bc0de}.label-info[href]:focus,.label-info[href]:hover{background-color:#31b0d5}.label-warning{background-color:#f0ad4e}.label-warning[href]:focus,.label-warning[href]:hover{background-color:#ec971f}.label-danger{background-color:#d9534f}.label-danger[href]:focus,.label-danger[href]:hover{background-color:#c9302c}.badge{display:inline-block;min-width:10px;padding:3px 7px;font-size:12px;font-weight:700;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:middle;background-color:#777;border-radius:10px}.badge:empty{display:none}.btn .badge{position:relative;top:-1px}.btn-group-xs>.btn .badge,.btn-xs .badge{top:0;padding:1px 5px}a.badge:focus,a.badge:hover{color:#fff;text-decoration:none;cursor:pointer}.list-group-item.active>.badge,.nav-pills>.active>a>.badge{color:#337ab7;background-color:#fff}.list-group-item>.badge{float:right}.list-group-item>.badge+.badge{margin-right:5px}.nav-pills>li>a>.badge{margin-left:3px}.jumbotron{padding-top:30px;padding-bottom:30px;margin-bottom:30px;color:inherit;background-color:#eee}.jumbotron .h1,.jumbotron h1{color:inherit}.jumbotron 
p{margin-bottom:15px;font-size:21px;font-weight:200}.jumbotron>hr{border-top-color:#d5d5d5}.container .jumbotron,.container-fluid .jumbotron{padding-right:15px;padding-left:15px;border-radius:6px}.jumbotron .container{max-width:100%}@media screen and (min-width:768px){.jumbotron{padding-top:48px;padding-bottom:48px}.container .jumbotron,.container-fluid .jumbotron{padding-right:60px;padding-left:60px}.jumbotron .h1,.jumbotron h1{font-size:63px}}.thumbnail{display:block;padding:4px;margin-bottom:20px;line-height:1.42857143;background-color:#fff;border:1px solid #ddd;border-radius:4px;-webkit-transition:border .2s ease-in-out;-o-transition:border .2s ease-in-out;transition:border .2s ease-in-out}.thumbnail a>img,.thumbnail>img{margin-right:auto;margin-left:auto}a.thumbnail.active,a.thumbnail:focus,a.thumbnail:hover{border-color:#337ab7}.thumbnail .caption{padding:9px;color:#333}.alert{padding:15px;margin-bottom:20px;border:1px solid transparent;border-radius:4px}.alert h4{margin-top:0;color:inherit}.alert .alert-link{font-weight:700}.alert>p,.alert>ul{margin-bottom:0}.alert>p+p{margin-top:5px}.alert-dismissable,.alert-dismissible{padding-right:35px}.alert-dismissable .close,.alert-dismissible .close{position:relative;top:-2px;right:-21px;color:inherit}.alert-success{color:#3c763d;background-color:#dff0d8;border-color:#d6e9c6}.alert-success hr{border-top-color:#c9e2b3}.alert-success .alert-link{color:#2b542c}.alert-info{color:#31708f;background-color:#d9edf7;border-color:#bce8f1}.alert-info hr{border-top-color:#a6e1ec}.alert-info .alert-link{color:#245269}.alert-warning{color:#8a6d3b;background-color:#fcf8e3;border-color:#faebcc}.alert-warning hr{border-top-color:#f7e1b5}.alert-warning .alert-link{color:#66512c}.alert-danger{color:#a94442;background-color:#f2dede;border-color:#ebccd1}.alert-danger hr{border-top-color:#e4b9c0}.alert-danger .alert-link{color:#843534}@-webkit-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 
0}}@-o-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}.progress{height:20px;margin-bottom:20px;overflow:hidden;background-color:#f5f5f5;border-radius:4px;-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,.1);box-shadow:inset 0 1px 2px rgba(0,0,0,.1)}.progress-bar{float:left;width:0;height:100%;font-size:12px;line-height:20px;color:#fff;text-align:center;background-color:#337ab7;-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,.15);box-shadow:inset 0 -1px 0 rgba(0,0,0,.15);-webkit-transition:width .6s ease;-o-transition:width .6s ease;transition:width .6s ease}.progress-bar-striped,.progress-striped .progress-bar{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);-webkit-background-size:40px 40px;background-size:40px 40px}.progress-bar.active,.progress.active .progress-bar{-webkit-animation:progress-bar-stripes 2s linear infinite;-o-animation:progress-bar-stripes 2s linear infinite;animation:progress-bar-stripes 2s linear infinite}.progress-bar-success{background-color:#5cb85c}.progress-striped .progress-bar-success{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 
75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-info{background-color:#5bc0de}.progress-striped .progress-bar-info{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-warning{background-color:#f0ad4e}.progress-striped .progress-bar-warning{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-danger{background-color:#d9534f}.progress-striped .progress-bar-danger{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 
25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.media{margin-top:15px}.media:first-child{margin-top:0}.media,.media-body{overflow:hidden;zoom:1}.media-body{width:10000px}.media-object{display:block}.media-object.img-thumbnail{max-width:none}.media-right,.media>.pull-right{padding-left:10px}.media-left,.media>.pull-left{padding-right:10px}.media-body,.media-left,.media-right{display:table-cell;vertical-align:top}.media-middle{vertical-align:middle}.media-bottom{vertical-align:bottom}.media-heading{margin-top:0;margin-bottom:5px}.media-list{padding-left:0;list-style:none}.list-group{padding-left:0;margin-bottom:20px}.list-group-item{position:relative;display:block;padding:10px 15px;margin-bottom:-1px;background-color:#fff;border:1px solid #ddd}.list-group-item:first-child{border-top-left-radius:4px;border-top-right-radius:4px}.list-group-item:last-child{margin-bottom:0;border-bottom-right-radius:4px;border-bottom-left-radius:4px}a.list-group-item,button.list-group-item{color:#555}a.list-group-item .list-group-item-heading,button.list-group-item .list-group-item-heading{color:#333}a.list-group-item:focus,a.list-group-item:hover,button.list-group-item:focus,button.list-group-item:hover{color:#555;text-decoration:none;background-color:#f5f5f5}button.list-group-item{width:100%;text-align:left}.list-group-item.disabled,.list-group-item.disabled:focus,.list-group-item.disabled:hover{color:#777;cursor:not-allowed;background-color:#eee}.list-group-item.disabled .list-group-item-heading,.list-group-item.disabled:focus .list-group-item-heading,.list-group-item.disabled:hover .list-group-item-heading{color:inherit}.list-group-item.disabled .list-group-item-text,.list-group-item.disabled:focus .list-group-item-text,.list-group-item.disabled:hover 
.list-group-item-text{color:#777}.list-group-item.active,.list-group-item.active:focus,.list-group-item.active:hover{z-index:2;color:#fff;background-color:#337ab7;border-color:#337ab7}.list-group-item.active .list-group-item-heading,.list-group-item.active .list-group-item-heading>.small,.list-group-item.active .list-group-item-heading>small,.list-group-item.active:focus .list-group-item-heading,.list-group-item.active:focus .list-group-item-heading>.small,.list-group-item.active:focus .list-group-item-heading>small,.list-group-item.active:hover .list-group-item-heading,.list-group-item.active:hover .list-group-item-heading>.small,.list-group-item.active:hover .list-group-item-heading>small{color:inherit}.list-group-item.active .list-group-item-text,.list-group-item.active:focus .list-group-item-text,.list-group-item.active:hover .list-group-item-text{color:#c7ddef}.list-group-item-success{color:#3c763d;background-color:#dff0d8}a.list-group-item-success,button.list-group-item-success{color:#3c763d}a.list-group-item-success .list-group-item-heading,button.list-group-item-success .list-group-item-heading{color:inherit}a.list-group-item-success:focus,a.list-group-item-success:hover,button.list-group-item-success:focus,button.list-group-item-success:hover{color:#3c763d;background-color:#d0e9c6}a.list-group-item-success.active,a.list-group-item-success.active:focus,a.list-group-item-success.active:hover,button.list-group-item-success.active,button.list-group-item-success.active:focus,button.list-group-item-success.active:hover{color:#fff;background-color:#3c763d;border-color:#3c763d}.list-group-item-info{color:#31708f;background-color:#d9edf7}a.list-group-item-info,button.list-group-item-info{color:#31708f}a.list-group-item-info .list-group-item-heading,button.list-group-item-info 
.list-group-item-heading{color:inherit}a.list-group-item-info:focus,a.list-group-item-info:hover,button.list-group-item-info:focus,button.list-group-item-info:hover{color:#31708f;background-color:#c4e3f3}a.list-group-item-info.active,a.list-group-item-info.active:focus,a.list-group-item-info.active:hover,button.list-group-item-info.active,button.list-group-item-info.active:focus,button.list-group-item-info.active:hover{color:#fff;background-color:#31708f;border-color:#31708f}.list-group-item-warning{color:#8a6d3b;background-color:#fcf8e3}a.list-group-item-warning,button.list-group-item-warning{color:#8a6d3b}a.list-group-item-warning .list-group-item-heading,button.list-group-item-warning .list-group-item-heading{color:inherit}a.list-group-item-warning:focus,a.list-group-item-warning:hover,button.list-group-item-warning:focus,button.list-group-item-warning:hover{color:#8a6d3b;background-color:#faf2cc}a.list-group-item-warning.active,a.list-group-item-warning.active:focus,a.list-group-item-warning.active:hover,button.list-group-item-warning.active,button.list-group-item-warning.active:focus,button.list-group-item-warning.active:hover{color:#fff;background-color:#8a6d3b;border-color:#8a6d3b}.list-group-item-danger{color:#a94442;background-color:#f2dede}a.list-group-item-danger,button.list-group-item-danger{color:#a94442}a.list-group-item-danger .list-group-item-heading,button.list-group-item-danger 
.list-group-item-heading{color:inherit}a.list-group-item-danger:focus,a.list-group-item-danger:hover,button.list-group-item-danger:focus,button.list-group-item-danger:hover{color:#a94442;background-color:#ebcccc}a.list-group-item-danger.active,a.list-group-item-danger.active:focus,a.list-group-item-danger.active:hover,button.list-group-item-danger.active,button.list-group-item-danger.active:focus,button.list-group-item-danger.active:hover{color:#fff;background-color:#a94442;border-color:#a94442}.list-group-item-heading{margin-top:0;margin-bottom:5px}.list-group-item-text{margin-bottom:0;line-height:1.3}.panel{margin-bottom:20px;background-color:#fff;border:1px solid transparent;border-radius:4px;-webkit-box-shadow:0 1px 1px rgba(0,0,0,.05);box-shadow:0 1px 1px rgba(0,0,0,.05)}.panel-body{padding:15px}.panel-heading{padding:10px 15px;border-bottom:1px solid transparent;border-top-left-radius:3px;border-top-right-radius:3px}.panel-heading>.dropdown .dropdown-toggle{color:inherit}.panel-title{margin-top:0;margin-bottom:0;font-size:16px;color:inherit}.panel-title>.small,.panel-title>.small>a,.panel-title>a,.panel-title>small,.panel-title>small>a{color:inherit}.panel-footer{padding:10px 15px;background-color:#f5f5f5;border-top:1px solid #ddd;border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.list-group,.panel>.panel-collapse>.list-group{margin-bottom:0}.panel>.list-group .list-group-item,.panel>.panel-collapse>.list-group .list-group-item{border-width:1px 0;border-radius:0}.panel>.list-group:first-child .list-group-item:first-child,.panel>.panel-collapse>.list-group:first-child .list-group-item:first-child{border-top:0;border-top-left-radius:3px;border-top-right-radius:3px}.panel>.list-group:last-child .list-group-item:last-child,.panel>.panel-collapse>.list-group:last-child .list-group-item:last-child{border-bottom:0;border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.panel-heading+.panel-collapse>.list-group 
.list-group-item:first-child{border-top-left-radius:0;border-top-right-radius:0}.panel-heading+.list-group .list-group-item:first-child{border-top-width:0}.list-group+.panel-footer{border-top-width:0}.panel>.panel-collapse>.table,.panel>.table,.panel>.table-responsive>.table{margin-bottom:0}.panel>.panel-collapse>.table caption,.panel>.table caption,.panel>.table-responsive>.table caption{padding-right:15px;padding-left:15px}.panel>.table-responsive:first-child>.table:first-child,.panel>.table:first-child{border-top-left-radius:3px;border-top-right-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child,.panel>.table:first-child>thead:first-child>tr:first-child{border-top-left-radius:3px;border-top-right-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child td:first-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child th:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child td:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child th:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child td:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child th:first-child,.panel>.table:first-child>thead:first-child>tr:first-child td:first-child,.panel>.table:first-child>thead:first-child>tr:first-child th:first-child{border-top-left-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child td:last-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child th:last-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child 
td:last-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child th:last-child,.panel>.table:first-child>tbody:first-child>tr:first-child td:last-child,.panel>.table:first-child>tbody:first-child>tr:first-child th:last-child,.panel>.table:first-child>thead:first-child>tr:first-child td:last-child,.panel>.table:first-child>thead:first-child>tr:first-child th:last-child{border-top-right-radius:3px}.panel>.table-responsive:last-child>.table:last-child,.panel>.table:last-child{border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child{border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child td:first-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child th:first-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child td:first-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child th:first-child,.panel>.table:last-child>tbody:last-child>tr:last-child td:first-child,.panel>.table:last-child>tbody:last-child>tr:last-child th:first-child,.panel>.table:last-child>tfoot:last-child>tr:last-child td:first-child,.panel>.table:last-child>tfoot:last-child>tr:last-child th:first-child{border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child td:last-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child th:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child 
td:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child th:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child td:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child th:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child td:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child th:last-child{border-bottom-right-radius:3px}.panel>.panel-body+.table,.panel>.panel-body+.table-responsive,.panel>.table+.panel-body,.panel>.table-responsive+.panel-body{border-top:1px solid #ddd}.panel>.table>tbody:first-child>tr:first-child td,.panel>.table>tbody:first-child>tr:first-child th{border-top:0}.panel>.table-bordered,.panel>.table-responsive>.table-bordered{border:0}.panel>.table-bordered>tbody>tr>td:first-child,.panel>.table-bordered>tbody>tr>th:first-child,.panel>.table-bordered>tfoot>tr>td:first-child,.panel>.table-bordered>tfoot>tr>th:first-child,.panel>.table-bordered>thead>tr>td:first-child,.panel>.table-bordered>thead>tr>th:first-child,.panel>.table-responsive>.table-bordered>tbody>tr>td:first-child,.panel>.table-responsive>.table-bordered>tbody>tr>th:first-child,.panel>.table-responsive>.table-bordered>tfoot>tr>td:first-child,.panel>.table-responsive>.table-bordered>tfoot>tr>th:first-child,.panel>.table-responsive>.table-bordered>thead>tr>td:first-child,.panel>.table-responsive>.table-bordered>thead>tr>th:first-child{border-left:0}.panel>.table-bordered>tbody>tr>td:last-child,.panel>.table-bordered>tbody>tr>th:last-child,.panel>.table-bordered>tfoot>tr>td:last-child,.panel>.table-bordered>tfoot>tr>th:last-child,.panel>.table-bordered>thead>tr>td:last-child,.panel>.table-bordered>thead>tr>th:last-child,.panel>.table-responsive>.table-bordered>tbody>tr>td:last-child,.panel>.table-responsive>.table-bordered>tbody>tr>th:last-child,.panel>.table-responsive>.table-bordered>tfoot>tr>td:last-child,.panel>.table-responsive>.table-bordered>tfoot>tr>th:last-child,.panel>.table-respon
sive>.table-bordered>thead>tr>td:last-child,.panel>.table-responsive>.table-bordered>thead>tr>th:last-child{border-right:0}.panel>.table-bordered>tbody>tr:first-child>td,.panel>.table-bordered>tbody>tr:first-child>th,.panel>.table-bordered>thead>tr:first-child>td,.panel>.table-bordered>thead>tr:first-child>th,.panel>.table-responsive>.table-bordered>tbody>tr:first-child>td,.panel>.table-responsive>.table-bordered>tbody>tr:first-child>th,.panel>.table-responsive>.table-bordered>thead>tr:first-child>td,.panel>.table-responsive>.table-bordered>thead>tr:first-child>th{border-bottom:0}.panel>.table-bordered>tbody>tr:last-child>td,.panel>.table-bordered>tbody>tr:last-child>th,.panel>.table-bordered>tfoot>tr:last-child>td,.panel>.table-bordered>tfoot>tr:last-child>th,.panel>.table-responsive>.table-bordered>tbody>tr:last-child>td,.panel>.table-responsive>.table-bordered>tbody>tr:last-child>th,.panel>.table-responsive>.table-bordered>tfoot>tr:last-child>td,.panel>.table-responsive>.table-bordered>tfoot>tr:last-child>th{border-bottom:0}.panel>.table-responsive{margin-bottom:0;border:0}.panel-group{margin-bottom:20px}.panel-group .panel{margin-bottom:0;border-radius:4px}.panel-group .panel+.panel{margin-top:5px}.panel-group .panel-heading{border-bottom:0}.panel-group .panel-heading+.panel-collapse>.list-group,.panel-group .panel-heading+.panel-collapse>.panel-body{border-top:1px solid #ddd}.panel-group .panel-footer{border-top:0}.panel-group .panel-footer+.panel-collapse .panel-body{border-bottom:1px solid #ddd}.panel-default{border-color:#ddd}.panel-default>.panel-heading{color:#333;background-color:#f5f5f5;border-color:#ddd}.panel-default>.panel-heading+.panel-collapse>.panel-body{border-top-color:#ddd}.panel-default>.panel-heading 
.badge{color:#f5f5f5;background-color:#333}.panel-default>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#ddd}.panel-primary{border-color:#337ab7}.panel-primary>.panel-heading{color:#fff;background-color:#337ab7;border-color:#337ab7}.panel-primary>.panel-heading+.panel-collapse>.panel-body{border-top-color:#337ab7}.panel-primary>.panel-heading .badge{color:#337ab7;background-color:#fff}.panel-primary>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#337ab7}.panel-success{border-color:#d6e9c6}.panel-success>.panel-heading{color:#3c763d;background-color:#dff0d8;border-color:#d6e9c6}.panel-success>.panel-heading+.panel-collapse>.panel-body{border-top-color:#d6e9c6}.panel-success>.panel-heading .badge{color:#dff0d8;background-color:#3c763d}.panel-success>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#d6e9c6}.panel-info{border-color:#bce8f1}.panel-info>.panel-heading{color:#31708f;background-color:#d9edf7;border-color:#bce8f1}.panel-info>.panel-heading+.panel-collapse>.panel-body{border-top-color:#bce8f1}.panel-info>.panel-heading .badge{color:#d9edf7;background-color:#31708f}.panel-info>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#bce8f1}.panel-warning{border-color:#faebcc}.panel-warning>.panel-heading{color:#8a6d3b;background-color:#fcf8e3;border-color:#faebcc}.panel-warning>.panel-heading+.panel-collapse>.panel-body{border-top-color:#faebcc}.panel-warning>.panel-heading .badge{color:#fcf8e3;background-color:#8a6d3b}.panel-warning>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#faebcc}.panel-danger{border-color:#ebccd1}.panel-danger>.panel-heading{color:#a94442;background-color:#f2dede;border-color:#ebccd1}.panel-danger>.panel-heading+.panel-collapse>.panel-body{border-top-color:#ebccd1}.panel-danger>.panel-heading 
.badge{color:#f2dede;background-color:#a94442}.panel-danger>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#ebccd1}.embed-responsive{position:relative;display:block;height:0;padding:0;overflow:hidden}.embed-responsive .embed-responsive-item,.embed-responsive embed,.embed-responsive iframe,.embed-responsive object,.embed-responsive video{position:absolute;top:0;bottom:0;left:0;width:100%;height:100%;border:0}.embed-responsive-16by9{padding-bottom:56.25%}.embed-responsive-4by3{padding-bottom:75%}.well{min-height:20px;padding:19px;margin-bottom:20px;background-color:#f5f5f5;border:1px solid #e3e3e3;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.05);box-shadow:inset 0 1px 1px rgba(0,0,0,.05)}.well blockquote{border-color:#ddd;border-color:rgba(0,0,0,.15)}.well-lg{padding:24px;border-radius:6px}.well-sm{padding:9px;border-radius:3px}.close{float:right;font-size:21px;font-weight:700;line-height:1;color:#000;text-shadow:0 1px 0 #fff;filter:alpha(opacity=20);opacity:.2}.close:focus,.close:hover{color:#000;text-decoration:none;cursor:pointer;filter:alpha(opacity=50);opacity:.5}button.close{-webkit-appearance:none;padding:0;cursor:pointer;background:0 0;border:0}.modal-open{overflow:hidden}.modal{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1050;display:none;overflow:hidden;-webkit-overflow-scrolling:touch;outline:0}.modal.fade .modal-dialog{-webkit-transition:-webkit-transform .3s ease-out;-o-transition:-o-transform .3s ease-out;transition:transform .3s ease-out;-webkit-transform:translate(0,-25%);-ms-transform:translate(0,-25%);-o-transform:translate(0,-25%);transform:translate(0,-25%)}.modal.in .modal-dialog{-webkit-transform:translate(0,0);-ms-transform:translate(0,0);-o-transform:translate(0,0);transform:translate(0,0)}.modal-open 
.modal{overflow-x:hidden;overflow-y:auto}.modal-dialog{position:relative;width:auto;margin:10px}.modal-content{position:relative;background-color:#fff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #999;border:1px solid rgba(0,0,0,.2);border-radius:6px;outline:0;-webkit-box-shadow:0 3px 9px rgba(0,0,0,.5);box-shadow:0 3px 9px rgba(0,0,0,.5)}.modal-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1040;background-color:#000}.modal-backdrop.fade{filter:alpha(opacity=0);opacity:0}.modal-backdrop.in{filter:alpha(opacity=50);opacity:.5}.modal-header{padding:15px;border-bottom:1px solid #e5e5e5}.modal-header .close{margin-top:-2px}.modal-title{margin:0;line-height:1.42857143}.modal-body{position:relative;padding:15px}.modal-footer{padding:15px;text-align:right;border-top:1px solid #e5e5e5}.modal-footer .btn+.btn{margin-bottom:0;margin-left:5px}.modal-footer .btn-group .btn+.btn{margin-left:-1px}.modal-footer .btn-block+.btn-block{margin-left:0}.modal-scrollbar-measure{position:absolute;top:-9999px;width:50px;height:50px;overflow:scroll}@media (min-width:768px){.modal-dialog{width:600px;margin:30px auto}.modal-content{-webkit-box-shadow:0 5px 15px rgba(0,0,0,.5);box-shadow:0 5px 15px rgba(0,0,0,.5)}.modal-sm{width:300px}}@media (min-width:992px){.modal-lg{width:900px}}.tooltip{position:absolute;z-index:1070;display:block;font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:12px;font-style:normal;font-weight:400;line-height:1.42857143;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;word-wrap:normal;white-space:normal;filter:alpha(opacity=0);opacity:0;line-break:auto}.tooltip.in{filter:alpha(opacity=90);opacity:.9}.tooltip.top{padding:5px 0;margin-top:-3px}.tooltip.right{padding:0 5px;margin-left:3px}.tooltip.bottom{padding:5px 0;margin-top:3px}.tooltip.left{padding:0 
5px;margin-left:-3px}.tooltip-inner{max-width:200px;padding:3px 8px;color:#fff;text-align:center;background-color:#000;border-radius:4px}.tooltip-arrow{position:absolute;width:0;height:0;border-color:transparent;border-style:solid}.tooltip.top .tooltip-arrow{bottom:0;left:50%;margin-left:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-left .tooltip-arrow{right:5px;bottom:0;margin-bottom:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-right .tooltip-arrow{bottom:0;left:5px;margin-bottom:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.right .tooltip-arrow{top:50%;left:0;margin-top:-5px;border-width:5px 5px 5px 0;border-right-color:#000}.tooltip.left .tooltip-arrow{top:50%;right:0;margin-top:-5px;border-width:5px 0 5px 5px;border-left-color:#000}.tooltip.bottom .tooltip-arrow{top:0;left:50%;margin-left:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-left .tooltip-arrow{top:0;right:5px;margin-top:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-right .tooltip-arrow{top:0;left:5px;margin-top:-5px;border-width:0 5px 5px;border-bottom-color:#000}.popover{position:absolute;top:0;left:0;z-index:1060;display:none;max-width:276px;padding:1px;font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:14px;font-style:normal;font-weight:400;line-height:1.42857143;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;word-wrap:normal;white-space:normal;background-color:#fff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #ccc;border:1px solid rgba(0,0,0,.2);border-radius:6px;-webkit-box-shadow:0 5px 10px rgba(0,0,0,.2);box-shadow:0 5px 10px rgba(0,0,0,.2);line-break:auto}.popover.top{margin-top:-10px}.popover.right{margin-left:10px}.popover.bottom{margin-top:10px}.popover.left{margin-left:-10px}.popover-title{padding:8px 
14px;margin:0;font-size:14px;background-color:#f7f7f7;border-bottom:1px solid #ebebeb;border-radius:5px 5px 0 0}.popover-content{padding:9px 14px}.popover>.arrow,.popover>.arrow:after{position:absolute;display:block;width:0;height:0;border-color:transparent;border-style:solid}.popover>.arrow{border-width:11px}.popover>.arrow:after{content:"";border-width:10px}.popover.top>.arrow{bottom:-11px;left:50%;margin-left:-11px;border-top-color:#999;border-top-color:rgba(0,0,0,.25);border-bottom-width:0}.popover.top>.arrow:after{bottom:1px;margin-left:-10px;content:" ";border-top-color:#fff;border-bottom-width:0}.popover.right>.arrow{top:50%;left:-11px;margin-top:-11px;border-right-color:#999;border-right-color:rgba(0,0,0,.25);border-left-width:0}.popover.right>.arrow:after{bottom:-10px;left:1px;content:" ";border-right-color:#fff;border-left-width:0}.popover.bottom>.arrow{top:-11px;left:50%;margin-left:-11px;border-top-width:0;border-bottom-color:#999;border-bottom-color:rgba(0,0,0,.25)}.popover.bottom>.arrow:after{top:1px;margin-left:-10px;content:" ";border-top-width:0;border-bottom-color:#fff}.popover.left>.arrow{top:50%;right:-11px;margin-top:-11px;border-right-width:0;border-left-color:#999;border-left-color:rgba(0,0,0,.25)}.popover.left>.arrow:after{right:1px;bottom:-10px;content:" ";border-right-width:0;border-left-color:#fff}.carousel{position:relative}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-inner>.item{position:relative;display:none;-webkit-transition:.6s ease-in-out left;-o-transition:.6s ease-in-out left;transition:.6s ease-in-out left}.carousel-inner>.item>a>img,.carousel-inner>.item>img{line-height:1}@media all and (transform-3d),(-webkit-transform-3d){.carousel-inner>.item{-webkit-transition:-webkit-transform .6s ease-in-out;-o-transition:-o-transform .6s ease-in-out;transition:transform .6s 
ease-in-out;-webkit-backface-visibility:hidden;backface-visibility:hidden;-webkit-perspective:1000px;perspective:1000px}.carousel-inner>.item.active.right,.carousel-inner>.item.next{left:0;-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0)}.carousel-inner>.item.active.left,.carousel-inner>.item.prev{left:0;-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0)}.carousel-inner>.item.active,.carousel-inner>.item.next.left,.carousel-inner>.item.prev.right{left:0;-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}}.carousel-inner>.active,.carousel-inner>.next,.carousel-inner>.prev{display:block}.carousel-inner>.active{left:0}.carousel-inner>.next,.carousel-inner>.prev{position:absolute;top:0;width:100%}.carousel-inner>.next{left:100%}.carousel-inner>.prev{left:-100%}.carousel-inner>.next.left,.carousel-inner>.prev.right{left:0}.carousel-inner>.active.left{left:-100%}.carousel-inner>.active.right{left:100%}.carousel-control{position:absolute;top:0;bottom:0;left:0;width:15%;font-size:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,.6);background-color:rgba(0,0,0,0);filter:alpha(opacity=50);opacity:.5}.carousel-control.left{background-image:-webkit-linear-gradient(left,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);background-image:-o-linear-gradient(left,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);background-image:-webkit-gradient(linear,left top,right top,from(rgba(0,0,0,.5)),to(rgba(0,0,0,.0001)));background-image:linear-gradient(to right,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1);background-repeat:repeat-x}.carousel-control.right{right:0;left:auto;background-image:-webkit-linear-gradient(left,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);background-image:-o-linear-gradient(left,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);background-image:-webkit-gradient(linear,left top,right 
top,from(rgba(0,0,0,.0001)),to(rgba(0,0,0,.5)));background-image:linear-gradient(to right,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1);background-repeat:repeat-x}.carousel-control:focus,.carousel-control:hover{color:#fff;text-decoration:none;filter:alpha(opacity=90);outline:0;opacity:.9}.carousel-control .glyphicon-chevron-left,.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next,.carousel-control .icon-prev{position:absolute;top:50%;z-index:5;display:inline-block;margin-top:-10px}.carousel-control .glyphicon-chevron-left,.carousel-control .icon-prev{left:50%;margin-left:-10px}.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next{right:50%;margin-right:-10px}.carousel-control .icon-next,.carousel-control .icon-prev{width:20px;height:20px;font-family:serif;line-height:1}.carousel-control .icon-prev:before{content:'\2039'}.carousel-control .icon-next:before{content:'\203a'}.carousel-indicators{position:absolute;bottom:10px;left:50%;z-index:15;width:60%;padding-left:0;margin-left:-30%;text-align:center;list-style:none}.carousel-indicators li{display:inline-block;width:10px;height:10px;margin:1px;text-indent:-999px;cursor:pointer;background-color:#000\9;background-color:rgba(0,0,0,0);border:1px solid #fff;border-radius:10px}.carousel-indicators .active{width:12px;height:12px;margin:0;background-color:#fff}.carousel-caption{position:absolute;right:15%;bottom:20px;left:15%;z-index:10;padding-top:20px;padding-bottom:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,.6)}.carousel-caption .btn{text-shadow:none}@media screen and (min-width:768px){.carousel-control .glyphicon-chevron-left,.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next,.carousel-control .icon-prev{width:30px;height:30px;margin-top:-10px;font-size:30px}.carousel-control .glyphicon-chevron-left,.carousel-control 
.icon-prev{margin-left:-10px}.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next{margin-right:-10px}.carousel-caption{right:20%;left:20%;padding-bottom:30px}.carousel-indicators{bottom:20px}}.btn-group-vertical>.btn-group:after,.btn-group-vertical>.btn-group:before,.btn-toolbar:after,.btn-toolbar:before,.clearfix:after,.clearfix:before,.container-fluid:after,.container-fluid:before,.container:after,.container:before,.dl-horizontal dd:after,.dl-horizontal dd:before,.form-horizontal .form-group:after,.form-horizontal .form-group:before,.modal-footer:after,.modal-footer:before,.modal-header:after,.modal-header:before,.nav:after,.nav:before,.navbar-collapse:after,.navbar-collapse:before,.navbar-header:after,.navbar-header:before,.navbar:after,.navbar:before,.pager:after,.pager:before,.panel-body:after,.panel-body:before,.row:after,.row:before{display:table;content:" "}.btn-group-vertical>.btn-group:after,.btn-toolbar:after,.clearfix:after,.container-fluid:after,.container:after,.dl-horizontal dd:after,.form-horizontal .form-group:after,.modal-footer:after,.modal-header:after,.nav:after,.navbar-collapse:after,.navbar-header:after,.navbar:after,.pager:after,.panel-body:after,.row:after{clear:both}.center-block{display:block;margin-right:auto;margin-left:auto}.pull-right{float:right!important}.pull-left{float:left!important}.hide{display:none!important}.show{display:block!important}.invisible{visibility:hidden}.text-hide{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.hidden{display:none!important}.affix{position:fixed}@-ms-viewport{width:device-width}.visible-lg,.visible-md,.visible-sm,.visible-xs{display:none!important}.visible-lg-block,.visible-lg-inline,.visible-lg-inline-block,.visible-md-block,.visible-md-inline,.visible-md-inline-block,.visible-sm-block,.visible-sm-inline,.visible-sm-inline-block,.visible-xs-block,.visible-xs-inline,.visible-xs-inline-block{display:none!important}@media 
(max-width:767px){.visible-xs{display:block!important}table.visible-xs{display:table!important}tr.visible-xs{display:table-row!important}td.visible-xs,th.visible-xs{display:table-cell!important}}@media (max-width:767px){.visible-xs-block{display:block!important}}@media (max-width:767px){.visible-xs-inline{display:inline!important}}@media (max-width:767px){.visible-xs-inline-block{display:inline-block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm{display:block!important}table.visible-sm{display:table!important}tr.visible-sm{display:table-row!important}td.visible-sm,th.visible-sm{display:table-cell!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-block{display:block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline{display:inline!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline-block{display:inline-block!important}}@media (min-width:992px) and (max-width:1199px){.visible-md{display:block!important}table.visible-md{display:table!important}tr.visible-md{display:table-row!important}td.visible-md,th.visible-md{display:table-cell!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-block{display:block!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline{display:inline!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline-block{display:inline-block!important}}@media (min-width:1200px){.visible-lg{display:block!important}table.visible-lg{display:table!important}tr.visible-lg{display:table-row!important}td.visible-lg,th.visible-lg{display:table-cell!important}}@media (min-width:1200px){.visible-lg-block{display:block!important}}@media (min-width:1200px){.visible-lg-inline{display:inline!important}}@media (min-width:1200px){.visible-lg-inline-block{display:inline-block!important}}@media (max-width:767px){.hidden-xs{display:none!important}}@media (min-width:768px) and 
(max-width:991px){.hidden-sm{display:none!important}}@media (min-width:992px) and (max-width:1199px){.hidden-md{display:none!important}}@media (min-width:1200px){.hidden-lg{display:none!important}}.visible-print{display:none!important}@media print{.visible-print{display:block!important}table.visible-print{display:table!important}tr.visible-print{display:table-row!important}td.visible-print,th.visible-print{display:table-cell!important}}.visible-print-block{display:none!important}@media print{.visible-print-block{display:block!important}}.visible-print-inline{display:none!important}@media print{.visible-print-inline{display:inline!important}}.visible-print-inline-block{display:none!important}@media print{.visible-print-inline-block{display:inline-block!important}}@media print{.hidden-print{display:none!important}}
+/*# sourceMappingURL=bootstrap.min.css.map */ \ No newline at end of file
diff --git a/server/app/static/css/dullbrown-theme.css b/server/app/static/css/dullbrown-theme.css
new file mode 100644
index 00000000..98aff038
--- /dev/null
+++ b/server/app/static/css/dullbrown-theme.css
@@ -0,0 +1,502 @@
+* { box-sizing: border-box; }
+html {
+ margin: 0; padding: 0;
+ width: 100%; height: 100%;
+}
+body {
+ margin: 0; padding: 0;
+ width: 100%; height: 100%;
+ font-family: Helvetica, sans-serif;
+}
+body, .modal, #footer {
+ /* Permalink - use to edit and share this gradient: http://colorzilla.com/gradient-editor/#a5ce3e+0,ffffff+50,a5ce3e+100 */
+ background: #7B7568; /* Old browsers */
+ background: -moz-linear-gradient(left, #7B7568 0%, #ffffff 50%, #7B7568 100%); /* FF3.6-15 */
+ background: -webkit-linear-gradient(left, #7B7568 0%,#ffffff 50%,#7B7568 100%); /* Chrome10-25,Safari5.1-6 */
+ background: linear-gradient(to right, #7B7568 0%,#ffffff 50%,#7B7568 100%); /* W3C, IE10+, FF16+, Chrome26+, Opera12+, Safari7+ */
+ filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#7B7568', endColorstr='#7B7568',GradientType=1 ); /* IE6-9 */
+}
+
+/* ------------------------------------------------ */
+/* navbar */
+.navbar-default a.navbar-brand{
+}
+.navbar-default{
+ background: transparent;
+}
+.navbar{
+ margin-bottom: 0;
+ border:0;
+}
+
+.navbar-default .navbar-brand a{
+ color:#ccc;
+}
+.navbar-default a.navbar-brand{
+ color:#ccc;
+}
+.navbar-default a.navbar-brand:hover{
+ color:#fff;
+}
+
+/* Hamburger */
+.navbar-default .navbar-toggle{
+ color:#ccc;
+}
+.navbar-default .navbar-toggle .icon-bar{
+ color:#ccc;
+}
+.navbar-default .navbar-toggle .icon-bar:hover{
+ color:#fff;
+}
+.navbar-default .navbar-toggle:focus, .navbar-default .navbar-toggle:hover{
+ color:#fff;
+ background: transparent;
+}
+.navbar{
+ border-radius: 0px;
+ min-height:30px;
+}
+.navbar-default .navbar-text{
+ color:#ccc;
+}
+.navbar-default .navbar-nav>li>a{
+ color:#ccc;
+}
+.navbar-default .navbar-nav>li>a:hover{
+ color:#fff;
+}
+.navbar-default .navbar-toggle .icon-bar{
+ background-color:#ccc;
+}
+.navbar-default .navbar-toggle:hover .icon-bar{
+ background-color: #eee;
+}
+.navbar-default .navbar-toggle:hover {
+ border-color: #fff;
+}
+.navbar-default .navbar-collapse, .navbar-default .navbar-form{
+ border:0;
+}
+
+/* ------------------------------------------------ */
+/* Jumbotron */
+.jumbotron {
+ padding-top: 0px;
+ padding-bottom: 0px;
+ margin-bottom: 0px;
+ color: inherit;
+}
+.jumbotron{
+ background: transparent;
+ color:black;
+}
+.jumbotron h1{
+ color:#ddd;
+ margin-bottom:0px;
+}
+.jumbotron a.btn-primary{
+ background:#ddd;
+ color:#333;
+}
+.jumbotron a.btn-primary:hover{
+ background:#eee;
+ color:#222;
+}
+.jumbotron p > a.jcallout{
+ color:#eee;
+ padding-bottom: 3px;
+ border-bottom:1px dotted;
+ text-decoration: none;
+}
+.jumbotron p > a.jcallout:hover{
+ color:#fff;
+ border-bottom:1px solid #ccc;
+ text-decoration: none;
+}
+.jumbotron a.btn-default{
+ color:#eee;
+ border:1px solid #eee;
+ background: transparent;
+}
+.jumbotron a.btn-default:hover{
+ background: #22f;
+ border:1px solid #ccc;
+}
+.jumbotron a.btn-default:active{
+ color:#eee;
+ border:1px solid #ccc;
+}
+
+/* Input button override
+-------------------------------------------------- */
+/*input[type="file"] {
+ display: none;
+}
+input[type="button"] {
+ display: none;
+}*/
+input.hidden_input{
+ position: absolute;
+ top: 0;
+ left: 0;
+ width: 100%;
+ height: 100%;
+ opacity: 0;
+}
+
+/* Global styles
+-------------------------------------------------- */
+h1,h2,h3,h4,h5,h6 {
+ font-weight: bold;
+ font-style: italic;
+ text-align: center;
+}
+h1 {
+ font-size:56px;
+ margin-top: 20px;
+ margin-bottom: 0;
+}
+h2 {
+ font-size:20px;
+ margin-top: 0;
+ margin-bottom: 24px;
+}
+ul {
+ list-style: none;
+ margin:0;
+ padding:0;
+}
+li {
+ display: inline-block;
+}
+img.img_responsive_dull {
+ max-width: 100%;
+ height: auto;
+}
+#photo_area{
+ width: 512px;
+ height: 512px;
+ max-width: 97vw;
+ max-height: 97vw;
+ min-width: 240px;
+ min-height: 240px;
+ margin:0 auto;
+ text-align: center;
+}
+.dash_border{
+ background-color: rgba(255,255,255,.2);
+ border:1px dashed #000;
+}
+
+label {
+ display: block;
+}
+
+div.center_inner {
+ position: relative;
+ top: 50%;
+ -webkit-transform: translateY(-50%);
+ -ms-transform: translateY(-50%);
+ transform: translateY(-50%);
+}
+#upload_controls{
+ margin-top:25px;
+ display: none;
+}
+#restart_btn, #rotate_btn, #upload_btn, #dropdown_btn {
+ display: inline-block;
+ margin-left:5px;
+ margin-right:5px;
+}
+.custom-file-upload {
+ display: inline-block;
+ padding: 6px 12px;
+ cursor: pointer;
+}
+
+.align_center{
+ text-align: center;
+}
+
+ul.action-buttons{
+ margin-top: 40px;
+ list-style: none;
+ margin-left: 0;
+ padding-left:0;
+}
+li {
+ list-style: none;
+ margin-left: 0;
+ padding-left:0;
+ /*margin-bottom:20px;*/
+}
+
+.btn {
+ display: inline-block;
+ color: #333;
+ background-color: #fff;
+ border: 1px solid #adadad;
+ font-family: Helvetica, sans-serif;
+ padding: 6px 12px;
+ margin: 0;
+ font-size: 14px;
+ font-weight: 400;
+ line-height: 1.42857143;
+ text-align: center;
+ white-space: nowrap;
+ vertical-align: middle;
+ -ms-touch-action: manipulation;
+ touch-action: manipulation;
+ cursor: pointer;
+ -webkit-user-select: none;
+ -moz-user-select: none;
+ -ms-user-select: none;
+ user-select: none;
+ border-radius: 4px;
+ text-decoration: none;
+ transition: all 0.15s;
+}
+.desktop .btn:hover {
+ background-color: #fff;
+}
+.btn.btn-lg {
+ padding: 10px 16px;
+ font-size: 18px;
+ line-height: 1.3333333;
+ border-radius: 6px;
+}
+.btn.btn-sm {
+ padding: 5px 10px;
+ font-size: 12px;
+ line-height: 1.5;
+ border-radius: 3px;
+}
+.btn.btn-important {
+ border-width: 2px;
+ border-color: #444;
+}
+#photo_area {
+ position: relative;
+ cursor: pointer;
+}
+#restart_btn {
+ position: relative;
+ cursor: pointer;
+}
+input[type=file] {
+ cursor: pointer;
+ opacity: 0;
+ position: absolute;
+ top: 0; left: 0;
+ width: 100%; height: 100%;
+}
+.desktop #photo_area .btn {
+ background: #fff;
+}
+.desktop #photo_area .btn:hover {
+ background: #eee;
+}
+.consent_box {
+ margin-top: 10px;
+ font-size: smaller;
+ color: #444;
+}
+
+/* Intro page
+-------------------------------------------------- */
+canvas {
+ display: block;
+}
+.photo {
+ display: none;
+ width:100%;
+ height:100%;
+}
+
+/* form visibility */
+
+#preloader_anim {
+ display: none;
+}
+#about_btn {
+ margin: 20px 0px 80px 0;
+}
+#share_btns {
+ display: none;
+ margin:20px;
+}
+.notice {
+ color: #444;
+ font-size: small;
+}
+a.btn-default {
+ background-color: transparent;
+}
+a.btn-default:hover {
+ color: #333;
+ background-color: #fff;
+ border-color: #adadad;
+}
+.debug-view {
+ margin-bottom: 20px;
+ font-size:14px;
+ color:#333;
+}
+.debug-view img {
+ margin-bottom:4px;
+}
+#full_results, #hide_more {
+ display: none;
+}
+
+select {
+ display: inline-block;
+ height: 34px;
+ padding: 6px 12px;
+ font-size: 14px;
+ line-height: 1.428571429;
+ color: #555;
+ vertical-align: middle;
+ background-color: #eee;
+ background-image: none;
+ border: 1px solid #bbb;
+ border-radius: 4px;
+ transition: all .15s;
+ cursor: pointer;
+}
+.desktop select:hover {
+ background-color: #fff;
+ cursor: pointer;
+}
+
+/* About
+--------------------------------------------------- */
+.modal {
+ pointer-events: none;
+ opacity: 0;
+ width: 100%;
+ height: 100%;
+ position: fixed;
+ top: 0; left: 0;
+ transition: all 0.2s;
+}
+.modal.visible {
+ pointer-events: auto;
+ opacity: 1;
+}
+.modal p {
+ font-size: 16px;
+ line-height: 24px;
+}
+.modal .inner {
+ margin: 10vh auto;
+ background: rgba(255,255,255,0.5);
+ padding: 20px 20px 40px 20px;
+ width: 600px;
+ max-width: 90vw;
+}
+.modal .content {
+ margin-bottom: 20px;
+}
+
+/* Result
+--------------------------------------------------- */
+.result_view {
+ display: none;
+ text-align: center;
+}
+.final_result img {
+ width: 512px;
+ height: 512px;
+ border: 1px dashed #000;
+ margin: 10px;
+}
+.all_results {
+ display: none;
+}
+.all_results div {
+ margin-bottom: 20px;
+}
+.all_results img {
+ width: 384px;
+ height: 384px;
+ margin: 10px;
+}
+
+.made_with {
+ margin-bottom: 10px;
+}
+#delete_btns {
+ margin-top: 10px;
+ font-size: 10px;
+}
+a#destroy_data {
+ color: #888;
+}
+.desktop a#destroy_data:hover {
+ color: #f00;
+}
+
+/* Footer
+--------------------------------------------------- */
+#footer{
+ /*background: #ddd;*/
+ /*padding: 20px 0;*/
+ /*position: fixed;*/
+ bottom: 0;
+ width: 100%;
+ text-align: center;
+ padding-top: 40px;
+ padding-bottom: 20px;
+}
+#footer ul li a {
+ font-weight: bold;
+ text-decoration: none;
+}
+#footer a {
+ color:#333;
+ text-decoration: underline;
+}
+#footer a:hover {
+ color:#111;
+}
+
+@media screen and (max-width: 500px) {
+ .modal {
+ width: 100vw;
+ height: 100vh;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ }
+ .modal .inner {
+ margin: 0;
+ width: 100vw;
+ height: 100vh;
+ max-width: 100vw;
+ }
+}
+
+
+/* Gallery */
+
+.gallery-preview-row{
+ margin-bottom: 40px
+}
+.gallery-preview-row img{
+ width:100%;
+ padding:2px;
+}
+
+@media (max-width: 600px) {
+ #preloader_anim {
+ position: relative;
+ top: -50px;
+ }
+}
+
+/*hide dropdown*/
+#dropdown_btn{
+ display: none;
+} \ No newline at end of file
diff --git a/server/app/static/css/projector.css b/server/app/static/css/projector.css
new file mode 100644
index 00000000..401f0dff
--- /dev/null
+++ b/server/app/static/css/projector.css
@@ -0,0 +1,52 @@
+html, body, #wrapper {
+ height:100%;
+ width: 100%;
+ margin: 0;
+ padding: 0;
+ border: 0;
+ background-color: #000;
+}
+table{
+ padding:0;
+ margin:0;
+ border-spacing: 0;
+}
+table td{
+ padding:0;
+ margin:0;
+}
+#wrapper td {
+ vertical-align: middle;
+ text-align: center;
+}
+#wrapper img{
+ margin-top:-350px;
+ margin-left:-350px;
+}
+.left{
+ background-color: #000;
+}
+.right{
+ background-color: #000;
+}
+
+#container{
+}
+#container-left{
+ width:50%;
+ float:left;
+ position: relative;
+}
+#container-right{
+ width:50%;
+ float:right;
+ position: relative;
+}
+
+.cycle{position:relative;display: none;}
+.cycle img{position:absolute;z-index:1}
+.cycle img.active{z-index:100}
+.cycle img{
+ width:700px;
+ height:700px;
+} \ No newline at end of file
diff --git a/server/app/static/js/app.js b/server/app/static/js/app.js
new file mode 100644
index 00000000..454d5c37
--- /dev/null
+++ b/server/app/static/js/app.js
@@ -0,0 +1,158 @@
+var app = (function(){
+
+ var app = {}
+
+ app.init = function(){
+ upload.init()
+ app.bind()
+ app.build()
+ app.resize()
+ app.route()
+ }
+ app.bind = function(){
+ $(window).on('resize', app.resize)
+ $(".about_button").on('click', app.about_show)
+ $(".privacy_button").on('click', app.privacy_show)
+ $(".modal").on('click', app.modal_hide)
+ $(".modal .btn").on('click', app.modal_hide)
+ $(".modal .inner").on('click', preventDefault)
+ $("#destroy_data").on('click', app.destroyData)
+ $("#show_all_results").on('click', app.showAllResults)
+ }
+ app.build = function(){
+ var items = JSON.parse(decodeEntities($("#dropdown_options").html()))
+ var $dropdown = $("#dropdown")
+ var options = Object.keys(items).sort().map(key => {
+ var item = items[key]
+ var option = document.createElement('option')
+ option.value = item.name
+ option.innerHTML = item.title
+ if (item.selected) option.selected = true
+ $dropdown.append(option)
+ })
+ var loader = new Image ()
+ loader.src = '/static/img/loader.gif'
+ }
+ app.resize = function(){
+ var $el = $('#photo_area')
+ var w = $el.width()
+ $el.height($el.width())
+ }
+ app.route = function(){
+ const path = window.location.pathname.split('/')
+ path.shift()
+ switch (path[0]) {
+ case 'd':
+ app.processingComplete(path[1], true) // public
+ break
+ case 'p':
+ app.processingComplete(path[1], false) // private
+ break
+ case 'about':
+ app.about_show()
+ break
+ case 'privacy':
+ app.privacy_show()
+ break
+ default:
+ // load index, default state
+ break
+ }
+ }
+
+ /* upload UI changes */
+
+ app.didPickPhoto = function(){
+ $('#upload_controls').fadeIn()
+ $('#user_photo_canvas').show()
+ $('#take_photo_btn').hide()
+ }
+ app.didClickUpload = function(){
+ $('#upload_controls').slideUp('fast')
+ $('#user_photo_canvas').hide()
+ $('#preloader_anim').fadeIn('fast')
+ $('#progress').fadeIn()
+ }
+ app.uploadDidComplete = function(){
+ $('#preloader_anim').hide()
+ $('#progress').hide()
+ }
+ app.uploadDidComplete = function(){
+ // $('#preloader_anim').hide()
+ // $('#progress').hide()
+ }
+ app.updateProgress = function(message, percentage){
+ message = message || "Processing..."
+ percentage = percentage || 0
+ $("#progress").html(message)
+ }
+ app.processingComplete = function(uuid, is_public){
+ $('#preloader_anim').hide()
+ $('#progress').hide()
+ //
+ $("header h2").html("Your dull result")
+ $(".upload_view").hide()
+ $(".results_view").show()
+ var endpoint = is_public ? 'json' : 'json_private'
+ $.getJSON('/static/media/' + endpoint + '/' + uuid + '.json', function(data){
+ console.log(data)
+ var template = $("#result_template").html()
+ var final_result = new Image
+ final_result.src = data.files[data.files.length-1].fn
+ $(".final_result").empty()
+ $(".all_results").empty()
+ $(".final_result").append(final_result)
+ data.files.forEach(function(file){
+ var t = template.replace(/{img}/, file.fn).replace(/{title}/, file.title)
+ $(".all_results").append(t)
+ })
+ $(".result_view").show()
+ $(".permalink").attr('href', window.location.href)
+ }).fail(function(){
+ console.log('error fetching json')
+ window.location.href = '/'
+ })
+ }
+ var detailed = false
+ app.showAllResults = function(){
+ if (!detailed) {
+ detailed = true
+ $(this).html('Hide')
+ $(".all_results").fadeIn('fast')
+ } else {
+ detailed = false
+ $(this).html('Detailed Analysis')
+ $(".all_results").slideUp('fast')
+ }
+ }
+ app.destroyData = function(){
+ var uuid = window.location.pathname.split('/')[2]
+ var confirmed = confirm("Do you really want to delete your dull dream?")
+ if (confirmed) {
+ $.get( [window.location.pathname, 'destroy'].join('/').replace('//', '/') ).always(function(){
+ alert('Dull dream deleted!')
+ window.location.href = '/'
+ })
+ }
+ }
+
+ /* modals */
+
+ app.about_show = function(e){
+ e.preventDefault()
+ $(".about_view").addClass('visible')
+ }
+ app.privacy_show = function(e){
+ e.preventDefault()
+ $(".privacy_view").addClass('visible')
+ }
+ app.modal_hide = function(e){
+ e.preventDefault()
+ e.stopPropagation()
+ $(".modal").removeClass('visible')
+ }
+
+ document.addEventListener('DOMContentLoaded', app.init)
+
+ return app
+})() \ No newline at end of file
diff --git a/server/app/static/js/upload.js b/server/app/static/js/upload.js
new file mode 100644
index 00000000..27437e43
--- /dev/null
+++ b/server/app/static/js/upload.js
@@ -0,0 +1,319 @@
+var messages = {
+ is_processing: "Running semantic segmentation...",
+ upload_failed: "Error attempting to upload the file.",
+ upload_cancelled: "Upload cancelled or browser dropped connection.",
+ unable_to_compute: "We're sorry! We were unable to compute your image.",
+ pending: "Sending to Generative Adversarial Network...",
+ complete: "Processing complete!",
+}
+
+var upload = (function(){
+ var upload = {}
+ var uploading = false
+
+ var MAX_SIDE = 512
+
+ upload.init = function(){
+ upload.bind()
+ }
+
+ upload.bind = function(){
+ $("input[type=file]").on('change', upload.change)
+ $("#upload_btn").on('click', upload.go)
+ document.body.addEventListener("dragover", upload.dragover)
+ document.body.addEventListener("dragleave", upload.dragover)
+ document.body.addEventListener("drop", upload.change)
+ }
+
+ upload.dragover = function(e){
+ e.stopPropagation()
+ e.preventDefault()
+ }
+
+ upload.change = function(e){
+ e.preventDefault()
+ var files = e.dataTransfer ? e.dataTransfer.files : e.target.files
+ if (files.length) {
+ var file = files[files.length - 1]
+ if (!file.type.match('image.*'))
+ return
+ var reader = new FileReader()
+ reader.onload = onReaderLoad
+ reader.readAsDataURL(file)
+ }
+ function onReaderLoad(e) {
+ // Don't leak!
+ reader.onload = null
+ var img = new Image
+ img.onload = function(){
+ img.onload = null
+ upload.ready(img)
+ }
+ img.src = e.target.result
+ }
+ }
+
+ upload.ready = function(img){
+ var resized = renderToCanvas(img, { correctOrientation: true })
+ var canvas = document.querySelector('#user_photo_canvas')
+ ctx = canvas.getContext('2d')
+ ctx.fillStyle = 'black'
+ ctx.fillRect(0, 0, MAX_SIDE, MAX_SIDE)
+ var x_offset = (MAX_SIDE - resized.width) / 2
+ var y_offset = (MAX_SIDE - resized.height) / 2
+
+ ctx.drawImage(resized, x_offset, y_offset)
+ app.didPickPhoto()
+ }
+
+ upload.go = function(){
+ if (uploading) return
+ uploading = true
+ app.didClickUpload()
+ try {
+ var canvas = document.querySelector('#user_photo_canvas')
+ var cb = canvas.toBlob(function(blob){
+ upload.send(blob)
+ }, 'image/jpeg', 0.89)
+ } catch(e){
+ app.updateProgress(messages.unable_to_compute)
+ }
+ }
+
+ upload.send = function(blob){
+ console.log("sending upload...")
+ var fd = new FormData()
+ fd.append('user_image', blob)
+ fd.append('ext', 'jpg')
+ fd.append('style', $("#dropdown").val())
+ fd.append('agree', $("#agree").val() || 0)
+
+ var xhr = new XMLHttpRequest()
+ xhr.upload.addEventListener("progress", upload.progress, false)
+ xhr.addEventListener("load", upload.complete, false)
+ xhr.addEventListener("error", upload.failed, false)
+ xhr.addEventListener("abort", upload.cancelled, false)
+ xhr.open("POST", "/upload")
+ xhr.send(fd)
+ }
+
+ upload.progress = function (e) {
+ if (e.lengthComputable) {
+ var percentComplete = Math.round(e.loaded * 100 / e.total)
+ if (percentComplete > 99) {
+ app.updateProgress(messages.is_processing)
+ } else {
+ app.updateProgress("Uploaded " + percentComplete.toString() + '%')
+ }
+ }
+ else {
+ app.updateProgress(messages.unable_to_compute)
+ }
+ }
+
+ upload.complete = function (e) {
+ uploading = false
+ try {
+ var data = JSON.parse(e.target.responseText)
+ } catch (e) {
+ return app.updateProgress(messages.upload_failed)
+ }
+ app.uploadDidComplete()
+ upload.data = data
+ upload.task_progress(data.task_url)
+ }
+
+ upload.failed = function (evt) {
+ uploading = false
+ app.updateProgress(messages.upload_failed)
+ }
+
+ upload.cancelled = function (evt) {
+ uploading = false
+ app.updateProgress(messages.upload_cancelled)
+ }
+
+ upload.task_progress = function (status_url) {
+ var is_public = $("#agree").val() || 0
+ var uuid = upload.data.uuid
+ $.getJSON(status_url, function(data){
+ console.log(data)
+ var alive = true
+ var delay = 500
+ switch(data.state) {
+ case 'PENDING':
+ app.updateProgress(messages.pending)
+ delay = 2000
+ break
+ case 'PROCESSING':
+ app.updateProgress(data.message, data.percent)
+ delay = 500
+ break
+ case 'SUCCESS':
+ app.updateProgress(messages.complete)
+ if (is_public) {
+ history.pushState({}, 'DullDream', '/d/' + uuid)
+ } else {
+ history.pushState({}, 'DullDream', '/p/' + uuid)
+ }
+ app.processingComplete(uuid, is_public) // truthy if private
+ alive = false
+ break
+ default:
+ // NB: error state
+ alive = false
+ break
+ }
+ if (alive) {
+ setTimeout(function() {
+ upload.task_progress(status_url)
+ }, delay)
+ }
+ })
+ }
+
+
+ function renderToCanvas(img, options) {
+ if (!img) return
+ options = options || {}
+
+ // Canvas max size for any side
+ var maxSize = MAX_SIDE
+ var canvas = document.createElement('canvas')
+ var ctx = canvas.getContext('2d')
+ var initialScale = options.scale || 1
+ // Scale to needed to constrain canvas to max size
+ var scale = getScale(img.width * initialScale, img.height * initialScale, maxSize, maxSize, true)
+ // Still need to apply the user defined scale
+ scale *= initialScale
+ var width = canvas.width = Math.round(img.width * scale)
+ var height = canvas.height = Math.round(img.height * scale)
+ var correctOrientation = options.correctOrientation
+ var jpeg = !!img.src.match(/data:image\/jpeg|\.jpeg$|\.jpg$/i)
+ var hasDataURI = !!img.src.match(/^data:/)
+
+ ctx.save()
+
+ // Can only correct orientation on JPEGs represented as dataURIs
+ // for the time being
+ if (correctOrientation && jpeg && hasDataURI) {
+ applyOrientationCorrection(canvas, ctx, img.src)
+ }
+ // Resize image if too large
+ if (scale !== 1) {
+ ctx.scale(scale, scale)
+ }
+
+ ctx.drawImage(img, 0, 0)
+ ctx.restore()
+
+ return canvas
+ }
+
+ function getScale(width, height, viewportWidth, viewportHeight, fillViewport) {
+ fillViewport = !!fillViewport
+ var landscape = (width / height) > (viewportWidth / viewportHeight)
+ if (landscape) {
+ if (fillViewport) {
+ return fitVertical()
+ } else if (width > viewportWidth) {
+ return fitHorizontal()
+ }
+ } else {
+ if (fillViewport) {
+ return fitHorizontal()
+ } else if (height > viewportHeight) {
+ return fitVertical()
+ }
+ }
+ return 1
+
+ function fitHorizontal() {
+ return viewportWidth / width
+ }
+
+ function fitVertical() {
+ return viewportHeight / height
+ }
+ }
+
+ function applyOrientationCorrection(canvas, ctx, uri) { // Rotate the canvas context so a JPEG data-URI draws upright per its EXIF Orientation tag
+ var orientation = getOrientation(uri) // EXIF code 1..8, or 1 when unreadable
+ // Only apply transform if there is some non-normal orientation
+ if (orientation && orientation !== 1) {
+ var transform = orientationToTransform[orientation]
+ var rotation = transform.rotation
+ var mirror = transform.mirror // NOTE(review): mirror is read but never applied below, so mirrored orientations (2,4,5,7) are only rotated — confirm whether flipping is handled elsewhere
+ var flipAspect = rotation === 90 || rotation === 270 // quarter turns swap the canvas aspect ratio
+ if (flipAspect) {
+ // Fancy schmancy swap algo — swaps canvas.width and canvas.height without a temp variable
+ canvas.width = canvas.height + canvas.width
+ canvas.height = canvas.width - canvas.height
+ canvas.width -= canvas.height
+ }
+ if (rotation > 0) {
+ applyRotation(canvas, ctx, rotation)
+ }
+ }
+ }
+
+ function applyRotation(canvas, ctx, deg) {
+ var radians = deg * (Math.PI / 180)
+ if (deg === 90) {
+ ctx.translate(canvas.width, 0)
+ } else if (deg === 180) {
+ ctx.translate(canvas.width, canvas.height)
+ } else if (deg == 270) {
+ ctx.translate(0, canvas.height)
+ }
+ ctx.rotate(radians)
+ }
+
+ function getOrientation (uri) { // Read the EXIF Orientation tag (1..8) from a JPEG data-URI; returns 1 (normal) on any parse failure
+ var exif = new ExifReader
+ // Split off the base64 data
+ var base64String = uri.split(',')[1]
+ // Read off first 128KB, which is all we need to
+ // get the EXIF data
+ var arr = base64ToUint8Array(base64String, 0, Math.pow(2, 17)) // 2^17 = 128KB; the EXIF APP1 segment sits near the start of the file
+ try {
+ exif.load(arr.buffer)
+ return exif.getTagValue('Orientation')
+ } catch (err) {
+ return 1 // no/invalid EXIF data: treat as normal orientation
+ }
+ }
+
+ function base64ToUint8Array(string, start, finish) { // Decode a base64 string to a Uint8Array, filling only bytes [start, finish)
+ var start = start || 0
+ var finish = finish || string.length // NOTE(review): defaults to the encoded (base64) length, not the decoded length — confirm intended
+ // atob that shit
+ var binary = atob(string) // NOTE(review): decodes the entire string even when only a prefix is wanted; slicing before atob would honour the 128KB cap
+ var buffer = new Uint8Array(binary.length) // spans the full decoded length; bytes outside [start, finish) remain zero
+ for (var i = start; i < finish; i++) {
+ buffer[i] = binary.charCodeAt(i)
+ }
+ return buffer
+ }
+
+ /**
+ * Mapping from EXIF orientation values to data
+ * regarding the rotation and mirroring necessary to
+ * render the canvas correctly
+ * Derived from:
+ * http://www.daveperrett.com/articles/2012/07/28/exif-orientation-handling-is-a-ghetto/
+ */
+ var orientationToTransform = {
+ 1: { rotation: 0, mirror: false }, // normal
+ 2: { rotation: 0, mirror: true }, // flipped horizontally
+ 3: { rotation: 180, mirror: false }, // upside down
+ 4: { rotation: 180, mirror: true }, // upside down + flipped
+ 5: { rotation: 90, mirror: true }, // rotated 90 CW + flipped
+ 6: { rotation: 90, mirror: false }, // rotated 90 CW
+ 7: { rotation: 270, mirror: true }, // rotated 270 CW + flipped
+ 8: { rotation: 270, mirror: false } // rotated 270 CW
+ }
+
+
+ return upload
+})() \ No newline at end of file
diff --git a/server/app/static/js/util.js b/server/app/static/js/util.js
new file mode 100644
index 00000000..851f634a
--- /dev/null
+++ b/server/app/static/js/util.js
@@ -0,0 +1,32 @@
+var is_iphone = (navigator.userAgent.match(/iPhone/i)) || (navigator.userAgent.match(/iPod/i)) // UA sniffing: match result is an array (truthy) or null
+var is_ipad = (navigator.userAgent.match(/iPad/i))
+var is_android = (navigator.userAgent.match(/Android/i))
+var is_mobile = is_iphone || is_ipad || is_android // truthy when any mobile UA matched
+var is_desktop = ! is_mobile;
+
+document.body.parentNode.classList.add(is_desktop ? 'desktop' : 'mobile') // tag the <html> element with a device class for CSS hooks
+
+function preventDefault(e){ // Event helper: cancel the default action and stop the event bubbling
+ e.preventDefault()
+ e.stopPropagation()
+}
+
+var decodeEntities = (function() { // Decode HTML entities in a string after stripping script/markup; IIFE caches one scratch element
+ // this prevents any overhead from creating the object each time
+ var element = document.createElement('div');
+
+ function decodeHTMLEntities (str) { // returns str unchanged when falsy or not a string
+ if(str && typeof str === 'string') {
+ // strip script/html tags
+ str = str.replace(/<script[^>]*>([\S\s]*?)<\/script>/gmi, '');
+ str = str.replace(/<\/?\w(?:[^"'>]|"[^"]*"|'[^']*')*>/gmi, '');
+ element.innerHTML = str; // let the browser parse the entities
+ str = element.textContent;
+ element.textContent = ''; // reset so the scratch element retains nothing
+ }
+
+ return str;
+ }
+
+ return decodeHTMLEntities;
+})(); \ No newline at end of file
diff --git a/server/app/static/js/vendor/ExifReader.js b/server/app/static/js/vendor/ExifReader.js
new file mode 100644
index 00000000..a8343ede
--- /dev/null
+++ b/server/app/static/js/vendor/ExifReader.js
@@ -0,0 +1,1363 @@
+// Generated by CoffeeScript 1.6.2
+/*
+# ExifReader 1.1.1
+# http://github.com/mattiasw/exifreader
+# Copyright (C) 2011-2014 Mattias Wallander <mattias@wallander.eu>
+# Licensed under the GNU Lesser General Public License version 3 or later
+# See license text at http://www.gnu.org/licenses/lgpl.txt
+*/
+
+
+(function() {
+ (typeof exports !== "undefined" && exports !== null ? exports : this).ExifReader = (function() {
+ ExifReader.prototype._MIN_DATA_BUFFER_LENGTH = 2;
+
+ ExifReader.prototype._JPEG_ID_SIZE = 2;
+
+ ExifReader.prototype._JPEG_ID = 0xffd8;
+
+ ExifReader.prototype._APP_MARKER_SIZE = 2;
+
+ ExifReader.prototype._APP0_MARKER = 0xffe0;
+
+ ExifReader.prototype._APP1_MARKER = 0xffe1;
+
+ ExifReader.prototype._APP15_MARKER = 0xffef;
+
+ ExifReader.prototype._APP_ID_OFFSET = 4;
+
+ ExifReader.prototype._BYTES_Exif = 0x45786966;
+
+ ExifReader.prototype._TIFF_HEADER_OFFSET = 10;
+
+ ExifReader.prototype._BYTE_ORDER_BIG_ENDIAN = 0x4949;
+
+ ExifReader.prototype._BYTE_ORDER_LITTLE_ENDIAN = 0x4d4d;
+
+ function ExifReader() {
+ var _this = this;
+
+ this._getTagValueAt = {
+ 1: function(offset) {
+ return _this._getByteAt(offset);
+ },
+ 2: function(offset) {
+ return _this._getAsciiAt(offset);
+ },
+ 3: function(offset) {
+ return _this._getShortAt(offset);
+ },
+ 4: function(offset) {
+ return _this._getLongAt(offset);
+ },
+ 5: function(offset) {
+ return _this._getRationalAt(offset);
+ },
+ 7: function(offset) {
+ return _this._getUndefinedAt(offset);
+ },
+ 9: function(offset) {
+ return _this._getSlongAt(offset);
+ },
+ 10: function(offset) {
+ return _this._getSrationalAt(offset);
+ }
+ };
+ this._tiffHeaderOffset = 0;
+ }
+
+ /*
+ # Loads all the Exif tags from the specified image file buffer.
+ #
+ # data ArrayBuffer Image file data
+ */
+
+
+ ExifReader.prototype.load = function(data) {
+ return this.loadView(new DataView(data));
+ };
+
+ /*
+ # Loads all the Exif tags from the specified image file buffer view. Probably
+ # used when DataView isn't supported by the browser.
+ #
+ # @_dataView DataView Image file data view
+ */
+
+
+ ExifReader.prototype.loadView = function(_dataView) {
+ this._dataView = _dataView;
+ this._tags = {};
+ this._checkImageHeader();
+ this._readTags();
+ return this._dataView = null;
+ };
+
+ ExifReader.prototype._checkImageHeader = function() {
+ if (this._dataView.byteLength < this._MIN_DATA_BUFFER_LENGTH || this._dataView.getUint16(0, false) !== this._JPEG_ID) {
+ throw new Error('Invalid image format');
+ }
+ this._parseAppMarkers(this._dataView);
+ if (!this._hasExifData()) {
+ throw new Error('No Exif data');
+ }
+ };
+
+ ExifReader.prototype._parseAppMarkers = function(dataView) {
+ var appMarkerPosition, fieldLength, _results;
+
+ appMarkerPosition = this._JPEG_ID_SIZE;
+ _results = [];
+ while (true) {
+ if (dataView.byteLength < appMarkerPosition + this._APP_ID_OFFSET + 5) {
+ break;
+ }
+ if (this._isApp1ExifMarker(dataView, appMarkerPosition)) {
+ fieldLength = dataView.getUint16(appMarkerPosition + this._APP_MARKER_SIZE, false);
+ this._tiffHeaderOffset = appMarkerPosition + this._TIFF_HEADER_OFFSET;
+ } else if (this._isAppMarker(dataView, appMarkerPosition)) {
+ fieldLength = dataView.getUint16(appMarkerPosition + this._APP_MARKER_SIZE, false);
+ } else {
+ break;
+ }
+ _results.push(appMarkerPosition += this._APP_MARKER_SIZE + fieldLength);
+ }
+ return _results;
+ };
+
+ ExifReader.prototype._isApp1ExifMarker = function(dataView, appMarkerPosition) {
+ return dataView.getUint16(appMarkerPosition, false) === this._APP1_MARKER && dataView.getUint32(appMarkerPosition + this._APP_ID_OFFSET, false) === this._BYTES_Exif && dataView.getUint8(appMarkerPosition + this._APP_ID_OFFSET + 4, false) === 0x00;
+ };
+
+ ExifReader.prototype._isAppMarker = function(dataView, appMarkerPosition) {
+ var appMarker;
+
+ appMarker = dataView.getUint16(appMarkerPosition, false);
+ return appMarker >= this._APP0_MARKER && appMarker <= this._APP15_MARKER;
+ };
+
+ ExifReader.prototype._hasExifData = function() {
+ return this._tiffHeaderOffset !== 0;
+ };
+
+ ExifReader.prototype._readTags = function() {
+ this._setByteOrder();
+ this._read0thIfd();
+ this._readExifIfd();
+ this._readGpsIfd();
+ return this._readInteroperabilityIfd();
+ };
+
+ ExifReader.prototype._setByteOrder = function() {
+ if (this._dataView.getUint16(this._tiffHeaderOffset) === this._BYTE_ORDER_BIG_ENDIAN) {
+ return this._littleEndian = true;
+ } else if (this._dataView.getUint16(this._tiffHeaderOffset) === this._BYTE_ORDER_LITTLE_ENDIAN) {
+ return this._littleEndian = false;
+ } else {
+ throw new Error('Illegal byte order value. Faulty image.');
+ }
+ };
+
+ ExifReader.prototype._read0thIfd = function() {
+ var ifdOffset;
+
+ ifdOffset = this._getIfdOffset();
+ return this._readIfd('0th', ifdOffset);
+ };
+
+ ExifReader.prototype._getIfdOffset = function() {
+ return this._tiffHeaderOffset + this._getLongAt(this._tiffHeaderOffset + 4);
+ };
+
+ ExifReader.prototype._readExifIfd = function() {
+ var ifdOffset;
+
+ if (this._tags['Exif IFD Pointer'] != null) {
+ ifdOffset = this._tiffHeaderOffset + this._tags['Exif IFD Pointer'].value;
+ return this._readIfd('exif', ifdOffset);
+ }
+ };
+
+ ExifReader.prototype._readGpsIfd = function() {
+ var ifdOffset;
+
+ if (this._tags['GPS Info IFD Pointer'] != null) {
+ ifdOffset = this._tiffHeaderOffset + this._tags['GPS Info IFD Pointer'].value;
+ return this._readIfd('gps', ifdOffset);
+ }
+ };
+
+ ExifReader.prototype._readInteroperabilityIfd = function() {
+ var ifdOffset;
+
+ if (this._tags['Interoperability IFD Pointer'] != null) {
+ ifdOffset = this._tiffHeaderOffset + this._tags['Interoperability IFD Pointer'].value;
+ return this._readIfd('interoperability', ifdOffset);
+ }
+ };
+
+ ExifReader.prototype._readIfd = function(ifdType, offset) {
+ var fieldIndex, numberOfFields, tag, _i, _results;
+
+ numberOfFields = this._getShortAt(offset);
+ offset += 2;
+ _results = [];
+ for (fieldIndex = _i = 0; 0 <= numberOfFields ? _i < numberOfFields : _i > numberOfFields; fieldIndex = 0 <= numberOfFields ? ++_i : --_i) {
+ tag = this._readTag(ifdType, offset);
+ if (tag !== void 0) {
+ this._tags[tag.name] = {
+ 'value': tag.value,
+ 'description': tag.description
+ };
+ }
+ _results.push(offset += 12);
+ }
+ return _results;
+ };
+
+ ExifReader.prototype._readTag = function(ifdType, offset) {
+ var tagCode, tagCount, tagDescription, tagName, tagType, tagValue, tagValueOffset;
+
+ tagCode = this._getShortAt(offset);
+ tagType = this._getShortAt(offset + 2);
+ tagCount = this._getLongAt(offset + 4);
+ if (this._typeSizes[tagType] === void 0) {
+ return void 0;
+ }
+ if (this._typeSizes[tagType] * tagCount <= 4) {
+ tagValue = this._getTagValue(offset + 8, tagType, tagCount);
+ } else {
+ tagValueOffset = this._getLongAt(offset + 8);
+ tagValue = this._getTagValue(this._tiffHeaderOffset + tagValueOffset, tagType, tagCount);
+ }
+ if (tagType === this._tagTypes['ASCII']) {
+ tagValue = this._splitNullSeparatedAsciiString(tagValue);
+ }
+ if (this._tagNames[ifdType][tagCode] != null) {
+ if ((this._tagNames[ifdType][tagCode]['name'] != null) && (this._tagNames[ifdType][tagCode]['description'] != null)) {
+ tagName = this._tagNames[ifdType][tagCode]['name'];
+ tagDescription = this._tagNames[ifdType][tagCode]['description'](tagValue);
+ } else {
+ tagName = this._tagNames[ifdType][tagCode];
+ if (tagValue instanceof Array) {
+ tagDescription = tagValue.join(', ');
+ } else {
+ tagDescription = tagValue;
+ }
+ }
+ return {
+ 'name': tagName,
+ 'value': tagValue,
+ 'description': tagDescription
+ };
+ } else {
+ return {
+ 'name': "undefined-" + tagCode,
+ 'value': tagValue,
+ 'description': tagValue
+ };
+ }
+ };
+
+ ExifReader.prototype._getTagValue = function(offset, type, count) {
+ var tagValue, value, valueIndex;
+
+ value = (function() {
+ var _i, _results;
+
+ _results = [];
+ for (valueIndex = _i = 0; 0 <= count ? _i < count : _i > count; valueIndex = 0 <= count ? ++_i : --_i) {
+ tagValue = this._getTagValueAt[type](offset);
+ offset += this._typeSizes[type];
+ _results.push(tagValue);
+ }
+ return _results;
+ }).call(this);
+ if (value.length === 1) {
+ value = value[0];
+ } else if (type === this._tagTypes['ASCII']) {
+ value = this._getAsciiValue(value);
+ }
+ return value;
+ };
+
+ ExifReader.prototype._getAsciiValue = function(charArray) {
+ var charCode, newCharArray;
+
+ return newCharArray = (function() {
+ var _i, _len, _results;
+
+ _results = [];
+ for (_i = 0, _len = charArray.length; _i < _len; _i++) {
+ charCode = charArray[_i];
+ _results.push(String.fromCharCode(charCode));
+ }
+ return _results;
+ })();
+ };
+
+ ExifReader.prototype._getByteAt = function(offset) {
+ return this._dataView.getUint8(offset);
+ };
+
+ ExifReader.prototype._getAsciiAt = function(offset) {
+ return this._dataView.getUint8(offset);
+ };
+
+ ExifReader.prototype._getShortAt = function(offset) {
+ return this._dataView.getUint16(offset, this._littleEndian);
+ };
+
+ ExifReader.prototype._getLongAt = function(offset) {
+ return this._dataView.getUint32(offset, this._littleEndian);
+ };
+
+ ExifReader.prototype._getRationalAt = function(offset) {
+ return this._getLongAt(offset) / this._getLongAt(offset + 4);
+ };
+
+ ExifReader.prototype._getUndefinedAt = function(offset) {
+ return this._getByteAt(offset);
+ };
+
+ ExifReader.prototype._getSlongAt = function(offset) {
+ return this._dataView.getInt32(offset, this._littleEndian);
+ };
+
+ ExifReader.prototype._getSrationalAt = function(offset) {
+ return this._getSlongAt(offset) / this._getSlongAt(offset + 4);
+ };
+
+ ExifReader.prototype._splitNullSeparatedAsciiString = function(string) {
+ var character, i, tagValue, _i, _len;
+
+ tagValue = [];
+ i = 0;
+ for (_i = 0, _len = string.length; _i < _len; _i++) {
+ character = string[_i];
+ if (character === '\x00') {
+ i++;
+ continue;
+ }
+ if (tagValue[i] == null) {
+ tagValue[i] = '';
+ }
+ tagValue[i] += character;
+ }
+ return tagValue;
+ };
+
+ ExifReader.prototype._typeSizes = {
+ 1: 1,
+ 2: 1,
+ 3: 2,
+ 4: 4,
+ 5: 8,
+ 7: 1,
+ 9: 4,
+ 10: 8
+ };
+
+ ExifReader.prototype._tagTypes = {
+ 'BYTE': 1,
+ 'ASCII': 2,
+ 'SHORT': 3,
+ 'LONG': 4,
+ 'RATIONAL': 5,
+ 'UNDEFINED': 7,
+ 'SLONG': 9,
+ 'SRATIONAL': 10
+ };
+
+ ExifReader.prototype._tagNames = {
+ '0th': {
+ 0x0100: 'ImageWidth',
+ 0x0101: 'ImageLength',
+ 0x0102: 'BitsPerSample',
+ 0x0103: 'Compression',
+ 0x0106: 'PhotometricInterpretation',
+ 0x010e: 'ImageDescription',
+ 0x010f: 'Make',
+ 0x0110: 'Model',
+ 0x0111: 'StripOffsets',
+ 0x0112: {
+ 'name': 'Orientation',
+ 'description': function(value) {
+ switch (value) {
+ case 1:
+ return 'top-left';
+ case 2:
+ return 'top-right';
+ case 3:
+ return 'bottom-right';
+ case 4:
+ return 'bottom-left';
+ case 5:
+ return 'left-top';
+ case 6:
+ return 'right-top';
+ case 7:
+ return 'right-bottom';
+ case 8:
+ return 'left-bottom';
+ default:
+ return 'Undefined';
+ }
+ }
+ },
+ 0x0115: 'SamplesPerPixel',
+ 0x0116: 'RowsPerStrip',
+ 0x0117: 'StripByteCounts',
+ 0x011a: 'XResolution',
+ 0x011b: 'YResolution',
+ 0x011c: 'PlanarConfiguration',
+ 0x0128: {
+ 'name': 'ResolutionUnit',
+ 'description': function(value) {
+ switch (value) {
+ case 2:
+ return 'inches';
+ case 3:
+ return 'centimeters';
+ default:
+ return 'Unknown';
+ }
+ }
+ },
+ 0x012d: 'TransferFunction',
+ 0x0131: 'Software',
+ 0x0132: 'DateTime',
+ 0x013b: 'Artist',
+ 0x013e: 'WhitePoint',
+ 0x013f: 'PrimaryChromaticities',
+ 0x0201: 'JPEGInterchangeFormat',
+ 0x0202: 'JPEGInterchangeFormatLength',
+ 0x0211: 'YCbCrCoefficients',
+ 0x0212: 'YCbCrSubSampling',
+ 0x0213: {
+ 'name': 'YCbCrPositioning',
+ 'description': function(value) {
+ switch (value) {
+ case 1:
+ return 'centered';
+ case 2:
+ return 'co-sited';
+ default:
+ return 'undefied ' + value;
+ }
+ }
+ },
+ 0x0214: 'ReferenceBlackWhite',
+ 0x8298: {
+ 'name': 'Copyright',
+ 'description': function(value) {
+ return value.join('; ');
+ }
+ },
+ 0x8769: 'Exif IFD Pointer',
+ 0x8825: 'GPS Info IFD Pointer'
+ },
+ 'exif': {
+ 0x829a: 'ExposureTime',
+ 0x829d: 'FNumber',
+ 0x8822: {
+ 'name': 'ExposureProgram',
+ 'description': function(value) {
+ switch (value) {
+ case 0:
+ return 'Undefined';
+ case 1:
+ return 'Manual';
+ case 2:
+ return 'Normal program';
+ case 3:
+ return 'Aperture priority';
+ case 4:
+ return 'Shutter priority';
+ case 5:
+ return 'Creative program';
+ case 6:
+ return 'Action program';
+ case 7:
+ return 'Portrait mode';
+ case 8:
+ return 'Landscape mode';
+ default:
+ return 'Unknown';
+ }
+ }
+ },
+ 0x8824: 'SpectralSensitivity',
+ 0x8827: 'ISOSpeedRatings',
+ 0x8828: {
+ 'name': 'OECF',
+ 'description': function(value) {
+ return '[Raw OECF table data]';
+ }
+ },
+ 0x9000: {
+ 'name': 'ExifVersion',
+ 'description': function(value) {
+ var charCode, string, _i, _len;
+
+ string = '';
+ for (_i = 0, _len = value.length; _i < _len; _i++) {
+ charCode = value[_i];
+ string += String.fromCharCode(charCode);
+ }
+ return string;
+ }
+ },
+ 0x9003: 'DateTimeOriginal',
+ 0x9004: 'DateTimeDigitized',
+ 0x9101: {
+ 'name': 'ComponentsConfiguration',
+ 'description': function(value) {
+ var character, string, _i, _len;
+
+ string = '';
+ for (_i = 0, _len = value.length; _i < _len; _i++) {
+ character = value[_i];
+ switch (character) {
+ case 0x31:
+ string += 'Y';
+ break;
+ case 0x32:
+ string += 'Cb';
+ break;
+ case 0x33:
+ string += 'Cr';
+ break;
+ case 0x34:
+ string += 'R';
+ break;
+ case 0x35:
+ string += 'G';
+ break;
+ case 0x36:
+ string += 'B';
+ }
+ }
+ return string;
+ }
+ },
+ 0x9102: 'CompressedBitsPerPixel',
+ 0x9201: 'ShutterSpeedValue',
+ 0x9202: 'ApertureValue',
+ 0x9203: 'BrightnessValue',
+ 0x9204: 'ExposureBiasValue',
+ 0x9205: 'MaxApertureValue',
+ 0x9206: 'SubjectDistance',
+ 0x9207: {
+ 'name': 'MeteringMode',
+ 'description': function(value) {
+ switch (value) {
+ case 1:
+ return 'Average';
+ case 2:
+ return 'CenterWeightedAverage';
+ case 3:
+ return 'Spot';
+ case 4:
+ return 'MultiSpot';
+ case 5:
+ return 'Pattern';
+ case 6:
+ return 'Partial';
+ case 255:
+ return 'Other';
+ default:
+ return 'Unknown';
+ }
+ }
+ },
+ 0x9208: {
+ 'name': 'LightSource',
+ 'description': function(value) {
+ switch (value) {
+ case 1:
+ return 'Daylight';
+ case 2:
+ return 'Fluorescent';
+ case 3:
+ return 'Tungsten (incandescent light)';
+ case 4:
+ return 'Flash';
+ case 9:
+ return 'Fine weather';
+ case 10:
+ return 'Cloudy weather';
+ case 11:
+ return 'Shade';
+ case 12:
+ return 'Daylight fluorescent (D 5700 – 7100K)';
+ case 13:
+ return 'Day white fluorescent (N 4600 – 5400K)';
+ case 14:
+ return 'Cool white fluorescent (W 3900 – 4500K)';
+ case 15:
+ return 'White fluorescent (WW 3200 – 3700K)';
+ case 17:
+ return 'Standard light A';
+ case 18:
+ return 'Standard light B';
+ case 19:
+ return 'Standard light C';
+ case 20:
+ return 'D55';
+ case 21:
+ return 'D65';
+ case 22:
+ return 'D75';
+ case 23:
+ return 'D50';
+ case 24:
+ return 'ISO studio tungsten';
+ case 255:
+ return 'Other light source';
+ default:
+ return 'Unknown';
+ }
+ }
+ },
+ 0x9209: {
+ 'name': 'Flash',
+ 'description': function(value) {
+ switch (value) {
+ case 0x00:
+ return 'Flash did not fire';
+ case 0x01:
+ return 'Flash fired';
+ case 0x05:
+ return 'Strobe return light not detected';
+ case 0x07:
+ return 'Strobe return light detected';
+ case 0x09:
+ return 'Flash fired, compulsory flash mode';
+ case 0x0d:
+ return 'Flash fired, compulsory flash mode, return light not detected';
+ case 0x0f:
+ return 'Flash fired, compulsory flash mode, return light detected';
+ case 0x10:
+ return 'Flash did not fire, compulsory flash mode';
+ case 0x18:
+ return 'Flash did not fire, auto mode';
+ case 0x19:
+ return 'Flash fired, auto mode';
+ case 0x1d:
+ return 'Flash fired, auto mode, return light not detected';
+ case 0x1f:
+ return 'Flash fired, auto mode, return light detected';
+ case 0x20:
+ return 'No flash function';
+ case 0x41:
+ return 'Flash fired, red-eye reduction mode';
+ case 0x45:
+ return 'Flash fired, red-eye reduction mode, return light not detected';
+ case 0x47:
+ return 'Flash fired, red-eye reduction mode, return light detected';
+ case 0x49:
+ return 'Flash fired, compulsory flash mode, red-eye reduction mode';
+ case 0x4d:
+ return 'Flash fired, compulsory flash mode, red-eye reduction mode, return light not detected';
+ case 0x4f:
+ return 'Flash fired, compulsory flash mode, red-eye reduction mode, return light detected';
+ case 0x59:
+ return 'Flash fired, auto mode, red-eye reduction mode';
+ case 0x5d:
+ return 'Flash fired, auto mode, return light not detected, red-eye reduction mode';
+ case 0x5f:
+ return 'Flash fired, auto mode, return light detected, red-eye reduction mode';
+ default:
+ return 'Unknown';
+ }
+ }
+ },
+ 0x920a: 'FocalLength',
+ 0x9214: {
+ 'name': 'SubjectArea',
+ 'description': function(value) {
+ switch (value.length) {
+ case 2:
+ return "Location; X: " + value[0] + ", Y: " + value[1];
+ case 3:
+ return "Circle; X: " + value[0] + ", Y: " + value[1] + ", diameter: " + value[2];
+ case 4:
+ return "Rectangle; X: " + value[0] + ", Y: " + value[1] + ", width: " + value[2] + ", height: " + value[3];
+ default:
+ return 'Unknown';
+ }
+ }
+ },
+ 0x927c: {
+ 'name': 'MakerNote',
+ 'description': function(value) {
+ return '[Raw maker note data]';
+ }
+ },
+ 0x9286: {
+ 'name': 'UserComment',
+ 'description': function(value) {
+ switch (value.slice(0, 8).map(function(charCode) {
+ return String.fromCharCode(charCode);
+ }).join('')) {
+ case 'ASCII\x00\x00\x00':
+ return value.slice(8, value.length).map(function(charCode) {
+ return String.fromCharCode(charCode);
+ }).join('');
+ case 'JIS\x00\x00\x00\x00\x00':
+ return '[JIS encoded text]';
+ case 'UNICODE\x00':
+ return '[Unicode encoded text]';
+ case '\x00\x00\x00\x00\x00\x00\x00\x00':
+ return '[Undefined encoding]';
+ }
+ }
+ },
+ 0x9290: 'SubSecTime',
+ 0x9291: 'SubSecTimeOriginal',
+ 0x9292: 'SubSecTimeDigitized',
+ 0xa000: {
+ 'name': 'FlashpixVersion',
+ 'description': function(value) {
+ var charCode, string, _i, _len;
+
+ string = '';
+ for (_i = 0, _len = value.length; _i < _len; _i++) {
+ charCode = value[_i];
+ string += String.fromCharCode(charCode);
+ }
+ return string;
+ }
+ },
+ 0xa001: {
+ 'name': 'ColorSpace',
+ 'description': function(value) {
+ switch (value) {
+ case 1:
+ return 'sRGB';
+ case 0xffff:
+ return 'Uncalibrated';
+ default:
+ return 'Unknown';
+ }
+ }
+ },
+ 0xa002: 'PixelXDimension',
+ 0xa003: 'PixelYDimension',
+ 0xa004: 'RelatedSoundFile',
+ 0xa005: 'Interoperability IFD Pointer',
+ 0xa20b: 'FlashEnergy',
+ 0xa20c: {
+ 'name': 'SpatialFrequencyResponse',
+ 'description': function(value) {
+ return '[Raw SFR table data]';
+ }
+ },
+ 0xa20e: 'FocalPlaneXResolution',
+ 0xa20f: 'FocalPlaneYResolution',
+ 0xa210: {
+ 'name': 'FocalPlaneResolutionUnit',
+ 'description': function(value) {
+ switch (value) {
+ case 2:
+ return 'inches';
+ case 3:
+ return 'centimeters';
+ default:
+ return 'Unknown';
+ }
+ }
+ },
+ 0xa214: {
+ 'name': 'SubjectLocation',
+ 'description': function(value) {
+ return "X: " + value[0] + ", Y: " + value[1];
+ }
+ },
+ 0xa215: 'ExposureIndex',
+ 0xa217: {
+ 'name': 'SensingMethod',
+ 'description': function(value) {
+ switch (value) {
+ case 1:
+ return 'Undefined';
+ case 2:
+ return 'One-chip color area sensor';
+ case 3:
+ return 'Two-chip color area sensor';
+ case 4:
+ return 'Three-chip color area sensor';
+ case 5:
+ return 'Color sequential area sensor';
+ case 7:
+ return 'Trilinear sensor';
+ case 8:
+ return 'Color sequential linear sensor';
+ default:
+ return 'Unknown';
+ }
+ }
+ },
+ 0xa300: {
+ 'name': 'FileSource',
+ 'description': function(value) {
+ switch (value) {
+ case 3:
+ return 'DSC';
+ default:
+ return 'Unknown';
+ }
+ }
+ },
+ 0xa301: {
+ 'name': 'SceneType',
+ 'description': function(value) {
+ switch (value) {
+ case 1:
+ return 'A directly photographed image';
+ default:
+ return 'Unknown';
+ }
+ }
+ },
+ 0xa302: {
+ 'name': 'CFAPattern',
+ 'description': function(value) {
+ return '[Raw CFA pattern table data]';
+ }
+ },
+ 0xa401: {
+ 'name': 'CustomRendered',
+ 'description': function(value) {
+ switch (value) {
+ case 0:
+ return 'Normal process';
+ case 1:
+ return 'Custom process';
+ default:
+ return 'Unknown';
+ }
+ }
+ },
+ 0xa402: {
+ 'name': 'ExposureMode',
+ 'description': function(value) {
+ switch (value) {
+ case 0:
+ return 'Auto exposure';
+ case 1:
+ return 'Manual exposure';
+ case 2:
+ return 'Auto bracket';
+ default:
+ return 'Unknown';
+ }
+ }
+ },
+ 0xa403: {
+ 'name': 'WhiteBalance',
+ 'description': function(value) {
+ switch (value) {
+ case 0:
+ return 'Auto white balance';
+ case 1:
+ return 'Manual white balance';
+ default:
+ return 'Unknown';
+ }
+ }
+ },
+ 0xa404: {
+ 'name': 'DigitalZoomRatio',
+ 'description': function(value) {
+ switch (value) {
+ case 0:
+ return 'Digital zoom was not used';
+ default:
+ return value;
+ }
+ }
+ },
+ 0xa405: {
+ 'name': 'FocalLengthIn35mmFilm',
+ 'description': function(value) {
+ switch (value) {
+ case 0:
+ return 'Unknown';
+ default:
+ return value;
+ }
+ }
+ },
+ 0xa406: {
+ 'name': 'SceneCaptureType',
+ 'description': function(value) {
+ switch (value) {
+ case 0:
+ return 'Standard';
+ case 1:
+ return 'Landscape';
+ case 2:
+ return 'Portrait';
+ case 3:
+ return 'Night scene';
+ default:
+ return 'Unknown';
+ }
+ }
+ },
+ 0xa407: {
+ 'name': 'GainControl',
+ 'description': function(value) {
+ switch (value) {
+ case 0:
+ return 'None';
+ case 1:
+ return 'Low gain up';
+ case 2:
+ return 'High gain up';
+ case 3:
+ return 'Low gain down';
+ case 4:
+ return 'High gain down';
+ default:
+ return 'Unknown';
+ }
+ }
+ },
+ 0xa408: {
+ 'name': 'Contrast',
+ 'description': function(value) {
+ switch (value) {
+ case 0:
+ return 'Normal';
+ case 1:
+ return 'Soft';
+ case 2:
+ return 'Hard';
+ default:
+ return 'Unknown';
+ }
+ }
+ },
+ 0xa409: {
+ 'name': 'Saturation',
+ 'description': function(value) {
+ switch (value) {
+ case 0:
+ return 'Normal';
+ case 1:
+ return 'Low saturation';
+ case 2:
+ return 'High saturation';
+ default:
+ return 'Unknown';
+ }
+ }
+ },
+ 0xa40a: {
+ 'name': 'Sharpness',
+ 'description': function(value) {
+ switch (value) {
+ case 0:
+ return 'Normal';
+ case 1:
+ return 'Soft';
+ case 2:
+ return 'Hard';
+ default:
+ return 'Unknown';
+ }
+ }
+ },
+ 0xa40b: {
+ 'name': 'DeviceSettingDescription',
+ 'description': function(value) {
+ return '[Raw device settings table data]';
+ }
+ },
+ 0xa40c: {
+ 'name': 'SubjectDistanceRange',
+ 'description': function(value) {
+ switch (value) {
+ case 1:
+ return 'Macro';
+ case 2:
+ return 'Close view';
+ case 3:
+ return 'Distant view';
+ default:
+ return 'Unknown';
+ }
+ }
+ },
+ 0xa420: 'ImageUniqueID'
+ },
+ 'gps': {
+ 0x0000: {
+ 'name': 'GPSVersionID',
+ 'description': function(value) {
+ var _ref, _ref1;
+
+ if ((value[0] === (_ref = value[1]) && _ref === 2) && (value[2] === (_ref1 = value[3]) && _ref1 === 0)) {
+ return 'Version 2.2';
+ } else {
+ return 'Unknown';
+ }
+ }
+ },
+ 0x0001: {
+ 'name': 'GPSLatitudeRef',
+ 'description': function(value) {
+ switch (value.join('')) {
+ case 'N':
+ return 'North latitude';
+ case 'S':
+ return 'South latitude';
+ default:
+ return 'Unknown';
+ }
+ }
+ },
+ 0x0002: {
+ 'name': 'GPSLatitude',
+ 'description': function(value) {
+ return value[0] + value[1] / 60 + value[2] / 3600;
+ }
+ },
+ 0x0003: {
+ 'name': 'GPSLongitudeRef',
+ 'description': function(value) {
+ switch (value.join('')) {
+ case 'E':
+ return 'East longitude';
+ case 'W':
+ return 'West longitude';
+ default:
+ return 'Unknown';
+ }
+ }
+ },
+ 0x0004: {
+ 'name': 'GPSLongitude',
+ 'description': function(value) {
+ return value[0] + value[1] / 60 + value[2] / 3600;
+ }
+ },
+ 0x0005: {
+ 'name': 'GPSAltitudeRef',
+ 'description': function(value) {
+ switch (value) {
+ case 0:
+ return 'Sea level';
+ case 1:
+ return 'Sea level reference (negative value)';
+ default:
+ return 'Unknown';
+ }
+ }
+ },
+ 0x0006: {
+ 'name': 'GPSAltitude',
+ 'description': function(value) {
+ return value + ' m';
+ }
+ },
+ 0x0007: {
+ 'name': 'GPSTimeStamp',
+ 'description': function(value) {
+ var padZero;
+
+ padZero = function(num) {
+ var i;
+
+ return ((function() {
+ var _i, _ref, _results;
+
+ _results = [];
+ for (i = _i = 0, _ref = 2 - ('' + Math.floor(num)).length; 0 <= _ref ? _i < _ref : _i > _ref; i = 0 <= _ref ? ++_i : --_i) {
+ _results.push('0');
+ }
+ return _results;
+ })()) + num;
+ };
+ return value.map(padZero).join(':');
+ }
+ },
+ 0x0008: 'GPSSatellites',
+ 0x0009: {
+ 'name': 'GPSStatus',
+ 'description': function(value) {
+ switch (value.join('')) {
+ case 'A':
+ return 'Measurement in progress';
+ case 'V':
+ return 'Measurement Interoperability';
+ default:
+ return 'Unknown';
+ }
+ }
+ },
+ 0x000a: {
+ 'name': 'GPSMeasureMode',
+ 'description': function(value) {
+ switch (value.join('')) {
+ case '2':
+ return '2-dimensional measurement';
+ case '3':
+ return '3-dimensional measurement';
+ default:
+ return 'Unknown';
+ }
+ }
+ },
+ 0x000b: 'GPSDOP',
+ 0x000c: {
+ 'name': 'GPSSpeedRef',
+ 'description': function(value) {
+ switch (value.join('')) {
+ case 'K':
+ return 'Kilometers per hour';
+ case 'M':
+ return 'Miles per hour';
+ case 'N':
+ return 'Knots';
+ default:
+ return 'Unknown';
+ }
+ }
+ },
+ 0x000d: 'GPSSpeed',
+ 0x000e: {
+ 'name': 'GPSTrackRef',
+ 'description': function(value) {
+ switch (value.join('')) {
+ case 'T':
+ return 'True direction';
+ case 'M':
+ return 'Magnetic direction';
+ default:
+ return 'Unknown';
+ }
+ }
+ },
+ 0x000f: 'GPSTrack',
+ 0x0010: {
+ 'name': 'GPSImgDirectionRef',
+ 'description': function(value) {
+ switch (value.join('')) {
+ case 'T':
+ return 'True direction';
+ case 'M':
+ return 'Magnetic direction';
+ default:
+ return 'Unknown';
+ }
+ }
+ },
+ 0x0011: 'GPSImgDirection',
+ 0x0012: 'GPSMapDatum',
+ 0x0013: {
+ 'name': 'GPSDestLatitudeRef',
+ 'description': function(value) {
+ switch (value.join('')) {
+ case 'N':
+ return 'North latitude';
+ case 'S':
+ return 'South latitude';
+ default:
+ return 'Unknown';
+ }
+ }
+ },
+ 0x0014: {
+ 'name': 'GPSDestLatitude',
+ 'description': function(value) {
+ return value[0] + value[1] / 60 + value[2] / 3600;
+ }
+ },
+ 0x0015: {
+ 'name': 'GPSDestLongitudeRef',
+ 'description': function(value) {
+ switch (value.join('')) {
+ case 'E':
+ return 'East longitude';
+ case 'W':
+ return 'West longitude';
+ default:
+ return 'Unknown';
+ }
+ }
+ },
+ 0x0016: {
+ 'name': 'GPSDestLongitude',
+ 'description': function(value) {
+ return value[0] + value[1] / 60 + value[2] / 3600;
+ }
+ },
+ 0x0017: {
+ 'name': 'GPSDestBearingRef',
+ 'description': function(value) {
+ switch (value.join('')) {
+ case 'T':
+ return 'True direction';
+ case 'M':
+ return 'Magnetic direction';
+ default:
+ return 'Unknown';
+ }
+ }
+ },
+ 0x0018: 'GPSDestBearing',
+ 0x0019: {
+ 'name': 'GPSDestDistanceRef',
+ 'description': function(value) {
+ switch (value.join('')) {
+ case 'K':
+ return 'Kilometers';
+ case 'M':
+ return 'Miles';
+ case 'N':
+ return 'Knots';
+ default:
+ return 'Unknown';
+ }
+ }
+ },
+ 0x001a: 'GPSDestDistance',
+ 0x001b: {
+ 'name': 'GPSProcessingMethod',
+ 'description': function(value) {
+ if (value === 0) {
+ return 'Undefined';
+ } else {
+ switch (value.slice(0, 8).map(function(charCode) {
+ return String.fromCharCode(charCode);
+ }).join('')) {
+ case 'ASCII\x00\x00\x00':
+ return value.slice(8, value.length).map(function(charCode) {
+ return String.fromCharCode(charCode);
+ }).join('');
+ case 'JIS\x00\x00\x00\x00\x00':
+ return '[JIS encoded text]';
+ case 'UNICODE\x00':
+ return '[Unicode encoded text]';
+ case '\x00\x00\x00\x00\x00\x00\x00\x00':
+ return '[Undefined encoding]';
+ }
+ }
+ }
+ },
+ 0x001c: {
+ 'name': 'GPSAreaInformation',
+ 'description': function(value) {
+ if (value === 0) {
+ return 'Undefined';
+ } else {
+ switch (value.slice(0, 8).map(function(charCode) {
+ return String.fromCharCode(charCode);
+ }).join('')) {
+ case 'ASCII\x00\x00\x00':
+ return value.slice(8, value.length).map(function(charCode) {
+ return String.fromCharCode(charCode);
+ }).join('');
+ case 'JIS\x00\x00\x00\x00\x00':
+ return '[JIS encoded text]';
+ case 'UNICODE\x00':
+ return '[Unicode encoded text]';
+ case '\x00\x00\x00\x00\x00\x00\x00\x00':
+ return '[Undefined encoding]';
+ }
+ }
+ }
+ },
+ 0x001d: 'GPSDateStamp',
+ 0x001e: {
+ 'name': 'GPSDifferential',
+ 'description': function(value) {
+ switch (value) {
+ case 0:
+ return 'Measurement without differential correction';
+ case 1:
+ return 'Differential correction applied';
+ default:
+ return 'Unknown';
+ }
+ }
+ }
+ },
+ 'interoperability': {
+ 0x0001: 'InteroperabilityIndex',
+ 0x0002: 'UnknownInteroperabilityTag0x0002',
+ 0x1001: 'UnknownInteroperabilityTag0x1001',
+ 0x1002: 'UnknownInteroperabilityTag0x1002'
+ }
+ };
+
+ /*
+ # Gets the image's value of the tag with the given name.
+ #
+ # name string The name of the tag to get the value of
+ #
+ # Returns the value of the tag with the given name if it exists,
+ # otherwise throws "Undefined".
+ */
+
+
+ ExifReader.prototype.getTagValue = function(name) {
+ if (this._tags[name] != null) {
+ return this._tags[name].value;
+ } else {
+ return void 0;
+ }
+ };
+
+ /*
+ # Gets the image's description of the tag with the given name.
+ #
+ # name string The name of the tag to get the description of
+ #
+ # Returns the description of the tag with the given name if it exists,
+ # otherwise throws "Undefined".
+ */
+
+
+ ExifReader.prototype.getTagDescription = function(name) {
+ if (this._tags[name] != null) {
+ return this._tags[name].description;
+ } else {
+ return void 0;
+ }
+ };
+
+ /*
+ # Gets all the image's tags.
+ #
+ # Returns the image's tags as an associative array: name -> description.
+ */
+
+
+ ExifReader.prototype.getAllTags = function() {
+ return this._tags;
+ };
+
+ /*
+ # Delete a tag.
+ #
+ # name string The name of the tag to delete
+ #
+ # Delete the tag with the given name. Can be used to lower memory usage.
+ # E.g., the MakerNote tag can be really large.
+ */
+
+
+ ExifReader.prototype.deleteTag = function(name) {
+ return delete this._tags[name];
+ };
+
+ return ExifReader;
+
+ })();
+
+}).call(this);
diff --git a/server/app/static/js/vendor/canvas-to-blob.js b/server/app/static/js/vendor/canvas-to-blob.js
new file mode 100644
index 00000000..32913667
--- /dev/null
+++ b/server/app/static/js/vendor/canvas-to-blob.js
@@ -0,0 +1,111 @@
+/*
+ * JavaScript Canvas to Blob
+ * https://github.com/blueimp/JavaScript-Canvas-to-Blob
+ *
+ * Copyright 2012, Sebastian Tschan
+ * https://blueimp.net
+ *
+ * Licensed under the MIT license:
+ * http://www.opensource.org/licenses/MIT
+ *
+ * Based on stackoverflow user Stoive's code snippet:
+ * http://stackoverflow.com/q/4998908
+ */
+
+/* global atob, Blob, define */
+
+;(function (window) {
+ 'use strict'
+
+ var CanvasPrototype = window.HTMLCanvasElement &&
+ window.HTMLCanvasElement.prototype
+ var hasBlobConstructor = window.Blob && (function () {
+ try {
+ return Boolean(new Blob())
+ } catch (e) {
+ return false
+ }
+ }())
+ var hasArrayBufferViewSupport = hasBlobConstructor && window.Uint8Array &&
+ (function () {
+ try {
+ return new Blob([new Uint8Array(100)]).size === 100
+ } catch (e) {
+ return false
+ }
+ }())
+ var BlobBuilder = window.BlobBuilder || window.WebKitBlobBuilder ||
+ window.MozBlobBuilder || window.MSBlobBuilder
+ var dataURIPattern = /^data:((.*?)(;charset=.*?)?)(;base64)?,/
+ var dataURLtoBlob = (hasBlobConstructor || BlobBuilder) && window.atob &&
+ window.ArrayBuffer && window.Uint8Array &&
+ function (dataURI) {
+ var matches,
+ mediaType,
+ isBase64,
+ dataString,
+ byteString,
+ arrayBuffer,
+ intArray,
+ i,
+ bb
+ // Parse the dataURI components as per RFC 2397
+ matches = dataURI.match(dataURIPattern)
+ if (!matches) {
+ throw new Error('invalid data URI')
+ }
+ // Default to text/plain;charset=US-ASCII
+ mediaType = matches[2]
+ ? matches[1]
+ : 'text/plain' + (matches[3] || ';charset=US-ASCII')
+ isBase64 = !!matches[4]
+ dataString = dataURI.slice(matches[0].length)
+ if (isBase64) {
+ // Convert base64 to raw binary data held in a string:
+ byteString = atob(dataString)
+ } else {
+ // Convert base64/URLEncoded data component to raw binary:
+ byteString = decodeURIComponent(dataString)
+ }
+ // Write the bytes of the string to an ArrayBuffer:
+ arrayBuffer = new ArrayBuffer(byteString.length)
+ intArray = new Uint8Array(arrayBuffer)
+ for (i = 0; i < byteString.length; i += 1) {
+ intArray[i] = byteString.charCodeAt(i)
+ }
+ // Write the ArrayBuffer (or ArrayBufferView) to a blob:
+ if (hasBlobConstructor) {
+ return new Blob(
+ [hasArrayBufferViewSupport ? intArray : arrayBuffer],
+ {type: mediaType}
+ )
+ }
+ bb = new BlobBuilder()
+ bb.append(arrayBuffer)
+ return bb.getBlob(mediaType)
+ }
+ if (window.HTMLCanvasElement && !CanvasPrototype.toBlob) {
+ if (CanvasPrototype.mozGetAsFile) {
+ CanvasPrototype.toBlob = function (callback, type, quality) {
+ if (quality && CanvasPrototype.toDataURL && dataURLtoBlob) {
+ callback(dataURLtoBlob(this.toDataURL(type, quality)))
+ } else {
+ callback(this.mozGetAsFile('blob', type))
+ }
+ }
+ } else if (CanvasPrototype.toDataURL && dataURLtoBlob) {
+ CanvasPrototype.toBlob = function (callback, type, quality) {
+ callback(dataURLtoBlob(this.toDataURL(type, quality)))
+ }
+ }
+ }
+ if (typeof define === 'function' && define.amd) {
+ define(function () {
+ return dataURLtoBlob
+ })
+ } else if (typeof module === 'object' && module.exports) {
+ module.exports = dataURLtoBlob
+ } else {
+ window.dataURLtoBlob = dataURLtoBlob
+ }
+}(window))
diff --git a/server/app/static/js/vendor/jquery-3.3.1.min.js b/server/app/static/js/vendor/jquery-3.3.1.min.js
new file mode 100644
index 00000000..4d9b3a25
--- /dev/null
+++ b/server/app/static/js/vendor/jquery-3.3.1.min.js
@@ -0,0 +1,2 @@
+/*! jQuery v3.3.1 | (c) JS Foundation and other contributors | jquery.org/license */
+!function(e,t){"use strict";"object"==typeof module&&"object"==typeof module.exports?module.exports=e.document?t(e,!0):function(e){if(!e.document)throw new Error("jQuery requires a window with a document");return t(e)}:t(e)}("undefined"!=typeof window?window:this,function(e,t){"use strict";var n=[],r=e.document,i=Object.getPrototypeOf,o=n.slice,a=n.concat,s=n.push,u=n.indexOf,l={},c=l.toString,f=l.hasOwnProperty,p=f.toString,d=p.call(Object),h={},g=function e(t){return"function"==typeof t&&"number"!=typeof t.nodeType},y=function e(t){return null!=t&&t===t.window},v={type:!0,src:!0,noModule:!0};function m(e,t,n){var i,o=(t=t||r).createElement("script");if(o.text=e,n)for(i in v)n[i]&&(o[i]=n[i]);t.head.appendChild(o).parentNode.removeChild(o)}function x(e){return null==e?e+"":"object"==typeof e||"function"==typeof e?l[c.call(e)]||"object":typeof e}var b="3.3.1",w=function(e,t){return new w.fn.init(e,t)},T=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g;w.fn=w.prototype={jquery:"3.3.1",constructor:w,length:0,toArray:function(){return o.call(this)},get:function(e){return null==e?o.call(this):e<0?this[e+this.length]:this[e]},pushStack:function(e){var t=w.merge(this.constructor(),e);return t.prevObject=this,t},each:function(e){return w.each(this,e)},map:function(e){return this.pushStack(w.map(this,function(t,n){return e.call(t,n,t)}))},slice:function(){return this.pushStack(o.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(e){var t=this.length,n=+e+(e<0?t:0);return this.pushStack(n>=0&&n<t?[this[n]]:[])},end:function(){return this.prevObject||this.constructor()},push:s,sort:n.sort,splice:n.splice},w.extend=w.fn.extend=function(){var e,t,n,r,i,o,a=arguments[0]||{},s=1,u=arguments.length,l=!1;for("boolean"==typeof a&&(l=a,a=arguments[s]||{},s++),"object"==typeof a||g(a)||(a={}),s===u&&(a=this,s--);s<u;s++)if(null!=(e=arguments[s]))for(t in 
e)n=a[t],a!==(r=e[t])&&(l&&r&&(w.isPlainObject(r)||(i=Array.isArray(r)))?(i?(i=!1,o=n&&Array.isArray(n)?n:[]):o=n&&w.isPlainObject(n)?n:{},a[t]=w.extend(l,o,r)):void 0!==r&&(a[t]=r));return a},w.extend({expando:"jQuery"+("3.3.1"+Math.random()).replace(/\D/g,""),isReady:!0,error:function(e){throw new Error(e)},noop:function(){},isPlainObject:function(e){var t,n;return!(!e||"[object Object]"!==c.call(e))&&(!(t=i(e))||"function"==typeof(n=f.call(t,"constructor")&&t.constructor)&&p.call(n)===d)},isEmptyObject:function(e){var t;for(t in e)return!1;return!0},globalEval:function(e){m(e)},each:function(e,t){var n,r=0;if(C(e)){for(n=e.length;r<n;r++)if(!1===t.call(e[r],r,e[r]))break}else for(r in e)if(!1===t.call(e[r],r,e[r]))break;return e},trim:function(e){return null==e?"":(e+"").replace(T,"")},makeArray:function(e,t){var n=t||[];return null!=e&&(C(Object(e))?w.merge(n,"string"==typeof e?[e]:e):s.call(n,e)),n},inArray:function(e,t,n){return null==t?-1:u.call(t,e,n)},merge:function(e,t){for(var n=+t.length,r=0,i=e.length;r<n;r++)e[i++]=t[r];return e.length=i,e},grep:function(e,t,n){for(var r,i=[],o=0,a=e.length,s=!n;o<a;o++)(r=!t(e[o],o))!==s&&i.push(e[o]);return i},map:function(e,t,n){var r,i,o=0,s=[];if(C(e))for(r=e.length;o<r;o++)null!=(i=t(e[o],o,n))&&s.push(i);else for(o in e)null!=(i=t(e[o],o,n))&&s.push(i);return a.apply([],s)},guid:1,support:h}),"function"==typeof Symbol&&(w.fn[Symbol.iterator]=n[Symbol.iterator]),w.each("Boolean Number String Function Array Date RegExp Object Error Symbol".split(" "),function(e,t){l["[object "+t+"]"]=t.toLowerCase()});function C(e){var t=!!e&&"length"in e&&e.length,n=x(e);return!g(e)&&!y(e)&&("array"===n||0===t||"number"==typeof t&&t>0&&t-1 in e)}var E=function(e){var t,n,r,i,o,a,s,u,l,c,f,p,d,h,g,y,v,m,x,b="sizzle"+1*new Date,w=e.document,T=0,C=0,E=ae(),k=ae(),S=ae(),D=function(e,t){return e===t&&(f=!0),0},N={}.hasOwnProperty,A=[],j=A.pop,q=A.push,L=A.push,H=A.slice,O=function(e,t){for(var 
n=0,r=e.length;n<r;n++)if(e[n]===t)return n;return-1},P="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",M="[\\x20\\t\\r\\n\\f]",R="(?:\\\\.|[\\w-]|[^\0-\\xa0])+",I="\\["+M+"*("+R+")(?:"+M+"*([*^$|!~]?=)"+M+"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|("+R+"))|)"+M+"*\\]",W=":("+R+")(?:\\((('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|((?:\\\\.|[^\\\\()[\\]]|"+I+")*)|.*)\\)|)",$=new RegExp(M+"+","g"),B=new RegExp("^"+M+"+|((?:^|[^\\\\])(?:\\\\.)*)"+M+"+$","g"),F=new RegExp("^"+M+"*,"+M+"*"),_=new RegExp("^"+M+"*([>+~]|"+M+")"+M+"*"),z=new RegExp("="+M+"*([^\\]'\"]*?)"+M+"*\\]","g"),X=new RegExp(W),U=new RegExp("^"+R+"$"),V={ID:new RegExp("^#("+R+")"),CLASS:new RegExp("^\\.("+R+")"),TAG:new RegExp("^("+R+"|[*])"),ATTR:new RegExp("^"+I),PSEUDO:new RegExp("^"+W),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+P+")$","i"),needsContext:new RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},G=/^(?:input|select|textarea|button)$/i,Y=/^h\d$/i,Q=/^[^{]+\{\s*\[native \w/,J=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,K=/[+~]/,Z=new RegExp("\\\\([\\da-f]{1,6}"+M+"?|("+M+")|.)","ig"),ee=function(e,t,n){var r="0x"+t-65536;return r!==r||n?t:r<0?String.fromCharCode(r+65536):String.fromCharCode(r>>10|55296,1023&r|56320)},te=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,ne=function(e,t){return t?"\0"===e?"\ufffd":e.slice(0,-1)+"\\"+e.charCodeAt(e.length-1).toString(16)+" ":"\\"+e},re=function(){p()},ie=me(function(e){return!0===e.disabled&&("form"in e||"label"in e)},{dir:"parentNode",next:"legend"});try{L.apply(A=H.call(w.childNodes),w.childNodes),A[w.childNodes.length].nodeType}catch(e){L={apply:A.length?function(e,t){q.apply(e,H.call(t))}:function(e,t){var 
n=e.length,r=0;while(e[n++]=t[r++]);e.length=n-1}}}function oe(e,t,r,i){var o,s,l,c,f,h,v,m=t&&t.ownerDocument,T=t?t.nodeType:9;if(r=r||[],"string"!=typeof e||!e||1!==T&&9!==T&&11!==T)return r;if(!i&&((t?t.ownerDocument||t:w)!==d&&p(t),t=t||d,g)){if(11!==T&&(f=J.exec(e)))if(o=f[1]){if(9===T){if(!(l=t.getElementById(o)))return r;if(l.id===o)return r.push(l),r}else if(m&&(l=m.getElementById(o))&&x(t,l)&&l.id===o)return r.push(l),r}else{if(f[2])return L.apply(r,t.getElementsByTagName(e)),r;if((o=f[3])&&n.getElementsByClassName&&t.getElementsByClassName)return L.apply(r,t.getElementsByClassName(o)),r}if(n.qsa&&!S[e+" "]&&(!y||!y.test(e))){if(1!==T)m=t,v=e;else if("object"!==t.nodeName.toLowerCase()){(c=t.getAttribute("id"))?c=c.replace(te,ne):t.setAttribute("id",c=b),s=(h=a(e)).length;while(s--)h[s]="#"+c+" "+ve(h[s]);v=h.join(","),m=K.test(e)&&ge(t.parentNode)||t}if(v)try{return L.apply(r,m.querySelectorAll(v)),r}catch(e){}finally{c===b&&t.removeAttribute("id")}}}return u(e.replace(B,"$1"),t,r,i)}function ae(){var e=[];function t(n,i){return e.push(n+" ")>r.cacheLength&&delete t[e.shift()],t[n+" "]=i}return t}function se(e){return e[b]=!0,e}function ue(e){var t=d.createElement("fieldset");try{return!!e(t)}catch(e){return!1}finally{t.parentNode&&t.parentNode.removeChild(t),t=null}}function le(e,t){var n=e.split("|"),i=n.length;while(i--)r.attrHandle[n[i]]=t}function ce(e,t){var n=t&&e,r=n&&1===e.nodeType&&1===t.nodeType&&e.sourceIndex-t.sourceIndex;if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function fe(e){return function(t){return"input"===t.nodeName.toLowerCase()&&t.type===e}}function pe(e){return function(t){var n=t.nodeName.toLowerCase();return("input"===n||"button"===n)&&t.type===e}}function de(e){return function(t){return"form"in t?t.parentNode&&!1===t.disabled?"label"in t?"label"in t.parentNode?t.parentNode.disabled===e:t.disabled===e:t.isDisabled===e||t.isDisabled!==!e&&ie(t)===e:t.disabled===e:"label"in t&&t.disabled===e}}function 
he(e){return se(function(t){return t=+t,se(function(n,r){var i,o=e([],n.length,t),a=o.length;while(a--)n[i=o[a]]&&(n[i]=!(r[i]=n[i]))})})}function ge(e){return e&&"undefined"!=typeof e.getElementsByTagName&&e}n=oe.support={},o=oe.isXML=function(e){var t=e&&(e.ownerDocument||e).documentElement;return!!t&&"HTML"!==t.nodeName},p=oe.setDocument=function(e){var t,i,a=e?e.ownerDocument||e:w;return a!==d&&9===a.nodeType&&a.documentElement?(d=a,h=d.documentElement,g=!o(d),w!==d&&(i=d.defaultView)&&i.top!==i&&(i.addEventListener?i.addEventListener("unload",re,!1):i.attachEvent&&i.attachEvent("onunload",re)),n.attributes=ue(function(e){return e.className="i",!e.getAttribute("className")}),n.getElementsByTagName=ue(function(e){return e.appendChild(d.createComment("")),!e.getElementsByTagName("*").length}),n.getElementsByClassName=Q.test(d.getElementsByClassName),n.getById=ue(function(e){return h.appendChild(e).id=b,!d.getElementsByName||!d.getElementsByName(b).length}),n.getById?(r.filter.ID=function(e){var t=e.replace(Z,ee);return function(e){return e.getAttribute("id")===t}},r.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&g){var n=t.getElementById(e);return n?[n]:[]}}):(r.filter.ID=function(e){var t=e.replace(Z,ee);return function(e){var n="undefined"!=typeof e.getAttributeNode&&e.getAttributeNode("id");return n&&n.value===t}},r.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&g){var n,r,i,o=t.getElementById(e);if(o){if((n=o.getAttributeNode("id"))&&n.value===e)return[o];i=t.getElementsByName(e),r=0;while(o=i[r++])if((n=o.getAttributeNode("id"))&&n.value===e)return[o]}return[]}}),r.find.TAG=n.getElementsByTagName?function(e,t){return"undefined"!=typeof t.getElementsByTagName?t.getElementsByTagName(e):n.qsa?t.querySelectorAll(e):void 0}:function(e,t){var n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return o},r.find.CLASS=n.getElementsByClassName&&function(e,t){if("undefined"!=typeof 
t.getElementsByClassName&&g)return t.getElementsByClassName(e)},v=[],y=[],(n.qsa=Q.test(d.querySelectorAll))&&(ue(function(e){h.appendChild(e).innerHTML="<a id='"+b+"'></a><select id='"+b+"-\r\\' msallowcapture=''><option selected=''></option></select>",e.querySelectorAll("[msallowcapture^='']").length&&y.push("[*^$]="+M+"*(?:''|\"\")"),e.querySelectorAll("[selected]").length||y.push("\\["+M+"*(?:value|"+P+")"),e.querySelectorAll("[id~="+b+"-]").length||y.push("~="),e.querySelectorAll(":checked").length||y.push(":checked"),e.querySelectorAll("a#"+b+"+*").length||y.push(".#.+[+~]")}),ue(function(e){e.innerHTML="<a href='' disabled='disabled'></a><select disabled='disabled'><option/></select>";var t=d.createElement("input");t.setAttribute("type","hidden"),e.appendChild(t).setAttribute("name","D"),e.querySelectorAll("[name=d]").length&&y.push("name"+M+"*[*^$|!~]?="),2!==e.querySelectorAll(":enabled").length&&y.push(":enabled",":disabled"),h.appendChild(e).disabled=!0,2!==e.querySelectorAll(":disabled").length&&y.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),y.push(",.*:")})),(n.matchesSelector=Q.test(m=h.matches||h.webkitMatchesSelector||h.mozMatchesSelector||h.oMatchesSelector||h.msMatchesSelector))&&ue(function(e){n.disconnectedMatch=m.call(e,"*"),m.call(e,"[s!='']:x"),v.push("!=",W)}),y=y.length&&new RegExp(y.join("|")),v=v.length&&new RegExp(v.join("|")),t=Q.test(h.compareDocumentPosition),x=t||Q.test(h.contains)?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},D=t?function(e,t){if(e===t)return f=!0,0;var r=!e.compareDocumentPosition-!t.compareDocumentPosition;return 
r||(1&(r=(e.ownerDocument||e)===(t.ownerDocument||t)?e.compareDocumentPosition(t):1)||!n.sortDetached&&t.compareDocumentPosition(e)===r?e===d||e.ownerDocument===w&&x(w,e)?-1:t===d||t.ownerDocument===w&&x(w,t)?1:c?O(c,e)-O(c,t):0:4&r?-1:1)}:function(e,t){if(e===t)return f=!0,0;var n,r=0,i=e.parentNode,o=t.parentNode,a=[e],s=[t];if(!i||!o)return e===d?-1:t===d?1:i?-1:o?1:c?O(c,e)-O(c,t):0;if(i===o)return ce(e,t);n=e;while(n=n.parentNode)a.unshift(n);n=t;while(n=n.parentNode)s.unshift(n);while(a[r]===s[r])r++;return r?ce(a[r],s[r]):a[r]===w?-1:s[r]===w?1:0},d):d},oe.matches=function(e,t){return oe(e,null,null,t)},oe.matchesSelector=function(e,t){if((e.ownerDocument||e)!==d&&p(e),t=t.replace(z,"='$1']"),n.matchesSelector&&g&&!S[t+" "]&&(!v||!v.test(t))&&(!y||!y.test(t)))try{var r=m.call(e,t);if(r||n.disconnectedMatch||e.document&&11!==e.document.nodeType)return r}catch(e){}return oe(t,d,null,[e]).length>0},oe.contains=function(e,t){return(e.ownerDocument||e)!==d&&p(e),x(e,t)},oe.attr=function(e,t){(e.ownerDocument||e)!==d&&p(e);var i=r.attrHandle[t.toLowerCase()],o=i&&N.call(r.attrHandle,t.toLowerCase())?i(e,t,!g):void 0;return void 0!==o?o:n.attributes||!g?e.getAttribute(t):(o=e.getAttributeNode(t))&&o.specified?o.value:null},oe.escape=function(e){return(e+"").replace(te,ne)},oe.error=function(e){throw new Error("Syntax error, unrecognized expression: "+e)},oe.uniqueSort=function(e){var t,r=[],i=0,o=0;if(f=!n.detectDuplicates,c=!n.sortStable&&e.slice(0),e.sort(D),f){while(t=e[o++])t===e[o]&&(i=r.push(o));while(i--)e.splice(r[i],1)}return c=null,e},i=oe.getText=function(e){var t,n="",r=0,o=e.nodeType;if(o){if(1===o||9===o||11===o){if("string"==typeof e.textContent)return e.textContent;for(e=e.firstChild;e;e=e.nextSibling)n+=i(e)}else if(3===o||4===o)return e.nodeValue}else while(t=e[r++])n+=i(t);return n},(r=oe.selectors={cacheLength:50,createPseudo:se,match:V,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," 
":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(Z,ee),e[3]=(e[3]||e[4]||e[5]||"").replace(Z,ee),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||oe.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&oe.error(e[0]),e},PSEUDO:function(e){var t,n=!e[6]&&e[2];return V.CHILD.test(e[0])?null:(e[3]?e[2]=e[4]||e[5]||"":n&&X.test(n)&&(t=a(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){var t=e.replace(Z,ee).toLowerCase();return"*"===e?function(){return!0}:function(e){return e.nodeName&&e.nodeName.toLowerCase()===t}},CLASS:function(e){var t=E[e+" "];return t||(t=new RegExp("(^|"+M+")"+e+"("+M+"|$)"))&&E(e,function(e){return t.test("string"==typeof e.className&&e.className||"undefined"!=typeof e.getAttribute&&e.getAttribute("class")||"")})},ATTR:function(e,t,n){return function(r){var i=oe.attr(r,e);return null==i?"!="===t:!t||(i+="","="===t?i===n:"!="===t?i!==n:"^="===t?n&&0===i.indexOf(n):"*="===t?n&&i.indexOf(n)>-1:"$="===t?n&&i.slice(-n.length)===n:"~="===t?(" "+i.replace($," ")+" ").indexOf(n)>-1:"|="===t&&(i===n||i.slice(0,n.length+1)===n+"-"))}},CHILD:function(e,t,n,r,i){var o="nth"!==e.slice(0,3),a="last"!==e.slice(-4),s="of-type"===t;return 1===r&&0===i?function(e){return!!e.parentNode}:function(t,n,u){var 
l,c,f,p,d,h,g=o!==a?"nextSibling":"previousSibling",y=t.parentNode,v=s&&t.nodeName.toLowerCase(),m=!u&&!s,x=!1;if(y){if(o){while(g){p=t;while(p=p[g])if(s?p.nodeName.toLowerCase()===v:1===p.nodeType)return!1;h=g="only"===e&&!h&&"nextSibling"}return!0}if(h=[a?y.firstChild:y.lastChild],a&&m){x=(d=(l=(c=(f=(p=y)[b]||(p[b]={}))[p.uniqueID]||(f[p.uniqueID]={}))[e]||[])[0]===T&&l[1])&&l[2],p=d&&y.childNodes[d];while(p=++d&&p&&p[g]||(x=d=0)||h.pop())if(1===p.nodeType&&++x&&p===t){c[e]=[T,d,x];break}}else if(m&&(x=d=(l=(c=(f=(p=t)[b]||(p[b]={}))[p.uniqueID]||(f[p.uniqueID]={}))[e]||[])[0]===T&&l[1]),!1===x)while(p=++d&&p&&p[g]||(x=d=0)||h.pop())if((s?p.nodeName.toLowerCase()===v:1===p.nodeType)&&++x&&(m&&((c=(f=p[b]||(p[b]={}))[p.uniqueID]||(f[p.uniqueID]={}))[e]=[T,x]),p===t))break;return(x-=i)===r||x%r==0&&x/r>=0}}},PSEUDO:function(e,t){var n,i=r.pseudos[e]||r.setFilters[e.toLowerCase()]||oe.error("unsupported pseudo: "+e);return i[b]?i(t):i.length>1?(n=[e,e,"",t],r.setFilters.hasOwnProperty(e.toLowerCase())?se(function(e,n){var r,o=i(e,t),a=o.length;while(a--)e[r=O(e,o[a])]=!(n[r]=o[a])}):function(e){return i(e,0,n)}):i}},pseudos:{not:se(function(e){var t=[],n=[],r=s(e.replace(B,"$1"));return r[b]?se(function(e,t,n,i){var o,a=r(e,null,i,[]),s=e.length;while(s--)(o=a[s])&&(e[s]=!(t[s]=o))}):function(e,i,o){return t[0]=e,r(t,null,o,n),t[0]=null,!n.pop()}}),has:se(function(e){return function(t){return oe(e,t).length>0}}),contains:se(function(e){return e=e.replace(Z,ee),function(t){return(t.textContent||t.innerText||i(t)).indexOf(e)>-1}}),lang:se(function(e){return U.test(e||"")||oe.error("unsupported lang: "+e),e=e.replace(Z,ee).toLowerCase(),function(t){var n;do{if(n=g?t.lang:t.getAttribute("xml:lang")||t.getAttribute("lang"))return(n=n.toLowerCase())===e||0===n.indexOf(e+"-")}while((t=t.parentNode)&&1===t.nodeType);return!1}}),target:function(t){var n=e.location&&e.location.hash;return n&&n.slice(1)===t.id},root:function(e){return e===h},focus:function(e){return 
e===d.activeElement&&(!d.hasFocus||d.hasFocus())&&!!(e.type||e.href||~e.tabIndex)},enabled:de(!1),disabled:de(!0),checked:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&!!e.checked||"option"===t&&!!e.selected},selected:function(e){return e.parentNode&&e.parentNode.selectedIndex,!0===e.selected},empty:function(e){for(e=e.firstChild;e;e=e.nextSibling)if(e.nodeType<6)return!1;return!0},parent:function(e){return!r.pseudos.empty(e)},header:function(e){return Y.test(e.nodeName)},input:function(e){return G.test(e.nodeName)},button:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&"button"===e.type||"button"===t},text:function(e){var t;return"input"===e.nodeName.toLowerCase()&&"text"===e.type&&(null==(t=e.getAttribute("type"))||"text"===t.toLowerCase())},first:he(function(){return[0]}),last:he(function(e,t){return[t-1]}),eq:he(function(e,t,n){return[n<0?n+t:n]}),even:he(function(e,t){for(var n=0;n<t;n+=2)e.push(n);return e}),odd:he(function(e,t){for(var n=1;n<t;n+=2)e.push(n);return e}),lt:he(function(e,t,n){for(var r=n<0?n+t:n;--r>=0;)e.push(r);return e}),gt:he(function(e,t,n){for(var r=n<0?n+t:n;++r<t;)e.push(r);return e})}}).pseudos.nth=r.pseudos.eq;for(t in{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})r.pseudos[t]=fe(t);for(t in{submit:!0,reset:!0})r.pseudos[t]=pe(t);function ye(){}ye.prototype=r.filters=r.pseudos,r.setFilters=new ye,a=oe.tokenize=function(e,t){var n,i,o,a,s,u,l,c=k[e+" "];if(c)return t?0:c.slice(0);s=e,u=[],l=r.preFilter;while(s){n&&!(i=F.exec(s))||(i&&(s=s.slice(i[0].length)||s),u.push(o=[])),n=!1,(i=_.exec(s))&&(n=i.shift(),o.push({value:n,type:i[0].replace(B," ")}),s=s.slice(n.length));for(a in r.filter)!(i=V[a].exec(s))||l[a]&&!(i=l[a](i))||(n=i.shift(),o.push({value:n,type:a,matches:i}),s=s.slice(n.length));if(!n)break}return t?s.length:s?oe.error(e):k(e,u).slice(0)};function ve(e){for(var t=0,n=e.length,r="";t<n;t++)r+=e[t].value;return r}function me(e,t,n){var 
r=t.dir,i=t.next,o=i||r,a=n&&"parentNode"===o,s=C++;return t.first?function(t,n,i){while(t=t[r])if(1===t.nodeType||a)return e(t,n,i);return!1}:function(t,n,u){var l,c,f,p=[T,s];if(u){while(t=t[r])if((1===t.nodeType||a)&&e(t,n,u))return!0}else while(t=t[r])if(1===t.nodeType||a)if(f=t[b]||(t[b]={}),c=f[t.uniqueID]||(f[t.uniqueID]={}),i&&i===t.nodeName.toLowerCase())t=t[r]||t;else{if((l=c[o])&&l[0]===T&&l[1]===s)return p[2]=l[2];if(c[o]=p,p[2]=e(t,n,u))return!0}return!1}}function xe(e){return e.length>1?function(t,n,r){var i=e.length;while(i--)if(!e[i](t,n,r))return!1;return!0}:e[0]}function be(e,t,n){for(var r=0,i=t.length;r<i;r++)oe(e,t[r],n);return n}function we(e,t,n,r,i){for(var o,a=[],s=0,u=e.length,l=null!=t;s<u;s++)(o=e[s])&&(n&&!n(o,r,i)||(a.push(o),l&&t.push(s)));return a}function Te(e,t,n,r,i,o){return r&&!r[b]&&(r=Te(r)),i&&!i[b]&&(i=Te(i,o)),se(function(o,a,s,u){var l,c,f,p=[],d=[],h=a.length,g=o||be(t||"*",s.nodeType?[s]:s,[]),y=!e||!o&&t?g:we(g,p,e,s,u),v=n?i||(o?e:h||r)?[]:a:y;if(n&&n(y,v,s,u),r){l=we(v,d),r(l,[],s,u),c=l.length;while(c--)(f=l[c])&&(v[d[c]]=!(y[d[c]]=f))}if(o){if(i||e){if(i){l=[],c=v.length;while(c--)(f=v[c])&&l.push(y[c]=f);i(null,v=[],l,u)}c=v.length;while(c--)(f=v[c])&&(l=i?O(o,f):p[c])>-1&&(o[l]=!(a[l]=f))}}else v=we(v===a?v.splice(h,v.length):v),i?i(null,a,v,u):L.apply(a,v)})}function Ce(e){for(var t,n,i,o=e.length,a=r.relative[e[0].type],s=a||r.relative[" "],u=a?1:0,c=me(function(e){return e===t},s,!0),f=me(function(e){return O(t,e)>-1},s,!0),p=[function(e,n,r){var i=!a&&(r||n!==l)||((t=n).nodeType?c(e,n,r):f(e,n,r));return t=null,i}];u<o;u++)if(n=r.relative[e[u].type])p=[me(xe(p),n)];else{if((n=r.filter[e[u].type].apply(null,e[u].matches))[b]){for(i=++u;i<o;i++)if(r.relative[e[i].type])break;return Te(u>1&&xe(p),u>1&&ve(e.slice(0,u-1).concat({value:" "===e[u-2].type?"*":""})).replace(B,"$1"),n,u<i&&Ce(e.slice(u,i)),i<o&&Ce(e=e.slice(i)),i<o&&ve(e))}p.push(n)}return xe(p)}function Ee(e,t){var 
n=t.length>0,i=e.length>0,o=function(o,a,s,u,c){var f,h,y,v=0,m="0",x=o&&[],b=[],w=l,C=o||i&&r.find.TAG("*",c),E=T+=null==w?1:Math.random()||.1,k=C.length;for(c&&(l=a===d||a||c);m!==k&&null!=(f=C[m]);m++){if(i&&f){h=0,a||f.ownerDocument===d||(p(f),s=!g);while(y=e[h++])if(y(f,a||d,s)){u.push(f);break}c&&(T=E)}n&&((f=!y&&f)&&v--,o&&x.push(f))}if(v+=m,n&&m!==v){h=0;while(y=t[h++])y(x,b,a,s);if(o){if(v>0)while(m--)x[m]||b[m]||(b[m]=j.call(u));b=we(b)}L.apply(u,b),c&&!o&&b.length>0&&v+t.length>1&&oe.uniqueSort(u)}return c&&(T=E,l=w),x};return n?se(o):o}return s=oe.compile=function(e,t){var n,r=[],i=[],o=S[e+" "];if(!o){t||(t=a(e)),n=t.length;while(n--)(o=Ce(t[n]))[b]?r.push(o):i.push(o);(o=S(e,Ee(i,r))).selector=e}return o},u=oe.select=function(e,t,n,i){var o,u,l,c,f,p="function"==typeof e&&e,d=!i&&a(e=p.selector||e);if(n=n||[],1===d.length){if((u=d[0]=d[0].slice(0)).length>2&&"ID"===(l=u[0]).type&&9===t.nodeType&&g&&r.relative[u[1].type]){if(!(t=(r.find.ID(l.matches[0].replace(Z,ee),t)||[])[0]))return n;p&&(t=t.parentNode),e=e.slice(u.shift().value.length)}o=V.needsContext.test(e)?0:u.length;while(o--){if(l=u[o],r.relative[c=l.type])break;if((f=r.find[c])&&(i=f(l.matches[0].replace(Z,ee),K.test(u[0].type)&&ge(t.parentNode)||t))){if(u.splice(o,1),!(e=i.length&&ve(u)))return L.apply(n,i),n;break}}}return(p||s(e,d))(i,t,!g,n,!t||K.test(e)&&ge(t.parentNode)||t),n},n.sortStable=b.split("").sort(D).join("")===b,n.detectDuplicates=!!f,p(),n.sortDetached=ue(function(e){return 1&e.compareDocumentPosition(d.createElement("fieldset"))}),ue(function(e){return e.innerHTML="<a href='#'></a>","#"===e.firstChild.getAttribute("href")})||le("type|href|height|width",function(e,t,n){if(!n)return e.getAttribute(t,"type"===t.toLowerCase()?1:2)}),n.attributes&&ue(function(e){return e.innerHTML="<input/>",e.firstChild.setAttribute("value",""),""===e.firstChild.getAttribute("value")})||le("value",function(e,t,n){if(!n&&"input"===e.nodeName.toLowerCase())return 
e.defaultValue}),ue(function(e){return null==e.getAttribute("disabled")})||le(P,function(e,t,n){var r;if(!n)return!0===e[t]?t.toLowerCase():(r=e.getAttributeNode(t))&&r.specified?r.value:null}),oe}(e);w.find=E,w.expr=E.selectors,w.expr[":"]=w.expr.pseudos,w.uniqueSort=w.unique=E.uniqueSort,w.text=E.getText,w.isXMLDoc=E.isXML,w.contains=E.contains,w.escapeSelector=E.escape;var k=function(e,t,n){var r=[],i=void 0!==n;while((e=e[t])&&9!==e.nodeType)if(1===e.nodeType){if(i&&w(e).is(n))break;r.push(e)}return r},S=function(e,t){for(var n=[];e;e=e.nextSibling)1===e.nodeType&&e!==t&&n.push(e);return n},D=w.expr.match.needsContext;function N(e,t){return e.nodeName&&e.nodeName.toLowerCase()===t.toLowerCase()}var A=/^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i;function j(e,t,n){return g(t)?w.grep(e,function(e,r){return!!t.call(e,r,e)!==n}):t.nodeType?w.grep(e,function(e){return e===t!==n}):"string"!=typeof t?w.grep(e,function(e){return u.call(t,e)>-1!==n}):w.filter(t,e,n)}w.filter=function(e,t,n){var r=t[0];return n&&(e=":not("+e+")"),1===t.length&&1===r.nodeType?w.find.matchesSelector(r,e)?[r]:[]:w.find.matches(e,w.grep(t,function(e){return 1===e.nodeType}))},w.fn.extend({find:function(e){var t,n,r=this.length,i=this;if("string"!=typeof e)return this.pushStack(w(e).filter(function(){for(t=0;t<r;t++)if(w.contains(i[t],this))return!0}));for(n=this.pushStack([]),t=0;t<r;t++)w.find(e,i[t],n);return r>1?w.uniqueSort(n):n},filter:function(e){return this.pushStack(j(this,e||[],!1))},not:function(e){return this.pushStack(j(this,e||[],!0))},is:function(e){return!!j(this,"string"==typeof e&&D.test(e)?w(e):e||[],!1).length}});var q,L=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/;(w.fn.init=function(e,t,n){var i,o;if(!e)return this;if(n=n||q,"string"==typeof e){if(!(i="<"===e[0]&&">"===e[e.length-1]&&e.length>=3?[null,e,null]:L.exec(e))||!i[1]&&t)return!t||t.jquery?(t||n).find(e):this.constructor(t).find(e);if(i[1]){if(t=t instanceof 
w?t[0]:t,w.merge(this,w.parseHTML(i[1],t&&t.nodeType?t.ownerDocument||t:r,!0)),A.test(i[1])&&w.isPlainObject(t))for(i in t)g(this[i])?this[i](t[i]):this.attr(i,t[i]);return this}return(o=r.getElementById(i[2]))&&(this[0]=o,this.length=1),this}return e.nodeType?(this[0]=e,this.length=1,this):g(e)?void 0!==n.ready?n.ready(e):e(w):w.makeArray(e,this)}).prototype=w.fn,q=w(r);var H=/^(?:parents|prev(?:Until|All))/,O={children:!0,contents:!0,next:!0,prev:!0};w.fn.extend({has:function(e){var t=w(e,this),n=t.length;return this.filter(function(){for(var e=0;e<n;e++)if(w.contains(this,t[e]))return!0})},closest:function(e,t){var n,r=0,i=this.length,o=[],a="string"!=typeof e&&w(e);if(!D.test(e))for(;r<i;r++)for(n=this[r];n&&n!==t;n=n.parentNode)if(n.nodeType<11&&(a?a.index(n)>-1:1===n.nodeType&&w.find.matchesSelector(n,e))){o.push(n);break}return this.pushStack(o.length>1?w.uniqueSort(o):o)},index:function(e){return e?"string"==typeof e?u.call(w(e),this[0]):u.call(this,e.jquery?e[0]:e):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(e,t){return this.pushStack(w.uniqueSort(w.merge(this.get(),w(e,t))))},addBack:function(e){return this.add(null==e?this.prevObject:this.prevObject.filter(e))}});function P(e,t){while((e=e[t])&&1!==e.nodeType);return e}w.each({parent:function(e){var t=e.parentNode;return t&&11!==t.nodeType?t:null},parents:function(e){return k(e,"parentNode")},parentsUntil:function(e,t,n){return k(e,"parentNode",n)},next:function(e){return P(e,"nextSibling")},prev:function(e){return P(e,"previousSibling")},nextAll:function(e){return k(e,"nextSibling")},prevAll:function(e){return k(e,"previousSibling")},nextUntil:function(e,t,n){return k(e,"nextSibling",n)},prevUntil:function(e,t,n){return k(e,"previousSibling",n)},siblings:function(e){return S((e.parentNode||{}).firstChild,e)},children:function(e){return S(e.firstChild)},contents:function(e){return 
N(e,"iframe")?e.contentDocument:(N(e,"template")&&(e=e.content||e),w.merge([],e.childNodes))}},function(e,t){w.fn[e]=function(n,r){var i=w.map(this,t,n);return"Until"!==e.slice(-5)&&(r=n),r&&"string"==typeof r&&(i=w.filter(r,i)),this.length>1&&(O[e]||w.uniqueSort(i),H.test(e)&&i.reverse()),this.pushStack(i)}});var M=/[^\x20\t\r\n\f]+/g;function R(e){var t={};return w.each(e.match(M)||[],function(e,n){t[n]=!0}),t}w.Callbacks=function(e){e="string"==typeof e?R(e):w.extend({},e);var t,n,r,i,o=[],a=[],s=-1,u=function(){for(i=i||e.once,r=t=!0;a.length;s=-1){n=a.shift();while(++s<o.length)!1===o[s].apply(n[0],n[1])&&e.stopOnFalse&&(s=o.length,n=!1)}e.memory||(n=!1),t=!1,i&&(o=n?[]:"")},l={add:function(){return o&&(n&&!t&&(s=o.length-1,a.push(n)),function t(n){w.each(n,function(n,r){g(r)?e.unique&&l.has(r)||o.push(r):r&&r.length&&"string"!==x(r)&&t(r)})}(arguments),n&&!t&&u()),this},remove:function(){return w.each(arguments,function(e,t){var n;while((n=w.inArray(t,o,n))>-1)o.splice(n,1),n<=s&&s--}),this},has:function(e){return e?w.inArray(e,o)>-1:o.length>0},empty:function(){return o&&(o=[]),this},disable:function(){return i=a=[],o=n="",this},disabled:function(){return!o},lock:function(){return i=a=[],n||t||(o=n=""),this},locked:function(){return!!i},fireWith:function(e,n){return i||(n=[e,(n=n||[]).slice?n.slice():n],a.push(n),t||u()),this},fire:function(){return l.fireWith(this,arguments),this},fired:function(){return!!r}};return l};function I(e){return e}function W(e){throw e}function $(e,t,n,r){var i;try{e&&g(i=e.promise)?i.call(e).done(t).fail(n):e&&g(i=e.then)?i.call(e,t,n):t.apply(void 0,[e].slice(r))}catch(e){n.apply(void 0,[e])}}w.extend({Deferred:function(t){var n=[["notify","progress",w.Callbacks("memory"),w.Callbacks("memory"),2],["resolve","done",w.Callbacks("once memory"),w.Callbacks("once memory"),0,"resolved"],["reject","fail",w.Callbacks("once memory"),w.Callbacks("once memory"),1,"rejected"]],r="pending",i={state:function(){return 
r},always:function(){return o.done(arguments).fail(arguments),this},"catch":function(e){return i.then(null,e)},pipe:function(){var e=arguments;return w.Deferred(function(t){w.each(n,function(n,r){var i=g(e[r[4]])&&e[r[4]];o[r[1]](function(){var e=i&&i.apply(this,arguments);e&&g(e.promise)?e.promise().progress(t.notify).done(t.resolve).fail(t.reject):t[r[0]+"With"](this,i?[e]:arguments)})}),e=null}).promise()},then:function(t,r,i){var o=0;function a(t,n,r,i){return function(){var s=this,u=arguments,l=function(){var e,l;if(!(t<o)){if((e=r.apply(s,u))===n.promise())throw new TypeError("Thenable self-resolution");l=e&&("object"==typeof e||"function"==typeof e)&&e.then,g(l)?i?l.call(e,a(o,n,I,i),a(o,n,W,i)):(o++,l.call(e,a(o,n,I,i),a(o,n,W,i),a(o,n,I,n.notifyWith))):(r!==I&&(s=void 0,u=[e]),(i||n.resolveWith)(s,u))}},c=i?l:function(){try{l()}catch(e){w.Deferred.exceptionHook&&w.Deferred.exceptionHook(e,c.stackTrace),t+1>=o&&(r!==W&&(s=void 0,u=[e]),n.rejectWith(s,u))}};t?c():(w.Deferred.getStackHook&&(c.stackTrace=w.Deferred.getStackHook()),e.setTimeout(c))}}return w.Deferred(function(e){n[0][3].add(a(0,e,g(i)?i:I,e.notifyWith)),n[1][3].add(a(0,e,g(t)?t:I)),n[2][3].add(a(0,e,g(r)?r:W))}).promise()},promise:function(e){return null!=e?w.extend(e,i):i}},o={};return w.each(n,function(e,t){var a=t[2],s=t[5];i[t[1]]=a.add,s&&a.add(function(){r=s},n[3-e][2].disable,n[3-e][3].disable,n[0][2].lock,n[0][3].lock),a.add(t[3].fire),o[t[0]]=function(){return o[t[0]+"With"](this===o?void 0:this,arguments),this},o[t[0]+"With"]=a.fireWith}),i.promise(o),t&&t.call(o,o),o},when:function(e){var t=arguments.length,n=t,r=Array(n),i=o.call(arguments),a=w.Deferred(),s=function(e){return function(n){r[e]=this,i[e]=arguments.length>1?o.call(arguments):n,--t||a.resolveWith(r,i)}};if(t<=1&&($(e,a.done(s(n)).resolve,a.reject,!t),"pending"===a.state()||g(i[n]&&i[n].then)))return a.then();while(n--)$(i[n],s(n),a.reject);return a.promise()}});var 
B=/^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/;w.Deferred.exceptionHook=function(t,n){e.console&&e.console.warn&&t&&B.test(t.name)&&e.console.warn("jQuery.Deferred exception: "+t.message,t.stack,n)},w.readyException=function(t){e.setTimeout(function(){throw t})};var F=w.Deferred();w.fn.ready=function(e){return F.then(e)["catch"](function(e){w.readyException(e)}),this},w.extend({isReady:!1,readyWait:1,ready:function(e){(!0===e?--w.readyWait:w.isReady)||(w.isReady=!0,!0!==e&&--w.readyWait>0||F.resolveWith(r,[w]))}}),w.ready.then=F.then;function _(){r.removeEventListener("DOMContentLoaded",_),e.removeEventListener("load",_),w.ready()}"complete"===r.readyState||"loading"!==r.readyState&&!r.documentElement.doScroll?e.setTimeout(w.ready):(r.addEventListener("DOMContentLoaded",_),e.addEventListener("load",_));var z=function(e,t,n,r,i,o,a){var s=0,u=e.length,l=null==n;if("object"===x(n)){i=!0;for(s in n)z(e,t,s,n[s],!0,o,a)}else if(void 0!==r&&(i=!0,g(r)||(a=!0),l&&(a?(t.call(e,r),t=null):(l=t,t=function(e,t,n){return l.call(w(e),n)})),t))for(;s<u;s++)t(e[s],n,a?r:r.call(e[s],s,t(e[s],n)));return i?e:l?t.call(e):u?t(e[0],n):o},X=/^-ms-/,U=/-([a-z])/g;function V(e,t){return t.toUpperCase()}function G(e){return e.replace(X,"ms-").replace(U,V)}var Y=function(e){return 1===e.nodeType||9===e.nodeType||!+e.nodeType};function Q(){this.expando=w.expando+Q.uid++}Q.uid=1,Q.prototype={cache:function(e){var t=e[this.expando];return t||(t={},Y(e)&&(e.nodeType?e[this.expando]=t:Object.defineProperty(e,this.expando,{value:t,configurable:!0}))),t},set:function(e,t,n){var r,i=this.cache(e);if("string"==typeof t)i[G(t)]=n;else for(r in t)i[G(r)]=t[r];return i},get:function(e,t){return void 0===t?this.cache(e):e[this.expando]&&e[this.expando][G(t)]},access:function(e,t,n){return void 0===t||t&&"string"==typeof t&&void 0===n?this.get(e,t):(this.set(e,t,n),void 0!==n?n:t)},remove:function(e,t){var n,r=e[this.expando];if(void 0!==r){if(void 
0!==t){n=(t=Array.isArray(t)?t.map(G):(t=G(t))in r?[t]:t.match(M)||[]).length;while(n--)delete r[t[n]]}(void 0===t||w.isEmptyObject(r))&&(e.nodeType?e[this.expando]=void 0:delete e[this.expando])}},hasData:function(e){var t=e[this.expando];return void 0!==t&&!w.isEmptyObject(t)}};var J=new Q,K=new Q,Z=/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,ee=/[A-Z]/g;function te(e){return"true"===e||"false"!==e&&("null"===e?null:e===+e+""?+e:Z.test(e)?JSON.parse(e):e)}function ne(e,t,n){var r;if(void 0===n&&1===e.nodeType)if(r="data-"+t.replace(ee,"-$&").toLowerCase(),"string"==typeof(n=e.getAttribute(r))){try{n=te(n)}catch(e){}K.set(e,t,n)}else n=void 0;return n}w.extend({hasData:function(e){return K.hasData(e)||J.hasData(e)},data:function(e,t,n){return K.access(e,t,n)},removeData:function(e,t){K.remove(e,t)},_data:function(e,t,n){return J.access(e,t,n)},_removeData:function(e,t){J.remove(e,t)}}),w.fn.extend({data:function(e,t){var n,r,i,o=this[0],a=o&&o.attributes;if(void 0===e){if(this.length&&(i=K.get(o),1===o.nodeType&&!J.get(o,"hasDataAttrs"))){n=a.length;while(n--)a[n]&&0===(r=a[n].name).indexOf("data-")&&(r=G(r.slice(5)),ne(o,r,i[r]));J.set(o,"hasDataAttrs",!0)}return i}return"object"==typeof e?this.each(function(){K.set(this,e)}):z(this,function(t){var n;if(o&&void 0===t){if(void 0!==(n=K.get(o,e)))return n;if(void 0!==(n=ne(o,e)))return n}else this.each(function(){K.set(this,e,t)})},null,t,arguments.length>1,null,!0)},removeData:function(e){return this.each(function(){K.remove(this,e)})}}),w.extend({queue:function(e,t,n){var r;if(e)return t=(t||"fx")+"queue",r=J.get(e,t),n&&(!r||Array.isArray(n)?r=J.access(e,t,w.makeArray(n)):r.push(n)),r||[]},dequeue:function(e,t){t=t||"fx";var n=w.queue(e,t),r=n.length,i=n.shift(),o=w._queueHooks(e,t),a=function(){w.dequeue(e,t)};"inprogress"===i&&(i=n.shift(),r--),i&&("fx"===t&&n.unshift("inprogress"),delete o.stop,i.call(e,a,o)),!r&&o&&o.empty.fire()},_queueHooks:function(e,t){var n=t+"queueHooks";return 
J.get(e,n)||J.access(e,n,{empty:w.Callbacks("once memory").add(function(){J.remove(e,[t+"queue",n])})})}}),w.fn.extend({queue:function(e,t){var n=2;return"string"!=typeof e&&(t=e,e="fx",n--),arguments.length<n?w.queue(this[0],e):void 0===t?this:this.each(function(){var n=w.queue(this,e,t);w._queueHooks(this,e),"fx"===e&&"inprogress"!==n[0]&&w.dequeue(this,e)})},dequeue:function(e){return this.each(function(){w.dequeue(this,e)})},clearQueue:function(e){return this.queue(e||"fx",[])},promise:function(e,t){var n,r=1,i=w.Deferred(),o=this,a=this.length,s=function(){--r||i.resolveWith(o,[o])};"string"!=typeof e&&(t=e,e=void 0),e=e||"fx";while(a--)(n=J.get(o[a],e+"queueHooks"))&&n.empty&&(r++,n.empty.add(s));return s(),i.promise(t)}});var re=/[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/.source,ie=new RegExp("^(?:([+-])=|)("+re+")([a-z%]*)$","i"),oe=["Top","Right","Bottom","Left"],ae=function(e,t){return"none"===(e=t||e).style.display||""===e.style.display&&w.contains(e.ownerDocument,e)&&"none"===w.css(e,"display")},se=function(e,t,n,r){var i,o,a={};for(o in t)a[o]=e.style[o],e.style[o]=t[o];i=n.apply(e,r||[]);for(o in t)e.style[o]=a[o];return i};function ue(e,t,n,r){var i,o,a=20,s=r?function(){return r.cur()}:function(){return w.css(e,t,"")},u=s(),l=n&&n[3]||(w.cssNumber[t]?"":"px"),c=(w.cssNumber[t]||"px"!==l&&+u)&&ie.exec(w.css(e,t));if(c&&c[3]!==l){u/=2,l=l||c[3],c=+u||1;while(a--)w.style(e,t,c+l),(1-o)*(1-(o=s()/u||.5))<=0&&(a=0),c/=o;c*=2,w.style(e,t,c+l),n=n||[]}return n&&(c=+c||+u||0,i=n[1]?c+(n[1]+1)*n[2]:+n[2],r&&(r.unit=l,r.start=c,r.end=i)),i}var le={};function ce(e){var t,n=e.ownerDocument,r=e.nodeName,i=le[r];return i||(t=n.body.appendChild(n.createElement(r)),i=w.css(t,"display"),t.parentNode.removeChild(t),"none"===i&&(i="block"),le[r]=i,i)}function fe(e,t){for(var 
n,r,i=[],o=0,a=e.length;o<a;o++)(r=e[o]).style&&(n=r.style.display,t?("none"===n&&(i[o]=J.get(r,"display")||null,i[o]||(r.style.display="")),""===r.style.display&&ae(r)&&(i[o]=ce(r))):"none"!==n&&(i[o]="none",J.set(r,"display",n)));for(o=0;o<a;o++)null!=i[o]&&(e[o].style.display=i[o]);return e}w.fn.extend({show:function(){return fe(this,!0)},hide:function(){return fe(this)},toggle:function(e){return"boolean"==typeof e?e?this.show():this.hide():this.each(function(){ae(this)?w(this).show():w(this).hide()})}});var pe=/^(?:checkbox|radio)$/i,de=/<([a-z][^\/\0>\x20\t\r\n\f]+)/i,he=/^$|^module$|\/(?:java|ecma)script/i,ge={option:[1,"<select multiple='multiple'>","</select>"],thead:[1,"<table>","</table>"],col:[2,"<table><colgroup>","</colgroup></table>"],tr:[2,"<table><tbody>","</tbody></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],_default:[0,"",""]};ge.optgroup=ge.option,ge.tbody=ge.tfoot=ge.colgroup=ge.caption=ge.thead,ge.th=ge.td;function ye(e,t){var n;return n="undefined"!=typeof e.getElementsByTagName?e.getElementsByTagName(t||"*"):"undefined"!=typeof e.querySelectorAll?e.querySelectorAll(t||"*"):[],void 0===t||t&&N(e,t)?w.merge([e],n):n}function ve(e,t){for(var n=0,r=e.length;n<r;n++)J.set(e[n],"globalEval",!t||J.get(t[n],"globalEval"))}var me=/<|&#?\w+;/;function xe(e,t,n,r,i){for(var o,a,s,u,l,c,f=t.createDocumentFragment(),p=[],d=0,h=e.length;d<h;d++)if((o=e[d])||0===o)if("object"===x(o))w.merge(p,o.nodeType?[o]:o);else if(me.test(o)){a=a||f.appendChild(t.createElement("div")),s=(de.exec(o)||["",""])[1].toLowerCase(),u=ge[s]||ge._default,a.innerHTML=u[1]+w.htmlPrefilter(o)+u[2],c=u[0];while(c--)a=a.lastChild;w.merge(p,a.childNodes),(a=f.firstChild).textContent=""}else p.push(t.createTextNode(o));f.textContent="",d=0;while(o=p[d++])if(r&&w.inArray(o,r)>-1)i&&i.push(o);else if(l=w.contains(o.ownerDocument,o),a=ye(f.appendChild(o),"script"),l&&ve(a),n){c=0;while(o=a[c++])he.test(o.type||"")&&n.push(o)}return f}!function(){var 
e=r.createDocumentFragment().appendChild(r.createElement("div")),t=r.createElement("input");t.setAttribute("type","radio"),t.setAttribute("checked","checked"),t.setAttribute("name","t"),e.appendChild(t),h.checkClone=e.cloneNode(!0).cloneNode(!0).lastChild.checked,e.innerHTML="<textarea>x</textarea>",h.noCloneChecked=!!e.cloneNode(!0).lastChild.defaultValue}();var be=r.documentElement,we=/^key/,Te=/^(?:mouse|pointer|contextmenu|drag|drop)|click/,Ce=/^([^.]*)(?:\.(.+)|)/;function Ee(){return!0}function ke(){return!1}function Se(){try{return r.activeElement}catch(e){}}function De(e,t,n,r,i,o){var a,s;if("object"==typeof t){"string"!=typeof n&&(r=r||n,n=void 0);for(s in t)De(e,s,n,r,t[s],o);return e}if(null==r&&null==i?(i=n,r=n=void 0):null==i&&("string"==typeof n?(i=r,r=void 0):(i=r,r=n,n=void 0)),!1===i)i=ke;else if(!i)return e;return 1===o&&(a=i,(i=function(e){return w().off(e),a.apply(this,arguments)}).guid=a.guid||(a.guid=w.guid++)),e.each(function(){w.event.add(this,t,i,r,n)})}w.event={global:{},add:function(e,t,n,r,i){var o,a,s,u,l,c,f,p,d,h,g,y=J.get(e);if(y){n.handler&&(n=(o=n).handler,i=o.selector),i&&w.find.matchesSelector(be,i),n.guid||(n.guid=w.guid++),(u=y.events)||(u=y.events={}),(a=y.handle)||(a=y.handle=function(t){return"undefined"!=typeof w&&w.event.triggered!==t.type?w.event.dispatch.apply(e,arguments):void 0}),l=(t=(t||"").match(M)||[""]).length;while(l--)d=g=(s=Ce.exec(t[l])||[])[1],h=(s[2]||"").split(".").sort(),d&&(f=w.event.special[d]||{},d=(i?f.delegateType:f.bindType)||d,f=w.event.special[d]||{},c=w.extend({type:d,origType:g,data:r,handler:n,guid:n.guid,selector:i,needsContext:i&&w.expr.match.needsContext.test(i),namespace:h.join(".")},o),(p=u[d])||((p=u[d]=[]).delegateCount=0,f.setup&&!1!==f.setup.call(e,r,h,a)||e.addEventListener&&e.addEventListener(d,a)),f.add&&(f.add.call(e,c),c.handler.guid||(c.handler.guid=n.guid)),i?p.splice(p.delegateCount++,0,c):p.push(c),w.event.global[d]=!0)}},remove:function(e,t,n,r,i){var 
o,a,s,u,l,c,f,p,d,h,g,y=J.hasData(e)&&J.get(e);if(y&&(u=y.events)){l=(t=(t||"").match(M)||[""]).length;while(l--)if(s=Ce.exec(t[l])||[],d=g=s[1],h=(s[2]||"").split(".").sort(),d){f=w.event.special[d]||{},p=u[d=(r?f.delegateType:f.bindType)||d]||[],s=s[2]&&new RegExp("(^|\\.)"+h.join("\\.(?:.*\\.|)")+"(\\.|$)"),a=o=p.length;while(o--)c=p[o],!i&&g!==c.origType||n&&n.guid!==c.guid||s&&!s.test(c.namespace)||r&&r!==c.selector&&("**"!==r||!c.selector)||(p.splice(o,1),c.selector&&p.delegateCount--,f.remove&&f.remove.call(e,c));a&&!p.length&&(f.teardown&&!1!==f.teardown.call(e,h,y.handle)||w.removeEvent(e,d,y.handle),delete u[d])}else for(d in u)w.event.remove(e,d+t[l],n,r,!0);w.isEmptyObject(u)&&J.remove(e,"handle events")}},dispatch:function(e){var t=w.event.fix(e),n,r,i,o,a,s,u=new Array(arguments.length),l=(J.get(this,"events")||{})[t.type]||[],c=w.event.special[t.type]||{};for(u[0]=t,n=1;n<arguments.length;n++)u[n]=arguments[n];if(t.delegateTarget=this,!c.preDispatch||!1!==c.preDispatch.call(this,t)){s=w.event.handlers.call(this,t,l),n=0;while((o=s[n++])&&!t.isPropagationStopped()){t.currentTarget=o.elem,r=0;while((a=o.handlers[r++])&&!t.isImmediatePropagationStopped())t.rnamespace&&!t.rnamespace.test(a.namespace)||(t.handleObj=a,t.data=a.data,void 0!==(i=((w.event.special[a.origType]||{}).handle||a.handler).apply(o.elem,u))&&!1===(t.result=i)&&(t.preventDefault(),t.stopPropagation()))}return c.postDispatch&&c.postDispatch.call(this,t),t.result}},handlers:function(e,t){var n,r,i,o,a,s=[],u=t.delegateCount,l=e.target;if(u&&l.nodeType&&!("click"===e.type&&e.button>=1))for(;l!==this;l=l.parentNode||this)if(1===l.nodeType&&("click"!==e.type||!0!==l.disabled)){for(o=[],a={},n=0;n<u;n++)void 0===a[i=(r=t[n]).selector+" "]&&(a[i]=r.needsContext?w(i,this).index(l)>-1:w.find(i,this,null,[l]).length),a[i]&&o.push(r);o.length&&s.push({elem:l,handlers:o})}return 
l=this,u<t.length&&s.push({elem:l,handlers:t.slice(u)}),s},addProp:function(e,t){Object.defineProperty(w.Event.prototype,e,{enumerable:!0,configurable:!0,get:g(t)?function(){if(this.originalEvent)return t(this.originalEvent)}:function(){if(this.originalEvent)return this.originalEvent[e]},set:function(t){Object.defineProperty(this,e,{enumerable:!0,configurable:!0,writable:!0,value:t})}})},fix:function(e){return e[w.expando]?e:new w.Event(e)},special:{load:{noBubble:!0},focus:{trigger:function(){if(this!==Se()&&this.focus)return this.focus(),!1},delegateType:"focusin"},blur:{trigger:function(){if(this===Se()&&this.blur)return this.blur(),!1},delegateType:"focusout"},click:{trigger:function(){if("checkbox"===this.type&&this.click&&N(this,"input"))return this.click(),!1},_default:function(e){return N(e.target,"a")}},beforeunload:{postDispatch:function(e){void 0!==e.result&&e.originalEvent&&(e.originalEvent.returnValue=e.result)}}}},w.removeEvent=function(e,t,n){e.removeEventListener&&e.removeEventListener(t,n)},w.Event=function(e,t){if(!(this instanceof w.Event))return new w.Event(e,t);e&&e.type?(this.originalEvent=e,this.type=e.type,this.isDefaultPrevented=e.defaultPrevented||void 0===e.defaultPrevented&&!1===e.returnValue?Ee:ke,this.target=e.target&&3===e.target.nodeType?e.target.parentNode:e.target,this.currentTarget=e.currentTarget,this.relatedTarget=e.relatedTarget):this.type=e,t&&w.extend(this,t),this.timeStamp=e&&e.timeStamp||Date.now(),this[w.expando]=!0},w.Event.prototype={constructor:w.Event,isDefaultPrevented:ke,isPropagationStopped:ke,isImmediatePropagationStopped:ke,isSimulated:!1,preventDefault:function(){var e=this.originalEvent;this.isDefaultPrevented=Ee,e&&!this.isSimulated&&e.preventDefault()},stopPropagation:function(){var e=this.originalEvent;this.isPropagationStopped=Ee,e&&!this.isSimulated&&e.stopPropagation()},stopImmediatePropagation:function(){var 
e=this.originalEvent;this.isImmediatePropagationStopped=Ee,e&&!this.isSimulated&&e.stopImmediatePropagation(),this.stopPropagation()}},w.each({altKey:!0,bubbles:!0,cancelable:!0,changedTouches:!0,ctrlKey:!0,detail:!0,eventPhase:!0,metaKey:!0,pageX:!0,pageY:!0,shiftKey:!0,view:!0,"char":!0,charCode:!0,key:!0,keyCode:!0,button:!0,buttons:!0,clientX:!0,clientY:!0,offsetX:!0,offsetY:!0,pointerId:!0,pointerType:!0,screenX:!0,screenY:!0,targetTouches:!0,toElement:!0,touches:!0,which:function(e){var t=e.button;return null==e.which&&we.test(e.type)?null!=e.charCode?e.charCode:e.keyCode:!e.which&&void 0!==t&&Te.test(e.type)?1&t?1:2&t?3:4&t?2:0:e.which}},w.event.addProp),w.each({mouseenter:"mouseover",mouseleave:"mouseout",pointerenter:"pointerover",pointerleave:"pointerout"},function(e,t){w.event.special[e]={delegateType:t,bindType:t,handle:function(e){var n,r=this,i=e.relatedTarget,o=e.handleObj;return i&&(i===r||w.contains(r,i))||(e.type=o.origType,n=o.handler.apply(this,arguments),e.type=t),n}}}),w.fn.extend({on:function(e,t,n,r){return De(this,e,t,n,r)},one:function(e,t,n,r){return De(this,e,t,n,r,1)},off:function(e,t,n){var r,i;if(e&&e.preventDefault&&e.handleObj)return r=e.handleObj,w(e.delegateTarget).off(r.namespace?r.origType+"."+r.namespace:r.origType,r.selector,r.handler),this;if("object"==typeof e){for(i in e)this.off(i,t,e[i]);return this}return!1!==t&&"function"!=typeof t||(n=t,t=void 0),!1===n&&(n=ke),this.each(function(){w.event.remove(this,e,n,t)})}});var Ne=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([a-z][^\/\0>\x20\t\r\n\f]*)[^>]*)\/>/gi,Ae=/<script|<style|<link/i,je=/checked\s*(?:[^=]|=\s*.checked.)/i,qe=/^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g;function Le(e,t){return N(e,"table")&&N(11!==t.nodeType?t:t.firstChild,"tr")?w(e).children("tbody")[0]||e:e}function He(e){return e.type=(null!==e.getAttribute("type"))+"/"+e.type,e}function Oe(e){return"true/"===(e.type||"").slice(0,5)?e.type=e.type.slice(5):e.removeAttribute("type"),e}function 
Pe(e,t){var n,r,i,o,a,s,u,l;if(1===t.nodeType){if(J.hasData(e)&&(o=J.access(e),a=J.set(t,o),l=o.events)){delete a.handle,a.events={};for(i in l)for(n=0,r=l[i].length;n<r;n++)w.event.add(t,i,l[i][n])}K.hasData(e)&&(s=K.access(e),u=w.extend({},s),K.set(t,u))}}function Me(e,t){var n=t.nodeName.toLowerCase();"input"===n&&pe.test(e.type)?t.checked=e.checked:"input"!==n&&"textarea"!==n||(t.defaultValue=e.defaultValue)}function Re(e,t,n,r){t=a.apply([],t);var i,o,s,u,l,c,f=0,p=e.length,d=p-1,y=t[0],v=g(y);if(v||p>1&&"string"==typeof y&&!h.checkClone&&je.test(y))return e.each(function(i){var o=e.eq(i);v&&(t[0]=y.call(this,i,o.html())),Re(o,t,n,r)});if(p&&(i=xe(t,e[0].ownerDocument,!1,e,r),o=i.firstChild,1===i.childNodes.length&&(i=o),o||r)){for(u=(s=w.map(ye(i,"script"),He)).length;f<p;f++)l=i,f!==d&&(l=w.clone(l,!0,!0),u&&w.merge(s,ye(l,"script"))),n.call(e[f],l,f);if(u)for(c=s[s.length-1].ownerDocument,w.map(s,Oe),f=0;f<u;f++)l=s[f],he.test(l.type||"")&&!J.access(l,"globalEval")&&w.contains(c,l)&&(l.src&&"module"!==(l.type||"").toLowerCase()?w._evalUrl&&w._evalUrl(l.src):m(l.textContent.replace(qe,""),c,l))}return e}function Ie(e,t,n){for(var r,i=t?w.filter(t,e):e,o=0;null!=(r=i[o]);o++)n||1!==r.nodeType||w.cleanData(ye(r)),r.parentNode&&(n&&w.contains(r.ownerDocument,r)&&ve(ye(r,"script")),r.parentNode.removeChild(r));return e}w.extend({htmlPrefilter:function(e){return e.replace(Ne,"<$1></$2>")},clone:function(e,t,n){var r,i,o,a,s=e.cloneNode(!0),u=w.contains(e.ownerDocument,e);if(!(h.noCloneChecked||1!==e.nodeType&&11!==e.nodeType||w.isXMLDoc(e)))for(a=ye(s),r=0,i=(o=ye(e)).length;r<i;r++)Me(o[r],a[r]);if(t)if(n)for(o=o||ye(e),a=a||ye(s),r=0,i=o.length;r<i;r++)Pe(o[r],a[r]);else Pe(e,s);return(a=ye(s,"script")).length>0&&ve(a,!u&&ye(e,"script")),s},cleanData:function(e){for(var t,n,r,i=w.event.special,o=0;void 0!==(n=e[o]);o++)if(Y(n)){if(t=n[J.expando]){if(t.events)for(r in t.events)i[r]?w.event.remove(n,r):w.removeEvent(n,r,t.handle);n[J.expando]=void 
0}n[K.expando]&&(n[K.expando]=void 0)}}}),w.fn.extend({detach:function(e){return Ie(this,e,!0)},remove:function(e){return Ie(this,e)},text:function(e){return z(this,function(e){return void 0===e?w.text(this):this.empty().each(function(){1!==this.nodeType&&11!==this.nodeType&&9!==this.nodeType||(this.textContent=e)})},null,e,arguments.length)},append:function(){return Re(this,arguments,function(e){1!==this.nodeType&&11!==this.nodeType&&9!==this.nodeType||Le(this,e).appendChild(e)})},prepend:function(){return Re(this,arguments,function(e){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var t=Le(this,e);t.insertBefore(e,t.firstChild)}})},before:function(){return Re(this,arguments,function(e){this.parentNode&&this.parentNode.insertBefore(e,this)})},after:function(){return Re(this,arguments,function(e){this.parentNode&&this.parentNode.insertBefore(e,this.nextSibling)})},empty:function(){for(var e,t=0;null!=(e=this[t]);t++)1===e.nodeType&&(w.cleanData(ye(e,!1)),e.textContent="");return this},clone:function(e,t){return e=null!=e&&e,t=null==t?e:t,this.map(function(){return w.clone(this,e,t)})},html:function(e){return z(this,function(e){var t=this[0]||{},n=0,r=this.length;if(void 0===e&&1===t.nodeType)return t.innerHTML;if("string"==typeof e&&!Ae.test(e)&&!ge[(de.exec(e)||["",""])[1].toLowerCase()]){e=w.htmlPrefilter(e);try{for(;n<r;n++)1===(t=this[n]||{}).nodeType&&(w.cleanData(ye(t,!1)),t.innerHTML=e);t=0}catch(e){}}t&&this.empty().append(e)},null,e,arguments.length)},replaceWith:function(){var e=[];return Re(this,arguments,function(t){var n=this.parentNode;w.inArray(this,e)<0&&(w.cleanData(ye(this)),n&&n.replaceChild(t,this))},e)}}),w.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(e,t){w.fn[e]=function(e){for(var n,r=[],i=w(e),o=i.length-1,a=0;a<=o;a++)n=a===o?this:this.clone(!0),w(i[a])[t](n),s.apply(r,n.get());return this.pushStack(r)}});var We=new 
RegExp("^("+re+")(?!px)[a-z%]+$","i"),$e=function(t){var n=t.ownerDocument.defaultView;return n&&n.opener||(n=e),n.getComputedStyle(t)},Be=new RegExp(oe.join("|"),"i");!function(){function t(){if(c){l.style.cssText="position:absolute;left:-11111px;width:60px;margin-top:1px;padding:0;border:0",c.style.cssText="position:relative;display:block;box-sizing:border-box;overflow:scroll;margin:auto;border:1px;padding:1px;width:60%;top:1%",be.appendChild(l).appendChild(c);var t=e.getComputedStyle(c);i="1%"!==t.top,u=12===n(t.marginLeft),c.style.right="60%",s=36===n(t.right),o=36===n(t.width),c.style.position="absolute",a=36===c.offsetWidth||"absolute",be.removeChild(l),c=null}}function n(e){return Math.round(parseFloat(e))}var i,o,a,s,u,l=r.createElement("div"),c=r.createElement("div");c.style&&(c.style.backgroundClip="content-box",c.cloneNode(!0).style.backgroundClip="",h.clearCloneStyle="content-box"===c.style.backgroundClip,w.extend(h,{boxSizingReliable:function(){return t(),o},pixelBoxStyles:function(){return t(),s},pixelPosition:function(){return t(),i},reliableMarginLeft:function(){return t(),u},scrollboxSize:function(){return t(),a}}))}();function Fe(e,t,n){var r,i,o,a,s=e.style;return(n=n||$e(e))&&(""!==(a=n.getPropertyValue(t)||n[t])||w.contains(e.ownerDocument,e)||(a=w.style(e,t)),!h.pixelBoxStyles()&&We.test(a)&&Be.test(t)&&(r=s.width,i=s.minWidth,o=s.maxWidth,s.minWidth=s.maxWidth=s.width=a,a=n.width,s.width=r,s.minWidth=i,s.maxWidth=o)),void 0!==a?a+"":a}function _e(e,t){return{get:function(){if(!e())return(this.get=t).apply(this,arguments);delete this.get}}}var ze=/^(none|table(?!-c[ea]).+)/,Xe=/^--/,Ue={position:"absolute",visibility:"hidden",display:"block"},Ve={letterSpacing:"0",fontWeight:"400"},Ge=["Webkit","Moz","ms"],Ye=r.createElement("div").style;function Qe(e){if(e in Ye)return e;var t=e[0].toUpperCase()+e.slice(1),n=Ge.length;while(n--)if((e=Ge[n]+t)in Ye)return e}function Je(e){var t=w.cssProps[e];return t||(t=w.cssProps[e]=Qe(e)||e),t}function 
Ke(e,t,n){var r=ie.exec(t);return r?Math.max(0,r[2]-(n||0))+(r[3]||"px"):t}function Ze(e,t,n,r,i,o){var a="width"===t?1:0,s=0,u=0;if(n===(r?"border":"content"))return 0;for(;a<4;a+=2)"margin"===n&&(u+=w.css(e,n+oe[a],!0,i)),r?("content"===n&&(u-=w.css(e,"padding"+oe[a],!0,i)),"margin"!==n&&(u-=w.css(e,"border"+oe[a]+"Width",!0,i))):(u+=w.css(e,"padding"+oe[a],!0,i),"padding"!==n?u+=w.css(e,"border"+oe[a]+"Width",!0,i):s+=w.css(e,"border"+oe[a]+"Width",!0,i));return!r&&o>=0&&(u+=Math.max(0,Math.ceil(e["offset"+t[0].toUpperCase()+t.slice(1)]-o-u-s-.5))),u}function et(e,t,n){var r=$e(e),i=Fe(e,t,r),o="border-box"===w.css(e,"boxSizing",!1,r),a=o;if(We.test(i)){if(!n)return i;i="auto"}return a=a&&(h.boxSizingReliable()||i===e.style[t]),("auto"===i||!parseFloat(i)&&"inline"===w.css(e,"display",!1,r))&&(i=e["offset"+t[0].toUpperCase()+t.slice(1)],a=!0),(i=parseFloat(i)||0)+Ze(e,t,n||(o?"border":"content"),a,r,i)+"px"}w.extend({cssHooks:{opacity:{get:function(e,t){if(t){var n=Fe(e,"opacity");return""===n?"1":n}}}},cssNumber:{animationIterationCount:!0,columnCount:!0,fillOpacity:!0,flexGrow:!0,flexShrink:!0,fontWeight:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{},style:function(e,t,n,r){if(e&&3!==e.nodeType&&8!==e.nodeType&&e.style){var i,o,a,s=G(t),u=Xe.test(t),l=e.style;if(u||(t=Je(s)),a=w.cssHooks[t]||w.cssHooks[s],void 0===n)return a&&"get"in a&&void 0!==(i=a.get(e,!1,r))?i:l[t];"string"==(o=typeof n)&&(i=ie.exec(n))&&i[1]&&(n=ue(e,t,i),o="number"),null!=n&&n===n&&("number"===o&&(n+=i&&i[3]||(w.cssNumber[s]?"":"px")),h.clearCloneStyle||""!==n||0!==t.indexOf("background")||(l[t]="inherit"),a&&"set"in a&&void 0===(n=a.set(e,n,r))||(u?l.setProperty(t,n):l[t]=n))}},css:function(e,t,n,r){var i,o,a,s=G(t);return Xe.test(t)||(t=Je(s)),(a=w.cssHooks[t]||w.cssHooks[s])&&"get"in a&&(i=a.get(e,!0,n)),void 0===i&&(i=Fe(e,t,r)),"normal"===i&&t in 
Ve&&(i=Ve[t]),""===n||n?(o=parseFloat(i),!0===n||isFinite(o)?o||0:i):i}}),w.each(["height","width"],function(e,t){w.cssHooks[t]={get:function(e,n,r){if(n)return!ze.test(w.css(e,"display"))||e.getClientRects().length&&e.getBoundingClientRect().width?et(e,t,r):se(e,Ue,function(){return et(e,t,r)})},set:function(e,n,r){var i,o=$e(e),a="border-box"===w.css(e,"boxSizing",!1,o),s=r&&Ze(e,t,r,a,o);return a&&h.scrollboxSize()===o.position&&(s-=Math.ceil(e["offset"+t[0].toUpperCase()+t.slice(1)]-parseFloat(o[t])-Ze(e,t,"border",!1,o)-.5)),s&&(i=ie.exec(n))&&"px"!==(i[3]||"px")&&(e.style[t]=n,n=w.css(e,t)),Ke(e,n,s)}}}),w.cssHooks.marginLeft=_e(h.reliableMarginLeft,function(e,t){if(t)return(parseFloat(Fe(e,"marginLeft"))||e.getBoundingClientRect().left-se(e,{marginLeft:0},function(){return e.getBoundingClientRect().left}))+"px"}),w.each({margin:"",padding:"",border:"Width"},function(e,t){w.cssHooks[e+t]={expand:function(n){for(var r=0,i={},o="string"==typeof n?n.split(" "):[n];r<4;r++)i[e+oe[r]+t]=o[r]||o[r-2]||o[0];return i}},"margin"!==e&&(w.cssHooks[e+t].set=Ke)}),w.fn.extend({css:function(e,t){return z(this,function(e,t,n){var r,i,o={},a=0;if(Array.isArray(t)){for(r=$e(e),i=t.length;a<i;a++)o[t[a]]=w.css(e,t[a],!1,r);return o}return void 0!==n?w.style(e,t,n):w.css(e,t)},e,t,arguments.length>1)}});function tt(e,t,n,r,i){return new tt.prototype.init(e,t,n,r,i)}w.Tween=tt,tt.prototype={constructor:tt,init:function(e,t,n,r,i,o){this.elem=e,this.prop=n,this.easing=i||w.easing._default,this.options=t,this.start=this.now=this.cur(),this.end=r,this.unit=o||(w.cssNumber[n]?"":"px")},cur:function(){var e=tt.propHooks[this.prop];return e&&e.get?e.get(this):tt.propHooks._default.get(this)},run:function(e){var t,n=tt.propHooks[this.prop];return 
this.options.duration?this.pos=t=w.easing[this.easing](e,this.options.duration*e,0,1,this.options.duration):this.pos=t=e,this.now=(this.end-this.start)*t+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),n&&n.set?n.set(this):tt.propHooks._default.set(this),this}},tt.prototype.init.prototype=tt.prototype,tt.propHooks={_default:{get:function(e){var t;return 1!==e.elem.nodeType||null!=e.elem[e.prop]&&null==e.elem.style[e.prop]?e.elem[e.prop]:(t=w.css(e.elem,e.prop,""))&&"auto"!==t?t:0},set:function(e){w.fx.step[e.prop]?w.fx.step[e.prop](e):1!==e.elem.nodeType||null==e.elem.style[w.cssProps[e.prop]]&&!w.cssHooks[e.prop]?e.elem[e.prop]=e.now:w.style(e.elem,e.prop,e.now+e.unit)}}},tt.propHooks.scrollTop=tt.propHooks.scrollLeft={set:function(e){e.elem.nodeType&&e.elem.parentNode&&(e.elem[e.prop]=e.now)}},w.easing={linear:function(e){return e},swing:function(e){return.5-Math.cos(e*Math.PI)/2},_default:"swing"},w.fx=tt.prototype.init,w.fx.step={};var nt,rt,it=/^(?:toggle|show|hide)$/,ot=/queueHooks$/;function at(){rt&&(!1===r.hidden&&e.requestAnimationFrame?e.requestAnimationFrame(at):e.setTimeout(at,w.fx.interval),w.fx.tick())}function st(){return e.setTimeout(function(){nt=void 0}),nt=Date.now()}function ut(e,t){var n,r=0,i={height:e};for(t=t?1:0;r<4;r+=2-t)i["margin"+(n=oe[r])]=i["padding"+n]=e;return t&&(i.opacity=i.width=e),i}function lt(e,t,n){for(var r,i=(pt.tweeners[t]||[]).concat(pt.tweeners["*"]),o=0,a=i.length;o<a;o++)if(r=i[o].call(n,t,e))return r}function ct(e,t,n){var r,i,o,a,s,u,l,c,f="width"in t||"height"in t,p=this,d={},h=e.style,g=e.nodeType&&ae(e),y=J.get(e,"fxshow");n.queue||(null==(a=w._queueHooks(e,"fx")).unqueued&&(a.unqueued=0,s=a.empty.fire,a.empty.fire=function(){a.unqueued||s()}),a.unqueued++,p.always(function(){p.always(function(){a.unqueued--,w.queue(e,"fx").length||a.empty.fire()})}));for(r in t)if(i=t[r],it.test(i)){if(delete t[r],o=o||"toggle"===i,i===(g?"hide":"show")){if("show"!==i||!y||void 
0===y[r])continue;g=!0}d[r]=y&&y[r]||w.style(e,r)}if((u=!w.isEmptyObject(t))||!w.isEmptyObject(d)){f&&1===e.nodeType&&(n.overflow=[h.overflow,h.overflowX,h.overflowY],null==(l=y&&y.display)&&(l=J.get(e,"display")),"none"===(c=w.css(e,"display"))&&(l?c=l:(fe([e],!0),l=e.style.display||l,c=w.css(e,"display"),fe([e]))),("inline"===c||"inline-block"===c&&null!=l)&&"none"===w.css(e,"float")&&(u||(p.done(function(){h.display=l}),null==l&&(c=h.display,l="none"===c?"":c)),h.display="inline-block")),n.overflow&&(h.overflow="hidden",p.always(function(){h.overflow=n.overflow[0],h.overflowX=n.overflow[1],h.overflowY=n.overflow[2]})),u=!1;for(r in d)u||(y?"hidden"in y&&(g=y.hidden):y=J.access(e,"fxshow",{display:l}),o&&(y.hidden=!g),g&&fe([e],!0),p.done(function(){g||fe([e]),J.remove(e,"fxshow");for(r in d)w.style(e,r,d[r])})),u=lt(g?y[r]:0,r,p),r in y||(y[r]=u.start,g&&(u.end=u.start,u.start=0))}}function ft(e,t){var n,r,i,o,a;for(n in e)if(r=G(n),i=t[r],o=e[n],Array.isArray(o)&&(i=o[1],o=e[n]=o[0]),n!==r&&(e[r]=o,delete e[n]),(a=w.cssHooks[r])&&"expand"in a){o=a.expand(o),delete e[r];for(n in o)n in e||(e[n]=o[n],t[n]=i)}else t[r]=i}function pt(e,t,n){var r,i,o=0,a=pt.prefilters.length,s=w.Deferred().always(function(){delete u.elem}),u=function(){if(i)return!1;for(var t=nt||st(),n=Math.max(0,l.startTime+l.duration-t),r=1-(n/l.duration||0),o=0,a=l.tweens.length;o<a;o++)l.tweens[o].run(r);return s.notifyWith(e,[l,r,n]),r<1&&a?n:(a||s.notifyWith(e,[l,1,0]),s.resolveWith(e,[l]),!1)},l=s.promise({elem:e,props:w.extend({},t),opts:w.extend(!0,{specialEasing:{},easing:w.easing._default},n),originalProperties:t,originalOptions:n,startTime:nt||st(),duration:n.duration,tweens:[],createTween:function(t,n){var r=w.Tween(e,l.opts,t,n,l.opts.specialEasing[t]||l.opts.easing);return l.tweens.push(r),r},stop:function(t){var n=0,r=t?l.tweens.length:0;if(i)return this;for(i=!0;n<r;n++)l.tweens[n].run(1);return 
t?(s.notifyWith(e,[l,1,0]),s.resolveWith(e,[l,t])):s.rejectWith(e,[l,t]),this}}),c=l.props;for(ft(c,l.opts.specialEasing);o<a;o++)if(r=pt.prefilters[o].call(l,e,c,l.opts))return g(r.stop)&&(w._queueHooks(l.elem,l.opts.queue).stop=r.stop.bind(r)),r;return w.map(c,lt,l),g(l.opts.start)&&l.opts.start.call(e,l),l.progress(l.opts.progress).done(l.opts.done,l.opts.complete).fail(l.opts.fail).always(l.opts.always),w.fx.timer(w.extend(u,{elem:e,anim:l,queue:l.opts.queue})),l}w.Animation=w.extend(pt,{tweeners:{"*":[function(e,t){var n=this.createTween(e,t);return ue(n.elem,e,ie.exec(t),n),n}]},tweener:function(e,t){g(e)?(t=e,e=["*"]):e=e.match(M);for(var n,r=0,i=e.length;r<i;r++)n=e[r],pt.tweeners[n]=pt.tweeners[n]||[],pt.tweeners[n].unshift(t)},prefilters:[ct],prefilter:function(e,t){t?pt.prefilters.unshift(e):pt.prefilters.push(e)}}),w.speed=function(e,t,n){var r=e&&"object"==typeof e?w.extend({},e):{complete:n||!n&&t||g(e)&&e,duration:e,easing:n&&t||t&&!g(t)&&t};return w.fx.off?r.duration=0:"number"!=typeof r.duration&&(r.duration in w.fx.speeds?r.duration=w.fx.speeds[r.duration]:r.duration=w.fx.speeds._default),null!=r.queue&&!0!==r.queue||(r.queue="fx"),r.old=r.complete,r.complete=function(){g(r.old)&&r.old.call(this),r.queue&&w.dequeue(this,r.queue)},r},w.fn.extend({fadeTo:function(e,t,n,r){return this.filter(ae).css("opacity",0).show().end().animate({opacity:t},e,n,r)},animate:function(e,t,n,r){var i=w.isEmptyObject(e),o=w.speed(t,n,r),a=function(){var t=pt(this,w.extend({},e),o);(i||J.get(this,"finish"))&&t.stop(!0)};return a.finish=a,i||!1===o.queue?this.each(a):this.queue(o.queue,a)},stop:function(e,t,n){var r=function(e){var t=e.stop;delete e.stop,t(n)};return"string"!=typeof e&&(n=t,t=e,e=void 0),t&&!1!==e&&this.queue(e||"fx",[]),this.each(function(){var t=!0,i=null!=e&&e+"queueHooks",o=w.timers,a=J.get(this);if(i)a[i]&&a[i].stop&&r(a[i]);else for(i in 
a)a[i]&&a[i].stop&&ot.test(i)&&r(a[i]);for(i=o.length;i--;)o[i].elem!==this||null!=e&&o[i].queue!==e||(o[i].anim.stop(n),t=!1,o.splice(i,1));!t&&n||w.dequeue(this,e)})},finish:function(e){return!1!==e&&(e=e||"fx"),this.each(function(){var t,n=J.get(this),r=n[e+"queue"],i=n[e+"queueHooks"],o=w.timers,a=r?r.length:0;for(n.finish=!0,w.queue(this,e,[]),i&&i.stop&&i.stop.call(this,!0),t=o.length;t--;)o[t].elem===this&&o[t].queue===e&&(o[t].anim.stop(!0),o.splice(t,1));for(t=0;t<a;t++)r[t]&&r[t].finish&&r[t].finish.call(this);delete n.finish})}}),w.each(["toggle","show","hide"],function(e,t){var n=w.fn[t];w.fn[t]=function(e,r,i){return null==e||"boolean"==typeof e?n.apply(this,arguments):this.animate(ut(t,!0),e,r,i)}}),w.each({slideDown:ut("show"),slideUp:ut("hide"),slideToggle:ut("toggle"),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(e,t){w.fn[e]=function(e,n,r){return this.animate(t,e,n,r)}}),w.timers=[],w.fx.tick=function(){var e,t=0,n=w.timers;for(nt=Date.now();t<n.length;t++)(e=n[t])()||n[t]!==e||n.splice(t--,1);n.length||w.fx.stop(),nt=void 0},w.fx.timer=function(e){w.timers.push(e),w.fx.start()},w.fx.interval=13,w.fx.start=function(){rt||(rt=!0,at())},w.fx.stop=function(){rt=null},w.fx.speeds={slow:600,fast:200,_default:400},w.fn.delay=function(t,n){return t=w.fx?w.fx.speeds[t]||t:t,n=n||"fx",this.queue(n,function(n,r){var i=e.setTimeout(n,t);r.stop=function(){e.clearTimeout(i)}})},function(){var e=r.createElement("input"),t=r.createElement("select").appendChild(r.createElement("option"));e.type="checkbox",h.checkOn=""!==e.value,h.optSelected=t.selected,(e=r.createElement("input")).value="t",e.type="radio",h.radioValue="t"===e.value}();var dt,ht=w.expr.attrHandle;w.fn.extend({attr:function(e,t){return z(this,w.attr,e,t,arguments.length>1)},removeAttr:function(e){return this.each(function(){w.removeAttr(this,e)})}}),w.extend({attr:function(e,t,n){var r,i,o=e.nodeType;if(3!==o&&8!==o&&2!==o)return"undefined"==typeof 
e.getAttribute?w.prop(e,t,n):(1===o&&w.isXMLDoc(e)||(i=w.attrHooks[t.toLowerCase()]||(w.expr.match.bool.test(t)?dt:void 0)),void 0!==n?null===n?void w.removeAttr(e,t):i&&"set"in i&&void 0!==(r=i.set(e,n,t))?r:(e.setAttribute(t,n+""),n):i&&"get"in i&&null!==(r=i.get(e,t))?r:null==(r=w.find.attr(e,t))?void 0:r)},attrHooks:{type:{set:function(e,t){if(!h.radioValue&&"radio"===t&&N(e,"input")){var n=e.value;return e.setAttribute("type",t),n&&(e.value=n),t}}}},removeAttr:function(e,t){var n,r=0,i=t&&t.match(M);if(i&&1===e.nodeType)while(n=i[r++])e.removeAttribute(n)}}),dt={set:function(e,t,n){return!1===t?w.removeAttr(e,n):e.setAttribute(n,n),n}},w.each(w.expr.match.bool.source.match(/\w+/g),function(e,t){var n=ht[t]||w.find.attr;ht[t]=function(e,t,r){var i,o,a=t.toLowerCase();return r||(o=ht[a],ht[a]=i,i=null!=n(e,t,r)?a:null,ht[a]=o),i}});var gt=/^(?:input|select|textarea|button)$/i,yt=/^(?:a|area)$/i;w.fn.extend({prop:function(e,t){return z(this,w.prop,e,t,arguments.length>1)},removeProp:function(e){return this.each(function(){delete this[w.propFix[e]||e]})}}),w.extend({prop:function(e,t,n){var r,i,o=e.nodeType;if(3!==o&&8!==o&&2!==o)return 1===o&&w.isXMLDoc(e)||(t=w.propFix[t]||t,i=w.propHooks[t]),void 0!==n?i&&"set"in i&&void 0!==(r=i.set(e,n,t))?r:e[t]=n:i&&"get"in i&&null!==(r=i.get(e,t))?r:e[t]},propHooks:{tabIndex:{get:function(e){var t=w.find.attr(e,"tabindex");return t?parseInt(t,10):gt.test(e.nodeName)||yt.test(e.nodeName)&&e.href?0:-1}}},propFix:{"for":"htmlFor","class":"className"}}),h.optSelected||(w.propHooks.selected={get:function(e){var t=e.parentNode;return t&&t.parentNode&&t.parentNode.selectedIndex,null},set:function(e){var t=e.parentNode;t&&(t.selectedIndex,t.parentNode&&t.parentNode.selectedIndex)}}),w.each(["tabIndex","readOnly","maxLength","cellSpacing","cellPadding","rowSpan","colSpan","useMap","frameBorder","contentEditable"],function(){w.propFix[this.toLowerCase()]=this});function vt(e){return(e.match(M)||[]).join(" ")}function mt(e){return 
e.getAttribute&&e.getAttribute("class")||""}function xt(e){return Array.isArray(e)?e:"string"==typeof e?e.match(M)||[]:[]}w.fn.extend({addClass:function(e){var t,n,r,i,o,a,s,u=0;if(g(e))return this.each(function(t){w(this).addClass(e.call(this,t,mt(this)))});if((t=xt(e)).length)while(n=this[u++])if(i=mt(n),r=1===n.nodeType&&" "+vt(i)+" "){a=0;while(o=t[a++])r.indexOf(" "+o+" ")<0&&(r+=o+" ");i!==(s=vt(r))&&n.setAttribute("class",s)}return this},removeClass:function(e){var t,n,r,i,o,a,s,u=0;if(g(e))return this.each(function(t){w(this).removeClass(e.call(this,t,mt(this)))});if(!arguments.length)return this.attr("class","");if((t=xt(e)).length)while(n=this[u++])if(i=mt(n),r=1===n.nodeType&&" "+vt(i)+" "){a=0;while(o=t[a++])while(r.indexOf(" "+o+" ")>-1)r=r.replace(" "+o+" "," ");i!==(s=vt(r))&&n.setAttribute("class",s)}return this},toggleClass:function(e,t){var n=typeof e,r="string"===n||Array.isArray(e);return"boolean"==typeof t&&r?t?this.addClass(e):this.removeClass(e):g(e)?this.each(function(n){w(this).toggleClass(e.call(this,n,mt(this),t),t)}):this.each(function(){var t,i,o,a;if(r){i=0,o=w(this),a=xt(e);while(t=a[i++])o.hasClass(t)?o.removeClass(t):o.addClass(t)}else void 0!==e&&"boolean"!==n||((t=mt(this))&&J.set(this,"__className__",t),this.setAttribute&&this.setAttribute("class",t||!1===e?"":J.get(this,"__className__")||""))})},hasClass:function(e){var t,n,r=0;t=" "+e+" ";while(n=this[r++])if(1===n.nodeType&&(" "+vt(mt(n))+" ").indexOf(t)>-1)return!0;return!1}});var bt=/\r/g;w.fn.extend({val:function(e){var t,n,r,i=this[0];{if(arguments.length)return r=g(e),this.each(function(n){var i;1===this.nodeType&&(null==(i=r?e.call(this,n,w(this).val()):e)?i="":"number"==typeof i?i+="":Array.isArray(i)&&(i=w.map(i,function(e){return null==e?"":e+""})),(t=w.valHooks[this.type]||w.valHooks[this.nodeName.toLowerCase()])&&"set"in t&&void 0!==t.set(this,i,"value")||(this.value=i))});if(i)return(t=w.valHooks[i.type]||w.valHooks[i.nodeName.toLowerCase()])&&"get"in t&&void 
0!==(n=t.get(i,"value"))?n:"string"==typeof(n=i.value)?n.replace(bt,""):null==n?"":n}}}),w.extend({valHooks:{option:{get:function(e){var t=w.find.attr(e,"value");return null!=t?t:vt(w.text(e))}},select:{get:function(e){var t,n,r,i=e.options,o=e.selectedIndex,a="select-one"===e.type,s=a?null:[],u=a?o+1:i.length;for(r=o<0?u:a?o:0;r<u;r++)if(((n=i[r]).selected||r===o)&&!n.disabled&&(!n.parentNode.disabled||!N(n.parentNode,"optgroup"))){if(t=w(n).val(),a)return t;s.push(t)}return s},set:function(e,t){var n,r,i=e.options,o=w.makeArray(t),a=i.length;while(a--)((r=i[a]).selected=w.inArray(w.valHooks.option.get(r),o)>-1)&&(n=!0);return n||(e.selectedIndex=-1),o}}}}),w.each(["radio","checkbox"],function(){w.valHooks[this]={set:function(e,t){if(Array.isArray(t))return e.checked=w.inArray(w(e).val(),t)>-1}},h.checkOn||(w.valHooks[this].get=function(e){return null===e.getAttribute("value")?"on":e.value})}),h.focusin="onfocusin"in e;var wt=/^(?:focusinfocus|focusoutblur)$/,Tt=function(e){e.stopPropagation()};w.extend(w.event,{trigger:function(t,n,i,o){var a,s,u,l,c,p,d,h,v=[i||r],m=f.call(t,"type")?t.type:t,x=f.call(t,"namespace")?t.namespace.split("."):[];if(s=h=u=i=i||r,3!==i.nodeType&&8!==i.nodeType&&!wt.test(m+w.event.triggered)&&(m.indexOf(".")>-1&&(m=(x=m.split(".")).shift(),x.sort()),c=m.indexOf(":")<0&&"on"+m,t=t[w.expando]?t:new w.Event(m,"object"==typeof t&&t),t.isTrigger=o?2:3,t.namespace=x.join("."),t.rnamespace=t.namespace?new RegExp("(^|\\.)"+x.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,t.result=void 
0,t.target||(t.target=i),n=null==n?[t]:w.makeArray(n,[t]),d=w.event.special[m]||{},o||!d.trigger||!1!==d.trigger.apply(i,n))){if(!o&&!d.noBubble&&!y(i)){for(l=d.delegateType||m,wt.test(l+m)||(s=s.parentNode);s;s=s.parentNode)v.push(s),u=s;u===(i.ownerDocument||r)&&v.push(u.defaultView||u.parentWindow||e)}a=0;while((s=v[a++])&&!t.isPropagationStopped())h=s,t.type=a>1?l:d.bindType||m,(p=(J.get(s,"events")||{})[t.type]&&J.get(s,"handle"))&&p.apply(s,n),(p=c&&s[c])&&p.apply&&Y(s)&&(t.result=p.apply(s,n),!1===t.result&&t.preventDefault());return t.type=m,o||t.isDefaultPrevented()||d._default&&!1!==d._default.apply(v.pop(),n)||!Y(i)||c&&g(i[m])&&!y(i)&&((u=i[c])&&(i[c]=null),w.event.triggered=m,t.isPropagationStopped()&&h.addEventListener(m,Tt),i[m](),t.isPropagationStopped()&&h.removeEventListener(m,Tt),w.event.triggered=void 0,u&&(i[c]=u)),t.result}},simulate:function(e,t,n){var r=w.extend(new w.Event,n,{type:e,isSimulated:!0});w.event.trigger(r,null,t)}}),w.fn.extend({trigger:function(e,t){return this.each(function(){w.event.trigger(e,t,this)})},triggerHandler:function(e,t){var n=this[0];if(n)return w.event.trigger(e,t,n,!0)}}),h.focusin||w.each({focus:"focusin",blur:"focusout"},function(e,t){var n=function(e){w.event.simulate(t,e.target,w.event.fix(e))};w.event.special[t]={setup:function(){var r=this.ownerDocument||this,i=J.access(r,t);i||r.addEventListener(e,n,!0),J.access(r,t,(i||0)+1)},teardown:function(){var r=this.ownerDocument||this,i=J.access(r,t)-1;i?J.access(r,t,i):(r.removeEventListener(e,n,!0),J.remove(r,t))}}});var Ct=e.location,Et=Date.now(),kt=/\?/;w.parseXML=function(t){var n;if(!t||"string"!=typeof t)return null;try{n=(new e.DOMParser).parseFromString(t,"text/xml")}catch(e){n=void 0}return n&&!n.getElementsByTagName("parsererror").length||w.error("Invalid XML: "+t),n};var St=/\[\]$/,Dt=/\r?\n/g,Nt=/^(?:submit|button|image|reset|file)$/i,At=/^(?:input|select|textarea|keygen)/i;function jt(e,t,n,r){var 
i;if(Array.isArray(t))w.each(t,function(t,i){n||St.test(e)?r(e,i):jt(e+"["+("object"==typeof i&&null!=i?t:"")+"]",i,n,r)});else if(n||"object"!==x(t))r(e,t);else for(i in t)jt(e+"["+i+"]",t[i],n,r)}w.param=function(e,t){var n,r=[],i=function(e,t){var n=g(t)?t():t;r[r.length]=encodeURIComponent(e)+"="+encodeURIComponent(null==n?"":n)};if(Array.isArray(e)||e.jquery&&!w.isPlainObject(e))w.each(e,function(){i(this.name,this.value)});else for(n in e)jt(n,e[n],t,i);return r.join("&")},w.fn.extend({serialize:function(){return w.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var e=w.prop(this,"elements");return e?w.makeArray(e):this}).filter(function(){var e=this.type;return this.name&&!w(this).is(":disabled")&&At.test(this.nodeName)&&!Nt.test(e)&&(this.checked||!pe.test(e))}).map(function(e,t){var n=w(this).val();return null==n?null:Array.isArray(n)?w.map(n,function(e){return{name:t.name,value:e.replace(Dt,"\r\n")}}):{name:t.name,value:n.replace(Dt,"\r\n")}}).get()}});var qt=/%20/g,Lt=/#.*$/,Ht=/([?&])_=[^&]*/,Ot=/^(.*?):[ \t]*([^\r\n]*)$/gm,Pt=/^(?:about|app|app-storage|.+-extension|file|res|widget):$/,Mt=/^(?:GET|HEAD)$/,Rt=/^\/\//,It={},Wt={},$t="*/".concat("*"),Bt=r.createElement("a");Bt.href=Ct.href;function Ft(e){return function(t,n){"string"!=typeof t&&(n=t,t="*");var r,i=0,o=t.toLowerCase().match(M)||[];if(g(n))while(r=o[i++])"+"===r[0]?(r=r.slice(1)||"*",(e[r]=e[r]||[]).unshift(n)):(e[r]=e[r]||[]).push(n)}}function _t(e,t,n,r){var i={},o=e===Wt;function a(s){var u;return i[s]=!0,w.each(e[s]||[],function(e,s){var l=s(t,n,r);return"string"!=typeof l||o||i[l]?o?!(u=l):void 0:(t.dataTypes.unshift(l),a(l),!1)}),u}return a(t.dataTypes[0])||!i["*"]&&a("*")}function zt(e,t){var n,r,i=w.ajaxSettings.flatOptions||{};for(n in t)void 0!==t[n]&&((i[n]?e:r||(r={}))[n]=t[n]);return r&&w.extend(!0,e,r),e}function Xt(e,t,n){var r,i,o,a,s=e.contents,u=e.dataTypes;while("*"===u[0])u.shift(),void 
0===r&&(r=e.mimeType||t.getResponseHeader("Content-Type"));if(r)for(i in s)if(s[i]&&s[i].test(r)){u.unshift(i);break}if(u[0]in n)o=u[0];else{for(i in n){if(!u[0]||e.converters[i+" "+u[0]]){o=i;break}a||(a=i)}o=o||a}if(o)return o!==u[0]&&u.unshift(o),n[o]}function Ut(e,t,n,r){var i,o,a,s,u,l={},c=e.dataTypes.slice();if(c[1])for(a in e.converters)l[a.toLowerCase()]=e.converters[a];o=c.shift();while(o)if(e.responseFields[o]&&(n[e.responseFields[o]]=t),!u&&r&&e.dataFilter&&(t=e.dataFilter(t,e.dataType)),u=o,o=c.shift())if("*"===o)o=u;else if("*"!==u&&u!==o){if(!(a=l[u+" "+o]||l["* "+o]))for(i in l)if((s=i.split(" "))[1]===o&&(a=l[u+" "+s[0]]||l["* "+s[0]])){!0===a?a=l[i]:!0!==l[i]&&(o=s[0],c.unshift(s[1]));break}if(!0!==a)if(a&&e["throws"])t=a(t);else try{t=a(t)}catch(e){return{state:"parsererror",error:a?e:"No conversion from "+u+" to "+o}}}return{state:"success",data:t}}w.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:Ct.href,type:"GET",isLocal:Pt.test(Ct.protocol),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":$t,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/\bxml\b/,html:/\bhtml/,json:/\bjson\b/},responseFields:{xml:"responseXML",text:"responseText",json:"responseJSON"},converters:{"* text":String,"text html":!0,"text json":JSON.parse,"text xml":w.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(e,t){return t?zt(zt(e,w.ajaxSettings),t):zt(w.ajaxSettings,e)},ajaxPrefilter:Ft(It),ajaxTransport:Ft(Wt),ajax:function(t,n){"object"==typeof t&&(n=t,t=void 0),n=n||{};var i,o,a,s,u,l,c,f,p,d,h=w.ajaxSetup({},n),g=h.context||h,y=h.context&&(g.nodeType||g.jquery)?w(g):w.event,v=w.Deferred(),m=w.Callbacks("once memory"),x=h.statusCode||{},b={},T={},C="canceled",E={readyState:0,getResponseHeader:function(e){var t;if(c){if(!s){s={};while(t=Ot.exec(a))s[t[1].toLowerCase()]=t[2]}t=s[e.toLowerCase()]}return 
null==t?null:t},getAllResponseHeaders:function(){return c?a:null},setRequestHeader:function(e,t){return null==c&&(e=T[e.toLowerCase()]=T[e.toLowerCase()]||e,b[e]=t),this},overrideMimeType:function(e){return null==c&&(h.mimeType=e),this},statusCode:function(e){var t;if(e)if(c)E.always(e[E.status]);else for(t in e)x[t]=[x[t],e[t]];return this},abort:function(e){var t=e||C;return i&&i.abort(t),k(0,t),this}};if(v.promise(E),h.url=((t||h.url||Ct.href)+"").replace(Rt,Ct.protocol+"//"),h.type=n.method||n.type||h.method||h.type,h.dataTypes=(h.dataType||"*").toLowerCase().match(M)||[""],null==h.crossDomain){l=r.createElement("a");try{l.href=h.url,l.href=l.href,h.crossDomain=Bt.protocol+"//"+Bt.host!=l.protocol+"//"+l.host}catch(e){h.crossDomain=!0}}if(h.data&&h.processData&&"string"!=typeof h.data&&(h.data=w.param(h.data,h.traditional)),_t(It,h,n,E),c)return E;(f=w.event&&h.global)&&0==w.active++&&w.event.trigger("ajaxStart"),h.type=h.type.toUpperCase(),h.hasContent=!Mt.test(h.type),o=h.url.replace(Lt,""),h.hasContent?h.data&&h.processData&&0===(h.contentType||"").indexOf("application/x-www-form-urlencoded")&&(h.data=h.data.replace(qt,"+")):(d=h.url.slice(o.length),h.data&&(h.processData||"string"==typeof h.data)&&(o+=(kt.test(o)?"&":"?")+h.data,delete h.data),!1===h.cache&&(o=o.replace(Ht,"$1"),d=(kt.test(o)?"&":"?")+"_="+Et+++d),h.url=o+d),h.ifModified&&(w.lastModified[o]&&E.setRequestHeader("If-Modified-Since",w.lastModified[o]),w.etag[o]&&E.setRequestHeader("If-None-Match",w.etag[o])),(h.data&&h.hasContent&&!1!==h.contentType||n.contentType)&&E.setRequestHeader("Content-Type",h.contentType),E.setRequestHeader("Accept",h.dataTypes[0]&&h.accepts[h.dataTypes[0]]?h.accepts[h.dataTypes[0]]+("*"!==h.dataTypes[0]?", "+$t+"; q=0.01":""):h.accepts["*"]);for(p in h.headers)E.setRequestHeader(p,h.headers[p]);if(h.beforeSend&&(!1===h.beforeSend.call(g,E,h)||c))return 
E.abort();if(C="abort",m.add(h.complete),E.done(h.success),E.fail(h.error),i=_t(Wt,h,n,E)){if(E.readyState=1,f&&y.trigger("ajaxSend",[E,h]),c)return E;h.async&&h.timeout>0&&(u=e.setTimeout(function(){E.abort("timeout")},h.timeout));try{c=!1,i.send(b,k)}catch(e){if(c)throw e;k(-1,e)}}else k(-1,"No Transport");function k(t,n,r,s){var l,p,d,b,T,C=n;c||(c=!0,u&&e.clearTimeout(u),i=void 0,a=s||"",E.readyState=t>0?4:0,l=t>=200&&t<300||304===t,r&&(b=Xt(h,E,r)),b=Ut(h,b,E,l),l?(h.ifModified&&((T=E.getResponseHeader("Last-Modified"))&&(w.lastModified[o]=T),(T=E.getResponseHeader("etag"))&&(w.etag[o]=T)),204===t||"HEAD"===h.type?C="nocontent":304===t?C="notmodified":(C=b.state,p=b.data,l=!(d=b.error))):(d=C,!t&&C||(C="error",t<0&&(t=0))),E.status=t,E.statusText=(n||C)+"",l?v.resolveWith(g,[p,C,E]):v.rejectWith(g,[E,C,d]),E.statusCode(x),x=void 0,f&&y.trigger(l?"ajaxSuccess":"ajaxError",[E,h,l?p:d]),m.fireWith(g,[E,C]),f&&(y.trigger("ajaxComplete",[E,h]),--w.active||w.event.trigger("ajaxStop")))}return E},getJSON:function(e,t,n){return w.get(e,t,n,"json")},getScript:function(e,t){return w.get(e,void 0,t,"script")}}),w.each(["get","post"],function(e,t){w[t]=function(e,n,r,i){return g(n)&&(i=i||r,r=n,n=void 0),w.ajax(w.extend({url:e,type:t,dataType:i,data:n,success:r},w.isPlainObject(e)&&e))}}),w._evalUrl=function(e){return w.ajax({url:e,type:"GET",dataType:"script",cache:!0,async:!1,global:!1,"throws":!0})},w.fn.extend({wrapAll:function(e){var t;return this[0]&&(g(e)&&(e=e.call(this[0])),t=w(e,this[0].ownerDocument).eq(0).clone(!0),this[0].parentNode&&t.insertBefore(this[0]),t.map(function(){var e=this;while(e.firstElementChild)e=e.firstElementChild;return e}).append(this)),this},wrapInner:function(e){return g(e)?this.each(function(t){w(this).wrapInner(e.call(this,t))}):this.each(function(){var t=w(this),n=t.contents();n.length?n.wrapAll(e):t.append(e)})},wrap:function(e){var t=g(e);return this.each(function(n){w(this).wrapAll(t?e.call(this,n):e)})},unwrap:function(e){return 
this.parent(e).not("body").each(function(){w(this).replaceWith(this.childNodes)}),this}}),w.expr.pseudos.hidden=function(e){return!w.expr.pseudos.visible(e)},w.expr.pseudos.visible=function(e){return!!(e.offsetWidth||e.offsetHeight||e.getClientRects().length)},w.ajaxSettings.xhr=function(){try{return new e.XMLHttpRequest}catch(e){}};var Vt={0:200,1223:204},Gt=w.ajaxSettings.xhr();h.cors=!!Gt&&"withCredentials"in Gt,h.ajax=Gt=!!Gt,w.ajaxTransport(function(t){var n,r;if(h.cors||Gt&&!t.crossDomain)return{send:function(i,o){var a,s=t.xhr();if(s.open(t.type,t.url,t.async,t.username,t.password),t.xhrFields)for(a in t.xhrFields)s[a]=t.xhrFields[a];t.mimeType&&s.overrideMimeType&&s.overrideMimeType(t.mimeType),t.crossDomain||i["X-Requested-With"]||(i["X-Requested-With"]="XMLHttpRequest");for(a in i)s.setRequestHeader(a,i[a]);n=function(e){return function(){n&&(n=r=s.onload=s.onerror=s.onabort=s.ontimeout=s.onreadystatechange=null,"abort"===e?s.abort():"error"===e?"number"!=typeof s.status?o(0,"error"):o(s.status,s.statusText):o(Vt[s.status]||s.status,s.statusText,"text"!==(s.responseType||"text")||"string"!=typeof s.responseText?{binary:s.response}:{text:s.responseText},s.getAllResponseHeaders()))}},s.onload=n(),r=s.onerror=s.ontimeout=n("error"),void 0!==s.onabort?s.onabort=r:s.onreadystatechange=function(){4===s.readyState&&e.setTimeout(function(){n&&r()})},n=n("abort");try{s.send(t.hasContent&&t.data||null)}catch(e){if(n)throw e}},abort:function(){n&&n()}}}),w.ajaxPrefilter(function(e){e.crossDomain&&(e.contents.script=!1)}),w.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/\b(?:java|ecma)script\b/},converters:{"text script":function(e){return w.globalEval(e),e}}}),w.ajaxPrefilter("script",function(e){void 0===e.cache&&(e.cache=!1),e.crossDomain&&(e.type="GET")}),w.ajaxTransport("script",function(e){if(e.crossDomain){var 
t,n;return{send:function(i,o){t=w("<script>").prop({charset:e.scriptCharset,src:e.url}).on("load error",n=function(e){t.remove(),n=null,e&&o("error"===e.type?404:200,e.type)}),r.head.appendChild(t[0])},abort:function(){n&&n()}}}});var Yt=[],Qt=/(=)\?(?=&|$)|\?\?/;w.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=Yt.pop()||w.expando+"_"+Et++;return this[e]=!0,e}}),w.ajaxPrefilter("json jsonp",function(t,n,r){var i,o,a,s=!1!==t.jsonp&&(Qt.test(t.url)?"url":"string"==typeof t.data&&0===(t.contentType||"").indexOf("application/x-www-form-urlencoded")&&Qt.test(t.data)&&"data");if(s||"jsonp"===t.dataTypes[0])return i=t.jsonpCallback=g(t.jsonpCallback)?t.jsonpCallback():t.jsonpCallback,s?t[s]=t[s].replace(Qt,"$1"+i):!1!==t.jsonp&&(t.url+=(kt.test(t.url)?"&":"?")+t.jsonp+"="+i),t.converters["script json"]=function(){return a||w.error(i+" was not called"),a[0]},t.dataTypes[0]="json",o=e[i],e[i]=function(){a=arguments},r.always(function(){void 0===o?w(e).removeProp(i):e[i]=o,t[i]&&(t.jsonpCallback=n.jsonpCallback,Yt.push(i)),a&&g(o)&&o(a[0]),a=o=void 0}),"script"}),h.createHTMLDocument=function(){var e=r.implementation.createHTMLDocument("").body;return e.innerHTML="<form></form><form></form>",2===e.childNodes.length}(),w.parseHTML=function(e,t,n){if("string"!=typeof e)return[];"boolean"==typeof t&&(n=t,t=!1);var i,o,a;return t||(h.createHTMLDocument?((i=(t=r.implementation.createHTMLDocument("")).createElement("base")).href=r.location.href,t.head.appendChild(i)):t=r),o=A.exec(e),a=!n&&[],o?[t.createElement(o[1])]:(o=xe([e],t,a),a&&a.length&&w(a).remove(),w.merge([],o.childNodes))},w.fn.load=function(e,t,n){var r,i,o,a=this,s=e.indexOf(" ");return s>-1&&(r=vt(e.slice(s)),e=e.slice(0,s)),g(t)?(n=t,t=void 0):t&&"object"==typeof 
t&&(i="POST"),a.length>0&&w.ajax({url:e,type:i||"GET",dataType:"html",data:t}).done(function(e){o=arguments,a.html(r?w("<div>").append(w.parseHTML(e)).find(r):e)}).always(n&&function(e,t){a.each(function(){n.apply(this,o||[e.responseText,t,e])})}),this},w.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){w.fn[t]=function(e){return this.on(t,e)}}),w.expr.pseudos.animated=function(e){return w.grep(w.timers,function(t){return e===t.elem}).length},w.offset={setOffset:function(e,t,n){var r,i,o,a,s,u,l,c=w.css(e,"position"),f=w(e),p={};"static"===c&&(e.style.position="relative"),s=f.offset(),o=w.css(e,"top"),u=w.css(e,"left"),(l=("absolute"===c||"fixed"===c)&&(o+u).indexOf("auto")>-1)?(a=(r=f.position()).top,i=r.left):(a=parseFloat(o)||0,i=parseFloat(u)||0),g(t)&&(t=t.call(e,n,w.extend({},s))),null!=t.top&&(p.top=t.top-s.top+a),null!=t.left&&(p.left=t.left-s.left+i),"using"in t?t.using.call(e,p):f.css(p)}},w.fn.extend({offset:function(e){if(arguments.length)return void 0===e?this:this.each(function(t){w.offset.setOffset(this,e,t)});var t,n,r=this[0];if(r)return r.getClientRects().length?(t=r.getBoundingClientRect(),n=r.ownerDocument.defaultView,{top:t.top+n.pageYOffset,left:t.left+n.pageXOffset}):{top:0,left:0}},position:function(){if(this[0]){var e,t,n,r=this[0],i={top:0,left:0};if("fixed"===w.css(r,"position"))t=r.getBoundingClientRect();else{t=this.offset(),n=r.ownerDocument,e=r.offsetParent||n.documentElement;while(e&&(e===n.body||e===n.documentElement)&&"static"===w.css(e,"position"))e=e.parentNode;e&&e!==r&&1===e.nodeType&&((i=w(e).offset()).top+=w.css(e,"borderTopWidth",!0),i.left+=w.css(e,"borderLeftWidth",!0))}return{top:t.top-i.top-w.css(r,"marginTop",!0),left:t.left-i.left-w.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent;while(e&&"static"===w.css(e,"position"))e=e.offsetParent;return 
e||be})}}),w.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(e,t){var n="pageYOffset"===t;w.fn[e]=function(r){return z(this,function(e,r,i){var o;if(y(e)?o=e:9===e.nodeType&&(o=e.defaultView),void 0===i)return o?o[t]:e[r];o?o.scrollTo(n?o.pageXOffset:i,n?i:o.pageYOffset):e[r]=i},e,r,arguments.length)}}),w.each(["top","left"],function(e,t){w.cssHooks[t]=_e(h.pixelPosition,function(e,n){if(n)return n=Fe(e,t),We.test(n)?w(e).position()[t]+"px":n})}),w.each({Height:"height",Width:"width"},function(e,t){w.each({padding:"inner"+e,content:t,"":"outer"+e},function(n,r){w.fn[r]=function(i,o){var a=arguments.length&&(n||"boolean"!=typeof i),s=n||(!0===i||!0===o?"margin":"border");return z(this,function(t,n,i){var o;return y(t)?0===r.indexOf("outer")?t["inner"+e]:t.document.documentElement["client"+e]:9===t.nodeType?(o=t.documentElement,Math.max(t.body["scroll"+e],o["scroll"+e],t.body["offset"+e],o["offset"+e],o["client"+e])):void 0===i?w.css(t,n,s):w.style(t,n,i,s)},t,a?i:void 0,a)}})}),w.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" "),function(e,t){w.fn[t]=function(e,n){return arguments.length>0?this.on(t,null,e,n):this.trigger(t)}}),w.fn.extend({hover:function(e,t){return this.mouseenter(e).mouseleave(t||e)}}),w.fn.extend({bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return this.on(t,e,n,r)},undelegate:function(e,t,n){return 1===arguments.length?this.off(e,"**"):this.off(t,e||"**",n)}}),w.proxy=function(e,t){var n,r,i;if("string"==typeof t&&(n=e[t],t=e,e=n),g(e))return r=o.call(arguments,2),i=function(){return 
e.apply(t||this,r.concat(o.call(arguments)))},i.guid=e.guid=e.guid||w.guid++,i},w.holdReady=function(e){e?w.readyWait++:w.ready(!0)},w.isArray=Array.isArray,w.parseJSON=JSON.parse,w.nodeName=N,w.isFunction=g,w.isWindow=y,w.camelCase=G,w.type=x,w.now=Date.now,w.isNumeric=function(e){var t=w.type(e);return("number"===t||"string"===t)&&!isNaN(e-parseFloat(e))},"function"==typeof define&&define.amd&&define("jquery",[],function(){return w});var Jt=e.jQuery,Kt=e.$;return w.noConflict=function(t){return e.$===w&&(e.$=Kt),t&&e.jQuery===w&&(e.jQuery=Jt),w},t||(e.jQuery=e.$=w),w});
diff --git a/server/app/static/js/vendor/nanobar.min.js b/server/app/static/js/vendor/nanobar.min.js
new file mode 100644
index 00000000..9329eadc
--- /dev/null
+++ b/server/app/static/js/vendor/nanobar.min.js
@@ -0,0 +1,3 @@
+var Nanobar=function(){var c,d,e,f,g,h,k={width:"100%",height:"4px",zIndex:9999,top:"0"},l={width:0,height:"100%",clear:"both",transition:"height .3s"};c=function(a,b){for(var c in b)a.style[c]=b[c];a.style["float"]="left"};f=function(){var a=this,b=this.width-this.here;0.1>b&&-0.1<b?(g.call(this,this.here),this.moving=!1,100==this.width&&(this.el.style.height=0,setTimeout(function(){a.cont.el.removeChild(a.el)},300))):(g.call(this,this.width-b/4),setTimeout(function(){a.go()},16))};g=function(a){this.width=
+a;this.el.style.width=this.width+"%"};h=function(){var a=new d(this);this.bars.unshift(a)};d=function(a){this.el=document.createElement("div");this.el.style.backgroundColor=a.opts.bg;this.here=this.width=0;this.moving=!1;this.cont=a;c(this.el,l);a.el.appendChild(this.el)};d.prototype.go=function(a){a?(this.here=a,this.moving||(this.moving=!0,f.call(this))):this.moving&&f.call(this)};e=function(a){a=this.opts=a||{};var b;a.bg=a.bg||"#000";this.bars=[];b=this.el=document.createElement("div");c(this.el,
+k);a.id&&(b.id=a.id);b.style.position=a.target?"relative":"fixed";a.target?a.target.insertBefore(b,a.target.firstChild):document.getElementsByTagName("body")[0].appendChild(b);h.call(this)};e.prototype.go=function(a){this.bars[0].go(a);100==a&&h.call(this)};return e}(); \ No newline at end of file
diff --git a/server/app/static/js/vendor/prefixfree.js b/server/app/static/js/vendor/prefixfree.js
new file mode 100644
index 00000000..40b6e693
--- /dev/null
+++ b/server/app/static/js/vendor/prefixfree.js
@@ -0,0 +1,527 @@
+/**
+ * StyleFix 1.0.3 & PrefixFree 1.0.7
+ * @author Lea Verou
+ * MIT license
+ */
+
+(function(){
+
+if(!window.addEventListener) {
+ return;
+}
+
+var self = window.StyleFix = {
+ link: function(link) {
+ var url = link.href || link.getAttribute('data-href');
+ try {
+ // Ignore stylesheets with data-noprefix attribute as well as alternate stylesheets or without (data-)href attribute
+ if(!url || link.rel !== 'stylesheet' || link.hasAttribute('data-noprefix')) {
+ return;
+ }
+ }
+ catch(e) {
+ return;
+ }
+
+ var base = url.replace(/[^\/]+$/, ''),
+ base_scheme = (/^[a-z]{3,10}:/.exec(base) || [''])[0],
+ base_domain = (/^[a-z]{3,10}:\/\/[^\/]+/.exec(base) || [''])[0],
+ base_query = /^([^?]*)\??/.exec(url)[1],
+ parent = link.parentNode,
+ xhr = new XMLHttpRequest(),
+ process;
+
+ xhr.onreadystatechange = function() {
+ if(xhr.readyState === 4) {
+ process();
+ }
+ };
+
+ process = function() {
+ var css = xhr.responseText;
+
+ if(css && link.parentNode && (!xhr.status || xhr.status < 400 || xhr.status > 600)) {
+ css = self.fix(css, true, link);
+
+ // Convert relative URLs to absolute, if needed
+ if(css && base) {
+ css = css.replace(/url\(\s*?((?:"|')?)(.+?)\1\s*?\)/gi, function($0, quote, url) {
+					if(/^([a-z]{3,10}:|#)/i.test(url)) { // Absolute or hash-relative
+ return $0;
+ }
+ else if(/^\/\//.test(url)) { // Scheme-relative
+ // May contain sequences like /../ and /./ but those DO work
+ return 'url("' + base_scheme + url + '")';
+ }
+ else if(/^\//.test(url)) { // Domain-relative
+ return 'url("' + base_domain + url + '")';
+ }
+ else if(/^\?/.test(url)) { // Query-relative
+ return 'url("' + base_query + url + '")';
+ }
+ else {
+ // Path-relative
+ return 'url("' + base + url + '")';
+ }
+ });
+
+				// behavior URLs shouldn’t be converted (Issue #19)
+ // base should be escaped before added to RegExp (Issue #81)
+ var escaped_base = base.replace(/([\\\^\$*+[\]?{}.=!:(|)])/g,"\\$1");
+ css = css.replace(RegExp('\\b(behavior:\\s*?url\\(\'?"?)' + escaped_base, 'gi'), '$1');
+ }
+
+ var style = document.createElement('style');
+ style.textContent = '/*# sourceURL='+link.getAttribute('href')+' */\n/*@ sourceURL='+link.getAttribute('href')+' */\n' + css;
+ style.media = link.media;
+ style.disabled = link.disabled;
+ style.setAttribute('data-href', link.getAttribute('href'));
+
+ if(link.id) style.id = link.id;
+
+ parent.insertBefore(style, link);
+ parent.removeChild(link);
+
+ style.media = link.media; // Duplicate is intentional. See issue #31
+ }
+ };
+
+ try {
+ xhr.open('GET', url);
+ xhr.send(null);
+ } catch (e) {
+ // Fallback to XDomainRequest if available
+ if (typeof XDomainRequest != "undefined") {
+ xhr = new XDomainRequest();
+ xhr.onerror = xhr.onprogress = function() {};
+ xhr.onload = process;
+ xhr.open("GET", url);
+ xhr.send(null);
+ }
+ }
+
+ link.setAttribute('data-inprogress', '');
+ },
+
+ styleElement: function(style) {
+ if (style.hasAttribute('data-noprefix')) {
+ return;
+ }
+ var disabled = style.disabled;
+
+ style.textContent = self.fix(style.textContent, true, style);
+
+ style.disabled = disabled;
+ },
+
+ styleAttribute: function(element) {
+ var css = element.getAttribute('style');
+
+ css = self.fix(css, false, element);
+
+ element.setAttribute('style', css);
+ },
+
+ process: function() {
+ // Linked stylesheets
+ $('link[rel="stylesheet"]:not([data-inprogress])').forEach(StyleFix.link);
+
+ // Inline stylesheets
+ $('style').forEach(StyleFix.styleElement);
+
+ // Inline styles
+ $('[style]').forEach(StyleFix.styleAttribute);
+
+ var event = document.createEvent('Event');
+ event.initEvent('StyleFixProcessed', true, true);
+ document.dispatchEvent(event);
+
+ },
+
+ register: function(fixer, index) {
+ (self.fixers = self.fixers || [])
+ .splice(index === undefined? self.fixers.length : index, 0, fixer);
+ },
+
+ fix: function(css, raw, element) {
+ if(self.fixers) {
+ for(var i=0; i<self.fixers.length; i++) {
+ css = self.fixers[i](css, raw, element) || css;
+ }
+ }
+
+ return css;
+ },
+
+ camelCase: function(str) {
+ return str.replace(/-([a-z])/g, function($0, $1) { return $1.toUpperCase(); }).replace('-','');
+ },
+
+ deCamelCase: function(str) {
+ return str.replace(/[A-Z]/g, function($0) { return '-' + $0.toLowerCase() });
+ }
+};
+
+/**************************************
+ * Process styles
+ **************************************/
+(function(){
+ setTimeout(function(){
+ $('link[rel="stylesheet"]').forEach(StyleFix.link);
+ }, 10);
+
+ document.addEventListener('DOMContentLoaded', StyleFix.process, false);
+})();
+
+function $(expr, con) {
+ return [].slice.call((con || document).querySelectorAll(expr));
+}
+
+})();
+
+/**
+ * PrefixFree
+ */
+(function(root){
+
+if(!window.StyleFix || !window.getComputedStyle) {
+ return;
+}
+
+// Private helper
+function fix(what, before, after, replacement, css) {
+ what = self[what];
+
+ if(what.length) {
+ var regex = RegExp(before + '(' + what.join('|') + ')' + after, 'gi');
+
+ css = css.replace(regex, replacement);
+ }
+
+ return css;
+}
+
+var self = window.PrefixFree = {
+ prefixCSS: function(css, raw, element) {
+ var prefix = self.prefix;
+
+ // Gradient angles hotfix
+ if(self.functions.indexOf('linear-gradient') > -1) {
+ // Gradients are supported with a prefix, convert angles to legacy
+ css = css.replace(/(\s|:|,)(repeating-)?linear-gradient\(\s*(-?\d*\.?\d*)deg/ig, function ($0, delim, repeating, deg) {
+ return delim + (repeating || '') + 'linear-gradient(' + (90-deg) + 'deg';
+ });
+ }
+
+ css = fix('functions', '(\\s|:|,)', '\\s*\\(', '$1' + prefix + '$2(', css);
+ css = fix('keywords', '(\\s|:)', '(\\s|;|\\}|$)', '$1' + prefix + '$2$3', css);
+ css = fix('properties', '(^|\\{|\\s|;)', '\\s*:', '$1' + prefix + '$2:', css);
+
+ // Prefix properties *inside* values (issue #8)
+ if (self.properties.length) {
+ var regex = RegExp('\\b(' + self.properties.join('|') + ')(?!:)', 'gi');
+
+ css = fix('valueProperties', '\\b', ':(.+?);', function($0) {
+ return $0.replace(regex, prefix + "$1")
+ }, css);
+ }
+
+ if(raw) {
+ css = fix('selectors', '', '\\b', self.prefixSelector, css);
+ css = fix('atrules', '@', '\\b', '@' + prefix + '$1', css);
+ }
+
+ // Fix double prefixing
+ css = css.replace(RegExp('-' + prefix, 'g'), '-');
+
+ // Prefix wildcard
+ css = css.replace(/-\*-(?=[a-z]+)/gi, self.prefix);
+
+ return css;
+ },
+
+ property: function(property) {
+ return (self.properties.indexOf(property) >=0 ? self.prefix : '') + property;
+ },
+
+ value: function(value, property) {
+ value = fix('functions', '(^|\\s|,)', '\\s*\\(', '$1' + self.prefix + '$2(', value);
+ value = fix('keywords', '(^|\\s)', '(\\s|$)', '$1' + self.prefix + '$2$3', value);
+
+ if(self.valueProperties.indexOf(property) >= 0) {
+ value = fix('properties', '(^|\\s|,)', '($|\\s|,)', '$1'+self.prefix+'$2$3', value);
+ }
+
+ return value;
+ },
+
+ prefixSelector: function(selector) {
+ return self.selectorMap[selector] || selector
+ },
+
+ // Warning: Prefixes no matter what, even if the property is supported prefix-less
+ prefixProperty: function(property, camelCase) {
+ var prefixed = self.prefix + property;
+
+ return camelCase? StyleFix.camelCase(prefixed) : prefixed;
+ }
+};
+
+/**************************************
+ * Properties
+ **************************************/
+(function() {
+ var prefixes = {},
+ properties = [],
+ shorthands = {},
+ style = getComputedStyle(document.documentElement, null),
+ dummy = document.createElement('div').style;
+
+ // Why are we doing this instead of iterating over properties in a .style object? Because Webkit.
+ // 1. Older Webkit won't iterate over those.
+ // 2. Recent Webkit will, but the 'Webkit'-prefixed properties are not enumerable. The 'webkit'
+ // (lower case 'w') ones are, but they don't `deCamelCase()` into a prefix that we can detect.
+
+ var iterate = function(property) {
+ if(property.charAt(0) === '-') {
+ properties.push(property);
+
+ var parts = property.split('-'),
+ prefix = parts[1];
+
+ // Count prefix uses
+ prefixes[prefix] = ++prefixes[prefix] || 1;
+
+ // This helps determining shorthands
+ while(parts.length > 3) {
+ parts.pop();
+
+ var shorthand = parts.join('-');
+
+ if(supported(shorthand) && properties.indexOf(shorthand) === -1) {
+ properties.push(shorthand);
+ }
+ }
+ }
+ },
+ supported = function(property) {
+ return StyleFix.camelCase(property) in dummy;
+ }
+
+ // Some browsers have numerical indices for the properties, some don't
+ if(style && style.length > 0) {
+ for(var i=0; i<style.length; i++) {
+ iterate(style[i])
+ }
+ }
+ else {
+ for(var property in style) {
+ iterate(StyleFix.deCamelCase(property));
+ }
+ }
+
+ // Find most frequently used prefix
+ var highest = {uses:0};
+ for(var prefix in prefixes) {
+ var uses = prefixes[prefix];
+
+ if(highest.uses < uses) {
+ highest = {prefix: prefix, uses: uses};
+ }
+ }
+
+ self.prefix = '-' + highest.prefix + '-';
+ self.Prefix = StyleFix.camelCase(self.prefix);
+
+ self.properties = [];
+
+ // Get properties ONLY supported with a prefix
+ for(var i=0; i<properties.length; i++) {
+ var property = properties[i];
+
+ if(property.indexOf(self.prefix) === 0) { // we might have multiple prefixes, like Opera
+ var unprefixed = property.slice(self.prefix.length);
+
+ if(!supported(unprefixed)) {
+ self.properties.push(unprefixed);
+ }
+ }
+ }
+
+ // IE fix
+ if(self.Prefix == 'Ms'
+ && !('transform' in dummy)
+ && !('MsTransform' in dummy)
+ && ('msTransform' in dummy)) {
+ self.properties.push('transform', 'transform-origin');
+ }
+
+ self.properties.sort();
+})();
+
+/**************************************
+ * Values
+ **************************************/
+(function() {
+// Values that might need prefixing
+var functions = {
+ 'linear-gradient': {
+ property: 'backgroundImage',
+ params: 'red, teal'
+ },
+ 'calc': {
+ property: 'width',
+ params: '1px + 5%'
+ },
+ 'element': {
+ property: 'backgroundImage',
+ params: '#foo'
+ },
+ 'cross-fade': {
+ property: 'backgroundImage',
+ params: 'url(a.png), url(b.png), 50%'
+ },
+ 'image-set': {
+ property: 'backgroundImage',
+ params: 'url(a.png) 1x, url(b.png) 2x'
+ }
+};
+
+
+functions['repeating-linear-gradient'] =
+functions['repeating-radial-gradient'] =
+functions['radial-gradient'] =
+functions['linear-gradient'];
+
+// Note: The properties assigned are just to *test* support.
+// The keywords will be prefixed everywhere.
+var keywords = {
+ 'initial': 'color',
+ 'grab': 'cursor',
+ 'grabbing': 'cursor',
+ 'zoom-in': 'cursor',
+ 'zoom-out': 'cursor',
+ 'box': 'display',
+ 'flexbox': 'display',
+ 'inline-flexbox': 'display',
+ 'flex': 'display',
+ 'inline-flex': 'display',
+ 'grid': 'display',
+ 'inline-grid': 'display',
+ 'max-content': 'width',
+ 'min-content': 'width',
+ 'fit-content': 'width',
+ 'fill-available': 'width',
+ 'contain-floats': 'width'
+};
+
+self.functions = [];
+self.keywords = [];
+
+var style = document.createElement('div').style;
+
+function supported(value, property) {
+ style[property] = '';
+ style[property] = value;
+
+ return !!style[property];
+}
+
+for (var func in functions) {
+ var test = functions[func],
+ property = test.property,
+ value = func + '(' + test.params + ')';
+
+ if (!supported(value, property)
+ && supported(self.prefix + value, property)) {
+ // It's supported, but with a prefix
+ self.functions.push(func);
+ }
+}
+
+for (var keyword in keywords) {
+ var property = keywords[keyword];
+
+ if (!supported(keyword, property)
+ && supported(self.prefix + keyword, property)) {
+ // It's supported, but with a prefix
+ self.keywords.push(keyword);
+ }
+}
+
+})();
+
+/**************************************
+ * Selectors and @-rules
+ **************************************/
+(function() {
+
+var
+selectors = {
+ ':any-link': null,
+ '::backdrop': null,
+ ':fullscreen': null,
+ ':full-screen': ':fullscreen',
+ //sigh
+ '::placeholder': null,
+ ':placeholder': '::placeholder',
+ '::input-placeholder': '::placeholder',
+ ':input-placeholder': '::placeholder',
+ ':read-only': null,
+ ':read-write': null,
+ '::selection': null
+},
+
+atrules = {
+ 'keyframes': 'name',
+ 'viewport': null,
+ 'document': 'regexp(".")'
+};
+
+self.selectors = [];
+self.selectorMap = {};
+self.atrules = [];
+
+var style = root.appendChild(document.createElement('style'));
+
+function supported(selector) {
+ style.textContent = selector + '{}'; // Safari 4 has issues with style.innerHTML
+
+ return !!style.sheet.cssRules.length;
+}
+
+for(var selector in selectors) {
+ var standard = selectors[selector] || selector
+ var prefixed = selector.replace(/::?/, function($0) { return $0 + self.prefix })
+ if(!supported(standard) && supported(prefixed)) {
+ self.selectors.push(standard);
+ self.selectorMap[standard] = prefixed;
+ }
+}
+
+for(var atrule in atrules) {
+ var test = atrule + ' ' + (atrules[atrule] || '');
+
+ if(!supported('@' + test) && supported('@' + self.prefix + test)) {
+ self.atrules.push(atrule);
+ }
+}
+
+root.removeChild(style);
+
+})();
+
+// Properties that accept properties as their value
+self.valueProperties = [
+ 'transition',
+ 'transition-property',
+ 'will-change'
+]
+
+// Add class for current prefix
+root.className += ' ' + self.prefix;
+
+StyleFix.register(self.prefixCSS);
+
+
+})(document.documentElement);
diff --git a/server/app/templates/403.html b/server/app/templates/403.html
new file mode 100644
index 00000000..f83c6dfc
--- /dev/null
+++ b/server/app/templates/403.html
@@ -0,0 +1,35 @@
+{%- extends "base.html" %}
+
+{% import "bootstrap/utils.html" as utils %}
+
+{% block content %}
+<div class="container">
+
+ <div class="row">
+ <h1><a href="/"><img src="/static/img/dulldream_logo_200.png"></a></h1>
+ <h2 class="subtitle">403</h2>
+ </div>
+
+
+ <div class="row">
+ <div class="col-sm-12 align_center">
+ <p>Your request could not be handled</p>
+ </div>
+ </div>
+
+ <div id="about_btn" class="row">
+ <div class="col-sm-12">
+ <div class="align_center">
+ <a class="btn btn-sm btn-default" href="/" role="button">Home</a>
+ </div>
+ </div>
+ </div>
+
+</div>
+
+{% block footer %}
+{{super()}}
+{% endblock %}
+
+{% endblock %}
+
diff --git a/server/app/templates/404.html b/server/app/templates/404.html
new file mode 100644
index 00000000..a7f7d45a
--- /dev/null
+++ b/server/app/templates/404.html
@@ -0,0 +1,33 @@
+{%- extends "base.html" %}
+
+{% import "bootstrap/utils.html" as utils %}
+
+{% block content %}
+<div class="container">
+
+ <div class="row">
+ <h1><a href="/"><img src="/static/img/dulldream_logo_200.png"></a></h1>
+ <h2 class="subtitle">404</h2>
+ </div>
+
+ <div class="row">
+ <div class="col-sm-12 align_center">
+      <p>The page you requested could not be found</p>
+ </div>
+  </div>
+ <div id="about_btn" class="row">
+ <div class="col-sm-12">
+ <div class="align_center">
+ <a class="btn btn-sm btn-default" href="/" role="button">Home</a>
+ </div>
+ </div>
+ </div>
+
+</div>
+
+{% block footer %}
+{{super()}}
+{% endblock %}
+
+{% endblock %}
+
diff --git a/server/app/templates/500.html b/server/app/templates/500.html
new file mode 100644
index 00000000..b323c12c
--- /dev/null
+++ b/server/app/templates/500.html
@@ -0,0 +1,34 @@
+{%- extends "base.html" %}
+
+{% import "bootstrap/utils.html" as utils %}
+
+{% block content %}
+<div class="container">
+
+ <div class="row">
+ <h1><a href="/"><img src="/static/img/dulldream_logo_200.png"></a></h1>
+    <h2 class="subtitle">500</h2>
+ </div>
+
+ <div class="row">
+ <div class="col-sm-12 align_center">
+ <p>Your request could not be handled</p>
+ </div>
+ </div>
+
+ <div id="about_btn" class="row">
+ <div class="col-sm-12">
+ <div class="align_center">
+ <a class="btn btn-sm btn-default" href="/" role="button">Home</a>
+ </div>
+ </div>
+ </div>
+
+</div>
+
+{% block footer %}
+{{super()}}
+{% endblock %}
+
+{% endblock %}
+
diff --git a/server/app/templates/base.html b/server/app/templates/base.html
new file mode 100644
index 00000000..16402af8
--- /dev/null
+++ b/server/app/templates/base.html
@@ -0,0 +1,33 @@
+{%- extends "bootstrap/base.html" %}
+
+{% block title %}DullDream (v2 x ZKM){% endblock %}
+
+{% block head %}
+ {{super()}}
+ <link rel="shortcut icon" href="{{url_for('static', filename='img/favicon.ico')}}">
+{% endblock %}
+
+{% block styles %}
+ {{super()}}
+ <link rel="stylesheet" type="text/css" href="{{url_for('static', filename='css/bootstrap.min.css')}}">
+ <link rel="stylesheet" type="text/css" href="{{url_for('static', filename='css/dullbrown-theme.css')}}">
+{% endblock %}
+
+{%- block content %}
+ {{super()}}
+{% endblock content %}
+
+{%- block footer %}
+ <div id="footer" class="footer">
+ <div class="container">
+ <p class="">
+ DullDream™ (beta) by <a href="http://constantdullaart.com">Constant Dullaart</a>.
+ Made in collaboration with <a href="http://ahprojects.com">Adam Harvey</a>
+ </p>
+ </div>
+ </div>
+{% endblock footer %}
+
+{% block scripts %}
+ {{super()}}
+{% endblock scripts %} \ No newline at end of file
diff --git a/server/app/templates/celery.html b/server/app/templates/celery.html
new file mode 100644
index 00000000..ddcd25cb
--- /dev/null
+++ b/server/app/templates/celery.html
@@ -0,0 +1,43 @@
+{%- extends "base.html" %}
+
+{% import "bootstrap/utils.html" as utils %}
+
+{% block content %}
+<style>
+ .progress {
+ width: 100%;
+ text-align: center;
+ }
+</style>
+
+<div class="container">
+
+ <div class="row">
+ <h1><a href="/"><img src="/static/img/dulldream_logo_200.png"></a></h1>
+ <h2 class="subtitle">Celery Test</h2>
+ </div>
+
+ <div class="row">
+ <div class="col-sm-12 align_center">
+ <button id="submit-data">Submit Data</button><br><br>
+ <div id="progress"></div>
+ </div>
+ </div>
+
+</div>
+
+{% block footer %}
+{{ super() }}
+{% endblock %}
+
+{% endblock %}
+
+
+{% block scripts %}
+ {{super()}}
+
+<script type="text/javascript" src="{{url_for('static', filename='js/old-js/nanobar.min.js')}}"></script>
+<script type="text/javascript" src="{{url_for('static', filename='js/old-js/celery.js')}}"></script>
+
+
+{% endblock scripts %} \ No newline at end of file
diff --git a/server/app/templates/display.html b/server/app/templates/display.html
new file mode 100644
index 00000000..f73a6ca5
--- /dev/null
+++ b/server/app/templates/display.html
@@ -0,0 +1,69 @@
+{%- extends "base.html" %}
+
+{% import "bootstrap/utils.html" as utils %}
+
+{% block content %}
+
+<style type="text/css">
+ .display_im{
+ width:100%;
+ max-width: 360px;
+ min-width: 256px;
+ margin-bottom: 10px;
+ }
+ .display_left{
+
+ }
+ .display_right{
+
+ }
+ .caption{
+ font-size: 11px;
+ }
+</style>
+<div class="container">
+
+ <div class="row">
+ <h1><a href="/"><img src="/static/img/dulldream_logo_200.png"></a></h1>
+ <h2 class="subtitle">Result</h2>
+ </div>
+
+ <div class="row">
+ <div style="height:50px"></div>
+ <div class="col-sm-12" style="text-align: center;">
+ <img class="display_im" src="{{ url_for('main.get_image', imtype='renders', uuid_name='{}'.format( uuid_name )) }}" />
+ <p class="caption">Rendered result</p>
+ </div>
+ </div>
+
+ <div class="col-sm-12" style="text-align: center;">
+ <img class="display_im" src="{{ url_for('main.get_image', imtype='uploads', uuid_name='{}'.format( uuid_name )) }}" />
+ <p class="caption">Original image</p>
+ </div>
+ </div>
+
+ <div class="col-sm-12" style="text-align: center;">
+ <img class="display_im" src="{{ url_for('main.get_image', imtype='fcn', uuid_name='{}'.format( uuid_name )) }}" />
+ <p class="caption">Semantic segmentation</p>
+ </div>
+ </div>
+
+ </div>
+
+ <div id="about_btn" class="row">
+ <div class="col-sm-12">
+ <div class="align_center">
+ <a class="btn btn-sm btn-default" href="/" role="button">Home</a>
+ </div>
+ </div>
+ </div>
+
+ </div>
+</div>
+
+{% block footer %}
+{{super()}}
+{% endblock footer %}
+
+{% endblock %}
+
diff --git a/server/app/templates/index.html b/server/app/templates/index.html
new file mode 100644
index 00000000..f740bb5b
--- /dev/null
+++ b/server/app/templates/index.html
@@ -0,0 +1,161 @@
+<!doctype html>
+<html>
+<head>
+ <meta charset="utf-8">
+ <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
+ <link rel="shortcut icon" href="/static/img/favicon.ico" />
+  <title>DullDream (v2 x ZKM)</title>
+ <link rel="stylesheet" type="text/css" href="{{url_for('static', filename='css/dullbrown-theme.css')}}">
+</head>
+<body>
+
+<header>
+ <h1><a href="/"><img src="/static/img/dulldream_logo_200.png" alt="DullDream"></a></h1>
+ <h2 class="subtitle">Neural network photo effect</h2>
+</header>
+
+<div class="upload_view container">
+ <div class="row">
+ <div id="photo_area" class="dash_border">
+ <input class="hidden_input" id="user_file" type="file" accept="image/*">
+ <canvas class="photo" id="user_photo_canvas" width="512" height="512"></canvas>
+ <div class="center_inner">
+ <label id="take_photo_btn" for="user_file" class="upload_center_btn">
+ <div class='btn-lg btn'>Take Photo</div>
+ </label>
+ <div id="details"></div>
+ <div id="progress"></div>
+ </div>
+
+ <div id="preloader_anim">
+ <img src="/static/img/loader.gif">
+ </div>
+ </div>
+ </div>
+
+ <div id="upload_controls" class="row">
+ <div class="align_center">
+ <div id="restart_btn">
+ <a id="restart_btn" class="btn btn-md btn-default" role="button">Change Image</a>
+ <input type='file' accept="image/*">
+ </div>
+ <div id="dropdown_btn">
+ <select id="dropdown"></select>
+ </div>
+ <div id="upload_btn">
+ <a id="take_photo_btn" class="btn btn-md btn-important" role="button">Upload</a>
+ </div>
+ </div>
+ <div class="align_center consent_box">
+ <label>
+ <input type="checkbox" id="agree" value="1" checked>
+        I consent to have my dulled image displayed at ZKM.
+ </label>
+ </div>
+ </div>
+
+ <div id="about_btn" class="row">
+ <div class="align_center">
+ <a class="btn btn-sm btn-default about_button" role="button">About</a>
+ <a class="btn btn-sm btn-default privacy_button" role="button">Privacy</a>
+ <p class="notice">
+ All images uploaded can be used for exhibition and review purposes.
+ </p>
+ <p class="notice">
+ Currently this work is on view at <a href="http://zkm.de/en/event/2017/10/open-codes">ZKM</a>. View recent DullDreams <a href="/gallery">here</a>.
+ </p>
+ </div>
+ </div>
+</div>
+
+<div class="about_view modal">
+ <div class="inner">
+ <header>
+ <h1><img src="/static/img/dulldream_logo_200.png" alt="DullDream"></h1>
+ </header>
+ <div class='content'>
+ <p>
+ <b><i>DullDream™ by DullTech™</i></b> is a series of experiments appropriating neural network image recognition technology to make visual representation less interesting.
+ </p>
+ <p>
+ Can machine learning help us desensitize? Our impactful lives are clogging up social media feeds with unique filter settings, leaving us nostalgic for a vanilla future. Can machine learning help us achieve this? Take the excitement out of our lives, prepare us for a time where we will all have to be the same, have the same values and culture? Painting a future where the Dull is no longer a dream but a nightmare?
+ </p>
+ <p>
+        DullDream™ (version 2) was developed for the OpenCodes exhibition at ZKM. It is based on the original DullDream™ (<a href="http://dulldream.xyz">version 1</a>), developed for Transmediale 2017 - Ever Elusive by <a href="http://constantdullaart.com">Constant Dullaart</a> in collaboration with <a href="http://ahprojects.com">Adam Harvey</a>. DullDream (V2) has been generously made possible by support from ZKM.
+ </p>
+ </div>
+ <center><a class="btn btn-sm btn-default" href="/" role="button">Home</a></center>
+ </div>
+</div>
+
+<div class="privacy_view modal">
+ <div class="inner">
+ <header>
+ <h1><img src="/static/img/dulldream_logo_200.png" alt="DullDream"></h1>
+ </header>
+ <div class='content'>
+ <h3>Privacy Notice</h3>
+ <p>
+ Images uploaded to this site are being used for a public art display at <a href="http://zkm.de/en/event/2017/10/open-codes">ZKM</a>
+ </p>
+ <p>
+ If you would not like to be included, be sure to uncheck the permission box on the upload page.
+ </p>
+
+ </div>
+ <center><a class="btn btn-sm btn-default" href="/" role="button">Home</a></center>
+ </div>
+</div>
+
+
+<div class="result_view">
+ <div class="final_result">
+ </div>
+
+ <div class="row made_with">
+ Made with DullDream.xyz for ZKM OpenCodes 2017
+ </div>
+
+ <div class="row">
+ <button class='btn' id="show_all_results">Detailed Analysis</button>
+ </div>
+
+ <div class="all_results">
+ </div>
+
+ <div id="share_btns" class="row">
+ <a id="permalink" href="#">Permalink</a>
+ </div>
+
+ <div id="about_btn" class="row">
+ <div class="align_center">
+ <a href="/" class="btn btn-sm btn-default home_button" role="button">Home</a>
+ <a class="btn btn-sm btn-default about_button" role="button">About</a>
+ <a class="btn btn-sm btn-default privacy_button" role="button">Privacy</a>
+ </div>
+
+ </div>
+
+</div>
+
+<div id="footer">
+ DullDream™ by <a href="http://constantdullaart.com">Constant Dullaart</a>.<br>
+ <small>Made in collaboration with <a href="http://ahprojects.com">Adam Harvey</a></small>
+</div>
+
+</body>
+<script type="text/html" id="result_template">
+ <div class="row">
+ <img src="{img}"><br>
+ <b>{title}</b>
+ </div>
+</script>
+<script type="text/json" id="dropdown_options">{{ task_json }}</script>
+<script type="text/javascript" src="{{url_for('static', filename='js/vendor/jquery-3.3.1.min.js')}}"></script>
+<script type="text/javascript" src="{{url_for('static', filename='js/vendor/ExifReader.js')}}"></script>
+<script type="text/javascript" src="{{url_for('static', filename='js/vendor/canvas-to-blob.js')}}"></script>
+<script type="text/javascript" src="{{url_for('static', filename='js/vendor/prefixfree.js')}}"></script>
+<script type="text/javascript" src="{{url_for('static', filename='js/util.js')}}"></script>
+<script type="text/javascript" src="{{url_for('static', filename='js/upload.js')}}"></script>
+<script type="text/javascript" src="{{url_for('static', filename='js/app.js')}}"></script>
+</html> \ No newline at end of file
diff --git a/server/celery_worker.py b/server/celery_worker.py
new file mode 100644
index 00000000..1545a884
--- /dev/null
+++ b/server/celery_worker.py
@@ -0,0 +1,7 @@
+#!/usr/bin/env python
+import os
+from app.basemodels import celery
+from app import create_app
+
+app = create_app(os.getenv('FLASK_CONFIG') or 'default')
+app.app_context().push()
diff --git a/server/config.py b/server/config.py
new file mode 100644
index 00000000..5042efb6
--- /dev/null
+++ b/server/config.py
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+"""Application configuration."""
+import os
+from os.path import join
+basedir = os.path.abspath(os.path.dirname(__file__))
+
+class Config(object):
+ """Base configuration."""
+
+ #SECRET_KEY = os.environ.get('MYFLASKAPP_SECRET', 'secret-key') # TODO: Change me
+ APP_DIR = os.path.abspath(os.path.dirname(__file__)) # This directory
+ PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))
+ #BCRYPT_LOG_ROUNDS = 13
+ DEBUG_TB_ENABLED = False # Disable Debug toolbar
+ #DEBUG_TB_INTERCEPT_REDIRECTS = False
+ CACHE_TYPE = 'simple' # Can be "memcached", "redis", etc.
+ HOST = '0.0.0.0'
+ FLASK_DEBUG_DISABLE_STRICT = True
+ #WTF_CSRF_SECRET_KEY = '94ksadkf49DKEDFJ.&'
+ BOOTSTRAP_GOOGLE_ANALYTICS_ACCOUNT = None
+ BOOTSTRAP_SERVE_LOCAL = True
+ SECRET_KEY = os.environ.get('SECRET_KEY') or '94ksadkf49DKEDFJ.&'
+ CELERY_BROKER_URL = 'redis://localhost:6379/0'
+ CELERY_RESULT_BACKEND = 'redis://localhost:6379/0'
+
+ FLASKY_SLOW_DB_QUERY_TIME=0.5
+
+ @staticmethod
+ def init_app(app):
+ pass
+
+
+class DevelopmentConfig(Config):
+ """Development configuration."""
+ ENV = 'dev'
+ DEBUG = True
+
+class ProductionConfig(Config):
+ ENV = 'production'
+ DEBUG = False
+ # @classmethod
+ # def init_app(cls, app):
+ # Config.init_app(app)
+ # # import logging
+ # # app.logger.addHandler(mail_handler)
+
+
+class DigitalOceanConfig(Config):
+ """Production configuration."""
+ def init_app(cls, app):
+ ProductionConfig.init_app(app)
+ # log to syslog
+ import logging
+ from logging.handlers import SysLogHandler
+ syslog_handler = SysLogHandler()
+ syslog_handler.setLevel(logging.WARNING)
+ app.logger.addHandler(syslog_handler)
+
+
+class UnixConfig(ProductionConfig):
+ @classmethod
+ def init_app(cls, app):
+ ProductionConfig.init_app(app)
+
+ # log to syslog
+ import logging
+ from logging.handlers import SysLogHandler
+ syslog_handler = SysLogHandler()
+ syslog_handler.setLevel(logging.WARNING)
+ app.logger.addHandler(syslog_handler)
+
+
+config = {
+ 'development': DevelopmentConfig,
+ 'production': ProductionConfig,
+ 'digitalocean': DigitalOceanConfig,
+ 'default': DevelopmentConfig
+}
diff --git a/server/deploy.sh b/server/deploy.sh
new file mode 100755
index 00000000..c2594cab
--- /dev/null
+++ b/server/deploy.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+d_src="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"/
+d_dst=/home/dull/dulldream/www/dulldream_xyz
+ssh_alis=dulldream-root
+#DOCKER_DIR_P1="$(dirname "$CWD")"
+
+echo "Syncing DullDream LOCAL to -> REMOTE"
+echo $d_src
+echo $d_dst
+
+
+#rsync -a -e 'ssh' \
+rsync -r -v --progress -e 'ssh' \
+ --delete \
+ --exclude='.DS_Store' \
+ --exclude='deploy.sh' \
+ $d_src $ssh_alis:$d_dst
+
+echo "Synced :)" \ No newline at end of file
diff --git a/server/dulldream.wsgi.py b/server/dulldream.wsgi.py
new file mode 100644
index 00000000..ed992528
--- /dev/null
+++ b/server/dulldream.wsgi.py
@@ -0,0 +1,13 @@
+#!/usr/bin/python
+import sys
+sys.path.insert(0, "/home/dulldream/dulldream/www/dulldream_xyz/")
+
+from app import create_app
+
+import logging
+logging.basicConfig(stream=sys.stderr)
+# logging.basicConfig(filename='error.log',level=logging.DEBUG)
+
+application = create_app('production')
+application.secret_key = 'curlier6982!1decentralizationists'
+
diff --git a/server/run-celery.sh b/server/run-celery.sh
new file mode 100755
index 00000000..e38174fa
--- /dev/null
+++ b/server/run-celery.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+celery worker -A celery_worker.celery --loglevel=info
+
diff --git a/server/run-dev.sh b/server/run-dev.sh
new file mode 100755
index 00000000..b4eb2a61
--- /dev/null
+++ b/server/run-dev.sh
@@ -0,0 +1 @@
+FLASK_CONFIG=development python run.py \ No newline at end of file
diff --git a/server/run-gunicorn.sh b/server/run-gunicorn.sh
new file mode 100755
index 00000000..64debabd
--- /dev/null
+++ b/server/run-gunicorn.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+gunicorn -w 1 -b 0.0.0.0:8000 run:app
diff --git a/server/run-redis.sh b/server/run-redis.sh
new file mode 100755
index 00000000..e9ceb845
--- /dev/null
+++ b/server/run-redis.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+/usr/local/bin/redis-server /etc/redis/redis.conf
diff --git a/server/run.py b/server/run.py
new file mode 100644
index 00000000..c4c3e8d7
--- /dev/null
+++ b/server/run.py
@@ -0,0 +1,12 @@
+#!/usr/bin/python3
+import os
+from flask import Flask
+from app import create_app
+
+app = create_app(os.getenv('FLASK_CONFIG') or 'default')
+import logging
+logging.basicConfig(filename='error.log',level=logging.DEBUG)
+
+if __name__ == '__main__':
+ app.run(host='0.0.0.0',debug=False,threaded=False,port=8000)
+ pass
diff --git a/site/assets/css/css.css b/site/assets/css/css.css
new file mode 100644
index 00000000..9ac35699
--- /dev/null
+++ b/site/assets/css/css.css
@@ -0,0 +1,383 @@
+* { box-sizing: border-box; }
+html, body {
+ margin: 0;
+ padding: 0;
+ width: 100%;
+ min-height: 100%;
+ font-family: 'Roboto', sans-serif;
+ color: #b8b8b8;
+}
+html {
+ background: #191919;
+}
+
+/* header */
+
+header {
+ position: fixed;
+ top: 0;
+ left: 0;
+ width: 100%;
+ height: 70px;
+ background: #1e1e1e;
+ display: flex;
+ flex-direction: row;
+ align-items: center;
+ justify-content: space-between;
+ box-shadow: 0 0 4px rgba(0,0,0,0.3);
+}
+header .slogan {
+ display: flex;
+ flex-direction: row;
+ align-items: center;
+ padding-left: 56px;
+ text-decoration: none;
+}
+header .logo {
+ background-image: url(../img/megapixels_logo_white.svg);
+ background-size: cover;
+ background-repeat: no-repeat;
+ margin-top: 7px;
+ margin-right: 14px;
+ width: 49px;
+ height: 30px;
+}
+header .site_name {
+ font-weight: bold;
+ color: #fff;
+}
+header .sub {
+ margin-left: 4px;
+ margin-top: 2px;
+ transition: color 0.1s cubic-bezier(0,0,1,1);
+}
+.sub {
+ color: #666;
+ font-size: 10pt;
+}
+.desktop header .slogan:hover .site_name {
+ color: #fff;
+}
+.desktop header .slogan:hover .sub {
+ color: #666;
+}
+header .links {
+ display: flex;
+ flex-direction: row;
+ font-family: 'Roboto Mono', monospace;
+}
+header .links a {
+ display: block;
+ color: #777;
+ text-decoration: none;
+ text-transform: uppercase;
+ margin-right: 32px;
+ transition: color 0.1s cubic-bezier(0,0,1,1), border-color 0.1s cubic-bezier(0,0,1,1);
+ border-bottom: 1px solid rgba(255,255,255,0);
+}
+header .links a.active {
+ color: #bbb;
+}
+.desktop header .links a:hover {
+ color: #fff;
+ border-bottom: 1px solid rgba(255,255,255,255);
+}
+.desktop header .links a.active:hover {
+ color: #fff;
+ border-bottom: 1px solid rgba(255,255,255,255);
+}
+
+/* footer */
+
+footer {
+ width: 100%;
+ background: #000;
+ display: flex;
+ flex-direction: row;
+ justify-content: space-between;
+ color: #888;
+ font-size: 9pt;
+ padding: 20px 75px 20px;
+}
+footer > div {
+ display: flex;
+ flex-direction: row;
+}
+footer a {
+ display: inline-block;
+ color: #888;
+ transition: color 0.2s cubic-bezier(0,0,1,1);
+ margin-right: 5px;
+}
+footer a:hover {
+ color: #ddd;
+}
+
+/* headings */
+
+h1 {
+ color: #ddd;
+ font-weight: 300;
+ font-size: 24pt;
+ margin: 75px 0 10px;
+ padding: 0;
+ transition: color 0.2s cubic-bezier(0,0,1,1);
+}
+h2, h3 {
+ margin: 0 0 20px 0;
+ padding: 0;
+ font-size: 11pt;
+ font-weight: 500;
+ transition: color 0.2s cubic-bezier(0,0,1,1);
+}
+
+th, .gray, h2, h3 {
+ font-family: 'Roboto Mono', monospace;
+ font-weight: 400;
+ text-transform: uppercase;
+ color: #666;
+}
+th, .gray {
+ font-size: 9pt;
+}
+
+/* content */
+
+.content {
+ padding-top: 70px;
+ padding-bottom: 100px;
+ min-height: calc(100vh - 55px);
+ line-height: 1.5;
+}
+section {
+ width: 640px;
+ margin: 0 auto;
+}
+.content .first_paragraph {
+ font-weight: 300;
+ font-size: 18pt;
+ color: #ccc;
+}
+p {
+ margin: 0 0 20px 0;
+}
+.content a {
+ color: #ddd;
+ transition: color 0.2s cubic-bezier(0,0,1,1);
+}
+.content a:hover {
+ color: #fff;
+}
+
+/* top of post metadata */
+
+.meta {
+ display: flex;
+ flex-direction: row;
+ justify-content: flex-start;
+ align-items: flex-start;
+ font-size: 10pt;
+ margin-bottom: 20px;
+}
+.meta > div {
+ margin-right: 30px;
+}
+.meta .gray {
+ font-size: 9pt;
+ padding-bottom: 4px;
+}
+
+/* misc formatting */
+
+code {
+ font-family: 'Roboto Mono', monospace;
+ font-size: 9pt;
+ padding: 2px 4px;
+ background: rgba(255,255,255,0.1);
+}
+pre {
+ margin: 0 0 40px 0;
+ border: 1px solid #666;
+ border-radius: 2px;
+}
+pre code {
+ display: block;
+ max-height: 400px;
+ max-width: 640px;
+ padding: 4px 10px;
+}
+table {
+ margin-bottom: 40px;
+}
+hr {
+ height: 1px;
+ background: #888;
+ border: 0;
+ width: 80px;
+}
+blockquote {
+ margin-left: 28px;
+ padding: 0 0 0 10px;
+ border-left: 2px solid #555;
+}
+
+/* footnotes */
+
+.footnotes hr {
+ display: none;
+}
+.footnotes ol:before {
+ content: 'Footnotes';
+ margin: 0 0 10px -40px;
+ padding-bottom: 0;
+ display: block;
+ font-family: 'Roboto Mono', monospace;
+ font-weight: 400;
+ text-transform: uppercase;
+ color: #666;
+ font-size: 11pt;
+}
+
+/* images */
+
+section img {
+ max-width: 100%;
+ display: block;
+ margin: 0 auto;
+}
+section .image {
+ margin-bottom: 40px;
+}
+section.images {
+ display: flex;
+ flex-direction: row;
+ align-items: flex-start;
+ justify-content: center;
+}
+.image:only-child {
+ width: 100%;
+}
+.image:first-child {
+ margin-left: 0;
+}
+.image:nth-child(2),
+.image:nth-child(3) {
+ margin-left: 40px;
+}
+.image:first-child:nth-last-child(2),
+.image:first-child:nth-last-child(2) ~ .image {
+ width: 300px;
+}
+.image:first-child:nth-last-child(3),
+.image:first-child:nth-last-child(3) ~ .image {
+ width: 186px;
+}
+section.wide {
+ width: 100%;
+}
+section.wide .image {
+ max-width: 100%;
+}
+.caption {
+ text-align: center;
+ font-size: 9pt;
+ color: #888;
+ max-width: 620px;
+ margin: 10px auto 0 auto;
+}
+
+/* blog index */
+
+.research_index {
+ margin-top: 40px;
+}
+.research_index a {
+ text-decoration: none;
+}
+.research_index h1 {
+ margin-top: 20px;
+ text-decoration: underline;
+}
+.desktop .research_index section:hover h1 {
+ color: #fff;
+}
+.research_index section:hover h2 {
+ color: #ddd;
+}
+
+/* home page */
+
+.hero {
+ position: relative;
+ width: 100%;
+ max-width: 1200px;
+ height: 50vw;
+ max-height: 70vh;
+ display: flex;
+ align-items: center;
+ margin: 0 auto;
+}
+#face_container {
+ pointer-events: none;
+ position: absolute;
+ width: 50vw;
+ height: 50vw;
+ max-height: 70vh;
+ top: 0;
+ right: 0;
+ z-index: -1;
+ text-align: center;
+}
+.currentFace {
+ position: absolute;
+ bottom: 50px;
+ width: 100%;
+ left: 0;
+ text-align: center;
+}
+.intro {
+ max-width: 640px;
+ padding: 75px 0 75px 10px;
+ z-index: 1;
+}
+.intro .headline {
+ font-family: 'Roboto Mono', monospace;
+ font-size: 16pt;
+}
+.intro .buttons {
+ margin: 40px 0;
+}
+.intro button {
+ font-family: 'Roboto', sans-serif;
+ padding: 8px 12px;
+ border-radius: 6px;
+ border: 1px solid transparent;
+ cursor: pointer;
+ font-size: 11pt;
+ margin-right: 10px;
+ transition: color 0.1s cubic-bezier(0,0,1,1), background-color 0.1s cubic-bezier(0,0,1,1);
+}
+.intro button.normal {
+ background: #191919;
+ border-color: #444;
+ color: #ddd;
+}
+.intro button.important {
+ background: #444;
+ border-color: #444;
+ color: #ddd;
+}
+.desktop .intro button:hover {
+ background: #666;
+ border-color: #666;
+ color: #fff;
+}
+.intro .under {
+ color: #888;
+}
+.intro .under a {
+ color: #bbb;
+}
+.desktop .intro .under a:hover {
+ color: #fff;
+} \ No newline at end of file
diff --git a/site/assets/css/fonts.css b/site/assets/css/fonts.css
new file mode 100644
index 00000000..8db01fbd
--- /dev/null
+++ b/site/assets/css/fonts.css
@@ -0,0 +1,41 @@
+@font-face {
+ font-family: 'Roboto';
+ font-style: normal;
+ font-weight: 300;
+ src: url("../fonts/Roboto_300.eot?#iefix") format("embedded-opentype"), url("../fonts/Roboto_300.woff") format("woff"), url("../fonts/Roboto_300.woff2") format("woff2"), url("../fonts/Roboto_300.svg#Roboto") format("svg"), url("../fonts/Roboto_300.ttf") format("truetype");
+}
+
+@font-face {
+ font-family: 'Roboto';
+ font-style: normal;
+ font-weight: 400;
+ src: url("../fonts/Roboto_400.eot?#iefix") format("embedded-opentype"), url("../fonts/Roboto_400.woff") format("woff"), url("../fonts/Roboto_400.woff2") format("woff2"), url("../fonts/Roboto_400.svg#Roboto") format("svg"), url("../fonts/Roboto_400.ttf") format("truetype");
+}
+
+@font-face {
+ font-family: 'Roboto';
+ font-style: normal;
+ font-weight: 500;
+ src: url("../fonts/Roboto_500.eot?#iefix") format("embedded-opentype"), url("../fonts/Roboto_500.woff") format("woff"), url("../fonts/Roboto_500.woff2") format("woff2"), url("../fonts/Roboto_500.svg#Roboto") format("svg"), url("../fonts/Roboto_500.ttf") format("truetype");
+}
+
+@font-face {
+ font-family: 'Roboto Mono';
+ font-style: normal;
+ font-weight: 300;
+ src: url("../fonts/Roboto_Mono_300.eot?#iefix") format("embedded-opentype"), url("../fonts/Roboto_Mono_300.woff") format("woff"), url("../fonts/Roboto_Mono_300.woff2") format("woff2"), url("../fonts/Roboto_Mono_300.svg#RobotoMono") format("svg"), url("../fonts/Roboto_Mono_300.ttf") format("truetype");
+}
+
+@font-face {
+ font-family: 'Roboto Mono';
+ font-style: normal;
+ font-weight: 400;
+ src: url("../fonts/Roboto_Mono_400.eot?#iefix") format("embedded-opentype"), url("../fonts/Roboto_Mono_400.woff") format("woff"), url("../fonts/Roboto_Mono_400.woff2") format("woff2"), url("../fonts/Roboto_Mono_400.svg#RobotoMono") format("svg"), url("../fonts/Roboto_Mono_400.ttf") format("truetype");
+}
+
+@font-face {
+ font-family: 'Roboto Mono';
+ font-style: normal;
+ font-weight: 500;
+ src: local("Roboto-Mono Medium"), local("RobotoMono-Medium"), url("../fonts/Roboto_Mono_500.eot?#iefix") format("embedded-opentype"), url("../fonts/Roboto_Mono_500.woff") format("woff"), url("../fonts/Roboto_Mono_500.woff2") format("woff2"), url("../fonts/Roboto_Mono_500.svg#RobotoMono") format("svg"), url("../fonts/Roboto_Mono_500.ttf") format("truetype");
+}
diff --git a/site/assets/data/3dlm_0_10.json b/site/assets/data/3dlm_0_10.json
new file mode 100644
index 00000000..1f747008
--- /dev/null
+++ b/site/assets/data/3dlm_0_10.json
@@ -0,0 +1 @@
+{"Daisy_Fuentes_0004_2737.png":[[54.0,88.0,-86.4166488647461],[61.0,117.0,-94.31047821044922],[67.0,139.0,-100.80580139160156],[74.0,158.0,-104.98035430908203],[80.0,183.0,-102.41091918945312],[86.0,206.0,-88.20156860351562],[93.0,225.0,-67.11590576171875],[105.0,247.0,-44.081058502197266],[131.0,256.0,-27.08724594116211],[159.0,247.0,-24.630205154418945],[188.0,228.0,-33.128997802734375],[207.0,209.0,-43.85726547241211],[223.0,183.0,-49.67991256713867],[232.0,158.0,-47.00068664550781],[236.0,136.0,-39.36989212036133],[239.0,113.0,-29.353200912475586],[239.0,88.0,-19.166582107543945],[45.0,94.0,9.0930757522583],[48.0,94.0,28.99441146850586],[61.0,94.0,43.41041564941406],[70.0,101.0,52.37288284301758],[80.0,104.0,56.78023910522461],[128.0,104.0,73.55677795410156],[137.0,98.0,77.24374389648438],[153.0,94.0,77.57779693603516],[172.0,94.0,73.60804748535156],[188.0,94.0,61.270816802978516],[105.0,132.0,54.630958557128906],[102.0,155.0,56.62538146972656],[99.0,171.0,60.56704330444336],[102.0,183.0,56.47877883911133],[96.0,180.0,22.875110626220703],[102.0,183.0,28.277494430541992],[112.0,187.0,32.87094497680664],[121.0,183.0,35.1693115234375],[128.0,180.0,34.83551025390625],[61.0,113.0,16.009923934936523],[64.0,113.0,29.9821834564209],[77.0,113.0,34.802860260009766],[89.0,120.0,32.677188873291016],[77.0,123.0,31.01885223388672],[67.0,123.0,24.31825828552246],[134.0,123.0,50.15107727050781],[143.0,117.0,60.02939987182617],[156.0,117.0,63.61414337158203],[169.0,120.0,55.770992279052734],[159.0,126.0,57.6904411315918],[147.0,126.0,55.69097900390625],[86.0,193.0,-17.841793060302734],[93.0,196.0,4.937897682189941],[105.0,196.0,22.647642135620117],[115.0,199.0,26.378101348876953],[121.0,196.0,29.354434967041016],[143.0,196.0,24.816020965576172],[166.0,193.0,10.712540626525879],[147.0,212.0,12.046388626098633],[134.0,221.0,11.838120460510254],[118.0,225.0,8.037301063537598],[105.0,221.0,2.2057247161865234],[96.0,212.0,-6.710599899291992],[89.0,193.0,-17.244544982910156],[105.0,2
02.0,12.899520874023438],[115.0,202.0,19.937997817993164],[131.0,202.0,22.065942764282227],[163.0,193.0,9.6935396194458],[131.0,212.0,14.528266906738281],[118.0,212.0,11.355504035949707],[105.0,212.0,5.307169437408447]],"George_W_Bush_0521_4871.png":[[52.0,92.0,-45.72012710571289],[49.0,113.0,-46.65034866333008],[52.0,135.0,-47.51322555541992],[54.0,151.0,-46.91441345214844],[57.0,173.0,-40.902000427246094],[68.0,189.0,-28.678600311279297],[78.0,205.0,-13.592384338378906],[95.0,219.0,-1.2501835823059082],[116.0,229.0,1.177125334739685],[140.0,227.0,-9.77007007598877],[157.0,216.0,-27.030227661132812],[165.0,205.0,-46.50547790527344],[176.0,189.0,-62.202232360839844],[184.0,170.0,-69.46493530273438],[192.0,154.0,-70.83147430419922],[197.0,138.0,-70.83583068847656],[202.0,116.0,-70.05885314941406],[81.0,81.0,17.92914390563965],[95.0,78.0,27.789169311523438],[108.0,78.0,33.28635025024414],[116.0,81.0,35.898231506347656],[127.0,84.0,36.1664924621582],[162.0,89.0,30.671188354492188],[173.0,89.0,27.389225006103516],[181.0,89.0,21.40380096435547],[192.0,92.0,12.476249694824219],[197.0,97.0,-0.4335915446281433],[143.0,105.0,32.26004409790039],[143.0,119.0,38.66748809814453],[140.0,132.0,46.93293380737305],[140.0,143.0,47.69198226928711],[122.0,149.0,31.083112716674805],[127.0,151.0,33.54079055786133],[135.0,154.0,34.166015625],[143.0,154.0,31.159467697143555],[149.0,154.0,27.037282943725586],[97.0,100.0,21.734798431396484],[105.0,97.0,28.1799373626709],[114.0,97.0,27.063697814941406],[122.0,103.0,22.652997970581055],[114.0,105.0,26.08942985534668],[103.0,103.0,25.867576599121094],[159.0,108.0,17.47022819519043],[167.0,105.0,19.028751373291016],[176.0,108.0,17.09029769897461],[181.0,111.0,7.996582984924316],[176.0,113.0,14.536516189575195],[165.0,111.0,18.149005889892578],[105.0,178.0,22.76020050048828],[116.0,170.0,33.3117561340332],[127.0,167.0,37.994598388671875],[132.0,167.0,38.12085723876953],[138.0,167.0,36.27274703979492],[149.0,175.0,27.961029052734375],[151.0,184.0,
13.815584182739258],[143.0,194.0,20.624553680419922],[135.0,197.0,24.53580093383789],[127.0,197.0,26.357908248901367],[119.0,194.0,27.084375381469727],[111.0,186.0,25.804853439331055],[108.0,175.0,22.580766677856445],[124.0,173.0,33.42163848876953],[132.0,175.0,34.26499557495117],[138.0,175.0,31.121870040893555],[149.0,184.0,14.480358123779297],[135.0,189.0,25.04640769958496],[130.0,189.0,26.659072875976562],[122.0,184.0,27.26160430908203]],"Jean_Chretien_0048_6598.png":[[30.0,125.0,-40.58317947387695],[36.0,147.0,-38.26982498168945],[43.0,166.0,-36.6230583190918],[49.0,188.0,-33.90769577026367],[62.0,210.0,-26.04215431213379],[80.0,229.0,-13.205296516418457],[103.0,242.0,1.227866530418396],[128.0,251.0,11.785074234008789],[156.0,257.0,10.845030784606934],[178.0,245.0,-6.119598865509033],[188.0,232.0,-29.136899948120117],[191.0,223.0,-53.54594039916992],[194.0,201.0,-74.41848754882812],[194.0,179.0,-85.97528076171875],[194.0,156.0,-90.91073608398438],[194.0,134.0,-94.51874542236328],[194.0,112.0,-97.60952758789062],[71.0,93.0,19.528913497924805],[84.0,84.0,26.82400894165039],[96.0,78.0,30.234058380126953],[109.0,78.0,31.37984848022461],[122.0,78.0,30.30535888671875],[159.0,74.0,16.718530654907227],[169.0,71.0,10.688024520874023],[181.0,71.0,1.7389824390411377],[191.0,74.0,-9.972139358520508],[197.0,81.0,-24.279382705688477],[144.0,100.0,25.824087142944336],[150.0,112.0,34.62986373901367],[153.0,125.0,45.06562042236328],[156.0,137.0,47.438385009765625],[134.0,153.0,33.16469955444336],[144.0,153.0,34.75723648071289],[153.0,153.0,34.24925231933594],[159.0,150.0,29.585615158081055],[163.0,150.0,24.086299896240234],[90.0,109.0,23.489126205444336],[103.0,106.0,28.661333084106445],[109.0,103.0,25.710494995117188],[118.0,106.0,20.149057388305664],[112.0,109.0,25.589101791381836],[103.0,112.0,27.14316749572754],[159.0,103.0,6.047369956970215],[169.0,100.0,5.07871675491333],[178.0,100.0,1.128143072128296],[185.0,100.0,-9.351521492004395],[178.0,106.0,-0.28687334060668945],[17
2.0,106.0,5.514134407043457],[122.0,197.0,28.512418746948242],[131.0,175.0,37.198204040527344],[147.0,166.0,39.9262580871582],[153.0,169.0,39.13956832885742],[159.0,166.0,35.968048095703125],[172.0,175.0,26.046375274658203],[178.0,191.0,10.3086576461792],[175.0,210.0,20.745849609375],[169.0,220.0,27.28376579284668],[159.0,223.0,30.989892959594727],[147.0,220.0,32.862693786621094],[134.0,213.0,31.60837173461914],[122.0,197.0,27.478269577026367],[144.0,179.0,36.26493453979492],[153.0,175.0,36.0021858215332],[163.0,175.0,31.556718826293945],[175.0,191.0,11.229880332946777],[166.0,210.0,26.56795883178711],[156.0,210.0,29.762998580932617],[147.0,210.0,31.160558700561523]],"Jose_Maria_Aznar_0012_7639.png":[[52.0,117.0,-85.36373138427734],[58.0,142.0,-89.13520050048828],[64.0,163.0,-91.57649993896484],[70.0,181.0,-90.67222595214844],[76.0,205.0,-81.6866683959961],[86.0,224.0,-63.06562042236328],[95.0,236.0,-39.47830581665039],[110.0,248.0,-15.760049819946289],[134.0,251.0,-0.7411996126174927],[158.0,242.0,-3.497490644454956],[180.0,227.0,-18.777170181274414],[201.0,211.0,-35.454566955566406],[213.0,190.0,-47.65032196044922],[222.0,166.0,-52.36196517944336],[225.0,145.0,-50.47983932495117],[225.0,120.0,-45.808433532714844],[225.0,96.0,-40.67475128173828],[52.0,108.0,-2.9392600059509277],[58.0,108.0,12.7117338180542],[67.0,105.0,24.29625701904297],[79.0,105.0,31.84514617919922],[89.0,105.0,35.80921173095703],[131.0,99.0,48.1629753112793],[140.0,96.0,49.877891540527344],[155.0,93.0,48.81587600708008],[171.0,93.0,44.32658767700195],[186.0,96.0,33.5164909362793],[110.0,123.0,35.202392578125],[110.0,139.0,39.387237548828125],[107.0,154.0,46.361717224121094],[110.0,163.0,45.361881256103516],[104.0,175.0,21.95050048828125],[110.0,175.0,25.979686737060547],[116.0,175.0,29.28858184814453],[125.0,172.0,29.773151397705078],[131.0,169.0,28.43248176574707],[67.0,123.0,6.034923076629639],[73.0,120.0,17.086734771728516],[82.0,120.0,20.637174606323242],[95.0,123.0,19.039827346801758],[86.0
,126.0,18.763216018676758],[76.0,129.0,13.737027168273926],[140.0,117.0,31.881677627563477],[146.0,111.0,38.8029670715332],[158.0,111.0,41.307891845703125],[168.0,114.0,34.02800750732422],[158.0,120.0,37.281280517578125],[146.0,120.0,36.33848571777344],[95.0,202.0,7.3154730796813965],[101.0,199.0,19.637798309326172],[113.0,196.0,28.106361389160156],[119.0,196.0,30.175378799438477],[125.0,193.0,30.934680938720703],[140.0,196.0,26.531757354736328],[155.0,196.0,18.057851791381836],[143.0,205.0,25.4660587310791],[131.0,208.0,27.901447296142578],[122.0,211.0,26.784423828125],[113.0,211.0,23.835683822631836],[104.0,208.0,18.039756774902344],[98.0,202.0,8.314102172851562],[113.0,202.0,22.491792678833008],[122.0,202.0,25.67093276977539],[131.0,202.0,25.65959930419922],[152.0,196.0,17.607336044311523],[131.0,199.0,27.57944679260254],[122.0,202.0,26.80080223083496],[113.0,202.0,24.226303100585938]],"Martin_Bandier_0001_9412.png":[[63.0,121.0,-65.37309265136719],[63.0,143.0,-64.64505004882812],[65.0,162.0,-63.10131072998047],[68.0,179.0,-58.94812774658203],[74.0,201.0,-47.4974250793457],[85.0,215.0,-28.67585563659668],[96.0,223.0,-6.62316370010376],[109.0,228.0,15.033751487731934],[134.0,234.0,26.035844802856445],[159.0,228.0,17.08003044128418],[178.0,223.0,-2.46199893951416],[192.0,217.0,-23.39816665649414],[205.0,204.0,-41.04132843017578],[214.0,187.0,-51.08419418334961],[219.0,171.0,-54.11995315551758],[225.0,151.0,-54.73630905151367],[227.0,132.0,-54.683284759521484],[76.0,102.0,5.609401226043701],[87.0,96.0,17.809030532836914],[98.0,96.0,25.773998260498047],[109.0,96.0,30.63453483581543],[120.0,96.0,32.800411224365234],[156.0,96.0,36.425437927246094],[167.0,96.0,35.66188049316406],[181.0,96.0,32.42750930786133],[192.0,99.0,26.462549209594727],[203.0,105.0,15.327049255371094],[137.0,118.0,33.622276306152344],[137.0,129.0,40.19166946411133],[134.0,140.0,49.35963439941406],[134.0,151.0,50.38871765136719],[120.0,160.0,31.231637954711914],[128.0,162.0,34.355201721191406],[134.
0,162.0,36.49223327636719],[145.0,162.0,35.185462951660156],[150.0,160.0,32.73931121826172],[93.0,118.0,14.13566780090332],[101.0,116.0,22.39512825012207],[109.0,113.0,23.741657257080078],[117.0,118.0,21.19973373413086],[109.0,121.0,23.268537521362305],[101.0,121.0,20.586299896240234],[161.0,118.0,25.45779800415039],[170.0,113.0,29.320659637451172],[178.0,116.0,29.50352668762207],[186.0,118.0,21.957426071166992],[178.0,121.0,27.270540237426758],[167.0,121.0,28.569705963134766],[107.0,182.0,21.56089973449707],[115.0,179.0,31.159202575683594],[128.0,176.0,37.602928161621094],[134.0,179.0,38.916873931884766],[142.0,179.0,38.50923538208008],[156.0,182.0,33.15263748168945],[170.0,184.0,24.19475555419922],[156.0,190.0,34.02751159667969],[145.0,193.0,38.550209045410156],[134.0,193.0,39.023250579833984],[126.0,193.0,37.37254333496094],[117.0,190.0,31.93663787841797],[109.0,182.0,21.735275268554688],[126.0,184.0,33.94630813598633],[137.0,184.0,35.99415588378906],[145.0,184.0,34.99856948852539],[167.0,184.0,24.027576446533203],[145.0,184.0,36.92888259887695],[134.0,184.0,37.362281799316406],[126.0,184.0,35.78881072998047]],"Mel_Gibson_0002_9636.png":[[31.0,118.0,-2.9550893306732178],[36.0,139.0,-2.667202949523926],[41.0,158.0,-3.309368848800659],[47.0,173.0,-2.8674466609954834],[60.0,195.0,0.5379531979560852],[78.0,208.0,6.8468451499938965],[102.0,218.0,13.661532402038574],[129.0,226.0,16.51202392578125],[152.0,232.0,7.737005710601807],[166.0,224.0,-14.849297523498535],[166.0,216.0,-40.139617919921875],[160.0,205.0,-63.4443359375],[155.0,189.0,-82.52007293701172],[152.0,171.0,-92.49539947509766],[152.0,152.0,-96.97915649414062],[152.0,131.0,-100.56412506103516],[150.0,110.0,-102.95475006103516],[86.0,91.0,38.37003707885742],[102.0,86.0,41.894866943359375],[115.0,83.0,40.397464752197266],[126.0,83.0,37.24525451660156],[137.0,86.0,32.68013381958008],[166.0,83.0,8.698038101196289],[171.0,83.0,0.9768905639648438],[176.0,83.0,-9.498381614685059],[181.0,86.0,-22.676668167114258],[1
79.0,91.0,-38.2049674987793],[155.0,105.0,23.144948959350586],[160.0,118.0,29.64549446105957],[166.0,128.0,36.738407135009766],[168.0,139.0,37.75069046020508],[142.0,150.0,28.878965377807617],[150.0,150.0,27.837158203125],[158.0,152.0,24.27781105041504],[163.0,150.0,17.583341598510742],[166.0,147.0,10.795087814331055],[102.0,107.0,34.878822326660156],[113.0,102.0,36.4377555847168],[121.0,102.0,30.94145393371582],[126.0,107.0,23.007049560546875],[121.0,110.0,30.338083267211914],[113.0,110.0,35.04257583618164],[158.0,105.0,-2.0924415588378906],[166.0,102.0,-5.603616237640381],[174.0,102.0,-12.036636352539062],[174.0,105.0,-22.998979568481445],[174.0,107.0,-13.557750701904297],[166.0,107.0,-5.3837785720825195],[118.0,176.0,26.663448333740234],[137.0,168.0,30.426334381103516],[152.0,163.0,27.21775245666504],[158.0,163.0,23.104822158813477],[166.0,163.0,17.77894401550293],[171.0,165.0,1.9621200561523438],[171.0,171.0,-16.41786766052246],[168.0,181.0,-0.7907894849777222],[166.0,187.0,10.164527893066406],[158.0,189.0,17.966917037963867],[150.0,189.0,23.715682983398438],[137.0,184.0,25.680715560913086],[121.0,173.0,24.89003562927246],[147.0,171.0,25.607192993164062],[158.0,168.0,20.765199661254883],[163.0,168.0,12.706995964050293],[168.0,171.0,-15.33692741394043],[163.0,179.0,9.691545486450195],[158.0,181.0,17.176071166992188],[147.0,179.0,22.25572395324707]],"Miguel_Angel_Rodriguez_0001_9912.png":[[54.0,68.0,-42.45979309082031],[54.0,92.0,-49.5826301574707],[54.0,115.0,-55.79332733154297],[57.0,135.0,-59.33116912841797],[60.0,159.0,-57.03142547607422],[72.0,182.0,-45.75932312011719],[89.0,200.0,-29.281789779663086],[110.0,217.0,-13.761648178100586],[139.0,229.0,-9.994528770446777],[168.0,223.0,-22.58154296875],[185.0,208.0,-43.570491790771484],[200.0,194.0,-64.46328735351562],[209.0,176.0,-79.13212585449219],[217.0,156.0,-82.86824035644531],[223.0,135.0,-80.2246322631836],[232.0,115.0,-74.78620147705078],[238.0,92.0,-67.80895233154297],[92.0,74.0,37.68389892578125],[104.0,
77.0,52.043670654296875],[118.0,77.0,59.127376556396484],[133.0,83.0,61.828956604003906],[142.0,86.0,61.20265197753906],[182.0,92.0,55.284610748291016],[194.0,89.0,52.78497314453125],[209.0,89.0,46.65754699707031],[220.0,92.0,36.06510925292969],[229.0,92.0,18.42976188659668],[159.0,109.0,52.191322326660156],[159.0,127.0,55.90726089477539],[159.0,141.0,60.87660217285156],[159.0,153.0,58.1236457824707],[136.0,147.0,32.37248992919922],[145.0,153.0,34.84940719604492],[153.0,156.0,35.226722717285156],[165.0,156.0,32.414764404296875],[171.0,153.0,28.114261627197266],[107.0,95.0,36.662654876708984],[115.0,98.0,45.565467834472656],[127.0,101.0,44.66267013549805],[136.0,103.0,38.028995513916016],[127.0,106.0,41.371986389160156],[115.0,103.0,40.59615707397461],[180.0,109.0,32.01942443847656],[191.0,109.0,35.80430221557617],[200.0,109.0,33.73044967651367],[209.0,109.0,22.261281967163086],[200.0,115.0,28.547409057617188],[188.0,115.0,32.59733963012695],[107.0,162.0,7.68004035949707],[124.0,162.0,21.82286834716797],[145.0,165.0,29.717941284179688],[150.0,165.0,29.215816497802734],[162.0,165.0,28.041715621948242],[177.0,168.0,16.328580856323242],[185.0,170.0,-1.8884063959121704],[171.0,185.0,8.578341484069824],[159.0,191.0,14.658515930175781],[147.0,191.0,16.984785079956055],[136.0,188.0,17.13002586364746],[124.0,179.0,13.756457328796387],[110.0,162.0,7.356316566467285],[139.0,168.0,23.039575576782227],[150.0,170.0,23.86418914794922],[162.0,173.0,20.56334686279297],[182.0,170.0,-1.3613767623901367],[159.0,182.0,16.720003128051758],[147.0,182.0,19.093835830688477],[139.0,179.0,19.065034866333008]],"Owen_Wilson_0002_10664.png":[[59.0,102.0,-21.243762969970703],[64.0,123.0,-25.366979598999023],[67.0,141.0,-29.538850784301758],[72.0,159.0,-31.87116813659668],[82.0,179.0,-29.50519561767578],[98.0,197.0,-21.61178207397461],[116.0,207.0,-10.408528327941895],[136.0,220.0,-1.885507583618164],[162.0,228.0,-4.386058807373047],[180.0,218.0,-20.011940002441406],[185.0,202.0,-39.94613265991211
],[188.0,189.0,-59.84038162231445],[190.0,169.0,-74.894287109375],[193.0,148.0,-80.55574798583984],[195.0,130.0,-80.55134582519531],[200.0,115.0,-78.93417358398438],[200.0,94.0,-75.78621673583984],[100.0,94.0,38.169158935546875],[113.0,89.0,46.922264099121094],[128.0,87.0,50.022220611572266],[139.0,87.0,49.92811584472656],[149.0,89.0,47.58958435058594],[185.0,87.0,34.57915496826172],[193.0,82.0,30.142242431640625],[200.0,79.0,22.56153678894043],[208.0,79.0,11.584891319274902],[211.0,82.0,-3.6469027996063232],[170.0,110.0,37.8112907409668],[172.0,128.0,42.05117416381836],[177.0,141.0,47.863712310791016],[177.0,151.0,46.86157989501953],[154.0,153.0,31.895654678344727],[162.0,156.0,32.64586639404297],[170.0,159.0,31.2083740234375],[177.0,156.0,26.80669403076172],[180.0,153.0,21.87879753112793],[116.0,110.0,36.50224304199219],[126.0,107.0,41.755741119384766],[136.0,107.0,38.944122314453125],[141.0,110.0,32.12401580810547],[136.0,112.0,36.855350494384766],[126.0,112.0,38.63181686401367],[180.0,107.0,18.915437698364258],[190.0,102.0,19.264719009399414],[198.0,102.0,15.433978080749512],[200.0,105.0,4.7589192390441895],[198.0,107.0,11.944266319274902],[188.0,110.0,17.54715347290039],[134.0,174.0,22.057518005371094],[149.0,171.0,30.601415634155273],[164.0,166.0,32.62217712402344],[170.0,169.0,30.74959945678711],[175.0,166.0,27.59368133544922],[185.0,169.0,14.901169776916504],[185.0,174.0,-2.4239187240600586],[182.0,187.0,7.878442287445068],[177.0,195.0,14.520737648010254],[167.0,197.0,19.051616668701172],[157.0,197.0,22.247385025024414],[146.0,189.0,22.729080200195312],[136.0,174.0,21.341617584228516],[159.0,174.0,28.571256637573242],[167.0,174.0,26.78837776184082],[175.0,174.0,21.358505249023438],[185.0,174.0,-1.5108582973480225],[175.0,187.0,16.332590103149414],[167.0,189.0,20.710247039794922],[157.0,187.0,23.43580436706543]],"Sadie_Frost_0002_12236.png":[[43.0,110.0,-46.02598190307617],[49.0,134.0,-50.22691345214844],[55.0,154.0,-54.00772476196289],[64.0,171.0,-55.8838157
6538086],[73.0,192.0,-51.39228820800781],[90.0,209.0,-38.368595123291016],[108.0,221.0,-21.556013107299805],[131.0,233.0,-6.431150436401367],[160.0,239.0,-1.1526600122451782],[187.0,224.0,-13.746787071228027],[198.0,207.0,-33.85028839111328],[207.0,189.0,-53.560306549072266],[213.0,169.0,-68.61043548583984],[216.0,145.0,-74.07164764404297],[219.0,125.0,-73.03618621826172],[219.0,104.0,-70.54234313964844],[219.0,81.0,-66.82637786865234],[73.0,96.0,35.09419250488281],[84.0,90.0,48.202972412109375],[99.0,87.0,55.006980895996094],[114.0,87.0,57.74137878417969],[125.0,87.0,57.095542907714844],[169.0,78.0,52.34587860107422],[181.0,72.0,50.398277282714844],[192.0,66.0,44.81260299682617],[204.0,66.0,35.284515380859375],[213.0,66.0,19.477773666381836],[151.0,107.0,48.60774612426758],[154.0,125.0,53.40505599975586],[157.0,139.0,60.78512191772461],[160.0,151.0,59.40488052368164],[140.0,157.0,36.827823638916016],[149.0,157.0,38.765625],[157.0,157.0,39.05522537231445],[166.0,154.0,36.21161651611328],[172.0,148.0,32.36823272705078],[93.0,110.0,38.026493072509766],[105.0,107.0,46.511924743652344],[114.0,104.0,45.97248077392578],[122.0,107.0,39.76427459716797],[116.0,110.0,43.69316101074219],[105.0,113.0,43.01232147216797],[172.0,98.0,34.90428161621094],[181.0,93.0,38.591400146484375],[192.0,90.0,36.841041564941406],[198.0,93.0,26.039823532104492],[192.0,98.0,32.86278533935547],[181.0,98.0,36.44768524169922],[114.0,180.0,13.682698249816895],[131.0,174.0,29.0455265045166],[151.0,169.0,37.02834701538086],[157.0,169.0,36.82743835449219],[166.0,166.0,34.820980072021484],[181.0,166.0,20.959548950195312],[189.0,169.0,0.6888017654418945],[184.0,189.0,10.8884859085083],[175.0,201.0,18.23310089111328],[163.0,204.0,21.218435287475586],[149.0,204.0,21.746076583862305],[134.0,198.0,18.20676040649414],[116.0,180.0,12.841014862060547],[146.0,177.0,31.248977661132812],[157.0,174.0,32.05494689941406],[169.0,171.0,27.548999786376953],[187.0,169.0,0.3560405969619751],[172.0,189.0,19.1156005859375],[
160.0,192.0,22.56888771057129],[149.0,192.0,22.62162971496582]],"Tony_Blair_0133_13648.png":[[28.0,35.0,-114.78353118896484],[28.0,56.0,-117.0096206665039],[30.0,77.0,-117.85218048095703],[28.0,95.0,-116.64053344726562],[25.0,119.0,-109.63522338867188],[20.0,142.0,-91.96292877197266],[12.0,163.0,-69.02586364746094],[4.0,179.0,-43.47087860107422],[17.0,192.0,-19.292436599731445],[38.0,187.0,-4.95521354675293],[67.0,179.0,-3.8730266094207764],[93.0,166.0,-5.7884602546691895],[114.0,145.0,-5.7592878341674805],[127.0,121.0,-3.3975744247436523],[132.0,103.0,0.7643616795539856],[135.0,80.0,6.597561359405518],[138.0,56.0,11.115669250488281],[-13.0,30.0,-47.145782470703125],[-21.0,30.0,-29.913450241088867],[-19.0,30.0,-14.542082786560059],[-16.0,32.0,-2.5428059101104736],[-11.0,38.0,6.405983924865723],[17.0,43.0,36.266361236572266],[25.0,40.0,42.780643463134766],[38.0,40.0,48.210636138916016],[54.0,43.0,51.576744079589844],[70.0,48.0,48.710269927978516],[1.0,64.0,18.72914695739746],[-6.0,82.0,22.138294219970703],[-11.0,101.0,25.75322723388672],[-13.0,114.0,24.547555923461914],[-6.0,116.0,-4.2706618309021],[-6.0,121.0,2.8793535232543945],[0.0,124.0,9.930088996887207],[7.0,121.0,14.941959381103516],[14.0,121.0,16.764404296875],[-8.0,51.0,-33.40031051635742],[-11.0,48.0,-21.259601593017578],[-6.0,51.0,-13.4171781539917],[4.0,56.0,-9.602275848388672],[-3.0,59.0,-14.66192626953125],[-8.0,56.0,-24.096799850463867],[30.0,64.0,21.700273513793945],[35.0,61.0,31.827836990356445],[43.0,64.0,38.890464782714844],[54.0,66.0,37.976531982421875],[46.0,69.0,35.73036193847656],[35.0,66.0,29.375158309936523],[-6.0,135.0,-31.20798683166504],[-8.0,137.0,-14.465958595275879],[-3.0,140.0,0.3486156761646271],[1.0,142.0,5.642735481262207],[4.0,140.0,10.870265007019043],[20.0,142.0,16.053525924682617],[35.0,145.0,15.69095516204834],[20.0,150.0,13.479714393615723],[9.0,156.0,9.445561408996582],[1.0,153.0,2.799577236175537],[-3.0,150.0,-5.415865898132324],[-6.0,145.0,-15.440103530883789],[-6.0,135.0,-
29.30402183532715],[0.0,145.0,-5.810694694519043],[4.0,145.0,2.0995430946350098],[12.0,145.0,7.975824356079102],[33.0,145.0,13.971905708312988],[9.0,145.0,8.455390930175781],[1.0,145.0,2.259183406829834],[-3.0,142.0,-5.021042823791504]]} \ No newline at end of file
diff --git a/site/assets/fonts/Roboto_300.eot b/site/assets/fonts/Roboto_300.eot
new file mode 100644
index 00000000..17b7d5cd
--- /dev/null
+++ b/site/assets/fonts/Roboto_300.eot
Binary files differ
diff --git a/site/assets/fonts/Roboto_300.svg b/site/assets/fonts/Roboto_300.svg
new file mode 100644
index 00000000..4ded944a
--- /dev/null
+++ b/site/assets/fonts/Roboto_300.svg
@@ -0,0 +1,312 @@
+<?xml version="1.0" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<svg xmlns="http://www.w3.org/2000/svg">
+<defs >
+<font id="Roboto" horiz-adv-x="1135" ><font-face
+ font-family="Roboto Light"
+ units-per-em="2048"
+ panose-1="2 0 0 0 0 0 0 0 0 0"
+ ascent="1900"
+ descent="-500"
+ alphabetic="0" />
+<glyph unicode=" " horiz-adv-x="498" />
+<glyph unicode="!" horiz-adv-x="462" d="M284 405H173L167 1456H291L284 405ZM153 70Q153 104 175 127T235 151T295 128T318 70Q318 37 296 15T235 -8T175 14T153 70Z" />
+<glyph unicode="&quot;" horiz-adv-x="588" d="M243 1396L223 1083H143L146 1536H243V1396ZM479 1396L459 1083H378L382 1536H479V1396Z" />
+<glyph unicode="#" horiz-adv-x="1191" d="M753 410H439L362 0H263L340 410H85V503H357L440 944H161V1040H458L537 1456H636L557 1040H872L951 1456H1051L972 1040H1201V944H954L871 503H1126V410H853L776 0H676L753 410ZM456 503H771L854 944H539L456 503Z" />
+<glyph unicode="$" d="M901 359Q901 470 829 540T575 674Q349 745 258 842T167 1095Q167 1258 267 1359T539 1475V1677H641V1475Q817 1459 913 1343T1010 1028H891Q891 1185 810 1277T587 1370Q445 1370 366 1296T286 1097Q286 977 359 910T607 783T862 669T981
+540T1021 361Q1021 197 919 97T637 -18V-208H536V-19Q335 -6 225 107T115 418H235Q235 262 326 174T580 85Q722 85 811 161T901 359Z" />
+<glyph unicode="%" horiz-adv-x="1513" d="M109 1176Q109 1306 189 1391T394 1477T598 1392T679 1170V1099Q679 971 600 886T396 800Q273 800 191 884T109 1106V1176ZM206 1099Q206 1006 257 946T396 886Q481 886 531 946T582 1103V1176Q582 1269 530 1329T394
+1390Q311 1390 259 1330T206 1170V1099ZM842 357Q842 487 922 572T1126 657T1330 573T1412 350V279Q1412 149 1332 64T1128 -21T924 63T842 284V357ZM938 279Q938 185 989 125T1128 65Q1214 65 1264 125T1315 284V357Q1315 453 1264 511T1126 570Q1042 570 990
+511T938 353V279ZM434 121L359 169L1070 1307L1145 1259L434 121Z" />
+<glyph unicode="&amp;" horiz-adv-x="1260" d="M404 794Q317 899 278 981T238 1145Q238 1298 329 1387T573 1476Q712 1476 798 1396T884 1191Q884 1047 718 908L558 784L958 318Q1049 465 1049 651H1160Q1160 403 1032 232L1231 0H1087L961 146Q882 68 779 24T560
+-20Q352 -20 230 86T108 371Q108 477 170 571T390 784L404 794ZM560 81Q651 81 736 119T890 229L483 701L469 716L423 681Q227 521 227 371Q227 240 317 161T560 81ZM358 1149Q358 1027 493 861L624 961Q688 1007 729 1062T770 1191Q770 1269 716 1321T572 1374Q474
+1374 416 1311T358 1149Z" />
+<glyph unicode="&apos;" horiz-adv-x="348" d="M226 1395L209 1090H119Q124 1386 124 1536H226V1395Z" />
+<glyph unicode="(" horiz-adv-x="653" d="M140 588Q140 806 196 1011T360 1387T592 1632L621 1551Q555 1504 490 1414T374 1200T292 922T260 571Q260 362 307 169T438 -171T621 -393L592 -470Q465 -394 357 -225T195 148T140 588Z" />
+<glyph unicode=")" horiz-adv-x="667" d="M514 573Q514 353 460 150T298 -223T62 -470L33 -393Q131 -323 214 -176T346 166T394 591Q394 798 346 990T214 1334T33 1555L62 1632Q188 1555 295 1386T458 1011T514 573Z" />
+<glyph unicode="*" horiz-adv-x="869" d="M361 1000L29 1108L61 1209L393 1086L389 1456H493L485 1083L809 1210L842 1109L509 994L732 700L647 637L433 942L229 639L144 700L361 1000Z" />
+<glyph unicode="+" horiz-adv-x="1156" d="M630 740H1073V628H630V146H509V628H75V740H509V1206H630V740Z" />
+<glyph unicode="," horiz-adv-x="392" d="M131 -272L60 -220Q151 -98 154 33V188H271V63Q271 -145 131 -272Z" />
+<glyph unicode="-" horiz-adv-x="586" d="M528 592H49V693H528V592Z" />
+<glyph unicode="." horiz-adv-x="489" d="M145 72Q145 107 167 131T230 156T293 132T316 72T293 15T230 -8T168 14T145 72Z" />
+<glyph unicode="/" horiz-adv-x="813" d="M139 -125H30L638 1456H746L139 -125Z" />
+<glyph unicode="0" d="M1015 607Q1015 299 902 140T569 -20Q353 -20 238 136T120 592V853Q120 1160 234 1318T567 1476Q783 1476 897 1324T1015 874V607ZM895 868Q895 1118 814 1246T567 1374Q405 1374 323 1249T239 880V594Q239 345 323 213T569 81Q729 81 811
+210T895 588V868Z" />
+<glyph unicode="1" d="M694 0H574V1312L178 1165V1277L674 1461H694V0Z" />
+<glyph unicode="2" d="M1049 0H137V92L636 658Q760 801 808 894T856 1075Q856 1213 775 1293T552 1374Q405 1374 315 1280T224 1036H105Q105 1159 160 1260T318 1418T552 1476Q752 1476 864 1371T977 1085Q977 983 914 862T690 560L284 101H1049V0Z" />
+<glyph unicode="3" d="M403 793H527Q630 793 707 829T824 929T865 1076Q865 1216 786 1295T559 1374Q419 1374 330 1292T240 1074H120Q120 1187 177 1280T335 1425T559 1476Q757 1476 871 1368T985 1072Q985 967 919 879T736 746Q872 708 942 616T1012 395Q1012
+208 890 94T564 -20Q434 -20 326 32T158 177T98 395H218Q218 256 315 169T564 81Q719 81 805 160T892 391Q892 537 799 614T523 691H403V793Z" />
+<glyph unicode="4" d="M872 469H1099V368H872V0H752V368H67V436L741 1456H872V469ZM214 469H752V1301L699 1209L214 469Z" />
+<glyph unicode="5" d="M218 746L289 1456H1017V1345H392L341 853Q458 933 615 933Q812 933 929 805T1046 464Q1046 234 932 107T611 -20Q421 -20 303 86T168 383H283Q300 234 384 158T611 81Q767 81 846 180T926 462Q926 622 837 723T594 824Q509 824 446 803T313
+719L218 746Z" />
+<glyph unicode="6" d="M843 1467V1362H829Q568 1362 418 1209T252 782Q312 865 405 910T613 956Q805 956 918 824T1032 477Q1032 335 979 221T827 44T601 -20Q392 -20 261 131T130 523V643Q130 1034 308 1248T813 1467H843ZM594 853Q480 853 382 786T250 614V512Q250
+322 347 202T601 82Q741 82 827 193T914 473Q914 645 828 749T594 853Z" />
+<glyph unicode="7" d="M1034 1387L412 0H287L905 1354H77V1456H1034V1387Z" />
+<glyph unicode="8" d="M995 1081Q995 968 929 879T755 747Q881 704 957 608T1033 386Q1033 199 906 90T570 -20Q359 -20 233 89T106 386Q106 510 179 607T379 747Q271 789 207 878T143 1081Q143 1262 259 1369T568 1476T877 1368T995 1081ZM913 385Q913 521 816
+608T568 696T321 610T225 385T318 164T570 81Q725 81 819 163T913 385ZM875 1082Q875 1207 789 1290T568 1374Q432 1374 348 1294T263 1082Q263 954 347 876T569 798Q704 798 789 876T875 1082Z" />
+<glyph unicode="9" d="M884 674Q820 580 725 529T519 477Q395 477 300 541T153 718T101 965Q101 1109 156 1227T311 1410T541 1476Q760 1476 882 1323T1004 887V779Q1004 385 836 187T323 -11H301L302 93H344Q605 97 741 241T884 674ZM534 580Q654 580 749 651T885
+837V906Q885 1128 793 1250T543 1373Q401 1373 310 1259T219 970Q219 803 306 692T534 580Z" />
+<glyph unicode=":" horiz-adv-x="430" d="M383 72Q383 107 405 131T468 156T531 132T554 72T531 15T468 -8T406 14T383 72ZM129 995Q129 1030 151 1054T214 1079T277 1055T300 995T277 938T214 915T152 937T129 995Z" />
+<glyph unicode=";" horiz-adv-x="399" d="M118 995Q118 1030 140 1054T203 1079T266 1055T289 995T266 938T203 915T141 937T118 995ZM131 -272L60 -220Q151 -98 154 33V188H271V63Q271 -145 131 -272Z" />
+<glyph unicode="&lt;" horiz-adv-x="1047" d="M208 655L904 355V229L77 608V705L904 1083V957L208 655Z" />
+<glyph unicode="=" horiz-adv-x="1133" d="M983 829H149V935H983V829ZM983 418H149V524H983V418Z" />
+<glyph unicode="&gt;" horiz-adv-x="1061" d="M835 659L124 962V1085L969 707V610L124 231V355L835 659Z" />
+<glyph unicode="?" horiz-adv-x="930" d="M376 404Q378 522 408 594T537 763T664 901T708 990T724 1101Q724 1226 658 1297T472 1369Q352 1369 279 1301T203 1115H84Q86 1279 195 1377T472 1476Q644 1476 743 1376T843 1103Q843 995 794 901T608 680Q495 585 495
+404H376ZM360 70Q360 104 381 127T442 151Q480 151 502 128T525 70Q525 37 503 15T442 -8Q403 -8 382 14T360 70Z" />
+<glyph unicode="@" horiz-adv-x="1870" d="M1754 513Q1749 366 1700 241T1565 48T1364 -20Q1267 -20 1206 31T1125 174Q1017 -20 827 -20Q687 -20 618 101T567 427Q582 590 641 717T796 916T1001 988Q1078 988 1136 967T1271 880L1220 310Q1210 194 1249 130T1376
+66Q1499 66 1575 186T1661 513Q1680 918 1507 1122T983 1327Q772 1327 603 1222T335 923T225 478T291 35T528 -260T906 -363Q998 -363 1087 -341T1236 -284L1267 -364Q1210 -402 1108 -427T902 -453Q652 -453 472 -341T203 -17T125 478Q137 756 247 970T550 1302T987
+1420Q1242 1420 1419 1314T1681 1002T1754 513ZM673 286Q684 186 729 132T848 77Q1033 77 1121 332L1166 848Q1099 897 1008 897Q897 897 816 809T696 565T673 286Z" />
+<glyph unicode="A" horiz-adv-x="1279" d="M970 408H309L159 0H30L581 1456H698L1249 0H1121L970 408ZM347 513H931L639 1306L347 513Z" />
+<glyph unicode="B" horiz-adv-x="1255" d="M184 0V1456H614Q848 1456 969 1360T1090 1075Q1090 962 1029 879T860 759Q987 731 1064 634T1142 410Q1142 217 1018 109T671 0H184ZM307 700V104H676Q834 104 926 184T1019 408Q1019 543 931 621T686 700H307ZM307
+803H643Q797 806 881 875T966 1078Q966 1218 879 1284T614 1351H307V803Z" />
+<glyph unicode="C" horiz-adv-x="1330" d="M1215 454Q1190 224 1051 102T679 -20Q517 -20 393 61T200 290T131 630V819Q131 1013 199 1163T394 1394T688 1476Q922 1476 1057 1350T1215 1000H1091Q1045 1371 688 1371Q490 1371 373 1223T255 814V636Q255 384 369
+234T679 84Q872 84 970 176T1091 454H1215Z" />
+<glyph unicode="D" horiz-adv-x="1341" d="M184 0V1456H591Q770 1456 912 1375T1133 1141T1213 795V661Q1213 466 1134 315T912 82T582 0H184ZM307 1351V104H583Q813 104 952 256T1091 669V797Q1091 1048 954 1199T593 1351H307Z" />
+<glyph unicode="E" horiz-adv-x="1165" d="M988 698H307V104H1090V0H184V1456H1085V1351H307V802H988V698Z" />
+<glyph unicode="F" horiz-adv-x="1152" d="M986 680H307V0H184V1456H1086V1351H307V785H986V680Z" />
+<glyph unicode="G" horiz-adv-x="1400" d="M1235 173Q1171 82 1035 31T729 -20Q558 -20 425 62T219 294T145 638V822Q145 1125 298 1300T709 1476Q934 1476 1071 1362T1234 1046H1111Q1084 1206 981 1288T710 1371Q506 1371 387 1226T268 817V645Q268 479 324
+352T486 154T729 84Q888 84 1002 134Q1076 167 1112 211V587H721V691H1235V173Z" />
+<glyph unicode="H" horiz-adv-x="1449" d="M1263 0H1139V698H307V0H184V1456H307V802H1139V1456H1263V0Z" />
+<glyph unicode="I" horiz-adv-x="545" d="M334 0H211V1456H334V0Z" />
+<glyph unicode="J" horiz-adv-x="1127" d="M827 1456H951V433Q951 226 832 103T511 -20Q299 -20 185 91T71 401H194Q194 243 277 164T511 84Q650 84 737 176T827 426V1456Z" />
+<glyph unicode="K" horiz-adv-x="1292" d="M512 723L307 521V0H184V1456H307V671L1053 1456H1208L598 808L1255 0H1105L512 723Z" />
+<glyph unicode="L" horiz-adv-x="1079" d="M308 104H1027V0H184V1456H308V104Z" />
+<glyph unicode="M" horiz-adv-x="1772" d="M347 1456L884 171L1423 1456H1587V0H1464V634L1474 1284L932 0H837L297 1279L307 638V0H184V1456H347Z" />
+<glyph unicode="N" horiz-adv-x="1454" d="M1268 0H1145L308 1246V0H184V1456H308L1146 209V1456H1268V0Z" />
+<glyph unicode="O" horiz-adv-x="1386" d="M1260 649Q1260 448 1191 296T992 62T694 -20Q439 -20 282 162T125 655V805Q125 1004 195 1157T395 1393T692 1476T988 1395T1187 1166T1260 823V649ZM1137 807Q1137 1070 1018 1219T692 1368Q489 1368 369 1219T248
+801V649Q248 390 368 239T694 87Q903 87 1020 236T1137 653V807Z" />
+<glyph unicode="P" horiz-adv-x="1261" d="M307 593V0H184V1456H680Q907 1456 1038 1340T1170 1021Q1170 816 1044 705T677 593H307ZM307 697H680Q859 697 953 782T1047 1019Q1047 1170 954 1259T688 1351H307V697Z" />
+<glyph unicode="Q" horiz-adv-x="1386" d="M1256 649Q1256 441 1183 287T973 53L1238 -178L1153 -254L856 3Q774 -20 689 -20Q523 -20 394 62T193 294T121 642V805Q121 1004 191 1157T391 1393T687 1476Q857 1476 986 1394T1185 1159T1256 806V649ZM1133 807Q1133
+1070 1014 1219T687 1368Q485 1368 365 1219T244 801V649Q244 390 363 239T689 87Q897 87 1015 236T1133 652V807Z" />
+<glyph unicode="R" horiz-adv-x="1300" d="M728 606H305V0H181V1456H654Q887 1456 1018 1343T1149 1027Q1149 887 1067 780T847 632L1211 13V0H1080L728 606ZM305 711H682Q837 711 931 799T1025 1027Q1025 1181 927 1266T652 1351H305V711Z" />
+<glyph unicode="S" horiz-adv-x="1213" d="M1008 358Q1008 479 923 549T612 683T282 822Q134 928 134 1100Q134 1267 271 1371T623 1476Q768 1476 882 1420T1060 1264T1123 1041H999Q999 1190 897 1280T623 1371Q456 1371 357 1297T258 1102Q258 991 347 921T632
+798T929 687T1081 549T1132 360Q1132 188 995 84T632 -20Q478 -20 350 35T155 189T88 416H211Q211 262 326 173T632 84Q802 84 905 159T1008 358Z" />
+<glyph unicode="T" horiz-adv-x="1223" d="M1172 1351H673V0H550V1351H52V1456H1172V1351Z" />
+<glyph unicode="U" horiz-adv-x="1346" d="M1187 1456V462Q1186 315 1122 206T942 39T674 -20Q444 -20 306 105T162 453V1456H284V471Q284 287 389 186T674 84T958 186T1063 470V1456H1187Z" />
+<glyph unicode="V" horiz-adv-x="1263" d="M623 180L631 149L640 180L1098 1456H1233L691 0H573L31 1456H165L623 180Z" />
+<glyph unicode="W" horiz-adv-x="1836" d="M453 393L498 167L553 383L869 1456H980L1292 383L1346 165L1394 393L1657 1456H1783L1410 0H1292L962 1139L925 1283L889 1139L551 0H433L61 1456H187L453 393Z" />
+<glyph unicode="X" horiz-adv-x="1253" d="M627 840L1037 1456H1184L702 738L1199 0H1051L627 636L201 0H55L553 738L70 1456H217L627 840Z" />
+<glyph unicode="Y" horiz-adv-x="1226" d="M611 662L1056 1456H1198L672 548V0H549V548L24 1456H170L611 662Z" />
+<glyph unicode="Z" horiz-adv-x="1225" d="M239 104H1138V0H90V93L954 1351H116V1456H1106V1368L239 104Z" />
+<glyph unicode="[" horiz-adv-x="491" d="M493 1562H283V-210H493V-312H163V1664H493V1562Z" />
+<glyph unicode="\" horiz-adv-x="807" d="M48 1456H165L773 -125H656L48 1456Z" />
+<glyph unicode="]" horiz-adv-x="491" d="M0 1664H331V-312H0V-210H211V1562H0V1664Z" />
+<glyph unicode="^" horiz-adv-x="852" d="M421 1298L193 729H77L376 1456H466L764 729H648L421 1298Z" />
+<glyph unicode="_" horiz-adv-x="884" d="M882 -101H1V0H882V-101Z" />
+<glyph unicode="`" horiz-adv-x="585" d="M438 1256H329L103 1536H247L438 1256Z" />
+<glyph unicode="a" horiz-adv-x="1097" d="M839 0Q821 51 816 151Q753 69 656 25T449 -20Q293 -20 197 67T100 287Q100 445 231 537T598 629H815V752Q815 868 744 934T535 1001Q410 1001 328 937T246 783L126 784Q126 913 246 1007T541 1102Q722 1102 826 1012T934
+759V247Q934 90 967 12V0H839ZM463 86Q583 86 677 144T815 299V537H601Q422 535 321 472T220 297Q220 206 287 146T463 86Z" />
+<glyph unicode="b" d="M1027 530Q1027 277 915 129T614 -20Q388 -20 272 148L267 0H155V1536H274V925Q388 1102 612 1102Q804 1102 915 956T1027 548V530ZM907 551Q907 765 824 881T590 998Q475 998 395 942T274 776V288Q364 84 592 84Q740 84 823 201T907 551Z" />
+<glyph unicode="c" horiz-adv-x="1055" d="M556 81Q681 81 765 151T857 334H972Q967 235 910 154T759 26T556 -20Q343 -20 219 128T94 526V562Q94 722 150 845T310 1035T555 1102Q733 1102 848 996T972 717H857Q849 844 766 922T555 1000Q393 1000 304 883T214
+555V520Q214 313 303 197T556 81Z" />
+<glyph unicode="d" horiz-adv-x="1138" d="M108 551Q108 803 220 952T526 1102Q745 1102 860 929V1536H979V0H867L862 144Q747 -20 524 -20Q337 -20 223 130T108 537V551ZM229 530Q229 323 312 204T546 84Q767 84 860 279V787Q767 998 548 998Q397 998 313 880T229 530Z" />
+<glyph unicode="e" horiz-adv-x="1058" d="M575 -20Q437 -20 326 48T152 237T90 510V553Q90 709 150 834T319 1030T553 1102Q750 1102 865 968T981 600V533H209V510Q209 326 314 204T580 81Q676 81 749 116T883 228L958 171Q826 -20 575 -20ZM553 1000Q418 1000
+326 901T213 635H862V648Q857 804 773 902T553 1000Z" />
+<glyph unicode="f" horiz-adv-x="678" d="M242 0V984H63V1082H242V1213Q242 1379 326 1468T562 1557Q630 1557 689 1540L680 1440Q630 1452 571 1452Q472 1452 417 1391T362 1216V1082H620V984H362V0H242Z" />
+<glyph unicode="g" horiz-adv-x="1136" d="M108 551Q108 805 220 953T526 1102Q747 1102 862 926L868 1082H980V22Q980 -187 863 -309T546 -431Q433 -431 331 -381T169 -246L236 -174Q363 -330 538 -330Q688 -330 772 -242T859 4V140Q744 -20 524 -20Q336 -20
+222 130T108 535V551ZM229 530Q229 323 312 204T546 84Q767 84 859 282V785Q817 889 738 943T548 998Q397 998 313 880T229 530Z" />
+<glyph unicode="h" horiz-adv-x="1124" d="M275 899Q334 996 426 1049T627 1102Q801 1102 886 1004T972 710V0H853V711Q852 856 792 927T598 998Q487 998 402 929T275 741V0H156V1536H275V899Z" />
+<glyph unicode="i" horiz-adv-x="459" d="M290 0H170V1082H290V0ZM149 1395Q149 1429 171 1452T231 1476T291 1453T314 1395T292 1338T231 1315T171 1338T149 1395Z" />
+<glyph unicode="j" horiz-adv-x="467" d="M285 1082V-129Q285 -279 213 -358T1 -437Q-53 -437 -104 -418L-102 -319Q-58 -332 -12 -332Q166 -332 166 -127V1082H285ZM226 1476Q265 1476 287 1453T309 1395T287 1338T226 1315Q188 1315 167 1338T145 1395T166 1452T226
+1476Z" />
+<glyph unicode="k" horiz-adv-x="1003" d="M413 545L276 413V0H156V1536H276V553L389 675L803 1082H954L495 626L994 0H851L413 545Z" />
+<glyph unicode="l" horiz-adv-x="459" d="M290 0H170V1536H290V0Z" />
+<glyph unicode="m" horiz-adv-x="1815" d="M265 1082L269 906Q329 1004 419 1053T619 1102Q875 1102 944 892Q1002 993 1099 1047T1313 1102Q1661 1102 1668 722V0H1548V713Q1547 858 1486 928T1285 998Q1156 996 1067 915T968 716V0H848V722Q847 861 783 929T584
+998Q471 998 390 934T270 742V0H150V1082H265Z" />
+<glyph unicode="n" horiz-adv-x="1125" d="M270 1082L274 897Q335 997 426 1049T627 1102Q801 1102 886 1004T972 710V0H853V711Q852 856 792 927T598 998Q487 998 402 929T275 741V0H156V1082H270Z" />
+<glyph unicode="o" horiz-adv-x="1147" d="M90 557Q90 713 150 838T321 1032T572 1102Q788 1102 922 951T1056 549V524Q1056 367 996 242T825 48T574 -20Q359 -20 225 131T90 533V557ZM210 524Q210 330 310 206T574 81Q736 81 836 205T937 534V557Q937 681 891
+784T762 943T572 1000Q412 1000 311 875T210 546V524Z" />
+<glyph unicode="p" d="M1026 530Q1026 277 914 129T614 -20Q392 -20 274 136V-416H155V1082H266L272 929Q389 1102 611 1102Q805 1102 915 955T1026 547V530ZM906 551Q906 758 821 878T584 998Q474 998 395 945T274 791V272Q317 179 397 130T586 81Q737 81 821
+201T906 551Z" />
+<glyph unicode="q" horiz-adv-x="1142" d="M108 551Q108 805 220 953T528 1102Q747 1102 861 935L867 1082H979V-416H859V134Q741 -20 526 -20Q336 -20 222 130T108 535V551ZM229 530Q229 320 313 201T548 81Q763 81 859 268V798Q814 895 735 947T550 1000Q399
+1000 314 881T229 530Z" />
+<glyph unicode="r" horiz-adv-x="689" d="M656 980Q618 987 575 987Q463 987 386 925T275 743V0H156V1082H273L275 910Q370 1102 580 1102Q630 1102 659 1089L656 980Z" />
+<glyph unicode="s" horiz-adv-x="1037" d="M804 275Q804 364 733 418T517 502T294 572T176 669T137 807Q137 935 244 1018T518 1102Q699 1102 808 1013T918 779H798Q798 874 719 937T518 1000Q400 1000 329 948T257 811Q257 730 316 686T533 604T769 525T886 424T924
+281Q924 144 814 62T525 -20Q336 -20 219 71T101 303H221Q228 198 309 140T525 81Q650 81 727 136T804 275Z" />
+<glyph unicode="t" horiz-adv-x="658" d="M342 1359V1082H566V984H342V263Q342 173 374 129T483 85Q513 85 580 95L585 -3Q538 -20 457 -20Q334 -20 278 51T222 262V984H23V1082H222V1359H342Z" />
+<glyph unicode="u" horiz-adv-x="1125" d="M852 137Q744 -20 507 -20Q334 -20 244 80T152 378V1082H271V393Q271 84 521 84Q781 84 850 299V1082H970V0H854L852 137Z" />
+<glyph unicode="v" horiz-adv-x="985" d="M493 165L822 1082H945L541 0H444L38 1082H161L493 165Z" />
+<glyph unicode="w" horiz-adv-x="1544" d="M415 249L433 156L457 254L717 1082H819L1076 261L1104 147L1127 252L1349 1082H1473L1158 0H1056L778 858L765 917L752 857L479 0H377L63 1082H186L415 249Z" />
+<glyph unicode="x" horiz-adv-x="996" d="M496 643L788 1082H930L563 551L946 0H805L497 458L189 0H48L430 551L63 1082H204L496 643Z" />
+<glyph unicode="y" horiz-adv-x="973" d="M499 172L815 1082H944L482 -184L458 -240Q369 -437 183 -437Q140 -437 91 -423L90 -324L152 -330Q240 -330 294 -287T387 -137L440 9L32 1082H163L499 172Z" />
+<glyph unicode="z" horiz-adv-x="996" d="M235 101H938V0H87V88L743 979H107V1082H894V993L235 101Z" />
+<glyph unicode="{" horiz-adv-x="676" d="M637 -404Q469 -354 384 -241T299 59V280Q299 543 68 543V647Q299 647 299 908V1137Q300 1320 384 1433T637 1597L663 1518Q419 1440 419 1127V914Q419 668 235 595Q419 518 419 277V49Q423 -243 666 -324L637 -404Z" />
+<glyph unicode="|" horiz-adv-x="452" d="M279 -270H178V1456H279V-270Z" />
+<glyph unicode="}" horiz-adv-x="676" d="M9 -324Q252 -243 256 49V273Q256 526 449 594Q256 662 256 913V1126Q256 1442 12 1518L38 1597Q209 1546 292 1432T376 1131V908Q376 647 607 647V543Q376 543 376 280V59Q376 -128 291 -241T38 -404L9 -324Z" />
+<glyph unicode="~" horiz-adv-x="1402" d="M1254 764Q1254 615 1171 519T958 423Q886 423 824 450T670 558T535 659T441 680Q352 680 303 621T253 450L145 449Q145 598 226 692T441 787Q515 787 581 756T740 643Q807 580 855 555T958 529Q1046 529 1098 592T1150
+764H1254Z" />
+<glyph unicode="&#xa0;" horiz-adv-x="498" />
+<glyph unicode="&#xa1;" horiz-adv-x="452" d="M174 690H285L292 -359H168L174 690ZM305 1022Q305 988 283 965T223 942T163 965T140 1022T162 1079T223 1102T283 1079T305 1022Z" />
+<glyph unicode="&#xa2;" horiz-adv-x="1115" d="M581 81Q704 81 788 150T882 334H997Q989 195 887 97T636 -17V-245H516V-16Q331 7 225 150T119 526V562Q119 784 224 929T516 1098V1318H636V1099Q791 1083 891 978T997 717H882Q874 844 791 922T580 1000Q418 1000
+329 883T239 555V520Q239 313 328 197T581 81Z" />
+<glyph unicode="&#xa3;" horiz-adv-x="1170" d="M404 645L413 368Q415 194 349 104H1094V0H97V104H195Q246 117 272 211Q292 285 290 367L281 645H93V749H277L268 1039Q268 1239 378 1357T674 1476Q856 1476 961 1371T1067 1088H944Q944 1223 869 1297T665 1371Q540
+1371 466 1283T392 1039L401 749H745V645H404Z" />
+<glyph unicode="&#xa4;" horiz-adv-x="1481" d="M1131 133Q1053 61 953 21T740 -20Q514 -20 349 132L194 -26L109 60L268 221Q144 389 144 608Q144 835 277 1006L109 1177L194 1264L361 1094Q526 1234 740 1234T1119 1092L1289 1265L1375 1177L1204 1002Q1334
+832 1334 608Q1334 393 1212 224L1375 60L1289 -27L1131 133ZM257 608Q257 470 321 350T499 161T740 91Q869 91 981 161T1157 350T1221 608Q1221 747 1156 866T979 1054T740 1122T500 1054T323 867T257 608Z" />
+<glyph unicode="&#xa5;" horiz-adv-x="1056" d="M527 731L892 1456H1030L631 705H944V616H586V412H944V324H586V0H463V324H109V412H463V616H109V705H422L24 1456H163L527 731Z" />
+<glyph unicode="&#xa6;" horiz-adv-x="444" d="M159 -270V501H279V-270H159ZM279 698H159V1456H279V698Z" />
+<glyph unicode="&#xa7;" horiz-adv-x="1239" d="M1119 431Q1119 331 1058 262T887 159Q978 111 1026 41T1075 -139Q1075 -303 949 -399T606 -495Q497 -495 401 -467T236 -382Q102 -268 102 -64L222 -62Q222 -218 325 -305T606 -393Q766 -393 860 -324T954 -141Q954
+-64 920 -17T805 69T548 156T284 255T153 378T108 551Q108 651 166 721T331 825Q245 872 199 942T153 1120Q153 1281 282 1378T624 1476Q848 1476 972 1363T1097 1045H977Q977 1191 881 1282T624 1374Q459 1374 366 1306T273 1122Q273 1043 304 996T411 911T646
+828Q842 777 936 726T1075 603T1119 431ZM454 771Q346 758 287 700T228 553Q228 470 263 422T379 336T663 242L755 214Q867 227 933 284T999 428Q999 526 932 585T692 700L454 771Z" />
+<glyph unicode="&#xa8;" horiz-adv-x="881" d="M137 1396Q137 1430 159 1453T219 1477T279 1454T302 1396Q302 1363 280 1340T219 1317T159 1340T137 1396ZM575 1395Q575 1429 597 1452T657 1476T717 1453T740 1395Q740 1362 718 1339T657 1316T597 1339T575 1395Z" />
+<glyph unicode="&#xa9;" horiz-adv-x="1637" d="M1121 607Q1121 455 1039 374T807 293T566 399T474 686V776Q474 950 566 1056T807 1163T1039 1083T1122 850H1023Q1023 1074 807 1074Q701 1074 637 993T573 771V680Q573 546 636 465T807 383Q913 383 967 436T1022
+607H1121ZM192 729Q192 553 273 399T502 155T817 65Q984 65 1129 154T1357 396T1441 729Q1441 907 1358 1059T1130 1300T817 1389Q646 1389 499 1298T272 1055T192 729ZM107 729Q107 931 200 1104T459 1376T817 1476T1174 1377T1432 1104T1526 729Q1526 532 1436
+360T1181 84T817 -21Q620 -21 455 82T198 358T107 729Z" />
+<glyph unicode="&#xaa;" horiz-adv-x="906" d="M649 705Q634 748 628 799Q541 691 406 691Q289 691 223 749T157 908Q157 1018 240 1079T486 1140H625V1201Q625 1286 585 1333T464 1380Q374 1380 323 1345T271 1237L164 1243Q164 1345 247 1410T464 1476Q588 1476
+661 1405T734 1199V884Q734 792 760 705H649ZM426 786Q479 786 536 816T625 890V1058H496Q266 1058 266 912Q266 786 426 786Z" />
+<glyph unicode="&#xab;" horiz-adv-x="933" d="M247 792L523 404H418L123 783V802L418 1181H523L247 792ZM556 536L832 148H727L432 527V546L727 925H832L556 536Z" />
+<glyph unicode="&#xac;" horiz-adv-x="1117" d="M936 386H816V670H124V776H936V386Z" />
+<glyph unicode="&#xad;" horiz-adv-x="586" d="M528 592H49V693H528V592Z" />
+<glyph unicode="&#xae;" horiz-adv-x="1642" d="M102 729Q102 931 195 1104T454 1376T812 1476T1169 1377T1428 1104T1522 729Q1522 530 1431 358T1175 83T812 -21T450 82T193 358T102 729ZM187 729Q187 550 270 396T499 154T812 65T1125 153T1353 396T1436 729Q1436
+905 1355 1057T1129 1299T812 1389Q644 1389 499 1301T270 1060T187 729ZM650 666V321H552V1160H810Q957 1160 1036 1099T1115 912Q1115 779 974 715Q1046 689 1074 635T1102 504T1106 394T1119 337V321H1017Q1003 357 1003 503Q1003 592 966 629T838 666H650ZM650
+757H831Q912 757 964 799T1017 910Q1017 995 974 1031T824 1070H650V757Z" />
+<glyph unicode="&#xaf;" horiz-adv-x="874" d="M756 1343H137V1440H756V1343Z" />
+<glyph unicode="&#xb0;" horiz-adv-x="774" d="M630 1226Q630 1122 559 1051T388 980Q287 980 215 1051T143 1226T216 1402T388 1476T558 1403T630 1226ZM233 1226Q233 1159 277 1115T388 1071T497 1115T540 1226Q540 1295 497 1340T388 1385Q323 1385 278 1340T233
+1226Z" />
+<glyph unicode="&#xb1;" horiz-adv-x="1085" d="M609 829H1000V727H609V289H498V727H84V829H498V1267H609V829ZM963 0H128V101H963V0Z" />
+<glyph unicode="&#xb2;" horiz-adv-x="740" d="M667 665H96V740L416 1054Q522 1164 522 1237Q522 1300 482 1338T362 1377Q275 1377 228 1333T181 1215H76Q76 1323 155 1394T360 1465T557 1403T628 1239Q628 1138 510 1016L455 961L229 752H667V665Z" />
+<glyph unicode="&#xb3;" horiz-adv-x="740" d="M267 1107H353Q434 1109 481 1145T529 1241Q529 1303 486 1340T362 1377Q286 1377 238 1340T190 1245H85Q85 1341 163 1403T361 1465Q489 1465 562 1405T635 1243Q635 1187 597 1140T489 1069Q651 1027 651 880Q651
+778 572 716T363 654Q234 654 153 717T71 884H177Q177 822 229 782T366 741Q453 741 499 779T546 883Q546 1025 340 1025H267V1107Z" />
+<glyph unicode="&#xb4;" horiz-adv-x="576" d="M315 1536H460L229 1256H124L315 1536Z" />
+<glyph unicode="&#xb5;" horiz-adv-x="1140" d="M281 1082V446Q281 266 344 174T544 81Q676 81 753 138T859 312V1082H979V0H870L863 154Q765 -20 552 -20Q368 -20 281 105V-416H162V1082H281Z" />
+<glyph unicode="&#xb6;" horiz-adv-x="973" d="M681 0V520H573Q423 520 312 578T142 742T83 988Q83 1201 216 1328T577 1456H801V0H681Z" />
+<glyph unicode="&#xb7;" horiz-adv-x="503" d="M163 717Q163 752 185 776T247 800T310 776T333 717T310 659T247 635T185 658T163 717Z" />
+<glyph unicode="&#xb8;" horiz-adv-x="498" d="M246 0L234 -64Q399 -85 399 -235Q399 -327 320 -381T105 -435L98 -357Q187 -357 243 -325T300 -237Q300 -179 257 -157T124 -127L153 0H246Z" />
+<glyph unicode="&#xb9;" horiz-adv-x="740" d="M464 665H358V1328L126 1258V1348L450 1455H464V665Z" />
+<glyph unicode="&#xba;" horiz-adv-x="922" d="M135 1132Q135 1285 223 1380T458 1476Q605 1476 693 1381T782 1127V1033Q782 880 694 785T460 690Q313 690 224 784T135 1038V1132ZM243 1033Q243 919 299 852T460 785Q559 785 616 851T674 1037V1132Q674 1247
+616 1313T458 1380T301 1312T243 1127V1033Z" />
+<glyph unicode="&#xbb;" horiz-adv-x="928" d="M221 944L516 560V541L221 162H115L391 550L115 944H221ZM540 944L835 560V541L540 162H434L710 550L434 944H540Z" />
+<glyph unicode="&#xbc;" horiz-adv-x="1484" d="M453 664H347V1327L115 1257V1347L439 1454H453V664ZM414 129L340 177L1051 1315L1125 1267L414 129ZM1272 275H1399V187H1272V0H1167V187H768L764 253L1161 789H1272V275ZM878 275H1167V659L1136 609L878 275Z" />
+<glyph unicode="&#xbd;" horiz-adv-x="1548" d="M370 129L296 177L1007 1315L1081 1267L370 129ZM438 664H332V1327L100 1257V1347L424 1454H438V664ZM1436 0H865V75L1185 389Q1291 499 1291 572Q1291 635 1251 673T1131 712Q1044 712 997 668T950 550H845Q845
+658 924 729T1129 800T1326 738T1397 574Q1397 473 1279 351L1224 296L998 87H1436V0Z" />
+<glyph unicode="&#xbe;" horiz-adv-x="1590" d="M558 129L484 177L1195 1315L1269 1267L558 129ZM1387 275H1514V187H1387V0H1282V187H883L879 253L1276 789H1387V275ZM993 275H1282V659L1251 609L993 275ZM314 1107H400Q481 1109 528 1145T576 1241Q576 1303
+533 1340T409 1377Q333 1377 285 1340T237 1245H132Q132 1341 210 1403T408 1465Q536 1465 609 1405T682 1243Q682 1187 644 1140T536 1069Q698 1027 698 880Q698 778 619 716T410 654Q281 654 200 717T118 884H224Q224 822 276 782T413 741Q500 741 546 779T593
+883Q593 1025 387 1025H314V1107Z" />
+<glyph unicode="&#xbf;" horiz-adv-x="940" d="M551 687Q549 564 524 505T405 352T288 228Q207 123 207 -8Q207 -137 274 -207T469 -277Q588 -277 659 -207T732 -20H852Q850 -186 745 -284T469 -383Q291 -383 190 -283T88 -10Q88 101 141 202T337 438Q422 509
+429 618L431 687H551ZM567 1022Q567 988 545 965T485 941T425 964T402 1022Q402 1055 424 1078T485 1101T545 1078T567 1022Z" />
+<glyph unicode="&#xc0;" horiz-adv-x="1279" d="M970 408H309L159 0H30L581 1456H698L1249 0H1121L970 408ZM347 513H931L639 1306L347 513ZM716 1571H607L381 1851H525L716 1571Z" />
+<glyph unicode="&#xc1;" horiz-adv-x="1279" d="M970 408H309L159 0H30L581 1456H698L1249 0H1121L970 408ZM347 513H931L639 1306L347 513ZM762 1851H907L676 1571H571L762 1851Z" />
+<glyph unicode="&#xc2;" horiz-adv-x="1279" d="M970 408H309L159 0H30L581 1456H698L1249 0H1121L970 408ZM347 513H931L639 1306L347 513ZM921 1583V1573H810L642 1756L475 1573H366V1586L604 1841H680L921 1583Z" />
+<glyph unicode="&#xc3;" horiz-adv-x="1279" d="M970 408H309L159 0H30L581 1456H698L1249 0H1121L970 408ZM347 513H931L639 1306L347 513ZM983 1809Q983 1713 927 1655T788 1596Q712 1596 640 1651T510 1706Q463 1706 432 1675T400 1588L310 1591Q310 1683 364
+1743T505 1803Q553 1803 587 1786T651 1748T711 1710T783 1693Q829 1693 861 1726T894 1815L983 1809Z" />
+<glyph unicode="&#xc4;" horiz-adv-x="1279" d="M970 408H309L159 0H30L581 1456H698L1249 0H1121L970 408ZM347 513H931L639 1306L347 513ZM343 1711Q343 1745 365 1768T425 1792T485 1769T508 1711Q508 1678 486 1655T425 1632T365 1655T343 1711ZM781 1710Q781
+1744 803 1767T863 1791T923 1768T946 1710Q946 1677 924 1654T863 1631T803 1654T781 1710Z" />
+<glyph unicode="&#xc5;" horiz-adv-x="1279" d="M970 408H309L159 0H30L581 1456H698L1249 0H1121L970 408ZM347 513H931L639 1306L347 513ZM450 1715Q450 1795 506 1850T643 1905Q722 1905 779 1850T836 1715Q836 1636 781 1582T643 1528T505 1582T450 1715ZM527
+1715Q527 1665 560 1632T643 1599Q692 1599 726 1631T760 1715Q760 1768 725 1801T643 1834Q594 1834 561 1800T527 1715Z" />
+<glyph unicode="&#xc6;" horiz-adv-x="1865" d="M1823 0H1006L989 389H393L163 0H17L898 1456H1762V1354H1068L1091 809H1680V707H1095L1121 101H1823V0ZM460 502H985L950 1331L460 502Z" />
+<glyph unicode="&#xc7;" horiz-adv-x="1330" d="M1215 454Q1190 224 1051 102T679 -20Q517 -20 393 61T200 290T131 630V819Q131 1013 199 1163T394 1394T688 1476Q922 1476 1057 1350T1215 1000H1091Q1045 1371 688 1371Q490 1371 373 1223T255 814V636Q255 384
+369 234T679 84Q872 84 970 176T1091 454H1215ZM728 -9L716 -73Q881 -94 881 -244Q881 -336 802 -390T587 -444L580 -366Q669 -366 725 -334T782 -246Q782 -188 739 -166T606 -136L635 -9H728Z" />
+<glyph unicode="&#xc8;" horiz-adv-x="1165" d="M988 698H307V104H1090V0H184V1456H1085V1351H307V802H988V698ZM693 1577H584L358 1857H502L693 1577Z" />
+<glyph unicode="&#xc9;" horiz-adv-x="1165" d="M988 698H307V104H1090V0H184V1456H1085V1351H307V802H988V698ZM739 1857H884L653 1577H548L739 1857Z" />
+<glyph unicode="&#xca;" horiz-adv-x="1165" d="M988 698H307V104H1090V0H184V1456H1085V1351H307V802H988V698ZM898 1589V1579H787L619 1762L452 1579H343V1592L581 1847H657L898 1589Z" />
+<glyph unicode="&#xcb;" horiz-adv-x="1165" d="M988 698H307V104H1090V0H184V1456H1085V1351H307V802H988V698ZM320 1717Q320 1751 342 1774T402 1798T462 1775T485 1717Q485 1684 463 1661T402 1638T342 1661T320 1717ZM758 1716Q758 1750 780 1773T840 1797T900
+1774T923 1716Q923 1683 901 1660T840 1637T780 1660T758 1716Z" />
+<glyph unicode="&#xcc;" horiz-adv-x="545" d="M334 0H211V1456H334V0ZM348 1577H239L13 1857H157L348 1577Z" />
+<glyph unicode="&#xcd;" horiz-adv-x="545" d="M334 0H211V1456H334V0ZM393 1857H538L307 1577H202L393 1857Z" />
+<glyph unicode="&#xce;" horiz-adv-x="545" d="M334 0H211V1456H334V0ZM553 1589V1579H442L274 1762L107 1579H-2V1592L236 1847H312L553 1589Z" />
+<glyph unicode="&#xcf;" horiz-adv-x="545" d="M334 0H211V1456H334V0ZM-25 1717Q-25 1751 -3 1774T57 1798T117 1775T140 1717Q140 1684 118 1661T57 1638T-3 1661T-25 1717ZM413 1716Q413 1750 435 1773T495 1797T555 1774T578 1716Q578 1683 556 1660T495 1637T435
+1660T413 1716Z" />
+<glyph unicode="&#xd0;" horiz-adv-x="1371" d="M214 0V689H33V791H214V1456H621Q800 1456 942 1375T1163 1141T1243 795V661Q1243 466 1164 315T942 82T612 0H214ZM645 689H337V104H608Q843 104 982 256T1121 669V797Q1121 1048 984 1199T623 1351H337V791H645V689Z" />
+<glyph unicode="&#xd1;" horiz-adv-x="1454" d="M1268 0H1145L308 1246V0H184V1456H308L1146 209V1456H1268V0ZM1067 1809Q1067 1713 1011 1655T872 1596Q796 1596 724 1651T594 1706Q547 1706 516 1675T484 1588L394 1591Q394 1683 448 1743T589 1803Q637 1803
+671 1786T735 1748T795 1710T867 1693Q913 1693 945 1726T978 1815L1067 1809Z" />
+<glyph unicode="&#xd2;" horiz-adv-x="1386" d="M1260 649Q1260 448 1191 296T992 62T694 -20Q439 -20 282 162T125 655V805Q125 1004 195 1157T395 1393T692 1476T988 1395T1187 1166T1260 823V649ZM1137 807Q1137 1070 1018 1219T692 1368Q489 1368 369 1219T248
+801V649Q248 390 368 239T694 87Q903 87 1020 236T1137 653V807ZM765 1583H656L430 1863H574L765 1583Z" />
+<glyph unicode="&#xd3;" horiz-adv-x="1386" d="M1260 649Q1260 448 1191 296T992 62T694 -20Q439 -20 282 162T125 655V805Q125 1004 195 1157T395 1393T692 1476T988 1395T1187 1166T1260 823V649ZM1137 807Q1137 1070 1018 1219T692 1368Q489 1368 369 1219T248
+801V649Q248 390 368 239T694 87Q903 87 1020 236T1137 653V807ZM811 1863H956L725 1583H620L811 1863Z" />
+<glyph unicode="&#xd4;" horiz-adv-x="1386" d="M1260 649Q1260 448 1191 296T992 62T694 -20Q439 -20 282 162T125 655V805Q125 1004 195 1157T395 1393T692 1476T988 1395T1187 1166T1260 823V649ZM1137 807Q1137 1070 1018 1219T692 1368Q489 1368 369 1219T248
+801V649Q248 390 368 239T694 87Q903 87 1020 236T1137 653V807ZM970 1595V1585H859L691 1768L524 1585H415V1598L653 1853H729L970 1595Z" />
+<glyph unicode="&#xd5;" horiz-adv-x="1386" d="M1260 649Q1260 448 1191 296T992 62T694 -20Q439 -20 282 162T125 655V805Q125 1004 195 1157T395 1393T692 1476T988 1395T1187 1166T1260 823V649ZM1137 807Q1137 1070 1018 1219T692 1368Q489 1368 369 1219T248
+801V649Q248 390 368 239T694 87Q903 87 1020 236T1137 653V807ZM1032 1821Q1032 1725 976 1667T837 1608Q761 1608 689 1663T559 1718Q512 1718 481 1687T449 1600L359 1603Q359 1695 413 1755T554 1815Q602 1815 636 1798T700 1760T760 1722T832 1705Q878 1705
+910 1738T943 1827L1032 1821Z" />
+<glyph unicode="&#xd6;" horiz-adv-x="1386" d="M1260 649Q1260 448 1191 296T992 62T694 -20Q439 -20 282 162T125 655V805Q125 1004 195 1157T395 1393T692 1476T988 1395T1187 1166T1260 823V649ZM1137 807Q1137 1070 1018 1219T692 1368Q489 1368 369 1219T248
+801V649Q248 390 368 239T694 87Q903 87 1020 236T1137 653V807ZM392 1723Q392 1757 414 1780T474 1804T534 1781T557 1723Q557 1690 535 1667T474 1644T414 1667T392 1723ZM830 1722Q830 1756 852 1779T912 1803T972 1780T995 1722Q995 1689 973 1666T912 1643T852
+1666T830 1722Z" />
+<glyph unicode="&#xd7;" horiz-adv-x="1072" d="M93 179L451 544L108 894L187 974L529 624L872 974L951 894L608 544L966 179L887 100L529 464L172 100L93 179Z" />
+<glyph unicode="&#xd8;" horiz-adv-x="1386" d="M1260 649Q1260 448 1191 296T992 62T694 -20Q508 -20 375 77L274 -83H170L307 134Q125 318 125 658V805Q125 1004 195 1157T395 1393T692 1476Q916 1476 1064 1336L1171 1505H1274L1125 1268Q1259 1088 1260 807V649ZM248
+649Q248 388 370 235L1002 1237Q883 1368 692 1368Q489 1368 369 1219T248 801V649ZM1137 807Q1137 1018 1057 1160L434 171Q541 87 694 87Q903 87 1020 236T1137 653V807Z" />
+<glyph unicode="&#xd9;" horiz-adv-x="1346" d="M1187 1456V462Q1186 315 1122 206T942 39T674 -20Q444 -20 306 105T162 453V1456H284V471Q284 287 389 186T674 84T958 186T1063 470V1456H1187ZM756 1571H647L421 1851H565L756 1571Z" />
+<glyph unicode="&#xda;" horiz-adv-x="1346" d="M1187 1456V462Q1186 315 1122 206T942 39T674 -20Q444 -20 306 105T162 453V1456H284V471Q284 287 389 186T674 84T958 186T1063 470V1456H1187ZM802 1851H947L716 1571H611L802 1851Z" />
+<glyph unicode="&#xdb;" horiz-adv-x="1346" d="M1187 1456V462Q1186 315 1122 206T942 39T674 -20Q444 -20 306 105T162 453V1456H284V471Q284 287 389 186T674 84T958 186T1063 470V1456H1187ZM961 1583V1573H850L682 1756L515 1573H406V1586L644 1841H720L961 1583Z" />
+<glyph unicode="&#xdc;" horiz-adv-x="1346" d="M1187 1456V462Q1186 315 1122 206T942 39T674 -20Q444 -20 306 105T162 453V1456H284V471Q284 287 389 186T674 84T958 186T1063 470V1456H1187ZM383 1711Q383 1745 405 1768T465 1792T525 1769T548 1711Q548 1678
+526 1655T465 1632T405 1655T383 1711ZM821 1710Q821 1744 843 1767T903 1791T963 1768T986 1710Q986 1677 964 1654T903 1631T843 1654T821 1710Z" />
+<glyph unicode="&#xdd;" horiz-adv-x="1226" d="M611 662L1056 1456H1198L672 548V0H549V548L24 1456H170L611 662ZM732 1845H877L646 1565H541L732 1845Z" />
+<glyph unicode="&#xde;" horiz-adv-x="1214" d="M303 1456V1152H628Q771 1152 877 1101T1039 956T1096 738Q1096 553 974 441T641 324H303V0H183V1456H303ZM303 1051V425H627Q784 425 880 510T976 736T885 961T642 1051H303Z" />
+<glyph unicode="&#xdf;" horiz-adv-x="1200" d="M271 0H151V1127Q151 1327 246 1435T512 1544Q665 1544 760 1460T856 1237Q856 1179 843 1131T794 1019T746 913T733 824Q733 768 774 716T911 593T1051 454T1096 306Q1096 160 990 70T720 -20Q636 -20 545 4T414
+60L448 161Q485 132 562 106T706 80Q828 80 902 144T976 306Q976 367 932 423T797 547T659 681T613 826Q613 922 676 1034T739 1230Q739 1323 676 1382T522 1442Q275 1442 271 1136V0Z" />
+<glyph unicode="&#xe0;" horiz-adv-x="1097" d="M839 0Q821 51 816 151Q753 69 656 25T449 -20Q293 -20 197 67T100 287Q100 445 231 537T598 629H815V752Q815 868 744 934T535 1001Q410 1001 328 937T246 783L126 784Q126 913 246 1007T541 1102Q722 1102 826
+1012T934 759V247Q934 90 967 12V0H839ZM463 86Q583 86 677 144T815 299V537H601Q422 535 321 472T220 297Q220 206 287 146T463 86ZM653 1256H544L318 1536H462L653 1256Z" />
+<glyph unicode="&#xe1;" horiz-adv-x="1097" d="M839 0Q821 51 816 151Q753 69 656 25T449 -20Q293 -20 197 67T100 287Q100 445 231 537T598 629H815V752Q815 868 744 934T535 1001Q410 1001 328 937T246 783L126 784Q126 913 246 1007T541 1102Q722 1102 826
+1012T934 759V247Q934 90 967 12V0H839ZM463 86Q583 86 677 144T815 299V537H601Q422 535 321 472T220 297Q220 206 287 146T463 86ZM699 1536H844L613 1256H508L699 1536Z" />
+<glyph unicode="&#xe2;" horiz-adv-x="1097" d="M839 0Q821 51 816 151Q753 69 656 25T449 -20Q293 -20 197 67T100 287Q100 445 231 537T598 629H815V752Q815 868 744 934T535 1001Q410 1001 328 937T246 783L126 784Q126 913 246 1007T541 1102Q722 1102 826
+1012T934 759V247Q934 90 967 12V0H839ZM463 86Q583 86 677 144T815 299V537H601Q422 535 321 472T220 297Q220 206 287 146T463 86ZM858 1268V1258H747L579 1441L412 1258H303V1271L541 1526H617L858 1268Z" />
+<glyph unicode="&#xe3;" horiz-adv-x="1097" d="M839 0Q821 51 816 151Q753 69 656 25T449 -20Q293 -20 197 67T100 287Q100 445 231 537T598 629H815V752Q815 868 744 934T535 1001Q410 1001 328 937T246 783L126 784Q126 913 246 1007T541 1102Q722 1102 826
+1012T934 759V247Q934 90 967 12V0H839ZM463 86Q583 86 677 144T815 299V537H601Q422 535 321 472T220 297Q220 206 287 146T463 86ZM920 1494Q920 1398 864 1340T725 1281Q649 1281 577 1336T447 1391Q400 1391 369 1360T337 1273L247 1276Q247 1368 301 1428T442
+1488Q490 1488 524 1471T588 1433T648 1395T720 1378Q766 1378 798 1411T831 1500L920 1494Z" />
+<glyph unicode="&#xe4;" horiz-adv-x="1097" d="M839 0Q821 51 816 151Q753 69 656 25T449 -20Q293 -20 197 67T100 287Q100 445 231 537T598 629H815V752Q815 868 744 934T535 1001Q410 1001 328 937T246 783L126 784Q126 913 246 1007T541 1102Q722 1102 826
+1012T934 759V247Q934 90 967 12V0H839ZM463 86Q583 86 677 144T815 299V537H601Q422 535 321 472T220 297Q220 206 287 146T463 86ZM280 1396Q280 1430 302 1453T362 1477T422 1454T445 1396Q445 1363 423 1340T362 1317T302 1340T280 1396ZM718 1395Q718 1429
+740 1452T800 1476T860 1453T883 1395Q883 1362 861 1339T800 1316T740 1339T718 1395Z" />
+<glyph unicode="&#xe5;" horiz-adv-x="1097" d="M839 0Q821 51 816 151Q753 69 656 25T449 -20Q293 -20 197 67T100 287Q100 445 231 537T598 629H815V752Q815 868 744 934T535 1001Q410 1001 328 937T246 783L126 784Q126 913 246 1007T541 1102Q722 1102 826
+1012T934 759V247Q934 90 967 12V0H839ZM463 86Q583 86 677 144T815 299V537H601Q422 535 321 472T220 297Q220 206 287 146T463 86ZM387 1400Q387 1480 443 1535T580 1590Q659 1590 716 1535T773 1400Q773 1321 718 1267T580 1213T442 1267T387 1400ZM464 1400Q464
+1350 497 1317T580 1284Q629 1284 663 1316T697 1400Q697 1453 662 1486T580 1519Q531 1519 498 1485T464 1400Z" />
+<glyph unicode="&#xe6;" horiz-adv-x="1732" d="M1265 -20Q1126 -20 1027 34T867 186Q807 88 693 34T440 -20Q271 -20 178 64T85 293Q85 450 195 539T511 632H781V720Q781 852 718 926T528 1000Q398 1000 315 935T232 765L113 778Q113 922 229 1012T528 1102Q653
+1102 741 1049T870 889Q930 989 1024 1045T1235 1102Q1431 1102 1543 982T1658 644V538H901V509Q901 308 997 195T1265 81Q1450 81 1589 199L1636 112Q1491 -20 1265 -20ZM458 80Q549 80 642 126T781 236V536H525Q388 536 302 475T207 309L206 289Q206 192 271
+136T458 80ZM1235 1000Q1103 1000 1013 902T904 636H1539V667Q1539 821 1459 910T1235 1000Z" />
+<glyph unicode="&#xe7;" horiz-adv-x="1055" d="M556 81Q681 81 765 151T857 334H972Q967 235 910 154T759 26T556 -20Q343 -20 219 128T94 526V562Q94 722 150 845T310 1035T555 1102Q733 1102 848 996T972 717H857Q849 844 766 922T555 1000Q393 1000 304 883T214
+555V520Q214 313 303 197T556 81ZM589 -9L577 -73Q742 -94 742 -244Q742 -336 663 -390T448 -444L441 -366Q530 -366 586 -334T643 -246Q643 -188 600 -166T467 -136L496 -9H589Z" />
+<glyph unicode="&#xe8;" horiz-adv-x="1058" d="M575 -20Q437 -20 326 48T152 237T90 510V553Q90 709 150 834T319 1030T553 1102Q750 1102 865 968T981 600V533H209V510Q209 326 314 204T580 81Q676 81 749 116T883 228L958 171Q826 -20 575 -20ZM553 1000Q418
+1000 326 901T213 635H862V648Q857 804 773 902T553 1000ZM640 1256H531L305 1536H449L640 1256Z" />
+<glyph unicode="&#xe9;" horiz-adv-x="1058" d="M575 -20Q437 -20 326 48T152 237T90 510V553Q90 709 150 834T319 1030T553 1102Q750 1102 865 968T981 600V533H209V510Q209 326 314 204T580 81Q676 81 749 116T883 228L958 171Q826 -20 575 -20ZM553 1000Q418
+1000 326 901T213 635H862V648Q857 804 773 902T553 1000ZM686 1536H831L600 1256H495L686 1536Z" />
+<glyph unicode="&#xea;" horiz-adv-x="1058" d="M575 -20Q437 -20 326 48T152 237T90 510V553Q90 709 150 834T319 1030T553 1102Q750 1102 865 968T981 600V533H209V510Q209 326 314 204T580 81Q676 81 749 116T883 228L958 171Q826 -20 575 -20ZM553 1000Q418
+1000 326 901T213 635H862V648Q857 804 773 902T553 1000ZM845 1268V1258H734L566 1441L399 1258H290V1271L528 1526H604L845 1268Z" />
+<glyph unicode="&#xeb;" horiz-adv-x="1058" d="M575 -20Q437 -20 326 48T152 237T90 510V553Q90 709 150 834T319 1030T553 1102Q750 1102 865 968T981 600V533H209V510Q209 326 314 204T580 81Q676 81 749 116T883 228L958 171Q826 -20 575 -20ZM553 1000Q418
+1000 326 901T213 635H862V648Q857 804 773 902T553 1000ZM267 1396Q267 1430 289 1453T349 1477T409 1454T432 1396Q432 1363 410 1340T349 1317T289 1340T267 1396ZM705 1395Q705 1429 727 1452T787 1476T847 1453T870 1395Q870 1362 848 1339T787 1316T727 1339T705
+1395Z" />
+<glyph unicode="&#xec;" horiz-adv-x="456" d="M288 0H168V1082H288V0ZM305 1244H196L-30 1524H114L305 1244Z" />
+<glyph unicode="&#xed;" horiz-adv-x="456" d="M288 0H168V1082H288V0ZM350 1780H495L264 1500H159L350 1780Z" />
+<glyph unicode="&#xee;" horiz-adv-x="456" d="M288 0H168V1082H288V0ZM510 1256V1246H399L231 1429L64 1246H-45V1259L193 1514H269L510 1256Z" />
+<glyph unicode="&#xef;" horiz-adv-x="456" d="M288 0H168V1082H288V0ZM-68 1384Q-68 1418 -46 1441T14 1465T74 1442T97 1384Q97 1351 75 1328T14 1305T-46 1328T-68 1384ZM370 1383Q370 1417 392 1440T452 1464T512 1441T535 1383Q535 1350 513 1327T452 1304T392
+1327T370 1383Z" />
+<glyph unicode="&#xf0;" horiz-adv-x="1191" d="M811 1303Q1049 1053 1055 645V535Q1055 376 999 249T842 51T615 -20Q485 -20 379 41T211 216T149 466Q149 695 268 830T587 965Q687 965 773 927T919 821Q877 1072 709 1240L484 1101L433 1174L639 1302Q502 1408
+296 1475L335 1578Q577 1506 744 1366L938 1487L989 1414L811 1303ZM935 625L933 682Q894 765 807 813T609 861Q448 861 359 756T269 466Q269 363 314 274T438 134T619 83Q760 83 847 207T935 543V625Z" />
+<glyph unicode="&#xf1;" horiz-adv-x="1125" d="M270 1082L274 897Q335 997 426 1049T627 1102Q801 1102 886 1004T972 710V0H853V711Q852 856 792 927T598 998Q487 998 402 929T275 741V0H156V1082H270ZM916 1493Q916 1397 860 1339T721 1280Q645 1280 573 1335T443
+1390Q396 1390 365 1359T333 1272L243 1275Q243 1367 297 1427T438 1487Q486 1487 520 1470T584 1432T644 1394T716 1377Q762 1377 794 1410T827 1499L916 1493Z" />
+<glyph unicode="&#xf2;" horiz-adv-x="1147" d="M90 557Q90 713 150 838T321 1032T572 1102Q788 1102 922 951T1056 549V524Q1056 367 996 242T825 48T574 -20Q359 -20 225 131T90 533V557ZM210 524Q210 330 310 206T574 81Q736 81 836 205T937 534V557Q937 681
+891 784T762 943T572 1000Q412 1000 311 875T210 546V524ZM645 1256H536L310 1536H454L645 1256Z" />
+<glyph unicode="&#xf3;" horiz-adv-x="1147" d="M90 557Q90 713 150 838T321 1032T572 1102Q788 1102 922 951T1056 549V524Q1056 367 996 242T825 48T574 -20Q359 -20 225 131T90 533V557ZM210 524Q210 330 310 206T574 81Q736 81 836 205T937 534V557Q937 681
+891 784T762 943T572 1000Q412 1000 311 875T210 546V524ZM691 1536H836L605 1256H500L691 1536Z" />
+<glyph unicode="&#xf4;" horiz-adv-x="1147" d="M90 557Q90 713 150 838T321 1032T572 1102Q788 1102 922 951T1056 549V524Q1056 367 996 242T825 48T574 -20Q359 -20 225 131T90 533V557ZM210 524Q210 330 310 206T574 81Q736 81 836 205T937 534V557Q937 681
+891 784T762 943T572 1000Q412 1000 311 875T210 546V524ZM850 1268V1258H739L571 1441L404 1258H295V1271L533 1526H609L850 1268Z" />
+<glyph unicode="&#xf5;" horiz-adv-x="1147" d="M90 557Q90 713 150 838T321 1032T572 1102Q788 1102 922 951T1056 549V524Q1056 367 996 242T825 48T574 -20Q359 -20 225 131T90 533V557ZM210 524Q210 330 310 206T574 81Q736 81 836 205T937 534V557Q937 681
+891 784T762 943T572 1000Q412 1000 311 875T210 546V524ZM912 1493Q912 1397 856 1339T717 1280Q641 1280 569 1335T439 1390Q392 1390 361 1359T329 1272L239 1275Q239 1367 293 1427T434 1487Q482 1487 516 1470T580 1432T640 1394T712 1377Q758 1377 790 1410T823
+1499L912 1493Z" />
+<glyph unicode="&#xf6;" horiz-adv-x="1147" d="M90 557Q90 713 150 838T321 1032T572 1102Q788 1102 922 951T1056 549V524Q1056 367 996 242T825 48T574 -20Q359 -20 225 131T90 533V557ZM210 524Q210 330 310 206T574 81Q736 81 836 205T937 534V557Q937 681
+891 784T762 943T572 1000Q412 1000 311 875T210 546V524ZM272 1396Q272 1430 294 1453T354 1477T414 1454T437 1396Q437 1363 415 1340T354 1317T294 1340T272 1396ZM710 1395Q710 1429 732 1452T792 1476T852 1453T875 1395Q875 1362 853 1339T792 1316T732 1339T710
+1395Z" />
+<glyph unicode="&#xf7;" horiz-adv-x="1164" d="M1070 644H72V760H1070V644ZM495 1088Q495 1123 517 1147T579 1171T642 1147T665 1088T642 1030T579 1006T517 1029T495 1088ZM495 291Q495 326 517 350T579 374T642 350T665 291T642 233T579 210T517 233T495 291Z" />
+<glyph unicode="&#xf8;" horiz-adv-x="1140" d="M89 557Q89 713 149 838T320 1032T571 1102Q685 1102 785 1054L863 1214H957L857 1010Q951 938 1003 821T1055 557V524Q1055 368 994 242T823 48T573 -20Q465 -20 373 21L294 -140H200L299 63Q199 134 144 253T89
+524V557ZM208 524Q208 414 243 319T348 163L737 957Q662 1000 571 1000Q410 1000 309 875T208 546V524ZM935 557Q935 660 902 751T806 905L419 115Q487 81 573 81Q734 81 834 205T935 534V557Z" />
+<glyph unicode="&#xf9;" horiz-adv-x="1125" d="M852 137Q744 -20 507 -20Q334 -20 244 80T152 378V1082H271V393Q271 84 521 84Q781 84 850 299V1082H970V0H854L852 137ZM647 1256H538L312 1536H456L647 1256Z" />
+<glyph unicode="&#xfa;" horiz-adv-x="1125" d="M852 137Q744 -20 507 -20Q334 -20 244 80T152 378V1082H271V393Q271 84 521 84Q781 84 850 299V1082H970V0H854L852 137ZM693 1536H838L607 1256H502L693 1536Z" />
+<glyph unicode="&#xfb;" horiz-adv-x="1125" d="M852 137Q744 -20 507 -20Q334 -20 244 80T152 378V1082H271V393Q271 84 521 84Q781 84 850 299V1082H970V0H854L852 137ZM852 1268V1258H741L573 1441L406 1258H297V1271L535 1526H611L852 1268Z" />
+<glyph unicode="&#xfc;" horiz-adv-x="1125" d="M852 137Q744 -20 507 -20Q334 -20 244 80T152 378V1082H271V393Q271 84 521 84Q781 84 850 299V1082H970V0H854L852 137ZM274 1396Q274 1430 296 1453T356 1477T416 1454T439 1396Q439 1363 417 1340T356 1317T296
+1340T274 1396ZM712 1395Q712 1429 734 1452T794 1476T854 1453T877 1395Q877 1362 855 1339T794 1316T734 1339T712 1395Z" />
+<glyph unicode="&#xfd;" horiz-adv-x="973" d="M499 172L815 1082H944L482 -184L458 -240Q369 -437 183 -437Q140 -437 91 -423L90 -324L152 -330Q240 -330 294 -287T387 -137L440 9L32 1082H163L499 172ZM633 1536H778L547 1256H442L633 1536Z" />
+<glyph unicode="&#xfe;" horiz-adv-x="1150" d="M1031 530Q1031 277 919 129T618 -20Q397 -20 279 136V-416H159V1536H279V932Q396 1102 616 1102Q808 1102 919 956T1031 548V530ZM911 551Q911 758 826 878T589 998Q479 998 400 945T279 791V270Q321 180 400 131T591
+81Q742 81 826 201T911 551Z" />
+<glyph unicode="&#xff;" horiz-adv-x="973" d="M499 172L815 1082H944L482 -184L458 -240Q369 -437 183 -437Q140 -437 91 -423L90 -324L152 -330Q240 -330 294 -287T387 -137L440 9L32 1082H163L499 172ZM214 1396Q214 1430 236 1453T296 1477T356 1454T379 1396Q379
+1363 357 1340T296 1317T236 1340T214 1396ZM652 1395Q652 1429 674 1452T734 1476T794 1453T817 1395Q817 1362 795 1339T734 1316T674 1339T652 1395Z" />
+<glyph unicode="&#x2013;" horiz-adv-x="1334" d="M1417 686H415V788H1417V686Z" />
+<glyph unicode="&#x2014;" horiz-adv-x="1580" d="M1462 686H126V788H1462V686Z" />
+<glyph unicode="&#x2018;" horiz-adv-x="364" d="M238 1554L310 1503Q220 1385 217 1249V1121H98V1233Q98 1325 135 1410T238 1554Z" />
+<glyph unicode="&#x2019;" horiz-adv-x="364" d="M133 1099L62 1151Q152 1272 155 1405V1536H273V1435Q273 1226 133 1099Z" />
+<glyph unicode="&#x201a;" horiz-adv-x="353" d="M112 -231L41 -179Q124 -68 132 51L133 205H252V104Q252 -104 112 -231Z" />
+<glyph unicode="&#x201c;" horiz-adv-x="612" d="M239 1554L311 1503Q221 1385 218 1249V1121H99V1233Q99 1325 136 1410T239 1554ZM490 1554L562 1503Q472 1385 469 1249V1121H350V1233Q350 1325 387 1410T490 1554Z" />
+<glyph unicode="&#x201d;" horiz-adv-x="617" d="M139 1099L68 1151Q158 1272 161 1405V1536H279V1435Q279 1226 139 1099ZM383 1099L312 1151Q402 1272 405 1405V1536H523V1435Q523 1226 383 1099Z" />
+<glyph unicode="&#x201e;" horiz-adv-x="593" d="M112 -240L41 -188Q130 -65 133 73V236H252V106Q252 -111 112 -240ZM346 -240L275 -188Q363 -66 366 73V236H486V106Q486 -111 346 -240Z" />
+<glyph unicode="&#x2022;" horiz-adv-x="662" d="M146 752Q146 831 197 881T331 931Q413 931 464 883T517 757V717Q517 636 466 588T332 540Q248 540 197 589T146 719V752Z" />
+<glyph unicode="&#x2039;" horiz-adv-x="609" d="M232 555L508 167H403L108 546V565L403 944H508L232 555Z" />
+<glyph unicode="&#x203a;" horiz-adv-x="609" d="M203 944L498 560V541L203 162H97L373 550L97 944H203Z" />
+</font>
+</defs>
+</svg>
diff --git a/site/assets/fonts/Roboto_300.ttf b/site/assets/fonts/Roboto_300.ttf
new file mode 100644
index 00000000..a22188ee
--- /dev/null
+++ b/site/assets/fonts/Roboto_300.ttf
Binary files differ
diff --git a/site/assets/fonts/Roboto_300.woff b/site/assets/fonts/Roboto_300.woff
new file mode 100644
index 00000000..96663f07
--- /dev/null
+++ b/site/assets/fonts/Roboto_300.woff
Binary files differ
diff --git a/site/assets/fonts/Roboto_300.woff2 b/site/assets/fonts/Roboto_300.woff2
new file mode 100644
index 00000000..52c5845a
--- /dev/null
+++ b/site/assets/fonts/Roboto_300.woff2
Binary files differ
diff --git a/site/assets/fonts/Roboto_400.eot b/site/assets/fonts/Roboto_400.eot
new file mode 100644
index 00000000..a0780d6e
--- /dev/null
+++ b/site/assets/fonts/Roboto_400.eot
Binary files differ
diff --git a/site/assets/fonts/Roboto_400.svg b/site/assets/fonts/Roboto_400.svg
new file mode 100644
index 00000000..627f5a36
--- /dev/null
+++ b/site/assets/fonts/Roboto_400.svg
@@ -0,0 +1,308 @@
+<?xml version="1.0" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<svg xmlns="http://www.w3.org/2000/svg">
+<defs >
+<font id="Roboto" horiz-adv-x="1158" ><font-face
+ font-family="Roboto"
+ units-per-em="2048"
+ panose-1="2 0 0 0 0 0 0 0 0 0"
+ ascent="1900"
+ descent="-500"
+ alphabetic="0" />
+<glyph unicode=" " horiz-adv-x="507" />
+<glyph unicode="!" horiz-adv-x="527" d="M347 411H180L167 1456H361L347 411ZM160 93Q160 138 187 168T269 199T351 169T379 93T351 19T269 -11T188 18T160 93Z" />
+<glyph unicode="&quot;" horiz-adv-x="655" d="M277 1400L247 1042H136L137 1536H277V1400ZM547 1400L517 1042H406L407 1536H547V1400Z" />
+<glyph unicode="#" horiz-adv-x="1261" d="M765 410H501L421 0H278L358 410H119V547H384L453 901H195V1040H480L562 1456H705L623 1040H887L969 1456H1113L1031 1040H1235V901H1004L935 547H1160V410H909L829 0H685L765 410ZM527 547H791L860 901H596L527 547Z" />
+<glyph unicode="$" horiz-adv-x="1150" d="M856 375Q856 467 792 530T574 644Q361 709 264 813T166 1079Q166 1243 261 1348T524 1473V1692H673V1472Q841 1449 934 1331T1028 1008H844Q844 1149 777 1232T596 1315Q477 1315 414 1254T351 1082Q351 980 417 920T636
+810T874 701T1000 562T1041 377Q1041 208 940 105T655 -17V-208H507V-17Q321 0 216 115T110 429H295Q295 290 368 215T575 140Q706 140 781 203T856 375Z" />
+<glyph unicode="%" horiz-adv-x="1500" d="M105 1176Q105 1307 188 1392T403 1477Q536 1477 618 1392T701 1170V1099Q701 967 618 884T405 800Q275 800 190 883T105 1106V1176ZM243 1099Q243 1021 287 971T405 920Q476 920 519 969T563 1103V1176Q563 1254 520
+1305T403 1356T286 1305T243 1172V1099ZM814 357Q814 488 897 572T1112 657T1327 573T1411 350V279Q1411 148 1328 64T1114 -21T899 62T814 285V357ZM952 279Q952 200 996 150T1114 99Q1186 99 1229 148T1272 283V357Q1272 436 1229 486T1112 536Q1041 536 997
+487T952 353V279ZM447 110L342 176L1053 1314L1158 1248L447 110Z" />
+<glyph unicode="&amp;" horiz-adv-x="1273" d="M101 391Q101 496 159 584T383 789Q286 907 253 979T220 1122Q220 1288 318 1382T584 1476Q734 1476 832 1389T930 1168Q930 1080 886 1006T730 849L623 770L947 383Q1015 513 1015 672H1182Q1182 417 1059 249L1267
+0H1045L948 115Q874 49 775 15T572 -20Q359 -20 230 93T101 391ZM572 131Q719 131 841 243L486 668L453 644Q286 521 286 391Q286 273 362 202T572 131ZM405 1128Q405 1032 523 888L641 971Q709 1019 734 1062T759 1168Q759 1235 709 1279T583 1324Q501 1324 453
+1269T405 1128Z" />
+<glyph unicode="&apos;" horiz-adv-x="357" d="M253 1425L232 1057H103L104 1536H253V1425Z" />
+<glyph unicode="(" horiz-adv-x="700" d="M133 591Q133 817 193 1025T374 1403T623 1643L661 1521Q515 1409 422 1179T319 664L318 579Q318 193 459 -91Q544 -261 661 -357L623 -470Q490 -396 369 -222Q133 118 133 591Z" />
+<glyph unicode=")" horiz-adv-x="712" d="M567 581Q567 358 509 154T330 -224T77 -470L38 -357Q192 -239 285 9T381 561V593Q381 803 337 983T215 1307T38 1530L77 1643Q209 1570 328 1399T507 1022T567 581Z" />
+<glyph unicode="*" horiz-adv-x="882" d="M330 983L28 1073L74 1224L376 1112L367 1456H520L510 1107L807 1217L853 1065L546 974L744 703L620 609L434 897L254 616L129 707L330 983Z" />
+<glyph unicode="+" horiz-adv-x="1161" d="M670 781H1076V606H670V146H484V606H78V781H484V1206H670V781Z" />
+<glyph unicode="," horiz-adv-x="402" d="M134 -290L29 -218Q123 -87 127 52V219H308V74Q308 -27 259 -128T134 -290Z" />
+<glyph unicode="-" horiz-adv-x="565" d="M525 543H37V694H525V543Z" />
+<glyph unicode="." horiz-adv-x="539" d="M144 97Q144 145 172 177T258 209T344 177T374 97Q374 51 345 20T258 -11T173 20T144 97Z" />
+<glyph unicode="/" horiz-adv-x="844" d="M177 -125H18L626 1456H784L177 -125Z" />
+<glyph unicode="0" horiz-adv-x="1150" d="M1034 621Q1034 296 923 138T576 -20Q343 -20 231 134T115 596V843Q115 1164 226 1320T574 1476Q809 1476 920 1326T1034 861V621ZM849 874Q849 1109 783 1216T574 1324Q432 1324 367 1217T300 888V592Q300 356 368 244T576
+131Q713 131 779 237T849 571V874Z" />
+<glyph unicode="1" horiz-adv-x="1150" d="M729 0H543V1233L170 1096V1264L700 1463H729V0Z" />
+<glyph unicode="2" horiz-adv-x="1150" d="M1075 0H121V133L625 693Q737 820 779 899T822 1064Q822 1178 753 1251T569 1324Q431 1324 355 1246T278 1027H93Q93 1228 222 1352T569 1476Q772 1476 890 1370T1008 1086Q1008 871 734 574L344 151H1075V0Z" />
+<glyph unicode="3" horiz-adv-x="1150" d="M390 818H529Q660 820 735 887T810 1068Q810 1324 555 1324Q435 1324 364 1256T292 1074H107Q107 1247 233 1361T555 1476Q761 1476 878 1367T995 1064Q995 969 934 880T766 747Q886 709 951 621T1017 406Q1017 210 889
+95T556 -20T223 91T94 384H280Q280 269 355 200T556 131Q690 131 761 201T832 402Q832 529 754 597T529 667H390V818Z" />
+<glyph unicode="4" horiz-adv-x="1150" d="M902 489H1104V338H902V0H716V338H53V447L705 1456H902V489ZM263 489H716V1203L694 1163L263 489Z" />
+<glyph unicode="5" horiz-adv-x="1150" d="M206 730L280 1456H1026V1285H437L393 888Q500 951 636 951Q835 951 952 820T1069 464Q1069 239 948 110T608 -20Q415 -20 293 87T154 383H329Q346 258 418 195T608 131Q737 131 810 219T884 462Q884 608 805 696T593
+785Q472 785 403 732L354 692L206 730Z" />
+<glyph unicode="6" horiz-adv-x="1150" d="M847 1457V1300H813Q597 1296 469 1172T321 823Q436 955 635 955Q825 955 938 821T1052 475Q1052 250 930 115T601 -20Q392 -20 262 140T132 554V625Q132 1027 303 1239T814 1457H847ZM604 801Q509 801 429 744T318 601V533Q318
+353 399 243T601 133Q726 133 797 225T869 466Q869 616 797 708T604 801Z" />
+<glyph unicode="7" horiz-adv-x="1150" d="M1061 1352L458 0H264L865 1304H77V1456H1061V1352Z" />
+<glyph unicode="8" horiz-adv-x="1150" d="M1004 1076Q1004 967 947 882T791 749Q905 700 971 606T1038 393Q1038 204 911 92T575 -20Q365 -20 239 92T112 393Q112 511 176 606T355 750Q258 798 202 883T146 1076Q146 1260 264 1368T575 1476Q767 1476 885 1368T1004
+1076ZM853 397Q853 519 776 596T573 673T373 597T297 397T370 202T575 131Q705 131 779 202T853 397ZM575 1324Q466 1324 399 1257T331 1073Q331 962 397 894T575 825T752 893T819 1073T750 1254T575 1324Z" />
+<glyph unicode="9" horiz-adv-x="1150" d="M830 640Q772 571 692 529T515 487Q389 487 296 549T151 723T100 972Q100 1118 155 1235T313 1414T551 1476Q767 1476 891 1315T1016 874V820Q1016 395 848 200T341 -1H305V155H344Q573 159 696 274T830 640ZM545 640Q638
+640 716 697T831 838V912Q831 1094 752 1208T552 1322Q430 1322 356 1229T282 982Q282 833 353 737T545 640Z" />
+<glyph unicode=":" horiz-adv-x="496" d="M390 97Q390 145 418 177T504 209T590 177T620 97Q620 51 591 20T504 -11T419 20T390 97ZM135 980Q135 1028 163 1060T249 1092T335 1060T365 980Q365 934 336 903T249 872T164 903T135 980Z" />
+<glyph unicode=";" horiz-adv-x="433" d="M111 980Q111 1028 139 1060T225 1092T311 1060T341 980Q341 934 312 903T225 872T140 903T111 980ZM146 -290L41 -218Q135 -87 139 52V219H320V74Q320 -27 271 -128T146 -290Z" />
+<glyph unicode="&lt;" horiz-adv-x="1041" d="M264 644L890 391V195L72 574V720L890 1098V902L264 644Z" />
+<glyph unicode="=" horiz-adv-x="1124" d="M986 814H152V975H986V814ZM986 399H152V559H986V399Z" />
+<glyph unicode="&gt;" horiz-adv-x="1070" d="M795 650L134 909V1099L988 721V575L134 196V388L795 650Z" />
+<glyph unicode="?" horiz-adv-x="967" d="M357 410Q359 529 384 598T486 751L617 886Q701 981 701 1090Q701 1195 646 1254T486 1314Q384 1314 322 1260T260 1115H75Q77 1277 190 1376T486 1476Q675 1476 780 1375T886 1096Q886 921 724 751L615 643Q542 562 542
+410H357ZM349 93Q349 138 376 168T458 199T540 169T568 93T540 19T458 -11T377 18T349 93Z" />
+<glyph unicode="@" horiz-adv-x="1839" d="M1738 502Q1726 260 1618 120T1329 -20Q1142 -20 1089 148Q1035 63 966 22T822 -20Q680 -20 607 96T553 417Q568 582 628 711T784 915T985 989Q1066 989 1130 968T1274 883L1222 329Q1203 98 1350 98Q1463 98 1533 210T1609
+502Q1628 891 1465 1095T967 1299Q766 1299 610 1200T364 912T263 478Q251 230 323 48T542 -231T899 -328Q989 -328 1079 -306T1230 -249L1267 -364Q1205 -403 1103 -428T895 -453Q645 -453 465 -341T196 -17T118 478Q130 753 241 972T542 1311T971 1431Q1220 1431
+1398 1319T1663 996T1738 502ZM712 417Q698 275 738 199T867 123Q927 123 982 174T1074 320L1075 329L1121 832Q1065 861 1001 861Q884 861 808 742T712 417Z" />
+<glyph unicode="A" horiz-adv-x="1336" d="M973 380H363L226 0H28L584 1456H752L1309 0H1112L973 380ZM421 538H916L668 1219L421 538Z" />
+<glyph unicode="B" horiz-adv-x="1275" d="M169 0V1456H645Q882 1456 1001 1358T1121 1068Q1121 966 1063 888T905 766Q1023 733 1091 641T1160 420Q1160 224 1033 112T674 0H169ZM361 681V157H678Q812 157 889 226T967 418Q967 681 681 681H361ZM361 835H651Q777
+835 852 898T928 1069Q928 1189 858 1243T645 1298H361V835Z" />
+<glyph unicode="C" horiz-adv-x="1333" d="M1240 462Q1213 231 1070 106T688 -20Q430 -20 275 165T119 660V800Q119 1003 191 1157T397 1393T705 1476Q937 1476 1077 1347T1240 988H1047Q1022 1162 939 1240T705 1318Q521 1318 417 1182T312 795V654Q312 417 411
+277T688 137Q848 137 933 209T1047 462H1240Z" />
+<glyph unicode="D" horiz-adv-x="1343" d="M169 0V1456H580Q770 1456 916 1372T1141 1133T1222 777V684Q1222 478 1143 323T916 85T572 0H169ZM361 1298V157H563Q785 157 908 295T1032 688V773Q1032 1021 916 1158T585 1298H361Z" />
+<glyph unicode="E" horiz-adv-x="1164" d="M992 673H361V157H1094V0H169V1456H1084V1298H361V830H992V673Z" />
+<glyph unicode="F" horiz-adv-x="1132" d="M972 643H361V0H169V1456H1071V1298H361V800H972V643Z" />
+<glyph unicode="G" horiz-adv-x="1395" d="M1244 191Q1170 85 1038 33T729 -20Q551 -20 413 63T200 301T122 658V785Q122 1114 275 1295T707 1476Q935 1476 1074 1360T1244 1029H1052Q998 1318 708 1318Q515 1318 416 1183T315 790V671Q315 426 427 282T730 137Q838
+137 919 161T1053 242V569H716V725H1244V191Z" />
+<glyph unicode="H" horiz-adv-x="1460" d="M1288 0H1095V673H361V0H169V1456H361V830H1095V1456H1288V0Z" />
+<glyph unicode="I" horiz-adv-x="557" d="M375 0H183V1456H375V0Z" />
+<glyph unicode="J" horiz-adv-x="1130" d="M779 1456H972V425Q972 216 847 98T512 -20Q295 -20 174 91T53 402H245Q245 277 313 207T512 137Q631 137 704 212T779 422V1456Z" />
+<glyph unicode="K" horiz-adv-x="1284" d="M539 677L361 492V0H169V1456H361V736L1008 1456H1240L667 813L1285 0H1055L539 677Z" />
+<glyph unicode="L" horiz-adv-x="1102" d="M362 157H1052V0H169V1456H362V157Z" />
+<glyph unicode="M" horiz-adv-x="1788" d="M417 1456L893 268L1369 1456H1618V0H1426V567L1444 1179L966 0H819L342 1176L361 567V0H169V1456H417Z" />
+<glyph unicode="N" horiz-adv-x="1460" d="M1288 0H1095L362 1122V0H169V1456H362L1097 329V1456H1288V0Z" />
+<glyph unicode="O" horiz-adv-x="1408" d="M1289 681Q1289 467 1217 308T1013 64T705 -20Q533 -20 400 64T194 305T118 668V773Q118 983 191 1144T397 1390T703 1476Q878 1476 1011 1392T1217 1147T1289 773V681ZM1098 775Q1098 1034 994 1172T703 1311Q521 1311
+417 1173T309 788V681Q309 430 414 287T705 143Q891 143 993 278T1098 667V775Z" />
+<glyph unicode="P" horiz-adv-x="1292" d="M361 570V0H169V1456H706Q945 1456 1080 1334T1216 1011Q1216 799 1084 685T704 570H361ZM361 727H706Q860 727 942 799T1024 1009Q1024 1139 942 1217T717 1298H361V727Z" />
+<glyph unicode="Q" horiz-adv-x="1408" d="M1281 681Q1281 470 1214 318T1026 79L1286 -125L1155 -246L848 -2Q776 -20 696 -20Q524 -20 391 64T185 305T109 668V773Q109 983 182 1144T388 1390T694 1476Q870 1476 1003 1391T1209 1147T1281 774V681ZM1089 775Q1089
+1032 987 1171T694 1311Q513 1311 409 1173T301 788V681Q301 431 405 287T696 143T984 278T1089 667V775Z" />
+<glyph unicode="R" horiz-adv-x="1261" d="M703 589H361V0H168V1456H650Q896 1456 1028 1344T1161 1018Q1161 882 1088 781T883 630L1225 12V0H1019L703 589ZM361 746H656Q799 746 883 820T968 1018Q968 1153 888 1225T655 1298H361V746Z" />
+<glyph unicode="S" horiz-adv-x="1215" d="M598 649Q351 720 239 823T126 1079Q126 1251 263 1363T621 1476Q771 1476 888 1418T1070 1258T1135 1035H942Q942 1167 858 1242T621 1318Q479 1318 400 1256T320 1082Q320 993 395 932T652 819T936 707T1088 563T1138
+370Q1138 193 1000 87T631 -20Q481 -20 351 37T151 195T80 422H273Q273 290 370 214T631 137Q783 137 864 199T945 368T870 533T598 649Z" />
+<glyph unicode="T" horiz-adv-x="1222" d="M1175 1298H707V0H516V1298H49V1456H1175V1298Z" />
+<glyph unicode="U" horiz-adv-x="1328" d="M1194 1456V466Q1193 260 1065 129T716 -18L665 -20Q426 -20 284 109T140 464V1456H330V470Q330 312 417 225T665 137Q828 137 914 224T1001 469V1456H1194Z" />
+<glyph unicode="V" horiz-adv-x="1303" d="M651 255L1067 1456H1277L737 0H567L28 1456H237L651 255Z" />
+<glyph unicode="W" horiz-adv-x="1817" d="M483 459L511 267L552 440L840 1456H1002L1283 440L1323 264L1354 460L1580 1456H1773L1420 0H1245L945 1061L922 1172L899 1061L588 0H413L61 1456H253L483 459Z" />
+<glyph unicode="X" horiz-adv-x="1284" d="M644 898L993 1456H1219L759 734L1230 0H1002L644 568L284 0H57L529 734L68 1456H293L644 898Z" />
+<glyph unicode="Y" horiz-adv-x="1230" d="M613 725L993 1456H1211L709 543V0H517V543L15 1456H235L613 725Z" />
+<glyph unicode="Z" horiz-adv-x="1226" d="M313 157H1146V0H86V144L884 1298H99V1456H1114V1315L313 157Z" />
+<glyph unicode="[" horiz-adv-x="543" d="M523 1512H332V-160H523V-312H146V1664H523V1512Z" />
+<glyph unicode="\" horiz-adv-x="840" d="M40 1456H216L824 -125H648L40 1456Z" />
+<glyph unicode="]" horiz-adv-x="543" d="M9 1664H387V-312H9V-160H202V1512H9V1664Z" />
+<glyph unicode="^" horiz-adv-x="856" d="M426 1211L236 729H64L363 1456H490L788 729H617L426 1211Z" />
+<glyph unicode="_" horiz-adv-x="924" d="M920 -151H4V0H920V-151Z" />
+<glyph unicode="`" horiz-adv-x="633" d="M474 1242H315L57 1536H280L474 1242Z" />
+<glyph unicode="a" horiz-adv-x="1114" d="M808 0Q792 32 782 114Q653 -20 474 -20Q314 -20 212 70T109 300Q109 469 237 562T599 656H779V741Q779 838 721 895T550 953Q451 953 384 903T317 782H131Q131 863 188 938T344 1058T561 1102Q748 1102 854 1009T964
+751V253Q964 104 1002 16V0H808ZM501 141Q588 141 666 186T779 303V525H634Q294 525 294 326Q294 239 352 190T501 141Z" />
+<glyph unicode="b" horiz-adv-x="1149" d="M1056 529Q1056 281 942 131T636 -20Q431 -20 319 125L310 0H140V1536H325V963Q437 1102 634 1102T943 953T1056 545V529ZM871 550Q871 739 798 842T588 945Q405 945 325 775V307Q410 137 590 137Q723 137 797 240T871 550Z" />
+<glyph unicode="c" horiz-adv-x="1072" d="M574 131Q673 131 747 191T829 341H1004Q999 248 940 164T783 30T574 -20Q353 -20 223 127T92 531V562Q92 720 150 843T316 1034T573 1102Q755 1102 875 993T1004 710H829Q821 815 750 882T573 950Q432 950 355 849T277
+555V520Q277 333 354 232T574 131Z" />
+<glyph unicode="d" horiz-adv-x="1155" d="M95 550Q95 799 213 950T522 1102Q712 1102 823 972V1536H1008V0H838L829 116Q718 -20 520 -20Q332 -20 214 134T95 536V550ZM280 529Q280 345 356 241T566 137Q742 137 823 295V792Q740 945 568 945Q432 945 356 840T280 529Z" />
+<glyph unicode="e" horiz-adv-x="1085" d="M589 -20Q369 -20 231 124T93 511V545Q93 706 154 832T326 1030T566 1102Q777 1102 894 963T1011 565V488H278Q282 328 371 230T599 131Q697 131 765 171T884 277L997 189Q861 -20 589 -20ZM566 950Q454 950 378 869T284
+640H826V654Q818 795 750 872T566 950Z" />
+<glyph unicode="f" horiz-adv-x="711" d="M231 0V939H60V1082H231V1193Q231 1367 324 1462T587 1557Q651 1557 714 1540L704 1390Q657 1399 604 1399Q514 1399 465 1347T416 1196V1082H647V939H416V0H231Z" />
+<glyph unicode="g" horiz-adv-x="1149" d="M96 550Q96 803 213 952T523 1102Q721 1102 832 962L841 1082H1010V26Q1010 -184 886 -305T551 -426Q434 -426 322 -376T151 -239L247 -128Q366 -275 538 -275Q673 -275 748 -199T824 15V108Q713 -20 521 -20Q331 -20
+214 133T96 550ZM282 529Q282 346 357 242T567 137Q742 137 824 296V790Q739 945 569 945Q434 945 358 840T282 529Z" />
+<glyph unicode="h" horiz-adv-x="1128" d="M325 951Q448 1102 645 1102Q988 1102 991 715V0H806V716Q805 833 753 889T589 945Q499 945 431 897T325 771V0H140V1536H325V951Z" />
+<glyph unicode="i" horiz-adv-x="497" d="M341 0H156V1082H341V0ZM141 1369Q141 1414 168 1445T250 1476T332 1445T360 1369T332 1294T250 1264T169 1294T141 1369Z" />
+<glyph unicode="j" horiz-adv-x="489" d="M331 1082V-125Q331 -437 48 -437Q-13 -437 -65 -419V-271Q-33 -279 19 -279Q81 -279 113 -246T146 -129V1082H331ZM127 1369Q127 1413 154 1444T235 1476Q289 1476 317 1445T345 1369T317 1294T235 1264T154 1294T127 1369Z" />
+<glyph unicode="k" horiz-adv-x="1038" d="M442 501L326 380V0H141V1536H326V607L425 726L762 1082H987L566 630L1036 0H819L442 501Z" />
+<glyph unicode="l" horiz-adv-x="497" d="M341 0H156V1536H341V0Z" />
+<glyph unicode="m" horiz-adv-x="1795" d="M314 1082L319 962Q438 1102 640 1102Q867 1102 949 928Q1003 1006 1089 1054T1294 1102Q1650 1102 1656 725V0H1471V714Q1471 830 1418 887T1240 945Q1137 945 1069 884T990 718V0H804V709Q804 945 573 945Q391 945
+324 790V0H139V1082H314Z" />
+<glyph unicode="n" horiz-adv-x="1130" d="M315 1082L321 946Q445 1102 645 1102Q988 1102 991 715V0H806V716Q805 833 753 889T589 945Q499 945 431 897T325 771V0H140V1082H315Z" />
+<glyph unicode="o" horiz-adv-x="1168" d="M91 551Q91 710 153 837T327 1033T582 1102Q803 1102 939 949T1076 542V529Q1076 371 1016 246T843 50T584 -20Q364 -20 228 133T91 538V551ZM277 529Q277 349 360 240T584 131Q725 131 808 241T891 551Q891 729 807
+839T582 950Q445 950 361 841T277 529Z" />
+<glyph unicode="p" horiz-adv-x="1149" d="M1054 529Q1054 282 941 131T635 -20Q438 -20 325 105V-416H140V1082H309L318 962Q431 1102 632 1102Q827 1102 940 955T1054 546V529ZM869 550Q869 733 791 839T577 945Q409 945 325 796V279Q408 131 579 131Q712 131
+790 236T869 550Z" />
+<glyph unicode="q" horiz-adv-x="1164" d="M95 550Q95 805 212 953T526 1102Q718 1102 829 973L837 1082H1007V-416H822V100Q710 -20 524 -20Q328 -20 212 132T95 537V550ZM280 529Q280 343 358 237T570 131Q735 131 822 277V807Q734 950 572 950Q438 950 359
+844T280 529Z" />
+<glyph unicode="r" horiz-adv-x="693" d="M663 916Q621 923 572 923Q390 923 325 768V0H140V1082H320L323 957Q414 1102 581 1102Q635 1102 663 1088V916Z" />
+<glyph unicode="s" horiz-adv-x="1056" d="M770 287Q770 362 714 403T517 475T294 547T172 647T132 785Q132 918 244 1010T532 1102Q716 1102 830 1007T945 764H759Q759 840 695 895T532 950Q431 950 374 906T317 791Q317 724 370 690T561 625T786 551T913 448T955
+300Q955 155 839 68T538 -20Q408 -20 308 26T152 154T95 333H280Q285 240 354 186T538 131Q643 131 706 173T770 287Z" />
+<glyph unicode="t" horiz-adv-x="669" d="M391 1344V1082H593V939H391V268Q391 203 418 171T510 138Q542 138 598 150V0Q525 -20 456 -20Q332 -20 269 55T206 268V939H9V1082H206V1344H391Z" />
+<glyph unicode="u" horiz-adv-x="1129" d="M808 107Q700 -20 491 -20Q318 -20 228 80T136 378V1082H321V383Q321 137 521 137Q733 137 803 295V1082H988V0H812L808 107Z" />
+<glyph unicode="v" horiz-adv-x="992" d="M497 251L765 1082H954L566 0H425L33 1082H222L497 251Z" />
+<glyph unicode="w" horiz-adv-x="1539" d="M1098 255L1306 1082H1491L1176 0H1026L763 820L507 0H357L43 1082H227L440 272L692 1082H841L1098 255Z" />
+<glyph unicode="x" horiz-adv-x="1015" d="M503 687L743 1082H959L605 547L970 0H756L506 405L256 0H41L406 547L52 1082H266L503 687Z" />
+<glyph unicode="y" horiz-adv-x="969" d="M494 271L746 1082H944L509 -167Q408 -437 188 -437L153 -434L84 -421V-271L134 -275Q228 -275 280 -237T367 -98L408 12L22 1082H224L494 271Z" />
+<glyph unicode="z" horiz-adv-x="1015" d="M314 151H947V0H88V136L685 929H97V1082H917V951L314 151Z" />
+<glyph unicode="{" horiz-adv-x="693" d="M632 -366Q455 -316 366 -202T276 101V300Q276 543 64 543V688Q276 688 276 930V1138Q278 1321 365 1433T632 1597L670 1482Q461 1415 461 1133V931Q461 704 294 615Q461 525 461 296V90Q464 -185 670 -251L632 -366Z" />
+<glyph unicode="|" horiz-adv-x="499" d="M324 -270H175V1456H324V-270Z" />
+<glyph unicode="}" horiz-adv-x="693" d="M19 -251Q222 -186 229 80V300Q229 531 410 615Q229 697 229 930V1133Q229 1415 20 1482L58 1597Q235 1547 324 1435T414 1137V927Q414 688 626 688V543Q414 543 414 300V98Q414 -90 324 -203T58 -366L19 -251Z" />
+<glyph unicode="~" horiz-adv-x="1393" d="M1263 777Q1263 619 1170 511T939 402Q867 402 803 428T655 529T533 621T454 639Q376 639 334 586T292 438L131 436Q131 596 223 699T454 802Q530 802 600 770T758 658T910 567L939 565Q1015 565 1062 623T1110 776L1263 777Z" />
+<glyph unicode="&#xa0;" horiz-adv-x="507" />
+<glyph unicode="&#xa1;" horiz-adv-x="499" d="M170 684H338L351 -360H157L170 684ZM358 996Q358 951 331 920T249 889T167 920T139 996T167 1071T249 1101T330 1071T358 996Z" />
+<glyph unicode="&#xa2;" horiz-adv-x="1120" d="M586 131Q686 131 760 191T842 341H1017Q1011 215 912 115T669 -12V-245H484V-11Q305 23 205 165T105 527V562Q105 774 206 916T484 1092V1318H669V1095Q819 1072 915 966T1017 710H842Q834 815 763 882T586 950Q445
+950 368 849T290 555V520Q290 333 367 232T586 131Z" />
+<glyph unicode="&#xa3;" horiz-adv-x="1190" d="M449 622L457 402Q457 248 395 157H1128L1127 0H95V157H172Q212 166 237 231T264 393V401L256 622H91V779H251L242 1039Q242 1238 364 1357T687 1476Q877 1476 988 1370T1099 1087H908Q908 1194 845 1256T670 1318Q565
+1318 500 1241T435 1039L444 779H763V622H449Z" />
+<glyph unicode="&#xa4;" horiz-adv-x="1460" d="M1103 112Q944 -20 735 -20Q528 -20 369 110L235 -26L105 109L244 250Q140 406 140 608Q140 814 252 977L105 1128L235 1264L382 1114Q540 1234 735 1234Q931 1234 1090 1113L1239 1265L1371 1128L1220 974Q1330
+811 1330 608Q1330 412 1228 253L1371 109L1239 -27L1103 112ZM311 608Q311 485 368 379T524 212T735 151T946 212T1100 379T1157 608Q1157 730 1101 835T946 1001T735 1062Q622 1062 524 1002T369 836T311 608Z" />
+<glyph unicode="&#xa5;" horiz-adv-x="1075" d="M539 793L847 1456H1060L693 736H954V611H630V446H954V322H630V0H437V322H119V446H437V611H119V736H382L15 1456H231L539 793Z" />
+<glyph unicode="&#xa6;" horiz-adv-x="491" d="M147 -270V521H333V-270H147ZM333 698H147V1456H333V698Z" />
+<glyph unicode="&#xa7;" horiz-adv-x="1256" d="M1145 431Q1145 242 959 157Q1028 108 1064 40T1100 -128Q1100 -296 970 -395T612 -495Q500 -495 400 -467T229 -382Q90 -269 90 -64L276 -62Q276 -192 366 -267T612 -343Q748 -343 831 -285T914 -130Q914 -41 843
+11T563 126Q381 174 285 229T143 362T96 551Q96 737 278 825Q212 874 177 942T141 1110Q141 1276 274 1376T630 1476Q862 1476 992 1363T1122 1045H937Q937 1170 853 1247T630 1325Q488 1325 408 1268T327 1112Q327 1043 355 1003T450 931T661 858T889 782T1030
+698T1116 585T1145 431ZM602 691Q512 715 437 742Q357 723 320 673T282 553Q282 483 309 443T402 370T611 296T797 238Q875 258 917 308T959 428Q959 516 890 570T602 691Z" />
+<glyph unicode="&#xa8;" horiz-adv-x="856" d="M101 1371Q101 1416 128 1446T210 1477T292 1447T320 1371T292 1296T210 1266T129 1296T101 1371ZM531 1369Q531 1414 558 1445T640 1476T722 1445T750 1369T722 1294T640 1264T559 1294T531 1369Z" />
+<glyph unicode="&#xa9;" horiz-adv-x="1609" d="M1119 597Q1119 444 1033 364T788 283Q631 283 537 388T442 676V786Q442 962 537 1067T788 1173Q948 1173 1034 1091T1120 860H974Q974 959 927 1001T788 1044Q694 1044 640 975T586 783V670Q586 550 640 481T788
+412Q880 412 926 454T973 597H1119ZM206 729Q206 557 286 411T503 181T801 98T1098 181T1315 410T1395 729Q1395 899 1316 1044T1100 1272T801 1356Q641 1356 503 1274T286 1045T206 729ZM91 729Q91 931 184 1104T443 1376T801 1476T1158 1377T1416 1104T1510 729Q1510
+532 1420 360T1165 84T801 -21Q604 -21 439 82T182 358T91 729Z" />
+<glyph unicode="&#xaa;" horiz-adv-x="915" d="M618 705Q606 739 600 777Q524 691 396 691Q277 691 212 753T147 918Q147 1029 230 1089T486 1149H594V1201Q594 1336 470 1336Q401 1336 362 1309T322 1231L161 1243Q161 1346 247 1411T470 1476Q603 1476 680 1405T757
+1199V883Q757 786 783 705H618ZM435 828Q478 828 522 848T594 895V1037H482Q399 1036 355 1005T310 922Q310 828 435 828Z" />
+<glyph unicode="&#xab;" horiz-adv-x="961" d="M536 804L794 407H653L358 795V814L653 1203H794L536 804ZM610 548L868 151H727L432 539V558L727 947H868L610 548Z" />
+<glyph unicode="&#xac;" horiz-adv-x="1134" d="M958 375H772V639H127V800H958V375Z" />
+<glyph unicode="&#xad;" horiz-adv-x="565" d="M525 543H37V694H525V543Z" />
+<glyph unicode="&#xae;" horiz-adv-x="1610" d="M90 729Q90 931 183 1104T442 1376T800 1476T1157 1377T1415 1104T1509 729Q1509 532 1419 360T1164 84T800 -21Q603 -21 438 82T181 358T90 729ZM205 729Q205 557 285 411T502 181T800 98Q961 98 1099 182T1315
+412T1394 729Q1394 900 1316 1044T1099 1272T800 1356Q640 1356 502 1274T285 1045T205 729ZM653 654V316H512V1165H788Q941 1165 1025 1100T1110 909Q1110 786 982 721Q1104 671 1105 517V456Q1105 370 1122 332V316H977Q963 352 963 444T960 554Q944 650 829
+654H653ZM653 782H809Q881 784 925 817T969 904Q969 977 930 1007T791 1038H653V782Z" />
+<glyph unicode="&#xaf;" horiz-adv-x="938" d="M814 1302H142V1445H814V1302Z" />
+<glyph unicode="&#xb0;" horiz-adv-x="765" d="M130 1216Q130 1320 204 1398T385 1476Q489 1476 562 1399T636 1216Q636 1110 563 1035T385 960Q280 960 205 1035T130 1216ZM385 1088Q439 1088 476 1123T513 1216Q513 1274 476 1311T385 1349Q330 1349 293 1310T255
+1216T292 1125T385 1088Z" />
+<glyph unicode="&#xb1;" horiz-adv-x="1094" d="M649 854H1013V703H649V289H482V703H97V854H482V1267H649V854ZM970 0H135V152H970V0Z" />
+<glyph unicode="&#xb2;" horiz-adv-x="751" d="M683 667H84V775L384 1057Q493 1159 493 1228Q493 1277 461 1307T369 1338Q294 1338 259 1300T223 1205H66Q66 1319 149 1393T365 1467T574 1404T651 1230Q651 1126 544 1019L460 940L284 795H683V667Z" />
+<glyph unicode="&#xb3;" horiz-adv-x="751" d="M265 1126H349Q423 1126 459 1156T495 1235Q495 1280 464 1309T362 1338Q305 1338 268 1313T230 1246H73Q73 1344 154 1405T360 1467Q497 1467 575 1407T653 1242Q653 1187 618 1142T517 1071Q666 1030 666 887Q666
+781 581 719T360 656Q228 656 145 719T62 889H220Q220 844 259 814T366 784Q436 784 472 814T509 895Q509 1008 353 1010H265V1126Z" />
+<glyph unicode="&#xb4;" horiz-adv-x="642" d="M316 1536H540L272 1242H123L316 1536Z" />
+<glyph unicode="&#xb5;" horiz-adv-x="1160" d="M339 1082V449Q340 286 391 208T559 130Q758 130 820 282V1082H1006V0H839L830 115Q737 -20 567 -20Q420 -20 339 53V-416H154V1082H339Z" />
+<glyph unicode="&#xb6;" horiz-adv-x="1001" d="M646 0V520H562Q332 520 200 647T67 988Q67 1201 200 1328T563 1456H832V0H646Z" />
+<glyph unicode="&#xb7;" horiz-adv-x="534" d="M147 729Q147 777 175 809T261 841T347 809T377 729Q377 682 348 651T261 619T176 650T147 729Z" />
+<glyph unicode="&#xb8;" horiz-adv-x="507" d="M285 0L273 -52Q426 -79 426 -225Q426 -322 346 -378T123 -435L116 -328Q195 -328 238 -302T282 -229Q282 -185 250 -164T120 -134L152 0H285Z" />
+<glyph unicode="&#xb9;" horiz-adv-x="751" d="M495 667H338V1268L122 1211V1339L477 1456H495V667Z" />
+<glyph unicode="&#xba;" horiz-adv-x="931" d="M122 1123Q122 1281 216 1378T464 1476Q619 1476 713 1380T807 1117V1043Q807 884 714 787T466 690T217 787T122 1049V1123ZM285 1043Q285 943 333 886T466 829Q549 829 596 886T644 1045V1123Q644 1222 596 1279T464
+1336Q383 1336 335 1281T285 1129V1043Z" />
+<glyph unicode="&#xbb;" horiz-adv-x="960" d="M244 949L539 560V541L244 152H102L360 550L102 949H244ZM593 949L888 560V541L593 152H451L709 550L451 949H593Z" />
+<glyph unicode="&#xbc;" horiz-adv-x="1500" d="M458 664H301V1265L85 1208V1336L440 1453H458V664ZM443 118L339 184L1050 1322L1154 1256L443 118ZM1318 299H1425V169H1318V0H1161V169H786L780 271L1157 789H1318V299ZM938 299H1161V588L1144 560L938 299Z" />
+<glyph unicode="&#xbd;" horiz-adv-x="1589" d="M399 118L295 184L1006 1322L1110 1256L399 118ZM453 664H296V1265L80 1208V1336L435 1453H453V664ZM1481 0H882V108L1182 390Q1291 492 1291 561Q1291 610 1259 640T1167 671Q1092 671 1057 633T1021 538H864Q864
+652 947 726T1163 800T1372 737T1449 563Q1449 459 1342 352L1258 273L1082 128H1481V0Z" />
+<glyph unicode="&#xbe;" horiz-adv-x="1593" d="M570 118L466 184L1177 1322L1281 1256L570 118ZM1410 299H1517V169H1410V0H1253V169H878L872 271L1249 789H1410V299ZM1030 299H1253V588L1236 560L1030 299ZM314 1126H398Q472 1126 508 1156T544 1235Q544 1280
+513 1309T411 1338Q354 1338 317 1313T279 1246H122Q122 1344 203 1405T409 1467Q546 1467 624 1407T702 1242Q702 1187 667 1142T566 1071Q715 1030 715 887Q715 781 630 719T409 656Q277 656 194 719T111 889H269Q269 844 308 814T415 784Q485 784 521 814T558
+895Q558 1008 402 1010H314V1126Z" />
+<glyph unicode="&#xbf;" horiz-adv-x="969" d="M588 680Q587 574 567 511T498 388T358 233T255 37L253 0Q253 -109 311 -166T478 -224Q578 -224 640 -168T703 -20H888Q886 -181 774 -283T478 -385Q282 -385 175 -285T68 -5Q68 168 228 343L337 456Q403 534 403
+680H588ZM596 997Q596 952 569 921T487 890T405 921T377 997Q377 1041 405 1071T487 1101T568 1071T596 997Z" />
+<glyph unicode="&#xc0;" horiz-adv-x="1336" d="M973 380H363L226 0H28L584 1456H752L1309 0H1112L973 380ZM421 538H916L668 1219L421 538ZM778 1552H619L361 1846H584L778 1552Z" />
+<glyph unicode="&#xc1;" horiz-adv-x="1336" d="M973 380H363L226 0H28L584 1456H752L1309 0H1112L973 380ZM421 538H916L668 1219L421 538ZM763 1846H987L719 1552H570L763 1846Z" />
+<glyph unicode="&#xc2;" horiz-adv-x="1336" d="M973 380H363L226 0H28L584 1456H752L1309 0H1112L973 380ZM421 538H916L668 1219L421 538ZM975 1572V1562H822L672 1732L523 1562H370V1574L616 1846H728L975 1572Z" />
+<glyph unicode="&#xc3;" horiz-adv-x="1336" d="M973 380H363L226 0H28L584 1456H752L1309 0H1112L973 380ZM421 538H916L668 1219L421 538ZM1027 1814Q1027 1706 966 1639T812 1572Q771 1572 741 1582T663 1623T593 1660T543 1667Q502 1667 473 1636T444 1555L320
+1562Q320 1669 380 1739T534 1809Q569 1809 597 1799T673 1760T746 1722T803 1713Q846 1713 874 1747T903 1826L1027 1814Z" />
+<glyph unicode="&#xc4;" horiz-adv-x="1336" d="M973 380H363L226 0H28L584 1456H752L1309 0H1112L973 380ZM421 538H916L668 1219L421 538ZM350 1681Q350 1726 377 1756T459 1787T541 1757T569 1681T541 1606T459 1576T378 1606T350 1681ZM780 1679Q780 1724
+807 1755T889 1786T971 1755T999 1679T971 1604T889 1574T808 1604T780 1679Z" />
+<glyph unicode="&#xc5;" horiz-adv-x="1336" d="M973 380H363L226 0H28L584 1456H752L1309 0H1112L973 380ZM421 538H916L668 1219L421 538ZM887 1729Q887 1642 825 1584T672 1525Q580 1525 519 1584T457 1729T518 1876T672 1937T825 1876T887 1729ZM556 1729Q556
+1682 589 1648T672 1614Q720 1614 754 1647T788 1729T755 1812T672 1847Q622 1847 589 1812T556 1729Z" />
+<glyph unicode="&#xc6;" horiz-adv-x="1914" d="M1879 0H996L981 353H417L212 0H-14L866 1456H1817V1304H1126L1146 833H1736V682H1152L1174 151H1879V0ZM518 527H974L943 1260L518 527Z" />
+<glyph unicode="&#xc7;" horiz-adv-x="1333" d="M1240 462Q1213 231 1070 106T688 -20Q430 -20 275 165T119 660V800Q119 1003 191 1157T397 1393T705 1476Q937 1476 1077 1347T1240 988H1047Q1022 1162 939 1240T705 1318Q521 1318 417 1182T312 795V654Q312
+417 411 277T688 137Q848 137 933 209T1047 462H1240ZM751 -9L739 -61Q892 -88 892 -234Q892 -331 812 -387T589 -444L582 -337Q661 -337 704 -311T748 -238Q748 -194 716 -173T586 -143L618 -9H751Z" />
+<glyph unicode="&#xc8;" horiz-adv-x="1164" d="M992 673H361V157H1094V0H169V1456H1084V1298H361V830H992V673ZM725 1564H566L308 1858H531L725 1564Z" />
+<glyph unicode="&#xc9;" horiz-adv-x="1164" d="M992 673H361V157H1094V0H169V1456H1084V1298H361V830H992V673ZM710 1858H934L666 1564H517L710 1858Z" />
+<glyph unicode="&#xca;" horiz-adv-x="1164" d="M992 673H361V157H1094V0H169V1456H1084V1298H361V830H992V673ZM922 1584V1574H769L619 1744L470 1574H317V1586L563 1858H675L922 1584Z" />
+<glyph unicode="&#xcb;" horiz-adv-x="1164" d="M992 673H361V157H1094V0H169V1456H1084V1298H361V830H992V673ZM297 1693Q297 1738 324 1768T406 1799T488 1769T516 1693T488 1618T406 1588T325 1618T297 1693ZM727 1691Q727 1736 754 1767T836 1798T918 1767T946
+1691T918 1616T836 1586T755 1616T727 1691Z" />
+<glyph unicode="&#xcc;" horiz-adv-x="557" d="M375 0H183V1456H375V0ZM385 1564H226L-32 1858H191L385 1564Z" />
+<glyph unicode="&#xcd;" horiz-adv-x="557" d="M375 0H183V1456H375V0ZM369 1858H593L325 1564H176L369 1858Z" />
+<glyph unicode="&#xce;" horiz-adv-x="557" d="M375 0H183V1456H375V0ZM582 1584V1574H429L279 1744L130 1574H-23V1586L223 1858H335L582 1584Z" />
+<glyph unicode="&#xcf;" horiz-adv-x="557" d="M375 0H183V1456H375V0ZM-43 1693Q-43 1738 -16 1768T66 1799T148 1769T176 1693T148 1618T66 1588T-15 1618T-43 1693ZM387 1691Q387 1736 414 1767T496 1798T578 1767T606 1691T578 1616T496 1586T415 1616T387 1691Z" />
+<glyph unicode="&#xd0;" horiz-adv-x="1373" d="M199 0V666H7V817H199V1456H610Q800 1456 946 1372T1171 1133T1252 777V684Q1252 478 1173 323T946 85T602 0H199ZM643 666H391V157H592Q814 157 937 294T1062 680V773Q1062 1021 946 1158T615 1298H391V817H643V666Z" />
+<glyph unicode="&#xd1;" horiz-adv-x="1460" d="M1288 0H1095L362 1122V0H169V1456H362L1097 329V1456H1288V0ZM1081 1814Q1081 1706 1020 1639T866 1572Q825 1572 795 1582T717 1623T647 1660T597 1667Q556 1667 527 1636T498 1555L374 1562Q374 1669 434 1739T588
+1809Q623 1809 651 1799T727 1760T800 1722T857 1713Q900 1713 928 1747T957 1826L1081 1814Z" />
+<glyph unicode="&#xd2;" horiz-adv-x="1408" d="M1289 681Q1289 467 1217 308T1013 64T705 -20Q533 -20 400 64T194 305T118 668V773Q118 983 191 1144T397 1390T703 1476Q878 1476 1011 1392T1217 1147T1289 773V681ZM1098 775Q1098 1034 994 1172T703 1311Q521
+1311 417 1173T309 788V681Q309 430 414 287T705 143Q891 143 993 278T1098 667V775ZM812 1554H653L395 1848H618L812 1554Z" />
+<glyph unicode="&#xd3;" horiz-adv-x="1408" d="M1289 681Q1289 467 1217 308T1013 64T705 -20Q533 -20 400 64T194 305T118 668V773Q118 983 191 1144T397 1390T703 1476Q878 1476 1011 1392T1217 1147T1289 773V681ZM1098 775Q1098 1034 994 1172T703 1311Q521
+1311 417 1173T309 788V681Q309 430 414 287T705 143Q891 143 993 278T1098 667V775ZM797 1848H1021L753 1554H604L797 1848Z" />
+<glyph unicode="&#xd4;" horiz-adv-x="1408" d="M1289 681Q1289 467 1217 308T1013 64T705 -20Q533 -20 400 64T194 305T118 668V773Q118 983 191 1144T397 1390T703 1476Q878 1476 1011 1392T1217 1147T1289 773V681ZM1098 775Q1098 1034 994 1172T703 1311Q521
+1311 417 1173T309 788V681Q309 430 414 287T705 143Q891 143 993 278T1098 667V775ZM1009 1574V1564H856L706 1734L557 1564H404V1576L650 1848H762L1009 1574Z" />
+<glyph unicode="&#xd5;" horiz-adv-x="1408" d="M1289 681Q1289 467 1217 308T1013 64T705 -20Q533 -20 400 64T194 305T118 668V773Q118 983 191 1144T397 1390T703 1476Q878 1476 1011 1392T1217 1147T1289 773V681ZM1098 775Q1098 1034 994 1172T703 1311Q521
+1311 417 1173T309 788V681Q309 430 414 287T705 143Q891 143 993 278T1098 667V775ZM1061 1816Q1061 1708 1000 1641T846 1574Q805 1574 775 1584T697 1625T627 1662T577 1669Q536 1669 507 1638T478 1557L354 1564Q354 1671 414 1741T568 1811Q603 1811 631 1801T707
+1762T780 1724T837 1715Q880 1715 908 1749T937 1828L1061 1816Z" />
+<glyph unicode="&#xd6;" horiz-adv-x="1408" d="M1289 681Q1289 467 1217 308T1013 64T705 -20Q533 -20 400 64T194 305T118 668V773Q118 983 191 1144T397 1390T703 1476Q878 1476 1011 1392T1217 1147T1289 773V681ZM1098 775Q1098 1034 994 1172T703 1311Q521
+1311 417 1173T309 788V681Q309 430 414 287T705 143Q891 143 993 278T1098 667V775ZM384 1683Q384 1728 411 1758T493 1789T575 1759T603 1683T575 1608T493 1578T412 1608T384 1683ZM814 1681Q814 1726 841 1757T923 1788T1005 1757T1033 1681T1005 1606T923
+1576T842 1606T814 1681Z" />
+<glyph unicode="&#xd7;" horiz-adv-x="1092" d="M89 329L419 665L91 1000L210 1123L539 788L868 1123L987 1000L659 665L989 329L870 206L539 543L208 206L89 329Z" />
+<glyph unicode="&#xd8;" horiz-adv-x="1408" d="M1289 681Q1289 467 1217 308T1013 64T705 -20Q534 -20 403 62L306 -93H164L308 138Q118 330 118 690V773Q118 983 191 1144T397 1390T703 1476Q917 1476 1065 1351L1168 1516H1309L1150 1261Q1287 1074 1289 780V681ZM309
+681Q309 437 407 296L971 1200Q869 1311 703 1311Q521 1311 417 1173T309 788V681ZM1098 775Q1098 957 1042 1088L493 207Q584 143 705 143Q891 143 993 278T1098 667V775Z" />
+<glyph unicode="&#xd9;" horiz-adv-x="1328" d="M1194 1456V466Q1193 260 1065 129T716 -18L665 -20Q426 -20 284 109T140 464V1456H330V470Q330 312 417 225T665 137Q828 137 914 224T1001 469V1456H1194ZM773 1552H614L356 1846H579L773 1552Z" />
+<glyph unicode="&#xda;" horiz-adv-x="1328" d="M1194 1456V466Q1193 260 1065 129T716 -18L665 -20Q426 -20 284 109T140 464V1456H330V470Q330 312 417 225T665 137Q828 137 914 224T1001 469V1456H1194ZM758 1846H982L714 1552H565L758 1846Z" />
+<glyph unicode="&#xdb;" horiz-adv-x="1328" d="M1194 1456V466Q1193 260 1065 129T716 -18L665 -20Q426 -20 284 109T140 464V1456H330V470Q330 312 417 225T665 137Q828 137 914 224T1001 469V1456H1194ZM970 1572V1562H817L667 1732L518 1562H365V1574L611
+1846H723L970 1572Z" />
+<glyph unicode="&#xdc;" horiz-adv-x="1328" d="M1194 1456V466Q1193 260 1065 129T716 -18L665 -20Q426 -20 284 109T140 464V1456H330V470Q330 312 417 225T665 137Q828 137 914 224T1001 469V1456H1194ZM345 1681Q345 1726 372 1756T454 1787T536 1757T564
+1681T536 1606T454 1576T373 1606T345 1681ZM775 1679Q775 1724 802 1755T884 1786T966 1755T994 1679T966 1604T884 1574T803 1604T775 1679Z" />
+<glyph unicode="&#xdd;" horiz-adv-x="1230" d="M613 725L993 1456H1211L709 543V0H517V543L15 1456H235L613 725ZM708 1846H932L664 1552H515L708 1846Z" />
+<glyph unicode="&#xde;" horiz-adv-x="1210" d="M352 1456V1163H631Q778 1163 888 1111T1057 961T1117 738Q1117 544 985 429T626 313H352V0H166V1456H352ZM352 1011V465H629Q771 465 851 540T931 736Q931 859 851 934T635 1011H352Z" />
+<glyph unicode="&#xdf;" horiz-adv-x="1218" d="M324 0H139V1111Q139 1319 242 1436T532 1554Q712 1554 810 1465T909 1216Q909 1091 845 990T781 819Q781 768 818 721T950 601T1087 461T1130 317Q1130 158 1029 69T745 -20Q664 -20 574 2T445 52L488 207Q537
+175 604 153T725 131Q832 131 888 178T945 307Q945 359 908 407T777 528T639 671T595 821Q595 910 664 1013T734 1201Q734 1295 682 1348T542 1402Q324 1402 324 1109V0Z" />
+<glyph unicode="&#xe0;" horiz-adv-x="1114" d="M808 0Q792 32 782 114Q653 -20 474 -20Q314 -20 212 70T109 300Q109 469 237 562T599 656H779V741Q779 838 721 895T550 953Q451 953 384 903T317 782H131Q131 863 188 938T344 1058T561 1102Q748 1102 854 1009T964
+751V253Q964 104 1002 16V0H808ZM501 141Q588 141 666 186T779 303V525H634Q294 525 294 326Q294 239 352 190T501 141ZM687 1242H528L270 1536H493L687 1242Z" />
+<glyph unicode="&#xe1;" horiz-adv-x="1114" d="M808 0Q792 32 782 114Q653 -20 474 -20Q314 -20 212 70T109 300Q109 469 237 562T599 656H779V741Q779 838 721 895T550 953Q451 953 384 903T317 782H131Q131 863 188 938T344 1058T561 1102Q748 1102 854 1009T964
+751V253Q964 104 1002 16V0H808ZM501 141Q588 141 666 186T779 303V525H634Q294 525 294 326Q294 239 352 190T501 141ZM672 1536H896L628 1242H479L672 1536Z" />
+<glyph unicode="&#xe2;" horiz-adv-x="1114" d="M808 0Q792 32 782 114Q653 -20 474 -20Q314 -20 212 70T109 300Q109 469 237 562T599 656H779V741Q779 838 721 895T550 953Q451 953 384 903T317 782H131Q131 863 188 938T344 1058T561 1102Q748 1102 854 1009T964
+751V253Q964 104 1002 16V0H808ZM501 141Q588 141 666 186T779 303V525H634Q294 525 294 326Q294 239 352 190T501 141ZM884 1262V1252H731L581 1422L432 1252H279V1264L525 1536H637L884 1262Z" />
+<glyph unicode="&#xe3;" horiz-adv-x="1114" d="M808 0Q792 32 782 114Q653 -20 474 -20Q314 -20 212 70T109 300Q109 469 237 562T599 656H779V741Q779 838 721 895T550 953Q451 953 384 903T317 782H131Q131 863 188 938T344 1058T561 1102Q748 1102 854 1009T964
+751V253Q964 104 1002 16V0H808ZM501 141Q588 141 666 186T779 303V525H634Q294 525 294 326Q294 239 352 190T501 141ZM936 1504Q936 1396 875 1329T721 1262Q680 1262 650 1272T572 1313T502 1350T452 1357Q411 1357 382 1326T353 1245L229 1252Q229 1359 289
+1429T443 1499Q478 1499 506 1489T582 1450T655 1412T712 1403Q755 1403 783 1437T812 1516L936 1504Z" />
+<glyph unicode="&#xe4;" horiz-adv-x="1114" d="M808 0Q792 32 782 114Q653 -20 474 -20Q314 -20 212 70T109 300Q109 469 237 562T599 656H779V741Q779 838 721 895T550 953Q451 953 384 903T317 782H131Q131 863 188 938T344 1058T561 1102Q748 1102 854 1009T964
+751V253Q964 104 1002 16V0H808ZM501 141Q588 141 666 186T779 303V525H634Q294 525 294 326Q294 239 352 190T501 141ZM259 1371Q259 1416 286 1446T368 1477T450 1447T478 1371T450 1296T368 1266T287 1296T259 1371ZM689 1369Q689 1414 716 1445T798 1476T880
+1445T908 1369T880 1294T798 1264T717 1294T689 1369Z" />
+<glyph unicode="&#xe5;" horiz-adv-x="1114" d="M808 0Q792 32 782 114Q653 -20 474 -20Q314 -20 212 70T109 300Q109 469 237 562T599 656H779V741Q779 838 721 895T550 953Q451 953 384 903T317 782H131Q131 863 188 938T344 1058T561 1102Q748 1102 854 1009T964
+751V253Q964 104 1002 16V0H808ZM501 141Q588 141 666 186T779 303V525H634Q294 525 294 326Q294 239 352 190T501 141ZM796 1419Q796 1332 734 1274T581 1215Q489 1215 428 1274T366 1419T427 1566T581 1627T734 1566T796 1419ZM465 1419Q465 1372 498 1338T581
+1304Q629 1304 663 1337T697 1419T664 1502T581 1537Q531 1537 498 1502T465 1419Z" />
+<glyph unicode="&#xe6;" horiz-adv-x="1729" d="M1262 -20Q1001 -20 865 160Q800 74 687 27T433 -20Q266 -20 172 66T78 304Q78 461 191 548T526 635H749V720Q749 827 694 888T535 950Q430 950 360 895T290 759L106 778Q106 921 227 1011T535 1102Q650 1102 738
+1061T876 936Q939 1015 1026 1058T1218 1102Q1428 1102 1544 974T1660 612V497H932Q939 321 1026 226T1262 130Q1410 130 1531 206L1578 237L1642 101Q1484 -20 1262 -20ZM469 130Q541 130 620 167T749 258V495H521Q404 493 334 438T264 300Q264 223 317 177T469
+130ZM1218 950Q1103 950 1029 865T937 640H1475V671Q1475 803 1408 876T1218 950Z" />
+<glyph unicode="&#xe7;" horiz-adv-x="1072" d="M574 131Q673 131 747 191T829 341H1004Q999 248 940 164T783 30T574 -20Q353 -20 223 127T92 531V562Q92 720 150 843T316 1034T573 1102Q755 1102 875 993T1004 710H829Q821 815 750 882T573 950Q432 950 355
+849T277 555V520Q277 333 354 232T574 131ZM604 -9L592 -61Q745 -88 745 -234Q745 -331 665 -387T442 -444L435 -337Q514 -337 557 -311T601 -238Q601 -194 569 -173T439 -143L471 -9H604Z" />
+<glyph unicode="&#xe8;" horiz-adv-x="1085" d="M589 -20Q369 -20 231 124T93 511V545Q93 706 154 832T326 1030T566 1102Q777 1102 894 963T1011 565V488H278Q282 328 371 230T599 131Q697 131 765 171T884 277L997 189Q861 -20 589 -20ZM566 950Q454 950 378
+869T284 640H826V654Q818 795 750 872T566 950ZM671 1242H512L254 1536H477L671 1242Z" />
+<glyph unicode="&#xe9;" horiz-adv-x="1085" d="M589 -20Q369 -20 231 124T93 511V545Q93 706 154 832T326 1030T566 1102Q777 1102 894 963T1011 565V488H278Q282 328 371 230T599 131Q697 131 765 171T884 277L997 189Q861 -20 589 -20ZM566 950Q454 950 378
+869T284 640H826V654Q818 795 750 872T566 950ZM656 1536H880L612 1242H463L656 1536Z" />
+<glyph unicode="&#xea;" horiz-adv-x="1085" d="M589 -20Q369 -20 231 124T93 511V545Q93 706 154 832T326 1030T566 1102Q777 1102 894 963T1011 565V488H278Q282 328 371 230T599 131Q697 131 765 171T884 277L997 189Q861 -20 589 -20ZM566 950Q454 950 378
+869T284 640H826V654Q818 795 750 872T566 950ZM868 1262V1252H715L565 1422L416 1252H263V1264L509 1536H621L868 1262Z" />
+<glyph unicode="&#xeb;" horiz-adv-x="1085" d="M589 -20Q369 -20 231 124T93 511V545Q93 706 154 832T326 1030T566 1102Q777 1102 894 963T1011 565V488H278Q282 328 371 230T599 131Q697 131 765 171T884 277L997 189Q861 -20 589 -20ZM566 950Q454 950 378
+869T284 640H826V654Q818 795 750 872T566 950ZM243 1371Q243 1416 270 1446T352 1477T434 1447T462 1371T434 1296T352 1266T271 1296T243 1371ZM673 1369Q673 1414 700 1445T782 1476T864 1445T892 1369T864 1294T782 1264T701 1294T673 1369Z" />
+<glyph unicode="&#xec;" horiz-adv-x="506" d="M341 0H155V1082H341V0ZM615 1497H456L198 1791H421L615 1497Z" />
+<glyph unicode="&#xed;" horiz-adv-x="506" d="M341 0H155V1082H341V0ZM343 1791H567L299 1497H150L343 1791Z" />
+<glyph unicode="&#xee;" horiz-adv-x="506" d="M341 0H155V1082H341V0ZM556 1261V1251H403L253 1421L104 1251H-49V1263L197 1535H309L556 1261Z" />
+<glyph unicode="&#xef;" horiz-adv-x="506" d="M341 0H155V1082H341V0ZM-69 1370Q-69 1415 -42 1445T40 1476T122 1446T150 1370T122 1295T40 1265T-41 1295T-69 1370ZM361 1368Q361 1413 388 1444T470 1475T552 1444T580 1368T552 1293T470 1263T389 1293T361 1368Z" />
+<glyph unicode="&#xf0;" horiz-adv-x="1200" d="M820 1301Q1069 1037 1069 628V535Q1069 377 1011 251T844 52T602 -20Q467 -20 357 44T187 221T126 467Q126 614 182 730T341 912T574 977Q737 977 858 863Q810 1058 669 1199L451 1051L378 1150L570 1281Q438 1372
+255 1421L312 1580Q551 1526 726 1387L915 1516L988 1416L820 1301ZM884 635L882 691Q849 752 780 788T618 825Q473 825 392 730T311 467Q311 327 394 229T606 131Q731 131 807 244T884 541V635Z" />
+<glyph unicode="&#xf1;" horiz-adv-x="1130" d="M315 1082L321 946Q445 1102 645 1102Q988 1102 991 715V0H806V716Q805 833 753 889T589 945Q499 945 431 897T325 771V0H140V1082H315ZM927 1504Q927 1396 866 1329T712 1262Q671 1262 641 1272T563 1313T493 1350T443
+1357Q402 1357 373 1326T344 1245L220 1252Q220 1359 280 1429T434 1499Q469 1499 497 1489T573 1450T646 1412T703 1403Q746 1403 774 1437T803 1516L927 1504Z" />
+<glyph unicode="&#xf2;" horiz-adv-x="1168" d="M91 551Q91 710 153 837T327 1033T582 1102Q803 1102 939 949T1076 542V529Q1076 371 1016 246T843 50T584 -20Q364 -20 228 133T91 538V551ZM277 529Q277 349 360 240T584 131Q725 131 808 241T891 551Q891 729
+807 839T582 950Q445 950 361 841T277 529ZM681 1242H522L264 1536H487L681 1242Z" />
+<glyph unicode="&#xf3;" horiz-adv-x="1168" d="M91 551Q91 710 153 837T327 1033T582 1102Q803 1102 939 949T1076 542V529Q1076 371 1016 246T843 50T584 -20Q364 -20 228 133T91 538V551ZM277 529Q277 349 360 240T584 131Q725 131 808 241T891 551Q891 729
+807 839T582 950Q445 950 361 841T277 529ZM666 1536H890L622 1242H473L666 1536Z" />
+<glyph unicode="&#xf4;" horiz-adv-x="1168" d="M91 551Q91 710 153 837T327 1033T582 1102Q803 1102 939 949T1076 542V529Q1076 371 1016 246T843 50T584 -20Q364 -20 228 133T91 538V551ZM277 529Q277 349 360 240T584 131Q725 131 808 241T891 551Q891 729
+807 839T582 950Q445 950 361 841T277 529ZM878 1262V1252H725L575 1422L426 1252H273V1264L519 1536H631L878 1262Z" />
+<glyph unicode="&#xf5;" horiz-adv-x="1168" d="M91 551Q91 710 153 837T327 1033T582 1102Q803 1102 939 949T1076 542V529Q1076 371 1016 246T843 50T584 -20Q364 -20 228 133T91 538V551ZM277 529Q277 349 360 240T584 131Q725 131 808 241T891 551Q891 729
+807 839T582 950Q445 950 361 841T277 529ZM930 1504Q930 1396 869 1329T715 1262Q674 1262 644 1272T566 1313T496 1350T446 1357Q405 1357 376 1326T347 1245L223 1252Q223 1359 283 1429T437 1499Q472 1499 500 1489T576 1450T649 1412T706 1403Q749 1403 777
+1437T806 1516L930 1504Z" />
+<glyph unicode="&#xf6;" horiz-adv-x="1168" d="M91 551Q91 710 153 837T327 1033T582 1102Q803 1102 939 949T1076 542V529Q1076 371 1016 246T843 50T584 -20Q364 -20 228 133T91 538V551ZM277 529Q277 349 360 240T584 131Q725 131 808 241T891 551Q891 729
+807 839T582 950Q445 950 361 841T277 529ZM253 1371Q253 1416 280 1446T362 1477T444 1447T472 1371T444 1296T362 1266T281 1296T253 1371ZM683 1369Q683 1414 710 1445T792 1476T874 1445T902 1369T874 1294T792 1264T711 1294T683 1369Z" />
+<glyph unicode="&#xf7;" horiz-adv-x="1169" d="M1069 600H71V784H1069V600ZM461 1098Q461 1146 489 1178T575 1210T661 1178T691 1098Q691 1051 662 1020T575 989T490 1020T461 1098ZM461 281Q461 329 489 361T575 393T661 361T691 281Q691 235 662 204T575 172T490
+203T461 281Z" />
+<glyph unicode="&#xf8;" horiz-adv-x="1160" d="M91 551Q91 710 152 836T326 1032T582 1102Q692 1102 786 1060L859 1208H983L881 1003Q1076 849 1076 529Q1076 371 1014 244T840 49T584 -20Q480 -20 394 15L320 -134H196L296 69Q91 218 91 551ZM276 529Q276 335
+373 224L716 918Q654 950 582 950Q444 950 360 841T276 529ZM890 551Q890 733 803 844L463 156Q518 131 584 131Q723 131 806 240T890 535V551Z" />
+<glyph unicode="&#xf9;" horiz-adv-x="1129" d="M808 107Q700 -20 491 -20Q318 -20 228 80T136 378V1082H321V383Q321 137 521 137Q733 137 803 295V1082H988V0H812L808 107ZM673 1242H514L256 1536H479L673 1242Z" />
+<glyph unicode="&#xfa;" horiz-adv-x="1129" d="M808 107Q700 -20 491 -20Q318 -20 228 80T136 378V1082H321V383Q321 137 521 137Q733 137 803 295V1082H988V0H812L808 107ZM658 1536H882L614 1242H465L658 1536Z" />
+<glyph unicode="&#xfb;" horiz-adv-x="1129" d="M808 107Q700 -20 491 -20Q318 -20 228 80T136 378V1082H321V383Q321 137 521 137Q733 137 803 295V1082H988V0H812L808 107ZM870 1262V1252H717L567 1422L418 1252H265V1264L511 1536H623L870 1262Z" />
+<glyph unicode="&#xfc;" horiz-adv-x="1129" d="M808 107Q700 -20 491 -20Q318 -20 228 80T136 378V1082H321V383Q321 137 521 137Q733 137 803 295V1082H988V0H812L808 107ZM245 1371Q245 1416 272 1446T354 1477T436 1447T464 1371T436 1296T354 1266T273 1296T245
+1371ZM675 1369Q675 1414 702 1445T784 1476T866 1445T894 1369T866 1294T784 1264T703 1294T675 1369Z" />
+<glyph unicode="&#xfd;" horiz-adv-x="969" d="M494 271L746 1082H944L509 -167Q408 -437 188 -437L153 -434L84 -421V-271L134 -275Q228 -275 280 -237T367 -98L408 12L22 1082H224L494 271ZM599 1536H823L555 1242H406L599 1536Z" />
+<glyph unicode="&#xfe;" horiz-adv-x="1180" d="M1063 529Q1063 282 950 131T644 -20Q447 -20 334 105V-416H149V1536H334V970Q447 1102 641 1102Q836 1102 949 955T1063 546V529ZM878 550Q878 733 800 839T586 945Q418 945 334 796V279Q417 131 588 131Q721 131
+799 236T878 550Z" />
+<glyph unicode="&#xff;" horiz-adv-x="969" d="M494 271L746 1082H944L509 -167Q408 -437 188 -437L153 -434L84 -421V-271L134 -275Q228 -275 280 -237T367 -98L408 12L22 1082H224L494 271ZM186 1371Q186 1416 213 1446T295 1477T377 1447T405 1371T377 1296T295
+1266T214 1296T186 1371ZM616 1369Q616 1414 643 1445T725 1476T807 1445T835 1369T807 1294T725 1264T644 1294T616 1369Z" />
+<glyph unicode="&#x2013;" horiz-adv-x="1344" d="M1421 651H419V802H1421V651Z" />
+<glyph unicode="&#x2014;" horiz-adv-x="1599" d="M1737 651H401V802H1737V651Z" />
+<glyph unicode="&#x2018;" horiz-adv-x="409" d="M270 1555L376 1483Q283 1356 280 1209V1073H96V1189Q96 1291 144 1391T270 1555Z" />
+<glyph unicode="&#x2019;" horiz-adv-x="409" d="M153 1046L48 1118Q141 1248 144 1392V1536H327V1406Q326 1306 278 1207T153 1046Z" />
+<glyph unicode="&#x201a;" horiz-adv-x="407" d="M141 -283L36 -210Q127 -83 130 63V181H315V81Q315 -20 266 -121T141 -283Z" />
+<glyph unicode="&#x201c;" horiz-adv-x="724" d="M278 1555L384 1483Q291 1356 288 1209V1073H104V1189Q104 1291 152 1391T278 1555ZM593 1555L699 1483Q606 1356 603 1209V1073H419V1189Q419 1291 467 1391T593 1555Z" />
+<glyph unicode="&#x201d;" horiz-adv-x="731" d="M165 1046L60 1118Q153 1248 156 1392V1536H339V1406Q338 1306 290 1207T165 1046ZM472 1046L367 1118Q460 1248 463 1392V1536H646V1406Q645 1306 597 1207T472 1046Z" />
+<glyph unicode="&#x201e;" horiz-adv-x="705" d="M141 -301L36 -229Q127 -92 130 61V246H315V82Q315 -26 266 -131T141 -301ZM437 -301L332 -229Q423 -92 426 61V246H612V82Q612 -25 564 -129T437 -301Z" />
+<glyph unicode="&#x2022;" horiz-adv-x="690" d="M138 772Q138 859 193 915T341 971Q432 971 489 917T546 769V732Q546 645 491 590T342 535Q249 535 194 590T138 734V772Z" />
+<glyph unicode="&#x2039;" horiz-adv-x="614" d="M286 550L544 153H403L108 541V560L403 949H544L286 550Z" />
+<glyph unicode="&#x203a;" horiz-adv-x="614" d="M231 949L526 560V541L231 152H89L347 550L89 949H231Z" />
+</font>
+</defs>
+</svg>
diff --git a/site/assets/fonts/Roboto_400.ttf b/site/assets/fonts/Roboto_400.ttf
new file mode 100644
index 00000000..b91bf3f7
--- /dev/null
+++ b/site/assets/fonts/Roboto_400.ttf
Binary files differ
diff --git a/site/assets/fonts/Roboto_400.woff b/site/assets/fonts/Roboto_400.woff
new file mode 100644
index 00000000..92dfacc6
--- /dev/null
+++ b/site/assets/fonts/Roboto_400.woff
Binary files differ
diff --git a/site/assets/fonts/Roboto_400.woff2 b/site/assets/fonts/Roboto_400.woff2
new file mode 100644
index 00000000..7e854e66
--- /dev/null
+++ b/site/assets/fonts/Roboto_400.woff2
Binary files differ
diff --git a/site/assets/fonts/Roboto_500.eot b/site/assets/fonts/Roboto_500.eot
new file mode 100644
index 00000000..849f4a50
--- /dev/null
+++ b/site/assets/fonts/Roboto_500.eot
Binary files differ
diff --git a/site/assets/fonts/Roboto_500.svg b/site/assets/fonts/Roboto_500.svg
new file mode 100644
index 00000000..67eecf44
--- /dev/null
+++ b/site/assets/fonts/Roboto_500.svg
@@ -0,0 +1,305 @@
+<?xml version="1.0" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<svg xmlns="http://www.w3.org/2000/svg">
+<defs >
+<font id="Roboto" horiz-adv-x="1176" ><font-face
+ font-family="Roboto Medium"
+ units-per-em="2048"
+ panose-1="2 0 0 0 0 0 0 0 0 0"
+ ascent="1900"
+ descent="-500"
+ alphabetic="0" />
+<glyph unicode=" " horiz-adv-x="510" />
+<glyph unicode="!" horiz-adv-x="549" d="M382 429H173L150 1456H406L382 429ZM143 115Q143 172 180 209T281 247T382 210T419 115Q419 60 383 23T281 -14T179 23T143 115Z" />
+<glyph unicode="&quot;" horiz-adv-x="664" d="M275 1399L240 1012H101V1536H275V1399ZM576 1399L541 1012H402V1536H576V1399Z" />
+<glyph unicode="#" horiz-adv-x="1250" d="M719 410H495L419 0H251L327 410H96V568H357L415 881H172V1040H445L523 1456H690L612 1040H837L915 1456H1082L1004 1040H1212V881H974L916 568H1137V410H886L810 0H643L719 410ZM525 568H749L807 881H583L525 568Z" />
+<glyph unicode="$" horiz-adv-x="1164" d="M819 380Q819 465 765 520T585 620T389 703Q156 828 156 1073Q156 1239 257 1346T531 1473V1691H691V1471Q865 1446 960 1324T1055 1005H813Q813 1131 757 1203T603 1276Q507 1276 453 1224T399 1075Q399 988 452 936T634
+836T835 749T958 658T1035 539T1062 382Q1062 213 959 108T670 -16V-211H511V-17Q313 5 207 125T100 443H343Q343 317 406 248T586 179Q700 179 759 234T819 380Z" />
+<glyph unicode="%" horiz-adv-x="1504" d="M99 1176Q99 1308 184 1392T407 1477Q547 1477 631 1393T716 1171V1099Q716 968 632 884T409 800Q274 800 187 882T99 1105V1176ZM269 1099Q269 1030 307 988T409 945Q471 945 509 987T547 1103V1176Q547 1245 509 1288T407
+1331T307 1288T269 1173V1099ZM799 357Q799 491 886 574T1108 657Q1244 657 1330 574T1417 350V279Q1417 149 1334 65T1110 -20T885 63T799 284V357ZM969 279Q969 211 1008 168T1110 124Q1174 124 1210 165T1247 282V357Q1247 427 1208 469T1108 511Q1046 511 1008
+469T969 353V279ZM459 109L334 181L1045 1319L1170 1247L459 109Z" />
+<glyph unicode="&amp;" horiz-adv-x="1309" d="M86 393Q86 494 141 578T358 779Q273 886 240 961T206 1106Q206 1277 310 1376T590 1476Q749 1476 850 1383T952 1151Q952 1060 906 984T755 831L656 759L937 427Q998 547 998 694H1209Q1209 425 1083 253L1297 0H1015L933
+97Q777 -20 561 -20T216 94T86 393ZM568 174Q691 174 798 256L480 631L449 609Q329 518 329 401Q329 300 394 237T568 174ZM434 1112Q434 1028 537 901L648 977L679 1002Q741 1057 741 1143Q741 1200 698 1240T589 1281Q518 1281 476 1233T434 1112Z" />
+<glyph unicode="&apos;" horiz-adv-x="346" d="M267 1411L241 1020H82V1536H267V1411Z" />
+<glyph unicode="(" horiz-adv-x="714" d="M128 592Q128 823 190 1030T372 1401T626 1631L674 1489Q533 1382 446 1163T350 660L349 574Q349 271 434 34T674 -328L626 -463Q492 -397 372 -233T190 138T128 592Z" />
+<glyph unicode=")" horiz-adv-x="722" d="M593 576Q593 354 532 148T347 -228T88 -463L40 -328Q190 -212 277 26T365 571V594Q365 872 289 1100T71 1467L40 1495L88 1631Q216 1569 336 1411T520 1058T592 654L593 576Z" />
+<glyph unicode="*" horiz-adv-x="905" d="M332 972L27 1060L82 1229L384 1112L369 1456H548L533 1106L830 1221L884 1049L574 961L774 695L629 589L449 877L271 598L125 700L332 972Z" />
+<glyph unicode="+" horiz-adv-x="1141" d="M686 801H1066V579H686V146H450V579H68V801H450V1206H686V801Z" />
+<glyph unicode="," horiz-adv-x="450" d="M159 -328L28 -250Q86 -159 107 -92T130 46V235H349L348 60Q347 -46 295 -152T159 -328Z" />
+<glyph unicode="-" horiz-adv-x="672" d="M596 521H71V717H596V521Z" />
+<glyph unicode="." horiz-adv-x="572" d="M276 256Q344 256 381 218T418 121Q418 64 381 27T276 -11Q211 -11 173 26T135 121T172 217T276 256Z" />
+<glyph unicode="/" horiz-adv-x="810" d="M193 -125H2L575 1456H766L193 -125Z" />
+<glyph unicode="0" horiz-adv-x="1164" d="M1058 613Q1058 299 941 140T583 -20Q347 -20 228 135T105 596V848Q105 1162 222 1319T581 1476Q820 1476 937 1323T1058 865V613ZM815 885Q815 1090 759 1185T581 1281Q462 1281 406 1191T347 908V578Q347 374 404 274T583
+174Q700 174 756 266T815 556V885Z" />
+<glyph unicode="1" horiz-adv-x="1164" d="M767 0H525V1169L168 1047V1252L736 1461H767V0Z" />
+<glyph unicode="2" horiz-adv-x="1164" d="M1088 0H109V167L594 696Q699 813 743 891T788 1049Q788 1153 730 1217T572 1281Q454 1281 389 1209T324 1012H81Q81 1145 141 1251T314 1417T574 1476Q786 1476 908 1370T1031 1075Q1031 966 970 847T768 575L412 194H1088V0Z" />
+<glyph unicode="3" horiz-adv-x="1164" d="M390 839H538Q650 840 715 897T781 1062Q781 1166 727 1223T560 1281Q462 1281 399 1225T336 1077H93Q93 1189 152 1281T318 1424T557 1476Q775 1476 899 1367T1024 1062Q1024 964 962 878T800 747Q920 706 982 618T1045
+408Q1045 212 911 96T557 -20Q347 -20 213 92T79 390H322Q322 294 386 234T560 174Q673 174 738 234T803 408Q803 523 735 585T533 647H390V839Z" />
+<glyph unicode="4" horiz-adv-x="1164" d="M931 519H1112V324H931V0H688V324H59L52 472L680 1456H931V519ZM307 519H688V1127L670 1095L307 519Z" />
+<glyph unicode="5" horiz-adv-x="1164" d="M174 722L253 1456H1035V1246H455L415 898Q516 956 643 956Q851 956 966 823T1082 465Q1082 243 954 112T603 -20Q403 -20 272 93T129 393H364Q378 287 440 231T602 174Q714 174 776 254T839 472Q839 605 770 682T580
+760Q514 760 468 743T368 674L174 722Z" />
+<glyph unicode="6" horiz-adv-x="1164" d="M865 1463V1262H835Q631 1259 509 1150T364 841Q481 964 663 964Q856 964 967 828T1079 477Q1079 255 949 118T606 -20Q388 -20 253 141T117 563V646Q117 1029 303 1246T840 1463H865ZM604 768Q524 768 458 723T360 603V529Q360
+367 428 272T604 176T775 257T838 470T774 685T604 768Z" />
+<glyph unicode="7" horiz-adv-x="1164" d="M1078 1321L496 0H241L822 1261H69V1456H1078V1321Z" />
+<glyph unicode="8" horiz-adv-x="1164" d="M1026 1072Q1026 965 971 882T821 750Q935 697 996 605T1058 397Q1058 205 928 93T582 -20Q365 -20 235 93T104 397Q104 514 166 607T340 750Q246 798 192 881T137 1072Q137 1258 257 1367T581 1476Q786 1476 906 1367T1026
+1072ZM815 409Q815 517 751 583T580 650T411 584T347 409Q347 302 409 238T582 174T753 236T815 409ZM784 1063Q784 1158 729 1219T581 1281T434 1223T380 1063Q380 963 434 904T582 845T729 904T784 1063Z" />
+<glyph unicode="9" horiz-adv-x="1164" d="M798 609Q676 480 513 480Q321 480 207 614T93 968Q93 1112 151 1229T316 1411T564 1476Q784 1476 913 1312T1042 873V805Q1042 411 864 204T333 -6H304V195H339Q554 198 669 298T798 609ZM564 670Q637 670 701 712T800
+828V923Q800 1084 734 1182T563 1280T396 1194T333 975Q333 838 396 754T564 670Z" />
+<glyph unicode=":" horiz-adv-x="543" d="M527 256Q595 256 632 218T669 121Q669 64 632 27T527 -11Q462 -11 424 26T386 121T423 217T527 256ZM271 1105Q339 1105 376 1067T413 970Q413 913 376 876T271 838Q206 838 168 875T130 970T167 1066T271 1105Z" />
+<glyph unicode=";" horiz-adv-x="487" d="M250 1105Q318 1105 355 1067T392 970Q392 913 355 876T250 838Q185 838 147 875T109 970T146 1066T250 1105ZM177 -328L46 -250Q104 -159 125 -92T148 46V235H367L366 60Q365 -46 313 -152T177 -328Z" />
+<glyph unicode="&lt;" horiz-adv-x="1041" d="M310 631L900 407V164L63 537V730L900 1102V859L310 631Z" />
+<glyph unicode="=" horiz-adv-x="1146" d="M1007 780H145V982H1007V780ZM1007 356H145V557H1007V356Z" />
+<glyph unicode="&gt;" horiz-adv-x="1066" d="M746 636L128 863V1102L992 730V537L128 165V404L746 636Z" />
+<glyph unicode="?" horiz-adv-x="996" d="M350 428Q350 561 383 640T513 813T637 948Q677 1009 677 1080Q677 1174 631 1223T494 1273Q408 1273 356 1225T303 1093H60Q62 1270 180 1373T494 1476Q695 1476 807 1374T920 1089Q920 926 768 768L645 647Q579 572
+577 428H350ZM333 117Q333 176 370 212T470 249Q534 249 571 212T608 117Q608 62 572 25T470 -12T369 25T333 117Z" />
+<glyph unicode="@" horiz-adv-x="1832" d="M1741 518Q1729 268 1618 124T1317 -21Q1136 -21 1075 133Q1024 57 957 19T815 -19Q669 -19 594 101T536 422Q552 585 615 716T776 918T984 990Q1068 990 1132 969T1284 882L1232 319Q1213 121 1346 121Q1448 121 1513
+230T1585 514Q1602 883 1443 1079T963 1275Q767 1275 616 1177T375 894T277 471Q265 230 334 56T547 -210T898 -301Q982 -301 1073 -281T1229 -227L1267 -364Q1206 -404 1103 -428T894 -453Q640 -453 458 -346T185 -34Q91 177 102 471Q114 745 225 963T528 1303T967
+1424Q1216 1424 1395 1315T1664 1000T1741 518ZM732 422Q719 286 756 216T874 145Q928 145 976 192T1054 323L1099 816Q1049 835 1002 835Q891 835 821 731T732 422Z" />
+<glyph unicode="A" horiz-adv-x="1363" d="M963 339H399L281 0H18L568 1456H795L1346 0H1082L963 339ZM470 543H892L681 1147L470 543Z" />
+<glyph unicode="B" horiz-adv-x="1292" d="M148 0V1456H647Q894 1456 1023 1357T1152 1062Q1152 962 1098 882T940 758Q1058 726 1122 638T1187 425Q1187 220 1056 110T679 0H148ZM401 657V202H682Q801 202 868 261T935 425Q935 652 703 657H401ZM401 843H649Q767
+843 833 896T900 1048Q900 1156 839 1204T647 1252H401V843Z" />
+<glyph unicode="C" horiz-adv-x="1337" d="M1259 474Q1237 241 1087 111T688 -20Q514 -20 382 62T177 297T102 650V786Q102 992 175 1149T384 1391T700 1476Q941 1476 1088 1345T1259 975H1007Q989 1132 916 1201T700 1271Q535 1271 447 1151T356 797V668Q356
+432 440 308T688 184Q837 184 912 251T1007 474H1259Z" />
+<glyph unicode="D" horiz-adv-x="1338" d="M148 0V1456H578Q771 1456 920 1370T1152 1126T1234 764V691Q1234 484 1152 327T917 85T567 0H148ZM401 1252V202H566Q765 202 871 326T980 684V765Q980 1002 877 1127T578 1252H401Z" />
+<glyph unicode="E" horiz-adv-x="1158" d="M999 650H401V202H1100V0H148V1456H1093V1252H401V850H999V650Z" />
+<glyph unicode="F" horiz-adv-x="1125" d="M987 617H401V0H148V1456H1073V1252H401V819H987V617Z" />
+<glyph unicode="G" horiz-adv-x="1394" d="M1264 189Q1185 86 1045 33T727 -20Q544 -20 403 63T186 300T106 661V775Q106 1105 264 1290T705 1476Q948 1476 1091 1356T1263 1010H1015Q973 1273 710 1273Q540 1273 452 1151T360 791V679Q360 443 459 313T736 182Q930
+182 1012 270V555H712V747H1264V189Z" />
+<glyph unicode="H" horiz-adv-x="1455" d="M1304 0H1052V647H401V0H148V1456H401V850H1052V1456H1304V0Z" />
+<glyph unicode="I" horiz-adv-x="578" d="M415 0H163V1456H415V0Z" />
+<glyph unicode="J" horiz-adv-x="1137" d="M744 1456H996V435Q996 226 866 103T521 -20Q293 -20 169 95T45 415H297Q297 299 354 241T521 182Q623 182 683 249T744 436V1456Z" />
+<glyph unicode="K" horiz-adv-x="1291" d="M566 629L401 454V0H148V1456H401V773L541 946L967 1456H1273L732 811L1304 0H1004L566 629Z" />
+<glyph unicode="L" horiz-adv-x="1108" d="M401 202H1062V0H148V1456H401V202Z" />
+<glyph unicode="M" horiz-adv-x="1793" d="M476 1456L896 340L1315 1456H1642V0H1390V480L1415 1122L985 0H804L375 1121L400 480V0H148V1456H476Z" />
+<glyph unicode="N" horiz-adv-x="1454" d="M1303 0H1050L401 1033V0H148V1456H401L1052 419V1456H1303V0Z" />
+<glyph unicode="O" horiz-adv-x="1414" d="M1310 690Q1310 476 1236 315T1025 67T708 -20Q531 -20 393 66T179 313T102 682V764Q102 977 177 1140T390 1389T706 1476T1021 1391T1234 1145T1310 771V690ZM1057 766Q1057 1008 966 1137T706 1266Q542 1266 450 1138T355
+774V690Q355 450 448 319T708 188Q876 188 966 316T1057 690V766Z" />
+<glyph unicode="P" horiz-adv-x="1309" d="M401 541V0H148V1456H705Q949 1456 1092 1329T1236 993Q1236 779 1096 660T702 541H401ZM401 744H705Q840 744 911 807T982 991Q982 1109 910 1179T712 1252H401V744Z" />
+<glyph unicode="Q" horiz-adv-x="1414" d="M1305 690Q1305 483 1240 332T1056 91L1306 -104L1142 -252L832 -7Q771 -20 701 -20Q525 -20 387 66T173 313T96 682V764Q96 977 171 1140T384 1389T699 1476Q879 1476 1016 1391T1229 1145T1305 771V690ZM1051 766Q1051
+1012 959 1139T699 1266Q536 1266 444 1138T349 775V690Q349 454 441 321T701 188Q870 188 960 316T1051 690V766Z" />
+<glyph unicode="R" horiz-adv-x="1278" d="M683 561H401V0H148V1456H660Q912 1456 1049 1343T1186 1016Q1186 870 1116 772T919 620L1246 13V0H975L683 561ZM401 764H661Q789 764 861 828T933 1005Q933 1122 867 1186T668 1252H401V764Z" />
+<glyph unicode="S" horiz-adv-x="1236" d="M909 375Q909 471 842 523T598 628T318 746Q119 871 119 1072Q119 1248 262 1362T635 1476Q787 1476 906 1420T1093 1261T1161 1031H909Q909 1145 838 1209T633 1274Q509 1274 441 1221T372 1073Q372 993 446 940T690
+836T963 721T1114 573T1162 377Q1162 195 1023 88T644 -20Q486 -20 354 38T148 200T74 440H327Q327 316 409 248T644 180Q776 180 842 233T909 375Z" />
+<glyph unicode="T" horiz-adv-x="1243" d="M1200 1252H746V0H495V1252H45V1456H1200V1252Z" />
+<glyph unicode="U" horiz-adv-x="1335" d="M1213 1456V483Q1213 251 1065 116T669 -20Q419 -20 272 113T125 484V1456H377V482Q377 336 451 259T669 182Q961 182 961 490V1456H1213Z" />
+<glyph unicode="V" horiz-adv-x="1325" d="M661 317L1031 1456H1309L785 0H540L18 1456H295L661 317Z" />
+<glyph unicode="W" horiz-adv-x="1802" d="M1290 360L1514 1456H1765L1429 0H1187L910 1063L627 0H384L48 1456H299L525 362L803 1456H1015L1290 360Z" />
+<glyph unicode="X" horiz-adv-x="1296" d="M649 930L955 1456H1247L807 734L1257 0H962L649 534L335 0H41L492 734L51 1456H343L649 930Z" />
+<glyph unicode="Y" horiz-adv-x="1248" d="M623 766L958 1456H1238L750 536V0H496V536L7 1456H288L623 766Z" />
+<glyph unicode="Z" horiz-adv-x="1233" d="M386 202H1164V0H80V164L833 1252H85V1456H1140V1296L386 202Z" />
+<glyph unicode="[" horiz-adv-x="561" d="M540 1488H375V-135H540V-324H132V1678H540V1488Z" />
+<glyph unicode="\" horiz-adv-x="856" d="M20 1456H260L868 -125H628L20 1456Z" />
+<glyph unicode="]" horiz-adv-x="561" d="M12 1678H422V-324H12V-135H179V1488H12V1678Z" />
+<glyph unicode="^" horiz-adv-x="875" d="M437 1190L259 729H53L352 1456H523L821 729H616L437 1190Z" />
+<glyph unicode="_" horiz-adv-x="924" d="M920 -191H3V0H920V-191Z" />
+<glyph unicode="`" horiz-adv-x="660" d="M521 1233H319L49 1536H326L521 1233Z" />
+<glyph unicode="a" horiz-adv-x="1108" d="M771 0Q755 31 743 101Q627 -20 459 -20Q296 -20 193 73T90 303Q90 476 218 568T586 661H735V732Q735 816 688 866T545 917Q462 917 409 876T356 770H113Q113 859 172 936T332 1058T559 1102Q749 1102 862 1007T978 738V250Q978
+104 1019 17V0H771ZM504 175Q576 175 639 210T735 304V508H604Q469 508 401 461T333 328Q333 258 379 217T504 175Z" />
+<glyph unicode="b" horiz-adv-x="1153" d="M1074 530Q1074 278 962 129T652 -20Q462 -20 356 117L344 0H124V1536H367V978Q472 1102 650 1102Q848 1102 961 955T1074 544V530ZM831 551Q831 727 769 815T589 903Q431 903 367 765V319Q432 178 591 178Q705 178 767
+263T831 520V551Z" />
+<glyph unicode="c" horiz-adv-x="1072" d="M569 174Q660 174 720 227T784 358H1013Q1009 257 950 170T790 31T572 -20Q345 -20 212 127T79 533V558Q79 805 211 953T571 1102Q764 1102 885 990T1013 694H784Q780 787 721 847T569 907Q451 907 387 822T322 562V523Q322
+347 385 261T569 174Z" />
+<glyph unicode="d" horiz-adv-x="1156" d="M79 549Q79 799 195 950T506 1102Q678 1102 784 982V1536H1027V0H807L795 112Q686 -20 504 -20Q314 -20 197 133T79 549ZM322 528Q322 363 385 271T566 178Q715 178 784 311V773Q717 903 568 903Q450 903 386 810T322 528Z" />
+<glyph unicode="e" horiz-adv-x="1099" d="M601 -20Q370 -20 227 125T83 513V543Q83 705 145 832T321 1031T573 1102Q794 1102 914 961T1035 562V464H328Q339 330 417 252T615 174Q782 174 887 309L1018 184Q953 87 845 34T601 -20ZM572 907Q472 907 411 837T332
+642H795V660Q787 782 730 844T572 907Z" />
+<glyph unicode="f" horiz-adv-x="726" d="M210 0V902H45V1082H210V1181Q210 1361 310 1459T590 1557Q654 1557 726 1539L720 1349Q680 1357 627 1357Q453 1357 453 1178V1082H673V902H453V0H210Z" />
+<glyph unicode="g" horiz-adv-x="1161" d="M82 549Q82 801 200 951T515 1102Q700 1102 806 973L817 1082H1036V33Q1036 -180 904 -303T546 -426Q427 -426 314 -377T141 -247L256 -101Q368 -234 532 -234Q653 -234 723 -169T793 24V97Q688 -20 513 -20Q323 -20
+203 131T82 549ZM324 528Q324 365 390 272T575 178Q722 178 793 304V780Q724 903 577 903Q457 903 391 808T324 528Z" />
+<glyph unicode="h" horiz-adv-x="1137" d="M364 964Q483 1102 665 1102Q1011 1102 1016 707V0H773V698Q773 810 725 856T582 903Q436 903 364 773V0H121V1536H364V964Z" />
+<glyph unicode="i" horiz-adv-x="523" d="M383 0H140V1082H383V0ZM125 1363Q125 1419 160 1456T262 1493T364 1456T400 1363Q400 1308 364 1272T262 1235T161 1271T125 1363Z" />
+<glyph unicode="j" horiz-adv-x="513" d="M378 1082V-96Q378 -262 296 -349T54 -437Q-13 -437 -75 -420V-228Q-37 -237 11 -237Q132 -237 135 -105V1082H378ZM114 1363Q114 1419 149 1456T251 1493T353 1456T389 1363Q389 1308 353 1272T251 1235T150 1271T114 1363Z" />
+<glyph unicode="k" horiz-adv-x="1069" d="M476 464L368 353V0H125V1536H368V650L444 745L743 1082H1035L633 631L1078 0H797L476 464Z" />
+<glyph unicode="l" horiz-adv-x="523" d="M383 0H140V1536H383V0Z" />
+<glyph unicode="m" horiz-adv-x="1782" d="M353 1082L360 969Q474 1102 672 1102Q889 1102 969 936Q1087 1102 1301 1102Q1480 1102 1567 1003T1657 711V0H1414V704Q1414 807 1369 855T1220 903Q1137 903 1085 859T1011 742L1012 0H769V712Q764 903 574 903Q428
+903 367 784V0H124V1082H353Z" />
+<glyph unicode="n" horiz-adv-x="1139" d="M350 1082L357 957Q477 1102 672 1102Q1010 1102 1016 715V0H773V701Q773 804 729 853T583 903Q436 903 364 770V0H121V1082H350Z" />
+<glyph unicode="o" horiz-adv-x="1166" d="M79 551Q79 710 142 837T319 1033T581 1102Q800 1102 936 961T1084 587L1085 530Q1085 370 1024 244T848 49T583 -20Q354 -20 217 132T79 539V551ZM322 530Q322 363 391 269T583 174T774 270T843 551Q843 715 773 811T581
+907Q462 907 392 813T322 530Z" />
+<glyph unicode="p" horiz-adv-x="1153" d="M1072 530Q1072 279 958 130T652 -20Q474 -20 367 97V-416H124V1082H348L358 972Q465 1102 649 1102Q847 1102 959 955T1072 545V530ZM830 551Q830 713 766 808T581 903Q432 903 367 780V300Q433 174 583 174Q699 174
+764 267T830 551Z" />
+<glyph unicode="q" horiz-adv-x="1163" d="M79 550Q79 804 195 953T509 1102Q690 1102 796 975L810 1082H1026V-416H783V92Q677 -20 507 -20Q313 -20 196 131T79 550ZM322 529Q322 363 387 269T569 174Q713 174 783 297V789Q713 907 571 907Q455 907 389 814T322 529Z" />
+<glyph unicode="r" horiz-adv-x="720" d="M691 860Q643 868 592 868Q425 868 367 740V0H124V1082H356L362 961Q450 1102 606 1102Q658 1102 692 1088L691 860Z" />
+<glyph unicode="s" horiz-adv-x="1057" d="M731 294Q731 359 678 393T500 453T293 519Q111 607 111 774Q111 914 229 1008T529 1102Q723 1102 842 1006T962 757H719Q719 827 667 873T529 920Q449 920 399 883T348 784Q348 728 395 697T585 635T809 560T930 455T970
+307Q970 161 849 71T532 -20Q399 -20 295 28T133 160T75 341H311Q316 255 376 209T535 162Q631 162 681 198T731 294Z" />
+<glyph unicode="t" horiz-adv-x="681" d="M429 1345V1082H620V902H429V298Q429 236 453 209T541 181Q583 181 626 191V3Q543 -20 466 -20Q186 -20 186 289V902H8V1082H186V1345H429Z" />
+<glyph unicode="u" horiz-adv-x="1138" d="M780 106Q673 -20 476 -20Q300 -20 210 83T119 381V1082H362V384Q362 178 533 178Q710 178 772 305V1082H1015V0H786L780 106Z" />
+<glyph unicode="v" horiz-adv-x="1013" d="M506 308L735 1082H986L611 0H400L22 1082H274L506 308Z" />
+<glyph unicode="w" horiz-adv-x="1522" d="M1075 335L1247 1082H1484L1189 0H989L757 743L529 0H329L33 1082H270L445 343L667 1082H850L1075 335Z" />
+<glyph unicode="x" horiz-adv-x="1030" d="M513 726L719 1082H989L658 549L1000 0H732L516 370L301 0H31L373 549L43 1082H311L513 726Z" />
+<glyph unicode="y" horiz-adv-x="997" d="M503 348L723 1082H982L552 -164Q453 -437 216 -437Q163 -437 99 -419V-231L145 -234Q237 -234 283 -201T357 -88L392 5L12 1082H274L503 348Z" />
+<glyph unicode="z" horiz-adv-x="1030" d="M384 194H960V0H82V159L631 886H92V1082H939V928L384 194Z" />
+<glyph unicode="{" horiz-adv-x="687" d="M609 -360Q256 -261 249 91V304Q249 529 56 529V707Q249 707 249 933V1145Q252 1325 342 1436T609 1597L657 1457Q484 1401 478 1151V935Q478 710 305 619Q478 527 478 300V87Q484 -163 657 -219L609 -360Z" />
+<glyph unicode="|" horiz-adv-x="514" d="M341 -270H174V1456H341V-270Z" />
+<glyph unicode="}" horiz-adv-x="687" d="M27 -219Q203 -162 207 93V301Q207 532 389 618Q207 704 207 938V1145Q203 1400 27 1457L75 1597Q257 1546 346 1432T435 1132V932Q435 707 629 707V529Q435 529 435 304V107Q435 -80 346 -194T75 -360L27 -219Z" />
+<glyph unicode="~" horiz-adv-x="1361" d="M1244 786Q1244 610 1149 499T912 387Q838 387 776 415T636 511T526 596T454 613Q387 613 349 561T310 425H117Q117 596 208 705T447 815Q521 815 587 786T726 690T832 607T905 590Q972 590 1014 646T1056 786H1244Z" />
+<glyph unicode="&#xa0;" horiz-adv-x="510" />
+<glyph unicode="&#xa1;" horiz-adv-x="542" d="M170 662H379L403 -364H146L170 662ZM409 971Q409 915 373 878T272 840Q206 840 170 877T134 971Q134 1026 170 1063T272 1101Q337 1101 373 1064T409 971Z" />
+<glyph unicode="&#xa2;" horiz-adv-x="1149" d="M591 174Q680 174 740 226T806 358H1034Q1030 222 932 120T687 -11V-245H487V-11Q304 23 202 166T100 530V558Q100 771 202 915T487 1093V1318H687V1094Q845 1066 937 958T1034 694H806Q799 790 740 848T590 907Q360
+907 344 595L343 523Q343 347 406 261T591 174Z" />
+<glyph unicode="&#xa3;" horiz-adv-x="1205" d="M509 598L516 422Q516 287 452 202H1148L1147 0H98V202H180Q219 211 240 266T262 413L255 598H94V797H249L241 1039Q241 1241 366 1358T694 1475T1013 1366T1129 1073H884Q884 1168 832 1220T685 1273Q596 1273
+545 1208T493 1039L502 797H813V598H509Z" />
+<glyph unicode="&#xa4;" horiz-adv-x="1437" d="M1085 107Q926 -20 723 -20Q521 -20 363 106L234 -26L93 118L228 255Q128 411 128 608Q128 808 237 973L93 1120L234 1264L376 1119Q531 1234 723 1234Q917 1234 1072 1117L1217 1265L1359 1120L1211 969Q1318 810
+1318 608Q1318 415 1220 259L1359 118L1217 -27L1085 107ZM313 608Q313 488 368 385T518 224T723 165T928 224T1077 386T1132 608T1078 829T929 989T723 1048T517 990T368 829T313 608Z" />
+<glyph unicode="&#xa5;" horiz-adv-x="1088" d="M545 847L807 1456H1076L735 742H969V590H666V452H969V301H666V0H414V301H106V452H414V590H106V742H354L11 1456H284L545 847Z" />
+<glyph unicode="&#xa6;" horiz-adv-x="508" d="M136 -270V525H365V-270H136ZM365 698H136V1456H365V698Z" />
+<glyph unicode="&#xa7;" horiz-adv-x="1272" d="M1164 455Q1164 271 993 182Q1128 82 1128 -103Q1128 -276 993 -375T624 -474Q378 -474 234 -366T90 -50L332 -49Q332 -159 410 -219T624 -279Q745 -279 815 -232T886 -105Q886 -28 819 17T565 118Q377 169 282
+224T141 356T94 542Q94 726 263 816Q198 866 164 934T130 1102Q130 1272 267 1374T635 1476Q875 1476 1009 1364T1143 1047H900Q900 1153 828 1217T635 1281Q512 1281 443 1234T373 1104Q373 1020 433 977T686 881T977 773T1119 640T1164 455ZM601 673Q520 694
+444 722Q336 682 336 558Q336 477 385 434T584 344L763 291L809 275Q924 322 924 439Q924 520 856 568T601 673Z" />
+<glyph unicode="&#xa8;" horiz-adv-x="901" d="M93 1366Q93 1416 126 1450T219 1484T312 1450T346 1366T312 1282T219 1248T127 1282T93 1366ZM550 1365Q550 1415 583 1449T676 1483T769 1449T803 1365T769 1281T676 1247T584 1281T550 1365Z" />
+<glyph unicode="&#xa9;" horiz-adv-x="1604" d="M1118 596Q1118 444 1031 363T783 282T529 388T434 675V788Q434 962 529 1068T783 1175Q946 1175 1032 1093T1119 861H963Q963 957 917 998T783 1040Q691 1040 640 972T588 786V669Q588 551 640 484T783 417Q872
+417 917 457T962 596H1118ZM1384 729Q1384 895 1309 1037T1097 1265T797 1351Q638 1351 502 1269T287 1043T209 729T286 415T500 188T797 104T1094 189T1308 418T1384 729ZM87 729Q87 931 180 1104T439 1376T797 1476T1154 1377T1412 1104T1506 729T1413 354T1155
+81T797 -20Q604 -20 440 80T181 353T87 729Z" />
+<glyph unicode="&#xaa;" horiz-adv-x="913" d="M608 705L591 773Q514 691 390 691Q272 691 207 752T141 919Q141 1029 225 1089T482 1150H584V1201Q584 1328 468 1328Q403 1328 367 1303T330 1229L157 1243Q157 1347 244 1411T468 1476Q605 1476 682 1404T759
+1199V883Q759 786 785 705H608ZM433 835Q473 835 515 853T584 896V1033H478Q402 1032 359 1002T316 923Q316 835 433 835Z" />
+<glyph unicode="&#xab;" horiz-adv-x="994" d="M551 537L798 138H631L343 528V547L631 937H798L551 537ZM654 537L901 138H734L446 528V547L734 937H901L654 537Z" />
+<glyph unicode="&#xac;" horiz-adv-x="1133" d="M962 374H762V634H127V805H962V374Z" />
+<glyph unicode="&#xad;" horiz-adv-x="672" d="M596 521H71V717H596V521Z" />
+<glyph unicode="&#xae;" horiz-adv-x="1604" d="M87 729Q87 931 180 1104T439 1376T797 1476T1154 1377T1412 1104T1506 729T1413 354T1155 81T797 -20Q604 -20 440 80T181 353T87 729ZM1384 729Q1384 895 1309 1037T1097 1265T797 1351Q638 1351 502 1269T287
+1043T209 729T286 415T500 188T797 104T1094 189T1308 418T1384 729ZM653 653V316H502V1166H783Q936 1166 1022 1099T1108 906Q1108 789 988 726Q1053 697 1079 642T1105 505T1108 389T1122 332V316H967Q954 350 954 510Q954 586 921 619T811 653H653ZM653 787H796Q865
+787 911 818T958 903Q958 973 923 1002T794 1033H653V787Z" />
+<glyph unicode="&#xaf;" horiz-adv-x="987" d="M842 1292H155V1450H842V1292Z" />
+<glyph unicode="&#xb0;" horiz-adv-x="778" d="M391 1476Q497 1476 574 1397T651 1208T575 1021T391 943Q282 943 205 1020T127 1208T205 1397T391 1476ZM391 1084Q444 1084 478 1119T513 1208Q513 1260 479 1298T391 1336T302 1298T266 1208T302 1120T391 1084Z" />
+<glyph unicode="&#xb1;" horiz-adv-x="1098" d="M668 899H1011V700H668V312H452V700H95V899H452V1276H668V899ZM974 1H125V197H974V1Z" />
+<glyph unicode="&#xb2;" horiz-adv-x="758" d="M690 667H78V792L363 1053Q476 1156 476 1223Q476 1265 449 1291T370 1318Q312 1318 279 1285T246 1198H60Q60 1314 144 1390T364 1467Q507 1467 585 1403T663 1224Q663 1117 557 1015L459 928L319 815H690V667Z" />
+<glyph unicode="&#xb3;" horiz-adv-x="758" d="M268 1133H349Q481 1133 481 1230Q481 1265 454 1291T365 1318Q317 1318 285 1299T252 1244H66Q66 1343 148 1405T361 1467Q504 1467 585 1407T667 1241Q667 1122 532 1071Q681 1030 681 888Q681 782 593 719T361
+656Q226 656 141 719T55 896H241Q241 858 275 832T370 805Q433 805 463 832T494 902Q494 1003 360 1004H268V1133Z" />
+<glyph unicode="&#xb4;" horiz-adv-x="667" d="M307 1536H584L307 1233H112L307 1536Z" />
+<glyph unicode="&#xb5;" horiz-adv-x="1211" d="M388 1082V446Q390 305 434 240T585 175Q753 175 812 296V1082H1055V0H832L825 86Q733 -21 586 -21Q465 -21 388 34V-416H146V1082H388Z" />
+<glyph unicode="&#xb6;" horiz-adv-x="1005" d="M644 0V520H564Q334 520 202 647T69 988Q69 1201 202 1328T565 1456H854V0H644Z" />
+<glyph unicode="&#xb7;" horiz-adv-x="578" d="M142 714Q142 772 179 811T283 850T387 811T425 714Q425 655 386 618T283 581Q218 581 180 618T142 714Z" />
+<glyph unicode="&#xb8;" horiz-adv-x="528" d="M318 3L307 -51Q457 -78 457 -224Q457 -329 371 -388T130 -447L123 -310Q189 -310 224 -287T260 -221Q260 -176 225 -159T109 -136L141 3H318Z" />
+<glyph unicode="&#xb9;" horiz-adv-x="758" d="M514 667H329V1237L128 1189V1335L495 1454H514V667Z" />
+<glyph unicode="&#xba;" horiz-adv-x="935" d="M119 1121Q119 1281 214 1378T465 1476T716 1379T812 1116V1044Q812 885 718 788T467 690Q309 690 214 788T119 1049V1121ZM294 1044Q294 946 340 891T467 836Q545 836 590 890T637 1041V1121Q637 1218 591 1273T465
+1328Q387 1328 341 1274T294 1117V1044Z" />
+<glyph unicode="&#xbb;" horiz-adv-x="994" d="M260 937L548 547V528L260 138H93L340 537L93 937H260ZM633 937L921 547V528L633 138H466L713 537L466 937H633Z" />
+<glyph unicode="&#xbc;" horiz-adv-x="1488" d="M475 664H290V1234L89 1186V1332L456 1451H475V664ZM453 117L328 189L1039 1327L1164 1255L453 117ZM1316 314H1411V163H1316V0H1129V163H771L762 284L1127 789H1316V314ZM943 314H1129V556L1115 534L943 314Z" />
+<glyph unicode="&#xbd;" horiz-adv-x="1579" d="M410 117L285 189L996 1327L1121 1255L410 117ZM466 667H281V1237L80 1189V1335L447 1454H466V667ZM1484 0H872V125L1157 386Q1270 489 1270 556Q1270 598 1243 624T1164 651Q1106 651 1073 618T1040 531H854Q854
+647 938 723T1158 800Q1301 800 1379 736T1457 557Q1457 450 1351 348L1253 261L1113 148H1484V0Z" />
+<glyph unicode="&#xbe;" horiz-adv-x="1623" d="M594 117L469 189L1180 1327L1305 1255L594 117ZM1437 314H1532V163H1437V0H1250V163H892L883 284L1248 789H1437V314ZM1064 314H1250V556L1236 534L1064 314ZM316 1133H397Q529 1133 529 1230Q529 1265 502 1291T413
+1318Q365 1318 333 1299T300 1244H114Q114 1343 196 1405T409 1467Q552 1467 633 1407T715 1241Q715 1122 580 1071Q729 1030 729 888Q729 782 641 719T409 656Q274 656 189 719T103 896H289Q289 858 323 832T418 805Q481 805 511 832T542 902Q542 1003 408 1004H316V1133Z"
+/>
+<glyph unicode="&#xbf;" horiz-adv-x="996" d="M630 661Q628 537 602 465T502 313L399 207Q309 110 309 4Q309 -90 358 -136T496 -183Q584 -183 637 -133T690 0H933Q931 -177 812 -281T498 -385Q292 -385 179 -285T66 0Q66 165 221 328L313 421Q391 493 401 608L403
+661H630ZM650 972Q650 916 615 879T513 841T411 878T375 972Q375 1027 411 1064T513 1102T614 1065T650 972Z" />
+<glyph unicode="&#xc0;" horiz-adv-x="1363" d="M963 339H399L281 0H18L568 1456H795L1346 0H1082L963 339ZM470 543H892L681 1147L470 543ZM812 1543H610L340 1846H617L812 1543Z" />
+<glyph unicode="&#xc1;" horiz-adv-x="1363" d="M963 339H399L281 0H18L568 1456H795L1346 0H1082L963 339ZM470 543H892L681 1147L470 543ZM757 1846H1034L757 1543H562L757 1846Z" />
+<glyph unicode="&#xc2;" horiz-adv-x="1363" d="M963 339H399L281 0H18L568 1456H795L1346 0H1082L963 339ZM470 543H892L681 1147L470 543ZM1030 1569V1558H835L685 1714L536 1558H343V1571L614 1847H757L1030 1569Z" />
+<glyph unicode="&#xc3;" horiz-adv-x="1363" d="M963 339H399L281 0H18L568 1456H795L1346 0H1082L963 339ZM470 543H892L681 1147L470 543ZM1052 1824Q1052 1714 989 1641T829 1568Q790 1568 762 1576T681 1615T607 1651T559 1657Q521 1657 495 1629T468 1554L319
+1562Q319 1672 382 1747T541 1822Q598 1822 678 1777T811 1732Q849 1732 876 1760T903 1836L1052 1824Z" />
+<glyph unicode="&#xc4;" horiz-adv-x="1363" d="M963 339H399L281 0H18L568 1456H795L1346 0H1082L963 339ZM470 543H892L681 1147L470 543ZM331 1676Q331 1726 364 1760T457 1794T550 1760T584 1676T550 1592T457 1558T365 1592T331 1676ZM788 1675Q788 1725
+821 1759T914 1793T1007 1759T1041 1675T1007 1591T914 1557T822 1591T788 1675Z" />
+<glyph unicode="&#xc5;" horiz-adv-x="1363" d="M963 339H399L281 0H18L568 1456H795L1346 0H1082L963 339ZM470 543H892L681 1147L470 543ZM686 1940Q779 1940 843 1879T907 1732T845 1587T686 1527Q589 1527 527 1587T464 1732T527 1878T686 1940ZM574 1732Q574
+1685 607 1653T686 1620Q733 1620 765 1652T798 1732Q798 1778 767 1811T686 1845T606 1812T574 1732Z" />
+<glyph unicode="&#xc6;" horiz-adv-x="1925" d="M1879 0H981L966 340H464L280 0H-10L825 1456H1817V1259H1171L1188 851H1736V654H1196L1216 196H1879V0ZM580 555H957L930 1203L580 555Z" />
+<glyph unicode="&#xc7;" horiz-adv-x="1337" d="M1259 474Q1237 241 1087 111T688 -20Q514 -20 382 62T177 297T102 650V786Q102 992 175 1149T384 1391T700 1476Q941 1476 1088 1345T1259 975H1007Q989 1132 916 1201T700 1271Q535 1271 447 1151T356 797V668Q356
+432 440 308T688 184Q837 184 912 251T1007 474H1259ZM775 -2L764 -56Q914 -83 914 -229Q914 -334 828 -393T587 -452L580 -315Q646 -315 681 -292T717 -226Q717 -181 682 -164T566 -141L598 -2H775Z" />
+<glyph unicode="&#xc8;" horiz-adv-x="1158" d="M999 650H401V202H1100V0H148V1456H1093V1252H401V850H999V650ZM753 1550H551L281 1853H558L753 1550Z" />
+<glyph unicode="&#xc9;" horiz-adv-x="1158" d="M999 650H401V202H1100V0H148V1456H1093V1252H401V850H999V650ZM698 1853H975L698 1550H503L698 1853Z" />
+<glyph unicode="&#xca;" horiz-adv-x="1158" d="M999 650H401V202H1100V0H148V1456H1093V1252H401V850H999V650ZM971 1576V1565H776L626 1721L477 1565H284V1578L555 1854H698L971 1576Z" />
+<glyph unicode="&#xcb;" horiz-adv-x="1158" d="M999 650H401V202H1100V0H148V1456H1093V1252H401V850H999V650ZM272 1683Q272 1733 305 1767T398 1801T491 1767T525 1683T491 1599T398 1565T306 1599T272 1683ZM729 1682Q729 1732 762 1766T855 1800T948 1766T982
+1682T948 1598T855 1564T763 1598T729 1682Z" />
+<glyph unicode="&#xcc;" horiz-adv-x="578" d="M415 0H163V1456H415V0ZM416 1550H214L-56 1853H221L416 1550Z" />
+<glyph unicode="&#xcd;" horiz-adv-x="578" d="M415 0H163V1456H415V0ZM360 1853H637L360 1550H165L360 1853Z" />
+<glyph unicode="&#xce;" horiz-adv-x="578" d="M415 0H163V1456H415V0ZM634 1576V1565H439L289 1721L140 1565H-53V1578L218 1854H361L634 1576Z" />
+<glyph unicode="&#xcf;" horiz-adv-x="578" d="M415 0H163V1456H415V0ZM-65 1683Q-65 1733 -32 1767T61 1801T154 1767T188 1683T154 1599T61 1565T-31 1599T-65 1683ZM392 1682Q392 1732 425 1766T518 1800T611 1766T645 1682T611 1598T518 1564T426 1598T392 1682Z" />
+<glyph unicode="&#xd0;" horiz-adv-x="1368" d="M178 0V652H-9V822H178V1456H608Q801 1456 950 1370T1182 1126T1264 764V691Q1264 484 1182 327T947 85T597 0H178ZM660 652H431V202H594Q797 202 903 328T1010 695V765Q1010 1002 907 1127T608 1252H431V822H660V652Z" />
+<glyph unicode="&#xd1;" horiz-adv-x="1454" d="M1303 0H1050L401 1033V0H148V1456H401L1052 419V1456H1303V0ZM1093 1824Q1093 1714 1030 1641T870 1568Q831 1568 803 1576T722 1615T648 1651T600 1657Q562 1657 536 1629T509 1554L360 1562Q360 1672 423 1747T582
+1822Q639 1822 719 1777T852 1732Q890 1732 917 1760T944 1836L1093 1824Z" />
+<glyph unicode="&#xd2;" horiz-adv-x="1414" d="M1310 690Q1310 476 1236 315T1025 67T708 -20Q531 -20 393 66T179 313T102 682V764Q102 977 177 1140T390 1389T706 1476T1021 1391T1234 1145T1310 771V690ZM1057 766Q1057 1008 966 1137T706 1266Q542 1266 450
+1138T355 774V690Q355 450 448 319T708 188Q876 188 966 316T1057 690V766ZM835 1543H633L363 1846H640L835 1543Z" />
+<glyph unicode="&#xd3;" horiz-adv-x="1414" d="M1310 690Q1310 476 1236 315T1025 67T708 -20Q531 -20 393 66T179 313T102 682V764Q102 977 177 1140T390 1389T706 1476T1021 1391T1234 1145T1310 771V690ZM1057 766Q1057 1008 966 1137T706 1266Q542 1266 450
+1138T355 774V690Q355 450 448 319T708 188Q876 188 966 316T1057 690V766ZM780 1846H1057L780 1543H585L780 1846Z" />
+<glyph unicode="&#xd4;" horiz-adv-x="1414" d="M1310 690Q1310 476 1236 315T1025 67T708 -20Q531 -20 393 66T179 313T102 682V764Q102 977 177 1140T390 1389T706 1476T1021 1391T1234 1145T1310 771V690ZM1057 766Q1057 1008 966 1137T706 1266Q542 1266 450
+1138T355 774V690Q355 450 448 319T708 188Q876 188 966 316T1057 690V766ZM1053 1569V1558H858L708 1714L559 1558H366V1571L637 1847H780L1053 1569Z" />
+<glyph unicode="&#xd5;" horiz-adv-x="1414" d="M1310 690Q1310 476 1236 315T1025 67T708 -20Q531 -20 393 66T179 313T102 682V764Q102 977 177 1140T390 1389T706 1476T1021 1391T1234 1145T1310 771V690ZM1057 766Q1057 1008 966 1137T706 1266Q542 1266 450
+1138T355 774V690Q355 450 448 319T708 188Q876 188 966 316T1057 690V766ZM1075 1824Q1075 1714 1012 1641T852 1568Q813 1568 785 1576T704 1615T630 1651T582 1657Q544 1657 518 1629T491 1554L342 1562Q342 1672 405 1747T564 1822Q621 1822 701 1777T834 1732Q872
+1732 899 1760T926 1836L1075 1824Z" />
+<glyph unicode="&#xd6;" horiz-adv-x="1414" d="M1310 690Q1310 476 1236 315T1025 67T708 -20Q531 -20 393 66T179 313T102 682V764Q102 977 177 1140T390 1389T706 1476T1021 1391T1234 1145T1310 771V690ZM1057 766Q1057 1008 966 1137T706 1266Q542 1266 450
+1138T355 774V690Q355 450 448 319T708 188Q876 188 966 316T1057 690V766ZM354 1676Q354 1726 387 1760T480 1794T573 1760T607 1676T573 1592T480 1558T388 1592T354 1676ZM811 1675Q811 1725 844 1759T937 1793T1030 1759T1064 1675T1030 1591T937 1557T845
+1591T811 1675Z" />
+<glyph unicode="&#xd7;" horiz-adv-x="1092" d="M77 364L393 686L77 1008L225 1158L540 836L856 1158L1004 1008L688 686L1004 364L856 214L540 535L225 214L77 364Z" />
+<glyph unicode="&#xd8;" horiz-adv-x="1412" d="M1314 690Q1314 476 1240 315T1029 67T711 -20Q547 -20 415 55L324 -95H155L300 143Q105 338 105 697V764Q105 977 180 1139T393 1388T709 1476Q906 1476 1049 1375L1136 1518H1303L1156 1275Q1313 1082 1314 765V690ZM358
+690Q358 483 429 355L931 1181Q844 1266 709 1266Q545 1266 453 1138T358 774V690ZM1061 766Q1061 932 1017 1046L528 242Q606 188 711 188Q880 188 970 316T1061 690V766Z" />
+<glyph unicode="&#xd9;" horiz-adv-x="1335" d="M1213 1456V483Q1213 251 1065 116T669 -20Q419 -20 272 113T125 484V1456H377V482Q377 336 451 259T669 182Q961 182 961 490V1456H1213ZM794 1543H592L322 1846H599L794 1543Z" />
+<glyph unicode="&#xda;" horiz-adv-x="1335" d="M1213 1456V483Q1213 251 1065 116T669 -20Q419 -20 272 113T125 484V1456H377V482Q377 336 451 259T669 182Q961 182 961 490V1456H1213ZM739 1846H1016L739 1543H544L739 1846Z" />
+<glyph unicode="&#xdb;" horiz-adv-x="1335" d="M1213 1456V483Q1213 251 1065 116T669 -20Q419 -20 272 113T125 484V1456H377V482Q377 336 451 259T669 182Q961 182 961 490V1456H1213ZM1012 1569V1558H817L667 1714L518 1558H325V1571L596 1847H739L1012 1569Z" />
+<glyph unicode="&#xdc;" horiz-adv-x="1335" d="M1213 1456V483Q1213 251 1065 116T669 -20Q419 -20 272 113T125 484V1456H377V482Q377 336 451 259T669 182Q961 182 961 490V1456H1213ZM313 1676Q313 1726 346 1760T439 1794T532 1760T566 1676T532 1592T439
+1558T347 1592T313 1676ZM770 1675Q770 1725 803 1759T896 1793T989 1759T1023 1675T989 1591T896 1557T804 1591T770 1675Z" />
+<glyph unicode="&#xdd;" horiz-adv-x="1248" d="M623 766L958 1456H1238L750 536V0H496V536L7 1456H288L623 766ZM698 1846H975L698 1543H503L698 1846Z" />
+<glyph unicode="&#xde;" horiz-adv-x="1226" d="M391 1456V1176H632Q876 1176 1013 1057T1150 738Q1150 539 1013 420T633 300H391V0H148V1456H391ZM391 981V495H637Q762 495 834 560T907 736T837 913T645 981H391Z" />
+<glyph unicode="&#xdf;" horiz-adv-x="1255" d="M378 0H136V1105Q136 1319 250 1438T571 1557Q758 1557 865 1464T973 1201Q973 1139 960 1090T912 985T866 896T855 824Q855 780 887 738T1009 622T1138 480T1179 336Q1179 165 1071 73T764 -20Q684 -20 599 -1T475
+44L524 239Q569 211 632 193T750 174Q847 174 892 217T937 327Q937 376 902 421T780 535T653 671T612 819Q612 907 675 1007T738 1185Q738 1266 692 1314T566 1363Q382 1363 378 1116V0Z" />
+<glyph unicode="&#xe0;" horiz-adv-x="1108" d="M771 0Q755 31 743 101Q627 -20 459 -20Q296 -20 193 73T90 303Q90 476 218 568T586 661H735V732Q735 816 688 866T545 917Q462 917 409 876T356 770H113Q113 859 172 936T332 1058T559 1102Q749 1102 862 1007T978
+738V250Q978 104 1019 17V0H771ZM504 175Q576 175 639 210T735 304V508H604Q469 508 401 461T333 328Q333 258 379 217T504 175ZM694 1233H492L222 1536H499L694 1233Z" />
+<glyph unicode="&#xe1;" horiz-adv-x="1108" d="M771 0Q755 31 743 101Q627 -20 459 -20Q296 -20 193 73T90 303Q90 476 218 568T586 661H735V732Q735 816 688 866T545 917Q462 917 409 876T356 770H113Q113 859 172 936T332 1058T559 1102Q749 1102 862 1007T978
+738V250Q978 104 1019 17V0H771ZM504 175Q576 175 639 210T735 304V508H604Q469 508 401 461T333 328Q333 258 379 217T504 175ZM639 1536H916L639 1233H444L639 1536Z" />
+<glyph unicode="&#xe2;" horiz-adv-x="1108" d="M771 0Q755 31 743 101Q627 -20 459 -20Q296 -20 193 73T90 303Q90 476 218 568T586 661H735V732Q735 816 688 866T545 917Q462 917 409 876T356 770H113Q113 859 172 936T332 1058T559 1102Q749 1102 862 1007T978
+738V250Q978 104 1019 17V0H771ZM504 175Q576 175 639 210T735 304V508H604Q469 508 401 461T333 328Q333 258 379 217T504 175ZM912 1259V1248H717L567 1404L418 1248H225V1261L496 1537H639L912 1259Z" />
+<glyph unicode="&#xe3;" horiz-adv-x="1108" d="M771 0Q755 31 743 101Q627 -20 459 -20Q296 -20 193 73T90 303Q90 476 218 568T586 661H735V732Q735 816 688 866T545 917Q462 917 409 876T356 770H113Q113 859 172 936T332 1058T559 1102Q749 1102 862 1007T978
+738V250Q978 104 1019 17V0H771ZM504 175Q576 175 639 210T735 304V508H604Q469 508 401 461T333 328Q333 258 379 217T504 175ZM934 1514Q934 1404 871 1331T711 1258Q672 1258 644 1266T563 1305T489 1341T441 1347Q403 1347 377 1319T350 1244L201 1252Q201
+1362 264 1437T423 1512Q480 1512 560 1467T693 1422Q731 1422 758 1450T785 1526L934 1514Z" />
+<glyph unicode="&#xe4;" horiz-adv-x="1108" d="M771 0Q755 31 743 101Q627 -20 459 -20Q296 -20 193 73T90 303Q90 476 218 568T586 661H735V732Q735 816 688 866T545 917Q462 917 409 876T356 770H113Q113 859 172 936T332 1058T559 1102Q749 1102 862 1007T978
+738V250Q978 104 1019 17V0H771ZM504 175Q576 175 639 210T735 304V508H604Q469 508 401 461T333 328Q333 258 379 217T504 175ZM213 1366Q213 1416 246 1450T339 1484T432 1450T466 1366T432 1282T339 1248T247 1282T213 1366ZM670 1365Q670 1415 703 1449T796
+1483T889 1449T923 1365T889 1281T796 1247T704 1281T670 1365Z" />
+<glyph unicode="&#xe5;" horiz-adv-x="1108" d="M771 0Q755 31 743 101Q627 -20 459 -20Q296 -20 193 73T90 303Q90 476 218 568T586 661H735V732Q735 816 688 866T545 917Q462 917 409 876T356 770H113Q113 859 172 936T332 1058T559 1102Q749 1102 862 1007T978
+738V250Q978 104 1019 17V0H771ZM504 175Q576 175 639 210T735 304V508H604Q469 508 401 461T333 328Q333 258 379 217T504 175ZM568 1630Q661 1630 725 1569T789 1422T727 1277T568 1217Q471 1217 409 1277T346 1422T409 1568T568 1630ZM456 1422Q456 1375 489
+1343T568 1310Q615 1310 647 1342T680 1422Q680 1468 649 1501T568 1535T488 1502T456 1422Z" />
+<glyph unicode="&#xe6;" horiz-adv-x="1729" d="M1254 -20Q1001 -20 861 141Q796 64 689 22T448 -20Q272 -20 172 68T72 312Q72 470 191 556T543 642H734V713Q734 804 687 855T551 907Q460 907 403 863T345 752L103 771Q103 917 229 1009T553 1102Q776 1102 887
+969Q1018 1104 1218 1102Q1430 1102 1549 973T1668 608V471H973Q982 332 1058 253T1268 174Q1405 174 1512 232L1573 266L1646 100Q1576 44 1472 12T1254 -20ZM495 164Q553 164 621 193T734 266V475H538Q434 473 374 426T314 308Q314 243 360 204T495 164ZM1218
+907Q1119 907 1056 838T976 642H1428V672Q1428 785 1374 846T1218 907Z" />
+<glyph unicode="&#xe7;" horiz-adv-x="1072" d="M569 174Q660 174 720 227T784 358H1013Q1009 257 950 170T790 31T572 -20Q345 -20 212 127T79 533V558Q79 805 211 953T571 1102Q764 1102 885 990T1013 694H784Q780 787 721 847T569 907Q451 907 387 822T322
+562V523Q322 347 385 261T569 174ZM635 -2L624 -56Q774 -83 774 -229Q774 -334 688 -393T447 -452L440 -315Q506 -315 541 -292T577 -226Q577 -181 542 -164T426 -141L458 -2H635Z" />
+<glyph unicode="&#xe8;" horiz-adv-x="1099" d="M601 -20Q370 -20 227 125T83 513V543Q83 705 145 832T321 1031T573 1102Q794 1102 914 961T1035 562V464H328Q339 330 417 252T615 174Q782 174 887 309L1018 184Q953 87 845 34T601 -20ZM572 907Q472 907 411
+837T332 642H795V660Q787 782 730 844T572 907ZM682 1233H480L210 1536H487L682 1233Z" />
+<glyph unicode="&#xe9;" horiz-adv-x="1099" d="M601 -20Q370 -20 227 125T83 513V543Q83 705 145 832T321 1031T573 1102Q794 1102 914 961T1035 562V464H328Q339 330 417 252T615 174Q782 174 887 309L1018 184Q953 87 845 34T601 -20ZM572 907Q472 907 411
+837T332 642H795V660Q787 782 730 844T572 907ZM627 1536H904L627 1233H432L627 1536Z" />
+<glyph unicode="&#xea;" horiz-adv-x="1099" d="M601 -20Q370 -20 227 125T83 513V543Q83 705 145 832T321 1031T573 1102Q794 1102 914 961T1035 562V464H328Q339 330 417 252T615 174Q782 174 887 309L1018 184Q953 87 845 34T601 -20ZM572 907Q472 907 411
+837T332 642H795V660Q787 782 730 844T572 907ZM900 1259V1248H705L555 1404L406 1248H213V1261L484 1537H627L900 1259Z" />
+<glyph unicode="&#xeb;" horiz-adv-x="1099" d="M601 -20Q370 -20 227 125T83 513V543Q83 705 145 832T321 1031T573 1102Q794 1102 914 961T1035 562V464H328Q339 330 417 252T615 174Q782 174 887 309L1018 184Q953 87 845 34T601 -20ZM572 907Q472 907 411
+837T332 642H795V660Q787 782 730 844T572 907ZM201 1366Q201 1416 234 1450T327 1484T420 1450T454 1366T420 1282T327 1248T235 1282T201 1366ZM658 1365Q658 1415 691 1449T784 1483T877 1449T911 1365T877 1281T784 1247T692 1281T658 1365Z" />
+<glyph unicode="&#xec;" horiz-adv-x="538" d="M386 0H143V1082H386V0ZM652 1482H450L180 1785H457L652 1482Z" />
+<glyph unicode="&#xed;" horiz-adv-x="538" d="M386 0H143V1082H386V0ZM340 1785H617L340 1482H145L340 1785Z" />
+<glyph unicode="&#xee;" horiz-adv-x="538" d="M386 0H143V1082H386V0ZM614 1252V1241H419L269 1397L120 1241H-73V1254L198 1530H341L614 1252Z" />
+<glyph unicode="&#xef;" horiz-adv-x="538" d="M386 0H143V1082H386V0ZM-85 1359Q-85 1409 -52 1443T41 1477T134 1443T168 1359T134 1275T41 1241T-51 1275T-85 1359ZM372 1358Q372 1408 405 1442T498 1476T591 1442T625 1358T591 1274T498 1240T406 1274T372 1358Z" />
+<glyph unicode="&#xf0;" horiz-adv-x="1191" d="M834 1303Q1088 1038 1088 637V555Q1088 389 1025 258T848 53T593 -20Q455 -20 342 43T166 219T103 468Q103 617 159 732T319 910T554 973Q700 973 813 882Q764 1051 638 1173L434 1038L356 1147L528 1261Q402 1343
+240 1385L315 1580Q553 1530 730 1395L910 1515L988 1406L834 1303ZM845 663L844 681Q812 729 751 757T611 785Q485 785 416 701T346 468Q346 342 416 258T597 174Q708 174 776 274T845 547V663Z" />
+<glyph unicode="&#xf1;" horiz-adv-x="1139" d="M350 1082L357 957Q477 1102 672 1102Q1010 1102 1016 715V0H773V701Q773 804 729 853T583 903Q436 903 364 770V0H121V1082H350ZM940 1514Q940 1404 877 1331T717 1258Q678 1258 650 1266T569 1305T495 1341T447
+1347Q409 1347 383 1319T356 1244L207 1252Q207 1362 270 1437T429 1512Q486 1512 566 1467T699 1422Q737 1422 764 1450T791 1526L940 1514Z" />
+<glyph unicode="&#xf2;" horiz-adv-x="1166" d="M79 551Q79 710 142 837T319 1033T581 1102Q800 1102 936 961T1084 587L1085 530Q1085 370 1024 244T848 49T583 -20Q354 -20 217 132T79 539V551ZM322 530Q322 363 391 269T583 174T774 270T843 551Q843 715 773
+811T581 907Q462 907 392 813T322 530ZM703 1233H501L231 1536H508L703 1233Z" />
+<glyph unicode="&#xf3;" horiz-adv-x="1166" d="M79 551Q79 710 142 837T319 1033T581 1102Q800 1102 936 961T1084 587L1085 530Q1085 370 1024 244T848 49T583 -20Q354 -20 217 132T79 539V551ZM322 530Q322 363 391 269T583 174T774 270T843 551Q843 715 773
+811T581 907Q462 907 392 813T322 530ZM648 1536H925L648 1233H453L648 1536Z" />
+<glyph unicode="&#xf4;" horiz-adv-x="1166" d="M79 551Q79 710 142 837T319 1033T581 1102Q800 1102 936 961T1084 587L1085 530Q1085 370 1024 244T848 49T583 -20Q354 -20 217 132T79 539V551ZM322 530Q322 363 391 269T583 174T774 270T843 551Q843 715 773
+811T581 907Q462 907 392 813T322 530ZM921 1259V1248H726L576 1404L427 1248H234V1261L505 1537H648L921 1259Z" />
+<glyph unicode="&#xf5;" horiz-adv-x="1166" d="M79 551Q79 710 142 837T319 1033T581 1102Q800 1102 936 961T1084 587L1085 530Q1085 370 1024 244T848 49T583 -20Q354 -20 217 132T79 539V551ZM322 530Q322 363 391 269T583 174T774 270T843 551Q843 715 773
+811T581 907Q462 907 392 813T322 530ZM943 1514Q943 1404 880 1331T720 1258Q681 1258 653 1266T572 1305T498 1341T450 1347Q412 1347 386 1319T359 1244L210 1252Q210 1362 273 1437T432 1512Q489 1512 569 1467T702 1422Q740 1422 767 1450T794 1526L943 1514Z"
+/>
+<glyph unicode="&#xf6;" horiz-adv-x="1166" d="M79 551Q79 710 142 837T319 1033T581 1102Q800 1102 936 961T1084 587L1085 530Q1085 370 1024 244T848 49T583 -20Q354 -20 217 132T79 539V551ZM322 530Q322 363 391 269T583 174T774 270T843 551Q843 715 773
+811T581 907Q462 907 392 813T322 530ZM222 1366Q222 1416 255 1450T348 1484T441 1450T475 1366T441 1282T348 1248T256 1282T222 1366ZM679 1365Q679 1415 712 1449T805 1483T898 1449T932 1365T898 1281T805 1247T713 1281T679 1365Z" />
+<glyph unicode="&#xf7;" horiz-adv-x="1169" d="M1079 582H67V794H1079V582ZM576 1228Q644 1228 681 1190T718 1095T681 1001T576 963Q509 963 472 1000T435 1095T472 1190T576 1228ZM435 278Q435 336 472 374T576 412Q644 412 681 374T718 278Q718 221 681 184T576
+147Q509 147 472 184T435 278Z" />
+<glyph unicode="&#xf8;" horiz-adv-x="1160" d="M79 551Q79 710 142 837T319 1033T581 1102Q687 1102 775 1068L846 1211H991L889 1003Q1085 850 1085 530Q1085 370 1024 244T848 49T583 -20Q490 -20 400 10L328 -137H183L285 70Q79 220 79 551ZM322 530Q322 374
+386 276L685 885Q638 907 581 907Q462 907 392 813T322 530ZM843 551Q843 699 785 792L489 191Q532 174 583 174Q706 174 774 270T843 551Z" />
+<glyph unicode="&#xf9;" horiz-adv-x="1138" d="M780 106Q673 -20 476 -20Q300 -20 210 83T119 381V1082H362V384Q362 178 533 178Q710 178 772 305V1082H1015V0H786L780 106ZM696 1233H494L224 1536H501L696 1233Z" />
+<glyph unicode="&#xfa;" horiz-adv-x="1138" d="M780 106Q673 -20 476 -20Q300 -20 210 83T119 381V1082H362V384Q362 178 533 178Q710 178 772 305V1082H1015V0H786L780 106ZM641 1536H918L641 1233H446L641 1536Z" />
+<glyph unicode="&#xfb;" horiz-adv-x="1138" d="M780 106Q673 -20 476 -20Q300 -20 210 83T119 381V1082H362V384Q362 178 533 178Q710 178 772 305V1082H1015V0H786L780 106ZM914 1259V1248H719L569 1404L420 1248H227V1261L498 1537H641L914 1259Z" />
+<glyph unicode="&#xfc;" horiz-adv-x="1138" d="M780 106Q673 -20 476 -20Q300 -20 210 83T119 381V1082H362V384Q362 178 533 178Q710 178 772 305V1082H1015V0H786L780 106ZM215 1366Q215 1416 248 1450T341 1484T434 1450T468 1366T434 1282T341 1248T249 1282T215
+1366ZM672 1365Q672 1415 705 1449T798 1483T891 1449T925 1365T891 1281T798 1247T706 1281T672 1365Z" />
+<glyph unicode="&#xfd;" horiz-adv-x="997" d="M503 348L723 1082H982L552 -164Q453 -437 216 -437Q163 -437 99 -419V-231L145 -234Q237 -234 283 -201T357 -88L392 5L12 1082H274L503 348ZM585 1536H862L585 1233H390L585 1536Z" />
+<glyph unicode="&#xfe;" horiz-adv-x="1175" d="M1079 530Q1079 283 966 132T658 -20Q480 -20 373 97V-416H130V1536H373V983Q479 1102 655 1102Q852 1102 965 955T1079 546V530ZM836 551Q836 717 771 810T587 903Q438 903 373 780V300Q439 174 589 174Q705 174
+770 267T836 551Z" />
+<glyph unicode="&#xff;" horiz-adv-x="997" d="M503 348L723 1082H982L552 -164Q453 -437 216 -437Q163 -437 99 -419V-231L145 -234Q237 -234 283 -201T357 -88L392 5L12 1082H274L503 348ZM159 1366Q159 1416 192 1450T285 1484T378 1450T412 1366T378 1282T285
+1248T193 1282T159 1366ZM616 1365Q616 1415 649 1449T742 1483T835 1449T869 1365T835 1281T742 1247T650 1281T616 1365Z" />
+<glyph unicode="&#x2013;" horiz-adv-x="1321" d="M1432 621H414V817H1432V621Z" />
+<glyph unicode="&#x2014;" horiz-adv-x="1584" d="M1744 621H386V817H1744V621Z" />
+<glyph unicode="&#x2018;" horiz-adv-x="448" d="M282 1562L406 1485Q315 1352 312 1208V1056H99V1194Q100 1290 151 1394T282 1562Z" />
+<glyph unicode="&#x2019;" horiz-adv-x="444" d="M175 1024L51 1101Q141 1232 144 1378V1536H357V1398Q357 1295 305 1191T175 1024Z" />
+<glyph unicode="&#x201a;" horiz-adv-x="462" d="M173 -298L50 -220Q135 -93 138 55V202H356V69Q355 -24 304 -128T173 -298Z" />
+<glyph unicode="&#x201c;" horiz-adv-x="788" d="M291 1562L415 1485Q324 1352 321 1208V1056H108V1194Q109 1290 160 1394T291 1562ZM627 1562L751 1485Q660 1352 657 1208V1056H444V1194Q445 1290 496 1394T627 1562Z" />
+<glyph unicode="&#x201d;" horiz-adv-x="795" d="M188 1024L64 1101Q154 1232 157 1378V1536H370V1398Q370 1295 318 1191T188 1024ZM522 1024L398 1101Q488 1232 491 1378V1536H704V1398Q704 1295 652 1191T522 1024Z" />
+<glyph unicode="&#x201e;" horiz-adv-x="776" d="M177 -318L50 -240Q135 -103 138 54V255H356V69Q355 -39 300 -153Q251 -253 177 -318ZM499 -318L372 -240Q460 -98 464 52V255H682V73Q682 -26 631 -136T499 -318Z" />
+<glyph unicode="&#x2022;" horiz-adv-x="715" d="M136 771Q136 866 196 926T357 987Q460 987 520 927T580 768V731Q580 637 521 578T358 518Q259 518 199 575T136 726V771Z" />
+<glyph unicode="&#x2039;" horiz-adv-x="626" d="M316 537L563 138H396L108 528V547L396 937H563L316 537Z" />
+<glyph unicode="&#x203a;" horiz-adv-x="617" d="M251 937L539 547V528L251 138H84L331 537L84 937H251Z" />
+</font>
+</defs>
+</svg>
diff --git a/site/assets/fonts/Roboto_500.ttf b/site/assets/fonts/Roboto_500.ttf
new file mode 100644
index 00000000..55b559f6
--- /dev/null
+++ b/site/assets/fonts/Roboto_500.ttf
Binary files differ
diff --git a/site/assets/fonts/Roboto_500.woff b/site/assets/fonts/Roboto_500.woff
new file mode 100644
index 00000000..2633e152
--- /dev/null
+++ b/site/assets/fonts/Roboto_500.woff
Binary files differ
diff --git a/site/assets/fonts/Roboto_500.woff2 b/site/assets/fonts/Roboto_500.woff2
new file mode 100644
index 00000000..8dceabcf
--- /dev/null
+++ b/site/assets/fonts/Roboto_500.woff2
Binary files differ
diff --git a/site/assets/fonts/Roboto_700.eot b/site/assets/fonts/Roboto_700.eot
new file mode 100644
index 00000000..0df88af1
--- /dev/null
+++ b/site/assets/fonts/Roboto_700.eot
Binary files differ
diff --git a/site/assets/fonts/Roboto_700.svg b/site/assets/fonts/Roboto_700.svg
new file mode 100644
index 00000000..11db87dd
--- /dev/null
+++ b/site/assets/fonts/Roboto_700.svg
@@ -0,0 +1,309 @@
+<?xml version="1.0" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<svg xmlns="http://www.w3.org/2000/svg">
+<defs >
+<font id="Roboto" horiz-adv-x="1191" ><font-face
+ font-family="Roboto"
+ units-per-em="2048"
+ panose-1="2 0 0 0 0 0 0 0 0 0"
+ ascent="1900"
+ descent="-500"
+ alphabetic="0" />
+<glyph unicode=" " horiz-adv-x="510" />
+<glyph unicode="!" horiz-adv-x="557" d="M405 447H165L131 1456H439L405 447ZM285 289Q358 289 402 246T447 136Q447 70 403 27T285 -16Q213 -16 169 27T124 136T168 245T285 289Z" />
+<glyph unicode="&quot;" horiz-adv-x="657" d="M266 1398L226 987H64V1536H266V1398ZM591 1398L552 987H390V1536H591V1398Z" />
+<glyph unicode="#" horiz-adv-x="1220" d="M667 410H474L402 0H219L291 410H64V582H321L371 866H139V1040H402L475 1456H657L584 1040H778L851 1456H1034L961 1040H1180V866H930L880 582H1104V410H850L778 0H595L667 410ZM504 582H697L747 866H553L504 582Z" />
+<glyph unicode="$" horiz-adv-x="1175" d="M790 383Q790 465 745 513T590 603T402 684T267 777T179 901T147 1070Q147 1239 255 1347T542 1473V1687H702V1470Q879 1445 979 1323T1079 1005H790Q790 1125 741 1184T608 1244Q526 1244 481 1198T436 1069Q436 993
+480 947T643 853T840 763T970 667T1051 545T1079 385Q1079 215 973 109T681 -16V-215H522V-17Q317 5 205 128T92 457H381Q381 338 437 275T600 211Q688 211 739 257T790 383Z" />
+<glyph unicode="%" horiz-adv-x="1512" d="M95 1176Q95 1310 182 1393T410 1477Q553 1477 640 1395T727 1171V1099Q727 964 640 882T412 800Q270 800 183 882T95 1105V1176ZM287 1099Q287 1039 321 1003T412 966T501 1003T534 1102V1176Q534 1236 501 1273T410
+1310Q355 1310 321 1274T287 1172V1099ZM791 357Q791 492 879 574T1107 657Q1249 657 1336 576T1424 351V279Q1424 145 1338 63T1109 -20Q965 -20 878 63T791 283V357ZM983 279Q983 224 1019 185T1109 146Q1231 146 1231 281V357Q1231 417 1197 453T1107 490T1017
+454T983 354V279ZM469 109L328 185L1039 1323L1180 1247L469 109Z" />
+<glyph unicode="&amp;" horiz-adv-x="1344" d="M71 392Q71 493 127 579T335 770Q270 857 233 933T196 1093Q196 1263 303 1369T593 1476Q756 1476 860 1379T965 1137Q965 963 789 830L677 749L928 457Q987 573 987 713H1233Q1233 429 1102 253L1320 0H992L917
+86Q756 -20 547 -20Q331 -20 201 93T71 392ZM561 212Q665 212 758 270L471 602L450 587Q361 511 361 408Q361 321 416 267T561 212ZM455 1097Q455 1022 543 908L620 959Q679 997 701 1033T724 1119T687 1206T592 1243Q530 1243 493 1203T455 1097Z" />
+<glyph unicode="&apos;" horiz-adv-x="331" d="M275 1389L246 985H63V1536H275V1389Z" />
+<glyph unicode="(" horiz-adv-x="719" d="M124 592Q124 821 185 1028T365 1394T623 1616L679 1460Q533 1352 451 1126T369 598V567Q369 264 450 37T679 -302L623 -455Q487 -394 370 -240T190 117T124 539V592Z" />
+<glyph unicode=")" horiz-adv-x="722" d="M609 569Q609 343 544 134T357 -235T96 -455L40 -302Q182 -195 264 28T349 541V594Q349 893 268 1121T40 1463L96 1616Q232 1557 352 1402T539 1041T609 614V569Z" />
+<glyph unicode="*" horiz-adv-x="928" d="M341 962L27 1051L86 1232L397 1107L377 1456H573L553 1100L856 1223L915 1040L595 951L805 685L646 572L464 864L284 582L125 690L341 962Z" />
+<glyph unicode="+" horiz-adv-x="1118" d="M694 815H1055V554H694V146H419V554H57V815H419V1206H694V815Z" />
+<glyph unicode="," horiz-adv-x="500" d="M186 -365L35 -286L71 -221Q138 -99 140 22V246H384L383 46Q382 -65 327 -178T186 -365Z" />
+<glyph unicode="-" horiz-adv-x="794" d="M673 507H110V740H673V507Z" />
+<glyph unicode="." horiz-adv-x="595" d="M126 142Q126 211 172 254T289 297Q360 297 406 254T453 142Q453 74 407 32T289 -11Q218 -11 172 31T126 142Z" />
+<glyph unicode="/" horiz-adv-x="765" d="M202 -125H-13L523 1456H738L202 -125Z" />
+<glyph unicode="0" horiz-adv-x="1175" d="M1079 602Q1079 300 954 140T588 -20Q350 -20 224 137T95 587V855Q95 1160 221 1318T586 1476T950 1320T1079 870V602ZM790 896Q790 1077 741 1159T586 1242Q484 1242 436 1164T384 918V564Q384 386 432 300T588 213Q694
+213 741 296T790 550V896Z" />
+<glyph unicode="1" horiz-adv-x="1175" d="M801 0H512V1114L167 1007V1242L770 1458H801V0Z" />
+<glyph unicode="2" horiz-adv-x="1175" d="M1097 0H99V198L570 700Q667 806 713 885T760 1035Q760 1132 711 1187T571 1243Q473 1243 417 1176T360 998H70Q70 1131 133 1241T313 1413T576 1476Q801 1476 925 1368T1050 1063Q1050 955 994 843T802 582L471 233H1097V0Z" />
+<glyph unicode="3" horiz-adv-x="1175" d="M393 856H547Q657 856 710 911T763 1057Q763 1145 711 1194T566 1243Q483 1243 427 1198T371 1079H82Q82 1193 143 1283T315 1425T559 1476Q790 1476 921 1366T1052 1061Q1052 961 991 877T831 748Q954 704 1014 616T1075
+408Q1075 214 934 97T559 -20Q341 -20 203 95T64 399H353Q353 317 414 265T566 213Q669 213 727 267T786 412Q786 630 546 630H393V856Z" />
+<glyph unicode="4" horiz-adv-x="1175" d="M954 548H1119V315H954V0H665V315H68L55 497L662 1456H954V548ZM343 548H665V1062L646 1029L343 548Z" />
+<glyph unicode="5" horiz-adv-x="1175" d="M142 716L226 1456H1042V1215H463L427 902Q530 957 646 957Q854 957 972 828T1090 467Q1090 326 1031 215T860 42T598 -20Q466 -20 353 33T175 184T105 405H391Q400 314 454 264T597 213Q695 213 748 283T801 483Q801
+607 740 673T567 739Q464 739 400 685L372 659L142 716Z" />
+<glyph unicode="6" horiz-adv-x="1175" d="M883 1471V1233H855Q659 1230 540 1131T396 856Q512 974 689 974Q879 974 991 838T1103 480Q1103 338 1042 223T868 44T613 -20Q383 -20 242 140T100 567V671Q100 908 189 1089T446 1370T835 1471H883ZM601 742Q531 742
+474 706T390 609V521Q390 376 447 295T607 213Q700 213 757 286T815 477Q815 596 757 669T601 742Z" />
+<glyph unicode="7" horiz-adv-x="1175" d="M1089 1294L526 0H221L785 1222H61V1456H1089V1294Z" />
+<glyph unicode="8" horiz-adv-x="1175" d="M1048 1069Q1048 963 995 881T849 750Q955 699 1017 610T1079 399Q1079 205 947 93T588 -20T228 93T95 399Q95 520 157 610T324 750Q231 799 179 881T126 1069Q126 1255 250 1365T587 1476Q799 1476 923 1367T1048 1069ZM789
+420Q789 515 734 572T586 629Q494 629 439 573T384 420Q384 327 438 270T588 213Q682 213 735 268T789 420ZM759 1055Q759 1140 714 1191T587 1243Q506 1243 461 1193T416 1055Q416 968 461 915T588 862T714 915T759 1055Z" />
+<glyph unicode="9" horiz-adv-x="1175" d="M775 582Q662 471 511 471Q318 471 202 603T86 961Q86 1104 148 1223T323 1409T575 1476Q719 1476 831 1404T1005 1197T1068 888V781Q1068 417 887 209T374 -13L303 -14V227L367 228Q744 245 775 582ZM582 692Q652 692
+702 728T779 815V934Q779 1081 723 1162T573 1243Q486 1243 430 1164T374 964Q374 845 428 769T582 692Z" />
+<glyph unicode=":" horiz-adv-x="578" d="M381 142Q381 211 427 254T544 297Q615 297 661 254T708 142Q708 74 662 32T544 -11Q473 -11 427 31T381 142ZM125 961Q125 1030 171 1073T288 1116Q359 1116 405 1073T452 961Q452 893 406 851T288 808Q217 808 171 850T125
+961Z" />
+<glyph unicode=";" horiz-adv-x="537" d="M108 961Q108 1030 154 1073T271 1116Q342 1116 388 1073T435 961Q435 893 389 851T271 808Q200 808 154 850T108 961ZM208 -365L57 -286L93 -221Q160 -99 162 22V246H406L405 46Q404 -65 349 -178T208 -365Z" />
+<glyph unicode="&lt;" horiz-adv-x="1042" d="M345 618L915 417V137L54 502V738L915 1103V823L345 618Z" />
+<glyph unicode="=" horiz-adv-x="1172" d="M1030 746H136V982H1030V746ZM1030 313H136V549H1030V313Z" />
+<glyph unicode="&gt;" horiz-adv-x="1058" d="M701 621L120 824V1102L991 737V502L120 136V415L701 621Z" />
+<glyph unicode="?" horiz-adv-x="1019" d="M347 447Q347 587 381 670T505 833T626 964T656 1071Q656 1240 500 1240Q426 1240 382 1195T335 1069H45Q47 1260 168 1368T500 1476Q712 1476 829 1374T946 1084Q946 999 908 924T775 756L694 679Q618 606 607 508L603
+447H347ZM318 140Q318 207 363 250T480 294T596 251T642 140Q642 74 598 31T480 -12T363 31T318 140Z" />
+<glyph unicode="@" horiz-adv-x="1833" d="M1749 536Q1738 280 1623 130T1312 -21Q1226 -21 1164 16T1069 122Q969 -18 808 -18Q662 -18 582 105T522 430Q540 595 605 723T771 921T987 990Q1130 990 1231 924L1294 881L1243 303Q1233 224 1260 182T1348 140Q1440
+140 1502 247T1570 529Q1587 878 1430 1065T963 1253Q770 1253 619 1155T381 877T283 463Q267 109 425 -84T898 -278Q981 -278 1072 -260T1229 -210L1267 -364Q1206 -404 1103 -428T894 -453Q630 -453 444 -347T167 -33T87 463Q99 739 213 958T523 1296T967 1416Q1218
+1416 1399 1309T1670 1001T1749 536ZM744 430Q733 298 767 230T877 161Q926 161 970 204T1043 328L1085 801Q1046 814 1005 814Q890 814 827 716T744 430Z" />
+<glyph unicode="A" horiz-adv-x="1378" d="M952 300H426L326 0H7L549 1456H827L1372 0H1053L952 300ZM507 543H871L688 1088L507 543Z" />
+<glyph unicode="B" horiz-adv-x="1307" d="M130 0V1456H640Q905 1456 1042 1355T1179 1057Q1179 950 1124 869T971 749Q1083 721 1147 636T1212 428Q1212 218 1078 110T696 0H130ZM430 634V241H687Q793 241 852 291T912 431Q912 631 705 634H430ZM430 846H652Q879
+850 879 1027Q879 1126 822 1169T640 1213H430V846Z" />
+<glyph unicode="C" horiz-adv-x="1340" d="M1273 485Q1256 250 1100 115T687 -20Q407 -20 247 168T86 686V775Q86 985 160 1145T371 1390T691 1476Q943 1476 1097 1341T1275 962H975Q964 1103 897 1166T691 1230Q541 1230 467 1123T390 789V679Q390 443 461 334T687
+225Q826 225 894 288T973 485H1273Z" />
+<glyph unicode="D" horiz-adv-x="1331" d="M130 0V1456H578Q770 1456 921 1370T1158 1124T1243 761V694Q1243 491 1160 333T924 88T581 0H130ZM430 1213V241H575Q751 241 844 356T939 685V762Q939 984 847 1098T578 1213H430Z" />
+<glyph unicode="E" horiz-adv-x="1152" d="M1006 631H430V241H1106V0H130V1456H1104V1213H430V866H1006V631Z" />
+<glyph unicode="F" horiz-adv-x="1122" d="M1006 595H430V0H130V1456H1078V1213H430V837H1006V595Z" />
+<glyph unicode="G" horiz-adv-x="1395" d="M1282 184Q1201 87 1053 34T725 -20Q536 -20 394 62T174 302T94 671V770Q94 988 167 1147T379 1391T704 1476Q963 1476 1109 1353T1282 993H990Q970 1118 902 1176T713 1234Q560 1234 480 1119T399 777V684Q399 455 486
+338T741 221Q910 221 982 293V544H709V765H1282V184Z" />
+<glyph unicode="H" horiz-adv-x="1447" d="M1315 0H1015V624H430V0H130V1456H430V866H1015V1456H1315V0Z" />
+<glyph unicode="I" horiz-adv-x="597" d="M449 0H149V1456H449V0Z" />
+<glyph unicode="J" horiz-adv-x="1144" d="M717 1456H1017V448Q1017 309 956 203T782 38T529 -20Q298 -20 169 97T40 430H342Q342 323 387 272T529 221Q615 221 666 280T717 448V1456Z" />
+<glyph unicode="K" horiz-adv-x="1300" d="M586 584L430 416V0H130V1456H430V796L562 977L933 1456H1302L785 809L1317 0H960L586 584Z" />
+<glyph unicode="L" horiz-adv-x="1109" d="M430 241H1067V0H130V1456H430V241Z" />
+<glyph unicode="M" horiz-adv-x="1794" d="M522 1456L896 400L1268 1456H1662V0H1361V398L1391 1085L998 0H792L400 1084L430 398V0H130V1456H522Z" />
+<glyph unicode="N" horiz-adv-x="1446" d="M1314 0H1014L430 958V0H130V1456H430L1015 496V1456H1314V0Z" />
+<glyph unicode="O" horiz-adv-x="1414" d="M1326 695Q1326 480 1250 318T1033 68T708 -20Q527 -20 385 67T165 315T86 687V759Q86 974 163 1137T382 1388T706 1476T1029 1389T1248 1138T1326 760V695ZM1022 761Q1022 990 940 1109T706 1228Q555 1228 473 1111T390
+766V695Q390 472 472 349T708 226Q859 226 940 344T1022 690V761Z" />
+<glyph unicode="P" horiz-adv-x="1321" d="M430 513V0H130V1456H698Q862 1456 986 1396T1178 1226T1245 974Q1245 760 1099 637T693 513H430ZM430 756H698Q817 756 879 812T942 972Q942 1079 879 1145T705 1213H430V756Z" />
+<glyph unicode="Q" horiz-adv-x="1414" d="M1324 695Q1324 491 1258 341T1075 99L1317 -91L1126 -260L816 -11Q763 -20 706 -20Q525 -20 383 67T163 315T84 687V759Q84 974 161 1137T380 1388T704 1476T1027 1389T1246 1138T1324 760V695ZM1020 761Q1020 990 938
+1109T704 1228Q553 1228 471 1111T388 766V695Q388 472 470 349T706 226Q857 226 938 344T1020 690V761Z" />
+<glyph unicode="R" horiz-adv-x="1307" d="M669 533H430V0H130V1456H671Q929 1456 1069 1341T1209 1016Q1209 867 1145 768T949 609L1264 14V0H942L669 533ZM430 776H672Q785 776 847 833T909 992Q909 1095 851 1154T671 1213H430V776Z" />
+<glyph unicode="S" horiz-adv-x="1259" d="M885 382Q885 467 825 512T609 608T362 708Q114 842 114 1069Q114 1187 180 1279T371 1424T651 1476Q807 1476 929 1420T1118 1260T1186 1026H886Q886 1126 823 1181T646 1237Q536 1237 475 1191T414 1068Q414 997 485
+949T696 859Q952 782 1069 668T1186 384Q1186 195 1043 88T658 -20Q490 -20 352 41T142 210T69 458H370Q370 217 658 217Q765 217 825 260T885 382Z" />
+<glyph unicode="T" horiz-adv-x="1267" d="M1226 1213H780V0H480V1213H40V1456H1226V1213Z" />
+<glyph unicode="U" horiz-adv-x="1348" d="M1232 1456V497Q1232 258 1083 119T674 -20Q419 -20 269 115T116 486V1456H416V495Q416 352 484 287T674 221Q927 221 931 487V1456H1232Z" />
+<glyph unicode="V" horiz-adv-x="1339" d="M668 361L998 1456H1332L825 0H512L7 1456H340L668 361Z" />
+<glyph unicode="W" horiz-adv-x="1791" d="M1264 420L1460 1456H1759L1436 0H1134L897 974L660 0H358L35 1456H334L531 422L771 1456H1025L1264 420Z" />
+<glyph unicode="X" horiz-adv-x="1301" d="M651 954L924 1456H1269L845 734L1280 0H931L651 510L371 0H22L457 734L33 1456H378L651 954Z" />
+<glyph unicode="Y" horiz-adv-x="1266" d="M632 800L935 1456H1263L785 528V0H480V528L2 1456H331L632 800Z" />
+<glyph unicode="Z" horiz-adv-x="1241" d="M448 241H1182V0H73V176L793 1213H74V1456H1166V1284L448 241Z" />
+<glyph unicode="[" horiz-adv-x="569" d="M552 1471H410V-116H552V-339H120V1694H552V1471Z" />
+<glyph unicode="\" horiz-adv-x="864" d="M0 1456H295L903 -125H607L0 1456Z" />
+<glyph unicode="]" horiz-adv-x="569" d="M13 1694H445V-339H13V-116H156V1471H13V1694Z" />
+<glyph unicode="^" horiz-adv-x="895" d="M448 1186L274 729H44L343 1456H553L852 729H623L448 1186Z" />
+<glyph unicode="_" horiz-adv-x="914" d="M912 -226H1V0H912V-226Z" />
+<glyph unicode="`" horiz-adv-x="677" d="M565 1226H328L52 1536H367L565 1226Z" />
+<glyph unicode="a" horiz-adv-x="1098" d="M738 0Q718 39 709 97Q604 -20 436 -20Q277 -20 173 72T68 304Q68 476 195 568T564 661H697V723Q697 798 659 843T537 888Q464 888 423 853T381 757H92Q92 851 150 931T314 1056T552 1102Q752 1102 869 1002T987 719V250Q988
+96 1030 17V0H738ZM499 201Q563 201 617 229T697 306V492H589Q372 492 358 342L357 325Q357 271 395 236T499 201Z" />
+<glyph unicode="b" horiz-adv-x="1153" d="M1086 531Q1086 271 975 126T665 -20Q489 -20 384 115L371 0H111V1536H400V985Q500 1102 663 1102Q861 1102 973 957T1086 547V531ZM797 552Q797 716 745 791T590 867Q452 867 400 754V327Q453 213 592 213Q732 213 776
+351Q797 417 797 552Z" />
+<glyph unicode="c" horiz-adv-x="1068" d="M561 213Q641 213 691 257T743 374H1014Q1013 264 954 173T793 31T566 -20Q334 -20 200 127T66 535V554Q66 804 199 953T564 1102Q767 1102 889 987T1014 679H743Q741 763 691 815T559 868Q458 868 407 795T355 556V526Q355
+359 406 286T561 213Z" />
+<glyph unicode="d" horiz-adv-x="1154" d="M66 549Q66 802 179 952T490 1102Q648 1102 751 984V1536H1041V0H780L766 115Q658 -20 488 -20Q297 -20 182 130T66 549ZM355 528Q355 376 408 295T562 214Q696 214 751 327V754Q697 867 564 867Q355 867 355 528Z" />
+<glyph unicode="e" horiz-adv-x="1107" d="M609 -20Q371 -20 222 126T72 515V543Q72 706 135 834T313 1032T577 1102Q799 1102 926 962T1054 565V447H365Q379 341 449 277T628 213Q795 213 889 334L1031 175Q966 83 855 32T609 -20ZM576 868Q490 868 437 810T368
+644H770V667Q768 763 718 815T576 868Z" />
+<glyph unicode="f" horiz-adv-x="734" d="M190 0V870H29V1082H190V1174Q190 1356 294 1456T587 1557Q647 1557 734 1537L731 1313Q695 1322 643 1322Q480 1322 480 1169V1082H695V870H480V0H190Z" />
+<glyph unicode="g" horiz-adv-x="1169" d="M69 549Q69 798 187 950T507 1102Q685 1102 784 980L796 1082H1058V36Q1058 -106 994 -211T812 -371T538 -426Q419 -426 306 -379T135 -256L263 -80Q371 -201 525 -201Q640 -201 704 -140T768 35V93Q668 -20 505 -20Q310
+-20 190 132T69 537V549ZM358 528Q358 381 417 298T579 214Q711 214 768 313V768Q710 867 581 867Q477 867 418 782T358 528Z" />
+<glyph unicode="h" horiz-adv-x="1146" d="M393 964Q508 1102 682 1102Q1034 1102 1039 693V0H750V685Q750 778 710 822T577 867Q450 867 393 769V0H104V1536H393V964Z" />
+<glyph unicode="i" horiz-adv-x="543" d="M416 0H126V1082H416V0ZM109 1362Q109 1427 152 1469T271 1511Q345 1511 389 1469T433 1362Q433 1296 389 1254T271 1212T154 1254T109 1362Z" />
+<glyph unicode="j" horiz-adv-x="532" d="M417 1082V-59Q417 -238 322 -337T48 -437Q-27 -437 -95 -420V-191Q-43 -200 -4 -200Q127 -200 127 -61V1082H417ZM104 1362Q104 1427 147 1469T266 1511T384 1469T428 1362Q428 1296 384 1254T266 1212T149 1254T104 1362Z" />
+<glyph unicode="k" horiz-adv-x="1094" d="M504 434L400 330V0H111V1536H400V685L456 757L733 1082H1080L689 631L1114 0H782L504 434Z" />
+<glyph unicode="l" horiz-adv-x="543" d="M416 0H126V1536H416V0Z" />
+<glyph unicode="m" horiz-adv-x="1773" d="M382 1082L391 961Q506 1102 702 1102Q911 1102 989 937Q1103 1102 1314 1102Q1490 1102 1576 1000T1662 691V0H1372V690Q1372 782 1336 824T1209 867Q1079 867 1029 743L1030 0H741V689Q741 783 704 825T578 867Q455
+867 400 765V0H111V1082H382Z" />
+<glyph unicode="n" horiz-adv-x="1147" d="M377 1082L386 957Q502 1102 697 1102Q869 1102 953 1001T1039 699V0H750V692Q750 784 710 825T577 867Q455 867 394 763V0H105V1082H377Z" />
+<glyph unicode="o" horiz-adv-x="1158" d="M66 551Q66 712 128 838T306 1033T577 1102Q796 1102 934 968T1089 604L1091 530Q1091 281 952 131T579 -20T206 130T66 538V551ZM355 530Q355 376 413 295T579 213Q684 213 743 293T802 551Q802 702 743 785T577 868Q471
+868 413 786T355 530Z" />
+<glyph unicode="p" horiz-adv-x="1153" d="M1085 531Q1085 281 972 131T665 -20Q501 -20 400 94V-416H111V1082H379L389 976Q494 1102 663 1102Q863 1102 974 954T1085 546V531ZM796 552Q796 703 743 785T587 867Q451 867 400 763V320Q453 213 589 213Q796 213 796 552Z" />
+<glyph unicode="q" horiz-adv-x="1157" d="M66 551Q66 806 179 954T491 1102Q665 1102 768 969L787 1082H1041V-416H751V93Q651 -20 489 -20Q296 -20 181 130T66 551ZM355 530Q355 375 409 294T563 213Q696 213 751 319V766Q697 868 565 868Q465 868 410 787T355 530Z" />
+<glyph unicode="r" horiz-adv-x="747" d="M719 811Q660 819 615 819Q451 819 400 708V0H111V1082H384L392 953Q479 1102 633 1102Q681 1102 723 1089L719 811Z" />
+<glyph unicode="s" horiz-adv-x="1053" d="M697 299Q697 352 645 382T476 437Q90 518 90 765Q90 909 209 1005T522 1102Q728 1102 851 1005T975 753H686Q686 815 646 855T521 896Q448 896 408 863T368 779Q368 731 413 702T567 651T749 602Q978 518 978 311Q978
+163 851 72T523 -20Q387 -20 282 28T116 161T56 344H330Q334 267 387 226T529 185Q612 185 654 216T697 299Z" />
+<glyph unicode="t" horiz-adv-x="692" d="M457 1348V1082H642V870H457V330Q457 270 480 244T568 218Q616 218 653 225V6Q568 -20 478 -20Q174 -20 168 287V870H10V1082H168V1348H457Z" />
+<glyph unicode="u" horiz-adv-x="1146" d="M759 110Q652 -20 463 -20Q289 -20 198 80T104 373V1082H393V383Q393 214 547 214Q694 214 749 316V1082H1039V0H767L759 110Z" />
+<glyph unicode="v" horiz-adv-x="1035" d="M516 353L717 1082H1019L654 0H378L13 1082H315L516 353Z" />
+<glyph unicode="w" horiz-adv-x="1505" d="M1052 393L1194 1082H1473L1197 0H955L750 681L545 0H304L28 1082H307L448 394L646 1082H855L1052 393Z" />
+<glyph unicode="x" horiz-adv-x="1042" d="M523 759L705 1082H1014L706 552L1027 0H717L524 340L332 0H21L342 552L35 1082H345L523 759Z" />
+<glyph unicode="y" horiz-adv-x="1028" d="M515 409L715 1082H1025L590 -168L566 -225Q469 -437 246 -437Q183 -437 118 -418V-199L162 -200Q244 -200 284 -175T348 -92L382 -3L3 1082H314L515 409Z" />
+<glyph unicode="z" horiz-adv-x="1042" d="M443 233H972V0H74V176L583 848H89V1082H956V911L443 233Z" />
+<glyph unicode="{" horiz-adv-x="676" d="M586 -360Q226 -259 226 105V304Q226 515 48 515V722Q222 722 226 921V1133Q226 1318 316 1432T586 1597L642 1436Q566 1408 527 1339T486 1146V936Q486 710 307 619Q486 527 486 300V88Q491 -146 642 -198L586 -360Z" />
+<glyph unicode="|" horiz-adv-x="518" d="M348 -270H173V1456H348V-270Z" />
+<glyph unicode="}" horiz-adv-x="676" d="M34 -198Q186 -144 190 92V304Q190 530 373 618Q190 706 190 937V1146Q186 1379 34 1436L90 1597Q269 1547 359 1434T450 1136V921Q454 722 628 722V515Q450 515 450 306V89Q442 -261 90 -360L34 -198Z" />
+<glyph unicode="~" horiz-adv-x="1328" d="M1221 793Q1221 607 1128 491T888 375Q814 375 751 403T604 511T451 591Q393 591 357 541T321 413L106 415Q106 601 196 714T437 827Q515 827 579 797T724 690T873 612Q932 612 969 665T1007 794L1221 793Z" />
+<glyph unicode="&#xa0;" horiz-adv-x="510" />
+<glyph unicode="&#xa1;" horiz-adv-x="578" d="M170 639H411L444 -369H137L170 639ZM452 948Q452 881 406 838T290 795T174 838T128 948T172 1058T290 1101T407 1058T452 948Z" />
+<glyph unicode="&#xa2;" horiz-adv-x="1178" d="M594 213Q674 213 724 257T776 374H1048Q1046 229 950 124T698 -11V-245H498V-12Q311 18 205 162T99 532V554Q99 772 204 917T498 1094V1318H698V1093Q861 1064 953 953T1048 679H776Q774 765 724 816T593 868Q491
+868 440 794T388 559V526Q388 358 439 286T594 213Z" />
+<glyph unicode="&#xa3;" horiz-adv-x="1217" d="M564 576L570 437Q570 314 508 241H1161V0H102V241H194Q266 259 266 420L261 576H99V812H253L246 1039Q246 1241 369 1358T700 1475Q912 1475 1033 1363T1154 1058H867Q867 1143 824 1188T699 1233Q633 1233 590
+1184T546 1039L555 812H864V576H564Z" />
+<glyph unicode="&#xa4;" horiz-adv-x="1418" d="M1073 107Q914 -20 712 -20Q509 -20 351 106L222 -26L81 118L216 255Q116 411 116 608Q116 812 225 973L81 1120L222 1264L364 1119Q519 1234 712 1234Q906 1234 1061 1117L1205 1265L1347 1120L1199 969Q1306 810
+1306 608Q1306 415 1208 259L1347 118L1205 -27L1073 107ZM302 608Q302 490 356 389T505 229T712 170Q822 170 917 228T1067 388T1121 608Q1121 727 1067 827T918 986T712 1044Q600 1044 505 986T356 828T302 608Z" />
+<glyph unicode="&#xa5;" horiz-adv-x="1098" d="M550 892L774 1456H1087L765 742H983V567H695V452H983V278H695V0H395V278H89V452H395V567H89V742H333L10 1456H325L550 892Z" />
+<glyph unicode="&#xa6;" horiz-adv-x="516" d="M128 -270V525H388V-270H128ZM388 698H128V1456H388V698Z" />
+<glyph unicode="&#xa7;" horiz-adv-x="1287" d="M1180 481Q1180 299 1018 210Q1153 108 1153 -78Q1153 -253 1016 -352T636 -452Q379 -452 236 -345T92 -35L381 -34Q381 -123 445 -170T636 -218Q748 -218 806 -181T864 -80Q864 -15 800 26T553 117T278 223T140
+353T94 534Q94 714 256 807Q120 910 120 1095Q120 1266 260 1371T640 1476Q887 1476 1023 1363T1159 1049H870Q870 1136 809 1189T640 1243Q530 1243 470 1204T410 1097Q410 1024 465 987T706 901T988 797T1133 666T1180 481ZM458 704Q383 662 383 563Q383 494
+424 457T590 379L812 312Q894 359 894 452Q894 514 849 553T685 633L458 704Z" />
+<glyph unicode="&#xa8;" horiz-adv-x="956" d="M371 1365Q371 1312 332 1276T232 1239Q170 1239 132 1276T94 1365T132 1454T232 1492T332 1455T371 1365ZM581 1365Q581 1419 621 1455T720 1492Q779 1492 818 1455T858 1365Q858 1313 820 1276T720 1238T620 1275T581
+1365Z" />
+<glyph unicode="&#xa9;" horiz-adv-x="1606" d="M1117 596Q1117 444 1030 363T782 282T528 388T433 675V788Q433 962 528 1068T782 1175Q945 1175 1031 1093T1118 861H962Q962 957 916 998T782 1040Q690 1040 640 973T588 792V669Q588 552 639 485T782 417Q871
+417 916 457T961 596H1117ZM1383 729Q1383 895 1308 1037T1096 1265T796 1351Q637 1351 501 1269T286 1043T208 729T285 415T499 188T796 104T1093 189T1307 418T1383 729ZM86 729Q86 931 179 1104T438 1376T796 1476T1153 1377T1412 1104T1506 729Q1506 525 1411
+352T1152 79T796 -20Q603 -20 439 80T180 353T86 729Z" />
+<glyph unicode="&#xaa;" horiz-adv-x="909" d="M604 705Q594 732 587 773Q510 691 386 691Q268 691 203 752T137 919Q137 1029 221 1089T478 1150H580V1201Q580 1328 464 1328Q399 1328 363 1303T326 1229L153 1243Q153 1347 240 1411T464 1476Q599 1476 677 1404T755
+1199V883Q755 786 781 705H604ZM429 835Q469 835 511 853T580 897V1033H474Q398 1032 355 1002T312 923Q312 835 429 835Z" />
+<glyph unicode="&#xab;" horiz-adv-x="1023" d="M559 524L799 125H613L333 515V534L613 924H799L559 524ZM688 524L928 125H742L462 515V534L742 924H928L688 524Z" />
+<glyph unicode="&#xac;" horiz-adv-x="1129" d="M961 374H761V634H126V805H961V374Z" />
+<glyph unicode="&#xad;" horiz-adv-x="794" d="M673 507H110V740H673V507Z" />
+<glyph unicode="&#xae;" horiz-adv-x="1606" d="M86 729Q86 931 179 1104T438 1376T796 1476T1153 1377T1412 1104T1506 729Q1506 525 1411 352T1152 79T796 -20Q603 -20 439 80T180 353T86 729ZM1383 729Q1383 895 1308 1037T1096 1265T796 1351Q637 1351 501
+1269T286 1043T208 729T285 415T499 188T796 104T1093 189T1307 418T1383 729ZM652 653V316H501V1166H782Q933 1166 1020 1098T1107 903Q1107 791 994 729Q1055 698 1079 643T1104 505T1107 389T1121 332V316H966Q953 350 953 510Q953 586 920 619T810 653H652ZM652
+787H788Q862 787 909 819T957 903Q957 973 922 1002T793 1033H652V787Z" />
+<glyph unicode="&#xaf;" horiz-adv-x="1026" d="M858 1287H168V1454H858V1287Z" />
+<glyph unicode="&#xb0;" horiz-adv-x="795" d="M126 1200Q126 1314 207 1395T398 1476Q507 1476 586 1396T666 1200T587 1007T398 928Q290 928 208 1006T126 1200ZM398 1076Q451 1076 485 1111T520 1200Q520 1252 486 1290T398 1328T309 1290T273 1200T309 1112T398
+1076Z" />
+<glyph unicode="&#xb1;" horiz-adv-x="1100" d="M677 942H1005V701H677V337H424V701H89V942H424V1285H677V942ZM977 1H113V236H977V1Z" />
+<glyph unicode="&#xb2;" horiz-adv-x="763" d="M693 667H73V805L360 1062Q416 1111 440 1153T465 1218Q465 1302 370 1302Q320 1302 291 1271T261 1193H55Q55 1309 140 1388T361 1467Q509 1467 589 1403T670 1219Q670 1149 634 1091T490 946L342 831H693V667Z" />
+<glyph unicode="&#xb3;" horiz-adv-x="763" d="M273 1137H355Q474 1137 474 1225Q474 1260 446 1281T368 1302Q326 1302 297 1287T267 1242H62Q62 1344 146 1405T362 1467Q507 1467 592 1408T678 1241Q678 1122 543 1072Q693 1031 693 888Q693 783 602 720T362
+656Q220 656 134 722T48 903H254Q254 869 288 845T374 820Q434 820 460 846T487 908Q487 1000 365 1001H273V1137Z" />
+<glyph unicode="&#xb4;" horiz-adv-x="679" d="M298 1536H613L336 1226H101L298 1536Z" />
+<glyph unicode="&#xb5;" horiz-adv-x="1261" d="M428 1082V459Q428 333 468 274T608 214Q757 214 813 319V1082H1102V0H833L827 68Q738 -21 602 -21Q500 -21 428 24V-416H139V1082H428Z" />
+<glyph unicode="&#xb6;" horiz-adv-x="1003" d="M650 0V520H570Q340 520 208 647T75 988Q75 1201 208 1328T571 1456H869V0H650Z" />
+<glyph unicode="&#xb7;" horiz-adv-x="617" d="M140 697Q140 766 186 809T303 852T420 809T467 697T420 586T303 543Q231 543 186 586T140 697Z" />
+<glyph unicode="&#xb8;" horiz-adv-x="548" d="M345 7L334 -51Q484 -78 484 -224Q484 -334 393 -398T135 -462L128 -295Q240 -295 240 -214Q240 -172 207 -157T98 -136L129 7H345Z" />
+<glyph unicode="&#xb9;" horiz-adv-x="763" d="M528 667H324V1215L135 1174V1332L509 1453H528V667Z" />
+<glyph unicode="&#xba;" horiz-adv-x="936" d="M118 1121Q118 1281 213 1378T464 1476T715 1379T811 1116V1044Q811 885 717 788T466 690Q308 690 213 788T118 1049V1121ZM293 1044Q293 946 339 891T466 836Q544 836 589 890T636 1041V1121Q636 1218 590 1273T464
+1328Q386 1328 340 1274T293 1117V1044Z" />
+<glyph unicode="&#xbb;" horiz-adv-x="1023" d="M272 923L552 533V514L272 124H85L325 523L85 923H272ZM665 923L945 533V514L665 124H478L718 523L478 923H665Z" />
+<glyph unicode="&#xbc;" horiz-adv-x="1470" d="M494 664H290V1212L101 1171V1329L475 1450H494V664ZM458 117L317 193L1028 1331L1169 1255L458 117ZM1302 326H1390V159H1302V0H1097V159H751L739 294L1096 789H1302V326ZM935 326H1097V538L1083 516L935 326Z" />
+<glyph unicode="&#xbd;" horiz-adv-x="1559" d="M416 117L275 193L986 1331L1127 1255L416 117ZM477 670H273V1218L84 1177V1335L458 1456H477V670ZM1477 0H857V138L1144 395Q1200 444 1224 486T1249 551Q1249 635 1154 635Q1104 635 1075 604T1045 526H839Q839
+642 924 721T1145 800Q1293 800 1373 736T1454 552Q1454 482 1418 424T1274 279L1126 164H1477V0Z" />
+<glyph unicode="&#xbe;" horiz-adv-x="1655" d="M619 117L478 193L1189 1331L1330 1255L619 117ZM1460 326H1548V159H1460V0H1255V159H909L897 294L1254 789H1460V326ZM1093 326H1255V538L1241 516L1093 326ZM319 1137H401Q520 1137 520 1225Q520 1260 492 1281T414
+1302Q372 1302 343 1287T313 1242H108Q108 1344 192 1405T408 1467Q553 1467 638 1408T724 1241Q724 1122 589 1072Q739 1031 739 888Q739 783 648 720T408 656Q266 656 180 722T94 903H300Q300 869 334 845T420 820Q480 820 506 846T533 908Q533 1000 411 1001H319V1137Z"
+/>
+<glyph unicode="&#xbf;" horiz-adv-x="1019" d="M666 643Q666 510 635 428T525 272T417 162T374 89T359 8Q359 -149 513 -149Q590 -149 635 -104T683 22H972Q970 -170 849 -277T518 -385Q305 -385 187 -284T69 6Q69 165 222 318L319 411Q369 456 388 505T410 643H666ZM702
+949Q702 882 656 839T540 796T424 839T378 949T422 1059T540 1102T657 1059T702 949Z" />
+<glyph unicode="&#xc0;" horiz-adv-x="1378" d="M952 300H426L326 0H7L549 1456H827L1372 0H1053L952 300ZM507 543H871L688 1088L507 543ZM836 1536H599L323 1846H638L836 1536Z" />
+<glyph unicode="&#xc1;" horiz-adv-x="1378" d="M952 300H426L326 0H7L549 1456H827L1372 0H1053L952 300ZM507 543H871L688 1088L507 543ZM750 1846H1065L788 1536H553L750 1846Z" />
+<glyph unicode="&#xc2;" horiz-adv-x="1378" d="M952 300H426L326 0H7L549 1456H827L1372 0H1053L952 300ZM507 543H871L688 1088L507 543ZM1076 1566V1554H846L692 1699L538 1554H312V1570L608 1846H776L1076 1566Z" />
+<glyph unicode="&#xc3;" horiz-adv-x="1378" d="M952 300H426L326 0H7L549 1456H827L1372 0H1053L952 300ZM507 543H871L688 1088L507 543ZM1068 1832Q1068 1719 1002 1643T841 1566Q803 1566 774 1574T692 1611T617 1645T567 1652Q532 1652 508 1627T483 1556L315
+1566Q315 1677 380 1755T541 1833Q571 1833 597 1826T684 1789T767 1754T815 1748Q850 1748 875 1772T901 1843L1068 1832Z" />
+<glyph unicode="&#xc4;" horiz-adv-x="1378" d="M952 300H426L326 0H7L549 1456H827L1372 0H1053L952 300ZM507 543H871L688 1088L507 543ZM587 1675Q587 1622 548 1586T448 1549Q386 1549 348 1586T310 1675T348 1764T448 1802T548 1765T587 1675ZM797 1675Q797
+1729 837 1765T936 1802Q995 1802 1034 1765T1074 1675Q1074 1623 1036 1586T936 1548T836 1585T797 1675Z" />
+<glyph unicode="&#xc5;" horiz-adv-x="1378" d="M952 300H426L326 0H7L549 1456H827L1372 0H1053L952 300ZM507 543H871L688 1088L507 543ZM470 1730Q470 1816 535 1875T693 1935Q785 1935 850 1877T916 1730Q916 1645 852 1587T693 1529Q596 1529 533 1588T470
+1730ZM585 1730Q585 1686 614 1655T693 1623T772 1654T801 1730Q801 1776 772 1807T693 1839T615 1808T585 1730Z" />
+<glyph unicode="&#xc6;" horiz-adv-x="1925" d="M1865 0H968L954 333H508L341 0H2L788 1456H1804V1220H1206L1221 865H1723V629H1231L1247 235H1865V0ZM633 580H944L920 1150L633 580Z" />
+<glyph unicode="&#xc7;" horiz-adv-x="1340" d="M1273 485Q1256 250 1100 115T687 -20Q407 -20 247 168T86 686V775Q86 985 160 1145T371 1390T691 1476Q943 1476 1097 1341T1275 962H975Q964 1103 897 1166T691 1230Q541 1230 467 1123T390 789V679Q390 443 461
+334T687 225Q826 225 894 288T973 485H1273ZM797 6L786 -52Q936 -79 936 -225Q936 -335 845 -399T587 -463L580 -296Q692 -296 692 -215Q692 -173 659 -158T550 -137L581 6H797Z" />
+<glyph unicode="&#xc8;" horiz-adv-x="1152" d="M1006 631H430V241H1106V0H130V1456H1104V1213H430V866H1006V631ZM779 1539H542L266 1849H581L779 1539Z" />
+<glyph unicode="&#xc9;" horiz-adv-x="1152" d="M1006 631H430V241H1106V0H130V1456H1104V1213H430V866H1006V631ZM693 1849H1008L731 1539H496L693 1849Z" />
+<glyph unicode="&#xca;" horiz-adv-x="1152" d="M1006 631H430V241H1106V0H130V1456H1104V1213H430V866H1006V631ZM1019 1569V1557H789L635 1702L481 1557H255V1573L551 1849H719L1019 1569Z" />
+<glyph unicode="&#xcb;" horiz-adv-x="1152" d="M1006 631H430V241H1106V0H130V1456H1104V1213H430V866H1006V631ZM530 1678Q530 1625 491 1589T391 1552Q329 1552 291 1589T253 1678T291 1767T391 1805T491 1768T530 1678ZM740 1678Q740 1732 780 1768T879 1805Q938
+1805 977 1768T1017 1678Q1017 1626 979 1589T879 1551T779 1588T740 1678Z" />
+<glyph unicode="&#xcc;" horiz-adv-x="597" d="M449 0H149V1456H449V0ZM443 1539H206L-70 1849H245L443 1539Z" />
+<glyph unicode="&#xcd;" horiz-adv-x="597" d="M449 0H149V1456H449V0ZM356 1849H671L394 1539H159L356 1849Z" />
+<glyph unicode="&#xce;" horiz-adv-x="597" d="M449 0H149V1456H449V0ZM683 1569V1557H453L299 1702L145 1557H-81V1573L215 1849H383L683 1569Z" />
+<glyph unicode="&#xcf;" horiz-adv-x="597" d="M449 0H149V1456H449V0ZM194 1678Q194 1625 155 1589T55 1552Q-7 1552 -45 1589T-83 1678T-45 1767T55 1805T155 1768T194 1678ZM404 1678Q404 1732 444 1768T543 1805Q602 1805 641 1768T681 1678Q681 1626 643
+1589T543 1551T443 1588T404 1678Z" />
+<glyph unicode="&#xd0;" horiz-adv-x="1361" d="M160 0V642H-20V825H160V1456H608Q800 1456 951 1370T1188 1124T1273 761V694Q1273 491 1190 333T954 88T611 0H160ZM679 642H460V241H605Q783 241 876 358T969 694V762Q969 984 877 1098T608 1213H460V825H679V642Z" />
+<glyph unicode="&#xd1;" horiz-adv-x="1446" d="M1314 0H1014L430 958V0H130V1456H430L1015 496V1456H1314V0ZM1102 1832Q1102 1719 1036 1643T875 1566Q837 1566 808 1574T726 1611T651 1645T601 1652Q566 1652 542 1627T517 1556L349 1566Q349 1677 414 1755T575
+1833Q605 1833 631 1826T718 1789T801 1754T849 1748Q884 1748 909 1772T935 1843L1102 1832Z" />
+<glyph unicode="&#xd2;" horiz-adv-x="1414" d="M1326 695Q1326 480 1250 318T1033 68T708 -20Q527 -20 385 67T165 315T86 687V759Q86 974 163 1137T382 1388T706 1476T1029 1389T1248 1138T1326 760V695ZM1022 761Q1022 990 940 1109T706 1228Q555 1228 473
+1111T390 766V695Q390 472 472 349T708 226Q859 226 940 344T1022 690V761ZM850 1536H613L337 1846H652L850 1536Z" />
+<glyph unicode="&#xd3;" horiz-adv-x="1414" d="M1326 695Q1326 480 1250 318T1033 68T708 -20Q527 -20 385 67T165 315T86 687V759Q86 974 163 1137T382 1388T706 1476T1029 1389T1248 1138T1326 760V695ZM1022 761Q1022 990 940 1109T706 1228Q555 1228 473
+1111T390 766V695Q390 472 472 349T708 226Q859 226 940 344T1022 690V761ZM764 1846H1079L802 1536H567L764 1846Z" />
+<glyph unicode="&#xd4;" horiz-adv-x="1414" d="M1326 695Q1326 480 1250 318T1033 68T708 -20Q527 -20 385 67T165 315T86 687V759Q86 974 163 1137T382 1388T706 1476T1029 1389T1248 1138T1326 760V695ZM1022 761Q1022 990 940 1109T706 1228Q555 1228 473
+1111T390 766V695Q390 472 472 349T708 226Q859 226 940 344T1022 690V761ZM1090 1566V1554H860L706 1699L552 1554H326V1570L622 1846H790L1090 1566Z" />
+<glyph unicode="&#xd5;" horiz-adv-x="1414" d="M1326 695Q1326 480 1250 318T1033 68T708 -20Q527 -20 385 67T165 315T86 687V759Q86 974 163 1137T382 1388T706 1476T1029 1389T1248 1138T1326 760V695ZM1022 761Q1022 990 940 1109T706 1228Q555 1228 473
+1111T390 766V695Q390 472 472 349T708 226Q859 226 940 344T1022 690V761ZM1082 1832Q1082 1719 1016 1643T855 1566Q817 1566 788 1574T706 1611T631 1645T581 1652Q546 1652 522 1627T497 1556L329 1566Q329 1677 394 1755T555 1833Q585 1833 611 1826T698 1789T781
+1754T829 1748Q864 1748 889 1772T915 1843L1082 1832Z" />
+<glyph unicode="&#xd6;" horiz-adv-x="1414" d="M1326 695Q1326 480 1250 318T1033 68T708 -20Q527 -20 385 67T165 315T86 687V759Q86 974 163 1137T382 1388T706 1476T1029 1389T1248 1138T1326 760V695ZM1022 761Q1022 990 940 1109T706 1228Q555 1228 473
+1111T390 766V695Q390 472 472 349T708 226Q859 226 940 344T1022 690V761ZM601 1675Q601 1622 562 1586T462 1549Q400 1549 362 1586T324 1675T362 1764T462 1802T562 1765T601 1675ZM811 1675Q811 1729 851 1765T950 1802Q1009 1802 1048 1765T1088 1675Q1088
+1623 1050 1586T950 1548T850 1585T811 1675Z" />
+<glyph unicode="&#xd7;" horiz-adv-x="1088" d="M65 373L372 686L65 999L235 1167L539 856L844 1167L1014 999L707 686L1014 373L844 205L539 515L235 205L65 373Z" />
+<glyph unicode="&#xd8;" horiz-adv-x="1411" d="M1333 695Q1333 480 1257 318T1040 68T715 -20Q551 -20 420 50L335 -95H147L290 147Q93 342 93 702V759Q93 974 170 1137T389 1388T713 1476Q888 1476 1027 1394L1101 1518H1288L1154 1291Q1333 1093 1333 754V695ZM397
+695Q397 523 446 410L893 1167Q820 1228 713 1228Q562 1228 480 1111T397 766V695ZM1030 761Q1030 912 992 1017L552 273Q622 226 715 226Q866 226 947 344T1030 690V761Z" />
+<glyph unicode="&#xd9;" horiz-adv-x="1348" d="M1232 1456V497Q1232 258 1083 119T674 -20Q419 -20 269 115T116 486V1456H416V495Q416 352 484 287T674 221Q927 221 931 487V1456H1232ZM814 1536H577L301 1846H616L814 1536Z" />
+<glyph unicode="&#xda;" horiz-adv-x="1348" d="M1232 1456V497Q1232 258 1083 119T674 -20Q419 -20 269 115T116 486V1456H416V495Q416 352 484 287T674 221Q927 221 931 487V1456H1232ZM728 1846H1043L766 1536H531L728 1846Z" />
+<glyph unicode="&#xdb;" horiz-adv-x="1348" d="M1232 1456V497Q1232 258 1083 119T674 -20Q419 -20 269 115T116 486V1456H416V495Q416 352 484 287T674 221Q927 221 931 487V1456H1232ZM1054 1566V1554H824L670 1699L516 1554H290V1570L586 1846H754L1054 1566Z" />
+<glyph unicode="&#xdc;" horiz-adv-x="1348" d="M1232 1456V497Q1232 258 1083 119T674 -20Q419 -20 269 115T116 486V1456H416V495Q416 352 484 287T674 221Q927 221 931 487V1456H1232ZM565 1675Q565 1622 526 1586T426 1549Q364 1549 326 1586T288 1675T326
+1764T426 1802T526 1765T565 1675ZM775 1675Q775 1729 815 1765T914 1802Q973 1802 1012 1765T1052 1675Q1052 1623 1014 1586T914 1548T814 1585T775 1675Z" />
+<glyph unicode="&#xdd;" horiz-adv-x="1266" d="M632 800L935 1456H1263L785 528V0H480V528L2 1456H331L632 800ZM693 1846H1008L731 1536H496L693 1846Z" />
+<glyph unicode="&#xde;" horiz-adv-x="1246" d="M422 1456V1189H652Q814 1188 933 1133T1117 975T1181 738Q1181 536 1043 414T664 287H422V0H133V1456H422ZM422 956V520H645Q762 520 827 579T892 736T829 894T653 956H422Z" />
+<glyph unicode="&#xdf;" horiz-adv-x="1292" d="M424 0H135V1101Q135 1321 259 1440T609 1559Q800 1559 915 1460T1031 1189Q1031 1081 978 994T924 830Q924 793 954 754T1073 636Q1224 502 1224 354Q1224 177 1109 79T779 -20Q698 -20 619 -4T500 36L554 265Q652
+213 773 213Q852 213 894 249T936 349Q936 395 902 438T785 548Q635 668 635 818Q635 914 690 1004T745 1174Q745 1244 701 1285T588 1327Q429 1327 424 1114V0Z" />
+<glyph unicode="&#xe0;" horiz-adv-x="1098" d="M738 0Q718 39 709 97Q604 -20 436 -20Q277 -20 173 72T68 304Q68 476 195 568T564 661H697V723Q697 798 659 843T537 888Q464 888 423 853T381 757H92Q92 851 150 931T314 1056T552 1102Q752 1102 869 1002T987
+719V250Q988 96 1030 17V0H738ZM499 201Q563 201 617 229T697 306V492H589Q372 492 358 342L357 325Q357 271 395 236T499 201ZM695 1226H458L182 1536H497L695 1226Z" />
+<glyph unicode="&#xe1;" horiz-adv-x="1098" d="M738 0Q718 39 709 97Q604 -20 436 -20Q277 -20 173 72T68 304Q68 476 195 568T564 661H697V723Q697 798 659 843T537 888Q464 888 423 853T381 757H92Q92 851 150 931T314 1056T552 1102Q752 1102 869 1002T987
+719V250Q988 96 1030 17V0H738ZM499 201Q563 201 617 229T697 306V492H589Q372 492 358 342L357 325Q357 271 395 236T499 201ZM609 1536H924L647 1226H412L609 1536Z" />
+<glyph unicode="&#xe2;" horiz-adv-x="1098" d="M738 0Q718 39 709 97Q604 -20 436 -20Q277 -20 173 72T68 304Q68 476 195 568T564 661H697V723Q697 798 659 843T537 888Q464 888 423 853T381 757H92Q92 851 150 931T314 1056T552 1102Q752 1102 869 1002T987
+719V250Q988 96 1030 17V0H738ZM499 201Q563 201 617 229T697 306V492H589Q372 492 358 342L357 325Q357 271 395 236T499 201ZM935 1256V1244H705L551 1389L397 1244H171V1260L467 1536H635L935 1256Z" />
+<glyph unicode="&#xe3;" horiz-adv-x="1098" d="M738 0Q718 39 709 97Q604 -20 436 -20Q277 -20 173 72T68 304Q68 476 195 568T564 661H697V723Q697 798 659 843T537 888Q464 888 423 853T381 757H92Q92 851 150 931T314 1056T552 1102Q752 1102 869 1002T987
+719V250Q988 96 1030 17V0H738ZM499 201Q563 201 617 229T697 306V492H589Q372 492 358 342L357 325Q357 271 395 236T499 201ZM927 1779Q927 1666 861 1590T700 1513Q662 1513 633 1521T551 1558T476 1592T426 1599Q391 1599 367 1574T342 1503L174 1513Q174 1624
+239 1702T400 1780Q430 1780 456 1773T543 1736T626 1701T674 1695Q709 1695 734 1719T760 1790L927 1779Z" />
+<glyph unicode="&#xe4;" horiz-adv-x="1098" d="M738 0Q718 39 709 97Q604 -20 436 -20Q277 -20 173 72T68 304Q68 476 195 568T564 661H697V723Q697 798 659 843T537 888Q464 888 423 853T381 757H92Q92 851 150 931T314 1056T552 1102Q752 1102 869 1002T987
+719V250Q988 96 1030 17V0H738ZM499 201Q563 201 617 229T697 306V492H589Q372 492 358 342L357 325Q357 271 395 236T499 201ZM446 1365Q446 1312 407 1276T307 1239Q245 1239 207 1276T169 1365T207 1454T307 1492T407 1455T446 1365ZM656 1365Q656 1419 696
+1455T795 1492Q854 1492 893 1455T933 1365Q933 1313 895 1276T795 1238T695 1275T656 1365Z" />
+<glyph unicode="&#xe5;" horiz-adv-x="1098" d="M738 0Q718 39 709 97Q604 -20 436 -20Q277 -20 173 72T68 304Q68 476 195 568T564 661H697V723Q697 798 659 843T537 888Q464 888 423 853T381 757H92Q92 851 150 931T314 1056T552 1102Q752 1102 869 1002T987
+719V250Q988 96 1030 17V0H738ZM499 201Q563 201 617 229T697 306V492H589Q372 492 358 342L357 325Q357 271 395 236T499 201ZM329 1420Q329 1506 394 1565T552 1625Q644 1625 709 1567T775 1420Q775 1335 711 1277T552 1219Q455 1219 392 1278T329 1420ZM444
+1420Q444 1376 473 1345T552 1313T631 1344T660 1420Q660 1466 631 1497T552 1529T474 1498T444 1420Z" />
+<glyph unicode="&#xe6;" horiz-adv-x="1729" d="M1246 -20Q999 -20 860 127Q796 58 694 19T466 -20Q280 -20 173 69T66 319Q66 476 190 562T557 649H725V706Q725 782 685 825T567 868Q485 868 438 833T390 745L101 764Q101 913 231 1007T570 1102Q781 1102 897
+992Q1023 1104 1223 1102Q1435 1102 1556 971T1677 607V450H1009Q1020 334 1089 273T1276 212Q1353 212 1418 228T1571 289L1648 100Q1575 44 1468 12T1246 -20ZM521 192Q570 192 628 214T725 272V461H561Q466 460 411 418T355 315Q355 259 395 226T521 192ZM1223
+868Q1133 868 1079 811T1011 644H1393V672Q1393 766 1350 817T1223 868Z" />
+<glyph unicode="&#xe7;" horiz-adv-x="1068" d="M561 213Q641 213 691 257T743 374H1014Q1013 264 954 173T793 31T566 -20Q334 -20 200 127T66 535V554Q66 804 199 953T564 1102Q767 1102 889 987T1014 679H743Q741 763 691 815T559 868Q458 868 407 795T355
+556V526Q355 359 406 286T561 213ZM666 6L655 -52Q805 -79 805 -225Q805 -335 714 -399T456 -463L449 -296Q561 -296 561 -215Q561 -173 528 -158T419 -137L450 6H666Z" />
+<glyph unicode="&#xe8;" horiz-adv-x="1107" d="M609 -20Q371 -20 222 126T72 515V543Q72 706 135 834T313 1032T577 1102Q799 1102 926 962T1054 565V447H365Q379 341 449 277T628 213Q795 213 889 334L1031 175Q966 83 855 32T609 -20ZM576 868Q490 868 437
+810T368 644H770V667Q768 763 718 815T576 868ZM688 1226H451L175 1536H490L688 1226Z" />
+<glyph unicode="&#xe9;" horiz-adv-x="1107" d="M609 -20Q371 -20 222 126T72 515V543Q72 706 135 834T313 1032T577 1102Q799 1102 926 962T1054 565V447H365Q379 341 449 277T628 213Q795 213 889 334L1031 175Q966 83 855 32T609 -20ZM576 868Q490 868 437
+810T368 644H770V667Q768 763 718 815T576 868ZM602 1536H917L640 1226H405L602 1536Z" />
+<glyph unicode="&#xea;" horiz-adv-x="1107" d="M609 -20Q371 -20 222 126T72 515V543Q72 706 135 834T313 1032T577 1102Q799 1102 926 962T1054 565V447H365Q379 341 449 277T628 213Q795 213 889 334L1031 175Q966 83 855 32T609 -20ZM576 868Q490 868 437
+810T368 644H770V667Q768 763 718 815T576 868ZM928 1256V1244H698L544 1389L390 1244H164V1260L460 1536H628L928 1256Z" />
+<glyph unicode="&#xeb;" horiz-adv-x="1107" d="M609 -20Q371 -20 222 126T72 515V543Q72 706 135 834T313 1032T577 1102Q799 1102 926 962T1054 565V447H365Q379 341 449 277T628 213Q795 213 889 334L1031 175Q966 83 855 32T609 -20ZM576 868Q490 868 437
+810T368 644H770V667Q768 763 718 815T576 868ZM439 1365Q439 1312 400 1276T300 1239Q238 1239 200 1276T162 1365T200 1454T300 1492T400 1455T439 1365ZM649 1365Q649 1419 689 1455T788 1492Q847 1492 886 1455T926 1365Q926 1313 888 1276T788 1238T688 1275T649
+1365Z" />
+<glyph unicode="&#xec;" horiz-adv-x="561" d="M423 0H134V1082H423V0ZM425 1211H188L-88 1521H227L425 1211Z" />
+<glyph unicode="&#xed;" horiz-adv-x="561" d="M423 0H134V1082H423V0ZM338 1777H653L376 1467H141L338 1777Z" />
+<glyph unicode="&#xee;" horiz-adv-x="561" d="M423 0H134V1082H423V0ZM665 1241V1229H435L281 1374L127 1229H-99V1245L197 1521H365L665 1241Z" />
+<glyph unicode="&#xef;" horiz-adv-x="561" d="M423 0H134V1082H423V0ZM176 1350Q176 1297 137 1261T37 1224Q-25 1224 -63 1261T-101 1350T-63 1439T37 1477T137 1440T176 1350ZM386 1350Q386 1404 426 1440T525 1477Q584 1477 623 1440T663 1350Q663 1298 625
+1261T525 1223T425 1260T386 1350Z" />
+<glyph unicode="&#xf0;" horiz-adv-x="1178" d="M849 1305Q1104 1043 1105 651V577Q1105 405 1039 268T853 56T587 -20Q443 -20 328 43T149 219T84 468Q84 699 207 832T536 966Q672 966 780 890Q731 1042 614 1155L423 1033L345 1147L497 1244Q381 1316 233 1355L324
+1579Q562 1531 740 1399L911 1508L988 1394L849 1305ZM816 663Q748 746 601 746Q488 746 431 672T373 468Q373 356 433 285T591 213Q694 213 755 303T816 552V663Z" />
+<glyph unicode="&#xf1;" horiz-adv-x="1147" d="M377 1082L386 957Q502 1102 697 1102Q869 1102 953 1001T1039 699V0H750V692Q750 784 710 825T577 867Q455 867 394 763V0H105V1082H377ZM951 1779Q951 1666 885 1590T724 1513Q686 1513 657 1521T575 1558T500
+1592T450 1599Q415 1599 391 1574T366 1503L198 1513Q198 1624 263 1702T424 1780Q454 1780 480 1773T567 1736T650 1701T698 1695Q733 1695 758 1719T784 1790L951 1779Z" />
+<glyph unicode="&#xf2;" horiz-adv-x="1158" d="M66 551Q66 712 128 838T306 1033T577 1102Q796 1102 934 968T1089 604L1091 530Q1091 281 952 131T579 -20T206 130T66 538V551ZM355 530Q355 376 413 295T579 213Q684 213 743 293T802 551Q802 702 743 785T577
+868Q471 868 413 786T355 530ZM720 1226H483L207 1536H522L720 1226Z" />
+<glyph unicode="&#xf3;" horiz-adv-x="1158" d="M66 551Q66 712 128 838T306 1033T577 1102Q796 1102 934 968T1089 604L1091 530Q1091 281 952 131T579 -20T206 130T66 538V551ZM355 530Q355 376 413 295T579 213Q684 213 743 293T802 551Q802 702 743 785T577
+868Q471 868 413 786T355 530ZM634 1536H949L672 1226H437L634 1536Z" />
+<glyph unicode="&#xf4;" horiz-adv-x="1158" d="M66 551Q66 712 128 838T306 1033T577 1102Q796 1102 934 968T1089 604L1091 530Q1091 281 952 131T579 -20T206 130T66 538V551ZM355 530Q355 376 413 295T579 213Q684 213 743 293T802 551Q802 702 743 785T577
+868Q471 868 413 786T355 530ZM960 1256V1244H730L576 1389L422 1244H196V1260L492 1536H660L960 1256Z" />
+<glyph unicode="&#xf5;" horiz-adv-x="1158" d="M66 551Q66 712 128 838T306 1033T577 1102Q796 1102 934 968T1089 604L1091 530Q1091 281 952 131T579 -20T206 130T66 538V551ZM355 530Q355 376 413 295T579 213Q684 213 743 293T802 551Q802 702 743 785T577
+868Q471 868 413 786T355 530ZM952 1779Q952 1666 886 1590T725 1513Q687 1513 658 1521T576 1558T501 1592T451 1599Q416 1599 392 1574T367 1503L199 1513Q199 1624 264 1702T425 1780Q455 1780 481 1773T568 1736T651 1701T699 1695Q734 1695 759 1719T785 1790L952
+1779Z" />
+<glyph unicode="&#xf6;" horiz-adv-x="1158" d="M66 551Q66 712 128 838T306 1033T577 1102Q796 1102 934 968T1089 604L1091 530Q1091 281 952 131T579 -20T206 130T66 538V551ZM355 530Q355 376 413 295T579 213Q684 213 743 293T802 551Q802 702 743 785T577
+868Q471 868 413 786T355 530ZM471 1365Q471 1312 432 1276T332 1239Q270 1239 232 1276T194 1365T232 1454T332 1492T432 1455T471 1365ZM681 1365Q681 1419 721 1455T820 1492Q879 1492 918 1455T958 1365Q958 1313 920 1276T820 1238T720 1275T681 1365Z" />
+<glyph unicode="&#xf7;" horiz-adv-x="1168" d="M1091 571H63V801H1091V571ZM415 1089Q415 1157 460 1199T578 1241Q649 1241 695 1200T742 1089Q742 1022 697 981T578 939Q503 939 459 981T415 1089ZM415 277Q415 345 460 387T578 429Q649 429 695 388T742 277Q742
+210 697 169T578 127Q503 127 459 169T415 277Z" />
+<glyph unicode="&#xf8;" horiz-adv-x="1156" d="M66 551Q66 712 128 838T306 1033T577 1102Q678 1102 763 1073L833 1216H994L891 1005Q1091 856 1091 530Q1091 281 952 131T579 -20Q484 -20 403 6L331 -142H170L273 70Q66 216 66 551ZM355 530Q355 400 396 322L656
+854Q620 868 577 868Q471 868 413 786T355 530ZM802 551Q802 665 765 747L509 223Q539 213 579 213Q684 213 743 293T802 551Z" />
+<glyph unicode="&#xf9;" horiz-adv-x="1146" d="M759 110Q652 -20 463 -20Q289 -20 198 80T104 373V1082H393V383Q393 214 547 214Q694 214 749 316V1082H1039V0H767L759 110ZM716 1226H479L203 1536H518L716 1226Z" />
+<glyph unicode="&#xfa;" horiz-adv-x="1146" d="M759 110Q652 -20 463 -20Q289 -20 198 80T104 373V1082H393V383Q393 214 547 214Q694 214 749 316V1082H1039V0H767L759 110ZM630 1536H945L668 1226H433L630 1536Z" />
+<glyph unicode="&#xfb;" horiz-adv-x="1146" d="M759 110Q652 -20 463 -20Q289 -20 198 80T104 373V1082H393V383Q393 214 547 214Q694 214 749 316V1082H1039V0H767L759 110ZM956 1256V1244H726L572 1389L418 1244H192V1260L488 1536H656L956 1256Z" />
+<glyph unicode="&#xfc;" horiz-adv-x="1146" d="M759 110Q652 -20 463 -20Q289 -20 198 80T104 373V1082H393V383Q393 214 547 214Q694 214 749 316V1082H1039V0H767L759 110ZM467 1365Q467 1312 428 1276T328 1239Q266 1239 228 1276T190 1365T228 1454T328 1492T428
+1455T467 1365ZM677 1365Q677 1419 717 1455T816 1492Q875 1492 914 1455T954 1365Q954 1313 916 1276T816 1238T716 1275T677 1365Z" />
+<glyph unicode="&#xfd;" horiz-adv-x="1028" d="M515 409L715 1082H1025L590 -168L566 -225Q469 -437 246 -437Q183 -437 118 -418V-199L162 -200Q244 -200 284 -175T348 -92L382 -3L3 1082H314L515 409ZM578 1536H893L616 1226H381L578 1536Z" />
+<glyph unicode="&#xfe;" horiz-adv-x="1162" d="M1087 531Q1087 281 974 131T667 -20Q503 -20 403 93V-416H113V1536H403V989Q503 1102 665 1102Q863 1102 975 955T1087 545V531ZM798 552Q798 703 745 785T589 867Q457 867 403 765V318Q457 213 591 213Q798 213 798 552Z" />
+<glyph unicode="&#xff;" horiz-adv-x="1028" d="M515 409L715 1082H1025L590 -168L566 -225Q469 -437 246 -437Q183 -437 118 -418V-199L162 -200Q244 -200 284 -175T348 -92L382 -3L3 1082H314L515 409ZM415 1365Q415 1312 376 1276T276 1239Q214 1239 176 1276T138
+1365T176 1454T276 1492T376 1455T415 1365ZM625 1365Q625 1419 665 1455T764 1492Q823 1492 862 1455T902 1365Q902 1313 864 1276T764 1238T664 1275T625 1365Z" />
+<glyph unicode="&#x2013;" horiz-adv-x="1294" d="M1444 596H408V832H1444V596Z" />
+<glyph unicode="&#x2014;" horiz-adv-x="1563" d="M1746 596H365V832H1746V596Z" />
+<glyph unicode="&#x2018;" horiz-adv-x="479" d="M286 1570L422 1491Q336 1355 333 1215V1048H104V1198Q104 1292 156 1398T286 1570Z" />
+<glyph unicode="&#x2019;" horiz-adv-x="470" d="M194 1009L58 1088Q144 1223 147 1367V1536H377V1381Q377 1291 327 1186T194 1009Z" />
+<glyph unicode="&#x201a;" horiz-adv-x="508" d="M202 -305L66 -226Q144 -100 147 48V229H385L384 63Q383 -26 334 -129T202 -305Z" />
+<glyph unicode="&#x201c;" horiz-adv-x="831" d="M294 1570L430 1491Q344 1355 341 1215V1048H112V1198Q112 1292 164 1398T294 1570ZM637 1570L773 1491Q687 1355 684 1215V1048H455V1198Q455 1292 507 1398T637 1570Z" />
+<glyph unicode="&#x201d;" horiz-adv-x="837" d="M208 1009L72 1088Q158 1223 161 1367V1536H391V1381Q391 1291 341 1186T208 1009ZM555 1009L419 1088Q505 1223 508 1367V1536H738V1381Q738 1291 688 1186T555 1009Z" />
+<glyph unicode="&#x201e;" horiz-adv-x="825" d="M209 -325L66 -246Q144 -112 147 47V263H385L384 64Q383 -33 336 -142T209 -325ZM545 -325L402 -246Q488 -98 491 48V263H729L728 60Q726 -36 676 -145T545 -325Z" />
+<glyph unicode="&#x2022;" horiz-adv-x="736" d="M135 766Q135 870 201 933T371 996Q479 996 543 934T610 771V728Q610 625 545 563T373 500Q268 500 202 562T135 731V766Z" />
+<glyph unicode="&#x2039;" horiz-adv-x="638" d="M334 524L574 125H388L108 515V534L388 924H574L334 524Z" />
+<glyph unicode="&#x203a;" horiz-adv-x="618" d="M267 923L547 533V514L267 124H80L320 523L80 923H267Z" />
+</font>
+</defs>
+</svg>
diff --git a/site/assets/fonts/Roboto_700.ttf b/site/assets/fonts/Roboto_700.ttf
new file mode 100644
index 00000000..031bf06c
--- /dev/null
+++ b/site/assets/fonts/Roboto_700.ttf
Binary files differ
diff --git a/site/assets/fonts/Roboto_700.woff b/site/assets/fonts/Roboto_700.woff
new file mode 100644
index 00000000..a0d26516
--- /dev/null
+++ b/site/assets/fonts/Roboto_700.woff
Binary files differ
diff --git a/site/assets/fonts/Roboto_700.woff2 b/site/assets/fonts/Roboto_700.woff2
new file mode 100644
index 00000000..e327dc95
--- /dev/null
+++ b/site/assets/fonts/Roboto_700.woff2
Binary files differ
diff --git a/site/assets/fonts/Roboto_Mono_300.eot b/site/assets/fonts/Roboto_Mono_300.eot
new file mode 100644
index 00000000..f44d93dd
--- /dev/null
+++ b/site/assets/fonts/Roboto_Mono_300.eot
Binary files differ
diff --git a/site/assets/fonts/Roboto_Mono_300.svg b/site/assets/fonts/Roboto_Mono_300.svg
new file mode 100644
index 00000000..e8645765
--- /dev/null
+++ b/site/assets/fonts/Roboto_Mono_300.svg
@@ -0,0 +1,392 @@
+<?xml version="1.0" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<svg xmlns="http://www.w3.org/2000/svg">
+<defs >
+<font id="RobotoMono" horiz-adv-x="1227" ><font-face
+ font-family="Roboto Mono Light"
+ units-per-em="2048"
+ panose-1="0 0 0 0 0 0 0 0 0 0"
+ ascent="2146"
+ descent="-555"
+ alphabetic="0" />
+<glyph unicode=" " glyph-name="space" horiz-adv-x="1229" />
+<glyph unicode="!" glyph-name="exclam" horiz-adv-x="1229" d="M654 435H534V1456H654V435ZM514 72Q514 107 537 131T599 156Q639 156 662 132T686 72Q686 38 662 15T599 -8Q560 -8 537 15T514 72Z" />
+<glyph unicode="&quot;" glyph-name="quotedbl" horiz-adv-x="1229" d="M509 1410L493 1090H402L406 1400L405 1536H510L509 1410ZM812 1410L796 1090H705L710 1400L709 1536H814L812 1410Z" />
+<glyph unicode="#" glyph-name="numbersign" horiz-adv-x="1229" d="M729 410H415L338 0H239L316 410H61V503H333L416 944H137V1040H434L513 1456H612L533 1040H848L927 1456H1026L947 1040H1177V944H929L846 503H1102V410H828L751 0H652L729 410ZM432 503H747L830
+944H515L432 503Z" />
+<glyph unicode="$" glyph-name="dollar" horiz-adv-x="1229" d="M961 359Q961 423 936 471T867 557T764 622T636 674Q553 701 480 736T351 820T263 936T230 1095Q230 1180 258 1247T335 1363T453 1439T600 1475V1677H702V1474Q788 1466 855 1432T970 1341T1042
+1204T1070 1028H951Q950 1103 929 1166T869 1274T775 1344T649 1370Q586 1370 532 1352T437 1299T373 1214T349 1097Q349 1030 373 982T441 897T545 833T677 780Q761 752 834 718T962 634T1049 519T1081 361Q1081 275 1052 208T971 93T851 17T700 -18V-208H597V-19Q512
+-13 437 15T304 97T213 230T177 418H297Q298 330 327 267T403 163T512 103T642 85Q706 85 764 103T865 156T935 242T961 359Z" />
+<glyph unicode="%" glyph-name="percent" horiz-adv-x="1229" d="M44 1184Q44 1239 60 1291T108 1385T187 1452T297 1477Q360 1477 407 1452T486 1385T534 1292T551 1184V1107Q551 1053 535 1001T487 908T408 842T299 817Q236 817 189 842T109 907T61 1000T44
+1107V1184ZM140 1107Q140 1071 149 1035T177 969T227 922T299 904Q341 904 370 922T418 970T446 1035T455 1107V1184Q455 1221 446 1257T418 1323T369 1371T297 1390Q255 1390 226 1372T177 1324T149 1257T140 1184V1107ZM673 348Q674 402 690 454T738 548T817
+614T927 639Q990 639 1037 614T1116 548T1164 455T1182 348V270Q1181 215 1165 163T1117 70T1038 4T929 -21Q866 -21 819 4T739 70T691 163T673 270V348ZM770 270Q770 233 779 197T807 131T856 83T929 65Q971 65 1000 83T1048 130T1076 196T1085 270V348Q1085 385
+1076 421T1049 487T1000 534T927 552Q884 552 855 534T806 487T779 422T770 348V270ZM357 159L280 205L874 1293L951 1247L357 159Z" />
+<glyph unicode="&amp;" glyph-name="ampersand" horiz-adv-x="1229" d="M417 791Q384 834 357 877T309 964T277 1053T265 1147Q265 1220 287 1281T352 1385T454 1453T588 1477Q652 1477 704 1455T794 1393T852 1302T873 1191Q873 1142 857 1100T815 1020T753 949T680
+885L562 785L934 315Q978 387 1002 473T1028 651H1138Q1138 530 1104 424T1004 227L1184 0H1041L928 142Q851 68 758 24T555 -20Q463 -20 386 7T254 84T167 204T135 362Q135 429 157 487T216 597T302 695T408 784L417 791ZM555 81Q649 80 727 118T866 222L488 701L478
+713L441 681Q351 604 303 522T255 362Q255 299 277 247T339 159T434 102T555 81ZM386 1149Q385 1075 418 1004T502 862L634 971Q694 1017 726 1072T760 1191Q760 1228 748 1261T713 1319T659 1360T588 1375Q539 1375 502 1357T439 1307T400 1235T386 1149Z" />
+<glyph unicode="&apos;" glyph-name="quotesingle" horiz-adv-x="1229" d="M628 1395L610 1090H516L521 1386V1536H628V1395Z" />
+<glyph unicode="(" glyph-name="parenleft" horiz-adv-x="1229" d="M381 588Q381 760 416 922T511 1222T654 1466T833 1632L862 1551Q777 1490 710 1390T597 1164T526 891T501 591V571Q501 418 525 272T596 -1T710 -229T862 -393L833 -470Q736 -409 654 -304T511
+-61T416 238T381 573V588Z" />
+<glyph unicode=")" glyph-name="parenright" horiz-adv-x="1229" d="M823 573Q823 400 789 239T694 -60T551 -303T371 -470L342 -393Q427 -332 494 -231T607 -3T678 271T703 571V591Q703 745 678 892T605 1166T491 1393T342 1555L371 1632Q467 1572 549 1466T692
+1222T788 922T823 588V573Z" />
+<glyph unicode="*" glyph-name="asterisk" horiz-adv-x="1229" d="M570 853L184 964L220 1077L600 935L589 1342H707L689 932L1062 1078L1100 964L713 848L981 528L883 457L639 799L404 460L308 528L570 853Z" />
+<glyph unicode="+" glyph-name="plus" horiz-adv-x="1229" d="M673 740H1117V623H673V146H553V623H119V740H553V1206H673V740Z" />
+<glyph unicode="," glyph-name="comma" horiz-adv-x="1229" d="M601 35Q601 -9 592 -53T565 -140T521 -222T461 -295L385 -247Q473 -122 473 32V188H601V35Z" />
+<glyph unicode="-" glyph-name="hyphen" horiz-adv-x="1230" d="M970 601H229V702H970V601Z" />
+<glyph unicode="." glyph-name="period" horiz-adv-x="1229" d="M542 85Q542 127 569 156T644 186Q692 186 719 157T747 85Q747 43 720 16T644 -12Q597 -12 570 15T542 85Z" />
+<glyph unicode="/" glyph-name="slash" horiz-adv-x="1229" d="M391 -125H279L887 1456H999L391 -125Z" />
+<glyph unicode="0" glyph-name="zero" horiz-adv-x="1229" d="M1075 576Q1075 454 1048 346T965 156T823 28T618 -20Q500 -20 414 27T271 156T187 345T158 576V879Q159 1001 186 1109T269 1300T412 1429T616 1477Q735 1477 821 1430T964 1301T1047 1110T1075 879V576ZM278
+558Q278 517 281 480L938 1075Q924 1139 899 1194T835 1289T741 1352T616 1375Q523 1375 459 1336T354 1231T296 1079T278 899V558ZM955 899Q955 932 952 964L298 370Q313 308 339 255T404 164T496 103T618 81Q711 81 775 120T879 226T937 378T955 558V899Z" />
+<glyph unicode="1" glyph-name="one" horiz-adv-x="1229" d="M742 0H622V1305L219 1132V1245L725 1456H742V0Z" />
+<glyph unicode="2" glyph-name="two" horiz-adv-x="1229" d="M1043 0H130V92L607 633Q660 693 704 748T781 856T832 964T850 1075Q850 1141 828 1196T766 1290T669 1352T544 1374Q467 1374 407 1348T306 1277T242 1171T218 1036H98Q99 1126 131 1206T221 1346T360
+1441T544 1476Q640 1476 718 1448T853 1367T939 1241T970 1078Q970 1006 945 938T879 805T787 679T684 560L277 101H1043V0Z" />
+<glyph unicode="3" glyph-name="three" horiz-adv-x="1229" d="M402 793H523Q593 794 655 813T763 868T837 956T864 1076Q864 1147 842 1202T780 1296T684 1354T558 1374Q489 1374 431 1353T331 1292T264 1198T238 1074H118Q120 1160 153 1233T245 1361T383 1445T558
+1476Q651 1476 729 1449T863 1370T951 1243T983 1072Q983 1018 966 969T915 877T835 801T730 746Q866 708 938 616T1010 395Q1010 297 976 220T881 90T739 9T563 -20Q467 -20 383 8T236 90T136 221T97 395H216Q218 323 245 265T318 166T427 103T563 81Q634 81 694
+101T798 160T866 257T891 391Q891 471 862 527T782 620T665 673T523 691H402V793Z" />
+<glyph unicode="4" glyph-name="four" horiz-adv-x="1229" d="M888 469H1116V368H888V0H769V368H84V436L757 1456H888V469ZM228 469H769V1305L712 1204L228 469Z" />
+<glyph unicode="5" glyph-name="five" horiz-adv-x="1229" d="M260 746L331 1456H1059V1340H433L383 853Q437 890 507 911T657 933Q756 933 836 899T972 802T1058 653T1088 464Q1088 363 1062 275T983 122T848 18T655 -20Q564 -20 487 7T351 86T256 213T209 383H325Q334
+312 360 256T428 161T527 102T655 81Q735 81 794 110T892 190T949 311T968 462Q968 538 946 604T880 719T776 796T636 824Q590 824 552 818T480 799T416 766T355 719L260 746Z" />
+<glyph unicode="6" glyph-name="six" horiz-adv-x="1229" d="M863 1467V1358H845Q699 1357 594 1309T420 1181T315 994T272 773Q300 813 339 847T424 905T524 942T633 956Q738 956 816 916T947 809T1026 656T1052 477Q1052 381 1025 292T943 133T808 22T621 -20Q507
+-20 420 25T272 145T181 316T149 514V643Q149 747 165 848T215 1041T303 1210T434 1344T613 1433T845 1467H863ZM614 853Q557 853 503 835T402 785T322 708T270 607V512Q270 418 296 339T368 202T479 113T621 82Q697 82 755 115T853 202T913 327T934 473Q934 548
+914 616T853 737T753 821T614 853Z" />
+<glyph unicode="7" glyph-name="seven" horiz-adv-x="1229" d="M1082 1387L460 0H335L953 1349H125V1456H1082V1387Z" />
+<glyph unicode="8" glyph-name="eight" horiz-adv-x="1229" d="M1066 1081Q1066 1022 1048 970T997 876T921 801T823 747Q883 726 934 692T1023 612T1082 509T1103 386Q1103 287 1066 211T966 84T818 7T641 -20Q547 -20 463 6T315 83T214 211T177 386Q177 452
+197 509T255 611T342 691T453 747Q400 768 357 800T281 875T232 970T214 1081Q214 1174 246 1247T336 1371T470 1449T637 1476Q726 1476 804 1449T941 1371T1032 1247T1066 1081ZM983 385Q983 457 955 515T880 613T770 674T639 696Q570 696 508 675T399 613T325
+515T297 385Q297 311 324 254T398 159T507 101T641 81Q710 81 772 101T881 159T955 254T983 385ZM946 1082Q946 1147 921 1201T854 1293T755 1353T637 1374Q574 1374 519 1354T423 1296T358 1204T334 1082Q334 1015 358 963T424 874T521 818T639 798Q701 798 756
+817T854 873T921 963T946 1082Z" />
+<glyph unicode="9" glyph-name="nine" horiz-adv-x="1229" d="M382 97Q535 98 639 144T809 270T905 454T938 678Q912 634 876 598T793 534T692 492T573 477Q469 477 391 519T261 631T182 787T155 965Q155 1063 183 1154T267 1317T405 1432T595 1476Q721 1476 808
+1427T951 1295T1032 1109T1058 895V779Q1058 679 1045 582T1000 398T919 236T793 107T616 21T382 -11H355L356 97H382ZM588 580Q648 580 703 599T805 654T886 738T939 845V906Q939 1003 917 1088T852 1236T745 1336T597 1373Q519 1373 459 1339T358 1248T295 1120T273
+970Q273 898 293 828T353 703T452 614T588 580Z" />
+<glyph unicode=":" glyph-name="colon" horiz-adv-x="1229" d="M596 85Q596 127 623 156T698 186Q746 186 773 157T801 85Q801 43 774 16T698 -12Q651 -12 624 15T596 85ZM596 961Q596 1003 623 1032T698 1062Q746 1062 773 1033T801 961Q801 919 774 892T698
+864Q651 864 624 891T596 961Z" />
+<glyph unicode=";" glyph-name="semicolon" horiz-adv-x="1229" d="M592 961Q592 1003 619 1032T694 1062Q742 1062 769 1033T797 961Q797 919 770 892T694 864Q647 864 620 891T592 961ZM744 35Q744 -9 735 -53T708 -140T664 -222T604 -295L528 -247Q616 -122
+616 32V188H744V35Z" />
+<glyph unicode="&lt;" glyph-name="less" horiz-adv-x="1229" d="M302 656L1023 356V230L166 609V705L1023 1083V958L302 656Z" />
+<glyph unicode="=" glyph-name="equal" horiz-adv-x="1229" d="M1066 812H174V918H1066V812ZM1066 401H174V507H1066V401Z" />
+<glyph unicode="&gt;" glyph-name="greater" horiz-adv-x="1229" d="M918 660L183 962V1085L1057 707V611L183 232V355L918 660Z" />
+<glyph unicode="?" glyph-name="question" horiz-adv-x="1229" d="M540 404Q541 467 549 513T576 599T624 673T700 750Q739 787 775 825T841 906T887 997T904 1102Q904 1163 885 1213T831 1297T744 1351T629 1370Q570 1370 519 1353T428 1304T366 1225T340 1116H220Q223
+1198 255 1264T341 1378T469 1450T629 1476Q720 1476 793 1450T917 1374T996 1257T1024 1103Q1024 1034 1003 974T946 860T865 760T771 672Q738 642 717 613T683 552T665 483T660 404H540ZM517 72Q517 107 540 131T602 156Q641 156 665 132T689 72Q689 38 665 15T602
+-8Q563 -8 540 15T517 72Z" />
+<glyph unicode="@" glyph-name="at" horiz-adv-x="1229" d="M1165 775Q1163 730 1157 677T1139 571T1107 469T1058 382T990 322T901 299Q864 299 836 312T789 348T757 401T738 464Q720 431 698 401T650 349T592 313T526 299Q485 299 455 316T404 363T370 432T350
+514T342 601T344 685Q352 784 377 871T445 1022T548 1124T686 1162Q718 1162 744 1154T791 1134T830 1105T865 1072L864 1071L816 567Q814 533 817 500T830 441T860 398T909 382Q942 382 967 400T1012 449T1045 520T1066 604T1078 692T1083 775Q1088 910 1065 1020T991
+1209T861 1330T673 1373Q570 1373 478 1327T314 1192T199 976T150 684Q145 562 166 452T241 259T389 128T622 79Q653 79 687 84T755 99T818 122T873 152L900 71Q873 52 839 37T767 11T691 -5T619 -11Q509 -11 424 15T276 87T171 198T103 340T69 504T62 684Q65 787
+85 885T141 1069T229 1228T348 1353T498 1435T676 1464Q815 1464 910 1409T1063 1260T1145 1041T1165 775ZM432 685Q430 658 430 626T433 562T444 499T464 446T496 408T541 394Q576 394 605 411T658 457T701 523T736 600L780 1040Q762 1054 740 1063T688 1073Q622
+1073 577 1039T501 950T455 826T432 685Z" />
+<glyph unicode="A" glyph-name="A" horiz-adv-x="1229" d="M905 406H351L222 0H101L575 1456H683L1153 0H1033L905 406ZM385 513H871L629 1279L385 513Z" />
+<glyph unicode="B" glyph-name="B" horiz-adv-x="1229" d="M194 0V1456H592Q680 1455 763 1433T912 1364T1016 1246T1055 1075Q1055 1013 1036 964T985 876T908 809T813 761Q876 746 929 715T1022 639T1083 535T1106 410Q1107 313 1070 238T970 110T825 29T652
+0H194ZM314 700V104H655Q721 105 781 126T886 187T958 281T985 408Q985 477 960 530T890 621T787 678T664 700H314ZM314 803H607Q670 804 729 821T833 872T907 957T935 1078Q935 1151 906 1202T829 1286T720 1334T595 1351H314V803Z" />
+<glyph unicode="C" glyph-name="C" horiz-adv-x="1229" d="M1099 442Q1087 344 1050 260T952 113T808 15T620 -20Q533 -20 463 6T338 79T243 187T177 320T139 469T125 622V833Q126 909 138 986T177 1134T243 1268T337 1376T462 1449T620 1476Q726 1476 809 1441T954
+1344T1051 1196T1099 1011H980Q968 1086 942 1152T871 1267T765 1345T620 1374Q549 1374 494 1350T398 1286T327 1193T280 1079T254 957T245 835V622Q245 563 253 500T280 377T327 264T397 170T494 105T620 81Q704 81 766 108T872 185T942 300T980 442H1099Z" />
+<glyph unicode="D" glyph-name="D" horiz-adv-x="1229" d="M177 0V1456H529Q669 1454 779 1401T965 1259T1081 1052T1123 799V656Q1121 522 1081 404T965 196T779 55T529 0H177ZM297 1354V101H529Q646 103 734 149T882 271T971 447T1003 656V802Q1002 910 972
+1009T882 1184T735 1306T529 1354H297Z" />
+<glyph unicode="E" glyph-name="E" horiz-adv-x="1229" d="M962 698H320V104H1060V0H201V1456H1056V1351H320V802H962V698Z" />
+<glyph unicode="F" glyph-name="F" horiz-adv-x="1229" d="M974 680H330V0H210V1456H1071V1351H330V785H974V680Z" />
+<glyph unicode="G" glyph-name="G" horiz-adv-x="1229" d="M1097 173Q1054 122 1002 85T892 25T769 -9T639 -20Q552 -19 480 7T349 80T248 189T177 324T133 474T117 631V835Q118 911 131 988T172 1136T241 1269T338 1377T464 1449T622 1476Q719 1476 801 1445T946
+1357T1046 1221T1095 1046H977Q963 1116 935 1176T862 1279T759 1346T623 1371Q553 1371 497 1348T399 1285T326 1193T276 1081T247 959T237 837V631Q238 570 248 506T280 382T333 267T409 173T511 109T640 84Q685 83 733 88T826 108T911 148T980 211L981 587H636V691H1095L1097
+173Z" />
+<glyph unicode="H" glyph-name="H" horiz-adv-x="1229" d="M1069 0H956V698H272V0H159V1456H272V802H956V1456H1069V0Z" />
+<glyph unicode="I" glyph-name="I" horiz-adv-x="1229" d="M193 1456H1036V1348H671V106H1036V0H193V106H547V1348H193V1456Z" />
+<glyph unicode="J" glyph-name="J" horiz-adv-x="1229" d="M902 1456H1024V442Q1022 344 988 260T894 114T750 16T568 -20Q469 -20 388 10T247 97T153 233T111 410H233Q236 338 261 278T329 174T433 108T568 84Q644 84 705 113T809 191T876 305T902 442V1456Z" />
+<glyph unicode="K" glyph-name="K" horiz-adv-x="1229" d="M508 721L321 522V0H200V1456H321V677L500 883L992 1456H1141L589 809L1181 0H1035L508 721Z" />
+<glyph unicode="L" glyph-name="L" horiz-adv-x="1229" d="M340 104H1081V0H220V1456H340V104Z" />
+<glyph unicode="M" glyph-name="M" horiz-adv-x="1229" d="M321 1456L614 632L926 1456H1083V0H966V645L975 1296L652 438H574L274 1282L284 645V0H167V1456H321Z" />
+<glyph unicode="N" glyph-name="N" horiz-adv-x="1229" d="M1068 0H949L281 1229L280 0H161V1456H280L948 229L949 1456H1068V0Z" />
+<glyph unicode="O" glyph-name="O" horiz-adv-x="1229" d="M1103 631Q1102 557 1090 480T1053 330T988 194T896 83T772 8T615 -20Q529 -20 459 7T335 82T241 194T176 330T138 480T124 631V823Q125 897 137 974T175 1124T239 1261T333 1373T456 1448T613 1476Q700
+1476 770 1449T894 1374T988 1262T1052 1126T1090 976T1103 823V631ZM985 825Q984 883 976 946T950 1069T904 1183T834 1279T738 1344T613 1368Q543 1368 489 1344T393 1278T324 1183T277 1068T251 945T242 825V631Q243 573 251 511T278 388T324 273T394 177T490
+112T615 87Q686 87 740 111T836 177T905 272T951 387T976 510T985 631V825Z" />
+<glyph unicode="P" glyph-name="P" horiz-adv-x="1229" d="M336 600V0H216V1456H674Q770 1454 853 1424T999 1339T1098 1204T1134 1025Q1134 925 1098 847T1000 714T853 630T674 600H336ZM336 701H674Q746 702 808 725T916 789T988 889T1015 1023Q1015 1098 989
+1159T917 1262T809 1328T674 1354H336V701Z" />
+<glyph unicode="Q" glyph-name="Q" horiz-adv-x="1229" d="M1127 640Q1126 557 1111 473T1065 311T987 168T874 57L1124 -178L1041 -254L765 3Q732 -8 696 -14T618 -20Q529 -20 457 7T328 82T230 194T161 332T121 484T106 640V814Q107 891 120 970T160 1123T228
+1261T326 1373T455 1448T616 1476Q705 1476 777 1449T906 1374T1004 1262T1072 1124T1113 971T1127 814V640ZM1008 816Q1007 877 999 942T971 1068T921 1183T848 1279T747 1344T616 1368Q543 1368 487 1344T387 1279T313 1183T264 1067T235 942T225 816V640Q226
+579 235 515T263 389T313 273T387 177T487 111T618 87Q691 87 748 111T849 176T922 272T971 388T998 514T1008 640V816Z" />
+<glyph unicode="R" glyph-name="R" horiz-adv-x="1229" d="M682 609H322V0H204V1456H615Q711 1454 796 1425T944 1342T1044 1208T1081 1027Q1081 954 1060 894T1000 786T910 700T794 637L1126 13V0H1001L682 609ZM322 710H639Q707 712 766 736T868 801T936 901T961
+1028Q961 1105 934 1165T861 1266T751 1330T614 1354H322V710Z" />
+<glyph unicode="S" glyph-name="S" horiz-adv-x="1229" d="M990 358Q990 409 975 449T932 522T868 578T791 622T707 656T624 684Q573 700 521 719T419 764T326 822T250 896T199 987T180 1100Q180 1191 220 1261T326 1379T472 1451T637 1476Q730 1476 814 1446T961
+1360T1063 1227T1104 1052H983Q975 1123 948 1181T876 1282T771 1347T637 1371Q576 1371 517 1355T410 1305T333 1221T303 1102Q303 1029 337 978T425 892T541 834T662 793Q715 777 769 757T873 709T968 649T1044 572T1095 477T1114 360Q1114 264 1073 193T965
+75T815 4T645 -20Q549 -20 459 8T299 90T184 223T137 405H258Q267 327 299 267T383 166T501 105T645 84Q709 84 770 100T881 149T960 234T990 358Z" />
+<glyph unicode="T" glyph-name="T" horiz-adv-x="1229" d="M1139 1351H675V0H558L559 1351H94V1456H1139V1351Z" />
+<glyph unicode="U" glyph-name="U" horiz-adv-x="1229" d="M1071 1456L1072 469Q1070 370 1038 281T946 126T803 20T615 -20Q510 -20 426 19T283 125T191 281T157 469L159 1456H272L275 469Q276 394 299 325T365 202T471 116T615 84Q696 84 758 116T863 201T928
+324T953 469L955 1456H1071Z" />
+<glyph unicode="V" glyph-name="V" horiz-adv-x="1229" d="M609 181L1006 1456H1133L666 0H555L88 1456H215L609 181Z" />
+<glyph unicode="W" glyph-name="W" horiz-adv-x="1229" d="M348 330L356 228L368 330L580 1456H684L896 330L908 225L917 330L1066 1456H1178L973 0H854L632 1214L409 0H290L85 1456H198L348 330Z" />
+<glyph unicode="X" glyph-name="X" horiz-adv-x="1229" d="M629 834L998 1456H1139L700 738L1153 0H1013L631 642L248 0H107L560 738L121 1456H262L629 834Z" />
+<glyph unicode="Y" glyph-name="Y" horiz-adv-x="1229" d="M600 662L989 1456H1125L658 548L656 0H544L542 548L75 1456H214L600 662Z" />
+<glyph unicode="Z" glyph-name="Z" horiz-adv-x="1229" d="M266 104H1060V0H130L128 93L893 1351H152V1456H1032L1033 1368L266 104Z" />
+<glyph unicode="[" glyph-name="bracketleft" horiz-adv-x="1229" d="M798 1562H567V-210H798V-312H452V1664H798V1562Z" />
+<glyph unicode="\" glyph-name="backslash" horiz-adv-x="1229" d="M256 1456H369L977 -125H864L256 1456Z" />
+<glyph unicode="]" glyph-name="bracketright" horiz-adv-x="1229" d="M430 1664H776V-312H430V-210H660V1562H430V1664Z" />
+<glyph unicode="^" glyph-name="asciicircum" horiz-adv-x="1229" d="M361 667H245L570 1456H659L983 667H867L614 1293L613 1295V1293L361 667Z" />
+<glyph unicode="_" glyph-name="underscore" horiz-adv-x="1229" d="M1054 -101H173V0H1054V-101Z" />
+<glyph unicode="`" glyph-name="grave" horiz-adv-x="1229" d="M775 1232H671L453 1498H597L775 1232Z" />
+<glyph unicode="a" glyph-name="a" horiz-adv-x="1229" d="M937 0Q926 33 920 73T912 156Q881 118 840 86T751 31T646 -6T529 -20Q453 -20 388 2T274 64T198 160T170 287Q170 380 211 445T322 550T484 610T677 629H911V752Q910 813 887 859T825 937T731 985T612
+1001Q554 1001 502 986T409 943T343 875T316 783L196 784Q199 853 232 911T323 1011T455 1078T617 1102Q703 1102 778 1080T909 1014T997 904T1031 750V226Q1031 172 1040 117T1064 12L1065 0H937ZM542 86Q605 85 662 101T768 145T853 213T911 300V537H693Q614
+537 541 525T412 486T323 412T289 297Q289 248 309 209T363 143T443 101T542 86Z" />
+<glyph unicode="b" glyph-name="b" horiz-adv-x="1229" d="M1072 530Q1072 425 1047 326T969 150T836 27T646 -20Q542 -20 458 22T316 145L311 0H199V1536H319V931Q374 1012 456 1057T644 1102Q755 1102 836 1057T969 935T1046 759T1072 551V530ZM951 551Q951
+632 934 712T879 855T778 958T621 998Q567 998 520 982T435 938T367 871T319 785V280Q339 236 369 200T439 138T525 98T623 84Q714 84 776 123T877 225T933 369T951 530V551Z" />
+<glyph unicode="c" glyph-name="c" horiz-adv-x="1229" d="M632 81Q692 81 746 98T843 149T911 228T942 334H1057Q1053 257 1017 192T923 80T790 7T632 -20Q513 -20 425 25T279 145T191 317T161 520V562Q161 667 190 764T278 937T425 1057T631 1102Q721 1102 797
+1074T930 994T1020 872T1057 717H942Q939 778 914 830T848 920T752 979T631 1000Q537 1000 471 962T362 862T301 722T281 562V520Q281 438 300 359T362 219T470 119T632 81Z" />
+<glyph unicode="d" glyph-name="d" horiz-adv-x="1229" d="M157 551Q157 659 184 758T264 934T398 1056T588 1102Q691 1102 772 1058T909 935V1536H1028V0H916L911 141Q854 63 771 22T586 -20Q479 -20 399 26T265 149T184 325T157 530V551ZM278 530Q278 449 295
+369T352 226T454 123T608 84Q659 84 704 97T788 134T857 193T909 271V795Q889 840 860 877T792 941T708 983T610 998Q518 998 455 959T353 856T296 712T278 551V530Z" />
+<glyph unicode="e" glyph-name="e" horiz-adv-x="1229" d="M655 -20Q542 -20 449 22T290 136T187 304T149 510V553Q150 669 187 769T290 943T443 1059T632 1102Q740 1102 823 1063T962 957T1048 799T1079 606V533H269V510Q270 425 298 348T377 211T500 117T660
+81Q760 80 841 118T979 228L1055 171Q1025 129 984 94T893 33T782 -6T655 -20ZM632 1000Q559 1000 498 973T391 896T315 781T274 635H959V648Q957 715 935 778T871 891T769 970T632 1000Z" />
+<glyph unicode="f" glyph-name="f" horiz-adv-x="1230" d="M1028 984H597L599 0H475L476 984H181V1082H477V1168Q478 1279 511 1355T602 1478T736 1546T900 1567Q957 1567 1011 1560T1121 1540L1106 1438Q1064 1449 1015 1456T912 1463Q847 1463 791 1449T691
+1402T623 1311T597 1168V1082H1028V984Z" />
+<glyph unicode="g" glyph-name="g" horiz-adv-x="1229" d="M159 551Q159 659 185 758T264 934T398 1056T589 1102Q693 1102 775 1058T913 932L918 1082H1030V19Q1029 -80 999 -163T913 -305T776 -398T596 -431Q548 -431 496 -420T394 -386T299 -329T219 -246L286
+-174Q415 -330 588 -330Q665 -330 725 -305T825 -235T887 -126T910 14V137Q853 61 771 21T587 -20Q479 -20 399 26T265 149T186 325T159 530V551ZM279 530Q279 449 296 369T353 226T455 123T609 84Q660 84 706 97T790 136T859 196T910 276V791Q890 836 861 874T794
+939T710 982T611 998Q519 998 456 959T354 856T297 712T279 551V530Z" />
+<glyph unicode="h" glyph-name="h" horiz-adv-x="1229" d="M313 873Q370 977 465 1039T682 1102Q771 1102 839 1075T954 995T1025 866T1050 691V0H930V691Q930 763 913 820T862 916T775 977T653 998Q596 998 544 980T448 930T370 855T313 759V0H193V1536H313V873Z" />
+<glyph unicode="i" glyph-name="i" horiz-adv-x="1229" d="M221 1082H723V106H1091V0H221V106H603V975H221V1082ZM568 1378Q568 1415 589 1442T659 1469Q707 1469 728 1442T750 1378Q750 1343 729 1317T659 1290Q610 1290 589 1316T568 1378Z" />
+<glyph unicode="j" glyph-name="j" horiz-adv-x="1229" d="M301 1082H811V0Q811 -102 784 -183T704 -320T575 -407T402 -437Q345 -437 298 -434T195 -420L203 -321Q221 -324 247 -326T301 -330T354 -332T394 -333Q454 -333 508 -316T602 -260T667 -157T691 0V975H301V1082ZM652
+1379Q652 1416 672 1442T742 1469Q790 1469 812 1443T834 1379Q834 1344 812 1318T742 1291Q693 1291 673 1317T652 1379Z" />
+<glyph unicode="k" glyph-name="k" horiz-adv-x="1229" d="M490 547L337 410V0H216V1536H337V548L466 675L905 1082H1058L575 626L1108 0H954L490 547Z" />
+<glyph unicode="l" glyph-name="l" horiz-adv-x="1229" d="M221 1536H725V106H1091V0H221V106H605V1429H221V1536Z" />
+<glyph unicode="m" glyph-name="m" horiz-adv-x="1229" d="M197 1082L200 961Q233 1024 290 1062T436 1102Q482 1102 517 1092T579 1062T623 1017T652 959Q686 1020 745 1060T894 1102Q964 1102 1011 1078T1087 1013T1128 918T1141 804L1140 0H1028L1027 804Q1027
+839 1020 874T996 937T947 982T866 998Q816 997 782 982T725 943T690 890T671 830L670 0H558L557 815Q557 849 550 882T526 941T477 983T398 998Q350 997 317 983T261 946T225 892T203 828L202 0H89V1082H197Z" />
+<glyph unicode="n" glyph-name="n" horiz-adv-x="1229" d="M303 1082L312 879Q369 980 464 1040T680 1102Q858 1102 952 996T1047 693V0H927V691Q926 840 859 919T651 998Q593 998 541 979T445 927T368 849T313 751V0H193V1082H303Z" />
+<glyph unicode="o" glyph-name="o" horiz-adv-x="1229" d="M131 557Q132 667 165 766T259 939T410 1058T613 1102Q727 1102 816 1058T968 940T1063 766T1097 557V524Q1096 414 1064 315T969 142T818 24T615 -20Q501 -20 412 23T260 141T165 315T131 524V557ZM251
+524Q251 438 275 358T345 217T458 118T615 81Q703 81 770 118T883 216T953 358T977 524V557Q977 642 953 722T883 863T769 962T613 1000Q525 1000 458 963T345 864T275 722T251 557V524Z" />
+<glyph unicode="p" glyph-name="p" horiz-adv-x="1229" d="M1068 530Q1068 425 1043 326T965 150T832 27T642 -20Q541 -20 457 19T316 132V-416H196V1082H308L313 934Q370 1014 453 1058T640 1102Q751 1102 832 1057T965 935T1042 759T1068 551V530ZM948 551Q948
+632 930 712T872 855T769 958T613 998Q561 998 516 984T433 943T366 880T316 799V263Q358 179 436 130T615 81Q706 81 769 121T872 224T930 368T948 530V551Z" />
+<glyph unicode="q" glyph-name="q" horiz-adv-x="1229" d="M158 551Q158 659 184 758T263 934T398 1056T591 1102Q692 1102 773 1060T911 939L917 1082H1029V-416H909V130Q851 57 769 19T589 -20Q479 -20 398 26T264 149T185 325T158 530V551ZM279 530Q279 449
+297 369T354 225T457 121T611 81Q710 81 787 130T909 261V807Q888 849 858 884T791 945T708 985T613 1000Q521 1000 458 960T355 857T297 713T279 551V530Z" />
+<glyph unicode="r" glyph-name="r" horiz-adv-x="1229" d="M909 1102Q954 1102 999 1097T1074 1081L1059 966Q1017 975 975 980T889 985Q737 985 631 913T482 692L483 0H359V1082H474L482 894V872Q548 979 657 1040T909 1102Z" />
+<glyph unicode="s" glyph-name="s" horiz-adv-x="1229" d="M948 275Q948 325 923 362T855 425T754 470T633 501Q555 517 482 539T352 597T261 682T227 807Q227 877 262 932T354 1025T484 1082T634 1102Q722 1102 798 1081T932 1018T1024 916T1061 779H941Q939
+831 913 872T844 942T747 985T634 1000Q580 1000 529 989T437 954T372 895T347 811Q346 760 370 726T437 668T535 629T656 600Q738 583 812 559T944 498T1034 408T1067 281Q1067 205 1031 149T935 55T799 -1T641 -20Q550 -20 470 2T328 65T229 167T189 303H309Q315
+239 347 196T427 128T531 92T641 81Q696 81 750 92T848 127T919 187T948 275Z" />
+<glyph unicode="t" glyph-name="t" horiz-adv-x="1229" d="M592 1359V1082H993V984H592V303Q593 240 611 199T661 132T735 95T828 84Q878 84 930 91T1020 105L1038 14Q995 -3 933 -11T807 -20Q735 -20 674 -3T568 53T497 153T470 304V984H169V1082H470V1359H592Z" />
+<glyph unicode="u" glyph-name="u" horiz-adv-x="1229" d="M919 169Q865 80 777 30T567 -20Q477 -20 409 9T294 94T223 231T198 416V1082H317V414Q319 251 383 167T581 83Q716 83 800 145T917 313L916 1082H1037V0H925L919 169Z" />
+<glyph unicode="v" glyph-name="v" horiz-adv-x="1229" d="M608 162L613 141L619 162L991 1082H1116L661 0H563L107 1082H232L608 162Z" />
+<glyph unicode="w" glyph-name="w" horiz-adv-x="1229" d="M324 351L356 159L396 351L579 1082H663L846 351L886 154L921 351L1069 1082H1180L940 0H843L652 740L620 913L588 740L399 0H302L62 1082H173L324 351Z" />
+<glyph unicode="x" glyph-name="x" horiz-adv-x="1229" d="M624 640L960 1082H1105L693 551L1121 0H979L626 461L273 0H130L558 551L146 1082H289L624 640Z" />
+<glyph unicode="y" glyph-name="y" horiz-adv-x="1229" d="M600 220L622 161L1013 1082H1149L590 -173Q568 -222 538 -269T467 -353T373 -414T250 -437Q226 -437 197 -433T148 -423L162 -323Q179 -325 204 -327T247 -330Q293 -330 331 -312T398 -266T450 -202T488
+-132L562 28L76 1082H211L600 220Z" />
+<glyph unicode="z" glyph-name="z" horiz-adv-x="1229" d="M305 101H1098V0H148L149 88L885 979H171V1082H1046L1045 993L305 101Z" />
+<glyph unicode="{" glyph-name="braceleft" horiz-adv-x="1229" d="M974 -404Q883 -389 815 -347T702 -243T633 -104T609 58V266Q608 406 538 473T331 543V570H330V597V619L331 647Q467 650 537 716T609 922V1132Q610 1216 632 1293T698 1433T811 1539T973 1597L992
+1518Q918 1504 868 1469T788 1382T744 1266T729 1132V922Q727 798 669 714T495 594Q610 558 668 474T729 266V58Q730 -13 746 -76T795 -189T877 -275T994 -324L974 -404Z" />
+<glyph unicode="|" glyph-name="bar" horiz-adv-x="1229" d="M665 -332H564V1456H665V-332Z" />
+<glyph unicode="}" glyph-name="braceright" horiz-adv-x="1229" d="M330 -324Q397 -310 446 -275T529 -190T578 -76T595 58V266Q596 389 655 473T829 594Q714 630 655 714T595 922V1132Q594 1202 581 1266T537 1381T456 1468T333 1518L351 1597Q444 1582 512
+1539T626 1434T693 1294T715 1132V922Q716 783 786 717T993 647V619H994L993 594L994 570H993V543Q857 540 787 473T715 266V58Q715 -26 692 -103T624 -242T510 -346T350 -404L330 -324Z" />
+<glyph unicode="~" glyph-name="asciitilde" horiz-adv-x="1229" d="M1168 746Q1167 683 1148 625T1092 521T1003 450T883 423Q837 423 797 433T719 463T646 509T574 568Q547 591 520 611T465 647T406 671T343 680Q297 680 262 662T204 614T168 543T154 459L59
+467Q60 531 79 589T135 691T223 761T343 787Q389 787 430 776T509 746T582 700T654 643Q682 619 708 598T763 562T820 538T883 529Q929 529 964 548T1023 598T1059 670T1073 755L1168 746Z" />
+<glyph unicode="&#xa1;" glyph-name="exclamdown" horiz-adv-x="1229" d="M558 654H678V-365H558V654ZM699 1020Q699 986 676 962T613 938Q574 938 551 961T527 1020Q527 1055 550 1079T613 1103Q651 1103 675 1079T699 1020Z" />
+<glyph unicode="&#xa2;" glyph-name="cent" horiz-adv-x="1229" d="M637 81Q697 81 751 98T848 149T916 228T947 334H1062Q1058 264 1028 204T949 98T835 23T697 -16V-245H576V-16Q473 -5 396 43T268 165T192 329T166 520V562Q166 659 191 751T268 916T396 1037T576
+1098V1318H697V1098Q775 1089 840 1057T954 975T1030 859T1062 717H947Q944 778 919 830T853 920T757 979T636 1000Q542 1000 476 962T367 862T306 722T286 562V520Q286 438 305 359T367 219T475 119T637 81Z" />
+<glyph unicode="&#xa3;" glyph-name="sterling" horiz-adv-x="1229" d="M441 647L450 367Q451 295 437 225T385 101H1133V0H138V101H235Q266 113 285 143T315 213T328 293T330 367L321 647H134V748H318L309 1039Q309 1132 337 1211T417 1350T545 1442T714 1476Q805
+1476 877 1448T1000 1368T1077 1245T1105 1089H985Q984 1156 963 1209T906 1299T818 1355T705 1374Q639 1374 588 1348T501 1277T447 1170T429 1039L438 748H782V647H441Z" />
+<glyph unicode="&#xa4;" glyph-name="currency" horiz-adv-x="1229" d="M956 116Q893 53 812 17T634 -20Q537 -20 456 16T312 114L185 -26L101 60L236 208Q185 285 159 376T132 563Q132 662 161 756T244 928L101 1087L185 1174L324 1021Q386 1078 463 1111T634
+1144Q727 1144 805 1111T945 1019L1086 1175L1171 1087L1025 925Q1078 847 1106 754T1134 563Q1134 469 1108 379T1033 212L1171 60L1086 -27L956 116ZM246 563Q246 478 273 394T350 242T473 133T634 91Q723 91 794 133T916 242T994 393T1021 563Q1021 647 994
+731T917 882T795 990T634 1032Q545 1032 474 991T352 882T274 732T246 563Z" />
+<glyph unicode="&#xa5;" glyph-name="yen" horiz-adv-x="1229" d="M614 733L1037 1456H1176L722 705H1092V618H672V412H1092V326H672V0H552V326H138V412H552V618H138V705H506L53 1456H191L614 733Z" />
+<glyph unicode="&#xa6;" glyph-name="brokenbar" horiz-adv-x="1229" d="M543 -270V501H663V-270H543ZM663 698H543V1456H663V698Z" />
+<glyph unicode="&#xa7;" glyph-name="section" horiz-adv-x="1229" d="M1121 431Q1121 376 1103 332T1054 254T981 197T888 158Q930 135 965 107T1025 42T1063 -38T1077 -139Q1077 -229 1039 -296T937 -407T788 -473T608 -495Q509 -495 419 -470T259 -392T148
+-258T105 -64L225 -62Q227 -148 259 -210T343 -313T464 -373T608 -393Q677 -393 740 -377T851 -330T928 -251T957 -141Q957 -77 930 -33T853 43T735 99T587 146Q496 170 411 200T258 277T151 389T111 551Q111 605 127 649T173 727T244 787T334 827Q294 850 261
+878T205 942T169 1022T156 1120Q156 1208 194 1274T296 1386T446 1453T626 1476Q733 1476 820 1447T968 1363T1063 1228T1099 1045H979Q977 1117 952 1177T880 1281T770 1349T626 1374Q554 1374 491 1359T380 1312T304 1233T276 1122Q276 1055 303 1010T379 931T496
+875T648 828Q739 803 824 774T976 700T1081 591T1121 431ZM598 729Q562 739 528 749T461 770Q413 764 371 748T298 705T249 640T231 553Q230 482 261 435T347 355T475 298T632 251Q663 242 693 233T753 214Q802 220 846 236T925 278T980 341T1002 428Q1003 495
+970 542T881 622T752 681T598 729Z" />
+<glyph unicode="&#xa8;" glyph-name="dieresis" horiz-adv-x="1229" d="M310 1396Q310 1429 332 1453T392 1477Q430 1477 452 1453T475 1396Q475 1364 453 1341T392 1317Q354 1317 332 1340T310 1396ZM748 1395Q748 1428 770 1452T830 1476Q868 1476 890 1452T913
+1395Q913 1363 891 1340T830 1316Q792 1316 770 1339T748 1395Z" />
+<glyph unicode="&#xa9;" glyph-name="copyright" horiz-adv-x="1229" d="M853 449Q854 402 840 360T797 285T722 234T615 215Q548 215 501 242T422 312T377 408T362 514V581Q362 633 376 685T422 780T500 848T615 875Q676 875 721 857T796 808T840 735T854 646H775Q773
+677 765 705T739 754T690 787T615 800Q566 800 534 780T481 727T452 657T443 582V514Q443 478 452 439T481 367T533 313T615 290Q660 290 690 303T738 338T764 389T774 449H853ZM158 546Q158 455 189 367T279 208T425 95T623 51Q734 51 820 94T965 208T1056 367T1087
+546Q1087 635 1056 722T966 877T820 988T623 1031Q512 1031 426 989T280 878T189 722T158 546ZM87 546Q87 651 123 751T229 929T397 1056T623 1104Q749 1104 848 1056T1016 930T1121 751T1158 546Q1158 439 1122 338T1017 156T849 28T623 -21Q539 -21 467 1T335
+63T229 156T152 273T104 405T87 546Z" />
+<glyph unicode="&#xaa;" glyph-name="ordfeminine" horiz-adv-x="1229" d="M816 705Q808 731 802 758T794 815Q758 762 697 727T555 691Q502 691 456 705T375 746T322 814T302 908Q302 970 328 1014T400 1085T506 1126T633 1140H792V1201Q790 1282 749 1330T625
+1380Q586 1380 551 1372T489 1347T446 1302T427 1237L319 1243Q322 1298 346 1341T412 1415T508 1460T625 1476Q689 1476 739 1457T826 1402T881 1315T901 1200V886Q901 793 928 705H816ZM575 786Q606 786 640 796T704 823T758 863T792 913V1058H634Q592 1058 552
+1050T481 1025T430 980T410 912Q410 879 423 855T458 816T510 793T575 786Z" />
+<glyph unicode="&#xab;" glyph-name="guillemotleft" horiz-adv-x="1229" d="M368 520L644 132H539L244 511V530L539 909H644L368 520ZM703 520L979 132H874L579 511V530L874 909H979L703 520Z" />
+<glyph unicode="&#xac;" glyph-name="logicalnot" horiz-adv-x="1229" d="M1009 386H890V670H198V776H1009V386Z" />
+<glyph unicode="&#xad;" glyph-name="uni00AD" horiz-adv-x="1230" d="M970 601H229V702H970V601Z" />
+<glyph unicode="&#xae;" glyph-name="registered" horiz-adv-x="1229" d="M84 539Q84 646 120 746T226 925T394 1050T620 1098Q704 1098 776 1077T908 1017T1013 926T1091 811T1138 679T1155 539Q1155 432 1119 332T1014 153T846 27T620 -21Q494 -21 395 26T226
+152T121 331T84 539ZM154 539Q154 448 185 361T276 204T422 93T620 51Q731 51 817 93T962 205T1053 361T1084 539Q1084 630 1053 717T963 873T817 984T620 1026Q509 1026 423 984T276 874T185 718T154 539ZM494 491V234H414V856H621Q726 854 785 807T845 671Q844
+613 809 574T708 516L860 234H776L642 491H494ZM494 565H633Q696 567 730 596T764 670Q765 725 731 752T621 781H494V565Z" />
+<glyph unicode="&#xaf;" glyph-name="overscore" horiz-adv-x="1229" d="M949 1359H279V1456H949V1359Z" />
+<glyph unicode="&#xb0;" glyph-name="degree" horiz-adv-x="1229" d="M370 1226Q370 1277 389 1322T442 1401T521 1456T616 1476Q665 1476 709 1456T786 1402T838 1322T857 1226Q857 1175 838 1131T787 1053T710 1000T616 980Q566 980 521 999T443 1052T390 1130T370
+1226ZM460 1226Q460 1193 472 1165T505 1116T554 1083T616 1071Q648 1071 675 1083T723 1116T755 1165T767 1226Q767 1259 756 1288T724 1338T677 1372T616 1385Q583 1385 555 1373T505 1339T472 1288T460 1226Z" />
+<glyph unicode="&#xb1;" glyph-name="plusminus" horiz-adv-x="1229" d="M681 829H1072V728H681V289H570V728H156V829H570V1267H681V829ZM1038 -1H203V101H1038V-1Z" />
+<glyph unicode="&#xb2;" glyph-name="twosuperior" horiz-adv-x="1229" d="M922 667H351V742L640 1025Q668 1052 693 1078T736 1131T766 1185T777 1239Q777 1272 765 1298T732 1342T681 1369T618 1379Q535 1379 488 1336T437 1217H331Q332 1272 354 1318T413 1397T502
+1448T616 1467Q673 1467 722 1452T807 1408T863 1337T883 1241Q883 1199 868 1162T826 1089T767 1020T698 952L485 754H922V667Z" />
+<glyph unicode="&#xb3;" glyph-name="threesuperior" horiz-adv-x="1229" d="M533 1108H611Q647 1108 680 1116T738 1140T779 1182T795 1242Q795 1275 783 1300T748 1343T695 1369T628 1378Q593 1378 563 1370T510 1345T472 1303T456 1246H351Q353 1298 375 1339T435
+1408T522 1451T626 1466Q683 1466 733 1452T820 1410T879 1341T901 1244Q900 1179 858 1135T751 1069Q826 1050 871 1005T917 881Q917 825 894 783T831 713T739 670T629 655Q574 655 523 669T431 711T365 783T337 885H443Q445 850 460 824T501 779T560 752T632
+742Q669 742 701 751T758 778T797 822T811 884Q811 925 794 952T749 996T685 1019T611 1026H533V1108Z" />
+<glyph unicode="&#xb4;" glyph-name="acute" horiz-adv-x="1229" d="M634 1498H779L549 1232H450L634 1498Z" />
+<glyph unicode="&#xb5;" glyph-name="mu" horiz-adv-x="1229" d="M324 1082V449Q325 362 341 294T389 178T470 106T587 81Q659 81 712 99T804 150T866 228T903 329V1082H1023V0H914L907 162Q861 78 785 30T596 -20Q506 -20 438 14T324 110V-416H206V1082H324Z" />
+<glyph unicode="&#xb6;" glyph-name="paragraph" horiz-adv-x="1229" d="M830 0V520H724Q613 520 523 554T368 649T268 797T233 988Q233 1092 268 1178T367 1325T522 1421T724 1456H950V0H830Z" />
+<glyph unicode="&#xb7;" glyph-name="middot" horiz-adv-x="1229" d="M534 717Q534 751 557 775T619 800Q658 800 681 776T705 717Q705 683 682 659T619 635Q580 635 557 659T534 717Z" />
+<glyph unicode="&#xb8;" glyph-name="cedilla" horiz-adv-x="1229" d="M624 0L612 -64Q645 -69 674 -81T727 -113T763 -163T777 -235Q777 -287 753 -324T689 -385T595 -422T483 -435L476 -357Q509 -357 545 -351T610 -332T658 -295T678 -237Q678 -205 664 -185T626
+-154T569 -137T503 -127L531 0H624Z" />
+<glyph unicode="&#xb9;" glyph-name="onesuperior" horiz-adv-x="1229" d="M743 665H638V1328L406 1258V1348L730 1455H743V665Z" />
+<glyph unicode="&#xba;" glyph-name="ordmasculine" horiz-adv-x="1229" d="M291 1141Q291 1211 313 1272T377 1378T479 1450T614 1476Q690 1476 749 1450T850 1379T914 1272T937 1141V1024Q937 954 915 893T851 787T751 716T616 690Q540 690 480 716T378 787T314
+893T291 1024V1141ZM399 1024Q400 974 413 931T454 855T521 804T616 785Q668 785 708 804T775 855T815 930T830 1024V1141Q829 1190 815 1233T774 1309T707 1361T614 1380Q561 1380 521 1361T454 1310T414 1234T399 1141V1024Z" />
+<glyph unicode="&#xbb;" glyph-name="guillemotright" horiz-adv-x="1229" d="M350 944L644 560V541L350 162H244L520 550L244 944H350ZM695 944L989 560V541L695 162H589L865 550L589 944H695Z" />
+<glyph unicode="&#xbc;" glyph-name="onequarter" horiz-adv-x="1229" d="M358 751H264V1348L55 1285V1366L347 1462H358V751ZM622 179L541 227L1064 1275L1147 1227L622 179ZM1040 248H1154V168H1040V0H945V168H586L582 228L940 710H1040V248ZM685 248H945V593L917
+548L685 248Z" />
+<glyph unicode="&#xbd;" glyph-name="onehalf" horiz-adv-x="1229" d="M580 179L499 227L1022 1275L1105 1227L580 179ZM355 747H261V1344L52 1281V1362L344 1458H355V747ZM1164 0H650V68L910 322Q935 347 957 370T996 418T1023 466T1033 515Q1033 545 1023 568T993
+607T947 632T890 641Q815 641 773 602T727 495H632Q633 545 652 586T706 657T786 703T888 720Q940 720 984 707T1060 667T1111 603T1129 517Q1129 479 1115 445T1078 379T1024 317T962 257L770 78H1164V0Z" />
+<glyph unicode="&#xbe;" glyph-name="threequarters" horiz-adv-x="1229" d="M438 174L357 222L880 1270L963 1222L438 174ZM1068 249H1182V169H1068V1H973V169H614L610 229L968 711H1068V249ZM713 249H973V594L945 549L713 249ZM226 1142H296Q329 1142 358 1149T411
+1171T448 1208T462 1262Q462 1292 451 1315T419 1353T372 1377T312 1385Q280 1385 253 1378T205 1355T172 1318T157 1266H62Q64 1313 84 1350T138 1412T216 1450T310 1464Q361 1464 406 1452T485 1414T538 1351T557 1264Q556 1206 519 1167T422 1107Q490 1090 531
+1049T572 938Q572 887 551 850T494 786T411 747T312 734Q263 734 217 746T134 785T74 849T50 941H145Q147 910 161 886T198 846T251 821T315 812Q348 812 377 820T429 845T463 885T476 940Q476 977 461 1002T421 1041T363 1061T296 1068H226V1142Z" />
+<glyph unicode="&#xbf;" glyph-name="questiondown" horiz-adv-x="1229" d="M695 684Q694 624 689 582T669 504T628 437T559 365Q520 325 483 284T416 196T370 99T352 -12Q352 -74 369 -123T421 -207T503 -261T614 -280Q670 -280 717 -263T799 -212T854 -132T877
+-26H996Q994 -107 966 -173T887 -287T768 -360T614 -386Q525 -386 455 -359T335 -284T259 -167T232 -14Q232 58 253 122T311 242T393 349T487 443Q517 471 535 497T562 552T573 612T576 684H695ZM544 1018Q544 1052 567 1076T630 1101Q669 1101 692 1077T715 1018Q715
+984 692 960T630 936Q591 936 568 960T544 1018Z" />
+<glyph unicode="&#xc0;" glyph-name="Agrave" horiz-adv-x="1229" d="M905 406H351L222 0H101L575 1456H683L1153 0H1033L905 406ZM385 513H871L629 1279L385 513ZM691 1565H587L369 1831H513L691 1565Z" />
+<glyph unicode="&#xc1;" glyph-name="Aacute" horiz-adv-x="1229" d="M905 406H351L222 0H101L575 1456H683L1153 0H1033L905 406ZM385 513H871L629 1279L385 513ZM752 1829H897L667 1563H568L752 1829Z" />
+<glyph unicode="&#xc2;" glyph-name="Acircumflex" horiz-adv-x="1229" d="M905 406H351L222 0H101L575 1456H683L1153 0H1033L905 406ZM385 513H871L629 1279L385 513ZM909 1608V1591H798L629 1765L462 1591H354V1611L592 1850H668L909 1608Z" />
+<glyph unicode="&#xc3;" glyph-name="Atilde" horiz-adv-x="1229" d="M905 406H351L222 0H101L575 1456H683L1153 0H1033L905 406ZM385 513H871L629 1279L385 513ZM955 1824Q953 1783 940 1746T902 1681T843 1637T766 1620Q719 1620 686 1637T622 1675T561 1713T489
+1730Q438 1729 410 1695T379 1611L300 1624Q301 1664 314 1701T352 1766T411 1810T489 1827Q531 1827 564 1810T629 1772T693 1734T766 1717Q791 1717 811 1727T845 1753T866 1792T876 1839L955 1824Z" />
+<glyph unicode="&#xc4;" glyph-name="Adieresis" horiz-adv-x="1229" d="M905 406H351L222 0H101L575 1456H683L1153 0H1033L905 406ZM385 513H871L629 1279L385 513ZM322 1729Q322 1762 344 1786T404 1810Q442 1810 464 1786T487 1729Q487 1697 465 1674T404
+1650Q366 1650 344 1673T322 1729ZM760 1728Q760 1761 782 1785T842 1809Q880 1809 902 1785T925 1728Q925 1696 903 1673T842 1649Q804 1649 782 1672T760 1728Z" />
+<glyph unicode="&#xc5;" glyph-name="Aring" horiz-adv-x="1229" d="M905 406H351L222 0H101L575 1456H683L1153 0H1033L905 406ZM385 513H871L629 1279L385 513ZM444 1717Q444 1755 458 1789T497 1848T555 1888T628 1903Q666 1903 699 1889T757 1849T796 1789T811
+1717Q811 1678 797 1645T758 1588T700 1549T628 1535Q589 1535 556 1549T498 1587T459 1645T444 1717ZM520 1717Q520 1694 528 1674T551 1639T585 1615T628 1606Q651 1606 670 1615T704 1639T726 1674T734 1717Q734 1740 726 1761T704 1797T671 1822T628 1831Q605
+1831 585 1822T551 1798T528 1761T520 1717Z" />
+<glyph unicode="&#xc6;" glyph-name="AE" horiz-adv-x="1229" d="M1171 0H668L662 389H320L180 0H55L607 1456H1141V1354H759L767 809H1099V707H769L777 101H1171V0ZM361 502H661L650 1300L361 502Z" />
+<glyph unicode="&#xc7;" glyph-name="Ccedilla" horiz-adv-x="1229" d="M1099 442Q1087 344 1050 260T952 113T808 15T620 -20Q533 -20 463 6T338 79T243 187T177 320T139 469T125 622V833Q126 909 138 986T177 1134T243 1268T337 1376T462 1449T620 1476Q726
+1476 809 1441T954 1344T1051 1196T1099 1011H980Q968 1086 942 1152T871 1267T765 1345T620 1374Q549 1374 494 1350T398 1286T327 1193T280 1079T254 957T245 835V622Q245 563 253 500T280 377T327 264T397 170T494 105T620 81Q704 81 766 108T872 185T942 300T980
+442H1099ZM661 252L649 188Q682 183 711 171T764 139T800 89T814 17Q814 -35 790 -72T726 -133T632 -170T520 -183L513 -105Q546 -105 582 -99T647 -80T695 -43T715 15Q715 47 701 67T663 98T606 115T540 125L568 252H661Z" />
+<glyph unicode="&#xc8;" glyph-name="Egrave" horiz-adv-x="1229" d="M962 698H320V104H1060V0H201V1456H1056V1351H320V802H962V698ZM635 1565H531L313 1831H457L635 1565Z" />
+<glyph unicode="&#xc9;" glyph-name="Eacute" horiz-adv-x="1229" d="M962 698H320V104H1060V0H201V1456H1056V1351H320V802H962V698ZM696 1829H841L611 1563H512L696 1829Z" />
+<glyph unicode="&#xca;" glyph-name="Ecircumflex" horiz-adv-x="1229" d="M962 698H320V104H1060V0H201V1456H1056V1351H320V802H962V698ZM853 1608V1591H742L573 1765L406 1591H298V1611L536 1850H612L853 1608Z" />
+<glyph unicode="&#xcb;" glyph-name="Edieresis" horiz-adv-x="1229" d="M962 698H320V104H1060V0H201V1456H1056V1351H320V802H962V698ZM267 1729Q267 1762 289 1786T349 1810Q387 1810 409 1786T432 1729Q432 1697 410 1674T349 1650Q311 1650 289 1673T267
+1729ZM705 1728Q705 1761 727 1785T787 1809Q825 1809 847 1785T870 1728Q870 1696 848 1673T787 1649Q749 1649 727 1672T705 1728Z" />
+<glyph unicode="&#xcc;" glyph-name="Igrave" horiz-adv-x="1229" d="M193 1456H1036V1348H671V106H1036V0H193V106H547V1348H193V1456ZM662 1565H558L340 1831H484L662 1565Z" />
+<glyph unicode="&#xcd;" glyph-name="Iacute" horiz-adv-x="1229" d="M193 1456H1036V1348H671V106H1036V0H193V106H547V1348H193V1456ZM723 1829H868L638 1563H539L723 1829Z" />
+<glyph unicode="&#xce;" glyph-name="Icircumflex" horiz-adv-x="1229" d="M193 1456H1036V1348H671V106H1036V0H193V106H547V1348H193V1456ZM880 1608V1591H769L600 1765L433 1591H325V1611L563 1850H639L880 1608Z" />
+<glyph unicode="&#xcf;" glyph-name="Idieresis" horiz-adv-x="1229" d="M193 1456H1036V1348H671V106H1036V0H193V106H547V1348H193V1456ZM294 1729Q294 1762 316 1786T376 1810Q414 1810 436 1786T459 1729Q459 1697 437 1674T376 1650Q338 1650 316 1673T294
+1729ZM732 1728Q732 1761 754 1785T814 1809Q852 1809 874 1785T897 1728Q897 1696 875 1673T814 1649Q776 1649 754 1672T732 1728Z" />
+<glyph unicode="&#xd0;" glyph-name="Eth" horiz-adv-x="1259" d="M207 0V689H-45V791H207V1456H559Q699 1454 809 1401T995 1259T1111 1052T1153 799V656Q1151 522 1111 404T995 196T809 55T559 0H207ZM567 689H327V101H559Q676 103 764 149T912 271T1001 447T1033
+656V802Q1032 910 1002 1009T912 1184T765 1306T559 1354H327V791H567V689Z" />
+<glyph unicode="&#xd1;" glyph-name="Ntilde" horiz-adv-x="1229" d="M1068 0H949L281 1229L280 0H161V1456H280L948 229L949 1456H1068V0ZM937 1824Q935 1783 922 1746T884 1681T825 1637T748 1620Q701 1620 668 1637T604 1675T543 1713T471 1730Q420 1729 392
+1695T361 1611L282 1624Q283 1664 296 1701T334 1766T393 1810T471 1827Q513 1827 546 1810T611 1772T675 1734T748 1717Q773 1717 793 1727T827 1753T848 1792T858 1839L937 1824Z" />
+<glyph unicode="&#xd2;" glyph-name="Ograve" horiz-adv-x="1229" d="M1103 631Q1102 557 1090 480T1053 330T988 194T896 83T772 8T615 -20Q529 -20 459 7T335 82T241 194T176 330T138 480T124 631V823Q125 897 137 974T175 1124T239 1261T333 1373T456 1448T613
+1476Q700 1476 770 1449T894 1374T988 1262T1052 1126T1090 976T1103 823V631ZM985 825Q984 883 976 946T950 1069T904 1183T834 1279T738 1344T613 1368Q543 1368 489 1344T393 1278T324 1183T277 1068T251 945T242 825V631Q243 573 251 511T278 388T324 273T394
+177T490 112T615 87Q686 87 740 111T836 177T905 272T951 387T976 510T985 631V825ZM689 1586H585L367 1852H511L689 1586Z" />
+<glyph unicode="&#xd3;" glyph-name="Oacute" horiz-adv-x="1229" d="M1103 631Q1102 557 1090 480T1053 330T988 194T896 83T772 8T615 -20Q529 -20 459 7T335 82T241 194T176 330T138 480T124 631V823Q125 897 137 974T175 1124T239 1261T333 1373T456 1448T613
+1476Q700 1476 770 1449T894 1374T988 1262T1052 1126T1090 976T1103 823V631ZM985 825Q984 883 976 946T950 1069T904 1183T834 1279T738 1344T613 1368Q543 1368 489 1344T393 1278T324 1183T277 1068T251 945T242 825V631Q243 573 251 511T278 388T324 273T394
+177T490 112T615 87Q686 87 740 111T836 177T905 272T951 387T976 510T985 631V825ZM750 1850H895L665 1584H566L750 1850Z" />
+<glyph unicode="&#xd4;" glyph-name="Ocircumflex" horiz-adv-x="1229" d="M1103 631Q1102 557 1090 480T1053 330T988 194T896 83T772 8T615 -20Q529 -20 459 7T335 82T241 194T176 330T138 480T124 631V823Q125 897 137 974T175 1124T239 1261T333 1373T456
+1448T613 1476Q700 1476 770 1449T894 1374T988 1262T1052 1126T1090 976T1103 823V631ZM985 825Q984 883 976 946T950 1069T904 1183T834 1279T738 1344T613 1368Q543 1368 489 1344T393 1278T324 1183T277 1068T251 945T242 825V631Q243 573 251 511T278 388T324
+273T394 177T490 112T615 87Q686 87 740 111T836 177T905 272T951 387T976 510T985 631V825ZM907 1629V1612H796L627 1786L460 1612H352V1632L590 1871H666L907 1629Z" />
+<glyph unicode="&#xd5;" glyph-name="Otilde" horiz-adv-x="1229" d="M1103 631Q1102 557 1090 480T1053 330T988 194T896 83T772 8T615 -20Q529 -20 459 7T335 82T241 194T176 330T138 480T124 631V823Q125 897 137 974T175 1124T239 1261T333 1373T456 1448T613
+1476Q700 1476 770 1449T894 1374T988 1262T1052 1126T1090 976T1103 823V631ZM985 825Q984 883 976 946T950 1069T904 1183T834 1279T738 1344T613 1368Q543 1368 489 1344T393 1278T324 1183T277 1068T251 945T242 825V631Q243 573 251 511T278 388T324 273T394
+177T490 112T615 87Q686 87 740 111T836 177T905 272T951 387T976 510T985 631V825ZM953 1845Q951 1804 938 1767T900 1702T841 1658T764 1641Q717 1641 684 1658T620 1696T559 1734T487 1751Q436 1750 408 1716T377 1632L298 1645Q299 1685 312 1722T350 1787T409
+1831T487 1848Q529 1848 562 1831T627 1793T691 1755T764 1738Q789 1738 809 1748T843 1774T864 1813T874 1860L953 1845Z" />
+<glyph unicode="&#xd6;" glyph-name="Odieresis" horiz-adv-x="1229" d="M1103 631Q1102 557 1090 480T1053 330T988 194T896 83T772 8T615 -20Q529 -20 459 7T335 82T241 194T176 330T138 480T124 631V823Q125 897 137 974T175 1124T239 1261T333 1373T456 1448T613
+1476Q700 1476 770 1449T894 1374T988 1262T1052 1126T1090 976T1103 823V631ZM985 825Q984 883 976 946T950 1069T904 1183T834 1279T738 1344T613 1368Q543 1368 489 1344T393 1278T324 1183T277 1068T251 945T242 825V631Q243 573 251 511T278 388T324 273T394
+177T490 112T615 87Q686 87 740 111T836 177T905 272T951 387T976 510T985 631V825ZM320 1750Q320 1783 342 1807T402 1831Q440 1831 462 1807T485 1750Q485 1718 463 1695T402 1671Q364 1671 342 1694T320 1750ZM758 1749Q758 1782 780 1806T840 1830Q878 1830
+900 1806T923 1749Q923 1717 901 1694T840 1670Q802 1670 780 1693T758 1749Z" />
+<glyph unicode="&#xd7;" glyph-name="multiply" horiz-adv-x="1229" d="M196 179L554 544L211 894L290 974L632 624L975 974L1054 894L711 544L1070 179L991 100L632 464L275 100L196 179Z" />
+<glyph unicode="&#xd8;" glyph-name="Oslash" horiz-adv-x="1229" d="M1093 631Q1092 557 1080 480T1043 330T978 194T886 83T762 8T605 -20Q514 -20 443 9T316 90L213 -84H110L254 160Q218 208 192 265T149 383T123 507T114 631V823Q115 897 127 974T165 1124T229
+1261T323 1373T446 1448T603 1476Q713 1476 795 1433T936 1319L1046 1505H1149L991 1238Q1043 1145 1067 1037T1093 823V631ZM232 631Q232 587 237 540T252 446T278 354T317 268L875 1215Q831 1283 765 1325T603 1368Q533 1368 479 1344T383 1278T314 1183T267
+1068T241 945T232 825V631ZM975 825Q974 896 962 972T922 1121L373 188Q415 142 472 115T605 87Q676 87 730 111T826 177T895 272T941 387T966 510T975 631V825Z" />
+<glyph unicode="&#xd9;" glyph-name="Ugrave" horiz-adv-x="1229" d="M1071 1456L1072 469Q1070 370 1038 281T946 126T803 20T615 -20Q510 -20 426 19T283 125T191 281T157 469L159 1456H272L275 469Q276 394 299 325T365 202T471 116T615 84Q696 84 758 116T863
+201T928 324T953 469L955 1456H1071ZM698 1559H594L376 1825H520L698 1559Z" />
+<glyph unicode="&#xda;" glyph-name="Uacute" horiz-adv-x="1229" d="M1071 1456L1072 469Q1070 370 1038 281T946 126T803 20T615 -20Q510 -20 426 19T283 125T191 281T157 469L159 1456H272L275 469Q276 394 299 325T365 202T471 116T615 84Q696 84 758 116T863
+201T928 324T953 469L955 1456H1071ZM759 1823H904L674 1557H575L759 1823Z" />
+<glyph unicode="&#xdb;" glyph-name="Ucircumflex" horiz-adv-x="1229" d="M1071 1456L1072 469Q1070 370 1038 281T946 126T803 20T615 -20Q510 -20 426 19T283 125T191 281T157 469L159 1456H272L275 469Q276 394 299 325T365 202T471 116T615 84Q696 84 758
+116T863 201T928 324T953 469L955 1456H1071ZM916 1602V1585H805L636 1759L469 1585H361V1605L599 1844H675L916 1602Z" />
+<glyph unicode="&#xdc;" glyph-name="Udieresis" horiz-adv-x="1229" d="M1071 1456L1072 469Q1070 370 1038 281T946 126T803 20T615 -20Q510 -20 426 19T283 125T191 281T157 469L159 1456H272L275 469Q276 394 299 325T365 202T471 116T615 84Q696 84 758 116T863
+201T928 324T953 469L955 1456H1071ZM329 1723Q329 1756 351 1780T411 1804Q449 1804 471 1780T494 1723Q494 1691 472 1668T411 1644Q373 1644 351 1667T329 1723ZM767 1722Q767 1755 789 1779T849 1803Q887 1803 909 1779T932 1722Q932 1690 910 1667T849 1643Q811
+1643 789 1666T767 1722Z" />
+<glyph unicode="&#xdd;" glyph-name="Yacute" horiz-adv-x="1229" d="M600 662L989 1456H1125L658 548L656 0H544L542 548L75 1456H214L600 662ZM724 1822H869L639 1556H540L724 1822Z" />
+<glyph unicode="&#xde;" glyph-name="Thorn" horiz-adv-x="1229" d="M310 1456V1152H634Q738 1152 824 1123T972 1040T1068 910T1103 738Q1103 643 1069 567T972 437T824 354T634 324H310V0H191V1456H310ZM310 1051V425H634Q712 426 776 449T886 513T958 610T984
+736Q984 805 959 863T888 962T778 1027T634 1051H310Z" />
+<glyph unicode="&#xdf;" glyph-name="germandbls" horiz-adv-x="1229" d="M312 0H193V1116Q193 1209 217 1287T286 1423T399 1513T555 1546Q624 1546 685 1525T791 1465T864 1369T891 1239Q891 1171 873 1118T832 1019T790 926T772 826Q772 777 795 738T853 662T928
+591T1002 513T1059 420T1082 301Q1082 224 1056 165T983 64T874 1T741 -21Q702 -21 660 -16T577 -1T501 24T440 59L473 160Q496 144 526 130T589 104T658 87T728 80Q780 80 823 96T898 141T946 210T963 301Q963 355 940 397T883 476T808 548T734 625T676 715T652
+828Q652 886 671 935T713 1031T755 1126T774 1232Q774 1279 757 1318T710 1385T643 1428T565 1444Q500 1445 453 1421T375 1352T329 1248T312 1116V0Z" />
+<glyph unicode="&#xe0;" glyph-name="agrave" horiz-adv-x="1229" d="M937 0Q926 33 920 73T912 156Q881 118 840 86T751 31T646 -6T529 -20Q453 -20 388 2T274 64T198 160T170 287Q170 380 211 445T322 550T484 610T677 629H911V752Q910 813 887 859T825 937T731
+985T612 1001Q554 1001 502 986T409 943T343 875T316 783L196 784Q199 853 232 911T323 1011T455 1078T617 1102Q703 1102 778 1080T909 1014T997 904T1031 750V226Q1031 172 1040 117T1064 12L1065 0H937ZM542 86Q605 85 662 101T768 145T853 213T911 300V537H693Q614
+537 541 525T412 486T323 412T289 297Q289 248 309 209T363 143T443 101T542 86ZM952 1244H848L630 1510H774L952 1244Z" />
+<glyph unicode="&#xe1;" glyph-name="aacute" horiz-adv-x="1229" d="M937 0Q926 33 920 73T912 156Q881 118 840 86T751 31T646 -6T529 -20Q453 -20 388 2T274 64T198 160T170 287Q170 380 211 445T322 550T484 610T677 629H911V752Q910 813 887 859T825 937T731
+985T612 1001Q554 1001 502 986T409 943T343 875T316 783L196 784Q199 853 232 911T323 1011T455 1078T617 1102Q703 1102 778 1080T909 1014T997 904T1031 750V226Q1031 172 1040 117T1064 12L1065 0H937ZM542 86Q605 85 662 101T768 145T853 213T911 300V537H693Q614
+537 541 525T412 486T323 412T289 297Q289 248 309 209T363 143T443 101T542 86ZM757 1508H902L672 1242H573L757 1508Z" />
+<glyph unicode="&#xe2;" glyph-name="acircumflex" horiz-adv-x="1229" d="M937 0Q926 33 920 73T912 156Q881 118 840 86T751 31T646 -6T529 -20Q453 -20 388 2T274 64T198 160T170 287Q170 380 211 445T322 550T484 610T677 629H911V752Q910 813 887 859T825
+937T731 985T612 1001Q554 1001 502 986T409 943T343 875T316 783L196 784Q199 853 232 911T323 1011T455 1078T617 1102Q703 1102 778 1080T909 1014T997 904T1031 750V226Q1031 172 1040 117T1064 12L1065 0H937ZM542 86Q605 85 662 101T768 145T853 213T911
+300V537H693Q614 537 541 525T412 486T323 412T289 297Q289 248 309 209T363 143T443 101T542 86ZM914 1287V1270H803L634 1444L467 1270H359V1290L597 1529H673L914 1287Z" />
+<glyph unicode="&#xe3;" glyph-name="atilde" horiz-adv-x="1229" d="M937 0Q926 33 920 73T912 156Q881 118 840 86T751 31T646 -6T529 -20Q453 -20 388 2T274 64T198 160T170 287Q170 380 211 445T322 550T484 610T677 629H911V752Q910 813 887 859T825 937T731
+985T612 1001Q554 1001 502 986T409 943T343 875T316 783L196 784Q199 853 232 911T323 1011T455 1078T617 1102Q703 1102 778 1080T909 1014T997 904T1031 750V226Q1031 172 1040 117T1064 12L1065 0H937ZM542 86Q605 85 662 101T768 145T853 213T911 300V537H693Q614
+537 541 525T412 486T323 412T289 297Q289 248 309 209T363 143T443 101T542 86ZM960 1503Q958 1462 945 1425T907 1360T848 1316T771 1299Q724 1299 691 1316T627 1354T566 1392T494 1409Q443 1408 415 1374T384 1290L305 1303Q306 1343 319 1380T357 1445T416
+1489T494 1506Q536 1506 569 1489T634 1451T698 1413T771 1396Q796 1396 816 1406T850 1432T871 1471T881 1518L960 1503Z" />
+<glyph unicode="&#xe4;" glyph-name="adieresis" horiz-adv-x="1229" d="M937 0Q926 33 920 73T912 156Q881 118 840 86T751 31T646 -6T529 -20Q453 -20 388 2T274 64T198 160T170 287Q170 380 211 445T322 550T484 610T677 629H911V752Q910 813 887 859T825 937T731
+985T612 1001Q554 1001 502 986T409 943T343 875T316 783L196 784Q199 853 232 911T323 1011T455 1078T617 1102Q703 1102 778 1080T909 1014T997 904T1031 750V226Q1031 172 1040 117T1064 12L1065 0H937ZM542 86Q605 85 662 101T768 145T853 213T911 300V537H693Q614
+537 541 525T412 486T323 412T289 297Q289 248 309 209T363 143T443 101T542 86ZM327 1408Q327 1441 349 1465T409 1489Q447 1489 469 1465T492 1408Q492 1376 470 1353T409 1329Q371 1329 349 1352T327 1408ZM765 1407Q765 1440 787 1464T847 1488Q885 1488 907
+1464T930 1407Q930 1375 908 1352T847 1328Q809 1328 787 1351T765 1407Z" />
+<glyph unicode="&#xe5;" glyph-name="aring" horiz-adv-x="1229" d="M937 0Q926 33 920 73T912 156Q881 118 840 86T751 31T646 -6T529 -20Q453 -20 388 2T274 64T198 160T170 287Q170 380 211 445T322 550T484 610T677 629H911V752Q910 813 887 859T825 937T731
+985T612 1001Q554 1001 502 986T409 943T343 875T316 783L196 784Q199 853 232 911T323 1011T455 1078T617 1102Q703 1102 778 1080T909 1014T997 904T1031 750V226Q1031 172 1040 117T1064 12L1065 0H937ZM542 86Q605 85 662 101T768 145T853 213T911 300V537H693Q614
+537 541 525T412 486T323 412T289 297Q289 248 309 209T363 143T443 101T542 86ZM449 1396Q449 1434 463 1468T502 1527T560 1567T633 1582Q671 1582 704 1568T762 1528T801 1468T816 1396Q816 1357 802 1324T763 1267T705 1228T633 1214Q594 1214 561 1228T503
+1266T464 1324T449 1396ZM525 1396Q525 1373 533 1353T556 1318T590 1294T633 1285Q656 1285 675 1294T709 1318T731 1353T739 1396Q739 1419 731 1440T709 1476T676 1501T633 1510Q610 1510 590 1501T556 1477T533 1440T525 1396Z" />
+<glyph unicode="&#xe6;" glyph-name="ae" horiz-adv-x="1229" d="M894 -20Q784 -19 713 28T601 153Q582 118 555 87T492 32T415 -6T323 -20Q255 -20 204 3T118 68T65 164T47 284Q47 361 70 425T140 535T252 606T406 632H537V802Q537 839 530 874T504 937T452 982T367
+1000Q323 1000 288 986T229 947T193 888T181 812L65 816Q64 881 85 933T147 1023T242 1081T366 1102Q467 1102 529 1062T621 957Q660 1021 722 1061T872 1102Q956 1102 1015 1071T1111 987T1166 866T1184 725L1183 538H653V405Q653 346 665 289T706 185T786 110T912
+81Q951 81 983 90T1042 113T1090 145T1129 181L1166 94Q1148 72 1123 52T1064 15T988 -10T894 -20ZM341 80Q371 80 400 90T456 117T503 156T539 202L538 536H408Q350 535 305 515T228 459T181 378T165 280Q165 240 176 204T208 140T263 96T341 80ZM653 636H1071L1070
+745Q1068 788 1058 833T1025 915T965 976T872 1000Q807 1000 765 968T699 888T664 781T653 670V636Z" />
+<glyph unicode="&#xe7;" glyph-name="ccedilla" horiz-adv-x="1229" d="M632 81Q692 81 746 98T843 149T911 228T942 334H1057Q1053 257 1017 192T923 80T790 7T632 -20Q513 -20 425 25T279 145T191 317T161 520V562Q161 667 190 764T278 937T425 1057T631 1102Q721
+1102 797 1074T930 994T1020 872T1057 717H942Q939 778 914 830T848 920T752 979T631 1000Q537 1000 471 962T362 862T301 722T281 562V520Q281 438 300 359T362 219T470 119T632 81ZM679 252L667 188Q700 183 729 171T782 139T818 89T832 17Q832 -35 808 -72T744
+-133T650 -170T538 -183L531 -105Q564 -105 600 -99T665 -80T713 -43T733 15Q733 47 719 67T681 98T624 115T558 125L586 252H679Z" />
+<glyph unicode="&#xe8;" glyph-name="egrave" horiz-adv-x="1229" d="M655 -20Q542 -20 449 22T290 136T187 304T149 510V553Q150 669 187 769T290 943T443 1059T632 1102Q740 1102 823 1063T962 957T1048 799T1079 606V533H269V510Q270 425 298 348T377 211T500
+117T660 81Q760 80 841 118T979 228L1055 171Q1025 129 984 94T893 33T782 -6T655 -20ZM632 1000Q559 1000 498 973T391 896T315 781T274 635H959V648Q957 715 935 778T871 891T769 970T632 1000ZM943 1244H839L621 1510H765L943 1244Z" />
+<glyph unicode="&#xe9;" glyph-name="eacute" horiz-adv-x="1229" d="M655 -20Q542 -20 449 22T290 136T187 304T149 510V553Q150 669 187 769T290 943T443 1059T632 1102Q740 1102 823 1063T962 957T1048 799T1079 606V533H269V510Q270 425 298 348T377 211T500
+117T660 81Q760 80 841 118T979 228L1055 171Q1025 129 984 94T893 33T782 -6T655 -20ZM632 1000Q559 1000 498 973T391 896T315 781T274 635H959V648Q957 715 935 778T871 891T769 970T632 1000ZM748 1508H893L663 1242H564L748 1508Z" />
+<glyph unicode="&#xea;" glyph-name="ecircumflex" horiz-adv-x="1229" d="M655 -20Q542 -20 449 22T290 136T187 304T149 510V553Q150 669 187 769T290 943T443 1059T632 1102Q740 1102 823 1063T962 957T1048 799T1079 606V533H269V510Q270 425 298 348T377
+211T500 117T660 81Q760 80 841 118T979 228L1055 171Q1025 129 984 94T893 33T782 -6T655 -20ZM632 1000Q559 1000 498 973T391 896T315 781T274 635H959V648Q957 715 935 778T871 891T769 970T632 1000ZM905 1287V1270H794L625 1444L458 1270H350V1290L588 1529H664L905
+1287Z" />
+<glyph unicode="&#xeb;" glyph-name="edieresis" horiz-adv-x="1229" d="M655 -20Q542 -20 449 22T290 136T187 304T149 510V553Q150 669 187 769T290 943T443 1059T632 1102Q740 1102 823 1063T962 957T1048 799T1079 606V533H269V510Q270 425 298 348T377 211T500
+117T660 81Q760 80 841 118T979 228L1055 171Q1025 129 984 94T893 33T782 -6T655 -20ZM632 1000Q559 1000 498 973T391 896T315 781T274 635H959V648Q957 715 935 778T871 891T769 970T632 1000ZM318 1408Q318 1441 340 1465T400 1489Q438 1489 460 1465T483 1408Q483
+1376 461 1353T400 1329Q362 1329 340 1352T318 1408ZM756 1407Q756 1440 778 1464T838 1488Q876 1488 898 1464T921 1407Q921 1375 899 1352T838 1328Q800 1328 778 1351T756 1407Z" />
+<glyph unicode="&#xec;" glyph-name="igrave" horiz-adv-x="1229" d="M221 1082H723V106H1091V0H221V106H603V975H221V1082ZM981 1254H877L659 1520H803L981 1254Z" />
+<glyph unicode="&#xed;" glyph-name="iacute" horiz-adv-x="1229" d="M221 1082H723V106H1091V0H221V106H603V975H221V1082ZM786 1518H931L701 1252H602L786 1518Z" />
+<glyph unicode="&#xee;" glyph-name="icircumflex" horiz-adv-x="1229" d="M221 1082H723V106H1091V0H221V106H603V975H221V1082ZM943 1297V1280H832L663 1454L496 1280H388V1300L626 1539H702L943 1297Z" />
+<glyph unicode="&#xef;" glyph-name="idieresis" horiz-adv-x="1229" d="M221 1082H723V106H1091V0H221V106H603V975H221V1082ZM356 1418Q356 1451 378 1475T438 1499Q476 1499 498 1475T521 1418Q521 1386 499 1363T438 1339Q400 1339 378 1362T356 1418ZM794
+1417Q794 1450 816 1474T876 1498Q914 1498 936 1474T959 1417Q959 1385 937 1362T876 1338Q838 1338 816 1361T794 1417Z" />
+<glyph unicode="&#xf0;" glyph-name="eth" horiz-adv-x="1229" d="M796 1293Q911 1169 974 1003T1038 623V552Q1038 435 1005 332T910 150T757 26T551 -20Q450 -20 365 17T218 121T121 276T86 466Q86 575 120 666T216 824T367 928T565 965Q665 965 753 926T901
+817Q878 949 824 1052T690 1235L439 1096L388 1170L619 1297Q559 1345 489 1381T341 1446L380 1550Q583 1483 729 1358L955 1483L1007 1410L796 1293ZM918 625Q918 640 918 655T916 684Q896 724 861 756T779 812T678 848T565 861Q480 861 414 830T301 746T231 620T207
+466Q207 389 232 320T303 198T413 114T555 83Q646 83 714 122T827 226T895 375T918 552V625Z" />
+<glyph unicode="&#xf1;" glyph-name="ntilde" horiz-adv-x="1229" d="M303 1082L312 879Q369 980 464 1040T680 1102Q858 1102 952 996T1047 693V0H927V691Q926 840 859 919T651 998Q593 998 541 979T445 927T368 849T313 751V0H193V1082H303ZM932 1502Q930 1461
+917 1424T879 1359T820 1315T743 1298Q696 1298 663 1315T599 1353T538 1391T466 1408Q415 1407 387 1373T356 1289L277 1302Q278 1342 291 1379T329 1444T388 1488T466 1505Q508 1505 541 1488T606 1450T670 1412T743 1395Q768 1395 788 1405T822 1431T843 1470T853
+1517L932 1502Z" />
+<glyph unicode="&#xf2;" glyph-name="ograve" horiz-adv-x="1229" d="M131 557Q132 667 165 766T259 939T410 1058T613 1102Q727 1102 816 1058T968 940T1063 766T1097 557V524Q1096 414 1064 315T969 142T818 24T615 -20Q501 -20 412 23T260 141T165 315T131
+524V557ZM251 524Q251 438 275 358T345 217T458 118T615 81Q703 81 770 118T883 216T953 358T977 524V557Q977 642 953 722T883 863T769 962T613 1000Q525 1000 458 963T345 864T275 722T251 557V524ZM933 1243H829L611 1509H755L933 1243Z" />
+<glyph unicode="&#xf3;" glyph-name="oacute" horiz-adv-x="1229" d="M131 557Q132 667 165 766T259 939T410 1058T613 1102Q727 1102 816 1058T968 940T1063 766T1097 557V524Q1096 414 1064 315T969 142T818 24T615 -20Q501 -20 412 23T260 141T165 315T131
+524V557ZM251 524Q251 438 275 358T345 217T458 118T615 81Q703 81 770 118T883 216T953 358T977 524V557Q977 642 953 722T883 863T769 962T613 1000Q525 1000 458 963T345 864T275 722T251 557V524ZM738 1507H883L653 1241H554L738 1507Z" />
+<glyph unicode="&#xf4;" glyph-name="ocircumflex" horiz-adv-x="1229" d="M131 557Q132 667 165 766T259 939T410 1058T613 1102Q727 1102 816 1058T968 940T1063 766T1097 557V524Q1096 414 1064 315T969 142T818 24T615 -20Q501 -20 412 23T260 141T165 315T131
+524V557ZM251 524Q251 438 275 358T345 217T458 118T615 81Q703 81 770 118T883 216T953 358T977 524V557Q977 642 953 722T883 863T769 962T613 1000Q525 1000 458 963T345 864T275 722T251 557V524ZM895 1286V1269H784L615 1443L448 1269H340V1289L578 1528H654L895
+1286Z" />
+<glyph unicode="&#xf5;" glyph-name="otilde" horiz-adv-x="1229" d="M131 557Q132 667 165 766T259 939T410 1058T613 1102Q727 1102 816 1058T968 940T1063 766T1097 557V524Q1096 414 1064 315T969 142T818 24T615 -20Q501 -20 412 23T260 141T165 315T131
+524V557ZM251 524Q251 438 275 358T345 217T458 118T615 81Q703 81 770 118T883 216T953 358T977 524V557Q977 642 953 722T883 863T769 962T613 1000Q525 1000 458 963T345 864T275 722T251 557V524ZM941 1502Q939 1461 926 1424T888 1359T829 1315T752 1298Q705
+1298 672 1315T608 1353T547 1391T475 1408Q424 1407 396 1373T365 1289L286 1302Q287 1342 300 1379T338 1444T397 1488T475 1505Q517 1505 550 1488T615 1450T679 1412T752 1395Q777 1395 797 1405T831 1431T852 1470T862 1517L941 1502Z" />
+<glyph unicode="&#xf6;" glyph-name="odieresis" horiz-adv-x="1229" d="M131 557Q132 667 165 766T259 939T410 1058T613 1102Q727 1102 816 1058T968 940T1063 766T1097 557V524Q1096 414 1064 315T969 142T818 24T615 -20Q501 -20 412 23T260 141T165 315T131
+524V557ZM251 524Q251 438 275 358T345 217T458 118T615 81Q703 81 770 118T883 216T953 358T977 524V557Q977 642 953 722T883 863T769 962T613 1000Q525 1000 458 963T345 864T275 722T251 557V524ZM565 1407Q565 1440 587 1464T647 1488Q685 1488 707 1464T730
+1407Q730 1375 708 1352T647 1328Q609 1328 587 1351T565 1407ZM1003 1406Q1003 1439 1025 1463T1085 1487Q1123 1487 1145 1463T1168 1406Q1168 1374 1146 1351T1085 1327Q1047 1327 1025 1350T1003 1406Z" />
+<glyph unicode="&#xf7;" glyph-name="divide" horiz-adv-x="1229" d="M1113 644H115V760H1113V644ZM539 1087Q539 1120 561 1144T621 1168Q659 1168 681 1144T704 1087Q704 1055 682 1032T621 1008Q583 1008 561 1031T539 1087ZM538 292Q538 324 560 348T620 372Q658
+372 680 348T703 292Q703 259 681 236T620 212Q582 212 560 235T538 292Z" />
+<glyph unicode="&#xf8;" glyph-name="oslash" horiz-adv-x="1229" d="M131 557Q132 667 165 766T259 939T410 1058T613 1102Q667 1102 715 1092T806 1062L880 1214H973L879 1022Q931 986 971 936T1039 825T1081 697T1097 557V524Q1096 414 1064 315T969 142T818
+24T615 -20Q552 -20 497 -7T394 32L310 -140H217L324 78Q277 115 242 164T182 270T144 391T131 524V557ZM977 557Q977 610 967 662T939 761T893 849T829 921L441 127Q516 81 615 81Q703 81 770 118T883 216T953 358T977 524V557ZM251 524Q251 424 282 334T374 181L759
+967Q727 983 691 991T613 1000Q525 1000 458 963T345 864T275 722T251 557V524Z" />
+<glyph unicode="&#xf9;" glyph-name="ugrave" horiz-adv-x="1229" d="M919 169Q865 80 777 30T567 -20Q477 -20 409 9T294 94T223 231T198 416V1082H317V414Q319 251 383 167T581 83Q716 83 800 145T917 313L916 1082H1037V0H925L919 169ZM916 1235H812L594 1501H738L916
+1235Z" />
+<glyph unicode="&#xfa;" glyph-name="uacute" horiz-adv-x="1229" d="M919 169Q865 80 777 30T567 -20Q477 -20 409 9T294 94T223 231T198 416V1082H317V414Q319 251 383 167T581 83Q716 83 800 145T917 313L916 1082H1037V0H925L919 169ZM721 1499H866L636 1233H537L721
+1499Z" />
+<glyph unicode="&#xfb;" glyph-name="ucircumflex" horiz-adv-x="1229" d="M919 169Q865 80 777 30T567 -20Q477 -20 409 9T294 94T223 231T198 416V1082H317V414Q319 251 383 167T581 83Q716 83 800 145T917 313L916 1082H1037V0H925L919 169ZM878 1278V1261H767L598
+1435L431 1261H323V1281L561 1520H637L878 1278Z" />
+<glyph unicode="&#xfc;" glyph-name="udieresis" horiz-adv-x="1229" d="M919 169Q865 80 777 30T567 -20Q477 -20 409 9T294 94T223 231T198 416V1082H317V414Q319 251 383 167T581 83Q716 83 800 145T917 313L916 1082H1037V0H925L919 169ZM548 1399Q548 1432
+570 1456T630 1480Q668 1480 690 1456T713 1399Q713 1367 691 1344T630 1320Q592 1320 570 1343T548 1399ZM986 1398Q986 1431 1008 1455T1068 1479Q1106 1479 1128 1455T1151 1398Q1151 1366 1129 1343T1068 1319Q1030 1319 1008 1342T986 1398Z" />
+<glyph unicode="&#xfd;" glyph-name="yacute" horiz-adv-x="1229" d="M600 220L622 161L1013 1082H1149L590 -173Q568 -222 538 -269T467 -353T373 -414T250 -437Q226 -437 197 -433T148 -423L162 -323Q179 -325 204 -327T247 -330Q293 -330 331 -312T398 -266T450
+-202T488 -132L562 28L76 1082H211L600 220ZM754 1499H899L669 1233H570L754 1499Z" />
+<glyph unicode="&#xfe;" glyph-name="thorn" horiz-adv-x="1229" d="M1068 530Q1068 425 1044 326T968 150T840 27T655 -20Q550 -20 463 20T316 137V-416H196V1546H316V933Q374 1014 460 1058T653 1102Q761 1102 839 1057T968 935T1043 759T1068 551V530ZM948
+551Q948 632 931 712T875 855T776 958T626 998Q572 998 525 983T438 942T368 878T316 796V265Q360 180 441 131T628 81Q716 81 777 121T876 224T931 368T948 530V551Z" />
+<glyph unicode="&#xff;" glyph-name="ydieresis" horiz-adv-x="1229" d="M600 220L622 161L1013 1082H1149L590 -173Q568 -222 538 -269T467 -353T373 -414T250 -437Q226 -437 197 -433T148 -423L162 -323Q179 -325 204 -327T247 -330Q293 -330 331 -312T398 -266T450
+-202T488 -132L562 28L76 1082H211L600 220ZM324 1399Q324 1432 346 1456T406 1480Q444 1480 466 1456T489 1399Q489 1367 467 1344T406 1320Q368 1320 346 1343T324 1399ZM762 1398Q762 1431 784 1455T844 1479Q882 1479 904 1455T927 1398Q927 1366 905 1343T844
+1319Q806 1319 784 1342T762 1398Z" />
+<glyph unicode="&#x2013;" glyph-name="endash" horiz-adv-x="1229" d="M1162 686H77V788H1162V686Z" />
+<glyph unicode="&#x2014;" glyph-name="emdash" horiz-adv-x="1229" d="M1170 686H85V788H1170V686Z" />
+<glyph unicode="&#x2018;" glyph-name="quoteleft" horiz-adv-x="1229" d="M527 1229Q527 1272 536 1317T562 1404T606 1486T666 1559L735 1508Q690 1444 668 1377T644 1232V1105H527V1229Z" />
+<glyph unicode="&#x2019;" glyph-name="quoteright" horiz-adv-x="1229" d="M702 1422Q702 1378 693 1334T666 1247T622 1165T562 1092L493 1144Q538 1207 560 1274T584 1420V1546H702V1422Z" />
+<glyph unicode="&#x201a;" glyph-name="quotesinglbase" horiz-adv-x="1229" d="M687 88Q687 45 678 1T651 -86T607 -168T547 -241L476 -189Q521 -126 544 -59T567 86V226H687V88Z" />
+<glyph unicode="&#x201c;" glyph-name="quotedblleft" horiz-adv-x="1229" d="M365 1229Q365 1272 374 1317T400 1404T444 1486T504 1559L573 1508Q528 1444 506 1377T482 1232V1105H365V1229ZM675 1229Q675 1272 684 1317T710 1404T754 1486T814 1559L883 1508Q838
+1444 816 1377T792 1232V1105H675V1229Z" />
+<glyph unicode="&#x201d;" glyph-name="quotedblright" horiz-adv-x="1229" d="M831 1422Q831 1378 822 1334T795 1247T751 1165T691 1092L622 1144Q667 1207 689 1274T713 1420V1546H831V1422ZM885 1422Q885 1378 876 1334T849 1247T805 1165T745 1092L676 1144Q721
+1207 743 1274T767 1420V1546H885V1422Z" />
+<glyph unicode="&#x201e;" glyph-name="quotedblbase" horiz-adv-x="1229" d="M554 87Q554 44 545 0T519 -87T475 -169T415 -242L344 -190Q389 -126 411 -60T435 85V225H554V87ZM803 87Q803 44 794 0T768 -87T724 -169T664 -242L593 -190Q638 -126 660 -60T684
+85V225H803V87Z" />
+<glyph unicode="&#x2022;" glyph-name="bullet" horiz-adv-x="1229" d="M428 760Q429 799 442 832T480 889T538 926T613 939Q654 939 688 926T746 889T784 832T799 760V717Q798 678 785 646T747 590T689 553T614 540Q573 540 539 553T481 589T443 645T428 717V760Z" />
+<glyph unicode="&#x2039;" glyph-name="guilsinglleft" horiz-adv-x="1229" d="M538 555L814 167H709L414 546V565L709 944H814L538 555Z" />
+<glyph unicode="&#x203a;" glyph-name="guilsinglright" horiz-adv-x="1229" d="M520 944L814 560V541L520 162H414L690 550L414 944H520Z" />
+</font>
+</defs>
+</svg>
diff --git a/site/assets/fonts/Roboto_Mono_300.ttf b/site/assets/fonts/Roboto_Mono_300.ttf
new file mode 100644
index 00000000..6c101fe4
--- /dev/null
+++ b/site/assets/fonts/Roboto_Mono_300.ttf
Binary files differ
diff --git a/site/assets/fonts/Roboto_Mono_300.woff b/site/assets/fonts/Roboto_Mono_300.woff
new file mode 100644
index 00000000..68d8403e
--- /dev/null
+++ b/site/assets/fonts/Roboto_Mono_300.woff
Binary files differ
diff --git a/site/assets/fonts/Roboto_Mono_300.woff2 b/site/assets/fonts/Roboto_Mono_300.woff2
new file mode 100644
index 00000000..ab64df38
--- /dev/null
+++ b/site/assets/fonts/Roboto_Mono_300.woff2
Binary files differ
diff --git a/site/assets/fonts/Roboto_Mono_400.eot b/site/assets/fonts/Roboto_Mono_400.eot
new file mode 100644
index 00000000..550df22a
--- /dev/null
+++ b/site/assets/fonts/Roboto_Mono_400.eot
Binary files differ
diff --git a/site/assets/fonts/Roboto_Mono_400.svg b/site/assets/fonts/Roboto_Mono_400.svg
new file mode 100644
index 00000000..f384d9ce
--- /dev/null
+++ b/site/assets/fonts/Roboto_Mono_400.svg
@@ -0,0 +1,390 @@
+<?xml version="1.0" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<svg xmlns="http://www.w3.org/2000/svg">
+<defs >
+<font id="RobotoMono" horiz-adv-x="1230" ><font-face
+ font-family="Roboto Mono"
+ units-per-em="2048"
+ panose-1="0 0 0 0 0 0 0 0 0 0"
+ ascent="2146"
+ descent="-555"
+ alphabetic="0" />
+<glyph unicode=" " glyph-name="space" horiz-adv-x="1229" />
+<glyph unicode="!" glyph-name="exclam" horiz-adv-x="1229" d="M690 471H504V1456H690V471ZM486 97Q486 145 514 177T600 209Q657 209 686 177T716 97Q716 51 687 20T600 -11Q543 -11 515 20T486 97Z" />
+<glyph unicode="&quot;" glyph-name="quotedbl" horiz-adv-x="1229" d="M505 1427L484 1057H354L355 1411V1536H505V1427ZM862 1427L841 1057H712L713 1411V1536H863L862 1427Z" />
+<glyph unicode="#" glyph-name="numbersign" horiz-adv-x="1229" d="M707 410H443L363 0H220L300 410H61V547H326L395 901H137V1040H422L504 1456H647L565 1040H829L911 1456H1054L972 1040H1177V901H945L876 547H1102V410H850L770 0H627L707 410ZM469 547H733L802
+901H538L469 547Z" />
+<glyph unicode="$" glyph-name="dollar" horiz-adv-x="1229" d="M907 375Q908 466 841 531T626 644Q533 673 458 711T330 801T247 922T218 1079Q218 1163 243 1231T315 1349T428 1431T576 1472V1692H725V1472Q893 1449 986 1331T1080 1008H896Q896 1075 880 1131T832
+1228T754 1292T649 1315Q528 1315 466 1252T403 1082Q403 1032 419 993T469 922T558 863T690 809Q784 778 859 740T986 651T1065 533T1093 377Q1093 290 1066 221T988 103T867 23T708 -16V-208H559V-17Q481 -9 410 19T283 100T195 235T162 429H347Q347 345 372
+290T436 201T526 154T627 140Q692 140 744 156T832 202T887 276T907 375Z" />
+<glyph unicode="%" glyph-name="percent" horiz-adv-x="1229" d="M44 1194Q44 1251 61 1302T112 1393T196 1454T311 1477Q377 1477 427 1455T511 1393T562 1303T580 1194V1117Q580 1060 563 1009T512 920T428 859T313 836Q248 836 198 858T113 919T62 1009T44
+1117V1194ZM182 1117Q182 1086 189 1058T212 1007T252 971T313 957Q348 957 372 970T412 1006T434 1057T442 1117V1194Q442 1225 435 1254T412 1306T372 1342T311 1356Q276 1356 252 1343T212 1307T189 1255T182 1194V1117ZM645 338Q645 395 662 446T714 536T798
+597T913 620Q979 620 1029 598T1113 536T1164 446T1182 338V260Q1182 203 1165 152T1113 63T1030 2T915 -21Q849 -21 799 1T714 62T663 152T645 260V338ZM783 260Q783 229 790 200T813 149T854 113T915 99Q950 99 974 112T1014 148T1036 200T1043 260V338Q1043
+369 1036 398T1014 449T973 485T913 499Q877 499 853 486T813 450T790 398T783 338V260ZM398 188L287 250L854 1287L965 1225L398 188Z" />
+<glyph unicode="&amp;" glyph-name="ampersand" horiz-adv-x="1229" d="M107 373Q107 440 127 497T183 603T269 697T380 785Q313 876 274 957T234 1126Q234 1211 259 1276T332 1386T445 1454T593 1477Q664 1477 722 1452T820 1385T883 1286T905 1168Q905 1115
+890 1072T847 990T781 918T699 851L605 771L912 368Q947 432 967 508T988 672H1155Q1155 544 1119 435T1012 237L1193 0H972L889 110Q815 47 726 14T539 -20Q440 -20 360 9T224 90T138 214T107 373ZM539 130Q610 130 676 156T799 230L476 661L448 637Q398 596 367
+557T320 484T298 421T292 373Q292 322 308 278T355 201T432 149T539 130ZM420 1128Q420 1072 445 1014T515 890L632 983Q663 1004 683 1027T714 1073T729 1121T734 1168Q734 1198 725 1226T697 1276T652 1311T593 1325Q550 1325 518 1309T464 1266T431 1204T420
+1128Z" />
+<glyph unicode="&apos;" glyph-name="quotesingle" horiz-adv-x="1229" d="M653 1425L632 1057H494L495 1409V1536H653V1425Z" />
+<glyph unicode="(" glyph-name="parenleft" horiz-adv-x="1229" d="M357 591Q357 734 379 860T440 1093T527 1290T631 1448T741 1566T846 1643L885 1521Q823 1474 762 1393T652 1197T573 931T542 593V579Q542 392 572 241T651 -26T761 -224T885 -357L846 -470Q796
+-442 741 -394T631 -276T527 -118T440 78T380 312T357 581V591Z" />
+<glyph unicode=")" glyph-name="parenright" horiz-adv-x="1229" d="M849 581Q849 438 827 312T766 79T679 -117T575 -275T465 -393T359 -470L320 -357Q382 -311 443 -229T553 -29T632 240T663 579V593Q663 780 631 932T549 1202T438 1402T320 1530L359 1643Q409
+1615 464 1566T574 1448T678 1290T766 1094T826 860T849 591V581Z" />
+<glyph unicode="*" glyph-name="asterisk" horiz-adv-x="1229" d="M537 683L160 779L214 954L579 803L554 1224H732L703 797L1066 947L1120 769L738 675L994 351L848 241L634 604L422 250L276 356L537 683Z" />
+<glyph unicode="+" glyph-name="plus" horiz-adv-x="1229" d="M710 781H1117V597H710V146H525V597H119V781H525V1206H710V781Z" />
+<glyph unicode="," glyph-name="comma" horiz-adv-x="1229" d="M643 43Q643 -4 632 -55T598 -157T543 -254T469 -336L354 -273Q402 -203 422 -124T442 40V219H643V43Z" />
+<glyph unicode="-" glyph-name="hyphen" d="M983 561H218V713H983V561Z" />
+<glyph unicode="." glyph-name="period" horiz-adv-x="1229" d="M496 120Q496 150 505 176T533 221T578 252T641 263Q677 263 704 252T750 222T778 176T788 120Q788 91 779 66T751 21T705 -8T641 -19Q605 -19 578 -9T533 21T506 65T496 120Z" />
+<glyph unicode="/" glyph-name="slash" horiz-adv-x="1229" d="M418 -125H252L860 1456H1025L418 -125Z" />
+<glyph unicode="0" glyph-name="zero" horiz-adv-x="1229" d="M1088 557Q1088 418 1056 311T964 130T817 18T618 -20Q507 -20 420 18T271 129T178 310T145 557V898Q145 1037 177 1144T269 1326T417 1438T616 1477Q728 1477 815 1439T963 1326T1056 1145T1088 898V557ZM331
+594L898 1029Q882 1174 814 1249T616 1325Q470 1325 401 1227T331 939V594ZM902 852L336 419Q353 278 423 204T618 130Q765 130 833 230T902 518V852Z" />
+<glyph unicode="1" glyph-name="one" horiz-adv-x="1229" d="M774 0H589V1220L208 1075V1244L759 1456H774V0Z" />
+<glyph unicode="2" glyph-name="two" horiz-adv-x="1229" d="M1067 0H113V133L590 663Q654 734 697 789T766 890T803 978T814 1064Q814 1118 797 1165T746 1248T665 1303T557 1324Q484 1324 431 1303T342 1244T289 1150T271 1027H85Q85 1119 117 1200T209 1343T357
+1440T557 1476Q660 1476 742 1446T882 1361T969 1233T1000 1072Q1000 1007 978 944T919 818T832 696T727 575L336 151H1067V0Z" />
+<glyph unicode="3" glyph-name="three" horiz-adv-x="1229" d="M390 817H522Q592 817 645 836T735 888T791 967T810 1068Q810 1195 745 1259T555 1324Q496 1324 448 1307T365 1256T311 1177T292 1074H106Q106 1157 138 1230T230 1358T371 1444T555 1476Q653 1476
+734 1450T873 1371T963 1242T995 1064Q995 1022 982 977T942 890T873 810T771 747Q842 724 889 687T965 604T1005 507T1017 406Q1017 304 982 225T885 92T739 9T556 -20Q460 -20 376 7T229 86T131 213T94 384H279Q279 327 298 280T354 200T441 149T556 131Q619
+131 670 147T757 198T812 282T832 402Q832 470 810 519T747 602T649 651T522 667H390V817Z" />
+<glyph unicode="4" glyph-name="four" horiz-adv-x="1229" d="M924 489H1127V338H924V0H739V338H75V447L727 1456H924V489ZM280 489H739V1210L709 1154L280 489Z" />
+<glyph unicode="5" glyph-name="five" horiz-adv-x="1229" d="M240 730L313 1456H1060V1276H469L428 888Q471 913 531 932T670 951Q771 951 851 916T987 818T1073 664T1103 464Q1103 360 1075 271T991 118T848 17T646 -20Q557 -20 478 5T338 80T237 206T187 383H363Q380
+259 454 195T646 131Q712 131 763 154T848 221T900 326T918 462Q918 530 899 589T842 691T751 760T627 785Q581 785 548 779T487 760T437 731T388 692L240 730Z" />
+<glyph unicode="6" glyph-name="six" horiz-adv-x="1229" d="M856 1457V1291H840Q748 1291 676 1272T548 1220T454 1142T388 1042T348 929T330 809Q356 840 390 866T463 912T548 943T644 955Q753 955 832 914T962 804T1037 650T1061 475Q1061 373 1033 283T947
+126T806 19T610 -20Q531 -20 466 3T348 65T257 159T193 275T154 404T141 538V625Q141 725 153 825T196 1018T277 1190T405 1330T590 1423T840 1457H856ZM614 801Q567 801 523 786T441 744T374 679T328 595V533Q328 437 351 363T413 237T503 160T610 133Q675 133
+725 158T809 227T861 332T879 466Q879 530 863 590T814 698T731 773T614 801Z" />
+<glyph unicode="7" glyph-name="seven" horiz-adv-x="1229" d="M1096 1352L493 0H299L900 1294H112V1456H1096V1352Z" />
+<glyph unicode="8" glyph-name="eight" horiz-adv-x="1229" d="M1070 1076Q1069 967 1011 882T856 749Q912 725 957 690T1035 608T1085 508T1103 393Q1103 294 1067 217T968 88T821 8T641 -20Q543 -20 459 7T312 86T213 216T177 393Q177 454 194 508T243 608T320
+690T421 750Q373 774 334 807T268 882T226 973T211 1076Q211 1171 243 1245T331 1371T466 1449T638 1476Q730 1476 809 1449T946 1371T1037 1246T1070 1076ZM918 397Q918 458 898 509T840 596T752 653T639 673Q576 673 525 653T438 597T383 509T363 397Q363 334
+382 285T438 201T525 149T641 131Q702 131 753 149T840 201T897 284T918 397ZM884 1073Q884 1128 866 1174T815 1253T737 1305T638 1324Q584 1324 540 1307T464 1256T415 1177T397 1073Q397 1016 414 971T464 893T541 843T640 825Q693 825 738 842T815 892T866
+971T884 1073Z" />
+<glyph unicode="9" glyph-name="nine" horiz-adv-x="1229" d="M373 164Q472 164 547 181T677 231T770 306T830 402T865 513T879 633Q855 603 824 577T754 530T668 499T564 487Q456 487 378 529T248 641T173 797T149 972Q149 1075 177 1166T262 1326T403 1435T600
+1476Q684 1476 751 1453T870 1388T959 1290T1019 1168T1054 1031T1065 887V820Q1065 727 1056 630T1021 441T946 270T821 129T634 34T373 -1H354V164H373ZM594 640Q641 640 685 655T767 699T834 766T881 852V912Q881 1009 859 1085T798 1213T709 1294T601 1322Q536
+1322 486 1296T402 1225T349 1117T331 982Q331 918 347 857T395 747T477 670T594 640Z" />
+<glyph unicode=":" glyph-name="colon" horiz-adv-x="1229" d="M546 120Q546 150 555 176T583 221T628 252T691 263Q727 263 754 252T800 222T828 176T838 120Q838 91 829 66T801 21T755 -8T691 -19Q655 -19 628 -9T583 21T556 65T546 120ZM546 996Q546 1026 555
+1052T583 1097T628 1128T691 1139Q727 1139 754 1128T800 1098T828 1052T838 996Q838 967 829 942T801 897T755 868T691 857Q655 857 628 867T583 897T556 941T546 996Z" />
+<glyph unicode=";" glyph-name="semicolon" horiz-adv-x="1229" d="M537 996Q537 1026 546 1052T574 1097T619 1128T682 1139Q718 1139 745 1128T791 1098T819 1052T829 996Q829 967 820 942T792 897T746 868T682 857Q646 857 619 867T574 897T547 941T537 996ZM775
+43Q775 -4 764 -55T730 -157T675 -254T601 -336L486 -273Q534 -203 554 -124T574 40V219H775V43Z" />
+<glyph unicode="&lt;" glyph-name="less" horiz-adv-x="1229" d="M406 663L353 646L406 628L1018 392V196L170 575V721L1018 1099V903L406 663Z" />
+<glyph unicode="=" glyph-name="equal" horiz-adv-x="1229" d="M1066 780H173V941H1066V780ZM1066 365H173V525H1066V365Z" />
+<glyph unicode="&gt;" glyph-name="greater" horiz-adv-x="1229" d="M178 909V1100L1061 722V576L178 197V389L825 633L885 650L825 669L178 909Z" />
+<glyph unicode="?" glyph-name="question" horiz-adv-x="1229" d="M511 410Q512 482 517 528T538 611T584 681T664 761Q698 796 733 833T798 910T847 996T866 1091Q866 1200 803 1257T623 1315Q576 1315 532 1304T453 1268T397 1206T376 1116H191Q192 1200 226
+1267T317 1380T453 1451T623 1476Q724 1476 803 1451T938 1376T1022 1257T1051 1097Q1051 1025 1026 962T962 842T875 736T779 645Q749 616 733 590T708 537T698 480T696 410H511ZM490 97Q490 145 518 177T604 209Q661 209 690 177T720 97Q720 51 691 20T604 -11Q547
+-11 519 20T490 97Z" />
+<glyph unicode="@" glyph-name="at" horiz-adv-x="1229" d="M1159 789Q1156 709 1140 626T1092 474T1008 363T883 320Q822 320 782 352T724 441Q689 385 638 353T524 320Q475 320 439 346T381 419T350 531T347 674Q356 785 384 874T456 1026T560 1122T690 1156Q757
+1156 801 1130T871 1067L827 563Q824 522 829 497T843 457T867 437T897 432Q934 432 962 460T1010 537T1039 651T1050 789Q1054 916 1029 1017T953 1188T826 1295T655 1332Q554 1332 471 1285T326 1152T230 949T191 691Q186 555 210 449T287 270T422 158T617 119Q647
+119 678 123T739 136T796 157T845 184L877 67Q852 48 820 34T753 11T682 -3T614 -8Q467 -8 362 43T190 188T94 409T69 691Q73 794 94 891T151 1074T239 1229T355 1351T495 1430T658 1458Q780 1458 875 1411T1034 1275T1131 1064T1159 789ZM470 674Q465 572 487
+515T557 458Q606 458 646 485T716 575L755 1020Q727 1033 697 1033Q644 1033 605 1008T538 936T495 823T470 674Z" />
+<glyph unicode="A" glyph-name="A" horiz-adv-x="1229" d="M869 377H383L266 0H81L551 1456H706L1168 0H984L869 377ZM433 538H820L628 1170L433 538Z" />
+<glyph unicode="B" glyph-name="B" horiz-adv-x="1229" d="M172 0V1456H605Q695 1455 781 1433T934 1363T1041 1243T1080 1068Q1079 1011 1062 964T1014 880T942 814T855 766Q913 749 961 718T1045 642T1099 541T1119 420Q1120 318 1080 240T973 110T819 29T638
+0H172ZM358 681V157H643Q701 158 753 176T845 228T908 309T932 418Q933 480 912 528T852 609T763 661T653 681H358ZM358 835H616Q668 836 718 851T807 896T870 969T894 1069Q894 1130 871 1173T808 1243T717 1284T611 1298H358V835Z" />
+<glyph unicode="C" glyph-name="C" horiz-adv-x="1229" d="M1117 438Q1104 337 1065 253T964 108T816 14T625 -20Q535 -20 462 5T331 76T232 182T163 315T122 466T107 626V829Q108 910 121 989T162 1140T231 1273T330 1380T461 1450T625 1476Q734 1476 820 1443T968
+1348T1067 1200T1117 1010H932Q923 1076 901 1133T842 1233T752 1300T625 1325Q559 1325 509 1304T422 1245T361 1159T321 1055T300 943T293 831V626Q293 572 299 515T321 402T360 297T421 211T508 152T625 130Q698 130 751 153T842 217T901 315T932 438H1117Z"
+/>
+<glyph unicode="D" glyph-name="D" horiz-adv-x="1229" d="M155 0V1456H492Q644 1454 763 1405T965 1267T1091 1055T1136 781V674Q1135 524 1092 401T966 189T764 51T492 0H155ZM343 1304V151H492Q610 152 696 192T838 303T922 468T951 674V783Q950 894 922 988T838
+1152T696 1262T492 1304H343Z" />
+<glyph unicode="E" glyph-name="E" horiz-adv-x="1229" d="M975 673H367V157H1076V0H182V1456H1067V1298H367V830H975V673Z" />
+<glyph unicode="F" glyph-name="F" horiz-adv-x="1229" d="M984 643H378V0H191V1456H1085V1298H378V800H984V643Z" />
+<glyph unicode="G" glyph-name="G" horiz-adv-x="1229" d="M1116 191Q1024 83 905 31T644 -20Q554 -19 479 7T343 80T238 190T163 326T117 480T100 643V812Q101 893 114 973T156 1128T226 1265T326 1375T459 1449T625 1476Q727 1476 813 1445T963 1355T1066 1214T1114
+1029H931Q920 1092 897 1145T837 1237T747 1296T626 1318Q560 1318 509 1296T420 1235T357 1147T316 1040T293 926T286 814V643Q287 588 295 530T322 415T368 309T435 221T527 161T645 137Q683 136 724 140T805 156T878 188T935 242L937 569H641V725H1113L1116
+191Z" />
+<glyph unicode="H" glyph-name="H" horiz-adv-x="1229" d="M1087 0H912V673H315V0H141V1456H315V830H912V1456H1087V0Z" />
+<glyph unicode="I" glyph-name="I" horiz-adv-x="1229" d="M174 1456H1054V1295H705V160H1054V0H174V160H515V1295H174V1456Z" />
+<glyph unicode="J" glyph-name="J" horiz-adv-x="1229" d="M857 1456H1046V443Q1044 342 1008 257T908 111T760 15T573 -20Q471 -20 388 11T243 99T144 238T98 421H286Q289 360 310 308T369 218T457 158T573 137Q639 137 691 161T779 228T835 325T857 443V1456Z" />
+<glyph unicode="K" glyph-name="K" horiz-adv-x="1229" d="M523 676L361 492V0H172V1456H361V745L502 921L929 1456H1154L645 819L1188 0H963L523 676Z" />
+<glyph unicode="L" glyph-name="L" horiz-adv-x="1229" d="M383 157H1095V0H198V1456H383V157Z" />
+<glyph unicode="M" glyph-name="M" horiz-adv-x="1229" d="M377 1456L614 728L870 1456H1100V0H920V581L935 1189L666 405H560L313 1168L328 581V0H148V1456H377Z" />
+<glyph unicode="N" glyph-name="N" horiz-adv-x="1229" d="M1086 0H898L333 1088L330 0H143V1456H331L896 370L899 1456H1086V0Z" />
+<glyph unicode="O" glyph-name="O" horiz-adv-x="1229" d="M1121 644Q1120 566 1108 487T1069 333T1003 195T906 83T778 8T615 -20Q525 -20 453 7T324 83T227 195T160 334T120 487T106 644V810Q107 888 119 967T158 1121T225 1259T322 1372T450 1448T613 1476Q703
+1476 776 1449T905 1373T1001 1261T1068 1122T1107 968T1121 810V644ZM938 812Q937 864 931 920T911 1032T872 1138T812 1227T727 1288T613 1311Q549 1311 500 1288T415 1227T355 1138T317 1032T296 920T288 812V644Q289 593 295 536T317 424T356 317T416 228T501
+166T615 143Q680 143 729 166T814 227T873 316T911 422T931 535T938 644V812Z" />
+<glyph unicode="P" glyph-name="P" horiz-adv-x="1229" d="M376 584V0H191V1456H663Q761 1454 848 1425T1002 1341T1106 1205T1145 1019Q1145 914 1107 834T1002 698T849 614T663 584H376ZM376 736H663Q727 737 781 756T875 812T937 899T960 1017Q960 1083 938
+1136T876 1226T782 1283T663 1304H376V736Z" />
+<glyph unicode="Q" glyph-name="Q" horiz-adv-x="1229" d="M1134 663Q1133 582 1120 500T1081 342T1013 200T913 84L1164 -125L1037 -246L749 -2Q687 -20 615 -20Q522 -20 448 8T317 86T217 201T148 344T108 501T94 663V791Q95 871 108 953T148 1111T216 1253T314
+1369T446 1447T614 1476Q707 1476 781 1448T913 1370T1012 1255T1080 1112T1120 954T1134 791V663ZM950 793Q949 848 944 907T924 1024T885 1134T823 1225T735 1288T614 1311Q546 1311 495 1288T407 1225T346 1133T306 1024T285 907T278 793V663Q278 609 284 550T306
+432T345 322T407 230T495 167T615 143Q684 143 735 166T824 229T885 321T923 431T943 548T950 663V793Z" />
+<glyph unicode="R" glyph-name="R" horiz-adv-x="1229" d="M656 594H365V0H181V1456H608Q710 1454 800 1427T957 1346T1063 1211T1102 1019Q1102 948 1082 889T1025 782T939 697T829 634L1138 12L1137 0H942L656 594ZM365 746H613Q675 747 730 765T826 819T892
+904T916 1021Q916 1091 893 1143T829 1231T732 1285T608 1304H365V746Z" />
+<glyph unicode="S" glyph-name="S" horiz-adv-x="1229" d="M936 368Q936 435 906 481T827 558T722 611T611 649Q534 674 455 709T310 795T203 915T161 1079Q161 1173 202 1247T311 1372T463 1449T634 1476Q729 1476 817 1445T973 1356T1082 1218T1125 1037H935Q927
+1100 904 1151T843 1240T753 1297T634 1318Q581 1318 531 1303T441 1258T378 1184T354 1082Q355 1019 386 976T464 902T567 852T672 817Q726 800 781 779T887 729T982 666T1059 587T1110 489T1129 370Q1129 272 1086 199T973 78T817 5T643 -20Q546 -20 453 10T287
+96T167 234T118 420H307Q316 352 344 300T416 211T517 156T643 137Q697 137 749 150T843 192T910 264T936 368Z" />
+<glyph unicode="T" glyph-name="T" horiz-adv-x="1229" d="M1156 1298H706V0H526V1298H76V1456H1156V1298Z" />
+<glyph unicode="U" glyph-name="U" horiz-adv-x="1229" d="M1088 1456L1090 470Q1088 368 1053 279T955 124T805 19T614 -20Q508 -20 421 18T272 122T175 278T139 470L141 1456H317L321 470Q322 405 341 345T397 239T488 165T614 137Q685 137 739 164T829 238T885
+345T906 470L909 1456H1088Z" />
+<glyph unicode="V" glyph-name="V" horiz-adv-x="1229" d="M610 298L954 1456H1151L692 0H531L71 1456H269L610 298Z" />
+<glyph unicode="W" glyph-name="W" horiz-adv-x="1229" d="M896 394L1007 1456H1182L1005 0H816L629 1097L440 0H250L73 1456H249L360 394L547 1456H708L896 394Z" />
+<glyph unicode="X" glyph-name="X" horiz-adv-x="1229" d="M625 885L939 1456H1157L734 734L1167 0H951L629 582L306 0H87L521 734L98 1456H315L625 885Z" />
+<glyph unicode="Y" glyph-name="Y" horiz-adv-x="1229" d="M603 725L935 1456H1145L692 543L689 0H517L514 543L61 1456H272L603 725Z" />
+<glyph unicode="Z" glyph-name="Z" horiz-adv-x="1229" d="M325 157H1079V0H116L114 144L839 1298H127V1456H1050L1052 1315L325 157Z" />
+<glyph unicode="[" glyph-name="bracketleft" horiz-adv-x="1229" d="M822 1512H601V-160H822V-312H426V1664H822V1512Z" />
+<glyph unicode="\" glyph-name="backslash" horiz-adv-x="1229" d="M231 1456H398L1006 -125H839L231 1456Z" />
+<glyph unicode="]" glyph-name="bracketright" horiz-adv-x="1229" d="M405 1664H802V-312H405V-160H627V1512H405V1664Z" />
+<glyph unicode="^" glyph-name="asciicircum" horiz-adv-x="1229" d="M403 677H231L551 1456H678L997 677H826L628 1163L613 1231L598 1163L403 677Z" />
+<glyph unicode="_" glyph-name="underscore" horiz-adv-x="1229" d="M1072 -151H155V0H1072V-151Z" />
+<glyph unicode="`" glyph-name="grave" horiz-adv-x="1229" d="M813 1215H663L415 1481H638L813 1215Z" />
+<glyph unicode="a" glyph-name="a" horiz-adv-x="1229" d="M885 0Q874 21 868 52T858 118Q830 90 796 65T720 21T631 -9T531 -20Q445 -20 376 5T258 72T183 173T156 300Q156 389 191 456T292 567T448 634T653 656H855V741Q855 790 837 829T786 895T706 938T599
+953Q544 953 501 939T427 902T381 848T364 782H178Q179 841 208 898T293 1000T428 1074T609 1102Q701 1102 780 1080T916 1012T1007 898T1040 739V236Q1040 182 1050 122T1078 16V0H885ZM558 141Q612 141 659 155T744 192T811 244T855 306V525H683Q521 525 431
+477T341 326Q341 286 354 252T395 194T463 155T558 141Z" />
+<glyph unicode="b" glyph-name="b" horiz-adv-x="1229" d="M1091 529Q1091 413 1063 313T981 138T848 22T665 -20Q561 -20 485 17T354 123L345 0H175V1536H360V966Q413 1032 488 1067T663 1102Q767 1102 847 1061T981 947T1063 773T1091 550V529ZM906 550Q906
+629 891 700T841 826T752 913T617 945Q569 945 529 933T458 898T402 845T360 778V305Q378 269 402 239T459 185T531 150T619 137Q695 137 749 168T838 254T889 379T906 529V550Z" />
+<glyph unicode="c" glyph-name="c" horiz-adv-x="1229" d="M635 130Q685 130 732 146T817 190T876 257T900 341H1075Q1074 269 1038 204T940 89T800 10T635 -20Q512 -20 420 24T267 142T174 314T143 520V562Q143 669 174 767T266 939T420 1058T635 1102Q731 1102
+811 1072T950 989T1042 864T1075 710H900Q899 759 879 802T823 879T740 931T635 950Q549 950 491 916T396 826T345 701T329 562V520Q329 449 344 380T395 255T489 165T635 130Z" />
+<glyph unicode="d" glyph-name="d" horiz-adv-x="1229" d="M139 550Q139 671 169 772T256 947T393 1061T572 1102Q669 1102 741 1070T867 975V1536H1052V0H882L874 114Q820 48 745 14T570 -20Q472 -20 393 22T257 138T170 312T139 529V550ZM324 529Q324 450 341
+379T393 254T484 169T616 137Q663 137 701 148T770 179T825 228T867 292V794Q831 860 770 902T618 945Q540 945 485 913T394 827T341 701T324 550V529Z" />
+<glyph unicode="e" glyph-name="e" horiz-adv-x="1229" d="M652 -20Q538 -20 443 19T279 128T173 294T135 503V545Q135 676 177 779T288 954T446 1064T628 1102Q745 1102 832 1062T978 950T1064 781T1093 571V488H320Q323 414 348 349T418 236T525 159T662 131Q761
+131 838 171T966 277L1079 189Q1052 148 1012 111T918 44T798 -2T652 -20ZM628 950Q572 950 522 930T432 870T365 773T326 640H908V654Q905 708 888 761T836 856T751 924T628 950Z" />
+<glyph unicode="f" glyph-name="f" d="M450 0V939H152V1082H450V1158Q450 1262 480 1340T566 1472T700 1552T874 1579Q940 1579 1002 1568T1131 1540L1109 1387Q1068 1401 1014 1411T897 1422Q767 1421 702 1356T636 1158V1082H1053V939H636V0H450Z" />
+<glyph unicode="g" glyph-name="g" horiz-adv-x="1229" d="M140 550Q140 671 169 772T255 947T391 1061T572 1102Q672 1102 747 1067T876 964L885 1082H1053V23Q1053 -84 1020 -167T926 -308T781 -396T595 -426Q553 -426 499 -417T390 -386T283 -328T194 -239L290
+-128Q324 -169 360 -197T433 -243T507 -267T581 -275Q647 -275 700 -256T790 -201T848 -110T868 14V107Q814 45 741 13T570 -20Q471 -20 391 22T256 138T170 312T140 529V550ZM325 529Q325 450 341 379T393 254T484 169T616 137Q664 137 702 148T771 181T826 231T868
+295V791Q850 825 826 853T771 901T702 933T618 945Q540 945 485 913T394 827T342 701T325 550V529Z" />
+<glyph unicode="h" glyph-name="h" horiz-adv-x="1229" d="M359 921Q417 1006 502 1053T693 1102Q778 1102 847 1078T966 1001T1041 870T1068 681V0H883V683Q883 816 820 881T638 945Q549 945 477 902T359 786V0H174V1536H359V921Z" />
+<glyph unicode="i" glyph-name="i" horiz-adv-x="1229" d="M203 1082H756V160H1109V0H203V160H571V921H203V1082ZM547 1366Q547 1412 574 1443T658 1475Q713 1475 741 1444T769 1366Q769 1321 741 1291T658 1261Q602 1261 575 1291T547 1366Z" />
+<glyph unicode="j" glyph-name="j" horiz-adv-x="1229" d="M299 1082H845V-7Q845 -111 817 -191T734 -325T601 -408T422 -437Q365 -437 316 -434T211 -420L224 -268Q241 -271 267 -273T319 -277T370 -279T409 -280Q461 -280 507 -267T587 -221T640 -137T660 -7V921H299V1082ZM634
+1367Q634 1412 661 1443T744 1475Q800 1475 828 1444T856 1367Q856 1322 828 1292T744 1261Q688 1261 661 1291T634 1367Z" />
+<glyph unicode="k" glyph-name="k" horiz-adv-x="1229" d="M498 505L362 374V0H176V1536H362V596L483 726L838 1082H1063L625 631L1130 0H895L498 505Z" />
+<glyph unicode="l" glyph-name="l" horiz-adv-x="1229" d="M203 1536H756V160H1109V0H203V160H571V1375H203V1536Z" />
+<glyph unicode="m" glyph-name="m" horiz-adv-x="1229" d="M259 1082L264 980Q297 1037 350 1069T480 1102Q629 1102 679 986Q711 1038 763 1069T891 1102Q1010 1102 1074 1029T1138 808V0H962V810Q962 946 837 945Q804 945 780 936T740 912T715 876T703 833V0H527V811Q527
+876 498 911T406 945Q348 945 316 922T269 861V0H93V1082H259Z" />
+<glyph unicode="n" glyph-name="n" horiz-adv-x="1229" d="M340 1082L353 922Q412 1006 498 1053T690 1102Q775 1102 844 1078T963 1003T1038 874T1065 687V0H880V683Q880 755 864 805T817 886T740 931T635 945Q542 945 472 899T359 776V0H174V1082H340Z" />
+<glyph unicode="o" glyph-name="o" horiz-adv-x="1229" d="M122 551Q122 668 156 768T253 943T408 1059T613 1102Q728 1102 819 1060T974 943T1072 769T1106 551V529Q1106 412 1072 312T975 138T820 22T615 -20Q500 -20 409 22T254 138T156 312T122 529V551ZM307
+529Q307 449 326 377T383 249T480 162T615 130Q691 130 748 162T844 249T902 376T921 529V551Q921 630 902 702T844 830T748 917T613 950Q536 950 479 918T384 830T326 703T307 551V529Z" />
+<glyph unicode="p" glyph-name="p" horiz-adv-x="1229" d="M1087 529Q1087 413 1059 313T977 138T844 22T662 -20Q563 -20 488 12T358 104V-416H173V1082H342L351 964Q405 1031 481 1066T659 1102Q763 1102 843 1061T977 947T1059 773T1087 550V529ZM902 550Q902
+629 885 700T831 826T738 913T604 945Q515 945 455 904T358 797V276Q394 212 454 172T606 131Q683 131 739 163T831 251T884 378T902 529V550Z" />
+<glyph unicode="q" glyph-name="q" horiz-adv-x="1229" d="M140 550Q140 671 169 772T254 947T391 1061T577 1102Q673 1102 746 1070T874 975L882 1082H1052V-416H867V98Q813 41 741 11T575 -20Q472 -20 392 22T255 138T170 312T140 529V550ZM325 529Q325 450
+342 378T396 251T487 164T620 131Q709 131 769 171T867 275V809Q848 840 824 865T769 910T702 939T622 950Q545 950 489 917T397 829T343 701T325 550V529Z" />
+<glyph unicode="r" glyph-name="r" horiz-adv-x="1229" d="M883 1102Q909 1102 937 1101T990 1096T1037 1087T1073 1076L1048 895Q994 907 946 912T844 918Q714 918 634 859T515 694V0H329V1082H505L514 910Q580 999 672 1050T883 1102Z" />
+<glyph unicode="s" glyph-name="s" horiz-adv-x="1229" d="M893 287Q893 320 880 346T836 395T754 435T627 469Q534 488 458 515T327 579T242 667T212 785Q212 851 243 908T330 1009T463 1077T634 1102Q734 1102 814 1076T950 1004T1036 897T1067 764H882Q882
+799 864 832T814 892T737 934T634 950Q575 950 531 937T457 902T412 851T397 791Q397 759 409 734T450 689T529 653T655 622Q755 601 834 574T967 508T1049 418T1078 300Q1078 228 1046 169T955 68T816 3T639 -20Q529 -20 443 9T298 87T207 200T175 333H360Q364
+276 390 237T456 174T544 140T639 130Q756 130 824 173T893 287Z" />
+<glyph unicode="t" glyph-name="t" horiz-adv-x="1229" d="M612 1344V1082H1024V939H612V351Q612 288 628 247T672 182T736 147T814 137Q844 137 876 140T938 149T995 159T1039 169L1065 38Q1042 24 1009 14T937 -4T855 -16T768 -20Q696 -20 634 0T525 65T453
+180T426 351V939H142V1082H426V1344H612Z" />
+<glyph unicode="u" glyph-name="u" horiz-adv-x="1229" d="M876 149Q822 69 741 25T554 -20Q469 -20 400 6T282 89T207 231T180 439V1082H365V437Q365 349 379 291T421 199T489 150T583 136Q696 136 765 182T869 306V1082H1055V0H887L876 149Z" />
+<glyph unicode="v" glyph-name="v" horiz-adv-x="1229" d="M598 266L615 199L633 266L936 1082H1125L684 0H543L98 1082H288L598 266Z" />
+<glyph unicode="w" glyph-name="w" horiz-adv-x="1229" d="M338 392L360 237L387 392L560 1082H679L851 392L880 222L907 392L1027 1082H1191L961 0H815L646 663L618 831L591 663L424 0H278L48 1082H212L338 392Z" />
+<glyph unicode="x" glyph-name="x" horiz-adv-x="1229" d="M621 681L910 1082H1127L724 547L1138 0H924L625 412L326 0H110L524 547L121 1082H335L621 681Z" />
+<glyph unicode="y" glyph-name="y" horiz-adv-x="1229" d="M572 384L620 253L950 1082H1157L608 -167Q589 -209 560 -256T488 -343T390 -410T261 -437Q248 -437 232 -436T200 -432T170 -426T147 -421L177 -270Q184 -270 196 -271T221 -273T246 -274T265 -275Q303
+-275 334 -254T390 -204T432 -144T460 -93L534 51L68 1082H275L572 384Z" />
+<glyph unicode="z" glyph-name="z" horiz-adv-x="1229" d="M396 151H1085V0H160V136L806 929H169V1082H1049V951L396 151Z" />
+<glyph unicode="{" glyph-name="braceleft" horiz-adv-x="1229" d="M978 -366Q913 -364 860 -344T765 -290T693 -211T642 -115T611 -9T601 99V268Q601 412 533 477T323 543V688Q464 688 532 752T601 962V1132Q601 1185 610 1239T638 1345T686 1442T758 1521T854
+1575T978 1597L999 1482Q927 1480 885 1448T821 1367T793 1255T787 1132V962Q786 842 731 752T560 614Q676 568 731 478T787 268V99Q787 37 797 -24T831 -135T896 -217T999 -251L978 -366Z" />
+<glyph unicode="|" glyph-name="bar" horiz-adv-x="1229" d="M689 -398H540V1456H689V-398Z" />
+<glyph unicode="}" glyph-name="braceright" horiz-adv-x="1229" d="M323 -251Q384 -249 425 -217T490 -136T525 -25T536 99V268Q536 387 591 477T762 614Q646 661 591 751T536 962V1132Q536 1193 530 1255T502 1366T438 1448T324 1482L344 1597Q413 1595 467
+1575T564 1521T635 1442T684 1346T712 1240T721 1132V962Q721 817 790 753T999 688V543Q859 542 790 477T721 268V99Q721 19 699 -62T631 -211T513 -321T344 -366L323 -251Z" />
+<glyph unicode="~" glyph-name="asciitilde" horiz-adv-x="1229" d="M1180 740Q1180 674 1158 613T1096 505T1001 430T879 402Q832 402 792 411T713 438T637 482T559 542Q504 587 454 613T348 639Q313 639 283 625T230 586T195 528T182 456L48 473Q48 539 70 599T131
+704T226 775T348 802Q394 802 435 793T516 765T592 721T669 663Q726 615 774 590T879 565Q914 565 944 581T997 623T1033 685T1046 758L1180 740Z" />
+<glyph unicode="&#xa1;" glyph-name="exclamdown" horiz-adv-x="1229" d="M522 611H707V-372H522V611ZM728 990Q728 944 699 913T613 881Q556 881 527 912T498 990Q498 1038 527 1070T613 1103Q669 1103 698 1071T728 990Z" />
+<glyph unicode="&#xa2;" glyph-name="cent" horiz-adv-x="1229" d="M639 130Q689 130 736 146T821 190T880 257T904 341H1079Q1078 277 1049 219T971 113T857 32T718 -13V-245H533V-10Q437 8 365 57T244 178T172 337T147 520V562Q147 657 171 744T244 903T364
+1024T533 1092V1318H718V1096Q798 1084 864 1050T978 964T1052 848T1079 710H904Q903 759 883 802T827 879T744 931T639 950Q553 950 495 916T400 826T349 701T333 562V520Q333 449 348 380T399 255T493 165T639 130Z" />
+<glyph unicode="&#xa3;" glyph-name="sterling" horiz-adv-x="1229" d="M463 626L471 400Q471 331 457 265T410 151H1148L1147 0H117V151H192Q219 158 237 186T267 250T282 327T286 400L278 626H113V778H273L264 1039Q264 1141 297 1222T390 1359T531 1446T709
+1476Q805 1476 881 1448T1009 1369T1090 1247T1118 1089H932Q932 1152 913 1196T860 1269T783 1311T691 1325Q640 1325 596 1306T519 1250T469 1160T450 1039L458 778H778V626H463Z" />
+<glyph unicode="&#xa4;" glyph-name="currency" horiz-adv-x="1229" d="M931 84Q869 34 794 7T634 -20Q549 -20 475 7T338 82L234 -26L103 109L215 224Q177 287 157 360T137 515Q137 601 159 678T223 820L103 944L234 1079L351 957Q411 1001 482 1025T634 1049Q714
+1049 786 1025T918 955L1038 1080L1170 944L1046 816Q1086 752 1108 676T1130 515Q1130 435 1110 363T1054 228L1170 109L1038 -27L931 84ZM309 515Q309 441 334 375T402 260T505 181T634 151Q703 151 762 180T864 259T932 375T957 515Q957 589 933 655T865 770T762
+849T634 878Q565 878 506 849T403 771T334 655T309 515Z" />
+<glyph unicode="&#xa5;" glyph-name="yen" horiz-adv-x="1229" d="M614 779L983 1456H1195L774 736H1092V615H704L703 613V446H1092V326H703V0H518V326H138V446H518V615H138V736H454L33 1456H245L614 779Z" />
+<glyph unicode="&#xa6;" glyph-name="brokenbar" horiz-adv-x="1229" d="M511 -270V521H696V-270H511ZM696 698H511V1456H696V698Z" />
+<glyph unicode="&#xa7;" glyph-name="section" horiz-adv-x="1229" d="M1140 431Q1140 334 1091 265T954 156Q1023 108 1059 40T1096 -128Q1096 -216 1061 -284T961 -399T807 -470T608 -495Q510 -495 417 -473T250 -400T132 -267T87 -64L272 -62Q272 -140 302
+-193T380 -280T488 -328T608 -343Q678 -343 734 -327T829 -283T890 -216T911 -130Q911 -82 892 -47T831 16T723 71T560 126Q454 155 368 189T220 273T126 389T93 551Q93 645 141 714T275 825Q209 874 174 942T138 1110Q138 1194 173 1261T273 1377T427 1450T626
+1476Q740 1476 831 1448T985 1365T1083 1230T1118 1045H933Q933 1103 913 1154T853 1243T757 1303T626 1325Q551 1325 495 1309T400 1264T342 1196T323 1112Q323 1060 340 1024T397 960T505 907T672 854Q780 824 866 789T1014 706T1107 592T1140 431ZM598 691Q553
+703 513 715T435 742Q357 724 318 674T278 553Q278 500 296 463T355 397T464 342T632 289Q676 276 716 264T793 238Q869 258 912 307T956 428Q956 476 936 512T873 579T762 635T598 691Z" />
+<glyph unicode="&#xa8;" glyph-name="dieresis" horiz-adv-x="1229" d="M287 1371Q287 1416 314 1446T396 1477Q450 1477 478 1447T506 1371Q506 1326 478 1296T396 1266Q342 1266 315 1296T287 1371ZM717 1369Q717 1414 744 1445T826 1476Q880 1476 908 1445T936
+1369Q936 1325 908 1295T826 1264Q772 1264 745 1294T717 1369Z" />
+<glyph unicode="&#xa9;" glyph-name="copyright" horiz-adv-x="1229" d="M862 443Q862 327 797 268T613 208Q551 208 503 230T422 290T371 382T353 498V585Q353 647 371 700T422 793T503 854T613 876Q731 876 797 816T863 641H753Q752 712 719 745T613 778Q575
+778 547 763T499 723T471 662T462 586V498Q462 457 471 422T499 361T546 320T613 305Q685 305 718 338T752 443H862ZM177 542Q177 444 211 358T305 207T446 105T623 68Q717 68 798 105T940 207T1034 358T1068 542Q1068 639 1034 724T940 874T799 974T623 1011Q528
+1011 447 975T305 874T211 725T177 542ZM90 542Q90 620 108 691T161 825T243 939T351 1026T479 1082T623 1102Q736 1102 833 1058T1001 939T1114 761T1155 542Q1155 425 1114 323T1002 144T833 23T623 -21Q510 -21 413 23T244 143T131 322T90 542Z" />
+<glyph unicode="&#xaa;" glyph-name="ordfeminine" horiz-adv-x="1229" d="M780 705Q765 749 759 802Q729 759 673 726T533 691Q415 691 350 752T284 918Q284 1030 370 1089T615 1149H756V1201Q755 1266 724 1301T630 1336Q563 1336 523 1310T481 1231L320 1243Q321
+1294 343 1337T406 1411T504 1459T630 1476Q695 1476 748 1459T839 1407T898 1320T919 1200V886Q919 838 925 794T945 705H780ZM572 828Q599 828 628 836T683 859T728 892T756 928V1037H616Q537 1037 492 1004T446 922Q446 877 478 853T572 828Z" />
+<glyph unicode="&#xab;" glyph-name="guillemotleft" horiz-adv-x="1229" d="M390 515L648 118H507L212 506V525L507 914H648L390 515ZM725 515L983 118H842L547 506V525L842 914H983L725 515Z" />
+<glyph unicode="&#xac;" glyph-name="logicalnot" horiz-adv-x="1229" d="M1019 375H834V639H189V800H1019V375Z" />
+<glyph unicode="&#xad;" glyph-name="uni00AD" d="M983 561H218V713H983V561Z" />
+<glyph unicode="&#xae;" glyph-name="registered" horiz-adv-x="1229" d="M87 540Q87 657 128 759T240 938T409 1057T620 1101Q733 1101 830 1058T998 938T1111 760T1152 540Q1152 423 1111 321T999 143T830 23T620 -21Q507 -21 410 23T241 142T128 320T87 540ZM174
+540Q174 442 208 357T302 207T443 106T620 69Q714 69 795 106T937 207T1031 356T1065 540Q1065 638 1031 724T937 873T796 973T620 1010Q525 1010 444 974T302 874T208 724T174 540ZM508 485V232H401V867H612Q667 867 711 855T786 819T835 759T852 675Q852 619
+817 580T716 519L863 232H753L633 485H508ZM508 582H627Q675 583 710 606T745 671Q745 726 715 748T612 770H508V582Z" />
+<glyph unicode="&#xaf;" glyph-name="overscore" horiz-adv-x="1229" d="M971 1313H257V1456H971V1313Z" />
+<glyph unicode="&#xb0;" glyph-name="degree" horiz-adv-x="1229" d="M361 1216Q361 1269 381 1316T436 1399T517 1455T616 1476Q667 1476 712 1456T792 1400T846 1317T866 1216Q866 1163 846 1116T792 1035T713 980T616 960Q564 960 518 980T437 1034T382 1115T361
+1216ZM485 1216Q485 1189 495 1166T523 1125T565 1098T616 1088Q643 1088 666 1097T706 1124T733 1165T743 1216Q743 1244 734 1268T707 1311T666 1339T616 1349Q589 1349 565 1339T524 1311T496 1269T485 1216Z" />
+<glyph unicode="&#xb1;" glyph-name="plusminus" horiz-adv-x="1229" d="M709 855H1072V703H709V289H541V703H156V855H541V1267H709V855ZM1030 1H195V152H1030V1Z" />
+<glyph unicode="&#xb2;" glyph-name="twosuperior" horiz-adv-x="1229" d="M934 667H335V775L623 1046Q688 1106 716 1149T744 1228Q744 1277 711 1307T620 1338Q546 1338 511 1301T474 1205H316Q316 1259 337 1307T397 1390T491 1446T616 1467Q682 1467 735 1451T825
+1403T882 1329T902 1230Q902 1190 889 1155T850 1085T789 1015T710 940L535 795H934V667Z" />
+<glyph unicode="&#xb3;" glyph-name="threesuperior" horiz-adv-x="1229" d="M526 1125H610Q680 1125 718 1154T756 1234Q756 1279 723 1308T623 1337Q566 1337 529 1313T491 1245H334Q334 1296 356 1337T417 1406T508 1450T620 1466Q684 1466 738 1452T831 1409T891
+1338T913 1241Q913 1185 878 1140T777 1070Q927 1028 927 886Q927 830 904 787T839 715T741 671T621 655Q564 655 511 668T416 710T349 783T323 888H481Q482 842 521 813T626 783Q694 783 732 813T769 894Q769 957 726 983T610 1009H526V1125Z" />
+<glyph unicode="&#xb4;" glyph-name="acute" horiz-adv-x="1229" d="M594 1481H818L550 1215H410L594 1481Z" />
+<glyph unicode="&#xb5;" glyph-name="mu" horiz-adv-x="1229" d="M373 1082V460Q373 360 389 296T434 195T504 144T593 130Q700 130 762 171T854 283V1082H1040V0H873L864 116Q820 51 756 16T601 -20Q455 -20 373 53V-416H188V1082H373Z" />
+<glyph unicode="&#xb6;" glyph-name="paragraph" horiz-adv-x="1229" d="M790 0V520H703Q585 520 494 555T339 652T244 800T211 988Q211 1089 243 1174T339 1322T493 1420T703 1456H976V0H790Z" />
+<glyph unicode="&#xb7;" glyph-name="middot" horiz-adv-x="1229" d="M504 729Q504 777 533 809T619 841Q675 841 704 809T734 729Q734 682 705 651T619 619Q562 619 533 650T504 729Z" />
+<glyph unicode="&#xb8;" glyph-name="cedilla" horiz-adv-x="1229" d="M630 0L618 -52Q647 -57 674 -69T723 -101T758 -151T771 -225Q771 -322 692 -378T468 -435L461 -328Q495 -328 525 -323T577 -305T613 -275T627 -229Q627 -204 617 -187T587 -160T536 -143T466
+-134L497 0H630Z" />
+<glyph unicode="&#xb9;" glyph-name="onesuperior" horiz-adv-x="1229" d="M758 665H601V1266L386 1209V1337L740 1454H758V665Z" />
+<glyph unicode="&#xba;" glyph-name="ordmasculine" horiz-adv-x="1229" d="M272 1141Q272 1214 296 1275T364 1381T472 1451T614 1476Q693 1476 756 1451T864 1382T932 1276T956 1141V1024Q956 952 933 891T865 785T758 715T616 690Q536 690 473 715T365 784T296
+890T272 1024V1141ZM435 1024Q435 982 446 947T479 885T536 844T616 829Q660 829 693 844T749 885T782 946T794 1024V1141Q794 1182 783 1217T749 1279T693 1321T614 1336Q569 1336 536 1321T480 1280T446 1218T435 1141V1024Z" />
+<glyph unicode="&#xbb;" glyph-name="guillemotright" horiz-adv-x="1229" d="M383 949L677 560V541L383 152H241L499 550L241 949H383ZM728 949L1022 560V541L728 152H586L844 550L586 949H728Z" />
+<glyph unicode="&#xbc;" glyph-name="onequarter" horiz-adv-x="1229" d="M383 751H242V1292L48 1241V1356L367 1461H383V751ZM658 221L545 287L1058 1241L1172 1175L658 221ZM1068 269H1164V152H1068V0H927V152H589L584 244L923 710H1068V269ZM726 269H927V529L912
+504L726 269Z" />
+<glyph unicode="&#xbd;" glyph-name="onehalf" horiz-adv-x="1229" d="M598 221L485 287L998 1241L1112 1175L598 221ZM371 747H230V1288L36 1237V1352L355 1457H371V747ZM1174 0H635V97L894 341Q952 395 977 434T1003 505Q1003 549 973 576T891 604Q825 604 793
+570T760 484H618Q618 533 636 576T690 651T776 701T888 720Q947 720 994 705T1075 663T1127 596T1145 507Q1145 471 1133 439T1098 376T1043 313T972 246L815 115H1174V0Z" />
+<glyph unicode="&#xbe;" glyph-name="threequarters" horiz-adv-x="1229" d="M469 220L356 286L869 1240L983 1174L469 220ZM1101 269H1197V152H1101V0H960V152H622L617 244L956 710H1101V269ZM759 269H960V529L945 504L759 269ZM221 1157H296Q359 1157 393 1183T428
+1255Q428 1296 398 1322T308 1348Q257 1348 224 1327T189 1265H48Q48 1311 68 1348T123 1410T204 1450T305 1464Q363 1464 411 1451T495 1413T549 1349T569 1262Q569 1211 537 1171T446 1108Q581 1070 581 942Q581 892 560 853T502 788T414 748T306 734Q255 734
+207 746T121 784T61 849T38 944H180Q181 902 216 876T311 849Q372 849 406 876T439 949Q439 1006 400 1029T296 1053H221V1157Z" />
+<glyph unicode="&#xbf;" glyph-name="questiondown" horiz-adv-x="1229" d="M724 673Q723 602 719 556T699 474T655 405T576 326Q545 291 512 254T452 175T407 89T389 -7Q389 -116 447 -173T614 -231Q657 -231 697 -220T769 -184T820 -122T839 -32H1024Q1023 -116
+992 -183T905 -296T776 -367T614 -392Q517 -392 441 -367T312 -292T232 -173T204 -13Q204 59 227 123T287 243T369 349T460 442Q489 471 505 496T528 547T537 604T539 673H724ZM509 988Q509 1036 538 1068T624 1101Q681 1101 710 1069T739 988Q739 942 710 911T624
+879Q567 879 538 910T509 988Z" />
+<glyph unicode="&#xc0;" glyph-name="Agrave" horiz-adv-x="1229" d="M869 377H383L266 0H81L551 1456H706L1168 0H984L869 377ZM433 538H820L628 1170L433 538ZM710 1561H560L312 1827H535L710 1561Z" />
+<glyph unicode="&#xc1;" glyph-name="Aacute" horiz-adv-x="1229" d="M869 377H383L266 0H81L551 1456H706L1168 0H984L869 377ZM433 538H820L628 1170L433 538ZM727 1824H951L683 1558H543L727 1824Z" />
+<glyph unicode="&#xc2;" glyph-name="Acircumflex" horiz-adv-x="1229" d="M869 377H383L266 0H81L551 1456H706L1168 0H984L869 377ZM433 538H820L628 1170L433 538ZM933 1624V1599H779L629 1750L480 1599H328V1625L573 1864H685L933 1624Z" />
+<glyph unicode="&#xc3;" glyph-name="Atilde" horiz-adv-x="1229" d="M869 377H383L266 0H81L551 1456H706L1168 0H984L869 377ZM433 538H820L628 1170L433 538ZM973 1844Q973 1799 958 1758T915 1687T850 1638T767 1620Q734 1620 708 1627T661 1644T620 1667T582
+1690T540 1708T491 1715Q447 1715 419 1683T389 1604L285 1628Q285 1673 300 1714T343 1787T407 1838T491 1857Q533 1857 566 1842T631 1810T695 1777T767 1762Q788 1762 807 1771T839 1795T861 1830T870 1874L973 1844Z" />
+<glyph unicode="&#xc4;" glyph-name="Adieresis" horiz-adv-x="1229" d="M869 377H383L266 0H81L551 1456H706L1168 0H984L869 377ZM433 538H820L628 1170L433 538ZM302 1718Q302 1763 329 1793T411 1824Q465 1824 493 1794T521 1718Q521 1673 493 1643T411 1613Q357
+1613 330 1643T302 1718ZM732 1716Q732 1761 759 1792T841 1823Q895 1823 923 1792T951 1716Q951 1672 923 1642T841 1611Q787 1611 760 1641T732 1716Z" />
+<glyph unicode="&#xc5;" glyph-name="Aring" horiz-adv-x="1229" d="M869 377H383L266 0H81L551 1456H706L1168 0H984L869 377ZM433 538H820L628 1170L433 538ZM424 1732Q424 1775 440 1811T484 1874T549 1916T629 1931Q671 1931 707 1916T771 1875T815 1812T831
+1732Q831 1689 815 1653T772 1592T708 1552T629 1538Q587 1538 550 1552T485 1591T441 1653T424 1732ZM523 1732Q523 1710 531 1691T554 1658T588 1635T629 1627Q651 1627 669 1635T702 1657T724 1690T732 1732Q732 1780 703 1810T629 1841Q585 1841 554 1811T523
+1732Z" />
+<glyph unicode="&#xc6;" glyph-name="AE" horiz-adv-x="1229" d="M1195 0H664L663 353H353L230 0H32L592 1456H1172V1304H835L837 833H1139V682H837L838 151H1195V0ZM414 527H663L661 1233L414 527Z" />
+<glyph unicode="&#xc7;" glyph-name="Ccedilla" horiz-adv-x="1229" d="M1117 438Q1104 337 1065 253T964 108T816 14T625 -20Q535 -20 462 5T331 76T232 182T163 315T122 466T107 626V829Q108 910 121 989T162 1140T231 1273T330 1380T461 1450T625 1476Q734
+1476 820 1443T968 1348T1067 1200T1117 1010H932Q923 1076 901 1133T842 1233T752 1300T625 1325Q559 1325 509 1304T422 1245T361 1159T321 1055T300 943T293 831V626Q293 572 299 515T321 402T360 297T421 211T508 152T625 130Q698 130 751 153T842 217T901
+315T932 438H1117ZM684 0L672 -52Q701 -57 728 -69T777 -101T812 -151T825 -225Q825 -322 746 -378T522 -435L515 -328Q549 -328 579 -323T631 -305T667 -275T681 -229Q681 -204 671 -187T641 -160T590 -143T520 -134L551 0H684Z" />
+<glyph unicode="&#xc8;" glyph-name="Egrave" horiz-adv-x="1229" d="M975 673H367V157H1076V0H182V1456H1067V1298H367V830H975V673ZM700 1561H550L302 1827H525L700 1561Z" />
+<glyph unicode="&#xc9;" glyph-name="Eacute" horiz-adv-x="1229" d="M975 673H367V157H1076V0H182V1456H1067V1298H367V830H975V673ZM717 1824H941L673 1558H533L717 1824Z" />
+<glyph unicode="&#xca;" glyph-name="Ecircumflex" horiz-adv-x="1229" d="M975 673H367V157H1076V0H182V1456H1067V1298H367V830H975V673ZM923 1624V1599H769L619 1750L470 1599H318V1625L563 1864H675L923 1624Z" />
+<glyph unicode="&#xcb;" glyph-name="Edieresis" horiz-adv-x="1229" d="M975 673H367V157H1076V0H182V1456H1067V1298H367V830H975V673ZM292 1718Q292 1763 319 1793T401 1824Q455 1824 483 1794T511 1718Q511 1673 483 1643T401 1613Q347 1613 320 1643T292
+1718ZM722 1716Q722 1761 749 1792T831 1823Q885 1823 913 1792T941 1716Q941 1672 913 1642T831 1611Q777 1611 750 1641T722 1716Z" />
+<glyph unicode="&#xcc;" glyph-name="Igrave" horiz-adv-x="1229" d="M174 1456H1054V1295H705V160H1054V0H174V160H515V1295H174V1456ZM648 1561H498L250 1827H473L648 1561Z" />
+<glyph unicode="&#xcd;" glyph-name="Iacute" horiz-adv-x="1229" d="M174 1456H1054V1295H705V160H1054V0H174V160H515V1295H174V1456ZM665 1824H889L621 1558H481L665 1824Z" />
+<glyph unicode="&#xce;" glyph-name="Icircumflex" horiz-adv-x="1229" d="M174 1456H1054V1295H705V160H1054V0H174V160H515V1295H174V1456ZM871 1624V1599H717L567 1750L418 1599H266V1625L511 1864H623L871 1624Z" />
+<glyph unicode="&#xcf;" glyph-name="Idieresis" horiz-adv-x="1229" d="M174 1456H1054V1295H705V160H1054V0H174V160H515V1295H174V1456ZM241 1718Q241 1763 268 1793T350 1824Q404 1824 432 1794T460 1718Q460 1673 432 1643T350 1613Q296 1613 269 1643T241
+1718ZM671 1716Q671 1761 698 1792T780 1823Q834 1823 862 1792T890 1716Q890 1672 862 1642T780 1611Q726 1611 699 1641T671 1716Z" />
+<glyph unicode="&#xd0;" glyph-name="Eth" horiz-adv-x="1259" d="M185 0V666H-44V817H185V1456H522Q674 1454 793 1405T995 1267T1121 1055T1166 781V674Q1165 524 1122 401T996 189T794 51T522 0H185ZM593 666H373V151H522Q640 152 726 192T868 303T952 468T981
+674V783Q980 894 952 988T868 1152T726 1262T522 1304H373V817H593V666Z" />
+<glyph unicode="&#xd1;" glyph-name="Ntilde" horiz-adv-x="1229" d="M1086 0H898L333 1088L330 0H143V1456H331L896 370L899 1456H1086V0ZM934 1844Q934 1799 919 1758T876 1687T811 1638T728 1620Q695 1620 669 1627T622 1644T581 1667T543 1690T501 1708T452
+1715Q408 1715 380 1683T350 1604L246 1628Q246 1673 261 1714T304 1787T368 1838T452 1857Q494 1857 527 1842T592 1810T656 1777T728 1762Q749 1762 768 1771T800 1795T822 1830T831 1874L934 1844Z" />
+<glyph unicode="&#xd2;" glyph-name="Ograve" horiz-adv-x="1229" d="M1121 644Q1120 566 1108 487T1069 333T1003 195T906 83T778 8T615 -20Q525 -20 453 7T324 83T227 195T160 334T120 487T106 644V810Q107 888 119 967T158 1121T225 1259T322 1372T450 1448T613
+1476Q703 1476 776 1449T905 1373T1001 1261T1068 1122T1107 968T1121 810V644ZM938 812Q937 864 931 920T911 1032T872 1138T812 1227T727 1288T613 1311Q549 1311 500 1288T415 1227T355 1138T317 1032T296 920T288 812V644Q289 593 295 536T317 424T356 317T416
+228T501 166T615 143Q680 143 729 166T814 227T873 316T911 422T931 535T938 644V812ZM714 1582H564L316 1848H539L714 1582Z" />
+<glyph unicode="&#xd3;" glyph-name="Oacute" horiz-adv-x="1229" d="M1121 644Q1120 566 1108 487T1069 333T1003 195T906 83T778 8T615 -20Q525 -20 453 7T324 83T227 195T160 334T120 487T106 644V810Q107 888 119 967T158 1121T225 1259T322 1372T450 1448T613
+1476Q703 1476 776 1449T905 1373T1001 1261T1068 1122T1107 968T1121 810V644ZM938 812Q937 864 931 920T911 1032T872 1138T812 1227T727 1288T613 1311Q549 1311 500 1288T415 1227T355 1138T317 1032T296 920T288 812V644Q289 593 295 536T317 424T356 317T416
+228T501 166T615 143Q680 143 729 166T814 227T873 316T911 422T931 535T938 644V812ZM731 1845H955L687 1579H547L731 1845Z" />
+<glyph unicode="&#xd4;" glyph-name="Ocircumflex" horiz-adv-x="1229" d="M1121 644Q1120 566 1108 487T1069 333T1003 195T906 83T778 8T615 -20Q525 -20 453 7T324 83T227 195T160 334T120 487T106 644V810Q107 888 119 967T158 1121T225 1259T322 1372T450
+1448T613 1476Q703 1476 776 1449T905 1373T1001 1261T1068 1122T1107 968T1121 810V644ZM938 812Q937 864 931 920T911 1032T872 1138T812 1227T727 1288T613 1311Q549 1311 500 1288T415 1227T355 1138T317 1032T296 920T288 812V644Q289 593 295 536T317 424T356
+317T416 228T501 166T615 143Q680 143 729 166T814 227T873 316T911 422T931 535T938 644V812ZM937 1645V1620H783L633 1771L484 1620H332V1646L577 1885H689L937 1645Z" />
+<glyph unicode="&#xd5;" glyph-name="Otilde" horiz-adv-x="1229" d="M1121 644Q1120 566 1108 487T1069 333T1003 195T906 83T778 8T615 -20Q525 -20 453 7T324 83T227 195T160 334T120 487T106 644V810Q107 888 119 967T158 1121T225 1259T322 1372T450 1448T613
+1476Q703 1476 776 1449T905 1373T1001 1261T1068 1122T1107 968T1121 810V644ZM938 812Q937 864 931 920T911 1032T872 1138T812 1227T727 1288T613 1311Q549 1311 500 1288T415 1227T355 1138T317 1032T296 920T288 812V644Q289 593 295 536T317 424T356 317T416
+228T501 166T615 143Q680 143 729 166T814 227T873 316T911 422T931 535T938 644V812ZM977 1865Q977 1820 962 1779T919 1708T854 1659T771 1641Q738 1641 712 1648T665 1665T624 1688T586 1711T544 1729T495 1736Q451 1736 423 1704T393 1625L289 1649Q289 1694
+304 1735T347 1808T411 1859T495 1878Q537 1878 570 1863T635 1831T699 1798T771 1783Q792 1783 811 1792T843 1816T865 1851T874 1895L977 1865Z" />
+<glyph unicode="&#xd6;" glyph-name="Odieresis" horiz-adv-x="1229" d="M1121 644Q1120 566 1108 487T1069 333T1003 195T906 83T778 8T615 -20Q525 -20 453 7T324 83T227 195T160 334T120 487T106 644V810Q107 888 119 967T158 1121T225 1259T322 1372T450 1448T613
+1476Q703 1476 776 1449T905 1373T1001 1261T1068 1122T1107 968T1121 810V644ZM938 812Q937 864 931 920T911 1032T872 1138T812 1227T727 1288T613 1311Q549 1311 500 1288T415 1227T355 1138T317 1032T296 920T288 812V644Q289 593 295 536T317 424T356 317T416
+228T501 166T615 143Q680 143 729 166T814 227T873 316T911 422T931 535T938 644V812ZM306 1739Q306 1784 333 1814T415 1845Q469 1845 497 1815T525 1739Q525 1694 497 1664T415 1634Q361 1634 334 1664T306 1739ZM736 1737Q736 1782 763 1813T845 1844Q899 1844
+927 1813T955 1737Q955 1693 927 1663T845 1632Q791 1632 764 1662T736 1737Z" />
+<glyph unicode="&#xd7;" glyph-name="multiply" horiz-adv-x="1229" d="M181 329L511 666L184 1000L303 1123L631 788L960 1123L1079 1000L751 666L1082 329L963 206L631 543L300 206L181 329Z" />
+<glyph unicode="&#xd8;" glyph-name="Oslash" horiz-adv-x="1229" d="M1113 644Q1112 566 1100 487T1061 333T995 195T898 83T770 8T607 -20Q516 -20 445 7T317 83L213 -93H71L231 177Q197 226 172 282T131 398T106 520T98 644V810Q99 888 111 967T150 1121T217
+1259T314 1372T442 1448T605 1476Q707 1476 785 1442T923 1348L1022 1516H1164L1003 1244Q1031 1197 1051 1145T1085 1037T1105 924T1113 810V644ZM280 644Q281 575 292 498T333 350L831 1193Q794 1246 739 1278T605 1311Q541 1311 492 1288T407 1227T347 1138T309
+1032T288 920T280 812V644ZM930 812Q929 870 922 934T895 1061L404 231Q441 190 491 167T607 143Q672 143 721 166T806 227T865 316T903 422T923 535T930 644V812Z" />
+<glyph unicode="&#xd9;" glyph-name="Ugrave" horiz-adv-x="1229" d="M1088 1456L1090 470Q1088 368 1053 279T955 124T805 19T614 -20Q508 -20 421 18T272 122T175 278T139 470L141 1456H317L321 470Q322 405 341 345T397 239T488 165T614 137Q685 137 739 164T829
+238T885 345T906 470L909 1456H1088ZM740 1549H590L342 1815H565L740 1549Z" />
+<glyph unicode="&#xda;" glyph-name="Uacute" horiz-adv-x="1229" d="M1088 1456L1090 470Q1088 368 1053 279T955 124T805 19T614 -20Q508 -20 421 18T272 122T175 278T139 470L141 1456H317L321 470Q322 405 341 345T397 239T488 165T614 137Q685 137 739 164T829
+238T885 345T906 470L909 1456H1088ZM757 1812H981L713 1546H573L757 1812Z" />
+<glyph unicode="&#xdb;" glyph-name="Ucircumflex" horiz-adv-x="1229" d="M1088 1456L1090 470Q1088 368 1053 279T955 124T805 19T614 -20Q508 -20 421 18T272 122T175 278T139 470L141 1456H317L321 470Q322 405 341 345T397 239T488 165T614 137Q685 137 739
+164T829 238T885 345T906 470L909 1456H1088ZM963 1612V1587H809L659 1738L510 1587H358V1613L603 1852H715L963 1612Z" />
+<glyph unicode="&#xdc;" glyph-name="Udieresis" horiz-adv-x="1229" d="M1088 1456L1090 470Q1088 368 1053 279T955 124T805 19T614 -20Q508 -20 421 18T272 122T175 278T139 470L141 1456H317L321 470Q322 405 341 345T397 239T488 165T614 137Q685 137 739
+164T829 238T885 345T906 470L909 1456H1088ZM332 1706Q332 1751 359 1781T441 1812Q495 1812 523 1782T551 1706Q551 1661 523 1631T441 1601Q387 1601 360 1631T332 1706ZM762 1704Q762 1749 789 1780T871 1811Q925 1811 953 1780T981 1704Q981 1660 953 1630T871
+1599Q817 1599 790 1629T762 1704Z" />
+<glyph unicode="&#xdd;" glyph-name="Yacute" horiz-adv-x="1229" d="M603 725L935 1456H1145L692 543L689 0H517L514 543L61 1456H272L603 725ZM707 1823H931L663 1557H523L707 1823Z" />
+<glyph unicode="&#xde;" glyph-name="Thorn" horiz-adv-x="1229" d="M353 1456V1163H630Q747 1163 837 1132T990 1044T1085 909T1118 738Q1118 645 1086 567T991 433T838 345T630 313H353V0H168V1456H353ZM353 1011V465H630Q708 465 765 487T859 547T915 634T933
+736Q933 790 915 839T860 927T766 988T630 1011H353Z" />
+<glyph unicode="&#xdf;" glyph-name="germandbls" horiz-adv-x="1229" d="M353 0H169V1087Q169 1199 198 1287T280 1435T405 1526T562 1558Q637 1558 702 1538T816 1475T893 1370T921 1220Q921 1141 901 1088T857 992T814 912T794 823Q794 777 816 740T873 668T947
+598T1021 522T1077 431T1100 316Q1100 231 1075 168T1003 63T894 0T754 -21Q713 -21 669 -16T585 -1T510 22T456 51L498 206Q516 195 542 182T600 157T666 138T735 130Q782 130 816 143T872 181T904 237T915 307Q915 354 893 392T836 465T762 535T688 612T631 705T608
+825Q608 864 618 897T643 959T677 1016T710 1073T736 1134T746 1204Q746 1254 731 1291T692 1354T636 1393T572 1406Q473 1406 414 1324T353 1087V0Z" />
+<glyph unicode="&#xe0;" glyph-name="agrave" horiz-adv-x="1229" d="M885 0Q874 21 868 52T858 118Q830 90 796 65T720 21T631 -9T531 -20Q445 -20 376 5T258 72T183 173T156 300Q156 389 191 456T292 567T448 634T653 656H855V741Q855 790 837 829T786 895T706
+938T599 953Q544 953 501 939T427 902T381 848T364 782H178Q179 841 208 898T293 1000T428 1074T609 1102Q701 1102 780 1080T916 1012T1007 898T1040 739V236Q1040 182 1050 122T1078 16V0H885ZM558 141Q612 141 659 155T744 192T811 244T855 306V525H683Q521
+525 431 477T341 326Q341 286 354 252T395 194T463 155T558 141ZM962 1239H812L564 1505H787L962 1239Z" />
+<glyph unicode="&#xe1;" glyph-name="aacute" horiz-adv-x="1229" d="M885 0Q874 21 868 52T858 118Q830 90 796 65T720 21T631 -9T531 -20Q445 -20 376 5T258 72T183 173T156 300Q156 389 191 456T292 567T448 634T653 656H855V741Q855 790 837 829T786 895T706
+938T599 953Q544 953 501 939T427 902T381 848T364 782H178Q179 841 208 898T293 1000T428 1074T609 1102Q701 1102 780 1080T916 1012T1007 898T1040 739V236Q1040 182 1050 122T1078 16V0H885ZM558 141Q612 141 659 155T744 192T811 244T855 306V525H683Q521
+525 431 477T341 326Q341 286 354 252T395 194T463 155T558 141ZM723 1502H947L679 1236H539L723 1502Z" />
+<glyph unicode="&#xe2;" glyph-name="acircumflex" horiz-adv-x="1229" d="M885 0Q874 21 868 52T858 118Q830 90 796 65T720 21T631 -9T531 -20Q445 -20 376 5T258 72T183 173T156 300Q156 389 191 456T292 567T448 634T653 656H855V741Q855 790 837 829T786
+895T706 938T599 953Q544 953 501 939T427 902T381 848T364 782H178Q179 841 208 898T293 1000T428 1074T609 1102Q701 1102 780 1080T916 1012T1007 898T1040 739V236Q1040 182 1050 122T1078 16V0H885ZM558 141Q612 141 659 155T744 192T811 244T855 306V525H683Q521
+525 431 477T341 326Q341 286 354 252T395 194T463 155T558 141ZM929 1302V1277H775L625 1428L476 1277H324V1303L569 1542H681L929 1302Z" />
+<glyph unicode="&#xe3;" glyph-name="atilde" horiz-adv-x="1229" d="M885 0Q874 21 868 52T858 118Q830 90 796 65T720 21T631 -9T531 -20Q445 -20 376 5T258 72T183 173T156 300Q156 389 191 456T292 567T448 634T653 656H855V741Q855 790 837 829T786 895T706
+938T599 953Q544 953 501 939T427 902T381 848T364 782H178Q179 841 208 898T293 1000T428 1074T609 1102Q701 1102 780 1080T916 1012T1007 898T1040 739V236Q1040 182 1050 122T1078 16V0H885ZM558 141Q612 141 659 155T744 192T811 244T855 306V525H683Q521
+525 431 477T341 326Q341 286 354 252T395 194T463 155T558 141ZM969 1522Q969 1477 954 1436T911 1365T846 1316T763 1298Q730 1298 704 1305T657 1322T616 1345T578 1368T536 1386T487 1393Q443 1393 415 1361T385 1282L281 1306Q281 1351 296 1392T339 1465T403
+1516T487 1535Q529 1535 562 1520T627 1488T691 1455T763 1440Q784 1440 803 1449T835 1473T857 1508T866 1552L969 1522Z" />
+<glyph unicode="&#xe4;" glyph-name="adieresis" horiz-adv-x="1229" d="M885 0Q874 21 868 52T858 118Q830 90 796 65T720 21T631 -9T531 -20Q445 -20 376 5T258 72T183 173T156 300Q156 389 191 456T292 567T448 634T653 656H855V741Q855 790 837 829T786 895T706
+938T599 953Q544 953 501 939T427 902T381 848T364 782H178Q179 841 208 898T293 1000T428 1074T609 1102Q701 1102 780 1080T916 1012T1007 898T1040 739V236Q1040 182 1050 122T1078 16V0H885ZM558 141Q612 141 659 155T744 192T811 244T855 306V525H683Q521
+525 431 477T341 326Q341 286 354 252T395 194T463 155T558 141ZM298 1396Q298 1441 325 1471T407 1502Q461 1502 489 1472T517 1396Q517 1351 489 1321T407 1291Q353 1291 326 1321T298 1396ZM728 1394Q728 1439 755 1470T837 1501Q891 1501 919 1470T947 1394Q947
+1350 919 1320T837 1289Q783 1289 756 1319T728 1394Z" />
+<glyph unicode="&#xe5;" glyph-name="aring" horiz-adv-x="1229" d="M885 0Q874 21 868 52T858 118Q830 90 796 65T720 21T631 -9T531 -20Q445 -20 376 5T258 72T183 173T156 300Q156 389 191 456T292 567T448 634T653 656H855V741Q855 790 837 829T786 895T706
+938T599 953Q544 953 501 939T427 902T381 848T364 782H178Q179 841 208 898T293 1000T428 1074T609 1102Q701 1102 780 1080T916 1012T1007 898T1040 739V236Q1040 182 1050 122T1078 16V0H885ZM558 141Q612 141 659 155T744 192T811 244T855 306V525H683Q521
+525 431 477T341 326Q341 286 354 252T395 194T463 155T558 141ZM420 1410Q420 1453 436 1489T480 1552T545 1594T625 1609Q667 1609 703 1594T767 1553T811 1490T827 1410Q827 1367 811 1331T768 1270T704 1230T625 1216Q583 1216 546 1230T481 1269T437 1331T420
+1410ZM519 1410Q519 1388 527 1369T550 1336T584 1313T625 1305Q647 1305 665 1313T698 1335T720 1368T728 1410Q728 1458 699 1488T625 1519Q581 1519 550 1489T519 1410Z" />
+<glyph unicode="&#xe6;" glyph-name="ae" horiz-adv-x="1229" d="M896 -20Q793 -20 719 17T599 126Q580 97 555 71T496 24T423 -8T335 -20Q264 -20 210 2T119 63T63 159T43 285Q43 451 147 543T448 635H511V783Q511 859 480 904T381 950Q318 950 279 909T240 792L61
+800Q61 869 83 924T147 1019T248 1080T379 1102Q462 1102 524 1075T626 995Q669 1046 728 1074T862 1102Q943 1102 1004 1076T1108 1001T1171 884T1193 731V497H688V411Q688 345 701 293T744 205T820 150T932 130Q970 130 999 137T1052 154T1095 177T1130 200L1176
+64Q1163 54 1140 40T1083 12T1002 -10T896 -20ZM862 950Q817 950 785 931T731 878T700 801T688 709V640H1017V768Q1017 803 1007 835T977 893T928 934T862 950ZM371 130Q408 130 446 151T512 202L511 495H450Q397 495 355 478T284 431T240 363T224 281Q224 250
+233 223T261 175T307 142T371 130Z" />
+<glyph unicode="&#xe7;" glyph-name="ccedilla" horiz-adv-x="1229" d="M635 130Q685 130 732 146T817 190T876 257T900 341H1075Q1074 269 1038 204T940 89T800 10T635 -20Q512 -20 420 24T267 142T174 314T143 520V562Q143 669 174 767T266 939T420 1058T635
+1102Q731 1102 811 1072T950 989T1042 864T1075 710H900Q899 759 879 802T823 879T740 931T635 950Q549 950 491 916T396 826T345 701T329 562V520Q329 449 344 380T395 255T489 165T635 130ZM705 0L693 -52Q722 -57 749 -69T798 -101T833 -151T846 -225Q846 -322
+767 -378T543 -435L536 -328Q570 -328 600 -323T652 -305T688 -275T702 -229Q702 -204 692 -187T662 -160T611 -143T541 -134L572 0H705Z" />
+<glyph unicode="&#xe8;" glyph-name="egrave" horiz-adv-x="1229" d="M652 -20Q538 -20 443 19T279 128T173 294T135 503V545Q135 676 177 779T288 954T446 1064T628 1102Q745 1102 832 1062T978 950T1064 781T1093 571V488H320Q323 414 348 349T418 236T525 159T662
+131Q761 131 838 171T966 277L1079 189Q1052 148 1012 111T918 44T798 -2T652 -20ZM628 950Q572 950 522 930T432 870T365 773T326 640H908V654Q905 708 888 761T836 856T751 924T628 950ZM957 1240H807L559 1506H782L957 1240Z" />
+<glyph unicode="&#xe9;" glyph-name="eacute" horiz-adv-x="1229" d="M652 -20Q538 -20 443 19T279 128T173 294T135 503V545Q135 676 177 779T288 954T446 1064T628 1102Q745 1102 832 1062T978 950T1064 781T1093 571V488H320Q323 414 348 349T418 236T525 159T662
+131Q761 131 838 171T966 277L1079 189Q1052 148 1012 111T918 44T798 -2T652 -20ZM628 950Q572 950 522 930T432 870T365 773T326 640H908V654Q905 708 888 761T836 856T751 924T628 950ZM718 1503H942L674 1237H534L718 1503Z" />
+<glyph unicode="&#xea;" glyph-name="ecircumflex" horiz-adv-x="1229" d="M652 -20Q538 -20 443 19T279 128T173 294T135 503V545Q135 676 177 779T288 954T446 1064T628 1102Q745 1102 832 1062T978 950T1064 781T1093 571V488H320Q323 414 348 349T418 236T525
+159T662 131Q761 131 838 171T966 277L1079 189Q1052 148 1012 111T918 44T798 -2T652 -20ZM628 950Q572 950 522 930T432 870T365 773T326 640H908V654Q905 708 888 761T836 856T751 924T628 950ZM924 1303V1278H770L620 1429L471 1278H319V1304L564 1543H676L924
+1303Z" />
+<glyph unicode="&#xeb;" glyph-name="edieresis" horiz-adv-x="1229" d="M652 -20Q538 -20 443 19T279 128T173 294T135 503V545Q135 676 177 779T288 954T446 1064T628 1102Q745 1102 832 1062T978 950T1064 781T1093 571V488H320Q323 414 348 349T418 236T525
+159T662 131Q761 131 838 171T966 277L1079 189Q1052 148 1012 111T918 44T798 -2T652 -20ZM628 950Q572 950 522 930T432 870T365 773T326 640H908V654Q905 708 888 761T836 856T751 924T628 950ZM293 1397Q293 1442 320 1472T402 1503Q456 1503 484 1473T512
+1397Q512 1352 484 1322T402 1292Q348 1292 321 1322T293 1397ZM723 1395Q723 1440 750 1471T832 1502Q886 1502 914 1471T942 1395Q942 1351 914 1321T832 1290Q778 1290 751 1320T723 1395Z" />
+<glyph unicode="&#xec;" glyph-name="igrave" horiz-adv-x="1229" d="M203 1082H756V160H1109V0H203V160H571V921H203V1082ZM1003 1218H853L605 1484H828L1003 1218Z" />
+<glyph unicode="&#xed;" glyph-name="iacute" horiz-adv-x="1229" d="M203 1082H756V160H1109V0H203V160H571V921H203V1082ZM764 1481H988L720 1215H580L764 1481Z" />
+<glyph unicode="&#xee;" glyph-name="icircumflex" horiz-adv-x="1229" d="M203 1082H756V160H1109V0H203V160H571V921H203V1082ZM970 1281V1256H816L666 1407L517 1256H365V1282L610 1521H722L970 1281Z" />
+<glyph unicode="&#xef;" glyph-name="idieresis" horiz-adv-x="1229" d="M203 1082H756V160H1109V0H203V160H571V921H203V1082ZM339 1375Q339 1420 366 1450T448 1481Q502 1481 530 1451T558 1375Q558 1330 530 1300T448 1270Q394 1270 367 1300T339 1375ZM769
+1373Q769 1418 796 1449T878 1480Q932 1480 960 1449T988 1373Q988 1329 960 1299T878 1268Q824 1268 797 1298T769 1373Z" />
+<glyph unicode="&#xf0;" glyph-name="eth" horiz-adv-x="1229" d="M845 1286Q949 1168 1007 1004T1066 633V571Q1066 433 1029 324T924 138T766 21T565 -20Q457 -20 367 17T211 121T109 276T73 467Q73 582 109 676T211 837T366 940T561 977Q649 977 725 943T859
+853Q836 963 791 1047T684 1194L418 1042L345 1141L584 1278Q543 1305 500 1325T410 1362L467 1521Q547 1499 618 1463T753 1375L983 1507L1056 1407L845 1286ZM881 635Q881 648 881 660T880 685Q863 712 835 737T766 782T675 813T561 825Q492 825 436 797T341
+719T280 605T259 467Q259 405 280 345T340 237T437 160T569 130Q641 130 698 162T796 251T859 390T881 571V635Z" />
+<glyph unicode="&#xf1;" glyph-name="ntilde" horiz-adv-x="1229" d="M340 1082L353 922Q412 1006 498 1053T690 1102Q775 1102 844 1078T963 1003T1038 874T1065 687V0H880V683Q880 755 864 805T817 886T740 931T635 945Q542 945 472 899T359 776V0H174V1082H340ZM949
+1522Q949 1477 934 1436T891 1365T826 1316T743 1298Q710 1298 684 1305T637 1322T596 1345T558 1368T516 1386T467 1393Q423 1393 395 1361T365 1282L261 1306Q261 1351 276 1392T319 1465T383 1516T467 1535Q509 1535 542 1520T607 1488T671 1455T743 1440Q764
+1440 783 1449T815 1473T837 1508T846 1552L949 1522Z" />
+<glyph unicode="&#xf2;" glyph-name="ograve" horiz-adv-x="1229" d="M122 551Q122 668 156 768T253 943T408 1059T613 1102Q728 1102 819 1060T974 943T1072 769T1106 551V529Q1106 412 1072 312T975 138T820 22T615 -20Q500 -20 409 22T254 138T156 312T122
+529V551ZM307 529Q307 449 326 377T383 249T480 162T615 130Q691 130 748 162T844 249T902 376T921 529V551Q921 630 902 702T844 830T748 917T613 950Q536 950 479 918T384 830T326 703T307 551V529ZM951 1239H801L553 1505H776L951 1239Z" />
+<glyph unicode="&#xf3;" glyph-name="oacute" horiz-adv-x="1229" d="M122 551Q122 668 156 768T253 943T408 1059T613 1102Q728 1102 819 1060T974 943T1072 769T1106 551V529Q1106 412 1072 312T975 138T820 22T615 -20Q500 -20 409 22T254 138T156 312T122
+529V551ZM307 529Q307 449 326 377T383 249T480 162T615 130Q691 130 748 162T844 249T902 376T921 529V551Q921 630 902 702T844 830T748 917T613 950Q536 950 479 918T384 830T326 703T307 551V529ZM712 1502H936L668 1236H528L712 1502Z" />
+<glyph unicode="&#xf4;" glyph-name="ocircumflex" horiz-adv-x="1229" d="M122 551Q122 668 156 768T253 943T408 1059T613 1102Q728 1102 819 1060T974 943T1072 769T1106 551V529Q1106 412 1072 312T975 138T820 22T615 -20Q500 -20 409 22T254 138T156 312T122
+529V551ZM307 529Q307 449 326 377T383 249T480 162T615 130Q691 130 748 162T844 249T902 376T921 529V551Q921 630 902 702T844 830T748 917T613 950Q536 950 479 918T384 830T326 703T307 551V529ZM918 1302V1277H764L614 1428L465 1277H313V1303L558 1542H670L918
+1302Z" />
+<glyph unicode="&#xf5;" glyph-name="otilde" horiz-adv-x="1229" d="M122 551Q122 668 156 768T253 943T408 1059T613 1102Q728 1102 819 1060T974 943T1072 769T1106 551V529Q1106 412 1072 312T975 138T820 22T615 -20Q500 -20 409 22T254 138T156 312T122
+529V551ZM307 529Q307 449 326 377T383 249T480 162T615 130Q691 130 748 162T844 249T902 376T921 529V551Q921 630 902 702T844 830T748 917T613 950Q536 950 479 918T384 830T326 703T307 551V529ZM958 1522Q958 1477 943 1436T900 1365T835 1316T752 1298Q719
+1298 693 1305T646 1322T605 1345T567 1368T525 1386T476 1393Q432 1393 404 1361T374 1282L270 1306Q270 1351 285 1392T328 1465T392 1516T476 1535Q518 1535 551 1520T616 1488T680 1455T752 1440Q773 1440 792 1449T824 1473T846 1508T855 1552L958 1522Z"
+/>
+<glyph unicode="&#xf6;" glyph-name="odieresis" horiz-adv-x="1229" d="M122 551Q122 668 156 768T253 943T408 1059T613 1102Q728 1102 819 1060T974 943T1072 769T1106 551V529Q1106 412 1072 312T975 138T820 22T615 -20Q500 -20 409 22T254 138T156 312T122
+529V551ZM307 529Q307 449 326 377T383 249T480 162T615 130Q691 130 748 162T844 249T902 376T921 529V551Q921 630 902 702T844 830T748 917T613 950Q536 950 479 918T384 830T326 703T307 551V529ZM287 1396Q287 1441 314 1471T396 1502Q450 1502 478 1472T506
+1396Q506 1351 478 1321T396 1291Q342 1291 315 1321T287 1396ZM717 1394Q717 1439 744 1470T826 1501Q880 1501 908 1470T936 1394Q936 1350 908 1320T826 1289Q772 1289 745 1319T717 1394Z" />
+<glyph unicode="&#xf7;" glyph-name="divide" horiz-adv-x="1229" d="M1113 600H115V784H1113V600ZM507 1097Q507 1142 534 1173T616 1204Q670 1204 698 1173T726 1097Q726 1052 698 1022T616 992Q562 992 535 1022T507 1097ZM505 282Q505 327 532 358T614 389Q668
+389 696 358T724 282Q724 238 696 208T614 177Q560 177 533 207T505 282Z" />
+<glyph unicode="&#xf8;" glyph-name="oslash" horiz-adv-x="1229" d="M122 551Q122 668 156 768T253 943T408 1059T613 1102Q670 1102 720 1092T816 1061L889 1209H1012L911 1004Q1005 929 1055 812T1106 551V529Q1106 412 1072 312T975 138T820 22T615 -20Q561
+-20 514 -11T423 16L349 -135H226L327 70Q228 143 175 262T122 529V551ZM307 529Q307 438 331 360T403 224L745 918Q716 933 684 941T613 950Q536 950 479 918T384 830T326 703T307 551V529ZM921 551Q921 636 899 712T832 845L492 156Q547 130 615 130Q691 130
+748 162T844 249T902 376T921 529V551Z" />
+<glyph unicode="&#xf9;" glyph-name="ugrave" horiz-adv-x="1229" d="M876 149Q822 69 741 25T554 -20Q469 -20 400 6T282 89T207 231T180 439V1082H365V437Q365 349 379 291T421 199T489 150T583 136Q696 136 765 182T869 306V1082H1055V0H887L876 149ZM950 1219H800L552
+1485H775L950 1219Z" />
+<glyph unicode="&#xfa;" glyph-name="uacute" horiz-adv-x="1229" d="M876 149Q822 69 741 25T554 -20Q469 -20 400 6T282 89T207 231T180 439V1082H365V437Q365 349 379 291T421 199T489 150T583 136Q696 136 765 182T869 306V1082H1055V0H887L876 149ZM711 1482H935L667
+1216H527L711 1482Z" />
+<glyph unicode="&#xfb;" glyph-name="ucircumflex" horiz-adv-x="1229" d="M876 149Q822 69 741 25T554 -20Q469 -20 400 6T282 89T207 231T180 439V1082H365V437Q365 349 379 291T421 199T489 150T583 136Q696 136 765 182T869 306V1082H1055V0H887L876 149ZM917
+1282V1257H763L613 1408L464 1257H312V1283L557 1522H669L917 1282Z" />
+<glyph unicode="&#xfc;" glyph-name="udieresis" horiz-adv-x="1229" d="M876 149Q822 69 741 25T554 -20Q469 -20 400 6T282 89T207 231T180 439V1082H365V437Q365 349 379 291T421 199T489 150T583 136Q696 136 765 182T869 306V1082H1055V0H887L876 149ZM287
+1376Q287 1421 314 1451T396 1482Q450 1482 478 1452T506 1376Q506 1331 478 1301T396 1271Q342 1271 315 1301T287 1376ZM717 1374Q717 1419 744 1450T826 1481Q880 1481 908 1450T936 1374Q936 1330 908 1300T826 1269Q772 1269 745 1299T717 1374Z" />
+<glyph unicode="&#xfd;" glyph-name="yacute" horiz-adv-x="1229" d="M572 384L620 253L950 1082H1157L608 -167Q589 -209 560 -256T488 -343T390 -410T261 -437Q248 -437 232 -436T200 -432T170 -426T147 -421L177 -270Q184 -270 196 -271T221 -273T246 -274T265
+-275Q303 -275 334 -254T390 -204T432 -144T460 -93L534 51L68 1082H275L572 384ZM731 1482H955L687 1216H547L731 1482Z" />
+<glyph unicode="&#xfe;" glyph-name="thorn" horiz-adv-x="1229" d="M1087 529Q1087 413 1060 313T980 138T848 22T668 -20Q568 -20 491 12T358 107V-416H173V1558H358V970Q413 1034 489 1068T665 1102Q769 1102 848 1061T980 947T1060 773T1087 550V529ZM902
+550Q902 629 885 700T833 826T742 913T610 945Q519 945 457 904T358 796V278Q396 213 457 172T612 131Q688 131 743 163T833 251T885 378T902 529V550Z" />
+<glyph unicode="&#xff;" glyph-name="ydieresis" horiz-adv-x="1229" d="M572 384L620 253L950 1082H1157L608 -167Q589 -209 560 -256T488 -343T390 -410T261 -437Q248 -437 232 -436T200 -432T170 -426T147 -421L177 -270Q184 -270 196 -271T221 -273T246 -274T265
+-275Q303 -275 334 -254T390 -204T432 -144T460 -93L534 51L68 1082H275L572 384ZM306 1376Q306 1421 333 1451T415 1482Q469 1482 497 1452T525 1376Q525 1331 497 1301T415 1271Q361 1271 334 1301T306 1376ZM736 1374Q736 1419 763 1450T845 1481Q899 1481 927
+1450T955 1374Q955 1330 927 1300T845 1269Q791 1269 764 1299T736 1374Z" />
+<glyph unicode="&#x2013;" glyph-name="endash" horiz-adv-x="1229" d="M1159 651H74V802H1159V651Z" />
+<glyph unicode="&#x2014;" glyph-name="emdash" horiz-adv-x="1229" d="M1164 651H79V802H1164V651Z" />
+<glyph unicode="&#x2018;" glyph-name="quoteleft" horiz-adv-x="1229" d="M492 1185Q492 1232 503 1284T537 1386T592 1483T666 1565L767 1493Q720 1422 697 1348T673 1188V1039H492V1185Z" />
+<glyph unicode="&#x2019;" glyph-name="quoteright" horiz-adv-x="1229" d="M736 1411Q736 1364 725 1312T691 1210T636 1113T562 1031L461 1103Q508 1174 531 1248T555 1408V1558H736V1411Z" />
+<glyph unicode="&#x201a;" glyph-name="quotesinglbase" horiz-adv-x="1229" d="M723 76Q723 29 712 -22T678 -124T623 -220T549 -303L444 -230Q491 -160 514 -86T538 74V225H723V76Z" />
+<glyph unicode="&#x201c;" glyph-name="quotedblleft" horiz-adv-x="1229" d="M329 1185Q329 1232 340 1284T374 1386T429 1483T503 1565L604 1493Q557 1422 534 1348T510 1188V1039H329V1185ZM654 1185Q654 1232 665 1284T699 1386T754 1483T828 1565L929 1493Q882
+1422 859 1348T835 1188V1039H654V1185Z" />
+<glyph unicode="&#x201d;" glyph-name="quotedblright" horiz-adv-x="1229" d="M576 1411Q576 1364 565 1312T531 1210T476 1113T402 1031L301 1103Q348 1174 371 1248T395 1408V1558H576V1411ZM908 1411Q908 1364 897 1312T863 1210T808 1113T734 1031L633 1103Q680
+1174 703 1248T727 1408V1558H908V1411Z" />
+<glyph unicode="&#x201e;" glyph-name="quotedblbase" horiz-adv-x="1229" d="M582 75Q582 28 571 -24T537 -126T482 -222T408 -305L303 -232Q350 -162 373 -88T397 72V223H582V75ZM872 75Q872 28 861 -24T827 -126T772 -222T698 -305L593 -232Q640 -162 663 -88T687
+72V223H872V75Z" />
+<glyph unicode="&#x2022;" glyph-name="bullet" horiz-adv-x="1229" d="M410 790Q410 833 424 869T465 932T528 973T613 988Q660 988 698 973T762 932T803 870T817 790V732Q817 689 803 653T763 591T699 550T614 535Q567 535 530 549T466 590T425 653T410 732V790Z" />
+<glyph unicode="&#x2039;" glyph-name="guilsinglleft" horiz-adv-x="1229" d="M574 550L832 153H691L396 541V560L691 949H832L574 550Z" />
+<glyph unicode="&#x203a;" glyph-name="guilsinglright" horiz-adv-x="1229" d="M538 949L832 560V541L538 152H396L654 550L396 949H538Z" />
+</font>
+</defs>
+</svg>
diff --git a/site/assets/fonts/Roboto_Mono_400.ttf b/site/assets/fonts/Roboto_Mono_400.ttf
new file mode 100644
index 00000000..27363d17
--- /dev/null
+++ b/site/assets/fonts/Roboto_Mono_400.ttf
Binary files differ
diff --git a/site/assets/fonts/Roboto_Mono_400.woff b/site/assets/fonts/Roboto_Mono_400.woff
new file mode 100644
index 00000000..0ea5db89
--- /dev/null
+++ b/site/assets/fonts/Roboto_Mono_400.woff
Binary files differ
diff --git a/site/assets/fonts/Roboto_Mono_400.woff2 b/site/assets/fonts/Roboto_Mono_400.woff2
new file mode 100644
index 00000000..6163de7b
--- /dev/null
+++ b/site/assets/fonts/Roboto_Mono_400.woff2
Binary files differ
diff --git a/site/assets/fonts/Roboto_Mono_500.eot b/site/assets/fonts/Roboto_Mono_500.eot
new file mode 100644
index 00000000..fb603779
--- /dev/null
+++ b/site/assets/fonts/Roboto_Mono_500.eot
Binary files differ
diff --git a/site/assets/fonts/Roboto_Mono_500.svg b/site/assets/fonts/Roboto_Mono_500.svg
new file mode 100644
index 00000000..34c55b65
--- /dev/null
+++ b/site/assets/fonts/Roboto_Mono_500.svg
@@ -0,0 +1,387 @@
+<?xml version="1.0" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<svg xmlns="http://www.w3.org/2000/svg">
+<defs >
+<font id="RobotoMono" horiz-adv-x="1231" ><font-face
+ font-family="Roboto Mono Medium"
+ units-per-em="2048"
+ panose-1="0 0 0 0 0 0 0 0 0 0"
+ ascent="2146"
+ descent="-555"
+ alphabetic="0" />
+<glyph unicode=" " glyph-name="space" horiz-adv-x="1229" />
+<glyph unicode="!" glyph-name="exclam" horiz-adv-x="1229" d="M716 493H490L482 1456H724L716 493ZM463 119Q463 177 499 215T604 254Q672 254 709 216T746 119Q746 62 709 25T604 -13Q536 -13 500 24T463 119Z" />
+<glyph unicode="&quot;" glyph-name="quotedbl" horiz-adv-x="1229" d="M495 1429L470 1030H312V1536H495V1429ZM886 1429L861 1030H703V1536H886V1429Z" />
+<glyph unicode="#" glyph-name="numbersign" horiz-adv-x="1229" d="M675 410H448L372 0H206L283 410H51V567H312L371 881H126V1040H401L479 1456H644L566 1040H793L871 1456H1038L960 1040H1167V881H930L871 567H1091V410H842L765 0H599L675 410ZM478 567H705L763
+881H536L478 567Z" />
+<glyph unicode="$" glyph-name="dollar" horiz-adv-x="1229" d="M879 380Q879 422 866 457T822 521T744 576T627 625Q529 657 450 696T315 788T229 910T199 1073Q199 1157 226 1226T302 1347T422 1431T580 1473V1691H740V1471Q827 1459 895 1423T1012 1327T1085
+1187T1111 1005H872Q872 1068 857 1118T814 1204T746 1259T653 1278Q597 1278 557 1263T490 1222T451 1158T438 1075Q438 1032 450 998T493 935T572 881T694 830Q793 796 871 757T1005 665T1089 543T1118 382Q1118 294 1090 225T1010 105T884 24T719 -16V-210H560V-17Q479
+-9 404 20T271 103T178 241T143 442H382Q382 364 404 313T463 232T544 189T635 176Q753 176 816 231T879 380Z" />
+<glyph unicode="%" glyph-name="percent" horiz-adv-x="1229" d="M39 1196Q39 1254 56 1305T109 1394T196 1454T317 1477Q386 1477 438 1455T525 1394T577 1305T595 1196V1119Q595 1062 578 1012T526 923T439 862T319 840Q250 840 198 862T110 922T57 1011T39
+1119V1196ZM207 1119Q207 1092 213 1068T232 1025T267 995T319 983Q349 983 370 994T403 1024T422 1067T428 1119V1196Q428 1223 422 1247T403 1290T369 1320T317 1332Q287 1332 266 1321T232 1291T213 1248T207 1196V1119ZM631 337Q631 394 648 445T701 534T788
+595T909 617Q978 617 1030 595T1117 535T1169 445T1187 337V259Q1187 202 1170 151T1118 62T1032 2T911 -20Q842 -20 790 2T702 62T649 151T631 259V337ZM799 259Q799 232 805 208T824 165T859 135T911 123Q943 123 964 134T997 164T1014 207T1019 259V337Q1019
+363 1013 387T994 430T960 460T909 472Q878 472 857 461T823 431T805 388T799 337V259ZM411 196L284 260L845 1288L972 1224L411 196Z" />
+<glyph unicode="&amp;" glyph-name="ampersand" horiz-adv-x="1229" d="M88 373Q88 440 107 495T160 599T244 690T353 775Q290 865 255 945T219 1111Q219 1198 245 1266T321 1381T441 1452T597 1477Q671 1477 731 1451T834 1380T900 1276T923 1152Q923 1100 907
+1056T863 972T797 899T715 832L630 761L901 409Q933 469 951 540T970 692H1179Q1179 558 1143 445T1031 240L1216 0H937L867 92Q792 37 708 8T523 -21Q422 -21 342 8T205 90T118 215T88 373ZM529 170Q589 170 646 189T754 243L466 624L450 611Q409 576 385 543T348
+479T331 424T327 381Q327 337 340 299T379 232T442 187T529 170ZM445 1112Q445 1063 466 1012T525 904L628 984Q659 1005 676 1026T702 1069T712 1109T714 1144Q714 1170 706 1195T683 1240T646 1272T597 1284Q559 1284 531 1270T483 1233T455 1178T445 1112Z"
+/>
+<glyph unicode="&apos;" glyph-name="quotesingle" horiz-adv-x="1229" d="M665 1412L640 1022H473V1536H665V1412Z" />
+<glyph unicode="(" glyph-name="parenleft" horiz-adv-x="1229" d="M348 592Q348 737 371 863T432 1096T520 1290T626 1445T738 1559T846 1632L894 1491Q837 1447 779 1372T674 1187T598 930T568 594V574Q568 386 597 239T674 -20T779 -207T894 -329L846 -463Q795
+-438 739 -391T627 -277T521 -122T432 72T371 305T348 576V592Z" />
+<glyph unicode=")" glyph-name="parenright" horiz-adv-x="1229" d="M868 576Q868 433 845 308T783 76T693 -119T586 -275T472 -390T363 -463L316 -329Q372 -286 430 -210T535 -23T612 237T642 574V594Q642 782 611 930T531 1191T426 1379T316 1498L363 1632Q414
+1606 471 1559T585 1444T693 1288T783 1092T845 860T868 592V576Z" />
+<glyph unicode="*" glyph-name="asterisk" horiz-adv-x="1229" d="M538 645L154 740L216 936L585 776L553 1203H758L723 770L1089 926L1151 727L761 637L1022 313L855 190L645 559L437 201L268 318L538 645Z" />
+<glyph unicode="+" glyph-name="plus" horiz-adv-x="1229" d="M725 800H1107V575H725V146H492V575H109V800H492V1206H725V800Z" />
+<glyph unicode="," glyph-name="comma" horiz-adv-x="1229" d="M681 33Q681 -20 668 -76T629 -186T568 -288T489 -375L354 -304Q397 -232 420 -153T443 25V234H682L681 33Z" />
+<glyph unicode="-" glyph-name="hyphen" horiz-adv-x="1229" d="M1050 542H249V735H1050V542Z" />
+<glyph unicode="." glyph-name="period" horiz-adv-x="1229" d="M454 143Q454 178 465 209T499 262T553 298T628 311Q670 311 702 298T757 263T791 209T803 143Q803 108 792 79T758 27T703 -7T628 -20Q586 -20 554 -8T499 27T466 78T454 143Z" />
+<glyph unicode="/" glyph-name="slash" horiz-adv-x="1229" d="M451 -125H233L809 1456H1027L451 -125Z" />
+<glyph unicode="0" glyph-name="zero" horiz-adv-x="1229" d="M1090 554Q1090 413 1058 306T964 126T815 16T617 -21Q508 -21 420 16T271 125T176 305T142 554V902Q142 1043 175 1150T269 1330T418 1440T615 1477Q725 1477 813 1440T962 1331T1057 1151T1090 902V554ZM382
+645L848 992Q840 1141 783 1212T615 1284Q498 1284 440 1201T382 946V645ZM850 804L384 457Q402 171 617 171Q850 171 850 512V804Z" />
+<glyph unicode="1" glyph-name="one" horiz-adv-x="1229" d="M802 0H561V1161L189 1025V1235L787 1456H802V0Z" />
+<glyph unicode="2" glyph-name="two" horiz-adv-x="1229" d="M1083 0H105V165L579 682Q639 749 679 800T744 895T778 975T788 1050Q788 1100 773 1142T729 1215T660 1263T569 1280Q439 1280 378 1209T316 1013H76Q76 1108 110 1192T209 1340T366 1439T573 1476Q681
+1476 765 1445T907 1359T995 1227T1025 1060Q1025 992 1003 929T942 802T849 676T732 545L402 191H1083V0Z" />
+<glyph unicode="3" glyph-name="three" horiz-adv-x="1229" d="M404 836H547Q610 836 657 852T735 899T781 970T797 1062Q797 1168 742 1225T574 1283Q525 1283 484 1269T412 1227T364 1162T347 1076H108Q108 1162 142 1235T238 1361T385 1445T571 1476Q674 1476
+760 1450T907 1371T1002 1242T1036 1062Q1036 1019 1023 974T984 887T917 809T821 747Q887 724 932 688T1005 608T1045 512T1057 408Q1057 306 1020 226T918 92T763 9T571 -20Q476 -20 390 6T239 83T134 211T95 389H334Q334 341 351 301T399 232T475 187T574 171Q688
+171 753 231T819 407Q819 470 800 515T746 590T660 634T547 648H404V836Z" />
+<glyph unicode="4" glyph-name="four" horiz-adv-x="1229" d="M943 517H1126V324H943V0H704V324H72L66 469L696 1456H943V517ZM312 517H704V1137L678 1090L312 517Z" />
+<glyph unicode="5" glyph-name="five" horiz-adv-x="1229" d="M195 722L274 1456H1053V1242H472L435 897Q470 917 528 936T662 955Q765 955 846 921T984 824T1070 670T1100 465Q1100 364 1072 276T985 122T837 18T627 -20Q536 -20 453 6T304 84T198 213T150 392H381Q395
+284 460 228T627 171Q686 171 730 193T803 254T846 349T861 471Q861 534 845 587T795 678T713 739T600 761Q557 761 526 755T471 737T426 709T386 675L195 722Z" />
+<glyph unicode="6" glyph-name="six" horiz-adv-x="1229" d="M867 1462V1258H851Q728 1258 640 1224T493 1130T404 993T365 825Q389 852 420 876T489 920T571 951T665 963Q772 963 850 922T980 814T1056 659T1081 477Q1081 372 1050 282T958 124T810 19T610 -20Q491
+-20 400 27T248 152T154 334T122 550V643Q122 750 138 852T189 1046T281 1213T417 1345T602 1431T840 1462H867ZM608 770Q567 770 529 758T459 723T402 668T362 598V530Q362 441 381 375T435 263T513 196T607 173Q663 173 707 195T781 256T828 350T844 470Q844
+531 829 585T784 681T710 746T608 770Z" />
+<glyph unicode="7" glyph-name="seven" horiz-adv-x="1229" d="M1096 1323L513 0H261L844 1257H89V1456H1096V1323Z" />
+<glyph unicode="8" glyph-name="eight" horiz-adv-x="1229" d="M1063 1072Q1063 965 1008 882T858 750Q912 725 955 689T1030 608T1077 510T1094 396Q1094 295 1057 217T956 87T806 7T620 -20Q519 -20 432 7T280 86T180 217T143 396Q143 456 159 510T206 609T280
+690T378 750Q284 798 230 881T176 1072Q176 1169 209 1244T300 1371T440 1449T617 1476Q713 1476 794 1450T936 1372T1029 1245T1063 1072ZM855 408Q855 463 838 508T790 585T716 634T618 652Q564 652 521 635T447 585T400 508T383 408Q383 296 446 234T620 171Q672
+171 715 187T790 234T838 308T855 408ZM824 1064Q824 1112 809 1152T767 1222T702 1267T617 1283Q570 1283 533 1268T469 1225T429 1156T415 1064Q415 1014 429 974T470 905T535 860T620 844Q666 844 704 860T768 904T809 973T824 1064Z" />
+<glyph unicode="9" glyph-name="nine" horiz-adv-x="1229" d="M351 198Q481 198 569 228T713 313T794 440T828 600Q806 575 778 554T714 516T636 490T542 480Q435 480 356 520T225 628T148 784T122 968Q122 1073 153 1165T244 1327T391 1436T591 1476Q712 1476
+801 1428T950 1299T1039 1111T1069 884V806Q1069 702 1056 602T1010 411T924 244T791 113T603 26T352 -6H333V198H351ZM591 669Q632 669 669 682T738 718T793 773T831 843V922Q831 1010 812 1077T759 1190T683 1259T591 1282Q535 1282 492 1258T420 1193T375 1095T359
+975Q359 915 373 860T415 763T488 695T591 669Z" />
+<glyph unicode=":" glyph-name="colon" horiz-adv-x="1229" d="M485 143Q485 178 496 209T530 262T584 298T659 311Q701 311 733 298T788 263T822 209T834 143Q834 108 823 79T789 27T734 -7T659 -20Q617 -20 585 -8T530 27T497 78T485 143ZM485 994Q485 1029
+496 1060T530 1113T584 1149T659 1162Q701 1162 733 1149T788 1114T822 1060T834 994Q834 959 823 930T789 878T734 844T659 831Q617 831 585 843T530 878T497 929T485 994Z" />
+<glyph unicode=";" glyph-name="semicolon" horiz-adv-x="1229" d="M500 994Q500 1029 511 1060T545 1113T599 1149T674 1162Q716 1162 748 1149T803 1114T837 1060T849 994Q849 959 838 930T804 878T749 844T674 831Q632 831 600 843T545 878T512 929T500 994ZM784
+33Q784 -20 771 -76T732 -186T671 -288T592 -375L457 -304Q500 -232 523 -153T546 25V234H785L784 33Z" />
+<glyph unicode="&lt;" glyph-name="less" horiz-adv-x="1229" d="M461 656L396 633L461 611L1029 407V167L163 540V730L1029 1102V863L461 656Z" />
+<glyph unicode="=" glyph-name="equal" horiz-adv-x="1229" d="M1085 746H165V945H1085V746ZM1085 322H165V521H1085V322Z" />
+<glyph unicode="&gt;" glyph-name="greater" horiz-adv-x="1229" d="M169 867V1103L1062 731V540L169 167V404L769 617L834 637L769 658L169 867Z" />
+<glyph unicode="?" glyph-name="question" horiz-adv-x="1229" d="M502 426Q502 497 506 545T524 630T566 699T643 774Q676 807 711 842T774 916T821 996T840 1081Q840 1180 785 1228T626 1277Q586 1277 548 1267T479 1234T431 1178T412 1095H172Q173 1188 208
+1259T305 1379T448 1452T626 1477Q734 1477 818 1452T960 1377T1049 1256T1080 1091Q1080 1020 1056 959T992 843T903 742T806 652Q776 622 760 598T737 549T728 495T727 426H502ZM471 120Q471 179 507 217T612 256Q680 256 717 218T755 120Q755 63 718 26T612
+-12Q544 -12 508 25T471 120Z" />
+<glyph unicode="@" glyph-name="at" horiz-adv-x="1229" d="M1164 807Q1161 719 1144 631T1093 473T1005 359T876 315Q821 315 780 342T718 421Q683 372 637 344T536 316Q483 316 443 341T377 415T341 530T338 682Q348 790 377 876T451 1022T556 1114T688 1147Q761
+1147 808 1122T883 1064L840 549Q837 514 841 494T854 463T874 450T897 447Q927 447 951 474T993 548T1020 661T1030 804Q1034 928 1011 1022T938 1179T818 1275T655 1307Q554 1307 473 1261T333 1131T242 935T205 686Q194 414 299 275T607 136Q669 136 731 152T839
+196L871 67Q845 48 813 34T745 11T674 -3T604 -8Q460 -8 355 41T182 181T83 400T56 686Q61 843 107 982T232 1225T421 1389T661 1450Q779 1450 874 1405T1035 1275T1134 1072T1164 807ZM490 682Q485 572 508 519T577 465Q610 465 642 481T701 544V549L740 1000Q720
+1006 699 1006Q605 1006 556 922T490 682Z" />
+<glyph unicode="A" glyph-name="A" horiz-adv-x="1229" d="M855 338H400L300 0H52L525 1456H736L1200 0H953L855 338ZM461 543H795L630 1114L461 543Z" />
+<glyph unicode="B" glyph-name="B" horiz-adv-x="1229" d="M148 0V1456H613Q718 1455 809 1432T969 1361T1077 1239T1115 1062Q1114 954 1055 878T896 759Q957 742 1004 711T1084 635T1133 538T1150 424Q1150 320 1112 241T1007 109T847 29T647 0H148ZM389 660V196H651Q708
+197 755 213T836 259T889 330T909 424Q909 479 892 522T843 596T765 642T662 660H389ZM389 843H619Q673 844 719 857T800 897T854 962T874 1051Q874 1106 855 1145T802 1209T720 1245T617 1258H389V843Z" />
+<glyph unicode="C" glyph-name="C" horiz-adv-x="1229" d="M1132 447Q1122 342 1082 256T976 108T821 13T621 -21Q488 -21 389 30T224 170T125 376T91 628V827Q92 960 126 1078T226 1285T392 1425T622 1477Q737 1477 827 1443T981 1347T1083 1196T1132 998H892Q886
+1062 868 1114T817 1204T737 1262T622 1282Q541 1282 486 1245T397 1146T349 1001T334 829V628Q334 534 348 452T396 307T484 209T621 173Q747 173 813 244T891 447H1132Z" />
+<glyph unicode="D" glyph-name="D" horiz-adv-x="1229" d="M137 0V1456H493Q643 1455 762 1406T965 1268T1094 1056T1140 782V672Q1139 522 1094 399T966 187T765 49T500 0H137ZM381 1262V193H500Q599 194 673 228T796 325T870 476T896 672V784Q895 889 871 976T796
+1126T671 1225T493 1262H381Z" />
+<glyph unicode="E" glyph-name="E" horiz-adv-x="1229" d="M1002 653H403V196H1104V0H162V1456H1097V1258H403V847H1002V653Z" />
+<glyph unicode="F" glyph-name="F" horiz-adv-x="1229" d="M1021 620H410V0H168V1456H1111V1258H410V817H1021V620Z" />
+<glyph unicode="G" glyph-name="G" horiz-adv-x="1229" d="M1126 182Q1089 138 1039 101T927 37T793 -5T638 -20Q507 -20 406 31T236 170T130 379T92 637V817Q92 955 128 1075T231 1284T397 1424T621 1476Q734 1476 823 1445T976 1356T1077 1213T1125 1025H889Q880
+1084 861 1131T810 1211T733 1261T625 1279Q549 1279 495 1242T404 1141T351 994T333 819V637Q334 546 351 463T406 317T503 215T645 176Q679 176 715 180T783 195T843 220T887 259L889 550H626V737H1124L1126 182Z" />
+<glyph unicode="H" glyph-name="H" horiz-adv-x="1229" d="M1092 0H858V650H363V0H130V1456H363V847H858V1456H1092V0Z" />
+<glyph unicode="I" glyph-name="I" horiz-adv-x="1229" d="M181 1456H1047V1257H732V198H1047V0H181V198H489V1257H181V1456Z" />
+<glyph unicode="J" glyph-name="J" horiz-adv-x="1229" d="M832 1456H1075L1076 445Q1074 343 1037 258T934 111T780 15T589 -20Q482 -20 395 9T245 95T145 235T103 425H345Q350 307 411 242T589 176Q644 176 689 197T765 255T814 340T832 445V1456Z" />
+<glyph unicode="K" glyph-name="K" horiz-adv-x="1229" d="M546 633L390 458V0H147V1456H390V774L523 947L906 1456H1199L704 817L1224 0H936L546 633Z" />
+<glyph unicode="L" glyph-name="L" horiz-adv-x="1229" d="M412 196H1106V0H172V1456H412V196Z" />
+<glyph unicode="M" glyph-name="M" horiz-adv-x="1229" d="M433 1456L614 827L814 1456H1112V0H886V500L900 1138L676 440H550L347 1114L361 500V0H135V1456H433Z" />
+<glyph unicode="N" glyph-name="N" horiz-adv-x="1229" d="M1091 0H849L375 1002L374 0H131V1456H374L846 457L848 1456H1091V0Z" />
+<glyph unicode="O" glyph-name="O" horiz-adv-x="1229" d="M1136 634Q1136 505 1103 387T1005 178T841 34T610 -20Q479 -20 382 34T221 178T124 387T91 634V820Q91 906 105 988T148 1142T219 1276T319 1382T449 1451T609 1476Q741 1476 839 1422T1004 1278T1103
+1068T1136 820V634ZM895 822Q894 904 880 985T833 1129T746 1232T609 1272Q528 1272 475 1233T390 1129T346 985T332 822V634Q333 552 346 472T390 327T475 222T610 182Q692 182 746 222T834 326T880 471T895 634V822Z" />
+<glyph unicode="P" glyph-name="P" horiz-adv-x="1229" d="M407 558V0H167V1456H654Q763 1455 855 1424T1015 1334T1119 1192T1157 1003Q1157 901 1120 819T1015 679T856 590T654 558H407ZM407 752H654Q715 752 763 770T845 822T897 901T915 1001Q915 1058 897
+1106T845 1188T763 1242T654 1262H407V752Z" />
+<glyph unicode="Q" glyph-name="Q" horiz-adv-x="1229" d="M1146 622Q1146 548 1133 476T1096 337T1034 212T947 107L1195 -106L1036 -251L752 -2Q720 -10 686 -15T613 -20Q480 -20 382 35T218 181T121 387T88 622V808Q89 895 103 978T146 1136T218 1273T319 1380T450
+1451T613 1476Q747 1476 847 1421T1013 1274T1112 1060T1146 808V622ZM904 810Q904 895 891 978T844 1126T755 1231T613 1272Q528 1272 474 1232T387 1126T343 977T330 810V622Q330 548 342 471T387 329T473 223T613 182Q699 182 755 223T844 328T890 470T904 622V810Z"
+/>
+<glyph unicode="R" glyph-name="R" horiz-adv-x="1229" d="M637 568H393V0H153V1456H613Q725 1455 819 1428T983 1346T1090 1209T1128 1017Q1128 943 1109 883T1054 773T970 687T862 624L1170 12V0H914L637 568ZM393 762H614Q674 762 724 779T810 827T867 904T887
+1009Q887 1070 868 1117T814 1196T727 1244T613 1262H393V762Z" />
+<glyph unicode="S" glyph-name="S" horiz-adv-x="1229" d="M905 375Q905 427 884 466T823 536T729 588T610 630Q524 655 441 692T292 782T186 907T145 1073Q145 1167 185 1241T294 1368T452 1448T640 1476Q745 1476 836 1444T996 1353T1105 1213T1146 1032H906Q901
+1087 883 1132T831 1211T749 1262T638 1280Q584 1280 538 1266T459 1225T406 1160T387 1074Q387 1023 413 986T481 920T577 872T691 834Q782 807 865 768T1011 673T1111 544T1148 377Q1148 280 1107 206T997 82T837 7T647 -19Q580 -19 514 -6T388 33T275 97T185
+186T124 299T101 437H342Q347 371 371 322T435 241T528 192T647 175Q701 175 748 188T830 226T885 289T905 375Z" />
+<glyph unicode="T" glyph-name="T" horiz-adv-x="1229" d="M1180 1258H733V0H494V1258H50V1456H1180V1258Z" />
+<glyph unicode="U" glyph-name="U" horiz-adv-x="1229" d="M1096 1456L1098 481Q1097 369 1062 277T964 118T812 16T611 -20Q503 -20 415 16T264 117T166 275T130 481L132 1456H368L370 481Q371 414 387 358T433 262T507 199T611 176Q671 176 717 198T794 261T841
+358T858 481L860 1456H1096Z" />
+<glyph unicode="V" glyph-name="V" horiz-adv-x="1229" d="M612 365L915 1456H1177L727 0H498L49 1456H311L612 365Z" />
+<glyph unicode="W" glyph-name="W" horiz-adv-x="1229" d="M867 501L971 1456H1195L1009 0H779L620 968L463 0H234L48 1456H272L376 506L525 1456H713L867 501Z" />
+<glyph unicode="X" glyph-name="X" horiz-adv-x="1229" d="M624 914L904 1456H1187L772 734L1197 0H916L628 551L340 0H56L481 734L67 1456H348L624 914Z" />
+<glyph unicode="Y" glyph-name="Y" horiz-adv-x="1229" d="M616 760L919 1456H1187L733 523L731 0H498L496 529L45 1456H313L616 760Z" />
+<glyph unicode="Z" glyph-name="Z" horiz-adv-x="1229" d="M383 196H1118V0H101L100 162L812 1258H106V1456H1095L1096 1298L383 196Z" />
+<glyph unicode="[" glyph-name="bracketleft" horiz-adv-x="1229" d="M837 1489H643V-136H837V-323H410V1677H837V1489Z" />
+<glyph unicode="\" glyph-name="backslash" horiz-adv-x="1229" d="M233 1456H452L1029 -125H809L233 1456Z" />
+<glyph unicode="]" glyph-name="bracketright" horiz-adv-x="1229" d="M400 1677H828V-323H400V-136H595V1489H400V1677Z" />
+<glyph unicode="^" glyph-name="asciicircum" horiz-adv-x="1229" d="M425 683H220L539 1456H708L1026 683H822L642 1141L622 1225L602 1141L425 683Z" />
+<glyph unicode="_" glyph-name="underscore" horiz-adv-x="1229" d="M1068 -188H150V0H1068V-188Z" />
+<glyph unicode="`" glyph-name="grave" horiz-adv-x="1229" d="M858 1205H668L409 1478H682L858 1205Z" />
+<glyph unicode="a" glyph-name="a" horiz-adv-x="1229" d="M845 0Q835 20 829 47T817 105Q792 79 761 57T691 17T608 -10T512 -20Q426 -20 357 4T238 72T161 175T134 303Q134 477 263 569T635 661H810V732Q810 818 752 868T588 919Q540 919 504 908T444 876T408
+829T396 771H157Q157 834 186 893T273 999T412 1074T601 1102Q697 1102 779 1079T921 1009T1015 893T1049 730V248Q1049 177 1059 119T1090 17V0H845ZM557 173Q603 173 643 184T716 214T772 257T810 306V509H658Q512 509 443 462T373 328Q373 295 384 267T417 218T474
+185T557 173Z" />
+<glyph unicode="b" glyph-name="b" horiz-adv-x="1229" d="M1106 529Q1106 409 1079 309T997 135T864 21T682 -20Q583 -20 511 15T386 116L375 0H158V1536H397V979Q448 1038 517 1070T680 1102Q784 1102 863 1062T997 948T1078 774T1106 550V529ZM867 550Q867
+622 855 686T814 800T737 877T619 906Q534 906 480 867T397 765V318Q426 255 480 215T621 175Q689 175 736 202T812 278T854 390T867 529V550Z" />
+<glyph unicode="c" glyph-name="c" horiz-adv-x="1229" d="M628 170Q672 170 713 184T787 223T837 282T855 356H1081Q1082 278 1046 210T947 90T802 9T630 -21Q505 -21 411 22T254 139T160 312T128 522V558Q128 669 159 768T254 941T411 1058T629 1102Q727 1102
+810 1072T954 988T1048 859T1081 695H855Q856 739 840 778T792 846T719 892T627 909Q552 909 502 879T422 799T380 686T367 558V522Q367 456 379 393T422 281T502 201T628 170Z" />
+<glyph unicode="d" glyph-name="d" horiz-adv-x="1229" d="M123 549Q123 672 153 774T239 949T374 1062T553 1102Q642 1102 710 1072T829 984V1536H1069V0H852L840 110Q789 47 718 14T551 -20Q453 -20 374 21T240 136T154 309T123 528V549ZM362 528Q362 455 376
+391T420 279T498 204T612 176Q694 176 746 211T829 307V777Q798 836 746 871T614 906Q547 906 500 878T422 801T377 687T362 549V528Z" />
+<glyph unicode="e" glyph-name="e" horiz-adv-x="1229" d="M659 -20Q539 -20 440 20T271 130T161 295T122 502V543Q122 672 163 775T274 951T436 1063T631 1102Q749 1102 839 1063T989 952T1081 782T1112 567V466H362Q368 403 393 349T458 256T553 194T673 171Q764
+171 840 207T965 306L1095 184Q1069 145 1028 109T932 44T809 -2T659 -20ZM630 909Q580 909 537 891T460 839T403 755T369 642H876V660Q873 708 858 752T812 832T737 888T630 909Z" />
+<glyph unicode="f" glyph-name="f" horiz-adv-x="1229" d="M427 0V904H136V1082H427V1149Q427 1254 458 1334T549 1469T693 1552T882 1581Q951 1581 1013 1571T1144 1546L1123 1356Q1084 1367 1032 1375T915 1384Q786 1384 727 1323T667 1149V1082H1069V904H667V0H427Z" />
+<glyph unicode="g" glyph-name="g" horiz-adv-x="1229" d="M124 549Q124 672 154 774T242 949T379 1062T560 1102Q657 1102 728 1069T850 974L860 1082H1077V29Q1077 -80 1041 -164T941 -307T786 -395T588 -426Q544 -426 489 -417T377 -386T270 -330T183 -246L297
+-103Q358 -175 428 -206T574 -237Q634 -237 683 -220T766 -170T818 -89T837 23V96Q786 40 718 10T558 -20Q459 -20 379 21T242 136T155 309T124 528V549ZM364 528Q364 455 379 391T425 279T504 204T619 176Q702 176 754 210T837 303V781Q805 838 753 872T621 906Q555
+906 507 878T427 801T380 687T364 549V528Z" />
+<glyph unicode="h" glyph-name="h" horiz-adv-x="1229" d="M394 932Q450 1012 530 1057T710 1102Q794 1102 864 1077T985 1000T1063 865T1091 667V0H852V669Q852 731 837 775T793 849T723 892T630 906Q552 906 492 874T394 786V0H155V1536H394V932Z" />
+<glyph unicode="i" glyph-name="i" horiz-adv-x="1229" d="M207 1082H784V198H1105V0H207V198H543V883H207V1082ZM523 1362Q523 1418 559 1454T660 1491Q726 1491 762 1455T799 1362Q799 1306 763 1270T660 1233Q595 1233 559 1269T523 1362Z" />
+<glyph unicode="j" glyph-name="j" horiz-adv-x="1229" d="M303 1082H857V-17Q857 -123 827 -201T741 -332T602 -411T414 -437Q363 -437 315 -434T219 -420L233 -226Q247 -229 270 -231T318 -235T365 -237T400 -238Q445 -238 484 -227T553 -191T599 -123T616 -17V883H303V1082ZM595
+1362Q595 1418 631 1454T732 1491Q798 1491 834 1455T871 1362Q871 1306 835 1270T732 1233Q667 1233 631 1269T595 1362Z" />
+<glyph unicode="k" glyph-name="k" horiz-adv-x="1229" d="M525 473L395 349V0H156V1536H395V631L496 744L814 1082H1102L684 632L1166 0H868L525 473Z" />
+<glyph unicode="l" glyph-name="l" horiz-adv-x="1229" d="M199 1536H784V198H1114V0H199V198H543V1337H199V1536Z" />
+<glyph unicode="m" glyph-name="m" horiz-adv-x="1229" d="M293 1082L299 968Q331 1031 385 1066T515 1102Q584 1102 629 1075T698 989Q730 1041 780 1071T901 1102Q954 1102 998 1085T1074 1029T1123 932T1141 790V0H919V792Q919 857 892 881T821 906Q780 906
+756 887T721 835V0H504V792Q504 855 479 880T409 906Q367 906 343 888T306 839V0H84V1082H293Z" />
+<glyph unicode="n" glyph-name="n" horiz-adv-x="1229" d="M371 1082L386 932Q445 1013 528 1057T714 1102Q797 1102 865 1078T983 1003T1060 871T1088 676V0H849V672Q849 739 834 784T790 856T720 894T627 906Q548 906 490 871T395 776V0H155V1082H371Z" />
+<glyph unicode="o" glyph-name="o" horiz-adv-x="1229" d="M111 551Q111 668 145 769T244 944T402 1060T613 1102Q732 1102 825 1060T983 944T1082 769T1116 551V530Q1116 412 1082 311T984 137T826 21T615 -21Q496 -21 403 21T245 136T146 311T111 530V551ZM350
+530Q350 456 365 391T413 277T496 199T615 170Q682 170 731 198T813 276T861 391T877 530V551Q877 623 862 688T814 802T732 880T613 909Q545 909 495 880T413 802T366 688T350 551V530Z" />
+<glyph unicode="p" glyph-name="p" horiz-adv-x="1229" d="M1102 530Q1102 412 1074 311T992 137T860 22T679 -20Q587 -20 517 10T395 96V-416H156V1082H376L386 973Q438 1035 509 1068T676 1102Q779 1102 858 1062T992 950T1074 776T1102 551V530ZM863 551Q863
+624 848 688T802 801T723 878T609 906Q529 906 477 872T395 781V297Q425 241 476 206T611 171Q677 171 725 200T803 278T848 392T863 530V551Z" />
+<glyph unicode="q" glyph-name="q" horiz-adv-x="1229" d="M124 550Q124 673 153 775T237 949T373 1062T557 1102Q651 1102 721 1070T843 977L855 1082H1069V-416H830V91Q779 37 711 9T555 -20Q454 -20 374 21T239 136T154 310T124 529V550ZM363 529Q363 456 377
+391T423 277T502 200T616 171Q696 171 747 204T830 295V791Q798 845 747 877T618 909Q551 909 503 881T424 803T378 689T363 550V529Z" />
+<glyph unicode="r" glyph-name="r" horiz-adv-x="1229" d="M898 1102Q927 1102 956 1100T1011 1094T1060 1086T1097 1075L1063 840Q953 865 846 865Q726 865 653 815T544 676V0H305V1082H529L540 913Q606 1001 696 1051T898 1102Z" />
+<glyph unicode="s" glyph-name="s" horiz-adv-x="1229" d="M860 293Q860 320 849 342T810 383T732 418T605 449Q509 469 432 497T299 564T213 654T183 774Q183 840 214 899T302 1004T442 1075T627 1102Q735 1102 820 1075T965 1002T1055 892T1087 757H848Q848
+789 834 818T792 871T723 907T627 921Q573 921 534 910T468 880T430 836T417 784Q417 757 426 735T462 695T534 663T653 635Q755 617 836 590T975 524T1064 432T1095 306Q1095 234 1062 174T968 71T822 3T630 -21Q512 -21 422 10T271 93T179 209T147 341H379Q382
+291 405 256T464 200T544 169T633 159Q744 159 802 196T860 293Z" />
+<glyph unicode="t" glyph-name="t" horiz-adv-x="1229" d="M634 1344V1082H1043V904H634V385Q634 324 648 284T689 220T750 187T827 177Q858 177 891 180T956 188T1014 198T1059 209L1086 44Q1060 29 1024 17T944 -4T855 -16T763 -21Q682 -21 615 1T499 71T423
+193T395 372V904H131V1082H395V1344H634Z" />
+<glyph unicode="u" glyph-name="u" horiz-adv-x="1229" d="M849 148Q794 68 716 24T540 -21Q456 -21 387 5T268 87T191 231T163 442V1082H402V440Q402 362 413 311T449 230T509 188T595 175Q691 175 750 212T839 314V1082H1079V0H863L849 148Z" />
+<glyph unicode="v" glyph-name="v" horiz-adv-x="1229" d="M598 302L615 222L633 301L895 1082H1142L718 0H512L84 1082H331L598 302Z" />
+<glyph unicode="w" glyph-name="w" horiz-adv-x="1229" d="M360 356L367 287L378 356L533 1082H690L847 348L858 276L867 355L972 1082H1188L977 0H776L612 724L610 737L608 725L447 0H247L36 1082H251L360 356Z" />
+<glyph unicode="x" glyph-name="x" horiz-adv-x="1229" d="M628 716L886 1082H1152L770 549L1164 0H899L631 379L364 0H97L491 549L109 1082H374L628 716Z" />
+<glyph unicode="y" glyph-name="y" horiz-adv-x="1229" d="M584 433L625 303L929 1082H1194L646 -164Q628 -205 599 -252T528 -341T426 -409T288 -437Q255 -437 222 -432T162 -419L196 -233Q203 -233 215 -233T240 -235T264 -236T283 -237Q318 -237 345 -217T394
+-170T428 -118T448 -83L519 51L59 1082H323L584 433Z" />
+<glyph unicode="z" glyph-name="z" horiz-adv-x="1229" d="M455 191H1109V0H140V158L758 888H150V1082H1080V929L455 191Z" />
+<glyph unicode="{" glyph-name="braceleft" horiz-adv-x="1229" d="M960 -360Q894 -360 840 -340T744 -286T671 -206T620 -108T590 -2T580 106V269Q580 408 515 469T315 530V706Q449 706 514 767T580 969V1132Q580 1184 588 1239T616 1346T664 1443T736 1523T834
+1577T960 1597L987 1458Q921 1459 884 1430T830 1353T810 1248T806 1132V969Q806 850 749 758T572 618Q693 570 749 479T806 269V106Q806 49 813 -9T841 -115T897 -191T987 -220L960 -360Z" />
+<glyph unicode="|" glyph-name="bar" horiz-adv-x="1229" d="M701 -410H534V1456H701V-410Z" />
+<glyph unicode="}" glyph-name="braceright" horiz-adv-x="1229" d="M327 -220Q382 -221 417 -192T472 -115T500 -10T508 106V269Q508 386 563 477T735 618Q618 667 563 759T508 969V1132Q508 1189 505 1247T484 1353T430 1429T327 1458L354 1597Q424 1597 479
+1577T577 1523T649 1444T698 1346T725 1239T734 1132V969Q734 829 799 768T999 706V530Q864 530 799 469T734 269V106Q734 54 725 -1T695 -108T644 -206T571 -286T474 -340T354 -360L327 -220Z" />
+<glyph unicode="~" glyph-name="asciitilde" horiz-adv-x="1229" d="M1163 746Q1163 672 1141 607T1078 493T981 416T857 388Q812 388 772 396T695 422T620 464T543 525Q491 567 444 590T347 614Q317 614 290 600T243 563T210 510T197 445L36 465Q36 539 58 603T120
+714T217 787T341 814Q431 814 505 779T656 679Q710 634 755 612T850 589Q880 589 907 603T954 642T986 699T998 765L1163 746Z" />
+<glyph unicode="&#xa1;" glyph-name="exclamdown" horiz-adv-x="1229" d="M514 584H740L748 -377H505L514 584ZM771 966Q771 909 734 872T628 834Q560 834 524 871T487 966Q487 1024 523 1063T628 1103Q696 1103 733 1064T771 966Z" />
+<glyph unicode="&#xa2;" glyph-name="cent" horiz-adv-x="1229" d="M642 170Q686 170 727 184T801 223T851 282T869 356H1095Q1096 288 1068 227T990 117T875 34T733 -13V-245H533V-11Q435 7 362 56T240 176T167 335T142 522V558Q142 656 166 744T240 904T362
+1025T533 1092V1318H733V1095Q814 1082 880 1048T995 961T1069 842T1095 695H869Q870 739 854 778T806 846T733 892T641 909Q566 909 516 879T436 799T394 686T381 558V522Q381 456 393 393T436 281T516 201T642 170Z" />
+<glyph unicode="&#xa3;" glyph-name="sterling" horiz-adv-x="1229" d="M514 604L521 418Q521 352 506 295T458 193H1162V0H115V193H195Q220 199 236 222T262 277T275 345T279 412L273 604H112V794H267L259 1040Q259 1143 293 1224T387 1361T530 1446T712 1476Q814
+1476 894 1448T1028 1367T1112 1241T1141 1077H906Q906 1133 889 1172T844 1236T778 1272T700 1283Q658 1283 622 1267T558 1221T515 1145T499 1040L507 794H819V604H514Z" />
+<glyph unicode="&#xa4;" glyph-name="currency" horiz-adv-x="1229" d="M914 78Q853 30 779 5T624 -20Q543 -20 470 5T334 76L234 -26L93 118L201 228Q166 288 147 359T128 508Q128 591 149 665T209 801L93 920L234 1064L348 947Q408 989 477 1011T624 1034Q701
+1034 771 1011T901 946L1017 1065L1159 920L1039 797Q1077 735 1097 662T1118 508Q1118 430 1100 361T1047 231L1159 118L1017 -27L914 78ZM314 508Q314 436 338 374T404 265T502 192T624 165Q688 165 744 191T843 264T909 373T933 508Q933 580 909 642T843 749T745
+821T624 848Q559 848 503 822T404 750T338 642T314 508Z" />
+<glyph unicode="&#xa5;" glyph-name="yen" horiz-adv-x="1229" d="M619 814L943 1456H1212L814 742H1092V595H735V452H1092V306H735V0H495V306H137V452H495V595H137V742H424L25 1456H294L619 814Z" />
+<glyph unicode="&#xa6;" glyph-name="brokenbar" horiz-adv-x="1229" d="M496 -270V525H722V-270H496ZM722 698H496V1456H722V698Z" />
+<glyph unicode="&#xa7;" glyph-name="section" horiz-adv-x="1229" d="M1157 453Q1157 359 1113 291T987 180Q1052 130 1086 62T1121 -105Q1121 -194 1086 -263T984 -379T825 -451T618 -476Q552 -476 487 -467T363 -438T252 -385T164 -305T105 -195T84 -51L323
+-49Q323 -117 349 -162T418 -234T512 -272T618 -283Q682 -283 731 -270T813 -234T864 -178T882 -107Q882 -67 867 -38T814 17T714 66T559 118Q448 148 361 183T214 267T121 383T89 543Q89 635 133 704T258 816Q193 866 159 934T125 1102Q125 1187 160 1256T262
+1374T421 1449T630 1476Q748 1476 841 1448T1000 1366T1101 1231T1136 1047H897Q897 1096 880 1139T829 1214T745 1265T630 1284Q561 1284 511 1270T429 1231T381 1174T365 1104Q365 1060 378 1030T428 974T526 927T685 878Q798 848 886 812T1034 727T1125 612T1157
+453ZM596 674Q553 685 514 697T438 723Q381 702 354 659T327 557Q327 512 340 480T390 422T488 373T647 321Q690 309 729 297T804 272Q861 295 891 338T921 438Q921 479 905 510T852 569T752 621T596 674Z" />
+<glyph unicode="&#xa8;" glyph-name="dieresis" horiz-adv-x="1229" d="M273 1366Q273 1416 306 1450T399 1484Q458 1484 491 1450T525 1366Q525 1316 492 1283T399 1249Q340 1249 307 1282T273 1366ZM728 1365Q728 1415 761 1448T853 1482Q912 1482 945 1449T979
+1365Q979 1315 946 1281T853 1247Q794 1247 761 1281T728 1365Z" />
+<glyph unicode="&#xa9;" glyph-name="copyright" horiz-adv-x="1229" d="M863 442Q863 323 796 265T612 207Q552 207 504 228T421 287T369 378T351 496V586Q351 650 369 703T421 795T503 855T612 877Q729 877 796 818T864 640H747Q747 712 713 743T612 775Q576
+775 549 761T503 722T476 663T466 587V496Q466 454 475 420T503 361T548 322T612 308Q679 308 712 338T746 442H863ZM181 542Q181 443 215 357T310 208T450 108T623 72Q714 72 794 108T934 207T1028 357T1063 542Q1063 641 1029 726T934 874T794 972T623 1008Q531
+1008 451 973T310 874T216 726T181 542ZM90 542Q90 660 132 762T246 940T416 1059T623 1102Q733 1102 829 1059T998 941T1112 763T1154 542Q1154 423 1112 321T998 142T830 23T623 -20Q513 -20 416 23T247 142T132 320T90 542Z" />
+<glyph unicode="&#xaa;" glyph-name="ordfeminine" horiz-adv-x="1229" d="M768 705Q753 748 747 799Q733 778 713 759T665 724T602 700T525 691Q465 691 419 707T341 753T293 825T276 919Q276 1030 359 1090T607 1150H744V1201Q744 1263 716 1295T628 1328Q563
+1328 527 1303T490 1229L317 1243Q317 1294 339 1336T402 1410T500 1458T628 1476Q694 1476 747 1459T839 1407T898 1321T919 1200V886Q919 837 925 793T945 705H768ZM568 835Q594 835 621 843T673 864T716 894T744 928V1033H608Q569 1033 540 1024T491 1000T461
+965T451 923Q451 881 479 858T568 835Z" />
+<glyph unicode="&#xab;" glyph-name="guillemotleft" horiz-adv-x="1229" d="M402 503L650 104H484L196 494V513L484 903H650L402 503ZM759 503L1007 104H841L553 494V513L841 903H1007L759 503Z" />
+<glyph unicode="&#xac;" glyph-name="logicalnot" horiz-adv-x="1229" d="M1021 374H821V634H186V805H1021V374Z" />
+<glyph unicode="&#xad;" glyph-name="uni00AD" horiz-adv-x="1229" d="M1050 542H249V735H1050V542Z" />
+<glyph unicode="&#xae;" glyph-name="registered" horiz-adv-x="1229" d="M88 541Q88 660 130 762T244 940T413 1059T620 1102Q730 1102 827 1059T996 941T1110 763T1152 541Q1152 422 1110 320T996 141T827 22T620 -21Q510 -21 413 22T244 141T130 319T88 541ZM179
+541Q179 442 213 357T308 208T448 109T620 73Q711 73 791 109T932 208T1026 356T1061 541Q1061 640 1027 725T932 874T792 972T620 1008Q529 1008 449 973T308 874T214 726T179 541ZM512 484V232H399V869H610Q721 869 787 821T854 676Q854 620 819 581T718 519L864
+232H748L631 484H512ZM512 585H626Q648 585 669 591T706 608T731 635T741 672Q741 727 712 748T610 769H512V585Z" />
+<glyph unicode="&#xaf;" glyph-name="overscore" horiz-adv-x="1229" d="M989 1299H263V1456H989V1299Z" />
+<glyph unicode="&#xb0;" glyph-name="degree" horiz-adv-x="1229" d="M358 1208Q358 1263 379 1311T436 1396T520 1454T622 1476Q675 1476 722 1455T804 1397T860 1312T881 1208Q881 1153 861 1105T805 1021T722 965T622 944Q568 944 520 964T436 1021T379 1105T358
+1208ZM496 1208Q496 1181 506 1159T533 1120T573 1094T622 1084Q647 1084 669 1093T708 1119T733 1158T743 1208Q743 1235 734 1258T708 1299T670 1326T622 1336Q596 1336 574 1327T534 1300T506 1259T496 1208Z" />
+<glyph unicode="&#xb1;" glyph-name="plusminus" horiz-adv-x="1229" d="M723 896H1068V700H723V310H511V700H152V896H511V1275H723V896ZM1029 1H182V194H1029V1Z" />
+<glyph unicode="&#xb2;" glyph-name="twosuperior" horiz-adv-x="1229" d="M935 667H324V791L611 1055Q673 1110 698 1152T723 1223Q723 1265 696 1292T616 1319Q584 1319 561 1310T522 1284T499 1246T491 1199H306Q306 1254 327 1302T387 1387T482 1445T610 1467Q752
+1467 830 1403T909 1224Q909 1182 896 1147T856 1077T792 1006T705 929L563 814H935V667Z" />
+<glyph unicode="&#xb3;" glyph-name="threesuperior" horiz-adv-x="1229" d="M520 1131H607Q671 1131 702 1158T733 1229Q733 1247 727 1263T706 1291T670 1310T617 1318Q568 1318 536 1298T503 1243H318Q318 1296 341 1337T405 1407T498 1451T612 1466Q680 1466
+736 1452T833 1409T896 1338T918 1240Q918 1182 883 1138T783 1070Q856 1050 894 1005T932 887Q932 831 908 788T840 716T739 671T612 655Q556 655 501 668T403 710T334 784T307 894H492Q492 856 526 829T621 802Q685 802 715 830T746 900Q746 957 710 980T607
+1003H520V1131Z" />
+<glyph unicode="&#xb4;" glyph-name="acute" horiz-adv-x="1229" d="M577 1478H851L574 1205H392L577 1478Z" />
+<glyph unicode="&#xb5;" glyph-name="mu" horiz-adv-x="1229" d="M417 1082V462Q417 371 432 315T473 227T536 184T615 172Q706 172 760 204T844 295V1082H1083V0H864L857 88Q813 36 754 8T616 -21Q494 -21 417 35V-416H178V1082H417Z" />
+<glyph unicode="&#xb6;" glyph-name="paragraph" horiz-adv-x="1229" d="M786 0V520H703Q584 520 492 555T338 652T243 801T211 988Q211 1088 243 1174T337 1322T492 1420T703 1456H994V0H786Z" />
+<glyph unicode="&#xb7;" glyph-name="middot" horiz-adv-x="1229" d="M496 716Q496 744 505 768T532 811T576 839T635 849Q668 849 694 839T738 811T765 769T775 716Q775 688 766 664T738 622T694 594T635 584Q602 584 576 594T533 622T506 664T496 716Z" />
+<glyph unicode="&#xb8;" glyph-name="cedilla" horiz-adv-x="1229" d="M654 2L642 -51Q670 -56 697 -67T745 -99T779 -150T792 -224Q792 -274 772 -315T710 -385T609 -430T468 -446L461 -310Q491 -310 516 -305T559 -290T588 -262T599 -221Q599 -176 564 -159T448
+-136L479 2H654Z" />
+<glyph unicode="&#xb9;" glyph-name="onesuperior" horiz-adv-x="1229" d="M773 662H589V1234L388 1185V1330L754 1449H773V662Z" />
+<glyph unicode="&#xba;" glyph-name="ordmasculine" horiz-adv-x="1229" d="M267 1141Q267 1214 291 1275T360 1381T469 1451T613 1476Q694 1476 758 1451T867 1382T936 1276T960 1141V1024Q960 951 936 890T868 784T759 715T615 690Q535 690 471 715T361 784T292
+889T267 1024V1141ZM442 1024Q442 983 452 949T484 890T538 850T615 836Q658 836 690 850T743 889T774 949T785 1024V1141Q785 1181 775 1215T743 1274T689 1313T613 1328Q570 1328 538 1314T485 1274T453 1215T442 1141V1024Z" />
+<glyph unicode="&#xbb;" glyph-name="guillemotright" horiz-adv-x="1229" d="M399 938L688 548V529L399 139H233L481 538L233 938H399ZM760 938L1049 548V529L760 139H594L842 538L594 938H760Z" />
+<glyph unicode="&#xbc;" glyph-name="onequarter" horiz-adv-x="1229" d="M404 751H238V1266L57 1222V1352L386 1459H404V751ZM417 232L291 300L802 1238L928 1170L417 232ZM1082 282H1169V147H1082V0H914V147H593L585 255L913 710H1082V282ZM747 282H914V499L904
+482L747 282Z" />
+<glyph unicode="&#xbd;" glyph-name="onehalf" horiz-adv-x="1229" d="M607 232L481 300L992 1238L1118 1170L607 232ZM375 749H209V1264L28 1220V1350L357 1457H375V749ZM1191 0H641V112L899 349Q955 399 977 437T1000 500Q1000 538 975 562T904 587Q875 587
+854 579T819 555T798 521T791 479H625Q625 528 643 572T697 648T783 700T898 720Q1026 720 1096 662T1167 501Q1167 464 1155 432T1120 369T1062 306T984 236L856 132H1191V0Z" />
+<glyph unicode="&#xbe;" glyph-name="threequarters" horiz-adv-x="1229" d="M493 232L367 300L878 1238L1004 1170L493 232ZM1122 282H1209V147H1122V0H954V147H633L625 255L953 710H1122V282ZM787 282H954V499L944 482L787 282ZM226 1163H305Q362 1163 390 1187T418
+1251Q418 1267 412 1281T394 1306T362 1324T314 1331Q270 1331 241 1313T211 1263H45Q45 1311 66 1348T123 1411T207 1450T309 1464Q370 1464 421 1451T508 1413T565 1349T585 1261Q585 1208 553 1169T463 1108Q529 1090 563 1050T597 943Q597 893 575 854T515
+789T423 748T309 734Q259 734 210 746T121 784T59 851T35 949H201Q201 915 232 891T317 866Q375 866 402 891T430 955Q430 1006 397 1026T305 1047H226V1163Z" />
+<glyph unicode="&#xbf;" glyph-name="questiondown" horiz-adv-x="1229" d="M761 655Q760 584 757 537T740 453T698 383T621 309Q591 275 559 239T500 165T456 84T439 -4Q439 -98 488 -146T628 -194Q665 -194 700 -184T763 -152T807 -96T824 -14H1064Q1062 -107
+1029 -178T937 -297T800 -369T630 -393Q528 -393 449 -368T314 -293T229 -172T200 -7Q200 63 223 124T284 240T367 342T459 433Q489 462 504 485T527 533T535 587T536 655H761ZM520 963Q520 989 527 1012T549 1052T588 1078T646 1088Q680 1088 704 1079T744 1052T767
+1012T775 963Q775 938 768 916T745 878T705 853T646 843Q612 843 588 852T549 878T527 916T520 963Z" />
+<glyph unicode="&#xc0;" glyph-name="Agrave" horiz-adv-x="1229" d="M855 338H400L300 0H52L525 1456H736L1200 0H953L855 338ZM461 543H795L630 1114L461 543ZM730 1553H540L281 1826H554L730 1553Z" />
+<glyph unicode="&#xc1;" glyph-name="Aacute" horiz-adv-x="1229" d="M855 338H400L300 0H52L525 1456H736L1200 0H953L855 338ZM461 543H795L630 1114L461 543ZM712 1823H986L709 1550H527L712 1823Z" />
+<glyph unicode="&#xc2;" glyph-name="Acircumflex" horiz-adv-x="1229" d="M855 338H400L300 0H52L525 1456H736L1200 0H953L855 338ZM461 543H795L630 1114L461 543ZM973 1624V1597H781L631 1734L482 1597H292V1626L561 1866H701L973 1624Z" />
+<glyph unicode="&#xc3;" glyph-name="Atilde" horiz-adv-x="1229" d="M855 338H400L300 0H52L525 1456H736L1200 0H953L855 338ZM461 543H795L630 1114L461 543ZM987 1854Q987 1808 972 1766T928 1690T861 1638T774 1618Q724 1618 690 1632T625 1663T565 1694T494
+1708Q475 1708 458 1701T428 1679T408 1646T400 1604L275 1632Q275 1677 290 1720T333 1797T400 1851T487 1872Q528 1872 562 1858T629 1827T695 1796T767 1782Q786 1782 803 1789T834 1810T854 1843T862 1886L987 1854Z" />
+<glyph unicode="&#xc4;" glyph-name="Adieresis" horiz-adv-x="1229" d="M855 338H400L300 0H52L525 1456H736L1200 0H953L855 338ZM461 543H795L630 1114L461 543ZM274 1715Q274 1765 307 1799T400 1833Q459 1833 492 1799T526 1715Q526 1665 493 1632T400 1598Q341
+1598 308 1631T274 1715ZM729 1714Q729 1764 762 1797T854 1831Q913 1831 946 1798T980 1714Q980 1664 947 1630T854 1596Q795 1596 762 1630T729 1714Z" />
+<glyph unicode="&#xc5;" glyph-name="Aring" horiz-adv-x="1229" d="M855 338H400L300 0H52L525 1456H736L1200 0H953L855 338ZM461 543H795L630 1114L461 543ZM420 1734Q420 1777 437 1813T482 1875T549 1917T632 1932Q675 1932 713 1917T779 1876T823 1813T839
+1734Q839 1690 823 1655T779 1593T713 1553T632 1539Q588 1539 550 1553T483 1593T437 1654T420 1734ZM529 1734Q529 1712 537 1694T559 1662T592 1640T632 1632Q674 1632 702 1660T730 1734Q730 1780 702 1809T632 1838Q611 1838 593 1831T560 1809T537 1776T529
+1734Z" />
+<glyph unicode="&#xc6;" glyph-name="AE" horiz-adv-x="1229" d="M1189 0H652V340H392L286 0H37L558 1456H1167V1262H880V850H1136V656H880V193H1189V0ZM652 553V1167L459 553H652Z" />
+<glyph unicode="&#xc7;" glyph-name="Ccedilla" horiz-adv-x="1229" d="M1132 447Q1122 342 1082 256T976 108T821 13T621 -21Q488 -21 389 30T224 170T125 376T91 628V827Q92 960 126 1078T226 1285T392 1425T622 1477Q737 1477 827 1443T981 1347T1083 1196T1132
+998H892Q886 1062 868 1114T817 1204T737 1262T622 1282Q541 1282 486 1245T397 1146T349 1001T334 829V628Q334 534 348 452T396 307T484 209T621 173Q747 173 813 244T891 447H1132ZM708 4L696 -49Q724 -54 751 -65T799 -97T833 -148T846 -222Q846 -272 826 -313T764
+-383T663 -428T522 -444L515 -308Q545 -308 570 -303T613 -288T642 -260T653 -219Q653 -174 618 -157T502 -134L533 4H708Z" />
+<glyph unicode="&#xc8;" glyph-name="Egrave" horiz-adv-x="1229" d="M1002 653H403V196H1104V0H162V1456H1097V1258H403V847H1002V653ZM735 1548H545L286 1821H559L735 1548Z" />
+<glyph unicode="&#xc9;" glyph-name="Eacute" horiz-adv-x="1229" d="M1002 653H403V196H1104V0H162V1456H1097V1258H403V847H1002V653ZM717 1818H991L714 1545H532L717 1818Z" />
+<glyph unicode="&#xca;" glyph-name="Ecircumflex" horiz-adv-x="1229" d="M1002 653H403V196H1104V0H162V1456H1097V1258H403V847H1002V653ZM978 1619V1592H786L636 1729L487 1592H297V1621L566 1861H706L978 1619Z" />
+<glyph unicode="&#xcb;" glyph-name="Edieresis" horiz-adv-x="1229" d="M1002 653H403V196H1104V0H162V1456H1097V1258H403V847H1002V653ZM279 1710Q279 1760 312 1794T405 1828Q464 1828 497 1794T531 1710Q531 1660 498 1627T405 1593Q346 1593 313 1626T279
+1710ZM734 1709Q734 1759 767 1792T859 1826Q918 1826 951 1793T985 1709Q985 1659 952 1625T859 1591Q800 1591 767 1625T734 1709Z" />
+<glyph unicode="&#xcc;" glyph-name="Igrave" horiz-adv-x="1229" d="M181 1456H1047V1257H732V198H1047V0H181V198H489V1257H181V1456ZM679 1553H489L230 1826H503L679 1553Z" />
+<glyph unicode="&#xcd;" glyph-name="Iacute" horiz-adv-x="1229" d="M181 1456H1047V1257H732V198H1047V0H181V198H489V1257H181V1456ZM661 1823H935L658 1550H476L661 1823Z" />
+<glyph unicode="&#xce;" glyph-name="Icircumflex" horiz-adv-x="1229" d="M181 1456H1047V1257H732V198H1047V0H181V198H489V1257H181V1456ZM922 1624V1597H730L580 1734L431 1597H241V1626L510 1866H650L922 1624Z" />
+<glyph unicode="&#xcf;" glyph-name="Idieresis" horiz-adv-x="1229" d="M181 1456H1047V1257H732V198H1047V0H181V198H489V1257H181V1456ZM224 1715Q224 1765 257 1799T350 1833Q409 1833 442 1799T476 1715Q476 1665 443 1632T350 1598Q291 1598 258 1631T224
+1715ZM679 1714Q679 1764 712 1797T804 1831Q863 1831 896 1798T930 1714Q930 1664 897 1630T804 1596Q745 1596 712 1630T679 1714Z" />
+<glyph unicode="&#xd0;" glyph-name="Eth" horiz-adv-x="1259" d="M167 0V651H-35V821H167V1456H523Q673 1455 792 1406T995 1268T1124 1056T1170 782V672Q1169 522 1124 399T996 187T795 49T530 0H167ZM632 651H411V193H530Q629 194 703 228T826 325T900 476T926
+672V784Q925 889 901 976T826 1126T701 1225T523 1262H411V821H632V651Z" />
+<glyph unicode="&#xd1;" glyph-name="Ntilde" horiz-adv-x="1229" d="M1091 0H849L375 1002L374 0H131V1456H374L846 457L848 1456H1091V0ZM955 1854Q955 1808 940 1766T896 1690T829 1638T742 1618Q692 1618 658 1632T593 1663T533 1694T462 1708Q443 1708 426
+1701T396 1679T376 1646T368 1604L243 1632Q243 1677 258 1720T301 1797T368 1851T455 1872Q496 1872 530 1858T597 1827T663 1796T735 1782Q754 1782 771 1789T802 1810T822 1843T830 1886L955 1854Z" />
+<glyph unicode="&#xd2;" glyph-name="Ograve" horiz-adv-x="1229" d="M1136 634Q1136 505 1103 387T1005 178T841 34T610 -20Q479 -20 382 34T221 178T124 387T91 634V820Q91 906 105 988T148 1142T219 1276T319 1382T449 1451T609 1476Q741 1476 839 1422T1004
+1278T1103 1068T1136 820V634ZM895 822Q894 904 880 985T833 1129T746 1232T609 1272Q528 1272 475 1233T390 1129T346 985T332 822V634Q333 552 346 472T390 327T475 222T610 182Q692 182 746 222T834 326T880 471T895 634V822ZM724 1574H534L275 1847H548L724
+1574Z" />
+<glyph unicode="&#xd3;" glyph-name="Oacute" horiz-adv-x="1229" d="M1136 634Q1136 505 1103 387T1005 178T841 34T610 -20Q479 -20 382 34T221 178T124 387T91 634V820Q91 906 105 988T148 1142T219 1276T319 1382T449 1451T609 1476Q741 1476 839 1422T1004
+1278T1103 1068T1136 820V634ZM895 822Q894 904 880 985T833 1129T746 1232T609 1272Q528 1272 475 1233T390 1129T346 985T332 822V634Q333 552 346 472T390 327T475 222T610 182Q692 182 746 222T834 326T880 471T895 634V822ZM706 1844H980L703 1571H521L706
+1844Z" />
+<glyph unicode="&#xd4;" glyph-name="Ocircumflex" horiz-adv-x="1229" d="M1136 634Q1136 505 1103 387T1005 178T841 34T610 -20Q479 -20 382 34T221 178T124 387T91 634V820Q91 906 105 988T148 1142T219 1276T319 1382T449 1451T609 1476Q741 1476 839 1422T1004
+1278T1103 1068T1136 820V634ZM895 822Q894 904 880 985T833 1129T746 1232T609 1272Q528 1272 475 1233T390 1129T346 985T332 822V634Q333 552 346 472T390 327T475 222T610 182Q692 182 746 222T834 326T880 471T895 634V822ZM967 1645V1618H775L625 1755L476
+1618H286V1647L555 1887H695L967 1645Z" />
+<glyph unicode="&#xd5;" glyph-name="Otilde" horiz-adv-x="1229" d="M1136 634Q1136 505 1103 387T1005 178T841 34T610 -20Q479 -20 382 34T221 178T124 387T91 634V820Q91 906 105 988T148 1142T219 1276T319 1382T449 1451T609 1476Q741 1476 839 1422T1004
+1278T1103 1068T1136 820V634ZM895 822Q894 904 880 985T833 1129T746 1232T609 1272Q528 1272 475 1233T390 1129T346 985T332 822V634Q333 552 346 472T390 327T475 222T610 182Q692 182 746 222T834 326T880 471T895 634V822ZM981 1875Q981 1829 966 1787T922
+1711T855 1659T768 1639Q718 1639 684 1653T619 1684T559 1715T488 1729Q469 1729 452 1722T422 1700T402 1667T394 1625L269 1653Q269 1698 284 1741T327 1818T394 1872T481 1893Q522 1893 556 1879T623 1848T689 1817T761 1803Q780 1803 797 1810T828 1831T848
+1864T856 1907L981 1875Z" />
+<glyph unicode="&#xd6;" glyph-name="Odieresis" horiz-adv-x="1229" d="M1136 634Q1136 505 1103 387T1005 178T841 34T610 -20Q479 -20 382 34T221 178T124 387T91 634V820Q91 906 105 988T148 1142T219 1276T319 1382T449 1451T609 1476Q741 1476 839 1422T1004
+1278T1103 1068T1136 820V634ZM895 822Q894 904 880 985T833 1129T746 1232T609 1272Q528 1272 475 1233T390 1129T346 985T332 822V634Q333 552 346 472T390 327T475 222T610 182Q692 182 746 222T834 326T880 471T895 634V822ZM269 1736Q269 1786 302 1820T395
+1854Q454 1854 487 1820T521 1736Q521 1686 488 1653T395 1619Q336 1619 303 1652T269 1736ZM724 1735Q724 1785 757 1818T849 1852Q908 1852 941 1819T975 1735Q975 1685 942 1651T849 1617Q790 1617 757 1651T724 1735Z" />
+<glyph unicode="&#xd7;" glyph-name="multiply" horiz-adv-x="1229" d="M167 363L484 686L167 1009L314 1157L630 835L946 1157L1093 1009L776 686L1093 363L946 215L630 537L314 215L167 363Z" />
+<glyph unicode="&#xd8;" glyph-name="Oslash" horiz-adv-x="1229" d="M1134 634Q1134 505 1101 387T1003 178T839 34T608 -20Q520 -20 449 4T319 73L222 -95H55L215 182Q152 273 121 389T89 634V820Q89 906 103 988T146 1142T217 1276T317 1382T447 1451T607 1476Q698
+1476 772 1450T906 1377L988 1518H1156L1010 1265Q1072 1175 1102 1061T1134 820V634ZM330 634Q330 580 335 527T354 422L795 1185Q762 1225 716 1248T607 1272Q526 1272 473 1233T388 1129T344 985T330 822V634ZM893 822Q892 874 887 925T870 1024L429 263Q461
+225 504 204T608 182Q690 182 744 222T832 326T878 471T893 634V822Z" />
+<glyph unicode="&#xd9;" glyph-name="Ugrave" horiz-adv-x="1229" d="M1096 1456L1098 481Q1097 369 1062 277T964 118T812 16T611 -20Q503 -20 415 16T264 117T166 275T130 481L132 1456H368L370 481Q371 414 387 358T433 262T507 199T611 176Q671 176 717 198T794
+261T841 358T858 481L860 1456H1096ZM743 1545H553L294 1818H567L743 1545Z" />
+<glyph unicode="&#xda;" glyph-name="Uacute" horiz-adv-x="1229" d="M1096 1456L1098 481Q1097 369 1062 277T964 118T812 16T611 -20Q503 -20 415 16T264 117T166 275T130 481L132 1456H368L370 481Q371 414 387 358T433 262T507 199T611 176Q671 176 717 198T794
+261T841 358T858 481L860 1456H1096ZM725 1815H999L722 1542H540L725 1815Z" />
+<glyph unicode="&#xdb;" glyph-name="Ucircumflex" horiz-adv-x="1229" d="M1096 1456L1098 481Q1097 369 1062 277T964 118T812 16T611 -20Q503 -20 415 16T264 117T166 275T130 481L132 1456H368L370 481Q371 414 387 358T433 262T507 199T611 176Q671 176 717
+198T794 261T841 358T858 481L860 1456H1096ZM986 1616V1589H794L644 1726L495 1589H305V1618L574 1858H714L986 1616Z" />
+<glyph unicode="&#xdc;" glyph-name="Udieresis" horiz-adv-x="1229" d="M1096 1456L1098 481Q1097 369 1062 277T964 118T812 16T611 -20Q503 -20 415 16T264 117T166 275T130 481L132 1456H368L370 481Q371 414 387 358T433 262T507 199T611 176Q671 176 717
+198T794 261T841 358T858 481L860 1456H1096ZM287 1707Q287 1757 320 1791T413 1825Q472 1825 505 1791T539 1707Q539 1657 506 1624T413 1590Q354 1590 321 1623T287 1707ZM742 1706Q742 1756 775 1789T867 1823Q926 1823 959 1790T993 1706Q993 1656 960 1622T867
+1588Q808 1588 775 1622T742 1706Z" />
+<glyph unicode="&#xdd;" glyph-name="Yacute" horiz-adv-x="1229" d="M616 760L919 1456H1187L733 523L731 0H498L496 529L45 1456H313L616 760ZM700 1823H974L697 1550H515L700 1823Z" />
+<glyph unicode="&#xde;" glyph-name="Thorn" horiz-adv-x="1229" d="M389 1456V1175H636Q760 1175 855 1143T1016 1052T1114 913T1148 738Q1148 643 1115 563T1016 425T856 333T636 300H389V0H149V1456H389ZM389 983V492H636Q709 492 760 512T845 567T893 645T908
+736Q908 783 893 827T845 906T761 962T636 983H389Z" />
+<glyph unicode="&#xdf;" glyph-name="germandbls" horiz-adv-x="1229" d="M401 0H163V1079Q163 1196 195 1286T285 1437T422 1529T595 1561Q673 1561 742 1540T863 1474T945 1363T976 1206Q976 1154 967 1116T945 1047T916 992T887 942T865 890T856 827Q856 787
+877 753T930 685T999 616T1068 540T1121 448T1143 334Q1143 246 1117 180T1043 69T925 2T770 -21Q730 -21 687 -17T605 -4T533 17T480 43L529 236Q545 226 569 215T624 193T687 177T755 170Q793 170 821 181T867 214T895 263T904 326Q904 370 883 405T830 474T761
+540T692 614T639 705T617 823Q617 864 626 897T650 958T681 1011T712 1064T735 1121T745 1190Q745 1233 732 1266T696 1323T647 1357T592 1369Q551 1369 517 1351T456 1297T416 1206T401 1077V0Z" />
+<glyph unicode="&#xe0;" glyph-name="agrave" horiz-adv-x="1229" d="M845 0Q835 20 829 47T817 105Q792 79 761 57T691 17T608 -10T512 -20Q426 -20 357 4T238 72T161 175T134 303Q134 477 263 569T635 661H810V732Q810 818 752 868T588 919Q540 919 504 908T444
+876T408 829T396 771H157Q157 834 186 893T273 999T412 1074T601 1102Q697 1102 779 1079T921 1009T1015 893T1049 730V248Q1049 177 1059 119T1090 17V0H845ZM557 173Q603 173 643 184T716 214T772 257T810 306V509H658Q512 509 443 462T373 328Q373 295 384 267T417
+218T474 185T557 173ZM709 1231H519L260 1504H533L709 1231Z" />
+<glyph unicode="&#xe1;" glyph-name="aacute" horiz-adv-x="1229" d="M845 0Q835 20 829 47T817 105Q792 79 761 57T691 17T608 -10T512 -20Q426 -20 357 4T238 72T161 175T134 303Q134 477 263 569T635 661H810V732Q810 818 752 868T588 919Q540 919 504 908T444
+876T408 829T396 771H157Q157 834 186 893T273 999T412 1074T601 1102Q697 1102 779 1079T921 1009T1015 893T1049 730V248Q1049 177 1059 119T1090 17V0H845ZM557 173Q603 173 643 184T716 214T772 257T810 306V509H658Q512 509 443 462T373 328Q373 295 384 267T417
+218T474 185T557 173ZM691 1501H965L688 1228H506L691 1501Z" />
+<glyph unicode="&#xe2;" glyph-name="acircumflex" horiz-adv-x="1229" d="M845 0Q835 20 829 47T817 105Q792 79 761 57T691 17T608 -10T512 -20Q426 -20 357 4T238 72T161 175T134 303Q134 477 263 569T635 661H810V732Q810 818 752 868T588 919Q540 919 504
+908T444 876T408 829T396 771H157Q157 834 186 893T273 999T412 1074T601 1102Q697 1102 779 1079T921 1009T1015 893T1049 730V248Q1049 177 1059 119T1090 17V0H845ZM557 173Q603 173 643 184T716 214T772 257T810 306V509H658Q512 509 443 462T373 328Q373 295
+384 267T417 218T474 185T557 173ZM952 1302V1275H760L610 1412L461 1275H271V1304L540 1544H680L952 1302Z" />
+<glyph unicode="&#xe3;" glyph-name="atilde" horiz-adv-x="1229" d="M845 0Q835 20 829 47T817 105Q792 79 761 57T691 17T608 -10T512 -20Q426 -20 357 4T238 72T161 175T134 303Q134 477 263 569T635 661H810V732Q810 818 752 868T588 919Q540 919 504 908T444
+876T408 829T396 771H157Q157 834 186 893T273 999T412 1074T601 1102Q697 1102 779 1079T921 1009T1015 893T1049 730V248Q1049 177 1059 119T1090 17V0H845ZM557 173Q603 173 643 184T716 214T772 257T810 306V509H658Q512 509 443 462T373 328Q373 295 384 267T417
+218T474 185T557 173ZM966 1532Q966 1486 951 1444T907 1368T840 1316T753 1296Q703 1296 669 1310T604 1341T544 1372T473 1386Q454 1386 437 1379T407 1357T387 1324T379 1282L254 1310Q254 1355 269 1398T312 1475T379 1529T466 1550Q507 1550 541 1536T608
+1505T674 1474T746 1460Q765 1460 782 1467T813 1488T833 1521T841 1564L966 1532Z" />
+<glyph unicode="&#xe4;" glyph-name="adieresis" horiz-adv-x="1229" d="M845 0Q835 20 829 47T817 105Q792 79 761 57T691 17T608 -10T512 -20Q426 -20 357 4T238 72T161 175T134 303Q134 477 263 569T635 661H810V732Q810 818 752 868T588 919Q540 919 504 908T444
+876T408 829T396 771H157Q157 834 186 893T273 999T412 1074T601 1102Q697 1102 779 1079T921 1009T1015 893T1049 730V248Q1049 177 1059 119T1090 17V0H845ZM557 173Q603 173 643 184T716 214T772 257T810 306V509H658Q512 509 443 462T373 328Q373 295 384 267T417
+218T474 185T557 173ZM510 1393Q510 1443 543 1477T636 1511Q695 1511 728 1477T762 1393Q762 1343 729 1310T636 1276Q577 1276 544 1309T510 1393ZM965 1392Q965 1442 998 1475T1090 1509Q1149 1509 1182 1476T1216 1392Q1216 1342 1183 1308T1090 1274Q1031
+1274 998 1308T965 1392Z" />
+<glyph unicode="&#xe5;" glyph-name="aring" horiz-adv-x="1229" d="M845 0Q835 20 829 47T817 105Q792 79 761 57T691 17T608 -10T512 -20Q426 -20 357 4T238 72T161 175T134 303Q134 477 263 569T635 661H810V732Q810 818 752 868T588 919Q540 919 504 908T444
+876T408 829T396 771H157Q157 834 186 893T273 999T412 1074T601 1102Q697 1102 779 1079T921 1009T1015 893T1049 730V248Q1049 177 1059 119T1090 17V0H845ZM557 173Q603 173 643 184T716 214T772 257T810 306V509H658Q512 509 443 462T373 328Q373 295 384 267T417
+218T474 185T557 173ZM400 1412Q400 1455 417 1491T462 1553T529 1595T612 1610Q655 1610 693 1595T759 1554T803 1491T819 1412Q819 1368 803 1333T759 1271T693 1231T612 1217Q568 1217 530 1231T463 1271T417 1332T400 1412ZM509 1412Q509 1390 517 1372T539
+1340T572 1318T612 1310Q654 1310 682 1338T710 1412Q710 1458 682 1487T612 1516Q591 1516 573 1509T540 1487T517 1454T509 1412Z" />
+<glyph unicode="&#xe6;" glyph-name="ae" horiz-adv-x="1229" d="M882 -20Q786 -20 715 13T598 109Q580 83 556 60T501 19T433 -9T351 -20Q277 -20 220 2T123 64T64 162T44 291Q44 460 154 551T477 642H503V773Q503 838 479 873T403 909Q351 909 322 878T292 782L64
+791Q64 862 87 919T153 1017T257 1080T395 1102Q470 1102 530 1080T633 1015Q676 1057 733 1080T863 1102Q940 1102 1001 1075T1106 998T1172 877T1195 715V472H719V387Q719 284 767 228T927 171Q963 171 990 176T1041 190T1083 208T1121 228L1172 60Q1156 48 1130
+34T1067 8T984 -12T882 -20ZM719 642H972V763Q972 794 964 821T941 867T904 898T853 909Q815 909 790 894T748 850T726 785T719 704V642ZM406 162Q430 162 456 174T503 206V476H479Q432 476 395 461T333 419T294 359T280 287Q280 233 311 198T406 162Z" />
+<glyph unicode="&#xe7;" glyph-name="ccedilla" horiz-adv-x="1229" d="M628 170Q672 170 713 184T787 223T837 282T855 356H1081Q1082 278 1046 210T947 90T802 9T630 -21Q505 -21 411 22T254 139T160 312T128 522V558Q128 669 159 768T254 941T411 1058T629
+1102Q727 1102 810 1072T954 988T1048 859T1081 695H855Q856 739 840 778T792 846T719 892T627 909Q552 909 502 879T422 799T380 686T367 558V522Q367 456 379 393T422 281T502 201T628 170ZM719 4L707 -49Q735 -54 762 -65T810 -97T844 -148T857 -222Q857 -272
+837 -313T775 -383T674 -428T533 -444L526 -308Q556 -308 581 -303T624 -288T653 -260T664 -219Q664 -174 629 -157T513 -134L544 4H719Z" />
+<glyph unicode="&#xe8;" glyph-name="egrave" horiz-adv-x="1229" d="M659 -20Q539 -20 440 20T271 130T161 295T122 502V543Q122 672 163 775T274 951T436 1063T631 1102Q749 1102 839 1063T989 952T1081 782T1112 567V466H362Q368 403 393 349T458 256T553 194T673
+171Q764 171 840 207T965 306L1095 184Q1069 145 1028 109T932 44T809 -2T659 -20ZM630 909Q580 909 537 891T460 839T403 755T369 642H876V660Q873 708 858 752T812 832T737 888T630 909ZM709 1232H519L260 1505H533L709 1232Z" />
+<glyph unicode="&#xe9;" glyph-name="eacute" horiz-adv-x="1229" d="M659 -20Q539 -20 440 20T271 130T161 295T122 502V543Q122 672 163 775T274 951T436 1063T631 1102Q749 1102 839 1063T989 952T1081 782T1112 567V466H362Q368 403 393 349T458 256T553 194T673
+171Q764 171 840 207T965 306L1095 184Q1069 145 1028 109T932 44T809 -2T659 -20ZM630 909Q580 909 537 891T460 839T403 755T369 642H876V660Q873 708 858 752T812 832T737 888T630 909ZM691 1502H965L688 1229H506L691 1502Z" />
+<glyph unicode="&#xea;" glyph-name="ecircumflex" horiz-adv-x="1229" d="M659 -20Q539 -20 440 20T271 130T161 295T122 502V543Q122 672 163 775T274 951T436 1063T631 1102Q749 1102 839 1063T989 952T1081 782T1112 567V466H362Q368 403 393 349T458 256T553
+194T673 171Q764 171 840 207T965 306L1095 184Q1069 145 1028 109T932 44T809 -2T659 -20ZM630 909Q580 909 537 891T460 839T403 755T369 642H876V660Q873 708 858 752T812 832T737 888T630 909ZM952 1303V1276H760L610 1413L461 1276H271V1305L540 1545H680L952
+1303Z" />
+<glyph unicode="&#xeb;" glyph-name="edieresis" horiz-adv-x="1229" d="M659 -20Q539 -20 440 20T271 130T161 295T122 502V543Q122 672 163 775T274 951T436 1063T631 1102Q749 1102 839 1063T989 952T1081 782T1112 567V466H362Q368 403 393 349T458 256T553
+194T673 171Q764 171 840 207T965 306L1095 184Q1069 145 1028 109T932 44T809 -2T659 -20ZM630 909Q580 909 537 891T460 839T403 755T369 642H876V660Q873 708 858 752T812 832T737 888T630 909ZM510 1394Q510 1444 543 1478T636 1512Q695 1512 728 1478T762
+1394Q762 1344 729 1311T636 1277Q577 1277 544 1310T510 1394ZM965 1393Q965 1443 998 1476T1090 1510Q1149 1510 1182 1477T1216 1393Q1216 1343 1183 1309T1090 1275Q1031 1275 998 1309T965 1393Z" />
+<glyph unicode="&#xec;" glyph-name="igrave" horiz-adv-x="1229" d="M199 1082H784V198H1114V0H199V198H543V883H199V1082ZM1022 1205H832L573 1478H846L1022 1205Z" />
+<glyph unicode="&#xed;" glyph-name="iacute" horiz-adv-x="1229" d="M199 1082H784V198H1114V0H199V198H543V883H199V1082ZM748 1476H1022L745 1203H563L748 1476Z" />
+<glyph unicode="&#xee;" glyph-name="icircumflex" horiz-adv-x="1229" d="M199 1082H784V198H1114V0H199V198H543V883H199V1082ZM1009 1276V1249H817L667 1386L518 1249H328V1278L597 1518H737L1009 1276Z" />
+<glyph unicode="&#xef;" glyph-name="idieresis" horiz-adv-x="1229" d="M199 1082H784V198H1114V0H199V198H543V883H199V1082ZM310 1367Q310 1417 343 1451T436 1485Q495 1485 528 1451T562 1367Q562 1317 529 1284T436 1250Q377 1250 344 1283T310 1367ZM765
+1366Q765 1416 798 1449T890 1483Q949 1483 982 1450T1016 1366Q1016 1316 983 1282T890 1248Q831 1248 798 1282T765 1366Z" />
+<glyph unicode="&#xf0;" glyph-name="eth" horiz-adv-x="1229" d="M877 1275Q976 1158 1031 999T1087 641V577Q1087 439 1047 328T937 140T770 21T563 -21Q450 -21 356 17T195 121T91 277T54 468Q54 582 90 675T190 834T345 936T545 973Q626 973 696 946T822 871Q800
+963 762 1034T673 1160L416 1016L338 1124L565 1252Q495 1297 420 1323L494 1516Q653 1476 779 1372L994 1493L1072 1384L877 1275ZM847 643Q847 651 847 658T846 674Q830 696 804 716T741 752T657 777T552 787Q492 787 445 761T363 691T311 590T293 468Q293 411
+311 358T364 262T450 195T567 170Q631 170 682 198T770 278T827 402T847 564V643Z" />
+<glyph unicode="&#xf1;" glyph-name="ntilde" horiz-adv-x="1229" d="M371 1082L386 932Q445 1013 528 1057T714 1102Q797 1102 865 1078T983 1003T1060 871T1088 676V0H849V672Q849 739 834 784T790 856T720 894T627 906Q548 906 490 871T395 776V0H155V1082H371ZM962
+1532Q962 1486 947 1444T903 1368T836 1316T749 1296Q699 1296 665 1310T600 1341T540 1372T469 1386Q450 1386 433 1379T403 1357T383 1324T375 1282L250 1310Q250 1355 265 1398T308 1475T375 1529T462 1550Q503 1550 537 1536T604 1505T670 1474T742 1460Q761
+1460 778 1467T809 1488T829 1521T837 1564L962 1532Z" />
+<glyph unicode="&#xf2;" glyph-name="ograve" horiz-adv-x="1229" d="M111 551Q111 668 145 769T244 944T402 1060T613 1102Q732 1102 825 1060T983 944T1082 769T1116 551V530Q1116 412 1082 311T984 137T826 21T615 -21Q496 -21 403 21T245 136T146 311T111
+530V551ZM350 530Q350 456 365 391T413 277T496 199T615 170Q682 170 731 198T813 276T861 391T877 530V551Q877 623 862 688T814 802T732 880T613 909Q545 909 495 880T413 802T366 688T350 551V530ZM716 1231H526L267 1504H540L716 1231Z" />
+<glyph unicode="&#xf3;" glyph-name="oacute" horiz-adv-x="1229" d="M111 551Q111 668 145 769T244 944T402 1060T613 1102Q732 1102 825 1060T983 944T1082 769T1116 551V530Q1116 412 1082 311T984 137T826 21T615 -21Q496 -21 403 21T245 136T146 311T111
+530V551ZM350 530Q350 456 365 391T413 277T496 199T615 170Q682 170 731 198T813 276T861 391T877 530V551Q877 623 862 688T814 802T732 880T613 909Q545 909 495 880T413 802T366 688T350 551V530ZM698 1501H972L695 1228H513L698 1501Z" />
+<glyph unicode="&#xf4;" glyph-name="ocircumflex" horiz-adv-x="1229" d="M111 551Q111 668 145 769T244 944T402 1060T613 1102Q732 1102 825 1060T983 944T1082 769T1116 551V530Q1116 412 1082 311T984 137T826 21T615 -21Q496 -21 403 21T245 136T146 311T111
+530V551ZM350 530Q350 456 365 391T413 277T496 199T615 170Q682 170 731 198T813 276T861 391T877 530V551Q877 623 862 688T814 802T732 880T613 909Q545 909 495 880T413 802T366 688T350 551V530ZM959 1302V1275H767L617 1412L468 1275H278V1304L547 1544H687L959
+1302Z" />
+<glyph unicode="&#xf5;" glyph-name="otilde" horiz-adv-x="1229" d="M111 551Q111 668 145 769T244 944T402 1060T613 1102Q732 1102 825 1060T983 944T1082 769T1116 551V530Q1116 412 1082 311T984 137T826 21T615 -21Q496 -21 403 21T245 136T146 311T111
+530V551ZM350 530Q350 456 365 391T413 277T496 199T615 170Q682 170 731 198T813 276T861 391T877 530V551Q877 623 862 688T814 802T732 880T613 909Q545 909 495 880T413 802T366 688T350 551V530ZM973 1532Q973 1486 958 1444T914 1368T847 1316T760 1296Q710
+1296 676 1310T611 1341T551 1372T480 1386Q461 1386 444 1379T414 1357T394 1324T386 1282L261 1310Q261 1355 276 1398T319 1475T386 1529T473 1550Q514 1550 548 1536T615 1505T681 1474T753 1460Q772 1460 789 1467T820 1488T840 1521T848 1564L973 1532Z"
+/>
+<glyph unicode="&#xf6;" glyph-name="odieresis" horiz-adv-x="1229" d="M111 551Q111 668 145 769T244 944T402 1060T613 1102Q732 1102 825 1060T983 944T1082 769T1116 551V530Q1116 412 1082 311T984 137T826 21T615 -21Q496 -21 403 21T245 136T146 311T111
+530V551ZM350 530Q350 456 365 391T413 277T496 199T615 170Q682 170 731 198T813 276T861 391T877 530V551Q877 623 862 688T814 802T732 880T613 909Q545 909 495 880T413 802T366 688T350 551V530ZM517 1393Q517 1443 550 1477T643 1511Q702 1511 735 1477T769
+1393Q769 1343 736 1310T643 1276Q584 1276 551 1309T517 1393ZM972 1392Q972 1442 1005 1475T1097 1509Q1156 1509 1189 1476T1223 1392Q1223 1342 1190 1308T1097 1274Q1038 1274 1005 1308T972 1392Z" />
+<glyph unicode="&#xf7;" glyph-name="divide" horiz-adv-x="1229" d="M1122 583H111V794H1122V583ZM482 1094Q482 1121 491 1144T517 1184T559 1210T616 1220Q648 1220 673 1211T715 1184T741 1144T750 1094Q750 1040 715 1005T616 969Q552 969 517 1004T482 1094ZM480
+279Q480 333 515 369T614 406Q678 406 713 370T748 279Q748 225 713 190T614 154Q550 154 515 189T480 279Z" />
+<glyph unicode="&#xf8;" glyph-name="oslash" horiz-adv-x="1229" d="M111 551Q111 668 145 769T244 944T402 1060T613 1102Q720 1102 811 1065L883 1212H1027L923 1001Q1017 926 1066 810T1116 551V530Q1116 412 1082 311T984 137T826 21T615 -21Q517 -21 435
+8L364 -138H220L321 67Q219 140 165 260T111 530V551ZM350 530Q350 452 367 385T419 268L722 885Q699 896 672 902T613 909Q545 909 495 880T413 802T366 688T350 551V530ZM877 551Q877 620 863 681T820 791L523 186Q562 170 615 170Q682 170 731 198T813 276T861
+391T877 530V551Z" />
+<glyph unicode="&#xf9;" glyph-name="ugrave" horiz-adv-x="1229" d="M849 148Q794 68 716 24T540 -21Q456 -21 387 5T268 87T191 231T163 442V1082H402V440Q402 362 413 311T449 230T509 188T595 175Q691 175 750 212T839 314V1082H1079V0H863L849 148ZM718 1210H528L269
+1483H542L718 1210Z" />
+<glyph unicode="&#xfa;" glyph-name="uacute" horiz-adv-x="1229" d="M849 148Q794 68 716 24T540 -21Q456 -21 387 5T268 87T191 231T163 442V1082H402V440Q402 362 413 311T449 230T509 188T595 175Q691 175 750 212T839 314V1082H1079V0H863L849 148ZM700 1480H974L697
+1207H515L700 1480Z" />
+<glyph unicode="&#xfb;" glyph-name="ucircumflex" horiz-adv-x="1229" d="M849 148Q794 68 716 24T540 -21Q456 -21 387 5T268 87T191 231T163 442V1082H402V440Q402 362 413 311T449 230T509 188T595 175Q691 175 750 212T839 314V1082H1079V0H863L849 148ZM961
+1281V1254H769L619 1391L470 1254H280V1283L549 1523H689L961 1281Z" />
+<glyph unicode="&#xfc;" glyph-name="udieresis" horiz-adv-x="1229" d="M849 148Q794 68 716 24T540 -21Q456 -21 387 5T268 87T191 231T163 442V1082H402V440Q402 362 413 311T449 230T509 188T595 175Q691 175 750 212T839 314V1082H1079V0H863L849 148ZM519
+1372Q519 1422 552 1456T645 1490Q704 1490 737 1456T771 1372Q771 1322 738 1289T645 1255Q586 1255 553 1288T519 1372ZM974 1371Q974 1421 1007 1454T1099 1488Q1158 1488 1191 1455T1225 1371Q1225 1321 1192 1287T1099 1253Q1040 1253 1007 1287T974 1371Z"
+/>
+<glyph unicode="&#xfd;" glyph-name="yacute" horiz-adv-x="1229" d="M584 433L625 303L929 1082H1194L646 -164Q628 -205 599 -252T528 -341T426 -409T288 -437Q255 -437 222 -432T162 -419L196 -233Q203 -233 215 -233T240 -235T264 -236T283 -237Q318 -237
+345 -217T394 -170T428 -118T448 -83L519 51L59 1082H323L584 433ZM721 1480H995L718 1207H536L721 1480Z" />
+<glyph unicode="&#xfe;" glyph-name="thorn" horiz-adv-x="1229" d="M1102 530Q1102 412 1075 311T994 137T862 22T681 -20Q588 -20 518 10T395 97V-416H156V1551H395V982Q447 1040 517 1071T679 1102Q782 1102 861 1062T993 950T1074 776T1102 551V530ZM863 551Q863
+624 849 688T804 801T725 878T611 906Q530 906 478 872T395 780V299Q425 242 477 207T613 171Q679 171 726 200T804 278T849 392T863 530V551Z" />
+<glyph unicode="&#xff;" glyph-name="ydieresis" horiz-adv-x="1229" d="M584 433L625 303L929 1082H1194L646 -164Q628 -205 599 -252T528 -341T426 -409T288 -437Q255 -437 222 -432T162 -419L196 -233Q203 -233 215 -233T240 -235T264 -236T283 -237Q318 -237
+345 -217T394 -170T428 -118T448 -83L519 51L59 1082H323L584 433ZM283 1372Q283 1422 316 1456T409 1490Q468 1490 501 1456T535 1372Q535 1322 502 1289T409 1255Q350 1255 317 1288T283 1372ZM738 1371Q738 1421 771 1454T863 1488Q922 1488 955 1455T989 1371Q989
+1321 956 1287T863 1253Q804 1253 771 1287T738 1371Z" />
+<glyph unicode="&#x2013;" glyph-name="endash" horiz-adv-x="1229" d="M1168 623H67V816H1168V623Z" />
+<glyph unicode="&#x2014;" glyph-name="emdash" horiz-adv-x="1229" d="M1168 623H67V816H1168V623Z" />
+<glyph unicode="&#x2018;" glyph-name="quoteleft" horiz-adv-x="1229" d="M488 1183Q488 1232 501 1286T538 1392T595 1490T670 1573L788 1495Q745 1427 721 1352T697 1185V1020H488V1183Z" />
+<glyph unicode="&#x2019;" glyph-name="quoteright" horiz-adv-x="1229" d="M760 1398Q760 1348 747 1295T710 1190T652 1091T577 1008L460 1086Q503 1155 527 1229T551 1397V1560H760V1398Z" />
+<glyph unicode="&#x201a;" glyph-name="quotesinglbase" horiz-adv-x="1229" d="M755 70Q755 21 742 -32T705 -138T648 -236T573 -319L450 -242Q493 -173 516 -99T539 68V247H756L755 70Z" />
+<glyph unicode="&#x201c;" glyph-name="quotedblleft" horiz-adv-x="1229" d="M324 1183Q324 1232 337 1286T374 1392T431 1490T506 1573L624 1495Q581 1427 557 1352T533 1185V1020H324V1183ZM659 1183Q659 1232 672 1286T709 1392T766 1490T841 1573L959 1495Q916
+1427 892 1352T868 1185V1020H659V1183Z" />
+<glyph unicode="&#x201d;" glyph-name="quotedblright" horiz-adv-x="1229" d="M593 1398Q593 1348 580 1295T543 1190T485 1091T410 1008L293 1086Q336 1155 360 1229T384 1397V1560H593V1398ZM941 1398Q941 1348 928 1295T891 1190T833 1091T758 1008L641 1086Q684
+1155 708 1229T732 1397V1560H941V1398Z" />
+<glyph unicode="&#x201e;" glyph-name="quotedblbase" horiz-adv-x="1229" d="M612 68Q612 19 600 -34T564 -140T507 -238T433 -321L307 -244Q350 -175 373 -101T396 66V229H613L612 68ZM926 68Q926 19 913 -34T876 -140T819 -238T744 -321L618 -244Q660 -175
+685 -101T710 66V229H926V68Z" />
+<glyph unicode="&#x2022;" glyph-name="bullet" horiz-adv-x="1229" d="M405 790Q405 837 420 876T464 944T533 989T625 1005Q677 1005 718 989T787 945T831 877T847 790V731Q847 684 832 646T788 579T718 535T626 519Q575 519 534 534T465 578T421 645T405 731V790Z" />
+<glyph unicode="&#x2039;" glyph-name="guilsinglleft" horiz-adv-x="1229" d="M598 538L846 139H680L392 529V548L680 938H846L598 538Z" />
+<glyph unicode="&#x203a;" glyph-name="guilsinglright" horiz-adv-x="1229" d="M554 938L843 548V529L554 139H388L636 538L388 938H554Z" />
+</font>
+</defs>
+</svg>
diff --git a/site/assets/fonts/Roboto_Mono_500.ttf b/site/assets/fonts/Roboto_Mono_500.ttf
new file mode 100644
index 00000000..036a9907
--- /dev/null
+++ b/site/assets/fonts/Roboto_Mono_500.ttf
Binary files differ
diff --git a/site/assets/fonts/Roboto_Mono_500.woff b/site/assets/fonts/Roboto_Mono_500.woff
new file mode 100644
index 00000000..b12f6a1e
--- /dev/null
+++ b/site/assets/fonts/Roboto_Mono_500.woff
Binary files differ
diff --git a/site/assets/fonts/Roboto_Mono_500.woff2 b/site/assets/fonts/Roboto_Mono_500.woff2
new file mode 100644
index 00000000..125f0983
--- /dev/null
+++ b/site/assets/fonts/Roboto_Mono_500.woff2
Binary files differ
diff --git a/site/assets/img/ajax-loader.gif b/site/assets/img/ajax-loader.gif
new file mode 100644
index 00000000..dc21df18
--- /dev/null
+++ b/site/assets/img/ajax-loader.gif
Binary files differ
diff --git a/site/assets/img/icon_camera.svg b/site/assets/img/icon_camera.svg
new file mode 100644
index 00000000..b349072e
--- /dev/null
+++ b/site/assets/img/icon_camera.svg
@@ -0,0 +1,2 @@
+<?xml version="1.0"?>
+<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 -256 1950 1950"><path d="m975.19 549.05q119 0 203.5 84.5t84.5 203.5-84.5 203.5-203.5 84.5-203.5-84.5-84.5-203.5 84.5-203.5 203.5-84.5m704-416q106 0 181 75t75 181v896q0 106-75 181t-181 75h-1408q-106 0-181-75t-75-181v-896q0-106 75-181t181-75h224l51-136q19-49 69.5-84.5t103.5-35.5h512q53 0 103.5 35.5t69.5 84.5l51 136h224m-704 1152q185 0 316.5-131.5t131.5-316.5-131.5-316.5-316.5-131.5-316.5 131.5-131.5 316.5 131.5 316.5 316.5 131.5"/></svg> \ No newline at end of file
diff --git a/site/assets/img/megapixels_logo.svg b/site/assets/img/megapixels_logo.svg
new file mode 100644
index 00000000..a9b7eff2
--- /dev/null
+++ b/site/assets/img/megapixels_logo.svg
@@ -0,0 +1,14 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Generator: Adobe Illustrator 16.0.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
+ width="92.718px" height="58.333px" viewBox="0 0 92.718 58.333" enable-background="new 0 0 92.718 58.333" xml:space="preserve">
+<g>
+ <polygon fill="#000001" points="66.937,28.509 71.575,23.871 76.214,28.509 78.942,25.78 74.304,21.142 78.942,16.503
+ 76.214,13.774 71.575,18.413 66.937,13.774 64.208,16.503 68.847,21.142 64.208,25.78 "/>
+ <polygon fill="#000001" points="34.288,0 27.144,0 17.144,0 10,0 0.144,0 0,0 0,44 10,44 10,10 17.144,10 17.144,44 27.144,44
+ 27.144,10 34.288,10 34.288,44 44.288,44 44.288,0 44.144,0 "/>
+ <path fill="#000001" d="M92.717,0h-10H60.432h-9.999h-0.001v16.049v26.235v16.049h10V42.284h22.286h10h0L92.717,0L92.717,0z
+ M82.718,32.284H60.432V16.049V10h22.286V32.284z"/>
+</g>
+</svg>
diff --git a/site/assets/img/megapixels_logo_white.svg b/site/assets/img/megapixels_logo_white.svg
new file mode 100644
index 00000000..061e30ab
--- /dev/null
+++ b/site/assets/img/megapixels_logo_white.svg
@@ -0,0 +1,14 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Generator: Adobe Illustrator 16.0.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
+ width="92.718px" height="58.333px" viewBox="0 0 92.718 58.333" enable-background="new 0 0 92.718 58.333" xml:space="preserve">
+<g>
+ <polygon fill="#ffffff" points="66.937,28.509 71.575,23.871 76.214,28.509 78.942,25.78 74.304,21.142 78.942,16.503
+ 76.214,13.774 71.575,18.413 66.937,13.774 64.208,16.503 68.847,21.142 64.208,25.78 "/>
+ <polygon fill="#ffffff" points="34.288,0 27.144,0 17.144,0 10,0 0.144,0 0,0 0,44 10,44 10,10 17.144,10 17.144,44 27.144,44
+ 27.144,10 34.288,10 34.288,44 44.288,44 44.288,0 44.144,0 "/>
+ <path fill="#ffffff" d="M92.717,0h-10H60.432h-9.999h-0.001v16.049v26.235v16.049h10V42.284h22.286h10h0L92.717,0L92.717,0z
+ M82.718,32.284H60.432V16.049V10h22.286V32.284z"/>
+</g>
+</svg>
diff --git a/site/assets/js/app/face.js b/site/assets/js/app/face.js
new file mode 100644
index 00000000..bdaa0313
--- /dev/null
+++ b/site/assets/js/app/face.js
@@ -0,0 +1,227 @@
+var face = (function(){
+ var container = document.querySelector("#face_container")
+ var camera, controls, scene, renderer
+ var mouse = new THREE.Vector2(0.5, 0.5)
+ var mouseTarget = new THREE.Vector2(0.5, 0.5)
+ var POINT_SCALE = 1.8
+ var FACE_POINT_COUNT = 68
+ var SWAP_TIME = 500
+ var cubes = [], meshes = []
+ var currentFace = document.querySelector('.currentFace')
+ var faceBuffer = (function () {
+ var a = new Array(FACE_POINT_COUNT)
+ for (let i = 0; i < FACE_POINT_COUNT; i++) {
+ a[i] = new THREE.Vector3()
+ }
+ return a
+ })()
+ var last_t = 0, start_t = 0
+ var colors = [
+ 0xff3333,
+ 0xff8833,
+ 0xffff33,
+ 0x338833,
+ 0x3388ff,
+ 0x3333ff,
+ 0x8833ff,
+ 0xff3388,
+ 0xffffff,
+ ]
+ var swapping = false, swap_count = 0, swapFrom, swapTo, face_names, faces
+ init()
+
+ function init() {
+ fetch("/assets/data/3dlm_0_10.json")
+ .then(req => req.json())
+ .then(data => {
+ face_names = Object.keys(data)
+ faces = face_names.map(name => recenter(data[name]))
+ setup()
+ build(faces[0])
+ updateFace(faces[0])
+ setCurrentFace(face_names[0])
+ swapTo = faces[0]
+ animate()
+ })
+ }
+ function setup() {
+ var w = window.innerWidth / 2
+ var h = Math.min(window.innerWidth / 2, window.innerHeight * 0.7)
+ camera = new THREE.PerspectiveCamera(70, w/h, 1, 10000)
+ camera.position.x = 0
+ camera.position.y = 0
+ camera.position.z = 250
+
+ scene = new THREE.Scene()
+ scene.background = new THREE.Color(0x191919)
+
+ renderer = new THREE.WebGLRenderer({ antialias: true })
+ renderer.setPixelRatio(window.devicePixelRatio)
+ renderer.setSize(w, h)
+ container.appendChild(renderer.domElement)
+ document.body.addEventListener('mousemove', onMouseMove)
+ // renderer.domElement.addEventListener('mousedown', swap)
+ // oktween.add({
+ // obj: el.style,
+ // units: "px",
+ // from: { left: 0 },
+ // to: { left: 100 },
+ // duration: 1000,
+ // easing: oktween.easing.circ_out,
+ // update: function(obj){
+ // console.log(obj.left)
+ // }
+ // finished: function(){
+ // console.log("done")
+ // }
+ // })
+ setInterval(swap, 5000)
+ }
+ function build(points) {
+ var matrix = new THREE.Matrix4()
+ var quaternion = new THREE.Quaternion()
+
+ for (var i = 0; i < FACE_POINT_COUNT; i++) {
+ var p = points[i]
+ var geometry = new THREE.BoxBufferGeometry()
+ var position = new THREE.Vector3(p[0], p[1], p[2])
+ var rotation = new THREE.Euler()
+ var scale = new THREE.Vector3()
+ var color = new THREE.Color()
+ scale.x = scale.y = scale.z = POINT_SCALE
+ quaternion.setFromEuler(rotation, false)
+ matrix.compose(position, quaternion, scale)
+ geometry.applyMatrix(matrix)
+ material = new THREE.MeshBasicMaterial({ color: color.setHex(0xffffff) })
+ cube = new THREE.Mesh(geometry, material)
+ scene.add(cube)
+ cubes.push(cube)
+ }
+
+ meshes = getLineGeometry(points).map((geometry, i) => {
+ var color = new THREE.Color()
+ var material = new MeshLineMaterial({
+ color: color.setHex(colors[i % colors.length]),
+ })
+ var line = new MeshLine()
+ line.setGeometry(geometry, _ => 1.5)
+ var mesh = new THREE.Mesh(line.geometry, material)
+ mesh.geometry.dynamic = true
+ scene.add(mesh)
+ return [line, mesh]
+ })
+ }
+ function lerpPoints(n, A, B, C) {
+ for (let i = 0, len = A.length; i < len; i++) {
+ lerpPoint(n, A[i], B[i], C[i])
+ }
+ }
+ function lerpPoint(n, A, B, C) {
+ C.x = lerp(n, A.x, B.x)
+ C.y = lerp(n, A.y, B.y)
+ C.z = lerp(n, A.z, B.z)
+ }
+ function lerp(n, a, b) {
+ return (b-a) * n + a
+ }
+ function swap(){
+ if (swapping) return
+ start_t = last_t
+ swapping = true
+ swap_count = (swap_count + 1) % faces.length
+ swapFrom = swapTo
+ swapTo = faces[swap_count]
+ setCurrentFace(face_names[swap_count])
+ }
+ function setCurrentFace(name) {
+ name = name.replace('.png', '').split('_').filter(s => !s.match(/\d+/)).join(' ')
+ currentFace.innerHTML = name
+ }
+ function update_swap(t){
+ var n = (t - start_t) / SWAP_TIME
+ if (n > 1) {
+ swapping = false
+ n = 1
+ }
+ lerpPoints(n, swapFrom, swapTo, faceBuffer)
+ updateFace(faceBuffer)
+ }
+ function updateFace(points) {
+ updateCubeGeometry(points)
+ updateLineGeometry(points)
+ }
+ function updateCubeGeometry(points) {
+ cubes.forEach((cube, i) => {
+ const p = points[i]
+ cube.position.set(p.x, p.y, p.z)
+ })
+ }
+ function updateLineGeometry(points) {
+ getLineGeometry(points).map((geometry, i) => {
+ var [line, mesh] = meshes[i]
+ line.setGeometry(geometry, _ => 1.5)
+ mesh.geometry.vertices = line.geometry.vertices
+ mesh.geometry.verticesNeedUpdate = true
+ })
+ }
+ function getLineGeometry(points) {
+ return [
+ points.slice(0, 17),
+ points.slice(17, 22),
+ points.slice(22, 27),
+ points.slice(27, 31),
+ points.slice(31, 36),
+ points.slice(36, 42),
+ points.slice(42, 48),
+ points.slice(48)
+ ].map((a, i) => {
+ var geometry = new THREE.Geometry()
+ a.forEach(p => geometry.vertices.push(p))
+ if (i > 4) {
+ geometry.vertices.push(a[0])
+ }
+ return geometry
+ })
+ }
+ function getBounds(obj) {
+ return obj.reduce((a, p) => {
+ return [
+ Math.min(a[0], p[0]),
+ Math.max(a[1], p[0]),
+ Math.min(a[2], p[1]),
+ Math.max(a[3], p[1]),
+ Math.min(a[4], p[2]),
+ Math.max(a[5], p[2]),
+ ]
+ }, [Infinity, -Infinity, Infinity, -Infinity, Infinity, -Infinity])
+ }
+ function recenter(obj) {
+ const bounds = getBounds(obj)
+ const x_width = (bounds[1] - bounds[0]) / 2
+ const y_width = (bounds[3] - bounds[2]) / -3
+ const z_width = (bounds[5] - bounds[4]) / 2
+ return obj.map(p => {
+ p[0] = p[0] - bounds[0] - x_width
+ p[1] = -p[1] + bounds[1] + y_width
+ p[2] = p[2] - bounds[2] + z_width
+ return new THREE.Vector3(p[0], p[1], p[2])
+ })
+ }
+ //
+ function onMouseMove(e) {
+ mouse.x = e.clientX / window.innerWidth
+ mouse.y = e.clientY / window.innerHeight
+ }
+ function animate(t) {
+ requestAnimationFrame(animate)
+ if (swapping) update_swap(t)
+ renderer.render(scene, camera)
+ scene.rotation.y += 0.01 * Math.PI
+ mouseTarget.x += (mouse.x - mouseTarget.x) * 0.1
+ mouseTarget.y += (mouse.y - mouseTarget.y) * 0.1
+ scene.rotation.x = (mouseTarget.y - 0.5) * Math.PI / 2
+ // scene.rotation.y = (mouseTarget.x - 0.5) * Math.PI
+ scene.rotation.y += 0.01
+ last_t = t
+ }
+})()
diff --git a/site/assets/js/app/site.js b/site/assets/js/app/site.js
new file mode 100644
index 00000000..eb6886c2
--- /dev/null
+++ b/site/assets/js/app/site.js
@@ -0,0 +1,30 @@
+const isiPhone = !!((navigator.userAgent.match(/iPhone/i)) || (navigator.userAgent.match(/iPod/i)))
+const isiPad = !!(navigator.userAgent.match(/iPad/i))
+const isAndroid = !!(navigator.userAgent.match(/Android/i))
+const isMobile = isiPhone || isiPad || isAndroid
+const isDesktop = !isMobile
+
+const htmlClassList = document.body.parentNode.classList
+htmlClassList.add(isDesktop ? 'desktop' : 'mobile')
+
+function toArray(a) { return Array.prototype.slice.apply(a) }
+function choice(a) { return a[Math.floor(Math.random()*a.length)]}
+
+var site = (function(){
+ var site = {}
+ site.init = function(){
+ site.build()
+ }
+ site.build = function(){
+ const paras = document.querySelectorAll("section p")
+ if (paras.length) {
+ paras[0].classList.add('first_paragraph')
+ }
+ toArray(document.querySelectorAll('header .links a')).forEach(tag => {
+ if (window.location.href.match(tag.href)) {
+ tag.classList.add('active')
+ }
+ })
+ }
+ site.init()
+})() \ No newline at end of file
diff --git a/site/assets/js/vendor/oktween.js b/site/assets/js/vendor/oktween.js
new file mode 100644
index 00000000..7ecf62fb
--- /dev/null
+++ b/site/assets/js/vendor/oktween.js
@@ -0,0 +1,159 @@
+/*
+ oktween.add({
+ obj: el.style,
+ units: "px",
+ from: { left: 0 },
+ to: { left: 100 },
+ duration: 1000,
+ easing: oktween.easing.circ_out,
+ update: function(obj){
+ console.log(obj.left)
+ }
+ finished: function(){
+ console.log("done")
+ }
+ })
+*/
+
+var oktween = (function(){
+ var oktween = {}
+ var tweens = oktween.tweens = []
+ var last_t = 0
+ var id = 0
+ oktween.speed = 1
+ oktween.add = function(tween){
+ tween.id = id++
+ tween.obj = tween.obj || {}
+ if (tween.easing) {
+ if (typeof tween.easing == "string") {
+ tween.easing = oktween.easing[tween.easing]
+ }
+ }
+ else {
+ tween.easing = oktween.easing.linear
+ }
+ if (! ('from' in tween) && ! ('to' in tween)) {
+ tween.keys = []
+ }
+ else if (! ('from' in tween) ) {
+ tween.from = {}
+ tween.keys = Object.keys(tween.to)
+ tween.keys.forEach(function(prop){
+ tween.from[prop] = parseFloat(tween.obj[prop])
+ })
+ }
+ else {
+ tween.keys = Object.keys(tween.from)
+ }
+ tween.delay = tween.delay || 0
+ tween.start = last_t + tween.delay
+ tween.done = false
+ tween.after = tween.after || []
+ tween.then = function(fn){ tween.after.push(fn); return tween }
+ tween.tick = 0
+ tween.skip = tween.skip || 1
+ tween.dt = 0
+ tweens.push(tween)
+ return tween
+ }
+ oktween.update = function(t) {
+ requestAnimationFrame(oktween.update)
+ last_t = t * oktween.speed
+ if (tweens.length == 0) return
+ var done = false
+ tweens.forEach(function(tween, i){
+ var dt = Math.min(1.0, (t - tween.start) / tween.duration)
+ tween.tick++
+ if (dt < 0 || (dt < 1 && (tween.tick % tween.skip != 0))) return
+ var ddt = tween.dt = tween.easing(dt)
+ tween.keys.forEach(function(prop){
+ val = lerp( ddt, tween.from[prop], tween.to[prop] )
+ if (tween.round) val = Math.round(val)
+ if (tween.units) val = (Math.round(val)) + tween.units
+ tween.obj[prop] = val
+ })
+ tween.update && tween.update(tween.obj, dt)
+ if (dt == 1) {
+ tween.finished && tween.finished(tween)
+ if (tween.after.length) {
+ var twn = tween.after.shift()
+ twn.obj = twn.obj || tween.obj
+ twn.after = tween.after
+ oktween.add(twn)
+ }
+ if (tween.loop) {
+ tween.start = t + tween.delay
+ }
+ else {
+ done = tween.done = true
+ }
+ }
+ })
+ if (done) {
+ tweens = tweens.filter(function(tween){ return ! tween.done })
+ }
+ }
+ function lerp(n,a,b){ return (b-a)*n+a }
+
+ requestAnimationFrame(oktween.update)
+
+ oktween.easing = {
+ linear: function(t){
+ return t
+ },
+ circ_out: function(t) {
+ return Math.sqrt(1 - (t = t - 1) * t)
+ },
+ circ_in: function(t){
+ return -(Math.sqrt(1 - (t * t)) - 1)
+ },
+ circ_in_out: function(t) {
+ return ((t*=2) < 1) ? -0.5 * (Math.sqrt(1 - t * t) - 1) : 0.5 * (Math.sqrt(1 - (t -= 2) * t) + 1)
+ },
+ quad_in: function(n){
+ return Math.pow(n, 2)
+ },
+ quad_out: function(n){
+ return n * (n - 2) * -1
+ },
+ quad_in_out: function(n){
+ n = n * 2
+ if(n < 1){ return Math.pow(n, 2) / 2 }
+ return -1 * ((--n) * (n - 2) - 1) / 2
+ },
+ cubic_bezier: function (mX1, mY1, mX2, mY2) {
+ function A(aA1, aA2) { return 1.0 - 3.0 * aA2 + 3.0 * aA1; }
+ function B(aA1, aA2) { return 3.0 * aA2 - 6.0 * aA1; }
+ function C(aA1) { return 3.0 * aA1; }
+
+ // Returns x(t) given t, x1, and x2, or y(t) given t, y1, and y2.
+ function CalcBezier(aT, aA1, aA2) {
+ return ((A(aA1, aA2)*aT + B(aA1, aA2))*aT + C(aA1))*aT;
+ }
+
+ // Returns dx/dt given t, x1, and x2, or dy/dt given t, y1, and y2.
+ function GetSlope(aT, aA1, aA2) {
+ return 3.0 * A(aA1, aA2)*aT*aT + 2.0 * B(aA1, aA2) * aT + C(aA1);
+ }
+
+ function GetTForX(aX) {
+ // Newton raphson iteration
+ var aGuessT = aX;
+ for (var i = 0; i < 10; ++i) {
+ var currentSlope = GetSlope(aGuessT, mX1, mX2);
+ if (currentSlope == 0.0) return aGuessT;
+ var currentX = CalcBezier(aGuessT, mX1, mX2) - aX;
+ aGuessT -= currentX / currentSlope;
+ }
+ return aGuessT;
+ }
+
+ return function(aX) {
+ if (mX1 == mY1 && mX2 == mY2) return aX; // linear
+ return CalcBezier(aX, mY1, mY2);
+ }
+ }
+ }
+
+ return oktween
+})()
diff --git a/site/assets/js/vendor/three.meshline.js b/site/assets/js/vendor/three.meshline.js
new file mode 100644
index 00000000..c6e998e3
--- /dev/null
+++ b/site/assets/js/vendor/three.meshline.js
@@ -0,0 +1,486 @@
+;(function() {
+
+"use strict";
+
+var root = this
+
+var has_require = typeof require !== 'undefined'
+
+var THREE = root.THREE || has_require && require('three')
+if( !THREE )
+ throw new Error( 'MeshLine requires three.js' )
+
+function MeshLine() {
+
+ this.positions = [];
+
+ this.previous = [];
+ this.next = [];
+ this.side = [];
+ this.width = [];
+ this.indices_array = [];
+ this.uvs = [];
+ this.counters = [];
+ this.geometry = new THREE.BufferGeometry();
+
+ this.widthCallback = null;
+
+}
+
+MeshLine.prototype.setGeometry = function( g, c ) {
+
+ this.widthCallback = c;
+
+ this.positions = [];
+ this.counters = [];
+
+ if( g instanceof THREE.Geometry ) {
+ for( var j = 0; j < g.vertices.length; j++ ) {
+ var v = g.vertices[ j ];
+ var c = j/g.vertices.length;
+ this.positions.push( v.x, v.y, v.z );
+ this.positions.push( v.x, v.y, v.z );
+ this.counters.push(c);
+ this.counters.push(c);
+ }
+ }
+
+ if( g instanceof THREE.BufferGeometry ) {
+ // read attribute positions ?
+ }
+
+ if( g instanceof Float32Array || g instanceof Array ) {
+ for( var j = 0; j < g.length; j += 3 ) {
+ var c = j/g.length;
+ this.positions.push( g[ j ], g[ j + 1 ], g[ j + 2 ] );
+ this.positions.push( g[ j ], g[ j + 1 ], g[ j + 2 ] );
+ this.counters.push(c);
+ this.counters.push(c);
+ }
+ }
+
+ this.process();
+
+}
+
+MeshLine.prototype.compareV3 = function( a, b ) {
+
+ var aa = a * 6;
+ var ab = b * 6;
+ return ( this.positions[ aa ] === this.positions[ ab ] ) && ( this.positions[ aa + 1 ] === this.positions[ ab + 1 ] ) && ( this.positions[ aa + 2 ] === this.positions[ ab + 2 ] );
+
+}
+
+MeshLine.prototype.copyV3 = function( a ) {
+
+ var aa = a * 6;
+ return [ this.positions[ aa ], this.positions[ aa + 1 ], this.positions[ aa + 2 ] ];
+
+}
+
+MeshLine.prototype.process = function() {
+
+ var l = this.positions.length / 6;
+
+ this.previous = [];
+ this.next = [];
+ this.side = [];
+ this.width = [];
+ this.indices_array = [];
+ this.uvs = [];
+
+ for( var j = 0; j < l; j++ ) {
+ this.side.push( 1 );
+ this.side.push( -1 );
+ }
+
+ var w;
+ for( var j = 0; j < l; j++ ) {
+ if( this.widthCallback ) w = this.widthCallback( j / ( l -1 ) );
+ else w = 1;
+ this.width.push( w );
+ this.width.push( w );
+ }
+
+ for( var j = 0; j < l; j++ ) {
+ this.uvs.push( j / ( l - 1 ), 0 );
+ this.uvs.push( j / ( l - 1 ), 1 );
+ }
+
+ var v;
+
+ if( this.compareV3( 0, l - 1 ) ){
+ v = this.copyV3( l - 2 );
+ } else {
+ v = this.copyV3( 0 );
+ }
+ this.previous.push( v[ 0 ], v[ 1 ], v[ 2 ] );
+ this.previous.push( v[ 0 ], v[ 1 ], v[ 2 ] );
+ for( var j = 0; j < l - 1; j++ ) {
+ v = this.copyV3( j );
+ this.previous.push( v[ 0 ], v[ 1 ], v[ 2 ] );
+ this.previous.push( v[ 0 ], v[ 1 ], v[ 2 ] );
+ }
+
+ for( var j = 1; j < l; j++ ) {
+ v = this.copyV3( j );
+ this.next.push( v[ 0 ], v[ 1 ], v[ 2 ] );
+ this.next.push( v[ 0 ], v[ 1 ], v[ 2 ] );
+ }
+
+ if( this.compareV3( l - 1, 0 ) ){
+ v = this.copyV3( 1 );
+ } else {
+ v = this.copyV3( l - 1 );
+ }
+ this.next.push( v[ 0 ], v[ 1 ], v[ 2 ] );
+ this.next.push( v[ 0 ], v[ 1 ], v[ 2 ] );
+
+ for( var j = 0; j < l - 1; j++ ) {
+ var n = j * 2;
+ this.indices_array.push( n, n + 1, n + 2 );
+ this.indices_array.push( n + 2, n + 1, n + 3 );
+ }
+
+ if (!this.attributes) {
+ this.attributes = {
+ position: new THREE.BufferAttribute( new Float32Array( this.positions ), 3 ),
+ previous: new THREE.BufferAttribute( new Float32Array( this.previous ), 3 ),
+ next: new THREE.BufferAttribute( new Float32Array( this.next ), 3 ),
+ side: new THREE.BufferAttribute( new Float32Array( this.side ), 1 ),
+ width: new THREE.BufferAttribute( new Float32Array( this.width ), 1 ),
+ uv: new THREE.BufferAttribute( new Float32Array( this.uvs ), 2 ),
+ index: new THREE.BufferAttribute( new Uint16Array( this.indices_array ), 1 ),
+ counters: new THREE.BufferAttribute( new Float32Array( this.counters ), 1 )
+ }
+ } else {
+ this.attributes.position.copyArray(new Float32Array(this.positions));
+ this.attributes.position.needsUpdate = true;
+ this.attributes.previous.copyArray(new Float32Array(this.previous));
+ this.attributes.previous.needsUpdate = true;
+ this.attributes.next.copyArray(new Float32Array(this.next));
+ this.attributes.next.needsUpdate = true;
+ this.attributes.side.copyArray(new Float32Array(this.side));
+ this.attributes.side.needsUpdate = true;
+ this.attributes.width.copyArray(new Float32Array(this.width));
+ this.attributes.width.needsUpdate = true;
+ this.attributes.uv.copyArray(new Float32Array(this.uvs));
+ this.attributes.uv.needsUpdate = true;
+ this.attributes.index.copyArray(new Uint16Array(this.indices_array));
+ this.attributes.index.needsUpdate = true;
+ }
+
+ this.geometry.addAttribute( 'position', this.attributes.position );
+ this.geometry.addAttribute( 'previous', this.attributes.previous );
+ this.geometry.addAttribute( 'next', this.attributes.next );
+ this.geometry.addAttribute( 'side', this.attributes.side );
+ this.geometry.addAttribute( 'width', this.attributes.width );
+ this.geometry.addAttribute( 'uv', this.attributes.uv );
+ this.geometry.addAttribute( 'counters', this.attributes.counters );
+
+ this.geometry.setIndex( this.attributes.index );
+
+}
+
+function memcpy (src, srcOffset, dst, dstOffset, length) {
+ var i
+
+ src = src.subarray || src.slice ? src : src.buffer
+ dst = dst.subarray || dst.slice ? dst : dst.buffer
+
+ src = srcOffset ? src.subarray ?
+ src.subarray(srcOffset, length && srcOffset + length) :
+ src.slice(srcOffset, length && srcOffset + length) : src
+
+ if (dst.set) {
+ dst.set(src, dstOffset)
+ } else {
+ for (i=0; i<src.length; i++) {
+ dst[i + dstOffset] = src[i]
+ }
+ }
+
+ return dst
+}
+
+/**
+ * Fast method to advance the line by one position. The oldest position is removed.
+ * @param position
+ */
+MeshLine.prototype.advance = function(position) {
+
+ var positions = this.attributes.position.array;
+ var previous = this.attributes.previous.array;
+ var next = this.attributes.next.array;
+ var l = positions.length;
+
+ // PREVIOUS
+ memcpy( positions, 0, previous, 0, l );
+
+ // POSITIONS
+ memcpy( positions, 6, positions, 0, l - 6 );
+
+ positions[l - 6] = position.x;
+ positions[l - 5] = position.y;
+ positions[l - 4] = position.z;
+ positions[l - 3] = position.x;
+ positions[l - 2] = position.y;
+ positions[l - 1] = position.z;
+
+ // NEXT
+ memcpy( positions, 6, next, 0, l - 6 );
+
+ next[l - 6] = position.x;
+ next[l - 5] = position.y;
+ next[l - 4] = position.z;
+ next[l - 3] = position.x;
+ next[l - 2] = position.y;
+ next[l - 1] = position.z;
+
+ this.attributes.position.needsUpdate = true;
+ this.attributes.previous.needsUpdate = true;
+ this.attributes.next.needsUpdate = true;
+
+};
+
+function MeshLineMaterial( parameters ) {
+
+ var vertexShaderSource = [
+'precision highp float;',
+'',
+'attribute vec3 position;',
+'attribute vec3 previous;',
+'attribute vec3 next;',
+'attribute float side;',
+'attribute float width;',
+'attribute vec2 uv;',
+'attribute float counters;',
+'',
+'uniform mat4 projectionMatrix;',
+'uniform mat4 modelViewMatrix;',
+'uniform vec2 resolution;',
+'uniform float lineWidth;',
+'uniform vec3 color;',
+'uniform float opacity;',
+'uniform float near;',
+'uniform float far;',
+'uniform float sizeAttenuation;',
+'',
+'varying vec2 vUV;',
+'varying vec4 vColor;',
+'varying float vCounters;',
+'',
+'vec2 fix( vec4 i, float aspect ) {',
+'',
+' vec2 res = i.xy / i.w;',
+' res.x *= aspect;',
+' vCounters = counters;',
+' return res;',
+'',
+'}',
+'',
+'void main() {',
+'',
+' float aspect = resolution.x / resolution.y;',
+' float pixelWidthRatio = 1. / (resolution.x * projectionMatrix[0][0]);',
+'',
+' vColor = vec4( color, opacity );',
+' vUV = uv;',
+'',
+' mat4 m = projectionMatrix * modelViewMatrix;',
+' vec4 finalPosition = m * vec4( position, 1.0 );',
+' vec4 prevPos = m * vec4( previous, 1.0 );',
+' vec4 nextPos = m * vec4( next, 1.0 );',
+'',
+' vec2 currentP = fix( finalPosition, aspect );',
+' vec2 prevP = fix( prevPos, aspect );',
+' vec2 nextP = fix( nextPos, aspect );',
+'',
+' float pixelWidth = finalPosition.w * pixelWidthRatio;',
+' float w = 1.8 * pixelWidth * lineWidth * width;',
+'',
+' if( sizeAttenuation == 1. ) {',
+' w = 1.8 * lineWidth * width;',
+' }',
+'',
+' vec2 dir;',
+' if( nextP == currentP ) dir = normalize( currentP - prevP );',
+' else if( prevP == currentP ) dir = normalize( nextP - currentP );',
+' else {',
+' vec2 dir1 = normalize( currentP - prevP );',
+' vec2 dir2 = normalize( nextP - currentP );',
+' dir = normalize( dir1 + dir2 );',
+'',
+' vec2 perp = vec2( -dir1.y, dir1.x );',
+' vec2 miter = vec2( -dir.y, dir.x );',
+' //w = clamp( w / dot( miter, perp ), 0., 4. * lineWidth * width );',
+'',
+' }',
+'',
+' //vec2 normal = ( cross( vec3( dir, 0. ), vec3( 0., 0., 1. ) ) ).xy;',
+' vec2 normal = vec2( -dir.y, dir.x );',
+' normal.x /= aspect;',
+' normal *= .5 * w;',
+'',
+' vec4 offset = vec4( normal * side, 0.0, 1.0 );',
+' finalPosition.xy += offset.xy;',
+'',
+' gl_Position = finalPosition;',
+'',
+'}' ];
+
+ var fragmentShaderSource = [
+ '#extension GL_OES_standard_derivatives : enable',
+'precision mediump float;',
+'',
+'uniform sampler2D map;',
+'uniform sampler2D alphaMap;',
+'uniform float useMap;',
+'uniform float useAlphaMap;',
+'uniform float useDash;',
+'uniform float dashArray;',
+'uniform float dashOffset;',
+'uniform float dashRatio;',
+'uniform float visibility;',
+'uniform float alphaTest;',
+'uniform vec2 repeat;',
+'',
+'varying vec2 vUV;',
+'varying vec4 vColor;',
+'varying float vCounters;',
+'',
+'void main() {',
+'',
+' vec4 c = vColor;',
+' if( useMap == 1. ) c *= texture2D( map, vUV * repeat );',
+' if( useAlphaMap == 1. ) c.a *= texture2D( alphaMap, vUV * repeat ).a;',
+' if( c.a < alphaTest ) discard;',
+' if( useDash == 1. ){',
+' c.a *= ceil(mod(vCounters + dashOffset, dashArray) - (dashArray * dashRatio));',
+' }',
+' gl_FragColor = c;',
+' gl_FragColor.a *= step(vCounters, visibility);',
+'}' ];
+
+ function check( v, d ) {
+ if( v === undefined ) return d;
+ return v;
+ }
+
+ THREE.Material.call( this );
+
+ parameters = parameters || {};
+
+ this.lineWidth = check( parameters.lineWidth, 1 );
+ this.map = check( parameters.map, null );
+ this.useMap = check( parameters.useMap, 0 );
+ this.alphaMap = check( parameters.alphaMap, null );
+ this.useAlphaMap = check( parameters.useAlphaMap, 0 );
+ this.color = check( parameters.color, new THREE.Color( 0xffffff ) );
+ this.opacity = check( parameters.opacity, 1 );
+ this.resolution = check( parameters.resolution, new THREE.Vector2( 1, 1 ) );
+ this.sizeAttenuation = check( parameters.sizeAttenuation, 1 );
+ this.near = check( parameters.near, 1 );
+ this.far = check( parameters.far, 1 );
+ this.dashArray = check( parameters.dashArray, 0 );
+ this.dashOffset = check( parameters.dashOffset, 0 );
+ this.dashRatio = check( parameters.dashRatio, 0.5 );
+ this.useDash = ( this.dashArray !== 0 ) ? 1 : 0;
+ this.visibility = check( parameters.visibility, 1 );
+ this.alphaTest = check( parameters.alphaTest, 0 );
+ this.repeat = check( parameters.repeat, new THREE.Vector2( 1, 1 ) );
+
+ var material = new THREE.RawShaderMaterial( {
+ uniforms:{
+ lineWidth: { type: 'f', value: this.lineWidth },
+ map: { type: 't', value: this.map },
+ useMap: { type: 'f', value: this.useMap },
+ alphaMap: { type: 't', value: this.alphaMap },
+ useAlphaMap: { type: 'f', value: this.useAlphaMap },
+ color: { type: 'c', value: this.color },
+ opacity: { type: 'f', value: this.opacity },
+ resolution: { type: 'v2', value: this.resolution },
+ sizeAttenuation: { type: 'f', value: this.sizeAttenuation },
+ near: { type: 'f', value: this.near },
+ far: { type: 'f', value: this.far },
+ dashArray: { type: 'f', value: this.dashArray },
+ dashOffset: { type: 'f', value: this.dashOffset },
+ dashRatio: { type: 'f', value: this.dashRatio },
+ useDash: { type: 'f', value: this.useDash },
+ visibility: {type: 'f', value: this.visibility},
+ alphaTest: {type: 'f', value: this.alphaTest},
+ repeat: { type: 'v2', value: this.repeat }
+ },
+ vertexShader: vertexShaderSource.join( '\r\n' ),
+ fragmentShader: fragmentShaderSource.join( '\r\n' )
+ });
+
+ delete parameters.lineWidth;
+ delete parameters.map;
+ delete parameters.useMap;
+ delete parameters.alphaMap;
+ delete parameters.useAlphaMap;
+ delete parameters.color;
+ delete parameters.opacity;
+ delete parameters.resolution;
+ delete parameters.sizeAttenuation;
+ delete parameters.near;
+ delete parameters.far;
+ delete parameters.dashArray;
+ delete parameters.dashOffset;
+ delete parameters.dashRatio;
+ delete parameters.visibility;
+ delete parameters.alphaTest;
+ delete parameters.repeat;
+
+ material.type = 'MeshLineMaterial';
+
+ material.setValues( parameters );
+
+ return material;
+
+};
+
+MeshLineMaterial.prototype = Object.create( THREE.Material.prototype );
+MeshLineMaterial.prototype.constructor = MeshLineMaterial;
+
+MeshLineMaterial.prototype.copy = function ( source ) {
+
+ THREE.Material.prototype.copy.call( this, source );
+
+ this.lineWidth = source.lineWidth;
+ this.map = source.map;
+ this.useMap = source.useMap;
+ this.alphaMap = source.alphaMap;
+ this.useAlphaMap = source.useAlphaMap;
+ this.color.copy( source.color );
+ this.opacity = source.opacity;
+ this.resolution.copy( source.resolution );
+ this.sizeAttenuation = source.sizeAttenuation;
+ this.near = source.near;
+ this.far = source.far;
+ this.dashArray.copy( source.dashArray );
+ this.dashOffset.copy( source.dashOffset );
+ this.dashRatio.copy( source.dashRatio );
+ this.useDash = source.useDash;
+ this.visibility = source.visibility;
+ this.alphaTest = source.alphaTest;
+ this.repeat.copy( source.repeat );
+
+ return this;
+
+};
+
+if( typeof exports !== 'undefined' ) {
+ if( typeof module !== 'undefined' && module.exports ) {
+ exports = module.exports = { MeshLine: MeshLine, MeshLineMaterial: MeshLineMaterial };
+ }
+ exports.MeshLine = MeshLine;
+ exports.MeshLineMaterial = MeshLineMaterial;
+}
+else {
+ root.MeshLine = MeshLine;
+ root.MeshLineMaterial = MeshLineMaterial;
+}
+
+}).call(this); \ No newline at end of file
diff --git a/site/assets/js/vendor/three.min.js b/site/assets/js/vendor/three.min.js
new file mode 100644
index 00000000..56300150
--- /dev/null
+++ b/site/assets/js/vendor/three.min.js
@@ -0,0 +1,963 @@
+// threejs.org/license
+(function(l,ia){"object"===typeof exports&&"undefined"!==typeof module?ia(exports):"function"===typeof define&&define.amd?define(["exports"],ia):ia(l.THREE={})})(this,function(l){function ia(){}function z(a,b){this.x=a||0;this.y=b||0}function P(){this.elements=[1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1];0<arguments.length&&console.error("THREE.Matrix4: the constructor no longer reads arguments. use .set() instead.")}function ja(a,b,c,d){this._x=a||0;this._y=b||0;this._z=c||0;this._w=void 0!==d?d:1}function p(a,
+b,c){this.x=a||0;this.y=b||0;this.z=c||0}function da(){this.elements=[1,0,0,0,1,0,0,0,1];0<arguments.length&&console.error("THREE.Matrix3: the constructor no longer reads arguments. use .set() instead.")}function W(a,b,c,d,e,f,g,h,k,m){Object.defineProperty(this,"id",{value:Kf++});this.uuid=R.generateUUID();this.name="";this.image=void 0!==a?a:W.DEFAULT_IMAGE;this.mipmaps=[];this.mapping=void 0!==b?b:W.DEFAULT_MAPPING;this.wrapS=void 0!==c?c:1001;this.wrapT=void 0!==d?d:1001;this.magFilter=void 0!==
+e?e:1006;this.minFilter=void 0!==f?f:1008;this.anisotropy=void 0!==k?k:1;this.format=void 0!==g?g:1023;this.type=void 0!==h?h:1009;this.offset=new z(0,0);this.repeat=new z(1,1);this.center=new z(0,0);this.rotation=0;this.matrixAutoUpdate=!0;this.matrix=new da;this.generateMipmaps=!0;this.premultiplyAlpha=!1;this.flipY=!0;this.unpackAlignment=4;this.encoding=void 0!==m?m:3E3;this.version=0;this.onUpdate=null}function Z(a,b,c,d){this.x=a||0;this.y=b||0;this.z=c||0;this.w=void 0!==d?d:1}function kb(a,
+b,c){this.width=a;this.height=b;this.scissor=new Z(0,0,a,b);this.scissorTest=!1;this.viewport=new Z(0,0,a,b);c=c||{};void 0===c.minFilter&&(c.minFilter=1006);this.texture=new W(void 0,void 0,c.wrapS,c.wrapT,c.magFilter,c.minFilter,c.format,c.type,c.anisotropy,c.encoding);this.texture.generateMipmaps=void 0!==c.generateMipmaps?c.generateMipmaps:!0;this.depthBuffer=void 0!==c.depthBuffer?c.depthBuffer:!0;this.stencilBuffer=void 0!==c.stencilBuffer?c.stencilBuffer:!0;this.depthTexture=void 0!==c.depthTexture?
+c.depthTexture:null}function Jb(a,b,c){kb.call(this,a,b,c);this.activeMipMapLevel=this.activeCubeFace=0}function lb(a,b,c,d,e,f,g,h,k,m,q,n){W.call(this,null,f,g,h,k,m,d,e,q,n);this.image={data:a,width:b,height:c};this.magFilter=void 0!==k?k:1003;this.minFilter=void 0!==m?m:1003;this.flipY=this.generateMipmaps=!1;this.unpackAlignment=1}function Wa(a,b){this.min=void 0!==a?a:new p(Infinity,Infinity,Infinity);this.max=void 0!==b?b:new p(-Infinity,-Infinity,-Infinity)}function Ga(a,b){this.center=void 0!==
+a?a:new p;this.radius=void 0!==b?b:0}function Pa(a,b){this.normal=void 0!==a?a:new p(1,0,0);this.constant=void 0!==b?b:0}function rd(a,b,c,d,e,f){this.planes=[void 0!==a?a:new Pa,void 0!==b?b:new Pa,void 0!==c?c:new Pa,void 0!==d?d:new Pa,void 0!==e?e:new Pa,void 0!==f?f:new Pa]}function G(a,b,c){return void 0===b&&void 0===c?this.set(a):this.setRGB(a,b,c)}function Xd(){function a(e,f){!1!==c&&(d(e,f),b.requestAnimationFrame(a))}var b=null,c=!1,d=null;return{start:function(){!0!==c&&null!==d&&(b.requestAnimationFrame(a),
+c=!0)},stop:function(){c=!1},setAnimationLoop:function(a){d=a},setContext:function(a){b=a}}}function Lf(a){function b(b,c){var d=b.array,e=b.dynamic?35048:35044,h=a.createBuffer();a.bindBuffer(c,h);a.bufferData(c,d,e);b.onUploadCallback();c=5126;d instanceof Float32Array?c=5126:d instanceof Float64Array?console.warn("THREE.WebGLAttributes: Unsupported data buffer format: Float64Array."):d instanceof Uint16Array?c=5123:d instanceof Int16Array?c=5122:d instanceof Uint32Array?c=5125:d instanceof Int32Array?
+c=5124:d instanceof Int8Array?c=5120:d instanceof Uint8Array&&(c=5121);return{buffer:h,type:c,bytesPerElement:d.BYTES_PER_ELEMENT,version:b.version}}var c=new WeakMap;return{get:function(a){a.isInterleavedBufferAttribute&&(a=a.data);return c.get(a)},remove:function(b){b.isInterleavedBufferAttribute&&(b=b.data);var d=c.get(b);d&&(a.deleteBuffer(d.buffer),c.delete(b))},update:function(d,e){d.isInterleavedBufferAttribute&&(d=d.data);var f=c.get(d);if(void 0===f)c.set(d,b(d,e));else if(f.version<d.version){var g=
+d,h=g.array,k=g.updateRange;a.bindBuffer(e,f.buffer);!1===g.dynamic?a.bufferData(e,h,35044):-1===k.count?a.bufferSubData(e,0,h):0===k.count?console.error("THREE.WebGLObjects.updateBuffer: dynamic THREE.BufferAttribute marked as needsUpdate but updateRange.count is 0, ensure you are using set methods or updating manually."):(a.bufferSubData(e,k.offset*h.BYTES_PER_ELEMENT,h.subarray(k.offset,k.offset+k.count)),k.count=-1);f.version=d.version}}}}function Xa(a,b,c,d,e,f){this.a=a;this.b=b;this.c=c;this.normal=
+d&&d.isVector3?d:new p;this.vertexNormals=Array.isArray(d)?d:[];this.color=e&&e.isColor?e:new G;this.vertexColors=Array.isArray(e)?e:[];this.materialIndex=void 0!==f?f:0}function mb(a,b,c,d){this._x=a||0;this._y=b||0;this._z=c||0;this._order=d||mb.DefaultOrder}function Yd(){this.mask=1}function D(){Object.defineProperty(this,"id",{value:Mf++});this.uuid=R.generateUUID();this.name="";this.type="Object3D";this.parent=null;this.children=[];this.up=D.DefaultUp.clone();var a=new p,b=new mb,c=new ja,d=
+new p(1,1,1);b.onChange(function(){c.setFromEuler(b,!1)});c.onChange(function(){b.setFromQuaternion(c,void 0,!1)});Object.defineProperties(this,{position:{configurable:!0,enumerable:!0,value:a},rotation:{configurable:!0,enumerable:!0,value:b},quaternion:{configurable:!0,enumerable:!0,value:c},scale:{configurable:!0,enumerable:!0,value:d},modelViewMatrix:{value:new P},normalMatrix:{value:new da}});this.matrix=new P;this.matrixWorld=new P;this.matrixAutoUpdate=D.DefaultMatrixAutoUpdate;this.matrixWorldNeedsUpdate=
+!1;this.layers=new Yd;this.visible=!0;this.receiveShadow=this.castShadow=!1;this.frustumCulled=!0;this.renderOrder=0;this.userData={}}function I(){Object.defineProperty(this,"id",{value:Nf+=2});this.uuid=R.generateUUID();this.name="";this.type="Geometry";this.vertices=[];this.colors=[];this.faces=[];this.faceVertexUvs=[[]];this.morphTargets=[];this.morphNormals=[];this.skinWeights=[];this.skinIndices=[];this.lineDistances=[];this.boundingSphere=this.boundingBox=null;this.groupsNeedUpdate=this.lineDistancesNeedUpdate=
+this.colorsNeedUpdate=this.normalsNeedUpdate=this.uvsNeedUpdate=this.verticesNeedUpdate=this.elementsNeedUpdate=!1}function F(a,b,c){if(Array.isArray(a))throw new TypeError("THREE.BufferAttribute: array should be a Typed Array.");this.name="";this.array=a;this.itemSize=b;this.count=void 0!==a?a.length/b:0;this.normalized=!0===c;this.dynamic=!1;this.updateRange={offset:0,count:-1};this.version=0}function sc(a,b,c){F.call(this,new Int8Array(a),b,c)}function tc(a,b,c){F.call(this,new Uint8Array(a),b,
+c)}function uc(a,b,c){F.call(this,new Uint8ClampedArray(a),b,c)}function vc(a,b,c){F.call(this,new Int16Array(a),b,c)}function nb(a,b,c){F.call(this,new Uint16Array(a),b,c)}function wc(a,b,c){F.call(this,new Int32Array(a),b,c)}function ob(a,b,c){F.call(this,new Uint32Array(a),b,c)}function C(a,b,c){F.call(this,new Float32Array(a),b,c)}function xc(a,b,c){F.call(this,new Float64Array(a),b,c)}function Ie(){this.vertices=[];this.normals=[];this.colors=[];this.uvs=[];this.uvs2=[];this.groups=[];this.morphTargets=
+{};this.skinWeights=[];this.skinIndices=[];this.boundingSphere=this.boundingBox=null;this.groupsNeedUpdate=this.uvsNeedUpdate=this.colorsNeedUpdate=this.normalsNeedUpdate=this.verticesNeedUpdate=!1}function Je(a){if(0===a.length)return-Infinity;for(var b=a[0],c=1,d=a.length;c<d;++c)a[c]>b&&(b=a[c]);return b}function E(){Object.defineProperty(this,"id",{value:Of+=2});this.uuid=R.generateUUID();this.name="";this.type="BufferGeometry";this.index=null;this.attributes={};this.morphAttributes={};this.groups=
+[];this.boundingSphere=this.boundingBox=null;this.drawRange={start:0,count:Infinity};this.userData={}}function Kb(a,b,c,d,e,f){I.call(this);this.type="BoxGeometry";this.parameters={width:a,height:b,depth:c,widthSegments:d,heightSegments:e,depthSegments:f};this.fromBufferGeometry(new pb(a,b,c,d,e,f));this.mergeVertices()}function pb(a,b,c,d,e,f){function g(a,b,c,d,e,f,g,l,X,B,Lb){var t=f/X,u=g/B,w=f/2,v=g/2,A=l/2;g=X+1;var y=B+1,H=f=0,N,z,C=new p;for(z=0;z<y;z++){var D=z*u-v;for(N=0;N<g;N++)C[a]=(N*
+t-w)*d,C[b]=D*e,C[c]=A,m.push(C.x,C.y,C.z),C[a]=0,C[b]=0,C[c]=0<l?1:-1,q.push(C.x,C.y,C.z),n.push(N/X),n.push(1-z/B),f+=1}for(z=0;z<B;z++)for(N=0;N<X;N++)a=r+N+g*(z+1),b=r+(N+1)+g*(z+1),c=r+(N+1)+g*z,k.push(r+N+g*z,a,c),k.push(a,b,c),H+=6;h.addGroup(x,H,Lb);x+=H;r+=f}E.call(this);this.type="BoxBufferGeometry";this.parameters={width:a,height:b,depth:c,widthSegments:d,heightSegments:e,depthSegments:f};var h=this;a=a||1;b=b||1;c=c||1;d=Math.floor(d)||1;e=Math.floor(e)||1;f=Math.floor(f)||1;var k=[],
+m=[],q=[],n=[],r=0,x=0;g("z","y","x",-1,-1,c,b,a,f,e,0);g("z","y","x",1,-1,c,b,-a,f,e,1);g("x","z","y",1,1,a,c,b,d,f,2);g("x","z","y",1,-1,a,c,-b,d,f,3);g("x","y","z",1,-1,a,b,c,d,e,4);g("x","y","z",-1,-1,a,b,-c,d,e,5);this.setIndex(k);this.addAttribute("position",new C(m,3));this.addAttribute("normal",new C(q,3));this.addAttribute("uv",new C(n,2))}function yc(a,b,c,d){I.call(this);this.type="PlaneGeometry";this.parameters={width:a,height:b,widthSegments:c,heightSegments:d};this.fromBufferGeometry(new qb(a,
+b,c,d));this.mergeVertices()}function qb(a,b,c,d){E.call(this);this.type="PlaneBufferGeometry";this.parameters={width:a,height:b,widthSegments:c,heightSegments:d};a=a||1;b=b||1;var e=a/2,f=b/2;c=Math.floor(c)||1;d=Math.floor(d)||1;var g=c+1,h=d+1,k=a/c,m=b/d,q=[],n=[],r=[],x=[];for(a=0;a<h;a++){var t=a*m-f;for(b=0;b<g;b++)n.push(b*k-e,-t,0),r.push(0,0,1),x.push(b/c),x.push(1-a/d)}for(a=0;a<d;a++)for(b=0;b<c;b++)e=b+g*(a+1),f=b+1+g*(a+1),h=b+1+g*a,q.push(b+g*a,e,h),q.push(e,f,h);this.setIndex(q);this.addAttribute("position",
+new C(n,3));this.addAttribute("normal",new C(r,3));this.addAttribute("uv",new C(x,2))}function L(){Object.defineProperty(this,"id",{value:Pf++});this.uuid=R.generateUUID();this.name="";this.type="Material";this.lights=this.fog=!0;this.blending=1;this.side=0;this.flatShading=!1;this.vertexColors=0;this.opacity=1;this.transparent=!1;this.blendSrc=204;this.blendDst=205;this.blendEquation=100;this.blendEquationAlpha=this.blendDstAlpha=this.blendSrcAlpha=null;this.depthFunc=3;this.depthWrite=this.depthTest=
+!0;this.clippingPlanes=null;this.clipShadows=this.clipIntersection=!1;this.shadowSide=null;this.colorWrite=!0;this.precision=null;this.polygonOffset=!1;this.polygonOffsetUnits=this.polygonOffsetFactor=0;this.dithering=!1;this.alphaTest=0;this.premultipliedAlpha=!1;this.visible=!0;this.userData={};this.needsUpdate=!0}function ka(a){L.call(this);this.type="ShaderMaterial";this.defines={};this.uniforms={};this.vertexShader="void main() {\n\tgl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );\n}";
+this.fragmentShader="void main() {\n\tgl_FragColor = vec4( 1.0, 0.0, 0.0, 1.0 );\n}";this.linewidth=1;this.wireframe=!1;this.wireframeLinewidth=1;this.morphNormals=this.morphTargets=this.skinning=this.clipping=this.lights=this.fog=!1;this.extensions={derivatives:!1,fragDepth:!1,drawBuffers:!1,shaderTextureLOD:!1};this.defaultAttributeValues={color:[1,1,1],uv:[0,0],uv2:[0,0]};this.index0AttributeName=void 0;this.uniformsNeedUpdate=!1;void 0!==a&&(void 0!==a.attributes&&console.error("THREE.ShaderMaterial: attributes should now be defined in THREE.BufferGeometry instead."),
+this.setValues(a))}function rb(a,b){this.origin=void 0!==a?a:new p;this.direction=void 0!==b?b:new p}function ha(a,b,c){this.a=void 0!==a?a:new p;this.b=void 0!==b?b:new p;this.c=void 0!==c?c:new p}function Ea(a){L.call(this);this.type="MeshBasicMaterial";this.color=new G(16777215);this.lightMap=this.map=null;this.lightMapIntensity=1;this.aoMap=null;this.aoMapIntensity=1;this.envMap=this.alphaMap=this.specularMap=null;this.combine=0;this.reflectivity=1;this.refractionRatio=.98;this.wireframe=!1;this.wireframeLinewidth=
+1;this.wireframeLinejoin=this.wireframeLinecap="round";this.lights=this.morphTargets=this.skinning=!1;this.setValues(a)}function pa(a,b){D.call(this);this.type="Mesh";this.geometry=void 0!==a?a:new E;this.material=void 0!==b?b:new Ea({color:16777215*Math.random()});this.drawMode=0;this.updateMorphTargets()}function Qf(a,b,c,d){function e(a,c){b.buffers.color.setClear(a.r,a.g,a.b,c,d)}var f=new G(0),g=0,h,k;return{getClearColor:function(){return f},setClearColor:function(a,b){f.set(a);g=void 0!==b?
+b:1;e(f,g)},getClearAlpha:function(){return g},setClearAlpha:function(a){g=a;e(f,g)},render:function(b,d,n,r){d=d.background;null===d?e(f,g):d&&d.isColor&&(e(d,1),r=!0);(a.autoClear||r)&&a.clear(a.autoClearColor,a.autoClearDepth,a.autoClearStencil);d&&(d.isCubeTexture||d.isWebGLRenderTargetCube)?(void 0===k&&(k=new pa(new pb(1,1,1),new ka({type:"BackgroundCubeMaterial",uniforms:va.clone(Qa.cube.uniforms),vertexShader:Qa.cube.vertexShader,fragmentShader:Qa.cube.fragmentShader,side:1,depthTest:!0,depthWrite:!1,
+fog:!1})),k.geometry.removeAttribute("normal"),k.geometry.removeAttribute("uv"),k.onBeforeRender=function(a,b,c){this.matrixWorld.copyPosition(c.matrixWorld)},c.update(k)),k.material.uniforms.tCube.value=d.isWebGLRenderTargetCube?d.texture:d,k.material.uniforms.tFlip.value=d.isWebGLRenderTargetCube?1:-1,b.push(k,k.geometry,k.material,0,null)):d&&d.isTexture&&(void 0===h&&(h=new pa(new qb(2,2),new ka({type:"BackgroundMaterial",uniforms:va.clone(Qa.background.uniforms),vertexShader:Qa.background.vertexShader,
+fragmentShader:Qa.background.fragmentShader,side:0,depthTest:!0,depthWrite:!1,fog:!1})),h.geometry.removeAttribute("normal"),c.update(h)),h.material.uniforms.t2D.value=d,!0===d.matrixAutoUpdate&&d.updateMatrix(),h.material.uniforms.uvTransform.value.copy(d.matrix),b.push(h,h.geometry,h.material,0,null))}}}function Rf(a,b,c,d){var e;this.setMode=function(a){e=a};this.render=function(b,d){a.drawArrays(e,b,d);c.update(d,e)};this.renderInstances=function(f,g,h){if(d.isWebGL2)var k=a;else if(k=b.get("ANGLE_instanced_arrays"),
+null===k){console.error("THREE.WebGLBufferRenderer: using THREE.InstancedBufferGeometry but hardware does not support extension ANGLE_instanced_arrays.");return}k[d.isWebGL2?"drawArraysInstanced":"drawArraysInstancedANGLE"](e,g,h,f.maxInstancedCount);c.update(h,e,f.maxInstancedCount)}}function Sf(a,b,c){function d(b){if("highp"===b){if(0<a.getShaderPrecisionFormat(35633,36338).precision&&0<a.getShaderPrecisionFormat(35632,36338).precision)return"highp";b="mediump"}return"mediump"===b&&0<a.getShaderPrecisionFormat(35633,
+36337).precision&&0<a.getShaderPrecisionFormat(35632,36337).precision?"mediump":"lowp"}var e,f="undefined"!==typeof WebGL2RenderingContext&&a instanceof WebGL2RenderingContext,g=void 0!==c.precision?c.precision:"highp",h=d(g);h!==g&&(console.warn("THREE.WebGLRenderer:",g,"not supported, using",h,"instead."),g=h);c=!0===c.logarithmicDepthBuffer;h=a.getParameter(34930);var k=a.getParameter(35660),m=a.getParameter(3379),q=a.getParameter(34076),n=a.getParameter(34921),r=a.getParameter(36347),x=a.getParameter(36348),
+t=a.getParameter(36349),l=0<k,w=f||!!b.get("OES_texture_float");return{isWebGL2:f,getMaxAnisotropy:function(){if(void 0!==e)return e;var c=b.get("EXT_texture_filter_anisotropic");return e=null!==c?a.getParameter(c.MAX_TEXTURE_MAX_ANISOTROPY_EXT):0},getMaxPrecision:d,precision:g,logarithmicDepthBuffer:c,maxTextures:h,maxVertexTextures:k,maxTextureSize:m,maxCubemapSize:q,maxAttributes:n,maxVertexUniforms:r,maxVaryings:x,maxFragmentUniforms:t,vertexTextures:l,floatFragmentTextures:w,floatVertexTextures:l&&
+w}}function Tf(){function a(){m.value!==d&&(m.value=d,m.needsUpdate=0<e);c.numPlanes=e;c.numIntersection=0}function b(a,b,d,e){var f=null!==a?a.length:0,g=null;if(0!==f){g=m.value;if(!0!==e||null===g){e=d+4*f;b=b.matrixWorldInverse;k.getNormalMatrix(b);if(null===g||g.length<e)g=new Float32Array(e);for(e=0;e!==f;++e,d+=4)h.copy(a[e]).applyMatrix4(b,k),h.normal.toArray(g,d),g[d+3]=h.constant}m.value=g;m.needsUpdate=!0}c.numPlanes=f;return g}var c=this,d=null,e=0,f=!1,g=!1,h=new Pa,k=new da,m={value:null,
+needsUpdate:!1};this.uniform=m;this.numIntersection=this.numPlanes=0;this.init=function(a,c,g){var h=0!==a.length||c||0!==e||f;f=c;d=b(a,g,0);e=a.length;return h};this.beginShadows=function(){g=!0;b(null)};this.endShadows=function(){g=!1;a()};this.setState=function(c,h,k,x,l,u){if(!f||null===c||0===c.length||g&&!k)g?b(null):a();else{k=g?0:e;var n=4*k,q=l.clippingState||null;m.value=q;q=b(c,x,n,u);for(c=0;c!==n;++c)q[c]=d[c];l.clippingState=q;this.numIntersection=h?this.numPlanes:0;this.numPlanes+=
+k}}}function Uf(a){var b={};return{get:function(c){if(void 0!==b[c])return b[c];switch(c){case "WEBGL_depth_texture":var d=a.getExtension("WEBGL_depth_texture")||a.getExtension("MOZ_WEBGL_depth_texture")||a.getExtension("WEBKIT_WEBGL_depth_texture");break;case "EXT_texture_filter_anisotropic":d=a.getExtension("EXT_texture_filter_anisotropic")||a.getExtension("MOZ_EXT_texture_filter_anisotropic")||a.getExtension("WEBKIT_EXT_texture_filter_anisotropic");break;case "WEBGL_compressed_texture_s3tc":d=
+a.getExtension("WEBGL_compressed_texture_s3tc")||a.getExtension("MOZ_WEBGL_compressed_texture_s3tc")||a.getExtension("WEBKIT_WEBGL_compressed_texture_s3tc");break;case "WEBGL_compressed_texture_pvrtc":d=a.getExtension("WEBGL_compressed_texture_pvrtc")||a.getExtension("WEBKIT_WEBGL_compressed_texture_pvrtc");break;default:d=a.getExtension(c)}null===d&&console.warn("THREE.WebGLRenderer: "+c+" extension not supported.");return b[c]=d}}}function Vf(a,b,c){function d(a){var g=a.target;a=e[g.id];null!==
+a.index&&b.remove(a.index);for(var k in a.attributes)b.remove(a.attributes[k]);g.removeEventListener("dispose",d);delete e[g.id];if(k=f[a.id])b.remove(k),delete f[a.id];c.memory.geometries--}var e={},f={};return{get:function(a,b){var f=e[b.id];if(f)return f;b.addEventListener("dispose",d);b.isBufferGeometry?f=b:b.isGeometry&&(void 0===b._bufferGeometry&&(b._bufferGeometry=(new E).setFromObject(a)),f=b._bufferGeometry);e[b.id]=f;c.memory.geometries++;return f},update:function(a){var c=a.index,d=a.attributes;
+null!==c&&b.update(c,34963);for(var e in d)b.update(d[e],34962);a=a.morphAttributes;for(e in a){c=a[e];d=0;for(var f=c.length;d<f;d++)b.update(c[d],34962)}},getWireframeAttribute:function(a){var c=f[a.id];if(c)return c;c=[];var d=a.index,e=a.attributes;if(null!==d){d=d.array;e=0;for(var g=d.length;e<g;e+=3){var n=d[e+0],r=d[e+1],x=d[e+2];c.push(n,r,r,x,x,n)}}else for(d=e.position.array,e=0,g=d.length/3-1;e<g;e+=3)n=e+0,r=e+1,x=e+2,c.push(n,r,r,x,x,n);c=new (65535<Je(c)?ob:nb)(c,1);b.update(c,34963);
+return f[a.id]=c}}}function Wf(a,b,c,d){var e,f,g;this.setMode=function(a){e=a};this.setIndex=function(a){f=a.type;g=a.bytesPerElement};this.render=function(b,d){a.drawElements(e,d,f,b*g);c.update(d,e)};this.renderInstances=function(h,k,m){if(d.isWebGL2)var q=a;else if(q=b.get("ANGLE_instanced_arrays"),null===q){console.error("THREE.WebGLIndexedBufferRenderer: using THREE.InstancedBufferGeometry but hardware does not support extension ANGLE_instanced_arrays.");return}q[d.isWebGL2?"drawElementsInstanced":
+"drawElementsInstancedANGLE"](e,m,f,k*g,h.maxInstancedCount);c.update(m,e,h.maxInstancedCount)}}function Xf(a){var b={frame:0,calls:0,triangles:0,points:0,lines:0};return{memory:{geometries:0,textures:0},render:b,programs:null,autoReset:!0,reset:function(){b.frame++;b.calls=0;b.triangles=0;b.points=0;b.lines=0},update:function(a,d,e){e=e||1;b.calls++;switch(d){case 4:b.triangles+=a/3*e;break;case 5:case 6:b.triangles+=e*(a-2);break;case 1:b.lines+=a/2*e;break;case 3:b.lines+=e*(a-1);break;case 2:b.lines+=
+e*a;break;case 0:b.points+=e*a;break;default:console.error("THREE.WebGLInfo: Unknown draw mode:",d)}}}}function Yf(a,b){return Math.abs(b[1])-Math.abs(a[1])}function Zf(a){var b={},c=new Float32Array(8);return{update:function(d,e,f,g){var h=d.morphTargetInfluences,k=h.length;d=b[e.id];if(void 0===d){d=[];for(var m=0;m<k;m++)d[m]=[m,0];b[e.id]=d}var q=f.morphTargets&&e.morphAttributes.position;f=f.morphNormals&&e.morphAttributes.normal;for(m=0;m<k;m++){var n=d[m];0!==n[1]&&(q&&e.removeAttribute("morphTarget"+
+m),f&&e.removeAttribute("morphNormal"+m))}for(m=0;m<k;m++)n=d[m],n[0]=m,n[1]=h[m];d.sort(Yf);for(m=0;8>m;m++){if(n=d[m])if(h=n[0],k=n[1]){q&&e.addAttribute("morphTarget"+m,q[h]);f&&e.addAttribute("morphNormal"+m,f[h]);c[m]=k;continue}c[m]=0}g.getUniforms().setValue(a,"morphTargetInfluences",c)}}}function $f(a,b){var c={};return{update:function(d){var e=b.render.frame,f=d.geometry,g=a.get(d,f);c[g.id]!==e&&(f.isGeometry&&g.updateFromObject(d),a.update(g),c[g.id]=e);return g},dispose:function(){c={}}}}
+function Ya(a,b,c,d,e,f,g,h,k,m){a=void 0!==a?a:[];W.call(this,a,void 0!==b?b:301,c,d,e,f,g,h,k,m);this.flipY=!1}function Mb(a,b,c,d){W.call(this,null);this.image={data:a,width:b,height:c,depth:d};this.minFilter=this.magFilter=1003;this.flipY=this.generateMipmaps=!1}function Nb(a,b,c){var d=a[0];if(0>=d||0<d)return a;var e=b*c,f=Ke[e];void 0===f&&(f=new Float32Array(e),Ke[e]=f);if(0!==b)for(d.toArray(f,0),d=1,e=0;d!==b;++d)e+=c,a[d].toArray(f,e);return f}function ea(a,b){if(a.length!==b.length)return!1;
+for(var c=0,d=a.length;c<d;c++)if(a[c]!==b[c])return!1;return!0}function sa(a,b){for(var c=0,d=b.length;c<d;c++)a[c]=b[c]}function Le(a,b){var c=Me[b];void 0===c&&(c=new Int32Array(b),Me[b]=c);for(var d=0;d!==b;++d)c[d]=a.allocTextureUnit();return c}function ag(a,b){var c=this.cache;c[0]!==b&&(a.uniform1f(this.addr,b),c[0]=b)}function bg(a,b){var c=this.cache;c[0]!==b&&(a.uniform1i(this.addr,b),c[0]=b)}function cg(a,b){var c=this.cache;if(void 0!==b.x){if(c[0]!==b.x||c[1]!==b.y)a.uniform2f(this.addr,
+b.x,b.y),c[0]=b.x,c[1]=b.y}else ea(c,b)||(a.uniform2fv(this.addr,b),sa(c,b))}function dg(a,b){var c=this.cache;if(void 0!==b.x){if(c[0]!==b.x||c[1]!==b.y||c[2]!==b.z)a.uniform3f(this.addr,b.x,b.y,b.z),c[0]=b.x,c[1]=b.y,c[2]=b.z}else if(void 0!==b.r){if(c[0]!==b.r||c[1]!==b.g||c[2]!==b.b)a.uniform3f(this.addr,b.r,b.g,b.b),c[0]=b.r,c[1]=b.g,c[2]=b.b}else ea(c,b)||(a.uniform3fv(this.addr,b),sa(c,b))}function eg(a,b){var c=this.cache;if(void 0!==b.x){if(c[0]!==b.x||c[1]!==b.y||c[2]!==b.z||c[3]!==b.w)a.uniform4f(this.addr,
+b.x,b.y,b.z,b.w),c[0]=b.x,c[1]=b.y,c[2]=b.z,c[3]=b.w}else ea(c,b)||(a.uniform4fv(this.addr,b),sa(c,b))}function fg(a,b){var c=this.cache,d=b.elements;void 0===d?ea(c,b)||(a.uniformMatrix2fv(this.addr,!1,b),sa(c,b)):ea(c,d)||(Ne.set(d),a.uniformMatrix2fv(this.addr,!1,Ne),sa(c,d))}function gg(a,b){var c=this.cache,d=b.elements;void 0===d?ea(c,b)||(a.uniformMatrix3fv(this.addr,!1,b),sa(c,b)):ea(c,d)||(Oe.set(d),a.uniformMatrix3fv(this.addr,!1,Oe),sa(c,d))}function hg(a,b){var c=this.cache,d=b.elements;
+void 0===d?ea(c,b)||(a.uniformMatrix4fv(this.addr,!1,b),sa(c,b)):ea(c,d)||(Pe.set(d),a.uniformMatrix4fv(this.addr,!1,Pe),sa(c,d))}function ig(a,b,c){var d=this.cache,e=c.allocTextureUnit();d[0]!==e&&(a.uniform1i(this.addr,e),d[0]=e);c.setTexture2D(b||Qe,e)}function jg(a,b,c){var d=this.cache,e=c.allocTextureUnit();d[0]!==e&&(a.uniform1i(this.addr,e),d[0]=e);c.setTexture3D(b||kg,e)}function lg(a,b,c){var d=this.cache,e=c.allocTextureUnit();d[0]!==e&&(a.uniform1i(this.addr,e),d[0]=e);c.setTextureCube(b||
+Re,e)}function Se(a,b){var c=this.cache;ea(c,b)||(a.uniform2iv(this.addr,b),sa(c,b))}function Te(a,b){var c=this.cache;ea(c,b)||(a.uniform3iv(this.addr,b),sa(c,b))}function Ue(a,b){var c=this.cache;ea(c,b)||(a.uniform4iv(this.addr,b),sa(c,b))}function mg(a){switch(a){case 5126:return ag;case 35664:return cg;case 35665:return dg;case 35666:return eg;case 35674:return fg;case 35675:return gg;case 35676:return hg;case 35678:case 36198:return ig;case 35679:return jg;case 35680:return lg;case 5124:case 35670:return bg;
+case 35667:case 35671:return Se;case 35668:case 35672:return Te;case 35669:case 35673:return Ue}}function ng(a,b){var c=this.cache;ea(c,b)||(a.uniform1fv(this.addr,b),sa(c,b))}function og(a,b){var c=this.cache;ea(c,b)||(a.uniform1iv(this.addr,b),sa(c,b))}function pg(a,b){var c=this.cache;b=Nb(b,this.size,2);ea(c,b)||(a.uniform2fv(this.addr,b),this.updateCache(b))}function qg(a,b){var c=this.cache;b=Nb(b,this.size,3);ea(c,b)||(a.uniform3fv(this.addr,b),this.updateCache(b))}function rg(a,b){var c=this.cache;
+b=Nb(b,this.size,4);ea(c,b)||(a.uniform4fv(this.addr,b),this.updateCache(b))}function sg(a,b){var c=this.cache;b=Nb(b,this.size,4);ea(c,b)||(a.uniformMatrix2fv(this.addr,!1,b),this.updateCache(b))}function tg(a,b){var c=this.cache;b=Nb(b,this.size,9);ea(c,b)||(a.uniformMatrix3fv(this.addr,!1,b),this.updateCache(b))}function ug(a,b){var c=this.cache;b=Nb(b,this.size,16);ea(c,b)||(a.uniformMatrix4fv(this.addr,!1,b),this.updateCache(b))}function vg(a,b,c){var d=this.cache,e=b.length,f=Le(c,e);!1===ea(d,
+f)&&(a.uniform1iv(this.addr,f),sa(d,f));for(a=0;a!==e;++a)c.setTexture2D(b[a]||Qe,f[a])}function wg(a,b,c){var d=this.cache,e=b.length,f=Le(c,e);!1===ea(d,f)&&(a.uniform1iv(this.addr,f),sa(d,f));for(a=0;a!==e;++a)c.setTextureCube(b[a]||Re,f[a])}function xg(a){switch(a){case 5126:return ng;case 35664:return pg;case 35665:return qg;case 35666:return rg;case 35674:return sg;case 35675:return tg;case 35676:return ug;case 35678:return vg;case 35680:return wg;case 5124:case 35670:return og;case 35667:case 35671:return Se;
+case 35668:case 35672:return Te;case 35669:case 35673:return Ue}}function yg(a,b,c){this.id=a;this.addr=c;this.cache=[];this.setValue=mg(b.type)}function Ve(a,b,c){this.id=a;this.addr=c;this.cache=[];this.size=b.size;this.setValue=xg(b.type)}function We(a){this.id=a;this.seq=[];this.map={}}function db(a,b,c){this.seq=[];this.map={};this.renderer=c;c=a.getProgramParameter(b,35718);for(var d=0;d<c;++d){var e=a.getActiveUniform(b,d),f=a.getUniformLocation(b,e.name),g=this,h=e.name,k=h.length;for($d.lastIndex=
+0;;){var m=$d.exec(h),q=$d.lastIndex,n=m[1],r=m[3];"]"===m[2]&&(n|=0);if(void 0===r||"["===r&&q+2===k){h=g;e=void 0===r?new yg(n,e,f):new Ve(n,e,f);h.seq.push(e);h.map[e.id]=e;break}else r=g.map[n],void 0===r&&(r=new We(n),n=g,g=r,n.seq.push(g),n.map[g.id]=g),g=r}}}function zg(a){a=a.split("\n");for(var b=0;b<a.length;b++)a[b]=b+1+": "+a[b];return a.join("\n")}function Xe(a,b,c){var d=a.createShader(b);a.shaderSource(d,c);a.compileShader(d);!1===a.getShaderParameter(d,35713)&&console.error("THREE.WebGLShader: Shader couldn't compile.");
+""!==a.getShaderInfoLog(d)&&console.warn("THREE.WebGLShader: gl.getShaderInfoLog()",35633===b?"vertex":"fragment",a.getShaderInfoLog(d),zg(c));return d}function Ye(a){switch(a){case 3E3:return["Linear","( value )"];case 3001:return["sRGB","( value )"];case 3002:return["RGBE","( value )"];case 3004:return["RGBM","( value, 7.0 )"];case 3005:return["RGBM","( value, 16.0 )"];case 3006:return["RGBD","( value, 256.0 )"];case 3007:return["Gamma","( value, float( GAMMA_FACTOR ) )"];default:throw Error("unsupported encoding: "+
+a);}}function td(a,b){b=Ye(b);return"vec4 "+a+"( vec4 value ) { return "+b[0]+"ToLinear"+b[1]+"; }"}function Ag(a,b){b=Ye(b);return"vec4 "+a+"( vec4 value ) { return LinearTo"+b[0]+b[1]+"; }"}function Bg(a,b){switch(b){case 1:b="Linear";break;case 2:b="Reinhard";break;case 3:b="Uncharted2";break;case 4:b="OptimizedCineon";break;default:throw Error("unsupported toneMapping: "+b);}return"vec3 "+a+"( vec3 color ) { return "+b+"ToneMapping( color ); }"}function Cg(a,b,c){a=a||{};return[a.derivatives||
+b.envMapCubeUV||b.bumpMap||b.normalMap&&!b.objectSpaceNormalMap||b.flatShading?"#extension GL_OES_standard_derivatives : enable":"",(a.fragDepth||b.logarithmicDepthBuffer)&&c.get("EXT_frag_depth")?"#extension GL_EXT_frag_depth : enable":"",a.drawBuffers&&c.get("WEBGL_draw_buffers")?"#extension GL_EXT_draw_buffers : require":"",(a.shaderTextureLOD||b.envMap)&&c.get("EXT_shader_texture_lod")?"#extension GL_EXT_shader_texture_lod : enable":""].filter(zc).join("\n")}function Dg(a){var b=[],c;for(c in a){var d=
+a[c];!1!==d&&b.push("#define "+c+" "+d)}return b.join("\n")}function zc(a){return""!==a}function Ze(a,b){return a.replace(/NUM_DIR_LIGHTS/g,b.numDirLights).replace(/NUM_SPOT_LIGHTS/g,b.numSpotLights).replace(/NUM_RECT_AREA_LIGHTS/g,b.numRectAreaLights).replace(/NUM_POINT_LIGHTS/g,b.numPointLights).replace(/NUM_HEMI_LIGHTS/g,b.numHemiLights)}function $e(a,b){return a.replace(/NUM_CLIPPING_PLANES/g,b.numClippingPlanes).replace(/UNION_CLIPPING_PLANES/g,b.numClippingPlanes-b.numClipIntersection)}function ae(a){return a.replace(/^[ \t]*#include +<([\w\d./]+)>/gm,
+function(a,c){a=K[c];if(void 0===a)throw Error("Can not resolve #include <"+c+">");return ae(a)})}function af(a){return a.replace(/#pragma unroll_loop[\s]+?for \( int i = (\d+); i < (\d+); i \+\+ \) \{([\s\S]+?)(?=\})\}/g,function(a,c,d,e){a="";for(c=parseInt(c);c<parseInt(d);c++)a+=e.replace(/\[ i \]/g,"[ "+c+" ]");return a})}function Eg(a,b,c,d,e,f,g){var h=a.context,k=d.defines,m=e.vertexShader,q=e.fragmentShader,n="SHADOWMAP_TYPE_BASIC";1===f.shadowMapType?n="SHADOWMAP_TYPE_PCF":2===f.shadowMapType&&
+(n="SHADOWMAP_TYPE_PCF_SOFT");var r="ENVMAP_TYPE_CUBE",x="ENVMAP_MODE_REFLECTION",l="ENVMAP_BLENDING_MULTIPLY";if(f.envMap){switch(d.envMap.mapping){case 301:case 302:r="ENVMAP_TYPE_CUBE";break;case 306:case 307:r="ENVMAP_TYPE_CUBE_UV";break;case 303:case 304:r="ENVMAP_TYPE_EQUIREC";break;case 305:r="ENVMAP_TYPE_SPHERE"}switch(d.envMap.mapping){case 302:case 304:x="ENVMAP_MODE_REFRACTION"}switch(d.combine){case 0:l="ENVMAP_BLENDING_MULTIPLY";break;case 1:l="ENVMAP_BLENDING_MIX";break;case 2:l="ENVMAP_BLENDING_ADD"}}var u=
+0<a.gammaFactor?a.gammaFactor:1,w=g.isWebGL2?"":Cg(d.extensions,f,b),p=Dg(k),v=h.createProgram();d.isRawShaderMaterial?(k=[p].filter(zc).join("\n"),0<k.length&&(k+="\n"),b=[w,p].filter(zc).join("\n"),0<b.length&&(b+="\n")):(k=["precision "+f.precision+" float;","precision "+f.precision+" int;","#define SHADER_NAME "+e.name,p,f.supportsVertexTextures?"#define VERTEX_TEXTURES":"","#define GAMMA_FACTOR "+u,"#define MAX_BONES "+f.maxBones,f.useFog&&f.fog?"#define USE_FOG":"",f.useFog&&f.fogExp?"#define FOG_EXP2":
+"",f.map?"#define USE_MAP":"",f.envMap?"#define USE_ENVMAP":"",f.envMap?"#define "+x:"",f.lightMap?"#define USE_LIGHTMAP":"",f.aoMap?"#define USE_AOMAP":"",f.emissiveMap?"#define USE_EMISSIVEMAP":"",f.bumpMap?"#define USE_BUMPMAP":"",f.normalMap?"#define USE_NORMALMAP":"",f.normalMap&&f.objectSpaceNormalMap?"#define OBJECTSPACE_NORMALMAP":"",f.displacementMap&&f.supportsVertexTextures?"#define USE_DISPLACEMENTMAP":"",f.specularMap?"#define USE_SPECULARMAP":"",f.roughnessMap?"#define USE_ROUGHNESSMAP":
+"",f.metalnessMap?"#define USE_METALNESSMAP":"",f.alphaMap?"#define USE_ALPHAMAP":"",f.vertexColors?"#define USE_COLOR":"",f.flatShading?"#define FLAT_SHADED":"",f.skinning?"#define USE_SKINNING":"",f.useVertexTexture?"#define BONE_TEXTURE":"",f.morphTargets?"#define USE_MORPHTARGETS":"",f.morphNormals&&!1===f.flatShading?"#define USE_MORPHNORMALS":"",f.doubleSided?"#define DOUBLE_SIDED":"",f.flipSided?"#define FLIP_SIDED":"",f.shadowMapEnabled?"#define USE_SHADOWMAP":"",f.shadowMapEnabled?"#define "+
+n:"",f.sizeAttenuation?"#define USE_SIZEATTENUATION":"",f.logarithmicDepthBuffer?"#define USE_LOGDEPTHBUF":"",f.logarithmicDepthBuffer&&(g.isWebGL2||b.get("EXT_frag_depth"))?"#define USE_LOGDEPTHBUF_EXT":"","uniform mat4 modelMatrix;","uniform mat4 modelViewMatrix;","uniform mat4 projectionMatrix;","uniform mat4 viewMatrix;","uniform mat3 normalMatrix;","uniform vec3 cameraPosition;","attribute vec3 position;","attribute vec3 normal;","attribute vec2 uv;","#ifdef USE_COLOR","\tattribute vec3 color;",
+"#endif","#ifdef USE_MORPHTARGETS","\tattribute vec3 morphTarget0;","\tattribute vec3 morphTarget1;","\tattribute vec3 morphTarget2;","\tattribute vec3 morphTarget3;","\t#ifdef USE_MORPHNORMALS","\t\tattribute vec3 morphNormal0;","\t\tattribute vec3 morphNormal1;","\t\tattribute vec3 morphNormal2;","\t\tattribute vec3 morphNormal3;","\t#else","\t\tattribute vec3 morphTarget4;","\t\tattribute vec3 morphTarget5;","\t\tattribute vec3 morphTarget6;","\t\tattribute vec3 morphTarget7;","\t#endif","#endif",
+"#ifdef USE_SKINNING","\tattribute vec4 skinIndex;","\tattribute vec4 skinWeight;","#endif","\n"].filter(zc).join("\n"),b=[w,"precision "+f.precision+" float;","precision "+f.precision+" int;","#define SHADER_NAME "+e.name,p,f.alphaTest?"#define ALPHATEST "+f.alphaTest+(f.alphaTest%1?"":".0"):"","#define GAMMA_FACTOR "+u,f.useFog&&f.fog?"#define USE_FOG":"",f.useFog&&f.fogExp?"#define FOG_EXP2":"",f.map?"#define USE_MAP":"",f.envMap?"#define USE_ENVMAP":"",f.envMap?"#define "+r:"",f.envMap?"#define "+
+x:"",f.envMap?"#define "+l:"",f.lightMap?"#define USE_LIGHTMAP":"",f.aoMap?"#define USE_AOMAP":"",f.emissiveMap?"#define USE_EMISSIVEMAP":"",f.bumpMap?"#define USE_BUMPMAP":"",f.normalMap?"#define USE_NORMALMAP":"",f.normalMap&&f.objectSpaceNormalMap?"#define OBJECTSPACE_NORMALMAP":"",f.specularMap?"#define USE_SPECULARMAP":"",f.roughnessMap?"#define USE_ROUGHNESSMAP":"",f.metalnessMap?"#define USE_METALNESSMAP":"",f.alphaMap?"#define USE_ALPHAMAP":"",f.vertexColors?"#define USE_COLOR":"",f.gradientMap?
+"#define USE_GRADIENTMAP":"",f.flatShading?"#define FLAT_SHADED":"",f.doubleSided?"#define DOUBLE_SIDED":"",f.flipSided?"#define FLIP_SIDED":"",f.shadowMapEnabled?"#define USE_SHADOWMAP":"",f.shadowMapEnabled?"#define "+n:"",f.premultipliedAlpha?"#define PREMULTIPLIED_ALPHA":"",f.physicallyCorrectLights?"#define PHYSICALLY_CORRECT_LIGHTS":"",f.logarithmicDepthBuffer?"#define USE_LOGDEPTHBUF":"",f.logarithmicDepthBuffer&&(g.isWebGL2||b.get("EXT_frag_depth"))?"#define USE_LOGDEPTHBUF_EXT":"",f.envMap&&
+(g.isWebGL2||b.get("EXT_shader_texture_lod"))?"#define TEXTURE_LOD_EXT":"","uniform mat4 viewMatrix;","uniform vec3 cameraPosition;",0!==f.toneMapping?"#define TONE_MAPPING":"",0!==f.toneMapping?K.tonemapping_pars_fragment:"",0!==f.toneMapping?Bg("toneMapping",f.toneMapping):"",f.dithering?"#define DITHERING":"",f.outputEncoding||f.mapEncoding||f.matcapEncoding||f.envMapEncoding||f.emissiveMapEncoding?K.encodings_pars_fragment:"",f.mapEncoding?td("mapTexelToLinear",f.mapEncoding):"",f.matcapEncoding?
+td("matcapTexelToLinear",f.matcapEncoding):"",f.envMapEncoding?td("envMapTexelToLinear",f.envMapEncoding):"",f.emissiveMapEncoding?td("emissiveMapTexelToLinear",f.emissiveMapEncoding):"",f.outputEncoding?Ag("linearToOutputTexel",f.outputEncoding):"",f.depthPacking?"#define DEPTH_PACKING "+d.depthPacking:"","\n"].filter(zc).join("\n"));m=ae(m);m=Ze(m,f);m=$e(m,f);q=ae(q);q=Ze(q,f);q=$e(q,f);m=af(m);q=af(q);g.isWebGL2&&!d.isRawShaderMaterial&&(g=!1,n=/^\s*#version\s+300\s+es\s*\n/,d.isShaderMaterial&&
+null!==m.match(n)&&null!==q.match(n)&&(g=!0,m=m.replace(n,""),q=q.replace(n,"")),k="#version 300 es\n\n#define attribute in\n#define varying out\n#define texture2D texture\n"+k,b=["#version 300 es\n\n#define varying in",g?"":"out highp vec4 pc_fragColor;",g?"":"#define gl_FragColor pc_fragColor","#define gl_FragDepthEXT gl_FragDepth\n#define texture2D texture\n#define textureCube texture\n#define texture2DProj textureProj\n#define texture2DLodEXT textureLod\n#define texture2DProjLodEXT textureProjLod\n#define textureCubeLodEXT textureLod\n#define texture2DGradEXT textureGrad\n#define texture2DProjGradEXT textureProjGrad\n#define textureCubeGradEXT textureGrad"].join("\n")+
+"\n"+b);q=b+q;m=Xe(h,35633,k+m);q=Xe(h,35632,q);h.attachShader(v,m);h.attachShader(v,q);void 0!==d.index0AttributeName?h.bindAttribLocation(v,0,d.index0AttributeName):!0===f.morphTargets&&h.bindAttribLocation(v,0,"position");h.linkProgram(v);f=h.getProgramInfoLog(v).trim();g=h.getShaderInfoLog(m).trim();n=h.getShaderInfoLog(q).trim();x=r=!0;if(!1===h.getProgramParameter(v,35714))r=!1,console.error("THREE.WebGLProgram: shader error: ",h.getError(),"35715",h.getProgramParameter(v,35715),"gl.getProgramInfoLog",
+f,g,n);else if(""!==f)console.warn("THREE.WebGLProgram: gl.getProgramInfoLog()",f);else if(""===g||""===n)x=!1;x&&(this.diagnostics={runnable:r,material:d,programLog:f,vertexShader:{log:g,prefix:k},fragmentShader:{log:n,prefix:b}});h.deleteShader(m);h.deleteShader(q);var H;this.getUniforms=function(){void 0===H&&(H=new db(h,v,a));return H};var y;this.getAttributes=function(){if(void 0===y){for(var a={},b=h.getProgramParameter(v,35721),c=0;c<b;c++){var d=h.getActiveAttrib(v,c).name;a[d]=h.getAttribLocation(v,
+d)}y=a}return y};this.destroy=function(){h.deleteProgram(v);this.program=void 0};Object.defineProperties(this,{uniforms:{get:function(){console.warn("THREE.WebGLProgram: .uniforms is now .getUniforms().");return this.getUniforms()}},attributes:{get:function(){console.warn("THREE.WebGLProgram: .attributes is now .getAttributes().");return this.getAttributes()}}});this.name=e.name;this.id=Fg++;this.code=c;this.usedTimes=1;this.program=v;this.vertexShader=m;this.fragmentShader=q;return this}function Gg(a,
+b,c){function d(a,b){if(a)a.isTexture?c=a.encoding:a.isWebGLRenderTarget&&(console.warn("THREE.WebGLPrograms.getTextureEncodingFromMap: don't use render targets as textures. Use their .texture property instead."),c=a.texture.encoding);else var c=3E3;3E3===c&&b&&(c=3007);return c}var e=[],f={MeshDepthMaterial:"depth",MeshDistanceMaterial:"distanceRGBA",MeshNormalMaterial:"normal",MeshBasicMaterial:"basic",MeshLambertMaterial:"lambert",MeshPhongMaterial:"phong",MeshToonMaterial:"phong",MeshStandardMaterial:"physical",
+MeshPhysicalMaterial:"physical",MeshMatcapMaterial:"matcap",LineBasicMaterial:"basic",LineDashedMaterial:"dashed",PointsMaterial:"points",ShadowMaterial:"shadow",SpriteMaterial:"sprite"},g="precision supportsVertexTextures map mapEncoding matcapEncoding envMap envMapMode envMapEncoding lightMap aoMap emissiveMap emissiveMapEncoding bumpMap normalMap objectSpaceNormalMap displacementMap specularMap roughnessMap metalnessMap gradientMap alphaMap combine vertexColors fog useFog fogExp flatShading sizeAttenuation logarithmicDepthBuffer skinning maxBones useVertexTexture morphTargets morphNormals maxMorphTargets maxMorphNormals premultipliedAlpha numDirLights numPointLights numSpotLights numHemiLights numRectAreaLights shadowMapEnabled shadowMapType toneMapping physicallyCorrectLights alphaTest doubleSided flipSided numClippingPlanes numClipIntersection depthPacking dithering".split(" ");
+this.getParameters=function(b,e,g,q,n,r,x){var h=f[b.type];if(x.isSkinnedMesh){var k=x.skeleton.bones;if(c.floatVertexTextures)k=1024;else{var m=Math.min(Math.floor((c.maxVertexUniforms-20)/4),k.length);m<k.length?(console.warn("THREE.WebGLRenderer: Skeleton has "+k.length+" bones. This GPU supports "+m+"."),k=0):k=m}}else k=0;m=c.precision;null!==b.precision&&(m=c.getMaxPrecision(b.precision),m!==b.precision&&console.warn("THREE.WebGLProgram.getParameters:",b.precision,"not supported, using",m,"instead."));
+var l=a.getRenderTarget();return{shaderID:h,precision:m,supportsVertexTextures:c.vertexTextures,outputEncoding:d(l?l.texture:null,a.gammaOutput),map:!!b.map,mapEncoding:d(b.map,a.gammaInput),matcap:!!b.matcap,matcapEncoding:d(b.matcap,a.gammaInput),envMap:!!b.envMap,envMapMode:b.envMap&&b.envMap.mapping,envMapEncoding:d(b.envMap,a.gammaInput),envMapCubeUV:!!b.envMap&&(306===b.envMap.mapping||307===b.envMap.mapping),lightMap:!!b.lightMap,aoMap:!!b.aoMap,emissiveMap:!!b.emissiveMap,emissiveMapEncoding:d(b.emissiveMap,
+a.gammaInput),bumpMap:!!b.bumpMap,normalMap:!!b.normalMap,objectSpaceNormalMap:1===b.normalMapType,displacementMap:!!b.displacementMap,roughnessMap:!!b.roughnessMap,metalnessMap:!!b.metalnessMap,specularMap:!!b.specularMap,alphaMap:!!b.alphaMap,gradientMap:!!b.gradientMap,combine:b.combine,vertexColors:b.vertexColors,fog:!!q,useFog:b.fog,fogExp:q&&q.isFogExp2,flatShading:b.flatShading,sizeAttenuation:b.sizeAttenuation,logarithmicDepthBuffer:c.logarithmicDepthBuffer,skinning:b.skinning&&0<k,maxBones:k,
+useVertexTexture:c.floatVertexTextures,morphTargets:b.morphTargets,morphNormals:b.morphNormals,maxMorphTargets:a.maxMorphTargets,maxMorphNormals:a.maxMorphNormals,numDirLights:e.directional.length,numPointLights:e.point.length,numSpotLights:e.spot.length,numRectAreaLights:e.rectArea.length,numHemiLights:e.hemi.length,numClippingPlanes:n,numClipIntersection:r,dithering:b.dithering,shadowMapEnabled:a.shadowMap.enabled&&x.receiveShadow&&0<g.length,shadowMapType:a.shadowMap.type,toneMapping:a.toneMapping,
+physicallyCorrectLights:a.physicallyCorrectLights,premultipliedAlpha:b.premultipliedAlpha,alphaTest:b.alphaTest,doubleSided:2===b.side,flipSided:1===b.side,depthPacking:void 0!==b.depthPacking?b.depthPacking:!1}};this.getProgramCode=function(b,c){var d=[];c.shaderID?d.push(c.shaderID):(d.push(b.fragmentShader),d.push(b.vertexShader));if(void 0!==b.defines)for(var e in b.defines)d.push(e),d.push(b.defines[e]);for(e=0;e<g.length;e++)d.push(c[g[e]]);d.push(b.onBeforeCompile.toString());d.push(a.gammaOutput);
+d.push(a.gammaFactor);return d.join()};this.acquireProgram=function(d,f,g,q){for(var h,k=0,m=e.length;k<m;k++){var l=e[k];if(l.code===q){h=l;++h.usedTimes;break}}void 0===h&&(h=new Eg(a,b,q,d,f,g,c),e.push(h));return h};this.releaseProgram=function(a){if(0===--a.usedTimes){var b=e.indexOf(a);e[b]=e[e.length-1];e.pop();a.destroy()}};this.programs=e}function Hg(){var a=new WeakMap;return{get:function(b){var c=a.get(b);void 0===c&&(c={},a.set(b,c));return c},remove:function(b){a.delete(b)},update:function(b,
+c,d){a.get(b)[c]=d},dispose:function(){a=new WeakMap}}}function Ig(a,b){return a.renderOrder!==b.renderOrder?a.renderOrder-b.renderOrder:a.program&&b.program&&a.program!==b.program?a.program.id-b.program.id:a.material.id!==b.material.id?a.material.id-b.material.id:a.z!==b.z?a.z-b.z:a.id-b.id}function Jg(a,b){return a.renderOrder!==b.renderOrder?a.renderOrder-b.renderOrder:a.z!==b.z?b.z-a.z:a.id-b.id}function Kg(){var a=[],b=0,c=[],d=[];return{opaque:c,transparent:d,init:function(){b=0;c.length=0;
+d.length=0},push:function(e,f,g,h,k){var m=a[b];void 0===m?(m={id:e.id,object:e,geometry:f,material:g,program:g.program,renderOrder:e.renderOrder,z:h,group:k},a[b]=m):(m.id=e.id,m.object=e,m.geometry=f,m.material=g,m.program=g.program,m.renderOrder=e.renderOrder,m.z=h,m.group=k);(!0===g.transparent?d:c).push(m);b++},sort:function(){1<c.length&&c.sort(Ig);1<d.length&&d.sort(Jg)}}}function Lg(){var a={};return{get:function(b,c){b=b.id+","+c.id;c=a[b];void 0===c&&(c=new Kg,a[b]=c);return c},dispose:function(){a=
+{}}}}function Mg(){var a={};return{get:function(b){if(void 0!==a[b.id])return a[b.id];switch(b.type){case "DirectionalLight":var c={direction:new p,color:new G,shadow:!1,shadowBias:0,shadowRadius:1,shadowMapSize:new z};break;case "SpotLight":c={position:new p,direction:new p,color:new G,distance:0,coneCos:0,penumbraCos:0,decay:0,shadow:!1,shadowBias:0,shadowRadius:1,shadowMapSize:new z};break;case "PointLight":c={position:new p,color:new G,distance:0,decay:0,shadow:!1,shadowBias:0,shadowRadius:1,
+shadowMapSize:new z,shadowCameraNear:1,shadowCameraFar:1E3};break;case "HemisphereLight":c={direction:new p,skyColor:new G,groundColor:new G};break;case "RectAreaLight":c={color:new G,position:new p,halfWidth:new p,halfHeight:new p}}return a[b.id]=c}}}function Ng(){var a=new Mg,b={id:Og++,hash:{stateID:-1,directionalLength:-1,pointLength:-1,spotLength:-1,rectAreaLength:-1,hemiLength:-1,shadowsLength:-1},ambient:[0,0,0],directional:[],directionalShadowMap:[],directionalShadowMatrix:[],spot:[],spotShadowMap:[],
+spotShadowMatrix:[],rectArea:[],point:[],pointShadowMap:[],pointShadowMatrix:[],hemi:[]},c=new p,d=new P,e=new P;return{setup:function(f,g,h){var k=0,m=0,q=0,n=0,r=0,x=0,l=0,u=0;h=h.matrixWorldInverse;for(var w=0,p=f.length;w<p;w++){var v=f[w],H=v.color,y=v.intensity,N=v.distance,X=v.shadow&&v.shadow.map?v.shadow.map.texture:null;if(v.isAmbientLight)k+=H.r*y,m+=H.g*y,q+=H.b*y;else if(v.isDirectionalLight){var B=a.get(v);B.color.copy(v.color).multiplyScalar(v.intensity);B.direction.setFromMatrixPosition(v.matrixWorld);
+c.setFromMatrixPosition(v.target.matrixWorld);B.direction.sub(c);B.direction.transformDirection(h);if(B.shadow=v.castShadow)H=v.shadow,B.shadowBias=H.bias,B.shadowRadius=H.radius,B.shadowMapSize=H.mapSize;b.directionalShadowMap[n]=X;b.directionalShadowMatrix[n]=v.shadow.matrix;b.directional[n]=B;n++}else if(v.isSpotLight){B=a.get(v);B.position.setFromMatrixPosition(v.matrixWorld);B.position.applyMatrix4(h);B.color.copy(H).multiplyScalar(y);B.distance=N;B.direction.setFromMatrixPosition(v.matrixWorld);
+c.setFromMatrixPosition(v.target.matrixWorld);B.direction.sub(c);B.direction.transformDirection(h);B.coneCos=Math.cos(v.angle);B.penumbraCos=Math.cos(v.angle*(1-v.penumbra));B.decay=v.decay;if(B.shadow=v.castShadow)H=v.shadow,B.shadowBias=H.bias,B.shadowRadius=H.radius,B.shadowMapSize=H.mapSize;b.spotShadowMap[x]=X;b.spotShadowMatrix[x]=v.shadow.matrix;b.spot[x]=B;x++}else if(v.isRectAreaLight)B=a.get(v),B.color.copy(H).multiplyScalar(y),B.position.setFromMatrixPosition(v.matrixWorld),B.position.applyMatrix4(h),
+e.identity(),d.copy(v.matrixWorld),d.premultiply(h),e.extractRotation(d),B.halfWidth.set(.5*v.width,0,0),B.halfHeight.set(0,.5*v.height,0),B.halfWidth.applyMatrix4(e),B.halfHeight.applyMatrix4(e),b.rectArea[l]=B,l++;else if(v.isPointLight){B=a.get(v);B.position.setFromMatrixPosition(v.matrixWorld);B.position.applyMatrix4(h);B.color.copy(v.color).multiplyScalar(v.intensity);B.distance=v.distance;B.decay=v.decay;if(B.shadow=v.castShadow)H=v.shadow,B.shadowBias=H.bias,B.shadowRadius=H.radius,B.shadowMapSize=
+H.mapSize,B.shadowCameraNear=H.camera.near,B.shadowCameraFar=H.camera.far;b.pointShadowMap[r]=X;b.pointShadowMatrix[r]=v.shadow.matrix;b.point[r]=B;r++}else v.isHemisphereLight&&(B=a.get(v),B.direction.setFromMatrixPosition(v.matrixWorld),B.direction.transformDirection(h),B.direction.normalize(),B.skyColor.copy(v.color).multiplyScalar(y),B.groundColor.copy(v.groundColor).multiplyScalar(y),b.hemi[u]=B,u++)}b.ambient[0]=k;b.ambient[1]=m;b.ambient[2]=q;b.directional.length=n;b.spot.length=x;b.rectArea.length=
+l;b.point.length=r;b.hemi.length=u;b.hash.stateID=b.id;b.hash.directionalLength=n;b.hash.pointLength=r;b.hash.spotLength=x;b.hash.rectAreaLength=l;b.hash.hemiLength=u;b.hash.shadowsLength=g.length},state:b}}function bf(){var a=new Ng,b=[],c=[];return{init:function(){b.length=0;c.length=0},state:{lightsArray:b,shadowsArray:c,lights:a},setupLights:function(d){a.setup(b,c,d)},pushLight:function(a){b.push(a)},pushShadow:function(a){c.push(a)}}}function Pg(){var a={};return{get:function(b,c){if(void 0===
+a[b.id]){var d=new bf;a[b.id]={};a[b.id][c.id]=d}else void 0===a[b.id][c.id]?(d=new bf,a[b.id][c.id]=d):d=a[b.id][c.id];return d},dispose:function(){a={}}}}function eb(a){L.call(this);this.type="MeshDepthMaterial";this.depthPacking=3200;this.morphTargets=this.skinning=!1;this.displacementMap=this.alphaMap=this.map=null;this.displacementScale=1;this.displacementBias=0;this.wireframe=!1;this.wireframeLinewidth=1;this.lights=this.fog=!1;this.setValues(a)}function fb(a){L.call(this);this.type="MeshDistanceMaterial";
+this.referencePosition=new p;this.nearDistance=1;this.farDistance=1E3;this.morphTargets=this.skinning=!1;this.displacementMap=this.alphaMap=this.map=null;this.displacementScale=1;this.displacementBias=0;this.lights=this.fog=!1;this.setValues(a)}function cf(a,b,c){function d(b,c,d,e,f,g){var h=b.geometry;var k=n;var m=b.customDepthMaterial;d&&(k=r,m=b.customDistanceMaterial);m?k=m:(m=!1,c.morphTargets&&(h&&h.isBufferGeometry?m=h.morphAttributes&&h.morphAttributes.position&&0<h.morphAttributes.position.length:
+h&&h.isGeometry&&(m=h.morphTargets&&0<h.morphTargets.length)),b.isSkinnedMesh&&!1===c.skinning&&console.warn("THREE.WebGLShadowMap: THREE.SkinnedMesh with material.skinning set to false:",b),b=b.isSkinnedMesh&&c.skinning,h=0,m&&(h|=1),b&&(h|=2),k=k[h]);a.localClippingEnabled&&!0===c.clipShadows&&0!==c.clippingPlanes.length&&(h=k.uuid,m=c.uuid,b=x[h],void 0===b&&(b={},x[h]=b),h=b[m],void 0===h&&(h=k.clone(),b[m]=h),k=h);k.visible=c.visible;k.wireframe=c.wireframe;k.side=null!=c.shadowSide?c.shadowSide:
+l[c.side];k.clipShadows=c.clipShadows;k.clippingPlanes=c.clippingPlanes;k.clipIntersection=c.clipIntersection;k.wireframeLinewidth=c.wireframeLinewidth;k.linewidth=c.linewidth;d&&k.isMeshDistanceMaterial&&(k.referencePosition.copy(e),k.nearDistance=f,k.farDistance=g);return k}function e(c,g,h,k){if(!1!==c.visible){if(c.layers.test(g.layers)&&(c.isMesh||c.isLine||c.isPoints)&&c.castShadow&&(!c.frustumCulled||f.intersectsObject(c))){c.modelViewMatrix.multiplyMatrices(h.matrixWorldInverse,c.matrixWorld);
+var m=b.update(c),n=c.material;if(Array.isArray(n))for(var r=m.groups,x=0,l=r.length;x<l;x++){var t=r[x],u=n[t.materialIndex];u&&u.visible&&(u=d(c,u,k,q,h.near,h.far),a.renderBufferDirect(h,null,m,u,c,t))}else n.visible&&(u=d(c,n,k,q,h.near,h.far),a.renderBufferDirect(h,null,m,u,c,null))}c=c.children;m=0;for(n=c.length;m<n;m++)e(c[m],g,h,k)}}var f=new rd,g=new P,h=new z,k=new z(c,c),m=new p,q=new p,n=Array(4),r=Array(4),x={},l={0:1,1:0,2:2},u=[new p(1,0,0),new p(-1,0,0),new p(0,0,1),new p(0,0,-1),
+new p(0,1,0),new p(0,-1,0)],w=[new p(0,1,0),new p(0,1,0),new p(0,1,0),new p(0,1,0),new p(0,0,1),new p(0,0,-1)],A=[new Z,new Z,new Z,new Z,new Z,new Z];for(c=0;4!==c;++c){var v=0!==(c&1),H=0!==(c&2),y=new eb({depthPacking:3201,morphTargets:v,skinning:H});n[c]=y;v=new fb({morphTargets:v,skinning:H});r[c]=v}var N=this;this.enabled=!1;this.autoUpdate=!0;this.needsUpdate=!1;this.type=1;this.render=function(b,c,d){if(!1!==N.enabled&&(!1!==N.autoUpdate||!1!==N.needsUpdate)&&0!==b.length){var n=a.state;n.disable(3042);
+n.buffers.color.setClear(1,1,1,1);n.buffers.depth.setTest(!0);n.setScissorTest(!1);for(var r,x=0,l=b.length;x<l;x++){var t=b[x];r=t.shadow;var X=t&&t.isPointLight;if(void 0===r)console.warn("THREE.WebGLShadowMap:",t,"has no shadow.");else{var B=r.camera;h.copy(r.mapSize);h.min(k);if(X){var p=h.x,v=h.y;A[0].set(2*p,v,p,v);A[1].set(0,v,p,v);A[2].set(3*p,v,p,v);A[3].set(p,v,p,v);A[4].set(3*p,0,p,v);A[5].set(p,0,p,v);h.x*=4;h.y*=2}null===r.map&&(r.map=new kb(h.x,h.y,{minFilter:1003,magFilter:1003,format:1023}),
+r.map.texture.name=t.name+".shadowMap",B.updateProjectionMatrix());r.isSpotLightShadow&&r.update(t);p=r.map;v=r.matrix;q.setFromMatrixPosition(t.matrixWorld);B.position.copy(q);X?(r=6,v.makeTranslation(-q.x,-q.y,-q.z)):(r=1,m.setFromMatrixPosition(t.target.matrixWorld),B.lookAt(m),B.updateMatrixWorld(),v.set(.5,0,0,.5,0,.5,0,.5,0,0,.5,.5,0,0,0,1),v.multiply(B.projectionMatrix),v.multiply(B.matrixWorldInverse));a.setRenderTarget(p);a.clear();for(t=0;t<r;t++)X&&(m.copy(B.position),m.add(u[t]),B.up.copy(w[t]),
+B.lookAt(m),B.updateMatrixWorld(),n.viewport(A[t])),g.multiplyMatrices(B.projectionMatrix,B.matrixWorldInverse),f.setFromMatrix(g),e(c,d,B,X)}}N.needsUpdate=!1}}}function Qg(a,b,c,d){function e(b,c,d){var e=new Uint8Array(4),f=a.createTexture();a.bindTexture(b,f);a.texParameteri(b,10241,9728);a.texParameteri(b,10240,9728);for(b=0;b<d;b++)a.texImage2D(c+b,0,6408,1,1,0,6408,5121,e);return f}function f(c,e){p[c]=1;0===v[c]&&(a.enableVertexAttribArray(c),v[c]=1);H[c]!==e&&((d.isWebGL2?a:b.get("ANGLE_instanced_arrays"))[d.isWebGL2?
+"vertexAttribDivisor":"vertexAttribDivisorANGLE"](c,e),H[c]=e)}function g(b){!0!==y[b]&&(a.enable(b),y[b]=!0)}function h(b){!1!==y[b]&&(a.disable(b),y[b]=!1)}function k(b,d,e,f,k,m,n,q){if(0===b)B&&(h(3042),B=!1);else if(B||(g(3042),B=!0),5!==b){if(b!==Lb||q!==J){if(100!==z||100!==C)a.blendEquation(32774),C=z=100;if(q)switch(b){case 1:a.blendFuncSeparate(1,771,1,771);break;case 2:a.blendFunc(1,1);break;case 3:a.blendFuncSeparate(0,0,769,771);break;case 4:a.blendFuncSeparate(0,768,0,770);break;default:console.error("THREE.WebGLState: Invalid blending: ",
+b)}else switch(b){case 1:a.blendFuncSeparate(770,771,1,771);break;case 2:a.blendFunc(770,1);break;case 3:a.blendFunc(0,769);break;case 4:a.blendFunc(0,768);break;default:console.error("THREE.WebGLState: Invalid blending: ",b)}E=D=Y=Zd=null;Lb=b;J=q}}else{k=k||d;m=m||e;n=n||f;if(d!==z||k!==C)a.blendEquationSeparate(c.convert(d),c.convert(k)),z=d,C=k;if(e!==Zd||f!==Y||m!==D||n!==E)a.blendFuncSeparate(c.convert(e),c.convert(f),c.convert(m),c.convert(n)),Zd=e,Y=f,D=m,E=n;Lb=b;J=null}}function m(b){G!==
+b&&(b?a.frontFace(2304):a.frontFace(2305),G=b)}function q(b){0!==b?(g(2884),b!==Q&&(1===b?a.cullFace(1029):2===b?a.cullFace(1028):a.cullFace(1032))):h(2884);Q=b}function n(b,c,d){if(b){if(g(32823),I!==c||L!==d)a.polygonOffset(c,d),I=c,L=d}else h(32823)}function r(b){void 0===b&&(b=33984+R-1);K!==b&&(a.activeTexture(b),K=b)}var x=new function(){var b=!1,c=new Z,d=null,e=new Z(0,0,0,0);return{setMask:function(c){d===c||b||(a.colorMask(c,c,c,c),d=c)},setLocked:function(a){b=a},setClear:function(b,d,
+f,g,h){!0===h&&(b*=g,d*=g,f*=g);c.set(b,d,f,g);!1===e.equals(c)&&(a.clearColor(b,d,f,g),e.copy(c))},reset:function(){b=!1;d=null;e.set(-1,0,0,0)}}},l=new function(){var b=!1,c=null,d=null,e=null;return{setTest:function(a){a?g(2929):h(2929)},setMask:function(d){c===d||b||(a.depthMask(d),c=d)},setFunc:function(b){if(d!==b){if(b)switch(b){case 0:a.depthFunc(512);break;case 1:a.depthFunc(519);break;case 2:a.depthFunc(513);break;case 3:a.depthFunc(515);break;case 4:a.depthFunc(514);break;case 5:a.depthFunc(518);
+break;case 6:a.depthFunc(516);break;case 7:a.depthFunc(517);break;default:a.depthFunc(515)}else a.depthFunc(515);d=b}},setLocked:function(a){b=a},setClear:function(b){e!==b&&(a.clearDepth(b),e=b)},reset:function(){b=!1;e=d=c=null}}},u=new function(){var b=!1,c=null,d=null,e=null,f=null,k=null,m=null,n=null,q=null;return{setTest:function(a){a?g(2960):h(2960)},setMask:function(d){c===d||b||(a.stencilMask(d),c=d)},setFunc:function(b,c,g){if(d!==b||e!==c||f!==g)a.stencilFunc(b,c,g),d=b,e=c,f=g},setOp:function(b,
+c,d){if(k!==b||m!==c||n!==d)a.stencilOp(b,c,d),k=b,m=c,n=d},setLocked:function(a){b=a},setClear:function(b){q!==b&&(a.clearStencil(b),q=b)},reset:function(){b=!1;q=n=m=k=f=e=d=c=null}}},w=a.getParameter(34921),p=new Uint8Array(w),v=new Uint8Array(w),H=new Uint8Array(w),y={},N=null,X=null,B=null,Lb=null,z=null,Zd=null,Y=null,C=null,D=null,E=null,J=!1,G=null,Q=null,P=null,I=null,L=null,R=a.getParameter(35661),F=!1;w=0;w=a.getParameter(7938);-1!==w.indexOf("WebGL")?(w=parseFloat(/^WebGL ([0-9])/.exec(w)[1]),
+F=1<=w):-1!==w.indexOf("OpenGL ES")&&(w=parseFloat(/^OpenGL ES ([0-9])/.exec(w)[1]),F=2<=w);var K=null,T={},W=new Z,M=new Z,U={};U[3553]=e(3553,3553,1);U[34067]=e(34067,34069,6);x.setClear(0,0,0,1);l.setClear(1);u.setClear(0);g(2929);l.setFunc(3);m(!1);q(1);g(2884);k(0);return{buffers:{color:x,depth:l,stencil:u},initAttributes:function(){for(var a=0,b=p.length;a<b;a++)p[a]=0},enableAttribute:function(a){f(a,0)},enableAttributeAndDivisor:f,disableUnusedAttributes:function(){for(var b=0,c=v.length;b!==
+c;++b)v[b]!==p[b]&&(a.disableVertexAttribArray(b),v[b]=0)},enable:g,disable:h,getCompressedTextureFormats:function(){if(null===N&&(N=[],b.get("WEBGL_compressed_texture_pvrtc")||b.get("WEBGL_compressed_texture_s3tc")||b.get("WEBGL_compressed_texture_etc1")||b.get("WEBGL_compressed_texture_astc")))for(var c=a.getParameter(34467),d=0;d<c.length;d++)N.push(c[d]);return N},useProgram:function(b){return X!==b?(a.useProgram(b),X=b,!0):!1},setBlending:k,setMaterial:function(a,b){2===a.side?h(2884):g(2884);
+var c=1===a.side;b&&(c=!c);m(c);1===a.blending&&!1===a.transparent?k(0):k(a.blending,a.blendEquation,a.blendSrc,a.blendDst,a.blendEquationAlpha,a.blendSrcAlpha,a.blendDstAlpha,a.premultipliedAlpha);l.setFunc(a.depthFunc);l.setTest(a.depthTest);l.setMask(a.depthWrite);x.setMask(a.colorWrite);n(a.polygonOffset,a.polygonOffsetFactor,a.polygonOffsetUnits)},setFlipSided:m,setCullFace:q,setLineWidth:function(b){b!==P&&(F&&a.lineWidth(b),P=b)},setPolygonOffset:n,setScissorTest:function(a){a?g(3089):h(3089)},
+activeTexture:r,bindTexture:function(b,c){null===K&&r();var d=T[K];void 0===d&&(d={type:void 0,texture:void 0},T[K]=d);if(d.type!==b||d.texture!==c)a.bindTexture(b,c||U[b]),d.type=b,d.texture=c},compressedTexImage2D:function(){try{a.compressedTexImage2D.apply(a,arguments)}catch(fa){console.error("THREE.WebGLState:",fa)}},texImage2D:function(){try{a.texImage2D.apply(a,arguments)}catch(fa){console.error("THREE.WebGLState:",fa)}},texImage3D:function(){try{a.texImage3D.apply(a,arguments)}catch(fa){console.error("THREE.WebGLState:",
+fa)}},scissor:function(b){!1===W.equals(b)&&(a.scissor(b.x,b.y,b.z,b.w),W.copy(b))},viewport:function(b){!1===M.equals(b)&&(a.viewport(b.x,b.y,b.z,b.w),M.copy(b))},reset:function(){for(var b=0;b<v.length;b++)1===v[b]&&(a.disableVertexAttribArray(b),v[b]=0);y={};K=N=null;T={};Q=G=Lb=X=null;x.reset();l.reset();u.reset()}}}function Rg(a,b,c,d,e,f,g){function h(a,b){if(a.width>b||a.height>b){if("data"in a){console.warn("THREE.WebGLRenderer: image in DataTexture is too big ("+a.width+"x"+a.height+").");
+return}b/=Math.max(a.width,a.height);var c=document.createElementNS("http://www.w3.org/1999/xhtml","canvas");c.width=Math.floor(a.width*b);c.height=Math.floor(a.height*b);c.getContext("2d").drawImage(a,0,0,a.width,a.height,0,0,c.width,c.height);console.warn("THREE.WebGLRenderer: image is too big ("+a.width+"x"+a.height+"). Resized to "+c.width+"x"+c.height);return c}return a}function k(a){return R.isPowerOfTwo(a.width)&&R.isPowerOfTwo(a.height)}function m(a,b){return a.generateMipmaps&&b&&1003!==
+a.minFilter&&1006!==a.minFilter}function q(b,c,e,f){a.generateMipmap(b);d.get(c).__maxMipLevel=Math.log(Math.max(e,f))*Math.LOG2E}function n(a,b){if(!e.isWebGL2)return a;if(6403===a){if(5126===b)return 33326;if(5131===b)return 33325;if(5121===b)return 33321}if(6407===a){if(5126===b)return 34837;if(5131===b)return 34843;if(5121===b)return 32849}if(6408===a){if(5126===b)return 34836;if(5131===b)return 34842;if(5121===b)return 32856}return a}function r(a){return 1003===a||1004===a||1005===a?9728:9729}
+function x(b){b=b.target;b.removeEventListener("dispose",x);a:{var c=d.get(b);if(b.image&&c.__image__webglTextureCube)a.deleteTexture(c.__image__webglTextureCube);else{if(void 0===c.__webglInit)break a;a.deleteTexture(c.__webglTexture)}d.remove(b)}b.isVideoTexture&&delete y[b.id];g.memory.textures--}function l(b){b=b.target;b.removeEventListener("dispose",l);var c=d.get(b),e=d.get(b.texture);if(b){void 0!==e.__webglTexture&&a.deleteTexture(e.__webglTexture);b.depthTexture&&b.depthTexture.dispose();
+if(b.isWebGLRenderTargetCube)for(e=0;6>e;e++)a.deleteFramebuffer(c.__webglFramebuffer[e]),c.__webglDepthbuffer&&a.deleteRenderbuffer(c.__webglDepthbuffer[e]);else a.deleteFramebuffer(c.__webglFramebuffer),c.__webglDepthbuffer&&a.deleteRenderbuffer(c.__webglDepthbuffer);d.remove(b.texture);d.remove(b)}g.memory.textures--}function u(a,b){var e=d.get(a);if(a.isVideoTexture){var f=a.id,h=g.render.frame;y[f]!==h&&(y[f]=h,a.update())}if(0<a.version&&e.__version!==a.version)if(f=a.image,void 0===f)console.warn("THREE.WebGLRenderer: Texture marked for update but image is undefined");
+else if(!1===f.complete)console.warn("THREE.WebGLRenderer: Texture marked for update but image is incomplete");else{A(e,a,b);return}c.activeTexture(33984+b);c.bindTexture(3553,e.__webglTexture)}function p(c,g,h){h?(a.texParameteri(c,10242,f.convert(g.wrapS)),a.texParameteri(c,10243,f.convert(g.wrapT)),a.texParameteri(c,10240,f.convert(g.magFilter)),a.texParameteri(c,10241,f.convert(g.minFilter))):(a.texParameteri(c,10242,33071),a.texParameteri(c,10243,33071),1001===g.wrapS&&1001===g.wrapT||console.warn("THREE.WebGLRenderer: Texture is not power of two. Texture.wrapS and Texture.wrapT should be set to THREE.ClampToEdgeWrapping."),
+a.texParameteri(c,10240,r(g.magFilter)),a.texParameteri(c,10241,r(g.minFilter)),1003!==g.minFilter&&1006!==g.minFilter&&console.warn("THREE.WebGLRenderer: Texture is not power of two. Texture.minFilter should be set to THREE.NearestFilter or THREE.LinearFilter."));!(h=b.get("EXT_texture_filter_anisotropic"))||1015===g.type&&null===b.get("OES_texture_float_linear")||1016===g.type&&null===(e.isWebGL2||b.get("OES_texture_half_float_linear"))||!(1<g.anisotropy||d.get(g).__currentAnisotropy)||(a.texParameterf(c,
+h.TEXTURE_MAX_ANISOTROPY_EXT,Math.min(g.anisotropy,e.getMaxAnisotropy())),d.get(g).__currentAnisotropy=g.anisotropy)}function A(b,d,r){var l=d.isDataTexture3D?32879:3553;void 0===b.__webglInit&&(b.__webglInit=!0,d.addEventListener("dispose",x),b.__webglTexture=a.createTexture(),g.memory.textures++);c.activeTexture(33984+r);c.bindTexture(l,b.__webglTexture);a.pixelStorei(37440,d.flipY);a.pixelStorei(37441,d.premultiplyAlpha);a.pixelStorei(3317,d.unpackAlignment);r=h(d.image,e.maxTextureSize);var t=
+e.isWebGL2?!1:1001!==d.wrapS||1001!==d.wrapT||1003!==d.minFilter&&1006!==d.minFilter;t&&!1===k(r)&&(r instanceof HTMLImageElement||r instanceof HTMLCanvasElement||r instanceof ImageBitmap)&&(void 0===N&&(N=document.createElementNS("http://www.w3.org/1999/xhtml","canvas")),N.width=R.floorPowerOfTwo(r.width),N.height=R.floorPowerOfTwo(r.height),N.getContext("2d").drawImage(r,0,0,N.width,N.height),console.warn("THREE.WebGLRenderer: image is not power of two ("+r.width+"x"+r.height+"). Resized to "+N.width+
+"x"+N.height),r=N);t=k(r);var u=f.convert(d.format),w=f.convert(d.type),v=n(u,w);p(l,d,t);var X=d.mipmaps;if(d.isDepthTexture){v=6402;if(1015===d.type){if(!e.isWebGL2)throw Error("Float Depth Texture only supported in WebGL2.0");v=36012}else e.isWebGL2&&(v=33189);1026===d.format&&6402===v&&1012!==d.type&&1014!==d.type&&(console.warn("THREE.WebGLRenderer: Use UnsignedShortType or UnsignedIntType for DepthFormat DepthTexture."),d.type=1012,w=f.convert(d.type));1027===d.format&&(v=34041,1020!==d.type&&
+(console.warn("THREE.WebGLRenderer: Use UnsignedInt248Type for DepthStencilFormat DepthTexture."),d.type=1020,w=f.convert(d.type)));c.texImage2D(3553,0,v,r.width,r.height,0,u,w,null)}else if(d.isDataTexture)if(0<X.length&&t){for(var B=0,y=X.length;B<y;B++)l=X[B],c.texImage2D(3553,B,v,l.width,l.height,0,u,w,l.data);d.generateMipmaps=!1;b.__maxMipLevel=X.length-1}else c.texImage2D(3553,0,v,r.width,r.height,0,u,w,r.data),b.__maxMipLevel=0;else if(d.isCompressedTexture){B=0;for(y=X.length;B<y;B++)l=X[B],
+1023!==d.format&&1022!==d.format?-1<c.getCompressedTextureFormats().indexOf(u)?c.compressedTexImage2D(3553,B,v,l.width,l.height,0,l.data):console.warn("THREE.WebGLRenderer: Attempt to load unsupported compressed texture format in .uploadTexture()"):c.texImage2D(3553,B,v,l.width,l.height,0,u,w,l.data);b.__maxMipLevel=X.length-1}else if(d.isDataTexture3D)c.texImage3D(32879,0,v,r.width,r.height,r.depth,0,u,w,r.data),b.__maxMipLevel=0;else if(0<X.length&&t){B=0;for(y=X.length;B<y;B++)l=X[B],c.texImage2D(3553,
+B,v,u,w,l);d.generateMipmaps=!1;b.__maxMipLevel=X.length-1}else c.texImage2D(3553,0,v,u,w,r),b.__maxMipLevel=0;m(d,t)&&q(3553,d,r.width,r.height);b.__version=d.version;if(d.onUpdate)d.onUpdate(d)}function v(b,e,g,h){var k=f.convert(e.texture.format),m=f.convert(e.texture.type),q=n(k,m);c.texImage2D(h,0,q,e.width,e.height,0,k,m,null);a.bindFramebuffer(36160,b);a.framebufferTexture2D(36160,g,h,d.get(e.texture).__webglTexture,0);a.bindFramebuffer(36160,null)}function H(b,c){a.bindRenderbuffer(36161,
+b);c.depthBuffer&&!c.stencilBuffer?(a.renderbufferStorage(36161,33189,c.width,c.height),a.framebufferRenderbuffer(36160,36096,36161,b)):c.depthBuffer&&c.stencilBuffer?(a.renderbufferStorage(36161,34041,c.width,c.height),a.framebufferRenderbuffer(36160,33306,36161,b)):a.renderbufferStorage(36161,32854,c.width,c.height);a.bindRenderbuffer(36161,null)}var y={},N;this.setTexture2D=u;this.setTexture3D=function(a,b){var e=d.get(a);0<a.version&&e.__version!==a.version?A(e,a,b):(c.activeTexture(33984+b),
+c.bindTexture(32879,e.__webglTexture))};this.setTextureCube=function(b,r){var l=d.get(b);if(6===b.image.length)if(0<b.version&&l.__version!==b.version){l.__image__webglTextureCube||(b.addEventListener("dispose",x),l.__image__webglTextureCube=a.createTexture(),g.memory.textures++);c.activeTexture(33984+r);c.bindTexture(34067,l.__image__webglTextureCube);a.pixelStorei(37440,b.flipY);r=b&&b.isCompressedTexture;for(var t=b.image[0]&&b.image[0].isDataTexture,u=[],w=0;6>w;w++)u[w]=r||t?t?b.image[w].image:
+b.image[w]:h(b.image[w],e.maxCubemapSize);var v=u[0],X=k(v),B=f.convert(b.format),y=f.convert(b.type),H=n(B,y);p(34067,b,X);for(w=0;6>w;w++)if(r)for(var A,N=u[w].mipmaps,z=0,C=N.length;z<C;z++)A=N[z],1023!==b.format&&1022!==b.format?-1<c.getCompressedTextureFormats().indexOf(B)?c.compressedTexImage2D(34069+w,z,H,A.width,A.height,0,A.data):console.warn("THREE.WebGLRenderer: Attempt to load unsupported compressed texture format in .setTextureCube()"):c.texImage2D(34069+w,z,H,A.width,A.height,0,B,y,
+A.data);else t?c.texImage2D(34069+w,0,H,u[w].width,u[w].height,0,B,y,u[w].data):c.texImage2D(34069+w,0,H,B,y,u[w]);l.__maxMipLevel=r?N.length-1:0;m(b,X)&&q(34067,b,v.width,v.height);l.__version=b.version;if(b.onUpdate)b.onUpdate(b)}else c.activeTexture(33984+r),c.bindTexture(34067,l.__image__webglTextureCube)};this.setTextureCubeDynamic=function(a,b){c.activeTexture(33984+b);c.bindTexture(34067,d.get(a).__webglTexture)};this.setupRenderTarget=function(b){var e=d.get(b),f=d.get(b.texture);b.addEventListener("dispose",
+l);f.__webglTexture=a.createTexture();g.memory.textures++;var h=!0===b.isWebGLRenderTargetCube,n=k(b);if(h){e.__webglFramebuffer=[];for(var r=0;6>r;r++)e.__webglFramebuffer[r]=a.createFramebuffer()}else e.__webglFramebuffer=a.createFramebuffer();if(h){c.bindTexture(34067,f.__webglTexture);p(34067,b.texture,n);for(r=0;6>r;r++)v(e.__webglFramebuffer[r],b,36064,34069+r);m(b.texture,n)&&q(34067,b.texture,b.width,b.height);c.bindTexture(34067,null)}else c.bindTexture(3553,f.__webglTexture),p(3553,b.texture,
+n),v(e.__webglFramebuffer,b,36064,3553),m(b.texture,n)&&q(3553,b.texture,b.width,b.height),c.bindTexture(3553,null);if(b.depthBuffer){e=d.get(b);f=!0===b.isWebGLRenderTargetCube;if(b.depthTexture){if(f)throw Error("target.depthTexture not supported in Cube render targets");if(b&&b.isWebGLRenderTargetCube)throw Error("Depth Texture with cube render targets is not supported");a.bindFramebuffer(36160,e.__webglFramebuffer);if(!b.depthTexture||!b.depthTexture.isDepthTexture)throw Error("renderTarget.depthTexture must be an instance of THREE.DepthTexture");
+d.get(b.depthTexture).__webglTexture&&b.depthTexture.image.width===b.width&&b.depthTexture.image.height===b.height||(b.depthTexture.image.width=b.width,b.depthTexture.image.height=b.height,b.depthTexture.needsUpdate=!0);u(b.depthTexture,0);e=d.get(b.depthTexture).__webglTexture;if(1026===b.depthTexture.format)a.framebufferTexture2D(36160,36096,3553,e,0);else if(1027===b.depthTexture.format)a.framebufferTexture2D(36160,33306,3553,e,0);else throw Error("Unknown depthTexture format");}else if(f)for(e.__webglDepthbuffer=
+[],f=0;6>f;f++)a.bindFramebuffer(36160,e.__webglFramebuffer[f]),e.__webglDepthbuffer[f]=a.createRenderbuffer(),H(e.__webglDepthbuffer[f],b);else a.bindFramebuffer(36160,e.__webglFramebuffer),e.__webglDepthbuffer=a.createRenderbuffer(),H(e.__webglDepthbuffer,b);a.bindFramebuffer(36160,null)}};this.updateRenderTargetMipmap=function(a){var b=a.texture,e=k(a);if(m(b,e)){e=a.isWebGLRenderTargetCube?34067:3553;var f=d.get(b).__webglTexture;c.bindTexture(e,f);q(e,b,a.width,a.height);c.bindTexture(e,null)}}}
+function df(a,b,c){return{convert:function(a){if(1E3===a)return 10497;if(1001===a)return 33071;if(1002===a)return 33648;if(1003===a)return 9728;if(1004===a)return 9984;if(1005===a)return 9986;if(1006===a)return 9729;if(1007===a)return 9985;if(1008===a)return 9987;if(1009===a)return 5121;if(1017===a)return 32819;if(1018===a)return 32820;if(1019===a)return 33635;if(1010===a)return 5120;if(1011===a)return 5122;if(1012===a)return 5123;if(1013===a)return 5124;if(1014===a)return 5125;if(1015===a)return 5126;
+if(1016===a){if(c.isWebGL2)return 5131;var d=b.get("OES_texture_half_float");if(null!==d)return d.HALF_FLOAT_OES}if(1021===a)return 6406;if(1022===a)return 6407;if(1023===a)return 6408;if(1024===a)return 6409;if(1025===a)return 6410;if(1026===a)return 6402;if(1027===a)return 34041;if(1028===a)return 6403;if(100===a)return 32774;if(101===a)return 32778;if(102===a)return 32779;if(200===a)return 0;if(201===a)return 1;if(202===a)return 768;if(203===a)return 769;if(204===a)return 770;if(205===a)return 771;
+if(206===a)return 772;if(207===a)return 773;if(208===a)return 774;if(209===a)return 775;if(210===a)return 776;if(33776===a||33777===a||33778===a||33779===a)if(d=b.get("WEBGL_compressed_texture_s3tc"),null!==d){if(33776===a)return d.COMPRESSED_RGB_S3TC_DXT1_EXT;if(33777===a)return d.COMPRESSED_RGBA_S3TC_DXT1_EXT;if(33778===a)return d.COMPRESSED_RGBA_S3TC_DXT3_EXT;if(33779===a)return d.COMPRESSED_RGBA_S3TC_DXT5_EXT}if(35840===a||35841===a||35842===a||35843===a)if(d=b.get("WEBGL_compressed_texture_pvrtc"),
+null!==d){if(35840===a)return d.COMPRESSED_RGB_PVRTC_4BPPV1_IMG;if(35841===a)return d.COMPRESSED_RGB_PVRTC_2BPPV1_IMG;if(35842===a)return d.COMPRESSED_RGBA_PVRTC_4BPPV1_IMG;if(35843===a)return d.COMPRESSED_RGBA_PVRTC_2BPPV1_IMG}if(36196===a&&(d=b.get("WEBGL_compressed_texture_etc1"),null!==d))return d.COMPRESSED_RGB_ETC1_WEBGL;if(37808===a||37809===a||37810===a||37811===a||37812===a||37813===a||37814===a||37815===a||37816===a||37817===a||37818===a||37819===a||37820===a||37821===a)if(d=b.get("WEBGL_compressed_texture_astc"),
+null!==d)return a;if(103===a||104===a){if(c.isWebGL2){if(103===a)return 32775;if(104===a)return 32776}d=b.get("EXT_blend_minmax");if(null!==d){if(103===a)return d.MIN_EXT;if(104===a)return d.MAX_EXT}}if(1020===a){if(c.isWebGL2)return 34042;d=b.get("WEBGL_depth_texture");if(null!==d)return d.UNSIGNED_INT_24_8_WEBGL}return 0}}}function Ob(){D.call(this);this.type="Group"}function Ra(){D.call(this);this.type="Camera";this.matrixWorldInverse=new P;this.projectionMatrix=new P;this.projectionMatrixInverse=
+new P}function V(a,b,c,d){Ra.call(this);this.type="PerspectiveCamera";this.fov=void 0!==a?a:50;this.zoom=1;this.near=void 0!==c?c:.1;this.far=void 0!==d?d:2E3;this.focus=10;this.aspect=void 0!==b?b:1;this.view=null;this.filmGauge=35;this.filmOffset=0;this.updateProjectionMatrix()}function Cc(a){V.call(this);this.cameras=a||[]}function ef(a,b,c){ff.setFromMatrixPosition(b.matrixWorld);gf.setFromMatrixPosition(c.matrixWorld);var d=ff.distanceTo(gf),e=b.projectionMatrix.elements,f=c.projectionMatrix.elements,
+g=e[14]/(e[10]-1);c=e[14]/(e[10]+1);var h=(e[9]+1)/e[5],k=(e[9]-1)/e[5],m=(e[8]-1)/e[0],q=(f[8]+1)/f[0];e=g*m;f=g*q;q=d/(-m+q);m=q*-m;b.matrixWorld.decompose(a.position,a.quaternion,a.scale);a.translateX(m);a.translateZ(q);a.matrixWorld.compose(a.position,a.quaternion,a.scale);a.matrixWorldInverse.getInverse(a.matrixWorld);b=g+q;g=c+q;a.projectionMatrix.makePerspective(e-m,f+(d-m),h*c/g*b,k*c/g*b,b,g)}function hf(a){function b(){return null!==e&&!0===e.isPresenting}function c(){if(b()){var c=e.getEyeParameters("left"),
+f=c.renderWidth*q;c=c.renderHeight*q;H=a.getPixelRatio();v=a.getSize();a.setDrawingBufferSize(2*f,c,1);N.start()}else d.enabled&&a.setDrawingBufferSize(v.width,v.height,H),N.stop()}var d=this,e=null,f=null,g=null,h=[],k=new P,m=new P,q=1,n="stage";"undefined"!==typeof window&&"VRFrameData"in window&&(f=new window.VRFrameData,window.addEventListener("vrdisplaypresentchange",c,!1));var r=new P,l=new ja,t=new p,u=new V;u.bounds=new Z(0,0,.5,1);u.layers.enable(1);var w=new V;w.bounds=new Z(.5,0,.5,1);
+w.layers.enable(2);var A=new Cc([u,w]);A.layers.enable(1);A.layers.enable(2);var v,H,y=[];this.enabled=!1;this.getController=function(a){var b=h[a];void 0===b&&(b=new Ob,b.matrixAutoUpdate=!1,b.visible=!1,h[a]=b);return b};this.getDevice=function(){return e};this.setDevice=function(a){void 0!==a&&(e=a);N.setContext(a)};this.setFramebufferScaleFactor=function(a){q=a};this.setFrameOfReferenceType=function(a){n=a};this.setPoseTarget=function(a){void 0!==a&&(g=a)};this.getCamera=function(a){var b="stage"===
+n?1.6:0;if(null===e)return a.position.set(0,b,0),a;e.depthNear=a.near;e.depthFar=a.far;e.getFrameData(f);if("stage"===n){var c=e.stageParameters;c?k.fromArray(c.sittingToStandingTransform):k.makeTranslation(0,b,0)}b=f.pose;c=null!==g?g:a;c.matrix.copy(k);c.matrix.decompose(c.position,c.quaternion,c.scale);null!==b.orientation&&(l.fromArray(b.orientation),c.quaternion.multiply(l));null!==b.position&&(l.setFromRotationMatrix(k),t.fromArray(b.position),t.applyQuaternion(l),c.position.add(t));c.updateMatrixWorld();
+if(!1===e.isPresenting)return a;u.near=a.near;w.near=a.near;u.far=a.far;w.far=a.far;u.matrixWorldInverse.fromArray(f.leftViewMatrix);w.matrixWorldInverse.fromArray(f.rightViewMatrix);m.getInverse(k);"stage"===n&&(u.matrixWorldInverse.multiply(m),w.matrixWorldInverse.multiply(m));a=c.parent;null!==a&&(r.getInverse(a.matrixWorld),u.matrixWorldInverse.multiply(r),w.matrixWorldInverse.multiply(r));u.matrixWorld.getInverse(u.matrixWorldInverse);w.matrixWorld.getInverse(w.matrixWorldInverse);u.projectionMatrix.fromArray(f.leftProjectionMatrix);
+w.projectionMatrix.fromArray(f.rightProjectionMatrix);ef(A,u,w);a=e.getLayers();a.length&&(a=a[0],null!==a.leftBounds&&4===a.leftBounds.length&&u.bounds.fromArray(a.leftBounds),null!==a.rightBounds&&4===a.rightBounds.length&&w.bounds.fromArray(a.rightBounds));a:for(a=0;a<h.length;a++){b=h[a];b:{c=a;for(var d=navigator.getGamepads&&navigator.getGamepads(),q=0,x=0,p=d.length;q<p;q++){var v=d[q];if(v&&("Daydream Controller"===v.id||"Gear VR Controller"===v.id||"Oculus Go Controller"===v.id||"OpenVR Gamepad"===
+v.id||v.id.startsWith("Oculus Touch")||v.id.startsWith("Spatial Controller"))){if(x===c){c=v;break b}x++}}c=void 0}if(void 0!==c&&void 0!==c.pose){if(null===c.pose)break a;d=c.pose;!1===d.hasPosition&&b.position.set(.2,-.6,-.05);null!==d.position&&b.position.fromArray(d.position);null!==d.orientation&&b.quaternion.fromArray(d.orientation);b.matrix.compose(b.position,b.quaternion,b.scale);b.matrix.premultiply(k);b.matrix.decompose(b.position,b.quaternion,b.scale);b.matrixWorldNeedsUpdate=!0;b.visible=
+!0;d="Daydream Controller"===c.id?0:1;y[a]!==c.buttons[d].pressed&&(y[a]=c.buttons[d].pressed,!0===y[a]?b.dispatchEvent({type:"selectstart"}):(b.dispatchEvent({type:"selectend"}),b.dispatchEvent({type:"select"})))}else b.visible=!1}return A};this.getStandingMatrix=function(){return k};this.isPresenting=b;var N=new Xd;this.setAnimationLoop=function(a){N.setAnimationLoop(a)};this.submitFrame=function(){b()&&e.submitFrame()};this.dispose=function(){"undefined"!==typeof window&&window.removeEventListener("vrdisplaypresentchange",
+c)}}function Sg(a){function b(){return null!==h&&null!==m}function c(a){var b=r[l.indexOf(a.inputSource)];b&&b.dispatchEvent({type:a.type})}function d(){a.setFramebuffer(null);v.stop()}function e(a,b){null===b?a.matrixWorld.copy(a.matrix):a.matrixWorld.multiplyMatrices(b.matrixWorld,a.matrix);a.matrixWorldInverse.getInverse(a.matrixWorld)}var f=a.context,g=null,h=null,k=1,m=null,q="stage",n=null,r=[],l=[],t=new V;t.layers.enable(1);t.viewport=new Z;var u=new V;u.layers.enable(2);u.viewport=new Z;
+var w=new Cc([t,u]);w.layers.enable(1);w.layers.enable(2);this.enabled=!1;this.getController=function(a){var b=r[a];void 0===b&&(b=new Ob,b.matrixAutoUpdate=!1,b.visible=!1,r[a]=b);return b};this.getDevice=function(){return g};this.setDevice=function(a){void 0!==a&&(g=a);a instanceof XRDevice&&f.setCompatibleXRDevice(a)};this.setFramebufferScaleFactor=function(a){k=a};this.setFrameOfReferenceType=function(a){q=a};this.setSession=function(b){h=b;null!==h&&(h.addEventListener("select",c),h.addEventListener("selectstart",
+c),h.addEventListener("selectend",c),h.addEventListener("end",d),h.baseLayer=new XRWebGLLayer(h,f,{framebufferScaleFactor:k}),h.requestFrameOfReference(q).then(function(b){m=b;a.setFramebuffer(h.baseLayer.framebuffer);v.setContext(h);v.start()}),l=h.getInputSources(),h.addEventListener("inputsourceschange",function(){l=h.getInputSources();console.log(l);for(var a=0;a<r.length;a++)r[a].userData.inputSource=l[a]}))};this.getCamera=function(a){if(b()){var c=a.parent,d=w.cameras;e(w,c);for(var f=0;f<
+d.length;f++)e(d[f],c);a.matrixWorld.copy(w.matrixWorld);a=a.children;f=0;for(c=a.length;f<c;f++)a[f].updateMatrixWorld(!0);ef(w,t,u);return w}return a};this.isPresenting=b;var p=null,v=new Xd;v.setAnimationLoop(function(a,b){n=b.getDevicePose(m);if(null!==n)for(var c=h.baseLayer,d=b.views,e=0;e<d.length;e++){var f=d[e],g=c.getViewport(f),k=n.getViewMatrix(f),q=w.cameras[e];q.matrix.fromArray(k).getInverse(q.matrix);q.projectionMatrix.fromArray(f.projectionMatrix);q.viewport.set(g.x,g.y,g.width,g.height);
+0===e&&w.matrix.copy(q.matrix)}for(e=0;e<r.length;e++){c=r[e];if(d=l[e])if(d=b.getInputPose(d,m),null!==d){"targetRay"in d?c.matrix.elements=d.targetRay.transformMatrix:"pointerMatrix"in d&&(c.matrix.elements=d.pointerMatrix);c.matrix.decompose(c.position,c.rotation,c.scale);c.visible=!0;continue}c.visible=!1}p&&p(a)});this.setAnimationLoop=function(a){p=a};this.dispose=function(){};this.getStandingMatrix=function(){console.warn("THREE.WebXRManager: getStandingMatrix() is no longer needed.");return new THREE.Matrix4};
+this.submitFrame=function(){}}function ce(a){var b;function c(){la=new Uf(O);xa=new Sf(O,la,a);xa.isWebGL2||(la.get("WEBGL_depth_texture"),la.get("OES_texture_float"),la.get("OES_texture_half_float"),la.get("OES_texture_half_float_linear"),la.get("OES_standard_derivatives"),la.get("OES_element_index_uint"),la.get("ANGLE_instanced_arrays"));la.get("OES_texture_float_linear");ia=new df(O,la,xa);ba=new Qg(O,la,ia,xa);ba.scissor(Bc.copy(ja).multiplyScalar(U));ba.viewport(S.copy(fa).multiplyScalar(U));
+da=new Xf(O);Da=new Hg;ha=new Rg(O,la,ba,Da,xa,ia,da);ra=new Lf(O);ua=new Vf(O,ra,da);oa=new $f(ua,da);ya=new Zf(O);na=new Gg(Y,la,xa);ta=new Lg;pa=new Pg;ma=new Qf(Y,ba,oa,z);Aa=new Rf(O,la,da,xa);Ba=new Wf(O,la,da,xa);da.programs=na.programs;Y.context=O;Y.capabilities=xa;Y.extensions=la;Y.properties=Da;Y.renderLists=ta;Y.state=ba;Y.info=da}function d(a){a.preventDefault();console.log("THREE.WebGLRenderer: Context Lost.");G=!0}function e(){console.log("THREE.WebGLRenderer: Context Restored.");G=
+!1;c()}function f(a){a=a.target;a.removeEventListener("dispose",f);g(a);Da.remove(a)}function g(a){var b=Da.get(a).program;a.program=void 0;void 0!==b&&na.releaseProgram(b)}function h(a,b){a.render(function(a){Y.renderBufferImmediate(a,b)})}function k(a,b,c){if(!1!==a.visible){if(a.layers.test(b.layers))if(a.isLight)E.pushLight(a),a.castShadow&&E.pushShadow(a);else if(a.isSprite){if(!a.frustumCulled||qa.intersectsSprite(a)){c&&gb.setFromMatrixPosition(a.matrixWorld).applyMatrix4(Ac);var d=oa.update(a),
+e=a.material;D.push(a,d,e,gb.z,null)}}else if(a.isImmediateRenderObject)c&&gb.setFromMatrixPosition(a.matrixWorld).applyMatrix4(Ac),D.push(a,null,a.material,gb.z,null);else if(a.isMesh||a.isLine||a.isPoints)if(a.isSkinnedMesh&&a.skeleton.update(),!a.frustumCulled||qa.intersectsObject(a))if(c&&gb.setFromMatrixPosition(a.matrixWorld).applyMatrix4(Ac),d=oa.update(a),e=a.material,Array.isArray(e))for(var f=d.groups,g=0,h=f.length;g<h;g++){var m=f[g],n=e[m.materialIndex];n&&n.visible&&D.push(a,d,n,gb.z,
+m)}else e.visible&&D.push(a,d,e,gb.z,null);a=a.children;g=0;for(h=a.length;g<h;g++)k(a[g],b,c)}}function m(a,b,c,d){for(var e=0,f=a.length;e<f;e++){var g=a[e],h=g.object,k=g.geometry,m=void 0===d?g.material:d;g=g.group;if(c.isArrayCamera){W=c;for(var n=c.cameras,r=0,l=n.length;r<l;r++){var x=n[r];if(h.layers.test(x.layers)){if("viewport"in x)ba.viewport(S.copy(x.viewport));else{var t=x.bounds;ba.viewport(S.set(t.x*V,t.y*M,t.z*V,t.w*M).multiplyScalar(U))}E.setupLights(x);q(h,b,x,k,m,g)}}}else W=null,
+q(h,b,c,k,m,g)}}function q(a,c,d,e,f,g){a.onBeforeRender(Y,c,d,e,f,g);E=pa.get(c,W||d);a.modelViewMatrix.multiplyMatrices(d.matrixWorldInverse,a.matrixWorld);a.normalMatrix.getNormalMatrix(a.modelViewMatrix);if(a.isImmediateRenderObject){ba.setMaterial(f);var k=r(d,c.fog,f,a);K=b=null;sd=!1;h(a,k)}else Y.renderBufferDirect(d,c.fog,e,f,a,g);a.onAfterRender(Y,c,d,e,f,g);E=pa.get(c,W||d)}function n(a,b,c){var d=Da.get(a),e=E.state.lights,h=d.lightsHash,k=e.state.hash;c=na.getParameters(a,e.state,E.state.shadowsArray,
+b,aa.numPlanes,aa.numIntersection,c);var m=na.getProgramCode(a,c),n=d.program,q=!0;if(void 0===n)a.addEventListener("dispose",f);else if(n.code!==m)g(a);else{if(h.stateID!==k.stateID||h.directionalLength!==k.directionalLength||h.pointLength!==k.pointLength||h.spotLength!==k.spotLength||h.rectAreaLength!==k.rectAreaLength||h.hemiLength!==k.hemiLength||h.shadowsLength!==k.shadowsLength)h.stateID=k.stateID,h.directionalLength=k.directionalLength,h.pointLength=k.pointLength,h.spotLength=k.spotLength,
+h.rectAreaLength=k.rectAreaLength,h.hemiLength=k.hemiLength,h.shadowsLength=k.shadowsLength;else if(void 0!==c.shaderID)return;q=!1}q&&(c.shaderID?(m=Qa[c.shaderID],d.shader={name:a.type,uniforms:va.clone(m.uniforms),vertexShader:m.vertexShader,fragmentShader:m.fragmentShader}):d.shader={name:a.type,uniforms:a.uniforms,vertexShader:a.vertexShader,fragmentShader:a.fragmentShader},a.onBeforeCompile(d.shader,Y),m=na.getProgramCode(a,c),n=na.acquireProgram(a,d.shader,c,m),d.program=n,a.program=n);c=n.getAttributes();
+if(a.morphTargets)for(m=a.numSupportedMorphTargets=0;m<Y.maxMorphTargets;m++)0<=c["morphTarget"+m]&&a.numSupportedMorphTargets++;if(a.morphNormals)for(m=a.numSupportedMorphNormals=0;m<Y.maxMorphNormals;m++)0<=c["morphNormal"+m]&&a.numSupportedMorphNormals++;c=d.shader.uniforms;if(!a.isShaderMaterial&&!a.isRawShaderMaterial||!0===a.clipping)d.numClippingPlanes=aa.numPlanes,d.numIntersection=aa.numIntersection,c.clippingPlanes=aa.uniform;d.fog=b;void 0===h&&(d.lightsHash=h={});h.stateID=k.stateID;h.directionalLength=
+k.directionalLength;h.pointLength=k.pointLength;h.spotLength=k.spotLength;h.rectAreaLength=k.rectAreaLength;h.hemiLength=k.hemiLength;h.shadowsLength=k.shadowsLength;a.lights&&(c.ambientLightColor.value=e.state.ambient,c.directionalLights.value=e.state.directional,c.spotLights.value=e.state.spot,c.rectAreaLights.value=e.state.rectArea,c.pointLights.value=e.state.point,c.hemisphereLights.value=e.state.hemi,c.directionalShadowMap.value=e.state.directionalShadowMap,c.directionalShadowMatrix.value=e.state.directionalShadowMatrix,
+c.spotShadowMap.value=e.state.spotShadowMap,c.spotShadowMatrix.value=e.state.spotShadowMatrix,c.pointShadowMap.value=e.state.pointShadowMap,c.pointShadowMatrix.value=e.state.pointShadowMatrix);a=d.program.getUniforms();a=db.seqWithValue(a.seq,c);d.uniformsList=a}function r(a,b,c,d){ca=0;var e=Da.get(c),f=e.lightsHash,g=E.state.lights.state.hash;ud&&(be||a!==T)&&aa.setState(c.clippingPlanes,c.clipIntersection,c.clipShadows,a,e,a===T&&c.id===F);!1===c.needsUpdate&&(void 0===e.program?c.needsUpdate=
+!0:c.fog&&e.fog!==b?c.needsUpdate=!0:!c.lights||f.stateID===g.stateID&&f.directionalLength===g.directionalLength&&f.pointLength===g.pointLength&&f.spotLength===g.spotLength&&f.rectAreaLength===g.rectAreaLength&&f.hemiLength===g.hemiLength&&f.shadowsLength===g.shadowsLength?void 0===e.numClippingPlanes||e.numClippingPlanes===aa.numPlanes&&e.numIntersection===aa.numIntersection||(c.needsUpdate=!0):c.needsUpdate=!0);c.needsUpdate&&(n(c,b,d),c.needsUpdate=!1);var h=!1,k=!1,m=!1;f=e.program;g=f.getUniforms();
+var q=e.shader.uniforms;ba.useProgram(f.program)&&(m=k=h=!0);c.id!==F&&(F=c.id,k=!0);if(h||T!==a){g.setValue(O,"projectionMatrix",a.projectionMatrix);xa.logarithmicDepthBuffer&&g.setValue(O,"logDepthBufFC",2/(Math.log(a.far+1)/Math.LN2));T!==a&&(T=a,m=k=!0);if(c.isShaderMaterial||c.isMeshPhongMaterial||c.isMeshStandardMaterial||c.envMap)h=g.map.cameraPosition,void 0!==h&&h.setValue(O,gb.setFromMatrixPosition(a.matrixWorld));(c.isMeshPhongMaterial||c.isMeshLambertMaterial||c.isMeshBasicMaterial||c.isMeshStandardMaterial||
+c.isShaderMaterial||c.skinning)&&g.setValue(O,"viewMatrix",a.matrixWorldInverse)}if(c.skinning&&(g.setOptional(O,d,"bindMatrix"),g.setOptional(O,d,"bindMatrixInverse"),a=d.skeleton))if(h=a.bones,xa.floatVertexTextures){if(void 0===a.boneTexture){h=Math.sqrt(4*h.length);h=R.ceilPowerOfTwo(h);h=Math.max(h,4);var r=new Float32Array(h*h*4);r.set(a.boneMatrices);var x=new lb(r,h,h,1023,1015);x.needsUpdate=!0;a.boneMatrices=r;a.boneTexture=x;a.boneTextureSize=h}g.setValue(O,"boneTexture",a.boneTexture);
+g.setValue(O,"boneTextureSize",a.boneTextureSize)}else g.setOptional(O,a,"boneMatrices");k&&(g.setValue(O,"toneMappingExposure",Y.toneMappingExposure),g.setValue(O,"toneMappingWhitePoint",Y.toneMappingWhitePoint),c.lights&&(k=m,q.ambientLightColor.needsUpdate=k,q.directionalLights.needsUpdate=k,q.pointLights.needsUpdate=k,q.spotLights.needsUpdate=k,q.rectAreaLights.needsUpdate=k,q.hemisphereLights.needsUpdate=k),b&&c.fog&&(q.fogColor.value=b.color,b.isFog?(q.fogNear.value=b.near,q.fogFar.value=b.far):
+b.isFogExp2&&(q.fogDensity.value=b.density)),c.isMeshBasicMaterial?l(q,c):c.isMeshLambertMaterial?(l(q,c),c.emissiveMap&&(q.emissiveMap.value=c.emissiveMap)):c.isMeshPhongMaterial?(l(q,c),c.isMeshToonMaterial?(t(q,c),c.gradientMap&&(q.gradientMap.value=c.gradientMap)):t(q,c)):c.isMeshStandardMaterial?(l(q,c),c.isMeshPhysicalMaterial?(u(q,c),q.reflectivity.value=c.reflectivity,q.clearCoat.value=c.clearCoat,q.clearCoatRoughness.value=c.clearCoatRoughness):u(q,c)):c.isMeshMatcapMaterial?(l(q,c),c.matcap&&
+(q.matcap.value=c.matcap),c.bumpMap&&(q.bumpMap.value=c.bumpMap,q.bumpScale.value=c.bumpScale,1===c.side&&(q.bumpScale.value*=-1)),c.normalMap&&(q.normalMap.value=c.normalMap,q.normalScale.value.copy(c.normalScale),1===c.side&&q.normalScale.value.negate()),c.displacementMap&&(q.displacementMap.value=c.displacementMap,q.displacementScale.value=c.displacementScale,q.displacementBias.value=c.displacementBias)):c.isMeshDepthMaterial?(l(q,c),c.displacementMap&&(q.displacementMap.value=c.displacementMap,
+q.displacementScale.value=c.displacementScale,q.displacementBias.value=c.displacementBias)):c.isMeshDistanceMaterial?(l(q,c),c.displacementMap&&(q.displacementMap.value=c.displacementMap,q.displacementScale.value=c.displacementScale,q.displacementBias.value=c.displacementBias),q.referencePosition.value.copy(c.referencePosition),q.nearDistance.value=c.nearDistance,q.farDistance.value=c.farDistance):c.isMeshNormalMaterial?(l(q,c),c.bumpMap&&(q.bumpMap.value=c.bumpMap,q.bumpScale.value=c.bumpScale,1===
+c.side&&(q.bumpScale.value*=-1)),c.normalMap&&(q.normalMap.value=c.normalMap,q.normalScale.value.copy(c.normalScale),1===c.side&&q.normalScale.value.negate()),c.displacementMap&&(q.displacementMap.value=c.displacementMap,q.displacementScale.value=c.displacementScale,q.displacementBias.value=c.displacementBias)):c.isLineBasicMaterial?(q.diffuse.value=c.color,q.opacity.value=c.opacity,c.isLineDashedMaterial&&(q.dashSize.value=c.dashSize,q.totalSize.value=c.dashSize+c.gapSize,q.scale.value=c.scale)):
+c.isPointsMaterial?(q.diffuse.value=c.color,q.opacity.value=c.opacity,q.size.value=c.size*U,q.scale.value=.5*M,q.map.value=c.map,null!==c.map&&(!0===c.map.matrixAutoUpdate&&c.map.updateMatrix(),q.uvTransform.value.copy(c.map.matrix))):c.isSpriteMaterial?(q.diffuse.value=c.color,q.opacity.value=c.opacity,q.rotation.value=c.rotation,q.map.value=c.map,null!==c.map&&(!0===c.map.matrixAutoUpdate&&c.map.updateMatrix(),q.uvTransform.value.copy(c.map.matrix))):c.isShadowMaterial&&(q.color.value=c.color,q.opacity.value=
+c.opacity),void 0!==q.ltc_1&&(q.ltc_1.value=J.LTC_1),void 0!==q.ltc_2&&(q.ltc_2.value=J.LTC_2),db.upload(O,e.uniformsList,q,Y));c.isShaderMaterial&&!0===c.uniformsNeedUpdate&&(db.upload(O,e.uniformsList,q,Y),c.uniformsNeedUpdate=!1);c.isSpriteMaterial&&g.setValue(O,"center",d.center);g.setValue(O,"modelViewMatrix",d.modelViewMatrix);g.setValue(O,"normalMatrix",d.normalMatrix);g.setValue(O,"modelMatrix",d.matrixWorld);return f}function l(a,b){a.opacity.value=b.opacity;b.color&&(a.diffuse.value=b.color);
+b.emissive&&a.emissive.value.copy(b.emissive).multiplyScalar(b.emissiveIntensity);b.map&&(a.map.value=b.map);b.alphaMap&&(a.alphaMap.value=b.alphaMap);b.specularMap&&(a.specularMap.value=b.specularMap);b.envMap&&(a.envMap.value=b.envMap,a.flipEnvMap.value=b.envMap&&b.envMap.isCubeTexture?-1:1,a.reflectivity.value=b.reflectivity,a.refractionRatio.value=b.refractionRatio,a.maxMipLevel.value=Da.get(b.envMap).__maxMipLevel);b.lightMap&&(a.lightMap.value=b.lightMap,a.lightMapIntensity.value=b.lightMapIntensity);
+b.aoMap&&(a.aoMap.value=b.aoMap,a.aoMapIntensity.value=b.aoMapIntensity);if(b.map)var c=b.map;else b.specularMap?c=b.specularMap:b.displacementMap?c=b.displacementMap:b.normalMap?c=b.normalMap:b.bumpMap?c=b.bumpMap:b.roughnessMap?c=b.roughnessMap:b.metalnessMap?c=b.metalnessMap:b.alphaMap?c=b.alphaMap:b.emissiveMap&&(c=b.emissiveMap);void 0!==c&&(c.isWebGLRenderTarget&&(c=c.texture),!0===c.matrixAutoUpdate&&c.updateMatrix(),a.uvTransform.value.copy(c.matrix))}function t(a,b){a.specular.value=b.specular;
+a.shininess.value=Math.max(b.shininess,1E-4);b.emissiveMap&&(a.emissiveMap.value=b.emissiveMap);b.bumpMap&&(a.bumpMap.value=b.bumpMap,a.bumpScale.value=b.bumpScale,1===b.side&&(a.bumpScale.value*=-1));b.normalMap&&(a.normalMap.value=b.normalMap,a.normalScale.value.copy(b.normalScale),1===b.side&&a.normalScale.value.negate());b.displacementMap&&(a.displacementMap.value=b.displacementMap,a.displacementScale.value=b.displacementScale,a.displacementBias.value=b.displacementBias)}function u(a,b){a.roughness.value=
+b.roughness;a.metalness.value=b.metalness;b.roughnessMap&&(a.roughnessMap.value=b.roughnessMap);b.metalnessMap&&(a.metalnessMap.value=b.metalnessMap);b.emissiveMap&&(a.emissiveMap.value=b.emissiveMap);b.bumpMap&&(a.bumpMap.value=b.bumpMap,a.bumpScale.value=b.bumpScale,1===b.side&&(a.bumpScale.value*=-1));b.normalMap&&(a.normalMap.value=b.normalMap,a.normalScale.value.copy(b.normalScale),1===b.side&&a.normalScale.value.negate());b.displacementMap&&(a.displacementMap.value=b.displacementMap,a.displacementScale.value=
+b.displacementScale,a.displacementBias.value=b.displacementBias);b.envMap&&(a.envMapIntensity.value=b.envMapIntensity)}console.log("THREE.WebGLRenderer","98");a=a||{};var w=void 0!==a.canvas?a.canvas:document.createElementNS("http://www.w3.org/1999/xhtml","canvas"),A=void 0!==a.context?a.context:null,v=void 0!==a.alpha?a.alpha:!1,H=void 0!==a.depth?a.depth:!0,y=void 0!==a.stencil?a.stencil:!0,N=void 0!==a.antialias?a.antialias:!1,z=void 0!==a.premultipliedAlpha?a.premultipliedAlpha:!0,B=void 0!==
+a.preserveDrawingBuffer?a.preserveDrawingBuffer:!1,C=void 0!==a.powerPreference?a.powerPreference:"default",D=null,E=null;this.domElement=w;this.context=null;this.sortObjects=this.autoClearStencil=this.autoClearDepth=this.autoClearColor=this.autoClear=!0;this.clippingPlanes=[];this.localClippingEnabled=!1;this.gammaFactor=2;this.physicallyCorrectLights=this.gammaOutput=this.gammaInput=!1;this.toneMappingWhitePoint=this.toneMappingExposure=this.toneMapping=1;this.maxMorphTargets=8;this.maxMorphNormals=
+4;var Y=this,G=!1,Q=null,I=null,L=null,F=-1;var K=b=null;var sd=!1;var T=null,W=null,S=new Z,Bc=new Z,ea=null,ca=0,V=w.width,M=w.height,U=1,fa=new Z(0,0,V,M),ja=new Z(0,0,V,M),sa=!1,qa=new rd,aa=new Tf,ud=!1,be=!1,Ac=new P,gb=new p;try{v={alpha:v,depth:H,stencil:y,antialias:N,premultipliedAlpha:z,preserveDrawingBuffer:B,powerPreference:C};w.addEventListener("webglcontextlost",d,!1);w.addEventListener("webglcontextrestored",e,!1);var O=A||w.getContext("webgl",v)||w.getContext("experimental-webgl",
+v);if(null===O){if(null!==w.getContext("webgl"))throw Error("Error creating WebGL context with your selected attributes.");throw Error("Error creating WebGL context.");}void 0===O.getShaderPrecisionFormat&&(O.getShaderPrecisionFormat=function(){return{rangeMin:1,rangeMax:1,precision:1}})}catch(Tg){console.error("THREE.WebGLRenderer: "+Tg.message)}var la,xa,ba,da,Da,ha,ra,ua,oa,na,ta,pa,ma,ya,Aa,Ba,ia;c();var ka=null;"undefined"!==typeof navigator&&(ka="xr"in navigator?new Sg(Y):new hf(Y));this.vr=
+ka;var Ca=new cf(Y,oa,xa.maxTextureSize);this.shadowMap=Ca;this.getContext=function(){return O};this.getContextAttributes=function(){return O.getContextAttributes()};this.forceContextLoss=function(){var a=la.get("WEBGL_lose_context");a&&a.loseContext()};this.forceContextRestore=function(){var a=la.get("WEBGL_lose_context");a&&a.restoreContext()};this.getPixelRatio=function(){return U};this.setPixelRatio=function(a){void 0!==a&&(U=a,this.setSize(V,M,!1))};this.getSize=function(){return{width:V,height:M}};
+this.setSize=function(a,b,c){ka.isPresenting()?console.warn("THREE.WebGLRenderer: Can't change size while VR device is presenting."):(V=a,M=b,w.width=a*U,w.height=b*U,!1!==c&&(w.style.width=a+"px",w.style.height=b+"px"),this.setViewport(0,0,a,b))};this.getDrawingBufferSize=function(){return{width:V*U,height:M*U}};this.setDrawingBufferSize=function(a,b,c){V=a;M=b;U=c;w.width=a*c;w.height=b*c;this.setViewport(0,0,a,b)};this.getCurrentViewport=function(){return S};this.setViewport=function(a,b,c,d){fa.set(a,
+M-b-d,c,d);ba.viewport(S.copy(fa).multiplyScalar(U))};this.setScissor=function(a,b,c,d){ja.set(a,M-b-d,c,d);ba.scissor(Bc.copy(ja).multiplyScalar(U))};this.setScissorTest=function(a){ba.setScissorTest(sa=a)};this.getClearColor=function(){return ma.getClearColor()};this.setClearColor=function(){ma.setClearColor.apply(ma,arguments)};this.getClearAlpha=function(){return ma.getClearAlpha()};this.setClearAlpha=function(){ma.setClearAlpha.apply(ma,arguments)};this.clear=function(a,b,c){var d=0;if(void 0===
+a||a)d|=16384;if(void 0===b||b)d|=256;if(void 0===c||c)d|=1024;O.clear(d)};this.clearColor=function(){this.clear(!0,!1,!1)};this.clearDepth=function(){this.clear(!1,!0,!1)};this.clearStencil=function(){this.clear(!1,!1,!0)};this.dispose=function(){w.removeEventListener("webglcontextlost",d,!1);w.removeEventListener("webglcontextrestored",e,!1);ta.dispose();pa.dispose();Da.dispose();oa.dispose();ka.dispose();wa.stop()};this.renderBufferImmediate=function(a,b){ba.initAttributes();var c=Da.get(a);a.hasPositions&&
+!c.position&&(c.position=O.createBuffer());a.hasNormals&&!c.normal&&(c.normal=O.createBuffer());a.hasUvs&&!c.uv&&(c.uv=O.createBuffer());a.hasColors&&!c.color&&(c.color=O.createBuffer());b=b.getAttributes();a.hasPositions&&(O.bindBuffer(34962,c.position),O.bufferData(34962,a.positionArray,35048),ba.enableAttribute(b.position),O.vertexAttribPointer(b.position,3,5126,!1,0,0));a.hasNormals&&(O.bindBuffer(34962,c.normal),O.bufferData(34962,a.normalArray,35048),ba.enableAttribute(b.normal),O.vertexAttribPointer(b.normal,
+3,5126,!1,0,0));a.hasUvs&&(O.bindBuffer(34962,c.uv),O.bufferData(34962,a.uvArray,35048),ba.enableAttribute(b.uv),O.vertexAttribPointer(b.uv,2,5126,!1,0,0));a.hasColors&&(O.bindBuffer(34962,c.color),O.bufferData(34962,a.colorArray,35048),ba.enableAttribute(b.color),O.vertexAttribPointer(b.color,3,5126,!1,0,0));ba.disableUnusedAttributes();O.drawArrays(4,0,a.count);a.count=0};this.renderBufferDirect=function(a,c,d,e,f,g){var h=f.isMesh&&0>f.normalMatrix.determinant();ba.setMaterial(e,h);var k=r(a,c,
+e,f),m=!1;if(b!==d.id||K!==k.id||sd!==(!0===e.wireframe))b=d.id,K=k.id,sd=!0===e.wireframe,m=!0;f.morphTargetInfluences&&(ya.update(f,d,e,k),m=!0);h=d.index;var q=d.attributes.position;c=1;!0===e.wireframe&&(h=ua.getWireframeAttribute(d),c=2);a=Aa;if(null!==h){var n=ra.get(h);a=Ba;a.setIndex(n)}if(m){if(d&&d.isInstancedBufferGeometry&!xa.isWebGL2&&null===la.get("ANGLE_instanced_arrays"))console.error("THREE.WebGLRenderer.setupVertexAttributes: using THREE.InstancedBufferGeometry but hardware does not support extension ANGLE_instanced_arrays.");
+else{ba.initAttributes();m=d.attributes;k=k.getAttributes();var l=e.defaultAttributeValues;for(B in k){var x=k[B];if(0<=x){var t=m[B];if(void 0!==t){var u=t.normalized,w=t.itemSize,p=ra.get(t);if(void 0!==p){var v=p.buffer,A=p.type;p=p.bytesPerElement;if(t.isInterleavedBufferAttribute){var y=t.data,H=y.stride;t=t.offset;y&&y.isInstancedInterleavedBuffer?(ba.enableAttributeAndDivisor(x,y.meshPerAttribute),void 0===d.maxInstancedCount&&(d.maxInstancedCount=y.meshPerAttribute*y.count)):ba.enableAttribute(x);
+O.bindBuffer(34962,v);O.vertexAttribPointer(x,w,A,u,H*p,t*p)}else t.isInstancedBufferAttribute?(ba.enableAttributeAndDivisor(x,t.meshPerAttribute),void 0===d.maxInstancedCount&&(d.maxInstancedCount=t.meshPerAttribute*t.count)):ba.enableAttribute(x),O.bindBuffer(34962,v),O.vertexAttribPointer(x,w,A,u,0,0)}}else if(void 0!==l&&(u=l[B],void 0!==u))switch(u.length){case 2:O.vertexAttrib2fv(x,u);break;case 3:O.vertexAttrib3fv(x,u);break;case 4:O.vertexAttrib4fv(x,u);break;default:O.vertexAttrib1fv(x,u)}}}ba.disableUnusedAttributes()}null!==
+h&&O.bindBuffer(34963,n.buffer)}n=Infinity;null!==h?n=h.count:void 0!==q&&(n=q.count);h=d.drawRange.start*c;q=null!==g?g.start*c:0;var B=Math.max(h,q);g=Math.max(0,Math.min(n,h+d.drawRange.count*c,q+(null!==g?g.count*c:Infinity))-1-B+1);if(0!==g){if(f.isMesh)if(!0===e.wireframe)ba.setLineWidth(e.wireframeLinewidth*(null===I?U:1)),a.setMode(1);else switch(f.drawMode){case 0:a.setMode(4);break;case 1:a.setMode(5);break;case 2:a.setMode(6)}else f.isLine?(e=e.linewidth,void 0===e&&(e=1),ba.setLineWidth(e*
+(null===I?U:1)),f.isLineSegments?a.setMode(1):f.isLineLoop?a.setMode(2):a.setMode(3)):f.isPoints?a.setMode(0):f.isSprite&&a.setMode(4);d&&d.isInstancedBufferGeometry?0<d.maxInstancedCount&&a.renderInstances(d,B,g):a.render(B,g)}};this.compile=function(a,b){E=pa.get(a,b);E.init();a.traverse(function(a){a.isLight&&(E.pushLight(a),a.castShadow&&E.pushShadow(a))});E.setupLights(b);a.traverse(function(b){if(b.material)if(Array.isArray(b.material))for(var c=0;c<b.material.length;c++)n(b.material[c],a.fog,
+b);else n(b.material,a.fog,b)})};var za=null,wa=new Xd;wa.setAnimationLoop(function(a){ka.isPresenting()||za&&za(a)});"undefined"!==typeof window&&wa.setContext(window);this.setAnimationLoop=function(a){za=a;ka.setAnimationLoop(a);wa.start()};this.render=function(a,c,d,e){if(!c||!c.isCamera)console.error("THREE.WebGLRenderer.render: camera is not an instance of THREE.Camera.");else if(!G){K=b=null;sd=!1;F=-1;T=null;!0===a.autoUpdate&&a.updateMatrixWorld();null===c.parent&&c.updateMatrixWorld();ka.enabled&&
+(c=ka.getCamera(c));E=pa.get(a,c);E.init();a.onBeforeRender(Y,a,c,d);Ac.multiplyMatrices(c.projectionMatrix,c.matrixWorldInverse);qa.setFromMatrix(Ac);be=this.localClippingEnabled;ud=aa.init(this.clippingPlanes,be,c);D=ta.get(a,c);D.init();k(a,c,Y.sortObjects);!0===Y.sortObjects&&D.sort();ud&&aa.beginShadows();Ca.render(E.state.shadowsArray,a,c);E.setupLights(c);ud&&aa.endShadows();this.info.autoReset&&this.info.reset();void 0===d&&(d=null);this.setRenderTarget(d);ma.render(D,a,c,e);e=D.opaque;var f=
+D.transparent;if(a.overrideMaterial){var g=a.overrideMaterial;e.length&&m(e,a,c,g);f.length&&m(f,a,c,g)}else e.length&&m(e,a,c),f.length&&m(f,a,c);d&&ha.updateRenderTargetMipmap(d);ba.buffers.depth.setTest(!0);ba.buffers.depth.setMask(!0);ba.buffers.color.setMask(!0);ba.setPolygonOffset(!1);a.onAfterRender(Y,a,c);ka.enabled&&ka.submitFrame();E=D=null}};this.allocTextureUnit=function(){var a=ca;a>=xa.maxTextures&&console.warn("THREE.WebGLRenderer: Trying to use "+a+" texture units while this GPU supports only "+
+xa.maxTextures);ca+=1;return a};this.setTexture2D=function(){var a=!1;return function(b,c){b&&b.isWebGLRenderTarget&&(a||(console.warn("THREE.WebGLRenderer.setTexture2D: don't use render targets as textures. Use their .texture property instead."),a=!0),b=b.texture);ha.setTexture2D(b,c)}}();this.setTexture3D=function(){return function(a,b){ha.setTexture3D(a,b)}}();this.setTexture=function(){var a=!1;return function(b,c){a||(console.warn("THREE.WebGLRenderer: .setTexture is deprecated, use setTexture2D instead."),
+a=!0);ha.setTexture2D(b,c)}}();this.setTextureCube=function(){var a=!1;return function(b,c){b&&b.isWebGLRenderTargetCube&&(a||(console.warn("THREE.WebGLRenderer.setTextureCube: don't use cube render targets as textures. Use their .texture property instead."),a=!0),b=b.texture);b&&b.isCubeTexture||Array.isArray(b.image)&&6===b.image.length?ha.setTextureCube(b,c):ha.setTextureCubeDynamic(b,c)}}();this.setFramebuffer=function(a){Q=a};this.getRenderTarget=function(){return I};this.setRenderTarget=function(a){(I=
+a)&&void 0===Da.get(a).__webglFramebuffer&&ha.setupRenderTarget(a);var b=Q,c=!1;a?(b=Da.get(a).__webglFramebuffer,a.isWebGLRenderTargetCube&&(b=b[a.activeCubeFace],c=!0),S.copy(a.viewport),Bc.copy(a.scissor),ea=a.scissorTest):(S.copy(fa).multiplyScalar(U),Bc.copy(ja).multiplyScalar(U),ea=sa);L!==b&&(O.bindFramebuffer(36160,b),L=b);ba.viewport(S);ba.scissor(Bc);ba.setScissorTest(ea);c&&(c=Da.get(a.texture),O.framebufferTexture2D(36160,36064,34069+a.activeCubeFace,c.__webglTexture,a.activeMipMapLevel))};
+this.readRenderTargetPixels=function(a,b,c,d,e,f){if(a&&a.isWebGLRenderTarget){var g=Da.get(a).__webglFramebuffer;if(g){var h=!1;g!==L&&(O.bindFramebuffer(36160,g),h=!0);try{var k=a.texture,m=k.format,q=k.type;1023!==m&&ia.convert(m)!==O.getParameter(35739)?console.error("THREE.WebGLRenderer.readRenderTargetPixels: renderTarget is not in RGBA or implementation defined format."):1009===q||ia.convert(q)===O.getParameter(35738)||1015===q&&(xa.isWebGL2||la.get("OES_texture_float")||la.get("WEBGL_color_buffer_float"))||
+1016===q&&(xa.isWebGL2?la.get("EXT_color_buffer_float"):la.get("EXT_color_buffer_half_float"))?36053===O.checkFramebufferStatus(36160)?0<=b&&b<=a.width-d&&0<=c&&c<=a.height-e&&O.readPixels(b,c,d,e,ia.convert(m),ia.convert(q),f):console.error("THREE.WebGLRenderer.readRenderTargetPixels: readPixels from renderTarget failed. Framebuffer not complete."):console.error("THREE.WebGLRenderer.readRenderTargetPixels: renderTarget is not in UnsignedByteType or implementation defined type.")}finally{h&&O.bindFramebuffer(36160,
+L)}}}else console.error("THREE.WebGLRenderer.readRenderTargetPixels: renderTarget is not THREE.WebGLRenderTarget.")};this.copyFramebufferToTexture=function(a,b,c){var d=b.image.width,e=b.image.height,f=ia.convert(b.format);this.setTexture2D(b,0);O.copyTexImage2D(3553,c||0,f,a.x,a.y,d,e,0)};this.copyTextureToTexture=function(a,b,c,d){var e=b.image.width,f=b.image.height,g=ia.convert(c.format),h=ia.convert(c.type);this.setTexture2D(c,0);b.isDataTexture?O.texSubImage2D(3553,d||0,a.x,a.y,e,f,g,h,b.image.data):
+O.texSubImage2D(3553,d||0,a.x,a.y,g,h,b.image)}}function Pb(a,b){this.name="";this.color=new G(a);this.density=void 0!==b?b:2.5E-4}function Qb(a,b,c){this.name="";this.color=new G(a);this.near=void 0!==b?b:1;this.far=void 0!==c?c:1E3}function vd(){D.call(this);this.type="Scene";this.overrideMaterial=this.fog=this.background=null;this.autoUpdate=!0}function sb(a,b){this.array=a;this.stride=b;this.count=void 0!==a?a.length/b:0;this.dynamic=!1;this.updateRange={offset:0,count:-1};this.version=0}function Dc(a,
+b,c,d){this.data=a;this.itemSize=b;this.offset=c;this.normalized=!0===d}function hb(a){L.call(this);this.type="SpriteMaterial";this.color=new G(16777215);this.map=null;this.rotation=0;this.sizeAttenuation=!0;this.lights=!1;this.transparent=!0;this.setValues(a)}function Ec(a){D.call(this);this.type="Sprite";if(void 0===Rb){Rb=new E;var b=new Float32Array([-.5,-.5,0,0,0,.5,-.5,0,1,0,.5,.5,0,1,1,-.5,.5,0,0,1]);b=new sb(b,5);Rb.setIndex([0,1,2,0,2,3]);Rb.addAttribute("position",new Dc(b,3,0,!1));Rb.addAttribute("uv",
+new Dc(b,2,3,!1))}this.geometry=Rb;this.material=void 0!==a?a:new hb;this.center=new z(.5,.5)}function Fc(){D.call(this);this.type="LOD";Object.defineProperties(this,{levels:{enumerable:!0,value:[]}})}function Gc(a,b){a=a||[];this.bones=a.slice(0);this.boneMatrices=new Float32Array(16*this.bones.length);if(void 0===b)this.calculateInverses();else if(this.bones.length===b.length)this.boneInverses=b.slice(0);else for(console.warn("THREE.Skeleton boneInverses is the wrong length."),this.boneInverses=
+[],a=0,b=this.bones.length;a<b;a++)this.boneInverses.push(new P)}function wd(){D.call(this);this.type="Bone"}function xd(a,b){pa.call(this,a,b);this.type="SkinnedMesh";this.bindMode="attached";this.bindMatrix=new P;this.bindMatrixInverse=new P;a=this.initBones();a=new Gc(a);this.bind(a,this.matrixWorld);this.normalizeSkinWeights()}function T(a){L.call(this);this.type="LineBasicMaterial";this.color=new G(16777215);this.linewidth=1;this.linejoin=this.linecap="round";this.lights=!1;this.setValues(a)}
+function ma(a,b,c){1===c&&console.error("THREE.Line: parameter THREE.LinePieces no longer supported. Use THREE.LineSegments instead.");D.call(this);this.type="Line";this.geometry=void 0!==a?a:new E;this.material=void 0!==b?b:new T({color:16777215*Math.random()})}function S(a,b){ma.call(this,a,b);this.type="LineSegments"}function yd(a,b){ma.call(this,a,b);this.type="LineLoop"}function Ha(a){L.call(this);this.type="PointsMaterial";this.color=new G(16777215);this.map=null;this.size=1;this.sizeAttenuation=
+!0;this.lights=this.morphTargets=!1;this.setValues(a)}function Sb(a,b){D.call(this);this.type="Points";this.geometry=void 0!==a?a:new E;this.material=void 0!==b?b:new Ha({color:16777215*Math.random()})}function de(a,b,c,d,e,f,g,h,k){W.call(this,a,b,c,d,e,f,g,h,k);this.generateMipmaps=!1}function Tb(a,b,c,d,e,f,g,h,k,m,q,n){W.call(this,null,f,g,h,k,m,d,e,q,n);this.image={width:b,height:c};this.mipmaps=a;this.generateMipmaps=this.flipY=!1}function Hc(a,b,c,d,e,f,g,h,k){W.call(this,a,b,c,d,e,f,g,h,k);
+this.needsUpdate=!0}function Ic(a,b,c,d,e,f,g,h,k,m){m=void 0!==m?m:1026;if(1026!==m&&1027!==m)throw Error("DepthTexture format must be either THREE.DepthFormat or THREE.DepthStencilFormat");void 0===c&&1026===m&&(c=1012);void 0===c&&1027===m&&(c=1020);W.call(this,null,d,e,f,g,h,m,c,k);this.image={width:a,height:b};this.magFilter=void 0!==g?g:1003;this.minFilter=void 0!==h?h:1003;this.generateMipmaps=this.flipY=!1}function Ub(a){E.call(this);this.type="WireframeGeometry";var b=[],c,d,e,f=[0,0],g=
+{},h=["a","b","c"];if(a&&a.isGeometry){var k=a.faces;var m=0;for(d=k.length;m<d;m++){var q=k[m];for(c=0;3>c;c++){var n=q[h[c]];var r=q[h[(c+1)%3]];f[0]=Math.min(n,r);f[1]=Math.max(n,r);n=f[0]+","+f[1];void 0===g[n]&&(g[n]={index1:f[0],index2:f[1]})}}for(n in g)m=g[n],h=a.vertices[m.index1],b.push(h.x,h.y,h.z),h=a.vertices[m.index2],b.push(h.x,h.y,h.z)}else if(a&&a.isBufferGeometry)if(h=new p,null!==a.index){k=a.attributes.position;q=a.index;var l=a.groups;0===l.length&&(l=[{start:0,count:q.count,
+materialIndex:0}]);a=0;for(e=l.length;a<e;++a)for(m=l[a],c=m.start,d=m.count,m=c,d=c+d;m<d;m+=3)for(c=0;3>c;c++)n=q.getX(m+c),r=q.getX(m+(c+1)%3),f[0]=Math.min(n,r),f[1]=Math.max(n,r),n=f[0]+","+f[1],void 0===g[n]&&(g[n]={index1:f[0],index2:f[1]});for(n in g)m=g[n],h.fromBufferAttribute(k,m.index1),b.push(h.x,h.y,h.z),h.fromBufferAttribute(k,m.index2),b.push(h.x,h.y,h.z)}else for(k=a.attributes.position,m=0,d=k.count/3;m<d;m++)for(c=0;3>c;c++)g=3*m+c,h.fromBufferAttribute(k,g),b.push(h.x,h.y,h.z),
+g=3*m+(c+1)%3,h.fromBufferAttribute(k,g),b.push(h.x,h.y,h.z);this.addAttribute("position",new C(b,3))}function Jc(a,b,c){I.call(this);this.type="ParametricGeometry";this.parameters={func:a,slices:b,stacks:c};this.fromBufferGeometry(new Vb(a,b,c));this.mergeVertices()}function Vb(a,b,c){E.call(this);this.type="ParametricBufferGeometry";this.parameters={func:a,slices:b,stacks:c};var d=[],e=[],f=[],g=[],h=new p,k=new p,m=new p,q=new p,n=new p,r,l;3>a.length&&console.error("THREE.ParametricGeometry: Function must now modify a Vector3 as third parameter.");
+var t=b+1;for(r=0;r<=c;r++){var u=r/c;for(l=0;l<=b;l++){var w=l/b;a(w,u,k);e.push(k.x,k.y,k.z);0<=w-1E-5?(a(w-1E-5,u,m),q.subVectors(k,m)):(a(w+1E-5,u,m),q.subVectors(m,k));0<=u-1E-5?(a(w,u-1E-5,m),n.subVectors(k,m)):(a(w,u+1E-5,m),n.subVectors(m,k));h.crossVectors(q,n).normalize();f.push(h.x,h.y,h.z);g.push(w,u)}}for(r=0;r<c;r++)for(l=0;l<b;l++)a=r*t+l+1,h=(r+1)*t+l+1,k=(r+1)*t+l,d.push(r*t+l,a,k),d.push(a,h,k);this.setIndex(d);this.addAttribute("position",new C(e,3));this.addAttribute("normal",
+new C(f,3));this.addAttribute("uv",new C(g,2))}function Kc(a,b,c,d){I.call(this);this.type="PolyhedronGeometry";this.parameters={vertices:a,indices:b,radius:c,detail:d};this.fromBufferGeometry(new ya(a,b,c,d));this.mergeVertices()}function ya(a,b,c,d){function e(a){h.push(a.x,a.y,a.z)}function f(b,c){b*=3;c.x=a[b+0];c.y=a[b+1];c.z=a[b+2]}function g(a,b,c,d){0>d&&1===a.x&&(k[b]=a.x-1);0===c.x&&0===c.z&&(k[b]=d/2/Math.PI+.5)}E.call(this);this.type="PolyhedronBufferGeometry";this.parameters={vertices:a,
+indices:b,radius:c,detail:d};c=c||1;d=d||0;var h=[],k=[];(function(a){for(var c=new p,d=new p,g=new p,h=0;h<b.length;h+=3){f(b[h+0],c);f(b[h+1],d);f(b[h+2],g);var k,m,l=c,A=d,v=g,H=Math.pow(2,a),y=[];for(m=0;m<=H;m++){y[m]=[];var N=l.clone().lerp(v,m/H),z=A.clone().lerp(v,m/H),B=H-m;for(k=0;k<=B;k++)y[m][k]=0===k&&m===H?N:N.clone().lerp(z,k/B)}for(m=0;m<H;m++)for(k=0;k<2*(H-m)-1;k++)l=Math.floor(k/2),0===k%2?(e(y[m][l+1]),e(y[m+1][l]),e(y[m][l])):(e(y[m][l+1]),e(y[m+1][l+1]),e(y[m+1][l]))}})(d);(function(a){for(var b=
+new p,c=0;c<h.length;c+=3)b.x=h[c+0],b.y=h[c+1],b.z=h[c+2],b.normalize().multiplyScalar(a),h[c+0]=b.x,h[c+1]=b.y,h[c+2]=b.z})(c);(function(){for(var a=new p,b=0;b<h.length;b+=3)a.x=h[b+0],a.y=h[b+1],a.z=h[b+2],k.push(Math.atan2(a.z,-a.x)/2/Math.PI+.5,1-(Math.atan2(-a.y,Math.sqrt(a.x*a.x+a.z*a.z))/Math.PI+.5));a=new p;b=new p;for(var c=new p,d=new p,e=new z,f=new z,l=new z,w=0,A=0;w<h.length;w+=9,A+=6){a.set(h[w+0],h[w+1],h[w+2]);b.set(h[w+3],h[w+4],h[w+5]);c.set(h[w+6],h[w+7],h[w+8]);e.set(k[A+0],
+k[A+1]);f.set(k[A+2],k[A+3]);l.set(k[A+4],k[A+5]);d.copy(a).add(b).add(c).divideScalar(3);var v=Math.atan2(d.z,-d.x);g(e,A+0,a,v);g(f,A+2,b,v);g(l,A+4,c,v)}for(a=0;a<k.length;a+=6)b=k[a+0],c=k[a+2],d=k[a+4],e=Math.min(b,c,d),.9<Math.max(b,c,d)&&.1>e&&(.2>b&&(k[a+0]+=1),.2>c&&(k[a+2]+=1),.2>d&&(k[a+4]+=1))})();this.addAttribute("position",new C(h,3));this.addAttribute("normal",new C(h.slice(),3));this.addAttribute("uv",new C(k,2));0===d?this.computeVertexNormals():this.normalizeNormals()}function Lc(a,
+b){I.call(this);this.type="TetrahedronGeometry";this.parameters={radius:a,detail:b};this.fromBufferGeometry(new Wb(a,b));this.mergeVertices()}function Wb(a,b){ya.call(this,[1,1,1,-1,-1,1,-1,1,-1,1,-1,-1],[2,1,0,0,3,2,1,3,0,2,3,1],a,b);this.type="TetrahedronBufferGeometry";this.parameters={radius:a,detail:b}}function Mc(a,b){I.call(this);this.type="OctahedronGeometry";this.parameters={radius:a,detail:b};this.fromBufferGeometry(new tb(a,b));this.mergeVertices()}function tb(a,b){ya.call(this,[1,0,0,
+-1,0,0,0,1,0,0,-1,0,0,0,1,0,0,-1],[0,2,4,0,4,3,0,3,5,0,5,2,1,2,5,1,5,3,1,3,4,1,4,2],a,b);this.type="OctahedronBufferGeometry";this.parameters={radius:a,detail:b}}function Nc(a,b){I.call(this);this.type="IcosahedronGeometry";this.parameters={radius:a,detail:b};this.fromBufferGeometry(new Xb(a,b));this.mergeVertices()}function Xb(a,b){var c=(1+Math.sqrt(5))/2;ya.call(this,[-1,c,0,1,c,0,-1,-c,0,1,-c,0,0,-1,c,0,1,c,0,-1,-c,0,1,-c,c,0,-1,c,0,1,-c,0,-1,-c,0,1],[0,11,5,0,5,1,0,1,7,0,7,10,0,10,11,1,5,9,5,
+11,4,11,10,2,10,7,6,7,1,8,3,9,4,3,4,2,3,2,6,3,6,8,3,8,9,4,9,5,2,4,11,6,2,10,8,6,7,9,8,1],a,b);this.type="IcosahedronBufferGeometry";this.parameters={radius:a,detail:b}}function Oc(a,b){I.call(this);this.type="DodecahedronGeometry";this.parameters={radius:a,detail:b};this.fromBufferGeometry(new Yb(a,b));this.mergeVertices()}function Yb(a,b){var c=(1+Math.sqrt(5))/2,d=1/c;ya.call(this,[-1,-1,-1,-1,-1,1,-1,1,-1,-1,1,1,1,-1,-1,1,-1,1,1,1,-1,1,1,1,0,-d,-c,0,-d,c,0,d,-c,0,d,c,-d,-c,0,-d,c,0,d,-c,0,d,c,
+0,-c,0,-d,c,0,-d,-c,0,d,c,0,d],[3,11,7,3,7,15,3,15,13,7,19,17,7,17,6,7,6,15,17,4,8,17,8,10,17,10,6,8,0,16,8,16,2,8,2,10,0,12,1,0,1,18,0,18,16,6,10,2,6,2,13,6,13,15,2,16,18,2,18,3,2,3,13,18,1,9,18,9,11,18,11,3,4,14,12,4,12,0,4,0,8,11,9,5,11,5,19,11,19,7,19,5,14,19,14,4,19,4,17,1,12,14,1,14,5,1,5,9],a,b);this.type="DodecahedronBufferGeometry";this.parameters={radius:a,detail:b}}function Pc(a,b,c,d,e,f){I.call(this);this.type="TubeGeometry";this.parameters={path:a,tubularSegments:b,radius:c,radialSegments:d,
+closed:e};void 0!==f&&console.warn("THREE.TubeGeometry: taper has been removed.");a=new Zb(a,b,c,d,e);this.tangents=a.tangents;this.normals=a.normals;this.binormals=a.binormals;this.fromBufferGeometry(a);this.mergeVertices()}function Zb(a,b,c,d,e){function f(e){q=a.getPointAt(e/b,q);var f=g.normals[e];e=g.binormals[e];for(r=0;r<=d;r++){var m=r/d*Math.PI*2,n=Math.sin(m);m=-Math.cos(m);k.x=m*f.x+n*e.x;k.y=m*f.y+n*e.y;k.z=m*f.z+n*e.z;k.normalize();t.push(k.x,k.y,k.z);h.x=q.x+c*k.x;h.y=q.y+c*k.y;h.z=
+q.z+c*k.z;l.push(h.x,h.y,h.z)}}E.call(this);this.type="TubeBufferGeometry";this.parameters={path:a,tubularSegments:b,radius:c,radialSegments:d,closed:e};b=b||64;c=c||1;d=d||8;e=e||!1;var g=a.computeFrenetFrames(b,e);this.tangents=g.tangents;this.normals=g.normals;this.binormals=g.binormals;var h=new p,k=new p,m=new z,q=new p,n,r,l=[],t=[],u=[],w=[];for(n=0;n<b;n++)f(n);f(!1===e?b:0);for(n=0;n<=b;n++)for(r=0;r<=d;r++)m.x=n/b,m.y=r/d,u.push(m.x,m.y);(function(){for(r=1;r<=b;r++)for(n=1;n<=d;n++){var a=
+(d+1)*r+(n-1),c=(d+1)*r+n,e=(d+1)*(r-1)+n;w.push((d+1)*(r-1)+(n-1),a,e);w.push(a,c,e)}})();this.setIndex(w);this.addAttribute("position",new C(l,3));this.addAttribute("normal",new C(t,3));this.addAttribute("uv",new C(u,2))}function Qc(a,b,c,d,e,f,g){I.call(this);this.type="TorusKnotGeometry";this.parameters={radius:a,tube:b,tubularSegments:c,radialSegments:d,p:e,q:f};void 0!==g&&console.warn("THREE.TorusKnotGeometry: heightScale has been deprecated. Use .scale( x, y, z ) instead.");this.fromBufferGeometry(new $b(a,
+b,c,d,e,f));this.mergeVertices()}function $b(a,b,c,d,e,f){function g(a,b,c,d,e){var f=Math.sin(a);b=c/b*a;c=Math.cos(b);e.x=d*(2+c)*.5*Math.cos(a);e.y=d*(2+c)*f*.5;e.z=d*Math.sin(b)*.5}E.call(this);this.type="TorusKnotBufferGeometry";this.parameters={radius:a,tube:b,tubularSegments:c,radialSegments:d,p:e,q:f};a=a||1;b=b||.4;c=Math.floor(c)||64;d=Math.floor(d)||8;e=e||2;f=f||3;var h=[],k=[],m=[],q=[],n,r=new p,l=new p,t=new p,u=new p,w=new p,A=new p,v=new p;for(n=0;n<=c;++n){var H=n/c*e*Math.PI*2;
+g(H,e,f,a,t);g(H+.01,e,f,a,u);A.subVectors(u,t);v.addVectors(u,t);w.crossVectors(A,v);v.crossVectors(w,A);w.normalize();v.normalize();for(H=0;H<=d;++H){var y=H/d*Math.PI*2,N=-b*Math.cos(y);y=b*Math.sin(y);r.x=t.x+(N*v.x+y*w.x);r.y=t.y+(N*v.y+y*w.y);r.z=t.z+(N*v.z+y*w.z);k.push(r.x,r.y,r.z);l.subVectors(r,t).normalize();m.push(l.x,l.y,l.z);q.push(n/c);q.push(H/d)}}for(H=1;H<=c;H++)for(n=1;n<=d;n++)a=(d+1)*H+(n-1),b=(d+1)*H+n,e=(d+1)*(H-1)+n,h.push((d+1)*(H-1)+(n-1),a,e),h.push(a,b,e);this.setIndex(h);
+this.addAttribute("position",new C(k,3));this.addAttribute("normal",new C(m,3));this.addAttribute("uv",new C(q,2))}function Rc(a,b,c,d,e){I.call(this);this.type="TorusGeometry";this.parameters={radius:a,tube:b,radialSegments:c,tubularSegments:d,arc:e};this.fromBufferGeometry(new ac(a,b,c,d,e));this.mergeVertices()}function ac(a,b,c,d,e){E.call(this);this.type="TorusBufferGeometry";this.parameters={radius:a,tube:b,radialSegments:c,tubularSegments:d,arc:e};a=a||1;b=b||.4;c=Math.floor(c)||8;d=Math.floor(d)||
+6;e=e||2*Math.PI;var f=[],g=[],h=[],k=[],m=new p,q=new p,n=new p,r,l;for(r=0;r<=c;r++)for(l=0;l<=d;l++){var t=l/d*e,u=r/c*Math.PI*2;q.x=(a+b*Math.cos(u))*Math.cos(t);q.y=(a+b*Math.cos(u))*Math.sin(t);q.z=b*Math.sin(u);g.push(q.x,q.y,q.z);m.x=a*Math.cos(t);m.y=a*Math.sin(t);n.subVectors(q,m).normalize();h.push(n.x,n.y,n.z);k.push(l/d);k.push(r/c)}for(r=1;r<=c;r++)for(l=1;l<=d;l++)a=(d+1)*(r-1)+l-1,b=(d+1)*(r-1)+l,e=(d+1)*r+l,f.push((d+1)*r+l-1,a,e),f.push(a,b,e);this.setIndex(f);this.addAttribute("position",
+new C(g,3));this.addAttribute("normal",new C(h,3));this.addAttribute("uv",new C(k,2))}function jf(a,b,c,d,e){for(var f,g=0,h=b,k=c-d;h<c;h+=d)g+=(a[k]-a[h])*(a[h+1]+a[k+1]),k=h;if(e===0<g)for(e=b;e<c;e+=d)f=kf(e,a[e],a[e+1],f);else for(e=c-d;e>=b;e-=d)f=kf(e,a[e],a[e+1],f);f&&ub(f,f.next)&&(Sc(f),f=f.next);return f}function Tc(a,b){if(!a)return a;b||(b=a);do{var c=!1;if(a.steiner||!ub(a,a.next)&&0!==na(a.prev,a,a.next))a=a.next;else{Sc(a);a=b=a.prev;if(a===a.next)break;c=!0}}while(c||a!==b);return b}
+function Uc(a,b,c,d,e,f,g){if(a){if(!g&&f){var h=a,k=h;do null===k.z&&(k.z=ee(k.x,k.y,d,e,f)),k.prevZ=k.prev,k=k.nextZ=k.next;while(k!==h);k.prevZ.nextZ=null;k.prevZ=null;h=k;var m,q,n,r,l=1;do{k=h;var t=h=null;for(q=0;k;){q++;var u=k;for(m=n=0;m<l&&(n++,u=u.nextZ,u);m++);for(r=l;0<n||0<r&&u;)0!==n&&(0===r||!u||k.z<=u.z)?(m=k,k=k.nextZ,n--):(m=u,u=u.nextZ,r--),t?t.nextZ=m:h=m,m.prevZ=t,t=m;k=u}t.nextZ=null;l*=2}while(1<q)}for(h=a;a.prev!==a.next;){k=a.prev;u=a.next;if(f)a:{t=a;r=d;var p=e,A=f;q=t.prev;
+n=t;l=t.next;if(0<=na(q,n,l))t=!1;else{var v=q.x>n.x?q.x>l.x?q.x:l.x:n.x>l.x?n.x:l.x,H=q.y>n.y?q.y>l.y?q.y:l.y:n.y>l.y?n.y:l.y;m=ee(q.x<n.x?q.x<l.x?q.x:l.x:n.x<l.x?n.x:l.x,q.y<n.y?q.y<l.y?q.y:l.y:n.y<l.y?n.y:l.y,r,p,A);r=ee(v,H,r,p,A);for(p=t.nextZ;p&&p.z<=r;){if(p!==t.prev&&p!==t.next&&zd(q.x,q.y,n.x,n.y,l.x,l.y,p.x,p.y)&&0<=na(p.prev,p,p.next)){t=!1;break a}p=p.nextZ}for(p=t.prevZ;p&&p.z>=m;){if(p!==t.prev&&p!==t.next&&zd(q.x,q.y,n.x,n.y,l.x,l.y,p.x,p.y)&&0<=na(p.prev,p,p.next)){t=!1;break a}p=
+p.prevZ}t=!0}}else a:if(t=a,q=t.prev,n=t,l=t.next,0<=na(q,n,l))t=!1;else{for(m=t.next.next;m!==t.prev;){if(zd(q.x,q.y,n.x,n.y,l.x,l.y,m.x,m.y)&&0<=na(m.prev,m,m.next)){t=!1;break a}m=m.next}t=!0}if(t)b.push(k.i/c),b.push(a.i/c),b.push(u.i/c),Sc(a),h=a=u.next;else if(a=u,a===h){if(!g)Uc(Tc(a),b,c,d,e,f,1);else if(1===g){g=b;h=c;k=a;do u=k.prev,t=k.next.next,!ub(u,t)&&lf(u,k,k.next,t)&&Vc(u,t)&&Vc(t,u)&&(g.push(u.i/h),g.push(k.i/h),g.push(t.i/h),Sc(k),Sc(k.next),k=a=t),k=k.next;while(k!==a);a=k;Uc(a,
+b,c,d,e,f,2)}else if(2===g)a:{g=a;do{for(h=g.next.next;h!==g.prev;){if(k=g.i!==h.i){k=g;u=h;if(t=k.next.i!==u.i&&k.prev.i!==u.i){b:{t=k;do{if(t.i!==k.i&&t.next.i!==k.i&&t.i!==u.i&&t.next.i!==u.i&&lf(t,t.next,k,u)){t=!0;break b}t=t.next}while(t!==k);t=!1}t=!t}if(t=t&&Vc(k,u)&&Vc(u,k)){t=k;q=!1;n=(k.x+u.x)/2;u=(k.y+u.y)/2;do t.y>u!==t.next.y>u&&t.next.y!==t.y&&n<(t.next.x-t.x)*(u-t.y)/(t.next.y-t.y)+t.x&&(q=!q),t=t.next;while(t!==k);t=q}k=t}if(k){a=mf(g,h);g=Tc(g,g.next);a=Tc(a,a.next);Uc(g,b,c,d,e,
+f);Uc(a,b,c,d,e,f);break a}h=h.next}g=g.next}while(g!==a)}break}}}}function Ug(a,b){return a.x-b.x}function Vg(a,b){var c=b,d=a.x,e=a.y,f=-Infinity;do{if(e<=c.y&&e>=c.next.y&&c.next.y!==c.y){var g=c.x+(e-c.y)*(c.next.x-c.x)/(c.next.y-c.y);if(g<=d&&g>f){f=g;if(g===d){if(e===c.y)return c;if(e===c.next.y)return c.next}var h=c.x<c.next.x?c:c.next}}c=c.next}while(c!==b);if(!h)return null;if(d===f)return h.prev;b=h;g=h.x;var k=h.y,m=Infinity;for(c=h.next;c!==b;){if(d>=c.x&&c.x>=g&&d!==c.x&&zd(e<k?d:f,e,
+g,k,e<k?f:d,e,c.x,c.y)){var q=Math.abs(e-c.y)/(d-c.x);(q<m||q===m&&c.x>h.x)&&Vc(c,a)&&(h=c,m=q)}c=c.next}return h}function ee(a,b,c,d,e){a=32767*(a-c)*e;b=32767*(b-d)*e;a=(a|a<<8)&16711935;a=(a|a<<4)&252645135;a=(a|a<<2)&858993459;b=(b|b<<8)&16711935;b=(b|b<<4)&252645135;b=(b|b<<2)&858993459;return(a|a<<1)&1431655765|((b|b<<1)&1431655765)<<1}function Wg(a){var b=a,c=a;do b.x<c.x&&(c=b),b=b.next;while(b!==a);return c}function zd(a,b,c,d,e,f,g,h){return 0<=(e-g)*(b-h)-(a-g)*(f-h)&&0<=(a-g)*(d-h)-(c-
+g)*(b-h)&&0<=(c-g)*(f-h)-(e-g)*(d-h)}function na(a,b,c){return(b.y-a.y)*(c.x-b.x)-(b.x-a.x)*(c.y-b.y)}function ub(a,b){return a.x===b.x&&a.y===b.y}function lf(a,b,c,d){return ub(a,b)&&ub(c,d)||ub(a,d)&&ub(c,b)?!0:0<na(a,b,c)!==0<na(a,b,d)&&0<na(c,d,a)!==0<na(c,d,b)}function Vc(a,b){return 0>na(a.prev,a,a.next)?0<=na(a,b,a.next)&&0<=na(a,a.prev,b):0>na(a,b,a.prev)||0>na(a,a.next,b)}function mf(a,b){var c=new fe(a.i,a.x,a.y),d=new fe(b.i,b.x,b.y),e=a.next,f=b.prev;a.next=b;b.prev=a;c.next=e;e.prev=
+c;d.next=c;c.prev=d;f.next=d;d.prev=f;return d}function kf(a,b,c,d){a=new fe(a,b,c);d?(a.next=d.next,a.prev=d,d.next.prev=a,d.next=a):(a.prev=a,a.next=a);return a}function Sc(a){a.next.prev=a.prev;a.prev.next=a.next;a.prevZ&&(a.prevZ.nextZ=a.nextZ);a.nextZ&&(a.nextZ.prevZ=a.prevZ)}function fe(a,b,c){this.i=a;this.x=b;this.y=c;this.nextZ=this.prevZ=this.z=this.next=this.prev=null;this.steiner=!1}function nf(a){var b=a.length;2<b&&a[b-1].equals(a[0])&&a.pop()}function of(a,b){for(var c=0;c<b.length;c++)a.push(b[c].x),
+a.push(b[c].y)}function vb(a,b){I.call(this);this.type="ExtrudeGeometry";this.parameters={shapes:a,options:b};this.fromBufferGeometry(new Sa(a,b));this.mergeVertices()}function Sa(a,b){function c(a){function c(a,b,c){b||console.error("THREE.ExtrudeGeometry: vec does not exist");return b.clone().multiplyScalar(c).add(a)}function g(a,b,c){var d=a.x-b.x;var e=a.y-b.y;var f=c.x-a.x;var g=c.y-a.y,h=d*d+e*e;if(Math.abs(d*g-e*f)>Number.EPSILON){var k=Math.sqrt(h),m=Math.sqrt(f*f+g*g);h=b.x-e/k;b=b.y+d/k;
+g=((c.x-g/m-h)*g-(c.y+f/m-b)*f)/(d*g-e*f);f=h+d*g-a.x;d=b+e*g-a.y;e=f*f+d*d;if(2>=e)return new z(f,d);e=Math.sqrt(e/2)}else a=!1,d>Number.EPSILON?f>Number.EPSILON&&(a=!0):d<-Number.EPSILON?f<-Number.EPSILON&&(a=!0):Math.sign(e)===Math.sign(g)&&(a=!0),a?(f=-e,e=Math.sqrt(h)):(f=d,d=e,e=Math.sqrt(h/2));return new z(f/e,d/e)}function h(a,b){for(M=a.length;0<=--M;){var c=M;var f=M-1;0>f&&(f=a.length-1);var g,h=v+2*B;for(g=0;g<h;g++){var k=W*g,m=W*(g+1),q=b+f+k,n=b+f+m;m=b+c+m;t(b+c+k);t(q);t(m);t(q);
+t(n);t(m);k=e.length/3;k=D.generateSideWallUV(d,e,k-6,k-3,k-2,k-1);u(k[0]);u(k[1]);u(k[3]);u(k[1]);u(k[2]);u(k[3])}}}function k(a,b,c){w.push(a);w.push(b);w.push(c)}function l(a,b,c){t(a);t(b);t(c);a=e.length/3;a=D.generateTopUV(d,e,a-3,a-2,a-1);u(a[0]);u(a[1]);u(a[2])}function t(a){e.push(w[3*a]);e.push(w[3*a+1]);e.push(w[3*a+2])}function u(a){f.push(a.x);f.push(a.y)}var w=[],A=void 0!==b.curveSegments?b.curveSegments:12,v=void 0!==b.steps?b.steps:1,H=void 0!==b.depth?b.depth:100,y=void 0!==b.bevelEnabled?
+b.bevelEnabled:!0,N=void 0!==b.bevelThickness?b.bevelThickness:6,X=void 0!==b.bevelSize?b.bevelSize:N-2,B=void 0!==b.bevelSegments?b.bevelSegments:3,C=b.extrudePath,D=void 0!==b.UVGenerator?b.UVGenerator:Xg;void 0!==b.amount&&(console.warn("THREE.ExtrudeBufferGeometry: amount has been renamed to depth."),H=b.amount);var E=!1;if(C){var Y=C.getSpacedPoints(v);E=!0;y=!1;var G=C.computeFrenetFrames(v,!1);var J=new p;var Q=new p;var I=new p}y||(X=N=B=0);var P;A=a.extractPoints(A);a=A.shape;var L=A.holes;
+if(!Za.isClockWise(a)){a=a.reverse();var F=0;for(P=L.length;F<P;F++){var K=L[F];Za.isClockWise(K)&&(L[F]=K.reverse())}}var R=Za.triangulateShape(a,L),T=a;F=0;for(P=L.length;F<P;F++)K=L[F],a=a.concat(K);var S,W=a.length,V,Z=R.length;A=[];var M=0;var U=T.length;var fa=U-1;for(S=M+1;M<U;M++,fa++,S++)fa===U&&(fa=0),S===U&&(S=0),A[M]=g(T[M],T[fa],T[S]);C=[];var ea=A.concat();F=0;for(P=L.length;F<P;F++){K=L[F];var ca=[];M=0;U=K.length;fa=U-1;for(S=M+1;M<U;M++,fa++,S++)fa===U&&(fa=0),S===U&&(S=0),ca[M]=
+g(K[M],K[fa],K[S]);C.push(ca);ea=ea.concat(ca)}for(fa=0;fa<B;fa++){U=fa/B;var da=N*Math.cos(U*Math.PI/2);S=X*Math.sin(U*Math.PI/2);M=0;for(U=T.length;M<U;M++){var aa=c(T[M],A[M],S);k(aa.x,aa.y,-da)}F=0;for(P=L.length;F<P;F++)for(K=L[F],ca=C[F],M=0,U=K.length;M<U;M++)aa=c(K[M],ca[M],S),k(aa.x,aa.y,-da)}S=X;for(M=0;M<W;M++)aa=y?c(a[M],ea[M],S):a[M],E?(Q.copy(G.normals[0]).multiplyScalar(aa.x),J.copy(G.binormals[0]).multiplyScalar(aa.y),I.copy(Y[0]).add(Q).add(J),k(I.x,I.y,I.z)):k(aa.x,aa.y,0);for(U=
+1;U<=v;U++)for(M=0;M<W;M++)aa=y?c(a[M],ea[M],S):a[M],E?(Q.copy(G.normals[U]).multiplyScalar(aa.x),J.copy(G.binormals[U]).multiplyScalar(aa.y),I.copy(Y[U]).add(Q).add(J),k(I.x,I.y,I.z)):k(aa.x,aa.y,H/v*U);for(fa=B-1;0<=fa;fa--){U=fa/B;da=N*Math.cos(U*Math.PI/2);S=X*Math.sin(U*Math.PI/2);M=0;for(U=T.length;M<U;M++)aa=c(T[M],A[M],S),k(aa.x,aa.y,H+da);F=0;for(P=L.length;F<P;F++)for(K=L[F],ca=C[F],M=0,U=K.length;M<U;M++)aa=c(K[M],ca[M],S),E?k(aa.x,aa.y+Y[v-1].y,Y[v-1].x+da):k(aa.x,aa.y,H+da)}(function(){var a=
+e.length/3;if(y){var b=0*W;for(M=0;M<Z;M++)V=R[M],l(V[2]+b,V[1]+b,V[0]+b);b=W*(v+2*B);for(M=0;M<Z;M++)V=R[M],l(V[0]+b,V[1]+b,V[2]+b)}else{for(M=0;M<Z;M++)V=R[M],l(V[2],V[1],V[0]);for(M=0;M<Z;M++)V=R[M],l(V[0]+W*v,V[1]+W*v,V[2]+W*v)}d.addGroup(a,e.length/3-a,0)})();(function(){var a=e.length/3,b=0;h(T,b);b+=T.length;F=0;for(P=L.length;F<P;F++)K=L[F],h(K,b),b+=K.length;d.addGroup(a,e.length/3-a,1)})()}E.call(this);this.type="ExtrudeBufferGeometry";this.parameters={shapes:a,options:b};a=Array.isArray(a)?
+a:[a];for(var d=this,e=[],f=[],g=0,h=a.length;g<h;g++)c(a[g]);this.addAttribute("position",new C(e,3));this.addAttribute("uv",new C(f,2));this.computeVertexNormals()}function pf(a,b,c){c.shapes=[];if(Array.isArray(a))for(var d=0,e=a.length;d<e;d++)c.shapes.push(a[d].uuid);else c.shapes.push(a.uuid);void 0!==b.extrudePath&&(c.options.extrudePath=b.extrudePath.toJSON());return c}function Wc(a,b){I.call(this);this.type="TextGeometry";this.parameters={text:a,parameters:b};this.fromBufferGeometry(new bc(a,
+b));this.mergeVertices()}function bc(a,b){b=b||{};var c=b.font;if(!c||!c.isFont)return console.error("THREE.TextGeometry: font parameter is not an instance of THREE.Font."),new I;a=c.generateShapes(a,b.size);b.depth=void 0!==b.height?b.height:50;void 0===b.bevelThickness&&(b.bevelThickness=10);void 0===b.bevelSize&&(b.bevelSize=8);void 0===b.bevelEnabled&&(b.bevelEnabled=!1);Sa.call(this,a,b);this.type="TextBufferGeometry"}function Xc(a,b,c,d,e,f,g){I.call(this);this.type="SphereGeometry";this.parameters=
+{radius:a,widthSegments:b,heightSegments:c,phiStart:d,phiLength:e,thetaStart:f,thetaLength:g};this.fromBufferGeometry(new wb(a,b,c,d,e,f,g));this.mergeVertices()}function wb(a,b,c,d,e,f,g){E.call(this);this.type="SphereBufferGeometry";this.parameters={radius:a,widthSegments:b,heightSegments:c,phiStart:d,phiLength:e,thetaStart:f,thetaLength:g};a=a||1;b=Math.max(3,Math.floor(b)||8);c=Math.max(2,Math.floor(c)||6);d=void 0!==d?d:0;e=void 0!==e?e:2*Math.PI;f=void 0!==f?f:0;g=void 0!==g?g:Math.PI;var h=
+f+g,k,m,q=0,n=[],l=new p,x=new p,t=[],u=[],w=[],A=[];for(m=0;m<=c;m++){var v=[],H=m/c;for(k=0;k<=b;k++){var y=k/b;l.x=-a*Math.cos(d+y*e)*Math.sin(f+H*g);l.y=a*Math.cos(f+H*g);l.z=a*Math.sin(d+y*e)*Math.sin(f+H*g);u.push(l.x,l.y,l.z);x.set(l.x,l.y,l.z).normalize();w.push(x.x,x.y,x.z);A.push(y,1-H);v.push(q++)}n.push(v)}for(m=0;m<c;m++)for(k=0;k<b;k++)a=n[m][k+1],d=n[m][k],e=n[m+1][k],g=n[m+1][k+1],(0!==m||0<f)&&t.push(a,d,g),(m!==c-1||h<Math.PI)&&t.push(d,e,g);this.setIndex(t);this.addAttribute("position",
+new C(u,3));this.addAttribute("normal",new C(w,3));this.addAttribute("uv",new C(A,2))}function Yc(a,b,c,d,e,f){I.call(this);this.type="RingGeometry";this.parameters={innerRadius:a,outerRadius:b,thetaSegments:c,phiSegments:d,thetaStart:e,thetaLength:f};this.fromBufferGeometry(new cc(a,b,c,d,e,f));this.mergeVertices()}function cc(a,b,c,d,e,f){E.call(this);this.type="RingBufferGeometry";this.parameters={innerRadius:a,outerRadius:b,thetaSegments:c,phiSegments:d,thetaStart:e,thetaLength:f};a=a||.5;b=b||
+1;e=void 0!==e?e:0;f=void 0!==f?f:2*Math.PI;c=void 0!==c?Math.max(3,c):8;d=void 0!==d?Math.max(1,d):1;var g=[],h=[],k=[],m=[],q=a,n=(b-a)/d,l=new p,x=new z,t,u;for(t=0;t<=d;t++){for(u=0;u<=c;u++)a=e+u/c*f,l.x=q*Math.cos(a),l.y=q*Math.sin(a),h.push(l.x,l.y,l.z),k.push(0,0,1),x.x=(l.x/b+1)/2,x.y=(l.y/b+1)/2,m.push(x.x,x.y);q+=n}for(t=0;t<d;t++)for(b=t*(c+1),u=0;u<c;u++)a=u+b,e=a+c+1,f=a+c+2,q=a+1,g.push(a,e,q),g.push(e,f,q);this.setIndex(g);this.addAttribute("position",new C(h,3));this.addAttribute("normal",
+new C(k,3));this.addAttribute("uv",new C(m,2))}function Zc(a,b,c,d){I.call(this);this.type="LatheGeometry";this.parameters={points:a,segments:b,phiStart:c,phiLength:d};this.fromBufferGeometry(new dc(a,b,c,d));this.mergeVertices()}function dc(a,b,c,d){E.call(this);this.type="LatheBufferGeometry";this.parameters={points:a,segments:b,phiStart:c,phiLength:d};b=Math.floor(b)||12;c=c||0;d=d||2*Math.PI;d=R.clamp(d,0,2*Math.PI);var e=[],f=[],g=[],h=1/b,k=new p,m=new z,q;for(q=0;q<=b;q++){var n=c+q*h*d;var l=
+Math.sin(n),x=Math.cos(n);for(n=0;n<=a.length-1;n++)k.x=a[n].x*l,k.y=a[n].y,k.z=a[n].x*x,f.push(k.x,k.y,k.z),m.x=q/b,m.y=n/(a.length-1),g.push(m.x,m.y)}for(q=0;q<b;q++)for(n=0;n<a.length-1;n++)c=n+q*a.length,h=c+a.length,k=c+a.length+1,m=c+1,e.push(c,h,m),e.push(h,k,m);this.setIndex(e);this.addAttribute("position",new C(f,3));this.addAttribute("uv",new C(g,2));this.computeVertexNormals();if(d===2*Math.PI)for(d=this.attributes.normal.array,e=new p,f=new p,g=new p,c=b*a.length*3,n=q=0;q<a.length;q++,
+n+=3)e.x=d[n+0],e.y=d[n+1],e.z=d[n+2],f.x=d[c+n+0],f.y=d[c+n+1],f.z=d[c+n+2],g.addVectors(e,f).normalize(),d[n+0]=d[c+n+0]=g.x,d[n+1]=d[c+n+1]=g.y,d[n+2]=d[c+n+2]=g.z}function xb(a,b){I.call(this);this.type="ShapeGeometry";"object"===typeof b&&(console.warn("THREE.ShapeGeometry: Options parameter has been removed."),b=b.curveSegments);this.parameters={shapes:a,curveSegments:b};this.fromBufferGeometry(new yb(a,b));this.mergeVertices()}function yb(a,b){function c(a){var c,h=e.length/3;a=a.extractPoints(b);
+var m=a.shape,q=a.holes;if(!1===Za.isClockWise(m))for(m=m.reverse(),a=0,c=q.length;a<c;a++){var l=q[a];!0===Za.isClockWise(l)&&(q[a]=l.reverse())}var p=Za.triangulateShape(m,q);a=0;for(c=q.length;a<c;a++)l=q[a],m=m.concat(l);a=0;for(c=m.length;a<c;a++)l=m[a],e.push(l.x,l.y,0),f.push(0,0,1),g.push(l.x,l.y);a=0;for(c=p.length;a<c;a++)m=p[a],d.push(m[0]+h,m[1]+h,m[2]+h),k+=3}E.call(this);this.type="ShapeBufferGeometry";this.parameters={shapes:a,curveSegments:b};b=b||12;var d=[],e=[],f=[],g=[],h=0,k=
+0;if(!1===Array.isArray(a))c(a);else for(var m=0;m<a.length;m++)c(a[m]),this.addGroup(h,k,m),h+=k,k=0;this.setIndex(d);this.addAttribute("position",new C(e,3));this.addAttribute("normal",new C(f,3));this.addAttribute("uv",new C(g,2))}function qf(a,b){b.shapes=[];if(Array.isArray(a))for(var c=0,d=a.length;c<d;c++)b.shapes.push(a[c].uuid);else b.shapes.push(a.uuid);return b}function ec(a,b){E.call(this);this.type="EdgesGeometry";this.parameters={thresholdAngle:b};var c=[];b=Math.cos(R.DEG2RAD*(void 0!==
+b?b:1));var d=[0,0],e={},f=["a","b","c"];if(a.isBufferGeometry){var g=new I;g.fromBufferGeometry(a)}else g=a.clone();g.mergeVertices();g.computeFaceNormals();a=g.vertices;g=g.faces;for(var h=0,k=g.length;h<k;h++)for(var m=g[h],q=0;3>q;q++){var n=m[f[q]];var l=m[f[(q+1)%3]];d[0]=Math.min(n,l);d[1]=Math.max(n,l);n=d[0]+","+d[1];void 0===e[n]?e[n]={index1:d[0],index2:d[1],face1:h,face2:void 0}:e[n].face2=h}for(n in e)if(d=e[n],void 0===d.face2||g[d.face1].normal.dot(g[d.face2].normal)<=b)f=a[d.index1],
+c.push(f.x,f.y,f.z),f=a[d.index2],c.push(f.x,f.y,f.z);this.addAttribute("position",new C(c,3))}function zb(a,b,c,d,e,f,g,h){I.call(this);this.type="CylinderGeometry";this.parameters={radiusTop:a,radiusBottom:b,height:c,radialSegments:d,heightSegments:e,openEnded:f,thetaStart:g,thetaLength:h};this.fromBufferGeometry(new $a(a,b,c,d,e,f,g,h));this.mergeVertices()}function $a(a,b,c,d,e,f,g,h){function k(c){var e,f=new z,k=new p,r=0,u=!0===c?a:b,v=!0===c?1:-1;var C=t;for(e=1;e<=d;e++)n.push(0,w*v,0),l.push(0,
+v,0),x.push(.5,.5),t++;var D=t;for(e=0;e<=d;e++){var E=e/d*h+g,F=Math.cos(E);E=Math.sin(E);k.x=u*E;k.y=w*v;k.z=u*F;n.push(k.x,k.y,k.z);l.push(0,v,0);f.x=.5*F+.5;f.y=.5*E*v+.5;x.push(f.x,f.y);t++}for(e=0;e<d;e++)f=C+e,k=D+e,!0===c?q.push(k,k+1,f):q.push(k+1,k,f),r+=3;m.addGroup(A,r,!0===c?1:2);A+=r}E.call(this);this.type="CylinderBufferGeometry";this.parameters={radiusTop:a,radiusBottom:b,height:c,radialSegments:d,heightSegments:e,openEnded:f,thetaStart:g,thetaLength:h};var m=this;a=void 0!==a?a:1;
+b=void 0!==b?b:1;c=c||1;d=Math.floor(d)||8;e=Math.floor(e)||1;f=void 0!==f?f:!1;g=void 0!==g?g:0;h=void 0!==h?h:2*Math.PI;var q=[],n=[],l=[],x=[],t=0,u=[],w=c/2,A=0;(function(){var f,k,r=new p,N=new p,z=0,B=(b-a)/c;for(k=0;k<=e;k++){var C=[],E=k/e,D=E*(b-a)+a;for(f=0;f<=d;f++){var F=f/d,G=F*h+g,J=Math.sin(G);G=Math.cos(G);N.x=D*J;N.y=-E*c+w;N.z=D*G;n.push(N.x,N.y,N.z);r.set(J,B,G).normalize();l.push(r.x,r.y,r.z);x.push(F,1-E);C.push(t++)}u.push(C)}for(f=0;f<d;f++)for(k=0;k<e;k++)r=u[k+1][f],N=u[k+
+1][f+1],B=u[k][f+1],q.push(u[k][f],r,B),q.push(r,N,B),z+=6;m.addGroup(A,z,0);A+=z})();!1===f&&(0<a&&k(!0),0<b&&k(!1));this.setIndex(q);this.addAttribute("position",new C(n,3));this.addAttribute("normal",new C(l,3));this.addAttribute("uv",new C(x,2))}function $c(a,b,c,d,e,f,g){zb.call(this,0,a,b,c,d,e,f,g);this.type="ConeGeometry";this.parameters={radius:a,height:b,radialSegments:c,heightSegments:d,openEnded:e,thetaStart:f,thetaLength:g}}function ad(a,b,c,d,e,f,g){$a.call(this,0,a,b,c,d,e,f,g);this.type=
+"ConeBufferGeometry";this.parameters={radius:a,height:b,radialSegments:c,heightSegments:d,openEnded:e,thetaStart:f,thetaLength:g}}function bd(a,b,c,d){I.call(this);this.type="CircleGeometry";this.parameters={radius:a,segments:b,thetaStart:c,thetaLength:d};this.fromBufferGeometry(new fc(a,b,c,d));this.mergeVertices()}function fc(a,b,c,d){E.call(this);this.type="CircleBufferGeometry";this.parameters={radius:a,segments:b,thetaStart:c,thetaLength:d};a=a||1;b=void 0!==b?Math.max(3,b):8;c=void 0!==c?c:
+0;d=void 0!==d?d:2*Math.PI;var e=[],f=[],g=[],h=[],k,m=new p,q=new z;f.push(0,0,0);g.push(0,0,1);h.push(.5,.5);var n=0;for(k=3;n<=b;n++,k+=3){var l=c+n/b*d;m.x=a*Math.cos(l);m.y=a*Math.sin(l);f.push(m.x,m.y,m.z);g.push(0,0,1);q.x=(f[k]/a+1)/2;q.y=(f[k+1]/a+1)/2;h.push(q.x,q.y)}for(k=1;k<=b;k++)e.push(k,k+1,0);this.setIndex(e);this.addAttribute("position",new C(f,3));this.addAttribute("normal",new C(g,3));this.addAttribute("uv",new C(h,2))}function Ab(a){L.call(this);this.type="ShadowMaterial";this.color=
+new G(0);this.transparent=!0;this.setValues(a)}function gc(a){ka.call(this,a);this.type="RawShaderMaterial"}function Ta(a){L.call(this);this.defines={STANDARD:""};this.type="MeshStandardMaterial";this.color=new G(16777215);this.metalness=this.roughness=.5;this.lightMap=this.map=null;this.lightMapIntensity=1;this.aoMap=null;this.aoMapIntensity=1;this.emissive=new G(0);this.emissiveIntensity=1;this.bumpMap=this.emissiveMap=null;this.bumpScale=1;this.normalMap=null;this.normalMapType=0;this.normalScale=
+new z(1,1);this.displacementMap=null;this.displacementScale=1;this.displacementBias=0;this.envMap=this.alphaMap=this.metalnessMap=this.roughnessMap=null;this.envMapIntensity=1;this.refractionRatio=.98;this.wireframe=!1;this.wireframeLinewidth=1;this.wireframeLinejoin=this.wireframeLinecap="round";this.morphNormals=this.morphTargets=this.skinning=!1;this.setValues(a)}function Bb(a){Ta.call(this);this.defines={PHYSICAL:""};this.type="MeshPhysicalMaterial";this.reflectivity=.5;this.clearCoatRoughness=
+this.clearCoat=0;this.setValues(a)}function Ia(a){L.call(this);this.type="MeshPhongMaterial";this.color=new G(16777215);this.specular=new G(1118481);this.shininess=30;this.lightMap=this.map=null;this.lightMapIntensity=1;this.aoMap=null;this.aoMapIntensity=1;this.emissive=new G(0);this.emissiveIntensity=1;this.bumpMap=this.emissiveMap=null;this.bumpScale=1;this.normalMap=null;this.normalMapType=0;this.normalScale=new z(1,1);this.displacementMap=null;this.displacementScale=1;this.displacementBias=0;
+this.envMap=this.alphaMap=this.specularMap=null;this.combine=0;this.reflectivity=1;this.refractionRatio=.98;this.wireframe=!1;this.wireframeLinewidth=1;this.wireframeLinejoin=this.wireframeLinecap="round";this.morphNormals=this.morphTargets=this.skinning=!1;this.setValues(a)}function Cb(a){Ia.call(this);this.defines={TOON:""};this.type="MeshToonMaterial";this.gradientMap=null;this.setValues(a)}function Db(a){L.call(this);this.type="MeshNormalMaterial";this.bumpMap=null;this.bumpScale=1;this.normalMap=
+null;this.normalMapType=0;this.normalScale=new z(1,1);this.displacementMap=null;this.displacementScale=1;this.displacementBias=0;this.wireframe=!1;this.wireframeLinewidth=1;this.morphNormals=this.morphTargets=this.skinning=this.lights=this.fog=!1;this.setValues(a)}function Eb(a){L.call(this);this.type="MeshLambertMaterial";this.color=new G(16777215);this.lightMap=this.map=null;this.lightMapIntensity=1;this.aoMap=null;this.aoMapIntensity=1;this.emissive=new G(0);this.emissiveIntensity=1;this.envMap=
+this.alphaMap=this.specularMap=this.emissiveMap=null;this.combine=0;this.reflectivity=1;this.refractionRatio=.98;this.wireframe=!1;this.wireframeLinewidth=1;this.wireframeLinejoin=this.wireframeLinecap="round";this.morphNormals=this.morphTargets=this.skinning=!1;this.setValues(a)}function Fb(a){L.call(this);this.defines={MATCAP:""};this.type="MeshMatcapMaterial";this.color=new G(16777215);this.bumpMap=this.map=this.matcap=null;this.bumpScale=1;this.normalMap=null;this.normalMapType=0;this.normalScale=
+new z(1,1);this.displacementMap=null;this.displacementScale=1;this.displacementBias=0;this.alphaMap=null;this.lights=this.morphNormals=this.morphTargets=this.skinning=!1;this.setValues(a);if(null===this.matcap){a=document.createElement("canvas");a.width=1;a.height=1;var b=a.getContext("2d");b.fillStyle="#fff";b.fillRect(0,0,1,1);this.matcap=new THREE.CanvasTexture(a)}}function Gb(a){T.call(this);this.type="LineDashedMaterial";this.scale=1;this.dashSize=3;this.gapSize=1;this.setValues(a)}function Ca(a,
+b,c,d){this.parameterPositions=a;this._cachedIndex=0;this.resultBuffer=void 0!==d?d:new b.constructor(c);this.sampleValues=b;this.valueSize=c}function Ad(a,b,c,d){Ca.call(this,a,b,c,d);this._offsetNext=this._weightNext=this._offsetPrev=this._weightPrev=-0}function cd(a,b,c,d){Ca.call(this,a,b,c,d)}function Bd(a,b,c,d){Ca.call(this,a,b,c,d)}function qa(a,b,c,d){if(void 0===a)throw Error("THREE.KeyframeTrack: track name is undefined");if(void 0===b||0===b.length)throw Error("THREE.KeyframeTrack: no keyframes in track named "+
+a);this.name=a;this.times=ra.convertArray(b,this.TimeBufferType);this.values=ra.convertArray(c,this.ValueBufferType);this.setInterpolation(d||this.DefaultInterpolation)}function Cd(a,b,c){qa.call(this,a,b,c)}function Dd(a,b,c,d){qa.call(this,a,b,c,d)}function hc(a,b,c,d){qa.call(this,a,b,c,d)}function Ed(a,b,c,d){Ca.call(this,a,b,c,d)}function dd(a,b,c,d){qa.call(this,a,b,c,d)}function Fd(a,b,c,d){qa.call(this,a,b,c,d)}function ic(a,b,c,d){qa.call(this,a,b,c,d)}function za(a,b,c){this.name=a;this.tracks=
+c;this.duration=void 0!==b?b:-1;this.uuid=R.generateUUID();0>this.duration&&this.resetDuration()}function Yg(a){switch(a.toLowerCase()){case "scalar":case "double":case "float":case "number":case "integer":return hc;case "vector":case "vector2":case "vector3":case "vector4":return ic;case "color":return Dd;case "quaternion":return dd;case "bool":case "boolean":return Cd;case "string":return Fd}throw Error("THREE.KeyframeTrack: Unsupported typeName: "+a);}function Zg(a){if(void 0===a.type)throw Error("THREE.KeyframeTrack: track type undefined, can not parse");
+var b=Yg(a.type);if(void 0===a.times){var c=[],d=[];ra.flattenJSON(a.keys,c,d,"value");a.times=c;a.values=d}return void 0!==b.parse?b.parse(a):new b(a.name,a.times,a.values,a.interpolation)}function ge(a,b,c){var d=this,e=!1,f=0,g=0,h=void 0;this.onStart=void 0;this.onLoad=a;this.onProgress=b;this.onError=c;this.itemStart=function(a){g++;if(!1===e&&void 0!==d.onStart)d.onStart(a,f,g);e=!0};this.itemEnd=function(a){f++;if(void 0!==d.onProgress)d.onProgress(a,f,g);if(f===g&&(e=!1,void 0!==d.onLoad))d.onLoad()};
+this.itemError=function(a){if(void 0!==d.onError)d.onError(a)};this.resolveURL=function(a){return h?h(a):a};this.setURLModifier=function(a){h=a;return this}}function Fa(a){this.manager=void 0!==a?a:ta}function rf(a){this.manager=void 0!==a?a:ta}function sf(a){this.manager=void 0!==a?a:ta;this._parser=null}function he(a){this.manager=void 0!==a?a:ta;this._parser=null}function ed(a){this.manager=void 0!==a?a:ta}function ie(a){this.manager=void 0!==a?a:ta}function Gd(a){this.manager=void 0!==a?a:ta}
+function Q(){this.type="Curve";this.arcLengthDivisions=200}function wa(a,b,c,d,e,f,g,h){Q.call(this);this.type="EllipseCurve";this.aX=a||0;this.aY=b||0;this.xRadius=c||1;this.yRadius=d||1;this.aStartAngle=e||0;this.aEndAngle=f||2*Math.PI;this.aClockwise=g||!1;this.aRotation=h||0}function jc(a,b,c,d,e,f){wa.call(this,a,b,c,c,d,e,f);this.type="ArcCurve"}function je(){var a=0,b=0,c=0,d=0;return{initCatmullRom:function(e,f,g,h,k){e=k*(g-e);h=k*(h-f);a=f;b=e;c=-3*f+3*g-2*e-h;d=2*f-2*g+e+h},initNonuniformCatmullRom:function(e,
+f,g,h,k,m,q){e=((f-e)/k-(g-e)/(k+m)+(g-f)/m)*m;h=((g-f)/m-(h-f)/(m+q)+(h-g)/q)*m;a=f;b=e;c=-3*f+3*g-2*e-h;d=2*f-2*g+e+h},calc:function(e){var f=e*e;return a+b*e+c*f+d*f*e}}}function ua(a,b,c,d){Q.call(this);this.type="CatmullRomCurve3";this.points=a||[];this.closed=b||!1;this.curveType=c||"centripetal";this.tension=d||.5}function tf(a,b,c,d,e){b=.5*(d-b);e=.5*(e-c);var f=a*a;return(2*c-2*d+b+e)*a*f+(-3*c+3*d-2*b-e)*f+b*a+c}function fd(a,b,c,d){var e=1-a;return e*e*b+2*(1-a)*a*c+a*a*d}function gd(a,
+b,c,d,e){var f=1-a,g=1-a;return f*f*f*b+3*g*g*a*c+3*(1-a)*a*a*d+a*a*a*e}function Ja(a,b,c,d){Q.call(this);this.type="CubicBezierCurve";this.v0=a||new z;this.v1=b||new z;this.v2=c||new z;this.v3=d||new z}function Ua(a,b,c,d){Q.call(this);this.type="CubicBezierCurve3";this.v0=a||new p;this.v1=b||new p;this.v2=c||new p;this.v3=d||new p}function Aa(a,b){Q.call(this);this.type="LineCurve";this.v1=a||new z;this.v2=b||new z}function Ka(a,b){Q.call(this);this.type="LineCurve3";this.v1=a||new p;this.v2=b||
+new p}function La(a,b,c){Q.call(this);this.type="QuadraticBezierCurve";this.v0=a||new z;this.v1=b||new z;this.v2=c||new z}function Va(a,b,c){Q.call(this);this.type="QuadraticBezierCurve3";this.v0=a||new p;this.v1=b||new p;this.v2=c||new p}function Ma(a){Q.call(this);this.type="SplineCurve";this.points=a||[]}function ab(){Q.call(this);this.type="CurvePath";this.curves=[];this.autoClose=!1}function Na(a){ab.call(this);this.type="Path";this.currentPoint=new z;a&&this.setFromPoints(a)}function ib(a){Na.call(this,
+a);this.uuid=R.generateUUID();this.type="Shape";this.holes=[]}function ca(a,b){D.call(this);this.type="Light";this.color=new G(a);this.intensity=void 0!==b?b:1;this.receiveShadow=void 0}function Hd(a,b,c){ca.call(this,a,c);this.type="HemisphereLight";this.castShadow=void 0;this.position.copy(D.DefaultUp);this.updateMatrix();this.groundColor=new G(b)}function Hb(a){this.camera=a;this.bias=0;this.radius=1;this.mapSize=new z(512,512);this.map=null;this.matrix=new P}function Id(){Hb.call(this,new V(50,
+1,.5,500))}function Jd(a,b,c,d,e,f){ca.call(this,a,b);this.type="SpotLight";this.position.copy(D.DefaultUp);this.updateMatrix();this.target=new D;Object.defineProperty(this,"power",{get:function(){return this.intensity*Math.PI},set:function(a){this.intensity=a/Math.PI}});this.distance=void 0!==c?c:0;this.angle=void 0!==d?d:Math.PI/3;this.penumbra=void 0!==e?e:0;this.decay=void 0!==f?f:1;this.shadow=new Id}function Kd(a,b,c,d){ca.call(this,a,b);this.type="PointLight";Object.defineProperty(this,"power",
+{get:function(){return 4*this.intensity*Math.PI},set:function(a){this.intensity=a/(4*Math.PI)}});this.distance=void 0!==c?c:0;this.decay=void 0!==d?d:1;this.shadow=new Hb(new V(90,1,.5,500))}function hd(a,b,c,d,e,f){Ra.call(this);this.type="OrthographicCamera";this.zoom=1;this.view=null;this.left=void 0!==a?a:-1;this.right=void 0!==b?b:1;this.top=void 0!==c?c:1;this.bottom=void 0!==d?d:-1;this.near=void 0!==e?e:.1;this.far=void 0!==f?f:2E3;this.updateProjectionMatrix()}function Ld(){Hb.call(this,
+new hd(-5,5,5,-5,.5,500))}function Md(a,b){ca.call(this,a,b);this.type="DirectionalLight";this.position.copy(D.DefaultUp);this.updateMatrix();this.target=new D;this.shadow=new Ld}function Nd(a,b){ca.call(this,a,b);this.type="AmbientLight";this.castShadow=void 0}function Od(a,b,c,d){ca.call(this,a,b);this.type="RectAreaLight";this.width=void 0!==c?c:10;this.height=void 0!==d?d:10}function Pd(a){this.manager=void 0!==a?a:ta;this.textures={}}function ke(a){this.manager=void 0!==a?a:ta}function kc(){}
+function Qd(a){"boolean"===typeof a&&(console.warn("THREE.JSONLoader: showStatus parameter has been removed from constructor."),a=void 0);this.manager=void 0!==a?a:ta;this.withCredentials=!1}function le(a){this.manager=void 0!==a?a:ta;this.resourcePath=""}function me(a){"undefined"===typeof createImageBitmap&&console.warn("THREE.ImageBitmapLoader: createImageBitmap() not supported.");"undefined"===typeof fetch&&console.warn("THREE.ImageBitmapLoader: fetch() not supported.");this.manager=void 0!==
+a?a:ta;this.options=void 0}function ne(){this.type="ShapePath";this.color=new G;this.subPaths=[];this.currentPath=null}function oe(a){this.type="Font";this.data=a}function uf(a){this.manager=void 0!==a?a:ta}function pe(a){this.manager=void 0!==a?a:ta}function vf(){this.type="StereoCamera";this.aspect=1;this.eyeSep=.064;this.cameraL=new V;this.cameraL.layers.enable(1);this.cameraL.matrixAutoUpdate=!1;this.cameraR=new V;this.cameraR.layers.enable(2);this.cameraR.matrixAutoUpdate=!1}function id(a,b,
+c,d){D.call(this);this.type="CubeCamera";var e=new V(90,1,a,b);e.up.set(0,-1,0);e.lookAt(new p(1,0,0));this.add(e);var f=new V(90,1,a,b);f.up.set(0,-1,0);f.lookAt(new p(-1,0,0));this.add(f);var g=new V(90,1,a,b);g.up.set(0,0,1);g.lookAt(new p(0,1,0));this.add(g);var h=new V(90,1,a,b);h.up.set(0,0,-1);h.lookAt(new p(0,-1,0));this.add(h);var k=new V(90,1,a,b);k.up.set(0,-1,0);k.lookAt(new p(0,0,1));this.add(k);var m=new V(90,1,a,b);m.up.set(0,-1,0);m.lookAt(new p(0,0,-1));this.add(m);d=d||{format:1022,
+magFilter:1006,minFilter:1006};this.renderTarget=new Jb(c,c,d);this.renderTarget.texture.name="CubeCamera";this.update=function(a,b){null===this.parent&&this.updateMatrixWorld();var c=this.renderTarget,d=c.texture.generateMipmaps;c.texture.generateMipmaps=!1;c.activeCubeFace=0;a.render(b,e,c);c.activeCubeFace=1;a.render(b,f,c);c.activeCubeFace=2;a.render(b,g,c);c.activeCubeFace=3;a.render(b,h,c);c.activeCubeFace=4;a.render(b,k,c);c.texture.generateMipmaps=d;c.activeCubeFace=5;a.render(b,m,c);a.setRenderTarget(null)};
+this.clear=function(a,b,c,d){for(var e=this.renderTarget,f=0;6>f;f++)e.activeCubeFace=f,a.setRenderTarget(e),a.clear(b,c,d);a.setRenderTarget(null)}}function qe(a){this.autoStart=void 0!==a?a:!0;this.elapsedTime=this.oldTime=this.startTime=0;this.running=!1}function re(){D.call(this);this.type="AudioListener";this.context=se.getContext();this.gain=this.context.createGain();this.gain.connect(this.context.destination);this.filter=null;this.timeDelta=0}function lc(a){D.call(this);this.type="Audio";this.listener=
+a;this.context=a.context;this.gain=this.context.createGain();this.gain.connect(a.getInput());this.autoplay=!1;this.buffer=null;this.loop=!1;this.offset=this.startTime=0;this.playbackRate=1;this.isPlaying=!1;this.hasPlaybackControl=!0;this.sourceType="empty";this.filters=[]}function te(a){lc.call(this,a);this.panner=this.context.createPanner();this.panner.connect(this.gain)}function ue(a,b){this.analyser=a.context.createAnalyser();this.analyser.fftSize=void 0!==b?b:2048;this.data=new Uint8Array(this.analyser.frequencyBinCount);
+a.getOutput().connect(this.analyser)}function ve(a,b,c){this.binding=a;this.valueSize=c;a=Float64Array;switch(b){case "quaternion":b=this._slerp;break;case "string":case "bool":a=Array;b=this._select;break;default:b=this._lerp}this.buffer=new a(4*c);this._mixBufferRegion=b;this.referenceCount=this.useCount=this.cumulativeWeight=0}function wf(a,b,c){c=c||oa.parseTrackName(b);this._targetGroup=a;this._bindings=a.subscribe_(b,c)}function oa(a,b,c){this.path=b;this.parsedPath=c||oa.parseTrackName(b);
+this.node=oa.findNode(a,this.parsedPath.nodeName)||a;this.rootNode=a}function xf(){this.uuid=R.generateUUID();this._objects=Array.prototype.slice.call(arguments);this.nCachedObjects_=0;var a={};this._indicesByUUID=a;for(var b=0,c=arguments.length;b!==c;++b)a[arguments[b].uuid]=b;this._paths=[];this._parsedPaths=[];this._bindings=[];this._bindingsIndicesByPath={};var d=this;this.stats={objects:{get total(){return d._objects.length},get inUse(){return this.total-d.nCachedObjects_}},get bindingsPerObject(){return d._bindings.length}}}
+function yf(a,b,c){this._mixer=a;this._clip=b;this._localRoot=c||null;a=b.tracks;b=a.length;c=Array(b);for(var d={endingStart:2400,endingEnd:2400},e=0;e!==b;++e){var f=a[e].createInterpolant(null);c[e]=f;f.settings=d}this._interpolantSettings=d;this._interpolants=c;this._propertyBindings=Array(b);this._weightInterpolant=this._timeScaleInterpolant=this._byClipCacheIndex=this._cacheIndex=null;this.loop=2201;this._loopCount=-1;this._startTime=null;this.time=0;this._effectiveWeight=this.weight=this._effectiveTimeScale=
+this.timeScale=1;this.repetitions=Infinity;this.paused=!1;this.enabled=!0;this.clampWhenFinished=!1;this.zeroSlopeAtEnd=this.zeroSlopeAtStart=!0}function we(a){this._root=a;this._initMemoryManager();this.time=this._accuIndex=0;this.timeScale=1}function Rd(a,b){"string"===typeof a&&(console.warn("THREE.Uniform: Type parameter is no longer needed."),a=b);this.value=a}function xe(){E.call(this);this.type="InstancedBufferGeometry";this.maxInstancedCount=void 0}function ye(a,b,c){sb.call(this,a,b);this.meshPerAttribute=
+c||1}function ze(a,b,c,d){"number"===typeof c&&(d=c,c=!1,console.error("THREE.InstancedBufferAttribute: The constructor now expects normalized as the third argument."));F.call(this,a,b,c);this.meshPerAttribute=d||1}function zf(a,b,c,d){this.ray=new rb(a,b);this.near=c||0;this.far=d||Infinity;this.params={Mesh:{},Line:{},LOD:{},Points:{threshold:1},Sprite:{}};Object.defineProperties(this.params,{PointCloud:{get:function(){console.warn("THREE.Raycaster: params.PointCloud has been renamed to params.Points.");
+return this.Points}}})}function Af(a,b){return a.distance-b.distance}function Ae(a,b,c,d){if(!1!==a.visible&&(a.raycast(b,c),!0===d)){a=a.children;d=0;for(var e=a.length;d<e;d++)Ae(a[d],b,c,!0)}}function Bf(a,b,c){this.radius=void 0!==a?a:1;this.phi=void 0!==b?b:0;this.theta=void 0!==c?c:0;return this}function Cf(a,b,c){this.radius=void 0!==a?a:1;this.theta=void 0!==b?b:0;this.y=void 0!==c?c:0;return this}function Be(a,b){this.min=void 0!==a?a:new z(Infinity,Infinity);this.max=void 0!==b?b:new z(-Infinity,
+-Infinity)}function Ce(a,b){this.start=void 0!==a?a:new p;this.end=void 0!==b?b:new p}function jd(a){D.call(this);this.material=a;this.render=function(){}}function kd(a,b,c,d){this.object=a;this.size=void 0!==b?b:1;a=void 0!==c?c:16711680;d=void 0!==d?d:1;b=0;(c=this.object.geometry)&&c.isGeometry?b=3*c.faces.length:c&&c.isBufferGeometry&&(b=c.attributes.normal.count);c=new E;b=new C(6*b,3);c.addAttribute("position",b);S.call(this,c,new T({color:a,linewidth:d}));this.matrixAutoUpdate=!1;this.update()}
+function mc(a,b){D.call(this);this.light=a;this.light.updateMatrixWorld();this.matrix=a.matrixWorld;this.matrixAutoUpdate=!1;this.color=b;a=new E;b=[0,0,0,0,0,1,0,0,0,1,0,1,0,0,0,-1,0,1,0,0,0,0,1,1,0,0,0,0,-1,1];for(var c=0,d=1;32>c;c++,d++){var e=c/32*Math.PI*2,f=d/32*Math.PI*2;b.push(Math.cos(e),Math.sin(e),1,Math.cos(f),Math.sin(f),1)}a.addAttribute("position",new C(b,3));b=new T({fog:!1});this.cone=new S(a,b);this.add(this.cone);this.update()}function Df(a){var b=[];a&&a.isBone&&b.push(a);for(var c=
+0;c<a.children.length;c++)b.push.apply(b,Df(a.children[c]));return b}function nc(a){for(var b=Df(a),c=new E,d=[],e=[],f=new G(0,0,1),g=new G(0,1,0),h=0;h<b.length;h++){var k=b[h];k.parent&&k.parent.isBone&&(d.push(0,0,0),d.push(0,0,0),e.push(f.r,f.g,f.b),e.push(g.r,g.g,g.b))}c.addAttribute("position",new C(d,3));c.addAttribute("color",new C(e,3));d=new T({vertexColors:2,depthTest:!1,depthWrite:!1,transparent:!0});S.call(this,c,d);this.root=a;this.bones=b;this.matrix=a.matrixWorld;this.matrixAutoUpdate=
+!1}function oc(a,b,c){this.light=a;this.light.updateMatrixWorld();this.color=c;a=new wb(b,4,2);b=new Ea({wireframe:!0,fog:!1});pa.call(this,a,b);this.matrix=this.light.matrixWorld;this.matrixAutoUpdate=!1;this.update()}function pc(a,b){D.call(this);this.light=a;this.light.updateMatrixWorld();this.matrix=a.matrixWorld;this.matrixAutoUpdate=!1;this.color=b;a=new T({fog:!1});b=new E;b.addAttribute("position",new F(new Float32Array(15),3));this.line=new ma(b,a);this.add(this.line);this.update()}function qc(a,
+b,c){D.call(this);this.light=a;this.light.updateMatrixWorld();this.matrix=a.matrixWorld;this.matrixAutoUpdate=!1;this.color=c;a=new tb(b);a.rotateY(.5*Math.PI);this.material=new Ea({wireframe:!0,fog:!1});void 0===this.color&&(this.material.vertexColors=2);b=a.getAttribute("position");b=new Float32Array(3*b.count);a.addAttribute("color",new F(b,3));this.add(new pa(a,this.material));this.update()}function ld(a,b,c,d){a=a||10;b=b||10;c=new G(void 0!==c?c:4473924);d=new G(void 0!==d?d:8947848);var e=
+b/2,f=a/b,g=a/2;a=[];for(var h=[],k=0,m=0,q=-g;k<=b;k++,q+=f){a.push(-g,0,q,g,0,q);a.push(q,0,-g,q,0,g);var n=k===e?c:d;n.toArray(h,m);m+=3;n.toArray(h,m);m+=3;n.toArray(h,m);m+=3;n.toArray(h,m);m+=3}b=new E;b.addAttribute("position",new C(a,3));b.addAttribute("color",new C(h,3));c=new T({vertexColors:2});S.call(this,b,c)}function Sd(a,b,c,d,e,f){a=a||10;b=b||16;c=c||8;d=d||64;e=new G(void 0!==e?e:4473924);f=new G(void 0!==f?f:8947848);var g=[],h=[],k;for(k=0;k<=b;k++){var m=k/b*2*Math.PI;var q=Math.sin(m)*
+a;m=Math.cos(m)*a;g.push(0,0,0);g.push(q,0,m);var n=k&1?e:f;h.push(n.r,n.g,n.b);h.push(n.r,n.g,n.b)}for(k=0;k<=c;k++){n=k&1?e:f;var l=a-a/c*k;for(b=0;b<d;b++)m=b/d*2*Math.PI,q=Math.sin(m)*l,m=Math.cos(m)*l,g.push(q,0,m),h.push(n.r,n.g,n.b),m=(b+1)/d*2*Math.PI,q=Math.sin(m)*l,m=Math.cos(m)*l,g.push(q,0,m),h.push(n.r,n.g,n.b)}a=new E;a.addAttribute("position",new C(g,3));a.addAttribute("color",new C(h,3));g=new T({vertexColors:2});S.call(this,a,g)}function md(a,b,c,d){this.object=a;this.size=void 0!==
+b?b:1;a=void 0!==c?c:16776960;d=void 0!==d?d:1;b=0;(c=this.object.geometry)&&c.isGeometry?b=c.faces.length:console.warn("THREE.FaceNormalsHelper: only THREE.Geometry is supported. Use THREE.VertexNormalsHelper, instead.");c=new E;b=new C(6*b,3);c.addAttribute("position",b);S.call(this,c,new T({color:a,linewidth:d}));this.matrixAutoUpdate=!1;this.update()}function rc(a,b,c){D.call(this);this.light=a;this.light.updateMatrixWorld();this.matrix=a.matrixWorld;this.matrixAutoUpdate=!1;this.color=c;void 0===
+b&&(b=1);a=new E;a.addAttribute("position",new C([-b,b,0,b,b,0,b,-b,0,-b,-b,0,-b,b,0],3));b=new T({fog:!1});this.lightPlane=new ma(a,b);this.add(this.lightPlane);a=new E;a.addAttribute("position",new C([0,0,0,0,0,1],3));this.targetLine=new ma(a,b);this.add(this.targetLine);this.update()}function nd(a){function b(a,b,d){c(a,d);c(b,d)}function c(a,b){f.push(0,0,0);g.push(b.r,b.g,b.b);void 0===h[a]&&(h[a]=[]);h[a].push(f.length/3-1)}var d=new E,e=new T({color:16777215,vertexColors:1}),f=[],g=[],h={},
+k=new G(16755200),m=new G(16711680),q=new G(43775),l=new G(16777215),r=new G(3355443);b("n1","n2",k);b("n2","n4",k);b("n4","n3",k);b("n3","n1",k);b("f1","f2",k);b("f2","f4",k);b("f4","f3",k);b("f3","f1",k);b("n1","f1",k);b("n2","f2",k);b("n3","f3",k);b("n4","f4",k);b("p","n1",m);b("p","n2",m);b("p","n3",m);b("p","n4",m);b("u1","u2",q);b("u2","u3",q);b("u3","u1",q);b("c","t",l);b("p","c",r);b("cn1","cn2",r);b("cn3","cn4",r);b("cf1","cf2",r);b("cf3","cf4",r);d.addAttribute("position",new C(f,3));d.addAttribute("color",
+new C(g,3));S.call(this,d,e);this.camera=a;this.camera.updateProjectionMatrix&&this.camera.updateProjectionMatrix();this.matrix=a.matrixWorld;this.matrixAutoUpdate=!1;this.pointMap=h;this.update()}function bb(a,b){this.object=a;void 0===b&&(b=16776960);a=new Uint16Array([0,1,1,2,2,3,3,0,4,5,5,6,6,7,7,4,0,4,1,5,2,6,3,7]);var c=new Float32Array(24),d=new E;d.setIndex(new F(a,1));d.addAttribute("position",new F(c,3));S.call(this,d,new T({color:b}));this.matrixAutoUpdate=!1;this.update()}function od(a,
+b){this.type="Box3Helper";this.box=a;a=void 0!==b?b:16776960;b=new Uint16Array([0,1,1,2,2,3,3,0,4,5,5,6,6,7,7,4,0,4,1,5,2,6,3,7]);var c=new E;c.setIndex(new F(b,1));c.addAttribute("position",new C([1,1,1,-1,1,1,-1,-1,1,1,-1,1,1,1,-1,-1,1,-1,-1,-1,-1,1,-1,-1],3));S.call(this,c,new T({color:a}));this.geometry.computeBoundingSphere()}function pd(a,b,c){this.type="PlaneHelper";this.plane=a;this.size=void 0===b?1:b;a=void 0!==c?c:16776960;b=new E;b.addAttribute("position",new C([1,-1,1,-1,1,1,-1,-1,1,
+1,1,1,-1,1,1,-1,-1,1,1,-1,1,1,1,1,0,0,1,0,0,0],3));b.computeBoundingSphere();ma.call(this,b,new T({color:a}));b=new E;b.addAttribute("position",new C([1,1,1,-1,1,1,-1,-1,1,1,1,1,-1,-1,1,1,-1,1],3));b.computeBoundingSphere();this.add(new pa(b,new Ea({color:a,opacity:.2,transparent:!0,depthWrite:!1})))}function cb(a,b,c,d,e,f){D.call(this);void 0===a&&(a=new THREE.Vector3(0,0,1));void 0===b&&(b=new THREE.Vector3(0,0,0));void 0===c&&(c=1);void 0===d&&(d=16776960);void 0===e&&(e=.2*c);void 0===f&&(f=
+.2*e);void 0===Td&&(Td=new E,Td.addAttribute("position",new C([0,0,0,0,1,0],3)),De=new $a(0,.5,1,5,1),De.translate(0,-.5,0));this.position.copy(b);this.line=new ma(Td,new T({color:d}));this.line.matrixAutoUpdate=!1;this.add(this.line);this.cone=new pa(De,new Ea({color:d}));this.cone.matrixAutoUpdate=!1;this.add(this.cone);this.setDirection(a);this.setLength(c,e,f)}function qd(a){a=a||1;var b=[0,0,0,a,0,0,0,0,0,0,a,0,0,0,0,0,0,a];a=new E;a.addAttribute("position",new C(b,3));a.addAttribute("color",
+new C([1,0,0,1,.6,0,0,1,0,.6,1,0,0,0,1,0,.6,1],3));b=new T({vertexColors:2});S.call(this,a,b)}function Ef(a){console.warn("THREE.ClosedSplineCurve3 has been deprecated. Use THREE.CatmullRomCurve3 instead.");ua.call(this,a);this.type="catmullrom";this.closed=!0}function Ff(a){console.warn("THREE.SplineCurve3 has been deprecated. Use THREE.CatmullRomCurve3 instead.");ua.call(this,a);this.type="catmullrom"}function Ee(a){console.warn("THREE.Spline has been removed. Use THREE.CatmullRomCurve3 instead.");
+ua.call(this,a);this.type="catmullrom"}void 0===Number.EPSILON&&(Number.EPSILON=Math.pow(2,-52));void 0===Number.isInteger&&(Number.isInteger=function(a){return"number"===typeof a&&isFinite(a)&&Math.floor(a)===a});void 0===Math.sign&&(Math.sign=function(a){return 0>a?-1:0<a?1:+a});!1==="name"in Function.prototype&&Object.defineProperty(Function.prototype,"name",{get:function(){return this.toString().match(/^\s*function\s*([^\(\s]*)/)[1]}});void 0===Object.assign&&function(){Object.assign=function(a){if(void 0===
+a||null===a)throw new TypeError("Cannot convert undefined or null to object");for(var b=Object(a),c=1;c<arguments.length;c++){var d=arguments[c];if(void 0!==d&&null!==d)for(var e in d)Object.prototype.hasOwnProperty.call(d,e)&&(b[e]=d[e])}return b}}();Object.assign(ia.prototype,{addEventListener:function(a,b){void 0===this._listeners&&(this._listeners={});var c=this._listeners;void 0===c[a]&&(c[a]=[]);-1===c[a].indexOf(b)&&c[a].push(b)},hasEventListener:function(a,b){if(void 0===this._listeners)return!1;
+var c=this._listeners;return void 0!==c[a]&&-1!==c[a].indexOf(b)},removeEventListener:function(a,b){void 0!==this._listeners&&(a=this._listeners[a],void 0!==a&&(b=a.indexOf(b),-1!==b&&a.splice(b,1)))},dispatchEvent:function(a){if(void 0!==this._listeners){var b=this._listeners[a.type];if(void 0!==b){a.target=this;b=b.slice(0);for(var c=0,d=b.length;c<d;c++)b[c].call(this,a)}}}});var R={DEG2RAD:Math.PI/180,RAD2DEG:180/Math.PI,generateUUID:function(){for(var a=[],b=0;256>b;b++)a[b]=(16>b?"0":"")+b.toString(16);
+return function(){var b=4294967295*Math.random()|0,d=4294967295*Math.random()|0,e=4294967295*Math.random()|0,f=4294967295*Math.random()|0;return(a[b&255]+a[b>>8&255]+a[b>>16&255]+a[b>>24&255]+"-"+a[d&255]+a[d>>8&255]+"-"+a[d>>16&15|64]+a[d>>24&255]+"-"+a[e&63|128]+a[e>>8&255]+"-"+a[e>>16&255]+a[e>>24&255]+a[f&255]+a[f>>8&255]+a[f>>16&255]+a[f>>24&255]).toUpperCase()}}(),clamp:function(a,b,c){return Math.max(b,Math.min(c,a))},euclideanModulo:function(a,b){return(a%b+b)%b},mapLinear:function(a,b,c,
+d,e){return d+(a-b)*(e-d)/(c-b)},lerp:function(a,b,c){return(1-c)*a+c*b},smoothstep:function(a,b,c){if(a<=b)return 0;if(a>=c)return 1;a=(a-b)/(c-b);return a*a*(3-2*a)},smootherstep:function(a,b,c){if(a<=b)return 0;if(a>=c)return 1;a=(a-b)/(c-b);return a*a*a*(a*(6*a-15)+10)},randInt:function(a,b){return a+Math.floor(Math.random()*(b-a+1))},randFloat:function(a,b){return a+Math.random()*(b-a)},randFloatSpread:function(a){return a*(.5-Math.random())},degToRad:function(a){return a*R.DEG2RAD},radToDeg:function(a){return a*
+R.RAD2DEG},isPowerOfTwo:function(a){return 0===(a&a-1)&&0!==a},ceilPowerOfTwo:function(a){return Math.pow(2,Math.ceil(Math.log(a)/Math.LN2))},floorPowerOfTwo:function(a){return Math.pow(2,Math.floor(Math.log(a)/Math.LN2))}};Object.defineProperties(z.prototype,{width:{get:function(){return this.x},set:function(a){this.x=a}},height:{get:function(){return this.y},set:function(a){this.y=a}}});Object.assign(z.prototype,{isVector2:!0,set:function(a,b){this.x=a;this.y=b;return this},setScalar:function(a){this.y=
+this.x=a;return this},setX:function(a){this.x=a;return this},setY:function(a){this.y=a;return this},setComponent:function(a,b){switch(a){case 0:this.x=b;break;case 1:this.y=b;break;default:throw Error("index is out of range: "+a);}return this},getComponent:function(a){switch(a){case 0:return this.x;case 1:return this.y;default:throw Error("index is out of range: "+a);}},clone:function(){return new this.constructor(this.x,this.y)},copy:function(a){this.x=a.x;this.y=a.y;return this},add:function(a,
+b){if(void 0!==b)return console.warn("THREE.Vector2: .add() now only accepts one argument. Use .addVectors( a, b ) instead."),this.addVectors(a,b);this.x+=a.x;this.y+=a.y;return this},addScalar:function(a){this.x+=a;this.y+=a;return this},addVectors:function(a,b){this.x=a.x+b.x;this.y=a.y+b.y;return this},addScaledVector:function(a,b){this.x+=a.x*b;this.y+=a.y*b;return this},sub:function(a,b){if(void 0!==b)return console.warn("THREE.Vector2: .sub() now only accepts one argument. Use .subVectors( a, b ) instead."),
+this.subVectors(a,b);this.x-=a.x;this.y-=a.y;return this},subScalar:function(a){this.x-=a;this.y-=a;return this},subVectors:function(a,b){this.x=a.x-b.x;this.y=a.y-b.y;return this},multiply:function(a){this.x*=a.x;this.y*=a.y;return this},multiplyScalar:function(a){this.x*=a;this.y*=a;return this},divide:function(a){this.x/=a.x;this.y/=a.y;return this},divideScalar:function(a){return this.multiplyScalar(1/a)},applyMatrix3:function(a){var b=this.x,c=this.y;a=a.elements;this.x=a[0]*b+a[3]*c+a[6];this.y=
+a[1]*b+a[4]*c+a[7];return this},min:function(a){this.x=Math.min(this.x,a.x);this.y=Math.min(this.y,a.y);return this},max:function(a){this.x=Math.max(this.x,a.x);this.y=Math.max(this.y,a.y);return this},clamp:function(a,b){this.x=Math.max(a.x,Math.min(b.x,this.x));this.y=Math.max(a.y,Math.min(b.y,this.y));return this},clampScalar:function(){var a=new z,b=new z;return function(c,d){a.set(c,c);b.set(d,d);return this.clamp(a,b)}}(),clampLength:function(a,b){var c=this.length();return this.divideScalar(c||
+1).multiplyScalar(Math.max(a,Math.min(b,c)))},floor:function(){this.x=Math.floor(this.x);this.y=Math.floor(this.y);return this},ceil:function(){this.x=Math.ceil(this.x);this.y=Math.ceil(this.y);return this},round:function(){this.x=Math.round(this.x);this.y=Math.round(this.y);return this},roundToZero:function(){this.x=0>this.x?Math.ceil(this.x):Math.floor(this.x);this.y=0>this.y?Math.ceil(this.y):Math.floor(this.y);return this},negate:function(){this.x=-this.x;this.y=-this.y;return this},dot:function(a){return this.x*
+a.x+this.y*a.y},cross:function(a){return this.x*a.y-this.y*a.x},lengthSq:function(){return this.x*this.x+this.y*this.y},length:function(){return Math.sqrt(this.x*this.x+this.y*this.y)},manhattanLength:function(){return Math.abs(this.x)+Math.abs(this.y)},normalize:function(){return this.divideScalar(this.length()||1)},angle:function(){var a=Math.atan2(this.y,this.x);0>a&&(a+=2*Math.PI);return a},distanceTo:function(a){return Math.sqrt(this.distanceToSquared(a))},distanceToSquared:function(a){var b=
+this.x-a.x;a=this.y-a.y;return b*b+a*a},manhattanDistanceTo:function(a){return Math.abs(this.x-a.x)+Math.abs(this.y-a.y)},setLength:function(a){return this.normalize().multiplyScalar(a)},lerp:function(a,b){this.x+=(a.x-this.x)*b;this.y+=(a.y-this.y)*b;return this},lerpVectors:function(a,b,c){return this.subVectors(b,a).multiplyScalar(c).add(a)},equals:function(a){return a.x===this.x&&a.y===this.y},fromArray:function(a,b){void 0===b&&(b=0);this.x=a[b];this.y=a[b+1];return this},toArray:function(a,
+b){void 0===a&&(a=[]);void 0===b&&(b=0);a[b]=this.x;a[b+1]=this.y;return a},fromBufferAttribute:function(a,b,c){void 0!==c&&console.warn("THREE.Vector2: offset has been removed from .fromBufferAttribute().");this.x=a.getX(b);this.y=a.getY(b);return this},rotateAround:function(a,b){var c=Math.cos(b);b=Math.sin(b);var d=this.x-a.x,e=this.y-a.y;this.x=d*c-e*b+a.x;this.y=d*b+e*c+a.y;return this}});Object.assign(P.prototype,{isMatrix4:!0,set:function(a,b,c,d,e,f,g,h,k,m,q,l,r,p,t,u){var n=this.elements;
+n[0]=a;n[4]=b;n[8]=c;n[12]=d;n[1]=e;n[5]=f;n[9]=g;n[13]=h;n[2]=k;n[6]=m;n[10]=q;n[14]=l;n[3]=r;n[7]=p;n[11]=t;n[15]=u;return this},identity:function(){this.set(1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1);return this},clone:function(){return(new P).fromArray(this.elements)},copy:function(a){var b=this.elements;a=a.elements;b[0]=a[0];b[1]=a[1];b[2]=a[2];b[3]=a[3];b[4]=a[4];b[5]=a[5];b[6]=a[6];b[7]=a[7];b[8]=a[8];b[9]=a[9];b[10]=a[10];b[11]=a[11];b[12]=a[12];b[13]=a[13];b[14]=a[14];b[15]=a[15];return this},copyPosition:function(a){var b=
+this.elements;a=a.elements;b[12]=a[12];b[13]=a[13];b[14]=a[14];return this},extractBasis:function(a,b,c){a.setFromMatrixColumn(this,0);b.setFromMatrixColumn(this,1);c.setFromMatrixColumn(this,2);return this},makeBasis:function(a,b,c){this.set(a.x,b.x,c.x,0,a.y,b.y,c.y,0,a.z,b.z,c.z,0,0,0,0,1);return this},extractRotation:function(){var a=new p;return function(b){var c=this.elements,d=b.elements,e=1/a.setFromMatrixColumn(b,0).length(),f=1/a.setFromMatrixColumn(b,1).length();b=1/a.setFromMatrixColumn(b,
+2).length();c[0]=d[0]*e;c[1]=d[1]*e;c[2]=d[2]*e;c[3]=0;c[4]=d[4]*f;c[5]=d[5]*f;c[6]=d[6]*f;c[7]=0;c[8]=d[8]*b;c[9]=d[9]*b;c[10]=d[10]*b;c[11]=0;c[12]=0;c[13]=0;c[14]=0;c[15]=1;return this}}(),makeRotationFromEuler:function(a){a&&a.isEuler||console.error("THREE.Matrix4: .makeRotationFromEuler() now expects a Euler rotation rather than a Vector3 and order.");var b=this.elements,c=a.x,d=a.y,e=a.z,f=Math.cos(c);c=Math.sin(c);var g=Math.cos(d);d=Math.sin(d);var h=Math.cos(e);e=Math.sin(e);if("XYZ"===a.order){a=
+f*h;var k=f*e,m=c*h,q=c*e;b[0]=g*h;b[4]=-g*e;b[8]=d;b[1]=k+m*d;b[5]=a-q*d;b[9]=-c*g;b[2]=q-a*d;b[6]=m+k*d;b[10]=f*g}else"YXZ"===a.order?(a=g*h,k=g*e,m=d*h,q=d*e,b[0]=a+q*c,b[4]=m*c-k,b[8]=f*d,b[1]=f*e,b[5]=f*h,b[9]=-c,b[2]=k*c-m,b[6]=q+a*c,b[10]=f*g):"ZXY"===a.order?(a=g*h,k=g*e,m=d*h,q=d*e,b[0]=a-q*c,b[4]=-f*e,b[8]=m+k*c,b[1]=k+m*c,b[5]=f*h,b[9]=q-a*c,b[2]=-f*d,b[6]=c,b[10]=f*g):"ZYX"===a.order?(a=f*h,k=f*e,m=c*h,q=c*e,b[0]=g*h,b[4]=m*d-k,b[8]=a*d+q,b[1]=g*e,b[5]=q*d+a,b[9]=k*d-m,b[2]=-d,b[6]=c*
+g,b[10]=f*g):"YZX"===a.order?(a=f*g,k=f*d,m=c*g,q=c*d,b[0]=g*h,b[4]=q-a*e,b[8]=m*e+k,b[1]=e,b[5]=f*h,b[9]=-c*h,b[2]=-d*h,b[6]=k*e+m,b[10]=a-q*e):"XZY"===a.order&&(a=f*g,k=f*d,m=c*g,q=c*d,b[0]=g*h,b[4]=-e,b[8]=d*h,b[1]=a*e+q,b[5]=f*h,b[9]=k*e-m,b[2]=m*e-k,b[6]=c*h,b[10]=q*e+a);b[3]=0;b[7]=0;b[11]=0;b[12]=0;b[13]=0;b[14]=0;b[15]=1;return this},makeRotationFromQuaternion:function(){var a=new p(0,0,0),b=new p(1,1,1);return function(c){return this.compose(a,c,b)}}(),lookAt:function(){var a=new p,b=new p,
+c=new p;return function(d,e,f){var g=this.elements;c.subVectors(d,e);0===c.lengthSq()&&(c.z=1);c.normalize();a.crossVectors(f,c);0===a.lengthSq()&&(1===Math.abs(f.z)?c.x+=1E-4:c.z+=1E-4,c.normalize(),a.crossVectors(f,c));a.normalize();b.crossVectors(c,a);g[0]=a.x;g[4]=b.x;g[8]=c.x;g[1]=a.y;g[5]=b.y;g[9]=c.y;g[2]=a.z;g[6]=b.z;g[10]=c.z;return this}}(),multiply:function(a,b){return void 0!==b?(console.warn("THREE.Matrix4: .multiply() now only accepts one argument. Use .multiplyMatrices( a, b ) instead."),
+this.multiplyMatrices(a,b)):this.multiplyMatrices(this,a)},premultiply:function(a){return this.multiplyMatrices(a,this)},multiplyMatrices:function(a,b){var c=a.elements,d=b.elements;b=this.elements;a=c[0];var e=c[4],f=c[8],g=c[12],h=c[1],k=c[5],m=c[9],q=c[13],n=c[2],l=c[6],p=c[10],t=c[14],u=c[3],w=c[7],A=c[11];c=c[15];var v=d[0],H=d[4],y=d[8],N=d[12],z=d[1],B=d[5],C=d[9],E=d[13],D=d[2],F=d[6],G=d[10],J=d[14],L=d[3],I=d[7],K=d[11];d=d[15];b[0]=a*v+e*z+f*D+g*L;b[4]=a*H+e*B+f*F+g*I;b[8]=a*y+e*C+f*G+
+g*K;b[12]=a*N+e*E+f*J+g*d;b[1]=h*v+k*z+m*D+q*L;b[5]=h*H+k*B+m*F+q*I;b[9]=h*y+k*C+m*G+q*K;b[13]=h*N+k*E+m*J+q*d;b[2]=n*v+l*z+p*D+t*L;b[6]=n*H+l*B+p*F+t*I;b[10]=n*y+l*C+p*G+t*K;b[14]=n*N+l*E+p*J+t*d;b[3]=u*v+w*z+A*D+c*L;b[7]=u*H+w*B+A*F+c*I;b[11]=u*y+w*C+A*G+c*K;b[15]=u*N+w*E+A*J+c*d;return this},multiplyScalar:function(a){var b=this.elements;b[0]*=a;b[4]*=a;b[8]*=a;b[12]*=a;b[1]*=a;b[5]*=a;b[9]*=a;b[13]*=a;b[2]*=a;b[6]*=a;b[10]*=a;b[14]*=a;b[3]*=a;b[7]*=a;b[11]*=a;b[15]*=a;return this},applyToBufferAttribute:function(){var a=
+new p;return function(b){for(var c=0,d=b.count;c<d;c++)a.x=b.getX(c),a.y=b.getY(c),a.z=b.getZ(c),a.applyMatrix4(this),b.setXYZ(c,a.x,a.y,a.z);return b}}(),determinant:function(){var a=this.elements,b=a[0],c=a[4],d=a[8],e=a[12],f=a[1],g=a[5],h=a[9],k=a[13],m=a[2],q=a[6],l=a[10],r=a[14];return a[3]*(+e*h*q-d*k*q-e*g*l+c*k*l+d*g*r-c*h*r)+a[7]*(+b*h*r-b*k*l+e*f*l-d*f*r+d*k*m-e*h*m)+a[11]*(+b*k*q-b*g*r-e*f*q+c*f*r+e*g*m-c*k*m)+a[15]*(-d*g*m-b*h*q+b*g*l+d*f*q-c*f*l+c*h*m)},transpose:function(){var a=this.elements;
+var b=a[1];a[1]=a[4];a[4]=b;b=a[2];a[2]=a[8];a[8]=b;b=a[6];a[6]=a[9];a[9]=b;b=a[3];a[3]=a[12];a[12]=b;b=a[7];a[7]=a[13];a[13]=b;b=a[11];a[11]=a[14];a[14]=b;return this},setPosition:function(a){var b=this.elements;b[12]=a.x;b[13]=a.y;b[14]=a.z;return this},getInverse:function(a,b){var c=this.elements,d=a.elements;a=d[0];var e=d[1],f=d[2],g=d[3],h=d[4],k=d[5],m=d[6],q=d[7],l=d[8],r=d[9],p=d[10],t=d[11],u=d[12],w=d[13],A=d[14];d=d[15];var v=r*A*q-w*p*q+w*m*t-k*A*t-r*m*d+k*p*d,z=u*p*q-l*A*q-u*m*t+h*A*
+t+l*m*d-h*p*d,y=l*w*q-u*r*q+u*k*t-h*w*t-l*k*d+h*r*d,N=u*r*m-l*w*m-u*k*p+h*w*p+l*k*A-h*r*A,C=a*v+e*z+f*y+g*N;if(0===C){if(!0===b)throw Error("THREE.Matrix4: .getInverse() can't invert matrix, determinant is 0");console.warn("THREE.Matrix4: .getInverse() can't invert matrix, determinant is 0");return this.identity()}b=1/C;c[0]=v*b;c[1]=(w*p*g-r*A*g-w*f*t+e*A*t+r*f*d-e*p*d)*b;c[2]=(k*A*g-w*m*g+w*f*q-e*A*q-k*f*d+e*m*d)*b;c[3]=(r*m*g-k*p*g-r*f*q+e*p*q+k*f*t-e*m*t)*b;c[4]=z*b;c[5]=(l*A*g-u*p*g+u*f*t-a*
+A*t-l*f*d+a*p*d)*b;c[6]=(u*m*g-h*A*g-u*f*q+a*A*q+h*f*d-a*m*d)*b;c[7]=(h*p*g-l*m*g+l*f*q-a*p*q-h*f*t+a*m*t)*b;c[8]=y*b;c[9]=(u*r*g-l*w*g-u*e*t+a*w*t+l*e*d-a*r*d)*b;c[10]=(h*w*g-u*k*g+u*e*q-a*w*q-h*e*d+a*k*d)*b;c[11]=(l*k*g-h*r*g-l*e*q+a*r*q+h*e*t-a*k*t)*b;c[12]=N*b;c[13]=(l*w*f-u*r*f+u*e*p-a*w*p-l*e*A+a*r*A)*b;c[14]=(u*k*f-h*w*f-u*e*m+a*w*m+h*e*A-a*k*A)*b;c[15]=(h*r*f-l*k*f+l*e*m-a*r*m-h*e*p+a*k*p)*b;return this},scale:function(a){var b=this.elements,c=a.x,d=a.y;a=a.z;b[0]*=c;b[4]*=d;b[8]*=a;b[1]*=
+c;b[5]*=d;b[9]*=a;b[2]*=c;b[6]*=d;b[10]*=a;b[3]*=c;b[7]*=d;b[11]*=a;return this},getMaxScaleOnAxis:function(){var a=this.elements;return Math.sqrt(Math.max(a[0]*a[0]+a[1]*a[1]+a[2]*a[2],a[4]*a[4]+a[5]*a[5]+a[6]*a[6],a[8]*a[8]+a[9]*a[9]+a[10]*a[10]))},makeTranslation:function(a,b,c){this.set(1,0,0,a,0,1,0,b,0,0,1,c,0,0,0,1);return this},makeRotationX:function(a){var b=Math.cos(a);a=Math.sin(a);this.set(1,0,0,0,0,b,-a,0,0,a,b,0,0,0,0,1);return this},makeRotationY:function(a){var b=Math.cos(a);a=Math.sin(a);
+this.set(b,0,a,0,0,1,0,0,-a,0,b,0,0,0,0,1);return this},makeRotationZ:function(a){var b=Math.cos(a);a=Math.sin(a);this.set(b,-a,0,0,a,b,0,0,0,0,1,0,0,0,0,1);return this},makeRotationAxis:function(a,b){var c=Math.cos(b);b=Math.sin(b);var d=1-c,e=a.x,f=a.y;a=a.z;var g=d*e,h=d*f;this.set(g*e+c,g*f-b*a,g*a+b*f,0,g*f+b*a,h*f+c,h*a-b*e,0,g*a-b*f,h*a+b*e,d*a*a+c,0,0,0,0,1);return this},makeScale:function(a,b,c){this.set(a,0,0,0,0,b,0,0,0,0,c,0,0,0,0,1);return this},makeShear:function(a,b,c){this.set(1,b,
+c,0,a,1,c,0,a,b,1,0,0,0,0,1);return this},compose:function(a,b,c){var d=this.elements,e=b._x,f=b._y,g=b._z,h=b._w,k=e+e,m=f+f,l=g+g;b=e*k;var n=e*m;e*=l;var r=f*m;f*=l;g*=l;k*=h;m*=h;h*=l;l=c.x;var p=c.y;c=c.z;d[0]=(1-(r+g))*l;d[1]=(n+h)*l;d[2]=(e-m)*l;d[3]=0;d[4]=(n-h)*p;d[5]=(1-(b+g))*p;d[6]=(f+k)*p;d[7]=0;d[8]=(e+m)*c;d[9]=(f-k)*c;d[10]=(1-(b+r))*c;d[11]=0;d[12]=a.x;d[13]=a.y;d[14]=a.z;d[15]=1;return this},decompose:function(){var a=new p,b=new P;return function(c,d,e){var f=this.elements,g=a.set(f[0],
+f[1],f[2]).length(),h=a.set(f[4],f[5],f[6]).length(),k=a.set(f[8],f[9],f[10]).length();0>this.determinant()&&(g=-g);c.x=f[12];c.y=f[13];c.z=f[14];b.copy(this);c=1/g;f=1/h;var m=1/k;b.elements[0]*=c;b.elements[1]*=c;b.elements[2]*=c;b.elements[4]*=f;b.elements[5]*=f;b.elements[6]*=f;b.elements[8]*=m;b.elements[9]*=m;b.elements[10]*=m;d.setFromRotationMatrix(b);e.x=g;e.y=h;e.z=k;return this}}(),makePerspective:function(a,b,c,d,e,f){void 0===f&&console.warn("THREE.Matrix4: .makePerspective() has been redefined and has a new signature. Please check the docs.");
+var g=this.elements;g[0]=2*e/(b-a);g[4]=0;g[8]=(b+a)/(b-a);g[12]=0;g[1]=0;g[5]=2*e/(c-d);g[9]=(c+d)/(c-d);g[13]=0;g[2]=0;g[6]=0;g[10]=-(f+e)/(f-e);g[14]=-2*f*e/(f-e);g[3]=0;g[7]=0;g[11]=-1;g[15]=0;return this},makeOrthographic:function(a,b,c,d,e,f){var g=this.elements,h=1/(b-a),k=1/(c-d),m=1/(f-e);g[0]=2*h;g[4]=0;g[8]=0;g[12]=-((b+a)*h);g[1]=0;g[5]=2*k;g[9]=0;g[13]=-((c+d)*k);g[2]=0;g[6]=0;g[10]=-2*m;g[14]=-((f+e)*m);g[3]=0;g[7]=0;g[11]=0;g[15]=1;return this},equals:function(a){var b=this.elements;
+a=a.elements;for(var c=0;16>c;c++)if(b[c]!==a[c])return!1;return!0},fromArray:function(a,b){void 0===b&&(b=0);for(var c=0;16>c;c++)this.elements[c]=a[c+b];return this},toArray:function(a,b){void 0===a&&(a=[]);void 0===b&&(b=0);var c=this.elements;a[b]=c[0];a[b+1]=c[1];a[b+2]=c[2];a[b+3]=c[3];a[b+4]=c[4];a[b+5]=c[5];a[b+6]=c[6];a[b+7]=c[7];a[b+8]=c[8];a[b+9]=c[9];a[b+10]=c[10];a[b+11]=c[11];a[b+12]=c[12];a[b+13]=c[13];a[b+14]=c[14];a[b+15]=c[15];return a}});Object.assign(ja,{slerp:function(a,b,c,d){return c.copy(a).slerp(b,
+d)},slerpFlat:function(a,b,c,d,e,f,g){var h=c[d+0],k=c[d+1],m=c[d+2];c=c[d+3];d=e[f+0];var l=e[f+1],n=e[f+2];e=e[f+3];if(c!==e||h!==d||k!==l||m!==n){f=1-g;var r=h*d+k*l+m*n+c*e,p=0<=r?1:-1,t=1-r*r;t>Number.EPSILON&&(t=Math.sqrt(t),r=Math.atan2(t,r*p),f=Math.sin(f*r)/t,g=Math.sin(g*r)/t);p*=g;h=h*f+d*p;k=k*f+l*p;m=m*f+n*p;c=c*f+e*p;f===1-g&&(g=1/Math.sqrt(h*h+k*k+m*m+c*c),h*=g,k*=g,m*=g,c*=g)}a[b]=h;a[b+1]=k;a[b+2]=m;a[b+3]=c}});Object.defineProperties(ja.prototype,{x:{get:function(){return this._x},
+set:function(a){this._x=a;this.onChangeCallback()}},y:{get:function(){return this._y},set:function(a){this._y=a;this.onChangeCallback()}},z:{get:function(){return this._z},set:function(a){this._z=a;this.onChangeCallback()}},w:{get:function(){return this._w},set:function(a){this._w=a;this.onChangeCallback()}}});Object.assign(ja.prototype,{isQuaternion:!0,set:function(a,b,c,d){this._x=a;this._y=b;this._z=c;this._w=d;this.onChangeCallback();return this},clone:function(){return new this.constructor(this._x,
+this._y,this._z,this._w)},copy:function(a){this._x=a.x;this._y=a.y;this._z=a.z;this._w=a.w;this.onChangeCallback();return this},setFromEuler:function(a,b){if(!a||!a.isEuler)throw Error("THREE.Quaternion: .setFromEuler() now expects an Euler rotation rather than a Vector3 and order.");var c=a._x,d=a._y,e=a._z;a=a.order;var f=Math.cos,g=Math.sin,h=f(c/2),k=f(d/2);f=f(e/2);c=g(c/2);d=g(d/2);e=g(e/2);"XYZ"===a?(this._x=c*k*f+h*d*e,this._y=h*d*f-c*k*e,this._z=h*k*e+c*d*f,this._w=h*k*f-c*d*e):"YXZ"===a?
+(this._x=c*k*f+h*d*e,this._y=h*d*f-c*k*e,this._z=h*k*e-c*d*f,this._w=h*k*f+c*d*e):"ZXY"===a?(this._x=c*k*f-h*d*e,this._y=h*d*f+c*k*e,this._z=h*k*e+c*d*f,this._w=h*k*f-c*d*e):"ZYX"===a?(this._x=c*k*f-h*d*e,this._y=h*d*f+c*k*e,this._z=h*k*e-c*d*f,this._w=h*k*f+c*d*e):"YZX"===a?(this._x=c*k*f+h*d*e,this._y=h*d*f+c*k*e,this._z=h*k*e-c*d*f,this._w=h*k*f-c*d*e):"XZY"===a&&(this._x=c*k*f-h*d*e,this._y=h*d*f-c*k*e,this._z=h*k*e+c*d*f,this._w=h*k*f+c*d*e);if(!1!==b)this.onChangeCallback();return this},setFromAxisAngle:function(a,
+b){b/=2;var c=Math.sin(b);this._x=a.x*c;this._y=a.y*c;this._z=a.z*c;this._w=Math.cos(b);this.onChangeCallback();return this},setFromRotationMatrix:function(a){var b=a.elements,c=b[0];a=b[4];var d=b[8],e=b[1],f=b[5],g=b[9],h=b[2],k=b[6];b=b[10];var m=c+f+b;0<m?(c=.5/Math.sqrt(m+1),this._w=.25/c,this._x=(k-g)*c,this._y=(d-h)*c,this._z=(e-a)*c):c>f&&c>b?(c=2*Math.sqrt(1+c-f-b),this._w=(k-g)/c,this._x=.25*c,this._y=(a+e)/c,this._z=(d+h)/c):f>b?(c=2*Math.sqrt(1+f-c-b),this._w=(d-h)/c,this._x=(a+e)/c,this._y=
+.25*c,this._z=(g+k)/c):(c=2*Math.sqrt(1+b-c-f),this._w=(e-a)/c,this._x=(d+h)/c,this._y=(g+k)/c,this._z=.25*c);this.onChangeCallback();return this},setFromUnitVectors:function(){var a=new p,b;return function(c,d){void 0===a&&(a=new p);b=c.dot(d)+1;1E-6>b?(b=0,Math.abs(c.x)>Math.abs(c.z)?a.set(-c.y,c.x,0):a.set(0,-c.z,c.y)):a.crossVectors(c,d);this._x=a.x;this._y=a.y;this._z=a.z;this._w=b;return this.normalize()}}(),angleTo:function(a){return 2*Math.acos(Math.abs(R.clamp(this.dot(a),-1,1)))},rotateTowards:function(a,
+b){var c=this.angleTo(a);if(0===c)return this;this.slerp(a,Math.min(1,b/c));return this},inverse:function(){return this.conjugate()},conjugate:function(){this._x*=-1;this._y*=-1;this._z*=-1;this.onChangeCallback();return this},dot:function(a){return this._x*a._x+this._y*a._y+this._z*a._z+this._w*a._w},lengthSq:function(){return this._x*this._x+this._y*this._y+this._z*this._z+this._w*this._w},length:function(){return Math.sqrt(this._x*this._x+this._y*this._y+this._z*this._z+this._w*this._w)},normalize:function(){var a=
+this.length();0===a?(this._z=this._y=this._x=0,this._w=1):(a=1/a,this._x*=a,this._y*=a,this._z*=a,this._w*=a);this.onChangeCallback();return this},multiply:function(a,b){return void 0!==b?(console.warn("THREE.Quaternion: .multiply() now only accepts one argument. Use .multiplyQuaternions( a, b ) instead."),this.multiplyQuaternions(a,b)):this.multiplyQuaternions(this,a)},premultiply:function(a){return this.multiplyQuaternions(a,this)},multiplyQuaternions:function(a,b){var c=a._x,d=a._y,e=a._z;a=a._w;
+var f=b._x,g=b._y,h=b._z;b=b._w;this._x=c*b+a*f+d*h-e*g;this._y=d*b+a*g+e*f-c*h;this._z=e*b+a*h+c*g-d*f;this._w=a*b-c*f-d*g-e*h;this.onChangeCallback();return this},slerp:function(a,b){if(0===b)return this;if(1===b)return this.copy(a);var c=this._x,d=this._y,e=this._z,f=this._w,g=f*a._w+c*a._x+d*a._y+e*a._z;0>g?(this._w=-a._w,this._x=-a._x,this._y=-a._y,this._z=-a._z,g=-g):this.copy(a);if(1<=g)return this._w=f,this._x=c,this._y=d,this._z=e,this;a=1-g*g;if(a<=Number.EPSILON)return g=1-b,this._w=g*
+f+b*this._w,this._x=g*c+b*this._x,this._y=g*d+b*this._y,this._z=g*e+b*this._z,this.normalize();a=Math.sqrt(a);var h=Math.atan2(a,g);g=Math.sin((1-b)*h)/a;b=Math.sin(b*h)/a;this._w=f*g+this._w*b;this._x=c*g+this._x*b;this._y=d*g+this._y*b;this._z=e*g+this._z*b;this.onChangeCallback();return this},equals:function(a){return a._x===this._x&&a._y===this._y&&a._z===this._z&&a._w===this._w},fromArray:function(a,b){void 0===b&&(b=0);this._x=a[b];this._y=a[b+1];this._z=a[b+2];this._w=a[b+3];this.onChangeCallback();
+return this},toArray:function(a,b){void 0===a&&(a=[]);void 0===b&&(b=0);a[b]=this._x;a[b+1]=this._y;a[b+2]=this._z;a[b+3]=this._w;return a},onChange:function(a){this.onChangeCallback=a;return this},onChangeCallback:function(){}});Object.assign(p.prototype,{isVector3:!0,set:function(a,b,c){this.x=a;this.y=b;this.z=c;return this},setScalar:function(a){this.z=this.y=this.x=a;return this},setX:function(a){this.x=a;return this},setY:function(a){this.y=a;return this},setZ:function(a){this.z=a;return this},
+setComponent:function(a,b){switch(a){case 0:this.x=b;break;case 1:this.y=b;break;case 2:this.z=b;break;default:throw Error("index is out of range: "+a);}return this},getComponent:function(a){switch(a){case 0:return this.x;case 1:return this.y;case 2:return this.z;default:throw Error("index is out of range: "+a);}},clone:function(){return new this.constructor(this.x,this.y,this.z)},copy:function(a){this.x=a.x;this.y=a.y;this.z=a.z;return this},add:function(a,b){if(void 0!==b)return console.warn("THREE.Vector3: .add() now only accepts one argument. Use .addVectors( a, b ) instead."),
+this.addVectors(a,b);this.x+=a.x;this.y+=a.y;this.z+=a.z;return this},addScalar:function(a){this.x+=a;this.y+=a;this.z+=a;return this},addVectors:function(a,b){this.x=a.x+b.x;this.y=a.y+b.y;this.z=a.z+b.z;return this},addScaledVector:function(a,b){this.x+=a.x*b;this.y+=a.y*b;this.z+=a.z*b;return this},sub:function(a,b){if(void 0!==b)return console.warn("THREE.Vector3: .sub() now only accepts one argument. Use .subVectors( a, b ) instead."),this.subVectors(a,b);this.x-=a.x;this.y-=a.y;this.z-=a.z;
+return this},subScalar:function(a){this.x-=a;this.y-=a;this.z-=a;return this},subVectors:function(a,b){this.x=a.x-b.x;this.y=a.y-b.y;this.z=a.z-b.z;return this},multiply:function(a,b){if(void 0!==b)return console.warn("THREE.Vector3: .multiply() now only accepts one argument. Use .multiplyVectors( a, b ) instead."),this.multiplyVectors(a,b);this.x*=a.x;this.y*=a.y;this.z*=a.z;return this},multiplyScalar:function(a){this.x*=a;this.y*=a;this.z*=a;return this},multiplyVectors:function(a,b){this.x=a.x*
+b.x;this.y=a.y*b.y;this.z=a.z*b.z;return this},applyEuler:function(){var a=new ja;return function(b){b&&b.isEuler||console.error("THREE.Vector3: .applyEuler() now expects an Euler rotation rather than a Vector3 and order.");return this.applyQuaternion(a.setFromEuler(b))}}(),applyAxisAngle:function(){var a=new ja;return function(b,c){return this.applyQuaternion(a.setFromAxisAngle(b,c))}}(),applyMatrix3:function(a){var b=this.x,c=this.y,d=this.z;a=a.elements;this.x=a[0]*b+a[3]*c+a[6]*d;this.y=a[1]*
+b+a[4]*c+a[7]*d;this.z=a[2]*b+a[5]*c+a[8]*d;return this},applyMatrix4:function(a){var b=this.x,c=this.y,d=this.z;a=a.elements;var e=1/(a[3]*b+a[7]*c+a[11]*d+a[15]);this.x=(a[0]*b+a[4]*c+a[8]*d+a[12])*e;this.y=(a[1]*b+a[5]*c+a[9]*d+a[13])*e;this.z=(a[2]*b+a[6]*c+a[10]*d+a[14])*e;return this},applyQuaternion:function(a){var b=this.x,c=this.y,d=this.z,e=a.x,f=a.y,g=a.z;a=a.w;var h=a*b+f*d-g*c,k=a*c+g*b-e*d,m=a*d+e*c-f*b;b=-e*b-f*c-g*d;this.x=h*a+b*-e+k*-g-m*-f;this.y=k*a+b*-f+m*-e-h*-g;this.z=m*a+b*
+-g+h*-f-k*-e;return this},project:function(a){return this.applyMatrix4(a.matrixWorldInverse).applyMatrix4(a.projectionMatrix)},unproject:function(){var a=new P;return function(b){return this.applyMatrix4(a.getInverse(b.projectionMatrix)).applyMatrix4(b.matrixWorld)}}(),transformDirection:function(a){var b=this.x,c=this.y,d=this.z;a=a.elements;this.x=a[0]*b+a[4]*c+a[8]*d;this.y=a[1]*b+a[5]*c+a[9]*d;this.z=a[2]*b+a[6]*c+a[10]*d;return this.normalize()},divide:function(a){this.x/=a.x;this.y/=a.y;this.z/=
+a.z;return this},divideScalar:function(a){return this.multiplyScalar(1/a)},min:function(a){this.x=Math.min(this.x,a.x);this.y=Math.min(this.y,a.y);this.z=Math.min(this.z,a.z);return this},max:function(a){this.x=Math.max(this.x,a.x);this.y=Math.max(this.y,a.y);this.z=Math.max(this.z,a.z);return this},clamp:function(a,b){this.x=Math.max(a.x,Math.min(b.x,this.x));this.y=Math.max(a.y,Math.min(b.y,this.y));this.z=Math.max(a.z,Math.min(b.z,this.z));return this},clampScalar:function(){var a=new p,b=new p;
+return function(c,d){a.set(c,c,c);b.set(d,d,d);return this.clamp(a,b)}}(),clampLength:function(a,b){var c=this.length();return this.divideScalar(c||1).multiplyScalar(Math.max(a,Math.min(b,c)))},floor:function(){this.x=Math.floor(this.x);this.y=Math.floor(this.y);this.z=Math.floor(this.z);return this},ceil:function(){this.x=Math.ceil(this.x);this.y=Math.ceil(this.y);this.z=Math.ceil(this.z);return this},round:function(){this.x=Math.round(this.x);this.y=Math.round(this.y);this.z=Math.round(this.z);
+return this},roundToZero:function(){this.x=0>this.x?Math.ceil(this.x):Math.floor(this.x);this.y=0>this.y?Math.ceil(this.y):Math.floor(this.y);this.z=0>this.z?Math.ceil(this.z):Math.floor(this.z);return this},negate:function(){this.x=-this.x;this.y=-this.y;this.z=-this.z;return this},dot:function(a){return this.x*a.x+this.y*a.y+this.z*a.z},lengthSq:function(){return this.x*this.x+this.y*this.y+this.z*this.z},length:function(){return Math.sqrt(this.x*this.x+this.y*this.y+this.z*this.z)},manhattanLength:function(){return Math.abs(this.x)+
+Math.abs(this.y)+Math.abs(this.z)},normalize:function(){return this.divideScalar(this.length()||1)},setLength:function(a){return this.normalize().multiplyScalar(a)},lerp:function(a,b){this.x+=(a.x-this.x)*b;this.y+=(a.y-this.y)*b;this.z+=(a.z-this.z)*b;return this},lerpVectors:function(a,b,c){return this.subVectors(b,a).multiplyScalar(c).add(a)},cross:function(a,b){return void 0!==b?(console.warn("THREE.Vector3: .cross() now only accepts one argument. Use .crossVectors( a, b ) instead."),this.crossVectors(a,
+b)):this.crossVectors(this,a)},crossVectors:function(a,b){var c=a.x,d=a.y;a=a.z;var e=b.x,f=b.y;b=b.z;this.x=d*b-a*f;this.y=a*e-c*b;this.z=c*f-d*e;return this},projectOnVector:function(a){var b=a.dot(this)/a.lengthSq();return this.copy(a).multiplyScalar(b)},projectOnPlane:function(){var a=new p;return function(b){a.copy(this).projectOnVector(b);return this.sub(a)}}(),reflect:function(){var a=new p;return function(b){return this.sub(a.copy(b).multiplyScalar(2*this.dot(b)))}}(),angleTo:function(a){a=
+this.dot(a)/Math.sqrt(this.lengthSq()*a.lengthSq());return Math.acos(R.clamp(a,-1,1))},distanceTo:function(a){return Math.sqrt(this.distanceToSquared(a))},distanceToSquared:function(a){var b=this.x-a.x,c=this.y-a.y;a=this.z-a.z;return b*b+c*c+a*a},manhattanDistanceTo:function(a){return Math.abs(this.x-a.x)+Math.abs(this.y-a.y)+Math.abs(this.z-a.z)},setFromSpherical:function(a){return this.setFromSphericalCoords(a.radius,a.phi,a.theta)},setFromSphericalCoords:function(a,b,c){var d=Math.sin(b)*a;this.x=
+d*Math.sin(c);this.y=Math.cos(b)*a;this.z=d*Math.cos(c);return this},setFromCylindrical:function(a){return this.setFromCylindricalCoords(a.radius,a.theta,a.y)},setFromCylindricalCoords:function(a,b,c){this.x=a*Math.sin(b);this.y=c;this.z=a*Math.cos(b);return this},setFromMatrixPosition:function(a){a=a.elements;this.x=a[12];this.y=a[13];this.z=a[14];return this},setFromMatrixScale:function(a){var b=this.setFromMatrixColumn(a,0).length(),c=this.setFromMatrixColumn(a,1).length();a=this.setFromMatrixColumn(a,
+2).length();this.x=b;this.y=c;this.z=a;return this},setFromMatrixColumn:function(a,b){return this.fromArray(a.elements,4*b)},equals:function(a){return a.x===this.x&&a.y===this.y&&a.z===this.z},fromArray:function(a,b){void 0===b&&(b=0);this.x=a[b];this.y=a[b+1];this.z=a[b+2];return this},toArray:function(a,b){void 0===a&&(a=[]);void 0===b&&(b=0);a[b]=this.x;a[b+1]=this.y;a[b+2]=this.z;return a},fromBufferAttribute:function(a,b,c){void 0!==c&&console.warn("THREE.Vector3: offset has been removed from .fromBufferAttribute().");
+this.x=a.getX(b);this.y=a.getY(b);this.z=a.getZ(b);return this}});Object.assign(da.prototype,{isMatrix3:!0,set:function(a,b,c,d,e,f,g,h,k){var m=this.elements;m[0]=a;m[1]=d;m[2]=g;m[3]=b;m[4]=e;m[5]=h;m[6]=c;m[7]=f;m[8]=k;return this},identity:function(){this.set(1,0,0,0,1,0,0,0,1);return this},clone:function(){return(new this.constructor).fromArray(this.elements)},copy:function(a){var b=this.elements;a=a.elements;b[0]=a[0];b[1]=a[1];b[2]=a[2];b[3]=a[3];b[4]=a[4];b[5]=a[5];b[6]=a[6];b[7]=a[7];b[8]=
+a[8];return this},setFromMatrix4:function(a){a=a.elements;this.set(a[0],a[4],a[8],a[1],a[5],a[9],a[2],a[6],a[10]);return this},applyToBufferAttribute:function(){var a=new p;return function(b){for(var c=0,d=b.count;c<d;c++)a.x=b.getX(c),a.y=b.getY(c),a.z=b.getZ(c),a.applyMatrix3(this),b.setXYZ(c,a.x,a.y,a.z);return b}}(),multiply:function(a){return this.multiplyMatrices(this,a)},premultiply:function(a){return this.multiplyMatrices(a,this)},multiplyMatrices:function(a,b){var c=a.elements,d=b.elements;
+b=this.elements;a=c[0];var e=c[3],f=c[6],g=c[1],h=c[4],k=c[7],m=c[2],l=c[5];c=c[8];var n=d[0],r=d[3],p=d[6],t=d[1],u=d[4],w=d[7],A=d[2],v=d[5];d=d[8];b[0]=a*n+e*t+f*A;b[3]=a*r+e*u+f*v;b[6]=a*p+e*w+f*d;b[1]=g*n+h*t+k*A;b[4]=g*r+h*u+k*v;b[7]=g*p+h*w+k*d;b[2]=m*n+l*t+c*A;b[5]=m*r+l*u+c*v;b[8]=m*p+l*w+c*d;return this},multiplyScalar:function(a){var b=this.elements;b[0]*=a;b[3]*=a;b[6]*=a;b[1]*=a;b[4]*=a;b[7]*=a;b[2]*=a;b[5]*=a;b[8]*=a;return this},determinant:function(){var a=this.elements,b=a[0],c=a[1],
+d=a[2],e=a[3],f=a[4],g=a[5],h=a[6],k=a[7];a=a[8];return b*f*a-b*g*k-c*e*a+c*g*h+d*e*k-d*f*h},getInverse:function(a,b){a&&a.isMatrix4&&console.error("THREE.Matrix3: .getInverse() no longer takes a Matrix4 argument.");var c=a.elements;a=this.elements;var d=c[0],e=c[1],f=c[2],g=c[3],h=c[4],k=c[5],m=c[6],l=c[7];c=c[8];var n=c*h-k*l,r=k*m-c*g,p=l*g-h*m,t=d*n+e*r+f*p;if(0===t){if(!0===b)throw Error("THREE.Matrix3: .getInverse() can't invert matrix, determinant is 0");console.warn("THREE.Matrix3: .getInverse() can't invert matrix, determinant is 0");
+return this.identity()}b=1/t;a[0]=n*b;a[1]=(f*l-c*e)*b;a[2]=(k*e-f*h)*b;a[3]=r*b;a[4]=(c*d-f*m)*b;a[5]=(f*g-k*d)*b;a[6]=p*b;a[7]=(e*m-l*d)*b;a[8]=(h*d-e*g)*b;return this},transpose:function(){var a=this.elements;var b=a[1];a[1]=a[3];a[3]=b;b=a[2];a[2]=a[6];a[6]=b;b=a[5];a[5]=a[7];a[7]=b;return this},getNormalMatrix:function(a){return this.setFromMatrix4(a).getInverse(this).transpose()},transposeIntoArray:function(a){var b=this.elements;a[0]=b[0];a[1]=b[3];a[2]=b[6];a[3]=b[1];a[4]=b[4];a[5]=b[7];a[6]=
+b[2];a[7]=b[5];a[8]=b[8];return this},setUvTransform:function(a,b,c,d,e,f,g){var h=Math.cos(e);e=Math.sin(e);this.set(c*h,c*e,-c*(h*f+e*g)+f+a,-d*e,d*h,-d*(-e*f+h*g)+g+b,0,0,1)},scale:function(a,b){var c=this.elements;c[0]*=a;c[3]*=a;c[6]*=a;c[1]*=b;c[4]*=b;c[7]*=b;return this},rotate:function(a){var b=Math.cos(a);a=Math.sin(a);var c=this.elements,d=c[0],e=c[3],f=c[6],g=c[1],h=c[4],k=c[7];c[0]=b*d+a*g;c[3]=b*e+a*h;c[6]=b*f+a*k;c[1]=-a*d+b*g;c[4]=-a*e+b*h;c[7]=-a*f+b*k;return this},translate:function(a,
+b){var c=this.elements;c[0]+=a*c[2];c[3]+=a*c[5];c[6]+=a*c[8];c[1]+=b*c[2];c[4]+=b*c[5];c[7]+=b*c[8];return this},equals:function(a){var b=this.elements;a=a.elements;for(var c=0;9>c;c++)if(b[c]!==a[c])return!1;return!0},fromArray:function(a,b){void 0===b&&(b=0);for(var c=0;9>c;c++)this.elements[c]=a[c+b];return this},toArray:function(a,b){void 0===a&&(a=[]);void 0===b&&(b=0);var c=this.elements;a[b]=c[0];a[b+1]=c[1];a[b+2]=c[2];a[b+3]=c[3];a[b+4]=c[4];a[b+5]=c[5];a[b+6]=c[6];a[b+7]=c[7];a[b+8]=c[8];
+return a}});var jb={getDataURL:function(a){if("undefined"==typeof HTMLCanvasElement)return a.src;if(a instanceof HTMLCanvasElement)var b=a;else{b=document.createElementNS("http://www.w3.org/1999/xhtml","canvas");b.width=a.width;b.height=a.height;var c=b.getContext("2d");a instanceof ImageData?c.putImageData(a,0,0):c.drawImage(a,0,0,a.width,a.height)}return 2048<b.width||2048<b.height?b.toDataURL("image/jpeg",.6):b.toDataURL("image/png")}},Kf=0;W.DEFAULT_IMAGE=void 0;W.DEFAULT_MAPPING=300;W.prototype=
+Object.assign(Object.create(ia.prototype),{constructor:W,isTexture:!0,updateMatrix:function(){this.matrix.setUvTransform(this.offset.x,this.offset.y,this.repeat.x,this.repeat.y,this.rotation,this.center.x,this.center.y)},clone:function(){return(new this.constructor).copy(this)},copy:function(a){this.name=a.name;this.image=a.image;this.mipmaps=a.mipmaps.slice(0);this.mapping=a.mapping;this.wrapS=a.wrapS;this.wrapT=a.wrapT;this.magFilter=a.magFilter;this.minFilter=a.minFilter;this.anisotropy=a.anisotropy;
+this.format=a.format;this.type=a.type;this.offset.copy(a.offset);this.repeat.copy(a.repeat);this.center.copy(a.center);this.rotation=a.rotation;this.matrixAutoUpdate=a.matrixAutoUpdate;this.matrix.copy(a.matrix);this.generateMipmaps=a.generateMipmaps;this.premultiplyAlpha=a.premultiplyAlpha;this.flipY=a.flipY;this.unpackAlignment=a.unpackAlignment;this.encoding=a.encoding;return this},toJSON:function(a){var b=void 0===a||"string"===typeof a;if(!b&&void 0!==a.textures[this.uuid])return a.textures[this.uuid];
+var c={metadata:{version:4.5,type:"Texture",generator:"Texture.toJSON"},uuid:this.uuid,name:this.name,mapping:this.mapping,repeat:[this.repeat.x,this.repeat.y],offset:[this.offset.x,this.offset.y],center:[this.center.x,this.center.y],rotation:this.rotation,wrap:[this.wrapS,this.wrapT],format:this.format,minFilter:this.minFilter,magFilter:this.magFilter,anisotropy:this.anisotropy,flipY:this.flipY};if(void 0!==this.image){var d=this.image;void 0===d.uuid&&(d.uuid=R.generateUUID());if(!b&&void 0===a.images[d.uuid]){if(Array.isArray(d)){var e=
+[];for(var f=0,g=d.length;f<g;f++)e.push(jb.getDataURL(d[f]))}else e=jb.getDataURL(d);a.images[d.uuid]={uuid:d.uuid,url:e}}c.image=d.uuid}b||(a.textures[this.uuid]=c);return c},dispose:function(){this.dispatchEvent({type:"dispose"})},transformUv:function(a){if(300!==this.mapping)return a;a.applyMatrix3(this.matrix);if(0>a.x||1<a.x)switch(this.wrapS){case 1E3:a.x-=Math.floor(a.x);break;case 1001:a.x=0>a.x?0:1;break;case 1002:a.x=1===Math.abs(Math.floor(a.x)%2)?Math.ceil(a.x)-a.x:a.x-Math.floor(a.x)}if(0>
+a.y||1<a.y)switch(this.wrapT){case 1E3:a.y-=Math.floor(a.y);break;case 1001:a.y=0>a.y?0:1;break;case 1002:a.y=1===Math.abs(Math.floor(a.y)%2)?Math.ceil(a.y)-a.y:a.y-Math.floor(a.y)}this.flipY&&(a.y=1-a.y);return a}});Object.defineProperty(W.prototype,"needsUpdate",{set:function(a){!0===a&&this.version++}});Object.assign(Z.prototype,{isVector4:!0,set:function(a,b,c,d){this.x=a;this.y=b;this.z=c;this.w=d;return this},setScalar:function(a){this.w=this.z=this.y=this.x=a;return this},setX:function(a){this.x=
+a;return this},setY:function(a){this.y=a;return this},setZ:function(a){this.z=a;return this},setW:function(a){this.w=a;return this},setComponent:function(a,b){switch(a){case 0:this.x=b;break;case 1:this.y=b;break;case 2:this.z=b;break;case 3:this.w=b;break;default:throw Error("index is out of range: "+a);}return this},getComponent:function(a){switch(a){case 0:return this.x;case 1:return this.y;case 2:return this.z;case 3:return this.w;default:throw Error("index is out of range: "+a);}},clone:function(){return new this.constructor(this.x,
+this.y,this.z,this.w)},copy:function(a){this.x=a.x;this.y=a.y;this.z=a.z;this.w=void 0!==a.w?a.w:1;return this},add:function(a,b){if(void 0!==b)return console.warn("THREE.Vector4: .add() now only accepts one argument. Use .addVectors( a, b ) instead."),this.addVectors(a,b);this.x+=a.x;this.y+=a.y;this.z+=a.z;this.w+=a.w;return this},addScalar:function(a){this.x+=a;this.y+=a;this.z+=a;this.w+=a;return this},addVectors:function(a,b){this.x=a.x+b.x;this.y=a.y+b.y;this.z=a.z+b.z;this.w=a.w+b.w;return this},
+addScaledVector:function(a,b){this.x+=a.x*b;this.y+=a.y*b;this.z+=a.z*b;this.w+=a.w*b;return this},sub:function(a,b){if(void 0!==b)return console.warn("THREE.Vector4: .sub() now only accepts one argument. Use .subVectors( a, b ) instead."),this.subVectors(a,b);this.x-=a.x;this.y-=a.y;this.z-=a.z;this.w-=a.w;return this},subScalar:function(a){this.x-=a;this.y-=a;this.z-=a;this.w-=a;return this},subVectors:function(a,b){this.x=a.x-b.x;this.y=a.y-b.y;this.z=a.z-b.z;this.w=a.w-b.w;return this},multiplyScalar:function(a){this.x*=
+a;this.y*=a;this.z*=a;this.w*=a;return this},applyMatrix4:function(a){var b=this.x,c=this.y,d=this.z,e=this.w;a=a.elements;this.x=a[0]*b+a[4]*c+a[8]*d+a[12]*e;this.y=a[1]*b+a[5]*c+a[9]*d+a[13]*e;this.z=a[2]*b+a[6]*c+a[10]*d+a[14]*e;this.w=a[3]*b+a[7]*c+a[11]*d+a[15]*e;return this},divideScalar:function(a){return this.multiplyScalar(1/a)},setAxisAngleFromQuaternion:function(a){this.w=2*Math.acos(a.w);var b=Math.sqrt(1-a.w*a.w);1E-4>b?(this.x=1,this.z=this.y=0):(this.x=a.x/b,this.y=a.y/b,this.z=a.z/
+b);return this},setAxisAngleFromRotationMatrix:function(a){a=a.elements;var b=a[0];var c=a[4];var d=a[8],e=a[1],f=a[5],g=a[9];var h=a[2];var k=a[6];var m=a[10];if(.01>Math.abs(c-e)&&.01>Math.abs(d-h)&&.01>Math.abs(g-k)){if(.1>Math.abs(c+e)&&.1>Math.abs(d+h)&&.1>Math.abs(g+k)&&.1>Math.abs(b+f+m-3))return this.set(1,0,0,0),this;a=Math.PI;b=(b+1)/2;f=(f+1)/2;m=(m+1)/2;c=(c+e)/4;d=(d+h)/4;g=(g+k)/4;b>f&&b>m?.01>b?(k=0,c=h=.707106781):(k=Math.sqrt(b),h=c/k,c=d/k):f>m?.01>f?(k=.707106781,h=0,c=.707106781):
+(h=Math.sqrt(f),k=c/h,c=g/h):.01>m?(h=k=.707106781,c=0):(c=Math.sqrt(m),k=d/c,h=g/c);this.set(k,h,c,a);return this}a=Math.sqrt((k-g)*(k-g)+(d-h)*(d-h)+(e-c)*(e-c));.001>Math.abs(a)&&(a=1);this.x=(k-g)/a;this.y=(d-h)/a;this.z=(e-c)/a;this.w=Math.acos((b+f+m-1)/2);return this},min:function(a){this.x=Math.min(this.x,a.x);this.y=Math.min(this.y,a.y);this.z=Math.min(this.z,a.z);this.w=Math.min(this.w,a.w);return this},max:function(a){this.x=Math.max(this.x,a.x);this.y=Math.max(this.y,a.y);this.z=Math.max(this.z,
+a.z);this.w=Math.max(this.w,a.w);return this},clamp:function(a,b){this.x=Math.max(a.x,Math.min(b.x,this.x));this.y=Math.max(a.y,Math.min(b.y,this.y));this.z=Math.max(a.z,Math.min(b.z,this.z));this.w=Math.max(a.w,Math.min(b.w,this.w));return this},clampScalar:function(){var a,b;return function(c,d){void 0===a&&(a=new Z,b=new Z);a.set(c,c,c,c);b.set(d,d,d,d);return this.clamp(a,b)}}(),clampLength:function(a,b){var c=this.length();return this.divideScalar(c||1).multiplyScalar(Math.max(a,Math.min(b,c)))},
+floor:function(){this.x=Math.floor(this.x);this.y=Math.floor(this.y);this.z=Math.floor(this.z);this.w=Math.floor(this.w);return this},ceil:function(){this.x=Math.ceil(this.x);this.y=Math.ceil(this.y);this.z=Math.ceil(this.z);this.w=Math.ceil(this.w);return this},round:function(){this.x=Math.round(this.x);this.y=Math.round(this.y);this.z=Math.round(this.z);this.w=Math.round(this.w);return this},roundToZero:function(){this.x=0>this.x?Math.ceil(this.x):Math.floor(this.x);this.y=0>this.y?Math.ceil(this.y):
+Math.floor(this.y);this.z=0>this.z?Math.ceil(this.z):Math.floor(this.z);this.w=0>this.w?Math.ceil(this.w):Math.floor(this.w);return this},negate:function(){this.x=-this.x;this.y=-this.y;this.z=-this.z;this.w=-this.w;return this},dot:function(a){return this.x*a.x+this.y*a.y+this.z*a.z+this.w*a.w},lengthSq:function(){return this.x*this.x+this.y*this.y+this.z*this.z+this.w*this.w},length:function(){return Math.sqrt(this.x*this.x+this.y*this.y+this.z*this.z+this.w*this.w)},manhattanLength:function(){return Math.abs(this.x)+
+Math.abs(this.y)+Math.abs(this.z)+Math.abs(this.w)},normalize:function(){return this.divideScalar(this.length()||1)},setLength:function(a){return this.normalize().multiplyScalar(a)},lerp:function(a,b){this.x+=(a.x-this.x)*b;this.y+=(a.y-this.y)*b;this.z+=(a.z-this.z)*b;this.w+=(a.w-this.w)*b;return this},lerpVectors:function(a,b,c){return this.subVectors(b,a).multiplyScalar(c).add(a)},equals:function(a){return a.x===this.x&&a.y===this.y&&a.z===this.z&&a.w===this.w},fromArray:function(a,b){void 0===
+b&&(b=0);this.x=a[b];this.y=a[b+1];this.z=a[b+2];this.w=a[b+3];return this},toArray:function(a,b){void 0===a&&(a=[]);void 0===b&&(b=0);a[b]=this.x;a[b+1]=this.y;a[b+2]=this.z;a[b+3]=this.w;return a},fromBufferAttribute:function(a,b,c){void 0!==c&&console.warn("THREE.Vector4: offset has been removed from .fromBufferAttribute().");this.x=a.getX(b);this.y=a.getY(b);this.z=a.getZ(b);this.w=a.getW(b);return this}});kb.prototype=Object.assign(Object.create(ia.prototype),{constructor:kb,isWebGLRenderTarget:!0,
+setSize:function(a,b){if(this.width!==a||this.height!==b)this.width=a,this.height=b,this.dispose();this.viewport.set(0,0,a,b);this.scissor.set(0,0,a,b)},clone:function(){return(new this.constructor).copy(this)},copy:function(a){this.width=a.width;this.height=a.height;this.viewport.copy(a.viewport);this.texture=a.texture.clone();this.depthBuffer=a.depthBuffer;this.stencilBuffer=a.stencilBuffer;this.depthTexture=a.depthTexture;return this},dispose:function(){this.dispatchEvent({type:"dispose"})}});
+Jb.prototype=Object.create(kb.prototype);Jb.prototype.constructor=Jb;Jb.prototype.isWebGLRenderTargetCube=!0;lb.prototype=Object.create(W.prototype);lb.prototype.constructor=lb;lb.prototype.isDataTexture=!0;Object.assign(Wa.prototype,{isBox3:!0,set:function(a,b){this.min.copy(a);this.max.copy(b);return this},setFromArray:function(a){for(var b=Infinity,c=Infinity,d=Infinity,e=-Infinity,f=-Infinity,g=-Infinity,h=0,k=a.length;h<k;h+=3){var m=a[h],l=a[h+1],n=a[h+2];m<b&&(b=m);l<c&&(c=l);n<d&&(d=n);m>
+e&&(e=m);l>f&&(f=l);n>g&&(g=n)}this.min.set(b,c,d);this.max.set(e,f,g);return this},setFromBufferAttribute:function(a){for(var b=Infinity,c=Infinity,d=Infinity,e=-Infinity,f=-Infinity,g=-Infinity,h=0,k=a.count;h<k;h++){var m=a.getX(h),l=a.getY(h),n=a.getZ(h);m<b&&(b=m);l<c&&(c=l);n<d&&(d=n);m>e&&(e=m);l>f&&(f=l);n>g&&(g=n)}this.min.set(b,c,d);this.max.set(e,f,g);return this},setFromPoints:function(a){this.makeEmpty();for(var b=0,c=a.length;b<c;b++)this.expandByPoint(a[b]);return this},setFromCenterAndSize:function(){var a=
+new p;return function(b,c){c=a.copy(c).multiplyScalar(.5);this.min.copy(b).sub(c);this.max.copy(b).add(c);return this}}(),setFromObject:function(a){this.makeEmpty();return this.expandByObject(a)},clone:function(){return(new this.constructor).copy(this)},copy:function(a){this.min.copy(a.min);this.max.copy(a.max);return this},makeEmpty:function(){this.min.x=this.min.y=this.min.z=Infinity;this.max.x=this.max.y=this.max.z=-Infinity;return this},isEmpty:function(){return this.max.x<this.min.x||this.max.y<
+this.min.y||this.max.z<this.min.z},getCenter:function(a){void 0===a&&(console.warn("THREE.Box3: .getCenter() target is now required"),a=new p);return this.isEmpty()?a.set(0,0,0):a.addVectors(this.min,this.max).multiplyScalar(.5)},getSize:function(a){void 0===a&&(console.warn("THREE.Box3: .getSize() target is now required"),a=new p);return this.isEmpty()?a.set(0,0,0):a.subVectors(this.max,this.min)},expandByPoint:function(a){this.min.min(a);this.max.max(a);return this},expandByVector:function(a){this.min.sub(a);
+this.max.add(a);return this},expandByScalar:function(a){this.min.addScalar(-a);this.max.addScalar(a);return this},expandByObject:function(){function a(a){var f=a.geometry;if(void 0!==f)if(f.isGeometry)for(f=f.vertices,c=0,d=f.length;c<d;c++)e.copy(f[c]),e.applyMatrix4(a.matrixWorld),b.expandByPoint(e);else if(f.isBufferGeometry&&(f=f.attributes.position,void 0!==f))for(c=0,d=f.count;c<d;c++)e.fromBufferAttribute(f,c).applyMatrix4(a.matrixWorld),b.expandByPoint(e)}var b,c,d,e=new p;return function(c){b=
+this;c.updateMatrixWorld(!0);c.traverse(a);return this}}(),containsPoint:function(a){return a.x<this.min.x||a.x>this.max.x||a.y<this.min.y||a.y>this.max.y||a.z<this.min.z||a.z>this.max.z?!1:!0},containsBox:function(a){return this.min.x<=a.min.x&&a.max.x<=this.max.x&&this.min.y<=a.min.y&&a.max.y<=this.max.y&&this.min.z<=a.min.z&&a.max.z<=this.max.z},getParameter:function(a,b){void 0===b&&(console.warn("THREE.Box3: .getParameter() target is now required"),b=new p);return b.set((a.x-this.min.x)/(this.max.x-
+this.min.x),(a.y-this.min.y)/(this.max.y-this.min.y),(a.z-this.min.z)/(this.max.z-this.min.z))},intersectsBox:function(a){return a.max.x<this.min.x||a.min.x>this.max.x||a.max.y<this.min.y||a.min.y>this.max.y||a.max.z<this.min.z||a.min.z>this.max.z?!1:!0},intersectsSphere:function(){var a=new p;return function(b){this.clampPoint(b.center,a);return a.distanceToSquared(b.center)<=b.radius*b.radius}}(),intersectsPlane:function(a){if(0<a.normal.x){var b=a.normal.x*this.min.x;var c=a.normal.x*this.max.x}else b=
+a.normal.x*this.max.x,c=a.normal.x*this.min.x;0<a.normal.y?(b+=a.normal.y*this.min.y,c+=a.normal.y*this.max.y):(b+=a.normal.y*this.max.y,c+=a.normal.y*this.min.y);0<a.normal.z?(b+=a.normal.z*this.min.z,c+=a.normal.z*this.max.z):(b+=a.normal.z*this.max.z,c+=a.normal.z*this.min.z);return b<=-a.constant&&c>=-a.constant},intersectsTriangle:function(){function a(a){var e;var f=0;for(e=a.length-3;f<=e;f+=3){h.fromArray(a,f);var g=m.x*Math.abs(h.x)+m.y*Math.abs(h.y)+m.z*Math.abs(h.z),k=b.dot(h),l=c.dot(h),
+q=d.dot(h);if(Math.max(-Math.max(k,l,q),Math.min(k,l,q))>g)return!1}return!0}var b=new p,c=new p,d=new p,e=new p,f=new p,g=new p,h=new p,k=new p,m=new p,l=new p;return function(h){if(this.isEmpty())return!1;this.getCenter(k);m.subVectors(this.max,k);b.subVectors(h.a,k);c.subVectors(h.b,k);d.subVectors(h.c,k);e.subVectors(c,b);f.subVectors(d,c);g.subVectors(b,d);h=[0,-e.z,e.y,0,-f.z,f.y,0,-g.z,g.y,e.z,0,-e.x,f.z,0,-f.x,g.z,0,-g.x,-e.y,e.x,0,-f.y,f.x,0,-g.y,g.x,0];if(!a(h))return!1;h=[1,0,0,0,1,0,0,
+0,1];if(!a(h))return!1;l.crossVectors(e,f);h=[l.x,l.y,l.z];return a(h)}}(),clampPoint:function(a,b){void 0===b&&(console.warn("THREE.Box3: .clampPoint() target is now required"),b=new p);return b.copy(a).clamp(this.min,this.max)},distanceToPoint:function(){var a=new p;return function(b){return a.copy(b).clamp(this.min,this.max).sub(b).length()}}(),getBoundingSphere:function(){var a=new p;return function(b){void 0===b&&(console.warn("THREE.Box3: .getBoundingSphere() target is now required"),b=new Ga);
+this.getCenter(b.center);b.radius=.5*this.getSize(a).length();return b}}(),intersect:function(a){this.min.max(a.min);this.max.min(a.max);this.isEmpty()&&this.makeEmpty();return this},union:function(a){this.min.min(a.min);this.max.max(a.max);return this},applyMatrix4:function(){var a=[new p,new p,new p,new p,new p,new p,new p,new p];return function(b){if(this.isEmpty())return this;a[0].set(this.min.x,this.min.y,this.min.z).applyMatrix4(b);a[1].set(this.min.x,this.min.y,this.max.z).applyMatrix4(b);
+a[2].set(this.min.x,this.max.y,this.min.z).applyMatrix4(b);a[3].set(this.min.x,this.max.y,this.max.z).applyMatrix4(b);a[4].set(this.max.x,this.min.y,this.min.z).applyMatrix4(b);a[5].set(this.max.x,this.min.y,this.max.z).applyMatrix4(b);a[6].set(this.max.x,this.max.y,this.min.z).applyMatrix4(b);a[7].set(this.max.x,this.max.y,this.max.z).applyMatrix4(b);this.setFromPoints(a);return this}}(),translate:function(a){this.min.add(a);this.max.add(a);return this},equals:function(a){return a.min.equals(this.min)&&
+a.max.equals(this.max)}});Object.assign(Ga.prototype,{set:function(a,b){this.center.copy(a);this.radius=b;return this},setFromPoints:function(){var a=new Wa;return function(b,c){var d=this.center;void 0!==c?d.copy(c):a.setFromPoints(b).getCenter(d);for(var e=c=0,f=b.length;e<f;e++)c=Math.max(c,d.distanceToSquared(b[e]));this.radius=Math.sqrt(c);return this}}(),clone:function(){return(new this.constructor).copy(this)},copy:function(a){this.center.copy(a.center);this.radius=a.radius;return this},empty:function(){return 0>=
+this.radius},containsPoint:function(a){return a.distanceToSquared(this.center)<=this.radius*this.radius},distanceToPoint:function(a){return a.distanceTo(this.center)-this.radius},intersectsSphere:function(a){var b=this.radius+a.radius;return a.center.distanceToSquared(this.center)<=b*b},intersectsBox:function(a){return a.intersectsSphere(this)},intersectsPlane:function(a){return Math.abs(a.distanceToPoint(this.center))<=this.radius},clampPoint:function(a,b){var c=this.center.distanceToSquared(a);
+void 0===b&&(console.warn("THREE.Sphere: .clampPoint() target is now required"),b=new p);b.copy(a);c>this.radius*this.radius&&(b.sub(this.center).normalize(),b.multiplyScalar(this.radius).add(this.center));return b},getBoundingBox:function(a){void 0===a&&(console.warn("THREE.Sphere: .getBoundingBox() target is now required"),a=new Wa);a.set(this.center,this.center);a.expandByScalar(this.radius);return a},applyMatrix4:function(a){this.center.applyMatrix4(a);this.radius*=a.getMaxScaleOnAxis();return this},
+translate:function(a){this.center.add(a);return this},equals:function(a){return a.center.equals(this.center)&&a.radius===this.radius}});Object.assign(Pa.prototype,{set:function(a,b){this.normal.copy(a);this.constant=b;return this},setComponents:function(a,b,c,d){this.normal.set(a,b,c);this.constant=d;return this},setFromNormalAndCoplanarPoint:function(a,b){this.normal.copy(a);this.constant=-b.dot(this.normal);return this},setFromCoplanarPoints:function(){var a=new p,b=new p;return function(c,d,e){d=
+a.subVectors(e,d).cross(b.subVectors(c,d)).normalize();this.setFromNormalAndCoplanarPoint(d,c);return this}}(),clone:function(){return(new this.constructor).copy(this)},copy:function(a){this.normal.copy(a.normal);this.constant=a.constant;return this},normalize:function(){var a=1/this.normal.length();this.normal.multiplyScalar(a);this.constant*=a;return this},negate:function(){this.constant*=-1;this.normal.negate();return this},distanceToPoint:function(a){return this.normal.dot(a)+this.constant},distanceToSphere:function(a){return this.distanceToPoint(a.center)-
+a.radius},projectPoint:function(a,b){void 0===b&&(console.warn("THREE.Plane: .projectPoint() target is now required"),b=new p);return b.copy(this.normal).multiplyScalar(-this.distanceToPoint(a)).add(a)},intersectLine:function(){var a=new p;return function(b,c){void 0===c&&(console.warn("THREE.Plane: .intersectLine() target is now required"),c=new p);var d=b.delta(a),e=this.normal.dot(d);if(0===e){if(0===this.distanceToPoint(b.start))return c.copy(b.start)}else if(e=-(b.start.dot(this.normal)+this.constant)/
+e,!(0>e||1<e))return c.copy(d).multiplyScalar(e).add(b.start)}}(),intersectsLine:function(a){var b=this.distanceToPoint(a.start);a=this.distanceToPoint(a.end);return 0>b&&0<a||0>a&&0<b},intersectsBox:function(a){return a.intersectsPlane(this)},intersectsSphere:function(a){return a.intersectsPlane(this)},coplanarPoint:function(a){void 0===a&&(console.warn("THREE.Plane: .coplanarPoint() target is now required"),a=new p);return a.copy(this.normal).multiplyScalar(-this.constant)},applyMatrix4:function(){var a=
+new p,b=new da;return function(c,d){d=d||b.getNormalMatrix(c);c=this.coplanarPoint(a).applyMatrix4(c);d=this.normal.applyMatrix3(d).normalize();this.constant=-c.dot(d);return this}}(),translate:function(a){this.constant-=a.dot(this.normal);return this},equals:function(a){return a.normal.equals(this.normal)&&a.constant===this.constant}});Object.assign(rd.prototype,{set:function(a,b,c,d,e,f){var g=this.planes;g[0].copy(a);g[1].copy(b);g[2].copy(c);g[3].copy(d);g[4].copy(e);g[5].copy(f);return this},
+clone:function(){return(new this.constructor).copy(this)},copy:function(a){for(var b=this.planes,c=0;6>c;c++)b[c].copy(a.planes[c]);return this},setFromMatrix:function(a){var b=this.planes,c=a.elements;a=c[0];var d=c[1],e=c[2],f=c[3],g=c[4],h=c[5],k=c[6],m=c[7],l=c[8],n=c[9],r=c[10],p=c[11],t=c[12],u=c[13],w=c[14];c=c[15];b[0].setComponents(f-a,m-g,p-l,c-t).normalize();b[1].setComponents(f+a,m+g,p+l,c+t).normalize();b[2].setComponents(f+d,m+h,p+n,c+u).normalize();b[3].setComponents(f-d,m-h,p-n,c-
+u).normalize();b[4].setComponents(f-e,m-k,p-r,c-w).normalize();b[5].setComponents(f+e,m+k,p+r,c+w).normalize();return this},intersectsObject:function(){var a=new Ga;return function(b){var c=b.geometry;null===c.boundingSphere&&c.computeBoundingSphere();a.copy(c.boundingSphere).applyMatrix4(b.matrixWorld);return this.intersectsSphere(a)}}(),intersectsSprite:function(){var a=new Ga;return function(b){a.center.set(0,0,0);a.radius=.7071067811865476;a.applyMatrix4(b.matrixWorld);return this.intersectsSphere(a)}}(),
+intersectsSphere:function(a){var b=this.planes,c=a.center;a=-a.radius;for(var d=0;6>d;d++)if(b[d].distanceToPoint(c)<a)return!1;return!0},intersectsBox:function(){var a=new p;return function(b){for(var c=this.planes,d=0;6>d;d++){var e=c[d];a.x=0<e.normal.x?b.max.x:b.min.x;a.y=0<e.normal.y?b.max.y:b.min.y;a.z=0<e.normal.z?b.max.z:b.min.z;if(0>e.distanceToPoint(a))return!1}return!0}}(),containsPoint:function(a){for(var b=this.planes,c=0;6>c;c++)if(0>b[c].distanceToPoint(a))return!1;return!0}});var K=
+{alphamap_fragment:"#ifdef USE_ALPHAMAP\n\tdiffuseColor.a *= texture2D( alphaMap, vUv ).g;\n#endif\n",alphamap_pars_fragment:"#ifdef USE_ALPHAMAP\n\tuniform sampler2D alphaMap;\n#endif\n",alphatest_fragment:"#ifdef ALPHATEST\n\tif ( diffuseColor.a < ALPHATEST ) discard;\n#endif\n",aomap_fragment:"#ifdef USE_AOMAP\n\tfloat ambientOcclusion = ( texture2D( aoMap, vUv2 ).r - 1.0 ) * aoMapIntensity + 1.0;\n\treflectedLight.indirectDiffuse *= ambientOcclusion;\n\t#if defined( USE_ENVMAP ) && defined( PHYSICAL )\n\t\tfloat dotNV = saturate( dot( geometry.normal, geometry.viewDir ) );\n\t\treflectedLight.indirectSpecular *= computeSpecularOcclusion( dotNV, ambientOcclusion, material.specularRoughness );\n\t#endif\n#endif\n",
+aomap_pars_fragment:"#ifdef USE_AOMAP\n\tuniform sampler2D aoMap;\n\tuniform float aoMapIntensity;\n#endif",begin_vertex:"\nvec3 transformed = vec3( position );\n",beginnormal_vertex:"\nvec3 objectNormal = vec3( normal );\n",bsdfs:"float punctualLightIntensityToIrradianceFactor( const in float lightDistance, const in float cutoffDistance, const in float decayExponent ) {\n#if defined ( PHYSICALLY_CORRECT_LIGHTS )\n\tfloat distanceFalloff = 1.0 / max( pow( lightDistance, decayExponent ), 0.01 );\n\tif( cutoffDistance > 0.0 ) {\n\t\tdistanceFalloff *= pow2( saturate( 1.0 - pow4( lightDistance / cutoffDistance ) ) );\n\t}\n\treturn distanceFalloff;\n#else\n\tif( cutoffDistance > 0.0 && decayExponent > 0.0 ) {\n\t\treturn pow( saturate( -lightDistance / cutoffDistance + 1.0 ), decayExponent );\n\t}\n\treturn 1.0;\n#endif\n}\nvec3 BRDF_Diffuse_Lambert( const in vec3 diffuseColor ) {\n\treturn RECIPROCAL_PI * diffuseColor;\n}\nvec3 F_Schlick( const in vec3 specularColor, const in float dotLH ) {\n\tfloat fresnel = exp2( ( -5.55473 * dotLH - 6.98316 ) * dotLH );\n\treturn ( 1.0 - specularColor ) * fresnel + specularColor;\n}\nfloat G_GGX_Smith( const in float alpha, const in float dotNL, const in float dotNV ) {\n\tfloat a2 = pow2( alpha );\n\tfloat gl = dotNL + sqrt( a2 + ( 1.0 - a2 ) * pow2( dotNL ) );\n\tfloat gv = dotNV + sqrt( a2 + ( 1.0 - a2 ) * pow2( dotNV ) );\n\treturn 1.0 / ( gl * gv );\n}\nfloat G_GGX_SmithCorrelated( const in float alpha, const in float dotNL, const in float dotNV ) {\n\tfloat a2 = pow2( alpha );\n\tfloat gv = dotNL * sqrt( a2 + ( 1.0 - a2 ) * pow2( dotNV ) );\n\tfloat gl = dotNV * sqrt( a2 + ( 1.0 - a2 ) * pow2( dotNL ) );\n\treturn 0.5 / max( gv + gl, EPSILON );\n}\nfloat D_GGX( const in float alpha, const in float dotNH ) {\n\tfloat a2 = pow2( alpha );\n\tfloat denom = pow2( dotNH ) * ( a2 - 1.0 ) + 1.0;\n\treturn RECIPROCAL_PI * a2 / pow2( denom );\n}\nvec3 BRDF_Specular_GGX( const in IncidentLight incidentLight, const in GeometricContext 
geometry, const in vec3 specularColor, const in float roughness ) {\n\tfloat alpha = pow2( roughness );\n\tvec3 halfDir = normalize( incidentLight.direction + geometry.viewDir );\n\tfloat dotNL = saturate( dot( geometry.normal, incidentLight.direction ) );\n\tfloat dotNV = saturate( dot( geometry.normal, geometry.viewDir ) );\n\tfloat dotNH = saturate( dot( geometry.normal, halfDir ) );\n\tfloat dotLH = saturate( dot( incidentLight.direction, halfDir ) );\n\tvec3 F = F_Schlick( specularColor, dotLH );\n\tfloat G = G_GGX_SmithCorrelated( alpha, dotNL, dotNV );\n\tfloat D = D_GGX( alpha, dotNH );\n\treturn F * ( G * D );\n}\nvec2 LTC_Uv( const in vec3 N, const in vec3 V, const in float roughness ) {\n\tconst float LUT_SIZE = 64.0;\n\tconst float LUT_SCALE = ( LUT_SIZE - 1.0 ) / LUT_SIZE;\n\tconst float LUT_BIAS = 0.5 / LUT_SIZE;\n\tfloat dotNV = saturate( dot( N, V ) );\n\tvec2 uv = vec2( roughness, sqrt( 1.0 - dotNV ) );\n\tuv = uv * LUT_SCALE + LUT_BIAS;\n\treturn uv;\n}\nfloat LTC_ClippedSphereFormFactor( const in vec3 f ) {\n\tfloat l = length( f );\n\treturn max( ( l * l + f.z ) / ( l + 1.0 ), 0.0 );\n}\nvec3 LTC_EdgeVectorFormFactor( const in vec3 v1, const in vec3 v2 ) {\n\tfloat x = dot( v1, v2 );\n\tfloat y = abs( x );\n\tfloat a = 0.8543985 + ( 0.4965155 + 0.0145206 * y ) * y;\n\tfloat b = 3.4175940 + ( 4.1616724 + y ) * y;\n\tfloat v = a / b;\n\tfloat theta_sintheta = ( x > 0.0 ) ? 
v : 0.5 * inversesqrt( max( 1.0 - x * x, 1e-7 ) ) - v;\n\treturn cross( v1, v2 ) * theta_sintheta;\n}\nvec3 LTC_Evaluate( const in vec3 N, const in vec3 V, const in vec3 P, const in mat3 mInv, const in vec3 rectCoords[ 4 ] ) {\n\tvec3 v1 = rectCoords[ 1 ] - rectCoords[ 0 ];\n\tvec3 v2 = rectCoords[ 3 ] - rectCoords[ 0 ];\n\tvec3 lightNormal = cross( v1, v2 );\n\tif( dot( lightNormal, P - rectCoords[ 0 ] ) < 0.0 ) return vec3( 0.0 );\n\tvec3 T1, T2;\n\tT1 = normalize( V - N * dot( V, N ) );\n\tT2 = - cross( N, T1 );\n\tmat3 mat = mInv * transposeMat3( mat3( T1, T2, N ) );\n\tvec3 coords[ 4 ];\n\tcoords[ 0 ] = mat * ( rectCoords[ 0 ] - P );\n\tcoords[ 1 ] = mat * ( rectCoords[ 1 ] - P );\n\tcoords[ 2 ] = mat * ( rectCoords[ 2 ] - P );\n\tcoords[ 3 ] = mat * ( rectCoords[ 3 ] - P );\n\tcoords[ 0 ] = normalize( coords[ 0 ] );\n\tcoords[ 1 ] = normalize( coords[ 1 ] );\n\tcoords[ 2 ] = normalize( coords[ 2 ] );\n\tcoords[ 3 ] = normalize( coords[ 3 ] );\n\tvec3 vectorFormFactor = vec3( 0.0 );\n\tvectorFormFactor += LTC_EdgeVectorFormFactor( coords[ 0 ], coords[ 1 ] );\n\tvectorFormFactor += LTC_EdgeVectorFormFactor( coords[ 1 ], coords[ 2 ] );\n\tvectorFormFactor += LTC_EdgeVectorFormFactor( coords[ 2 ], coords[ 3 ] );\n\tvectorFormFactor += LTC_EdgeVectorFormFactor( coords[ 3 ], coords[ 0 ] );\n\tfloat result = LTC_ClippedSphereFormFactor( vectorFormFactor );\n\treturn vec3( result );\n}\nvec3 BRDF_Specular_GGX_Environment( const in GeometricContext geometry, const in vec3 specularColor, const in float roughness ) {\n\tfloat dotNV = saturate( dot( geometry.normal, geometry.viewDir ) );\n\tconst vec4 c0 = vec4( - 1, - 0.0275, - 0.572, 0.022 );\n\tconst vec4 c1 = vec4( 1, 0.0425, 1.04, - 0.04 );\n\tvec4 r = roughness * c0 + c1;\n\tfloat a004 = min( r.x * r.x, exp2( - 9.28 * dotNV ) ) * r.x + r.y;\n\tvec2 AB = vec2( -1.04, 1.04 ) * a004 + r.zw;\n\treturn specularColor * AB.x + AB.y;\n}\nfloat G_BlinnPhong_Implicit( ) {\n\treturn 0.25;\n}\nfloat D_BlinnPhong( const in 
float shininess, const in float dotNH ) {\n\treturn RECIPROCAL_PI * ( shininess * 0.5 + 1.0 ) * pow( dotNH, shininess );\n}\nvec3 BRDF_Specular_BlinnPhong( const in IncidentLight incidentLight, const in GeometricContext geometry, const in vec3 specularColor, const in float shininess ) {\n\tvec3 halfDir = normalize( incidentLight.direction + geometry.viewDir );\n\tfloat dotNH = saturate( dot( geometry.normal, halfDir ) );\n\tfloat dotLH = saturate( dot( incidentLight.direction, halfDir ) );\n\tvec3 F = F_Schlick( specularColor, dotLH );\n\tfloat G = G_BlinnPhong_Implicit( );\n\tfloat D = D_BlinnPhong( shininess, dotNH );\n\treturn F * ( G * D );\n}\nfloat GGXRoughnessToBlinnExponent( const in float ggxRoughness ) {\n\treturn ( 2.0 / pow2( ggxRoughness + 0.0001 ) - 2.0 );\n}\nfloat BlinnExponentToGGXRoughness( const in float blinnExponent ) {\n\treturn sqrt( 2.0 / ( blinnExponent + 2.0 ) );\n}\n",
+bumpmap_pars_fragment:"#ifdef USE_BUMPMAP\n\tuniform sampler2D bumpMap;\n\tuniform float bumpScale;\n\tvec2 dHdxy_fwd() {\n\t\tvec2 dSTdx = dFdx( vUv );\n\t\tvec2 dSTdy = dFdy( vUv );\n\t\tfloat Hll = bumpScale * texture2D( bumpMap, vUv ).x;\n\t\tfloat dBx = bumpScale * texture2D( bumpMap, vUv + dSTdx ).x - Hll;\n\t\tfloat dBy = bumpScale * texture2D( bumpMap, vUv + dSTdy ).x - Hll;\n\t\treturn vec2( dBx, dBy );\n\t}\n\tvec3 perturbNormalArb( vec3 surf_pos, vec3 surf_norm, vec2 dHdxy ) {\n\t\tvec3 vSigmaX = vec3( dFdx( surf_pos.x ), dFdx( surf_pos.y ), dFdx( surf_pos.z ) );\n\t\tvec3 vSigmaY = vec3( dFdy( surf_pos.x ), dFdy( surf_pos.y ), dFdy( surf_pos.z ) );\n\t\tvec3 vN = surf_norm;\n\t\tvec3 R1 = cross( vSigmaY, vN );\n\t\tvec3 R2 = cross( vN, vSigmaX );\n\t\tfloat fDet = dot( vSigmaX, R1 );\n\t\tfDet *= ( float( gl_FrontFacing ) * 2.0 - 1.0 );\n\t\tvec3 vGrad = sign( fDet ) * ( dHdxy.x * R1 + dHdxy.y * R2 );\n\t\treturn normalize( abs( fDet ) * surf_norm - vGrad );\n\t}\n#endif\n",
+clipping_planes_fragment:"#if NUM_CLIPPING_PLANES > 0\n\tvec4 plane;\n\t#pragma unroll_loop\n\tfor ( int i = 0; i < UNION_CLIPPING_PLANES; i ++ ) {\n\t\tplane = clippingPlanes[ i ];\n\t\tif ( dot( vViewPosition, plane.xyz ) > plane.w ) discard;\n\t}\n\t#if UNION_CLIPPING_PLANES < NUM_CLIPPING_PLANES\n\t\tbool clipped = true;\n\t\t#pragma unroll_loop\n\t\tfor ( int i = UNION_CLIPPING_PLANES; i < NUM_CLIPPING_PLANES; i ++ ) {\n\t\t\tplane = clippingPlanes[ i ];\n\t\t\tclipped = ( dot( vViewPosition, plane.xyz ) > plane.w ) && clipped;\n\t\t}\n\t\tif ( clipped ) discard;\n\t#endif\n#endif\n",
+clipping_planes_pars_fragment:"#if NUM_CLIPPING_PLANES > 0\n\t#if ! defined( PHYSICAL ) && ! defined( PHONG ) && ! defined( MATCAP )\n\t\tvarying vec3 vViewPosition;\n\t#endif\n\tuniform vec4 clippingPlanes[ NUM_CLIPPING_PLANES ];\n#endif\n",clipping_planes_pars_vertex:"#if NUM_CLIPPING_PLANES > 0 && ! defined( PHYSICAL ) && ! defined( PHONG ) && ! defined( MATCAP )\n\tvarying vec3 vViewPosition;\n#endif\n",clipping_planes_vertex:"#if NUM_CLIPPING_PLANES > 0 && ! defined( PHYSICAL ) && ! defined( PHONG ) && ! defined( MATCAP )\n\tvViewPosition = - mvPosition.xyz;\n#endif\n",
+color_fragment:"#ifdef USE_COLOR\n\tdiffuseColor.rgb *= vColor;\n#endif",color_pars_fragment:"#ifdef USE_COLOR\n\tvarying vec3 vColor;\n#endif\n",color_pars_vertex:"#ifdef USE_COLOR\n\tvarying vec3 vColor;\n#endif",color_vertex:"#ifdef USE_COLOR\n\tvColor.xyz = color.xyz;\n#endif",common:"#define PI 3.14159265359\n#define PI2 6.28318530718\n#define PI_HALF 1.5707963267949\n#define RECIPROCAL_PI 0.31830988618\n#define RECIPROCAL_PI2 0.15915494\n#define LOG2 1.442695\n#define EPSILON 1e-6\n#define saturate(a) clamp( a, 0.0, 1.0 )\n#define whiteCompliment(a) ( 1.0 - saturate( a ) )\nfloat pow2( const in float x ) { return x*x; }\nfloat pow3( const in float x ) { return x*x*x; }\nfloat pow4( const in float x ) { float x2 = x*x; return x2*x2; }\nfloat average( const in vec3 color ) { return dot( color, vec3( 0.3333 ) ); }\nhighp float rand( const in vec2 uv ) {\n\tconst highp float a = 12.9898, b = 78.233, c = 43758.5453;\n\thighp float dt = dot( uv.xy, vec2( a,b ) ), sn = mod( dt, PI );\n\treturn fract(sin(sn) * c);\n}\nstruct IncidentLight {\n\tvec3 color;\n\tvec3 direction;\n\tbool visible;\n};\nstruct ReflectedLight {\n\tvec3 directDiffuse;\n\tvec3 directSpecular;\n\tvec3 indirectDiffuse;\n\tvec3 indirectSpecular;\n};\nstruct GeometricContext {\n\tvec3 position;\n\tvec3 normal;\n\tvec3 viewDir;\n};\nvec3 transformDirection( in vec3 dir, in mat4 matrix ) {\n\treturn normalize( ( matrix * vec4( dir, 0.0 ) ).xyz );\n}\nvec3 inverseTransformDirection( in vec3 dir, in mat4 matrix ) {\n\treturn normalize( ( vec4( dir, 0.0 ) * matrix ).xyz );\n}\nvec3 projectOnPlane(in vec3 point, in vec3 pointOnPlane, in vec3 planeNormal ) {\n\tfloat distance = dot( planeNormal, point - pointOnPlane );\n\treturn - distance * planeNormal + point;\n}\nfloat sideOfPlane( in vec3 point, in vec3 pointOnPlane, in vec3 planeNormal ) {\n\treturn sign( dot( point - pointOnPlane, planeNormal ) );\n}\nvec3 linePlaneIntersect( in vec3 pointOnLine, in vec3 lineDirection, in vec3 pointOnPlane, in vec3 
planeNormal ) {\n\treturn lineDirection * ( dot( planeNormal, pointOnPlane - pointOnLine ) / dot( planeNormal, lineDirection ) ) + pointOnLine;\n}\nmat3 transposeMat3( const in mat3 m ) {\n\tmat3 tmp;\n\ttmp[ 0 ] = vec3( m[ 0 ].x, m[ 1 ].x, m[ 2 ].x );\n\ttmp[ 1 ] = vec3( m[ 0 ].y, m[ 1 ].y, m[ 2 ].y );\n\ttmp[ 2 ] = vec3( m[ 0 ].z, m[ 1 ].z, m[ 2 ].z );\n\treturn tmp;\n}\nfloat linearToRelativeLuminance( const in vec3 color ) {\n\tvec3 weights = vec3( 0.2126, 0.7152, 0.0722 );\n\treturn dot( weights, color.rgb );\n}\n",
+cube_uv_reflection_fragment:"#ifdef ENVMAP_TYPE_CUBE_UV\n#define cubeUV_textureSize (1024.0)\nint getFaceFromDirection(vec3 direction) {\n\tvec3 absDirection = abs(direction);\n\tint face = -1;\n\tif( absDirection.x > absDirection.z ) {\n\t\tif(absDirection.x > absDirection.y )\n\t\t\tface = direction.x > 0.0 ? 0 : 3;\n\t\telse\n\t\t\tface = direction.y > 0.0 ? 1 : 4;\n\t}\n\telse {\n\t\tif(absDirection.z > absDirection.y )\n\t\t\tface = direction.z > 0.0 ? 2 : 5;\n\t\telse\n\t\t\tface = direction.y > 0.0 ? 1 : 4;\n\t}\n\treturn face;\n}\n#define cubeUV_maxLods1 (log2(cubeUV_textureSize*0.25) - 1.0)\n#define cubeUV_rangeClamp (exp2((6.0 - 1.0) * 2.0))\nvec2 MipLevelInfo( vec3 vec, float roughnessLevel, float roughness ) {\n\tfloat scale = exp2(cubeUV_maxLods1 - roughnessLevel);\n\tfloat dxRoughness = dFdx(roughness);\n\tfloat dyRoughness = dFdy(roughness);\n\tvec3 dx = dFdx( vec * scale * dxRoughness );\n\tvec3 dy = dFdy( vec * scale * dyRoughness );\n\tfloat d = max( dot( dx, dx ), dot( dy, dy ) );\n\td = clamp(d, 1.0, cubeUV_rangeClamp);\n\tfloat mipLevel = 0.5 * log2(d);\n\treturn vec2(floor(mipLevel), fract(mipLevel));\n}\n#define cubeUV_maxLods2 (log2(cubeUV_textureSize*0.25) - 2.0)\n#define cubeUV_rcpTextureSize (1.0 / cubeUV_textureSize)\nvec2 getCubeUV(vec3 direction, float roughnessLevel, float mipLevel) {\n\tmipLevel = roughnessLevel > cubeUV_maxLods2 - 3.0 ? 0.0 : mipLevel;\n\tfloat a = 16.0 * cubeUV_rcpTextureSize;\n\tvec2 exp2_packed = exp2( vec2( roughnessLevel, mipLevel ) );\n\tvec2 rcp_exp2_packed = vec2( 1.0 ) / exp2_packed;\n\tfloat powScale = exp2_packed.x * exp2_packed.y;\n\tfloat scale = rcp_exp2_packed.x * rcp_exp2_packed.y * 0.25;\n\tfloat mipOffset = 0.75*(1.0 - rcp_exp2_packed.y) * rcp_exp2_packed.x;\n\tbool bRes = mipLevel == 0.0;\n\tscale = bRes && (scale < a) ? 
a : scale;\n\tvec3 r;\n\tvec2 offset;\n\tint face = getFaceFromDirection(direction);\n\tfloat rcpPowScale = 1.0 / powScale;\n\tif( face == 0) {\n\t\tr = vec3(direction.x, -direction.z, direction.y);\n\t\toffset = vec2(0.0+mipOffset,0.75 * rcpPowScale);\n\t\toffset.y = bRes && (offset.y < 2.0*a) ? a : offset.y;\n\t}\n\telse if( face == 1) {\n\t\tr = vec3(direction.y, direction.x, direction.z);\n\t\toffset = vec2(scale+mipOffset, 0.75 * rcpPowScale);\n\t\toffset.y = bRes && (offset.y < 2.0*a) ? a : offset.y;\n\t}\n\telse if( face == 2) {\n\t\tr = vec3(direction.z, direction.x, direction.y);\n\t\toffset = vec2(2.0*scale+mipOffset, 0.75 * rcpPowScale);\n\t\toffset.y = bRes && (offset.y < 2.0*a) ? a : offset.y;\n\t}\n\telse if( face == 3) {\n\t\tr = vec3(direction.x, direction.z, direction.y);\n\t\toffset = vec2(0.0+mipOffset,0.5 * rcpPowScale);\n\t\toffset.y = bRes && (offset.y < 2.0*a) ? 0.0 : offset.y;\n\t}\n\telse if( face == 4) {\n\t\tr = vec3(direction.y, direction.x, -direction.z);\n\t\toffset = vec2(scale+mipOffset, 0.5 * rcpPowScale);\n\t\toffset.y = bRes && (offset.y < 2.0*a) ? 0.0 : offset.y;\n\t}\n\telse {\n\t\tr = vec3(direction.z, -direction.x, direction.y);\n\t\toffset = vec2(2.0*scale+mipOffset, 0.5 * rcpPowScale);\n\t\toffset.y = bRes && (offset.y < 2.0*a) ? 
0.0 : offset.y;\n\t}\n\tr = normalize(r);\n\tfloat texelOffset = 0.5 * cubeUV_rcpTextureSize;\n\tvec2 s = ( r.yz / abs( r.x ) + vec2( 1.0 ) ) * 0.5;\n\tvec2 base = offset + vec2( texelOffset );\n\treturn base + s * ( scale - 2.0 * texelOffset );\n}\n#define cubeUV_maxLods3 (log2(cubeUV_textureSize*0.25) - 3.0)\nvec4 textureCubeUV( sampler2D envMap, vec3 reflectedDirection, float roughness ) {\n\tfloat roughnessVal = roughness* cubeUV_maxLods3;\n\tfloat r1 = floor(roughnessVal);\n\tfloat r2 = r1 + 1.0;\n\tfloat t = fract(roughnessVal);\n\tvec2 mipInfo = MipLevelInfo(reflectedDirection, r1, roughness);\n\tfloat s = mipInfo.y;\n\tfloat level0 = mipInfo.x;\n\tfloat level1 = level0 + 1.0;\n\tlevel1 = level1 > 5.0 ? 5.0 : level1;\n\tlevel0 += min( floor( s + 0.5 ), 5.0 );\n\tvec2 uv_10 = getCubeUV(reflectedDirection, r1, level0);\n\tvec4 color10 = envMapTexelToLinear(texture2D(envMap, uv_10));\n\tvec2 uv_20 = getCubeUV(reflectedDirection, r2, level0);\n\tvec4 color20 = envMapTexelToLinear(texture2D(envMap, uv_20));\n\tvec4 result = mix(color10, color20, t);\n\treturn vec4(result.rgb, 1.0);\n}\n#endif\n",
+defaultnormal_vertex:"vec3 transformedNormal = normalMatrix * objectNormal;\n#ifdef FLIP_SIDED\n\ttransformedNormal = - transformedNormal;\n#endif\n",displacementmap_pars_vertex:"#ifdef USE_DISPLACEMENTMAP\n\tuniform sampler2D displacementMap;\n\tuniform float displacementScale;\n\tuniform float displacementBias;\n#endif\n",displacementmap_vertex:"#ifdef USE_DISPLACEMENTMAP\n\ttransformed += normalize( objectNormal ) * ( texture2D( displacementMap, uv ).x * displacementScale + displacementBias );\n#endif\n",
+emissivemap_fragment:"#ifdef USE_EMISSIVEMAP\n\tvec4 emissiveColor = texture2D( emissiveMap, vUv );\n\temissiveColor.rgb = emissiveMapTexelToLinear( emissiveColor ).rgb;\n\ttotalEmissiveRadiance *= emissiveColor.rgb;\n#endif\n",emissivemap_pars_fragment:"#ifdef USE_EMISSIVEMAP\n\tuniform sampler2D emissiveMap;\n#endif\n",encodings_fragment:" gl_FragColor = linearToOutputTexel( gl_FragColor );\n",encodings_pars_fragment:"\nvec4 LinearToLinear( in vec4 value ) {\n\treturn value;\n}\nvec4 GammaToLinear( in vec4 value, in float gammaFactor ) {\n\treturn vec4( pow( value.rgb, vec3( gammaFactor ) ), value.a );\n}\nvec4 LinearToGamma( in vec4 value, in float gammaFactor ) {\n\treturn vec4( pow( value.rgb, vec3( 1.0 / gammaFactor ) ), value.a );\n}\nvec4 sRGBToLinear( in vec4 value ) {\n\treturn vec4( mix( pow( value.rgb * 0.9478672986 + vec3( 0.0521327014 ), vec3( 2.4 ) ), value.rgb * 0.0773993808, vec3( lessThanEqual( value.rgb, vec3( 0.04045 ) ) ) ), value.a );\n}\nvec4 LinearTosRGB( in vec4 value ) {\n\treturn vec4( mix( pow( value.rgb, vec3( 0.41666 ) ) * 1.055 - vec3( 0.055 ), value.rgb * 12.92, vec3( lessThanEqual( value.rgb, vec3( 0.0031308 ) ) ) ), value.a );\n}\nvec4 RGBEToLinear( in vec4 value ) {\n\treturn vec4( value.rgb * exp2( value.a * 255.0 - 128.0 ), 1.0 );\n}\nvec4 LinearToRGBE( in vec4 value ) {\n\tfloat maxComponent = max( max( value.r, value.g ), value.b );\n\tfloat fExp = clamp( ceil( log2( maxComponent ) ), -128.0, 127.0 );\n\treturn vec4( value.rgb / exp2( fExp ), ( fExp + 128.0 ) / 255.0 );\n}\nvec4 RGBMToLinear( in vec4 value, in float maxRange ) {\n\treturn vec4( value.rgb * value.a * maxRange, 1.0 );\n}\nvec4 LinearToRGBM( in vec4 value, in float maxRange ) {\n\tfloat maxRGB = max( value.r, max( value.g, value.b ) );\n\tfloat M = clamp( maxRGB / maxRange, 0.0, 1.0 );\n\tM = ceil( M * 255.0 ) / 255.0;\n\treturn vec4( value.rgb / ( M * maxRange ), M );\n}\nvec4 RGBDToLinear( in vec4 value, in float maxRange ) {\n\treturn vec4( value.rgb * ( ( 
maxRange / 255.0 ) / value.a ), 1.0 );\n}\nvec4 LinearToRGBD( in vec4 value, in float maxRange ) {\n\tfloat maxRGB = max( value.r, max( value.g, value.b ) );\n\tfloat D = max( maxRange / maxRGB, 1.0 );\n\tD = min( floor( D ) / 255.0, 1.0 );\n\treturn vec4( value.rgb * ( D * ( 255.0 / maxRange ) ), D );\n}\nconst mat3 cLogLuvM = mat3( 0.2209, 0.3390, 0.4184, 0.1138, 0.6780, 0.7319, 0.0102, 0.1130, 0.2969 );\nvec4 LinearToLogLuv( in vec4 value ) {\n\tvec3 Xp_Y_XYZp = value.rgb * cLogLuvM;\n\tXp_Y_XYZp = max( Xp_Y_XYZp, vec3( 1e-6, 1e-6, 1e-6 ) );\n\tvec4 vResult;\n\tvResult.xy = Xp_Y_XYZp.xy / Xp_Y_XYZp.z;\n\tfloat Le = 2.0 * log2(Xp_Y_XYZp.y) + 127.0;\n\tvResult.w = fract( Le );\n\tvResult.z = ( Le - ( floor( vResult.w * 255.0 ) ) / 255.0 ) / 255.0;\n\treturn vResult;\n}\nconst mat3 cLogLuvInverseM = mat3( 6.0014, -2.7008, -1.7996, -1.3320, 3.1029, -5.7721, 0.3008, -1.0882, 5.6268 );\nvec4 LogLuvToLinear( in vec4 value ) {\n\tfloat Le = value.z * 255.0 + value.w;\n\tvec3 Xp_Y_XYZp;\n\tXp_Y_XYZp.y = exp2( ( Le - 127.0 ) / 2.0 );\n\tXp_Y_XYZp.z = Xp_Y_XYZp.y / value.y;\n\tXp_Y_XYZp.x = value.x * Xp_Y_XYZp.z;\n\tvec3 vRGB = Xp_Y_XYZp.rgb * cLogLuvInverseM;\n\treturn vec4( max( vRGB, 0.0 ), 1.0 );\n}\n",
+envmap_fragment:"#ifdef USE_ENVMAP\n\t#if defined( USE_BUMPMAP ) || defined( USE_NORMALMAP ) || defined( PHONG )\n\t\tvec3 cameraToVertex = normalize( vWorldPosition - cameraPosition );\n\t\tvec3 worldNormal = inverseTransformDirection( normal, viewMatrix );\n\t\t#ifdef ENVMAP_MODE_REFLECTION\n\t\t\tvec3 reflectVec = reflect( cameraToVertex, worldNormal );\n\t\t#else\n\t\t\tvec3 reflectVec = refract( cameraToVertex, worldNormal, refractionRatio );\n\t\t#endif\n\t#else\n\t\tvec3 reflectVec = vReflect;\n\t#endif\n\t#ifdef ENVMAP_TYPE_CUBE\n\t\tvec4 envColor = textureCube( envMap, vec3( flipEnvMap * reflectVec.x, reflectVec.yz ) );\n\t#elif defined( ENVMAP_TYPE_EQUIREC )\n\t\tvec2 sampleUV;\n\t\treflectVec = normalize( reflectVec );\n\t\tsampleUV.y = asin( clamp( reflectVec.y, - 1.0, 1.0 ) ) * RECIPROCAL_PI + 0.5;\n\t\tsampleUV.x = atan( reflectVec.z, reflectVec.x ) * RECIPROCAL_PI2 + 0.5;\n\t\tvec4 envColor = texture2D( envMap, sampleUV );\n\t#elif defined( ENVMAP_TYPE_SPHERE )\n\t\treflectVec = normalize( reflectVec );\n\t\tvec3 reflectView = normalize( ( viewMatrix * vec4( reflectVec, 0.0 ) ).xyz + vec3( 0.0, 0.0, 1.0 ) );\n\t\tvec4 envColor = texture2D( envMap, reflectView.xy * 0.5 + 0.5 );\n\t#else\n\t\tvec4 envColor = vec4( 0.0 );\n\t#endif\n\tenvColor = envMapTexelToLinear( envColor );\n\t#ifdef ENVMAP_BLENDING_MULTIPLY\n\t\toutgoingLight = mix( outgoingLight, outgoingLight * envColor.xyz, specularStrength * reflectivity );\n\t#elif defined( ENVMAP_BLENDING_MIX )\n\t\toutgoingLight = mix( outgoingLight, envColor.xyz, specularStrength * reflectivity );\n\t#elif defined( ENVMAP_BLENDING_ADD )\n\t\toutgoingLight += envColor.xyz * specularStrength * reflectivity;\n\t#endif\n#endif\n",
+envmap_pars_fragment:"#if defined( USE_ENVMAP ) || defined( PHYSICAL )\n\tuniform float reflectivity;\n\tuniform float envMapIntensity;\n#endif\n#ifdef USE_ENVMAP\n\t#if ! defined( PHYSICAL ) && ( defined( USE_BUMPMAP ) || defined( USE_NORMALMAP ) || defined( PHONG ) )\n\t\tvarying vec3 vWorldPosition;\n\t#endif\n\t#ifdef ENVMAP_TYPE_CUBE\n\t\tuniform samplerCube envMap;\n\t#else\n\t\tuniform sampler2D envMap;\n\t#endif\n\tuniform float flipEnvMap;\n\tuniform int maxMipLevel;\n\t#if defined( USE_BUMPMAP ) || defined( USE_NORMALMAP ) || defined( PHONG ) || defined( PHYSICAL )\n\t\tuniform float refractionRatio;\n\t#else\n\t\tvarying vec3 vReflect;\n\t#endif\n#endif\n",
+envmap_pars_vertex:"#ifdef USE_ENVMAP\n\t#if defined( USE_BUMPMAP ) || defined( USE_NORMALMAP ) || defined( PHONG )\n\t\tvarying vec3 vWorldPosition;\n\t#else\n\t\tvarying vec3 vReflect;\n\t\tuniform float refractionRatio;\n\t#endif\n#endif\n",envmap_physical_pars_fragment:"#if defined( USE_ENVMAP ) && defined( PHYSICAL )\n\tvec3 getLightProbeIndirectIrradiance( const in GeometricContext geometry, const in int maxMIPLevel ) {\n\t\tvec3 worldNormal = inverseTransformDirection( geometry.normal, viewMatrix );\n\t\t#ifdef ENVMAP_TYPE_CUBE\n\t\t\tvec3 queryVec = vec3( flipEnvMap * worldNormal.x, worldNormal.yz );\n\t\t\t#ifdef TEXTURE_LOD_EXT\n\t\t\t\tvec4 envMapColor = textureCubeLodEXT( envMap, queryVec, float( maxMIPLevel ) );\n\t\t\t#else\n\t\t\t\tvec4 envMapColor = textureCube( envMap, queryVec, float( maxMIPLevel ) );\n\t\t\t#endif\n\t\t\tenvMapColor.rgb = envMapTexelToLinear( envMapColor ).rgb;\n\t\t#elif defined( ENVMAP_TYPE_CUBE_UV )\n\t\t\tvec3 queryVec = vec3( flipEnvMap * worldNormal.x, worldNormal.yz );\n\t\t\tvec4 envMapColor = textureCubeUV( envMap, queryVec, 1.0 );\n\t\t#else\n\t\t\tvec4 envMapColor = vec4( 0.0 );\n\t\t#endif\n\t\treturn PI * envMapColor.rgb * envMapIntensity;\n\t}\n\tfloat getSpecularMIPLevel( const in float blinnShininessExponent, const in int maxMIPLevel ) {\n\t\tfloat maxMIPLevelScalar = float( maxMIPLevel );\n\t\tfloat desiredMIPLevel = maxMIPLevelScalar + 0.79248 - 0.5 * log2( pow2( blinnShininessExponent ) + 1.0 );\n\t\treturn clamp( desiredMIPLevel, 0.0, maxMIPLevelScalar );\n\t}\n\tvec3 getLightProbeIndirectRadiance( const in GeometricContext geometry, const in float blinnShininessExponent, const in int maxMIPLevel ) {\n\t\t#ifdef ENVMAP_MODE_REFLECTION\n\t\t\tvec3 reflectVec = reflect( -geometry.viewDir, geometry.normal );\n\t\t#else\n\t\t\tvec3 reflectVec = refract( -geometry.viewDir, geometry.normal, refractionRatio );\n\t\t#endif\n\t\treflectVec = inverseTransformDirection( reflectVec, viewMatrix );\n\t\tfloat 
specularMIPLevel = getSpecularMIPLevel( blinnShininessExponent, maxMIPLevel );\n\t\t#ifdef ENVMAP_TYPE_CUBE\n\t\t\tvec3 queryReflectVec = vec3( flipEnvMap * reflectVec.x, reflectVec.yz );\n\t\t\t#ifdef TEXTURE_LOD_EXT\n\t\t\t\tvec4 envMapColor = textureCubeLodEXT( envMap, queryReflectVec, specularMIPLevel );\n\t\t\t#else\n\t\t\t\tvec4 envMapColor = textureCube( envMap, queryReflectVec, specularMIPLevel );\n\t\t\t#endif\n\t\t\tenvMapColor.rgb = envMapTexelToLinear( envMapColor ).rgb;\n\t\t#elif defined( ENVMAP_TYPE_CUBE_UV )\n\t\t\tvec3 queryReflectVec = vec3( flipEnvMap * reflectVec.x, reflectVec.yz );\n\t\t\tvec4 envMapColor = textureCubeUV( envMap, queryReflectVec, BlinnExponentToGGXRoughness(blinnShininessExponent ));\n\t\t#elif defined( ENVMAP_TYPE_EQUIREC )\n\t\t\tvec2 sampleUV;\n\t\t\tsampleUV.y = asin( clamp( reflectVec.y, - 1.0, 1.0 ) ) * RECIPROCAL_PI + 0.5;\n\t\t\tsampleUV.x = atan( reflectVec.z, reflectVec.x ) * RECIPROCAL_PI2 + 0.5;\n\t\t\t#ifdef TEXTURE_LOD_EXT\n\t\t\t\tvec4 envMapColor = texture2DLodEXT( envMap, sampleUV, specularMIPLevel );\n\t\t\t#else\n\t\t\t\tvec4 envMapColor = texture2D( envMap, sampleUV, specularMIPLevel );\n\t\t\t#endif\n\t\t\tenvMapColor.rgb = envMapTexelToLinear( envMapColor ).rgb;\n\t\t#elif defined( ENVMAP_TYPE_SPHERE )\n\t\t\tvec3 reflectView = normalize( ( viewMatrix * vec4( reflectVec, 0.0 ) ).xyz + vec3( 0.0,0.0,1.0 ) );\n\t\t\t#ifdef TEXTURE_LOD_EXT\n\t\t\t\tvec4 envMapColor = texture2DLodEXT( envMap, reflectView.xy * 0.5 + 0.5, specularMIPLevel );\n\t\t\t#else\n\t\t\t\tvec4 envMapColor = texture2D( envMap, reflectView.xy * 0.5 + 0.5, specularMIPLevel );\n\t\t\t#endif\n\t\t\tenvMapColor.rgb = envMapTexelToLinear( envMapColor ).rgb;\n\t\t#endif\n\t\treturn envMapColor.rgb * envMapIntensity;\n\t}\n#endif\n",
+envmap_vertex:"#ifdef USE_ENVMAP\n\t#if defined( USE_BUMPMAP ) || defined( USE_NORMALMAP ) || defined( PHONG )\n\t\tvWorldPosition = worldPosition.xyz;\n\t#else\n\t\tvec3 cameraToVertex = normalize( worldPosition.xyz - cameraPosition );\n\t\tvec3 worldNormal = inverseTransformDirection( transformedNormal, viewMatrix );\n\t\t#ifdef ENVMAP_MODE_REFLECTION\n\t\t\tvReflect = reflect( cameraToVertex, worldNormal );\n\t\t#else\n\t\t\tvReflect = refract( cameraToVertex, worldNormal, refractionRatio );\n\t\t#endif\n\t#endif\n#endif\n",
+fog_vertex:"#ifdef USE_FOG\n\tfogDepth = -mvPosition.z;\n#endif\n",fog_pars_vertex:"#ifdef USE_FOG\n\tvarying float fogDepth;\n#endif\n",fog_fragment:"#ifdef USE_FOG\n\t#ifdef FOG_EXP2\n\t\tfloat fogFactor = whiteCompliment( exp2( - fogDensity * fogDensity * fogDepth * fogDepth * LOG2 ) );\n\t#else\n\t\tfloat fogFactor = smoothstep( fogNear, fogFar, fogDepth );\n\t#endif\n\tgl_FragColor.rgb = mix( gl_FragColor.rgb, fogColor, fogFactor );\n#endif\n",fog_pars_fragment:"#ifdef USE_FOG\n\tuniform vec3 fogColor;\n\tvarying float fogDepth;\n\t#ifdef FOG_EXP2\n\t\tuniform float fogDensity;\n\t#else\n\t\tuniform float fogNear;\n\t\tuniform float fogFar;\n\t#endif\n#endif\n",
+gradientmap_pars_fragment:"#ifdef TOON\n\tuniform sampler2D gradientMap;\n\tvec3 getGradientIrradiance( vec3 normal, vec3 lightDirection ) {\n\t\tfloat dotNL = dot( normal, lightDirection );\n\t\tvec2 coord = vec2( dotNL * 0.5 + 0.5, 0.0 );\n\t\t#ifdef USE_GRADIENTMAP\n\t\t\treturn texture2D( gradientMap, coord ).rgb;\n\t\t#else\n\t\t\treturn ( coord.x < 0.7 ) ? vec3( 0.7 ) : vec3( 1.0 );\n\t\t#endif\n\t}\n#endif\n",lightmap_fragment:"#ifdef USE_LIGHTMAP\n\treflectedLight.indirectDiffuse += PI * texture2D( lightMap, vUv2 ).xyz * lightMapIntensity;\n#endif\n",
+lightmap_pars_fragment:"#ifdef USE_LIGHTMAP\n\tuniform sampler2D lightMap;\n\tuniform float lightMapIntensity;\n#endif",lights_lambert_vertex:"vec3 diffuse = vec3( 1.0 );\nGeometricContext geometry;\ngeometry.position = mvPosition.xyz;\ngeometry.normal = normalize( transformedNormal );\ngeometry.viewDir = normalize( -mvPosition.xyz );\nGeometricContext backGeometry;\nbackGeometry.position = geometry.position;\nbackGeometry.normal = -geometry.normal;\nbackGeometry.viewDir = geometry.viewDir;\nvLightFront = vec3( 0.0 );\n#ifdef DOUBLE_SIDED\n\tvLightBack = vec3( 0.0 );\n#endif\nIncidentLight directLight;\nfloat dotNL;\nvec3 directLightColor_Diffuse;\n#if NUM_POINT_LIGHTS > 0\n\t#pragma unroll_loop\n\tfor ( int i = 0; i < NUM_POINT_LIGHTS; i ++ ) {\n\t\tgetPointDirectLightIrradiance( pointLights[ i ], geometry, directLight );\n\t\tdotNL = dot( geometry.normal, directLight.direction );\n\t\tdirectLightColor_Diffuse = PI * directLight.color;\n\t\tvLightFront += saturate( dotNL ) * directLightColor_Diffuse;\n\t\t#ifdef DOUBLE_SIDED\n\t\t\tvLightBack += saturate( -dotNL ) * directLightColor_Diffuse;\n\t\t#endif\n\t}\n#endif\n#if NUM_SPOT_LIGHTS > 0\n\t#pragma unroll_loop\n\tfor ( int i = 0; i < NUM_SPOT_LIGHTS; i ++ ) {\n\t\tgetSpotDirectLightIrradiance( spotLights[ i ], geometry, directLight );\n\t\tdotNL = dot( geometry.normal, directLight.direction );\n\t\tdirectLightColor_Diffuse = PI * directLight.color;\n\t\tvLightFront += saturate( dotNL ) * directLightColor_Diffuse;\n\t\t#ifdef DOUBLE_SIDED\n\t\t\tvLightBack += saturate( -dotNL ) * directLightColor_Diffuse;\n\t\t#endif\n\t}\n#endif\n#if NUM_DIR_LIGHTS > 0\n\t#pragma unroll_loop\n\tfor ( int i = 0; i < NUM_DIR_LIGHTS; i ++ ) {\n\t\tgetDirectionalDirectLightIrradiance( directionalLights[ i ], geometry, directLight );\n\t\tdotNL = dot( geometry.normal, directLight.direction );\n\t\tdirectLightColor_Diffuse = PI * directLight.color;\n\t\tvLightFront += saturate( dotNL ) * directLightColor_Diffuse;\n\t\t#ifdef 
DOUBLE_SIDED\n\t\t\tvLightBack += saturate( -dotNL ) * directLightColor_Diffuse;\n\t\t#endif\n\t}\n#endif\n#if NUM_HEMI_LIGHTS > 0\n\t#pragma unroll_loop\n\tfor ( int i = 0; i < NUM_HEMI_LIGHTS; i ++ ) {\n\t\tvLightFront += getHemisphereLightIrradiance( hemisphereLights[ i ], geometry );\n\t\t#ifdef DOUBLE_SIDED\n\t\t\tvLightBack += getHemisphereLightIrradiance( hemisphereLights[ i ], backGeometry );\n\t\t#endif\n\t}\n#endif\n",
+lights_pars_begin:"uniform vec3 ambientLightColor;\nvec3 getAmbientLightIrradiance( const in vec3 ambientLightColor ) {\n\tvec3 irradiance = ambientLightColor;\n\t#ifndef PHYSICALLY_CORRECT_LIGHTS\n\t\tirradiance *= PI;\n\t#endif\n\treturn irradiance;\n}\n#if NUM_DIR_LIGHTS > 0\n\tstruct DirectionalLight {\n\t\tvec3 direction;\n\t\tvec3 color;\n\t\tint shadow;\n\t\tfloat shadowBias;\n\t\tfloat shadowRadius;\n\t\tvec2 shadowMapSize;\n\t};\n\tuniform DirectionalLight directionalLights[ NUM_DIR_LIGHTS ];\n\tvoid getDirectionalDirectLightIrradiance( const in DirectionalLight directionalLight, const in GeometricContext geometry, out IncidentLight directLight ) {\n\t\tdirectLight.color = directionalLight.color;\n\t\tdirectLight.direction = directionalLight.direction;\n\t\tdirectLight.visible = true;\n\t}\n#endif\n#if NUM_POINT_LIGHTS > 0\n\tstruct PointLight {\n\t\tvec3 position;\n\t\tvec3 color;\n\t\tfloat distance;\n\t\tfloat decay;\n\t\tint shadow;\n\t\tfloat shadowBias;\n\t\tfloat shadowRadius;\n\t\tvec2 shadowMapSize;\n\t\tfloat shadowCameraNear;\n\t\tfloat shadowCameraFar;\n\t};\n\tuniform PointLight pointLights[ NUM_POINT_LIGHTS ];\n\tvoid getPointDirectLightIrradiance( const in PointLight pointLight, const in GeometricContext geometry, out IncidentLight directLight ) {\n\t\tvec3 lVector = pointLight.position - geometry.position;\n\t\tdirectLight.direction = normalize( lVector );\n\t\tfloat lightDistance = length( lVector );\n\t\tdirectLight.color = pointLight.color;\n\t\tdirectLight.color *= punctualLightIntensityToIrradianceFactor( lightDistance, pointLight.distance, pointLight.decay );\n\t\tdirectLight.visible = ( directLight.color != vec3( 0.0 ) );\n\t}\n#endif\n#if NUM_SPOT_LIGHTS > 0\n\tstruct SpotLight {\n\t\tvec3 position;\n\t\tvec3 direction;\n\t\tvec3 color;\n\t\tfloat distance;\n\t\tfloat decay;\n\t\tfloat coneCos;\n\t\tfloat penumbraCos;\n\t\tint shadow;\n\t\tfloat shadowBias;\n\t\tfloat shadowRadius;\n\t\tvec2 shadowMapSize;\n\t};\n\tuniform 
SpotLight spotLights[ NUM_SPOT_LIGHTS ];\n\tvoid getSpotDirectLightIrradiance( const in SpotLight spotLight, const in GeometricContext geometry, out IncidentLight directLight ) {\n\t\tvec3 lVector = spotLight.position - geometry.position;\n\t\tdirectLight.direction = normalize( lVector );\n\t\tfloat lightDistance = length( lVector );\n\t\tfloat angleCos = dot( directLight.direction, spotLight.direction );\n\t\tif ( angleCos > spotLight.coneCos ) {\n\t\t\tfloat spotEffect = smoothstep( spotLight.coneCos, spotLight.penumbraCos, angleCos );\n\t\t\tdirectLight.color = spotLight.color;\n\t\t\tdirectLight.color *= spotEffect * punctualLightIntensityToIrradianceFactor( lightDistance, spotLight.distance, spotLight.decay );\n\t\t\tdirectLight.visible = true;\n\t\t} else {\n\t\t\tdirectLight.color = vec3( 0.0 );\n\t\t\tdirectLight.visible = false;\n\t\t}\n\t}\n#endif\n#if NUM_RECT_AREA_LIGHTS > 0\n\tstruct RectAreaLight {\n\t\tvec3 color;\n\t\tvec3 position;\n\t\tvec3 halfWidth;\n\t\tvec3 halfHeight;\n\t};\n\tuniform sampler2D ltc_1;\tuniform sampler2D ltc_2;\n\tuniform RectAreaLight rectAreaLights[ NUM_RECT_AREA_LIGHTS ];\n#endif\n#if NUM_HEMI_LIGHTS > 0\n\tstruct HemisphereLight {\n\t\tvec3 direction;\n\t\tvec3 skyColor;\n\t\tvec3 groundColor;\n\t};\n\tuniform HemisphereLight hemisphereLights[ NUM_HEMI_LIGHTS ];\n\tvec3 getHemisphereLightIrradiance( const in HemisphereLight hemiLight, const in GeometricContext geometry ) {\n\t\tfloat dotNL = dot( geometry.normal, hemiLight.direction );\n\t\tfloat hemiDiffuseWeight = 0.5 * dotNL + 0.5;\n\t\tvec3 irradiance = mix( hemiLight.groundColor, hemiLight.skyColor, hemiDiffuseWeight );\n\t\t#ifndef PHYSICALLY_CORRECT_LIGHTS\n\t\t\tirradiance *= PI;\n\t\t#endif\n\t\treturn irradiance;\n\t}\n#endif\n",
+lights_phong_fragment:"BlinnPhongMaterial material;\nmaterial.diffuseColor = diffuseColor.rgb;\nmaterial.specularColor = specular;\nmaterial.specularShininess = shininess;\nmaterial.specularStrength = specularStrength;\n",lights_phong_pars_fragment:"varying vec3 vViewPosition;\n#ifndef FLAT_SHADED\n\tvarying vec3 vNormal;\n#endif\nstruct BlinnPhongMaterial {\n\tvec3\tdiffuseColor;\n\tvec3\tspecularColor;\n\tfloat\tspecularShininess;\n\tfloat\tspecularStrength;\n};\nvoid RE_Direct_BlinnPhong( const in IncidentLight directLight, const in GeometricContext geometry, const in BlinnPhongMaterial material, inout ReflectedLight reflectedLight ) {\n\t#ifdef TOON\n\t\tvec3 irradiance = getGradientIrradiance( geometry.normal, directLight.direction ) * directLight.color;\n\t#else\n\t\tfloat dotNL = saturate( dot( geometry.normal, directLight.direction ) );\n\t\tvec3 irradiance = dotNL * directLight.color;\n\t#endif\n\t#ifndef PHYSICALLY_CORRECT_LIGHTS\n\t\tirradiance *= PI;\n\t#endif\n\treflectedLight.directDiffuse += irradiance * BRDF_Diffuse_Lambert( material.diffuseColor );\n\treflectedLight.directSpecular += irradiance * BRDF_Specular_BlinnPhong( directLight, geometry, material.specularColor, material.specularShininess ) * material.specularStrength;\n}\nvoid RE_IndirectDiffuse_BlinnPhong( const in vec3 irradiance, const in GeometricContext geometry, const in BlinnPhongMaterial material, inout ReflectedLight reflectedLight ) {\n\treflectedLight.indirectDiffuse += irradiance * BRDF_Diffuse_Lambert( material.diffuseColor );\n}\n#define RE_Direct\t\t\t\tRE_Direct_BlinnPhong\n#define RE_IndirectDiffuse\t\tRE_IndirectDiffuse_BlinnPhong\n#define Material_LightProbeLOD( material )\t(0)\n",
+lights_physical_fragment:"PhysicalMaterial material;\nmaterial.diffuseColor = diffuseColor.rgb * ( 1.0 - metalnessFactor );\nmaterial.specularRoughness = clamp( roughnessFactor, 0.04, 1.0 );\n#ifdef STANDARD\n\tmaterial.specularColor = mix( vec3( DEFAULT_SPECULAR_COEFFICIENT ), diffuseColor.rgb, metalnessFactor );\n#else\n\tmaterial.specularColor = mix( vec3( MAXIMUM_SPECULAR_COEFFICIENT * pow2( reflectivity ) ), diffuseColor.rgb, metalnessFactor );\n\tmaterial.clearCoat = saturate( clearCoat );\tmaterial.clearCoatRoughness = clamp( clearCoatRoughness, 0.04, 1.0 );\n#endif\n",
+lights_physical_pars_fragment:"struct PhysicalMaterial {\n\tvec3\tdiffuseColor;\n\tfloat\tspecularRoughness;\n\tvec3\tspecularColor;\n\t#ifndef STANDARD\n\t\tfloat clearCoat;\n\t\tfloat clearCoatRoughness;\n\t#endif\n};\n#define MAXIMUM_SPECULAR_COEFFICIENT 0.16\n#define DEFAULT_SPECULAR_COEFFICIENT 0.04\nfloat clearCoatDHRApprox( const in float roughness, const in float dotNL ) {\n\treturn DEFAULT_SPECULAR_COEFFICIENT + ( 1.0 - DEFAULT_SPECULAR_COEFFICIENT ) * ( pow( 1.0 - dotNL, 5.0 ) * pow( 1.0 - roughness, 2.0 ) );\n}\n#if NUM_RECT_AREA_LIGHTS > 0\n\tvoid RE_Direct_RectArea_Physical( const in RectAreaLight rectAreaLight, const in GeometricContext geometry, const in PhysicalMaterial material, inout ReflectedLight reflectedLight ) {\n\t\tvec3 normal = geometry.normal;\n\t\tvec3 viewDir = geometry.viewDir;\n\t\tvec3 position = geometry.position;\n\t\tvec3 lightPos = rectAreaLight.position;\n\t\tvec3 halfWidth = rectAreaLight.halfWidth;\n\t\tvec3 halfHeight = rectAreaLight.halfHeight;\n\t\tvec3 lightColor = rectAreaLight.color;\n\t\tfloat roughness = material.specularRoughness;\n\t\tvec3 rectCoords[ 4 ];\n\t\trectCoords[ 0 ] = lightPos - halfWidth - halfHeight;\t\trectCoords[ 1 ] = lightPos + halfWidth - halfHeight;\n\t\trectCoords[ 2 ] = lightPos + halfWidth + halfHeight;\n\t\trectCoords[ 3 ] = lightPos - halfWidth + halfHeight;\n\t\tvec2 uv = LTC_Uv( normal, viewDir, roughness );\n\t\tvec4 t1 = texture2D( ltc_1, uv );\n\t\tvec4 t2 = texture2D( ltc_2, uv );\n\t\tmat3 mInv = mat3(\n\t\t\tvec3( t1.x, 0, t1.y ),\n\t\t\tvec3( 0, 1, 0 ),\n\t\t\tvec3( t1.z, 0, t1.w )\n\t\t);\n\t\tvec3 fresnel = ( material.specularColor * t2.x + ( vec3( 1.0 ) - material.specularColor ) * t2.y );\n\t\treflectedLight.directSpecular += lightColor * fresnel * LTC_Evaluate( normal, viewDir, position, mInv, rectCoords );\n\t\treflectedLight.directDiffuse += lightColor * material.diffuseColor * LTC_Evaluate( normal, viewDir, position, mat3( 1.0 ), rectCoords );\n\t}\n#endif\nvoid 
RE_Direct_Physical( const in IncidentLight directLight, const in GeometricContext geometry, const in PhysicalMaterial material, inout ReflectedLight reflectedLight ) {\n\tfloat dotNL = saturate( dot( geometry.normal, directLight.direction ) );\n\tvec3 irradiance = dotNL * directLight.color;\n\t#ifndef PHYSICALLY_CORRECT_LIGHTS\n\t\tirradiance *= PI;\n\t#endif\n\t#ifndef STANDARD\n\t\tfloat clearCoatDHR = material.clearCoat * clearCoatDHRApprox( material.clearCoatRoughness, dotNL );\n\t#else\n\t\tfloat clearCoatDHR = 0.0;\n\t#endif\n\treflectedLight.directSpecular += ( 1.0 - clearCoatDHR ) * irradiance * BRDF_Specular_GGX( directLight, geometry, material.specularColor, material.specularRoughness );\n\treflectedLight.directDiffuse += ( 1.0 - clearCoatDHR ) * irradiance * BRDF_Diffuse_Lambert( material.diffuseColor );\n\t#ifndef STANDARD\n\t\treflectedLight.directSpecular += irradiance * material.clearCoat * BRDF_Specular_GGX( directLight, geometry, vec3( DEFAULT_SPECULAR_COEFFICIENT ), material.clearCoatRoughness );\n\t#endif\n}\nvoid RE_IndirectDiffuse_Physical( const in vec3 irradiance, const in GeometricContext geometry, const in PhysicalMaterial material, inout ReflectedLight reflectedLight ) {\n\treflectedLight.indirectDiffuse += irradiance * BRDF_Diffuse_Lambert( material.diffuseColor );\n}\nvoid RE_IndirectSpecular_Physical( const in vec3 radiance, const in vec3 clearCoatRadiance, const in GeometricContext geometry, const in PhysicalMaterial material, inout ReflectedLight reflectedLight ) {\n\t#ifndef STANDARD\n\t\tfloat dotNV = saturate( dot( geometry.normal, geometry.viewDir ) );\n\t\tfloat dotNL = dotNV;\n\t\tfloat clearCoatDHR = material.clearCoat * clearCoatDHRApprox( material.clearCoatRoughness, dotNL );\n\t#else\n\t\tfloat clearCoatDHR = 0.0;\n\t#endif\n\treflectedLight.indirectSpecular += ( 1.0 - clearCoatDHR ) * radiance * BRDF_Specular_GGX_Environment( geometry, material.specularColor, material.specularRoughness );\n\t#ifndef 
STANDARD\n\t\treflectedLight.indirectSpecular += clearCoatRadiance * material.clearCoat * BRDF_Specular_GGX_Environment( geometry, vec3( DEFAULT_SPECULAR_COEFFICIENT ), material.clearCoatRoughness );\n\t#endif\n}\n#define RE_Direct\t\t\t\tRE_Direct_Physical\n#define RE_Direct_RectArea\t\tRE_Direct_RectArea_Physical\n#define RE_IndirectDiffuse\t\tRE_IndirectDiffuse_Physical\n#define RE_IndirectSpecular\t\tRE_IndirectSpecular_Physical\n#define Material_BlinnShininessExponent( material ) GGXRoughnessToBlinnExponent( material.specularRoughness )\n#define Material_ClearCoat_BlinnShininessExponent( material ) GGXRoughnessToBlinnExponent( material.clearCoatRoughness )\nfloat computeSpecularOcclusion( const in float dotNV, const in float ambientOcclusion, const in float roughness ) {\n\treturn saturate( pow( dotNV + ambientOcclusion, exp2( - 16.0 * roughness - 1.0 ) ) - 1.0 + ambientOcclusion );\n}\n",
+lights_fragment_begin:"\nGeometricContext geometry;\ngeometry.position = - vViewPosition;\ngeometry.normal = normal;\ngeometry.viewDir = normalize( vViewPosition );\nIncidentLight directLight;\n#if ( NUM_POINT_LIGHTS > 0 ) && defined( RE_Direct )\n\tPointLight pointLight;\n\t#pragma unroll_loop\n\tfor ( int i = 0; i < NUM_POINT_LIGHTS; i ++ ) {\n\t\tpointLight = pointLights[ i ];\n\t\tgetPointDirectLightIrradiance( pointLight, geometry, directLight );\n\t\t#ifdef USE_SHADOWMAP\n\t\tdirectLight.color *= all( bvec2( pointLight.shadow, directLight.visible ) ) ? getPointShadow( pointShadowMap[ i ], pointLight.shadowMapSize, pointLight.shadowBias, pointLight.shadowRadius, vPointShadowCoord[ i ], pointLight.shadowCameraNear, pointLight.shadowCameraFar ) : 1.0;\n\t\t#endif\n\t\tRE_Direct( directLight, geometry, material, reflectedLight );\n\t}\n#endif\n#if ( NUM_SPOT_LIGHTS > 0 ) && defined( RE_Direct )\n\tSpotLight spotLight;\n\t#pragma unroll_loop\n\tfor ( int i = 0; i < NUM_SPOT_LIGHTS; i ++ ) {\n\t\tspotLight = spotLights[ i ];\n\t\tgetSpotDirectLightIrradiance( spotLight, geometry, directLight );\n\t\t#ifdef USE_SHADOWMAP\n\t\tdirectLight.color *= all( bvec2( spotLight.shadow, directLight.visible ) ) ? getShadow( spotShadowMap[ i ], spotLight.shadowMapSize, spotLight.shadowBias, spotLight.shadowRadius, vSpotShadowCoord[ i ] ) : 1.0;\n\t\t#endif\n\t\tRE_Direct( directLight, geometry, material, reflectedLight );\n\t}\n#endif\n#if ( NUM_DIR_LIGHTS > 0 ) && defined( RE_Direct )\n\tDirectionalLight directionalLight;\n\t#pragma unroll_loop\n\tfor ( int i = 0; i < NUM_DIR_LIGHTS; i ++ ) {\n\t\tdirectionalLight = directionalLights[ i ];\n\t\tgetDirectionalDirectLightIrradiance( directionalLight, geometry, directLight );\n\t\t#ifdef USE_SHADOWMAP\n\t\tdirectLight.color *= all( bvec2( directionalLight.shadow, directLight.visible ) ) ? 
getShadow( directionalShadowMap[ i ], directionalLight.shadowMapSize, directionalLight.shadowBias, directionalLight.shadowRadius, vDirectionalShadowCoord[ i ] ) : 1.0;\n\t\t#endif\n\t\tRE_Direct( directLight, geometry, material, reflectedLight );\n\t}\n#endif\n#if ( NUM_RECT_AREA_LIGHTS > 0 ) && defined( RE_Direct_RectArea )\n\tRectAreaLight rectAreaLight;\n\t#pragma unroll_loop\n\tfor ( int i = 0; i < NUM_RECT_AREA_LIGHTS; i ++ ) {\n\t\trectAreaLight = rectAreaLights[ i ];\n\t\tRE_Direct_RectArea( rectAreaLight, geometry, material, reflectedLight );\n\t}\n#endif\n#if defined( RE_IndirectDiffuse )\n\tvec3 irradiance = getAmbientLightIrradiance( ambientLightColor );\n\t#if ( NUM_HEMI_LIGHTS > 0 )\n\t\t#pragma unroll_loop\n\t\tfor ( int i = 0; i < NUM_HEMI_LIGHTS; i ++ ) {\n\t\t\tirradiance += getHemisphereLightIrradiance( hemisphereLights[ i ], geometry );\n\t\t}\n\t#endif\n#endif\n#if defined( RE_IndirectSpecular )\n\tvec3 radiance = vec3( 0.0 );\n\tvec3 clearCoatRadiance = vec3( 0.0 );\n#endif\n",
+lights_fragment_maps:"#if defined( RE_IndirectDiffuse )\n\t#ifdef USE_LIGHTMAP\n\t\tvec3 lightMapIrradiance = texture2D( lightMap, vUv2 ).xyz * lightMapIntensity;\n\t\t#ifndef PHYSICALLY_CORRECT_LIGHTS\n\t\t\tlightMapIrradiance *= PI;\n\t\t#endif\n\t\tirradiance += lightMapIrradiance;\n\t#endif\n\t#if defined( USE_ENVMAP ) && defined( PHYSICAL ) && defined( ENVMAP_TYPE_CUBE_UV )\n\t\tirradiance += getLightProbeIndirectIrradiance( geometry, maxMipLevel );\n\t#endif\n#endif\n#if defined( USE_ENVMAP ) && defined( RE_IndirectSpecular )\n\tradiance += getLightProbeIndirectRadiance( geometry, Material_BlinnShininessExponent( material ), maxMipLevel );\n\t#ifndef STANDARD\n\t\tclearCoatRadiance += getLightProbeIndirectRadiance( geometry, Material_ClearCoat_BlinnShininessExponent( material ), maxMipLevel );\n\t#endif\n#endif\n",
+lights_fragment_end:"#if defined( RE_IndirectDiffuse )\n\tRE_IndirectDiffuse( irradiance, geometry, material, reflectedLight );\n#endif\n#if defined( RE_IndirectSpecular )\n\tRE_IndirectSpecular( radiance, clearCoatRadiance, geometry, material, reflectedLight );\n#endif\n",logdepthbuf_fragment:"#if defined( USE_LOGDEPTHBUF ) && defined( USE_LOGDEPTHBUF_EXT )\n\tgl_FragDepthEXT = log2( vFragDepth ) * logDepthBufFC * 0.5;\n#endif",logdepthbuf_pars_fragment:"#if defined( USE_LOGDEPTHBUF ) && defined( USE_LOGDEPTHBUF_EXT )\n\tuniform float logDepthBufFC;\n\tvarying float vFragDepth;\n#endif\n",
+logdepthbuf_pars_vertex:"#ifdef USE_LOGDEPTHBUF\n\t#ifdef USE_LOGDEPTHBUF_EXT\n\t\tvarying float vFragDepth;\n\t#else\n\t\tuniform float logDepthBufFC;\n\t#endif\n#endif\n",logdepthbuf_vertex:"#ifdef USE_LOGDEPTHBUF\n\t#ifdef USE_LOGDEPTHBUF_EXT\n\t\tvFragDepth = 1.0 + gl_Position.w;\n\t#else\n\t\tgl_Position.z = log2( max( EPSILON, gl_Position.w + 1.0 ) ) * logDepthBufFC - 1.0;\n\t\tgl_Position.z *= gl_Position.w;\n\t#endif\n#endif\n",map_fragment:"#ifdef USE_MAP\n\tvec4 texelColor = texture2D( map, vUv );\n\ttexelColor = mapTexelToLinear( texelColor );\n\tdiffuseColor *= texelColor;\n#endif\n",
+map_pars_fragment:"#ifdef USE_MAP\n\tuniform sampler2D map;\n#endif\n",map_particle_fragment:"#ifdef USE_MAP\n\tvec2 uv = ( uvTransform * vec3( gl_PointCoord.x, 1.0 - gl_PointCoord.y, 1 ) ).xy;\n\tvec4 mapTexel = texture2D( map, uv );\n\tdiffuseColor *= mapTexelToLinear( mapTexel );\n#endif\n",map_particle_pars_fragment:"#ifdef USE_MAP\n\tuniform mat3 uvTransform;\n\tuniform sampler2D map;\n#endif\n",metalnessmap_fragment:"float metalnessFactor = metalness;\n#ifdef USE_METALNESSMAP\n\tvec4 texelMetalness = texture2D( metalnessMap, vUv );\n\tmetalnessFactor *= texelMetalness.b;\n#endif\n",
+metalnessmap_pars_fragment:"#ifdef USE_METALNESSMAP\n\tuniform sampler2D metalnessMap;\n#endif",morphnormal_vertex:"#ifdef USE_MORPHNORMALS\n\tobjectNormal += ( morphNormal0 - normal ) * morphTargetInfluences[ 0 ];\n\tobjectNormal += ( morphNormal1 - normal ) * morphTargetInfluences[ 1 ];\n\tobjectNormal += ( morphNormal2 - normal ) * morphTargetInfluences[ 2 ];\n\tobjectNormal += ( morphNormal3 - normal ) * morphTargetInfluences[ 3 ];\n#endif\n",morphtarget_pars_vertex:"#ifdef USE_MORPHTARGETS\n\t#ifndef USE_MORPHNORMALS\n\tuniform float morphTargetInfluences[ 8 ];\n\t#else\n\tuniform float morphTargetInfluences[ 4 ];\n\t#endif\n#endif",
+morphtarget_vertex:"#ifdef USE_MORPHTARGETS\n\ttransformed += ( morphTarget0 - position ) * morphTargetInfluences[ 0 ];\n\ttransformed += ( morphTarget1 - position ) * morphTargetInfluences[ 1 ];\n\ttransformed += ( morphTarget2 - position ) * morphTargetInfluences[ 2 ];\n\ttransformed += ( morphTarget3 - position ) * morphTargetInfluences[ 3 ];\n\t#ifndef USE_MORPHNORMALS\n\ttransformed += ( morphTarget4 - position ) * morphTargetInfluences[ 4 ];\n\ttransformed += ( morphTarget5 - position ) * morphTargetInfluences[ 5 ];\n\ttransformed += ( morphTarget6 - position ) * morphTargetInfluences[ 6 ];\n\ttransformed += ( morphTarget7 - position ) * morphTargetInfluences[ 7 ];\n\t#endif\n#endif\n",
+normal_fragment_begin:"#ifdef FLAT_SHADED\n\tvec3 fdx = vec3( dFdx( vViewPosition.x ), dFdx( vViewPosition.y ), dFdx( vViewPosition.z ) );\n\tvec3 fdy = vec3( dFdy( vViewPosition.x ), dFdy( vViewPosition.y ), dFdy( vViewPosition.z ) );\n\tvec3 normal = normalize( cross( fdx, fdy ) );\n#else\n\tvec3 normal = normalize( vNormal );\n\t#ifdef DOUBLE_SIDED\n\t\tnormal = normal * ( float( gl_FrontFacing ) * 2.0 - 1.0 );\n\t#endif\n#endif\n",normal_fragment_maps:"#ifdef USE_NORMALMAP\n\t#ifdef OBJECTSPACE_NORMALMAP\n\t\tnormal = texture2D( normalMap, vUv ).xyz * 2.0 - 1.0;\n\t\t#ifdef FLIP_SIDED\n\t\t\tnormal = - normal;\n\t\t#endif\n\t\t#ifdef DOUBLE_SIDED\n\t\t\tnormal = normal * ( float( gl_FrontFacing ) * 2.0 - 1.0 );\n\t\t#endif\n\t\tnormal = normalize( normalMatrix * normal );\n\t#else\n\t\tnormal = perturbNormal2Arb( -vViewPosition, normal );\n\t#endif\n#elif defined( USE_BUMPMAP )\n\tnormal = perturbNormalArb( -vViewPosition, normal, dHdxy_fwd() );\n#endif\n",
+normalmap_pars_fragment:"#ifdef USE_NORMALMAP\n\tuniform sampler2D normalMap;\n\tuniform vec2 normalScale;\n\t#ifdef OBJECTSPACE_NORMALMAP\n\t\tuniform mat3 normalMatrix;\n\t#else\n\t\tvec3 perturbNormal2Arb( vec3 eye_pos, vec3 surf_norm ) {\n\t\t\tvec3 q0 = vec3( dFdx( eye_pos.x ), dFdx( eye_pos.y ), dFdx( eye_pos.z ) );\n\t\t\tvec3 q1 = vec3( dFdy( eye_pos.x ), dFdy( eye_pos.y ), dFdy( eye_pos.z ) );\n\t\t\tvec2 st0 = dFdx( vUv.st );\n\t\t\tvec2 st1 = dFdy( vUv.st );\n\t\t\tfloat scale = sign( st1.t * st0.s - st0.t * st1.s );\n\t\t\tvec3 S = normalize( ( q0 * st1.t - q1 * st0.t ) * scale );\n\t\t\tvec3 T = normalize( ( - q0 * st1.s + q1 * st0.s ) * scale );\n\t\t\tvec3 N = normalize( surf_norm );\n\t\t\tmat3 tsn = mat3( S, T, N );\n\t\t\tvec3 mapN = texture2D( normalMap, vUv ).xyz * 2.0 - 1.0;\n\t\t\tmapN.xy *= normalScale;\n\t\t\tmapN.xy *= ( float( gl_FrontFacing ) * 2.0 - 1.0 );\n\t\t\treturn normalize( tsn * mapN );\n\t\t}\n\t#endif\n#endif\n",
+packing:"vec3 packNormalToRGB( const in vec3 normal ) {\n\treturn normalize( normal ) * 0.5 + 0.5;\n}\nvec3 unpackRGBToNormal( const in vec3 rgb ) {\n\treturn 2.0 * rgb.xyz - 1.0;\n}\nconst float PackUpscale = 256. / 255.;const float UnpackDownscale = 255. / 256.;\nconst vec3 PackFactors = vec3( 256. * 256. * 256., 256. * 256., 256. );\nconst vec4 UnpackFactors = UnpackDownscale / vec4( PackFactors, 1. );\nconst float ShiftRight8 = 1. / 256.;\nvec4 packDepthToRGBA( const in float v ) {\n\tvec4 r = vec4( fract( v * PackFactors ), v );\n\tr.yzw -= r.xyz * ShiftRight8;\treturn r * PackUpscale;\n}\nfloat unpackRGBAToDepth( const in vec4 v ) {\n\treturn dot( v, UnpackFactors );\n}\nfloat viewZToOrthographicDepth( const in float viewZ, const in float near, const in float far ) {\n\treturn ( viewZ + near ) / ( near - far );\n}\nfloat orthographicDepthToViewZ( const in float linearClipZ, const in float near, const in float far ) {\n\treturn linearClipZ * ( near - far ) - near;\n}\nfloat viewZToPerspectiveDepth( const in float viewZ, const in float near, const in float far ) {\n\treturn (( near + viewZ ) * far ) / (( far - near ) * viewZ );\n}\nfloat perspectiveDepthToViewZ( const in float invClipZ, const in float near, const in float far ) {\n\treturn ( near * far ) / ( ( far - near ) * invClipZ - far );\n}\n",
+premultiplied_alpha_fragment:"#ifdef PREMULTIPLIED_ALPHA\n\tgl_FragColor.rgb *= gl_FragColor.a;\n#endif\n",project_vertex:"vec4 mvPosition = modelViewMatrix * vec4( transformed, 1.0 );\ngl_Position = projectionMatrix * mvPosition;\n",dithering_fragment:"#if defined( DITHERING )\n gl_FragColor.rgb = dithering( gl_FragColor.rgb );\n#endif\n",dithering_pars_fragment:"#if defined( DITHERING )\n\tvec3 dithering( vec3 color ) {\n\t\tfloat grid_position = rand( gl_FragCoord.xy );\n\t\tvec3 dither_shift_RGB = vec3( 0.25 / 255.0, -0.25 / 255.0, 0.25 / 255.0 );\n\t\tdither_shift_RGB = mix( 2.0 * dither_shift_RGB, -2.0 * dither_shift_RGB, grid_position );\n\t\treturn color + dither_shift_RGB;\n\t}\n#endif\n",
+roughnessmap_fragment:"float roughnessFactor = roughness;\n#ifdef USE_ROUGHNESSMAP\n\tvec4 texelRoughness = texture2D( roughnessMap, vUv );\n\troughnessFactor *= texelRoughness.g;\n#endif\n",roughnessmap_pars_fragment:"#ifdef USE_ROUGHNESSMAP\n\tuniform sampler2D roughnessMap;\n#endif",shadowmap_pars_fragment:"#ifdef USE_SHADOWMAP\n\t#if NUM_DIR_LIGHTS > 0\n\t\tuniform sampler2D directionalShadowMap[ NUM_DIR_LIGHTS ];\n\t\tvarying vec4 vDirectionalShadowCoord[ NUM_DIR_LIGHTS ];\n\t#endif\n\t#if NUM_SPOT_LIGHTS > 0\n\t\tuniform sampler2D spotShadowMap[ NUM_SPOT_LIGHTS ];\n\t\tvarying vec4 vSpotShadowCoord[ NUM_SPOT_LIGHTS ];\n\t#endif\n\t#if NUM_POINT_LIGHTS > 0\n\t\tuniform sampler2D pointShadowMap[ NUM_POINT_LIGHTS ];\n\t\tvarying vec4 vPointShadowCoord[ NUM_POINT_LIGHTS ];\n\t#endif\n\tfloat texture2DCompare( sampler2D depths, vec2 uv, float compare ) {\n\t\treturn step( compare, unpackRGBAToDepth( texture2D( depths, uv ) ) );\n\t}\n\tfloat texture2DShadowLerp( sampler2D depths, vec2 size, vec2 uv, float compare ) {\n\t\tconst vec2 offset = vec2( 0.0, 1.0 );\n\t\tvec2 texelSize = vec2( 1.0 ) / size;\n\t\tvec2 centroidUV = floor( uv * size + 0.5 ) / size;\n\t\tfloat lb = texture2DCompare( depths, centroidUV + texelSize * offset.xx, compare );\n\t\tfloat lt = texture2DCompare( depths, centroidUV + texelSize * offset.xy, compare );\n\t\tfloat rb = texture2DCompare( depths, centroidUV + texelSize * offset.yx, compare );\n\t\tfloat rt = texture2DCompare( depths, centroidUV + texelSize * offset.yy, compare );\n\t\tvec2 f = fract( uv * size + 0.5 );\n\t\tfloat a = mix( lb, lt, f.y );\n\t\tfloat b = mix( rb, rt, f.y );\n\t\tfloat c = mix( a, b, f.x );\n\t\treturn c;\n\t}\n\tfloat getShadow( sampler2D shadowMap, vec2 shadowMapSize, float shadowBias, float shadowRadius, vec4 shadowCoord ) {\n\t\tfloat shadow = 1.0;\n\t\tshadowCoord.xyz /= shadowCoord.w;\n\t\tshadowCoord.z += shadowBias;\n\t\tbvec4 inFrustumVec = bvec4 ( shadowCoord.x >= 0.0, shadowCoord.x <= 1.0, 
shadowCoord.y >= 0.0, shadowCoord.y <= 1.0 );\n\t\tbool inFrustum = all( inFrustumVec );\n\t\tbvec2 frustumTestVec = bvec2( inFrustum, shadowCoord.z <= 1.0 );\n\t\tbool frustumTest = all( frustumTestVec );\n\t\tif ( frustumTest ) {\n\t\t#if defined( SHADOWMAP_TYPE_PCF )\n\t\t\tvec2 texelSize = vec2( 1.0 ) / shadowMapSize;\n\t\t\tfloat dx0 = - texelSize.x * shadowRadius;\n\t\t\tfloat dy0 = - texelSize.y * shadowRadius;\n\t\t\tfloat dx1 = + texelSize.x * shadowRadius;\n\t\t\tfloat dy1 = + texelSize.y * shadowRadius;\n\t\t\tshadow = (\n\t\t\t\ttexture2DCompare( shadowMap, shadowCoord.xy + vec2( dx0, dy0 ), shadowCoord.z ) +\n\t\t\t\ttexture2DCompare( shadowMap, shadowCoord.xy + vec2( 0.0, dy0 ), shadowCoord.z ) +\n\t\t\t\ttexture2DCompare( shadowMap, shadowCoord.xy + vec2( dx1, dy0 ), shadowCoord.z ) +\n\t\t\t\ttexture2DCompare( shadowMap, shadowCoord.xy + vec2( dx0, 0.0 ), shadowCoord.z ) +\n\t\t\t\ttexture2DCompare( shadowMap, shadowCoord.xy, shadowCoord.z ) +\n\t\t\t\ttexture2DCompare( shadowMap, shadowCoord.xy + vec2( dx1, 0.0 ), shadowCoord.z ) +\n\t\t\t\ttexture2DCompare( shadowMap, shadowCoord.xy + vec2( dx0, dy1 ), shadowCoord.z ) +\n\t\t\t\ttexture2DCompare( shadowMap, shadowCoord.xy + vec2( 0.0, dy1 ), shadowCoord.z ) +\n\t\t\t\ttexture2DCompare( shadowMap, shadowCoord.xy + vec2( dx1, dy1 ), shadowCoord.z )\n\t\t\t) * ( 1.0 / 9.0 );\n\t\t#elif defined( SHADOWMAP_TYPE_PCF_SOFT )\n\t\t\tvec2 texelSize = vec2( 1.0 ) / shadowMapSize;\n\t\t\tfloat dx0 = - texelSize.x * shadowRadius;\n\t\t\tfloat dy0 = - texelSize.y * shadowRadius;\n\t\t\tfloat dx1 = + texelSize.x * shadowRadius;\n\t\t\tfloat dy1 = + texelSize.y * shadowRadius;\n\t\t\tshadow = (\n\t\t\t\ttexture2DShadowLerp( shadowMap, shadowMapSize, shadowCoord.xy + vec2( dx0, dy0 ), shadowCoord.z ) +\n\t\t\t\ttexture2DShadowLerp( shadowMap, shadowMapSize, shadowCoord.xy + vec2( 0.0, dy0 ), shadowCoord.z ) +\n\t\t\t\ttexture2DShadowLerp( shadowMap, shadowMapSize, shadowCoord.xy + vec2( dx1, dy0 ), shadowCoord.z ) 
+\n\t\t\t\ttexture2DShadowLerp( shadowMap, shadowMapSize, shadowCoord.xy + vec2( dx0, 0.0 ), shadowCoord.z ) +\n\t\t\t\ttexture2DShadowLerp( shadowMap, shadowMapSize, shadowCoord.xy, shadowCoord.z ) +\n\t\t\t\ttexture2DShadowLerp( shadowMap, shadowMapSize, shadowCoord.xy + vec2( dx1, 0.0 ), shadowCoord.z ) +\n\t\t\t\ttexture2DShadowLerp( shadowMap, shadowMapSize, shadowCoord.xy + vec2( dx0, dy1 ), shadowCoord.z ) +\n\t\t\t\ttexture2DShadowLerp( shadowMap, shadowMapSize, shadowCoord.xy + vec2( 0.0, dy1 ), shadowCoord.z ) +\n\t\t\t\ttexture2DShadowLerp( shadowMap, shadowMapSize, shadowCoord.xy + vec2( dx1, dy1 ), shadowCoord.z )\n\t\t\t) * ( 1.0 / 9.0 );\n\t\t#else\n\t\t\tshadow = texture2DCompare( shadowMap, shadowCoord.xy, shadowCoord.z );\n\t\t#endif\n\t\t}\n\t\treturn shadow;\n\t}\n\tvec2 cubeToUV( vec3 v, float texelSizeY ) {\n\t\tvec3 absV = abs( v );\n\t\tfloat scaleToCube = 1.0 / max( absV.x, max( absV.y, absV.z ) );\n\t\tabsV *= scaleToCube;\n\t\tv *= scaleToCube * ( 1.0 - 2.0 * texelSizeY );\n\t\tvec2 planar = v.xy;\n\t\tfloat almostATexel = 1.5 * texelSizeY;\n\t\tfloat almostOne = 1.0 - almostATexel;\n\t\tif ( absV.z >= almostOne ) {\n\t\t\tif ( v.z > 0.0 )\n\t\t\t\tplanar.x = 4.0 - v.x;\n\t\t} else if ( absV.x >= almostOne ) {\n\t\t\tfloat signX = sign( v.x );\n\t\t\tplanar.x = v.z * signX + 2.0 * signX;\n\t\t} else if ( absV.y >= almostOne ) {\n\t\t\tfloat signY = sign( v.y );\n\t\t\tplanar.x = v.x + 2.0 * signY + 2.0;\n\t\t\tplanar.y = v.z * signY - 2.0;\n\t\t}\n\t\treturn vec2( 0.125, 0.25 ) * planar + vec2( 0.375, 0.75 );\n\t}\n\tfloat getPointShadow( sampler2D shadowMap, vec2 shadowMapSize, float shadowBias, float shadowRadius, vec4 shadowCoord, float shadowCameraNear, float shadowCameraFar ) {\n\t\tvec2 texelSize = vec2( 1.0 ) / ( shadowMapSize * vec2( 4.0, 2.0 ) );\n\t\tvec3 lightToPosition = shadowCoord.xyz;\n\t\tfloat dp = ( length( lightToPosition ) - shadowCameraNear ) / ( shadowCameraFar - shadowCameraNear );\t\tdp += shadowBias;\n\t\tvec3 
bd3D = normalize( lightToPosition );\n\t\t#if defined( SHADOWMAP_TYPE_PCF ) || defined( SHADOWMAP_TYPE_PCF_SOFT )\n\t\t\tvec2 offset = vec2( - 1, 1 ) * shadowRadius * texelSize.y;\n\t\t\treturn (\n\t\t\t\ttexture2DCompare( shadowMap, cubeToUV( bd3D + offset.xyy, texelSize.y ), dp ) +\n\t\t\t\ttexture2DCompare( shadowMap, cubeToUV( bd3D + offset.yyy, texelSize.y ), dp ) +\n\t\t\t\ttexture2DCompare( shadowMap, cubeToUV( bd3D + offset.xyx, texelSize.y ), dp ) +\n\t\t\t\ttexture2DCompare( shadowMap, cubeToUV( bd3D + offset.yyx, texelSize.y ), dp ) +\n\t\t\t\ttexture2DCompare( shadowMap, cubeToUV( bd3D, texelSize.y ), dp ) +\n\t\t\t\ttexture2DCompare( shadowMap, cubeToUV( bd3D + offset.xxy, texelSize.y ), dp ) +\n\t\t\t\ttexture2DCompare( shadowMap, cubeToUV( bd3D + offset.yxy, texelSize.y ), dp ) +\n\t\t\t\ttexture2DCompare( shadowMap, cubeToUV( bd3D + offset.xxx, texelSize.y ), dp ) +\n\t\t\t\ttexture2DCompare( shadowMap, cubeToUV( bd3D + offset.yxx, texelSize.y ), dp )\n\t\t\t) * ( 1.0 / 9.0 );\n\t\t#else\n\t\t\treturn texture2DCompare( shadowMap, cubeToUV( bd3D, texelSize.y ), dp );\n\t\t#endif\n\t}\n#endif\n",
+shadowmap_pars_vertex:"#ifdef USE_SHADOWMAP\n\t#if NUM_DIR_LIGHTS > 0\n\t\tuniform mat4 directionalShadowMatrix[ NUM_DIR_LIGHTS ];\n\t\tvarying vec4 vDirectionalShadowCoord[ NUM_DIR_LIGHTS ];\n\t#endif\n\t#if NUM_SPOT_LIGHTS > 0\n\t\tuniform mat4 spotShadowMatrix[ NUM_SPOT_LIGHTS ];\n\t\tvarying vec4 vSpotShadowCoord[ NUM_SPOT_LIGHTS ];\n\t#endif\n\t#if NUM_POINT_LIGHTS > 0\n\t\tuniform mat4 pointShadowMatrix[ NUM_POINT_LIGHTS ];\n\t\tvarying vec4 vPointShadowCoord[ NUM_POINT_LIGHTS ];\n\t#endif\n#endif\n",
+shadowmap_vertex:"#ifdef USE_SHADOWMAP\n\t#if NUM_DIR_LIGHTS > 0\n\t#pragma unroll_loop\n\tfor ( int i = 0; i < NUM_DIR_LIGHTS; i ++ ) {\n\t\tvDirectionalShadowCoord[ i ] = directionalShadowMatrix[ i ] * worldPosition;\n\t}\n\t#endif\n\t#if NUM_SPOT_LIGHTS > 0\n\t#pragma unroll_loop\n\tfor ( int i = 0; i < NUM_SPOT_LIGHTS; i ++ ) {\n\t\tvSpotShadowCoord[ i ] = spotShadowMatrix[ i ] * worldPosition;\n\t}\n\t#endif\n\t#if NUM_POINT_LIGHTS > 0\n\t#pragma unroll_loop\n\tfor ( int i = 0; i < NUM_POINT_LIGHTS; i ++ ) {\n\t\tvPointShadowCoord[ i ] = pointShadowMatrix[ i ] * worldPosition;\n\t}\n\t#endif\n#endif\n",
+shadowmask_pars_fragment:"float getShadowMask() {\n\tfloat shadow = 1.0;\n\t#ifdef USE_SHADOWMAP\n\t#if NUM_DIR_LIGHTS > 0\n\tDirectionalLight directionalLight;\n\t#pragma unroll_loop\n\tfor ( int i = 0; i < NUM_DIR_LIGHTS; i ++ ) {\n\t\tdirectionalLight = directionalLights[ i ];\n\t\tshadow *= bool( directionalLight.shadow ) ? getShadow( directionalShadowMap[ i ], directionalLight.shadowMapSize, directionalLight.shadowBias, directionalLight.shadowRadius, vDirectionalShadowCoord[ i ] ) : 1.0;\n\t}\n\t#endif\n\t#if NUM_SPOT_LIGHTS > 0\n\tSpotLight spotLight;\n\t#pragma unroll_loop\n\tfor ( int i = 0; i < NUM_SPOT_LIGHTS; i ++ ) {\n\t\tspotLight = spotLights[ i ];\n\t\tshadow *= bool( spotLight.shadow ) ? getShadow( spotShadowMap[ i ], spotLight.shadowMapSize, spotLight.shadowBias, spotLight.shadowRadius, vSpotShadowCoord[ i ] ) : 1.0;\n\t}\n\t#endif\n\t#if NUM_POINT_LIGHTS > 0\n\tPointLight pointLight;\n\t#pragma unroll_loop\n\tfor ( int i = 0; i < NUM_POINT_LIGHTS; i ++ ) {\n\t\tpointLight = pointLights[ i ];\n\t\tshadow *= bool( pointLight.shadow ) ? getPointShadow( pointShadowMap[ i ], pointLight.shadowMapSize, pointLight.shadowBias, pointLight.shadowRadius, vPointShadowCoord[ i ], pointLight.shadowCameraNear, pointLight.shadowCameraFar ) : 1.0;\n\t}\n\t#endif\n\t#endif\n\treturn shadow;\n}\n",
+skinbase_vertex:"#ifdef USE_SKINNING\n\tmat4 boneMatX = getBoneMatrix( skinIndex.x );\n\tmat4 boneMatY = getBoneMatrix( skinIndex.y );\n\tmat4 boneMatZ = getBoneMatrix( skinIndex.z );\n\tmat4 boneMatW = getBoneMatrix( skinIndex.w );\n#endif",skinning_pars_vertex:"#ifdef USE_SKINNING\n\tuniform mat4 bindMatrix;\n\tuniform mat4 bindMatrixInverse;\n\t#ifdef BONE_TEXTURE\n\t\tuniform sampler2D boneTexture;\n\t\tuniform int boneTextureSize;\n\t\tmat4 getBoneMatrix( const in float i ) {\n\t\t\tfloat j = i * 4.0;\n\t\t\tfloat x = mod( j, float( boneTextureSize ) );\n\t\t\tfloat y = floor( j / float( boneTextureSize ) );\n\t\t\tfloat dx = 1.0 / float( boneTextureSize );\n\t\t\tfloat dy = 1.0 / float( boneTextureSize );\n\t\t\ty = dy * ( y + 0.5 );\n\t\t\tvec4 v1 = texture2D( boneTexture, vec2( dx * ( x + 0.5 ), y ) );\n\t\t\tvec4 v2 = texture2D( boneTexture, vec2( dx * ( x + 1.5 ), y ) );\n\t\t\tvec4 v3 = texture2D( boneTexture, vec2( dx * ( x + 2.5 ), y ) );\n\t\t\tvec4 v4 = texture2D( boneTexture, vec2( dx * ( x + 3.5 ), y ) );\n\t\t\tmat4 bone = mat4( v1, v2, v3, v4 );\n\t\t\treturn bone;\n\t\t}\n\t#else\n\t\tuniform mat4 boneMatrices[ MAX_BONES ];\n\t\tmat4 getBoneMatrix( const in float i ) {\n\t\t\tmat4 bone = boneMatrices[ int(i) ];\n\t\t\treturn bone;\n\t\t}\n\t#endif\n#endif\n",
+skinning_vertex:"#ifdef USE_SKINNING\n\tvec4 skinVertex = bindMatrix * vec4( transformed, 1.0 );\n\tvec4 skinned = vec4( 0.0 );\n\tskinned += boneMatX * skinVertex * skinWeight.x;\n\tskinned += boneMatY * skinVertex * skinWeight.y;\n\tskinned += boneMatZ * skinVertex * skinWeight.z;\n\tskinned += boneMatW * skinVertex * skinWeight.w;\n\ttransformed = ( bindMatrixInverse * skinned ).xyz;\n#endif\n",skinnormal_vertex:"#ifdef USE_SKINNING\n\tmat4 skinMatrix = mat4( 0.0 );\n\tskinMatrix += skinWeight.x * boneMatX;\n\tskinMatrix += skinWeight.y * boneMatY;\n\tskinMatrix += skinWeight.z * boneMatZ;\n\tskinMatrix += skinWeight.w * boneMatW;\n\tskinMatrix = bindMatrixInverse * skinMatrix * bindMatrix;\n\tobjectNormal = vec4( skinMatrix * vec4( objectNormal, 0.0 ) ).xyz;\n#endif\n",
+specularmap_fragment:"float specularStrength;\n#ifdef USE_SPECULARMAP\n\tvec4 texelSpecular = texture2D( specularMap, vUv );\n\tspecularStrength = texelSpecular.r;\n#else\n\tspecularStrength = 1.0;\n#endif",specularmap_pars_fragment:"#ifdef USE_SPECULARMAP\n\tuniform sampler2D specularMap;\n#endif",tonemapping_fragment:"#if defined( TONE_MAPPING )\n gl_FragColor.rgb = toneMapping( gl_FragColor.rgb );\n#endif\n",tonemapping_pars_fragment:"#ifndef saturate\n\t#define saturate(a) clamp( a, 0.0, 1.0 )\n#endif\nuniform float toneMappingExposure;\nuniform float toneMappingWhitePoint;\nvec3 LinearToneMapping( vec3 color ) {\n\treturn toneMappingExposure * color;\n}\nvec3 ReinhardToneMapping( vec3 color ) {\n\tcolor *= toneMappingExposure;\n\treturn saturate( color / ( vec3( 1.0 ) + color ) );\n}\n#define Uncharted2Helper( x ) max( ( ( x * ( 0.15 * x + 0.10 * 0.50 ) + 0.20 * 0.02 ) / ( x * ( 0.15 * x + 0.50 ) + 0.20 * 0.30 ) ) - 0.02 / 0.30, vec3( 0.0 ) )\nvec3 Uncharted2ToneMapping( vec3 color ) {\n\tcolor *= toneMappingExposure;\n\treturn saturate( Uncharted2Helper( color ) / Uncharted2Helper( vec3( toneMappingWhitePoint ) ) );\n}\nvec3 OptimizedCineonToneMapping( vec3 color ) {\n\tcolor *= toneMappingExposure;\n\tcolor = max( vec3( 0.0 ), color - 0.004 );\n\treturn pow( ( color * ( 6.2 * color + 0.5 ) ) / ( color * ( 6.2 * color + 1.7 ) + 0.06 ), vec3( 2.2 ) );\n}\n",
+uv_pars_fragment:"#if defined( USE_MAP ) || defined( USE_BUMPMAP ) || defined( USE_NORMALMAP ) || defined( USE_SPECULARMAP ) || defined( USE_ALPHAMAP ) || defined( USE_EMISSIVEMAP ) || defined( USE_ROUGHNESSMAP ) || defined( USE_METALNESSMAP )\n\tvarying vec2 vUv;\n#endif",uv_pars_vertex:"#if defined( USE_MAP ) || defined( USE_BUMPMAP ) || defined( USE_NORMALMAP ) || defined( USE_SPECULARMAP ) || defined( USE_ALPHAMAP ) || defined( USE_EMISSIVEMAP ) || defined( USE_ROUGHNESSMAP ) || defined( USE_METALNESSMAP )\n\tvarying vec2 vUv;\n\tuniform mat3 uvTransform;\n#endif\n",
+uv_vertex:"#if defined( USE_MAP ) || defined( USE_BUMPMAP ) || defined( USE_NORMALMAP ) || defined( USE_SPECULARMAP ) || defined( USE_ALPHAMAP ) || defined( USE_EMISSIVEMAP ) || defined( USE_ROUGHNESSMAP ) || defined( USE_METALNESSMAP )\n\tvUv = ( uvTransform * vec3( uv, 1 ) ).xy;\n#endif",uv2_pars_fragment:"#if defined( USE_LIGHTMAP ) || defined( USE_AOMAP )\n\tvarying vec2 vUv2;\n#endif",uv2_pars_vertex:"#if defined( USE_LIGHTMAP ) || defined( USE_AOMAP )\n\tattribute vec2 uv2;\n\tvarying vec2 vUv2;\n#endif",
+uv2_vertex:"#if defined( USE_LIGHTMAP ) || defined( USE_AOMAP )\n\tvUv2 = uv2;\n#endif",worldpos_vertex:"#if defined( USE_ENVMAP ) || defined( DISTANCE ) || defined ( USE_SHADOWMAP )\n\tvec4 worldPosition = modelMatrix * vec4( transformed, 1.0 );\n#endif\n",background_frag:"uniform sampler2D t2D;\nvarying vec2 vUv;\nvoid main() {\n\tgl_FragColor = texture2D( t2D, vUv );\n}\n",background_vert:"varying vec2 vUv;\nuniform mat3 uvTransform;\nvoid main() {\n\tvUv = ( uvTransform * vec3( uv, 1 ) ).xy;\n\tgl_Position = vec4( position, 1.0 );\n\tgl_Position.z = 1.0;\n}\n",
+cube_frag:"uniform samplerCube tCube;\nuniform float tFlip;\nuniform float opacity;\nvarying vec3 vWorldDirection;\nvoid main() {\n\tgl_FragColor = textureCube( tCube, vec3( tFlip * vWorldDirection.x, vWorldDirection.yz ) );\n\tgl_FragColor.a *= opacity;\n}\n",cube_vert:"varying vec3 vWorldDirection;\n#include <common>\nvoid main() {\n\tvWorldDirection = transformDirection( position, modelMatrix );\n\t#include <begin_vertex>\n\t#include <project_vertex>\n\tgl_Position.z = gl_Position.w;\n}\n",depth_frag:"#if DEPTH_PACKING == 3200\n\tuniform float opacity;\n#endif\n#include <common>\n#include <packing>\n#include <uv_pars_fragment>\n#include <map_pars_fragment>\n#include <alphamap_pars_fragment>\n#include <logdepthbuf_pars_fragment>\n#include <clipping_planes_pars_fragment>\nvoid main() {\n\t#include <clipping_planes_fragment>\n\tvec4 diffuseColor = vec4( 1.0 );\n\t#if DEPTH_PACKING == 3200\n\t\tdiffuseColor.a = opacity;\n\t#endif\n\t#include <map_fragment>\n\t#include <alphamap_fragment>\n\t#include <alphatest_fragment>\n\t#include <logdepthbuf_fragment>\n\t#if DEPTH_PACKING == 3200\n\t\tgl_FragColor = vec4( vec3( 1.0 - gl_FragCoord.z ), opacity );\n\t#elif DEPTH_PACKING == 3201\n\t\tgl_FragColor = packDepthToRGBA( gl_FragCoord.z );\n\t#endif\n}\n",
+depth_vert:"#include <common>\n#include <uv_pars_vertex>\n#include <displacementmap_pars_vertex>\n#include <morphtarget_pars_vertex>\n#include <skinning_pars_vertex>\n#include <logdepthbuf_pars_vertex>\n#include <clipping_planes_pars_vertex>\nvoid main() {\n\t#include <uv_vertex>\n\t#include <skinbase_vertex>\n\t#ifdef USE_DISPLACEMENTMAP\n\t\t#include <beginnormal_vertex>\n\t\t#include <morphnormal_vertex>\n\t\t#include <skinnormal_vertex>\n\t#endif\n\t#include <begin_vertex>\n\t#include <morphtarget_vertex>\n\t#include <skinning_vertex>\n\t#include <displacementmap_vertex>\n\t#include <project_vertex>\n\t#include <logdepthbuf_vertex>\n\t#include <clipping_planes_vertex>\n}\n",
+distanceRGBA_frag:"#define DISTANCE\nuniform vec3 referencePosition;\nuniform float nearDistance;\nuniform float farDistance;\nvarying vec3 vWorldPosition;\n#include <common>\n#include <packing>\n#include <uv_pars_fragment>\n#include <map_pars_fragment>\n#include <alphamap_pars_fragment>\n#include <clipping_planes_pars_fragment>\nvoid main () {\n\t#include <clipping_planes_fragment>\n\tvec4 diffuseColor = vec4( 1.0 );\n\t#include <map_fragment>\n\t#include <alphamap_fragment>\n\t#include <alphatest_fragment>\n\tfloat dist = length( vWorldPosition - referencePosition );\n\tdist = ( dist - nearDistance ) / ( farDistance - nearDistance );\n\tdist = saturate( dist );\n\tgl_FragColor = packDepthToRGBA( dist );\n}\n",
+distanceRGBA_vert:"#define DISTANCE\nvarying vec3 vWorldPosition;\n#include <common>\n#include <uv_pars_vertex>\n#include <displacementmap_pars_vertex>\n#include <morphtarget_pars_vertex>\n#include <skinning_pars_vertex>\n#include <clipping_planes_pars_vertex>\nvoid main() {\n\t#include <uv_vertex>\n\t#include <skinbase_vertex>\n\t#ifdef USE_DISPLACEMENTMAP\n\t\t#include <beginnormal_vertex>\n\t\t#include <morphnormal_vertex>\n\t\t#include <skinnormal_vertex>\n\t#endif\n\t#include <begin_vertex>\n\t#include <morphtarget_vertex>\n\t#include <skinning_vertex>\n\t#include <displacementmap_vertex>\n\t#include <project_vertex>\n\t#include <worldpos_vertex>\n\t#include <clipping_planes_vertex>\n\tvWorldPosition = worldPosition.xyz;\n}\n",
+equirect_frag:"uniform sampler2D tEquirect;\nvarying vec3 vWorldDirection;\n#include <common>\nvoid main() {\n\tvec3 direction = normalize( vWorldDirection );\n\tvec2 sampleUV;\n\tsampleUV.y = asin( clamp( direction.y, - 1.0, 1.0 ) ) * RECIPROCAL_PI + 0.5;\n\tsampleUV.x = atan( direction.z, direction.x ) * RECIPROCAL_PI2 + 0.5;\n\tgl_FragColor = texture2D( tEquirect, sampleUV );\n}\n",equirect_vert:"varying vec3 vWorldDirection;\n#include <common>\nvoid main() {\n\tvWorldDirection = transformDirection( position, modelMatrix );\n\t#include <begin_vertex>\n\t#include <project_vertex>\n}\n",
+linedashed_frag:"uniform vec3 diffuse;\nuniform float opacity;\nuniform float dashSize;\nuniform float totalSize;\nvarying float vLineDistance;\n#include <common>\n#include <color_pars_fragment>\n#include <fog_pars_fragment>\n#include <logdepthbuf_pars_fragment>\n#include <clipping_planes_pars_fragment>\nvoid main() {\n\t#include <clipping_planes_fragment>\n\tif ( mod( vLineDistance, totalSize ) > dashSize ) {\n\t\tdiscard;\n\t}\n\tvec3 outgoingLight = vec3( 0.0 );\n\tvec4 diffuseColor = vec4( diffuse, opacity );\n\t#include <logdepthbuf_fragment>\n\t#include <color_fragment>\n\toutgoingLight = diffuseColor.rgb;\n\tgl_FragColor = vec4( outgoingLight, diffuseColor.a );\n\t#include <premultiplied_alpha_fragment>\n\t#include <tonemapping_fragment>\n\t#include <encodings_fragment>\n\t#include <fog_fragment>\n}\n",
+linedashed_vert:"uniform float scale;\nattribute float lineDistance;\nvarying float vLineDistance;\n#include <common>\n#include <color_pars_vertex>\n#include <fog_pars_vertex>\n#include <logdepthbuf_pars_vertex>\n#include <clipping_planes_pars_vertex>\nvoid main() {\n\t#include <color_vertex>\n\tvLineDistance = scale * lineDistance;\n\tvec4 mvPosition = modelViewMatrix * vec4( position, 1.0 );\n\tgl_Position = projectionMatrix * mvPosition;\n\t#include <logdepthbuf_vertex>\n\t#include <clipping_planes_vertex>\n\t#include <fog_vertex>\n}\n",
+meshbasic_frag:"uniform vec3 diffuse;\nuniform float opacity;\n#ifndef FLAT_SHADED\n\tvarying vec3 vNormal;\n#endif\n#include <common>\n#include <color_pars_fragment>\n#include <uv_pars_fragment>\n#include <uv2_pars_fragment>\n#include <map_pars_fragment>\n#include <alphamap_pars_fragment>\n#include <aomap_pars_fragment>\n#include <lightmap_pars_fragment>\n#include <envmap_pars_fragment>\n#include <fog_pars_fragment>\n#include <specularmap_pars_fragment>\n#include <logdepthbuf_pars_fragment>\n#include <clipping_planes_pars_fragment>\nvoid main() {\n\t#include <clipping_planes_fragment>\n\tvec4 diffuseColor = vec4( diffuse, opacity );\n\t#include <logdepthbuf_fragment>\n\t#include <map_fragment>\n\t#include <color_fragment>\n\t#include <alphamap_fragment>\n\t#include <alphatest_fragment>\n\t#include <specularmap_fragment>\n\tReflectedLight reflectedLight = ReflectedLight( vec3( 0.0 ), vec3( 0.0 ), vec3( 0.0 ), vec3( 0.0 ) );\n\t#ifdef USE_LIGHTMAP\n\t\treflectedLight.indirectDiffuse += texture2D( lightMap, vUv2 ).xyz * lightMapIntensity;\n\t#else\n\t\treflectedLight.indirectDiffuse += vec3( 1.0 );\n\t#endif\n\t#include <aomap_fragment>\n\treflectedLight.indirectDiffuse *= diffuseColor.rgb;\n\tvec3 outgoingLight = reflectedLight.indirectDiffuse;\n\t#include <envmap_fragment>\n\tgl_FragColor = vec4( outgoingLight, diffuseColor.a );\n\t#include <premultiplied_alpha_fragment>\n\t#include <tonemapping_fragment>\n\t#include <encodings_fragment>\n\t#include <fog_fragment>\n}\n",
+meshbasic_vert:"#include <common>\n#include <uv_pars_vertex>\n#include <uv2_pars_vertex>\n#include <envmap_pars_vertex>\n#include <color_pars_vertex>\n#include <fog_pars_vertex>\n#include <morphtarget_pars_vertex>\n#include <skinning_pars_vertex>\n#include <logdepthbuf_pars_vertex>\n#include <clipping_planes_pars_vertex>\nvoid main() {\n\t#include <uv_vertex>\n\t#include <uv2_vertex>\n\t#include <color_vertex>\n\t#include <skinbase_vertex>\n\t#ifdef USE_ENVMAP\n\t#include <beginnormal_vertex>\n\t#include <morphnormal_vertex>\n\t#include <skinnormal_vertex>\n\t#include <defaultnormal_vertex>\n\t#endif\n\t#include <begin_vertex>\n\t#include <morphtarget_vertex>\n\t#include <skinning_vertex>\n\t#include <project_vertex>\n\t#include <logdepthbuf_vertex>\n\t#include <worldpos_vertex>\n\t#include <clipping_planes_vertex>\n\t#include <envmap_vertex>\n\t#include <fog_vertex>\n}\n",
+meshlambert_frag:"uniform vec3 diffuse;\nuniform vec3 emissive;\nuniform float opacity;\nvarying vec3 vLightFront;\n#ifdef DOUBLE_SIDED\n\tvarying vec3 vLightBack;\n#endif\n#include <common>\n#include <packing>\n#include <dithering_pars_fragment>\n#include <color_pars_fragment>\n#include <uv_pars_fragment>\n#include <uv2_pars_fragment>\n#include <map_pars_fragment>\n#include <alphamap_pars_fragment>\n#include <aomap_pars_fragment>\n#include <lightmap_pars_fragment>\n#include <emissivemap_pars_fragment>\n#include <envmap_pars_fragment>\n#include <bsdfs>\n#include <lights_pars_begin>\n#include <fog_pars_fragment>\n#include <shadowmap_pars_fragment>\n#include <shadowmask_pars_fragment>\n#include <specularmap_pars_fragment>\n#include <logdepthbuf_pars_fragment>\n#include <clipping_planes_pars_fragment>\nvoid main() {\n\t#include <clipping_planes_fragment>\n\tvec4 diffuseColor = vec4( diffuse, opacity );\n\tReflectedLight reflectedLight = ReflectedLight( vec3( 0.0 ), vec3( 0.0 ), vec3( 0.0 ), vec3( 0.0 ) );\n\tvec3 totalEmissiveRadiance = emissive;\n\t#include <logdepthbuf_fragment>\n\t#include <map_fragment>\n\t#include <color_fragment>\n\t#include <alphamap_fragment>\n\t#include <alphatest_fragment>\n\t#include <specularmap_fragment>\n\t#include <emissivemap_fragment>\n\treflectedLight.indirectDiffuse = getAmbientLightIrradiance( ambientLightColor );\n\t#include <lightmap_fragment>\n\treflectedLight.indirectDiffuse *= BRDF_Diffuse_Lambert( diffuseColor.rgb );\n\t#ifdef DOUBLE_SIDED\n\t\treflectedLight.directDiffuse = ( gl_FrontFacing ) ? 
vLightFront : vLightBack;\n\t#else\n\t\treflectedLight.directDiffuse = vLightFront;\n\t#endif\n\treflectedLight.directDiffuse *= BRDF_Diffuse_Lambert( diffuseColor.rgb ) * getShadowMask();\n\t#include <aomap_fragment>\n\tvec3 outgoingLight = reflectedLight.directDiffuse + reflectedLight.indirectDiffuse + totalEmissiveRadiance;\n\t#include <envmap_fragment>\n\tgl_FragColor = vec4( outgoingLight, diffuseColor.a );\n\t#include <tonemapping_fragment>\n\t#include <encodings_fragment>\n\t#include <fog_fragment>\n\t#include <premultiplied_alpha_fragment>\n\t#include <dithering_fragment>\n}\n",
+meshlambert_vert:"#define LAMBERT\nvarying vec3 vLightFront;\n#ifdef DOUBLE_SIDED\n\tvarying vec3 vLightBack;\n#endif\n#include <common>\n#include <uv_pars_vertex>\n#include <uv2_pars_vertex>\n#include <envmap_pars_vertex>\n#include <bsdfs>\n#include <lights_pars_begin>\n#include <color_pars_vertex>\n#include <fog_pars_vertex>\n#include <morphtarget_pars_vertex>\n#include <skinning_pars_vertex>\n#include <shadowmap_pars_vertex>\n#include <logdepthbuf_pars_vertex>\n#include <clipping_planes_pars_vertex>\nvoid main() {\n\t#include <uv_vertex>\n\t#include <uv2_vertex>\n\t#include <color_vertex>\n\t#include <beginnormal_vertex>\n\t#include <morphnormal_vertex>\n\t#include <skinbase_vertex>\n\t#include <skinnormal_vertex>\n\t#include <defaultnormal_vertex>\n\t#include <begin_vertex>\n\t#include <morphtarget_vertex>\n\t#include <skinning_vertex>\n\t#include <project_vertex>\n\t#include <logdepthbuf_vertex>\n\t#include <clipping_planes_vertex>\n\t#include <worldpos_vertex>\n\t#include <envmap_vertex>\n\t#include <lights_lambert_vertex>\n\t#include <shadowmap_vertex>\n\t#include <fog_vertex>\n}\n",
+meshmatcap_frag:"#define MATCAP\nuniform vec3 diffuse;\nuniform float opacity;\nuniform sampler2D matcap;\nvarying vec3 vViewPosition;\n#ifndef FLAT_SHADED\n\tvarying vec3 vNormal;\n#endif\n#include <common>\n#include <uv_pars_fragment>\n#include <map_pars_fragment>\n#include <alphamap_pars_fragment>\n#include <fog_pars_fragment>\n#include <bumpmap_pars_fragment>\n#include <normalmap_pars_fragment>\n#include <logdepthbuf_pars_fragment>\n#include <clipping_planes_pars_fragment>\nvoid main() {\n\t#include <clipping_planes_fragment>\n\tvec4 diffuseColor = vec4( diffuse, opacity );\n\t#include <logdepthbuf_fragment>\n\t#include <map_fragment>\n\t#include <alphamap_fragment>\n\t#include <alphatest_fragment>\n\t#include <normal_fragment_begin>\n\t#include <normal_fragment_maps>\n\tvec3 viewDir = normalize( vViewPosition );\n\tvec3 x = normalize( vec3( viewDir.z, 0.0, - viewDir.x ) );\n\tvec3 y = cross( viewDir, x );\n\tvec2 uv = vec2( dot( x, normal ), dot( y, normal ) ) * 0.495 + 0.5;\n\tvec4 matcapColor = texture2D( matcap, uv );\n\tmatcapColor = matcapTexelToLinear( matcapColor );\n\tvec3 outgoingLight = diffuseColor.rgb * matcapColor.rgb;\n\tgl_FragColor = vec4( outgoingLight, diffuseColor.a );\n\t#include <premultiplied_alpha_fragment>\n\t#include <tonemapping_fragment>\n\t#include <encodings_fragment>\n\t#include <fog_fragment>\n}\n",
+meshmatcap_vert:"#define MATCAP\nvarying vec3 vViewPosition;\n#ifndef FLAT_SHADED\n\tvarying vec3 vNormal;\n#endif\n#include <common>\n#include <uv_pars_vertex>\n#include <displacementmap_pars_vertex>\n#include <fog_pars_vertex>\n#include <morphtarget_pars_vertex>\n#include <skinning_pars_vertex>\n#include <logdepthbuf_pars_vertex>\n#include <clipping_planes_pars_vertex>\nvoid main() {\n\t#include <uv_vertex>\n\t#include <beginnormal_vertex>\n\t#include <morphnormal_vertex>\n\t#include <skinbase_vertex>\n\t#include <skinnormal_vertex>\n\t#include <defaultnormal_vertex>\n\t#ifndef FLAT_SHADED\n\t\tvNormal = normalize( transformedNormal );\n\t#endif\n\t#include <begin_vertex>\n\t#include <morphtarget_vertex>\n\t#include <skinning_vertex>\n\t#include <displacementmap_vertex>\n\t#include <project_vertex>\n\t#include <logdepthbuf_vertex>\n\t#include <clipping_planes_vertex>\n\t#include <fog_vertex>\n\tvViewPosition = - mvPosition.xyz;\n}\n",
+meshphong_frag:"#define PHONG\nuniform vec3 diffuse;\nuniform vec3 emissive;\nuniform vec3 specular;\nuniform float shininess;\nuniform float opacity;\n#include <common>\n#include <packing>\n#include <dithering_pars_fragment>\n#include <color_pars_fragment>\n#include <uv_pars_fragment>\n#include <uv2_pars_fragment>\n#include <map_pars_fragment>\n#include <alphamap_pars_fragment>\n#include <aomap_pars_fragment>\n#include <lightmap_pars_fragment>\n#include <emissivemap_pars_fragment>\n#include <envmap_pars_fragment>\n#include <gradientmap_pars_fragment>\n#include <fog_pars_fragment>\n#include <bsdfs>\n#include <lights_pars_begin>\n#include <lights_phong_pars_fragment>\n#include <shadowmap_pars_fragment>\n#include <bumpmap_pars_fragment>\n#include <normalmap_pars_fragment>\n#include <specularmap_pars_fragment>\n#include <logdepthbuf_pars_fragment>\n#include <clipping_planes_pars_fragment>\nvoid main() {\n\t#include <clipping_planes_fragment>\n\tvec4 diffuseColor = vec4( diffuse, opacity );\n\tReflectedLight reflectedLight = ReflectedLight( vec3( 0.0 ), vec3( 0.0 ), vec3( 0.0 ), vec3( 0.0 ) );\n\tvec3 totalEmissiveRadiance = emissive;\n\t#include <logdepthbuf_fragment>\n\t#include <map_fragment>\n\t#include <color_fragment>\n\t#include <alphamap_fragment>\n\t#include <alphatest_fragment>\n\t#include <specularmap_fragment>\n\t#include <normal_fragment_begin>\n\t#include <normal_fragment_maps>\n\t#include <emissivemap_fragment>\n\t#include <lights_phong_fragment>\n\t#include <lights_fragment_begin>\n\t#include <lights_fragment_maps>\n\t#include <lights_fragment_end>\n\t#include <aomap_fragment>\n\tvec3 outgoingLight = reflectedLight.directDiffuse + reflectedLight.indirectDiffuse + reflectedLight.directSpecular + reflectedLight.indirectSpecular + totalEmissiveRadiance;\n\t#include <envmap_fragment>\n\tgl_FragColor = vec4( outgoingLight, diffuseColor.a );\n\t#include <tonemapping_fragment>\n\t#include <encodings_fragment>\n\t#include <fog_fragment>\n\t#include 
<premultiplied_alpha_fragment>\n\t#include <dithering_fragment>\n}\n",
+meshphong_vert:"#define PHONG\nvarying vec3 vViewPosition;\n#ifndef FLAT_SHADED\n\tvarying vec3 vNormal;\n#endif\n#include <common>\n#include <uv_pars_vertex>\n#include <uv2_pars_vertex>\n#include <displacementmap_pars_vertex>\n#include <envmap_pars_vertex>\n#include <color_pars_vertex>\n#include <fog_pars_vertex>\n#include <morphtarget_pars_vertex>\n#include <skinning_pars_vertex>\n#include <shadowmap_pars_vertex>\n#include <logdepthbuf_pars_vertex>\n#include <clipping_planes_pars_vertex>\nvoid main() {\n\t#include <uv_vertex>\n\t#include <uv2_vertex>\n\t#include <color_vertex>\n\t#include <beginnormal_vertex>\n\t#include <morphnormal_vertex>\n\t#include <skinbase_vertex>\n\t#include <skinnormal_vertex>\n\t#include <defaultnormal_vertex>\n#ifndef FLAT_SHADED\n\tvNormal = normalize( transformedNormal );\n#endif\n\t#include <begin_vertex>\n\t#include <morphtarget_vertex>\n\t#include <skinning_vertex>\n\t#include <displacementmap_vertex>\n\t#include <project_vertex>\n\t#include <logdepthbuf_vertex>\n\t#include <clipping_planes_vertex>\n\tvViewPosition = - mvPosition.xyz;\n\t#include <worldpos_vertex>\n\t#include <envmap_vertex>\n\t#include <shadowmap_vertex>\n\t#include <fog_vertex>\n}\n",
+meshphysical_frag:"#define PHYSICAL\nuniform vec3 diffuse;\nuniform vec3 emissive;\nuniform float roughness;\nuniform float metalness;\nuniform float opacity;\n#ifndef STANDARD\n\tuniform float clearCoat;\n\tuniform float clearCoatRoughness;\n#endif\nvarying vec3 vViewPosition;\n#ifndef FLAT_SHADED\n\tvarying vec3 vNormal;\n#endif\n#include <common>\n#include <packing>\n#include <dithering_pars_fragment>\n#include <color_pars_fragment>\n#include <uv_pars_fragment>\n#include <uv2_pars_fragment>\n#include <map_pars_fragment>\n#include <alphamap_pars_fragment>\n#include <aomap_pars_fragment>\n#include <lightmap_pars_fragment>\n#include <emissivemap_pars_fragment>\n#include <bsdfs>\n#include <cube_uv_reflection_fragment>\n#include <envmap_pars_fragment>\n#include <envmap_physical_pars_fragment>\n#include <fog_pars_fragment>\n#include <lights_pars_begin>\n#include <lights_physical_pars_fragment>\n#include <shadowmap_pars_fragment>\n#include <bumpmap_pars_fragment>\n#include <normalmap_pars_fragment>\n#include <roughnessmap_pars_fragment>\n#include <metalnessmap_pars_fragment>\n#include <logdepthbuf_pars_fragment>\n#include <clipping_planes_pars_fragment>\nvoid main() {\n\t#include <clipping_planes_fragment>\n\tvec4 diffuseColor = vec4( diffuse, opacity );\n\tReflectedLight reflectedLight = ReflectedLight( vec3( 0.0 ), vec3( 0.0 ), vec3( 0.0 ), vec3( 0.0 ) );\n\tvec3 totalEmissiveRadiance = emissive;\n\t#include <logdepthbuf_fragment>\n\t#include <map_fragment>\n\t#include <color_fragment>\n\t#include <alphamap_fragment>\n\t#include <alphatest_fragment>\n\t#include <roughnessmap_fragment>\n\t#include <metalnessmap_fragment>\n\t#include <normal_fragment_begin>\n\t#include <normal_fragment_maps>\n\t#include <emissivemap_fragment>\n\t#include <lights_physical_fragment>\n\t#include <lights_fragment_begin>\n\t#include <lights_fragment_maps>\n\t#include <lights_fragment_end>\n\t#include <aomap_fragment>\n\tvec3 outgoingLight = reflectedLight.directDiffuse + 
reflectedLight.indirectDiffuse + reflectedLight.directSpecular + reflectedLight.indirectSpecular + totalEmissiveRadiance;\n\tgl_FragColor = vec4( outgoingLight, diffuseColor.a );\n\t#include <tonemapping_fragment>\n\t#include <encodings_fragment>\n\t#include <fog_fragment>\n\t#include <premultiplied_alpha_fragment>\n\t#include <dithering_fragment>\n}\n",
+meshphysical_vert:"#define PHYSICAL\nvarying vec3 vViewPosition;\n#ifndef FLAT_SHADED\n\tvarying vec3 vNormal;\n#endif\n#include <common>\n#include <uv_pars_vertex>\n#include <uv2_pars_vertex>\n#include <displacementmap_pars_vertex>\n#include <color_pars_vertex>\n#include <fog_pars_vertex>\n#include <morphtarget_pars_vertex>\n#include <skinning_pars_vertex>\n#include <shadowmap_pars_vertex>\n#include <logdepthbuf_pars_vertex>\n#include <clipping_planes_pars_vertex>\nvoid main() {\n\t#include <uv_vertex>\n\t#include <uv2_vertex>\n\t#include <color_vertex>\n\t#include <beginnormal_vertex>\n\t#include <morphnormal_vertex>\n\t#include <skinbase_vertex>\n\t#include <skinnormal_vertex>\n\t#include <defaultnormal_vertex>\n#ifndef FLAT_SHADED\n\tvNormal = normalize( transformedNormal );\n#endif\n\t#include <begin_vertex>\n\t#include <morphtarget_vertex>\n\t#include <skinning_vertex>\n\t#include <displacementmap_vertex>\n\t#include <project_vertex>\n\t#include <logdepthbuf_vertex>\n\t#include <clipping_planes_vertex>\n\tvViewPosition = - mvPosition.xyz;\n\t#include <worldpos_vertex>\n\t#include <shadowmap_vertex>\n\t#include <fog_vertex>\n}\n",
+normal_frag:"#define NORMAL\nuniform float opacity;\n#if defined( FLAT_SHADED ) || defined( USE_BUMPMAP ) || ( defined( USE_NORMALMAP ) && ! defined( OBJECTSPACE_NORMALMAP ) )\n\tvarying vec3 vViewPosition;\n#endif\n#ifndef FLAT_SHADED\n\tvarying vec3 vNormal;\n#endif\n#include <packing>\n#include <uv_pars_fragment>\n#include <bumpmap_pars_fragment>\n#include <normalmap_pars_fragment>\n#include <logdepthbuf_pars_fragment>\nvoid main() {\n\t#include <logdepthbuf_fragment>\n\t#include <normal_fragment_begin>\n\t#include <normal_fragment_maps>\n\tgl_FragColor = vec4( packNormalToRGB( normal ), opacity );\n}\n",
+normal_vert:"#define NORMAL\n#if defined( FLAT_SHADED ) || defined( USE_BUMPMAP ) || ( defined( USE_NORMALMAP ) && ! defined( OBJECTSPACE_NORMALMAP ) )\n\tvarying vec3 vViewPosition;\n#endif\n#ifndef FLAT_SHADED\n\tvarying vec3 vNormal;\n#endif\n#include <uv_pars_vertex>\n#include <displacementmap_pars_vertex>\n#include <morphtarget_pars_vertex>\n#include <skinning_pars_vertex>\n#include <logdepthbuf_pars_vertex>\nvoid main() {\n\t#include <uv_vertex>\n\t#include <beginnormal_vertex>\n\t#include <morphnormal_vertex>\n\t#include <skinbase_vertex>\n\t#include <skinnormal_vertex>\n\t#include <defaultnormal_vertex>\n#ifndef FLAT_SHADED\n\tvNormal = normalize( transformedNormal );\n#endif\n\t#include <begin_vertex>\n\t#include <morphtarget_vertex>\n\t#include <skinning_vertex>\n\t#include <displacementmap_vertex>\n\t#include <project_vertex>\n\t#include <logdepthbuf_vertex>\n#if defined( FLAT_SHADED ) || defined( USE_BUMPMAP ) || ( defined( USE_NORMALMAP ) && ! defined( OBJECTSPACE_NORMALMAP ) )\n\tvViewPosition = - mvPosition.xyz;\n#endif\n}\n",
+points_frag:"uniform vec3 diffuse;\nuniform float opacity;\n#include <common>\n#include <color_pars_fragment>\n#include <map_particle_pars_fragment>\n#include <fog_pars_fragment>\n#include <logdepthbuf_pars_fragment>\n#include <clipping_planes_pars_fragment>\nvoid main() {\n\t#include <clipping_planes_fragment>\n\tvec3 outgoingLight = vec3( 0.0 );\n\tvec4 diffuseColor = vec4( diffuse, opacity );\n\t#include <logdepthbuf_fragment>\n\t#include <map_particle_fragment>\n\t#include <color_fragment>\n\t#include <alphatest_fragment>\n\toutgoingLight = diffuseColor.rgb;\n\tgl_FragColor = vec4( outgoingLight, diffuseColor.a );\n\t#include <premultiplied_alpha_fragment>\n\t#include <tonemapping_fragment>\n\t#include <encodings_fragment>\n\t#include <fog_fragment>\n}\n",
+points_vert:"uniform float size;\nuniform float scale;\n#include <common>\n#include <color_pars_vertex>\n#include <fog_pars_vertex>\n#include <morphtarget_pars_vertex>\n#include <logdepthbuf_pars_vertex>\n#include <clipping_planes_pars_vertex>\nvoid main() {\n\t#include <color_vertex>\n\t#include <begin_vertex>\n\t#include <morphtarget_vertex>\n\t#include <project_vertex>\n\tgl_PointSize = size;\n\t#ifdef USE_SIZEATTENUATION\n\t\tbool isPerspective = ( projectionMatrix[ 2 ][ 3 ] == - 1.0 );\n\t\tif ( isPerspective ) gl_PointSize *= ( scale / - mvPosition.z );\n\t#endif\n\t#include <logdepthbuf_vertex>\n\t#include <clipping_planes_vertex>\n\t#include <worldpos_vertex>\n\t#include <fog_vertex>\n}\n",
+shadow_frag:"uniform vec3 color;\nuniform float opacity;\n#include <common>\n#include <packing>\n#include <fog_pars_fragment>\n#include <bsdfs>\n#include <lights_pars_begin>\n#include <shadowmap_pars_fragment>\n#include <shadowmask_pars_fragment>\nvoid main() {\n\tgl_FragColor = vec4( color, opacity * ( 1.0 - getShadowMask() ) );\n\t#include <fog_fragment>\n}\n",shadow_vert:"#include <fog_pars_vertex>\n#include <shadowmap_pars_vertex>\nvoid main() {\n\t#include <begin_vertex>\n\t#include <project_vertex>\n\t#include <worldpos_vertex>\n\t#include <shadowmap_vertex>\n\t#include <fog_vertex>\n}\n",
+sprite_frag:"uniform vec3 diffuse;\nuniform float opacity;\n#include <common>\n#include <uv_pars_fragment>\n#include <map_pars_fragment>\n#include <fog_pars_fragment>\n#include <logdepthbuf_pars_fragment>\n#include <clipping_planes_pars_fragment>\nvoid main() {\n\t#include <clipping_planes_fragment>\n\tvec3 outgoingLight = vec3( 0.0 );\n\tvec4 diffuseColor = vec4( diffuse, opacity );\n\t#include <logdepthbuf_fragment>\n\t#include <map_fragment>\n\t#include <alphatest_fragment>\n\toutgoingLight = diffuseColor.rgb;\n\tgl_FragColor = vec4( outgoingLight, diffuseColor.a );\n\t#include <tonemapping_fragment>\n\t#include <encodings_fragment>\n\t#include <fog_fragment>\n}\n",
+sprite_vert:"uniform float rotation;\nuniform vec2 center;\n#include <common>\n#include <uv_pars_vertex>\n#include <fog_pars_vertex>\n#include <logdepthbuf_pars_vertex>\n#include <clipping_planes_pars_vertex>\nvoid main() {\n\t#include <uv_vertex>\n\tvec4 mvPosition = modelViewMatrix * vec4( 0.0, 0.0, 0.0, 1.0 );\n\tvec2 scale;\n\tscale.x = length( vec3( modelMatrix[ 0 ].x, modelMatrix[ 0 ].y, modelMatrix[ 0 ].z ) );\n\tscale.y = length( vec3( modelMatrix[ 1 ].x, modelMatrix[ 1 ].y, modelMatrix[ 1 ].z ) );\n\t#ifndef USE_SIZEATTENUATION\n\t\tbool isPerspective = ( projectionMatrix[ 2 ][ 3 ] == - 1.0 );\n\t\tif ( isPerspective ) scale *= - mvPosition.z;\n\t#endif\n\tvec2 alignedPosition = ( position.xy - ( center - vec2( 0.5 ) ) ) * scale;\n\tvec2 rotatedPosition;\n\trotatedPosition.x = cos( rotation ) * alignedPosition.x - sin( rotation ) * alignedPosition.y;\n\trotatedPosition.y = sin( rotation ) * alignedPosition.x + cos( rotation ) * alignedPosition.y;\n\tmvPosition.xy += rotatedPosition;\n\tgl_Position = projectionMatrix * mvPosition;\n\t#include <logdepthbuf_vertex>\n\t#include <clipping_planes_vertex>\n\t#include <fog_vertex>\n}\n"},
+va={merge:function(a){for(var b={},c=0;c<a.length;c++){var d=this.clone(a[c]),e;for(e in d)b[e]=d[e]}return b},clone:function(a){var b={},c;for(c in a){b[c]={};for(var d in a[c]){var e=a[c][d];e&&(e.isColor||e.isMatrix3||e.isMatrix4||e.isVector2||e.isVector3||e.isVector4||e.isTexture)?b[c][d]=e.clone():Array.isArray(e)?b[c][d]=e.slice():b[c][d]=e}}return b}},$g={aliceblue:15792383,antiquewhite:16444375,aqua:65535,aquamarine:8388564,azure:15794175,beige:16119260,bisque:16770244,black:0,blanchedalmond:16772045,
+blue:255,blueviolet:9055202,brown:10824234,burlywood:14596231,cadetblue:6266528,chartreuse:8388352,chocolate:13789470,coral:16744272,cornflowerblue:6591981,cornsilk:16775388,crimson:14423100,cyan:65535,darkblue:139,darkcyan:35723,darkgoldenrod:12092939,darkgray:11119017,darkgreen:25600,darkgrey:11119017,darkkhaki:12433259,darkmagenta:9109643,darkolivegreen:5597999,darkorange:16747520,darkorchid:10040012,darkred:9109504,darksalmon:15308410,darkseagreen:9419919,darkslateblue:4734347,darkslategray:3100495,
+darkslategrey:3100495,darkturquoise:52945,darkviolet:9699539,deeppink:16716947,deepskyblue:49151,dimgray:6908265,dimgrey:6908265,dodgerblue:2003199,firebrick:11674146,floralwhite:16775920,forestgreen:2263842,fuchsia:16711935,gainsboro:14474460,ghostwhite:16316671,gold:16766720,goldenrod:14329120,gray:8421504,green:32768,greenyellow:11403055,grey:8421504,honeydew:15794160,hotpink:16738740,indianred:13458524,indigo:4915330,ivory:16777200,khaki:15787660,lavender:15132410,lavenderblush:16773365,lawngreen:8190976,
+lemonchiffon:16775885,lightblue:11393254,lightcoral:15761536,lightcyan:14745599,lightgoldenrodyellow:16448210,lightgray:13882323,lightgreen:9498256,lightgrey:13882323,lightpink:16758465,lightsalmon:16752762,lightseagreen:2142890,lightskyblue:8900346,lightslategray:7833753,lightslategrey:7833753,lightsteelblue:11584734,lightyellow:16777184,lime:65280,limegreen:3329330,linen:16445670,magenta:16711935,maroon:8388608,mediumaquamarine:6737322,mediumblue:205,mediumorchid:12211667,mediumpurple:9662683,mediumseagreen:3978097,
+mediumslateblue:8087790,mediumspringgreen:64154,mediumturquoise:4772300,mediumvioletred:13047173,midnightblue:1644912,mintcream:16121850,mistyrose:16770273,moccasin:16770229,navajowhite:16768685,navy:128,oldlace:16643558,olive:8421376,olivedrab:7048739,orange:16753920,orangered:16729344,orchid:14315734,palegoldenrod:15657130,palegreen:10025880,paleturquoise:11529966,palevioletred:14381203,papayawhip:16773077,peachpuff:16767673,peru:13468991,pink:16761035,plum:14524637,powderblue:11591910,purple:8388736,
+rebeccapurple:6697881,red:16711680,rosybrown:12357519,royalblue:4286945,saddlebrown:9127187,salmon:16416882,sandybrown:16032864,seagreen:3050327,seashell:16774638,sienna:10506797,silver:12632256,skyblue:8900331,slateblue:6970061,slategray:7372944,slategrey:7372944,snow:16775930,springgreen:65407,steelblue:4620980,tan:13808780,teal:32896,thistle:14204888,tomato:16737095,turquoise:4251856,violet:15631086,wheat:16113331,white:16777215,whitesmoke:16119285,yellow:16776960,yellowgreen:10145074};Object.assign(G.prototype,
+{isColor:!0,r:1,g:1,b:1,set:function(a){a&&a.isColor?this.copy(a):"number"===typeof a?this.setHex(a):"string"===typeof a&&this.setStyle(a);return this},setScalar:function(a){this.b=this.g=this.r=a;return this},setHex:function(a){a=Math.floor(a);this.r=(a>>16&255)/255;this.g=(a>>8&255)/255;this.b=(a&255)/255;return this},setRGB:function(a,b,c){this.r=a;this.g=b;this.b=c;return this},setHSL:function(){function a(a,c,d){0>d&&(d+=1);1<d&&--d;return d<1/6?a+6*(c-a)*d:.5>d?c:d<2/3?a+6*(c-a)*(2/3-d):a}return function(b,
+c,d){b=R.euclideanModulo(b,1);c=R.clamp(c,0,1);d=R.clamp(d,0,1);0===c?this.r=this.g=this.b=d:(c=.5>=d?d*(1+c):d+c-d*c,d=2*d-c,this.r=a(d,c,b+1/3),this.g=a(d,c,b),this.b=a(d,c,b-1/3));return this}}(),setStyle:function(a){function b(b){void 0!==b&&1>parseFloat(b)&&console.warn("THREE.Color: Alpha component of "+a+" will be ignored.")}var c;if(c=/^((?:rgb|hsl)a?)\(\s*([^\)]*)\)/.exec(a)){var d=c[2];switch(c[1]){case "rgb":case "rgba":if(c=/^(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*(,\s*([0-9]*\.?[0-9]+)\s*)?$/.exec(d))return this.r=
+Math.min(255,parseInt(c[1],10))/255,this.g=Math.min(255,parseInt(c[2],10))/255,this.b=Math.min(255,parseInt(c[3],10))/255,b(c[5]),this;if(c=/^(\d+)%\s*,\s*(\d+)%\s*,\s*(\d+)%\s*(,\s*([0-9]*\.?[0-9]+)\s*)?$/.exec(d))return this.r=Math.min(100,parseInt(c[1],10))/100,this.g=Math.min(100,parseInt(c[2],10))/100,this.b=Math.min(100,parseInt(c[3],10))/100,b(c[5]),this;break;case "hsl":case "hsla":if(c=/^([0-9]*\.?[0-9]+)\s*,\s*(\d+)%\s*,\s*(\d+)%\s*(,\s*([0-9]*\.?[0-9]+)\s*)?$/.exec(d)){d=parseFloat(c[1])/
+360;var e=parseInt(c[2],10)/100,f=parseInt(c[3],10)/100;b(c[5]);return this.setHSL(d,e,f)}}}else if(c=/^#([A-Fa-f0-9]+)$/.exec(a)){c=c[1];d=c.length;if(3===d)return this.r=parseInt(c.charAt(0)+c.charAt(0),16)/255,this.g=parseInt(c.charAt(1)+c.charAt(1),16)/255,this.b=parseInt(c.charAt(2)+c.charAt(2),16)/255,this;if(6===d)return this.r=parseInt(c.charAt(0)+c.charAt(1),16)/255,this.g=parseInt(c.charAt(2)+c.charAt(3),16)/255,this.b=parseInt(c.charAt(4)+c.charAt(5),16)/255,this}a&&0<a.length&&(c=$g[a],
+void 0!==c?this.setHex(c):console.warn("THREE.Color: Unknown color "+a));return this},clone:function(){return new this.constructor(this.r,this.g,this.b)},copy:function(a){this.r=a.r;this.g=a.g;this.b=a.b;return this},copyGammaToLinear:function(a,b){void 0===b&&(b=2);this.r=Math.pow(a.r,b);this.g=Math.pow(a.g,b);this.b=Math.pow(a.b,b);return this},copyLinearToGamma:function(a,b){void 0===b&&(b=2);b=0<b?1/b:1;this.r=Math.pow(a.r,b);this.g=Math.pow(a.g,b);this.b=Math.pow(a.b,b);return this},convertGammaToLinear:function(a){this.copyGammaToLinear(this,
+a);return this},convertLinearToGamma:function(a){this.copyLinearToGamma(this,a);return this},copySRGBToLinear:function(){function a(a){return.04045>a?.0773993808*a:Math.pow(.9478672986*a+.0521327014,2.4)}return function(b){this.r=a(b.r);this.g=a(b.g);this.b=a(b.b);return this}}(),copyLinearToSRGB:function(){function a(a){return.0031308>a?12.92*a:1.055*Math.pow(a,.41666)-.055}return function(b){this.r=a(b.r);this.g=a(b.g);this.b=a(b.b);return this}}(),convertSRGBToLinear:function(){this.copySRGBToLinear(this);
+return this},convertLinearToSRGB:function(){this.copyLinearToSRGB(this);return this},getHex:function(){return 255*this.r<<16^255*this.g<<8^255*this.b<<0},getHexString:function(){return("000000"+this.getHex().toString(16)).slice(-6)},getHSL:function(a){void 0===a&&(console.warn("THREE.Color: .getHSL() target is now required"),a={h:0,s:0,l:0});var b=this.r,c=this.g,d=this.b,e=Math.max(b,c,d),f=Math.min(b,c,d),g,h=(f+e)/2;if(f===e)f=g=0;else{var k=e-f;f=.5>=h?k/(e+f):k/(2-e-f);switch(e){case b:g=(c-
+d)/k+(c<d?6:0);break;case c:g=(d-b)/k+2;break;case d:g=(b-c)/k+4}g/=6}a.h=g;a.s=f;a.l=h;return a},getStyle:function(){return"rgb("+(255*this.r|0)+","+(255*this.g|0)+","+(255*this.b|0)+")"},offsetHSL:function(){var a={};return function(b,c,d){this.getHSL(a);a.h+=b;a.s+=c;a.l+=d;this.setHSL(a.h,a.s,a.l);return this}}(),add:function(a){this.r+=a.r;this.g+=a.g;this.b+=a.b;return this},addColors:function(a,b){this.r=a.r+b.r;this.g=a.g+b.g;this.b=a.b+b.b;return this},addScalar:function(a){this.r+=a;this.g+=
+a;this.b+=a;return this},sub:function(a){this.r=Math.max(0,this.r-a.r);this.g=Math.max(0,this.g-a.g);this.b=Math.max(0,this.b-a.b);return this},multiply:function(a){this.r*=a.r;this.g*=a.g;this.b*=a.b;return this},multiplyScalar:function(a){this.r*=a;this.g*=a;this.b*=a;return this},lerp:function(a,b){this.r+=(a.r-this.r)*b;this.g+=(a.g-this.g)*b;this.b+=(a.b-this.b)*b;return this},lerpHSL:function(){var a={h:0,s:0,l:0},b={h:0,s:0,l:0};return function(c,d){this.getHSL(a);c.getHSL(b);c=R.lerp(a.h,
+b.h,d);var e=R.lerp(a.s,b.s,d);d=R.lerp(a.l,b.l,d);this.setHSL(c,e,d);return this}}(),equals:function(a){return a.r===this.r&&a.g===this.g&&a.b===this.b},fromArray:function(a,b){void 0===b&&(b=0);this.r=a[b];this.g=a[b+1];this.b=a[b+2];return this},toArray:function(a,b){void 0===a&&(a=[]);void 0===b&&(b=0);a[b]=this.r;a[b+1]=this.g;a[b+2]=this.b;return a},toJSON:function(){return this.getHex()}});var J={common:{diffuse:{value:new G(15658734)},opacity:{value:1},map:{value:null},uvTransform:{value:new da},
+alphaMap:{value:null}},specularmap:{specularMap:{value:null}},envmap:{envMap:{value:null},flipEnvMap:{value:-1},reflectivity:{value:1},refractionRatio:{value:.98},maxMipLevel:{value:0}},aomap:{aoMap:{value:null},aoMapIntensity:{value:1}},lightmap:{lightMap:{value:null},lightMapIntensity:{value:1}},emissivemap:{emissiveMap:{value:null}},bumpmap:{bumpMap:{value:null},bumpScale:{value:1}},normalmap:{normalMap:{value:null},normalScale:{value:new z(1,1)}},displacementmap:{displacementMap:{value:null},
+displacementScale:{value:1},displacementBias:{value:0}},roughnessmap:{roughnessMap:{value:null}},metalnessmap:{metalnessMap:{value:null}},gradientmap:{gradientMap:{value:null}},fog:{fogDensity:{value:2.5E-4},fogNear:{value:1},fogFar:{value:2E3},fogColor:{value:new G(16777215)}},lights:{ambientLightColor:{value:[]},directionalLights:{value:[],properties:{direction:{},color:{},shadow:{},shadowBias:{},shadowRadius:{},shadowMapSize:{}}},directionalShadowMap:{value:[]},directionalShadowMatrix:{value:[]},
+spotLights:{value:[],properties:{color:{},position:{},direction:{},distance:{},coneCos:{},penumbraCos:{},decay:{},shadow:{},shadowBias:{},shadowRadius:{},shadowMapSize:{}}},spotShadowMap:{value:[]},spotShadowMatrix:{value:[]},pointLights:{value:[],properties:{color:{},position:{},decay:{},distance:{},shadow:{},shadowBias:{},shadowRadius:{},shadowMapSize:{},shadowCameraNear:{},shadowCameraFar:{}}},pointShadowMap:{value:[]},pointShadowMatrix:{value:[]},hemisphereLights:{value:[],properties:{direction:{},
+skyColor:{},groundColor:{}}},rectAreaLights:{value:[],properties:{color:{},position:{},width:{},height:{}}}},points:{diffuse:{value:new G(15658734)},opacity:{value:1},size:{value:1},scale:{value:1},map:{value:null},uvTransform:{value:new da}},sprite:{diffuse:{value:new G(15658734)},opacity:{value:1},center:{value:new z(.5,.5)},rotation:{value:0},map:{value:null},uvTransform:{value:new da}}},Qa={basic:{uniforms:va.merge([J.common,J.specularmap,J.envmap,J.aomap,J.lightmap,J.fog]),vertexShader:K.meshbasic_vert,
+fragmentShader:K.meshbasic_frag},lambert:{uniforms:va.merge([J.common,J.specularmap,J.envmap,J.aomap,J.lightmap,J.emissivemap,J.fog,J.lights,{emissive:{value:new G(0)}}]),vertexShader:K.meshlambert_vert,fragmentShader:K.meshlambert_frag},phong:{uniforms:va.merge([J.common,J.specularmap,J.envmap,J.aomap,J.lightmap,J.emissivemap,J.bumpmap,J.normalmap,J.displacementmap,J.gradientmap,J.fog,J.lights,{emissive:{value:new G(0)},specular:{value:new G(1118481)},shininess:{value:30}}]),vertexShader:K.meshphong_vert,
+fragmentShader:K.meshphong_frag},standard:{uniforms:va.merge([J.common,J.envmap,J.aomap,J.lightmap,J.emissivemap,J.bumpmap,J.normalmap,J.displacementmap,J.roughnessmap,J.metalnessmap,J.fog,J.lights,{emissive:{value:new G(0)},roughness:{value:.5},metalness:{value:.5},envMapIntensity:{value:1}}]),vertexShader:K.meshphysical_vert,fragmentShader:K.meshphysical_frag},matcap:{uniforms:va.merge([J.common,J.bumpmap,J.normalmap,J.displacementmap,J.fog,{matcap:{value:null}}]),vertexShader:K.meshmatcap_vert,
+fragmentShader:K.meshmatcap_frag},points:{uniforms:va.merge([J.points,J.fog]),vertexShader:K.points_vert,fragmentShader:K.points_frag},dashed:{uniforms:va.merge([J.common,J.fog,{scale:{value:1},dashSize:{value:1},totalSize:{value:2}}]),vertexShader:K.linedashed_vert,fragmentShader:K.linedashed_frag},depth:{uniforms:va.merge([J.common,J.displacementmap]),vertexShader:K.depth_vert,fragmentShader:K.depth_frag},normal:{uniforms:va.merge([J.common,J.bumpmap,J.normalmap,J.displacementmap,{opacity:{value:1}}]),
+vertexShader:K.normal_vert,fragmentShader:K.normal_frag},sprite:{uniforms:va.merge([J.sprite,J.fog]),vertexShader:K.sprite_vert,fragmentShader:K.sprite_frag},background:{uniforms:{uvTransform:{value:new da},t2D:{value:null}},vertexShader:K.background_vert,fragmentShader:K.background_frag},cube:{uniforms:{tCube:{value:null},tFlip:{value:-1},opacity:{value:1}},vertexShader:K.cube_vert,fragmentShader:K.cube_frag},equirect:{uniforms:{tEquirect:{value:null}},vertexShader:K.equirect_vert,fragmentShader:K.equirect_frag},
+distanceRGBA:{uniforms:va.merge([J.common,J.displacementmap,{referencePosition:{value:new p},nearDistance:{value:1},farDistance:{value:1E3}}]),vertexShader:K.distanceRGBA_vert,fragmentShader:K.distanceRGBA_frag},shadow:{uniforms:va.merge([J.lights,J.fog,{color:{value:new G(0)},opacity:{value:1}}]),vertexShader:K.shadow_vert,fragmentShader:K.shadow_frag}};Qa.physical={uniforms:va.merge([Qa.standard.uniforms,{clearCoat:{value:0},clearCoatRoughness:{value:0}}]),vertexShader:K.meshphysical_vert,fragmentShader:K.meshphysical_frag};
+Object.assign(Xa.prototype,{clone:function(){return(new this.constructor).copy(this)},copy:function(a){this.a=a.a;this.b=a.b;this.c=a.c;this.normal.copy(a.normal);this.color.copy(a.color);this.materialIndex=a.materialIndex;for(var b=0,c=a.vertexNormals.length;b<c;b++)this.vertexNormals[b]=a.vertexNormals[b].clone();b=0;for(c=a.vertexColors.length;b<c;b++)this.vertexColors[b]=a.vertexColors[b].clone();return this}});mb.RotationOrders="XYZ YZX ZXY XZY YXZ ZYX".split(" ");mb.DefaultOrder="XYZ";Object.defineProperties(mb.prototype,
+{x:{get:function(){return this._x},set:function(a){this._x=a;this.onChangeCallback()}},y:{get:function(){return this._y},set:function(a){this._y=a;this.onChangeCallback()}},z:{get:function(){return this._z},set:function(a){this._z=a;this.onChangeCallback()}},order:{get:function(){return this._order},set:function(a){this._order=a;this.onChangeCallback()}}});Object.assign(mb.prototype,{isEuler:!0,set:function(a,b,c,d){this._x=a;this._y=b;this._z=c;this._order=d||this._order;this.onChangeCallback();
+return this},clone:function(){return new this.constructor(this._x,this._y,this._z,this._order)},copy:function(a){this._x=a._x;this._y=a._y;this._z=a._z;this._order=a._order;this.onChangeCallback();return this},setFromRotationMatrix:function(a,b,c){var d=R.clamp,e=a.elements;a=e[0];var f=e[4],g=e[8],h=e[1],k=e[5],m=e[9],l=e[2],n=e[6];e=e[10];b=b||this._order;"XYZ"===b?(this._y=Math.asin(d(g,-1,1)),.99999>Math.abs(g)?(this._x=Math.atan2(-m,e),this._z=Math.atan2(-f,a)):(this._x=Math.atan2(n,k),this._z=
+0)):"YXZ"===b?(this._x=Math.asin(-d(m,-1,1)),.99999>Math.abs(m)?(this._y=Math.atan2(g,e),this._z=Math.atan2(h,k)):(this._y=Math.atan2(-l,a),this._z=0)):"ZXY"===b?(this._x=Math.asin(d(n,-1,1)),.99999>Math.abs(n)?(this._y=Math.atan2(-l,e),this._z=Math.atan2(-f,k)):(this._y=0,this._z=Math.atan2(h,a))):"ZYX"===b?(this._y=Math.asin(-d(l,-1,1)),.99999>Math.abs(l)?(this._x=Math.atan2(n,e),this._z=Math.atan2(h,a)):(this._x=0,this._z=Math.atan2(-f,k))):"YZX"===b?(this._z=Math.asin(d(h,-1,1)),.99999>Math.abs(h)?
+(this._x=Math.atan2(-m,k),this._y=Math.atan2(-l,a)):(this._x=0,this._y=Math.atan2(g,e))):"XZY"===b?(this._z=Math.asin(-d(f,-1,1)),.99999>Math.abs(f)?(this._x=Math.atan2(n,k),this._y=Math.atan2(g,a)):(this._x=Math.atan2(-m,e),this._y=0)):console.warn("THREE.Euler: .setFromRotationMatrix() given unsupported order: "+b);this._order=b;if(!1!==c)this.onChangeCallback();return this},setFromQuaternion:function(){var a=new P;return function(b,c,d){a.makeRotationFromQuaternion(b);return this.setFromRotationMatrix(a,
+c,d)}}(),setFromVector3:function(a,b){return this.set(a.x,a.y,a.z,b||this._order)},reorder:function(){var a=new ja;return function(b){a.setFromEuler(this);return this.setFromQuaternion(a,b)}}(),equals:function(a){return a._x===this._x&&a._y===this._y&&a._z===this._z&&a._order===this._order},fromArray:function(a){this._x=a[0];this._y=a[1];this._z=a[2];void 0!==a[3]&&(this._order=a[3]);this.onChangeCallback();return this},toArray:function(a,b){void 0===a&&(a=[]);void 0===b&&(b=0);a[b]=this._x;a[b+1]=
+this._y;a[b+2]=this._z;a[b+3]=this._order;return a},toVector3:function(a){return a?a.set(this._x,this._y,this._z):new p(this._x,this._y,this._z)},onChange:function(a){this.onChangeCallback=a;return this},onChangeCallback:function(){}});Object.assign(Yd.prototype,{set:function(a){this.mask=1<<a|0},enable:function(a){this.mask=this.mask|1<<a|0},toggle:function(a){this.mask^=1<<a|0},disable:function(a){this.mask&=~(1<<a|0)},test:function(a){return 0!==(this.mask&a.mask)}});var Mf=0;D.DefaultUp=new p(0,
+1,0);D.DefaultMatrixAutoUpdate=!0;D.prototype=Object.assign(Object.create(ia.prototype),{constructor:D,isObject3D:!0,onBeforeRender:function(){},onAfterRender:function(){},applyMatrix:function(a){this.matrix.multiplyMatrices(a,this.matrix);this.matrix.decompose(this.position,this.quaternion,this.scale)},applyQuaternion:function(a){this.quaternion.premultiply(a);return this},setRotationFromAxisAngle:function(a,b){this.quaternion.setFromAxisAngle(a,b)},setRotationFromEuler:function(a){this.quaternion.setFromEuler(a,
+!0)},setRotationFromMatrix:function(a){this.quaternion.setFromRotationMatrix(a)},setRotationFromQuaternion:function(a){this.quaternion.copy(a)},rotateOnAxis:function(){var a=new ja;return function(b,c){a.setFromAxisAngle(b,c);this.quaternion.multiply(a);return this}}(),rotateOnWorldAxis:function(){var a=new ja;return function(b,c){a.setFromAxisAngle(b,c);this.quaternion.premultiply(a);return this}}(),rotateX:function(){var a=new p(1,0,0);return function(b){return this.rotateOnAxis(a,b)}}(),rotateY:function(){var a=
+new p(0,1,0);return function(b){return this.rotateOnAxis(a,b)}}(),rotateZ:function(){var a=new p(0,0,1);return function(b){return this.rotateOnAxis(a,b)}}(),translateOnAxis:function(){var a=new p;return function(b,c){a.copy(b).applyQuaternion(this.quaternion);this.position.add(a.multiplyScalar(c));return this}}(),translateX:function(){var a=new p(1,0,0);return function(b){return this.translateOnAxis(a,b)}}(),translateY:function(){var a=new p(0,1,0);return function(b){return this.translateOnAxis(a,
+b)}}(),translateZ:function(){var a=new p(0,0,1);return function(b){return this.translateOnAxis(a,b)}}(),localToWorld:function(a){return a.applyMatrix4(this.matrixWorld)},worldToLocal:function(){var a=new P;return function(b){return b.applyMatrix4(a.getInverse(this.matrixWorld))}}(),lookAt:function(){var a=new ja,b=new P,c=new p,d=new p;return function(e,f,g){e.isVector3?c.copy(e):c.set(e,f,g);e=this.parent;this.updateWorldMatrix(!0,!1);d.setFromMatrixPosition(this.matrixWorld);this.isCamera?b.lookAt(d,
+c,this.up):b.lookAt(c,d,this.up);this.quaternion.setFromRotationMatrix(b);e&&(b.extractRotation(e.matrixWorld),a.setFromRotationMatrix(b),this.quaternion.premultiply(a.inverse()))}}(),add:function(a){if(1<arguments.length){for(var b=0;b<arguments.length;b++)this.add(arguments[b]);return this}if(a===this)return console.error("THREE.Object3D.add: object can't be added as a child of itself.",a),this;a&&a.isObject3D?(null!==a.parent&&a.parent.remove(a),a.parent=this,a.dispatchEvent({type:"added"}),this.children.push(a)):
+console.error("THREE.Object3D.add: object not an instance of THREE.Object3D.",a);return this},remove:function(a){if(1<arguments.length){for(var b=0;b<arguments.length;b++)this.remove(arguments[b]);return this}b=this.children.indexOf(a);-1!==b&&(a.parent=null,a.dispatchEvent({type:"removed"}),this.children.splice(b,1));return this},getObjectById:function(a){return this.getObjectByProperty("id",a)},getObjectByName:function(a){return this.getObjectByProperty("name",a)},getObjectByProperty:function(a,
+b){if(this[a]===b)return this;for(var c=0,d=this.children.length;c<d;c++){var e=this.children[c].getObjectByProperty(a,b);if(void 0!==e)return e}},getWorldPosition:function(a){void 0===a&&(console.warn("THREE.Object3D: .getWorldPosition() target is now required"),a=new p);this.updateMatrixWorld(!0);return a.setFromMatrixPosition(this.matrixWorld)},getWorldQuaternion:function(){var a=new p,b=new p;return function(c){void 0===c&&(console.warn("THREE.Object3D: .getWorldQuaternion() target is now required"),
+c=new ja);this.updateMatrixWorld(!0);this.matrixWorld.decompose(a,c,b);return c}}(),getWorldScale:function(){var a=new p,b=new ja;return function(c){void 0===c&&(console.warn("THREE.Object3D: .getWorldScale() target is now required"),c=new p);this.updateMatrixWorld(!0);this.matrixWorld.decompose(a,b,c);return c}}(),getWorldDirection:function(a){void 0===a&&(console.warn("THREE.Object3D: .getWorldDirection() target is now required"),a=new p);this.updateMatrixWorld(!0);var b=this.matrixWorld.elements;
+return a.set(b[8],b[9],b[10]).normalize()},raycast:function(){},traverse:function(a){a(this);for(var b=this.children,c=0,d=b.length;c<d;c++)b[c].traverse(a)},traverseVisible:function(a){if(!1!==this.visible){a(this);for(var b=this.children,c=0,d=b.length;c<d;c++)b[c].traverseVisible(a)}},traverseAncestors:function(a){var b=this.parent;null!==b&&(a(b),b.traverseAncestors(a))},updateMatrix:function(){this.matrix.compose(this.position,this.quaternion,this.scale);this.matrixWorldNeedsUpdate=!0},updateMatrixWorld:function(a){this.matrixAutoUpdate&&
+this.updateMatrix();if(this.matrixWorldNeedsUpdate||a)null===this.parent?this.matrixWorld.copy(this.matrix):this.matrixWorld.multiplyMatrices(this.parent.matrixWorld,this.matrix),this.matrixWorldNeedsUpdate=!1,a=!0;for(var b=this.children,c=0,d=b.length;c<d;c++)b[c].updateMatrixWorld(a)},updateWorldMatrix:function(a,b){var c=this.parent;!0===a&&null!==c&&c.updateWorldMatrix(!0,!1);this.matrixAutoUpdate&&this.updateMatrix();null===this.parent?this.matrixWorld.copy(this.matrix):this.matrixWorld.multiplyMatrices(this.parent.matrixWorld,
+this.matrix);if(!0===b)for(a=this.children,b=0,c=a.length;b<c;b++)a[b].updateWorldMatrix(!1,!0)},toJSON:function(a){function b(b,c){void 0===b[c.uuid]&&(b[c.uuid]=c.toJSON(a));return c.uuid}function c(a){var b=[],c;for(c in a){var d=a[c];delete d.metadata;b.push(d)}return b}var d=void 0===a||"string"===typeof a,e={};d&&(a={geometries:{},materials:{},textures:{},images:{},shapes:{}},e.metadata={version:4.5,type:"Object",generator:"Object3D.toJSON"});var f={};f.uuid=this.uuid;f.type=this.type;""!==
+this.name&&(f.name=this.name);!0===this.castShadow&&(f.castShadow=!0);!0===this.receiveShadow&&(f.receiveShadow=!0);!1===this.visible&&(f.visible=!1);!1===this.frustumCulled&&(f.frustumCulled=!1);0!==this.renderOrder&&(f.renderOrder=this.renderOrder);"{}"!==JSON.stringify(this.userData)&&(f.userData=this.userData);f.layers=this.layers.mask;f.matrix=this.matrix.toArray();!1===this.matrixAutoUpdate&&(f.matrixAutoUpdate=!1);if(this.isMesh||this.isLine||this.isPoints){f.geometry=b(a.geometries,this.geometry);
+var g=this.geometry.parameters;if(void 0!==g&&void 0!==g.shapes)if(g=g.shapes,Array.isArray(g))for(var h=0,k=g.length;h<k;h++)b(a.shapes,g[h]);else b(a.shapes,g)}if(void 0!==this.material)if(Array.isArray(this.material)){g=[];h=0;for(k=this.material.length;h<k;h++)g.push(b(a.materials,this.material[h]));f.material=g}else f.material=b(a.materials,this.material);if(0<this.children.length)for(f.children=[],h=0;h<this.children.length;h++)f.children.push(this.children[h].toJSON(a).object);if(d){d=c(a.geometries);
+h=c(a.materials);k=c(a.textures);var m=c(a.images);g=c(a.shapes);0<d.length&&(e.geometries=d);0<h.length&&(e.materials=h);0<k.length&&(e.textures=k);0<m.length&&(e.images=m);0<g.length&&(e.shapes=g)}e.object=f;return e},clone:function(a){return(new this.constructor).copy(this,a)},copy:function(a,b){void 0===b&&(b=!0);this.name=a.name;this.up.copy(a.up);this.position.copy(a.position);this.quaternion.copy(a.quaternion);this.scale.copy(a.scale);this.matrix.copy(a.matrix);this.matrixWorld.copy(a.matrixWorld);
+this.matrixAutoUpdate=a.matrixAutoUpdate;this.matrixWorldNeedsUpdate=a.matrixWorldNeedsUpdate;this.layers.mask=a.layers.mask;this.visible=a.visible;this.castShadow=a.castShadow;this.receiveShadow=a.receiveShadow;this.frustumCulled=a.frustumCulled;this.renderOrder=a.renderOrder;this.userData=JSON.parse(JSON.stringify(a.userData));if(!0===b)for(b=0;b<a.children.length;b++)this.add(a.children[b].clone());return this}});var Nf=0;I.prototype=Object.assign(Object.create(ia.prototype),{constructor:I,isGeometry:!0,
+applyMatrix:function(a){for(var b=(new da).getNormalMatrix(a),c=0,d=this.vertices.length;c<d;c++)this.vertices[c].applyMatrix4(a);c=0;for(d=this.faces.length;c<d;c++){a=this.faces[c];a.normal.applyMatrix3(b).normalize();for(var e=0,f=a.vertexNormals.length;e<f;e++)a.vertexNormals[e].applyMatrix3(b).normalize()}null!==this.boundingBox&&this.computeBoundingBox();null!==this.boundingSphere&&this.computeBoundingSphere();this.normalsNeedUpdate=this.verticesNeedUpdate=!0;return this},rotateX:function(){var a=
+new P;return function(b){a.makeRotationX(b);this.applyMatrix(a);return this}}(),rotateY:function(){var a=new P;return function(b){a.makeRotationY(b);this.applyMatrix(a);return this}}(),rotateZ:function(){var a=new P;return function(b){a.makeRotationZ(b);this.applyMatrix(a);return this}}(),translate:function(){var a=new P;return function(b,c,d){a.makeTranslation(b,c,d);this.applyMatrix(a);return this}}(),scale:function(){var a=new P;return function(b,c,d){a.makeScale(b,c,d);this.applyMatrix(a);return this}}(),
+lookAt:function(){var a=new D;return function(b){a.lookAt(b);a.updateMatrix();this.applyMatrix(a.matrix)}}(),fromBufferGeometry:function(a){function b(a,b,d,e){var f=void 0===h?[]:[c.colors[a].clone(),c.colors[b].clone(),c.colors[d].clone()],l=void 0===g?[]:[(new p).fromArray(g,3*a),(new p).fromArray(g,3*b),(new p).fromArray(g,3*d)];e=new Xa(a,b,d,l,f,e);c.faces.push(e);void 0!==k&&c.faceVertexUvs[0].push([(new z).fromArray(k,2*a),(new z).fromArray(k,2*b),(new z).fromArray(k,2*d)]);void 0!==m&&c.faceVertexUvs[1].push([(new z).fromArray(m,
+2*a),(new z).fromArray(m,2*b),(new z).fromArray(m,2*d)])}var c=this,d=null!==a.index?a.index.array:void 0,e=a.attributes,f=e.position.array,g=void 0!==e.normal?e.normal.array:void 0,h=void 0!==e.color?e.color.array:void 0,k=void 0!==e.uv?e.uv.array:void 0,m=void 0!==e.uv2?e.uv2.array:void 0;void 0!==m&&(this.faceVertexUvs[1]=[]);for(var l=e=0;e<f.length;e+=3,l+=2)c.vertices.push((new p).fromArray(f,e)),void 0!==h&&c.colors.push((new G).fromArray(h,e));var n=a.groups;if(0<n.length)for(e=0;e<n.length;e++){f=
+n[e];var r=f.start,x=f.count;l=r;for(r+=x;l<r;l+=3)void 0!==d?b(d[l],d[l+1],d[l+2],f.materialIndex):b(l,l+1,l+2,f.materialIndex)}else if(void 0!==d)for(e=0;e<d.length;e+=3)b(d[e],d[e+1],d[e+2]);else for(e=0;e<f.length/3;e+=3)b(e,e+1,e+2);this.computeFaceNormals();null!==a.boundingBox&&(this.boundingBox=a.boundingBox.clone());null!==a.boundingSphere&&(this.boundingSphere=a.boundingSphere.clone());return this},center:function(){var a=new p;return function(){this.computeBoundingBox();this.boundingBox.getCenter(a).negate();
+this.translate(a.x,a.y,a.z);return this}}(),normalize:function(){this.computeBoundingSphere();var a=this.boundingSphere.center,b=this.boundingSphere.radius;b=0===b?1:1/b;var c=new P;c.set(b,0,0,-b*a.x,0,b,0,-b*a.y,0,0,b,-b*a.z,0,0,0,1);this.applyMatrix(c);return this},computeFaceNormals:function(){for(var a=new p,b=new p,c=0,d=this.faces.length;c<d;c++){var e=this.faces[c],f=this.vertices[e.a],g=this.vertices[e.b];a.subVectors(this.vertices[e.c],g);b.subVectors(f,g);a.cross(b);a.normalize();e.normal.copy(a)}},
+computeVertexNormals:function(a){void 0===a&&(a=!0);var b;var c=Array(this.vertices.length);var d=0;for(b=this.vertices.length;d<b;d++)c[d]=new p;if(a){var e=new p,f=new p;a=0;for(d=this.faces.length;a<d;a++){b=this.faces[a];var g=this.vertices[b.a];var h=this.vertices[b.b];var k=this.vertices[b.c];e.subVectors(k,h);f.subVectors(g,h);e.cross(f);c[b.a].add(e);c[b.b].add(e);c[b.c].add(e)}}else for(this.computeFaceNormals(),a=0,d=this.faces.length;a<d;a++)b=this.faces[a],c[b.a].add(b.normal),c[b.b].add(b.normal),
+c[b.c].add(b.normal);d=0;for(b=this.vertices.length;d<b;d++)c[d].normalize();a=0;for(d=this.faces.length;a<d;a++)b=this.faces[a],g=b.vertexNormals,3===g.length?(g[0].copy(c[b.a]),g[1].copy(c[b.b]),g[2].copy(c[b.c])):(g[0]=c[b.a].clone(),g[1]=c[b.b].clone(),g[2]=c[b.c].clone());0<this.faces.length&&(this.normalsNeedUpdate=!0)},computeFlatVertexNormals:function(){var a;this.computeFaceNormals();var b=0;for(a=this.faces.length;b<a;b++){var c=this.faces[b];var d=c.vertexNormals;3===d.length?(d[0].copy(c.normal),
+d[1].copy(c.normal),d[2].copy(c.normal)):(d[0]=c.normal.clone(),d[1]=c.normal.clone(),d[2]=c.normal.clone())}0<this.faces.length&&(this.normalsNeedUpdate=!0)},computeMorphNormals:function(){var a,b;var c=0;for(b=this.faces.length;c<b;c++){var d=this.faces[c];d.__originalFaceNormal?d.__originalFaceNormal.copy(d.normal):d.__originalFaceNormal=d.normal.clone();d.__originalVertexNormals||(d.__originalVertexNormals=[]);var e=0;for(a=d.vertexNormals.length;e<a;e++)d.__originalVertexNormals[e]?d.__originalVertexNormals[e].copy(d.vertexNormals[e]):
+d.__originalVertexNormals[e]=d.vertexNormals[e].clone()}var f=new I;f.faces=this.faces;e=0;for(a=this.morphTargets.length;e<a;e++){if(!this.morphNormals[e]){this.morphNormals[e]={};this.morphNormals[e].faceNormals=[];this.morphNormals[e].vertexNormals=[];d=this.morphNormals[e].faceNormals;var g=this.morphNormals[e].vertexNormals;c=0;for(b=this.faces.length;c<b;c++){var h=new p;var k={a:new p,b:new p,c:new p};d.push(h);g.push(k)}}g=this.morphNormals[e];f.vertices=this.morphTargets[e].vertices;f.computeFaceNormals();
+f.computeVertexNormals();c=0;for(b=this.faces.length;c<b;c++)d=this.faces[c],h=g.faceNormals[c],k=g.vertexNormals[c],h.copy(d.normal),k.a.copy(d.vertexNormals[0]),k.b.copy(d.vertexNormals[1]),k.c.copy(d.vertexNormals[2])}c=0;for(b=this.faces.length;c<b;c++)d=this.faces[c],d.normal=d.__originalFaceNormal,d.vertexNormals=d.__originalVertexNormals},computeBoundingBox:function(){null===this.boundingBox&&(this.boundingBox=new Wa);this.boundingBox.setFromPoints(this.vertices)},computeBoundingSphere:function(){null===
+this.boundingSphere&&(this.boundingSphere=new Ga);this.boundingSphere.setFromPoints(this.vertices)},merge:function(a,b,c){if(a&&a.isGeometry){var d,e=this.vertices.length,f=this.vertices,g=a.vertices,h=this.faces,k=a.faces,m=this.faceVertexUvs[0],l=a.faceVertexUvs[0],n=this.colors,r=a.colors;void 0===c&&(c=0);void 0!==b&&(d=(new da).getNormalMatrix(b));a=0;for(var p=g.length;a<p;a++){var t=g[a].clone();void 0!==b&&t.applyMatrix4(b);f.push(t)}a=0;for(p=r.length;a<p;a++)n.push(r[a].clone());a=0;for(p=
+k.length;a<p;a++){g=k[a];var u=g.vertexNormals;r=g.vertexColors;n=new Xa(g.a+e,g.b+e,g.c+e);n.normal.copy(g.normal);void 0!==d&&n.normal.applyMatrix3(d).normalize();b=0;for(f=u.length;b<f;b++)t=u[b].clone(),void 0!==d&&t.applyMatrix3(d).normalize(),n.vertexNormals.push(t);n.color.copy(g.color);b=0;for(f=r.length;b<f;b++)t=r[b],n.vertexColors.push(t.clone());n.materialIndex=g.materialIndex+c;h.push(n)}a=0;for(p=l.length;a<p;a++)if(c=l[a],d=[],void 0!==c){b=0;for(f=c.length;b<f;b++)d.push(c[b].clone());
+m.push(d)}}else console.error("THREE.Geometry.merge(): geometry not an instance of THREE.Geometry.",a)},mergeMesh:function(a){a&&a.isMesh?(a.matrixAutoUpdate&&a.updateMatrix(),this.merge(a.geometry,a.matrix)):console.error("THREE.Geometry.mergeMesh(): mesh not an instance of THREE.Mesh.",a)},mergeVertices:function(){var a={},b=[],c=[],d=Math.pow(10,4),e;var f=0;for(e=this.vertices.length;f<e;f++){var g=this.vertices[f];g=Math.round(g.x*d)+"_"+Math.round(g.y*d)+"_"+Math.round(g.z*d);void 0===a[g]?
+(a[g]=f,b.push(this.vertices[f]),c[f]=b.length-1):c[f]=c[a[g]]}a=[];f=0;for(e=this.faces.length;f<e;f++)for(d=this.faces[f],d.a=c[d.a],d.b=c[d.b],d.c=c[d.c],d=[d.a,d.b,d.c],g=0;3>g;g++)if(d[g]===d[(g+1)%3]){a.push(f);break}for(f=a.length-1;0<=f;f--)for(d=a[f],this.faces.splice(d,1),c=0,e=this.faceVertexUvs.length;c<e;c++)this.faceVertexUvs[c].splice(d,1);f=this.vertices.length-b.length;this.vertices=b;return f},setFromPoints:function(a){this.vertices=[];for(var b=0,c=a.length;b<c;b++){var d=a[b];
+this.vertices.push(new p(d.x,d.y,d.z||0))}return this},sortFacesByMaterialIndex:function(){for(var a=this.faces,b=a.length,c=0;c<b;c++)a[c]._id=c;a.sort(function(a,b){return a.materialIndex-b.materialIndex});var d=this.faceVertexUvs[0],e=this.faceVertexUvs[1],f,g;d&&d.length===b&&(f=[]);e&&e.length===b&&(g=[]);for(c=0;c<b;c++){var h=a[c]._id;f&&f.push(d[h]);g&&g.push(e[h])}f&&(this.faceVertexUvs[0]=f);g&&(this.faceVertexUvs[1]=g)},toJSON:function(){function a(a,b,c){return c?a|1<<b:a&~(1<<b)}function b(a){var b=
+a.x.toString()+a.y.toString()+a.z.toString();if(void 0!==m[b])return m[b];m[b]=k.length/3;k.push(a.x,a.y,a.z);return m[b]}function c(a){var b=a.r.toString()+a.g.toString()+a.b.toString();if(void 0!==n[b])return n[b];n[b]=l.length;l.push(a.getHex());return n[b]}function d(a){var b=a.x.toString()+a.y.toString();if(void 0!==p[b])return p[b];p[b]=r.length/2;r.push(a.x,a.y);return p[b]}var e={metadata:{version:4.5,type:"Geometry",generator:"Geometry.toJSON"}};e.uuid=this.uuid;e.type=this.type;""!==this.name&&
+(e.name=this.name);if(void 0!==this.parameters){var f=this.parameters,g;for(g in f)void 0!==f[g]&&(e[g]=f[g]);return e}f=[];for(g=0;g<this.vertices.length;g++){var h=this.vertices[g];f.push(h.x,h.y,h.z)}h=[];var k=[],m={},l=[],n={},r=[],p={};for(g=0;g<this.faces.length;g++){var t=this.faces[g],u=void 0!==this.faceVertexUvs[0][g],w=0<t.normal.length(),A=0<t.vertexNormals.length,v=1!==t.color.r||1!==t.color.g||1!==t.color.b,z=0<t.vertexColors.length,y=0;y=a(y,0,0);y=a(y,1,!0);y=a(y,2,!1);y=a(y,3,u);
+y=a(y,4,w);y=a(y,5,A);y=a(y,6,v);y=a(y,7,z);h.push(y);h.push(t.a,t.b,t.c);h.push(t.materialIndex);u&&(u=this.faceVertexUvs[0][g],h.push(d(u[0]),d(u[1]),d(u[2])));w&&h.push(b(t.normal));A&&(w=t.vertexNormals,h.push(b(w[0]),b(w[1]),b(w[2])));v&&h.push(c(t.color));z&&(t=t.vertexColors,h.push(c(t[0]),c(t[1]),c(t[2])))}e.data={};e.data.vertices=f;e.data.normals=k;0<l.length&&(e.data.colors=l);0<r.length&&(e.data.uvs=[r]);e.data.faces=h;return e},clone:function(){return(new I).copy(this)},copy:function(a){var b,
+c,d;this.vertices=[];this.colors=[];this.faces=[];this.faceVertexUvs=[[]];this.morphTargets=[];this.morphNormals=[];this.skinWeights=[];this.skinIndices=[];this.lineDistances=[];this.boundingSphere=this.boundingBox=null;this.name=a.name;var e=a.vertices;var f=0;for(b=e.length;f<b;f++)this.vertices.push(e[f].clone());e=a.colors;f=0;for(b=e.length;f<b;f++)this.colors.push(e[f].clone());e=a.faces;f=0;for(b=e.length;f<b;f++)this.faces.push(e[f].clone());f=0;for(b=a.faceVertexUvs.length;f<b;f++){var g=
+a.faceVertexUvs[f];void 0===this.faceVertexUvs[f]&&(this.faceVertexUvs[f]=[]);e=0;for(c=g.length;e<c;e++){var h=g[e],k=[];var m=0;for(d=h.length;m<d;m++)k.push(h[m].clone());this.faceVertexUvs[f].push(k)}}m=a.morphTargets;f=0;for(b=m.length;f<b;f++){d={};d.name=m[f].name;if(void 0!==m[f].vertices)for(d.vertices=[],e=0,c=m[f].vertices.length;e<c;e++)d.vertices.push(m[f].vertices[e].clone());if(void 0!==m[f].normals)for(d.normals=[],e=0,c=m[f].normals.length;e<c;e++)d.normals.push(m[f].normals[e].clone());
+this.morphTargets.push(d)}m=a.morphNormals;f=0;for(b=m.length;f<b;f++){d={};if(void 0!==m[f].vertexNormals)for(d.vertexNormals=[],e=0,c=m[f].vertexNormals.length;e<c;e++)g=m[f].vertexNormals[e],h={},h.a=g.a.clone(),h.b=g.b.clone(),h.c=g.c.clone(),d.vertexNormals.push(h);if(void 0!==m[f].faceNormals)for(d.faceNormals=[],e=0,c=m[f].faceNormals.length;e<c;e++)d.faceNormals.push(m[f].faceNormals[e].clone());this.morphNormals.push(d)}e=a.skinWeights;f=0;for(b=e.length;f<b;f++)this.skinWeights.push(e[f].clone());
+e=a.skinIndices;f=0;for(b=e.length;f<b;f++)this.skinIndices.push(e[f].clone());e=a.lineDistances;f=0;for(b=e.length;f<b;f++)this.lineDistances.push(e[f]);f=a.boundingBox;null!==f&&(this.boundingBox=f.clone());f=a.boundingSphere;null!==f&&(this.boundingSphere=f.clone());this.elementsNeedUpdate=a.elementsNeedUpdate;this.verticesNeedUpdate=a.verticesNeedUpdate;this.uvsNeedUpdate=a.uvsNeedUpdate;this.normalsNeedUpdate=a.normalsNeedUpdate;this.colorsNeedUpdate=a.colorsNeedUpdate;this.lineDistancesNeedUpdate=
+a.lineDistancesNeedUpdate;this.groupsNeedUpdate=a.groupsNeedUpdate;return this},dispose:function(){this.dispatchEvent({type:"dispose"})}});Object.defineProperty(F.prototype,"needsUpdate",{set:function(a){!0===a&&this.version++}});Object.assign(F.prototype,{isBufferAttribute:!0,onUploadCallback:function(){},setArray:function(a){if(Array.isArray(a))throw new TypeError("THREE.BufferAttribute: array should be a Typed Array.");this.count=void 0!==a?a.length/this.itemSize:0;this.array=a;return this},setDynamic:function(a){this.dynamic=
+a;return this},copy:function(a){this.name=a.name;this.array=new a.array.constructor(a.array);this.itemSize=a.itemSize;this.count=a.count;this.normalized=a.normalized;this.dynamic=a.dynamic;return this},copyAt:function(a,b,c){a*=this.itemSize;c*=b.itemSize;for(var d=0,e=this.itemSize;d<e;d++)this.array[a+d]=b.array[c+d];return this},copyArray:function(a){this.array.set(a);return this},copyColorsArray:function(a){for(var b=this.array,c=0,d=0,e=a.length;d<e;d++){var f=a[d];void 0===f&&(console.warn("THREE.BufferAttribute.copyColorsArray(): color is undefined",
+d),f=new G);b[c++]=f.r;b[c++]=f.g;b[c++]=f.b}return this},copyVector2sArray:function(a){for(var b=this.array,c=0,d=0,e=a.length;d<e;d++){var f=a[d];void 0===f&&(console.warn("THREE.BufferAttribute.copyVector2sArray(): vector is undefined",d),f=new z);b[c++]=f.x;b[c++]=f.y}return this},copyVector3sArray:function(a){for(var b=this.array,c=0,d=0,e=a.length;d<e;d++){var f=a[d];void 0===f&&(console.warn("THREE.BufferAttribute.copyVector3sArray(): vector is undefined",d),f=new p);b[c++]=f.x;b[c++]=f.y;
+b[c++]=f.z}return this},copyVector4sArray:function(a){for(var b=this.array,c=0,d=0,e=a.length;d<e;d++){var f=a[d];void 0===f&&(console.warn("THREE.BufferAttribute.copyVector4sArray(): vector is undefined",d),f=new Z);b[c++]=f.x;b[c++]=f.y;b[c++]=f.z;b[c++]=f.w}return this},set:function(a,b){void 0===b&&(b=0);this.array.set(a,b);return this},getX:function(a){return this.array[a*this.itemSize]},setX:function(a,b){this.array[a*this.itemSize]=b;return this},getY:function(a){return this.array[a*this.itemSize+
+1]},setY:function(a,b){this.array[a*this.itemSize+1]=b;return this},getZ:function(a){return this.array[a*this.itemSize+2]},setZ:function(a,b){this.array[a*this.itemSize+2]=b;return this},getW:function(a){return this.array[a*this.itemSize+3]},setW:function(a,b){this.array[a*this.itemSize+3]=b;return this},setXY:function(a,b,c){a*=this.itemSize;this.array[a+0]=b;this.array[a+1]=c;return this},setXYZ:function(a,b,c,d){a*=this.itemSize;this.array[a+0]=b;this.array[a+1]=c;this.array[a+2]=d;return this},
+setXYZW:function(a,b,c,d,e){a*=this.itemSize;this.array[a+0]=b;this.array[a+1]=c;this.array[a+2]=d;this.array[a+3]=e;return this},onUpload:function(a){this.onUploadCallback=a;return this},clone:function(){return(new this.constructor(this.array,this.itemSize)).copy(this)}});sc.prototype=Object.create(F.prototype);sc.prototype.constructor=sc;tc.prototype=Object.create(F.prototype);tc.prototype.constructor=tc;uc.prototype=Object.create(F.prototype);uc.prototype.constructor=uc;vc.prototype=Object.create(F.prototype);
+vc.prototype.constructor=vc;nb.prototype=Object.create(F.prototype);nb.prototype.constructor=nb;wc.prototype=Object.create(F.prototype);wc.prototype.constructor=wc;ob.prototype=Object.create(F.prototype);ob.prototype.constructor=ob;C.prototype=Object.create(F.prototype);C.prototype.constructor=C;xc.prototype=Object.create(F.prototype);xc.prototype.constructor=xc;Object.assign(Ie.prototype,{computeGroups:function(a){var b=[],c=void 0;a=a.faces;for(var d=0;d<a.length;d++){var e=a[d];if(e.materialIndex!==
+c){c=e.materialIndex;void 0!==f&&(f.count=3*d-f.start,b.push(f));var f={start:3*d,materialIndex:c}}}void 0!==f&&(f.count=3*d-f.start,b.push(f));this.groups=b},fromGeometry:function(a){var b=a.faces,c=a.vertices,d=a.faceVertexUvs,e=d[0]&&0<d[0].length,f=d[1]&&0<d[1].length,g=a.morphTargets,h=g.length;if(0<h){var k=[];for(var m=0;m<h;m++)k[m]={name:g[m].name,data:[]};this.morphTargets.position=k}var l=a.morphNormals,n=l.length;if(0<n){var r=[];for(m=0;m<n;m++)r[m]={name:l[m].name,data:[]};this.morphTargets.normal=
+r}var p=a.skinIndices,t=a.skinWeights,u=p.length===c.length,w=t.length===c.length;0<c.length&&0===b.length&&console.error("THREE.DirectGeometry: Faceless geometries are not supported.");for(m=0;m<b.length;m++){var A=b[m];this.vertices.push(c[A.a],c[A.b],c[A.c]);var v=A.vertexNormals;3===v.length?this.normals.push(v[0],v[1],v[2]):(v=A.normal,this.normals.push(v,v,v));v=A.vertexColors;3===v.length?this.colors.push(v[0],v[1],v[2]):(v=A.color,this.colors.push(v,v,v));!0===e&&(v=d[0][m],void 0!==v?this.uvs.push(v[0],
+v[1],v[2]):(console.warn("THREE.DirectGeometry.fromGeometry(): Undefined vertexUv ",m),this.uvs.push(new z,new z,new z)));!0===f&&(v=d[1][m],void 0!==v?this.uvs2.push(v[0],v[1],v[2]):(console.warn("THREE.DirectGeometry.fromGeometry(): Undefined vertexUv2 ",m),this.uvs2.push(new z,new z,new z)));for(v=0;v<h;v++){var H=g[v].vertices;k[v].data.push(H[A.a],H[A.b],H[A.c])}for(v=0;v<n;v++)H=l[v].vertexNormals[m],r[v].data.push(H.a,H.b,H.c);u&&this.skinIndices.push(p[A.a],p[A.b],p[A.c]);w&&this.skinWeights.push(t[A.a],
+t[A.b],t[A.c])}this.computeGroups(a);this.verticesNeedUpdate=a.verticesNeedUpdate;this.normalsNeedUpdate=a.normalsNeedUpdate;this.colorsNeedUpdate=a.colorsNeedUpdate;this.uvsNeedUpdate=a.uvsNeedUpdate;this.groupsNeedUpdate=a.groupsNeedUpdate;return this}});var Of=1;E.prototype=Object.assign(Object.create(ia.prototype),{constructor:E,isBufferGeometry:!0,getIndex:function(){return this.index},setIndex:function(a){Array.isArray(a)?this.index=new (65535<Je(a)?ob:nb)(a,1):this.index=a},addAttribute:function(a,
+b,c){if(!(b&&b.isBufferAttribute||b&&b.isInterleavedBufferAttribute))return console.warn("THREE.BufferGeometry: .addAttribute() now expects ( name, attribute )."),this.addAttribute(a,new F(b,c));if("index"===a)return console.warn("THREE.BufferGeometry.addAttribute: Use .setIndex() for index attribute."),this.setIndex(b),this;this.attributes[a]=b;return this},getAttribute:function(a){return this.attributes[a]},removeAttribute:function(a){delete this.attributes[a];return this},addGroup:function(a,b,
+c){this.groups.push({start:a,count:b,materialIndex:void 0!==c?c:0})},clearGroups:function(){this.groups=[]},setDrawRange:function(a,b){this.drawRange.start=a;this.drawRange.count=b},applyMatrix:function(a){var b=this.attributes.position;void 0!==b&&(a.applyToBufferAttribute(b),b.needsUpdate=!0);b=this.attributes.normal;void 0!==b&&((new da).getNormalMatrix(a).applyToBufferAttribute(b),b.needsUpdate=!0);null!==this.boundingBox&&this.computeBoundingBox();null!==this.boundingSphere&&this.computeBoundingSphere();
+return this},rotateX:function(){var a=new P;return function(b){a.makeRotationX(b);this.applyMatrix(a);return this}}(),rotateY:function(){var a=new P;return function(b){a.makeRotationY(b);this.applyMatrix(a);return this}}(),rotateZ:function(){var a=new P;return function(b){a.makeRotationZ(b);this.applyMatrix(a);return this}}(),translate:function(){var a=new P;return function(b,c,d){a.makeTranslation(b,c,d);this.applyMatrix(a);return this}}(),scale:function(){var a=new P;return function(b,c,d){a.makeScale(b,
+c,d);this.applyMatrix(a);return this}}(),lookAt:function(){var a=new D;return function(b){a.lookAt(b);a.updateMatrix();this.applyMatrix(a.matrix)}}(),center:function(){var a=new p;return function(){this.computeBoundingBox();this.boundingBox.getCenter(a).negate();this.translate(a.x,a.y,a.z);return this}}(),setFromObject:function(a){var b=a.geometry;if(a.isPoints||a.isLine){a=new C(3*b.vertices.length,3);var c=new C(3*b.colors.length,3);this.addAttribute("position",a.copyVector3sArray(b.vertices));
+this.addAttribute("color",c.copyColorsArray(b.colors));b.lineDistances&&b.lineDistances.length===b.vertices.length&&(a=new C(b.lineDistances.length,1),this.addAttribute("lineDistance",a.copyArray(b.lineDistances)));null!==b.boundingSphere&&(this.boundingSphere=b.boundingSphere.clone());null!==b.boundingBox&&(this.boundingBox=b.boundingBox.clone())}else a.isMesh&&b&&b.isGeometry&&this.fromGeometry(b);return this},setFromPoints:function(a){for(var b=[],c=0,d=a.length;c<d;c++){var e=a[c];b.push(e.x,
+e.y,e.z||0)}this.addAttribute("position",new C(b,3));return this},updateFromObject:function(a){var b=a.geometry;if(a.isMesh){var c=b.__directGeometry;!0===b.elementsNeedUpdate&&(c=void 0,b.elementsNeedUpdate=!1);if(void 0===c)return this.fromGeometry(b);c.verticesNeedUpdate=b.verticesNeedUpdate;c.normalsNeedUpdate=b.normalsNeedUpdate;c.colorsNeedUpdate=b.colorsNeedUpdate;c.uvsNeedUpdate=b.uvsNeedUpdate;c.groupsNeedUpdate=b.groupsNeedUpdate;b.verticesNeedUpdate=!1;b.normalsNeedUpdate=!1;b.colorsNeedUpdate=
+!1;b.uvsNeedUpdate=!1;b.groupsNeedUpdate=!1;b=c}!0===b.verticesNeedUpdate&&(c=this.attributes.position,void 0!==c&&(c.copyVector3sArray(b.vertices),c.needsUpdate=!0),b.verticesNeedUpdate=!1);!0===b.normalsNeedUpdate&&(c=this.attributes.normal,void 0!==c&&(c.copyVector3sArray(b.normals),c.needsUpdate=!0),b.normalsNeedUpdate=!1);!0===b.colorsNeedUpdate&&(c=this.attributes.color,void 0!==c&&(c.copyColorsArray(b.colors),c.needsUpdate=!0),b.colorsNeedUpdate=!1);b.uvsNeedUpdate&&(c=this.attributes.uv,void 0!==
+c&&(c.copyVector2sArray(b.uvs),c.needsUpdate=!0),b.uvsNeedUpdate=!1);b.lineDistancesNeedUpdate&&(c=this.attributes.lineDistance,void 0!==c&&(c.copyArray(b.lineDistances),c.needsUpdate=!0),b.lineDistancesNeedUpdate=!1);b.groupsNeedUpdate&&(b.computeGroups(a.geometry),this.groups=b.groups,b.groupsNeedUpdate=!1);return this},fromGeometry:function(a){a.__directGeometry=(new Ie).fromGeometry(a);return this.fromDirectGeometry(a.__directGeometry)},fromDirectGeometry:function(a){var b=new Float32Array(3*
+a.vertices.length);this.addAttribute("position",(new F(b,3)).copyVector3sArray(a.vertices));0<a.normals.length&&(b=new Float32Array(3*a.normals.length),this.addAttribute("normal",(new F(b,3)).copyVector3sArray(a.normals)));0<a.colors.length&&(b=new Float32Array(3*a.colors.length),this.addAttribute("color",(new F(b,3)).copyColorsArray(a.colors)));0<a.uvs.length&&(b=new Float32Array(2*a.uvs.length),this.addAttribute("uv",(new F(b,2)).copyVector2sArray(a.uvs)));0<a.uvs2.length&&(b=new Float32Array(2*
+a.uvs2.length),this.addAttribute("uv2",(new F(b,2)).copyVector2sArray(a.uvs2)));this.groups=a.groups;for(var c in a.morphTargets){b=[];for(var d=a.morphTargets[c],e=0,f=d.length;e<f;e++){var g=d[e],h=new C(3*g.data.length,3);h.name=g.name;b.push(h.copyVector3sArray(g.data))}this.morphAttributes[c]=b}0<a.skinIndices.length&&(c=new C(4*a.skinIndices.length,4),this.addAttribute("skinIndex",c.copyVector4sArray(a.skinIndices)));0<a.skinWeights.length&&(c=new C(4*a.skinWeights.length,4),this.addAttribute("skinWeight",
+c.copyVector4sArray(a.skinWeights)));null!==a.boundingSphere&&(this.boundingSphere=a.boundingSphere.clone());null!==a.boundingBox&&(this.boundingBox=a.boundingBox.clone());return this},computeBoundingBox:function(){null===this.boundingBox&&(this.boundingBox=new Wa);var a=this.attributes.position;void 0!==a?this.boundingBox.setFromBufferAttribute(a):this.boundingBox.makeEmpty();(isNaN(this.boundingBox.min.x)||isNaN(this.boundingBox.min.y)||isNaN(this.boundingBox.min.z))&&console.error('THREE.BufferGeometry.computeBoundingBox: Computed min/max have NaN values. The "position" attribute is likely to have NaN values.',
+this)},computeBoundingSphere:function(){var a=new Wa,b=new p;return function(){null===this.boundingSphere&&(this.boundingSphere=new Ga);var c=this.attributes.position;if(c){var d=this.boundingSphere.center;a.setFromBufferAttribute(c);a.getCenter(d);for(var e=0,f=0,g=c.count;f<g;f++)b.x=c.getX(f),b.y=c.getY(f),b.z=c.getZ(f),e=Math.max(e,d.distanceToSquared(b));this.boundingSphere.radius=Math.sqrt(e);isNaN(this.boundingSphere.radius)&&console.error('THREE.BufferGeometry.computeBoundingSphere(): Computed radius is NaN. The "position" attribute is likely to have NaN values.',
+this)}}}(),computeFaceNormals:function(){},computeVertexNormals:function(){var a=this.index,b=this.attributes;if(b.position){var c=b.position.array;if(void 0===b.normal)this.addAttribute("normal",new F(new Float32Array(c.length),3));else for(var d=b.normal.array,e=0,f=d.length;e<f;e++)d[e]=0;d=b.normal.array;var g=new p,h=new p,k=new p,m=new p,l=new p;if(a){var n=a.array;e=0;for(f=a.count;e<f;e+=3){a=3*n[e+0];var r=3*n[e+1];var x=3*n[e+2];g.fromArray(c,a);h.fromArray(c,r);k.fromArray(c,x);m.subVectors(k,
+h);l.subVectors(g,h);m.cross(l);d[a]+=m.x;d[a+1]+=m.y;d[a+2]+=m.z;d[r]+=m.x;d[r+1]+=m.y;d[r+2]+=m.z;d[x]+=m.x;d[x+1]+=m.y;d[x+2]+=m.z}}else for(e=0,f=c.length;e<f;e+=9)g.fromArray(c,e),h.fromArray(c,e+3),k.fromArray(c,e+6),m.subVectors(k,h),l.subVectors(g,h),m.cross(l),d[e]=m.x,d[e+1]=m.y,d[e+2]=m.z,d[e+3]=m.x,d[e+4]=m.y,d[e+5]=m.z,d[e+6]=m.x,d[e+7]=m.y,d[e+8]=m.z;this.normalizeNormals();b.normal.needsUpdate=!0}},merge:function(a,b){if(a&&a.isBufferGeometry){void 0===b&&(b=0,console.warn("THREE.BufferGeometry.merge(): Overwriting original geometry, starting at offset=0. Use BufferGeometryUtils.mergeBufferGeometries() for lossless merge."));
+var c=this.attributes,d;for(d in c)if(void 0!==a.attributes[d]){var e=c[d].array,f=a.attributes[d],g=f.array,h=0;for(f=f.itemSize*b;h<g.length;h++,f++)e[f]=g[h]}return this}console.error("THREE.BufferGeometry.merge(): geometry not an instance of THREE.BufferGeometry.",a)},normalizeNormals:function(){var a=new p;return function(){for(var b=this.attributes.normal,c=0,d=b.count;c<d;c++)a.x=b.getX(c),a.y=b.getY(c),a.z=b.getZ(c),a.normalize(),b.setXYZ(c,a.x,a.y,a.z)}}(),toNonIndexed:function(){if(null===
+this.index)return console.warn("THREE.BufferGeometry.toNonIndexed(): Geometry is already non-indexed."),this;var a=new E,b=this.index.array,c=this.attributes,d;for(d in c){var e=c[d],f=e.array,g=e.itemSize,h=new f.constructor(b.length*g),k=0;e=0;for(var m=b.length;e<m;e++){var l=b[e]*g;for(var n=0;n<g;n++)h[k++]=f[l++]}a.addAttribute(d,new F(h,g))}b=this.groups;e=0;for(m=b.length;e<m;e++)c=b[e],a.addGroup(c.start,c.count,c.materialIndex);return a},toJSON:function(){var a={metadata:{version:4.5,type:"BufferGeometry",
+generator:"BufferGeometry.toJSON"}};a.uuid=this.uuid;a.type=this.type;""!==this.name&&(a.name=this.name);0<Object.keys(this.userData).length&&(a.userData=this.userData);if(void 0!==this.parameters){var b=this.parameters;for(e in b)void 0!==b[e]&&(a[e]=b[e]);return a}a.data={attributes:{}};var c=this.index;null!==c&&(b=Array.prototype.slice.call(c.array),a.data.index={type:c.array.constructor.name,array:b});c=this.attributes;for(e in c){var d=c[e];b=Array.prototype.slice.call(d.array);a.data.attributes[e]=
+{itemSize:d.itemSize,type:d.array.constructor.name,array:b,normalized:d.normalized}}var e=this.groups;0<e.length&&(a.data.groups=JSON.parse(JSON.stringify(e)));e=this.boundingSphere;null!==e&&(a.data.boundingSphere={center:e.center.toArray(),radius:e.radius});return a},clone:function(){return(new E).copy(this)},copy:function(a){var b;this.index=null;this.attributes={};this.morphAttributes={};this.groups=[];this.boundingSphere=this.boundingBox=null;this.name=a.name;var c=a.index;null!==c&&this.setIndex(c.clone());
+c=a.attributes;for(g in c)this.addAttribute(g,c[g].clone());var d=a.morphAttributes;for(g in d){var e=[],f=d[g];c=0;for(b=f.length;c<b;c++)e.push(f[c].clone());this.morphAttributes[g]=e}var g=a.groups;c=0;for(b=g.length;c<b;c++)d=g[c],this.addGroup(d.start,d.count,d.materialIndex);g=a.boundingBox;null!==g&&(this.boundingBox=g.clone());g=a.boundingSphere;null!==g&&(this.boundingSphere=g.clone());this.drawRange.start=a.drawRange.start;this.drawRange.count=a.drawRange.count;this.userData=a.userData;
+return this},dispose:function(){this.dispatchEvent({type:"dispose"})}});Kb.prototype=Object.create(I.prototype);Kb.prototype.constructor=Kb;pb.prototype=Object.create(E.prototype);pb.prototype.constructor=pb;yc.prototype=Object.create(I.prototype);yc.prototype.constructor=yc;qb.prototype=Object.create(E.prototype);qb.prototype.constructor=qb;var Pf=0;L.prototype=Object.assign(Object.create(ia.prototype),{constructor:L,isMaterial:!0,onBeforeCompile:function(){},setValues:function(a){if(void 0!==a)for(var b in a){var c=
+a[b];if(void 0===c)console.warn("THREE.Material: '"+b+"' parameter is undefined.");else if("shading"===b)console.warn("THREE."+this.type+": .shading has been removed. Use the boolean .flatShading instead."),this.flatShading=1===c?!0:!1;else{var d=this[b];void 0===d?console.warn("THREE."+this.type+": '"+b+"' is not a property of this material."):d&&d.isColor?d.set(c):d&&d.isVector3&&c&&c.isVector3?d.copy(c):this[b]=c}}},toJSON:function(a){function b(a){var b=[],c;for(c in a){var d=a[c];delete d.metadata;
+b.push(d)}return b}var c=void 0===a||"string"===typeof a;c&&(a={textures:{},images:{}});var d={metadata:{version:4.5,type:"Material",generator:"Material.toJSON"}};d.uuid=this.uuid;d.type=this.type;""!==this.name&&(d.name=this.name);this.color&&this.color.isColor&&(d.color=this.color.getHex());void 0!==this.roughness&&(d.roughness=this.roughness);void 0!==this.metalness&&(d.metalness=this.metalness);this.emissive&&this.emissive.isColor&&(d.emissive=this.emissive.getHex());1!==this.emissiveIntensity&&
+(d.emissiveIntensity=this.emissiveIntensity);this.specular&&this.specular.isColor&&(d.specular=this.specular.getHex());void 0!==this.shininess&&(d.shininess=this.shininess);void 0!==this.clearCoat&&(d.clearCoat=this.clearCoat);void 0!==this.clearCoatRoughness&&(d.clearCoatRoughness=this.clearCoatRoughness);this.map&&this.map.isTexture&&(d.map=this.map.toJSON(a).uuid);this.alphaMap&&this.alphaMap.isTexture&&(d.alphaMap=this.alphaMap.toJSON(a).uuid);this.lightMap&&this.lightMap.isTexture&&(d.lightMap=
+this.lightMap.toJSON(a).uuid);this.aoMap&&this.aoMap.isTexture&&(d.aoMap=this.aoMap.toJSON(a).uuid,d.aoMapIntensity=this.aoMapIntensity);this.bumpMap&&this.bumpMap.isTexture&&(d.bumpMap=this.bumpMap.toJSON(a).uuid,d.bumpScale=this.bumpScale);this.normalMap&&this.normalMap.isTexture&&(d.normalMap=this.normalMap.toJSON(a).uuid,d.normalMapType=this.normalMapType,d.normalScale=this.normalScale.toArray());this.displacementMap&&this.displacementMap.isTexture&&(d.displacementMap=this.displacementMap.toJSON(a).uuid,
+d.displacementScale=this.displacementScale,d.displacementBias=this.displacementBias);this.roughnessMap&&this.roughnessMap.isTexture&&(d.roughnessMap=this.roughnessMap.toJSON(a).uuid);this.metalnessMap&&this.metalnessMap.isTexture&&(d.metalnessMap=this.metalnessMap.toJSON(a).uuid);this.emissiveMap&&this.emissiveMap.isTexture&&(d.emissiveMap=this.emissiveMap.toJSON(a).uuid);this.specularMap&&this.specularMap.isTexture&&(d.specularMap=this.specularMap.toJSON(a).uuid);this.envMap&&this.envMap.isTexture&&
+(d.envMap=this.envMap.toJSON(a).uuid,d.reflectivity=this.reflectivity,void 0!==this.combine&&(d.combine=this.combine),void 0!==this.envMapIntensity&&(d.envMapIntensity=this.envMapIntensity));this.gradientMap&&this.gradientMap.isTexture&&(d.gradientMap=this.gradientMap.toJSON(a).uuid);void 0!==this.size&&(d.size=this.size);void 0!==this.sizeAttenuation&&(d.sizeAttenuation=this.sizeAttenuation);1!==this.blending&&(d.blending=this.blending);!0===this.flatShading&&(d.flatShading=this.flatShading);0!==
+this.side&&(d.side=this.side);0!==this.vertexColors&&(d.vertexColors=this.vertexColors);1>this.opacity&&(d.opacity=this.opacity);!0===this.transparent&&(d.transparent=this.transparent);d.depthFunc=this.depthFunc;d.depthTest=this.depthTest;d.depthWrite=this.depthWrite;0!==this.rotation&&(d.rotation=this.rotation);!0===this.polygonOffset&&(d.polygonOffset=!0);0!==this.polygonOffsetFactor&&(d.polygonOffsetFactor=this.polygonOffsetFactor);0!==this.polygonOffsetUnits&&(d.polygonOffsetUnits=this.polygonOffsetUnits);
+1!==this.linewidth&&(d.linewidth=this.linewidth);void 0!==this.dashSize&&(d.dashSize=this.dashSize);void 0!==this.gapSize&&(d.gapSize=this.gapSize);void 0!==this.scale&&(d.scale=this.scale);!0===this.dithering&&(d.dithering=!0);0<this.alphaTest&&(d.alphaTest=this.alphaTest);!0===this.premultipliedAlpha&&(d.premultipliedAlpha=this.premultipliedAlpha);!0===this.wireframe&&(d.wireframe=this.wireframe);1<this.wireframeLinewidth&&(d.wireframeLinewidth=this.wireframeLinewidth);"round"!==this.wireframeLinecap&&
+(d.wireframeLinecap=this.wireframeLinecap);"round"!==this.wireframeLinejoin&&(d.wireframeLinejoin=this.wireframeLinejoin);!0===this.morphTargets&&(d.morphTargets=!0);!0===this.skinning&&(d.skinning=!0);!1===this.visible&&(d.visible=!1);"{}"!==JSON.stringify(this.userData)&&(d.userData=this.userData);c&&(c=b(a.textures),a=b(a.images),0<c.length&&(d.textures=c),0<a.length&&(d.images=a));return d},clone:function(){return(new this.constructor).copy(this)},copy:function(a){this.name=a.name;this.fog=a.fog;
+this.lights=a.lights;this.blending=a.blending;this.side=a.side;this.flatShading=a.flatShading;this.vertexColors=a.vertexColors;this.opacity=a.opacity;this.transparent=a.transparent;this.blendSrc=a.blendSrc;this.blendDst=a.blendDst;this.blendEquation=a.blendEquation;this.blendSrcAlpha=a.blendSrcAlpha;this.blendDstAlpha=a.blendDstAlpha;this.blendEquationAlpha=a.blendEquationAlpha;this.depthFunc=a.depthFunc;this.depthTest=a.depthTest;this.depthWrite=a.depthWrite;this.colorWrite=a.colorWrite;this.precision=
+a.precision;this.polygonOffset=a.polygonOffset;this.polygonOffsetFactor=a.polygonOffsetFactor;this.polygonOffsetUnits=a.polygonOffsetUnits;this.dithering=a.dithering;this.alphaTest=a.alphaTest;this.premultipliedAlpha=a.premultipliedAlpha;this.visible=a.visible;this.userData=JSON.parse(JSON.stringify(a.userData));this.clipShadows=a.clipShadows;this.clipIntersection=a.clipIntersection;var b=a.clippingPlanes,c=null;if(null!==b){var d=b.length;c=Array(d);for(var e=0;e!==d;++e)c[e]=b[e].clone()}this.clippingPlanes=
+c;this.shadowSide=a.shadowSide;return this},dispose:function(){this.dispatchEvent({type:"dispose"})}});ka.prototype=Object.create(L.prototype);ka.prototype.constructor=ka;ka.prototype.isShaderMaterial=!0;ka.prototype.copy=function(a){L.prototype.copy.call(this,a);this.fragmentShader=a.fragmentShader;this.vertexShader=a.vertexShader;this.uniforms=va.clone(a.uniforms);this.defines=Object.assign({},a.defines);this.wireframe=a.wireframe;this.wireframeLinewidth=a.wireframeLinewidth;this.lights=a.lights;
+this.clipping=a.clipping;this.skinning=a.skinning;this.morphTargets=a.morphTargets;this.morphNormals=a.morphNormals;this.extensions=a.extensions;return this};ka.prototype.toJSON=function(a){var b=L.prototype.toJSON.call(this,a);b.uniforms={};for(var c in this.uniforms){var d=this.uniforms[c].value;b.uniforms[c]=d.isTexture?{type:"t",value:d.toJSON(a).uuid}:d.isColor?{type:"c",value:d.getHex()}:d.isVector2?{type:"v2",value:d.toArray()}:d.isVector3?{type:"v3",value:d.toArray()}:d.isVector4?{type:"v4",
+value:d.toArray()}:d.isMatrix4?{type:"m4",value:d.toArray()}:{value:d}}0<Object.keys(this.defines).length&&(b.defines=this.defines);b.vertexShader=this.vertexShader;b.fragmentShader=this.fragmentShader;return b};Object.assign(rb.prototype,{set:function(a,b){this.origin.copy(a);this.direction.copy(b);return this},clone:function(){return(new this.constructor).copy(this)},copy:function(a){this.origin.copy(a.origin);this.direction.copy(a.direction);return this},at:function(a,b){void 0===b&&(console.warn("THREE.Ray: .at() target is now required"),
+b=new p);return b.copy(this.direction).multiplyScalar(a).add(this.origin)},lookAt:function(a){this.direction.copy(a).sub(this.origin).normalize();return this},recast:function(){var a=new p;return function(b){this.origin.copy(this.at(b,a));return this}}(),closestPointToPoint:function(a,b){void 0===b&&(console.warn("THREE.Ray: .closestPointToPoint() target is now required"),b=new p);b.subVectors(a,this.origin);a=b.dot(this.direction);return 0>a?b.copy(this.origin):b.copy(this.direction).multiplyScalar(a).add(this.origin)},
+distanceToPoint:function(a){return Math.sqrt(this.distanceSqToPoint(a))},distanceSqToPoint:function(){var a=new p;return function(b){var c=a.subVectors(b,this.origin).dot(this.direction);if(0>c)return this.origin.distanceToSquared(b);a.copy(this.direction).multiplyScalar(c).add(this.origin);return a.distanceToSquared(b)}}(),distanceSqToSegment:function(){var a=new p,b=new p,c=new p;return function(d,e,f,g){a.copy(d).add(e).multiplyScalar(.5);b.copy(e).sub(d).normalize();c.copy(this.origin).sub(a);
+var h=.5*d.distanceTo(e),k=-this.direction.dot(b),m=c.dot(this.direction),l=-c.dot(b),n=c.lengthSq(),r=Math.abs(1-k*k);if(0<r){d=k*l-m;e=k*m-l;var p=h*r;0<=d?e>=-p?e<=p?(h=1/r,d*=h,e*=h,k=d*(d+k*e+2*m)+e*(k*d+e+2*l)+n):(e=h,d=Math.max(0,-(k*e+m)),k=-d*d+e*(e+2*l)+n):(e=-h,d=Math.max(0,-(k*e+m)),k=-d*d+e*(e+2*l)+n):e<=-p?(d=Math.max(0,-(-k*h+m)),e=0<d?-h:Math.min(Math.max(-h,-l),h),k=-d*d+e*(e+2*l)+n):e<=p?(d=0,e=Math.min(Math.max(-h,-l),h),k=e*(e+2*l)+n):(d=Math.max(0,-(k*h+m)),e=0<d?h:Math.min(Math.max(-h,
+-l),h),k=-d*d+e*(e+2*l)+n)}else e=0<k?-h:h,d=Math.max(0,-(k*e+m)),k=-d*d+e*(e+2*l)+n;f&&f.copy(this.direction).multiplyScalar(d).add(this.origin);g&&g.copy(b).multiplyScalar(e).add(a);return k}}(),intersectSphere:function(){var a=new p;return function(b,c){a.subVectors(b.center,this.origin);var d=a.dot(this.direction),e=a.dot(a)-d*d;b=b.radius*b.radius;if(e>b)return null;b=Math.sqrt(b-e);e=d-b;d+=b;return 0>e&&0>d?null:0>e?this.at(d,c):this.at(e,c)}}(),intersectsSphere:function(a){return this.distanceSqToPoint(a.center)<=
+a.radius*a.radius},distanceToPlane:function(a){var b=a.normal.dot(this.direction);if(0===b)return 0===a.distanceToPoint(this.origin)?0:null;a=-(this.origin.dot(a.normal)+a.constant)/b;return 0<=a?a:null},intersectPlane:function(a,b){a=this.distanceToPlane(a);return null===a?null:this.at(a,b)},intersectsPlane:function(a){var b=a.distanceToPoint(this.origin);return 0===b||0>a.normal.dot(this.direction)*b?!0:!1},intersectBox:function(a,b){var c=1/this.direction.x;var d=1/this.direction.y;var e=1/this.direction.z,
+f=this.origin;if(0<=c){var g=(a.min.x-f.x)*c;c*=a.max.x-f.x}else g=(a.max.x-f.x)*c,c*=a.min.x-f.x;if(0<=d){var h=(a.min.y-f.y)*d;d*=a.max.y-f.y}else h=(a.max.y-f.y)*d,d*=a.min.y-f.y;if(g>d||h>c)return null;if(h>g||g!==g)g=h;if(d<c||c!==c)c=d;0<=e?(h=(a.min.z-f.z)*e,a=(a.max.z-f.z)*e):(h=(a.max.z-f.z)*e,a=(a.min.z-f.z)*e);if(g>a||h>c)return null;if(h>g||g!==g)g=h;if(a<c||c!==c)c=a;return 0>c?null:this.at(0<=g?g:c,b)},intersectsBox:function(){var a=new p;return function(b){return null!==this.intersectBox(b,
+a)}}(),intersectTriangle:function(){var a=new p,b=new p,c=new p,d=new p;return function(e,f,g,h,k){b.subVectors(f,e);c.subVectors(g,e);d.crossVectors(b,c);f=this.direction.dot(d);if(0<f){if(h)return null;h=1}else if(0>f)h=-1,f=-f;else return null;a.subVectors(this.origin,e);e=h*this.direction.dot(c.crossVectors(a,c));if(0>e)return null;g=h*this.direction.dot(b.cross(a));if(0>g||e+g>f)return null;e=-h*a.dot(d);return 0>e?null:this.at(e/f,k)}}(),applyMatrix4:function(a){this.origin.applyMatrix4(a);
+this.direction.transformDirection(a);return this},equals:function(a){return a.origin.equals(this.origin)&&a.direction.equals(this.direction)}});Object.assign(ha,{getNormal:function(){var a=new p;return function(b,c,d,e){void 0===e&&(console.warn("THREE.Triangle: .getNormal() target is now required"),e=new p);e.subVectors(d,c);a.subVectors(b,c);e.cross(a);b=e.lengthSq();return 0<b?e.multiplyScalar(1/Math.sqrt(b)):e.set(0,0,0)}}(),getBarycoord:function(){var a=new p,b=new p,c=new p;return function(d,
+e,f,g,h){a.subVectors(g,e);b.subVectors(f,e);c.subVectors(d,e);d=a.dot(a);e=a.dot(b);f=a.dot(c);var k=b.dot(b);g=b.dot(c);var m=d*k-e*e;void 0===h&&(console.warn("THREE.Triangle: .getBarycoord() target is now required"),h=new p);if(0===m)return h.set(-2,-1,-1);m=1/m;k=(k*f-e*g)*m;d=(d*g-e*f)*m;return h.set(1-k-d,d,k)}}(),containsPoint:function(){var a=new p;return function(b,c,d,e){ha.getBarycoord(b,c,d,e,a);return 0<=a.x&&0<=a.y&&1>=a.x+a.y}}(),getUV:function(){var a=new p;return function(b,c,d,
+e,f,g,h,k){this.getBarycoord(b,c,d,e,a);k.set(0,0);k.addScaledVector(f,a.x);k.addScaledVector(g,a.y);k.addScaledVector(h,a.z);return k}}()});Object.assign(ha.prototype,{set:function(a,b,c){this.a.copy(a);this.b.copy(b);this.c.copy(c);return this},setFromPointsAndIndices:function(a,b,c,d){this.a.copy(a[b]);this.b.copy(a[c]);this.c.copy(a[d]);return this},clone:function(){return(new this.constructor).copy(this)},copy:function(a){this.a.copy(a.a);this.b.copy(a.b);this.c.copy(a.c);return this},getArea:function(){var a=
+new p,b=new p;return function(){a.subVectors(this.c,this.b);b.subVectors(this.a,this.b);return.5*a.cross(b).length()}}(),getMidpoint:function(a){void 0===a&&(console.warn("THREE.Triangle: .getMidpoint() target is now required"),a=new p);return a.addVectors(this.a,this.b).add(this.c).multiplyScalar(1/3)},getNormal:function(a){return ha.getNormal(this.a,this.b,this.c,a)},getPlane:function(a){void 0===a&&(console.warn("THREE.Triangle: .getPlane() target is now required"),a=new p);return a.setFromCoplanarPoints(this.a,
+this.b,this.c)},getBarycoord:function(a,b){return ha.getBarycoord(a,this.a,this.b,this.c,b)},containsPoint:function(a){return ha.containsPoint(a,this.a,this.b,this.c)},getUV:function(a,b,c,d,e){return ha.getUV(a,this.a,this.b,this.c,b,c,d,e)},intersectsBox:function(a){return a.intersectsTriangle(this)},closestPointToPoint:function(){var a=new p,b=new p,c=new p,d=new p,e=new p,f=new p;return function(g,h){void 0===h&&(console.warn("THREE.Triangle: .closestPointToPoint() target is now required"),h=
+new p);var k=this.a,m=this.b,l=this.c;a.subVectors(m,k);b.subVectors(l,k);d.subVectors(g,k);var n=a.dot(d),r=b.dot(d);if(0>=n&&0>=r)return h.copy(k);e.subVectors(g,m);var x=a.dot(e),t=b.dot(e);if(0<=x&&t<=x)return h.copy(m);var u=n*t-x*r;if(0>=u&&0<=n&&0>=x)return m=n/(n-x),h.copy(k).addScaledVector(a,m);f.subVectors(g,l);g=a.dot(f);var w=b.dot(f);if(0<=w&&g<=w)return h.copy(l);n=g*r-n*w;if(0>=n&&0<=r&&0>=w)return u=r/(r-w),h.copy(k).addScaledVector(b,u);r=x*w-g*t;if(0>=r&&0<=t-x&&0<=g-w)return c.subVectors(l,
+m),u=(t-x)/(t-x+(g-w)),h.copy(m).addScaledVector(c,u);l=1/(r+n+u);m=n*l;u*=l;return h.copy(k).addScaledVector(a,m).addScaledVector(b,u)}}(),equals:function(a){return a.a.equals(this.a)&&a.b.equals(this.b)&&a.c.equals(this.c)}});Ea.prototype=Object.create(L.prototype);Ea.prototype.constructor=Ea;Ea.prototype.isMeshBasicMaterial=!0;Ea.prototype.copy=function(a){L.prototype.copy.call(this,a);this.color.copy(a.color);this.map=a.map;this.lightMap=a.lightMap;this.lightMapIntensity=a.lightMapIntensity;this.aoMap=
+a.aoMap;this.aoMapIntensity=a.aoMapIntensity;this.specularMap=a.specularMap;this.alphaMap=a.alphaMap;this.envMap=a.envMap;this.combine=a.combine;this.reflectivity=a.reflectivity;this.refractionRatio=a.refractionRatio;this.wireframe=a.wireframe;this.wireframeLinewidth=a.wireframeLinewidth;this.wireframeLinecap=a.wireframeLinecap;this.wireframeLinejoin=a.wireframeLinejoin;this.skinning=a.skinning;this.morphTargets=a.morphTargets;return this};pa.prototype=Object.assign(Object.create(D.prototype),{constructor:pa,
+isMesh:!0,setDrawMode:function(a){this.drawMode=a},copy:function(a){D.prototype.copy.call(this,a);this.drawMode=a.drawMode;void 0!==a.morphTargetInfluences&&(this.morphTargetInfluences=a.morphTargetInfluences.slice());void 0!==a.morphTargetDictionary&&(this.morphTargetDictionary=Object.assign({},a.morphTargetDictionary));return this},updateMorphTargets:function(){var a=this.geometry;if(a.isBufferGeometry){a=a.morphAttributes;var b=Object.keys(a);if(0<b.length){var c=a[b[0]];if(void 0!==c)for(this.morphTargetInfluences=
+[],this.morphTargetDictionary={},a=0,b=c.length;a<b;a++){var d=c[a].name||String(a);this.morphTargetInfluences.push(0);this.morphTargetDictionary[d]=a}}}else if(c=a.morphTargets,void 0!==c&&0<c.length)for(this.morphTargetInfluences=[],this.morphTargetDictionary={},a=0,b=c.length;a<b;a++)d=c[a].name||String(a),this.morphTargetInfluences.push(0),this.morphTargetDictionary[d]=a},raycast:function(){function a(a,b,c,d,e,f,g,h){if(null===(1===b.side?d.intersectTriangle(g,f,e,!0,h):d.intersectTriangle(e,
+f,g,2!==b.side,h)))return null;u.copy(h);u.applyMatrix4(a.matrixWorld);b=c.ray.origin.distanceTo(u);return b<c.near||b>c.far?null:{distance:b,point:u.clone(),object:a}}function b(b,c,d,e,k,m,l,q,p){f.fromBufferAttribute(k,l);g.fromBufferAttribute(k,q);h.fromBufferAttribute(k,p);if(b=a(b,c,d,e,f,g,h,t))m&&(n.fromBufferAttribute(m,l),r.fromBufferAttribute(m,q),x.fromBufferAttribute(m,p),b.uv=ha.getUV(t,f,g,h,n,r,x,new z)),m=new Xa(l,q,p),ha.getNormal(f,g,h,m.normal),b.face=m;return b}var c=new P,d=
+new rb,e=new Ga,f=new p,g=new p,h=new p,k=new p,m=new p,l=new p,n=new z,r=new z,x=new z,t=new p,u=new p;return function(q,p){var u=this.geometry,w=this.material,y=this.matrixWorld;if(void 0!==w&&(null===u.boundingSphere&&u.computeBoundingSphere(),e.copy(u.boundingSphere),e.applyMatrix4(y),!1!==q.ray.intersectsSphere(e)&&(c.getInverse(y),d.copy(q.ray).applyMatrix4(c),null===u.boundingBox||!1!==d.intersectsBox(u.boundingBox))))if(u.isBufferGeometry){var A=u.index,C=u.attributes.position,B=u.attributes.uv,
+E=u.groups;u=u.drawRange;var D;if(null!==A)if(Array.isArray(w)){var F=0;for(D=E.length;F<D;F++){var G=E[F];var J=w[G.materialIndex];y=Math.max(G.start,u.start);var L=Math.min(G.start+G.count,u.start+u.count);for(G=y;G<L;G+=3){y=A.getX(G);var I=A.getX(G+1);var K=A.getX(G+2);if(y=b(this,J,q,d,C,B,y,I,K))y.faceIndex=Math.floor(G/3),p.push(y)}}}else for(y=Math.max(0,u.start),L=Math.min(A.count,u.start+u.count),F=y,D=L;F<D;F+=3){if(y=A.getX(F),I=A.getX(F+1),K=A.getX(F+2),y=b(this,w,q,d,C,B,y,I,K))y.faceIndex=
+Math.floor(F/3),p.push(y)}else if(void 0!==C)if(Array.isArray(w))for(F=0,D=E.length;F<D;F++)for(G=E[F],J=w[G.materialIndex],y=Math.max(G.start,u.start),L=Math.min(G.start+G.count,u.start+u.count),G=y;G<L;G+=3){if(y=G,I=G+1,K=G+2,y=b(this,J,q,d,C,B,y,I,K))y.faceIndex=Math.floor(G/3),p.push(y)}else for(y=Math.max(0,u.start),L=Math.min(C.count,u.start+u.count),F=y,D=L;F<D;F+=3)if(y=F,I=F+1,K=F+2,y=b(this,w,q,d,C,B,y,I,K))y.faceIndex=Math.floor(F/3),p.push(y)}else if(u.isGeometry)for(C=Array.isArray(w),
+B=u.vertices,E=u.faces,y=u.faceVertexUvs[0],0<y.length&&(A=y),G=0,L=E.length;G<L;G++)if(I=E[G],y=C?w[I.materialIndex]:w,void 0!==y){F=B[I.a];D=B[I.b];J=B[I.c];if(!0===y.morphTargets){K=u.morphTargets;var Q=this.morphTargetInfluences;f.set(0,0,0);g.set(0,0,0);h.set(0,0,0);for(var P=0,S=K.length;P<S;P++){var R=Q[P];if(0!==R){var T=K[P].vertices;f.addScaledVector(k.subVectors(T[I.a],F),R);g.addScaledVector(m.subVectors(T[I.b],D),R);h.addScaledVector(l.subVectors(T[I.c],J),R)}}f.add(F);g.add(D);h.add(J);
+F=f;D=g;J=h}if(y=a(this,y,q,d,F,D,J,t))A&&A[G]&&(K=A[G],n.copy(K[0]),r.copy(K[1]),x.copy(K[2]),y.uv=ha.getUV(t,F,D,J,n,r,x,new z)),y.face=I,y.faceIndex=G,p.push(y)}}}(),clone:function(){return(new this.constructor(this.geometry,this.material)).copy(this)}});Ya.prototype=Object.create(W.prototype);Ya.prototype.constructor=Ya;Ya.prototype.isCubeTexture=!0;Object.defineProperty(Ya.prototype,"images",{get:function(){return this.image},set:function(a){this.image=a}});Mb.prototype=Object.create(W.prototype);
+Mb.prototype.constructor=Mb;Mb.prototype.isDataTexture3D=!0;var Qe=new W,kg=new Mb,Re=new Ya,Ke=[],Me=[],Pe=new Float32Array(16),Oe=new Float32Array(9),Ne=new Float32Array(4);Ve.prototype.updateCache=function(a){var b=this.cache;a instanceof Float32Array&&b.length!==a.length&&(this.cache=new Float32Array(a.length));sa(b,a)};We.prototype.setValue=function(a,b,c){for(var d=this.seq,e=0,f=d.length;e!==f;++e){var g=d[e];g.setValue(a,b[g.id],c)}};var $d=/([\w\d_]+)(\])?(\[|\.)?/g;db.prototype.setValue=
+function(a,b,c){b=this.map[b];void 0!==b&&b.setValue(a,c,this.renderer)};db.prototype.setOptional=function(a,b,c){b=b[c];void 0!==b&&this.setValue(a,c,b)};db.upload=function(a,b,c,d){for(var e=0,f=b.length;e!==f;++e){var g=b[e],h=c[g.id];!1!==h.needsUpdate&&g.setValue(a,h.value,d)}};db.seqWithValue=function(a,b){for(var c=[],d=0,e=a.length;d!==e;++d){var f=a[d];f.id in b&&c.push(f)}return c};var Fg=0,Og=0;eb.prototype=Object.create(L.prototype);eb.prototype.constructor=eb;eb.prototype.isMeshDepthMaterial=
+!0;eb.prototype.copy=function(a){L.prototype.copy.call(this,a);this.depthPacking=a.depthPacking;this.skinning=a.skinning;this.morphTargets=a.morphTargets;this.map=a.map;this.alphaMap=a.alphaMap;this.displacementMap=a.displacementMap;this.displacementScale=a.displacementScale;this.displacementBias=a.displacementBias;this.wireframe=a.wireframe;this.wireframeLinewidth=a.wireframeLinewidth;return this};fb.prototype=Object.create(L.prototype);fb.prototype.constructor=fb;fb.prototype.isMeshDistanceMaterial=
+!0;fb.prototype.copy=function(a){L.prototype.copy.call(this,a);this.referencePosition.copy(a.referencePosition);this.nearDistance=a.nearDistance;this.farDistance=a.farDistance;this.skinning=a.skinning;this.morphTargets=a.morphTargets;this.map=a.map;this.alphaMap=a.alphaMap;this.displacementMap=a.displacementMap;this.displacementScale=a.displacementScale;this.displacementBias=a.displacementBias;return this};Ob.prototype=Object.assign(Object.create(D.prototype),{constructor:Ob,isGroup:!0});Ra.prototype=
+Object.assign(Object.create(D.prototype),{constructor:Ra,isCamera:!0,copy:function(a,b){D.prototype.copy.call(this,a,b);this.matrixWorldInverse.copy(a.matrixWorldInverse);this.projectionMatrix.copy(a.projectionMatrix);this.projectionMatrixInverse.copy(a.projectionMatrixInverse);return this},getWorldDirection:function(a){void 0===a&&(console.warn("THREE.Camera: .getWorldDirection() target is now required"),a=new p);this.updateMatrixWorld(!0);var b=this.matrixWorld.elements;return a.set(-b[8],-b[9],
+-b[10]).normalize()},updateMatrixWorld:function(a){D.prototype.updateMatrixWorld.call(this,a);this.matrixWorldInverse.getInverse(this.matrixWorld)},clone:function(){return(new this.constructor).copy(this)}});V.prototype=Object.assign(Object.create(Ra.prototype),{constructor:V,isPerspectiveCamera:!0,copy:function(a,b){Ra.prototype.copy.call(this,a,b);this.fov=a.fov;this.zoom=a.zoom;this.near=a.near;this.far=a.far;this.focus=a.focus;this.aspect=a.aspect;this.view=null===a.view?null:Object.assign({},
+a.view);this.filmGauge=a.filmGauge;this.filmOffset=a.filmOffset;return this},setFocalLength:function(a){a=.5*this.getFilmHeight()/a;this.fov=2*R.RAD2DEG*Math.atan(a);this.updateProjectionMatrix()},getFocalLength:function(){var a=Math.tan(.5*R.DEG2RAD*this.fov);return.5*this.getFilmHeight()/a},getEffectiveFOV:function(){return 2*R.RAD2DEG*Math.atan(Math.tan(.5*R.DEG2RAD*this.fov)/this.zoom)},getFilmWidth:function(){return this.filmGauge*Math.min(this.aspect,1)},getFilmHeight:function(){return this.filmGauge/
+Math.max(this.aspect,1)},setViewOffset:function(a,b,c,d,e,f){this.aspect=a/b;null===this.view&&(this.view={enabled:!0,fullWidth:1,fullHeight:1,offsetX:0,offsetY:0,width:1,height:1});this.view.enabled=!0;this.view.fullWidth=a;this.view.fullHeight=b;this.view.offsetX=c;this.view.offsetY=d;this.view.width=e;this.view.height=f;this.updateProjectionMatrix()},clearViewOffset:function(){null!==this.view&&(this.view.enabled=!1);this.updateProjectionMatrix()},updateProjectionMatrix:function(){var a=this.near,
+b=a*Math.tan(.5*R.DEG2RAD*this.fov)/this.zoom,c=2*b,d=this.aspect*c,e=-.5*d,f=this.view;if(null!==this.view&&this.view.enabled){var g=f.fullWidth,h=f.fullHeight;e+=f.offsetX*d/g;b-=f.offsetY*c/h;d*=f.width/g;c*=f.height/h}f=this.filmOffset;0!==f&&(e+=a*f/this.getFilmWidth());this.projectionMatrix.makePerspective(e,e+d,b,b-c,a,this.far);this.projectionMatrixInverse.getInverse(this.projectionMatrix)},toJSON:function(a){a=D.prototype.toJSON.call(this,a);a.object.fov=this.fov;a.object.zoom=this.zoom;
+a.object.near=this.near;a.object.far=this.far;a.object.focus=this.focus;a.object.aspect=this.aspect;null!==this.view&&(a.object.view=Object.assign({},this.view));a.object.filmGauge=this.filmGauge;a.object.filmOffset=this.filmOffset;return a}});Cc.prototype=Object.assign(Object.create(V.prototype),{constructor:Cc,isArrayCamera:!0});var ff=new p,gf=new p;Pb.prototype.isFogExp2=!0;Pb.prototype.clone=function(){return new Pb(this.color,this.density)};Pb.prototype.toJSON=function(){return{type:"FogExp2",
+color:this.color.getHex(),density:this.density}};Qb.prototype.isFog=!0;Qb.prototype.clone=function(){return new Qb(this.color,this.near,this.far)};Qb.prototype.toJSON=function(){return{type:"Fog",color:this.color.getHex(),near:this.near,far:this.far}};vd.prototype=Object.assign(Object.create(D.prototype),{constructor:vd,copy:function(a,b){D.prototype.copy.call(this,a,b);null!==a.background&&(this.background=a.background.clone());null!==a.fog&&(this.fog=a.fog.clone());null!==a.overrideMaterial&&(this.overrideMaterial=
+a.overrideMaterial.clone());this.autoUpdate=a.autoUpdate;this.matrixAutoUpdate=a.matrixAutoUpdate;return this},toJSON:function(a){var b=D.prototype.toJSON.call(this,a);null!==this.background&&(b.object.background=this.background.toJSON(a));null!==this.fog&&(b.object.fog=this.fog.toJSON());return b}});Object.defineProperty(sb.prototype,"needsUpdate",{set:function(a){!0===a&&this.version++}});Object.assign(sb.prototype,{isInterleavedBuffer:!0,onUploadCallback:function(){},setArray:function(a){if(Array.isArray(a))throw new TypeError("THREE.BufferAttribute: array should be a Typed Array.");
+this.count=void 0!==a?a.length/this.stride:0;this.array=a;return this},setDynamic:function(a){this.dynamic=a;return this},copy:function(a){this.array=new a.array.constructor(a.array);this.count=a.count;this.stride=a.stride;this.dynamic=a.dynamic;return this},copyAt:function(a,b,c){a*=this.stride;c*=b.stride;for(var d=0,e=this.stride;d<e;d++)this.array[a+d]=b.array[c+d];return this},set:function(a,b){void 0===b&&(b=0);this.array.set(a,b);return this},clone:function(){return(new this.constructor).copy(this)},
+onUpload:function(a){this.onUploadCallback=a;return this}});Object.defineProperties(Dc.prototype,{count:{get:function(){return this.data.count}},array:{get:function(){return this.data.array}}});Object.assign(Dc.prototype,{isInterleavedBufferAttribute:!0,setX:function(a,b){this.data.array[a*this.data.stride+this.offset]=b;return this},setY:function(a,b){this.data.array[a*this.data.stride+this.offset+1]=b;return this},setZ:function(a,b){this.data.array[a*this.data.stride+this.offset+2]=b;return this},
+setW:function(a,b){this.data.array[a*this.data.stride+this.offset+3]=b;return this},getX:function(a){return this.data.array[a*this.data.stride+this.offset]},getY:function(a){return this.data.array[a*this.data.stride+this.offset+1]},getZ:function(a){return this.data.array[a*this.data.stride+this.offset+2]},getW:function(a){return this.data.array[a*this.data.stride+this.offset+3]},setXY:function(a,b,c){a=a*this.data.stride+this.offset;this.data.array[a+0]=b;this.data.array[a+1]=c;return this},setXYZ:function(a,
+b,c,d){a=a*this.data.stride+this.offset;this.data.array[a+0]=b;this.data.array[a+1]=c;this.data.array[a+2]=d;return this},setXYZW:function(a,b,c,d,e){a=a*this.data.stride+this.offset;this.data.array[a+0]=b;this.data.array[a+1]=c;this.data.array[a+2]=d;this.data.array[a+3]=e;return this}});hb.prototype=Object.create(L.prototype);hb.prototype.constructor=hb;hb.prototype.isSpriteMaterial=!0;hb.prototype.copy=function(a){L.prototype.copy.call(this,a);this.color.copy(a.color);this.map=a.map;this.rotation=
+a.rotation;this.sizeAttenuation=a.sizeAttenuation;return this};var Rb;Ec.prototype=Object.assign(Object.create(D.prototype),{constructor:Ec,isSprite:!0,raycast:function(){function a(a,b,c,d,h,k){e.subVectors(a,c).addScalar(.5).multiply(d);void 0!==h?(f.x=k*e.x-h*e.y,f.y=h*e.x+k*e.y):f.copy(e);a.copy(b);a.x+=f.x;a.y+=f.y;a.applyMatrix4(g)}var b=new p,c=new p,d=new p,e=new z,f=new z,g=new P,h=new p,k=new p,m=new p,l=new z,n=new z,r=new z;return function(e,f){c.setFromMatrixScale(this.matrixWorld);g.getInverse(this.modelViewMatrix).premultiply(this.matrixWorld);
+d.setFromMatrixPosition(this.modelViewMatrix);var q=this.material.rotation;if(0!==q){var p=Math.cos(q);var t=Math.sin(q)}q=this.center;a(h.set(-.5,-.5,0),d,q,c,t,p);a(k.set(.5,-.5,0),d,q,c,t,p);a(m.set(.5,.5,0),d,q,c,t,p);l.set(0,0);n.set(1,0);r.set(1,1);var x=e.ray.intersectTriangle(h,k,m,!1,b);if(null===x&&(a(k.set(-.5,.5,0),d,q,c,t,p),n.set(0,1),x=e.ray.intersectTriangle(h,m,k,!1,b),null===x))return;t=e.ray.origin.distanceTo(b);t<e.near||t>e.far||f.push({distance:t,point:b.clone(),uv:ha.getUV(b,
+h,k,m,l,n,r,new z),face:null,object:this})}}(),clone:function(){return(new this.constructor(this.material)).copy(this)},copy:function(a){D.prototype.copy.call(this,a);void 0!==a.center&&this.center.copy(a.center);return this}});Fc.prototype=Object.assign(Object.create(D.prototype),{constructor:Fc,copy:function(a){D.prototype.copy.call(this,a,!1);a=a.levels;for(var b=0,c=a.length;b<c;b++){var d=a[b];this.addLevel(d.object.clone(),d.distance)}return this},addLevel:function(a,b){void 0===b&&(b=0);b=
+Math.abs(b);for(var c=this.levels,d=0;d<c.length&&!(b<c[d].distance);d++);c.splice(d,0,{distance:b,object:a});this.add(a)},getObjectForDistance:function(a){for(var b=this.levels,c=1,d=b.length;c<d&&!(a<b[c].distance);c++);return b[c-1].object},raycast:function(){var a=new p;return function(b,c){a.setFromMatrixPosition(this.matrixWorld);var d=b.ray.origin.distanceTo(a);this.getObjectForDistance(d).raycast(b,c)}}(),update:function(){var a=new p,b=new p;return function(c){var d=this.levels;if(1<d.length){a.setFromMatrixPosition(c.matrixWorld);
+b.setFromMatrixPosition(this.matrixWorld);c=a.distanceTo(b);d[0].object.visible=!0;for(var e=1,f=d.length;e<f;e++)if(c>=d[e].distance)d[e-1].object.visible=!1,d[e].object.visible=!0;else break;for(;e<f;e++)d[e].object.visible=!1}}}(),toJSON:function(a){a=D.prototype.toJSON.call(this,a);a.object.levels=[];for(var b=this.levels,c=0,d=b.length;c<d;c++){var e=b[c];a.object.levels.push({object:e.object.uuid,distance:e.distance})}return a}});Object.assign(Gc.prototype,{calculateInverses:function(){this.boneInverses=
+[];for(var a=0,b=this.bones.length;a<b;a++){var c=new P;this.bones[a]&&c.getInverse(this.bones[a].matrixWorld);this.boneInverses.push(c)}},pose:function(){var a,b;var c=0;for(b=this.bones.length;c<b;c++)(a=this.bones[c])&&a.matrixWorld.getInverse(this.boneInverses[c]);c=0;for(b=this.bones.length;c<b;c++)if(a=this.bones[c])a.parent&&a.parent.isBone?(a.matrix.getInverse(a.parent.matrixWorld),a.matrix.multiply(a.matrixWorld)):a.matrix.copy(a.matrixWorld),a.matrix.decompose(a.position,a.quaternion,a.scale)},
+update:function(){var a=new P,b=new P;return function(){for(var c=this.bones,d=this.boneInverses,e=this.boneMatrices,f=this.boneTexture,g=0,h=c.length;g<h;g++)a.multiplyMatrices(c[g]?c[g].matrixWorld:b,d[g]),a.toArray(e,16*g);void 0!==f&&(f.needsUpdate=!0)}}(),clone:function(){return new Gc(this.bones,this.boneInverses)},getBoneByName:function(a){for(var b=0,c=this.bones.length;b<c;b++){var d=this.bones[b];if(d.name===a)return d}}});wd.prototype=Object.assign(Object.create(D.prototype),{constructor:wd,
+isBone:!0});xd.prototype=Object.assign(Object.create(pa.prototype),{constructor:xd,isSkinnedMesh:!0,initBones:function(){var a=[],b;if(this.geometry&&void 0!==this.geometry.bones){var c=0;for(b=this.geometry.bones.length;c<b;c++){var d=this.geometry.bones[c];var e=new wd;a.push(e);e.name=d.name;e.position.fromArray(d.pos);e.quaternion.fromArray(d.rotq);void 0!==d.scl&&e.scale.fromArray(d.scl)}c=0;for(b=this.geometry.bones.length;c<b;c++)d=this.geometry.bones[c],-1!==d.parent&&null!==d.parent&&void 0!==
+a[d.parent]?a[d.parent].add(a[c]):this.add(a[c])}this.updateMatrixWorld(!0);return a},bind:function(a,b){this.skeleton=a;void 0===b&&(this.updateMatrixWorld(!0),this.skeleton.calculateInverses(),b=this.matrixWorld);this.bindMatrix.copy(b);this.bindMatrixInverse.getInverse(b)},pose:function(){this.skeleton.pose()},normalizeSkinWeights:function(){var a;if(this.geometry&&this.geometry.isGeometry)for(a=0;a<this.geometry.skinWeights.length;a++){var b=this.geometry.skinWeights[a];var c=1/b.manhattanLength();
+Infinity!==c?b.multiplyScalar(c):b.set(1,0,0,0)}else if(this.geometry&&this.geometry.isBufferGeometry){b=new Z;var d=this.geometry.attributes.skinWeight;for(a=0;a<d.count;a++)b.x=d.getX(a),b.y=d.getY(a),b.z=d.getZ(a),b.w=d.getW(a),c=1/b.manhattanLength(),Infinity!==c?b.multiplyScalar(c):b.set(1,0,0,0),d.setXYZW(a,b.x,b.y,b.z,b.w)}},updateMatrixWorld:function(a){pa.prototype.updateMatrixWorld.call(this,a);"attached"===this.bindMode?this.bindMatrixInverse.getInverse(this.matrixWorld):"detached"===this.bindMode?
+this.bindMatrixInverse.getInverse(this.bindMatrix):console.warn("THREE.SkinnedMesh: Unrecognized bindMode: "+this.bindMode)},clone:function(){return(new this.constructor(this.geometry,this.material)).copy(this)}});T.prototype=Object.create(L.prototype);T.prototype.constructor=T;T.prototype.isLineBasicMaterial=!0;T.prototype.copy=function(a){L.prototype.copy.call(this,a);this.color.copy(a.color);this.linewidth=a.linewidth;this.linecap=a.linecap;this.linejoin=a.linejoin;return this};ma.prototype=Object.assign(Object.create(D.prototype),
+{constructor:ma,isLine:!0,computeLineDistances:function(){var a=new p,b=new p;return function(){var c=this.geometry;if(c.isBufferGeometry)if(null===c.index){for(var d=c.attributes.position,e=[0],f=1,g=d.count;f<g;f++)a.fromBufferAttribute(d,f-1),b.fromBufferAttribute(d,f),e[f]=e[f-1],e[f]+=a.distanceTo(b);c.addAttribute("lineDistance",new C(e,1))}else console.warn("THREE.Line.computeLineDistances(): Computation only possible with non-indexed BufferGeometry.");else if(c.isGeometry)for(d=c.vertices,
+e=c.lineDistances,e[0]=0,f=1,g=d.length;f<g;f++)e[f]=e[f-1],e[f]+=d[f-1].distanceTo(d[f]);return this}}(),raycast:function(){var a=new P,b=new rb,c=new Ga;return function(d,e){var f=d.linePrecision,g=this.geometry,h=this.matrixWorld;null===g.boundingSphere&&g.computeBoundingSphere();c.copy(g.boundingSphere);c.applyMatrix4(h);c.radius+=f;if(!1!==d.ray.intersectsSphere(c)){a.getInverse(h);b.copy(d.ray).applyMatrix4(a);f/=(this.scale.x+this.scale.y+this.scale.z)/3;f*=f;var k=new p,m=new p;h=new p;var l=
+new p,n=this&&this.isLineSegments?2:1;if(g.isBufferGeometry){var r=g.index,x=g.attributes.position.array;if(null!==r){r=r.array;g=0;for(var t=r.length-1;g<t;g+=n){var u=r[g+1];k.fromArray(x,3*r[g]);m.fromArray(x,3*u);u=b.distanceSqToSegment(k,m,l,h);u>f||(l.applyMatrix4(this.matrixWorld),u=d.ray.origin.distanceTo(l),u<d.near||u>d.far||e.push({distance:u,point:h.clone().applyMatrix4(this.matrixWorld),index:g,face:null,faceIndex:null,object:this}))}}else for(g=0,t=x.length/3-1;g<t;g+=n)k.fromArray(x,
+3*g),m.fromArray(x,3*g+3),u=b.distanceSqToSegment(k,m,l,h),u>f||(l.applyMatrix4(this.matrixWorld),u=d.ray.origin.distanceTo(l),u<d.near||u>d.far||e.push({distance:u,point:h.clone().applyMatrix4(this.matrixWorld),index:g,face:null,faceIndex:null,object:this}))}else if(g.isGeometry)for(k=g.vertices,m=k.length,g=0;g<m-1;g+=n)u=b.distanceSqToSegment(k[g],k[g+1],l,h),u>f||(l.applyMatrix4(this.matrixWorld),u=d.ray.origin.distanceTo(l),u<d.near||u>d.far||e.push({distance:u,point:h.clone().applyMatrix4(this.matrixWorld),
+index:g,face:null,faceIndex:null,object:this}))}}}(),copy:function(a){D.prototype.copy.call(this,a);this.geometry.copy(a.geometry);this.material.copy(a.material);return this},clone:function(){return(new this.constructor).copy(this)}});S.prototype=Object.assign(Object.create(ma.prototype),{constructor:S,isLineSegments:!0,computeLineDistances:function(){var a=new p,b=new p;return function(){var c=this.geometry;if(c.isBufferGeometry)if(null===c.index){for(var d=c.attributes.position,e=[],f=0,g=d.count;f<
+g;f+=2)a.fromBufferAttribute(d,f),b.fromBufferAttribute(d,f+1),e[f]=0===f?0:e[f-1],e[f+1]=e[f]+a.distanceTo(b);c.addAttribute("lineDistance",new C(e,1))}else console.warn("THREE.LineSegments.computeLineDistances(): Computation only possible with non-indexed BufferGeometry.");else if(c.isGeometry)for(d=c.vertices,e=c.lineDistances,f=0,g=d.length;f<g;f+=2)a.copy(d[f]),b.copy(d[f+1]),e[f]=0===f?0:e[f-1],e[f+1]=e[f]+a.distanceTo(b);return this}}()});yd.prototype=Object.assign(Object.create(ma.prototype),
+{constructor:yd,isLineLoop:!0});Ha.prototype=Object.create(L.prototype);Ha.prototype.constructor=Ha;Ha.prototype.isPointsMaterial=!0;Ha.prototype.copy=function(a){L.prototype.copy.call(this,a);this.color.copy(a.color);this.map=a.map;this.size=a.size;this.sizeAttenuation=a.sizeAttenuation;this.morphTargets=a.morphTargets;return this};Sb.prototype=Object.assign(Object.create(D.prototype),{constructor:Sb,isPoints:!0,raycast:function(){var a=new P,b=new rb,c=new Ga;return function(d,e){function f(a,c){var f=
+b.distanceSqToPoint(a);f<l&&(b.closestPointToPoint(a,n),n.applyMatrix4(k),a=d.ray.origin.distanceTo(n),a<d.near||a>d.far||e.push({distance:a,distanceToRay:Math.sqrt(f),point:n.clone(),index:c,face:null,object:g}))}var g=this,h=this.geometry,k=this.matrixWorld,m=d.params.Points.threshold;null===h.boundingSphere&&h.computeBoundingSphere();c.copy(h.boundingSphere);c.applyMatrix4(k);c.radius+=m;if(!1!==d.ray.intersectsSphere(c)){a.getInverse(k);b.copy(d.ray).applyMatrix4(a);m/=(this.scale.x+this.scale.y+
+this.scale.z)/3;var l=m*m;m=new p;var n=new p;if(h.isBufferGeometry){var r=h.index;h=h.attributes.position.array;if(null!==r){var x=r.array;r=0;for(var t=x.length;r<t;r++){var u=x[r];m.fromArray(h,3*u);f(m,u)}}else for(r=0,x=h.length/3;r<x;r++)m.fromArray(h,3*r),f(m,r)}else for(m=h.vertices,r=0,x=m.length;r<x;r++)f(m[r],r)}}}(),clone:function(){return(new this.constructor(this.geometry,this.material)).copy(this)}});de.prototype=Object.assign(Object.create(W.prototype),{constructor:de,isVideoTexture:!0,
+update:function(){var a=this.image;a.readyState>=a.HAVE_CURRENT_DATA&&(this.needsUpdate=!0)}});Tb.prototype=Object.create(W.prototype);Tb.prototype.constructor=Tb;Tb.prototype.isCompressedTexture=!0;Hc.prototype=Object.create(W.prototype);Hc.prototype.constructor=Hc;Hc.prototype.isCanvasTexture=!0;Ic.prototype=Object.create(W.prototype);Ic.prototype.constructor=Ic;Ic.prototype.isDepthTexture=!0;Ub.prototype=Object.create(E.prototype);Ub.prototype.constructor=Ub;Jc.prototype=Object.create(I.prototype);
+Jc.prototype.constructor=Jc;Vb.prototype=Object.create(E.prototype);Vb.prototype.constructor=Vb;Kc.prototype=Object.create(I.prototype);Kc.prototype.constructor=Kc;ya.prototype=Object.create(E.prototype);ya.prototype.constructor=ya;Lc.prototype=Object.create(I.prototype);Lc.prototype.constructor=Lc;Wb.prototype=Object.create(ya.prototype);Wb.prototype.constructor=Wb;Mc.prototype=Object.create(I.prototype);Mc.prototype.constructor=Mc;tb.prototype=Object.create(ya.prototype);tb.prototype.constructor=
+tb;Nc.prototype=Object.create(I.prototype);Nc.prototype.constructor=Nc;Xb.prototype=Object.create(ya.prototype);Xb.prototype.constructor=Xb;Oc.prototype=Object.create(I.prototype);Oc.prototype.constructor=Oc;Yb.prototype=Object.create(ya.prototype);Yb.prototype.constructor=Yb;Pc.prototype=Object.create(I.prototype);Pc.prototype.constructor=Pc;Zb.prototype=Object.create(E.prototype);Zb.prototype.constructor=Zb;Qc.prototype=Object.create(I.prototype);Qc.prototype.constructor=Qc;$b.prototype=Object.create(E.prototype);
+$b.prototype.constructor=$b;Rc.prototype=Object.create(I.prototype);Rc.prototype.constructor=Rc;ac.prototype=Object.create(E.prototype);ac.prototype.constructor=ac;var ah={triangulate:function(a,b,c){c=c||2;var d=b&&b.length,e=d?b[0]*c:a.length,f=jf(a,0,e,c,!0),g=[];if(!f)return g;var h;if(d){var k=c;d=[];var m;var l=0;for(m=b.length;l<m;l++){var n=b[l]*k;var r=l<m-1?b[l+1]*k:a.length;n=jf(a,n,r,k,!1);n===n.next&&(n.steiner=!0);d.push(Wg(n))}d.sort(Ug);for(l=0;l<d.length;l++){b=d[l];k=f;if(k=Vg(b,
+k))b=mf(k,b),Tc(b,b.next);f=Tc(f,f.next)}}if(a.length>80*c){var p=h=a[0];var t=d=a[1];for(k=c;k<e;k+=c)l=a[k],b=a[k+1],l<p&&(p=l),b<t&&(t=b),l>h&&(h=l),b>d&&(d=b);h=Math.max(h-p,d-t);h=0!==h?1/h:0}Uc(f,g,c,p,t,h);return g}},Za={area:function(a){for(var b=a.length,c=0,d=b-1,e=0;e<b;d=e++)c+=a[d].x*a[e].y-a[e].x*a[d].y;return.5*c},isClockWise:function(a){return 0>Za.area(a)},triangulateShape:function(a,b){var c=[],d=[],e=[];nf(a);of(c,a);var f=a.length;b.forEach(nf);for(a=0;a<b.length;a++)d.push(f),
+f+=b[a].length,of(c,b[a]);b=ah.triangulate(c,d);for(a=0;a<b.length;a+=3)e.push(b.slice(a,a+3));return e}};vb.prototype=Object.create(I.prototype);vb.prototype.constructor=vb;vb.prototype.toJSON=function(){var a=I.prototype.toJSON.call(this);return pf(this.parameters.shapes,this.parameters.options,a)};Sa.prototype=Object.create(E.prototype);Sa.prototype.constructor=Sa;Sa.prototype.toJSON=function(){var a=E.prototype.toJSON.call(this);return pf(this.parameters.shapes,this.parameters.options,a)};var Xg=
+{generateTopUV:function(a,b,c,d,e){a=b[3*d];d=b[3*d+1];var f=b[3*e];e=b[3*e+1];return[new z(b[3*c],b[3*c+1]),new z(a,d),new z(f,e)]},generateSideWallUV:function(a,b,c,d,e,f){a=b[3*c];var g=b[3*c+1];c=b[3*c+2];var h=b[3*d],k=b[3*d+1];d=b[3*d+2];var m=b[3*e],l=b[3*e+1];e=b[3*e+2];var n=b[3*f],r=b[3*f+1];b=b[3*f+2];return.01>Math.abs(g-k)?[new z(a,1-c),new z(h,1-d),new z(m,1-e),new z(n,1-b)]:[new z(g,1-c),new z(k,1-d),new z(l,1-e),new z(r,1-b)]}};Wc.prototype=Object.create(I.prototype);Wc.prototype.constructor=
+Wc;bc.prototype=Object.create(Sa.prototype);bc.prototype.constructor=bc;Xc.prototype=Object.create(I.prototype);Xc.prototype.constructor=Xc;wb.prototype=Object.create(E.prototype);wb.prototype.constructor=wb;Yc.prototype=Object.create(I.prototype);Yc.prototype.constructor=Yc;cc.prototype=Object.create(E.prototype);cc.prototype.constructor=cc;Zc.prototype=Object.create(I.prototype);Zc.prototype.constructor=Zc;dc.prototype=Object.create(E.prototype);dc.prototype.constructor=dc;xb.prototype=Object.create(I.prototype);
+xb.prototype.constructor=xb;xb.prototype.toJSON=function(){var a=I.prototype.toJSON.call(this);return qf(this.parameters.shapes,a)};yb.prototype=Object.create(E.prototype);yb.prototype.constructor=yb;yb.prototype.toJSON=function(){var a=E.prototype.toJSON.call(this);return qf(this.parameters.shapes,a)};ec.prototype=Object.create(E.prototype);ec.prototype.constructor=ec;zb.prototype=Object.create(I.prototype);zb.prototype.constructor=zb;$a.prototype=Object.create(E.prototype);$a.prototype.constructor=
+$a;$c.prototype=Object.create(zb.prototype);$c.prototype.constructor=$c;ad.prototype=Object.create($a.prototype);ad.prototype.constructor=ad;bd.prototype=Object.create(I.prototype);bd.prototype.constructor=bd;fc.prototype=Object.create(E.prototype);fc.prototype.constructor=fc;var Ba=Object.freeze({WireframeGeometry:Ub,ParametricGeometry:Jc,ParametricBufferGeometry:Vb,TetrahedronGeometry:Lc,TetrahedronBufferGeometry:Wb,OctahedronGeometry:Mc,OctahedronBufferGeometry:tb,IcosahedronGeometry:Nc,IcosahedronBufferGeometry:Xb,
+DodecahedronGeometry:Oc,DodecahedronBufferGeometry:Yb,PolyhedronGeometry:Kc,PolyhedronBufferGeometry:ya,TubeGeometry:Pc,TubeBufferGeometry:Zb,TorusKnotGeometry:Qc,TorusKnotBufferGeometry:$b,TorusGeometry:Rc,TorusBufferGeometry:ac,TextGeometry:Wc,TextBufferGeometry:bc,SphereGeometry:Xc,SphereBufferGeometry:wb,RingGeometry:Yc,RingBufferGeometry:cc,PlaneGeometry:yc,PlaneBufferGeometry:qb,LatheGeometry:Zc,LatheBufferGeometry:dc,ShapeGeometry:xb,ShapeBufferGeometry:yb,ExtrudeGeometry:vb,ExtrudeBufferGeometry:Sa,
+EdgesGeometry:ec,ConeGeometry:$c,ConeBufferGeometry:ad,CylinderGeometry:zb,CylinderBufferGeometry:$a,CircleGeometry:bd,CircleBufferGeometry:fc,BoxGeometry:Kb,BoxBufferGeometry:pb});Ab.prototype=Object.create(L.prototype);Ab.prototype.constructor=Ab;Ab.prototype.isShadowMaterial=!0;Ab.prototype.copy=function(a){L.prototype.copy.call(this,a);this.color.copy(a.color);return this};gc.prototype=Object.create(ka.prototype);gc.prototype.constructor=gc;gc.prototype.isRawShaderMaterial=!0;Ta.prototype=Object.create(L.prototype);
+Ta.prototype.constructor=Ta;Ta.prototype.isMeshStandardMaterial=!0;Ta.prototype.copy=function(a){L.prototype.copy.call(this,a);this.defines={STANDARD:""};this.color.copy(a.color);this.roughness=a.roughness;this.metalness=a.metalness;this.map=a.map;this.lightMap=a.lightMap;this.lightMapIntensity=a.lightMapIntensity;this.aoMap=a.aoMap;this.aoMapIntensity=a.aoMapIntensity;this.emissive.copy(a.emissive);this.emissiveMap=a.emissiveMap;this.emissiveIntensity=a.emissiveIntensity;this.bumpMap=a.bumpMap;this.bumpScale=
+a.bumpScale;this.normalMap=a.normalMap;this.normalMapType=a.normalMapType;this.normalScale.copy(a.normalScale);this.displacementMap=a.displacementMap;this.displacementScale=a.displacementScale;this.displacementBias=a.displacementBias;this.roughnessMap=a.roughnessMap;this.metalnessMap=a.metalnessMap;this.alphaMap=a.alphaMap;this.envMap=a.envMap;this.envMapIntensity=a.envMapIntensity;this.refractionRatio=a.refractionRatio;this.wireframe=a.wireframe;this.wireframeLinewidth=a.wireframeLinewidth;this.wireframeLinecap=
+a.wireframeLinecap;this.wireframeLinejoin=a.wireframeLinejoin;this.skinning=a.skinning;this.morphTargets=a.morphTargets;this.morphNormals=a.morphNormals;return this};Bb.prototype=Object.create(Ta.prototype);Bb.prototype.constructor=Bb;Bb.prototype.isMeshPhysicalMaterial=!0;Bb.prototype.copy=function(a){Ta.prototype.copy.call(this,a);this.defines={PHYSICAL:""};this.reflectivity=a.reflectivity;this.clearCoat=a.clearCoat;this.clearCoatRoughness=a.clearCoatRoughness;return this};Ia.prototype=Object.create(L.prototype);
+Ia.prototype.constructor=Ia;Ia.prototype.isMeshPhongMaterial=!0;Ia.prototype.copy=function(a){L.prototype.copy.call(this,a);this.color.copy(a.color);this.specular.copy(a.specular);this.shininess=a.shininess;this.map=a.map;this.lightMap=a.lightMap;this.lightMapIntensity=a.lightMapIntensity;this.aoMap=a.aoMap;this.aoMapIntensity=a.aoMapIntensity;this.emissive.copy(a.emissive);this.emissiveMap=a.emissiveMap;this.emissiveIntensity=a.emissiveIntensity;this.bumpMap=a.bumpMap;this.bumpScale=a.bumpScale;
+this.normalMap=a.normalMap;this.normalMapType=a.normalMapType;this.normalScale.copy(a.normalScale);this.displacementMap=a.displacementMap;this.displacementScale=a.displacementScale;this.displacementBias=a.displacementBias;this.specularMap=a.specularMap;this.alphaMap=a.alphaMap;this.envMap=a.envMap;this.combine=a.combine;this.reflectivity=a.reflectivity;this.refractionRatio=a.refractionRatio;this.wireframe=a.wireframe;this.wireframeLinewidth=a.wireframeLinewidth;this.wireframeLinecap=a.wireframeLinecap;
+this.wireframeLinejoin=a.wireframeLinejoin;this.skinning=a.skinning;this.morphTargets=a.morphTargets;this.morphNormals=a.morphNormals;return this};Cb.prototype=Object.create(Ia.prototype);Cb.prototype.constructor=Cb;Cb.prototype.isMeshToonMaterial=!0;Cb.prototype.copy=function(a){Ia.prototype.copy.call(this,a);this.gradientMap=a.gradientMap;return this};Db.prototype=Object.create(L.prototype);Db.prototype.constructor=Db;Db.prototype.isMeshNormalMaterial=!0;Db.prototype.copy=function(a){L.prototype.copy.call(this,
+a);this.bumpMap=a.bumpMap;this.bumpScale=a.bumpScale;this.normalMap=a.normalMap;this.normalMapType=a.normalMapType;this.normalScale.copy(a.normalScale);this.displacementMap=a.displacementMap;this.displacementScale=a.displacementScale;this.displacementBias=a.displacementBias;this.wireframe=a.wireframe;this.wireframeLinewidth=a.wireframeLinewidth;this.skinning=a.skinning;this.morphTargets=a.morphTargets;this.morphNormals=a.morphNormals;return this};Eb.prototype=Object.create(L.prototype);Eb.prototype.constructor=
+Eb;Eb.prototype.isMeshLambertMaterial=!0;Eb.prototype.copy=function(a){L.prototype.copy.call(this,a);this.color.copy(a.color);this.map=a.map;this.lightMap=a.lightMap;this.lightMapIntensity=a.lightMapIntensity;this.aoMap=a.aoMap;this.aoMapIntensity=a.aoMapIntensity;this.emissive.copy(a.emissive);this.emissiveMap=a.emissiveMap;this.emissiveIntensity=a.emissiveIntensity;this.specularMap=a.specularMap;this.alphaMap=a.alphaMap;this.envMap=a.envMap;this.combine=a.combine;this.reflectivity=a.reflectivity;
+this.refractionRatio=a.refractionRatio;this.wireframe=a.wireframe;this.wireframeLinewidth=a.wireframeLinewidth;this.wireframeLinecap=a.wireframeLinecap;this.wireframeLinejoin=a.wireframeLinejoin;this.skinning=a.skinning;this.morphTargets=a.morphTargets;this.morphNormals=a.morphNormals;return this};Fb.prototype=Object.create(L.prototype);Fb.prototype.constructor=Fb;Fb.prototype.isMeshMatcapMaterial=!0;Fb.prototype.copy=function(a){L.prototype.copy.call(this,a);this.defines={MATCAP:""};this.color.copy(a.color);
+this.matcap=a.matcap;this.map=a.map;this.bumpMap=a.bumpMap;this.bumpScale=a.bumpScale;this.normalMap=a.normalMap;this.normalMapType=a.normalMapType;this.normalScale.copy(a.normalScale);this.displacementMap=a.displacementMap;this.displacementScale=a.displacementScale;this.displacementBias=a.displacementBias;this.alphaMap=a.alphaMap;this.skinning=a.skinning;this.morphTargets=a.morphTargets;this.morphNormals=a.morphNormals;return this};Gb.prototype=Object.create(T.prototype);Gb.prototype.constructor=
+Gb;Gb.prototype.isLineDashedMaterial=!0;Gb.prototype.copy=function(a){T.prototype.copy.call(this,a);this.scale=a.scale;this.dashSize=a.dashSize;this.gapSize=a.gapSize;return this};var bh=Object.freeze({ShadowMaterial:Ab,SpriteMaterial:hb,RawShaderMaterial:gc,ShaderMaterial:ka,PointsMaterial:Ha,MeshPhysicalMaterial:Bb,MeshStandardMaterial:Ta,MeshPhongMaterial:Ia,MeshToonMaterial:Cb,MeshNormalMaterial:Db,MeshLambertMaterial:Eb,MeshDepthMaterial:eb,MeshDistanceMaterial:fb,MeshBasicMaterial:Ea,MeshMatcapMaterial:Fb,
+LineDashedMaterial:Gb,LineBasicMaterial:T,Material:L}),ra={arraySlice:function(a,b,c){return ra.isTypedArray(a)?new a.constructor(a.subarray(b,void 0!==c?c:a.length)):a.slice(b,c)},convertArray:function(a,b,c){return!a||!c&&a.constructor===b?a:"number"===typeof b.BYTES_PER_ELEMENT?new b(a):Array.prototype.slice.call(a)},isTypedArray:function(a){return ArrayBuffer.isView(a)&&!(a instanceof DataView)},getKeyframeOrder:function(a){for(var b=a.length,c=Array(b),d=0;d!==b;++d)c[d]=d;c.sort(function(b,
+c){return a[b]-a[c]});return c},sortedArray:function(a,b,c){for(var d=a.length,e=new a.constructor(d),f=0,g=0;g!==d;++f)for(var h=c[f]*b,k=0;k!==b;++k)e[g++]=a[h+k];return e},flattenJSON:function(a,b,c,d){for(var e=1,f=a[0];void 0!==f&&void 0===f[d];)f=a[e++];if(void 0!==f){var g=f[d];if(void 0!==g)if(Array.isArray(g)){do g=f[d],void 0!==g&&(b.push(f.time),c.push.apply(c,g)),f=a[e++];while(void 0!==f)}else if(void 0!==g.toArray){do g=f[d],void 0!==g&&(b.push(f.time),g.toArray(c,c.length)),f=a[e++];
+while(void 0!==f)}else{do g=f[d],void 0!==g&&(b.push(f.time),c.push(g)),f=a[e++];while(void 0!==f)}}}};Object.assign(Ca.prototype,{evaluate:function(a){var b=this.parameterPositions,c=this._cachedIndex,d=b[c],e=b[c-1];a:{b:{c:{d:if(!(a<d)){for(var f=c+2;;){if(void 0===d){if(a<e)break d;this._cachedIndex=c=b.length;return this.afterEnd_(c-1,a,e)}if(c===f)break;e=d;d=b[++c];if(a<d)break b}d=b.length;break c}if(a>=e)break a;else{f=b[1];a<f&&(c=2,e=f);for(f=c-2;;){if(void 0===e)return this._cachedIndex=
+0,this.beforeStart_(0,a,d);if(c===f)break;d=e;e=b[--c-1];if(a>=e)break b}d=c;c=0}}for(;c<d;)e=c+d>>>1,a<b[e]?d=e:c=e+1;d=b[c];e=b[c-1];if(void 0===e)return this._cachedIndex=0,this.beforeStart_(0,a,d);if(void 0===d)return this._cachedIndex=c=b.length,this.afterEnd_(c-1,e,a)}this._cachedIndex=c;this.intervalChanged_(c,e,d)}return this.interpolate_(c,e,a,d)},settings:null,DefaultSettings_:{},getSettings_:function(){return this.settings||this.DefaultSettings_},copySampleValue_:function(a){var b=this.resultBuffer,
+c=this.sampleValues,d=this.valueSize;a*=d;for(var e=0;e!==d;++e)b[e]=c[a+e];return b},interpolate_:function(){throw Error("call to abstract method");},intervalChanged_:function(){}});Object.assign(Ca.prototype,{beforeStart_:Ca.prototype.copySampleValue_,afterEnd_:Ca.prototype.copySampleValue_});Ad.prototype=Object.assign(Object.create(Ca.prototype),{constructor:Ad,DefaultSettings_:{endingStart:2400,endingEnd:2400},intervalChanged_:function(a,b,c){var d=this.parameterPositions,e=a-2,f=a+1,g=d[e],h=
+d[f];if(void 0===g)switch(this.getSettings_().endingStart){case 2401:e=a;g=2*b-c;break;case 2402:e=d.length-2;g=b+d[e]-d[e+1];break;default:e=a,g=c}if(void 0===h)switch(this.getSettings_().endingEnd){case 2401:f=a;h=2*c-b;break;case 2402:f=1;h=c+d[1]-d[0];break;default:f=a-1,h=b}a=.5*(c-b);d=this.valueSize;this._weightPrev=a/(b-g);this._weightNext=a/(h-c);this._offsetPrev=e*d;this._offsetNext=f*d},interpolate_:function(a,b,c,d){var e=this.resultBuffer,f=this.sampleValues,g=this.valueSize;a*=g;var h=
+a-g,k=this._offsetPrev,m=this._offsetNext,l=this._weightPrev,n=this._weightNext,r=(c-b)/(d-b);c=r*r;d=c*r;b=-l*d+2*l*c-l*r;l=(1+l)*d+(-1.5-2*l)*c+(-.5+l)*r+1;r=(-1-n)*d+(1.5+n)*c+.5*r;n=n*d-n*c;for(c=0;c!==g;++c)e[c]=b*f[k+c]+l*f[h+c]+r*f[a+c]+n*f[m+c];return e}});cd.prototype=Object.assign(Object.create(Ca.prototype),{constructor:cd,interpolate_:function(a,b,c,d){var e=this.resultBuffer,f=this.sampleValues,g=this.valueSize;a*=g;var h=a-g;b=(c-b)/(d-b);c=1-b;for(d=0;d!==g;++d)e[d]=f[h+d]*c+f[a+d]*
+b;return e}});Bd.prototype=Object.assign(Object.create(Ca.prototype),{constructor:Bd,interpolate_:function(a){return this.copySampleValue_(a-1)}});Object.assign(qa,{toJSON:function(a){var b=a.constructor;if(void 0!==b.toJSON)b=b.toJSON(a);else{b={name:a.name,times:ra.convertArray(a.times,Array),values:ra.convertArray(a.values,Array)};var c=a.getInterpolation();c!==a.DefaultInterpolation&&(b.interpolation=c)}b.type=a.ValueTypeName;return b}});Object.assign(qa.prototype,{constructor:qa,TimeBufferType:Float32Array,
+ValueBufferType:Float32Array,DefaultInterpolation:2301,InterpolantFactoryMethodDiscrete:function(a){return new Bd(this.times,this.values,this.getValueSize(),a)},InterpolantFactoryMethodLinear:function(a){return new cd(this.times,this.values,this.getValueSize(),a)},InterpolantFactoryMethodSmooth:function(a){return new Ad(this.times,this.values,this.getValueSize(),a)},setInterpolation:function(a){switch(a){case 2300:var b=this.InterpolantFactoryMethodDiscrete;break;case 2301:b=this.InterpolantFactoryMethodLinear;
+break;case 2302:b=this.InterpolantFactoryMethodSmooth}if(void 0===b){b="unsupported interpolation for "+this.ValueTypeName+" keyframe track named "+this.name;if(void 0===this.createInterpolant)if(a!==this.DefaultInterpolation)this.setInterpolation(this.DefaultInterpolation);else throw Error(b);console.warn("THREE.KeyframeTrack:",b);return this}this.createInterpolant=b;return this},getInterpolation:function(){switch(this.createInterpolant){case this.InterpolantFactoryMethodDiscrete:return 2300;case this.InterpolantFactoryMethodLinear:return 2301;
+case this.InterpolantFactoryMethodSmooth:return 2302}},getValueSize:function(){return this.values.length/this.times.length},shift:function(a){if(0!==a)for(var b=this.times,c=0,d=b.length;c!==d;++c)b[c]+=a;return this},scale:function(a){if(1!==a)for(var b=this.times,c=0,d=b.length;c!==d;++c)b[c]*=a;return this},trim:function(a,b){for(var c=this.times,d=c.length,e=0,f=d-1;e!==d&&c[e]<a;)++e;for(;-1!==f&&c[f]>b;)--f;++f;if(0!==e||f!==d)e>=f&&(f=Math.max(f,1),e=f-1),a=this.getValueSize(),this.times=ra.arraySlice(c,
+e,f),this.values=ra.arraySlice(this.values,e*a,f*a);return this},validate:function(){var a=!0,b=this.getValueSize();0!==b-Math.floor(b)&&(console.error("THREE.KeyframeTrack: Invalid value size in track.",this),a=!1);var c=this.times;b=this.values;var d=c.length;0===d&&(console.error("THREE.KeyframeTrack: Track is empty.",this),a=!1);for(var e=null,f=0;f!==d;f++){var g=c[f];if("number"===typeof g&&isNaN(g)){console.error("THREE.KeyframeTrack: Time is not a valid number.",this,f,g);a=!1;break}if(null!==
+e&&e>g){console.error("THREE.KeyframeTrack: Out of order keys.",this,f,g,e);a=!1;break}e=g}if(void 0!==b&&ra.isTypedArray(b))for(f=0,c=b.length;f!==c;++f)if(d=b[f],isNaN(d)){console.error("THREE.KeyframeTrack: Value is not a valid number.",this,f,d);a=!1;break}return a},optimize:function(){for(var a=this.times,b=this.values,c=this.getValueSize(),d=2302===this.getInterpolation(),e=1,f=a.length-1,g=1;g<f;++g){var h=!1,k=a[g];if(k!==a[g+1]&&(1!==g||k!==k[0]))if(d)h=!0;else{var m=g*c,l=m-c,n=m+c;for(k=
+0;k!==c;++k){var r=b[m+k];if(r!==b[l+k]||r!==b[n+k]){h=!0;break}}}if(h){if(g!==e)for(a[e]=a[g],h=g*c,m=e*c,k=0;k!==c;++k)b[m+k]=b[h+k];++e}}if(0<f){a[e]=a[f];h=f*c;m=e*c;for(k=0;k!==c;++k)b[m+k]=b[h+k];++e}e!==a.length&&(this.times=ra.arraySlice(a,0,e),this.values=ra.arraySlice(b,0,e*c));return this}});Cd.prototype=Object.assign(Object.create(qa.prototype),{constructor:Cd,ValueTypeName:"bool",ValueBufferType:Array,DefaultInterpolation:2300,InterpolantFactoryMethodLinear:void 0,InterpolantFactoryMethodSmooth:void 0});
+Dd.prototype=Object.assign(Object.create(qa.prototype),{constructor:Dd,ValueTypeName:"color"});hc.prototype=Object.assign(Object.create(qa.prototype),{constructor:hc,ValueTypeName:"number"});Ed.prototype=Object.assign(Object.create(Ca.prototype),{constructor:Ed,interpolate_:function(a,b,c,d){var e=this.resultBuffer,f=this.sampleValues,g=this.valueSize;a*=g;b=(c-b)/(d-b);for(c=a+g;a!==c;a+=4)ja.slerpFlat(e,0,f,a-g,f,a,b);return e}});dd.prototype=Object.assign(Object.create(qa.prototype),{constructor:dd,
+ValueTypeName:"quaternion",DefaultInterpolation:2301,InterpolantFactoryMethodLinear:function(a){return new Ed(this.times,this.values,this.getValueSize(),a)},InterpolantFactoryMethodSmooth:void 0});Fd.prototype=Object.assign(Object.create(qa.prototype),{constructor:Fd,ValueTypeName:"string",ValueBufferType:Array,DefaultInterpolation:2300,InterpolantFactoryMethodLinear:void 0,InterpolantFactoryMethodSmooth:void 0});ic.prototype=Object.assign(Object.create(qa.prototype),{constructor:ic,ValueTypeName:"vector"});
+Object.assign(za,{parse:function(a){for(var b=[],c=a.tracks,d=1/(a.fps||1),e=0,f=c.length;e!==f;++e)b.push(Zg(c[e]).scale(d));return new za(a.name,a.duration,b)},toJSON:function(a){var b=[],c=a.tracks;a={name:a.name,duration:a.duration,tracks:b,uuid:a.uuid};for(var d=0,e=c.length;d!==e;++d)b.push(qa.toJSON(c[d]));return a},CreateFromMorphTargetSequence:function(a,b,c,d){for(var e=b.length,f=[],g=0;g<e;g++){var h=[],k=[];h.push((g+e-1)%e,g,(g+1)%e);k.push(0,1,0);var m=ra.getKeyframeOrder(h);h=ra.sortedArray(h,
+1,m);k=ra.sortedArray(k,1,m);d||0!==h[0]||(h.push(e),k.push(k[0]));f.push((new hc(".morphTargetInfluences["+b[g].name+"]",h,k)).scale(1/c))}return new za(a,-1,f)},findByName:function(a,b){var c=a;Array.isArray(a)||(c=a.geometry&&a.geometry.animations||a.animations);for(a=0;a<c.length;a++)if(c[a].name===b)return c[a];return null},CreateClipsFromMorphTargetSequences:function(a,b,c){for(var d={},e=/^([\w-]*?)([\d]+)$/,f=0,g=a.length;f<g;f++){var h=a[f],k=h.name.match(e);if(k&&1<k.length){var m=k[1];
+(k=d[m])||(d[m]=k=[]);k.push(h)}}a=[];for(m in d)a.push(za.CreateFromMorphTargetSequence(m,d[m],b,c));return a},parseAnimation:function(a,b){if(!a)return console.error("THREE.AnimationClip: No animation in JSONLoader data."),null;var c=function(a,b,c,d,e){if(0!==c.length){var f=[],g=[];ra.flattenJSON(c,f,g,d);0!==f.length&&e.push(new a(b,f,g))}},d=[],e=a.name||"default",f=a.length||-1,g=a.fps||30;a=a.hierarchy||[];for(var h=0;h<a.length;h++){var k=a[h].keys;if(k&&0!==k.length)if(k[0].morphTargets){f=
+{};for(var m=0;m<k.length;m++)if(k[m].morphTargets)for(var l=0;l<k[m].morphTargets.length;l++)f[k[m].morphTargets[l]]=-1;for(var n in f){var r=[],p=[];for(l=0;l!==k[m].morphTargets.length;++l){var t=k[m];r.push(t.time);p.push(t.morphTarget===n?1:0)}d.push(new hc(".morphTargetInfluence["+n+"]",r,p))}f=f.length*(g||1)}else m=".bones["+b[h].name+"]",c(ic,m+".position",k,"pos",d),c(dd,m+".quaternion",k,"rot",d),c(ic,m+".scale",k,"scl",d)}return 0===d.length?null:new za(e,f,d)}});Object.assign(za.prototype,
+{resetDuration:function(){for(var a=0,b=0,c=this.tracks.length;b!==c;++b){var d=this.tracks[b];a=Math.max(a,d.times[d.times.length-1])}this.duration=a;return this},trim:function(){for(var a=0;a<this.tracks.length;a++)this.tracks[a].trim(0,this.duration);return this},validate:function(){for(var a=!0,b=0;b<this.tracks.length;b++)a=a&&this.tracks[b].validate();return a},optimize:function(){for(var a=0;a<this.tracks.length;a++)this.tracks[a].optimize();return this}});var Ib={enabled:!1,files:{},add:function(a,
+b){!1!==this.enabled&&(this.files[a]=b)},get:function(a){if(!1!==this.enabled)return this.files[a]},remove:function(a){delete this.files[a]},clear:function(){this.files={}}},ta=new ge,Oa={};Object.assign(Fa.prototype,{load:function(a,b,c,d){void 0===a&&(a="");void 0!==this.path&&(a=this.path+a);a=this.manager.resolveURL(a);var e=this,f=Ib.get(a);if(void 0!==f)return e.manager.itemStart(a),setTimeout(function(){b&&b(f);e.manager.itemEnd(a)},0),f;if(void 0!==Oa[a])Oa[a].push({onLoad:b,onProgress:c,
+onError:d});else{var g=a.match(/^data:(.*?)(;base64)?,(.*)$/);if(g){c=g[1];var h=!!g[2];g=g[3];g=decodeURIComponent(g);h&&(g=atob(g));try{var k=(this.responseType||"").toLowerCase();switch(k){case "arraybuffer":case "blob":var m=new Uint8Array(g.length);for(h=0;h<g.length;h++)m[h]=g.charCodeAt(h);var l="blob"===k?new Blob([m.buffer],{type:c}):m.buffer;break;case "document":l=(new DOMParser).parseFromString(g,c);break;case "json":l=JSON.parse(g);break;default:l=g}setTimeout(function(){b&&b(l);e.manager.itemEnd(a)},
+0)}catch(r){setTimeout(function(){d&&d(r);e.manager.itemError(a);e.manager.itemEnd(a)},0)}}else{Oa[a]=[];Oa[a].push({onLoad:b,onProgress:c,onError:d});var n=new XMLHttpRequest;n.open("GET",a,!0);n.addEventListener("load",function(b){var c=this.response;Ib.add(a,c);var d=Oa[a];delete Oa[a];if(200===this.status||0===this.status){0===this.status&&console.warn("THREE.FileLoader: HTTP Status 0 received.");for(var f=0,g=d.length;f<g;f++){var h=d[f];if(h.onLoad)h.onLoad(c)}}else{f=0;for(g=d.length;f<g;f++)if(h=
+d[f],h.onError)h.onError(b);e.manager.itemError(a)}e.manager.itemEnd(a)},!1);n.addEventListener("progress",function(b){for(var c=Oa[a],d=0,e=c.length;d<e;d++){var f=c[d];if(f.onProgress)f.onProgress(b)}},!1);n.addEventListener("error",function(b){var c=Oa[a];delete Oa[a];for(var d=0,f=c.length;d<f;d++){var g=c[d];if(g.onError)g.onError(b)}e.manager.itemError(a);e.manager.itemEnd(a)},!1);n.addEventListener("abort",function(b){var c=Oa[a];delete Oa[a];for(var d=0,f=c.length;d<f;d++){var g=c[d];if(g.onError)g.onError(b)}e.manager.itemError(a);
+e.manager.itemEnd(a)},!1);void 0!==this.responseType&&(n.responseType=this.responseType);void 0!==this.withCredentials&&(n.withCredentials=this.withCredentials);n.overrideMimeType&&n.overrideMimeType(void 0!==this.mimeType?this.mimeType:"text/plain");for(h in this.requestHeader)n.setRequestHeader(h,this.requestHeader[h]);n.send(null)}e.manager.itemStart(a);return n}},setPath:function(a){this.path=a;return this},setResponseType:function(a){this.responseType=a;return this},setWithCredentials:function(a){this.withCredentials=
+a;return this},setMimeType:function(a){this.mimeType=a;return this},setRequestHeader:function(a){this.requestHeader=a;return this}});Object.assign(rf.prototype,{load:function(a,b,c,d){var e=this,f=new Fa(e.manager);f.setPath(e.path);f.load(a,function(a){b(e.parse(JSON.parse(a)))},c,d)},parse:function(a,b){for(var c=[],d=0;d<a.length;d++){var e=za.parse(a[d]);c.push(e)}b(c)},setPath:function(a){this.path=a;return this}});Object.assign(sf.prototype,{load:function(a,b,c,d){function e(e){k.load(a[e],
+function(a){a=f._parser(a,!0);g[e]={width:a.width,height:a.height,format:a.format,mipmaps:a.mipmaps};m+=1;6===m&&(1===a.mipmapCount&&(h.minFilter=1006),h.format=a.format,h.needsUpdate=!0,b&&b(h))},c,d)}var f=this,g=[],h=new Tb;h.image=g;var k=new Fa(this.manager);k.setPath(this.path);k.setResponseType("arraybuffer");if(Array.isArray(a))for(var m=0,l=0,n=a.length;l<n;++l)e(l);else k.load(a,function(a){a=f._parser(a,!0);if(a.isCubemap)for(var c=a.mipmaps.length/a.mipmapCount,d=0;d<c;d++){g[d]={mipmaps:[]};
+for(var e=0;e<a.mipmapCount;e++)g[d].mipmaps.push(a.mipmaps[d*a.mipmapCount+e]),g[d].format=a.format,g[d].width=a.width,g[d].height=a.height}else h.image.width=a.width,h.image.height=a.height,h.mipmaps=a.mipmaps;1===a.mipmapCount&&(h.minFilter=1006);h.format=a.format;h.needsUpdate=!0;b&&b(h)},c,d);return h},setPath:function(a){this.path=a;return this}});Object.assign(he.prototype,{load:function(a,b,c,d){var e=this,f=new lb,g=new Fa(this.manager);g.setResponseType("arraybuffer");g.setPath(this.path);
+g.load(a,function(a){if(a=e._parser(a))void 0!==a.image?f.image=a.image:void 0!==a.data&&(f.image.width=a.width,f.image.height=a.height,f.image.data=a.data),f.wrapS=void 0!==a.wrapS?a.wrapS:1001,f.wrapT=void 0!==a.wrapT?a.wrapT:1001,f.magFilter=void 0!==a.magFilter?a.magFilter:1006,f.minFilter=void 0!==a.minFilter?a.minFilter:1008,f.anisotropy=void 0!==a.anisotropy?a.anisotropy:1,void 0!==a.format&&(f.format=a.format),void 0!==a.type&&(f.type=a.type),void 0!==a.mipmaps&&(f.mipmaps=a.mipmaps),1===
+a.mipmapCount&&(f.minFilter=1006),f.needsUpdate=!0,b&&b(f,a)},c,d);return f},setPath:function(a){this.path=a;return this}});Object.assign(ed.prototype,{crossOrigin:"anonymous",load:function(a,b,c,d){function e(){k.removeEventListener("load",e,!1);k.removeEventListener("error",f,!1);Ib.add(a,this);b&&b(this);g.manager.itemEnd(a)}function f(b){k.removeEventListener("load",e,!1);k.removeEventListener("error",f,!1);d&&d(b);g.manager.itemError(a);g.manager.itemEnd(a)}void 0===a&&(a="");void 0!==this.path&&
+(a=this.path+a);a=this.manager.resolveURL(a);var g=this,h=Ib.get(a);if(void 0!==h)return g.manager.itemStart(a),setTimeout(function(){b&&b(h);g.manager.itemEnd(a)},0),h;var k=document.createElementNS("http://www.w3.org/1999/xhtml","img");k.addEventListener("load",e,!1);k.addEventListener("error",f,!1);"data:"!==a.substr(0,5)&&void 0!==this.crossOrigin&&(k.crossOrigin=this.crossOrigin);g.manager.itemStart(a);k.src=a;return k},setCrossOrigin:function(a){this.crossOrigin=a;return this},setPath:function(a){this.path=
+a;return this}});Object.assign(ie.prototype,{crossOrigin:"anonymous",load:function(a,b,c,d){function e(c){g.load(a[c],function(a){f.images[c]=a;h++;6===h&&(f.needsUpdate=!0,b&&b(f))},void 0,d)}var f=new Ya,g=new ed(this.manager);g.setCrossOrigin(this.crossOrigin);g.setPath(this.path);var h=0;for(c=0;c<a.length;++c)e(c);return f},setCrossOrigin:function(a){this.crossOrigin=a;return this},setPath:function(a){this.path=a;return this}});Object.assign(Gd.prototype,{crossOrigin:"anonymous",load:function(a,
+b,c,d){var e=new W,f=new ed(this.manager);f.setCrossOrigin(this.crossOrigin);f.setPath(this.path);f.load(a,function(c){e.image=c;c=0<a.search(/\.jpe?g$/i)||0===a.search(/^data:image\/jpeg/);e.format=c?1022:1023;e.needsUpdate=!0;void 0!==b&&b(e)},c,d);return e},setCrossOrigin:function(a){this.crossOrigin=a;return this},setPath:function(a){this.path=a;return this}});Object.assign(Q.prototype,{getPoint:function(){console.warn("THREE.Curve: .getPoint() not implemented.");return null},getPointAt:function(a,
+b){a=this.getUtoTmapping(a);return this.getPoint(a,b)},getPoints:function(a){void 0===a&&(a=5);for(var b=[],c=0;c<=a;c++)b.push(this.getPoint(c/a));return b},getSpacedPoints:function(a){void 0===a&&(a=5);for(var b=[],c=0;c<=a;c++)b.push(this.getPointAt(c/a));return b},getLength:function(){var a=this.getLengths();return a[a.length-1]},getLengths:function(a){void 0===a&&(a=this.arcLengthDivisions);if(this.cacheArcLengths&&this.cacheArcLengths.length===a+1&&!this.needsUpdate)return this.cacheArcLengths;
+this.needsUpdate=!1;var b=[],c=this.getPoint(0),d,e=0;b.push(0);for(d=1;d<=a;d++){var f=this.getPoint(d/a);e+=f.distanceTo(c);b.push(e);c=f}return this.cacheArcLengths=b},updateArcLengths:function(){this.needsUpdate=!0;this.getLengths()},getUtoTmapping:function(a,b){var c=this.getLengths(),d=c.length;b=b?b:a*c[d-1];for(var e=0,f=d-1,g;e<=f;)if(a=Math.floor(e+(f-e)/2),g=c[a]-b,0>g)e=a+1;else if(0<g)f=a-1;else{f=a;break}a=f;if(c[a]===b)return a/(d-1);e=c[a];return(a+(b-e)/(c[a+1]-e))/(d-1)},getTangent:function(a){var b=
+a-1E-4;a+=1E-4;0>b&&(b=0);1<a&&(a=1);b=this.getPoint(b);return this.getPoint(a).clone().sub(b).normalize()},getTangentAt:function(a){a=this.getUtoTmapping(a);return this.getTangent(a)},computeFrenetFrames:function(a,b){var c=new p,d=[],e=[],f=[],g=new p,h=new P,k;for(k=0;k<=a;k++){var m=k/a;d[k]=this.getTangentAt(m);d[k].normalize()}e[0]=new p;f[0]=new p;k=Number.MAX_VALUE;m=Math.abs(d[0].x);var l=Math.abs(d[0].y),n=Math.abs(d[0].z);m<=k&&(k=m,c.set(1,0,0));l<=k&&(k=l,c.set(0,1,0));n<=k&&c.set(0,
+0,1);g.crossVectors(d[0],c).normalize();e[0].crossVectors(d[0],g);f[0].crossVectors(d[0],e[0]);for(k=1;k<=a;k++)e[k]=e[k-1].clone(),f[k]=f[k-1].clone(),g.crossVectors(d[k-1],d[k]),g.length()>Number.EPSILON&&(g.normalize(),c=Math.acos(R.clamp(d[k-1].dot(d[k]),-1,1)),e[k].applyMatrix4(h.makeRotationAxis(g,c))),f[k].crossVectors(d[k],e[k]);if(!0===b)for(c=Math.acos(R.clamp(e[0].dot(e[a]),-1,1)),c/=a,0<d[0].dot(g.crossVectors(e[0],e[a]))&&(c=-c),k=1;k<=a;k++)e[k].applyMatrix4(h.makeRotationAxis(d[k],
+c*k)),f[k].crossVectors(d[k],e[k]);return{tangents:d,normals:e,binormals:f}},clone:function(){return(new this.constructor).copy(this)},copy:function(a){this.arcLengthDivisions=a.arcLengthDivisions;return this},toJSON:function(){var a={metadata:{version:4.5,type:"Curve",generator:"Curve.toJSON"}};a.arcLengthDivisions=this.arcLengthDivisions;a.type=this.type;return a},fromJSON:function(a){this.arcLengthDivisions=a.arcLengthDivisions;return this}});wa.prototype=Object.create(Q.prototype);wa.prototype.constructor=
+wa;wa.prototype.isEllipseCurve=!0;wa.prototype.getPoint=function(a,b){b=b||new z;for(var c=2*Math.PI,d=this.aEndAngle-this.aStartAngle,e=Math.abs(d)<Number.EPSILON;0>d;)d+=c;for(;d>c;)d-=c;d<Number.EPSILON&&(d=e?0:c);!0!==this.aClockwise||e||(d=d===c?-c:d-c);c=this.aStartAngle+a*d;a=this.aX+this.xRadius*Math.cos(c);var f=this.aY+this.yRadius*Math.sin(c);0!==this.aRotation&&(c=Math.cos(this.aRotation),d=Math.sin(this.aRotation),e=a-this.aX,f-=this.aY,a=e*c-f*d+this.aX,f=e*d+f*c+this.aY);return b.set(a,
+f)};wa.prototype.copy=function(a){Q.prototype.copy.call(this,a);this.aX=a.aX;this.aY=a.aY;this.xRadius=a.xRadius;this.yRadius=a.yRadius;this.aStartAngle=a.aStartAngle;this.aEndAngle=a.aEndAngle;this.aClockwise=a.aClockwise;this.aRotation=a.aRotation;return this};wa.prototype.toJSON=function(){var a=Q.prototype.toJSON.call(this);a.aX=this.aX;a.aY=this.aY;a.xRadius=this.xRadius;a.yRadius=this.yRadius;a.aStartAngle=this.aStartAngle;a.aEndAngle=this.aEndAngle;a.aClockwise=this.aClockwise;a.aRotation=
+this.aRotation;return a};wa.prototype.fromJSON=function(a){Q.prototype.fromJSON.call(this,a);this.aX=a.aX;this.aY=a.aY;this.xRadius=a.xRadius;this.yRadius=a.yRadius;this.aStartAngle=a.aStartAngle;this.aEndAngle=a.aEndAngle;this.aClockwise=a.aClockwise;this.aRotation=a.aRotation;return this};jc.prototype=Object.create(wa.prototype);jc.prototype.constructor=jc;jc.prototype.isArcCurve=!0;var Ud=new p,Fe=new je,Ge=new je,He=new je;ua.prototype=Object.create(Q.prototype);ua.prototype.constructor=ua;ua.prototype.isCatmullRomCurve3=
+!0;ua.prototype.getPoint=function(a,b){b=b||new p;var c=this.points,d=c.length;a*=d-(this.closed?0:1);var e=Math.floor(a);a-=e;this.closed?e+=0<e?0:(Math.floor(Math.abs(e)/d)+1)*d:0===a&&e===d-1&&(e=d-2,a=1);if(this.closed||0<e)var f=c[(e-1)%d];else Ud.subVectors(c[0],c[1]).add(c[0]),f=Ud;var g=c[e%d];var h=c[(e+1)%d];this.closed||e+2<d?c=c[(e+2)%d]:(Ud.subVectors(c[d-1],c[d-2]).add(c[d-1]),c=Ud);if("centripetal"===this.curveType||"chordal"===this.curveType){var k="chordal"===this.curveType?.5:.25;
+d=Math.pow(f.distanceToSquared(g),k);e=Math.pow(g.distanceToSquared(h),k);k=Math.pow(h.distanceToSquared(c),k);1E-4>e&&(e=1);1E-4>d&&(d=e);1E-4>k&&(k=e);Fe.initNonuniformCatmullRom(f.x,g.x,h.x,c.x,d,e,k);Ge.initNonuniformCatmullRom(f.y,g.y,h.y,c.y,d,e,k);He.initNonuniformCatmullRom(f.z,g.z,h.z,c.z,d,e,k)}else"catmullrom"===this.curveType&&(Fe.initCatmullRom(f.x,g.x,h.x,c.x,this.tension),Ge.initCatmullRom(f.y,g.y,h.y,c.y,this.tension),He.initCatmullRom(f.z,g.z,h.z,c.z,this.tension));b.set(Fe.calc(a),
+Ge.calc(a),He.calc(a));return b};ua.prototype.copy=function(a){Q.prototype.copy.call(this,a);this.points=[];for(var b=0,c=a.points.length;b<c;b++)this.points.push(a.points[b].clone());this.closed=a.closed;this.curveType=a.curveType;this.tension=a.tension;return this};ua.prototype.toJSON=function(){var a=Q.prototype.toJSON.call(this);a.points=[];for(var b=0,c=this.points.length;b<c;b++)a.points.push(this.points[b].toArray());a.closed=this.closed;a.curveType=this.curveType;a.tension=this.tension;return a};
+ua.prototype.fromJSON=function(a){Q.prototype.fromJSON.call(this,a);this.points=[];for(var b=0,c=a.points.length;b<c;b++){var d=a.points[b];this.points.push((new p).fromArray(d))}this.closed=a.closed;this.curveType=a.curveType;this.tension=a.tension;return this};Ja.prototype=Object.create(Q.prototype);Ja.prototype.constructor=Ja;Ja.prototype.isCubicBezierCurve=!0;Ja.prototype.getPoint=function(a,b){b=b||new z;var c=this.v0,d=this.v1,e=this.v2,f=this.v3;b.set(gd(a,c.x,d.x,e.x,f.x),gd(a,c.y,d.y,e.y,
+f.y));return b};Ja.prototype.copy=function(a){Q.prototype.copy.call(this,a);this.v0.copy(a.v0);this.v1.copy(a.v1);this.v2.copy(a.v2);this.v3.copy(a.v3);return this};Ja.prototype.toJSON=function(){var a=Q.prototype.toJSON.call(this);a.v0=this.v0.toArray();a.v1=this.v1.toArray();a.v2=this.v2.toArray();a.v3=this.v3.toArray();return a};Ja.prototype.fromJSON=function(a){Q.prototype.fromJSON.call(this,a);this.v0.fromArray(a.v0);this.v1.fromArray(a.v1);this.v2.fromArray(a.v2);this.v3.fromArray(a.v3);return this};
+Ua.prototype=Object.create(Q.prototype);Ua.prototype.constructor=Ua;Ua.prototype.isCubicBezierCurve3=!0;Ua.prototype.getPoint=function(a,b){b=b||new p;var c=this.v0,d=this.v1,e=this.v2,f=this.v3;b.set(gd(a,c.x,d.x,e.x,f.x),gd(a,c.y,d.y,e.y,f.y),gd(a,c.z,d.z,e.z,f.z));return b};Ua.prototype.copy=function(a){Q.prototype.copy.call(this,a);this.v0.copy(a.v0);this.v1.copy(a.v1);this.v2.copy(a.v2);this.v3.copy(a.v3);return this};Ua.prototype.toJSON=function(){var a=Q.prototype.toJSON.call(this);a.v0=this.v0.toArray();
+a.v1=this.v1.toArray();a.v2=this.v2.toArray();a.v3=this.v3.toArray();return a};Ua.prototype.fromJSON=function(a){Q.prototype.fromJSON.call(this,a);this.v0.fromArray(a.v0);this.v1.fromArray(a.v1);this.v2.fromArray(a.v2);this.v3.fromArray(a.v3);return this};Aa.prototype=Object.create(Q.prototype);Aa.prototype.constructor=Aa;Aa.prototype.isLineCurve=!0;Aa.prototype.getPoint=function(a,b){b=b||new z;1===a?b.copy(this.v2):(b.copy(this.v2).sub(this.v1),b.multiplyScalar(a).add(this.v1));return b};Aa.prototype.getPointAt=
+function(a,b){return this.getPoint(a,b)};Aa.prototype.getTangent=function(){return this.v2.clone().sub(this.v1).normalize()};Aa.prototype.copy=function(a){Q.prototype.copy.call(this,a);this.v1.copy(a.v1);this.v2.copy(a.v2);return this};Aa.prototype.toJSON=function(){var a=Q.prototype.toJSON.call(this);a.v1=this.v1.toArray();a.v2=this.v2.toArray();return a};Aa.prototype.fromJSON=function(a){Q.prototype.fromJSON.call(this,a);this.v1.fromArray(a.v1);this.v2.fromArray(a.v2);return this};Ka.prototype=
+Object.create(Q.prototype);Ka.prototype.constructor=Ka;Ka.prototype.isLineCurve3=!0;Ka.prototype.getPoint=function(a,b){b=b||new p;1===a?b.copy(this.v2):(b.copy(this.v2).sub(this.v1),b.multiplyScalar(a).add(this.v1));return b};Ka.prototype.getPointAt=function(a,b){return this.getPoint(a,b)};Ka.prototype.copy=function(a){Q.prototype.copy.call(this,a);this.v1.copy(a.v1);this.v2.copy(a.v2);return this};Ka.prototype.toJSON=function(){var a=Q.prototype.toJSON.call(this);a.v1=this.v1.toArray();a.v2=this.v2.toArray();
+return a};Ka.prototype.fromJSON=function(a){Q.prototype.fromJSON.call(this,a);this.v1.fromArray(a.v1);this.v2.fromArray(a.v2);return this};La.prototype=Object.create(Q.prototype);La.prototype.constructor=La;La.prototype.isQuadraticBezierCurve=!0;La.prototype.getPoint=function(a,b){b=b||new z;var c=this.v0,d=this.v1,e=this.v2;b.set(fd(a,c.x,d.x,e.x),fd(a,c.y,d.y,e.y));return b};La.prototype.copy=function(a){Q.prototype.copy.call(this,a);this.v0.copy(a.v0);this.v1.copy(a.v1);this.v2.copy(a.v2);return this};
+La.prototype.toJSON=function(){var a=Q.prototype.toJSON.call(this);a.v0=this.v0.toArray();a.v1=this.v1.toArray();a.v2=this.v2.toArray();return a};La.prototype.fromJSON=function(a){Q.prototype.fromJSON.call(this,a);this.v0.fromArray(a.v0);this.v1.fromArray(a.v1);this.v2.fromArray(a.v2);return this};Va.prototype=Object.create(Q.prototype);Va.prototype.constructor=Va;Va.prototype.isQuadraticBezierCurve3=!0;Va.prototype.getPoint=function(a,b){b=b||new p;var c=this.v0,d=this.v1,e=this.v2;b.set(fd(a,c.x,
+d.x,e.x),fd(a,c.y,d.y,e.y),fd(a,c.z,d.z,e.z));return b};Va.prototype.copy=function(a){Q.prototype.copy.call(this,a);this.v0.copy(a.v0);this.v1.copy(a.v1);this.v2.copy(a.v2);return this};Va.prototype.toJSON=function(){var a=Q.prototype.toJSON.call(this);a.v0=this.v0.toArray();a.v1=this.v1.toArray();a.v2=this.v2.toArray();return a};Va.prototype.fromJSON=function(a){Q.prototype.fromJSON.call(this,a);this.v0.fromArray(a.v0);this.v1.fromArray(a.v1);this.v2.fromArray(a.v2);return this};Ma.prototype=Object.create(Q.prototype);
+Ma.prototype.constructor=Ma;Ma.prototype.isSplineCurve=!0;Ma.prototype.getPoint=function(a,b){b=b||new z;var c=this.points,d=(c.length-1)*a;a=Math.floor(d);d-=a;var e=c[0===a?a:a-1],f=c[a],g=c[a>c.length-2?c.length-1:a+1];c=c[a>c.length-3?c.length-1:a+2];b.set(tf(d,e.x,f.x,g.x,c.x),tf(d,e.y,f.y,g.y,c.y));return b};Ma.prototype.copy=function(a){Q.prototype.copy.call(this,a);this.points=[];for(var b=0,c=a.points.length;b<c;b++)this.points.push(a.points[b].clone());return this};Ma.prototype.toJSON=function(){var a=
+Q.prototype.toJSON.call(this);a.points=[];for(var b=0,c=this.points.length;b<c;b++)a.points.push(this.points[b].toArray());return a};Ma.prototype.fromJSON=function(a){Q.prototype.fromJSON.call(this,a);this.points=[];for(var b=0,c=a.points.length;b<c;b++){var d=a.points[b];this.points.push((new z).fromArray(d))}return this};var Gf=Object.freeze({ArcCurve:jc,CatmullRomCurve3:ua,CubicBezierCurve:Ja,CubicBezierCurve3:Ua,EllipseCurve:wa,LineCurve:Aa,LineCurve3:Ka,QuadraticBezierCurve:La,QuadraticBezierCurve3:Va,
+SplineCurve:Ma});ab.prototype=Object.assign(Object.create(Q.prototype),{constructor:ab,add:function(a){this.curves.push(a)},closePath:function(){var a=this.curves[0].getPoint(0),b=this.curves[this.curves.length-1].getPoint(1);a.equals(b)||this.curves.push(new Aa(b,a))},getPoint:function(a){var b=a*this.getLength(),c=this.getCurveLengths();for(a=0;a<c.length;){if(c[a]>=b)return b=c[a]-b,a=this.curves[a],c=a.getLength(),a.getPointAt(0===c?0:1-b/c);a++}return null},getLength:function(){var a=this.getCurveLengths();
+return a[a.length-1]},updateArcLengths:function(){this.needsUpdate=!0;this.cacheLengths=null;this.getCurveLengths()},getCurveLengths:function(){if(this.cacheLengths&&this.cacheLengths.length===this.curves.length)return this.cacheLengths;for(var a=[],b=0,c=0,d=this.curves.length;c<d;c++)b+=this.curves[c].getLength(),a.push(b);return this.cacheLengths=a},getSpacedPoints:function(a){void 0===a&&(a=40);for(var b=[],c=0;c<=a;c++)b.push(this.getPoint(c/a));this.autoClose&&b.push(b[0]);return b},getPoints:function(a){a=
+a||12;for(var b=[],c,d=0,e=this.curves;d<e.length;d++){var f=e[d];f=f.getPoints(f&&f.isEllipseCurve?2*a:f&&(f.isLineCurve||f.isLineCurve3)?1:f&&f.isSplineCurve?a*f.points.length:a);for(var g=0;g<f.length;g++){var h=f[g];c&&c.equals(h)||(b.push(h),c=h)}}this.autoClose&&1<b.length&&!b[b.length-1].equals(b[0])&&b.push(b[0]);return b},copy:function(a){Q.prototype.copy.call(this,a);this.curves=[];for(var b=0,c=a.curves.length;b<c;b++)this.curves.push(a.curves[b].clone());this.autoClose=a.autoClose;return this},
+toJSON:function(){var a=Q.prototype.toJSON.call(this);a.autoClose=this.autoClose;a.curves=[];for(var b=0,c=this.curves.length;b<c;b++)a.curves.push(this.curves[b].toJSON());return a},fromJSON:function(a){Q.prototype.fromJSON.call(this,a);this.autoClose=a.autoClose;this.curves=[];for(var b=0,c=a.curves.length;b<c;b++){var d=a.curves[b];this.curves.push((new Gf[d.type]).fromJSON(d))}return this}});Na.prototype=Object.assign(Object.create(ab.prototype),{constructor:Na,setFromPoints:function(a){this.moveTo(a[0].x,
+a[0].y);for(var b=1,c=a.length;b<c;b++)this.lineTo(a[b].x,a[b].y)},moveTo:function(a,b){this.currentPoint.set(a,b)},lineTo:function(a,b){var c=new Aa(this.currentPoint.clone(),new z(a,b));this.curves.push(c);this.currentPoint.set(a,b)},quadraticCurveTo:function(a,b,c,d){a=new La(this.currentPoint.clone(),new z(a,b),new z(c,d));this.curves.push(a);this.currentPoint.set(c,d)},bezierCurveTo:function(a,b,c,d,e,f){a=new Ja(this.currentPoint.clone(),new z(a,b),new z(c,d),new z(e,f));this.curves.push(a);
+this.currentPoint.set(e,f)},splineThru:function(a){var b=[this.currentPoint.clone()].concat(a);b=new Ma(b);this.curves.push(b);this.currentPoint.copy(a[a.length-1])},arc:function(a,b,c,d,e,f){this.absarc(a+this.currentPoint.x,b+this.currentPoint.y,c,d,e,f)},absarc:function(a,b,c,d,e,f){this.absellipse(a,b,c,c,d,e,f)},ellipse:function(a,b,c,d,e,f,g,h){this.absellipse(a+this.currentPoint.x,b+this.currentPoint.y,c,d,e,f,g,h)},absellipse:function(a,b,c,d,e,f,g,h){a=new wa(a,b,c,d,e,f,g,h);0<this.curves.length&&
+(b=a.getPoint(0),b.equals(this.currentPoint)||this.lineTo(b.x,b.y));this.curves.push(a);a=a.getPoint(1);this.currentPoint.copy(a)},copy:function(a){ab.prototype.copy.call(this,a);this.currentPoint.copy(a.currentPoint);return this},toJSON:function(){var a=ab.prototype.toJSON.call(this);a.currentPoint=this.currentPoint.toArray();return a},fromJSON:function(a){ab.prototype.fromJSON.call(this,a);this.currentPoint.fromArray(a.currentPoint);return this}});ib.prototype=Object.assign(Object.create(Na.prototype),
+{constructor:ib,getPointsHoles:function(a){for(var b=[],c=0,d=this.holes.length;c<d;c++)b[c]=this.holes[c].getPoints(a);return b},extractPoints:function(a){return{shape:this.getPoints(a),holes:this.getPointsHoles(a)}},copy:function(a){Na.prototype.copy.call(this,a);this.holes=[];for(var b=0,c=a.holes.length;b<c;b++)this.holes.push(a.holes[b].clone());return this},toJSON:function(){var a=Na.prototype.toJSON.call(this);a.uuid=this.uuid;a.holes=[];for(var b=0,c=this.holes.length;b<c;b++)a.holes.push(this.holes[b].toJSON());
+return a},fromJSON:function(a){Na.prototype.fromJSON.call(this,a);this.uuid=a.uuid;this.holes=[];for(var b=0,c=a.holes.length;b<c;b++){var d=a.holes[b];this.holes.push((new Na).fromJSON(d))}return this}});ca.prototype=Object.assign(Object.create(D.prototype),{constructor:ca,isLight:!0,copy:function(a){D.prototype.copy.call(this,a);this.color.copy(a.color);this.intensity=a.intensity;return this},toJSON:function(a){a=D.prototype.toJSON.call(this,a);a.object.color=this.color.getHex();a.object.intensity=
+this.intensity;void 0!==this.groundColor&&(a.object.groundColor=this.groundColor.getHex());void 0!==this.distance&&(a.object.distance=this.distance);void 0!==this.angle&&(a.object.angle=this.angle);void 0!==this.decay&&(a.object.decay=this.decay);void 0!==this.penumbra&&(a.object.penumbra=this.penumbra);void 0!==this.shadow&&(a.object.shadow=this.shadow.toJSON());return a}});Hd.prototype=Object.assign(Object.create(ca.prototype),{constructor:Hd,isHemisphereLight:!0,copy:function(a){ca.prototype.copy.call(this,
+a);this.groundColor.copy(a.groundColor);return this}});Object.assign(Hb.prototype,{copy:function(a){this.camera=a.camera.clone();this.bias=a.bias;this.radius=a.radius;this.mapSize.copy(a.mapSize);return this},clone:function(){return(new this.constructor).copy(this)},toJSON:function(){var a={};0!==this.bias&&(a.bias=this.bias);1!==this.radius&&(a.radius=this.radius);if(512!==this.mapSize.x||512!==this.mapSize.y)a.mapSize=this.mapSize.toArray();a.camera=this.camera.toJSON(!1).object;delete a.camera.matrix;
+return a}});Id.prototype=Object.assign(Object.create(Hb.prototype),{constructor:Id,isSpotLightShadow:!0,update:function(a){var b=this.camera,c=2*R.RAD2DEG*a.angle,d=this.mapSize.width/this.mapSize.height;a=a.distance||b.far;if(c!==b.fov||d!==b.aspect||a!==b.far)b.fov=c,b.aspect=d,b.far=a,b.updateProjectionMatrix()}});Jd.prototype=Object.assign(Object.create(ca.prototype),{constructor:Jd,isSpotLight:!0,copy:function(a){ca.prototype.copy.call(this,a);this.distance=a.distance;this.angle=a.angle;this.penumbra=
+a.penumbra;this.decay=a.decay;this.target=a.target.clone();this.shadow=a.shadow.clone();return this}});Kd.prototype=Object.assign(Object.create(ca.prototype),{constructor:Kd,isPointLight:!0,copy:function(a){ca.prototype.copy.call(this,a);this.distance=a.distance;this.decay=a.decay;this.shadow=a.shadow.clone();return this}});hd.prototype=Object.assign(Object.create(Ra.prototype),{constructor:hd,isOrthographicCamera:!0,copy:function(a,b){Ra.prototype.copy.call(this,a,b);this.left=a.left;this.right=
+a.right;this.top=a.top;this.bottom=a.bottom;this.near=a.near;this.far=a.far;this.zoom=a.zoom;this.view=null===a.view?null:Object.assign({},a.view);return this},setViewOffset:function(a,b,c,d,e,f){null===this.view&&(this.view={enabled:!0,fullWidth:1,fullHeight:1,offsetX:0,offsetY:0,width:1,height:1});this.view.enabled=!0;this.view.fullWidth=a;this.view.fullHeight=b;this.view.offsetX=c;this.view.offsetY=d;this.view.width=e;this.view.height=f;this.updateProjectionMatrix()},clearViewOffset:function(){null!==
+this.view&&(this.view.enabled=!1);this.updateProjectionMatrix()},updateProjectionMatrix:function(){var a=(this.right-this.left)/(2*this.zoom),b=(this.top-this.bottom)/(2*this.zoom),c=(this.right+this.left)/2,d=(this.top+this.bottom)/2,e=c-a;c+=a;a=d+b;b=d-b;if(null!==this.view&&this.view.enabled){c=this.zoom/(this.view.width/this.view.fullWidth);b=this.zoom/(this.view.height/this.view.fullHeight);var f=(this.right-this.left)/this.view.width;d=(this.top-this.bottom)/this.view.height;e+=this.view.offsetX/
+c*f;c=e+this.view.width/c*f;a-=this.view.offsetY/b*d;b=a-this.view.height/b*d}this.projectionMatrix.makeOrthographic(e,c,a,b,this.near,this.far);this.projectionMatrixInverse.getInverse(this.projectionMatrix)},toJSON:function(a){a=D.prototype.toJSON.call(this,a);a.object.zoom=this.zoom;a.object.left=this.left;a.object.right=this.right;a.object.top=this.top;a.object.bottom=this.bottom;a.object.near=this.near;a.object.far=this.far;null!==this.view&&(a.object.view=Object.assign({},this.view));return a}});
+Ld.prototype=Object.assign(Object.create(Hb.prototype),{constructor:Ld});Md.prototype=Object.assign(Object.create(ca.prototype),{constructor:Md,isDirectionalLight:!0,copy:function(a){ca.prototype.copy.call(this,a);this.target=a.target.clone();this.shadow=a.shadow.clone();return this}});Nd.prototype=Object.assign(Object.create(ca.prototype),{constructor:Nd,isAmbientLight:!0});Od.prototype=Object.assign(Object.create(ca.prototype),{constructor:Od,isRectAreaLight:!0,copy:function(a){ca.prototype.copy.call(this,
+a);this.width=a.width;this.height=a.height;return this},toJSON:function(a){a=ca.prototype.toJSON.call(this,a);a.object.width=this.width;a.object.height=this.height;return a}});Object.assign(Pd.prototype,{load:function(a,b,c,d){var e=this,f=new Fa(e.manager);f.setPath(e.path);f.load(a,function(a){b(e.parse(JSON.parse(a)))},c,d)},parse:function(a){function b(a){void 0===c[a]&&console.warn("THREE.MaterialLoader: Undefined texture",a);return c[a]}var c=this.textures,d=new bh[a.type];void 0!==a.uuid&&
+(d.uuid=a.uuid);void 0!==a.name&&(d.name=a.name);void 0!==a.color&&d.color.setHex(a.color);void 0!==a.roughness&&(d.roughness=a.roughness);void 0!==a.metalness&&(d.metalness=a.metalness);void 0!==a.emissive&&d.emissive.setHex(a.emissive);void 0!==a.specular&&d.specular.setHex(a.specular);void 0!==a.shininess&&(d.shininess=a.shininess);void 0!==a.clearCoat&&(d.clearCoat=a.clearCoat);void 0!==a.clearCoatRoughness&&(d.clearCoatRoughness=a.clearCoatRoughness);void 0!==a.vertexColors&&(d.vertexColors=
+a.vertexColors);void 0!==a.fog&&(d.fog=a.fog);void 0!==a.flatShading&&(d.flatShading=a.flatShading);void 0!==a.blending&&(d.blending=a.blending);void 0!==a.combine&&(d.combine=a.combine);void 0!==a.side&&(d.side=a.side);void 0!==a.opacity&&(d.opacity=a.opacity);void 0!==a.transparent&&(d.transparent=a.transparent);void 0!==a.alphaTest&&(d.alphaTest=a.alphaTest);void 0!==a.depthTest&&(d.depthTest=a.depthTest);void 0!==a.depthWrite&&(d.depthWrite=a.depthWrite);void 0!==a.colorWrite&&(d.colorWrite=a.colorWrite);
+void 0!==a.wireframe&&(d.wireframe=a.wireframe);void 0!==a.wireframeLinewidth&&(d.wireframeLinewidth=a.wireframeLinewidth);void 0!==a.wireframeLinecap&&(d.wireframeLinecap=a.wireframeLinecap);void 0!==a.wireframeLinejoin&&(d.wireframeLinejoin=a.wireframeLinejoin);void 0!==a.rotation&&(d.rotation=a.rotation);1!==a.linewidth&&(d.linewidth=a.linewidth);void 0!==a.dashSize&&(d.dashSize=a.dashSize);void 0!==a.gapSize&&(d.gapSize=a.gapSize);void 0!==a.scale&&(d.scale=a.scale);void 0!==a.polygonOffset&&
+(d.polygonOffset=a.polygonOffset);void 0!==a.polygonOffsetFactor&&(d.polygonOffsetFactor=a.polygonOffsetFactor);void 0!==a.polygonOffsetUnits&&(d.polygonOffsetUnits=a.polygonOffsetUnits);void 0!==a.skinning&&(d.skinning=a.skinning);void 0!==a.morphTargets&&(d.morphTargets=a.morphTargets);void 0!==a.dithering&&(d.dithering=a.dithering);void 0!==a.visible&&(d.visible=a.visible);void 0!==a.userData&&(d.userData=a.userData);if(void 0!==a.uniforms)for(var e in a.uniforms){var f=a.uniforms[e];d.uniforms[e]=
+{};switch(f.type){case "t":d.uniforms[e].value=b(f.value);break;case "c":d.uniforms[e].value=(new G).setHex(f.value);break;case "v2":d.uniforms[e].value=(new z).fromArray(f.value);break;case "v3":d.uniforms[e].value=(new p).fromArray(f.value);break;case "v4":d.uniforms[e].value=(new Z).fromArray(f.value);break;case "m4":d.uniforms[e].value=(new P).fromArray(f.value);break;default:d.uniforms[e].value=f.value}}void 0!==a.defines&&(d.defines=a.defines);void 0!==a.vertexShader&&(d.vertexShader=a.vertexShader);
+void 0!==a.fragmentShader&&(d.fragmentShader=a.fragmentShader);void 0!==a.shading&&(d.flatShading=1===a.shading);void 0!==a.size&&(d.size=a.size);void 0!==a.sizeAttenuation&&(d.sizeAttenuation=a.sizeAttenuation);void 0!==a.map&&(d.map=b(a.map));void 0!==a.alphaMap&&(d.alphaMap=b(a.alphaMap),d.transparent=!0);void 0!==a.bumpMap&&(d.bumpMap=b(a.bumpMap));void 0!==a.bumpScale&&(d.bumpScale=a.bumpScale);void 0!==a.normalMap&&(d.normalMap=b(a.normalMap));void 0!==a.normalMapType&&(d.normalMapType=a.normalMapType);
+void 0!==a.normalScale&&(e=a.normalScale,!1===Array.isArray(e)&&(e=[e,e]),d.normalScale=(new z).fromArray(e));void 0!==a.displacementMap&&(d.displacementMap=b(a.displacementMap));void 0!==a.displacementScale&&(d.displacementScale=a.displacementScale);void 0!==a.displacementBias&&(d.displacementBias=a.displacementBias);void 0!==a.roughnessMap&&(d.roughnessMap=b(a.roughnessMap));void 0!==a.metalnessMap&&(d.metalnessMap=b(a.metalnessMap));void 0!==a.emissiveMap&&(d.emissiveMap=b(a.emissiveMap));void 0!==
+a.emissiveIntensity&&(d.emissiveIntensity=a.emissiveIntensity);void 0!==a.specularMap&&(d.specularMap=b(a.specularMap));void 0!==a.envMap&&(d.envMap=b(a.envMap));void 0!==a.envMapIntensity&&(d.envMapIntensity=a.envMapIntensity);void 0!==a.reflectivity&&(d.reflectivity=a.reflectivity);void 0!==a.lightMap&&(d.lightMap=b(a.lightMap));void 0!==a.lightMapIntensity&&(d.lightMapIntensity=a.lightMapIntensity);void 0!==a.aoMap&&(d.aoMap=b(a.aoMap));void 0!==a.aoMapIntensity&&(d.aoMapIntensity=a.aoMapIntensity);
+void 0!==a.gradientMap&&(d.gradientMap=b(a.gradientMap));return d},setPath:function(a){this.path=a;return this},setTextures:function(a){this.textures=a;return this}});var Vd={decodeText:function(a){if("undefined"!==typeof TextDecoder)return(new TextDecoder).decode(a);for(var b="",c=0,d=a.length;c<d;c++)b+=String.fromCharCode(a[c]);return decodeURIComponent(escape(b))},extractUrlBase:function(a){var b=a.lastIndexOf("/");return-1===b?"./":a.substr(0,b+1)}};Object.assign(ke.prototype,{load:function(a,
+b,c,d){var e=this,f=new Fa(e.manager);f.setPath(e.path);f.load(a,function(a){b(e.parse(JSON.parse(a)))},c,d)},parse:function(a){var b=new E,c=a.data.index;void 0!==c&&(c=new Hf[c.type](c.array),b.setIndex(new F(c,1)));var d=a.data.attributes;for(f in d){var e=d[f];c=new Hf[e.type](e.array);b.addAttribute(f,new F(c,e.itemSize,e.normalized))}var f=a.data.groups||a.data.drawcalls||a.data.offsets;if(void 0!==f)for(c=0,d=f.length;c!==d;++c)e=f[c],b.addGroup(e.start,e.count,e.materialIndex);a=a.data.boundingSphere;
+void 0!==a&&(f=new p,void 0!==a.center&&f.fromArray(a.center),b.boundingSphere=new Ga(f,a.radius));return b},setPath:function(a){this.path=a;return this}});var Hf={Int8Array:Int8Array,Uint8Array:Uint8Array,Uint8ClampedArray:"undefined"!==typeof Uint8ClampedArray?Uint8ClampedArray:Uint8Array,Int16Array:Int16Array,Uint16Array:Uint16Array,Int32Array:Int32Array,Uint32Array:Uint32Array,Float32Array:Float32Array,Float64Array:Float64Array};kc.Handlers={handlers:[],add:function(a,b){this.handlers.push(a,
+b)},get:function(a){for(var b=this.handlers,c=0,d=b.length;c<d;c+=2){var e=b[c+1];if(b[c].test(a))return e}return null}};Object.assign(kc.prototype,{crossOrigin:"anonymous",onLoadStart:function(){},onLoadProgress:function(){},onLoadComplete:function(){},initMaterials:function(a,b,c){for(var d=[],e=0;e<a.length;++e)d[e]=this.createMaterial(a[e],b,c);return d},createMaterial:function(){var a={NoBlending:0,NormalBlending:1,AdditiveBlending:2,SubtractiveBlending:3,MultiplyBlending:4,CustomBlending:5},
+b=new G,c=new Gd,d=new Pd;return function(e,f,g){function h(a,b,d,e,h){a=f+a;var m=kc.Handlers.get(a);null!==m?a=m.load(a):(c.setCrossOrigin(g),a=c.load(a));void 0!==b&&(a.repeat.fromArray(b),1!==b[0]&&(a.wrapS=1E3),1!==b[1]&&(a.wrapT=1E3));void 0!==d&&a.offset.fromArray(d);void 0!==e&&("repeat"===e[0]&&(a.wrapS=1E3),"mirror"===e[0]&&(a.wrapS=1002),"repeat"===e[1]&&(a.wrapT=1E3),"mirror"===e[1]&&(a.wrapT=1002));void 0!==h&&(a.anisotropy=h);b=R.generateUUID();k[b]=a;return b}var k={},m={uuid:R.generateUUID(),
+type:"MeshLambertMaterial"},l;for(l in e){var n=e[l];switch(l){case "DbgColor":case "DbgIndex":case "opticalDensity":case "illumination":break;case "DbgName":m.name=n;break;case "blending":m.blending=a[n];break;case "colorAmbient":case "mapAmbient":console.warn("THREE.Loader.createMaterial:",l,"is no longer supported.");break;case "colorDiffuse":m.color=b.fromArray(n).getHex();break;case "colorSpecular":m.specular=b.fromArray(n).getHex();break;case "colorEmissive":m.emissive=b.fromArray(n).getHex();
+break;case "specularCoef":m.shininess=n;break;case "shading":"basic"===n.toLowerCase()&&(m.type="MeshBasicMaterial");"phong"===n.toLowerCase()&&(m.type="MeshPhongMaterial");"standard"===n.toLowerCase()&&(m.type="MeshStandardMaterial");break;case "mapDiffuse":m.map=h(n,e.mapDiffuseRepeat,e.mapDiffuseOffset,e.mapDiffuseWrap,e.mapDiffuseAnisotropy);break;case "mapDiffuseRepeat":case "mapDiffuseOffset":case "mapDiffuseWrap":case "mapDiffuseAnisotropy":break;case "mapEmissive":m.emissiveMap=h(n,e.mapEmissiveRepeat,
+e.mapEmissiveOffset,e.mapEmissiveWrap,e.mapEmissiveAnisotropy);break;case "mapEmissiveRepeat":case "mapEmissiveOffset":case "mapEmissiveWrap":case "mapEmissiveAnisotropy":break;case "mapLight":m.lightMap=h(n,e.mapLightRepeat,e.mapLightOffset,e.mapLightWrap,e.mapLightAnisotropy);break;case "mapLightRepeat":case "mapLightOffset":case "mapLightWrap":case "mapLightAnisotropy":break;case "mapAO":m.aoMap=h(n,e.mapAORepeat,e.mapAOOffset,e.mapAOWrap,e.mapAOAnisotropy);break;case "mapAORepeat":case "mapAOOffset":case "mapAOWrap":case "mapAOAnisotropy":break;
+case "mapBump":m.bumpMap=h(n,e.mapBumpRepeat,e.mapBumpOffset,e.mapBumpWrap,e.mapBumpAnisotropy);break;case "mapBumpScale":m.bumpScale=n;break;case "mapBumpRepeat":case "mapBumpOffset":case "mapBumpWrap":case "mapBumpAnisotropy":break;case "mapNormal":m.normalMap=h(n,e.mapNormalRepeat,e.mapNormalOffset,e.mapNormalWrap,e.mapNormalAnisotropy);break;case "mapNormalFactor":m.normalScale=n;break;case "mapNormalRepeat":case "mapNormalOffset":case "mapNormalWrap":case "mapNormalAnisotropy":break;case "mapSpecular":m.specularMap=
+h(n,e.mapSpecularRepeat,e.mapSpecularOffset,e.mapSpecularWrap,e.mapSpecularAnisotropy);break;case "mapSpecularRepeat":case "mapSpecularOffset":case "mapSpecularWrap":case "mapSpecularAnisotropy":break;case "mapMetalness":m.metalnessMap=h(n,e.mapMetalnessRepeat,e.mapMetalnessOffset,e.mapMetalnessWrap,e.mapMetalnessAnisotropy);break;case "mapMetalnessRepeat":case "mapMetalnessOffset":case "mapMetalnessWrap":case "mapMetalnessAnisotropy":break;case "mapRoughness":m.roughnessMap=h(n,e.mapRoughnessRepeat,
+e.mapRoughnessOffset,e.mapRoughnessWrap,e.mapRoughnessAnisotropy);break;case "mapRoughnessRepeat":case "mapRoughnessOffset":case "mapRoughnessWrap":case "mapRoughnessAnisotropy":break;case "mapAlpha":m.alphaMap=h(n,e.mapAlphaRepeat,e.mapAlphaOffset,e.mapAlphaWrap,e.mapAlphaAnisotropy);break;case "mapAlphaRepeat":case "mapAlphaOffset":case "mapAlphaWrap":case "mapAlphaAnisotropy":break;case "flipSided":m.side=1;break;case "doubleSided":m.side=2;break;case "transparency":console.warn("THREE.Loader.createMaterial: transparency has been renamed to opacity");
+m.opacity=n;break;case "depthTest":case "depthWrite":case "colorWrite":case "opacity":case "reflectivity":case "transparent":case "visible":case "wireframe":m[l]=n;break;case "vertexColors":!0===n&&(m.vertexColors=2);"face"===n&&(m.vertexColors=1);break;default:console.error("THREE.Loader.createMaterial: Unsupported",l,n)}}"MeshBasicMaterial"===m.type&&delete m.emissive;"MeshPhongMaterial"!==m.type&&delete m.specular;1>m.opacity&&(m.transparent=!0);d.setTextures(k);return d.parse(m)}}()});Object.assign(Qd.prototype,
+{crossOrigin:"anonymous",load:function(a,b,c,d){var e=this,f=void 0===this.path?Vd.extractUrlBase(a):this.path,g=new Fa(this.manager);g.setPath(this.path);g.setWithCredentials(this.withCredentials);g.load(a,function(c){c=JSON.parse(c);var d=c.metadata;if(void 0!==d&&(d=d.type,void 0!==d&&"object"===d.toLowerCase())){console.error("THREE.JSONLoader: "+a+" should be loaded with THREE.ObjectLoader instead.");return}c=e.parse(c,f);b(c.geometry,c.materials)},c,d)},setPath:function(a){this.path=a;return this},
+setResourcePath:function(a){this.resourcePath=a;return this},setCrossOrigin:function(a){this.crossOrigin=a;return this},parse:function(){return function(a,b){void 0!==a.data&&(a=a.data);a.scale=void 0!==a.scale?1/a.scale:1;var c=new I,d=a,e,f,g,h=d.faces;var k=d.vertices;var m=d.normals,l=d.colors;var n=d.scale;var r=0;if(void 0!==d.uvs){for(e=0;e<d.uvs.length;e++)d.uvs[e].length&&r++;for(e=0;e<r;e++)c.faceVertexUvs[e]=[]}var x=0;for(g=k.length;x<g;)e=new p,e.x=k[x++]*n,e.y=k[x++]*n,e.z=k[x++]*n,
+c.vertices.push(e);x=0;for(g=h.length;x<g;){k=h[x++];var t=k&1;var u=k&2;e=k&8;var w=k&16;var A=k&32;n=k&64;k&=128;if(t){t=new Xa;t.a=h[x];t.b=h[x+1];t.c=h[x+3];var v=new Xa;v.a=h[x+1];v.b=h[x+2];v.c=h[x+3];x+=4;u&&(u=h[x++],t.materialIndex=u,v.materialIndex=u);u=c.faces.length;if(e)for(e=0;e<r;e++){var C=d.uvs[e];c.faceVertexUvs[e][u]=[];c.faceVertexUvs[e][u+1]=[];for(f=0;4>f;f++){var y=h[x++];var D=C[2*y];y=C[2*y+1];D=new z(D,y);2!==f&&c.faceVertexUvs[e][u].push(D);0!==f&&c.faceVertexUvs[e][u+1].push(D)}}w&&
+(w=3*h[x++],t.normal.set(m[w++],m[w++],m[w]),v.normal.copy(t.normal));if(A)for(e=0;4>e;e++)w=3*h[x++],A=new p(m[w++],m[w++],m[w]),2!==e&&t.vertexNormals.push(A),0!==e&&v.vertexNormals.push(A);n&&(n=h[x++],n=l[n],t.color.setHex(n),v.color.setHex(n));if(k)for(e=0;4>e;e++)n=h[x++],n=l[n],2!==e&&t.vertexColors.push(new G(n)),0!==e&&v.vertexColors.push(new G(n));c.faces.push(t);c.faces.push(v)}else{t=new Xa;t.a=h[x++];t.b=h[x++];t.c=h[x++];u&&(u=h[x++],t.materialIndex=u);u=c.faces.length;if(e)for(e=0;e<
+r;e++)for(C=d.uvs[e],c.faceVertexUvs[e][u]=[],f=0;3>f;f++)y=h[x++],D=C[2*y],y=C[2*y+1],D=new z(D,y),c.faceVertexUvs[e][u].push(D);w&&(w=3*h[x++],t.normal.set(m[w++],m[w++],m[w]));if(A)for(e=0;3>e;e++)w=3*h[x++],A=new p(m[w++],m[w++],m[w]),t.vertexNormals.push(A);n&&(n=h[x++],t.color.setHex(l[n]));if(k)for(e=0;3>e;e++)n=h[x++],t.vertexColors.push(new G(l[n]));c.faces.push(t)}}d=a;x=void 0!==d.influencesPerVertex?d.influencesPerVertex:2;if(d.skinWeights)for(g=0,h=d.skinWeights.length;g<h;g+=x)c.skinWeights.push(new Z(d.skinWeights[g],
+1<x?d.skinWeights[g+1]:0,2<x?d.skinWeights[g+2]:0,3<x?d.skinWeights[g+3]:0));if(d.skinIndices)for(g=0,h=d.skinIndices.length;g<h;g+=x)c.skinIndices.push(new Z(d.skinIndices[g],1<x?d.skinIndices[g+1]:0,2<x?d.skinIndices[g+2]:0,3<x?d.skinIndices[g+3]:0));c.bones=d.bones;c.bones&&0<c.bones.length&&(c.skinWeights.length!==c.skinIndices.length||c.skinIndices.length!==c.vertices.length)&&console.warn("When skinning, number of vertices ("+c.vertices.length+"), skinIndices ("+c.skinIndices.length+"), and skinWeights ("+
+c.skinWeights.length+") should match.");g=a;h=g.scale;if(void 0!==g.morphTargets)for(d=0,x=g.morphTargets.length;d<x;d++)for(c.morphTargets[d]={},c.morphTargets[d].name=g.morphTargets[d].name,c.morphTargets[d].vertices=[],m=c.morphTargets[d].vertices,l=g.morphTargets[d].vertices,r=0,k=l.length;r<k;r+=3)n=new p,n.x=l[r]*h,n.y=l[r+1]*h,n.z=l[r+2]*h,m.push(n);if(void 0!==g.morphColors&&0<g.morphColors.length)for(console.warn('THREE.JSONLoader: "morphColors" no longer supported. Using them as face colors.'),
+h=c.faces,g=g.morphColors[0].colors,d=0,x=h.length;d<x;d++)h[d].color.fromArray(g,3*d);g=a;d=[];x=[];void 0!==g.animation&&x.push(g.animation);void 0!==g.animations&&(g.animations.length?x=x.concat(g.animations):x.push(g.animations));for(g=0;g<x.length;g++)(h=za.parseAnimation(x[g],c.bones))&&d.push(h);c.morphTargets&&(x=za.CreateClipsFromMorphTargetSequences(c.morphTargets,10),d=d.concat(x));0<d.length&&(c.animations=d);c.computeFaceNormals();c.computeBoundingSphere();if(void 0===a.materials||0===
+a.materials.length)return{geometry:c};a=kc.prototype.initMaterials(a.materials,this.resourcePath||b,this.crossOrigin);return{geometry:c,materials:a}}}()});Object.assign(le.prototype,{crossOrigin:"anonymous",load:function(a,b,c,d){var e=this,f=void 0===this.path?Vd.extractUrlBase(a):this.path;this.resourcePath=this.resourcePath||f;f=new Fa(e.manager);f.setPath(this.path);f.load(a,function(c){var f=null;try{f=JSON.parse(c)}catch(k){void 0!==d&&d(k);console.error("THREE:ObjectLoader: Can't parse "+a+
+".",k.message);return}c=f.metadata;void 0===c||void 0===c.type||"geometry"===c.type.toLowerCase()?console.error("THREE.ObjectLoader: Can't load "+a+". Use THREE.JSONLoader instead."):e.parse(f,b)},c,d)},setPath:function(a){this.path=a;return this},setResourcePath:function(a){this.resourcePath=a;return this},setCrossOrigin:function(a){this.crossOrigin=a;return this},parse:function(a,b){var c=this.parseShape(a.shapes);c=this.parseGeometries(a.geometries,c);var d=this.parseImages(a.images,function(){void 0!==
+b&&b(e)});d=this.parseTextures(a.textures,d);d=this.parseMaterials(a.materials,d);var e=this.parseObject(a.object,c,d);a.animations&&(e.animations=this.parseAnimations(a.animations));void 0!==a.images&&0!==a.images.length||void 0===b||b(e);return e},parseShape:function(a){var b={};if(void 0!==a)for(var c=0,d=a.length;c<d;c++){var e=(new ib).fromJSON(a[c]);b[e.uuid]=e}return b},parseGeometries:function(a,b){var c={};if(void 0!==a)for(var d=new Qd,e=new ke,f=0,g=a.length;f<g;f++){var h=a[f];switch(h.type){case "PlaneGeometry":case "PlaneBufferGeometry":var k=
+new Ba[h.type](h.width,h.height,h.widthSegments,h.heightSegments);break;case "BoxGeometry":case "BoxBufferGeometry":case "CubeGeometry":k=new Ba[h.type](h.width,h.height,h.depth,h.widthSegments,h.heightSegments,h.depthSegments);break;case "CircleGeometry":case "CircleBufferGeometry":k=new Ba[h.type](h.radius,h.segments,h.thetaStart,h.thetaLength);break;case "CylinderGeometry":case "CylinderBufferGeometry":k=new Ba[h.type](h.radiusTop,h.radiusBottom,h.height,h.radialSegments,h.heightSegments,h.openEnded,
+h.thetaStart,h.thetaLength);break;case "ConeGeometry":case "ConeBufferGeometry":k=new Ba[h.type](h.radius,h.height,h.radialSegments,h.heightSegments,h.openEnded,h.thetaStart,h.thetaLength);break;case "SphereGeometry":case "SphereBufferGeometry":k=new Ba[h.type](h.radius,h.widthSegments,h.heightSegments,h.phiStart,h.phiLength,h.thetaStart,h.thetaLength);break;case "DodecahedronGeometry":case "DodecahedronBufferGeometry":case "IcosahedronGeometry":case "IcosahedronBufferGeometry":case "OctahedronGeometry":case "OctahedronBufferGeometry":case "TetrahedronGeometry":case "TetrahedronBufferGeometry":k=
+new Ba[h.type](h.radius,h.detail);break;case "RingGeometry":case "RingBufferGeometry":k=new Ba[h.type](h.innerRadius,h.outerRadius,h.thetaSegments,h.phiSegments,h.thetaStart,h.thetaLength);break;case "TorusGeometry":case "TorusBufferGeometry":k=new Ba[h.type](h.radius,h.tube,h.radialSegments,h.tubularSegments,h.arc);break;case "TorusKnotGeometry":case "TorusKnotBufferGeometry":k=new Ba[h.type](h.radius,h.tube,h.tubularSegments,h.radialSegments,h.p,h.q);break;case "LatheGeometry":case "LatheBufferGeometry":k=
+new Ba[h.type](h.points,h.segments,h.phiStart,h.phiLength);break;case "PolyhedronGeometry":case "PolyhedronBufferGeometry":k=new Ba[h.type](h.vertices,h.indices,h.radius,h.details);break;case "ShapeGeometry":case "ShapeBufferGeometry":k=[];for(var m=0,l=h.shapes.length;m<l;m++){var n=b[h.shapes[m]];k.push(n)}k=new Ba[h.type](k,h.curveSegments);break;case "ExtrudeGeometry":case "ExtrudeBufferGeometry":k=[];m=0;for(l=h.shapes.length;m<l;m++)n=b[h.shapes[m]],k.push(n);m=h.options.extrudePath;void 0!==
+m&&(h.options.extrudePath=(new Gf[m.type]).fromJSON(m));k=new Ba[h.type](k,h.options);break;case "BufferGeometry":k=e.parse(h);break;case "Geometry":k=d.parse(h,this.resourcePath).geometry;break;default:console.warn('THREE.ObjectLoader: Unsupported geometry type "'+h.type+'"');continue}k.uuid=h.uuid;void 0!==h.name&&(k.name=h.name);!0===k.isBufferGeometry&&void 0!==h.userData&&(k.userData=h.userData);c[h.uuid]=k}return c},parseMaterials:function(a,b){var c={},d={};if(void 0!==a){var e=new Pd;e.setTextures(b);
+b=0;for(var f=a.length;b<f;b++){var g=a[b];if("MultiMaterial"===g.type){for(var h=[],k=0;k<g.materials.length;k++){var m=g.materials[k];void 0===c[m.uuid]&&(c[m.uuid]=e.parse(m));h.push(c[m.uuid])}d[g.uuid]=h}else d[g.uuid]=e.parse(g),c[g.uuid]=d[g.uuid]}}return d},parseAnimations:function(a){for(var b=[],c=0;c<a.length;c++){var d=a[c],e=za.parse(d);void 0!==d.uuid&&(e.uuid=d.uuid);b.push(e)}return b},parseImages:function(a,b){function c(a){d.manager.itemStart(a);return f.load(a,function(){d.manager.itemEnd(a)},
+void 0,function(){d.manager.itemError(a);d.manager.itemEnd(a)})}var d=this,e={};if(void 0!==a&&0<a.length){b=new ge(b);var f=new ed(b);f.setCrossOrigin(this.crossOrigin);b=0;for(var g=a.length;b<g;b++){var h=a[b],k=h.url;if(Array.isArray(k)){e[h.uuid]=[];for(var m=0,l=k.length;m<l;m++){var n=k[m];n=/^(\/\/)|([a-z]+:(\/\/)?)/i.test(n)?n:d.resourcePath+n;e[h.uuid].push(c(n))}}else n=/^(\/\/)|([a-z]+:(\/\/)?)/i.test(h.url)?h.url:d.resourcePath+h.url,e[h.uuid]=c(n)}}return e},parseTextures:function(a,
+b){function c(a,b){if("number"===typeof a)return a;console.warn("THREE.ObjectLoader.parseTexture: Constant should be in numeric form.",a);return b[a]}var d={};if(void 0!==a)for(var e=0,f=a.length;e<f;e++){var g=a[e];void 0===g.image&&console.warn('THREE.ObjectLoader: No "image" specified for',g.uuid);void 0===b[g.image]&&console.warn("THREE.ObjectLoader: Undefined image",g.image);var h=Array.isArray(b[g.image])?new Ya(b[g.image]):new W(b[g.image]);h.needsUpdate=!0;h.uuid=g.uuid;void 0!==g.name&&(h.name=
+g.name);void 0!==g.mapping&&(h.mapping=c(g.mapping,ch));void 0!==g.offset&&h.offset.fromArray(g.offset);void 0!==g.repeat&&h.repeat.fromArray(g.repeat);void 0!==g.center&&h.center.fromArray(g.center);void 0!==g.rotation&&(h.rotation=g.rotation);void 0!==g.wrap&&(h.wrapS=c(g.wrap[0],If),h.wrapT=c(g.wrap[1],If));void 0!==g.format&&(h.format=g.format);void 0!==g.minFilter&&(h.minFilter=c(g.minFilter,Jf));void 0!==g.magFilter&&(h.magFilter=c(g.magFilter,Jf));void 0!==g.anisotropy&&(h.anisotropy=g.anisotropy);
+void 0!==g.flipY&&(h.flipY=g.flipY);d[g.uuid]=h}return d},parseObject:function(a,b,c){function d(a){void 0===b[a]&&console.warn("THREE.ObjectLoader: Undefined geometry",a);return b[a]}function e(a){if(void 0!==a){if(Array.isArray(a)){for(var b=[],d=0,e=a.length;d<e;d++){var f=a[d];void 0===c[f]&&console.warn("THREE.ObjectLoader: Undefined material",f);b.push(c[f])}return b}void 0===c[a]&&console.warn("THREE.ObjectLoader: Undefined material",a);return c[a]}}switch(a.type){case "Scene":var f=new vd;
+void 0!==a.background&&Number.isInteger(a.background)&&(f.background=new G(a.background));void 0!==a.fog&&("Fog"===a.fog.type?f.fog=new Qb(a.fog.color,a.fog.near,a.fog.far):"FogExp2"===a.fog.type&&(f.fog=new Pb(a.fog.color,a.fog.density)));break;case "PerspectiveCamera":f=new V(a.fov,a.aspect,a.near,a.far);void 0!==a.focus&&(f.focus=a.focus);void 0!==a.zoom&&(f.zoom=a.zoom);void 0!==a.filmGauge&&(f.filmGauge=a.filmGauge);void 0!==a.filmOffset&&(f.filmOffset=a.filmOffset);void 0!==a.view&&(f.view=
+Object.assign({},a.view));break;case "OrthographicCamera":f=new hd(a.left,a.right,a.top,a.bottom,a.near,a.far);void 0!==a.zoom&&(f.zoom=a.zoom);void 0!==a.view&&(f.view=Object.assign({},a.view));break;case "AmbientLight":f=new Nd(a.color,a.intensity);break;case "DirectionalLight":f=new Md(a.color,a.intensity);break;case "PointLight":f=new Kd(a.color,a.intensity,a.distance,a.decay);break;case "RectAreaLight":f=new Od(a.color,a.intensity,a.width,a.height);break;case "SpotLight":f=new Jd(a.color,a.intensity,
+a.distance,a.angle,a.penumbra,a.decay);break;case "HemisphereLight":f=new Hd(a.color,a.groundColor,a.intensity);break;case "SkinnedMesh":console.warn("THREE.ObjectLoader.parseObject() does not support SkinnedMesh yet.");case "Mesh":f=d(a.geometry);var g=e(a.material);f=f.bones&&0<f.bones.length?new xd(f,g):new pa(f,g);break;case "LOD":f=new Fc;break;case "Line":f=new ma(d(a.geometry),e(a.material),a.mode);break;case "LineLoop":f=new yd(d(a.geometry),e(a.material));break;case "LineSegments":f=new S(d(a.geometry),
+e(a.material));break;case "PointCloud":case "Points":f=new Sb(d(a.geometry),e(a.material));break;case "Sprite":f=new Ec(e(a.material));break;case "Group":f=new Ob;break;default:f=new D}f.uuid=a.uuid;void 0!==a.name&&(f.name=a.name);void 0!==a.matrix?(f.matrix.fromArray(a.matrix),void 0!==a.matrixAutoUpdate&&(f.matrixAutoUpdate=a.matrixAutoUpdate),f.matrixAutoUpdate&&f.matrix.decompose(f.position,f.quaternion,f.scale)):(void 0!==a.position&&f.position.fromArray(a.position),void 0!==a.rotation&&f.rotation.fromArray(a.rotation),
+void 0!==a.quaternion&&f.quaternion.fromArray(a.quaternion),void 0!==a.scale&&f.scale.fromArray(a.scale));void 0!==a.castShadow&&(f.castShadow=a.castShadow);void 0!==a.receiveShadow&&(f.receiveShadow=a.receiveShadow);a.shadow&&(void 0!==a.shadow.bias&&(f.shadow.bias=a.shadow.bias),void 0!==a.shadow.radius&&(f.shadow.radius=a.shadow.radius),void 0!==a.shadow.mapSize&&f.shadow.mapSize.fromArray(a.shadow.mapSize),void 0!==a.shadow.camera&&(f.shadow.camera=this.parseObject(a.shadow.camera)));void 0!==
+a.visible&&(f.visible=a.visible);void 0!==a.frustumCulled&&(f.frustumCulled=a.frustumCulled);void 0!==a.renderOrder&&(f.renderOrder=a.renderOrder);void 0!==a.userData&&(f.userData=a.userData);void 0!==a.layers&&(f.layers.mask=a.layers);if(void 0!==a.children){g=a.children;for(var h=0;h<g.length;h++)f.add(this.parseObject(g[h],b,c))}if("LOD"===a.type)for(a=a.levels,g=0;g<a.length;g++){h=a[g];var k=f.getObjectByProperty("uuid",h.object);void 0!==k&&f.addLevel(k,h.distance)}return f}});var ch={UVMapping:300,
+CubeReflectionMapping:301,CubeRefractionMapping:302,EquirectangularReflectionMapping:303,EquirectangularRefractionMapping:304,SphericalReflectionMapping:305,CubeUVReflectionMapping:306,CubeUVRefractionMapping:307},If={RepeatWrapping:1E3,ClampToEdgeWrapping:1001,MirroredRepeatWrapping:1002},Jf={NearestFilter:1003,NearestMipMapNearestFilter:1004,NearestMipMapLinearFilter:1005,LinearFilter:1006,LinearMipMapNearestFilter:1007,LinearMipMapLinearFilter:1008};me.prototype={constructor:me,setOptions:function(a){this.options=
+a;return this},load:function(a,b,c,d){void 0===a&&(a="");void 0!==this.path&&(a=this.path+a);a=this.manager.resolveURL(a);var e=this,f=Ib.get(a);if(void 0!==f)return e.manager.itemStart(a),setTimeout(function(){b&&b(f);e.manager.itemEnd(a)},0),f;fetch(a).then(function(a){return a.blob()}).then(function(a){return createImageBitmap(a,e.options)}).then(function(c){Ib.add(a,c);b&&b(c);e.manager.itemEnd(a)}).catch(function(b){d&&d(b);e.manager.itemError(a);e.manager.itemEnd(a)})},setCrossOrigin:function(){return this},
+setPath:function(a){this.path=a;return this}};Object.assign(ne.prototype,{moveTo:function(a,b){this.currentPath=new Na;this.subPaths.push(this.currentPath);this.currentPath.moveTo(a,b)},lineTo:function(a,b){this.currentPath.lineTo(a,b)},quadraticCurveTo:function(a,b,c,d){this.currentPath.quadraticCurveTo(a,b,c,d)},bezierCurveTo:function(a,b,c,d,e,f){this.currentPath.bezierCurveTo(a,b,c,d,e,f)},splineThru:function(a){this.currentPath.splineThru(a)},toShapes:function(a,b){function c(a){for(var b=[],
+c=0,d=a.length;c<d;c++){var e=a[c],f=new ib;f.curves=e.curves;b.push(f)}return b}function d(a,b){for(var c=b.length,d=!1,e=c-1,f=0;f<c;e=f++){var g=b[e],h=b[f],k=h.x-g.x,m=h.y-g.y;if(Math.abs(m)>Number.EPSILON){if(0>m&&(g=b[f],k=-k,h=b[e],m=-m),!(a.y<g.y||a.y>h.y))if(a.y===g.y){if(a.x===g.x)return!0}else{e=m*(a.x-g.x)-k*(a.y-g.y);if(0===e)return!0;0>e||(d=!d)}}else if(a.y===g.y&&(h.x<=a.x&&a.x<=g.x||g.x<=a.x&&a.x<=h.x))return!0}return d}var e=Za.isClockWise,f=this.subPaths;if(0===f.length)return[];
+if(!0===b)return c(f);b=[];if(1===f.length){var g=f[0];var h=new ib;h.curves=g.curves;b.push(h);return b}var k=!e(f[0].getPoints());k=a?!k:k;h=[];var m=[],l=[],n=0;m[n]=void 0;l[n]=[];for(var p=0,x=f.length;p<x;p++){g=f[p];var t=g.getPoints();var u=e(t);(u=a?!u:u)?(!k&&m[n]&&n++,m[n]={s:new ib,p:t},m[n].s.curves=g.curves,k&&n++,l[n]=[]):l[n].push({h:g,p:t[0]})}if(!m[0])return c(f);if(1<m.length){p=!1;a=[];e=0;for(f=m.length;e<f;e++)h[e]=[];e=0;for(f=m.length;e<f;e++)for(g=l[e],u=0;u<g.length;u++){k=
+g[u];n=!0;for(t=0;t<m.length;t++)d(k.p,m[t].p)&&(e!==t&&a.push({froms:e,tos:t,hole:u}),n?(n=!1,h[t].push(k)):p=!0);n&&h[e].push(k)}0<a.length&&(p||(l=h))}p=0;for(e=m.length;p<e;p++)for(h=m[p].s,b.push(h),a=l[p],f=0,g=a.length;f<g;f++)h.holes.push(a[f].h);return b}});Object.assign(oe.prototype,{isFont:!0,generateShapes:function(a,b){void 0===b&&(b=100);var c=[],d=b;b=this.data;var e=Array.from?Array.from(a):String(a).split("");d/=b.resolution;var f=(b.boundingBox.yMax-b.boundingBox.yMin+b.underlineThickness)*
+d;a=[];for(var g=0,h=0,k=0;k<e.length;k++){var m=e[k];if("\n"===m)g=0,h-=f;else{var l=d;var n=g,p=h;if(m=b.glyphs[m]||b.glyphs["?"]){var x=new ne;if(m.o)for(var t=m._cachedOutline||(m._cachedOutline=m.o.split(" ")),u=0,w=t.length;u<w;)switch(t[u++]){case "m":var A=t[u++]*l+n;var v=t[u++]*l+p;x.moveTo(A,v);break;case "l":A=t[u++]*l+n;v=t[u++]*l+p;x.lineTo(A,v);break;case "q":var z=t[u++]*l+n;var y=t[u++]*l+p;var C=t[u++]*l+n;var D=t[u++]*l+p;x.quadraticCurveTo(C,D,z,y);break;case "b":z=t[u++]*l+n,
+y=t[u++]*l+p,C=t[u++]*l+n,D=t[u++]*l+p,A=t[u++]*l+n,v=t[u++]*l+p,x.bezierCurveTo(C,D,A,v,z,y)}l={offsetX:m.ha*l,path:x}}else l=void 0;g+=l.offsetX;a.push(l.path)}}b=0;for(e=a.length;b<e;b++)Array.prototype.push.apply(c,a[b].toShapes());return c}});Object.assign(uf.prototype,{load:function(a,b,c,d){var e=this,f=new Fa(this.manager);f.setPath(this.path);f.load(a,function(a){try{var c=JSON.parse(a)}catch(k){console.warn("THREE.FontLoader: typeface.js support is being deprecated. Use typeface.json instead."),
+c=JSON.parse(a.substring(65,a.length-2))}a=e.parse(c);b&&b(a)},c,d)},parse:function(a){return new oe(a)},setPath:function(a){this.path=a;return this}});var Wd,se={getContext:function(){void 0===Wd&&(Wd=new (window.AudioContext||window.webkitAudioContext));return Wd},setContext:function(a){Wd=a}};Object.assign(pe.prototype,{load:function(a,b,c,d){var e=new Fa(this.manager);e.setResponseType("arraybuffer");e.setPath(this.path);e.load(a,function(a){a=a.slice(0);se.getContext().decodeAudioData(a,function(a){b(a)})},
+c,d)},setPath:function(a){this.path=a;return this}});Object.assign(vf.prototype,{update:function(){var a,b,c,d,e,f,g,h,k=new P,m=new P;return function(l){if(a!==this||b!==l.focus||c!==l.fov||d!==l.aspect*this.aspect||e!==l.near||f!==l.far||g!==l.zoom||h!==this.eyeSep){a=this;b=l.focus;c=l.fov;d=l.aspect*this.aspect;e=l.near;f=l.far;g=l.zoom;var n=l.projectionMatrix.clone();h=this.eyeSep/2;var p=h*e/b,q=e*Math.tan(R.DEG2RAD*c*.5)/g;m.elements[12]=-h;k.elements[12]=h;var t=-q*d+p;var u=q*d+p;n.elements[0]=
+2*e/(u-t);n.elements[8]=(u+t)/(u-t);this.cameraL.projectionMatrix.copy(n);t=-q*d-p;u=q*d-p;n.elements[0]=2*e/(u-t);n.elements[8]=(u+t)/(u-t);this.cameraR.projectionMatrix.copy(n)}this.cameraL.matrixWorld.copy(l.matrixWorld).multiply(m);this.cameraR.matrixWorld.copy(l.matrixWorld).multiply(k)}}()});id.prototype=Object.create(D.prototype);id.prototype.constructor=id;Object.assign(qe.prototype,{start:function(){this.oldTime=this.startTime=("undefined"===typeof performance?Date:performance).now();this.elapsedTime=
+0;this.running=!0},stop:function(){this.getElapsedTime();this.autoStart=this.running=!1},getElapsedTime:function(){this.getDelta();return this.elapsedTime},getDelta:function(){var a=0;if(this.autoStart&&!this.running)return this.start(),0;if(this.running){var b=("undefined"===typeof performance?Date:performance).now();a=(b-this.oldTime)/1E3;this.oldTime=b;this.elapsedTime+=a}return a}});re.prototype=Object.assign(Object.create(D.prototype),{constructor:re,getInput:function(){return this.gain},removeFilter:function(){null!==
+this.filter&&(this.gain.disconnect(this.filter),this.filter.disconnect(this.context.destination),this.gain.connect(this.context.destination),this.filter=null);return this},getFilter:function(){return this.filter},setFilter:function(a){null!==this.filter?(this.gain.disconnect(this.filter),this.filter.disconnect(this.context.destination)):this.gain.disconnect(this.context.destination);this.filter=a;this.gain.connect(this.filter);this.filter.connect(this.context.destination);return this},getMasterVolume:function(){return this.gain.gain.value},
+setMasterVolume:function(a){this.gain.gain.setTargetAtTime(a,this.context.currentTime,.01);return this},updateMatrixWorld:function(){var a=new p,b=new ja,c=new p,d=new p,e=new qe;return function(f){D.prototype.updateMatrixWorld.call(this,f);f=this.context.listener;var g=this.up;this.timeDelta=e.getDelta();this.matrixWorld.decompose(a,b,c);d.set(0,0,-1).applyQuaternion(b);if(f.positionX){var h=this.context.currentTime+this.timeDelta;f.positionX.linearRampToValueAtTime(a.x,h);f.positionY.linearRampToValueAtTime(a.y,
+h);f.positionZ.linearRampToValueAtTime(a.z,h);f.forwardX.linearRampToValueAtTime(d.x,h);f.forwardY.linearRampToValueAtTime(d.y,h);f.forwardZ.linearRampToValueAtTime(d.z,h);f.upX.linearRampToValueAtTime(g.x,h);f.upY.linearRampToValueAtTime(g.y,h);f.upZ.linearRampToValueAtTime(g.z,h)}else f.setPosition(a.x,a.y,a.z),f.setOrientation(d.x,d.y,d.z,g.x,g.y,g.z)}}()});lc.prototype=Object.assign(Object.create(D.prototype),{constructor:lc,getOutput:function(){return this.gain},setNodeSource:function(a){this.hasPlaybackControl=
+!1;this.sourceType="audioNode";this.source=a;this.connect();return this},setMediaElementSource:function(a){this.hasPlaybackControl=!1;this.sourceType="mediaNode";this.source=this.context.createMediaElementSource(a);this.connect();return this},setBuffer:function(a){this.buffer=a;this.sourceType="buffer";this.autoplay&&this.play();return this},play:function(){if(!0===this.isPlaying)console.warn("THREE.Audio: Audio is already playing.");else if(!1===this.hasPlaybackControl)console.warn("THREE.Audio: this Audio has no playback control.");
+else{var a=this.context.createBufferSource();a.buffer=this.buffer;a.loop=this.loop;a.onended=this.onEnded.bind(this);a.playbackRate.setValueAtTime(this.playbackRate,this.startTime);this.startTime=this.context.currentTime;a.start(this.startTime,this.offset);this.isPlaying=!0;this.source=a;return this.connect()}},pause:function(){if(!1===this.hasPlaybackControl)console.warn("THREE.Audio: this Audio has no playback control.");else return!0===this.isPlaying&&(this.source.stop(),this.source.onended=null,
+this.offset+=(this.context.currentTime-this.startTime)*this.playbackRate,this.isPlaying=!1),this},stop:function(){if(!1===this.hasPlaybackControl)console.warn("THREE.Audio: this Audio has no playback control.");else return this.source.stop(),this.source.onended=null,this.offset=0,this.isPlaying=!1,this},connect:function(){if(0<this.filters.length){this.source.connect(this.filters[0]);for(var a=1,b=this.filters.length;a<b;a++)this.filters[a-1].connect(this.filters[a]);this.filters[this.filters.length-
+1].connect(this.getOutput())}else this.source.connect(this.getOutput());return this},disconnect:function(){if(0<this.filters.length){this.source.disconnect(this.filters[0]);for(var a=1,b=this.filters.length;a<b;a++)this.filters[a-1].disconnect(this.filters[a]);this.filters[this.filters.length-1].disconnect(this.getOutput())}else this.source.disconnect(this.getOutput());return this},getFilters:function(){return this.filters},setFilters:function(a){a||(a=[]);!0===this.isPlaying?(this.disconnect(),this.filters=
+a,this.connect()):this.filters=a;return this},getFilter:function(){return this.getFilters()[0]},setFilter:function(a){return this.setFilters(a?[a]:[])},setPlaybackRate:function(a){if(!1===this.hasPlaybackControl)console.warn("THREE.Audio: this Audio has no playback control.");else return this.playbackRate=a,!0===this.isPlaying&&this.source.playbackRate.setValueAtTime(this.playbackRate,this.context.currentTime),this},getPlaybackRate:function(){return this.playbackRate},onEnded:function(){this.isPlaying=
+!1},getLoop:function(){return!1===this.hasPlaybackControl?(console.warn("THREE.Audio: this Audio has no playback control."),!1):this.loop},setLoop:function(a){if(!1===this.hasPlaybackControl)console.warn("THREE.Audio: this Audio has no playback control.");else return this.loop=a,!0===this.isPlaying&&(this.source.loop=this.loop),this},getVolume:function(){return this.gain.gain.value},setVolume:function(a){this.gain.gain.setTargetAtTime(a,this.context.currentTime,.01);return this}});te.prototype=Object.assign(Object.create(lc.prototype),
+{constructor:te,getOutput:function(){return this.panner},getRefDistance:function(){return this.panner.refDistance},setRefDistance:function(a){this.panner.refDistance=a;return this},getRolloffFactor:function(){return this.panner.rolloffFactor},setRolloffFactor:function(a){this.panner.rolloffFactor=a;return this},getDistanceModel:function(){return this.panner.distanceModel},setDistanceModel:function(a){this.panner.distanceModel=a;return this},getMaxDistance:function(){return this.panner.maxDistance},
+setMaxDistance:function(a){this.panner.maxDistance=a;return this},setDirectionalCone:function(a,b,c){this.panner.coneInnerAngle=a;this.panner.coneOuterAngle=b;this.panner.coneOuterGain=c;return this},updateMatrixWorld:function(){var a=new p,b=new ja,c=new p,d=new p;return function(e){D.prototype.updateMatrixWorld.call(this,e);e=this.panner;this.matrixWorld.decompose(a,b,c);d.set(0,0,1).applyQuaternion(b);if(e.positionX){var f=this.context.currentTime+this.listener.timeDelta;e.positionX.linearRampToValueAtTime(a.x,
+f);e.positionY.linearRampToValueAtTime(a.y,f);e.positionZ.linearRampToValueAtTime(a.z,f);e.orientationX.linearRampToValueAtTime(d.x,f);e.orientationY.linearRampToValueAtTime(d.y,f);e.orientationZ.linearRampToValueAtTime(d.z,f)}else e.setPosition(a.x,a.y,a.z),e.setOrientation(d.x,d.y,d.z)}}()});Object.assign(ue.prototype,{getFrequencyData:function(){this.analyser.getByteFrequencyData(this.data);return this.data},getAverageFrequency:function(){for(var a=0,b=this.getFrequencyData(),c=0;c<b.length;c++)a+=
+b[c];return a/b.length}});Object.assign(ve.prototype,{accumulate:function(a,b){var c=this.buffer,d=this.valueSize;a=a*d+d;var e=this.cumulativeWeight;if(0===e){for(e=0;e!==d;++e)c[a+e]=c[e];e=b}else e+=b,this._mixBufferRegion(c,a,0,b/e,d);this.cumulativeWeight=e},apply:function(a){var b=this.valueSize,c=this.buffer;a=a*b+b;var d=this.cumulativeWeight,e=this.binding;this.cumulativeWeight=0;1>d&&this._mixBufferRegion(c,a,3*b,1-d,b);d=b;for(var f=b+b;d!==f;++d)if(c[d]!==c[d+b]){e.setValue(c,a);break}},
+saveOriginalState:function(){var a=this.buffer,b=this.valueSize,c=3*b;this.binding.getValue(a,c);for(var d=b;d!==c;++d)a[d]=a[c+d%b];this.cumulativeWeight=0},restoreOriginalState:function(){this.binding.setValue(this.buffer,3*this.valueSize)},_select:function(a,b,c,d,e){if(.5<=d)for(d=0;d!==e;++d)a[b+d]=a[c+d]},_slerp:function(a,b,c,d){ja.slerpFlat(a,b,a,b,a,c,d)},_lerp:function(a,b,c,d,e){for(var f=1-d,g=0;g!==e;++g){var h=b+g;a[h]=a[h]*f+a[c+g]*d}}});Object.assign(wf.prototype,{getValue:function(a,
+b){this.bind();var c=this._bindings[this._targetGroup.nCachedObjects_];void 0!==c&&c.getValue(a,b)},setValue:function(a,b){for(var c=this._bindings,d=this._targetGroup.nCachedObjects_,e=c.length;d!==e;++d)c[d].setValue(a,b)},bind:function(){for(var a=this._bindings,b=this._targetGroup.nCachedObjects_,c=a.length;b!==c;++b)a[b].bind()},unbind:function(){for(var a=this._bindings,b=this._targetGroup.nCachedObjects_,c=a.length;b!==c;++b)a[b].unbind()}});Object.assign(oa,{Composite:wf,create:function(a,
+b,c){return a&&a.isAnimationObjectGroup?new oa.Composite(a,b,c):new oa(a,b,c)},sanitizeNodeName:function(){var a=/[\[\]\.:\/]/g;return function(b){return b.replace(/\s/g,"_").replace(a,"")}}(),parseTrackName:function(){var a="[^"+"\\[\\]\\.:\\/".replace("\\.","")+"]",b=/((?:WC+[\/:])*)/.source.replace("WC","[^\\[\\]\\.:\\/]");a=/(WCOD+)?/.source.replace("WCOD",a);var c=/(?:\.(WC+)(?:\[(.+)\])?)?/.source.replace("WC","[^\\[\\]\\.:\\/]"),d=/\.(WC+)(?:\[(.+)\])?/.source.replace("WC","[^\\[\\]\\.:\\/]"),
+e=new RegExp("^"+b+a+c+d+"$"),f=["material","materials","bones"];return function(a){var b=e.exec(a);if(!b)throw Error("PropertyBinding: Cannot parse trackName: "+a);b={nodeName:b[2],objectName:b[3],objectIndex:b[4],propertyName:b[5],propertyIndex:b[6]};var c=b.nodeName&&b.nodeName.lastIndexOf(".");if(void 0!==c&&-1!==c){var d=b.nodeName.substring(c+1);-1!==f.indexOf(d)&&(b.nodeName=b.nodeName.substring(0,c),b.objectName=d)}if(null===b.propertyName||0===b.propertyName.length)throw Error("PropertyBinding: can not parse propertyName from trackName: "+
+a);return b}}(),findNode:function(a,b){if(!b||""===b||"root"===b||"."===b||-1===b||b===a.name||b===a.uuid)return a;if(a.skeleton){var c=a.skeleton.getBoneByName(b);if(void 0!==c)return c}if(a.children){var d=function(a){for(var c=0;c<a.length;c++){var e=a[c];if(e.name===b||e.uuid===b||(e=d(e.children)))return e}return null};if(a=d(a.children))return a}return null}});Object.assign(oa.prototype,{_getValue_unavailable:function(){},_setValue_unavailable:function(){},BindingType:{Direct:0,EntireArray:1,
+ArrayElement:2,HasFromToArray:3},Versioning:{None:0,NeedsUpdate:1,MatrixWorldNeedsUpdate:2},GetterByBindingType:[function(a,b){a[b]=this.node[this.propertyName]},function(a,b){for(var c=this.resolvedProperty,d=0,e=c.length;d!==e;++d)a[b++]=c[d]},function(a,b){a[b]=this.resolvedProperty[this.propertyIndex]},function(a,b){this.resolvedProperty.toArray(a,b)}],SetterByBindingTypeAndVersioning:[[function(a,b){this.targetObject[this.propertyName]=a[b]},function(a,b){this.targetObject[this.propertyName]=
+a[b];this.targetObject.needsUpdate=!0},function(a,b){this.targetObject[this.propertyName]=a[b];this.targetObject.matrixWorldNeedsUpdate=!0}],[function(a,b){for(var c=this.resolvedProperty,d=0,e=c.length;d!==e;++d)c[d]=a[b++]},function(a,b){for(var c=this.resolvedProperty,d=0,e=c.length;d!==e;++d)c[d]=a[b++];this.targetObject.needsUpdate=!0},function(a,b){for(var c=this.resolvedProperty,d=0,e=c.length;d!==e;++d)c[d]=a[b++];this.targetObject.matrixWorldNeedsUpdate=!0}],[function(a,b){this.resolvedProperty[this.propertyIndex]=
+a[b]},function(a,b){this.resolvedProperty[this.propertyIndex]=a[b];this.targetObject.needsUpdate=!0},function(a,b){this.resolvedProperty[this.propertyIndex]=a[b];this.targetObject.matrixWorldNeedsUpdate=!0}],[function(a,b){this.resolvedProperty.fromArray(a,b)},function(a,b){this.resolvedProperty.fromArray(a,b);this.targetObject.needsUpdate=!0},function(a,b){this.resolvedProperty.fromArray(a,b);this.targetObject.matrixWorldNeedsUpdate=!0}]],getValue:function(a,b){this.bind();this.getValue(a,b)},setValue:function(a,
+b){this.bind();this.setValue(a,b)},bind:function(){var a=this.node,b=this.parsedPath,c=b.objectName,d=b.propertyName,e=b.propertyIndex;a||(this.node=a=oa.findNode(this.rootNode,b.nodeName)||this.rootNode);this.getValue=this._getValue_unavailable;this.setValue=this._setValue_unavailable;if(a){if(c){var f=b.objectIndex;switch(c){case "materials":if(!a.material){console.error("THREE.PropertyBinding: Can not bind to material as node does not have a material.",this);return}if(!a.material.materials){console.error("THREE.PropertyBinding: Can not bind to material.materials as node.material does not have a materials array.",
+this);return}a=a.material.materials;break;case "bones":if(!a.skeleton){console.error("THREE.PropertyBinding: Can not bind to bones as node does not have a skeleton.",this);return}a=a.skeleton.bones;for(c=0;c<a.length;c++)if(a[c].name===f){f=c;break}break;default:if(void 0===a[c]){console.error("THREE.PropertyBinding: Can not bind to objectName of node undefined.",this);return}a=a[c]}if(void 0!==f){if(void 0===a[f]){console.error("THREE.PropertyBinding: Trying to bind to objectIndex of objectName, but is undefined.",
+this,a);return}a=a[f]}}f=a[d];if(void 0===f)console.error("THREE.PropertyBinding: Trying to update property for track: "+b.nodeName+"."+d+" but it wasn't found.",a);else{b=this.Versioning.None;this.targetObject=a;void 0!==a.needsUpdate?b=this.Versioning.NeedsUpdate:void 0!==a.matrixWorldNeedsUpdate&&(b=this.Versioning.MatrixWorldNeedsUpdate);c=this.BindingType.Direct;if(void 0!==e){if("morphTargetInfluences"===d){if(!a.geometry){console.error("THREE.PropertyBinding: Can not bind to morphTargetInfluences because node does not have a geometry.",
+this);return}if(a.geometry.isBufferGeometry){if(!a.geometry.morphAttributes){console.error("THREE.PropertyBinding: Can not bind to morphTargetInfluences because node does not have a geometry.morphAttributes.",this);return}for(c=0;c<this.node.geometry.morphAttributes.position.length;c++)if(a.geometry.morphAttributes.position[c].name===e){e=c;break}}else{if(!a.geometry.morphTargets){console.error("THREE.PropertyBinding: Can not bind to morphTargetInfluences because node does not have a geometry.morphTargets.",
+this);return}for(c=0;c<this.node.geometry.morphTargets.length;c++)if(a.geometry.morphTargets[c].name===e){e=c;break}}}c=this.BindingType.ArrayElement;this.resolvedProperty=f;this.propertyIndex=e}else void 0!==f.fromArray&&void 0!==f.toArray?(c=this.BindingType.HasFromToArray,this.resolvedProperty=f):Array.isArray(f)?(c=this.BindingType.EntireArray,this.resolvedProperty=f):this.propertyName=d;this.getValue=this.GetterByBindingType[c];this.setValue=this.SetterByBindingTypeAndVersioning[c][b]}}else console.error("THREE.PropertyBinding: Trying to update node for track: "+
+this.path+" but it wasn't found.")},unbind:function(){this.node=null;this.getValue=this._getValue_unbound;this.setValue=this._setValue_unbound}});Object.assign(oa.prototype,{_getValue_unbound:oa.prototype.getValue,_setValue_unbound:oa.prototype.setValue});Object.assign(xf.prototype,{isAnimationObjectGroup:!0,add:function(){for(var a=this._objects,b=a.length,c=this.nCachedObjects_,d=this._indicesByUUID,e=this._paths,f=this._parsedPaths,g=this._bindings,h=g.length,k=void 0,l=0,p=arguments.length;l!==
+p;++l){var n=arguments[l],r=n.uuid,x=d[r];if(void 0===x){x=b++;d[r]=x;a.push(n);r=0;for(var t=h;r!==t;++r)g[r].push(new oa(n,e[r],f[r]))}else if(x<c){k=a[x];var u=--c;t=a[u];d[t.uuid]=x;a[x]=t;d[r]=u;a[u]=n;r=0;for(t=h;r!==t;++r){var w=g[r],z=w[x];w[x]=w[u];void 0===z&&(z=new oa(n,e[r],f[r]));w[u]=z}}else a[x]!==k&&console.error("THREE.AnimationObjectGroup: Different objects with the same UUID detected. Clean the caches or recreate your infrastructure when reloading scenes.")}this.nCachedObjects_=
+c},remove:function(){for(var a=this._objects,b=this.nCachedObjects_,c=this._indicesByUUID,d=this._bindings,e=d.length,f=0,g=arguments.length;f!==g;++f){var h=arguments[f],k=h.uuid,l=c[k];if(void 0!==l&&l>=b){var p=b++,n=a[p];c[n.uuid]=l;a[l]=n;c[k]=p;a[p]=h;h=0;for(k=e;h!==k;++h){n=d[h];var r=n[l];n[l]=n[p];n[p]=r}}}this.nCachedObjects_=b},uncache:function(){for(var a=this._objects,b=a.length,c=this.nCachedObjects_,d=this._indicesByUUID,e=this._bindings,f=e.length,g=0,h=arguments.length;g!==h;++g){var k=
+arguments[g].uuid,l=d[k];if(void 0!==l)if(delete d[k],l<c){k=--c;var p=a[k],n=--b,r=a[n];d[p.uuid]=l;a[l]=p;d[r.uuid]=k;a[k]=r;a.pop();p=0;for(r=f;p!==r;++p){var x=e[p],t=x[n];x[l]=x[k];x[k]=t;x.pop()}}else for(n=--b,r=a[n],d[r.uuid]=l,a[l]=r,a.pop(),p=0,r=f;p!==r;++p)x=e[p],x[l]=x[n],x.pop()}this.nCachedObjects_=c},subscribe_:function(a,b){var c=this._bindingsIndicesByPath,d=c[a],e=this._bindings;if(void 0!==d)return e[d];var f=this._paths,g=this._parsedPaths,h=this._objects,k=this.nCachedObjects_,
+l=Array(h.length);d=e.length;c[a]=d;f.push(a);g.push(b);e.push(l);c=k;for(d=h.length;c!==d;++c)l[c]=new oa(h[c],a,b);return l},unsubscribe_:function(a){var b=this._bindingsIndicesByPath,c=b[a];if(void 0!==c){var d=this._paths,e=this._parsedPaths,f=this._bindings,g=f.length-1,h=f[g];b[a[g]]=c;f[c]=h;f.pop();e[c]=e[g];e.pop();d[c]=d[g];d.pop()}}});Object.assign(yf.prototype,{play:function(){this._mixer._activateAction(this);return this},stop:function(){this._mixer._deactivateAction(this);return this.reset()},
+reset:function(){this.paused=!1;this.enabled=!0;this.time=0;this._loopCount=-1;this._startTime=null;return this.stopFading().stopWarping()},isRunning:function(){return this.enabled&&!this.paused&&0!==this.timeScale&&null===this._startTime&&this._mixer._isActiveAction(this)},isScheduled:function(){return this._mixer._isActiveAction(this)},startAt:function(a){this._startTime=a;return this},setLoop:function(a,b){this.loop=a;this.repetitions=b;return this},setEffectiveWeight:function(a){this.weight=a;
+this._effectiveWeight=this.enabled?a:0;return this.stopFading()},getEffectiveWeight:function(){return this._effectiveWeight},fadeIn:function(a){return this._scheduleFading(a,0,1)},fadeOut:function(a){return this._scheduleFading(a,1,0)},crossFadeFrom:function(a,b,c){a.fadeOut(b);this.fadeIn(b);if(c){c=this._clip.duration;var d=a._clip.duration,e=c/d;a.warp(1,d/c,b);this.warp(e,1,b)}return this},crossFadeTo:function(a,b,c){return a.crossFadeFrom(this,b,c)},stopFading:function(){var a=this._weightInterpolant;
+null!==a&&(this._weightInterpolant=null,this._mixer._takeBackControlInterpolant(a));return this},setEffectiveTimeScale:function(a){this.timeScale=a;this._effectiveTimeScale=this.paused?0:a;return this.stopWarping()},getEffectiveTimeScale:function(){return this._effectiveTimeScale},setDuration:function(a){this.timeScale=this._clip.duration/a;return this.stopWarping()},syncWith:function(a){this.time=a.time;this.timeScale=a.timeScale;return this.stopWarping()},halt:function(a){return this.warp(this._effectiveTimeScale,
+0,a)},warp:function(a,b,c){var d=this._mixer,e=d.time,f=this._timeScaleInterpolant,g=this.timeScale;null===f&&(this._timeScaleInterpolant=f=d._lendControlInterpolant());d=f.parameterPositions;f=f.sampleValues;d[0]=e;d[1]=e+c;f[0]=a/g;f[1]=b/g;return this},stopWarping:function(){var a=this._timeScaleInterpolant;null!==a&&(this._timeScaleInterpolant=null,this._mixer._takeBackControlInterpolant(a));return this},getMixer:function(){return this._mixer},getClip:function(){return this._clip},getRoot:function(){return this._localRoot||
+this._mixer._root},_update:function(a,b,c,d){if(this.enabled){var e=this._startTime;if(null!==e){b=(a-e)*c;if(0>b||0===c)return;this._startTime=null;b*=c}b*=this._updateTimeScale(a);c=this._updateTime(b);a=this._updateWeight(a);if(0<a){b=this._interpolants;e=this._propertyBindings;for(var f=0,g=b.length;f!==g;++f)b[f].evaluate(c),e[f].accumulate(d,a)}}else this._updateWeight(a)},_updateWeight:function(a){var b=0;if(this.enabled){b=this.weight;var c=this._weightInterpolant;if(null!==c){var d=c.evaluate(a)[0];
+b*=d;a>c.parameterPositions[1]&&(this.stopFading(),0===d&&(this.enabled=!1))}}return this._effectiveWeight=b},_updateTimeScale:function(a){var b=0;if(!this.paused){b=this.timeScale;var c=this._timeScaleInterpolant;if(null!==c){var d=c.evaluate(a)[0];b*=d;a>c.parameterPositions[1]&&(this.stopWarping(),0===b?this.paused=!0:this.timeScale=b)}}return this._effectiveTimeScale=b},_updateTime:function(a){var b=this.time+a,c=this._clip.duration,d=this.loop,e=this._loopCount,f=2202===d;if(0===a)return-1===
+e?b:f&&1===(e&1)?c-b:b;if(2200===d)a:{if(-1===e&&(this._loopCount=0,this._setEndings(!0,!0,!1)),b>=c)b=c;else if(0>b)b=0;else break a;this.clampWhenFinished?this.paused=!0:this.enabled=!1;this._mixer.dispatchEvent({type:"finished",action:this,direction:0>a?-1:1})}else{-1===e&&(0<=a?(e=0,this._setEndings(!0,0===this.repetitions,f)):this._setEndings(0===this.repetitions,!0,f));if(b>=c||0>b){d=Math.floor(b/c);b-=c*d;e+=Math.abs(d);var g=this.repetitions-e;0>=g?(this.clampWhenFinished?this.paused=!0:
+this.enabled=!1,b=0<a?c:0,this._mixer.dispatchEvent({type:"finished",action:this,direction:0<a?1:-1})):(1===g?(a=0>a,this._setEndings(a,!a,f)):this._setEndings(!1,!1,f),this._loopCount=e,this._mixer.dispatchEvent({type:"loop",action:this,loopDelta:d}))}if(f&&1===(e&1))return this.time=b,c-b}return this.time=b},_setEndings:function(a,b,c){var d=this._interpolantSettings;c?(d.endingStart=2401,d.endingEnd=2401):(d.endingStart=a?this.zeroSlopeAtStart?2401:2400:2402,d.endingEnd=b?this.zeroSlopeAtEnd?2401:
+2400:2402)},_scheduleFading:function(a,b,c){var d=this._mixer,e=d.time,f=this._weightInterpolant;null===f&&(this._weightInterpolant=f=d._lendControlInterpolant());d=f.parameterPositions;f=f.sampleValues;d[0]=e;f[0]=b;d[1]=e+a;f[1]=c;return this}});we.prototype=Object.assign(Object.create(ia.prototype),{constructor:we,_bindAction:function(a,b){var c=a._localRoot||this._root,d=a._clip.tracks,e=d.length,f=a._propertyBindings;a=a._interpolants;var g=c.uuid,h=this._bindingsByRootAndName,k=h[g];void 0===
+k&&(k={},h[g]=k);for(h=0;h!==e;++h){var l=d[h],p=l.name,n=k[p];if(void 0===n){n=f[h];if(void 0!==n){null===n._cacheIndex&&(++n.referenceCount,this._addInactiveBinding(n,g,p));continue}n=new ve(oa.create(c,p,b&&b._propertyBindings[h].binding.parsedPath),l.ValueTypeName,l.getValueSize());++n.referenceCount;this._addInactiveBinding(n,g,p)}f[h]=n;a[h].resultBuffer=n.buffer}},_activateAction:function(a){if(!this._isActiveAction(a)){if(null===a._cacheIndex){var b=(a._localRoot||this._root).uuid,c=a._clip.uuid,
+d=this._actionsByClip[c];this._bindAction(a,d&&d.knownActions[0]);this._addInactiveAction(a,c,b)}b=a._propertyBindings;c=0;for(d=b.length;c!==d;++c){var e=b[c];0===e.useCount++&&(this._lendBinding(e),e.saveOriginalState())}this._lendAction(a)}},_deactivateAction:function(a){if(this._isActiveAction(a)){for(var b=a._propertyBindings,c=0,d=b.length;c!==d;++c){var e=b[c];0===--e.useCount&&(e.restoreOriginalState(),this._takeBackBinding(e))}this._takeBackAction(a)}},_initMemoryManager:function(){this._actions=
+[];this._nActiveActions=0;this._actionsByClip={};this._bindings=[];this._nActiveBindings=0;this._bindingsByRootAndName={};this._controlInterpolants=[];this._nActiveControlInterpolants=0;var a=this;this.stats={actions:{get total(){return a._actions.length},get inUse(){return a._nActiveActions}},bindings:{get total(){return a._bindings.length},get inUse(){return a._nActiveBindings}},controlInterpolants:{get total(){return a._controlInterpolants.length},get inUse(){return a._nActiveControlInterpolants}}}},
+_isActiveAction:function(a){a=a._cacheIndex;return null!==a&&a<this._nActiveActions},_addInactiveAction:function(a,b,c){var d=this._actions,e=this._actionsByClip,f=e[b];void 0===f?(f={knownActions:[a],actionByRoot:{}},a._byClipCacheIndex=0,e[b]=f):(b=f.knownActions,a._byClipCacheIndex=b.length,b.push(a));a._cacheIndex=d.length;d.push(a);f.actionByRoot[c]=a},_removeInactiveAction:function(a){var b=this._actions,c=b[b.length-1],d=a._cacheIndex;c._cacheIndex=d;b[d]=c;b.pop();a._cacheIndex=null;b=a._clip.uuid;
+c=this._actionsByClip;d=c[b];var e=d.knownActions,f=e[e.length-1],g=a._byClipCacheIndex;f._byClipCacheIndex=g;e[g]=f;e.pop();a._byClipCacheIndex=null;delete d.actionByRoot[(a._localRoot||this._root).uuid];0===e.length&&delete c[b];this._removeInactiveBindingsForAction(a)},_removeInactiveBindingsForAction:function(a){a=a._propertyBindings;for(var b=0,c=a.length;b!==c;++b){var d=a[b];0===--d.referenceCount&&this._removeInactiveBinding(d)}},_lendAction:function(a){var b=this._actions,c=a._cacheIndex,
+d=this._nActiveActions++,e=b[d];a._cacheIndex=d;b[d]=a;e._cacheIndex=c;b[c]=e},_takeBackAction:function(a){var b=this._actions,c=a._cacheIndex,d=--this._nActiveActions,e=b[d];a._cacheIndex=d;b[d]=a;e._cacheIndex=c;b[c]=e},_addInactiveBinding:function(a,b,c){var d=this._bindingsByRootAndName,e=d[b],f=this._bindings;void 0===e&&(e={},d[b]=e);e[c]=a;a._cacheIndex=f.length;f.push(a)},_removeInactiveBinding:function(a){var b=this._bindings,c=a.binding,d=c.rootNode.uuid;c=c.path;var e=this._bindingsByRootAndName,
+f=e[d],g=b[b.length-1];a=a._cacheIndex;g._cacheIndex=a;b[a]=g;b.pop();delete f[c];a:{for(var h in f)break a;delete e[d]}},_lendBinding:function(a){var b=this._bindings,c=a._cacheIndex,d=this._nActiveBindings++,e=b[d];a._cacheIndex=d;b[d]=a;e._cacheIndex=c;b[c]=e},_takeBackBinding:function(a){var b=this._bindings,c=a._cacheIndex,d=--this._nActiveBindings,e=b[d];a._cacheIndex=d;b[d]=a;e._cacheIndex=c;b[c]=e},_lendControlInterpolant:function(){var a=this._controlInterpolants,b=this._nActiveControlInterpolants++,
+c=a[b];void 0===c&&(c=new cd(new Float32Array(2),new Float32Array(2),1,this._controlInterpolantsResultBuffer),c.__cacheIndex=b,a[b]=c);return c},_takeBackControlInterpolant:function(a){var b=this._controlInterpolants,c=a.__cacheIndex,d=--this._nActiveControlInterpolants,e=b[d];a.__cacheIndex=d;b[d]=a;e.__cacheIndex=c;b[c]=e},_controlInterpolantsResultBuffer:new Float32Array(1),clipAction:function(a,b){var c=b||this._root,d=c.uuid;c="string"===typeof a?za.findByName(c,a):a;a=null!==c?c.uuid:a;var e=
+this._actionsByClip[a],f=null;if(void 0!==e){f=e.actionByRoot[d];if(void 0!==f)return f;f=e.knownActions[0];null===c&&(c=f._clip)}if(null===c)return null;b=new yf(this,c,b);this._bindAction(b,f);this._addInactiveAction(b,a,d);return b},existingAction:function(a,b){var c=b||this._root;b=c.uuid;c="string"===typeof a?za.findByName(c,a):a;a=this._actionsByClip[c?c.uuid:a];return void 0!==a?a.actionByRoot[b]||null:null},stopAllAction:function(){for(var a=this._actions,b=this._nActiveActions,c=this._bindings,
+d=this._nActiveBindings,e=this._nActiveBindings=this._nActiveActions=0;e!==b;++e)a[e].reset();for(e=0;e!==d;++e)c[e].useCount=0;return this},update:function(a){a*=this.timeScale;for(var b=this._actions,c=this._nActiveActions,d=this.time+=a,e=Math.sign(a),f=this._accuIndex^=1,g=0;g!==c;++g)b[g]._update(d,a,e,f);a=this._bindings;b=this._nActiveBindings;for(g=0;g!==b;++g)a[g].apply(f);return this},getRoot:function(){return this._root},uncacheClip:function(a){var b=this._actions;a=a.uuid;var c=this._actionsByClip,
+d=c[a];if(void 0!==d){d=d.knownActions;for(var e=0,f=d.length;e!==f;++e){var g=d[e];this._deactivateAction(g);var h=g._cacheIndex,k=b[b.length-1];g._cacheIndex=null;g._byClipCacheIndex=null;k._cacheIndex=h;b[h]=k;b.pop();this._removeInactiveBindingsForAction(g)}delete c[a]}},uncacheRoot:function(a){a=a.uuid;var b=this._actionsByClip;for(d in b){var c=b[d].actionByRoot[a];void 0!==c&&(this._deactivateAction(c),this._removeInactiveAction(c))}var d=this._bindingsByRootAndName[a];if(void 0!==d)for(var e in d)a=
+d[e],a.restoreOriginalState(),this._removeInactiveBinding(a)},uncacheAction:function(a,b){a=this.existingAction(a,b);null!==a&&(this._deactivateAction(a),this._removeInactiveAction(a))}});Rd.prototype.clone=function(){return new Rd(void 0===this.value.clone?this.value:this.value.clone())};xe.prototype=Object.assign(Object.create(E.prototype),{constructor:xe,isInstancedBufferGeometry:!0,copy:function(a){E.prototype.copy.call(this,a);this.maxInstancedCount=a.maxInstancedCount;return this},clone:function(){return(new this.constructor).copy(this)}});
+ye.prototype=Object.assign(Object.create(sb.prototype),{constructor:ye,isInstancedInterleavedBuffer:!0,copy:function(a){sb.prototype.copy.call(this,a);this.meshPerAttribute=a.meshPerAttribute;return this}});ze.prototype=Object.assign(Object.create(F.prototype),{constructor:ze,isInstancedBufferAttribute:!0,copy:function(a){F.prototype.copy.call(this,a);this.meshPerAttribute=a.meshPerAttribute;return this}});Object.assign(zf.prototype,{linePrecision:1,set:function(a,b){this.ray.set(a,b)},setFromCamera:function(a,
+b){b&&b.isPerspectiveCamera?(this.ray.origin.setFromMatrixPosition(b.matrixWorld),this.ray.direction.set(a.x,a.y,.5).unproject(b).sub(this.ray.origin).normalize()):b&&b.isOrthographicCamera?(this.ray.origin.set(a.x,a.y,(b.near+b.far)/(b.near-b.far)).unproject(b),this.ray.direction.set(0,0,-1).transformDirection(b.matrixWorld)):console.error("THREE.Raycaster: Unsupported camera type.")},intersectObject:function(a,b,c){c=c||[];Ae(a,this,c,b);c.sort(Af);return c},intersectObjects:function(a,b,c){c=c||
+[];if(!1===Array.isArray(a))return console.warn("THREE.Raycaster.intersectObjects: objects is not an Array."),c;for(var d=0,e=a.length;d<e;d++)Ae(a[d],this,c,b);c.sort(Af);return c}});Object.assign(Bf.prototype,{set:function(a,b,c){this.radius=a;this.phi=b;this.theta=c;return this},clone:function(){return(new this.constructor).copy(this)},copy:function(a){this.radius=a.radius;this.phi=a.phi;this.theta=a.theta;return this},makeSafe:function(){this.phi=Math.max(1E-6,Math.min(Math.PI-1E-6,this.phi));
+return this},setFromVector3:function(a){return this.setFromCartesianCoords(a.x,a.y,a.z)},setFromCartesianCoords:function(a,b,c){this.radius=Math.sqrt(a*a+b*b+c*c);0===this.radius?this.phi=this.theta=0:(this.theta=Math.atan2(a,c),this.phi=Math.acos(R.clamp(b/this.radius,-1,1)));return this}});Object.assign(Cf.prototype,{set:function(a,b,c){this.radius=a;this.theta=b;this.y=c;return this},clone:function(){return(new this.constructor).copy(this)},copy:function(a){this.radius=a.radius;this.theta=a.theta;
+this.y=a.y;return this},setFromVector3:function(a){return this.setFromCartesianCoords(a.x,a.y,a.z)},setFromCartesianCoords:function(a,b,c){this.radius=Math.sqrt(a*a+c*c);this.theta=Math.atan2(a,c);this.y=b;return this}});Object.assign(Be.prototype,{set:function(a,b){this.min.copy(a);this.max.copy(b);return this},setFromPoints:function(a){this.makeEmpty();for(var b=0,c=a.length;b<c;b++)this.expandByPoint(a[b]);return this},setFromCenterAndSize:function(){var a=new z;return function(b,c){c=a.copy(c).multiplyScalar(.5);
+this.min.copy(b).sub(c);this.max.copy(b).add(c);return this}}(),clone:function(){return(new this.constructor).copy(this)},copy:function(a){this.min.copy(a.min);this.max.copy(a.max);return this},makeEmpty:function(){this.min.x=this.min.y=Infinity;this.max.x=this.max.y=-Infinity;return this},isEmpty:function(){return this.max.x<this.min.x||this.max.y<this.min.y},getCenter:function(a){void 0===a&&(console.warn("THREE.Box2: .getCenter() target is now required"),a=new z);return this.isEmpty()?a.set(0,
+0):a.addVectors(this.min,this.max).multiplyScalar(.5)},getSize:function(a){void 0===a&&(console.warn("THREE.Box2: .getSize() target is now required"),a=new z);return this.isEmpty()?a.set(0,0):a.subVectors(this.max,this.min)},expandByPoint:function(a){this.min.min(a);this.max.max(a);return this},expandByVector:function(a){this.min.sub(a);this.max.add(a);return this},expandByScalar:function(a){this.min.addScalar(-a);this.max.addScalar(a);return this},containsPoint:function(a){return a.x<this.min.x||
+a.x>this.max.x||a.y<this.min.y||a.y>this.max.y?!1:!0},containsBox:function(a){return this.min.x<=a.min.x&&a.max.x<=this.max.x&&this.min.y<=a.min.y&&a.max.y<=this.max.y},getParameter:function(a,b){void 0===b&&(console.warn("THREE.Box2: .getParameter() target is now required"),b=new z);return b.set((a.x-this.min.x)/(this.max.x-this.min.x),(a.y-this.min.y)/(this.max.y-this.min.y))},intersectsBox:function(a){return a.max.x<this.min.x||a.min.x>this.max.x||a.max.y<this.min.y||a.min.y>this.max.y?!1:!0},
+clampPoint:function(a,b){void 0===b&&(console.warn("THREE.Box2: .clampPoint() target is now required"),b=new z);return b.copy(a).clamp(this.min,this.max)},distanceToPoint:function(){var a=new z;return function(b){return a.copy(b).clamp(this.min,this.max).sub(b).length()}}(),intersect:function(a){this.min.max(a.min);this.max.min(a.max);return this},union:function(a){this.min.min(a.min);this.max.max(a.max);return this},translate:function(a){this.min.add(a);this.max.add(a);return this},equals:function(a){return a.min.equals(this.min)&&
+a.max.equals(this.max)}});Object.assign(Ce.prototype,{set:function(a,b){this.start.copy(a);this.end.copy(b);return this},clone:function(){return(new this.constructor).copy(this)},copy:function(a){this.start.copy(a.start);this.end.copy(a.end);return this},getCenter:function(a){void 0===a&&(console.warn("THREE.Line3: .getCenter() target is now required"),a=new p);return a.addVectors(this.start,this.end).multiplyScalar(.5)},delta:function(a){void 0===a&&(console.warn("THREE.Line3: .delta() target is now required"),
+a=new p);return a.subVectors(this.end,this.start)},distanceSq:function(){return this.start.distanceToSquared(this.end)},distance:function(){return this.start.distanceTo(this.end)},at:function(a,b){void 0===b&&(console.warn("THREE.Line3: .at() target is now required"),b=new p);return this.delta(b).multiplyScalar(a).add(this.start)},closestPointToPointParameter:function(){var a=new p,b=new p;return function(c,d){a.subVectors(c,this.start);b.subVectors(this.end,this.start);c=b.dot(b);c=b.dot(a)/c;d&&
+(c=R.clamp(c,0,1));return c}}(),closestPointToPoint:function(a,b,c){a=this.closestPointToPointParameter(a,b);void 0===c&&(console.warn("THREE.Line3: .closestPointToPoint() target is now required"),c=new p);return this.delta(c).multiplyScalar(a).add(this.start)},applyMatrix4:function(a){this.start.applyMatrix4(a);this.end.applyMatrix4(a);return this},equals:function(a){return a.start.equals(this.start)&&a.end.equals(this.end)}});jd.prototype=Object.create(D.prototype);jd.prototype.constructor=jd;jd.prototype.isImmediateRenderObject=
+!0;kd.prototype=Object.create(S.prototype);kd.prototype.constructor=kd;kd.prototype.update=function(){var a=new p,b=new p,c=new da;return function(){var d=["a","b","c"];this.object.updateMatrixWorld(!0);c.getNormalMatrix(this.object.matrixWorld);var e=this.object.matrixWorld,f=this.geometry.attributes.position,g=this.object.geometry;if(g&&g.isGeometry)for(var h=g.vertices,k=g.faces,l=g=0,p=k.length;l<p;l++)for(var n=k[l],r=0,x=n.vertexNormals.length;r<x;r++){var t=n.vertexNormals[r];a.copy(h[n[d[r]]]).applyMatrix4(e);
+b.copy(t).applyMatrix3(c).normalize().multiplyScalar(this.size).add(a);f.setXYZ(g,a.x,a.y,a.z);g+=1;f.setXYZ(g,b.x,b.y,b.z);g+=1}else if(g&&g.isBufferGeometry)for(d=g.attributes.position,h=g.attributes.normal,r=g=0,x=d.count;r<x;r++)a.set(d.getX(r),d.getY(r),d.getZ(r)).applyMatrix4(e),b.set(h.getX(r),h.getY(r),h.getZ(r)),b.applyMatrix3(c).normalize().multiplyScalar(this.size).add(a),f.setXYZ(g,a.x,a.y,a.z),g+=1,f.setXYZ(g,b.x,b.y,b.z),g+=1;f.needsUpdate=!0}}();mc.prototype=Object.create(D.prototype);
+mc.prototype.constructor=mc;mc.prototype.dispose=function(){this.cone.geometry.dispose();this.cone.material.dispose()};mc.prototype.update=function(){var a=new p,b=new p;return function(){this.light.updateMatrixWorld();var c=this.light.distance?this.light.distance:1E3,d=c*Math.tan(this.light.angle);this.cone.scale.set(d,d,c);a.setFromMatrixPosition(this.light.matrixWorld);b.setFromMatrixPosition(this.light.target.matrixWorld);this.cone.lookAt(b.sub(a));void 0!==this.color?this.cone.material.color.set(this.color):
+this.cone.material.color.copy(this.light.color)}}();nc.prototype=Object.create(S.prototype);nc.prototype.constructor=nc;nc.prototype.updateMatrixWorld=function(){var a=new p,b=new P,c=new P;return function(d){var e=this.bones,f=this.geometry,g=f.getAttribute("position");c.getInverse(this.root.matrixWorld);for(var h=0,k=0;h<e.length;h++){var l=e[h];l.parent&&l.parent.isBone&&(b.multiplyMatrices(c,l.matrixWorld),a.setFromMatrixPosition(b),g.setXYZ(k,a.x,a.y,a.z),b.multiplyMatrices(c,l.parent.matrixWorld),
+a.setFromMatrixPosition(b),g.setXYZ(k+1,a.x,a.y,a.z),k+=2)}f.getAttribute("position").needsUpdate=!0;D.prototype.updateMatrixWorld.call(this,d)}}();oc.prototype=Object.create(pa.prototype);oc.prototype.constructor=oc;oc.prototype.dispose=function(){this.geometry.dispose();this.material.dispose()};oc.prototype.update=function(){void 0!==this.color?this.material.color.set(this.color):this.material.color.copy(this.light.color)};pc.prototype=Object.create(D.prototype);pc.prototype.constructor=pc;pc.prototype.dispose=
+function(){this.children[0].geometry.dispose();this.children[0].material.dispose()};pc.prototype.update=function(){var a=.5*this.light.width,b=.5*this.light.height,c=this.line.geometry.attributes.position,d=c.array;d[0]=a;d[1]=-b;d[2]=0;d[3]=a;d[4]=b;d[5]=0;d[6]=-a;d[7]=b;d[8]=0;d[9]=-a;d[10]=-b;d[11]=0;d[12]=a;d[13]=-b;d[14]=0;c.needsUpdate=!0;void 0!==this.color?this.line.material.color.set(this.color):this.line.material.color.copy(this.light.color)};qc.prototype=Object.create(D.prototype);qc.prototype.constructor=
+qc;qc.prototype.dispose=function(){this.children[0].geometry.dispose();this.children[0].material.dispose()};qc.prototype.update=function(){var a=new p,b=new G,c=new G;return function(){var d=this.children[0];if(void 0!==this.color)this.material.color.set(this.color);else{var e=d.geometry.getAttribute("color");b.copy(this.light.color);c.copy(this.light.groundColor);for(var f=0,g=e.count;f<g;f++){var h=f<g/2?b:c;e.setXYZ(f,h.r,h.g,h.b)}e.needsUpdate=!0}d.lookAt(a.setFromMatrixPosition(this.light.matrixWorld).negate())}}();
+ld.prototype=Object.create(S.prototype);ld.prototype.constructor=ld;Sd.prototype=Object.create(S.prototype);Sd.prototype.constructor=Sd;md.prototype=Object.create(S.prototype);md.prototype.constructor=md;md.prototype.update=function(){var a=new p,b=new p,c=new da;return function(){this.object.updateMatrixWorld(!0);c.getNormalMatrix(this.object.matrixWorld);var d=this.object.matrixWorld,e=this.geometry.attributes.position,f=this.object.geometry,g=f.vertices;f=f.faces;for(var h=0,k=0,l=f.length;k<l;k++){var p=
+f[k],n=p.normal;a.copy(g[p.a]).add(g[p.b]).add(g[p.c]).divideScalar(3).applyMatrix4(d);b.copy(n).applyMatrix3(c).normalize().multiplyScalar(this.size).add(a);e.setXYZ(h,a.x,a.y,a.z);h+=1;e.setXYZ(h,b.x,b.y,b.z);h+=1}e.needsUpdate=!0}}();rc.prototype=Object.create(D.prototype);rc.prototype.constructor=rc;rc.prototype.dispose=function(){this.lightPlane.geometry.dispose();this.lightPlane.material.dispose();this.targetLine.geometry.dispose();this.targetLine.material.dispose()};rc.prototype.update=function(){var a=
+new p,b=new p,c=new p;return function(){a.setFromMatrixPosition(this.light.matrixWorld);b.setFromMatrixPosition(this.light.target.matrixWorld);c.subVectors(b,a);this.lightPlane.lookAt(c);void 0!==this.color?(this.lightPlane.material.color.set(this.color),this.targetLine.material.color.set(this.color)):(this.lightPlane.material.color.copy(this.light.color),this.targetLine.material.color.copy(this.light.color));this.targetLine.lookAt(c);this.targetLine.scale.z=c.length()}}();nd.prototype=Object.create(S.prototype);
+nd.prototype.constructor=nd;nd.prototype.update=function(){function a(a,g,h,k){d.set(g,h,k).unproject(e);a=c[a];if(void 0!==a)for(g=b.getAttribute("position"),h=0,k=a.length;h<k;h++)g.setXYZ(a[h],d.x,d.y,d.z)}var b,c,d=new p,e=new Ra;return function(){b=this.geometry;c=this.pointMap;e.projectionMatrix.copy(this.camera.projectionMatrix);a("c",0,0,-1);a("t",0,0,1);a("n1",-1,-1,-1);a("n2",1,-1,-1);a("n3",-1,1,-1);a("n4",1,1,-1);a("f1",-1,-1,1);a("f2",1,-1,1);a("f3",-1,1,1);a("f4",1,1,1);a("u1",.7,1.1,
+-1);a("u2",-.7,1.1,-1);a("u3",0,2,-1);a("cf1",-1,0,1);a("cf2",1,0,1);a("cf3",0,-1,1);a("cf4",0,1,1);a("cn1",-1,0,-1);a("cn2",1,0,-1);a("cn3",0,-1,-1);a("cn4",0,1,-1);b.getAttribute("position").needsUpdate=!0}}();bb.prototype=Object.create(S.prototype);bb.prototype.constructor=bb;bb.prototype.update=function(){var a=new Wa;return function(b){void 0!==b&&console.warn("THREE.BoxHelper: .update() has no longer arguments.");void 0!==this.object&&a.setFromObject(this.object);if(!a.isEmpty()){b=a.min;var c=
+a.max,d=this.geometry.attributes.position,e=d.array;e[0]=c.x;e[1]=c.y;e[2]=c.z;e[3]=b.x;e[4]=c.y;e[5]=c.z;e[6]=b.x;e[7]=b.y;e[8]=c.z;e[9]=c.x;e[10]=b.y;e[11]=c.z;e[12]=c.x;e[13]=c.y;e[14]=b.z;e[15]=b.x;e[16]=c.y;e[17]=b.z;e[18]=b.x;e[19]=b.y;e[20]=b.z;e[21]=c.x;e[22]=b.y;e[23]=b.z;d.needsUpdate=!0;this.geometry.computeBoundingSphere()}}}();bb.prototype.setFromObject=function(a){this.object=a;this.update();return this};bb.prototype.copy=function(a){S.prototype.copy.call(this,a);this.object=a.object;
+return this};bb.prototype.clone=function(){return(new this.constructor).copy(this)};od.prototype=Object.create(S.prototype);od.prototype.constructor=od;od.prototype.updateMatrixWorld=function(a){var b=this.box;b.isEmpty()||(b.getCenter(this.position),b.getSize(this.scale),this.scale.multiplyScalar(.5),D.prototype.updateMatrixWorld.call(this,a))};pd.prototype=Object.create(ma.prototype);pd.prototype.constructor=pd;pd.prototype.updateMatrixWorld=function(a){var b=-this.plane.constant;1E-8>Math.abs(b)&&
+(b=1E-8);this.scale.set(.5*this.size,.5*this.size,b);this.children[0].material.side=0>b?1:0;this.lookAt(this.plane.normal);D.prototype.updateMatrixWorld.call(this,a)};var Td,De;cb.prototype=Object.create(D.prototype);cb.prototype.constructor=cb;cb.prototype.setDirection=function(){var a=new p,b;return function(c){.99999<c.y?this.quaternion.set(0,0,0,1):-.99999>c.y?this.quaternion.set(1,0,0,0):(a.set(c.z,0,-c.x).normalize(),b=Math.acos(c.y),this.quaternion.setFromAxisAngle(a,b))}}();cb.prototype.setLength=
+function(a,b,c){void 0===b&&(b=.2*a);void 0===c&&(c=.2*b);this.line.scale.set(1,Math.max(0,a-b),1);this.line.updateMatrix();this.cone.scale.set(c,b,c);this.cone.position.y=a;this.cone.updateMatrix()};cb.prototype.setColor=function(a){this.line.material.color.copy(a);this.cone.material.color.copy(a)};cb.prototype.copy=function(a){D.prototype.copy.call(this,a,!1);this.line.copy(a.line);this.cone.copy(a.cone);return this};cb.prototype.clone=function(){return(new this.constructor).copy(this)};qd.prototype=
+Object.create(S.prototype);qd.prototype.constructor=qd;Q.create=function(a,b){console.log("THREE.Curve.create() has been deprecated");a.prototype=Object.create(Q.prototype);a.prototype.constructor=a;a.prototype.getPoint=b;return a};Object.assign(ab.prototype,{createPointsGeometry:function(a){console.warn("THREE.CurvePath: .createPointsGeometry() has been removed. Use new THREE.Geometry().setFromPoints( points ) instead.");a=this.getPoints(a);return this.createGeometry(a)},createSpacedPointsGeometry:function(a){console.warn("THREE.CurvePath: .createSpacedPointsGeometry() has been removed. Use new THREE.Geometry().setFromPoints( points ) instead.");
+a=this.getSpacedPoints(a);return this.createGeometry(a)},createGeometry:function(a){console.warn("THREE.CurvePath: .createGeometry() has been removed. Use new THREE.Geometry().setFromPoints( points ) instead.");for(var b=new I,c=0,d=a.length;c<d;c++){var e=a[c];b.vertices.push(new p(e.x,e.y,e.z||0))}return b}});Object.assign(Na.prototype,{fromPoints:function(a){console.warn("THREE.Path: .fromPoints() has been renamed to .setFromPoints().");this.setFromPoints(a)}});Ef.prototype=Object.create(ua.prototype);
+Ff.prototype=Object.create(ua.prototype);Ee.prototype=Object.create(ua.prototype);Object.assign(Ee.prototype,{initFromArray:function(){console.error("THREE.Spline: .initFromArray() has been removed.")},getControlPointsArray:function(){console.error("THREE.Spline: .getControlPointsArray() has been removed.")},reparametrizeByArcLength:function(){console.error("THREE.Spline: .reparametrizeByArcLength() has been removed.")}});ld.prototype.setColors=function(){console.error("THREE.GridHelper: setColors() has been deprecated, pass them in the constructor instead.")};
+nc.prototype.update=function(){console.error("THREE.SkeletonHelper: update() no longer needs to be called.")};Object.assign(kc.prototype,{extractUrlBase:function(a){console.warn("THREE.Loader: .extractUrlBase() has been deprecated. Use THREE.LoaderUtils.extractUrlBase() instead.");return Vd.extractUrlBase(a)}});Object.assign(Qd.prototype,{setTexturePath:function(a){console.warn("THREE.JSONLoader: .setTexturePath() has been renamed to .setResourcePath().");return this.setResourcePath(a)}});Object.assign(le.prototype,
+{setTexturePath:function(a){console.warn("THREE.ObjectLoader: .setTexturePath() has been renamed to .setResourcePath().");return this.setResourcePath(a)}});Object.assign(Be.prototype,{center:function(a){console.warn("THREE.Box2: .center() has been renamed to .getCenter().");return this.getCenter(a)},empty:function(){console.warn("THREE.Box2: .empty() has been renamed to .isEmpty().");return this.isEmpty()},isIntersectionBox:function(a){console.warn("THREE.Box2: .isIntersectionBox() has been renamed to .intersectsBox().");
+return this.intersectsBox(a)},size:function(a){console.warn("THREE.Box2: .size() has been renamed to .getSize().");return this.getSize(a)}});Object.assign(Wa.prototype,{center:function(a){console.warn("THREE.Box3: .center() has been renamed to .getCenter().");return this.getCenter(a)},empty:function(){console.warn("THREE.Box3: .empty() has been renamed to .isEmpty().");return this.isEmpty()},isIntersectionBox:function(a){console.warn("THREE.Box3: .isIntersectionBox() has been renamed to .intersectsBox().");
+return this.intersectsBox(a)},isIntersectionSphere:function(a){console.warn("THREE.Box3: .isIntersectionSphere() has been renamed to .intersectsSphere().");return this.intersectsSphere(a)},size:function(a){console.warn("THREE.Box3: .size() has been renamed to .getSize().");return this.getSize(a)}});Ce.prototype.center=function(a){console.warn("THREE.Line3: .center() has been renamed to .getCenter().");return this.getCenter(a)};Object.assign(R,{random16:function(){console.warn("THREE.Math: .random16() has been deprecated. Use Math.random() instead.");
+return Math.random()},nearestPowerOfTwo:function(a){console.warn("THREE.Math: .nearestPowerOfTwo() has been renamed to .floorPowerOfTwo().");return R.floorPowerOfTwo(a)},nextPowerOfTwo:function(a){console.warn("THREE.Math: .nextPowerOfTwo() has been renamed to .ceilPowerOfTwo().");return R.ceilPowerOfTwo(a)}});Object.assign(da.prototype,{flattenToArrayOffset:function(a,b){console.warn("THREE.Matrix3: .flattenToArrayOffset() has been deprecated. Use .toArray() instead.");return this.toArray(a,b)},
+multiplyVector3:function(a){console.warn("THREE.Matrix3: .multiplyVector3() has been removed. Use vector.applyMatrix3( matrix ) instead.");return a.applyMatrix3(this)},multiplyVector3Array:function(){console.error("THREE.Matrix3: .multiplyVector3Array() has been removed.")},applyToBuffer:function(a){console.warn("THREE.Matrix3: .applyToBuffer() has been removed. Use matrix.applyToBufferAttribute( attribute ) instead.");return this.applyToBufferAttribute(a)},applyToVector3Array:function(){console.error("THREE.Matrix3: .applyToVector3Array() has been removed.")}});
+Object.assign(P.prototype,{extractPosition:function(a){console.warn("THREE.Matrix4: .extractPosition() has been renamed to .copyPosition().");return this.copyPosition(a)},flattenToArrayOffset:function(a,b){console.warn("THREE.Matrix4: .flattenToArrayOffset() has been deprecated. Use .toArray() instead.");return this.toArray(a,b)},getPosition:function(){var a;return function(){void 0===a&&(a=new p);console.warn("THREE.Matrix4: .getPosition() has been removed. Use Vector3.setFromMatrixPosition( matrix ) instead.");
+return a.setFromMatrixColumn(this,3)}}(),setRotationFromQuaternion:function(a){console.warn("THREE.Matrix4: .setRotationFromQuaternion() has been renamed to .makeRotationFromQuaternion().");return this.makeRotationFromQuaternion(a)},multiplyToArray:function(){console.warn("THREE.Matrix4: .multiplyToArray() has been removed.")},multiplyVector3:function(a){console.warn("THREE.Matrix4: .multiplyVector3() has been removed. Use vector.applyMatrix4( matrix ) instead.");return a.applyMatrix4(this)},multiplyVector4:function(a){console.warn("THREE.Matrix4: .multiplyVector4() has been removed. Use vector.applyMatrix4( matrix ) instead.");
+return a.applyMatrix4(this)},multiplyVector3Array:function(){console.error("THREE.Matrix4: .multiplyVector3Array() has been removed.")},rotateAxis:function(a){console.warn("THREE.Matrix4: .rotateAxis() has been removed. Use Vector3.transformDirection( matrix ) instead.");a.transformDirection(this)},crossVector:function(a){console.warn("THREE.Matrix4: .crossVector() has been removed. Use vector.applyMatrix4( matrix ) instead.");return a.applyMatrix4(this)},translate:function(){console.error("THREE.Matrix4: .translate() has been removed.")},
+rotateX:function(){console.error("THREE.Matrix4: .rotateX() has been removed.")},rotateY:function(){console.error("THREE.Matrix4: .rotateY() has been removed.")},rotateZ:function(){console.error("THREE.Matrix4: .rotateZ() has been removed.")},rotateByAxis:function(){console.error("THREE.Matrix4: .rotateByAxis() has been removed.")},applyToBuffer:function(a){console.warn("THREE.Matrix4: .applyToBuffer() has been removed. Use matrix.applyToBufferAttribute( attribute ) instead.");return this.applyToBufferAttribute(a)},
+applyToVector3Array:function(){console.error("THREE.Matrix4: .applyToVector3Array() has been removed.")},makeFrustum:function(a,b,c,d,e,f){console.warn("THREE.Matrix4: .makeFrustum() has been removed. Use .makePerspective( left, right, top, bottom, near, far ) instead.");return this.makePerspective(a,b,d,c,e,f)}});Pa.prototype.isIntersectionLine=function(a){console.warn("THREE.Plane: .isIntersectionLine() has been renamed to .intersectsLine().");return this.intersectsLine(a)};ja.prototype.multiplyVector3=
+function(a){console.warn("THREE.Quaternion: .multiplyVector3() has been removed. Use is now vector.applyQuaternion( quaternion ) instead.");return a.applyQuaternion(this)};Object.assign(rb.prototype,{isIntersectionBox:function(a){console.warn("THREE.Ray: .isIntersectionBox() has been renamed to .intersectsBox().");return this.intersectsBox(a)},isIntersectionPlane:function(a){console.warn("THREE.Ray: .isIntersectionPlane() has been renamed to .intersectsPlane().");return this.intersectsPlane(a)},isIntersectionSphere:function(a){console.warn("THREE.Ray: .isIntersectionSphere() has been renamed to .intersectsSphere().");
+return this.intersectsSphere(a)}});Object.assign(ha.prototype,{area:function(){console.warn("THREE.Triangle: .area() has been renamed to .getArea().");return this.getArea()},barycoordFromPoint:function(a,b){console.warn("THREE.Triangle: .barycoordFromPoint() has been renamed to .getBarycoord().");return this.getBarycoord(a,b)},midpoint:function(a){console.warn("THREE.Triangle: .midpoint() has been renamed to .getMidpoint().");return this.getMidpoint(a)},normal:function(a){console.warn("THREE.Triangle: .normal() has been renamed to .getNormal().");
+return this.getNormal(a)},plane:function(a){console.warn("THREE.Triangle: .plane() has been renamed to .getPlane().");return this.getPlane(a)}});Object.assign(ha,{barycoordFromPoint:function(a,b,c,d,e){console.warn("THREE.Triangle: .barycoordFromPoint() has been renamed to .getBarycoord().");return ha.getBarycoord(a,b,c,d,e)},normal:function(a,b,c,d){console.warn("THREE.Triangle: .normal() has been renamed to .getNormal().");return ha.getNormal(a,b,c,d)}});Object.assign(ib.prototype,{extractAllPoints:function(a){console.warn("THREE.Shape: .extractAllPoints() has been removed. Use .extractPoints() instead.");
+return this.extractPoints(a)},extrude:function(a){console.warn("THREE.Shape: .extrude() has been removed. Use ExtrudeGeometry() instead.");return new vb(this,a)},makeGeometry:function(a){console.warn("THREE.Shape: .makeGeometry() has been removed. Use ShapeGeometry() instead.");return new xb(this,a)}});Object.assign(z.prototype,{fromAttribute:function(a,b,c){console.warn("THREE.Vector2: .fromAttribute() has been renamed to .fromBufferAttribute().");return this.fromBufferAttribute(a,b,c)},distanceToManhattan:function(a){console.warn("THREE.Vector2: .distanceToManhattan() has been renamed to .manhattanDistanceTo().");
+return this.manhattanDistanceTo(a)},lengthManhattan:function(){console.warn("THREE.Vector2: .lengthManhattan() has been renamed to .manhattanLength().");return this.manhattanLength()}});Object.assign(p.prototype,{setEulerFromRotationMatrix:function(){console.error("THREE.Vector3: .setEulerFromRotationMatrix() has been removed. Use Euler.setFromRotationMatrix() instead.")},setEulerFromQuaternion:function(){console.error("THREE.Vector3: .setEulerFromQuaternion() has been removed. Use Euler.setFromQuaternion() instead.")},
+getPositionFromMatrix:function(a){console.warn("THREE.Vector3: .getPositionFromMatrix() has been renamed to .setFromMatrixPosition().");return this.setFromMatrixPosition(a)},getScaleFromMatrix:function(a){console.warn("THREE.Vector3: .getScaleFromMatrix() has been renamed to .setFromMatrixScale().");return this.setFromMatrixScale(a)},getColumnFromMatrix:function(a,b){console.warn("THREE.Vector3: .getColumnFromMatrix() has been renamed to .setFromMatrixColumn().");return this.setFromMatrixColumn(b,
+a)},applyProjection:function(a){console.warn("THREE.Vector3: .applyProjection() has been removed. Use .applyMatrix4( m ) instead.");return this.applyMatrix4(a)},fromAttribute:function(a,b,c){console.warn("THREE.Vector3: .fromAttribute() has been renamed to .fromBufferAttribute().");return this.fromBufferAttribute(a,b,c)},distanceToManhattan:function(a){console.warn("THREE.Vector3: .distanceToManhattan() has been renamed to .manhattanDistanceTo().");return this.manhattanDistanceTo(a)},lengthManhattan:function(){console.warn("THREE.Vector3: .lengthManhattan() has been renamed to .manhattanLength().");
+return this.manhattanLength()}});Object.assign(Z.prototype,{fromAttribute:function(a,b,c){console.warn("THREE.Vector4: .fromAttribute() has been renamed to .fromBufferAttribute().");return this.fromBufferAttribute(a,b,c)},lengthManhattan:function(){console.warn("THREE.Vector4: .lengthManhattan() has been renamed to .manhattanLength().");return this.manhattanLength()}});Object.assign(I.prototype,{computeTangents:function(){console.error("THREE.Geometry: .computeTangents() has been removed.")},computeLineDistances:function(){console.error("THREE.Geometry: .computeLineDistances() has been removed. Use THREE.Line.computeLineDistances() instead.")}});
+Object.assign(D.prototype,{getChildByName:function(a){console.warn("THREE.Object3D: .getChildByName() has been renamed to .getObjectByName().");return this.getObjectByName(a)},renderDepth:function(){console.warn("THREE.Object3D: .renderDepth has been removed. Use .renderOrder, instead.")},translate:function(a,b){console.warn("THREE.Object3D: .translate() has been removed. Use .translateOnAxis( axis, distance ) instead.");return this.translateOnAxis(b,a)},getWorldRotation:function(){console.error("THREE.Object3D: .getWorldRotation() has been removed. Use THREE.Object3D.getWorldQuaternion( target ) instead.")}});
+Object.defineProperties(D.prototype,{eulerOrder:{get:function(){console.warn("THREE.Object3D: .eulerOrder is now .rotation.order.");return this.rotation.order},set:function(a){console.warn("THREE.Object3D: .eulerOrder is now .rotation.order.");this.rotation.order=a}},useQuaternion:{get:function(){console.warn("THREE.Object3D: .useQuaternion has been removed. The library now uses quaternions by default.")},set:function(){console.warn("THREE.Object3D: .useQuaternion has been removed. The library now uses quaternions by default.")}}});
+Object.defineProperties(Fc.prototype,{objects:{get:function(){console.warn("THREE.LOD: .objects has been renamed to .levels.");return this.levels}}});Object.defineProperty(Gc.prototype,"useVertexTexture",{get:function(){console.warn("THREE.Skeleton: useVertexTexture has been removed.")},set:function(){console.warn("THREE.Skeleton: useVertexTexture has been removed.")}});Object.defineProperty(Q.prototype,"__arcLengthDivisions",{get:function(){console.warn("THREE.Curve: .__arcLengthDivisions is now .arcLengthDivisions.");
+return this.arcLengthDivisions},set:function(a){console.warn("THREE.Curve: .__arcLengthDivisions is now .arcLengthDivisions.");this.arcLengthDivisions=a}});V.prototype.setLens=function(a,b){console.warn("THREE.PerspectiveCamera.setLens is deprecated. Use .setFocalLength and .filmGauge for a photographic setup.");void 0!==b&&(this.filmGauge=b);this.setFocalLength(a)};Object.defineProperties(ca.prototype,{onlyShadow:{set:function(){console.warn("THREE.Light: .onlyShadow has been removed.")}},shadowCameraFov:{set:function(a){console.warn("THREE.Light: .shadowCameraFov is now .shadow.camera.fov.");
+this.shadow.camera.fov=a}},shadowCameraLeft:{set:function(a){console.warn("THREE.Light: .shadowCameraLeft is now .shadow.camera.left.");this.shadow.camera.left=a}},shadowCameraRight:{set:function(a){console.warn("THREE.Light: .shadowCameraRight is now .shadow.camera.right.");this.shadow.camera.right=a}},shadowCameraTop:{set:function(a){console.warn("THREE.Light: .shadowCameraTop is now .shadow.camera.top.");this.shadow.camera.top=a}},shadowCameraBottom:{set:function(a){console.warn("THREE.Light: .shadowCameraBottom is now .shadow.camera.bottom.");
+this.shadow.camera.bottom=a}},shadowCameraNear:{set:function(a){console.warn("THREE.Light: .shadowCameraNear is now .shadow.camera.near.");this.shadow.camera.near=a}},shadowCameraFar:{set:function(a){console.warn("THREE.Light: .shadowCameraFar is now .shadow.camera.far.");this.shadow.camera.far=a}},shadowCameraVisible:{set:function(){console.warn("THREE.Light: .shadowCameraVisible has been removed. Use new THREE.CameraHelper( light.shadow.camera ) instead.")}},shadowBias:{set:function(a){console.warn("THREE.Light: .shadowBias is now .shadow.bias.");
+this.shadow.bias=a}},shadowDarkness:{set:function(){console.warn("THREE.Light: .shadowDarkness has been removed.")}},shadowMapWidth:{set:function(a){console.warn("THREE.Light: .shadowMapWidth is now .shadow.mapSize.width.");this.shadow.mapSize.width=a}},shadowMapHeight:{set:function(a){console.warn("THREE.Light: .shadowMapHeight is now .shadow.mapSize.height.");this.shadow.mapSize.height=a}}});Object.defineProperties(F.prototype,{length:{get:function(){console.warn("THREE.BufferAttribute: .length has been deprecated. Use .count instead.");
+return this.array.length}},copyIndicesArray:function(){console.error("THREE.BufferAttribute: .copyIndicesArray() has been removed.")}});Object.assign(E.prototype,{addIndex:function(a){console.warn("THREE.BufferGeometry: .addIndex() has been renamed to .setIndex().");this.setIndex(a)},addDrawCall:function(a,b,c){void 0!==c&&console.warn("THREE.BufferGeometry: .addDrawCall() no longer supports indexOffset.");console.warn("THREE.BufferGeometry: .addDrawCall() is now .addGroup().");this.addGroup(a,b)},
+clearDrawCalls:function(){console.warn("THREE.BufferGeometry: .clearDrawCalls() is now .clearGroups().");this.clearGroups()},computeTangents:function(){console.warn("THREE.BufferGeometry: .computeTangents() has been removed.")},computeOffsets:function(){console.warn("THREE.BufferGeometry: .computeOffsets() has been removed.")}});Object.defineProperties(E.prototype,{drawcalls:{get:function(){console.error("THREE.BufferGeometry: .drawcalls has been renamed to .groups.");return this.groups}},offsets:{get:function(){console.warn("THREE.BufferGeometry: .offsets has been renamed to .groups.");
+return this.groups}}});Object.assign(Sa.prototype,{getArrays:function(){console.error("THREE.ExtrudeBufferGeometry: .getArrays() has been removed.")},addShapeList:function(){console.error("THREE.ExtrudeBufferGeometry: .addShapeList() has been removed.")},addShape:function(){console.error("THREE.ExtrudeBufferGeometry: .addShape() has been removed.")}});Object.defineProperties(Rd.prototype,{dynamic:{set:function(){console.warn("THREE.Uniform: .dynamic has been removed. Use object.onBeforeRender() instead.")}},
+onUpdate:{value:function(){console.warn("THREE.Uniform: .onUpdate() has been removed. Use object.onBeforeRender() instead.");return this}}});Object.defineProperties(L.prototype,{wrapAround:{get:function(){console.warn("THREE.Material: .wrapAround has been removed.")},set:function(){console.warn("THREE.Material: .wrapAround has been removed.")}},overdraw:{get:function(){console.warn("THREE.Material: .overdraw has been removed.")},set:function(){console.warn("THREE.Material: .overdraw has been removed.")}},
+wrapRGB:{get:function(){console.warn("THREE.Material: .wrapRGB has been removed.");return new G}},shading:{get:function(){console.error("THREE."+this.type+": .shading has been removed. Use the boolean .flatShading instead.")},set:function(a){console.warn("THREE."+this.type+": .shading has been removed. Use the boolean .flatShading instead.");this.flatShading=1===a}}});Object.defineProperties(Ia.prototype,{metal:{get:function(){console.warn("THREE.MeshPhongMaterial: .metal has been removed. Use THREE.MeshStandardMaterial instead.");
+return!1},set:function(){console.warn("THREE.MeshPhongMaterial: .metal has been removed. Use THREE.MeshStandardMaterial instead")}}});Object.defineProperties(ka.prototype,{derivatives:{get:function(){console.warn("THREE.ShaderMaterial: .derivatives has been moved to .extensions.derivatives.");return this.extensions.derivatives},set:function(a){console.warn("THREE. ShaderMaterial: .derivatives has been moved to .extensions.derivatives.");this.extensions.derivatives=a}}});Object.assign(ce.prototype,
+{clearTarget:function(a,b,c,d){console.warn("THREE.WebGLRenderer: .clearTarget() has been deprecated. Use .setRenderTarget() and .clear() instead.");this.setRenderTarget(a);this.clear(b,c,d)},animate:function(a){console.warn("THREE.WebGLRenderer: .animate() is now .setAnimationLoop().");this.setAnimationLoop(a)},getCurrentRenderTarget:function(){console.warn("THREE.WebGLRenderer: .getCurrentRenderTarget() is now .getRenderTarget().");return this.getRenderTarget()},getMaxAnisotropy:function(){console.warn("THREE.WebGLRenderer: .getMaxAnisotropy() is now .capabilities.getMaxAnisotropy().");
+return this.capabilities.getMaxAnisotropy()},getPrecision:function(){console.warn("THREE.WebGLRenderer: .getPrecision() is now .capabilities.precision.");return this.capabilities.precision},resetGLState:function(){console.warn("THREE.WebGLRenderer: .resetGLState() is now .state.reset().");return this.state.reset()},supportsFloatTextures:function(){console.warn("THREE.WebGLRenderer: .supportsFloatTextures() is now .extensions.get( 'OES_texture_float' ).");return this.extensions.get("OES_texture_float")},
+supportsHalfFloatTextures:function(){console.warn("THREE.WebGLRenderer: .supportsHalfFloatTextures() is now .extensions.get( 'OES_texture_half_float' ).");return this.extensions.get("OES_texture_half_float")},supportsStandardDerivatives:function(){console.warn("THREE.WebGLRenderer: .supportsStandardDerivatives() is now .extensions.get( 'OES_standard_derivatives' ).");return this.extensions.get("OES_standard_derivatives")},supportsCompressedTextureS3TC:function(){console.warn("THREE.WebGLRenderer: .supportsCompressedTextureS3TC() is now .extensions.get( 'WEBGL_compressed_texture_s3tc' ).");
+return this.extensions.get("WEBGL_compressed_texture_s3tc")},supportsCompressedTexturePVRTC:function(){console.warn("THREE.WebGLRenderer: .supportsCompressedTexturePVRTC() is now .extensions.get( 'WEBGL_compressed_texture_pvrtc' ).");return this.extensions.get("WEBGL_compressed_texture_pvrtc")},supportsBlendMinMax:function(){console.warn("THREE.WebGLRenderer: .supportsBlendMinMax() is now .extensions.get( 'EXT_blend_minmax' ).");return this.extensions.get("EXT_blend_minmax")},supportsVertexTextures:function(){console.warn("THREE.WebGLRenderer: .supportsVertexTextures() is now .capabilities.vertexTextures.");
+return this.capabilities.vertexTextures},supportsInstancedArrays:function(){console.warn("THREE.WebGLRenderer: .supportsInstancedArrays() is now .extensions.get( 'ANGLE_instanced_arrays' ).");return this.extensions.get("ANGLE_instanced_arrays")},enableScissorTest:function(a){console.warn("THREE.WebGLRenderer: .enableScissorTest() is now .setScissorTest().");this.setScissorTest(a)},initMaterial:function(){console.warn("THREE.WebGLRenderer: .initMaterial() has been removed.")},addPrePlugin:function(){console.warn("THREE.WebGLRenderer: .addPrePlugin() has been removed.")},
+addPostPlugin:function(){console.warn("THREE.WebGLRenderer: .addPostPlugin() has been removed.")},updateShadowMap:function(){console.warn("THREE.WebGLRenderer: .updateShadowMap() has been removed.")},setFaceCulling:function(){console.warn("THREE.WebGLRenderer: .setFaceCulling() has been removed.")}});Object.defineProperties(ce.prototype,{shadowMapEnabled:{get:function(){return this.shadowMap.enabled},set:function(a){console.warn("THREE.WebGLRenderer: .shadowMapEnabled is now .shadowMap.enabled.");
+this.shadowMap.enabled=a}},shadowMapType:{get:function(){return this.shadowMap.type},set:function(a){console.warn("THREE.WebGLRenderer: .shadowMapType is now .shadowMap.type.");this.shadowMap.type=a}},shadowMapCullFace:{get:function(){console.warn("THREE.WebGLRenderer: .shadowMapCullFace has been removed. Set Material.shadowSide instead.")},set:function(){console.warn("THREE.WebGLRenderer: .shadowMapCullFace has been removed. Set Material.shadowSide instead.")}}});Object.defineProperties(cf.prototype,
+{cullFace:{get:function(){console.warn("THREE.WebGLRenderer: .shadowMap.cullFace has been removed. Set Material.shadowSide instead.")},set:function(){console.warn("THREE.WebGLRenderer: .shadowMap.cullFace has been removed. Set Material.shadowSide instead.")}},renderReverseSided:{get:function(){console.warn("THREE.WebGLRenderer: .shadowMap.renderReverseSided has been removed. Set Material.shadowSide instead.")},set:function(){console.warn("THREE.WebGLRenderer: .shadowMap.renderReverseSided has been removed. Set Material.shadowSide instead.")}},
+renderSingleSided:{get:function(){console.warn("THREE.WebGLRenderer: .shadowMap.renderSingleSided has been removed. Set Material.shadowSide instead.")},set:function(){console.warn("THREE.WebGLRenderer: .shadowMap.renderSingleSided has been removed. Set Material.shadowSide instead.")}}});Object.defineProperties(kb.prototype,{wrapS:{get:function(){console.warn("THREE.WebGLRenderTarget: .wrapS is now .texture.wrapS.");return this.texture.wrapS},set:function(a){console.warn("THREE.WebGLRenderTarget: .wrapS is now .texture.wrapS.");
+this.texture.wrapS=a}},wrapT:{get:function(){console.warn("THREE.WebGLRenderTarget: .wrapT is now .texture.wrapT.");return this.texture.wrapT},set:function(a){console.warn("THREE.WebGLRenderTarget: .wrapT is now .texture.wrapT.");this.texture.wrapT=a}},magFilter:{get:function(){console.warn("THREE.WebGLRenderTarget: .magFilter is now .texture.magFilter.");return this.texture.magFilter},set:function(a){console.warn("THREE.WebGLRenderTarget: .magFilter is now .texture.magFilter.");this.texture.magFilter=
+a}},minFilter:{get:function(){console.warn("THREE.WebGLRenderTarget: .minFilter is now .texture.minFilter.");return this.texture.minFilter},set:function(a){console.warn("THREE.WebGLRenderTarget: .minFilter is now .texture.minFilter.");this.texture.minFilter=a}},anisotropy:{get:function(){console.warn("THREE.WebGLRenderTarget: .anisotropy is now .texture.anisotropy.");return this.texture.anisotropy},set:function(a){console.warn("THREE.WebGLRenderTarget: .anisotropy is now .texture.anisotropy.");this.texture.anisotropy=
+a}},offset:{get:function(){console.warn("THREE.WebGLRenderTarget: .offset is now .texture.offset.");return this.texture.offset},set:function(a){console.warn("THREE.WebGLRenderTarget: .offset is now .texture.offset.");this.texture.offset=a}},repeat:{get:function(){console.warn("THREE.WebGLRenderTarget: .repeat is now .texture.repeat.");return this.texture.repeat},set:function(a){console.warn("THREE.WebGLRenderTarget: .repeat is now .texture.repeat.");this.texture.repeat=a}},format:{get:function(){console.warn("THREE.WebGLRenderTarget: .format is now .texture.format.");
+return this.texture.format},set:function(a){console.warn("THREE.WebGLRenderTarget: .format is now .texture.format.");this.texture.format=a}},type:{get:function(){console.warn("THREE.WebGLRenderTarget: .type is now .texture.type.");return this.texture.type},set:function(a){console.warn("THREE.WebGLRenderTarget: .type is now .texture.type.");this.texture.type=a}},generateMipmaps:{get:function(){console.warn("THREE.WebGLRenderTarget: .generateMipmaps is now .texture.generateMipmaps.");return this.texture.generateMipmaps},
+set:function(a){console.warn("THREE.WebGLRenderTarget: .generateMipmaps is now .texture.generateMipmaps.");this.texture.generateMipmaps=a}}});Object.defineProperties(hf.prototype,{standing:{set:function(){console.warn("THREE.WebVRManager: .standing has been removed.")}},userHeight:{set:function(){console.warn("THREE.WebVRManager: .userHeight has been removed.")}}});lc.prototype.load=function(a){console.warn("THREE.Audio: .load has been deprecated. Use THREE.AudioLoader instead.");var b=this;(new pe).load(a,
+function(a){b.setBuffer(a)});return this};ue.prototype.getData=function(){console.warn("THREE.AudioAnalyser: .getData() is now .getFrequencyData().");return this.getFrequencyData()};id.prototype.updateCubeMap=function(a,b){console.warn("THREE.CubeCamera: .updateCubeMap() is now .update().");return this.update(a,b)};jb.crossOrigin=void 0;jb.loadTexture=function(a,b,c,d){console.warn("THREE.ImageUtils.loadTexture has been deprecated. Use THREE.TextureLoader() instead.");var e=new Gd;e.setCrossOrigin(this.crossOrigin);
+a=e.load(a,c,void 0,d);b&&(a.mapping=b);return a};jb.loadTextureCube=function(a,b,c,d){console.warn("THREE.ImageUtils.loadTextureCube has been deprecated. Use THREE.CubeTextureLoader() instead.");var e=new ie;e.setCrossOrigin(this.crossOrigin);a=e.load(a,c,void 0,d);b&&(a.mapping=b);return a};jb.loadCompressedTexture=function(){console.error("THREE.ImageUtils.loadCompressedTexture has been removed. Use THREE.DDSLoader instead.")};jb.loadCompressedTextureCube=function(){console.error("THREE.ImageUtils.loadCompressedTextureCube has been removed. Use THREE.DDSLoader instead.")};
+l.WebGLRenderTargetCube=Jb;l.WebGLRenderTarget=kb;l.WebGLRenderer=ce;l.ShaderLib=Qa;l.UniformsLib=J;l.UniformsUtils=va;l.ShaderChunk=K;l.FogExp2=Pb;l.Fog=Qb;l.Scene=vd;l.Sprite=Ec;l.LOD=Fc;l.SkinnedMesh=xd;l.Skeleton=Gc;l.Bone=wd;l.Mesh=pa;l.LineSegments=S;l.LineLoop=yd;l.Line=ma;l.Points=Sb;l.Group=Ob;l.VideoTexture=de;l.DataTexture=lb;l.DataTexture3D=Mb;l.CompressedTexture=Tb;l.CubeTexture=Ya;l.CanvasTexture=Hc;l.DepthTexture=Ic;l.Texture=W;l.AnimationLoader=rf;l.CompressedTextureLoader=sf;l.DataTextureLoader=
+he;l.CubeTextureLoader=ie;l.TextureLoader=Gd;l.ObjectLoader=le;l.MaterialLoader=Pd;l.BufferGeometryLoader=ke;l.DefaultLoadingManager=ta;l.LoadingManager=ge;l.JSONLoader=Qd;l.ImageLoader=ed;l.ImageBitmapLoader=me;l.FontLoader=uf;l.FileLoader=Fa;l.Loader=kc;l.LoaderUtils=Vd;l.Cache=Ib;l.AudioLoader=pe;l.SpotLightShadow=Id;l.SpotLight=Jd;l.PointLight=Kd;l.RectAreaLight=Od;l.HemisphereLight=Hd;l.DirectionalLightShadow=Ld;l.DirectionalLight=Md;l.AmbientLight=Nd;l.LightShadow=Hb;l.Light=ca;l.StereoCamera=
+vf;l.PerspectiveCamera=V;l.OrthographicCamera=hd;l.CubeCamera=id;l.ArrayCamera=Cc;l.Camera=Ra;l.AudioListener=re;l.PositionalAudio=te;l.AudioContext=se;l.AudioAnalyser=ue;l.Audio=lc;l.VectorKeyframeTrack=ic;l.StringKeyframeTrack=Fd;l.QuaternionKeyframeTrack=dd;l.NumberKeyframeTrack=hc;l.ColorKeyframeTrack=Dd;l.BooleanKeyframeTrack=Cd;l.PropertyMixer=ve;l.PropertyBinding=oa;l.KeyframeTrack=qa;l.AnimationUtils=ra;l.AnimationObjectGroup=xf;l.AnimationMixer=we;l.AnimationClip=za;l.Uniform=Rd;l.InstancedBufferGeometry=
+xe;l.BufferGeometry=E;l.Geometry=I;l.InterleavedBufferAttribute=Dc;l.InstancedInterleavedBuffer=ye;l.InterleavedBuffer=sb;l.InstancedBufferAttribute=ze;l.Face3=Xa;l.Object3D=D;l.Raycaster=zf;l.Layers=Yd;l.EventDispatcher=ia;l.Clock=qe;l.QuaternionLinearInterpolant=Ed;l.LinearInterpolant=cd;l.DiscreteInterpolant=Bd;l.CubicInterpolant=Ad;l.Interpolant=Ca;l.Triangle=ha;l.Math=R;l.Spherical=Bf;l.Cylindrical=Cf;l.Plane=Pa;l.Frustum=rd;l.Sphere=Ga;l.Ray=rb;l.Matrix4=P;l.Matrix3=da;l.Box3=Wa;l.Box2=Be;l.Line3=
+Ce;l.Euler=mb;l.Vector4=Z;l.Vector3=p;l.Vector2=z;l.Quaternion=ja;l.Color=G;l.ImmediateRenderObject=jd;l.VertexNormalsHelper=kd;l.SpotLightHelper=mc;l.SkeletonHelper=nc;l.PointLightHelper=oc;l.RectAreaLightHelper=pc;l.HemisphereLightHelper=qc;l.GridHelper=ld;l.PolarGridHelper=Sd;l.FaceNormalsHelper=md;l.DirectionalLightHelper=rc;l.CameraHelper=nd;l.BoxHelper=bb;l.Box3Helper=od;l.PlaneHelper=pd;l.ArrowHelper=cb;l.AxesHelper=qd;l.Shape=ib;l.Path=Na;l.ShapePath=ne;l.Font=oe;l.CurvePath=ab;l.Curve=Q;
+l.ImageUtils=jb;l.ShapeUtils=Za;l.WebGLUtils=df;l.WireframeGeometry=Ub;l.ParametricGeometry=Jc;l.ParametricBufferGeometry=Vb;l.TetrahedronGeometry=Lc;l.TetrahedronBufferGeometry=Wb;l.OctahedronGeometry=Mc;l.OctahedronBufferGeometry=tb;l.IcosahedronGeometry=Nc;l.IcosahedronBufferGeometry=Xb;l.DodecahedronGeometry=Oc;l.DodecahedronBufferGeometry=Yb;l.PolyhedronGeometry=Kc;l.PolyhedronBufferGeometry=ya;l.TubeGeometry=Pc;l.TubeBufferGeometry=Zb;l.TorusKnotGeometry=Qc;l.TorusKnotBufferGeometry=$b;l.TorusGeometry=
+Rc;l.TorusBufferGeometry=ac;l.TextGeometry=Wc;l.TextBufferGeometry=bc;l.SphereGeometry=Xc;l.SphereBufferGeometry=wb;l.RingGeometry=Yc;l.RingBufferGeometry=cc;l.PlaneGeometry=yc;l.PlaneBufferGeometry=qb;l.LatheGeometry=Zc;l.LatheBufferGeometry=dc;l.ShapeGeometry=xb;l.ShapeBufferGeometry=yb;l.ExtrudeGeometry=vb;l.ExtrudeBufferGeometry=Sa;l.EdgesGeometry=ec;l.ConeGeometry=$c;l.ConeBufferGeometry=ad;l.CylinderGeometry=zb;l.CylinderBufferGeometry=$a;l.CircleGeometry=bd;l.CircleBufferGeometry=fc;l.BoxGeometry=
+Kb;l.BoxBufferGeometry=pb;l.ShadowMaterial=Ab;l.SpriteMaterial=hb;l.RawShaderMaterial=gc;l.ShaderMaterial=ka;l.PointsMaterial=Ha;l.MeshPhysicalMaterial=Bb;l.MeshStandardMaterial=Ta;l.MeshPhongMaterial=Ia;l.MeshToonMaterial=Cb;l.MeshNormalMaterial=Db;l.MeshLambertMaterial=Eb;l.MeshDepthMaterial=eb;l.MeshDistanceMaterial=fb;l.MeshBasicMaterial=Ea;l.MeshMatcapMaterial=Fb;l.LineDashedMaterial=Gb;l.LineBasicMaterial=T;l.Material=L;l.Float64BufferAttribute=xc;l.Float32BufferAttribute=C;l.Uint32BufferAttribute=
+ob;l.Int32BufferAttribute=wc;l.Uint16BufferAttribute=nb;l.Int16BufferAttribute=vc;l.Uint8ClampedBufferAttribute=uc;l.Uint8BufferAttribute=tc;l.Int8BufferAttribute=sc;l.BufferAttribute=F;l.ArcCurve=jc;l.CatmullRomCurve3=ua;l.CubicBezierCurve=Ja;l.CubicBezierCurve3=Ua;l.EllipseCurve=wa;l.LineCurve=Aa;l.LineCurve3=Ka;l.QuadraticBezierCurve=La;l.QuadraticBezierCurve3=Va;l.SplineCurve=Ma;l.REVISION="98";l.MOUSE={LEFT:0,MIDDLE:1,RIGHT:2};l.CullFaceNone=0;l.CullFaceBack=1;l.CullFaceFront=2;l.CullFaceFrontBack=
+3;l.FrontFaceDirectionCW=0;l.FrontFaceDirectionCCW=1;l.BasicShadowMap=0;l.PCFShadowMap=1;l.PCFSoftShadowMap=2;l.FrontSide=0;l.BackSide=1;l.DoubleSide=2;l.FlatShading=1;l.SmoothShading=2;l.NoColors=0;l.FaceColors=1;l.VertexColors=2;l.NoBlending=0;l.NormalBlending=1;l.AdditiveBlending=2;l.SubtractiveBlending=3;l.MultiplyBlending=4;l.CustomBlending=5;l.AddEquation=100;l.SubtractEquation=101;l.ReverseSubtractEquation=102;l.MinEquation=103;l.MaxEquation=104;l.ZeroFactor=200;l.OneFactor=201;l.SrcColorFactor=
+202;l.OneMinusSrcColorFactor=203;l.SrcAlphaFactor=204;l.OneMinusSrcAlphaFactor=205;l.DstAlphaFactor=206;l.OneMinusDstAlphaFactor=207;l.DstColorFactor=208;l.OneMinusDstColorFactor=209;l.SrcAlphaSaturateFactor=210;l.NeverDepth=0;l.AlwaysDepth=1;l.LessDepth=2;l.LessEqualDepth=3;l.EqualDepth=4;l.GreaterEqualDepth=5;l.GreaterDepth=6;l.NotEqualDepth=7;l.MultiplyOperation=0;l.MixOperation=1;l.AddOperation=2;l.NoToneMapping=0;l.LinearToneMapping=1;l.ReinhardToneMapping=2;l.Uncharted2ToneMapping=3;l.CineonToneMapping=
+4;l.UVMapping=300;l.CubeReflectionMapping=301;l.CubeRefractionMapping=302;l.EquirectangularReflectionMapping=303;l.EquirectangularRefractionMapping=304;l.SphericalReflectionMapping=305;l.CubeUVReflectionMapping=306;l.CubeUVRefractionMapping=307;l.RepeatWrapping=1E3;l.ClampToEdgeWrapping=1001;l.MirroredRepeatWrapping=1002;l.NearestFilter=1003;l.NearestMipMapNearestFilter=1004;l.NearestMipMapLinearFilter=1005;l.LinearFilter=1006;l.LinearMipMapNearestFilter=1007;l.LinearMipMapLinearFilter=1008;l.UnsignedByteType=
+1009;l.ByteType=1010;l.ShortType=1011;l.UnsignedShortType=1012;l.IntType=1013;l.UnsignedIntType=1014;l.FloatType=1015;l.HalfFloatType=1016;l.UnsignedShort4444Type=1017;l.UnsignedShort5551Type=1018;l.UnsignedShort565Type=1019;l.UnsignedInt248Type=1020;l.AlphaFormat=1021;l.RGBFormat=1022;l.RGBAFormat=1023;l.LuminanceFormat=1024;l.LuminanceAlphaFormat=1025;l.RGBEFormat=1023;l.DepthFormat=1026;l.DepthStencilFormat=1027;l.RedFormat=1028;l.RGB_S3TC_DXT1_Format=33776;l.RGBA_S3TC_DXT1_Format=33777;l.RGBA_S3TC_DXT3_Format=
+33778;l.RGBA_S3TC_DXT5_Format=33779;l.RGB_PVRTC_4BPPV1_Format=35840;l.RGB_PVRTC_2BPPV1_Format=35841;l.RGBA_PVRTC_4BPPV1_Format=35842;l.RGBA_PVRTC_2BPPV1_Format=35843;l.RGB_ETC1_Format=36196;l.RGBA_ASTC_4x4_Format=37808;l.RGBA_ASTC_5x4_Format=37809;l.RGBA_ASTC_5x5_Format=37810;l.RGBA_ASTC_6x5_Format=37811;l.RGBA_ASTC_6x6_Format=37812;l.RGBA_ASTC_8x5_Format=37813;l.RGBA_ASTC_8x6_Format=37814;l.RGBA_ASTC_8x8_Format=37815;l.RGBA_ASTC_10x5_Format=37816;l.RGBA_ASTC_10x6_Format=37817;l.RGBA_ASTC_10x8_Format=
+37818;l.RGBA_ASTC_10x10_Format=37819;l.RGBA_ASTC_12x10_Format=37820;l.RGBA_ASTC_12x12_Format=37821;l.LoopOnce=2200;l.LoopRepeat=2201;l.LoopPingPong=2202;l.InterpolateDiscrete=2300;l.InterpolateLinear=2301;l.InterpolateSmooth=2302;l.ZeroCurvatureEnding=2400;l.ZeroSlopeEnding=2401;l.WrapAroundEnding=2402;l.TrianglesDrawMode=0;l.TriangleStripDrawMode=1;l.TriangleFanDrawMode=2;l.LinearEncoding=3E3;l.sRGBEncoding=3001;l.GammaEncoding=3007;l.RGBEEncoding=3002;l.LogLuvEncoding=3003;l.RGBM7Encoding=3004;
+l.RGBM16Encoding=3005;l.RGBDEncoding=3006;l.BasicDepthPacking=3200;l.RGBADepthPacking=3201;l.TangentSpaceNormalMap=0;l.ObjectSpaceNormalMap=1;l.CubeGeometry=Kb;l.Face4=function(a,b,c,d,e,f,g){console.warn("THREE.Face4 has been removed. A THREE.Face3 will be created instead.");return new Xa(a,b,c,e,f,g)};l.LineStrip=0;l.LinePieces=1;l.MeshFaceMaterial=function(a){console.warn("THREE.MeshFaceMaterial has been removed. Use an Array instead.");return a};l.MultiMaterial=function(a){void 0===a&&(a=[]);
+console.warn("THREE.MultiMaterial has been removed. Use an Array instead.");a.isMultiMaterial=!0;a.materials=a;a.clone=function(){return a.slice()};return a};l.PointCloud=function(a,b){console.warn("THREE.PointCloud has been renamed to THREE.Points.");return new Sb(a,b)};l.Particle=function(a){console.warn("THREE.Particle has been renamed to THREE.Sprite.");return new Ec(a)};l.ParticleSystem=function(a,b){console.warn("THREE.ParticleSystem has been renamed to THREE.Points.");return new Sb(a,b)};l.PointCloudMaterial=
+function(a){console.warn("THREE.PointCloudMaterial has been renamed to THREE.PointsMaterial.");return new Ha(a)};l.ParticleBasicMaterial=function(a){console.warn("THREE.ParticleBasicMaterial has been renamed to THREE.PointsMaterial.");return new Ha(a)};l.ParticleSystemMaterial=function(a){console.warn("THREE.ParticleSystemMaterial has been renamed to THREE.PointsMaterial.");return new Ha(a)};l.Vertex=function(a,b,c){console.warn("THREE.Vertex has been removed. Use THREE.Vector3 instead.");return new p(a,
+b,c)};l.DynamicBufferAttribute=function(a,b){console.warn("THREE.DynamicBufferAttribute has been removed. Use new THREE.BufferAttribute().setDynamic( true ) instead.");return(new F(a,b)).setDynamic(!0)};l.Int8Attribute=function(a,b){console.warn("THREE.Int8Attribute has been removed. Use new THREE.Int8BufferAttribute() instead.");return new sc(a,b)};l.Uint8Attribute=function(a,b){console.warn("THREE.Uint8Attribute has been removed. Use new THREE.Uint8BufferAttribute() instead.");return new tc(a,b)};
+l.Uint8ClampedAttribute=function(a,b){console.warn("THREE.Uint8ClampedAttribute has been removed. Use new THREE.Uint8ClampedBufferAttribute() instead.");return new uc(a,b)};l.Int16Attribute=function(a,b){console.warn("THREE.Int16Attribute has been removed. Use new THREE.Int16BufferAttribute() instead.");return new vc(a,b)};l.Uint16Attribute=function(a,b){console.warn("THREE.Uint16Attribute has been removed. Use new THREE.Uint16BufferAttribute() instead.");return new nb(a,b)};l.Int32Attribute=function(a,
+b){console.warn("THREE.Int32Attribute has been removed. Use new THREE.Int32BufferAttribute() instead.");return new wc(a,b)};l.Uint32Attribute=function(a,b){console.warn("THREE.Uint32Attribute has been removed. Use new THREE.Uint32BufferAttribute() instead.");return new ob(a,b)};l.Float32Attribute=function(a,b){console.warn("THREE.Float32Attribute has been removed. Use new THREE.Float32BufferAttribute() instead.");return new C(a,b)};l.Float64Attribute=function(a,b){console.warn("THREE.Float64Attribute has been removed. Use new THREE.Float64BufferAttribute() instead.");
+return new xc(a,b)};l.ClosedSplineCurve3=Ef;l.SplineCurve3=Ff;l.Spline=Ee;l.AxisHelper=function(a){console.warn("THREE.AxisHelper has been renamed to THREE.AxesHelper.");return new qd(a)};l.BoundingBoxHelper=function(a,b){console.warn("THREE.BoundingBoxHelper has been deprecated. Creating a THREE.BoxHelper instead.");return new bb(a,b)};l.EdgesHelper=function(a,b){console.warn("THREE.EdgesHelper has been removed. Use THREE.EdgesGeometry instead.");return new S(new ec(a.geometry),new T({color:void 0!==
+b?b:16777215}))};l.WireframeHelper=function(a,b){console.warn("THREE.WireframeHelper has been removed. Use THREE.WireframeGeometry instead.");return new S(new Ub(a.geometry),new T({color:void 0!==b?b:16777215}))};l.XHRLoader=function(a){console.warn("THREE.XHRLoader has been renamed to THREE.FileLoader.");return new Fa(a)};l.BinaryTextureLoader=function(a){console.warn("THREE.BinaryTextureLoader has been renamed to THREE.DataTextureLoader.");return new he(a)};l.GeometryUtils={merge:function(a,b,c){console.warn("THREE.GeometryUtils: .merge() has been moved to Geometry. Use geometry.merge( geometry2, matrix, materialIndexOffset ) instead.");
+if(b.isMesh){b.matrixAutoUpdate&&b.updateMatrix();var d=b.matrix;b=b.geometry}a.merge(b,d,c)},center:function(a){console.warn("THREE.GeometryUtils: .center() has been moved to Geometry. Use geometry.center() instead.");return a.center()}};l.Projector=function(){console.error("THREE.Projector has been moved to /examples/js/renderers/Projector.js.");this.projectVector=function(a,b){console.warn("THREE.Projector: .projectVector() is now vector.project().");a.project(b)};this.unprojectVector=function(a,
+b){console.warn("THREE.Projector: .unprojectVector() is now vector.unproject().");a.unproject(b)};this.pickingRay=function(){console.error("THREE.Projector: .pickingRay() is now raycaster.setFromCamera().")}};l.CanvasRenderer=function(){console.error("THREE.CanvasRenderer has been removed")};l.SceneUtils={createMultiMaterialObject:function(){console.error("THREE.SceneUtils has been moved to /examples/js/utils/SceneUtils.js")},detach:function(){console.error("THREE.SceneUtils has been moved to /examples/js/utils/SceneUtils.js")},
+attach:function(){console.error("THREE.SceneUtils has been moved to /examples/js/utils/SceneUtils.js")}};l.LensFlare=function(){console.error("THREE.LensFlare has been moved to /examples/js/objects/Lensflare.js")};Object.defineProperty(l,"__esModule",{value:!0})});
diff --git a/site/assets/test/face.html b/site/assets/test/face.html
new file mode 100644
index 00000000..598a5891
--- /dev/null
+++ b/site/assets/test/face.html
@@ -0,0 +1,227 @@
+<!doctype html>
+<html lang="en">
+<head>
+<title>face points</title>
+<meta charset="utf-8">
+<meta name="viewport" content="width=device-width, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0">
+<style>
+html, body { margin: 0; padding: 0; font-family: sans-serif; color: white; }
+.currentFace { position: absolute; bottom: 10px; right: 10px; }
+</style>
+</head>
+<body>
+<div id="container"></div>
+<div class="currentFace"></div>
+<script src="../js/vendor/three.min.js"></script>
+<script src="../js/vendor/three.meshline.js"></script>
+<script>
+// --- scene globals ---
+var container
+var camera, controls, scene, renderer
+// mouse: live cursor position normalized to [0, 1]; mouseTarget eases toward it each frame
+var mouse = new THREE.Vector2(0.5, 0.5)
+var mouseTarget = new THREE.Vector2(0.5, 0.5)
+// uniform scale applied to each landmark cube
+var POINT_SCALE = 1.8
+// landmarks per face (68-point facial landmark convention)
+var FACE_POINT_COUNT = 68
+// duration of a face-to-face morph, in milliseconds
+var SWAP_TIME = 500
+// cubes: one mesh per landmark; meshes: [MeshLine, mesh] pairs per feature group
+var cubes = [], meshes = []
+// scratch Vector3 array reused as the morph interpolation target
+var faceBuffer = makeFaceBuffer()
// Allocate a reusable array of FACE_POINT_COUNT zeroed Vector3s; it is the
// write target for the face-morph interpolation.
function makeFaceBuffer() {
  var buffer = []
  for (var i = 0; i < FACE_POINT_COUNT; i++) {
    buffer.push(new THREE.Vector3())
  }
  return buffer
}
+// animation clock: last_t is the most recent frame timestamp, start_t the
+// timestamp at which the current morph began
+var last_t = 0, start_t = 0
+// per-feature-group line colors, cycled by group index
+var colors = [
+ 0xff3333,
+ 0xff8833,
+ 0xffff33,
+ 0x338833,
+ 0x3388ff,
+ 0x3333ff,
+ 0x8833ff,
+ 0xff3388,
+ 0xffffff,
+]
+// morph state: swapping is true while interpolating swapFrom -> swapTo;
+// face_names / faces are filled from the fetched landmark JSON
+var swapping = false, swap_count = 0, swapFrom, swapTo, face_names, faces
+init()
+
+function choice(a) { return a[Math.floor(Math.random()*a.length)]}
// Fetch the 3D landmark data, then build the scene, show the first face,
// and start the render loop.
function init() {
  var url = "/assets/data/3dlm_0_10.json"
  fetch(url)
    .then(function (response) { return response.json() })
    .then(function (data) {
      face_names = Object.keys(data)
      faces = face_names.map(function (name) { return recenter(data[name]) })
      setup()
      build(faces[0])
      updateFace(faces[0])
      document.querySelector('.currentFace').innerHTML = face_names[0]
      swapTo = faces[0]
      animate()
    })
}
// Create the camera, scene, and WebGL renderer, attach the canvas to the
// page, and hook up the mouse listeners.
function setup() {
  container = document.getElementById("container")

  var aspect = window.innerWidth / window.innerHeight
  camera = new THREE.PerspectiveCamera(70, aspect, 1, 10000)
  camera.position.set(0, 0, 300)

  scene = new THREE.Scene()
  scene.background = new THREE.Color(0x000000)

  renderer = new THREE.WebGLRenderer({ antialias: true })
  renderer.setPixelRatio(window.devicePixelRatio)
  renderer.setSize(window.innerWidth, window.innerHeight)

  var canvas = renderer.domElement
  container.appendChild(canvas)
  canvas.addEventListener('mousemove', onMouseMove)
  canvas.addEventListener('mousedown', swap)
}
// Build the scene objects for one face: a small white cube per landmark and
// a colored MeshLine strip per facial feature group. Populates the
// module-level `cubes` and `meshes` arrays and adds everything to `scene`.
//
// points: array of FACE_POINT_COUNT landmark points (as produced by
// recenter(); note the cubes' real positions are set afterwards by
// updateFace(), so the initial matrix mainly bakes in the POINT_SCALE).
function build(points) {
  var matrix = new THREE.Matrix4()
  var quaternion = new THREE.Quaternion()

  for (var i = 0; i < FACE_POINT_COUNT; i++) {
    var p = points[i]
    var geometry = new THREE.BoxBufferGeometry()
    // NOTE(review): p comes from recenter() as a Vector3, so p[0..2] are
    // undefined here and the position collapses to the origin; updateFace()
    // immediately repositions the cubes, so this is harmless — confirm.
    var position = new THREE.Vector3(p[0], p[1], p[2])
    var rotation = new THREE.Euler()
    var scale = new THREE.Vector3()
    var color = new THREE.Color()
    scale.x = scale.y = scale.z = POINT_SCALE
    quaternion.setFromEuler(rotation, false)
    matrix.compose(position, quaternion, scale)
    geometry.applyMatrix(matrix)
    // BUG FIX: `material` and `cube` were assigned without a declaration,
    // leaking them as implicit globals; declare them locally.
    var material = new THREE.MeshBasicMaterial({ color: color.setHex(0xffffff) })
    var cube = new THREE.Mesh(geometry, material)
    scene.add(cube)
    cubes.push(cube)
  }

  meshes = getLineGeometry(points).map((geometry, i) => {
    var color = new THREE.Color()
    var material = new MeshLineMaterial({
      color: color.setHex(colors[i % colors.length]),
    })
    var line = new MeshLine()
    line.setGeometry(geometry, _ => 1.5) // constant line width
    var mesh = new THREE.Mesh(line.geometry, material)
    mesh.geometry.dynamic = true // vertices are rewritten every morph frame
    scene.add(mesh)
    return [line, mesh]
  })
}
// Interpolate every point of A toward the corresponding point of B at
// parameter n (0 = A, 1 = B), writing results into the preallocated C.
function lerpPoints(n, A, B, C) {
  var count = A.length
  for (var i = 0; i < count; i++) {
    lerpPoint(n, A[i], B[i], C[i])
  }
}
+function lerpPoint(n, A, B, C) {
+ C.x = lerp(n, A.x, B.x)
+ C.y = lerp(n, A.y, B.y)
+ C.z = lerp(n, A.z, B.z)
+}
+function lerp(n, a, b) {
+ return (b-a) * n + a
+}
// Begin morphing to the next face in the cycle; no-op while a morph is
// already in progress. Triggered by mousedown.
function swap() {
  if (swapping) return
  swapping = true
  start_t = last_t
  swapFrom = swapTo
  swap_count = (swap_count + 1) % faces.length
  swapTo = faces[swap_count]
  var label = document.querySelector('.currentFace')
  label.innerHTML = face_names[swap_count]
}
// Advance the in-progress morph for frame timestamp t; once the elapsed
// time exceeds SWAP_TIME, clamp to the target face and stop morphing.
function update_swap(t) {
  var progress = (t - start_t) / SWAP_TIME
  if (progress > 1) {
    progress = 1
    swapping = false
  }
  lerpPoints(progress, swapFrom, swapTo, faceBuffer)
  updateFace(faceBuffer)
}
// Push one set of landmark positions into both visual layers: the landmark
// cubes and the feature-group lines.
function updateFace(pts) {
  updateCubeGeometry(pts)
  updateLineGeometry(pts)
}
// Move each landmark cube to its corresponding point.
function updateCubeGeometry(points) {
  for (var i = 0; i < cubes.length; i++) {
    var pt = points[i]
    cubes[i].position.set(pt.x, pt.y, pt.z)
  }
}
// Rebuild each feature-group polyline from the new points and flag the
// mesh vertices for re-upload to the GPU.
function updateLineGeometry(points) {
  var segments = getLineGeometry(points)
  segments.forEach(function (geometry, i) {
    var line = meshes[i][0]
    var mesh = meshes[i][1]
    line.setGeometry(geometry, function () { return 1.5 })
    mesh.geometry.vertices = line.geometry.vertices
    mesh.geometry.verticesNeedUpdate = true
  })
}
// Split the landmarks into facial feature groups (index ranges appear to
// follow the 68-point landmark convention: jaw, two brows, nose bridge,
// lower nose, two eyes, mouth) and wrap each group in a THREE.Geometry
// polyline. Groups after index 4 (eyes and mouth) are closed by repeating
// their first point.
function getLineGeometry(points) {
  var ranges = [
    [0, 17],
    [17, 22],
    [22, 27],
    [27, 31],
    [31, 36],
    [36, 42],
    [42, 48],
    [48, points.length],
  ]
  return ranges.map(function (range, i) {
    var group = points.slice(range[0], range[1])
    var geometry = new THREE.Geometry()
    group.forEach(function (p) { geometry.vertices.push(p) })
    if (i > 4) {
      geometry.vertices.push(group[0])
    }
    return geometry
  })
}
+function getBounds(obj) {
+ return obj.reduce((a, p) => {
+ return [
+ Math.min(a[0], p[0]),
+ Math.max(a[1], p[0]),
+ Math.min(a[2], p[1]),
+ Math.max(a[3], p[1]),
+ Math.min(a[4], p[2]),
+ Math.max(a[5], p[2]),
+ ]
+ }, [Infinity, -Infinity, Infinity, -Infinity, Infinity, -Infinity])
+}
// Center a face's landmark cloud on the origin, flipping the Y axis
// (image coordinates are y-down, scene coordinates are y-up).
//
// obj: array of [x, y, z] triples; mutated in place, and a parallel array
// of new THREE.Vector3 is returned.
function recenter(obj) {
  const bounds = getBounds(obj) // [minX, maxX, minY, maxY, minZ, maxZ]
  const x_width = (bounds[1] - bounds[0]) / 2
  const y_width = (bounds[3] - bounds[2]) / -2
  const z_width = (bounds[5] - bounds[4]) / 2
  return obj.map(p => {
    p[0] = p[0] - bounds[0] - x_width
    // BUG FIX: was `-p[1] + bounds[1] + y_width`, which mixed in the X
    // maximum (bounds[1]); the Y maximum is bounds[3].
    p[1] = -p[1] + bounds[3] + y_width
    // BUG FIX: was `p[2] - bounds[2] + z_width`, which subtracted the Y
    // minimum (bounds[2]) and shifted by +half-depth instead of centering;
    // use the Z minimum (bounds[4]) and subtract the half-depth.
    p[2] = p[2] - bounds[4] - z_width
    return new THREE.Vector3(p[0], p[1], p[2])
  })
}
+//
// Record the cursor position normalized to [0, 1] viewport coordinates.
function onMouseMove(e) {
  var { clientX, clientY } = e
  mouse.x = clientX / window.innerWidth
  mouse.y = clientY / window.innerHeight
}
// Per-frame loop: advance any in-progress face morph, render, then spin the
// scene and ease its tilt toward the smoothed mouse position.
function animate(t) {
  requestAnimationFrame(animate)
  if (swapping) {
    update_swap(t)
  }
  renderer.render(scene, camera)
  scene.rotation.y += 0.01 * Math.PI
  var easing = 0.1
  mouseTarget.x += (mouse.x - mouseTarget.x) * easing
  mouseTarget.y += (mouse.y - mouseTarget.y) * easing
  scene.rotation.x = (mouseTarget.y - 0.5) * Math.PI / 2
  scene.rotation.y += 0.01
  last_t = t
}
+</script>
+
+</body>
+</html> \ No newline at end of file
diff --git a/site/content b/site/content
new file mode 120000
index 00000000..80b3a367
--- /dev/null
+++ b/site/content
@@ -0,0 +1 @@
+/Users/user/Nextcloud/megapixels/website/pages/ \ No newline at end of file
diff --git a/site/public/about/credits/index.html b/site/public/about/credits/index.html
new file mode 100644
index 00000000..65bc7ac4
--- /dev/null
+++ b/site/public/about/credits/index.html
@@ -0,0 +1,57 @@
+<!doctype html>
+<html>
+<head>
+ <title>MegaPixels</title>
+ <meta charset="utf-8" />
+ <meta name="author" content="Adam Harvey" />
+ <meta name="description" content="MegaPixels Project Team Credits" />
+ <meta name="referrer" content="no-referrer" />
+ <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
+ <link rel='stylesheet' href='/assets/css/fonts.css' />
+ <link rel='stylesheet' href='/assets/css/css.css' />
+</head>
+<body>
+ <header>
+ <a class='slogan' href="/">
+ <div class='logo'></div>
+ <div class='site_name'>MegaPixels</div>
+ <span class='sub'>The Darkside of Datasets</span>
+ </a>
+ <div class='links'>
+ <a href="/search/">Face Search</a>
+ <a href="/datasets/">Datasets</a>
+ <a href="/research/">Research</a>
+ <a href="/about/">About</a>
+ </div>
+ </header>
+ <div class="content">
+
+ <section><h1>Credits</h1>
+</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/about/assets/test.jpg' alt='alt text'><div class='caption'>alt text</div></div></section><section><ul>
+<li>MegaPixels by Adam Harvey</li>
+<li>Made with support from Mozilla</li>
+<li>Site developed by Jules Laplace</li>
+<li>Design and graphics: Adam Harvey</li>
+<li>Research assistants: Berit Gilma</li>
+</ul>
+</section>
+
+ </div>
+ <footer>
+ <div>
+ <a href="/">MegaPixels.cc</a>
+ <a href="/about/disclaimer/">Disclaimer</a>
+ <a href="/about/terms/">Terms of Use</a>
+ <a href="/about/privacy/">Privacy</a>
+ <a href="/about/">About</a>
+ <a href="/about/team/">Team</a>
+ </div>
+ <div>
+ MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
+ <a href="https://ahprojects.com">ahprojects.com</a>
+ </div>
+ </footer>
+</body>
+
+<script src="/assets/js/app/site.js"></script>
+</html> \ No newline at end of file
diff --git a/site/public/about/disclaimer/index.html b/site/public/about/disclaimer/index.html
new file mode 100644
index 00000000..b0215bde
--- /dev/null
+++ b/site/public/about/disclaimer/index.html
@@ -0,0 +1,57 @@
+<!doctype html>
+<html>
+<head>
+ <title>MegaPixels</title>
+ <meta charset="utf-8" />
+ <meta name="author" content="Adam Harvey" />
+ <meta name="description" content="MegaPixels Disclaimer" />
+ <meta name="referrer" content="no-referrer" />
+ <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
+ <link rel='stylesheet' href='/assets/css/fonts.css' />
+ <link rel='stylesheet' href='/assets/css/css.css' />
+</head>
+<body>
+ <header>
+ <a class='slogan' href="/">
+ <div class='logo'></div>
+ <div class='site_name'>MegaPixels</div>
+ <span class='sub'>The Darkside of Datasets</span>
+ </a>
+ <div class='links'>
+ <a href="/search/">Face Search</a>
+ <a href="/datasets/">Datasets</a>
+ <a href="/research/">Research</a>
+ <a href="/about/">About</a>
+ </div>
+ </header>
+ <div class="content">
+
+ <section><h1>Disclaimer</h1>
+<p>Last updated: December 04, 2018</p>
+<p>The information contained on MegaPixels.cc website (the "Service") is for academic and artistic purposes only.</p>
+<p>MegaPixels.cc assumes no responsibility for errors or omissions in the contents on the Service.</p>
+<p>In no event shall MegaPixels.cc be liable for any special, direct, indirect, consequential, or incidental damages or any damages whatsoever, whether in an action of contract, negligence or other tort, arising out of or in connection with the use of the Service or the contents of the Service. MegaPixels.cc reserves the right to make additions, deletions, or modification to the contents on the Service at any time without prior notice.</p>
+<h2>External links disclaimer</h2>
+<p>MegaPixels.cc website may contain links to external websites that are not provided or maintained by or in any way affiliated with MegaPixels.cc</p>
+<p>Please note that the MegaPixels.cc does not guarantee the accuracy, relevance, timeliness, or completeness of any information on these external websites.</p>
+</section>
+
+ </div>
+ <footer>
+ <div>
+ <a href="/">MegaPixels.cc</a>
+ <a href="/about/disclaimer/">Disclaimer</a>
+ <a href="/about/terms/">Terms of Use</a>
+ <a href="/about/privacy/">Privacy</a>
+ <a href="/about/">About</a>
+ <a href="/about/team/">Team</a>
+ </div>
+ <div>
+ MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
+ <a href="https://ahprojects.com">ahprojects.com</a>
+ </div>
+ </footer>
+</body>
+
+<script src="/assets/js/app/site.js"></script>
+</html> \ No newline at end of file
diff --git a/site/public/about/index.html b/site/public/about/index.html
new file mode 100644
index 00000000..65bc7ac4
--- /dev/null
+++ b/site/public/about/index.html
@@ -0,0 +1,57 @@
+<!doctype html>
+<html>
+<head>
+ <title>MegaPixels</title>
+ <meta charset="utf-8" />
+ <meta name="author" content="Adam Harvey" />
+ <meta name="description" content="MegaPixels Project Team Credits" />
+ <meta name="referrer" content="no-referrer" />
+ <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
+ <link rel='stylesheet' href='/assets/css/fonts.css' />
+ <link rel='stylesheet' href='/assets/css/css.css' />
+</head>
+<body>
+ <header>
+ <a class='slogan' href="/">
+ <div class='logo'></div>
+ <div class='site_name'>MegaPixels</div>
+ <span class='sub'>The Darkside of Datasets</span>
+ </a>
+ <div class='links'>
+ <a href="/search/">Face Search</a>
+ <a href="/datasets/">Datasets</a>
+ <a href="/research/">Research</a>
+ <a href="/about/">About</a>
+ </div>
+ </header>
+ <div class="content">
+
+ <section><h1>Credits</h1>
+</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/about/assets/test.jpg' alt='alt text'><div class='caption'>alt text</div></div></section><section><ul>
+<li>MegaPixels by Adam Harvey</li>
+<li>Made with support from Mozilla</li>
+<li>Site developed by Jules Laplace</li>
+<li>Design and graphics: Adam Harvey</li>
+<li>Research assistants: Berit Gilma</li>
+</ul>
+</section>
+
+ </div>
+ <footer>
+ <div>
+ <a href="/">MegaPixels.cc</a>
+ <a href="/about/disclaimer/">Disclaimer</a>
+ <a href="/about/terms/">Terms of Use</a>
+ <a href="/about/privacy/">Privacy</a>
+ <a href="/about/">About</a>
+ <a href="/about/team/">Team</a>
+ </div>
+ <div>
+ MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
+ <a href="https://ahprojects.com">ahprojects.com</a>
+ </div>
+ </footer>
+</body>
+
+<script src="/assets/js/app/site.js"></script>
+</html> \ No newline at end of file
diff --git a/site/public/about/press/index.html b/site/public/about/press/index.html
new file mode 100644
index 00000000..09c89165
--- /dev/null
+++ b/site/public/about/press/index.html
@@ -0,0 +1,55 @@
+<!doctype html>
+<html>
+<head>
+ <title>MegaPixels</title>
+ <meta charset="utf-8" />
+ <meta name="author" content="Adam Harvey" />
+ <meta name="description" content="MegaPixels in The News" />
+ <meta name="referrer" content="no-referrer" />
+ <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
+ <link rel='stylesheet' href='/assets/css/fonts.css' />
+ <link rel='stylesheet' href='/assets/css/css.css' />
+</head>
+<body>
+ <header>
+ <a class='slogan' href="/">
+ <div class='logo'></div>
+ <div class='site_name'>MegaPixels</div>
+ <span class='sub'>The Darkside of Datasets</span>
+ </a>
+ <div class='links'>
+ <a href="/search/">Face Search</a>
+ <a href="/datasets/">Datasets</a>
+ <a href="/research/">Research</a>
+ <a href="/about/">About</a>
+ </div>
+ </header>
+ <div class="content">
+
+ <section><h1>Press</h1>
+</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/about/assets/test.jpg' alt='alt text'><div class='caption'>alt text</div></div></section><section><ul>
+<li>Aug 22, 2017: "Transgender YouTubers had their videos grabbed to train facial recognition software" by James Vincent <a href="https://www.theverge.com/2017/8/22/16180080/transgender-youtubers-ai-facial-recognition-dataset">https://www.theverge.com/2017/8/22/16180080/transgender-youtubers-ai-facial-recognition-dataset</a></li>
+<li>Aug 22, 2017: "Transgender YouTubers had their videos grabbed to train facial recognition software" by James Vincent <a href="https://www.theverge.com/2017/8/22/16180080/transgender-youtubers-ai-facial-recognition-dataset">https://www.theverge.com/2017/8/22/16180080/transgender-youtubers-ai-facial-recognition-dataset</a></li>
+<li>Aug 22, 2017: "Transgender YouTubers had their videos grabbed to train facial recognition software" by James Vincent <a href="https://www.theverge.com/2017/8/22/16180080/transgender-youtubers-ai-facial-recognition-dataset">https://www.theverge.com/2017/8/22/16180080/transgender-youtubers-ai-facial-recognition-dataset</a></li>
+</ul>
+</section>
+
+ </div>
+ <footer>
+ <div>
+ <a href="/">MegaPixels.cc</a>
+ <a href="/about/disclaimer/">Disclaimer</a>
+ <a href="/about/terms/">Terms of Use</a>
+ <a href="/about/privacy/">Privacy</a>
+ <a href="/about/">About</a>
+ <a href="/about/team/">Team</a>
+ </div>
+ <div>
+ MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
+ <a href="https://ahprojects.com">ahprojects.com</a>
+ </div>
+ </footer>
+</body>
+
+<script src="/assets/js/app/site.js"></script>
+</html> \ No newline at end of file
diff --git a/site/public/about/privacy/index.html b/site/public/about/privacy/index.html
new file mode 100644
index 00000000..5675f072
--- /dev/null
+++ b/site/public/about/privacy/index.html
@@ -0,0 +1,134 @@
+<!doctype html>
+<html>
+<head>
+ <title>MegaPixels</title>
+ <meta charset="utf-8" />
+ <meta name="author" content="Adam Harvey" />
+ <meta name="description" content="MegaPixels Privacy Policy" />
+ <meta name="referrer" content="no-referrer" />
+ <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
+ <link rel='stylesheet' href='/assets/css/fonts.css' />
+ <link rel='stylesheet' href='/assets/css/css.css' />
+</head>
+<body>
+ <header>
+ <a class='slogan' href="/">
+ <div class='logo'></div>
+ <div class='site_name'>MegaPixels</div>
+ <span class='sub'>The Darkside of Datasets</span>
+ </a>
+ <div class='links'>
+ <a href="/search/">Face Search</a>
+ <a href="/datasets/">Datasets</a>
+ <a href="/research/">Research</a>
+ <a href="/about/">About</a>
+ </div>
+ </header>
+ <div class="content">
+
+ <section><h1>Privacy Policy</h1>
+<p>A summary of our privacy policy is as follows:</p>
+<p>The MegaPixels site does not use any analytics programs or collect any data besides the necessary IP address of your connection, which are deleted every 30 days and used only for security and to prevent misuse.</p>
+<p>The image processing sections of the site do not collect any data whatsoever. All processing takes place in temporary memory (RAM) and then is displayed back to the user over a SSL secured HTTPS connection. It is the sole responsibility of the user whether they discard, by closing the page, or share their analyzed information and any potential consequences that may arise from doing so.</p>
+<hr>
+<p>A more complete legal version is below:</p>
+<p><strong>This is a boilerplate Privacy policy from <a href="https://termsfeed.com/">https://termsfeed.com/</a></strong></p>
+<p><strong>Needs to be reviewed</strong></p>
+<p>Effective date: December 04, 2018</p>
+<p>megapixels.cc ("us", "we", or "our") operates the WebsiteName website (hereinafter referred to as the "Service").</p>
+<p>This page informs you of our policies regarding the collection, use, and disclosure of personal data when you use our Service and the choices you have associated with that data.</p>
+<p>We use your data to provide and improve the Service. By using the Service, you agree to the collection and use of information in accordance with this policy. Unless otherwise defined in this Privacy Policy, the terms used in this Privacy Policy have the same meanings as in our Terms and Conditions, accessible from WebsiteName</p>
+<h2>Definitions</h2>
+<p><strong>Service</strong></p>
+<p>Service is the MegaPixels website operated by megapixels.cc</p>
+<p><strong>Personal Data</strong></p>
+<p>Personal Data means data about a living individual who can be identified from those data (or from those and other information either in our possession or likely to come into our possession).</p>
+<p><strong>Usage Data</strong></p>
+<p>Usage Data is data collected automatically either generated by the use of the Service or from the Service infrastructure itself</p>
+<h2>Information Collection and Use</h2>
+<p>We collect several different types of information for various purposes to provide and improve our Service to you.</p>
+<h3>Types of Data Collected</h3>
+<h4>Personal Data</h4>
+<p>While using our Service, we may ask you to provide us with certain personally identifiable information that can be used to contact or identify you ("Personal Data"). Personally identifiable information may include, but is not limited to:</p>
+<ul>
+<li>Cookies and Usage Data</li>
+</ul>
+<h4>Usage Data</h4>
+<p>We may also collect information about how the Service is accessed and used ("Usage Data"). This Usage Data may include information such as your computer's Internet Protocol address (e.g. IP address), browser type, browser version, the pages of our Service that you visit, the time and date of your visit, the time spent on those pages, unique device identifiers and other diagnostic data.</p>
+<h4>Tracking &amp; Cookies Data</h4>
+<p>We use cookies and similar tracking technologies to track the activity on our Service and we hold certain information.
+Cookies are files with a small amount of data which may include an anonymous unique identifier. Cookies are sent to your browser from a website and stored on your device. Other tracking technologies are also used such as beacons, tags and scripts to collect and track information and to improve and analyse our Service.</p>
+<p>You can instruct your browser to refuse all cookies or to indicate when a cookie is being sent. However, if you do not accept cookies, you may not be able to use some portions of our Service.
+Examples of Cookies we use:</p>
+<ul>
+<li><strong>Session Cookies.</strong> We use Session Cookies to operate our Service.</li>
+<li><strong>Preference Cookies.</strong> We use Preference Cookies to remember your preferences and various settings.</li>
+<li><strong>Security Cookies.</strong> We use Security Cookies for security purposes.</li>
+</ul>
+<h2>Use of Data</h2>
+<p>megapixels.cc uses the collected data for various purposes:</p>
+<ul>
+<li>To provide and maintain the Service</li>
+<li>To notify you about changes to our Service</li>
+<li>To allow you to participate in interactive features of our Service when you choose to do so</li>
+<li>To provide customer care and support</li>
+<li>To provide analysis or valuable information so that we can improve the Service</li>
+<li>To monitor the usage of the Service</li>
+<li>To detect, prevent and address technical issues</li>
+</ul>
+<h2>Transfer Of Data</h2>
+<p>Your information, including Personal Data, may be transferred to — and maintained on — computers located outside of your state, province, country or other governmental jurisdiction where the data protection laws may differ from those of your jurisdiction.</p>
+<p>If you are located outside Germany and choose to provide information to us, please note that we transfer the data, including Personal Data, to Germany and process it there.
+Your consent to this Privacy Policy followed by your submission of such information represents your agreement to that transfer.
+megapixels.cc will take all steps reasonably necessary to ensure that your data is treated securely and in accordance with this Privacy Policy and no transfer of your Personal Data will take place to an organization or a country unless there are adequate controls in place including the security of your data and other personal information.</p>
+<h2>Disclosure Of Data</h2>
+<h3>Legal Requirements</h3>
+<p>megapixels.cc may disclose your Personal Data in the good faith belief that such action is necessary to:</p>
+<ul>
+<li>To comply with a legal obligation</li>
+<li>To protect and defend the rights or property of megapixels.cc</li>
+<li>To prevent or investigate possible wrongdoing in connection with the Service</li>
+<li>To protect the personal safety of users of the Service or the public</li>
+<li>To protect against legal liability</li>
+</ul>
+<h2>Security of Data</h2>
+<p>The security of your data is important to us but remember that no method of transmission over the Internet or method of electronic storage is 100% secure. While we strive to use commercially acceptable means to protect your Personal Data, we cannot guarantee its absolute security.</p>
+<h2>Service Providers</h2>
+<p>We may employ third party companies and individuals to facilitate our Service ("Service Providers"), to provide the Service on our behalf, to perform Service-related services or to assist us in analyzing how our Service is used.</p>
+<p>These third parties have access to your Personal Data only to perform these tasks on our behalf and are obligated not to disclose or use it for any other purpose.</p>
+<h2>Links to Other Sites</h2>
+<p>Our Service may contain links to other sites that are not operated by us. If you click a third party link, you will be directed to that third party's site. We strongly advise you to review the Privacy Policy of every site you visit.
+We have no control over and assume no responsibility for the content, privacy policies or practices of any third party sites or services.</p>
+<h2>Children's Privacy</h2>
+<p>Our Service does not address anyone under the age of 18 ("Children").</p>
+<p>We do not knowingly collect personally identifiable information from anyone under the age of 18. If you are a parent or guardian and you are aware that your Child has provided us with Personal Data, please contact us. If we become aware that we have collected Personal Data from children without verification of parental consent, we take steps to remove that information from our servers.</p>
+<h2>Changes to This Privacy Policy</h2>
+<p>We may update our Privacy Policy from time to time. We will notify you of any changes by posting the new Privacy Policy on this page.
+We will let you know via email and/or a prominent notice on our Service, prior to the change becoming effective and update the "effective date" at the top of this Privacy Policy.
+You are advised to review this Privacy Policy periodically for any changes. Changes to this Privacy Policy are effective when they are posted on this page.</p>
+<h2>Contact Us</h2>
+<p>If you have any questions about this Privacy Policy, please contact us:</p>
+<ul>
+<li>By visiting this page on our website: <a href="https://megapixels.cc/contact">https://megapixels.cc/contact</a></li>
+</ul>
+</section>
+
+ </div>
+ <footer>
+ <div>
+ <a href="/">MegaPixels.cc</a>
+ <a href="/about/disclaimer/">Disclaimer</a>
+ <a href="/about/terms/">Terms of Use</a>
+ <a href="/about/privacy/">Privacy</a>
+ <a href="/about/">About</a>
+ <a href="/about/team/">Team</a>
+ </div>
+ <div>
+ MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
+ <a href="https://ahprojects.com">ahprojects.com</a>
+ </div>
+ </footer>
+</body>
+
+<script src="/assets/js/app/site.js"></script>
+</html> \ No newline at end of file
diff --git a/site/public/about/style/index.html b/site/public/about/style/index.html
new file mode 100644
index 00000000..f2c0d4b8
--- /dev/null
+++ b/site/public/about/style/index.html
@@ -0,0 +1,90 @@
+<!doctype html>
+<html>
+<head>
+ <title>MegaPixels</title>
+ <meta charset="utf-8" />
+ <meta name="author" content="Adam Harvey" />
+ <meta name="description" content="MegaPixels Style" />
+ <meta name="referrer" content="no-referrer" />
+ <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
+ <link rel='stylesheet' href='/assets/css/fonts.css' />
+ <link rel='stylesheet' href='/assets/css/css.css' />
+</head>
+<body>
+ <header>
+ <a class='slogan' href="/">
+ <div class='logo'></div>
+ <div class='site_name'>MegaPixels</div>
+ <span class='sub'>The Darkside of Datasets</span>
+ </a>
+ <div class='links'>
+ <a href="/search/">Face Search</a>
+ <a href="/datasets/">Datasets</a>
+ <a href="/research/">Research</a>
+ <a href="/about/">About</a>
+ </div>
+ </header>
+ <div class="content">
+
+ <section><h1>Style Examples</h1>
+</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/about/assets/test.jpg' alt='Alt text here'><div class='caption'>Alt text here</div></div></section><section><div class='meta'><div><div class='gray'>Date</div><div>17-Jan-2019</div></div><div><div class='gray'>Numbers</div><div>17</div></div><div><div class='gray'>Identities</div><div>12,139</div></div><div><div class='gray'>But also</div><div>This is a test of the stylesheet</div></div></div></section><section><h1>Header 1</h1>
+<h2>Header 2</h2>
+<h3>Header 3</h3>
+<h4>Header 4</h4>
+<h5>Header 5</h5>
+<h6>Header 6</h6>
+<p><strong>Bold text</strong>, <em>italic text</em>, <strong><em>bold italic text</em></strong></p>
+<p>At vero eos et et iusto qui blanditiis <a href="#">praesentium voluptatum</a> deleniti atque corrupti[^1], quos dolores et quas molestias excepturi sint, obcaecati cupiditate non-provident, similique sunt in culpa, qui officia deserunt mollitia animi, id est laborum et dolorum fuga. Et harum quidem rerum facilis est et expedita distinctio[^2]. Nam libero tempore, cum soluta nobis est eligendi optio, cumque nihil impedit, quo minus id, quod maxime placeat, facere possimus, omnis voluptas assumenda est, omnis dolor repellendus[^3].</p>
+<ul>
+<li>Sed ut perspiciatis, unde omnis iste natus error sit voluptatem accusantium doloremque laudantium</li>
+<li>Totam rem aperiam eaque ipsa, quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt, explicabo</li>
+<li>Nemo enim ipsam voluptatem, quia voluptas sit, aspernatur aut</li>
+<li>Odit aut fugit, sed quia consequuntur magni dolores eos</li>
+<li>Qui ratione voluptatem sequi nesciunt, neque porro quisquam </li>
+</ul>
+<h2>single image test</h2>
+</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/about/assets/man.jpg' alt='This person is alone'><div class='caption'>This person is alone</div></div></section><section><h2>double image test</h2>
+</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/about/assets/man.jpg' alt='This person is on the left'><div class='caption'>This person is on the left</div></div>
+<div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/about/assets/man.jpg' alt='This person is on the right'><div class='caption'>This person is on the right</div></div></section><section><h2>triple image test</h2>
+</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/about/assets/man.jpg' alt='Person 1'><div class='caption'>Person 1</div></div>
+<div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/about/assets/man.jpg' alt='Person 2'><div class='caption'>Person 2</div></div>
+<div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/about/assets/man.jpg' alt='Person 3. Let me tell you about Person 3. This person has a very long description with text which wraps like crazy'><div class='caption'>Person 3. Let me tell you about Person 3. This person has a very long description with text which wraps like crazy</div></div></section><section><blockquote><p>est, qui dolorem ipsum, quia dolor sit amet consectetur adipisci[ng] velit, sed quia non-numquam [do] eius modi tempora inci[di]dunt, ut labore et dolore magnam aliquam quaerat voluptatem.</p>
+</blockquote>
+</section><section class='wide'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/about/assets/wide-test.jpg' alt='This image is extremely wide and the text beneath it will wrap but thats fine because it can also contain <a href="https://example.com/">hyperlinks</a>! Yes, you read that right&mdash;hyperlinks! Lorem ipsum dolor sit amet ad volotesque sic hoc ad nauseam'><div class='caption'>This image is extremely wide and the text beneath it will wrap but that's fine because it can also contain <a href="https://example.com/">hyperlinks</a>! Yes, you read that right&mdash;hyperlinks! Lorem ipsum dolor sit amet ad volotesque sic hoc ad nauseam</div></div></section><section><p>Inline <code>code</code> has <code>back-ticks around</code> it.</p>
+<pre><code class="lang-javascript">var s = &quot;JavaScript syntax highlighting&quot;;
+alert(s);
+</code></pre>
+<pre><code class="lang-python">s = &quot;Python syntax highlighting&quot;
+print(s)
+</code></pre>
+<pre><code>No language indicated, so no syntax highlighting.
+But let's throw in a &lt;b&gt;tag&lt;/b&gt;.
+</code></pre>
+<p>Horizontal rule</p>
+<hr>
+<p>Citations below here</p>
+<div class="footnotes">
+<hr>
+<ol></ol>
+</div>
+</section>
+
+ </div>
+ <footer>
+ <div>
+ <a href="/">MegaPixels.cc</a>
+ <a href="/about/disclaimer/">Disclaimer</a>
+ <a href="/about/terms/">Terms of Use</a>
+ <a href="/about/privacy/">Privacy</a>
+ <a href="/about/">About</a>
+ <a href="/about/team/">Team</a>
+ </div>
+ <div>
+ MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
+ <a href="https://ahprojects.com">ahprojects.com</a>
+ </div>
+ </footer>
+</body>
+
+<script src="/assets/js/app/site.js"></script>
+</html> \ No newline at end of file
diff --git a/site/public/about/terms/index.html b/site/public/about/terms/index.html
new file mode 100644
index 00000000..078c339f
--- /dev/null
+++ b/site/public/about/terms/index.html
@@ -0,0 +1,69 @@
+<!doctype html>
+<html>
+<head>
+ <title>MegaPixels</title>
+ <meta charset="utf-8" />
+ <meta name="author" content="Adam Harvey" />
+ <meta name="description" content="MegaPixels Terms of Use and Privacy Policy" />
+ <meta name="referrer" content="no-referrer" />
+ <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
+ <link rel='stylesheet' href='/assets/css/fonts.css' />
+ <link rel='stylesheet' href='/assets/css/css.css' />
+</head>
+<body>
+ <header>
+ <a class='slogan' href="/">
+ <div class='logo'></div>
+ <div class='site_name'>MegaPixels</div>
+ <span class='sub'>The Darkside of Datasets</span>
+ </a>
+ <div class='links'>
+ <a href="/search/">Face Search</a>
+ <a href="/datasets/">Datasets</a>
+ <a href="/research/">Research</a>
+ <a href="/about/">About</a>
+ </div>
+ </header>
+ <div class="content">
+
+ <section><p>Terms and Conditions ("Terms")</p>
+<p>Last updated: December 04, 2018</p>
+<p>Please read these Terms and Conditions ("Terms", "Terms and Conditions") carefully before using the MegaPixels website (the "Service") operated by megapixels.cc ("us", "we", or "our").</p>
+<p>Your access to and use of the Service is conditioned on your acceptance of and compliance with these Terms.</p>
+<p>By accessing or using the Service you agree to be bound by these Terms. If you disagree with any part of the terms then you may not access the Service.</p>
+<h2>Links To Other Web Sites</h2>
+<p>Our Service may contain links to third-party web sites or services that are not owned or controlled by megapixels.cc.</p>
+<p>megapixels.cc has no control over, and assumes no responsibility for, the content, privacy policies, or practices of any third party web sites or services. You further acknowledge and agree that megapixels.cc shall not be responsible or liable, directly or indirectly, for any damage or loss caused or alleged to be caused by or in connection with use of or reliance on any such content, goods or services available on or through any such web sites or services.</p>
+<p>We strongly advise you to read the terms and conditions and privacy policies of any third-party web sites or services that you visit.</p>
+<h2>Termination</h2>
+<p>We may terminate or suspend access to our Service immediately, without prior notice or liability, for any reason whatsoever, including without limitation if you breach the Terms.</p>
+<p>All provisions of the Terms which by their nature should survive termination shall survive termination, including, without limitation, ownership provisions, warranty disclaimers, indemnity and limitations of liability.</p>
+<h2>Governing Law</h2>
+<p>These Terms shall be governed and construed in accordance with the laws of Berlin, Germany, without regard to its conflict of law provisions.</p>
+<p>Our failure to enforce any right or provision of these Terms will not be considered a waiver of those rights. If any provision of these Terms is held to be invalid or unenforceable by a court, the remaining provisions of these Terms will remain in effect. These Terms constitute the entire agreement between us regarding our Service, and supersede and replace any prior agreements we might have between us regarding the Service.</p>
+<h2>Changes</h2>
+<p>We reserve the right, at our sole discretion, to modify or replace these Terms at any time. If a revision is material we will try to provide at least 30 days notice prior to any new terms taking effect. What constitutes a material change will be determined at our sole discretion.</p>
+<p>By continuing to access or use our Service after those revisions become effective, you agree to be bound by the revised terms. If you do not agree to the new terms, please stop using the Service.</p>
+<h2>Contact Us</h2>
+<p>If you have any questions about these Terms, please contact us.</p>
+</section>
+
+ </div>
+ <footer>
+ <div>
+ <a href="/">MegaPixels.cc</a>
+ <a href="/about/disclaimer/">Disclaimer</a>
+ <a href="/about/terms/">Terms of Use</a>
+ <a href="/about/privacy/">Privacy</a>
+ <a href="/about/">About</a>
+ <a href="/about/team/">Team</a>
+ </div>
+ <div>
+ MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
+ <a href="https://ahprojects.com">ahprojects.com</a>
+ </div>
+ </footer>
+</body>
+
+<script src="/assets/js/app/site.js"></script>
+</html> \ No newline at end of file
diff --git a/site/public/assets b/site/public/assets
new file mode 120000
index 00000000..bae68598
--- /dev/null
+++ b/site/public/assets
@@ -0,0 +1 @@
+../assets/ \ No newline at end of file
diff --git a/site/public/datasets/lfw/index.html b/site/public/datasets/lfw/index.html
new file mode 100644
index 00000000..e080229f
--- /dev/null
+++ b/site/public/datasets/lfw/index.html
@@ -0,0 +1,283 @@
+<!doctype html>
+<html>
+<head>
+ <title>MegaPixels</title>
+ <meta charset="utf-8" />
+ <meta name="author" content="Adam Harvey" />
+ <meta name="description" content="LFW: Labeled Faces in The Wild" />
+ <meta name="referrer" content="no-referrer" />
+ <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
+ <link rel='stylesheet' href='/assets/css/fonts.css' />
+ <link rel='stylesheet' href='/assets/css/css.css' />
+</head>
+<body>
+ <header>
+ <a class='slogan' href="/">
+ <div class='logo'></div>
+ <div class='site_name'>MegaPixels</div>
+ <span class='sub'>The Darkside of Datasets</span>
+ </a>
+ <div class='links'>
+ <a href="/search/">Face Search</a>
+ <a href="/datasets/">Datasets</a>
+ <a href="/research/">Research</a>
+ <a href="/about/">About</a>
+ </div>
+ </header>
+ <div class="content">
+
+ <section><h1>Labeled Faces in the Wild</h1>
+</section><section><div class='meta'><div><div class='gray'>Created</div><div>2007</div></div><div><div class='gray'>Images</div><div>13,233</div></div><div><div class='gray'>People</div><div>5,749</div></div><div><div class='gray'>Created From</div><div>Yahoo News images</div></div><div><div class='gray'>Search available</div><div>Searchable</div></div></div></section><section><p>Labeled Faces in The Wild (LFW) is amongst the most widely used facial recognition training datasets in the world and is the first of its kind to be created entirely from images posted online. The LFW dataset includes 13,233 images of 5,749 people that were collected between 2002-2004. Use the tools below to check if you were included in this dataset or scroll down to read the analysis.</p>
+<p>{INSERT IMAGE SEARCH MODULE}</p>
+<p>{INSERT TEXT SEARCH MODULE}</p>
+<pre><code>load file: lfw_names_gender_kg_min.csv
+Name, Images, Gender, Description
+</code></pre>
+</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_feature.jpg' alt='Eighteen of the 5,749 people in the Labeled Faces in the Wild Dataset. The most widely used face dataset for benchmarking commercial face recognition algorithms.'><div class='caption'>Eighteen of the 5,749 people in the Labeled Faces in the Wild Dataset. The most widely used face dataset for benchmarking commercial face recognition algorithms.</div></div></section><section><h2>Intro</h2>
+<p>Three paragraphs describing the LFW dataset in a format that can be easily replicated for the other datasets. Nothing too custom. An analysis of the initial research papers with context relative to all the other dataset papers.</p>
+</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_montage_everyone_1920.jpg' alt=' all 5,749 people in the LFW Dataset sorted from most to least images collected.'><div class='caption'> all 5,749 people in the LFW Dataset sorted from most to least images collected.</div></div></section><section><h2>LFW by the Numbers</h2>
+<ul>
+<li>Was first published in 2007</li>
+<li>Developed out of a prior dataset from Berkeley called "Faces in the Wild" or "Names and Faces" [^lfw_original_paper]</li>
+<li>Includes 13,233 images and 5,749 different people [^lfw_website]</li>
+<li>There are about 3 men for every 1 woman (4,277 men and 1,472 women)[^lfw_website]</li>
+<li>The person with the most images is George W. Bush with 530</li>
+<li>Most people (70%) in the dataset have only 1 image</li>
+<li>There are 1,680 people in the dataset with 2 or more images [^lfw_website]</li>
+<li>Two out of 4 of the original authors received funding from the Office of Director of National Intelligence and IARPA for their 2016 LFW survey follow up report </li>
+<li>The LFW dataset includes over 500 actors, 30 models, 10 presidents, 24 football players, 124 basketball players, 11 kings, and 2 queens</li>
+<li>In all the LFW publications provided by the authors the words "ethics", "consent", and "privacy" appear 0 times [^lfw_original_paper], [^lfw_survey], [^lfw_tech_report] , [^lfw_website]</li>
+<li>The word "future" appears 71 times</li>
+</ul>
+<h1>Facts</h1>
+<ul>
+<li>Was created for the purpose of improving "unconstrained face recognition" [^lfw_original_paper]</li>
+<li>All images in LFW were obtained "in the wild" meaning without any consent from the subject or from the photographer</li>
+<li>The faces were detected using the Viola-Jones haarcascade face detector [^lfw_website] [^lfw_survey]</li>
+<li>Is considered the "most popular benchmark for face recognition" [^lfw_baidu]</li>
+<li>Is "the most widely used evaluation set in the field of facial recognition" [^lfw_pingan]</li>
+<li>Is used by several of the largest tech companies in the world including "Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong." [^lfw_pingan]</li>
+</ul>
+<p>need citations</p>
+<ul>
+<li>All images were copied from Yahoo News between 2002 - 2004 [^lfw_original_paper]</li>
+<li>SenseTime, who has relied on LFW for benchmarking their facial recognition performance, is the leading provider of surveillance to the Chinese Government (need citation)</li>
+</ul>
+</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_montage_top1_640.jpg' alt=' former President George W. Bush'><div class='caption'> former President George W. Bush</div></div>
+<div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_montage_top2_4_640.jpg' alt=' Colin Powell (236), Tony Blair (144), and Donald Rumsfeld (121)'><div class='caption'> Colin Powell (236), Tony Blair (144), and Donald Rumsfeld (121)</div></div></section><section><h2>People and Companies using the LFW Dataset</h2>
+<p>This section describes who is using the dataset and for what purposes. It should include specific examples of people or companies with citations and screenshots. This section is followed up by the graph, the map, and then the supplementary material.</p>
+<p>The LFW dataset is used by numerous companies for <a href="about/glossary#benchmarking">benchmarking</a> algorithms and in some cases <a href="about/glossary#training">training</a>. According to the benchmarking results page [^lfw_results] provided by the authors, over 2 dozen companies have contributed their benchmark results.</p>
+<p>According to BiometricUpdate.com [^lfw_pingan], LFW is "the most widely used evaluation set in the field of facial recognition, LFW attracts a few dozen teams from around the globe including Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong."</p>
+<p>According to researchers at the Baidu Research – Institute of Deep Learning "LFW has been the most popular evaluation benchmark for face recognition, and played a very important role in facilitating the face recognition society to improve algorithm. [^lfw_baidu]."</p>
+<p>In addition to commercial use as an evaluation tool, all of the faces in the LFW dataset are prepackaged into a popular machine learning code framework called scikit-learn.</p>
+<pre><code>load file: lfw_commercial_use.csv
+name_display,company_url,example_url,country,description
+</code></pre>
+<table>
+<thead><tr>
+<th style="text-align:left">Company</th>
+<th style="text-align:left">Country</th>
+<th style="text-align:left">Industries</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td style="text-align:left"><a href="http://www.aratek.co">Aratek</a></td>
+<td style="text-align:left">China</td>
+<td style="text-align:left">Biometric sensors for telecom, civil identification, finance, education, POS, and transportation</td>
+</tr>
+<tr>
+<td style="text-align:left"><a href="http://www.aratek.co">Aratek</a></td>
+<td style="text-align:left">China</td>
+<td style="text-align:left">Biometric sensors for telecom, civil identification, finance, education, POS, and transportation</td>
+</tr>
+<tr>
+<td style="text-align:left"><a href="http://www.aratek.co">Aratek</a></td>
+<td style="text-align:left">China</td>
+<td style="text-align:left">Biometric sensors for telecom, civil identification, finance, education, POS, and transportation</td>
+</tr>
+</tbody>
+</table>
+<p>Add 2-4 screenshots of companies mentioning LFW here</p>
+</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_screenshot_01.jpg' alt=' "PING AN Tech facial recognition receives high score in latest LFW test results"'><div class='caption'> "PING AN Tech facial recognition receives high score in latest LFW test results"</div></div>
+<div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_screenshot_02.jpg' alt=' "Face Recognition Performance in LFW benchmark"'><div class='caption'> "Face Recognition Performance in LFW benchmark"</div></div>
+<div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_screenshot_03.jpg' alt=' "The 1st place in face verification challenge, LFW"'><div class='caption'> "The 1st place in face verification challenge, LFW"</div></div></section><section><p>In benchmarking, companies use a dataset to evaluate their algorithms which are typically trained on other data. After training, researchers will use LFW as a benchmark to compare results with other algorithms.</p>
+<p>For example, Baidu (est. net worth $13B) uses LFW to report results for their "Targeting Ultimate Accuracy: Face Recognition via Deep Embedding". According to the three Baidu researchers who produced the paper:</p>
+<h2>Citations</h2>
+<p>Overall, LFW has at least 456 citations from 123 countries. Sed ut perspiciatis, unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam eaque ipsa, quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt, explicabo. Nemo enim ipsam voluptatem, quia voluptas sit, aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos.</p>
+<p>Sed ut perspiciatis, unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam eaque ipsa, quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt, explicabo. Nemo enim ipsam voluptatem, quia voluptas sit, aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos.</p>
+</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/temp_graph.jpg' alt='Distribution of citations per year per country for the top 5 countries with citations for the LFW Dataset'><div class='caption'>Distribution of citations per year per country for the top 5 countries with citations for the LFW Dataset</div></div></section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/temp_map.jpg' alt='Geographic distributions of citations for the LFW Dataset'><div class='caption'>Geographic distributions of citations for the LFW Dataset</div></div></section><section><h2>Conclusion</h2>
+<p>The LFW face recognition training and evaluation dataset is a historically important face dataset as it was the first popular dataset to be created entirely from Internet images, paving the way for a global trend towards downloading anyone’s face from the Internet and adding it to a dataset. As will be evident with other datasets, LFW’s approach has now become the norm.</p>
+<p>For all the 5,000 people in this dataset, their faces are forever a part of facial recognition history. It would be impossible to remove anyone from the dataset because it is so ubiquitous. For the rest of their lives and forever after, these 5,000 people will continue to be used for training facial recognition surveillance.</p>
+<h2>Right to Removal</h2>
+<p>If you are affected by disclosure of your identity in this dataset please do contact the authors. Many have stated that they are willing to remove images upon request. The authors of the LFW dataset provide the following email for inquiries:</p>
+<p>You can use the following message to request removal from the dataset:</p>
+<p>To: Gary Huang <a href="mailto:gbhuang@cs.umass.edu">gbhuang@cs.umass.edu</a></p>
+<p>Subject: Request for Removal from LFW Face Dataset</p>
+<p>Dear [researcher name],</p>
+<p>I am writing to you about the "Labeled Faces in The Wild Dataset". Recently I discovered that your dataset includes my identity and I no longer wish to be included in your dataset.</p>
+<p>The dataset is being used by thousands of companies around the world to improve facial recognition software including usage by governments for the purpose of law enforcement, national security, tracking consumers in retail environments, and tracking individuals through public spaces.</p>
+<p>My name as it appears in your dataset is [your name]. Please remove all images from your dataset and inform your newsletter subscribers to likewise update their copies.</p>
+<p>- [your name]</p>
+<hr>
+<h2>Supplementary Data</h2>
+<p>Researchers, journalists, and other interested parties can explore the supplementary citation data below.</p>
+<table>
+<thead><tr>
+<th style="text-align:left">Title</th>
+<th style="text-align:left">Organization</th>
+<th style="text-align:left">Country</th>
+<th style="text-align:left">Type</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td style="text-align:left">3D-aided face recognition from videos</td>
+<td style="text-align:left">University of Lyon</td>
+<td style="text-align:left">France</td>
+<td style="text-align:left">edu</td>
+</tr>
+<tr>
+<td style="text-align:left">A Community Detection Approach to Cleaning Extremely Large Face Database</td>
+<td style="text-align:left">National University of Defense Technology, China</td>
+<td style="text-align:left">China</td>
+<td style="text-align:left">edu</td>
+</tr>
+<tr>
+<td style="text-align:left">3D-aided face recognition from videos</td>
+<td style="text-align:left">University of Lyon</td>
+<td style="text-align:left">France</td>
+<td style="text-align:left">edu</td>
+</tr>
+<tr>
+<td style="text-align:left">3D-aided face recognition from videos</td>
+<td style="text-align:left">University of Lyon</td>
+<td style="text-align:left">France</td>
+<td style="text-align:left">edu</td>
+</tr>
+<tr>
+<td style="text-align:left">3D-aided face recognition from videos</td>
+<td style="text-align:left">University of Lyon</td>
+<td style="text-align:left">France</td>
+<td style="text-align:left">edu</td>
+</tr>
+<tr>
+<td style="text-align:left">3D-aided face recognition from videos</td>
+<td style="text-align:left">University of Lyon</td>
+<td style="text-align:left">France</td>
+<td style="text-align:left">edu</td>
+</tr>
+<tr>
+<td style="text-align:left">3D-aided face recognition from videos</td>
+<td style="text-align:left">University of Lyon</td>
+<td style="text-align:left">France</td>
+<td style="text-align:left">edu</td>
+</tr>
+<tr>
+<td style="text-align:left">3D-aided face recognition from videos</td>
+<td style="text-align:left">University of Lyon</td>
+<td style="text-align:left">France</td>
+<td style="text-align:left">edu</td>
+</tr>
+<tr>
+<td style="text-align:left">3D-aided face recognition from videos</td>
+<td style="text-align:left">University of Lyon</td>
+<td style="text-align:left">France</td>
+<td style="text-align:left">edu</td>
+</tr>
+<tr>
+<td style="text-align:left">3D-aided face recognition from videos</td>
+<td style="text-align:left">University of Lyon</td>
+<td style="text-align:left">France</td>
+<td style="text-align:left">edu</td>
+</tr>
+<tr>
+<td style="text-align:left">3D-aided face recognition from videos</td>
+<td style="text-align:left">University of Lyon</td>
+<td style="text-align:left">France</td>
+<td style="text-align:left">edu</td>
+</tr>
+<tr>
+<td style="text-align:left">3D-aided face recognition from videos</td>
+<td style="text-align:left">University of Lyon</td>
+<td style="text-align:left">France</td>
+<td style="text-align:left">edu</td>
+</tr>
+<tr>
+<td style="text-align:left">3D-aided face recognition from videos</td>
+<td style="text-align:left">University of Lyon</td>
+<td style="text-align:left">France</td>
+<td style="text-align:left">edu</td>
+</tr>
+<tr>
+<td style="text-align:left">3D-aided face recognition from videos</td>
+<td style="text-align:left">University of Lyon</td>
+<td style="text-align:left">France</td>
+<td style="text-align:left">edu</td>
+</tr>
+<tr>
+<td style="text-align:left">3D-aided face recognition from videos</td>
+<td style="text-align:left">University of Lyon</td>
+<td style="text-align:left">France</td>
+<td style="text-align:left">edu</td>
+</tr>
+</tbody>
+</table>
+<h2>Code</h2>
+<pre><code class="lang-python">#!/usr/bin/python
+
+import numpy as np
+from sklearn.datasets import fetch_lfw_people
+import imageio
+import imutils
+
+# download LFW dataset (first run takes a while)
+lfw_people = fetch_lfw_people(min_faces_per_person=1, resize=1, color=True, funneled=False)
+
+# introspect dataset
+n_samples, h, w, c = lfw_people.images.shape
+print(&#39;{:,} images at {}x{}&#39;.format(n_samples, w, h))
+cols, rows = (176, 76)
+n_ims = cols * rows
+
+# build montages
+im_scale = 0.5
+ims = lfw_people.images[:n_ims]
+montages = imutils.build_montages(ims, (int(w*im_scale), int(h*im_scale)), (cols, rows))
+montage = montages[0]
+
+# save full montage image
+imageio.imwrite(&#39;lfw_montage_full.png&#39;, montage)
+
+# make a smaller version
+montage_960 = imutils.resize(montage, width=960)
+imageio.imwrite(&#39;lfw_montage_960.jpg&#39;, montage_960)
+</code></pre>
+<h2>Disclaimer</h2>
+<p>MegaPixels is an educational art project designed to encourage discourse about facial recognition datasets. Any ethical or legal issues should be directed to the researchers' parent organizations. Except where necessary for contact or clarity, the names of researchers have been substituted by their parent organization. In no way does this project aim to vilify researchers who produced the datasets.</p>
+<p>Read more about <a href="about/code-of-conduct">MegaPixels Code of Conduct</a></p>
+<div class="footnotes">
+<hr>
+<ol></ol>
+</div>
+</section>
+
+ </div>
+ <footer>
+ <div>
+ <a href="/">MegaPixels.cc</a>
+ <a href="/about/disclaimer/">Disclaimer</a>
+ <a href="/about/terms/">Terms of Use</a>
+ <a href="/about/privacy/">Privacy</a>
+ <a href="/about/">About</a>
+ <a href="/about/team/">Team</a>
+ </div>
+ <div>
+ MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
+ <a href="https://ahprojects.com">ahprojects.com</a>
+ </div>
+ </footer>
+</body>
+
+<script src="/assets/js/app/site.js"></script>
+</html> \ No newline at end of file
diff --git a/site/public/datasets/lfw/what/index.html b/site/public/datasets/lfw/what/index.html
new file mode 100644
index 00000000..ceafb35a
--- /dev/null
+++ b/site/public/datasets/lfw/what/index.html
@@ -0,0 +1,142 @@
+<!doctype html>
+<html>
+<head>
+ <title>MegaPixels</title>
+ <meta charset="utf-8" />
+ <meta name="author" content="Adam Harvey" />
+ <meta name="description" content="LFW: Labeled Faces in The Wild" />
+ <meta name="referrer" content="no-referrer" />
+ <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
+ <link rel='stylesheet' href='/assets/css/fonts.css' />
+ <link rel='stylesheet' href='/assets/css/css.css' />
+</head>
+<body>
+ <header>
+ <a class='slogan' href="/">
+ <div class='logo'></div>
+ <div class='site_name'>MegaPixels</div>
+ <span class='sub'>The Darkside of Datasets</span>
+ </a>
+ <div class='links'>
+ <a href="/search/">Face Search</a>
+ <a href="/datasets/">Datasets</a>
+ <a href="/research/">Research</a>
+ <a href="/about/">About</a>
+ </div>
+ </header>
+ <div class="content">
+
+ <section><h1>Labeled Faces in The Wild</h1>
+<ul>
+<li>Created 2007 (auto)</li>
+<li>Images 13,233 (auto)</li>
+<li>People 5,749 (auto)</li>
+<li>Created From Yahoo News images (auto)</li>
+<li>Analyzed and searchable (auto)</li>
+</ul>
+<p><em>Labeled Faces in The Wild</em> is amongst the most widely used facial recognition training datasets in the world and is the first facial recognition dataset [^lfw_names_faces] of its kind to be created entirely from Internet photos. It includes 13,233 images of 5,749 people that appeared on Yahoo News between 2002 - 2004.</p>
+</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_grid_preview.jpg' alt='Eight out of 5,749 people in the Labeled Faces in the Wild dataset. The face recognition training dataset is created entirely from photos downloaded from the Internet.'><div class='caption'>Eight out of 5,749 people in the Labeled Faces in the Wild dataset. The face recognition training dataset is created entirely from photos downloaded from the Internet.</div></div></section><section><h2>INTRO</h2>
+<p>It began in 2002. Researchers at University of Massachusetts Amherst were developing algorithms for facial recognition and they needed more data. Between 2002-2004 they scraped Yahoo News for images of public figures. Two years later they cleaned up the dataset and repackaged it as Labeled Faces in the Wild (LFW).</p>
+<p>Since then the LFW dataset has become one of the most widely used datasets used for evaluating face recognition algorithms. The associated research paper “Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments” has been cited 996 times reaching 45 different countries throughout the world.</p>
+<p>The faces come from news stories and are mostly celebrities from the entertainment industry, politicians, and villains. It’s a sampling of current affairs and breaking news that has come to pass. The images, detached from their original context, now serve a new purpose: to train, evaluate, and improve facial recognition.</p>
+<p>As the most widely used facial recognition dataset, it can be said that each individual in LFW has, in a small way, contributed to the current state of the art in facial recognition surveillance. John Cusack, Julianne Moore, Barry Bonds, Osama bin Laden, and even Moby are amongst these biometric pillars, exemplar faces provided the visual dimensions of a new computer vision future.</p>
+</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_a_to_c.jpg' alt='From Aaron Eckhart to Zydrunas Ilgauskas. A small sampling of the LFW dataset'><div class='caption'>From Aaron Eckhart to Zydrunas Ilgauskas. A small sampling of the LFW dataset</div></div></section><section><p>In addition to commercial use as an evaluation tool, all of the faces in LFW dataset are prepackaged into a popular machine learning code framework called scikit-learn.</p>
+<h2>Usage</h2>
+<pre><code class="lang-python">#!/usr/bin/python
+from matplotlib import pyplot as plt
+from sklearn.datasets import fetch_lfw_people
+lfw_people = fetch_lfw_people()
+lfw_person = lfw_people.images[0]
+plt.imshow(lfw_person)
+</code></pre>
+<h2>Commercial Use</h2>
+<p>The LFW dataset is used by numerous companies for benchmarking algorithms and in some cases training. According to the benchmarking results page [^lfw_results] provided by the authors, over 2 dozen companies have contributed their benchmark results.</p>
+<pre><code>load file: lfw_commercial_use.csv
+name_display,company_url,example_url,country,description
+</code></pre>
+<table>
+<thead><tr>
+<th style="text-align:left">Company</th>
+<th style="text-align:left">Country</th>
+<th style="text-align:left">Industries</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td style="text-align:left"><a href="http://www.aratek.co">Aratek</a></td>
+<td style="text-align:left">China</td>
+<td style="text-align:left">Biometric sensors for telecom, civil identification, finance, education, POS, and transportation</td>
+</tr>
+<tr>
+<td style="text-align:left"><a href="http://www.aratek.co">Aratek</a></td>
+<td style="text-align:left">China</td>
+<td style="text-align:left">Biometric sensors for telecom, civil identification, finance, education, POS, and transportation</td>
+</tr>
+<tr>
+<td style="text-align:left"><a href="http://www.aratek.co">Aratek</a></td>
+<td style="text-align:left">China</td>
+<td style="text-align:left">Biometric sensors for telecom, civil identification, finance, education, POS, and transportation</td>
+</tr>
+</tbody>
+</table>
+<p>Add 2-4 screenshots of companies mentioning LFW here</p>
+</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_screenshot_01.png' alt='ReadSense'><div class='caption'>ReadSense</div></div></section><section><p>In benchmarking, companies use a dataset to evaluate their algorithms which are typically trained on other data. After training, researchers will use LFW as a benchmark to compare results with other algorithms.</p>
+<p>For example, Baidu (est. net worth $13B) uses LFW to report results for their "Targeting Ultimate Accuracy: Face Recognition via Deep Embedding". According to the three Baidu researchers who produced the paper:</p>
+<blockquote><p>LFW has been the most popular evaluation benchmark for face recognition, and played a very important role in facilitating the face recognition society to improve algorithm. <sup class="footnote-ref" id="fnref-baidu_lfw"><a href="#fn-baidu_lfw">1</a></sup>.</p>
+</blockquote>
+<h2>Citations</h2>
+<table>
+<thead><tr>
+<th style="text-align:left">Title</th>
+<th style="text-align:left">Organization</th>
+<th style="text-align:left">Country</th>
+<th style="text-align:left">Type</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td style="text-align:left">3D-aided face recognition from videos</td>
+<td style="text-align:left">University of Lyon</td>
+<td style="text-align:left">France</td>
+<td style="text-align:left">edu</td>
+</tr>
+<tr>
+<td style="text-align:left">A Community Detection Approach to Cleaning Extremely Large Face Database</td>
+<td style="text-align:left">National University of Defense Technology, China</td>
+<td style="text-align:left">China</td>
+<td style="text-align:left">edu</td>
+</tr>
+</tbody>
+</table>
+<h2>Conclusion</h2>
+<p>The LFW face recognition training and evaluation dataset is a historically important face dataset as it was the first popular dataset to be created entirely from Internet images, paving the way for a global trend towards downloading anyone’s face from the Internet and adding it to a dataset. As will be evident with other datasets, LFW’s approach has now become the norm.</p>
+<p>For all the 5,000 people in this dataset, their faces are forever a part of facial recognition history. It would be impossible to remove anyone from the dataset because it is so ubiquitous. For the rest of their lives and forever after, these 5,000 people will continue to be used for training facial recognition surveillance.</p>
+<h2>Notes</h2>
+<p>According to BiometricUpdate.com<sup class="footnote-ref" id="fnref-biometric_update_lfw"><a href="#fn-biometric_update_lfw">2</a></sup>, LFW is "the most widely used evaluation set in the field of facial recognition, LFW attracts a few dozen teams from around the globe including Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong."</p>
+<div class="footnotes">
+<hr>
+<ol><li id="fn-baidu_lfw"><p>"Chinese tourist town uses face recognition as an entry pass". New Scientist. November 17, 2016. <a href="https://www.newscientist.com/article/2113176-chinese-tourist-town-uses-face-recognition-as-an-entry-pass/">https://www.newscientist.com/article/2113176-chinese-tourist-town-uses-face-recognition-as-an-entry-pass/</a><a href="#fnref-baidu_lfw" class="footnote">&#8617;</a></p></li>
+<li id="fn-biometric_update_lfw"><p>"PING AN Tech facial recognition receives high score in latest LFW test results". <a href="https://www.biometricupdate.com/201702/ping-an-tech-facial-recognition-receives-high-score-in-latest-lfw-test-results">https://www.biometricupdate.com/201702/ping-an-tech-facial-recognition-receives-high-score-in-latest-lfw-test-results</a><a href="#fnref-biometric_update_lfw" class="footnote">&#8617;</a></p></li>
+</ol>
+</div>
+</section>
+
+ </div>
+ <footer>
+ <div>
+ <a href="/">MegaPixels.cc</a>
+ <a href="/about/disclaimer/">Disclaimer</a>
+ <a href="/about/terms/">Terms of Use</a>
+ <a href="/about/privacy/">Privacy</a>
+ <a href="/about/">About</a>
+ <a href="/about/team/">Team</a>
+ </div>
+ <div>
+ MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
+ <a href="https://ahprojects.com">ahprojects.com</a>
+ </div>
+ </footer>
+</body>
+
+<script src="/assets/js/app/site.js"></script>
+</html> \ No newline at end of file
diff --git a/site/public/datasets/vgg_face2/index.html b/site/public/datasets/vgg_face2/index.html
new file mode 100644
index 00000000..24a1059b
--- /dev/null
+++ b/site/public/datasets/vgg_face2/index.html
@@ -0,0 +1,84 @@
+<!doctype html>
+<html>
+<head>
+ <title>MegaPixels</title>
+ <meta charset="utf-8" />
+ <meta name="author" content="Adam Harvey" />
+ <meta name="description" content="A large scale image dataset for face recognition" />
+ <meta name="referrer" content="no-referrer" />
+ <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
+ <link rel='stylesheet' href='/assets/css/fonts.css' />
+ <link rel='stylesheet' href='/assets/css/css.css' />
+</head>
+<body>
+ <header>
+ <a class='slogan' href="/">
+ <div class='logo'></div>
+ <div class='site_name'>MegaPixels</div>
+ <span class='sub'>The Darkside of Datasets</span>
+ </a>
+ <div class='links'>
+ <a href="/search/">Face Search</a>
+ <a href="/datasets/">Datasets</a>
+ <a href="/research/">Research</a>
+ <a href="/about/">About</a>
+ </div>
+ </header>
+ <div class="content">
+
+ <section><h1>VGG Faces2</h1>
+</section><section><div class='meta'><div><div class='gray'>Created</div><div>2018</div></div><div><div class='gray'>Images</div><div>3.3M</div></div><div><div class='gray'>People</div><div>9,000</div></div><div><div class='gray'>Created From</div><div>Scraping search engines</div></div><div><div class='gray'>Search available</div><div>[Searchable](#)</div></div></div></section><section><p>VGG Face2 is the updated version of the VGG Face dataset and now includes over 3.3M face images from over 9K people. The identities were selected by taking the top 500K identities in Google's Knowledge Graph of celebrities and then selecting only the names that yielded enough training images. The dataset was created in the UK but funded by Office of Director of National Intelligence in the United States.</p>
+<p>{INSERT IMAGE SEARCH MODULE}</p>
+<p>{INSERT TEXT SEARCH MODULE}</p>
+<pre><code>load file: lfw_names_gender_kg_min.csv
+Name, Images, Gender, Description
+</code></pre>
+<h2>VGG Face2 by the Numbers</h2>
+<ul>
+<li>1,331 actresses, 139 presidents</li>
+<li>3 husbands and 16 wives</li>
+<li>2 snooker players</li>
+<li>1 guru</li>
+<li>1 pornographic actress</li>
+<li>3 computer programmers</li>
+</ul>
+<h1>Names and descriptions</h1>
+<ul>
+<li>The original VGGF2 name list has been updated with the results returned from Google Knowledge</li>
+<li>Names with a similarity score greater than 0.75 were automatically updated. Scores computed using <code>import difflib; seq = difflib.SequenceMatcher(a=a.lower(), b=b.lower()); score = seq.ratio()</code></li>
+<li>The 97 names with a score of 0.75 or lower were manually reviewed and include name changes validated using Wikipedia.org results for names such as "Bruce Jenner" to "Caitlyn Jenner", spousal last-name changes, and discretionary changes to improve search results such as combining nicknames with full name when appropriate, for example changing "Aleksandar Petrović" to "Aleksandar 'Aco' Petrović" and minor changes such as "Mohammad Ali" to "Muhammad Ali"</li>
+<li>The <code>Description</code> text was automatically added when the Knowledge Graph score was greater than 250</li>
+</ul>
+<h1>TODO</h1>
+<ul>
+<li>create name list, and populate with Knowledge graph information like LFW</li>
+<li>make list of interesting number stats, by the numbers</li>
+<li>make list of interesting important facts</li>
+<li>write intro abstract</li>
+<li>write analysis of usage</li>
+<li>find examples, citations, and screenshots of useage</li>
+<li>find list of companies using it for table</li>
+<li>create montages of the dataset, like LFW</li>
+<li>create right to removal information</li>
+</ul>
+</section>
+
+ </div>
+ <footer>
+ <div>
+ <a href="/">MegaPixels.cc</a>
+ <a href="/about/disclaimer/">Disclaimer</a>
+ <a href="/about/terms/">Terms of Use</a>
+ <a href="/about/privacy/">Privacy</a>
+ <a href="/about/">About</a>
+ <a href="/about/team/">Team</a>
+ </div>
+ <div>
+ MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
+ <a href="https://ahprojects.com">ahprojects.com</a>
+ </div>
+ </footer>
+</body>
+
+<script src="/assets/js/app/site.js"></script>
+</html> \ No newline at end of file
diff --git a/site/public/datasets/vgg_faces2/index.html b/site/public/datasets/vgg_faces2/index.html
new file mode 100644
index 00000000..3f778f71
--- /dev/null
+++ b/site/public/datasets/vgg_faces2/index.html
@@ -0,0 +1,63 @@
+<!doctype html>
+<html>
+<head>
+ <title>MegaPixels</title>
+ <meta charset="utf-8" />
+ <meta name="author" content="Adam Harvey" />
+ <meta name="description" content="" />
+ <meta name="referrer" content="no-referrer" />
+ <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
+ <link rel='stylesheet' href='/assets/css/fonts.css' />
+ <link rel='stylesheet' href='/assets/css/css.css' />
+</head>
+<body>
+ <header>
+ <a class='slogan' href="/">
+ <div class='logo'></div>
+ <div class='site_name'>MegaPixels</div>
+ <span class='sub'>The Darkside of Datasets</span>
+ </a>
+ <div class='links'>
+ <a href="/search/">Face Search</a>
+ <a href="/datasets/">Datasets</a>
+ <a href="/research/">Research</a>
+ <a href="/about/">About</a>
+ </div>
+ </header>
+ <div class="content">
+
+ <section><h1>Labeled Faces in The Wild</h1>
+</section><section><div class='meta'><div><div class='gray'>Created</div><div>2007</div></div><div><div class='gray'>Images</div><div>13,233</div></div><div><div class='gray'>People</div><div>5,749</div></div><div><div class='gray'>Created From</div><div>Yahoo News images</div></div><div><div class='gray'>Search available</div><div>[Searchable](#)</div></div></div></section><section><p>Labeled Faces in The Wild is amongst the most widely used facial recognition training datasets in the world and is the first dataset of its kind to be created entirely from Internet photos. It includes 13,233 images of 5,749 people downloaded from the Internet, otherwise referred to by researchers as “The Wild”.</p>
+<h2>INTRO</h2>
+<p>It began in 2002. Researchers at University of Massachusetts Amherst were developing algorithms for facial recognition and they needed more data. Between 2002-2004 they scraped Yahoo News for images of public figures. Two years later they cleaned up the dataset and repackaged it as Labeled Faces in the Wild (LFW).</p>
+<p>Since then the LFW dataset has become one of the most widely used datasets used for evaluating face recognition algorithms. The associated research paper “Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments” has been cited 996 times reaching 45 different countries throughout the world.</p>
+<p>The faces come from news stories and are mostly celebrities from the entertainment industry, politicians, and villains. It’s a sampling of current affairs and breaking news that has come to pass. The images, detached from their original context, now serve a new purpose: to train, evaluate, and improve facial recognition.</p>
+<p>As the most widely used facial recognition dataset, it can be said that each individual in LFW has, in a small way, contributed to the current state of the art in facial recognition surveillance. John Cusack, Julianne Moore, Barry Bonds, Osama bin Laden, and even Moby are amongst these biometric pillars, exemplar faces provided the visual dimensions of a new computer vision future.</p>
+<h2>Commercial Use</h2>
+<p>The dataset is used by numerous companies for benchmarking algorithms. According to the benchmarking results page <sup class="footnote-ref" id="fnref-lfw_results"><a href="#fn-lfw_results">1</a></sup> provided by the authors, there are over 2 dozen commercial uses of the LFW face dataset.</p>
+<div class="footnotes">
+<hr>
+<ol><li id="fn-lfw_results"><p>"LFW Results". Accessed Dec 3, 2018. <a href="http://vis-www.cs.umass.edu/lfw/results.html">http://vis-www.cs.umass.edu/lfw/results.html</a><a href="#fnref-lfw_results" class="footnote">&#8617;</a></p></li>
+</ol>
+</div>
+</section>
+
+ </div>
+ <footer>
+ <div>
+ <a href="/">MegaPixels.cc</a>
+ <a href="/about/disclaimer/">Disclaimer</a>
+ <a href="/about/terms/">Terms of Use</a>
+ <a href="/about/privacy/">Privacy</a>
+ <a href="/about/">About</a>
+ <a href="/about/team/">Team</a>
+ </div>
+ <div>
+ MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
+ <a href="https://ahprojects.com">ahprojects.com</a>
+ </div>
+ </footer>
+</body>
+
+<script src="/assets/js/app/site.js"></script>
+</html> \ No newline at end of file
diff --git a/site/public/index.html b/site/public/index.html
new file mode 100644
index 00000000..91ff467a
--- /dev/null
+++ b/site/public/index.html
@@ -0,0 +1,78 @@
+<!doctype html>
+<html>
+<head>
+ <title>MegaPixels</title>
+ <meta charset="utf-8" />
+ <meta name="author" content="Adam Harvey" />
+ <meta name="description" content="" />
+ <meta name="referrer" content="no-referrer" />
+ <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
+ <link rel='stylesheet' href='/assets/css/fonts.css' />
+ <link rel='stylesheet' href='/assets/css/css.css' />
+</head>
+<body>
+ <header>
+ <a class='slogan' href="/">
+ <div class='logo'></div>
+ <div class='site_name'>MegaPixels</div>
+ <span class='sub'>The Darkside of Datasets</span>
+ </a>
+ <div class='links'>
+ <a href="/search/">Face Search</a>
+ <a href="/datasets/">Datasets</a>
+ <a href="/research/">Research</a>
+ <a href="/about/">About</a>
+ </div>
+ </header>
+ <div class="content">
+
+ <div class='hero'>
+ <div id="face_container">
+ <div class='currentFace'></div>
+ </div>
+ <div class='intro'>
+ <div class='headline'>
+ MegaPixels is an art project that explores the dark side of face recognition and the future of computer vision.
+ </div>
+
+ <div class='buttons'>
+ <a href="/datasets/"><button class='important'>Explore Datasets</button></a><a href="/analyze/"><button class='normal'>Analyze Your Face</button></a>
+ </div>
+
+ <div class='under'>
+ Made by Adam Harvey in partnership with Mozilla.<br/>
+ <a href='/about/'>Read more about MegaPixels</a>
+ </div>
+ </div>
+ </div>
+
+ <section><h2>Facial Recognition Datasets</h2>
+<p>Regular Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.</p>
+<h3>Summary</h3>
+</section><section><div class='meta'><div><div class='gray'>Found</div><div>275 datasets</div></div><div><div class='gray'>Created between</div><div>1993-2018</div></div><div><div class='gray'>Smallest dataset</div><div>20 images</div></div><div><div class='gray'>Largest dataset</div><div>10,000,000 images</div></div></div></section><section><div class='meta'><div><div class='gray'>Highest resolution faces</div><div>450x500 (Unconstrained College Students)</div></div><div><div class='gray'>Lowest resolution faces</div><div>16x20 pixels (QMUL SurvFace)</div></div></div></section><section></section>
+
+
+ </div>
+ <footer>
+ <div>
+ <a href="/">MegaPixels.cc</a>
+ <a href="/about/disclaimer/">Disclaimer</a>
+ <a href="/about/terms/">Terms of Use</a>
+ <a href="/about/privacy/">Privacy</a>
+ <a href="/about/">About</a>
+ <a href="/about/team/">Team</a>
+ </div>
+ <div>
+ MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
+ <a href="https://ahprojects.com">ahprojects.com</a>
+ </div>
+ </footer>
+</body>
+
+<script src="/assets/js/vendor/three.min.js"></script>
+<script src="/assets/js/vendor/three.meshline.js"></script>
+<script src="/assets/js/vendor/oktween.js"></script>
+<script src="/assets/js/app/face.js"></script>
+
+<script src="/assets/js/app/site.js"></script>
+</html> \ No newline at end of file
diff --git a/site/public/research/00_introduction/index.html b/site/public/research/00_introduction/index.html
new file mode 100644
index 00000000..8f598f5b
--- /dev/null
+++ b/site/public/research/00_introduction/index.html
@@ -0,0 +1,86 @@
+<!doctype html>
+<html>
+<head>
+ <title>MegaPixels</title>
+ <meta charset="utf-8" />
+ <meta name="author" content="Adam Harvey" />
+ <meta name="description" content="" />
+ <meta name="referrer" content="no-referrer" />
+ <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
+ <link rel='stylesheet' href='/assets/css/fonts.css' />
+ <link rel='stylesheet' href='/assets/css/css.css' />
+</head>
+<body>
+ <header>
+ <a class='slogan' href="/">
+ <div class='logo'></div>
+ <div class='site_name'>MegaPixels</div>
+ <span class='sub'>The Darkside of Datasets</span>
+ </a>
+ <div class='links'>
+ <a href="/search/">Face Search</a>
+ <a href="/datasets/">Datasets</a>
+ <a href="/research/">Research</a>
+ <a href="/about/">About</a>
+ </div>
+ </header>
+ <div class="content">
+
+ <section>
+ <h1>Untitled Page</h1>
+ <div class='meta'>
+ <div>
+ <div class='gray'>Posted</div>
+ <div>2018-12-31</div>
+ </div>
+ <div>
+ <div class='gray'>By</div>
+ <div>Adam Harvey</div>
+ </div>
+
+ </div>
+ </section>
+
+ <section><p>It was the early 2000s. Face recognition was new and no one seemed sure exactly how well it was going to perform in practice. In theory, face recognition was poised to be a game changer, a force multiplier, a strategic military advantage, a way to make cities safer and to secure borders. This was the future John Ashcroft demanded with the Total Information Awareness act of 2003 and that spooks had dreamed of for decades. It was a future that academics at Carnegie Mellon University and Colorado State University would help build. It was also a future that celebrities would play a significant role in building. And to the surprise of ordinary Internet users like myself and perhaps you, it was a future that millions of Internet users would unwittingly play a role in creating.</p>
+<p>Now the future has arrived and it doesn't make sense. Facial recognition works yet it doesn't actually work. Facial recognition is cheap and accessible but also expensive and out of control. Facial recognition research has achieved headline grabbing superhuman accuracies over 99.9% yet facial recognition is also dangerously inaccurate. During a trial installation at Sudkreuz station in Berlin in 2018, 20% of the matches were wrong, a number so low that it should not have any connection to law enforcement or justice. And in London, the Metropolitan police had been using facial recognition software that mistakenly identified an alarming 98% of people as criminals <sup class="footnote-ref" id="fnref-met_police"><a href="#fn-met_police">1</a></sup>, which perhaps is a crime itself.</p>
+<p>MegaPixels is an online art project that explores the history of facial recognition from the perspective of datasets. To paraphrase the artist Trevor Paglen, whoever controls the dataset controls the meaning. MegaPixels aims to unravel the meanings behind the data and expose the darker corners of the biometric industry that have contributed to its growth. MegaPixels does not start with a conclusion, a moralistic slant, or a</p>
+<p>Whether or not to build facial recognition was a question that can no longer be asked. As an outspoken critic of face recognition I've developed, and hopefully furthered, my understanding during the last 10 years I've spent working with computer vision. Though I initially disagreed, I've come to see technocratic perspective as a non-negotiable reality. As Oren (nytimes article) wrote in NYT Op-Ed "the horse is out of the barn" and the only thing we can do collectively or individually is to steer towards the least worse outcome. Computational communication has entered a new era and it's both exciting and frightening to explore the potentials and opportunities. In 1997 getting access to 1 teraFLOPS of computational power would have cost you $55 million and required a strategic partnership with the Department of Defense. At the time of writing, anyone can rent 1 teraFLOPS on a cloud GPU marketplace for less than $1/day. <sup class="footnote-ref" id="fnref-asci_option_red"><a href="#fn-asci_option_red">2</a></sup>.</p>
+<p>I hope that this project will illuminate the darker areas of strange world of facial recognition that have not yet received attention and encourage discourse in academic, industry, and . By no means do I believe discourse can save the day. Nor do I think creating artwork can. In fact, I'm not exactly sure what the outcome of this project will be. The project is not so much what I publish here but what happens after. This entire project is only a prologue.</p>
+<p>As McLuhan wrote, "You can't have a static, fixed position in the electric age". And in our hyper-connected age of mass surveillance, artificial intelligence, and unevenly distributed virtual futures the most irrational thing to be is rational. Increasingly the world is becoming a contradiction where people use surveillance to protest surveillance, use</p>
+<p>Like many projects, MegaPixels had spent years meandering between formats, unfeasible budgets, and was generally too niche of a subject. The basic idea for this project, as proposed to the original <a href="https://tacticaltech.org/projects/the-glass-room-nyc/">Glass Room</a> installation in 2016 in NYC, was to build an interactive mirror that showed people if they had been included in the <a href="/datasets/lfw">LFW</a> facial recognition dataset. The idea was based on my reaction to all the datasets I'd come across during research for the CV Dazzle project. I'd noticed strange datasets created for training and testing face detection algorithms. Most were created in laboratory settings and their interpretation of face data was very strict.</p>
+<p>About the name</p>
+<p>About the funding</p>
+<p>About me</p>
+<p>About the team</p>
+<p>Conclusion</p>
+<h2>for other post</h2>
+<p>It was the early 2000s. Face recognition was new and no one seemed sure how well it was going to perform in practice. In theory, face recognition was poised to be a game changer, a force multiplier, a strategic military advantage, a way to make cities safer and to secure the borders. It was the future that John Ashcroft demanded with the Total Information Awareness act of 2003. It was a future that academics helped build. It was a future that celebrities helped build. And it was a future that</p>
+<p>A decade earlier the Department of Homeland Security and the Counterdrug Technology Development Program Office initiated a feasibility study called FERET (FacE REcognition Technology) to "develop automatic face recognition capabilities that could be employed to assist security, intelligence, and law enforcement personnel in the performance of their duties [^feret_website]."</p>
+<p>One problem with FERET dataset was that the photos were in controlled settings. For face recognition to work it would have to be used in uncontrolled settings. Even newer datasets such as the Multi-PIE (Pose, Illumination, and Expression) from Carnegie Mellon University included only indoor photos of cooperative subjects. Not only were the photos completely unrealistic, CMU's Multi-Pie included only 18 individuals and cost $500 for academic use [^cmu_multipie_cost], took years to create, and required consent from every participant.</p>
+<div class="footnotes">
+<hr>
+<ol><li id="fn-met_police"><p>Sharman, Jon. "Metropolitan Police's facial recognition technology 98% inaccurate, figures show". 2018. <a href="https://www.independent.co.uk/news/uk/home-news/met-police-facial-recognition-success-south-wales-trial-home-office-false-positive-a8345036.html">https://www.independent.co.uk/news/uk/home-news/met-police-facial-recognition-success-south-wales-trial-home-office-false-positive-a8345036.html</a><a href="#fnref-met_police" class="footnote">&#8617;</a></p></li>
+<li id="fn-asci_option_red"><p>Calle, Dan. "Supercomputers". 1997. <a href="http://ei.cs.vt.edu/~history/SUPERCOM.Calle.HTML">http://ei.cs.vt.edu/~history/SUPERCOM.Calle.HTML</a><a href="#fnref-asci_option_red" class="footnote">&#8617;</a></p></li>
+</ol>
+</div>
+</section>
+
+ </div>
+ <footer>
+ <div>
+ <a href="/">MegaPixels.cc</a>
+ <a href="/about/disclaimer/">Disclaimer</a>
+ <a href="/about/terms/">Terms of Use</a>
+ <a href="/about/privacy/">Privacy</a>
+ <a href="/about/">About</a>
+ <a href="/about/team/">Team</a>
+ </div>
+ <div>
+ MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
+ <a href="https://ahprojects.com">ahprojects.com</a>
+ </div>
+ </footer>
+</body>
+
+<script src="/assets/js/app/site.js"></script>
+</html> \ No newline at end of file
diff --git a/site/public/research/01_from_1_to_100_pixels/index.html b/site/public/research/01_from_1_to_100_pixels/index.html
new file mode 100644
index 00000000..b4c85d00
--- /dev/null
+++ b/site/public/research/01_from_1_to_100_pixels/index.html
@@ -0,0 +1,95 @@
+<!doctype html>
+<html>
+<head>
+ <title>MegaPixels</title>
+ <meta charset="utf-8" />
+ <meta name="author" content="Adam Harvey" />
+ <meta name="description" content="High resolution insights from low resolution imagery" />
+ <meta name="referrer" content="no-referrer" />
+ <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
+ <link rel='stylesheet' href='/assets/css/fonts.css' />
+ <link rel='stylesheet' href='/assets/css/css.css' />
+</head>
+<body>
+ <header>
+ <a class='slogan' href="/">
+ <div class='logo'></div>
+ <div class='site_name'>MegaPixels</div>
+ <span class='sub'>The Darkside of Datasets</span>
+ </a>
+ <div class='links'>
+ <a href="/search/">Face Search</a>
+ <a href="/datasets/">Datasets</a>
+ <a href="/research/">Research</a>
+ <a href="/about/">About</a>
+ </div>
+ </header>
+ <div class="content">
+
+ <section>
+ <h1>From 1 to 100 Pixels</h1>
+ <div class='meta'>
+ <div>
+ <div class='gray'>Posted</div>
+ <div>2018-12-04</div>
+ </div>
+ <div>
+ <div class='gray'>By</div>
+ <div>Adam Harvey</div>
+ </div>
+
+ </div>
+ </section>
+
+ <section><h2>High resolution insights from low resolution data</h2>
+<p>This post will be about the meaning of "face". How do people define it? How to biometrics researchers define it? How has it changed during the last decade.</p>
+<p>What can you know from a very small amount of information?</p>
+<ul>
+<li>1 pixel grayscale</li>
+<li>2x2 pixels grayscale, font example</li>
+<li>4x4 pixels</li>
+<li>8x8 yotta yotta</li>
+<li>5x7 face recognition</li>
+<li>12x16 activity recognition</li>
+<li>6/5 (up to 124/106) pixels in height/width, and the average is 24/20 for QMUL SurvFace</li>
+<li>20x16 tiny faces paper</li>
+<li>20x20 MNIST handwritten images <a href="http://yann.lecun.com/exdb/mnist/">http://yann.lecun.com/exdb/mnist/</a></li>
+<li>24x24 haarcascade detector idealized images</li>
+<li>32x32 CIFAR image dataset</li>
+<li>40x40 can do emotion detection, face recognition at scale, 3d modeling of the face. include datasets with faces at this resolution including pedestrian.</li>
+<li>need more material from 60-100</li>
+<li>60x60 show how texture emerges and pupils, eye color, higher resolution of features and compare to lower resolution faces</li>
+<li>100x100 0.5% of one Instagram photo</li>
+</ul>
+<p>Find specific cases of facial resolution being used in legal cases, forensic investigations, or military footage</p>
+<p>Research</p>
+<ul>
+<li>NIST report on sres states several resolutions</li>
+<li>"Results show that the tested face recognition systems yielded similar performance for query sets with eye-to-eye distance from 60 pixels to 30 pixels" <sup class="footnote-ref" id="fnref-nist_sres"><a href="#fn-nist_sres">1</a></sup></li>
+</ul>
+<div class="footnotes">
+<hr>
+<ol><li id="fn-nist_sres"><p>NIST 906932. Performance Assessment of Face Recognition Using Super-Resolution. Shuowen Hu, Robert Maschal, S. Susan Young, Tsai Hong Hong, Jonathon P. Phillips<a href="#fnref-nist_sres" class="footnote">&#8617;</a></p></li>
+</ol>
+</div>
+</section>
+
+ </div>
+ <footer>
+ <div>
+ <a href="/">MegaPixels.cc</a>
+ <a href="/about/disclaimer/">Disclaimer</a>
+ <a href="/about/terms/">Terms of Use</a>
+ <a href="/about/privacy/">Privacy</a>
+ <a href="/about/">About</a>
+ <a href="/about/team/">Team</a>
+ </div>
+ <div>
+ MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
+ <a href="https://ahprojects.com">ahprojects.com</a>
+ </div>
+ </footer>
+</body>
+
+<script src="/assets/js/app/site.js"></script>
+</html> \ No newline at end of file
diff --git a/site/public/research/from_1_to_100_pixels/index.html b/site/public/research/from_1_to_100_pixels/index.html
new file mode 100644
index 00000000..751e885b
--- /dev/null
+++ b/site/public/research/from_1_to_100_pixels/index.html
@@ -0,0 +1,101 @@
+<!doctype html>
+<html>
+<head>
+ <title>MegaPixels</title>
+ <meta charset="utf-8" />
+ <meta name="author" content="Adam Harvey<br> Berit Gilma<br> Matthew Stender" />
+ <meta name="description" content="High resolution insights from low resolution imagery" />
+ <meta name="referrer" content="no-referrer" />
+ <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
+ <link rel='stylesheet' href='/assets/css/fonts.css' />
+ <link rel='stylesheet' href='/assets/css/css.css' />
+</head>
+<body>
+ <header>
+ <a class='slogan' href="/">
+ <div class='logo'></div>
+ <div class='site_name'>MegaPixels</div>
+ <span class='sub'>The Darkside of Datasets</span>
+ </a>
+ <div class='links'>
+ <a href="/search">Face Search</a>
+ <a href="/datasets">Datasets</a>
+ <a href="/research/from_1_to_100_pixels/">Research</a>
+ <a href="/about">About</a>
+ </div>
+ </header>
+ <div class="content">
+
+ <section>
+ <h1>From 1 to 100 Pixels</h1>
+ <div class='meta'>
+ <div>
+ <div class='gray'>Posted</div>
+ <div>2018-12-04</div>
+ </div>
+ <div>
+ <div class='gray'>By</div>
+ <div>Adam Harvey<br> Berit Gilma<br> Matthew Stender</div>
+ </div>
+
+ </div>
+ </section>
+
+ <section><h2>High resolution insights from low resolution data</h2>
+<p>This post will be about the meaning of "face". How do people define it? How to biometrics researchers define it? How has it changed during the last decade.</p>
+<p>What can you know from a very small amount of information?</p>
+<ul>
+<li>1 pixel grayscale</li>
+<li>2x2 pixels grayscale, font example</li>
+<li>4x4 pixels</li>
+<li>8x8 yotta yotta</li>
+<li>5x7 face recognition</li>
+<li>12x16 activity recognition</li>
+<li>6/5 (up to 124/106) pixels in height/width, and the average is 24/20 for QMUL SurvFace</li>
+<li>20x16 tiny faces paper</li>
+<li>20x20 MNIST handwritten images <a href="http://yann.lecun.com/exdb/mnist/">http://yann.lecun.com/exdb/mnist/</a></li>
+<li>24x24 haarcascade detector idealized images</li>
+<li>32x32 CIFAR image dataset</li>
+<li>40x40 can do emotion detection, face recognition at scale, 3d modeling of the face. include datasets with faces at this resolution including pedestrian.</li>
+<li>need more material from 60-100</li>
+<li>60x60 show how texture emerges and pupils, eye color, higher resolution of features and compare to lower resolution faces</li>
+<li>100x100 0.5% of one Instagram photo</li>
+</ul>
+<p>Find specific cases of facial resolution being used in legal cases, forensic investigations, or military footage</p>
+<p>Research</p>
+<ul>
+<li>NIST report on sres states several resolutions</li>
+<li>"Results show that the tested face recognition systems yielded similar performance for query sets with eye-to-eye distance from 60 pixels to 30 pixels" <sup class="footnote-ref" id="fnref-nist_sres"><a href="#fn-nist_sres">1</a></sup></li>
+</ul>
+<div class="footnotes">
+<hr>
+<ol><li id="fn-nist_sres"><p>NIST 906932. Performance Assessment of Face Recognition Using Super-Resolution. Shuowen Hu, Robert Maschal, S. Susan Young, Tsai Hong Hong, Jonathon P. Phillips<a href="#fnref-nist_sres" class="footnote">&#8617;</a></p></li>
+</ol>
+</div>
+</section>
+
+ <section>
+ <h3>MORE RESEARCH</h3>
+ <div class='blogposts'>
+
+ </div>
+ </section>
+
+ </div>
+ <footer>
+ <div>
+ <a href="/">MegaPixels.cc</a>
+ <a href="/about/disclaimer/">Disclaimer</a>
+ <a href="/about/terms/">Terms of Use</a>
+ <a href="/about/privacy/">Privacy</a>
+ <a href="/about/">About</a>
+ <a href="/about/team/">Team</a>
+ </div>
+ <div>
+ MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
+ <a href="https://ahprojects.com">ahprojects.com</a>
+ </div>
+ </footer>
+</body>
+<script src="/assets/js/app/site.js"></script>
+</html> \ No newline at end of file
diff --git a/site/public/research/index.html b/site/public/research/index.html
new file mode 100644
index 00000000..59a5fee9
--- /dev/null
+++ b/site/public/research/index.html
@@ -0,0 +1,51 @@
+<!doctype html>
+<html>
+<head>
+ <title>MegaPixels</title>
+ <meta charset="utf-8" />
+ <meta name="author" content="Adam Harvey" />
+ <meta name="description" content="Research blog" />
+ <meta name="referrer" content="no-referrer" />
+ <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
+ <link rel='stylesheet' href='/assets/css/fonts.css' />
+ <link rel='stylesheet' href='/assets/css/css.css' />
+</head>
+<body>
+ <header>
+ <a class='slogan' href="/">
+ <div class='logo'></div>
+ <div class='site_name'>MegaPixels</div>
+ <span class='sub'>The Darkside of Datasets</span>
+ </a>
+ <div class='links'>
+ <a href="/search/">Face Search</a>
+ <a href="/datasets/">Datasets</a>
+ <a href="/research/">Research</a>
+ <a href="/about/">About</a>
+ </div>
+ </header>
+ <div class="content">
+
+ <section><h1>Research Blog</h1>
+<h2>The darkside of datasets and the future of computer vision</h2>
+</section><div class='research_index'><a href='/research/00_introduction/'><section class='wide'><img src='data:image/gif;base64,R0lGODlhAQABAAAAACH5BAEKAAEALAAAAAABAAEAAAICTAEAOw==' alt='Research post' /><section><h1>Untitled Page</h1><h2></h2></section></section></a><a href='/research/01_from_1_to_100_pixels/'><section class='wide'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/research/01_from_1_to_100_pixels/assets/intro.jpg' alt='Research post' /><section><h1>From 1 to 100 Pixels</h1><h2>Photographs are for romantics. For the rest of us, it's all about data. And a photo contains a massive amount of information about who you are.</h2></section></section></a></div>
+
+ </div>
+ <footer>
+ <div>
+ <a href="/">MegaPixels.cc</a>
+ <a href="/about/disclaimer/">Disclaimer</a>
+ <a href="/about/terms/">Terms of Use</a>
+ <a href="/about/privacy/">Privacy</a>
+ <a href="/about/">About</a>
+ <a href="/about/team/">Team</a>
+ </div>
+ <div>
+ MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
+ <a href="https://ahprojects.com">ahprojects.com</a>
+ </div>
+ </footer>
+</body>
+
+<script src="/assets/js/app/site.js"></script>
+</html> \ No newline at end of file
diff --git a/site/templates/home.html b/site/templates/home.html
new file mode 100644
index 00000000..59f8cf76
--- /dev/null
+++ b/site/templates/home.html
@@ -0,0 +1,33 @@
+{% extends 'layout.html' %}
+
+{% block content %}
+ <div class='hero'>
+ <div id="face_container">
+ <div class='currentFace'></div>
+ </div>
+ <div class='intro'>
+ <div class='headline'>
+ MegaPixels is an art project that explores the dark side of face recognition and the future of computer vision.
+ </div>
+
+ <div class='buttons'>
+ <a href="/datasets/"><button class='important'>Explore Datasets</button></a><a href="/analyze/"><button class='normal'>Analyze Your Face</button></a>
+ </div>
+
+ <div class='under'>
+ Made by Adam Harvey in partnership with Mozilla.<br/>
+ <a href='/about/'>Read more about MegaPixels</a>
+ </div>
+ </div>
+ </div>
+
+ {{ content }}
+
+{% endblock %}
+
+{% block scripts %}
+<script src="/assets/js/vendor/three.min.js"></script>
+<script src="/assets/js/vendor/three.meshline.js"></script>
+<script src="/assets/js/vendor/oktween.js"></script>
+<script src="/assets/js/app/face.js"></script>
+{% endblock %}
diff --git a/site/templates/layout.html b/site/templates/layout.html
new file mode 100644
index 00000000..605f9788
--- /dev/null
+++ b/site/templates/layout.html
@@ -0,0 +1,47 @@
+<!doctype html>
+<html>
+<head>
+ <title>MegaPixels</title>
+ <meta charset="utf-8" />
+ <meta name="author" content="{{ metadata.authors }}" />
+ <meta name="description" content="{{ metadata.desc }}" />
+ <meta name="referrer" content="no-referrer" />
+ <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
+ <link rel='stylesheet' href='/assets/css/fonts.css' />
+ <link rel='stylesheet' href='/assets/css/css.css' />
+</head>
+<body>
+ <header>
+ <a class='slogan' href="/">
+ <div class='logo'></div>
+ <div class='site_name'>MegaPixels</div>
+ <span class='sub'>The Darkside of Datasets</span>
+ </a>
+ <div class='links'>
+ <a href="/search/">Face Search</a>
+ <a href="/datasets/">Datasets</a>
+ <a href="/research/">Research</a>
+ <a href="/about/">About</a>
+ </div>
+ </header>
+ <div class="content">
+ {% block content %}{% endblock %}
+ </div>
+ <footer>
+ <div>
+ <a href="/">MegaPixels.cc</a>
+ <a href="/about/disclaimer/">Disclaimer</a>
+ <a href="/about/terms/">Terms of Use</a>
+ <a href="/about/privacy/">Privacy</a>
+ <a href="/about/">About</a>
+ <a href="/about/team/">Team</a>
+ </div>
+ <div>
+ MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
+ <a href="https://ahprojects.com">ahprojects.com</a>
+ </div>
+ </footer>
+</body>
+{% block scripts %}{% endblock %}
+<script src="/assets/js/app/site.js"></script>
+</html> \ No newline at end of file
diff --git a/site/templates/page.html b/site/templates/page.html
new file mode 100644
index 00000000..124dd305
--- /dev/null
+++ b/site/templates/page.html
@@ -0,0 +1,5 @@
+{% extends 'layout.html' %}
+
+{% block content %}
+ {{ content }}
+{% endblock %}
diff --git a/site/templates/paper.html b/site/templates/paper.html
new file mode 100644
index 00000000..6699bd0f
--- /dev/null
+++ b/site/templates/paper.html
@@ -0,0 +1,4 @@
+{% extends 'layout.html' %}
+
+{% block content %}
+{% endblock %}
diff --git a/site/templates/research.html b/site/templates/research.html
new file mode 100644
index 00000000..0bb9fa30
--- /dev/null
+++ b/site/templates/research.html
@@ -0,0 +1,23 @@
+{% extends 'layout.html' %}
+
+{% block content %}
+ <section>
+ <h1>{{ metadata.title }}</h1>
+ <div class='meta'>
+ <div>
+ <div class='gray'>Posted</div>
+ <div>{{ metadata.published }}</div>
+ </div>
+ <div>
+ <div class='gray'>By</div>
+ <div>{{ metadata.author_html }}</div>
+ </div>
+ {% if metadata.datasets %}<div>
+ <div class='gray'>Datasets</div>
+ <div>{{ metadata.datasets }}</div>
+ </div>{% endif %}
+ </div>
+ </section>
+
+ {{ content }}
+{% endblock %}
diff --git a/util.py b/util.py
deleted file mode 100644
index dae3f67c..00000000
--- a/util.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import os
-import csv
-import simplejson as json
-
-def read_citation_list(index=0):
- filename = './datasets/citations.csv'
- if index > 0:
- fn, ext = os.path.splitext(filename)
- filename = fn + '-' + str(index) + ext
- with open(filename, 'r') as f:
- reader = csv.reader(f)
- lines = list(reader)
- keys = lines[0]
- lines = lines[1:]
- return keys, lines
-
-def read_json(fn):
- with open(fn, 'r') as json_file:
- return json.load(json_file)
-
-def write_json(fn, data):
- with open(fn, 'w') as outfile:
- json.dump(data, outfile)
-
-def write_csv(fn, keys, rows):
- with open(fn, 'w') as f:
- writer = csv.writer(f)
- if keys is not None:
- writer.writerow(keys)
- for row in rows:
- writer.writerow(row)